summaryrefslogtreecommitdiff
path: root/drivers
diff options
context:
space:
mode:
Diffstat (limited to 'drivers')
-rw-r--r--drivers/Makefile4
-rw-r--r--drivers/accel/Kconfig8
-rw-r--r--drivers/accel/Makefile4
-rw-r--r--drivers/accel/habanalabs/Kconfig (renamed from drivers/misc/habanalabs/Kconfig)8
-rw-r--r--drivers/accel/habanalabs/Makefile (renamed from drivers/misc/habanalabs/Makefile)2
-rw-r--r--drivers/accel/habanalabs/common/Makefile (renamed from drivers/misc/habanalabs/common/Makefile)0
-rw-r--r--drivers/accel/habanalabs/common/asid.c (renamed from drivers/misc/habanalabs/common/asid.c)0
-rw-r--r--drivers/accel/habanalabs/common/command_buffer.c (renamed from drivers/misc/habanalabs/common/command_buffer.c)22
-rw-r--r--drivers/accel/habanalabs/common/command_submission.c (renamed from drivers/misc/habanalabs/common/command_submission.c)133
-rw-r--r--drivers/accel/habanalabs/common/context.c (renamed from drivers/misc/habanalabs/common/context.c)0
-rw-r--r--drivers/accel/habanalabs/common/debugfs.c (renamed from drivers/misc/habanalabs/common/debugfs.c)0
-rw-r--r--drivers/accel/habanalabs/common/decoder.c (renamed from drivers/misc/habanalabs/common/decoder.c)0
-rw-r--r--drivers/accel/habanalabs/common/device.c (renamed from drivers/misc/habanalabs/common/device.c)109
-rw-r--r--drivers/accel/habanalabs/common/firmware_if.c (renamed from drivers/misc/habanalabs/common/firmware_if.c)189
-rw-r--r--drivers/accel/habanalabs/common/habanalabs.h (renamed from drivers/misc/habanalabs/common/habanalabs.h)122
-rw-r--r--drivers/accel/habanalabs/common/habanalabs_drv.c (renamed from drivers/misc/habanalabs/common/habanalabs_drv.c)6
-rw-r--r--drivers/accel/habanalabs/common/habanalabs_ioctl.c (renamed from drivers/misc/habanalabs/common/habanalabs_ioctl.c)101
-rw-r--r--drivers/accel/habanalabs/common/hw_queue.c (renamed from drivers/misc/habanalabs/common/hw_queue.c)0
-rw-r--r--drivers/accel/habanalabs/common/hwmon.c (renamed from drivers/misc/habanalabs/common/hwmon.c)0
-rw-r--r--drivers/accel/habanalabs/common/irq.c (renamed from drivers/misc/habanalabs/common/irq.c)38
-rw-r--r--drivers/accel/habanalabs/common/memory.c (renamed from drivers/misc/habanalabs/common/memory.c)371
-rw-r--r--drivers/accel/habanalabs/common/memory_mgr.c (renamed from drivers/misc/habanalabs/common/memory_mgr.c)3
-rw-r--r--drivers/accel/habanalabs/common/mmu/Makefile (renamed from drivers/misc/habanalabs/common/mmu/Makefile)0
-rw-r--r--drivers/accel/habanalabs/common/mmu/mmu.c (renamed from drivers/misc/habanalabs/common/mmu/mmu.c)2
-rw-r--r--drivers/accel/habanalabs/common/mmu/mmu_v1.c (renamed from drivers/misc/habanalabs/common/mmu/mmu_v1.c)1
-rw-r--r--drivers/accel/habanalabs/common/mmu/mmu_v2_hr.c (renamed from drivers/misc/habanalabs/common/mmu/mmu_v2_hr.c)0
-rw-r--r--drivers/accel/habanalabs/common/pci/Makefile (renamed from drivers/misc/habanalabs/common/pci/Makefile)0
-rw-r--r--drivers/accel/habanalabs/common/pci/pci.c (renamed from drivers/misc/habanalabs/common/pci/pci.c)10
-rw-r--r--drivers/accel/habanalabs/common/security.c (renamed from drivers/misc/habanalabs/common/security.c)174
-rw-r--r--drivers/accel/habanalabs/common/security.h163
-rw-r--r--drivers/accel/habanalabs/common/state_dump.c (renamed from drivers/misc/habanalabs/common/state_dump.c)2
-rw-r--r--drivers/accel/habanalabs/common/sysfs.c (renamed from drivers/misc/habanalabs/common/sysfs.c)0
-rw-r--r--drivers/accel/habanalabs/gaudi/Makefile (renamed from drivers/misc/habanalabs/gaudi/Makefile)0
-rw-r--r--drivers/accel/habanalabs/gaudi/gaudi.c (renamed from drivers/misc/habanalabs/gaudi/gaudi.c)31
-rw-r--r--drivers/accel/habanalabs/gaudi/gaudiP.h (renamed from drivers/misc/habanalabs/gaudi/gaudiP.h)2
-rw-r--r--drivers/accel/habanalabs/gaudi/gaudi_coresight.c (renamed from drivers/misc/habanalabs/gaudi/gaudi_coresight.c)3
-rw-r--r--drivers/accel/habanalabs/gaudi/gaudi_security.c (renamed from drivers/misc/habanalabs/gaudi/gaudi_security.c)0
-rw-r--r--drivers/accel/habanalabs/gaudi2/Makefile (renamed from drivers/misc/habanalabs/gaudi2/Makefile)0
-rw-r--r--drivers/accel/habanalabs/gaudi2/gaudi2.c (renamed from drivers/misc/habanalabs/gaudi2/gaudi2.c)1183
-rw-r--r--drivers/accel/habanalabs/gaudi2/gaudi2P.h (renamed from drivers/misc/habanalabs/gaudi2/gaudi2P.h)39
-rw-r--r--drivers/accel/habanalabs/gaudi2/gaudi2_coresight.c (renamed from drivers/misc/habanalabs/gaudi2/gaudi2_coresight.c)10
-rw-r--r--drivers/accel/habanalabs/gaudi2/gaudi2_coresight_regs.h (renamed from drivers/misc/habanalabs/gaudi2/gaudi2_coresight_regs.h)0
-rw-r--r--drivers/accel/habanalabs/gaudi2/gaudi2_masks.h (renamed from drivers/misc/habanalabs/gaudi2/gaudi2_masks.h)0
-rw-r--r--drivers/accel/habanalabs/gaudi2/gaudi2_security.c (renamed from drivers/misc/habanalabs/gaudi2/gaudi2_security.c)37
-rw-r--r--drivers/accel/habanalabs/goya/Makefile (renamed from drivers/misc/habanalabs/goya/Makefile)0
-rw-r--r--drivers/accel/habanalabs/goya/goya.c (renamed from drivers/misc/habanalabs/goya/goya.c)16
-rw-r--r--drivers/accel/habanalabs/goya/goyaP.h (renamed from drivers/misc/habanalabs/goya/goyaP.h)2
-rw-r--r--drivers/accel/habanalabs/goya/goya_coresight.c (renamed from drivers/misc/habanalabs/goya/goya_coresight.c)2
-rw-r--r--drivers/accel/habanalabs/goya/goya_hwmgr.c (renamed from drivers/misc/habanalabs/goya/goya_hwmgr.c)0
-rw-r--r--drivers/accel/habanalabs/goya/goya_security.c (renamed from drivers/misc/habanalabs/goya/goya_security.c)0
-rw-r--r--drivers/accel/habanalabs/include/common/cpucp_if.h (renamed from drivers/misc/habanalabs/include/common/cpucp_if.h)96
-rw-r--r--drivers/accel/habanalabs/include/common/hl_boot_if.h (renamed from drivers/misc/habanalabs/include/common/hl_boot_if.h)151
-rw-r--r--drivers/accel/habanalabs/include/common/qman_if.h (renamed from drivers/misc/habanalabs/include/common/qman_if.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/cpu_if_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/cpu_if_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/dma0_core_masks.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/dma0_core_masks.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/dma0_core_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/dma0_core_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/dma0_qm_masks.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/dma0_qm_masks.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/dma0_qm_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/dma0_qm_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/dma1_core_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/dma1_core_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/dma1_qm_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/dma1_qm_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/dma2_core_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/dma2_core_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/dma2_qm_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/dma2_qm_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/dma3_core_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/dma3_core_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/dma3_qm_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/dma3_qm_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/dma4_core_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/dma4_core_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/dma4_qm_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/dma4_qm_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/dma5_core_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/dma5_core_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/dma5_qm_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/dma5_qm_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/dma6_core_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/dma6_core_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/dma6_qm_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/dma6_qm_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/dma7_core_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/dma7_core_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/dma7_qm_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/dma7_qm_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/dma_if_e_n_down_ch0_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_n_down_ch0_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/dma_if_e_n_down_ch1_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_n_down_ch1_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/dma_if_e_n_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_n_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/dma_if_e_s_down_ch0_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_s_down_ch0_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/dma_if_e_s_down_ch1_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_s_down_ch1_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/dma_if_e_s_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_s_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/dma_if_w_n_down_ch0_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_n_down_ch0_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/dma_if_w_n_down_ch1_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_n_down_ch1_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/dma_if_w_n_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_n_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/dma_if_w_s_down_ch0_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_s_down_ch0_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/dma_if_w_s_down_ch1_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_s_down_ch1_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/dma_if_w_s_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_s_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/gaudi_blocks.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/gaudi_blocks.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/gaudi_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/gaudi_regs.h)2
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/mme0_ctrl_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/mme0_ctrl_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/mme0_qm_masks.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/mme0_qm_masks.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/mme0_qm_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/mme0_qm_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/mme1_ctrl_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/mme1_ctrl_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/mme2_ctrl_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/mme2_ctrl_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/mme2_qm_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/mme2_qm_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/mme3_ctrl_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/mme3_ctrl_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/mmu_up_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/mmu_up_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/nic0_qm0_masks.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/nic0_qm0_masks.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/nic0_qm0_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/nic0_qm0_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/nic0_qm1_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/nic0_qm1_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/nic1_qm0_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/nic1_qm0_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/nic1_qm1_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/nic1_qm1_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/nic2_qm0_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/nic2_qm0_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/nic2_qm1_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/nic2_qm1_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/nic3_qm0_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/nic3_qm0_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/nic3_qm1_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/nic3_qm1_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/nic4_qm0_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/nic4_qm0_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/nic4_qm1_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/nic4_qm1_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_0_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_0_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_1_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_1_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_2_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_2_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_3_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_3_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_4_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_4_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_5_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_5_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_6_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_6_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_7_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_7_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/psoc_cpu_pll_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_cpu_pll_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/psoc_etr_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_etr_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/psoc_global_conf_masks.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_global_conf_masks.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/psoc_global_conf_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_global_conf_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/psoc_timestamp_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_timestamp_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_0_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_0_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_1_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_1_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_2_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_2_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_3_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_3_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_4_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_4_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_5_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_5_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_6_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_6_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_7_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_7_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/stlb_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/stlb_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/tpc0_cfg_masks.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/tpc0_cfg_masks.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/tpc0_cfg_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/tpc0_cfg_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/tpc0_qm_masks.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/tpc0_qm_masks.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/tpc0_qm_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/tpc0_qm_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/tpc1_cfg_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/tpc1_cfg_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/tpc1_qm_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/tpc1_qm_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/tpc2_cfg_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/tpc2_cfg_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/tpc2_qm_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/tpc2_qm_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/tpc3_cfg_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/tpc3_cfg_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/tpc3_qm_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/tpc3_qm_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/tpc4_cfg_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/tpc4_cfg_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/tpc4_qm_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/tpc4_qm_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/tpc5_cfg_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/tpc5_cfg_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/tpc5_qm_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/tpc5_qm_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/tpc6_cfg_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/tpc6_cfg_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/tpc6_qm_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/tpc6_qm_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/tpc7_cfg_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/tpc7_cfg_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/asic_reg/tpc7_qm_regs.h (renamed from drivers/misc/habanalabs/include/gaudi/asic_reg/tpc7_qm_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/gaudi.h (renamed from drivers/misc/habanalabs/include/gaudi/gaudi.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/gaudi_async_events.h (renamed from drivers/misc/habanalabs/include/gaudi/gaudi_async_events.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/gaudi_async_ids_map_extended.h (renamed from drivers/misc/habanalabs/include/gaudi/gaudi_async_ids_map_extended.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/gaudi_coresight.h (renamed from drivers/misc/habanalabs/include/gaudi/gaudi_coresight.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/gaudi_fw_if.h (renamed from drivers/misc/habanalabs/include/gaudi/gaudi_fw_if.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/gaudi_masks.h (renamed from drivers/misc/habanalabs/include/gaudi/gaudi_masks.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/gaudi_packets.h (renamed from drivers/misc/habanalabs/include/gaudi/gaudi_packets.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi/gaudi_reg_map.h (renamed from drivers/misc/habanalabs/include/gaudi/gaudi_reg_map.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/arc/gaudi2_arc_common_packets.h211
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/arc_farm_arc0_acp_eng_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_arc0_acp_eng_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/arc_farm_arc0_aux_masks.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_arc0_aux_masks.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/arc_farm_arc0_aux_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_arc0_aux_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/arc_farm_arc0_dup_eng_axuser_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_arc0_dup_eng_axuser_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/arc_farm_arc0_dup_eng_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_arc0_dup_eng_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_ctx_axuser_masks.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_ctx_axuser_masks.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_ctx_axuser_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_ctx_axuser_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_ctx_masks.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_ctx_masks.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_ctx_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_ctx_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_kdma_cgm_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_kdma_cgm_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_masks.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_masks.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/cpu_if_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/cpu_if_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_dec0_cmd_masks.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_dec0_cmd_masks.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_dec0_cmd_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_dec0_cmd_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_core_ctx_axuser_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_core_ctx_axuser_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_core_ctx_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_core_ctx_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_core_masks.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_core_masks.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_core_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_core_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_qm_arc_aux_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_qm_arc_aux_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_qm_axuser_nonsecured_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_qm_axuser_nonsecured_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_qm_cgm_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_qm_cgm_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_qm_masks.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_qm_masks.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_qm_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_qm_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_edma1_core_ctx_axuser_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma1_core_ctx_axuser_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_edma1_qm_axuser_nonsecured_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma1_qm_axuser_nonsecured_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_hmmu0_mmu_masks.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_hmmu0_mmu_masks.h)15
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_hmmu0_mmu_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_hmmu0_mmu_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_hmmu0_stlb_masks.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_hmmu0_stlb_masks.h)41
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_hmmu0_stlb_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_hmmu0_stlb_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_acc_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_acc_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_cout0_master_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_cout0_master_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_cout0_slave_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_cout0_slave_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_cout1_master_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_cout1_master_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_cout1_slave_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_cout1_slave_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in0_master_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in0_master_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in0_slave_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in0_slave_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in1_master_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in1_master_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in1_slave_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in1_slave_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in2_master_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in2_master_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in2_slave_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in2_slave_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in3_master_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in3_master_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in3_slave_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in3_slave_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in4_master_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in4_master_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in4_slave_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in4_slave_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_base_addr_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_base_addr_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_non_tensor_end_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_non_tensor_end_regs.h)6
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_non_tensor_start_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_non_tensor_start_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_tensor_a_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_tensor_a_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_tensor_b_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_tensor_b_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_tensor_cout_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_tensor_cout_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_masks.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_masks.h)9
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_mme_axuser_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_mme_axuser_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_arc_acp_eng_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_arc_acp_eng_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_arc_aux_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_arc_aux_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_arc_dup_eng_axuser_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_arc_dup_eng_axuser_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_arc_dup_eng_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_arc_dup_eng_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_axuser_nonsecured_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_axuser_nonsecured_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_axuser_secured_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_axuser_secured_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_cgm_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_cgm_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_sbte0_masks.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_sbte0_masks.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_sbte0_mstr_if_axuser_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_sbte0_mstr_if_axuser_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_wb0_mstr_if_axuser_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_wb0_mstr_if_axuser_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_rtr0_ctrl_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_rtr0_ctrl_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_rtr0_mstr_if_rr_prvt_hbw_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_rtr0_mstr_if_rr_prvt_hbw_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_rtr0_mstr_if_rr_prvt_lbw_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_rtr0_mstr_if_rr_prvt_lbw_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_rtr0_mstr_if_rr_shrd_hbw_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_rtr0_mstr_if_rr_shrd_hbw_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_rtr0_mstr_if_rr_shrd_lbw_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_rtr0_mstr_if_rr_shrd_lbw_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_glbl_masks.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_glbl_masks.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_glbl_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_glbl_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_mstr_if_axuser_masks.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_mstr_if_axuser_masks.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_mstr_if_axuser_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_mstr_if_axuser_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_objs_masks.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_objs_masks.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_objs_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_objs_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_axuser_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_axuser_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_kernel_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_kernel_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_kernel_tensor_0_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_kernel_tensor_0_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_masks.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_masks.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_qm_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_qm_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_qm_sync_object_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_qm_sync_object_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_qm_tensor_0_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_qm_tensor_0_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_special_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_special_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_eml_busmon_0_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_eml_busmon_0_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_eml_etf_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_eml_etf_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_eml_funnel_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_eml_funnel_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_eml_spmu_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_eml_spmu_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_eml_stm_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_eml_stm_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_qm_arc_aux_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_qm_arc_aux_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_qm_axuser_nonsecured_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_qm_axuser_nonsecured_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_qm_cgm_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_qm_cgm_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_qm_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_qm_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_axuser_dec_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_axuser_dec_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_axuser_msix_abnrm_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_axuser_msix_abnrm_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_axuser_msix_l2c_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_axuser_msix_l2c_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_axuser_msix_nrm_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_axuser_msix_nrm_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_axuser_msix_vcd_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_axuser_msix_vcd_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_masks.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_masks.h)6
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_ctrl_special_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_ctrl_special_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore1_mme_ctrl_lo_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore1_mme_ctrl_lo_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore1_sync_mngr_glbl_regs.h1203
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore3_mme_ctrl_lo_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore3_mme_ctrl_lo_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/gaudi2_blocks_linux_driver.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/gaudi2_blocks_linux_driver.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/gaudi2_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/gaudi2_regs.h)1
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/nic0_qm0_cgm_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/nic0_qm0_cgm_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/nic0_qm0_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/nic0_qm0_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/nic0_qm_arc_aux0_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/nic0_qm_arc_aux0_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/nic0_qpc0_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/nic0_qpc0_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/nic0_umr0_0_completion_queue_ci_1_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/nic0_umr0_0_completion_queue_ci_1_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/nic0_umr0_0_unsecure_doorbell0_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/nic0_umr0_0_unsecure_doorbell0_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_aux_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_aux_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_dbi_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_dbi_regs.h)3
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_dec0_cmd_masks.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_dec0_cmd_masks.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_dec0_cmd_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_dec0_cmd_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_axuser_dec_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_axuser_dec_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_axuser_msix_abnrm_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_axuser_msix_abnrm_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_axuser_msix_l2c_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_axuser_msix_l2c_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_axuser_msix_nrm_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_axuser_msix_nrm_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_axuser_msix_vcd_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_axuser_msix_vcd_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_masks.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_masks.h)3
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_ctrl_special_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_ctrl_special_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_wrap_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_wrap_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_wrap_special_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_wrap_special_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/pdma0_core_ctx_axuser_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_core_ctx_axuser_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/pdma0_core_ctx_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_core_ctx_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/pdma0_core_masks.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_core_masks.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/pdma0_core_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_core_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/pdma0_core_special_masks.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_core_special_masks.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/pdma0_qm_arc_aux_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_qm_arc_aux_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/pdma0_qm_axuser_nonsecured_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_qm_axuser_nonsecured_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/pdma0_qm_axuser_secured_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_qm_axuser_secured_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/pdma0_qm_cgm_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_qm_cgm_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/pdma0_qm_masks.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_qm_masks.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/pdma0_qm_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_qm_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/pdma1_core_ctx_axuser_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma1_core_ctx_axuser_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/pdma1_qm_axuser_nonsecured_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma1_qm_axuser_nonsecured_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/pmmu_hbw_stlb_masks.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/pmmu_hbw_stlb_masks.h)3
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/pmmu_hbw_stlb_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/pmmu_hbw_stlb_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/pmmu_pif_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/pmmu_pif_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/psoc_etr_masks.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/psoc_etr_masks.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/psoc_etr_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/psoc_etr_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/psoc_global_conf_masks.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/psoc_global_conf_masks.h)27
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/psoc_global_conf_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/psoc_global_conf_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/psoc_reset_conf_masks.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/psoc_reset_conf_masks.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/psoc_reset_conf_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/psoc_reset_conf_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/psoc_timestamp_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/psoc_timestamp_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/rot0_desc_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/rot0_desc_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/rot0_masks.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/rot0_masks.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/rot0_qm_arc_aux_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/rot0_qm_arc_aux_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/rot0_qm_axuser_nonsecured_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/rot0_qm_axuser_nonsecured_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/rot0_qm_cgm_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/rot0_qm_cgm_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/rot0_qm_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/rot0_qm_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/rot0_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/rot0_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/xbar_edge_0_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/xbar_edge_0_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/asic_reg/xbar_mid_0_regs.h (renamed from drivers/misc/habanalabs/include/gaudi2/asic_reg/xbar_mid_0_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/gaudi2.h (renamed from drivers/misc/habanalabs/include/gaudi2/gaudi2.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/gaudi2_async_events.h (renamed from drivers/misc/habanalabs/include/gaudi2/gaudi2_async_events.h)1
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/gaudi2_async_ids_map_extended.h (renamed from drivers/misc/habanalabs/include/gaudi2/gaudi2_async_ids_map_extended.h)2
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/gaudi2_coresight.h (renamed from drivers/misc/habanalabs/include/gaudi2/gaudi2_coresight.h)0
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/gaudi2_fw_if.h (renamed from drivers/misc/habanalabs/include/gaudi2/gaudi2_fw_if.h)23
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/gaudi2_packets.h (renamed from drivers/misc/habanalabs/include/gaudi2/gaudi2_packets.h)4
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/gaudi2_reg_map.h (renamed from drivers/misc/habanalabs/include/gaudi2/gaudi2_reg_map.h)16
-rw-r--r--drivers/accel/habanalabs/include/gaudi2/gaudi2_special_blocks.h157
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/cpu_ca53_cfg_masks.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/cpu_ca53_cfg_masks.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/cpu_ca53_cfg_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/cpu_ca53_cfg_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/cpu_if_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/cpu_if_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/cpu_pll_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/cpu_pll_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/dma_ch_0_masks.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_0_masks.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/dma_ch_0_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_0_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/dma_ch_1_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_1_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/dma_ch_2_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_2_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/dma_ch_3_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_3_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/dma_ch_4_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_4_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/dma_macro_masks.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/dma_macro_masks.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/dma_macro_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/dma_macro_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/dma_nrtr_masks.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/dma_nrtr_masks.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/dma_nrtr_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/dma_nrtr_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/dma_qm_0_masks.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_0_masks.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/dma_qm_0_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_0_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/dma_qm_1_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_1_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/dma_qm_2_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_2_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/dma_qm_3_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_3_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/dma_qm_4_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_4_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/goya_blocks.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/goya_blocks.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/goya_masks.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/goya_masks.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/goya_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/goya_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/ic_pll_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/ic_pll_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/mc_pll_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/mc_pll_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/mme1_rtr_masks.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/mme1_rtr_masks.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/mme1_rtr_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/mme1_rtr_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/mme2_rtr_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/mme2_rtr_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/mme3_rtr_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/mme3_rtr_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/mme4_rtr_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/mme4_rtr_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/mme5_rtr_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/mme5_rtr_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/mme6_rtr_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/mme6_rtr_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/mme_cmdq_masks.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/mme_cmdq_masks.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/mme_cmdq_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/mme_cmdq_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/mme_masks.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/mme_masks.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/mme_qm_masks.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/mme_qm_masks.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/mme_qm_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/mme_qm_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/mme_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/mme_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/mmu_masks.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/mmu_masks.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/mmu_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/mmu_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/pci_nrtr_masks.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/pci_nrtr_masks.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/pci_nrtr_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/pci_nrtr_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/pcie_aux_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/pcie_aux_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/pcie_wrap_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/pcie_wrap_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/psoc_emmc_pll_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/psoc_emmc_pll_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/psoc_etr_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/psoc_etr_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/psoc_global_conf_masks.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/psoc_global_conf_masks.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/psoc_global_conf_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/psoc_global_conf_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/psoc_mme_pll_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/psoc_mme_pll_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/psoc_pci_pll_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/psoc_pci_pll_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/psoc_spi_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/psoc_spi_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/psoc_timestamp_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/psoc_timestamp_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/sram_y0_x0_rtr_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x0_rtr_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/sram_y0_x1_rtr_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x1_rtr_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/sram_y0_x2_rtr_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x2_rtr_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/sram_y0_x3_rtr_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x3_rtr_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/sram_y0_x4_rtr_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x4_rtr_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/stlb_masks.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/stlb_masks.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/stlb_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/stlb_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/tpc0_cfg_masks.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cfg_masks.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/tpc0_cfg_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cfg_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/tpc0_cmdq_masks.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cmdq_masks.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/tpc0_cmdq_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cmdq_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/tpc0_eml_cfg_masks.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/tpc0_eml_cfg_masks.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/tpc0_eml_cfg_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/tpc0_eml_cfg_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/tpc0_nrtr_masks.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/tpc0_nrtr_masks.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/tpc0_nrtr_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/tpc0_nrtr_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/tpc0_qm_masks.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/tpc0_qm_masks.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/tpc0_qm_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/tpc0_qm_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/tpc1_cfg_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/tpc1_cfg_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/tpc1_cmdq_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/tpc1_cmdq_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/tpc1_qm_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/tpc1_qm_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/tpc1_rtr_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/tpc1_rtr_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/tpc2_cfg_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/tpc2_cfg_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/tpc2_cmdq_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/tpc2_cmdq_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/tpc2_qm_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/tpc2_qm_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/tpc2_rtr_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/tpc2_rtr_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/tpc3_cfg_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/tpc3_cfg_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/tpc3_cmdq_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/tpc3_cmdq_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/tpc3_qm_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/tpc3_qm_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/tpc3_rtr_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/tpc3_rtr_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/tpc4_cfg_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/tpc4_cfg_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/tpc4_cmdq_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/tpc4_cmdq_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/tpc4_qm_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/tpc4_qm_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/tpc4_rtr_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/tpc4_rtr_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/tpc5_cfg_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/tpc5_cfg_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/tpc5_cmdq_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/tpc5_cmdq_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/tpc5_qm_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/tpc5_qm_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/tpc5_rtr_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/tpc5_rtr_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/tpc6_cfg_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/tpc6_cfg_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/tpc6_cmdq_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/tpc6_cmdq_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/tpc6_qm_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/tpc6_qm_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/tpc6_rtr_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/tpc6_rtr_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/tpc7_cfg_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/tpc7_cfg_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/tpc7_cmdq_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/tpc7_cmdq_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/tpc7_nrtr_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/tpc7_nrtr_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/tpc7_qm_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/tpc7_qm_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/asic_reg/tpc_pll_regs.h (renamed from drivers/misc/habanalabs/include/goya/asic_reg/tpc_pll_regs.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/goya.h (renamed from drivers/misc/habanalabs/include/goya/goya.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/goya_async_events.h (renamed from drivers/misc/habanalabs/include/goya/goya_async_events.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/goya_coresight.h (renamed from drivers/misc/habanalabs/include/goya/goya_coresight.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/goya_fw_if.h (renamed from drivers/misc/habanalabs/include/goya/goya_fw_if.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/goya_packets.h (renamed from drivers/misc/habanalabs/include/goya/goya_packets.h)0
-rw-r--r--drivers/accel/habanalabs/include/goya/goya_reg_map.h (renamed from drivers/misc/habanalabs/include/goya/goya_reg_map.h)0
-rw-r--r--drivers/accel/habanalabs/include/hw_ip/mmu/mmu_general.h (renamed from drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h)0
-rw-r--r--drivers/accel/habanalabs/include/hw_ip/mmu/mmu_v1_0.h (renamed from drivers/misc/habanalabs/include/hw_ip/mmu/mmu_v1_0.h)0
-rw-r--r--drivers/accel/habanalabs/include/hw_ip/mmu/mmu_v1_1.h (renamed from drivers/misc/habanalabs/include/hw_ip/mmu/mmu_v1_1.h)0
-rw-r--r--drivers/accel/habanalabs/include/hw_ip/mmu/mmu_v2_0.h (renamed from drivers/misc/habanalabs/include/hw_ip/mmu/mmu_v2_0.h)0
-rw-r--r--drivers/accel/habanalabs/include/hw_ip/pci/pci_general.h (renamed from drivers/misc/habanalabs/include/hw_ip/pci/pci_general.h)0
-rw-r--r--drivers/accel/ivpu/Kconfig15
-rw-r--r--drivers/accel/ivpu/Makefile16
-rw-r--r--drivers/accel/ivpu/TODO11
-rw-r--r--drivers/accel/ivpu/ivpu_drv.c655
-rw-r--r--drivers/accel/ivpu/ivpu_drv.h190
-rw-r--r--drivers/accel/ivpu/ivpu_fw.c434
-rw-r--r--drivers/accel/ivpu/ivpu_fw.h38
-rw-r--r--drivers/accel/ivpu/ivpu_gem.c749
-rw-r--r--drivers/accel/ivpu/ivpu_gem.h127
-rw-r--r--drivers/accel/ivpu/ivpu_hw.h170
-rw-r--r--drivers/accel/ivpu/ivpu_hw_mtl.c1084
-rw-r--r--drivers/accel/ivpu/ivpu_hw_mtl_reg.h280
-rw-r--r--drivers/accel/ivpu/ivpu_hw_reg_io.h115
-rw-r--r--drivers/accel/ivpu/ivpu_ipc.c510
-rw-r--r--drivers/accel/ivpu/ivpu_ipc.h93
-rw-r--r--drivers/accel/ivpu/ivpu_job.c615
-rw-r--r--drivers/accel/ivpu/ivpu_job.h67
-rw-r--r--drivers/accel/ivpu/ivpu_jsm_msg.c180
-rw-r--r--drivers/accel/ivpu/ivpu_jsm_msg.h23
-rw-r--r--drivers/accel/ivpu/ivpu_mmu.c883
-rw-r--r--drivers/accel/ivpu/ivpu_mmu.h50
-rw-r--r--drivers/accel/ivpu/ivpu_mmu_context.c398
-rw-r--r--drivers/accel/ivpu/ivpu_mmu_context.h50
-rw-r--r--drivers/accel/ivpu/ivpu_pm.c329
-rw-r--r--drivers/accel/ivpu/ivpu_pm.h38
-rw-r--r--drivers/accel/ivpu/vpu_boot_api.h349
-rw-r--r--drivers/accel/ivpu/vpu_jsm_api.h1008
-rw-r--r--drivers/accessibility/speakup/main.c2
-rw-r--r--drivers/acpi/acpi_lpit.c1
-rw-r--r--drivers/acpi/acpi_pnp.c14
-rw-r--r--drivers/acpi/acpica/Makefile2
-rw-r--r--drivers/acpi/acpica/hwvalid.c7
-rw-r--r--drivers/acpi/acpica/nsrepair.c12
-rw-r--r--drivers/acpi/acpica/nsxfname.c2
-rw-r--r--drivers/acpi/apei/einj.c4
-rw-r--r--drivers/acpi/battery.c35
-rw-r--r--drivers/acpi/bus.c2
-rw-r--r--drivers/acpi/cppc_acpi.c71
-rw-r--r--drivers/acpi/device_pm.c19
-rw-r--r--drivers/acpi/device_sysfs.c10
-rw-r--r--drivers/acpi/internal.h2
-rw-r--r--drivers/acpi/ioapic.c1
-rw-r--r--drivers/acpi/nfit/core.c2
-rw-r--r--drivers/acpi/pfr_telemetry.c2
-rw-r--r--drivers/acpi/pmic/intel_pmic_bytcrc.c1
-rw-r--r--drivers/acpi/pmic/intel_pmic_chtdc_ti.c26
-rw-r--r--drivers/acpi/pptt.c93
-rw-r--r--drivers/acpi/processor_idle.c30
-rw-r--r--drivers/acpi/processor_perflib.c38
-rw-r--r--drivers/acpi/resource.c26
-rw-r--r--drivers/acpi/sysfs.c2
-rw-r--r--drivers/acpi/tables.c3
-rw-r--r--drivers/acpi/video_detect.c2
-rw-r--r--drivers/amba/bus.c4
-rw-r--r--drivers/android/binder.c68
-rw-r--r--drivers/android/binder_alloc.c2
-rw-r--r--drivers/android/binder_internal.h3
-rw-r--r--drivers/android/binderfs.c8
-rw-r--r--drivers/ata/Kconfig33
-rw-r--r--drivers/ata/Makefile4
-rw-r--r--drivers/ata/acard-ahci.c8
-rw-r--r--drivers/ata/ahci.c1
-rw-r--r--drivers/ata/ahci_octeon.c6
-rw-r--r--drivers/ata/libahci.c171
-rw-r--r--drivers/ata/libata-core.c86
-rw-r--r--drivers/ata/libata-eh.c117
-rw-r--r--drivers/ata/libata-sata.c7
-rw-r--r--drivers/ata/libata-scsi.c64
-rw-r--r--drivers/ata/libata-sff.c10
-rw-r--r--drivers/ata/libata-trace.c2
-rw-r--r--drivers/ata/pata_octeon_cf.c16
-rw-r--r--drivers/ata/pata_palmld.c137
-rw-r--r--drivers/ata/pata_parport/Kconfig141
-rw-r--r--drivers/ata/pata_parport/Makefile19
-rw-r--r--drivers/ata/pata_parport/aten.c (renamed from drivers/block/paride/aten.c)2
-rw-r--r--drivers/ata/pata_parport/bpck.c (renamed from drivers/block/paride/bpck.c)2
-rw-r--r--drivers/ata/pata_parport/bpck6.c (renamed from drivers/block/paride/bpck6.c)2
-rw-r--r--drivers/ata/pata_parport/comm.c (renamed from drivers/block/paride/comm.c)2
-rw-r--r--drivers/ata/pata_parport/dstr.c (renamed from drivers/block/paride/dstr.c)2
-rw-r--r--drivers/ata/pata_parport/epat.c (renamed from drivers/block/paride/epat.c)2
-rw-r--r--drivers/ata/pata_parport/epia.c (renamed from drivers/block/paride/epia.c)2
-rw-r--r--drivers/ata/pata_parport/fit2.c (renamed from drivers/block/paride/fit2.c)2
-rw-r--r--drivers/ata/pata_parport/fit3.c (renamed from drivers/block/paride/fit3.c)2
-rw-r--r--drivers/ata/pata_parport/friq.c (renamed from drivers/block/paride/friq.c)2
-rw-r--r--drivers/ata/pata_parport/frpw.c (renamed from drivers/block/paride/frpw.c)2
-rw-r--r--drivers/ata/pata_parport/kbic.c (renamed from drivers/block/paride/kbic.c)2
-rw-r--r--drivers/ata/pata_parport/ktti.c (renamed from drivers/block/paride/ktti.c)2
-rw-r--r--drivers/ata/pata_parport/on20.c (renamed from drivers/block/paride/on20.c)2
-rw-r--r--drivers/ata/pata_parport/on26.c (renamed from drivers/block/paride/on26.c)2
-rw-r--r--drivers/ata/pata_parport/pata_parport.c761
-rw-r--r--drivers/ata/pata_parport/ppc6lnx.c (renamed from drivers/block/paride/ppc6lnx.c)0
-rw-r--r--drivers/ata/pata_samsung_cf.c662
-rw-r--r--drivers/ata/sata_fsl.c5
-rw-r--r--drivers/ata/sata_inic162x.c14
-rw-r--r--drivers/ata/sata_promise.c2
-rw-r--r--drivers/ata/sata_sil24.c7
-rw-r--r--drivers/ata/sata_sx4.c2
-rw-r--r--drivers/base/arch_topology.c12
-rw-r--r--drivers/base/auxiliary.c2
-rw-r--r--drivers/base/base.h21
-rw-r--r--drivers/base/bus.c575
-rw-r--r--drivers/base/cacheinfo.c161
-rw-r--r--drivers/base/class.c34
-rw-r--r--drivers/base/component.c2
-rw-r--r--drivers/base/core.c531
-rw-r--r--drivers/base/cpu.c40
-rw-r--r--drivers/base/dd.c36
-rw-r--r--drivers/base/devtmpfs.c28
-rw-r--r--drivers/base/driver.c29
-rw-r--r--drivers/base/memory.c9
-rw-r--r--drivers/base/node.c3
-rw-r--r--drivers/base/physical_location.c5
-rw-r--r--drivers/base/platform.c48
-rw-r--r--drivers/base/power/domain.c5
-rw-r--r--drivers/base/power/runtime.c28
-rw-r--r--drivers/base/regmap/regmap-irq.c25
-rw-r--r--drivers/base/regmap/regmap-mdio.c41
-rw-r--r--drivers/base/regmap/regmap.c6
-rw-r--r--drivers/base/soc.c4
-rw-r--r--drivers/base/swnode.c63
-rw-r--r--drivers/base/test/property-entry-test.c30
-rw-r--r--drivers/base/transport_class.c17
-rw-r--r--drivers/bcma/main.c6
-rw-r--r--drivers/block/Kconfig29
-rw-r--r--drivers/block/brd.c82
-rw-r--r--drivers/block/drbd/Makefile2
-rw-r--r--drivers/block/drbd/drbd_buildtag.c22
-rw-r--r--drivers/block/drbd/drbd_debugfs.c2
-rw-r--r--drivers/block/drbd/drbd_int.h13
-rw-r--r--drivers/block/drbd/drbd_interval.c6
-rw-r--r--drivers/block/drbd/drbd_main.c20
-rw-r--r--drivers/block/drbd/drbd_proc.c2
-rw-r--r--drivers/block/drbd/drbd_vli.h2
-rw-r--r--drivers/block/loop.c14
-rw-r--r--drivers/block/null_blk/main.c3
-rw-r--r--drivers/block/paride/Kconfig302
-rw-r--r--drivers/block/paride/Makefile29
-rw-r--r--drivers/block/paride/Transition-notes128
-rw-r--r--drivers/block/paride/mkd31
-rw-r--r--drivers/block/paride/paride.c479
-rw-r--r--drivers/block/paride/paride.h172
-rw-r--r--drivers/block/paride/pcd.c1042
-rw-r--r--drivers/block/paride/pd.c1032
-rw-r--r--drivers/block/paride/pf.c1057
-rw-r--r--drivers/block/paride/pg.c734
-rw-r--r--drivers/block/paride/pseudo.h102
-rw-r--r--drivers/block/paride/pt.c1024
-rw-r--r--drivers/block/ps3vram.c7
-rw-r--r--drivers/block/rbd.c7
-rw-r--r--drivers/block/ublk_drv.c407
-rw-r--r--drivers/block/virtio_blk.c4
-rw-r--r--drivers/block/zram/zram_drv.c84
-rw-r--r--drivers/bluetooth/btintel.c116
-rw-r--r--drivers/bluetooth/btintel.h13
-rw-r--r--drivers/bluetooth/btusb.c16
-rw-r--r--drivers/bluetooth/hci_qca.c11
-rw-r--r--drivers/bus/fsl-mc/fsl-mc-bus.c4
-rw-r--r--drivers/bus/mhi/Makefile4
-rw-r--r--drivers/bus/mhi/ep/main.c89
-rw-r--r--drivers/bus/mhi/ep/sm.c42
-rw-r--r--drivers/bus/mhi/host/init.c6
-rw-r--r--drivers/bus/mips_cdmm.c4
-rw-r--r--drivers/bus/simple-pm-bus.c46
-rw-r--r--drivers/bus/sunxi-rsb.c15
-rw-r--r--drivers/char/agp/agp.h6
-rw-r--r--drivers/char/applicom.c5
-rw-r--r--drivers/char/hw_random/Kconfig10
-rw-r--r--drivers/char/hw_random/Makefile1
-rw-r--r--drivers/char/hw_random/jh7110-trng.c393
-rw-r--r--drivers/char/ipmi/ipmi_ipmb.c2
-rw-r--r--drivers/char/ipmi/ipmi_ssif.c113
-rw-r--r--drivers/char/mem.c2
-rw-r--r--drivers/char/mspec.c2
-rw-r--r--drivers/char/pcmcia/cm4000_cs.c6
-rw-r--r--drivers/char/pcmcia/synclink_cs.c40
-rw-r--r--drivers/char/tpm/eventlog/acpi.c5
-rw-r--r--drivers/char/tpm/eventlog/efi.c13
-rw-r--r--drivers/char/tpm/eventlog/of.c35
-rw-r--r--drivers/char/tpm/st33zp24/i2c.c5
-rw-r--r--drivers/char/tpm/tpm-chip.c1
-rw-r--r--drivers/char/tpm/tpm2-cmd.c4
-rw-r--r--drivers/char/tpm/tpm_crb.c100
-rw-r--r--drivers/char/tpm/tpm_i2c_atmel.c5
-rw-r--r--drivers/char/tpm/tpm_i2c_infineon.c5
-rw-r--r--drivers/char/tpm/tpm_i2c_nuvoton.c6
-rw-r--r--drivers/char/tpm/tpm_tis_i2c.c5
-rw-r--r--drivers/char/virtio_console.c5
-rw-r--r--drivers/clk/clk.c8
-rw-r--r--drivers/clk/davinci/Makefile4
-rw-r--r--drivers/clk/davinci/pll-dm355.c77
-rw-r--r--drivers/clk/davinci/pll-dm365.c146
-rw-r--r--drivers/clk/davinci/pll.c8
-rw-r--r--drivers/clk/davinci/pll.h5
-rw-r--r--drivers/clk/davinci/psc-dm355.c89
-rw-r--r--drivers/clk/davinci/psc-dm365.c111
-rw-r--r--drivers/clk/davinci/psc.c6
-rw-r--r--drivers/clk/davinci/psc.h7
-rw-r--r--drivers/clk/ingenic/jz4760-cgu.c18
-rw-r--r--drivers/clk/microchip/clk-mpfs-ccc.c10
-rw-r--r--drivers/clk/renesas/r9a06g032-clocks.c28
-rw-r--r--drivers/clk/samsung/Kconfig32
-rw-r--r--drivers/clk/samsung/Makefile4
-rw-r--r--drivers/clk/samsung/clk-pll.c181
-rw-r--r--drivers/clk/samsung/clk-pll.h21
-rw-r--r--drivers/clk/samsung/clk-s3c2410-dclk.c440
-rw-r--r--drivers/clk/samsung/clk-s3c2410.c446
-rw-r--r--drivers/clk/samsung/clk-s3c2412.c254
-rw-r--r--drivers/clk/samsung/clk-s3c2443.c438
-rw-r--r--drivers/clocksource/Kconfig4
-rw-r--r--drivers/clocksource/acpi_pm.c6
-rw-r--r--drivers/clocksource/em_sti.c7
-rw-r--r--drivers/clocksource/sh_cmt.c7
-rw-r--r--drivers/clocksource/sh_tmu.c7
-rw-r--r--drivers/clocksource/timer-microchip-pit64b.c12
-rw-r--r--drivers/clocksource/timer-riscv.c27
-rw-r--r--drivers/clocksource/timer-sun4i.c3
-rw-r--r--drivers/comedi/Kconfig2
-rw-r--r--drivers/comedi/comedi_fops.c1
-rw-r--r--drivers/counter/Kconfig91
-rw-r--r--drivers/cpufreq/Kconfig12
-rw-r--r--drivers/cpufreq/Kconfig.arm81
-rw-r--r--drivers/cpufreq/Makefile8
-rw-r--r--drivers/cpufreq/amd-pstate.c705
-rw-r--r--drivers/cpufreq/brcmstb-avs-cpufreq.c5
-rw-r--r--drivers/cpufreq/cpufreq.c10
-rw-r--r--drivers/cpufreq/davinci-cpufreq.c4
-rw-r--r--drivers/cpufreq/intel_pstate.c14
-rw-r--r--drivers/cpufreq/loongson1-cpufreq.c222
-rw-r--r--drivers/cpufreq/mediatek-cpufreq-hw.c5
-rw-r--r--drivers/cpufreq/omap-cpufreq.c4
-rw-r--r--drivers/cpufreq/qcom-cpufreq-hw.c30
-rw-r--r--drivers/cpufreq/s3c2410-cpufreq.c155
-rw-r--r--drivers/cpufreq/s3c2412-cpufreq.c240
-rw-r--r--drivers/cpufreq/s3c2416-cpufreq.c492
-rw-r--r--drivers/cpufreq/s3c2440-cpufreq.c321
-rw-r--r--drivers/cpufreq/s3c24xx-cpufreq-debugfs.c163
-rw-r--r--drivers/cpufreq/s3c24xx-cpufreq.c648
-rw-r--r--drivers/cpufreq/sa1100-cpufreq.c206
-rw-r--r--drivers/cpufreq/sa1110-cpufreq.c6
-rw-r--r--drivers/cpufreq/tegra194-cpufreq.c3
-rw-r--r--drivers/cpuidle/Kconfig1
-rw-r--r--drivers/cpuidle/Kconfig.arm10
-rw-r--r--drivers/cpuidle/cpuidle-arm.c4
-rw-r--r--drivers/cpuidle/cpuidle-big_little.c12
-rw-r--r--drivers/cpuidle/cpuidle-haltpoll.c2
-rw-r--r--drivers/cpuidle/cpuidle-mvebu-v7.c15
-rw-r--r--drivers/cpuidle/cpuidle-psci-domain.c7
-rw-r--r--drivers/cpuidle/cpuidle-psci.c25
-rw-r--r--drivers/cpuidle/cpuidle-qcom-spm.c4
-rw-r--r--drivers/cpuidle/cpuidle-riscv-sbi.c19
-rw-r--r--drivers/cpuidle/cpuidle-tegra.c31
-rw-r--r--drivers/cpuidle/cpuidle.c72
-rw-r--r--drivers/cpuidle/driver.c4
-rw-r--r--drivers/cpuidle/dt_idle_states.c2
-rw-r--r--drivers/cpuidle/governors/teo.c102
-rw-r--r--drivers/cpuidle/poll_state.c8
-rw-r--r--drivers/cpuidle/sysfs.c6
-rw-r--r--drivers/crypto/Kconfig10
-rw-r--r--drivers/crypto/Makefile1
-rw-r--r--drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c1
-rw-r--r--drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h2
-rw-r--r--drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c4
-rw-r--r--drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c13
-rw-r--r--drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c4
-rw-r--r--drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c11
-rw-r--r--drivers/crypto/amcc/crypto4xx_core.c10
-rw-r--r--drivers/crypto/aspeed/Kconfig11
-rw-r--r--drivers/crypto/aspeed/Makefile4
-rw-r--r--drivers/crypto/aspeed/aspeed-acry.c828
-rw-r--r--drivers/crypto/aspeed/aspeed-hace.c5
-rw-r--r--drivers/crypto/aspeed/aspeed-hace.h2
-rw-r--r--drivers/crypto/atmel-aes.c7
-rw-r--r--drivers/crypto/atmel-ecc.c3
-rw-r--r--drivers/crypto/atmel-i2c.c4
-rw-r--r--drivers/crypto/atmel-i2c.h4
-rw-r--r--drivers/crypto/atmel-sha.c10
-rw-r--r--drivers/crypto/atmel-sha204a.c3
-rw-r--r--drivers/crypto/atmel-tdes.c4
-rw-r--r--drivers/crypto/axis/artpec6_crypto.c14
-rw-r--r--drivers/crypto/bcm/cipher.c102
-rw-r--r--drivers/crypto/bcm/cipher.h7
-rw-r--r--drivers/crypto/caam/blob_gen.c2
-rw-r--r--drivers/crypto/caam/caamalg.c16
-rw-r--r--drivers/crypto/caam/caamalg_qi.c16
-rw-r--r--drivers/crypto/caam/caamalg_qi2.c56
-rw-r--r--drivers/crypto/caam/caamalg_qi2.h10
-rw-r--r--drivers/crypto/caam/caamhash.c18
-rw-r--r--drivers/crypto/caam/caampkc.c31
-rw-r--r--drivers/crypto/caam/caamprng.c12
-rw-r--r--drivers/crypto/caam/caamrng.c11
-rw-r--r--drivers/crypto/caam/ctrl.c4
-rw-r--r--drivers/crypto/caam/desc_constr.h3
-rw-r--r--drivers/crypto/caam/key_gen.c2
-rw-r--r--drivers/crypto/caam/qi.c4
-rw-r--r--drivers/crypto/caam/qi.h12
-rw-r--r--drivers/crypto/cavium/cpt/cptvf_algs.c10
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_aead.c4
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_skcipher.c8
-rw-r--r--drivers/crypto/ccp/ccp-crypto-main.c12
-rw-r--r--drivers/crypto/ccp/ccp-dmaengine.c21
-rw-r--r--drivers/crypto/ccp/sev-dev.c16
-rw-r--r--drivers/crypto/ccp/sp-pci.c46
-rw-r--r--drivers/crypto/ccree/cc_cipher.c2
-rw-r--r--drivers/crypto/chelsio/chcr_algo.c6
-rw-r--r--drivers/crypto/hifn_795x.c4
-rw-r--r--drivers/crypto/hisilicon/Kconfig8
-rw-r--r--drivers/crypto/hisilicon/qm.c225
-rw-r--r--drivers/crypto/hisilicon/sec/sec_algs.c6
-rw-r--r--drivers/crypto/hisilicon/sec2/sec_crypto.c10
-rw-r--r--drivers/crypto/hisilicon/sgl.c4
-rw-r--r--drivers/crypto/img-hash.c12
-rw-r--r--drivers/crypto/inside-secure/safexcel.c15
-rw-r--r--drivers/crypto/inside-secure/safexcel.h6
-rw-r--r--drivers/crypto/inside-secure/safexcel_cipher.c21
-rw-r--r--drivers/crypto/inside-secure/safexcel_hash.c54
-rw-r--r--drivers/crypto/ixp4xx_crypto.c4
-rw-r--r--drivers/crypto/marvell/cesa/cesa.c4
-rw-r--r--drivers/crypto/marvell/cesa/hash.c41
-rw-r--r--drivers/crypto/marvell/cesa/tdma.c2
-rw-r--r--drivers/crypto/marvell/octeontx/otx_cptvf_algs.c6
-rw-r--r--drivers/crypto/marvell/octeontx2/Makefile11
-rw-r--r--drivers/crypto/marvell/octeontx2/cn10k_cpt.c9
-rw-r--r--drivers/crypto/marvell/octeontx2/cn10k_cpt.h2
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cpt_common.h2
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c14
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptlf.c11
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c2
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c6
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c2
-rw-r--r--drivers/crypto/mxs-dcp.c8
-rw-r--r--drivers/crypto/nx/nx-common-powernv.c13
-rw-r--r--drivers/crypto/nx/nx-common-pseries.c6
-rw-r--r--drivers/crypto/qat/qat_common/adf_transport_access_macros.h2
-rw-r--r--drivers/crypto/qat/qat_common/qat_algs.c6
-rw-r--r--drivers/crypto/qat/qat_common/qat_algs_send.c3
-rw-r--r--drivers/crypto/qat/qat_common/qat_bl.c115
-rw-r--r--drivers/crypto/qat/qat_common/qat_bl.h4
-rw-r--r--drivers/crypto/qat/qat_common/qat_comp_algs.c169
-rw-r--r--drivers/crypto/qat/qat_common/qat_compression.c2
-rw-r--r--drivers/crypto/qat/qat_common/qat_crypto.c2
-rw-r--r--drivers/crypto/qce/core.c4
-rw-r--r--drivers/crypto/s5p-sss.c8
-rw-r--r--drivers/crypto/sahara.c4
-rw-r--r--drivers/crypto/stm32/stm32-cryp.c37
-rw-r--r--drivers/crypto/stm32/stm32-hash.c266
-rw-r--r--drivers/crypto/talitos.c6
-rw-r--r--drivers/crypto/ux500/Kconfig22
-rw-r--r--drivers/crypto/ux500/Makefile7
-rw-r--r--drivers/crypto/ux500/hash/Makefile11
-rw-r--r--drivers/crypto/ux500/hash/hash_alg.h398
-rw-r--r--drivers/crypto/ux500/hash/hash_core.c1966
-rw-r--r--drivers/crypto/virtio/virtio_crypto_akcipher_algs.c2
-rw-r--r--drivers/cxl/core/memdev.c4
-rw-r--r--drivers/cxl/core/port.c8
-rw-r--r--drivers/cxl/cxl.h4
-rw-r--r--drivers/cxl/cxlmem.h2
-rw-r--r--drivers/dax/Kconfig1
-rw-r--r--drivers/dax/bus.c2
-rw-r--r--drivers/dax/device.c2
-rw-r--r--drivers/dax/super.c2
-rw-r--r--drivers/dma-buf/dma-buf-sysfs-stats.c2
-rw-r--r--drivers/dma-buf/dma-buf.c14
-rw-r--r--drivers/dma-buf/dma-fence.c2
-rw-r--r--drivers/dma-buf/udmabuf.c28
-rw-r--r--drivers/dma/Kconfig28
-rw-r--r--drivers/dma/Makefile1
-rw-r--r--drivers/dma/at_xdmac.c220
-rw-r--r--drivers/dma/bcm2835-dma.c4
-rw-r--r--drivers/dma/dma-axi-dmac.c4
-rw-r--r--drivers/dma/dmaengine.c26
-rw-r--r--drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c6
-rw-r--r--drivers/dma/dw-edma/Kconfig5
-rw-r--r--drivers/dma/dw-edma/dw-edma-core.c196
-rw-r--r--drivers/dma/dw-edma/dw-edma-core.h10
-rw-r--r--drivers/dma/dw-edma/dw-edma-pcie.c56
-rw-r--r--drivers/dma/dw-edma/dw-edma-v0-core.c100
-rw-r--r--drivers/dma/dw-edma/dw-edma-v0-core.h1
-rw-r--r--drivers/dma/dw-edma/dw-edma-v0-debugfs.c374
-rw-r--r--drivers/dma/dw-edma/dw-edma-v0-debugfs.h5
-rw-r--r--drivers/dma/dw/core.c11
-rw-r--r--drivers/dma/fsl-edma.c8
-rw-r--r--drivers/dma/fsl-qdma.c10
-rw-r--r--drivers/dma/idma64.c8
-rw-r--r--drivers/dma/idxd/cdev.c2
-rw-r--r--drivers/dma/idxd/device.c14
-rw-r--r--drivers/dma/idxd/dma.c6
-rw-r--r--drivers/dma/idxd/init.c7
-rw-r--r--drivers/dma/idxd/sysfs.c4
-rw-r--r--drivers/dma/img-mdc-dma.c4
-rw-r--r--drivers/dma/imx-dma.c4
-rw-r--r--drivers/dma/imx-sdma.c43
-rw-r--r--drivers/dma/mcf-edma.c5
-rw-r--r--drivers/dma/mediatek/mtk-hsdma.c4
-rw-r--r--drivers/dma/mmp_pdma.c4
-rw-r--r--drivers/dma/mmp_tdma.c11
-rw-r--r--drivers/dma/moxart-dma.c4
-rw-r--r--drivers/dma/mv_xor_v2.c7
-rw-r--r--drivers/dma/mxs-dma.c4
-rw-r--r--drivers/dma/nbpfaxi.c4
-rw-r--r--drivers/dma/ppc4xx/adma.c12
-rw-r--r--drivers/dma/ptdma/ptdma-dmaengine.c2
-rw-r--r--drivers/dma/pxa_dma.c4
-rw-r--r--drivers/dma/qcom/bam_dma.c4
-rw-r--r--drivers/dma/s3c24xx-dma.c1428
-rw-r--r--drivers/dma/sf-pdma/sf-pdma.c7
-rw-r--r--drivers/dma/sf-pdma/sf-pdma.h1
-rw-r--r--drivers/dma/sh/usb-dmac.c4
-rw-r--r--drivers/dma/stm32-dmamux.c4
-rw-r--r--drivers/dma/stm32-mdma.c4
-rw-r--r--drivers/dma/sun4i-dma.c4
-rw-r--r--drivers/dma/sun6i-dma.c7
-rw-r--r--drivers/dma/tegra210-adma.c4
-rw-r--r--drivers/dma/ti/Makefile3
-rw-r--r--drivers/dma/ti/cppi41.c10
-rw-r--r--drivers/dma/ti/k3-psil-am62a.c196
-rw-r--r--drivers/dma/ti/k3-psil-priv.h1
-rw-r--r--drivers/dma/ti/k3-psil.c1
-rw-r--r--drivers/dma/ti/k3-udma.c42
-rw-r--r--drivers/dma/ti/omap-dma.c4
-rw-r--r--drivers/dma/xilinx/Makefile1
-rw-r--r--drivers/dma/xilinx/xdma-regs.h166
-rw-r--r--drivers/dma/xilinx/xdma.c974
-rw-r--r--drivers/dma/xilinx/zynqmp_dma.c4
-rw-r--r--drivers/edac/Kconfig8
-rw-r--r--drivers/edac/Makefile1
-rw-r--r--drivers/edac/amd64_edac.c217
-rw-r--r--drivers/edac/amd64_edac.h24
-rw-r--r--drivers/edac/i10nm_base.c459
-rw-r--r--drivers/edac/qcom_edac.c7
-rw-r--r--drivers/edac/skx_common.c78
-rw-r--r--drivers/edac/skx_common.h61
-rw-r--r--drivers/edac/zynqmp_edac.c467
-rw-r--r--drivers/eisa/eisa-bus.c4
-rw-r--r--drivers/extcon/extcon-intel-cht-wc.c1
-rw-r--r--drivers/firewire/core-cdev.c43
-rw-r--r--drivers/firewire/core-device.c8
-rw-r--r--drivers/firewire/core-transaction.c53
-rw-r--r--drivers/firewire/core.h9
-rw-r--r--drivers/firmware/arm_ffa/bus.c4
-rw-r--r--drivers/firmware/arm_scmi/bus.c3
-rw-r--r--drivers/firmware/dmi-sysfs.c14
-rw-r--r--drivers/firmware/efi/cper_cxl.c12
-rw-r--r--drivers/firmware/efi/earlycon.c41
-rw-r--r--drivers/firmware/efi/efi-init.c2
-rw-r--r--drivers/firmware/efi/efi.c76
-rw-r--r--drivers/firmware/efi/esrt.c15
-rw-r--r--drivers/firmware/efi/libstub/Makefile4
-rw-r--r--drivers/firmware/efi/libstub/arm64-entry.S67
-rw-r--r--drivers/firmware/efi/libstub/arm64-stub.c26
-rw-r--r--drivers/firmware/efi/libstub/arm64.c50
-rw-r--r--drivers/firmware/efi/libstub/efi-stub-helper.c67
-rw-r--r--drivers/firmware/efi/libstub/efistub.h23
-rw-r--r--drivers/firmware/efi/libstub/zboot.c2
-rw-r--r--drivers/firmware/efi/memattr.c9
-rw-r--r--drivers/firmware/efi/sysfb_efi.c8
-rw-r--r--drivers/firmware/efi/vars.c38
-rw-r--r--drivers/firmware/google/Kconfig8
-rw-r--r--drivers/firmware/google/framebuffer-coreboot.c4
-rw-r--r--drivers/firmware/google/gsmi.c2
-rw-r--r--drivers/firmware/psci/psci.c42
-rw-r--r--drivers/firmware/stratix10-svc.c25
-rw-r--r--drivers/firmware/sysfb_simplefb.c43
-rw-r--r--drivers/fpga/Kconfig2
-rw-r--r--drivers/fpga/dfl-afu-region.c1
-rw-r--r--drivers/fpga/dfl-afu.h2
-rw-r--r--drivers/fpga/dfl-fme-perf.c2
-rw-r--r--drivers/fpga/dfl-fme-pr.c4
-rw-r--r--drivers/fpga/dfl-fme-pr.h2
-rw-r--r--drivers/fpga/dfl.c253
-rw-r--r--drivers/fpga/dfl.h43
-rw-r--r--drivers/fpga/fpga-bridge.c11
-rw-r--r--drivers/fpga/intel-m10-bmc-sec-update.c432
-rw-r--r--drivers/fpga/microchip-spi.c145
-rw-r--r--drivers/fpga/stratix10-soc.c4
-rw-r--r--drivers/fsi/fsi-core.c6
-rw-r--r--drivers/gpio/Kconfig28
-rw-r--r--drivers/gpio/Makefile2
-rw-r--r--drivers/gpio/TODO4
-rw-r--r--drivers/gpio/gpio-104-dio-48e.c398
-rw-r--r--drivers/gpio/gpio-104-idi-48.c336
-rw-r--r--drivers/gpio/gpio-davinci.c3
-rw-r--r--drivers/gpio/gpio-ge.c1
-rw-r--r--drivers/gpio/gpio-gpio-mm.c154
-rw-r--r--drivers/gpio/gpio-i8255.c320
-rw-r--r--drivers/gpio/gpio-i8255.h54
-rw-r--r--drivers/gpio/gpio-iop.c59
-rw-r--r--drivers/gpio/gpio-msc313.c6
-rw-r--r--drivers/gpio/gpio-mvebu.c6
-rw-r--r--drivers/gpio/gpio-omap.c2
-rw-r--r--drivers/gpio/gpio-pca953x.c34
-rw-r--r--drivers/gpio/gpio-pca9570.c24
-rw-r--r--drivers/gpio/gpio-pcf857x.c118
-rw-r--r--drivers/gpio/gpio-regmap.c17
-rw-r--r--drivers/gpio/gpio-rockchip.c2
-rw-r--r--drivers/gpio/gpio-sim.c11
-rw-r--r--drivers/gpio/gpio-tegra186.c41
-rw-r--r--drivers/gpio/gpio-ucb1400.c85
-rw-r--r--drivers/gpio/gpio-vf610.c43
-rw-r--r--drivers/gpio/gpio-wcd934x.c1
-rw-r--r--drivers/gpio/gpio-xilinx.c11
-rw-r--r--drivers/gpio/gpio-zevio.c9
-rw-r--r--drivers/gpio/gpiolib-acpi.c22
-rw-r--r--drivers/gpio/gpiolib-acpi.h5
-rw-r--r--drivers/gpio/gpiolib-cdev.c21
-rw-r--r--drivers/gpio/gpiolib-devres.c55
-rw-r--r--drivers/gpio/gpiolib-of.c140
-rw-r--r--drivers/gpio/gpiolib-of.h5
-rw-r--r--drivers/gpio/gpiolib.c85
-rw-r--r--drivers/gpio/gpiolib.h10
-rw-r--r--drivers/gpu/drm/Kconfig70
-rw-r--r--drivers/gpu/drm/Makefile9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/Kconfig2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/Makefile7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c31
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c24
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c125
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c25
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c51
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c42
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c59
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c344
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c215
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c62
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c27
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c255
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c44
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c21
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c210
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c166
-rw-r--r--drivers/gpu/drm/amd/amdgpu/atombios_crtc.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/atombios_encoders.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik_sdma.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v10_0.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v11_0.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v6_0.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v8_0.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/df_v1_7.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/df_v4_3.c61
-rw-r--r--drivers/gpu/drm/amd/amdgpu/df_v4_3.h (renamed from drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_frl.h)15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c155
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c245
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v11_0_3.c88
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v11_0_3.h29
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c33
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c71
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c97
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c175
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0.c13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0_3.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c39
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c39
-rw-r--r--drivers/gpu/drm/amd/amdgpu/imu_v11_0.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mes_v10_1.c108
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mes_v11_0.c109
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v3_0.c37
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_2.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v4_3.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nv.c101
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v10_0.c80
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v11_0.c131
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v12_0.c78
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v13_0.c27
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v13_0_4.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v3_1.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c68
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c30
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c63
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c74
-rw-r--r--drivers/gpu/drm/amd/amdgpu/smu_v13_0_10.c303
-rw-r--r--drivers/gpu/drm/amd/amdgpu/smu_v13_0_10.h32
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc21.c67
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ta_secureDisplay_if.h24
-rw-r--r--drivers/gpu/drm/amd/amdgpu/umc_v6_7.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/umc_v8_10.c24
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c71
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_chardev.c27
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device.c11
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c27
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c4
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_events.c4
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_migrate.c3
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_priv.h2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process.c18
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_svm.c1
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c786
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h83
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c165
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h26
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c10
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c205
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c153
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h17
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c196
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c91
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c22
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h1
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/Makefile5
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c17
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table2.c14
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table2.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c28
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c101
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc.c156
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link.c4948
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c7553
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c61
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c103
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_resource.c121
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_stat.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_stream.c11
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc.h220
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_bios_types.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_ddc_types.h31
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dp_types.h170
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_hdmi_types.h134
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_hw_types.h24
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_link.h199
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_stream.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_types.h112
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_aux.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c102
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h12
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c30
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c15
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c37
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c119
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c17
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c22
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn201/dcn201_link_encoder.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c26
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c53
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c24
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn301/dcn301_dio_link_encoder.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c16
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c25
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c18
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.h12
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c63
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c29
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c13
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c30
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn314/dcn314_optc.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c66
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.c34
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c29
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.h19
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c242
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c70
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h22
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c209
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn321/dcn321_dio_link_encoder.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c18
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_helpers.h7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c58
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c12
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c212
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c74
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c64
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h10
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/dscc_types.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dcn30/hw_factory_dcn30.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/dcn32/hw_factory_dcn32.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h7
-rw-r--r--drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/core_types.h51
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h133
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h267
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/dc_link_dpia.h105
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h8
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h17
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/link.h157
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/resource.h13
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/Makefile37
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c1046
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.h33
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.c (renamed from drivers/gpu/drm/amd/display/dc/link/link_dp_trace.c)18
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.h (renamed from drivers/gpu/drm/amd/display/dc/link/link_dp_trace.h)6
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/accessories/link_fpga.c95
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/accessories/link_fpga.h30
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c (renamed from drivers/gpu/drm/amd/display/dc/link/link_hwss_dio.c)17
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h (renamed from drivers/gpu/drm/amd/display/dc/link/link_hwss_dio.h)1
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.c (renamed from drivers/gpu/drm/amd/display/dc/link/link_hwss_dpia.c)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.h (renamed from drivers/gpu/drm/amd/display/dc/link/link_hwss_dpia.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.c (renamed from drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.c)42
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.h (renamed from drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.h)1
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_detection.c1323
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_detection.h30
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_dpms.c2528
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_dpms.h40
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_factory.c577
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_factory.h29
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_resource.c114
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_resource.h31
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_validation.c398
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_validation.h28
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c (renamed from drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c)410
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.h (renamed from drivers/gpu/drm/amd/display/include/i2caux_interface.h)70
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c2246
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.h79
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c106
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.h (renamed from drivers/gpu/drm/tdfx/tdfx_drv.h)42
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c441
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h (renamed from drivers/gpu/drm/amd/display/dc/link/link_dp_dpia_bw.h)36
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c389
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.h (renamed from drivers/gpu/drm/amd/display/dc/link/link_dp_dpia_bw.c)11
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c208
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.h56
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c1701
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.h182
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_128b_132b.c259
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_128b_132b.h42
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c414
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.h61
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_auxless.c79
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_auxless.h35
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c (renamed from drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c)312
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.h41
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c579
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.h45
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.c (renamed from drivers/gpu/drm/amd/display/dc/core/dc_link_dpcd.c)13
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.h (renamed from drivers/gpu/drm/amd/display/dc/inc/link_dpcd.h)5
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c833
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h33
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.c240
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.h47
-rw-r--r--drivers/gpu/drm/amd/display/dmub/dmub_srv.h17
-rw-r--r--drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h124
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c12
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c21
-rw-r--r--drivers/gpu/drm/amd/display/include/ddc_service_types.h6
-rw-r--r--drivers/gpu/drm/amd/display/include/dpcd_defs.h5
-rw-r--r--drivers/gpu/drm/amd/display/include/link_service_types.h7
-rw-r--r--drivers/gpu/drm/amd/display/modules/color/color_gamma.c140
-rw-r--r--drivers/gpu/drm/amd/display/modules/color/color_gamma.h3
-rw-r--r--drivers/gpu/drm/amd/display/modules/freesync/freesync.c74
-rw-r--r--drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h36
-rw-r--r--drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c55
-rw-r--r--drivers/gpu/drm/amd/display/modules/power/power_helpers.c31
-rw-r--r--drivers/gpu/drm/amd/display/modules/power/power_helpers.h3
-rw-r--r--drivers/gpu/drm/amd/include/amd_shared.h1
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/df/df_4_3_offset.h30
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/df/df_4_3_sh_mask.h157
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_3_offset.h8
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_3_sh_mask.h50
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/xgmi/xgmi_6_1_0_sh_mask.h87
-rw-r--r--drivers/gpu/drm/amd/include/ivsrcid/gfx/irqsrcs_gfx_11_0_0.h2
-rw-r--r--drivers/gpu/drm/amd/include/kgd_pp_interface.h3
-rw-r--r--drivers/gpu/drm/amd/pm/amdgpu_dpm.c18
-rw-r--r--drivers/gpu/drm/amd/pm/amdgpu_pm.c10
-rw-r--r--drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h1
-rw-r--r--drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c11
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c13
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c16
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c87
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c16
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c32
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_processpptables.c1
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c23
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_processpptables.c1
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c21
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_processpptables.c1
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h2
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/inc/smu11_driver_if.h2
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/inc/smu9_driver_if.h2
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/inc/vega12/smu9_driver_if.h2
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c3
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c2
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/smu10_smumgr.c10
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c2
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c57
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h2
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_arcturus.h2
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_navi10.h2
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_sienna_cichlid.h2
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_aldebaran.h2
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h7
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_7.h31
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_0_ppsmc.h1
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h3
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h9
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c6
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c51
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c67
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c80
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c1
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c6
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h3
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_crtc.c1
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_drv.c1
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_kms.h1
-rw-r--r--drivers/gpu/drm/arm/hdlcd_drv.c24
-rw-r--r--drivers/gpu/drm/armada/armada_fbdev.c7
-rw-r--r--drivers/gpu/drm/aspeed/aspeed_gfx_crtc.c1
-rw-r--r--drivers/gpu/drm/aspeed/aspeed_gfx_drv.c1
-rw-r--r--drivers/gpu/drm/aspeed/aspeed_gfx_out.c1
-rw-r--r--drivers/gpu/drm/ast/Kconfig2
-rw-r--r--drivers/gpu/drm/ast/ast_drv.c1
-rw-r--r--drivers/gpu/drm/ast/ast_main.c1
-rw-r--r--drivers/gpu/drm/ast/ast_mode.c7
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c9
-rw-r--r--drivers/gpu/drm/bridge/Kconfig11
-rw-r--r--drivers/gpu/drm/bridge/Makefile1
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511_drv.c5
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix-anx6345.c6
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c5
-rw-r--r--drivers/gpu/drm/bridge/analogix/anx7625.c7
-rw-r--r--drivers/gpu/drm/bridge/cadence/Kconfig21
-rw-r--r--drivers/gpu/drm/bridge/cadence/Makefile3
-rw-r--r--drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c (renamed from drivers/gpu/drm/bridge/cdns-dsi.c)83
-rw-r--r--drivers/gpu/drm/bridge/cadence/cdns-dsi-core.h84
-rw-r--r--drivers/gpu/drm/bridge/cadence/cdns-dsi-j721e.c51
-rw-r--r--drivers/gpu/drm/bridge/cadence/cdns-dsi-j721e.h16
-rw-r--r--drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c1
-rw-r--r--drivers/gpu/drm/bridge/chipone-icn6211.c5
-rw-r--r--drivers/gpu/drm/bridge/chrontel-ch7033.c5
-rw-r--r--drivers/gpu/drm/bridge/fsl-ldb.c80
-rw-r--r--drivers/gpu/drm/bridge/ite-it6505.c133
-rw-r--r--drivers/gpu/drm/bridge/ite-it66121.c321
-rw-r--r--drivers/gpu/drm/bridge/lontium-lt8912b.c26
-rw-r--r--drivers/gpu/drm/bridge/lontium-lt9211.c5
-rw-r--r--drivers/gpu/drm/bridge/lontium-lt9611.c346
-rw-r--r--drivers/gpu/drm/bridge/lontium-lt9611uxc.c5
-rw-r--r--drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c16
-rw-r--r--drivers/gpu/drm/bridge/nxp-ptn3460.c5
-rw-r--r--drivers/gpu/drm/bridge/panel.c13
-rw-r--r--drivers/gpu/drm/bridge/parade-ps8622.c6
-rw-r--r--drivers/gpu/drm/bridge/parade-ps8640.c22
-rw-r--r--drivers/gpu/drm/bridge/sii902x.c38
-rw-r--r--drivers/gpu/drm/bridge/sii9234.c5
-rw-r--r--drivers/gpu/drm/bridge/sil-sii8620.c5
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c1
-rw-r--r--drivers/gpu/drm/bridge/tc358764.c1
-rw-r--r--drivers/gpu/drm/bridge/tc358767.c12
-rw-r--r--drivers/gpu/drm/bridge/tc358768.c6
-rw-r--r--drivers/gpu/drm/bridge/tc358775.c5
-rw-r--r--drivers/gpu/drm/bridge/ti-sn65dsi83.c8
-rw-r--r--drivers/gpu/drm/bridge/ti-sn65dsi86.c5
-rw-r--r--drivers/gpu/drm/bridge/ti-tfp410.c5
-rw-r--r--drivers/gpu/drm/display/drm_dp_aux_bus.c7
-rw-r--r--drivers/gpu/drm/display/drm_dp_mst_topology.c71
-rw-r--r--drivers/gpu/drm/drm_atomic.c23
-rw-r--r--drivers/gpu/drm/drm_atomic_state_helper.c124
-rw-r--r--drivers/gpu/drm/drm_atomic_uapi.c4
-rw-r--r--drivers/gpu/drm/drm_blend.c13
-rw-r--r--drivers/gpu/drm/drm_bridge.c294
-rw-r--r--drivers/gpu/drm/drm_bridge_connector.c27
-rw-r--r--drivers/gpu/drm/drm_bufs.c12
-rw-r--r--drivers/gpu/drm/drm_client.c54
-rw-r--r--drivers/gpu/drm/drm_client_modeset.c4
-rw-r--r--drivers/gpu/drm/drm_connector.c174
-rw-r--r--drivers/gpu/drm/drm_context.c36
-rw-r--r--drivers/gpu/drm/drm_debugfs.c110
-rw-r--r--drivers/gpu/drm/drm_drv.c7
-rw-r--r--drivers/gpu/drm/drm_edid.c528
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c321
-rw-r--r--drivers/gpu/drm/drm_fbdev_generic.c130
-rw-r--r--drivers/gpu/drm/drm_file.c18
-rw-r--r--drivers/gpu/drm/drm_format_helper.c496
-rw-r--r--drivers/gpu/drm/drm_fourcc.c4
-rw-r--r--drivers/gpu/drm/drm_framebuffer.c11
-rw-r--r--drivers/gpu/drm/drm_gem.c21
-rw-r--r--drivers/gpu/drm/drm_gem_atomic_helper.c31
-rw-r--r--drivers/gpu/drm/drm_gem_dma_helper.c7
-rw-r--r--drivers/gpu/drm/drm_gem_shmem_helper.c75
-rw-r--r--drivers/gpu/drm/drm_gem_ttm_helper.c2
-rw-r--r--drivers/gpu/drm/drm_gem_vram_helper.c12
-rw-r--r--drivers/gpu/drm/drm_internal.h5
-rw-r--r--drivers/gpu/drm/drm_ioc32.c13
-rw-r--r--drivers/gpu/drm/drm_ioctl.c25
-rw-r--r--drivers/gpu/drm/drm_lease.c66
-rw-r--r--drivers/gpu/drm/drm_mipi_dbi.c158
-rw-r--r--drivers/gpu/drm/drm_mipi_dsi.c56
-rw-r--r--drivers/gpu/drm/drm_mode_config.c10
-rw-r--r--drivers/gpu/drm/drm_modes.c551
-rw-r--r--drivers/gpu/drm/drm_panel_orientation_quirks.c39
-rw-r--r--drivers/gpu/drm/drm_plane.c5
-rw-r--r--drivers/gpu/drm/drm_plane_helper.c1
-rw-r--r--drivers/gpu/drm/drm_probe_helper.c160
-rw-r--r--drivers/gpu/drm/drm_simple_kms_helper.c2
-rw-r--r--drivers/gpu/drm/drm_vm.c8
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_drv.c54
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_drv.h5
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem.c2
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c9
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gpu.c66
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gpu.h8
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_hwdb.c36
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_sched.c18
-rw-r--r--drivers/gpu/drm/etnaviv/state_hi.xml.h86
-rw-r--r--drivers/gpu/drm/exynos/exynos5433_drm_decon.c13
-rw-r--r--drivers/gpu/drm/exynos/exynos7_drm_decon.c12
-rw-r--r--drivers/gpu/drm/exynos/exynos_dp.c11
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dsi.c50
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fbdev.c7
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimc.c11
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimd.c11
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_g2d.c10
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.c4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_mic.c11
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_rotator.c12
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_scaler.c12
-rw-r--r--drivers/gpu/drm/gma500/Kconfig2
-rw-r--r--drivers/gpu/drm/gma500/backlight.c2
-rw-r--r--drivers/gpu/drm/gma500/cdv_device.c1
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_crt.c2
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_display.c1
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_dp.c1
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_hdmi.c2
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_lvds.c2
-rw-r--r--drivers/gpu/drm/gma500/framebuffer.c15
-rw-r--r--drivers/gpu/drm/gma500/gma_display.c2
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_crtc.c1
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_hdmi.c2
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_lvds.c1
-rw-r--r--drivers/gpu/drm/gma500/psb_device.c1
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_display.c3
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_drv.h1
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_lvds.c2
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_sdvo.c2
-rw-r--r--drivers/gpu/drm/gud/gud_connector.c10
-rw-r--r--drivers/gpu/drm/gud/gud_drv.c18
-rw-r--r--drivers/gpu/drm/gud/gud_internal.h1
-rw-r--r--drivers/gpu/drm/gud/gud_pipe.c223
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/Kconfig2
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c4
-rw-r--r--drivers/gpu/drm/hyperv/hyperv_drm_drv.c4
-rw-r--r--drivers/gpu/drm/i2c/ch7006_drv.c14
-rw-r--r--drivers/gpu/drm/i2c/ch7006_priv.h1
-rw-r--r--drivers/gpu/drm/i2c/sil164_drv.c4
-rw-r--r--drivers/gpu/drm/i2c/tda9950.c5
-rw-r--r--drivers/gpu/drm/i2c/tda998x_drv.c4
-rw-r--r--drivers/gpu/drm/i810/Makefile8
-rw-r--r--drivers/gpu/drm/i810/i810_dma.c1266
-rw-r--r--drivers/gpu/drm/i810/i810_drv.c101
-rw-r--r--drivers/gpu/drm/i810/i810_drv.h246
-rw-r--r--drivers/gpu/drm/i915/Kconfig17
-rw-r--r--drivers/gpu/drm/i915/Makefile20
-rw-r--r--drivers/gpu/drm/i915/display/dvo_ch7xxx.c22
-rw-r--r--drivers/gpu/drm/i915/display/dvo_sil164.c13
-rw-r--r--drivers/gpu/drm/i915/display/g4x_dp.c2
-rw-r--r--drivers/gpu/drm/i915/display/g4x_hdmi.c2
-rw-r--r--drivers/gpu/drm/i915/display/icl_dsi.c5
-rw-r--r--drivers/gpu/drm/i915/display/intel_atomic.c85
-rw-r--r--drivers/gpu/drm/i915/display/intel_atomic_plane.c1
-rw-r--r--drivers/gpu/drm/i915/display/intel_audio.c328
-rw-r--r--drivers/gpu/drm/i915/display/intel_audio.h9
-rw-r--r--drivers/gpu/drm/i915/display/intel_backlight.c573
-rw-r--r--drivers/gpu/drm/i915/display/intel_backlight_regs.h27
-rw-r--r--drivers/gpu/drm/i915/display/intel_bios.c115
-rw-r--r--drivers/gpu/drm/i915/display/intel_bios.h13
-rw-r--r--drivers/gpu/drm/i915/display/intel_bw.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_cdclk.c218
-rw-r--r--drivers/gpu/drm/i915/display/intel_cdclk.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_color.c1265
-rw-r--r--drivers/gpu/drm/i915/display/intel_color.h10
-rw-r--r--drivers/gpu/drm/i915/display/intel_connector.c7
-rw-r--r--drivers/gpu/drm/i915/display/intel_crt.c53
-rw-r--r--drivers/gpu/drm/i915/display/intel_crtc.c1
-rw-r--r--drivers/gpu/drm/i915/display/intel_crtc_state_dump.c17
-rw-r--r--drivers/gpu/drm/i915/display/intel_ddi.c30
-rw-r--r--drivers/gpu/drm/i915/display/intel_de.h46
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.c219
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.h123
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_core.h34
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_limits.h124
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power_map.c1
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_trace.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_types.h39
-rw-r--r--drivers/gpu/drm/i915/display/intel_dmc.c135
-rw-r--r--drivers/gpu/drm/i915/display/intel_dmc.h5
-rw-r--r--drivers/gpu/drm/i915/display/intel_dmc_regs.h10
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.c306
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.h19
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_aux.c41
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_mst.c310
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_mst.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpio_phy.c9
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll_mgr.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_drrs.c8
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsb.c328
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsb.h14
-rw-r--r--drivers/gpu/drm/i915/display/intel_dvo.c410
-rw-r--r--drivers/gpu/drm/i915/display/intel_dvo_dev.h7
-rw-r--r--drivers/gpu/drm/i915/display/intel_dvo_regs.h54
-rw-r--r--drivers/gpu/drm/i915/display/intel_fb.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_fb_pin.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbc.c32
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbdev.c43
-rw-r--r--drivers/gpu/drm/i915/display/intel_gmbus.c46
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdmi.c53
-rw-r--r--drivers/gpu/drm/i915/display/intel_hti.c3
-rw-r--r--drivers/gpu/drm/i915/display/intel_lvds.c53
-rw-r--r--drivers/gpu/drm/i915/display/intel_modeset_setup.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_opregion.c29
-rw-r--r--drivers/gpu/drm/i915/display/intel_opregion.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_panel.c19
-rw-r--r--drivers/gpu/drm/i915/display/intel_panel.h5
-rw-r--r--drivers/gpu/drm/i915/display/intel_pch_refclk.c10
-rw-r--r--drivers/gpu/drm/i915/display/intel_pipe_crc.c23
-rw-r--r--drivers/gpu/drm/i915/display/intel_plane_initial.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_pps.c360
-rw-r--r--drivers/gpu/drm/i915/display/intel_pps.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_psr.c101
-rw-r--r--drivers/gpu/drm/i915/display/intel_sdvo.c47
-rw-r--r--drivers/gpu/drm/i915/display/intel_snps_phy.c17
-rw-r--r--drivers/gpu/drm/i915/display/intel_tc.c55
-rw-r--r--drivers/gpu/drm/i915/display/intel_tv.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_vblank.c441
-rw-r--r--drivers/gpu/drm/i915/display/intel_vblank.h23
-rw-r--r--drivers/gpu/drm/i915/display/intel_vdsc.c11
-rw-r--r--drivers/gpu/drm/i915/display/intel_vga.c32
-rw-r--r--drivers/gpu/drm/i915/display/intel_vrr.c55
-rw-r--r--drivers/gpu/drm/i915/display/skl_scaler.c49
-rw-r--r--drivers/gpu/drm/i915/display/skl_universal_plane.c2
-rw-r--r--drivers/gpu/drm/i915/display/skl_watermark.c45
-rw-r--r--drivers/gpu/drm/i915/display/skl_watermark.h2
-rw-r--r--drivers/gpu/drm/i915/display/vlv_dsi.c4
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_clflush.c1
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_context.c29
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_create.c3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_domain.c16
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c49
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_internal.c7
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_mman.c6
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object.c7
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object.h303
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object_types.h2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_pages.c27
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_phys.c4
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_shmem.c27
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_shrinker.c2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_stolen.c42
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_tiling.c13
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_ttm.c58
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c58
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_userptr.c6
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c6
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/huge_pages.c10
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c23
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c2
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c35
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c10
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c8
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c15
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.h2
-rw-r--r--drivers/gpu/drm/i915/gt/gen7_renderclear.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_context.c4
-rw-r--r--drivers/gpu/drm/i915/gt/intel_context.h3
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine.h6
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_cs.c103
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_pm.c18
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_regs.h1
-rw-r--r--drivers/gpu/drm/i915/gt/intel_execlists_submission.c22
-rw-r--r--drivers/gpu/drm/i915/gt/intel_execlists_submission.h4
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ggtt.c191
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c4
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ggtt_gmch.c6
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gpu_commands.h7
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gsc.c8
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt.c167
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt.h5
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c8
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_debugfs.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_irq.c11
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_mcr.c141
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_mcr.h2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_pm.c17
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_print.h51
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_regs.h45
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_sysfs.c4
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c42
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_types.h20
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gtt.c34
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gtt.h30
-rw-r--r--drivers/gpu/drm/i915/gt/intel_lrc.c6
-rw-r--r--drivers/gpu/drm/i915/gt/intel_migrate.c6
-rw-r--r--drivers/gpu/drm/i915/gt/intel_mocs.c3
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rc6.c12
-rw-r--r--drivers/gpu/drm/i915/gt/intel_renderstate.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_reset.c18
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ring_submission.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rps.c1
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rps.h3
-rw-r--r--drivers/gpu/drm/i915/gt/intel_workarounds.c330
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_engine_cs.c8
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_execlists.c30
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_hangcheck.c15
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_lrc.c20
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_migrate.c173
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_mocs.c4
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_reset.c2
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_ring_submission.c2
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_rps.c12
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_workarounds.c20
-rw-r--r--drivers/gpu/drm/i915/gt/shmem_utils.c7
-rw-r--r--drivers/gpu/drm/i915/gt/uc/guc_capture_fwif.h6
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c210
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.h15
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.c137
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.h47
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc.c47
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc.h11
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c8
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c8
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c23
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c22
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_log.c38
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_print.h48
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c120
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c2
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc.c99
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc.h3
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c291
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h23
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc_fw_abi.h3
-rw-r--r--drivers/gpu/drm/i915/gvt/cmd_parser.c1
-rw-r--r--drivers/gpu/drm/i915/gvt/display.c1
-rw-r--r--drivers/gpu/drm/i915/gvt/dmabuf.c10
-rw-r--r--drivers/gpu/drm/i915/gvt/fb_decoder.h2
-rw-r--r--drivers/gpu/drm/i915/gvt/firmware.c4
-rw-r--r--drivers/gpu/drm/i915/i915_cmd_parser.c4
-rw-r--r--drivers/gpu/drm/i915/i915_config.c5
-rw-r--r--drivers/gpu/drm/i915/i915_config.h23
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c8
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs_params.c33
-rw-r--r--drivers/gpu/drm/i915/i915_deps.c2
-rw-r--r--drivers/gpu/drm/i915/i915_driver.c194
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h134
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c22
-rw-r--r--drivers/gpu/drm/i915/i915_gem.h7
-rw-r--r--drivers/gpu/drm/i915/i915_gem_evict.c51
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.h5
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c92
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.h2
-rw-r--r--drivers/gpu/drm/i915/i915_hwmon.c43
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c436
-rw-r--r--drivers/gpu/drm/i915/i915_irq.h6
-rw-r--r--drivers/gpu/drm/i915/i915_params.c91
-rw-r--r--drivers/gpu/drm/i915/i915_params.h1
-rw-r--r--drivers/gpu/drm/i915/i915_pci.c53
-rw-r--r--drivers/gpu/drm/i915/i915_perf.c51
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h110
-rw-r--r--drivers/gpu/drm/i915/i915_request.c1
-rw-r--r--drivers/gpu/drm/i915/i915_scatterlist.c13
-rw-r--r--drivers/gpu/drm/i915/i915_ttm_buddy_manager.c2
-rw-r--r--drivers/gpu/drm/i915/i915_utils.h4
-rw-r--r--drivers/gpu/drm/i915/i915_vma.c80
-rw-r--r--drivers/gpu/drm/i915/i915_vma.h52
-rw-r--r--drivers/gpu/drm/i915/i915_vma_resource.c4
-rw-r--r--drivers/gpu/drm/i915/i915_vma_resource.h17
-rw-r--r--drivers/gpu/drm/i915/i915_vma_types.h3
-rw-r--r--drivers/gpu/drm/i915/intel_device_info.c7
-rw-r--r--drivers/gpu/drm/i915/intel_device_info.h2
-rw-r--r--drivers/gpu/drm/i915/intel_gvt_mmio_table.c1
-rw-r--r--drivers/gpu/drm/i915/intel_mchbar_regs.h2
-rw-r--r--drivers/gpu/drm/i915/intel_memory_region.c2
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c11
-rw-r--r--drivers/gpu/drm/i915/intel_pm_types.h2
-rw-r--r--drivers/gpu/drm/i915/intel_region_ttm.c17
-rw-r--r--drivers/gpu/drm/i915/intel_runtime_pm.h2
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c59
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.h13
-rw-r--r--drivers/gpu/drm/i915/intel_wakeref.h21
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp.c128
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp.h9
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_cmd.c8
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_cmd_interface_cmn.h1
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.c36
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.h4
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_huc.c11
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_irq.c18
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_pm.c6
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_session.c10
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_tee.c35
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_types.h8
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem.c6
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_gtt.c4
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_request.c20
-rw-r--r--drivers/gpu/drm/i915/selftests/igt_flush_test.c28
-rw-r--r--drivers/gpu/drm/i915/selftests/igt_spinner.c8
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_gtt.c2
-rw-r--r--drivers/gpu/drm/i915/selftests/scatterlist.c4
-rw-r--r--drivers/gpu/drm/i915/soc/intel_dram.c (renamed from drivers/gpu/drm/i915/intel_dram.c)0
-rw-r--r--drivers/gpu/drm/i915/soc/intel_dram.h (renamed from drivers/gpu/drm/i915/intel_dram.h)0
-rw-r--r--drivers/gpu/drm/i915/soc/intel_gmch.c171
-rw-r--r--drivers/gpu/drm/i915/soc/intel_gmch.h18
-rw-r--r--drivers/gpu/drm/i915/soc/intel_pch.c (renamed from drivers/gpu/drm/i915/intel_pch.c)0
-rw-r--r--drivers/gpu/drm/i915/soc/intel_pch.h (renamed from drivers/gpu/drm/i915/intel_pch.h)0
-rw-r--r--drivers/gpu/drm/i915/vlv_sideband.c1
-rw-r--r--drivers/gpu/drm/imx/Kconfig41
-rw-r--r--drivers/gpu/drm/imx/Makefile10
-rw-r--r--drivers/gpu/drm/imx/dcss/dcss-dev.c23
-rw-r--r--drivers/gpu/drm/imx/dcss/dcss-dev.h7
-rw-r--r--drivers/gpu/drm/imx/dcss/dcss-drv.c15
-rw-r--r--drivers/gpu/drm/imx/dcss/dcss-kms.c2
-rw-r--r--drivers/gpu/drm/imx/ipuv3/Kconfig41
-rw-r--r--drivers/gpu/drm/imx/ipuv3/Makefile11
-rw-r--r--drivers/gpu/drm/imx/ipuv3/dw_hdmi-imx.c (renamed from drivers/gpu/drm/imx/dw_hdmi-imx.c)0
-rw-r--r--drivers/gpu/drm/imx/ipuv3/imx-drm-core.c (renamed from drivers/gpu/drm/imx/imx-drm-core.c)0
-rw-r--r--drivers/gpu/drm/imx/ipuv3/imx-drm.h (renamed from drivers/gpu/drm/imx/imx-drm.h)0
-rw-r--r--drivers/gpu/drm/imx/ipuv3/imx-ldb.c (renamed from drivers/gpu/drm/imx/imx-ldb.c)0
-rw-r--r--drivers/gpu/drm/imx/ipuv3/imx-tve.c (renamed from drivers/gpu/drm/imx/imx-tve.c)0
-rw-r--r--drivers/gpu/drm/imx/ipuv3/ipuv3-crtc.c (renamed from drivers/gpu/drm/imx/ipuv3-crtc.c)0
-rw-r--r--drivers/gpu/drm/imx/ipuv3/ipuv3-plane.c (renamed from drivers/gpu/drm/imx/ipuv3-plane.c)0
-rw-r--r--drivers/gpu/drm/imx/ipuv3/ipuv3-plane.h (renamed from drivers/gpu/drm/imx/ipuv3-plane.h)0
-rw-r--r--drivers/gpu/drm/imx/ipuv3/parallel-display.c (renamed from drivers/gpu/drm/imx/parallel-display.c)0
-rw-r--r--drivers/gpu/drm/ingenic/ingenic-drm-drv.c1
-rw-r--r--drivers/gpu/drm/kmb/kmb_crtc.c1
-rw-r--r--drivers/gpu/drm/kmb/kmb_plane.c1
-rw-r--r--drivers/gpu/drm/logicvc/logicvc_drm.c13
-rw-r--r--drivers/gpu/drm/logicvc/logicvc_interface.c1
-rw-r--r--drivers/gpu/drm/logicvc/logicvc_mode.c1
-rw-r--r--drivers/gpu/drm/mediatek/Kconfig1
-rw-r--r--drivers/gpu/drm/mediatek/mtk_cec.c2
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_aal.c1
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_ccorr.c1
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_color.c1
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_gamma.c1
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_ovl.c1
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_rdma.c1
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dp.c6
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dpi.c32
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_crtc.c2
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_drv.c5
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_gem.c13
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dsi.c3
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c3
-rw-r--r--drivers/gpu/drm/meson/meson_dw_hdmi.c23
-rw-r--r--drivers/gpu/drm/mga/Makefile11
-rw-r--r--drivers/gpu/drm/mga/mga_dma.c1168
-rw-r--r--drivers/gpu/drm/mga/mga_drv.c104
-rw-r--r--drivers/gpu/drm/mga/mga_drv.h685
-rw-r--r--drivers/gpu/drm/mga/mga_ioc32.c197
-rw-r--r--drivers/gpu/drm/mga/mga_irq.c169
-rw-r--r--drivers/gpu/drm/mga/mga_state.c1099
-rw-r--r--drivers/gpu/drm/mga/mga_warp.c167
-rw-r--r--drivers/gpu/drm/mgag200/Kconfig2
-rw-r--r--drivers/gpu/drm/msm/Kconfig7
-rw-r--r--drivers/gpu/drm/msm/adreno/a2xx_gpu.c27
-rw-r--r--drivers/gpu/drm/msm/adreno/a2xx_gpu.h1
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx.xml.h18
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu.c2
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c61
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h66
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.c4
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c17
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c30
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h47
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c18
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c12
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c14
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c872
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h19
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c147
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c27
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h4
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c30
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h3
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h7
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c6
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c25
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hwio.h21
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c20
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c15
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c14
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c2
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_irq.c9
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c5
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_irq.c9
-rw-r--r--drivers/gpu/drm/msm/disp/msm_disp_snapshot.c3
-rw-r--r--drivers/gpu/drm/msm/dp/dp_display.c157
-rw-r--r--drivers/gpu/drm/msm/dp/dp_display.h1
-rw-r--r--drivers/gpu/drm/msm/dp/dp_drm.c3
-rw-r--r--drivers/gpu/drm/msm/dp/dp_drm.h4
-rw-r--r--drivers/gpu/drm/msm/dp/dp_panel.c7
-rw-r--r--drivers/gpu/drm/msm/dp/dp_panel.h1
-rw-r--r--drivers/gpu/drm/msm/dp/dp_parser.c50
-rw-r--r--drivers/gpu/drm/msm/dp/dp_parser.h2
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi.h3
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_cfg.c22
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_cfg.h2
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_host.c69
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_manager.c20
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy.c12
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy.h4
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c243
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.c6
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c21
-rw-r--r--drivers/gpu/drm/msm/msm_debugfs.c12
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c18
-rw-r--r--drivers/gpu/drm/msm/msm_drv.h12
-rw-r--r--drivers/gpu/drm/msm/msm_fbdev.c8
-rw-r--r--drivers/gpu/drm/msm/msm_fence.c2
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c2
-rw-r--r--drivers/gpu/drm/msm/msm_gem_submit.c15
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.h15
-rw-r--r--drivers/gpu/drm/msm/msm_gpu_devfreq.c148
-rw-r--r--drivers/gpu/drm/msm/msm_mdss.c16
-rw-r--r--drivers/gpu/drm/mxsfb/Kconfig2
-rw-r--r--drivers/gpu/drm/mxsfb/mxsfb_drv.c10
-rw-r--r--drivers/gpu/drm/nouveau/Kconfig14
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/crtc.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/dac.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/dfp.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/tvmodesnv17.c1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/tvnv04.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/tvnv17.c7
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/disp.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/head.c1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvfw/hs.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/outp.h3
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c7
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.h3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c12
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c11
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_mem.c3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_mem.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_prime.c1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sgdma.c1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_vga.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvif/outp.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/firmware.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/base.c10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/falcon/gm200.c14
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/base.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/g84.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/g98.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gf100.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gt215.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/mcp89.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu102.c23
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/tu102.c55
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c2
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dispc.c5
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dsi.c26
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.c41
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fbdev.c6
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem.c5
-rw-r--r--drivers/gpu/drm/omapdrm/omap_irq.c4
-rw-r--r--drivers/gpu/drm/panel/Kconfig38
-rw-r--r--drivers/gpu/drm/panel/Makefile4
-rw-r--r--drivers/gpu/drm/panel/panel-asus-z00t-tm5p5-n35596.c96
-rw-r--r--drivers/gpu/drm/panel/panel-auo-a030jtn01.c308
-rw-r--r--drivers/gpu/drm/panel/panel-boe-bf060y8m-aj0.c42
-rw-r--r--drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c16
-rw-r--r--drivers/gpu/drm/panel/panel-edp.c11
-rw-r--r--drivers/gpu/drm/panel/panel-elida-kd35t133.c46
-rw-r--r--drivers/gpu/drm/panel/panel-himax-hx8394.c451
-rw-r--r--drivers/gpu/drm/panel/panel-ilitek-ili9341.c6
-rw-r--r--drivers/gpu/drm/panel/panel-ilitek-ili9881c.c1
-rw-r--r--drivers/gpu/drm/panel/panel-jdi-fhd-r63452.c58
-rw-r--r--drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c106
-rw-r--r--drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c24
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt35950.c14
-rw-r--r--drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c5
-rw-r--r--drivers/gpu/drm/panel/panel-orisetech-ota5601a.c364
-rw-r--r--drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c6
-rw-r--r--drivers/gpu/drm/panel/panel-ronbo-rb070d30.c2
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-atna33xc20.c10
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c5
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c4
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c44
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c3
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-sofef00.c33
-rw-r--r--drivers/gpu/drm/panel/panel-sharp-ls060t1sx01.c19
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c6
-rw-r--r--drivers/gpu/drm/panel/panel-sitronix-st7703.c341
-rw-r--r--drivers/gpu/drm/panel/panel-sony-tulip-truly-nt35521.c398
-rw-r--r--drivers/gpu/drm/panel/panel-visionox-vtdr6130.c350
-rw-r--r--drivers/gpu/drm/panel/panel-xinpeng-xpp055c272.c112
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_device.c10
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_device.h4
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_drv.c7
-rw-r--r--drivers/gpu/drm/qxl/qxl_cmd.c16
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.h3
-rw-r--r--drivers/gpu/drm/qxl/qxl_ttm.c4
-rw-r--r--drivers/gpu/drm/r128/Makefile10
-rw-r--r--drivers/gpu/drm/r128/ati_pcigart.c228
-rw-r--r--drivers/gpu/drm/r128/ati_pcigart.h31
-rw-r--r--drivers/gpu/drm/r128/r128_cce.c944
-rw-r--r--drivers/gpu/drm/r128/r128_drv.c116
-rw-r--r--drivers/gpu/drm/r128/r128_drv.h544
-rw-r--r--drivers/gpu/drm/r128/r128_ioc32.c199
-rw-r--r--drivers/gpu/drm/r128/r128_irq.c118
-rw-r--r--drivers/gpu/drm/r128/r128_state.c1641
-rw-r--r--drivers/gpu/drm/radeon/Kconfig2
-rw-r--r--drivers/gpu/drm/radeon/atombios.h10
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c3
-rw-r--r--drivers/gpu/drm/radeon/atombios_encoders.c1
-rw-r--r--drivers/gpu/drm/radeon/r300.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon.h3
-rw-r--r--drivers/gpu/drm/radeon/radeon_acpi.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_dp_auxch.c5
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_encoders.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_fb.c10
-rw-r--r--drivers/gpu/drm/radeon/radeon_irq_kms.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_crtc.c5
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_encoders.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_tv.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h2
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_prime.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c4
-rw-r--r--drivers/gpu/drm/rcar-du/Kconfig6
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_crtc.c39
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_drv.c82
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_drv.h2
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_group.c26
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_kms.c30
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_regs.h8
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_vsp.c50
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_lvds.c92
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_mipi_dsi.c497
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_mipi_dsi_regs.h6
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_gem.c3
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop2.c1
-rw-r--r--drivers/gpu/drm/savage/Makefile9
-rw-r--r--drivers/gpu/drm/savage/savage_bci.c1082
-rw-r--r--drivers/gpu/drm/savage/savage_drv.c91
-rw-r--r--drivers/gpu/drm/savage/savage_drv.h580
-rw-r--r--drivers/gpu/drm/savage/savage_state.c1169
-rw-r--r--drivers/gpu/drm/scheduler/sched_main.c19
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_crtc.c2
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_drv.c10
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_plane.c1
-rw-r--r--drivers/gpu/drm/sis/Makefile10
-rw-r--r--drivers/gpu/drm/sis/sis_drv.c143
-rw-r--r--drivers/gpu/drm/sis/sis_drv.h80
-rw-r--r--drivers/gpu/drm/sis/sis_mm.c363
-rw-r--r--drivers/gpu/drm/solomon/ssd130x.c33
-rw-r--r--drivers/gpu/drm/sprd/sprd_dpu.c5
-rw-r--r--drivers/gpu/drm/sprd/sprd_drm.c1
-rw-r--r--drivers/gpu/drm/sprd/sprd_dsi.c1
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tv.c141
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c2
-rw-r--r--drivers/gpu/drm/tdfx/Makefile8
-rw-r--r--drivers/gpu/drm/tdfx/tdfx_drv.c90
-rw-r--r--drivers/gpu/drm/tegra/dpaux.c6
-rw-r--r--drivers/gpu/drm/tegra/drm.c2
-rw-r--r--drivers/gpu/drm/tegra/fb.c8
-rw-r--r--drivers/gpu/drm/tegra/firewall.c3
-rw-r--r--drivers/gpu/drm/tegra/gem.c5
-rw-r--r--drivers/gpu/drm/tegra/nvdec.c16
-rw-r--r--drivers/gpu/drm/tegra/submit.c19
-rw-r--r--drivers/gpu/drm/tegra/vic.c39
-rw-r--r--drivers/gpu/drm/tests/Makefile8
-rw-r--r--drivers/gpu/drm/tests/drm_client_modeset_test.c110
-rw-r--r--drivers/gpu/drm/tests/drm_cmdline_parser_test.c68
-rw-r--r--drivers/gpu/drm/tests/drm_connector_test.c76
-rw-r--r--drivers/gpu/drm/tests/drm_format_helper_test.c384
-rw-r--r--drivers/gpu/drm/tests/drm_kunit_helpers.c105
-rw-r--r--drivers/gpu/drm/tests/drm_kunit_helpers.h11
-rw-r--r--drivers/gpu/drm/tests/drm_managed_test.c71
-rw-r--r--drivers/gpu/drm/tests/drm_modes_test.c158
-rw-r--r--drivers/gpu/drm/tests/drm_probe_helper_test.c218
-rw-r--r--drivers/gpu/drm/tidss/tidss_crtc.c1
-rw-r--r--drivers/gpu/drm/tidss/tidss_dispc.c6
-rw-r--r--drivers/gpu/drm/tidss/tidss_drv.c1
-rw-r--r--drivers/gpu/drm/tidss/tidss_encoder.c2
-rw-r--r--drivers/gpu/drm/tidss/tidss_kms.c1
-rw-r--r--drivers/gpu/drm/tidss/tidss_plane.c1
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_drv.c9
-rw-r--r--drivers/gpu/drm/tiny/cirrus.c2
-rw-r--r--drivers/gpu/drm/tiny/gm12u320.c15
-rw-r--r--drivers/gpu/drm/tiny/hx8357d.c5
-rw-r--r--drivers/gpu/drm/tiny/ili9163.c6
-rw-r--r--drivers/gpu/drm/tiny/ili9225.c36
-rw-r--r--drivers/gpu/drm/tiny/ili9341.c5
-rw-r--r--drivers/gpu/drm/tiny/ili9486.c20
-rw-r--r--drivers/gpu/drm/tiny/mi0283qt.c5
-rw-r--r--drivers/gpu/drm/tiny/ofdrm.c40
-rw-r--r--drivers/gpu/drm/tiny/panel-mipi-dbi.c10
-rw-r--r--drivers/gpu/drm/tiny/simpledrm.c140
-rw-r--r--drivers/gpu/drm/tiny/st7586.c39
-rw-r--r--drivers/gpu/drm/tiny/st7735r.c5
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c227
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c131
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_vm.c22
-rw-r--r--drivers/gpu/drm/ttm/ttm_device.c26
-rw-r--r--drivers/gpu/drm/ttm/ttm_execbuf_util.c6
-rw-r--r--drivers/gpu/drm/ttm/ttm_pool.c3
-rw-r--r--drivers/gpu/drm/ttm/ttm_range_manager.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_resource.c3
-rw-r--r--drivers/gpu/drm/ttm/ttm_tt.c3
-rw-r--r--drivers/gpu/drm/udl/udl_drv.c2
-rw-r--r--drivers/gpu/drm/udl/udl_modeset.c1
-rw-r--r--drivers/gpu/drm/v3d/v3d_debugfs.c22
-rw-r--r--drivers/gpu/drm/v3d/v3d_gem.c62
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_drv.c8
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_main.c1
-rw-r--r--drivers/gpu/drm/vc4/Kconfig16
-rw-r--r--drivers/gpu/drm/vc4/Makefile7
-rw-r--r--drivers/gpu/drm/vc4/tests/.kunitconfig13
-rw-r--r--drivers/gpu/drm/vc4/tests/vc4_mock.c200
-rw-r--r--drivers/gpu/drm/vc4/tests/vc4_mock.h63
-rw-r--r--drivers/gpu/drm/vc4/tests/vc4_mock_crtc.c41
-rw-r--r--drivers/gpu/drm/vc4/tests/vc4_mock_output.c138
-rw-r--r--drivers/gpu/drm/vc4/tests/vc4_mock_plane.c47
-rw-r--r--drivers/gpu/drm/vc4/tests/vc4_test_pv_muxing.c1039
-rw-r--r--drivers/gpu/drm/vc4/vc4_bo.c10
-rw-r--r--drivers/gpu/drm/vc4/vc4_crtc.c217
-rw-r--r--drivers/gpu/drm/vc4/vc4_debugfs.c36
-rw-r--r--drivers/gpu/drm/vc4/vc4_dpi.c34
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.c5
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.h148
-rw-r--r--drivers/gpu/drm/vc4/vc4_dsi.c189
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.c49
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi_regs.h4
-rw-r--r--drivers/gpu/drm/vc4/vc4_hvs.c272
-rw-r--r--drivers/gpu/drm/vc4/vc4_kms.c139
-rw-r--r--drivers/gpu/drm/vc4/vc4_plane.c145
-rw-r--r--drivers/gpu/drm/vc4/vc4_regs.h20
-rw-r--r--drivers/gpu/drm/vc4/vc4_txp.c62
-rw-r--r--drivers/gpu/drm/vc4/vc4_v3d.c14
-rw-r--r--drivers/gpu/drm/vc4/vc4_vec.c365
-rw-r--r--drivers/gpu/drm/via/Makefile8
-rw-r--r--drivers/gpu/drm/via/via_3d_reg.h1771
-rw-r--r--drivers/gpu/drm/via/via_dri1.c3630
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.h3
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_ioctl.c5
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_trace.h26
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_vq.c13
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_vram.c2
-rw-r--r--drivers/gpu/drm/vkms/vkms_drv.c27
-rw-r--r--drivers/gpu/drm/vkms/vkms_drv.h4
-rw-r--r--drivers/gpu/drm/vkms/vkms_plane.c36
-rw-r--r--drivers/gpu/drm/vmwgfx/ttm_object.h10
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_bo.c12
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_gem.c8
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_shader.c1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_surface.c10
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_system_manager.c1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c2
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front_gem.c3
-rw-r--r--drivers/gpu/host1x/bus.c2
-rw-r--r--drivers/gpu/host1x/cdma.c16
-rw-r--r--drivers/gpu/host1x/cdma.h2
-rw-r--r--drivers/gpu/host1x/context.c8
-rw-r--r--drivers/gpu/host1x/debug.c7
-rw-r--r--drivers/gpu/host1x/dev.c4
-rw-r--r--drivers/gpu/host1x/dev.h10
-rw-r--r--drivers/gpu/host1x/fence.c118
-rw-r--r--drivers/gpu/host1x/fence.h19
-rw-r--r--drivers/gpu/host1x/hw/channel_hw.c60
-rw-r--r--drivers/gpu/host1x/hw/hw_host1x06_uclass.h2
-rw-r--r--drivers/gpu/host1x/hw/hw_host1x07_uclass.h2
-rw-r--r--drivers/gpu/host1x/hw/hw_host1x08_uclass.h2
-rw-r--r--drivers/gpu/host1x/hw/intr_hw.c74
-rw-r--r--drivers/gpu/host1x/hw/syncpt_hw.c3
-rw-r--r--drivers/gpu/host1x/intr.c334
-rw-r--r--drivers/gpu/host1x/intr.h83
-rw-r--r--drivers/gpu/host1x/job.c12
-rw-r--r--drivers/gpu/host1x/syncpt.c98
-rw-r--r--drivers/gpu/host1x/syncpt.h3
-rw-r--r--drivers/gpu/ipu-v3/ipu-common.c1
-rw-r--r--drivers/greybus/core.c14
-rw-r--r--drivers/hid/.kunitconfig1
-rw-r--r--drivers/hid/Kconfig39
-rw-r--r--drivers/hid/Makefile3
-rw-r--r--drivers/hid/amd-sfh-hid/Kconfig2
-rw-r--r--drivers/hid/amd-sfh-hid/amd_sfh_client.c13
-rw-r--r--drivers/hid/amd-sfh-hid/amd_sfh_hid.c2
-rw-r--r--drivers/hid/amd-sfh-hid/amd_sfh_hid.h1
-rw-r--r--drivers/hid/bpf/Kconfig16
-rw-r--r--drivers/hid/bpf/Makefile11
-rw-r--r--drivers/hid/bpf/entrypoints/Makefile93
-rw-r--r--drivers/hid/bpf/entrypoints/README4
-rw-r--r--drivers/hid/bpf/entrypoints/entrypoints.bpf.c25
-rw-r--r--drivers/hid/bpf/entrypoints/entrypoints.lskel.h248
-rw-r--r--drivers/hid/bpf/hid_bpf_dispatch.c551
-rw-r--r--drivers/hid/bpf/hid_bpf_dispatch.h25
-rw-r--r--drivers/hid/bpf/hid_bpf_jmp_table.c565
-rw-r--r--drivers/hid/hid-asus.c37
-rw-r--r--drivers/hid/hid-bigbenff.c75
-rw-r--r--drivers/hid/hid-core.c53
-rw-r--r--drivers/hid/hid-debug.c1
-rw-r--r--drivers/hid/hid-elecom.c16
-rw-r--r--drivers/hid/hid-evision.c53
-rw-r--r--drivers/hid/hid-hyperv.c6
-rw-r--r--drivers/hid/hid-ids.h12
-rw-r--r--drivers/hid/hid-input-test.c80
-rw-r--r--drivers/hid/hid-input.c52
-rw-r--r--drivers/hid/hid-letsketch.c2
-rw-r--r--drivers/hid/hid-logitech-dj.c4
-rw-r--r--drivers/hid/hid-logitech-hidpp.c155
-rw-r--r--drivers/hid/hid-mcp2221.c3
-rw-r--r--drivers/hid/hid-multitouch.c39
-rw-r--r--drivers/hid/hid-playstation.c41
-rw-r--r--drivers/hid/hid-quirks.c5
-rw-r--r--drivers/hid/hid-sensor-custom.c242
-rw-r--r--drivers/hid/hid-sensor-hub.c6
-rw-r--r--drivers/hid/hid-sony.c1021
-rw-r--r--drivers/hid/hid-steam.c385
-rw-r--r--drivers/hid/hid-uclogic-core-test.c105
-rw-r--r--drivers/hid/hid-uclogic-core.c61
-rw-r--r--drivers/hid/hid-uclogic-params-test.c16
-rw-r--r--drivers/hid/hid-uclogic-params.c124
-rw-r--r--drivers/hid/hid-uclogic-params.h40
-rw-r--r--drivers/hid/hid-uclogic-rdesc-test.c3
-rw-r--r--drivers/hid/hid-uclogic-rdesc.c6
-rw-r--r--drivers/hid/hid-uclogic-rdesc.h5
-rw-r--r--drivers/hid/i2c-hid/Kconfig31
-rw-r--r--drivers/hid/i2c-hid/i2c-hid-acpi.c26
-rw-r--r--drivers/hid/i2c-hid/i2c-hid-core.c24
-rw-r--r--drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c42
-rw-r--r--drivers/hid/i2c-hid/i2c-hid-of-goodix.c98
-rw-r--r--drivers/hid/i2c-hid/i2c-hid.h3
-rw-r--r--drivers/hid/intel-ish-hid/Kconfig2
-rw-r--r--drivers/hid/intel-ish-hid/ishtp-hid.c2
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/bus.c2
-rw-r--r--drivers/hid/surface-hid/surface_hid.c8
-rw-r--r--drivers/hid/surface-hid/surface_hid_core.c2
-rw-r--r--drivers/hid/surface-hid/surface_kbd.c8
-rw-r--r--drivers/hid/uhid.c3
-rw-r--r--drivers/hid/usbhid/hid-core.c9
-rw-r--r--drivers/hsi/clients/cmt_speech.c2
-rw-r--r--drivers/hsi/hsi_core.c2
-rw-r--r--drivers/hv/hv.c18
-rw-r--r--drivers/hv/hv_balloon.c6
-rw-r--r--drivers/hv/hv_common.c9
-rw-r--r--drivers/hv/hv_util.c4
-rw-r--r--drivers/hv/vmbus_drv.c6
-rw-r--r--drivers/hwmon/Kconfig37
-rw-r--r--drivers/hwmon/Makefile3
-rw-r--r--drivers/hwmon/aht10.c3
-rw-r--r--drivers/hwmon/aquacomputer_d5next.c444
-rw-r--r--drivers/hwmon/asus-ec-sensors.c3
-rw-r--r--drivers/hwmon/coretemp.c132
-rw-r--r--drivers/hwmon/drivetemp.c11
-rw-r--r--drivers/hwmon/emc2305.c24
-rw-r--r--drivers/hwmon/ftsteutates.c555
-rw-r--r--drivers/hwmon/gxp-fan-ctrl.c253
-rw-r--r--drivers/hwmon/hih6130.c4
-rw-r--r--drivers/hwmon/ibmpex.c2
-rw-r--r--drivers/hwmon/iio_hwmon.c8
-rw-r--r--drivers/hwmon/intel-m10-bmc-hwmon.c229
-rw-r--r--drivers/hwmon/it87.c195
-rw-r--r--drivers/hwmon/ltc2945.c132
-rw-r--r--drivers/hwmon/mc34vr500.c263
-rw-r--r--drivers/hwmon/mlxreg-fan.c6
-rw-r--r--drivers/hwmon/nct6775-core.c2
-rw-r--r--drivers/hwmon/nct6775-platform.c150
-rw-r--r--drivers/hwmon/nzxt-smart2.c1
-rw-r--r--drivers/hwmon/oxp-sensors.c52
-rw-r--r--drivers/hwmon/peci/cputemp.c2
-rw-r--r--drivers/hwmon/pmbus/Kconfig36
-rw-r--r--drivers/hwmon/pmbus/Makefile2
-rw-r--r--drivers/hwmon/pmbus/ltc2978.c16
-rw-r--r--drivers/hwmon/pmbus/max16601.c14
-rw-r--r--drivers/hwmon/pmbus/mpq7932.c156
-rw-r--r--drivers/hwmon/pmbus/pmbus.h5
-rw-r--r--drivers/hwmon/pmbus/tda38640.c74
-rw-r--r--drivers/hwmon/s3c-hwmon.c379
-rw-r--r--drivers/hwmon/sht15.c8
-rw-r--r--drivers/hwmon/sht21.c4
-rw-r--r--drivers/hwtracing/coresight/Kconfig35
-rw-r--r--drivers/hwtracing/coresight/Makefile5
-rw-r--r--drivers/hwtracing/coresight/coresight-core.c87
-rw-r--r--drivers/hwtracing/coresight/coresight-cti-core.c23
-rw-r--r--drivers/hwtracing/coresight/coresight-cti-sysfs.c15
-rw-r--r--drivers/hwtracing/coresight/coresight-cti.h2
-rw-r--r--drivers/hwtracing/coresight/coresight-etm-perf.c31
-rw-r--r--drivers/hwtracing/coresight/coresight-etm-perf.h2
-rw-r--r--drivers/hwtracing/coresight/coresight-etm.h3
-rw-r--r--drivers/hwtracing/coresight/coresight-etm3x-core.c93
-rw-r--r--drivers/hwtracing/coresight/coresight-etm3x-sysfs.c27
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x-core.c91
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x-sysfs.c27
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x.h3
-rw-r--r--drivers/hwtracing/coresight/coresight-stm.c49
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc-core.c4
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc-etf.c45
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc-etr.c19
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc.h2
-rw-r--r--drivers/hwtracing/coresight/coresight-tpda.c211
-rw-r--r--drivers/hwtracing/coresight/coresight-tpda.h35
-rw-r--r--drivers/hwtracing/coresight/coresight-tpdm.c259
-rw-r--r--drivers/hwtracing/coresight/coresight-tpdm.h62
-rw-r--r--drivers/hwtracing/coresight/coresight-trace-id.c297
-rw-r--r--drivers/hwtracing/coresight/coresight-trace-id.h156
-rw-r--r--drivers/hwtracing/coresight/ultrasoc-smb.c648
-rw-r--r--drivers/hwtracing/coresight/ultrasoc-smb.h125
-rw-r--r--drivers/hwtracing/intel_th/core.c6
-rw-r--r--drivers/hwtracing/intel_th/intel_th.h4
-rw-r--r--drivers/hwtracing/intel_th/msu.c2
-rw-r--r--drivers/hwtracing/ptt/hisi_ptt.c10
-rw-r--r--drivers/hwtracing/stm/Kconfig1
-rw-r--r--drivers/hwtracing/stm/core.c2
-rw-r--r--drivers/i2c/algos/i2c-algo-bit.c77
-rw-r--r--drivers/i2c/busses/Kconfig29
-rw-r--r--drivers/i2c/busses/Makefile2
-rw-r--r--drivers/i2c/busses/i2c-aspeed.c4
-rw-r--r--drivers/i2c/busses/i2c-au1550.c4
-rw-r--r--drivers/i2c/busses/i2c-bcm2835.c4
-rw-r--r--drivers/i2c/busses/i2c-cadence.c23
-rw-r--r--drivers/i2c/busses/i2c-cht-wc.c46
-rw-r--r--drivers/i2c/busses/i2c-designware-common.c13
-rw-r--r--drivers/i2c/busses/i2c-designware-core.h5
-rw-r--r--drivers/i2c/busses/i2c-designware-master.c33
-rw-r--r--drivers/i2c/busses/i2c-designware-slave.c4
-rw-r--r--drivers/i2c/busses/i2c-gpio.c47
-rw-r--r--drivers/i2c/busses/i2c-gxp.c620
-rw-r--r--drivers/i2c/busses/i2c-i801.c310
-rw-r--r--drivers/i2c/busses/i2c-ls2x.c370
-rw-r--r--drivers/i2c/busses/i2c-mt65xx.c7
-rw-r--r--drivers/i2c/busses/i2c-nvidia-gpu.c4
-rw-r--r--drivers/i2c/busses/i2c-qcom-cci.c8
-rw-r--r--drivers/i2c/busses/i2c-qcom-geni.c4
-rw-r--r--drivers/i2c/busses/i2c-s3c2410.c72
-rw-r--r--drivers/i2c/busses/i2c-st.c9
-rw-r--r--drivers/i2c/busses/i2c-xiic.c586
-rw-r--r--drivers/i2c/i2c-core-acpi.c13
-rw-r--r--drivers/i2c/i2c-core-base.c107
-rw-r--r--drivers/i2c/i2c-core-of.c66
-rw-r--r--drivers/i2c/i2c-dev.c16
-rw-r--r--drivers/i3c/device.c14
-rw-r--r--drivers/i3c/master.c4
-rw-r--r--drivers/idle/intel_idle.c21
-rw-r--r--drivers/iio/accel/Kconfig2
-rw-r--r--drivers/iio/accel/bma400.h4
-rw-r--r--drivers/iio/accel/bma400_core.c29
-rw-r--r--drivers/iio/accel/hid-sensor-accel-3d.c1
-rw-r--r--drivers/iio/accel/mma9551_core.c10
-rw-r--r--drivers/iio/accel/st_accel.h1
-rw-r--r--drivers/iio/accel/st_accel_core.c1
-rw-r--r--drivers/iio/accel/st_accel_i2c.c5
-rw-r--r--drivers/iio/accel/st_accel_spi.c5
-rw-r--r--drivers/iio/adc/Kconfig40
-rw-r--r--drivers/iio/adc/Makefile3
-rw-r--r--drivers/iio/adc/ad7291.c2
-rw-r--r--drivers/iio/adc/at91-sama5d2_adc.c4
-rw-r--r--drivers/iio/adc/berlin2-adc.c4
-rw-r--r--drivers/iio/adc/ep93xx_adc.c8
-rw-r--r--drivers/iio/adc/imx8qxp-adc.c11
-rw-r--r--drivers/iio/adc/imx93_adc.c484
-rw-r--r--drivers/iio/adc/max11410.c3
-rw-r--r--drivers/iio/adc/qcom-spmi-adc5.c8
-rw-r--r--drivers/iio/adc/stm32-dfsdm-adc.c1
-rw-r--r--drivers/iio/adc/stm32-dfsdm-core.c99
-rw-r--r--drivers/iio/adc/stm32-dfsdm.h60
-rw-r--r--drivers/iio/adc/ti-adc128s052.c54
-rw-r--r--drivers/iio/adc/ti-ads7924.c474
-rw-r--r--drivers/iio/adc/ti-lmp92064.c332
-rw-r--r--drivers/iio/adc/twl6030-gpadc.c32
-rw-r--r--drivers/iio/adc/xilinx-ams.c11
-rw-r--r--drivers/iio/cdc/ad7746.c3
-rw-r--r--drivers/iio/chemical/scd30_core.c46
-rw-r--r--drivers/iio/common/scmi_sensors/scmi_iio.c4
-rw-r--r--drivers/iio/dac/Kconfig21
-rw-r--r--drivers/iio/dac/Makefile1
-rw-r--r--drivers/iio/dac/ad5686.c7
-rw-r--r--drivers/iio/dac/ad5686.h1
-rw-r--r--drivers/iio/dac/ad5696-i2c.c2
-rw-r--r--drivers/iio/dac/max5522.c207
-rw-r--r--drivers/iio/gyro/hid-sensor-gyro-3d.c1
-rw-r--r--drivers/iio/imu/bno055/bno055_ser_trace.c2
-rw-r--r--drivers/iio/imu/fxos8700_core.c111
-rw-r--r--drivers/iio/imu/kmx61.c2
-rw-r--r--drivers/iio/imu/st_lsm6dsx/Kconfig1
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h5
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c12
-rw-r--r--drivers/iio/industrialio-core.c64
-rw-r--r--drivers/iio/light/Makefile2
-rw-r--r--drivers/iio/light/cm32181.c9
-rw-r--r--drivers/iio/light/hid-sensor-als.c27
-rw-r--r--drivers/iio/light/hid-sensor-prox.c37
-rw-r--r--drivers/iio/light/max44009.c5
-rw-r--r--drivers/iio/light/tsl2563.c189
-rw-r--r--drivers/iio/light/vcnl4000.c449
-rw-r--r--drivers/iio/magnetometer/Kconfig14
-rw-r--r--drivers/iio/magnetometer/Makefile2
-rw-r--r--drivers/iio/magnetometer/st_magn.h1
-rw-r--r--drivers/iio/magnetometer/st_magn_core.c1
-rw-r--r--drivers/iio/magnetometer/st_magn_i2c.c5
-rw-r--r--drivers/iio/magnetometer/st_magn_spi.c5
-rw-r--r--drivers/iio/magnetometer/tmag5273.c743
-rw-r--r--drivers/iio/pressure/ms5611.h4
-rw-r--r--drivers/iio/pressure/ms5611_core.c49
-rw-r--r--drivers/iio/pressure/ms5611_i2c.c6
-rw-r--r--drivers/iio/pressure/ms5611_spi.c6
-rw-r--r--drivers/infiniband/core/cma.c300
-rw-r--r--drivers/infiniband/core/sa_query.c171
-rw-r--r--drivers/infiniband/core/umem_dmabuf.c8
-rw-r--r--drivers/infiniband/hw/bnxt_re/bnxt_re.h10
-rw-r--r--drivers/infiniband/hw/bnxt_re/main.c635
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c7
-rw-r--r--drivers/infiniband/hw/cxgb4/cq.c2
-rw-r--r--drivers/infiniband/hw/cxgb4/restrack.c2
-rw-r--r--drivers/infiniband/hw/cxgb4/t4fw_ri_api.h26
-rw-r--r--drivers/infiniband/hw/erdma/erdma_cm.c3
-rw-r--r--drivers/infiniband/hw/erdma/erdma_hw.h4
-rw-r--r--drivers/infiniband/hw/erdma/erdma_verbs.c4
-rw-r--r--drivers/infiniband/hw/hfi1/chip.c59
-rw-r--r--drivers/infiniband/hw/hfi1/exp_rcv.h5
-rw-r--r--drivers/infiniband/hw/hfi1/file_ops.c92
-rw-r--r--drivers/infiniband/hw/hfi1/init.c2
-rw-r--r--drivers/infiniband/hw/hfi1/sdma.c4
-rw-r--r--drivers/infiniband/hw/hfi1/sdma.h15
-rw-r--r--drivers/infiniband/hw/hfi1/user_exp_rcv.c55
-rw-r--r--drivers/infiniband/hw/hfi1/user_pages.c61
-rw-r--r--drivers/infiniband/hw/hfi1/verbs.c81
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_device.h19
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.c109
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.h3
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_main.c17
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_qp.c66
-rw-r--r--drivers/infiniband/hw/irdma/cm.c3
-rw-r--r--drivers/infiniband/hw/irdma/hw.c2
-rw-r--r--drivers/infiniband/hw/irdma/verbs.c313
-rw-r--r--drivers/infiniband/hw/mana/main.c22
-rw-r--r--drivers/infiniband/hw/mana/qp.c2
-rw-r--r--drivers/infiniband/hw/mlx4/main.c8
-rw-r--r--drivers/infiniband/hw/mlx4/mlx4_ib.h3
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c121
-rw-r--r--drivers/infiniband/hw/mlx5/cmd.c45
-rw-r--r--drivers/infiniband/hw/mlx5/cmd.h3
-rw-r--r--drivers/infiniband/hw/mlx5/cong.c28
-rw-r--r--drivers/infiniband/hw/mlx5/ib_rep.c18
-rw-r--r--drivers/infiniband/hw/mlx5/main.c106
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h54
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c490
-rw-r--r--drivers/infiniband/hw/mlx5/odp.c67
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c164
-rw-r--r--drivers/infiniband/hw/mlx5/qp.h4
-rw-r--r--drivers/infiniband/hw/mlx5/qpc.c7
-rw-r--r--drivers/infiniband/hw/mlx5/srq.c2
-rw-r--r--drivers/infiniband/hw/mlx5/umr.c4
-rw-r--r--drivers/infiniband/hw/mlx5/wr.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_file_ops.c13
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_verbs.c2
-rw-r--r--drivers/infiniband/hw/usnic/usnic_uiom.c4
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c2
-rw-r--r--drivers/infiniband/sw/rxe/rxe.h38
-rw-r--r--drivers/infiniband/sw/rxe/rxe_loc.h12
-rw-r--r--drivers/infiniband/sw/rxe/rxe_mr.c606
-rw-r--r--drivers/infiniband/sw/rxe/rxe_pool.c46
-rw-r--r--drivers/infiniband/sw/rxe/rxe_pool.h3
-rw-r--r--drivers/infiniband/sw/rxe/rxe_queue.h108
-rw-r--r--drivers/infiniband/sw/rxe/rxe_resp.c202
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.c115
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.h32
-rw-r--r--drivers/infiniband/sw/siw/siw_cm.c5
-rw-r--r--drivers/infiniband/sw/siw/siw_mem.c23
-rw-r--r--drivers/infiniband/sw/siw/siw_qp.c3
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c10
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.c2
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c3
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c2
-rw-r--r--drivers/input/input.c16
-rw-r--r--drivers/input/keyboard/Kconfig19
-rw-r--r--drivers/input/keyboard/Makefile2
-rw-r--r--drivers/input/keyboard/davinci_keyscan.c315
-rw-r--r--drivers/input/keyboard/pxa930_rotary.c195
-rw-r--r--drivers/input/mouse/Kconfig6
-rw-r--r--drivers/input/mouse/Makefile1
-rw-r--r--drivers/input/mouse/pxa930_trkball.c250
-rw-r--r--drivers/input/serio/hyperv-keyboard.c4
-rw-r--r--drivers/input/serio/serio.c4
-rw-r--r--drivers/input/touchscreen/Kconfig42
-rw-r--r--drivers/input/touchscreen/Makefile3
-rw-r--r--drivers/input/touchscreen/mainstone-wm97xx.c10
-rw-r--r--drivers/input/touchscreen/s3c2410_ts.c464
-rw-r--r--drivers/input/touchscreen/ucb1400_ts.c458
-rw-r--r--drivers/input/touchscreen/zylonite-wm97xx.c220
-rw-r--r--drivers/interconnect/core.c14
-rw-r--r--drivers/interconnect/qcom/Kconfig36
-rw-r--r--drivers/interconnect/qcom/Makefile8
-rw-r--r--drivers/interconnect/qcom/qdu1000.c1067
-rw-r--r--drivers/interconnect/qcom/qdu1000.h95
-rw-r--r--drivers/interconnect/qcom/sa8775p.c2541
-rw-r--r--drivers/interconnect/qcom/sc7180.h4
-rw-r--r--drivers/interconnect/qcom/sc8180x.c38
-rw-r--r--drivers/interconnect/qcom/sc8180x.h4
-rw-r--r--drivers/interconnect/qcom/sc8280xp.c25
-rw-r--r--drivers/interconnect/qcom/sc8280xp.h4
-rw-r--r--drivers/interconnect/qcom/sdm670.c440
-rw-r--r--drivers/interconnect/qcom/sdm670.h128
-rw-r--r--drivers/interconnect/qcom/sdx55.h4
-rw-r--r--drivers/interconnect/qcom/sm8150.c21
-rw-r--r--drivers/interconnect/qcom/sm8150.h4
-rw-r--r--drivers/interconnect/qcom/sm8250.c21
-rw-r--r--drivers/interconnect/qcom/sm8250.h4
-rw-r--r--drivers/interconnect/qcom/sm8550.c2318
-rw-r--r--drivers/interconnect/qcom/sm8550.h178
-rw-r--r--drivers/iommu/Kconfig18
-rw-r--r--drivers/iommu/amd/init.c16
-rw-r--r--drivers/iommu/amd/iommu.c94
-rw-r--r--drivers/iommu/apple-dart.c632
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c2
-rw-r--r--drivers/iommu/arm/arm-smmu/qcom_iommu.c23
-rw-r--r--drivers/iommu/dma-iommu.c21
-rw-r--r--drivers/iommu/exynos-iommu.c216
-rw-r--r--drivers/iommu/fsl_pamu_domain.c6
-rw-r--r--drivers/iommu/intel/Kconfig11
-rw-r--r--drivers/iommu/intel/Makefile1
-rw-r--r--drivers/iommu/intel/dmar.c33
-rw-r--r--drivers/iommu/intel/iommu.c124
-rw-r--r--drivers/iommu/intel/iommu.h115
-rw-r--r--drivers/iommu/intel/irq_remapping.c3
-rw-r--r--drivers/iommu/intel/pasid.c20
-rw-r--r--drivers/iommu/intel/perfmon.c877
-rw-r--r--drivers/iommu/intel/perfmon.h64
-rw-r--r--drivers/iommu/intel/svm.c90
-rw-r--r--drivers/iommu/iommu-traces.c1
-rw-r--r--drivers/iommu/iommu.c195
-rw-r--r--drivers/iommu/iommufd/Kconfig2
-rw-r--r--drivers/iommu/iommufd/device.c8
-rw-r--r--drivers/iommu/iommufd/iommufd_private.h2
-rw-r--r--drivers/iommu/iommufd/main.c3
-rw-r--r--drivers/iommu/iommufd/pages.c6
-rw-r--r--drivers/iommu/iommufd/vfio_compat.c107
-rw-r--r--drivers/iommu/ipmmu-vmsa.c28
-rw-r--r--drivers/iommu/msm_iommu.c6
-rw-r--r--drivers/iommu/mtk_iommu.c9
-rw-r--r--drivers/iommu/mtk_iommu_v1.c4
-rw-r--r--drivers/iommu/of_iommu.c96
-rw-r--r--drivers/iommu/omap-iommu.c6
-rw-r--r--drivers/iommu/rockchip-iommu.c1
-rw-r--r--drivers/iommu/s390-iommu.c24
-rw-r--r--drivers/iommu/sprd-iommu.c16
-rw-r--r--drivers/iommu/sun50i-iommu.c1
-rw-r--r--drivers/iommu/tegra-gart.c6
-rw-r--r--drivers/iommu/tegra-smmu.c9
-rw-r--r--drivers/ipack/devices/ipoctal.c6
-rw-r--r--drivers/ipack/ipack.c4
-rw-r--r--drivers/irqchip/Kconfig8
-rw-r--r--drivers/irqchip/Makefile1
-rw-r--r--drivers/irqchip/irq-alpine-msi.c9
-rw-r--r--drivers/irqchip/irq-apple-aic.c161
-rw-r--r--drivers/irqchip/irq-armada-370-xp.c3
-rw-r--r--drivers/irqchip/irq-aspeed-scu-ic.c5
-rw-r--r--drivers/irqchip/irq-bcm2836.c5
-rw-r--r--drivers/irqchip/irq-bcm7120-l2.c3
-rw-r--r--drivers/irqchip/irq-brcmstb-l2.c6
-rw-r--r--drivers/irqchip/irq-davinci-aintc.c163
-rw-r--r--drivers/irqchip/irq-gic-v2m.c5
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c17
-rw-r--r--drivers/irqchip/irq-gic-v3-mbi.c5
-rw-r--r--drivers/irqchip/irq-gic-v3.c23
-rw-r--r--drivers/irqchip/irq-gic-v4.c9
-rw-r--r--drivers/irqchip/irq-gic.c6
-rw-r--r--drivers/irqchip/irq-imx-gpcv2.c1
-rw-r--r--drivers/irqchip/irq-loongson-liointc.c13
-rw-r--r--drivers/irqchip/irq-loongson-pch-msi.c9
-rw-r--r--drivers/irqchip/irq-mvebu-gicp.c1
-rw-r--r--drivers/irqchip/irq-mvebu-odmi.c13
-rw-r--r--drivers/irqchip/irq-ti-sci-intr.c1
-rw-r--r--drivers/irqchip/irqchip.c8
-rw-r--r--drivers/leds/Kconfig19
-rw-r--r--drivers/leds/Makefile2
-rw-r--r--drivers/leds/flash/leds-mt6360.c38
-rw-r--r--drivers/leds/led-class.c139
-rw-r--r--drivers/leds/leds-an30259a.c21
-rw-r--r--drivers/leds/leds-asic3.c177
-rw-r--r--drivers/leds/leds-bcm6328.c49
-rw-r--r--drivers/leds/leds-bcm6358.c32
-rw-r--r--drivers/leds/leds-bd2802.c5
-rw-r--r--drivers/leds/leds-blinkm.c5
-rw-r--r--drivers/leds/leds-is31fl319x.c7
-rw-r--r--drivers/leds/leds-is31fl32xx.c5
-rw-r--r--drivers/leds/leds-lm3530.c5
-rw-r--r--drivers/leds/leds-lm3532.c5
-rw-r--r--drivers/leds/leds-lm355x.c6
-rw-r--r--drivers/leds/leds-lm3642.c5
-rw-r--r--drivers/leds/leds-lm3692x.c6
-rw-r--r--drivers/leds/leds-lm3697.c5
-rw-r--r--drivers/leds/leds-lp3944.c5
-rw-r--r--drivers/leds/leds-lp3952.c5
-rw-r--r--drivers/leds/leds-lp5521.c6
-rw-r--r--drivers/leds/leds-lp5523.c6
-rw-r--r--drivers/leds/leds-lp5562.c5
-rw-r--r--drivers/leds/leds-lp8501.c6
-rw-r--r--drivers/leds/leds-lp8860.c5
-rw-r--r--drivers/leds/leds-mt6323.c30
-rw-r--r--drivers/leds/leds-pca9532.c9
-rw-r--r--drivers/leds/leds-pca955x.c26
-rw-r--r--drivers/leds/leds-pca963x.c6
-rw-r--r--drivers/leds/leds-pm8058.c29
-rw-r--r--drivers/leds/leds-pwm.c4
-rw-r--r--drivers/leds/leds-s3c24xx.c83
-rw-r--r--drivers/leds/leds-syscon.c49
-rw-r--r--drivers/leds/leds-tca6507.c8
-rw-r--r--drivers/leds/leds-tlc591xx.c5
-rw-r--r--drivers/leds/leds-turris-omnia.c5
-rw-r--r--drivers/leds/leds.h1
-rw-r--r--drivers/leds/simple/simatic-ipc-leds-gpio.c2
-rw-r--r--drivers/leds/trigger/ledtrig-disk.c4
-rw-r--r--drivers/macintosh/macio_asic.c7
-rw-r--r--drivers/mcb/mcb-core.c4
-rw-r--r--drivers/md/Kconfig1
-rw-r--r--drivers/md/dm-audit.c2
-rw-r--r--drivers/md/dm-bio-prison-v1.c19
-rw-r--r--drivers/md/dm-bio-prison-v1.h1
-rw-r--r--drivers/md/dm-bio-prison-v2.c15
-rw-r--r--drivers/md/dm-bio-prison-v2.h11
-rw-r--r--drivers/md/dm-bio-record.h1
-rw-r--r--drivers/md/dm-bufio.c130
-rw-r--r--drivers/md/dm-builtin.c3
-rw-r--r--drivers/md/dm-cache-background-tracker.c17
-rw-r--r--drivers/md/dm-cache-background-tracker.h47
-rw-r--r--drivers/md/dm-cache-block-types.h1
-rw-r--r--drivers/md/dm-cache-metadata.c74
-rw-r--r--drivers/md/dm-cache-metadata.h5
-rw-r--r--drivers/md/dm-cache-policy-internal.h14
-rw-r--r--drivers/md/dm-cache-policy-smq.c166
-rw-r--r--drivers/md/dm-cache-policy.c3
-rw-r--r--drivers/md/dm-cache-policy.h7
-rw-r--r--drivers/md/dm-cache-target.c141
-rw-r--r--drivers/md/dm-clone-target.c2
-rw-r--r--drivers/md/dm-core.h9
-rw-r--r--drivers/md/dm-crypt.c125
-rw-r--r--drivers/md/dm-delay.c7
-rw-r--r--drivers/md/dm-dust.c2
-rw-r--r--drivers/md/dm-ebs-target.c5
-rw-r--r--drivers/md/dm-era-target.c122
-rw-r--r--drivers/md/dm-exception-store.c7
-rw-r--r--drivers/md/dm-exception-store.h57
-rw-r--r--drivers/md/dm-flakey.c58
-rw-r--r--drivers/md/dm-ima.c5
-rw-r--r--drivers/md/dm-ima.h7
-rw-r--r--drivers/md/dm-init.c5
-rw-r--r--drivers/md/dm-integrity.c545
-rw-r--r--drivers/md/dm-io-rewind.c8
-rw-r--r--drivers/md/dm-io-tracker.h1
-rw-r--r--drivers/md/dm-io.c88
-rw-r--r--drivers/md/dm-ioctl.c168
-rw-r--r--drivers/md/dm-kcopyd.c61
-rw-r--r--drivers/md/dm-linear.c5
-rw-r--r--drivers/md/dm-log-userspace-base.c15
-rw-r--r--drivers/md/dm-log-userspace-transfer.c8
-rw-r--r--drivers/md/dm-log-userspace-transfer.h1
-rw-r--r--drivers/md/dm-log-writes.c23
-rw-r--r--drivers/md/dm-log.c65
-rw-r--r--drivers/md/dm-mpath.c125
-rw-r--r--drivers/md/dm-mpath.h3
-rw-r--r--drivers/md/dm-path-selector.c4
-rw-r--r--drivers/md/dm-path-selector.h28
-rw-r--r--drivers/md/dm-ps-historical-service-time.c2
-rw-r--r--drivers/md/dm-ps-io-affinity.c6
-rw-r--r--drivers/md/dm-ps-queue-length.c15
-rw-r--r--drivers/md/dm-ps-round-robin.c22
-rw-r--r--drivers/md/dm-ps-service-time.c26
-rw-r--r--drivers/md/dm-raid.c35
-rw-r--r--drivers/md/dm-raid1.c92
-rw-r--r--drivers/md/dm-region-hash.c29
-rw-r--r--drivers/md/dm-rq.c27
-rw-r--r--drivers/md/dm-rq.h3
-rw-r--r--drivers/md/dm-snap-persistent.c48
-rw-r--r--drivers/md/dm-snap-transient.c18
-rw-r--r--drivers/md/dm-snap.c91
-rw-r--r--drivers/md/dm-stats.c103
-rw-r--r--drivers/md/dm-stats.h6
-rw-r--r--drivers/md/dm-stripe.c53
-rw-r--r--drivers/md/dm-switch.c47
-rw-r--r--drivers/md/dm-sysfs.c12
-rw-r--r--drivers/md/dm-table.c58
-rw-r--r--drivers/md/dm-target.c6
-rw-r--r--drivers/md/dm-thin-metadata.c66
-rw-r--r--drivers/md/dm-thin-metadata.h1
-rw-r--r--drivers/md/dm-thin.c88
-rw-r--r--drivers/md/dm-uevent.c6
-rw-r--r--drivers/md/dm-uevent.h6
-rw-r--r--drivers/md/dm-unstripe.c1
-rw-r--r--drivers/md/dm-verity-fec.c30
-rw-r--r--drivers/md/dm-verity-fec.h18
-rw-r--r--drivers/md/dm-verity-target.c83
-rw-r--r--drivers/md/dm-verity-verify-sig.c2
-rw-r--r--drivers/md/dm-verity-verify-sig.h2
-rw-r--r--drivers/md/dm-verity.h8
-rw-r--r--drivers/md/dm-writecache.c171
-rw-r--r--drivers/md/dm-zero.c1
-rw-r--r--drivers/md/dm-zone.c2
-rw-r--r--drivers/md/dm-zoned-metadata.c22
-rw-r--r--drivers/md/dm-zoned-target.c1
-rw-r--r--drivers/md/dm.c116
-rw-r--r--drivers/md/dm.h16
-rw-r--r--drivers/md/md.c65
-rw-r--r--drivers/md/md.h9
-rw-r--r--drivers/md/persistent-data/dm-array.c82
-rw-r--r--drivers/md/persistent-data/dm-array.h3
-rw-r--r--drivers/md/persistent-data/dm-bitset.c14
-rw-r--r--drivers/md/persistent-data/dm-bitset.h1
-rw-r--r--drivers/md/persistent-data/dm-block-manager.c32
-rw-r--r--drivers/md/persistent-data/dm-block-manager.h7
-rw-r--r--drivers/md/persistent-data/dm-btree-internal.h6
-rw-r--r--drivers/md/persistent-data/dm-btree-remove.c52
-rw-r--r--drivers/md/persistent-data/dm-btree-spine.c21
-rw-r--r--drivers/md/persistent-data/dm-btree.c130
-rw-r--r--drivers/md/persistent-data/dm-btree.h15
-rw-r--r--drivers/md/persistent-data/dm-persistent-data-internal.h7
-rw-r--r--drivers/md/persistent-data/dm-space-map-common.c52
-rw-r--r--drivers/md/persistent-data/dm-space-map-common.h11
-rw-r--r--drivers/md/persistent-data/dm-space-map-disk.c13
-rw-r--r--drivers/md/persistent-data/dm-space-map-disk.h1
-rw-r--r--drivers/md/persistent-data/dm-space-map-metadata.c24
-rw-r--r--drivers/md/persistent-data/dm-space-map-metadata.h1
-rw-r--r--drivers/md/persistent-data/dm-space-map.h1
-rw-r--r--drivers/md/persistent-data/dm-transaction-manager.c18
-rw-r--r--drivers/md/persistent-data/dm-transaction-manager.h3
-rw-r--r--drivers/media/common/Kconfig3
-rw-r--r--drivers/media/common/Makefile1
-rw-r--r--drivers/media/common/uvc.c183
-rw-r--r--drivers/media/common/videobuf2/videobuf2-core.c5
-rw-r--r--drivers/media/common/videobuf2/videobuf2-dma-contig.c2
-rw-r--r--drivers/media/common/videobuf2/videobuf2-vmalloc.c2
-rw-r--r--drivers/media/pci/intel/ipu3/cio2-bridge.c27
-rw-r--r--drivers/media/pci/intel/ipu3/cio2-bridge.h5
-rw-r--r--drivers/media/platform/qcom/venus/firmware.c2
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_drv.c4
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_hgo.c4
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_lif.c1
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_pipe.c18
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_regs.h26
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_rpf.c64
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_video.c4
-rw-r--r--drivers/media/platform/renesas/vsp1/vsp1_wpf.c4
-rw-r--r--drivers/media/platform/samsung/s3c-camif/Kconfig8
-rw-r--r--drivers/media/platform/ti/davinci/Kconfig16
-rw-r--r--drivers/media/platform/ti/davinci/Makefile3
-rw-r--r--drivers/media/platform/ti/davinci/vpbe.c840
-rw-r--r--drivers/media/platform/ti/davinci/vpbe_display.c1510
-rw-r--r--drivers/media/platform/ti/davinci/vpbe_osd.c1582
-rw-r--r--drivers/media/platform/ti/davinci/vpbe_osd_regs.h352
-rw-r--r--drivers/media/platform/ti/davinci/vpbe_venc.c676
-rw-r--r--drivers/media/platform/ti/davinci/vpbe_venc_regs.h165
-rw-r--r--drivers/media/platform/ti/davinci/vpss.c529
-rw-r--r--drivers/media/rc/rc-main.c2
-rw-r--r--drivers/media/usb/uvc/Kconfig1
-rw-r--r--drivers/media/usb/uvc/uvc_ctrl.c342
-rw-r--r--drivers/media/usb/uvc/uvc_driver.c185
-rw-r--r--drivers/media/usb/uvc/uvc_entity.c2
-rw-r--r--drivers/media/usb/uvc/uvc_status.c125
-rw-r--r--drivers/media/usb/uvc/uvc_v4l2.c111
-rw-r--r--drivers/media/usb/uvc/uvc_video.c58
-rw-r--r--drivers/media/usb/uvc/uvcvideo.h39
-rw-r--r--drivers/media/v4l2-core/v4l2-async.c4
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls-api.c2
-rw-r--r--drivers/media/v4l2-core/v4l2-fwnode.c7
-rw-r--r--drivers/media/v4l2-core/v4l2-ioctl.c6
-rw-r--r--drivers/media/v4l2-core/v4l2-subdev-priv.h14
-rw-r--r--drivers/media/v4l2-core/v4l2-subdev.c46
-rw-r--r--drivers/media/v4l2-core/videobuf-dma-contig.c2
-rw-r--r--drivers/media/v4l2-core/videobuf-dma-sg.c4
-rw-r--r--drivers/media/v4l2-core/videobuf-vmalloc.c2
-rw-r--r--drivers/memstick/core/memstick.c6
-rw-r--r--drivers/mfd/Kconfig95
-rw-r--r--drivers/mfd/Makefile12
-rw-r--r--drivers/mfd/arizona-core.c2
-rw-r--r--drivers/mfd/asic3.c1071
-rw-r--r--drivers/mfd/axp20x.c27
-rw-r--r--drivers/mfd/cros_ec_dev.c5
-rw-r--r--drivers/mfd/htc-pasic3.c210
-rw-r--r--drivers/mfd/intel-m10-bmc-core.c122
-rw-r--r--drivers/mfd/intel-m10-bmc-pmci.c455
-rw-r--r--drivers/mfd/intel-m10-bmc-spi.c168
-rw-r--r--drivers/mfd/intel-m10-bmc.c238
-rw-r--r--drivers/mfd/intel_soc_pmic_chtwc.c8
-rw-r--r--drivers/mfd/kempld-core.c7
-rw-r--r--drivers/mfd/lm3533-core.c2
-rw-r--r--drivers/mfd/max8925-core.c6
-rw-r--r--drivers/mfd/ntxec.c1
-rw-r--r--drivers/mfd/ocelot-core.c68
-rw-r--r--drivers/mfd/pcf50633-adc.c7
-rw-r--r--drivers/mfd/qcom-pm8xxx.c3
-rw-r--r--drivers/mfd/rk808.c1
-rw-r--r--drivers/mfd/simple-mfd-i2c.c2
-rw-r--r--drivers/mfd/syscon.c27
-rw-r--r--drivers/mfd/t7l66xb.c427
-rw-r--r--drivers/mfd/tc6387xb.c228
-rw-r--r--drivers/mfd/tc6393xb.c907
-rw-r--r--drivers/mfd/tmio_core.c70
-rw-r--r--drivers/mfd/twl-core.c9
-rw-r--r--drivers/mfd/twl4030-power.c6
-rw-r--r--drivers/mfd/ucb1400_core.c158
-rw-r--r--drivers/mfd/wm97xx-core.c4
-rw-r--r--drivers/misc/Kconfig21
-rw-r--r--drivers/misc/Makefile3
-rw-r--r--drivers/misc/cxl/context.c2
-rw-r--r--drivers/misc/eeprom/at25.c8
-rw-r--r--drivers/misc/eeprom/idt_89hpesx.c10
-rw-r--r--drivers/misc/enclosure.c2
-rw-r--r--drivers/misc/fastrpc.c13
-rw-r--r--drivers/misc/genwqe/card_utils.c6
-rw-r--r--drivers/misc/habanalabs/include/gaudi2/arc/gaudi2_arc_common_packets.h213
-rw-r--r--drivers/misc/isl29003.c10
-rw-r--r--drivers/misc/lkdtm/heap.c1
-rw-r--r--drivers/misc/mei/bus-fixup.c26
-rw-r--r--drivers/misc/mei/bus.c7
-rw-r--r--drivers/misc/mei/hdcp/mei_hdcp.c4
-rw-r--r--drivers/misc/mei/mei_dev.h5
-rw-r--r--drivers/misc/mei/pxp/mei_pxp.c4
-rw-r--r--drivers/misc/ocxl/context.c4
-rw-r--r--drivers/misc/ocxl/sysfs.c2
-rw-r--r--drivers/misc/open-dice.c18
-rw-r--r--drivers/misc/pci_endpoint_test.c4
-rw-r--r--drivers/misc/sgi-gru/grufile.c4
-rw-r--r--drivers/misc/sgi-gru/grukservices.c8
-rw-r--r--drivers/misc/ti-st/st_core.c2
-rw-r--r--drivers/misc/tifm_core.c4
-rw-r--r--drivers/misc/uacce/uacce.c52
-rw-r--r--drivers/misc/vmw_balloon.c2
-rw-r--r--drivers/misc/vmw_vmci/vmci_host.c2
-rw-r--r--drivers/misc/xilinx_tmr_inject.c171
-rw-r--r--drivers/misc/xilinx_tmr_manager.c220
-rw-r--r--drivers/mmc/core/bus.c4
-rw-r--r--drivers/mmc/core/sdio_bus.c21
-rw-r--r--drivers/mmc/core/sdio_cis.c12
-rw-r--r--drivers/mmc/core/sdio_uart.c13
-rw-r--r--drivers/mmc/host/Kconfig68
-rw-r--r--drivers/mmc/host/Makefile3
-rw-r--r--drivers/mmc/host/jz4740_mmc.c10
-rw-r--r--drivers/mmc/host/meson-gx-mmc.c23
-rw-r--r--drivers/mmc/host/mmc_spi.c8
-rw-r--r--drivers/mmc/host/s3cmci.c1777
-rw-r--r--drivers/mmc/host/s3cmci.h75
-rw-r--r--drivers/mmc/host/sdhci-cns3xxx.c113
-rw-r--r--drivers/mmc/host/tmio_mmc.c227
-rw-r--r--drivers/most/Kconfig2
-rw-r--r--drivers/most/most_cdev.c5
-rw-r--r--drivers/most/most_snd.c10
-rw-r--r--drivers/most/most_usb.c6
-rw-r--r--drivers/mtd/mtdpart.c10
-rw-r--r--drivers/mtd/nand/raw/Kconfig2
-rw-r--r--drivers/mtd/nand/raw/s3c2410.c60
-rw-r--r--drivers/net/Kconfig14
-rw-r--r--drivers/net/Makefile4
-rw-r--r--drivers/net/bonding/bond_debugfs.c2
-rw-r--r--drivers/net/bonding/bond_main.c10
-rw-r--r--drivers/net/can/ctucanfd/ctucanfd_platform.c4
-rw-r--r--drivers/net/can/dev/bittiming.c120
-rw-r--r--drivers/net/can/dev/calc_bittiming.c34
-rw-r--r--drivers/net/can/dev/dev.c21
-rw-r--r--drivers/net/can/dev/netlink.c49
-rw-r--r--drivers/net/can/rcar/rcar_canfd.c225
-rw-r--r--drivers/net/can/sja1000/ems_pci.c154
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd-ethtool.c1
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c18
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd.h26
-rw-r--r--drivers/net/can/usb/esd_usb.c70
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb.c44
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_core.c122
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_core.h12
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_fd.c68
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_pro.c30
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_pro.h1
-rw-r--r--drivers/net/dsa/Kconfig7
-rw-r--r--drivers/net/dsa/lan9303-core.c169
-rw-r--r--drivers/net/dsa/microchip/Kconfig10
-rw-r--r--drivers/net/dsa/microchip/Makefile5
-rw-r--r--drivers/net/dsa/microchip/ksz9477.c25
-rw-r--r--drivers/net/dsa/microchip/ksz9477.h2
-rw-r--r--drivers/net/dsa/microchip/ksz9477_reg.h33
-rw-r--r--drivers/net/dsa/microchip/ksz_common.c246
-rw-r--r--drivers/net/dsa/microchip/ksz_common.h69
-rw-r--r--drivers/net/dsa/microchip/ksz_ptp.c1201
-rw-r--r--drivers/net/dsa/microchip/ksz_ptp.h86
-rw-r--r--drivers/net/dsa/microchip/ksz_ptp_reg.h142
-rw-r--r--drivers/net/dsa/microchip/lan937x.h1
-rw-r--r--drivers/net/dsa/microchip/lan937x_main.c9
-rw-r--r--drivers/net/dsa/microchip/lan937x_reg.h3
-rw-r--r--drivers/net/dsa/mt7530.c113
-rw-r--r--drivers/net/dsa/mt7530.h15
-rw-r--r--drivers/net/dsa/mv88e6xxx/Makefile1
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c201
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.h23
-rw-r--r--drivers/net/dsa/mv88e6xxx/global1.c12
-rw-r--r--drivers/net/dsa/mv88e6xxx/global1.h2
-rw-r--r--drivers/net/dsa/mv88e6xxx/global1_atu.c24
-rw-r--r--drivers/net/dsa/mv88e6xxx/global2.c66
-rw-r--r--drivers/net/dsa/mv88e6xxx/global2.h18
-rw-r--r--drivers/net/dsa/mv88e6xxx/phy.c32
-rw-r--r--drivers/net/dsa/mv88e6xxx/phy.h4
-rw-r--r--drivers/net/dsa/mv88e6xxx/ptp.c46
-rw-r--r--drivers/net/dsa/mv88e6xxx/ptp.h2
-rw-r--r--drivers/net/dsa/mv88e6xxx/serdes.c8
-rw-r--r--drivers/net/dsa/mv88e6xxx/switchdev.c83
-rw-r--r--drivers/net/dsa/mv88e6xxx/switchdev.h19
-rw-r--r--drivers/net/dsa/ocelot/Kconfig32
-rw-r--r--drivers/net/dsa/ocelot/Makefile13
-rw-r--r--drivers/net/dsa/ocelot/felix.c59
-rw-r--r--drivers/net/dsa/ocelot/felix.h2
-rw-r--r--drivers/net/dsa/ocelot/felix_vsc9959.c64
-rw-r--r--drivers/net/dsa/ocelot/ocelot_ext.c163
-rw-r--r--drivers/net/dsa/ocelot/seville_vsc9953.c1
-rw-r--r--drivers/net/dsa/qca/qca8k-8xxx.c92
-rw-r--r--drivers/net/dsa/qca/qca8k-common.c49
-rw-r--r--drivers/net/dsa/qca/qca8k.h5
-rw-r--r--drivers/net/dsa/rzn1_a5psw.c6
-rw-r--r--drivers/net/dsa/sja1105/sja1105.h16
-rw-r--r--drivers/net/dsa/sja1105/sja1105_mdio.c137
-rw-r--r--drivers/net/dsa/sja1105/sja1105_spi.c24
-rw-r--r--drivers/net/ethernet/actions/owl-emac.c6
-rw-r--r--drivers/net/ethernet/adi/adin1110.c1
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.c4
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-common.h49
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-dev.c94
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-mdio.c24
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c415
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe.h14
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_main.c1
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.c5
-rw-r--r--drivers/net/ethernet/atheros/alx/main.c10
-rw-r--r--drivers/net/ethernet/broadcom/Kconfig1
-rw-r--r--drivers/net/ethernet/broadcom/b44.c22
-rw-r--r--drivers/net/ethernet/broadcom/bgmac-bcma.c6
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c21
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h8
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c1
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c7
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c474
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h51
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c2
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c8
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c8
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmmii.c11
-rw-r--r--drivers/net/ethernet/cadence/macb.h29
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c208
-rw-r--r--drivers/net/ethernet/cadence/macb_ptp.c83
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_main.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c8
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_thermal.c39
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c34
-rw-r--r--drivers/net/ethernet/engleder/Makefile2
-rw-r--r--drivers/net/ethernet/engleder/tsnep.h16
-rw-r--r--drivers/net/ethernet/engleder/tsnep_main.c479
-rw-r--r--drivers/net/ethernet/engleder/tsnep_tc.c21
-rw-r--r--drivers/net/ethernet/engleder/tsnep_xdp.c19
-rw-r--r--drivers/net/ethernet/faraday/ftmac100.c6
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth.c10
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c15
-rw-r--r--drivers/net/ethernet/freescale/enetc/Kconfig14
-rw-r--r--drivers/net/ethernet/freescale/enetc/Makefile7
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc.c746
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc.h40
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_cbdr.c8
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_ethtool.c232
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_hw.h137
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_mdio.c119
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c6
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_pf.c113
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_qos.c27
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c182
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_memac.c12
-rw-r--r--drivers/net/ethernet/freescale/xgmac_mdio.c149
-rw-r--r--drivers/net/ethernet/fungible/funeth/Kconfig2
-rw-r--r--drivers/net/ethernet/fungible/funeth/funeth_main.c6
-rw-r--r--drivers/net/ethernet/google/gve/gve_main.c9
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c20
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hnae3.h1
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.c1
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c1
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c1
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.c1
-rw-r--r--drivers/net/ethernet/hisilicon/hns_mdio.c192
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c29
-rw-r--r--drivers/net/ethernet/intel/Kconfig3
-rw-r--r--drivers/net/ethernet/intel/e1000e/ethtool.c10
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c7
-rw-r--r--drivers/net/ethernet/intel/e1000e/phy.c9
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_pci.c5
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h9
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq.c68
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_alloc.h22
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_client.c14
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_common.c1038
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_dcb.c60
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_dcb.h28
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c16
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ddp.c14
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_debugfs.c8
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_diag.c12
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_diag.h4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c65
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_hmc.c56
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_hmc.h46
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c94
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h34
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c425
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_nvm.c252
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_osdep.h1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_prototype.h643
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_status.h35
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c157
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h6
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf.h7
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_client.c32
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_client.h2
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_common.c4
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_main.c7
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_status.h2
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_virtchnl.c6
-rw-r--r--drivers/net/ethernet/intel/ice/Makefile3
-rw-r--r--drivers/net/ethernet/intel/ice/ice.h17
-rw-r--r--drivers/net/ethernet/intel/ice/ice_adminq_cmd.h18
-rw-r--r--drivers/net/ethernet/intel/ice/ice_base.c21
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.c58
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.h4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb.c43
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb.h2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb_lib.c93
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb_lib.h4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ddp.c1897
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ddp.h445
-rw-r--r--drivers/net/ethernet/intel/ice/ice_devlink.c128
-rw-r--r--drivers/net/ethernet/intel/ice/ice_eswitch.c26
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool.c97
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flex_pipe.c1916
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flex_pipe.h69
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flex_type.h328
-rw-r--r--drivers/net/ethernet/intel/ice/ice_fltr.c5
-rw-r--r--drivers/net/ethernet/intel/ice/ice_gnss.c377
-rw-r--r--drivers/net/ethernet/intel/ice/ice_gnss.h18
-rw-r--r--drivers/net/ethernet/intel/ice/ice_idc.c53
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.c1005
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.h50
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c1104
-rw-r--r--drivers/net/ethernet/intel/ice/ice_nvm.c1
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp.c74
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sched.c7
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sriov.c133
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_tc_lib.c52
-rw-r--r--drivers/net/ethernet/intel/ice/ice_tc_lib.h10
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.c463
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.h87
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx_lib.c264
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx_lib.h75
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_lib.c183
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_lib.h12
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_lib_private.h3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_mbx.c21
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c16
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl.c24
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c8
-rw-r--r--drivers/net/ethernet/intel/ice/ice_xsk.c203
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c86
-rw-r--r--drivers/net/ethernet/intel/igc/igc_base.c29
-rw-r--r--drivers/net/ethernet/intel/igc/igc_base.h2
-rw-r--r--drivers/net/ethernet/intel/igc/igc_defines.h1
-rw-r--r--drivers/net/ethernet/intel/igc/igc_main.c64
-rw-r--r--drivers/net/ethernet/intel/igc/igc_ptp.c14
-rw-r--r--drivers/net/ethernet/intel/igc/igc_tsn.c56
-rw-r--r--drivers/net/ethernet/intel/igc/igc_xdp.c5
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.c21
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c27
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c58
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c237
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ipsec.c21
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c1
-rw-r--r--drivers/net/ethernet/marvell/mvmdio.c30
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c8
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c4
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mbox.h33
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.c8
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.h21
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c18
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c309
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c35
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c56
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c18
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h4
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c8
-rw-r--r--drivers/net/ethernet/marvell/pxa168_eth.c2
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c512
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.h42
-rw-r--r--drivers/net/ethernet/mediatek/mtk_ppe.c30
-rw-r--r--drivers/net/ethernet/mediatek/mtk_ppe.h2
-rw-r--r--drivers/net/ethernet/mediatek/mtk_ppe_regs.h6
-rw-r--r--drivers/net/ethernet/mediatek/mtk_sgmii.c46
-rw-r--r--drivers/net/ethernet/mediatek/mtk_star_emac.c6
-rw-r--r--drivers/net/ethernet/mediatek/mtk_wed.c43
-rw-r--r--drivers/net/ethernet/mediatek/mtk_wed.h9
-rw-r--r--drivers/net/ethernet/mediatek/mtk_wed_wo.c11
-rw-r--r--drivers/net/ethernet/mediatek/mtk_wed_wo.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_clock.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c63
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c22
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c81
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/qp.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Kconfig4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Makefile4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c124
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/debugfs.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/dev.c46
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/devlink.c312
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/devlink.h10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c82
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ecpf.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c68
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/devlink.h14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/fs.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/mod_hdr.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/params.c18
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/port.c72
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/port.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c222
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c227
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.c35
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act_stats.c197
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/act_stats.h27
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c174
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c40
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c47
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c19
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c126
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c77
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c49
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h19
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c37
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_common.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs.c24
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c224
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c44
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c115
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c678
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.h47
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eq.c38
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ofld.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c213
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c18
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.h11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c337
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/events.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c131
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c51
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/health.c30
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/debugfs.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h19
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c164
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.h30
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c56
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c755
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.h34
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.c368
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.h25
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h17
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c82
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c40
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sriov.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c25
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h27
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_ethtool.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c109
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio.c178
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio_bf2.h53
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio_bf3.h54
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h22
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.c166
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_linecards.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_thermal.c209
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/emad.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/reg.h12
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c63
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c244
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c2
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.c167
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.h1
-rw-r--r--drivers/net/ethernet/microchip/lan966x/Makefile2
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c6
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_goto.c10
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_main.c14
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_main.h32
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c7
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_tc.c3
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_tc_flower.c198
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_tc_matchall.c16
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_vcap_debugfs.c94
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_vcap_impl.c46
-rw-r--r--drivers/net/ethernet/microchip/sparx5/Makefile3
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_dcb.c121
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_main.c7
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_main.h124
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h2511
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_police.c53
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_pool.c81
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_port.c102
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_port.h41
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_psfp.c332
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_ptp.c7
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_qos.c59
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_sdlb.c335
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_tc.c1
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_tc.h74
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c1260
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_tc_matchall.c16
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_vcap_ag_api.c2531
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_vcap_debugfs.c291
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.c1356
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.h120
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c4
-rw-r--r--drivers/net/ethernet/microchip/vcap/Makefile2
-rw-r--r--drivers/net/ethernet/microchip/vcap/vcap_ag_api.h499
-rw-r--r--drivers/net/ethernet/microchip/vcap/vcap_api.c1201
-rw-r--r--drivers/net/ethernet/microchip/vcap/vcap_api.h13
-rw-r--r--drivers/net/ethernet/microchip/vcap/vcap_api_client.h13
-rw-r--r--drivers/net/ethernet/microchip/vcap/vcap_api_debugfs.c77
-rw-r--r--drivers/net/ethernet/microchip/vcap/vcap_api_debugfs_kunit.c19
-rw-r--r--drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c127
-rw-r--r--drivers/net/ethernet/microchip/vcap/vcap_api_private.h15
-rw-r--r--drivers/net/ethernet/microchip/vcap/vcap_model_kunit.c1910
-rw-r--r--drivers/net/ethernet/microchip/vcap/vcap_model_kunit.h10
-rw-r--r--drivers/net/ethernet/microchip/vcap/vcap_tc.c412
-rw-r--r--drivers/net/ethernet/microchip/vcap/vcap_tc.h32
-rw-r--r--drivers/net/ethernet/microsoft/mana/gdma_main.c37
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana_en.c2
-rw-r--r--drivers/net/ethernet/mscc/Kconfig1
-rw-r--r--drivers/net/ethernet/mscc/Makefile1
-rw-r--r--drivers/net/ethernet/mscc/ocelot.c66
-rw-r--r--drivers/net/ethernet/mscc/ocelot.h2
-rw-r--r--drivers/net/ethernet/mscc/ocelot_devlink.c31
-rw-r--r--drivers/net/ethernet/mscc/ocelot_flower.c24
-rw-r--r--drivers/net/ethernet/mscc/ocelot_mm.c215
-rw-r--r--drivers/net/ethernet/mscc/ocelot_ptp.c8
-rw-r--r--drivers/net/ethernet/mscc/ocelot_stats.c332
-rw-r--r--drivers/net/ethernet/mscc/ocelot_vsc7514.c190
-rw-r--r--drivers/net/ethernet/mscc/vsc7514_regs.c159
-rw-r--r--drivers/net/ethernet/netronome/Kconfig2
-rw-r--r--drivers/net/ethernet/netronome/nfp/Makefile4
-rw-r--r--drivers/net/ethernet/netronome/nfp/crypto/ipsec.c89
-rw-r--r--drivers/net/ethernet/netronome/nfp/devlink_param.c8
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/conntrack.c24
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c8
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfd3/dp.c11
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfdk/dp.c49
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfdk/ipsec.c17
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfdk/nfdk.h8
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net.h25
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c113
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c229
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_main.c7
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_port.h12
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h3
-rw-r--r--drivers/net/ethernet/netronome/nfp/nic/dcb.c571
-rw-r--r--drivers/net/ethernet/netronome/nfp/nic/main.c43
-rw-r--r--drivers/net/ethernet/netronome/nfp/nic/main.h46
-rw-r--r--drivers/net/ethernet/ni/nixge.c141
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c6
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_dev.c76
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_dev.h25
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_ethtool.c117
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_if.h3
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.c233
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.h42
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_main.c33
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_phc.c2
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c4
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_txrx.c109
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_devlink.c6
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sriov.c2
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_fp.c7
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_main.c14
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c5
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h20
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c18
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h6
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c191
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c54
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h1
-rw-r--r--drivers/net/ethernet/realtek/r8169_main.c24
-rw-r--r--drivers/net/ethernet/renesas/rswitch.c554
-rw-r--r--drivers/net/ethernet/renesas/rswitch.h50
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c37
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_mdio.c105
-rw-r--r--drivers/net/ethernet/sfc/Kconfig1
-rw-r--r--drivers/net/ethernet/sfc/Makefile3
-rw-r--r--drivers/net/ethernet/sfc/ef100_netdev.c30
-rw-r--r--drivers/net/ethernet/sfc/ef100_nic.c114
-rw-r--r--drivers/net/ethernet/sfc/ef100_nic.h7
-rw-r--r--drivers/net/ethernet/sfc/ef100_rep.c57
-rw-r--r--drivers/net/ethernet/sfc/ef100_rep.h10
-rw-r--r--drivers/net/ethernet/sfc/efx.c9
-rw-r--r--drivers/net/ethernet/sfc/efx_devlink.c731
-rw-r--r--drivers/net/ethernet/sfc/efx_devlink.h47
-rw-r--r--drivers/net/ethernet/sfc/mae.c218
-rw-r--r--drivers/net/ethernet/sfc/mae.h40
-rw-r--r--drivers/net/ethernet/sfc/mcdi.c72
-rw-r--r--drivers/net/ethernet/sfc/mcdi.h8
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h8
-rw-r--r--drivers/net/ethernet/sfc/siena/efx.c4
-rw-r--r--drivers/net/ethernet/socionext/netsec.c3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c21
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c55
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac5.c3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/hwif.h5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c12
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c334
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c7
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c20
-rw-r--r--drivers/net/ethernet/sunplus/spl2sw_mdio.c6
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.c97
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.h2
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-qos.c22
-rw-r--r--drivers/net/ethernet/ti/am65-cpts.c170
-rw-r--r--drivers/net/ethernet/ti/am65-cpts.h5
-rw-r--r--drivers/net/ethernet/ti/cpsw.c4
-rw-r--r--drivers/net/ethernet/ti/cpsw_new.c4
-rw-r--r--drivers/net/ethernet/ti/cpsw_priv.c1
-rw-r--r--drivers/net/ethernet/ti/davinci_mdio.c50
-rw-r--r--drivers/net/ethernet/wangxun/Kconfig2
-rw-r--r--drivers/net/ethernet/wangxun/libwx/Makefile2
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_ethtool.c18
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_ethtool.h8
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_hw.c1197
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_hw.h42
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_lib.c2004
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_lib.h32
-rw-r--r--drivers/net/ethernet/wangxun/libwx/wx_type.h409
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/Makefile2
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe.h79
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c22
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.h9
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_hw.c70
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_hw.h5
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_main.c583
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c286
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.h12
-rw-r--r--drivers/net/ethernet/wangxun/ngbe/ngbe_type.h98
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/Makefile3
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe.h43
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c19
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.h9
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c116
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h6
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_main.c569
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_type.h35
-rw-r--r--drivers/net/hamradio/baycom_epp.c8
-rw-r--r--drivers/net/hyperv/netvsc.c29
-rw-r--r--drivers/net/hyperv/netvsc_drv.c8
-rw-r--r--drivers/net/ieee802154/at86rf230.c90
-rw-r--r--drivers/net/ieee802154/cc2520.c136
-rw-r--r--drivers/net/ipa/Makefile9
-rw-r--r--drivers/net/ipa/gsi.c486
-rw-r--r--drivers/net/ipa/gsi.h7
-rw-r--r--drivers/net/ipa/gsi_reg.c151
-rw-r--r--drivers/net/ipa/gsi_reg.h504
-rw-r--r--drivers/net/ipa/ipa.h4
-rw-r--r--drivers/net/ipa/ipa_cmd.c38
-rw-r--r--drivers/net/ipa/ipa_endpoint.c585
-rw-r--r--drivers/net/ipa/ipa_endpoint.h4
-rw-r--r--drivers/net/ipa/ipa_interrupt.c142
-rw-r--r--drivers/net/ipa/ipa_interrupt.h48
-rw-r--r--drivers/net/ipa/ipa_main.c122
-rw-r--r--drivers/net/ipa/ipa_mem.c28
-rw-r--r--drivers/net/ipa/ipa_mem.h8
-rw-r--r--drivers/net/ipa/ipa_power.c19
-rw-r--r--drivers/net/ipa/ipa_power.h12
-rw-r--r--drivers/net/ipa/ipa_reg.c90
-rw-r--r--drivers/net/ipa/ipa_reg.h190
-rw-r--r--drivers/net/ipa/ipa_resource.c16
-rw-r--r--drivers/net/ipa/ipa_table.c68
-rw-r--r--drivers/net/ipa/ipa_uc.c27
-rw-r--r--drivers/net/ipa/ipa_uc.h8
-rw-r--r--drivers/net/ipa/ipa_version.h6
-rw-r--r--drivers/net/ipa/reg.h133
-rw-r--r--drivers/net/ipa/reg/gsi_reg-v3.1.c291
-rw-r--r--drivers/net/ipa/reg/gsi_reg-v3.5.1.c303
-rw-r--r--drivers/net/ipa/reg/gsi_reg-v4.0.c308
-rw-r--r--drivers/net/ipa/reg/gsi_reg-v4.11.c313
-rw-r--r--drivers/net/ipa/reg/gsi_reg-v4.5.c311
-rw-r--r--drivers/net/ipa/reg/gsi_reg-v4.9.c312
-rw-r--r--drivers/net/ipa/reg/ipa_reg-v3.1.c283
-rw-r--r--drivers/net/ipa/reg/ipa_reg-v3.5.1.c269
-rw-r--r--drivers/net/ipa/reg/ipa_reg-v4.11.c271
-rw-r--r--drivers/net/ipa/reg/ipa_reg-v4.2.c255
-rw-r--r--drivers/net/ipa/reg/ipa_reg-v4.5.c287
-rw-r--r--drivers/net/ipa/reg/ipa_reg-v4.7.c271
-rw-r--r--drivers/net/ipa/reg/ipa_reg-v4.9.c271
-rw-r--r--drivers/net/ipvlan/ipvlan_core.c2
-rw-r--r--drivers/net/macsec.c133
-rw-r--r--drivers/net/mdio/Kconfig11
-rw-r--r--drivers/net/mdio/Makefile1
-rw-r--r--drivers/net/mdio/fwnode_mdio.c8
-rw-r--r--drivers/net/mdio/mdio-aspeed.c48
-rw-r--r--drivers/net/mdio/mdio-bitbang.c77
-rw-r--r--drivers/net/mdio/mdio-cavium.c111
-rw-r--r--drivers/net/mdio/mdio-cavium.h9
-rw-r--r--drivers/net/mdio/mdio-i2c.c38
-rw-r--r--drivers/net/mdio/mdio-ipq4019.c154
-rw-r--r--drivers/net/mdio/mdio-ipq8064.c8
-rw-r--r--drivers/net/mdio/mdio-mscc-miim.c6
-rw-r--r--drivers/net/mdio/mdio-mux-bcm-iproc.c54
-rw-r--r--drivers/net/mdio/mdio-mux-meson-g12a.c38
-rw-r--r--drivers/net/mdio/mdio-mux-meson-gxl.c164
-rw-r--r--drivers/net/mdio/mdio-mvusb.c6
-rw-r--r--drivers/net/mdio/mdio-octeon.c6
-rw-r--r--drivers/net/mdio/mdio-thunder.c6
-rw-r--r--drivers/net/netdevsim/bpf.c4
-rw-r--r--drivers/net/netdevsim/dev.c50
-rw-r--r--drivers/net/netdevsim/health.c20
-rw-r--r--drivers/net/netdevsim/ipsec.c14
-rw-r--r--drivers/net/netdevsim/netdev.c1
-rw-r--r--drivers/net/pcs/pcs-lynx.c20
-rw-r--r--drivers/net/pcs/pcs-rzn1-miic.c6
-rw-r--r--drivers/net/pcs/pcs-xpcs.c4
-rw-r--r--drivers/net/phy/Kconfig9
-rw-r--r--drivers/net/phy/Makefile1
-rw-r--r--drivers/net/phy/dp83822.c6
-rw-r--r--drivers/net/phy/marvell.c2
-rw-r--r--drivers/net/phy/mdio-open-alliance.h46
-rw-r--r--drivers/net/phy/mdio_bus.c466
-rw-r--r--drivers/net/phy/meson-gxl.c4
-rw-r--r--drivers/net/phy/micrel.c870
-rw-r--r--drivers/net/phy/microchip_t1.c70
-rw-r--r--drivers/net/phy/motorcomm.c559
-rw-r--r--drivers/net/phy/mxl-gpy.c5
-rw-r--r--drivers/net/phy/ncn26000.c171
-rw-r--r--drivers/net/phy/phy-c45.c514
-rw-r--r--drivers/net/phy/phy-core.c5
-rw-r--r--drivers/net/phy/phy.c417
-rw-r--r--drivers/net/phy/phy_device.c58
-rw-r--r--drivers/net/phy/phylink.c28
-rw-r--r--drivers/net/phy/sfp.c39
-rw-r--r--drivers/net/tap.c2
-rw-r--r--drivers/net/thunderbolt/Kconfig12
-rw-r--r--drivers/net/thunderbolt/Makefile6
-rw-r--r--drivers/net/thunderbolt/main.c (renamed from drivers/net/thunderbolt.c)48
-rw-r--r--drivers/net/thunderbolt/trace.c10
-rw-r--r--drivers/net/thunderbolt/trace.h141
-rw-r--r--drivers/net/tun.c7
-rw-r--r--drivers/net/usb/cdc_ether.c114
-rw-r--r--drivers/net/usb/kalmia.c8
-rw-r--r--drivers/net/usb/plusb.c4
-rw-r--r--drivers/net/usb/r8152.c179
-rw-r--r--drivers/net/usb/usbnet.c29
-rw-r--r--drivers/net/veth.c91
-rw-r--r--drivers/net/virtio_net.c436
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c50
-rw-r--r--drivers/net/wireless/ath/Kconfig1
-rw-r--r--drivers/net/wireless/ath/Makefile1
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.c8
-rw-r--r--drivers/net/wireless/ath/ath10k/snoc.c2
-rw-r--r--drivers/net/wireless/ath/ath11k/ahb.c51
-rw-r--r--drivers/net/wireless/ath/ath11k/ce.h16
-rw-r--r--drivers/net/wireless/ath/ath11k/core.c93
-rw-r--r--drivers/net/wireless/ath/ath11k/core.h18
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs.c48
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_rx.c24
-rw-r--r--drivers/net/wireless/ath/ath11k/hal.c17
-rw-r--r--drivers/net/wireless/ath/ath11k/hal.h5
-rw-r--r--drivers/net/wireless/ath/ath11k/hw.c371
-rw-r--r--drivers/net/wireless/ath/ath11k/hw.h12
-rw-r--r--drivers/net/wireless/ath/ath11k/mac.c104
-rw-r--r--drivers/net/wireless/ath/ath11k/pci.c2
-rw-r--r--drivers/net/wireless/ath/ath11k/wmi.c4
-rw-r--r--drivers/net/wireless/ath/ath11k/wmi.h1
-rw-r--r--drivers/net/wireless/ath/ath12k/Kconfig34
-rw-r--r--drivers/net/wireless/ath/ath12k/Makefile27
-rw-r--r--drivers/net/wireless/ath/ath12k/ce.c964
-rw-r--r--drivers/net/wireless/ath/ath12k/ce.h184
-rw-r--r--drivers/net/wireless/ath/ath12k/core.c939
-rw-r--r--drivers/net/wireless/ath/ath12k/core.h822
-rw-r--r--drivers/net/wireless/ath/ath12k/dbring.c357
-rw-r--r--drivers/net/wireless/ath/ath12k/dbring.h80
-rw-r--r--drivers/net/wireless/ath/ath12k/debug.c102
-rw-r--r--drivers/net/wireless/ath/ath12k/debug.h67
-rw-r--r--drivers/net/wireless/ath/ath12k/dp.c1580
-rw-r--r--drivers/net/wireless/ath/ath12k/dp.h1816
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_mon.c2596
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_mon.h106
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_rx.c4234
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_rx.h145
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_tx.c1211
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_tx.h41
-rw-r--r--drivers/net/wireless/ath/ath12k/hal.c2222
-rw-r--r--drivers/net/wireless/ath/ath12k/hal.h1142
-rw-r--r--drivers/net/wireless/ath/ath12k/hal_desc.h2961
-rw-r--r--drivers/net/wireless/ath/ath12k/hal_rx.c850
-rw-r--r--drivers/net/wireless/ath/ath12k/hal_rx.h704
-rw-r--r--drivers/net/wireless/ath/ath12k/hal_tx.c145
-rw-r--r--drivers/net/wireless/ath/ath12k/hal_tx.h194
-rw-r--r--drivers/net/wireless/ath/ath12k/hif.h144
-rw-r--r--drivers/net/wireless/ath/ath12k/htc.c789
-rw-r--r--drivers/net/wireless/ath/ath12k/htc.h316
-rw-r--r--drivers/net/wireless/ath/ath12k/hw.c1041
-rw-r--r--drivers/net/wireless/ath/ath12k/hw.h312
-rw-r--r--drivers/net/wireless/ath/ath12k/mac.c7038
-rw-r--r--drivers/net/wireless/ath/ath12k/mac.h76
-rw-r--r--drivers/net/wireless/ath/ath12k/mhi.c616
-rw-r--r--drivers/net/wireless/ath/ath12k/mhi.h46
-rw-r--r--drivers/net/wireless/ath/ath12k/pci.c1374
-rw-r--r--drivers/net/wireless/ath/ath12k/pci.h135
-rw-r--r--drivers/net/wireless/ath/ath12k/peer.c342
-rw-r--r--drivers/net/wireless/ath/ath12k/peer.h67
-rw-r--r--drivers/net/wireless/ath/ath12k/qmi.c3087
-rw-r--r--drivers/net/wireless/ath/ath12k/qmi.h569
-rw-r--r--drivers/net/wireless/ath/ath12k/reg.c732
-rw-r--r--drivers/net/wireless/ath/ath12k/reg.h95
-rw-r--r--drivers/net/wireless/ath/ath12k/rx_desc.h1441
-rw-r--r--drivers/net/wireless/ath/ath12k/trace.c10
-rw-r--r--drivers/net/wireless/ath/ath12k/trace.h152
-rw-r--r--drivers/net/wireless/ath/ath12k/wmi.c6600
-rw-r--r--drivers/net/wireless/ath/ath12k/wmi.h4803
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar5008_phy.c10
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_calib.c30
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_hw.c10
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_mac.c14
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_phy.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_calib.c74
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.c64
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.h12
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_hw.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mac.c12
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mci.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_paprd.c56
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.c26
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.h82
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_wow.c18
-rw-r--r--drivers/net/wireless/ath/ath9k/btcoex.c14
-rw-r--r--drivers/net/wireless/ath/ath9k/calib.c32
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom.h12
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_def.c10
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.c33
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_init.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_hst.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c128
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.c42
-rw-r--r--drivers/net/wireless/ath/ath9k/pci.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/reg.h148
-rw-r--r--drivers/net/wireless/ath/ath9k/rng.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/wmi.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c7
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c6
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c7
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c5
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c33
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h8
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2200.c11
-rw-r--r--drivers/net/wireless/intel/iwlegacy/3945-mac.c16
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965-mac.c14
-rw-r--r--drivers/net/wireless/intel/iwlegacy/common.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/22000.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/commands.h1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/rx.h145
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/uefi.c59
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/uefi.h19
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h21
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.c12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mei/main.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c80
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tt.c71
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c5
-rw-r--r--drivers/net/wireless/intersil/orinoco/hermes.c1
-rw-r--r--drivers/net/wireless/intersil/orinoco/hw.c2
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c6
-rw-r--r--drivers/net/wireless/marvell/libertas/cfg.c76
-rw-r--r--drivers/net/wireless/marvell/libertas/cmdresp.c2
-rw-r--r--drivers/net/wireless/marvell/libertas/if_usb.c2
-rw-r--r--drivers/net/wireless/marvell/libertas/main.c3
-rw-r--r--drivers/net/wireless/marvell/libertas/types.h21
-rw-r--r--drivers/net/wireless/marvell/libertas_tf/if_usb.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11h.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11n.c6
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/Kconfig5
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cmdevt.c5
-rw-r--r--drivers/net/wireless/marvell/mwifiex/fw.h23
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sdio.c26
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sdio.h1
-rw-r--r--drivers/net/wireless/mediatek/mt76/Kconfig1
-rw-r--r--drivers/net/wireless/mediatek/mt76/debugfs.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/dma.c132
-rw-r--r--drivers/net/wireless/mediatek/mt76/dma.h1
-rw-r--r--drivers/net/wireless/mediatek/mt76/eeprom.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mac80211.c124
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76.h67
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/init.c34
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mcu.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/init.c85
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mcu.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mmio.c16
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c62
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/regs.h1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/sdio_mcu.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac.h5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c9
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c46
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h16
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/phy.c7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/usb_mcu.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_util.c35
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/dma.c45
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c24
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/init.c194
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mac.c12
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/main.c39
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mcu.c193
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mcu.h1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mmio.c99
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/regs.h13
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/soc.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/acpi_sar.c62
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/acpi_sar.h12
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/init.c14
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mac.c15
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/main.c116
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mcu.c110
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h16
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/pci_mcu.c9
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/regs.h8
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/testmode.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/usb.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c13
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/eeprom.c45
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/init.c416
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mac.c149
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mac.h24
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/main.c17
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mcu.c249
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mcu.h16
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mmio.c7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h26
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/regs.h16
-rw-r--r--drivers/net/wireless/mediatek/mt76/sdio.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/sdio_txrx.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/usb.c42
-rw-r--r--drivers/net/wireless/mediatek/mt76/util.c10
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/dma.c3
-rw-r--r--drivers/net/wireless/microchip/wilc1000/netdev.c8
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/event.c3
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800lib.c2
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/Kconfig3
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/Makefile3
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h142
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8188e.c1899
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8188f.c24
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192c.c13
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c45
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723a.c28
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c18
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c450
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h46
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_bt_coexist.h2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c52
-rw-r--r--drivers/net/wireless/realtek/rtw88/bf.c13
-rw-r--r--drivers/net/wireless/realtek/rtw88/coex.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac.c14
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac80211.c4
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.c6
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.h2
-rw-r--r--drivers/net/wireless/realtek/rtw88/pci.c50
-rw-r--r--drivers/net/wireless/realtek/rtw88/ps.c4
-rw-r--r--drivers/net/wireless/realtek/rtw88/tx.c41
-rw-r--r--drivers/net/wireless/realtek/rtw88/tx.h3
-rw-r--r--drivers/net/wireless/realtek/rtw88/usb.c18
-rw-r--r--drivers/net/wireless/realtek/rtw88/wow.c2
-rw-r--r--drivers/net/wireless/realtek/rtw89/coex.c1813
-rw-r--r--drivers/net/wireless/realtek/rtw89/coex.h1
-rw-r--r--drivers/net/wireless/realtek/rtw89/core.c130
-rw-r--r--drivers/net/wireless/realtek/rtw89/core.h295
-rw-r--r--drivers/net/wireless/realtek/rtw89/debug.c43
-rw-r--r--drivers/net/wireless/realtek/rtw89/debug.h1
-rw-r--r--drivers/net/wireless/realtek/rtw89/fw.c146
-rw-r--r--drivers/net/wireless/realtek/rtw89/fw.h54
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac.c99
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac.h19
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac80211.c1
-rw-r--r--drivers/net/wireless/realtek/rtw89/pci.c17
-rw-r--r--drivers/net/wireless/realtek/rtw89/pci.h15
-rw-r--r--drivers/net/wireless/realtek/rtw89/phy.c19
-rw-r--r--drivers/net/wireless/realtek/rtw89/reg.h25
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a.c26
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.c2
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852ae.c1
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b.c27
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852be.c1
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c.c20
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c353
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852ce.c1
-rw-r--r--drivers/net/wireless/realtek/rtw89/ser.c1
-rw-r--r--drivers/net/wireless/realtek/rtw89/txrx.h2
-rw-r--r--drivers/net/wireless/realtek/rtw89/wow.c26
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_coex.c1
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_hal.c4
-rw-r--r--drivers/net/wireless/rsi/rsi_hal.h2
-rw-r--r--drivers/net/wireless/ti/wl1251/init.c2
-rw-r--r--drivers/net/wireless/wl3501_cs.c2
-rw-r--r--drivers/net/wireless/zydas/zd1211rw/zd_rf.h3
-rw-r--r--drivers/net/wwan/t7xx/t7xx_hif_dpmaif.c11
-rw-r--r--drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c29
-rw-r--r--drivers/net/wwan/t7xx/t7xx_netdev.c16
-rw-r--r--drivers/net/wwan/t7xx/t7xx_pci.c2
-rw-r--r--drivers/net/xen-netback/xenbus.c2
-rw-r--r--drivers/net/xen-netfront.c2
-rw-r--r--drivers/nvdimm/Kconfig19
-rw-r--r--drivers/nvdimm/btt.c16
-rw-r--r--drivers/nvdimm/bus.c4
-rw-r--r--drivers/nvdimm/dax_devs.c2
-rw-r--r--drivers/nvdimm/dimm_devs.c2
-rw-r--r--drivers/nvdimm/nd-core.h10
-rw-r--r--drivers/nvdimm/nd.h6
-rw-r--r--drivers/nvdimm/pfn_devs.c42
-rw-r--r--drivers/nvdimm/pmem.c24
-rw-r--r--drivers/nvdimm/region_devs.c4
-rw-r--r--drivers/nvme/host/auth.c44
-rw-r--r--drivers/nvme/host/constants.c16
-rw-r--r--drivers/nvme/host/core.c128
-rw-r--r--drivers/nvme/host/fabrics.c19
-rw-r--r--drivers/nvme/host/ioctl.c9
-rw-r--r--drivers/nvme/host/nvme.h16
-rw-r--r--drivers/nvme/host/pci.c132
-rw-r--r--drivers/nvme/host/tcp.c10
-rw-r--r--drivers/nvme/target/admin-cmd.c2
-rw-r--r--drivers/nvme/target/fc.c4
-rw-r--r--drivers/nvme/target/io-cmd-file.c10
-rw-r--r--drivers/nvme/target/passthru.c5
-rw-r--r--drivers/nvme/target/tcp.c10
-rw-r--r--drivers/nvme/target/zns.c3
-rw-r--r--drivers/nvmem/Kconfig10
-rw-r--r--drivers/nvmem/Makefile1
-rw-r--r--drivers/nvmem/brcm_nvram.c3
-rw-r--r--drivers/nvmem/core.c205
-rw-r--r--drivers/nvmem/imx-ocotp.c4
-rw-r--r--drivers/nvmem/qcom-spmi-sdam.c14
-rw-r--r--drivers/nvmem/rave-sp-eeprom.c2
-rw-r--r--drivers/nvmem/stm32-bsec-optee-ta.c298
-rw-r--r--drivers/nvmem/stm32-bsec-optee-ta.h80
-rw-r--r--drivers/nvmem/stm32-romem.c84
-rw-r--r--drivers/nvmem/sunxi_sid.c23
-rw-r--r--drivers/of/Kconfig14
-rw-r--r--drivers/of/address.c62
-rw-r--r--drivers/of/base.c3
-rw-r--r--drivers/of/device.c10
-rw-r--r--drivers/of/dynamic.c31
-rw-r--r--drivers/of/fdt.c6
-rw-r--r--drivers/of/irq.c12
-rw-r--r--drivers/of/kobj.c2
-rw-r--r--drivers/of/of_reserved_mem.c13
-rw-r--r--drivers/of/overlay.c2
-rw-r--r--drivers/of/platform.c19
-rw-r--r--drivers/of/property.c94
-rw-r--r--drivers/of/unittest-data/testcases_common.dtsi1
-rw-r--r--drivers/of/unittest-data/tests-lifecycle.dtsi8
-rw-r--r--drivers/of/unittest.c150
-rw-r--r--drivers/opp/Kconfig1
-rw-r--r--drivers/opp/debugfs.c2
-rw-r--r--drivers/parisc/pdc_stable.c9
-rw-r--r--drivers/parport/Kconfig11
-rw-r--r--drivers/parport/Makefile1
-rw-r--r--drivers/parport/parport_ax88796.c418
-rw-r--r--drivers/parport/parport_pc.c125
-rw-r--r--drivers/pci/controller/Kconfig5
-rw-r--r--drivers/pci/controller/dwc/Kconfig23
-rw-r--r--drivers/pci/controller/dwc/pci-dra7xx.c2
-rw-r--r--drivers/pci/controller/dwc/pci-imx6.c200
-rw-r--r--drivers/pci/controller/dwc/pcie-bt1.c4
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-ep.c12
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-host.c25
-rw-r--r--drivers/pci/controller/dwc/pcie-designware.c195
-rw-r--r--drivers/pci/controller/dwc/pcie-designware.h21
-rw-r--r--drivers/pci/controller/dwc/pcie-histb.c1
-rw-r--r--drivers/pci/controller/dwc/pcie-qcom.c15
-rw-r--r--drivers/pci/controller/dwc/pcie-tegra194.c9
-rw-r--r--drivers/pci/controller/mobiveil/pcie-mobiveil-plat.c1
-rw-r--r--drivers/pci/controller/pci-hyperv.c8
-rw-r--r--drivers/pci/controller/pci-loongson.c71
-rw-r--r--drivers/pci/controller/pci-tegra.c6
-rw-r--r--drivers/pci/controller/pci-versatile.c1
-rw-r--r--drivers/pci/controller/pcie-hisi-error.c1
-rw-r--r--drivers/pci/controller/pcie-microchip-host.c1
-rw-r--r--drivers/pci/controller/pcie-mt7621.c2
-rw-r--r--drivers/pci/controller/vmd.c97
-rw-r--r--drivers/pci/endpoint/functions/pci-epf-test.c38
-rw-r--r--drivers/pci/endpoint/functions/pci-epf-vntb.c1
-rw-r--r--drivers/pci/endpoint/pci-ep-cfs.c1
-rw-r--r--drivers/pci/endpoint/pci-epc-core.c33
-rw-r--r--drivers/pci/endpoint/pci-epc-mem.c1
-rw-r--r--drivers/pci/endpoint/pci-epf-core.c1
-rw-r--r--drivers/pci/hotplug/acpiphp_core.c1
-rw-r--r--drivers/pci/hotplug/pciehp_hpc.c2
-rw-r--r--drivers/pci/hotplug/shpchp_core.c1
-rw-r--r--drivers/pci/iov.c2
-rw-r--r--drivers/pci/p2pdma.c8
-rw-r--r--drivers/pci/pci-acpi.c45
-rw-r--r--drivers/pci/pci-driver.c6
-rw-r--r--drivers/pci/pci.c76
-rw-r--r--drivers/pci/pci.h63
-rw-r--r--drivers/pci/pcie/aer.c51
-rw-r--r--drivers/pci/pcie/aspm.c165
-rw-r--r--drivers/pci/pcie/dpc.c4
-rw-r--r--drivers/pci/pcie/portdrv.c16
-rw-r--r--drivers/pci/probe.c4
-rw-r--r--drivers/pci/quirks.c23
-rw-r--r--drivers/pci/setup-bus.c236
-rw-r--r--drivers/pci/slot.c2
-rw-r--r--drivers/pci/switch/switchtec.c13
-rw-r--r--drivers/pcmcia/Kconfig12
-rw-r--r--drivers/pcmcia/Makefile5
-rw-r--r--drivers/pcmcia/ds.c4
-rw-r--r--drivers/pcmcia/pxa2xx_base.c8
-rw-r--r--drivers/pcmcia/pxa2xx_mainstone.c122
-rw-r--r--drivers/pcmcia/sa1100_generic.c5
-rw-r--r--drivers/pcmcia/sa1100_h3600.c2
-rw-r--r--drivers/pcmcia/sa1100_simpad.c115
-rw-r--r--drivers/pcmcia/sa1111_badge4.c158
-rw-r--r--drivers/pcmcia/sa1111_generic.c8
-rw-r--r--drivers/pcmcia/sa1111_lubbock.c155
-rw-r--r--drivers/perf/arm-cmn.c1
-rw-r--r--drivers/perf/arm_pmu.c19
-rw-r--r--drivers/perf/arm_spe_pmu.c160
-rw-r--r--drivers/perf/fsl_imx8_ddr_perf.c3
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_cpa_pmu.c16
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c2
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_hha_pmu.c2
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c2
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_pa_pmu.c2
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_pmu.c9
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_pmu.h4
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c2
-rw-r--r--drivers/perf/marvell_cn10k_ddr_pmu.c10
-rw-r--r--drivers/perf/marvell_cn10k_tad_pmu.c22
-rw-r--r--drivers/perf/riscv_pmu_sbi.c8
-rw-r--r--drivers/phy/allwinner/phy-sun4i-usb.c59
-rw-r--r--drivers/phy/mediatek/phy-mtk-io.h4
-rw-r--r--drivers/phy/phy-can-transceiver.c4
-rw-r--r--drivers/phy/phy-core.c51
-rw-r--r--drivers/phy/qualcomm/Kconfig68
-rw-r--r--drivers/phy/qualcomm/Makefile14
-rw-r--r--drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c259
-rw-r--r--drivers/phy/qualcomm/phy-qcom-pcie2.c6
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-combo.c750
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcie-msm8996.c6
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcie.c588
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v6.h15
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v6_20.h23
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcs-ufs-v2.h25
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcs-ufs-v3.h3
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcs-ufs-v5.h5
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcs-ufs-v6.h31
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcs-usb-v6.h31
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcs-v2.h19
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcs-v5.h4
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcs-v6.h16
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-pcs-v6_20.h18
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-qserdes-com-v6.h82
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-qserdes-com.h2
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-qserdes-ln-shrd-v6.h32
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-ufs-v6.h30
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v5_5nm.h5
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v6.h77
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v6_20.h45
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-ufs.c688
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp-usb.c83
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp.h23
-rw-r--r--drivers/phy/qualcomm/phy-qcom-snps-eusb2.c441
-rw-r--r--drivers/phy/renesas/r8a779f0-ether-serdes.c65
-rw-r--r--drivers/phy/rockchip/phy-rockchip-typec.c7
-rw-r--r--drivers/phy/tegra/Makefile1
-rw-r--r--drivers/phy/tegra/xusb-tegra186.c64
-rw-r--r--drivers/phy/tegra/xusb.c23
-rw-r--r--drivers/phy/tegra/xusb.h23
-rw-r--r--drivers/phy/ti/phy-j721e-wiz.c66
-rw-r--r--drivers/pinctrl/Kconfig2
-rw-r--r--drivers/pinctrl/aspeed/pinctrl-aspeed.c13
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm2835.c10
-rw-r--r--drivers/pinctrl/core.c14
-rw-r--r--drivers/pinctrl/freescale/pinctrl-mxs.c6
-rw-r--r--drivers/pinctrl/freescale/pinctrl-mxs.h6
-rw-r--r--drivers/pinctrl/intel/pinctrl-alderlake.c18
-rw-r--r--drivers/pinctrl/intel/pinctrl-baytrail.c10
-rw-r--r--drivers/pinctrl/intel/pinctrl-broxton.c31
-rw-r--r--drivers/pinctrl/intel/pinctrl-cannonlake.c31
-rw-r--r--drivers/pinctrl/intel/pinctrl-cedarfork.c13
-rw-r--r--drivers/pinctrl/intel/pinctrl-cherryview.c6
-rw-r--r--drivers/pinctrl/intel/pinctrl-denverton.c13
-rw-r--r--drivers/pinctrl/intel/pinctrl-elkhartlake.c24
-rw-r--r--drivers/pinctrl/intel/pinctrl-emmitsburg.c13
-rw-r--r--drivers/pinctrl/intel/pinctrl-geminilake.c21
-rw-r--r--drivers/pinctrl/intel/pinctrl-icelake.c35
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.c90
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.h55
-rw-r--r--drivers/pinctrl/intel/pinctrl-jasperlake.c13
-rw-r--r--drivers/pinctrl/intel/pinctrl-lakefield.c13
-rw-r--r--drivers/pinctrl/intel/pinctrl-lewisburg.c12
-rw-r--r--drivers/pinctrl/intel/pinctrl-lynxpoint.c8
-rw-r--r--drivers/pinctrl/intel/pinctrl-merrifield.c6
-rw-r--r--drivers/pinctrl/intel/pinctrl-meteorlake.c23
-rw-r--r--drivers/pinctrl/intel/pinctrl-moorefield.c6
-rw-r--r--drivers/pinctrl/intel/pinctrl-sunrisepoint.c37
-rw-r--r--drivers/pinctrl/intel/pinctrl-tigerlake.c30
-rw-r--r--drivers/pinctrl/mediatek/Kconfig5
-rw-r--r--drivers/pinctrl/mediatek/Makefile1
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-moore.c1
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt7981.c1048
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt8195.c4
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-common.c1
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-paris.c5
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-abx500.c2
-rw-r--r--drivers/pinctrl/pinctrl-amd.c8
-rw-r--r--drivers/pinctrl/pinctrl-amd.h1
-rw-r--r--drivers/pinctrl/pinctrl-at91-pio4.c4
-rw-r--r--drivers/pinctrl/pinctrl-at91.c44
-rw-r--r--drivers/pinctrl/pinctrl-da850-pupd.c6
-rw-r--r--drivers/pinctrl/pinctrl-digicolor.c10
-rw-r--r--drivers/pinctrl/pinctrl-mcp23s08_i2c.c5
-rw-r--r--drivers/pinctrl/pinctrl-rockchip.c1
-rw-r--r--drivers/pinctrl/pinctrl-single.c2
-rw-r--r--drivers/pinctrl/pinctrl-sx150x.c6
-rw-r--r--drivers/pinctrl/pinctrl-thunderbay.c7
-rw-r--r--drivers/pinctrl/pinmux.c4
-rw-r--r--drivers/pinctrl/qcom/Kconfig50
-rw-r--r--drivers/pinctrl/qcom/Makefile5
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ipq5332.c861
-rw-r--r--drivers/pinctrl/qcom/pinctrl-lpass-lpi.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm.c9
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm.h1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8226.c11
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8976.c8
-rw-r--r--drivers/pinctrl/qcom/pinctrl-qdu1000.c1274
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sa8775p.c1537
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm8450-lpass-lpi.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm8550-lpass-lpi.c240
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm8550.c1790
-rw-r--r--drivers/pinctrl/qcom/pinctrl-spmi-gpio.c8
-rw-r--r--drivers/pinctrl/qcom/pinctrl-spmi-mpp.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c2
-rw-r--r--drivers/pinctrl/ralink/pinctrl-mt7620.c164
-rw-r--r--drivers/pinctrl/ralink/pinctrl-mt7621.c48
-rw-r--r--drivers/pinctrl/ralink/pinctrl-rt2880.c28
-rw-r--r--drivers/pinctrl/ralink/pinctrl-rt305x.c82
-rw-r--r--drivers/pinctrl/ralink/pinctrl-rt3883.c44
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a77950.c244
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a779g0.c112
-rw-r--r--drivers/pinctrl/renesas/pinctrl-rzg2l.c25
-rw-r--r--drivers/pinctrl/samsung/Kconfig5
-rw-r--r--drivers/pinctrl/samsung/Makefile1
-rw-r--r--drivers/pinctrl/samsung/pinctrl-s3c24xx.c653
-rw-r--r--drivers/pinctrl/samsung/pinctrl-samsung.c12
-rw-r--r--drivers/pinctrl/starfive/Kconfig33
-rw-r--r--drivers/pinctrl/starfive/Makefile4
-rw-r--r--drivers/pinctrl/starfive/pinctrl-starfive-jh7110-aon.c177
-rw-r--r--drivers/pinctrl/starfive/pinctrl-starfive-jh7110-sys.c449
-rw-r--r--drivers/pinctrl/starfive/pinctrl-starfive-jh7110.c982
-rw-r--r--drivers/pinctrl/starfive/pinctrl-starfive-jh7110.h70
-rw-r--r--drivers/pinctrl/stm32/pinctrl-stm32.c1
-rw-r--r--drivers/pinctrl/sunplus/sppctl.c1
-rw-r--r--drivers/platform/chrome/Kconfig12
-rw-r--r--drivers/platform/chrome/Makefile4
-rw-r--r--drivers/platform/chrome/cros_ec.c15
-rw-r--r--drivers/platform/chrome/cros_ec_debugfs.c25
-rw-r--r--drivers/platform/chrome/cros_ec_lightbar.c14
-rw-r--r--drivers/platform/chrome/cros_ec_lpc.c12
-rw-r--r--drivers/platform/chrome/cros_ec_proto_test.c13
-rw-r--r--drivers/platform/chrome/cros_ec_sysfs.c40
-rw-r--r--drivers/platform/chrome/cros_ec_typec.c123
-rw-r--r--drivers/platform/chrome/cros_ec_typec.h85
-rw-r--r--drivers/platform/chrome/cros_ec_uart.c362
-rw-r--r--drivers/platform/chrome/cros_typec_switch.c16
-rw-r--r--drivers/platform/chrome/cros_typec_vdm.c148
-rw-r--r--drivers/platform/chrome/cros_typec_vdm.h13
-rw-r--r--drivers/platform/chrome/wilco_ec/sysfs.c3
-rw-r--r--drivers/platform/mellanox/mlxreg-hotplug.c28
-rw-r--r--drivers/platform/surface/aggregator/bus.c10
-rw-r--r--drivers/platform/surface/aggregator/controller.c44
-rw-r--r--drivers/platform/surface/aggregator/ssh_msgb.h4
-rw-r--r--drivers/platform/surface/aggregator/ssh_request_layer.c11
-rw-r--r--drivers/platform/surface/aggregator/trace.h73
-rw-r--r--drivers/platform/surface/surface_acpi_notify.c2
-rw-r--r--drivers/platform/surface/surface_aggregator_cdev.c6
-rw-r--r--drivers/platform/surface/surface_aggregator_hub.c8
-rw-r--r--drivers/platform/surface/surface_aggregator_registry.c2
-rw-r--r--drivers/platform/surface/surface_aggregator_tabletsw.c12
-rw-r--r--drivers/platform/surface/surface_dtx.c20
-rw-r--r--drivers/platform/surface/surface_hotplug.c13
-rw-r--r--drivers/platform/surface/surface_platform_profile.c2
-rw-r--r--drivers/platform/x86/Kconfig1
-rw-r--r--drivers/platform/x86/acerhdf.c79
-rw-r--r--drivers/platform/x86/amd/Kconfig1
-rw-r--r--drivers/platform/x86/amd/pmc.c40
-rw-r--r--drivers/platform/x86/amd/pmf/Kconfig1
-rw-r--r--drivers/platform/x86/amd/pmf/auto-mode.c9
-rw-r--r--drivers/platform/x86/amd/pmf/cnqf.c14
-rw-r--r--drivers/platform/x86/amd/pmf/core.c32
-rw-r--r--drivers/platform/x86/amd/pmf/pmf.h3
-rw-r--r--drivers/platform/x86/amd/pmf/sps.c28
-rw-r--r--drivers/platform/x86/asus-tf103c-dock.c4
-rw-r--r--drivers/platform/x86/dell/Kconfig8
-rw-r--r--drivers/platform/x86/dell/dell-smo8800.c5
-rw-r--r--drivers/platform/x86/dell/dell-wmi-ddv.c528
-rw-r--r--drivers/platform/x86/dell/dell-wmi-sysman/sysman.c2
-rw-r--r--drivers/platform/x86/hp/hp-wmi.c2
-rw-r--r--drivers/platform/x86/intel/Kconfig13
-rw-r--r--drivers/platform/x86/intel/Makefile4
-rw-r--r--drivers/platform/x86/intel/int1092/intel_sar.c15
-rw-r--r--drivers/platform/x86/intel/int3472/Kconfig1
-rw-r--r--drivers/platform/x86/intel/int3472/Makefile2
-rw-r--r--drivers/platform/x86/intel/int3472/clk_and_regulator.c34
-rw-r--r--drivers/platform/x86/intel/int3472/common.h18
-rw-r--r--drivers/platform/x86/intel/int3472/discrete.c108
-rw-r--r--drivers/platform/x86/intel/int3472/led.c75
-rw-r--r--drivers/platform/x86/intel/oaktrail.c6
-rw-r--r--drivers/platform/x86/intel/pmc/tgl.c6
-rw-r--r--drivers/platform/x86/intel/punit_ipc.c6
-rw-r--r--drivers/platform/x86/intel/tpmi.c415
-rw-r--r--drivers/platform/x86/intel/vsec.c39
-rw-r--r--drivers/platform/x86/intel/vsec.h6
-rw-r--r--drivers/platform/x86/mlx-platform.c1246
-rw-r--r--drivers/platform/x86/nvidia-wmi-ec-backlight.c6
-rw-r--r--drivers/platform/x86/think-lmi.c8
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c2
-rw-r--r--drivers/platform/x86/touchscreen_dmi.c9
-rw-r--r--drivers/platform/x86/uv_sysfs.c6
-rw-r--r--drivers/platform/x86/wmi.c15
-rw-r--r--drivers/platform/x86/x86-android-tablets.c2
-rw-r--r--drivers/power/reset/Kconfig7
-rw-r--r--drivers/power/reset/Makefile1
-rw-r--r--drivers/power/reset/odroid-go-ultra-poweroff.c177
-rw-r--r--drivers/power/reset/syscon-reboot.c6
-rw-r--r--drivers/power/supply/Kconfig63
-rw-r--r--drivers/power/supply/Makefile6
-rw-r--r--drivers/power/supply/ab8500_fg.c22
-rw-r--r--drivers/power/supply/bq2415x_charger.c42
-rw-r--r--drivers/power/supply/bq24190_charger.c2
-rw-r--r--drivers/power/supply/bq24257_charger.c8
-rw-r--r--drivers/power/supply/bq256xx_charger.c4
-rw-r--r--drivers/power/supply/bq25890_charger.c181
-rw-r--r--drivers/power/supply/bq27xxx_battery.c8
-rw-r--r--drivers/power/supply/charger-manager.c6
-rw-r--r--drivers/power/supply/collie_battery.c4
-rw-r--r--drivers/power/supply/da9150-charger.c9
-rw-r--r--drivers/power/supply/ds2760_battery.c8
-rw-r--r--drivers/power/supply/ds2780_battery.c8
-rw-r--r--drivers/power/supply/ds2781_battery.c8
-rw-r--r--drivers/power/supply/lp8788-charger.c7
-rw-r--r--drivers/power/supply/ltc4162-l-charger.c12
-rw-r--r--drivers/power/supply/max14577_charger.c2
-rw-r--r--drivers/power/supply/max1721x_battery.c8
-rw-r--r--drivers/power/supply/max77650-charger.c8
-rw-r--r--drivers/power/supply/max77693_charger.c6
-rw-r--r--drivers/power/supply/mp2629_charger.c2
-rw-r--r--drivers/power/supply/olpc_battery.c2
-rw-r--r--drivers/power/supply/pcf50633-charger.c6
-rw-r--r--drivers/power/supply/pda_power.c520
-rw-r--r--drivers/power/supply/power_supply_core.c93
-rw-r--r--drivers/power/supply/power_supply_leds.c1
-rw-r--r--drivers/power/supply/power_supply_sysfs.c10
-rw-r--r--drivers/power/supply/rt9467-charger.c1282
-rw-r--r--drivers/power/supply/rt9471.c930
-rw-r--r--drivers/power/supply/s3c_adc_battery.c453
-rw-r--r--drivers/power/supply/surface_battery.c4
-rw-r--r--drivers/power/supply/surface_charger.c2
-rw-r--r--drivers/power/supply/test_power.c3
-rw-r--r--drivers/power/supply/tosa_battery.c512
-rw-r--r--drivers/power/supply/twl4030_charger.c6
-rw-r--r--drivers/power/supply/wm8350_power.c2
-rw-r--r--drivers/power/supply/z2_battery.c318
-rw-r--r--drivers/powercap/idle_inject.c65
-rw-r--r--drivers/powercap/intel_rapl_common.c13
-rw-r--r--drivers/powercap/powercap_sys.c14
-rw-r--r--drivers/pps/clients/pps-ldisc.c6
-rw-r--r--drivers/ptp/ptp_qoriq.c50
-rw-r--r--drivers/rapidio/rio-driver.c4
-rw-r--r--drivers/regulator/Kconfig8
-rw-r--r--drivers/regulator/Makefile1
-rw-r--r--drivers/regulator/act8945a-regulator.c6
-rw-r--r--drivers/regulator/fixed-helper.c2
-rw-r--r--drivers/regulator/max20411-regulator.c163
-rw-r--r--drivers/regulator/max597x-regulator.c52
-rw-r--r--drivers/regulator/max77802-regulator.c34
-rw-r--r--drivers/regulator/mcp16502.c1
-rw-r--r--drivers/regulator/s5m8767.c6
-rw-r--r--drivers/regulator/scmi-regulator.c16
-rw-r--r--drivers/regulator/tps65219-regulator.c24
-rw-r--r--drivers/remoteproc/remoteproc_core.c5
-rw-r--r--drivers/rpmsg/rpmsg_core.c4
-rw-r--r--drivers/rtc/Kconfig17
-rw-r--r--drivers/rtc/Makefile1
-rw-r--r--drivers/rtc/rtc-efi.c48
-rw-r--r--drivers/rtc/rtc-sunplus.c4
-rw-r--r--drivers/rtc/rtc-v3020.c369
-rw-r--r--drivers/s390/block/dasd.c5
-rw-r--r--drivers/s390/block/dasd_3990_erp.c10
-rw-r--r--drivers/s390/block/dasd_alias.c6
-rw-r--r--drivers/s390/block/dasd_eckd.c104
-rw-r--r--drivers/s390/block/dasd_eer.c2
-rw-r--r--drivers/s390/block/dasd_fba.c14
-rw-r--r--drivers/s390/block/dcssblk.c4
-rw-r--r--drivers/s390/char/Kconfig11
-rw-r--r--drivers/s390/char/Makefile4
-rw-r--r--drivers/s390/char/con3215.c4
-rw-r--r--drivers/s390/char/con3270.c2331
-rw-r--r--drivers/s390/char/diag_ftp.c4
-rw-r--r--drivers/s390/char/fs3270.c124
-rw-r--r--drivers/s390/char/raw3270.c376
-rw-r--r--drivers/s390/char/raw3270.h227
-rw-r--r--drivers/s390/char/sclp_early.c2
-rw-r--r--drivers/s390/char/sclp_ftp.c6
-rw-r--r--drivers/s390/char/tty3270.c1963
-rw-r--r--drivers/s390/char/tty3270.h15
-rw-r--r--drivers/s390/cio/css.c25
-rw-r--r--drivers/s390/cio/css.h2
-rw-r--r--drivers/s390/cio/device.c17
-rw-r--r--drivers/s390/cio/scm.c2
-rw-r--r--drivers/s390/cio/vfio_ccw_cp.c365
-rw-r--r--drivers/s390/cio/vfio_ccw_cp.h3
-rw-r--r--drivers/s390/cio/vfio_ccw_drv.c2
-rw-r--r--drivers/s390/cio/vfio_ccw_fsm.c2
-rw-r--r--drivers/s390/crypto/ap_bus.c4
-rw-r--r--drivers/s390/crypto/vfio_ap_ops.c116
-rw-r--r--drivers/s390/crypto/zcrypt_api.c6
-rw-r--r--drivers/s390/net/ctcm_fsms.c32
-rw-r--r--drivers/s390/net/ctcm_main.c16
-rw-r--r--drivers/s390/net/ctcm_mpc.c15
-rw-r--r--drivers/s390/net/ism.h19
-rw-r--r--drivers/s390/net/ism_drv.c376
-rw-r--r--drivers/s390/net/qeth_core_main.c14
-rw-r--r--drivers/s390/net/qeth_core_sys.c66
-rw-r--r--drivers/s390/net/qeth_ethtool.c6
-rw-r--r--drivers/s390/net/qeth_l2_main.c53
-rw-r--r--drivers/s390/net/qeth_l2_sys.c28
-rw-r--r--drivers/s390/net/qeth_l3_main.c7
-rw-r--r--drivers/s390/net/qeth_l3_sys.c83
-rw-r--r--drivers/sbus/char/oradax.c2
-rw-r--r--drivers/scsi/3w-sas.c12
-rw-r--r--drivers/scsi/3w-sas.h4
-rw-r--r--drivers/scsi/aacraid/aachba.c5
-rw-r--r--drivers/scsi/aic94xx/aic94xx_task.c3
-rw-r--r--drivers/scsi/ch.c30
-rw-r--r--drivers/scsi/cxlflash/ocxl_hw.c2
-rw-r--r--drivers/scsi/cxlflash/superpipe.c34
-rw-r--r--drivers/scsi/cxlflash/vlun.c32
-rw-r--r--drivers/scsi/device_handler/scsi_dh_alua.c26
-rw-r--r--drivers/scsi/device_handler/scsi_dh_emc.c13
-rw-r--r--drivers/scsi/device_handler/scsi_dh_hp_sw.c22
-rw-r--r--drivers/scsi/device_handler/scsi_dh_rdac.c12
-rw-r--r--drivers/scsi/elx/libefc_sli/sli4.c2
-rw-r--r--drivers/scsi/ipr.c20
-rw-r--r--drivers/scsi/ips.c11
-rw-r--r--drivers/scsi/iscsi_tcp.c3
-rw-r--r--drivers/scsi/libsas/sas_ata.c99
-rw-r--r--drivers/scsi/libsas/sas_discover.c6
-rw-r--r--drivers/scsi/libsas/sas_expander.c125
-rw-r--r--drivers/scsi/lpfc/lpfc.h4
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c63
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h4
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c33
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c17
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h7
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c95
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c8
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c83
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h5
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h6
-rw-r--r--drivers/scsi/lpfc/lpfc_vmid.c41
-rw-r--r--drivers/scsi/lpfc/lpfc_vport.c16
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.c2
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.h5
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c3
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_ctl.c2
-rw-r--r--drivers/scsi/mvumi.c4
-rw-r--r--drivers/scsi/mvumi.h6
-rw-r--r--drivers/scsi/pm8001/pm8001_ctl.c46
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c5
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.c9
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h51
-rw-r--r--drivers/scsi/qla2xxx/qla_dfs.c10
-rw-r--r--drivers/scsi/qla2xxx/qla_edif.c96
-rw-r--r--drivers/scsi/qla2xxx/qla_edif.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_edif_bsg.h15
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h18
-rw-r--r--drivers/scsi/qla2xxx/qla_gs.c407
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c97
-rw-r--r--drivers/scsi/qla2xxx/qla_inline.h110
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c107
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c9
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c8
-rw-r--r--drivers/scsi/qla2xxx/qla_mid.c302
-rw-r--r--drivers/scsi/qla2xxx/qla_nvme.c38
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c61
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c106
-rw-r--r--drivers/scsi/qla2xxx/qla_target.h1
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h6
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.c13
-rw-r--r--drivers/scsi/scsi.c14
-rw-r--r--drivers/scsi/scsi_ioctl.c7
-rw-r--r--drivers/scsi/scsi_lib.c78
-rw-r--r--drivers/scsi/scsi_scan.c33
-rw-r--r--drivers/scsi/scsi_sysfs.c6
-rw-r--r--drivers/scsi/scsi_transport_spi.c31
-rw-r--r--drivers/scsi/sd.c119
-rw-r--r--drivers/scsi/sd_zbc.c8
-rw-r--r--drivers/scsi/ses.c14
-rw-r--r--drivers/scsi/sg.c2
-rw-r--r--drivers/scsi/smartpqi/smartpqi.h2
-rw-r--r--drivers/scsi/smartpqi/smartpqi_init.c3
-rw-r--r--drivers/scsi/snic/snic_debugfs.c4
-rw-r--r--drivers/scsi/sr.c11
-rw-r--r--drivers/scsi/sr_ioctl.c17
-rw-r--r--drivers/scsi/storvsc_drv.c4
-rw-r--r--drivers/scsi/virtio_scsi.c4
-rw-r--r--drivers/sh/maple/maple.c7
-rw-r--r--drivers/slimbus/core.c4
-rw-r--r--drivers/soc/atmel/soc.c9
-rw-r--r--drivers/soc/atmel/soc.h3
-rw-r--r--drivers/soc/imx/gpcv2.c2
-rw-r--r--drivers/soc/ixp4xx/ixp4xx-npe.c6
-rw-r--r--drivers/soc/qcom/apr.c4
-rw-r--r--drivers/soc/qcom/qcom-geni-se.c2
-rw-r--r--drivers/soc/qcom/qmi_interface.c3
-rw-r--r--drivers/soc/renesas/rcar-sysc.c2
-rw-r--r--drivers/soc/samsung/Kconfig26
-rw-r--r--drivers/soc/samsung/Makefile1
-rw-r--r--drivers/soc/samsung/s3c-pm-debug.c79
-rw-r--r--drivers/soc/ti/smartreflex.c4
-rw-r--r--drivers/soundwire/bus.c56
-rw-r--r--drivers/soundwire/bus.h3
-rw-r--r--drivers/soundwire/bus_type.c13
-rw-r--r--drivers/soundwire/cadence_master.c80
-rw-r--r--drivers/soundwire/cadence_master.h22
-rw-r--r--drivers/soundwire/debugfs.c13
-rw-r--r--drivers/soundwire/intel.c27
-rw-r--r--drivers/soundwire/intel_auxdevice.c1
-rw-r--r--drivers/soundwire/stream.c42
-rw-r--r--drivers/spi/Kconfig62
-rw-r--r--drivers/spi/Makefile4
-rw-r--r--drivers/spi/atmel-quadspi.c8
-rw-r--r--drivers/spi/spi-altera-core.c30
-rw-r--r--drivers/spi/spi-altera-dfl.c36
-rw-r--r--drivers/spi/spi-altera-platform.c36
-rw-r--r--drivers/spi/spi-ar934x.c10
-rw-r--r--drivers/spi/spi-armada-3700.c98
-rw-r--r--drivers/spi/spi-at91-usart.c40
-rw-r--r--drivers/spi/spi-ath79.c40
-rw-r--r--drivers/spi/spi-atmel.c254
-rw-r--r--drivers/spi/spi-bcm63xx-hsspi.c493
-rw-r--r--drivers/spi/spi-bcmbca-hsspi.c654
-rw-r--r--drivers/spi/spi-cadence-quadspi.c42
-rw-r--r--drivers/spi/spi-dw-core.c2
-rw-r--r--drivers/spi/spi-geni-qcom.c213
-rw-r--r--drivers/spi/spi-intel-pci.c13
-rw-r--r--drivers/spi/spi-intel.c10
-rw-r--r--drivers/spi/spi-loopback-test.c8
-rw-r--r--drivers/spi/spi-mem.c2
-rw-r--r--drivers/spi/spi-mtk-snfi.c41
-rw-r--r--drivers/spi/spi-omap-100k.c490
-rw-r--r--drivers/spi/spi-omap-uwire.c16
-rw-r--r--drivers/spi/spi-pl022.c1
-rw-r--r--drivers/spi/spi-s3c24xx-regs.h41
-rw-r--r--drivers/spi/spi-s3c24xx.c596
-rw-r--r--drivers/spi/spi-synquacer.c7
-rw-r--r--drivers/spi/spi-xilinx.c9
-rw-r--r--drivers/spi/spi.c94
-rw-r--r--drivers/spi/spidev.c33
-rw-r--r--drivers/spmi/spmi.c2
-rw-r--r--drivers/ssb/main.c4
-rw-r--r--drivers/staging/emxx_udc/emxx_udc.c7
-rw-r--r--drivers/staging/greybus/audio_codec.c6
-rw-r--r--drivers/staging/greybus/gbphy.c14
-rw-r--r--drivers/staging/greybus/gpio.c6
-rw-r--r--drivers/staging/greybus/hid.c2
-rw-r--r--drivers/staging/greybus/uart.c4
-rw-r--r--drivers/staging/greybus/usb.c2
-rw-r--r--drivers/staging/ks7010/ks_wlan_net.c3
-rw-r--r--drivers/staging/media/Kconfig1
-rw-r--r--drivers/staging/media/Makefile1
-rw-r--r--drivers/staging/media/atomisp/pci/hmm/hmm_bo.c2
-rw-r--r--drivers/staging/media/deprecated/meye/meye.c4
-rw-r--r--drivers/staging/media/deprecated/stkwebcam/stk-webcam.c2
-rw-r--r--drivers/staging/media/deprecated/vpfe_capture/Kconfig58
-rw-r--r--drivers/staging/media/deprecated/vpfe_capture/Makefile4
-rw-r--r--drivers/staging/media/deprecated/vpfe_capture/TODO7
-rw-r--r--drivers/staging/media/deprecated/vpfe_capture/ccdc_hw_device.h80
-rw-r--r--drivers/staging/media/deprecated/vpfe_capture/dm355_ccdc.c934
-rw-r--r--drivers/staging/media/deprecated/vpfe_capture/dm355_ccdc.h308
-rw-r--r--drivers/staging/media/deprecated/vpfe_capture/dm355_ccdc_regs.h297
-rw-r--r--drivers/staging/media/deprecated/vpfe_capture/dm644x_ccdc.c879
-rw-r--r--drivers/staging/media/deprecated/vpfe_capture/dm644x_ccdc.h171
-rw-r--r--drivers/staging/media/deprecated/vpfe_capture/dm644x_ccdc_regs.h140
-rw-r--r--drivers/staging/media/deprecated/vpfe_capture/isif.c1127
-rw-r--r--drivers/staging/media/deprecated/vpfe_capture/isif.h518
-rw-r--r--drivers/staging/media/deprecated/vpfe_capture/isif_regs.h256
-rw-r--r--drivers/staging/media/deprecated/vpfe_capture/vpfe_capture.c1902
-rw-r--r--drivers/staging/pi433/TODO3
-rw-r--r--drivers/staging/pi433/pi433_if.c11
-rw-r--r--drivers/staging/r8188eu/core/rtw_cmd.c67
-rw-r--r--drivers/staging/r8188eu/core/rtw_fw.c10
-rw-r--r--drivers/staging/r8188eu/core/rtw_mlme.c5
-rw-r--r--drivers/staging/r8188eu/core/rtw_mlme_ext.c31
-rw-r--r--drivers/staging/r8188eu/core/rtw_pwrctrl.c23
-rw-r--r--drivers/staging/r8188eu/core/rtw_recv.c30
-rw-r--r--drivers/staging/r8188eu/core/rtw_sta_mgt.c26
-rw-r--r--drivers/staging/r8188eu/core/rtw_xmit.c478
-rw-r--r--drivers/staging/r8188eu/hal/rtl8188e_cmd.c4
-rw-r--r--drivers/staging/r8188eu/hal/rtl8188e_phycfg.c35
-rw-r--r--drivers/staging/r8188eu/hal/rtl8188e_rf6052.c7
-rw-r--r--drivers/staging/r8188eu/hal/rtl8188eu_xmit.c42
-rw-r--r--drivers/staging/r8188eu/hal/usb_halinit.c31
-rw-r--r--drivers/staging/r8188eu/hal/usb_ops_linux.c80
-rw-r--r--drivers/staging/r8188eu/include/drv_types.h4
-rw-r--r--drivers/staging/r8188eu/include/hal_intf.h2
-rw-r--r--drivers/staging/r8188eu/include/osdep_intf.h32
-rw-r--r--drivers/staging/r8188eu/include/rtl8188e_cmd.h2
-rw-r--r--drivers/staging/r8188eu/include/rtl8188e_spec.h21
-rw-r--r--drivers/staging/r8188eu/include/rtl8188e_xmit.h16
-rw-r--r--drivers/staging/r8188eu/include/rtw_cmd.h3
-rw-r--r--drivers/staging/r8188eu/include/rtw_io.h257
-rw-r--r--drivers/staging/r8188eu/include/rtw_pwrctrl.h3
-rw-r--r--drivers/staging/r8188eu/include/rtw_xmit.h57
-rw-r--r--drivers/staging/r8188eu/include/usb_ops.h2
-rw-r--r--drivers/staging/r8188eu/include/usb_ops_linux.h29
-rw-r--r--drivers/staging/r8188eu/os_dep/ioctl_linux.c6
-rw-r--r--drivers/staging/r8188eu/os_dep/os_intfs.c17
-rw-r--r--drivers/staging/r8188eu/os_dep/usb_intf.c33
-rw-r--r--drivers/staging/r8188eu/os_dep/usb_ops_linux.c106
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.c22
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c2
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c355
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.c4
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_hw.h219
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c374
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_phyreg.h2
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_core.c86
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_core.h208
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_dm.c342
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_dm.h45
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_pm.c2
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211.h2
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c14
-rw-r--r--drivers/staging/rts5208/ms.c2
-rw-r--r--drivers/staging/vc04_services/Makefile2
-rw-r--r--drivers/staging/vc04_services/bcm2835-audio/Makefile2
-rw-r--r--drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c12
-rw-r--r--drivers/staging/vc04_services/bcm2835-audio/bcm2835.h3
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/Makefile5
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c10
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/controls.c6
-rw-r--r--drivers/staging/vc04_services/include/linux/raspberrypi/vchiq.h63
-rw-r--r--drivers/staging/vc04_services/interface/TODO5
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c136
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.h8
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c226
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h38
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_dev.c36
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_ioctl.h11
-rw-r--r--drivers/staging/vc04_services/vchiq-mmal/Makefile5
-rw-r--r--drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c15
-rw-r--r--drivers/staging/vme_user/vme.h26
-rw-r--r--drivers/staging/vme_user/vme_bridge.h36
-rw-r--r--drivers/staging/wlan-ng/hfa384x.h171
-rw-r--r--drivers/target/iscsi/iscsi_target_nego.c2
-rw-r--r--drivers/target/target_core_file.c18
-rw-r--r--drivers/target/target_core_pscsi.c12
-rw-r--r--drivers/target/target_core_user.c2
-rw-r--r--drivers/tee/tee_core.c2
-rw-r--r--drivers/tee/tee_shm.c37
-rw-r--r--drivers/thermal/Kconfig18
-rw-r--r--drivers/thermal/Makefile7
-rw-r--r--drivers/thermal/amlogic_thermal.c1
-rw-r--r--drivers/thermal/armada_thermal.c32
-rw-r--r--drivers/thermal/broadcom/bcm2835_thermal.c11
-rw-r--r--drivers/thermal/broadcom/brcmstb_thermal.c4
-rw-r--r--drivers/thermal/da9062-thermal.c52
-rw-r--r--drivers/thermal/dove_thermal.c7
-rw-r--r--drivers/thermal/gov_bang_bang.c37
-rw-r--r--drivers/thermal/gov_fair_share.c18
-rw-r--r--drivers/thermal/gov_power_allocator.c51
-rw-r--r--drivers/thermal/gov_step_wise.c22
-rw-r--r--drivers/thermal/hisi_thermal.c18
-rw-r--r--drivers/thermal/imx8mm_thermal.c1
-rw-r--r--drivers/thermal/imx_sc_thermal.c9
-rw-r--r--drivers/thermal/imx_thermal.c72
-rw-r--r--drivers/thermal/intel/Kconfig11
-rw-r--r--drivers/thermal/intel/Makefile1
-rw-r--r--drivers/thermal/intel/int340x_thermal/Kconfig2
-rw-r--r--drivers/thermal/intel/int340x_thermal/int3400_thermal.c58
-rw-r--r--drivers/thermal/intel/int340x_thermal/int3403_thermal.c2
-rw-r--r--drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c352
-rw-r--r--drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.h17
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_device.c129
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c53
-rw-r--r--drivers/thermal/intel/intel_hfi.c3
-rw-r--r--drivers/thermal/intel/intel_menlow.c12
-rw-r--r--drivers/thermal/intel/intel_pch_thermal.c410
-rw-r--r--drivers/thermal/intel/intel_powerclamp.c545
-rw-r--r--drivers/thermal/intel/intel_quark_dts_thermal.c55
-rw-r--r--drivers/thermal/intel/intel_soc_dts_iosf.c35
-rw-r--r--drivers/thermal/intel/intel_tcc.c139
-rw-r--r--drivers/thermal/intel/intel_tcc_cooling.c37
-rw-r--r--drivers/thermal/intel/x86_pkg_temp_thermal.c176
-rw-r--r--drivers/thermal/kirkwood_thermal.c4
-rw-r--r--drivers/thermal/mediatek/Kconfig37
-rw-r--r--drivers/thermal/mediatek/Makefile2
-rw-r--r--drivers/thermal/mediatek/auxadc_thermal.c (renamed from drivers/thermal/mtk_thermal.c)151
-rw-r--r--drivers/thermal/mediatek/lvts_thermal.c1224
-rw-r--r--drivers/thermal/qcom/qcom-spmi-adc-tm5.c3
-rw-r--r--drivers/thermal/qcom/qcom-spmi-temp-alarm.c45
-rw-r--r--drivers/thermal/qcom/tsens-v0_1.c655
-rw-r--r--drivers/thermal/qcom/tsens-v1.c340
-rw-r--r--drivers/thermal/qcom/tsens.c219
-rw-r--r--drivers/thermal/qcom/tsens.h46
-rw-r--r--drivers/thermal/qoriq_thermal.c1
-rw-r--r--drivers/thermal/rcar_gen3_thermal.c43
-rw-r--r--drivers/thermal/rcar_thermal.c53
-rw-r--r--drivers/thermal/rockchip_thermal.c10
-rw-r--r--drivers/thermal/samsung/exynos_tmu.c65
-rw-r--r--drivers/thermal/spear_thermal.c4
-rw-r--r--drivers/thermal/st/Kconfig4
-rw-r--r--drivers/thermal/st/Makefile1
-rw-r--r--drivers/thermal/st/st_thermal.c47
-rw-r--r--drivers/thermal/st/st_thermal_syscfg.c174
-rw-r--r--drivers/thermal/st/stm_thermal.c1
-rw-r--r--drivers/thermal/sun8i_thermal.c4
-rw-r--r--drivers/thermal/tegra/soctherm.c35
-rw-r--r--drivers/thermal/tegra/tegra30-tsensor.c18
-rw-r--r--drivers/thermal/thermal_acpi.c117
-rw-r--r--drivers/thermal/thermal_core.c145
-rw-r--r--drivers/thermal/thermal_core.h28
-rw-r--r--drivers/thermal/thermal_helpers.c70
-rw-r--r--drivers/thermal/thermal_mmio.c4
-rw-r--r--drivers/thermal/thermal_netlink.c24
-rw-r--r--drivers/thermal/thermal_netlink.h3
-rw-r--r--drivers/thermal/thermal_of.c116
-rw-r--r--drivers/thermal/thermal_sysfs.c135
-rw-r--r--drivers/thermal/thermal_trip.c182
-rw-r--r--drivers/thermal/ti-soc-thermal/ti-thermal.h15
-rw-r--r--drivers/thermal/uniphier_thermal.c31
-rw-r--r--drivers/thunderbolt/acpi.c13
-rw-r--r--drivers/thunderbolt/ctl.c52
-rw-r--r--drivers/thunderbolt/ctl.h2
-rw-r--r--drivers/thunderbolt/debugfs.c5
-rw-r--r--drivers/thunderbolt/switch.c46
-rw-r--r--drivers/thunderbolt/tb.c508
-rw-r--r--drivers/thunderbolt/tb.h41
-rw-r--r--drivers/thunderbolt/tb_msgs.h11
-rw-r--r--drivers/thunderbolt/tb_regs.h36
-rw-r--r--drivers/thunderbolt/tunnel.c506
-rw-r--r--drivers/thunderbolt/tunnel.h18
-rw-r--r--drivers/thunderbolt/usb4.c572
-rw-r--r--drivers/thunderbolt/xdomain.c6
-rw-r--r--drivers/tty/amiserial.c12
-rw-r--r--drivers/tty/hvc/hvc_console.c4
-rw-r--r--drivers/tty/hvc/hvc_console.h2
-rw-r--r--drivers/tty/hvc/hvc_iucv.c6
-rw-r--r--drivers/tty/hvc/hvcs.c91
-rw-r--r--drivers/tty/moxa.c82
-rw-r--r--drivers/tty/mxser.c11
-rw-r--r--drivers/tty/n_gsm.c160
-rw-r--r--drivers/tty/serdev/core.c2
-rw-r--r--drivers/tty/serial/8250/8250_dfl.c167
-rw-r--r--drivers/tty/serial/8250/8250_dma.c21
-rw-r--r--drivers/tty/serial/8250/8250_early.c4
-rw-r--r--drivers/tty/serial/8250/8250_pci.c25
-rw-r--r--drivers/tty/serial/8250/8250_pci1xxxx.c494
-rw-r--r--drivers/tty/serial/8250/8250_pcilib.c40
-rw-r--r--drivers/tty/serial/8250/8250_pcilib.h15
-rw-r--r--drivers/tty/serial/8250/8250_port.c57
-rw-r--r--drivers/tty/serial/8250/Kconfig27
-rw-r--r--drivers/tty/serial/8250/Makefile3
-rw-r--r--drivers/tty/serial/Kconfig24
-rw-r--r--drivers/tty/serial/Makefile2
-rw-r--r--drivers/tty/serial/arc_uart.c7
-rw-r--r--drivers/tty/serial/earlycon-semihost.c (renamed from drivers/tty/serial/earlycon-arm-semihost.c)25
-rw-r--r--drivers/tty/serial/earlycon.c9
-rw-r--r--drivers/tty/serial/fsl_lpuart.c119
-rw-r--r--drivers/tty/serial/imx.c308
-rw-r--r--drivers/tty/serial/liteuart.c241
-rw-r--r--drivers/tty/serial/max3100.c2
-rw-r--r--drivers/tty/serial/max310x.c3
-rw-r--r--drivers/tty/serial/msm_serial.c1
-rw-r--r--drivers/tty/serial/pch_uart.c2
-rw-r--r--drivers/tty/serial/pic32_uart.c2
-rw-r--r--drivers/tty/serial/qcom_geni_serial.c638
-rw-r--r--drivers/tty/serial/samsung_tty.c199
-rw-r--r--drivers/tty/serial/sc16is7xx.c51
-rw-r--r--drivers/tty/serial/sccnxp.c12
-rw-r--r--drivers/tty/serial/serial-tegra.c7
-rw-r--r--drivers/tty/serial/serial_core.c77
-rw-r--r--drivers/tty/serial/stm32-usart.c39
-rw-r--r--drivers/tty/serial/sunhv.c8
-rw-r--r--drivers/tty/serial/ucc_uart.c2
-rw-r--r--drivers/tty/synclink_gt.c21
-rw-r--r--drivers/tty/tty_io.c8
-rw-r--r--drivers/tty/tty_ioctl.c8
-rw-r--r--drivers/tty/tty_port.c22
-rw-r--r--drivers/tty/vt/vc_screen.c9
-rw-r--r--drivers/tty/vt/vt.c310
-rw-r--r--drivers/ufs/core/Makefile2
-rw-r--r--drivers/ufs/core/ufs-mcq.c431
-rw-r--r--drivers/ufs/core/ufs_bsg.c144
-rw-r--r--drivers/ufs/core/ufshcd-priv.h109
-rw-r--r--drivers/ufs/core/ufshcd.c863
-rw-r--r--drivers/ufs/core/ufshpb.c4
-rw-r--r--drivers/ufs/host/Kconfig19
-rw-r--r--drivers/ufs/host/Makefile1
-rw-r--r--drivers/ufs/host/ufs-exynos.c10
-rw-r--r--drivers/ufs/host/ufs-qcom.c548
-rw-r--r--drivers/ufs/host/ufs-qcom.h97
-rw-r--r--drivers/ufs/host/ufs-sprd.c458
-rw-r--r--drivers/ufs/host/ufs-sprd.h85
-rw-r--r--drivers/uio/uio.c2
-rw-r--r--drivers/uio/uio_hv_generic.c5
-rw-r--r--drivers/usb/cdns3/cdnsp-gadget.c2
-rw-r--r--drivers/usb/cdns3/cdnsp-gadget.h4
-rw-r--r--drivers/usb/cdns3/cdnsp-ring.c110
-rw-r--r--drivers/usb/chipidea/ci_hdrc_imx.c10
-rw-r--r--drivers/usb/chipidea/debug.c2
-rw-r--r--drivers/usb/chipidea/usbmisc_imx.c6
-rw-r--r--drivers/usb/class/cdc-acm.c4
-rw-r--r--drivers/usb/common/ulpi.c18
-rw-r--r--drivers/usb/core/devio.c3
-rw-r--r--drivers/usb/core/driver.c6
-rw-r--r--drivers/usb/core/hub.c5
-rw-r--r--drivers/usb/core/message.c8
-rw-r--r--drivers/usb/core/quirks.c3
-rw-r--r--drivers/usb/core/sysfs.c5
-rw-r--r--drivers/usb/core/usb.c10
-rw-r--r--drivers/usb/dwc3/core.h2
-rw-r--r--drivers/usb/dwc3/debug.h3
-rw-r--r--drivers/usb/dwc3/debugfs.c19
-rw-r--r--drivers/usb/dwc3/dwc3-pci.c4
-rw-r--r--drivers/usb/dwc3/dwc3-qcom.c2
-rw-r--r--drivers/usb/dwc3/dwc3-xilinx.c1
-rw-r--r--drivers/usb/dwc3/gadget.c4
-rw-r--r--drivers/usb/early/xhci-dbc.c8
-rw-r--r--drivers/usb/fotg210/Kconfig2
-rw-r--r--drivers/usb/fotg210/fotg210-core.c83
-rw-r--r--drivers/usb/fotg210/fotg210-hcd.c69
-rw-r--r--drivers/usb/fotg210/fotg210-hcd.h1
-rw-r--r--drivers/usb/fotg210/fotg210-udc.c162
-rw-r--r--drivers/usb/fotg210/fotg210-udc.h4
-rw-r--r--drivers/usb/fotg210/fotg210.h27
-rw-r--r--drivers/usb/gadget/Kconfig1
-rw-r--r--drivers/usb/gadget/composite.c102
-rw-r--r--drivers/usb/gadget/configfs.c500
-rw-r--r--drivers/usb/gadget/function/f_fs.c12
-rw-r--r--drivers/usb/gadget/function/f_uac2.c1
-rw-r--r--drivers/usb/gadget/function/f_uvc.c150
-rw-r--r--drivers/usb/gadget/function/u_ether.c42
-rw-r--r--drivers/usb/gadget/function/u_serial.c23
-rw-r--r--drivers/usb/gadget/function/u_uvc.h18
-rw-r--r--drivers/usb/gadget/function/uvc.h4
-rw-r--r--drivers/usb/gadget/function/uvc_configfs.c1106
-rw-r--r--drivers/usb/gadget/function/uvc_configfs.h52
-rw-r--r--drivers/usb/gadget/function/uvc_v4l2.c16
-rw-r--r--drivers/usb/gadget/legacy/hid.c7
-rw-r--r--drivers/usb/gadget/udc/Kconfig48
-rw-r--r--drivers/usb/gadget/udc/Makefile4
-rw-r--r--drivers/usb/gadget/udc/bcm63xx_udc.c14
-rw-r--r--drivers/usb/gadget/udc/fsl_qe_udc.c1
-rw-r--r--drivers/usb/gadget/udc/fsl_udc_core.c1
-rw-r--r--drivers/usb/gadget/udc/fusb300_udc.c11
-rw-r--r--drivers/usb/gadget/udc/goku_udc.c1
-rw-r--r--drivers/usb/gadget/udc/gr_udc.c3
-rw-r--r--drivers/usb/gadget/udc/lpc32xx_udc.c2
-rw-r--r--drivers/usb/gadget/udc/m66592-udc.c1
-rw-r--r--drivers/usb/gadget/udc/max3420_udc.c1
-rw-r--r--drivers/usb/gadget/udc/mv_u3d_core.c1
-rw-r--r--drivers/usb/gadget/udc/mv_udc_core.c1
-rw-r--r--drivers/usb/gadget/udc/net2272.c1
-rw-r--r--drivers/usb/gadget/udc/net2280.c1
-rw-r--r--drivers/usb/gadget/udc/omap_udc.c25
-rw-r--r--drivers/usb/gadget/udc/pch_udc.c1
-rw-r--r--drivers/usb/gadget/udc/pxa25x_udc.c64
-rw-r--r--drivers/usb/gadget/udc/pxa27x_udc.c2
-rw-r--r--drivers/usb/gadget/udc/renesas_usb3.c136
-rw-r--r--drivers/usb/gadget/udc/renesas_usbf.c3406
-rw-r--r--drivers/usb/gadget/udc/rzv2m_usb3drd.c139
-rw-r--r--drivers/usb/gadget/udc/s3c-hsudc.c1319
-rw-r--r--drivers/usb/gadget/udc/s3c2410_udc.c1980
-rw-r--r--drivers/usb/gadget/udc/s3c2410_udc.h99
-rw-r--r--drivers/usb/gadget/udc/s3c2410_udc_regs.h146
-rw-r--r--drivers/usb/gadget/udc/snps_udc_core.c1
-rw-r--r--drivers/usb/gadget/udc/tegra-xudc.c50
-rw-r--r--drivers/usb/host/Kconfig54
-rw-r--r--drivers/usb/host/Makefile8
-rw-r--r--drivers/usb/host/ehci-exynos.c23
-rw-r--r--drivers/usb/host/ehci-fsl.c2
-rw-r--r--drivers/usb/host/fsl-mph-dr-of.c3
-rw-r--r--drivers/usb/host/isp116x-hcd.c2
-rw-r--r--drivers/usb/host/isp1362-hcd.c2
-rw-r--r--drivers/usb/host/max3421-hcd.c15
-rw-r--r--drivers/usb/host/ohci-exynos.c23
-rw-r--r--drivers/usb/host/ohci-hcd.c18
-rw-r--r--drivers/usb/host/ohci-omap.c14
-rw-r--r--drivers/usb/host/ohci-pxa27x.c9
-rw-r--r--drivers/usb/host/ohci-sa1111.c5
-rw-r--r--drivers/usb/host/ohci-tmio.c364
-rw-r--r--drivers/usb/host/sl811-hcd.c2
-rw-r--r--drivers/usb/host/uhci-hcd.c6
-rw-r--r--drivers/usb/host/xhci-debugfs.c2
-rw-r--r--drivers/usb/host/xhci-hub.c257
-rw-r--r--drivers/usb/host/xhci-mem.c338
-rw-r--r--drivers/usb/host/xhci-mvebu.c2
-rw-r--r--drivers/usb/host/xhci-plat.c144
-rw-r--r--drivers/usb/host/xhci-plat.h7
-rw-r--r--drivers/usb/host/xhci-rcar.c102
-rw-r--r--drivers/usb/host/xhci-rcar.h55
-rw-r--r--drivers/usb/host/xhci-ring.c88
-rw-r--r--drivers/usb/host/xhci-rzv2m.c38
-rw-r--r--drivers/usb/host/xhci-rzv2m.h16
-rw-r--r--drivers/usb/host/xhci-tegra.c392
-rw-r--r--drivers/usb/host/xhci.c81
-rw-r--r--drivers/usb/host/xhci.h40
-rw-r--r--drivers/usb/misc/onboard_usb_hub.c4
-rw-r--r--drivers/usb/misc/onboard_usb_hub.h11
-rw-r--r--drivers/usb/mon/mon_bin.c3
-rw-r--r--drivers/usb/mtu3/mtu3_gadget.c3
-rw-r--r--drivers/usb/mtu3/mtu3_hw_regs.h1
-rw-r--r--drivers/usb/mtu3/mtu3_qmu.c7
-rw-r--r--drivers/usb/musb/da8xx.c4
-rw-r--r--drivers/usb/musb/mediatek.c3
-rw-r--r--drivers/usb/musb/sunxi.c99
-rw-r--r--drivers/usb/phy/Kconfig17
-rw-r--r--drivers/usb/phy/Makefile1
-rw-r--r--drivers/usb/phy/phy-isp1301-omap.c1639
-rw-r--r--drivers/usb/phy/phy.c6
-rw-r--r--drivers/usb/roles/class.c3
-rw-r--r--drivers/usb/serial/console.c2
-rw-r--r--drivers/usb/serial/option.c4
-rw-r--r--drivers/usb/serial/usb-serial.c6
-rw-r--r--drivers/usb/storage/ene_ub6250.c2
-rw-r--r--drivers/usb/typec/altmodes/displayport.c20
-rw-r--r--drivers/usb/typec/bus.c37
-rw-r--r--drivers/usb/typec/bus.h2
-rw-r--r--drivers/usb/typec/class.c17
-rw-r--r--drivers/usb/typec/hd3ss3220.c29
-rw-r--r--drivers/usb/typec/mux/Kconfig6
-rw-r--r--drivers/usb/typec/mux/Makefile1
-rw-r--r--drivers/usb/typec/mux/gpio-sbu-mux.c172
-rw-r--r--drivers/usb/typec/mux/intel_pmc_mux.c13
-rw-r--r--drivers/usb/typec/pd.c9
-rw-r--r--drivers/usb/typec/retimer.h2
-rw-r--r--drivers/usb/typec/tcpm/Makefile1
-rw-r--r--drivers/usb/typec/tcpm/maxim_contaminant.c387
-rw-r--r--drivers/usb/typec/tcpm/tcpci.c19
-rw-r--r--drivers/usb/typec/tcpm/tcpci_maxim.h89
-rw-r--r--drivers/usb/typec/tcpm/tcpci_maxim_core.c (renamed from drivers/usb/typec/tcpm/tcpci_maxim.c)53
-rw-r--r--drivers/usb/typec/tcpm/tcpm.c60
-rw-r--r--drivers/usb/typec/tipd/core.c38
-rw-r--r--drivers/usb/typec/ucsi/ucsi.c173
-rw-r--r--drivers/usb/typec/ucsi/ucsi.h8
-rw-r--r--drivers/usb/typec/ucsi/ucsi_ccg.c22
-rw-r--r--drivers/vdpa/ifcvf/ifcvf_main.c2
-rw-r--r--drivers/vdpa/vdpa_user/iova_domain.c2
-rw-r--r--drivers/vdpa/vdpa_user/iova_domain.h1
-rw-r--r--drivers/vfio/Kconfig2
-rw-r--r--drivers/vfio/container.c7
-rw-r--r--drivers/vfio/group.c7
-rw-r--r--drivers/vfio/iommufd.c19
-rw-r--r--drivers/vfio/pci/vfio_pci_core.c2
-rw-r--r--drivers/vfio/vfio.h8
-rw-r--r--drivers/vfio/vfio_iommu_type1.c25
-rw-r--r--drivers/vfio/vfio_main.c7
-rw-r--r--drivers/vhost/net.c3
-rw-r--r--drivers/vhost/scsi.c21
-rw-r--r--drivers/vhost/vdpa.c4
-rw-r--r--drivers/vhost/vhost.c3
-rw-r--r--drivers/vhost/vhost.h1
-rw-r--r--drivers/vhost/vringh.c5
-rw-r--r--drivers/vhost/vsock.c214
-rw-r--r--drivers/video/backlight/Kconfig22
-rw-r--r--drivers/video/backlight/Makefile3
-rw-r--r--drivers/video/backlight/aat2870_bl.c7
-rw-r--r--drivers/video/backlight/arcxcnn_bl.c5
-rw-r--r--drivers/video/backlight/backlight.c2
-rw-r--r--drivers/video/backlight/ipaq_micro_bl.c7
-rw-r--r--drivers/video/backlight/ktd253-backlight.c9
-rw-r--r--drivers/video/backlight/ktz8866.c208
-rw-r--r--drivers/video/backlight/locomolcd.c10
-rw-r--r--drivers/video/backlight/pwm_bl.c68
-rw-r--r--drivers/video/backlight/sky81452-backlight.c2
-rw-r--r--drivers/video/backlight/tosa_bl.c172
-rw-r--r--drivers/video/backlight/tosa_bl.h8
-rw-r--r--drivers/video/backlight/tosa_lcd.c284
-rw-r--r--drivers/video/console/newport_con.c9
-rw-r--r--drivers/video/console/sticon.c9
-rw-r--r--drivers/video/console/vgacon.c8
-rw-r--r--drivers/video/fbdev/68328fb.c2
-rw-r--r--drivers/video/fbdev/Kconfig72
-rw-r--r--drivers/video/fbdev/Makefile3
-rw-r--r--drivers/video/fbdev/atmel_lcdfb.c22
-rw-r--r--drivers/video/fbdev/aty/aty128fb.c6
-rw-r--r--drivers/video/fbdev/aty/atyfb_base.c8
-rw-r--r--drivers/video/fbdev/aty/radeon_backlight.c6
-rw-r--r--drivers/video/fbdev/clps711x-fb.c10
-rw-r--r--drivers/video/fbdev/core/fb_defio.c18
-rw-r--r--drivers/video/fbdev/core/fbcon.c83
-rw-r--r--drivers/video/fbdev/core/fbmem.c37
-rw-r--r--drivers/video/fbdev/core/fbmon.c2
-rw-r--r--drivers/video/fbdev/core/fbsysfs.c1
-rw-r--r--drivers/video/fbdev/efifb.c35
-rw-r--r--drivers/video/fbdev/hyperv_fb.c22
-rw-r--r--drivers/video/fbdev/mx3fb.c7
-rw-r--r--drivers/video/fbdev/nvidia/nv_backlight.c8
-rw-r--r--drivers/video/fbdev/nvidia/nvidia.c81
-rw-r--r--drivers/video/fbdev/offb.c33
-rw-r--r--drivers/video/fbdev/omap/Kconfig9
-rw-r--r--drivers/video/fbdev/omap/Makefile6
-rw-r--r--drivers/video/fbdev/omap/lcd_h3.c82
-rw-r--r--drivers/video/fbdev/omap/lcd_htcherald.c59
-rw-r--r--drivers/video/fbdev/omap/lcd_inn1510.c69
-rw-r--r--drivers/video/fbdev/omap/lcd_inn1610.c99
-rw-r--r--drivers/video/fbdev/omap/lcd_palmtt.c65
-rw-r--r--drivers/video/fbdev/omap/lcd_palmz71.c59
-rw-r--r--drivers/video/fbdev/omap/lcdc.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c8
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/display-sysfs.c7
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/manager-sysfs.c7
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/overlay-sysfs.c3
-rw-r--r--drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c3
-rw-r--r--drivers/video/fbdev/riva/fbdev.c8
-rw-r--r--drivers/video/fbdev/s3c2410fb-regs-lcd.h143
-rw-r--r--drivers/video/fbdev/s3c2410fb.c1142
-rw-r--r--drivers/video/fbdev/s3c2410fb.h48
-rw-r--r--drivers/video/fbdev/sa1100fb.c1
-rw-r--r--drivers/video/fbdev/simplefb.c19
-rw-r--r--drivers/video/fbdev/tmiofb.c1040
-rw-r--r--drivers/video/fbdev/vesafb.c37
-rw-r--r--drivers/video/fbdev/vga16fb.c15
-rw-r--r--drivers/video/fbdev/w100fb.c1644
-rw-r--r--drivers/video/fbdev/w100fb.h924
-rw-r--r--drivers/virtio/virtio.c4
-rw-r--r--drivers/w1/masters/Kconfig7
-rw-r--r--drivers/w1/masters/Makefile1
-rw-r--r--drivers/w1/masters/ds1wm.c675
-rw-r--r--drivers/w1/w1.c10
-rw-r--r--drivers/watchdog/Kconfig9
-rw-r--r--drivers/watchdog/diag288_wdt.c161
-rw-r--r--drivers/watchdog/s3c2410_wdt.c84
-rw-r--r--drivers/watchdog/wdt285.c2
-rw-r--r--drivers/xen/efi.c61
-rw-r--r--drivers/xen/events/events_base.c9
-rw-r--r--drivers/xen/gntalloc.c2
-rw-r--r--drivers/xen/gntdev.c4
-rw-r--r--drivers/xen/grant-dma-iommu.c11
-rw-r--r--drivers/xen/platform-pci.c5
-rw-r--r--drivers/xen/privcmd-buf.c2
-rw-r--r--drivers/xen/privcmd.c4
-rw-r--r--drivers/xen/pvcalls-back.c10
-rw-r--r--drivers/xen/sys-hypervisor.c71
-rw-r--r--drivers/xen/xen-front-pgdir-shbuf.c2
-rw-r--r--drivers/xen/xenbus/xenbus_probe_backend.c8
-rw-r--r--drivers/xen/xenbus/xenbus_probe_frontend.c4
-rw-r--r--drivers/zorro/zorro-driver.c4
4791 files changed, 257162 insertions, 156122 deletions
diff --git a/drivers/Makefile b/drivers/Makefile
index 9beeee520073..7d01fa2b2b7e 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -98,7 +98,6 @@ obj-$(CONFIG_DIO) += dio/
obj-$(CONFIG_SBUS) += sbus/
obj-$(CONFIG_ZORRO) += zorro/
obj-$(CONFIG_ATA_OVER_ETH) += block/aoe/
-obj-$(CONFIG_PARIDE) += block/paride/
obj-$(CONFIG_TC) += tc/
obj-$(CONFIG_USB_PHY) += usb/
obj-$(CONFIG_USB) += usb/
@@ -137,7 +136,7 @@ obj-$(CONFIG_CRYPTO) += crypto/
obj-$(CONFIG_SUPERH) += sh/
obj-y += clocksource/
obj-$(CONFIG_DCA) += dca/
-obj-$(CONFIG_HID) += hid/
+obj-$(CONFIG_HID_SUPPORT) += hid/
obj-$(CONFIG_PPC_PS3) += ps3/
obj-$(CONFIG_OF) += of/
obj-$(CONFIG_SSB) += ssb/
@@ -189,3 +188,4 @@ obj-$(CONFIG_COUNTER) += counter/
obj-$(CONFIG_MOST) += most/
obj-$(CONFIG_PECI) += peci/
obj-$(CONFIG_HTE) += hte/
+obj-$(CONFIG_DRM_ACCEL) += accel/
diff --git a/drivers/accel/Kconfig b/drivers/accel/Kconfig
index c9ce849b2984..c437206aa3f1 100644
--- a/drivers/accel/Kconfig
+++ b/drivers/accel/Kconfig
@@ -6,9 +6,10 @@
# as, but not limited to, Machine-Learning and Deep-Learning acceleration
# devices
#
+if DRM
+
menuconfig DRM_ACCEL
bool "Compute Acceleration Framework"
- depends on DRM
help
Framework for device drivers of compute acceleration devices, such
as, but not limited to, Machine-Learning and Deep-Learning
@@ -22,3 +23,8 @@ menuconfig DRM_ACCEL
major number than GPUs, and will be exposed to user-space using
different device files, called accel/accel* (in /dev, sysfs
and debugfs).
+
+source "drivers/accel/habanalabs/Kconfig"
+source "drivers/accel/ivpu/Kconfig"
+
+endif
diff --git a/drivers/accel/Makefile b/drivers/accel/Makefile
new file mode 100644
index 000000000000..07aa77aed1c8
--- /dev/null
+++ b/drivers/accel/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+obj-y += habanalabs/
+obj-y += ivpu/
diff --git a/drivers/misc/habanalabs/Kconfig b/drivers/accel/habanalabs/Kconfig
index bd01d0d940c0..be85336107f9 100644
--- a/drivers/misc/habanalabs/Kconfig
+++ b/drivers/accel/habanalabs/Kconfig
@@ -3,8 +3,10 @@
# HabanaLabs AI accelerators driver
#
-config HABANA_AI
- tristate "HabanaAI accelerators (habanalabs)"
+config DRM_ACCEL_HABANALABS
+ tristate "HabanaLabs AI accelerators"
+ depends on DRM_ACCEL
+ depends on X86_64
depends on PCI && HAS_IOMEM
select GENERIC_ALLOCATOR
select HWMON
@@ -19,7 +21,7 @@ config HABANA_AI
the user to submit workloads to the devices.
The user-space interface is described in
- include/uapi/misc/habanalabs.h
+ include/uapi/drm/habanalabs_accel.h
If unsure, say N.
diff --git a/drivers/misc/habanalabs/Makefile b/drivers/accel/habanalabs/Makefile
index a48a9e0969ed..98510cdd5066 100644
--- a/drivers/misc/habanalabs/Makefile
+++ b/drivers/accel/habanalabs/Makefile
@@ -3,7 +3,7 @@
# Makefile for HabanaLabs AI accelerators driver
#
-obj-$(CONFIG_HABANA_AI) := habanalabs.o
+obj-$(CONFIG_DRM_ACCEL_HABANALABS) := habanalabs.o
include $(src)/common/Makefile
habanalabs-y += $(HL_COMMON_FILES)
diff --git a/drivers/misc/habanalabs/common/Makefile b/drivers/accel/habanalabs/common/Makefile
index e6abffea9f87..e6abffea9f87 100644
--- a/drivers/misc/habanalabs/common/Makefile
+++ b/drivers/accel/habanalabs/common/Makefile
diff --git a/drivers/misc/habanalabs/common/asid.c b/drivers/accel/habanalabs/common/asid.c
index c9c2619cc43d..c9c2619cc43d 100644
--- a/drivers/misc/habanalabs/common/asid.c
+++ b/drivers/accel/habanalabs/common/asid.c
diff --git a/drivers/misc/habanalabs/common/command_buffer.c b/drivers/accel/habanalabs/common/command_buffer.c
index 2b332991ac6a..3a0535ac28b1 100644
--- a/drivers/misc/habanalabs/common/command_buffer.c
+++ b/drivers/accel/habanalabs/common/command_buffer.c
@@ -5,7 +5,7 @@
* All Rights Reserved.
*/
-#include <uapi/misc/habanalabs.h>
+#include <uapi/drm/habanalabs_accel.h>
#include "habanalabs.h"
#include <linux/mm.h>
@@ -88,6 +88,7 @@ static void cb_fini(struct hl_device *hdev, struct hl_cb *cb)
static void cb_do_release(struct hl_device *hdev, struct hl_cb *cb)
{
if (cb->is_pool) {
+ atomic_set(&cb->is_handle_destroyed, 0);
spin_lock(&hdev->cb_pool_lock);
list_add(&cb->pool_list, &hdev->cb_pool);
spin_unlock(&hdev->cb_pool_lock);
@@ -298,8 +299,25 @@ int hl_cb_create(struct hl_device *hdev, struct hl_mem_mgr *mmg,
int hl_cb_destroy(struct hl_mem_mgr *mmg, u64 cb_handle)
{
+ struct hl_cb *cb;
int rc;
+ cb = hl_cb_get(mmg, cb_handle);
+ if (!cb) {
+ dev_dbg(mmg->dev, "CB destroy failed, no CB was found for handle %#llx\n",
+ cb_handle);
+ return -EINVAL;
+ }
+
+ /* Make sure that CB handle isn't destroyed more than once */
+ rc = atomic_cmpxchg(&cb->is_handle_destroyed, 0, 1);
+ hl_cb_put(cb);
+ if (rc) {
+ dev_dbg(mmg->dev, "CB destroy failed, handle %#llx was already destroyed\n",
+ cb_handle);
+ return -EINVAL;
+ }
+
rc = hl_mmap_mem_buf_put_handle(mmg, cb_handle);
if (rc < 0)
return rc; /* Invalid handle */
@@ -350,7 +368,7 @@ int hl_cb_ioctl(struct hl_fpriv *hpriv, void *data)
int rc;
if (!hl_device_operational(hdev, &status)) {
- dev_warn_ratelimited(hdev->dev,
+ dev_dbg_ratelimited(hdev->dev,
"Device is %s. Can't execute CB IOCTL\n",
hdev->status[status]);
return -EBUSY;
diff --git a/drivers/misc/habanalabs/common/command_submission.c b/drivers/accel/habanalabs/common/command_submission.c
index ea0e5101c10e..8270db0a72a2 100644
--- a/drivers/misc/habanalabs/common/command_submission.c
+++ b/drivers/accel/habanalabs/common/command_submission.c
@@ -5,7 +5,7 @@
* All Rights Reserved.
*/
-#include <uapi/misc/habanalabs.h>
+#include <uapi/drm/habanalabs_accel.h>
#include "habanalabs.h"
#include <linux/uaccess.h>
@@ -13,7 +13,8 @@
#define HL_CS_FLAGS_TYPE_MASK (HL_CS_FLAGS_SIGNAL | HL_CS_FLAGS_WAIT | \
HL_CS_FLAGS_COLLECTIVE_WAIT | HL_CS_FLAGS_RESERVE_SIGNALS_ONLY | \
- HL_CS_FLAGS_UNRESERVE_SIGNALS_ONLY | HL_CS_FLAGS_ENGINE_CORE_COMMAND)
+ HL_CS_FLAGS_UNRESERVE_SIGNALS_ONLY | HL_CS_FLAGS_ENGINE_CORE_COMMAND | \
+ HL_CS_FLAGS_FLUSH_PCI_HBW_WRITES)
#define MAX_TS_ITER_NUM 10
@@ -397,8 +398,16 @@ static void hl_complete_job(struct hl_device *hdev, struct hl_cs_job *job)
* flow by calling 'hl_hw_queue_update_ci'.
*/
if (cs_needs_completion(cs) &&
- (job->queue_type == QUEUE_TYPE_EXT || job->queue_type == QUEUE_TYPE_HW))
+ (job->queue_type == QUEUE_TYPE_EXT || job->queue_type == QUEUE_TYPE_HW)) {
+
+ /* In CS based completions, the timestamp is already available,
+ * so no need to extract it from job
+ */
+ if (hdev->asic_prop.completion_mode == HL_COMPLETION_MODE_JOB)
+ cs->completion_timestamp = job->timestamp;
+
cs_put(cs);
+ }
hl_cs_job_put(job);
}
@@ -775,7 +784,7 @@ out:
}
if (cs->timestamp) {
- cs->fence->timestamp = ktime_get();
+ cs->fence->timestamp = cs->completion_timestamp;
hl_push_cs_outcome(hdev, &cs->ctx->outcome_store, cs->sequence,
cs->fence->timestamp, cs->fence->error);
}
@@ -1117,6 +1126,27 @@ void hl_release_pending_user_interrupts(struct hl_device *hdev)
wake_pending_user_interrupt_threads(interrupt);
}
+static void force_complete_cs(struct hl_device *hdev)
+{
+ struct hl_cs *cs;
+
+ spin_lock(&hdev->cs_mirror_lock);
+
+ list_for_each_entry(cs, &hdev->cs_mirror_list, mirror_node) {
+ cs->fence->error = -EIO;
+ complete_all(&cs->fence->completion);
+ }
+
+ spin_unlock(&hdev->cs_mirror_lock);
+}
+
+void hl_abort_waitings_for_completion(struct hl_device *hdev)
+{
+ force_complete_cs(hdev);
+ force_complete_multi_cs(hdev);
+ hl_release_pending_user_interrupts(hdev);
+}
+
static void job_wq_completion(struct work_struct *work)
{
struct hl_cs_job *job = container_of(work, struct hl_cs_job,
@@ -1274,6 +1304,8 @@ static enum hl_cs_type hl_cs_get_cs_type(u32 cs_type_flags)
return CS_UNRESERVE_SIGNALS;
else if (cs_type_flags & HL_CS_FLAGS_ENGINE_CORE_COMMAND)
return CS_TYPE_ENGINE_CORE;
+ else if (cs_type_flags & HL_CS_FLAGS_FLUSH_PCI_HBW_WRITES)
+ return CS_TYPE_FLUSH_PCI_HBW_WRITES;
else
return CS_TYPE_DEFAULT;
}
@@ -1286,6 +1318,13 @@ static int hl_cs_sanity_checks(struct hl_fpriv *hpriv, union hl_cs_args *args)
enum hl_device_status status;
enum hl_cs_type cs_type;
bool is_sync_stream;
+ int i;
+
+ for (i = 0 ; i < sizeof(args->in.pad) ; i++)
+ if (args->in.pad[i]) {
+ dev_dbg(hdev->dev, "Padding bytes must be 0\n");
+ return -EINVAL;
+ }
if (!hl_device_operational(hdev, &status)) {
return -EBUSY;
@@ -2422,6 +2461,21 @@ static int cs_ioctl_engine_cores(struct hl_fpriv *hpriv, u64 engine_cores,
return rc;
}
+static int cs_ioctl_flush_pci_hbw_writes(struct hl_fpriv *hpriv)
+{
+ struct hl_device *hdev = hpriv->hdev;
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+
+ if (!prop->hbw_flush_reg) {
+ dev_dbg(hdev->dev, "HBW flush is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ RREG32(prop->hbw_flush_reg);
+
+ return 0;
+}
+
int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
{
union hl_cs_args *args = data;
@@ -2478,6 +2532,9 @@ int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
rc = cs_ioctl_engine_cores(hpriv, args->in.engine_cores,
args->in.num_engine_cores, args->in.core_command);
break;
+ case CS_TYPE_FLUSH_PCI_HBW_WRITES:
+ rc = cs_ioctl_flush_pci_hbw_writes(hpriv);
+ break;
default:
rc = cs_ioctl_default(hpriv, chunks, num_chunks, &cs_seq,
args->in.cs_flags,
@@ -2569,7 +2626,9 @@ report_results:
*status = CS_WAIT_STATUS_BUSY;
}
- if (error == -ETIMEDOUT || error == -EIO)
+ if (completion_rc == -ERESTARTSYS)
+ rc = completion_rc;
+ else if (error == -ETIMEDOUT || error == -EIO)
rc = error;
return rc;
@@ -2699,7 +2758,8 @@ static int hl_cs_poll_fences(struct multi_cs_data *mcs_data, struct multi_cs_com
break;
default:
dev_err(hdev->dev, "Invalid fence status\n");
- return -EINVAL;
+ rc = -EINVAL;
+ break;
}
}
@@ -2828,6 +2888,9 @@ static int hl_wait_multi_cs_completion(struct multi_cs_data *mcs_data,
if (completion_rc > 0)
mcs_data->timestamp = mcs_compl->timestamp;
+ if (completion_rc == -ERESTARTSYS)
+ return completion_rc;
+
mcs_data->wait_status = completion_rc;
return 0;
@@ -2870,7 +2933,13 @@ static int hl_multi_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data)
u32 size_to_copy;
u64 *cs_seq_arr;
u8 seq_arr_len;
- int rc;
+ int rc, i;
+
+ for (i = 0 ; i < sizeof(args->in.pad) ; i++)
+ if (args->in.pad[i]) {
+ dev_dbg(hdev->dev, "Padding bytes must be 0\n");
+ return -EINVAL;
+ }
if (!hdev->supports_wait_for_multi_cs) {
dev_err(hdev->dev, "Wait for multi CS is not supported\n");
@@ -2973,15 +3042,15 @@ put_ctx:
free_seq_arr:
kfree(cs_seq_arr);
- if (rc)
- return rc;
-
- if (mcs_data.wait_status == -ERESTARTSYS) {
+ if (rc == -ERESTARTSYS) {
dev_err_ratelimited(hdev->dev,
"user process got signal while waiting for Multi-CS\n");
- return -EINTR;
+ rc = -EINTR;
}
+ if (rc)
+ return rc;
+
/* update output args */
memset(args, 0, sizeof(*args));
@@ -3119,19 +3188,18 @@ start_over:
goto start_over;
}
} else {
+ /* Fill up the new registration node info */
+ requested_offset_record->ts_reg_info.buf = buf;
+ requested_offset_record->ts_reg_info.cq_cb = cq_cb;
+ requested_offset_record->ts_reg_info.timestamp_kernel_addr =
+ (u64 *) ts_buff->user_buff_address + ts_offset;
+ requested_offset_record->cq_kernel_addr =
+ (u64 *) cq_cb->kernel_address + cq_offset;
+ requested_offset_record->cq_target_value = target_value;
+
spin_unlock_irqrestore(wait_list_lock, flags);
}
- /* Fill up the new registration node info */
- requested_offset_record->ts_reg_info.in_use = 1;
- requested_offset_record->ts_reg_info.buf = buf;
- requested_offset_record->ts_reg_info.cq_cb = cq_cb;
- requested_offset_record->ts_reg_info.timestamp_kernel_addr =
- (u64 *) ts_buff->user_buff_address + ts_offset;
- requested_offset_record->cq_kernel_addr =
- (u64 *) cq_cb->kernel_address + cq_offset;
- requested_offset_record->cq_target_value = target_value;
-
*pend = requested_offset_record;
dev_dbg(buf->mmg->dev, "Found available node in TS kernel CB %p\n",
@@ -3179,7 +3247,7 @@ static int _hl_interrupt_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
goto put_cq_cb;
}
- /* Find first available record */
+ /* get ts buffer record */
rc = ts_buff_get_kernel_ts_record(buf, cq_cb, ts_offset,
cq_counters_offset, target_value,
&interrupt->wait_list_lock, &pend);
@@ -3227,7 +3295,19 @@ static int _hl_interrupt_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
* Note that we cannot have sorted list by target value,
* in order to shorten the list pass loop, since
* same list could have nodes for different cq counter handle.
+ * Note:
+ * Mark ts buff offset as in use here in the spinlock protection area
+ * to avoid getting in the re-use section in ts_buff_get_kernel_ts_record
+ * before adding the node to the list. this scenario might happen when
+ * multiple threads are racing on same offset and one thread could
+ * set the ts buff in ts_buff_get_kernel_ts_record then the other thread
+ * takes over and get to ts_buff_get_kernel_ts_record and then we will try
+ * to re-use the same ts buff offset, and will try to delete a non existing
+ * node from the list.
*/
+ if (register_ts_record)
+ pend->ts_reg_info.in_use = 1;
+
list_add_tail(&pend->wait_list_node, &interrupt->wait_list_head);
spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
@@ -3489,14 +3569,15 @@ static int hl_interrupt_wait_ioctl(struct hl_fpriv *hpriv, void *data)
int hl_wait_ioctl(struct hl_fpriv *hpriv, void *data)
{
+ struct hl_device *hdev = hpriv->hdev;
union hl_wait_cs_args *args = data;
u32 flags = args->in.flags;
int rc;
- /* If the device is not operational, no point in waiting for any command submission or
- * user interrupt
+ /* If the device is not operational, or if an error has happened and user should release the
+ * device, there is no point in waiting for any command submission or user interrupt.
*/
- if (!hl_device_operational(hpriv->hdev, NULL))
+ if (!hl_device_operational(hpriv->hdev, NULL) || hdev->reset_info.watchdog_active)
return -EBUSY;
if (flags & HL_WAIT_CS_FLAGS_INTERRUPT)
diff --git a/drivers/misc/habanalabs/common/context.c b/drivers/accel/habanalabs/common/context.c
index 9c8b1b37b510..9c8b1b37b510 100644
--- a/drivers/misc/habanalabs/common/context.c
+++ b/drivers/accel/habanalabs/common/context.c
diff --git a/drivers/misc/habanalabs/common/debugfs.c b/drivers/accel/habanalabs/common/debugfs.c
index 945c0e6758ca..945c0e6758ca 100644
--- a/drivers/misc/habanalabs/common/debugfs.c
+++ b/drivers/accel/habanalabs/common/debugfs.c
diff --git a/drivers/misc/habanalabs/common/decoder.c b/drivers/accel/habanalabs/common/decoder.c
index 2aab14d74b53..2aab14d74b53 100644
--- a/drivers/misc/habanalabs/common/decoder.c
+++ b/drivers/accel/habanalabs/common/decoder.c
diff --git a/drivers/misc/habanalabs/common/device.c b/drivers/accel/habanalabs/common/device.c
index 87ab329e65d4..9933e5858a36 100644
--- a/drivers/misc/habanalabs/common/device.c
+++ b/drivers/accel/habanalabs/common/device.c
@@ -7,7 +7,7 @@
#define pr_fmt(fmt) "habanalabs: " fmt
-#include <uapi/misc/habanalabs.h>
+#include <uapi/drm/habanalabs_accel.h>
#include "habanalabs.h"
#include <linux/pci.h>
@@ -428,8 +428,10 @@ static void hpriv_release(struct kref *ref)
*/
reset_device = hdev->reset_upon_device_release || hdev->reset_info.watchdog_active;
- /* Unless device is reset in any case, check idle status and reset if device is not idle */
- if (!reset_device && hdev->pdev && !hdev->pldm)
+ /* Check the device idle status and reset if not idle.
+ * Skip it if already in reset, or if device is going to be reset in any case.
+ */
+ if (!hdev->reset_info.in_reset && !reset_device && hdev->pdev && !hdev->pldm)
device_is_idle = hdev->asic_funcs->is_device_idle(hdev, idle_mask,
HL_BUSY_ENGINES_MASK_EXT_SIZE, NULL);
if (!device_is_idle) {
@@ -511,11 +513,6 @@ static int hl_device_release(struct inode *inode, struct file *filp)
return 0;
}
- /* Each pending user interrupt holds the user's context, hence we
- * must release them all before calling hl_ctx_mgr_fini().
- */
- hl_release_pending_user_interrupts(hpriv->hdev);
-
hl_ctx_mgr_fini(hdev, &hpriv->ctx_mgr);
hl_mem_mgr_fini(&hpriv->mem_mgr);
@@ -1428,8 +1425,8 @@ static void handle_reset_trigger(struct hl_device *hdev, u32 flags)
int hl_device_reset(struct hl_device *hdev, u32 flags)
{
bool hard_reset, from_hard_reset_thread, fw_reset, hard_instead_soft = false,
- reset_upon_device_release = false, schedule_hard_reset = false, delay_reset,
- from_dev_release, from_watchdog_thread;
+ reset_upon_device_release = false, schedule_hard_reset = false,
+ delay_reset, from_dev_release, from_watchdog_thread;
u64 idle_mask[HL_BUSY_ENGINES_MASK_EXT_SIZE] = {0};
struct hl_ctx *ctx;
int i, rc;
@@ -1446,12 +1443,17 @@ int hl_device_reset(struct hl_device *hdev, u32 flags)
delay_reset = !!(flags & HL_DRV_RESET_DELAY);
from_watchdog_thread = !!(flags & HL_DRV_RESET_FROM_WD_THR);
+ if (!hard_reset && (hl_device_status(hdev) == HL_DEVICE_STATUS_MALFUNCTION)) {
+ dev_dbg(hdev->dev, "soft-reset isn't supported on a malfunctioning device\n");
+ return 0;
+ }
+
if (!hard_reset && !hdev->asic_prop.supports_compute_reset) {
hard_instead_soft = true;
hard_reset = true;
}
- if (hdev->reset_upon_device_release && (flags & HL_DRV_RESET_DEV_RELEASE)) {
+ if (hdev->reset_upon_device_release && from_dev_release) {
if (hard_reset) {
dev_crit(hdev->dev,
"Aborting reset because hard-reset is mutually exclusive with reset-on-device-release\n");
@@ -1512,6 +1514,7 @@ do_reset:
&hdev->device_release_watchdog_work.reset_work);
if (from_dev_release) {
+ hdev->reset_info.in_compute_reset = 0;
flags |= HL_DRV_RESET_HARD;
flags &= ~HL_DRV_RESET_DEV_RELEASE;
hard_reset = true;
@@ -1566,7 +1569,8 @@ kill_processes:
if (rc == -EBUSY) {
if (hdev->device_fini_pending) {
dev_crit(hdev->dev,
- "Failed to kill all open processes, stopping hard reset\n");
+ "%s Failed to kill all open processes, stopping hard reset\n",
+ dev_name(&(hdev)->pdev->dev));
goto out_err;
}
@@ -1576,7 +1580,8 @@ kill_processes:
if (rc) {
dev_crit(hdev->dev,
- "Failed to kill all open processes, stopping hard reset\n");
+ "%s Failed to kill all open processes, stopping hard reset\n",
+ dev_name(&(hdev)->pdev->dev));
goto out_err;
}
@@ -1627,14 +1632,16 @@ kill_processes:
* ensure driver puts the driver in a unusable state
*/
dev_crit(hdev->dev,
- "Consecutive FW fatal errors received, stopping hard reset\n");
+ "%s Consecutive FW fatal errors received, stopping hard reset\n",
+ dev_name(&(hdev)->pdev->dev));
rc = -EIO;
goto out_err;
}
if (hdev->kernel_ctx) {
dev_crit(hdev->dev,
- "kernel ctx was alive during hard reset, something is terribly wrong\n");
+ "%s kernel ctx was alive during hard reset, something is terribly wrong\n",
+ dev_name(&(hdev)->pdev->dev));
rc = -EBUSY;
goto out_err;
}
@@ -1732,7 +1739,7 @@ kill_processes:
rc = hdev->asic_funcs->scrub_device_mem(hdev);
if (rc) {
dev_err(hdev->dev, "scrub mem failed from device reset (%d)\n", rc);
- return rc;
+ goto out_err;
}
spin_lock(&hdev->reset_info.lock);
@@ -1752,9 +1759,13 @@ kill_processes:
hdev->reset_info.needs_reset = false;
if (hard_reset)
- dev_info(hdev->dev, "Successfully finished resetting the device\n");
+ dev_info(hdev->dev,
+ "Successfully finished resetting the %s device\n",
+ dev_name(&(hdev)->pdev->dev));
else
- dev_dbg(hdev->dev, "Successfully finished resetting the device\n");
+ dev_dbg(hdev->dev,
+ "Successfully finished resetting the %s device\n",
+ dev_name(&(hdev)->pdev->dev));
if (hard_reset) {
hdev->reset_info.hard_reset_cnt++;
@@ -1789,7 +1800,9 @@ out_err:
hdev->reset_info.in_compute_reset = 0;
if (hard_reset) {
- dev_err(hdev->dev, "Failed to reset! Device is NOT usable\n");
+ dev_err(hdev->dev,
+ "%s Failed to reset! Device is NOT usable\n",
+ dev_name(&(hdev)->pdev->dev));
hdev->reset_info.hard_reset_cnt++;
} else if (reset_upon_device_release) {
spin_unlock(&hdev->reset_info.lock);
@@ -1870,6 +1883,8 @@ out:
hl_ctx_put(ctx);
+ hl_abort_waitings_for_completion(hdev);
+
return 0;
device_reset:
@@ -2186,7 +2201,8 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass)
}
dev_notice(hdev->dev,
- "Successfully added device to habanalabs driver\n");
+ "Successfully added device %s to habanalabs driver\n",
+ dev_name(&(hdev)->pdev->dev));
hdev->init_done = true;
@@ -2235,11 +2251,11 @@ out_disabled:
device_cdev_sysfs_add(hdev);
if (hdev->pdev)
dev_err(&hdev->pdev->dev,
- "Failed to initialize hl%d. Device is NOT usable !\n",
- hdev->cdev_idx);
+ "Failed to initialize hl%d. Device %s is NOT usable !\n",
+ hdev->cdev_idx, dev_name(&(hdev)->pdev->dev));
else
- pr_err("Failed to initialize hl%d. Device is NOT usable !\n",
- hdev->cdev_idx);
+ pr_err("Failed to initialize hl%d. Device %s is NOT usable !\n",
+ hdev->cdev_idx, dev_name(&(hdev)->pdev->dev));
return rc;
}
@@ -2295,7 +2311,8 @@ void hl_device_fini(struct hl_device *hdev)
if (ktime_compare(ktime_get(), timeout) > 0) {
dev_crit(hdev->dev,
- "Failed to remove device because reset function did not finish\n");
+ "%s Failed to remove device because reset function did not finish\n",
+ dev_name(&(hdev)->pdev->dev));
return;
}
}
@@ -2363,7 +2380,7 @@ void hl_device_fini(struct hl_device *hdev)
hl_mmu_fini(hdev);
- vfree(hdev->captured_err_info.pgf_info.user_mappings);
+ vfree(hdev->captured_err_info.page_fault_info.user_mappings);
hl_eq_fini(hdev, &hdev->event_queue);
@@ -2402,7 +2419,12 @@ void hl_device_fini(struct hl_device *hdev)
*/
inline u32 hl_rreg(struct hl_device *hdev, u32 reg)
{
- return readl(hdev->rmmio + reg);
+ u32 val = readl(hdev->rmmio + reg);
+
+ if (unlikely(trace_habanalabs_rreg32_enabled()))
+ trace_habanalabs_rreg32(hdev->dev, reg, val);
+
+ return val;
}
/*
@@ -2417,12 +2439,17 @@ inline u32 hl_rreg(struct hl_device *hdev, u32 reg)
*/
inline void hl_wreg(struct hl_device *hdev, u32 reg, u32 val)
{
+ if (unlikely(trace_habanalabs_wreg32_enabled()))
+ trace_habanalabs_wreg32(hdev->dev, reg, val);
+
writel(val, hdev->rmmio + reg);
}
void hl_capture_razwi(struct hl_device *hdev, u64 addr, u16 *engine_id, u16 num_of_engines,
u8 flags)
{
+ struct razwi_info *razwi_info = &hdev->captured_err_info.razwi_info;
+
if (num_of_engines > HL_RAZWI_MAX_NUM_OF_ENGINES_PER_RTR) {
dev_err(hdev->dev,
"Number of possible razwi initiators (%u) exceeded limit (%u)\n",
@@ -2431,15 +2458,17 @@ void hl_capture_razwi(struct hl_device *hdev, u64 addr, u16 *engine_id, u16 num_
}
/* In case it's the first razwi since the device was opened, capture its parameters */
- if (atomic_cmpxchg(&hdev->captured_err_info.razwi_info_recorded, 0, 1))
+ if (atomic_cmpxchg(&hdev->captured_err_info.razwi_info.razwi_detected, 0, 1))
return;
- hdev->captured_err_info.razwi.timestamp = ktime_to_ns(ktime_get());
- hdev->captured_err_info.razwi.addr = addr;
- hdev->captured_err_info.razwi.num_of_possible_engines = num_of_engines;
- memcpy(&hdev->captured_err_info.razwi.engine_id[0], &engine_id[0],
+ razwi_info->razwi.timestamp = ktime_to_ns(ktime_get());
+ razwi_info->razwi.addr = addr;
+ razwi_info->razwi.num_of_possible_engines = num_of_engines;
+ memcpy(&razwi_info->razwi.engine_id[0], &engine_id[0],
num_of_engines * sizeof(u16));
- hdev->captured_err_info.razwi.flags = flags;
+ razwi_info->razwi.flags = flags;
+
+ razwi_info->razwi_info_available = true;
}
void hl_handle_razwi(struct hl_device *hdev, u64 addr, u16 *engine_id, u16 num_of_engines,
@@ -2453,7 +2482,7 @@ void hl_handle_razwi(struct hl_device *hdev, u64 addr, u16 *engine_id, u16 num_o
static void hl_capture_user_mappings(struct hl_device *hdev, bool is_pmmu)
{
- struct page_fault_info *pgf_info = &hdev->captured_err_info.pgf_info;
+ struct page_fault_info *pgf_info = &hdev->captured_err_info.page_fault_info;
struct hl_vm_phys_pg_pack *phys_pg_pack = NULL;
struct hl_vm_hash_node *hnode;
struct hl_userptr *userptr;
@@ -2515,14 +2544,18 @@ finish:
void hl_capture_page_fault(struct hl_device *hdev, u64 addr, u16 eng_id, bool is_pmmu)
{
+ struct page_fault_info *pgf_info = &hdev->captured_err_info.page_fault_info;
+
/* Capture only the first page fault */
- if (atomic_cmpxchg(&hdev->captured_err_info.pgf_info_recorded, 0, 1))
+ if (atomic_cmpxchg(&pgf_info->page_fault_detected, 0, 1))
return;
- hdev->captured_err_info.pgf_info.pgf.timestamp = ktime_to_ns(ktime_get());
- hdev->captured_err_info.pgf_info.pgf.addr = addr;
- hdev->captured_err_info.pgf_info.pgf.engine_id = eng_id;
+ pgf_info->page_fault.timestamp = ktime_to_ns(ktime_get());
+ pgf_info->page_fault.addr = addr;
+ pgf_info->page_fault.engine_id = eng_id;
hl_capture_user_mappings(hdev, is_pmmu);
+
+ pgf_info->page_fault_info_available = true;
}
void hl_handle_page_fault(struct hl_device *hdev, u64 addr, u16 eng_id, bool is_pmmu,
diff --git a/drivers/misc/habanalabs/common/firmware_if.c b/drivers/accel/habanalabs/common/firmware_if.c
index 228b92278e48..da892d8fb3d6 100644
--- a/drivers/misc/habanalabs/common/firmware_if.c
+++ b/drivers/accel/habanalabs/common/firmware_if.c
@@ -14,8 +14,32 @@
#include <linux/ctype.h>
#include <linux/vmalloc.h>
+#include <trace/events/habanalabs.h>
+
#define FW_FILE_MAX_SIZE 0x1400000 /* maximum size of 20MB */
+static char *comms_cmd_str_arr[COMMS_INVLD_LAST] = {
+ [COMMS_NOOP] = __stringify(COMMS_NOOP),
+ [COMMS_CLR_STS] = __stringify(COMMS_CLR_STS),
+ [COMMS_RST_STATE] = __stringify(COMMS_RST_STATE),
+ [COMMS_PREP_DESC] = __stringify(COMMS_PREP_DESC),
+ [COMMS_DATA_RDY] = __stringify(COMMS_DATA_RDY),
+ [COMMS_EXEC] = __stringify(COMMS_EXEC),
+ [COMMS_RST_DEV] = __stringify(COMMS_RST_DEV),
+ [COMMS_GOTO_WFE] = __stringify(COMMS_GOTO_WFE),
+ [COMMS_SKIP_BMC] = __stringify(COMMS_SKIP_BMC),
+ [COMMS_PREP_DESC_ELBI] = __stringify(COMMS_PREP_DESC_ELBI),
+};
+
+static char *comms_sts_str_arr[COMMS_STS_INVLD_LAST] = {
+ [COMMS_STS_NOOP] = __stringify(COMMS_STS_NOOP),
+ [COMMS_STS_ACK] = __stringify(COMMS_STS_ACK),
+ [COMMS_STS_OK] = __stringify(COMMS_STS_OK),
+ [COMMS_STS_ERR] = __stringify(COMMS_STS_ERR),
+ [COMMS_STS_VALID_ERR] = __stringify(COMMS_STS_VALID_ERR),
+ [COMMS_STS_TIMEOUT_ERR] = __stringify(COMMS_STS_TIMEOUT_ERR),
+};
+
static char *extract_fw_ver_from_str(const char *fw_str)
{
char *str, *fw_ver, *whitespace;
@@ -311,7 +335,7 @@ int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg,
dev_dbg(hdev->dev, "Device CPU packet timeout (0x%x) due to FW reset\n",
tmp);
else
- dev_err(hdev->dev, "Device CPU packet timeout (0x%x)\n", tmp);
+ dev_err(hdev->dev, "Device CPU packet timeout (status = 0x%x)\n", tmp);
hdev->device_cpu_disabled = true;
goto out;
}
@@ -1322,13 +1346,12 @@ static void detect_cpu_boot_status(struct hl_device *hdev, u32 status)
break;
default:
dev_err(hdev->dev,
- "Device boot progress - Invalid status code %d\n",
- status);
+ "Device boot progress - Invalid or unexpected status code %d\n", status);
break;
}
}
-static int hl_fw_wait_preboot_ready(struct hl_device *hdev)
+int hl_fw_wait_preboot_ready(struct hl_device *hdev)
{
struct pre_fw_load_props *pre_fw_load = &hdev->fw_loader.pre_fw_load;
u32 status;
@@ -1353,8 +1376,8 @@ static int hl_fw_wait_preboot_ready(struct hl_device *hdev)
pre_fw_load->wait_for_preboot_timeout);
if (rc) {
- dev_err(hdev->dev, "CPU boot ready status timeout\n");
detect_cpu_boot_status(hdev, status);
+ dev_err(hdev->dev, "CPU boot ready timeout (status = %d)\n", status);
/* If we read all FF, then something is totally wrong, no point
* of reading specific errors
@@ -1634,6 +1657,7 @@ static void hl_fw_dynamic_send_cmd(struct hl_device *hdev,
val = FIELD_PREP(COMMS_COMMAND_CMD_MASK, cmd);
val |= FIELD_PREP(COMMS_COMMAND_SIZE_MASK, size);
+ trace_habanalabs_comms_send_cmd(hdev->dev, comms_cmd_str_arr[cmd]);
WREG32(le32_to_cpu(dyn_regs->kmd_msg_to_cpu), val);
}
@@ -1691,6 +1715,8 @@ static int hl_fw_dynamic_wait_for_status(struct hl_device *hdev,
dyn_regs = &fw_loader->dynamic_loader.comm_desc.cpu_dyn_regs;
+ trace_habanalabs_comms_wait_status(hdev->dev, comms_sts_str_arr[expected_status]);
+
/* Wait for expected status */
rc = hl_poll_timeout(
hdev,
@@ -1706,6 +1732,8 @@ static int hl_fw_dynamic_wait_for_status(struct hl_device *hdev,
return -EIO;
}
+ trace_habanalabs_comms_wait_status_done(hdev->dev, comms_sts_str_arr[expected_status]);
+
/*
* skip storing FW response for NOOP to preserve the actual desired
* FW status
@@ -1778,6 +1806,8 @@ int hl_fw_dynamic_send_protocol_cmd(struct hl_device *hdev,
{
int rc;
+ trace_habanalabs_comms_protocol_cmd(hdev->dev, comms_cmd_str_arr[cmd]);
+
/* first send clear command to clean former commands */
rc = hl_fw_dynamic_send_clear_cmd(hdev, fw_loader);
if (rc)
@@ -1884,7 +1914,7 @@ static int hl_fw_dynamic_validate_memory_bound(struct hl_device *hdev,
*
* @hdev: pointer to the habanalabs device structure
* @fw_loader: managing structure for loading device's FW
- * @fw_desc: the descriptor form FW
+ * @fw_desc: the descriptor from FW
*
* @return 0 on success, otherwise non-zero error code
*/
@@ -1901,11 +1931,11 @@ static int hl_fw_dynamic_validate_descriptor(struct hl_device *hdev,
int rc;
if (le32_to_cpu(fw_desc->header.magic) != HL_COMMS_DESC_MAGIC)
- dev_warn(hdev->dev, "Invalid magic for dynamic FW descriptor (%x)\n",
+ dev_dbg(hdev->dev, "Invalid magic for dynamic FW descriptor (%x)\n",
fw_desc->header.magic);
if (fw_desc->header.version != HL_COMMS_DESC_VER)
- dev_warn(hdev->dev, "Invalid version for dynamic FW descriptor (%x)\n",
+ dev_dbg(hdev->dev, "Invalid version for dynamic FW descriptor (%x)\n",
fw_desc->header.version);
/*
@@ -1976,6 +2006,43 @@ static int hl_fw_dynamic_validate_response(struct hl_device *hdev,
return rc;
}
+/*
+ * hl_fw_dynamic_read_descriptor_msg - read and show the ascii msg that sent by fw
+ *
+ * @hdev: pointer to the habanalabs device structure
+ * @fw_desc: the descriptor from FW
+ */
+static void hl_fw_dynamic_read_descriptor_msg(struct hl_device *hdev,
+ struct lkd_fw_comms_desc *fw_desc)
+{
+ int i;
+ char *msg;
+
+ for (i = 0 ; i < LKD_FW_ASCII_MSG_MAX ; i++) {
+ if (!fw_desc->ascii_msg[i].valid)
+ return;
+
+ /* force NULL termination */
+ msg = fw_desc->ascii_msg[i].msg;
+ msg[LKD_FW_ASCII_MSG_MAX_LEN - 1] = '\0';
+
+ switch (fw_desc->ascii_msg[i].msg_lvl) {
+ case LKD_FW_ASCII_MSG_ERR:
+ dev_err(hdev->dev, "fw: %s", fw_desc->ascii_msg[i].msg);
+ break;
+ case LKD_FW_ASCII_MSG_WRN:
+ dev_warn(hdev->dev, "fw: %s", fw_desc->ascii_msg[i].msg);
+ break;
+ case LKD_FW_ASCII_MSG_INF:
+ dev_info(hdev->dev, "fw: %s", fw_desc->ascii_msg[i].msg);
+ break;
+ default:
+ dev_dbg(hdev->dev, "fw: %s", fw_desc->ascii_msg[i].msg);
+ break;
+ }
+ }
+}
+
/**
* hl_fw_dynamic_read_and_validate_descriptor - read and validate FW descriptor
*
@@ -1988,9 +2055,10 @@ static int hl_fw_dynamic_read_and_validate_descriptor(struct hl_device *hdev,
struct fw_load_mgr *fw_loader)
{
struct lkd_fw_comms_desc *fw_desc;
- void __iomem *src, *temp_fw_desc;
struct pci_mem_region *region;
struct fw_response *response;
+ void *temp_fw_desc;
+ void __iomem *src;
u16 fw_data_size;
enum pci_region region_id;
int rc;
@@ -2039,6 +2107,10 @@ static int hl_fw_dynamic_read_and_validate_descriptor(struct hl_device *hdev,
rc = hl_fw_dynamic_validate_descriptor(hdev, fw_loader,
(struct lkd_fw_comms_desc *) temp_fw_desc);
+
+ if (!rc)
+ hl_fw_dynamic_read_descriptor_msg(hdev, temp_fw_desc);
+
vfree(temp_fw_desc);
return rc;
@@ -2354,7 +2426,7 @@ static int hl_fw_dynamic_wait_for_boot_fit_active(struct hl_device *hdev,
hdev->fw_poll_interval_usec,
dyn_loader->wait_for_bl_timeout);
if (rc) {
- dev_err(hdev->dev, "failed to wait for boot\n");
+ dev_err(hdev->dev, "failed to wait for boot (status = %d)\n", status);
return rc;
}
@@ -2381,7 +2453,7 @@ static int hl_fw_dynamic_wait_for_linux_active(struct hl_device *hdev,
hdev->fw_poll_interval_usec,
fw_loader->cpu_timeout);
if (rc) {
- dev_err(hdev->dev, "failed to wait for Linux\n");
+ dev_err(hdev->dev, "failed to wait for Linux (status = %d)\n", status);
return rc;
}
@@ -2459,51 +2531,54 @@ static void hl_fw_linux_update_state(struct hl_device *hdev,
static int hl_fw_dynamic_send_msg(struct hl_device *hdev,
struct fw_load_mgr *fw_loader, u8 msg_type, void *data)
{
- struct lkd_msg_comms msg;
+ struct lkd_msg_comms *msg;
int rc;
- memset(&msg, 0, sizeof(msg));
+ msg = kzalloc(sizeof(*msg), GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
/* create message to be sent */
- msg.header.type = msg_type;
- msg.header.size = cpu_to_le16(sizeof(struct comms_msg_header));
- msg.header.magic = cpu_to_le32(HL_COMMS_MSG_MAGIC);
+ msg->header.type = msg_type;
+ msg->header.size = cpu_to_le16(sizeof(struct comms_msg_header));
+ msg->header.magic = cpu_to_le32(HL_COMMS_MSG_MAGIC);
switch (msg_type) {
case HL_COMMS_RESET_CAUSE_TYPE:
- msg.reset_cause = *(__u8 *) data;
+ msg->reset_cause = *(__u8 *) data;
break;
default:
dev_err(hdev->dev,
"Send COMMS message - invalid message type %u\n",
msg_type);
- return -EINVAL;
+ rc = -EINVAL;
+ goto out;
}
rc = hl_fw_dynamic_request_descriptor(hdev, fw_loader,
sizeof(struct lkd_msg_comms));
if (rc)
- return rc;
+ goto out;
/* copy message to space allocated by FW */
- rc = hl_fw_dynamic_copy_msg(hdev, &msg, fw_loader);
+ rc = hl_fw_dynamic_copy_msg(hdev, msg, fw_loader);
if (rc)
- return rc;
+ goto out;
rc = hl_fw_dynamic_send_protocol_cmd(hdev, fw_loader, COMMS_DATA_RDY,
0, true,
fw_loader->cpu_timeout);
if (rc)
- return rc;
+ goto out;
rc = hl_fw_dynamic_send_protocol_cmd(hdev, fw_loader, COMMS_EXEC,
0, true,
fw_loader->cpu_timeout);
- if (rc)
- return rc;
- return 0;
+out:
+ kfree(msg);
+ return rc;
}
/**
@@ -2560,13 +2635,43 @@ static int hl_fw_dynamic_init_cpu(struct hl_device *hdev,
}
if (!(hdev->fw_components & FW_TYPE_BOOT_CPU)) {
+ struct lkd_fw_binning_info *binning_info;
+
rc = hl_fw_dynamic_request_descriptor(hdev, fw_loader, 0);
if (rc)
goto protocol_err;
/* read preboot version */
- return hl_fw_dynamic_read_device_fw_version(hdev, FW_COMP_PREBOOT,
+ rc = hl_fw_dynamic_read_device_fw_version(hdev, FW_COMP_PREBOOT,
fw_loader->dynamic_loader.comm_desc.cur_fw_ver);
+
+ if (rc)
+ return rc;
+
+ /* read binning info from preboot */
+ if (hdev->support_preboot_binning) {
+ binning_info = &fw_loader->dynamic_loader.comm_desc.binning_info;
+ hdev->tpc_binning = le64_to_cpu(binning_info->tpc_mask_l);
+ hdev->dram_binning = le32_to_cpu(binning_info->dram_mask);
+ hdev->edma_binning = le32_to_cpu(binning_info->edma_mask);
+ hdev->decoder_binning = le32_to_cpu(binning_info->dec_mask);
+ hdev->rotator_binning = le32_to_cpu(binning_info->rot_mask);
+
+ rc = hdev->asic_funcs->set_dram_properties(hdev);
+ if (rc)
+ return rc;
+
+ rc = hdev->asic_funcs->set_binning_masks(hdev);
+ if (rc)
+ return rc;
+
+ dev_dbg(hdev->dev,
+ "Read binning masks: tpc: 0x%llx, dram: 0x%llx, edma: 0x%x, dec: 0x%x, rot:0x%x\n",
+ hdev->tpc_binning, hdev->dram_binning, hdev->edma_binning,
+ hdev->decoder_binning, hdev->rotator_binning);
+ }
+
+ return 0;
}
/* load boot fit to FW */
@@ -2687,7 +2792,7 @@ static int hl_fw_static_init_cpu(struct hl_device *hdev,
if (rc) {
dev_dbg(hdev->dev,
- "No boot fit request received, resuming boot\n");
+ "No boot fit request received (status = %d), resuming boot\n", status);
} else {
rc = hdev->asic_funcs->load_boot_fit_to_device(hdev);
if (rc)
@@ -2710,7 +2815,7 @@ static int hl_fw_static_init_cpu(struct hl_device *hdev,
if (rc) {
dev_err(hdev->dev,
- "Timeout waiting for boot fit load ack\n");
+ "Timeout waiting for boot fit load ack (status = %d)\n", status);
goto out;
}
@@ -2788,7 +2893,7 @@ static int hl_fw_static_init_cpu(struct hl_device *hdev,
if (rc) {
dev_err(hdev->dev,
- "Failed to get ACK on skipping BMC, %d\n",
+ "Failed to get ACK on skipping BMC (status = %d)\n",
status);
WREG32(msg_to_cpu_reg, KMD_MSG_NA);
rc = -EIO;
@@ -2815,7 +2920,7 @@ static int hl_fw_static_init_cpu(struct hl_device *hdev,
"Device reports FIT image is corrupted\n");
else
dev_err(hdev->dev,
- "Failed to load firmware to device, %d\n",
+ "Failed to load firmware to device (status = %d)\n",
status);
rc = -EIO;
@@ -3043,3 +3148,27 @@ int hl_fw_get_sec_attest_info(struct hl_device *hdev, struct cpucp_sec_attest_in
sizeof(struct cpucp_sec_attest_info), nonce,
HL_CPUCP_SEC_ATTEST_INFO_TINEOUT_USEC);
}
+
+int hl_fw_send_generic_request(struct hl_device *hdev, enum hl_passthrough_type sub_opcode,
+ dma_addr_t buff, u32 *size)
+{
+ struct cpucp_packet pkt = {0};
+ u64 result;
+ int rc = 0;
+
+ pkt.ctl = cpu_to_le32(CPUCP_PACKET_GENERIC_PASSTHROUGH << CPUCP_PKT_CTL_OPCODE_SHIFT);
+ pkt.addr = cpu_to_le64(buff);
+ pkt.data_max_size = cpu_to_le32(*size);
+ pkt.pkt_subidx = cpu_to_le32(sub_opcode);
+
+ rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *)&pkt, sizeof(pkt),
+ HL_CPUCP_INFO_TIMEOUT_USEC, &result);
+ if (rc)
+ dev_err(hdev->dev, "failed to send CPUCP data of generic fw pkt\n");
+ else
+ dev_dbg(hdev->dev, "generic pkt was successful, result: 0x%llx\n", result);
+
+ *size = (u32)result;
+
+ return rc;
+}
diff --git a/drivers/misc/habanalabs/common/habanalabs.h b/drivers/accel/habanalabs/common/habanalabs.h
index e2527d976ee0..fa05e76d3d21 100644
--- a/drivers/misc/habanalabs/common/habanalabs.h
+++ b/drivers/accel/habanalabs/common/habanalabs.h
@@ -11,7 +11,7 @@
#include "../include/common/cpucp_if.h"
#include "../include/common/qman_if.h"
#include "../include/hw_ip/mmu/mmu_general.h"
-#include <uapi/misc/habanalabs.h>
+#include <uapi/drm/habanalabs_accel.h>
#include <linux/cdev.h>
#include <linux/iopoll.h>
@@ -29,6 +29,8 @@
#include <linux/coresight.h>
#include <linux/dma-buf.h>
+#include "security.h"
+
#define HL_NAME "habanalabs"
struct hl_device;
@@ -375,7 +377,8 @@ enum hl_cs_type {
CS_TYPE_COLLECTIVE_WAIT,
CS_RESERVE_SIGNALS,
CS_UNRESERVE_SIGNALS,
- CS_TYPE_ENGINE_CORE
+ CS_TYPE_ENGINE_CORE,
+ CS_TYPE_FLUSH_PCI_HBW_WRITES,
};
/*
@@ -545,6 +548,8 @@ struct hl_hints_range {
/**
* struct asic_fixed_properties - ASIC specific immutable properties.
* @hw_queues_props: H/W queues properties.
+ * @special_blocks: points to an array containing special blocks info.
+ * @skip_special_blocks_cfg: special blocks skip configs.
* @cpucp_info: received various information from CPU-CP regarding the H/W, e.g.
* available sensors.
* @uboot_ver: F/W U-boot version.
@@ -644,6 +649,10 @@ struct hl_hints_range {
* (i.e. the DRAM supports multiple page sizes), otherwise
* it will shall be equal to dram_page_size.
* @num_engine_cores: number of engine cpu cores
+ * @num_of_special_blocks: special_blocks array size.
+ * @glbl_err_cause_num: global err cause number.
+ * @hbw_flush_reg: register to read to generate HBW flush. value of 0 means HBW flush is
+ * not supported.
* @collective_first_sob: first sync object available for collective use
* @collective_first_mon: first monitor available for collective use
* @sync_stream_first_sob: first sync object available for sync stream use
@@ -692,6 +701,8 @@ struct hl_hints_range {
*/
struct asic_fixed_properties {
struct hw_queue_properties *hw_queues_props;
+ struct hl_special_block_info *special_blocks;
+ struct hl_skip_blocks_cfg skip_special_blocks_cfg;
struct cpucp_info cpucp_info;
char uboot_ver[VERSION_MAX_LEN];
char preboot_ver[VERSION_MAX_LEN];
@@ -764,6 +775,9 @@ struct asic_fixed_properties {
u32 xbar_edge_enabled_mask;
u32 device_mem_alloc_default_page_size;
u32 num_engine_cores;
+ u32 num_of_special_blocks;
+ u32 glbl_err_cause_num;
+ u32 hbw_flush_reg;
u16 collective_first_sob;
u16 collective_first_mon;
u16 sync_stream_first_sob;
@@ -935,6 +949,7 @@ struct hl_mmap_mem_buf {
* @size: holds the CB's size.
* @roundup_size: holds the cb size after roundup to page size.
* @cs_cnt: holds number of CS that this CB participates in.
+ * @is_handle_destroyed: atomic boolean indicating whether or not the CB handle was destroyed.
* @is_pool: true if CB was acquired from the pool, false otherwise.
* @is_internal: internally allocated
* @is_mmu_mapped: true if the CB is mapped to the device's MMU.
@@ -951,6 +966,7 @@ struct hl_cb {
u32 size;
u32 roundup_size;
atomic_t cs_cnt;
+ atomic_t is_handle_destroyed;
u8 is_pool;
u8 is_internal;
u8 is_mmu_mapped;
@@ -1077,20 +1093,25 @@ struct hl_cq {
atomic_t free_slots_cnt;
};
+enum hl_user_interrupt_type {
+ HL_USR_INTERRUPT_CQ = 0,
+ HL_USR_INTERRUPT_DECODER,
+};
+
/**
* struct hl_user_interrupt - holds user interrupt information
* @hdev: pointer to the device structure
+ * @type: user interrupt type
* @wait_list_head: head to the list of user threads pending on this interrupt
* @wait_list_lock: protects wait_list_head
* @interrupt_id: msix interrupt id
- * @is_decoder: whether this entry represents a decoder interrupt
*/
struct hl_user_interrupt {
- struct hl_device *hdev;
- struct list_head wait_list_head;
- spinlock_t wait_list_lock;
- u32 interrupt_id;
- bool is_decoder;
+ struct hl_device *hdev;
+ enum hl_user_interrupt_type type;
+ struct list_head wait_list_head;
+ spinlock_t wait_list_lock;
+ u32 interrupt_id;
};
/**
@@ -1540,8 +1561,10 @@ struct engines_data {
* @check_if_razwi_happened: check if there was a razwi due to RR violation.
* @access_dev_mem: access device memory
* @set_dram_bar_base: set the base of the DRAM BAR
- * @set_engine_cores: set a config command to enigne cores
+ * @set_engine_cores: set a config command to engine cores
* @send_device_activity: indication to FW about device availability
+ * @set_dram_properties: set DRAM related properties.
+ * @set_binning_masks: set binning/enable masks for all relevant components.
*/
struct hl_asic_funcs {
int (*early_init)(struct hl_device *hdev);
@@ -1679,6 +1702,8 @@ struct hl_asic_funcs {
int (*set_engine_cores)(struct hl_device *hdev, u32 *core_ids,
u32 num_cores, u32 core_command);
int (*send_device_activity)(struct hl_device *hdev, bool open);
+ int (*set_dram_properties)(struct hl_device *hdev);
+ int (*set_binning_masks)(struct hl_device *hdev);
};
@@ -1739,8 +1764,9 @@ struct hl_cs_counters_atomic {
* struct hl_dmabuf_priv - a dma-buf private object.
* @dmabuf: pointer to dma-buf object.
* @ctx: pointer to the dma-buf owner's context.
- * @phys_pg_pack: pointer to physical page pack if the dma-buf was exported for
- * memory allocation handle.
+ * @phys_pg_pack: pointer to physical page pack if the dma-buf was exported
+ * where virtual memory is supported.
+ * @memhash_hnode: pointer to the memhash node. this object holds the export count.
* @device_address: physical address of the device's memory. Relevant only
* if phys_pg_pack is NULL (dma-buf was exported from address).
* The total size can be taken from the dmabuf object.
@@ -1749,6 +1775,7 @@ struct hl_dmabuf_priv {
struct dma_buf *dmabuf;
struct hl_ctx *ctx;
struct hl_vm_phys_pg_pack *phys_pg_pack;
+ struct hl_vm_hash_node *memhash_hnode;
uint64_t device_address;
};
@@ -1923,6 +1950,7 @@ struct hl_userptr {
* @type: CS_TYPE_*.
* @jobs_cnt: counter of submitted jobs on all queues.
* @encaps_sig_hdl_id: encaps signals handle id, set for the first staged cs.
+ * @completion_timestamp: timestamp of the last completed cs job.
* @sob_addr_offset: sob offset from the configuration base address.
* @initial_sob_count: count of completed signals in SOB before current submission of signal or
* cs with encaps signals.
@@ -1955,6 +1983,7 @@ struct hl_cs {
struct list_head staged_cs_node;
struct list_head debugfs_list;
struct hl_cs_encaps_sig_handle *encaps_sig_hdl;
+ ktime_t completion_timestamp;
u64 sequence;
u64 staged_sequence;
u64 timeout_jiffies;
@@ -1990,6 +2019,7 @@ struct hl_cs {
* @debugfs_list: node in debugfs list of command submission jobs.
* @refcount: reference counter for usage of the CS job.
* @queue_type: the type of the H/W queue this job is submitted to.
+ * @timestamp: timestamp upon job completion
* @id: the id of this job inside a CS.
* @hw_queue_id: the id of the H/W queue this job is submitted to.
* @user_cb_size: the actual size of the CB we got from the user.
@@ -2016,6 +2046,7 @@ struct hl_cs_job {
struct list_head debugfs_list;
struct kref refcount;
enum hl_queue_type queue_type;
+ ktime_t timestamp;
u32 id;
u32 hw_queue_id;
u32 user_cb_size;
@@ -2076,12 +2107,16 @@ struct hl_cs_parser {
* hl_userptr).
* @node: node to hang on the hash table in context object.
* @vaddr: key virtual address.
+ * @handle: memory handle for device memory allocation.
* @ptr: value pointer (hl_vm_phys_pg_list or hl_userptr).
+ * @export_cnt: number of exports from within the VA block.
*/
struct hl_vm_hash_node {
struct hlist_node node;
u64 vaddr;
+ u64 handle;
void *ptr;
+ int export_cnt;
};
/**
@@ -2109,10 +2144,10 @@ struct hl_vm_hw_block_list_node {
* @pages: the physical page array.
* @npages: num physical pages in the pack.
* @total_size: total size of all the pages in this list.
+ * @exported_size: buffer exported size.
* @node: used to attach to deletion list that is used when all the allocations are cleared
* at the teardown of the context.
* @mapping_cnt: number of shared mappings.
- * @exporting_cnt: number of dma-buf exporting.
* @asid: the context related to this list.
* @page_size: size of each page in the pack.
* @flags: HL_MEM_* flags related to this list.
@@ -2126,9 +2161,9 @@ struct hl_vm_phys_pg_pack {
u64 *pages;
u64 npages;
u64 total_size;
+ u64 exported_size;
struct list_head node;
atomic_t mapping_cnt;
- u32 exporting_cnt;
u32 asid;
u32 page_size;
u32 flags;
@@ -2675,11 +2710,11 @@ void hl_wreg(struct hl_device *hdev, u32 reg, u32 val);
p->size = sz; \
})
-#define HL_USR_INTR_STRUCT_INIT(usr_intr, hdev, intr_id, decoder) \
+#define HL_USR_INTR_STRUCT_INIT(usr_intr, hdev, intr_id, intr_type) \
({ \
usr_intr.hdev = hdev; \
usr_intr.interrupt_id = intr_id; \
- usr_intr.is_decoder = decoder; \
+ usr_intr.type = intr_type; \
INIT_LIST_HEAD(&usr_intr.wait_list_head); \
spin_lock_init(&usr_intr.wait_list_lock); \
})
@@ -2961,37 +2996,53 @@ struct undefined_opcode_info {
};
/**
- * struct page_fault_info - info about page fault
- * @pgf_info: page fault information.
+ * struct page_fault_info - page fault information.
+ * @page_fault: holds information collected during a page fault.
* @user_mappings: buffer containing user mappings.
* @num_of_user_mappings: number of user mappings.
+ * @page_fault_detected: if set as 1, then a page-fault was discovered for the
+ * first time after the driver has finished booting-up.
+ * Since we're looking for the page-fault's root cause,
+ * we don't care of the others that might follow it-
+ * so once changed to 1, it will remain that way.
+ * @page_fault_info_available: indicates that a page fault info is now available.
*/
struct page_fault_info {
- struct hl_page_fault_info pgf;
+ struct hl_page_fault_info page_fault;
struct hl_user_mapping *user_mappings;
u64 num_of_user_mappings;
+ atomic_t page_fault_detected;
+ bool page_fault_info_available;
+};
+
+/**
+ * struct razwi_info - RAZWI information.
+ * @razwi: holds information collected during a RAZWI
+ * @razwi_detected: if set as 1, then a RAZWI was discovered for the
+ * first time after the driver has finished booting-up.
+ * Since we're looking for the RAZWI's root cause,
+ * we don't care of the others that might follow it-
+ * so once changed to 1, it will remain that way.
+ * @razwi_info_available: indicates that a RAZWI info is now available.
+ */
+struct razwi_info {
+ struct hl_info_razwi_event razwi;
+ atomic_t razwi_detected;
+ bool razwi_info_available;
};
/**
* struct hl_error_info - holds information collected during an error.
* @cs_timeout: CS timeout error information.
- * @razwi: razwi information.
- * @razwi_info_recorded: if set writing to razwi information is enabled.
- * otherwise - disabled, so the first (root cause) razwi will not be
- * overwritten.
- * @undef_opcode: undefined opcode information
- * @pgf_info: page fault information.
- * @pgf_info_recorded: if set writing to page fault information is enabled.
- * otherwise - disabled, so the first (root cause) page fault will not be
- * overwritten.
+ * @razwi_info: RAZWI information.
+ * @undef_opcode: undefined opcode information.
+ * @page_fault_info: page fault information.
*/
struct hl_error_info {
struct cs_timeout_info cs_timeout;
- struct hl_info_razwi_event razwi;
- atomic_t razwi_info_recorded;
+ struct razwi_info razwi_info;
struct undefined_opcode_info undef_opcode;
- struct page_fault_info pgf_info;
- atomic_t pgf_info_recorded;
+ struct page_fault_info page_fault_info;
};
/**
@@ -3157,6 +3208,8 @@ struct hl_reset_info {
* @edma_binning: contains mask of edma engines that is received from the f/w which
* indicates which edma engines are binned-out
* @device_release_watchdog_timeout_sec: device release watchdog timeout value in seconds.
+ * @rotator_binning: contains mask of rotators engines that is received from the f/w
+ * which indicates which rotator engines are binned-out(Gaudi3 and above).
* @id: device minor.
* @id_control: minor of the control device.
* @cdev_idx: char device index. Used for setting its name.
@@ -3214,6 +3267,7 @@ struct hl_reset_info {
* @heartbeat: Controls if we want to enable the heartbeat mechanism vs. the f/w, which verifies
* that the f/w is always alive. Used only for testing.
* @supports_ctx_switch: true if a ctx switch is required upon first submission.
+ * @support_preboot_binning: true if we support read binning info from preboot.
*/
struct hl_device {
struct pci_dev *pdev;
@@ -3322,6 +3376,7 @@ struct hl_device {
u32 decoder_binning;
u32 edma_binning;
u32 device_release_watchdog_timeout_sec;
+ u32 rotator_binning;
u16 id;
u16 id_control;
u16 cdev_idx;
@@ -3355,6 +3410,7 @@ struct hl_device {
u8 supports_mmu_prefetch;
u8 reset_upon_device_release;
u8 supports_ctx_switch;
+ u8 support_preboot_binning;
/* Parameters for bring-up */
u64 nic_ports_mask;
@@ -3729,6 +3785,7 @@ int hl_fw_cpucp_power_get(struct hl_device *hdev, u64 *power);
void hl_fw_ask_hard_reset_without_linux(struct hl_device *hdev);
void hl_fw_ask_halt_machine_without_linux(struct hl_device *hdev);
int hl_fw_init_cpu(struct hl_device *hdev);
+int hl_fw_wait_preboot_ready(struct hl_device *hdev);
int hl_fw_read_preboot_status(struct hl_device *hdev);
int hl_fw_dynamic_send_protocol_cmd(struct hl_device *hdev,
struct fw_load_mgr *fw_loader,
@@ -3772,6 +3829,8 @@ int hl_fw_get_clk_rate(struct hl_device *hdev, u32 *cur_clk, u32 *max_clk);
void hl_fw_set_pll_profile(struct hl_device *hdev);
void hl_sysfs_add_dev_clk_attr(struct hl_device *hdev, struct attribute_group *dev_clk_attr_grp);
void hl_sysfs_add_dev_vrm_attr(struct hl_device *hdev, struct attribute_group *dev_vrm_attr_grp);
+int hl_fw_send_generic_request(struct hl_device *hdev, enum hl_passthrough_type sub_opcode,
+ dma_addr_t buff, u32 *size);
void hw_sob_get(struct hl_hw_sob *hw_sob);
void hw_sob_put(struct hl_hw_sob *hw_sob);
@@ -3786,6 +3845,7 @@ void hl_dec_fini(struct hl_device *hdev);
void hl_dec_ctx_fini(struct hl_ctx *ctx);
void hl_release_pending_user_interrupts(struct hl_device *hdev);
+void hl_abort_waitings_for_completion(struct hl_device *hdev);
int hl_cs_signal_sob_wraparound_handler(struct hl_device *hdev, u32 q_idx,
struct hl_hw_sob **hw_sob, u32 count, bool encaps_sig);
diff --git a/drivers/misc/habanalabs/common/habanalabs_drv.c b/drivers/accel/habanalabs/common/habanalabs_drv.c
index 7815c60df54e..03dae57dc838 100644
--- a/drivers/misc/habanalabs/common/habanalabs_drv.c
+++ b/drivers/accel/habanalabs/common/habanalabs_drv.c
@@ -222,9 +222,11 @@ int hl_device_open(struct inode *inode, struct file *filp)
hl_debugfs_add_file(hpriv);
atomic_set(&hdev->captured_err_info.cs_timeout.write_enable, 1);
- atomic_set(&hdev->captured_err_info.razwi_info_recorded, 0);
- atomic_set(&hdev->captured_err_info.pgf_info_recorded, 0);
+ atomic_set(&hdev->captured_err_info.razwi_info.razwi_detected, 0);
+ atomic_set(&hdev->captured_err_info.page_fault_info.page_fault_detected, 0);
hdev->captured_err_info.undef_opcode.write_enable = true;
+ hdev->captured_err_info.razwi_info.razwi_info_available = false;
+ hdev->captured_err_info.page_fault_info.page_fault_info_available = false;
hdev->open_counter++;
hdev->last_successful_open_jif = jiffies;
diff --git a/drivers/misc/habanalabs/common/habanalabs_ioctl.c b/drivers/accel/habanalabs/common/habanalabs_ioctl.c
index b6abfa7761a7..5005e6fca691 100644
--- a/drivers/misc/habanalabs/common/habanalabs_ioctl.c
+++ b/drivers/accel/habanalabs/common/habanalabs_ioctl.c
@@ -7,7 +7,7 @@
#define pr_fmt(fmt) "habanalabs: " fmt
-#include <uapi/misc/habanalabs.h>
+#include <uapi/drm/habanalabs_accel.h>
#include "habanalabs.h"
#include <linux/fs.h>
@@ -607,16 +607,20 @@ static int cs_timeout_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
static int razwi_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
+ void __user *out = (void __user *) (uintptr_t) args->return_pointer;
struct hl_device *hdev = hpriv->hdev;
u32 max_size = args->return_size;
- struct hl_info_razwi_event *info = &hdev->captured_err_info.razwi;
- void __user *out = (void __user *) (uintptr_t) args->return_pointer;
+ struct razwi_info *razwi_info;
if ((!max_size) || (!out))
return -EINVAL;
- return copy_to_user(out, info, min_t(size_t, max_size, sizeof(struct hl_info_razwi_event)))
- ? -EFAULT : 0;
+ razwi_info = &hdev->captured_err_info.razwi_info;
+ if (!razwi_info->razwi_info_available)
+ return 0;
+
+ return copy_to_user(out, &razwi_info->razwi,
+ min_t(size_t, max_size, sizeof(struct hl_info_razwi_event))) ? -EFAULT : 0;
}
static int undefined_opcode_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
@@ -786,16 +790,20 @@ static int engine_status_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
static int page_fault_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
+ void __user *out = (void __user *) (uintptr_t) args->return_pointer;
struct hl_device *hdev = hpriv->hdev;
u32 max_size = args->return_size;
- struct hl_page_fault_info *info = &hdev->captured_err_info.pgf_info.pgf;
- void __user *out = (void __user *) (uintptr_t) args->return_pointer;
+ struct page_fault_info *pgf_info;
if ((!max_size) || (!out))
return -EINVAL;
- return copy_to_user(out, info, min_t(size_t, max_size, sizeof(struct hl_page_fault_info)))
- ? -EFAULT : 0;
+ pgf_info = &hdev->captured_err_info.page_fault_info;
+ if (!pgf_info->page_fault_info_available)
+ return 0;
+
+ return copy_to_user(out, &pgf_info->page_fault,
+ min_t(size_t, max_size, sizeof(struct hl_page_fault_info))) ? -EFAULT : 0;
}
static int user_mappings_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
@@ -806,18 +814,68 @@ static int user_mappings_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
struct page_fault_info *pgf_info;
u64 actual_size;
- pgf_info = &hdev->captured_err_info.pgf_info;
- args->array_size = pgf_info->num_of_user_mappings;
-
if (!out)
return -EINVAL;
+ pgf_info = &hdev->captured_err_info.page_fault_info;
+ if (!pgf_info->page_fault_info_available)
+ return 0;
+
+ args->array_size = pgf_info->num_of_user_mappings;
+
actual_size = pgf_info->num_of_user_mappings * sizeof(struct hl_user_mapping);
if (user_buf_size < actual_size)
return -ENOMEM;
- return copy_to_user(out, pgf_info->user_mappings, min_t(size_t, user_buf_size, actual_size))
- ? -EFAULT : 0;
+ return copy_to_user(out, pgf_info->user_mappings, actual_size) ? -EFAULT : 0;
+}
+
+static int send_fw_generic_request(struct hl_device *hdev, struct hl_info_args *info_args)
+{
+ void __user *buff = (void __user *) (uintptr_t) info_args->return_pointer;
+ u32 size = info_args->return_size;
+ dma_addr_t dma_handle;
+ bool need_input_buff;
+ void *fw_buff;
+ int rc = 0;
+
+ switch (info_args->fw_sub_opcode) {
+ case HL_PASSTHROUGH_VERSIONS:
+ need_input_buff = false;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (size > SZ_1M) {
+ dev_err(hdev->dev, "buffer size cannot exceed 1MB\n");
+ return -EINVAL;
+ }
+
+ fw_buff = hl_cpu_accessible_dma_pool_alloc(hdev, size, &dma_handle);
+ if (!fw_buff)
+ return -ENOMEM;
+
+
+ if (need_input_buff && copy_from_user(fw_buff, buff, size)) {
+ dev_dbg(hdev->dev, "Failed to copy from user FW buff\n");
+ rc = -EFAULT;
+ goto free_buff;
+ }
+
+ rc = hl_fw_send_generic_request(hdev, info_args->fw_sub_opcode, dma_handle, &size);
+ if (rc)
+ goto free_buff;
+
+ if (copy_to_user(buff, fw_buff, min(size, info_args->return_size))) {
+ dev_dbg(hdev->dev, "Failed to copy to user FW generic req output\n");
+ rc = -EFAULT;
+ }
+
+free_buff:
+ hl_cpu_accessible_dma_pool_free(hdev, info_args->return_size, fw_buff);
+
+ return rc;
}
static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data,
@@ -826,9 +884,13 @@ static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data,
enum hl_device_status status;
struct hl_info_args *args = data;
struct hl_device *hdev = hpriv->hdev;
-
int rc;
+ if (args->pad) {
+ dev_dbg(hdev->dev, "Padding bytes must be 0\n");
+ return -EINVAL;
+ }
+
/*
* Information is returned for the following opcodes even if the device
* is disabled or in reset.
@@ -893,7 +955,7 @@ static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data,
}
if (!hl_device_operational(hdev, &status)) {
- dev_warn_ratelimited(dev,
+ dev_dbg_ratelimited(dev,
"Device is %s. Can't execute INFO IOCTL\n",
hdev->status[status]);
return -EBUSY;
@@ -947,6 +1009,9 @@ static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data,
case HL_INFO_ENGINE_STATUS:
return engine_status_info(hpriv, args);
+ case HL_INFO_FW_GENERIC_REQ:
+ return send_fw_generic_request(hdev, args);
+
default:
dev_err(dev, "Invalid request %d\n", args->op);
rc = -EINVAL;
@@ -975,7 +1040,7 @@ static int hl_debug_ioctl(struct hl_fpriv *hpriv, void *data)
int rc = 0;
if (!hl_device_operational(hdev, &status)) {
- dev_warn_ratelimited(hdev->dev,
+ dev_dbg_ratelimited(hdev->dev,
"Device is %s. Can't execute DEBUG IOCTL\n",
hdev->status[status]);
return -EBUSY;
@@ -1072,8 +1137,6 @@ static long _hl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg,
retcode = -EFAULT;
goto out_err;
}
- } else if (cmd & IOC_OUT) {
- memset(kdata, 0, usize);
}
retcode = func(hpriv, kdata);
diff --git a/drivers/misc/habanalabs/common/hw_queue.c b/drivers/accel/habanalabs/common/hw_queue.c
index d0087c0ec48c..d0087c0ec48c 100644
--- a/drivers/misc/habanalabs/common/hw_queue.c
+++ b/drivers/accel/habanalabs/common/hw_queue.c
diff --git a/drivers/misc/habanalabs/common/hwmon.c b/drivers/accel/habanalabs/common/hwmon.c
index 55eb0203817f..55eb0203817f 100644
--- a/drivers/misc/habanalabs/common/hwmon.c
+++ b/drivers/accel/habanalabs/common/hwmon.c
diff --git a/drivers/misc/habanalabs/common/irq.c b/drivers/accel/habanalabs/common/irq.c
index 94d537fd4fde..04844e843a7b 100644
--- a/drivers/misc/habanalabs/common/irq.c
+++ b/drivers/accel/habanalabs/common/irq.c
@@ -72,15 +72,17 @@ static void irq_handle_eqe(struct work_struct *work)
* @hdev: pointer to device structure
* @cs_seq: command submission sequence
* @cq: completion queue
+ * @timestamp: interrupt timestamp
*
*/
-static void job_finish(struct hl_device *hdev, u32 cs_seq, struct hl_cq *cq)
+static void job_finish(struct hl_device *hdev, u32 cs_seq, struct hl_cq *cq, ktime_t timestamp)
{
struct hl_hw_queue *queue;
struct hl_cs_job *job;
queue = &hdev->kernel_queues[cq->hw_queue_id];
job = queue->shadow_queue[hl_pi_2_offset(cs_seq)];
+ job->timestamp = timestamp;
queue_work(hdev->cq_wq[cq->cq_idx], &job->finish_work);
atomic_inc(&queue->ci);
@@ -91,9 +93,10 @@ static void job_finish(struct hl_device *hdev, u32 cs_seq, struct hl_cq *cq)
*
* @hdev: pointer to device structure
* @cs_seq: command submission sequence
+ * @timestamp: interrupt timestamp
*
*/
-static void cs_finish(struct hl_device *hdev, u16 cs_seq)
+static void cs_finish(struct hl_device *hdev, u16 cs_seq, ktime_t timestamp)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
struct hl_hw_queue *queue;
@@ -113,6 +116,7 @@ static void cs_finish(struct hl_device *hdev, u16 cs_seq)
atomic_inc(&queue->ci);
}
+ cs->completion_timestamp = timestamp;
queue_work(hdev->cs_cmplt_wq, &cs->finish_work);
}
@@ -130,6 +134,7 @@ irqreturn_t hl_irq_handler_cq(int irq, void *arg)
bool shadow_index_valid, entry_ready;
u16 shadow_index;
struct hl_cq_entry *cq_entry, *cq_base;
+ ktime_t timestamp = ktime_get();
if (hdev->disabled) {
dev_dbg(hdev->dev,
@@ -171,9 +176,9 @@ irqreturn_t hl_irq_handler_cq(int irq, void *arg)
if (shadow_index_valid && !hdev->disabled) {
if (hdev->asic_prop.completion_mode ==
HL_COMPLETION_MODE_CS)
- cs_finish(hdev, shadow_index);
+ cs_finish(hdev, shadow_index, timestamp);
else
- job_finish(hdev, shadow_index, cq);
+ job_finish(hdev, shadow_index, cq, timestamp);
}
/* Clear CQ entry ready bit */
@@ -228,7 +233,7 @@ static void hl_ts_free_objects(struct work_struct *work)
* list to a dedicated workqueue to do the actual put.
*/
static int handle_registration_node(struct hl_device *hdev, struct hl_user_pending_interrupt *pend,
- struct list_head **free_list)
+ struct list_head **free_list, ktime_t now)
{
struct timestamp_reg_free_node *free_node;
u64 timestamp;
@@ -246,7 +251,7 @@ static int handle_registration_node(struct hl_device *hdev, struct hl_user_pendi
if (!free_node)
return -ENOMEM;
- timestamp = ktime_get_ns();
+ timestamp = ktime_to_ns(now);
*pend->ts_reg_info.timestamp_kernel_addr = timestamp;
@@ -298,7 +303,7 @@ static void handle_user_interrupt(struct hl_device *hdev, struct hl_user_interru
if (pend->ts_reg_info.buf) {
if (!reg_node_handle_fail) {
rc = handle_registration_node(hdev, pend,
- &ts_reg_free_list_head);
+ &ts_reg_free_list_head, now);
if (rc)
reg_node_handle_fail = true;
}
@@ -333,13 +338,22 @@ irqreturn_t hl_irq_handler_user_interrupt(int irq, void *arg)
struct hl_user_interrupt *user_int = arg;
struct hl_device *hdev = user_int->hdev;
- if (user_int->is_decoder)
- handle_user_interrupt(hdev, &hdev->common_decoder_interrupt);
- else
+ switch (user_int->type) {
+ case HL_USR_INTERRUPT_CQ:
handle_user_interrupt(hdev, &hdev->common_user_cq_interrupt);
- /* Handle user cq or decoder interrupts registered on this specific irq */
- handle_user_interrupt(hdev, user_int);
+ /* Handle user cq interrupt registered on this specific irq */
+ handle_user_interrupt(hdev, user_int);
+ break;
+ case HL_USR_INTERRUPT_DECODER:
+ handle_user_interrupt(hdev, &hdev->common_decoder_interrupt);
+
+ /* Handle decoder interrupt registered on this specific irq */
+ handle_user_interrupt(hdev, user_int);
+ break;
+ default:
+ break;
+ }
return IRQ_HANDLED;
}
diff --git a/drivers/misc/habanalabs/common/memory.c b/drivers/accel/habanalabs/common/memory.c
index 5e9ae7600d75..761a47e89b00 100644
--- a/drivers/misc/habanalabs/common/memory.c
+++ b/drivers/accel/habanalabs/common/memory.c
@@ -5,7 +5,7 @@
* All Rights Reserved.
*/
-#include <uapi/misc/habanalabs.h>
+#include <uapi/drm/habanalabs_accel.h>
#include "habanalabs.h"
#include "../include/hw_ip/mmu/mmu_general.h"
@@ -19,7 +19,9 @@ MODULE_IMPORT_NS(DMA_BUF);
#define HL_MMU_DEBUG 0
/* use small pages for supporting non-pow2 (32M/40M/48M) DRAM phys page sizes */
-#define DRAM_POOL_PAGE_SIZE SZ_8M
+#define DRAM_POOL_PAGE_SIZE SZ_8M
+
+#define MEM_HANDLE_INVALID ULONG_MAX
static int allocate_timestamps_buffers(struct hl_fpriv *hpriv,
struct hl_mem_in *args, u64 *handle);
@@ -371,12 +373,6 @@ static int free_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args)
return -EINVAL;
}
- if (phys_pg_pack->exporting_cnt) {
- spin_unlock(&vm->idr_lock);
- dev_dbg(hdev->dev, "handle %u is exported, cannot free\n", handle);
- return -EINVAL;
- }
-
/* must remove from idr before the freeing of the physical pages as the refcount of the pool
* is also the trigger of the idr destroy
*/
@@ -1240,6 +1236,7 @@ static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args, u64 *device
hnode->ptr = vm_type;
hnode->vaddr = ret_vaddr;
+ hnode->handle = is_userptr ? MEM_HANDLE_INVALID : handle;
mutex_lock(&ctx->mem_hash_lock);
hash_add(ctx->mem_hash, &hnode->node, ret_vaddr);
@@ -1313,6 +1310,12 @@ static int unmap_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
return -EINVAL;
}
+ if (hnode->export_cnt) {
+ mutex_unlock(&ctx->mem_hash_lock);
+ dev_err(hdev->dev, "failed to unmap %#llx, memory is exported\n", vaddr);
+ return -EINVAL;
+ }
+
hash_del(&hnode->node);
mutex_unlock(&ctx->mem_hash_lock);
@@ -1545,10 +1548,10 @@ static int set_dma_sg(struct scatterlist *sg, u64 bar_address, u64 chunk_size,
}
static struct sg_table *alloc_sgt_from_device_pages(struct hl_device *hdev, u64 *pages, u64 npages,
- u64 page_size, struct device *dev,
- enum dma_data_direction dir)
+ u64 page_size, u64 exported_size,
+ struct device *dev, enum dma_data_direction dir)
{
- u64 chunk_size, bar_address, dma_max_seg_size;
+ u64 chunk_size, bar_address, dma_max_seg_size, cur_size_to_export, cur_npages;
struct asic_fixed_properties *prop;
int rc, i, j, nents, cur_page;
struct scatterlist *sg;
@@ -1574,16 +1577,23 @@ static struct sg_table *alloc_sgt_from_device_pages(struct hl_device *hdev, u64
if (!sgt)
return ERR_PTR(-ENOMEM);
+ /* remove export size restrictions in case not explicitly defined */
+ cur_size_to_export = exported_size ? exported_size : (npages * page_size);
+
/* If the size of each page is larger than the dma max segment size,
* then we can't combine pages and the number of entries in the SGL
* will just be the
* <number of pages> * <chunks of max segment size in each page>
*/
- if (page_size > dma_max_seg_size)
- nents = npages * DIV_ROUND_UP_ULL(page_size, dma_max_seg_size);
- else
+ if (page_size > dma_max_seg_size) {
+ /* we should limit number of pages according to the exported size */
+ cur_npages = DIV_ROUND_UP_SECTOR_T(cur_size_to_export, page_size);
+ nents = cur_npages * DIV_ROUND_UP_SECTOR_T(page_size, dma_max_seg_size);
+ } else {
+ cur_npages = npages;
+
/* Get number of non-contiguous chunks */
- for (i = 1, nents = 1, chunk_size = page_size ; i < npages ; i++) {
+ for (i = 1, nents = 1, chunk_size = page_size ; i < cur_npages ; i++) {
if (pages[i - 1] + page_size != pages[i] ||
chunk_size + page_size > dma_max_seg_size) {
nents++;
@@ -1593,6 +1603,7 @@ static struct sg_table *alloc_sgt_from_device_pages(struct hl_device *hdev, u64
chunk_size += page_size;
}
+ }
rc = sg_alloc_table(sgt, nents, GFP_KERNEL | __GFP_ZERO);
if (rc)
@@ -1615,7 +1626,8 @@ static struct sg_table *alloc_sgt_from_device_pages(struct hl_device *hdev, u64
else
cur_device_address += dma_max_seg_size;
- chunk_size = min(size_left, dma_max_seg_size);
+ /* make sure not to export over exported size */
+ chunk_size = min3(size_left, dma_max_seg_size, cur_size_to_export);
bar_address = hdev->dram_pci_bar_start + cur_device_address;
@@ -1623,6 +1635,8 @@ static struct sg_table *alloc_sgt_from_device_pages(struct hl_device *hdev, u64
if (rc)
goto error_unmap;
+ cur_size_to_export -= chunk_size;
+
if (size_left > dma_max_seg_size) {
size_left -= dma_max_seg_size;
} else {
@@ -1634,7 +1648,7 @@ static struct sg_table *alloc_sgt_from_device_pages(struct hl_device *hdev, u64
/* Merge pages and put them into the scatterlist */
for_each_sgtable_dma_sg(sgt, sg, i) {
chunk_size = page_size;
- for (j = cur_page + 1 ; j < npages ; j++) {
+ for (j = cur_page + 1 ; j < cur_npages ; j++) {
if (pages[j - 1] + page_size != pages[j] ||
chunk_size + page_size > dma_max_seg_size)
break;
@@ -1645,10 +1659,13 @@ static struct sg_table *alloc_sgt_from_device_pages(struct hl_device *hdev, u64
bar_address = hdev->dram_pci_bar_start +
(pages[cur_page] - prop->dram_base_address);
+ /* make sure not to export over exported size */
+ chunk_size = min(chunk_size, cur_size_to_export);
rc = set_dma_sg(sg, bar_address, chunk_size, dev, dir);
if (rc)
goto error_unmap;
+ cur_size_to_export -= chunk_size;
cur_page = j;
}
}
@@ -1719,6 +1736,7 @@ static struct sg_table *hl_map_dmabuf(struct dma_buf_attachment *attachment,
phys_pg_pack->pages,
phys_pg_pack->npages,
phys_pg_pack->page_size,
+ phys_pg_pack->exported_size,
attachment->dev,
dir);
else
@@ -1726,6 +1744,7 @@ static struct sg_table *hl_map_dmabuf(struct dma_buf_attachment *attachment,
&hl_dmabuf->device_address,
1,
hl_dmabuf->dmabuf->size,
+ 0,
attachment->dev,
dir);
@@ -1763,18 +1782,20 @@ static void hl_unmap_dmabuf(struct dma_buf_attachment *attachment,
static void hl_release_dmabuf(struct dma_buf *dmabuf)
{
struct hl_dmabuf_priv *hl_dmabuf = dmabuf->priv;
- struct hl_ctx *ctx = hl_dmabuf->ctx;
- struct hl_device *hdev = ctx->hdev;
- struct hl_vm *vm = &hdev->vm;
+ struct hl_ctx *ctx;
- if (hl_dmabuf->phys_pg_pack) {
- spin_lock(&vm->idr_lock);
- hl_dmabuf->phys_pg_pack->exporting_cnt--;
- spin_unlock(&vm->idr_lock);
- }
+ if (!hl_dmabuf)
+ return;
- hl_ctx_put(hl_dmabuf->ctx);
+ ctx = hl_dmabuf->ctx;
+ if (hl_dmabuf->memhash_hnode) {
+ mutex_lock(&ctx->mem_hash_lock);
+ hl_dmabuf->memhash_hnode->export_cnt--;
+ mutex_unlock(&ctx->mem_hash_lock);
+ }
+
+ hl_ctx_put(ctx);
kfree(hl_dmabuf);
}
@@ -1785,7 +1806,7 @@ static const struct dma_buf_ops habanalabs_dmabuf_ops = {
.release = hl_release_dmabuf,
};
-static int export_dmabuf_common(struct hl_ctx *ctx,
+static int export_dmabuf(struct hl_ctx *ctx,
struct hl_dmabuf_priv *hl_dmabuf,
u64 total_size, int flags, int *dmabuf_fd)
{
@@ -1806,7 +1827,7 @@ static int export_dmabuf_common(struct hl_ctx *ctx,
fd = dma_buf_fd(hl_dmabuf->dmabuf, flags);
if (fd < 0) {
- dev_err(hdev->dev, "failed to get a file descriptor for a dma-buf\n");
+ dev_err(hdev->dev, "failed to get a file descriptor for a dma-buf, %d\n", fd);
rc = fd;
goto err_dma_buf_put;
}
@@ -1819,36 +1840,13 @@ static int export_dmabuf_common(struct hl_ctx *ctx,
return 0;
err_dma_buf_put:
+ hl_dmabuf->dmabuf->priv = NULL;
dma_buf_put(hl_dmabuf->dmabuf);
return rc;
}
-/**
- * export_dmabuf_from_addr() - export a dma-buf object for the given memory
- * address and size.
- * @ctx: pointer to the context structure.
- * @device_addr: device memory physical address.
- * @size: size of device memory.
- * @flags: DMA-BUF file/FD flags.
- * @dmabuf_fd: pointer to result FD that represents the dma-buf object.
- *
- * Create and export a dma-buf object for an existing memory allocation inside
- * the device memory, and return a FD which is associated with the dma-buf
- * object.
- *
- * Return: 0 on success, non-zero for failure.
- */
-static int export_dmabuf_from_addr(struct hl_ctx *ctx, u64 device_addr,
- u64 size, int flags, int *dmabuf_fd)
+static int validate_export_params_common(struct hl_device *hdev, u64 device_addr, u64 size)
{
- struct hl_dmabuf_priv *hl_dmabuf;
- struct hl_device *hdev = ctx->hdev;
- struct asic_fixed_properties *prop;
- u64 bar_address;
- int rc;
-
- prop = &hdev->asic_prop;
-
if (!IS_ALIGNED(device_addr, PAGE_SIZE)) {
dev_dbg(hdev->dev,
"exported device memory address 0x%llx should be aligned to 0x%lx\n",
@@ -1863,49 +1861,150 @@ static int export_dmabuf_from_addr(struct hl_ctx *ctx, u64 device_addr,
return -EINVAL;
}
+ return 0;
+}
+
+static int validate_export_params_no_mmu(struct hl_device *hdev, u64 device_addr, u64 size)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ u64 bar_address;
+ int rc;
+
+ rc = validate_export_params_common(hdev, device_addr, size);
+ if (rc)
+ return rc;
+
if (device_addr < prop->dram_user_base_address ||
- device_addr + size > prop->dram_end_address ||
- device_addr + size < device_addr) {
+ (device_addr + size) > prop->dram_end_address ||
+ (device_addr + size) < device_addr) {
dev_dbg(hdev->dev,
"DRAM memory range 0x%llx (+0x%llx) is outside of DRAM boundaries\n",
device_addr, size);
return -EINVAL;
}
- bar_address = hdev->dram_pci_bar_start +
- (device_addr - prop->dram_base_address);
+ bar_address = hdev->dram_pci_bar_start + (device_addr - prop->dram_base_address);
- if (bar_address + size >
- hdev->dram_pci_bar_start + prop->dram_pci_bar_size ||
- bar_address + size < bar_address) {
+ if ((bar_address + size) > (hdev->dram_pci_bar_start + prop->dram_pci_bar_size) ||
+ (bar_address + size) < bar_address) {
dev_dbg(hdev->dev,
"DRAM memory range 0x%llx (+0x%llx) is outside of PCI BAR boundaries\n",
device_addr, size);
return -EINVAL;
}
- hl_dmabuf = kzalloc(sizeof(*hl_dmabuf), GFP_KERNEL);
- if (!hl_dmabuf)
- return -ENOMEM;
+ return 0;
+}
- hl_dmabuf->device_address = device_addr;
+static int validate_export_params(struct hl_device *hdev, u64 device_addr, u64 size, u64 offset,
+ struct hl_vm_phys_pg_pack *phys_pg_pack)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ u64 bar_address;
+ int i, rc;
- rc = export_dmabuf_common(ctx, hl_dmabuf, size, flags, dmabuf_fd);
+ rc = validate_export_params_common(hdev, device_addr, size);
if (rc)
- goto err_free_dmabuf_wrapper;
+ return rc;
+
+ if ((offset + size) > phys_pg_pack->total_size) {
+ dev_dbg(hdev->dev, "offset %#llx and size %#llx exceed total map size %#llx\n",
+ offset, size, phys_pg_pack->total_size);
+ return -EINVAL;
+ }
+
+ for (i = 0 ; i < phys_pg_pack->npages ; i++) {
+
+ bar_address = hdev->dram_pci_bar_start +
+ (phys_pg_pack->pages[i] - prop->dram_base_address);
+
+ if ((bar_address + phys_pg_pack->page_size) >
+ (hdev->dram_pci_bar_start + prop->dram_pci_bar_size) ||
+ (bar_address + phys_pg_pack->page_size) < bar_address) {
+ dev_dbg(hdev->dev,
+ "DRAM memory range 0x%llx (+0x%x) is outside of PCI BAR boundaries\n",
+ phys_pg_pack->pages[i],
+ phys_pg_pack->page_size);
+
+ return -EINVAL;
+ }
+ }
return 0;
+}
-err_free_dmabuf_wrapper:
- kfree(hl_dmabuf);
- return rc;
+static struct hl_vm_hash_node *memhash_node_export_get(struct hl_ctx *ctx, u64 addr)
+{
+ struct hl_device *hdev = ctx->hdev;
+ struct hl_vm_hash_node *hnode;
+
+ /* get the memory handle */
+ mutex_lock(&ctx->mem_hash_lock);
+ hash_for_each_possible(ctx->mem_hash, hnode, node, (unsigned long)addr)
+ if (addr == hnode->vaddr)
+ break;
+
+ if (!hnode) {
+ mutex_unlock(&ctx->mem_hash_lock);
+ dev_dbg(hdev->dev, "map address %#llx not found\n", addr);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (upper_32_bits(hnode->handle)) {
+ mutex_unlock(&ctx->mem_hash_lock);
+ dev_dbg(hdev->dev, "invalid handle %#llx for map address %#llx\n",
+ hnode->handle, addr);
+ return ERR_PTR(-EINVAL);
+ }
+
+ /*
+ * node found, increase export count so this memory cannot be unmapped
+ * and the hash node cannot be deleted.
+ */
+ hnode->export_cnt++;
+ mutex_unlock(&ctx->mem_hash_lock);
+
+ return hnode;
+}
+
+static void memhash_node_export_put(struct hl_ctx *ctx, struct hl_vm_hash_node *hnode)
+{
+ mutex_lock(&ctx->mem_hash_lock);
+ hnode->export_cnt--;
+ mutex_unlock(&ctx->mem_hash_lock);
+}
+
+static struct hl_vm_phys_pg_pack *get_phys_pg_pack_from_hash_node(struct hl_device *hdev,
+ struct hl_vm_hash_node *hnode)
+{
+ struct hl_vm_phys_pg_pack *phys_pg_pack;
+ struct hl_vm *vm = &hdev->vm;
+
+ spin_lock(&vm->idr_lock);
+ phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, (u32) hnode->handle);
+ if (!phys_pg_pack) {
+ spin_unlock(&vm->idr_lock);
+ dev_dbg(hdev->dev, "no match for handle 0x%x\n", (u32) hnode->handle);
+ return ERR_PTR(-EINVAL);
+ }
+
+ spin_unlock(&vm->idr_lock);
+
+ if (phys_pg_pack->vm_type != VM_TYPE_PHYS_PACK) {
+ dev_dbg(hdev->dev, "handle 0x%llx does not represent DRAM memory\n", hnode->handle);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return phys_pg_pack;
}
/**
- * export_dmabuf_from_handle() - export a dma-buf object for the given memory
- * handle.
+ * export_dmabuf_from_addr() - export a dma-buf object for the given memory
+ * address and size.
* @ctx: pointer to the context structure.
- * @handle: device memory allocation handle.
+ * @addr: device address.
+ * @size: size of device memory to export.
+ * @offset: the offset into the buffer from which to start exporting
* @flags: DMA-BUF file/FD flags.
* @dmabuf_fd: pointer to result FD that represents the dma-buf object.
*
@@ -1915,87 +2014,69 @@ err_free_dmabuf_wrapper:
*
* Return: 0 on success, non-zero for failure.
*/
-static int export_dmabuf_from_handle(struct hl_ctx *ctx, u64 handle, int flags,
- int *dmabuf_fd)
+static int export_dmabuf_from_addr(struct hl_ctx *ctx, u64 addr, u64 size, u64 offset,
+ int flags, int *dmabuf_fd)
{
- struct hl_vm_phys_pg_pack *phys_pg_pack;
- struct hl_dmabuf_priv *hl_dmabuf;
- struct hl_device *hdev = ctx->hdev;
+ struct hl_vm_phys_pg_pack *phys_pg_pack = NULL;
+ struct hl_vm_hash_node *hnode = NULL;
struct asic_fixed_properties *prop;
- struct hl_vm *vm = &hdev->vm;
- u64 bar_address;
- int rc, i;
+ struct hl_dmabuf_priv *hl_dmabuf;
+ struct hl_device *hdev;
+ u64 export_addr;
+ int rc;
+ hdev = ctx->hdev;
prop = &hdev->asic_prop;
- if (upper_32_bits(handle)) {
- dev_dbg(hdev->dev, "no match for handle 0x%llx\n", handle);
+ /* offset must be 0 in devices without virtual memory support */
+ if (!prop->dram_supports_virtual_memory && offset) {
+ dev_dbg(hdev->dev, "offset is not allowed in device without virtual memory\n");
return -EINVAL;
}
- spin_lock(&vm->idr_lock);
+ export_addr = addr + offset;
- phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, (u32) handle);
- if (!phys_pg_pack) {
- spin_unlock(&vm->idr_lock);
- dev_dbg(hdev->dev, "no match for handle 0x%x\n", (u32) handle);
- return -EINVAL;
- }
-
- /* increment now to avoid freeing device memory while exporting */
- phys_pg_pack->exporting_cnt++;
-
- spin_unlock(&vm->idr_lock);
-
- if (phys_pg_pack->vm_type != VM_TYPE_PHYS_PACK) {
- dev_dbg(hdev->dev, "handle 0x%llx does not represent DRAM memory\n", handle);
- rc = -EINVAL;
- goto err_dec_exporting_cnt;
- }
-
- for (i = 0 ; i < phys_pg_pack->npages ; i++) {
-
- bar_address = hdev->dram_pci_bar_start +
- (phys_pg_pack->pages[i] -
- prop->dram_base_address);
-
- if (bar_address + phys_pg_pack->page_size >
- hdev->dram_pci_bar_start + prop->dram_pci_bar_size ||
- bar_address + phys_pg_pack->page_size < bar_address) {
-
- dev_dbg(hdev->dev,
- "DRAM memory range 0x%llx (+0x%x) is outside of PCI BAR boundaries\n",
- phys_pg_pack->pages[i],
- phys_pg_pack->page_size);
+ hl_dmabuf = kzalloc(sizeof(*hl_dmabuf), GFP_KERNEL);
+ if (!hl_dmabuf)
+ return -ENOMEM;
- rc = -EINVAL;
- goto err_dec_exporting_cnt;
+ if (prop->dram_supports_virtual_memory) {
+ hnode = memhash_node_export_get(ctx, addr);
+ if (IS_ERR(hnode)) {
+ rc = PTR_ERR(hnode);
+ goto err_free_dmabuf_wrapper;
}
- }
+ phys_pg_pack = get_phys_pg_pack_from_hash_node(hdev, hnode);
+ if (IS_ERR(phys_pg_pack)) {
+ rc = PTR_ERR(phys_pg_pack);
+ goto dec_memhash_export_cnt;
+ }
+ rc = validate_export_params(hdev, export_addr, size, offset, phys_pg_pack);
+ if (rc)
+ goto dec_memhash_export_cnt;
- hl_dmabuf = kzalloc(sizeof(*hl_dmabuf), GFP_KERNEL);
- if (!hl_dmabuf) {
- rc = -ENOMEM;
- goto err_dec_exporting_cnt;
+ phys_pg_pack->exported_size = size;
+ hl_dmabuf->phys_pg_pack = phys_pg_pack;
+ hl_dmabuf->memhash_hnode = hnode;
+ } else {
+ rc = validate_export_params_no_mmu(hdev, export_addr, size);
+ if (rc)
+ goto err_free_dmabuf_wrapper;
}
- hl_dmabuf->phys_pg_pack = phys_pg_pack;
+ hl_dmabuf->device_address = export_addr;
- rc = export_dmabuf_common(ctx, hl_dmabuf, phys_pg_pack->total_size,
- flags, dmabuf_fd);
+ rc = export_dmabuf(ctx, hl_dmabuf, size, flags, dmabuf_fd);
if (rc)
- goto err_free_dmabuf_wrapper;
+ goto dec_memhash_export_cnt;
return 0;
+dec_memhash_export_cnt:
+ if (prop->dram_supports_virtual_memory)
+ memhash_node_export_put(ctx, hnode);
err_free_dmabuf_wrapper:
kfree(hl_dmabuf);
-
-err_dec_exporting_cnt:
- spin_lock(&vm->idr_lock);
- phys_pg_pack->exporting_cnt--;
- spin_unlock(&vm->idr_lock);
-
return rc;
}
@@ -2082,19 +2163,20 @@ static int hl_ts_mmap(struct hl_mmap_mem_buf *buf, struct vm_area_struct *vma, v
{
struct hl_ts_buff *ts_buff = buf->private;
- vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP | VM_DONTCOPY | VM_NORESERVE;
+ vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP | VM_DONTCOPY | VM_NORESERVE);
return remap_vmalloc_range(vma, ts_buff->user_buff_address, 0);
}
static int hl_ts_alloc_buf(struct hl_mmap_mem_buf *buf, gfp_t gfp, void *args)
{
struct hl_ts_buff *ts_buff = NULL;
- u32 size, num_elements;
+ u32 num_elements;
+ size_t size;
void *p;
num_elements = *(u32 *)args;
- ts_buff = kzalloc(sizeof(*ts_buff), GFP_KERNEL);
+ ts_buff = kzalloc(sizeof(*ts_buff), gfp);
if (!ts_buff)
return -ENOMEM;
@@ -2180,7 +2262,7 @@ int hl_mem_ioctl(struct hl_fpriv *hpriv, void *data)
int rc, dmabuf_fd = -EBADF;
if (!hl_device_operational(hdev, &status)) {
- dev_warn_ratelimited(hdev->dev,
+ dev_dbg_ratelimited(hdev->dev,
"Device is %s. Can't execute MEMORY IOCTL\n",
hdev->status[status]);
return -EBUSY;
@@ -2269,17 +2351,12 @@ int hl_mem_ioctl(struct hl_fpriv *hpriv, void *data)
break;
case HL_MEM_OP_EXPORT_DMABUF_FD:
- if (hdev->asic_prop.dram_supports_virtual_memory)
- rc = export_dmabuf_from_handle(ctx,
- args->in.export_dmabuf_fd.handle,
- args->in.flags,
- &dmabuf_fd);
- else
- rc = export_dmabuf_from_addr(ctx,
- args->in.export_dmabuf_fd.handle,
- args->in.export_dmabuf_fd.mem_size,
- args->in.flags,
- &dmabuf_fd);
+ rc = export_dmabuf_from_addr(ctx,
+ args->in.export_dmabuf_fd.addr,
+ args->in.export_dmabuf_fd.mem_size,
+ args->in.export_dmabuf_fd.offset,
+ args->in.flags,
+ &dmabuf_fd);
memset(args, 0, sizeof(*args));
args->out.fd = dmabuf_fd;
break;
diff --git a/drivers/misc/habanalabs/common/memory_mgr.c b/drivers/accel/habanalabs/common/memory_mgr.c
index 1936d653699e..0f2759e26547 100644
--- a/drivers/misc/habanalabs/common/memory_mgr.c
+++ b/drivers/accel/habanalabs/common/memory_mgr.c
@@ -25,8 +25,7 @@ struct hl_mmap_mem_buf *hl_mmap_mem_buf_get(struct hl_mem_mgr *mmg, u64 handle)
buf = idr_find(&mmg->handles, lower_32_bits(handle >> PAGE_SHIFT));
if (!buf) {
spin_unlock(&mmg->lock);
- dev_warn(mmg->dev,
- "Buff get failed, no match to handle %#llx\n", handle);
+ dev_dbg(mmg->dev, "Buff get failed, no match to handle %#llx\n", handle);
return NULL;
}
kref_get(&buf->refcount);
diff --git a/drivers/misc/habanalabs/common/mmu/Makefile b/drivers/accel/habanalabs/common/mmu/Makefile
index 1806c524e04a..1806c524e04a 100644
--- a/drivers/misc/habanalabs/common/mmu/Makefile
+++ b/drivers/accel/habanalabs/common/mmu/Makefile
diff --git a/drivers/misc/habanalabs/common/mmu/mmu.c b/drivers/accel/habanalabs/common/mmu/mmu.c
index 2c1005f74cf4..a42ae8bc61e8 100644
--- a/drivers/misc/habanalabs/common/mmu/mmu.c
+++ b/drivers/accel/habanalabs/common/mmu/mmu.c
@@ -781,7 +781,7 @@ static void mmu_dma_mem_free_from_chunk(struct gen_pool *pool,
struct gen_pool_chunk *chunk,
void *data)
{
- struct hl_device *hdev = (struct hl_device *)data;
+ struct hl_device *hdev = data;
hl_asic_dma_free_coherent(hdev, (chunk->end_addr - chunk->start_addr) + 1,
(void *)chunk->start_addr, chunk->phys_addr);
diff --git a/drivers/misc/habanalabs/common/mmu/mmu_v1.c b/drivers/accel/habanalabs/common/mmu/mmu_v1.c
index 8a40de4a4761..d925dc4dd097 100644
--- a/drivers/misc/habanalabs/common/mmu/mmu_v1.c
+++ b/drivers/accel/habanalabs/common/mmu/mmu_v1.c
@@ -345,7 +345,6 @@ static void dram_default_mapping_fini(struct hl_ctx *ctx)
}
hop2_pte_addr = hop2_addr;
- hop2_pte_addr = hop2_addr;
for (i = 0 ; i < num_of_hop3 ; i++) {
clear_pte(ctx, hop2_pte_addr);
put_pte(ctx, hop2_addr);
diff --git a/drivers/misc/habanalabs/common/mmu/mmu_v2_hr.c b/drivers/accel/habanalabs/common/mmu/mmu_v2_hr.c
index afe7ef964f82..afe7ef964f82 100644
--- a/drivers/misc/habanalabs/common/mmu/mmu_v2_hr.c
+++ b/drivers/accel/habanalabs/common/mmu/mmu_v2_hr.c
diff --git a/drivers/misc/habanalabs/common/pci/Makefile b/drivers/accel/habanalabs/common/pci/Makefile
index dc922a686683..dc922a686683 100644
--- a/drivers/misc/habanalabs/common/pci/Makefile
+++ b/drivers/accel/habanalabs/common/pci/Makefile
diff --git a/drivers/misc/habanalabs/common/pci/pci.c b/drivers/accel/habanalabs/common/pci/pci.c
index 5fe3da5fba30..d1f4c695baf2 100644
--- a/drivers/misc/habanalabs/common/pci/pci.c
+++ b/drivers/accel/habanalabs/common/pci/pci.c
@@ -10,6 +10,8 @@
#include <linux/pci.h>
+#include <trace/events/habanalabs.h>
+
#define HL_PLDM_PCI_ELBI_TIMEOUT_MSEC (HL_PCI_ELBI_TIMEOUT_MSEC * 100)
#define IATU_REGION_CTRL_REGION_EN_MASK BIT(31)
@@ -120,6 +122,9 @@ int hl_pci_elbi_read(struct hl_device *hdev, u64 addr, u32 *data)
if ((val & PCI_CONFIG_ELBI_STS_MASK) == PCI_CONFIG_ELBI_STS_DONE) {
pci_read_config_dword(pdev, mmPCI_CONFIG_ELBI_DATA, data);
+ if (unlikely(trace_habanalabs_elbi_read_enabled()))
+ trace_habanalabs_elbi_read(hdev->dev, (u32) addr, val);
+
return 0;
}
@@ -179,8 +184,11 @@ static int hl_pci_elbi_write(struct hl_device *hdev, u64 addr, u32 data)
usleep_range(300, 500);
}
- if ((val & PCI_CONFIG_ELBI_STS_MASK) == PCI_CONFIG_ELBI_STS_DONE)
+ if ((val & PCI_CONFIG_ELBI_STS_MASK) == PCI_CONFIG_ELBI_STS_DONE) {
+ if (unlikely(trace_habanalabs_elbi_write_enabled()))
+ trace_habanalabs_elbi_write(hdev->dev, (u32) addr, val);
return 0;
+ }
if (val & PCI_CONFIG_ELBI_STS_ERR)
return -EIO;
diff --git a/drivers/misc/habanalabs/common/security.c b/drivers/accel/habanalabs/common/security.c
index 6196c0487c8b..5f03ade07ead 100644
--- a/drivers/misc/habanalabs/common/security.c
+++ b/drivers/accel/habanalabs/common/security.c
@@ -7,6 +7,19 @@
#include "habanalabs.h"
+static const char * const hl_glbl_error_cause[HL_MAX_NUM_OF_GLBL_ERR_CAUSE] = {
+ "Error due to un-priv read",
+ "Error due to un-secure read",
+ "Error due to read from unmapped reg",
+ "Error due to un-priv write",
+ "Error due to un-secure write",
+ "Error due to write to unmapped reg",
+ "External I/F write sec violation",
+ "External I/F write to un-mapped reg",
+ "Read to write only",
+ "Write to read only"
+};
+
/**
* hl_get_pb_block - return the relevant block within the block array
*
@@ -598,3 +611,164 @@ void hl_ack_pb_single_dcore(struct hl_device *hdev, u32 dcore_offset,
blocks_array_size);
}
+
+static u32 hl_automated_get_block_base_addr(struct hl_device *hdev,
+ struct hl_special_block_info *block_info,
+ u32 major, u32 minor, u32 sub_minor)
+{
+ u32 fw_block_base_address = block_info->base_addr +
+ major * block_info->major_offset +
+ minor * block_info->minor_offset +
+ sub_minor * block_info->sub_minor_offset;
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+
+ /* Calculation above returns an address for FW use, and therefore should
+ * be casted for driver use.
+ */
+ return (fw_block_base_address - lower_32_bits(prop->cfg_base_address));
+}
+
+static bool hl_check_block_type_exclusion(struct hl_skip_blocks_cfg *skip_blocks_cfg,
+ int block_type)
+{
+ int i;
+
+ /* Check if block type is listed in the exclusion list of block types */
+ for (i = 0 ; i < skip_blocks_cfg->block_types_len ; i++)
+ if (block_type == skip_blocks_cfg->block_types[i])
+ return true;
+
+ return false;
+}
+
+static bool hl_check_block_range_exclusion(struct hl_device *hdev,
+ struct hl_skip_blocks_cfg *skip_blocks_cfg,
+ struct hl_special_block_info *block_info,
+ u32 major, u32 minor, u32 sub_minor)
+{
+ u32 blocks_in_range, block_base_addr_in_range, block_base_addr;
+ int i, j;
+
+ block_base_addr = hl_automated_get_block_base_addr(hdev, block_info,
+ major, minor, sub_minor);
+
+ for (i = 0 ; i < skip_blocks_cfg->block_ranges_len ; i++) {
+ blocks_in_range = (skip_blocks_cfg->block_ranges[i].end -
+ skip_blocks_cfg->block_ranges[i].start) /
+ HL_BLOCK_SIZE + 1;
+ for (j = 0 ; j < blocks_in_range ; j++) {
+ block_base_addr_in_range = skip_blocks_cfg->block_ranges[i].start +
+ j * HL_BLOCK_SIZE;
+ if (block_base_addr == block_base_addr_in_range)
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static int hl_read_glbl_errors(struct hl_device *hdev,
+ u32 blk_idx, u32 major, u32 minor, u32 sub_minor, void *data)
+{
+ struct hl_special_block_info *special_blocks = hdev->asic_prop.special_blocks;
+ struct hl_special_block_info *current_block = &special_blocks[blk_idx];
+ u32 glbl_err_addr, glbl_err_cause, addr_val, cause_val, block_base,
+ base = current_block->base_addr - lower_32_bits(hdev->asic_prop.cfg_base_address);
+ int i;
+
+ block_base = base + major * current_block->major_offset +
+ minor * current_block->minor_offset +
+ sub_minor * current_block->sub_minor_offset;
+
+ glbl_err_cause = block_base + HL_GLBL_ERR_CAUSE_OFFSET;
+ cause_val = RREG32(glbl_err_cause);
+ if (!cause_val)
+ return 0;
+
+ glbl_err_addr = block_base + HL_GLBL_ERR_ADDR_OFFSET;
+ addr_val = RREG32(glbl_err_addr);
+
+ for (i = 0 ; i < hdev->asic_prop.glbl_err_cause_num ; i++) {
+ if (cause_val & BIT(i))
+ dev_err_ratelimited(hdev->dev,
+ "%s, addr %#llx\n",
+ hl_glbl_error_cause[i],
+ hdev->asic_prop.cfg_base_address + block_base +
+ FIELD_GET(HL_GLBL_ERR_ADDRESS_MASK, addr_val));
+ }
+
+ WREG32(glbl_err_cause, cause_val);
+
+ return 0;
+}
+
+void hl_check_for_glbl_errors(struct hl_device *hdev)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct hl_special_blocks_cfg special_blocks_cfg;
+ struct iterate_special_ctx glbl_err_iter;
+ int rc;
+
+ memset(&special_blocks_cfg, 0, sizeof(special_blocks_cfg));
+ special_blocks_cfg.skip_blocks_cfg = &prop->skip_special_blocks_cfg;
+
+ glbl_err_iter.fn = &hl_read_glbl_errors;
+ glbl_err_iter.data = &special_blocks_cfg;
+
+ rc = hl_iterate_special_blocks(hdev, &glbl_err_iter);
+ if (rc)
+ dev_err_ratelimited(hdev->dev,
+ "Could not iterate special blocks, glbl error check failed\n");
+}
+
+int hl_iterate_special_blocks(struct hl_device *hdev, struct iterate_special_ctx *ctx)
+{
+ struct hl_special_blocks_cfg *special_blocks_cfg =
+ (struct hl_special_blocks_cfg *)ctx->data;
+ struct hl_skip_blocks_cfg *skip_blocks_cfg =
+ special_blocks_cfg->skip_blocks_cfg;
+ u32 major, minor, sub_minor, blk_idx, num_blocks;
+ struct hl_special_block_info *block_info_arr;
+ int rc;
+
+ block_info_arr = hdev->asic_prop.special_blocks;
+ if (!block_info_arr)
+ return -EINVAL;
+
+ num_blocks = hdev->asic_prop.num_of_special_blocks;
+
+ for (blk_idx = 0 ; blk_idx < num_blocks ; blk_idx++, block_info_arr++) {
+ if (hl_check_block_type_exclusion(skip_blocks_cfg, block_info_arr->block_type))
+ continue;
+
+ for (major = 0 ; major < block_info_arr->major ; major++) {
+ minor = 0;
+ do {
+ sub_minor = 0;
+ do {
+ if ((hl_check_block_range_exclusion(hdev,
+ skip_blocks_cfg, block_info_arr,
+ major, minor, sub_minor)) ||
+ (skip_blocks_cfg->skip_block_hook &&
+ skip_blocks_cfg->skip_block_hook(hdev,
+ special_blocks_cfg,
+ blk_idx, major, minor, sub_minor))) {
+ sub_minor++;
+ continue;
+ }
+
+ rc = ctx->fn(hdev, blk_idx, major, minor,
+ sub_minor, ctx->data);
+ if (rc)
+ return rc;
+
+ sub_minor++;
+ } while (sub_minor < block_info_arr->sub_minor);
+
+ minor++;
+ } while (minor < block_info_arr->minor);
+ }
+ }
+
+ return 0;
+}
diff --git a/drivers/accel/habanalabs/common/security.h b/drivers/accel/habanalabs/common/security.h
new file mode 100644
index 000000000000..234b4a6ed8bc
--- /dev/null
+++ b/drivers/accel/habanalabs/common/security.h
@@ -0,0 +1,163 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2022 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+#ifndef SECURITY_H_
+#define SECURITY_H_
+
+#include <linux/io-64-nonatomic-lo-hi.h>
+
+extern struct hl_device *hdev;
+
+/* special blocks */
+#define HL_MAX_NUM_OF_GLBL_ERR_CAUSE 10
+#define HL_GLBL_ERR_ADDRESS_MASK GENMASK(11, 0)
+/* GLBL_ERR_ADDR register offset from the start of the block */
+#define HL_GLBL_ERR_ADDR_OFFSET 0xF44
+/* GLBL_ERR_CAUSE register offset from the start of the block */
+#define HL_GLBL_ERR_CAUSE_OFFSET 0xF48
+
+/*
+ * struct hl_special_block_info - stores address details of a particular type of
+ * IP block which has a SPECIAL part.
+ *
+ * @block_type: block type as described in every ASIC's block_types enum.
+ * @base_addr: base address of the first block of particular type,
+ * e.g., address of NIC0_UMR0_0 of 'NIC_UMR' block.
+ * @major: number of major blocks of particular type.
+ * @minor: number of minor blocks of particular type.
+ * @sub_minor: number of sub minor blocks of particular type.
+ * @major_offset: address gap between 2 consecutive major blocks of particular type,
+ * e.g., offset between NIC0_UMR0_0 and NIC1_UMR0_0 is 0x80000.
+ * @minor_offset: address gap between 2 consecutive minor blocks of particular type,
+ * e.g., offset between NIC0_UMR0_0 and NIC0_UMR1_0 is 0x20000.
+ * @sub_minor_offset: address gap between 2 consecutive sub_minor blocks of particular
+ * type, e.g., offset between NIC0_UMR0_0 and NIC0_UMR0_1 is 0x1000.
+ *
+ * e.g., in Gaudi2, NIC_UMR blocks can be interpreted as:
+ * NIC<major>_UMR<minor>_<sub_minor> where major=12, minor=2, sub_minor=15.
+ * In other words, for each of 12 major numbers (i.e 0 to 11) there are
+ * 2 blocks with different minor numbers (i.e. 0 to 1). Again, for each minor
+ * number there are 15 blocks with different sub_minor numbers (i.e. 0 to 14).
+ * So different blocks are NIC0_UMR0_0, NIC0_UMR0_1, ..., NIC0_UMR1_0, ....,
+ * NIC11_UMR1_14.
+ *
+ * Struct's formatted data is located in the SOL-based auto-generated protbits headers.
+ */
+struct hl_special_block_info {
+ int block_type;
+ u32 base_addr;
+ u32 major;
+ u32 minor;
+ u32 sub_minor;
+ u32 major_offset;
+ u32 minor_offset;
+ u32 sub_minor_offset;
+};
+
+/*
+ * struct hl_automated_pb_cfg - represents configurations of a particular type
+ * of IP block which has protection bits.
+ *
+ * @addr: address details as described in hl_automation_pb_addr struct.
+ * @prot_map: each bit corresponds to one among 32 protection configuration regs
+ * (e.g., SPECIAL_GLBL_PRIV). '1' means 0xffffffff and '0' means 0x0
+ * to be written into the corresponding protection configuration reg.
+ * This bit is meaningful if same bit in data_map is 0, otherwise ignored.
+ * @data_map: each bit corresponds to one among 32 protection configuration regs
+ * (e.g., SPECIAL_GLBL_PRIV). '1' means corresponding protection
+ * configuration reg is to be written with a value in array pointed
+ * by 'data', otherwise the value is decided by 'prot_map'.
+ * @data: pointer to data array which stores the config value(s) to be written
+ * to corresponding protection configuration reg(s).
+ * @data_size: size of the data array.
+ *
+ * Each bit of 'data_map' and 'prot_map' fields corresponds to one among 32
+ * protection configuration registers e.g., SPECIAL GLBL PRIV regs (starting at
+ * offset 0xE80). '1' in 'data_map' means protection configuration to be done
+ * using configuration in data array. '0' in 'data_map" means protection
+ * configuration to be done as per the value of corresponding bit in 'prot_map'.
+ * '1' in 'prot_map' means the register to be programmed with 0xFFFFFFFF
+ * (all non-protected). '0' in 'prot_map' means the register to be programmed
+ * with 0x0 (all protected).
+ *
+ * e.g., prot_map = 0x00000001, data_map = 0xC0000000 , data = {0xff, 0x12}
+ * SPECIAL_GLBL_PRIV[0] = 0xFFFFFFFF
+ * SPECIAL_GLBL_PRIV[1..29] = 0x0
+ * SPECIAL_GLBL_PRIV[30] = 0xFF
+ * SPECIAL_GLBL_PRIV[31] = 0x12
+ */
+struct hl_automated_pb_cfg {
+ struct hl_special_block_info addr;
+ u32 prot_map;
+ u32 data_map;
+ const u32 *data;
+ u8 data_size;
+};
+
+/* struct hl_special_blocks_cfg - holds special blocks cfg data.
+ *
+ * @priv_automated_pb_cfg: points to the main privileged PB array.
+ * @sec_automated_pb_cfg: points to the main secured PB array.
+ * @skip_blocks_cfg: holds arrays of block types & block ranges to be excluded.
+ * @priv_cfg_size: size of the main privileged PB array.
+ * @sec_cfg_size: size of the main secured PB array.
+ * @prot_lvl_priv: indication if it's a privileged/secured PB configurations.
+ */
+struct hl_special_blocks_cfg {
+ struct hl_automated_pb_cfg *priv_automated_pb_cfg;
+ struct hl_automated_pb_cfg *sec_automated_pb_cfg;
+ struct hl_skip_blocks_cfg *skip_blocks_cfg;
+ u32 priv_cfg_size;
+ u32 sec_cfg_size;
+ u8 prot_lvl_priv;
+};
+
+/* Automated security */
+
+/* struct hl_skip_blocks_cfg - holds arrays of block types & block ranges to be
+ * excluded from special blocks configurations.
+ *
+ * @block_types: an array of block types NOT to be configured.
+ * @block_types_len: len of an array of block types not to be configured.
+ * @block_ranges: an array of block ranges not to be configured.
+ * @block_ranges_len: len of an array of block ranges not to be configured.
+ * @skip_block_hook: hook that will be called before initializing special blocks.
+ */
+struct hl_skip_blocks_cfg {
+ int *block_types;
+ size_t block_types_len;
+ struct range *block_ranges;
+ size_t block_ranges_len;
+ bool (*skip_block_hook)(struct hl_device *hdev,
+ struct hl_special_blocks_cfg *special_blocks_cfg,
+ u32 blk_idx, u32 major, u32 minor, u32 sub_minor);
+};
+
+/**
+ * struct iterate_special_ctx - HW module special block iterator
+ * @fn: function to apply to each HW module special block instance
+ * @data: optional internal data to the function iterator
+ */
+struct iterate_special_ctx {
+ /*
+ * callback for the HW module special block iterator
+ * @hdev: pointer to the habanalabs device structure
+ * @block_id: block (ASIC specific definition can be dcore/hdcore)
+ * @major: major block index within block_id
+ * @minor: minor block index within the major block
+ * @sub_minor: sub_minor block index within the minor block
+ * @data: function specific data
+ */
+ int (*fn)(struct hl_device *hdev, u32 block_id, u32 major, u32 minor,
+ u32 sub_minor, void *data);
+ void *data;
+};
+
+int hl_iterate_special_blocks(struct hl_device *hdev, struct iterate_special_ctx *ctx);
+void hl_check_for_glbl_errors(struct hl_device *hdev);
+
+#endif /* SECURITY_H_ */
diff --git a/drivers/misc/habanalabs/common/state_dump.c b/drivers/accel/habanalabs/common/state_dump.c
index 74726907c95e..3a9931f24259 100644
--- a/drivers/misc/habanalabs/common/state_dump.c
+++ b/drivers/accel/habanalabs/common/state_dump.c
@@ -6,7 +6,7 @@
*/
#include <linux/vmalloc.h>
-#include <uapi/misc/habanalabs.h>
+#include <uapi/drm/habanalabs_accel.h>
#include "habanalabs.h"
/**
diff --git a/drivers/misc/habanalabs/common/sysfs.c b/drivers/accel/habanalabs/common/sysfs.c
index 735d8bed0066..735d8bed0066 100644
--- a/drivers/misc/habanalabs/common/sysfs.c
+++ b/drivers/accel/habanalabs/common/sysfs.c
diff --git a/drivers/misc/habanalabs/gaudi/Makefile b/drivers/accel/habanalabs/gaudi/Makefile
index 10577c33a816..10577c33a816 100644
--- a/drivers/misc/habanalabs/gaudi/Makefile
+++ b/drivers/accel/habanalabs/gaudi/Makefile
diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/accel/habanalabs/gaudi/gaudi.c
index 9f5e208701ba..bb858b94e1e8 100644
--- a/drivers/misc/habanalabs/gaudi/gaudi.c
+++ b/drivers/accel/habanalabs/gaudi/gaudi.c
@@ -701,6 +701,8 @@ static int gaudi_set_fixed_properties(struct hl_device *hdev)
prop->dma_mask = 48;
+ prop->hbw_flush_reg = mmPCIE_WRAP_RR_ELBI_RD_SEC_REG_CTRL;
+
return 0;
}
@@ -4236,8 +4238,8 @@ static int gaudi_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
{
int rc;
- vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
- VM_DONTCOPY | VM_NORESERVE;
+ vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
+ VM_DONTCOPY | VM_NORESERVE);
rc = dma_mmap_coherent(hdev->dev, vma, cpu_addr,
(dma_addr - HOST_PHYS_BASE), size);
@@ -6432,12 +6434,6 @@ static int gaudi_send_job_on_qman0(struct hl_device *hdev,
else
timeout = HL_DEVICE_TIMEOUT_USEC;
- if (!hdev->asic_funcs->is_device_idle(hdev, NULL, 0, NULL)) {
- dev_err_ratelimited(hdev->dev,
- "Can't send driver job on QMAN0 because the device is not idle\n");
- return -EBUSY;
- }
-
fence_ptr = hl_asic_dma_pool_zalloc(hdev, 4, GFP_KERNEL, &fence_dma_addr);
if (!fence_ptr) {
dev_err(hdev->dev,
@@ -7584,7 +7580,7 @@ static int tpc_krn_event_to_tpc_id(u16 tpc_dec_event_type)
return (tpc_dec_event_type - GAUDI_EVENT_TPC0_KRN_ERR) / 6;
}
-static void gaudi_print_clk_change_info(struct hl_device *hdev, u16 event_type)
+static void gaudi_print_clk_change_info(struct hl_device *hdev, u16 event_type, u64 *event_mask)
{
ktime_t zero_time = ktime_set(0, 0);
@@ -7612,6 +7608,7 @@ static void gaudi_print_clk_change_info(struct hl_device *hdev, u16 event_type)
hdev->clk_throttling.aggregated_reason |= HL_CLK_THROTTLE_THERMAL;
hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_THERMAL].start = ktime_get();
hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_THERMAL].end = zero_time;
+ *event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
dev_info_ratelimited(hdev->dev,
"Clock throttling due to overheating\n");
break;
@@ -7619,6 +7616,7 @@ static void gaudi_print_clk_change_info(struct hl_device *hdev, u16 event_type)
case GAUDI_EVENT_FIX_THERMAL_ENV_E:
hdev->clk_throttling.current_reason &= ~HL_CLK_THROTTLE_THERMAL;
hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_THERMAL].end = ktime_get();
+ *event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
dev_info_ratelimited(hdev->dev,
"Thermal envelop is safe, back to optimal clock\n");
break;
@@ -7887,8 +7885,7 @@ static void gaudi_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entr
break;
case GAUDI_EVENT_FIX_POWER_ENV_S ... GAUDI_EVENT_FIX_THERMAL_ENV_E:
- event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
- gaudi_print_clk_change_info(hdev, event_type);
+ gaudi_print_clk_change_info(hdev, event_type, &event_mask);
hl_fw_unmask_irq(hdev, event_type);
break;
@@ -9133,6 +9130,16 @@ static u32 *gaudi_get_stream_master_qid_arr(void)
return gaudi_stream_master;
}
+static int gaudi_set_dram_properties(struct hl_device *hdev)
+{
+ return 0;
+}
+
+static int gaudi_set_binning_masks(struct hl_device *hdev)
+{
+ return 0;
+}
+
static void gaudi_check_if_razwi_happened(struct hl_device *hdev)
{
}
@@ -9259,6 +9266,8 @@ static const struct hl_asic_funcs gaudi_funcs = {
.access_dev_mem = hl_access_dev_mem,
.set_dram_bar_base = gaudi_set_hbm_bar_base,
.send_device_activity = gaudi_send_device_activity,
+ .set_dram_properties = gaudi_set_dram_properties,
+ .set_binning_masks = gaudi_set_binning_masks,
};
/**
diff --git a/drivers/misc/habanalabs/gaudi/gaudiP.h b/drivers/accel/habanalabs/gaudi/gaudiP.h
index 4fbcf3f0afe5..3d88d56c8eb3 100644
--- a/drivers/misc/habanalabs/gaudi/gaudiP.h
+++ b/drivers/accel/habanalabs/gaudi/gaudiP.h
@@ -8,7 +8,7 @@
#ifndef GAUDIP_H_
#define GAUDIP_H_
-#include <uapi/misc/habanalabs.h>
+#include <uapi/drm/habanalabs_accel.h>
#include "../common/habanalabs.h"
#include "../include/common/hl_boot_if.h"
#include "../include/gaudi/gaudi_packets.h"
diff --git a/drivers/misc/habanalabs/gaudi/gaudi_coresight.c b/drivers/accel/habanalabs/gaudi/gaudi_coresight.c
index 08108f5fed67..3455b14554c6 100644
--- a/drivers/misc/habanalabs/gaudi/gaudi_coresight.c
+++ b/drivers/accel/habanalabs/gaudi/gaudi_coresight.c
@@ -11,7 +11,8 @@
#include "../include/gaudi/gaudi_masks.h"
#include "../include/gaudi/gaudi_reg_map.h"
-#include <uapi/misc/habanalabs.h>
+#include <uapi/drm/habanalabs_accel.h>
+
#define SPMU_SECTION_SIZE MME0_ACC_SPMU_MAX_OFFSET
#define SPMU_EVENT_TYPES_OFFSET 0x400
#define SPMU_MAX_COUNTERS 6
diff --git a/drivers/misc/habanalabs/gaudi/gaudi_security.c b/drivers/accel/habanalabs/gaudi/gaudi_security.c
index 81a3c79a8bc6..81a3c79a8bc6 100644
--- a/drivers/misc/habanalabs/gaudi/gaudi_security.c
+++ b/drivers/accel/habanalabs/gaudi/gaudi_security.c
diff --git a/drivers/misc/habanalabs/gaudi2/Makefile b/drivers/accel/habanalabs/gaudi2/Makefile
index 1e047883ba74..1e047883ba74 100644
--- a/drivers/misc/habanalabs/gaudi2/Makefile
+++ b/drivers/accel/habanalabs/gaudi2/Makefile
diff --git a/drivers/misc/habanalabs/gaudi2/gaudi2.c b/drivers/accel/habanalabs/gaudi2/gaudi2.c
index e793fb2bdcbe..6f415fa94eee 100644
--- a/drivers/misc/habanalabs/gaudi2/gaudi2.c
+++ b/drivers/accel/habanalabs/gaudi2/gaudi2.c
@@ -7,6 +7,7 @@
#include "gaudi2P.h"
#include "gaudi2_masks.h"
+#include "../include/gaudi2/gaudi2_special_blocks.h"
#include "../include/hw_ip/mmu/mmu_general.h"
#include "../include/hw_ip/mmu/mmu_v2_0.h"
#include "../include/gaudi2/gaudi2_packets.h"
@@ -53,6 +54,7 @@
#define GAUDI2_HIF_HMMU_FULL_MASK 0xFFFF
#define GAUDI2_DECODER_FULL_MASK 0x3FF
+#define GAUDI2_NA_EVENT_CAUSE 0xFF
#define GAUDI2_NUM_OF_QM_ERR_CAUSE 18
#define GAUDI2_NUM_OF_QM_LCP_ERR_CAUSE 25
#define GAUDI2_NUM_OF_QM_ARB_ERR_CAUSE 3
@@ -675,14 +677,13 @@ static const char * const gaudi2_kdma_core_interrupts_cause[GAUDI2_NUM_OF_DMA_CO
struct gaudi2_sm_sei_cause_data {
const char *cause_name;
const char *log_name;
- u32 log_mask;
};
static const struct gaudi2_sm_sei_cause_data
gaudi2_sm_sei_cause[GAUDI2_NUM_OF_SM_SEI_ERR_CAUSE] = {
- {"calculated SO value overflow/underflow", "SOB group ID", 0x7FF},
- {"payload address of monitor is not aligned to 4B", "monitor addr", 0xFFFF},
- {"armed monitor write got BRESP (SLVERR or DECERR)", "AXI id", 0xFFFF},
+ {"calculated SO value overflow/underflow", "SOB ID"},
+ {"payload address of monitor is not aligned to 4B", "monitor addr"},
+ {"armed monitor write got BRESP (SLVERR or DECERR)", "AXI id"},
};
static const char * const
@@ -1568,7 +1569,7 @@ enum rtr_id {
DCORE3_RTR7,
};
-static const u32 gaudi2_tpc_initiator_rtr_id[NUM_OF_TPC_PER_DCORE * NUM_OF_DCORES + 1] = {
+static const u32 gaudi2_tpc_initiator_hbw_rtr_id[NUM_OF_TPC_PER_DCORE * NUM_OF_DCORES + 1] = {
DCORE0_RTR1, DCORE0_RTR1, DCORE0_RTR2, DCORE0_RTR2, DCORE0_RTR3, DCORE0_RTR3,
DCORE1_RTR6, DCORE1_RTR6, DCORE1_RTR5, DCORE1_RTR5, DCORE1_RTR4, DCORE1_RTR4,
DCORE2_RTR3, DCORE2_RTR3, DCORE2_RTR2, DCORE2_RTR2, DCORE2_RTR1, DCORE2_RTR1,
@@ -1576,33 +1577,61 @@ static const u32 gaudi2_tpc_initiator_rtr_id[NUM_OF_TPC_PER_DCORE * NUM_OF_DCORE
DCORE0_RTR0
};
-static const u32 gaudi2_dec_initiator_rtr_id[NUMBER_OF_DEC] = {
+static const u32 gaudi2_tpc_initiator_lbw_rtr_id[NUM_OF_TPC_PER_DCORE * NUM_OF_DCORES + 1] = {
+ DCORE0_RTR1, DCORE0_RTR1, DCORE0_RTR1, DCORE0_RTR1, DCORE0_RTR2, DCORE0_RTR2,
+ DCORE1_RTR7, DCORE1_RTR7, DCORE1_RTR6, DCORE1_RTR6, DCORE1_RTR5, DCORE1_RTR5,
+ DCORE2_RTR2, DCORE2_RTR2, DCORE2_RTR1, DCORE2_RTR1, DCORE2_RTR0, DCORE2_RTR0,
+ DCORE3_RTR5, DCORE3_RTR5, DCORE3_RTR6, DCORE3_RTR6, DCORE3_RTR7, DCORE3_RTR7,
+ DCORE0_RTR0
+};
+
+static const u32 gaudi2_dec_initiator_hbw_rtr_id[NUMBER_OF_DEC] = {
DCORE0_RTR0, DCORE0_RTR0, DCORE1_RTR7, DCORE1_RTR7, DCORE2_RTR0, DCORE2_RTR0,
DCORE3_RTR7, DCORE3_RTR7, DCORE0_RTR0, DCORE0_RTR0
};
-static const u32 gaudi2_nic_initiator_rtr_id[NIC_NUMBER_OF_MACROS] = {
+static const u32 gaudi2_dec_initiator_lbw_rtr_id[NUMBER_OF_DEC] = {
+ DCORE0_RTR1, DCORE0_RTR1, DCORE1_RTR6, DCORE1_RTR6, DCORE2_RTR1, DCORE2_RTR1,
+ DCORE3_RTR6, DCORE3_RTR6, DCORE0_RTR0, DCORE0_RTR0
+};
+
+static const u32 gaudi2_nic_initiator_hbw_rtr_id[NIC_NUMBER_OF_MACROS] = {
DCORE1_RTR7, DCORE1_RTR7, DCORE1_RTR7, DCORE1_RTR7, DCORE1_RTR7, DCORE2_RTR0,
DCORE2_RTR0, DCORE2_RTR0, DCORE2_RTR0, DCORE3_RTR7, DCORE3_RTR7, DCORE3_RTR7
};
-struct sft_info {
- u8 interface_id;
- u8 dcore_id;
+static const u32 gaudi2_nic_initiator_lbw_rtr_id[NIC_NUMBER_OF_MACROS] = {
+ DCORE1_RTR7, DCORE1_RTR7, DCORE1_RTR7, DCORE1_RTR7, DCORE1_RTR7, DCORE2_RTR0,
+ DCORE2_RTR0, DCORE2_RTR0, DCORE2_RTR0, DCORE3_RTR7, DCORE3_RTR7, DCORE3_RTR7
};
-static const struct sft_info gaudi2_edma_initiator_sft_id[NUM_OF_EDMA_PER_DCORE * NUM_OF_DCORES] = {
- {0, 0}, {1, 0}, {0, 1}, {1, 1}, {1, 2}, {1, 3}, {0, 2}, {0, 3},
+static const u32 gaudi2_edma_initiator_hbw_sft[NUM_OF_EDMA_PER_DCORE * NUM_OF_DCORES] = {
+ mmSFT0_HBW_RTR_IF1_MSTR_IF_RR_SHRD_HBW_BASE,
+ mmSFT0_HBW_RTR_IF0_MSTR_IF_RR_SHRD_HBW_BASE,
+ mmSFT1_HBW_RTR_IF1_MSTR_IF_RR_SHRD_HBW_BASE,
+ mmSFT1_HBW_RTR_IF0_MSTR_IF_RR_SHRD_HBW_BASE,
+ mmSFT2_HBW_RTR_IF0_MSTR_IF_RR_SHRD_HBW_BASE,
+ mmSFT2_HBW_RTR_IF1_MSTR_IF_RR_SHRD_HBW_BASE,
+ mmSFT3_HBW_RTR_IF0_MSTR_IF_RR_SHRD_HBW_BASE,
+ mmSFT3_HBW_RTR_IF1_MSTR_IF_RR_SHRD_HBW_BASE
};
-static const u32 gaudi2_pdma_initiator_rtr_id[NUM_OF_PDMA] = {
+static const u32 gaudi2_pdma_initiator_hbw_rtr_id[NUM_OF_PDMA] = {
DCORE0_RTR0, DCORE0_RTR0
};
-static const u32 gaudi2_rot_initiator_rtr_id[NUM_OF_ROT] = {
+static const u32 gaudi2_pdma_initiator_lbw_rtr_id[NUM_OF_PDMA] = {
+ DCORE0_RTR2, DCORE0_RTR2
+};
+
+static const u32 gaudi2_rot_initiator_hbw_rtr_id[NUM_OF_ROT] = {
DCORE2_RTR0, DCORE3_RTR7
};
+static const u32 gaudi2_rot_initiator_lbw_rtr_id[NUM_OF_ROT] = {
+ DCORE2_RTR2, DCORE3_RTR5
+};
+
struct mme_initiators_rtr_id {
u32 wap0;
u32 wap1;
@@ -1655,6 +1684,30 @@ struct hbm_mc_error_causes {
char cause[50];
};
+static struct hl_special_block_info gaudi2_special_blocks[] = GAUDI2_SPECIAL_BLOCKS;
+
+/* Special blocks iterator is currently used to configure security protection bits,
+ * and read global errors. Most HW blocks are addressable and those who aren't (N/A)-
+ * must be skipped. Following configurations are commonly used for both PB config
+ * and global error reading, since currently they both share the same settings.
+ * Once it changes, we must remember to use separate configurations for either one.
+ */
+static int gaudi2_iterator_skip_block_types[] = {
+ GAUDI2_BLOCK_TYPE_PLL,
+ GAUDI2_BLOCK_TYPE_EU_BIST,
+ GAUDI2_BLOCK_TYPE_HBM,
+ GAUDI2_BLOCK_TYPE_XFT
+};
+
+static struct range gaudi2_iterator_skip_block_ranges[] = {
+ /* Skip all PSOC blocks except for PSOC_GLOBAL_CONF */
+ {mmPSOC_I2C_M0_BASE, mmPSOC_EFUSE_BASE},
+ {mmPSOC_BTL_BASE, mmPSOC_MSTR_IF_RR_SHRD_HBW_BASE},
+ /* Skip all CPU blocks except for CPU_IF */
+ {mmCPU_CA53_CFG_BASE, mmCPU_CA53_CFG_BASE},
+ {mmCPU_TIMESTAMP_BASE, mmCPU_MSTR_IF_RR_SHRD_HBW_BASE}
+};
+
static struct hbm_mc_error_causes hbm_mc_spi[GAUDI2_NUM_OF_HBM_MC_SPI_CAUSE] = {
{HBM_MC_SPI_TEMP_PIN_CHG_MASK, "temperature pins changed"},
{HBM_MC_SPI_THR_ENG_MASK, "temperature-based throttling engaged"},
@@ -2070,6 +2123,8 @@ static int gaudi2_set_fixed_properties(struct hl_device *hdev)
prop->dma_mask = 64;
+ prop->hbw_flush_reg = mmPCIE_WRAP_SPECIAL_GLBL_SPARE_0;
+
return 0;
}
@@ -2434,6 +2489,25 @@ static int gaudi2_set_cluster_binning_masks(struct hl_device *hdev)
return 0;
}
+static int gaudi2_set_binning_masks(struct hl_device *hdev)
+{
+ int rc;
+
+ rc = gaudi2_set_cluster_binning_masks(hdev);
+ if (rc)
+ return rc;
+
+ rc = gaudi2_set_tpc_binning_masks(hdev);
+ if (rc)
+ return rc;
+
+ rc = gaudi2_set_dec_binning_masks(hdev);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
static int gaudi2_cpucp_info_get(struct hl_device *hdev)
{
struct gaudi2_device *gaudi2 = hdev->asic_specific;
@@ -2485,19 +2559,11 @@ static int gaudi2_cpucp_info_get(struct hl_device *hdev)
* at this point the DRAM parameters need to be updated according to data obtained
* from the FW
*/
- rc = gaudi2_set_dram_properties(hdev);
+ rc = hdev->asic_funcs->set_dram_properties(hdev);
if (rc)
return rc;
- rc = gaudi2_set_cluster_binning_masks(hdev);
- if (rc)
- return rc;
-
- rc = gaudi2_set_tpc_binning_masks(hdev);
- if (rc)
- return rc;
-
- rc = gaudi2_set_dec_binning_masks(hdev);
+ rc = hdev->asic_funcs->set_binning_masks(hdev);
if (rc)
return rc;
@@ -2925,11 +2991,11 @@ static void gaudi2_user_interrupt_setup(struct hl_device *hdev)
/* Initialize common user CQ interrupt */
HL_USR_INTR_STRUCT_INIT(hdev->common_user_cq_interrupt, hdev,
- HL_COMMON_USER_CQ_INTERRUPT_ID, false);
+ HL_COMMON_USER_CQ_INTERRUPT_ID, HL_USR_INTERRUPT_CQ);
/* Initialize common decoder interrupt */
HL_USR_INTR_STRUCT_INIT(hdev->common_decoder_interrupt, hdev,
- HL_COMMON_DEC_INTERRUPT_ID, true);
+ HL_COMMON_DEC_INTERRUPT_ID, HL_USR_INTERRUPT_DECODER);
/* User interrupts structure holds both decoder and user interrupts from various engines.
* We first initialize the decoder interrupts and then we add the user interrupts.
@@ -2942,10 +3008,11 @@ static void gaudi2_user_interrupt_setup(struct hl_device *hdev)
*/
for (i = GAUDI2_IRQ_NUM_DCORE0_DEC0_NRM, j = 0 ; i <= GAUDI2_IRQ_NUM_SHARED_DEC1_NRM;
i += 2, j++)
- HL_USR_INTR_STRUCT_INIT(hdev->user_interrupt[j], hdev, i, true);
+ HL_USR_INTR_STRUCT_INIT(hdev->user_interrupt[j], hdev, i,
+ HL_USR_INTERRUPT_DECODER);
for (i = GAUDI2_IRQ_NUM_USER_FIRST, k = 0 ; k < prop->user_interrupt_count; i++, j++, k++)
- HL_USR_INTR_STRUCT_INIT(hdev->user_interrupt[j], hdev, i, false);
+ HL_USR_INTR_STRUCT_INIT(hdev->user_interrupt[j], hdev, i, HL_USR_INTERRUPT_CQ);
}
static inline int gaudi2_get_non_zero_random_int(void)
@@ -2955,6 +3022,99 @@ static inline int gaudi2_get_non_zero_random_int(void)
return rand ? rand : 1;
}
+static void gaudi2_special_blocks_free(struct hl_device *hdev)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct hl_skip_blocks_cfg *skip_special_blocks_cfg =
+ &prop->skip_special_blocks_cfg;
+
+ kfree(prop->special_blocks);
+ kfree(skip_special_blocks_cfg->block_types);
+ kfree(skip_special_blocks_cfg->block_ranges);
+}
+
+static void gaudi2_special_blocks_iterator_free(struct hl_device *hdev)
+{
+ gaudi2_special_blocks_free(hdev);
+}
+
+static bool gaudi2_special_block_skip(struct hl_device *hdev,
+ struct hl_special_blocks_cfg *special_blocks_cfg,
+ u32 blk_idx, u32 major, u32 minor, u32 sub_minor)
+{
+ return false;
+}
+
+static int gaudi2_special_blocks_config(struct hl_device *hdev)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ int i, rc;
+
+ /* Configure Special blocks */
+ prop->glbl_err_cause_num = GAUDI2_NUM_OF_GLBL_ERR_CAUSE;
+ prop->num_of_special_blocks = ARRAY_SIZE(gaudi2_special_blocks);
+ prop->special_blocks = kmalloc_array(prop->num_of_special_blocks,
+ sizeof(*prop->special_blocks), GFP_KERNEL);
+ if (!prop->special_blocks)
+ return -ENOMEM;
+
+ for (i = 0 ; i < prop->num_of_special_blocks ; i++)
+ memcpy(&prop->special_blocks[i], &gaudi2_special_blocks[i],
+ sizeof(*prop->special_blocks));
+
+ /* Configure when to skip Special blocks */
+ memset(&prop->skip_special_blocks_cfg, 0, sizeof(prop->skip_special_blocks_cfg));
+ prop->skip_special_blocks_cfg.skip_block_hook = gaudi2_special_block_skip;
+
+ if (ARRAY_SIZE(gaudi2_iterator_skip_block_types)) {
+ prop->skip_special_blocks_cfg.block_types =
+ kmalloc_array(ARRAY_SIZE(gaudi2_iterator_skip_block_types),
+ sizeof(gaudi2_iterator_skip_block_types[0]), GFP_KERNEL);
+ if (!prop->skip_special_blocks_cfg.block_types) {
+ rc = -ENOMEM;
+ goto free_special_blocks;
+ }
+
+ memcpy(prop->skip_special_blocks_cfg.block_types, gaudi2_iterator_skip_block_types,
+ sizeof(gaudi2_iterator_skip_block_types));
+
+ prop->skip_special_blocks_cfg.block_types_len =
+ ARRAY_SIZE(gaudi2_iterator_skip_block_types);
+ }
+
+ if (ARRAY_SIZE(gaudi2_iterator_skip_block_ranges)) {
+ prop->skip_special_blocks_cfg.block_ranges =
+ kmalloc_array(ARRAY_SIZE(gaudi2_iterator_skip_block_ranges),
+ sizeof(gaudi2_iterator_skip_block_ranges[0]), GFP_KERNEL);
+ if (!prop->skip_special_blocks_cfg.block_ranges) {
+ rc = -ENOMEM;
+ goto free_skip_special_blocks_types;
+ }
+
+ for (i = 0 ; i < ARRAY_SIZE(gaudi2_iterator_skip_block_ranges) ; i++)
+ memcpy(&prop->skip_special_blocks_cfg.block_ranges[i],
+ &gaudi2_iterator_skip_block_ranges[i],
+ sizeof(struct range));
+
+ prop->skip_special_blocks_cfg.block_ranges_len =
+ ARRAY_SIZE(gaudi2_iterator_skip_block_ranges);
+ }
+
+ return 0;
+
+free_skip_special_blocks_types:
+ kfree(prop->skip_special_blocks_cfg.block_types);
+free_special_blocks:
+ kfree(prop->special_blocks);
+
+ return rc;
+}
+
+static int gaudi2_special_blocks_iterator_config(struct hl_device *hdev)
+{
+ return gaudi2_special_blocks_config(hdev);
+}
+
static int gaudi2_sw_init(struct hl_device *hdev)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
@@ -3050,8 +3210,15 @@ static int gaudi2_sw_init(struct hl_device *hdev)
hdev->asic_funcs->set_pci_memory_regions(hdev);
+ rc = gaudi2_special_blocks_iterator_config(hdev);
+ if (rc)
+ goto free_scratchpad_mem;
+
return 0;
+free_scratchpad_mem:
+ hl_asic_dma_pool_free(hdev, gaudi2->scratchpad_kernel_address,
+ gaudi2->scratchpad_bus_address);
free_virt_msix_db_mem:
hl_cpu_accessible_dma_pool_free(hdev, prop->pmmu.page_size, gaudi2->virt_msix_db_cpu_addr);
free_cpu_accessible_dma_pool:
@@ -3071,6 +3238,8 @@ static int gaudi2_sw_fini(struct hl_device *hdev)
struct asic_fixed_properties *prop = &hdev->asic_prop;
struct gaudi2_device *gaudi2 = hdev->asic_specific;
+ gaudi2_special_blocks_iterator_free(hdev);
+
hl_cpu_accessible_dma_pool_free(hdev, prop->pmmu.page_size, gaudi2->virt_msix_db_cpu_addr);
gen_pool_destroy(hdev->cpu_accessible_dma_pool);
@@ -5483,7 +5652,31 @@ static void gaudi2_hw_fini(struct hl_device *hdev, bool hard_reset, bool fw_rese
skip_reset:
if (driver_performs_reset || hard_reset)
- gaudi2_poll_btm_indication(hdev, reset_sleep_ms, poll_timeout_us);
+ /*
+ * Instead of waiting for BTM indication we should wait for preboot ready:
+ * Consider the below scenario:
+ * 1. FW update is being triggered
+ * - setting the dirty bit
+ * 2. hard reset will be triggered due to the dirty bit
+ * 3. FW initiates the reset:
+ * - dirty bit cleared
+ * - BTM indication cleared
+ * - preboot ready indication cleared
+ * 4. during hard reset:
+ * - BTM indication will be set
+ * - BIST test performed and another reset triggered
+ * 5. only after this reset the preboot will set the preboot ready
+ *
+ * when polling on BTM indication alone we can lose sync with FW while trying to
+ * communicate with FW that is during reset.
+ * to overcome this we will always wait to preboot ready indication
+ */
+ if ((hdev->fw_components & FW_TYPE_PREBOOT_CPU)) {
+ msleep(reset_sleep_ms);
+ hl_fw_wait_preboot_ready(hdev);
+ } else {
+ gaudi2_poll_btm_indication(hdev, reset_sleep_ms, poll_timeout_us);
+ }
else
gaudi2_get_soft_rst_done_indication(hdev, poll_timeout_us);
@@ -5538,8 +5731,8 @@ static int gaudi2_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
{
int rc;
- vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
- VM_DONTCOPY | VM_NORESERVE;
+ vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
+ VM_DONTCOPY | VM_NORESERVE);
#ifdef _HAS_DMA_MMAP_COHERENT
@@ -6803,38 +6996,37 @@ static inline bool is_info_event(u32 event)
switch (event) {
case GAUDI2_EVENT_CPU_CPLD_SHUTDOWN_CAUSE:
case GAUDI2_EVENT_CPU_FIX_POWER_ENV_S ... GAUDI2_EVENT_CPU_FIX_THERMAL_ENV_E:
+
+ /* return in case of NIC status event - these events are received periodically and not as
+ * an indication to an error.
+ */
+ case GAUDI2_EVENT_CPU0_STATUS_NIC0_ENG0 ... GAUDI2_EVENT_CPU11_STATUS_NIC11_ENG1:
return true;
default:
return false;
}
}
-static void gaudi2_print_irq_info(struct hl_device *hdev, u16 event_type)
+static void gaudi2_print_event(struct hl_device *hdev, u16 event_type,
+ bool ratelimited, const char *fmt, ...)
{
- char desc[64] = "";
- bool event_valid = false;
+ struct va_format vaf;
+ va_list args;
- /* return in case of NIC status event - these events are received periodically and not as
- * an indication to an error, thus not printed.
- */
- if (event_type >= GAUDI2_EVENT_CPU0_STATUS_NIC0_ENG0 &&
- event_type <= GAUDI2_EVENT_CPU11_STATUS_NIC11_ENG1)
- return;
+ va_start(args, fmt);
+ vaf.fmt = fmt;
+ vaf.va = &args;
- if (gaudi2_irq_map_table[event_type].valid) {
- snprintf(desc, sizeof(desc), gaudi2_irq_map_table[event_type].name);
- event_valid = true;
- }
-
- if (!event_valid)
- snprintf(desc, sizeof(desc), "N/A");
-
- if (is_info_event(event_type))
- dev_info_ratelimited(hdev->dev, "Received H/W interrupt %d [\"%s\"]\n",
- event_type, desc);
+ if (ratelimited)
+ dev_err_ratelimited(hdev->dev, "%s: %pV\n",
+ gaudi2_irq_map_table[event_type].valid ?
+ gaudi2_irq_map_table[event_type].name : "N/A Event", &vaf);
else
- dev_err_ratelimited(hdev->dev, "Received H/W interrupt %d [\"%s\"]\n",
- event_type, desc);
+ dev_err(hdev->dev, "%s: %pV\n",
+ gaudi2_irq_map_table[event_type].valid ?
+ gaudi2_irq_map_table[event_type].name : "N/A Event", &vaf);
+
+ va_end(args);
}
static bool gaudi2_handle_ecc_event(struct hl_device *hdev, u16 event_type,
@@ -6847,7 +7039,7 @@ static bool gaudi2_handle_ecc_event(struct hl_device *hdev, u16 event_type,
ecc_syndrom = le64_to_cpu(ecc_data->ecc_syndrom);
memory_wrapper_idx = ecc_data->memory_wrapper_idx;
- dev_err(hdev->dev,
+ gaudi2_print_event(hdev, event_type, !ecc_data->is_critical,
"ECC error detected. address: %#llx. Syndrom: %#llx. block id %u. critical %u.\n",
ecc_address, ecc_syndrom, memory_wrapper_idx, ecc_data->is_critical);
@@ -6987,10 +7179,10 @@ static void print_qman_data_on_err(struct hl_device *hdev, u32 qid_base, u32 str
gaudi2_print_last_pqes_on_err(hdev, qid_base, i, qman_base, false);
}
-static void gaudi2_handle_qman_err_generic(struct hl_device *hdev, const char *qm_name,
- u64 qman_base, u32 qid_base)
+static int gaudi2_handle_qman_err_generic(struct hl_device *hdev, u16 event_type,
+ u64 qman_base, u32 qid_base)
{
- u32 i, j, glbl_sts_val, arb_err_val, num_error_causes;
+ u32 i, j, glbl_sts_val, arb_err_val, num_error_causes, error_count = 0;
u64 glbl_sts_addr, arb_err_addr;
char reg_desc[32];
@@ -7013,12 +7205,14 @@ static void gaudi2_handle_qman_err_generic(struct hl_device *hdev, const char *q
}
for (j = 0 ; j < num_error_causes ; j++)
- if (glbl_sts_val & BIT(j))
- dev_err_ratelimited(hdev->dev, "%s %s. err cause: %s\n",
- qm_name, reg_desc,
- i == QMAN_STREAMS ?
- gaudi2_qman_lower_cp_error_cause[j] :
- gaudi2_qman_error_cause[j]);
+ if (glbl_sts_val & BIT(j)) {
+ gaudi2_print_event(hdev, event_type, true,
+ "%s. err cause: %s", reg_desc,
+ i == QMAN_STREAMS ?
+ gaudi2_qman_lower_cp_error_cause[j] :
+ gaudi2_qman_error_cause[j]);
+ error_count++;
+ }
print_qman_data_on_err(hdev, qid_base, i, qman_base);
}
@@ -7026,18 +7220,23 @@ static void gaudi2_handle_qman_err_generic(struct hl_device *hdev, const char *q
arb_err_val = RREG32(arb_err_addr);
if (!arb_err_val)
- return;
+ goto out;
for (j = 0 ; j < GAUDI2_NUM_OF_QM_ARB_ERR_CAUSE ; j++) {
- if (arb_err_val & BIT(j))
- dev_err_ratelimited(hdev->dev, "%s ARB_ERR. err cause: %s\n",
- qm_name, gaudi2_qman_arb_error_cause[j]);
+ if (arb_err_val & BIT(j)) {
+ gaudi2_print_event(hdev, event_type, true,
+ "ARB_ERR. err cause: %s",
+ gaudi2_qman_arb_error_cause[j]);
+ error_count++;
+ }
}
+
+out:
+ return error_count;
}
static void gaudi2_razwi_rr_hbw_shared_printf_info(struct hl_device *hdev,
u64 rtr_mstr_if_base_addr, bool is_write, char *name,
- bool read_razwi_regs, struct hl_eq_razwi_info *razwi_info,
enum gaudi2_engine_id id, u64 *event_mask)
{
u32 razwi_hi, razwi_lo, razwi_xy;
@@ -7045,26 +7244,14 @@ static void gaudi2_razwi_rr_hbw_shared_printf_info(struct hl_device *hdev,
u8 rd_wr_flag;
if (is_write) {
- if (read_razwi_regs) {
- razwi_hi = RREG32(rtr_mstr_if_base_addr + RR_SHRD_HBW_AW_RAZWI_HI);
- razwi_lo = RREG32(rtr_mstr_if_base_addr + RR_SHRD_HBW_AW_RAZWI_LO);
- razwi_xy = RREG32(rtr_mstr_if_base_addr + RR_SHRD_HBW_AW_RAZWI_XY);
- } else {
- razwi_hi = le32_to_cpu(razwi_info->hbw.rr_aw_razwi_hi_reg);
- razwi_lo = le32_to_cpu(razwi_info->hbw.rr_aw_razwi_lo_reg);
- razwi_xy = le32_to_cpu(razwi_info->hbw.rr_aw_razwi_id_reg);
- }
+ razwi_hi = RREG32(rtr_mstr_if_base_addr + RR_SHRD_HBW_AW_RAZWI_HI);
+ razwi_lo = RREG32(rtr_mstr_if_base_addr + RR_SHRD_HBW_AW_RAZWI_LO);
+ razwi_xy = RREG32(rtr_mstr_if_base_addr + RR_SHRD_HBW_AW_RAZWI_XY);
rd_wr_flag = HL_RAZWI_WRITE;
} else {
- if (read_razwi_regs) {
- razwi_hi = RREG32(rtr_mstr_if_base_addr + RR_SHRD_HBW_AR_RAZWI_HI);
- razwi_lo = RREG32(rtr_mstr_if_base_addr + RR_SHRD_HBW_AR_RAZWI_LO);
- razwi_xy = RREG32(rtr_mstr_if_base_addr + RR_SHRD_HBW_AR_RAZWI_XY);
- } else {
- razwi_hi = le32_to_cpu(razwi_info->hbw.rr_ar_razwi_hi_reg);
- razwi_lo = le32_to_cpu(razwi_info->hbw.rr_ar_razwi_lo_reg);
- razwi_xy = le32_to_cpu(razwi_info->hbw.rr_ar_razwi_id_reg);
- }
+ razwi_hi = RREG32(rtr_mstr_if_base_addr + RR_SHRD_HBW_AR_RAZWI_HI);
+ razwi_lo = RREG32(rtr_mstr_if_base_addr + RR_SHRD_HBW_AR_RAZWI_LO);
+ razwi_xy = RREG32(rtr_mstr_if_base_addr + RR_SHRD_HBW_AR_RAZWI_XY);
rd_wr_flag = HL_RAZWI_READ;
}
@@ -7078,38 +7265,26 @@ static void gaudi2_razwi_rr_hbw_shared_printf_info(struct hl_device *hdev,
static void gaudi2_razwi_rr_lbw_shared_printf_info(struct hl_device *hdev,
u64 rtr_mstr_if_base_addr, bool is_write, char *name,
- bool read_razwi_regs, struct hl_eq_razwi_info *razwi_info,
enum gaudi2_engine_id id, u64 *event_mask)
{
- u32 razwi_addr, razwi_xy;
+ u64 razwi_addr = CFG_BASE;
+ u32 razwi_xy;
u16 eng_id = id;
u8 rd_wr_flag;
if (is_write) {
- if (read_razwi_regs) {
- razwi_addr = RREG32(rtr_mstr_if_base_addr + RR_SHRD_LBW_AW_RAZWI);
- razwi_xy = RREG32(rtr_mstr_if_base_addr + RR_SHRD_LBW_AW_RAZWI_XY);
- } else {
- razwi_addr = le32_to_cpu(razwi_info->lbw.rr_aw_razwi_reg);
- razwi_xy = le32_to_cpu(razwi_info->lbw.rr_aw_razwi_id_reg);
- }
-
+ razwi_addr += RREG32(rtr_mstr_if_base_addr + RR_SHRD_LBW_AW_RAZWI);
+ razwi_xy = RREG32(rtr_mstr_if_base_addr + RR_SHRD_LBW_AW_RAZWI_XY);
rd_wr_flag = HL_RAZWI_WRITE;
} else {
- if (read_razwi_regs) {
- razwi_addr = RREG32(rtr_mstr_if_base_addr + RR_SHRD_LBW_AR_RAZWI);
- razwi_xy = RREG32(rtr_mstr_if_base_addr + RR_SHRD_LBW_AR_RAZWI_XY);
- } else {
- razwi_addr = le32_to_cpu(razwi_info->lbw.rr_ar_razwi_reg);
- razwi_xy = le32_to_cpu(razwi_info->lbw.rr_ar_razwi_id_reg);
- }
-
+ razwi_addr += RREG32(rtr_mstr_if_base_addr + RR_SHRD_LBW_AR_RAZWI);
+ razwi_xy = RREG32(rtr_mstr_if_base_addr + RR_SHRD_LBW_AR_RAZWI_XY);
rd_wr_flag = HL_RAZWI_READ;
}
hl_handle_razwi(hdev, razwi_addr, &eng_id, 1, rd_wr_flag | HL_RAZWI_LBW, event_mask);
dev_err_ratelimited(hdev->dev,
- "%s-RAZWI SHARED RR LBW %s error, mstr_if 0x%llx, captured address 0x%x Initiator coordinates 0x%x\n",
+ "%s-RAZWI SHARED RR LBW %s error, mstr_if 0x%llx, captured address 0x%llX Initiator coordinates 0x%x\n",
name, is_write ? "WR" : "RD", rtr_mstr_if_base_addr, razwi_addr,
razwi_xy);
}
@@ -7164,183 +7339,148 @@ static enum gaudi2_engine_id gaudi2_razwi_calc_engine_id(struct hl_device *hdev,
*/
static void gaudi2_ack_module_razwi_event_handler(struct hl_device *hdev,
enum razwi_event_sources module, u8 module_idx,
- u8 module_sub_idx, struct hl_eq_razwi_info *razwi_info,
- u64 *event_mask)
+ u8 module_sub_idx, u64 *event_mask)
{
- bool via_sft = false, read_razwi_regs = false;
- u32 rtr_id, dcore_id, dcore_rtr_id, sft_id, eng_id;
- u64 rtr_mstr_if_base_addr;
+ bool via_sft = false;
+ u32 hbw_rtr_id, lbw_rtr_id, dcore_id, dcore_rtr_id, eng_id;
+ u64 hbw_rtr_mstr_if_base_addr, lbw_rtr_mstr_if_base_addr;
u32 hbw_shrd_aw = 0, hbw_shrd_ar = 0;
u32 lbw_shrd_aw = 0, lbw_shrd_ar = 0;
char initiator_name[64];
- if (hdev->pldm || !(hdev->fw_components & FW_TYPE_LINUX) || !razwi_info)
- read_razwi_regs = true;
-
switch (module) {
case RAZWI_TPC:
- rtr_id = gaudi2_tpc_initiator_rtr_id[module_idx];
+ hbw_rtr_id = gaudi2_tpc_initiator_hbw_rtr_id[module_idx];
+
+ /* TODO : remove this check and depend only on tpc routers table
+ * when SW-118828 is resolved
+ */
+ if (!hdev->asic_prop.fw_security_enabled &&
+ ((module_idx == 0) || (module_idx == 1)))
+ lbw_rtr_id = DCORE0_RTR0;
+ else
+ lbw_rtr_id = gaudi2_tpc_initiator_lbw_rtr_id[module_idx];
sprintf(initiator_name, "TPC_%u", module_idx);
break;
case RAZWI_MME:
sprintf(initiator_name, "MME_%u", module_idx);
switch (module_sub_idx) {
case MME_WAP0:
- rtr_id = gaudi2_mme_initiator_rtr_id[module_idx].wap0;
+ hbw_rtr_id = gaudi2_mme_initiator_rtr_id[module_idx].wap0;
break;
case MME_WAP1:
- rtr_id = gaudi2_mme_initiator_rtr_id[module_idx].wap1;
+ hbw_rtr_id = gaudi2_mme_initiator_rtr_id[module_idx].wap1;
break;
case MME_WRITE:
- rtr_id = gaudi2_mme_initiator_rtr_id[module_idx].write;
+ hbw_rtr_id = gaudi2_mme_initiator_rtr_id[module_idx].write;
break;
case MME_READ:
- rtr_id = gaudi2_mme_initiator_rtr_id[module_idx].read;
+ hbw_rtr_id = gaudi2_mme_initiator_rtr_id[module_idx].read;
break;
case MME_SBTE0:
- rtr_id = gaudi2_mme_initiator_rtr_id[module_idx].sbte0;
+ hbw_rtr_id = gaudi2_mme_initiator_rtr_id[module_idx].sbte0;
break;
case MME_SBTE1:
- rtr_id = gaudi2_mme_initiator_rtr_id[module_idx].sbte1;
+ hbw_rtr_id = gaudi2_mme_initiator_rtr_id[module_idx].sbte1;
break;
case MME_SBTE2:
- rtr_id = gaudi2_mme_initiator_rtr_id[module_idx].sbte2;
+ hbw_rtr_id = gaudi2_mme_initiator_rtr_id[module_idx].sbte2;
break;
case MME_SBTE3:
- rtr_id = gaudi2_mme_initiator_rtr_id[module_idx].sbte3;
+ hbw_rtr_id = gaudi2_mme_initiator_rtr_id[module_idx].sbte3;
break;
case MME_SBTE4:
- rtr_id = gaudi2_mme_initiator_rtr_id[module_idx].sbte4;
+ hbw_rtr_id = gaudi2_mme_initiator_rtr_id[module_idx].sbte4;
break;
default:
return;
}
+ lbw_rtr_id = hbw_rtr_id;
break;
case RAZWI_EDMA:
- sft_id = gaudi2_edma_initiator_sft_id[module_idx].interface_id;
- dcore_id = gaudi2_edma_initiator_sft_id[module_idx].dcore_id;
+ hbw_rtr_mstr_if_base_addr = gaudi2_edma_initiator_hbw_sft[module_idx];
+ dcore_id = module_idx / NUM_OF_EDMA_PER_DCORE;
+ /* SFT has separate MSTR_IF for LBW, only there we can
+ * read the LBW razwi related registers
+ */
+ lbw_rtr_mstr_if_base_addr = mmSFT0_LBW_RTR_IF_MSTR_IF_RR_SHRD_HBW_BASE +
+ dcore_id * SFT_DCORE_OFFSET;
via_sft = true;
sprintf(initiator_name, "EDMA_%u", module_idx);
break;
case RAZWI_PDMA:
- rtr_id = gaudi2_pdma_initiator_rtr_id[module_idx];
+ hbw_rtr_id = gaudi2_pdma_initiator_hbw_rtr_id[module_idx];
+ lbw_rtr_id = gaudi2_pdma_initiator_lbw_rtr_id[module_idx];
sprintf(initiator_name, "PDMA_%u", module_idx);
break;
case RAZWI_NIC:
- rtr_id = gaudi2_nic_initiator_rtr_id[module_idx];
+ hbw_rtr_id = gaudi2_nic_initiator_hbw_rtr_id[module_idx];
+ lbw_rtr_id = gaudi2_nic_initiator_lbw_rtr_id[module_idx];
sprintf(initiator_name, "NIC_%u", module_idx);
break;
case RAZWI_DEC:
- rtr_id = gaudi2_dec_initiator_rtr_id[module_idx];
+ hbw_rtr_id = gaudi2_dec_initiator_hbw_rtr_id[module_idx];
+ lbw_rtr_id = gaudi2_dec_initiator_lbw_rtr_id[module_idx];
sprintf(initiator_name, "DEC_%u", module_idx);
break;
case RAZWI_ROT:
- rtr_id = gaudi2_rot_initiator_rtr_id[module_idx];
+ hbw_rtr_id = gaudi2_rot_initiator_hbw_rtr_id[module_idx];
+ lbw_rtr_id = gaudi2_rot_initiator_lbw_rtr_id[module_idx];
sprintf(initiator_name, "ROT_%u", module_idx);
break;
default:
return;
}
- if (!read_razwi_regs) {
- if (le32_to_cpu(razwi_info->razwi_happened_mask) & RAZWI_HAPPENED_HBW) {
- hbw_shrd_aw = le32_to_cpu(razwi_info->razwi_happened_mask) &
- RAZWI_HAPPENED_AW;
- hbw_shrd_ar = le32_to_cpu(razwi_info->razwi_happened_mask) &
- RAZWI_HAPPENED_AR;
- } else if (le32_to_cpu(razwi_info->razwi_happened_mask) & RAZWI_HAPPENED_LBW) {
- lbw_shrd_aw = le32_to_cpu(razwi_info->razwi_happened_mask) &
- RAZWI_HAPPENED_AW;
- lbw_shrd_ar = le32_to_cpu(razwi_info->razwi_happened_mask) &
- RAZWI_HAPPENED_AR;
- }
- rtr_mstr_if_base_addr = 0;
-
- goto dump_info;
- }
-
/* Find router mstr_if register base */
- if (via_sft) {
- rtr_mstr_if_base_addr = mmSFT0_HBW_RTR_IF0_RTR_CTRL_BASE +
- dcore_id * SFT_DCORE_OFFSET +
- sft_id * SFT_IF_OFFSET +
- RTR_MSTR_IF_OFFSET;
- } else {
- dcore_id = rtr_id / NUM_OF_RTR_PER_DCORE;
- dcore_rtr_id = rtr_id % NUM_OF_RTR_PER_DCORE;
- rtr_mstr_if_base_addr = mmDCORE0_RTR0_CTRL_BASE +
+ if (!via_sft) {
+ dcore_id = hbw_rtr_id / NUM_OF_RTR_PER_DCORE;
+ dcore_rtr_id = hbw_rtr_id % NUM_OF_RTR_PER_DCORE;
+ hbw_rtr_mstr_if_base_addr = mmDCORE0_RTR0_CTRL_BASE +
dcore_id * DCORE_OFFSET +
dcore_rtr_id * DCORE_RTR_OFFSET +
RTR_MSTR_IF_OFFSET;
+ lbw_rtr_mstr_if_base_addr = hbw_rtr_mstr_if_base_addr +
+ (((s32)lbw_rtr_id - hbw_rtr_id) * DCORE_RTR_OFFSET);
}
/* Find out event cause by reading "RAZWI_HAPPENED" registers */
- hbw_shrd_aw = RREG32(rtr_mstr_if_base_addr + RR_SHRD_HBW_AW_RAZWI_HAPPENED);
-
- hbw_shrd_ar = RREG32(rtr_mstr_if_base_addr + RR_SHRD_HBW_AR_RAZWI_HAPPENED);
-
- if (via_sft) {
- /* SFT has separate MSTR_IF for LBW, only there we can
- * read the LBW razwi related registers
- */
- u64 base;
-
- base = mmSFT0_HBW_RTR_IF0_RTR_CTRL_BASE + dcore_id * SFT_DCORE_OFFSET +
- RTR_LBW_MSTR_IF_OFFSET;
-
- lbw_shrd_aw = RREG32(base + RR_SHRD_LBW_AW_RAZWI_HAPPENED);
-
- lbw_shrd_ar = RREG32(base + RR_SHRD_LBW_AR_RAZWI_HAPPENED);
- } else {
- lbw_shrd_aw = RREG32(rtr_mstr_if_base_addr + RR_SHRD_LBW_AW_RAZWI_HAPPENED);
-
- lbw_shrd_ar = RREG32(rtr_mstr_if_base_addr + RR_SHRD_LBW_AR_RAZWI_HAPPENED);
- }
-
-dump_info:
- /* check if there is no RR razwi indication at all */
- if (!hbw_shrd_aw && !hbw_shrd_ar && !lbw_shrd_aw && !lbw_shrd_ar)
- return;
+ hbw_shrd_aw = RREG32(hbw_rtr_mstr_if_base_addr + RR_SHRD_HBW_AW_RAZWI_HAPPENED);
+ hbw_shrd_ar = RREG32(hbw_rtr_mstr_if_base_addr + RR_SHRD_HBW_AR_RAZWI_HAPPENED);
+ lbw_shrd_aw = RREG32(lbw_rtr_mstr_if_base_addr + RR_SHRD_LBW_AW_RAZWI_HAPPENED);
+ lbw_shrd_ar = RREG32(lbw_rtr_mstr_if_base_addr + RR_SHRD_LBW_AR_RAZWI_HAPPENED);
eng_id = gaudi2_razwi_calc_engine_id(hdev, module, module_idx);
if (hbw_shrd_aw) {
- gaudi2_razwi_rr_hbw_shared_printf_info(hdev, rtr_mstr_if_base_addr, true,
- initiator_name, read_razwi_regs, razwi_info,
- eng_id, event_mask);
+ gaudi2_razwi_rr_hbw_shared_printf_info(hdev, hbw_rtr_mstr_if_base_addr, true,
+ initiator_name, eng_id, event_mask);
/* Clear event indication */
- if (read_razwi_regs)
- WREG32(rtr_mstr_if_base_addr + RR_SHRD_HBW_AW_RAZWI_HAPPENED, hbw_shrd_aw);
+ WREG32(hbw_rtr_mstr_if_base_addr + RR_SHRD_HBW_AW_RAZWI_HAPPENED, hbw_shrd_aw);
}
if (hbw_shrd_ar) {
- gaudi2_razwi_rr_hbw_shared_printf_info(hdev, rtr_mstr_if_base_addr, false,
- initiator_name, read_razwi_regs, razwi_info,
- eng_id, event_mask);
+ gaudi2_razwi_rr_hbw_shared_printf_info(hdev, hbw_rtr_mstr_if_base_addr, false,
+ initiator_name, eng_id, event_mask);
/* Clear event indication */
- if (read_razwi_regs)
- WREG32(rtr_mstr_if_base_addr + RR_SHRD_HBW_AR_RAZWI_HAPPENED, hbw_shrd_ar);
+ WREG32(hbw_rtr_mstr_if_base_addr + RR_SHRD_HBW_AR_RAZWI_HAPPENED, hbw_shrd_ar);
}
if (lbw_shrd_aw) {
- gaudi2_razwi_rr_lbw_shared_printf_info(hdev, rtr_mstr_if_base_addr, true,
- initiator_name, read_razwi_regs, razwi_info,
- eng_id, event_mask);
+ gaudi2_razwi_rr_lbw_shared_printf_info(hdev, lbw_rtr_mstr_if_base_addr, true,
+ initiator_name, eng_id, event_mask);
/* Clear event indication */
- if (read_razwi_regs)
- WREG32(rtr_mstr_if_base_addr + RR_SHRD_LBW_AW_RAZWI_HAPPENED, lbw_shrd_aw);
+ WREG32(lbw_rtr_mstr_if_base_addr + RR_SHRD_LBW_AW_RAZWI_HAPPENED, lbw_shrd_aw);
}
if (lbw_shrd_ar) {
- gaudi2_razwi_rr_lbw_shared_printf_info(hdev, rtr_mstr_if_base_addr, false,
- initiator_name, read_razwi_regs, razwi_info,
- eng_id, event_mask);
+ gaudi2_razwi_rr_lbw_shared_printf_info(hdev, lbw_rtr_mstr_if_base_addr, false,
+ initiator_name, eng_id, event_mask);
/* Clear event indication */
- if (read_razwi_regs)
- WREG32(rtr_mstr_if_base_addr + RR_SHRD_LBW_AR_RAZWI_HAPPENED, lbw_shrd_ar);
+ WREG32(lbw_rtr_mstr_if_base_addr + RR_SHRD_LBW_AR_RAZWI_HAPPENED, lbw_shrd_ar);
}
}
@@ -7352,42 +7492,38 @@ static void gaudi2_check_if_razwi_happened(struct hl_device *hdev)
/* check all TPCs */
for (mod_idx = 0 ; mod_idx < (NUM_OF_TPC_PER_DCORE * NUM_OF_DCORES + 1) ; mod_idx++) {
if (prop->tpc_enabled_mask & BIT(mod_idx))
- gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_TPC, mod_idx, 0, NULL,
- NULL);
+ gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_TPC, mod_idx, 0, NULL);
}
/* check all MMEs */
for (mod_idx = 0 ; mod_idx < (NUM_OF_MME_PER_DCORE * NUM_OF_DCORES) ; mod_idx++)
for (sub_mod = MME_WAP0 ; sub_mod < MME_INITIATORS_MAX ; sub_mod++)
gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_MME, mod_idx,
- sub_mod, NULL, NULL);
+ sub_mod, NULL);
/* check all EDMAs */
for (mod_idx = 0 ; mod_idx < (NUM_OF_EDMA_PER_DCORE * NUM_OF_DCORES) ; mod_idx++)
if (prop->edma_enabled_mask & BIT(mod_idx))
- gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_EDMA, mod_idx, 0, NULL,
- NULL);
+ gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_EDMA, mod_idx, 0, NULL);
/* check all PDMAs */
for (mod_idx = 0 ; mod_idx < NUM_OF_PDMA ; mod_idx++)
- gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_PDMA, mod_idx, 0, NULL,
- NULL);
+ gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_PDMA, mod_idx, 0, NULL);
/* check all NICs */
for (mod_idx = 0 ; mod_idx < NIC_NUMBER_OF_PORTS ; mod_idx++)
if (hdev->nic_ports_mask & BIT(mod_idx))
gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_NIC, mod_idx >> 1, 0,
- NULL, NULL);
+ NULL);
/* check all DECs */
for (mod_idx = 0 ; mod_idx < NUMBER_OF_DEC ; mod_idx++)
if (prop->decoder_enabled_mask & BIT(mod_idx))
- gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_DEC, mod_idx, 0, NULL,
- NULL);
+ gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_DEC, mod_idx, 0, NULL);
/* check all ROTs */
for (mod_idx = 0 ; mod_idx < NUM_OF_ROT ; mod_idx++)
- gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_ROT, mod_idx, 0, NULL, NULL);
+ gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_ROT, mod_idx, 0, NULL);
}
static const char *gaudi2_get_initiators_name(u32 rtr_id)
@@ -7645,19 +7781,19 @@ static void gaudi2_razwi_unmapped_addr_lbw_printf_info(struct hl_device *hdev, u
u64 *event_mask)
{
u16 engines[HL_RAZWI_MAX_NUM_OF_ENGINES_PER_RTR], num_of_eng;
- u32 razwi_addr;
+ u64 razwi_addr = CFG_BASE;
u8 rd_wr_flag;
num_of_eng = gaudi2_get_razwi_initiators(rtr_id, &engines[0]);
if (is_write) {
- razwi_addr = RREG32(rtr_ctrl_base_addr + DEC_RAZWI_LBW_AW_ADDR);
+ razwi_addr += RREG32(rtr_ctrl_base_addr + DEC_RAZWI_LBW_AW_ADDR);
rd_wr_flag = HL_RAZWI_WRITE;
/* Clear set indication */
WREG32(rtr_ctrl_base_addr + DEC_RAZWI_LBW_AW_SET, 0x1);
} else {
- razwi_addr = RREG32(rtr_ctrl_base_addr + DEC_RAZWI_LBW_AR_ADDR);
+ razwi_addr += RREG32(rtr_ctrl_base_addr + DEC_RAZWI_LBW_AR_ADDR);
rd_wr_flag = HL_RAZWI_READ;
/* Clear set indication */
@@ -7667,7 +7803,7 @@ static void gaudi2_razwi_unmapped_addr_lbw_printf_info(struct hl_device *hdev, u
hl_handle_razwi(hdev, razwi_addr, &engines[0], num_of_eng, rd_wr_flag | HL_RAZWI_LBW,
event_mask);
dev_err_ratelimited(hdev->dev,
- "RAZWI PSOC unmapped LBW %s error, rtr id %u, address %#x\n",
+ "RAZWI PSOC unmapped LBW %s error, rtr id %u, address 0x%llX\n",
is_write ? "WR" : "RD", rtr_id, razwi_addr);
dev_err_ratelimited(hdev->dev,
@@ -7675,17 +7811,17 @@ static void gaudi2_razwi_unmapped_addr_lbw_printf_info(struct hl_device *hdev, u
}
/* PSOC RAZWI interrupt occurs only when trying to access a bad address */
-static void gaudi2_ack_psoc_razwi_event_handler(struct hl_device *hdev, u64 *event_mask)
+static int gaudi2_ack_psoc_razwi_event_handler(struct hl_device *hdev, u64 *event_mask)
{
u32 hbw_aw_set, hbw_ar_set, lbw_aw_set, lbw_ar_set, rtr_id, dcore_id, dcore_rtr_id, xy,
- razwi_mask_info, razwi_intr = 0;
+ razwi_mask_info, razwi_intr = 0, error_count = 0;
int rtr_map_arr_len = NUM_OF_RTR_PER_DCORE * NUM_OF_DCORES;
u64 rtr_ctrl_base_addr;
if (hdev->pldm || !(hdev->fw_components & FW_TYPE_LINUX)) {
razwi_intr = RREG32(mmPSOC_GLOBAL_CONF_RAZWI_INTERRUPT);
if (!razwi_intr)
- return;
+ return 0;
}
razwi_mask_info = RREG32(mmPSOC_GLOBAL_CONF_RAZWI_MASK_INFO);
@@ -7743,33 +7879,41 @@ static void gaudi2_ack_psoc_razwi_event_handler(struct hl_device *hdev, u64 *eve
gaudi2_razwi_unmapped_addr_lbw_printf_info(hdev, rtr_id,
rtr_ctrl_base_addr, false, event_mask);
+ error_count++;
+
clear:
/* Clear Interrupts only on pldm or if f/w doesn't handle interrupts */
if (hdev->pldm || !(hdev->fw_components & FW_TYPE_LINUX))
WREG32(mmPSOC_GLOBAL_CONF_RAZWI_INTERRUPT, razwi_intr);
+
+ return error_count;
}
-static void _gaudi2_handle_qm_sei_err(struct hl_device *hdev, u64 qman_base)
+static int _gaudi2_handle_qm_sei_err(struct hl_device *hdev, u64 qman_base, u16 event_type)
{
- u32 i, sts_val, sts_clr_val = 0;
+ u32 i, sts_val, sts_clr_val = 0, error_count = 0;
sts_val = RREG32(qman_base + QM_SEI_STATUS_OFFSET);
for (i = 0 ; i < GAUDI2_NUM_OF_QM_SEI_ERR_CAUSE ; i++) {
if (sts_val & BIT(i)) {
- dev_err_ratelimited(hdev->dev, "QM SEI. err cause: %s\n",
- gaudi2_qm_sei_error_cause[i]);
+ gaudi2_print_event(hdev, event_type, true,
+ "err cause: %s", gaudi2_qm_sei_error_cause[i]);
sts_clr_val |= BIT(i);
+ error_count++;
}
}
WREG32(qman_base + QM_SEI_STATUS_OFFSET, sts_clr_val);
+
+ return error_count;
}
-static void gaudi2_handle_qm_sei_err(struct hl_device *hdev, u16 event_type,
- struct hl_eq_razwi_info *razwi_info, u64 *event_mask)
+static int gaudi2_handle_qm_sei_err(struct hl_device *hdev, u16 event_type,
+ bool extended_err_check, u64 *event_mask)
{
enum razwi_event_sources module;
+ u32 error_count = 0;
u64 qman_base;
u8 index;
@@ -7808,26 +7952,30 @@ static void gaudi2_handle_qm_sei_err(struct hl_device *hdev, u16 event_type,
module = RAZWI_ROT;
break;
default:
- return;
+ return 0;
}
- _gaudi2_handle_qm_sei_err(hdev, qman_base);
+ error_count = _gaudi2_handle_qm_sei_err(hdev, qman_base, event_type);
/* There is a single event per NIC macro, so should check its both QMAN blocks */
if (event_type >= GAUDI2_EVENT_NIC0_AXI_ERROR_RESPONSE &&
event_type <= GAUDI2_EVENT_NIC11_AXI_ERROR_RESPONSE)
- _gaudi2_handle_qm_sei_err(hdev, qman_base + NIC_QM_OFFSET);
+ error_count += _gaudi2_handle_qm_sei_err(hdev,
+ qman_base + NIC_QM_OFFSET, event_type);
- /* check if RAZWI happened */
- if (razwi_info)
- gaudi2_ack_module_razwi_event_handler(hdev, module, 0, 0, razwi_info, event_mask);
+ if (extended_err_check) {
+ /* check if RAZWI happened */
+ gaudi2_ack_module_razwi_event_handler(hdev, module, 0, 0, event_mask);
+ hl_check_for_glbl_errors(hdev);
+ }
+
+ return error_count;
}
-static void gaudi2_handle_qman_err(struct hl_device *hdev, u16 event_type)
+static int gaudi2_handle_qman_err(struct hl_device *hdev, u16 event_type, u64 *event_mask)
{
- u32 qid_base;
+ u32 qid_base, error_count = 0;
u64 qman_base;
- char desc[32];
u8 index;
switch (event_type) {
@@ -7835,194 +7983,207 @@ static void gaudi2_handle_qman_err(struct hl_device *hdev, u16 event_type)
index = event_type - GAUDI2_EVENT_TPC0_QM;
qid_base = GAUDI2_QUEUE_ID_DCORE0_TPC_0_0 + index * QMAN_STREAMS;
qman_base = mmDCORE0_TPC0_QM_BASE + index * DCORE_TPC_OFFSET;
- snprintf(desc, ARRAY_SIZE(desc), "DCORE0_TPC%d_QM", index);
break;
case GAUDI2_EVENT_TPC6_QM ... GAUDI2_EVENT_TPC11_QM:
index = event_type - GAUDI2_EVENT_TPC6_QM;
qid_base = GAUDI2_QUEUE_ID_DCORE1_TPC_0_0 + index * QMAN_STREAMS;
qman_base = mmDCORE1_TPC0_QM_BASE + index * DCORE_TPC_OFFSET;
- snprintf(desc, ARRAY_SIZE(desc), "DCORE1_TPC%d_QM", index);
break;
case GAUDI2_EVENT_TPC12_QM ... GAUDI2_EVENT_TPC17_QM:
index = event_type - GAUDI2_EVENT_TPC12_QM;
qid_base = GAUDI2_QUEUE_ID_DCORE2_TPC_0_0 + index * QMAN_STREAMS;
qman_base = mmDCORE2_TPC0_QM_BASE + index * DCORE_TPC_OFFSET;
- snprintf(desc, ARRAY_SIZE(desc), "DCORE2_TPC%d_QM", index);
break;
case GAUDI2_EVENT_TPC18_QM ... GAUDI2_EVENT_TPC23_QM:
index = event_type - GAUDI2_EVENT_TPC18_QM;
qid_base = GAUDI2_QUEUE_ID_DCORE3_TPC_0_0 + index * QMAN_STREAMS;
qman_base = mmDCORE3_TPC0_QM_BASE + index * DCORE_TPC_OFFSET;
- snprintf(desc, ARRAY_SIZE(desc), "DCORE3_TPC%d_QM", index);
break;
case GAUDI2_EVENT_TPC24_QM:
qid_base = GAUDI2_QUEUE_ID_DCORE0_TPC_6_0;
qman_base = mmDCORE0_TPC6_QM_BASE;
- snprintf(desc, ARRAY_SIZE(desc), "DCORE0_TPC6_QM");
break;
case GAUDI2_EVENT_MME0_QM:
qid_base = GAUDI2_QUEUE_ID_DCORE0_MME_0_0;
qman_base = mmDCORE0_MME_QM_BASE;
- snprintf(desc, ARRAY_SIZE(desc), "DCORE0_MME_QM");
break;
case GAUDI2_EVENT_MME1_QM:
qid_base = GAUDI2_QUEUE_ID_DCORE1_MME_0_0;
qman_base = mmDCORE1_MME_QM_BASE;
- snprintf(desc, ARRAY_SIZE(desc), "DCORE1_MME_QM");
break;
case GAUDI2_EVENT_MME2_QM:
qid_base = GAUDI2_QUEUE_ID_DCORE2_MME_0_0;
qman_base = mmDCORE2_MME_QM_BASE;
- snprintf(desc, ARRAY_SIZE(desc), "DCORE2_MME_QM");
break;
case GAUDI2_EVENT_MME3_QM:
qid_base = GAUDI2_QUEUE_ID_DCORE3_MME_0_0;
qman_base = mmDCORE3_MME_QM_BASE;
- snprintf(desc, ARRAY_SIZE(desc), "DCORE3_MME_QM");
break;
case GAUDI2_EVENT_HDMA0_QM:
+ index = 0;
qid_base = GAUDI2_QUEUE_ID_DCORE0_EDMA_0_0;
qman_base = mmDCORE0_EDMA0_QM_BASE;
- snprintf(desc, ARRAY_SIZE(desc), "DCORE0_EDMA0_QM");
break;
case GAUDI2_EVENT_HDMA1_QM:
+ index = 1;
qid_base = GAUDI2_QUEUE_ID_DCORE0_EDMA_1_0;
qman_base = mmDCORE0_EDMA1_QM_BASE;
- snprintf(desc, ARRAY_SIZE(desc), "DCORE0_EDMA1_QM");
break;
case GAUDI2_EVENT_HDMA2_QM:
+ index = 2;
qid_base = GAUDI2_QUEUE_ID_DCORE1_EDMA_0_0;
qman_base = mmDCORE1_EDMA0_QM_BASE;
- snprintf(desc, ARRAY_SIZE(desc), "DCORE1_EDMA0_QM");
break;
case GAUDI2_EVENT_HDMA3_QM:
+ index = 3;
qid_base = GAUDI2_QUEUE_ID_DCORE1_EDMA_1_0;
qman_base = mmDCORE1_EDMA1_QM_BASE;
- snprintf(desc, ARRAY_SIZE(desc), "DCORE1_EDMA1_QM");
break;
case GAUDI2_EVENT_HDMA4_QM:
+ index = 4;
qid_base = GAUDI2_QUEUE_ID_DCORE2_EDMA_0_0;
qman_base = mmDCORE2_EDMA0_QM_BASE;
- snprintf(desc, ARRAY_SIZE(desc), "DCORE2_EDMA0_QM");
break;
case GAUDI2_EVENT_HDMA5_QM:
+ index = 5;
qid_base = GAUDI2_QUEUE_ID_DCORE2_EDMA_1_0;
qman_base = mmDCORE2_EDMA1_QM_BASE;
- snprintf(desc, ARRAY_SIZE(desc), "DCORE2_EDMA1_QM");
break;
case GAUDI2_EVENT_HDMA6_QM:
+ index = 6;
qid_base = GAUDI2_QUEUE_ID_DCORE3_EDMA_0_0;
qman_base = mmDCORE3_EDMA0_QM_BASE;
- snprintf(desc, ARRAY_SIZE(desc), "DCORE3_EDMA0_QM");
break;
case GAUDI2_EVENT_HDMA7_QM:
+ index = 7;
qid_base = GAUDI2_QUEUE_ID_DCORE3_EDMA_1_0;
qman_base = mmDCORE3_EDMA1_QM_BASE;
- snprintf(desc, ARRAY_SIZE(desc), "DCORE3_EDMA1_QM");
break;
case GAUDI2_EVENT_PDMA0_QM:
qid_base = GAUDI2_QUEUE_ID_PDMA_0_0;
qman_base = mmPDMA0_QM_BASE;
- snprintf(desc, ARRAY_SIZE(desc), "PDMA0_QM");
break;
case GAUDI2_EVENT_PDMA1_QM:
qid_base = GAUDI2_QUEUE_ID_PDMA_1_0;
qman_base = mmPDMA1_QM_BASE;
- snprintf(desc, ARRAY_SIZE(desc), "PDMA1_QM");
break;
case GAUDI2_EVENT_ROTATOR0_ROT0_QM:
qid_base = GAUDI2_QUEUE_ID_ROT_0_0;
qman_base = mmROT0_QM_BASE;
- snprintf(desc, ARRAY_SIZE(desc), "ROTATOR0_QM");
break;
case GAUDI2_EVENT_ROTATOR1_ROT1_QM:
qid_base = GAUDI2_QUEUE_ID_ROT_1_0;
qman_base = mmROT1_QM_BASE;
- snprintf(desc, ARRAY_SIZE(desc), "ROTATOR1_QM");
break;
default:
- return;
+ return 0;
}
- gaudi2_handle_qman_err_generic(hdev, desc, qman_base, qid_base);
+ error_count = gaudi2_handle_qman_err_generic(hdev, event_type, qman_base, qid_base);
/* Handle EDMA QM SEI here because there is no AXI error response event for EDMA */
- if (event_type >= GAUDI2_EVENT_HDMA2_QM && event_type <= GAUDI2_EVENT_HDMA5_QM)
- _gaudi2_handle_qm_sei_err(hdev, qman_base);
+ if (event_type >= GAUDI2_EVENT_HDMA2_QM && event_type <= GAUDI2_EVENT_HDMA5_QM) {
+ error_count += _gaudi2_handle_qm_sei_err(hdev, qman_base, event_type);
+ gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_EDMA, index, 0, event_mask);
+ }
+
+ hl_check_for_glbl_errors(hdev);
+
+ return error_count;
}
-static void gaudi2_handle_arc_farm_sei_err(struct hl_device *hdev)
+static int gaudi2_handle_arc_farm_sei_err(struct hl_device *hdev, u16 event_type)
{
- u32 i, sts_val, sts_clr_val = 0;
+ u32 i, sts_val, sts_clr_val = 0, error_count = 0;
sts_val = RREG32(mmARC_FARM_ARC0_AUX_ARC_SEI_INTR_STS);
for (i = 0 ; i < GAUDI2_NUM_OF_ARC_SEI_ERR_CAUSE ; i++) {
if (sts_val & BIT(i)) {
- dev_err_ratelimited(hdev->dev, "ARC SEI. err cause: %s\n",
- gaudi2_arc_sei_error_cause[i]);
+ gaudi2_print_event(hdev, event_type, true,
+ "err cause: %s", gaudi2_arc_sei_error_cause[i]);
sts_clr_val |= BIT(i);
+ error_count++;
}
}
+ hl_check_for_glbl_errors(hdev);
+
WREG32(mmARC_FARM_ARC0_AUX_ARC_SEI_INTR_CLR, sts_clr_val);
+
+ return error_count;
}
-static void gaudi2_handle_cpu_sei_err(struct hl_device *hdev)
+static int gaudi2_handle_cpu_sei_err(struct hl_device *hdev, u16 event_type)
{
- u32 i, sts_val, sts_clr_val = 0;
+ u32 i, sts_val, sts_clr_val = 0, error_count = 0;
sts_val = RREG32(mmCPU_IF_CPU_SEI_INTR_STS);
for (i = 0 ; i < GAUDI2_NUM_OF_CPU_SEI_ERR_CAUSE ; i++) {
if (sts_val & BIT(i)) {
- dev_err_ratelimited(hdev->dev, "CPU SEI. err cause: %s\n",
- gaudi2_cpu_sei_error_cause[i]);
+ gaudi2_print_event(hdev, event_type, true,
+ "err cause: %s", gaudi2_cpu_sei_error_cause[i]);
sts_clr_val |= BIT(i);
+ error_count++;
}
}
+ hl_check_for_glbl_errors(hdev);
+
WREG32(mmCPU_IF_CPU_SEI_INTR_CLR, sts_clr_val);
+
+ return error_count;
}
-static void gaudi2_handle_rot_err(struct hl_device *hdev, u8 rot_index,
+static int gaudi2_handle_rot_err(struct hl_device *hdev, u8 rot_index, u16 event_type,
struct hl_eq_razwi_with_intr_cause *razwi_with_intr_cause,
u64 *event_mask)
{
u64 intr_cause_data = le64_to_cpu(razwi_with_intr_cause->intr_cause.intr_cause_data);
+ u32 error_count = 0;
int i;
for (i = 0 ; i < GAUDI2_NUM_OF_ROT_ERR_CAUSE ; i++)
- if (intr_cause_data & BIT(i))
- dev_err_ratelimited(hdev->dev, "ROT%u. err cause: %s\n",
- rot_index, guadi2_rot_error_cause[i]);
+ if (intr_cause_data & BIT(i)) {
+ gaudi2_print_event(hdev, event_type, true,
+ "err cause: %s", guadi2_rot_error_cause[i]);
+ error_count++;
+ }
/* check if RAZWI happened */
- gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_ROT, rot_index, 0,
- &razwi_with_intr_cause->razwi_info, event_mask);
+ gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_ROT, rot_index, 0, event_mask);
+ hl_check_for_glbl_errors(hdev);
+
+ return error_count;
}
-static void gaudi2_tpc_ack_interrupts(struct hl_device *hdev, u8 tpc_index, char *interrupt_name,
+static int gaudi2_tpc_ack_interrupts(struct hl_device *hdev, u8 tpc_index, u16 event_type,
struct hl_eq_razwi_with_intr_cause *razwi_with_intr_cause,
u64 *event_mask)
{
u64 intr_cause_data = le64_to_cpu(razwi_with_intr_cause->intr_cause.intr_cause_data);
+ u32 error_count = 0;
int i;
for (i = 0 ; i < GAUDI2_NUM_OF_TPC_INTR_CAUSE ; i++)
- if (intr_cause_data & BIT(i))
- dev_err_ratelimited(hdev->dev, "TPC%d_%s interrupt cause: %s\n",
- tpc_index, interrupt_name, gaudi2_tpc_interrupts_cause[i]);
+ if (intr_cause_data & BIT(i)) {
+ gaudi2_print_event(hdev, event_type, true,
+ "interrupt cause: %s", gaudi2_tpc_interrupts_cause[i]);
+ error_count++;
+ }
/* check if RAZWI happened */
- gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_TPC, tpc_index, 0,
- &razwi_with_intr_cause->razwi_info, event_mask);
+ gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_TPC, tpc_index, 0, event_mask);
+ hl_check_for_glbl_errors(hdev);
+
+ return error_count;
}
-static void gaudi2_handle_dec_err(struct hl_device *hdev, u8 dec_index, const char *interrupt_name,
- struct hl_eq_razwi_info *razwi_info, u64 *event_mask)
+static int gaudi2_handle_dec_err(struct hl_device *hdev, u8 dec_index, u16 event_type,
+ u64 *event_mask)
{
- u32 sts_addr, sts_val, sts_clr_val = 0;
+ u32 sts_addr, sts_val, sts_clr_val = 0, error_count = 0;
int i;
if (dec_index < NUM_OF_VDEC_PER_DCORE * NUM_OF_DCORES)
@@ -8039,24 +8200,27 @@ static void gaudi2_handle_dec_err(struct hl_device *hdev, u8 dec_index, const ch
for (i = 0 ; i < GAUDI2_NUM_OF_DEC_ERR_CAUSE ; i++) {
if (sts_val & BIT(i)) {
- dev_err_ratelimited(hdev->dev, "DEC%u_%s err cause: %s\n",
- dec_index, interrupt_name, gaudi2_dec_error_cause[i]);
+ gaudi2_print_event(hdev, event_type, true,
+ "err cause: %s", gaudi2_dec_error_cause[i]);
sts_clr_val |= BIT(i);
+ error_count++;
}
}
/* check if RAZWI happened */
- gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_DEC, dec_index, 0, razwi_info,
- event_mask);
+ gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_DEC, dec_index, 0, event_mask);
+ hl_check_for_glbl_errors(hdev);
/* Write 1 clear errors */
WREG32(sts_addr, sts_clr_val);
+
+ return error_count;
}
-static void gaudi2_handle_mme_err(struct hl_device *hdev, u8 mme_index, const char *interrupt_name,
- struct hl_eq_razwi_info *razwi_info, u64 *event_mask)
+static int gaudi2_handle_mme_err(struct hl_device *hdev, u8 mme_index, u16 event_type,
+ u64 *event_mask)
{
- u32 sts_addr, sts_val, sts_clr_addr, sts_clr_val = 0;
+ u32 sts_addr, sts_val, sts_clr_addr, sts_clr_val = 0, error_count = 0;
int i;
sts_addr = mmDCORE0_MME_CTRL_LO_INTR_CAUSE + DCORE_OFFSET * mme_index;
@@ -8066,35 +8230,45 @@ static void gaudi2_handle_mme_err(struct hl_device *hdev, u8 mme_index, const ch
for (i = 0 ; i < GAUDI2_NUM_OF_MME_ERR_CAUSE ; i++) {
if (sts_val & BIT(i)) {
- dev_err_ratelimited(hdev->dev, "MME%u_%s err cause: %s\n",
- mme_index, interrupt_name, guadi2_mme_error_cause[i]);
+ gaudi2_print_event(hdev, event_type, true,
+ "err cause: %s", guadi2_mme_error_cause[i]);
sts_clr_val |= BIT(i);
+ error_count++;
}
}
/* check if RAZWI happened */
for (i = MME_WRITE ; i < MME_INITIATORS_MAX ; i++)
- gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_MME, mme_index, i, razwi_info,
- event_mask);
+ gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_MME, mme_index, i, event_mask);
+
+ hl_check_for_glbl_errors(hdev);
WREG32(sts_clr_addr, sts_clr_val);
+
+ return error_count;
}
-static void gaudi2_handle_mme_sbte_err(struct hl_device *hdev, u8 mme_index, u8 sbte_index,
+static int gaudi2_handle_mme_sbte_err(struct hl_device *hdev, u16 event_type,
u64 intr_cause_data)
{
- int i;
+ int i, error_count = 0;
for (i = 0 ; i < GAUDI2_NUM_OF_MME_SBTE_ERR_CAUSE ; i++)
- if (intr_cause_data & BIT(i))
- dev_err_ratelimited(hdev->dev, "MME%uSBTE%u_AXI_ERR_RSP err cause: %s\n",
- mme_index, sbte_index, guadi2_mme_sbte_error_cause[i]);
+ if (intr_cause_data & BIT(i)) {
+ gaudi2_print_event(hdev, event_type, true,
+ "err cause: %s", guadi2_mme_sbte_error_cause[i]);
+ error_count++;
+ }
+
+ hl_check_for_glbl_errors(hdev);
+
+ return error_count;
}
-static void gaudi2_handle_mme_wap_err(struct hl_device *hdev, u8 mme_index,
- struct hl_eq_razwi_info *razwi_info, u64 *event_mask)
+static int gaudi2_handle_mme_wap_err(struct hl_device *hdev, u8 mme_index, u16 event_type,
+ u64 *event_mask)
{
- u32 sts_addr, sts_val, sts_clr_addr, sts_clr_val = 0;
+ u32 sts_addr, sts_val, sts_clr_addr, sts_clr_val = 0, error_count = 0;
int i;
sts_addr = mmDCORE0_MME_ACC_INTR_CAUSE + DCORE_OFFSET * mme_index;
@@ -8104,24 +8278,27 @@ static void gaudi2_handle_mme_wap_err(struct hl_device *hdev, u8 mme_index,
for (i = 0 ; i < GAUDI2_NUM_OF_MME_WAP_ERR_CAUSE ; i++) {
if (sts_val & BIT(i)) {
- dev_err_ratelimited(hdev->dev,
- "MME%u_WAP_SOURCE_RESULT_INVALID err cause: %s\n",
- mme_index, guadi2_mme_wap_error_cause[i]);
+ gaudi2_print_event(hdev, event_type, true,
+ "err cause: %s", guadi2_mme_wap_error_cause[i]);
sts_clr_val |= BIT(i);
+ error_count++;
}
}
/* check if RAZWI happened on WAP0/1 */
- gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_MME, mme_index, MME_WAP0, razwi_info,
- event_mask);
- gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_MME, mme_index, MME_WAP1, razwi_info,
- event_mask);
+ gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_MME, mme_index, MME_WAP0, event_mask);
+ gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_MME, mme_index, MME_WAP1, event_mask);
+ hl_check_for_glbl_errors(hdev);
WREG32(sts_clr_addr, sts_clr_val);
+
+ return error_count;
}
-static void gaudi2_handle_kdma_core_event(struct hl_device *hdev, u64 intr_cause_data)
+static int gaudi2_handle_kdma_core_event(struct hl_device *hdev, u16 event_type,
+ u64 intr_cause_data)
{
+ u32 error_count = 0;
int i;
/* If an AXI read or write error is received, an error is reported and
@@ -8130,19 +8307,33 @@ static void gaudi2_handle_kdma_core_event(struct hl_device *hdev, u64 intr_cause
* the actual error caused by a LBW KDMA transaction.
*/
for (i = 0 ; i < GAUDI2_NUM_OF_DMA_CORE_INTR_CAUSE ; i++)
- if (intr_cause_data & BIT(i))
- dev_err_ratelimited(hdev->dev, "kdma core err cause: %s\n",
- gaudi2_kdma_core_interrupts_cause[i]);
+ if (intr_cause_data & BIT(i)) {
+ gaudi2_print_event(hdev, event_type, true,
+ "err cause: %s", gaudi2_kdma_core_interrupts_cause[i]);
+ error_count++;
+ }
+
+ hl_check_for_glbl_errors(hdev);
+
+ return error_count;
}
-static void gaudi2_handle_dma_core_event(struct hl_device *hdev, u64 intr_cause_data)
+static int gaudi2_handle_dma_core_event(struct hl_device *hdev, u16 event_type,
+ u64 intr_cause_data)
{
+ u32 error_count = 0;
int i;
for (i = 0 ; i < GAUDI2_NUM_OF_DMA_CORE_INTR_CAUSE ; i++)
- if (intr_cause_data & BIT(i))
- dev_err_ratelimited(hdev->dev, "dma core err cause: %s\n",
- gaudi2_dma_core_interrupts_cause[i]);
+ if (intr_cause_data & BIT(i)) {
+ gaudi2_print_event(hdev, event_type, true,
+ "err cause: %s", gaudi2_dma_core_interrupts_cause[i]);
+ error_count++;
+ }
+
+ hl_check_for_glbl_errors(hdev);
+
+ return error_count;
}
static void gaudi2_print_pcie_mstr_rr_mstr_if_razwi_info(struct hl_device *hdev, u64 *event_mask)
@@ -8151,86 +8342,98 @@ static void gaudi2_print_pcie_mstr_rr_mstr_if_razwi_info(struct hl_device *hdev,
razwi_happened_addr = mstr_if_base_addr + RR_SHRD_HBW_AW_RAZWI_HAPPENED;
if (RREG32(razwi_happened_addr)) {
- gaudi2_razwi_rr_hbw_shared_printf_info(hdev, mstr_if_base_addr, true, "PCIE", true,
- NULL, GAUDI2_ENGINE_ID_PCIE, event_mask);
+ gaudi2_razwi_rr_hbw_shared_printf_info(hdev, mstr_if_base_addr, true, "PCIE",
+ GAUDI2_ENGINE_ID_PCIE, event_mask);
WREG32(razwi_happened_addr, 0x1);
}
razwi_happened_addr = mstr_if_base_addr + RR_SHRD_HBW_AR_RAZWI_HAPPENED;
if (RREG32(razwi_happened_addr)) {
- gaudi2_razwi_rr_hbw_shared_printf_info(hdev, mstr_if_base_addr, false, "PCIE", true,
- NULL, GAUDI2_ENGINE_ID_PCIE, event_mask);
+ gaudi2_razwi_rr_hbw_shared_printf_info(hdev, mstr_if_base_addr, false, "PCIE",
+ GAUDI2_ENGINE_ID_PCIE, event_mask);
WREG32(razwi_happened_addr, 0x1);
}
razwi_happened_addr = mstr_if_base_addr + RR_SHRD_LBW_AW_RAZWI_HAPPENED;
if (RREG32(razwi_happened_addr)) {
- gaudi2_razwi_rr_lbw_shared_printf_info(hdev, mstr_if_base_addr, true, "PCIE", true,
- NULL, GAUDI2_ENGINE_ID_PCIE, event_mask);
+ gaudi2_razwi_rr_lbw_shared_printf_info(hdev, mstr_if_base_addr, true, "PCIE",
+ GAUDI2_ENGINE_ID_PCIE, event_mask);
WREG32(razwi_happened_addr, 0x1);
}
razwi_happened_addr = mstr_if_base_addr + RR_SHRD_LBW_AR_RAZWI_HAPPENED;
if (RREG32(razwi_happened_addr)) {
- gaudi2_razwi_rr_lbw_shared_printf_info(hdev, mstr_if_base_addr, false, "PCIE", true,
- NULL, GAUDI2_ENGINE_ID_PCIE, event_mask);
+ gaudi2_razwi_rr_lbw_shared_printf_info(hdev, mstr_if_base_addr, false, "PCIE",
+ GAUDI2_ENGINE_ID_PCIE, event_mask);
WREG32(razwi_happened_addr, 0x1);
}
}
-static void gaudi2_print_pcie_addr_dec_info(struct hl_device *hdev, u64 intr_cause_data,
- u64 *event_mask)
+static int gaudi2_print_pcie_addr_dec_info(struct hl_device *hdev, u16 event_type,
+ u64 intr_cause_data, u64 *event_mask)
{
+ u32 error_count = 0;
int i;
for (i = 0 ; i < GAUDI2_NUM_OF_PCIE_ADDR_DEC_ERR_CAUSE ; i++) {
if (!(intr_cause_data & BIT_ULL(i)))
continue;
- dev_err_ratelimited(hdev->dev, "PCIE ADDR DEC Error: %s\n",
- gaudi2_pcie_addr_dec_error_cause[i]);
+ gaudi2_print_event(hdev, event_type, true,
+ "err cause: %s", gaudi2_pcie_addr_dec_error_cause[i]);
+ error_count++;
switch (intr_cause_data & BIT_ULL(i)) {
case PCIE_WRAP_PCIE_IC_SEI_INTR_IND_AXI_LBW_ERR_INTR_MASK:
+ hl_check_for_glbl_errors(hdev);
break;
case PCIE_WRAP_PCIE_IC_SEI_INTR_IND_BAD_ACCESS_INTR_MASK:
gaudi2_print_pcie_mstr_rr_mstr_if_razwi_info(hdev, event_mask);
break;
}
}
+
+ return error_count;
}
-static void gaudi2_handle_pif_fatal(struct hl_device *hdev, u64 intr_cause_data)
+static int gaudi2_handle_pif_fatal(struct hl_device *hdev, u16 event_type,
+ u64 intr_cause_data)
{
+ u32 error_count = 0;
int i;
for (i = 0 ; i < GAUDI2_NUM_OF_PMMU_FATAL_ERR_CAUSE ; i++) {
- if (intr_cause_data & BIT_ULL(i))
- dev_err_ratelimited(hdev->dev, "PMMU PIF err cause: %s\n",
- gaudi2_pmmu_fatal_interrupts_cause[i]);
+ if (intr_cause_data & BIT_ULL(i)) {
+ gaudi2_print_event(hdev, event_type, true,
+ "err cause: %s", gaudi2_pmmu_fatal_interrupts_cause[i]);
+ error_count++;
+ }
}
+
+ return error_count;
}
-static void gaudi2_handle_hif_fatal(struct hl_device *hdev, u16 event_type, u64 intr_cause_data)
+static int gaudi2_handle_hif_fatal(struct hl_device *hdev, u16 event_type, u64 intr_cause_data)
{
- u32 dcore_id, hif_id;
+ u32 error_count = 0;
int i;
- dcore_id = (event_type - GAUDI2_EVENT_HIF0_FATAL) / 4;
- hif_id = (event_type - GAUDI2_EVENT_HIF0_FATAL) % 4;
-
for (i = 0 ; i < GAUDI2_NUM_OF_HIF_FATAL_ERR_CAUSE ; i++) {
- if (intr_cause_data & BIT_ULL(i))
- dev_err_ratelimited(hdev->dev, "DCORE%u_HIF%u: %s\n", dcore_id, hif_id,
- gaudi2_hif_fatal_interrupts_cause[i]);
+ if (intr_cause_data & BIT_ULL(i)) {
+ gaudi2_print_event(hdev, event_type, true,
+ "err cause: %s", gaudi2_hif_fatal_interrupts_cause[i]);
+ error_count++;
+ }
}
+
+ return error_count;
}
static void gaudi2_handle_page_error(struct hl_device *hdev, u64 mmu_base, bool is_pmmu,
u64 *event_mask)
{
- u32 valid, val;
+ u32 valid, val, axid_l, axid_h;
u64 addr;
valid = RREG32(mmu_base + MMU_OFFSET(mmDCORE0_HMMU0_MMU_ACCESS_PAGE_ERROR_VALID));
@@ -8243,8 +8446,11 @@ static void gaudi2_handle_page_error(struct hl_device *hdev, u64 mmu_base, bool
addr <<= 32;
addr |= RREG32(mmu_base + MMU_OFFSET(mmDCORE0_HMMU0_MMU_PAGE_ERROR_CAPTURE_VA));
- dev_err_ratelimited(hdev->dev, "%s page fault on va 0x%llx\n",
- is_pmmu ? "PMMU" : "HMMU", addr);
+ axid_l = RREG32(mmu_base + MMU_OFFSET(mmDCORE0_HMMU0_MMU_PAGE_FAULT_ID_LSB));
+ axid_h = RREG32(mmu_base + MMU_OFFSET(mmDCORE0_HMMU0_MMU_PAGE_FAULT_ID_MSB));
+
+ dev_err_ratelimited(hdev->dev, "%s page fault on va 0x%llx, transaction id 0x%llX\n",
+ is_pmmu ? "PMMU" : "HMMU", addr, ((u64)axid_h << 32) + axid_l);
hl_handle_page_fault(hdev, addr, 0, is_pmmu, event_mask);
WREG32(mmu_base + MMU_OFFSET(mmDCORE0_HMMU0_MMU_PAGE_ERROR_CAPTURE), 0);
@@ -8270,18 +8476,18 @@ static void gaudi2_handle_access_error(struct hl_device *hdev, u64 mmu_base, boo
WREG32(mmu_base + MMU_OFFSET(mmDCORE0_HMMU0_MMU_ACCESS_ERROR_CAPTURE), 0);
}
-static void gaudi2_handle_mmu_spi_sei_generic(struct hl_device *hdev, const char *mmu_name,
+static int gaudi2_handle_mmu_spi_sei_generic(struct hl_device *hdev, u16 event_type,
u64 mmu_base, bool is_pmmu, u64 *event_mask)
{
- u32 spi_sei_cause, interrupt_clr = 0x0;
+ u32 spi_sei_cause, interrupt_clr = 0x0, error_count = 0;
int i;
spi_sei_cause = RREG32(mmu_base + MMU_SPI_SEI_CAUSE_OFFSET);
for (i = 0 ; i < GAUDI2_NUM_OF_MMU_SPI_SEI_CAUSE ; i++) {
if (spi_sei_cause & BIT(i)) {
- dev_err_ratelimited(hdev->dev, "%s SPI_SEI ERR. err cause: %s\n",
- mmu_name, gaudi2_mmu_spi_sei[i].cause);
+ gaudi2_print_event(hdev, event_type, true,
+ "err cause: %s", gaudi2_mmu_spi_sei[i].cause);
if (i == 0)
gaudi2_handle_page_error(hdev, mmu_base, is_pmmu, event_mask);
@@ -8290,6 +8496,8 @@ static void gaudi2_handle_mmu_spi_sei_generic(struct hl_device *hdev, const char
if (gaudi2_mmu_spi_sei[i].clear_bit >= 0)
interrupt_clr |= BIT(gaudi2_mmu_spi_sei[i].clear_bit);
+
+ error_count++;
}
}
@@ -8298,12 +8506,14 @@ static void gaudi2_handle_mmu_spi_sei_generic(struct hl_device *hdev, const char
/* Clear interrupt */
WREG32(mmu_base + MMU_INTERRUPT_CLR_OFFSET, interrupt_clr);
+
+ return error_count;
}
-static void gaudi2_handle_sm_err(struct hl_device *hdev, u8 sm_index)
+static int gaudi2_handle_sm_err(struct hl_device *hdev, u16 event_type, u8 sm_index)
{
- u32 sei_cause_addr, sei_cause_val, sei_cause_cause, sei_cause_log;
- u32 cq_intr_addr, cq_intr_val, cq_intr_queue_index;
+ u32 sei_cause_addr, sei_cause_val, sei_cause_cause, sei_cause_log,
+ cq_intr_addr, cq_intr_val, cq_intr_queue_index, error_count = 0;
int i;
sei_cause_addr = mmDCORE0_SYNC_MNGR_GLBL_SM_SEI_CAUSE + DCORE_OFFSET * sm_index;
@@ -8323,11 +8533,12 @@ static void gaudi2_handle_sm_err(struct hl_device *hdev, u8 sm_index)
if (!(sei_cause_cause & BIT(i)))
continue;
- dev_err_ratelimited(hdev->dev, "SM%u SEI ERR. err cause: %s. %s: 0x%X\n",
- sm_index,
- gaudi2_sm_sei_cause[i].cause_name,
- gaudi2_sm_sei_cause[i].log_name,
- sei_cause_log & gaudi2_sm_sei_cause[i].log_mask);
+ gaudi2_print_event(hdev, event_type, true,
+ "err cause: %s. %s: 0x%X\n",
+ gaudi2_sm_sei_cause[i].cause_name,
+ gaudi2_sm_sei_cause[i].log_name,
+ sei_cause_log);
+ error_count++;
break;
}
@@ -8343,16 +8554,21 @@ static void gaudi2_handle_sm_err(struct hl_device *hdev, u8 sm_index)
dev_err_ratelimited(hdev->dev, "SM%u err. err cause: CQ_INTR. queue index: %u\n",
sm_index, cq_intr_queue_index);
+ error_count++;
/* Clear CQ_INTR */
WREG32(cq_intr_addr, 0);
}
+
+ hl_check_for_glbl_errors(hdev);
+
+ return error_count;
}
-static void gaudi2_handle_mmu_spi_sei_err(struct hl_device *hdev, u16 event_type, u64 *event_mask)
+static int gaudi2_handle_mmu_spi_sei_err(struct hl_device *hdev, u16 event_type, u64 *event_mask)
{
bool is_pmmu = false;
- char desc[32];
+ u32 error_count = 0;
u64 mmu_base;
u8 index;
@@ -8360,54 +8576,49 @@ static void gaudi2_handle_mmu_spi_sei_err(struct hl_device *hdev, u16 event_type
case GAUDI2_EVENT_HMMU0_PAGE_FAULT_OR_WR_PERM ... GAUDI2_EVENT_HMMU3_SECURITY_ERROR:
index = (event_type - GAUDI2_EVENT_HMMU0_PAGE_FAULT_OR_WR_PERM) / 3;
mmu_base = mmDCORE0_HMMU0_MMU_BASE + index * DCORE_HMMU_OFFSET;
- snprintf(desc, ARRAY_SIZE(desc), "DCORE0_HMMU%d", index);
break;
case GAUDI2_EVENT_HMMU_0_AXI_ERR_RSP ... GAUDI2_EVENT_HMMU_3_AXI_ERR_RSP:
index = (event_type - GAUDI2_EVENT_HMMU_0_AXI_ERR_RSP);
mmu_base = mmDCORE0_HMMU0_MMU_BASE + index * DCORE_HMMU_OFFSET;
- snprintf(desc, ARRAY_SIZE(desc), "DCORE0_HMMU%d", index);
break;
case GAUDI2_EVENT_HMMU8_PAGE_FAULT_WR_PERM ... GAUDI2_EVENT_HMMU11_SECURITY_ERROR:
index = (event_type - GAUDI2_EVENT_HMMU8_PAGE_FAULT_WR_PERM) / 3;
mmu_base = mmDCORE1_HMMU0_MMU_BASE + index * DCORE_HMMU_OFFSET;
- snprintf(desc, ARRAY_SIZE(desc), "DCORE1_HMMU%d", index);
break;
case GAUDI2_EVENT_HMMU_8_AXI_ERR_RSP ... GAUDI2_EVENT_HMMU_11_AXI_ERR_RSP:
index = (event_type - GAUDI2_EVENT_HMMU_8_AXI_ERR_RSP);
mmu_base = mmDCORE1_HMMU0_MMU_BASE + index * DCORE_HMMU_OFFSET;
- snprintf(desc, ARRAY_SIZE(desc), "DCORE1_HMMU%d", index);
break;
case GAUDI2_EVENT_HMMU7_PAGE_FAULT_WR_PERM ... GAUDI2_EVENT_HMMU4_SECURITY_ERROR:
index = (event_type - GAUDI2_EVENT_HMMU7_PAGE_FAULT_WR_PERM) / 3;
mmu_base = mmDCORE2_HMMU0_MMU_BASE + index * DCORE_HMMU_OFFSET;
- snprintf(desc, ARRAY_SIZE(desc), "DCORE2_HMMU%d", index);
break;
case GAUDI2_EVENT_HMMU_7_AXI_ERR_RSP ... GAUDI2_EVENT_HMMU_4_AXI_ERR_RSP:
index = (event_type - GAUDI2_EVENT_HMMU_7_AXI_ERR_RSP);
mmu_base = mmDCORE2_HMMU0_MMU_BASE + index * DCORE_HMMU_OFFSET;
- snprintf(desc, ARRAY_SIZE(desc), "DCORE2_HMMU%d", index);
break;
case GAUDI2_EVENT_HMMU15_PAGE_FAULT_WR_PERM ... GAUDI2_EVENT_HMMU12_SECURITY_ERROR:
index = (event_type - GAUDI2_EVENT_HMMU15_PAGE_FAULT_WR_PERM) / 3;
mmu_base = mmDCORE3_HMMU0_MMU_BASE + index * DCORE_HMMU_OFFSET;
- snprintf(desc, ARRAY_SIZE(desc), "DCORE3_HMMU%d", index);
break;
case GAUDI2_EVENT_HMMU_15_AXI_ERR_RSP ... GAUDI2_EVENT_HMMU_12_AXI_ERR_RSP:
index = (event_type - GAUDI2_EVENT_HMMU_15_AXI_ERR_RSP);
mmu_base = mmDCORE3_HMMU0_MMU_BASE + index * DCORE_HMMU_OFFSET;
- snprintf(desc, ARRAY_SIZE(desc), "DCORE3_HMMU%d", index);
break;
case GAUDI2_EVENT_PMMU0_PAGE_FAULT_WR_PERM ... GAUDI2_EVENT_PMMU0_SECURITY_ERROR:
case GAUDI2_EVENT_PMMU_AXI_ERR_RSP_0:
is_pmmu = true;
mmu_base = mmPMMU_HBW_MMU_BASE;
- snprintf(desc, ARRAY_SIZE(desc), "PMMU");
break;
default:
- return;
+ return 0;
}
- gaudi2_handle_mmu_spi_sei_generic(hdev, desc, mmu_base, is_pmmu, event_mask);
+ error_count = gaudi2_handle_mmu_spi_sei_generic(hdev, event_type, mmu_base,
+ is_pmmu, event_mask);
+ hl_check_for_glbl_errors(hdev);
+
+ return error_count;
}
@@ -8527,22 +8738,17 @@ static bool gaudi2_handle_hbm_mc_sei_err(struct hl_device *hdev, u16 event_type,
cause_idx = sei_data->hdr.sei_cause;
if (cause_idx > GAUDI2_NUM_OF_HBM_SEI_CAUSE - 1) {
- dev_err_ratelimited(hdev->dev, "Invalid HBM SEI event cause (%d) provided by FW\n",
- cause_idx);
+ gaudi2_print_event(hdev, event_type, true,
+ "err cause: %s",
+ "Invalid HBM SEI event cause (%d) provided by FW\n", cause_idx);
return true;
}
- if (sei_data->hdr.is_critical)
- dev_err(hdev->dev,
- "System Critical Error Interrupt - HBM(%u) MC(%u) MC_CH(%u) MC_PC(%u). Error cause: %s\n",
- hbm_id, mc_id, sei_data->hdr.mc_channel, sei_data->hdr.mc_pseudo_channel,
- hbm_mc_sei_cause[cause_idx]);
-
- else
- dev_err_ratelimited(hdev->dev,
- "System Non-Critical Error Interrupt - HBM(%u) MC(%u) MC_CH(%u) MC_PC(%u). Error cause: %s\n",
- hbm_id, mc_id, sei_data->hdr.mc_channel, sei_data->hdr.mc_pseudo_channel,
- hbm_mc_sei_cause[cause_idx]);
+ gaudi2_print_event(hdev, event_type, !sei_data->hdr.is_critical,
+ "System %s Error Interrupt - HBM(%u) MC(%u) MC_CH(%u) MC_PC(%u). Error cause: %s\n",
+ sei_data->hdr.is_critical ? "Critical" : "Non-critical",
+ hbm_id, mc_id, sei_data->hdr.mc_channel, sei_data->hdr.mc_pseudo_channel,
+ hbm_mc_sei_cause[cause_idx]);
/* Print error-specific info */
switch (cause_idx) {
@@ -8586,24 +8792,33 @@ static bool gaudi2_handle_hbm_mc_sei_err(struct hl_device *hdev, u16 event_type,
return require_hard_reset;
}
-static void gaudi2_handle_hbm_cattrip(struct hl_device *hdev, u64 intr_cause_data)
+static int gaudi2_handle_hbm_cattrip(struct hl_device *hdev, u16 event_type,
+ u64 intr_cause_data)
{
- dev_err(hdev->dev,
- "HBM catastrophic temperature error (CATTRIP) cause %#llx\n",
- intr_cause_data);
+ if (intr_cause_data) {
+ gaudi2_print_event(hdev, event_type, true,
+ "temperature error cause: %#llx", intr_cause_data);
+ return 1;
+ }
+
+ return 0;
}
-static void gaudi2_handle_hbm_mc_spi(struct hl_device *hdev, u64 intr_cause_data)
+static int gaudi2_handle_hbm_mc_spi(struct hl_device *hdev, u64 intr_cause_data)
{
- u32 i;
+ u32 i, error_count = 0;
for (i = 0 ; i < GAUDI2_NUM_OF_HBM_MC_SPI_CAUSE ; i++)
- if (intr_cause_data & hbm_mc_spi[i].mask)
+ if (intr_cause_data & hbm_mc_spi[i].mask) {
dev_dbg(hdev->dev, "HBM spi event: notification cause(%s)\n",
hbm_mc_spi[i].cause);
+ error_count++;
+ }
+
+ return error_count;
}
-static void gaudi2_print_clk_change_info(struct hl_device *hdev, u16 event_type)
+static void gaudi2_print_clk_change_info(struct hl_device *hdev, u16 event_type, u64 *event_mask)
{
ktime_t zero_time = ktime_set(0, 0);
@@ -8615,13 +8830,13 @@ static void gaudi2_print_clk_change_info(struct hl_device *hdev, u16 event_type)
hdev->clk_throttling.aggregated_reason |= HL_CLK_THROTTLE_POWER;
hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_POWER].start = ktime_get();
hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_POWER].end = zero_time;
- dev_info_ratelimited(hdev->dev, "Clock throttling due to power consumption\n");
+ dev_dbg_ratelimited(hdev->dev, "Clock throttling due to power consumption\n");
break;
case GAUDI2_EVENT_CPU_FIX_POWER_ENV_E:
hdev->clk_throttling.current_reason &= ~HL_CLK_THROTTLE_POWER;
hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_POWER].end = ktime_get();
- dev_info_ratelimited(hdev->dev, "Power envelop is safe, back to optimal clock\n");
+ dev_dbg_ratelimited(hdev->dev, "Power envelop is safe, back to optimal clock\n");
break;
case GAUDI2_EVENT_CPU_FIX_THERMAL_ENV_S:
@@ -8629,12 +8844,14 @@ static void gaudi2_print_clk_change_info(struct hl_device *hdev, u16 event_type)
hdev->clk_throttling.aggregated_reason |= HL_CLK_THROTTLE_THERMAL;
hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_THERMAL].start = ktime_get();
hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_THERMAL].end = zero_time;
+ *event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
dev_info_ratelimited(hdev->dev, "Clock throttling due to overheating\n");
break;
case GAUDI2_EVENT_CPU_FIX_THERMAL_ENV_E:
hdev->clk_throttling.current_reason &= ~HL_CLK_THROTTLE_THERMAL;
hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_THERMAL].end = ktime_get();
+ *event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
dev_info_ratelimited(hdev->dev, "Thermal envelop is safe, back to optimal clock\n");
break;
@@ -8646,43 +8863,49 @@ static void gaudi2_print_clk_change_info(struct hl_device *hdev, u16 event_type)
mutex_unlock(&hdev->clk_throttling.lock);
}
-static void gaudi2_print_out_of_sync_info(struct hl_device *hdev,
+static void gaudi2_print_out_of_sync_info(struct hl_device *hdev, u16 event_type,
struct cpucp_pkt_sync_err *sync_err)
{
struct hl_hw_queue *q = &hdev->kernel_queues[GAUDI2_QUEUE_ID_CPU_PQ];
- dev_err(hdev->dev, "Out of sync with FW, FW: pi=%u, ci=%u, LKD: pi=%u, ci=%d\n",
- le32_to_cpu(sync_err->pi), le32_to_cpu(sync_err->ci), q->pi, atomic_read(&q->ci));
+ gaudi2_print_event(hdev, event_type, false,
+ "FW: pi=%u, ci=%u, LKD: pi=%u, ci=%d\n",
+ le32_to_cpu(sync_err->pi), le32_to_cpu(sync_err->ci),
+ q->pi, atomic_read(&q->ci));
}
-static void gaudi2_handle_pcie_p2p_msix(struct hl_device *hdev)
+static int gaudi2_handle_pcie_p2p_msix(struct hl_device *hdev, u16 event_type)
{
- u32 p2p_intr, msix_gw_intr;
+ u32 p2p_intr, msix_gw_intr, error_count = 0;
p2p_intr = RREG32(mmPCIE_WRAP_P2P_INTR);
msix_gw_intr = RREG32(mmPCIE_WRAP_MSIX_GW_INTR);
if (p2p_intr) {
- dev_err_ratelimited(hdev->dev,
+ gaudi2_print_event(hdev, event_type, true,
"pcie p2p transaction terminated due to security, req_id(0x%x)\n",
RREG32(mmPCIE_WRAP_P2P_REQ_ID));
WREG32(mmPCIE_WRAP_P2P_INTR, 0x1);
+ error_count++;
}
if (msix_gw_intr) {
- dev_err_ratelimited(hdev->dev,
+ gaudi2_print_event(hdev, event_type, true,
"pcie msi-x gen denied due to vector num check failure, vec(0x%X)\n",
RREG32(mmPCIE_WRAP_MSIX_GW_VEC));
WREG32(mmPCIE_WRAP_MSIX_GW_INTR, 0x1);
+ error_count++;
}
+
+ return error_count;
}
-static void gaudi2_handle_pcie_drain(struct hl_device *hdev,
+static int gaudi2_handle_pcie_drain(struct hl_device *hdev,
struct hl_eq_pcie_drain_ind_data *drain_data)
{
- u64 lbw_rd, lbw_wr, hbw_rd, hbw_wr, cause;
+ u64 lbw_rd, lbw_wr, hbw_rd, hbw_wr, cause, error_count = 0;
cause = le64_to_cpu(drain_data->intr_cause.intr_cause_data);
lbw_rd = le64_to_cpu(drain_data->drain_rd_addr_lbw);
@@ -8690,39 +8913,52 @@ static void gaudi2_handle_pcie_drain(struct hl_device *hdev,
hbw_rd = le64_to_cpu(drain_data->drain_rd_addr_hbw);
hbw_wr = le64_to_cpu(drain_data->drain_wr_addr_hbw);
- if (cause & BIT_ULL(0))
+ if (cause & BIT_ULL(0)) {
dev_err_ratelimited(hdev->dev,
"PCIE AXI drain LBW completed, read_err %u, write_err %u\n",
!!lbw_rd, !!lbw_wr);
+ error_count++;
+ }
- if (cause & BIT_ULL(1))
+ if (cause & BIT_ULL(1)) {
dev_err_ratelimited(hdev->dev,
"PCIE AXI drain HBW completed, raddr %#llx, waddr %#llx\n",
hbw_rd, hbw_wr);
+ error_count++;
+ }
+
+ return error_count;
}
-static void gaudi2_handle_psoc_drain(struct hl_device *hdev, u64 intr_cause_data)
+static int gaudi2_handle_psoc_drain(struct hl_device *hdev, u64 intr_cause_data)
{
+ u32 error_count = 0;
int i;
for (i = 0 ; i < GAUDI2_NUM_OF_AXI_DRAIN_ERR_CAUSE ; i++) {
- if (intr_cause_data & BIT_ULL(i))
+ if (intr_cause_data & BIT_ULL(i)) {
dev_err_ratelimited(hdev->dev, "PSOC %s completed\n",
gaudi2_psoc_axi_drain_interrupts_cause[i]);
+ error_count++;
+ }
}
+
+ hl_check_for_glbl_errors(hdev);
+
+ return error_count;
}
-static void gaudi2_print_cpu_pkt_failure_info(struct hl_device *hdev,
+static void gaudi2_print_cpu_pkt_failure_info(struct hl_device *hdev, u16 event_type,
struct cpucp_pkt_sync_err *sync_err)
{
struct hl_hw_queue *q = &hdev->kernel_queues[GAUDI2_QUEUE_ID_CPU_PQ];
- dev_warn(hdev->dev,
+ gaudi2_print_event(hdev, event_type, false,
"FW reported sanity check failure, FW: pi=%u, ci=%u, LKD: pi=%u, ci=%d\n",
le32_to_cpu(sync_err->pi), le32_to_cpu(sync_err->ci), q->pi, atomic_read(&q->ci));
}
-static void hl_arc_event_handle(struct hl_device *hdev,
+static int hl_arc_event_handle(struct hl_device *hdev, u16 event_type,
struct hl_eq_engine_arc_intr_data *data)
{
struct hl_engine_arc_dccm_queue_full_irq *q;
@@ -8737,12 +8973,13 @@ static void hl_arc_event_handle(struct hl_device *hdev,
case ENGINE_ARC_DCCM_QUEUE_FULL_IRQ:
q = (struct hl_engine_arc_dccm_queue_full_irq *) &payload;
- dev_err_ratelimited(hdev->dev,
+ gaudi2_print_event(hdev, event_type, true,
"ARC DCCM Full event: EngId: %u, Intr_type: %u, Qidx: %u\n",
engine_id, intr_type, q->queue_index);
- break;
+ return 1;
default:
- dev_err_ratelimited(hdev->dev, "Unknown ARC event type\n");
+ gaudi2_print_event(hdev, event_type, true, "Unknown ARC event type\n");
+ return 0;
}
}
@@ -8750,8 +8987,7 @@ static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_ent
{
struct gaudi2_device *gaudi2 = hdev->asic_specific;
bool reset_required = false, is_critical = false;
- u32 ctl, reset_flags = HL_DRV_RESET_HARD;
- int index, sbte_index;
+ u32 index, ctl, reset_flags = HL_DRV_RESET_HARD, error_count = 0;
u64 event_mask = 0;
u16 event_type;
@@ -8767,8 +9003,6 @@ static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_ent
gaudi2->events_stat[event_type]++;
gaudi2->events_stat_aggregate[event_type]++;
- gaudi2_print_irq_info(hdev, event_type);
-
switch (event_type) {
case GAUDI2_EVENT_PCIE_CORE_SERR ... GAUDI2_EVENT_ARC0_ECC_DERR:
fallthrough;
@@ -8777,6 +9011,7 @@ static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_ent
event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
reset_required = gaudi2_handle_ecc_event(hdev, event_type, &eq_entry->ecc_data);
is_critical = eq_entry->ecc_data.is_critical;
+ error_count++;
break;
case GAUDI2_EVENT_TPC0_QM ... GAUDI2_EVENT_PDMA1_QM:
@@ -8784,48 +9019,48 @@ static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_ent
case GAUDI2_EVENT_ROTATOR0_ROT0_QM ... GAUDI2_EVENT_ROTATOR1_ROT1_QM:
fallthrough;
case GAUDI2_EVENT_NIC0_QM0 ... GAUDI2_EVENT_NIC11_QM1:
- gaudi2_handle_qman_err(hdev, event_type);
+ error_count = gaudi2_handle_qman_err(hdev, event_type, &event_mask);
event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break;
case GAUDI2_EVENT_ARC_AXI_ERROR_RESPONSE_0:
reset_flags |= HL_DRV_RESET_FW_FATAL_ERR;
- gaudi2_handle_arc_farm_sei_err(hdev);
+ error_count = gaudi2_handle_arc_farm_sei_err(hdev, event_type);
event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
case GAUDI2_EVENT_CPU_AXI_ERR_RSP:
- gaudi2_handle_cpu_sei_err(hdev);
+ error_count = gaudi2_handle_cpu_sei_err(hdev, event_type);
event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
case GAUDI2_EVENT_PDMA_CH0_AXI_ERR_RSP:
case GAUDI2_EVENT_PDMA_CH1_AXI_ERR_RSP:
reset_flags |= HL_DRV_RESET_FW_FATAL_ERR;
- gaudi2_handle_qm_sei_err(hdev, event_type, &eq_entry->razwi_info, &event_mask);
+ error_count = gaudi2_handle_qm_sei_err(hdev, event_type, true, &event_mask);
event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break;
case GAUDI2_EVENT_ROTATOR0_AXI_ERROR_RESPONSE:
case GAUDI2_EVENT_ROTATOR1_AXI_ERROR_RESPONSE:
index = event_type - GAUDI2_EVENT_ROTATOR0_AXI_ERROR_RESPONSE;
- gaudi2_handle_rot_err(hdev, index, &eq_entry->razwi_with_intr_cause, &event_mask);
- gaudi2_handle_qm_sei_err(hdev, event_type, NULL, &event_mask);
+ error_count = gaudi2_handle_rot_err(hdev, index, event_type,
+ &eq_entry->razwi_with_intr_cause, &event_mask);
+ error_count += gaudi2_handle_qm_sei_err(hdev, event_type, false, &event_mask);
event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break;
case GAUDI2_EVENT_TPC0_AXI_ERR_RSP ... GAUDI2_EVENT_TPC24_AXI_ERR_RSP:
index = event_type - GAUDI2_EVENT_TPC0_AXI_ERR_RSP;
- gaudi2_tpc_ack_interrupts(hdev, index, "AXI_ERR_RSP",
+ error_count = gaudi2_tpc_ack_interrupts(hdev, index, event_type,
&eq_entry->razwi_with_intr_cause, &event_mask);
- gaudi2_handle_qm_sei_err(hdev, event_type, NULL, &event_mask);
+ error_count += gaudi2_handle_qm_sei_err(hdev, event_type, false, &event_mask);
event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break;
case GAUDI2_EVENT_DEC0_AXI_ERR_RSPONSE ... GAUDI2_EVENT_DEC9_AXI_ERR_RSPONSE:
index = event_type - GAUDI2_EVENT_DEC0_AXI_ERR_RSPONSE;
- gaudi2_handle_dec_err(hdev, index, "AXI_ERR_RESPONSE", &eq_entry->razwi_info,
- &event_mask);
+ error_count = gaudi2_handle_dec_err(hdev, index, event_type, &event_mask);
event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break;
@@ -8856,8 +9091,8 @@ static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_ent
case GAUDI2_EVENT_TPC24_KERNEL_ERR:
index = (event_type - GAUDI2_EVENT_TPC0_KERNEL_ERR) /
(GAUDI2_EVENT_TPC1_KERNEL_ERR - GAUDI2_EVENT_TPC0_KERNEL_ERR);
- gaudi2_tpc_ack_interrupts(hdev, index, "KRN_ERR", &eq_entry->razwi_with_intr_cause,
- &event_mask);
+ error_count = gaudi2_tpc_ack_interrupts(hdev, index, event_type,
+ &eq_entry->razwi_with_intr_cause, &event_mask);
event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break;
@@ -8873,7 +9108,7 @@ static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_ent
case GAUDI2_EVENT_DEC9_SPI:
index = (event_type - GAUDI2_EVENT_DEC0_SPI) /
(GAUDI2_EVENT_DEC1_SPI - GAUDI2_EVENT_DEC0_SPI);
- gaudi2_handle_dec_err(hdev, index, "SPI", &eq_entry->razwi_info, &event_mask);
+ error_count = gaudi2_handle_dec_err(hdev, index, event_type, &event_mask);
event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break;
@@ -8884,9 +9119,8 @@ static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_ent
index = (event_type - GAUDI2_EVENT_MME0_CTRL_AXI_ERROR_RESPONSE) /
(GAUDI2_EVENT_MME1_CTRL_AXI_ERROR_RESPONSE -
GAUDI2_EVENT_MME0_CTRL_AXI_ERROR_RESPONSE);
- gaudi2_handle_mme_err(hdev, index,
- "CTRL_AXI_ERROR_RESPONSE", &eq_entry->razwi_info, &event_mask);
- gaudi2_handle_qm_sei_err(hdev, event_type, NULL, &event_mask);
+ error_count = gaudi2_handle_mme_err(hdev, index, event_type, &event_mask);
+ error_count += gaudi2_handle_qm_sei_err(hdev, event_type, false, &event_mask);
event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break;
@@ -8897,8 +9131,7 @@ static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_ent
index = (event_type - GAUDI2_EVENT_MME0_QMAN_SW_ERROR) /
(GAUDI2_EVENT_MME1_QMAN_SW_ERROR -
GAUDI2_EVENT_MME0_QMAN_SW_ERROR);
- gaudi2_handle_mme_err(hdev, index, "QMAN_SW_ERROR", &eq_entry->razwi_info,
- &event_mask);
+ error_count = gaudi2_handle_mme_err(hdev, index, event_type, &event_mask);
event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break;
@@ -8909,25 +9142,25 @@ static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_ent
index = (event_type - GAUDI2_EVENT_MME0_WAP_SOURCE_RESULT_INVALID) /
(GAUDI2_EVENT_MME1_WAP_SOURCE_RESULT_INVALID -
GAUDI2_EVENT_MME0_WAP_SOURCE_RESULT_INVALID);
- gaudi2_handle_mme_wap_err(hdev, index, &eq_entry->razwi_info, &event_mask);
+ error_count = gaudi2_handle_mme_wap_err(hdev, index, event_type, &event_mask);
event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break;
case GAUDI2_EVENT_KDMA_CH0_AXI_ERR_RSP:
case GAUDI2_EVENT_KDMA0_CORE:
- gaudi2_handle_kdma_core_event(hdev,
+ error_count = gaudi2_handle_kdma_core_event(hdev, event_type,
le64_to_cpu(eq_entry->intr_cause.intr_cause_data));
event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
case GAUDI2_EVENT_HDMA2_CORE ... GAUDI2_EVENT_PDMA1_CORE:
- gaudi2_handle_dma_core_event(hdev,
+ error_count = gaudi2_handle_dma_core_event(hdev, event_type,
le64_to_cpu(eq_entry->intr_cause.intr_cause_data));
event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break;
case GAUDI2_EVENT_PCIE_ADDR_DEC_ERR:
- gaudi2_print_pcie_addr_dec_info(hdev,
+ error_count = gaudi2_print_pcie_addr_dec_info(hdev, event_type,
le64_to_cpu(eq_entry->intr_cause.intr_cause_data), &event_mask);
reset_flags |= HL_DRV_RESET_FW_FATAL_ERR;
event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
@@ -8937,27 +9170,27 @@ static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_ent
case GAUDI2_EVENT_HMMU_0_AXI_ERR_RSP ... GAUDI2_EVENT_HMMU_12_AXI_ERR_RSP:
case GAUDI2_EVENT_PMMU0_PAGE_FAULT_WR_PERM ... GAUDI2_EVENT_PMMU0_SECURITY_ERROR:
case GAUDI2_EVENT_PMMU_AXI_ERR_RSP_0:
- gaudi2_handle_mmu_spi_sei_err(hdev, event_type, &event_mask);
+ error_count = gaudi2_handle_mmu_spi_sei_err(hdev, event_type, &event_mask);
reset_flags |= HL_DRV_RESET_FW_FATAL_ERR;
event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break;
case GAUDI2_EVENT_HIF0_FATAL ... GAUDI2_EVENT_HIF12_FATAL:
- gaudi2_handle_hif_fatal(hdev, event_type,
+ error_count = gaudi2_handle_hif_fatal(hdev, event_type,
le64_to_cpu(eq_entry->intr_cause.intr_cause_data));
reset_flags |= HL_DRV_RESET_FW_FATAL_ERR;
event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
case GAUDI2_EVENT_PMMU_FATAL_0:
- gaudi2_handle_pif_fatal(hdev,
+ error_count = gaudi2_handle_pif_fatal(hdev, event_type,
le64_to_cpu(eq_entry->intr_cause.intr_cause_data));
reset_flags |= HL_DRV_RESET_FW_FATAL_ERR;
event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
case GAUDI2_EVENT_PSOC63_RAZWI_OR_PID_MIN_MAX_INTERRUPT:
- gaudi2_ack_psoc_razwi_event_handler(hdev, &event_mask);
+ error_count = gaudi2_ack_psoc_razwi_event_handler(hdev, &event_mask);
event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break;
@@ -8967,33 +9200,39 @@ static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_ent
reset_flags |= HL_DRV_RESET_FW_FATAL_ERR;
reset_required = true;
}
+ error_count++;
break;
case GAUDI2_EVENT_HBM_CATTRIP_0 ... GAUDI2_EVENT_HBM_CATTRIP_5:
- gaudi2_handle_hbm_cattrip(hdev, le64_to_cpu(eq_entry->intr_cause.intr_cause_data));
+ error_count = gaudi2_handle_hbm_cattrip(hdev, event_type,
+ le64_to_cpu(eq_entry->intr_cause.intr_cause_data));
event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
case GAUDI2_EVENT_HBM0_MC0_SPI ... GAUDI2_EVENT_HBM5_MC1_SPI:
- gaudi2_handle_hbm_mc_spi(hdev, le64_to_cpu(eq_entry->intr_cause.intr_cause_data));
+ error_count = gaudi2_handle_hbm_mc_spi(hdev,
+ le64_to_cpu(eq_entry->intr_cause.intr_cause_data));
event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
case GAUDI2_EVENT_PCIE_DRAIN_COMPLETE:
- gaudi2_handle_pcie_drain(hdev, &eq_entry->pcie_drain_ind_data);
+ error_count = gaudi2_handle_pcie_drain(hdev, &eq_entry->pcie_drain_ind_data);
event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
case GAUDI2_EVENT_PSOC59_RPM_ERROR_OR_DRAIN:
- gaudi2_handle_psoc_drain(hdev, le64_to_cpu(eq_entry->intr_cause.intr_cause_data));
+ error_count = gaudi2_handle_psoc_drain(hdev,
+ le64_to_cpu(eq_entry->intr_cause.intr_cause_data));
event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
case GAUDI2_EVENT_CPU_AXI_ECC:
+ error_count = GAUDI2_NA_EVENT_CAUSE;
reset_flags |= HL_DRV_RESET_FW_FATAL_ERR;
event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
case GAUDI2_EVENT_CPU_L2_RAM_ECC:
+ error_count = GAUDI2_NA_EVENT_CAUSE;
reset_flags |= HL_DRV_RESET_FW_FATAL_ERR;
event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
@@ -9001,31 +9240,30 @@ static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_ent
case GAUDI2_EVENT_MME1_SBTE0_AXI_ERR_RSP ... GAUDI2_EVENT_MME1_SBTE4_AXI_ERR_RSP:
case GAUDI2_EVENT_MME2_SBTE0_AXI_ERR_RSP ... GAUDI2_EVENT_MME2_SBTE4_AXI_ERR_RSP:
case GAUDI2_EVENT_MME3_SBTE0_AXI_ERR_RSP ... GAUDI2_EVENT_MME3_SBTE4_AXI_ERR_RSP:
- index = (event_type - GAUDI2_EVENT_MME0_SBTE0_AXI_ERR_RSP) /
- (GAUDI2_EVENT_MME1_SBTE0_AXI_ERR_RSP -
- GAUDI2_EVENT_MME0_SBTE0_AXI_ERR_RSP);
- sbte_index = (event_type - GAUDI2_EVENT_MME0_SBTE0_AXI_ERR_RSP) %
- (GAUDI2_EVENT_MME1_SBTE0_AXI_ERR_RSP -
- GAUDI2_EVENT_MME0_SBTE0_AXI_ERR_RSP);
- gaudi2_handle_mme_sbte_err(hdev, index, sbte_index,
+ error_count = gaudi2_handle_mme_sbte_err(hdev, event_type,
le64_to_cpu(eq_entry->intr_cause.intr_cause_data));
event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break;
case GAUDI2_EVENT_VM0_ALARM_A ... GAUDI2_EVENT_VM3_ALARM_B:
+ error_count = GAUDI2_NA_EVENT_CAUSE;
reset_flags |= HL_DRV_RESET_FW_FATAL_ERR;
event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
case GAUDI2_EVENT_PSOC_AXI_ERR_RSP:
+ error_count = GAUDI2_NA_EVENT_CAUSE;
event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
case GAUDI2_EVENT_PSOC_PRSTN_FALL:
+ error_count = GAUDI2_NA_EVENT_CAUSE;
event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
case GAUDI2_EVENT_PCIE_APB_TIMEOUT:
+ error_count = GAUDI2_NA_EVENT_CAUSE;
reset_flags |= HL_DRV_RESET_FW_FATAL_ERR;
event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
case GAUDI2_EVENT_PCIE_FATAL_ERR:
+ error_count = GAUDI2_NA_EVENT_CAUSE;
event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
case GAUDI2_EVENT_TPC0_BMON_SPMU:
@@ -9078,6 +9316,7 @@ static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_ent
case GAUDI2_EVENT_DEC8_BMON_SPMU:
case GAUDI2_EVENT_DEC9_BMON_SPMU:
case GAUDI2_EVENT_ROTATOR0_BMON_SPMU ... GAUDI2_EVENT_SM3_BMON_SPMU:
+ error_count = GAUDI2_NA_EVENT_CAUSE;
event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break;
@@ -9085,67 +9324,87 @@ static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_ent
case GAUDI2_EVENT_CPU_FIX_POWER_ENV_E:
case GAUDI2_EVENT_CPU_FIX_THERMAL_ENV_S:
case GAUDI2_EVENT_CPU_FIX_THERMAL_ENV_E:
- gaudi2_print_clk_change_info(hdev, event_type);
- event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
+ gaudi2_print_clk_change_info(hdev, event_type, &event_mask);
+ error_count = GAUDI2_NA_EVENT_CAUSE;
break;
case GAUDI2_EVENT_CPU_PKT_QUEUE_OUT_SYNC:
- gaudi2_print_out_of_sync_info(hdev, &eq_entry->pkt_sync_err);
+ gaudi2_print_out_of_sync_info(hdev, event_type, &eq_entry->pkt_sync_err);
+ error_count = GAUDI2_NA_EVENT_CAUSE;
event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
case GAUDI2_EVENT_PCIE_FLR_REQUESTED:
event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
+ error_count = GAUDI2_NA_EVENT_CAUSE;
/* Do nothing- FW will handle it */
break;
case GAUDI2_EVENT_PCIE_P2P_MSIX:
- gaudi2_handle_pcie_p2p_msix(hdev);
+ error_count = gaudi2_handle_pcie_p2p_msix(hdev, event_type);
event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break;
case GAUDI2_EVENT_SM0_AXI_ERROR_RESPONSE ... GAUDI2_EVENT_SM3_AXI_ERROR_RESPONSE:
index = event_type - GAUDI2_EVENT_SM0_AXI_ERROR_RESPONSE;
- gaudi2_handle_sm_err(hdev, index);
+ error_count = gaudi2_handle_sm_err(hdev, event_type, index);
event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break;
case GAUDI2_EVENT_PSOC_MME_PLL_LOCK_ERR ... GAUDI2_EVENT_DCORE2_HBM_PLL_LOCK_ERR:
+ error_count = GAUDI2_NA_EVENT_CAUSE;
event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
case GAUDI2_EVENT_CPU_CPLD_SHUTDOWN_CAUSE:
dev_info(hdev->dev, "CPLD shutdown cause, reset reason: 0x%llx\n",
le64_to_cpu(eq_entry->data[0]));
+ error_count = GAUDI2_NA_EVENT_CAUSE;
event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
case GAUDI2_EVENT_CPU_CPLD_SHUTDOWN_EVENT:
dev_err(hdev->dev, "CPLD shutdown event, reset reason: 0x%llx\n",
le64_to_cpu(eq_entry->data[0]));
+ error_count = GAUDI2_NA_EVENT_CAUSE;
event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
case GAUDI2_EVENT_CPU_PKT_SANITY_FAILED:
- gaudi2_print_cpu_pkt_failure_info(hdev, &eq_entry->pkt_sync_err);
+ gaudi2_print_cpu_pkt_failure_info(hdev, event_type, &eq_entry->pkt_sync_err);
+ error_count = GAUDI2_NA_EVENT_CAUSE;
event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
break;
case GAUDI2_EVENT_ARC_DCCM_FULL:
- hl_arc_event_handle(hdev, &eq_entry->arc_data);
+ error_count = hl_arc_event_handle(hdev, event_type, &eq_entry->arc_data);
event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR;
break;
case GAUDI2_EVENT_CPU_FP32_NOT_SUPPORTED:
+ case GAUDI2_EVENT_DEV_RESET_REQ:
event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR;
+ error_count = GAUDI2_NA_EVENT_CAUSE;
is_critical = true;
break;
default:
- if (gaudi2_irq_map_table[event_type].valid)
+ if (gaudi2_irq_map_table[event_type].valid) {
dev_err_ratelimited(hdev->dev, "Cannot find handler for event %d\n",
event_type);
+ error_count = GAUDI2_NA_EVENT_CAUSE;
+ }
}
+ /* Make sure to dump an error in case no error cause was printed so far.
+ * Note that although we have counted the errors, we use this number as
+ * a boolean.
+ */
+ if (error_count == GAUDI2_NA_EVENT_CAUSE && !is_info_event(event_type))
+ gaudi2_print_event(hdev, event_type, true, "%d", event_type);
+ else if (error_count == 0)
+ gaudi2_print_event(hdev, event_type, true,
+ "No error cause for H/W event %u\n", event_type);
+
if ((gaudi2_irq_map_table[event_type].reset || reset_required) &&
(hdev->hard_reset_on_fw_events ||
(hdev->asic_prop.fw_security_enabled && is_critical)))
@@ -10116,8 +10375,8 @@ static int gaudi2_block_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
address = pci_resource_start(hdev->pdev, SRAM_CFG_BAR_ID) + offset_in_bar;
- vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
- VM_DONTCOPY | VM_NORESERVE;
+ vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
+ VM_DONTCOPY | VM_NORESERVE);
rc = remap_pfn_range(vma, vma->vm_start, address >> PAGE_SHIFT,
block_size, vma->vm_page_prot);
@@ -10466,6 +10725,8 @@ static const struct hl_asic_funcs gaudi2_funcs = {
.set_dram_bar_base = gaudi2_set_hbm_bar_base,
.set_engine_cores = gaudi2_set_engine_cores,
.send_device_activity = gaudi2_send_device_activity,
+ .set_dram_properties = gaudi2_set_dram_properties,
+ .set_binning_masks = gaudi2_set_binning_masks,
};
void gaudi2_set_asic_funcs(struct hl_device *hdev)
diff --git a/drivers/misc/habanalabs/gaudi2/gaudi2P.h b/drivers/accel/habanalabs/gaudi2/gaudi2P.h
index b4383c199bbb..2687404d9d21 100644
--- a/drivers/misc/habanalabs/gaudi2/gaudi2P.h
+++ b/drivers/accel/habanalabs/gaudi2/gaudi2P.h
@@ -8,7 +8,7 @@
#ifndef GAUDI2P_H_
#define GAUDI2P_H_
-#include <uapi/misc/habanalabs.h>
+#include <uapi/drm/habanalabs_accel.h>
#include "../common/habanalabs.h"
#include "../include/common/hl_boot_if.h"
#include "../include/gaudi2/gaudi2.h"
@@ -240,6 +240,8 @@
#define GAUDI2_SOB_INCREMENT_BY_ONE (FIELD_PREP(DCORE0_SYNC_MNGR_OBJS_SOB_OBJ_VAL_MASK, 1) | \
FIELD_PREP(DCORE0_SYNC_MNGR_OBJS_SOB_OBJ_INC_MASK, 1))
+#define GAUDI2_NUM_OF_GLBL_ERR_CAUSE 8
+
enum gaudi2_reserved_sob_id {
GAUDI2_RESERVED_SOB_CS_COMPLETION_FIRST,
GAUDI2_RESERVED_SOB_CS_COMPLETION_LAST =
@@ -532,6 +534,41 @@ struct gaudi2_device {
u32 num_of_valid_hw_events;
};
+/*
+ * Types of the Gaudi2 IP blocks, used by special blocks iterator.
+ * Required for scenarios where only particular block types can be
+ * addressed (e.g., special PLDM images).
+ */
+enum gaudi2_block_types {
+ GAUDI2_BLOCK_TYPE_PLL,
+ GAUDI2_BLOCK_TYPE_RTR,
+ GAUDI2_BLOCK_TYPE_CPU,
+ GAUDI2_BLOCK_TYPE_HIF,
+ GAUDI2_BLOCK_TYPE_HBM,
+ GAUDI2_BLOCK_TYPE_NIC,
+ GAUDI2_BLOCK_TYPE_PCIE,
+ GAUDI2_BLOCK_TYPE_PCIE_PMA,
+ GAUDI2_BLOCK_TYPE_PDMA,
+ GAUDI2_BLOCK_TYPE_EDMA,
+ GAUDI2_BLOCK_TYPE_PMMU,
+ GAUDI2_BLOCK_TYPE_PSOC,
+ GAUDI2_BLOCK_TYPE_ROT,
+ GAUDI2_BLOCK_TYPE_ARC_FARM,
+ GAUDI2_BLOCK_TYPE_DEC,
+ GAUDI2_BLOCK_TYPE_MME,
+ GAUDI2_BLOCK_TYPE_EU_BIST,
+ GAUDI2_BLOCK_TYPE_SYNC_MNGR,
+ GAUDI2_BLOCK_TYPE_STLB,
+ GAUDI2_BLOCK_TYPE_TPC,
+ GAUDI2_BLOCK_TYPE_HMMU,
+ GAUDI2_BLOCK_TYPE_SRAM,
+ GAUDI2_BLOCK_TYPE_XBAR,
+ GAUDI2_BLOCK_TYPE_KDMA,
+ GAUDI2_BLOCK_TYPE_XDMA,
+ GAUDI2_BLOCK_TYPE_XFT,
+ GAUDI2_BLOCK_TYPE_MAX
+};
+
extern const u32 gaudi2_dma_core_blocks_bases[DMA_CORE_ID_SIZE];
extern const u32 gaudi2_qm_blocks_bases[GAUDI2_QUEUE_ID_SIZE];
extern const u32 gaudi2_mme_acc_blocks_bases[MME_ID_SIZE];
diff --git a/drivers/misc/habanalabs/gaudi2/gaudi2_coresight.c b/drivers/accel/habanalabs/gaudi2/gaudi2_coresight.c
index 56c6ab692482..1dfbe293ecec 100644
--- a/drivers/misc/habanalabs/gaudi2/gaudi2_coresight.c
+++ b/drivers/accel/habanalabs/gaudi2/gaudi2_coresight.c
@@ -5,7 +5,7 @@
* All Rights Reserved.
*/
#include "gaudi2_coresight_regs.h"
-#include <uapi/misc/habanalabs.h>
+#include <uapi/drm/habanalabs_accel.h>
#define GAUDI2_PLDM_CORESIGHT_TIMEOUT_USEC (CORESIGHT_TIMEOUT_USEC * 2000)
#define SPMU_MAX_COUNTERS 6
@@ -2376,10 +2376,10 @@ static int gaudi2_config_bmon(struct hl_device *hdev, struct hl_debug_params *pa
WREG32(base_reg + mmBMON_ADDRH_S2_OFFSET, upper_32_bits(input->start_addr2));
WREG32(base_reg + mmBMON_ADDRL_E2_OFFSET, lower_32_bits(input->end_addr2));
WREG32(base_reg + mmBMON_ADDRH_E2_OFFSET, upper_32_bits(input->end_addr2));
- WREG32(base_reg + mmBMON_ADDRL_S3_OFFSET, lower_32_bits(input->start_addr2));
- WREG32(base_reg + mmBMON_ADDRH_S3_OFFSET, upper_32_bits(input->start_addr2));
- WREG32(base_reg + mmBMON_ADDRL_E3_OFFSET, lower_32_bits(input->end_addr2));
- WREG32(base_reg + mmBMON_ADDRH_E3_OFFSET, upper_32_bits(input->end_addr2));
+ WREG32(base_reg + mmBMON_ADDRL_S3_OFFSET, lower_32_bits(input->start_addr3));
+ WREG32(base_reg + mmBMON_ADDRH_S3_OFFSET, upper_32_bits(input->start_addr3));
+ WREG32(base_reg + mmBMON_ADDRL_E3_OFFSET, lower_32_bits(input->end_addr3));
+ WREG32(base_reg + mmBMON_ADDRH_E3_OFFSET, upper_32_bits(input->end_addr3));
WREG32(base_reg + mmBMON_IDL_OFFSET, 0x0);
WREG32(base_reg + mmBMON_IDH_OFFSET, 0x0);
diff --git a/drivers/misc/habanalabs/gaudi2/gaudi2_coresight_regs.h b/drivers/accel/habanalabs/gaudi2/gaudi2_coresight_regs.h
index df8729286e06..df8729286e06 100644
--- a/drivers/misc/habanalabs/gaudi2/gaudi2_coresight_regs.h
+++ b/drivers/accel/habanalabs/gaudi2/gaudi2_coresight_regs.h
diff --git a/drivers/misc/habanalabs/gaudi2/gaudi2_masks.h b/drivers/accel/habanalabs/gaudi2/gaudi2_masks.h
index e9ac87828221..e9ac87828221 100644
--- a/drivers/misc/habanalabs/gaudi2/gaudi2_masks.h
+++ b/drivers/accel/habanalabs/gaudi2/gaudi2_masks.h
diff --git a/drivers/misc/habanalabs/gaudi2/gaudi2_security.c b/drivers/accel/habanalabs/gaudi2/gaudi2_security.c
index 768c2f3dc900..a212f82e6604 100644
--- a/drivers/misc/habanalabs/gaudi2/gaudi2_security.c
+++ b/drivers/accel/habanalabs/gaudi2/gaudi2_security.c
@@ -1561,6 +1561,7 @@ static const u32 gaudi2_pb_dcr0_tpc0_unsecured_regs[] = {
mmDCORE0_TPC0_CFG_LUT_FUNC128_BASE_ADDR_HI,
mmDCORE0_TPC0_CFG_LUT_FUNC256_BASE_ADDR_LO,
mmDCORE0_TPC0_CFG_LUT_FUNC256_BASE_ADDR_HI,
+ mmDCORE0_TPC0_CFG_KERNEL_KERNEL_CONFIG,
mmDCORE0_TPC0_CFG_KERNEL_SRF_0,
mmDCORE0_TPC0_CFG_KERNEL_SRF_1,
mmDCORE0_TPC0_CFG_KERNEL_SRF_2,
@@ -1666,6 +1667,10 @@ static const u32 gaudi2_pb_dcr0_sm_glbl[] = {
mmDCORE0_SYNC_MNGR_GLBL_BASE,
};
+static const u32 gaudi2_pb_dcr1_sm_glbl[] = {
+ mmDCORE1_SYNC_MNGR_GLBL_BASE,
+};
+
static const struct range gaudi2_pb_dcr0_sm_glbl_unsecured_regs[] = {
{mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_1, mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_63},
{mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_1, mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_63},
@@ -1678,14 +1683,14 @@ static const struct range gaudi2_pb_dcr0_sm_glbl_unsecured_regs[] = {
};
static const struct range gaudi2_pb_dcr_x_sm_glbl_unsecured_regs[] = {
- {mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_0, mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_63},
- {mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_0, mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_63},
- {mmDCORE0_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_0, mmDCORE0_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_63},
- {mmDCORE0_SYNC_MNGR_GLBL_CQ_PI_0, mmDCORE0_SYNC_MNGR_GLBL_CQ_PI_63},
- {mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_L_0, mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_L_63},
- {mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_H_0, mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_H_63},
- {mmDCORE0_SYNC_MNGR_GLBL_LBW_DATA_0, mmDCORE0_SYNC_MNGR_GLBL_LBW_DATA_63},
- {mmDCORE0_SYNC_MNGR_GLBL_CQ_INC_MODE_0, mmDCORE0_SYNC_MNGR_GLBL_CQ_INC_MODE_63},
+ {mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_0, mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_63},
+ {mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_0, mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_63},
+ {mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_0, mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_63},
+ {mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_0, mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_63},
+ {mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_0, mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_63},
+ {mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_0, mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_63},
+ {mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_0, mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_63},
+ {mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_0, mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_63},
};
static const u32 gaudi2_pb_arc_sched[] = {
@@ -3358,14 +3363,6 @@ static int gaudi2_init_protection_bits(struct hl_device *hdev)
/* Sync Manager GLBL */
- /* Unsecure all CQ registers */
- rc |= hl_init_pb_ranges(hdev, NUM_OF_DCORES, DCORE_OFFSET,
- HL_PB_SINGLE_INSTANCE, HL_PB_NA,
- gaudi2_pb_dcr0_sm_glbl,
- ARRAY_SIZE(gaudi2_pb_dcr0_sm_glbl),
- gaudi2_pb_dcr_x_sm_glbl_unsecured_regs,
- ARRAY_SIZE(gaudi2_pb_dcr_x_sm_glbl_unsecured_regs));
-
/* Secure Dcore0 CQ0 registers */
rc |= hl_init_pb_ranges(hdev, HL_PB_SHARED, HL_PB_NA,
HL_PB_SINGLE_INSTANCE, HL_PB_NA,
@@ -3374,6 +3371,14 @@ static int gaudi2_init_protection_bits(struct hl_device *hdev)
gaudi2_pb_dcr0_sm_glbl_unsecured_regs,
ARRAY_SIZE(gaudi2_pb_dcr0_sm_glbl_unsecured_regs));
+ /* Unsecure all other CQ registers */
+ rc |= hl_init_pb_ranges(hdev, NUM_OF_DCORES - 1, DCORE_OFFSET,
+ HL_PB_SINGLE_INSTANCE, HL_PB_NA,
+ gaudi2_pb_dcr1_sm_glbl,
+ ARRAY_SIZE(gaudi2_pb_dcr1_sm_glbl),
+ gaudi2_pb_dcr_x_sm_glbl_unsecured_regs,
+ ARRAY_SIZE(gaudi2_pb_dcr_x_sm_glbl_unsecured_regs));
+
/* PSOC.
* Except for PSOC_GLOBAL_CONF, skip when security is enabled in F/W, because the blocks are
* protected by privileged RR.
diff --git a/drivers/misc/habanalabs/goya/Makefile b/drivers/accel/habanalabs/goya/Makefile
index b3f3b7b96683..b3f3b7b96683 100644
--- a/drivers/misc/habanalabs/goya/Makefile
+++ b/drivers/accel/habanalabs/goya/Makefile
diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/accel/habanalabs/goya/goya.c
index 0f083fcf81a6..df65e9bdc18a 100644
--- a/drivers/misc/habanalabs/goya/goya.c
+++ b/drivers/accel/habanalabs/goya/goya.c
@@ -2880,8 +2880,8 @@ static int goya_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
{
int rc;
- vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
- VM_DONTCOPY | VM_NORESERVE;
+ vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP |
+ VM_DONTCOPY | VM_NORESERVE);
rc = dma_mmap_coherent(hdev->dev, vma, cpu_addr,
(dma_addr - HOST_PHYS_BASE), size);
@@ -5420,6 +5420,16 @@ static int goya_scrub_device_dram(struct hl_device *hdev, u64 val)
return -EOPNOTSUPP;
}
+static int goya_set_dram_properties(struct hl_device *hdev)
+{
+ return 0;
+}
+
+static int goya_set_binning_masks(struct hl_device *hdev)
+{
+ return 0;
+}
+
static int goya_send_device_activity(struct hl_device *hdev, bool open)
{
return 0;
@@ -5518,6 +5528,8 @@ static const struct hl_asic_funcs goya_funcs = {
.access_dev_mem = hl_access_dev_mem,
.set_dram_bar_base = goya_set_ddr_bar_base,
.send_device_activity = goya_send_device_activity,
+ .set_dram_properties = goya_set_dram_properties,
+ .set_binning_masks = goya_set_binning_masks,
};
/*
diff --git a/drivers/misc/habanalabs/goya/goyaP.h b/drivers/accel/habanalabs/goya/goyaP.h
index d6ec43d6f6b0..5df3d30b91fd 100644
--- a/drivers/misc/habanalabs/goya/goyaP.h
+++ b/drivers/accel/habanalabs/goya/goyaP.h
@@ -8,7 +8,7 @@
#ifndef GOYAP_H_
#define GOYAP_H_
-#include <uapi/misc/habanalabs.h>
+#include <uapi/drm/habanalabs_accel.h>
#include "../common/habanalabs.h"
#include "../include/common/hl_boot_if.h"
#include "../include/goya/goya_packets.h"
diff --git a/drivers/misc/habanalabs/goya/goya_coresight.c b/drivers/accel/habanalabs/goya/goya_coresight.c
index 2c5133cfae65..e7ac3046cfaa 100644
--- a/drivers/misc/habanalabs/goya/goya_coresight.c
+++ b/drivers/accel/habanalabs/goya/goya_coresight.c
@@ -10,7 +10,7 @@
#include "../include/goya/asic_reg/goya_regs.h"
#include "../include/goya/asic_reg/goya_masks.h"
-#include <uapi/misc/habanalabs.h>
+#include <uapi/drm/habanalabs_accel.h>
#define GOYA_PLDM_CORESIGHT_TIMEOUT_USEC (CORESIGHT_TIMEOUT_USEC * 100)
diff --git a/drivers/misc/habanalabs/goya/goya_hwmgr.c b/drivers/accel/habanalabs/goya/goya_hwmgr.c
index b595721751c1..b595721751c1 100644
--- a/drivers/misc/habanalabs/goya/goya_hwmgr.c
+++ b/drivers/accel/habanalabs/goya/goya_hwmgr.c
diff --git a/drivers/misc/habanalabs/goya/goya_security.c b/drivers/accel/habanalabs/goya/goya_security.c
index 14c3bae3ccdc..14c3bae3ccdc 100644
--- a/drivers/misc/habanalabs/goya/goya_security.c
+++ b/drivers/accel/habanalabs/goya/goya_security.c
diff --git a/drivers/misc/habanalabs/include/common/cpucp_if.h b/drivers/accel/habanalabs/include/common/cpucp_if.h
index baa5aa43b6f4..d713252a4f13 100644
--- a/drivers/misc/habanalabs/include/common/cpucp_if.h
+++ b/drivers/accel/habanalabs/include/common/cpucp_if.h
@@ -344,6 +344,16 @@ struct hl_eq_engine_arc_intr_data {
__le64 pad[5];
};
+#define ADDR_DEC_ADDRESS_COUNT_MAX 4
+
+/* Data structure specifies details of ADDR_DEC interrupt */
+struct hl_eq_addr_dec_intr_data {
+ struct hl_eq_intr_cause intr_cause;
+ __le64 addr[ADDR_DEC_ADDRESS_COUNT_MAX];
+ __u8 addr_cnt;
+ __u8 pad[7];
+};
+
struct hl_eq_entry {
struct hl_eq_header hdr;
union {
@@ -358,6 +368,7 @@ struct hl_eq_entry {
struct hl_eq_razwi_with_intr_cause razwi_with_intr_cause;
struct hl_eq_hbm_sei_data sei_data; /* Gaudi2 HBM */
struct hl_eq_engine_arc_intr_data arc_data;
+ struct hl_eq_addr_dec_intr_data addr_dec;
__le64 data[7];
};
};
@@ -643,6 +654,10 @@ enum pq_init_status {
* data corruption in case of mismatched driver/FW versions.
* Relevant only to Gaudi.
*
+ * CPUCP_PACKET_GENERIC_PASSTHROUGH -
+ * Generic opcode for all firmware info that is only passed to host
+ * through the LKD, without getting parsed there.
+ *
* CPUCP_PACKET_ACTIVE_STATUS_SET -
* LKD sends FW indication whether device is free or in use, this indication is reported
* also to the BMC.
@@ -704,9 +719,12 @@ enum cpucp_packet_id {
CPUCP_PACKET_RESERVED5, /* not used */
CPUCP_PACKET_RESERVED6, /* not used */
CPUCP_PACKET_RESERVED7, /* not used */
+ CPUCP_PACKET_GENERIC_PASSTHROUGH, /* IOCTL */
CPUCP_PACKET_RESERVED8, /* not used */
- CPUCP_PACKET_RESERVED9, /* not used */
CPUCP_PACKET_ACTIVE_STATUS_SET, /* internal */
+ CPUCP_PACKET_RESERVED9, /* not used */
+ CPUCP_PACKET_RESERVED10, /* not used */
+ CPUCP_PACKET_RESERVED11, /* not used */
CPUCP_PACKET_ID_MAX /* must be last */
};
@@ -727,6 +745,11 @@ enum cpucp_packet_id {
#define CPUCP_PKT_RES_PLL_OUT3_SHIFT 48
#define CPUCP_PKT_RES_PLL_OUT3_MASK 0xFFFF000000000000ull
+#define CPUCP_PKT_RES_EEPROM_OUT0_SHIFT 0
+#define CPUCP_PKT_RES_EEPROM_OUT0_MASK 0x000000000000FFFFull
+#define CPUCP_PKT_RES_EEPROM_OUT1_SHIFT 16
+#define CPUCP_PKT_RES_EEPROM_OUT1_MASK 0x0000000000FF0000ull
+
#define CPUCP_PKT_VAL_PFC_IN1_SHIFT 0
#define CPUCP_PKT_VAL_PFC_IN1_MASK 0x0000000000000001ull
#define CPUCP_PKT_VAL_PFC_IN2_SHIFT 1
@@ -805,8 +828,13 @@ struct cpucp_packet {
__le32 nonce;
};
- /* For NIC requests */
- __le32 port_index;
+ union {
+ /* For NIC requests */
+ __le32 port_index;
+
+ /* For Generic packet sub index */
+ __le32 pkt_subidx;
+ };
};
struct cpucp_unmask_irq_arr_packet {
@@ -881,7 +909,9 @@ enum cpucp_in_attributes {
cpucp_in_max,
cpucp_in_lowest = 6,
cpucp_in_highest = 7,
- cpucp_in_reset_history
+ cpucp_in_reset_history,
+ cpucp_in_intr_alarm_a,
+ cpucp_in_intr_alarm_b,
};
enum cpucp_curr_attributes {
@@ -976,6 +1006,11 @@ enum pll_index {
IC_PLL = 16,
MC_PLL = 17,
EMMC_PLL = 18,
+ D2D_PLL = 19,
+ CS_PLL = 20,
+ C2C_PLL = 21,
+ NCH_PLL = 22,
+ C2M_PLL = 23,
PLL_MAX
};
@@ -1135,8 +1170,9 @@ enum cpucp_serdes_type {
HLS1_SERDES_TYPE,
HLS1H_SERDES_TYPE,
HLS2_SERDES_TYPE,
- UNKNOWN_SERDES_TYPE,
- MAX_NUM_SERDES_TYPE = UNKNOWN_SERDES_TYPE
+ HLS2_TYPE_1_SERDES_TYPE,
+ MAX_NUM_SERDES_TYPE, /* number of types */
+ UNKNOWN_SERDES_TYPE = 0xFFFF /* serdes_type is u16 */
};
struct cpucp_nic_info {
@@ -1161,6 +1197,21 @@ struct page_discard_info {
};
/*
+ * struct frac_val - fracture value represented by "integer.frac".
+ * @integer: the integer part of the fracture value;
+ * @frac: the fracture part of the fracture value.
+ */
+struct frac_val {
+ union {
+ struct {
+ __le16 integer;
+ __le16 frac;
+ };
+ __le32 val;
+ };
+};
+
+/*
* struct ser_val - the SER (symbol error rate) value is represented by "integer * 10 ^ -exp".
* @integer: the integer part of the SER value;
* @exp: the exponent part of the SER value.
@@ -1183,8 +1234,12 @@ struct ser_val {
* @pcs_link: has PCS link.
* @phy_ready: is PHY ready.
* @auto_neg: is Autoneg enabled.
- * @timeout_retransmission_cnt: timeout retransmission events
- * @high_ber_cnt: high ber events
+ * @timeout_retransmission_cnt: timeout retransmission events.
+ * @high_ber_cnt: high ber events.
+ * @pre_fec_ser: pre FEC SER value.
+ * @post_fec_ser: post FEC SER value.
+ * @throughput: measured throughput.
+ * @latency: measured latency.
*/
struct cpucp_nic_status {
__le32 port;
@@ -1200,6 +1255,10 @@ struct cpucp_nic_status {
__u8 auto_neg;
__le32 timeout_retransmission_cnt;
__le32 high_ber_cnt;
+ struct ser_val pre_fec_ser;
+ struct ser_val post_fec_ser;
+ struct frac_val bandwidth;
+ struct frac_val lat;
};
enum cpucp_hbm_row_replace_cause {
@@ -1292,6 +1351,7 @@ struct cpucp_dev_info_signed {
__u8 certificate[SEC_CERTIFICATE_BUF_SZ];
};
+#define DCORE_MON_REGS_SZ 512
/*
* struct dcore_monitor_regs_data - DCORE monitor regs data.
* the structure follows sync manager block layout. relevant only to Gaudi.
@@ -1302,11 +1362,11 @@ struct cpucp_dev_info_signed {
* @mon_status: array of monitor status.
*/
struct dcore_monitor_regs_data {
- __le32 mon_pay_addrl[512];
- __le32 mon_pay_addrh[512];
- __le32 mon_pay_data[512];
- __le32 mon_arm[512];
- __le32 mon_status[512];
+ __le32 mon_pay_addrl[DCORE_MON_REGS_SZ];
+ __le32 mon_pay_addrh[DCORE_MON_REGS_SZ];
+ __le32 mon_pay_data[DCORE_MON_REGS_SZ];
+ __le32 mon_arm[DCORE_MON_REGS_SZ];
+ __le32 mon_status[DCORE_MON_REGS_SZ];
};
/* contains SM data for each SYNC_MNGR (relevant only to Gaudi) */
@@ -1317,4 +1377,14 @@ struct cpucp_monitor_dump {
struct dcore_monitor_regs_data sync_mngr_e_n;
};
+/*
+ * The Type of the generic request (and other input arguments) will be fetched from user by reading
+ * from "pkt_subidx" field in struct cpucp_packet.
+ *
+ * HL_PASSTHROUGHT_VERSIONS - Fetch all firmware versions.
+ */
+enum hl_passthrough_type {
+ HL_PASSTHROUGH_VERSIONS,
+};
+
#endif /* CPUCP_IF_H */
diff --git a/drivers/misc/habanalabs/include/common/hl_boot_if.h b/drivers/accel/habanalabs/include/common/hl_boot_if.h
index e0ea51cc7475..2256add057c5 100644
--- a/drivers/misc/habanalabs/include/common/hl_boot_if.h
+++ b/drivers/accel/habanalabs/include/common/hl_boot_if.h
@@ -41,6 +41,19 @@ enum cpu_boot_err {
};
/*
+ * Mask for fatal failures
+ * This mask contains all possible fatal failures, and a dynamic code
+ * will clear the non-relevant ones.
+ */
+#define CPU_BOOT_ERR_FATAL_MASK \
+ ((1 << CPU_BOOT_ERR_DRAM_INIT_FAIL) | \
+ (1 << CPU_BOOT_ERR_PLL_FAIL) | \
+ (1 << CPU_BOOT_ERR_DEVICE_UNUSABLE_FAIL) | \
+ (1 << CPU_BOOT_ERR_BINNING_FAIL) | \
+ (1 << CPU_BOOT_ERR_DRAM_SKIPPED) | \
+ (1 << CPU_BOOT_ERR_EEPROM_FAIL))
+
+/*
* CPU error bits in BOOT_ERROR registers
*
* CPU_BOOT_ERR0_DRAM_INIT_FAIL DRAM initialization failed.
@@ -439,7 +452,7 @@ struct cpu_dyn_regs {
/* TODO: remove the desc magic after the code is updated to use message */
/* HCDM - Habana Communications Descriptor Magic */
#define HL_COMMS_DESC_MAGIC 0x4843444D
-#define HL_COMMS_DESC_VER 1
+#define HL_COMMS_DESC_VER 3
/* HCMv - Habana Communications Message + header version */
#define HL_COMMS_MSG_MAGIC_VALUE 0x48434D00
@@ -450,8 +463,10 @@ struct cpu_dyn_regs {
((ver) & HL_COMMS_MSG_MAGIC_VER_MASK))
#define HL_COMMS_MSG_MAGIC_V0 HL_COMMS_DESC_MAGIC
#define HL_COMMS_MSG_MAGIC_V1 HL_COMMS_MSG_MAGIC_VER(1)
+#define HL_COMMS_MSG_MAGIC_V2 HL_COMMS_MSG_MAGIC_VER(2)
+#define HL_COMMS_MSG_MAGIC_V3 HL_COMMS_MSG_MAGIC_VER(3)
-#define HL_COMMS_MSG_MAGIC HL_COMMS_MSG_MAGIC_V1
+#define HL_COMMS_MSG_MAGIC HL_COMMS_MSG_MAGIC_V3
#define HL_COMMS_MSG_MAGIC_VALIDATE_MAGIC(magic) \
(((magic) & HL_COMMS_MSG_MAGIC_MASK) == \
@@ -474,22 +489,31 @@ enum comms_msg_type {
/*
* Binning information shared between LKD and FW
- * @tpc_mask - TPC binning information
+ * @tpc_mask_l - TPC binning information lower 64 bit
* @dec_mask - Decoder binning information
- * @hbm_mask - HBM binning information
+ * @dram_mask - DRAM binning information
* @edma_mask - EDMA binning information
* @mme_mask_l - MME binning information lower 32
* @mme_mask_h - MME binning information upper 32
- * @reserved - reserved field for 64 bit alignment
+ * @rot_mask - Rotator binning information
+ * @xbar_mask - xBAR binning information
+ * @reserved - reserved field for future binning info w/o ABI change
+ * @tpc_mask_h - TPC binning information upper 64 bit
+ * @nic_mask - NIC binning information
*/
struct lkd_fw_binning_info {
- __le64 tpc_mask;
+ __le64 tpc_mask_l;
__le32 dec_mask;
- __le32 hbm_mask;
+ __le32 dram_mask;
__le32 edma_mask;
__le32 mme_mask_l;
__le32 mme_mask_h;
- __le32 reserved;
+ __le32 rot_mask;
+ __le32 xbar_mask;
+ __le32 reserved0;
+ __le64 tpc_mask_h;
+ __le64 nic_mask;
+ __le32 reserved1[8];
};
/* TODO: remove this struct after the code is updated to use message */
@@ -512,6 +536,23 @@ struct comms_msg_header {
__u8 reserved[4]; /* pad to 64 bit */
};
+enum lkd_fw_ascii_msg_lvls {
+ LKD_FW_ASCII_MSG_ERR = 0,
+ LKD_FW_ASCII_MSG_WRN = 1,
+ LKD_FW_ASCII_MSG_INF = 2,
+ LKD_FW_ASCII_MSG_DBG = 3,
+};
+
+#define LKD_FW_ASCII_MSG_MAX_LEN 128
+#define LKD_FW_ASCII_MSG_MAX 4 /* consider ABI when changing */
+
+struct lkd_fw_ascii_msg {
+ __u8 valid;
+ __u8 msg_lvl;
+ __u8 reserved[6];
+ char msg[LKD_FW_ASCII_MSG_MAX_LEN];
+};
+
/* this is the main FW descriptor - consider ABI when changing */
struct lkd_fw_comms_desc {
struct comms_desc_header header;
@@ -521,6 +562,8 @@ struct lkd_fw_comms_desc {
/* can be used for 1 more version w/o ABI change */
char reserved0[VERSION_MAX_LEN];
__le64 img_addr; /* address for next FW component load */
+ struct lkd_fw_binning_info binning_info;
+ struct lkd_fw_ascii_msg ascii_msg[LKD_FW_ASCII_MSG_MAX];
};
enum comms_reset_cause {
@@ -545,6 +588,8 @@ struct lkd_fw_comms_msg {
char reserved0[VERSION_MAX_LEN];
/* address for next FW component load */
__le64 img_addr;
+ struct lkd_fw_binning_info binning_info;
+ struct lkd_fw_ascii_msg ascii_msg[LKD_FW_ASCII_MSG_MAX];
};
struct {
__u8 reset_cause;
@@ -552,7 +597,7 @@ struct lkd_fw_comms_msg {
struct {
__u8 fw_cfg_skip; /* 1 - skip, 0 - don't skip */
};
- struct lkd_fw_binning_info binning_info;
+ struct lkd_fw_binning_info binning_conf;
};
};
@@ -699,4 +744,92 @@ struct comms_status {
};
};
+/**
+ * HL_MODULES_MAX_NUM is determined by the size of modules_mask in struct
+ * hl_component_versions
+ */
+enum hl_modules {
+ HL_MODULES_BOOT_INFO = 0,
+ HL_MODULES_EEPROM,
+ HL_MODULES_FDT,
+ HL_MODULES_I2C,
+ HL_MODULES_LZ4,
+ HL_MODULES_MBEDTLS,
+ HL_MODULES_MAX_NUM = 16
+};
+
+/**
+ * HL_COMPONENTS_MAX_NUM is determined by the size of components_mask in
+ * struct cpucp_versions
+ */
+enum hl_components {
+ HL_COMPONENTS_PID = 0,
+ HL_COMPONENTS_MGMT,
+ HL_COMPONENTS_PREBOOT,
+ HL_COMPONENTS_PPBOOT,
+ HL_COMPONENTS_ARMCP,
+ HL_COMPONENTS_CPLD,
+ HL_COMPONENTS_UBOOT,
+ HL_COMPONENTS_MAX_NUM = 16
+};
+
+/**
+ * struct hl_component_versions - versions associated with hl component.
+ * @struct_size: size of all the struct (including dynamic size of modules).
+ * @modules_offset: offset of the modules field in this struct.
+ * @component: version of the component itself.
+ * @fw_os: Firmware OS Version.
+ * @modules_mask: i'th bit (from LSB) is a flag - on if module i in enum
+ * hl_modules is used.
+ * @modules_counter: number of set bits in modules_mask.
+ * @reserved: reserved for future use.
+ * @modules: versions of the component's modules. Elborated explanation in
+ * struct cpucp_versions.
+ */
+struct hl_component_versions {
+ __le16 struct_size;
+ __le16 modules_offset;
+ __u8 component[VERSION_MAX_LEN];
+ __u8 fw_os[VERSION_MAX_LEN];
+ __le16 modules_mask;
+ __u8 modules_counter;
+ __u8 reserved[1];
+ __u8 modules[][VERSION_MAX_LEN];
+};
+
+/**
+ * struct hl_fw_versions - all versions (fuse, cpucp's components with their
+ * modules)
+ * @struct_size: size of all the struct (including dynamic size of components).
+ * @components_offset: offset of the components field in this struct.
+ * @fuse: silicon production FUSE information.
+ * @components_mask: i'th bit (from LSB) is a flag - on if component i in enum
+ * hl_components is used.
+ * @components_counter: number of set bits in components_mask.
+ * @reserved: reserved for future use.
+ * @components: versions of hl components. Index i corresponds to the i'th bit
+ * that is *on* in components_mask. For example, if
+ * components_mask=0b101, then *components represents arcpid and
+ * *(hl_component_versions*)((char*)components + 1') represents
+ * preboot, where 1' = components[0].struct_size.
+ */
+struct hl_fw_versions {
+ __le16 struct_size;
+ __le16 components_offset;
+ __u8 fuse[VERSION_MAX_LEN];
+ __le16 components_mask;
+ __u8 components_counter;
+ __u8 reserved[1];
+ struct hl_component_versions components[];
+};
+
+/* Max size of struct hl_component_versions */
+#define HL_COMPONENT_VERSIONS_MAX_SIZE \
+ (sizeof(struct hl_component_versions) + HL_MODULES_MAX_NUM * \
+ VERSION_MAX_LEN)
+
+/* Max size of struct hl_fw_versions */
+#define HL_FW_VERSIONS_MAX_SIZE (sizeof(struct hl_fw_versions) + \
+ HL_COMPONENTS_MAX_NUM * HL_COMPONENT_VERSIONS_MAX_SIZE)
+
#endif /* HL_BOOT_IF_H */
diff --git a/drivers/misc/habanalabs/include/common/qman_if.h b/drivers/accel/habanalabs/include/common/qman_if.h
index 7ed7739575ee..7ed7739575ee 100644
--- a/drivers/misc/habanalabs/include/common/qman_if.h
+++ b/drivers/accel/habanalabs/include/common/qman_if.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/cpu_if_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/cpu_if_regs.h
index cf80e31317ad..cf80e31317ad 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/cpu_if_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/cpu_if_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma0_core_masks.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma0_core_masks.h
index d079a37acab8..d079a37acab8 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma0_core_masks.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma0_core_masks.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma0_core_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma0_core_regs.h
index 1fdd5d5fc6d2..1fdd5d5fc6d2 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma0_core_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma0_core_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma0_qm_masks.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma0_qm_masks.h
index 48376aabc3ba..48376aabc3ba 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma0_qm_masks.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma0_qm_masks.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma0_qm_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma0_qm_regs.h
index 8e56a93d88a1..8e56a93d88a1 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma0_qm_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma0_qm_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma1_core_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma1_core_regs.h
index 4d8d8f26c5d4..4d8d8f26c5d4 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma1_core_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma1_core_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma1_qm_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma1_qm_regs.h
index c3ef300849be..c3ef300849be 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma1_qm_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma1_qm_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma2_core_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma2_core_regs.h
index a42862cd5ae0..a42862cd5ae0 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma2_core_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma2_core_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma2_qm_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma2_qm_regs.h
index 8c4d4e016852..8c4d4e016852 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma2_qm_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma2_qm_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma3_core_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma3_core_regs.h
index fb145f416fe6..fb145f416fe6 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma3_core_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma3_core_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma3_qm_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma3_qm_regs.h
index a4b461ca3d94..a4b461ca3d94 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma3_qm_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma3_qm_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma4_core_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma4_core_regs.h
index 192d11404b1c..192d11404b1c 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma4_core_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma4_core_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma4_qm_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma4_qm_regs.h
index f0cbda0d1e4d..f0cbda0d1e4d 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma4_qm_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma4_qm_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma5_core_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma5_core_regs.h
index 6e07c6fb6fc9..6e07c6fb6fc9 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma5_core_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma5_core_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma5_qm_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma5_qm_regs.h
index 0faea21756c5..0faea21756c5 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma5_qm_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma5_qm_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma6_core_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma6_core_regs.h
index 4962c13e2e2e..4962c13e2e2e 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma6_core_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma6_core_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma6_qm_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma6_qm_regs.h
index af87adb94c94..af87adb94c94 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma6_qm_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma6_qm_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma7_core_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma7_core_regs.h
index 8dd705d20195..8dd705d20195 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma7_core_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma7_core_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma7_qm_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma7_qm_regs.h
index d6c631f63e3e..d6c631f63e3e 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma7_qm_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma7_qm_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_n_down_ch0_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma_if_e_n_down_ch0_regs.h
index 8c1c72df4469..8c1c72df4469 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_n_down_ch0_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma_if_e_n_down_ch0_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_n_down_ch1_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma_if_e_n_down_ch1_regs.h
index b2b593fcec30..b2b593fcec30 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_n_down_ch1_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma_if_e_n_down_ch1_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_n_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma_if_e_n_regs.h
index 8a10c6a76156..8a10c6a76156 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_n_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma_if_e_n_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_s_down_ch0_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma_if_e_s_down_ch0_regs.h
index cd61289a1e8a..cd61289a1e8a 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_s_down_ch0_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma_if_e_s_down_ch0_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_s_down_ch1_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma_if_e_s_down_ch1_regs.h
index 3f32370a14c7..3f32370a14c7 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_s_down_ch1_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma_if_e_s_down_ch1_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_s_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma_if_e_s_regs.h
index 78c18da7154b..78c18da7154b 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_e_s_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma_if_e_s_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_n_down_ch0_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma_if_w_n_down_ch0_regs.h
index 4ccaf8712948..4ccaf8712948 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_n_down_ch0_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma_if_w_n_down_ch0_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_n_down_ch1_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma_if_w_n_down_ch1_regs.h
index 9236f4183084..9236f4183084 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_n_down_ch1_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma_if_w_n_down_ch1_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_n_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma_if_w_n_regs.h
index da60893a5fab..da60893a5fab 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_n_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma_if_w_n_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_s_down_ch0_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma_if_w_s_down_ch0_regs.h
index 56ffc920d58a..56ffc920d58a 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_s_down_ch0_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma_if_w_s_down_ch0_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_s_down_ch1_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma_if_w_s_down_ch1_regs.h
index cbc642918deb..cbc642918deb 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_s_down_ch1_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma_if_w_s_down_ch1_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_s_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma_if_w_s_regs.h
index 2382bc41bea6..2382bc41bea6 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/dma_if_w_s_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/dma_if_w_s_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/gaudi_blocks.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/gaudi_blocks.h
index c7596aac7a5c..c7596aac7a5c 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/gaudi_blocks.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/gaudi_blocks.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/gaudi_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/gaudi_regs.h
index 1a6576666794..23ee8691db46 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/gaudi_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/gaudi_regs.h
@@ -320,4 +320,6 @@
#define mmPSOC_TPC_PLL_NR 0xC73100
#define mmIF_W_PLL_NR 0x488100
+#define mmPCIE_WRAP_RR_ELBI_RD_SEC_REG_CTRL 0xC01208
+
#endif /* ASIC_REG_GAUDI_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/mme0_ctrl_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/mme0_ctrl_regs.h
index 083d073a0128..083d073a0128 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/mme0_ctrl_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/mme0_ctrl_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/mme0_qm_masks.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/mme0_qm_masks.h
index e6dd30ce0ca7..e6dd30ce0ca7 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/mme0_qm_masks.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/mme0_qm_masks.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/mme0_qm_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/mme0_qm_regs.h
index 4f078b328b00..4f078b328b00 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/mme0_qm_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/mme0_qm_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/mme1_ctrl_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/mme1_ctrl_regs.h
index 6c07f7d45490..6c07f7d45490 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/mme1_ctrl_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/mme1_ctrl_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/mme2_ctrl_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/mme2_ctrl_regs.h
index a1f2eb8b91bd..a1f2eb8b91bd 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/mme2_ctrl_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/mme2_ctrl_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/mme2_qm_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/mme2_qm_regs.h
index c1ea6a422010..c1ea6a422010 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/mme2_qm_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/mme2_qm_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/mme3_ctrl_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/mme3_ctrl_regs.h
index 36f6edc72e3d..36f6edc72e3d 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/mme3_ctrl_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/mme3_ctrl_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/mmu_up_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/mmu_up_regs.h
index 61465b599850..61465b599850 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/mmu_up_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/mmu_up_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nic0_qm0_masks.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/nic0_qm0_masks.h
index bd37b6452133..bd37b6452133 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/nic0_qm0_masks.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/nic0_qm0_masks.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nic0_qm0_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/nic0_qm0_regs.h
index 7c97f4041b8e..7c97f4041b8e 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/nic0_qm0_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/nic0_qm0_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nic0_qm1_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/nic0_qm1_regs.h
index fe96c575b5c6..fe96c575b5c6 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/nic0_qm1_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/nic0_qm1_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nic1_qm0_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/nic1_qm0_regs.h
index 0d1caf057ad0..0d1caf057ad0 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/nic1_qm0_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/nic1_qm0_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nic1_qm1_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/nic1_qm1_regs.h
index 1b115ee6d6f0..1b115ee6d6f0 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/nic1_qm1_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/nic1_qm1_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nic2_qm0_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/nic2_qm0_regs.h
index a89116a4586f..a89116a4586f 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/nic2_qm0_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/nic2_qm0_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nic2_qm1_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/nic2_qm1_regs.h
index b7f091ddc89c..b7f091ddc89c 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/nic2_qm1_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/nic2_qm1_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nic3_qm0_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/nic3_qm0_regs.h
index 4712cc62b009..4712cc62b009 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/nic3_qm0_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/nic3_qm0_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nic3_qm1_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/nic3_qm1_regs.h
index 7fa040f65004..7fa040f65004 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/nic3_qm1_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/nic3_qm1_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nic4_qm0_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/nic4_qm0_regs.h
index 99d5319672dd..99d5319672dd 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/nic4_qm0_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/nic4_qm0_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nic4_qm1_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/nic4_qm1_regs.h
index 34b21b21da52..34b21b21da52 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/nic4_qm1_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/nic4_qm1_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_0_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_0_regs.h
index 2efa2a54deb4..2efa2a54deb4 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_0_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_0_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_1_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_1_regs.h
index a6047d4e2560..a6047d4e2560 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_1_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_1_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_2_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_2_regs.h
index 9de8442f9bc2..9de8442f9bc2 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_2_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_2_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_3_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_3_regs.h
index 34fd47685edd..34fd47685edd 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_3_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_3_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_4_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_4_regs.h
index 543a98f81767..543a98f81767 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_4_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_4_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_5_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_5_regs.h
index 95486b7ddf1d..95486b7ddf1d 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_5_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_5_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_6_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_6_regs.h
index b79c59887b21..b79c59887b21 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_6_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_6_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_7_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_7_regs.h
index 3a6a34ba2958..3a6a34ba2958 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_7_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/nif_rtr_ctrl_7_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_cpu_pll_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/psoc_cpu_pll_regs.h
index 2585c70f59ef..2585c70f59ef 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_cpu_pll_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/psoc_cpu_pll_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_etr_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/psoc_etr_regs.h
index b7c33e025db5..b7c33e025db5 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_etr_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/psoc_etr_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_global_conf_masks.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/psoc_global_conf_masks.h
index 6703e678ee9f..6703e678ee9f 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_global_conf_masks.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/psoc_global_conf_masks.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_global_conf_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/psoc_global_conf_regs.h
index 1b5cfcc1d85f..1b5cfcc1d85f 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_global_conf_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/psoc_global_conf_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_timestamp_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/psoc_timestamp_regs.h
index 9ce24597d4b0..9ce24597d4b0 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/psoc_timestamp_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/psoc_timestamp_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_0_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_0_regs.h
index ddf824392cf7..ddf824392cf7 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_0_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_0_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_1_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_1_regs.h
index c6d517dbbd54..c6d517dbbd54 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_1_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_1_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_2_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_2_regs.h
index 330e5b42d679..330e5b42d679 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_2_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_2_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_3_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_3_regs.h
index d749f1968e5e..d749f1968e5e 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_3_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_3_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_4_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_4_regs.h
index ad48773c4bbd..ad48773c4bbd 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_4_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_4_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_5_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_5_regs.h
index 6c27850ca3f5..6c27850ca3f5 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_5_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_5_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_6_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_6_regs.h
index a9ea89aa6405..a9ea89aa6405 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_6_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_6_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_7_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_7_regs.h
index a37772c531d9..a37772c531d9 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_7_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/sif_rtr_ctrl_7_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/stlb_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/stlb_regs.h
index 07d2a9000102..07d2a9000102 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/stlb_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/stlb_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc0_cfg_masks.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/tpc0_cfg_masks.h
index 8f67c11c8de9..8f67c11c8de9 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc0_cfg_masks.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/tpc0_cfg_masks.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc0_cfg_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/tpc0_cfg_regs.h
index b82a906265a8..b82a906265a8 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc0_cfg_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/tpc0_cfg_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc0_qm_masks.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/tpc0_qm_masks.h
index 8e71532c6f36..8e71532c6f36 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc0_qm_masks.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/tpc0_qm_masks.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc0_qm_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/tpc0_qm_regs.h
index f9e310ab6df2..f9e310ab6df2 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc0_qm_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/tpc0_qm_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc1_cfg_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/tpc1_cfg_regs.h
index 6736c476d979..6736c476d979 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc1_cfg_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/tpc1_cfg_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc1_qm_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/tpc1_qm_regs.h
index af10ef7a87d9..af10ef7a87d9 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc1_qm_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/tpc1_qm_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc2_cfg_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/tpc2_cfg_regs.h
index 3e77c37952bc..3e77c37952bc 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc2_cfg_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/tpc2_cfg_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc2_qm_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/tpc2_qm_regs.h
index 2919e2fa58f8..2919e2fa58f8 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc2_qm_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/tpc2_qm_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc3_cfg_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/tpc3_cfg_regs.h
index 6d42469659f1..6d42469659f1 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc3_cfg_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/tpc3_cfg_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc3_qm_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/tpc3_qm_regs.h
index 5f2a0fd86c9e..5f2a0fd86c9e 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc3_qm_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/tpc3_qm_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc4_cfg_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/tpc4_cfg_regs.h
index 7a9447f39a74..7a9447f39a74 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc4_cfg_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/tpc4_cfg_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc4_qm_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/tpc4_qm_regs.h
index 80e63402f6e0..80e63402f6e0 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc4_qm_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/tpc4_qm_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc5_cfg_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/tpc5_cfg_regs.h
index f428f891935a..f428f891935a 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc5_cfg_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/tpc5_cfg_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc5_qm_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/tpc5_qm_regs.h
index cd3a810ff4c4..cd3a810ff4c4 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc5_qm_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/tpc5_qm_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc6_cfg_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/tpc6_cfg_regs.h
index eb251e72813f..eb251e72813f 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc6_cfg_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/tpc6_cfg_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc6_qm_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/tpc6_qm_regs.h
index e35ef7fd8b1c..e35ef7fd8b1c 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc6_qm_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/tpc6_qm_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc7_cfg_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/tpc7_cfg_regs.h
index 1887b10e58e2..1887b10e58e2 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc7_cfg_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/tpc7_cfg_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc7_qm_regs.h b/drivers/accel/habanalabs/include/gaudi/asic_reg/tpc7_qm_regs.h
index 5c36c972c027..5c36c972c027 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/tpc7_qm_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi/asic_reg/tpc7_qm_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi/gaudi.h b/drivers/accel/habanalabs/include/gaudi/gaudi.h
index ffae107b1693..ffae107b1693 100644
--- a/drivers/misc/habanalabs/include/gaudi/gaudi.h
+++ b/drivers/accel/habanalabs/include/gaudi/gaudi.h
diff --git a/drivers/misc/habanalabs/include/gaudi/gaudi_async_events.h b/drivers/accel/habanalabs/include/gaudi/gaudi_async_events.h
index c07ed4ed304c..c07ed4ed304c 100644
--- a/drivers/misc/habanalabs/include/gaudi/gaudi_async_events.h
+++ b/drivers/accel/habanalabs/include/gaudi/gaudi_async_events.h
diff --git a/drivers/misc/habanalabs/include/gaudi/gaudi_async_ids_map_extended.h b/drivers/accel/habanalabs/include/gaudi/gaudi_async_ids_map_extended.h
index 479b6b038254..479b6b038254 100644
--- a/drivers/misc/habanalabs/include/gaudi/gaudi_async_ids_map_extended.h
+++ b/drivers/accel/habanalabs/include/gaudi/gaudi_async_ids_map_extended.h
diff --git a/drivers/misc/habanalabs/include/gaudi/gaudi_coresight.h b/drivers/accel/habanalabs/include/gaudi/gaudi_coresight.h
index c45cc7f4d4d7..c45cc7f4d4d7 100644
--- a/drivers/misc/habanalabs/include/gaudi/gaudi_coresight.h
+++ b/drivers/accel/habanalabs/include/gaudi/gaudi_coresight.h
diff --git a/drivers/misc/habanalabs/include/gaudi/gaudi_fw_if.h b/drivers/accel/habanalabs/include/gaudi/gaudi_fw_if.h
index 2dba02757d37..2dba02757d37 100644
--- a/drivers/misc/habanalabs/include/gaudi/gaudi_fw_if.h
+++ b/drivers/accel/habanalabs/include/gaudi/gaudi_fw_if.h
diff --git a/drivers/misc/habanalabs/include/gaudi/gaudi_masks.h b/drivers/accel/habanalabs/include/gaudi/gaudi_masks.h
index 880c57b26c63..880c57b26c63 100644
--- a/drivers/misc/habanalabs/include/gaudi/gaudi_masks.h
+++ b/drivers/accel/habanalabs/include/gaudi/gaudi_masks.h
diff --git a/drivers/misc/habanalabs/include/gaudi/gaudi_packets.h b/drivers/accel/habanalabs/include/gaudi/gaudi_packets.h
index 66fc083a7c6a..66fc083a7c6a 100644
--- a/drivers/misc/habanalabs/include/gaudi/gaudi_packets.h
+++ b/drivers/accel/habanalabs/include/gaudi/gaudi_packets.h
diff --git a/drivers/misc/habanalabs/include/gaudi/gaudi_reg_map.h b/drivers/accel/habanalabs/include/gaudi/gaudi_reg_map.h
index 92f25c2ae083..92f25c2ae083 100644
--- a/drivers/misc/habanalabs/include/gaudi/gaudi_reg_map.h
+++ b/drivers/accel/habanalabs/include/gaudi/gaudi_reg_map.h
diff --git a/drivers/accel/habanalabs/include/gaudi2/arc/gaudi2_arc_common_packets.h b/drivers/accel/habanalabs/include/gaudi2/arc/gaudi2_arc_common_packets.h
new file mode 100644
index 000000000000..22a6ab9a7f47
--- /dev/null
+++ b/drivers/accel/habanalabs/include/gaudi2/arc/gaudi2_arc_common_packets.h
@@ -0,0 +1,211 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2020 HabanaLabs Ltd.
+ * All Rights Reserved.
+ */
+
+#ifndef __GAUDI2_ARC_COMMON_PACKETS_H__
+#define __GAUDI2_ARC_COMMON_PACKETS_H__
+
+enum {
+ CPU_ID_SCHED_ARC0 = 0, /* FARM_ARC0 */
+ CPU_ID_SCHED_ARC1 = 1, /* FARM_ARC1 */
+ CPU_ID_SCHED_ARC2 = 2, /* FARM_ARC2 */
+ CPU_ID_SCHED_ARC3 = 3, /* FARM_ARC3 */
+ /* Dcore1 MME Engine ARC instance used as scheduler */
+ CPU_ID_SCHED_ARC4 = 4, /* DCORE1_MME0 */
+ /* Dcore3 MME Engine ARC instance used as scheduler */
+ CPU_ID_SCHED_ARC5 = 5, /* DCORE3_MME0 */
+
+ CPU_ID_TPC_QMAN_ARC0 = 6, /* DCORE0_TPC0 */
+ CPU_ID_TPC_QMAN_ARC1 = 7, /* DCORE0_TPC1 */
+ CPU_ID_TPC_QMAN_ARC2 = 8, /* DCORE0_TPC2 */
+ CPU_ID_TPC_QMAN_ARC3 = 9, /* DCORE0_TPC3 */
+ CPU_ID_TPC_QMAN_ARC4 = 10, /* DCORE0_TPC4 */
+ CPU_ID_TPC_QMAN_ARC5 = 11, /* DCORE0_TPC5 */
+ CPU_ID_TPC_QMAN_ARC6 = 12, /* DCORE1_TPC0 */
+ CPU_ID_TPC_QMAN_ARC7 = 13, /* DCORE1_TPC1 */
+ CPU_ID_TPC_QMAN_ARC8 = 14, /* DCORE1_TPC2 */
+ CPU_ID_TPC_QMAN_ARC9 = 15, /* DCORE1_TPC3 */
+ CPU_ID_TPC_QMAN_ARC10 = 16, /* DCORE1_TPC4 */
+ CPU_ID_TPC_QMAN_ARC11 = 17, /* DCORE1_TPC5 */
+ CPU_ID_TPC_QMAN_ARC12 = 18, /* DCORE2_TPC0 */
+ CPU_ID_TPC_QMAN_ARC13 = 19, /* DCORE2_TPC1 */
+ CPU_ID_TPC_QMAN_ARC14 = 20, /* DCORE2_TPC2 */
+ CPU_ID_TPC_QMAN_ARC15 = 21, /* DCORE2_TPC3 */
+ CPU_ID_TPC_QMAN_ARC16 = 22, /* DCORE2_TPC4 */
+ CPU_ID_TPC_QMAN_ARC17 = 23, /* DCORE2_TPC5 */
+ CPU_ID_TPC_QMAN_ARC18 = 24, /* DCORE3_TPC0 */
+ CPU_ID_TPC_QMAN_ARC19 = 25, /* DCORE3_TPC1 */
+ CPU_ID_TPC_QMAN_ARC20 = 26, /* DCORE3_TPC2 */
+ CPU_ID_TPC_QMAN_ARC21 = 27, /* DCORE3_TPC3 */
+ CPU_ID_TPC_QMAN_ARC22 = 28, /* DCORE3_TPC4 */
+ CPU_ID_TPC_QMAN_ARC23 = 29, /* DCORE3_TPC5 */
+ CPU_ID_TPC_QMAN_ARC24 = 30, /* DCORE0_TPC6 - Never present */
+
+ CPU_ID_MME_QMAN_ARC0 = 31, /* DCORE0_MME0 */
+ CPU_ID_MME_QMAN_ARC1 = 32, /* DCORE2_MME0 */
+
+ CPU_ID_EDMA_QMAN_ARC0 = 33, /* DCORE0_EDMA0 */
+ CPU_ID_EDMA_QMAN_ARC1 = 34, /* DCORE0_EDMA1 */
+ CPU_ID_EDMA_QMAN_ARC2 = 35, /* DCORE1_EDMA0 */
+ CPU_ID_EDMA_QMAN_ARC3 = 36, /* DCORE1_EDMA1 */
+ CPU_ID_EDMA_QMAN_ARC4 = 37, /* DCORE2_EDMA0 */
+ CPU_ID_EDMA_QMAN_ARC5 = 38, /* DCORE2_EDMA1 */
+ CPU_ID_EDMA_QMAN_ARC6 = 39, /* DCORE3_EDMA0 */
+ CPU_ID_EDMA_QMAN_ARC7 = 40, /* DCORE3_EDMA1 */
+
+ CPU_ID_PDMA_QMAN_ARC0 = 41, /* DCORE0_PDMA0 */
+ CPU_ID_PDMA_QMAN_ARC1 = 42, /* DCORE0_PDMA1 */
+
+ CPU_ID_ROT_QMAN_ARC0 = 43, /* ROT0 */
+ CPU_ID_ROT_QMAN_ARC1 = 44, /* ROT1 */
+
+ CPU_ID_NIC_QMAN_ARC0 = 45, /* NIC0_0 */
+ CPU_ID_NIC_QMAN_ARC1 = 46, /* NIC0_1 */
+ CPU_ID_NIC_QMAN_ARC2 = 47, /* NIC1_0 */
+ CPU_ID_NIC_QMAN_ARC3 = 48, /* NIC1_1 */
+ CPU_ID_NIC_QMAN_ARC4 = 49, /* NIC2_0 */
+ CPU_ID_NIC_QMAN_ARC5 = 50, /* NIC2_1 */
+ CPU_ID_NIC_QMAN_ARC6 = 51, /* NIC3_0 */
+ CPU_ID_NIC_QMAN_ARC7 = 52, /* NIC3_1 */
+ CPU_ID_NIC_QMAN_ARC8 = 53, /* NIC4_0 */
+ CPU_ID_NIC_QMAN_ARC9 = 54, /* NIC4_1 */
+ CPU_ID_NIC_QMAN_ARC10 = 55, /* NIC5_0 */
+ CPU_ID_NIC_QMAN_ARC11 = 56, /* NIC5_1 */
+ CPU_ID_NIC_QMAN_ARC12 = 57, /* NIC6_0 */
+ CPU_ID_NIC_QMAN_ARC13 = 58, /* NIC6_1 */
+ CPU_ID_NIC_QMAN_ARC14 = 59, /* NIC7_0 */
+ CPU_ID_NIC_QMAN_ARC15 = 60, /* NIC7_1 */
+ CPU_ID_NIC_QMAN_ARC16 = 61, /* NIC8_0 */
+ CPU_ID_NIC_QMAN_ARC17 = 62, /* NIC8_1 */
+ CPU_ID_NIC_QMAN_ARC18 = 63, /* NIC9_0 */
+ CPU_ID_NIC_QMAN_ARC19 = 64, /* NIC9_1 */
+ CPU_ID_NIC_QMAN_ARC20 = 65, /* NIC10_0 */
+ CPU_ID_NIC_QMAN_ARC21 = 66, /* NIC10_1 */
+ CPU_ID_NIC_QMAN_ARC22 = 67, /* NIC11_0 */
+ CPU_ID_NIC_QMAN_ARC23 = 68, /* NIC11_1 */
+
+ CPU_ID_MAX = 69,
+ CPU_ID_SCHED_MAX = 6,
+
+ CPU_ID_ALL = 0xFE,
+ CPU_ID_INVALID = 0xFF,
+};
+
+enum arc_regions_t {
+ ARC_REGION0_UNSED = 0,
+ /*
+ * Extension registers
+ * None
+ */
+ ARC_REGION1_SRAM = 1,
+ /*
+ * Extension registers
+ * AUX_SRAM_LSB_ADDR
+ * AUX_SRAM_MSB_ADDR
+ * ARC Address: 0x1000_0000
+ */
+ ARC_REGION2_CFG = 2,
+ /*
+ * Extension registers
+ * AUX_CFG_LSB_ADDR
+ * AUX_CFG_MSB_ADDR
+ * ARC Address: 0x2000_0000
+ */
+ ARC_REGION3_GENERAL = 3,
+ /*
+ * Extension registers
+ * AUX_GENERAL_PURPOSE_LSB_ADDR_0
+ * AUX_GENERAL_PURPOSE_MSB_ADDR_0
+ * ARC Address: 0x3000_0000
+ */
+ ARC_REGION4_HBM0_FW = 4,
+ /*
+ * Extension registers
+ * AUX_HBM0_LSB_ADDR
+ * AUX_HBM0_MSB_ADDR
+ * AUX_HBM0_OFFSET
+ * ARC Address: 0x4000_0000
+ */
+ ARC_REGION5_HBM1_GC_DATA = 5,
+ /*
+ * Extension registers
+ * AUX_HBM1_LSB_ADDR
+ * AUX_HBM1_MSB_ADDR
+ * AUX_HBM1_OFFSET
+ * ARC Address: 0x5000_0000
+ */
+ ARC_REGION6_HBM2_GC_DATA = 6,
+ /*
+ * Extension registers
+ * AUX_HBM2_LSB_ADDR
+ * AUX_HBM2_MSB_ADDR
+ * AUX_HBM2_OFFSET
+ * ARC Address: 0x6000_0000
+ */
+ ARC_REGION7_HBM3_GC_DATA = 7,
+ /*
+ * Extension registers
+ * AUX_HBM3_LSB_ADDR
+ * AUX_HBM3_MSB_ADDR
+ * AUX_HBM3_OFFSET
+ * ARC Address: 0x7000_0000
+ */
+ ARC_REGION8_DCCM = 8,
+ /*
+ * Extension registers
+ * None
+ * ARC Address: 0x8000_0000
+ */
+ ARC_REGION9_PCIE = 9,
+ /*
+ * Extension registers
+ * AUX_PCIE_LSB_ADDR
+ * AUX_PCIE_MSB_ADDR
+ * ARC Address: 0x9000_0000
+ */
+ ARC_REGION10_GENERAL = 10,
+ /*
+ * Extension registers
+ * AUX_GENERAL_PURPOSE_LSB_ADDR_1
+ * AUX_GENERAL_PURPOSE_MSB_ADDR_1
+ * ARC Address: 0xA000_0000
+ */
+ ARC_REGION11_GENERAL = 11,
+ /*
+ * Extension registers
+ * AUX_GENERAL_PURPOSE_LSB_ADDR_2
+ * AUX_GENERAL_PURPOSE_MSB_ADDR_2
+ * ARC Address: 0xB000_0000
+ */
+ ARC_REGION12_GENERAL = 12,
+ /*
+ * Extension registers
+ * AUX_GENERAL_PURPOSE_LSB_ADDR_3
+ * AUX_GENERAL_PURPOSE_MSB_ADDR_3
+ * ARC Address: 0xC000_0000
+ */
+ ARC_REGION13_GENERAL = 13,
+ /*
+ * Extension registers
+ * AUX_GENERAL_PURPOSE_LSB_ADDR_4
+ * AUX_GENERAL_PURPOSE_MSB_ADDR_4
+ * ARC Address: 0xD000_0000
+ */
+ ARC_REGION14_GENERAL = 14,
+ /*
+ * Extension registers
+ * AUX_GENERAL_PURPOSE_LSB_ADDR_5
+ * AUX_GENERAL_PURPOSE_MSB_ADDR_5
+ * ARC Address: 0xE000_0000
+ */
+ ARC_REGION15_LBU = 15
+ /*
+ * Extension registers
+ * None
+ * ARC Address: 0xF000_0000
+ */
+};
+
+#endif /* __GAUDI2_ARC_COMMON_PACKETS_H__ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_arc0_acp_eng_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/arc_farm_arc0_acp_eng_regs.h
index 1974df13b5f9..1974df13b5f9 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_arc0_acp_eng_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/arc_farm_arc0_acp_eng_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_arc0_aux_masks.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/arc_farm_arc0_aux_masks.h
index fc2c52af6509..fc2c52af6509 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_arc0_aux_masks.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/arc_farm_arc0_aux_masks.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_arc0_aux_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/arc_farm_arc0_aux_regs.h
index 5345b5faa3a2..5345b5faa3a2 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_arc0_aux_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/arc_farm_arc0_aux_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_arc0_dup_eng_axuser_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/arc_farm_arc0_dup_eng_axuser_regs.h
index bde077eed285..bde077eed285 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_arc0_dup_eng_axuser_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/arc_farm_arc0_dup_eng_axuser_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_arc0_dup_eng_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/arc_farm_arc0_dup_eng_regs.h
index 491af75c12c3..491af75c12c3 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_arc0_dup_eng_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/arc_farm_arc0_dup_eng_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_ctx_axuser_masks.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_ctx_axuser_masks.h
index 12d6a124a2e9..12d6a124a2e9 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_ctx_axuser_masks.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_ctx_axuser_masks.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_ctx_axuser_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_ctx_axuser_regs.h
index 23f9d2df52a7..23f9d2df52a7 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_ctx_axuser_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_ctx_axuser_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_ctx_masks.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_ctx_masks.h
index bee4de0b28d6..bee4de0b28d6 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_ctx_masks.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_ctx_masks.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_ctx_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_ctx_regs.h
index b9f09e8199e6..b9f09e8199e6 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_ctx_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_ctx_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_kdma_cgm_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_kdma_cgm_regs.h
index d6dd2c066fa9..d6dd2c066fa9 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_kdma_cgm_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_kdma_cgm_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_masks.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_masks.h
index 5903dbacec80..5903dbacec80 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_masks.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_masks.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_regs.h
index e312cf810c0e..e312cf810c0e 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/arc_farm_kdma_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/cpu_if_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/cpu_if_regs.h
index 9b3eceec9d5d..9b3eceec9d5d 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/cpu_if_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/cpu_if_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_dec0_cmd_masks.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_dec0_cmd_masks.h
index 296ab832013f..296ab832013f 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_dec0_cmd_masks.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_dec0_cmd_masks.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_dec0_cmd_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_dec0_cmd_regs.h
index e26f0d77c9dc..e26f0d77c9dc 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_dec0_cmd_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_dec0_cmd_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_core_ctx_axuser_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_core_ctx_axuser_regs.h
index 8de48939243b..8de48939243b 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_core_ctx_axuser_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_core_ctx_axuser_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_core_ctx_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_core_ctx_regs.h
index f73e76c8f5bd..f73e76c8f5bd 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_core_ctx_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_core_ctx_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_core_masks.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_core_masks.h
index d600f6bf70d8..d600f6bf70d8 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_core_masks.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_core_masks.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_core_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_core_regs.h
index 84f068e4c602..84f068e4c602 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_core_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_core_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_qm_arc_aux_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_qm_arc_aux_regs.h
index 0fc45300df81..0fc45300df81 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_qm_arc_aux_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_qm_arc_aux_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_qm_axuser_nonsecured_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_qm_axuser_nonsecured_regs.h
index 88d2a133f129..88d2a133f129 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_qm_axuser_nonsecured_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_qm_axuser_nonsecured_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_qm_cgm_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_qm_cgm_regs.h
index 0b0a76a5b2a0..0b0a76a5b2a0 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_qm_cgm_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_qm_cgm_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_qm_masks.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_qm_masks.h
index 102e2a65811c..102e2a65811c 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_qm_masks.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_qm_masks.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_qm_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_qm_regs.h
index 32d475b9ed11..32d475b9ed11 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_qm_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_edma0_qm_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma1_core_ctx_axuser_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_edma1_core_ctx_axuser_regs.h
index b608a634562f..b608a634562f 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma1_core_ctx_axuser_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_edma1_core_ctx_axuser_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma1_qm_axuser_nonsecured_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_edma1_qm_axuser_nonsecured_regs.h
index c3a462f2a9ac..c3a462f2a9ac 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_edma1_qm_axuser_nonsecured_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_edma1_qm_axuser_nonsecured_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_hmmu0_mmu_masks.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_hmmu0_mmu_masks.h
index df51eac10dd7..2965b6a3b423 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_hmmu0_mmu_masks.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_hmmu0_mmu_masks.h
@@ -150,8 +150,7 @@
#define DCORE0_HMMU0_MMU_STATIC_MULTI_PAGE_SIZE_HOP1_PAGE_SIZE_SHIFT 16
#define DCORE0_HMMU0_MMU_STATIC_MULTI_PAGE_SIZE_HOP1_PAGE_SIZE_MASK 0xF0000
#define DCORE0_HMMU0_MMU_STATIC_MULTI_PAGE_SIZE_CFG_8_BITS_HOP_MODE_EN_SHIFT 20
-#define DCORE0_HMMU0_MMU_STATIC_MULTI_PAGE_SIZE_CFG_8_BITS_HOP_MODE_EN_MASK \
-0x100000
+#define DCORE0_HMMU0_MMU_STATIC_MULTI_PAGE_SIZE_CFG_8_BITS_HOP_MODE_EN_MASK 0x100000
/* DCORE0_HMMU0_MMU_CORE_SEP_CACHE_RNG */
#define DCORE0_HMMU0_MMU_CORE_SEP_CACHE_RNG_CORE_SET_MASK_SHIFT 0
@@ -235,23 +234,19 @@
/* DCORE0_HMMU0_MMU_ILLEGAL_ADDR_WRITE_63_32 */
#define DCORE0_HMMU0_MMU_ILLEGAL_ADDR_WRITE_63_32_ILLEGAL_ADDR_63_32_SHIFT 0
-#define DCORE0_HMMU0_MMU_ILLEGAL_ADDR_WRITE_63_32_ILLEGAL_ADDR_63_32_MASK \
-0xFFFFFFFF
+#define DCORE0_HMMU0_MMU_ILLEGAL_ADDR_WRITE_63_32_ILLEGAL_ADDR_63_32_MASK 0xFFFFFFFF
/* DCORE0_HMMU0_MMU_ILLEGAL_ADDR_WRITE_31_0 */
#define DCORE0_HMMU0_MMU_ILLEGAL_ADDR_WRITE_31_0_ILLEGAL_ADDR_31_0_SHIFT 0
-#define DCORE0_HMMU0_MMU_ILLEGAL_ADDR_WRITE_31_0_ILLEGAL_ADDR_31_0_MASK \
-0xFFFFFFFF
+#define DCORE0_HMMU0_MMU_ILLEGAL_ADDR_WRITE_31_0_ILLEGAL_ADDR_31_0_MASK 0xFFFFFFFF
/* DCORE0_HMMU0_MMU_ILLEGAL_ADDR_READ_63_32 */
#define DCORE0_HMMU0_MMU_ILLEGAL_ADDR_READ_63_32_ILLEGAL_ADDR_63_32_SHIFT 0
-#define DCORE0_HMMU0_MMU_ILLEGAL_ADDR_READ_63_32_ILLEGAL_ADDR_63_32_MASK \
-0xFFFFFFFF
+#define DCORE0_HMMU0_MMU_ILLEGAL_ADDR_READ_63_32_ILLEGAL_ADDR_63_32_MASK 0xFFFFFFFF
/* DCORE0_HMMU0_MMU_ILLEGAL_ADDR_READ_31_0 */
#define DCORE0_HMMU0_MMU_ILLEGAL_ADDR_READ_31_0_ILLEGAL_ADDR_31_0_SHIFT 0
-#define DCORE0_HMMU0_MMU_ILLEGAL_ADDR_READ_31_0_ILLEGAL_ADDR_31_0_MASK \
-0xFFFFFFFF
+#define DCORE0_HMMU0_MMU_ILLEGAL_ADDR_READ_31_0_ILLEGAL_ADDR_31_0_MASK 0xFFFFFFFF
/* DCORE0_HMMU0_MMU_RAZWI_WRITE_VLD */
#define DCORE0_HMMU0_MMU_RAZWI_WRITE_VLD_R_SHIFT 0
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_hmmu0_mmu_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_hmmu0_mmu_regs.h
index 08ccd695ec89..08ccd695ec89 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_hmmu0_mmu_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_hmmu0_mmu_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_hmmu0_stlb_masks.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_hmmu0_stlb_masks.h
index 192eba5f07bb..a311778b21e7 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_hmmu0_stlb_masks.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_hmmu0_stlb_masks.h
@@ -92,8 +92,7 @@
#define DCORE0_HMMU0_STLB_HOP_CONFIGURATION_ONLY_LARGE_PAGE_SHIFT 20
#define DCORE0_HMMU0_STLB_HOP_CONFIGURATION_ONLY_LARGE_PAGE_MASK 0x100000
#define DCORE0_HMMU0_STLB_HOP_CONFIGURATION_LARGE_PAGE_INDICATION_BIT_SHIFT 21
-#define DCORE0_HMMU0_STLB_HOP_CONFIGURATION_LARGE_PAGE_INDICATION_BIT_MASK \
-0x7E00000
+#define DCORE0_HMMU0_STLB_HOP_CONFIGURATION_LARGE_PAGE_INDICATION_BIT_MASK 0x7E00000
/* DCORE0_HMMU0_STLB_LINK_LIST_LOOKUP_MASK_63_32 */
#define DCORE0_HMMU0_STLB_LINK_LIST_LOOKUP_MASK_63_32_R_SHIFT 0
@@ -228,12 +227,8 @@
#define DCORE0_HMMU0_STLB_MEM_READ_ARPROT_R_MASK 0x7
/* DCORE0_HMMU0_STLB_RANGE_CACHE_INVALIDATION */
-#define \
-DCORE0_HMMU0_STLB_RANGE_CACHE_INVALIDATION_RANGE_INVALIDATION_ENABLE_SHIFT \
-0
-#define \
-DCORE0_HMMU0_STLB_RANGE_CACHE_INVALIDATION_RANGE_INVALIDATION_ENABLE_MASK \
-0x1
+#define DCORE0_HMMU0_STLB_RANGE_CACHE_INVALIDATION_RANGE_INVALIDATION_ENABLE_SHIFT 0
+#define DCORE0_HMMU0_STLB_RANGE_CACHE_INVALIDATION_RANGE_INVALIDATION_ENABLE_MASK 0x1
#define DCORE0_HMMU0_STLB_RANGE_CACHE_INVALIDATION_INVALIDATION_ASID_EN_SHIFT 1
#define DCORE0_HMMU0_STLB_RANGE_CACHE_INVALIDATION_INVALIDATION_ASID_EN_MASK 0x2
#define DCORE0_HMMU0_STLB_RANGE_CACHE_INVALIDATION_INVALIDATION_ASID_SHIFT 2
@@ -261,53 +256,43 @@ DCORE0_HMMU0_STLB_RANGE_CACHE_INVALIDATION_RANGE_INVALIDATION_ENABLE_MASK \
/* DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_0 */
#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_0_ASID_POLY_MATRIX_H3_SHIFT 0
-#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_0_ASID_POLY_MATRIX_H3_MASK \
-0x1FF
+#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_0_ASID_POLY_MATRIX_H3_MASK 0x1FF
/* DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_1 */
#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_1_ASID_POLY_MATRIX_H3_SHIFT 0
-#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_1_ASID_POLY_MATRIX_H3_MASK \
-0x1FF
+#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_1_ASID_POLY_MATRIX_H3_MASK 0x1FF
/* DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_2 */
#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_2_ASID_POLY_MATRIX_H3_SHIFT 0
-#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_2_ASID_POLY_MATRIX_H3_MASK \
-0x1FF
+#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_2_ASID_POLY_MATRIX_H3_MASK 0x1FF
/* DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_3 */
#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_3_ASID_POLY_MATRIX_H3_SHIFT 0
-#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_3_ASID_POLY_MATRIX_H3_MASK \
-0x1FF
+#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_3_ASID_POLY_MATRIX_H3_MASK 0x1FF
/* DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_4 */
#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_4_ASID_POLY_MATRIX_H3_SHIFT 0
-#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_4_ASID_POLY_MATRIX_H3_MASK \
-0x1FF
+#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_4_ASID_POLY_MATRIX_H3_MASK 0x1FF
/* DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_5 */
#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_5_ASID_POLY_MATRIX_H3_SHIFT 0
-#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_5_ASID_POLY_MATRIX_H3_MASK \
-0x1FF
+#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_5_ASID_POLY_MATRIX_H3_MASK 0x1FF
/* DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_6 */
#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_6_ASID_POLY_MATRIX_H3_SHIFT 0
-#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_6_ASID_POLY_MATRIX_H3_MASK \
-0x1FF
+#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_6_ASID_POLY_MATRIX_H3_MASK 0x1FF
/* DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_7 */
#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_7_ASID_POLY_MATRIX_H3_SHIFT 0
-#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_7_ASID_POLY_MATRIX_H3_MASK \
-0x1FF
+#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_7_ASID_POLY_MATRIX_H3_MASK 0x1FF
/* DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_8 */
#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_8_ASID_POLY_MATRIX_H3_SHIFT 0
-#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_8_ASID_POLY_MATRIX_H3_MASK \
-0x1FF
+#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_8_ASID_POLY_MATRIX_H3_MASK 0x1FF
/* DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_9 */
#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_9_ASID_POLY_MATRIX_H3_SHIFT 0
-#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_9_ASID_POLY_MATRIX_H3_MASK \
-0x1FF
+#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MATRIX_H3_9_ASID_POLY_MATRIX_H3_MASK 0x1FF
/* DCORE0_HMMU0_STLB_ASID_SCR_POLY_MAT_H3_10 */
#define DCORE0_HMMU0_STLB_ASID_SCR_POLY_MAT_H3_10_ASID_POLY_MATRIX_H3_SHIFT 0
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_hmmu0_stlb_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_hmmu0_stlb_regs.h
index 864a259f68e2..864a259f68e2 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_hmmu0_stlb_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_hmmu0_stlb_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_acc_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_acc_regs.h
index 07bed3ec740e..07bed3ec740e 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_acc_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_acc_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_cout0_master_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_cout0_master_regs.h
index c9043979fd69..c9043979fd69 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_cout0_master_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_cout0_master_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_cout0_slave_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_cout0_slave_regs.h
index 7d74aea4576f..7d74aea4576f 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_cout0_slave_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_cout0_slave_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_cout1_master_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_cout1_master_regs.h
index f6f519eb5f6f..f6f519eb5f6f 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_cout1_master_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_cout1_master_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_cout1_slave_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_cout1_slave_regs.h
index 0e0c056ade9b..0e0c056ade9b 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_cout1_slave_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_cout1_slave_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in0_master_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in0_master_regs.h
index 34c6134a2f93..34c6134a2f93 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in0_master_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in0_master_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in0_slave_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in0_slave_regs.h
index 55065032f87c..55065032f87c 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in0_slave_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in0_slave_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in1_master_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in1_master_regs.h
index 6022b387eacf..6022b387eacf 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in1_master_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in1_master_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in1_slave_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in1_slave_regs.h
index f9c9b01f0d1a..f9c9b01f0d1a 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in1_slave_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in1_slave_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in2_master_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in2_master_regs.h
index d96119b8c435..d96119b8c435 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in2_master_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in2_master_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in2_slave_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in2_slave_regs.h
index c80d6817efe1..c80d6817efe1 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in2_slave_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in2_slave_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in3_master_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in3_master_regs.h
index 753b31dc1760..753b31dc1760 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in3_master_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in3_master_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in3_slave_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in3_slave_regs.h
index f68d043edcd9..f68d043edcd9 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in3_slave_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in3_slave_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in4_master_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in4_master_regs.h
index a6dce326bd74..a6dce326bd74 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in4_master_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in4_master_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in4_slave_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in4_slave_regs.h
index 5ace0f43cc78..5ace0f43cc78 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in4_slave_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_agu_in4_slave_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_base_addr_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_base_addr_regs.h
index b375393dfdc0..b375393dfdc0 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_base_addr_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_base_addr_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_non_tensor_end_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_non_tensor_end_regs.h
index 7c22b9383f3c..fb53feb0a1a6 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_non_tensor_end_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_non_tensor_end_regs.h
@@ -20,8 +20,7 @@
*****************************************
*/
-#define mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_END_CONV_KERNEL_SIZE_MINUS_1 \
-0x40CB280
+#define mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_END_CONV_KERNEL_SIZE_MINUS_1 0x40CB280
#define mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_END_CONV_LOW 0x40CB284
@@ -29,8 +28,7 @@
#define mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_END_OUTER_LOOP 0x40CB28C
-#define mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_END_NUM_ITERATIONS_MINUS_1 \
-0x40CB290
+#define mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_END_NUM_ITERATIONS_MINUS_1 0x40CB290
#define mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_END_SB_REPEAT 0x40CB294
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_non_tensor_start_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_non_tensor_start_regs.h
index d17c165faf8b..d17c165faf8b 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_non_tensor_start_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_non_tensor_start_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_tensor_a_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_tensor_a_regs.h
index 7b77884e0024..7b77884e0024 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_tensor_a_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_tensor_a_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_tensor_b_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_tensor_b_regs.h
index a2a2ba454d6d..a2a2ba454d6d 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_tensor_b_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_tensor_b_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_tensor_cout_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_tensor_cout_regs.h
index 7ad7b197cf87..7ad7b197cf87 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_tensor_cout_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_arch_tensor_cout_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_masks.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_masks.h
index f699661d76aa..da0c94075e64 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_masks.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_masks.h
@@ -78,8 +78,7 @@
#define DCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_DW0_MASTER_WAIT_SLAVE_FENCE_SHIFT 15
#define DCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_DW0_MASTER_WAIT_SLAVE_FENCE_MASK 0x8000
#define DCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_DW0_SLAVE_SEND_FENCE2MASTER_SHIFT 16
-#define DCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_DW0_SLAVE_SEND_FENCE2MASTER_MASK \
-0x10000
+#define DCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_DW0_SLAVE_SEND_FENCE2MASTER_MASK 0x10000
#define DCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_DW0_SLAVE_SIGNAL_EN_SHIFT 17
#define DCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_DW0_SLAVE_SIGNAL_EN_MASK 0x20000
#define DCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_DW0_SLAVE0_USE_SLV_ADR_SHIFT 18
@@ -87,11 +86,9 @@
#define DCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_DW0_SLAVE1_USE_SLV_ADR_SHIFT 19
#define DCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_DW0_SLAVE1_USE_SLV_ADR_MASK 0x80000
#define DCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_DW0_SLAVE0_USE_MSTR_ADR_PLUS4_SHIFT 20
-#define DCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_DW0_SLAVE0_USE_MSTR_ADR_PLUS4_MASK \
-0x100000
+#define DCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_DW0_SLAVE0_USE_MSTR_ADR_PLUS4_MASK 0x100000
#define DCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_DW0_SLAVE1_USE_MSTR_ADR_PLUS4_SHIFT 21
-#define DCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_DW0_SLAVE1_USE_MSTR_ADR_PLUS4_MASK \
-0x200000
+#define DCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_DW0_SLAVE1_USE_MSTR_ADR_PLUS4_MASK 0x200000
/* DCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_ADDR0 */
#define DCORE0_MME_CTRL_LO_ARCH_SYNC_OBJ_ADDR0_V_SHIFT 0
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_mme_axuser_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_mme_axuser_regs.h
index a51617a6f1fb..a51617a6f1fb 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_mme_axuser_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_mme_axuser_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_regs.h
index 1b91c9c13132..1b91c9c13132 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_ctrl_lo_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_arc_acp_eng_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_arc_acp_eng_regs.h
index f702fe6e9365..f702fe6e9365 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_arc_acp_eng_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_arc_acp_eng_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_arc_aux_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_arc_aux_regs.h
index 917f8ab88373..917f8ab88373 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_arc_aux_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_arc_aux_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_arc_dup_eng_axuser_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_arc_dup_eng_axuser_regs.h
index c7ebaf73c51e..c7ebaf73c51e 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_arc_dup_eng_axuser_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_arc_dup_eng_axuser_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_arc_dup_eng_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_arc_dup_eng_regs.h
index 61654e37335b..61654e37335b 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_arc_dup_eng_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_arc_dup_eng_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_axuser_nonsecured_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_axuser_nonsecured_regs.h
index 32089b8250ed..32089b8250ed 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_axuser_nonsecured_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_axuser_nonsecured_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_axuser_secured_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_axuser_secured_regs.h
index e168c1cc2a7d..e168c1cc2a7d 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_axuser_secured_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_axuser_secured_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_cgm_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_cgm_regs.h
index 543aba18ef68..543aba18ef68 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_cgm_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_cgm_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_regs.h
index c45583fcc2cf..c45583fcc2cf 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_qm_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_sbte0_masks.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_sbte0_masks.h
index 077ae5232790..077ae5232790 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_sbte0_masks.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_sbte0_masks.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_sbte0_mstr_if_axuser_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_sbte0_mstr_if_axuser_regs.h
index 211fa2c2c35b..211fa2c2c35b 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_sbte0_mstr_if_axuser_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_sbte0_mstr_if_axuser_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_wb0_mstr_if_axuser_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_wb0_mstr_if_axuser_regs.h
index 374a01d2b8d5..374a01d2b8d5 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_mme_wb0_mstr_if_axuser_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_mme_wb0_mstr_if_axuser_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_rtr0_ctrl_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_rtr0_ctrl_regs.h
index 22f4d6c805c5..22f4d6c805c5 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_rtr0_ctrl_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_rtr0_ctrl_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_rtr0_mstr_if_rr_prvt_hbw_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_rtr0_mstr_if_rr_prvt_hbw_regs.h
index 3a7290b3a5c9..3a7290b3a5c9 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_rtr0_mstr_if_rr_prvt_hbw_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_rtr0_mstr_if_rr_prvt_hbw_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_rtr0_mstr_if_rr_prvt_lbw_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_rtr0_mstr_if_rr_prvt_lbw_regs.h
index 5b52b88fee0f..5b52b88fee0f 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_rtr0_mstr_if_rr_prvt_lbw_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_rtr0_mstr_if_rr_prvt_lbw_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_rtr0_mstr_if_rr_shrd_hbw_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_rtr0_mstr_if_rr_shrd_hbw_regs.h
index d9b3f5cd392b..d9b3f5cd392b 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_rtr0_mstr_if_rr_shrd_hbw_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_rtr0_mstr_if_rr_shrd_hbw_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_rtr0_mstr_if_rr_shrd_lbw_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_rtr0_mstr_if_rr_shrd_lbw_regs.h
index 1bba940d3031..1bba940d3031 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_rtr0_mstr_if_rr_shrd_lbw_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_rtr0_mstr_if_rr_shrd_lbw_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_glbl_masks.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_glbl_masks.h
index f21540501cdd..f21540501cdd 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_glbl_masks.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_glbl_masks.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_glbl_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_glbl_regs.h
index c3c4991e6660..c3c4991e6660 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_glbl_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_glbl_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_mstr_if_axuser_masks.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_mstr_if_axuser_masks.h
index 76b273a41255..76b273a41255 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_mstr_if_axuser_masks.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_mstr_if_axuser_masks.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_mstr_if_axuser_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_mstr_if_axuser_regs.h
index 0bddc734329f..0bddc734329f 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_mstr_if_axuser_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_mstr_if_axuser_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_objs_masks.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_objs_masks.h
index 3a5b27df0ab4..3a5b27df0ab4 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_objs_masks.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_objs_masks.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_objs_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_objs_regs.h
index 8f082a1c9b1b..8f082a1c9b1b 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_objs_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_sync_mngr_objs_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_axuser_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_axuser_regs.h
index 2d4a22680a23..2d4a22680a23 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_axuser_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_axuser_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_kernel_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_kernel_regs.h
index cdab39debd2c..cdab39debd2c 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_kernel_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_kernel_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_kernel_tensor_0_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_kernel_tensor_0_regs.h
index 4ef1c1edc5f7..4ef1c1edc5f7 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_kernel_tensor_0_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_kernel_tensor_0_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_masks.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_masks.h
index cdecbd0f9d84..cdecbd0f9d84 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_masks.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_masks.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_qm_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_qm_regs.h
index 4cd9e26a150f..4cd9e26a150f 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_qm_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_qm_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_qm_sync_object_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_qm_sync_object_regs.h
index 8da278a3f3fe..8da278a3f3fe 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_qm_sync_object_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_qm_sync_object_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_qm_tensor_0_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_qm_tensor_0_regs.h
index 2e4ff06e4858..2e4ff06e4858 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_qm_tensor_0_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_qm_tensor_0_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_regs.h
index 4d48f0c6880b..4d48f0c6880b 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_special_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_special_regs.h
index 76ab8a1a7f31..76ab8a1a7f31 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_special_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_cfg_special_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_eml_busmon_0_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_eml_busmon_0_regs.h
index f07da4a24f06..f07da4a24f06 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_eml_busmon_0_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_eml_busmon_0_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_eml_etf_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_eml_etf_regs.h
index aee9cbc78c3d..aee9cbc78c3d 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_eml_etf_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_eml_etf_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_eml_funnel_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_eml_funnel_regs.h
index dee670b666ee..dee670b666ee 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_eml_funnel_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_eml_funnel_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_eml_spmu_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_eml_spmu_regs.h
index 580ae57476bd..580ae57476bd 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_eml_spmu_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_eml_spmu_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_eml_stm_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_eml_stm_regs.h
index 91686c563fe5..91686c563fe5 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_eml_stm_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_eml_stm_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_qm_arc_aux_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_qm_arc_aux_regs.h
index e007dabc5382..e007dabc5382 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_qm_arc_aux_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_qm_arc_aux_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_qm_axuser_nonsecured_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_qm_axuser_nonsecured_regs.h
index 149b85f5f045..149b85f5f045 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_qm_axuser_nonsecured_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_qm_axuser_nonsecured_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_qm_cgm_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_qm_cgm_regs.h
index d4aad1875ad6..d4aad1875ad6 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_qm_cgm_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_qm_cgm_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_qm_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_qm_regs.h
index cca8683cbca1..cca8683cbca1 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_qm_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_tpc0_qm_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_axuser_dec_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_axuser_dec_regs.h
index e68667cc795a..e68667cc795a 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_axuser_dec_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_axuser_dec_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_axuser_msix_abnrm_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_axuser_msix_abnrm_regs.h
index f7ffdcbd1a76..f7ffdcbd1a76 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_axuser_msix_abnrm_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_axuser_msix_abnrm_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_axuser_msix_l2c_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_axuser_msix_l2c_regs.h
index 4c1bb5306cba..4c1bb5306cba 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_axuser_msix_l2c_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_axuser_msix_l2c_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_axuser_msix_nrm_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_axuser_msix_nrm_regs.h
index e413905ffe25..e413905ffe25 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_axuser_msix_nrm_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_axuser_msix_nrm_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_axuser_msix_vcd_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_axuser_msix_vcd_regs.h
index bce75ac6e279..bce75ac6e279 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_axuser_msix_vcd_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_axuser_msix_vcd_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_masks.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_masks.h
index 68dd98459c86..1c02f3dfdb6e 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_masks.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_masks.h
@@ -106,8 +106,7 @@
#define DCORE0_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE_AWBURST_VIOL_SHIFT 2
#define DCORE0_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE_AWBURST_VIOL_MASK 0x4
#define DCORE0_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE_AWADDR_SIZE_ALIGN_VIOL_SHIFT 3
-#define DCORE0_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE_AWADDR_SIZE_ALIGN_VIOL_MASK \
-0x8
+#define DCORE0_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE_AWADDR_SIZE_ALIGN_VIOL_MASK 0x8
#define DCORE0_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE_AWSIZE_VIOL_SHIFT 4
#define DCORE0_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE_AWSIZE_VIOL_MASK 0x10
#define DCORE0_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE_ARLEN_GT_31_SHIFT 5
@@ -117,8 +116,7 @@
#define DCORE0_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE_ARBURST_VIOL_SHIFT 7
#define DCORE0_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE_ARBURST_VIOL_MASK 0x80
#define DCORE0_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE_ARADDR_SIZE_ALIGN_VIOL_SHIFT 8
-#define DCORE0_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE_ARADDR_SIZE_ALIGN_VIOL_MASK \
-0x100
+#define DCORE0_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE_ARADDR_SIZE_ALIGN_VIOL_MASK 0x100
#define DCORE0_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE_ARSIZE_VIOL_SHIFT 9
#define DCORE0_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE_ARSIZE_VIOL_MASK 0x200
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_regs.h
index d2844307a6bf..d2844307a6bf 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_brdg_ctrl_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_ctrl_special_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_ctrl_special_regs.h
index 89b522b12998..89b522b12998 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_ctrl_special_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore0_vdec0_ctrl_special_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore1_mme_ctrl_lo_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore1_mme_ctrl_lo_regs.h
index 622613dc76fb..622613dc76fb 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore1_mme_ctrl_lo_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore1_mme_ctrl_lo_regs.h
diff --git a/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore1_sync_mngr_glbl_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore1_sync_mngr_glbl_regs.h
new file mode 100644
index 000000000000..3d3802755814
--- /dev/null
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore1_sync_mngr_glbl_regs.h
@@ -0,0 +1,1203 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2016-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+/************************************
+ ** This is an auto-generated file **
+ ** DO NOT EDIT BELOW **
+ ************************************/
+
+#ifndef ASIC_REG_DCORE1_SYNC_MNGR_GLBL_REGS_H_
+#define ASIC_REG_DCORE1_SYNC_MNGR_GLBL_REGS_H_
+
+/*
+ *****************************************
+ * DCORE1_SYNC_MNGR_GLBL
+ * (Prototype: SOB_GLBL)
+ *****************************************
+ */
+
+#define mmDCORE1_SYNC_MNGR_GLBL_SM_SEI_MASK 0x431E000
+
+#define mmDCORE1_SYNC_MNGR_GLBL_SM_SEI_CAUSE 0x431E004
+
+#define mmDCORE1_SYNC_MNGR_GLBL_L2H_CPMR_L 0x431E008
+
+#define mmDCORE1_SYNC_MNGR_GLBL_L2H_CPMR_H 0x431E00C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_L2H_MASK_L 0x431E020
+
+#define mmDCORE1_SYNC_MNGR_GLBL_L2H_MASK_H 0x431E024
+
+#define mmDCORE1_SYNC_MNGR_GLBL_ASID_SEC 0x431E030
+
+#define mmDCORE1_SYNC_MNGR_GLBL_ASID_PRIV_ONLY 0x431E034
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DELAY 0x431E038
+
+#define mmDCORE1_SYNC_MNGR_GLBL_PI_SIZE 0x431E03C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_SOB_ONLY 0x431E040
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INTR 0x431E044
+
+#define mmDCORE1_SYNC_MNGR_GLBL_ASID_NONE_SEC_PRIV 0x431E048
+
+#define mmDCORE1_SYNC_MNGR_GLBL_PI_INC_MODE_SIZE 0x431E04C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_0 0x431E050
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_1 0x431E054
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_2 0x431E058
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_3 0x431E05C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_4 0x431E060
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_5 0x431E064
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_6 0x431E068
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_7 0x431E06C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_8 0x431E070
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_9 0x431E074
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_10 0x431E078
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_11 0x431E07C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_12 0x431E080
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_13 0x431E084
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_14 0x431E088
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_15 0x431E08C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_16 0x431E090
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_17 0x431E094
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_18 0x431E098
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_19 0x431E09C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_20 0x431E0A0
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_21 0x431E0A4
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_22 0x431E0A8
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_23 0x431E0AC
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_24 0x431E0B0
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_25 0x431E0B4
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_26 0x431E0B8
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_27 0x431E0BC
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_28 0x431E0C0
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_29 0x431E0C4
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_30 0x431E0C8
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_31 0x431E0CC
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_32 0x431E0D0
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_33 0x431E0D4
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_34 0x431E0D8
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_35 0x431E0DC
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_36 0x431E0E0
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_37 0x431E0E4
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_38 0x431E0E8
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_39 0x431E0EC
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_40 0x431E0F0
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_41 0x431E0F4
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_42 0x431E0F8
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_43 0x431E0FC
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_44 0x431E100
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_45 0x431E104
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_46 0x431E108
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_47 0x431E10C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_48 0x431E110
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_49 0x431E114
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_50 0x431E118
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_51 0x431E11C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_52 0x431E120
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_53 0x431E124
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_54 0x431E128
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_55 0x431E12C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_56 0x431E130
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_57 0x431E134
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_58 0x431E138
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_59 0x431E13C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_60 0x431E140
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_61 0x431E144
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_62 0x431E148
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_63 0x431E14C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_0 0x431E150
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_1 0x431E154
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_2 0x431E158
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_3 0x431E15C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_4 0x431E160
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_5 0x431E164
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_6 0x431E168
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_7 0x431E16C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_8 0x431E170
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_9 0x431E174
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_10 0x431E178
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_11 0x431E17C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_12 0x431E180
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_13 0x431E184
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_14 0x431E188
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_15 0x431E18C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_16 0x431E190
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_17 0x431E194
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_18 0x431E198
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_19 0x431E19C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_20 0x431E1A0
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_21 0x431E1A4
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_22 0x431E1A8
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_23 0x431E1AC
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_24 0x431E1B0
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_25 0x431E1B4
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_26 0x431E1B8
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_27 0x431E1BC
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_28 0x431E1C0
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_29 0x431E1C4
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_30 0x431E1C8
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_31 0x431E1CC
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_32 0x431E1D0
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_33 0x431E1D4
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_34 0x431E1D8
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_35 0x431E1DC
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_36 0x431E1E0
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_37 0x431E1E4
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_38 0x431E1E8
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_39 0x431E1EC
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_40 0x431E1F0
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_41 0x431E1F4
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_42 0x431E1F8
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_43 0x431E1FC
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_44 0x431E200
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_45 0x431E204
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_46 0x431E208
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_47 0x431E20C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_48 0x431E210
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_49 0x431E214
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_50 0x431E218
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_51 0x431E21C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_52 0x431E220
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_53 0x431E224
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_54 0x431E228
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_55 0x431E22C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_56 0x431E230
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_57 0x431E234
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_58 0x431E238
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_59 0x431E23C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_60 0x431E240
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_61 0x431E244
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_62 0x431E248
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_63 0x431E24C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_0 0x431E250
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_1 0x431E254
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_2 0x431E258
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_3 0x431E25C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_4 0x431E260
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_5 0x431E264
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_6 0x431E268
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_7 0x431E26C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_8 0x431E270
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_9 0x431E274
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_10 0x431E278
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_11 0x431E27C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_12 0x431E280
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_13 0x431E284
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_14 0x431E288
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_15 0x431E28C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_16 0x431E290
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_17 0x431E294
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_18 0x431E298
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_19 0x431E29C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_20 0x431E2A0
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_21 0x431E2A4
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_22 0x431E2A8
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_23 0x431E2AC
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_24 0x431E2B0
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_25 0x431E2B4
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_26 0x431E2B8
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_27 0x431E2BC
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_28 0x431E2C0
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_29 0x431E2C4
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_30 0x431E2C8
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_31 0x431E2CC
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_32 0x431E2D0
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_33 0x431E2D4
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_34 0x431E2D8
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_35 0x431E2DC
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_36 0x431E2E0
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_37 0x431E2E4
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_38 0x431E2E8
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_39 0x431E2EC
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_40 0x431E2F0
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_41 0x431E2F4
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_42 0x431E2F8
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_43 0x431E2FC
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_44 0x431E300
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_45 0x431E304
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_46 0x431E308
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_47 0x431E30C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_48 0x431E310
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_49 0x431E314
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_50 0x431E318
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_51 0x431E31C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_52 0x431E320
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_53 0x431E324
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_54 0x431E328
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_55 0x431E32C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_56 0x431E330
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_57 0x431E334
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_58 0x431E338
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_59 0x431E33C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_60 0x431E340
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_61 0x431E344
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_62 0x431E348
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_63 0x431E34C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_0 0x431E350
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_1 0x431E354
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_2 0x431E358
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_3 0x431E35C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_4 0x431E360
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_5 0x431E364
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_6 0x431E368
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_7 0x431E36C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_8 0x431E370
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_9 0x431E374
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_10 0x431E378
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_11 0x431E37C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_12 0x431E380
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_13 0x431E384
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_14 0x431E388
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_15 0x431E38C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_16 0x431E390
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_17 0x431E394
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_18 0x431E398
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_19 0x431E39C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_20 0x431E3A0
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_21 0x431E3A4
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_22 0x431E3A8
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_23 0x431E3AC
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_24 0x431E3B0
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_25 0x431E3B4
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_26 0x431E3B8
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_27 0x431E3BC
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_28 0x431E3C0
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_29 0x431E3C4
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_30 0x431E3C8
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_31 0x431E3CC
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_32 0x431E3D0
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_33 0x431E3D4
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_34 0x431E3D8
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_35 0x431E3DC
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_36 0x431E3E0
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_37 0x431E3E4
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_38 0x431E3E8
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_39 0x431E3EC
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_40 0x431E3F0
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_41 0x431E3F4
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_42 0x431E3F8
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_43 0x431E3FC
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_44 0x431E400
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_45 0x431E404
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_46 0x431E408
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_47 0x431E40C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_48 0x431E410
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_49 0x431E414
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_50 0x431E418
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_51 0x431E41C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_52 0x431E420
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_53 0x431E424
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_54 0x431E428
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_55 0x431E42C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_56 0x431E430
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_57 0x431E434
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_58 0x431E438
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_59 0x431E43C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_60 0x431E440
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_61 0x431E444
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_62 0x431E448
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_PI_63 0x431E44C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_0 0x431E450
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_1 0x431E454
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_2 0x431E458
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_3 0x431E45C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_4 0x431E460
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_5 0x431E464
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_6 0x431E468
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_7 0x431E46C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_8 0x431E470
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_9 0x431E474
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_10 0x431E478
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_11 0x431E47C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_12 0x431E480
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_13 0x431E484
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_14 0x431E488
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_15 0x431E48C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_16 0x431E490
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_17 0x431E494
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_18 0x431E498
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_19 0x431E49C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_20 0x431E4A0
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_21 0x431E4A4
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_22 0x431E4A8
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_23 0x431E4AC
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_24 0x431E4B0
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_25 0x431E4B4
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_26 0x431E4B8
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_27 0x431E4BC
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_28 0x431E4C0
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_29 0x431E4C4
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_30 0x431E4C8
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_31 0x431E4CC
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_32 0x431E4D0
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_33 0x431E4D4
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_34 0x431E4D8
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_35 0x431E4DC
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_36 0x431E4E0
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_37 0x431E4E4
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_38 0x431E4E8
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_39 0x431E4EC
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_40 0x431E4F0
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_41 0x431E4F4
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_42 0x431E4F8
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_43 0x431E4FC
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_44 0x431E500
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_45 0x431E504
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_46 0x431E508
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_47 0x431E50C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_48 0x431E510
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_49 0x431E514
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_50 0x431E518
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_51 0x431E51C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_52 0x431E520
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_53 0x431E524
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_54 0x431E528
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_55 0x431E52C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_56 0x431E530
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_57 0x431E534
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_58 0x431E538
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_59 0x431E53C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_60 0x431E540
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_61 0x431E544
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_62 0x431E548
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_SEC_63 0x431E54C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_0 0x431E550
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_1 0x431E554
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_2 0x431E558
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_3 0x431E55C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_4 0x431E560
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_5 0x431E564
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_6 0x431E568
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_7 0x431E56C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_8 0x431E570
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_9 0x431E574
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_10 0x431E578
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_11 0x431E57C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_12 0x431E580
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_13 0x431E584
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_14 0x431E588
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_15 0x431E58C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_16 0x431E590
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_17 0x431E594
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_18 0x431E598
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_19 0x431E59C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_20 0x431E5A0
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_21 0x431E5A4
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_22 0x431E5A8
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_23 0x431E5AC
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_24 0x431E5B0
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_25 0x431E5B4
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_26 0x431E5B8
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_27 0x431E5BC
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_28 0x431E5C0
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_29 0x431E5C4
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_30 0x431E5C8
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_31 0x431E5CC
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_32 0x431E5D0
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_33 0x431E5D4
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_34 0x431E5D8
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_35 0x431E5DC
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_36 0x431E5E0
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_37 0x431E5E4
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_38 0x431E5E8
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_39 0x431E5EC
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_40 0x431E5F0
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_41 0x431E5F4
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_42 0x431E5F8
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_43 0x431E5FC
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_44 0x431E600
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_45 0x431E604
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_46 0x431E608
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_47 0x431E60C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_48 0x431E610
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_49 0x431E614
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_50 0x431E618
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_51 0x431E61C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_52 0x431E620
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_53 0x431E624
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_54 0x431E628
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_55 0x431E62C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_56 0x431E630
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_57 0x431E634
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_58 0x431E638
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_59 0x431E63C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_60 0x431E640
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_61 0x431E644
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_62 0x431E648
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_L_63 0x431E64C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_0 0x431E650
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_1 0x431E654
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_2 0x431E658
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_3 0x431E65C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_4 0x431E660
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_5 0x431E664
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_6 0x431E668
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_7 0x431E66C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_8 0x431E670
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_9 0x431E674
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_10 0x431E678
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_11 0x431E67C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_12 0x431E680
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_13 0x431E684
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_14 0x431E688
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_15 0x431E68C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_16 0x431E690
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_17 0x431E694
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_18 0x431E698
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_19 0x431E69C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_20 0x431E6A0
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_21 0x431E6A4
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_22 0x431E6A8
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_23 0x431E6AC
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_24 0x431E6B0
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_25 0x431E6B4
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_26 0x431E6B8
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_27 0x431E6BC
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_28 0x431E6C0
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_29 0x431E6C4
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_30 0x431E6C8
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_31 0x431E6CC
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_32 0x431E6D0
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_33 0x431E6D4
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_34 0x431E6D8
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_35 0x431E6DC
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_36 0x431E6E0
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_37 0x431E6E4
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_38 0x431E6E8
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_39 0x431E6EC
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_40 0x431E6F0
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_41 0x431E6F4
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_42 0x431E6F8
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_43 0x431E6FC
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_44 0x431E700
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_45 0x431E704
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_46 0x431E708
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_47 0x431E70C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_48 0x431E710
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_49 0x431E714
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_50 0x431E718
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_51 0x431E71C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_52 0x431E720
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_53 0x431E724
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_54 0x431E728
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_55 0x431E72C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_56 0x431E730
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_57 0x431E734
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_58 0x431E738
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_59 0x431E73C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_60 0x431E740
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_61 0x431E744
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_62 0x431E748
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_ADDR_H_63 0x431E74C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_0 0x431E750
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_1 0x431E754
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_2 0x431E758
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_3 0x431E75C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_4 0x431E760
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_5 0x431E764
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_6 0x431E768
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_7 0x431E76C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_8 0x431E770
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_9 0x431E774
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_10 0x431E778
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_11 0x431E77C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_12 0x431E780
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_13 0x431E784
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_14 0x431E788
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_15 0x431E78C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_16 0x431E790
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_17 0x431E794
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_18 0x431E798
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_19 0x431E79C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_20 0x431E7A0
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_21 0x431E7A4
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_22 0x431E7A8
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_23 0x431E7AC
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_24 0x431E7B0
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_25 0x431E7B4
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_26 0x431E7B8
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_27 0x431E7BC
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_28 0x431E7C0
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_29 0x431E7C4
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_30 0x431E7C8
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_31 0x431E7CC
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_32 0x431E7D0
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_33 0x431E7D4
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_34 0x431E7D8
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_35 0x431E7DC
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_36 0x431E7E0
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_37 0x431E7E4
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_38 0x431E7E8
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_39 0x431E7EC
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_40 0x431E7F0
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_41 0x431E7F4
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_42 0x431E7F8
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_43 0x431E7FC
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_44 0x431E800
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_45 0x431E804
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_46 0x431E808
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_47 0x431E80C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_48 0x431E810
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_49 0x431E814
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_50 0x431E818
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_51 0x431E81C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_52 0x431E820
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_53 0x431E824
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_54 0x431E828
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_55 0x431E82C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_56 0x431E830
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_57 0x431E834
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_58 0x431E838
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_59 0x431E83C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_60 0x431E840
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_61 0x431E844
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_62 0x431E848
+
+#define mmDCORE1_SYNC_MNGR_GLBL_LBW_DATA_63 0x431E84C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_0 0x431E850
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_1 0x431E854
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_2 0x431E858
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_3 0x431E85C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_4 0x431E860
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_5 0x431E864
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_6 0x431E868
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_7 0x431E86C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_8 0x431E870
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_9 0x431E874
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_10 0x431E878
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_11 0x431E87C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_12 0x431E880
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_13 0x431E884
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_14 0x431E888
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_15 0x431E88C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_16 0x431E890
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_17 0x431E894
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_18 0x431E898
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_19 0x431E89C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_20 0x431E8A0
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_21 0x431E8A4
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_22 0x431E8A8
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_23 0x431E8AC
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_24 0x431E8B0
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_25 0x431E8B4
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_26 0x431E8B8
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_27 0x431E8BC
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_28 0x431E8C0
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_29 0x431E8C4
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_30 0x431E8C8
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_31 0x431E8CC
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_32 0x431E8D0
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_33 0x431E8D4
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_34 0x431E8D8
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_35 0x431E8DC
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_36 0x431E8E0
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_37 0x431E8E4
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_38 0x431E8E8
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_39 0x431E8EC
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_40 0x431E8F0
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_41 0x431E8F4
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_42 0x431E8F8
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_43 0x431E8FC
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_44 0x431E900
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_45 0x431E904
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_46 0x431E908
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_47 0x431E90C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_48 0x431E910
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_49 0x431E914
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_50 0x431E918
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_51 0x431E91C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_52 0x431E920
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_53 0x431E924
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_54 0x431E928
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_55 0x431E92C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_56 0x431E930
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_57 0x431E934
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_58 0x431E938
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_59 0x431E93C
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_60 0x431E940
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_61 0x431E944
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_62 0x431E948
+
+#define mmDCORE1_SYNC_MNGR_GLBL_CQ_INC_MODE_63 0x431E94C
+
+#endif /* ASIC_REG_DCORE1_SYNC_MNGR_GLBL_REGS_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore3_mme_ctrl_lo_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore3_mme_ctrl_lo_regs.h
index b06469f5a279..b06469f5a279 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/dcore3_mme_ctrl_lo_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/dcore3_mme_ctrl_lo_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/gaudi2_blocks_linux_driver.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/gaudi2_blocks_linux_driver.h
index 3caee4515ad6..3caee4515ad6 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/gaudi2_blocks_linux_driver.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/gaudi2_blocks_linux_driver.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/gaudi2_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/gaudi2_regs.h
index 6aa1b1412462..0bf3092bfeea 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/gaudi2_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/gaudi2_regs.h
@@ -31,6 +31,7 @@
#include "dcore0_sync_mngr_objs_regs.h"
#include "dcore0_sync_mngr_glbl_regs.h"
#include "dcore0_sync_mngr_mstr_if_axuser_regs.h"
+#include "dcore1_sync_mngr_glbl_regs.h"
#include "pdma0_qm_arc_aux_regs.h"
#include "pdma0_core_ctx_regs.h"
#include "pdma0_core_regs.h"
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/nic0_qm0_cgm_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/nic0_qm0_cgm_regs.h
index d49906a68511..d49906a68511 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/nic0_qm0_cgm_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/nic0_qm0_cgm_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/nic0_qm0_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/nic0_qm0_regs.h
index acb19c1cd4bd..acb19c1cd4bd 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/nic0_qm0_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/nic0_qm0_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/nic0_qm_arc_aux0_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/nic0_qm_arc_aux0_regs.h
index 5f380a44dd21..5f380a44dd21 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/nic0_qm_arc_aux0_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/nic0_qm_arc_aux0_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/nic0_qpc0_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/nic0_qpc0_regs.h
index eaee29da4244..eaee29da4244 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/nic0_qpc0_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/nic0_qpc0_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/nic0_umr0_0_completion_queue_ci_1_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/nic0_umr0_0_completion_queue_ci_1_regs.h
index 2153319a50a0..2153319a50a0 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/nic0_umr0_0_completion_queue_ci_1_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/nic0_umr0_0_completion_queue_ci_1_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/nic0_umr0_0_unsecure_doorbell0_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/nic0_umr0_0_unsecure_doorbell0_regs.h
index de8eac74c2fb..de8eac74c2fb 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/nic0_umr0_0_unsecure_doorbell0_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/nic0_umr0_0_unsecure_doorbell0_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_aux_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_aux_regs.h
index 44182fc18234..44182fc18234 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_aux_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_aux_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_dbi_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_dbi_regs.h
index cc5842ec6ceb..2ee79d8e62d0 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_dbi_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_dbi_regs.h
@@ -48,8 +48,7 @@
#define mmPCIE_DBI_PCI_CAP_PTR_REG 0x4C02034
-#define mmPCIE_DBI_MAX_LATENCY_MIN_GRANT_INTERRUPT_PIN_INTERRUPT_LINE_REG \
-0x4C0203C
+#define mmPCIE_DBI_MAX_LATENCY_MIN_GRANT_INTERRUPT_PIN_INTERRUPT_LINE_REG 0x4C0203C
#define mmPCIE_DBI_CAP_ID_NXT_PTR_REG 0x4C02040
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_dec0_cmd_masks.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_dec0_cmd_masks.h
index 2b5af010c7a5..2b5af010c7a5 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_dec0_cmd_masks.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_dec0_cmd_masks.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_dec0_cmd_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_dec0_cmd_regs.h
index dc7d3f6a4b50..dc7d3f6a4b50 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_dec0_cmd_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_dec0_cmd_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_axuser_dec_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_axuser_dec_regs.h
index 242c6525bd71..242c6525bd71 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_axuser_dec_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_axuser_dec_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_axuser_msix_abnrm_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_axuser_msix_abnrm_regs.h
index 98d035463561..98d035463561 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_axuser_msix_abnrm_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_axuser_msix_abnrm_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_axuser_msix_l2c_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_axuser_msix_l2c_regs.h
index 33ef37619417..33ef37619417 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_axuser_msix_l2c_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_axuser_msix_l2c_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_axuser_msix_nrm_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_axuser_msix_nrm_regs.h
index c4587d5d6406..c4587d5d6406 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_axuser_msix_nrm_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_axuser_msix_nrm_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_axuser_msix_vcd_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_axuser_msix_vcd_regs.h
index 35349ad375d0..35349ad375d0 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_axuser_msix_vcd_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_axuser_msix_vcd_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_masks.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_masks.h
index d29837883216..7a96aebf08b3 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_masks.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_masks.h
@@ -116,8 +116,7 @@
#define PCIE_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE_ARBURST_VIOL_SHIFT 7
#define PCIE_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE_ARBURST_VIOL_MASK 0x80
#define PCIE_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE_ARADDR_SIZE_ALIGN_VIOL_SHIFT 8
-#define PCIE_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE_ARADDR_SIZE_ALIGN_VIOL_MASK \
-0x100
+#define PCIE_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE_ARADDR_SIZE_ALIGN_VIOL_MASK 0x100
#define PCIE_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE_ARSIZE_VIOL_SHIFT 9
#define PCIE_VDEC0_BRDG_CTRL_HBW_AXI_VIOL_CAUSE_ARSIZE_VIOL_MASK 0x200
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_regs.h
index c7badd212f2b..c7badd212f2b 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_brdg_ctrl_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_ctrl_special_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_ctrl_special_regs.h
index 491b0cd935af..491b0cd935af 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_ctrl_special_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_vdec0_ctrl_special_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_wrap_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_wrap_regs.h
index a09422f2f281..a09422f2f281 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_wrap_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_wrap_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_wrap_special_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_wrap_special_regs.h
index 46558e7a7f63..46558e7a7f63 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pcie_wrap_special_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pcie_wrap_special_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_core_ctx_axuser_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pdma0_core_ctx_axuser_regs.h
index bacbe4c6fc3c..bacbe4c6fc3c 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_core_ctx_axuser_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pdma0_core_ctx_axuser_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_core_ctx_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pdma0_core_ctx_regs.h
index 02b57f07cfaf..02b57f07cfaf 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_core_ctx_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pdma0_core_ctx_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_core_masks.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pdma0_core_masks.h
index 909cda03c246..909cda03c246 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_core_masks.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pdma0_core_masks.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_core_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pdma0_core_regs.h
index 84079b5077e2..84079b5077e2 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_core_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pdma0_core_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_core_special_masks.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pdma0_core_special_masks.h
index 15d257e3830e..15d257e3830e 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_core_special_masks.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pdma0_core_special_masks.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_qm_arc_aux_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pdma0_qm_arc_aux_regs.h
index 9b1cb609d134..9b1cb609d134 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_qm_arc_aux_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pdma0_qm_arc_aux_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_qm_axuser_nonsecured_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pdma0_qm_axuser_nonsecured_regs.h
index d2e0756ec5f2..d2e0756ec5f2 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_qm_axuser_nonsecured_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pdma0_qm_axuser_nonsecured_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_qm_axuser_secured_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pdma0_qm_axuser_secured_regs.h
index 8bf0516b83f7..8bf0516b83f7 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_qm_axuser_secured_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pdma0_qm_axuser_secured_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_qm_cgm_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pdma0_qm_cgm_regs.h
index 96c0ce176e73..96c0ce176e73 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_qm_cgm_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pdma0_qm_cgm_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_qm_masks.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pdma0_qm_masks.h
index b79cae8f5571..b79cae8f5571 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_qm_masks.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pdma0_qm_masks.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_qm_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pdma0_qm_regs.h
index 77d803c938d4..77d803c938d4 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma0_qm_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pdma0_qm_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma1_core_ctx_axuser_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pdma1_core_ctx_axuser_regs.h
index ccc6dfd22dd7..ccc6dfd22dd7 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma1_core_ctx_axuser_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pdma1_core_ctx_axuser_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma1_qm_axuser_nonsecured_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pdma1_qm_axuser_nonsecured_regs.h
index 5fd72d050fff..5fd72d050fff 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pdma1_qm_axuser_nonsecured_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pdma1_qm_axuser_nonsecured_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pmmu_hbw_stlb_masks.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pmmu_hbw_stlb_masks.h
index 0276506ea523..b4f32632cd36 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pmmu_hbw_stlb_masks.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pmmu_hbw_stlb_masks.h
@@ -228,8 +228,7 @@
/* PMMU_HBW_STLB_RANGE_CACHE_INVALIDATION */
#define PMMU_HBW_STLB_RANGE_CACHE_INVALIDATION_RANGE_INVALIDATION_ENABLE_SHIFT 0
-#define PMMU_HBW_STLB_RANGE_CACHE_INVALIDATION_RANGE_INVALIDATION_ENABLE_MASK \
-0x1
+#define PMMU_HBW_STLB_RANGE_CACHE_INVALIDATION_RANGE_INVALIDATION_ENABLE_MASK 0x1
#define PMMU_HBW_STLB_RANGE_CACHE_INVALIDATION_INVALIDATION_ASID_EN_SHIFT 1
#define PMMU_HBW_STLB_RANGE_CACHE_INVALIDATION_INVALIDATION_ASID_EN_MASK 0x2
#define PMMU_HBW_STLB_RANGE_CACHE_INVALIDATION_INVALIDATION_ASID_SHIFT 2
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pmmu_hbw_stlb_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pmmu_hbw_stlb_regs.h
index 87c66c08e24a..87c66c08e24a 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pmmu_hbw_stlb_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pmmu_hbw_stlb_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pmmu_pif_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pmmu_pif_regs.h
index dd12793734b4..dd12793734b4 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/pmmu_pif_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/pmmu_pif_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/psoc_etr_masks.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/psoc_etr_masks.h
index 42e67c1059c4..42e67c1059c4 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/psoc_etr_masks.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/psoc_etr_masks.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/psoc_etr_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/psoc_etr_regs.h
index 980a3e0054c5..980a3e0054c5 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/psoc_etr_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/psoc_etr_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/psoc_global_conf_masks.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/psoc_global_conf_masks.h
index 9be3d656da3a..85a81e2cb546 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/psoc_global_conf_masks.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/psoc_global_conf_masks.h
@@ -1306,11 +1306,9 @@
#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL0_ADDR_EXTMEM_PC_LOC2_SHIFT 12
#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL0_ADDR_EXTMEM_PC_LOC2_MASK 0x3F000
#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL0_ADDR_EXTMEM_PC_LOC3_SHIFT 18
-#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL0_ADDR_EXTMEM_PC_LOC3_MASK \
-0xFC0000
+#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL0_ADDR_EXTMEM_PC_LOC3_MASK 0xFC0000
#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL0_ADDR_EXTMEM_HBM_LOC0_SHIFT 24
-#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL0_ADDR_EXTMEM_HBM_LOC0_MASK \
-0x3F000000
+#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL0_ADDR_EXTMEM_HBM_LOC0_MASK 0x3F000000
/* PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL1 */
#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL1_ADDR_EXTMEM_HBM_LOC1_SHIFT 0
@@ -1322,24 +1320,17 @@
#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL1_NON_LIN_HBM_CNT_EN_SHIFT 13
#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL1_NON_LIN_HBM_CNT_EN_MASK 0x2000
#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL1_NON_LIN_HBM_ALL_ADDR_EN_SHIFT 14
-#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL1_NON_LIN_HBM_ALL_ADDR_EN_MASK \
-0x4000
-#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL1_NON_LIN_HBM_ALL_ADDR_MASK_SHIFT \
-16
-#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL1_NON_LIN_HBM_ALL_ADDR_MASK_MASK \
-0xFF0000
+#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL1_NON_LIN_HBM_ALL_ADDR_EN_MASK 0x4000
+#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL1_NON_LIN_HBM_ALL_ADDR_MASK_SHIFT 16
+#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL1_NON_LIN_HBM_ALL_ADDR_MASK_MASK 0xFF0000
#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL1_HBM_NUM_SHIFT 24
#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL1_HBM_NUM_MASK 0x7000000
/* PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL2 */
-#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL2_SCRAM_NONLIN_HBM_CNT_MASK_SHIFT \
-0
-#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL2_SCRAM_NONLIN_HBM_CNT_MASK_MASK \
-0xFFFF
-#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL2_SCRAM_NONLIN_EXTM_PC_MASK_SHIFT \
-16
-#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL2_SCRAM_NONLIN_EXTM_PC_MASK_MASK \
-0xFFFF0000
+#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL2_SCRAM_NONLIN_HBM_CNT_MASK_SHIFT 0
+#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL2_SCRAM_NONLIN_HBM_CNT_MASK_MASK 0xFFFF
+#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL2_SCRAM_NONLIN_EXTM_PC_MASK_SHIFT 16
+#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL2_SCRAM_NONLIN_EXTM_PC_MASK_MASK 0xFFFF0000
/* PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL3 */
#define PSOC_GLOBAL_CONF_AXI_DRAIN_NL_SRC_CTRL3_HBM_MAP0_SHIFT 0
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/psoc_global_conf_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/psoc_global_conf_regs.h
index 48980fa8e37b..48980fa8e37b 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/psoc_global_conf_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/psoc_global_conf_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/psoc_reset_conf_masks.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/psoc_reset_conf_masks.h
index e0cf35226e7f..e0cf35226e7f 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/psoc_reset_conf_masks.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/psoc_reset_conf_masks.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/psoc_reset_conf_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/psoc_reset_conf_regs.h
index 6a89624f01d1..6a89624f01d1 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/psoc_reset_conf_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/psoc_reset_conf_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/psoc_timestamp_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/psoc_timestamp_regs.h
index 699becc28887..699becc28887 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/psoc_timestamp_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/psoc_timestamp_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/rot0_desc_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/rot0_desc_regs.h
index 79320320ebcb..79320320ebcb 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/rot0_desc_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/rot0_desc_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/rot0_masks.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/rot0_masks.h
index f2e739ede3d9..f2e739ede3d9 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/rot0_masks.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/rot0_masks.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/rot0_qm_arc_aux_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/rot0_qm_arc_aux_regs.h
index e83daa33d737..e83daa33d737 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/rot0_qm_arc_aux_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/rot0_qm_arc_aux_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/rot0_qm_axuser_nonsecured_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/rot0_qm_axuser_nonsecured_regs.h
index 8e040a2ef1c1..8e040a2ef1c1 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/rot0_qm_axuser_nonsecured_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/rot0_qm_axuser_nonsecured_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/rot0_qm_cgm_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/rot0_qm_cgm_regs.h
index 077ae2347a3d..077ae2347a3d 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/rot0_qm_cgm_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/rot0_qm_cgm_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/rot0_qm_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/rot0_qm_regs.h
index de3c85510af2..de3c85510af2 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/rot0_qm_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/rot0_qm_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/rot0_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/rot0_regs.h
index 7d85dc5559da..7d85dc5559da 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/rot0_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/rot0_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/xbar_edge_0_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/xbar_edge_0_regs.h
index e8aebd7f5f85..e8aebd7f5f85 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/xbar_edge_0_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/xbar_edge_0_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/asic_reg/xbar_mid_0_regs.h b/drivers/accel/habanalabs/include/gaudi2/asic_reg/xbar_mid_0_regs.h
index 3d39d1a94851..3d39d1a94851 100644
--- a/drivers/misc/habanalabs/include/gaudi2/asic_reg/xbar_mid_0_regs.h
+++ b/drivers/accel/habanalabs/include/gaudi2/asic_reg/xbar_mid_0_regs.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/gaudi2.h b/drivers/accel/habanalabs/include/gaudi2/gaudi2.h
index 5b4f9e108798..5b4f9e108798 100644
--- a/drivers/misc/habanalabs/include/gaudi2/gaudi2.h
+++ b/drivers/accel/habanalabs/include/gaudi2/gaudi2.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/gaudi2_async_events.h b/drivers/accel/habanalabs/include/gaudi2/gaudi2_async_events.h
index 305b576222e6..50852cc80373 100644
--- a/drivers/misc/habanalabs/include/gaudi2/gaudi2_async_events.h
+++ b/drivers/accel/habanalabs/include/gaudi2/gaudi2_async_events.h
@@ -958,6 +958,7 @@ enum gaudi2_async_event_id {
GAUDI2_EVENT_CPU11_STATUS_NIC11_ENG1 = 1318,
GAUDI2_EVENT_ARC_DCCM_FULL = 1319,
GAUDI2_EVENT_CPU_FP32_NOT_SUPPORTED = 1320,
+ GAUDI2_EVENT_DEV_RESET_REQ = 1321,
GAUDI2_EVENT_SIZE,
};
diff --git a/drivers/misc/habanalabs/include/gaudi2/gaudi2_async_ids_map_extended.h b/drivers/accel/habanalabs/include/gaudi2/gaudi2_async_ids_map_extended.h
index d510cb10c883..82be01bea98e 100644
--- a/drivers/misc/habanalabs/include/gaudi2/gaudi2_async_ids_map_extended.h
+++ b/drivers/accel/habanalabs/include/gaudi2/gaudi2_async_ids_map_extended.h
@@ -2665,6 +2665,8 @@ static struct gaudi2_async_events_ids_map gaudi2_irq_map_table[] = {
.msg = 1, .reset = 0, .name = "ARC_DCCM_FULL" },
{ .fc_id = 1320, .cpu_id = 626, .valid = 1,
.msg = 1, .reset = 1, .name = "FP32_NOT_SUPPORTED" },
+ { .fc_id = 1321, .cpu_id = 627, .valid = 1,
+ .msg = 1, .reset = 1, .name = "DEV_RESET_REQ" },
};
#endif /* __GAUDI2_ASYNC_IDS_MAP_EVENTS_EXT_H_ */
diff --git a/drivers/misc/habanalabs/include/gaudi2/gaudi2_coresight.h b/drivers/accel/habanalabs/include/gaudi2/gaudi2_coresight.h
index 14f09d7758c7..14f09d7758c7 100644
--- a/drivers/misc/habanalabs/include/gaudi2/gaudi2_coresight.h
+++ b/drivers/accel/habanalabs/include/gaudi2/gaudi2_coresight.h
diff --git a/drivers/misc/habanalabs/include/gaudi2/gaudi2_fw_if.h b/drivers/accel/habanalabs/include/gaudi2/gaudi2_fw_if.h
index e4a7d5725096..82f3ca2a3966 100644
--- a/drivers/misc/habanalabs/include/gaudi2/gaudi2_fw_if.h
+++ b/drivers/accel/habanalabs/include/gaudi2/gaudi2_fw_if.h
@@ -20,22 +20,25 @@
#define GAUDI2_NUM_MME 4
+#define NUM_OF_GPIOS_PER_PORT 16
+#define GAUDI2_WD_GPIO (62 % NUM_OF_GPIOS_PER_PORT)
+
#define GAUDI2_ARCPID_TX_MB_SIZE 0x1000
#define GAUDI2_ARCPID_RX_MB_SIZE 0x400
#define GAUDI2_ARM_TX_MB_SIZE 0x400
#define GAUDI2_ARM_RX_MB_SIZE 0x1800
#define GAUDI2_DCCM_BASE_ADDR 0x27020000
-#define GAUDI2_ARCPID_TX_MB_ADDR GAUDI2_DCCM_BASE_ADDR
-
-#define GAUDI2_ARCPID_RX_MB_ADDR (GAUDI2_ARCPID_TX_MB_ADDR + \
- GAUDI2_ARCPID_TX_MB_SIZE)
#define GAUDI2_ARM_TX_MB_ADDR GAUDI2_MAILBOX_BASE_ADDR
#define GAUDI2_ARM_RX_MB_ADDR (GAUDI2_ARM_TX_MB_ADDR + \
GAUDI2_ARM_TX_MB_SIZE)
+#define GAUDI2_ARCPID_TX_MB_ADDR (GAUDI2_ARM_RX_MB_ADDR + GAUDI2_ARM_RX_MB_SIZE)
+
+#define GAUDI2_ARCPID_RX_MB_ADDR (GAUDI2_ARCPID_TX_MB_ADDR + GAUDI2_ARCPID_TX_MB_SIZE)
+
#define GAUDI2_ARM_TX_MB_OFFSET (GAUDI2_ARM_TX_MB_ADDR - \
GAUDI2_SP_SRAM_BASE_ADDR)
@@ -58,7 +61,9 @@ struct gaudi2_cold_rst_data {
u32 spsram_init_done : 1;
u32 fake_security_enable : 1;
u32 fake_sig_validation_en : 1;
- u32 reserved : 26;
+ u32 bist_skip_enable : 1;
+ u32 bist_need_iatu_config : 1;
+ u32 reserved : 24;
};
__le32 data;
};
@@ -77,10 +82,10 @@ enum gaudi2_rst_src {
};
struct gaudi2_redundancy_ctx {
- int redundant_hbm;
- int redundant_edma;
- int redundant_tpc;
- int redundant_vdec;
+ __le32 redundant_hbm;
+ __le32 redundant_edma;
+ __le32 redundant_tpc;
+ __le32 redundant_vdec;
__le64 hbm_mask;
__le64 edma_mask;
__le64 tpc_mask;
diff --git a/drivers/misc/habanalabs/include/gaudi2/gaudi2_packets.h b/drivers/accel/habanalabs/include/gaudi2/gaudi2_packets.h
index 8bf90fc18bf5..a812f8503f90 100644
--- a/drivers/misc/habanalabs/include/gaudi2/gaudi2_packets.h
+++ b/drivers/accel/habanalabs/include/gaudi2/gaudi2_packets.h
@@ -59,7 +59,7 @@ struct gaudi2_packet {
/* The rest of the packet data follows. Use the corresponding
* packet_XXX struct to deference the data, based on packet type
*/
- u8 contents[0];
+ u8 contents[];
};
struct packet_nop {
@@ -80,7 +80,7 @@ struct packet_wreg32 {
struct packet_wreg_bulk {
__le32 size64;
__le32 ctl;
- __le64 values[0]; /* data starts here */
+ __le64 values[]; /* data starts here */
};
struct packet_msg_long {
diff --git a/drivers/misc/habanalabs/include/gaudi2/gaudi2_reg_map.h b/drivers/accel/habanalabs/include/gaudi2/gaudi2_reg_map.h
index ae7feb388f63..f3eaeb6d9b7e 100644
--- a/drivers/misc/habanalabs/include/gaudi2/gaudi2_reg_map.h
+++ b/drivers/accel/habanalabs/include/gaudi2/gaudi2_reg_map.h
@@ -24,14 +24,14 @@
#define mmGIC_HOST_HALT_IRQ_POLL_REG mmPSOC_GLOBAL_CONF_SCRATCHPAD_10
#define mmGIC_HOST_INTS_IRQ_POLL_REG mmPSOC_GLOBAL_CONF_SCRATCHPAD_11
#define mmGIC_HOST_SOFT_RST_IRQ_POLL_REG mmPSOC_GLOBAL_CONF_SCRATCHPAD_12
-#define mmEEPROM_COPY_LOCATION_REG mmPSOC_GLOBAL_CONF_SCRATCHPAD_13
#define mmCPU_RST_STATUS_TO_HOST mmPSOC_GLOBAL_CONF_SCRATCHPAD_14
-#define mmENGINE_ARC_IRQ_CTRL_POLL_REG mmPSOC_GLOBAL_CONF_SCRATCHPAD_15
-#define mmPID_CFG_REG mmPSOC_GLOBAL_CONF_SCRATCHPAD_18
/*
- * TODO: mmGIC_RAZWI_STATUS_REG is temporary
- * macro and to be removed after GAUDI2 PO
+ * Single scratchpad register used for all ARCs to notify dccm queue full event to FW.
+ * So a new event would overwrite any unhandled previous event. In other words, incase
+ * of multiple events before previous ones are handled, last one would be considered.
*/
+#define mmENGINE_ARC_IRQ_CTRL_POLL_REG mmPSOC_GLOBAL_CONF_SCRATCHPAD_15
+#define mmPID_CFG_REG mmPSOC_GLOBAL_CONF_SCRATCHPAD_18
#define mmGIC_RAZWI_STATUS_REG mmPSOC_GLOBAL_CONF_SCRATCHPAD_19
#define mmCPU_BOOT_DEV_STS0 mmPSOC_GLOBAL_CONF_SCRATCHPAD_20
#define mmCPU_BOOT_DEV_STS1 mmPSOC_GLOBAL_CONF_SCRATCHPAD_21
@@ -40,11 +40,10 @@
#define mmCPU_BOOT_ERR1 mmPSOC_GLOBAL_CONF_SCRATCHPAD_25
#define mmUPD_STS mmPSOC_GLOBAL_CONF_SCRATCHPAD_26
#define mmUPD_CMD mmPSOC_GLOBAL_CONF_SCRATCHPAD_27
-#define mmUBOOT_VER_OFFSET mmPSOC_GLOBAL_CONF_SCRATCHPAD_29
+#define mmPPBOOT_VER_OFFSET mmPSOC_GLOBAL_CONF_SCRATCHPAD_28
#define mmRDWR_TEST mmPSOC_GLOBAL_CONF_SCRATCHPAD_30
#define mmBTL_ID mmPSOC_GLOBAL_CONF_SCRATCHPAD_31
#define mmRST_SRC mmPSOC_GLOBAL_CONF_COLD_RST_FLOPS_0
-#define mmPREBOOT_PCIE_EN mmPSOC_GLOBAL_CONF_COLD_RST_FLOPS_1
#define mmCOLD_RST_DATA mmPSOC_GLOBAL_CONF_COLD_RST_FLOPS_2
#define mmUPD_PENDING_STS mmPSOC_GLOBAL_CONF_COLD_RST_FLOPS_3
#define mmPID_CMD_REQ_REG mmPSOC_PID_PID_CMD_0
@@ -55,5 +54,8 @@
#define mmPID_CMD_TELEMETRY_REG_0_HI mmPSOC_PID_PID_CMD_5
#define mmPID_CMD_TELEMETRY_REG_1 mmPSOC_PID_PID_CMD_6
#define mmPID_CMD_TELEMETRY_REG_1_HI mmPSOC_PID_PID_CMD_7
+#define mmWD_GPIO_OUTSET_REG mmPSOC_GPIO3_OUTENSET
+#define mmWD_GPIO_DATAOUT_REG mmPSOC_GPIO3_DATAOUT
+#define mmSTM_PROFILER_SPE_REG mmPSOC_STM_STMSPER
#endif /* GAUDI2_REG_MAP_H_ */
diff --git a/drivers/accel/habanalabs/include/gaudi2/gaudi2_special_blocks.h b/drivers/accel/habanalabs/include/gaudi2/gaudi2_special_blocks.h
new file mode 100644
index 000000000000..a55668f92dd1
--- /dev/null
+++ b/drivers/accel/habanalabs/include/gaudi2/gaudi2_special_blocks.h
@@ -0,0 +1,157 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2022 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ */
+
+/*
+ * This file was generated automatically.
+ * DON'T EDIT THIS FILE.
+ */
+
+#ifndef GAUDI2_SPECIAL_BLOCKS_H
+#define GAUDI2_SPECIAL_BLOCKS_H
+
+#define GAUDI2_SPECIAL_BLOCKS { \
+ { GAUDI2_BLOCK_TYPE_TPC, 0xfc008000, 4, 6, 0, 0x200000, 0x10000, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_TPC, 0xfc00a000, 4, 6, 0, 0x200000, 0x10000, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_TPC, 0xfc00b000, 4, 6, 0, 0x200000, 0x10000, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_TPC, 0xfc00c000, 4, 6, 0, 0x200000, 0x10000, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_HMMU, 0xfc080000, 4, 4, 0, 0x200000, 0x10000, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_HMMU, 0xfc081000, 4, 4, 0, 0x200000, 0x10000, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_HMMU, 0xfc083000, 4, 4, 0, 0x200000, 0x10000, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_HMMU, 0xfc084000, 4, 4, 0, 0x200000, 0x10000, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_MME, 0xfc0c8000, 4, 0, 0, 0x200000, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_MME, 0xfc0c9000, 4, 0, 0, 0x200000, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_MME, 0xfc0ca000, 4, 0, 0, 0x200000, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_MME, 0xfc0cb000, 4, 0, 0, 0x200000, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_MME, 0xfc0cc000, 4, 0, 0, 0x200000, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_EU_BIST, 0xfc0cd000, 4, 0, 0, 0x200000, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_MME, 0xfc0ce000, 4, 0, 0, 0x200000, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_MME, 0xfc0cf000, 4, 0, 0, 0x200000, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_MME, 0xfc0d0000, 4, 5, 0, 0x200000, 0x8000, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_MME, 0xfc0d1000, 4, 5, 0, 0x200000, 0x8000, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_MME, 0xfc0f8000, 4, 0, 0, 0x200000, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_MME, 0xfc0f9000, 4, 2, 0, 0x200000, 0x1000, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_SYNC_MNGR, 0xfc11e000, 4, 0, 0, 0x200000, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_SYNC_MNGR, 0xfc11f000, 4, 0, 0, 0x200000, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_HIF, 0xfc120000, 4, 4, 0, 0x200000, 0x4000, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_RTR, 0xfc140000, 4, 8, 0, 0x200000, 0x8000, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_RTR, 0xfc141000, 4, 8, 0, 0x200000, 0x8000, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_RTR, 0xfc142000, 4, 8, 0, 0x200000, 0x8000, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_RTR, 0xfc143000, 4, 8, 0, 0x200000, 0x8000, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_RTR, 0xfc144000, 4, 8, 0, 0x200000, 0x8000, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_RTR, 0xfc145000, 4, 8, 0, 0x200000, 0x8000, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_SRAM, 0xfc180000, 4, 8, 0, 0x200000, 0x8000, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_RTR, 0xfc181000, 4, 8, 0, 0x200000, 0x8000, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_SRAM, 0xfc182000, 4, 8, 0, 0x200000, 0x8000, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_EDMA, 0xfc1c8000, 4, 2, 0, 0x200000, 0x10000, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_EDMA, 0xfc1ca000, 4, 2, 0, 0x200000, 0x10000, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_EDMA, 0xfc1cb000, 4, 2, 0, 0x200000, 0x10000, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_EDMA, 0xfc1cc000, 4, 2, 0, 0x200000, 0x10000, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_DEC, 0xfc1e3000, 4, 2, 0, 0x200000, 0x10000, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_DEC, 0xfc1e4000, 4, 2, 0, 0x200000, 0x10000, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_DEC, 0xfc1e5000, 4, 2, 0, 0x200000, 0x10000, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_PCIE, 0xfcc01000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_PCIE, 0xfcc04000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_PCIE, 0xfcc07000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_PCIE, 0xfcc10000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_PCIE, 0xfcc14000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_PCIE, 0xfcc15000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_PCIE, 0xfcc16000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_PSOC, 0xfcc4a000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_PSOC, 0xfcc4b000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_PSOC, 0xfcc4e000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_PSOC, 0xfcc4f000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_PSOC, 0xfcc53000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_PSOC, 0xfcc54000, 2, 0, 0, 0x1000, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_PSOC, 0xfcc58000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_PSOC, 0xfcc59000, 2, 0, 0, 0x3000, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_PSOC, 0xfcc5a000, 2, 0, 0, 0x3000, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_PSOC, 0xfcc5b000, 2, 0, 0, 0x3000, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_PSOC, 0xfcc60000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_PSOC, 0xfcc61000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_PSOC, 0xfcc62000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_PSOC, 0xfcc63000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_PSOC, 0xfcc64000, 3, 0, 0, 0x1000, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_PLL, 0xfcc6c000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_PLL, 0xfcc6d000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_PLL, 0xfcc6e000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_PSOC, 0xfcc74000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_PSOC, 0xfcc76000, 3, 0, 0, 0x1000, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_PSOC, 0xfcc79000, 2, 0, 0, 0x1000, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_PSOC, 0xfcc7b000, 3, 0, 0, 0x1000, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_PSOC, 0xfcc7f000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_PDMA, 0xfcc88000, 2, 0, 0, 0x10000, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_PDMA, 0xfcc8a000, 2, 0, 0, 0x10000, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_PDMA, 0xfcc8b000, 2, 0, 0, 0x10000, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_PDMA, 0xfcc8c000, 2, 0, 0, 0x10000, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_CPU, 0xfccc0000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_CPU, 0xfccc1000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_CPU, 0xfccc3000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_PMMU, 0xfcd00000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_PMMU, 0xfcd01000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_PMMU, 0xfcd02000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_PMMU, 0xfcd03000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_PLL, 0xfcd04000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_PLL, 0xfcd05000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_XBAR, 0xfcd40000, 4, 0, 0, 0x10000, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_PLL, 0xfcd41000, 4, 0, 0, 0x10000, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_PLL, 0xfcd42000, 4, 0, 0, 0x10000, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_PLL, 0xfcd43000, 4, 0, 0, 0x10000, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_PLL, 0xfcd44000, 2, 0, 0, 0x10000, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_XBAR, 0xfcd48000, 4, 0, 0, 0x10000, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_PLL, 0xfcd55000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_PLL, 0xfcd64000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_PLL, 0xfcd65000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_PLL, 0xfcd74000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_ROT, 0xfce08000, 2, 0, 0, 0x10000, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_ROT, 0xfce0a000, 2, 0, 0, 0x10000, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_ROT, 0xfce0b000, 2, 0, 0, 0x10000, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_ROT, 0xfce0c000, 2, 0, 0, 0x10000, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_RTR, 0xfce40000, 4, 2, 0, 0x10000, 0x4000, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_RTR, 0xfce41000, 4, 2, 0, 0x10000, 0x4000, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_RTR, 0xfce42000, 4, 2, 0, 0x10000, 0x4000, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_RTR, 0xfce43000, 4, 2, 0, 0x10000, 0x4000, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_RTR, 0xfce48000, 4, 0, 0, 0x10000, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_RTR, 0xfce49000, 4, 0, 0, 0x10000, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_RTR, 0xfce4a000, 4, 0, 0, 0x10000, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_RTR, 0xfce4b000, 4, 0, 0, 0x10000, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_RTR, 0xfce4c000, 4, 0, 0, 0x10000, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_ARC_FARM, 0xfce81000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_ARC_FARM, 0xfce82000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_ARC_FARM, 0xfce88000, 4, 0, 0, 0x20000, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_ARC_FARM, 0xfce89000, 4, 0, 0, 0x20000, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_ARC_FARM, 0xfce8b000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_ARC_FARM, 0xfce8c000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_ARC_FARM, 0xfce8f000, 4, 0, 0, 0x20000, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_DEC, 0xfcf03000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_DEC, 0xfcf04000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_DEC, 0xfcf05000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_XFT, 0xfcf40000, 4, 0, 0, 0x10000, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_PLL, 0xfcf41000, 4, 0, 0, 0x10000, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_PLL, 0xfcf42000, 4, 0, 0, 0x10000, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_PLL, 0xfcf43000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_PLL, 0xfcf53000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_PLL, 0xfcf73000, 1, 0, 0, 0x0, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_HBM, 0xfd000000, 6, 2, 0, 0x80000, 0x20000, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_HBM, 0xfd001000, 6, 2, 8, 0x80000, 0x20000, 0x1000 }, \
+ { GAUDI2_BLOCK_TYPE_HBM, 0xfd009000, 6, 2, 0, 0x80000, 0x20000, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_NIC, 0xfd400000, 12, 2, 15, 0x80000, 0x20000, 0x1000 }, \
+ { GAUDI2_BLOCK_TYPE_NIC, 0xfd418000, 12, 2, 0, 0x80000, 0x20000, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_NIC, 0xfd41a000, 12, 2, 0, 0x80000, 0x20000, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_NIC, 0xfd41f000, 12, 2, 0, 0x80000, 0x20000, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_NIC, 0xfd448000, 12, 0, 0, 0x80000, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_NIC, 0xfd449000, 12, 0, 0, 0x80000, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_NIC, 0xfd44a000, 12, 2, 0, 0x80000, 0x1000, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_NIC, 0xfd44c000, 12, 2, 0, 0x80000, 0x1000, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_NIC, 0xfd450000, 12, 2, 0, 0x80000, 0x1000, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_NIC, 0xfd452000, 12, 2, 0, 0x80000, 0x1000, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_NIC, 0xfd454000, 12, 0, 0, 0x80000, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_NIC, 0xfd455000, 12, 0, 0, 0x80000, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_NIC, 0xfd460000, 12, 0, 0, 0x80000, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_NIC, 0xfd468000, 12, 0, 0, 0x80000, 0x0, 0x0 }, \
+ { GAUDI2_BLOCK_TYPE_NIC, 0xfd469000, 12, 0, 0, 0x80000, 0x0, 0x0 }, \
+}
+
+#endif
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/cpu_ca53_cfg_masks.h b/drivers/accel/habanalabs/include/goya/asic_reg/cpu_ca53_cfg_masks.h
index 4e0dbbbbde20..4e0dbbbbde20 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/cpu_ca53_cfg_masks.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/cpu_ca53_cfg_masks.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/cpu_ca53_cfg_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/cpu_ca53_cfg_regs.h
index f3faf1aad91a..f3faf1aad91a 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/cpu_ca53_cfg_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/cpu_ca53_cfg_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/cpu_if_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/cpu_if_regs.h
index cf657918962a..cf657918962a 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/cpu_if_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/cpu_if_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/cpu_pll_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/cpu_pll_regs.h
index 8c8f9726d4b9..8c8f9726d4b9 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/cpu_pll_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/cpu_pll_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_0_masks.h b/drivers/accel/habanalabs/include/goya/asic_reg/dma_ch_0_masks.h
index 028143408401..028143408401 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_0_masks.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/dma_ch_0_masks.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_0_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/dma_ch_0_regs.h
index 0b246fe6ad04..0b246fe6ad04 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_0_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/dma_ch_0_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_1_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/dma_ch_1_regs.h
index 5449031722f2..5449031722f2 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_1_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/dma_ch_1_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_2_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/dma_ch_2_regs.h
index a4768521d18a..a4768521d18a 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_2_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/dma_ch_2_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_3_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/dma_ch_3_regs.h
index 619d01897ff8..619d01897ff8 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_3_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/dma_ch_3_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_4_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/dma_ch_4_regs.h
index 038617e163f1..038617e163f1 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/dma_ch_4_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/dma_ch_4_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_macro_masks.h b/drivers/accel/habanalabs/include/goya/asic_reg/dma_macro_masks.h
index f43b564af1be..f43b564af1be 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/dma_macro_masks.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/dma_macro_masks.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_macro_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/dma_macro_regs.h
index c3bfc1b8e3fd..c3bfc1b8e3fd 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/dma_macro_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/dma_macro_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_nrtr_masks.h b/drivers/accel/habanalabs/include/goya/asic_reg/dma_nrtr_masks.h
index bc977488c072..bc977488c072 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/dma_nrtr_masks.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/dma_nrtr_masks.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_nrtr_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/dma_nrtr_regs.h
index c4abc7ff1fc6..c4abc7ff1fc6 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/dma_nrtr_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/dma_nrtr_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_0_masks.h b/drivers/accel/habanalabs/include/goya/asic_reg/dma_qm_0_masks.h
index b17f72c31ab6..b17f72c31ab6 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_0_masks.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/dma_qm_0_masks.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_0_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/dma_qm_0_regs.h
index bf360b301154..bf360b301154 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_0_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/dma_qm_0_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_1_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/dma_qm_1_regs.h
index 51d432d05ac4..51d432d05ac4 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_1_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/dma_qm_1_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_2_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/dma_qm_2_regs.h
index 18fc0c2b6cc2..18fc0c2b6cc2 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_2_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/dma_qm_2_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_3_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/dma_qm_3_regs.h
index 6cf7204bf5cc..6cf7204bf5cc 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_3_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/dma_qm_3_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_4_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/dma_qm_4_regs.h
index 36fef2682875..36fef2682875 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/dma_qm_4_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/dma_qm_4_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/goya_blocks.h b/drivers/accel/habanalabs/include/goya/asic_reg/goya_blocks.h
index 85b15010cd7a..85b15010cd7a 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/goya_blocks.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/goya_blocks.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/goya_masks.h b/drivers/accel/habanalabs/include/goya/asic_reg/goya_masks.h
index 9ff3cb245580..9ff3cb245580 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/goya_masks.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/goya_masks.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/goya_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/goya_regs.h
index ce65c9da5c60..ce65c9da5c60 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/goya_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/goya_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/ic_pll_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/ic_pll_regs.h
index 4ae7fed8b18c..4ae7fed8b18c 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/ic_pll_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/ic_pll_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mc_pll_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/mc_pll_regs.h
index 6d35d852798b..6d35d852798b 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/mc_pll_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/mc_pll_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme1_rtr_masks.h b/drivers/accel/habanalabs/include/goya/asic_reg/mme1_rtr_masks.h
index 6c23f8b96e7e..6c23f8b96e7e 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/mme1_rtr_masks.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/mme1_rtr_masks.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme1_rtr_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/mme1_rtr_regs.h
index 122e9d529939..122e9d529939 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/mme1_rtr_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/mme1_rtr_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme2_rtr_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/mme2_rtr_regs.h
index 00ce2252bbfb..00ce2252bbfb 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/mme2_rtr_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/mme2_rtr_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme3_rtr_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/mme3_rtr_regs.h
index 8e3eb7fd2070..8e3eb7fd2070 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/mme3_rtr_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/mme3_rtr_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme4_rtr_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/mme4_rtr_regs.h
index 79b67bbc8567..79b67bbc8567 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/mme4_rtr_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/mme4_rtr_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme5_rtr_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/mme5_rtr_regs.h
index 0ac3c37ce47f..0ac3c37ce47f 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/mme5_rtr_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/mme5_rtr_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme6_rtr_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/mme6_rtr_regs.h
index 50c49cce72a6..50c49cce72a6 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/mme6_rtr_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/mme6_rtr_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme_cmdq_masks.h b/drivers/accel/habanalabs/include/goya/asic_reg/mme_cmdq_masks.h
index fe7d95bdcef9..fe7d95bdcef9 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/mme_cmdq_masks.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/mme_cmdq_masks.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme_cmdq_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/mme_cmdq_regs.h
index 5f8b85d2b4b1..5f8b85d2b4b1 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/mme_cmdq_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/mme_cmdq_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme_masks.h b/drivers/accel/habanalabs/include/goya/asic_reg/mme_masks.h
index 1882c413cbe0..1882c413cbe0 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/mme_masks.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/mme_masks.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme_qm_masks.h b/drivers/accel/habanalabs/include/goya/asic_reg/mme_qm_masks.h
index e464e381555c..e464e381555c 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/mme_qm_masks.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/mme_qm_masks.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme_qm_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/mme_qm_regs.h
index 538708beffc9..538708beffc9 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/mme_qm_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/mme_qm_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mme_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/mme_regs.h
index 0396cbfd5c89..0396cbfd5c89 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/mme_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/mme_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mmu_masks.h b/drivers/accel/habanalabs/include/goya/asic_reg/mmu_masks.h
index c3e69062b135..c3e69062b135 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/mmu_masks.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/mmu_masks.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/mmu_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/mmu_regs.h
index 7ec81f12031e..7ec81f12031e 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/mmu_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/mmu_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/pci_nrtr_masks.h b/drivers/accel/habanalabs/include/goya/asic_reg/pci_nrtr_masks.h
index ceb59f2e28b3..ceb59f2e28b3 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/pci_nrtr_masks.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/pci_nrtr_masks.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/pci_nrtr_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/pci_nrtr_regs.h
index dd067f301ac2..dd067f301ac2 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/pci_nrtr_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/pci_nrtr_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/pcie_aux_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/pcie_aux_regs.h
index 35b1d8ac6f63..35b1d8ac6f63 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/pcie_aux_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/pcie_aux_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/pcie_wrap_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/pcie_wrap_regs.h
index d1e55aace4a0..d1e55aace4a0 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/pcie_wrap_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/pcie_wrap_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/psoc_emmc_pll_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/psoc_emmc_pll_regs.h
index 9271ea95ebe9..9271ea95ebe9 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/psoc_emmc_pll_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/psoc_emmc_pll_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/psoc_etr_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/psoc_etr_regs.h
index b7c33e025db5..b7c33e025db5 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/psoc_etr_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/psoc_etr_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/psoc_global_conf_masks.h b/drivers/accel/habanalabs/include/goya/asic_reg/psoc_global_conf_masks.h
index 324266653c9a..324266653c9a 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/psoc_global_conf_masks.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/psoc_global_conf_masks.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/psoc_global_conf_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/psoc_global_conf_regs.h
index 8141f422e712..8141f422e712 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/psoc_global_conf_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/psoc_global_conf_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/psoc_mme_pll_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/psoc_mme_pll_regs.h
index 4789ebb9c337..4789ebb9c337 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/psoc_mme_pll_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/psoc_mme_pll_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/psoc_pci_pll_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/psoc_pci_pll_regs.h
index 27a296ea6c3d..27a296ea6c3d 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/psoc_pci_pll_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/psoc_pci_pll_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/psoc_spi_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/psoc_spi_regs.h
index 66aee7fa6b1e..66aee7fa6b1e 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/psoc_spi_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/psoc_spi_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/psoc_timestamp_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/psoc_timestamp_regs.h
index 9ce24597d4b0..9ce24597d4b0 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/psoc_timestamp_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/psoc_timestamp_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x0_rtr_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/sram_y0_x0_rtr_regs.h
index 2ea1770b078f..2ea1770b078f 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x0_rtr_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/sram_y0_x0_rtr_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x1_rtr_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/sram_y0_x1_rtr_regs.h
index 37e0713efa73..37e0713efa73 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x1_rtr_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/sram_y0_x1_rtr_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x2_rtr_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/sram_y0_x2_rtr_regs.h
index d2572279a2b9..d2572279a2b9 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x2_rtr_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/sram_y0_x2_rtr_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x3_rtr_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/sram_y0_x3_rtr_regs.h
index 68c5b402c506..68c5b402c506 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x3_rtr_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/sram_y0_x3_rtr_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x4_rtr_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/sram_y0_x4_rtr_regs.h
index a42f1ba06d28..a42f1ba06d28 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/sram_y0_x4_rtr_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/sram_y0_x4_rtr_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/stlb_masks.h b/drivers/accel/habanalabs/include/goya/asic_reg/stlb_masks.h
index 94f2ed4a36bd..94f2ed4a36bd 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/stlb_masks.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/stlb_masks.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/stlb_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/stlb_regs.h
index 35013f65acd2..35013f65acd2 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/stlb_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/stlb_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cfg_masks.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc0_cfg_masks.h
index 89c9507a512f..89c9507a512f 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cfg_masks.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/tpc0_cfg_masks.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cfg_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc0_cfg_regs.h
index 7d71c4b73a5e..7d71c4b73a5e 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cfg_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/tpc0_cfg_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cmdq_masks.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc0_cmdq_masks.h
index 9395f2458771..9395f2458771 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cmdq_masks.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/tpc0_cmdq_masks.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cmdq_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc0_cmdq_regs.h
index bc51df573bf0..bc51df573bf0 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_cmdq_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/tpc0_cmdq_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_eml_cfg_masks.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc0_eml_cfg_masks.h
index 553c6b6bd5ec..553c6b6bd5ec 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_eml_cfg_masks.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/tpc0_eml_cfg_masks.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_eml_cfg_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc0_eml_cfg_regs.h
index 8495479c3659..8495479c3659 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_eml_cfg_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/tpc0_eml_cfg_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_nrtr_masks.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc0_nrtr_masks.h
index 43fafcf01041..43fafcf01041 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_nrtr_masks.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/tpc0_nrtr_masks.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_nrtr_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc0_nrtr_regs.h
index ce3346dd2042..ce3346dd2042 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_nrtr_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/tpc0_nrtr_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_qm_masks.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc0_qm_masks.h
index 2e4b45947944..2e4b45947944 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_qm_masks.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/tpc0_qm_masks.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_qm_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc0_qm_regs.h
index 4fa09eb88878..4fa09eb88878 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc0_qm_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/tpc0_qm_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_cfg_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc1_cfg_regs.h
index 928eef1808ae..928eef1808ae 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_cfg_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/tpc1_cfg_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_cmdq_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc1_cmdq_regs.h
index 30ae0f307328..30ae0f307328 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_cmdq_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/tpc1_cmdq_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_qm_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc1_qm_regs.h
index b95de4f95ba9..b95de4f95ba9 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_qm_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/tpc1_qm_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_rtr_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc1_rtr_regs.h
index 0f91e307879e..0f91e307879e 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc1_rtr_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/tpc1_rtr_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_cfg_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc2_cfg_regs.h
index 73421227f35b..73421227f35b 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_cfg_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/tpc2_cfg_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_cmdq_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc2_cmdq_regs.h
index 27b66bf2da9f..27b66bf2da9f 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_cmdq_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/tpc2_cmdq_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_qm_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc2_qm_regs.h
index 31e5b2f53905..31e5b2f53905 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_qm_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/tpc2_qm_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_rtr_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc2_rtr_regs.h
index 4eddeaa15d94..4eddeaa15d94 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc2_rtr_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/tpc2_rtr_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_cfg_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc3_cfg_regs.h
index ce573a1a8361..ce573a1a8361 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_cfg_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/tpc3_cfg_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_cmdq_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc3_cmdq_regs.h
index 11d81fca0a0f..11d81fca0a0f 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_cmdq_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/tpc3_cmdq_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_qm_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc3_qm_regs.h
index e41595a19e69..e41595a19e69 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_qm_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/tpc3_qm_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_rtr_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc3_rtr_regs.h
index 34a438b1efe5..34a438b1efe5 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc3_rtr_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/tpc3_rtr_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_cfg_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc4_cfg_regs.h
index d44caf0fc1bb..d44caf0fc1bb 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_cfg_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/tpc4_cfg_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_cmdq_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc4_cmdq_regs.h
index f13a6532961f..f13a6532961f 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_cmdq_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/tpc4_cmdq_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_qm_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc4_qm_regs.h
index db081fc17cfc..db081fc17cfc 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_qm_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/tpc4_qm_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_rtr_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc4_rtr_regs.h
index 8c5372303b28..8c5372303b28 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc4_rtr_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/tpc4_rtr_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_cfg_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc5_cfg_regs.h
index 5139fde71011..5139fde71011 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_cfg_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/tpc5_cfg_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_cmdq_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc5_cmdq_regs.h
index 1e7cd6e1e888..1e7cd6e1e888 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_cmdq_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/tpc5_cmdq_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_qm_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc5_qm_regs.h
index ac0d3820cd6b..ac0d3820cd6b 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_qm_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/tpc5_qm_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_rtr_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc5_rtr_regs.h
index 57f83bc3b17d..57f83bc3b17d 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc5_rtr_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/tpc5_rtr_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_cfg_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc6_cfg_regs.h
index 94e0191c06c1..94e0191c06c1 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_cfg_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/tpc6_cfg_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_cmdq_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc6_cmdq_regs.h
index 7a1a0e87b225..7a1a0e87b225 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_cmdq_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/tpc6_cmdq_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_qm_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc6_qm_regs.h
index 80fa0fe0f60f..80fa0fe0f60f 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_qm_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/tpc6_qm_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_rtr_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc6_rtr_regs.h
index d6cae8b8af66..d6cae8b8af66 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc6_rtr_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/tpc6_rtr_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_cfg_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc7_cfg_regs.h
index 234147adb779..234147adb779 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_cfg_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/tpc7_cfg_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_cmdq_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc7_cmdq_regs.h
index 4c160632fe7d..4c160632fe7d 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_cmdq_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/tpc7_cmdq_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_nrtr_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc7_nrtr_regs.h
index 0c13d4d167aa..0c13d4d167aa 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_nrtr_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/tpc7_nrtr_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_qm_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc7_qm_regs.h
index cbe11425bfb0..cbe11425bfb0 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc7_qm_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/tpc7_qm_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/asic_reg/tpc_pll_regs.h b/drivers/accel/habanalabs/include/goya/asic_reg/tpc_pll_regs.h
index e25e19660a9d..e25e19660a9d 100644
--- a/drivers/misc/habanalabs/include/goya/asic_reg/tpc_pll_regs.h
+++ b/drivers/accel/habanalabs/include/goya/asic_reg/tpc_pll_regs.h
diff --git a/drivers/misc/habanalabs/include/goya/goya.h b/drivers/accel/habanalabs/include/goya/goya.h
index 1b4ca435021d..1b4ca435021d 100644
--- a/drivers/misc/habanalabs/include/goya/goya.h
+++ b/drivers/accel/habanalabs/include/goya/goya.h
diff --git a/drivers/misc/habanalabs/include/goya/goya_async_events.h b/drivers/accel/habanalabs/include/goya/goya_async_events.h
index 09081401cb1d..09081401cb1d 100644
--- a/drivers/misc/habanalabs/include/goya/goya_async_events.h
+++ b/drivers/accel/habanalabs/include/goya/goya_async_events.h
diff --git a/drivers/misc/habanalabs/include/goya/goya_coresight.h b/drivers/accel/habanalabs/include/goya/goya_coresight.h
index 6e933c0ca5cd..6e933c0ca5cd 100644
--- a/drivers/misc/habanalabs/include/goya/goya_coresight.h
+++ b/drivers/accel/habanalabs/include/goya/goya_coresight.h
diff --git a/drivers/misc/habanalabs/include/goya/goya_fw_if.h b/drivers/accel/habanalabs/include/goya/goya_fw_if.h
index bc05f86c73ac..bc05f86c73ac 100644
--- a/drivers/misc/habanalabs/include/goya/goya_fw_if.h
+++ b/drivers/accel/habanalabs/include/goya/goya_fw_if.h
diff --git a/drivers/misc/habanalabs/include/goya/goya_packets.h b/drivers/accel/habanalabs/include/goya/goya_packets.h
index 896799204fb0..896799204fb0 100644
--- a/drivers/misc/habanalabs/include/goya/goya_packets.h
+++ b/drivers/accel/habanalabs/include/goya/goya_packets.h
diff --git a/drivers/misc/habanalabs/include/goya/goya_reg_map.h b/drivers/accel/habanalabs/include/goya/goya_reg_map.h
index f3ab282cafa4..f3ab282cafa4 100644
--- a/drivers/misc/habanalabs/include/goya/goya_reg_map.h
+++ b/drivers/accel/habanalabs/include/goya/goya_reg_map.h
diff --git a/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h b/drivers/accel/habanalabs/include/hw_ip/mmu/mmu_general.h
index d408feecd483..d408feecd483 100644
--- a/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h
+++ b/drivers/accel/habanalabs/include/hw_ip/mmu/mmu_general.h
diff --git a/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_v1_0.h b/drivers/accel/habanalabs/include/hw_ip/mmu/mmu_v1_0.h
index 86511002e367..86511002e367 100644
--- a/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_v1_0.h
+++ b/drivers/accel/habanalabs/include/hw_ip/mmu/mmu_v1_0.h
diff --git a/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_v1_1.h b/drivers/accel/habanalabs/include/hw_ip/mmu/mmu_v1_1.h
index 9c727a5d47b4..9c727a5d47b4 100644
--- a/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_v1_1.h
+++ b/drivers/accel/habanalabs/include/hw_ip/mmu/mmu_v1_1.h
diff --git a/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_v2_0.h b/drivers/accel/habanalabs/include/hw_ip/mmu/mmu_v2_0.h
index cd7bf25d2da9..cd7bf25d2da9 100644
--- a/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_v2_0.h
+++ b/drivers/accel/habanalabs/include/hw_ip/mmu/mmu_v2_0.h
diff --git a/drivers/misc/habanalabs/include/hw_ip/pci/pci_general.h b/drivers/accel/habanalabs/include/hw_ip/pci/pci_general.h
index f5d497dc9bdc..f5d497dc9bdc 100644
--- a/drivers/misc/habanalabs/include/hw_ip/pci/pci_general.h
+++ b/drivers/accel/habanalabs/include/hw_ip/pci/pci_general.h
diff --git a/drivers/accel/ivpu/Kconfig b/drivers/accel/ivpu/Kconfig
new file mode 100644
index 000000000000..9bdf168bf1d0
--- /dev/null
+++ b/drivers/accel/ivpu/Kconfig
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config DRM_ACCEL_IVPU
+ tristate "Intel VPU for Meteor Lake and newer"
+ depends on DRM_ACCEL
+ depends on X86_64 && !UML
+ depends on PCI && PCI_MSI
+ select FW_LOADER
+ select SHMEM
+ help
+ Choose this option if you have a system that has an 14th generation Intel CPU
+ or newer. VPU stands for Versatile Processing Unit and it's a CPU-integrated
+ inference accelerator for Computer Vision and Deep Learning applications.
+
+ If "M" is selected, the module will be called intel_vpu.
diff --git a/drivers/accel/ivpu/Makefile b/drivers/accel/ivpu/Makefile
new file mode 100644
index 000000000000..80f1fb3548ae
--- /dev/null
+++ b/drivers/accel/ivpu/Makefile
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: GPL-2.0-only
+# Copyright (C) 2023 Intel Corporation
+
+intel_vpu-y := \
+ ivpu_drv.o \
+ ivpu_fw.o \
+ ivpu_gem.o \
+ ivpu_hw_mtl.o \
+ ivpu_ipc.o \
+ ivpu_job.o \
+ ivpu_jsm_msg.o \
+ ivpu_mmu.o \
+ ivpu_mmu_context.o \
+ ivpu_pm.o
+
+obj-$(CONFIG_DRM_ACCEL_IVPU) += intel_vpu.o \ No newline at end of file
diff --git a/drivers/accel/ivpu/TODO b/drivers/accel/ivpu/TODO
new file mode 100644
index 000000000000..9077217ae10f
--- /dev/null
+++ b/drivers/accel/ivpu/TODO
@@ -0,0 +1,11 @@
+- Move to threaded_irqs to mitigate potential infinite loop in ivpu_ipc_irq_handler()
+- Implement support for BLOB IDs
+- Add debugfs support to improve debugging and testing
+- Add tracing events for performance debugging
+- Implement HW based scheduling support
+- Use syncobjs for submit/sync
+- Refactor IPC protocol to improve message latency
+- Implement BO cache and MADVISE IOCTL
+- Add support for user allocated buffers using prime import and dma-buf heaps
+- Refactor struct ivpu_bo to use struct drm_gem_shmem_object
+- Add driver/device documentation
diff --git a/drivers/accel/ivpu/ivpu_drv.c b/drivers/accel/ivpu/ivpu_drv.c
new file mode 100644
index 000000000000..231f29bb5025
--- /dev/null
+++ b/drivers/accel/ivpu/ivpu_drv.c
@@ -0,0 +1,655 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020-2023 Intel Corporation
+ */
+
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include <drm/drm_accel.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_ioctl.h>
+#include <drm/drm_prime.h>
+
+#include "vpu_boot_api.h"
+#include "ivpu_drv.h"
+#include "ivpu_fw.h"
+#include "ivpu_gem.h"
+#include "ivpu_hw.h"
+#include "ivpu_ipc.h"
+#include "ivpu_job.h"
+#include "ivpu_jsm_msg.h"
+#include "ivpu_mmu.h"
+#include "ivpu_mmu_context.h"
+#include "ivpu_pm.h"
+
+#ifndef DRIVER_VERSION_STR
+#define DRIVER_VERSION_STR __stringify(DRM_IVPU_DRIVER_MAJOR) "." \
+ __stringify(DRM_IVPU_DRIVER_MINOR) "."
+#endif
+
+static const struct drm_driver driver;
+
+static struct lock_class_key submitted_jobs_xa_lock_class_key;
+
+int ivpu_dbg_mask;
+module_param_named(dbg_mask, ivpu_dbg_mask, int, 0644);
+MODULE_PARM_DESC(dbg_mask, "Driver debug mask. See IVPU_DBG_* macros.");
+
+int ivpu_test_mode;
+module_param_named_unsafe(test_mode, ivpu_test_mode, int, 0644);
+MODULE_PARM_DESC(test_mode, "Test mode: 0 - normal operation, 1 - fw unit test, 2 - null hw");
+
+u8 ivpu_pll_min_ratio;
+module_param_named(pll_min_ratio, ivpu_pll_min_ratio, byte, 0644);
+MODULE_PARM_DESC(pll_min_ratio, "Minimum PLL ratio used to set VPU frequency");
+
+u8 ivpu_pll_max_ratio = U8_MAX;
+module_param_named(pll_max_ratio, ivpu_pll_max_ratio, byte, 0644);
+MODULE_PARM_DESC(pll_max_ratio, "Maximum PLL ratio used to set VPU frequency");
+
+struct ivpu_file_priv *ivpu_file_priv_get(struct ivpu_file_priv *file_priv)
+{
+ struct ivpu_device *vdev = file_priv->vdev;
+
+ kref_get(&file_priv->ref);
+
+ ivpu_dbg(vdev, KREF, "file_priv get: ctx %u refcount %u\n",
+ file_priv->ctx.id, kref_read(&file_priv->ref));
+
+ return file_priv;
+}
+
+struct ivpu_file_priv *ivpu_file_priv_get_by_ctx_id(struct ivpu_device *vdev, unsigned long id)
+{
+ struct ivpu_file_priv *file_priv;
+
+ xa_lock_irq(&vdev->context_xa);
+ file_priv = xa_load(&vdev->context_xa, id);
+ /* file_priv may still be in context_xa during file_priv_release() */
+ if (file_priv && !kref_get_unless_zero(&file_priv->ref))
+ file_priv = NULL;
+ xa_unlock_irq(&vdev->context_xa);
+
+ if (file_priv)
+ ivpu_dbg(vdev, KREF, "file_priv get by id: ctx %u refcount %u\n",
+ file_priv->ctx.id, kref_read(&file_priv->ref));
+
+ return file_priv;
+}
+
+static void file_priv_release(struct kref *ref)
+{
+ struct ivpu_file_priv *file_priv = container_of(ref, struct ivpu_file_priv, ref);
+ struct ivpu_device *vdev = file_priv->vdev;
+
+ ivpu_dbg(vdev, FILE, "file_priv release: ctx %u\n", file_priv->ctx.id);
+
+ ivpu_cmdq_release_all(file_priv);
+ ivpu_bo_remove_all_bos_from_context(&file_priv->ctx);
+ ivpu_jsm_context_release(vdev, file_priv->ctx.id);
+ ivpu_mmu_user_context_fini(vdev, &file_priv->ctx);
+ drm_WARN_ON(&vdev->drm, xa_erase_irq(&vdev->context_xa, file_priv->ctx.id) != file_priv);
+ mutex_destroy(&file_priv->lock);
+ kfree(file_priv);
+}
+
+void ivpu_file_priv_put(struct ivpu_file_priv **link)
+{
+ struct ivpu_file_priv *file_priv = *link;
+ struct ivpu_device *vdev = file_priv->vdev;
+
+ drm_WARN_ON(&vdev->drm, !file_priv);
+
+ ivpu_dbg(vdev, KREF, "file_priv put: ctx %u refcount %u\n",
+ file_priv->ctx.id, kref_read(&file_priv->ref));
+
+ *link = NULL;
+ kref_put(&file_priv->ref, file_priv_release);
+}
+
+static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct ivpu_file_priv *file_priv = file->driver_priv;
+ struct ivpu_device *vdev = file_priv->vdev;
+ struct pci_dev *pdev = to_pci_dev(vdev->drm.dev);
+ struct drm_ivpu_param *args = data;
+ int ret = 0;
+
+ switch (args->param) {
+ case DRM_IVPU_PARAM_DEVICE_ID:
+ args->value = pdev->device;
+ break;
+ case DRM_IVPU_PARAM_DEVICE_REVISION:
+ args->value = pdev->revision;
+ break;
+ case DRM_IVPU_PARAM_PLATFORM_TYPE:
+ args->value = vdev->platform;
+ break;
+ case DRM_IVPU_PARAM_CORE_CLOCK_RATE:
+ args->value = ivpu_hw_reg_pll_freq_get(vdev);
+ break;
+ case DRM_IVPU_PARAM_NUM_CONTEXTS:
+ args->value = ivpu_get_context_count(vdev);
+ break;
+ case DRM_IVPU_PARAM_CONTEXT_BASE_ADDRESS:
+ args->value = vdev->hw->ranges.user_low.start;
+ break;
+ case DRM_IVPU_PARAM_CONTEXT_PRIORITY:
+ args->value = file_priv->priority;
+ break;
+ case DRM_IVPU_PARAM_CONTEXT_ID:
+ args->value = file_priv->ctx.id;
+ break;
+ case DRM_IVPU_PARAM_FW_API_VERSION:
+ if (args->index < VPU_FW_API_VER_NUM) {
+ struct vpu_firmware_header *fw_hdr;
+
+ fw_hdr = (struct vpu_firmware_header *)vdev->fw->file->data;
+ args->value = fw_hdr->api_version[args->index];
+ } else {
+ ret = -EINVAL;
+ }
+ break;
+ case DRM_IVPU_PARAM_ENGINE_HEARTBEAT:
+ ret = ivpu_jsm_get_heartbeat(vdev, args->index, &args->value);
+ break;
+ case DRM_IVPU_PARAM_UNIQUE_INFERENCE_ID:
+ args->value = (u64)atomic64_inc_return(&vdev->unique_id_counter);
+ break;
+ case DRM_IVPU_PARAM_TILE_CONFIG:
+ args->value = vdev->hw->tile_fuse;
+ break;
+ case DRM_IVPU_PARAM_SKU:
+ args->value = vdev->hw->sku;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int ivpu_set_param_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct ivpu_file_priv *file_priv = file->driver_priv;
+ struct drm_ivpu_param *args = data;
+ int ret = 0;
+
+ switch (args->param) {
+ case DRM_IVPU_PARAM_CONTEXT_PRIORITY:
+ if (args->value <= DRM_IVPU_CONTEXT_PRIORITY_REALTIME)
+ file_priv->priority = args->value;
+ else
+ ret = -EINVAL;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static int ivpu_open(struct drm_device *dev, struct drm_file *file)
+{
+ struct ivpu_device *vdev = to_ivpu_device(dev);
+ struct ivpu_file_priv *file_priv;
+ u32 ctx_id;
+ void *old;
+ int ret;
+
+ ret = xa_alloc_irq(&vdev->context_xa, &ctx_id, NULL, vdev->context_xa_limit, GFP_KERNEL);
+ if (ret) {
+ ivpu_err(vdev, "Failed to allocate context id: %d\n", ret);
+ return ret;
+ }
+
+ file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
+ if (!file_priv) {
+ ret = -ENOMEM;
+ goto err_xa_erase;
+ }
+
+ file_priv->vdev = vdev;
+ file_priv->priority = DRM_IVPU_CONTEXT_PRIORITY_NORMAL;
+ kref_init(&file_priv->ref);
+ mutex_init(&file_priv->lock);
+
+ ret = ivpu_mmu_user_context_init(vdev, &file_priv->ctx, ctx_id);
+ if (ret)
+ goto err_mutex_destroy;
+
+ old = xa_store_irq(&vdev->context_xa, ctx_id, file_priv, GFP_KERNEL);
+ if (xa_is_err(old)) {
+ ret = xa_err(old);
+ ivpu_err(vdev, "Failed to store context %u: %d\n", ctx_id, ret);
+ goto err_ctx_fini;
+ }
+
+ ivpu_dbg(vdev, FILE, "file_priv create: ctx %u process %s pid %d\n",
+ ctx_id, current->comm, task_pid_nr(current));
+
+ file->driver_priv = file_priv;
+ return 0;
+
+err_ctx_fini:
+ ivpu_mmu_user_context_fini(vdev, &file_priv->ctx);
+err_mutex_destroy:
+ mutex_destroy(&file_priv->lock);
+ kfree(file_priv);
+err_xa_erase:
+ xa_erase_irq(&vdev->context_xa, ctx_id);
+ return ret;
+}
+
+static void ivpu_postclose(struct drm_device *dev, struct drm_file *file)
+{
+ struct ivpu_file_priv *file_priv = file->driver_priv;
+ struct ivpu_device *vdev = to_ivpu_device(dev);
+
+ ivpu_dbg(vdev, FILE, "file_priv close: ctx %u process %s pid %d\n",
+ file_priv->ctx.id, current->comm, task_pid_nr(current));
+
+ ivpu_file_priv_put(&file_priv);
+}
+
+static const struct drm_ioctl_desc ivpu_drm_ioctls[] = {
+ DRM_IOCTL_DEF_DRV(IVPU_GET_PARAM, ivpu_get_param_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(IVPU_SET_PARAM, ivpu_set_param_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(IVPU_BO_CREATE, ivpu_bo_create_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(IVPU_BO_INFO, ivpu_bo_info_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(IVPU_SUBMIT, ivpu_submit_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(IVPU_BO_WAIT, ivpu_bo_wait_ioctl, 0),
+};
+
+static int ivpu_wait_for_ready(struct ivpu_device *vdev)
+{
+ struct ivpu_ipc_consumer cons;
+ struct ivpu_ipc_hdr ipc_hdr;
+ unsigned long timeout;
+ int ret;
+
+ if (ivpu_test_mode == IVPU_TEST_MODE_FW_TEST)
+ return 0;
+
+ ivpu_ipc_consumer_add(vdev, &cons, IVPU_IPC_CHAN_BOOT_MSG);
+
+ timeout = jiffies + msecs_to_jiffies(vdev->timeout.boot);
+ while (1) {
+ ret = ivpu_ipc_irq_handler(vdev);
+ if (ret)
+ break;
+ ret = ivpu_ipc_receive(vdev, &cons, &ipc_hdr, NULL, 0);
+ if (ret != -ETIMEDOUT || time_after_eq(jiffies, timeout))
+ break;
+
+ cond_resched();
+ }
+
+ ivpu_ipc_consumer_del(vdev, &cons);
+
+ if (!ret && ipc_hdr.data_addr != IVPU_IPC_BOOT_MSG_DATA_ADDR) {
+ ivpu_err(vdev, "Invalid VPU ready message: 0x%x\n",
+ ipc_hdr.data_addr);
+ return -EIO;
+ }
+
+ if (!ret)
+ ivpu_info(vdev, "VPU ready message received successfully\n");
+ else
+ ivpu_hw_diagnose_failure(vdev);
+
+ return ret;
+}
+
+/**
+ * ivpu_boot() - Start VPU firmware
+ * @vdev: VPU device
+ *
+ * This function is paired with ivpu_shutdown() but it doesn't power up the
+ * VPU because power up has to be called very early in ivpu_probe().
+ */
+int ivpu_boot(struct ivpu_device *vdev)
+{
+ int ret;
+
+ /* Update boot params located at first 4KB of FW memory */
+ ivpu_fw_boot_params_setup(vdev, vdev->fw->mem->kvaddr);
+
+ ret = ivpu_hw_boot_fw(vdev);
+ if (ret) {
+ ivpu_err(vdev, "Failed to start the firmware: %d\n", ret);
+ return ret;
+ }
+
+ ret = ivpu_wait_for_ready(vdev);
+ if (ret) {
+ ivpu_err(vdev, "Failed to boot the firmware: %d\n", ret);
+ return ret;
+ }
+
+ ivpu_hw_irq_clear(vdev);
+ enable_irq(vdev->irq);
+ ivpu_hw_irq_enable(vdev);
+ ivpu_ipc_enable(vdev);
+ return 0;
+}
+
+int ivpu_shutdown(struct ivpu_device *vdev)
+{
+ int ret;
+
+ ivpu_hw_irq_disable(vdev);
+ disable_irq(vdev->irq);
+ ivpu_ipc_disable(vdev);
+ ivpu_mmu_disable(vdev);
+
+ ret = ivpu_hw_power_down(vdev);
+ if (ret)
+ ivpu_warn(vdev, "Failed to power down HW: %d\n", ret);
+
+ return ret;
+}
+
+static const struct file_operations ivpu_fops = {
+ .owner = THIS_MODULE,
+ DRM_ACCEL_FOPS,
+};
+
+static const struct drm_driver driver = {
+ .driver_features = DRIVER_GEM | DRIVER_COMPUTE_ACCEL,
+
+ .open = ivpu_open,
+ .postclose = ivpu_postclose,
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_import = ivpu_gem_prime_import,
+ .gem_prime_mmap = drm_gem_prime_mmap,
+
+ .ioctls = ivpu_drm_ioctls,
+ .num_ioctls = ARRAY_SIZE(ivpu_drm_ioctls),
+ .fops = &ivpu_fops,
+
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRM_IVPU_DRIVER_MAJOR,
+ .minor = DRM_IVPU_DRIVER_MINOR,
+};
+
+static int ivpu_irq_init(struct ivpu_device *vdev)
+{
+ struct pci_dev *pdev = to_pci_dev(vdev->drm.dev);
+ int ret;
+
+ ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI | PCI_IRQ_MSIX);
+ if (ret < 0) {
+ ivpu_err(vdev, "Failed to allocate a MSI IRQ: %d\n", ret);
+ return ret;
+ }
+
+ vdev->irq = pci_irq_vector(pdev, 0);
+
+ ret = devm_request_irq(vdev->drm.dev, vdev->irq, vdev->hw->ops->irq_handler,
+ IRQF_NO_AUTOEN, DRIVER_NAME, vdev);
+ if (ret)
+ ivpu_err(vdev, "Failed to request an IRQ %d\n", ret);
+
+ return ret;
+}
+
+static int ivpu_pci_init(struct ivpu_device *vdev)
+{
+ struct pci_dev *pdev = to_pci_dev(vdev->drm.dev);
+ struct resource *bar0 = &pdev->resource[0];
+ struct resource *bar4 = &pdev->resource[4];
+ int ret;
+
+ ivpu_dbg(vdev, MISC, "Mapping BAR0 (RegV) %pR\n", bar0);
+ vdev->regv = devm_ioremap_resource(vdev->drm.dev, bar0);
+ if (IS_ERR(vdev->regv)) {
+ ivpu_err(vdev, "Failed to map bar 0: %pe\n", vdev->regv);
+ return PTR_ERR(vdev->regv);
+ }
+
+ ivpu_dbg(vdev, MISC, "Mapping BAR4 (RegB) %pR\n", bar4);
+ vdev->regb = devm_ioremap_resource(vdev->drm.dev, bar4);
+ if (IS_ERR(vdev->regb)) {
+ ivpu_err(vdev, "Failed to map bar 4: %pe\n", vdev->regb);
+ return PTR_ERR(vdev->regb);
+ }
+
+ ret = dma_set_mask_and_coherent(vdev->drm.dev, DMA_BIT_MASK(38));
+ if (ret) {
+ ivpu_err(vdev, "Failed to set DMA mask: %d\n", ret);
+ return ret;
+ }
+ dma_set_max_seg_size(vdev->drm.dev, UINT_MAX);
+
+ /* Clear any pending errors */
+ pcie_capability_clear_word(pdev, PCI_EXP_DEVSTA, 0x3f);
+
+ ret = pcim_enable_device(pdev);
+ if (ret) {
+ ivpu_err(vdev, "Failed to enable PCI device: %d\n", ret);
+ return ret;
+ }
+
+ pci_set_master(pdev);
+
+ return 0;
+}
+
+static int ivpu_dev_init(struct ivpu_device *vdev)
+{
+ int ret;
+
+ vdev->hw = drmm_kzalloc(&vdev->drm, sizeof(*vdev->hw), GFP_KERNEL);
+ if (!vdev->hw)
+ return -ENOMEM;
+
+ vdev->mmu = drmm_kzalloc(&vdev->drm, sizeof(*vdev->mmu), GFP_KERNEL);
+ if (!vdev->mmu)
+ return -ENOMEM;
+
+ vdev->fw = drmm_kzalloc(&vdev->drm, sizeof(*vdev->fw), GFP_KERNEL);
+ if (!vdev->fw)
+ return -ENOMEM;
+
+ vdev->ipc = drmm_kzalloc(&vdev->drm, sizeof(*vdev->ipc), GFP_KERNEL);
+ if (!vdev->ipc)
+ return -ENOMEM;
+
+ vdev->pm = drmm_kzalloc(&vdev->drm, sizeof(*vdev->pm), GFP_KERNEL);
+ if (!vdev->pm)
+ return -ENOMEM;
+
+ vdev->hw->ops = &ivpu_hw_mtl_ops;
+ vdev->platform = IVPU_PLATFORM_INVALID;
+ vdev->context_xa_limit.min = IVPU_GLOBAL_CONTEXT_MMU_SSID + 1;
+ vdev->context_xa_limit.max = IVPU_CONTEXT_LIMIT;
+ atomic64_set(&vdev->unique_id_counter, 0);
+ xa_init_flags(&vdev->context_xa, XA_FLAGS_ALLOC);
+ xa_init_flags(&vdev->submitted_jobs_xa, XA_FLAGS_ALLOC1);
+ lockdep_set_class(&vdev->submitted_jobs_xa.xa_lock, &submitted_jobs_xa_lock_class_key);
+
+ ret = ivpu_pci_init(vdev);
+ if (ret) {
+ ivpu_err(vdev, "Failed to initialize PCI device: %d\n", ret);
+ goto err_xa_destroy;
+ }
+
+ ret = ivpu_irq_init(vdev);
+ if (ret) {
+ ivpu_err(vdev, "Failed to initialize IRQs: %d\n", ret);
+ goto err_xa_destroy;
+ }
+
+ /* Init basic HW info based on buttress registers which are accessible before power up */
+ ret = ivpu_hw_info_init(vdev);
+ if (ret) {
+ ivpu_err(vdev, "Failed to initialize HW info: %d\n", ret);
+ goto err_xa_destroy;
+ }
+
+ /* Power up early so the rest of init code can access VPU registers */
+ ret = ivpu_hw_power_up(vdev);
+ if (ret) {
+ ivpu_err(vdev, "Failed to power up HW: %d\n", ret);
+ goto err_xa_destroy;
+ }
+
+ ret = ivpu_mmu_global_context_init(vdev);
+ if (ret) {
+ ivpu_err(vdev, "Failed to initialize global MMU context: %d\n", ret);
+ goto err_power_down;
+ }
+
+ ret = ivpu_mmu_init(vdev);
+ if (ret) {
+ ivpu_err(vdev, "Failed to initialize MMU device: %d\n", ret);
+ goto err_mmu_gctx_fini;
+ }
+
+ ret = ivpu_fw_init(vdev);
+ if (ret) {
+ ivpu_err(vdev, "Failed to initialize firmware: %d\n", ret);
+ goto err_mmu_gctx_fini;
+ }
+
+ ret = ivpu_ipc_init(vdev);
+ if (ret) {
+ ivpu_err(vdev, "Failed to initialize IPC: %d\n", ret);
+ goto err_fw_fini;
+ }
+
+ ret = ivpu_pm_init(vdev);
+ if (ret) {
+ ivpu_err(vdev, "Failed to initialize PM: %d\n", ret);
+ goto err_ipc_fini;
+ }
+
+ ret = ivpu_job_done_thread_init(vdev);
+ if (ret) {
+ ivpu_err(vdev, "Failed to initialize job done thread: %d\n", ret);
+ goto err_ipc_fini;
+ }
+
+ ret = ivpu_fw_load(vdev);
+ if (ret) {
+ ivpu_err(vdev, "Failed to load firmware: %d\n", ret);
+ goto err_job_done_thread_fini;
+ }
+
+ ret = ivpu_boot(vdev);
+ if (ret) {
+ ivpu_err(vdev, "Failed to boot: %d\n", ret);
+ goto err_job_done_thread_fini;
+ }
+
+ ivpu_pm_enable(vdev);
+
+ return 0;
+
+err_job_done_thread_fini:
+ ivpu_job_done_thread_fini(vdev);
+err_ipc_fini:
+ ivpu_ipc_fini(vdev);
+err_fw_fini:
+ ivpu_fw_fini(vdev);
+err_mmu_gctx_fini:
+ ivpu_mmu_global_context_fini(vdev);
+err_power_down:
+ ivpu_hw_power_down(vdev);
+err_xa_destroy:
+ xa_destroy(&vdev->submitted_jobs_xa);
+ xa_destroy(&vdev->context_xa);
+ return ret;
+}
+
+static void ivpu_dev_fini(struct ivpu_device *vdev)
+{
+ ivpu_pm_disable(vdev);
+ ivpu_shutdown(vdev);
+ ivpu_job_done_thread_fini(vdev);
+ ivpu_ipc_fini(vdev);
+ ivpu_fw_fini(vdev);
+ ivpu_mmu_global_context_fini(vdev);
+
+ drm_WARN_ON(&vdev->drm, !xa_empty(&vdev->submitted_jobs_xa));
+ xa_destroy(&vdev->submitted_jobs_xa);
+ drm_WARN_ON(&vdev->drm, !xa_empty(&vdev->context_xa));
+ xa_destroy(&vdev->context_xa);
+}
+
+static struct pci_device_id ivpu_pci_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_MTL) },
+ { }
+};
+MODULE_DEVICE_TABLE(pci, ivpu_pci_ids);
+
+static int ivpu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct ivpu_device *vdev;
+ int ret;
+
+ vdev = devm_drm_dev_alloc(&pdev->dev, &driver, struct ivpu_device, drm);
+ if (IS_ERR(vdev))
+ return PTR_ERR(vdev);
+
+ pci_set_drvdata(pdev, vdev);
+
+ ret = ivpu_dev_init(vdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to initialize VPU device: %d\n", ret);
+ return ret;
+ }
+
+ ret = drm_dev_register(&vdev->drm, 0);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register DRM device: %d\n", ret);
+ ivpu_dev_fini(vdev);
+ }
+
+ return ret;
+}
+
+static void ivpu_remove(struct pci_dev *pdev)
+{
+ struct ivpu_device *vdev = pci_get_drvdata(pdev);
+
+ drm_dev_unregister(&vdev->drm);
+ ivpu_dev_fini(vdev);
+}
+
+static const struct dev_pm_ops ivpu_drv_pci_pm = {
+ SET_SYSTEM_SLEEP_PM_OPS(ivpu_pm_suspend_cb, ivpu_pm_resume_cb)
+ SET_RUNTIME_PM_OPS(ivpu_pm_runtime_suspend_cb, ivpu_pm_runtime_resume_cb, NULL)
+};
+
+static const struct pci_error_handlers ivpu_drv_pci_err = {
+ .reset_prepare = ivpu_pm_reset_prepare_cb,
+ .reset_done = ivpu_pm_reset_done_cb,
+};
+
+static struct pci_driver ivpu_pci_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = ivpu_pci_ids,
+ .probe = ivpu_probe,
+ .remove = ivpu_remove,
+ .driver = {
+ .pm = &ivpu_drv_pci_pm,
+ },
+ .err_handler = &ivpu_drv_pci_err,
+};
+
+module_pci_driver(ivpu_pci_driver);
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL and additional rights");
+MODULE_VERSION(DRIVER_VERSION_STR);
diff --git a/drivers/accel/ivpu/ivpu_drv.h b/drivers/accel/ivpu/ivpu_drv.h
new file mode 100644
index 000000000000..f47b4965db2e
--- /dev/null
+++ b/drivers/accel/ivpu/ivpu_drv.h
@@ -0,0 +1,190 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020-2023 Intel Corporation
+ */
+
+#ifndef __IVPU_DRV_H__
+#define __IVPU_DRV_H__
+
+#include <drm/drm_device.h>
+#include <drm/drm_managed.h>
+#include <drm/drm_mm.h>
+#include <drm/drm_print.h>
+
+#include <linux/pci.h>
+#include <linux/xarray.h>
+#include <uapi/drm/ivpu_accel.h>
+
+#include "ivpu_mmu_context.h"
+
+#define DRIVER_NAME "intel_vpu"
+#define DRIVER_DESC "Driver for Intel Versatile Processing Unit (VPU)"
+#define DRIVER_DATE "20230117"
+
+#define PCI_DEVICE_ID_MTL 0x7d1d
+
+#define IVPU_GLOBAL_CONTEXT_MMU_SSID 0
+#define IVPU_CONTEXT_LIMIT 64
+#define IVPU_NUM_ENGINES 2
+
+#define IVPU_PLATFORM_SILICON 0
+#define IVPU_PLATFORM_SIMICS 2
+#define IVPU_PLATFORM_FPGA 3
+#define IVPU_PLATFORM_INVALID 8
+
+#define IVPU_DBG_REG BIT(0)
+#define IVPU_DBG_IRQ BIT(1)
+#define IVPU_DBG_MMU BIT(2)
+#define IVPU_DBG_FILE BIT(3)
+#define IVPU_DBG_MISC BIT(4)
+#define IVPU_DBG_FW_BOOT BIT(5)
+#define IVPU_DBG_PM BIT(6)
+#define IVPU_DBG_IPC BIT(7)
+#define IVPU_DBG_BO BIT(8)
+#define IVPU_DBG_JOB BIT(9)
+#define IVPU_DBG_JSM BIT(10)
+#define IVPU_DBG_KREF BIT(11)
+#define IVPU_DBG_RPM BIT(12)
+
+#define ivpu_err(vdev, fmt, ...) \
+ drm_err(&(vdev)->drm, "%s(): " fmt, __func__, ##__VA_ARGS__)
+
+#define ivpu_err_ratelimited(vdev, fmt, ...) \
+ drm_err_ratelimited(&(vdev)->drm, "%s(): " fmt, __func__, ##__VA_ARGS__)
+
+#define ivpu_warn(vdev, fmt, ...) \
+ drm_warn(&(vdev)->drm, "%s(): " fmt, __func__, ##__VA_ARGS__)
+
+#define ivpu_warn_ratelimited(vdev, fmt, ...) \
+ drm_err_ratelimited(&(vdev)->drm, "%s(): " fmt, __func__, ##__VA_ARGS__)
+
+#define ivpu_info(vdev, fmt, ...) drm_info(&(vdev)->drm, fmt, ##__VA_ARGS__)
+
+#define ivpu_dbg(vdev, type, fmt, args...) do { \
+ if (unlikely(IVPU_DBG_##type & ivpu_dbg_mask)) \
+ dev_dbg((vdev)->drm.dev, "[%s] " fmt, #type, ##args); \
+} while (0)
+
+#define IVPU_WA(wa_name) (vdev->wa.wa_name)
+
+struct ivpu_wa_table {
+ bool punit_disabled;
+ bool clear_runtime_mem;
+};
+
+struct ivpu_hw_info;
+struct ivpu_mmu_info;
+struct ivpu_fw_info;
+struct ivpu_ipc_info;
+struct ivpu_pm_info;
+
+struct ivpu_device {
+ struct drm_device drm;
+ void __iomem *regb;
+ void __iomem *regv;
+ u32 platform;
+ u32 irq;
+
+ struct ivpu_wa_table wa;
+ struct ivpu_hw_info *hw;
+ struct ivpu_mmu_info *mmu;
+ struct ivpu_fw_info *fw;
+ struct ivpu_ipc_info *ipc;
+ struct ivpu_pm_info *pm;
+
+ struct ivpu_mmu_context gctx;
+ struct xarray context_xa;
+ struct xa_limit context_xa_limit;
+
+ struct xarray submitted_jobs_xa;
+ struct task_struct *job_done_thread;
+
+ atomic64_t unique_id_counter;
+
+ struct {
+ int boot;
+ int jsm;
+ int tdr;
+ int reschedule_suspend;
+ } timeout;
+};
+
+/*
+ * file_priv has its own refcount (ref) that allows user space to close the fd
+ * without blocking even if VPU is still processing some jobs.
+ */
+struct ivpu_file_priv {
+ struct kref ref;
+ struct ivpu_device *vdev;
+ struct mutex lock; /* Protects cmdq */
+ struct ivpu_cmdq *cmdq[IVPU_NUM_ENGINES];
+ struct ivpu_mmu_context ctx;
+ u32 priority;
+ bool has_mmu_faults;
+};
+
+extern int ivpu_dbg_mask;
+extern u8 ivpu_pll_min_ratio;
+extern u8 ivpu_pll_max_ratio;
+
+#define IVPU_TEST_MODE_DISABLED 0
+#define IVPU_TEST_MODE_FW_TEST 1
+#define IVPU_TEST_MODE_NULL_HW 2
+extern int ivpu_test_mode;
+
+struct ivpu_file_priv *ivpu_file_priv_get(struct ivpu_file_priv *file_priv);
+struct ivpu_file_priv *ivpu_file_priv_get_by_ctx_id(struct ivpu_device *vdev, unsigned long id);
+void ivpu_file_priv_put(struct ivpu_file_priv **link);
+
+int ivpu_boot(struct ivpu_device *vdev);
+int ivpu_shutdown(struct ivpu_device *vdev);
+
+static inline bool ivpu_is_mtl(struct ivpu_device *vdev)
+{
+ return to_pci_dev(vdev->drm.dev)->device == PCI_DEVICE_ID_MTL;
+}
+
+static inline u8 ivpu_revision(struct ivpu_device *vdev)
+{
+ return to_pci_dev(vdev->drm.dev)->revision;
+}
+
+static inline u16 ivpu_device_id(struct ivpu_device *vdev)
+{
+ return to_pci_dev(vdev->drm.dev)->device;
+}
+
+static inline struct ivpu_device *to_ivpu_device(struct drm_device *dev)
+{
+ return container_of(dev, struct ivpu_device, drm);
+}
+
+static inline u32 ivpu_get_context_count(struct ivpu_device *vdev)
+{
+ struct xa_limit ctx_limit = vdev->context_xa_limit;
+
+ return (ctx_limit.max - ctx_limit.min + 1);
+}
+
+static inline u32 ivpu_get_platform(struct ivpu_device *vdev)
+{
+ WARN_ON_ONCE(vdev->platform == IVPU_PLATFORM_INVALID);
+ return vdev->platform;
+}
+
+static inline bool ivpu_is_silicon(struct ivpu_device *vdev)
+{
+ return ivpu_get_platform(vdev) == IVPU_PLATFORM_SILICON;
+}
+
+static inline bool ivpu_is_simics(struct ivpu_device *vdev)
+{
+ return ivpu_get_platform(vdev) == IVPU_PLATFORM_SIMICS;
+}
+
+static inline bool ivpu_is_fpga(struct ivpu_device *vdev)
+{
+ return ivpu_get_platform(vdev) == IVPU_PLATFORM_FPGA;
+}
+
+#endif /* __IVPU_DRV_H__ */
diff --git a/drivers/accel/ivpu/ivpu_fw.c b/drivers/accel/ivpu/ivpu_fw.c
new file mode 100644
index 000000000000..f58951a0d81b
--- /dev/null
+++ b/drivers/accel/ivpu/ivpu_fw.c
@@ -0,0 +1,434 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020-2023 Intel Corporation
+ */
+
+#include <linux/firmware.h>
+#include <linux/highmem.h>
+#include <linux/moduleparam.h>
+#include <linux/pci.h>
+
+#include "vpu_boot_api.h"
+#include "ivpu_drv.h"
+#include "ivpu_fw.h"
+#include "ivpu_gem.h"
+#include "ivpu_hw.h"
+#include "ivpu_ipc.h"
+#include "ivpu_pm.h"
+
+#define FW_GLOBAL_MEM_START (2ull * SZ_1G)
+#define FW_GLOBAL_MEM_END (3ull * SZ_1G)
+#define FW_SHARED_MEM_SIZE SZ_256M /* Must be aligned to FW_SHARED_MEM_ALIGNMENT */
+#define FW_SHARED_MEM_ALIGNMENT SZ_128K /* VPU MTRR limitation */
+#define FW_RUNTIME_MAX_SIZE SZ_512M
+#define FW_SHAVE_NN_MAX_SIZE SZ_2M
+#define FW_RUNTIME_MIN_ADDR (FW_GLOBAL_MEM_START)
+#define FW_RUNTIME_MAX_ADDR (FW_GLOBAL_MEM_END - FW_SHARED_MEM_SIZE)
+#define FW_VERSION_HEADER_SIZE SZ_4K
+#define FW_FILE_IMAGE_OFFSET (VPU_FW_HEADER_SIZE + FW_VERSION_HEADER_SIZE)
+
+#define WATCHDOG_MSS_REDIRECT 32
+#define WATCHDOG_NCE_REDIRECT 33
+
+#define ADDR_TO_L2_CACHE_CFG(addr) ((addr) >> 31)
+
+#define IVPU_FW_CHECK_API(vdev, fw_hdr, name, min_major) \
+ ivpu_fw_check_api(vdev, fw_hdr, #name, \
+ VPU_##name##_API_VER_INDEX, \
+ VPU_##name##_API_VER_MAJOR, \
+ VPU_##name##_API_VER_MINOR, min_major)
+
+static char *ivpu_firmware;
+module_param_named_unsafe(firmware, ivpu_firmware, charp, 0644);
+MODULE_PARM_DESC(firmware, "VPU firmware binary in /lib/firmware/..");
+
+static int ivpu_fw_request(struct ivpu_device *vdev)
+{
+ static const char * const fw_names[] = {
+ "mtl_vpu.bin",
+ "intel/vpu/mtl_vpu_v0.0.bin"
+ };
+ int ret = -ENOENT;
+ int i;
+
+ if (ivpu_firmware)
+ return request_firmware(&vdev->fw->file, ivpu_firmware, vdev->drm.dev);
+
+ for (i = 0; i < ARRAY_SIZE(fw_names); i++) {
+ ret = firmware_request_nowarn(&vdev->fw->file, fw_names[i], vdev->drm.dev);
+ if (!ret)
+ return 0;
+ }
+
+ ivpu_err(vdev, "Failed to request firmware: %d\n", ret);
+ return ret;
+}
+
+static int
+ivpu_fw_check_api(struct ivpu_device *vdev, const struct vpu_firmware_header *fw_hdr,
+ const char *str, int index, u16 expected_major, u16 expected_minor,
+ u16 min_major)
+{
+ u16 major = (u16)(fw_hdr->api_version[index] >> 16);
+ u16 minor = (u16)(fw_hdr->api_version[index]);
+
+ if (major < min_major) {
+ ivpu_err(vdev, "Incompatible FW %s API version: %d.%d, required %d.0 or later\n",
+ str, major, minor, min_major);
+ return -EINVAL;
+ }
+ if (major != expected_major) {
+ ivpu_warn(vdev, "Major FW %s API version different: %d.%d (expected %d.%d)\n",
+ str, major, minor, expected_major, expected_minor);
+ }
+ ivpu_dbg(vdev, FW_BOOT, "FW %s API version: %d.%d (expected %d.%d)\n",
+ str, major, minor, expected_major, expected_minor);
+
+ return 0;
+}
+
+static int ivpu_fw_parse(struct ivpu_device *vdev)
+{
+ struct ivpu_fw_info *fw = vdev->fw;
+ const struct vpu_firmware_header *fw_hdr = (const void *)fw->file->data;
+ u64 runtime_addr, image_load_addr, runtime_size, image_size;
+
+ if (fw->file->size <= FW_FILE_IMAGE_OFFSET) {
+ ivpu_err(vdev, "Firmware file is too small: %zu\n", fw->file->size);
+ return -EINVAL;
+ }
+
+ if (fw_hdr->header_version != VPU_FW_HEADER_VERSION) {
+ ivpu_err(vdev, "Invalid firmware header version: %u\n", fw_hdr->header_version);
+ return -EINVAL;
+ }
+
+ runtime_addr = fw_hdr->boot_params_load_address;
+ runtime_size = fw_hdr->runtime_size;
+ image_load_addr = fw_hdr->image_load_address;
+ image_size = fw_hdr->image_size;
+
+ if (runtime_addr < FW_RUNTIME_MIN_ADDR || runtime_addr > FW_RUNTIME_MAX_ADDR) {
+ ivpu_err(vdev, "Invalid firmware runtime address: 0x%llx\n", runtime_addr);
+ return -EINVAL;
+ }
+
+ if (runtime_size < fw->file->size || runtime_size > FW_RUNTIME_MAX_SIZE) {
+ ivpu_err(vdev, "Invalid firmware runtime size: %llu\n", runtime_size);
+ return -EINVAL;
+ }
+
+ if (FW_FILE_IMAGE_OFFSET + image_size > fw->file->size) {
+ ivpu_err(vdev, "Invalid image size: %llu\n", image_size);
+ return -EINVAL;
+ }
+
+ if (image_load_addr < runtime_addr ||
+ image_load_addr + image_size > runtime_addr + runtime_size) {
+ ivpu_err(vdev, "Invalid firmware load address size: 0x%llx and size %llu\n",
+ image_load_addr, image_size);
+ return -EINVAL;
+ }
+
+ if (fw_hdr->shave_nn_fw_size > FW_SHAVE_NN_MAX_SIZE) {
+ ivpu_err(vdev, "SHAVE NN firmware is too big: %u\n", fw_hdr->shave_nn_fw_size);
+ return -EINVAL;
+ }
+
+ if (fw_hdr->entry_point < image_load_addr ||
+ fw_hdr->entry_point >= image_load_addr + image_size) {
+ ivpu_err(vdev, "Invalid entry point: 0x%llx\n", fw_hdr->entry_point);
+ return -EINVAL;
+ }
+ ivpu_dbg(vdev, FW_BOOT, "Header version: 0x%x, format 0x%x\n",
+ fw_hdr->header_version, fw_hdr->image_format);
+ ivpu_dbg(vdev, FW_BOOT, "FW version: %s\n", (char *)fw_hdr + VPU_FW_HEADER_SIZE);
+
+ if (IVPU_FW_CHECK_API(vdev, fw_hdr, BOOT, 3))
+ return -EINVAL;
+ if (IVPU_FW_CHECK_API(vdev, fw_hdr, JSM, 3))
+ return -EINVAL;
+
+ fw->runtime_addr = runtime_addr;
+ fw->runtime_size = runtime_size;
+ fw->image_load_offset = image_load_addr - runtime_addr;
+ fw->image_size = image_size;
+ fw->shave_nn_size = PAGE_ALIGN(fw_hdr->shave_nn_fw_size);
+
+ fw->cold_boot_entry_point = fw_hdr->entry_point;
+ fw->entry_point = fw->cold_boot_entry_point;
+
+ ivpu_dbg(vdev, FW_BOOT, "Size: file %lu image %u runtime %u shavenn %u\n",
+ fw->file->size, fw->image_size, fw->runtime_size, fw->shave_nn_size);
+ ivpu_dbg(vdev, FW_BOOT, "Address: runtime 0x%llx, load 0x%llx, entry point 0x%llx\n",
+ fw->runtime_addr, image_load_addr, fw->entry_point);
+
+ return 0;
+}
+
+static void ivpu_fw_release(struct ivpu_device *vdev)
+{
+ release_firmware(vdev->fw->file);
+}
+
+static int ivpu_fw_update_global_range(struct ivpu_device *vdev)
+{
+ struct ivpu_fw_info *fw = vdev->fw;
+ u64 start = ALIGN(fw->runtime_addr + fw->runtime_size, FW_SHARED_MEM_ALIGNMENT);
+ u64 size = FW_SHARED_MEM_SIZE;
+
+ if (start + size > FW_GLOBAL_MEM_END) {
+ ivpu_err(vdev, "No space for shared region, start %lld, size %lld\n", start, size);
+ return -EINVAL;
+ }
+
+ ivpu_hw_init_range(&vdev->hw->ranges.global_low, start, size);
+ return 0;
+}
+
+static int ivpu_fw_mem_init(struct ivpu_device *vdev)
+{
+ struct ivpu_fw_info *fw = vdev->fw;
+ int ret;
+
+ ret = ivpu_fw_update_global_range(vdev);
+ if (ret)
+ return ret;
+
+ fw->mem = ivpu_bo_alloc_internal(vdev, fw->runtime_addr, fw->runtime_size, DRM_IVPU_BO_WC);
+ if (!fw->mem) {
+ ivpu_err(vdev, "Failed to allocate firmware runtime memory\n");
+ return -ENOMEM;
+ }
+
+ if (fw->shave_nn_size) {
+ fw->mem_shave_nn = ivpu_bo_alloc_internal(vdev, vdev->hw->ranges.global_high.start,
+ fw->shave_nn_size, DRM_IVPU_BO_UNCACHED);
+ if (!fw->mem_shave_nn) {
+ ivpu_err(vdev, "Failed to allocate shavenn buffer\n");
+ ivpu_bo_free_internal(fw->mem);
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+static void ivpu_fw_mem_fini(struct ivpu_device *vdev)
+{
+ struct ivpu_fw_info *fw = vdev->fw;
+
+ if (fw->mem_shave_nn) {
+ ivpu_bo_free_internal(fw->mem_shave_nn);
+ fw->mem_shave_nn = NULL;
+ }
+
+ ivpu_bo_free_internal(fw->mem);
+ fw->mem = NULL;
+}
+
+int ivpu_fw_init(struct ivpu_device *vdev)
+{
+ int ret;
+
+ ret = ivpu_fw_request(vdev);
+ if (ret)
+ return ret;
+
+ ret = ivpu_fw_parse(vdev);
+ if (ret)
+ goto err_fw_release;
+
+ ret = ivpu_fw_mem_init(vdev);
+ if (ret)
+ goto err_fw_release;
+
+ return 0;
+
+err_fw_release:
+ ivpu_fw_release(vdev);
+ return ret;
+}
+
+void ivpu_fw_fini(struct ivpu_device *vdev)
+{
+ ivpu_fw_mem_fini(vdev);
+ ivpu_fw_release(vdev);
+}
+
+int ivpu_fw_load(struct ivpu_device *vdev)
+{
+ struct ivpu_fw_info *fw = vdev->fw;
+ u64 image_end_offset = fw->image_load_offset + fw->image_size;
+
+ memset(fw->mem->kvaddr, 0, fw->image_load_offset);
+ memcpy(fw->mem->kvaddr + fw->image_load_offset,
+ fw->file->data + FW_FILE_IMAGE_OFFSET, fw->image_size);
+
+ if (IVPU_WA(clear_runtime_mem)) {
+ u8 *start = fw->mem->kvaddr + image_end_offset;
+ u64 size = fw->mem->base.size - image_end_offset;
+
+ memset(start, 0, size);
+ }
+
+ wmb(); /* Flush WC buffers after writing fw->mem */
+
+ return 0;
+}
+
+static void ivpu_fw_boot_params_print(struct ivpu_device *vdev, struct vpu_boot_params *boot_params)
+{
+ ivpu_dbg(vdev, FW_BOOT, "boot_params.magic = 0x%x\n",
+ boot_params->magic);
+ ivpu_dbg(vdev, FW_BOOT, "boot_params.vpu_id = 0x%x\n",
+ boot_params->vpu_id);
+ ivpu_dbg(vdev, FW_BOOT, "boot_params.vpu_count = 0x%x\n",
+ boot_params->vpu_count);
+ ivpu_dbg(vdev, FW_BOOT, "boot_params.frequency = %u\n",
+ boot_params->frequency);
+ ivpu_dbg(vdev, FW_BOOT, "boot_params.perf_clk_frequency = %u\n",
+ boot_params->perf_clk_frequency);
+
+ ivpu_dbg(vdev, FW_BOOT, "boot_params.ipc_header_area_start = 0x%llx\n",
+ boot_params->ipc_header_area_start);
+ ivpu_dbg(vdev, FW_BOOT, "boot_params.ipc_header_area_size = 0x%x\n",
+ boot_params->ipc_header_area_size);
+ ivpu_dbg(vdev, FW_BOOT, "boot_params.shared_region_base = 0x%llx\n",
+ boot_params->shared_region_base);
+ ivpu_dbg(vdev, FW_BOOT, "boot_params.shared_region_size = 0x%x\n",
+ boot_params->shared_region_size);
+ ivpu_dbg(vdev, FW_BOOT, "boot_params.ipc_payload_area_start = 0x%llx\n",
+ boot_params->ipc_payload_area_start);
+ ivpu_dbg(vdev, FW_BOOT, "boot_params.ipc_payload_area_size = 0x%x\n",
+ boot_params->ipc_payload_area_size);
+ ivpu_dbg(vdev, FW_BOOT, "boot_params.global_aliased_pio_base = 0x%llx\n",
+ boot_params->global_aliased_pio_base);
+ ivpu_dbg(vdev, FW_BOOT, "boot_params.global_aliased_pio_size = 0x%x\n",
+ boot_params->global_aliased_pio_size);
+
+ ivpu_dbg(vdev, FW_BOOT, "boot_params.autoconfig = 0x%x\n",
+ boot_params->autoconfig);
+
+ ivpu_dbg(vdev, FW_BOOT, "boot_params.cache_defaults[VPU_BOOT_L2_CACHE_CFG_NN].use = 0x%x\n",
+ boot_params->cache_defaults[VPU_BOOT_L2_CACHE_CFG_NN].use);
+ ivpu_dbg(vdev, FW_BOOT, "boot_params.cache_defaults[VPU_BOOT_L2_CACHE_CFG_NN].cfg = 0x%x\n",
+ boot_params->cache_defaults[VPU_BOOT_L2_CACHE_CFG_NN].cfg);
+
+ ivpu_dbg(vdev, FW_BOOT, "boot_params.global_memory_allocator_base = 0x%llx\n",
+ boot_params->global_memory_allocator_base);
+ ivpu_dbg(vdev, FW_BOOT, "boot_params.global_memory_allocator_size = 0x%x\n",
+ boot_params->global_memory_allocator_size);
+
+ ivpu_dbg(vdev, FW_BOOT, "boot_params.shave_nn_fw_base = 0x%llx\n",
+ boot_params->shave_nn_fw_base);
+
+ ivpu_dbg(vdev, FW_BOOT, "boot_params.watchdog_irq_mss = 0x%x\n",
+ boot_params->watchdog_irq_mss);
+ ivpu_dbg(vdev, FW_BOOT, "boot_params.watchdog_irq_nce = 0x%x\n",
+ boot_params->watchdog_irq_nce);
+ ivpu_dbg(vdev, FW_BOOT, "boot_params.host_to_vpu_irq = 0x%x\n",
+ boot_params->host_to_vpu_irq);
+ ivpu_dbg(vdev, FW_BOOT, "boot_params.job_done_irq = 0x%x\n",
+ boot_params->job_done_irq);
+
+ ivpu_dbg(vdev, FW_BOOT, "boot_params.host_version_id = 0x%x\n",
+ boot_params->host_version_id);
+ ivpu_dbg(vdev, FW_BOOT, "boot_params.si_stepping = 0x%x\n",
+ boot_params->si_stepping);
+ ivpu_dbg(vdev, FW_BOOT, "boot_params.device_id = 0x%llx\n",
+ boot_params->device_id);
+ ivpu_dbg(vdev, FW_BOOT, "boot_params.feature_exclusion = 0x%llx\n",
+ boot_params->feature_exclusion);
+ ivpu_dbg(vdev, FW_BOOT, "boot_params.sku = 0x%llx\n",
+ boot_params->sku);
+ ivpu_dbg(vdev, FW_BOOT, "boot_params.min_freq_pll_ratio = 0x%x\n",
+ boot_params->min_freq_pll_ratio);
+ ivpu_dbg(vdev, FW_BOOT, "boot_params.pn_freq_pll_ratio = 0x%x\n",
+ boot_params->pn_freq_pll_ratio);
+ ivpu_dbg(vdev, FW_BOOT, "boot_params.max_freq_pll_ratio = 0x%x\n",
+ boot_params->max_freq_pll_ratio);
+ ivpu_dbg(vdev, FW_BOOT, "boot_params.default_trace_level = 0x%x\n",
+ boot_params->default_trace_level);
+ ivpu_dbg(vdev, FW_BOOT, "boot_params.tracing_buff_message_format_mask = 0x%llx\n",
+ boot_params->tracing_buff_message_format_mask);
+ ivpu_dbg(vdev, FW_BOOT, "boot_params.trace_destination_mask = 0x%x\n",
+ boot_params->trace_destination_mask);
+ ivpu_dbg(vdev, FW_BOOT, "boot_params.trace_hw_component_mask = 0x%llx\n",
+ boot_params->trace_hw_component_mask);
+ ivpu_dbg(vdev, FW_BOOT, "boot_params.boot_type = 0x%x\n",
+ boot_params->boot_type);
+ ivpu_dbg(vdev, FW_BOOT, "boot_params.punit_telemetry_sram_base = 0x%llx\n",
+ boot_params->punit_telemetry_sram_base);
+ ivpu_dbg(vdev, FW_BOOT, "boot_params.punit_telemetry_sram_size = 0x%llx\n",
+ boot_params->punit_telemetry_sram_size);
+ ivpu_dbg(vdev, FW_BOOT, "boot_params.vpu_telemetry_enable = 0x%x\n",
+ boot_params->vpu_telemetry_enable);
+}
+
+void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params *boot_params)
+{
+ struct ivpu_bo *ipc_mem_rx = vdev->ipc->mem_rx;
+
+ /* In case of warm boot we only have to reset the entrypoint addr */
+ if (!ivpu_fw_is_cold_boot(vdev)) {
+ boot_params->save_restore_ret_address = 0;
+ vdev->pm->is_warmboot = true;
+ return;
+ }
+
+ vdev->pm->is_warmboot = false;
+
+ boot_params->magic = VPU_BOOT_PARAMS_MAGIC;
+ boot_params->vpu_id = to_pci_dev(vdev->drm.dev)->bus->number;
+ boot_params->frequency = ivpu_hw_reg_pll_freq_get(vdev);
+
+ /*
+ * Uncached region of VPU address space, covers IPC buffers, job queues
+ * and log buffers, programmable to L2$ Uncached by VPU MTRR
+ */
+ boot_params->shared_region_base = vdev->hw->ranges.global_low.start;
+ boot_params->shared_region_size = vdev->hw->ranges.global_low.end -
+ vdev->hw->ranges.global_low.start;
+
+ boot_params->ipc_header_area_start = ipc_mem_rx->vpu_addr;
+ boot_params->ipc_header_area_size = ipc_mem_rx->base.size / 2;
+
+ boot_params->ipc_payload_area_start = ipc_mem_rx->vpu_addr + ipc_mem_rx->base.size / 2;
+ boot_params->ipc_payload_area_size = ipc_mem_rx->base.size / 2;
+
+ boot_params->global_aliased_pio_base =
+ vdev->hw->ranges.global_aliased_pio.start;
+ boot_params->global_aliased_pio_size =
+ ivpu_hw_range_size(&vdev->hw->ranges.global_aliased_pio);
+
+ /* Allow configuration for L2C_PAGE_TABLE with boot param value */
+ boot_params->autoconfig = 1;
+
+ /* Enable L2 cache for first 2GB of high memory */
+ boot_params->cache_defaults[VPU_BOOT_L2_CACHE_CFG_NN].use = 1;
+ boot_params->cache_defaults[VPU_BOOT_L2_CACHE_CFG_NN].cfg =
+ ADDR_TO_L2_CACHE_CFG(vdev->hw->ranges.global_high.start);
+
+ if (vdev->fw->mem_shave_nn)
+ boot_params->shave_nn_fw_base = vdev->fw->mem_shave_nn->vpu_addr;
+
+ boot_params->watchdog_irq_mss = WATCHDOG_MSS_REDIRECT;
+ boot_params->watchdog_irq_nce = WATCHDOG_NCE_REDIRECT;
+ boot_params->si_stepping = ivpu_revision(vdev);
+ boot_params->device_id = ivpu_device_id(vdev);
+ boot_params->feature_exclusion = vdev->hw->tile_fuse;
+ boot_params->sku = vdev->hw->sku;
+
+ boot_params->min_freq_pll_ratio = vdev->hw->pll.min_ratio;
+ boot_params->pn_freq_pll_ratio = vdev->hw->pll.pn_ratio;
+ boot_params->max_freq_pll_ratio = vdev->hw->pll.max_ratio;
+
+ boot_params->punit_telemetry_sram_base = ivpu_hw_reg_telemetry_offset_get(vdev);
+ boot_params->punit_telemetry_sram_size = ivpu_hw_reg_telemetry_size_get(vdev);
+ boot_params->vpu_telemetry_enable = ivpu_hw_reg_telemetry_enable_get(vdev);
+
+ wmb(); /* Flush WC buffers after writing bootparams */
+
+ ivpu_fw_boot_params_print(vdev, boot_params);
+}
diff --git a/drivers/accel/ivpu/ivpu_fw.h b/drivers/accel/ivpu/ivpu_fw.h
new file mode 100644
index 000000000000..8d275c802d1c
--- /dev/null
+++ b/drivers/accel/ivpu/ivpu_fw.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020-2023 Intel Corporation
+ */
+
+#ifndef __IVPU_FW_H__
+#define __IVPU_FW_H__
+
+struct ivpu_device;
+struct ivpu_bo;
+struct vpu_boot_params;
+
+struct ivpu_fw_info {
+ const struct firmware *file;
+ struct ivpu_bo *mem;
+ struct ivpu_bo *mem_shave_nn;
+ struct ivpu_bo *mem_log_crit;
+ struct ivpu_bo *mem_log_verb;
+ u64 runtime_addr;
+ u32 runtime_size;
+ u64 image_load_offset;
+ u32 image_size;
+ u32 shave_nn_size;
+ u64 entry_point; /* Cold or warm boot entry point for next boot */
+ u64 cold_boot_entry_point;
+};
+
+int ivpu_fw_init(struct ivpu_device *vdev);
+void ivpu_fw_fini(struct ivpu_device *vdev);
+int ivpu_fw_load(struct ivpu_device *vdev);
+void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params *bp);
+
+static inline bool ivpu_fw_is_cold_boot(struct ivpu_device *vdev)
+{
+ return vdev->fw->entry_point == vdev->fw->cold_boot_entry_point;
+}
+
+#endif /* __IVPU_FW_H__ */
diff --git a/drivers/accel/ivpu/ivpu_gem.c b/drivers/accel/ivpu/ivpu_gem.c
new file mode 100644
index 000000000000..52b339aefadc
--- /dev/null
+++ b/drivers/accel/ivpu/ivpu_gem.c
@@ -0,0 +1,749 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020-2023 Intel Corporation
+ */
+
+#include <linux/dma-buf.h>
+#include <linux/highmem.h>
+#include <linux/module.h>
+#include <linux/set_memory.h>
+#include <linux/xarray.h>
+
+#include <drm/drm_cache.h>
+#include <drm/drm_debugfs.h>
+#include <drm/drm_file.h>
+#include <drm/drm_utils.h>
+
+#include "ivpu_drv.h"
+#include "ivpu_gem.h"
+#include "ivpu_hw.h"
+#include "ivpu_mmu.h"
+#include "ivpu_mmu_context.h"
+
+MODULE_IMPORT_NS(DMA_BUF);
+
+static const struct drm_gem_object_funcs ivpu_gem_funcs;
+
+static struct lock_class_key prime_bo_lock_class_key;
+
+static int __must_check prime_alloc_pages_locked(struct ivpu_bo *bo)
+{
+ /* Pages are managed by the underlying dma-buf */
+ return 0;
+}
+
+static void prime_free_pages_locked(struct ivpu_bo *bo)
+{
+ /* Pages are managed by the underlying dma-buf */
+}
+
+static int prime_map_pages_locked(struct ivpu_bo *bo)
+{
+ struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
+ struct sg_table *sgt;
+
+ sgt = dma_buf_map_attachment_unlocked(bo->base.import_attach, DMA_BIDIRECTIONAL);
+ if (IS_ERR(sgt)) {
+ ivpu_err(vdev, "Failed to map attachment: %ld\n", PTR_ERR(sgt));
+ return PTR_ERR(sgt);
+ }
+
+ bo->sgt = sgt;
+ return 0;
+}
+
+static void prime_unmap_pages_locked(struct ivpu_bo *bo)
+{
+ dma_buf_unmap_attachment_unlocked(bo->base.import_attach, bo->sgt, DMA_BIDIRECTIONAL);
+ bo->sgt = NULL;
+}
+
+static const struct ivpu_bo_ops prime_ops = {
+ .type = IVPU_BO_TYPE_PRIME,
+ .name = "prime",
+ .alloc_pages = prime_alloc_pages_locked,
+ .free_pages = prime_free_pages_locked,
+ .map_pages = prime_map_pages_locked,
+ .unmap_pages = prime_unmap_pages_locked,
+};
+
+static int __must_check shmem_alloc_pages_locked(struct ivpu_bo *bo)
+{
+ int npages = bo->base.size >> PAGE_SHIFT;
+ struct page **pages;
+
+ pages = drm_gem_get_pages(&bo->base);
+ if (IS_ERR(pages))
+ return PTR_ERR(pages);
+
+ if (bo->flags & DRM_IVPU_BO_WC)
+ set_pages_array_wc(pages, npages);
+ else if (bo->flags & DRM_IVPU_BO_UNCACHED)
+ set_pages_array_uc(pages, npages);
+
+ bo->pages = pages;
+ return 0;
+}
+
+static void shmem_free_pages_locked(struct ivpu_bo *bo)
+{
+ if (ivpu_bo_cache_mode(bo) != DRM_IVPU_BO_CACHED)
+ set_pages_array_wb(bo->pages, bo->base.size >> PAGE_SHIFT);
+
+ drm_gem_put_pages(&bo->base, bo->pages, true, false);
+ bo->pages = NULL;
+}
+
+static int ivpu_bo_map_pages_locked(struct ivpu_bo *bo)
+{
+ int npages = bo->base.size >> PAGE_SHIFT;
+ struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
+ struct sg_table *sgt;
+ int ret;
+
+ sgt = drm_prime_pages_to_sg(&vdev->drm, bo->pages, npages);
+ if (IS_ERR(sgt)) {
+ ivpu_err(vdev, "Failed to allocate sgtable\n");
+ return PTR_ERR(sgt);
+ }
+
+ ret = dma_map_sgtable(vdev->drm.dev, sgt, DMA_BIDIRECTIONAL, 0);
+ if (ret) {
+ ivpu_err(vdev, "Failed to map BO in IOMMU: %d\n", ret);
+ goto err_free_sgt;
+ }
+
+ bo->sgt = sgt;
+ return 0;
+
+err_free_sgt:
+ kfree(sgt);
+ return ret;
+}
+
+static void ivpu_bo_unmap_pages_locked(struct ivpu_bo *bo)
+{
+ struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
+
+ dma_unmap_sgtable(vdev->drm.dev, bo->sgt, DMA_BIDIRECTIONAL, 0);
+ sg_free_table(bo->sgt);
+ kfree(bo->sgt);
+ bo->sgt = NULL;
+}
+
+static const struct ivpu_bo_ops shmem_ops = {
+ .type = IVPU_BO_TYPE_SHMEM,
+ .name = "shmem",
+ .alloc_pages = shmem_alloc_pages_locked,
+ .free_pages = shmem_free_pages_locked,
+ .map_pages = ivpu_bo_map_pages_locked,
+ .unmap_pages = ivpu_bo_unmap_pages_locked,
+};
+
+static int __must_check internal_alloc_pages_locked(struct ivpu_bo *bo)
+{
+ unsigned int i, npages = bo->base.size >> PAGE_SHIFT;
+ struct page **pages;
+ int ret;
+
+ pages = kvmalloc_array(npages, sizeof(*bo->pages), GFP_KERNEL);
+ if (!pages)
+ return -ENOMEM;
+
+ for (i = 0; i < npages; i++) {
+ pages[i] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
+ if (!pages[i]) {
+ ret = -ENOMEM;
+ goto err_free_pages;
+ }
+ cond_resched();
+ }
+
+ bo->pages = pages;
+ return 0;
+
+err_free_pages:
+ while (i--)
+ put_page(pages[i]);
+ kvfree(pages);
+ return ret;
+}
+
+static void internal_free_pages_locked(struct ivpu_bo *bo)
+{
+ unsigned int i, npages = bo->base.size >> PAGE_SHIFT;
+
+ for (i = 0; i < npages; i++)
+ put_page(bo->pages[i]);
+
+ kvfree(bo->pages);
+ bo->pages = NULL;
+}
+
+static const struct ivpu_bo_ops internal_ops = {
+ .type = IVPU_BO_TYPE_INTERNAL,
+ .name = "internal",
+ .alloc_pages = internal_alloc_pages_locked,
+ .free_pages = internal_free_pages_locked,
+ .map_pages = ivpu_bo_map_pages_locked,
+ .unmap_pages = ivpu_bo_unmap_pages_locked,
+};
+
+static int __must_check ivpu_bo_alloc_and_map_pages_locked(struct ivpu_bo *bo)
+{
+ struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
+ int ret;
+
+ lockdep_assert_held(&bo->lock);
+ drm_WARN_ON(&vdev->drm, bo->sgt);
+
+ ret = bo->ops->alloc_pages(bo);
+ if (ret) {
+ ivpu_err(vdev, "Failed to allocate pages for BO: %d", ret);
+ return ret;
+ }
+
+ ret = bo->ops->map_pages(bo);
+ if (ret) {
+ ivpu_err(vdev, "Failed to map pages for BO: %d", ret);
+ goto err_free_pages;
+ }
+ return ret;
+
+err_free_pages:
+ bo->ops->free_pages(bo);
+ return ret;
+}
+
+static void ivpu_bo_unmap_and_free_pages(struct ivpu_bo *bo)
+{
+ mutex_lock(&bo->lock);
+
+ WARN_ON(!bo->sgt);
+ bo->ops->unmap_pages(bo);
+ WARN_ON(bo->sgt);
+ bo->ops->free_pages(bo);
+ WARN_ON(bo->pages);
+
+ mutex_unlock(&bo->lock);
+}
+
+/*
+ * ivpu_bo_pin() - pin the backing physical pages and map them to VPU.
+ *
+ * This function pins physical memory pages, then maps the physical pages
+ * to IOMMU address space and finally updates the VPU MMU page tables
+ * to allow the VPU to translate VPU address to IOMMU address.
+ */
+int __must_check ivpu_bo_pin(struct ivpu_bo *bo)
+{
+ struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
+ int ret = 0;
+
+ mutex_lock(&bo->lock);
+
+ if (!bo->vpu_addr) {
+ ivpu_err(vdev, "vpu_addr not set for BO ctx_id: %d handle: %d\n",
+ bo->ctx->id, bo->handle);
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ if (!bo->sgt) {
+ ret = ivpu_bo_alloc_and_map_pages_locked(bo);
+ if (ret)
+ goto unlock;
+ }
+
+ if (!bo->mmu_mapped) {
+ ret = ivpu_mmu_context_map_sgt(vdev, bo->ctx, bo->vpu_addr, bo->sgt,
+ ivpu_bo_is_snooped(bo));
+ if (ret) {
+ ivpu_err(vdev, "Failed to map BO in MMU: %d\n", ret);
+ goto unlock;
+ }
+ bo->mmu_mapped = true;
+ }
+
+unlock:
+ mutex_unlock(&bo->lock);
+
+ return ret;
+}
+
+static int
+ivpu_bo_alloc_vpu_addr(struct ivpu_bo *bo, struct ivpu_mmu_context *ctx,
+ const struct ivpu_addr_range *range)
+{
+ struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
+ int ret;
+
+ if (!range) {
+ if (bo->flags & DRM_IVPU_BO_HIGH_MEM)
+ range = &vdev->hw->ranges.user_high;
+ else
+ range = &vdev->hw->ranges.user_low;
+ }
+
+ mutex_lock(&ctx->lock);
+ ret = ivpu_mmu_context_insert_node_locked(ctx, range, bo->base.size, &bo->mm_node);
+ if (!ret) {
+ bo->ctx = ctx;
+ bo->vpu_addr = bo->mm_node.start;
+ list_add_tail(&bo->ctx_node, &ctx->bo_list);
+ }
+ mutex_unlock(&ctx->lock);
+
+ return ret;
+}
+
+static void ivpu_bo_free_vpu_addr(struct ivpu_bo *bo)
+{
+ struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
+ struct ivpu_mmu_context *ctx = bo->ctx;
+
+ ivpu_dbg(vdev, BO, "remove from ctx: ctx %d vpu_addr 0x%llx allocated %d mmu_mapped %d\n",
+ ctx->id, bo->vpu_addr, (bool)bo->sgt, bo->mmu_mapped);
+
+ mutex_lock(&bo->lock);
+
+ if (bo->mmu_mapped) {
+ drm_WARN_ON(&vdev->drm, !bo->sgt);
+ ivpu_mmu_context_unmap_sgt(vdev, ctx, bo->vpu_addr, bo->sgt);
+ bo->mmu_mapped = false;
+ }
+
+ mutex_lock(&ctx->lock);
+ list_del(&bo->ctx_node);
+ bo->vpu_addr = 0;
+ bo->ctx = NULL;
+ ivpu_mmu_context_remove_node_locked(ctx, &bo->mm_node);
+ mutex_unlock(&ctx->lock);
+
+ mutex_unlock(&bo->lock);
+}
+
+void ivpu_bo_remove_all_bos_from_context(struct ivpu_mmu_context *ctx)
+{
+ struct ivpu_bo *bo, *tmp;
+
+ list_for_each_entry_safe(bo, tmp, &ctx->bo_list, ctx_node)
+ ivpu_bo_free_vpu_addr(bo);
+}
+
+static struct ivpu_bo *
+ivpu_bo_alloc(struct ivpu_device *vdev, struct ivpu_mmu_context *mmu_context,
+ u64 size, u32 flags, const struct ivpu_bo_ops *ops,
+ const struct ivpu_addr_range *range, u64 user_ptr)
+{
+ struct ivpu_bo *bo;
+ int ret = 0;
+
+ if (drm_WARN_ON(&vdev->drm, size == 0 || !PAGE_ALIGNED(size)))
+ return ERR_PTR(-EINVAL);
+
+ switch (flags & DRM_IVPU_BO_CACHE_MASK) {
+ case DRM_IVPU_BO_CACHED:
+ case DRM_IVPU_BO_UNCACHED:
+ case DRM_IVPU_BO_WC:
+ break;
+ default:
+ return ERR_PTR(-EINVAL);
+ }
+
+ bo = kzalloc(sizeof(*bo), GFP_KERNEL);
+ if (!bo)
+ return ERR_PTR(-ENOMEM);
+
+ mutex_init(&bo->lock);
+ bo->base.funcs = &ivpu_gem_funcs;
+ bo->flags = flags;
+ bo->ops = ops;
+ bo->user_ptr = user_ptr;
+
+ if (ops->type == IVPU_BO_TYPE_SHMEM)
+ ret = drm_gem_object_init(&vdev->drm, &bo->base, size);
+ else
+ drm_gem_private_object_init(&vdev->drm, &bo->base, size);
+
+ if (ret) {
+ ivpu_err(vdev, "Failed to initialize drm object\n");
+ goto err_free;
+ }
+
+ if (flags & DRM_IVPU_BO_MAPPABLE) {
+ ret = drm_gem_create_mmap_offset(&bo->base);
+ if (ret) {
+ ivpu_err(vdev, "Failed to allocate mmap offset\n");
+ goto err_release;
+ }
+ }
+
+ if (mmu_context) {
+ ret = ivpu_bo_alloc_vpu_addr(bo, mmu_context, range);
+ if (ret) {
+ ivpu_err(vdev, "Failed to add BO to context: %d\n", ret);
+ goto err_release;
+ }
+ }
+
+ return bo;
+
+err_release:
+ drm_gem_object_release(&bo->base);
+err_free:
+ kfree(bo);
+ return ERR_PTR(ret);
+}
+
+static void ivpu_bo_free(struct drm_gem_object *obj)
+{
+ struct ivpu_bo *bo = to_ivpu_bo(obj);
+ struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
+
+ if (bo->ctx)
+ ivpu_dbg(vdev, BO, "free: ctx %d vpu_addr 0x%llx allocated %d mmu_mapped %d\n",
+ bo->ctx->id, bo->vpu_addr, (bool)bo->sgt, bo->mmu_mapped);
+ else
+ ivpu_dbg(vdev, BO, "free: ctx (released) allocated %d mmu_mapped %d\n",
+ (bool)bo->sgt, bo->mmu_mapped);
+
+ drm_WARN_ON(&vdev->drm, !dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ));
+
+ vunmap(bo->kvaddr);
+
+ if (bo->ctx)
+ ivpu_bo_free_vpu_addr(bo);
+
+ if (bo->sgt)
+ ivpu_bo_unmap_and_free_pages(bo);
+
+ if (bo->base.import_attach)
+ drm_prime_gem_destroy(&bo->base, bo->sgt);
+
+ drm_gem_object_release(&bo->base);
+
+ mutex_destroy(&bo->lock);
+ kfree(bo);
+}
+
+static int ivpu_bo_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
+{
+ struct ivpu_bo *bo = to_ivpu_bo(obj);
+ struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
+
+ ivpu_dbg(vdev, BO, "mmap: ctx %u handle %u vpu_addr 0x%llx size %zu type %s",
+ bo->ctx->id, bo->handle, bo->vpu_addr, bo->base.size, bo->ops->name);
+
+ if (obj->import_attach) {
+ /* Drop the reference drm_gem_mmap_obj() acquired.*/
+ drm_gem_object_put(obj);
+ vma->vm_private_data = NULL;
+ return dma_buf_mmap(obj->dma_buf, vma, 0);
+ }
+
+ vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND);
+ vma->vm_page_prot = ivpu_bo_pgprot(bo, vm_get_page_prot(vma->vm_flags));
+
+ return 0;
+}
+
+static struct sg_table *ivpu_bo_get_sg_table(struct drm_gem_object *obj)
+{
+ struct ivpu_bo *bo = to_ivpu_bo(obj);
+ loff_t npages = obj->size >> PAGE_SHIFT;
+ int ret = 0;
+
+ mutex_lock(&bo->lock);
+
+ if (!bo->sgt)
+ ret = ivpu_bo_alloc_and_map_pages_locked(bo);
+
+ mutex_unlock(&bo->lock);
+
+ if (ret)
+ return ERR_PTR(ret);
+
+ return drm_prime_pages_to_sg(obj->dev, bo->pages, npages);
+}
+
+static vm_fault_t ivpu_vm_fault(struct vm_fault *vmf)
+{
+ struct vm_area_struct *vma = vmf->vma;
+ struct drm_gem_object *obj = vma->vm_private_data;
+ struct ivpu_bo *bo = to_ivpu_bo(obj);
+ loff_t npages = obj->size >> PAGE_SHIFT;
+ pgoff_t page_offset;
+ struct page *page;
+ vm_fault_t ret;
+ int err;
+
+ mutex_lock(&bo->lock);
+
+ if (!bo->sgt) {
+ err = ivpu_bo_alloc_and_map_pages_locked(bo);
+ if (err) {
+ ret = vmf_error(err);
+ goto unlock;
+ }
+ }
+
+ /* We don't use vmf->pgoff since that has the fake offset */
+ page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
+ if (page_offset >= npages) {
+ ret = VM_FAULT_SIGBUS;
+ } else {
+ page = bo->pages[page_offset];
+ ret = vmf_insert_pfn(vma, vmf->address, page_to_pfn(page));
+ }
+
+unlock:
+ mutex_unlock(&bo->lock);
+
+ return ret;
+}
+
+static const struct vm_operations_struct ivpu_vm_ops = {
+ .fault = ivpu_vm_fault,
+ .open = drm_gem_vm_open,
+ .close = drm_gem_vm_close,
+};
+
+static const struct drm_gem_object_funcs ivpu_gem_funcs = {
+ .free = ivpu_bo_free,
+ .mmap = ivpu_bo_mmap,
+ .vm_ops = &ivpu_vm_ops,
+ .get_sg_table = ivpu_bo_get_sg_table,
+};
+
+int
+ivpu_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct ivpu_file_priv *file_priv = file->driver_priv;
+ struct ivpu_device *vdev = file_priv->vdev;
+ struct drm_ivpu_bo_create *args = data;
+ u64 size = PAGE_ALIGN(args->size);
+ struct ivpu_bo *bo;
+ int ret;
+
+ if (args->flags & ~DRM_IVPU_BO_FLAGS)
+ return -EINVAL;
+
+ if (size == 0)
+ return -EINVAL;
+
+ bo = ivpu_bo_alloc(vdev, &file_priv->ctx, size, args->flags, &shmem_ops, NULL, 0);
+ if (IS_ERR(bo)) {
+ ivpu_err(vdev, "Failed to create BO: %pe (ctx %u size %llu flags 0x%x)",
+ bo, file_priv->ctx.id, args->size, args->flags);
+ return PTR_ERR(bo);
+ }
+
+ ret = drm_gem_handle_create(file, &bo->base, &bo->handle);
+ if (!ret) {
+ args->vpu_addr = bo->vpu_addr;
+ args->handle = bo->handle;
+ }
+
+ drm_gem_object_put(&bo->base);
+
+ ivpu_dbg(vdev, BO, "alloc shmem: ctx %u vpu_addr 0x%llx size %zu flags 0x%x\n",
+ file_priv->ctx.id, bo->vpu_addr, bo->base.size, bo->flags);
+
+ return ret;
+}
+
+struct ivpu_bo *
+ivpu_bo_alloc_internal(struct ivpu_device *vdev, u64 vpu_addr, u64 size, u32 flags)
+{
+ const struct ivpu_addr_range *range;
+ struct ivpu_addr_range fixed_range;
+ struct ivpu_bo *bo;
+ pgprot_t prot;
+ int ret;
+
+ drm_WARN_ON(&vdev->drm, !PAGE_ALIGNED(vpu_addr));
+ drm_WARN_ON(&vdev->drm, !PAGE_ALIGNED(size));
+
+ if (vpu_addr) {
+ fixed_range.start = vpu_addr;
+ fixed_range.end = vpu_addr + size;
+ range = &fixed_range;
+ } else {
+ range = &vdev->hw->ranges.global_low;
+ }
+
+ bo = ivpu_bo_alloc(vdev, &vdev->gctx, size, flags, &internal_ops, range, 0);
+ if (IS_ERR(bo)) {
+ ivpu_err(vdev, "Failed to create BO: %pe (vpu_addr 0x%llx size %llu flags 0x%x)",
+ bo, vpu_addr, size, flags);
+ return NULL;
+ }
+
+ ret = ivpu_bo_pin(bo);
+ if (ret)
+ goto err_put;
+
+ if (ivpu_bo_cache_mode(bo) != DRM_IVPU_BO_CACHED)
+ drm_clflush_pages(bo->pages, bo->base.size >> PAGE_SHIFT);
+
+ prot = ivpu_bo_pgprot(bo, PAGE_KERNEL);
+ bo->kvaddr = vmap(bo->pages, bo->base.size >> PAGE_SHIFT, VM_MAP, prot);
+ if (!bo->kvaddr) {
+ ivpu_err(vdev, "Failed to map BO into kernel virtual memory\n");
+ goto err_put;
+ }
+
+ ivpu_dbg(vdev, BO, "alloc internal: ctx 0 vpu_addr 0x%llx size %zu flags 0x%x\n",
+ bo->vpu_addr, bo->base.size, flags);
+
+ return bo;
+
+err_put:
+ drm_gem_object_put(&bo->base);
+ return NULL;
+}
+
+void ivpu_bo_free_internal(struct ivpu_bo *bo)
+{
+ drm_gem_object_put(&bo->base);
+}
+
+struct drm_gem_object *ivpu_gem_prime_import(struct drm_device *dev, struct dma_buf *buf)
+{
+ struct ivpu_device *vdev = to_ivpu_device(dev);
+ struct dma_buf_attachment *attach;
+ struct ivpu_bo *bo;
+
+ attach = dma_buf_attach(buf, dev->dev);
+ if (IS_ERR(attach))
+ return ERR_CAST(attach);
+
+ get_dma_buf(buf);
+
+ bo = ivpu_bo_alloc(vdev, NULL, buf->size, DRM_IVPU_BO_MAPPABLE, &prime_ops, NULL, 0);
+ if (IS_ERR(bo)) {
+ ivpu_err(vdev, "Failed to import BO: %pe (size %lu)", bo, buf->size);
+ goto err_detach;
+ }
+
+ lockdep_set_class(&bo->lock, &prime_bo_lock_class_key);
+
+ bo->base.import_attach = attach;
+
+ return &bo->base;
+
+err_detach:
+ dma_buf_detach(buf, attach);
+ dma_buf_put(buf);
+ return ERR_CAST(bo);
+}
+
+int ivpu_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct ivpu_file_priv *file_priv = file->driver_priv;
+ struct ivpu_device *vdev = to_ivpu_device(dev);
+ struct drm_ivpu_bo_info *args = data;
+ struct drm_gem_object *obj;
+ struct ivpu_bo *bo;
+ int ret = 0;
+
+ obj = drm_gem_object_lookup(file, args->handle);
+ if (!obj)
+ return -ENOENT;
+
+ bo = to_ivpu_bo(obj);
+
+ mutex_lock(&bo->lock);
+
+ if (!bo->ctx) {
+ ret = ivpu_bo_alloc_vpu_addr(bo, &file_priv->ctx, NULL);
+ if (ret) {
+ ivpu_err(vdev, "Failed to allocate vpu_addr: %d\n", ret);
+ goto unlock;
+ }
+ }
+
+ args->flags = bo->flags;
+ args->mmap_offset = drm_vma_node_offset_addr(&obj->vma_node);
+ args->vpu_addr = bo->vpu_addr;
+ args->size = obj->size;
+unlock:
+ mutex_unlock(&bo->lock);
+ drm_gem_object_put(obj);
+ return ret;
+}
+
+int ivpu_bo_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct drm_ivpu_bo_wait *args = data;
+ struct drm_gem_object *obj;
+ unsigned long timeout;
+ long ret;
+
+ timeout = drm_timeout_abs_to_jiffies(args->timeout_ns);
+
+ obj = drm_gem_object_lookup(file, args->handle);
+ if (!obj)
+ return -EINVAL;
+
+ ret = dma_resv_wait_timeout(obj->resv, DMA_RESV_USAGE_READ, true, timeout);
+ if (ret == 0) {
+ ret = -ETIMEDOUT;
+ } else if (ret > 0) {
+ ret = 0;
+ args->job_status = to_ivpu_bo(obj)->job_status;
+ }
+
+ drm_gem_object_put(obj);
+
+ return ret;
+}
+
+static void ivpu_bo_print_info(struct ivpu_bo *bo, struct drm_printer *p)
+{
+ unsigned long dma_refcount = 0;
+
+ if (bo->base.dma_buf && bo->base.dma_buf->file)
+ dma_refcount = atomic_long_read(&bo->base.dma_buf->file->f_count);
+
+ drm_printf(p, "%5u %6d %16llx %10lu %10u %12lu %14s\n",
+ bo->ctx->id, bo->handle, bo->vpu_addr, bo->base.size,
+ kref_read(&bo->base.refcount), dma_refcount, bo->ops->name);
+}
+
+void ivpu_bo_list(struct drm_device *dev, struct drm_printer *p)
+{
+ struct ivpu_device *vdev = to_ivpu_device(dev);
+ struct ivpu_file_priv *file_priv;
+ unsigned long ctx_id;
+ struct ivpu_bo *bo;
+
+ drm_printf(p, "%5s %6s %16s %10s %10s %12s %14s\n",
+ "ctx", "handle", "vpu_addr", "size", "refcount", "dma_refcount", "type");
+
+ mutex_lock(&vdev->gctx.lock);
+ list_for_each_entry(bo, &vdev->gctx.bo_list, ctx_node)
+ ivpu_bo_print_info(bo, p);
+ mutex_unlock(&vdev->gctx.lock);
+
+ xa_for_each(&vdev->context_xa, ctx_id, file_priv) {
+ file_priv = ivpu_file_priv_get_by_ctx_id(vdev, ctx_id);
+ if (!file_priv)
+ continue;
+
+ mutex_lock(&file_priv->ctx.lock);
+ list_for_each_entry(bo, &file_priv->ctx.bo_list, ctx_node)
+ ivpu_bo_print_info(bo, p);
+ mutex_unlock(&file_priv->ctx.lock);
+
+ ivpu_file_priv_put(&file_priv);
+ }
+}
+
+void ivpu_bo_list_print(struct drm_device *dev)
+{
+ struct drm_printer p = drm_info_printer(dev->dev);
+
+ ivpu_bo_list(dev, &p);
+}
diff --git a/drivers/accel/ivpu/ivpu_gem.h b/drivers/accel/ivpu/ivpu_gem.h
new file mode 100644
index 000000000000..6b0ceda5f253
--- /dev/null
+++ b/drivers/accel/ivpu/ivpu_gem.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020-2023 Intel Corporation
+ */
+#ifndef __IVPU_GEM_H__
+#define __IVPU_GEM_H__
+
+#include <drm/drm_gem.h>
+#include <drm/drm_mm.h>
+
+struct dma_buf;
+struct ivpu_bo_ops;
+struct ivpu_file_priv;
+
+struct ivpu_bo {
+ struct drm_gem_object base;
+ const struct ivpu_bo_ops *ops;
+
+ struct ivpu_mmu_context *ctx;
+ struct list_head ctx_node;
+ struct drm_mm_node mm_node;
+
+ struct mutex lock; /* Protects: pages, sgt, mmu_mapped */
+ struct sg_table *sgt;
+ struct page **pages;
+ bool mmu_mapped;
+
+ void *kvaddr;
+ u64 vpu_addr;
+ u32 handle;
+ u32 flags;
+ uintptr_t user_ptr;
+ u32 job_status;
+};
+
+enum ivpu_bo_type {
+ IVPU_BO_TYPE_SHMEM = 1,
+ IVPU_BO_TYPE_INTERNAL,
+ IVPU_BO_TYPE_PRIME,
+};
+
+struct ivpu_bo_ops {
+ enum ivpu_bo_type type;
+ const char *name;
+ int (*alloc_pages)(struct ivpu_bo *bo);
+ void (*free_pages)(struct ivpu_bo *bo);
+ int (*map_pages)(struct ivpu_bo *bo);
+ void (*unmap_pages)(struct ivpu_bo *bo);
+};
+
+int ivpu_bo_pin(struct ivpu_bo *bo);
+void ivpu_bo_remove_all_bos_from_context(struct ivpu_mmu_context *ctx);
+void ivpu_bo_list(struct drm_device *dev, struct drm_printer *p);
+void ivpu_bo_list_print(struct drm_device *dev);
+
+struct ivpu_bo *
+ivpu_bo_alloc_internal(struct ivpu_device *vdev, u64 vpu_addr, u64 size, u32 flags);
+void ivpu_bo_free_internal(struct ivpu_bo *bo);
+struct drm_gem_object *ivpu_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf);
+void ivpu_bo_unmap_sgt_and_remove_from_context(struct ivpu_bo *bo);
+
+int ivpu_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file);
+int ivpu_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file);
+int ivpu_bo_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file);
+
+static inline struct ivpu_bo *to_ivpu_bo(struct drm_gem_object *obj)
+{
+ return container_of(obj, struct ivpu_bo, base);
+}
+
+static inline struct page *ivpu_bo_get_page(struct ivpu_bo *bo, u64 offset)
+{
+ if (offset > bo->base.size || !bo->pages)
+ return NULL;
+
+ return bo->pages[offset / PAGE_SIZE];
+}
+
+static inline u32 ivpu_bo_cache_mode(struct ivpu_bo *bo)
+{
+ return bo->flags & DRM_IVPU_BO_CACHE_MASK;
+}
+
+static inline bool ivpu_bo_is_snooped(struct ivpu_bo *bo)
+{
+ return ivpu_bo_cache_mode(bo) == DRM_IVPU_BO_CACHED;
+}
+
+static inline pgprot_t ivpu_bo_pgprot(struct ivpu_bo *bo, pgprot_t prot)
+{
+ if (bo->flags & DRM_IVPU_BO_WC)
+ return pgprot_writecombine(prot);
+
+ if (bo->flags & DRM_IVPU_BO_UNCACHED)
+ return pgprot_noncached(prot);
+
+ return prot;
+}
+
+static inline struct ivpu_device *ivpu_bo_to_vdev(struct ivpu_bo *bo)
+{
+ return to_ivpu_device(bo->base.dev);
+}
+
+static inline void *ivpu_to_cpu_addr(struct ivpu_bo *bo, u32 vpu_addr)
+{
+ if (vpu_addr < bo->vpu_addr)
+ return NULL;
+
+ if (vpu_addr >= (bo->vpu_addr + bo->base.size))
+ return NULL;
+
+ return bo->kvaddr + (vpu_addr - bo->vpu_addr);
+}
+
+static inline u32 cpu_to_vpu_addr(struct ivpu_bo *bo, void *cpu_addr)
+{
+ if (cpu_addr < bo->kvaddr)
+ return 0;
+
+ if (cpu_addr >= (bo->kvaddr + bo->base.size))
+ return 0;
+
+ return bo->vpu_addr + (cpu_addr - bo->kvaddr);
+}
+
+#endif /* __IVPU_GEM_H__ */
diff --git a/drivers/accel/ivpu/ivpu_hw.h b/drivers/accel/ivpu/ivpu_hw.h
new file mode 100644
index 000000000000..50a9304ab09c
--- /dev/null
+++ b/drivers/accel/ivpu/ivpu_hw.h
@@ -0,0 +1,170 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020-2023 Intel Corporation
+ */
+
+#ifndef __IVPU_HW_H__
+#define __IVPU_HW_H__
+
+#include "ivpu_drv.h"
+
+struct ivpu_hw_ops {
+ int (*info_init)(struct ivpu_device *vdev);
+ int (*power_up)(struct ivpu_device *vdev);
+ int (*boot_fw)(struct ivpu_device *vdev);
+ int (*power_down)(struct ivpu_device *vdev);
+ bool (*is_idle)(struct ivpu_device *vdev);
+ void (*wdt_disable)(struct ivpu_device *vdev);
+ void (*diagnose_failure)(struct ivpu_device *vdev);
+ u32 (*reg_pll_freq_get)(struct ivpu_device *vdev);
+ u32 (*reg_telemetry_offset_get)(struct ivpu_device *vdev);
+ u32 (*reg_telemetry_size_get)(struct ivpu_device *vdev);
+ u32 (*reg_telemetry_enable_get)(struct ivpu_device *vdev);
+ void (*reg_db_set)(struct ivpu_device *vdev, u32 db_id);
+ u32 (*reg_ipc_rx_addr_get)(struct ivpu_device *vdev);
+ u32 (*reg_ipc_rx_count_get)(struct ivpu_device *vdev);
+ void (*reg_ipc_tx_set)(struct ivpu_device *vdev, u32 vpu_addr);
+ void (*irq_clear)(struct ivpu_device *vdev);
+ void (*irq_enable)(struct ivpu_device *vdev);
+ void (*irq_disable)(struct ivpu_device *vdev);
+ irqreturn_t (*irq_handler)(int irq, void *ptr);
+};
+
+struct ivpu_addr_range {
+ resource_size_t start;
+ resource_size_t end;
+};
+
+struct ivpu_hw_info {
+ const struct ivpu_hw_ops *ops;
+ struct {
+ struct ivpu_addr_range global_low;
+ struct ivpu_addr_range global_high;
+ struct ivpu_addr_range user_low;
+ struct ivpu_addr_range user_high;
+ struct ivpu_addr_range global_aliased_pio;
+ } ranges;
+ struct {
+ u8 min_ratio;
+ u8 max_ratio;
+ /*
+ * Pll ratio for the efficiency frequency. The VPU has optimum
+ * performance to power ratio at this frequency.
+ */
+ u8 pn_ratio;
+ u32 profiling_freq;
+ } pll;
+ u32 tile_fuse;
+ u32 sku;
+ u16 config;
+};
+
+extern const struct ivpu_hw_ops ivpu_hw_mtl_ops;
+
+static inline int ivpu_hw_info_init(struct ivpu_device *vdev)
+{
+ return vdev->hw->ops->info_init(vdev);
+};
+
+static inline int ivpu_hw_power_up(struct ivpu_device *vdev)
+{
+ ivpu_dbg(vdev, PM, "HW power up\n");
+
+ return vdev->hw->ops->power_up(vdev);
+};
+
+static inline int ivpu_hw_boot_fw(struct ivpu_device *vdev)
+{
+ return vdev->hw->ops->boot_fw(vdev);
+};
+
+static inline bool ivpu_hw_is_idle(struct ivpu_device *vdev)
+{
+ return vdev->hw->ops->is_idle(vdev);
+};
+
+static inline int ivpu_hw_power_down(struct ivpu_device *vdev)
+{
+ ivpu_dbg(vdev, PM, "HW power down\n");
+
+ return vdev->hw->ops->power_down(vdev);
+};
+
+static inline void ivpu_hw_wdt_disable(struct ivpu_device *vdev)
+{
+ vdev->hw->ops->wdt_disable(vdev);
+};
+
+/* Register indirect accesses */
+static inline u32 ivpu_hw_reg_pll_freq_get(struct ivpu_device *vdev)
+{
+ return vdev->hw->ops->reg_pll_freq_get(vdev);
+};
+
+static inline u32 ivpu_hw_reg_telemetry_offset_get(struct ivpu_device *vdev)
+{
+ return vdev->hw->ops->reg_telemetry_offset_get(vdev);
+};
+
+static inline u32 ivpu_hw_reg_telemetry_size_get(struct ivpu_device *vdev)
+{
+ return vdev->hw->ops->reg_telemetry_size_get(vdev);
+};
+
+static inline u32 ivpu_hw_reg_telemetry_enable_get(struct ivpu_device *vdev)
+{
+ return vdev->hw->ops->reg_telemetry_enable_get(vdev);
+};
+
+static inline void ivpu_hw_reg_db_set(struct ivpu_device *vdev, u32 db_id)
+{
+ vdev->hw->ops->reg_db_set(vdev, db_id);
+};
+
+static inline u32 ivpu_hw_reg_ipc_rx_addr_get(struct ivpu_device *vdev)
+{
+ return vdev->hw->ops->reg_ipc_rx_addr_get(vdev);
+};
+
+static inline u32 ivpu_hw_reg_ipc_rx_count_get(struct ivpu_device *vdev)
+{
+ return vdev->hw->ops->reg_ipc_rx_count_get(vdev);
+};
+
+static inline void ivpu_hw_reg_ipc_tx_set(struct ivpu_device *vdev, u32 vpu_addr)
+{
+ vdev->hw->ops->reg_ipc_tx_set(vdev, vpu_addr);
+};
+
+static inline void ivpu_hw_irq_clear(struct ivpu_device *vdev)
+{
+ vdev->hw->ops->irq_clear(vdev);
+};
+
+static inline void ivpu_hw_irq_enable(struct ivpu_device *vdev)
+{
+ vdev->hw->ops->irq_enable(vdev);
+};
+
+static inline void ivpu_hw_irq_disable(struct ivpu_device *vdev)
+{
+ vdev->hw->ops->irq_disable(vdev);
+};
+
+static inline void ivpu_hw_init_range(struct ivpu_addr_range *range, u64 start, u64 size)
+{
+ range->start = start;
+ range->end = start + size;
+}
+
+static inline u64 ivpu_hw_range_size(const struct ivpu_addr_range *range)
+{
+ return range->end - range->start;
+}
+
+static inline void ivpu_hw_diagnose_failure(struct ivpu_device *vdev)
+{
+ vdev->hw->ops->diagnose_failure(vdev);
+}
+
+#endif /* __IVPU_HW_H__ */
diff --git a/drivers/accel/ivpu/ivpu_hw_mtl.c b/drivers/accel/ivpu/ivpu_hw_mtl.c
new file mode 100644
index 000000000000..62bfaa9081c4
--- /dev/null
+++ b/drivers/accel/ivpu/ivpu_hw_mtl.c
@@ -0,0 +1,1084 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020-2023 Intel Corporation
+ */
+
+#include "ivpu_drv.h"
+#include "ivpu_fw.h"
+#include "ivpu_hw_mtl_reg.h"
+#include "ivpu_hw_reg_io.h"
+#include "ivpu_hw.h"
+#include "ivpu_ipc.h"
+#include "ivpu_mmu.h"
+#include "ivpu_pm.h"
+
+#define TILE_FUSE_ENABLE_BOTH 0x0
+#define TILE_FUSE_ENABLE_UPPER 0x1
+#define TILE_FUSE_ENABLE_LOWER 0x2
+
+#define TILE_SKU_BOTH_MTL 0x3630
+#define TILE_SKU_LOWER_MTL 0x3631
+#define TILE_SKU_UPPER_MTL 0x3632
+
+/* Work point configuration values */
+#define WP_CONFIG_1_TILE_5_3_RATIO 0x0101
+#define WP_CONFIG_1_TILE_4_3_RATIO 0x0102
+#define WP_CONFIG_2_TILE_5_3_RATIO 0x0201
+#define WP_CONFIG_2_TILE_4_3_RATIO 0x0202
+#define WP_CONFIG_0_TILE_PLL_OFF 0x0000
+
+#define PLL_REF_CLK_FREQ (50 * 1000000)
+#define PLL_SIMULATION_FREQ (10 * 1000000)
+#define PLL_RATIO_TO_FREQ(x) ((x) * PLL_REF_CLK_FREQ)
+#define PLL_DEFAULT_EPP_VALUE 0x80
+
+#define TIM_SAFE_ENABLE 0xf1d0dead
+#define TIM_WATCHDOG_RESET_VALUE 0xffffffff
+
+#define TIMEOUT_US (150 * USEC_PER_MSEC)
+#define PWR_ISLAND_STATUS_TIMEOUT_US (5 * USEC_PER_MSEC)
+#define PLL_TIMEOUT_US (1500 * USEC_PER_MSEC)
+#define IDLE_TIMEOUT_US (500 * USEC_PER_MSEC)
+
+#define ICB_0_IRQ_MASK ((REG_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT)) | \
+ (REG_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, MMU_IRQ_0_INT)) | \
+ (REG_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, MMU_IRQ_1_INT)) | \
+ (REG_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, MMU_IRQ_2_INT)) | \
+ (REG_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT)) | \
+ (REG_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT)) | \
+ (REG_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT)))
+
+#define ICB_1_IRQ_MASK ((REG_FLD(MTL_VPU_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_2_INT)) | \
+ (REG_FLD(MTL_VPU_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_3_INT)) | \
+ (REG_FLD(MTL_VPU_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_4_INT)))
+
+#define ICB_0_1_IRQ_MASK ((((u64)ICB_1_IRQ_MASK) << 32) | ICB_0_IRQ_MASK)
+
+#define BUTTRESS_IRQ_MASK ((REG_FLD(MTL_BUTTRESS_INTERRUPT_STAT, FREQ_CHANGE)) | \
+ (REG_FLD(MTL_BUTTRESS_INTERRUPT_STAT, ATS_ERR)) | \
+ (REG_FLD(MTL_BUTTRESS_INTERRUPT_STAT, UFI_ERR)))
+
+#define BUTTRESS_IRQ_ENABLE_MASK ((u32)~BUTTRESS_IRQ_MASK)
+#define BUTTRESS_IRQ_DISABLE_MASK ((u32)-1)
+
+#define ITF_FIREWALL_VIOLATION_MASK ((REG_FLD(MTL_VPU_HOST_SS_FW_SOC_IRQ_EN, CSS_ROM_CMX)) | \
+ (REG_FLD(MTL_VPU_HOST_SS_FW_SOC_IRQ_EN, CSS_DBG)) | \
+ (REG_FLD(MTL_VPU_HOST_SS_FW_SOC_IRQ_EN, CSS_CTRL)) | \
+ (REG_FLD(MTL_VPU_HOST_SS_FW_SOC_IRQ_EN, DEC400)) | \
+ (REG_FLD(MTL_VPU_HOST_SS_FW_SOC_IRQ_EN, MSS_NCE)) | \
+ (REG_FLD(MTL_VPU_HOST_SS_FW_SOC_IRQ_EN, MSS_MBI)) | \
+ (REG_FLD(MTL_VPU_HOST_SS_FW_SOC_IRQ_EN, MSS_MBI_CMX)))
+
+static char *ivpu_platform_to_str(u32 platform)
+{
+ switch (platform) {
+ case IVPU_PLATFORM_SILICON:
+ return "IVPU_PLATFORM_SILICON";
+ case IVPU_PLATFORM_SIMICS:
+ return "IVPU_PLATFORM_SIMICS";
+ case IVPU_PLATFORM_FPGA:
+ return "IVPU_PLATFORM_FPGA";
+ default:
+ return "Invalid platform";
+ }
+}
+
+static void ivpu_hw_read_platform(struct ivpu_device *vdev)
+{
+ u32 gen_ctrl = REGV_RD32(MTL_VPU_HOST_SS_GEN_CTRL);
+ u32 platform = REG_GET_FLD(MTL_VPU_HOST_SS_GEN_CTRL, PS, gen_ctrl);
+
+ if (platform == IVPU_PLATFORM_SIMICS || platform == IVPU_PLATFORM_FPGA)
+ vdev->platform = platform;
+ else
+ vdev->platform = IVPU_PLATFORM_SILICON;
+
+ ivpu_dbg(vdev, MISC, "Platform type: %s (%d)\n",
+ ivpu_platform_to_str(vdev->platform), vdev->platform);
+}
+
+static void ivpu_hw_wa_init(struct ivpu_device *vdev)
+{
+ vdev->wa.punit_disabled = ivpu_is_fpga(vdev);
+ vdev->wa.clear_runtime_mem = false;
+}
+
+static void ivpu_hw_timeouts_init(struct ivpu_device *vdev)
+{
+ if (ivpu_is_simics(vdev) || ivpu_is_fpga(vdev)) {
+ vdev->timeout.boot = 100000;
+ vdev->timeout.jsm = 50000;
+ vdev->timeout.tdr = 2000000;
+ vdev->timeout.reschedule_suspend = 1000;
+ } else {
+ vdev->timeout.boot = 1000;
+ vdev->timeout.jsm = 500;
+ vdev->timeout.tdr = 2000;
+ vdev->timeout.reschedule_suspend = 10;
+ }
+}
+
+static int ivpu_pll_wait_for_cmd_send(struct ivpu_device *vdev)
+{
+ return REGB_POLL_FLD(MTL_BUTTRESS_WP_REQ_CMD, SEND, 0, PLL_TIMEOUT_US);
+}
+
+/* Send KMD initiated workpoint change */
+static int ivpu_pll_cmd_send(struct ivpu_device *vdev, u16 min_ratio, u16 max_ratio,
+ u16 target_ratio, u16 config)
+{
+ int ret;
+ u32 val;
+
+ ret = ivpu_pll_wait_for_cmd_send(vdev);
+ if (ret) {
+ ivpu_err(vdev, "Failed to sync before WP request: %d\n", ret);
+ return ret;
+ }
+
+ val = REGB_RD32(MTL_BUTTRESS_WP_REQ_PAYLOAD0);
+ val = REG_SET_FLD_NUM(MTL_BUTTRESS_WP_REQ_PAYLOAD0, MIN_RATIO, min_ratio, val);
+ val = REG_SET_FLD_NUM(MTL_BUTTRESS_WP_REQ_PAYLOAD0, MAX_RATIO, max_ratio, val);
+ REGB_WR32(MTL_BUTTRESS_WP_REQ_PAYLOAD0, val);
+
+ val = REGB_RD32(MTL_BUTTRESS_WP_REQ_PAYLOAD1);
+ val = REG_SET_FLD_NUM(MTL_BUTTRESS_WP_REQ_PAYLOAD1, TARGET_RATIO, target_ratio, val);
+ val = REG_SET_FLD_NUM(MTL_BUTTRESS_WP_REQ_PAYLOAD1, EPP, PLL_DEFAULT_EPP_VALUE, val);
+ REGB_WR32(MTL_BUTTRESS_WP_REQ_PAYLOAD1, val);
+
+ val = REGB_RD32(MTL_BUTTRESS_WP_REQ_PAYLOAD2);
+ val = REG_SET_FLD_NUM(MTL_BUTTRESS_WP_REQ_PAYLOAD2, CONFIG, config, val);
+ REGB_WR32(MTL_BUTTRESS_WP_REQ_PAYLOAD2, val);
+
+ val = REGB_RD32(MTL_BUTTRESS_WP_REQ_CMD);
+ val = REG_SET_FLD(MTL_BUTTRESS_WP_REQ_CMD, SEND, val);
+ REGB_WR32(MTL_BUTTRESS_WP_REQ_CMD, val);
+
+ ret = ivpu_pll_wait_for_cmd_send(vdev);
+ if (ret)
+ ivpu_err(vdev, "Failed to sync after WP request: %d\n", ret);
+
+ return ret;
+}
+
+static int ivpu_pll_wait_for_lock(struct ivpu_device *vdev, bool enable)
+{
+ u32 exp_val = enable ? 0x1 : 0x0;
+
+ if (IVPU_WA(punit_disabled))
+ return 0;
+
+ return REGB_POLL_FLD(MTL_BUTTRESS_PLL_STATUS, LOCK, exp_val, PLL_TIMEOUT_US);
+}
+
+static int ivpu_pll_wait_for_status_ready(struct ivpu_device *vdev)
+{
+ if (IVPU_WA(punit_disabled))
+ return 0;
+
+ return REGB_POLL_FLD(MTL_BUTTRESS_VPU_STATUS, READY, 1, PLL_TIMEOUT_US);
+}
+
+static void ivpu_pll_init_frequency_ratios(struct ivpu_device *vdev)
+{
+ struct ivpu_hw_info *hw = vdev->hw;
+ u8 fuse_min_ratio, fuse_max_ratio, fuse_pn_ratio;
+ u32 fmin_fuse, fmax_fuse;
+
+ fmin_fuse = REGB_RD32(MTL_BUTTRESS_FMIN_FUSE);
+ fuse_min_ratio = REG_GET_FLD(MTL_BUTTRESS_FMIN_FUSE, MIN_RATIO, fmin_fuse);
+ fuse_pn_ratio = REG_GET_FLD(MTL_BUTTRESS_FMIN_FUSE, PN_RATIO, fmin_fuse);
+
+ fmax_fuse = REGB_RD32(MTL_BUTTRESS_FMAX_FUSE);
+ fuse_max_ratio = REG_GET_FLD(MTL_BUTTRESS_FMAX_FUSE, MAX_RATIO, fmax_fuse);
+
+ hw->pll.min_ratio = clamp_t(u8, ivpu_pll_min_ratio, fuse_min_ratio, fuse_max_ratio);
+ hw->pll.max_ratio = clamp_t(u8, ivpu_pll_max_ratio, hw->pll.min_ratio, fuse_max_ratio);
+ hw->pll.pn_ratio = clamp_t(u8, fuse_pn_ratio, hw->pll.min_ratio, hw->pll.max_ratio);
+}
+
+static int ivpu_pll_drive(struct ivpu_device *vdev, bool enable)
+{
+ struct ivpu_hw_info *hw = vdev->hw;
+ u16 target_ratio;
+ u16 config;
+ int ret;
+
+ if (IVPU_WA(punit_disabled)) {
+ ivpu_dbg(vdev, PM, "Skipping PLL request on %s\n",
+ ivpu_platform_to_str(vdev->platform));
+ return 0;
+ }
+
+ if (enable) {
+ target_ratio = hw->pll.pn_ratio;
+ config = hw->config;
+ } else {
+ target_ratio = 0;
+ config = 0;
+ }
+
+ ivpu_dbg(vdev, PM, "PLL workpoint request: %d Hz\n", PLL_RATIO_TO_FREQ(target_ratio));
+
+ ret = ivpu_pll_cmd_send(vdev, hw->pll.min_ratio, hw->pll.max_ratio, target_ratio, config);
+ if (ret) {
+ ivpu_err(vdev, "Failed to send PLL workpoint request: %d\n", ret);
+ return ret;
+ }
+
+ ret = ivpu_pll_wait_for_lock(vdev, enable);
+ if (ret) {
+ ivpu_err(vdev, "Timed out waiting for PLL lock\n");
+ return ret;
+ }
+
+ if (enable) {
+ ret = ivpu_pll_wait_for_status_ready(vdev);
+ if (ret) {
+ ivpu_err(vdev, "Timed out waiting for PLL ready status\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ivpu_pll_enable(struct ivpu_device *vdev)
+{
+ return ivpu_pll_drive(vdev, true);
+}
+
+static int ivpu_pll_disable(struct ivpu_device *vdev)
+{
+ return ivpu_pll_drive(vdev, false);
+}
+
+static void ivpu_boot_host_ss_rst_clr_assert(struct ivpu_device *vdev)
+{
+ u32 val = REGV_RD32(MTL_VPU_HOST_SS_CPR_RST_CLR);
+
+ val = REG_SET_FLD(MTL_VPU_HOST_SS_CPR_RST_CLR, TOP_NOC, val);
+ val = REG_SET_FLD(MTL_VPU_HOST_SS_CPR_RST_CLR, DSS_MAS, val);
+ val = REG_SET_FLD(MTL_VPU_HOST_SS_CPR_RST_CLR, MSS_MAS, val);
+
+ REGV_WR32(MTL_VPU_HOST_SS_CPR_RST_CLR, val);
+}
+
+static void ivpu_boot_host_ss_rst_drive(struct ivpu_device *vdev, bool enable)
+{
+ u32 val = REGV_RD32(MTL_VPU_HOST_SS_CPR_RST_SET);
+
+ if (enable) {
+ val = REG_SET_FLD(MTL_VPU_HOST_SS_CPR_RST_SET, TOP_NOC, val);
+ val = REG_SET_FLD(MTL_VPU_HOST_SS_CPR_RST_SET, DSS_MAS, val);
+ val = REG_SET_FLD(MTL_VPU_HOST_SS_CPR_RST_SET, MSS_MAS, val);
+ } else {
+ val = REG_CLR_FLD(MTL_VPU_HOST_SS_CPR_RST_SET, TOP_NOC, val);
+ val = REG_CLR_FLD(MTL_VPU_HOST_SS_CPR_RST_SET, DSS_MAS, val);
+ val = REG_CLR_FLD(MTL_VPU_HOST_SS_CPR_RST_SET, MSS_MAS, val);
+ }
+
+ REGV_WR32(MTL_VPU_HOST_SS_CPR_RST_SET, val);
+}
+
+static void ivpu_boot_host_ss_clk_drive(struct ivpu_device *vdev, bool enable)
+{
+ u32 val = REGV_RD32(MTL_VPU_HOST_SS_CPR_CLK_SET);
+
+ if (enable) {
+ val = REG_SET_FLD(MTL_VPU_HOST_SS_CPR_CLK_SET, TOP_NOC, val);
+ val = REG_SET_FLD(MTL_VPU_HOST_SS_CPR_CLK_SET, DSS_MAS, val);
+ val = REG_SET_FLD(MTL_VPU_HOST_SS_CPR_CLK_SET, MSS_MAS, val);
+ } else {
+ val = REG_CLR_FLD(MTL_VPU_HOST_SS_CPR_CLK_SET, TOP_NOC, val);
+ val = REG_CLR_FLD(MTL_VPU_HOST_SS_CPR_CLK_SET, DSS_MAS, val);
+ val = REG_CLR_FLD(MTL_VPU_HOST_SS_CPR_CLK_SET, MSS_MAS, val);
+ }
+
+ REGV_WR32(MTL_VPU_HOST_SS_CPR_CLK_SET, val);
+}
+
+static int ivpu_boot_noc_qreqn_check(struct ivpu_device *vdev, u32 exp_val)
+{
+ u32 val = REGV_RD32(MTL_VPU_HOST_SS_NOC_QREQN);
+
+ if (!REG_TEST_FLD_NUM(MTL_VPU_HOST_SS_NOC_QREQN, TOP_SOCMMIO, exp_val, val))
+ return -EIO;
+
+ return 0;
+}
+
+static int ivpu_boot_noc_qacceptn_check(struct ivpu_device *vdev, u32 exp_val)
+{
+ u32 val = REGV_RD32(MTL_VPU_HOST_SS_NOC_QACCEPTN);
+
+ if (!REG_TEST_FLD_NUM(MTL_VPU_HOST_SS_NOC_QACCEPTN, TOP_SOCMMIO, exp_val, val))
+ return -EIO;
+
+ return 0;
+}
+
+static int ivpu_boot_noc_qdeny_check(struct ivpu_device *vdev, u32 exp_val)
+{
+ u32 val = REGV_RD32(MTL_VPU_HOST_SS_NOC_QDENY);
+
+ if (!REG_TEST_FLD_NUM(MTL_VPU_HOST_SS_NOC_QDENY, TOP_SOCMMIO, exp_val, val))
+ return -EIO;
+
+ return 0;
+}
+
+static int ivpu_boot_top_noc_qrenqn_check(struct ivpu_device *vdev, u32 exp_val)
+{
+ u32 val = REGV_RD32(MTL_VPU_TOP_NOC_QREQN);
+
+ if (!REG_TEST_FLD_NUM(MTL_VPU_TOP_NOC_QREQN, CPU_CTRL, exp_val, val) ||
+ !REG_TEST_FLD_NUM(MTL_VPU_TOP_NOC_QREQN, HOSTIF_L2CACHE, exp_val, val))
+ return -EIO;
+
+ return 0;
+}
+
+static int ivpu_boot_top_noc_qacceptn_check(struct ivpu_device *vdev, u32 exp_val)
+{
+ u32 val = REGV_RD32(MTL_VPU_TOP_NOC_QACCEPTN);
+
+ if (!REG_TEST_FLD_NUM(MTL_VPU_TOP_NOC_QACCEPTN, CPU_CTRL, exp_val, val) ||
+ !REG_TEST_FLD_NUM(MTL_VPU_TOP_NOC_QACCEPTN, HOSTIF_L2CACHE, exp_val, val))
+ return -EIO;
+
+ return 0;
+}
+
+static int ivpu_boot_top_noc_qdeny_check(struct ivpu_device *vdev, u32 exp_val)
+{
+ u32 val = REGV_RD32(MTL_VPU_TOP_NOC_QDENY);
+
+ if (!REG_TEST_FLD_NUM(MTL_VPU_TOP_NOC_QDENY, CPU_CTRL, exp_val, val) ||
+ !REG_TEST_FLD_NUM(MTL_VPU_TOP_NOC_QDENY, HOSTIF_L2CACHE, exp_val, val))
+ return -EIO;
+
+ return 0;
+}
+
+static int ivpu_boot_host_ss_configure(struct ivpu_device *vdev)
+{
+ ivpu_boot_host_ss_rst_clr_assert(vdev);
+
+ return ivpu_boot_noc_qreqn_check(vdev, 0x0);
+}
+
+static void ivpu_boot_vpu_idle_gen_disable(struct ivpu_device *vdev)
+{
+ REGV_WR32(MTL_VPU_HOST_SS_AON_VPU_IDLE_GEN, 0x0);
+}
+
+static int ivpu_boot_host_ss_axi_drive(struct ivpu_device *vdev, bool enable)
+{
+ int ret;
+ u32 val;
+
+ val = REGV_RD32(MTL_VPU_HOST_SS_NOC_QREQN);
+ if (enable)
+ val = REG_SET_FLD(MTL_VPU_HOST_SS_NOC_QREQN, TOP_SOCMMIO, val);
+ else
+ val = REG_CLR_FLD(MTL_VPU_HOST_SS_NOC_QREQN, TOP_SOCMMIO, val);
+ REGV_WR32(MTL_VPU_HOST_SS_NOC_QREQN, val);
+
+ ret = ivpu_boot_noc_qacceptn_check(vdev, enable ? 0x1 : 0x0);
+ if (ret) {
+ ivpu_err(vdev, "Failed qacceptn check: %d\n", ret);
+ return ret;
+ }
+
+ ret = ivpu_boot_noc_qdeny_check(vdev, 0x0);
+ if (ret)
+ ivpu_err(vdev, "Failed qdeny check: %d\n", ret);
+
+ return ret;
+}
+
+static int ivpu_boot_host_ss_axi_enable(struct ivpu_device *vdev)
+{
+ return ivpu_boot_host_ss_axi_drive(vdev, true);
+}
+
+static int ivpu_boot_host_ss_axi_disable(struct ivpu_device *vdev)
+{
+ return ivpu_boot_host_ss_axi_drive(vdev, false);
+}
+
+static int ivpu_boot_host_ss_top_noc_drive(struct ivpu_device *vdev, bool enable)
+{
+ int ret;
+ u32 val;
+
+ val = REGV_RD32(MTL_VPU_TOP_NOC_QREQN);
+ if (enable) {
+ val = REG_SET_FLD(MTL_VPU_TOP_NOC_QREQN, CPU_CTRL, val);
+ val = REG_SET_FLD(MTL_VPU_TOP_NOC_QREQN, HOSTIF_L2CACHE, val);
+ } else {
+ val = REG_CLR_FLD(MTL_VPU_TOP_NOC_QREQN, CPU_CTRL, val);
+ val = REG_CLR_FLD(MTL_VPU_TOP_NOC_QREQN, HOSTIF_L2CACHE, val);
+ }
+ REGV_WR32(MTL_VPU_TOP_NOC_QREQN, val);
+
+ ret = ivpu_boot_top_noc_qacceptn_check(vdev, enable ? 0x1 : 0x0);
+ if (ret) {
+ ivpu_err(vdev, "Failed qacceptn check: %d\n", ret);
+ return ret;
+ }
+
+ ret = ivpu_boot_top_noc_qdeny_check(vdev, 0x0);
+ if (ret)
+ ivpu_err(vdev, "Failed qdeny check: %d\n", ret);
+
+ return ret;
+}
+
+static int ivpu_boot_host_ss_top_noc_enable(struct ivpu_device *vdev)
+{
+ return ivpu_boot_host_ss_top_noc_drive(vdev, true);
+}
+
+static int ivpu_boot_host_ss_top_noc_disable(struct ivpu_device *vdev)
+{
+ return ivpu_boot_host_ss_top_noc_drive(vdev, false);
+}
+
+static void ivpu_boot_pwr_island_trickle_drive(struct ivpu_device *vdev, bool enable)
+{
+ u32 val = REGV_RD32(MTL_VPU_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0);
+
+ if (enable)
+ val = REG_SET_FLD(MTL_VPU_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, MSS_CPU, val);
+ else
+ val = REG_CLR_FLD(MTL_VPU_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, MSS_CPU, val);
+
+ REGV_WR32(MTL_VPU_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, val);
+}
+
+static void ivpu_boot_pwr_island_drive(struct ivpu_device *vdev, bool enable)
+{
+ u32 val = REGV_RD32(MTL_VPU_HOST_SS_AON_PWR_ISLAND_EN0);
+
+ if (enable)
+ val = REG_SET_FLD(MTL_VPU_HOST_SS_AON_PWR_ISLAND_EN0, MSS_CPU, val);
+ else
+ val = REG_CLR_FLD(MTL_VPU_HOST_SS_AON_PWR_ISLAND_EN0, MSS_CPU, val);
+
+ REGV_WR32(MTL_VPU_HOST_SS_AON_PWR_ISLAND_EN0, val);
+}
+
+static int ivpu_boot_wait_for_pwr_island_status(struct ivpu_device *vdev, u32 exp_val)
+{
+ /* FPGA model (UPF) is not power aware, skipped Power Island polling */
+ if (ivpu_is_fpga(vdev))
+ return 0;
+
+ return REGV_POLL_FLD(MTL_VPU_HOST_SS_AON_PWR_ISLAND_STATUS0, MSS_CPU,
+ exp_val, PWR_ISLAND_STATUS_TIMEOUT_US);
+}
+
+static void ivpu_boot_pwr_island_isolation_drive(struct ivpu_device *vdev, bool enable)
+{
+ u32 val = REGV_RD32(MTL_VPU_HOST_SS_AON_PWR_ISO_EN0);
+
+ if (enable)
+ val = REG_SET_FLD(MTL_VPU_HOST_SS_AON_PWR_ISO_EN0, MSS_CPU, val);
+ else
+ val = REG_CLR_FLD(MTL_VPU_HOST_SS_AON_PWR_ISO_EN0, MSS_CPU, val);
+
+ REGV_WR32(MTL_VPU_HOST_SS_AON_PWR_ISO_EN0, val);
+}
+
+static void ivpu_boot_dpu_active_drive(struct ivpu_device *vdev, bool enable)
+{
+ u32 val = REGV_RD32(MTL_VPU_HOST_SS_AON_DPU_ACTIVE);
+
+ if (enable)
+ val = REG_SET_FLD(MTL_VPU_HOST_SS_AON_DPU_ACTIVE, DPU_ACTIVE, val);
+ else
+ val = REG_CLR_FLD(MTL_VPU_HOST_SS_AON_DPU_ACTIVE, DPU_ACTIVE, val);
+
+ REGV_WR32(MTL_VPU_HOST_SS_AON_DPU_ACTIVE, val);
+}
+
+static int ivpu_boot_pwr_domain_disable(struct ivpu_device *vdev)
+{
+ ivpu_boot_dpu_active_drive(vdev, false);
+ ivpu_boot_pwr_island_isolation_drive(vdev, true);
+ ivpu_boot_pwr_island_trickle_drive(vdev, false);
+ ivpu_boot_pwr_island_drive(vdev, false);
+
+ return ivpu_boot_wait_for_pwr_island_status(vdev, 0x0);
+}
+
+static int ivpu_boot_pwr_domain_enable(struct ivpu_device *vdev)
+{
+ int ret;
+
+ ivpu_boot_pwr_island_trickle_drive(vdev, true);
+ ivpu_boot_pwr_island_drive(vdev, true);
+
+ ret = ivpu_boot_wait_for_pwr_island_status(vdev, 0x1);
+ if (ret) {
+ ivpu_err(vdev, "Timed out waiting for power island status\n");
+ return ret;
+ }
+
+ ret = ivpu_boot_top_noc_qrenqn_check(vdev, 0x0);
+ if (ret) {
+ ivpu_err(vdev, "Failed qrenqn check %d\n", ret);
+ return ret;
+ }
+
+ ivpu_boot_host_ss_clk_drive(vdev, true);
+ ivpu_boot_pwr_island_isolation_drive(vdev, false);
+ ivpu_boot_host_ss_rst_drive(vdev, true);
+ ivpu_boot_dpu_active_drive(vdev, true);
+
+ return ret;
+}
+
+static void ivpu_boot_no_snoop_enable(struct ivpu_device *vdev)
+{
+ u32 val = REGV_RD32(MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES);
+
+ val = REG_SET_FLD(MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES, NOSNOOP_OVERRIDE_EN, val);
+ val = REG_SET_FLD(MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES, AW_NOSNOOP_OVERRIDE, val);
+ val = REG_SET_FLD(MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES, AR_NOSNOOP_OVERRIDE, val);
+
+ REGV_WR32(MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES, val);
+}
+
+static void ivpu_boot_tbu_mmu_enable(struct ivpu_device *vdev)
+{
+ u32 val = REGV_RD32(MTL_VPU_HOST_IF_TBU_MMUSSIDV);
+
+ if (ivpu_is_fpga(vdev)) {
+ val = REG_SET_FLD(MTL_VPU_HOST_IF_TBU_MMUSSIDV, TBU0_AWMMUSSIDV, val);
+ val = REG_SET_FLD(MTL_VPU_HOST_IF_TBU_MMUSSIDV, TBU0_ARMMUSSIDV, val);
+ val = REG_SET_FLD(MTL_VPU_HOST_IF_TBU_MMUSSIDV, TBU2_AWMMUSSIDV, val);
+ val = REG_SET_FLD(MTL_VPU_HOST_IF_TBU_MMUSSIDV, TBU2_ARMMUSSIDV, val);
+ } else {
+ val = REG_SET_FLD(MTL_VPU_HOST_IF_TBU_MMUSSIDV, TBU0_AWMMUSSIDV, val);
+ val = REG_SET_FLD(MTL_VPU_HOST_IF_TBU_MMUSSIDV, TBU0_ARMMUSSIDV, val);
+ val = REG_SET_FLD(MTL_VPU_HOST_IF_TBU_MMUSSIDV, TBU1_AWMMUSSIDV, val);
+ val = REG_SET_FLD(MTL_VPU_HOST_IF_TBU_MMUSSIDV, TBU1_ARMMUSSIDV, val);
+ val = REG_SET_FLD(MTL_VPU_HOST_IF_TBU_MMUSSIDV, TBU2_AWMMUSSIDV, val);
+ val = REG_SET_FLD(MTL_VPU_HOST_IF_TBU_MMUSSIDV, TBU2_ARMMUSSIDV, val);
+ val = REG_SET_FLD(MTL_VPU_HOST_IF_TBU_MMUSSIDV, TBU3_AWMMUSSIDV, val);
+ val = REG_SET_FLD(MTL_VPU_HOST_IF_TBU_MMUSSIDV, TBU3_ARMMUSSIDV, val);
+ }
+
+ REGV_WR32(MTL_VPU_HOST_IF_TBU_MMUSSIDV, val);
+}
+
+static void ivpu_boot_soc_cpu_boot(struct ivpu_device *vdev)
+{
+ u32 val;
+
+ val = REGV_RD32(MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC);
+ val = REG_SET_FLD(MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, IRQI_RSTRUN0, val);
+
+ val = REG_CLR_FLD(MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, IRQI_RSTVEC, val);
+ REGV_WR32(MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, val);
+
+ val = REG_SET_FLD(MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, IRQI_RESUME0, val);
+ REGV_WR32(MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, val);
+
+ val = REG_CLR_FLD(MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, IRQI_RESUME0, val);
+ REGV_WR32(MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, val);
+
+ val = vdev->fw->entry_point >> 9;
+ REGV_WR32(MTL_VPU_HOST_SS_LOADING_ADDRESS_LO, val);
+
+ val = REG_SET_FLD(MTL_VPU_HOST_SS_LOADING_ADDRESS_LO, DONE, val);
+ REGV_WR32(MTL_VPU_HOST_SS_LOADING_ADDRESS_LO, val);
+
+ ivpu_dbg(vdev, PM, "Booting firmware, mode: %s\n",
+ vdev->fw->entry_point == vdev->fw->cold_boot_entry_point ? "cold boot" : "resume");
+}
+
+static int ivpu_boot_d0i3_drive(struct ivpu_device *vdev, bool enable)
+{
+ int ret;
+ u32 val;
+
+ ret = REGB_POLL_FLD(MTL_BUTTRESS_VPU_D0I3_CONTROL, INPROGRESS, 0, TIMEOUT_US);
+ if (ret) {
+ ivpu_err(vdev, "Failed to sync before D0i3 transition: %d\n", ret);
+ return ret;
+ }
+
+ val = REGB_RD32(MTL_BUTTRESS_VPU_D0I3_CONTROL);
+ if (enable)
+ val = REG_SET_FLD(MTL_BUTTRESS_VPU_D0I3_CONTROL, I3, val);
+ else
+ val = REG_CLR_FLD(MTL_BUTTRESS_VPU_D0I3_CONTROL, I3, val);
+ REGB_WR32(MTL_BUTTRESS_VPU_D0I3_CONTROL, val);
+
+ ret = REGB_POLL_FLD(MTL_BUTTRESS_VPU_D0I3_CONTROL, INPROGRESS, 0, TIMEOUT_US);
+ if (ret)
+ ivpu_err(vdev, "Failed to sync after D0i3 transition: %d\n", ret);
+
+ return ret;
+}
+
+static int ivpu_hw_mtl_info_init(struct ivpu_device *vdev)
+{
+ struct ivpu_hw_info *hw = vdev->hw;
+ u32 tile_fuse;
+
+ tile_fuse = REGB_RD32(MTL_BUTTRESS_TILE_FUSE);
+ if (!REG_TEST_FLD(MTL_BUTTRESS_TILE_FUSE, VALID, tile_fuse))
+ ivpu_warn(vdev, "Tile Fuse: Invalid (0x%x)\n", tile_fuse);
+
+ hw->tile_fuse = REG_GET_FLD(MTL_BUTTRESS_TILE_FUSE, SKU, tile_fuse);
+ switch (hw->tile_fuse) {
+ case TILE_FUSE_ENABLE_LOWER:
+ hw->sku = TILE_SKU_LOWER_MTL;
+ hw->config = WP_CONFIG_1_TILE_5_3_RATIO;
+ ivpu_dbg(vdev, MISC, "Tile Fuse: Enable Lower\n");
+ break;
+ case TILE_FUSE_ENABLE_UPPER:
+ hw->sku = TILE_SKU_UPPER_MTL;
+ hw->config = WP_CONFIG_1_TILE_4_3_RATIO;
+ ivpu_dbg(vdev, MISC, "Tile Fuse: Enable Upper\n");
+ break;
+ case TILE_FUSE_ENABLE_BOTH:
+ hw->sku = TILE_SKU_BOTH_MTL;
+ hw->config = WP_CONFIG_2_TILE_5_3_RATIO;
+ ivpu_dbg(vdev, MISC, "Tile Fuse: Enable Both\n");
+ break;
+ default:
+ hw->config = WP_CONFIG_0_TILE_PLL_OFF;
+ ivpu_dbg(vdev, MISC, "Tile Fuse: Disable\n");
+ break;
+ }
+
+ ivpu_pll_init_frequency_ratios(vdev);
+
+ ivpu_hw_init_range(&hw->ranges.global_low, 0x80000000, SZ_512M);
+ ivpu_hw_init_range(&hw->ranges.global_high, 0x180000000, SZ_2M);
+ ivpu_hw_init_range(&hw->ranges.user_low, 0xc0000000, 255 * SZ_1M);
+ ivpu_hw_init_range(&hw->ranges.user_high, 0x180000000, SZ_2G);
+ hw->ranges.global_aliased_pio = hw->ranges.user_low;
+
+ return 0;
+}
+
+static int ivpu_hw_mtl_reset(struct ivpu_device *vdev)
+{
+ int ret;
+ u32 val;
+
+ if (IVPU_WA(punit_disabled))
+ return 0;
+
+ ret = REGB_POLL_FLD(MTL_BUTTRESS_VPU_IP_RESET, TRIGGER, 0, TIMEOUT_US);
+ if (ret) {
+ ivpu_err(vdev, "Timed out waiting for TRIGGER bit\n");
+ return ret;
+ }
+
+ val = REGB_RD32(MTL_BUTTRESS_VPU_IP_RESET);
+ val = REG_SET_FLD(MTL_BUTTRESS_VPU_IP_RESET, TRIGGER, val);
+ REGB_WR32(MTL_BUTTRESS_VPU_IP_RESET, val);
+
+ ret = REGB_POLL_FLD(MTL_BUTTRESS_VPU_IP_RESET, TRIGGER, 0, TIMEOUT_US);
+ if (ret)
+ ivpu_err(vdev, "Timed out waiting for RESET completion\n");
+
+ return ret;
+}
+
+static int ivpu_hw_mtl_d0i3_enable(struct ivpu_device *vdev)
+{
+ int ret;
+
+ ret = ivpu_boot_d0i3_drive(vdev, true);
+ if (ret)
+ ivpu_err(vdev, "Failed to enable D0i3: %d\n", ret);
+
+ udelay(5); /* VPU requires 5 us to complete the transition */
+
+ return ret;
+}
+
+static int ivpu_hw_mtl_d0i3_disable(struct ivpu_device *vdev)
+{
+ int ret;
+
+ ret = ivpu_boot_d0i3_drive(vdev, false);
+ if (ret)
+ ivpu_err(vdev, "Failed to disable D0i3: %d\n", ret);
+
+ return ret;
+}
+
+static int ivpu_hw_mtl_power_up(struct ivpu_device *vdev)
+{
+ int ret;
+
+ ivpu_hw_read_platform(vdev);
+ ivpu_hw_wa_init(vdev);
+ ivpu_hw_timeouts_init(vdev);
+
+ ret = ivpu_hw_mtl_reset(vdev);
+ if (ret)
+ ivpu_warn(vdev, "Failed to reset HW: %d\n", ret);
+
+ ret = ivpu_hw_mtl_d0i3_disable(vdev);
+ if (ret)
+ ivpu_warn(vdev, "Failed to disable D0I3: %d\n", ret);
+
+ ret = ivpu_pll_enable(vdev);
+ if (ret) {
+ ivpu_err(vdev, "Failed to enable PLL: %d\n", ret);
+ return ret;
+ }
+
+ ret = ivpu_boot_host_ss_configure(vdev);
+ if (ret) {
+ ivpu_err(vdev, "Failed to configure host SS: %d\n", ret);
+ return ret;
+ }
+
+ /*
+ * The control circuitry for vpu_idle indication logic powers up active.
+ * To ensure unnecessary low power mode signal from LRT during bring up,
+ * KMD disables the circuitry prior to bringing up the Main Power island.
+ */
+ ivpu_boot_vpu_idle_gen_disable(vdev);
+
+ ret = ivpu_boot_pwr_domain_enable(vdev);
+ if (ret) {
+ ivpu_err(vdev, "Failed to enable power domain: %d\n", ret);
+ return ret;
+ }
+
+ ret = ivpu_boot_host_ss_axi_enable(vdev);
+ if (ret) {
+ ivpu_err(vdev, "Failed to enable AXI: %d\n", ret);
+ return ret;
+ }
+
+ ret = ivpu_boot_host_ss_top_noc_enable(vdev);
+ if (ret)
+ ivpu_err(vdev, "Failed to enable TOP NOC: %d\n", ret);
+
+ return ret;
+}
+
+static int ivpu_hw_mtl_boot_fw(struct ivpu_device *vdev)
+{
+ ivpu_boot_no_snoop_enable(vdev);
+ ivpu_boot_tbu_mmu_enable(vdev);
+ ivpu_boot_soc_cpu_boot(vdev);
+
+ return 0;
+}
+
+static bool ivpu_hw_mtl_is_idle(struct ivpu_device *vdev)
+{
+ u32 val;
+
+ if (IVPU_WA(punit_disabled))
+ return true;
+
+ val = REGB_RD32(MTL_BUTTRESS_VPU_STATUS);
+ return REG_TEST_FLD(MTL_BUTTRESS_VPU_STATUS, READY, val) &&
+ REG_TEST_FLD(MTL_BUTTRESS_VPU_STATUS, IDLE, val);
+}
+
+static int ivpu_hw_mtl_power_down(struct ivpu_device *vdev)
+{
+ int ret = 0;
+
+ /* FPGA requires manual clearing of IP_Reset bit by enabling quiescent state */
+ if (ivpu_is_fpga(vdev)) {
+ if (ivpu_boot_host_ss_top_noc_disable(vdev)) {
+ ivpu_err(vdev, "Failed to disable TOP NOC\n");
+ ret = -EIO;
+ }
+
+ if (ivpu_boot_host_ss_axi_disable(vdev)) {
+ ivpu_err(vdev, "Failed to disable AXI\n");
+ ret = -EIO;
+ }
+ }
+
+ if (ivpu_boot_pwr_domain_disable(vdev)) {
+ ivpu_err(vdev, "Failed to disable power domain\n");
+ ret = -EIO;
+ }
+
+ if (ivpu_pll_disable(vdev)) {
+ ivpu_err(vdev, "Failed to disable PLL\n");
+ ret = -EIO;
+ }
+
+ if (ivpu_hw_mtl_d0i3_enable(vdev))
+ ivpu_warn(vdev, "Failed to enable D0I3\n");
+
+ return ret;
+}
+
+static void ivpu_hw_mtl_wdt_disable(struct ivpu_device *vdev)
+{
+ u32 val;
+
+ /* Enable writing and set non-zero WDT value */
+ REGV_WR32(MTL_VPU_CPU_SS_TIM_SAFE, TIM_SAFE_ENABLE);
+ REGV_WR32(MTL_VPU_CPU_SS_TIM_WATCHDOG, TIM_WATCHDOG_RESET_VALUE);
+
+ /* Enable writing and disable watchdog timer */
+ REGV_WR32(MTL_VPU_CPU_SS_TIM_SAFE, TIM_SAFE_ENABLE);
+ REGV_WR32(MTL_VPU_CPU_SS_TIM_WDOG_EN, 0);
+
+ /* Now clear the timeout interrupt */
+ val = REGV_RD32(MTL_VPU_CPU_SS_TIM_GEN_CONFIG);
+ val = REG_CLR_FLD(MTL_VPU_CPU_SS_TIM_GEN_CONFIG, WDOG_TO_INT_CLR, val);
+ REGV_WR32(MTL_VPU_CPU_SS_TIM_GEN_CONFIG, val);
+}
+
+/* Register indirect accesses */
+static u32 ivpu_hw_mtl_reg_pll_freq_get(struct ivpu_device *vdev)
+{
+ u32 pll_curr_ratio;
+
+ pll_curr_ratio = REGB_RD32(MTL_BUTTRESS_CURRENT_PLL);
+ pll_curr_ratio &= MTL_BUTTRESS_CURRENT_PLL_RATIO_MASK;
+
+ if (!ivpu_is_silicon(vdev))
+ return PLL_SIMULATION_FREQ;
+
+ return PLL_RATIO_TO_FREQ(pll_curr_ratio);
+}
+
+static u32 ivpu_hw_mtl_reg_telemetry_offset_get(struct ivpu_device *vdev)
+{
+ return REGB_RD32(MTL_BUTTRESS_VPU_TELEMETRY_OFFSET);
+}
+
+static u32 ivpu_hw_mtl_reg_telemetry_size_get(struct ivpu_device *vdev)
+{
+ return REGB_RD32(MTL_BUTTRESS_VPU_TELEMETRY_SIZE);
+}
+
+static u32 ivpu_hw_mtl_reg_telemetry_enable_get(struct ivpu_device *vdev)
+{
+ return REGB_RD32(MTL_BUTTRESS_VPU_TELEMETRY_ENABLE);
+}
+
+static void ivpu_hw_mtl_reg_db_set(struct ivpu_device *vdev, u32 db_id)
+{
+ u32 reg_stride = MTL_VPU_CPU_SS_DOORBELL_1 - MTL_VPU_CPU_SS_DOORBELL_0;
+ u32 val = REG_FLD(MTL_VPU_CPU_SS_DOORBELL_0, SET);
+
+ REGV_WR32I(MTL_VPU_CPU_SS_DOORBELL_0, reg_stride, db_id, val);
+}
+
+static u32 ivpu_hw_mtl_reg_ipc_rx_addr_get(struct ivpu_device *vdev)
+{
+ return REGV_RD32(MTL_VPU_HOST_SS_TIM_IPC_FIFO_ATM);
+}
+
+static u32 ivpu_hw_mtl_reg_ipc_rx_count_get(struct ivpu_device *vdev)
+{
+ u32 count = REGV_RD32_SILENT(MTL_VPU_HOST_SS_TIM_IPC_FIFO_STAT);
+
+ return REG_GET_FLD(MTL_VPU_HOST_SS_TIM_IPC_FIFO_STAT, FILL_LEVEL, count);
+}
+
+static void ivpu_hw_mtl_reg_ipc_tx_set(struct ivpu_device *vdev, u32 vpu_addr)
+{
+ REGV_WR32(MTL_VPU_CPU_SS_TIM_IPC_FIFO, vpu_addr);
+}
+
+static void ivpu_hw_mtl_irq_clear(struct ivpu_device *vdev)
+{
+ REGV_WR64(MTL_VPU_HOST_SS_ICB_CLEAR_0, ICB_0_1_IRQ_MASK);
+}
+
+static void ivpu_hw_mtl_irq_enable(struct ivpu_device *vdev)
+{
+ REGV_WR32(MTL_VPU_HOST_SS_FW_SOC_IRQ_EN, ITF_FIREWALL_VIOLATION_MASK);
+ REGV_WR64(MTL_VPU_HOST_SS_ICB_ENABLE_0, ICB_0_1_IRQ_MASK);
+ REGB_WR32(MTL_BUTTRESS_LOCAL_INT_MASK, BUTTRESS_IRQ_ENABLE_MASK);
+ REGB_WR32(MTL_BUTTRESS_GLOBAL_INT_MASK, 0x0);
+}
+
+static void ivpu_hw_mtl_irq_disable(struct ivpu_device *vdev)
+{
+ REGB_WR32(MTL_BUTTRESS_GLOBAL_INT_MASK, 0x1);
+ REGB_WR32(MTL_BUTTRESS_LOCAL_INT_MASK, BUTTRESS_IRQ_DISABLE_MASK);
+ REGV_WR64(MTL_VPU_HOST_SS_ICB_ENABLE_0, 0x0ull);
+ REGB_WR32(MTL_VPU_HOST_SS_FW_SOC_IRQ_EN, 0x0);
+}
+
+static void ivpu_hw_mtl_irq_wdt_nce_handler(struct ivpu_device *vdev)
+{
+ ivpu_err_ratelimited(vdev, "WDT NCE irq\n");
+
+ ivpu_pm_schedule_recovery(vdev);
+}
+
+static void ivpu_hw_mtl_irq_wdt_mss_handler(struct ivpu_device *vdev)
+{
+ ivpu_err_ratelimited(vdev, "WDT MSS irq\n");
+
+ ivpu_hw_wdt_disable(vdev);
+ ivpu_pm_schedule_recovery(vdev);
+}
+
+static void ivpu_hw_mtl_irq_noc_firewall_handler(struct ivpu_device *vdev)
+{
+ ivpu_err_ratelimited(vdev, "NOC Firewall irq\n");
+
+ ivpu_pm_schedule_recovery(vdev);
+}
+
+/* Handler for IRQs from VPU core (irqV) */
+static u32 ivpu_hw_mtl_irqv_handler(struct ivpu_device *vdev, int irq)
+{
+ u32 status = REGV_RD32(MTL_VPU_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK;
+
+ REGV_WR32(MTL_VPU_HOST_SS_ICB_CLEAR_0, status);
+
+ if (REG_TEST_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, MMU_IRQ_0_INT, status))
+ ivpu_mmu_irq_evtq_handler(vdev);
+
+ if (REG_TEST_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT, status))
+ ivpu_ipc_irq_handler(vdev);
+
+ if (REG_TEST_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, MMU_IRQ_1_INT, status))
+ ivpu_dbg(vdev, IRQ, "MMU sync complete\n");
+
+ if (REG_TEST_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, MMU_IRQ_2_INT, status))
+ ivpu_mmu_irq_gerr_handler(vdev);
+
+ if (REG_TEST_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT, status))
+ ivpu_hw_mtl_irq_wdt_mss_handler(vdev);
+
+ if (REG_TEST_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT, status))
+ ivpu_hw_mtl_irq_wdt_nce_handler(vdev);
+
+ if (REG_TEST_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT, status))
+ ivpu_hw_mtl_irq_noc_firewall_handler(vdev);
+
+ return status;
+}
+
+/* Handler for IRQs from Buttress core (irqB) */
+static u32 ivpu_hw_mtl_irqb_handler(struct ivpu_device *vdev, int irq)
+{
+ u32 status = REGB_RD32(MTL_BUTTRESS_INTERRUPT_STAT) & BUTTRESS_IRQ_MASK;
+ bool schedule_recovery = false;
+
+ if (status == 0)
+ return 0;
+
+ /* Disable global interrupt before handling local buttress interrupts */
+ REGB_WR32(MTL_BUTTRESS_GLOBAL_INT_MASK, 0x1);
+
+ if (REG_TEST_FLD(MTL_BUTTRESS_INTERRUPT_STAT, FREQ_CHANGE, status))
+ ivpu_dbg(vdev, IRQ, "FREQ_CHANGE irq: %08x", REGB_RD32(MTL_BUTTRESS_CURRENT_PLL));
+
+ if (REG_TEST_FLD(MTL_BUTTRESS_INTERRUPT_STAT, ATS_ERR, status)) {
+ ivpu_err(vdev, "ATS_ERR irq 0x%016llx", REGB_RD64(MTL_BUTTRESS_ATS_ERR_LOG_0));
+ REGB_WR32(MTL_BUTTRESS_ATS_ERR_CLEAR, 0x1);
+ schedule_recovery = true;
+ }
+
+ if (REG_TEST_FLD(MTL_BUTTRESS_INTERRUPT_STAT, UFI_ERR, status)) {
+ u32 ufi_log = REGB_RD32(MTL_BUTTRESS_UFI_ERR_LOG);
+
+ ivpu_err(vdev, "UFI_ERR irq (0x%08x) opcode: 0x%02lx axi_id: 0x%02lx cq_id: 0x%03lx",
+ ufi_log, REG_GET_FLD(MTL_BUTTRESS_UFI_ERR_LOG, OPCODE, ufi_log),
+ REG_GET_FLD(MTL_BUTTRESS_UFI_ERR_LOG, AXI_ID, ufi_log),
+ REG_GET_FLD(MTL_BUTTRESS_UFI_ERR_LOG, CQ_ID, ufi_log));
+ REGB_WR32(MTL_BUTTRESS_UFI_ERR_CLEAR, 0x1);
+ schedule_recovery = true;
+ }
+
+ /*
+ * Clear local interrupt status by writing 0 to all bits.
+ * This must be done after interrupts are cleared at the source.
+ * Writing 1 triggers an interrupt, so we can't perform read update write.
+ */
+ REGB_WR32(MTL_BUTTRESS_INTERRUPT_STAT, 0x0);
+
+ /* Re-enable global interrupt */
+ REGB_WR32(MTL_BUTTRESS_GLOBAL_INT_MASK, 0x0);
+
+ if (schedule_recovery)
+ ivpu_pm_schedule_recovery(vdev);
+
+ return status;
+}
+
+static irqreturn_t ivpu_hw_mtl_irq_handler(int irq, void *ptr)
+{
+ struct ivpu_device *vdev = ptr;
+ u32 ret_irqv, ret_irqb;
+
+ ret_irqv = ivpu_hw_mtl_irqv_handler(vdev, irq);
+ ret_irqb = ivpu_hw_mtl_irqb_handler(vdev, irq);
+
+ return IRQ_RETVAL(ret_irqb | ret_irqv);
+}
+
+static void ivpu_hw_mtl_diagnose_failure(struct ivpu_device *vdev)
+{
+ u32 irqv = REGV_RD32(MTL_VPU_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK;
+ u32 irqb = REGB_RD32(MTL_BUTTRESS_INTERRUPT_STAT) & BUTTRESS_IRQ_MASK;
+
+ if (ivpu_hw_mtl_reg_ipc_rx_count_get(vdev))
+ ivpu_err(vdev, "IPC FIFO queue not empty, missed IPC IRQ");
+
+ if (REG_TEST_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT, irqv))
+ ivpu_err(vdev, "WDT MSS timeout detected\n");
+
+ if (REG_TEST_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT, irqv))
+ ivpu_err(vdev, "WDT NCE timeout detected\n");
+
+ if (REG_TEST_FLD(MTL_VPU_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT, irqv))
+ ivpu_err(vdev, "NOC Firewall irq detected\n");
+
+ if (REG_TEST_FLD(MTL_BUTTRESS_INTERRUPT_STAT, ATS_ERR, irqb))
+ ivpu_err(vdev, "ATS_ERR irq 0x%016llx", REGB_RD64(MTL_BUTTRESS_ATS_ERR_LOG_0));
+
+ if (REG_TEST_FLD(MTL_BUTTRESS_INTERRUPT_STAT, UFI_ERR, irqb)) {
+ u32 ufi_log = REGB_RD32(MTL_BUTTRESS_UFI_ERR_LOG);
+
+ ivpu_err(vdev, "UFI_ERR irq (0x%08x) opcode: 0x%02lx axi_id: 0x%02lx cq_id: 0x%03lx",
+ ufi_log, REG_GET_FLD(MTL_BUTTRESS_UFI_ERR_LOG, OPCODE, ufi_log),
+ REG_GET_FLD(MTL_BUTTRESS_UFI_ERR_LOG, AXI_ID, ufi_log),
+ REG_GET_FLD(MTL_BUTTRESS_UFI_ERR_LOG, CQ_ID, ufi_log));
+ }
+}
+
+const struct ivpu_hw_ops ivpu_hw_mtl_ops = {
+ .info_init = ivpu_hw_mtl_info_init,
+ .power_up = ivpu_hw_mtl_power_up,
+ .is_idle = ivpu_hw_mtl_is_idle,
+ .power_down = ivpu_hw_mtl_power_down,
+ .boot_fw = ivpu_hw_mtl_boot_fw,
+ .wdt_disable = ivpu_hw_mtl_wdt_disable,
+ .diagnose_failure = ivpu_hw_mtl_diagnose_failure,
+ .reg_pll_freq_get = ivpu_hw_mtl_reg_pll_freq_get,
+ .reg_telemetry_offset_get = ivpu_hw_mtl_reg_telemetry_offset_get,
+ .reg_telemetry_size_get = ivpu_hw_mtl_reg_telemetry_size_get,
+ .reg_telemetry_enable_get = ivpu_hw_mtl_reg_telemetry_enable_get,
+ .reg_db_set = ivpu_hw_mtl_reg_db_set,
+ .reg_ipc_rx_addr_get = ivpu_hw_mtl_reg_ipc_rx_addr_get,
+ .reg_ipc_rx_count_get = ivpu_hw_mtl_reg_ipc_rx_count_get,
+ .reg_ipc_tx_set = ivpu_hw_mtl_reg_ipc_tx_set,
+ .irq_clear = ivpu_hw_mtl_irq_clear,
+ .irq_enable = ivpu_hw_mtl_irq_enable,
+ .irq_disable = ivpu_hw_mtl_irq_disable,
+ .irq_handler = ivpu_hw_mtl_irq_handler,
+};
diff --git a/drivers/accel/ivpu/ivpu_hw_mtl_reg.h b/drivers/accel/ivpu/ivpu_hw_mtl_reg.h
new file mode 100644
index 000000000000..d83ccfd9a871
--- /dev/null
+++ b/drivers/accel/ivpu/ivpu_hw_mtl_reg.h
@@ -0,0 +1,280 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020-2023 Intel Corporation
+ */
+
+#ifndef __IVPU_HW_MTL_REG_H__
+#define __IVPU_HW_MTL_REG_H__
+
+#include <linux/bits.h>
+
+#define MTL_BUTTRESS_INTERRUPT_TYPE 0x00000000u
+
+#define MTL_BUTTRESS_INTERRUPT_STAT 0x00000004u
+#define MTL_BUTTRESS_INTERRUPT_STAT_FREQ_CHANGE_MASK BIT_MASK(0)
+#define MTL_BUTTRESS_INTERRUPT_STAT_ATS_ERR_MASK BIT_MASK(1)
+#define MTL_BUTTRESS_INTERRUPT_STAT_UFI_ERR_MASK BIT_MASK(2)
+
+#define MTL_BUTTRESS_WP_REQ_PAYLOAD0 0x00000008u
+#define MTL_BUTTRESS_WP_REQ_PAYLOAD0_MIN_RATIO_MASK GENMASK(15, 0)
+#define MTL_BUTTRESS_WP_REQ_PAYLOAD0_MAX_RATIO_MASK GENMASK(31, 16)
+
+#define MTL_BUTTRESS_WP_REQ_PAYLOAD1 0x0000000cu
+#define MTL_BUTTRESS_WP_REQ_PAYLOAD1_TARGET_RATIO_MASK GENMASK(15, 0)
+#define MTL_BUTTRESS_WP_REQ_PAYLOAD1_EPP_MASK GENMASK(31, 16)
+
+#define MTL_BUTTRESS_WP_REQ_PAYLOAD2 0x00000010u
+#define MTL_BUTTRESS_WP_REQ_PAYLOAD2_CONFIG_MASK GENMASK(15, 0)
+
+#define MTL_BUTTRESS_WP_REQ_CMD 0x00000014u
+#define MTL_BUTTRESS_WP_REQ_CMD_SEND_MASK BIT_MASK(0)
+
+#define MTL_BUTTRESS_WP_DOWNLOAD 0x00000018u
+#define MTL_BUTTRESS_WP_DOWNLOAD_TARGET_RATIO_MASK GENMASK(15, 0)
+
+#define MTL_BUTTRESS_CURRENT_PLL 0x0000001cu
+#define MTL_BUTTRESS_CURRENT_PLL_RATIO_MASK GENMASK(15, 0)
+
+#define MTL_BUTTRESS_PLL_ENABLE 0x00000020u
+
+#define MTL_BUTTRESS_FMIN_FUSE 0x00000024u
+#define MTL_BUTTRESS_FMIN_FUSE_MIN_RATIO_MASK GENMASK(7, 0)
+#define MTL_BUTTRESS_FMIN_FUSE_PN_RATIO_MASK GENMASK(15, 8)
+
+#define MTL_BUTTRESS_FMAX_FUSE 0x00000028u
+#define MTL_BUTTRESS_FMAX_FUSE_MAX_RATIO_MASK GENMASK(7, 0)
+
+#define MTL_BUTTRESS_TILE_FUSE 0x0000002cu
+#define MTL_BUTTRESS_TILE_FUSE_VALID_MASK BIT_MASK(0)
+#define MTL_BUTTRESS_TILE_FUSE_SKU_MASK GENMASK(3, 2)
+
+#define MTL_BUTTRESS_LOCAL_INT_MASK 0x00000030u
+#define MTL_BUTTRESS_GLOBAL_INT_MASK 0x00000034u
+
+#define MTL_BUTTRESS_PLL_STATUS 0x00000040u
+#define MTL_BUTTRESS_PLL_STATUS_LOCK_MASK BIT_MASK(1)
+
+#define MTL_BUTTRESS_VPU_STATUS 0x00000044u
+#define MTL_BUTTRESS_VPU_STATUS_READY_MASK BIT_MASK(0)
+#define MTL_BUTTRESS_VPU_STATUS_IDLE_MASK BIT_MASK(1)
+
+#define MTL_BUTTRESS_VPU_D0I3_CONTROL 0x00000060u
+#define MTL_BUTTRESS_VPU_D0I3_CONTROL_INPROGRESS_MASK BIT_MASK(0)
+#define MTL_BUTTRESS_VPU_D0I3_CONTROL_I3_MASK BIT_MASK(2)
+
+#define MTL_BUTTRESS_VPU_IP_RESET 0x00000050u
+#define MTL_BUTTRESS_VPU_IP_RESET_TRIGGER_MASK BIT_MASK(0)
+
+#define MTL_BUTTRESS_VPU_TELEMETRY_OFFSET 0x00000080u
+#define MTL_BUTTRESS_VPU_TELEMETRY_SIZE 0x00000084u
+#define MTL_BUTTRESS_VPU_TELEMETRY_ENABLE 0x00000088u
+
+#define MTL_BUTTRESS_ATS_ERR_LOG_0 0x000000a0u
+#define MTL_BUTTRESS_ATS_ERR_LOG_1 0x000000a4u
+#define MTL_BUTTRESS_ATS_ERR_CLEAR 0x000000a8u
+
+#define MTL_BUTTRESS_UFI_ERR_LOG 0x000000b0u
+#define MTL_BUTTRESS_UFI_ERR_LOG_CQ_ID_MASK GENMASK(11, 0)
+#define MTL_BUTTRESS_UFI_ERR_LOG_AXI_ID_MASK GENMASK(19, 12)
+#define MTL_BUTTRESS_UFI_ERR_LOG_OPCODE_MASK GENMASK(24, 20)
+
+#define MTL_BUTTRESS_UFI_ERR_CLEAR 0x000000b4u
+
+#define MTL_VPU_HOST_SS_CPR_CLK_SET 0x00000084u
+#define MTL_VPU_HOST_SS_CPR_CLK_SET_TOP_NOC_MASK BIT_MASK(1)
+#define MTL_VPU_HOST_SS_CPR_CLK_SET_DSS_MAS_MASK BIT_MASK(10)
+#define MTL_VPU_HOST_SS_CPR_CLK_SET_MSS_MAS_MASK BIT_MASK(11)
+
+#define MTL_VPU_HOST_SS_CPR_RST_SET 0x00000094u
+#define MTL_VPU_HOST_SS_CPR_RST_SET_TOP_NOC_MASK BIT_MASK(1)
+#define MTL_VPU_HOST_SS_CPR_RST_SET_DSS_MAS_MASK BIT_MASK(10)
+#define MTL_VPU_HOST_SS_CPR_RST_SET_MSS_MAS_MASK BIT_MASK(11)
+
+#define MTL_VPU_HOST_SS_CPR_RST_CLR 0x00000098u
+#define MTL_VPU_HOST_SS_CPR_RST_CLR_TOP_NOC_MASK BIT_MASK(1)
+#define MTL_VPU_HOST_SS_CPR_RST_CLR_DSS_MAS_MASK BIT_MASK(10)
+#define MTL_VPU_HOST_SS_CPR_RST_CLR_MSS_MAS_MASK BIT_MASK(11)
+
+#define MTL_VPU_HOST_SS_HW_VERSION 0x00000108u
+#define MTL_VPU_HOST_SS_HW_VERSION_SOC_REVISION_MASK GENMASK(7, 0)
+#define MTL_VPU_HOST_SS_HW_VERSION_SOC_NUMBER_MASK GENMASK(15, 8)
+#define MTL_VPU_HOST_SS_HW_VERSION_VPU_GENERATION_MASK GENMASK(23, 16)
+
+#define MTL_VPU_HOST_SS_GEN_CTRL 0x00000118u
+#define MTL_VPU_HOST_SS_GEN_CTRL_PS_MASK GENMASK(31, 29)
+
+#define MTL_VPU_HOST_SS_NOC_QREQN 0x00000154u
+#define MTL_VPU_HOST_SS_NOC_QREQN_TOP_SOCMMIO_MASK BIT_MASK(0)
+
+#define MTL_VPU_HOST_SS_NOC_QACCEPTN 0x00000158u
+#define MTL_VPU_HOST_SS_NOC_QACCEPTN_TOP_SOCMMIO_MASK BIT_MASK(0)
+
+#define MTL_VPU_HOST_SS_NOC_QDENY 0x0000015cu
+#define MTL_VPU_HOST_SS_NOC_QDENY_TOP_SOCMMIO_MASK BIT_MASK(0)
+
+#define MTL_VPU_TOP_NOC_QREQN 0x00000160u
+#define MTL_VPU_TOP_NOC_QREQN_CPU_CTRL_MASK BIT_MASK(0)
+#define MTL_VPU_TOP_NOC_QREQN_HOSTIF_L2CACHE_MASK BIT_MASK(1)
+
+#define MTL_VPU_TOP_NOC_QACCEPTN 0x00000164u
+#define MTL_VPU_TOP_NOC_QACCEPTN_CPU_CTRL_MASK BIT_MASK(0)
+#define MTL_VPU_TOP_NOC_QACCEPTN_HOSTIF_L2CACHE_MASK BIT_MASK(1)
+
+#define MTL_VPU_TOP_NOC_QDENY 0x00000168u
+#define MTL_VPU_TOP_NOC_QDENY_CPU_CTRL_MASK BIT_MASK(0)
+#define MTL_VPU_TOP_NOC_QDENY_HOSTIF_L2CACHE_MASK BIT_MASK(1)
+
+#define MTL_VPU_HOST_SS_FW_SOC_IRQ_EN 0x00000170u
+#define MTL_VPU_HOST_SS_FW_SOC_IRQ_EN_CSS_ROM_CMX_MASK BIT_MASK(0)
+#define MTL_VPU_HOST_SS_FW_SOC_IRQ_EN_CSS_DBG_MASK BIT_MASK(1)
+#define MTL_VPU_HOST_SS_FW_SOC_IRQ_EN_CSS_CTRL_MASK BIT_MASK(2)
+#define MTL_VPU_HOST_SS_FW_SOC_IRQ_EN_DEC400_MASK BIT_MASK(3)
+#define MTL_VPU_HOST_SS_FW_SOC_IRQ_EN_MSS_NCE_MASK BIT_MASK(4)
+#define MTL_VPU_HOST_SS_FW_SOC_IRQ_EN_MSS_MBI_MASK BIT_MASK(5)
+#define MTL_VPU_HOST_SS_FW_SOC_IRQ_EN_MSS_MBI_CMX_MASK BIT_MASK(6)
+
+#define MTL_VPU_HOST_SS_ICB_STATUS_0 0x00010210u
+#define MTL_VPU_HOST_SS_ICB_STATUS_0_TIMER_0_INT_MASK BIT_MASK(0)
+#define MTL_VPU_HOST_SS_ICB_STATUS_0_TIMER_1_INT_MASK BIT_MASK(1)
+#define MTL_VPU_HOST_SS_ICB_STATUS_0_TIMER_2_INT_MASK BIT_MASK(2)
+#define MTL_VPU_HOST_SS_ICB_STATUS_0_TIMER_3_INT_MASK BIT_MASK(3)
+#define MTL_VPU_HOST_SS_ICB_STATUS_0_HOST_IPC_FIFO_INT_MASK BIT_MASK(4)
+#define MTL_VPU_HOST_SS_ICB_STATUS_0_MMU_IRQ_0_INT_MASK BIT_MASK(5)
+#define MTL_VPU_HOST_SS_ICB_STATUS_0_MMU_IRQ_1_INT_MASK BIT_MASK(6)
+#define MTL_VPU_HOST_SS_ICB_STATUS_0_MMU_IRQ_2_INT_MASK BIT_MASK(7)
+#define MTL_VPU_HOST_SS_ICB_STATUS_0_NOC_FIREWALL_INT_MASK BIT_MASK(8)
+#define MTL_VPU_HOST_SS_ICB_STATUS_0_CPU_INT_REDIRECT_0_INT_MASK BIT_MASK(30)
+#define MTL_VPU_HOST_SS_ICB_STATUS_0_CPU_INT_REDIRECT_1_INT_MASK BIT_MASK(31)
+
+#define MTL_VPU_HOST_SS_ICB_STATUS_1 0x00010214u
+#define MTL_VPU_HOST_SS_ICB_STATUS_1_CPU_INT_REDIRECT_2_INT_MASK BIT_MASK(0)
+#define MTL_VPU_HOST_SS_ICB_STATUS_1_CPU_INT_REDIRECT_3_INT_MASK BIT_MASK(1)
+#define MTL_VPU_HOST_SS_ICB_STATUS_1_CPU_INT_REDIRECT_4_INT_MASK BIT_MASK(2)
+
+#define MTL_VPU_HOST_SS_ICB_CLEAR_0 0x00010220u
+#define MTL_VPU_HOST_SS_ICB_CLEAR_1 0x00010224u
+#define MTL_VPU_HOST_SS_ICB_ENABLE_0 0x00010240u
+
+#define MTL_VPU_HOST_SS_TIM_IPC_FIFO_ATM 0x000200f4u
+
+#define MTL_VPU_HOST_SS_TIM_IPC_FIFO_STAT 0x000200fcu
+#define MTL_VPU_HOST_SS_TIM_IPC_FIFO_STAT_READ_POINTER_MASK GENMASK(7, 0)
+#define MTL_VPU_HOST_SS_TIM_IPC_FIFO_STAT_WRITE_POINTER_MASK GENMASK(15, 8)
+#define MTL_VPU_HOST_SS_TIM_IPC_FIFO_STAT_FILL_LEVEL_MASK GENMASK(23, 16)
+#define MTL_VPU_HOST_SS_TIM_IPC_FIFO_STAT_RSVD0_MASK GENMASK(31, 24)
+
+#define MTL_VPU_HOST_SS_AON_PWR_ISO_EN0 0x00030020u
+#define MTL_VPU_HOST_SS_AON_PWR_ISO_EN0_MSS_CPU_MASK BIT_MASK(3)
+
+#define MTL_VPU_HOST_SS_AON_PWR_ISLAND_EN0 0x00030024u
+#define MTL_VPU_HOST_SS_AON_PWR_ISLAND_EN0_MSS_CPU_MASK BIT_MASK(3)
+
+#define MTL_VPU_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0 0x00030028u
+#define MTL_VPU_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0_MSS_CPU_MASK BIT_MASK(3)
+
+#define MTL_VPU_HOST_SS_AON_PWR_ISLAND_STATUS0 0x0003002cu
+#define MTL_VPU_HOST_SS_AON_PWR_ISLAND_STATUS0_MSS_CPU_MASK BIT_MASK(3)
+
+#define MTL_VPU_HOST_SS_AON_VPU_IDLE_GEN 0x00030200u
+#define MTL_VPU_HOST_SS_AON_VPU_IDLE_GEN_EN_MASK BIT_MASK(0)
+
+#define MTL_VPU_HOST_SS_AON_DPU_ACTIVE 0x00030204u
+#define MTL_VPU_HOST_SS_AON_DPU_ACTIVE_DPU_ACTIVE_MASK BIT_MASK(0)
+
+#define MTL_VPU_HOST_SS_LOADING_ADDRESS_LO 0x00041040u
+#define MTL_VPU_HOST_SS_LOADING_ADDRESS_LO_DONE_MASK BIT_MASK(0)
+#define MTL_VPU_HOST_SS_LOADING_ADDRESS_LO_IOSF_RS_ID_MASK GENMASK(2, 1)
+#define MTL_VPU_HOST_SS_LOADING_ADDRESS_LO_IMAGE_LOCATION_MASK GENMASK(31, 3)
+
+#define MTL_VPU_HOST_SS_WORKPOINT_CONFIG_MIRROR 0x00082020u
+#define MTL_VPU_HOST_SS_WORKPOINT_CONFIG_MIRROR_FINAL_PLL_FREQ_MASK GENMASK(15, 0)
+#define MTL_VPU_HOST_SS_WORKPOINT_CONFIG_MIRROR_CONFIG_ID_MASK GENMASK(31, 16)
+
+#define MTL_VPU_HOST_MMU_IDR0 0x00200000u
+#define MTL_VPU_HOST_MMU_IDR1 0x00200004u
+#define MTL_VPU_HOST_MMU_IDR3 0x0020000cu
+#define MTL_VPU_HOST_MMU_IDR5 0x00200014u
+#define MTL_VPU_HOST_MMU_CR0 0x00200020u
+#define MTL_VPU_HOST_MMU_CR0ACK 0x00200024u
+#define MTL_VPU_HOST_MMU_CR1 0x00200028u
+#define MTL_VPU_HOST_MMU_CR2 0x0020002cu
+#define MTL_VPU_HOST_MMU_IRQ_CTRL 0x00200050u
+#define MTL_VPU_HOST_MMU_IRQ_CTRLACK 0x00200054u
+
+#define MTL_VPU_HOST_MMU_GERROR 0x00200060u
+#define MTL_VPU_HOST_MMU_GERROR_CMDQ_MASK BIT_MASK(0)
+#define MTL_VPU_HOST_MMU_GERROR_EVTQ_ABT_MASK BIT_MASK(2)
+#define MTL_VPU_HOST_MMU_GERROR_PRIQ_ABT_MASK BIT_MASK(3)
+#define MTL_VPU_HOST_MMU_GERROR_MSI_CMDQ_ABT_MASK BIT_MASK(4)
+#define MTL_VPU_HOST_MMU_GERROR_MSI_EVTQ_ABT_MASK BIT_MASK(5)
+#define MTL_VPU_HOST_MMU_GERROR_MSI_PRIQ_ABT_MASK BIT_MASK(6)
+#define MTL_VPU_HOST_MMU_GERROR_MSI_ABT_MASK BIT_MASK(7)
+
+#define MTL_VPU_HOST_MMU_GERRORN 0x00200064u
+
+#define MTL_VPU_HOST_MMU_STRTAB_BASE 0x00200080u
+#define MTL_VPU_HOST_MMU_STRTAB_BASE_CFG 0x00200088u
+#define MTL_VPU_HOST_MMU_CMDQ_BASE 0x00200090u
+#define MTL_VPU_HOST_MMU_CMDQ_PROD 0x00200098u
+#define MTL_VPU_HOST_MMU_CMDQ_CONS 0x0020009cu
+#define MTL_VPU_HOST_MMU_EVTQ_BASE 0x002000a0u
+#define MTL_VPU_HOST_MMU_EVTQ_PROD 0x002000a8u
+#define MTL_VPU_HOST_MMU_EVTQ_CONS 0x002000acu
+#define MTL_VPU_HOST_MMU_EVTQ_PROD_SEC (0x002000a8u + SZ_64K)
+#define MTL_VPU_HOST_MMU_EVTQ_CONS_SEC (0x002000acu + SZ_64K)
+
+#define MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES 0x00360000u
+#define MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES_CACHE_OVERRIDE_EN_MASK BIT_MASK(0)
+#define MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES_AWCACHE_OVERRIDE_MASK BIT_MASK(1)
+#define MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES_ARCACHE_OVERRIDE_MASK BIT_MASK(2)
+#define MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES_NOSNOOP_OVERRIDE_EN_MASK BIT_MASK(3)
+#define MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES_AW_NOSNOOP_OVERRIDE_MASK BIT_MASK(4)
+#define MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES_AR_NOSNOOP_OVERRIDE_MASK BIT_MASK(5)
+#define MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES_PTW_AW_CONTEXT_FLAG_MASK GENMASK(10, 6)
+#define MTL_VPU_HOST_IF_TCU_PTW_OVERRIDES_PTW_AR_CONTEXT_FLAG_MASK GENMASK(15, 11)
+
+#define MTL_VPU_HOST_IF_TBU_MMUSSIDV 0x00360004u
+#define MTL_VPU_HOST_IF_TBU_MMUSSIDV_TBU0_AWMMUSSIDV_MASK BIT_MASK(0)
+#define MTL_VPU_HOST_IF_TBU_MMUSSIDV_TBU0_ARMMUSSIDV_MASK BIT_MASK(1)
+#define MTL_VPU_HOST_IF_TBU_MMUSSIDV_TBU1_AWMMUSSIDV_MASK BIT_MASK(2)
+#define MTL_VPU_HOST_IF_TBU_MMUSSIDV_TBU1_ARMMUSSIDV_MASK BIT_MASK(3)
+#define MTL_VPU_HOST_IF_TBU_MMUSSIDV_TBU2_AWMMUSSIDV_MASK BIT_MASK(4)
+#define MTL_VPU_HOST_IF_TBU_MMUSSIDV_TBU2_ARMMUSSIDV_MASK BIT_MASK(5)
+#define MTL_VPU_HOST_IF_TBU_MMUSSIDV_TBU3_AWMMUSSIDV_MASK BIT_MASK(6)
+#define MTL_VPU_HOST_IF_TBU_MMUSSIDV_TBU3_ARMMUSSIDV_MASK BIT_MASK(7)
+#define MTL_VPU_HOST_IF_TBU_MMUSSIDV_TBU4_AWMMUSSIDV_MASK BIT_MASK(8)
+#define MTL_VPU_HOST_IF_TBU_MMUSSIDV_TBU4_ARMMUSSIDV_MASK BIT_MASK(9)
+
+#define MTL_VPU_CPU_SS_DSU_LEON_RT_BASE 0x04000000u
+#define MTL_VPU_CPU_SS_DSU_LEON_RT_DSU_CTRL 0x04000000u
+#define MTL_VPU_CPU_SS_DSU_LEON_RT_PC_REG 0x04400010u
+#define MTL_VPU_CPU_SS_DSU_LEON_RT_NPC_REG 0x04400014u
+#define MTL_VPU_CPU_SS_DSU_LEON_RT_DSU_TRAP_REG 0x04400020u
+
+#define MTL_VPU_CPU_SS_MSSCPU_CPR_CLK_SET 0x06010004u
+#define MTL_VPU_CPU_SS_MSSCPU_CPR_CLK_SET_CPU_DSU_MASK BIT_MASK(1)
+
+#define MTL_VPU_CPU_SS_MSSCPU_CPR_RST_CLR 0x06010018u
+#define MTL_VPU_CPU_SS_MSSCPU_CPR_RST_CLR_CPU_DSU_MASK BIT_MASK(1)
+
+#define MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC 0x06010040u
+#define MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC_IRQI_RSTRUN0_MASK BIT_MASK(0)
+#define MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC_IRQI_RESUME0_MASK BIT_MASK(1)
+#define MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC_IRQI_RSTRUN1_MASK BIT_MASK(2)
+#define MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC_IRQI_RESUME1_MASK BIT_MASK(3)
+#define MTL_VPU_CPU_SS_MSSCPU_CPR_LEON_RT_VEC_IRQI_RSTVEC_MASK GENMASK(31, 4)
+
+#define MTL_VPU_CPU_SS_TIM_WATCHDOG 0x0602009cu
+#define MTL_VPU_CPU_SS_TIM_WDOG_EN 0x060200a4u
+#define MTL_VPU_CPU_SS_TIM_SAFE 0x060200a8u
+#define MTL_VPU_CPU_SS_TIM_IPC_FIFO 0x060200f0u
+
+#define MTL_VPU_CPU_SS_TIM_GEN_CONFIG 0x06021008u
+#define MTL_VPU_CPU_SS_TIM_GEN_CONFIG_WDOG_TO_INT_CLR_MASK BIT_MASK(9)
+
+#define MTL_VPU_CPU_SS_DOORBELL_0 0x06300000u
+#define MTL_VPU_CPU_SS_DOORBELL_0_SET_MASK BIT_MASK(0)
+
+#define MTL_VPU_CPU_SS_DOORBELL_1 0x06301000u
+
+#endif /* __IVPU_HW_MTL_REG_H__ */
diff --git a/drivers/accel/ivpu/ivpu_hw_reg_io.h b/drivers/accel/ivpu/ivpu_hw_reg_io.h
new file mode 100644
index 000000000000..43c2c0c2d050
--- /dev/null
+++ b/drivers/accel/ivpu/ivpu_hw_reg_io.h
@@ -0,0 +1,115 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020-2023 Intel Corporation
+ */
+
+#ifndef __IVPU_HW_REG_IO_H__
+#define __IVPU_HW_REG_IO_H__
+
+#include <linux/bitfield.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+
+#include "ivpu_drv.h"
+
+#define REG_POLL_SLEEP_US 50
+#define REG_IO_ERROR 0xffffffff
+
+#define REGB_RD32(reg) ivpu_hw_reg_rd32(vdev, vdev->regb, (reg), #reg, __func__)
+#define REGB_RD32_SILENT(reg) readl(vdev->regb + (reg))
+#define REGB_RD64(reg) ivpu_hw_reg_rd64(vdev, vdev->regb, (reg), #reg, __func__)
+#define REGB_WR32(reg, val) ivpu_hw_reg_wr32(vdev, vdev->regb, (reg), (val), #reg, __func__)
+#define REGB_WR64(reg, val) ivpu_hw_reg_wr64(vdev, vdev->regb, (reg), (val), #reg, __func__)
+
+#define REGV_RD32(reg) ivpu_hw_reg_rd32(vdev, vdev->regv, (reg), #reg, __func__)
+#define REGV_RD32_SILENT(reg) readl(vdev->regv + (reg))
+#define REGV_RD64(reg) ivpu_hw_reg_rd64(vdev, vdev->regv, (reg), #reg, __func__)
+#define REGV_WR32(reg, val) ivpu_hw_reg_wr32(vdev, vdev->regv, (reg), (val), #reg, __func__)
+#define REGV_WR64(reg, val) ivpu_hw_reg_wr64(vdev, vdev->regv, (reg), (val), #reg, __func__)
+
+#define REGV_WR32I(reg, stride, index, val) \
+ ivpu_hw_reg_wr32_index(vdev, vdev->regv, (reg), (stride), (index), (val), #reg, __func__)
+
+#define REG_FLD(REG, FLD) \
+ (REG##_##FLD##_MASK)
+#define REG_FLD_NUM(REG, FLD, num) \
+ FIELD_PREP(REG##_##FLD##_MASK, num)
+#define REG_GET_FLD(REG, FLD, val) \
+ FIELD_GET(REG##_##FLD##_MASK, val)
+#define REG_CLR_FLD(REG, FLD, val) \
+ ((val) & ~(REG##_##FLD##_MASK))
+#define REG_SET_FLD(REG, FLD, val) \
+ ((val) | (REG##_##FLD##_MASK))
+#define REG_SET_FLD_NUM(REG, FLD, num, val) \
+ (((val) & ~(REG##_##FLD##_MASK)) | FIELD_PREP(REG##_##FLD##_MASK, num))
+#define REG_TEST_FLD(REG, FLD, val) \
+ ((REG##_##FLD##_MASK) == ((val) & (REG##_##FLD##_MASK)))
+#define REG_TEST_FLD_NUM(REG, FLD, num, val) \
+ ((num) == FIELD_GET(REG##_##FLD##_MASK, val))
+
+#define REGB_POLL(reg, var, cond, timeout_us) \
+ read_poll_timeout(REGB_RD32_SILENT, var, cond, REG_POLL_SLEEP_US, timeout_us, false, reg)
+
+#define REGV_POLL(reg, var, cond, timeout_us) \
+ read_poll_timeout(REGV_RD32_SILENT, var, cond, REG_POLL_SLEEP_US, timeout_us, false, reg)
+
+#define REGB_POLL_FLD(reg, fld, val, timeout_us) \
+({ \
+ u32 var; \
+ REGB_POLL(reg, var, (FIELD_GET(reg##_##fld##_MASK, var) == (val)), timeout_us); \
+})
+
+#define REGV_POLL_FLD(reg, fld, val, timeout_us) \
+({ \
+ u32 var; \
+ REGV_POLL(reg, var, (FIELD_GET(reg##_##fld##_MASK, var) == (val)), timeout_us); \
+})
+
+static inline u32
+ivpu_hw_reg_rd32(struct ivpu_device *vdev, void __iomem *base, u32 reg,
+ const char *name, const char *func)
+{
+ u32 val = readl(base + reg);
+
+ ivpu_dbg(vdev, REG, "%s RD: %s (0x%08x) => 0x%08x\n", func, name, reg, val);
+ return val;
+}
+
+static inline u64
+ivpu_hw_reg_rd64(struct ivpu_device *vdev, void __iomem *base, u32 reg,
+ const char *name, const char *func)
+{
+ u64 val = readq(base + reg);
+
+ ivpu_dbg(vdev, REG, "%s RD: %s (0x%08x) => 0x%016llx\n", func, name, reg, val);
+ return val;
+}
+
+static inline void
+ivpu_hw_reg_wr32(struct ivpu_device *vdev, void __iomem *base, u32 reg, u32 val,
+ const char *name, const char *func)
+{
+ ivpu_dbg(vdev, REG, "%s WR: %s (0x%08x) <= 0x%08x\n", func, name, reg, val);
+ writel(val, base + reg);
+}
+
+static inline void
+ivpu_hw_reg_wr64(struct ivpu_device *vdev, void __iomem *base, u32 reg, u64 val,
+ const char *name, const char *func)
+{
+ ivpu_dbg(vdev, REG, "%s WR: %s (0x%08x) <= 0x%016llx\n", func, name, reg, val);
+ writeq(val, base + reg);
+}
+
+static inline void
+ivpu_hw_reg_wr32_index(struct ivpu_device *vdev, void __iomem *base, u32 reg,
+ u32 stride, u32 index, u32 val, const char *name,
+ const char *func)
+{
+ reg += index * stride;
+
+ ivpu_dbg(vdev, REG, "%s WR: %s_%d (0x%08x) <= 0x%08x\n", func, name, index, reg, val);
+ writel(val, base + reg);
+}
+
+#endif /* __IVPU_HW_REG_IO_H__ */
diff --git a/drivers/accel/ivpu/ivpu_ipc.c b/drivers/accel/ivpu/ivpu_ipc.c
new file mode 100644
index 000000000000..3adcfa80fc0e
--- /dev/null
+++ b/drivers/accel/ivpu/ivpu_ipc.c
@@ -0,0 +1,510 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020-2023 Intel Corporation
+ */
+
+#include <linux/genalloc.h>
+#include <linux/highmem.h>
+#include <linux/kthread.h>
+#include <linux/wait.h>
+
+#include "ivpu_drv.h"
+#include "ivpu_gem.h"
+#include "ivpu_hw.h"
+#include "ivpu_hw_reg_io.h"
+#include "ivpu_ipc.h"
+#include "ivpu_jsm_msg.h"
+#include "ivpu_pm.h"
+
+#define IPC_MAX_RX_MSG 128
+#define IS_KTHREAD() (get_current()->flags & PF_KTHREAD)
+
+struct ivpu_ipc_tx_buf {
+ struct ivpu_ipc_hdr ipc;
+ struct vpu_jsm_msg jsm;
+};
+
+struct ivpu_ipc_rx_msg {
+ struct list_head link;
+ struct ivpu_ipc_hdr *ipc_hdr;
+ struct vpu_jsm_msg *jsm_msg;
+};
+
+static void ivpu_ipc_msg_dump(struct ivpu_device *vdev, char *c,
+ struct ivpu_ipc_hdr *ipc_hdr, u32 vpu_addr)
+{
+ ivpu_dbg(vdev, IPC,
+ "%s: vpu:0x%x (data_addr:0x%08x, data_size:0x%x, channel:0x%x, src_node:0x%x, dst_node:0x%x, status:0x%x)",
+ c, vpu_addr, ipc_hdr->data_addr, ipc_hdr->data_size, ipc_hdr->channel,
+ ipc_hdr->src_node, ipc_hdr->dst_node, ipc_hdr->status);
+}
+
+static void ivpu_jsm_msg_dump(struct ivpu_device *vdev, char *c,
+ struct vpu_jsm_msg *jsm_msg, u32 vpu_addr)
+{
+ u32 *payload = (u32 *)&jsm_msg->payload;
+
+ ivpu_dbg(vdev, JSM,
+ "%s: vpu:0x%08x (type:0x%x, status:0x%x, id: 0x%x, result: 0x%x, payload:0x%x 0x%x 0x%x 0x%x 0x%x)\n",
+ c, vpu_addr, jsm_msg->type, jsm_msg->status, jsm_msg->request_id, jsm_msg->result,
+ payload[0], payload[1], payload[2], payload[3], payload[4]);
+}
+
+static void
+ivpu_ipc_rx_mark_free(struct ivpu_device *vdev, struct ivpu_ipc_hdr *ipc_hdr,
+ struct vpu_jsm_msg *jsm_msg)
+{
+ ipc_hdr->status = IVPU_IPC_HDR_FREE;
+ if (jsm_msg)
+ jsm_msg->status = VPU_JSM_MSG_FREE;
+ wmb(); /* Flush WC buffers for message statuses */
+}
+
+static void ivpu_ipc_mem_fini(struct ivpu_device *vdev)
+{
+ struct ivpu_ipc_info *ipc = vdev->ipc;
+
+ ivpu_bo_free_internal(ipc->mem_rx);
+ ivpu_bo_free_internal(ipc->mem_tx);
+}
+
+static int
+ivpu_ipc_tx_prepare(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
+ struct vpu_jsm_msg *req)
+{
+ struct ivpu_ipc_info *ipc = vdev->ipc;
+ struct ivpu_ipc_tx_buf *tx_buf;
+ u32 tx_buf_vpu_addr;
+ u32 jsm_vpu_addr;
+
+ tx_buf_vpu_addr = gen_pool_alloc(ipc->mm_tx, sizeof(*tx_buf));
+ if (!tx_buf_vpu_addr) {
+ ivpu_err(vdev, "Failed to reserve IPC buffer, size %ld\n",
+ sizeof(*tx_buf));
+ return -ENOMEM;
+ }
+
+ tx_buf = ivpu_to_cpu_addr(ipc->mem_tx, tx_buf_vpu_addr);
+ if (drm_WARN_ON(&vdev->drm, !tx_buf)) {
+ gen_pool_free(ipc->mm_tx, tx_buf_vpu_addr, sizeof(*tx_buf));
+ return -EIO;
+ }
+
+ jsm_vpu_addr = tx_buf_vpu_addr + offsetof(struct ivpu_ipc_tx_buf, jsm);
+
+ if (tx_buf->ipc.status != IVPU_IPC_HDR_FREE)
+ ivpu_warn(vdev, "IPC message vpu:0x%x not released by firmware\n",
+ tx_buf_vpu_addr);
+
+ if (tx_buf->jsm.status != VPU_JSM_MSG_FREE)
+ ivpu_warn(vdev, "JSM message vpu:0x%x not released by firmware\n",
+ jsm_vpu_addr);
+
+ memset(tx_buf, 0, sizeof(*tx_buf));
+ tx_buf->ipc.data_addr = jsm_vpu_addr;
+ /* TODO: Set data_size to actual JSM message size, not union of all messages */
+ tx_buf->ipc.data_size = sizeof(*req);
+ tx_buf->ipc.channel = cons->channel;
+ tx_buf->ipc.src_node = 0;
+ tx_buf->ipc.dst_node = 1;
+ tx_buf->ipc.status = IVPU_IPC_HDR_ALLOCATED;
+ tx_buf->jsm.type = req->type;
+ tx_buf->jsm.status = VPU_JSM_MSG_ALLOCATED;
+ tx_buf->jsm.payload = req->payload;
+
+ req->request_id = atomic_inc_return(&ipc->request_id);
+ tx_buf->jsm.request_id = req->request_id;
+ cons->request_id = req->request_id;
+ wmb(); /* Flush WC buffers for IPC, JSM msgs */
+
+ cons->tx_vpu_addr = tx_buf_vpu_addr;
+
+ ivpu_jsm_msg_dump(vdev, "TX", &tx_buf->jsm, jsm_vpu_addr);
+ ivpu_ipc_msg_dump(vdev, "TX", &tx_buf->ipc, tx_buf_vpu_addr);
+
+ return 0;
+}
+
+static void ivpu_ipc_tx_release(struct ivpu_device *vdev, u32 vpu_addr)
+{
+ struct ivpu_ipc_info *ipc = vdev->ipc;
+
+ if (vpu_addr)
+ gen_pool_free(ipc->mm_tx, vpu_addr, sizeof(struct ivpu_ipc_tx_buf));
+}
+
+static void ivpu_ipc_tx(struct ivpu_device *vdev, u32 vpu_addr)
+{
+ ivpu_hw_reg_ipc_tx_set(vdev, vpu_addr);
+}
+
+void
+ivpu_ipc_consumer_add(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons, u32 channel)
+{
+ struct ivpu_ipc_info *ipc = vdev->ipc;
+
+ INIT_LIST_HEAD(&cons->link);
+ cons->channel = channel;
+ cons->tx_vpu_addr = 0;
+ cons->request_id = 0;
+ spin_lock_init(&cons->rx_msg_lock);
+ INIT_LIST_HEAD(&cons->rx_msg_list);
+ init_waitqueue_head(&cons->rx_msg_wq);
+
+ spin_lock_irq(&ipc->cons_list_lock);
+ list_add_tail(&cons->link, &ipc->cons_list);
+ spin_unlock_irq(&ipc->cons_list_lock);
+}
+
+void ivpu_ipc_consumer_del(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons)
+{
+ struct ivpu_ipc_info *ipc = vdev->ipc;
+ struct ivpu_ipc_rx_msg *rx_msg, *r;
+
+ spin_lock_irq(&ipc->cons_list_lock);
+ list_del(&cons->link);
+ spin_unlock_irq(&ipc->cons_list_lock);
+
+ spin_lock_irq(&cons->rx_msg_lock);
+ list_for_each_entry_safe(rx_msg, r, &cons->rx_msg_list, link) {
+ list_del(&rx_msg->link);
+ ivpu_ipc_rx_mark_free(vdev, rx_msg->ipc_hdr, rx_msg->jsm_msg);
+ atomic_dec(&ipc->rx_msg_count);
+ kfree(rx_msg);
+ }
+ spin_unlock_irq(&cons->rx_msg_lock);
+
+ ivpu_ipc_tx_release(vdev, cons->tx_vpu_addr);
+}
+
+static int
+ivpu_ipc_send(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons, struct vpu_jsm_msg *req)
+{
+ struct ivpu_ipc_info *ipc = vdev->ipc;
+ int ret;
+
+ ret = mutex_lock_interruptible(&ipc->lock);
+ if (ret)
+ return ret;
+
+ if (!ipc->on) {
+ ret = -EAGAIN;
+ goto unlock;
+ }
+
+ ret = ivpu_ipc_tx_prepare(vdev, cons, req);
+ if (ret)
+ goto unlock;
+
+ ivpu_ipc_tx(vdev, cons->tx_vpu_addr);
+
+unlock:
+ mutex_unlock(&ipc->lock);
+ return ret;
+}
+
+int ivpu_ipc_receive(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
+ struct ivpu_ipc_hdr *ipc_buf,
+ struct vpu_jsm_msg *ipc_payload, unsigned long timeout_ms)
+{
+ struct ivpu_ipc_info *ipc = vdev->ipc;
+ struct ivpu_ipc_rx_msg *rx_msg;
+ int wait_ret, ret = 0;
+
+ wait_ret = wait_event_interruptible_timeout(cons->rx_msg_wq,
+ (IS_KTHREAD() && kthread_should_stop()) ||
+ !list_empty(&cons->rx_msg_list),
+ msecs_to_jiffies(timeout_ms));
+
+ if (IS_KTHREAD() && kthread_should_stop())
+ return -EINTR;
+
+ if (wait_ret == 0)
+ return -ETIMEDOUT;
+
+ if (wait_ret < 0)
+ return -ERESTARTSYS;
+
+ spin_lock_irq(&cons->rx_msg_lock);
+ rx_msg = list_first_entry_or_null(&cons->rx_msg_list, struct ivpu_ipc_rx_msg, link);
+ if (!rx_msg) {
+ spin_unlock_irq(&cons->rx_msg_lock);
+ return -EAGAIN;
+ }
+ list_del(&rx_msg->link);
+ spin_unlock_irq(&cons->rx_msg_lock);
+
+ if (ipc_buf)
+ memcpy(ipc_buf, rx_msg->ipc_hdr, sizeof(*ipc_buf));
+ if (rx_msg->jsm_msg) {
+ u32 size = min_t(int, rx_msg->ipc_hdr->data_size, sizeof(*ipc_payload));
+
+ if (rx_msg->jsm_msg->result != VPU_JSM_STATUS_SUCCESS) {
+ ivpu_dbg(vdev, IPC, "IPC resp result error: %d\n", rx_msg->jsm_msg->result);
+ ret = -EBADMSG;
+ }
+
+ if (ipc_payload)
+ memcpy(ipc_payload, rx_msg->jsm_msg, size);
+ }
+
+ ivpu_ipc_rx_mark_free(vdev, rx_msg->ipc_hdr, rx_msg->jsm_msg);
+ atomic_dec(&ipc->rx_msg_count);
+ kfree(rx_msg);
+
+ return ret;
+}
+
+static int
+ivpu_ipc_send_receive_internal(struct ivpu_device *vdev, struct vpu_jsm_msg *req,
+ enum vpu_ipc_msg_type expected_resp_type,
+ struct vpu_jsm_msg *resp, u32 channel,
+ unsigned long timeout_ms)
+{
+ struct ivpu_ipc_consumer cons;
+ int ret;
+
+ ivpu_ipc_consumer_add(vdev, &cons, channel);
+
+ ret = ivpu_ipc_send(vdev, &cons, req);
+ if (ret) {
+ ivpu_warn(vdev, "IPC send failed: %d\n", ret);
+ goto consumer_del;
+ }
+
+ ret = ivpu_ipc_receive(vdev, &cons, NULL, resp, timeout_ms);
+ if (ret) {
+ ivpu_warn(vdev, "IPC receive failed: type 0x%x, ret %d\n", req->type, ret);
+ goto consumer_del;
+ }
+
+ if (resp->type != expected_resp_type) {
+ ivpu_warn(vdev, "Invalid JSM response type: 0x%x\n", resp->type);
+ ret = -EBADE;
+ }
+
+consumer_del:
+ ivpu_ipc_consumer_del(vdev, &cons);
+ return ret;
+}
+
+int ivpu_ipc_send_receive(struct ivpu_device *vdev, struct vpu_jsm_msg *req,
+ enum vpu_ipc_msg_type expected_resp_type,
+ struct vpu_jsm_msg *resp, u32 channel,
+ unsigned long timeout_ms)
+{
+ struct vpu_jsm_msg hb_req = { .type = VPU_JSM_MSG_QUERY_ENGINE_HB };
+ struct vpu_jsm_msg hb_resp;
+ int ret, hb_ret;
+
+ ret = ivpu_rpm_get(vdev);
+ if (ret < 0)
+ return ret;
+
+ ret = ivpu_ipc_send_receive_internal(vdev, req, expected_resp_type, resp,
+ channel, timeout_ms);
+ if (ret != -ETIMEDOUT)
+ goto rpm_put;
+
+ hb_ret = ivpu_ipc_send_receive_internal(vdev, &hb_req, VPU_JSM_MSG_QUERY_ENGINE_HB_DONE,
+ &hb_resp, VPU_IPC_CHAN_ASYNC_CMD,
+ vdev->timeout.jsm);
+ if (hb_ret == -ETIMEDOUT) {
+ ivpu_hw_diagnose_failure(vdev);
+ ivpu_pm_schedule_recovery(vdev);
+ }
+
+rpm_put:
+ ivpu_rpm_put(vdev);
+ return ret;
+}
+
+static bool
+ivpu_ipc_match_consumer(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
+ struct ivpu_ipc_hdr *ipc_hdr, struct vpu_jsm_msg *jsm_msg)
+{
+ if (cons->channel != ipc_hdr->channel)
+ return false;
+
+ if (!jsm_msg || jsm_msg->request_id == cons->request_id)
+ return true;
+
+ return false;
+}
+
+static void
+ivpu_ipc_dispatch(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
+ struct ivpu_ipc_hdr *ipc_hdr, struct vpu_jsm_msg *jsm_msg)
+{
+ struct ivpu_ipc_info *ipc = vdev->ipc;
+ struct ivpu_ipc_rx_msg *rx_msg;
+ unsigned long flags;
+
+ lockdep_assert_held(&ipc->cons_list_lock);
+
+ rx_msg = kzalloc(sizeof(*rx_msg), GFP_ATOMIC);
+ if (!rx_msg) {
+ ivpu_ipc_rx_mark_free(vdev, ipc_hdr, jsm_msg);
+ return;
+ }
+
+ atomic_inc(&ipc->rx_msg_count);
+
+ rx_msg->ipc_hdr = ipc_hdr;
+ rx_msg->jsm_msg = jsm_msg;
+
+ spin_lock_irqsave(&cons->rx_msg_lock, flags);
+ list_add_tail(&rx_msg->link, &cons->rx_msg_list);
+ spin_unlock_irqrestore(&cons->rx_msg_lock, flags);
+
+ wake_up(&cons->rx_msg_wq);
+}
+
+int ivpu_ipc_irq_handler(struct ivpu_device *vdev)
+{
+ struct ivpu_ipc_info *ipc = vdev->ipc;
+ struct ivpu_ipc_consumer *cons;
+ struct ivpu_ipc_hdr *ipc_hdr;
+ struct vpu_jsm_msg *jsm_msg;
+ unsigned long flags;
+ bool dispatched;
+ u32 vpu_addr;
+
+ /*
+ * Driver needs to purge all messages from IPC FIFO to clear IPC interrupt.
+ * Without purge IPC FIFO to 0 next IPC interrupts won't be generated.
+ */
+ while (ivpu_hw_reg_ipc_rx_count_get(vdev)) {
+ vpu_addr = ivpu_hw_reg_ipc_rx_addr_get(vdev);
+ if (vpu_addr == REG_IO_ERROR) {
+ ivpu_err(vdev, "Failed to read IPC rx addr register\n");
+ return -EIO;
+ }
+
+ ipc_hdr = ivpu_to_cpu_addr(ipc->mem_rx, vpu_addr);
+ if (!ipc_hdr) {
+ ivpu_warn(vdev, "IPC msg 0x%x out of range\n", vpu_addr);
+ continue;
+ }
+ ivpu_ipc_msg_dump(vdev, "RX", ipc_hdr, vpu_addr);
+
+ jsm_msg = NULL;
+ if (ipc_hdr->channel != IVPU_IPC_CHAN_BOOT_MSG) {
+ jsm_msg = ivpu_to_cpu_addr(ipc->mem_rx, ipc_hdr->data_addr);
+ if (!jsm_msg) {
+ ivpu_warn(vdev, "JSM msg 0x%x out of range\n", ipc_hdr->data_addr);
+ ivpu_ipc_rx_mark_free(vdev, ipc_hdr, NULL);
+ continue;
+ }
+ ivpu_jsm_msg_dump(vdev, "RX", jsm_msg, ipc_hdr->data_addr);
+ }
+
+ if (atomic_read(&ipc->rx_msg_count) > IPC_MAX_RX_MSG) {
+ ivpu_warn(vdev, "IPC RX msg dropped, msg count %d\n", IPC_MAX_RX_MSG);
+ ivpu_ipc_rx_mark_free(vdev, ipc_hdr, jsm_msg);
+ continue;
+ }
+
+ dispatched = false;
+ spin_lock_irqsave(&ipc->cons_list_lock, flags);
+ list_for_each_entry(cons, &ipc->cons_list, link) {
+ if (ivpu_ipc_match_consumer(vdev, cons, ipc_hdr, jsm_msg)) {
+ ivpu_ipc_dispatch(vdev, cons, ipc_hdr, jsm_msg);
+ dispatched = true;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&ipc->cons_list_lock, flags);
+
+ if (!dispatched) {
+ ivpu_dbg(vdev, IPC, "IPC RX msg 0x%x dropped (no consumer)\n", vpu_addr);
+ ivpu_ipc_rx_mark_free(vdev, ipc_hdr, jsm_msg);
+ }
+ }
+
+ return 0;
+}
+
+int ivpu_ipc_init(struct ivpu_device *vdev)
+{
+ struct ivpu_ipc_info *ipc = vdev->ipc;
+ int ret = -ENOMEM;
+
+ ipc->mem_tx = ivpu_bo_alloc_internal(vdev, 0, SZ_16K, DRM_IVPU_BO_WC);
+ if (!ipc->mem_tx)
+ return ret;
+
+ ipc->mem_rx = ivpu_bo_alloc_internal(vdev, 0, SZ_16K, DRM_IVPU_BO_WC);
+ if (!ipc->mem_rx)
+ goto err_free_tx;
+
+ ipc->mm_tx = devm_gen_pool_create(vdev->drm.dev, __ffs(IVPU_IPC_ALIGNMENT),
+ -1, "TX_IPC_JSM");
+ if (IS_ERR(ipc->mm_tx)) {
+ ret = PTR_ERR(ipc->mm_tx);
+ ivpu_err(vdev, "Failed to create gen pool, %pe\n", ipc->mm_tx);
+ goto err_free_rx;
+ }
+
+ ret = gen_pool_add(ipc->mm_tx, ipc->mem_tx->vpu_addr, ipc->mem_tx->base.size, -1);
+ if (ret) {
+ ivpu_err(vdev, "gen_pool_add failed, ret %d\n", ret);
+ goto err_free_rx;
+ }
+
+ INIT_LIST_HEAD(&ipc->cons_list);
+ spin_lock_init(&ipc->cons_list_lock);
+ drmm_mutex_init(&vdev->drm, &ipc->lock);
+
+ ivpu_ipc_reset(vdev);
+ return 0;
+
+err_free_rx:
+ ivpu_bo_free_internal(ipc->mem_rx);
+err_free_tx:
+ ivpu_bo_free_internal(ipc->mem_tx);
+ return ret;
+}
+
+void ivpu_ipc_fini(struct ivpu_device *vdev)
+{
+ ivpu_ipc_mem_fini(vdev);
+}
+
+void ivpu_ipc_enable(struct ivpu_device *vdev)
+{
+ struct ivpu_ipc_info *ipc = vdev->ipc;
+
+ mutex_lock(&ipc->lock);
+ ipc->on = true;
+ mutex_unlock(&ipc->lock);
+}
+
+void ivpu_ipc_disable(struct ivpu_device *vdev)
+{
+ struct ivpu_ipc_info *ipc = vdev->ipc;
+ struct ivpu_ipc_consumer *cons, *c;
+ unsigned long flags;
+
+ mutex_lock(&ipc->lock);
+ ipc->on = false;
+ mutex_unlock(&ipc->lock);
+
+ spin_lock_irqsave(&ipc->cons_list_lock, flags);
+ list_for_each_entry_safe(cons, c, &ipc->cons_list, link)
+ wake_up(&cons->rx_msg_wq);
+ spin_unlock_irqrestore(&ipc->cons_list_lock, flags);
+}
+
+void ivpu_ipc_reset(struct ivpu_device *vdev)
+{
+ struct ivpu_ipc_info *ipc = vdev->ipc;
+
+ mutex_lock(&ipc->lock);
+
+ memset(ipc->mem_tx->kvaddr, 0, ipc->mem_tx->base.size);
+ memset(ipc->mem_rx->kvaddr, 0, ipc->mem_rx->base.size);
+ wmb(); /* Flush WC buffers for TX and RX rings */
+
+ mutex_unlock(&ipc->lock);
+}
diff --git a/drivers/accel/ivpu/ivpu_ipc.h b/drivers/accel/ivpu/ivpu_ipc.h
new file mode 100644
index 000000000000..9838202ecfad
--- /dev/null
+++ b/drivers/accel/ivpu/ivpu_ipc.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020-2023 Intel Corporation
+ */
+
+#ifndef __IVPU_IPC_H__
+#define __IVPU_IPC_H__
+
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+
+#include "vpu_jsm_api.h"
+
+struct ivpu_bo;
+
+/* VPU FW boot notification */
+#define IVPU_IPC_CHAN_BOOT_MSG 0x3ff
+#define IVPU_IPC_BOOT_MSG_DATA_ADDR 0x424f4f54
+
+/* The alignment to be used for IPC Buffers and IPC Data. */
+#define IVPU_IPC_ALIGNMENT 64
+
+#define IVPU_IPC_HDR_FREE 0
+#define IVPU_IPC_HDR_ALLOCATED 0
+
+/**
+ * struct ivpu_ipc_hdr - The IPC message header structure, exchanged
+ * with the VPU device firmware.
+ * @data_addr: The VPU address of the payload (JSM message)
+ * @data_size: The size of the payload.
+ * @channel: The channel used.
+ * @src_node: The Node ID of the sender.
+ * @dst_node: The Node ID of the intended receiver.
+ * @status: IPC buffer usage status
+ */
+struct ivpu_ipc_hdr {
+ u32 data_addr;
+ u32 data_size;
+ u16 channel;
+ u8 src_node;
+ u8 dst_node;
+ u8 status;
+} __packed __aligned(IVPU_IPC_ALIGNMENT);
+
+struct ivpu_ipc_consumer {
+ struct list_head link;
+ u32 channel;
+ u32 tx_vpu_addr;
+ u32 request_id;
+
+ spinlock_t rx_msg_lock; /* Protects rx_msg_list */
+ struct list_head rx_msg_list;
+ wait_queue_head_t rx_msg_wq;
+};
+
+struct ivpu_ipc_info {
+ struct gen_pool *mm_tx;
+ struct ivpu_bo *mem_tx;
+ struct ivpu_bo *mem_rx;
+
+ atomic_t rx_msg_count;
+
+ spinlock_t cons_list_lock; /* Protects cons_list */
+ struct list_head cons_list;
+
+ atomic_t request_id;
+ struct mutex lock; /* Lock on status */
+ bool on;
+};
+
+int ivpu_ipc_init(struct ivpu_device *vdev);
+void ivpu_ipc_fini(struct ivpu_device *vdev);
+
+void ivpu_ipc_enable(struct ivpu_device *vdev);
+void ivpu_ipc_disable(struct ivpu_device *vdev);
+void ivpu_ipc_reset(struct ivpu_device *vdev);
+
+int ivpu_ipc_irq_handler(struct ivpu_device *vdev);
+
+void ivpu_ipc_consumer_add(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
+ u32 channel);
+void ivpu_ipc_consumer_del(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons);
+
+int ivpu_ipc_receive(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
+ struct ivpu_ipc_hdr *ipc_buf, struct vpu_jsm_msg *ipc_payload,
+ unsigned long timeout_ms);
+
+int ivpu_ipc_send_receive(struct ivpu_device *vdev, struct vpu_jsm_msg *req,
+ enum vpu_ipc_msg_type expected_resp_type,
+ struct vpu_jsm_msg *resp, u32 channel,
+ unsigned long timeout_ms);
+
+#endif /* __IVPU_IPC_H__ */
diff --git a/drivers/accel/ivpu/ivpu_job.c b/drivers/accel/ivpu/ivpu_job.c
new file mode 100644
index 000000000000..94068aedf97c
--- /dev/null
+++ b/drivers/accel/ivpu/ivpu_job.c
@@ -0,0 +1,615 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020-2023 Intel Corporation
+ */
+
+#include <drm/drm_file.h>
+
+#include <linux/bitfield.h>
+#include <linux/highmem.h>
+#include <linux/kthread.h>
+#include <linux/pci.h>
+#include <linux/module.h>
+#include <uapi/drm/ivpu_accel.h>
+
+#include "ivpu_drv.h"
+#include "ivpu_hw.h"
+#include "ivpu_ipc.h"
+#include "ivpu_job.h"
+#include "ivpu_jsm_msg.h"
+#include "ivpu_pm.h"
+
+#define CMD_BUF_IDX 0
+#define JOB_ID_JOB_MASK GENMASK(7, 0)
+#define JOB_ID_CONTEXT_MASK GENMASK(31, 8)
+#define JOB_MAX_BUFFER_COUNT 65535
+
+static unsigned int ivpu_tdr_timeout_ms;
+module_param_named(tdr_timeout_ms, ivpu_tdr_timeout_ms, uint, 0644);
+MODULE_PARM_DESC(tdr_timeout_ms, "Timeout for device hang detection, in milliseconds, 0 - default");
+
+static void ivpu_cmdq_ring_db(struct ivpu_device *vdev, struct ivpu_cmdq *cmdq)
+{
+ ivpu_hw_reg_db_set(vdev, cmdq->db_id);
+}
+
+static struct ivpu_cmdq *ivpu_cmdq_alloc(struct ivpu_file_priv *file_priv, u16 engine)
+{
+ struct ivpu_device *vdev = file_priv->vdev;
+ struct vpu_job_queue_header *jobq_header;
+ struct ivpu_cmdq *cmdq;
+
+ cmdq = kzalloc(sizeof(*cmdq), GFP_KERNEL);
+ if (!cmdq)
+ return NULL;
+
+ cmdq->mem = ivpu_bo_alloc_internal(vdev, 0, SZ_4K, DRM_IVPU_BO_WC);
+ if (!cmdq->mem)
+ goto cmdq_free;
+
+ cmdq->db_id = file_priv->ctx.id + engine * ivpu_get_context_count(vdev);
+ cmdq->entry_count = (u32)((cmdq->mem->base.size - sizeof(struct vpu_job_queue_header)) /
+ sizeof(struct vpu_job_queue_entry));
+
+ cmdq->jobq = (struct vpu_job_queue *)cmdq->mem->kvaddr;
+ jobq_header = &cmdq->jobq->header;
+ jobq_header->engine_idx = engine;
+ jobq_header->head = 0;
+ jobq_header->tail = 0;
+ wmb(); /* Flush WC buffer for jobq->header */
+
+ return cmdq;
+
+cmdq_free:
+ kfree(cmdq);
+ return NULL;
+}
+
+static void ivpu_cmdq_free(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq)
+{
+ if (!cmdq)
+ return;
+
+ ivpu_bo_free_internal(cmdq->mem);
+ kfree(cmdq);
+}
+
+static struct ivpu_cmdq *ivpu_cmdq_acquire(struct ivpu_file_priv *file_priv, u16 engine)
+{
+ struct ivpu_device *vdev = file_priv->vdev;
+ struct ivpu_cmdq *cmdq = file_priv->cmdq[engine];
+ int ret;
+
+ lockdep_assert_held(&file_priv->lock);
+
+ if (!cmdq) {
+ cmdq = ivpu_cmdq_alloc(file_priv, engine);
+ if (!cmdq)
+ return NULL;
+ file_priv->cmdq[engine] = cmdq;
+ }
+
+ if (cmdq->db_registered)
+ return cmdq;
+
+ ret = ivpu_jsm_register_db(vdev, file_priv->ctx.id, cmdq->db_id,
+ cmdq->mem->vpu_addr, cmdq->mem->base.size);
+ if (ret)
+ return NULL;
+
+ cmdq->db_registered = true;
+
+ return cmdq;
+}
+
+static void ivpu_cmdq_release_locked(struct ivpu_file_priv *file_priv, u16 engine)
+{
+ struct ivpu_cmdq *cmdq = file_priv->cmdq[engine];
+
+ lockdep_assert_held(&file_priv->lock);
+
+ if (cmdq) {
+ file_priv->cmdq[engine] = NULL;
+ if (cmdq->db_registered)
+ ivpu_jsm_unregister_db(file_priv->vdev, cmdq->db_id);
+
+ ivpu_cmdq_free(file_priv, cmdq);
+ }
+}
+
+void ivpu_cmdq_release_all(struct ivpu_file_priv *file_priv)
+{
+ int i;
+
+ mutex_lock(&file_priv->lock);
+
+ for (i = 0; i < IVPU_NUM_ENGINES; i++)
+ ivpu_cmdq_release_locked(file_priv, i);
+
+ mutex_unlock(&file_priv->lock);
+}
+
+/*
+ * Mark the doorbell as unregistered and reset job queue pointers.
+ * This function needs to be called when the VPU hardware is restarted
+ * and FW looses job queue state. The next time job queue is used it
+ * will be registered again.
+ */
+static void ivpu_cmdq_reset_locked(struct ivpu_file_priv *file_priv, u16 engine)
+{
+ struct ivpu_cmdq *cmdq = file_priv->cmdq[engine];
+
+ lockdep_assert_held(&file_priv->lock);
+
+ if (cmdq) {
+ cmdq->db_registered = false;
+ cmdq->jobq->header.head = 0;
+ cmdq->jobq->header.tail = 0;
+ wmb(); /* Flush WC buffer for jobq header */
+ }
+}
+
+static void ivpu_cmdq_reset_all(struct ivpu_file_priv *file_priv)
+{
+ int i;
+
+ mutex_lock(&file_priv->lock);
+
+ for (i = 0; i < IVPU_NUM_ENGINES; i++)
+ ivpu_cmdq_reset_locked(file_priv, i);
+
+ mutex_unlock(&file_priv->lock);
+}
+
+void ivpu_cmdq_reset_all_contexts(struct ivpu_device *vdev)
+{
+ struct ivpu_file_priv *file_priv;
+ unsigned long ctx_id;
+
+ xa_for_each(&vdev->context_xa, ctx_id, file_priv) {
+ file_priv = ivpu_file_priv_get_by_ctx_id(vdev, ctx_id);
+ if (!file_priv)
+ continue;
+
+ ivpu_cmdq_reset_all(file_priv);
+
+ ivpu_file_priv_put(&file_priv);
+ }
+}
+
+static int ivpu_cmdq_push_job(struct ivpu_cmdq *cmdq, struct ivpu_job *job)
+{
+ struct ivpu_device *vdev = job->vdev;
+ struct vpu_job_queue_header *header = &cmdq->jobq->header;
+ struct vpu_job_queue_entry *entry;
+ u32 tail = READ_ONCE(header->tail);
+ u32 next_entry = (tail + 1) % cmdq->entry_count;
+
+ /* Check if there is space left in job queue */
+ if (next_entry == header->head) {
+ ivpu_dbg(vdev, JOB, "Job queue full: ctx %d engine %d db %d head %d tail %d\n",
+ job->file_priv->ctx.id, job->engine_idx, cmdq->db_id, header->head, tail);
+ return -EBUSY;
+ }
+
+ entry = &cmdq->jobq->job[tail];
+ entry->batch_buf_addr = job->cmd_buf_vpu_addr;
+ entry->job_id = job->job_id;
+ entry->flags = 0;
+ wmb(); /* Ensure that tail is updated after filling entry */
+ header->tail = next_entry;
+ wmb(); /* Flush WC buffer for jobq header */
+
+ return 0;
+}
+
+struct ivpu_fence {
+ struct dma_fence base;
+ spinlock_t lock; /* protects base */
+ struct ivpu_device *vdev;
+};
+
+static inline struct ivpu_fence *to_vpu_fence(struct dma_fence *fence)
+{
+ return container_of(fence, struct ivpu_fence, base);
+}
+
+static const char *ivpu_fence_get_driver_name(struct dma_fence *fence)
+{
+ return DRIVER_NAME;
+}
+
+static const char *ivpu_fence_get_timeline_name(struct dma_fence *fence)
+{
+ struct ivpu_fence *ivpu_fence = to_vpu_fence(fence);
+
+ return dev_name(ivpu_fence->vdev->drm.dev);
+}
+
+static const struct dma_fence_ops ivpu_fence_ops = {
+ .get_driver_name = ivpu_fence_get_driver_name,
+ .get_timeline_name = ivpu_fence_get_timeline_name,
+};
+
+static struct dma_fence *ivpu_fence_create(struct ivpu_device *vdev)
+{
+ struct ivpu_fence *fence;
+
+ fence = kzalloc(sizeof(*fence), GFP_KERNEL);
+ if (!fence)
+ return NULL;
+
+ fence->vdev = vdev;
+ spin_lock_init(&fence->lock);
+ dma_fence_init(&fence->base, &ivpu_fence_ops, &fence->lock, dma_fence_context_alloc(1), 1);
+
+ return &fence->base;
+}
+
+static void job_get(struct ivpu_job *job, struct ivpu_job **link)
+{
+ struct ivpu_device *vdev = job->vdev;
+
+ kref_get(&job->ref);
+ *link = job;
+
+ ivpu_dbg(vdev, KREF, "Job get: id %u refcount %u\n", job->job_id, kref_read(&job->ref));
+}
+
+static void job_release(struct kref *ref)
+{
+ struct ivpu_job *job = container_of(ref, struct ivpu_job, ref);
+ struct ivpu_device *vdev = job->vdev;
+ u32 i;
+
+ for (i = 0; i < job->bo_count; i++)
+ if (job->bos[i])
+ drm_gem_object_put(&job->bos[i]->base);
+
+ dma_fence_put(job->done_fence);
+ ivpu_file_priv_put(&job->file_priv);
+
+ ivpu_dbg(vdev, KREF, "Job released: id %u\n", job->job_id);
+ kfree(job);
+
+ /* Allow the VPU to get suspended, must be called after ivpu_file_priv_put() */
+ ivpu_rpm_put(vdev);
+}
+
+static void job_put(struct ivpu_job *job)
+{
+ struct ivpu_device *vdev = job->vdev;
+
+ ivpu_dbg(vdev, KREF, "Job put: id %u refcount %u\n", job->job_id, kref_read(&job->ref));
+ kref_put(&job->ref, job_release);
+}
+
+static struct ivpu_job *
+ivpu_create_job(struct ivpu_file_priv *file_priv, u32 engine_idx, u32 bo_count)
+{
+ struct ivpu_device *vdev = file_priv->vdev;
+ struct ivpu_job *job;
+ size_t buf_size;
+ int ret;
+
+ ret = ivpu_rpm_get(vdev);
+ if (ret < 0)
+ return NULL;
+
+ buf_size = sizeof(*job) + bo_count * sizeof(struct ivpu_bo *);
+ job = kzalloc(buf_size, GFP_KERNEL);
+ if (!job)
+ goto err_rpm_put;
+
+ kref_init(&job->ref);
+
+ job->vdev = vdev;
+ job->engine_idx = engine_idx;
+ job->bo_count = bo_count;
+ job->done_fence = ivpu_fence_create(vdev);
+ if (!job->done_fence) {
+ ivpu_warn_ratelimited(vdev, "Failed to create a fence\n");
+ goto err_free_job;
+ }
+
+ job->file_priv = ivpu_file_priv_get(file_priv);
+
+ ivpu_dbg(vdev, JOB, "Job created: ctx %2d engine %d", file_priv->ctx.id, job->engine_idx);
+
+ return job;
+
+err_free_job:
+ kfree(job);
+err_rpm_put:
+ ivpu_rpm_put(vdev);
+ return NULL;
+}
+
+static int ivpu_job_done(struct ivpu_device *vdev, u32 job_id, u32 job_status)
+{
+ struct ivpu_job *job;
+
+ job = xa_erase(&vdev->submitted_jobs_xa, job_id);
+ if (!job)
+ return -ENOENT;
+
+ if (job->file_priv->has_mmu_faults)
+ job_status = VPU_JSM_STATUS_ABORTED;
+
+ job->bos[CMD_BUF_IDX]->job_status = job_status;
+ dma_fence_signal(job->done_fence);
+
+ ivpu_dbg(vdev, JOB, "Job complete: id %3u ctx %2d engine %d status 0x%x\n",
+ job->job_id, job->file_priv->ctx.id, job->engine_idx, job_status);
+
+ job_put(job);
+ return 0;
+}
+
+static void ivpu_job_done_message(struct ivpu_device *vdev, void *msg)
+{
+ struct vpu_ipc_msg_payload_job_done *payload;
+ struct vpu_jsm_msg *job_ret_msg = msg;
+ int ret;
+
+ payload = (struct vpu_ipc_msg_payload_job_done *)&job_ret_msg->payload;
+
+ ret = ivpu_job_done(vdev, payload->job_id, payload->job_status);
+ if (ret)
+ ivpu_err(vdev, "Failed to finish job %d: %d\n", payload->job_id, ret);
+}
+
+void ivpu_jobs_abort_all(struct ivpu_device *vdev)
+{
+ struct ivpu_job *job;
+ unsigned long id;
+
+ xa_for_each(&vdev->submitted_jobs_xa, id, job)
+ ivpu_job_done(vdev, id, VPU_JSM_STATUS_ABORTED);
+}
+
+static int ivpu_direct_job_submission(struct ivpu_job *job)
+{
+ struct ivpu_file_priv *file_priv = job->file_priv;
+ struct ivpu_device *vdev = job->vdev;
+ struct xa_limit job_id_range;
+ struct ivpu_cmdq *cmdq;
+ int ret;
+
+ mutex_lock(&file_priv->lock);
+
+ cmdq = ivpu_cmdq_acquire(job->file_priv, job->engine_idx);
+ if (!cmdq) {
+ ivpu_warn(vdev, "Failed get job queue, ctx %d engine %d\n",
+ file_priv->ctx.id, job->engine_idx);
+ ret = -EINVAL;
+ goto err_unlock;
+ }
+
+ job_id_range.min = FIELD_PREP(JOB_ID_CONTEXT_MASK, (file_priv->ctx.id - 1));
+ job_id_range.max = job_id_range.min | JOB_ID_JOB_MASK;
+
+ job_get(job, &job);
+ ret = xa_alloc(&vdev->submitted_jobs_xa, &job->job_id, job, job_id_range, GFP_KERNEL);
+ if (ret) {
+ ivpu_warn_ratelimited(vdev, "Failed to allocate job id: %d\n", ret);
+ goto err_job_put;
+ }
+
+ ret = ivpu_cmdq_push_job(cmdq, job);
+ if (ret)
+ goto err_xa_erase;
+
+ ivpu_dbg(vdev, JOB, "Job submitted: id %3u addr 0x%llx ctx %2d engine %d next %d\n",
+ job->job_id, job->cmd_buf_vpu_addr, file_priv->ctx.id,
+ job->engine_idx, cmdq->jobq->header.tail);
+
+ if (ivpu_test_mode == IVPU_TEST_MODE_NULL_HW) {
+ ivpu_job_done(vdev, job->job_id, VPU_JSM_STATUS_SUCCESS);
+ cmdq->jobq->header.head = cmdq->jobq->header.tail;
+ wmb(); /* Flush WC buffer for jobq header */
+ } else {
+ ivpu_cmdq_ring_db(vdev, cmdq);
+ }
+
+ mutex_unlock(&file_priv->lock);
+ return 0;
+
+err_xa_erase:
+ xa_erase(&vdev->submitted_jobs_xa, job->job_id);
+err_job_put:
+ job_put(job);
+err_unlock:
+ mutex_unlock(&file_priv->lock);
+ return ret;
+}
+
+static int
+ivpu_job_prepare_bos_for_submit(struct drm_file *file, struct ivpu_job *job, u32 *buf_handles,
+ u32 buf_count, u32 commands_offset)
+{
+ struct ivpu_file_priv *file_priv = file->driver_priv;
+ struct ivpu_device *vdev = file_priv->vdev;
+ struct ww_acquire_ctx acquire_ctx;
+ struct ivpu_bo *bo;
+ int ret;
+ u32 i;
+
+ for (i = 0; i < buf_count; i++) {
+ struct drm_gem_object *obj = drm_gem_object_lookup(file, buf_handles[i]);
+
+ if (!obj)
+ return -ENOENT;
+
+ job->bos[i] = to_ivpu_bo(obj);
+
+ ret = ivpu_bo_pin(job->bos[i]);
+ if (ret)
+ return ret;
+ }
+
+ bo = job->bos[CMD_BUF_IDX];
+ if (!dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_READ)) {
+ ivpu_warn(vdev, "Buffer is already in use\n");
+ return -EBUSY;
+ }
+
+ if (commands_offset >= bo->base.size) {
+ ivpu_warn(vdev, "Invalid command buffer offset %u\n", commands_offset);
+ return -EINVAL;
+ }
+
+ job->cmd_buf_vpu_addr = bo->vpu_addr + commands_offset;
+
+ ret = drm_gem_lock_reservations((struct drm_gem_object **)job->bos, buf_count,
+ &acquire_ctx);
+ if (ret) {
+ ivpu_warn(vdev, "Failed to lock reservations: %d\n", ret);
+ return ret;
+ }
+
+ for (i = 0; i < buf_count; i++) {
+ ret = dma_resv_reserve_fences(job->bos[i]->base.resv, 1);
+ if (ret) {
+ ivpu_warn(vdev, "Failed to reserve fences: %d\n", ret);
+ goto unlock_reservations;
+ }
+ }
+
+ for (i = 0; i < buf_count; i++)
+ dma_resv_add_fence(job->bos[i]->base.resv, job->done_fence, DMA_RESV_USAGE_WRITE);
+
+unlock_reservations:
+ drm_gem_unlock_reservations((struct drm_gem_object **)job->bos, buf_count, &acquire_ctx);
+
+ wmb(); /* Flush write combining buffers */
+
+ return ret;
+}
+
+int ivpu_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ int ret = 0;
+ struct ivpu_file_priv *file_priv = file->driver_priv;
+ struct ivpu_device *vdev = file_priv->vdev;
+ struct drm_ivpu_submit *params = data;
+ struct ivpu_job *job;
+ u32 *buf_handles;
+
+ if (params->engine > DRM_IVPU_ENGINE_COPY)
+ return -EINVAL;
+
+ if (params->buffer_count == 0 || params->buffer_count > JOB_MAX_BUFFER_COUNT)
+ return -EINVAL;
+
+ if (!IS_ALIGNED(params->commands_offset, 8))
+ return -EINVAL;
+
+ if (!file_priv->ctx.id)
+ return -EINVAL;
+
+ if (file_priv->has_mmu_faults)
+ return -EBADFD;
+
+ buf_handles = kcalloc(params->buffer_count, sizeof(u32), GFP_KERNEL);
+ if (!buf_handles)
+ return -ENOMEM;
+
+ ret = copy_from_user(buf_handles,
+ (void __user *)params->buffers_ptr,
+ params->buffer_count * sizeof(u32));
+ if (ret) {
+ ret = -EFAULT;
+ goto free_handles;
+ }
+
+ ivpu_dbg(vdev, JOB, "Submit ioctl: ctx %u buf_count %u\n",
+ file_priv->ctx.id, params->buffer_count);
+
+ job = ivpu_create_job(file_priv, params->engine, params->buffer_count);
+ if (!job) {
+ ivpu_err(vdev, "Failed to create job\n");
+ ret = -ENOMEM;
+ goto free_handles;
+ }
+
+ ret = ivpu_job_prepare_bos_for_submit(file, job, buf_handles, params->buffer_count,
+ params->commands_offset);
+ if (ret) {
+ ivpu_err(vdev, "Failed to prepare job, ret %d\n", ret);
+ goto job_put;
+ }
+
+ ret = ivpu_direct_job_submission(job);
+ if (ret) {
+ dma_fence_signal(job->done_fence);
+ ivpu_err(vdev, "Failed to submit job to the HW, ret %d\n", ret);
+ }
+
+job_put:
+ job_put(job);
+free_handles:
+ kfree(buf_handles);
+
+ return ret;
+}
+
+static int ivpu_job_done_thread(void *arg)
+{
+ struct ivpu_device *vdev = (struct ivpu_device *)arg;
+ struct ivpu_ipc_consumer cons;
+ struct vpu_jsm_msg jsm_msg;
+ bool jobs_submitted;
+ unsigned int timeout;
+ int ret;
+
+ ivpu_dbg(vdev, JOB, "Started %s\n", __func__);
+
+ ivpu_ipc_consumer_add(vdev, &cons, VPU_IPC_CHAN_JOB_RET);
+
+ while (!kthread_should_stop()) {
+ timeout = ivpu_tdr_timeout_ms ? ivpu_tdr_timeout_ms : vdev->timeout.tdr;
+ jobs_submitted = !xa_empty(&vdev->submitted_jobs_xa);
+ ret = ivpu_ipc_receive(vdev, &cons, NULL, &jsm_msg, timeout);
+ if (!ret) {
+ ivpu_job_done_message(vdev, &jsm_msg);
+ } else if (ret == -ETIMEDOUT) {
+ if (jobs_submitted && !xa_empty(&vdev->submitted_jobs_xa)) {
+ ivpu_err(vdev, "TDR detected, timeout %d ms", timeout);
+ ivpu_hw_diagnose_failure(vdev);
+ ivpu_pm_schedule_recovery(vdev);
+ }
+ }
+ }
+
+ ivpu_ipc_consumer_del(vdev, &cons);
+
+ ivpu_jobs_abort_all(vdev);
+
+ ivpu_dbg(vdev, JOB, "Stopped %s\n", __func__);
+ return 0;
+}
+
+int ivpu_job_done_thread_init(struct ivpu_device *vdev)
+{
+ struct task_struct *thread;
+
+ thread = kthread_run(&ivpu_job_done_thread, (void *)vdev, "ivpu_job_done_thread");
+ if (IS_ERR(thread)) {
+ ivpu_err(vdev, "Failed to start job completion thread\n");
+ return -EIO;
+ }
+
+ get_task_struct(thread);
+ wake_up_process(thread);
+
+ vdev->job_done_thread = thread;
+
+ return 0;
+}
+
+void ivpu_job_done_thread_fini(struct ivpu_device *vdev)
+{
+ kthread_stop(vdev->job_done_thread);
+ put_task_struct(vdev->job_done_thread);
+}
diff --git a/drivers/accel/ivpu/ivpu_job.h b/drivers/accel/ivpu/ivpu_job.h
new file mode 100644
index 000000000000..aa1f0b9479b0
--- /dev/null
+++ b/drivers/accel/ivpu/ivpu_job.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020-2023 Intel Corporation
+ */
+
+#ifndef __IVPU_JOB_H__
+#define __IVPU_JOB_H__
+
+#include <linux/kref.h>
+#include <linux/idr.h>
+
+#include "ivpu_gem.h"
+
+struct ivpu_device;
+struct ivpu_file_priv;
+
+/**
+ * struct ivpu_cmdq - Object representing device queue used to send jobs.
+ * @jobq: Pointer to job queue memory shared with the device
+ * @mem: Memory allocated for the job queue, shared with device
+ * @entry_count Number of job entries in the queue
+ * @db_id: Doorbell assigned to this job queue
+ * @db_registered: True if doorbell is registered in device
+ */
+struct ivpu_cmdq {
+ struct vpu_job_queue *jobq;
+ struct ivpu_bo *mem;
+ u32 entry_count;
+ u32 db_id;
+ bool db_registered;
+};
+
+/**
+ * struct ivpu_job - KMD object that represents batchbuffer / DMA buffer.
+ * Each batch / DMA buffer is a job to be submitted and executed by the VPU FW.
+ * This is a unit of execution, and be tracked by the job_id for
+ * any status reporting from VPU FW through IPC JOB RET/DONE message.
+ * @file_priv: The client that submitted this job
+ * @job_id: Job ID for KMD tracking and job status reporting from VPU FW
+ * @status: Status of the Job from IPC JOB RET/DONE message
+ * @batch_buffer: CPU vaddr points to the batch buffer memory allocated for the job
+ * @submit_status_offset: Offset within batch buffer where job completion handler
+ will update the job status
+ */
+struct ivpu_job {
+ struct kref ref;
+ struct ivpu_device *vdev;
+ struct ivpu_file_priv *file_priv;
+ struct dma_fence *done_fence;
+ u64 cmd_buf_vpu_addr;
+ u32 job_id;
+ u32 engine_idx;
+ size_t bo_count;
+ struct ivpu_bo *bos[];
+};
+
+int ivpu_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file);
+
+void ivpu_cmdq_release_all(struct ivpu_file_priv *file_priv);
+void ivpu_cmdq_reset_all_contexts(struct ivpu_device *vdev);
+
+int ivpu_job_done_thread_init(struct ivpu_device *vdev);
+void ivpu_job_done_thread_fini(struct ivpu_device *vdev);
+
+void ivpu_jobs_abort_all(struct ivpu_device *vdev);
+
+#endif /* __IVPU_JOB_H__ */
diff --git a/drivers/accel/ivpu/ivpu_jsm_msg.c b/drivers/accel/ivpu/ivpu_jsm_msg.c
new file mode 100644
index 000000000000..831bfd2b2d39
--- /dev/null
+++ b/drivers/accel/ivpu/ivpu_jsm_msg.c
@@ -0,0 +1,180 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020-2023 Intel Corporation
+ */
+
+#include "ivpu_drv.h"
+#include "ivpu_ipc.h"
+#include "ivpu_jsm_msg.h"
+
+int ivpu_jsm_register_db(struct ivpu_device *vdev, u32 ctx_id, u32 db_id,
+ u64 jobq_base, u32 jobq_size)
+{
+ struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_REGISTER_DB };
+ struct vpu_jsm_msg resp;
+ int ret = 0;
+
+ req.payload.register_db.db_idx = db_id;
+ req.payload.register_db.jobq_base = jobq_base;
+ req.payload.register_db.jobq_size = jobq_size;
+ req.payload.register_db.host_ssid = ctx_id;
+
+ ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_REGISTER_DB_DONE, &resp,
+ VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
+ if (ret) {
+ ivpu_err(vdev, "Failed to register doorbell %d: %d\n", db_id, ret);
+ return ret;
+ }
+
+ ivpu_dbg(vdev, JSM, "Doorbell %d registered to context %d\n", db_id, ctx_id);
+
+ return 0;
+}
+
+int ivpu_jsm_unregister_db(struct ivpu_device *vdev, u32 db_id)
+{
+ struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_UNREGISTER_DB };
+ struct vpu_jsm_msg resp;
+ int ret = 0;
+
+ req.payload.unregister_db.db_idx = db_id;
+
+ ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_UNREGISTER_DB_DONE, &resp,
+ VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
+ if (ret) {
+ ivpu_warn(vdev, "Failed to unregister doorbell %d: %d\n", db_id, ret);
+ return ret;
+ }
+
+ ivpu_dbg(vdev, JSM, "Doorbell %d unregistered\n", db_id);
+
+ return 0;
+}
+
+int ivpu_jsm_get_heartbeat(struct ivpu_device *vdev, u32 engine, u64 *heartbeat)
+{
+ struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_QUERY_ENGINE_HB };
+ struct vpu_jsm_msg resp;
+ int ret;
+
+ if (engine > VPU_ENGINE_COPY)
+ return -EINVAL;
+
+ req.payload.query_engine_hb.engine_idx = engine;
+
+ ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_QUERY_ENGINE_HB_DONE, &resp,
+ VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
+ if (ret) {
+ ivpu_err(vdev, "Failed to get heartbeat from engine %d: %d\n", engine, ret);
+ return ret;
+ }
+
+ *heartbeat = resp.payload.query_engine_hb_done.heartbeat;
+ return ret;
+}
+
+int ivpu_jsm_reset_engine(struct ivpu_device *vdev, u32 engine)
+{
+ struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_ENGINE_RESET };
+ struct vpu_jsm_msg resp;
+ int ret;
+
+ if (engine > VPU_ENGINE_COPY)
+ return -EINVAL;
+
+ req.payload.engine_reset.engine_idx = engine;
+
+ ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_ENGINE_RESET_DONE, &resp,
+ VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
+ if (ret)
+ ivpu_err(vdev, "Failed to reset engine %d: %d\n", engine, ret);
+
+ return ret;
+}
+
+int ivpu_jsm_preempt_engine(struct ivpu_device *vdev, u32 engine, u32 preempt_id)
+{
+ struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_ENGINE_PREEMPT };
+ struct vpu_jsm_msg resp;
+ int ret;
+
+ if (engine > VPU_ENGINE_COPY)
+ return -EINVAL;
+
+ req.payload.engine_preempt.engine_idx = engine;
+ req.payload.engine_preempt.preempt_id = preempt_id;
+
+ ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_ENGINE_PREEMPT_DONE, &resp,
+ VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
+ if (ret)
+ ivpu_err(vdev, "Failed to preempt engine %d: %d\n", engine, ret);
+
+ return ret;
+}
+
+int ivpu_jsm_dyndbg_control(struct ivpu_device *vdev, char *command, size_t size)
+{
+ struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_DYNDBG_CONTROL };
+ struct vpu_jsm_msg resp;
+ int ret;
+
+ if (!strncpy(req.payload.dyndbg_control.dyndbg_cmd, command, VPU_DYNDBG_CMD_MAX_LEN - 1))
+ return -ENOMEM;
+
+ ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_DYNDBG_CONTROL_RSP, &resp,
+ VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
+ if (ret)
+ ivpu_warn(vdev, "Failed to send command \"%s\": ret %d\n", command, ret);
+
+ return ret;
+}
+
+int ivpu_jsm_trace_get_capability(struct ivpu_device *vdev, u32 *trace_destination_mask,
+ u64 *trace_hw_component_mask)
+{
+ struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_TRACE_GET_CAPABILITY };
+ struct vpu_jsm_msg resp;
+ int ret;
+
+ ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_TRACE_GET_CAPABILITY_RSP, &resp,
+ VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
+ if (ret) {
+ ivpu_warn(vdev, "Failed to get trace capability: %d\n", ret);
+ return ret;
+ }
+
+ *trace_destination_mask = resp.payload.trace_capability.trace_destination_mask;
+ *trace_hw_component_mask = resp.payload.trace_capability.trace_hw_component_mask;
+
+ return ret;
+}
+
+int ivpu_jsm_trace_set_config(struct ivpu_device *vdev, u32 trace_level, u32 trace_destination_mask,
+ u64 trace_hw_component_mask)
+{
+ struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_TRACE_SET_CONFIG };
+ struct vpu_jsm_msg resp;
+ int ret;
+
+ req.payload.trace_config.trace_level = trace_level;
+ req.payload.trace_config.trace_destination_mask = trace_destination_mask;
+ req.payload.trace_config.trace_hw_component_mask = trace_hw_component_mask;
+
+ ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_TRACE_SET_CONFIG_RSP, &resp,
+ VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
+ if (ret)
+ ivpu_warn(vdev, "Failed to set config: %d\n", ret);
+
+ return ret;
+}
+
+int ivpu_jsm_context_release(struct ivpu_device *vdev, u32 host_ssid)
+{
+ struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_SSID_RELEASE };
+ struct vpu_jsm_msg resp;
+
+ req.payload.ssid_release.host_ssid = host_ssid;
+
+ return ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_SSID_RELEASE_DONE, &resp,
+ VPU_IPC_CHAN_ASYNC_CMD, vdev->timeout.jsm);
+}
diff --git a/drivers/accel/ivpu/ivpu_jsm_msg.h b/drivers/accel/ivpu/ivpu_jsm_msg.h
new file mode 100644
index 000000000000..ab50d7b017c1
--- /dev/null
+++ b/drivers/accel/ivpu/ivpu_jsm_msg.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020-2023 Intel Corporation
+ */
+
+#ifndef __IVPU_JSM_MSG_H__
+#define __IVPU_JSM_MSG_H__
+
+#include "vpu_jsm_api.h"
+
+int ivpu_jsm_register_db(struct ivpu_device *vdev, u32 ctx_id, u32 db_id,
+ u64 jobq_base, u32 jobq_size);
+int ivpu_jsm_unregister_db(struct ivpu_device *vdev, u32 db_id);
+int ivpu_jsm_get_heartbeat(struct ivpu_device *vdev, u32 engine, u64 *heartbeat);
+int ivpu_jsm_reset_engine(struct ivpu_device *vdev, u32 engine);
+int ivpu_jsm_preempt_engine(struct ivpu_device *vdev, u32 engine, u32 preempt_id);
+int ivpu_jsm_dyndbg_control(struct ivpu_device *vdev, char *command, size_t size);
+int ivpu_jsm_trace_get_capability(struct ivpu_device *vdev, u32 *trace_destination_mask,
+ u64 *trace_hw_component_mask);
+int ivpu_jsm_trace_set_config(struct ivpu_device *vdev, u32 trace_level, u32 trace_destination_mask,
+ u64 trace_hw_component_mask);
+int ivpu_jsm_context_release(struct ivpu_device *vdev, u32 host_ssid);
+#endif
diff --git a/drivers/accel/ivpu/ivpu_mmu.c b/drivers/accel/ivpu/ivpu_mmu.c
new file mode 100644
index 000000000000..694e978aba66
--- /dev/null
+++ b/drivers/accel/ivpu/ivpu_mmu.c
@@ -0,0 +1,883 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020-2023 Intel Corporation
+ */
+
+#include <linux/circ_buf.h>
+#include <linux/highmem.h>
+
+#include "ivpu_drv.h"
+#include "ivpu_hw_mtl_reg.h"
+#include "ivpu_hw_reg_io.h"
+#include "ivpu_mmu.h"
+#include "ivpu_mmu_context.h"
+#include "ivpu_pm.h"
+
+#define IVPU_MMU_IDR0_REF 0x080f3e0f
+#define IVPU_MMU_IDR0_REF_SIMICS 0x080f3e1f
+#define IVPU_MMU_IDR1_REF 0x0e739d18
+#define IVPU_MMU_IDR3_REF 0x0000003c
+#define IVPU_MMU_IDR5_REF 0x00040070
+#define IVPU_MMU_IDR5_REF_SIMICS 0x00000075
+#define IVPU_MMU_IDR5_REF_FPGA 0x00800075
+
+#define IVPU_MMU_CDTAB_ENT_SIZE 64
+#define IVPU_MMU_CDTAB_ENT_COUNT_LOG2 8 /* 256 entries */
+#define IVPU_MMU_CDTAB_ENT_COUNT ((u32)1 << IVPU_MMU_CDTAB_ENT_COUNT_LOG2)
+
+#define IVPU_MMU_STREAM_ID0 0
+#define IVPU_MMU_STREAM_ID3 3
+
+#define IVPU_MMU_STRTAB_ENT_SIZE 64
+#define IVPU_MMU_STRTAB_ENT_COUNT 4
+#define IVPU_MMU_STRTAB_CFG_LOG2SIZE 2
+#define IVPU_MMU_STRTAB_CFG IVPU_MMU_STRTAB_CFG_LOG2SIZE
+
+#define IVPU_MMU_Q_COUNT_LOG2 4 /* 16 entries */
+#define IVPU_MMU_Q_COUNT ((u32)1 << IVPU_MMU_Q_COUNT_LOG2)
+#define IVPU_MMU_Q_WRAP_BIT (IVPU_MMU_Q_COUNT << 1)
+#define IVPU_MMU_Q_WRAP_MASK (IVPU_MMU_Q_WRAP_BIT - 1)
+#define IVPU_MMU_Q_IDX_MASK (IVPU_MMU_Q_COUNT - 1)
+#define IVPU_MMU_Q_IDX(val) ((val) & IVPU_MMU_Q_IDX_MASK)
+
+#define IVPU_MMU_CMDQ_CMD_SIZE 16
+#define IVPU_MMU_CMDQ_SIZE (IVPU_MMU_Q_COUNT * IVPU_MMU_CMDQ_CMD_SIZE)
+
+#define IVPU_MMU_EVTQ_CMD_SIZE 32
+#define IVPU_MMU_EVTQ_SIZE (IVPU_MMU_Q_COUNT * IVPU_MMU_EVTQ_CMD_SIZE)
+
+#define IVPU_MMU_CMD_OPCODE GENMASK(7, 0)
+
+#define IVPU_MMU_CMD_SYNC_0_CS GENMASK(13, 12)
+#define IVPU_MMU_CMD_SYNC_0_MSH GENMASK(23, 22)
+#define IVPU_MMU_CMD_SYNC_0_MSI_ATTR GENMASK(27, 24)
+#define IVPU_MMU_CMD_SYNC_0_MSI_ATTR GENMASK(27, 24)
+#define IVPU_MMU_CMD_SYNC_0_MSI_DATA GENMASK(63, 32)
+
+#define IVPU_MMU_CMD_CFGI_0_SSEC BIT(10)
+#define IVPU_MMU_CMD_CFGI_0_SSV BIT(11)
+#define IVPU_MMU_CMD_CFGI_0_SSID GENMASK(31, 12)
+#define IVPU_MMU_CMD_CFGI_0_SID GENMASK(63, 32)
+#define IVPU_MMU_CMD_CFGI_1_RANGE GENMASK(4, 0)
+
+#define IVPU_MMU_CMD_TLBI_0_ASID GENMASK(63, 48)
+#define IVPU_MMU_CMD_TLBI_0_VMID GENMASK(47, 32)
+
+#define CMD_PREFETCH_CFG 0x1
+#define CMD_CFGI_STE 0x3
+#define CMD_CFGI_ALL 0x4
+#define CMD_CFGI_CD 0x5
+#define CMD_CFGI_CD_ALL 0x6
+#define CMD_TLBI_NH_ASID 0x11
+#define CMD_TLBI_EL2_ALL 0x20
+#define CMD_TLBI_NSNH_ALL 0x30
+#define CMD_SYNC 0x46
+
+#define IVPU_MMU_EVT_F_UUT 0x01
+#define IVPU_MMU_EVT_C_BAD_STREAMID 0x02
+#define IVPU_MMU_EVT_F_STE_FETCH 0x03
+#define IVPU_MMU_EVT_C_BAD_STE 0x04
+#define IVPU_MMU_EVT_F_BAD_ATS_TREQ 0x05
+#define IVPU_MMU_EVT_F_STREAM_DISABLED 0x06
+#define IVPU_MMU_EVT_F_TRANSL_FORBIDDEN 0x07
+#define IVPU_MMU_EVT_C_BAD_SUBSTREAMID 0x08
+#define IVPU_MMU_EVT_F_CD_FETCH 0x09
+#define IVPU_MMU_EVT_C_BAD_CD 0x0a
+#define IVPU_MMU_EVT_F_WALK_EABT 0x0b
+#define IVPU_MMU_EVT_F_TRANSLATION 0x10
+#define IVPU_MMU_EVT_F_ADDR_SIZE 0x11
+#define IVPU_MMU_EVT_F_ACCESS 0x12
+#define IVPU_MMU_EVT_F_PERMISSION 0x13
+#define IVPU_MMU_EVT_F_TLB_CONFLICT 0x20
+#define IVPU_MMU_EVT_F_CFG_CONFLICT 0x21
+#define IVPU_MMU_EVT_E_PAGE_REQUEST 0x24
+#define IVPU_MMU_EVT_F_VMS_FETCH 0x25
+
+#define IVPU_MMU_EVT_OP_MASK GENMASK_ULL(7, 0)
+#define IVPU_MMU_EVT_SSID_MASK GENMASK_ULL(31, 12)
+
+#define IVPU_MMU_Q_BASE_RWA BIT(62)
+#define IVPU_MMU_Q_BASE_ADDR_MASK GENMASK_ULL(51, 5)
+#define IVPU_MMU_STRTAB_BASE_RA BIT(62)
+#define IVPU_MMU_STRTAB_BASE_ADDR_MASK GENMASK_ULL(51, 6)
+
+#define IVPU_MMU_IRQ_EVTQ_EN BIT(2)
+#define IVPU_MMU_IRQ_GERROR_EN BIT(0)
+
+#define IVPU_MMU_CR0_ATSCHK BIT(4)
+#define IVPU_MMU_CR0_CMDQEN BIT(3)
+#define IVPU_MMU_CR0_EVTQEN BIT(2)
+#define IVPU_MMU_CR0_PRIQEN BIT(1)
+#define IVPU_MMU_CR0_SMMUEN BIT(0)
+
+#define IVPU_MMU_CR1_TABLE_SH GENMASK(11, 10)
+#define IVPU_MMU_CR1_TABLE_OC GENMASK(9, 8)
+#define IVPU_MMU_CR1_TABLE_IC GENMASK(7, 6)
+#define IVPU_MMU_CR1_QUEUE_SH GENMASK(5, 4)
+#define IVPU_MMU_CR1_QUEUE_OC GENMASK(3, 2)
+#define IVPU_MMU_CR1_QUEUE_IC GENMASK(1, 0)
+#define IVPU_MMU_CACHE_NC 0
+#define IVPU_MMU_CACHE_WB 1
+#define IVPU_MMU_CACHE_WT 2
+#define IVPU_MMU_SH_NSH 0
+#define IVPU_MMU_SH_OSH 2
+#define IVPU_MMU_SH_ISH 3
+
+#define IVPU_MMU_CMDQ_OP GENMASK_ULL(7, 0)
+
+#define IVPU_MMU_CD_0_TCR_T0SZ GENMASK_ULL(5, 0)
+#define IVPU_MMU_CD_0_TCR_TG0 GENMASK_ULL(7, 6)
+#define IVPU_MMU_CD_0_TCR_IRGN0 GENMASK_ULL(9, 8)
+#define IVPU_MMU_CD_0_TCR_ORGN0 GENMASK_ULL(11, 10)
+#define IVPU_MMU_CD_0_TCR_SH0 GENMASK_ULL(13, 12)
+#define IVPU_MMU_CD_0_TCR_EPD0 BIT_ULL(14)
+#define IVPU_MMU_CD_0_TCR_EPD1 BIT_ULL(30)
+#define IVPU_MMU_CD_0_ENDI BIT(15)
+#define IVPU_MMU_CD_0_V BIT(31)
+#define IVPU_MMU_CD_0_TCR_IPS GENMASK_ULL(34, 32)
+#define IVPU_MMU_CD_0_TCR_TBI0 BIT_ULL(38)
+#define IVPU_MMU_CD_0_AA64 BIT(41)
+#define IVPU_MMU_CD_0_S BIT(44)
+#define IVPU_MMU_CD_0_R BIT(45)
+#define IVPU_MMU_CD_0_A BIT(46)
+#define IVPU_MMU_CD_0_ASET BIT(47)
+#define IVPU_MMU_CD_0_ASID GENMASK_ULL(63, 48)
+
+#define IVPU_MMU_CD_1_TTB0_MASK GENMASK_ULL(51, 4)
+
+#define IVPU_MMU_STE_0_S1CDMAX GENMASK_ULL(63, 59)
+#define IVPU_MMU_STE_0_S1FMT GENMASK_ULL(5, 4)
+#define IVPU_MMU_STE_0_S1FMT_LINEAR 0
+#define IVPU_MMU_STE_DWORDS 8
+#define IVPU_MMU_STE_0_CFG_S1_TRANS 5
+#define IVPU_MMU_STE_0_CFG GENMASK_ULL(3, 1)
+#define IVPU_MMU_STE_0_S1CTXPTR_MASK GENMASK_ULL(51, 6)
+#define IVPU_MMU_STE_0_V BIT(0)
+
+#define IVPU_MMU_STE_1_STRW_NSEL1 0ul
+#define IVPU_MMU_STE_1_CONT GENMASK_ULL(16, 13)
+#define IVPU_MMU_STE_1_STRW GENMASK_ULL(31, 30)
+#define IVPU_MMU_STE_1_PRIVCFG GENMASK_ULL(49, 48)
+#define IVPU_MMU_STE_1_PRIVCFG_UNPRIV 2ul
+#define IVPU_MMU_STE_1_INSTCFG GENMASK_ULL(51, 50)
+#define IVPU_MMU_STE_1_INSTCFG_DATA 2ul
+#define IVPU_MMU_STE_1_MEV BIT(19)
+#define IVPU_MMU_STE_1_S1STALLD BIT(27)
+#define IVPU_MMU_STE_1_S1C_CACHE_NC 0ul
+#define IVPU_MMU_STE_1_S1C_CACHE_WBRA 1ul
+#define IVPU_MMU_STE_1_S1C_CACHE_WT 2ul
+#define IVPU_MMU_STE_1_S1C_CACHE_WB 3ul
+#define IVPU_MMU_STE_1_S1CIR GENMASK_ULL(3, 2)
+#define IVPU_MMU_STE_1_S1COR GENMASK_ULL(5, 4)
+#define IVPU_MMU_STE_1_S1CSH GENMASK_ULL(7, 6)
+#define IVPU_MMU_STE_1_S1DSS GENMASK_ULL(1, 0)
+#define IVPU_MMU_STE_1_S1DSS_TERMINATE 0x0
+
+#define IVPU_MMU_REG_TIMEOUT_US (10 * USEC_PER_MSEC)
+#define IVPU_MMU_QUEUE_TIMEOUT_US (100 * USEC_PER_MSEC)
+
+#define IVPU_MMU_GERROR_ERR_MASK ((REG_FLD(MTL_VPU_HOST_MMU_GERROR, CMDQ)) | \
+ (REG_FLD(MTL_VPU_HOST_MMU_GERROR, EVTQ_ABT)) | \
+ (REG_FLD(MTL_VPU_HOST_MMU_GERROR, PRIQ_ABT)) | \
+ (REG_FLD(MTL_VPU_HOST_MMU_GERROR, MSI_CMDQ_ABT)) | \
+ (REG_FLD(MTL_VPU_HOST_MMU_GERROR, MSI_EVTQ_ABT)) | \
+ (REG_FLD(MTL_VPU_HOST_MMU_GERROR, MSI_PRIQ_ABT)) | \
+ (REG_FLD(MTL_VPU_HOST_MMU_GERROR, MSI_ABT)))
+
+static char *ivpu_mmu_event_to_str(u32 cmd)
+{
+ switch (cmd) {
+ case IVPU_MMU_EVT_F_UUT:
+ return "Unsupported Upstream Transaction";
+ case IVPU_MMU_EVT_C_BAD_STREAMID:
+ return "Transaction StreamID out of range";
+ case IVPU_MMU_EVT_F_STE_FETCH:
+ return "Fetch of STE caused external abort";
+ case IVPU_MMU_EVT_C_BAD_STE:
+ return "Used STE invalid";
+ case IVPU_MMU_EVT_F_BAD_ATS_TREQ:
+ return "Address Request disallowed for a StreamID";
+ case IVPU_MMU_EVT_F_STREAM_DISABLED:
+ return "Transaction marks non-substream disabled";
+ case IVPU_MMU_EVT_F_TRANSL_FORBIDDEN:
+ return "MMU bypass is disallowed for this StreamID";
+ case IVPU_MMU_EVT_C_BAD_SUBSTREAMID:
+ return "Invalid StreamID";
+ case IVPU_MMU_EVT_F_CD_FETCH:
+ return "Fetch of CD caused external abort";
+ case IVPU_MMU_EVT_C_BAD_CD:
+ return "Fetched CD invalid";
+ case IVPU_MMU_EVT_F_WALK_EABT:
+ return " An external abort occurred fetching a TLB";
+ case IVPU_MMU_EVT_F_TRANSLATION:
+ return "Translation fault";
+ case IVPU_MMU_EVT_F_ADDR_SIZE:
+ return " Output address caused address size fault";
+ case IVPU_MMU_EVT_F_ACCESS:
+ return "Access flag fault";
+ case IVPU_MMU_EVT_F_PERMISSION:
+ return "Permission fault occurred on page access";
+ case IVPU_MMU_EVT_F_TLB_CONFLICT:
+ return "A TLB conflict";
+ case IVPU_MMU_EVT_F_CFG_CONFLICT:
+ return "A configuration cache conflict";
+ case IVPU_MMU_EVT_E_PAGE_REQUEST:
+ return "Page request hint from a client device";
+ case IVPU_MMU_EVT_F_VMS_FETCH:
+ return "Fetch of VMS caused external abort";
+ default:
+ return "Unknown CMDQ command";
+ }
+}
+
+static void ivpu_mmu_config_check(struct ivpu_device *vdev)
+{
+ u32 val_ref;
+ u32 val;
+
+ if (ivpu_is_simics(vdev))
+ val_ref = IVPU_MMU_IDR0_REF_SIMICS;
+ else
+ val_ref = IVPU_MMU_IDR0_REF;
+
+ val = REGV_RD32(MTL_VPU_HOST_MMU_IDR0);
+ if (val != val_ref)
+ ivpu_dbg(vdev, MMU, "IDR0 0x%x != IDR0_REF 0x%x\n", val, val_ref);
+
+ val = REGV_RD32(MTL_VPU_HOST_MMU_IDR1);
+ if (val != IVPU_MMU_IDR1_REF)
+ ivpu_dbg(vdev, MMU, "IDR1 0x%x != IDR1_REF 0x%x\n", val, IVPU_MMU_IDR1_REF);
+
+ val = REGV_RD32(MTL_VPU_HOST_MMU_IDR3);
+ if (val != IVPU_MMU_IDR3_REF)
+ ivpu_dbg(vdev, MMU, "IDR3 0x%x != IDR3_REF 0x%x\n", val, IVPU_MMU_IDR3_REF);
+
+ if (ivpu_is_simics(vdev))
+ val_ref = IVPU_MMU_IDR5_REF_SIMICS;
+ else if (ivpu_is_fpga(vdev))
+ val_ref = IVPU_MMU_IDR5_REF_FPGA;
+ else
+ val_ref = IVPU_MMU_IDR5_REF;
+
+ val = REGV_RD32(MTL_VPU_HOST_MMU_IDR5);
+ if (val != val_ref)
+ ivpu_dbg(vdev, MMU, "IDR5 0x%x != IDR5_REF 0x%x\n", val, val_ref);
+}
+
+static int ivpu_mmu_cdtab_alloc(struct ivpu_device *vdev)
+{
+ struct ivpu_mmu_info *mmu = vdev->mmu;
+ struct ivpu_mmu_cdtab *cdtab = &mmu->cdtab;
+ size_t size = IVPU_MMU_CDTAB_ENT_COUNT * IVPU_MMU_CDTAB_ENT_SIZE;
+
+ cdtab->base = dmam_alloc_coherent(vdev->drm.dev, size, &cdtab->dma, GFP_KERNEL);
+ if (!cdtab->base)
+ return -ENOMEM;
+
+ ivpu_dbg(vdev, MMU, "CDTAB alloc: dma=%pad size=%zu\n", &cdtab->dma, size);
+
+ return 0;
+}
+
+static int ivpu_mmu_strtab_alloc(struct ivpu_device *vdev)
+{
+ struct ivpu_mmu_info *mmu = vdev->mmu;
+ struct ivpu_mmu_strtab *strtab = &mmu->strtab;
+ size_t size = IVPU_MMU_STRTAB_ENT_COUNT * IVPU_MMU_STRTAB_ENT_SIZE;
+
+ strtab->base = dmam_alloc_coherent(vdev->drm.dev, size, &strtab->dma, GFP_KERNEL);
+ if (!strtab->base)
+ return -ENOMEM;
+
+ strtab->base_cfg = IVPU_MMU_STRTAB_CFG;
+ strtab->dma_q = IVPU_MMU_STRTAB_BASE_RA;
+ strtab->dma_q |= strtab->dma & IVPU_MMU_STRTAB_BASE_ADDR_MASK;
+
+ ivpu_dbg(vdev, MMU, "STRTAB alloc: dma=%pad dma_q=%pad size=%zu\n",
+ &strtab->dma, &strtab->dma_q, size);
+
+ return 0;
+}
+
+static int ivpu_mmu_cmdq_alloc(struct ivpu_device *vdev)
+{
+ struct ivpu_mmu_info *mmu = vdev->mmu;
+ struct ivpu_mmu_queue *q = &mmu->cmdq;
+
+ q->base = dmam_alloc_coherent(vdev->drm.dev, IVPU_MMU_CMDQ_SIZE, &q->dma, GFP_KERNEL);
+ if (!q->base)
+ return -ENOMEM;
+
+ q->dma_q = IVPU_MMU_Q_BASE_RWA;
+ q->dma_q |= q->dma & IVPU_MMU_Q_BASE_ADDR_MASK;
+ q->dma_q |= IVPU_MMU_Q_COUNT_LOG2;
+
+ ivpu_dbg(vdev, MMU, "CMDQ alloc: dma=%pad dma_q=%pad size=%u\n",
+ &q->dma, &q->dma_q, IVPU_MMU_CMDQ_SIZE);
+
+ return 0;
+}
+
+static int ivpu_mmu_evtq_alloc(struct ivpu_device *vdev)
+{
+ struct ivpu_mmu_info *mmu = vdev->mmu;
+ struct ivpu_mmu_queue *q = &mmu->evtq;
+
+ q->base = dmam_alloc_coherent(vdev->drm.dev, IVPU_MMU_EVTQ_SIZE, &q->dma, GFP_KERNEL);
+ if (!q->base)
+ return -ENOMEM;
+
+ q->dma_q = IVPU_MMU_Q_BASE_RWA;
+ q->dma_q |= q->dma & IVPU_MMU_Q_BASE_ADDR_MASK;
+ q->dma_q |= IVPU_MMU_Q_COUNT_LOG2;
+
+ ivpu_dbg(vdev, MMU, "EVTQ alloc: dma=%pad dma_q=%pad size=%u\n",
+ &q->dma, &q->dma_q, IVPU_MMU_EVTQ_SIZE);
+
+ return 0;
+}
+
+static int ivpu_mmu_structs_alloc(struct ivpu_device *vdev)
+{
+ int ret;
+
+ ret = ivpu_mmu_cdtab_alloc(vdev);
+ if (ret) {
+ ivpu_err(vdev, "Failed to allocate cdtab: %d\n", ret);
+ return ret;
+ }
+
+ ret = ivpu_mmu_strtab_alloc(vdev);
+ if (ret) {
+ ivpu_err(vdev, "Failed to allocate strtab: %d\n", ret);
+ return ret;
+ }
+
+ ret = ivpu_mmu_cmdq_alloc(vdev);
+ if (ret) {
+ ivpu_err(vdev, "Failed to allocate cmdq: %d\n", ret);
+ return ret;
+ }
+
+ ret = ivpu_mmu_evtq_alloc(vdev);
+ if (ret)
+ ivpu_err(vdev, "Failed to allocate evtq: %d\n", ret);
+
+ return ret;
+}
+
+static int ivpu_mmu_reg_write(struct ivpu_device *vdev, u32 reg, u32 val)
+{
+ u32 reg_ack = reg + 4; /* ACK register is 4B after base register */
+ u32 val_ack;
+ int ret;
+
+ REGV_WR32(reg, val);
+
+ ret = REGV_POLL(reg_ack, val_ack, (val == val_ack), IVPU_MMU_REG_TIMEOUT_US);
+ if (ret)
+ ivpu_err(vdev, "Failed to write register 0x%x\n", reg);
+
+ return ret;
+}
+
+static int ivpu_mmu_irqs_setup(struct ivpu_device *vdev)
+{
+ u32 irq_ctrl = IVPU_MMU_IRQ_EVTQ_EN | IVPU_MMU_IRQ_GERROR_EN;
+ int ret;
+
+ ret = ivpu_mmu_reg_write(vdev, MTL_VPU_HOST_MMU_IRQ_CTRL, 0);
+ if (ret)
+ return ret;
+
+ return ivpu_mmu_reg_write(vdev, MTL_VPU_HOST_MMU_IRQ_CTRL, irq_ctrl);
+}
+
+static int ivpu_mmu_cmdq_wait_for_cons(struct ivpu_device *vdev)
+{
+ struct ivpu_mmu_queue *cmdq = &vdev->mmu->cmdq;
+
+ return REGV_POLL(MTL_VPU_HOST_MMU_CMDQ_CONS, cmdq->cons, (cmdq->prod == cmdq->cons),
+ IVPU_MMU_QUEUE_TIMEOUT_US);
+}
+
+static int ivpu_mmu_cmdq_cmd_write(struct ivpu_device *vdev, const char *name, u64 data0, u64 data1)
+{
+ struct ivpu_mmu_queue *q = &vdev->mmu->cmdq;
+ u64 *queue_buffer = q->base;
+ int idx = IVPU_MMU_Q_IDX(q->prod) * (IVPU_MMU_CMDQ_CMD_SIZE / sizeof(*queue_buffer));
+
+ if (!CIRC_SPACE(IVPU_MMU_Q_IDX(q->prod), IVPU_MMU_Q_IDX(q->cons), IVPU_MMU_Q_COUNT)) {
+ ivpu_err(vdev, "Failed to write MMU CMD %s\n", name);
+ return -EBUSY;
+ }
+
+ queue_buffer[idx] = data0;
+ queue_buffer[idx + 1] = data1;
+ q->prod = (q->prod + 1) & IVPU_MMU_Q_WRAP_MASK;
+
+ ivpu_dbg(vdev, MMU, "CMD write: %s data: 0x%llx 0x%llx\n", name, data0, data1);
+
+ return 0;
+}
+
+static int ivpu_mmu_cmdq_sync(struct ivpu_device *vdev)
+{
+ struct ivpu_mmu_queue *q = &vdev->mmu->cmdq;
+ u64 val;
+ int ret;
+
+ val = FIELD_PREP(IVPU_MMU_CMD_OPCODE, CMD_SYNC) |
+ FIELD_PREP(IVPU_MMU_CMD_SYNC_0_CS, 0x2) |
+ FIELD_PREP(IVPU_MMU_CMD_SYNC_0_MSH, 0x3) |
+ FIELD_PREP(IVPU_MMU_CMD_SYNC_0_MSI_ATTR, 0xf);
+
+ ret = ivpu_mmu_cmdq_cmd_write(vdev, "SYNC", val, 0);
+ if (ret)
+ return ret;
+
+ clflush_cache_range(q->base, IVPU_MMU_CMDQ_SIZE);
+ REGV_WR32(MTL_VPU_HOST_MMU_CMDQ_PROD, q->prod);
+
+ ret = ivpu_mmu_cmdq_wait_for_cons(vdev);
+ if (ret)
+ ivpu_err(vdev, "Timed out waiting for consumer: %d\n", ret);
+
+ return ret;
+}
+
+static int ivpu_mmu_cmdq_write_cfgi_all(struct ivpu_device *vdev)
+{
+ u64 data0 = FIELD_PREP(IVPU_MMU_CMD_OPCODE, CMD_CFGI_ALL);
+ u64 data1 = FIELD_PREP(IVPU_MMU_CMD_CFGI_1_RANGE, 0x1f);
+
+ return ivpu_mmu_cmdq_cmd_write(vdev, "CFGI_ALL", data0, data1);
+}
+
+static int ivpu_mmu_cmdq_write_tlbi_nh_asid(struct ivpu_device *vdev, u16 ssid)
+{
+ u64 val = FIELD_PREP(IVPU_MMU_CMD_OPCODE, CMD_TLBI_NH_ASID) |
+ FIELD_PREP(IVPU_MMU_CMD_TLBI_0_ASID, ssid);
+
+ return ivpu_mmu_cmdq_cmd_write(vdev, "TLBI_NH_ASID", val, 0);
+}
+
+static int ivpu_mmu_cmdq_write_tlbi_nsnh_all(struct ivpu_device *vdev)
+{
+ u64 val = FIELD_PREP(IVPU_MMU_CMD_OPCODE, CMD_TLBI_NSNH_ALL);
+
+ return ivpu_mmu_cmdq_cmd_write(vdev, "TLBI_NSNH_ALL", val, 0);
+}
+
+static int ivpu_mmu_reset(struct ivpu_device *vdev)
+{
+ struct ivpu_mmu_info *mmu = vdev->mmu;
+ u32 val;
+ int ret;
+
+ memset(mmu->cmdq.base, 0, IVPU_MMU_CMDQ_SIZE);
+ clflush_cache_range(mmu->cmdq.base, IVPU_MMU_CMDQ_SIZE);
+ mmu->cmdq.prod = 0;
+ mmu->cmdq.cons = 0;
+
+ memset(mmu->evtq.base, 0, IVPU_MMU_EVTQ_SIZE);
+ clflush_cache_range(mmu->evtq.base, IVPU_MMU_EVTQ_SIZE);
+ mmu->evtq.prod = 0;
+ mmu->evtq.cons = 0;
+
+ ret = ivpu_mmu_reg_write(vdev, MTL_VPU_HOST_MMU_CR0, 0);
+ if (ret)
+ return ret;
+
+ val = FIELD_PREP(IVPU_MMU_CR1_TABLE_SH, IVPU_MMU_SH_ISH) |
+ FIELD_PREP(IVPU_MMU_CR1_TABLE_OC, IVPU_MMU_CACHE_WB) |
+ FIELD_PREP(IVPU_MMU_CR1_TABLE_IC, IVPU_MMU_CACHE_WB) |
+ FIELD_PREP(IVPU_MMU_CR1_QUEUE_SH, IVPU_MMU_SH_ISH) |
+ FIELD_PREP(IVPU_MMU_CR1_QUEUE_OC, IVPU_MMU_CACHE_WB) |
+ FIELD_PREP(IVPU_MMU_CR1_QUEUE_IC, IVPU_MMU_CACHE_WB);
+ REGV_WR32(MTL_VPU_HOST_MMU_CR1, val);
+
+ REGV_WR64(MTL_VPU_HOST_MMU_STRTAB_BASE, mmu->strtab.dma_q);
+ REGV_WR32(MTL_VPU_HOST_MMU_STRTAB_BASE_CFG, mmu->strtab.base_cfg);
+
+ REGV_WR64(MTL_VPU_HOST_MMU_CMDQ_BASE, mmu->cmdq.dma_q);
+ REGV_WR32(MTL_VPU_HOST_MMU_CMDQ_PROD, 0);
+ REGV_WR32(MTL_VPU_HOST_MMU_CMDQ_CONS, 0);
+
+ val = IVPU_MMU_CR0_CMDQEN;
+ ret = ivpu_mmu_reg_write(vdev, MTL_VPU_HOST_MMU_CR0, val);
+ if (ret)
+ return ret;
+
+ ret = ivpu_mmu_cmdq_write_cfgi_all(vdev);
+ if (ret)
+ return ret;
+
+ ret = ivpu_mmu_cmdq_write_tlbi_nsnh_all(vdev);
+ if (ret)
+ return ret;
+
+ ret = ivpu_mmu_cmdq_sync(vdev);
+ if (ret)
+ return ret;
+
+ REGV_WR64(MTL_VPU_HOST_MMU_EVTQ_BASE, mmu->evtq.dma_q);
+ REGV_WR32(MTL_VPU_HOST_MMU_EVTQ_PROD_SEC, 0);
+ REGV_WR32(MTL_VPU_HOST_MMU_EVTQ_CONS_SEC, 0);
+
+ val |= IVPU_MMU_CR0_EVTQEN;
+ ret = ivpu_mmu_reg_write(vdev, MTL_VPU_HOST_MMU_CR0, val);
+ if (ret)
+ return ret;
+
+ val |= IVPU_MMU_CR0_ATSCHK;
+ ret = ivpu_mmu_reg_write(vdev, MTL_VPU_HOST_MMU_CR0, val);
+ if (ret)
+ return ret;
+
+ ret = ivpu_mmu_irqs_setup(vdev);
+ if (ret)
+ return ret;
+
+ val |= IVPU_MMU_CR0_SMMUEN;
+ return ivpu_mmu_reg_write(vdev, MTL_VPU_HOST_MMU_CR0, val);
+}
+
+static void ivpu_mmu_strtab_link_cd(struct ivpu_device *vdev, u32 sid)
+{
+ struct ivpu_mmu_info *mmu = vdev->mmu;
+ struct ivpu_mmu_strtab *strtab = &mmu->strtab;
+ struct ivpu_mmu_cdtab *cdtab = &mmu->cdtab;
+ u64 *entry = strtab->base + (sid * IVPU_MMU_STRTAB_ENT_SIZE);
+ u64 str[2];
+
+ str[0] = FIELD_PREP(IVPU_MMU_STE_0_CFG, IVPU_MMU_STE_0_CFG_S1_TRANS) |
+ FIELD_PREP(IVPU_MMU_STE_0_S1CDMAX, IVPU_MMU_CDTAB_ENT_COUNT_LOG2) |
+ FIELD_PREP(IVPU_MMU_STE_0_S1FMT, IVPU_MMU_STE_0_S1FMT_LINEAR) |
+ IVPU_MMU_STE_0_V |
+ (cdtab->dma & IVPU_MMU_STE_0_S1CTXPTR_MASK);
+
+ str[1] = FIELD_PREP(IVPU_MMU_STE_1_S1DSS, IVPU_MMU_STE_1_S1DSS_TERMINATE) |
+ FIELD_PREP(IVPU_MMU_STE_1_S1CIR, IVPU_MMU_STE_1_S1C_CACHE_NC) |
+ FIELD_PREP(IVPU_MMU_STE_1_S1COR, IVPU_MMU_STE_1_S1C_CACHE_NC) |
+ FIELD_PREP(IVPU_MMU_STE_1_S1CSH, IVPU_MMU_SH_NSH) |
+ FIELD_PREP(IVPU_MMU_STE_1_PRIVCFG, IVPU_MMU_STE_1_PRIVCFG_UNPRIV) |
+ FIELD_PREP(IVPU_MMU_STE_1_INSTCFG, IVPU_MMU_STE_1_INSTCFG_DATA) |
+ FIELD_PREP(IVPU_MMU_STE_1_STRW, IVPU_MMU_STE_1_STRW_NSEL1) |
+ FIELD_PREP(IVPU_MMU_STE_1_CONT, IVPU_MMU_STRTAB_CFG_LOG2SIZE) |
+ IVPU_MMU_STE_1_MEV |
+ IVPU_MMU_STE_1_S1STALLD;
+
+ WRITE_ONCE(entry[1], str[1]);
+ WRITE_ONCE(entry[0], str[0]);
+
+ clflush_cache_range(entry, IVPU_MMU_STRTAB_ENT_SIZE);
+
+ ivpu_dbg(vdev, MMU, "STRTAB write entry (SSID=%u): 0x%llx, 0x%llx\n", sid, str[0], str[1]);
+}
+
+static int ivpu_mmu_strtab_init(struct ivpu_device *vdev)
+{
+ ivpu_mmu_strtab_link_cd(vdev, IVPU_MMU_STREAM_ID0);
+ ivpu_mmu_strtab_link_cd(vdev, IVPU_MMU_STREAM_ID3);
+
+ return 0;
+}
+
+int ivpu_mmu_invalidate_tlb(struct ivpu_device *vdev, u16 ssid)
+{
+ struct ivpu_mmu_info *mmu = vdev->mmu;
+ int ret;
+
+ ret = mutex_lock_interruptible(&mmu->lock);
+ if (ret)
+ return ret;
+
+ if (!mmu->on) {
+ ret = 0;
+ goto unlock;
+ }
+
+ ret = ivpu_mmu_cmdq_write_tlbi_nh_asid(vdev, ssid);
+ if (ret)
+ goto unlock;
+
+ ret = ivpu_mmu_cmdq_sync(vdev);
+unlock:
+ mutex_unlock(&mmu->lock);
+ return ret;
+}
+
+static int ivpu_mmu_cd_add(struct ivpu_device *vdev, u32 ssid, u64 cd_dma)
+{
+ struct ivpu_mmu_info *mmu = vdev->mmu;
+ struct ivpu_mmu_cdtab *cdtab = &mmu->cdtab;
+ u64 *entry;
+ u64 cd[4];
+ int ret;
+
+ if (ssid > IVPU_MMU_CDTAB_ENT_COUNT)
+ return -EINVAL;
+
+ entry = cdtab->base + (ssid * IVPU_MMU_CDTAB_ENT_SIZE);
+
+ if (cd_dma != 0) {
+ cd[0] = FIELD_PREP(IVPU_MMU_CD_0_TCR_T0SZ, 26) |
+ FIELD_PREP(IVPU_MMU_CD_0_TCR_TG0, 0) |
+ FIELD_PREP(IVPU_MMU_CD_0_TCR_IRGN0, 0) |
+ FIELD_PREP(IVPU_MMU_CD_0_TCR_ORGN0, 0) |
+ FIELD_PREP(IVPU_MMU_CD_0_TCR_SH0, 0) |
+ FIELD_PREP(IVPU_MMU_CD_0_TCR_IPS, 3) |
+ FIELD_PREP(IVPU_MMU_CD_0_ASID, ssid) |
+ IVPU_MMU_CD_0_TCR_EPD1 |
+ IVPU_MMU_CD_0_AA64 |
+ IVPU_MMU_CD_0_R |
+ IVPU_MMU_CD_0_ASET |
+ IVPU_MMU_CD_0_V;
+ cd[1] = cd_dma & IVPU_MMU_CD_1_TTB0_MASK;
+ cd[2] = 0;
+ cd[3] = 0x0000000000007444;
+
+ /* For global context generate memory fault on VPU */
+ if (ssid == IVPU_GLOBAL_CONTEXT_MMU_SSID)
+ cd[0] |= IVPU_MMU_CD_0_A;
+ } else {
+ memset(cd, 0, sizeof(cd));
+ }
+
+ WRITE_ONCE(entry[1], cd[1]);
+ WRITE_ONCE(entry[2], cd[2]);
+ WRITE_ONCE(entry[3], cd[3]);
+ WRITE_ONCE(entry[0], cd[0]);
+
+ clflush_cache_range(entry, IVPU_MMU_CDTAB_ENT_SIZE);
+
+ ivpu_dbg(vdev, MMU, "CDTAB %s entry (SSID=%u, dma=%pad): 0x%llx, 0x%llx, 0x%llx, 0x%llx\n",
+ cd_dma ? "write" : "clear", ssid, &cd_dma, cd[0], cd[1], cd[2], cd[3]);
+
+ ret = mutex_lock_interruptible(&mmu->lock);
+ if (ret)
+ return ret;
+
+ if (!mmu->on) {
+ ret = 0;
+ goto unlock;
+ }
+
+ ret = ivpu_mmu_cmdq_write_cfgi_all(vdev);
+ if (ret)
+ goto unlock;
+
+ ret = ivpu_mmu_cmdq_sync(vdev);
+unlock:
+ mutex_unlock(&mmu->lock);
+ return ret;
+}
+
+static int ivpu_mmu_cd_add_gbl(struct ivpu_device *vdev)
+{
+ int ret;
+
+ ret = ivpu_mmu_cd_add(vdev, 0, vdev->gctx.pgtable.pgd_dma);
+ if (ret)
+ ivpu_err(vdev, "Failed to add global CD entry: %d\n", ret);
+
+ return ret;
+}
+
+static int ivpu_mmu_cd_add_user(struct ivpu_device *vdev, u32 ssid, dma_addr_t cd_dma)
+{
+ int ret;
+
+ if (ssid == 0) {
+ ivpu_err(vdev, "Invalid SSID: %u\n", ssid);
+ return -EINVAL;
+ }
+
+ ret = ivpu_mmu_cd_add(vdev, ssid, cd_dma);
+ if (ret)
+ ivpu_err(vdev, "Failed to add CD entry SSID=%u: %d\n", ssid, ret);
+
+ return ret;
+}
+
+int ivpu_mmu_init(struct ivpu_device *vdev)
+{
+ struct ivpu_mmu_info *mmu = vdev->mmu;
+ int ret;
+
+ ivpu_dbg(vdev, MMU, "Init..\n");
+
+ drmm_mutex_init(&vdev->drm, &mmu->lock);
+ ivpu_mmu_config_check(vdev);
+
+ ret = ivpu_mmu_structs_alloc(vdev);
+ if (ret)
+ return ret;
+
+ ret = ivpu_mmu_strtab_init(vdev);
+ if (ret) {
+ ivpu_err(vdev, "Failed to initialize strtab: %d\n", ret);
+ return ret;
+ }
+
+ ret = ivpu_mmu_cd_add_gbl(vdev);
+ if (ret) {
+ ivpu_err(vdev, "Failed to initialize strtab: %d\n", ret);
+ return ret;
+ }
+
+ ret = ivpu_mmu_enable(vdev);
+ if (ret) {
+ ivpu_err(vdev, "Failed to resume MMU: %d\n", ret);
+ return ret;
+ }
+
+ ivpu_dbg(vdev, MMU, "Init done\n");
+
+ return 0;
+}
+
+int ivpu_mmu_enable(struct ivpu_device *vdev)
+{
+ struct ivpu_mmu_info *mmu = vdev->mmu;
+ int ret;
+
+ mutex_lock(&mmu->lock);
+
+ mmu->on = true;
+
+ ret = ivpu_mmu_reset(vdev);
+ if (ret) {
+ ivpu_err(vdev, "Failed to reset MMU: %d\n", ret);
+ goto err;
+ }
+
+ ret = ivpu_mmu_cmdq_write_cfgi_all(vdev);
+ if (ret)
+ goto err;
+
+ ret = ivpu_mmu_cmdq_write_tlbi_nsnh_all(vdev);
+ if (ret)
+ goto err;
+
+ ret = ivpu_mmu_cmdq_sync(vdev);
+ if (ret)
+ goto err;
+
+ mutex_unlock(&mmu->lock);
+
+ return 0;
+err:
+ mmu->on = false;
+ mutex_unlock(&mmu->lock);
+ return ret;
+}
+
+void ivpu_mmu_disable(struct ivpu_device *vdev)
+{
+ struct ivpu_mmu_info *mmu = vdev->mmu;
+
+ mutex_lock(&mmu->lock);
+ mmu->on = false;
+ mutex_unlock(&mmu->lock);
+}
+
+static void ivpu_mmu_dump_event(struct ivpu_device *vdev, u32 *event)
+{
+ u32 ssid = FIELD_GET(IVPU_MMU_EVT_SSID_MASK, event[0]);
+ u32 op = FIELD_GET(IVPU_MMU_EVT_OP_MASK, event[0]);
+ u64 fetch_addr = ((u64)event[7]) << 32 | event[6];
+ u64 in_addr = ((u64)event[5]) << 32 | event[4];
+ u32 sid = event[1];
+
+ ivpu_err(vdev, "MMU EVTQ: 0x%x (%s) SSID: %d SID: %d, e[2] %08x, e[3] %08x, in addr: 0x%llx, fetch addr: 0x%llx\n",
+ op, ivpu_mmu_event_to_str(op), ssid, sid, event[2], event[3], in_addr, fetch_addr);
+}
+
+static u32 *ivpu_mmu_get_event(struct ivpu_device *vdev)
+{
+ struct ivpu_mmu_queue *evtq = &vdev->mmu->evtq;
+ u32 idx = IVPU_MMU_Q_IDX(evtq->cons);
+ u32 *evt = evtq->base + (idx * IVPU_MMU_EVTQ_CMD_SIZE);
+
+ evtq->prod = REGV_RD32(MTL_VPU_HOST_MMU_EVTQ_PROD_SEC);
+ if (!CIRC_CNT(IVPU_MMU_Q_IDX(evtq->prod), IVPU_MMU_Q_IDX(evtq->cons), IVPU_MMU_Q_COUNT))
+ return NULL;
+
+ clflush_cache_range(evt, IVPU_MMU_EVTQ_CMD_SIZE);
+
+ evtq->cons = (evtq->cons + 1) & IVPU_MMU_Q_WRAP_MASK;
+ REGV_WR32(MTL_VPU_HOST_MMU_EVTQ_CONS_SEC, evtq->cons);
+
+ return evt;
+}
+
+void ivpu_mmu_irq_evtq_handler(struct ivpu_device *vdev)
+{
+ bool schedule_recovery = false;
+ u32 *event;
+ u32 ssid;
+
+ ivpu_dbg(vdev, IRQ, "MMU event queue\n");
+
+ while ((event = ivpu_mmu_get_event(vdev)) != NULL) {
+ ivpu_mmu_dump_event(vdev, event);
+
+ ssid = FIELD_GET(IVPU_MMU_EVT_SSID_MASK, event[0]);
+ if (ssid == IVPU_GLOBAL_CONTEXT_MMU_SSID)
+ schedule_recovery = true;
+ else
+ ivpu_mmu_user_context_mark_invalid(vdev, ssid);
+ }
+
+ if (schedule_recovery)
+ ivpu_pm_schedule_recovery(vdev);
+}
+
+void ivpu_mmu_irq_gerr_handler(struct ivpu_device *vdev)
+{
+ u32 gerror_val, gerrorn_val, active;
+
+ ivpu_dbg(vdev, IRQ, "MMU error\n");
+
+ gerror_val = REGV_RD32(MTL_VPU_HOST_MMU_GERROR);
+ gerrorn_val = REGV_RD32(MTL_VPU_HOST_MMU_GERRORN);
+
+ active = gerror_val ^ gerrorn_val;
+ if (!(active & IVPU_MMU_GERROR_ERR_MASK))
+ return;
+
+ if (REG_TEST_FLD(MTL_VPU_HOST_MMU_GERROR, MSI_ABT, active))
+ ivpu_warn_ratelimited(vdev, "MMU MSI ABT write aborted\n");
+
+ if (REG_TEST_FLD(MTL_VPU_HOST_MMU_GERROR, MSI_PRIQ_ABT, active))
+ ivpu_warn_ratelimited(vdev, "MMU PRIQ MSI ABT write aborted\n");
+
+ if (REG_TEST_FLD(MTL_VPU_HOST_MMU_GERROR, MSI_EVTQ_ABT, active))
+ ivpu_warn_ratelimited(vdev, "MMU EVTQ MSI ABT write aborted\n");
+
+ if (REG_TEST_FLD(MTL_VPU_HOST_MMU_GERROR, MSI_CMDQ_ABT, active))
+ ivpu_warn_ratelimited(vdev, "MMU CMDQ MSI ABT write aborted\n");
+
+ if (REG_TEST_FLD(MTL_VPU_HOST_MMU_GERROR, PRIQ_ABT, active))
+ ivpu_err_ratelimited(vdev, "MMU PRIQ write aborted\n");
+
+ if (REG_TEST_FLD(MTL_VPU_HOST_MMU_GERROR, EVTQ_ABT, active))
+ ivpu_err_ratelimited(vdev, "MMU EVTQ write aborted\n");
+
+ if (REG_TEST_FLD(MTL_VPU_HOST_MMU_GERROR, CMDQ, active))
+ ivpu_err_ratelimited(vdev, "MMU CMDQ write aborted\n");
+
+ REGV_WR32(MTL_VPU_HOST_MMU_GERRORN, gerror_val);
+}
+
+int ivpu_mmu_set_pgtable(struct ivpu_device *vdev, int ssid, struct ivpu_mmu_pgtable *pgtable)
+{
+ return ivpu_mmu_cd_add_user(vdev, ssid, pgtable->pgd_dma);
+}
+
+void ivpu_mmu_clear_pgtable(struct ivpu_device *vdev, int ssid)
+{
+ ivpu_mmu_cd_add_user(vdev, ssid, 0); /* 0 will clear CD entry */
+}
diff --git a/drivers/accel/ivpu/ivpu_mmu.h b/drivers/accel/ivpu/ivpu_mmu.h
new file mode 100644
index 000000000000..cb551126806b
--- /dev/null
+++ b/drivers/accel/ivpu/ivpu_mmu.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020-2023 Intel Corporation
+ */
+
+#ifndef __IVPU_MMU_H__
+#define __IVPU_MMU_H__
+
+struct ivpu_device;
+
+struct ivpu_mmu_cdtab {
+ void *base;
+ dma_addr_t dma;
+};
+
+struct ivpu_mmu_strtab {
+ void *base;
+ dma_addr_t dma;
+ u64 dma_q;
+ u32 base_cfg;
+};
+
+struct ivpu_mmu_queue {
+ void *base;
+ dma_addr_t dma;
+ u64 dma_q;
+ u32 prod;
+ u32 cons;
+};
+
+struct ivpu_mmu_info {
+ struct mutex lock; /* Protects cdtab, strtab, cmdq, on */
+ struct ivpu_mmu_cdtab cdtab;
+ struct ivpu_mmu_strtab strtab;
+ struct ivpu_mmu_queue cmdq;
+ struct ivpu_mmu_queue evtq;
+ bool on;
+};
+
+int ivpu_mmu_init(struct ivpu_device *vdev);
+void ivpu_mmu_disable(struct ivpu_device *vdev);
+int ivpu_mmu_enable(struct ivpu_device *vdev);
+int ivpu_mmu_set_pgtable(struct ivpu_device *vdev, int ssid, struct ivpu_mmu_pgtable *pgtable);
+void ivpu_mmu_clear_pgtable(struct ivpu_device *vdev, int ssid);
+int ivpu_mmu_invalidate_tlb(struct ivpu_device *vdev, u16 ssid);
+
+void ivpu_mmu_irq_evtq_handler(struct ivpu_device *vdev);
+void ivpu_mmu_irq_gerr_handler(struct ivpu_device *vdev);
+
+#endif /* __IVPU_MMU_H__ */
diff --git a/drivers/accel/ivpu/ivpu_mmu_context.c b/drivers/accel/ivpu/ivpu_mmu_context.c
new file mode 100644
index 000000000000..8ce9b12ac356
--- /dev/null
+++ b/drivers/accel/ivpu/ivpu_mmu_context.c
@@ -0,0 +1,398 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020-2023 Intel Corporation
+ */
+
+#include <linux/bitfield.h>
+#include <linux/highmem.h>
+
+#include "ivpu_drv.h"
+#include "ivpu_hw.h"
+#include "ivpu_mmu.h"
+#include "ivpu_mmu_context.h"
+
+#define IVPU_MMU_PGD_INDEX_MASK GENMASK(38, 30)
+#define IVPU_MMU_PMD_INDEX_MASK GENMASK(29, 21)
+#define IVPU_MMU_PTE_INDEX_MASK GENMASK(20, 12)
+#define IVPU_MMU_ENTRY_FLAGS_MASK GENMASK(11, 0)
+#define IVPU_MMU_ENTRY_FLAG_NG BIT(11)
+#define IVPU_MMU_ENTRY_FLAG_AF BIT(10)
+#define IVPU_MMU_ENTRY_FLAG_USER BIT(6)
+#define IVPU_MMU_ENTRY_FLAG_LLC_COHERENT BIT(2)
+#define IVPU_MMU_ENTRY_FLAG_TYPE_PAGE BIT(1)
+#define IVPU_MMU_ENTRY_FLAG_VALID BIT(0)
+
+#define IVPU_MMU_PAGE_SIZE SZ_4K
+#define IVPU_MMU_PTE_MAP_SIZE (IVPU_MMU_PGTABLE_ENTRIES * IVPU_MMU_PAGE_SIZE)
+#define IVPU_MMU_PMD_MAP_SIZE (IVPU_MMU_PGTABLE_ENTRIES * IVPU_MMU_PTE_MAP_SIZE)
+#define IVPU_MMU_PGTABLE_SIZE (IVPU_MMU_PGTABLE_ENTRIES * sizeof(u64))
+
+#define IVPU_MMU_DUMMY_ADDRESS 0xdeadb000
+#define IVPU_MMU_ENTRY_VALID (IVPU_MMU_ENTRY_FLAG_TYPE_PAGE | IVPU_MMU_ENTRY_FLAG_VALID)
+#define IVPU_MMU_ENTRY_INVALID (IVPU_MMU_DUMMY_ADDRESS & ~IVPU_MMU_ENTRY_FLAGS_MASK)
+#define IVPU_MMU_ENTRY_MAPPED (IVPU_MMU_ENTRY_FLAG_AF | IVPU_MMU_ENTRY_FLAG_USER | \
+ IVPU_MMU_ENTRY_FLAG_NG | IVPU_MMU_ENTRY_VALID)
+
+static int ivpu_mmu_pgtable_init(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable)
+{
+ dma_addr_t pgd_dma;
+ u64 *pgd;
+
+ pgd = dma_alloc_wc(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, &pgd_dma, GFP_KERNEL);
+ if (!pgd)
+ return -ENOMEM;
+
+ pgtable->pgd = pgd;
+ pgtable->pgd_dma = pgd_dma;
+
+ return 0;
+}
+
+static void ivpu_mmu_pgtable_free(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable)
+{
+ int pgd_index, pmd_index;
+
+ for (pgd_index = 0; pgd_index < IVPU_MMU_PGTABLE_ENTRIES; ++pgd_index) {
+ u64 **pmd_entries = pgtable->pgd_cpu_entries[pgd_index];
+ u64 *pmd = pgtable->pgd_entries[pgd_index];
+
+ if (!pmd_entries)
+ continue;
+
+ for (pmd_index = 0; pmd_index < IVPU_MMU_PGTABLE_ENTRIES; ++pmd_index) {
+ if (pmd_entries[pmd_index])
+ dma_free_wc(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE,
+ pmd_entries[pmd_index],
+ pmd[pmd_index] & ~IVPU_MMU_ENTRY_FLAGS_MASK);
+ }
+
+ kfree(pmd_entries);
+ dma_free_wc(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, pgtable->pgd_entries[pgd_index],
+ pgtable->pgd[pgd_index] & ~IVPU_MMU_ENTRY_FLAGS_MASK);
+ }
+
+ dma_free_wc(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, pgtable->pgd,
+ pgtable->pgd_dma & ~IVPU_MMU_ENTRY_FLAGS_MASK);
+}
+
+static u64*
+ivpu_mmu_ensure_pmd(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable, u64 pgd_index)
+{
+ u64 **pmd_entries;
+ dma_addr_t pmd_dma;
+ u64 *pmd;
+
+ if (pgtable->pgd_entries[pgd_index])
+ return pgtable->pgd_entries[pgd_index];
+
+ pmd = dma_alloc_wc(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, &pmd_dma, GFP_KERNEL);
+ if (!pmd)
+ return NULL;
+
+ pmd_entries = kzalloc(IVPU_MMU_PGTABLE_SIZE, GFP_KERNEL);
+ if (!pmd_entries)
+ goto err_free_pgd;
+
+ pgtable->pgd_entries[pgd_index] = pmd;
+ pgtable->pgd_cpu_entries[pgd_index] = pmd_entries;
+ pgtable->pgd[pgd_index] = pmd_dma | IVPU_MMU_ENTRY_VALID;
+
+ return pmd;
+
+err_free_pgd:
+ dma_free_wc(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, pmd, pmd_dma);
+ return NULL;
+}
+
+static u64*
+ivpu_mmu_ensure_pte(struct ivpu_device *vdev, struct ivpu_mmu_pgtable *pgtable,
+ int pgd_index, int pmd_index)
+{
+ dma_addr_t pte_dma;
+ u64 *pte;
+
+ if (pgtable->pgd_cpu_entries[pgd_index][pmd_index])
+ return pgtable->pgd_cpu_entries[pgd_index][pmd_index];
+
+ pte = dma_alloc_wc(vdev->drm.dev, IVPU_MMU_PGTABLE_SIZE, &pte_dma, GFP_KERNEL);
+ if (!pte)
+ return NULL;
+
+ pgtable->pgd_cpu_entries[pgd_index][pmd_index] = pte;
+ pgtable->pgd_entries[pgd_index][pmd_index] = pte_dma | IVPU_MMU_ENTRY_VALID;
+
+ return pte;
+}
+
+static int
+ivpu_mmu_context_map_page(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
+ u64 vpu_addr, dma_addr_t dma_addr, int prot)
+{
+ u64 *pte;
+ int pgd_index = FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr);
+ int pmd_index = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr);
+ int pte_index = FIELD_GET(IVPU_MMU_PTE_INDEX_MASK, vpu_addr);
+
+ /* Allocate PMD - second level page table if needed */
+ if (!ivpu_mmu_ensure_pmd(vdev, &ctx->pgtable, pgd_index))
+ return -ENOMEM;
+
+ /* Allocate PTE - third level page table if needed */
+ pte = ivpu_mmu_ensure_pte(vdev, &ctx->pgtable, pgd_index, pmd_index);
+ if (!pte)
+ return -ENOMEM;
+
+ /* Update PTE - third level page table with DMA address */
+ pte[pte_index] = dma_addr | prot;
+
+ return 0;
+}
+
+static void ivpu_mmu_context_unmap_page(struct ivpu_mmu_context *ctx, u64 vpu_addr)
+{
+ int pgd_index = FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr);
+ int pmd_index = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr);
+ int pte_index = FIELD_GET(IVPU_MMU_PTE_INDEX_MASK, vpu_addr);
+
+ /* Update PTE with dummy physical address and clear flags */
+ ctx->pgtable.pgd_cpu_entries[pgd_index][pmd_index][pte_index] = IVPU_MMU_ENTRY_INVALID;
+}
+
+static void
+ivpu_mmu_context_flush_page_tables(struct ivpu_mmu_context *ctx, u64 vpu_addr, size_t size)
+{
+ u64 end_addr = vpu_addr + size;
+ u64 *pgd = ctx->pgtable.pgd;
+
+ /* Align to PMD entry (2 MB) */
+ vpu_addr &= ~(IVPU_MMU_PTE_MAP_SIZE - 1);
+
+ while (vpu_addr < end_addr) {
+ int pgd_index = FIELD_GET(IVPU_MMU_PGD_INDEX_MASK, vpu_addr);
+ u64 pmd_end = (pgd_index + 1) * (u64)IVPU_MMU_PMD_MAP_SIZE;
+ u64 *pmd = ctx->pgtable.pgd_entries[pgd_index];
+
+ while (vpu_addr < end_addr && vpu_addr < pmd_end) {
+ int pmd_index = FIELD_GET(IVPU_MMU_PMD_INDEX_MASK, vpu_addr);
+ u64 *pte = ctx->pgtable.pgd_cpu_entries[pgd_index][pmd_index];
+
+ clflush_cache_range(pte, IVPU_MMU_PGTABLE_SIZE);
+ vpu_addr += IVPU_MMU_PTE_MAP_SIZE;
+ }
+ clflush_cache_range(pmd, IVPU_MMU_PGTABLE_SIZE);
+ }
+ clflush_cache_range(pgd, IVPU_MMU_PGTABLE_SIZE);
+}
+
+static int
+ivpu_mmu_context_map_pages(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
+ u64 vpu_addr, dma_addr_t dma_addr, size_t size, int prot)
+{
+ while (size) {
+ int ret = ivpu_mmu_context_map_page(vdev, ctx, vpu_addr, dma_addr, prot);
+
+ if (ret)
+ return ret;
+
+ vpu_addr += IVPU_MMU_PAGE_SIZE;
+ dma_addr += IVPU_MMU_PAGE_SIZE;
+ size -= IVPU_MMU_PAGE_SIZE;
+ }
+
+ return 0;
+}
+
+static void ivpu_mmu_context_unmap_pages(struct ivpu_mmu_context *ctx, u64 vpu_addr, size_t size)
+{
+ while (size) {
+ ivpu_mmu_context_unmap_page(ctx, vpu_addr);
+ vpu_addr += IVPU_MMU_PAGE_SIZE;
+ size -= IVPU_MMU_PAGE_SIZE;
+ }
+}
+
+int
+ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
+ u64 vpu_addr, struct sg_table *sgt, bool llc_coherent)
+{
+ struct scatterlist *sg;
+ int prot;
+ int ret;
+ u64 i;
+
+ if (!IS_ALIGNED(vpu_addr, IVPU_MMU_PAGE_SIZE))
+ return -EINVAL;
+ /*
+ * VPU is only 32 bit, but DMA engine is 38 bit
+ * Ranges < 2 GB are reserved for VPU internal registers
+ * Limit range to 8 GB
+ */
+ if (vpu_addr < SZ_2G || vpu_addr > SZ_8G)
+ return -EINVAL;
+
+ prot = IVPU_MMU_ENTRY_MAPPED;
+ if (llc_coherent)
+ prot |= IVPU_MMU_ENTRY_FLAG_LLC_COHERENT;
+
+ mutex_lock(&ctx->lock);
+
+ for_each_sgtable_dma_sg(sgt, sg, i) {
+ u64 dma_addr = sg_dma_address(sg) - sg->offset;
+ size_t size = sg_dma_len(sg) + sg->offset;
+
+ ret = ivpu_mmu_context_map_pages(vdev, ctx, vpu_addr, dma_addr, size, prot);
+ if (ret) {
+ ivpu_err(vdev, "Failed to map context pages\n");
+ mutex_unlock(&ctx->lock);
+ return ret;
+ }
+ ivpu_mmu_context_flush_page_tables(ctx, vpu_addr, size);
+ vpu_addr += size;
+ }
+
+ mutex_unlock(&ctx->lock);
+
+ ret = ivpu_mmu_invalidate_tlb(vdev, ctx->id);
+ if (ret)
+ ivpu_err(vdev, "Failed to invalidate TLB for ctx %u: %d\n", ctx->id, ret);
+ return ret;
+}
+
+void
+ivpu_mmu_context_unmap_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
+ u64 vpu_addr, struct sg_table *sgt)
+{
+ struct scatterlist *sg;
+ int ret;
+ u64 i;
+
+ if (!IS_ALIGNED(vpu_addr, IVPU_MMU_PAGE_SIZE))
+ ivpu_warn(vdev, "Unaligned vpu_addr: 0x%llx\n", vpu_addr);
+
+ mutex_lock(&ctx->lock);
+
+ for_each_sgtable_dma_sg(sgt, sg, i) {
+ size_t size = sg_dma_len(sg) + sg->offset;
+
+ ivpu_mmu_context_unmap_pages(ctx, vpu_addr, size);
+ ivpu_mmu_context_flush_page_tables(ctx, vpu_addr, size);
+ vpu_addr += size;
+ }
+
+ mutex_unlock(&ctx->lock);
+
+ ret = ivpu_mmu_invalidate_tlb(vdev, ctx->id);
+ if (ret)
+ ivpu_warn(vdev, "Failed to invalidate TLB for ctx %u: %d\n", ctx->id, ret);
+}
+
+int
+ivpu_mmu_context_insert_node_locked(struct ivpu_mmu_context *ctx,
+ const struct ivpu_addr_range *range,
+ u64 size, struct drm_mm_node *node)
+{
+ lockdep_assert_held(&ctx->lock);
+
+ return drm_mm_insert_node_in_range(&ctx->mm, node, size, IVPU_MMU_PAGE_SIZE,
+ 0, range->start, range->end, DRM_MM_INSERT_BEST);
+}
+
+void
+ivpu_mmu_context_remove_node_locked(struct ivpu_mmu_context *ctx, struct drm_mm_node *node)
+{
+ lockdep_assert_held(&ctx->lock);
+
+ drm_mm_remove_node(node);
+}
+
+static int
+ivpu_mmu_context_init(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u32 context_id)
+{
+ u64 start, end;
+ int ret;
+
+ mutex_init(&ctx->lock);
+ INIT_LIST_HEAD(&ctx->bo_list);
+
+ ret = ivpu_mmu_pgtable_init(vdev, &ctx->pgtable);
+ if (ret)
+ return ret;
+
+ if (!context_id) {
+ start = vdev->hw->ranges.global_low.start;
+ end = vdev->hw->ranges.global_high.end;
+ } else {
+ start = vdev->hw->ranges.user_low.start;
+ end = vdev->hw->ranges.user_high.end;
+ }
+
+ drm_mm_init(&ctx->mm, start, end - start);
+ ctx->id = context_id;
+
+ return 0;
+}
+
+static void ivpu_mmu_context_fini(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx)
+{
+ drm_WARN_ON(&vdev->drm, !ctx->pgtable.pgd);
+
+ mutex_destroy(&ctx->lock);
+ ivpu_mmu_pgtable_free(vdev, &ctx->pgtable);
+ drm_mm_takedown(&ctx->mm);
+}
+
+int ivpu_mmu_global_context_init(struct ivpu_device *vdev)
+{
+ return ivpu_mmu_context_init(vdev, &vdev->gctx, IVPU_GLOBAL_CONTEXT_MMU_SSID);
+}
+
+void ivpu_mmu_global_context_fini(struct ivpu_device *vdev)
+{
+ return ivpu_mmu_context_fini(vdev, &vdev->gctx);
+}
+
+void ivpu_mmu_user_context_mark_invalid(struct ivpu_device *vdev, u32 ssid)
+{
+ struct ivpu_file_priv *file_priv;
+
+ xa_lock(&vdev->context_xa);
+
+ file_priv = xa_load(&vdev->context_xa, ssid);
+ if (file_priv)
+ file_priv->has_mmu_faults = true;
+
+ xa_unlock(&vdev->context_xa);
+}
+
+int ivpu_mmu_user_context_init(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u32 ctx_id)
+{
+ int ret;
+
+ drm_WARN_ON(&vdev->drm, !ctx_id);
+
+ ret = ivpu_mmu_context_init(vdev, ctx, ctx_id);
+ if (ret) {
+ ivpu_err(vdev, "Failed to initialize context: %d\n", ret);
+ return ret;
+ }
+
+ ret = ivpu_mmu_set_pgtable(vdev, ctx_id, &ctx->pgtable);
+ if (ret) {
+ ivpu_err(vdev, "Failed to set page table: %d\n", ret);
+ goto err_context_fini;
+ }
+
+ return 0;
+
+err_context_fini:
+ ivpu_mmu_context_fini(vdev, ctx);
+ return ret;
+}
+
+void ivpu_mmu_user_context_fini(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx)
+{
+ drm_WARN_ON(&vdev->drm, !ctx->id);
+
+ ivpu_mmu_clear_pgtable(vdev, ctx->id);
+ ivpu_mmu_context_fini(vdev, ctx);
+}
diff --git a/drivers/accel/ivpu/ivpu_mmu_context.h b/drivers/accel/ivpu/ivpu_mmu_context.h
new file mode 100644
index 000000000000..ddf11b95023a
--- /dev/null
+++ b/drivers/accel/ivpu/ivpu_mmu_context.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020-2023 Intel Corporation
+ */
+
+#ifndef __IVPU_MMU_CONTEXT_H__
+#define __IVPU_MMU_CONTEXT_H__
+
+#include <drm/drm_mm.h>
+
+struct ivpu_device;
+struct ivpu_file_priv;
+struct ivpu_addr_range;
+
+#define IVPU_MMU_PGTABLE_ENTRIES 512
+
+struct ivpu_mmu_pgtable {
+ u64 **pgd_cpu_entries[IVPU_MMU_PGTABLE_ENTRIES];
+ u64 *pgd_entries[IVPU_MMU_PGTABLE_ENTRIES];
+ u64 *pgd;
+ dma_addr_t pgd_dma;
+};
+
+struct ivpu_mmu_context {
+ struct mutex lock; /* protects: mm, pgtable, bo_list */
+ struct drm_mm mm;
+ struct ivpu_mmu_pgtable pgtable;
+ struct list_head bo_list;
+ u32 id;
+};
+
+int ivpu_mmu_global_context_init(struct ivpu_device *vdev);
+void ivpu_mmu_global_context_fini(struct ivpu_device *vdev);
+
+int ivpu_mmu_user_context_init(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u32 ctx_id);
+void ivpu_mmu_user_context_fini(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx);
+void ivpu_mmu_user_context_mark_invalid(struct ivpu_device *vdev, u32 ssid);
+
+int ivpu_mmu_context_insert_node_locked(struct ivpu_mmu_context *ctx,
+ const struct ivpu_addr_range *range,
+ u64 size, struct drm_mm_node *node);
+void ivpu_mmu_context_remove_node_locked(struct ivpu_mmu_context *ctx,
+ struct drm_mm_node *node);
+
+int ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
+ u64 vpu_addr, struct sg_table *sgt, bool llc_coherent);
+void ivpu_mmu_context_unmap_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
+ u64 vpu_addr, struct sg_table *sgt);
+
+#endif /* __IVPU_MMU_CONTEXT_H__ */
diff --git a/drivers/accel/ivpu/ivpu_pm.c b/drivers/accel/ivpu/ivpu_pm.c
new file mode 100644
index 000000000000..553bcbd787b3
--- /dev/null
+++ b/drivers/accel/ivpu/ivpu_pm.c
@@ -0,0 +1,329 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020-2023 Intel Corporation
+ */
+
+#include <linux/highmem.h>
+#include <linux/moduleparam.h>
+#include <linux/pci.h>
+#include <linux/pm_runtime.h>
+#include <linux/reboot.h>
+
+#include "vpu_boot_api.h"
+#include "ivpu_drv.h"
+#include "ivpu_hw.h"
+#include "ivpu_fw.h"
+#include "ivpu_ipc.h"
+#include "ivpu_job.h"
+#include "ivpu_mmu.h"
+#include "ivpu_pm.h"
+
+static bool ivpu_disable_recovery;
+module_param_named_unsafe(disable_recovery, ivpu_disable_recovery, bool, 0644);
+MODULE_PARM_DESC(disable_recovery, "Disables recovery when VPU hang is detected");
+
+#define PM_RESCHEDULE_LIMIT 5
+
+static void ivpu_pm_prepare_cold_boot(struct ivpu_device *vdev)
+{
+ struct ivpu_fw_info *fw = vdev->fw;
+
+ ivpu_cmdq_reset_all_contexts(vdev);
+ ivpu_ipc_reset(vdev);
+ ivpu_fw_load(vdev);
+ fw->entry_point = fw->cold_boot_entry_point;
+}
+
+static void ivpu_pm_prepare_warm_boot(struct ivpu_device *vdev)
+{
+ struct ivpu_fw_info *fw = vdev->fw;
+ struct vpu_boot_params *bp = fw->mem->kvaddr;
+
+ if (!bp->save_restore_ret_address) {
+ ivpu_pm_prepare_cold_boot(vdev);
+ return;
+ }
+
+ ivpu_dbg(vdev, FW_BOOT, "Save/restore entry point %llx", bp->save_restore_ret_address);
+ fw->entry_point = bp->save_restore_ret_address;
+}
+
+static int ivpu_suspend(struct ivpu_device *vdev)
+{
+ int ret;
+
+ ret = ivpu_shutdown(vdev);
+ if (ret) {
+ ivpu_err(vdev, "Failed to shutdown VPU: %d\n", ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+static int ivpu_resume(struct ivpu_device *vdev)
+{
+ int ret;
+
+retry:
+ ret = ivpu_hw_power_up(vdev);
+ if (ret) {
+ ivpu_err(vdev, "Failed to power up HW: %d\n", ret);
+ return ret;
+ }
+
+ ret = ivpu_mmu_enable(vdev);
+ if (ret) {
+ ivpu_err(vdev, "Failed to resume MMU: %d\n", ret);
+ ivpu_hw_power_down(vdev);
+ return ret;
+ }
+
+ ret = ivpu_boot(vdev);
+ if (ret) {
+ ivpu_mmu_disable(vdev);
+ ivpu_hw_power_down(vdev);
+ if (!ivpu_fw_is_cold_boot(vdev)) {
+ ivpu_warn(vdev, "Failed to resume the FW: %d. Retrying cold boot..\n", ret);
+ ivpu_pm_prepare_cold_boot(vdev);
+ goto retry;
+ } else {
+ ivpu_err(vdev, "Failed to resume the FW: %d\n", ret);
+ }
+ }
+
+ return ret;
+}
+
+static void ivpu_pm_recovery_work(struct work_struct *work)
+{
+ struct ivpu_pm_info *pm = container_of(work, struct ivpu_pm_info, recovery_work);
+ struct ivpu_device *vdev = pm->vdev;
+ char *evt[2] = {"IVPU_PM_EVENT=IVPU_RECOVER", NULL};
+ int ret;
+
+ ret = pci_reset_function(to_pci_dev(vdev->drm.dev));
+ if (ret)
+ ivpu_err(vdev, "Failed to reset VPU: %d\n", ret);
+
+ kobject_uevent_env(&vdev->drm.dev->kobj, KOBJ_CHANGE, evt);
+}
+
+void ivpu_pm_schedule_recovery(struct ivpu_device *vdev)
+{
+ struct ivpu_pm_info *pm = vdev->pm;
+
+ if (ivpu_disable_recovery) {
+ ivpu_err(vdev, "Recovery not available when disable_recovery param is set\n");
+ return;
+ }
+
+ if (ivpu_is_fpga(vdev)) {
+ ivpu_err(vdev, "Recovery not available on FPGA\n");
+ return;
+ }
+
+ /* Schedule recovery if it's not in progress */
+ if (atomic_cmpxchg(&pm->in_reset, 0, 1) == 0) {
+ ivpu_hw_irq_disable(vdev);
+ queue_work(system_long_wq, &pm->recovery_work);
+ }
+}
+
+int ivpu_pm_suspend_cb(struct device *dev)
+{
+ struct drm_device *drm = dev_get_drvdata(dev);
+ struct ivpu_device *vdev = to_ivpu_device(drm);
+ int ret;
+
+ ivpu_dbg(vdev, PM, "Suspend..\n");
+
+ ret = ivpu_suspend(vdev);
+ if (ret && vdev->pm->suspend_reschedule_counter) {
+ ivpu_dbg(vdev, PM, "Failed to enter idle, rescheduling suspend, retries left %d\n",
+ vdev->pm->suspend_reschedule_counter);
+ pm_schedule_suspend(dev, vdev->timeout.reschedule_suspend);
+ vdev->pm->suspend_reschedule_counter--;
+ return -EBUSY;
+ } else if (!vdev->pm->suspend_reschedule_counter) {
+ ivpu_warn(vdev, "Failed to enter idle, force suspend\n");
+ ivpu_pm_prepare_cold_boot(vdev);
+ } else {
+ ivpu_pm_prepare_warm_boot(vdev);
+ }
+
+ vdev->pm->suspend_reschedule_counter = PM_RESCHEDULE_LIMIT;
+
+ pci_save_state(to_pci_dev(dev));
+ pci_set_power_state(to_pci_dev(dev), PCI_D3hot);
+
+ ivpu_dbg(vdev, PM, "Suspend done.\n");
+
+ return ret;
+}
+
+int ivpu_pm_resume_cb(struct device *dev)
+{
+ struct drm_device *drm = dev_get_drvdata(dev);
+ struct ivpu_device *vdev = to_ivpu_device(drm);
+ int ret;
+
+ ivpu_dbg(vdev, PM, "Resume..\n");
+
+ pci_set_power_state(to_pci_dev(dev), PCI_D0);
+ pci_restore_state(to_pci_dev(dev));
+
+ ret = ivpu_resume(vdev);
+ if (ret)
+ ivpu_err(vdev, "Failed to resume: %d\n", ret);
+
+ ivpu_dbg(vdev, PM, "Resume done.\n");
+
+ return ret;
+}
+
+int ivpu_pm_runtime_suspend_cb(struct device *dev)
+{
+ struct drm_device *drm = dev_get_drvdata(dev);
+ struct ivpu_device *vdev = to_ivpu_device(drm);
+ int ret;
+
+ ivpu_dbg(vdev, PM, "Runtime suspend..\n");
+
+ if (!ivpu_hw_is_idle(vdev) && vdev->pm->suspend_reschedule_counter) {
+ ivpu_dbg(vdev, PM, "Failed to enter idle, rescheduling suspend, retries left %d\n",
+ vdev->pm->suspend_reschedule_counter);
+ pm_schedule_suspend(dev, vdev->timeout.reschedule_suspend);
+ vdev->pm->suspend_reschedule_counter--;
+ return -EAGAIN;
+ }
+
+ ret = ivpu_suspend(vdev);
+ if (ret)
+ ivpu_err(vdev, "Failed to set suspend VPU: %d\n", ret);
+
+ if (!vdev->pm->suspend_reschedule_counter) {
+ ivpu_warn(vdev, "VPU failed to enter idle, force suspended.\n");
+ ivpu_pm_prepare_cold_boot(vdev);
+ } else {
+ ivpu_pm_prepare_warm_boot(vdev);
+ }
+
+ vdev->pm->suspend_reschedule_counter = PM_RESCHEDULE_LIMIT;
+
+ ivpu_dbg(vdev, PM, "Runtime suspend done.\n");
+
+ return 0;
+}
+
+int ivpu_pm_runtime_resume_cb(struct device *dev)
+{
+ struct drm_device *drm = dev_get_drvdata(dev);
+ struct ivpu_device *vdev = to_ivpu_device(drm);
+ int ret;
+
+ ivpu_dbg(vdev, PM, "Runtime resume..\n");
+
+ ret = ivpu_resume(vdev);
+ if (ret)
+ ivpu_err(vdev, "Failed to set RESUME state: %d\n", ret);
+
+ ivpu_dbg(vdev, PM, "Runtime resume done.\n");
+
+ return ret;
+}
+
+int ivpu_rpm_get(struct ivpu_device *vdev)
+{
+ int ret;
+
+ ivpu_dbg(vdev, RPM, "rpm_get count %d\n", atomic_read(&vdev->drm.dev->power.usage_count));
+
+ ret = pm_runtime_resume_and_get(vdev->drm.dev);
+ if (!drm_WARN_ON(&vdev->drm, ret < 0))
+ vdev->pm->suspend_reschedule_counter = PM_RESCHEDULE_LIMIT;
+
+ return ret;
+}
+
+void ivpu_rpm_put(struct ivpu_device *vdev)
+{
+ ivpu_dbg(vdev, RPM, "rpm_put count %d\n", atomic_read(&vdev->drm.dev->power.usage_count));
+
+ pm_runtime_mark_last_busy(vdev->drm.dev);
+ pm_runtime_put_autosuspend(vdev->drm.dev);
+}
+
+void ivpu_pm_reset_prepare_cb(struct pci_dev *pdev)
+{
+ struct ivpu_device *vdev = pci_get_drvdata(pdev);
+
+ pm_runtime_get_sync(vdev->drm.dev);
+
+ ivpu_dbg(vdev, PM, "Pre-reset..\n");
+ atomic_set(&vdev->pm->in_reset, 1);
+ ivpu_shutdown(vdev);
+ ivpu_pm_prepare_cold_boot(vdev);
+ ivpu_jobs_abort_all(vdev);
+ ivpu_dbg(vdev, PM, "Pre-reset done.\n");
+}
+
+void ivpu_pm_reset_done_cb(struct pci_dev *pdev)
+{
+ struct ivpu_device *vdev = pci_get_drvdata(pdev);
+ int ret;
+
+ ivpu_dbg(vdev, PM, "Post-reset..\n");
+ ret = ivpu_resume(vdev);
+ if (ret)
+ ivpu_err(vdev, "Failed to set RESUME state: %d\n", ret);
+ atomic_set(&vdev->pm->in_reset, 0);
+ ivpu_dbg(vdev, PM, "Post-reset done.\n");
+
+ pm_runtime_put_autosuspend(vdev->drm.dev);
+}
+
+int ivpu_pm_init(struct ivpu_device *vdev)
+{
+ struct device *dev = vdev->drm.dev;
+ struct ivpu_pm_info *pm = vdev->pm;
+
+ pm->vdev = vdev;
+ pm->suspend_reschedule_counter = PM_RESCHEDULE_LIMIT;
+
+ atomic_set(&pm->in_reset, 0);
+ INIT_WORK(&pm->recovery_work, ivpu_pm_recovery_work);
+
+ pm_runtime_use_autosuspend(dev);
+
+ if (ivpu_disable_recovery)
+ pm_runtime_set_autosuspend_delay(dev, -1);
+ else if (ivpu_is_silicon(vdev))
+ pm_runtime_set_autosuspend_delay(dev, 100);
+ else
+ pm_runtime_set_autosuspend_delay(dev, 60000);
+
+ return 0;
+}
+
+void ivpu_pm_enable(struct ivpu_device *vdev)
+{
+ struct device *dev = vdev->drm.dev;
+
+ pm_runtime_set_active(dev);
+ pm_runtime_allow(dev);
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
+ ivpu_dbg(vdev, RPM, "Enable RPM count %d\n", atomic_read(&dev->power.usage_count));
+}
+
+void ivpu_pm_disable(struct ivpu_device *vdev)
+{
+ struct device *dev = vdev->drm.dev;
+
+ ivpu_dbg(vdev, RPM, "Disable RPM count %d\n", atomic_read(&dev->power.usage_count));
+
+ pm_runtime_get_noresume(vdev->drm.dev);
+ pm_runtime_forbid(vdev->drm.dev);
+}
diff --git a/drivers/accel/ivpu/ivpu_pm.h b/drivers/accel/ivpu/ivpu_pm.h
new file mode 100644
index 000000000000..dc1b3758e13f
--- /dev/null
+++ b/drivers/accel/ivpu/ivpu_pm.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020-2023 Intel Corporation
+ */
+
+#ifndef __IVPU_PM_H__
+#define __IVPU_PM_H__
+
+#include <linux/types.h>
+
+struct ivpu_device;
+
+struct ivpu_pm_info {
+ struct ivpu_device *vdev;
+ struct work_struct recovery_work;
+ atomic_t in_reset;
+ bool is_warmboot;
+ u32 suspend_reschedule_counter;
+};
+
+int ivpu_pm_init(struct ivpu_device *vdev);
+void ivpu_pm_enable(struct ivpu_device *vdev);
+void ivpu_pm_disable(struct ivpu_device *vdev);
+
+int ivpu_pm_suspend_cb(struct device *dev);
+int ivpu_pm_resume_cb(struct device *dev);
+int ivpu_pm_runtime_suspend_cb(struct device *dev);
+int ivpu_pm_runtime_resume_cb(struct device *dev);
+
+void ivpu_pm_reset_prepare_cb(struct pci_dev *pdev);
+void ivpu_pm_reset_done_cb(struct pci_dev *pdev);
+
+int __must_check ivpu_rpm_get(struct ivpu_device *vdev);
+void ivpu_rpm_put(struct ivpu_device *vdev);
+
+void ivpu_pm_schedule_recovery(struct ivpu_device *vdev);
+
+#endif /* __IVPU_PM_H__ */
diff --git a/drivers/accel/ivpu/vpu_boot_api.h b/drivers/accel/ivpu/vpu_boot_api.h
new file mode 100644
index 000000000000..6b71be92ba65
--- /dev/null
+++ b/drivers/accel/ivpu/vpu_boot_api.h
@@ -0,0 +1,349 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright (C) 2020-2023 Intel Corporation
+ */
+
+#ifndef VPU_BOOT_API_H
+#define VPU_BOOT_API_H
+
+/*
+ * =========== FW API version information beginning ================
+ * The bellow values will be used to construct the version info this way:
+ * fw_bin_header->api_version[VPU_BOOT_API_VER_ID] = (VPU_BOOT_API_VER_MAJOR << 16) |
+ * VPU_BOOT_API_VER_MINOR;
+ * VPU_BOOT_API_VER_PATCH will be ignored. KMD and compatibility is not affected if this changes.
+ */
+
+/*
+ * Major version changes that break backward compatibility.
+ * Major version must start from 1 and can only be incremented.
+ */
+#define VPU_BOOT_API_VER_MAJOR 3
+
+/*
+ * Minor version changes when API backward compatibility is preserved.
+ * Resets to 0 if Major version is incremented.
+ */
+#define VPU_BOOT_API_VER_MINOR 12
+
+/*
+ * API header changed (field names, documentation, formatting) but API itself has not been changed
+ */
+#define VPU_BOOT_API_VER_PATCH 2
+
+/*
+ * Index in the API version table
+ * Must be unique for each API
+ */
+#define VPU_BOOT_API_VER_INDEX 0
+/* ------------ FW API version information end ---------------------*/
+
+#pragma pack(push, 1)
+
+/*
+ * Firmware image header format
+ */
+#define VPU_FW_HEADER_SIZE 4096
+#define VPU_FW_HEADER_VERSION 0x1
+#define VPU_FW_VERSION_SIZE 32
+#define VPU_FW_API_VER_NUM 16
+
+struct vpu_firmware_header {
+ u32 header_version;
+ u32 image_format;
+ u64 image_load_address;
+ u32 image_size;
+ u64 entry_point;
+ u8 vpu_version[VPU_FW_VERSION_SIZE];
+ u32 compression_type;
+ u64 firmware_version_load_address;
+ u32 firmware_version_size;
+ u64 boot_params_load_address;
+ u32 api_version[VPU_FW_API_VER_NUM];
+ /* Size of memory require for firmware execution */
+ u32 runtime_size;
+ u32 shave_nn_fw_size;
+};
+
+/*
+ * Firmware boot parameters format
+ */
+
+#define VPU_BOOT_PLL_COUNT 3
+#define VPU_BOOT_PLL_OUT_COUNT 4
+
+/** Values for boot_type field */
+#define VPU_BOOT_TYPE_COLDBOOT 0
+#define VPU_BOOT_TYPE_WARMBOOT 1
+
+/** Value for magic filed */
+#define VPU_BOOT_PARAMS_MAGIC 0x10000
+
+/** VPU scheduling mode. By default, OS scheduling is used. */
+#define VPU_SCHEDULING_MODE_OS 0
+#define VPU_SCHEDULING_MODE_HW 1
+
+enum VPU_BOOT_L2_CACHE_CFG_TYPE {
+ VPU_BOOT_L2_CACHE_CFG_UPA = 0,
+ VPU_BOOT_L2_CACHE_CFG_NN = 1,
+ VPU_BOOT_L2_CACHE_CFG_NUM = 2
+};
+
+/**
+ * Logging destinations.
+ *
+ * Logging output can be directed to different logging destinations. This enum
+ * defines the list of logging destinations supported by the VPU firmware (NOTE:
+ * a specific VPU FW binary may support only a subset of such output
+ * destinations, depending on the target platform and compile options).
+ */
+enum vpu_trace_destination {
+ VPU_TRACE_DESTINATION_PIPEPRINT = 0x1,
+ VPU_TRACE_DESTINATION_VERBOSE_TRACING = 0x2,
+ VPU_TRACE_DESTINATION_NORTH_PEAK = 0x4,
+};
+
+/*
+ * Processor bit shifts (for loggable HW components).
+ */
+#define VPU_TRACE_PROC_BIT_ARM 0
+#define VPU_TRACE_PROC_BIT_LRT 1
+#define VPU_TRACE_PROC_BIT_LNN 2
+#define VPU_TRACE_PROC_BIT_SHV_0 3
+#define VPU_TRACE_PROC_BIT_SHV_1 4
+#define VPU_TRACE_PROC_BIT_SHV_2 5
+#define VPU_TRACE_PROC_BIT_SHV_3 6
+#define VPU_TRACE_PROC_BIT_SHV_4 7
+#define VPU_TRACE_PROC_BIT_SHV_5 8
+#define VPU_TRACE_PROC_BIT_SHV_6 9
+#define VPU_TRACE_PROC_BIT_SHV_7 10
+#define VPU_TRACE_PROC_BIT_SHV_8 11
+#define VPU_TRACE_PROC_BIT_SHV_9 12
+#define VPU_TRACE_PROC_BIT_SHV_10 13
+#define VPU_TRACE_PROC_BIT_SHV_11 14
+#define VPU_TRACE_PROC_BIT_SHV_12 15
+#define VPU_TRACE_PROC_BIT_SHV_13 16
+#define VPU_TRACE_PROC_BIT_SHV_14 17
+#define VPU_TRACE_PROC_BIT_SHV_15 18
+#define VPU_TRACE_PROC_BIT_ACT_SHV_0 19
+#define VPU_TRACE_PROC_BIT_ACT_SHV_1 20
+#define VPU_TRACE_PROC_BIT_ACT_SHV_2 21
+#define VPU_TRACE_PROC_BIT_ACT_SHV_3 22
+#define VPU_TRACE_PROC_NO_OF_HW_DEVS 23
+
+/* KMB HW component IDs are sequential, so define first and last IDs. */
+#define VPU_TRACE_PROC_BIT_KMB_FIRST VPU_TRACE_PROC_BIT_LRT
+#define VPU_TRACE_PROC_BIT_KMB_LAST VPU_TRACE_PROC_BIT_SHV_15
+
+struct vpu_boot_l2_cache_config {
+ u8 use;
+ u8 cfg;
+};
+
+struct vpu_warm_boot_section {
+ u32 src;
+ u32 dst;
+ u32 size;
+ u32 core_id;
+ u32 is_clear_op;
+};
+
+struct vpu_boot_params {
+ u32 magic;
+ u32 vpu_id;
+ u32 vpu_count;
+ u32 pad0[5];
+ /* Clock frequencies: 0x20 - 0xFF */
+ u32 frequency;
+ u32 pll[VPU_BOOT_PLL_COUNT][VPU_BOOT_PLL_OUT_COUNT];
+ u32 perf_clk_frequency;
+ u32 pad1[42];
+ /* Memory regions: 0x100 - 0x1FF */
+ u64 ipc_header_area_start;
+ u32 ipc_header_area_size;
+ u64 shared_region_base;
+ u32 shared_region_size;
+ u64 ipc_payload_area_start;
+ u32 ipc_payload_area_size;
+ u64 global_aliased_pio_base;
+ u32 global_aliased_pio_size;
+ u32 autoconfig;
+ struct vpu_boot_l2_cache_config cache_defaults[VPU_BOOT_L2_CACHE_CFG_NUM];
+ u64 global_memory_allocator_base;
+ u32 global_memory_allocator_size;
+ /**
+ * ShaveNN FW section VPU base address
+ * On VPU2.7 HW this address must be within 2GB range starting from L2C_PAGE_TABLE base
+ */
+ u64 shave_nn_fw_base;
+ u64 save_restore_ret_address; /* stores the address of FW's restore entry point */
+ u32 pad2[43];
+ /* IRQ re-direct numbers: 0x200 - 0x2FF */
+ s32 watchdog_irq_mss;
+ s32 watchdog_irq_nce;
+ /* ARM -> VPU doorbell interrupt. ARM is notifying VPU of async command or compute job. */
+ u32 host_to_vpu_irq;
+ /* VPU -> ARM job done interrupt. VPU is notifying ARM of compute job completion. */
+ u32 job_done_irq;
+ /* VPU -> ARM IRQ line to use to request MMU update. */
+ u32 mmu_update_request_irq;
+ /* ARM -> VPU IRQ line to use to notify of MMU update completion. */
+ u32 mmu_update_done_irq;
+ /* ARM -> VPU IRQ line to use to request power level change. */
+ u32 set_power_level_irq;
+ /* VPU -> ARM IRQ line to use to notify of power level change completion. */
+ u32 set_power_level_done_irq;
+ /* VPU -> ARM IRQ line to use to notify of VPU idle state change */
+ u32 set_vpu_idle_update_irq;
+ /* VPU -> ARM IRQ line to use to request counter reset. */
+ u32 metric_query_event_irq;
+ /* ARM -> VPU IRQ line to use to notify of counter reset completion. */
+ u32 metric_query_event_done_irq;
+ /* VPU -> ARM IRQ line to use to notify of preemption completion. */
+ u32 preemption_done_irq;
+ /* Padding. */
+ u32 pad3[52];
+ /* Silicon information: 0x300 - 0x3FF */
+ u32 host_version_id;
+ u32 si_stepping;
+ u64 device_id;
+ u64 feature_exclusion;
+ u64 sku;
+ /** PLL ratio for minimum clock frequency */
+ u32 min_freq_pll_ratio;
+ /** PLL ratio for maximum clock frequency */
+ u32 max_freq_pll_ratio;
+ /**
+ * Initial log level threshold (messages with log level severity less than
+ * the threshold will not be logged); applies to every enabled logging
+ * destination and loggable HW component. See 'mvLog_t' enum for acceptable
+ * values.
+ */
+ u32 default_trace_level;
+ u32 boot_type;
+ u64 punit_telemetry_sram_base;
+ u64 punit_telemetry_sram_size;
+ u32 vpu_telemetry_enable;
+ u64 crit_tracing_buff_addr;
+ u32 crit_tracing_buff_size;
+ u64 verbose_tracing_buff_addr;
+ u32 verbose_tracing_buff_size;
+ u64 verbose_tracing_sw_component_mask; /* TO BE REMOVED */
+ /**
+ * Mask of destinations to which logging messages are delivered; bitwise OR
+ * of values defined in vpu_trace_destination enum.
+ */
+ u32 trace_destination_mask;
+ /**
+ * Mask of hardware components for which logging is enabled; bitwise OR of
+ * bits defined by the VPU_TRACE_PROC_BIT_* macros.
+ */
+ u64 trace_hw_component_mask;
+ /** Mask of trace message formats supported by the driver */
+ u64 tracing_buff_message_format_mask;
+ u64 trace_reserved_1[2];
+ /**
+ * Period at which the VPU reads the temp sensor values into MMIO, on
+ * platforms where that is necessary (in ms). 0 to disable reads.
+ */
+ u32 temp_sensor_period_ms;
+ /** PLL ratio for efficient clock frequency */
+ u32 pn_freq_pll_ratio;
+ u32 pad4[28];
+ /* Warm boot information: 0x400 - 0x43F */
+ u32 warm_boot_sections_count;
+ u32 warm_boot_start_address_reference;
+ u32 warm_boot_section_info_address_offset;
+ u32 pad5[13];
+ /* Power States transitions timestamps: 0x440 - 0x46F*/
+ struct {
+ /* VPU_IDLE -> VPU_ACTIVE transition initiated timestamp */
+ u64 vpu_active_state_requested;
+ /* VPU_IDLE -> VPU_ACTIVE transition completed timestamp */
+ u64 vpu_active_state_achieved;
+ /* VPU_ACTIVE -> VPU_IDLE transition initiated timestamp */
+ u64 vpu_idle_state_requested;
+ /* VPU_ACTIVE -> VPU_IDLE transition completed timestamp */
+ u64 vpu_idle_state_achieved;
+ /* VPU_IDLE -> VPU_STANDBY transition initiated timestamp */
+ u64 vpu_standby_state_requested;
+ /* VPU_IDLE -> VPU_STANDBY transition completed timestamp */
+ u64 vpu_standby_state_achieved;
+ } power_states_timestamps;
+ /* VPU scheduling mode. Values defined by VPU_SCHEDULING_MODE_* macros. */
+ u32 vpu_scheduling_mode;
+ /* Present call period in milliseconds. */
+ u32 vpu_focus_present_timer_ms;
+ /* Unused/reserved: 0x478 - 0xFFF */
+ u32 pad6[738];
+};
+
+/*
+ * Magic numbers set between host and vpu to detect corruptio of tracing init
+ */
+
+#define VPU_TRACING_BUFFER_CANARY (0xCAFECAFE)
+
+/* Tracing buffer message format definitions */
+#define VPU_TRACING_FORMAT_STRING 0
+#define VPU_TRACING_FORMAT_MIPI 2
+/*
+ * Header of the tracing buffer.
+ * The below defined header will be stored at the beginning of
+ * each allocated tracing buffer, followed by a series of 256b
+ * of ASCII trace message entries.
+ */
+struct vpu_tracing_buffer_header {
+ /**
+ * Magic number set by host to detect corruption
+ * @see VPU_TRACING_BUFFER_CANARY
+ */
+ u32 host_canary_start;
+ /* offset from start of buffer for trace entries */
+ u32 read_index;
+ u32 pad_to_cache_line_size_0[14];
+ /* End of first cache line */
+
+ /**
+ * Magic number set by host to detect corruption
+ * @see VPU_TRACING_BUFFER_CANARY
+ */
+ u32 vpu_canary_start;
+ /* offset from start of buffer from write start */
+ u32 write_index;
+ /* counter for buffer wrapping */
+ u32 wrap_count;
+ /* legacy field - do not use */
+ u32 reserved_0;
+ /**
+ * Size of the log buffer include this header (@header_size) and space
+ * reserved for all messages. If @alignment` is greater that 0 the @Size
+ * must be multiple of @Alignment.
+ */
+ u32 size;
+ /* Header version */
+ u16 header_version;
+ /* Header size */
+ u16 header_size;
+ /*
+ * Format of the messages in the trace buffer
+ * 0 - null terminated string
+ * 1 - size + null terminated string
+ * 2 - MIPI-SysT encoding
+ */
+ u32 format;
+ /*
+ * Message alignment
+ * 0 - messages are place 1 after another
+ * n - every message starts and multiple on offset
+ */
+ u32 alignment; /* 64, 128, 256 */
+ /* Name of the logging entity, i.e "LRT", "LNN", "SHV0", etc */
+ char name[16];
+ u32 pad_to_cache_line_size_1[4];
+ /* End of second cache line */
+};
+
+#pragma pack(pop)
+
+#endif
diff --git a/drivers/accel/ivpu/vpu_jsm_api.h b/drivers/accel/ivpu/vpu_jsm_api.h
new file mode 100644
index 000000000000..2949ec8365bd
--- /dev/null
+++ b/drivers/accel/ivpu/vpu_jsm_api.h
@@ -0,0 +1,1008 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright (C) 2020-2023 Intel Corporation
+ */
+
+/**
+ * @file
+ * @brief JSM shared definitions
+ *
+ * @ingroup Jsm
+ * @brief JSM shared definitions
+ * @{
+ */
+#ifndef VPU_JSM_API_H
+#define VPU_JSM_API_H
+
+/*
+ * Major version changes that break backward compatibility
+ */
+#define VPU_JSM_API_VER_MAJOR 3
+
+/*
+ * Minor version changes when API backward compatibility is preserved.
+ */
+#define VPU_JSM_API_VER_MINOR 0
+
+/*
+ * API header changed (field names, documentation, formatting) but API itself has not been changed
+ */
+#define VPU_JSM_API_VER_PATCH 1
+
+/*
+ * Index in the API version table
+ */
+#define VPU_JSM_API_VER_INDEX 4
+
+/*
+ * Number of Priority Bands for Hardware Scheduling
+ * Bands: RealTime, Focus, Normal, Idle
+ */
+#define VPU_HWS_NUM_PRIORITY_BANDS 4
+
+/* Max number of impacted contexts that can be dealt with the engine reset command */
+#define VPU_MAX_ENGINE_RESET_IMPACTED_CONTEXTS 3
+
+/** Pack the API structures for now, once alignment issues are fixed this can be removed */
+#pragma pack(push, 1)
+
+/*
+ * Engine indexes.
+ */
+#define VPU_ENGINE_COMPUTE 0
+#define VPU_ENGINE_COPY 1
+#define VPU_ENGINE_NB 2
+
+/*
+ * VPU status values.
+ */
+#define VPU_JSM_STATUS_SUCCESS 0x0U
+#define VPU_JSM_STATUS_PARSING_ERR 0x1U
+#define VPU_JSM_STATUS_PROCESSING_ERR 0x2U
+#define VPU_JSM_STATUS_PREEMPTED 0x3U
+#define VPU_JSM_STATUS_ABORTED 0x4U
+#define VPU_JSM_STATUS_USER_CTX_VIOL_ERR 0x5U
+#define VPU_JSM_STATUS_GLOBAL_CTX_VIOL_ERR 0x6U
+#define VPU_JSM_STATUS_MVNCI_WRONG_INPUT_FORMAT 0x7U
+#define VPU_JSM_STATUS_MVNCI_UNSUPPORTED_NETWORK_ELEMENT 0x8U
+#define VPU_JSM_STATUS_MVNCI_INVALID_HANDLE 0x9U
+#define VPU_JSM_STATUS_MVNCI_OUT_OF_RESOURCES 0xAU
+#define VPU_JSM_STATUS_MVNCI_NOT_IMPLEMENTED 0xBU
+#define VPU_JSM_STATUS_MVNCI_INTERNAL_ERROR 0xCU
+/* Job status returned when the job was preempted mid-inference */
+#define VPU_JSM_STATUS_PREEMPTED_MID_INFERENCE 0xDU
+
+/*
+ * Host <-> VPU IPC channels.
+ * ASYNC commands use a high priority channel, other messages use low-priority ones.
+ */
+#define VPU_IPC_CHAN_ASYNC_CMD 0
+#define VPU_IPC_CHAN_GEN_CMD 10
+#define VPU_IPC_CHAN_JOB_RET 11
+
+/*
+ * Job flags bit masks.
+ */
+#define VPU_JOB_FLAGS_NULL_SUBMISSION_MASK 0x00000001
+
+/*
+ * Sizes of the reserved areas in jobs, in bytes.
+ */
+#define VPU_JOB_RESERVED_BYTES 16
+/*
+ * Sizes of the reserved areas in job queues, in bytes.
+ */
+#define VPU_JOB_QUEUE_RESERVED_BYTES 52
+
+/*
+ * Max length (including trailing NULL char) of trace entity name (e.g., the
+ * name of a logging destination or a loggable HW component).
+ */
+#define VPU_TRACE_ENTITY_NAME_MAX_LEN 32
+
+/*
+ * Max length (including trailing NULL char) of a dyndbg command.
+ *
+ * NOTE: 96 is used so that the size of 'struct vpu_ipc_msg' in the JSM API is
+ * 128 bytes (multiple of 64 bytes, the cache line size).
+ */
+#define VPU_DYNDBG_CMD_MAX_LEN 96
+
+/*
+ * Job format.
+ */
+struct vpu_job_queue_entry {
+ u64 batch_buf_addr; /**< Address of VPU commands batch buffer */
+ u32 job_id; /**< Job ID */
+ u32 flags; /**< Flags bit field, see VPU_JOB_FLAGS_* above */
+ u64 root_page_table_addr; /**< Address of root page table to use for this job */
+ u64 root_page_table_update_counter; /**< Page tables update events counter */
+ u64 preemption_buffer_address; /**< Address of the preemption buffer to use for this job */
+ u64 preemption_buffer_size; /**< Size of the preemption buffer to use for this job */
+ u8 reserved_0[VPU_JOB_RESERVED_BYTES];
+};
+
+/*
+ * Job queue control registers.
+ */
+struct vpu_job_queue_header {
+ u32 engine_idx;
+ u32 head;
+ u32 tail;
+ u8 reserved_0[VPU_JOB_QUEUE_RESERVED_BYTES];
+};
+
+/*
+ * Job queue format.
+ */
+struct vpu_job_queue {
+ struct vpu_job_queue_header header;
+ struct vpu_job_queue_entry job[];
+};
+
+/**
+ * Logging entity types.
+ *
+ * This enum defines the different types of entities involved in logging.
+ */
+enum vpu_trace_entity_type {
+ /** Logging destination (entity where logs can be stored / printed). */
+ VPU_TRACE_ENTITY_TYPE_DESTINATION = 1,
+ /** Loggable HW component (HW entity that can be logged). */
+ VPU_TRACE_ENTITY_TYPE_HW_COMPONENT = 2,
+};
+
+/*
+ * Host <-> VPU IPC messages types.
+ */
+enum vpu_ipc_msg_type {
+ VPU_JSM_MSG_UNKNOWN = 0xFFFFFFFF,
+ /* IPC Host -> Device, Async commands */
+ VPU_JSM_MSG_ASYNC_CMD = 0x1100,
+ VPU_JSM_MSG_ENGINE_RESET = VPU_JSM_MSG_ASYNC_CMD,
+ VPU_JSM_MSG_ENGINE_PREEMPT = 0x1101,
+ VPU_JSM_MSG_REGISTER_DB = 0x1102,
+ VPU_JSM_MSG_UNREGISTER_DB = 0x1103,
+ VPU_JSM_MSG_QUERY_ENGINE_HB = 0x1104,
+ VPU_JSM_MSG_GET_POWER_LEVEL_COUNT = 0x1105,
+ VPU_JSM_MSG_GET_POWER_LEVEL = 0x1106,
+ VPU_JSM_MSG_SET_POWER_LEVEL = 0x1107,
+ /* @deprecated */
+ VPU_JSM_MSG_METRIC_STREAMER_OPEN = 0x1108,
+ /* @deprecated */
+ VPU_JSM_MSG_METRIC_STREAMER_CLOSE = 0x1109,
+ /** Configure logging (used to modify configuration passed in boot params). */
+ VPU_JSM_MSG_TRACE_SET_CONFIG = 0x110a,
+ /** Return current logging configuration. */
+ VPU_JSM_MSG_TRACE_GET_CONFIG = 0x110b,
+ /**
+ * Get masks of destinations and HW components supported by the firmware
+ * (may vary between HW generations and FW compile
+ * time configurations)
+ */
+ VPU_JSM_MSG_TRACE_GET_CAPABILITY = 0x110c,
+ /** Get the name of a destination or HW component. */
+ VPU_JSM_MSG_TRACE_GET_NAME = 0x110d,
+ /**
+ * Release resource associated with host ssid . All jobs that belong to the host_ssid
+ * aborted and removed from internal scheduling queues. All doorbells assigned
+ * to the host_ssid are unregistered and any internal FW resources belonging to
+ * the host_ssid are released.
+ */
+ VPU_JSM_MSG_SSID_RELEASE = 0x110e,
+ /**
+ * Start collecting metric data.
+ * @see vpu_jsm_metric_streamer_start
+ */
+ VPU_JSM_MSG_METRIC_STREAMER_START = 0x110f,
+ /**
+ * Stop collecting metric data. This command will return success if it is called
+ * for a metric stream that has already been stopped or was never started.
+ * @see vpu_jsm_metric_streamer_stop
+ */
+ VPU_JSM_MSG_METRIC_STREAMER_STOP = 0x1110,
+ /**
+ * Update current and next buffer for metric data collection. This command can
+ * also be used to request information about the number of collected samples
+ * and the amount of data written to the buffer.
+ * @see vpu_jsm_metric_streamer_update
+ */
+ VPU_JSM_MSG_METRIC_STREAMER_UPDATE = 0x1111,
+ /**
+ * Request description of selected metric groups and metric counters within
+ * each group. The VPU will write the description of groups and counters to
+ * the buffer specified in the command structure.
+ * @see vpu_jsm_metric_streamer_start
+ */
+ VPU_JSM_MSG_METRIC_STREAMER_INFO = 0x1112,
+ /** Control command: Priority band setup */
+ VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP = 0x1113,
+ /** Control command: Create command queue */
+ VPU_JSM_MSG_CREATE_CMD_QUEUE = 0x1114,
+ /** Control command: Destroy command queue */
+ VPU_JSM_MSG_DESTROY_CMD_QUEUE = 0x1115,
+ /** Control command: Set context scheduling properties */
+ VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES = 0x1116,
+ /*
+ * Register a doorbell to notify VPU of new work. The doorbell may later be
+ * deallocated or reassigned to another context.
+ */
+ VPU_JSM_MSG_HWS_REGISTER_DB = 0x1117,
+ /* IPC Host -> Device, General commands */
+ VPU_JSM_MSG_GENERAL_CMD = 0x1200,
+ VPU_JSM_MSG_BLOB_DEINIT = VPU_JSM_MSG_GENERAL_CMD,
+ /**
+ * Control dyndbg behavior by executing a dyndbg command; equivalent to
+ * Linux command: `echo '<dyndbg_cmd>' > <debugfs>/dynamic_debug/control`.
+ */
+ VPU_JSM_MSG_DYNDBG_CONTROL = 0x1201,
+ /* IPC Device -> Host, Job completion */
+ VPU_JSM_MSG_JOB_DONE = 0x2100,
+ /* IPC Device -> Host, Async command completion */
+ VPU_JSM_MSG_ASYNC_CMD_DONE = 0x2200,
+ VPU_JSM_MSG_ENGINE_RESET_DONE = VPU_JSM_MSG_ASYNC_CMD_DONE,
+ VPU_JSM_MSG_ENGINE_PREEMPT_DONE = 0x2201,
+ VPU_JSM_MSG_REGISTER_DB_DONE = 0x2202,
+ VPU_JSM_MSG_UNREGISTER_DB_DONE = 0x2203,
+ VPU_JSM_MSG_QUERY_ENGINE_HB_DONE = 0x2204,
+ VPU_JSM_MSG_GET_POWER_LEVEL_COUNT_DONE = 0x2205,
+ VPU_JSM_MSG_GET_POWER_LEVEL_DONE = 0x2206,
+ VPU_JSM_MSG_SET_POWER_LEVEL_DONE = 0x2207,
+ /* @deprecated */
+ VPU_JSM_MSG_METRIC_STREAMER_OPEN_DONE = 0x2208,
+ /* @deprecated */
+ VPU_JSM_MSG_METRIC_STREAMER_CLOSE_DONE = 0x2209,
+ /** Response to VPU_JSM_MSG_TRACE_SET_CONFIG. */
+ VPU_JSM_MSG_TRACE_SET_CONFIG_RSP = 0x220a,
+ /** Response to VPU_JSM_MSG_TRACE_GET_CONFIG. */
+ VPU_JSM_MSG_TRACE_GET_CONFIG_RSP = 0x220b,
+ /** Response to VPU_JSM_MSG_TRACE_GET_CAPABILITY. */
+ VPU_JSM_MSG_TRACE_GET_CAPABILITY_RSP = 0x220c,
+ /** Response to VPU_JSM_MSG_TRACE_GET_NAME. */
+ VPU_JSM_MSG_TRACE_GET_NAME_RSP = 0x220d,
+ /** Response to VPU_JSM_MSG_SSID_RELEASE. */
+ VPU_JSM_MSG_SSID_RELEASE_DONE = 0x220e,
+ /**
+ * Response to VPU_JSM_MSG_METRIC_STREAMER_START.
+ * VPU will return an error result if metric collection cannot be started,
+ * e.g. when the specified metric mask is invalid.
+ * @see vpu_jsm_metric_streamer_done
+ */
+ VPU_JSM_MSG_METRIC_STREAMER_START_DONE = 0x220f,
+ /**
+ * Response to VPU_JSM_MSG_METRIC_STREAMER_STOP.
+ * Returns information about collected metric data.
+ * @see vpu_jsm_metric_streamer_done
+ */
+ VPU_JSM_MSG_METRIC_STREAMER_STOP_DONE = 0x2210,
+ /**
+ * Response to VPU_JSM_MSG_METRIC_STREAMER_UPDATE.
+ * Returns information about collected metric data.
+ * @see vpu_jsm_metric_streamer_done
+ */
+ VPU_JSM_MSG_METRIC_STREAMER_UPDATE_DONE = 0x2211,
+ /**
+ * Response to VPU_JSM_MSG_METRIC_STREAMER_INFO.
+ * Returns a description of the metric groups and metric counters.
+ * @see vpu_jsm_metric_streamer_done
+ */
+ VPU_JSM_MSG_METRIC_STREAMER_INFO_DONE = 0x2212,
+ /**
+ * Asynchronous event sent from the VPU to the host either when the current
+ * metric buffer is full or when the VPU has collected a multiple of
+ * @notify_sample_count samples as indicated through the start command
+ * (VPU_JSM_MSG_METRIC_STREAMER_START). Returns information about collected
+ * metric data.
+ * @see vpu_jsm_metric_streamer_done
+ */
+ VPU_JSM_MSG_METRIC_STREAMER_NOTIFICATION = 0x2213,
+ /** Response to control command: Priority band setup */
+ VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP_RSP = 0x2214,
+ /** Response to control command: Create command queue */
+ VPU_JSM_MSG_CREATE_CMD_QUEUE_RSP = 0x2215,
+ /** Response to control command: Destroy command queue */
+ VPU_JSM_MSG_DESTROY_CMD_QUEUE_RSP = 0x2216,
+ /** Response to control command: Set context scheduling properties */
+ VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES_RSP = 0x2217,
+ /* IPC Device -> Host, General command completion */
+ VPU_JSM_MSG_GENERAL_CMD_DONE = 0x2300,
+ VPU_JSM_MSG_BLOB_DEINIT_DONE = VPU_JSM_MSG_GENERAL_CMD_DONE,
+ /** Response to VPU_JSM_MSG_DYNDBG_CONTROL. */
+ VPU_JSM_MSG_DYNDBG_CONTROL_RSP = 0x2301,
+};
+
+enum vpu_ipc_msg_status { VPU_JSM_MSG_FREE, VPU_JSM_MSG_ALLOCATED };
+
+/*
+ * Host <-> LRT IPC message payload definitions
+ */
+struct vpu_ipc_msg_payload_engine_reset {
+ /* Engine to be reset. */
+ u32 engine_idx;
+ /* Reserved */
+ u32 reserved_0;
+};
+
+struct vpu_ipc_msg_payload_engine_preempt {
+ /* Engine to be preempted. */
+ u32 engine_idx;
+ /* ID of the preemption request. */
+ u32 preempt_id;
+};
+
+/*
+ * @brief Register doorbell command structure.
+ * This structure supports doorbell registration for only OS scheduling.
+ * @see VPU_JSM_MSG_REGISTER_DB
+ */
+struct vpu_ipc_msg_payload_register_db {
+ /* Index of the doorbell to register. */
+ u32 db_idx;
+ /* Reserved */
+ u32 reserved_0;
+ /* Virtual address in Global GTT pointing to the start of job queue. */
+ u64 jobq_base;
+ /* Size of the job queue in bytes. */
+ u32 jobq_size;
+ /* Host sub-stream ID for the context assigned to the doorbell. */
+ u32 host_ssid;
+};
+
+/**
+ * @brief Unregister doorbell command structure.
+ * Request structure to unregister a doorbell for both HW and OS scheduling.
+ * @see VPU_JSM_MSG_UNREGISTER_DB
+ */
+struct vpu_ipc_msg_payload_unregister_db {
+ /* Index of the doorbell to unregister. */
+ u32 db_idx;
+ /* Reserved */
+ u32 reserved_0;
+};
+
+struct vpu_ipc_msg_payload_query_engine_hb {
+ /* Engine to return heartbeat value. */
+ u32 engine_idx;
+ /* Reserved */
+ u32 reserved_0;
+};
+
+struct vpu_ipc_msg_payload_power_level {
+ /**
+ * Requested power level. The power level value is in the
+ * range [0, power_level_count-1] where power_level_count
+ * is the number of available power levels as returned by
+ * the get power level count command. A power level of 0
+ * corresponds to the maximum possible power level, while
+ * power_level_count-1 corresponds to the minimum possible
+ * power level. Values outside of this range are not
+ * considered to be valid.
+ */
+ u32 power_level;
+ /* Reserved */
+ u32 reserved_0;
+};
+
+struct vpu_ipc_msg_payload_ssid_release {
+ /* Host sub-stream ID for the context to be released. */
+ u32 host_ssid;
+ /* Reserved */
+ u32 reserved_0;
+};
+
+/**
+ * @brief Metric streamer start command structure.
+ * This structure is also used with VPU_JSM_MSG_METRIC_STREAMER_INFO to request metric
+ * groups and metric counters description from the firmware.
+ * @see VPU_JSM_MSG_METRIC_STREAMER_START
+ * @see VPU_JSM_MSG_METRIC_STREAMER_INFO
+ */
+struct vpu_jsm_metric_streamer_start {
+ /**
+ * Bitmask to select the desired metric groups.
+ * A metric group can belong only to one metric streamer instance at a time.
+ * Since each metric streamer instance has a unique set of metric groups, it
+ * can also identify a metric streamer instance if more than one instance was
+ * started. If the VPU device does not support multiple metric streamer instances,
+ * then VPU_JSM_MSG_METRIC_STREAMER_START will return an error even if the second
+ * instance has different groups to the first.
+ */
+ u64 metric_group_mask;
+ /** Sampling rate in nanoseconds. */
+ u64 sampling_rate;
+ /**
+ * If > 0 the VPU will send a VPU_JSM_MSG_METRIC_STREAMER_NOTIFICATION message
+ * after every @notify_sample_count samples is collected or dropped by the VPU.
+ * If set to UINT_MAX the VPU will only generate a notification when the metric
+ * buffer is full. If set to 0 the VPU will never generate a notification.
+ */
+ u32 notify_sample_count;
+ u32 reserved_0;
+ /**
+ * Address and size of the buffer where the VPU will write metric data. The
+ * VPU writes all counters from enabled metric groups one after another. If
+ * there is no space left to write data at the next sample period the VPU
+ * will switch to the next buffer (@see next_buffer_addr) and will optionally
+ * send a notification to the host driver if @notify_sample_count is non-zero.
+ * If @next_buffer_addr is NULL the VPU will stop collecting metric data.
+ */
+ u64 buffer_addr;
+ u64 buffer_size;
+ /**
+ * Address and size of the next buffer to write metric data to after the initial
+ * buffer is full. If the address is NULL the VPU will stop collecting metric
+ * data.
+ */
+ u64 next_buffer_addr;
+ u64 next_buffer_size;
+};
+
+/**
+ * @brief Metric streamer stop command structure.
+ * @see VPU_JSM_MSG_METRIC_STREAMER_STOP
+ */
+struct vpu_jsm_metric_streamer_stop {
+ /** Bitmask to select the desired metric groups. */
+ u64 metric_group_mask;
+};
+
+/**
+ * Provide VPU FW with buffers to write metric data.
+ * @see VPU_JSM_MSG_METRIC_STREAMER_UPDATE
+ */
+struct vpu_jsm_metric_streamer_update {
+ /** Metric group mask that identifies metric streamer instance. */
+ u64 metric_group_mask;
+ /**
+ * Address and size of the buffer where the VPU will write metric data. If
+ * the buffer address is 0 or same as the currently used buffer the VPU will
+ * continue writing metric data to the current buffer. In this case the
+ * buffer size is ignored and the size of the current buffer is unchanged.
+ * If the address is non-zero and differs from the current buffer address the
+ * VPU will immediately switch data collection to the new buffer.
+ */
+ u64 buffer_addr;
+ u64 buffer_size;
+ /**
+ * Address and size of the next buffer to write metric data after the initial
+ * buffer is full. If the address is NULL the VPU will stop collecting metric
+ * data but will continue to record dropped samples.
+ *
+ * Note that there is a hazard possible if both buffer_addr and the next_buffer_addr
+ * are non-zero in same update request. It is the host's responsibility to ensure
+ * that both addresses make sense even if the VPU just switched to writing samples
+ * from the current to the next buffer.
+ */
+ u64 next_buffer_addr;
+ u64 next_buffer_size;
+};
+
+struct vpu_ipc_msg_payload_blob_deinit {
+ /* 64-bit unique ID for the blob to be de-initialized. */
+ u64 blob_id;
+};
+
+struct vpu_ipc_msg_payload_job_done {
+ /* Engine to which the job was submitted. */
+ u32 engine_idx;
+ /* Index of the doorbell to which the job was submitted */
+ u32 db_idx;
+ /* ID of the completed job */
+ u32 job_id;
+ /* Status of the completed job */
+ u32 job_status;
+ /* Host SSID */
+ u32 host_ssid;
+ /* Zero Padding */
+ u32 reserved_0;
+ /* Command queue id */
+ u64 cmdq_id;
+};
+
+struct vpu_jsm_engine_reset_context {
+ /* Host SSID */
+ u32 host_ssid;
+ /* Zero Padding */
+ u32 reserved_0;
+ /* Command queue id */
+ u64 cmdq_id;
+ /* Flags: 0: cause of hang; 1: collateral damage of reset */
+ u64 flags;
+};
+
+struct vpu_ipc_msg_payload_engine_reset_done {
+ /* Engine ordinal */
+ u32 engine_idx;
+ /* Number of impacted contexts */
+ u32 num_impacted_contexts;
+ /* Array of impacted command queue ids and their flags */
+ struct vpu_jsm_engine_reset_context
+ impacted_contexts[VPU_MAX_ENGINE_RESET_IMPACTED_CONTEXTS];
+};
+
+struct vpu_ipc_msg_payload_engine_preempt_done {
+ /* Engine preempted. */
+ u32 engine_idx;
+ /* ID of the preemption request. */
+ u32 preempt_id;
+};
+
+/**
+ * Response structure for register doorbell command for both OS
+ * and HW scheduling.
+ * @see VPU_JSM_MSG_REGISTER_DB
+ * @see VPU_JSM_MSG_HWS_REGISTER_DB
+ */
+struct vpu_ipc_msg_payload_register_db_done {
+ /* Index of the registered doorbell. */
+ u32 db_idx;
+ /* Reserved */
+ u32 reserved_0;
+};
+
+/**
+ * Response structure for unregister doorbell command for both OS
+ * and HW scheduling.
+ * @see VPU_JSM_MSG_UNREGISTER_DB
+ */
+struct vpu_ipc_msg_payload_unregister_db_done {
+ /* Index of the unregistered doorbell. */
+ u32 db_idx;
+ /* Reserved */
+ u32 reserved_0;
+};
+
+struct vpu_ipc_msg_payload_query_engine_hb_done {
+ /* Engine returning heartbeat value. */
+ u32 engine_idx;
+ /* Reserved */
+ u32 reserved_0;
+ /* Heartbeat value. */
+ u64 heartbeat;
+};
+
+struct vpu_ipc_msg_payload_get_power_level_count_done {
+ /**
+ * Number of supported power levels. The maximum possible
+ * value of power_level_count is 16 but this may vary across
+ * implementations.
+ */
+ u32 power_level_count;
+ /* Reserved */
+ u32 reserved_0;
+ /**
+ * Power consumption limit for each supported power level in
+ * [0-100%] range relative to power level 0.
+ */
+ u8 power_limit[16];
+};
+
+struct vpu_ipc_msg_payload_blob_deinit_done {
+ /* 64-bit unique ID for the blob de-initialized. */
+ u64 blob_id;
+};
+
+/* HWS priority band setup request / response */
+struct vpu_ipc_msg_payload_hws_priority_band_setup {
+ /*
+ * Grace period in 100ns units when preempting another priority band for
+ * this priority band
+ */
+ u32 grace_period[VPU_HWS_NUM_PRIORITY_BANDS];
+ /*
+ * Default quantum in 100ns units for scheduling across processes
+ * within a priority band
+ */
+ u64 process_quantum[VPU_HWS_NUM_PRIORITY_BANDS];
+ /*
+ * Default grace period in 100ns units for processes that preempt each
+ * other within a priority band
+ */
+ u64 process_grace_period[VPU_HWS_NUM_PRIORITY_BANDS];
+ /*
+ * For normal priority band, specifies the target VPU percentage
+ * in situations when it's starved by the focus band.
+ */
+ u32 normal_band_percentage;
+ /* Reserved */
+ u32 reserved_0;
+};
+
+/* HWS create command queue request */
+struct vpu_ipc_msg_payload_hws_create_cmdq {
+ /* Process id */
+ u64 process_id;
+ /* Host SSID */
+ u32 host_ssid;
+ /* Zero Padding */
+ u32 reserved;
+ /* Command queue id */
+ u64 cmdq_id;
+ /* Command queue base */
+ u64 cmdq_base;
+ /* Command queue size */
+ u32 cmdq_size;
+ /* Reserved */
+ u32 reserved_0;
+};
+
+/* HWS create command queue response */
+struct vpu_ipc_msg_payload_hws_create_cmdq_rsp {
+ /* Process id */
+ u64 process_id;
+ /* Host SSID */
+ u32 host_ssid;
+ /* Zero Padding */
+ u32 reserved;
+ /* Command queue id */
+ u64 cmdq_id;
+};
+
+/* HWS destroy command queue request / response */
+struct vpu_ipc_msg_payload_hws_destroy_cmdq {
+ /* Host SSID */
+ u32 host_ssid;
+ /* Zero Padding */
+ u32 reserved;
+ /* Command queue id */
+ u64 cmdq_id;
+};
+
+/* HWS set context scheduling properties request / response */
+struct vpu_ipc_msg_payload_hws_set_context_sched_properties {
+ /* Host SSID */
+ u32 host_ssid;
+ /* Zero Padding */
+ u32 reserved_0;
+ /* Command queue id */
+ u64 cmdq_id;
+ /* Priority band to assign to work of this context */
+ u32 priority_band;
+ /* Inside realtime band assigns a further priority */
+ u32 realtime_priority_level;
+ /* Priority relative to other contexts in the same process */
+ u32 in_process_priority;
+ /* Zero padding / Reserved */
+ u32 reserved_1;
+ /* Context quantum relative to other contexts of same priority in the same process */
+ u64 context_quantum;
+ /* Grace period when preempting context of the same priority within the same process */
+ u64 grace_period_same_priority;
+ /* Grace period when preempting context of a lower priority within the same process */
+ u64 grace_period_lower_priority;
+};
+
+/*
+ * @brief Register doorbell command structure.
+ * This structure supports doorbell registration for both HW and OS scheduling.
+ * Note: Queue base and size are added here so that the same structure can be used for
+ * OS scheduling and HW scheduling. For OS scheduling, cmdq_id will be ignored
+ * and cmdq_base and cmdq_size will be used. For HW scheduling, cmdq_base and cmdq_size will be
+ * ignored and cmdq_id is used.
+ * @see VPU_JSM_MSG_HWS_REGISTER_DB
+ */
+struct vpu_jsm_hws_register_db {
+ /* Index of the doorbell to register. */
+ u32 db_id;
+ /* Host sub-stream ID for the context assigned to the doorbell. */
+ u32 host_ssid;
+ /* ID of the command queue associated with the doorbell. */
+ u64 cmdq_id;
+ /* Virtual address pointing to the start of command queue. */
+ u64 cmdq_base;
+ /* Size of the command queue in bytes. */
+ u64 cmdq_size;
+};
+
+/**
+ * Payload for VPU_JSM_MSG_TRACE_SET_CONFIG[_RSP] and
+ * VPU_JSM_MSG_TRACE_GET_CONFIG_RSP messages.
+ *
+ * The payload is interpreted differently depending on the type of message:
+ *
+ * - For VPU_JSM_MSG_TRACE_SET_CONFIG, the payload specifies the desired
+ * logging configuration to be set.
+ *
+ * - For VPU_JSM_MSG_TRACE_SET_CONFIG_RSP, the payload reports the logging
+ * configuration that was set after a VPU_JSM_MSG_TRACE_SET_CONFIG request.
+ * The host can compare this payload with the one it sent in the
+ * VPU_JSM_MSG_TRACE_SET_CONFIG request to check whether or not the
+ * configuration was set as desired.
+ *
+ * - VPU_JSM_MSG_TRACE_GET_CONFIG_RSP, the payload reports the current logging
+ * configuration.
+ */
+struct vpu_ipc_msg_payload_trace_config {
+ /**
+ * Logging level (currently set or to be set); see 'mvLog_t' enum for
+ * acceptable values. The specified logging level applies to all
+ * destinations and HW components
+ */
+ u32 trace_level;
+ /**
+ * Bitmask of logging destinations (currently enabled or to be enabled);
+ * bitwise OR of values defined in logging_destination enum.
+ */
+ u32 trace_destination_mask;
+ /**
+ * Bitmask of loggable HW components (currently enabled or to be enabled);
+ * bitwise OR of values defined in loggable_hw_component enum.
+ */
+ u64 trace_hw_component_mask;
+ u64 reserved_0; /**< Reserved for future extensions. */
+};
+
+/**
+ * Payload for VPU_JSM_MSG_TRACE_GET_CAPABILITY_RSP messages.
+ */
+struct vpu_ipc_msg_payload_trace_capability_rsp {
+ u32 trace_destination_mask; /**< Bitmask of supported logging destinations. */
+ u32 reserved_0;
+ u64 trace_hw_component_mask; /**< Bitmask of supported loggable HW components. */
+ u64 reserved_1; /**< Reserved for future extensions. */
+};
+
+/**
+ * Payload for VPU_JSM_MSG_TRACE_GET_NAME requests.
+ */
+struct vpu_ipc_msg_payload_trace_get_name {
+ /**
+ * The type of the entity to query name for; see logging_entity_type for
+ * possible values.
+ */
+ u32 entity_type;
+ u32 reserved_0;
+ /**
+ * The ID of the entity to query name for; possible values depends on the
+ * entity type.
+ */
+ u64 entity_id;
+};
+
+/**
+ * Payload for VPU_JSM_MSG_TRACE_GET_NAME_RSP responses.
+ */
+struct vpu_ipc_msg_payload_trace_get_name_rsp {
+ /**
+ * The type of the entity whose name was queried; see logging_entity_type
+ * for possible values.
+ */
+ u32 entity_type;
+ u32 reserved_0;
+ /**
+ * The ID of the entity whose name was queried; possible values depends on
+ * the entity type.
+ */
+ u64 entity_id;
+ /** Reserved for future extensions. */
+ u64 reserved_1;
+ /** The name of the entity. */
+ char entity_name[VPU_TRACE_ENTITY_NAME_MAX_LEN];
+};
+
+/**
+ * Data sent from the VPU to the host in all metric streamer response messages
+ * and in asynchronous notification.
+ * @see VPU_JSM_MSG_METRIC_STREAMER_START_DONE
+ * @see VPU_JSM_MSG_METRIC_STREAMER_STOP_DONE
+ * @see VPU_JSM_MSG_METRIC_STREAMER_UPDATE_DONE
+ * @see VPU_JSM_MSG_METRIC_STREAMER_INFO_DONE
+ * @see VPU_JSM_MSG_METRIC_STREAMER_NOTIFICATION
+ */
+struct vpu_jsm_metric_streamer_done {
+ /** Metric group mask that identifies metric streamer instance. */
+ u64 metric_group_mask;
+ /**
+ * Size in bytes of single sample - total size of all enabled counters.
+ * Some VPU implementations may align sample_size to more than 8 bytes.
+ */
+ u32 sample_size;
+ u32 reserved_0;
+ /**
+ * Number of samples collected since the metric streamer was started.
+ * This will be 0 if the metric streamer was not started.
+ */
+ u32 samples_collected;
+ /**
+ * Number of samples dropped since the metric streamer was started. This
+ * is incremented every time the metric streamer is not able to write
+ * collected samples because the current buffer is full and there is no
+ * next buffer to switch to.
+ */
+ u32 samples_dropped;
+ /** Address of the buffer that contains the latest metric data. */
+ u64 buffer_addr;
+ /**
+ * Number of bytes written into the metric data buffer. In response to the
+ * VPU_JSM_MSG_METRIC_STREAMER_INFO request this field contains the size of
+ * all group and counter descriptors. The size is updated even if the buffer
+ * in the request was NULL or too small to hold descriptors of all counters
+ */
+ u64 bytes_written;
+};
+
+/**
+ * Metric group description placed in the metric buffer after successful completion
+ * of the VPU_JSM_MSG_METRIC_STREAMER_INFO command. This is followed by one or more
+ * @vpu_jsm_metric_counter_descriptor records.
+ * @see VPU_JSM_MSG_METRIC_STREAMER_INFO
+ */
+struct vpu_jsm_metric_group_descriptor {
+ /**
+ * Offset to the next metric group (8-byte aligned). If this offset is 0 this
+ * is the last descriptor. The value of metric_info_size must be greater than
+ * or equal to sizeof(struct vpu_jsm_metric_group_descriptor) + name_string_size
+ * + description_string_size and must be 8-byte aligned.
+ */
+ u32 next_metric_group_info_offset;
+ /**
+ * Offset to the first metric counter description record (8-byte aligned).
+ * @see vpu_jsm_metric_counter_descriptor
+ */
+ u32 next_metric_counter_info_offset;
+ /** Index of the group. This corresponds to bit index in metric_group_mask. */
+ u32 group_id;
+ /** Number of counters in the metric group. */
+ u32 num_counters;
+ /** Data size for all counters, must be a multiple of 8 bytes.*/
+ u32 metric_group_data_size;
+ /**
+ * Metric group domain number. Cannot use multiple, simultaneous metric groups
+ * from the same domain.
+ */
+ u32 domain;
+ /**
+ * Counter name string size. The string must include a null termination character.
+ * The FW may use a fixed size name or send a different name for each counter.
+ * If the VPU uses fixed size strings, all characters from the end of the name
+ * to the of the fixed size character array must be zeroed.
+ */
+ u32 name_string_size;
+ /** Counter description string size, @see name_string_size */
+ u32 description_string_size;
+ u64 reserved_0;
+ /**
+ * Right after this structure, the VPU writes name and description of
+ * the metric group.
+ */
+};
+
+/**
+ * Metric counter description, placed in the buffer after vpu_jsm_metric_group_descriptor.
+ * @see VPU_JSM_MSG_METRIC_STREAMER_INFO
+ */
+struct vpu_jsm_metric_counter_descriptor {
+ /**
+ * Offset to the next counter in a group (8-byte aligned). If this offset is
+ * 0 this is the last counter in the group.
+ */
+ u32 next_metric_counter_info_offset;
+ /**
+ * Offset to the counter data from the start of samples in this metric group.
+ * Note that metric_data_offset % metric_data_size must be 0.
+ */
+ u32 metric_data_offset;
+ /** Size of the metric counter data in bytes. */
+ u32 metric_data_size;
+ /** Metric type, see Level Zero API for definitions. */
+ u32 tier;
+ /** Metric type, see set_metric_type_t for definitions. */
+ u32 metric_type;
+ /** Metric type, see set_value_type_t for definitions. */
+ u32 metric_value_type;
+ /**
+ * Counter name string size. The string must include a null termination character.
+ * The FW may use a fixed size name or send a different name for each counter.
+ * If the VPU uses fixed size strings, all characters from the end of the name
+ * to the of the fixed size character array must be zeroed.
+ */
+ u32 name_string_size;
+ /** Counter description string size, @see name_string_size */
+ u32 description_string_size;
+ /** Counter component name string size, @see name_string_size */
+ u32 component_string_size;
+ /** Counter string size, @see name_string_size */
+ u32 units_string_size;
+ u64 reserved_0;
+ /**
+ * Right after this structure, the VPU writes name, description
+ * component and unit strings.
+ */
+};
+
+/**
+ * Payload for VPU_JSM_MSG_DYNDBG_CONTROL requests.
+ *
+ * VPU_JSM_MSG_DYNDBG_CONTROL are used to control the VPU FW Dynamic Debug
+ * feature, which allows developers to selectively enable / disable MVLOG_DEBUG
+ * messages. This is equivalent to the Dynamic Debug functionality provided by
+ * Linux
+ * (https://www.kernel.org/doc/html/latest/admin-guide/dynamic-debug-howto.html)
+ * The host can control Dynamic Debug behavior by sending dyndbg commands, which
+ * have the same syntax as Linux
+ * dyndbg commands.
+ *
+ * NOTE: in order for MVLOG_DEBUG messages to be actually printed, the host
+ * still has to set the logging level to MVLOG_DEBUG, using the
+ * VPU_JSM_MSG_TRACE_SET_CONFIG command.
+ *
+ * The host can see the current dynamic debug configuration by executing a
+ * special 'show' command. The dyndbg configuration will be printed to the
+ * configured logging destination using MVLOG_INFO logging level.
+ */
+struct vpu_ipc_msg_payload_dyndbg_control {
+ /**
+ * Dyndbg command (same format as Linux dyndbg); must be a NULL-terminated
+ * string.
+ */
+ char dyndbg_cmd[VPU_DYNDBG_CMD_MAX_LEN];
+};
+
+/*
+ * Payloads union, used to define complete message format.
+ */
+union vpu_ipc_msg_payload {
+ struct vpu_ipc_msg_payload_engine_reset engine_reset;
+ struct vpu_ipc_msg_payload_engine_preempt engine_preempt;
+ struct vpu_ipc_msg_payload_register_db register_db;
+ struct vpu_ipc_msg_payload_unregister_db unregister_db;
+ struct vpu_ipc_msg_payload_query_engine_hb query_engine_hb;
+ struct vpu_ipc_msg_payload_power_level power_level;
+ struct vpu_jsm_metric_streamer_start metric_streamer_start;
+ struct vpu_jsm_metric_streamer_stop metric_streamer_stop;
+ struct vpu_jsm_metric_streamer_update metric_streamer_update;
+ struct vpu_ipc_msg_payload_blob_deinit blob_deinit;
+ struct vpu_ipc_msg_payload_ssid_release ssid_release;
+ struct vpu_jsm_hws_register_db hws_register_db;
+ struct vpu_ipc_msg_payload_job_done job_done;
+ struct vpu_ipc_msg_payload_engine_reset_done engine_reset_done;
+ struct vpu_ipc_msg_payload_engine_preempt_done engine_preempt_done;
+ struct vpu_ipc_msg_payload_register_db_done register_db_done;
+ struct vpu_ipc_msg_payload_unregister_db_done unregister_db_done;
+ struct vpu_ipc_msg_payload_query_engine_hb_done query_engine_hb_done;
+ struct vpu_ipc_msg_payload_get_power_level_count_done get_power_level_count_done;
+ struct vpu_jsm_metric_streamer_done metric_streamer_done;
+ struct vpu_ipc_msg_payload_blob_deinit_done blob_deinit_done;
+ struct vpu_ipc_msg_payload_trace_config trace_config;
+ struct vpu_ipc_msg_payload_trace_capability_rsp trace_capability;
+ struct vpu_ipc_msg_payload_trace_get_name trace_get_name;
+ struct vpu_ipc_msg_payload_trace_get_name_rsp trace_get_name_rsp;
+ struct vpu_ipc_msg_payload_dyndbg_control dyndbg_control;
+ struct vpu_ipc_msg_payload_hws_priority_band_setup hws_priority_band_setup;
+ struct vpu_ipc_msg_payload_hws_create_cmdq hws_create_cmdq;
+ struct vpu_ipc_msg_payload_hws_create_cmdq_rsp hws_create_cmdq_rsp;
+ struct vpu_ipc_msg_payload_hws_destroy_cmdq hws_destroy_cmdq;
+ struct vpu_ipc_msg_payload_hws_set_context_sched_properties
+ hws_set_context_sched_properties;
+};
+
+/*
+ * Host <-> LRT IPC message base structure.
+ *
+ * NOTE: All instances of this object must be aligned on a 64B boundary
+ * to allow proper handling of VPU cache operations.
+ */
+struct vpu_jsm_msg {
+ /* Reserved */
+ u64 reserved_0;
+ /* Message type, see vpu_ipc_msg_type enum. */
+ u32 type;
+ /* Buffer status, see vpu_ipc_msg_status enum. */
+ u32 status;
+ /*
+ * Request ID, provided by the host in a request message and passed
+ * back by VPU in the response message.
+ */
+ u32 request_id;
+ /* Request return code set by the VPU, see VPU_JSM_STATUS_* defines. */
+ u32 result;
+ u64 reserved_1;
+ /* Message payload depending on message type, see vpu_ipc_msg_payload union. */
+ union vpu_ipc_msg_payload payload;
+};
+
+#pragma pack(pop)
+
+#endif
+
+///@}
diff --git a/drivers/accessibility/speakup/main.c b/drivers/accessibility/speakup/main.c
index 4733fd6334ab..56c073103cbb 100644
--- a/drivers/accessibility/speakup/main.c
+++ b/drivers/accessibility/speakup/main.c
@@ -2490,7 +2490,7 @@ MODULE_PARM_DESC(punc_level, "Controls the level of punctuation spoken as the sc
MODULE_PARM_DESC(reading_punc, "It controls the level of punctuation when reviewing the screen with speakup's screen review commands.");
MODULE_PARM_DESC(cursor_time, "This controls cursor delay when using arrow keys.");
MODULE_PARM_DESC(say_control, "This controls if speakup speaks shift, alt and control when those keys are pressed or not.");
-MODULE_PARM_DESC(say_word_ctl, "Sets thw say_word_ctl on load.");
+MODULE_PARM_DESC(say_word_ctl, "Sets the say_word_ctl on load.");
MODULE_PARM_DESC(no_interrupt, "Controls if typing interrupts output from speakup.");
MODULE_PARM_DESC(key_echo, "Controls if speakup speaks keys when they are typed. One = on zero = off or don't echo keys.");
MODULE_PARM_DESC(cur_phonetic, "Controls if speakup speaks letters phonetically during navigation. One = on zero = off or don't speak phonetically.");
diff --git a/drivers/acpi/acpi_lpit.c b/drivers/acpi/acpi_lpit.c
index 50540d4d4948..3843d2576d3f 100644
--- a/drivers/acpi/acpi_lpit.c
+++ b/drivers/acpi/acpi_lpit.c
@@ -10,6 +10,7 @@
#include <linux/acpi.h>
#include <asm/msr.h>
#include <asm/tsc.h>
+#include "internal.h"
struct lpit_residency_info {
struct acpi_generic_address gaddr;
diff --git a/drivers/acpi/acpi_pnp.c b/drivers/acpi/acpi_pnp.c
index ffdcfcd4a10d..01abf26764b0 100644
--- a/drivers/acpi/acpi_pnp.c
+++ b/drivers/acpi/acpi_pnp.c
@@ -348,10 +348,22 @@ static bool acpi_pnp_match(const char *idstr, const struct acpi_device_id **matc
return false;
}
+/*
+ * If one of the device IDs below is present in the list of device IDs of a
+ * given ACPI device object, the PNP scan handler will not attach to that
+ * object, because there is a proper non-PNP driver in the kernel for the
+ * device represented by it.
+ */
+static const struct acpi_device_id acpi_nonpnp_device_ids[] = {
+ {"INTC1080"},
+ {"INTC1081"},
+ {""},
+};
+
static int acpi_pnp_attach(struct acpi_device *adev,
const struct acpi_device_id *id)
{
- return 1;
+ return !!acpi_match_device_ids(adev, acpi_nonpnp_device_ids);
}
static struct acpi_scan_handler acpi_pnp_handler = {
diff --git a/drivers/acpi/acpica/Makefile b/drivers/acpi/acpica/Makefile
index 9e0d95d76fff..30f3fc13c29d 100644
--- a/drivers/acpi/acpica/Makefile
+++ b/drivers/acpi/acpica/Makefile
@@ -3,7 +3,7 @@
# Makefile for ACPICA Core interpreter
#
-ccflags-y := -Os -D_LINUX -DBUILDING_ACPICA
+ccflags-y := -D_LINUX -DBUILDING_ACPICA
ccflags-$(CONFIG_ACPI_DEBUG) += -DACPI_DEBUG_OUTPUT
# use acpi.o to put all files here into acpi.o modparam namespace
diff --git a/drivers/acpi/acpica/hwvalid.c b/drivers/acpi/acpica/hwvalid.c
index 915b26448d2c..0d392e7b0747 100644
--- a/drivers/acpi/acpica/hwvalid.c
+++ b/drivers/acpi/acpica/hwvalid.c
@@ -23,8 +23,8 @@ acpi_hw_validate_io_request(acpi_io_address address, u32 bit_width);
*
* The table is used to implement the Microsoft port access rules that
* first appeared in Windows XP. Some ports are always illegal, and some
- * ports are only illegal if the BIOS calls _OSI with a win_XP string or
- * later (meaning that the BIOS itelf is post-XP.)
+ * ports are only illegal if the BIOS calls _OSI with nothing newer than
+ * the specific _OSI strings.
*
* This provides ACPICA with the desired port protections and
* Microsoft compatibility.
@@ -145,7 +145,8 @@ acpi_hw_validate_io_request(acpi_io_address address, u32 bit_width)
/* Port illegality may depend on the _OSI calls made by the BIOS */
- if (acpi_gbl_osi_data >= port_info->osi_dependency) {
+ if (port_info->osi_dependency == ACPI_ALWAYS_ILLEGAL ||
+ acpi_gbl_osi_data == port_info->osi_dependency) {
ACPI_DEBUG_PRINT((ACPI_DB_VALUES,
"Denied AML access to port 0x%8.8X%8.8X/%X (%s 0x%.4X-0x%.4X)\n",
ACPI_FORMAT_UINT64(address),
diff --git a/drivers/acpi/acpica/nsrepair.c b/drivers/acpi/acpica/nsrepair.c
index 367fcd201f96..ec512e06a48e 100644
--- a/drivers/acpi/acpica/nsrepair.c
+++ b/drivers/acpi/acpica/nsrepair.c
@@ -181,8 +181,9 @@ acpi_ns_simple_repair(struct acpi_evaluate_info *info,
* Try to fix if there was no return object. Warning if failed to fix.
*/
if (!return_object) {
- if (expected_btypes && (!(expected_btypes & ACPI_RTYPE_NONE))) {
- if (package_index != ACPI_NOT_PACKAGE_ELEMENT) {
+ if (expected_btypes) {
+ if (!(expected_btypes & ACPI_RTYPE_NONE) &&
+ package_index != ACPI_NOT_PACKAGE_ELEMENT) {
ACPI_WARN_PREDEFINED((AE_INFO,
info->full_pathname,
ACPI_WARN_ALWAYS,
@@ -196,14 +197,15 @@ acpi_ns_simple_repair(struct acpi_evaluate_info *info,
if (ACPI_SUCCESS(status)) {
return (AE_OK); /* Repair was successful */
}
- } else {
+ }
+
+ if (expected_btypes != ACPI_RTYPE_NONE) {
ACPI_WARN_PREDEFINED((AE_INFO,
info->full_pathname,
ACPI_WARN_ALWAYS,
"Missing expected return value"));
+ return (AE_AML_NO_RETURN_VALUE);
}
-
- return (AE_AML_NO_RETURN_VALUE);
}
}
diff --git a/drivers/acpi/acpica/nsxfname.c b/drivers/acpi/acpica/nsxfname.c
index b2cfdfef3194..a0592d15dd37 100644
--- a/drivers/acpi/acpica/nsxfname.c
+++ b/drivers/acpi/acpica/nsxfname.c
@@ -44,7 +44,7 @@ static char *acpi_ns_copy_device_id(struct acpi_pnp_device_id *dest,
acpi_status
acpi_get_handle(acpi_handle parent,
- acpi_string pathname, acpi_handle *ret_handle)
+ const char *pathname, acpi_handle *ret_handle)
{
acpi_status status;
struct acpi_namespace_node *node = NULL;
diff --git a/drivers/acpi/apei/einj.c b/drivers/acpi/apei/einj.c
index ab86b2f4e719..b4373e575660 100644
--- a/drivers/acpi/apei/einj.c
+++ b/drivers/acpi/apei/einj.c
@@ -616,6 +616,10 @@ static int error_type_set(void *data, u64 val)
u32 available_error_type = 0;
u32 tval, vendor;
+ /* Only low 32 bits for error type are valid */
+ if (val & GENMASK_ULL(63, 32))
+ return -EINVAL;
+
/*
* Vendor defined types have 0x80000000 bit set, and
* are not enumerated by ACPI_EINJ_GET_ERROR_TYPE
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index f4badcdde76e..9c67ed02d797 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -42,6 +42,8 @@
#define ACPI_BATTERY_STATE_CHARGING 0x2
#define ACPI_BATTERY_STATE_CRITICAL 0x4
+#define MAX_STRING_LENGTH 64
+
MODULE_AUTHOR("Paul Diefenbaugh");
MODULE_AUTHOR("Alexey Starikovskiy <astarikovskiy@suse.de>");
MODULE_DESCRIPTION("ACPI Battery Driver");
@@ -118,10 +120,10 @@ struct acpi_battery {
int capacity_granularity_1;
int capacity_granularity_2;
int alarm;
- char model_number[32];
- char serial_number[32];
- char type[32];
- char oem_info[32];
+ char model_number[MAX_STRING_LENGTH];
+ char serial_number[MAX_STRING_LENGTH];
+ char type[MAX_STRING_LENGTH];
+ char oem_info[MAX_STRING_LENGTH];
int state;
int power_unit;
unsigned long flags;
@@ -437,16 +439,25 @@ static int extract_package(struct acpi_battery *battery,
element = &package->package.elements[i];
if (offsets[i].mode) {
u8 *ptr = (u8 *)battery + offsets[i].offset;
+ u32 len = MAX_STRING_LENGTH;
+
+ switch (element->type) {
+ case ACPI_TYPE_BUFFER:
+ if (len > element->buffer.length + 1)
+ len = element->buffer.length + 1;
+
+ fallthrough;
+ case ACPI_TYPE_STRING:
+ strscpy(ptr, element->string.pointer, len);
- if (element->type == ACPI_TYPE_STRING ||
- element->type == ACPI_TYPE_BUFFER)
- strncpy(ptr, element->string.pointer, 32);
- else if (element->type == ACPI_TYPE_INTEGER) {
- strncpy(ptr, (u8 *)&element->integer.value,
- sizeof(u64));
- ptr[sizeof(u64)] = 0;
- } else
+ break;
+ case ACPI_TYPE_INTEGER:
+ strscpy(ptr, (u8 *)&element->integer.value, sizeof(u64) + 1);
+
+ break;
+ default:
*ptr = 0; /* don't have value */
+ }
} else {
int *x = (int *)((u8 *)battery + offsets[i].offset);
*x = (element->type == ACPI_TYPE_INTEGER) ?
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 0c05ccde1f7a..9531dd0fef50 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -1014,7 +1014,7 @@ static int acpi_bus_match(struct device *dev, struct device_driver *drv)
&& !acpi_match_device_ids(acpi_dev, acpi_drv->ids);
}
-static int acpi_device_uevent(struct device *dev, struct kobj_uevent_env *env)
+static int acpi_device_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
return __acpi_device_uevent_modalias(to_acpi_device(dev), env);
}
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index 0f17b1c32718..c51d3ccb4cca 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -193,7 +193,7 @@ static struct attribute *cppc_attrs[] = {
};
ATTRIBUTE_GROUPS(cppc);
-static struct kobj_type cppc_ktype = {
+static const struct kobj_type cppc_ktype = {
.sysfs_ops = &kobj_sysfs_ops,
.default_groups = cppc_groups,
};
@@ -595,6 +595,7 @@ bool __weak cpc_supported_by_cpu(void)
/**
* pcc_data_alloc() - Allocate the pcc_data memory for pcc subspace
+ * @pcc_ss_id: PCC Subspace index as in the PCC client ACPI package.
*
* Check and allocate the cppc_pcc_data memory.
* In some processor configurations it is possible that same subspace
@@ -1154,6 +1155,19 @@ int cppc_get_nominal_perf(int cpunum, u64 *nominal_perf)
}
/**
+ * cppc_get_epp_perf - Get the epp register value.
+ * @cpunum: CPU from which to get epp preference value.
+ * @epp_perf: Return address.
+ *
+ * Return: 0 for success, -EIO otherwise.
+ */
+int cppc_get_epp_perf(int cpunum, u64 *epp_perf)
+{
+ return cppc_get_perf(cpunum, ENERGY_PERF, epp_perf);
+}
+EXPORT_SYMBOL_GPL(cppc_get_epp_perf);
+
+/**
* cppc_get_perf_caps - Get a CPU's performance capabilities.
* @cpunum: CPU from which to get capabilities info.
* @perf_caps: ptr to cppc_perf_caps. See cppc_acpi.h
@@ -1365,6 +1379,60 @@ out_err:
}
EXPORT_SYMBOL_GPL(cppc_get_perf_ctrs);
+/*
+ * Set Energy Performance Preference Register value through
+ * Performance Controls Interface
+ */
+int cppc_set_epp_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls, bool enable)
+{
+ int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
+ struct cpc_register_resource *epp_set_reg;
+ struct cpc_register_resource *auto_sel_reg;
+ struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
+ struct cppc_pcc_data *pcc_ss_data = NULL;
+ int ret;
+
+ if (!cpc_desc) {
+ pr_debug("No CPC descriptor for CPU:%d\n", cpu);
+ return -ENODEV;
+ }
+
+ auto_sel_reg = &cpc_desc->cpc_regs[AUTO_SEL_ENABLE];
+ epp_set_reg = &cpc_desc->cpc_regs[ENERGY_PERF];
+
+ if (CPC_IN_PCC(epp_set_reg) || CPC_IN_PCC(auto_sel_reg)) {
+ if (pcc_ss_id < 0) {
+ pr_debug("Invalid pcc_ss_id for CPU:%d\n", cpu);
+ return -ENODEV;
+ }
+
+ if (CPC_SUPPORTED(auto_sel_reg)) {
+ ret = cpc_write(cpu, auto_sel_reg, enable);
+ if (ret)
+ return ret;
+ }
+
+ if (CPC_SUPPORTED(epp_set_reg)) {
+ ret = cpc_write(cpu, epp_set_reg, perf_ctrls->energy_perf);
+ if (ret)
+ return ret;
+ }
+
+ pcc_ss_data = pcc_data[pcc_ss_id];
+
+ down_write(&pcc_ss_data->pcc_lock);
+ /* after writing CPC, transfer the ownership of PCC to platform */
+ ret = send_pcc_cmd(pcc_ss_id, CMD_WRITE);
+ up_write(&pcc_ss_data->pcc_lock);
+ } else {
+ ret = -ENOTSUPP;
+ pr_debug("_CPC in PCC is not supported\n");
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cppc_set_epp_perf);
+
/**
* cppc_set_enable - Set to enable CPPC on the processor by writing the
* Continuous Performance Control package EnableRegister field.
@@ -1536,6 +1604,7 @@ EXPORT_SYMBOL_GPL(cppc_set_perf);
/**
* cppc_get_transition_latency - returns frequency transition latency in ns
+ * @cpu_num: CPU number for per_cpu().
*
* ACPI CPPC does not explicitly specify how a platform can specify the
* transition latency for performance change requests. The closest we have
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index 97450f4003cc..f007116a8427 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -484,6 +484,25 @@ void acpi_dev_power_up_children_with_adr(struct acpi_device *adev)
acpi_dev_for_each_child(adev, acpi_power_up_if_adr_present, NULL);
}
+/**
+ * acpi_dev_power_state_for_wake - Deepest power state for wakeup signaling
+ * @adev: ACPI companion of the target device.
+ *
+ * Evaluate _S0W for @adev and return the value produced by it or return
+ * ACPI_STATE_UNKNOWN on errors (including _S0W not present).
+ */
+u8 acpi_dev_power_state_for_wake(struct acpi_device *adev)
+{
+ unsigned long long state;
+ acpi_status status;
+
+ status = acpi_evaluate_integer(adev->handle, "_S0W", NULL, &state);
+ if (ACPI_FAILURE(status))
+ return ACPI_STATE_UNKNOWN;
+
+ return state;
+}
+
#ifdef CONFIG_PM
static DEFINE_MUTEX(acpi_pm_notifier_lock);
static DEFINE_MUTEX(acpi_pm_notifier_install_lock);
diff --git a/drivers/acpi/device_sysfs.c b/drivers/acpi/device_sysfs.c
index 120873dad2cc..0fbfbaa8d8e3 100644
--- a/drivers/acpi/device_sysfs.c
+++ b/drivers/acpi/device_sysfs.c
@@ -78,7 +78,7 @@ static void acpi_data_node_release(struct kobject *kobj)
complete(&dn->kobj_done);
}
-static struct kobj_type acpi_data_node_ktype = {
+static const struct kobj_type acpi_data_node_ktype = {
.sysfs_ops = &acpi_data_node_sysfs_ops,
.default_groups = acpi_data_node_default_groups,
.release = acpi_data_node_release,
@@ -133,7 +133,7 @@ static void acpi_hide_nondev_subnodes(struct acpi_device_data *data)
* -EINVAL: output error
* -ENOMEM: output is truncated
*/
-static int create_pnp_modalias(struct acpi_device *acpi_dev, char *modalias,
+static int create_pnp_modalias(const struct acpi_device *acpi_dev, char *modalias,
int size)
{
int len;
@@ -191,7 +191,7 @@ static int create_pnp_modalias(struct acpi_device *acpi_dev, char *modalias,
* only be called for devices having ACPI_DT_NAMESPACE_HID in their list of
* ACPI/PNP IDs.
*/
-static int create_of_modalias(struct acpi_device *acpi_dev, char *modalias,
+static int create_of_modalias(const struct acpi_device *acpi_dev, char *modalias,
int size)
{
struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER };
@@ -239,7 +239,7 @@ static int create_of_modalias(struct acpi_device *acpi_dev, char *modalias,
return len;
}
-int __acpi_device_uevent_modalias(struct acpi_device *adev,
+int __acpi_device_uevent_modalias(const struct acpi_device *adev,
struct kobj_uevent_env *env)
{
int len;
@@ -277,7 +277,7 @@ int __acpi_device_uevent_modalias(struct acpi_device *adev,
* Because other buses do not support ACPI HIDs & CIDs, e.g. for a device with
* hid:IBM0001 and cid:ACPI0001 you get: "acpi:IBM0001:ACPI0001".
*/
-int acpi_device_uevent_modalias(struct device *dev, struct kobj_uevent_env *env)
+int acpi_device_uevent_modalias(const struct device *dev, struct kobj_uevent_env *env)
{
return __acpi_device_uevent_modalias(acpi_companion_match(dev), env);
}
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index ec584442fb29..06ad497067ac 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -120,7 +120,7 @@ int acpi_bus_register_early_device(int type);
Device Matching and Notification
-------------------------------------------------------------------------- */
struct acpi_device *acpi_companion_match(const struct device *dev);
-int __acpi_device_uevent_modalias(struct acpi_device *adev,
+int __acpi_device_uevent_modalias(const struct acpi_device *adev,
struct kobj_uevent_env *env);
/* --------------------------------------------------------------------------
diff --git a/drivers/acpi/ioapic.c b/drivers/acpi/ioapic.c
index a690c7b18623..6677955b4a8e 100644
--- a/drivers/acpi/ioapic.c
+++ b/drivers/acpi/ioapic.c
@@ -24,6 +24,7 @@
#include <linux/acpi.h>
#include <linux/pci.h>
#include <acpi/acpi.h>
+#include "internal.h"
struct acpi_pci_ioapic {
acpi_handle root_handle;
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index f1cc5ec6a3b6..4e48d6db05eb 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -3297,8 +3297,8 @@ void acpi_nfit_shutdown(void *data)
mutex_lock(&acpi_desc->init_mutex);
set_bit(ARS_CANCEL, &acpi_desc->scrub_flags);
- cancel_delayed_work_sync(&acpi_desc->dwork);
mutex_unlock(&acpi_desc->init_mutex);
+ cancel_delayed_work_sync(&acpi_desc->dwork);
/*
* Bounce the nvdimm bus lock to make sure any in-flight
diff --git a/drivers/acpi/pfr_telemetry.c b/drivers/acpi/pfr_telemetry.c
index 27fb6cdad75f..843f678ade0c 100644
--- a/drivers/acpi/pfr_telemetry.c
+++ b/drivers/acpi/pfr_telemetry.c
@@ -310,7 +310,7 @@ pfrt_log_mmap(struct file *file, struct vm_area_struct *vma)
return -EROFS;
/* changing from read to write with mprotect is not allowed */
- vma->vm_flags &= ~VM_MAYWRITE;
+ vm_flags_clear(vma, VM_MAYWRITE);
pfrt_log_dev = to_pfrt_log_dev(file);
diff --git a/drivers/acpi/pmic/intel_pmic_bytcrc.c b/drivers/acpi/pmic/intel_pmic_bytcrc.c
index 9ea79f210965..2b09f8da5400 100644
--- a/drivers/acpi/pmic/intel_pmic_bytcrc.c
+++ b/drivers/acpi/pmic/intel_pmic_bytcrc.c
@@ -283,6 +283,7 @@ static const struct intel_pmic_opregion_data intel_crc_pmic_opregion_data = {
.power_table_count= ARRAY_SIZE(power_table),
.thermal_table = thermal_table,
.thermal_table_count = ARRAY_SIZE(thermal_table),
+ .pmic_i2c_address = 0x6e,
};
static int intel_crc_pmic_opregion_probe(struct platform_device *pdev)
diff --git a/drivers/acpi/pmic/intel_pmic_chtdc_ti.c b/drivers/acpi/pmic/intel_pmic_chtdc_ti.c
index 418eec523025..c84ef3d15181 100644
--- a/drivers/acpi/pmic/intel_pmic_chtdc_ti.c
+++ b/drivers/acpi/pmic/intel_pmic_chtdc_ti.c
@@ -20,19 +20,19 @@
#define CHTDC_TI_GPADC 0x5a
static struct pmic_table chtdc_ti_power_table[] = {
- { .address = 0x00, .reg = 0x41 },
- { .address = 0x04, .reg = 0x42 },
- { .address = 0x08, .reg = 0x43 },
- { .address = 0x0c, .reg = 0x45 },
- { .address = 0x10, .reg = 0x46 },
- { .address = 0x14, .reg = 0x47 },
- { .address = 0x18, .reg = 0x48 },
- { .address = 0x1c, .reg = 0x49 },
- { .address = 0x20, .reg = 0x4a },
- { .address = 0x24, .reg = 0x4b },
- { .address = 0x28, .reg = 0x4c },
- { .address = 0x2c, .reg = 0x4d },
- { .address = 0x30, .reg = 0x4e },
+ { .address = 0x00, .reg = 0x41 }, /* LDO1 */
+ { .address = 0x04, .reg = 0x42 }, /* LDO2 */
+ { .address = 0x08, .reg = 0x43 }, /* LDO3 */
+ { .address = 0x0c, .reg = 0x45 }, /* LDO5 */
+ { .address = 0x10, .reg = 0x46 }, /* LDO6 */
+ { .address = 0x14, .reg = 0x47 }, /* LDO7 */
+ { .address = 0x18, .reg = 0x48 }, /* LDO8 */
+ { .address = 0x1c, .reg = 0x49 }, /* LDO9 */
+ { .address = 0x20, .reg = 0x4a }, /* LD10 */
+ { .address = 0x24, .reg = 0x4b }, /* LD11 */
+ { .address = 0x28, .reg = 0x4c }, /* LD12 */
+ { .address = 0x2c, .reg = 0x4d }, /* LD13 */
+ { .address = 0x30, .reg = 0x4e }, /* LD14 */
};
static struct pmic_table chtdc_ti_thermal_table[] = {
diff --git a/drivers/acpi/pptt.c b/drivers/acpi/pptt.c
index c91342dcbcd6..10975bb603fb 100644
--- a/drivers/acpi/pptt.c
+++ b/drivers/acpi/pptt.c
@@ -81,6 +81,7 @@ static inline bool acpi_pptt_match_type(int table_type, int type)
* acpi_pptt_walk_cache() - Attempt to find the requested acpi_pptt_cache
* @table_hdr: Pointer to the head of the PPTT table
* @local_level: passed res reflects this cache level
+ * @split_levels: Number of split cache levels (data/instruction).
* @res: cache resource in the PPTT we want to walk
* @found: returns a pointer to the requested level if found
* @level: the requested cache level
@@ -100,6 +101,7 @@ static inline bool acpi_pptt_match_type(int table_type, int type)
*/
static unsigned int acpi_pptt_walk_cache(struct acpi_table_header *table_hdr,
unsigned int local_level,
+ unsigned int *split_levels,
struct acpi_subtable_header *res,
struct acpi_pptt_cache **found,
unsigned int level, int type)
@@ -113,8 +115,17 @@ static unsigned int acpi_pptt_walk_cache(struct acpi_table_header *table_hdr,
while (cache) {
local_level++;
+ if (!(cache->flags & ACPI_PPTT_CACHE_TYPE_VALID)) {
+ cache = fetch_pptt_cache(table_hdr, cache->next_level_of_cache);
+ continue;
+ }
+
+ if (split_levels &&
+ (acpi_pptt_match_type(cache->attributes, ACPI_PPTT_CACHE_TYPE_DATA) ||
+ acpi_pptt_match_type(cache->attributes, ACPI_PPTT_CACHE_TYPE_INSTR)))
+ *split_levels = local_level;
+
if (local_level == level &&
- cache->flags & ACPI_PPTT_CACHE_TYPE_VALID &&
acpi_pptt_match_type(cache->attributes, type)) {
if (*found != NULL && cache != *found)
pr_warn("Found duplicate cache level/type unable to determine uniqueness\n");
@@ -135,8 +146,8 @@ static unsigned int acpi_pptt_walk_cache(struct acpi_table_header *table_hdr,
static struct acpi_pptt_cache *
acpi_find_cache_level(struct acpi_table_header *table_hdr,
struct acpi_pptt_processor *cpu_node,
- unsigned int *starting_level, unsigned int level,
- int type)
+ unsigned int *starting_level, unsigned int *split_levels,
+ unsigned int level, int type)
{
struct acpi_subtable_header *res;
unsigned int number_of_levels = *starting_level;
@@ -149,7 +160,8 @@ acpi_find_cache_level(struct acpi_table_header *table_hdr,
resource++;
local_level = acpi_pptt_walk_cache(table_hdr, *starting_level,
- res, &ret, level, type);
+ split_levels, res, &ret,
+ level, type);
/*
* we are looking for the max depth. Since its potentially
* possible for a given node to have resources with differing
@@ -165,29 +177,29 @@ acpi_find_cache_level(struct acpi_table_header *table_hdr,
}
/**
- * acpi_count_levels() - Given a PPTT table, and a CPU node, count the caches
+ * acpi_count_levels() - Given a PPTT table, and a CPU node, count the cache
+ * levels and split cache levels (data/instruction).
* @table_hdr: Pointer to the head of the PPTT table
* @cpu_node: processor node we wish to count caches for
+ * @levels: Number of levels if success.
+ * @split_levels: Number of split cache levels (data/instruction) if
+ * success. Can by NULL.
*
* Given a processor node containing a processing unit, walk into it and count
* how many levels exist solely for it, and then walk up each level until we hit
* the root node (ignore the package level because it may be possible to have
- * caches that exist across packages). Count the number of cache levels that
- * exist at each level on the way up.
- *
- * Return: Total number of levels found.
+ * caches that exist across packages). Count the number of cache levels and
+ * split cache levels (data/instruction) that exist at each level on the way
+ * up.
*/
-static int acpi_count_levels(struct acpi_table_header *table_hdr,
- struct acpi_pptt_processor *cpu_node)
+static void acpi_count_levels(struct acpi_table_header *table_hdr,
+ struct acpi_pptt_processor *cpu_node,
+ unsigned int *levels, unsigned int *split_levels)
{
- int total_levels = 0;
-
do {
- acpi_find_cache_level(table_hdr, cpu_node, &total_levels, 0, 0);
+ acpi_find_cache_level(table_hdr, cpu_node, levels, split_levels, 0, 0);
cpu_node = fetch_pptt_node(table_hdr, cpu_node->parent);
} while (cpu_node);
-
- return total_levels;
}
/**
@@ -281,19 +293,6 @@ static struct acpi_pptt_processor *acpi_find_processor_node(struct acpi_table_he
return NULL;
}
-static int acpi_find_cache_levels(struct acpi_table_header *table_hdr,
- u32 acpi_cpu_id)
-{
- int number_of_levels = 0;
- struct acpi_pptt_processor *cpu;
-
- cpu = acpi_find_processor_node(table_hdr, acpi_cpu_id);
- if (cpu)
- number_of_levels = acpi_count_levels(table_hdr, cpu);
-
- return number_of_levels;
-}
-
static u8 acpi_cache_type(enum cache_type type)
{
switch (type) {
@@ -334,7 +333,7 @@ static struct acpi_pptt_cache *acpi_find_cache_node(struct acpi_table_header *ta
while (cpu_node && !found) {
found = acpi_find_cache_level(table_hdr, cpu_node,
- &total_levels, level, acpi_type);
+ &total_levels, NULL, level, acpi_type);
*node = cpu_node;
cpu_node = fetch_pptt_node(table_hdr, cpu_node->parent);
}
@@ -602,32 +601,48 @@ static int check_acpi_cpu_flag(unsigned int cpu, int rev, u32 flag)
}
/**
- * acpi_find_last_cache_level() - Determines the number of cache levels for a PE
+ * acpi_get_cache_info() - Determine the number of cache levels and
+ * split cache levels (data/instruction) and for a PE.
* @cpu: Kernel logical CPU number
+ * @levels: Number of levels if success.
+ * @split_levels: Number of levels being split (i.e. data/instruction)
+ * if success. Can by NULL.
*
* Given a logical CPU number, returns the number of levels of cache represented
* in the PPTT. Errors caused by lack of a PPTT table, or otherwise, return 0
* indicating we didn't find any cache levels.
*
- * Return: Cache levels visible to this core.
+ * Return: -ENOENT if no PPTT table or no PPTT processor struct found.
+ * 0 on success.
*/
-int acpi_find_last_cache_level(unsigned int cpu)
+int acpi_get_cache_info(unsigned int cpu, unsigned int *levels,
+ unsigned int *split_levels)
{
- u32 acpi_cpu_id;
+ struct acpi_pptt_processor *cpu_node;
struct acpi_table_header *table;
- int number_of_levels = 0;
+ u32 acpi_cpu_id;
+
+ *levels = 0;
+ if (split_levels)
+ *split_levels = 0;
table = acpi_get_pptt();
if (!table)
return -ENOENT;
- pr_debug("Cache Setup find last level CPU=%d\n", cpu);
+ pr_debug("Cache Setup: find cache levels for CPU=%d\n", cpu);
acpi_cpu_id = get_acpi_id_for_cpu(cpu);
- number_of_levels = acpi_find_cache_levels(table, acpi_cpu_id);
- pr_debug("Cache Setup find last level level=%d\n", number_of_levels);
+ cpu_node = acpi_find_processor_node(table, acpi_cpu_id);
+ if (!cpu_node)
+ return -ENOENT;
+
+ acpi_count_levels(table, cpu_node, levels, split_levels);
- return number_of_levels;
+ pr_debug("Cache Setup: last_level=%d split_levels=%d\n",
+ *levels, split_levels ? *split_levels : -1);
+
+ return 0;
}
/**
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 7bf882fcd64b..9718d07cc2a2 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -109,8 +109,8 @@ static const struct dmi_system_id processor_power_dmi_table[] = {
static void __cpuidle acpi_safe_halt(void)
{
if (!tif_need_resched()) {
- safe_halt();
- local_irq_disable();
+ raw_safe_halt();
+ raw_local_irq_disable();
}
}
@@ -147,7 +147,7 @@ static void lapic_timer_check_state(int state, struct acpi_processor *pr,
static void __lapic_timer_propagate_broadcast(void *arg)
{
- struct acpi_processor *pr = (struct acpi_processor *) arg;
+ struct acpi_processor *pr = arg;
if (pr->power.timer_broadcast_on_state < INT_MAX)
tick_broadcast_enable();
@@ -523,8 +523,11 @@ static int acpi_idle_bm_check(void)
return bm_status;
}
-static void wait_for_freeze(void)
+static __cpuidle void io_idle(unsigned long addr)
{
+ /* IO port based C-state */
+ inb(addr);
+
#ifdef CONFIG_X86
/* No delay is needed if we are in guest */
if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
@@ -569,9 +572,7 @@ static void __cpuidle acpi_idle_do_entry(struct acpi_processor_cx *cx)
} else if (cx->entry_method == ACPI_CSTATE_HALT) {
acpi_safe_halt();
} else {
- /* IO port based C-state */
- inb(cx->address);
- wait_for_freeze();
+ io_idle(cx->address);
}
perf_lopwr_cb(false);
@@ -593,8 +594,7 @@ static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)
if (cx->entry_method == ACPI_CSTATE_HALT)
safe_halt();
else if (cx->entry_method == ACPI_CSTATE_SYSTEMIO) {
- inb(cx->address);
- wait_for_freeze();
+ io_idle(cx->address);
} else
return -ENODEV;
@@ -607,7 +607,7 @@ static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)
return 0;
}
-static bool acpi_idle_fallback_to_c1(struct acpi_processor *pr)
+static __always_inline bool acpi_idle_fallback_to_c1(struct acpi_processor *pr)
{
return IS_ENABLED(CONFIG_HOTPLUG_CPU) && !pr->flags.has_cst &&
!(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED);
@@ -642,6 +642,8 @@ static int __cpuidle acpi_idle_enter_bm(struct cpuidle_driver *drv,
*/
bool dis_bm = pr->flags.bm_control;
+ instrumentation_begin();
+
/* If we can skip BM, demote to a safe state. */
if (!cx->bm_sts_skip && acpi_idle_bm_check()) {
dis_bm = false;
@@ -663,11 +665,11 @@ static int __cpuidle acpi_idle_enter_bm(struct cpuidle_driver *drv,
raw_spin_unlock(&c3_lock);
}
- ct_idle_enter();
+ ct_cpuidle_enter();
acpi_idle_do_entry(cx);
- ct_idle_exit();
+ ct_cpuidle_exit();
/* Re-enable bus master arbitration */
if (dis_bm) {
@@ -677,6 +679,8 @@ static int __cpuidle acpi_idle_enter_bm(struct cpuidle_driver *drv,
raw_spin_unlock(&c3_lock);
}
+ instrumentation_end();
+
return index;
}
@@ -1219,6 +1223,8 @@ static int acpi_processor_setup_lpi_states(struct acpi_processor *pr)
state->target_residency = lpi->min_residency;
if (lpi->arch_flags)
state->flags |= CPUIDLE_FLAG_TIMER_STOP;
+ if (i != 0 && lpi->entry_method == ACPI_CSTATE_FFH)
+ state->flags |= CPUIDLE_FLAG_RCU_IDLE;
state->enter = acpi_idle_lpi_enter;
drv->safe_state_index = i;
}
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index 970f04a958cd..4265814c74f8 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -53,6 +53,8 @@ static int acpi_processor_get_platform_limit(struct acpi_processor *pr)
{
acpi_status status = 0;
unsigned long long ppc = 0;
+ s32 qos_value;
+ int index;
int ret;
if (!pr)
@@ -72,17 +74,30 @@ static int acpi_processor_get_platform_limit(struct acpi_processor *pr)
}
}
+ index = ppc;
+
+ if (pr->performance_platform_limit == index ||
+ ppc >= pr->performance->state_count)
+ return 0;
+
pr_debug("CPU %d: _PPC is %d - frequency %s limited\n", pr->id,
- (int)ppc, ppc ? "" : "not");
+ index, index ? "is" : "is not");
- pr->performance_platform_limit = (int)ppc;
+ pr->performance_platform_limit = index;
- if (ppc >= pr->performance->state_count ||
- unlikely(!freq_qos_request_active(&pr->perflib_req)))
+ if (unlikely(!freq_qos_request_active(&pr->perflib_req)))
return 0;
- ret = freq_qos_update_request(&pr->perflib_req,
- pr->performance->states[ppc].core_frequency * 1000);
+ /*
+ * If _PPC returns 0, it means that all of the available states can be
+ * used ("no limit").
+ */
+ if (index == 0)
+ qos_value = FREQ_QOS_MAX_DEFAULT_VALUE;
+ else
+ qos_value = pr->performance->states[index].core_frequency * 1000;
+
+ ret = freq_qos_update_request(&pr->perflib_req, qos_value);
if (ret < 0) {
pr_warn("Failed to update perflib freq constraint: CPU%d (%d)\n",
pr->id, ret);
@@ -166,9 +181,16 @@ void acpi_processor_ppc_init(struct cpufreq_policy *policy)
if (!pr)
continue;
+ /*
+ * Reset performance_platform_limit in case there is a stale
+ * value in it, so as to make it match the "no limit" QoS value
+ * below.
+ */
+ pr->performance_platform_limit = 0;
+
ret = freq_qos_add_request(&policy->constraints,
- &pr->perflib_req,
- FREQ_QOS_MAX, INT_MAX);
+ &pr->perflib_req, FREQ_QOS_MAX,
+ FREQ_QOS_MAX_DEFAULT_VALUE);
if (ret < 0)
pr_err("Failed to add freq constraint for CPU%d (%d)\n",
cpu, ret);
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index 192d1784e409..a222bda7e15b 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -467,17 +467,34 @@ static const struct dmi_system_id lenovo_laptop[] = {
{ }
};
-static const struct dmi_system_id schenker_gm_rg[] = {
+static const struct dmi_system_id tongfang_gm_rg[] = {
{
- .ident = "XMG CORE 15 (M22)",
+ .ident = "TongFang GMxRGxx/XMG CORE 15 (M22)/TUXEDO Stellaris 15 Gen4 AMD",
.matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "SchenkerTechnologiesGmbH"),
DMI_MATCH(DMI_BOARD_NAME, "GMxRGxx"),
},
},
{ }
};
+static const struct dmi_system_id maingear_laptop[] = {
+ {
+ .ident = "MAINGEAR Vector Pro 2 15",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Micro Electronics Inc"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MG-VCP2-15A3070T"),
+ }
+ },
+ {
+ .ident = "MAINGEAR Vector Pro 2 17",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Micro Electronics Inc"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MG-VCP2-17A3070T"),
+ },
+ },
+ { }
+};
+
struct irq_override_cmp {
const struct dmi_system_id *system;
unsigned char irq;
@@ -492,7 +509,8 @@ static const struct irq_override_cmp override_table[] = {
{ asus_laptop, 1, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, false },
{ lenovo_laptop, 6, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, true },
{ lenovo_laptop, 10, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, true },
- { schenker_gm_rg, 1, ACPI_EDGE_SENSITIVE, ACPI_ACTIVE_LOW, 1, true },
+ { tongfang_gm_rg, 1, ACPI_EDGE_SENSITIVE, ACPI_ACTIVE_LOW, 1, true },
+ { maingear_laptop, 1, ACPI_EDGE_SENSITIVE, ACPI_ACTIVE_LOW, 1, true },
};
static bool acpi_dev_irq_override(u32 gsi, u8 triggering, u8 polarity,
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index 7db3b530279b..7f4ff56c9d42 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -953,7 +953,7 @@ static struct attribute *hotplug_profile_attrs[] = {
};
ATTRIBUTE_GROUPS(hotplug_profile);
-static struct kobj_type acpi_hotplug_profile_ktype = {
+static const struct kobj_type acpi_hotplug_profile_ktype = {
.sysfs_ops = &kobj_sysfs_ops,
.default_groups = hotplug_profile_groups,
};
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
index 5fbc32b802d0..7b4680da57d7 100644
--- a/drivers/acpi/tables.c
+++ b/drivers/acpi/tables.c
@@ -555,7 +555,8 @@ static const char table_sigs[][ACPI_NAMESEG_SIZE] __initconst = {
ACPI_SIG_WDDT, ACPI_SIG_WDRT, ACPI_SIG_DSDT, ACPI_SIG_FADT,
ACPI_SIG_PSDT, ACPI_SIG_RSDT, ACPI_SIG_XSDT, ACPI_SIG_SSDT,
ACPI_SIG_IORT, ACPI_SIG_NFIT, ACPI_SIG_HMAT, ACPI_SIG_PPTT,
- ACPI_SIG_NHLT, ACPI_SIG_AEST, ACPI_SIG_CEDT, ACPI_SIG_AGDI };
+ ACPI_SIG_NHLT, ACPI_SIG_AEST, ACPI_SIG_CEDT, ACPI_SIG_AGDI,
+ ACPI_SIG_NBFT };
#define ACPI_HEADER_SIZE sizeof(struct acpi_table_header)
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index a8c02608dde4..710ac640267d 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -434,7 +434,7 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
/* Lenovo Ideapad Z570 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_NAME, "102434U"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Ideapad Z570"),
},
},
{
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index ff7454a38058..ce88af9eb562 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -235,9 +235,9 @@ static int amba_match(struct device *dev, struct device_driver *drv)
return amba_lookup(pcdrv->id_table, pcdev) != NULL;
}
-static int amba_uevent(struct device *dev, struct kobj_uevent_env *env)
+static int amba_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
- struct amba_device *pcdev = to_amba_device(dev);
+ const struct amba_device *pcdev = to_amba_device(dev);
int retval = 0;
retval = add_uevent_var(env, "AMBA_ID=%08x", pcdev->periphid);
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 880224ec6abb..fb56bfc45096 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -277,11 +277,11 @@ _binder_proc_lock(struct binder_proc *proc, int line)
/**
* binder_proc_unlock() - Release spinlock for given binder_proc
- * @proc: struct binder_proc to acquire
+ * @proc: struct binder_proc to acquire
*
* Release lock acquired via binder_proc_lock()
*/
-#define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
+#define binder_proc_unlock(proc) _binder_proc_unlock(proc, __LINE__)
static void
_binder_proc_unlock(struct binder_proc *proc, int line)
__releases(&proc->outer_lock)
@@ -378,7 +378,7 @@ _binder_node_inner_lock(struct binder_node *node, int line)
}
/**
- * binder_node_unlock() - Release node and inner locks
+ * binder_node_inner_unlock() - Release node and inner locks
* @node: struct binder_node to acquire
*
* Release lock acquired via binder_node_lock()
@@ -1194,13 +1194,13 @@ static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
}
/**
- * binder_dec_ref() - dec the ref for given handle
+ * binder_dec_ref_olocked() - dec the ref for given handle
* @ref: ref to be decremented
* @strong: if true, strong decrement, else weak
*
* Decrement the ref.
*
- * Return: true if ref is cleaned up and ready to be freed
+ * Return: %true if ref is cleaned up and ready to be freed.
*/
static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
{
@@ -2728,7 +2728,10 @@ binder_find_outdated_transaction_ilocked(struct binder_transaction *t,
*
* Return: 0 if the transaction was successfully queued
* BR_DEAD_REPLY if the target process or thread is dead
- * BR_FROZEN_REPLY if the target process or thread is frozen
+ * BR_FROZEN_REPLY if the target process or thread is frozen and
+ * the sync transaction was rejected
+ * BR_TRANSACTION_PENDING_FROZEN if the target process is frozen
+ * and the async transaction was successfully queued
*/
static int binder_proc_transaction(struct binder_transaction *t,
struct binder_proc *proc,
@@ -2738,6 +2741,7 @@ static int binder_proc_transaction(struct binder_transaction *t,
bool oneway = !!(t->flags & TF_ONE_WAY);
bool pending_async = false;
struct binder_transaction *t_outdated = NULL;
+ bool frozen = false;
BUG_ON(!node);
binder_node_lock(node);
@@ -2751,15 +2755,16 @@ static int binder_proc_transaction(struct binder_transaction *t,
binder_inner_proc_lock(proc);
if (proc->is_frozen) {
+ frozen = true;
proc->sync_recv |= !oneway;
proc->async_recv |= oneway;
}
- if ((proc->is_frozen && !oneway) || proc->is_dead ||
+ if ((frozen && !oneway) || proc->is_dead ||
(thread && thread->is_dead)) {
binder_inner_proc_unlock(proc);
binder_node_unlock(node);
- return proc->is_frozen ? BR_FROZEN_REPLY : BR_DEAD_REPLY;
+ return frozen ? BR_FROZEN_REPLY : BR_DEAD_REPLY;
}
if (!thread && !pending_async)
@@ -2770,7 +2775,7 @@ static int binder_proc_transaction(struct binder_transaction *t,
} else if (!pending_async) {
binder_enqueue_work_ilocked(&t->work, &proc->todo);
} else {
- if ((t->flags & TF_UPDATE_TXN) && proc->is_frozen) {
+ if ((t->flags & TF_UPDATE_TXN) && frozen) {
t_outdated = binder_find_outdated_transaction_ilocked(t,
&node->async_todo);
if (t_outdated) {
@@ -2807,14 +2812,17 @@ static int binder_proc_transaction(struct binder_transaction *t,
binder_stats_deleted(BINDER_STAT_TRANSACTION);
}
+ if (oneway && frozen)
+ return BR_TRANSACTION_PENDING_FROZEN;
+
return 0;
}
/**
* binder_get_node_refs_for_txn() - Get required refs on node for txn
* @node: struct binder_node for which to get refs
- * @proc: returns @node->proc if valid
- * @error: if no @proc then returns BR_DEAD_REPLY
+ * @procp: returns @node->proc if valid
+ * @error: if no @procp then returns BR_DEAD_REPLY
*
* User-space normally keeps the node alive when creating a transaction
* since it has a reference to the target. The local strong ref keeps it
@@ -2828,8 +2836,8 @@ static int binder_proc_transaction(struct binder_transaction *t,
* constructing the transaction, so we take that here as well.
*
* Return: The target_node with refs taken or NULL if no @node->proc is NULL.
- * Also sets @proc if valid. If the @node->proc is NULL indicating that the
- * target proc has died, @error is set to BR_DEAD_REPLY
+ * Also sets @procp if valid. If the @node->proc is NULL indicating that the
+ * target proc has died, @error is set to BR_DEAD_REPLY.
*/
static struct binder_node *binder_get_node_refs_for_txn(
struct binder_node *node,
@@ -3607,9 +3615,17 @@ static void binder_transaction(struct binder_proc *proc,
} else {
BUG_ON(target_node == NULL);
BUG_ON(t->buffer->async_transaction != 1);
- binder_enqueue_thread_work(thread, tcomplete);
return_error = binder_proc_transaction(t, target_proc, NULL);
- if (return_error)
+ /*
+ * Let the caller know when async transaction reaches a frozen
+ * process and is put in a pending queue, waiting for the target
+ * process to be unfrozen.
+ */
+ if (return_error == BR_TRANSACTION_PENDING_FROZEN)
+ tcomplete->type = BINDER_WORK_TRANSACTION_PENDING;
+ binder_enqueue_thread_work(thread, tcomplete);
+ if (return_error &&
+ return_error != BR_TRANSACTION_PENDING_FROZEN)
goto err_dead_proc_or_thread;
}
if (target_thread)
@@ -4440,10 +4456,13 @@ retry:
binder_stat_br(proc, thread, cmd);
} break;
case BINDER_WORK_TRANSACTION_COMPLETE:
+ case BINDER_WORK_TRANSACTION_PENDING:
case BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT: {
if (proc->oneway_spam_detection_enabled &&
w->type == BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT)
cmd = BR_ONEWAY_SPAM_SUSPECT;
+ else if (w->type == BINDER_WORK_TRANSACTION_PENDING)
+ cmd = BR_TRANSACTION_PENDING_FROZEN;
else
cmd = BR_TRANSACTION_COMPLETE;
binder_inner_proc_unlock(proc);
@@ -5006,20 +5025,14 @@ static __poll_t binder_poll(struct file *filp,
return 0;
}
-static int binder_ioctl_write_read(struct file *filp,
- unsigned int cmd, unsigned long arg,
+static int binder_ioctl_write_read(struct file *filp, unsigned long arg,
struct binder_thread *thread)
{
int ret = 0;
struct binder_proc *proc = filp->private_data;
- unsigned int size = _IOC_SIZE(cmd);
void __user *ubuf = (void __user *)arg;
struct binder_write_read bwr;
- if (size != sizeof(struct binder_write_read)) {
- ret = -EINVAL;
- goto out;
- }
if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
ret = -EFAULT;
goto out;
@@ -5296,7 +5309,6 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
int ret;
struct binder_proc *proc = filp->private_data;
struct binder_thread *thread;
- unsigned int size = _IOC_SIZE(cmd);
void __user *ubuf = (void __user *)arg;
/*pr_info("binder_ioctl: %d:%d %x %lx\n",
@@ -5318,7 +5330,7 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
switch (cmd) {
case BINDER_WRITE_READ:
- ret = binder_ioctl_write_read(filp, cmd, arg, thread);
+ ret = binder_ioctl_write_read(filp, arg, thread);
if (ret)
goto err;
break;
@@ -5361,10 +5373,6 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
case BINDER_VERSION: {
struct binder_version __user *ver = ubuf;
- if (size != sizeof(struct binder_version)) {
- ret = -EINVAL;
- goto err;
- }
if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
&ver->protocol_version)) {
ret = -EINVAL;
@@ -5572,8 +5580,7 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
proc->pid, vma->vm_start, vma->vm_end, "bad vm_flags", -EPERM);
return -EPERM;
}
- vma->vm_flags |= VM_DONTCOPY | VM_MIXEDMAP;
- vma->vm_flags &= ~VM_MAYWRITE;
+ vm_flags_mod(vma, VM_DONTCOPY | VM_MIXEDMAP, VM_MAYWRITE);
vma->vm_ops = &binder_vm_ops;
vma->vm_private_data = proc;
@@ -6170,6 +6177,7 @@ static const char * const binder_return_strings[] = {
"BR_FAILED_REPLY",
"BR_FROZEN_REPLY",
"BR_ONEWAY_SPAM_SUSPECT",
+ "BR_TRANSACTION_PENDING_FROZEN"
};
static const char * const binder_command_strings[] = {
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 4ad42b0f75cd..55a3c3c2409f 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -1019,7 +1019,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
if (vma) {
trace_binder_unmap_user_start(alloc, index);
- zap_page_range(vma, page_addr, PAGE_SIZE);
+ zap_page_range_single(vma, page_addr, PAGE_SIZE, NULL);
trace_binder_unmap_user_end(alloc, index);
}
diff --git a/drivers/android/binder_internal.h b/drivers/android/binder_internal.h
index abe19d88c6ec..28ef5b3704b1 100644
--- a/drivers/android/binder_internal.h
+++ b/drivers/android/binder_internal.h
@@ -133,7 +133,7 @@ enum binder_stat_types {
};
struct binder_stats {
- atomic_t br[_IOC_NR(BR_ONEWAY_SPAM_SUSPECT) + 1];
+ atomic_t br[_IOC_NR(BR_TRANSACTION_PENDING_FROZEN) + 1];
atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
atomic_t obj_created[BINDER_STAT_COUNT];
atomic_t obj_deleted[BINDER_STAT_COUNT];
@@ -152,6 +152,7 @@ struct binder_work {
enum binder_work_type {
BINDER_WORK_TRANSACTION = 1,
BINDER_WORK_TRANSACTION_COMPLETE,
+ BINDER_WORK_TRANSACTION_PENDING,
BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT,
BINDER_WORK_RETURN_ERROR,
BINDER_WORK_NODE,
diff --git a/drivers/android/binderfs.c b/drivers/android/binderfs.c
index 09b2ce7e4c34..76e7d6676657 100644
--- a/drivers/android/binderfs.c
+++ b/drivers/android/binderfs.c
@@ -222,14 +222,14 @@ err:
}
/**
- * binderfs_ctl_ioctl - handle binder device node allocation requests
+ * binder_ctl_ioctl - handle binder device node allocation requests
*
* The request handler for the binder-control device. All requests operate on
* the binderfs mount the binder-control device resides in:
* - BINDER_CTL_ADD
* Allocate a new binder device.
*
- * Return: 0 on success, negative errno on failure
+ * Return: %0 on success, negative errno on failure.
*/
static long binder_ctl_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
@@ -352,7 +352,7 @@ static inline bool is_binderfs_control_device(const struct dentry *dentry)
return info->control_dentry == dentry;
}
-static int binderfs_rename(struct user_namespace *mnt_userns,
+static int binderfs_rename(struct mnt_idmap *idmap,
struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry,
unsigned int flags)
@@ -361,7 +361,7 @@ static int binderfs_rename(struct user_namespace *mnt_userns,
is_binderfs_control_device(new_dentry))
return -EPERM;
- return simple_rename(&init_user_ns, old_dir, old_dentry, new_dir,
+ return simple_rename(idmap, old_dir, old_dentry, new_dir,
new_dentry, flags);
}
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 9695c4404e26..b56fba76b43f 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -1083,15 +1083,6 @@ config PATA_OPTI
If unsure, say N.
-config PATA_PALMLD
- tristate "Palm LifeDrive PATA support"
- depends on MACH_PALMLD
- help
- This option enables support for Palm LifeDrive's internal ATA
- port via the new ATA layer.
-
- If unsure, say N.
-
config PATA_PCMCIA
tristate "PCMCIA PATA support"
depends on PCMCIA
@@ -1145,16 +1136,6 @@ config PATA_RZ1000
If unsure, say N.
-config PATA_SAMSUNG_CF
- tristate "Samsung SoC PATA support"
- depends on SAMSUNG_DEV_IDE || COMPILE_TEST
- select PATA_TIMINGS
- help
- This option enables basic support for Samsung's S3C/S5P board
- PATA controllers via the new ATA layer
-
- If unsure, say N.
-
config PATA_WINBOND_VLB
tristate "Winbond W83759A VLB PATA support (Experimental)"
depends on ISA
@@ -1163,6 +1144,20 @@ config PATA_WINBOND_VLB
Support for the Winbond W83759A controller on Vesa Local Bus
systems.
+config PATA_PARPORT
+ tristate "Parallel port IDE device support"
+ depends on PARPORT_PC
+ help
+ There are many external CD-ROM and disk devices that connect through
+ your computer's parallel port. Most of them are actually IDE devices
+ using a parallel port IDE adapter. This option enables the
+ PATA_PARPORT subsystem which contains drivers for many of these
+ external drives.
+ Read <file:Documentation/admin-guide/blockdev/paride.rst> for more
+ information.
+
+source "drivers/ata/pata_parport/Kconfig"
+
comment "Generic fallback / legacy drivers"
config PATA_ACPI
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
index d2e36d367274..20e6645ab737 100644
--- a/drivers/ata/Makefile
+++ b/drivers/ata/Makefile
@@ -105,15 +105,15 @@ obj-$(CONFIG_PATA_MPIIX) += pata_mpiix.o
obj-$(CONFIG_PATA_NS87410) += pata_ns87410.o
obj-$(CONFIG_PATA_OPTI) += pata_opti.o
obj-$(CONFIG_PATA_PCMCIA) += pata_pcmcia.o
-obj-$(CONFIG_PATA_PALMLD) += pata_palmld.o
obj-$(CONFIG_PATA_PLATFORM) += pata_platform.o
obj-$(CONFIG_PATA_OF_PLATFORM) += pata_of_platform.o
obj-$(CONFIG_PATA_RB532) += pata_rb532_cf.o
obj-$(CONFIG_PATA_RZ1000) += pata_rz1000.o
-obj-$(CONFIG_PATA_SAMSUNG_CF) += pata_samsung_cf.o
obj-$(CONFIG_PATA_PXA) += pata_pxa.o
+obj-$(CONFIG_PATA_PARPORT) += pata_parport/
+
# Should be last but two libata driver
obj-$(CONFIG_PATA_ACPI) += pata_acpi.o
# Should be last but one libata driver
diff --git a/drivers/ata/acard-ahci.c b/drivers/ata/acard-ahci.c
index 7654a40c12b4..993eadd173da 100644
--- a/drivers/ata/acard-ahci.c
+++ b/drivers/ata/acard-ahci.c
@@ -57,7 +57,7 @@ struct acard_sg {
};
static enum ata_completion_errors acard_ahci_qc_prep(struct ata_queued_cmd *qc);
-static bool acard_ahci_qc_fill_rtf(struct ata_queued_cmd *qc);
+static void acard_ahci_qc_fill_rtf(struct ata_queued_cmd *qc);
static int acard_ahci_port_start(struct ata_port *ap);
static int acard_ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
@@ -248,7 +248,7 @@ static enum ata_completion_errors acard_ahci_qc_prep(struct ata_queued_cmd *qc)
return AC_ERR_OK;
}
-static bool acard_ahci_qc_fill_rtf(struct ata_queued_cmd *qc)
+static void acard_ahci_qc_fill_rtf(struct ata_queued_cmd *qc)
{
struct ahci_port_priv *pp = qc->ap->private_data;
u8 *rx_fis = pp->rx_fis;
@@ -263,13 +263,11 @@ static bool acard_ahci_qc_fill_rtf(struct ata_queued_cmd *qc)
* Setup FIS.
*/
if (qc->tf.protocol == ATA_PROT_PIO && qc->dma_dir == DMA_FROM_DEVICE &&
- !(qc->flags & ATA_QCFLAG_FAILED)) {
+ !(qc->flags & ATA_QCFLAG_EH)) {
ata_tf_from_fis(rx_fis + RX_FIS_PIO_SETUP, &qc->result_tf);
qc->result_tf.status = (rx_fis + RX_FIS_PIO_SETUP)[15];
} else
ata_tf_from_fis(rx_fis + RX_FIS_D2H_REG, &qc->result_tf);
-
- return true;
}
static int acard_ahci_port_start(struct ata_port *ap)
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 14a1c0d14916..3bb9bb483fe3 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -421,6 +421,7 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x34d3), board_ahci_low_power }, /* Ice Lake LP AHCI */
{ PCI_VDEVICE(INTEL, 0x02d3), board_ahci_low_power }, /* Comet Lake PCH-U AHCI */
{ PCI_VDEVICE(INTEL, 0x02d7), board_ahci_low_power }, /* Comet Lake PCH RAID */
+ { PCI_VDEVICE(INTEL, 0xa0d3), board_ahci_low_power }, /* Tiger Lake UP{3,4} AHCI */
/* JMicron 360/1/3/5/6, match class to avoid IDE function */
{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
diff --git a/drivers/ata/ahci_octeon.c b/drivers/ata/ahci_octeon.c
index b9460b91288f..5021ab3ede49 100644
--- a/drivers/ata/ahci_octeon.c
+++ b/drivers/ata/ahci_octeon.c
@@ -73,11 +73,6 @@ static int ahci_octeon_probe(struct platform_device *pdev)
return 0;
}
-static int ahci_octeon_remove(struct platform_device *pdev)
-{
- return 0;
-}
-
static const struct of_device_id octeon_ahci_match[] = {
{ .compatible = "cavium,octeon-7130-sata-uctl", },
{ /* sentinel */ }
@@ -86,7 +81,6 @@ MODULE_DEVICE_TABLE(of, octeon_ahci_match);
static struct platform_driver ahci_octeon_driver = {
.probe = ahci_octeon_probe,
- .remove = ahci_octeon_remove,
.driver = {
.name = "octeon-ahci",
.of_match_table = octeon_ahci_match,
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index 29acc35bf4a6..8f216de76648 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -55,7 +55,8 @@ static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
-static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc);
+static void ahci_qc_fill_rtf(struct ata_queued_cmd *qc);
+static void ahci_qc_ncq_fill_rtf(struct ata_port *ap, u64 done_mask);
static int ahci_port_start(struct ata_port *ap);
static void ahci_port_stop(struct ata_port *ap);
static enum ata_completion_errors ahci_qc_prep(struct ata_queued_cmd *qc);
@@ -157,6 +158,7 @@ struct ata_port_operations ahci_ops = {
.qc_prep = ahci_qc_prep,
.qc_issue = ahci_qc_issue,
.qc_fill_rtf = ahci_qc_fill_rtf,
+ .qc_ncq_fill_rtf = ahci_qc_ncq_fill_rtf,
.freeze = ahci_freeze,
.thaw = ahci_thaw,
@@ -1847,18 +1849,47 @@ static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
ata_port_abort(ap);
}
-static void ahci_handle_port_interrupt(struct ata_port *ap,
- void __iomem *port_mmio, u32 status)
+static void ahci_qc_complete(struct ata_port *ap, void __iomem *port_mmio)
{
struct ata_eh_info *ehi = &ap->link.eh_info;
struct ahci_port_priv *pp = ap->private_data;
- struct ahci_host_priv *hpriv = ap->host->private_data;
- int resetting = !!(ap->pflags & ATA_PFLAG_RESETTING);
u32 qc_active = 0;
int rc;
+ /*
+ * pp->active_link is not reliable once FBS is enabled, both
+ * PORT_SCR_ACT and PORT_CMD_ISSUE should be checked because
+ * NCQ and non-NCQ commands may be in flight at the same time.
+ */
+ if (pp->fbs_enabled) {
+ if (ap->qc_active) {
+ qc_active = readl(port_mmio + PORT_SCR_ACT);
+ qc_active |= readl(port_mmio + PORT_CMD_ISSUE);
+ }
+ } else {
+ /* pp->active_link is valid iff any command is in flight */
+ if (ap->qc_active && pp->active_link->sactive)
+ qc_active = readl(port_mmio + PORT_SCR_ACT);
+ else
+ qc_active = readl(port_mmio + PORT_CMD_ISSUE);
+ }
+
+ rc = ata_qc_complete_multiple(ap, qc_active);
+ if (unlikely(rc < 0 && !(ap->pflags & ATA_PFLAG_RESETTING))) {
+ ehi->err_mask |= AC_ERR_HSM;
+ ehi->action |= ATA_EH_RESET;
+ ata_port_freeze(ap);
+ }
+}
+
+static void ahci_handle_port_interrupt(struct ata_port *ap,
+ void __iomem *port_mmio, u32 status)
+{
+ struct ahci_port_priv *pp = ap->private_data;
+ struct ahci_host_priv *hpriv = ap->host->private_data;
+
/* ignore BAD_PMP while resetting */
- if (unlikely(resetting))
+ if (unlikely(ap->pflags & ATA_PFLAG_RESETTING))
status &= ~PORT_IRQ_BAD_PMP;
if (sata_lpm_ignore_phy_events(&ap->link)) {
@@ -1867,6 +1898,12 @@ static void ahci_handle_port_interrupt(struct ata_port *ap,
}
if (unlikely(status & PORT_IRQ_ERROR)) {
+ /*
+ * Before getting the error notification, we may have
+ * received SDB FISes notifying successful completions.
+ * Handle these first and then handle the error.
+ */
+ ahci_qc_complete(ap, port_mmio);
ahci_error_intr(ap, status);
return;
}
@@ -1903,32 +1940,8 @@ static void ahci_handle_port_interrupt(struct ata_port *ap,
}
}
- /* pp->active_link is not reliable once FBS is enabled, both
- * PORT_SCR_ACT and PORT_CMD_ISSUE should be checked because
- * NCQ and non-NCQ commands may be in flight at the same time.
- */
- if (pp->fbs_enabled) {
- if (ap->qc_active) {
- qc_active = readl(port_mmio + PORT_SCR_ACT);
- qc_active |= readl(port_mmio + PORT_CMD_ISSUE);
- }
- } else {
- /* pp->active_link is valid iff any command is in flight */
- if (ap->qc_active && pp->active_link->sactive)
- qc_active = readl(port_mmio + PORT_SCR_ACT);
- else
- qc_active = readl(port_mmio + PORT_CMD_ISSUE);
- }
-
-
- rc = ata_qc_complete_multiple(ap, qc_active);
-
- /* while resetting, invalid completions are expected */
- if (unlikely(rc < 0 && !resetting)) {
- ehi->err_mask |= AC_ERR_HSM;
- ehi->action |= ATA_EH_RESET;
- ata_port_freeze(ap);
- }
+ /* Handle completed commands */
+ ahci_qc_complete(ap, port_mmio);
}
static void ahci_port_intr(struct ata_port *ap)
@@ -2053,11 +2066,18 @@ unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
}
EXPORT_SYMBOL_GPL(ahci_qc_issue);
-static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc)
+static void ahci_qc_fill_rtf(struct ata_queued_cmd *qc)
{
struct ahci_port_priv *pp = qc->ap->private_data;
u8 *rx_fis = pp->rx_fis;
+ /*
+ * rtf may already be filled (e.g. for successful NCQ commands).
+ * If that is the case, we have nothing to do.
+ */
+ if (qc->flags & ATA_QCFLAG_RTF_FILLED)
+ return;
+
if (pp->fbs_enabled)
rx_fis += qc->dev->link->pmp * AHCI_RX_FIS_SZ;
@@ -2068,9 +2088,12 @@ static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc)
* Setup FIS.
*/
if (qc->tf.protocol == ATA_PROT_PIO && qc->dma_dir == DMA_FROM_DEVICE &&
- !(qc->flags & ATA_QCFLAG_FAILED)) {
+ !(qc->flags & ATA_QCFLAG_EH)) {
ata_tf_from_fis(rx_fis + RX_FIS_PIO_SETUP, &qc->result_tf);
qc->result_tf.status = (rx_fis + RX_FIS_PIO_SETUP)[15];
+ qc->flags |= ATA_QCFLAG_RTF_FILLED;
+ return;
+ }
/*
* For NCQ commands, we never get a D2H FIS, so reading the D2H Register
@@ -2080,15 +2103,85 @@ static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc)
* instead. However, the SDB FIS does not contain the LBA, so we can't
* use the ata_tf_from_fis() helper.
*/
- } else if (ata_is_ncq(qc->tf.protocol)) {
+ if (ata_is_ncq(qc->tf.protocol)) {
const u8 *fis = rx_fis + RX_FIS_SDB;
+ /*
+ * Successful NCQ commands have been filled already.
+ * A failed NCQ command will read the status here.
+ * (Note that a failed NCQ command will get a more specific
+ * error when reading the NCQ Command Error log.)
+ */
qc->result_tf.status = fis[2];
qc->result_tf.error = fis[3];
- } else
- ata_tf_from_fis(rx_fis + RX_FIS_D2H_REG, &qc->result_tf);
+ qc->flags |= ATA_QCFLAG_RTF_FILLED;
+ return;
+ }
+
+ ata_tf_from_fis(rx_fis + RX_FIS_D2H_REG, &qc->result_tf);
+ qc->flags |= ATA_QCFLAG_RTF_FILLED;
+}
+
+static void ahci_qc_ncq_fill_rtf(struct ata_port *ap, u64 done_mask)
+{
+ struct ahci_port_priv *pp = ap->private_data;
+ const u8 *fis;
+
+ /* No outstanding commands. */
+ if (!ap->qc_active)
+ return;
+
+ /*
+ * FBS not enabled, so read status and error once, since they are shared
+ * for all QCs.
+ */
+ if (!pp->fbs_enabled) {
+ u8 status, error;
+
+ /* No outstanding NCQ commands. */
+ if (!pp->active_link->sactive)
+ return;
+
+ fis = pp->rx_fis + RX_FIS_SDB;
+ status = fis[2];
+ error = fis[3];
- return true;
+ while (done_mask) {
+ struct ata_queued_cmd *qc;
+ unsigned int tag = __ffs64(done_mask);
+
+ qc = ata_qc_from_tag(ap, tag);
+ if (qc && ata_is_ncq(qc->tf.protocol)) {
+ qc->result_tf.status = status;
+ qc->result_tf.error = error;
+ qc->flags |= ATA_QCFLAG_RTF_FILLED;
+ }
+ done_mask &= ~(1ULL << tag);
+ }
+
+ return;
+ }
+
+ /*
+ * FBS enabled, so read the status and error for each QC, since the QCs
+ * can belong to different PMP links. (Each PMP link has its own FIS
+ * Receive Area.)
+ */
+ while (done_mask) {
+ struct ata_queued_cmd *qc;
+ unsigned int tag = __ffs64(done_mask);
+
+ qc = ata_qc_from_tag(ap, tag);
+ if (qc && ata_is_ncq(qc->tf.protocol)) {
+ fis = pp->rx_fis;
+ fis += qc->dev->link->pmp * AHCI_RX_FIS_SZ;
+ fis += RX_FIS_SDB;
+ qc->result_tf.status = fis[2];
+ qc->result_tf.error = fis[3];
+ qc->flags |= ATA_QCFLAG_RTF_FILLED;
+ }
+ done_mask &= ~(1ULL << tag);
+ }
}
static void ahci_freeze(struct ata_port *ap)
@@ -2138,7 +2231,7 @@ static void ahci_post_internal_cmd(struct ata_queued_cmd *qc)
struct ata_port *ap = qc->ap;
/* make DMA engine forget about the failed command */
- if (qc->flags & ATA_QCFLAG_FAILED)
+ if (qc->flags & ATA_QCFLAG_EH)
ahci_kick_engine(ap);
}
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 884ae73b11ea..14c17c3bda4e 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -552,7 +552,7 @@ static const u8 ata_rw_cmds[] = {
0,
0,
0,
- ATA_CMD_WRITE_MULTI_FUA_EXT,
+ 0,
/* pio */
ATA_CMD_PIO_READ,
ATA_CMD_PIO_WRITE,
@@ -574,17 +574,18 @@ static const u8 ata_rw_cmds[] = {
};
/**
- * ata_rwcmd_protocol - set taskfile r/w commands and protocol
- * @tf: command to examine and configure
- * @dev: device tf belongs to
+ * ata_set_rwcmd_protocol - set taskfile r/w command and protocol
+ * @dev: target device for the taskfile
+ * @tf: taskfile to examine and configure
*
- * Examine the device configuration and tf->flags to calculate
- * the proper read/write commands and protocol to use.
+ * Examine the device configuration and tf->flags to determine
+ * the proper read/write command and protocol to use for @tf.
*
* LOCKING:
* caller.
*/
-static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
+static bool ata_set_rwcmd_protocol(struct ata_device *dev,
+ struct ata_taskfile *tf)
{
u8 cmd;
@@ -607,11 +608,12 @@ static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
}
cmd = ata_rw_cmds[index + fua + lba48 + write];
- if (cmd) {
- tf->command = cmd;
- return 0;
- }
- return -1;
+ if (!cmd)
+ return false;
+
+ tf->command = cmd;
+
+ return true;
}
/**
@@ -725,7 +727,8 @@ int ata_build_rw_tf(struct ata_queued_cmd *qc, u64 block, u32 n_block,
} else if (dev->flags & ATA_DFLAG_LBA) {
tf->flags |= ATA_TFLAG_LBA;
- if (lba_28_ok(block, n_block)) {
+ /* We need LBA48 for FUA writes */
+ if (!(tf->flags & ATA_TFLAG_FUA) && lba_28_ok(block, n_block)) {
/* use LBA28 */
tf->device |= (block >> 24) & 0xf;
} else if (lba_48_ok(block, n_block)) {
@@ -740,11 +743,12 @@ int ata_build_rw_tf(struct ata_queued_cmd *qc, u64 block, u32 n_block,
tf->hob_lbah = (block >> 40) & 0xff;
tf->hob_lbam = (block >> 32) & 0xff;
tf->hob_lbal = (block >> 24) & 0xff;
- } else
+ } else {
/* request too large even for LBA48 */
return -ERANGE;
+ }
- if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
+ if (unlikely(!ata_set_rwcmd_protocol(dev, tf)))
return -EINVAL;
tf->nsect = n_block & 0xff;
@@ -762,7 +766,7 @@ int ata_build_rw_tf(struct ata_queued_cmd *qc, u64 block, u32 n_block,
if (!lba_28_ok(block, n_block))
return -ERANGE;
- if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
+ if (unlikely(!ata_set_rwcmd_protocol(dev, tf)))
return -EINVAL;
/* Convert LBA to CHS */
@@ -1590,7 +1594,7 @@ static unsigned ata_exec_internal_sg(struct ata_device *dev,
ap->ops->post_internal_cmd(qc);
/* perform minimal error analysis */
- if (qc->flags & ATA_QCFLAG_FAILED) {
+ if (qc->flags & ATA_QCFLAG_EH) {
if (qc->result_tf.status & (ATA_ERR | ATA_DF))
qc->err_mask |= AC_ERR_DEV;
@@ -2420,6 +2424,28 @@ static void ata_dev_config_chs(struct ata_device *dev)
dev->heads, dev->sectors);
}
+static void ata_dev_config_fua(struct ata_device *dev)
+{
+ /* Ignore FUA support if its use is disabled globally */
+ if (!libata_fua)
+ goto nofua;
+
+ /* Ignore devices without support for WRITE DMA FUA EXT */
+ if (!(dev->flags & ATA_DFLAG_LBA48) || !ata_id_has_fua(dev->id))
+ goto nofua;
+
+ /* Ignore known bad devices and devices that lack NCQ support */
+ if (!ata_ncq_supported(dev) || (dev->horkage & ATA_HORKAGE_NO_FUA))
+ goto nofua;
+
+ dev->flags |= ATA_DFLAG_FUA;
+
+ return;
+
+nofua:
+ dev->flags &= ~ATA_DFLAG_FUA;
+}
+
static void ata_dev_config_devslp(struct ata_device *dev)
{
u8 *sata_setting = dev->link->ap->sector_buf;
@@ -2508,7 +2534,8 @@ static void ata_dev_print_features(struct ata_device *dev)
return;
ata_dev_info(dev,
- "Features:%s%s%s%s%s%s\n",
+ "Features:%s%s%s%s%s%s%s\n",
+ dev->flags & ATA_DFLAG_FUA ? " FUA" : "",
dev->flags & ATA_DFLAG_TRUSTED ? " Trust" : "",
dev->flags & ATA_DFLAG_DA ? " Dev-Attention" : "",
dev->flags & ATA_DFLAG_DEVSLP ? " Dev-Sleep" : "",
@@ -2669,6 +2696,7 @@ int ata_dev_configure(struct ata_device *dev)
ata_dev_config_chs(dev);
}
+ ata_dev_config_fua(dev);
ata_dev_config_devslp(dev);
ata_dev_config_sense_reporting(dev);
ata_dev_config_zac(dev);
@@ -3109,7 +3137,7 @@ int sata_down_spd_limit(struct ata_link *link, u32 spd_limit)
*/
if (spd > 1)
mask &= (1 << (spd - 1)) - 1;
- else
+ else if (link->sata_spd)
return -EINVAL;
/* were we already at the bottom? */
@@ -4045,6 +4073,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
{ "Samsung SSD 870*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
ATA_HORKAGE_ZERO_AFTER_TRIM |
ATA_HORKAGE_NO_NCQ_ON_ATI },
+ { "SAMSUNG*MZ7LH*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
+ ATA_HORKAGE_ZERO_AFTER_TRIM |
+ ATA_HORKAGE_NO_NCQ_ON_ATI, },
{ "FCCT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
ATA_HORKAGE_ZERO_AFTER_TRIM },
@@ -4103,6 +4134,12 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
*/
{ "SATADOM-ML 3ME", NULL, ATA_HORKAGE_NO_LOG_DIR },
+ /* Buggy FUA */
+ { "Maxtor", "BANC1G10", ATA_HORKAGE_NO_FUA },
+ { "WDC*WD2500J*", NULL, ATA_HORKAGE_NO_FUA },
+ { "OCZ-VERTEX*", NULL, ATA_HORKAGE_NO_FUA },
+ { "INTEL*SSDSC2CT*", NULL, ATA_HORKAGE_NO_FUA },
+
/* End Marker */
{ }
};
@@ -4683,10 +4720,10 @@ void ata_qc_complete(struct ata_queued_cmd *qc)
/* XXX: New EH and old EH use different mechanisms to
* synchronize EH with regular execution path.
*
- * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
+ * In new EH, a qc owned by EH is marked with ATA_QCFLAG_EH.
* Normal execution path is responsible for not accessing a
- * failed qc. libata core enforces the rule by returning NULL
- * from ata_qc_from_tag() for failed qcs.
+ * qc owned by EH. libata core enforces the rule by returning NULL
+ * from ata_qc_from_tag() for qcs owned by EH.
*
* Old EH depends on ata_qc_complete() nullifying completion
* requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does
@@ -4698,7 +4735,7 @@ void ata_qc_complete(struct ata_queued_cmd *qc)
struct ata_eh_info *ehi = &dev->link->eh_info;
if (unlikely(qc->err_mask))
- qc->flags |= ATA_QCFLAG_FAILED;
+ qc->flags |= ATA_QCFLAG_EH;
/*
* Finish internal commands without any further processing
@@ -4715,7 +4752,7 @@ void ata_qc_complete(struct ata_queued_cmd *qc)
* Non-internal qc has failed. Fill the result TF and
* summon EH.
*/
- if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
+ if (unlikely(qc->flags & ATA_QCFLAG_EH)) {
fill_result_tf(qc);
trace_ata_qc_complete_failed(qc);
ata_qc_schedule_eh(qc);
@@ -6214,6 +6251,7 @@ static const struct ata_force_param force_tbl[] __initconst = {
force_horkage_onoff(lpm, ATA_HORKAGE_NOLPM),
force_horkage_onoff(setxfer, ATA_HORKAGE_NOSETXFER),
force_horkage_on(dump_id, ATA_HORKAGE_DUMP_ID),
+ force_horkage_onoff(fua, ATA_HORKAGE_NO_FUA),
force_horkage_on(disable, ATA_HORKAGE_DISABLE),
};
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 34303ce67c14..a6c901811802 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -565,17 +565,23 @@ void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap,
{
int i;
unsigned long flags;
+ struct scsi_cmnd *scmd, *tmp;
+ int nr_timedout = 0;
/* make sure sff pio task is not running */
ata_sff_flush_pio_task(ap);
+ if (!ap->ops->error_handler)
+ return;
+
/* synchronize with host lock and sort out timeouts */
- /* For new EH, all qcs are finished in one of three ways -
+ /*
+ * For new EH, all qcs are finished in one of three ways -
* normal completion, error completion, and SCSI timeout.
* Both completions can race against SCSI timeout. When normal
* completion wins, the qc never reaches EH. When error
- * completion wins, the qc has ATA_QCFLAG_FAILED set.
+ * completion wins, the qc has ATA_QCFLAG_EH set.
*
* When SCSI timeout wins, things are a bit more complex.
* Normal or error completion can occur after the timeout but
@@ -584,64 +590,61 @@ void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap,
* timed out iff its associated qc is active and not failed.
*/
spin_lock_irqsave(ap->lock, flags);
- if (ap->ops->error_handler) {
- struct scsi_cmnd *scmd, *tmp;
- int nr_timedout = 0;
-
- /* This must occur under the ap->lock as we don't want
- a polled recovery to race the real interrupt handler
-
- The lost_interrupt handler checks for any completed but
- non-notified command and completes much like an IRQ handler.
- We then fall into the error recovery code which will treat
- this as if normal completion won the race */
-
- if (ap->ops->lost_interrupt)
- ap->ops->lost_interrupt(ap);
+ /*
+ * This must occur under the ap->lock as we don't want
+ * a polled recovery to race the real interrupt handler
+ *
+ * The lost_interrupt handler checks for any completed but
+ * non-notified command and completes much like an IRQ handler.
+ *
+ * We then fall into the error recovery code which will treat
+ * this as if normal completion won the race
+ */
+ if (ap->ops->lost_interrupt)
+ ap->ops->lost_interrupt(ap);
- list_for_each_entry_safe(scmd, tmp, eh_work_q, eh_entry) {
- struct ata_queued_cmd *qc;
+ list_for_each_entry_safe(scmd, tmp, eh_work_q, eh_entry) {
+ struct ata_queued_cmd *qc;
- ata_qc_for_each_raw(ap, qc, i) {
- if (qc->flags & ATA_QCFLAG_ACTIVE &&
- qc->scsicmd == scmd)
- break;
- }
+ ata_qc_for_each_raw(ap, qc, i) {
+ if (qc->flags & ATA_QCFLAG_ACTIVE &&
+ qc->scsicmd == scmd)
+ break;
+ }
- if (i < ATA_MAX_QUEUE) {
- /* the scmd has an associated qc */
- if (!(qc->flags & ATA_QCFLAG_FAILED)) {
- /* which hasn't failed yet, timeout */
- qc->err_mask |= AC_ERR_TIMEOUT;
- qc->flags |= ATA_QCFLAG_FAILED;
- nr_timedout++;
- }
- } else {
- /* Normal completion occurred after
- * SCSI timeout but before this point.
- * Successfully complete it.
- */
- scmd->retries = scmd->allowed;
- scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
+ if (i < ATA_MAX_QUEUE) {
+ /* the scmd has an associated qc */
+ if (!(qc->flags & ATA_QCFLAG_EH)) {
+ /* which hasn't failed yet, timeout */
+ qc->err_mask |= AC_ERR_TIMEOUT;
+ qc->flags |= ATA_QCFLAG_EH;
+ nr_timedout++;
}
+ } else {
+ /* Normal completion occurred after
+ * SCSI timeout but before this point.
+ * Successfully complete it.
+ */
+ scmd->retries = scmd->allowed;
+ scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
}
+ }
- /* If we have timed out qcs. They belong to EH from
- * this point but the state of the controller is
- * unknown. Freeze the port to make sure the IRQ
- * handler doesn't diddle with those qcs. This must
- * be done atomically w.r.t. setting QCFLAG_FAILED.
- */
- if (nr_timedout)
- __ata_port_freeze(ap);
+ /*
+ * If we have timed out qcs. They belong to EH from
+ * this point but the state of the controller is
+ * unknown. Freeze the port to make sure the IRQ
+ * handler doesn't diddle with those qcs. This must
+ * be done atomically w.r.t. setting ATA_QCFLAG_EH.
+ */
+ if (nr_timedout)
+ __ata_port_freeze(ap);
+ /* initialize eh_tries */
+ ap->eh_tries = ATA_EH_MAX_TRIES;
- /* initialize eh_tries */
- ap->eh_tries = ATA_EH_MAX_TRIES;
- }
spin_unlock_irqrestore(ap->lock, flags);
-
}
EXPORT_SYMBOL(ata_scsi_cmd_error_handler);
@@ -911,12 +914,12 @@ void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
WARN_ON(!ap->ops->error_handler);
- qc->flags |= ATA_QCFLAG_FAILED;
+ qc->flags |= ATA_QCFLAG_EH;
ata_eh_set_pending(ap, 1);
/* The following will fail if timeout has already expired.
* ata_scsi_error() takes care of such scmds on EH entry.
- * Note that ATA_QCFLAG_FAILED is unconditionally set after
+ * Note that ATA_QCFLAG_EH is unconditionally set after
* this function completes.
*/
blk_abort_request(scsi_cmd_to_rq(qc->scsicmd));
@@ -994,7 +997,7 @@ static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link)
/* include internal tag in iteration */
ata_qc_for_each_with_internal(ap, qc, tag) {
if (qc && (!link || qc->dev->link == link)) {
- qc->flags |= ATA_QCFLAG_FAILED;
+ qc->flags |= ATA_QCFLAG_EH;
ata_qc_complete(qc);
nr_aborted++;
}
@@ -1954,7 +1957,7 @@ static void ata_eh_link_autopsy(struct ata_link *link)
all_err_mask |= ehc->i.err_mask;
ata_qc_for_each_raw(ap, qc, tag) {
- if (!(qc->flags & ATA_QCFLAG_FAILED) ||
+ if (!(qc->flags & ATA_QCFLAG_EH) ||
qc->flags & ATA_QCFLAG_RETRY ||
ata_dev_phys_link(qc->dev) != link)
continue;
@@ -2232,7 +2235,7 @@ static void ata_eh_link_report(struct ata_link *link)
desc = ehc->i.desc;
ata_qc_for_each_raw(ap, qc, tag) {
- if (!(qc->flags & ATA_QCFLAG_FAILED) ||
+ if (!(qc->flags & ATA_QCFLAG_EH) ||
ata_dev_phys_link(qc->dev) != link ||
((qc->flags & ATA_QCFLAG_QUIET) &&
qc->err_mask == AC_ERR_DEV))
@@ -2298,7 +2301,7 @@ static void ata_eh_link_report(struct ata_link *link)
char data_buf[20] = "";
char cdb_buf[70] = "";
- if (!(qc->flags & ATA_QCFLAG_FAILED) ||
+ if (!(qc->flags & ATA_QCFLAG_EH) ||
ata_dev_phys_link(qc->dev) != link || !qc->err_mask)
continue;
@@ -3802,7 +3805,7 @@ void ata_eh_finish(struct ata_port *ap)
/* retry or finish qcs */
ata_qc_for_each_raw(ap, qc, tag) {
- if (!(qc->flags & ATA_QCFLAG_FAILED))
+ if (!(qc->flags & ATA_QCFLAG_EH))
continue;
if (qc->err_mask) {
diff --git a/drivers/ata/libata-sata.c b/drivers/ata/libata-sata.c
index 18ef14e749a0..f3e7396e3191 100644
--- a/drivers/ata/libata-sata.c
+++ b/drivers/ata/libata-sata.c
@@ -655,6 +655,9 @@ int ata_qc_complete_multiple(struct ata_port *ap, u64 qc_active)
return -EINVAL;
}
+ if (ap->ops->qc_ncq_fill_rtf)
+ ap->ops->qc_ncq_fill_rtf(ap, done_mask);
+
while (done_mask) {
struct ata_queued_cmd *qc;
unsigned int tag = __ffs64(done_mask);
@@ -1429,7 +1432,7 @@ void ata_eh_analyze_ncq_error(struct ata_link *link)
/* has LLDD analyzed already? */
ata_qc_for_each_raw(ap, qc, tag) {
- if (!(qc->flags & ATA_QCFLAG_FAILED))
+ if (!(qc->flags & ATA_QCFLAG_EH))
continue;
if (qc->err_mask)
@@ -1477,7 +1480,7 @@ void ata_eh_analyze_ncq_error(struct ata_link *link)
}
ata_qc_for_each_raw(ap, qc, tag) {
- if (!(qc->flags & ATA_QCFLAG_FAILED) ||
+ if (!(qc->flags & ATA_QCFLAG_EH) ||
ata_dev_phys_link(qc->dev) != link)
continue;
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index cbb3a7a50816..e093c7a7deeb 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -383,8 +383,12 @@ int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg)
u8 scsi_cmd[MAX_COMMAND_SIZE];
u8 args[4], *argbuf = NULL;
int argsize = 0;
- enum dma_data_direction data_dir;
struct scsi_sense_hdr sshdr;
+ const struct scsi_exec_args exec_args = {
+ .sshdr = &sshdr,
+ .sense = sensebuf,
+ .sense_len = sizeof(sensebuf),
+ };
int cmd_result;
if (arg == NULL)
@@ -407,11 +411,9 @@ int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg)
scsi_cmd[1] = (4 << 1); /* PIO Data-in */
scsi_cmd[2] = 0x0e; /* no off.line or cc, read from dev,
block count in sector count field */
- data_dir = DMA_FROM_DEVICE;
} else {
scsi_cmd[1] = (3 << 1); /* Non-data */
scsi_cmd[2] = 0x20; /* cc but no off.line or data xfer */
- data_dir = DMA_NONE;
}
scsi_cmd[0] = ATA_16;
@@ -429,9 +431,8 @@ int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg)
/* Good values for timeout and retries? Values below
from scsi_ioctl_send_command() for default case... */
- cmd_result = scsi_execute(scsidev, scsi_cmd, data_dir, argbuf, argsize,
- sensebuf, &sshdr, (10*HZ), 5, 0, 0, NULL);
-
+ cmd_result = scsi_execute_cmd(scsidev, scsi_cmd, REQ_OP_DRV_IN, argbuf,
+ argsize, 10 * HZ, 5, &exec_args);
if (cmd_result < 0) {
rc = cmd_result;
goto error;
@@ -491,6 +492,11 @@ int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg)
u8 args[7];
struct scsi_sense_hdr sshdr;
int cmd_result;
+ const struct scsi_exec_args exec_args = {
+ .sshdr = &sshdr,
+ .sense = sensebuf,
+ .sense_len = sizeof(sensebuf),
+ };
if (arg == NULL)
return -EINVAL;
@@ -513,9 +519,8 @@ int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg)
/* Good values for timeout and retries? Values below
from scsi_ioctl_send_command() for default case... */
- cmd_result = scsi_execute(scsidev, scsi_cmd, DMA_NONE, NULL, 0,
- sensebuf, &sshdr, (10*HZ), 5, 0, 0, NULL);
-
+ cmd_result = scsi_execute_cmd(scsidev, scsi_cmd, REQ_OP_DRV_IN, NULL,
+ 0, 10 * HZ, 5, &exec_args);
if (cmd_result < 0) {
rc = cmd_result;
goto error;
@@ -1654,7 +1659,8 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
struct ata_port *ap = qc->ap;
struct scsi_cmnd *cmd = qc->scsicmd;
u8 *cdb = cmd->cmnd;
- int need_sense = (qc->err_mask != 0);
+ int need_sense = (qc->err_mask != 0) &&
+ !(qc->flags & ATA_QCFLAG_SENSE_VALID);
/* For ATA pass thru (SAT) commands, generate a sense block if
* user mandated it or if there's an error. Note that if we
@@ -1668,12 +1674,11 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
if (((cdb[0] == ATA_16) || (cdb[0] == ATA_12)) &&
((cdb[2] & 0x20) || need_sense))
ata_gen_passthru_sense(qc);
- else if (qc->flags & ATA_QCFLAG_SENSE_VALID)
- cmd->result = SAM_STAT_CHECK_CONDITION;
else if (need_sense)
ata_gen_ata_sense(qc);
else
- cmd->result = SAM_STAT_GOOD;
+ /* Keep the SCSI ML and status byte, clear host byte. */
+ cmd->result &= 0x0000ffff;
if (need_sense && !ap->ops->error_handler)
ata_dump_status(ap, &qc->result_tf);
@@ -2240,30 +2245,6 @@ static unsigned int ata_msense_rw_recovery(u8 *buf, bool changeable)
return sizeof(def_rw_recovery_mpage);
}
-/*
- * We can turn this into a real blacklist if it's needed, for now just
- * blacklist any Maxtor BANC1G10 revision firmware
- */
-static int ata_dev_supports_fua(u16 *id)
-{
- unsigned char model[ATA_ID_PROD_LEN + 1], fw[ATA_ID_FW_REV_LEN + 1];
-
- if (!libata_fua)
- return 0;
- if (!ata_id_has_fua(id))
- return 0;
-
- ata_id_c_string(id, model, ATA_ID_PROD, sizeof(model));
- ata_id_c_string(id, fw, ATA_ID_FW_REV, sizeof(fw));
-
- if (strcmp(model, "Maxtor"))
- return 1;
- if (strcmp(fw, "BANC1G10"))
- return 1;
-
- return 0; /* blacklisted */
-}
-
/**
* ata_scsiop_mode_sense - Simulate MODE SENSE 6, 10 commands
* @args: device IDENTIFY data / SCSI command of interest.
@@ -2287,7 +2268,7 @@ static unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf)
};
u8 pg, spg;
unsigned int ebd, page_control, six_byte;
- u8 dpofua, bp = 0xff;
+ u8 dpofua = 0, bp = 0xff;
u16 fp;
six_byte = (scsicmd[0] == MODE_SENSE);
@@ -2350,9 +2331,7 @@ static unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf)
goto invalid_fld;
}
- dpofua = 0;
- if (ata_dev_supports_fua(args->id) && (dev->flags & ATA_DFLAG_LBA48) &&
- (!(dev->flags & ATA_DFLAG_PIO) || dev->multi_count))
+ if (dev->flags & ATA_DFLAG_FUA)
dpofua = 1 << 4;
if (six_byte) {
@@ -3266,11 +3245,12 @@ static unsigned int ata_scsiop_maint_in(struct ata_scsi_args *args, u8 *rbuf)
u8 supported = 0;
unsigned int err = 0;
- if (cdb[2] != 1) {
+ if (cdb[2] != 1 && cdb[2] != 3) {
ata_dev_warn(dev, "invalid command format %d\n", cdb[2]);
err = 2;
goto out;
}
+
switch (cdb[3]) {
case INQUIRY:
case MODE_SENSE:
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 153f49e00713..cd82d3b5ed14 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -1377,14 +1377,10 @@ EXPORT_SYMBOL_GPL(ata_sff_qc_issue);
*
* LOCKING:
* spin_lock_irqsave(host lock)
- *
- * RETURNS:
- * true indicating that result TF is successfully filled.
*/
-bool ata_sff_qc_fill_rtf(struct ata_queued_cmd *qc)
+void ata_sff_qc_fill_rtf(struct ata_queued_cmd *qc)
{
qc->ap->ops->sff_tf_read(qc->ap, &qc->result_tf);
- return true;
}
EXPORT_SYMBOL_GPL(ata_sff_qc_fill_rtf);
@@ -2073,7 +2069,7 @@ void ata_sff_error_handler(struct ata_port *ap)
unsigned long flags;
qc = __ata_qc_from_tag(ap, ap->link.active_tag);
- if (qc && !(qc->flags & ATA_QCFLAG_FAILED))
+ if (qc && !(qc->flags & ATA_QCFLAG_EH))
qc = NULL;
spin_lock_irqsave(ap->lock, flags);
@@ -2796,7 +2792,7 @@ void ata_bmdma_error_handler(struct ata_port *ap)
bool thaw = false;
qc = __ata_qc_from_tag(ap, ap->link.active_tag);
- if (qc && !(qc->flags & ATA_QCFLAG_FAILED))
+ if (qc && !(qc->flags & ATA_QCFLAG_EH))
qc = NULL;
/* reset PIO HSM and stop DMA engine */
diff --git a/drivers/ata/libata-trace.c b/drivers/ata/libata-trace.c
index e0e4d0d5a100..9b5363fd0ab0 100644
--- a/drivers/ata/libata-trace.c
+++ b/drivers/ata/libata-trace.c
@@ -142,7 +142,7 @@ libata_trace_parse_qc_flags(struct trace_seq *p, unsigned int qc_flags)
trace_seq_printf(p, "QUIET ");
if (qc_flags & ATA_QCFLAG_RETRY)
trace_seq_printf(p, "RETRY ");
- if (qc_flags & ATA_QCFLAG_FAILED)
+ if (qc_flags & ATA_QCFLAG_EH)
trace_seq_printf(p, "FAILED ");
if (qc_flags & ATA_QCFLAG_SENSE_VALID)
trace_seq_printf(p, "SENSE_VALID ");
diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
index 35608a0cf552..4cbcdc5da038 100644
--- a/drivers/ata/pata_octeon_cf.c
+++ b/drivers/ata/pata_octeon_cf.c
@@ -67,7 +67,7 @@ module_param(enable_dma, int, 0444);
MODULE_PARM_DESC(enable_dma,
"Enable use of DMA on interfaces that support it (0=no dma [default], 1=use dma)");
-/**
+/*
* Convert nanosecond based time to setting used in the
* boot bus timing register, based on timing multiple
*/
@@ -114,7 +114,7 @@ static void octeon_cf_set_boot_reg_cfg(int cs, unsigned int multiplier)
cvmx_write_csr(CVMX_MIO_BOOT_REG_CFGX(cs), reg_cfg.u64);
}
-/**
+/*
* Called after libata determines the needed PIO mode. This
* function programs the Octeon bootbus regions to support the
* timing requirements of the PIO mode.
@@ -278,7 +278,7 @@ static void octeon_cf_set_dmamode(struct ata_port *ap, struct ata_device *dev)
cvmx_write_csr(cf_port->dma_base + DMA_TIM, dma_tim.u64);
}
-/**
+/*
* Handle an 8 bit I/O request.
*
* @qc: Queued command
@@ -317,7 +317,7 @@ static unsigned int octeon_cf_data_xfer8(struct ata_queued_cmd *qc,
return buflen;
}
-/**
+/*
* Handle a 16 bit I/O request.
*
* @qc: Queued command
@@ -372,7 +372,7 @@ static unsigned int octeon_cf_data_xfer16(struct ata_queued_cmd *qc,
return buflen;
}
-/**
+/*
* Read the taskfile for 16bit non-True IDE only.
*/
static void octeon_cf_tf_read16(struct ata_port *ap, struct ata_taskfile *tf)
@@ -453,7 +453,7 @@ static int octeon_cf_softreset16(struct ata_link *link, unsigned int *classes,
return 0;
}
-/**
+/*
* Load the taskfile for 16bit non-True IDE only. The device_addr is
* not loaded, we do this as part of octeon_cf_exec_command16.
*/
@@ -525,7 +525,7 @@ static void octeon_cf_dma_setup(struct ata_queued_cmd *qc)
ap->ops->sff_exec_command(ap, &qc->tf);
}
-/**
+/*
* Start a DMA transfer that was already setup
*
* @qc: Information about the DMA
@@ -580,7 +580,7 @@ static void octeon_cf_dma_start(struct ata_queued_cmd *qc)
cvmx_write_csr(cf_port->dma_base + DMA_CFG, mio_boot_dma_cfg.u64);
}
-/**
+/*
*
* LOCKING:
* spin_lock_irqsave(host lock)
diff --git a/drivers/ata/pata_palmld.c b/drivers/ata/pata_palmld.c
deleted file mode 100644
index 51caa2a427dd..000000000000
--- a/drivers/ata/pata_palmld.c
+++ /dev/null
@@ -1,137 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * drivers/ata/pata_palmld.c
- *
- * Driver for IDE channel in Palm LifeDrive
- *
- * Based on research of:
- * Alex Osborne <ato@meshy.org>
- *
- * Rewrite for mainline:
- * Marek Vasut <marek.vasut@gmail.com>
- *
- * Rewritten version based on pata_ixp4xx_cf.c:
- * ixp4xx PATA/Compact Flash driver
- * Copyright (C) 2006-07 Tower Technologies
- * Author: Alessandro Zummo <a.zummo@towertech.it>
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/libata.h>
-#include <linux/irq.h>
-#include <linux/platform_device.h>
-#include <linux/delay.h>
-#include <linux/gpio/consumer.h>
-
-#include <scsi/scsi_host.h>
-
-#define DRV_NAME "pata_palmld"
-
-struct palmld_pata {
- struct ata_host *host;
- struct gpio_desc *power;
- struct gpio_desc *reset;
-};
-
-static struct scsi_host_template palmld_sht = {
- ATA_PIO_SHT(DRV_NAME),
-};
-
-static struct ata_port_operations palmld_port_ops = {
- .inherits = &ata_sff_port_ops,
- .sff_data_xfer = ata_sff_data_xfer32,
- .cable_detect = ata_cable_40wire,
-};
-
-static int palmld_pata_probe(struct platform_device *pdev)
-{
- struct palmld_pata *lda;
- struct ata_port *ap;
- void __iomem *mem;
- struct device *dev = &pdev->dev;
- int ret;
-
- lda = devm_kzalloc(dev, sizeof(*lda), GFP_KERNEL);
- if (!lda)
- return -ENOMEM;
-
- /* allocate host */
- lda->host = ata_host_alloc(dev, 1);
- if (!lda->host)
- return -ENOMEM;
-
- /* remap drive's physical memory address */
- mem = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(mem))
- return PTR_ERR(mem);
-
- /* request and activate power and reset GPIOs */
- lda->power = devm_gpiod_get(dev, "power", GPIOD_OUT_HIGH);
- if (IS_ERR(lda->power))
- return PTR_ERR(lda->power);
- lda->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
- if (IS_ERR(lda->reset)) {
- gpiod_set_value(lda->power, 0);
- return PTR_ERR(lda->reset);
- }
-
- /* Assert reset to reset the drive */
- gpiod_set_value(lda->reset, 1);
- msleep(30);
- gpiod_set_value(lda->reset, 0);
- msleep(30);
-
- /* setup the ata port */
- ap = lda->host->ports[0];
- ap->ops = &palmld_port_ops;
- ap->pio_mask = ATA_PIO4;
- ap->flags |= ATA_FLAG_PIO_POLLING;
-
- /* memory mapping voodoo */
- ap->ioaddr.cmd_addr = mem + 0x10;
- ap->ioaddr.altstatus_addr = mem + 0xe;
- ap->ioaddr.ctl_addr = mem + 0xe;
-
- /* start the port */
- ata_sff_std_ports(&ap->ioaddr);
-
- /* activate host */
- ret = ata_host_activate(lda->host, 0, NULL, IRQF_TRIGGER_RISING,
- &palmld_sht);
- /* power down on failure */
- if (ret) {
- gpiod_set_value(lda->power, 0);
- return ret;
- }
-
- platform_set_drvdata(pdev, lda);
- return 0;
-}
-
-static int palmld_pata_remove(struct platform_device *pdev)
-{
- struct palmld_pata *lda = platform_get_drvdata(pdev);
-
- ata_platform_remove_one(pdev);
-
- /* power down the HDD */
- gpiod_set_value(lda->power, 0);
-
- return 0;
-}
-
-static struct platform_driver palmld_pata_platform_driver = {
- .driver = {
- .name = DRV_NAME,
- },
- .probe = palmld_pata_probe,
- .remove = palmld_pata_remove,
-};
-
-module_platform_driver(palmld_pata_platform_driver);
-
-MODULE_AUTHOR("Marek Vasut <marek.vasut@gmail.com>");
-MODULE_DESCRIPTION("PalmLD PATA driver");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/ata/pata_parport/Kconfig b/drivers/ata/pata_parport/Kconfig
new file mode 100644
index 000000000000..0893a13e7979
--- /dev/null
+++ b/drivers/ata/pata_parport/Kconfig
@@ -0,0 +1,141 @@
+# SPDX-License-Identifier: GPL-2.0
+
+comment "Parallel IDE protocol modules"
+ depends on PATA_PARPORT
+
+config PATA_PARPORT_ATEN
+ tristate "ATEN EH-100 protocol"
+ depends on PATA_PARPORT
+ help
+ This option enables support for the ATEN EH-100 parallel port IDE
+ protocol. This protocol is used in some inexpensive low performance
+ parallel port kits made in Hong Kong.
+
+config PATA_PARPORT_BPCK
+ tristate "MicroSolutions backpack (Series 5) protocol"
+ depends on PATA_PARPORT
+ help
+ This option enables support for the Micro Solutions BACKPACK
+ parallel port Series 5 IDE protocol. (Most BACKPACK drives made
+ before 1999 were Series 5) Series 5 drives will NOT always have the
+ Series noted on the bottom of the drive. Series 6 drivers will.
+
+ In other words, if your BACKPACK drive doesn't say "Series 6" on the
+ bottom, enable this option.
+
+config PATA_PARPORT_BPCK6
+ tristate "MicroSolutions backpack (Series 6) protocol"
+ depends on (PATA_PARPORT) && !64BIT
+ help
+ This option enables support for the Micro Solutions BACKPACK
+ parallel port Series 6 IDE protocol. (Most BACKPACK drives made
+ after 1999 were Series 6) Series 6 drives will have the Series noted
+ on the bottom of the drive. Series 5 drivers don't always have it
+ noted.
+
+ In other words, if your BACKPACK drive says "Series 6" on the
+ bottom, enable this option.
+
+config PATA_PARPORT_COMM
+ tristate "DataStor Commuter protocol"
+ depends on PATA_PARPORT
+ help
+ This option enables support for the Commuter parallel port IDE
+ protocol from DataStor.
+
+config PATA_PARPORT_DSTR
+ tristate "DataStor EP-2000 protocol"
+ depends on PATA_PARPORT
+ help
+ This option enables support for the EP-2000 parallel port IDE
+ protocol from DataStor
+
+config PATA_PARPORT_FIT2
+ tristate "FIT TD-2000 protocol"
+ depends on PATA_PARPORT
+ help
+ This option enables support for the TD-2000 parallel port IDE
+ protocol from Fidelity International Technology. This is a simple
+ (low speed) adapter that is used in some portable hard drives.
+
+config PATA_PARPORT_FIT3
+ tristate "FIT TD-3000 protocol"
+ depends on PATA_PARPORT
+ help
+ This option enables support for the TD-3000 parallel port IDE
+ protocol from Fidelity International Technology. This protocol is
+ used in newer models of their portable disk, CD-ROM and PD/CD
+ devices.
+
+config PATA_PARPORT_EPAT
+ tristate "Shuttle EPAT/EPEZ protocol"
+ depends on PATA_PARPORT
+ help
+ This option enables support for the EPAT parallel port IDE protocol.
+ EPAT is a parallel port IDE adapter manufactured by Shuttle
+ Technology and widely used in devices from major vendors such as
+ Hewlett-Packard, SyQuest, Imation and Avatar.
+
+config PATA_PARPORT_EPATC8
+ bool "Support c7/c8 chips"
+ depends on PATA_PARPORT_EPAT
+ help
+ This option enables support for the newer Shuttle EP1284 (aka c7 and
+ c8) chip. You need this if you are using any recent Imation SuperDisk
+ (LS-120) drive.
+
+config PATA_PARPORT_EPIA
+ tristate "Shuttle EPIA protocol"
+ depends on PATA_PARPORT
+ help
+ This option enables support for the (obsolete) EPIA parallel port
+ IDE protocol from Shuttle Technology. This adapter can still be
+ found in some no-name kits.
+
+config PATA_PARPORT_FRIQ
+ tristate "Freecom IQ ASIC-2 protocol"
+ depends on PATA_PARPORT
+ help
+ This option enables support for version 2 of the Freecom IQ parallel
+ port IDE adapter. This adapter is used by the Maxell Superdisk
+ drive.
+
+config PATA_PARPORT_FRPW
+ tristate "FreeCom power protocol"
+ depends on PATA_PARPORT
+ help
+ This option enables support for the Freecom power parallel port IDE
+ protocol.
+
+config PATA_PARPORT_KBIC
+ tristate "KingByte KBIC-951A/971A protocols"
+ depends on PATA_PARPORT
+ help
+ This option enables support for the KBIC-951A and KBIC-971A parallel
+ port IDE protocols from KingByte Information Corp. KingByte's
+ adapters appear in many no-name portable disk and CD-ROM products,
+ especially in Europe.
+
+config PATA_PARPORT_KTTI
+ tristate "KT PHd protocol"
+ depends on PATA_PARPORT
+ help
+ This option enables support for the "PHd" parallel port IDE protocol
+ from KT Technology. This is a simple (low speed) adapter that is
+ used in some 2.5" portable hard drives.
+
+config PATA_PARPORT_ON20
+ tristate "OnSpec 90c20 protocol"
+ depends on PATA_PARPORT
+ help
+ This option enables support for the (obsolete) 90c20 parallel port
+ IDE protocol from OnSpec (often marketed under the ValuStore brand
+ name).
+
+config PATA_PARPORT_ON26
+ tristate "OnSpec 90c26 protocol"
+ depends on PATA_PARPORT
+ help
+ This option enables support for the 90c26 parallel port IDE protocol
+ from OnSpec Electronics (often marketed under the ValuStore brand
+ name).
diff --git a/drivers/ata/pata_parport/Makefile b/drivers/ata/pata_parport/Makefile
new file mode 100644
index 000000000000..0932c8d55b91
--- /dev/null
+++ b/drivers/ata/pata_parport/Makefile
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_PATA_PARPORT) += pata_parport.o
+
+obj-$(CONFIG_PATA_PARPORT_ATEN) += aten.o
+obj-$(CONFIG_PATA_PARPORT_BPCK) += bpck.o
+obj-$(CONFIG_PATA_PARPORT_COMM) += comm.o
+obj-$(CONFIG_PATA_PARPORT_DSTR) += dstr.o
+obj-$(CONFIG_PATA_PARPORT_KBIC) += kbic.o
+obj-$(CONFIG_PATA_PARPORT_EPAT) += epat.o
+obj-$(CONFIG_PATA_PARPORT_EPIA) += epia.o
+obj-$(CONFIG_PATA_PARPORT_FRPW) += frpw.o
+obj-$(CONFIG_PATA_PARPORT_FRIQ) += friq.o
+obj-$(CONFIG_PATA_PARPORT_FIT2) += fit2.o
+obj-$(CONFIG_PATA_PARPORT_FIT3) += fit3.o
+obj-$(CONFIG_PATA_PARPORT_ON20) += on20.o
+obj-$(CONFIG_PATA_PARPORT_ON26) += on26.o
+obj-$(CONFIG_PATA_PARPORT_KTTI) += ktti.o
+obj-$(CONFIG_PATA_PARPORT_BPCK6) += bpck6.o
diff --git a/drivers/block/paride/aten.c b/drivers/ata/pata_parport/aten.c
index 2695465568ad..b66508bedbd0 100644
--- a/drivers/block/paride/aten.c
+++ b/drivers/ata/pata_parport/aten.c
@@ -25,7 +25,7 @@
#include <linux/types.h>
#include <asm/io.h>
-#include "paride.h"
+#include <linux/pata_parport.h>
#define j44(a,b) ((((a>>4)&0x0f)|(b&0xf0))^0x88)
diff --git a/drivers/block/paride/bpck.c b/drivers/ata/pata_parport/bpck.c
index d880a9465e9b..5fb3cf9ba11d 100644
--- a/drivers/block/paride/bpck.c
+++ b/drivers/ata/pata_parport/bpck.c
@@ -24,7 +24,7 @@
#include <linux/wait.h>
#include <asm/io.h>
-#include "paride.h"
+#include <linux/pata_parport.h>
#undef r2
#undef w2
diff --git a/drivers/block/paride/bpck6.c b/drivers/ata/pata_parport/bpck6.c
index ec64e7f5d1ce..d897e2a28efe 100644
--- a/drivers/block/paride/bpck6.c
+++ b/drivers/ata/pata_parport/bpck6.c
@@ -31,7 +31,7 @@
#include <linux/parport.h>
#include "ppc6lnx.c"
-#include "paride.h"
+#include <linux/pata_parport.h>
/* PARAMETERS */
static bool verbose; /* set this to 1 to see debugging messages and whatnot */
diff --git a/drivers/block/paride/comm.c b/drivers/ata/pata_parport/comm.c
index 9bcd35495323..1775e7ed9336 100644
--- a/drivers/block/paride/comm.c
+++ b/drivers/ata/pata_parport/comm.c
@@ -24,7 +24,7 @@
#include <linux/wait.h>
#include <asm/io.h>
-#include "paride.h"
+#include <linux/pata_parport.h>
/* mode codes: 0 nybble reads, 8-bit writes
1 8-bit reads and writes
diff --git a/drivers/block/paride/dstr.c b/drivers/ata/pata_parport/dstr.c
index accc5c777cbb..edf414d186a6 100644
--- a/drivers/block/paride/dstr.c
+++ b/drivers/ata/pata_parport/dstr.c
@@ -23,7 +23,7 @@
#include <linux/wait.h>
#include <asm/io.h>
-#include "paride.h"
+#include <linux/pata_parport.h>
/* mode codes: 0 nybble reads, 8-bit writes
1 8-bit reads and writes
diff --git a/drivers/block/paride/epat.c b/drivers/ata/pata_parport/epat.c
index 1bcdff77322e..6ce2dee7657f 100644
--- a/drivers/block/paride/epat.c
+++ b/drivers/ata/pata_parport/epat.c
@@ -26,7 +26,7 @@
#include <linux/wait.h>
#include <asm/io.h>
-#include "paride.h"
+#include <linux/pata_parport.h>
#define j44(a,b) (((a>>4)&0x0f)+(b&0xf0))
#define j53(a,b) (((a>>3)&0x1f)+((b<<4)&0xe0))
diff --git a/drivers/block/paride/epia.c b/drivers/ata/pata_parport/epia.c
index fb0e782d055e..417d5a3c7f72 100644
--- a/drivers/block/paride/epia.c
+++ b/drivers/ata/pata_parport/epia.c
@@ -27,7 +27,7 @@
#include <linux/wait.h>
#include <asm/io.h>
-#include "paride.h"
+#include <linux/pata_parport.h>
/* mode codes: 0 nybble reads on port 1, 8-bit writes
1 5/3 reads on ports 1 & 2, 8-bit writes
diff --git a/drivers/block/paride/fit2.c b/drivers/ata/pata_parport/fit2.c
index 381283753ae4..3c7a1069b026 100644
--- a/drivers/block/paride/fit2.c
+++ b/drivers/ata/pata_parport/fit2.c
@@ -23,7 +23,7 @@
#include <linux/wait.h>
#include <asm/io.h>
-#include "paride.h"
+#include <linux/pata_parport.h>
#define j44(a,b) (((a>>4)&0x0f)|(b&0xf0))
diff --git a/drivers/block/paride/fit3.c b/drivers/ata/pata_parport/fit3.c
index 275d269458eb..cd95f4f0edc2 100644
--- a/drivers/block/paride/fit3.c
+++ b/drivers/ata/pata_parport/fit3.c
@@ -27,7 +27,7 @@
#include <linux/wait.h>
#include <asm/io.h>
-#include "paride.h"
+#include <linux/pata_parport.h>
#define j44(a,b) (((a>>3)&0x0f)|((b<<1)&0xf0))
diff --git a/drivers/block/paride/friq.c b/drivers/ata/pata_parport/friq.c
index 4f2ba244689b..da1d0cb016d6 100644
--- a/drivers/block/paride/friq.c
+++ b/drivers/ata/pata_parport/friq.c
@@ -35,7 +35,7 @@
#include <linux/wait.h>
#include <asm/io.h>
-#include "paride.h"
+#include <linux/pata_parport.h>
#define CMD(x) w2(4);w0(0xff);w0(0xff);w0(0x73);w0(0x73);\
w0(0xc9);w0(0xc9);w0(0x26);w0(0x26);w0(x);w0(x);
diff --git a/drivers/block/paride/frpw.c b/drivers/ata/pata_parport/frpw.c
index c3cde364603a..7bc8fa16d5d8 100644
--- a/drivers/block/paride/frpw.c
+++ b/drivers/ata/pata_parport/frpw.c
@@ -33,7 +33,7 @@
#include <linux/wait.h>
#include <asm/io.h>
-#include "paride.h"
+#include <linux/pata_parport.h>
#define cec4 w2(0xc);w2(0xe);w2(0xe);w2(0xc);w2(4);w2(4);w2(4);
#define j44(l,h) (((l>>4)&0x0f)|(h&0xf0))
diff --git a/drivers/block/paride/kbic.c b/drivers/ata/pata_parport/kbic.c
index 35999c415ee3..f0960eb68635 100644
--- a/drivers/block/paride/kbic.c
+++ b/drivers/ata/pata_parport/kbic.c
@@ -28,7 +28,7 @@
#include <linux/wait.h>
#include <asm/io.h>
-#include "paride.h"
+#include <linux/pata_parport.h>
#define r12w() (delay_p,inw(pi->port+1)&0xffff)
diff --git a/drivers/block/paride/ktti.c b/drivers/ata/pata_parport/ktti.c
index 117ab0e8ccf0..fc4f707fed1f 100644
--- a/drivers/block/paride/ktti.c
+++ b/drivers/ata/pata_parport/ktti.c
@@ -19,7 +19,7 @@
#include <linux/wait.h>
#include <asm/io.h>
-#include "paride.h"
+#include <linux/pata_parport.h>
#define j44(a,b) (((a>>4)&0x0f)|(b&0xf0))
diff --git a/drivers/block/paride/on20.c b/drivers/ata/pata_parport/on20.c
index 0173697a1a4d..995fc41e3122 100644
--- a/drivers/block/paride/on20.c
+++ b/drivers/ata/pata_parport/on20.c
@@ -22,7 +22,7 @@
#include <linux/wait.h>
#include <asm/io.h>
-#include "paride.h"
+#include <linux/pata_parport.h>
#define op(f) w2(4);w0(f);w2(5);w2(0xd);w2(5);w2(0xd);w2(5);w2(4);
#define vl(v) w2(4);w0(v);w2(5);w2(7);w2(5);w2(4);
diff --git a/drivers/block/paride/on26.c b/drivers/ata/pata_parport/on26.c
index 95ba256921f2..35f1c481a782 100644
--- a/drivers/block/paride/on26.c
+++ b/drivers/ata/pata_parport/on26.c
@@ -26,7 +26,7 @@
#include <linux/wait.h>
#include <asm/io.h>
-#include "paride.h"
+#include <linux/pata_parport.h>
/* mode codes: 0 nybble reads, 8-bit writes
1 8-bit reads and writes
diff --git a/drivers/ata/pata_parport/pata_parport.c b/drivers/ata/pata_parport/pata_parport.c
new file mode 100644
index 000000000000..294a266a0dda
--- /dev/null
+++ b/drivers/ata/pata_parport/pata_parport.c
@@ -0,0 +1,761 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Ondrej Zary
+ * based on paride.c by Grant R. Guenther <grant@torque.net>
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/parport.h>
+#include <linux/pata_parport.h>
+
+#define DRV_NAME "pata_parport"
+
+static DEFINE_IDR(parport_list);
+static DEFINE_IDR(protocols);
+static DEFINE_IDA(pata_parport_bus_dev_ids);
+static DEFINE_MUTEX(pi_mutex);
+
+static bool probe = true;
+module_param(probe, bool, 0644);
+MODULE_PARM_DESC(probe, "Enable automatic device probing (0=off, 1=on [default])");
+
+/*
+ * libata drivers cannot sleep so this driver claims parport before activating
+ * the ata host and keeps it claimed (and protocol connected) until the ata
+ * host is removed. Unfortunately, this means that you cannot use any chained
+ * devices (neither other pata_parport devices nor a printer).
+ */
+static void pi_connect(struct pi_adapter *pi)
+{
+ parport_claim_or_block(pi->pardev);
+ pi->proto->connect(pi);
+}
+
+static void pi_disconnect(struct pi_adapter *pi)
+{
+ pi->proto->disconnect(pi);
+ parport_release(pi->pardev);
+}
+
+static void pata_parport_dev_select(struct ata_port *ap, unsigned int device)
+{
+ struct pi_adapter *pi = ap->host->private_data;
+ u8 tmp;
+
+ if (device == 0)
+ tmp = ATA_DEVICE_OBS;
+ else
+ tmp = ATA_DEVICE_OBS | ATA_DEV1;
+
+ pi->proto->write_regr(pi, 0, ATA_REG_DEVICE, tmp);
+ ata_sff_pause(ap);
+}
+
+static bool pata_parport_devchk(struct ata_port *ap, unsigned int device)
+{
+ struct pi_adapter *pi = ap->host->private_data;
+ u8 nsect, lbal;
+
+ pata_parport_dev_select(ap, device);
+
+ pi->proto->write_regr(pi, 0, ATA_REG_NSECT, 0x55);
+ pi->proto->write_regr(pi, 0, ATA_REG_LBAL, 0xaa);
+
+ pi->proto->write_regr(pi, 0, ATA_REG_NSECT, 0xaa);
+ pi->proto->write_regr(pi, 0, ATA_REG_LBAL, 0x55);
+
+ pi->proto->write_regr(pi, 0, ATA_REG_NSECT, 055);
+ pi->proto->write_regr(pi, 0, ATA_REG_LBAL, 0xaa);
+
+ nsect = pi->proto->read_regr(pi, 0, ATA_REG_NSECT);
+ lbal = pi->proto->read_regr(pi, 0, ATA_REG_LBAL);
+
+ return (nsect == 0x55) && (lbal == 0xaa);
+}
+
+static int pata_parport_bus_softreset(struct ata_port *ap, unsigned int devmask,
+ unsigned long deadline)
+{
+ struct pi_adapter *pi = ap->host->private_data;
+
+ /* software reset. causes dev0 to be selected */
+ pi->proto->write_regr(pi, 1, 6, ap->ctl);
+ udelay(20);
+ pi->proto->write_regr(pi, 1, 6, ap->ctl | ATA_SRST);
+ udelay(20);
+ pi->proto->write_regr(pi, 1, 6, ap->ctl);
+ ap->last_ctl = ap->ctl;
+
+ /* wait the port to become ready */
+ return ata_sff_wait_after_reset(&ap->link, devmask, deadline);
+}
+
+static int pata_parport_softreset(struct ata_link *link, unsigned int *classes,
+ unsigned long deadline)
+{
+ struct ata_port *ap = link->ap;
+ unsigned int devmask = 0;
+ int rc;
+ u8 err;
+
+ /* determine if device 0/1 are present */
+ if (pata_parport_devchk(ap, 0))
+ devmask |= (1 << 0);
+ if (pata_parport_devchk(ap, 1))
+ devmask |= (1 << 1);
+
+ /* select device 0 again */
+ pata_parport_dev_select(ap, 0);
+
+ /* issue bus reset */
+ rc = pata_parport_bus_softreset(ap, devmask, deadline);
+ if (rc && rc != -ENODEV) {
+ ata_link_err(link, "SRST failed (errno=%d)\n", rc);
+ return rc;
+ }
+
+ /* determine by signature whether we have ATA or ATAPI devices */
+ classes[0] = ata_sff_dev_classify(&link->device[0],
+ devmask & (1 << 0), &err);
+ if (err != 0x81)
+ classes[1] = ata_sff_dev_classify(&link->device[1],
+ devmask & (1 << 1), &err);
+
+ return 0;
+}
+
+static u8 pata_parport_check_status(struct ata_port *ap)
+{
+ struct pi_adapter *pi = ap->host->private_data;
+
+ return pi->proto->read_regr(pi, 0, ATA_REG_STATUS);
+}
+
+static u8 pata_parport_check_altstatus(struct ata_port *ap)
+{
+ struct pi_adapter *pi = ap->host->private_data;
+
+ return pi->proto->read_regr(pi, 1, 6);
+}
+
+static void pata_parport_tf_load(struct ata_port *ap,
+ const struct ata_taskfile *tf)
+{
+ struct pi_adapter *pi = ap->host->private_data;
+
+ if (tf->ctl != ap->last_ctl) {
+ pi->proto->write_regr(pi, 1, 6, tf->ctl);
+ ap->last_ctl = tf->ctl;
+ ata_wait_idle(ap);
+ }
+
+ if (tf->flags & ATA_TFLAG_ISADDR) {
+ if (tf->flags & ATA_TFLAG_LBA48) {
+ pi->proto->write_regr(pi, 0, ATA_REG_FEATURE,
+ tf->hob_feature);
+ pi->proto->write_regr(pi, 0, ATA_REG_NSECT,
+ tf->hob_nsect);
+ pi->proto->write_regr(pi, 0, ATA_REG_LBAL,
+ tf->hob_lbal);
+ pi->proto->write_regr(pi, 0, ATA_REG_LBAM,
+ tf->hob_lbam);
+ pi->proto->write_regr(pi, 0, ATA_REG_LBAH,
+ tf->hob_lbah);
+ }
+ pi->proto->write_regr(pi, 0, ATA_REG_FEATURE, tf->feature);
+ pi->proto->write_regr(pi, 0, ATA_REG_NSECT, tf->nsect);
+ pi->proto->write_regr(pi, 0, ATA_REG_LBAL, tf->lbal);
+ pi->proto->write_regr(pi, 0, ATA_REG_LBAM, tf->lbam);
+ pi->proto->write_regr(pi, 0, ATA_REG_LBAH, tf->lbah);
+ }
+
+ if (tf->flags & ATA_TFLAG_DEVICE)
+ pi->proto->write_regr(pi, 0, ATA_REG_DEVICE, tf->device);
+
+ ata_wait_idle(ap);
+}
+
+static void pata_parport_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
+{
+ struct pi_adapter *pi = ap->host->private_data;
+
+ tf->status = pi->proto->read_regr(pi, 0, ATA_REG_STATUS);
+ tf->error = pi->proto->read_regr(pi, 0, ATA_REG_ERR);
+ tf->nsect = pi->proto->read_regr(pi, 0, ATA_REG_NSECT);
+ tf->lbal = pi->proto->read_regr(pi, 0, ATA_REG_LBAL);
+ tf->lbam = pi->proto->read_regr(pi, 0, ATA_REG_LBAM);
+ tf->lbah = pi->proto->read_regr(pi, 0, ATA_REG_LBAH);
+ tf->device = pi->proto->read_regr(pi, 0, ATA_REG_DEVICE);
+
+ if (tf->flags & ATA_TFLAG_LBA48) {
+ pi->proto->write_regr(pi, 1, 6, tf->ctl | ATA_HOB);
+ tf->hob_feature = pi->proto->read_regr(pi, 0, ATA_REG_ERR);
+ tf->hob_nsect = pi->proto->read_regr(pi, 0, ATA_REG_NSECT);
+ tf->hob_lbal = pi->proto->read_regr(pi, 0, ATA_REG_LBAL);
+ tf->hob_lbam = pi->proto->read_regr(pi, 0, ATA_REG_LBAM);
+ tf->hob_lbah = pi->proto->read_regr(pi, 0, ATA_REG_LBAH);
+ pi->proto->write_regr(pi, 1, 6, tf->ctl);
+ ap->last_ctl = tf->ctl;
+ }
+}
+
+static void pata_parport_exec_command(struct ata_port *ap,
+ const struct ata_taskfile *tf)
+{
+ struct pi_adapter *pi = ap->host->private_data;
+
+ pi->proto->write_regr(pi, 0, ATA_REG_CMD, tf->command);
+ ata_sff_pause(ap);
+}
+
+static unsigned int pata_parport_data_xfer(struct ata_queued_cmd *qc,
+ unsigned char *buf, unsigned int buflen, int rw)
+{
+ struct ata_port *ap = qc->dev->link->ap;
+ struct pi_adapter *pi = ap->host->private_data;
+
+ if (rw == READ)
+ pi->proto->read_block(pi, buf, buflen);
+ else
+ pi->proto->write_block(pi, buf, buflen);
+
+ return buflen;
+}
+
+static void pata_parport_drain_fifo(struct ata_queued_cmd *qc)
+{
+ int count;
+ struct ata_port *ap;
+ struct pi_adapter *pi;
+ char junk[2];
+
+ /* We only need to flush incoming data when a command was running */
+ if (qc == NULL || qc->dma_dir == DMA_TO_DEVICE)
+ return;
+
+ ap = qc->ap;
+ pi = ap->host->private_data;
+ /* Drain up to 64K of data before we give up this recovery method */
+ for (count = 0; (pata_parport_check_status(ap) & ATA_DRQ)
+ && count < 65536; count += 2) {
+ pi->proto->read_block(pi, junk, 2);
+ }
+
+ if (count)
+ ata_port_dbg(ap, "drained %d bytes to clear DRQ\n", count);
+}
+
+static struct ata_port_operations pata_parport_port_ops = {
+ .inherits = &ata_sff_port_ops,
+
+ .softreset = pata_parport_softreset,
+ .hardreset = NULL,
+
+ .sff_dev_select = pata_parport_dev_select,
+ .sff_check_status = pata_parport_check_status,
+ .sff_check_altstatus = pata_parport_check_altstatus,
+ .sff_tf_load = pata_parport_tf_load,
+ .sff_tf_read = pata_parport_tf_read,
+ .sff_exec_command = pata_parport_exec_command,
+ .sff_data_xfer = pata_parport_data_xfer,
+ .sff_drain_fifo = pata_parport_drain_fifo,
+};
+
+static const struct ata_port_info pata_parport_port_info = {
+ .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_PIO_POLLING,
+ .pio_mask = ATA_PIO0,
+ /* No DMA */
+ .port_ops = &pata_parport_port_ops,
+};
+
+static void pi_release(struct pi_adapter *pi)
+{
+ parport_unregister_device(pi->pardev);
+ if (pi->proto->release_proto)
+ pi->proto->release_proto(pi);
+ module_put(pi->proto->owner);
+}
+
+static int default_test_proto(struct pi_adapter *pi, char *scratch)
+{
+ int j, k;
+ int e[2] = { 0, 0 };
+
+ pi->proto->connect(pi);
+
+ for (j = 0; j < 2; j++) {
+ pi->proto->write_regr(pi, 0, 6, 0xa0 + j * 0x10);
+ for (k = 0; k < 256; k++) {
+ pi->proto->write_regr(pi, 0, 2, k ^ 0xaa);
+ pi->proto->write_regr(pi, 0, 3, k ^ 0x55);
+ if (pi->proto->read_regr(pi, 0, 2) != (k ^ 0xaa))
+ e[j]++;
+ }
+ }
+ pi->proto->disconnect(pi);
+
+ dev_dbg(&pi->dev, "%s: port 0x%x, mode %d, test=(%d,%d)\n",
+ pi->proto->name, pi->port, pi->mode, e[0], e[1]);
+
+ return e[0] && e[1]; /* not here if both > 0 */
+}
+
+static int pi_test_proto(struct pi_adapter *pi, char *scratch)
+{
+ int res;
+
+ parport_claim_or_block(pi->pardev);
+ if (pi->proto->test_proto)
+ res = pi->proto->test_proto(pi, scratch, 1);
+ else
+ res = default_test_proto(pi, scratch);
+ parport_release(pi->pardev);
+
+ return res;
+}
+
+static bool pi_probe_mode(struct pi_adapter *pi, int max, char *scratch)
+{
+ int best, range;
+
+ if (pi->mode != -1) {
+ if (pi->mode >= max)
+ return false;
+ range = 3;
+ if (pi->mode >= pi->proto->epp_first)
+ range = 8;
+ if (range == 8 && pi->port % 8)
+ return false;
+ return !pi_test_proto(pi, scratch);
+ }
+ best = -1;
+ for (pi->mode = 0; pi->mode < max; pi->mode++) {
+ range = 3;
+ if (pi->mode >= pi->proto->epp_first)
+ range = 8;
+ if (range == 8 && pi->port % 8)
+ break;
+ if (!pi_test_proto(pi, scratch))
+ best = pi->mode;
+ }
+ pi->mode = best;
+ return best > -1;
+}
+
+static bool pi_probe_unit(struct pi_adapter *pi, int unit, char *scratch)
+{
+ int max, s, e;
+
+ s = unit;
+ e = s + 1;
+
+ if (s == -1) {
+ s = 0;
+ e = pi->proto->max_units;
+ }
+
+ if (pi->proto->test_port) {
+ parport_claim_or_block(pi->pardev);
+ max = pi->proto->test_port(pi);
+ parport_release(pi->pardev);
+ } else {
+ max = pi->proto->max_mode;
+ }
+
+ if (pi->proto->probe_unit) {
+ parport_claim_or_block(pi->pardev);
+ for (pi->unit = s; pi->unit < e; pi->unit++) {
+ if (pi->proto->probe_unit(pi)) {
+ parport_release(pi->pardev);
+ return pi_probe_mode(pi, max, scratch);
+ }
+ }
+ parport_release(pi->pardev);
+ return false;
+ }
+
+ return pi_probe_mode(pi, max, scratch);
+}
+
+static void pata_parport_dev_release(struct device *dev)
+{
+ struct pi_adapter *pi = container_of(dev, struct pi_adapter, dev);
+
+ kfree(pi);
+}
+
+static void pata_parport_bus_release(struct device *dev)
+{
+ /* nothing to do here but required to avoid warning on device removal */
+}
+
+static struct bus_type pata_parport_bus_type = {
+ .name = DRV_NAME,
+};
+
+static struct device pata_parport_bus = {
+ .init_name = DRV_NAME,
+ .release = pata_parport_bus_release,
+};
+
+static struct scsi_host_template pata_parport_sht = {
+ PATA_PARPORT_SHT("pata_parport")
+};
+
+struct pi_device_match {
+ struct parport *parport;
+ struct pi_protocol *proto;
+};
+
+static int pi_find_dev(struct device *dev, void *data)
+{
+ struct pi_adapter *pi = container_of(dev, struct pi_adapter, dev);
+ struct pi_device_match *match = data;
+
+ return pi->pardev->port == match->parport && pi->proto == match->proto;
+}
+
+static struct pi_adapter *pi_init_one(struct parport *parport,
+ struct pi_protocol *pr, int mode, int unit, int delay)
+{
+ struct pardev_cb par_cb = { };
+ char scratch[512];
+ const struct ata_port_info *ppi[] = { &pata_parport_port_info };
+ struct ata_host *host;
+ struct pi_adapter *pi;
+ struct pi_device_match match = { .parport = parport, .proto = pr };
+ int id;
+
+ /*
+ * Abort if there's a device already registered on the same parport
+ * using the same protocol.
+ */
+ if (bus_for_each_dev(&pata_parport_bus_type, NULL, &match, pi_find_dev))
+ return NULL;
+
+ pi = kzalloc(sizeof(struct pi_adapter), GFP_KERNEL);
+ if (!pi)
+ return NULL;
+
+ /* set up pi->dev before pi_probe_unit() so it can use dev_printk() */
+ pi->dev.parent = &pata_parport_bus;
+ pi->dev.bus = &pata_parport_bus_type;
+ pi->dev.driver = &pr->driver;
+ pi->dev.release = pata_parport_dev_release;
+ id = ida_alloc(&pata_parport_bus_dev_ids, GFP_KERNEL);
+ if (id < 0)
+ return NULL; /* pata_parport_dev_release will do kfree(pi) */
+ pi->dev.id = id;
+ dev_set_name(&pi->dev, "pata_parport.%u", pi->dev.id);
+ if (device_register(&pi->dev)) {
+ put_device(&pi->dev);
+ goto out_ida_free;
+ }
+
+ pi->proto = pr;
+
+ if (!try_module_get(pi->proto->owner))
+ goto out_unreg_dev;
+ if (pi->proto->init_proto && pi->proto->init_proto(pi) < 0)
+ goto out_module_put;
+
+ pi->delay = (delay == -1) ? pi->proto->default_delay : delay;
+ pi->mode = mode;
+ pi->port = parport->base;
+
+ par_cb.private = pi;
+ pi->pardev = parport_register_dev_model(parport, DRV_NAME, &par_cb,
+ pi->dev.id);
+ if (!pi->pardev)
+ goto out_module_put;
+
+ if (!pi_probe_unit(pi, unit, scratch)) {
+ dev_info(&pi->dev, "Adapter not found\n");
+ goto out_unreg_parport;
+ }
+
+ pi->proto->log_adapter(pi, scratch, 1);
+
+ host = ata_host_alloc_pinfo(&pi->pardev->dev, ppi, 1);
+ if (!host)
+ goto out_unreg_parport;
+ dev_set_drvdata(&pi->dev, host);
+ host->private_data = pi;
+
+ ata_port_desc(host->ports[0], "port %s", pi->pardev->port->name);
+ ata_port_desc(host->ports[0], "protocol %s", pi->proto->name);
+
+ pi_connect(pi);
+ if (ata_host_activate(host, 0, NULL, 0, &pata_parport_sht))
+ goto out_unreg_parport;
+
+ return pi;
+
+out_unreg_parport:
+ pi_disconnect(pi);
+ parport_unregister_device(pi->pardev);
+ if (pi->proto->release_proto)
+ pi->proto->release_proto(pi);
+out_module_put:
+ module_put(pi->proto->owner);
+out_unreg_dev:
+ device_unregister(&pi->dev);
+out_ida_free:
+ ida_free(&pata_parport_bus_dev_ids, pi->dev.id);
+ return NULL;
+}
+
+int pata_parport_register_driver(struct pi_protocol *pr)
+{
+ int error;
+ struct parport *parport;
+ int port_num;
+
+ pr->driver.bus = &pata_parport_bus_type;
+ pr->driver.name = pr->name;
+ error = driver_register(&pr->driver);
+ if (error)
+ return error;
+
+ mutex_lock(&pi_mutex);
+ error = idr_alloc(&protocols, pr, 0, 0, GFP_KERNEL);
+ if (error < 0) {
+ driver_unregister(&pr->driver);
+ mutex_unlock(&pi_mutex);
+ return error;
+ }
+
+ pr_info("pata_parport: protocol %s registered\n", pr->name);
+
+ if (probe) {
+ /* probe all parports using this protocol */
+ idr_for_each_entry(&parport_list, parport, port_num)
+ pi_init_one(parport, pr, -1, 0, -1);
+ }
+ mutex_unlock(&pi_mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pata_parport_register_driver);
+
+void pata_parport_unregister_driver(struct pi_protocol *pr)
+{
+ struct pi_protocol *pr_iter;
+ int id = -1;
+
+ mutex_lock(&pi_mutex);
+ idr_for_each_entry(&protocols, pr_iter, id) {
+ if (pr_iter == pr)
+ break;
+ }
+ idr_remove(&protocols, id);
+ mutex_unlock(&pi_mutex);
+ driver_unregister(&pr->driver);
+}
+EXPORT_SYMBOL_GPL(pata_parport_unregister_driver);
+
+static ssize_t new_device_store(struct bus_type *bus, const char *buf,
+ size_t count)
+{
+ char port[12] = "auto";
+ char protocol[8] = "auto";
+ int mode = -1, unit = -1, delay = -1;
+ struct pi_protocol *pr, *pr_wanted;
+ struct device_driver *drv;
+ struct parport *parport;
+ int port_num, port_wanted, pr_num;
+ bool ok = false;
+
+ if (sscanf(buf, "%11s %7s %d %d %d",
+ port, protocol, &mode, &unit, &delay) < 1)
+ return -EINVAL;
+
+ if (sscanf(port, "parport%u", &port_wanted) < 1) {
+ if (strcmp(port, "auto")) {
+ pr_err("invalid port name %s\n", port);
+ return -EINVAL;
+ }
+ port_wanted = -1;
+ }
+
+ drv = driver_find(protocol, &pata_parport_bus_type);
+ if (!drv) {
+ if (strcmp(protocol, "auto")) {
+ pr_err("protocol %s not found\n", protocol);
+ return -EINVAL;
+ }
+ pr_wanted = NULL;
+ } else {
+ pr_wanted = container_of(drv, struct pi_protocol, driver);
+ }
+
+ mutex_lock(&pi_mutex);
+ /* walk all parports */
+ idr_for_each_entry(&parport_list, parport, port_num) {
+ if (port_num == port_wanted || port_wanted == -1) {
+ parport = parport_find_number(port_num);
+ if (!parport) {
+ pr_err("no such port %s\n", port);
+ mutex_unlock(&pi_mutex);
+ return -ENODEV;
+ }
+ /* walk all protocols */
+ idr_for_each_entry(&protocols, pr, pr_num) {
+ if (pr == pr_wanted || !pr_wanted)
+ if (pi_init_one(parport, pr, mode, unit,
+ delay))
+ ok = true;
+ }
+ parport_put_port(parport);
+ }
+ }
+ mutex_unlock(&pi_mutex);
+ if (!ok)
+ return -ENODEV;
+
+ return count;
+}
+static BUS_ATTR_WO(new_device);
+
+static void pi_remove_one(struct device *dev)
+{
+ struct ata_host *host = dev_get_drvdata(dev);
+ struct pi_adapter *pi = host->private_data;
+
+ ata_host_detach(host);
+ pi_disconnect(pi);
+ pi_release(pi);
+ device_unregister(dev);
+ ida_free(&pata_parport_bus_dev_ids, dev->id);
+ /* pata_parport_dev_release will do kfree(pi) */
+}
+
+static ssize_t delete_device_store(struct bus_type *bus, const char *buf,
+ size_t count)
+{
+ struct device *dev;
+
+ mutex_lock(&pi_mutex);
+ dev = bus_find_device_by_name(bus, NULL, buf);
+ if (!dev) {
+ mutex_unlock(&pi_mutex);
+ return -ENODEV;
+ }
+
+ pi_remove_one(dev);
+ mutex_unlock(&pi_mutex);
+
+ return count;
+}
+static BUS_ATTR_WO(delete_device);
+
+static void pata_parport_attach(struct parport *port)
+{
+ struct pi_protocol *pr;
+ int pr_num, id;
+
+ mutex_lock(&pi_mutex);
+ id = idr_alloc(&parport_list, port, port->number, port->number,
+ GFP_KERNEL);
+ if (id < 0) {
+ mutex_unlock(&pi_mutex);
+ return;
+ }
+
+ if (probe) {
+ /* probe this port using all protocols */
+ idr_for_each_entry(&protocols, pr, pr_num)
+ pi_init_one(port, pr, -1, 0, -1);
+ }
+ mutex_unlock(&pi_mutex);
+}
+
+static int pi_remove_port(struct device *dev, void *p)
+{
+ struct ata_host *host = dev_get_drvdata(dev);
+ struct pi_adapter *pi = host->private_data;
+
+ if (pi->pardev->port == p)
+ pi_remove_one(dev);
+
+ return 0;
+}
+
+static void pata_parport_detach(struct parport *port)
+{
+ mutex_lock(&pi_mutex);
+ bus_for_each_dev(&pata_parport_bus_type, NULL, port, pi_remove_port);
+ idr_remove(&parport_list, port->number);
+ mutex_unlock(&pi_mutex);
+}
+
+static struct parport_driver pata_parport_driver = {
+ .name = DRV_NAME,
+ .match_port = pata_parport_attach,
+ .detach = pata_parport_detach,
+ .devmodel = true,
+};
+
+static __init int pata_parport_init(void)
+{
+ int error;
+
+ error = bus_register(&pata_parport_bus_type);
+ if (error) {
+ pr_err("failed to register pata_parport bus, error: %d\n", error);
+ return error;
+ }
+
+ error = device_register(&pata_parport_bus);
+ if (error) {
+ pr_err("failed to register pata_parport bus, error: %d\n", error);
+ goto out_unregister_bus;
+ }
+
+ error = bus_create_file(&pata_parport_bus_type, &bus_attr_new_device);
+ if (error) {
+ pr_err("unable to create sysfs file, error: %d\n", error);
+ goto out_unregister_dev;
+ }
+
+ error = bus_create_file(&pata_parport_bus_type, &bus_attr_delete_device);
+ if (error) {
+ pr_err("unable to create sysfs file, error: %d\n", error);
+ goto out_remove_new;
+ }
+
+ error = parport_register_driver(&pata_parport_driver);
+ if (error) {
+ pr_err("unable to register parport driver, error: %d\n", error);
+ goto out_remove_del;
+ }
+
+ return 0;
+
+out_remove_del:
+ bus_remove_file(&pata_parport_bus_type, &bus_attr_delete_device);
+out_remove_new:
+ bus_remove_file(&pata_parport_bus_type, &bus_attr_new_device);
+out_unregister_dev:
+ device_unregister(&pata_parport_bus);
+out_unregister_bus:
+ bus_unregister(&pata_parport_bus_type);
+ return error;
+}
+
+static __exit void pata_parport_exit(void)
+{
+ parport_unregister_driver(&pata_parport_driver);
+ bus_remove_file(&pata_parport_bus_type, &bus_attr_new_device);
+ bus_remove_file(&pata_parport_bus_type, &bus_attr_delete_device);
+ device_unregister(&pata_parport_bus);
+ bus_unregister(&pata_parport_bus_type);
+}
+
+MODULE_AUTHOR("Ondrej Zary");
+MODULE_DESCRIPTION("driver for parallel port ATA adapters");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("paride");
+
+module_init(pata_parport_init);
+module_exit(pata_parport_exit);
diff --git a/drivers/block/paride/ppc6lnx.c b/drivers/ata/pata_parport/ppc6lnx.c
index 5e5521d3b1dd..5e5521d3b1dd 100644
--- a/drivers/block/paride/ppc6lnx.c
+++ b/drivers/ata/pata_parport/ppc6lnx.c
diff --git a/drivers/ata/pata_samsung_cf.c b/drivers/ata/pata_samsung_cf.c
deleted file mode 100644
index aba1536ddd44..000000000000
--- a/drivers/ata/pata_samsung_cf.c
+++ /dev/null
@@ -1,662 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- * http://www.samsung.com
- *
- * PATA driver for Samsung SoCs.
- * Supports CF Interface in True IDE mode. Currently only PIO mode has been
- * implemented; UDMA support has to be added.
- *
- * Based on:
- * PATA driver for AT91SAM9260 Static Memory Controller
- * PATA driver for Toshiba SCC controller
-*/
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/mod_devicetable.h>
-#include <linux/init.h>
-#include <linux/clk.h>
-#include <linux/libata.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-
-#include <linux/platform_data/ata-samsung_cf.h>
-
-#define DRV_NAME "pata_samsung_cf"
-#define DRV_VERSION "0.1"
-
-#define S3C_CFATA_REG(x) (x)
-#define S3C_CFATA_MUX S3C_CFATA_REG(0x0)
-#define S3C_ATA_CTRL S3C_CFATA_REG(0x0)
-#define S3C_ATA_CMD S3C_CFATA_REG(0x8)
-#define S3C_ATA_IRQ S3C_CFATA_REG(0x10)
-#define S3C_ATA_IRQ_MSK S3C_CFATA_REG(0x14)
-#define S3C_ATA_CFG S3C_CFATA_REG(0x18)
-
-#define S3C_ATA_PIO_TIME S3C_CFATA_REG(0x2c)
-#define S3C_ATA_PIO_DTR S3C_CFATA_REG(0x54)
-#define S3C_ATA_PIO_FED S3C_CFATA_REG(0x58)
-#define S3C_ATA_PIO_SCR S3C_CFATA_REG(0x5c)
-#define S3C_ATA_PIO_LLR S3C_CFATA_REG(0x60)
-#define S3C_ATA_PIO_LMR S3C_CFATA_REG(0x64)
-#define S3C_ATA_PIO_LHR S3C_CFATA_REG(0x68)
-#define S3C_ATA_PIO_DVR S3C_CFATA_REG(0x6c)
-#define S3C_ATA_PIO_CSD S3C_CFATA_REG(0x70)
-#define S3C_ATA_PIO_DAD S3C_CFATA_REG(0x74)
-#define S3C_ATA_PIO_RDATA S3C_CFATA_REG(0x7c)
-
-#define S3C_CFATA_MUX_TRUEIDE 0x01
-#define S3C_ATA_CFG_SWAP 0x40
-#define S3C_ATA_CFG_IORDYEN 0x02
-
-enum s3c_cpu_type {
- TYPE_S3C64XX,
- TYPE_S5PV210,
-};
-
-/*
- * struct s3c_ide_info - S3C PATA instance.
- * @clk: The clock resource for this controller.
- * @ide_addr: The area mapped for the hardware registers.
- * @sfr_addr: The area mapped for the special function registers.
- * @irq: The IRQ number we are using.
- * @cpu_type: The exact type of this controller.
- * @fifo_status_reg: The ATA_FIFO_STATUS register offset.
- */
-struct s3c_ide_info {
- struct clk *clk;
- void __iomem *ide_addr;
- void __iomem *sfr_addr;
- int irq;
- enum s3c_cpu_type cpu_type;
- unsigned int fifo_status_reg;
-};
-
-static void pata_s3c_set_endian(void __iomem *s3c_ide_regbase, u8 mode)
-{
- u32 reg = readl(s3c_ide_regbase + S3C_ATA_CFG);
- reg = mode ? (reg & ~S3C_ATA_CFG_SWAP) : (reg | S3C_ATA_CFG_SWAP);
- writel(reg, s3c_ide_regbase + S3C_ATA_CFG);
-}
-
-static void pata_s3c_cfg_mode(void __iomem *s3c_ide_sfrbase)
-{
- /* Select true-ide as the internal operating mode */
- writel(readl(s3c_ide_sfrbase + S3C_CFATA_MUX) | S3C_CFATA_MUX_TRUEIDE,
- s3c_ide_sfrbase + S3C_CFATA_MUX);
-}
-
-static unsigned long
-pata_s3c_setup_timing(struct s3c_ide_info *info, const struct ata_timing *ata)
-{
- int t1 = ata->setup;
- int t2 = ata->act8b;
- int t2i = ata->rec8b;
- ulong piotime;
-
- piotime = ((t2i & 0xff) << 12) | ((t2 & 0xff) << 4) | (t1 & 0xf);
-
- return piotime;
-}
-
-static void pata_s3c_set_piomode(struct ata_port *ap, struct ata_device *adev)
-{
- struct s3c_ide_info *info = ap->host->private_data;
- struct ata_timing timing;
- int cycle_time;
- ulong ata_cfg = readl(info->ide_addr + S3C_ATA_CFG);
- ulong piotime;
-
- /* Enables IORDY if mode requires it */
- if (ata_pio_need_iordy(adev))
- ata_cfg |= S3C_ATA_CFG_IORDYEN;
- else
- ata_cfg &= ~S3C_ATA_CFG_IORDYEN;
-
- cycle_time = (int)(1000000000UL / clk_get_rate(info->clk));
-
- ata_timing_compute(adev, adev->pio_mode, &timing,
- cycle_time * 1000, 0);
-
- piotime = pata_s3c_setup_timing(info, &timing);
-
- writel(ata_cfg, info->ide_addr + S3C_ATA_CFG);
- writel(piotime, info->ide_addr + S3C_ATA_PIO_TIME);
-}
-
-/*
- * Waits until the IDE controller is able to perform next read/write
- * operation to the disk. Needed for 64XX series boards only.
- */
-static int wait_for_host_ready(struct s3c_ide_info *info)
-{
- ulong timeout;
- void __iomem *fifo_reg = info->ide_addr + info->fifo_status_reg;
-
- /* wait for maximum of 20 msec */
- timeout = jiffies + msecs_to_jiffies(20);
- while (time_before(jiffies, timeout)) {
- if ((readl(fifo_reg) >> 28) == 0)
- return 0;
- }
- return -EBUSY;
-}
-
-/*
- * Writes to one of the task file registers.
- */
-static void ata_outb(struct ata_host *host, u8 addr, void __iomem *reg)
-{
- struct s3c_ide_info *info = host->private_data;
-
- wait_for_host_ready(info);
- writeb(addr, reg);
-}
-
-/*
- * Reads from one of the task file registers.
- */
-static u8 ata_inb(struct ata_host *host, void __iomem *reg)
-{
- struct s3c_ide_info *info = host->private_data;
- u8 temp;
-
- wait_for_host_ready(info);
- (void) readb(reg);
- wait_for_host_ready(info);
- temp = readb(info->ide_addr + S3C_ATA_PIO_RDATA);
- return temp;
-}
-
-/*
- * pata_s3c_tf_load - send taskfile registers to host controller
- */
-static void pata_s3c_tf_load(struct ata_port *ap,
- const struct ata_taskfile *tf)
-{
- struct ata_ioports *ioaddr = &ap->ioaddr;
- unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
-
- if (tf->ctl != ap->last_ctl) {
- ata_outb(ap->host, tf->ctl, ioaddr->ctl_addr);
- ap->last_ctl = tf->ctl;
- ata_wait_idle(ap);
- }
-
- if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
- ata_outb(ap->host, tf->hob_feature, ioaddr->feature_addr);
- ata_outb(ap->host, tf->hob_nsect, ioaddr->nsect_addr);
- ata_outb(ap->host, tf->hob_lbal, ioaddr->lbal_addr);
- ata_outb(ap->host, tf->hob_lbam, ioaddr->lbam_addr);
- ata_outb(ap->host, tf->hob_lbah, ioaddr->lbah_addr);
- }
-
- if (is_addr) {
- ata_outb(ap->host, tf->feature, ioaddr->feature_addr);
- ata_outb(ap->host, tf->nsect, ioaddr->nsect_addr);
- ata_outb(ap->host, tf->lbal, ioaddr->lbal_addr);
- ata_outb(ap->host, tf->lbam, ioaddr->lbam_addr);
- ata_outb(ap->host, tf->lbah, ioaddr->lbah_addr);
- }
-
- if (tf->flags & ATA_TFLAG_DEVICE)
- ata_outb(ap->host, tf->device, ioaddr->device_addr);
-
- ata_wait_idle(ap);
-}
-
-/*
- * pata_s3c_tf_read - input device's ATA taskfile shadow registers
- */
-static void pata_s3c_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
-{
- struct ata_ioports *ioaddr = &ap->ioaddr;
-
- tf->error = ata_inb(ap->host, ioaddr->error_addr);
- tf->nsect = ata_inb(ap->host, ioaddr->nsect_addr);
- tf->lbal = ata_inb(ap->host, ioaddr->lbal_addr);
- tf->lbam = ata_inb(ap->host, ioaddr->lbam_addr);
- tf->lbah = ata_inb(ap->host, ioaddr->lbah_addr);
- tf->device = ata_inb(ap->host, ioaddr->device_addr);
-
- if (tf->flags & ATA_TFLAG_LBA48) {
- ata_outb(ap->host, tf->ctl | ATA_HOB, ioaddr->ctl_addr);
- tf->hob_feature = ata_inb(ap->host, ioaddr->error_addr);
- tf->hob_nsect = ata_inb(ap->host, ioaddr->nsect_addr);
- tf->hob_lbal = ata_inb(ap->host, ioaddr->lbal_addr);
- tf->hob_lbam = ata_inb(ap->host, ioaddr->lbam_addr);
- tf->hob_lbah = ata_inb(ap->host, ioaddr->lbah_addr);
- ata_outb(ap->host, tf->ctl, ioaddr->ctl_addr);
- ap->last_ctl = tf->ctl;
- }
-}
-
-/*
- * pata_s3c_exec_command - issue ATA command to host controller
- */
-static void pata_s3c_exec_command(struct ata_port *ap,
- const struct ata_taskfile *tf)
-{
- ata_outb(ap->host, tf->command, ap->ioaddr.command_addr);
- ata_sff_pause(ap);
-}
-
-/*
- * pata_s3c_check_status - Read device status register
- */
-static u8 pata_s3c_check_status(struct ata_port *ap)
-{
- return ata_inb(ap->host, ap->ioaddr.status_addr);
-}
-
-/*
- * pata_s3c_check_altstatus - Read alternate device status register
- */
-static u8 pata_s3c_check_altstatus(struct ata_port *ap)
-{
- return ata_inb(ap->host, ap->ioaddr.altstatus_addr);
-}
-
-/*
- * pata_s3c_data_xfer - Transfer data by PIO
- */
-static unsigned int pata_s3c_data_xfer(struct ata_queued_cmd *qc,
- unsigned char *buf, unsigned int buflen, int rw)
-{
- struct ata_port *ap = qc->dev->link->ap;
- struct s3c_ide_info *info = ap->host->private_data;
- void __iomem *data_addr = ap->ioaddr.data_addr;
- unsigned int words = buflen >> 1, i;
- u16 *data_ptr = (u16 *)buf;
-
- /* Requires wait same as in ata_inb/ata_outb */
- if (rw == READ)
- for (i = 0; i < words; i++, data_ptr++) {
- wait_for_host_ready(info);
- (void) readw(data_addr);
- wait_for_host_ready(info);
- *data_ptr = readw(info->ide_addr
- + S3C_ATA_PIO_RDATA);
- }
- else
- for (i = 0; i < words; i++, data_ptr++) {
- wait_for_host_ready(info);
- writew(*data_ptr, data_addr);
- }
-
- if (buflen & 0x01)
- dev_err(ap->dev, "unexpected trailing data\n");
-
- return words << 1;
-}
-
-/*
- * pata_s3c_dev_select - Select device on ATA bus
- */
-static void pata_s3c_dev_select(struct ata_port *ap, unsigned int device)
-{
- u8 tmp = ATA_DEVICE_OBS;
-
- if (device != 0)
- tmp |= ATA_DEV1;
-
- ata_outb(ap->host, tmp, ap->ioaddr.device_addr);
- ata_sff_pause(ap);
-}
-
-/*
- * pata_s3c_devchk - PATA device presence detection
- */
-static bool pata_s3c_devchk(struct ata_port *ap, unsigned int device)
-{
- struct ata_ioports *ioaddr = &ap->ioaddr;
- u8 nsect, lbal;
-
- pata_s3c_dev_select(ap, device);
-
- ata_outb(ap->host, 0x55, ioaddr->nsect_addr);
- ata_outb(ap->host, 0xaa, ioaddr->lbal_addr);
-
- ata_outb(ap->host, 0xaa, ioaddr->nsect_addr);
- ata_outb(ap->host, 0x55, ioaddr->lbal_addr);
-
- ata_outb(ap->host, 0x55, ioaddr->nsect_addr);
- ata_outb(ap->host, 0xaa, ioaddr->lbal_addr);
-
- nsect = ata_inb(ap->host, ioaddr->nsect_addr);
- lbal = ata_inb(ap->host, ioaddr->lbal_addr);
-
- if ((nsect == 0x55) && (lbal == 0xaa))
- return true; /* we found a device */
-
- return false; /* nothing found */
-}
-
-/*
- * pata_s3c_wait_after_reset - wait for devices to become ready after reset
- */
-static int pata_s3c_wait_after_reset(struct ata_link *link,
- unsigned long deadline)
-{
- int rc;
-
- ata_msleep(link->ap, ATA_WAIT_AFTER_RESET);
-
- /* always check readiness of the master device */
- rc = ata_sff_wait_ready(link, deadline);
- /* -ENODEV means the odd clown forgot the D7 pulldown resistor
- * and TF status is 0xff, bail out on it too.
- */
- if (rc)
- return rc;
-
- return 0;
-}
-
-/*
- * pata_s3c_bus_softreset - PATA device software reset
- */
-static int pata_s3c_bus_softreset(struct ata_port *ap,
- unsigned long deadline)
-{
- struct ata_ioports *ioaddr = &ap->ioaddr;
-
- /* software reset. causes dev0 to be selected */
- ata_outb(ap->host, ap->ctl, ioaddr->ctl_addr);
- udelay(20);
- ata_outb(ap->host, ap->ctl | ATA_SRST, ioaddr->ctl_addr);
- udelay(20);
- ata_outb(ap->host, ap->ctl, ioaddr->ctl_addr);
- ap->last_ctl = ap->ctl;
-
- return pata_s3c_wait_after_reset(&ap->link, deadline);
-}
-
-/*
- * pata_s3c_softreset - reset host port via ATA SRST
- */
-static int pata_s3c_softreset(struct ata_link *link, unsigned int *classes,
- unsigned long deadline)
-{
- struct ata_port *ap = link->ap;
- unsigned int devmask = 0;
- int rc;
- u8 err;
-
- /* determine if device 0 is present */
- if (pata_s3c_devchk(ap, 0))
- devmask |= (1 << 0);
-
- /* select device 0 again */
- pata_s3c_dev_select(ap, 0);
-
- /* issue bus reset */
- rc = pata_s3c_bus_softreset(ap, deadline);
- /* if link is occupied, -ENODEV too is an error */
- if (rc && rc != -ENODEV) {
- ata_link_err(link, "SRST failed (errno=%d)\n", rc);
- return rc;
- }
-
- /* determine by signature whether we have ATA or ATAPI devices */
- classes[0] = ata_sff_dev_classify(&ap->link.device[0],
- devmask & (1 << 0), &err);
-
- return 0;
-}
-
-/*
- * pata_s3c_set_devctl - Write device control register
- */
-static void pata_s3c_set_devctl(struct ata_port *ap, u8 ctl)
-{
- ata_outb(ap->host, ctl, ap->ioaddr.ctl_addr);
-}
-
-static struct scsi_host_template pata_s3c_sht = {
- ATA_PIO_SHT(DRV_NAME),
-};
-
-static struct ata_port_operations pata_s3c_port_ops = {
- .inherits = &ata_sff_port_ops,
- .sff_check_status = pata_s3c_check_status,
- .sff_check_altstatus = pata_s3c_check_altstatus,
- .sff_tf_load = pata_s3c_tf_load,
- .sff_tf_read = pata_s3c_tf_read,
- .sff_data_xfer = pata_s3c_data_xfer,
- .sff_exec_command = pata_s3c_exec_command,
- .sff_dev_select = pata_s3c_dev_select,
- .sff_set_devctl = pata_s3c_set_devctl,
- .softreset = pata_s3c_softreset,
- .set_piomode = pata_s3c_set_piomode,
-};
-
-static struct ata_port_operations pata_s5p_port_ops = {
- .inherits = &ata_sff_port_ops,
- .set_piomode = pata_s3c_set_piomode,
-};
-
-static void pata_s3c_enable(void __iomem *s3c_ide_regbase, bool state)
-{
- u32 temp = readl(s3c_ide_regbase + S3C_ATA_CTRL);
- temp = state ? (temp | 1) : (temp & ~1);
- writel(temp, s3c_ide_regbase + S3C_ATA_CTRL);
-}
-
-static irqreturn_t pata_s3c_irq(int irq, void *dev_instance)
-{
- struct ata_host *host = dev_instance;
- struct s3c_ide_info *info = host->private_data;
- u32 reg;
-
- reg = readl(info->ide_addr + S3C_ATA_IRQ);
- writel(reg, info->ide_addr + S3C_ATA_IRQ);
-
- return ata_sff_interrupt(irq, dev_instance);
-}
-
-static void pata_s3c_hwinit(struct s3c_ide_info *info,
- struct s3c_ide_platdata *pdata)
-{
- switch (info->cpu_type) {
- case TYPE_S3C64XX:
- /* Configure as big endian */
- pata_s3c_cfg_mode(info->sfr_addr);
- pata_s3c_set_endian(info->ide_addr, 1);
- pata_s3c_enable(info->ide_addr, true);
- msleep(100);
-
- /* Remove IRQ Status */
- writel(0x1f, info->ide_addr + S3C_ATA_IRQ);
- writel(0x1b, info->ide_addr + S3C_ATA_IRQ_MSK);
- break;
-
- case TYPE_S5PV210:
- /* Configure as little endian */
- pata_s3c_set_endian(info->ide_addr, 0);
- pata_s3c_enable(info->ide_addr, true);
- msleep(100);
-
- /* Remove IRQ Status */
- writel(0x3f, info->ide_addr + S3C_ATA_IRQ);
- writel(0x3f, info->ide_addr + S3C_ATA_IRQ_MSK);
- break;
-
- default:
- BUG();
- }
-}
-
-static int __init pata_s3c_probe(struct platform_device *pdev)
-{
- struct s3c_ide_platdata *pdata = dev_get_platdata(&pdev->dev);
- struct device *dev = &pdev->dev;
- struct s3c_ide_info *info;
- struct resource *res;
- struct ata_port *ap;
- struct ata_host *host;
- enum s3c_cpu_type cpu_type;
- int ret;
-
- cpu_type = platform_get_device_id(pdev)->driver_data;
-
- info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
- if (!info)
- return -ENOMEM;
-
- info->irq = platform_get_irq(pdev, 0);
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
- info->ide_addr = devm_ioremap_resource(dev, res);
- if (IS_ERR(info->ide_addr))
- return PTR_ERR(info->ide_addr);
-
- info->clk = devm_clk_get(&pdev->dev, "cfcon");
- if (IS_ERR(info->clk)) {
- dev_err(dev, "failed to get access to cf controller clock\n");
- ret = PTR_ERR(info->clk);
- info->clk = NULL;
- return ret;
- }
-
- clk_enable(info->clk);
-
- /* init ata host */
- host = ata_host_alloc(dev, 1);
- if (!host) {
- dev_err(dev, "failed to allocate ide host\n");
- ret = -ENOMEM;
- goto stop_clk;
- }
-
- ap = host->ports[0];
- ap->pio_mask = ATA_PIO4;
-
- if (cpu_type == TYPE_S3C64XX) {
- ap->ops = &pata_s3c_port_ops;
- info->sfr_addr = info->ide_addr + 0x1800;
- info->ide_addr += 0x1900;
- info->fifo_status_reg = 0x94;
- } else {
- ap->ops = &pata_s5p_port_ops;
- info->fifo_status_reg = 0x84;
- }
-
- info->cpu_type = cpu_type;
-
- if (info->irq <= 0) {
- ap->flags |= ATA_FLAG_PIO_POLLING;
- info->irq = 0;
- ata_port_desc(ap, "no IRQ, using PIO polling\n");
- }
-
- ap->ioaddr.cmd_addr = info->ide_addr + S3C_ATA_CMD;
- ap->ioaddr.data_addr = info->ide_addr + S3C_ATA_PIO_DTR;
- ap->ioaddr.error_addr = info->ide_addr + S3C_ATA_PIO_FED;
- ap->ioaddr.feature_addr = info->ide_addr + S3C_ATA_PIO_FED;
- ap->ioaddr.nsect_addr = info->ide_addr + S3C_ATA_PIO_SCR;
- ap->ioaddr.lbal_addr = info->ide_addr + S3C_ATA_PIO_LLR;
- ap->ioaddr.lbam_addr = info->ide_addr + S3C_ATA_PIO_LMR;
- ap->ioaddr.lbah_addr = info->ide_addr + S3C_ATA_PIO_LHR;
- ap->ioaddr.device_addr = info->ide_addr + S3C_ATA_PIO_DVR;
- ap->ioaddr.status_addr = info->ide_addr + S3C_ATA_PIO_CSD;
- ap->ioaddr.command_addr = info->ide_addr + S3C_ATA_PIO_CSD;
- ap->ioaddr.altstatus_addr = info->ide_addr + S3C_ATA_PIO_DAD;
- ap->ioaddr.ctl_addr = info->ide_addr + S3C_ATA_PIO_DAD;
-
- ata_port_desc(ap, "mmio cmd 0x%llx ",
- (unsigned long long)res->start);
-
- host->private_data = info;
-
- if (pdata && pdata->setup_gpio)
- pdata->setup_gpio();
-
- /* Set endianness and enable the interface */
- pata_s3c_hwinit(info, pdata);
-
- ret = ata_host_activate(host, info->irq,
- info->irq ? pata_s3c_irq : NULL,
- 0, &pata_s3c_sht);
- if (ret)
- goto stop_clk;
-
- return 0;
-
-stop_clk:
- clk_disable(info->clk);
- return ret;
-}
-
-static int __exit pata_s3c_remove(struct platform_device *pdev)
-{
- struct ata_host *host = platform_get_drvdata(pdev);
- struct s3c_ide_info *info = host->private_data;
-
- ata_host_detach(host);
-
- clk_disable(info->clk);
-
- return 0;
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int pata_s3c_suspend(struct device *dev)
-{
- struct ata_host *host = dev_get_drvdata(dev);
-
- ata_host_suspend(host, PMSG_SUSPEND);
- return 0;
-}
-
-static int pata_s3c_resume(struct device *dev)
-{
- struct ata_host *host = dev_get_drvdata(dev);
- struct s3c_ide_platdata *pdata = dev_get_platdata(dev);
- struct s3c_ide_info *info = host->private_data;
-
- pata_s3c_hwinit(info, pdata);
- ata_host_resume(host);
-
- return 0;
-}
-
-static const struct dev_pm_ops pata_s3c_pm_ops = {
- .suspend = pata_s3c_suspend,
- .resume = pata_s3c_resume,
-};
-#endif
-
-/* driver device registration */
-static const struct platform_device_id pata_s3c_driver_ids[] = {
- {
- .name = "s3c64xx-pata",
- .driver_data = TYPE_S3C64XX,
- }, {
- .name = "s5pv210-pata",
- .driver_data = TYPE_S5PV210,
- },
- { }
-};
-
-MODULE_DEVICE_TABLE(platform, pata_s3c_driver_ids);
-
-static struct platform_driver pata_s3c_driver = {
- .remove = __exit_p(pata_s3c_remove),
- .id_table = pata_s3c_driver_ids,
- .driver = {
- .name = DRV_NAME,
-#ifdef CONFIG_PM_SLEEP
- .pm = &pata_s3c_pm_ops,
-#endif
- },
-};
-
-module_platform_driver_probe(pata_s3c_driver, pata_s3c_probe);
-
-MODULE_AUTHOR("Abhilash Kesavan, <a.kesavan@samsung.com>");
-MODULE_DESCRIPTION("low-level driver for Samsung PATA controller");
-MODULE_LICENSE("GPL");
-MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index b9a4f68b371d..b052c5a65c17 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -566,7 +566,7 @@ static unsigned int sata_fsl_qc_issue(struct ata_queued_cmd *qc)
return 0;
}
-static bool sata_fsl_qc_fill_rtf(struct ata_queued_cmd *qc)
+static void sata_fsl_qc_fill_rtf(struct ata_queued_cmd *qc)
{
struct sata_fsl_port_priv *pp = qc->ap->private_data;
struct sata_fsl_host_priv *host_priv = qc->ap->host->private_data;
@@ -577,7 +577,6 @@ static bool sata_fsl_qc_fill_rtf(struct ata_queued_cmd *qc)
cd = pp->cmdentry + tag;
ata_tf_from_fis(cd->sfis, &qc->result_tf);
- return true;
}
static int sata_fsl_scr_write(struct ata_link *link,
@@ -1042,7 +1041,7 @@ static void sata_fsl_error_handler(struct ata_port *ap)
static void sata_fsl_post_internal_cmd(struct ata_queued_cmd *qc)
{
- if (qc->flags & ATA_QCFLAG_FAILED)
+ if (qc->flags & ATA_QCFLAG_EH)
qc->err_mask |= AC_ERR_OTHER;
if (qc->err_mask) {
diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c
index 11e518f0111c..2833c722118d 100644
--- a/drivers/ata/sata_inic162x.c
+++ b/drivers/ata/sata_inic162x.c
@@ -566,7 +566,7 @@ static void inic_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
tf->status = readb(port_base + PORT_TF_COMMAND);
}
-static bool inic_qc_fill_rtf(struct ata_queued_cmd *qc)
+static void inic_qc_fill_rtf(struct ata_queued_cmd *qc)
{
struct ata_taskfile *rtf = &qc->result_tf;
struct ata_taskfile tf;
@@ -580,12 +580,10 @@ static bool inic_qc_fill_rtf(struct ata_queued_cmd *qc)
*/
inic_tf_read(qc->ap, &tf);
- if (!(tf.status & ATA_ERR))
- return false;
-
- rtf->status = tf.status;
- rtf->error = tf.error;
- return true;
+ if (tf.status & ATA_ERR) {
+ rtf->status = tf.status;
+ rtf->error = tf.error;
+ }
}
static void inic_freeze(struct ata_port *ap)
@@ -672,7 +670,7 @@ static void inic_error_handler(struct ata_port *ap)
static void inic_post_internal_cmd(struct ata_queued_cmd *qc)
{
/* make DMA engine forget about the failed command */
- if (qc->flags & ATA_QCFLAG_FAILED)
+ if (qc->flags & ATA_QCFLAG_EH)
inic_reset_port(inic_port_base(qc->ap));
}
diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c
index 9cd7d8b71361..4e60e6c4c35a 100644
--- a/drivers/ata/sata_promise.c
+++ b/drivers/ata/sata_promise.c
@@ -828,7 +828,7 @@ static void pdc_post_internal_cmd(struct ata_queued_cmd *qc)
struct ata_port *ap = qc->ap;
/* make DMA engine forget about the failed command */
- if (qc->flags & ATA_QCFLAG_FAILED)
+ if (qc->flags & ATA_QCFLAG_EH)
pdc_reset_port(ap);
}
diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
index 2fef6ce93f07..22cc9e9789dd 100644
--- a/drivers/ata/sata_sil24.c
+++ b/drivers/ata/sata_sil24.c
@@ -328,7 +328,7 @@ static int sil24_scr_write(struct ata_link *link, unsigned sc_reg, u32 val);
static int sil24_qc_defer(struct ata_queued_cmd *qc);
static enum ata_completion_errors sil24_qc_prep(struct ata_queued_cmd *qc);
static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc);
-static bool sil24_qc_fill_rtf(struct ata_queued_cmd *qc);
+static void sil24_qc_fill_rtf(struct ata_queued_cmd *qc);
static void sil24_pmp_attach(struct ata_port *ap);
static void sil24_pmp_detach(struct ata_port *ap);
static void sil24_freeze(struct ata_port *ap);
@@ -901,10 +901,9 @@ static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc)
return 0;
}
-static bool sil24_qc_fill_rtf(struct ata_queued_cmd *qc)
+static void sil24_qc_fill_rtf(struct ata_queued_cmd *qc)
{
sil24_read_tf(qc->ap, qc->hw_tag, &qc->result_tf);
- return true;
}
static void sil24_pmp_attach(struct ata_port *ap)
@@ -1185,7 +1184,7 @@ static void sil24_post_internal_cmd(struct ata_queued_cmd *qc)
struct ata_port *ap = qc->ap;
/* make DMA engine forget about the failed command */
- if ((qc->flags & ATA_QCFLAG_FAILED) && sil24_init_port(ap))
+ if ((qc->flags & ATA_QCFLAG_EH) && sil24_init_port(ap))
ata_eh_freeze_port(ap);
}
diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c
index ab70cbc78f96..a92c60455b1d 100644
--- a/drivers/ata/sata_sx4.c
+++ b/drivers/ata/sata_sx4.c
@@ -866,7 +866,7 @@ static void pdc_post_internal_cmd(struct ata_queued_cmd *qc)
struct ata_port *ap = qc->ap;
/* make DMA engine forget about the failed command */
- if (qc->flags & ATA_QCFLAG_FAILED)
+ if (qc->flags & ATA_QCFLAG_EH)
pdc_reset_port(ap);
}
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index e7d6e6657ffa..b1c1dd38ab01 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -736,7 +736,7 @@ void update_siblings_masks(unsigned int cpuid)
ret = detect_cache_attributes(cpuid);
if (ret && ret != -ENOENT)
- pr_info("Early cacheinfo failed, ret = %d\n", ret);
+ pr_info("Early cacheinfo allocation failed, ret = %d\n", ret);
/* update core and thread sibling masks */
for_each_online_cpu(cpu) {
@@ -825,7 +825,7 @@ __weak int __init parse_acpi_topology(void)
#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
void __init init_cpu_topology(void)
{
- int ret;
+ int cpu, ret;
reset_cpu_topology();
ret = parse_acpi_topology();
@@ -840,6 +840,14 @@ void __init init_cpu_topology(void)
reset_cpu_topology();
return;
}
+
+ for_each_possible_cpu(cpu) {
+ ret = fetch_cache_info(cpu);
+ if (ret) {
+ pr_err("Early cacheinfo failed, ret = %d\n", ret);
+ break;
+ }
+ }
}
void store_cpu_topology(unsigned int cpuid)
diff --git a/drivers/base/auxiliary.c b/drivers/base/auxiliary.c
index 8c5e65930617..4d4c2c8d26c4 100644
--- a/drivers/base/auxiliary.c
+++ b/drivers/base/auxiliary.c
@@ -185,7 +185,7 @@ static int auxiliary_match(struct device *dev, struct device_driver *drv)
return !!auxiliary_match_id(auxdrv->id_table, auxdev);
}
-static int auxiliary_uevent(struct device *dev, struct kobj_uevent_env *env)
+static int auxiliary_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
const char *name, *p;
diff --git a/drivers/base/base.h b/drivers/base/base.h
index 7d4803c03d3e..726a12a244c0 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -52,8 +52,23 @@ struct subsys_private {
struct kset glue_dirs;
struct class *class;
+
+ struct lock_class_key lock_key;
};
-#define to_subsys_private(obj) container_of(obj, struct subsys_private, subsys.kobj)
+#define to_subsys_private(obj) container_of_const(obj, struct subsys_private, subsys.kobj)
+
+static inline struct subsys_private *subsys_get(struct subsys_private *sp)
+{
+ if (sp)
+ kset_get(&sp->subsys);
+ return sp;
+}
+
+static inline void subsys_put(struct subsys_private *sp)
+{
+ if (sp)
+ kset_put(&sp->subsys);
+}
struct driver_private {
struct kobject kobj;
@@ -130,6 +145,8 @@ struct kobject *virtual_device_parent(struct device *dev);
extern int bus_add_device(struct device *dev);
extern void bus_probe_device(struct device *dev);
extern void bus_remove_device(struct device *dev);
+void bus_notify(struct device *dev, enum bus_notifier_event value);
+bool bus_is_registered(const struct bus_type *bus);
extern int bus_add_driver(struct device_driver *drv);
extern void bus_remove_driver(struct device_driver *drv);
@@ -158,6 +175,8 @@ extern void device_block_probing(void);
extern void device_unblock_probing(void);
extern void deferred_probe_extend_timeout(void);
extern void driver_deferred_probe_trigger(void);
+const char *device_get_devnode(const struct device *dev, umode_t *mode,
+ kuid_t *uid, kgid_t *gid, const char **tmp);
/* /sys/devices directory */
extern struct kset *devices_kset;
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index 4ec6dbab73be..cfe8615d5106 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -6,6 +6,7 @@
* Copyright (c) 2002-3 Open Source Development Labs
* Copyright (c) 2007 Greg Kroah-Hartman <gregkh@suse.de>
* Copyright (c) 2007 Novell Inc.
+ * Copyright (c) 2023 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
*/
#include <linux/async.h>
@@ -24,6 +25,9 @@
/* /sys/devices/system */
static struct kset *system_kset;
+/* /sys/bus */
+static struct kset *bus_kset;
+
#define to_bus_attr(_attr) container_of(_attr, struct bus_attribute, attr)
/*
@@ -39,19 +43,63 @@ static struct kset *system_kset;
static int __must_check bus_rescan_devices_helper(struct device *dev,
void *data);
+/**
+ * bus_to_subsys - Turn a struct bus_type into a struct subsys_private
+ *
+ * @bus: pointer to the struct bus_type to look up
+ *
+ * The driver core internals needs to work on the subsys_private structure, not
+ * the external struct bus_type pointer. This function walks the list of
+ * registered busses in the system and finds the matching one and returns the
+ * internal struct subsys_private that relates to that bus.
+ *
+ * Note, the reference count of the return value is INCREMENTED if it is not
+ * NULL. A call to subsys_put() must be done when finished with the pointer in
+ * order for it to be properly freed.
+ */
+static struct subsys_private *bus_to_subsys(const struct bus_type *bus)
+{
+ struct subsys_private *sp = NULL;
+ struct kobject *kobj;
+
+ if (!bus)
+ return NULL;
+
+ spin_lock(&bus_kset->list_lock);
+
+ if (list_empty(&bus_kset->list))
+ goto done;
+
+ list_for_each_entry(kobj, &bus_kset->list, entry) {
+ struct kset *kset = container_of(kobj, struct kset, kobj);
+
+ sp = container_of_const(kset, struct subsys_private, subsys);
+ if (sp->bus == bus)
+ goto done;
+ }
+ sp = NULL;
+done:
+ sp = subsys_get(sp);
+ spin_unlock(&bus_kset->list_lock);
+ return sp;
+}
+
static struct bus_type *bus_get(struct bus_type *bus)
{
- if (bus) {
- kset_get(&bus->p->subsys);
+ struct subsys_private *sp = bus_to_subsys(bus);
+
+ if (sp)
return bus;
- }
return NULL;
}
-static void bus_put(struct bus_type *bus)
+static void bus_put(const struct bus_type *bus)
{
- if (bus)
- kset_put(&bus->p->subsys);
+ struct subsys_private *sp = bus_to_subsys(bus);
+
+ /* two puts are required as the call to bus_to_subsys incremented it again */
+ subsys_put(sp);
+ subsys_put(sp);
}
static ssize_t drv_attr_show(struct kobject *kobj, struct attribute *attr,
@@ -91,7 +139,7 @@ static void driver_release(struct kobject *kobj)
kfree(drv_priv);
}
-static struct kobj_type driver_ktype = {
+static const struct kobj_type driver_ktype = {
.sysfs_ops = &driver_sysfs_ops,
.release = driver_release,
};
@@ -128,37 +176,42 @@ static const struct sysfs_ops bus_sysfs_ops = {
.store = bus_attr_store,
};
-int bus_create_file(struct bus_type *bus, struct bus_attribute *attr)
+int bus_create_file(const struct bus_type *bus, struct bus_attribute *attr)
{
+ struct subsys_private *sp = bus_to_subsys(bus);
int error;
- if (bus_get(bus)) {
- error = sysfs_create_file(&bus->p->subsys.kobj, &attr->attr);
- bus_put(bus);
- } else
- error = -EINVAL;
+
+ if (!sp)
+ return -EINVAL;
+
+ error = sysfs_create_file(&sp->subsys.kobj, &attr->attr);
+
+ subsys_put(sp);
return error;
}
EXPORT_SYMBOL_GPL(bus_create_file);
-void bus_remove_file(struct bus_type *bus, struct bus_attribute *attr)
+void bus_remove_file(const struct bus_type *bus, struct bus_attribute *attr)
{
- if (bus_get(bus)) {
- sysfs_remove_file(&bus->p->subsys.kobj, &attr->attr);
- bus_put(bus);
- }
+ struct subsys_private *sp = bus_to_subsys(bus);
+
+ if (!sp)
+ return;
+
+ sysfs_remove_file(&sp->subsys.kobj, &attr->attr);
+ subsys_put(sp);
}
EXPORT_SYMBOL_GPL(bus_remove_file);
static void bus_release(struct kobject *kobj)
{
struct subsys_private *priv = to_subsys_private(kobj);
- struct bus_type *bus = priv->bus;
+ lockdep_unregister_key(&priv->lock_key);
kfree(priv);
- bus->p = NULL;
}
-static struct kobj_type bus_ktype = {
+static const struct kobj_type bus_ktype = {
.sysfs_ops = &bus_sysfs_ops,
.release = bus_release,
};
@@ -176,8 +229,6 @@ static const struct kset_uevent_ops bus_uevent_ops = {
.filter = bus_uevent_filter,
};
-static struct kset *bus_kset;
-
/* Manually detach a device from its associated driver. */
static ssize_t unbind_store(struct device_driver *drv, const char *buf,
size_t count)
@@ -225,16 +276,31 @@ static DRIVER_ATTR_IGNORE_LOCKDEP(bind, 0200, NULL, bind_store);
static ssize_t drivers_autoprobe_show(struct bus_type *bus, char *buf)
{
- return sysfs_emit(buf, "%d\n", bus->p->drivers_autoprobe);
+ struct subsys_private *sp = bus_to_subsys(bus);
+ int ret;
+
+ if (!sp)
+ return -EINVAL;
+
+ ret = sysfs_emit(buf, "%d\n", sp->drivers_autoprobe);
+ subsys_put(sp);
+ return ret;
}
static ssize_t drivers_autoprobe_store(struct bus_type *bus,
const char *buf, size_t count)
{
+ struct subsys_private *sp = bus_to_subsys(bus);
+
+ if (!sp)
+ return -EINVAL;
+
if (buf[0] == '0')
- bus->p->drivers_autoprobe = 0;
+ sp->drivers_autoprobe = 0;
else
- bus->p->drivers_autoprobe = 1;
+ sp->drivers_autoprobe = 1;
+
+ subsys_put(sp);
return count;
}
@@ -285,21 +351,23 @@ static struct device *next_device(struct klist_iter *i)
* to retain this data, it should do so, and increment the reference
* count in the supplied callback.
*/
-int bus_for_each_dev(struct bus_type *bus, struct device *start,
+int bus_for_each_dev(const struct bus_type *bus, struct device *start,
void *data, int (*fn)(struct device *, void *))
{
+ struct subsys_private *sp = bus_to_subsys(bus);
struct klist_iter i;
struct device *dev;
int error = 0;
- if (!bus || !bus->p)
+ if (!sp)
return -EINVAL;
- klist_iter_init_node(&bus->p->klist_devices, &i,
+ klist_iter_init_node(&sp->klist_devices, &i,
(start ? &start->p->knode_bus : NULL));
while (!error && (dev = next_device(&i)))
error = fn(dev, data);
klist_iter_exit(&i);
+ subsys_put(sp);
return error;
}
EXPORT_SYMBOL_GPL(bus_for_each_dev);
@@ -319,67 +387,28 @@ EXPORT_SYMBOL_GPL(bus_for_each_dev);
* if it does. If the callback returns non-zero, this function will
* return to the caller and not iterate over any more devices.
*/
-struct device *bus_find_device(struct bus_type *bus,
+struct device *bus_find_device(const struct bus_type *bus,
struct device *start, const void *data,
int (*match)(struct device *dev, const void *data))
{
+ struct subsys_private *sp = bus_to_subsys(bus);
struct klist_iter i;
struct device *dev;
- if (!bus || !bus->p)
+ if (!sp)
return NULL;
- klist_iter_init_node(&bus->p->klist_devices, &i,
+ klist_iter_init_node(&sp->klist_devices, &i,
(start ? &start->p->knode_bus : NULL));
while ((dev = next_device(&i)))
if (match(dev, data) && get_device(dev))
break;
klist_iter_exit(&i);
+ subsys_put(sp);
return dev;
}
EXPORT_SYMBOL_GPL(bus_find_device);
-/**
- * subsys_find_device_by_id - find a device with a specific enumeration number
- * @subsys: subsystem
- * @id: index 'id' in struct device
- * @hint: device to check first
- *
- * Check the hint's next object and if it is a match return it directly,
- * otherwise, fall back to a full list search. Either way a reference for
- * the returned object is taken.
- */
-struct device *subsys_find_device_by_id(struct bus_type *subsys, unsigned int id,
- struct device *hint)
-{
- struct klist_iter i;
- struct device *dev;
-
- if (!subsys)
- return NULL;
-
- if (hint) {
- klist_iter_init_node(&subsys->p->klist_devices, &i, &hint->p->knode_bus);
- dev = next_device(&i);
- if (dev && dev->id == id && get_device(dev)) {
- klist_iter_exit(&i);
- return dev;
- }
- klist_iter_exit(&i);
- }
-
- klist_iter_init_node(&subsys->p->klist_devices, &i, NULL);
- while ((dev = next_device(&i))) {
- if (dev->id == id && get_device(dev)) {
- klist_iter_exit(&i);
- return dev;
- }
- }
- klist_iter_exit(&i);
- return NULL;
-}
-EXPORT_SYMBOL_GPL(subsys_find_device_by_id);
-
static struct device_driver *next_driver(struct klist_iter *i)
{
struct klist_node *n = klist_next(i);
@@ -411,21 +440,23 @@ static struct device_driver *next_driver(struct klist_iter *i)
* in the callback. It must also be sure to increment the refcount
* so it doesn't disappear before returning to the caller.
*/
-int bus_for_each_drv(struct bus_type *bus, struct device_driver *start,
+int bus_for_each_drv(const struct bus_type *bus, struct device_driver *start,
void *data, int (*fn)(struct device_driver *, void *))
{
+ struct subsys_private *sp = bus_to_subsys(bus);
struct klist_iter i;
struct device_driver *drv;
int error = 0;
- if (!bus)
+ if (!sp)
return -EINVAL;
- klist_iter_init_node(&bus->p->klist_drivers, &i,
+ klist_iter_init_node(&sp->klist_drivers, &i,
start ? &start->p->knode_bus : NULL);
while ((drv = next_driver(&i)) && !error)
error = fn(drv, data);
klist_iter_exit(&i);
+ subsys_put(sp);
return error;
}
EXPORT_SYMBOL_GPL(bus_for_each_drv);
@@ -440,32 +471,46 @@ EXPORT_SYMBOL_GPL(bus_for_each_drv);
*/
int bus_add_device(struct device *dev)
{
- struct bus_type *bus = bus_get(dev->bus);
- int error = 0;
+ struct subsys_private *sp = bus_to_subsys(dev->bus);
+ int error;
- if (bus) {
- pr_debug("bus: '%s': add device %s\n", bus->name, dev_name(dev));
- error = device_add_groups(dev, bus->dev_groups);
- if (error)
- goto out_put;
- error = sysfs_create_link(&bus->p->devices_kset->kobj,
- &dev->kobj, dev_name(dev));
- if (error)
- goto out_groups;
- error = sysfs_create_link(&dev->kobj,
- &dev->bus->p->subsys.kobj, "subsystem");
- if (error)
- goto out_subsys;
- klist_add_tail(&dev->p->knode_bus, &bus->p->klist_devices);
+ if (!sp) {
+ /*
+ * This is a normal operation for many devices that do not
+ * have a bus assigned to them, just say that all went
+ * well.
+ */
+ return 0;
}
+
+ /*
+ * Reference in sp is now incremented and will be dropped when
+ * the device is removed from the bus
+ */
+
+ pr_debug("bus: '%s': add device %s\n", sp->bus->name, dev_name(dev));
+
+ error = device_add_groups(dev, sp->bus->dev_groups);
+ if (error)
+ goto out_put;
+
+ error = sysfs_create_link(&sp->devices_kset->kobj, &dev->kobj, dev_name(dev));
+ if (error)
+ goto out_groups;
+
+ error = sysfs_create_link(&dev->kobj, &sp->subsys.kobj, "subsystem");
+ if (error)
+ goto out_subsys;
+
+ klist_add_tail(&dev->p->knode_bus, &sp->klist_devices);
return 0;
out_subsys:
- sysfs_remove_link(&bus->p->devices_kset->kobj, dev_name(dev));
+ sysfs_remove_link(&sp->devices_kset->kobj, dev_name(dev));
out_groups:
- device_remove_groups(dev, bus->dev_groups);
+ device_remove_groups(dev, sp->bus->dev_groups);
out_put:
- bus_put(dev->bus);
+ subsys_put(sp);
return error;
}
@@ -477,20 +522,21 @@ out_put:
*/
void bus_probe_device(struct device *dev)
{
- struct bus_type *bus = dev->bus;
+ struct subsys_private *sp = bus_to_subsys(dev->bus);
struct subsys_interface *sif;
- if (!bus)
+ if (!sp)
return;
- if (bus->p->drivers_autoprobe)
+ if (sp->drivers_autoprobe)
device_initial_probe(dev);
- mutex_lock(&bus->p->mutex);
- list_for_each_entry(sif, &bus->p->interfaces, node)
+ mutex_lock(&sp->mutex);
+ list_for_each_entry(sif, &sp->interfaces, node)
if (sif->add_dev)
sif->add_dev(dev, sif);
- mutex_unlock(&bus->p->mutex);
+ mutex_unlock(&sp->mutex);
+ subsys_put(sp);
}
/**
@@ -505,21 +551,20 @@ void bus_probe_device(struct device *dev)
*/
void bus_remove_device(struct device *dev)
{
- struct bus_type *bus = dev->bus;
+ struct subsys_private *sp = bus_to_subsys(dev->bus);
struct subsys_interface *sif;
- if (!bus)
+ if (!sp)
return;
- mutex_lock(&bus->p->mutex);
- list_for_each_entry(sif, &bus->p->interfaces, node)
+ mutex_lock(&sp->mutex);
+ list_for_each_entry(sif, &sp->interfaces, node)
if (sif->remove_dev)
sif->remove_dev(dev, sif);
- mutex_unlock(&bus->p->mutex);
+ mutex_unlock(&sp->mutex);
sysfs_remove_link(&dev->kobj, "subsystem");
- sysfs_remove_link(&dev->bus->p->devices_kset->kobj,
- dev_name(dev));
+ sysfs_remove_link(&sp->devices_kset->kobj, dev_name(dev));
device_remove_groups(dev, dev->bus->dev_groups);
if (klist_node_attached(&dev->p->knode_bus))
klist_del(&dev->p->knode_bus);
@@ -527,7 +572,14 @@ void bus_remove_device(struct device *dev)
pr_debug("bus: '%s': remove device %s\n",
dev->bus->name, dev_name(dev));
device_release_driver(dev);
- bus_put(dev->bus);
+
+ /*
+ * Decrement the reference count twice, once for the bus_to_subsys()
+ * call in the start of this function, and the second one from the
+ * reference increment in bus_add_device()
+ */
+ subsys_put(sp);
+ subsys_put(sp);
}
static int __must_check add_bind_files(struct device_driver *drv)
@@ -552,7 +604,7 @@ static void remove_bind_files(struct device_driver *drv)
static BUS_ATTR_WO(drivers_probe);
static BUS_ATTR_RW(drivers_autoprobe);
-static int add_probe_files(struct bus_type *bus)
+static int add_probe_files(const struct bus_type *bus)
{
int retval;
@@ -567,7 +619,7 @@ out:
return retval;
}
-static void remove_probe_files(struct bus_type *bus)
+static void remove_probe_files(const struct bus_type *bus)
{
bus_remove_file(bus, &bus_attr_drivers_autoprobe);
bus_remove_file(bus, &bus_attr_drivers_probe);
@@ -589,15 +641,18 @@ static DRIVER_ATTR_WO(uevent);
*/
int bus_add_driver(struct device_driver *drv)
{
- struct bus_type *bus;
+ struct subsys_private *sp = bus_to_subsys(drv->bus);
struct driver_private *priv;
int error = 0;
- bus = bus_get(drv->bus);
- if (!bus)
+ if (!sp)
return -EINVAL;
- pr_debug("bus: '%s': add driver %s\n", bus->name, drv->name);
+ /*
+ * Reference in sp is now incremented and will be dropped when
+ * the driver is removed from the bus
+ */
+ pr_debug("bus: '%s': add driver %s\n", sp->bus->name, drv->name);
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv) {
@@ -607,14 +662,14 @@ int bus_add_driver(struct device_driver *drv)
klist_init(&priv->klist_devices, NULL, NULL);
priv->driver = drv;
drv->p = priv;
- priv->kobj.kset = bus->p->drivers_kset;
+ priv->kobj.kset = sp->drivers_kset;
error = kobject_init_and_add(&priv->kobj, &driver_ktype, NULL,
"%s", drv->name);
if (error)
goto out_unregister;
- klist_add_tail(&priv->knode_bus, &bus->p->klist_drivers);
- if (drv->bus->p->drivers_autoprobe) {
+ klist_add_tail(&priv->knode_bus, &sp->klist_drivers);
+ if (sp->drivers_autoprobe) {
error = driver_attach(drv);
if (error)
goto out_del_list;
@@ -626,7 +681,7 @@ int bus_add_driver(struct device_driver *drv)
printk(KERN_ERR "%s: uevent attr (%s) failed\n",
__func__, drv->name);
}
- error = driver_add_groups(drv, bus->drv_groups);
+ error = driver_add_groups(drv, sp->bus->drv_groups);
if (error) {
/* How the hell do we get out of this pickle? Give up */
printk(KERN_ERR "%s: driver_add_groups(%s) failed\n",
@@ -651,7 +706,7 @@ out_unregister:
/* drv->p is freed in driver_release() */
drv->p = NULL;
out_put_bus:
- bus_put(bus);
+ subsys_put(sp);
return error;
}
@@ -665,19 +720,29 @@ out_put_bus:
*/
void bus_remove_driver(struct device_driver *drv)
{
- if (!drv->bus)
+ struct subsys_private *sp = bus_to_subsys(drv->bus);
+
+ if (!sp)
return;
+ pr_debug("bus: '%s': remove driver %s\n", sp->bus->name, drv->name);
+
if (!drv->suppress_bind_attrs)
remove_bind_files(drv);
- driver_remove_groups(drv, drv->bus->drv_groups);
+ driver_remove_groups(drv, sp->bus->drv_groups);
driver_remove_file(drv, &driver_attr_uevent);
klist_remove(&drv->p->knode_bus);
- pr_debug("bus: '%s': remove driver %s\n", drv->bus->name, drv->name);
driver_detach(drv);
module_remove_driver(drv);
kobject_put(&drv->p->kobj);
- bus_put(drv->bus);
+
+ /*
+ * Decrement the reference count twice, once for the bus_to_subsys()
+ * call in the start of this function, and the second one from the
+ * reference increment in bus_add_driver()
+ */
+ subsys_put(sp);
+ subsys_put(sp);
}
/* Helper for bus_rescan_devices's iter */
@@ -727,18 +792,6 @@ int device_reprobe(struct device *dev)
}
EXPORT_SYMBOL_GPL(device_reprobe);
-static int bus_add_groups(struct bus_type *bus,
- const struct attribute_group **groups)
-{
- return sysfs_create_groups(&bus->p->subsys.kobj, groups);
-}
-
-static void bus_remove_groups(struct bus_type *bus,
- const struct attribute_group **groups)
-{
- sysfs_remove_groups(&bus->p->subsys.kobj, groups);
-}
-
static void klist_devices_get(struct klist_node *n)
{
struct device_private *dev_prv = to_device_private_bus(n);
@@ -758,10 +811,18 @@ static void klist_devices_put(struct klist_node *n)
static ssize_t bus_uevent_store(struct bus_type *bus,
const char *buf, size_t count)
{
- int rc;
+ struct subsys_private *sp = bus_to_subsys(bus);
+ int ret;
- rc = kobject_synth_uevent(&bus->p->subsys.kobj, buf, count);
- return rc ? rc : count;
+ if (!sp)
+ return -EINVAL;
+
+ ret = kobject_synth_uevent(&sp->subsys.kobj, buf, count);
+ subsys_put(sp);
+
+ if (ret)
+ return ret;
+ return count;
}
/*
* "open code" the old BUS_ATTR() macro here. We want to use BUS_ATTR_WO()
@@ -784,23 +845,24 @@ int bus_register(struct bus_type *bus)
{
int retval;
struct subsys_private *priv;
- struct lock_class_key *key = &bus->lock_key;
+ struct kobject *bus_kobj;
+ struct lock_class_key *key;
priv = kzalloc(sizeof(struct subsys_private), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->bus = bus;
- bus->p = priv;
BLOCKING_INIT_NOTIFIER_HEAD(&priv->bus_notifier);
- retval = kobject_set_name(&priv->subsys.kobj, "%s", bus->name);
+ bus_kobj = &priv->subsys.kobj;
+ retval = kobject_set_name(bus_kobj, "%s", bus->name);
if (retval)
goto out;
- priv->subsys.kobj.kset = bus_kset;
- priv->subsys.kobj.ktype = &bus_ktype;
+ bus_kobj->kset = bus_kset;
+ bus_kobj->ktype = &bus_ktype;
priv->drivers_autoprobe = 1;
retval = kset_register(&priv->subsys);
@@ -811,21 +873,21 @@ int bus_register(struct bus_type *bus)
if (retval)
goto bus_uevent_fail;
- priv->devices_kset = kset_create_and_add("devices", NULL,
- &priv->subsys.kobj);
+ priv->devices_kset = kset_create_and_add("devices", NULL, bus_kobj);
if (!priv->devices_kset) {
retval = -ENOMEM;
goto bus_devices_fail;
}
- priv->drivers_kset = kset_create_and_add("drivers", NULL,
- &priv->subsys.kobj);
+ priv->drivers_kset = kset_create_and_add("drivers", NULL, bus_kobj);
if (!priv->drivers_kset) {
retval = -ENOMEM;
goto bus_drivers_fail;
}
INIT_LIST_HEAD(&priv->interfaces);
+ key = &priv->lock_key;
+ lockdep_register_key(key);
__mutex_init(&priv->mutex, "subsys mutex", key);
klist_init(&priv->klist_devices, klist_devices_get, klist_devices_put);
klist_init(&priv->klist_drivers, NULL, NULL);
@@ -834,7 +896,7 @@ int bus_register(struct bus_type *bus)
if (retval)
goto bus_probe_files_fail;
- retval = bus_add_groups(bus, bus->bus_groups);
+ retval = sysfs_create_groups(bus_kobj, bus->bus_groups);
if (retval)
goto bus_groups_fail;
@@ -844,16 +906,15 @@ int bus_register(struct bus_type *bus)
bus_groups_fail:
remove_probe_files(bus);
bus_probe_files_fail:
- kset_unregister(bus->p->drivers_kset);
+ kset_unregister(priv->drivers_kset);
bus_drivers_fail:
- kset_unregister(bus->p->devices_kset);
+ kset_unregister(priv->devices_kset);
bus_devices_fail:
bus_remove_file(bus, &bus_attr_uevent);
bus_uevent_fail:
- kset_unregister(&bus->p->subsys);
+ kset_unregister(&priv->subsys);
out:
- kfree(bus->p);
- bus->p = NULL;
+ kfree(priv);
return retval;
}
EXPORT_SYMBOL_GPL(bus_register);
@@ -865,43 +926,82 @@ EXPORT_SYMBOL_GPL(bus_register);
* Unregister the child subsystems and the bus itself.
* Finally, we call bus_put() to release the refcount
*/
-void bus_unregister(struct bus_type *bus)
+void bus_unregister(const struct bus_type *bus)
{
+ struct subsys_private *sp = bus_to_subsys(bus);
+ struct kobject *bus_kobj;
+
+ if (!sp)
+ return;
+
pr_debug("bus: '%s': unregistering\n", bus->name);
if (bus->dev_root)
device_unregister(bus->dev_root);
- bus_remove_groups(bus, bus->bus_groups);
+
+ bus_kobj = &sp->subsys.kobj;
+ sysfs_remove_groups(bus_kobj, bus->bus_groups);
remove_probe_files(bus);
- kset_unregister(bus->p->drivers_kset);
- kset_unregister(bus->p->devices_kset);
bus_remove_file(bus, &bus_attr_uevent);
- kset_unregister(&bus->p->subsys);
+
+ kset_unregister(sp->drivers_kset);
+ kset_unregister(sp->devices_kset);
+ kset_unregister(&sp->subsys);
+ subsys_put(sp);
}
EXPORT_SYMBOL_GPL(bus_unregister);
-int bus_register_notifier(struct bus_type *bus, struct notifier_block *nb)
+int bus_register_notifier(const struct bus_type *bus, struct notifier_block *nb)
{
- return blocking_notifier_chain_register(&bus->p->bus_notifier, nb);
+ struct subsys_private *sp = bus_to_subsys(bus);
+ int retval;
+
+ if (!sp)
+ return -EINVAL;
+
+ retval = blocking_notifier_chain_register(&sp->bus_notifier, nb);
+ subsys_put(sp);
+ return retval;
}
EXPORT_SYMBOL_GPL(bus_register_notifier);
-int bus_unregister_notifier(struct bus_type *bus, struct notifier_block *nb)
+int bus_unregister_notifier(const struct bus_type *bus, struct notifier_block *nb)
{
- return blocking_notifier_chain_unregister(&bus->p->bus_notifier, nb);
+ struct subsys_private *sp = bus_to_subsys(bus);
+ int retval;
+
+ if (!sp)
+ return -EINVAL;
+ retval = blocking_notifier_chain_unregister(&sp->bus_notifier, nb);
+ subsys_put(sp);
+ return retval;
}
EXPORT_SYMBOL_GPL(bus_unregister_notifier);
-struct kset *bus_get_kset(struct bus_type *bus)
+void bus_notify(struct device *dev, enum bus_notifier_event value)
{
- return &bus->p->subsys;
+ struct subsys_private *sp = bus_to_subsys(dev->bus);
+
+ if (!sp)
+ return;
+
+ blocking_notifier_call_chain(&sp->bus_notifier, value, dev);
+ subsys_put(sp);
}
-EXPORT_SYMBOL_GPL(bus_get_kset);
-struct klist *bus_get_device_klist(struct bus_type *bus)
+struct kset *bus_get_kset(const struct bus_type *bus)
{
- return &bus->p->klist_devices;
+ struct subsys_private *sp = bus_to_subsys(bus);
+ struct kset *kset;
+
+ if (!sp)
+ return NULL;
+
+ kset = &sp->subsys;
+ subsys_put(sp);
+
+ return kset;
}
-EXPORT_SYMBOL_GPL(bus_get_device_klist);
+EXPORT_SYMBOL_GPL(bus_get_kset);
/*
* Yes, this forcibly breaks the klist abstraction temporarily. It
@@ -934,13 +1034,16 @@ void bus_sort_breadthfirst(struct bus_type *bus,
int (*compare)(const struct device *a,
const struct device *b))
{
+ struct subsys_private *sp = bus_to_subsys(bus);
LIST_HEAD(sorted_devices);
struct klist_node *n, *tmp;
struct device_private *dev_prv;
struct device *dev;
struct klist *device_klist;
- device_klist = bus_get_device_klist(bus);
+ if (!sp)
+ return;
+ device_klist = &sp->klist_devices;
spin_lock(&device_klist->k_lock);
list_for_each_entry_safe(n, tmp, &device_klist->k_list, n_node) {
@@ -950,13 +1053,19 @@ void bus_sort_breadthfirst(struct bus_type *bus,
}
list_splice(&sorted_devices, &device_klist->k_list);
spin_unlock(&device_klist->k_lock);
+ subsys_put(sp);
}
EXPORT_SYMBOL_GPL(bus_sort_breadthfirst);
+struct subsys_dev_iter {
+ struct klist_iter ki;
+ const struct device_type *type;
+};
+
/**
* subsys_dev_iter_init - initialize subsys device iterator
* @iter: subsys iterator to initialize
- * @subsys: the subsys we wanna iterate over
+ * @sp: the subsys private (i.e. bus) we wanna iterate over
* @start: the device to start iterating from, if any
* @type: device_type of the devices to iterate over, NULL for all
*
@@ -965,17 +1074,16 @@ EXPORT_SYMBOL_GPL(bus_sort_breadthfirst);
* otherwise if it is NULL, the iteration starts at the beginning of
* the list.
*/
-void subsys_dev_iter_init(struct subsys_dev_iter *iter, struct bus_type *subsys,
- struct device *start, const struct device_type *type)
+static void subsys_dev_iter_init(struct subsys_dev_iter *iter, struct subsys_private *sp,
+ struct device *start, const struct device_type *type)
{
struct klist_node *start_knode = NULL;
if (start)
start_knode = &start->p->knode_bus;
- klist_iter_init_node(&subsys->p->klist_devices, &iter->ki, start_knode);
+ klist_iter_init_node(&sp->klist_devices, &iter->ki, start_knode);
iter->type = type;
}
-EXPORT_SYMBOL_GPL(subsys_dev_iter_init);
/**
* subsys_dev_iter_next - iterate to the next device
@@ -989,7 +1097,7 @@ EXPORT_SYMBOL_GPL(subsys_dev_iter_init);
* free to do whatever it wants to do with the device including
* calling back into subsys code.
*/
-struct device *subsys_dev_iter_next(struct subsys_dev_iter *iter)
+static struct device *subsys_dev_iter_next(struct subsys_dev_iter *iter)
{
struct klist_node *knode;
struct device *dev;
@@ -1003,7 +1111,6 @@ struct device *subsys_dev_iter_next(struct subsys_dev_iter *iter)
return dev;
}
}
-EXPORT_SYMBOL_GPL(subsys_dev_iter_next);
/**
* subsys_dev_iter_exit - finish iteration
@@ -1012,34 +1119,38 @@ EXPORT_SYMBOL_GPL(subsys_dev_iter_next);
* Finish an iteration. Always call this function after iteration is
* complete whether the iteration ran till the end or not.
*/
-void subsys_dev_iter_exit(struct subsys_dev_iter *iter)
+static void subsys_dev_iter_exit(struct subsys_dev_iter *iter)
{
klist_iter_exit(&iter->ki);
}
-EXPORT_SYMBOL_GPL(subsys_dev_iter_exit);
int subsys_interface_register(struct subsys_interface *sif)
{
- struct bus_type *subsys;
+ struct subsys_private *sp;
struct subsys_dev_iter iter;
struct device *dev;
if (!sif || !sif->subsys)
return -ENODEV;
- subsys = bus_get(sif->subsys);
- if (!subsys)
+ sp = bus_to_subsys(sif->subsys);
+ if (!sp)
return -EINVAL;
- mutex_lock(&subsys->p->mutex);
- list_add_tail(&sif->node, &subsys->p->interfaces);
+ /*
+ * Reference in sp is now incremented and will be dropped when
+ * the interface is removed from the bus
+ */
+
+ mutex_lock(&sp->mutex);
+ list_add_tail(&sif->node, &sp->interfaces);
if (sif->add_dev) {
- subsys_dev_iter_init(&iter, subsys, NULL, NULL);
+ subsys_dev_iter_init(&iter, sp, NULL, NULL);
while ((dev = subsys_dev_iter_next(&iter)))
sif->add_dev(dev, sif);
subsys_dev_iter_exit(&iter);
}
- mutex_unlock(&subsys->p->mutex);
+ mutex_unlock(&sp->mutex);
return 0;
}
@@ -1047,26 +1158,34 @@ EXPORT_SYMBOL_GPL(subsys_interface_register);
void subsys_interface_unregister(struct subsys_interface *sif)
{
- struct bus_type *subsys;
+ struct subsys_private *sp;
struct subsys_dev_iter iter;
struct device *dev;
if (!sif || !sif->subsys)
return;
- subsys = sif->subsys;
+ sp = bus_to_subsys(sif->subsys);
+ if (!sp)
+ return;
- mutex_lock(&subsys->p->mutex);
+ mutex_lock(&sp->mutex);
list_del_init(&sif->node);
if (sif->remove_dev) {
- subsys_dev_iter_init(&iter, subsys, NULL, NULL);
+ subsys_dev_iter_init(&iter, sp, NULL, NULL);
while ((dev = subsys_dev_iter_next(&iter)))
sif->remove_dev(dev, sif);
subsys_dev_iter_exit(&iter);
}
- mutex_unlock(&subsys->p->mutex);
-
- bus_put(subsys);
+ mutex_unlock(&sp->mutex);
+
+ /*
+ * Decrement the reference count twice, once for the bus_to_subsys()
+ * call in the start of this function, and the second one from the
+ * reference increment in subsys_interface_register()
+ */
+ subsys_put(sp);
+ subsys_put(sp);
}
EXPORT_SYMBOL_GPL(subsys_interface_unregister);
@@ -1166,6 +1285,76 @@ int subsys_virtual_register(struct bus_type *subsys,
}
EXPORT_SYMBOL_GPL(subsys_virtual_register);
+/**
+ * driver_find - locate driver on a bus by its name.
+ * @name: name of the driver.
+ * @bus: bus to scan for the driver.
+ *
+ * Call kset_find_obj() to iterate over list of drivers on
+ * a bus to find driver by name. Return driver if found.
+ *
+ * This routine provides no locking to prevent the driver it returns
+ * from being unregistered or unloaded while the caller is using it.
+ * The caller is responsible for preventing this.
+ */
+struct device_driver *driver_find(const char *name, struct bus_type *bus)
+{
+ struct subsys_private *sp = bus_to_subsys(bus);
+ struct kobject *k;
+ struct driver_private *priv;
+
+ if (!sp)
+ return NULL;
+
+ k = kset_find_obj(sp->drivers_kset, name);
+ subsys_put(sp);
+ if (!k)
+ return NULL;
+
+ priv = to_driver(k);
+
+ /* Drop reference added by kset_find_obj() */
+ kobject_put(k);
+ return priv->driver;
+}
+EXPORT_SYMBOL_GPL(driver_find);
+
+/*
+ * Warning, the value could go to "removed" instantly after calling this function, so be very
+ * careful when calling it...
+ */
+bool bus_is_registered(const struct bus_type *bus)
+{
+ struct subsys_private *sp = bus_to_subsys(bus);
+ bool is_initialized = false;
+
+ if (sp) {
+ is_initialized = true;
+ subsys_put(sp);
+ }
+ return is_initialized;
+}
+
+/**
+ * bus_get_dev_root - return a pointer to the "device root" of a bus
+ * @bus: bus to return the device root of.
+ *
+ * If a bus has a "device root" structure, return it, WITH THE REFERENCE
+ * COUNT INCREMENTED.
+ *
+ * Note, when finished with the device, a call to put_device() is required.
+ *
+ * If the device root is not present (or bus is not a valid pointer), NULL
+ * will be returned.
+ */
+struct device *bus_get_dev_root(const struct bus_type *bus)
+{
+ if (bus)
+ return get_device(bus->dev_root);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(bus_get_dev_root);
+
int __init buses_init(void)
{
bus_kset = kset_create_and_add("bus", &bus_uevent_ops, NULL);
diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
index 950b22cdb5f7..f6573c335f4c 100644
--- a/drivers/base/cacheinfo.c
+++ b/drivers/base/cacheinfo.c
@@ -229,8 +229,71 @@ static int cache_setup_of_node(unsigned int cpu)
return 0;
}
+
+static int of_count_cache_leaves(struct device_node *np)
+{
+ unsigned int leaves = 0;
+
+ if (of_property_read_bool(np, "cache-size"))
+ ++leaves;
+ if (of_property_read_bool(np, "i-cache-size"))
+ ++leaves;
+ if (of_property_read_bool(np, "d-cache-size"))
+ ++leaves;
+
+ if (!leaves) {
+ /* The '[i-|d-|]cache-size' property is required, but
+ * if absent, fallback on the 'cache-unified' property.
+ */
+ if (of_property_read_bool(np, "cache-unified"))
+ return 1;
+ else
+ return 2;
+ }
+
+ return leaves;
+}
+
+int init_of_cache_level(unsigned int cpu)
+{
+ struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+ struct device_node *np = of_cpu_device_node_get(cpu);
+ struct device_node *prev = NULL;
+ unsigned int levels = 0, leaves, level;
+
+ leaves = of_count_cache_leaves(np);
+ if (leaves > 0)
+ levels = 1;
+
+ prev = np;
+ while ((np = of_find_next_cache_node(np))) {
+ of_node_put(prev);
+ prev = np;
+ if (!of_device_is_compatible(np, "cache"))
+ goto err_out;
+ if (of_property_read_u32(np, "cache-level", &level))
+ goto err_out;
+ if (level <= levels)
+ goto err_out;
+
+ leaves += of_count_cache_leaves(np);
+ levels = level;
+ }
+
+ of_node_put(np);
+ this_cpu_ci->num_levels = levels;
+ this_cpu_ci->num_leaves = leaves;
+
+ return 0;
+
+err_out:
+ of_node_put(np);
+ return -EINVAL;
+}
+
#else
static inline int cache_setup_of_node(unsigned int cpu) { return 0; }
+int init_of_cache_level(unsigned int cpu) { return 0; }
#endif
int __weak cache_setup_acpi(unsigned int cpu)
@@ -256,7 +319,7 @@ static int cache_shared_cpu_map_setup(unsigned int cpu)
{
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
struct cacheinfo *this_leaf, *sib_leaf;
- unsigned int index;
+ unsigned int index, sib_index;
int ret = 0;
if (this_cpu_ci->cpu_map_populated)
@@ -284,11 +347,13 @@ static int cache_shared_cpu_map_setup(unsigned int cpu)
if (i == cpu || !sib_cpu_ci->info_list)
continue;/* skip if itself or no cacheinfo */
-
- sib_leaf = per_cpu_cacheinfo_idx(i, index);
- if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
- cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map);
- cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
+ for (sib_index = 0; sib_index < cache_leaves(i); sib_index++) {
+ sib_leaf = per_cpu_cacheinfo_idx(i, sib_index);
+ if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
+ cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map);
+ cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
+ break;
+ }
}
}
/* record the maximum cache line size */
@@ -302,7 +367,7 @@ static int cache_shared_cpu_map_setup(unsigned int cpu)
static void cache_shared_cpu_map_remove(unsigned int cpu)
{
struct cacheinfo *this_leaf, *sib_leaf;
- unsigned int sibling, index;
+ unsigned int sibling, index, sib_index;
for (index = 0; index < cache_leaves(cpu); index++) {
this_leaf = per_cpu_cacheinfo_idx(cpu, index);
@@ -313,9 +378,14 @@ static void cache_shared_cpu_map_remove(unsigned int cpu)
if (sibling == cpu || !sib_cpu_ci->info_list)
continue;/* skip if itself or no cacheinfo */
- sib_leaf = per_cpu_cacheinfo_idx(sibling, index);
- cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map);
- cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map);
+ for (sib_index = 0; sib_index < cache_leaves(sibling); sib_index++) {
+ sib_leaf = per_cpu_cacheinfo_idx(sibling, sib_index);
+ if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
+ cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map);
+ cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map);
+ break;
+ }
+ }
}
}
}
@@ -326,10 +396,6 @@ static void free_cache_attributes(unsigned int cpu)
return;
cache_shared_cpu_map_remove(cpu);
-
- kfree(per_cpu_cacheinfo(cpu));
- per_cpu_cacheinfo(cpu) = NULL;
- cache_leaves(cpu) = 0;
}
int __weak init_cache_level(unsigned int cpu)
@@ -342,29 +408,71 @@ int __weak populate_cache_leaves(unsigned int cpu)
return -ENOENT;
}
+static inline
+int allocate_cache_info(int cpu)
+{
+ per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu),
+ sizeof(struct cacheinfo), GFP_ATOMIC);
+ if (!per_cpu_cacheinfo(cpu)) {
+ cache_leaves(cpu) = 0;
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+int fetch_cache_info(unsigned int cpu)
+{
+ struct cpu_cacheinfo *this_cpu_ci;
+ unsigned int levels = 0, split_levels = 0;
+ int ret;
+
+ if (acpi_disabled) {
+ ret = init_of_cache_level(cpu);
+ if (ret < 0)
+ return ret;
+ } else {
+ ret = acpi_get_cache_info(cpu, &levels, &split_levels);
+ if (ret < 0)
+ return ret;
+
+ this_cpu_ci = get_cpu_cacheinfo(cpu);
+ this_cpu_ci->num_levels = levels;
+ /*
+ * This assumes that:
+ * - there cannot be any split caches (data/instruction)
+ * above a unified cache
+ * - data/instruction caches come by pair
+ */
+ this_cpu_ci->num_leaves = levels + split_levels;
+ }
+ if (!cache_leaves(cpu))
+ return -ENOENT;
+
+ return allocate_cache_info(cpu);
+}
+
int detect_cache_attributes(unsigned int cpu)
{
int ret;
- /* Since early detection of the cacheinfo is allowed via this
- * function and this also gets called as CPU hotplug callbacks via
- * cacheinfo_cpu_online, the initialisation can be skipped and only
- * CPU maps can be updated as the CPU online status would be update
- * if called via cacheinfo_cpu_online path.
+ /* Since early initialization/allocation of the cacheinfo is allowed
+ * via fetch_cache_info() and this also gets called as CPU hotplug
+ * callbacks via cacheinfo_cpu_online, the init/alloc can be skipped
+ * as it will happen only once (the cacheinfo memory is never freed).
+ * Just populate the cacheinfo.
*/
if (per_cpu_cacheinfo(cpu))
- goto update_cpu_map;
+ goto populate_leaves;
if (init_cache_level(cpu) || !cache_leaves(cpu))
return -ENOENT;
- per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu),
- sizeof(struct cacheinfo), GFP_ATOMIC);
- if (per_cpu_cacheinfo(cpu) == NULL) {
- cache_leaves(cpu) = 0;
- return -ENOMEM;
- }
+ ret = allocate_cache_info(cpu);
+ if (ret)
+ return ret;
+populate_leaves:
/*
* populate_cache_leaves() may completely setup the cache leaves and
* shared_cpu_map or it may leave it partially setup.
@@ -373,7 +481,6 @@ int detect_cache_attributes(unsigned int cpu)
if (ret)
goto free_ci;
-update_cpu_map:
/*
* For systems using DT for cache hierarchy, fw_token
* and shared_cpu_map will be set up here only if they are
diff --git a/drivers/base/class.c b/drivers/base/class.c
index 86ec554cfe60..2373b3e210d8 100644
--- a/drivers/base/class.c
+++ b/drivers/base/class.c
@@ -53,6 +53,8 @@ static void class_release(struct kobject *kobj)
pr_debug("class '%s': release.\n", class->name);
+ class->p = NULL;
+
if (class->class_release)
class->class_release(class);
else
@@ -64,7 +66,7 @@ static void class_release(struct kobject *kobj)
static const struct kobj_ns_type_operations *class_child_ns_type(const struct kobject *kobj)
{
- struct subsys_private *cp = to_subsys_private(kobj);
+ const struct subsys_private *cp = to_subsys_private(kobj);
struct class *class = cp->class;
return class->ns_type;
@@ -75,7 +77,7 @@ static const struct sysfs_ops class_sysfs_ops = {
.store = class_attr_store,
};
-static struct kobj_type class_ktype = {
+static const struct kobj_type class_ktype = {
.sysfs_ops = &class_sysfs_ops,
.release = class_release,
.child_ns_type = class_child_ns_type,
@@ -97,6 +99,7 @@ int class_create_file_ns(struct class *cls, const struct class_attribute *attr,
error = -EINVAL;
return error;
}
+EXPORT_SYMBOL_GPL(class_create_file_ns);
void class_remove_file_ns(struct class *cls, const struct class_attribute *attr,
const void *ns)
@@ -104,6 +107,7 @@ void class_remove_file_ns(struct class *cls, const struct class_attribute *attr,
if (cls)
sysfs_remove_file_ns(&cls->p->subsys.kobj, &attr->attr, ns);
}
+EXPORT_SYMBOL_GPL(class_remove_file_ns);
static struct class *class_get(struct class *cls)
{
@@ -186,17 +190,21 @@ int __class_register(struct class *cls, struct lock_class_key *key)
cls->p = cp;
error = kset_register(&cp->subsys);
- if (error) {
- kfree(cp);
- return error;
- }
+ if (error)
+ goto err_out;
+
error = class_add_groups(class_get(cls), cls->class_groups);
class_put(cls);
if (error) {
kobject_del(&cp->subsys.kobj);
kfree_const(cp->subsys.kobj.name);
- kfree(cp);
+ goto err_out;
}
+ return 0;
+
+err_out:
+ kfree(cp);
+ cls->p = NULL;
return error;
}
EXPORT_SYMBOL_GPL(__class_register);
@@ -207,6 +215,7 @@ void class_unregister(struct class *cls)
class_remove_groups(cls, cls->class_groups);
kset_unregister(&cls->p->subsys);
}
+EXPORT_SYMBOL_GPL(class_unregister);
static void class_create_release(struct class *cls)
{
@@ -270,6 +279,7 @@ void class_destroy(struct class *cls)
class_unregister(cls);
}
+EXPORT_SYMBOL_GPL(class_destroy);
/**
* class_dev_iter_init - initialize class device iterator
@@ -454,6 +464,7 @@ int class_interface_register(struct class_interface *class_intf)
return 0;
}
+EXPORT_SYMBOL_GPL(class_interface_register);
void class_interface_unregister(struct class_interface *class_intf)
{
@@ -476,6 +487,7 @@ void class_interface_unregister(struct class_interface *class_intf)
class_put(parent);
}
+EXPORT_SYMBOL_GPL(class_interface_unregister);
ssize_t show_class_attr_string(struct class *class,
struct class_attribute *attr, char *buf)
@@ -582,11 +594,3 @@ int __init classes_init(void)
return -ENOMEM;
return 0;
}
-
-EXPORT_SYMBOL_GPL(class_create_file_ns);
-EXPORT_SYMBOL_GPL(class_remove_file_ns);
-EXPORT_SYMBOL_GPL(class_unregister);
-EXPORT_SYMBOL_GPL(class_destroy);
-
-EXPORT_SYMBOL_GPL(class_interface_register);
-EXPORT_SYMBOL_GPL(class_interface_unregister);
diff --git a/drivers/base/component.c b/drivers/base/component.c
index 5eadeac6c532..7dbf14a1d915 100644
--- a/drivers/base/component.c
+++ b/drivers/base/component.c
@@ -125,7 +125,7 @@ static void component_debugfs_add(struct aggregate_device *m)
static void component_debugfs_del(struct aggregate_device *m)
{
- debugfs_remove(debugfs_lookup(dev_name(m->parent), component_debugfs_dir));
+ debugfs_lookup_and_remove(dev_name(m->parent), component_debugfs_dir);
}
#else
diff --git a/drivers/base/core.c b/drivers/base/core.c
index a3e14143ec0c..e54a10b5dbd7 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -54,11 +54,12 @@ static LIST_HEAD(deferred_sync);
static unsigned int defer_sync_state_count = 1;
static DEFINE_MUTEX(fwnode_link_lock);
static bool fw_devlink_is_permissive(void);
+static void __fw_devlink_link_to_consumers(struct device *dev);
static bool fw_devlink_drv_reg_done;
static bool fw_devlink_best_effort;
/**
- * fwnode_link_add - Create a link between two fwnode_handles.
+ * __fwnode_link_add - Create a link between two fwnode_handles.
* @con: Consumer end of the link.
* @sup: Supplier end of the link.
*
@@ -74,35 +75,42 @@ static bool fw_devlink_best_effort;
* Attempts to create duplicate links between the same pair of fwnode handles
* are ignored and there is no reference counting.
*/
-int fwnode_link_add(struct fwnode_handle *con, struct fwnode_handle *sup)
+static int __fwnode_link_add(struct fwnode_handle *con,
+ struct fwnode_handle *sup, u8 flags)
{
struct fwnode_link *link;
- int ret = 0;
-
- mutex_lock(&fwnode_link_lock);
list_for_each_entry(link, &sup->consumers, s_hook)
- if (link->consumer == con)
- goto out;
+ if (link->consumer == con) {
+ link->flags |= flags;
+ return 0;
+ }
link = kzalloc(sizeof(*link), GFP_KERNEL);
- if (!link) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!link)
+ return -ENOMEM;
link->supplier = sup;
INIT_LIST_HEAD(&link->s_hook);
link->consumer = con;
INIT_LIST_HEAD(&link->c_hook);
+ link->flags = flags;
list_add(&link->s_hook, &sup->consumers);
list_add(&link->c_hook, &con->suppliers);
pr_debug("%pfwP Linked as a fwnode consumer to %pfwP\n",
con, sup);
-out:
- mutex_unlock(&fwnode_link_lock);
+ return 0;
+}
+
+int fwnode_link_add(struct fwnode_handle *con, struct fwnode_handle *sup)
+{
+ int ret;
+
+ mutex_lock(&fwnode_link_lock);
+ ret = __fwnode_link_add(con, sup, 0);
+ mutex_unlock(&fwnode_link_lock);
return ret;
}
@@ -122,6 +130,19 @@ static void __fwnode_link_del(struct fwnode_link *link)
}
/**
+ * __fwnode_link_cycle - Mark a fwnode link as being part of a cycle.
+ * @link: the fwnode_link to be marked
+ *
+ * The fwnode_link_lock needs to be held when this function is called.
+ */
+static void __fwnode_link_cycle(struct fwnode_link *link)
+{
+ pr_debug("%pfwf: Relaxing link with %pfwf\n",
+ link->consumer, link->supplier);
+ link->flags |= FWLINK_FLAG_CYCLE;
+}
+
+/**
* fwnode_links_purge_suppliers - Delete all supplier links of fwnode_handle.
* @fwnode: fwnode whose supplier links need to be deleted
*
@@ -181,7 +202,51 @@ void fw_devlink_purge_absent_suppliers(struct fwnode_handle *fwnode)
}
EXPORT_SYMBOL_GPL(fw_devlink_purge_absent_suppliers);
-#ifdef CONFIG_SRCU
+/**
+ * __fwnode_links_move_consumers - Move consumer from @from to @to fwnode_handle
+ * @from: move consumers away from this fwnode
+ * @to: move consumers to this fwnode
+ *
+ * Move all consumer links from @from fwnode to @to fwnode.
+ */
+static void __fwnode_links_move_consumers(struct fwnode_handle *from,
+ struct fwnode_handle *to)
+{
+ struct fwnode_link *link, *tmp;
+
+ list_for_each_entry_safe(link, tmp, &from->consumers, s_hook) {
+ __fwnode_link_add(link->consumer, to, link->flags);
+ __fwnode_link_del(link);
+ }
+}
+
+/**
+ * __fw_devlink_pickup_dangling_consumers - Pick up dangling consumers
+ * @fwnode: fwnode from which to pick up dangling consumers
+ * @new_sup: fwnode of new supplier
+ *
+ * If the @fwnode has a corresponding struct device and the device supports
+ * probing (that is, added to a bus), then we want to let fw_devlink create
+ * MANAGED device links to this device, so leave @fwnode and its descendant's
+ * fwnode links alone.
+ *
+ * Otherwise, move its consumers to the new supplier @new_sup.
+ */
+static void __fw_devlink_pickup_dangling_consumers(struct fwnode_handle *fwnode,
+ struct fwnode_handle *new_sup)
+{
+ struct fwnode_handle *child;
+
+ if (fwnode->dev && fwnode->dev->bus)
+ return;
+
+ fwnode->flags |= FWNODE_FLAG_NOT_DEVICE;
+ __fwnode_links_move_consumers(fwnode, new_sup);
+
+ fwnode_for_each_available_child_node(fwnode, child)
+ __fw_devlink_pickup_dangling_consumers(child, new_sup);
+}
+
static DEFINE_MUTEX(device_links_lock);
DEFINE_STATIC_SRCU(device_links_srcu);
@@ -220,47 +285,6 @@ static void device_link_remove_from_lists(struct device_link *link)
list_del_rcu(&link->s_node);
list_del_rcu(&link->c_node);
}
-#else /* !CONFIG_SRCU */
-static DECLARE_RWSEM(device_links_lock);
-
-static inline void device_links_write_lock(void)
-{
- down_write(&device_links_lock);
-}
-
-static inline void device_links_write_unlock(void)
-{
- up_write(&device_links_lock);
-}
-
-int device_links_read_lock(void)
-{
- down_read(&device_links_lock);
- return 0;
-}
-
-void device_links_read_unlock(int not_used)
-{
- up_read(&device_links_lock);
-}
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-int device_links_read_lock_held(void)
-{
- return lockdep_is_held(&device_links_lock);
-}
-#endif
-
-static inline void device_link_synchronize_removal(void)
-{
-}
-
-static void device_link_remove_from_lists(struct device_link *link)
-{
- list_del(&link->s_node);
- list_del(&link->c_node);
-}
-#endif /* !CONFIG_SRCU */
static bool device_is_ancestor(struct device *dev, struct device *target)
{
@@ -272,6 +296,12 @@ static bool device_is_ancestor(struct device *dev, struct device *target)
return false;
}
+static inline bool device_link_flag_is_sync_state_only(u32 flags)
+{
+ return (flags & ~(DL_FLAG_INFERRED | DL_FLAG_CYCLE)) ==
+ (DL_FLAG_SYNC_STATE_ONLY | DL_FLAG_MANAGED);
+}
+
/**
* device_is_dependent - Check if one device depends on another one
* @dev: Device to check dependencies for.
@@ -298,8 +328,7 @@ int device_is_dependent(struct device *dev, void *target)
return ret;
list_for_each_entry(link, &dev->links.consumers, s_node) {
- if ((link->flags & ~DL_FLAG_INFERRED) ==
- (DL_FLAG_SYNC_STATE_ONLY | DL_FLAG_MANAGED))
+ if (device_link_flag_is_sync_state_only(link->flags))
continue;
if (link->consumer == target)
@@ -372,8 +401,7 @@ static int device_reorder_to_tail(struct device *dev, void *not_used)
device_for_each_child(dev, NULL, device_reorder_to_tail);
list_for_each_entry(link, &dev->links.consumers, s_node) {
- if ((link->flags & ~DL_FLAG_INFERRED) ==
- (DL_FLAG_SYNC_STATE_ONLY | DL_FLAG_MANAGED))
+ if (device_link_flag_is_sync_state_only(link->flags))
continue;
device_reorder_to_tail(link->consumer, NULL);
}
@@ -634,7 +662,8 @@ postcore_initcall(devlink_class_init);
DL_FLAG_AUTOREMOVE_SUPPLIER | \
DL_FLAG_AUTOPROBE_CONSUMER | \
DL_FLAG_SYNC_STATE_ONLY | \
- DL_FLAG_INFERRED)
+ DL_FLAG_INFERRED | \
+ DL_FLAG_CYCLE)
#define DL_ADD_VALID_FLAGS (DL_MANAGED_LINK_FLAGS | DL_FLAG_STATELESS | \
DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE)
@@ -703,8 +732,6 @@ struct device_link *device_link_add(struct device *consumer,
if (!consumer || !supplier || consumer == supplier ||
flags & ~DL_ADD_VALID_FLAGS ||
(flags & DL_FLAG_STATELESS && flags & DL_MANAGED_LINK_FLAGS) ||
- (flags & DL_FLAG_SYNC_STATE_ONLY &&
- (flags & ~DL_FLAG_INFERRED) != DL_FLAG_SYNC_STATE_ONLY) ||
(flags & DL_FLAG_AUTOPROBE_CONSUMER &&
flags & (DL_FLAG_AUTOREMOVE_CONSUMER |
DL_FLAG_AUTOREMOVE_SUPPLIER)))
@@ -720,6 +747,10 @@ struct device_link *device_link_add(struct device *consumer,
if (!(flags & DL_FLAG_STATELESS))
flags |= DL_FLAG_MANAGED;
+ if (flags & DL_FLAG_SYNC_STATE_ONLY &&
+ !device_link_flag_is_sync_state_only(flags))
+ return NULL;
+
device_links_write_lock();
device_pm_lock();
@@ -984,6 +1015,21 @@ static bool dev_is_best_effort(struct device *dev)
(dev->fwnode && (dev->fwnode->flags & FWNODE_FLAG_BEST_EFFORT));
}
+static struct fwnode_handle *fwnode_links_check_suppliers(
+ struct fwnode_handle *fwnode)
+{
+ struct fwnode_link *link;
+
+ if (!fwnode || fw_devlink_is_permissive())
+ return NULL;
+
+ list_for_each_entry(link, &fwnode->suppliers, c_hook)
+ if (!(link->flags & FWLINK_FLAG_CYCLE))
+ return link->supplier;
+
+ return NULL;
+}
+
/**
* device_links_check_suppliers - Check presence of supplier drivers.
* @dev: Consumer device.
@@ -1011,11 +1057,8 @@ int device_links_check_suppliers(struct device *dev)
* probe.
*/
mutex_lock(&fwnode_link_lock);
- if (dev->fwnode && !list_empty(&dev->fwnode->suppliers) &&
- !fw_devlink_is_permissive()) {
- sup_fw = list_first_entry(&dev->fwnode->suppliers,
- struct fwnode_link,
- c_hook)->supplier;
+ sup_fw = fwnode_links_check_suppliers(dev->fwnode);
+ if (sup_fw) {
if (!dev_is_best_effort(dev)) {
fwnode_ret = -EPROBE_DEFER;
dev_err_probe(dev, -EPROBE_DEFER,
@@ -1204,7 +1247,9 @@ static ssize_t waiting_for_supplier_show(struct device *dev,
bool val;
device_lock(dev);
- val = !list_empty(&dev->fwnode->suppliers);
+ mutex_lock(&fwnode_link_lock);
+ val = !!fwnode_links_check_suppliers(dev->fwnode);
+ mutex_unlock(&fwnode_link_lock);
device_unlock(dev);
return sysfs_emit(buf, "%u\n", val);
}
@@ -1267,16 +1312,23 @@ void device_links_driver_bound(struct device *dev)
* them. So, fw_devlink no longer needs to create device links to any
* of the device's suppliers.
*
- * Also, if a child firmware node of this bound device is not added as
- * a device by now, assume it is never going to be added and make sure
- * other devices don't defer probe indefinitely by waiting for such a
- * child device.
+ * Also, if a child firmware node of this bound device is not added as a
+ * device by now, assume it is never going to be added. Make this bound
+ * device the fallback supplier to the dangling consumers of the child
+ * firmware node because this bound device is probably implementing the
+ * child firmware node functionality and we don't want the dangling
+ * consumers to defer probe indefinitely waiting for a device for the
+ * child firmware node.
*/
if (dev->fwnode && dev->fwnode->dev == dev) {
struct fwnode_handle *child;
fwnode_links_purge_suppliers(dev->fwnode);
+ mutex_lock(&fwnode_link_lock);
fwnode_for_each_available_child_node(dev->fwnode, child)
- fw_devlink_purge_absent_suppliers(child);
+ __fw_devlink_pickup_dangling_consumers(child,
+ dev->fwnode);
+ __fw_devlink_link_to_consumers(dev);
+ mutex_unlock(&fwnode_link_lock);
}
device_remove_file(dev, &dev_attr_waiting_for_supplier);
@@ -1633,8 +1685,11 @@ static int __init fw_devlink_strict_setup(char *arg)
}
early_param("fw_devlink.strict", fw_devlink_strict_setup);
-u32 fw_devlink_get_flags(void)
+static inline u32 fw_devlink_get_flags(u8 fwlink_flags)
{
+ if (fwlink_flags & FWLINK_FLAG_CYCLE)
+ return FW_DEVLINK_FLAGS_PERMISSIVE | DL_FLAG_CYCLE;
+
return fw_devlink_flags;
}
@@ -1672,7 +1727,7 @@ static void fw_devlink_relax_link(struct device_link *link)
if (!(link->flags & DL_FLAG_INFERRED))
return;
- if (link->flags == (DL_FLAG_MANAGED | FW_DEVLINK_FLAGS_PERMISSIVE))
+ if (device_link_flag_is_sync_state_only(link->flags))
return;
pm_runtime_drop_link(link);
@@ -1769,44 +1824,138 @@ static void fw_devlink_unblock_consumers(struct device *dev)
device_links_write_unlock();
}
+
+static bool fwnode_init_without_drv(struct fwnode_handle *fwnode)
+{
+ struct device *dev;
+ bool ret;
+
+ if (!(fwnode->flags & FWNODE_FLAG_INITIALIZED))
+ return false;
+
+ dev = get_dev_from_fwnode(fwnode);
+ ret = !dev || dev->links.status == DL_DEV_NO_DRIVER;
+ put_device(dev);
+
+ return ret;
+}
+
+static bool fwnode_ancestor_init_without_drv(struct fwnode_handle *fwnode)
+{
+ struct fwnode_handle *parent;
+
+ fwnode_for_each_parent_node(fwnode, parent) {
+ if (fwnode_init_without_drv(parent)) {
+ fwnode_handle_put(parent);
+ return true;
+ }
+ }
+
+ return false;
+}
+
/**
- * fw_devlink_relax_cycle - Convert cyclic links to SYNC_STATE_ONLY links
- * @con: Device to check dependencies for.
- * @sup: Device to check against.
- *
- * Check if @sup depends on @con or any device dependent on it (its child or
- * its consumer etc). When such a cyclic dependency is found, convert all
- * device links created solely by fw_devlink into SYNC_STATE_ONLY device links.
- * This is the equivalent of doing fw_devlink=permissive just between the
- * devices in the cycle. We need to do this because, at this point, fw_devlink
- * can't tell which of these dependencies is not a real dependency.
- *
- * Return 1 if a cycle is found. Otherwise, return 0.
+ * __fw_devlink_relax_cycles - Relax and mark dependency cycles.
+ * @con: Potential consumer device.
+ * @sup_handle: Potential supplier's fwnode.
+ *
+ * Needs to be called with fwnode_lock and device link lock held.
+ *
+ * Check if @sup_handle or any of its ancestors or suppliers direct/indirectly
+ * depend on @con. This function can detect multiple cyles between @sup_handle
+ * and @con. When such dependency cycles are found, convert all device links
+ * created solely by fw_devlink into SYNC_STATE_ONLY device links. Also, mark
+ * all fwnode links in the cycle with FWLINK_FLAG_CYCLE so that when they are
+ * converted into a device link in the future, they are created as
+ * SYNC_STATE_ONLY device links. This is the equivalent of doing
+ * fw_devlink=permissive just between the devices in the cycle. We need to do
+ * this because, at this point, fw_devlink can't tell which of these
+ * dependencies is not a real dependency.
+ *
+ * Return true if one or more cycles were found. Otherwise, return false.
*/
-static int fw_devlink_relax_cycle(struct device *con, void *sup)
+static bool __fw_devlink_relax_cycles(struct device *con,
+ struct fwnode_handle *sup_handle)
{
- struct device_link *link;
- int ret;
+ struct device *sup_dev = NULL, *par_dev = NULL;
+ struct fwnode_link *link;
+ struct device_link *dev_link;
+ bool ret = false;
- if (con == sup)
- return 1;
+ if (!sup_handle)
+ return false;
- ret = device_for_each_child(con, sup, fw_devlink_relax_cycle);
- if (ret)
- return ret;
+ /*
+ * We aren't trying to find all cycles. Just a cycle between con and
+ * sup_handle.
+ */
+ if (sup_handle->flags & FWNODE_FLAG_VISITED)
+ return false;
- list_for_each_entry(link, &con->links.consumers, s_node) {
- if ((link->flags & ~DL_FLAG_INFERRED) ==
- (DL_FLAG_SYNC_STATE_ONLY | DL_FLAG_MANAGED))
- continue;
+ sup_handle->flags |= FWNODE_FLAG_VISITED;
- if (!fw_devlink_relax_cycle(link->consumer, sup))
- continue;
+ sup_dev = get_dev_from_fwnode(sup_handle);
- ret = 1;
+ /* Termination condition. */
+ if (sup_dev == con) {
+ ret = true;
+ goto out;
+ }
- fw_devlink_relax_link(link);
+ /*
+ * If sup_dev is bound to a driver and @con hasn't started binding to a
+ * driver, sup_dev can't be a consumer of @con. So, no need to check
+ * further.
+ */
+ if (sup_dev && sup_dev->links.status == DL_DEV_DRIVER_BOUND &&
+ con->links.status == DL_DEV_NO_DRIVER) {
+ ret = false;
+ goto out;
+ }
+
+ list_for_each_entry(link, &sup_handle->suppliers, c_hook) {
+ if (__fw_devlink_relax_cycles(con, link->supplier)) {
+ __fwnode_link_cycle(link);
+ ret = true;
+ }
+ }
+
+ /*
+ * Give priority to device parent over fwnode parent to account for any
+ * quirks in how fwnodes are converted to devices.
+ */
+ if (sup_dev)
+ par_dev = get_device(sup_dev->parent);
+ else
+ par_dev = fwnode_get_next_parent_dev(sup_handle);
+
+ if (par_dev && __fw_devlink_relax_cycles(con, par_dev->fwnode))
+ ret = true;
+
+ if (!sup_dev)
+ goto out;
+
+ list_for_each_entry(dev_link, &sup_dev->links.suppliers, c_node) {
+ /*
+ * Ignore a SYNC_STATE_ONLY flag only if it wasn't marked as
+ * such due to a cycle.
+ */
+ if (device_link_flag_is_sync_state_only(dev_link->flags) &&
+ !(dev_link->flags & DL_FLAG_CYCLE))
+ continue;
+
+ if (__fw_devlink_relax_cycles(con,
+ dev_link->supplier->fwnode)) {
+ fw_devlink_relax_link(dev_link);
+ dev_link->flags |= DL_FLAG_CYCLE;
+ ret = true;
+ }
}
+
+out:
+ sup_handle->flags &= ~FWNODE_FLAG_VISITED;
+ put_device(sup_dev);
+ put_device(par_dev);
return ret;
}
@@ -1814,7 +1963,7 @@ static int fw_devlink_relax_cycle(struct device *con, void *sup)
* fw_devlink_create_devlink - Create a device link from a consumer to fwnode
* @con: consumer device for the device link
* @sup_handle: fwnode handle of supplier
- * @flags: devlink flags
+ * @link: fwnode link that's being converted to a device link
*
* This function will try to create a device link between the consumer device
* @con and the supplier device represented by @sup_handle.
@@ -1831,10 +1980,17 @@ static int fw_devlink_relax_cycle(struct device *con, void *sup)
* possible to do that in the future
*/
static int fw_devlink_create_devlink(struct device *con,
- struct fwnode_handle *sup_handle, u32 flags)
+ struct fwnode_handle *sup_handle,
+ struct fwnode_link *link)
{
struct device *sup_dev;
int ret = 0;
+ u32 flags;
+
+ if (con->fwnode == link->consumer)
+ flags = fw_devlink_get_flags(link->flags);
+ else
+ flags = FW_DEVLINK_FLAGS_PERMISSIVE;
/*
* In some cases, a device P might also be a supplier to its child node
@@ -1855,7 +2011,26 @@ static int fw_devlink_create_devlink(struct device *con,
fwnode_is_ancestor_of(sup_handle, con->fwnode))
return -EINVAL;
- sup_dev = get_dev_from_fwnode(sup_handle);
+ /*
+ * SYNC_STATE_ONLY device links don't block probing and supports cycles.
+ * So cycle detection isn't necessary and shouldn't be done.
+ */
+ if (!(flags & DL_FLAG_SYNC_STATE_ONLY)) {
+ device_links_write_lock();
+ if (__fw_devlink_relax_cycles(con, sup_handle)) {
+ __fwnode_link_cycle(link);
+ flags = fw_devlink_get_flags(link->flags);
+ dev_info(con, "Fixed dependency cycle(s) with %pfwf\n",
+ sup_handle);
+ }
+ device_links_write_unlock();
+ }
+
+ if (sup_handle->flags & FWNODE_FLAG_NOT_DEVICE)
+ sup_dev = fwnode_get_next_parent_dev(sup_handle);
+ else
+ sup_dev = get_dev_from_fwnode(sup_handle);
+
if (sup_dev) {
/*
* If it's one of those drivers that don't actually bind to
@@ -1864,71 +2039,34 @@ static int fw_devlink_create_devlink(struct device *con,
*/
if (sup_dev->links.status == DL_DEV_NO_DRIVER &&
sup_handle->flags & FWNODE_FLAG_INITIALIZED) {
+ dev_dbg(con,
+ "Not linking %pfwf - dev might never probe\n",
+ sup_handle);
ret = -EINVAL;
goto out;
}
- /*
- * If this fails, it is due to cycles in device links. Just
- * give up on this link and treat it as invalid.
- */
- if (!device_link_add(con, sup_dev, flags) &&
- !(flags & DL_FLAG_SYNC_STATE_ONLY)) {
- dev_info(con, "Fixing up cyclic dependency with %s\n",
- dev_name(sup_dev));
- device_links_write_lock();
- fw_devlink_relax_cycle(con, sup_dev);
- device_links_write_unlock();
- device_link_add(con, sup_dev,
- FW_DEVLINK_FLAGS_PERMISSIVE);
+ if (!device_link_add(con, sup_dev, flags)) {
+ dev_err(con, "Failed to create device link with %s\n",
+ dev_name(sup_dev));
ret = -EINVAL;
}
goto out;
}
- /* Supplier that's already initialized without a struct device. */
- if (sup_handle->flags & FWNODE_FLAG_INITIALIZED)
- return -EINVAL;
-
/*
- * DL_FLAG_SYNC_STATE_ONLY doesn't block probing and supports
- * cycles. So cycle detection isn't necessary and shouldn't be
- * done.
+ * Supplier or supplier's ancestor already initialized without a struct
+ * device or being probed by a driver.
*/
- if (flags & DL_FLAG_SYNC_STATE_ONLY)
- return -EAGAIN;
-
- /*
- * If we can't find the supplier device from its fwnode, it might be
- * due to a cyclic dependency between fwnodes. Some of these cycles can
- * be broken by applying logic. Check for these types of cycles and
- * break them so that devices in the cycle probe properly.
- *
- * If the supplier's parent is dependent on the consumer, then the
- * consumer and supplier have a cyclic dependency. Since fw_devlink
- * can't tell which of the inferred dependencies are incorrect, don't
- * enforce probe ordering between any of the devices in this cyclic
- * dependency. Do this by relaxing all the fw_devlink device links in
- * this cycle and by treating the fwnode link between the consumer and
- * the supplier as an invalid dependency.
- */
- sup_dev = fwnode_get_next_parent_dev(sup_handle);
- if (sup_dev && device_is_dependent(con, sup_dev)) {
- dev_info(con, "Fixing up cyclic dependency with %pfwP (%s)\n",
- sup_handle, dev_name(sup_dev));
- device_links_write_lock();
- fw_devlink_relax_cycle(con, sup_dev);
- device_links_write_unlock();
- ret = -EINVAL;
- } else {
- /*
- * Can't check for cycles or no cycles. So let's try
- * again later.
- */
- ret = -EAGAIN;
+ if (fwnode_init_without_drv(sup_handle) ||
+ fwnode_ancestor_init_without_drv(sup_handle)) {
+ dev_dbg(con, "Not linking %pfwf - might never become dev\n",
+ sup_handle);
+ return -EINVAL;
}
+ ret = -EAGAIN;
out:
put_device(sup_dev);
return ret;
@@ -1956,7 +2094,6 @@ static void __fw_devlink_link_to_consumers(struct device *dev)
struct fwnode_link *link, *tmp;
list_for_each_entry_safe(link, tmp, &fwnode->consumers, s_hook) {
- u32 dl_flags = fw_devlink_get_flags();
struct device *con_dev;
bool own_link = true;
int ret;
@@ -1986,14 +2123,13 @@ static void __fw_devlink_link_to_consumers(struct device *dev)
con_dev = NULL;
} else {
own_link = false;
- dl_flags = FW_DEVLINK_FLAGS_PERMISSIVE;
}
}
if (!con_dev)
continue;
- ret = fw_devlink_create_devlink(con_dev, fwnode, dl_flags);
+ ret = fw_devlink_create_devlink(con_dev, fwnode, link);
put_device(con_dev);
if (!own_link || ret == -EAGAIN)
continue;
@@ -2013,10 +2149,7 @@ static void __fw_devlink_link_to_consumers(struct device *dev)
*
* The function creates normal (non-SYNC_STATE_ONLY) device links between @dev
* and the real suppliers of @dev. Once these device links are created, the
- * fwnode links are deleted. When such device links are successfully created,
- * this function is called recursively on those supplier devices. This is
- * needed to detect and break some invalid cycles in fwnode links. See
- * fw_devlink_create_devlink() for more details.
+ * fwnode links are deleted.
*
* In addition, it also looks at all the suppliers of the entire fwnode tree
* because some of the child devices of @dev that have not been added yet
@@ -2034,44 +2167,16 @@ static void __fw_devlink_link_to_suppliers(struct device *dev,
bool own_link = (dev->fwnode == fwnode);
struct fwnode_link *link, *tmp;
struct fwnode_handle *child = NULL;
- u32 dl_flags;
-
- if (own_link)
- dl_flags = fw_devlink_get_flags();
- else
- dl_flags = FW_DEVLINK_FLAGS_PERMISSIVE;
list_for_each_entry_safe(link, tmp, &fwnode->suppliers, c_hook) {
int ret;
- struct device *sup_dev;
struct fwnode_handle *sup = link->supplier;
- ret = fw_devlink_create_devlink(dev, sup, dl_flags);
+ ret = fw_devlink_create_devlink(dev, sup, link);
if (!own_link || ret == -EAGAIN)
continue;
__fwnode_link_del(link);
-
- /* If no device link was created, nothing more to do. */
- if (ret)
- continue;
-
- /*
- * If a device link was successfully created to a supplier, we
- * now need to try and link the supplier to all its suppliers.
- *
- * This is needed to detect and delete false dependencies in
- * fwnode links that haven't been converted to a device link
- * yet. See comments in fw_devlink_create_devlink() for more
- * details on the false dependency.
- *
- * Without deleting these false dependencies, some devices will
- * never probe because they'll keep waiting for their false
- * dependency fwnode links to be converted to device links.
- */
- sup_dev = get_dev_from_fwnode(sup);
- __fw_devlink_link_to_suppliers(sup_dev, sup_dev->fwnode);
- put_device(sup_dev);
}
/*
@@ -2354,7 +2459,7 @@ static void device_get_ownership(const struct kobject *kobj, kuid_t *uid, kgid_t
dev->class->get_ownership(dev, uid, gid);
}
-static struct kobj_type device_ktype = {
+static const struct kobj_type device_ktype = {
.release = device_release,
.sysfs_ops = &dev_sysfs_ops,
.namespace = device_namespace,
@@ -2387,9 +2492,9 @@ static const char *dev_uevent_name(const struct kobject *kobj)
return NULL;
}
-static int dev_uevent(struct kobject *kobj, struct kobj_uevent_env *env)
+static int dev_uevent(const struct kobject *kobj, struct kobj_uevent_env *env)
{
- struct device *dev = kobj_to_dev(kobj);
+ const struct device *dev = kobj_to_dev(kobj);
int retval = 0;
/* add device node properties if present */
@@ -2992,7 +3097,7 @@ struct kobj_ns_type_operations *class_dir_child_ns_type(const struct kobject *ko
return dir->class->ns_type;
}
-static struct kobj_type class_dir_ktype = {
+static const struct kobj_type class_dir_ktype = {
.release = class_dir_release,
.sysfs_ops = &kobj_sysfs_ops,
.child_ns_type = class_dir_child_ns_type
@@ -3026,8 +3131,9 @@ static DEFINE_MUTEX(gdp_mutex);
static struct kobject *get_device_parent(struct device *dev,
struct device *parent)
{
+ struct kobject *kobj = NULL;
+
if (dev->class) {
- struct kobject *kobj = NULL;
struct kobject *parent_kobj;
struct kobject *k;
@@ -3075,8 +3181,15 @@ static struct kobject *get_device_parent(struct device *dev,
}
/* subsystems can specify a default root directory for their devices */
- if (!parent && dev->bus && dev->bus->dev_root)
- return &dev->bus->dev_root->kobj;
+ if (!parent && dev->bus) {
+ struct device *dev_root = bus_get_dev_root(dev->bus);
+
+ if (dev_root) {
+ kobj = &dev_root->kobj;
+ put_device(dev_root);
+ return kobj;
+ }
+ }
if (parent)
return &parent->kobj;
@@ -3413,7 +3526,7 @@ int device_add(struct device *dev)
/* we require the name to be set before, and pass NULL */
error = kobject_add(&dev->kobj, dev->kobj.parent, NULL);
if (error) {
- glue_dir = get_glue_dir(dev);
+ glue_dir = kobj;
goto Error;
}
@@ -3453,10 +3566,7 @@ int device_add(struct device *dev)
/* Notify clients of device addition. This call must come
* after dpm_sysfs_add() and before kobject_uevent().
*/
- if (dev->bus)
- blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
- BUS_NOTIFY_ADD_DEVICE, dev);
-
+ bus_notify(dev, BUS_NOTIFY_ADD_DEVICE);
kobject_uevent(&dev->kobj, KOBJ_ADD);
/*
@@ -3513,6 +3623,7 @@ done:
device_pm_remove(dev);
dpm_sysfs_remove(dev);
DPMError:
+ dev->driver = NULL;
bus_remove_device(dev);
BusError:
device_remove_attrs(dev);
@@ -3636,9 +3747,7 @@ void device_del(struct device *dev)
* before dpm_sysfs_remove().
*/
noio_flag = memalloc_noio_save();
- if (dev->bus)
- blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
- BUS_NOTIFY_DEL_DEVICE, dev);
+ bus_notify(dev, BUS_NOTIFY_DEL_DEVICE);
dpm_sysfs_remove(dev);
if (parent)
@@ -3669,9 +3778,7 @@ void device_del(struct device *dev)
device_platform_notify_remove(dev);
device_links_purge(dev);
- if (dev->bus)
- blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
- BUS_NOTIFY_REMOVED_DEVICE, dev);
+ bus_notify(dev, BUS_NOTIFY_REMOVED_DEVICE);
kobject_uevent(&dev->kobj, KOBJ_REMOVE);
glue_dir = get_glue_dir(dev);
kobject_del(&dev->kobj);
@@ -3739,7 +3846,7 @@ static struct device *next_device(struct klist_iter *i)
* a name. This memory is returned in tmp and needs to be
* freed by the caller.
*/
-const char *device_get_devnode(struct device *dev,
+const char *device_get_devnode(const struct device *dev,
umode_t *mode, kuid_t *uid, kgid_t *gid,
const char **tmp)
{
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 4c98849577d4..182c6122f815 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -125,17 +125,6 @@ static DEVICE_ATTR(release, S_IWUSR, NULL, cpu_release_store);
#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */
#endif /* CONFIG_HOTPLUG_CPU */
-struct bus_type cpu_subsys = {
- .name = "cpu",
- .dev_name = "cpu",
- .match = cpu_subsys_match,
-#ifdef CONFIG_HOTPLUG_CPU
- .online = cpu_subsys_online,
- .offline = cpu_subsys_offline,
-#endif
-};
-EXPORT_SYMBOL_GPL(cpu_subsys);
-
#ifdef CONFIG_KEXEC
#include <linux/kexec.h>
@@ -336,7 +325,7 @@ static ssize_t print_cpu_modalias(struct device *dev,
return len;
}
-static int cpu_uevent(struct device *dev, struct kobj_uevent_env *env)
+static int cpu_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
char *buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
if (buf) {
@@ -348,6 +337,20 @@ static int cpu_uevent(struct device *dev, struct kobj_uevent_env *env)
}
#endif
+struct bus_type cpu_subsys = {
+ .name = "cpu",
+ .dev_name = "cpu",
+ .match = cpu_subsys_match,
+#ifdef CONFIG_HOTPLUG_CPU
+ .online = cpu_subsys_online,
+ .offline = cpu_subsys_offline,
+#endif
+#ifdef CONFIG_GENERIC_CPU_AUTOPROBE
+ .uevent = cpu_uevent,
+#endif
+};
+EXPORT_SYMBOL_GPL(cpu_subsys);
+
/*
* register_cpu - Setup a sysfs device for a CPU.
* @cpu - cpu->hotpluggable field set to 1 will generate a control file in
@@ -368,9 +371,6 @@ int register_cpu(struct cpu *cpu, int num)
cpu->dev.offline_disabled = !cpu->hotpluggable;
cpu->dev.offline = !cpu_online(num);
cpu->dev.of_node = of_get_cpu_node(num, NULL);
-#ifdef CONFIG_GENERIC_CPU_AUTOPROBE
- cpu->dev.bus->uevent = cpu_uevent;
-#endif
cpu->dev.groups = common_cpu_attr_groups;
if (cpu->hotpluggable)
cpu->dev.groups = hotplugable_cpu_attr_groups;
@@ -610,9 +610,13 @@ static const struct attribute_group cpu_root_vulnerabilities_group = {
static void __init cpu_register_vulnerabilities(void)
{
- if (sysfs_create_group(&cpu_subsys.dev_root->kobj,
- &cpu_root_vulnerabilities_group))
- pr_err("Unable to register CPU vulnerabilities\n");
+ struct device *dev = bus_get_dev_root(&cpu_subsys);
+
+ if (dev) {
+ if (sysfs_create_group(&dev->kobj, &cpu_root_vulnerabilities_group))
+ pr_err("Unable to register CPU vulnerabilities\n");
+ put_device(dev);
+ }
}
#else
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index e9b2f9c25efe..8def2ba08a82 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -257,13 +257,11 @@ static int deferred_devs_show(struct seq_file *s, void *data)
DEFINE_SHOW_ATTRIBUTE(deferred_devs);
#ifdef CONFIG_MODULES
-int driver_deferred_probe_timeout = 10;
+static int driver_deferred_probe_timeout = 10;
#else
-int driver_deferred_probe_timeout;
+static int driver_deferred_probe_timeout;
#endif
-EXPORT_SYMBOL_GPL(driver_deferred_probe_timeout);
-
static int __init deferred_probe_timeout_setup(char *str)
{
int timeout;
@@ -372,7 +370,7 @@ late_initcall(deferred_probe_initcall);
static void __exit deferred_probe_exit(void)
{
- debugfs_remove_recursive(debugfs_lookup("devices_deferred", NULL));
+ debugfs_lookup_and_remove("devices_deferred", NULL);
}
__exitcall(deferred_probe_exit);
@@ -413,10 +411,7 @@ static void driver_bound(struct device *dev)
driver_deferred_probe_del(dev);
driver_deferred_probe_trigger();
- if (dev->bus)
- blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
- BUS_NOTIFY_BOUND_DRIVER, dev);
-
+ bus_notify(dev, BUS_NOTIFY_BOUND_DRIVER);
kobject_uevent(&dev->kobj, KOBJ_BIND);
}
@@ -435,9 +430,7 @@ static int driver_sysfs_add(struct device *dev)
{
int ret;
- if (dev->bus)
- blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
- BUS_NOTIFY_BIND_DRIVER, dev);
+ bus_notify(dev, BUS_NOTIFY_BIND_DRIVER);
ret = sysfs_create_link(&dev->driver->p->kobj, &dev->kobj,
kobject_name(&dev->kobj));
@@ -502,9 +495,8 @@ int device_bind_driver(struct device *dev)
device_links_force_bind(dev);
driver_bound(dev);
}
- else if (dev->bus)
- blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
- BUS_NOTIFY_DRIVER_NOT_BOUND, dev);
+ else
+ bus_notify(dev, BUS_NOTIFY_DRIVER_NOT_BOUND);
return ret;
}
EXPORT_SYMBOL_GPL(device_bind_driver);
@@ -695,9 +687,7 @@ dev_groups_failed:
probe_failed:
driver_sysfs_remove(dev);
sysfs_failed:
- if (dev->bus)
- blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
- BUS_NOTIFY_DRIVER_NOT_BOUND, dev);
+ bus_notify(dev, BUS_NOTIFY_DRIVER_NOT_BOUND);
if (dev->bus && dev->bus->dma_cleanup)
dev->bus->dma_cleanup(dev);
pinctrl_bind_failed:
@@ -1243,10 +1233,7 @@ static void __device_release_driver(struct device *dev, struct device *parent)
driver_sysfs_remove(dev);
- if (dev->bus)
- blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
- BUS_NOTIFY_UNBIND_DRIVER,
- dev);
+ bus_notify(dev, BUS_NOTIFY_UNBIND_DRIVER);
pm_runtime_put_sync(dev);
@@ -1260,11 +1247,8 @@ static void __device_release_driver(struct device *dev, struct device *parent)
klist_remove(&dev->p->knode_driver);
device_pm_check_callbacks(dev);
- if (dev->bus)
- blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
- BUS_NOTIFY_UNBOUND_DRIVER,
- dev);
+ bus_notify(dev, BUS_NOTIFY_UNBOUND_DRIVER);
kobject_uevent(&dev->kobj, KOBJ_UNBIND);
}
}
diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
index e4bffeabf344..ae72d4ba8547 100644
--- a/drivers/base/devtmpfs.c
+++ b/drivers/base/devtmpfs.c
@@ -13,6 +13,8 @@
* overwrite the default setting if needed.
*/
+#define pr_fmt(fmt) "devtmpfs: " fmt
+
#include <linux/kernel.h>
#include <linux/syscalls.h>
#include <linux/mount.h>
@@ -173,7 +175,7 @@ static int dev_mkdir(const char *name, umode_t mode)
if (IS_ERR(dentry))
return PTR_ERR(dentry);
- err = vfs_mkdir(&init_user_ns, d_inode(path.dentry), dentry, mode);
+ err = vfs_mkdir(&nop_mnt_idmap, d_inode(path.dentry), dentry, mode);
if (!err)
/* mark as kernel-created inode */
d_inode(dentry)->i_private = &thread;
@@ -223,7 +225,7 @@ static int handle_create(const char *nodename, umode_t mode, kuid_t uid,
if (IS_ERR(dentry))
return PTR_ERR(dentry);
- err = vfs_mknod(&init_user_ns, d_inode(path.dentry), dentry, mode,
+ err = vfs_mknod(&nop_mnt_idmap, d_inode(path.dentry), dentry, mode,
dev->devt);
if (!err) {
struct iattr newattrs;
@@ -233,7 +235,7 @@ static int handle_create(const char *nodename, umode_t mode, kuid_t uid,
newattrs.ia_gid = gid;
newattrs.ia_valid = ATTR_MODE|ATTR_UID|ATTR_GID;
inode_lock(d_inode(dentry));
- notify_change(&init_user_ns, dentry, &newattrs, NULL);
+ notify_change(&nop_mnt_idmap, dentry, &newattrs, NULL);
inode_unlock(d_inode(dentry));
/* mark as kernel-created inode */
@@ -254,7 +256,7 @@ static int dev_rmdir(const char *name)
return PTR_ERR(dentry);
if (d_really_is_positive(dentry)) {
if (d_inode(dentry)->i_private == &thread)
- err = vfs_rmdir(&init_user_ns, d_inode(parent.dentry),
+ err = vfs_rmdir(&nop_mnt_idmap, d_inode(parent.dentry),
dentry);
else
err = -EPERM;
@@ -341,9 +343,9 @@ static int handle_remove(const char *nodename, struct device *dev)
newattrs.ia_valid =
ATTR_UID|ATTR_GID|ATTR_MODE;
inode_lock(d_inode(dentry));
- notify_change(&init_user_ns, dentry, &newattrs, NULL);
+ notify_change(&nop_mnt_idmap, dentry, &newattrs, NULL);
inode_unlock(d_inode(dentry));
- err = vfs_unlink(&init_user_ns, d_inode(parent.dentry),
+ err = vfs_unlink(&nop_mnt_idmap, d_inode(parent.dentry),
dentry, NULL);
if (!err || err == -ENOENT)
deleted = 1;
@@ -376,9 +378,9 @@ int __init devtmpfs_mount(void)
err = init_mount("devtmpfs", "dev", "devtmpfs", DEVTMPFS_MFLAGS, NULL);
if (err)
- printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
+ pr_info("error mounting %d\n", err);
else
- printk(KERN_INFO "devtmpfs: mounted\n");
+ pr_info("mounted\n");
return err;
}
@@ -460,14 +462,12 @@ int __init devtmpfs_init(void)
mnt = vfs_kern_mount(&internal_fs_type, 0, "devtmpfs", opts);
if (IS_ERR(mnt)) {
- printk(KERN_ERR "devtmpfs: unable to create devtmpfs %ld\n",
- PTR_ERR(mnt));
+ pr_err("unable to create devtmpfs %ld\n", PTR_ERR(mnt));
return PTR_ERR(mnt);
}
err = register_filesystem(&dev_fs_type);
if (err) {
- printk(KERN_ERR "devtmpfs: unable to register devtmpfs "
- "type %i\n", err);
+ pr_err("unable to register devtmpfs type %d\n", err);
return err;
}
@@ -480,12 +480,12 @@ int __init devtmpfs_init(void)
}
if (err) {
- printk(KERN_ERR "devtmpfs: unable to create devtmpfs %i\n", err);
+ pr_err("unable to create devtmpfs %d\n", err);
unregister_filesystem(&dev_fs_type);
thread = NULL;
return err;
}
- printk(KERN_INFO "devtmpfs: initialized\n");
+ pr_info("initialized\n");
return 0;
}
diff --git a/drivers/base/driver.c b/drivers/base/driver.c
index 676b6275d5b5..c8436c26ed6a 100644
--- a/drivers/base/driver.c
+++ b/drivers/base/driver.c
@@ -224,7 +224,7 @@ int driver_register(struct device_driver *drv)
int ret;
struct device_driver *other;
- if (!drv->bus->p) {
+ if (!bus_is_registered(drv->bus)) {
pr_err("Driver '%s' was unable to register with bus_type '%s' because the bus was not initialized.\n",
drv->name, drv->bus->name);
return -EINVAL;
@@ -274,30 +274,3 @@ void driver_unregister(struct device_driver *drv)
bus_remove_driver(drv);
}
EXPORT_SYMBOL_GPL(driver_unregister);
-
-/**
- * driver_find - locate driver on a bus by its name.
- * @name: name of the driver.
- * @bus: bus to scan for the driver.
- *
- * Call kset_find_obj() to iterate over list of drivers on
- * a bus to find driver by name. Return driver if found.
- *
- * This routine provides no locking to prevent the driver it returns
- * from being unregistered or unloaded while the caller is using it.
- * The caller is responsible for preventing this.
- */
-struct device_driver *driver_find(const char *name, struct bus_type *bus)
-{
- struct kobject *k = kset_find_obj(bus->p->drivers_kset, name);
- struct driver_private *priv;
-
- if (k) {
- /* Drop reference added by kset_find_obj() */
- kobject_put(k);
- priv = to_driver(k);
- return priv->driver;
- }
- return NULL;
-}
-EXPORT_SYMBOL_GPL(driver_find);
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index fe98fb8d94e5..b456ac213610 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -115,18 +115,13 @@ unsigned long __weak memory_block_size_bytes(void)
}
EXPORT_SYMBOL_GPL(memory_block_size_bytes);
-/*
- * Show the first physical section index (number) of this memory block.
- */
+/* Show the memory block ID, relative to the memory block size */
static ssize_t phys_index_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct memory_block *mem = to_memory_block(dev);
- unsigned long phys_index;
-
- phys_index = mem->start_section_nr / sections_per_block;
- return sysfs_emit(buf, "%08lx\n", phys_index);
+ return sysfs_emit(buf, "%08lx\n", memory_block_id(mem->start_section_nr));
}
/*
diff --git a/drivers/base/node.c b/drivers/base/node.c
index faf3597a96da..b46db17124f3 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -587,6 +587,9 @@ static const struct attribute_group *node_dev_groups[] = {
#ifdef CONFIG_HAVE_ARCH_NODE_DEV_GROUP
&arch_node_dev_group,
#endif
+#ifdef CONFIG_MEMORY_FAILURE
+ &memory_failure_attr_group,
+#endif
NULL
};
diff --git a/drivers/base/physical_location.c b/drivers/base/physical_location.c
index 87af641cfe1a..951819e71b4a 100644
--- a/drivers/base/physical_location.c
+++ b/drivers/base/physical_location.c
@@ -24,8 +24,11 @@ bool dev_add_physical_location(struct device *dev)
dev->physical_location =
kzalloc(sizeof(*dev->physical_location), GFP_KERNEL);
- if (!dev->physical_location)
+ if (!dev->physical_location) {
+ ACPI_FREE(pld);
return false;
+ }
+
dev->physical_location->panel = pld->panel;
dev->physical_location->vertical_position = pld->vertical_position;
dev->physical_location->horizontal_position = pld->horizontal_position;
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 968f3d71eeab..77510e4f47de 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -441,11 +441,9 @@ static int __platform_get_irq_byname(struct platform_device *dev,
struct resource *r;
int ret;
- if (!dev->dev.of_node || IS_ENABLED(CONFIG_OF_IRQ)) {
- ret = fwnode_irq_get_byname(dev_fwnode(&dev->dev), name);
- if (ret > 0 || ret == -EPROBE_DEFER)
- return ret;
- }
+ ret = fwnode_irq_get_byname(dev_fwnode(&dev->dev), name);
+ if (ret > 0 || ret == -EPROBE_DEFER)
+ return ret;
r = platform_get_resource_byname(dev, IORESOURCE_IRQ, name);
if (r) {
@@ -499,6 +497,8 @@ EXPORT_SYMBOL_GPL(platform_get_irq_byname_optional);
* platform_add_devices - add a numbers of platform devices
* @devs: array of platform devices to add
* @num: number of platform devices in array
+ *
+ * Return: 0 on success, negative error number on failure.
*/
int platform_add_devices(struct platform_device **devs, int num)
{
@@ -883,6 +883,13 @@ static int platform_probe_fail(struct platform_device *pdev)
return -ENXIO;
}
+static int is_bound_to_driver(struct device *dev, void *driver)
+{
+ if (dev->driver == driver)
+ return 1;
+ return 0;
+}
+
/**
* __platform_driver_probe - register driver for non-hotpluggable device
* @drv: platform driver structure
@@ -906,7 +913,7 @@ static int platform_probe_fail(struct platform_device *pdev)
int __init_or_module __platform_driver_probe(struct platform_driver *drv,
int (*probe)(struct platform_device *), struct module *module)
{
- int retval, code;
+ int retval;
if (drv->driver.probe_type == PROBE_PREFER_ASYNCHRONOUS) {
pr_err("%s: drivers registered with %s can not be probed asynchronously\n",
@@ -932,24 +939,21 @@ int __init_or_module __platform_driver_probe(struct platform_driver *drv,
/* temporary section violation during probe() */
drv->probe = probe;
- retval = code = __platform_driver_register(drv, module);
+ retval = __platform_driver_register(drv, module);
if (retval)
return retval;
- /*
- * Fixup that section violation, being paranoid about code scanning
- * the list of drivers in order to probe new devices. Check to see
- * if the probe was successful, and make sure any forced probes of
- * new devices fail.
- */
- spin_lock(&drv->driver.bus->p->klist_drivers.k_lock);
+ /* Force all new probes of this driver to fail */
drv->probe = platform_probe_fail;
- if (code == 0 && list_empty(&drv->driver.p->klist_devices.k_list))
- retval = -ENODEV;
- spin_unlock(&drv->driver.bus->p->klist_drivers.k_lock);
- if (code != retval)
+ /* Walk all platform devices and see if any actually bound to this driver.
+ * If not, return an error as the device should have done so by now.
+ */
+ if (!bus_for_each_dev(&platform_bus_type, NULL, &drv->driver, is_bound_to_driver)) {
+ retval = -ENODEV;
platform_driver_unregister(drv);
+ }
+
return retval;
}
EXPORT_SYMBOL_GPL(__platform_driver_probe);
@@ -1353,9 +1357,9 @@ static int platform_match(struct device *dev, struct device_driver *drv)
return (strcmp(pdev->name, drv->name) == 0);
}
-static int platform_uevent(struct device *dev, struct kobj_uevent_env *env)
+static int platform_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
- struct platform_device *pdev = to_platform_device(dev);
+ const struct platform_device *pdev = to_platform_device(dev);
int rc;
/* Some devices have extra OF data and an OF-style MODALIAS */
@@ -1416,7 +1420,9 @@ static void platform_remove(struct device *_dev)
struct platform_driver *drv = to_platform_driver(_dev->driver);
struct platform_device *dev = to_platform_device(_dev);
- if (drv->remove) {
+ if (drv->remove_new) {
+ drv->remove_new(dev);
+ } else if (drv->remove) {
int ret = drv->remove(dev);
if (ret)
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 967bcf9d415e..6097644ebdc5 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -220,13 +220,10 @@ static void genpd_debug_add(struct generic_pm_domain *genpd);
static void genpd_debug_remove(struct generic_pm_domain *genpd)
{
- struct dentry *d;
-
if (!genpd_debugfs_dir)
return;
- d = debugfs_lookup(genpd->name, genpd_debugfs_dir);
- debugfs_remove(d);
+ debugfs_lookup_and_remove(genpd->name, genpd_debugfs_dir);
}
static void genpd_update_accounting(struct generic_pm_domain *genpd)
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 50e726b6c2cf..4545669cb973 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -468,7 +468,7 @@ static int rpm_idle(struct device *dev, int rpmflags)
int (*callback)(struct device *);
int retval;
- trace_rpm_idle_rcuidle(dev, rpmflags);
+ trace_rpm_idle(dev, rpmflags);
retval = rpm_check_suspend_allowed(dev);
if (retval < 0)
; /* Conditions are wrong. */
@@ -508,7 +508,7 @@ static int rpm_idle(struct device *dev, int rpmflags)
dev->power.request_pending = true;
queue_work(pm_wq, &dev->power.work);
}
- trace_rpm_return_int_rcuidle(dev, _THIS_IP_, 0);
+ trace_rpm_return_int(dev, _THIS_IP_, 0);
return 0;
}
@@ -530,7 +530,7 @@ static int rpm_idle(struct device *dev, int rpmflags)
wake_up_all(&dev->power.wait_queue);
out:
- trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
+ trace_rpm_return_int(dev, _THIS_IP_, retval);
return retval ? retval : rpm_suspend(dev, rpmflags | RPM_AUTO);
}
@@ -562,7 +562,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
struct device *parent = NULL;
int retval;
- trace_rpm_suspend_rcuidle(dev, rpmflags);
+ trace_rpm_suspend(dev, rpmflags);
repeat:
retval = rpm_check_suspend_allowed(dev);
@@ -713,7 +713,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
}
out:
- trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
+ trace_rpm_return_int(dev, _THIS_IP_, retval);
return retval;
@@ -765,7 +765,7 @@ static int rpm_resume(struct device *dev, int rpmflags)
struct device *parent = NULL;
int retval = 0;
- trace_rpm_resume_rcuidle(dev, rpmflags);
+ trace_rpm_resume(dev, rpmflags);
repeat:
if (dev->power.runtime_error) {
@@ -935,7 +935,7 @@ static int rpm_resume(struct device *dev, int rpmflags)
spin_lock_irq(&dev->power.lock);
}
- trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
+ trace_rpm_return_int(dev, _THIS_IP_, retval);
return retval;
}
@@ -1091,7 +1091,7 @@ int __pm_runtime_idle(struct device *dev, int rpmflags)
if (retval < 0) {
return retval;
} else if (retval > 0) {
- trace_rpm_usage_rcuidle(dev, rpmflags);
+ trace_rpm_usage(dev, rpmflags);
return 0;
}
}
@@ -1129,7 +1129,7 @@ int __pm_runtime_suspend(struct device *dev, int rpmflags)
if (retval < 0) {
return retval;
} else if (retval > 0) {
- trace_rpm_usage_rcuidle(dev, rpmflags);
+ trace_rpm_usage(dev, rpmflags);
return 0;
}
}
@@ -1212,7 +1212,7 @@ int pm_runtime_get_if_active(struct device *dev, bool ign_usage_count)
} else {
retval = atomic_inc_not_zero(&dev->power.usage_count);
}
- trace_rpm_usage_rcuidle(dev, 0);
+ trace_rpm_usage(dev, 0);
spin_unlock_irqrestore(&dev->power.lock, flags);
return retval;
@@ -1576,7 +1576,7 @@ void pm_runtime_allow(struct device *dev)
if (ret == 0)
rpm_idle(dev, RPM_AUTO | RPM_ASYNC);
else if (ret > 0)
- trace_rpm_usage_rcuidle(dev, RPM_AUTO | RPM_ASYNC);
+ trace_rpm_usage(dev, RPM_AUTO | RPM_ASYNC);
out:
spin_unlock_irq(&dev->power.lock);
@@ -1646,7 +1646,7 @@ static void update_autosuspend(struct device *dev, int old_delay, int old_use)
atomic_inc(&dev->power.usage_count);
rpm_resume(dev, 0);
} else {
- trace_rpm_usage_rcuidle(dev, 0);
+ trace_rpm_usage(dev, 0);
}
}
@@ -1864,6 +1864,10 @@ static bool pm_runtime_need_not_resume(struct device *dev)
* sure the device is put into low power state and it should only be used during
* system-wide PM transitions to sleep states. It assumes that the analogous
* pm_runtime_force_resume() will be used to resume the device.
+ *
+ * Do not use with DPM_FLAG_SMART_SUSPEND as this can lead to an inconsistent
+ * state where this function has called the ->runtime_suspend callback but the
+ * PM core marks the driver as runtime active.
*/
int pm_runtime_force_suspend(struct device *dev)
{
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
index a8f185430a07..8c903b8c9714 100644
--- a/drivers/base/regmap/regmap-irq.c
+++ b/drivers/base/regmap/regmap-irq.c
@@ -189,12 +189,8 @@ static void regmap_irq_sync_unlock(struct irq_data *data)
if (!d->type_buf_def[i])
continue;
reg = d->get_irq_reg(d, d->chip->type_base, i);
- if (d->chip->type_invert)
- ret = regmap_update_bits(d->map, reg,
- d->type_buf_def[i], ~d->type_buf[i]);
- else
- ret = regmap_update_bits(d->map, reg,
- d->type_buf_def[i], d->type_buf[i]);
+ ret = regmap_update_bits(d->map, reg,
+ d->type_buf_def[i], d->type_buf[i]);
if (ret != 0)
dev_err(d->map->dev, "Failed to sync type in %x\n",
reg);
@@ -882,20 +878,6 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
*/
dev_warn(map->dev, "mask_base and unmask_base are inverted, please fix it");
- /* Might as well warn about mask_invert while we're at it... */
- if (chip->mask_invert)
- dev_warn(map->dev, "mask_invert=true ignored");
-
- d->mask_base = chip->unmask_base;
- d->unmask_base = chip->mask_base;
- } else if (chip->mask_invert) {
- /*
- * Swap the roles of mask_base and unmask_base if the bits are
- * inverted. This is deprecated, drivers should use unmask_base
- * directly.
- */
- dev_warn(map->dev, "mask_invert=true is deprecated; please switch to unmask_base");
-
d->mask_base = chip->unmask_base;
d->unmask_base = chip->mask_base;
} else {
@@ -1028,9 +1010,6 @@ int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
ret = regmap_read(map, reg, &d->type_buf_def[i]);
- if (d->chip->type_invert)
- d->type_buf_def[i] = ~d->type_buf_def[i];
-
if (ret) {
dev_err(map->dev, "Failed to get type defaults at 0x%x: %d\n",
reg, ret);
diff --git a/drivers/base/regmap/regmap-mdio.c b/drivers/base/regmap/regmap-mdio.c
index f7293040a2b1..6aa6a2409478 100644
--- a/drivers/base/regmap/regmap-mdio.c
+++ b/drivers/base/regmap/regmap-mdio.c
@@ -10,31 +10,21 @@
/* Clause-45 mask includes the device type (5 bit) and actual register number (16 bit) */
#define REGNUM_C45_MASK GENMASK(20, 0)
-static int regmap_mdio_read(struct mdio_device *mdio_dev, u32 reg, unsigned int *val)
+static int regmap_mdio_c22_read(void *context, unsigned int reg, unsigned int *val)
{
+ struct mdio_device *mdio_dev = context;
int ret;
+ if (unlikely(reg & ~REGNUM_C22_MASK))
+ return -ENXIO;
+
ret = mdiodev_read(mdio_dev, reg);
if (ret < 0)
return ret;
*val = ret & REGVAL_MASK;
- return 0;
-}
-
-static int regmap_mdio_write(struct mdio_device *mdio_dev, u32 reg, unsigned int val)
-{
- return mdiodev_write(mdio_dev, reg, val);
-}
-
-static int regmap_mdio_c22_read(void *context, unsigned int reg, unsigned int *val)
-{
- struct mdio_device *mdio_dev = context;
-
- if (unlikely(reg & ~REGNUM_C22_MASK))
- return -ENXIO;
- return regmap_mdio_read(mdio_dev, reg, val);
+ return 0;
}
static int regmap_mdio_c22_write(void *context, unsigned int reg, unsigned int val)
@@ -55,21 +45,36 @@ static const struct regmap_bus regmap_mdio_c22_bus = {
static int regmap_mdio_c45_read(void *context, unsigned int reg, unsigned int *val)
{
struct mdio_device *mdio_dev = context;
+ unsigned int devad;
+ int ret;
if (unlikely(reg & ~REGNUM_C45_MASK))
return -ENXIO;
- return regmap_mdio_read(mdio_dev, MII_ADDR_C45 | reg, val);
+ devad = reg >> REGMAP_MDIO_C45_DEVAD_SHIFT;
+ reg = reg & REGMAP_MDIO_C45_REGNUM_MASK;
+
+ ret = mdiodev_c45_read(mdio_dev, devad, reg);
+ if (ret < 0)
+ return ret;
+
+ *val = ret & REGVAL_MASK;
+
+ return 0;
}
static int regmap_mdio_c45_write(void *context, unsigned int reg, unsigned int val)
{
struct mdio_device *mdio_dev = context;
+ unsigned int devad;
if (unlikely(reg & ~REGNUM_C45_MASK))
return -ENXIO;
- return regmap_mdio_write(mdio_dev, MII_ADDR_C45 | reg, val);
+ devad = reg >> REGMAP_MDIO_C45_DEVAD_SHIFT;
+ reg = reg & REGMAP_MDIO_C45_REGNUM_MASK;
+
+ return mdiodev_c45_write(mdio_dev, devad, reg, val);
}
static const struct regmap_bus regmap_mdio_c45_bus = {
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index d12d669157f2..d2a54eb0efd9 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -1942,6 +1942,8 @@ static int _regmap_bus_reg_write(void *context, unsigned int reg,
{
struct regmap *map = context;
+ reg += map->reg_base;
+ reg >>= map->format.reg_downshift;
return map->bus->reg_write(map->bus_context, reg, val);
}
@@ -2840,6 +2842,8 @@ static int _regmap_bus_reg_read(void *context, unsigned int reg,
{
struct regmap *map = context;
+ reg += map->reg_base;
+ reg >>= map->format.reg_downshift;
return map->bus->reg_read(map->bus_context, reg, val);
}
@@ -3231,6 +3235,8 @@ static int _regmap_update_bits(struct regmap *map, unsigned int reg,
*change = false;
if (regmap_volatile(map, reg) && map->reg_update_bits) {
+ reg += map->reg_base;
+ reg >>= map->format.reg_downshift;
ret = map->reg_update_bits(map->bus_context, reg, mask, val);
if (ret == 0 && change)
*change = true;
diff --git a/drivers/base/soc.c b/drivers/base/soc.c
index 22130b5f789d..0fb1d4ab9d8a 100644
--- a/drivers/base/soc.c
+++ b/drivers/base/soc.c
@@ -30,6 +30,7 @@ struct soc_device {
static struct bus_type soc_bus_type = {
.name = "soc",
};
+static bool soc_bus_registered;
static DEVICE_ATTR(machine, 0444, soc_info_show, NULL);
static DEVICE_ATTR(family, 0444, soc_info_show, NULL);
@@ -117,7 +118,7 @@ struct soc_device *soc_device_register(struct soc_device_attribute *soc_dev_attr
const struct attribute_group **soc_attr_groups;
int ret;
- if (!soc_bus_type.p) {
+ if (!soc_bus_registered) {
if (early_soc_dev_attr)
return ERR_PTR(-EBUSY);
early_soc_dev_attr = soc_dev_attr;
@@ -183,6 +184,7 @@ static int __init soc_bus_register(void)
ret = bus_register(&soc_bus_type);
if (ret)
return ret;
+ soc_bus_registered = true;
if (early_soc_dev_attr)
return PTR_ERR(soc_device_register(early_soc_dev_attr));
diff --git a/drivers/base/swnode.c b/drivers/base/swnode.c
index 0a482212c7e8..1886995a0b3a 100644
--- a/drivers/base/swnode.c
+++ b/drivers/base/swnode.c
@@ -760,7 +760,7 @@ static void software_node_release(struct kobject *kobj)
kfree(swnode);
}
-static struct kobj_type software_node_type = {
+static const struct kobj_type software_node_type = {
.release = software_node_release,
.sysfs_ops = &kobj_sysfs_ops,
};
@@ -820,67 +820,6 @@ swnode_register(const struct software_node *node, struct swnode *parent,
}
/**
- * software_node_register_nodes - Register an array of software nodes
- * @nodes: Zero terminated array of software nodes to be registered
- *
- * Register multiple software nodes at once. If any node in the array
- * has its .parent pointer set (which can only be to another software_node),
- * then its parent **must** have been registered before it is; either outside
- * of this function or by ordering the array such that parent comes before
- * child.
- */
-int software_node_register_nodes(const struct software_node *nodes)
-{
- int ret;
- int i;
-
- for (i = 0; nodes[i].name; i++) {
- const struct software_node *parent = nodes[i].parent;
-
- if (parent && !software_node_to_swnode(parent)) {
- ret = -EINVAL;
- goto err_unregister_nodes;
- }
-
- ret = software_node_register(&nodes[i]);
- if (ret)
- goto err_unregister_nodes;
- }
-
- return 0;
-
-err_unregister_nodes:
- software_node_unregister_nodes(nodes);
- return ret;
-}
-EXPORT_SYMBOL_GPL(software_node_register_nodes);
-
-/**
- * software_node_unregister_nodes - Unregister an array of software nodes
- * @nodes: Zero terminated array of software nodes to be unregistered
- *
- * Unregister multiple software nodes at once. If parent pointers are set up
- * in any of the software nodes then the array **must** be ordered such that
- * parents come before their children.
- *
- * NOTE: If you are uncertain whether the array is ordered such that
- * parents will be unregistered before their children, it is wiser to
- * remove the nodes individually, in the correct order (child before
- * parent).
- */
-void software_node_unregister_nodes(const struct software_node *nodes)
-{
- unsigned int i = 0;
-
- while (nodes[i].name)
- i++;
-
- while (i--)
- software_node_unregister(&nodes[i]);
-}
-EXPORT_SYMBOL_GPL(software_node_unregister_nodes);
-
-/**
* software_node_register_node_group - Register a group of software nodes
* @node_group: NULL terminated array of software node pointers to be registered
*
diff --git a/drivers/base/test/property-entry-test.c b/drivers/base/test/property-entry-test.c
index 6071d5bc128c..dd2b606d76a3 100644
--- a/drivers/base/test/property-entry-test.c
+++ b/drivers/base/test/property-entry-test.c
@@ -405,20 +405,18 @@ static void pe_test_move_inline_str(struct kunit *test)
/* Handling of reference properties */
static void pe_test_reference(struct kunit *test)
{
- static const struct software_node nodes[] = {
- { .name = "1", },
- { .name = "2", },
- { }
- };
+ static const struct software_node node1 = { .name = "1" };
+ static const struct software_node node2 = { .name = "2" };
+ static const struct software_node *group[] = { &node1, &node2, NULL };
static const struct software_node_ref_args refs[] = {
- SOFTWARE_NODE_REFERENCE(&nodes[0]),
- SOFTWARE_NODE_REFERENCE(&nodes[1], 3, 4),
+ SOFTWARE_NODE_REFERENCE(&node1),
+ SOFTWARE_NODE_REFERENCE(&node2, 3, 4),
};
const struct property_entry entries[] = {
- PROPERTY_ENTRY_REF("ref-1", &nodes[0]),
- PROPERTY_ENTRY_REF("ref-2", &nodes[1], 1, 2),
+ PROPERTY_ENTRY_REF("ref-1", &node1),
+ PROPERTY_ENTRY_REF("ref-2", &node2, 1, 2),
PROPERTY_ENTRY_REF_ARRAY("ref-3", refs),
{ }
};
@@ -427,7 +425,7 @@ static void pe_test_reference(struct kunit *test)
struct fwnode_reference_args ref;
int error;
- error = software_node_register_nodes(nodes);
+ error = software_node_register_node_group(group);
KUNIT_ASSERT_EQ(test, error, 0);
node = fwnode_create_software_node(entries, NULL);
@@ -436,7 +434,7 @@ static void pe_test_reference(struct kunit *test)
error = fwnode_property_get_reference_args(node, "ref-1", NULL,
0, 0, &ref);
KUNIT_ASSERT_EQ(test, error, 0);
- KUNIT_EXPECT_PTR_EQ(test, to_software_node(ref.fwnode), &nodes[0]);
+ KUNIT_EXPECT_PTR_EQ(test, to_software_node(ref.fwnode), &node1);
KUNIT_EXPECT_EQ(test, ref.nargs, 0U);
/* wrong index */
@@ -447,7 +445,7 @@ static void pe_test_reference(struct kunit *test)
error = fwnode_property_get_reference_args(node, "ref-2", NULL,
1, 0, &ref);
KUNIT_ASSERT_EQ(test, error, 0);
- KUNIT_EXPECT_PTR_EQ(test, to_software_node(ref.fwnode), &nodes[1]);
+ KUNIT_EXPECT_PTR_EQ(test, to_software_node(ref.fwnode), &node2);
KUNIT_EXPECT_EQ(test, ref.nargs, 1U);
KUNIT_EXPECT_EQ(test, ref.args[0], 1LLU);
@@ -455,7 +453,7 @@ static void pe_test_reference(struct kunit *test)
error = fwnode_property_get_reference_args(node, "ref-2", NULL,
3, 0, &ref);
KUNIT_ASSERT_EQ(test, error, 0);
- KUNIT_EXPECT_PTR_EQ(test, to_software_node(ref.fwnode), &nodes[1]);
+ KUNIT_EXPECT_PTR_EQ(test, to_software_node(ref.fwnode), &node2);
KUNIT_EXPECT_EQ(test, ref.nargs, 3U);
KUNIT_EXPECT_EQ(test, ref.args[0], 1LLU);
KUNIT_EXPECT_EQ(test, ref.args[1], 2LLU);
@@ -470,14 +468,14 @@ static void pe_test_reference(struct kunit *test)
error = fwnode_property_get_reference_args(node, "ref-3", NULL,
0, 0, &ref);
KUNIT_ASSERT_EQ(test, error, 0);
- KUNIT_EXPECT_PTR_EQ(test, to_software_node(ref.fwnode), &nodes[0]);
+ KUNIT_EXPECT_PTR_EQ(test, to_software_node(ref.fwnode), &node1);
KUNIT_EXPECT_EQ(test, ref.nargs, 0U);
/* second reference in the array */
error = fwnode_property_get_reference_args(node, "ref-3", NULL,
2, 1, &ref);
KUNIT_ASSERT_EQ(test, error, 0);
- KUNIT_EXPECT_PTR_EQ(test, to_software_node(ref.fwnode), &nodes[1]);
+ KUNIT_EXPECT_PTR_EQ(test, to_software_node(ref.fwnode), &node2);
KUNIT_EXPECT_EQ(test, ref.nargs, 2U);
KUNIT_EXPECT_EQ(test, ref.args[0], 3LLU);
KUNIT_EXPECT_EQ(test, ref.args[1], 4LLU);
@@ -488,7 +486,7 @@ static void pe_test_reference(struct kunit *test)
KUNIT_EXPECT_NE(test, error, 0);
fwnode_remove_software_node(node);
- software_node_unregister_nodes(nodes);
+ software_node_unregister_node_group(group);
}
static struct kunit_case property_entry_test_cases[] = {
diff --git a/drivers/base/transport_class.c b/drivers/base/transport_class.c
index ccc86206e508..09ee2a1e35bb 100644
--- a/drivers/base/transport_class.c
+++ b/drivers/base/transport_class.c
@@ -155,12 +155,27 @@ static int transport_add_class_device(struct attribute_container *cont,
struct device *dev,
struct device *classdev)
{
+ struct transport_class *tclass = class_to_transport_class(cont->class);
int error = attribute_container_add_class_device(classdev);
struct transport_container *tcont =
attribute_container_to_transport_container(cont);
- if (!error && tcont->statistics)
+ if (error)
+ goto err_remove;
+
+ if (tcont->statistics) {
error = sysfs_create_group(&classdev->kobj, tcont->statistics);
+ if (error)
+ goto err_del;
+ }
+
+ return 0;
+
+err_del:
+ attribute_container_class_device_del(classdev);
+err_remove:
+ if (tclass->remove)
+ tclass->remove(tcont, dev, classdev);
return error;
}
diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c
index 0a8469e0b13a..7b39f010bbb3 100644
--- a/drivers/bcma/main.c
+++ b/drivers/bcma/main.c
@@ -28,7 +28,7 @@ static DEFINE_MUTEX(bcma_buses_mutex);
static int bcma_bus_match(struct device *dev, struct device_driver *drv);
static int bcma_device_probe(struct device *dev);
static void bcma_device_remove(struct device *dev);
-static int bcma_device_uevent(struct device *dev, struct kobj_uevent_env *env);
+static int bcma_device_uevent(const struct device *dev, struct kobj_uevent_env *env);
static ssize_t manuf_show(struct device *dev, struct device_attribute *attr, char *buf)
{
@@ -627,9 +627,9 @@ static void bcma_device_remove(struct device *dev)
put_device(dev);
}
-static int bcma_device_uevent(struct device *dev, struct kobj_uevent_env *env)
+static int bcma_device_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
- struct bcma_device *core = container_of(dev, struct bcma_device, dev);
+ const struct bcma_device *core = container_of_const(dev, struct bcma_device, dev);
return add_uevent_var(env,
"MODALIAS=bcma:m%04Xid%04Xrev%02Xcl%02X",
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index a41145d52de9..f79f20430ef7 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -103,35 +103,6 @@ config GDROM
Most users will want to say "Y" here.
You can also build this as a module which will be called gdrom.
-config PARIDE
- tristate "Parallel port IDE device support"
- depends on PARPORT_PC
- help
- There are many external CD-ROM and disk devices that connect through
- your computer's parallel port. Most of them are actually IDE devices
- using a parallel port IDE adapter. This option enables the PARIDE
- subsystem which contains drivers for many of these external drives.
- Read <file:Documentation/admin-guide/blockdev/paride.rst> for more information.
-
- If you have said Y to the "Parallel-port support" configuration
- option, you may share a single port between your printer and other
- parallel port devices. Answer Y to build PARIDE support into your
- kernel, or M if you would like to build it as a loadable module. If
- your parallel port support is in a loadable module, you must build
- PARIDE as a module. If you built PARIDE support into your kernel,
- you may still build the individual protocol modules and high-level
- drivers as loadable modules. If you build this support as a module,
- it will be called paride.
-
- To use the PARIDE support, you must say Y or M here and also to at
- least one high-level driver (e.g. "Parallel port IDE disks",
- "Parallel port ATAPI CD-ROMs", "Parallel port ATAPI disks" etc.) and
- to at least one protocol driver (e.g. "ATEN EH-100 protocol",
- "MicroSolutions backpack protocol", "DataStor Commuter protocol"
- etc.).
-
-source "drivers/block/paride/Kconfig"
-
source "drivers/block/mtip32xx/Kconfig"
source "drivers/block/zram/Kconfig"
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 20acc4a1fd6d..34177f1bd97d 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -78,32 +78,25 @@ static struct page *brd_lookup_page(struct brd_device *brd, sector_t sector)
}
/*
- * Look up and return a brd's page for a given sector.
- * If one does not exist, allocate an empty page, and insert that. Then
- * return it.
+ * Insert a new page for a given sector, if one does not already exist.
*/
-static struct page *brd_insert_page(struct brd_device *brd, sector_t sector)
+static int brd_insert_page(struct brd_device *brd, sector_t sector, gfp_t gfp)
{
pgoff_t idx;
struct page *page;
- gfp_t gfp_flags;
+ int ret = 0;
page = brd_lookup_page(brd, sector);
if (page)
- return page;
+ return 0;
- /*
- * Must use NOIO because we don't want to recurse back into the
- * block or filesystem layers from page reclaim.
- */
- gfp_flags = GFP_NOIO | __GFP_ZERO | __GFP_HIGHMEM;
- page = alloc_page(gfp_flags);
+ page = alloc_page(gfp | __GFP_ZERO | __GFP_HIGHMEM);
if (!page)
- return NULL;
+ return -ENOMEM;
- if (radix_tree_preload(GFP_NOIO)) {
+ if (radix_tree_maybe_preload(gfp)) {
__free_page(page);
- return NULL;
+ return -ENOMEM;
}
spin_lock(&brd->brd_lock);
@@ -112,16 +105,17 @@ static struct page *brd_insert_page(struct brd_device *brd, sector_t sector)
if (radix_tree_insert(&brd->brd_pages, idx, page)) {
__free_page(page);
page = radix_tree_lookup(&brd->brd_pages, idx);
- BUG_ON(!page);
- BUG_ON(page->index != idx);
+ if (!page)
+ ret = -ENOMEM;
+ else if (page->index != idx)
+ ret = -EIO;
} else {
brd->brd_nr_pages++;
}
spin_unlock(&brd->brd_lock);
radix_tree_preload_end();
-
- return page;
+ return ret;
}
/*
@@ -170,20 +164,22 @@ static void brd_free_pages(struct brd_device *brd)
/*
* copy_to_brd_setup must be called before copy_to_brd. It may sleep.
*/
-static int copy_to_brd_setup(struct brd_device *brd, sector_t sector, size_t n)
+static int copy_to_brd_setup(struct brd_device *brd, sector_t sector, size_t n,
+ gfp_t gfp)
{
unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
size_t copy;
+ int ret;
copy = min_t(size_t, n, PAGE_SIZE - offset);
- if (!brd_insert_page(brd, sector))
- return -ENOSPC;
+ ret = brd_insert_page(brd, sector, gfp);
+ if (ret)
+ return ret;
if (copy < n) {
sector += copy >> SECTOR_SHIFT;
- if (!brd_insert_page(brd, sector))
- return -ENOSPC;
+ ret = brd_insert_page(brd, sector, gfp);
}
- return 0;
+ return ret;
}
/*
@@ -256,20 +252,26 @@ static void copy_from_brd(void *dst, struct brd_device *brd,
* Process a single bvec of a bio.
*/
static int brd_do_bvec(struct brd_device *brd, struct page *page,
- unsigned int len, unsigned int off, enum req_op op,
+ unsigned int len, unsigned int off, blk_opf_t opf,
sector_t sector)
{
void *mem;
int err = 0;
- if (op_is_write(op)) {
- err = copy_to_brd_setup(brd, sector, len);
+ if (op_is_write(opf)) {
+ /*
+ * Must use NOIO because we don't want to recurse back into the
+ * block or filesystem layers from page reclaim.
+ */
+ gfp_t gfp = opf & REQ_NOWAIT ? GFP_NOWAIT : GFP_NOIO;
+
+ err = copy_to_brd_setup(brd, sector, len, gfp);
if (err)
goto out;
}
mem = kmap_atomic(page);
- if (!op_is_write(op)) {
+ if (!op_is_write(opf)) {
copy_from_brd(mem + off, brd, sector, len);
flush_dcache_page(page);
} else {
@@ -298,8 +300,12 @@ static void brd_submit_bio(struct bio *bio)
(len & (SECTOR_SIZE - 1)));
err = brd_do_bvec(brd, bvec.bv_page, len, bvec.bv_offset,
- bio_op(bio), sector);
+ bio->bi_opf, sector);
if (err) {
+ if (err == -ENOMEM && bio->bi_opf & REQ_NOWAIT) {
+ bio_wouldblock_error(bio);
+ return;
+ }
bio_io_error(bio);
return;
}
@@ -309,23 +315,9 @@ static void brd_submit_bio(struct bio *bio)
bio_endio(bio);
}
-static int brd_rw_page(struct block_device *bdev, sector_t sector,
- struct page *page, enum req_op op)
-{
- struct brd_device *brd = bdev->bd_disk->private_data;
- int err;
-
- if (PageTransHuge(page))
- return -ENOTSUPP;
- err = brd_do_bvec(brd, page, PAGE_SIZE, 0, op, sector);
- page_endio(page, op_is_write(op), err);
- return err;
-}
-
static const struct block_device_operations brd_fops = {
.owner = THIS_MODULE,
.submit_bio = brd_submit_bio,
- .rw_page = brd_rw_page,
};
/*
@@ -411,7 +403,9 @@ static int brd_alloc(int i)
/* Tell the block layer that this is not a rotational device */
blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue);
+ blk_queue_flag_set(QUEUE_FLAG_SYNCHRONOUS, disk->queue);
blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, disk->queue);
+ blk_queue_flag_set(QUEUE_FLAG_NOWAIT, disk->queue);
err = add_disk(disk);
if (err)
goto out_cleanup_disk;
diff --git a/drivers/block/drbd/Makefile b/drivers/block/drbd/Makefile
index c93e462130ff..67a8b352a1d5 100644
--- a/drivers/block/drbd/Makefile
+++ b/drivers/block/drbd/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
-drbd-y := drbd_bitmap.o drbd_proc.o
+drbd-y := drbd_buildtag.o drbd_bitmap.o drbd_proc.o
drbd-y += drbd_worker.o drbd_receiver.o drbd_req.o drbd_actlog.o
drbd-y += drbd_main.o drbd_strings.o drbd_nl.o
drbd-y += drbd_interval.o drbd_state.o
diff --git a/drivers/block/drbd/drbd_buildtag.c b/drivers/block/drbd/drbd_buildtag.c
new file mode 100644
index 000000000000..cb1aa66d7d5d
--- /dev/null
+++ b/drivers/block/drbd/drbd_buildtag.c
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/drbd_config.h>
+#include <linux/module.h>
+
+const char *drbd_buildtag(void)
+{
+ /* DRBD built from external sources has here a reference to the
+ * git hash of the source code.
+ */
+
+ static char buildtag[38] = "\0uilt-in";
+
+ if (buildtag[0] == 0) {
+#ifdef MODULE
+ sprintf(buildtag, "srcversion: %-24s", THIS_MODULE->srcversion);
+#else
+ buildtag[0] = 'b';
+#endif
+ }
+
+ return buildtag;
+}
diff --git a/drivers/block/drbd/drbd_debugfs.c b/drivers/block/drbd/drbd_debugfs.c
index a72c096aa5b1..12460b584bcb 100644
--- a/drivers/block/drbd/drbd_debugfs.c
+++ b/drivers/block/drbd/drbd_debugfs.c
@@ -844,7 +844,7 @@ static int drbd_version_show(struct seq_file *m, void *ignored)
{
seq_printf(m, "# %s\n", drbd_buildtag());
seq_printf(m, "VERSION=%s\n", REL_VERSION);
- seq_printf(m, "API_VERSION=%u\n", API_VERSION);
+ seq_printf(m, "API_VERSION=%u\n", GENL_MAGIC_VERSION);
seq_printf(m, "PRO_VERSION_MIN=%u\n", PRO_VERSION_MIN);
seq_printf(m, "PRO_VERSION_MAX=%u\n", PRO_VERSION_MAX);
return 0;
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index ae713338aa46..d89b7d03d4c8 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -34,21 +34,12 @@
#include <linux/prefetch.h>
#include <linux/drbd_genl_api.h>
#include <linux/drbd.h>
+#include <linux/drbd_config.h>
#include "drbd_strings.h"
#include "drbd_state.h"
#include "drbd_protocol.h"
#include "drbd_polymorph_printk.h"
-#ifdef __CHECKER__
-# define __protected_by(x) __attribute__((require_context(x,1,999,"rdwr")))
-# define __protected_read_by(x) __attribute__((require_context(x,1,999,"read")))
-# define __protected_write_by(x) __attribute__((require_context(x,1,999,"write")))
-#else
-# define __protected_by(x)
-# define __protected_read_by(x)
-# define __protected_write_by(x)
-#endif
-
/* shared module parameters, defined in drbd_main.c */
#ifdef CONFIG_DRBD_FAULT_INJECTION
extern int drbd_enable_faults;
@@ -774,7 +765,7 @@ struct drbd_device {
unsigned long flags;
/* configured by drbdsetup */
- struct drbd_backing_dev *ldev __protected_by(local);
+ struct drbd_backing_dev *ldev;
sector_t p_size; /* partner's disk size */
struct request_queue *rq_queue;
diff --git a/drivers/block/drbd/drbd_interval.c b/drivers/block/drbd/drbd_interval.c
index 5024ffd6143d..873beda6de24 100644
--- a/drivers/block/drbd/drbd_interval.c
+++ b/drivers/block/drbd/drbd_interval.c
@@ -58,7 +58,7 @@ drbd_insert_interval(struct rb_root *root, struct drbd_interval *this)
* drbd_contains_interval - check if a tree contains a given interval
* @root: red black tree root
* @sector: start sector of @interval
- * @interval: may not be a valid pointer
+ * @interval: may be an invalid pointer
*
* Returns if the tree contains the node @interval with start sector @start.
* Does not dereference @interval until @interval is known to be a valid object
@@ -95,6 +95,10 @@ drbd_contains_interval(struct rb_root *root, sector_t sector,
void
drbd_remove_interval(struct rb_root *root, struct drbd_interval *this)
{
+ /* avoid endless loop */
+ if (drbd_interval_empty(this))
+ return;
+
rb_erase_augmented(&this->rb, root, &augment_callbacks);
}
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index e43dfb9eb6ad..2c764f7ee4a7 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -2899,7 +2899,7 @@ static int __init drbd_init(void)
pr_info("initialized. "
"Version: " REL_VERSION " (api:%d/proto:%d-%d)\n",
- API_VERSION, PRO_VERSION_MIN, PRO_VERSION_MAX);
+ GENL_MAGIC_VERSION, PRO_VERSION_MIN, PRO_VERSION_MAX);
pr_info("%s\n", drbd_buildtag());
pr_info("registered as block device major %d\n", DRBD_MAJOR);
return 0; /* Success! */
@@ -3776,24 +3776,6 @@ _drbd_insert_fault(struct drbd_device *device, unsigned int type)
}
#endif
-const char *drbd_buildtag(void)
-{
- /* DRBD built from external sources has here a reference to the
- git hash of the source code. */
-
- static char buildtag[38] = "\0uilt-in";
-
- if (buildtag[0] == 0) {
-#ifdef MODULE
- sprintf(buildtag, "srcversion: %-24s", THIS_MODULE->srcversion);
-#else
- buildtag[0] = 'b';
-#endif
- }
-
- return buildtag;
-}
-
module_init(drbd_init)
module_exit(drbd_cleanup)
diff --git a/drivers/block/drbd/drbd_proc.c b/drivers/block/drbd/drbd_proc.c
index 2227fb0db1ce..1d0feafceadc 100644
--- a/drivers/block/drbd/drbd_proc.c
+++ b/drivers/block/drbd/drbd_proc.c
@@ -228,7 +228,7 @@ int drbd_seq_show(struct seq_file *seq, void *v)
};
seq_printf(seq, "version: " REL_VERSION " (api:%d/proto:%d-%d)\n%s\n",
- API_VERSION, PRO_VERSION_MIN, PRO_VERSION_MAX, drbd_buildtag());
+ GENL_MAGIC_VERSION, PRO_VERSION_MIN, PRO_VERSION_MAX, drbd_buildtag());
/*
cs .. connection state
diff --git a/drivers/block/drbd/drbd_vli.h b/drivers/block/drbd/drbd_vli.h
index 1ee81e3c2152..941c511cc4da 100644
--- a/drivers/block/drbd/drbd_vli.h
+++ b/drivers/block/drbd/drbd_vli.h
@@ -327,7 +327,7 @@ static inline int bitstream_get_bits(struct bitstream *bs, u64 *out, int bits)
*/
static inline int vli_encode_bits(struct bitstream *bs, u64 in)
{
- u64 code = code;
+ u64 code;
int bits = __vli_encode_bits(&code, in);
if (bits <= 0)
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 1518a6423279..5f04235e4ff7 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -90,7 +90,7 @@ struct loop_cmd {
};
#define LOOP_IDLE_WORKER_TIMEOUT (60 * HZ)
-#define LOOP_DEFAULT_HW_Q_DEPTH (128)
+#define LOOP_DEFAULT_HW_Q_DEPTH 128
static DEFINE_IDR(loop_index_idr);
static DEFINE_MUTEX(loop_ctl_mutex);
@@ -1792,9 +1792,15 @@ static int hw_queue_depth = LOOP_DEFAULT_HW_Q_DEPTH;
static int loop_set_hw_queue_depth(const char *s, const struct kernel_param *p)
{
- int ret = kstrtoint(s, 10, &hw_queue_depth);
+ int qd, ret;
- return (ret || (hw_queue_depth < 1)) ? -EINVAL : 0;
+ ret = kstrtoint(s, 0, &qd);
+ if (ret < 0)
+ return ret;
+ if (qd < 1)
+ return -EINVAL;
+ hw_queue_depth = qd;
+ return 0;
}
static const struct kernel_param_ops loop_hw_qdepth_param_ops = {
@@ -1803,7 +1809,7 @@ static const struct kernel_param_ops loop_hw_qdepth_param_ops = {
};
device_param_cb(hw_queue_depth, &loop_hw_qdepth_param_ops, &hw_queue_depth, 0444);
-MODULE_PARM_DESC(hw_queue_depth, "Queue depth for each hardware queue. Default: 128");
+MODULE_PARM_DESC(hw_queue_depth, "Queue depth for each hardware queue. Default: " __stringify(LOOP_DEFAULT_HW_Q_DEPTH));
MODULE_LICENSE("GPL");
MODULE_ALIAS_BLOCKDEV_MAJOR(LOOP_MAJOR);
diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
index 7d28e3aa406c..4c601ca9552a 100644
--- a/drivers/block/null_blk/main.c
+++ b/drivers/block/null_blk/main.c
@@ -2123,8 +2123,7 @@ static int null_add_dev(struct nullb_device *dev)
blk_queue_physical_block_size(nullb->q, dev->blocksize);
if (!dev->max_sectors)
dev->max_sectors = queue_max_hw_sectors(nullb->q);
- dev->max_sectors = min_t(unsigned int, dev->max_sectors,
- BLK_DEF_MAX_SECTORS);
+ dev->max_sectors = min(dev->max_sectors, BLK_DEF_MAX_SECTORS);
blk_queue_max_hw_sectors(nullb->q, dev->max_sectors);
if (dev->virt_boundary)
diff --git a/drivers/block/paride/Kconfig b/drivers/block/paride/Kconfig
deleted file mode 100644
index a295634597ba..000000000000
--- a/drivers/block/paride/Kconfig
+++ /dev/null
@@ -1,302 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# PARIDE configuration
-#
-# PARIDE doesn't need PARPORT, but if PARPORT is configured as a module,
-# PARIDE must also be a module.
-# PARIDE only supports PC style parports. Tough for USB or other parports...
-
-comment "Parallel IDE high-level drivers"
- depends on PARIDE
-
-config PARIDE_PD
- tristate "Parallel port IDE disks"
- depends on PARIDE
- help
- This option enables the high-level driver for IDE-type disk devices
- connected through a parallel port. If you chose to build PARIDE
- support into your kernel, you may answer Y here to build in the
- parallel port IDE driver, otherwise you should answer M to build
- it as a loadable module. The module will be called pd. You
- must also have at least one parallel port protocol driver in your
- system. Among the devices supported by this driver are the SyQuest
- EZ-135, EZ-230 and SparQ drives, the Avatar Shark and the backpack
- hard drives from MicroSolutions.
-
-config PARIDE_PCD
- tristate "Parallel port ATAPI CD-ROMs"
- depends on PARIDE
- select CDROM
- help
- This option enables the high-level driver for ATAPI CD-ROM devices
- connected through a parallel port. If you chose to build PARIDE
- support into your kernel, you may answer Y here to build in the
- parallel port ATAPI CD-ROM driver, otherwise you should answer M to
- build it as a loadable module. The module will be called pcd. You
- must also have at least one parallel port protocol driver in your
- system. Among the devices supported by this driver are the
- MicroSolutions backpack CD-ROM drives and the Freecom Power CD. If
- you have such a CD-ROM drive, you should also say Y or M to "ISO
- 9660 CD-ROM file system support" below, because that's the file
- system used on CD-ROMs.
-
-config PARIDE_PF
- tristate "Parallel port ATAPI disks"
- depends on PARIDE
- help
- This option enables the high-level driver for ATAPI disk devices
- connected through a parallel port. If you chose to build PARIDE
- support into your kernel, you may answer Y here to build in the
- parallel port ATAPI disk driver, otherwise you should answer M
- to build it as a loadable module. The module will be called pf.
- You must also have at least one parallel port protocol driver in
- your system. Among the devices supported by this driver are the
- MicroSolutions backpack PD/CD drive and the Imation Superdisk
- LS-120 drive.
-
-config PARIDE_PT
- tristate "Parallel port ATAPI tapes"
- depends on PARIDE
- help
- This option enables the high-level driver for ATAPI tape devices
- connected through a parallel port. If you chose to build PARIDE
- support into your kernel, you may answer Y here to build in the
- parallel port ATAPI disk driver, otherwise you should answer M
- to build it as a loadable module. The module will be called pt.
- You must also have at least one parallel port protocol driver in
- your system. Among the devices supported by this driver is the
- parallel port version of the HP 5GB drive.
-
-config PARIDE_PG
- tristate "Parallel port generic ATAPI devices"
- depends on PARIDE
- help
- This option enables a special high-level driver for generic ATAPI
- devices connected through a parallel port. The driver allows user
- programs, such as cdrtools, to send ATAPI commands directly to a
- device.
-
- If you chose to build PARIDE support into your kernel, you may
- answer Y here to build in the parallel port generic ATAPI driver,
- otherwise you should answer M to build it as a loadable module. The
- module will be called pg.
-
- You must also have at least one parallel port protocol driver in
- your system.
-
- This driver implements an API loosely related to the generic SCSI
- driver. See <file:include/linux/pg.h>. for details.
-
- You can obtain the most recent version of cdrtools from
- <ftp://ftp.berlios.de/pub/cdrecord/>. Versions 1.6.1a3 and
- later fully support this driver.
-
-comment "Parallel IDE protocol modules"
- depends on PARIDE
-
-config PARIDE_ATEN
- tristate "ATEN EH-100 protocol"
- depends on PARIDE
- help
- This option enables support for the ATEN EH-100 parallel port IDE
- protocol. This protocol is used in some inexpensive low performance
- parallel port kits made in Hong Kong. If you chose to build PARIDE
- support into your kernel, you may answer Y here to build in the
- protocol driver, otherwise you should answer M to build it as a
- loadable module. The module will be called aten. You must also
- have a high-level driver for the type of device that you want to
- support.
-
-config PARIDE_BPCK
- tristate "MicroSolutions backpack (Series 5) protocol"
- depends on PARIDE
- help
- This option enables support for the Micro Solutions BACKPACK
- parallel port Series 5 IDE protocol. (Most BACKPACK drives made
- before 1999 were Series 5) Series 5 drives will NOT always have the
- Series noted on the bottom of the drive. Series 6 drivers will.
-
- In other words, if your BACKPACK drive doesn't say "Series 6" on the
- bottom, enable this option.
-
- If you chose to build PARIDE support into your kernel, you may
- answer Y here to build in the protocol driver, otherwise you should
- answer M to build it as a loadable module. The module will be
- called bpck. You must also have a high-level driver for the type
- of device that you want to support.
-
-config PARIDE_BPCK6
- tristate "MicroSolutions backpack (Series 6) protocol"
- depends on PARIDE && !64BIT
- help
- This option enables support for the Micro Solutions BACKPACK
- parallel port Series 6 IDE protocol. (Most BACKPACK drives made
- after 1999 were Series 6) Series 6 drives will have the Series noted
- on the bottom of the drive. Series 5 drivers don't always have it
- noted.
-
- In other words, if your BACKPACK drive says "Series 6" on the
- bottom, enable this option.
-
- If you chose to build PARIDE support into your kernel, you may
- answer Y here to build in the protocol driver, otherwise you should
- answer M to build it as a loadable module. The module will be
- called bpck6. You must also have a high-level driver for the type
- of device that you want to support.
-
-config PARIDE_COMM
- tristate "DataStor Commuter protocol"
- depends on PARIDE
- help
- This option enables support for the Commuter parallel port IDE
- protocol from DataStor. If you chose to build PARIDE support
- into your kernel, you may answer Y here to build in the protocol
- driver, otherwise you should answer M to build it as a loadable
- module. The module will be called comm. You must also have
- a high-level driver for the type of device that you want to support.
-
-config PARIDE_DSTR
- tristate "DataStor EP-2000 protocol"
- depends on PARIDE
- help
- This option enables support for the EP-2000 parallel port IDE
- protocol from DataStor. If you chose to build PARIDE support
- into your kernel, you may answer Y here to build in the protocol
- driver, otherwise you should answer M to build it as a loadable
- module. The module will be called dstr. You must also have
- a high-level driver for the type of device that you want to support.
-
-config PARIDE_FIT2
- tristate "FIT TD-2000 protocol"
- depends on PARIDE
- help
- This option enables support for the TD-2000 parallel port IDE
- protocol from Fidelity International Technology. This is a simple
- (low speed) adapter that is used in some portable hard drives. If
- you chose to build PARIDE support into your kernel, you may answer Y
- here to build in the protocol driver, otherwise you should answer M
- to build it as a loadable module. The module will be called ktti.
- You must also have a high-level driver for the type of device that
- you want to support.
-
-config PARIDE_FIT3
- tristate "FIT TD-3000 protocol"
- depends on PARIDE
- help
- This option enables support for the TD-3000 parallel port IDE
- protocol from Fidelity International Technology. This protocol is
- used in newer models of their portable disk, CD-ROM and PD/CD
- devices. If you chose to build PARIDE support into your kernel, you
- may answer Y here to build in the protocol driver, otherwise you
- should answer M to build it as a loadable module. The module will be
- called fit3. You must also have a high-level driver for the type
- of device that you want to support.
-
-config PARIDE_EPAT
- tristate "Shuttle EPAT/EPEZ protocol"
- depends on PARIDE
- help
- This option enables support for the EPAT parallel port IDE protocol.
- EPAT is a parallel port IDE adapter manufactured by Shuttle
- Technology and widely used in devices from major vendors such as
- Hewlett-Packard, SyQuest, Imation and Avatar. If you chose to build
- PARIDE support into your kernel, you may answer Y here to build in
- the protocol driver, otherwise you should answer M to build it as a
- loadable module. The module will be called epat. You must also
- have a high-level driver for the type of device that you want to
- support.
-
-config PARIDE_EPATC8
- bool "Support c7/c8 chips"
- depends on PARIDE_EPAT
- help
- This option enables support for the newer Shuttle EP1284 (aka c7 and
- c8) chip. You need this if you are using any recent Imation SuperDisk
- (LS-120) drive.
-
-config PARIDE_EPIA
- tristate "Shuttle EPIA protocol"
- depends on PARIDE
- help
- This option enables support for the (obsolete) EPIA parallel port
- IDE protocol from Shuttle Technology. This adapter can still be
- found in some no-name kits. If you chose to build PARIDE support
- into your kernel, you may answer Y here to build in the protocol
- driver, otherwise you should answer M to build it as a loadable
- module. The module will be called epia. You must also have a
- high-level driver for the type of device that you want to support.
-
-config PARIDE_FRIQ
- tristate "Freecom IQ ASIC-2 protocol"
- depends on PARIDE
- help
- This option enables support for version 2 of the Freecom IQ parallel
- port IDE adapter. This adapter is used by the Maxell Superdisk
- drive. If you chose to build PARIDE support into your kernel, you
- may answer Y here to build in the protocol driver, otherwise you
- should answer M to build it as a loadable module. The module will be
- called friq. You must also have a high-level driver for the type
- of device that you want to support.
-
-config PARIDE_FRPW
- tristate "FreeCom power protocol"
- depends on PARIDE
- help
- This option enables support for the Freecom power parallel port IDE
- protocol. If you chose to build PARIDE support into your kernel, you
- may answer Y here to build in the protocol driver, otherwise you
- should answer M to build it as a loadable module. The module will be
- called frpw. You must also have a high-level driver for the type
- of device that you want to support.
-
-config PARIDE_KBIC
- tristate "KingByte KBIC-951A/971A protocols"
- depends on PARIDE
- help
- This option enables support for the KBIC-951A and KBIC-971A parallel
- port IDE protocols from KingByte Information Corp. KingByte's
- adapters appear in many no-name portable disk and CD-ROM products,
- especially in Europe. If you chose to build PARIDE support into your
- kernel, you may answer Y here to build in the protocol driver,
- otherwise you should answer M to build it as a loadable module. The
- module will be called kbic. You must also have a high-level driver
- for the type of device that you want to support.
-
-config PARIDE_KTTI
- tristate "KT PHd protocol"
- depends on PARIDE
- help
- This option enables support for the "PHd" parallel port IDE protocol
- from KT Technology. This is a simple (low speed) adapter that is
- used in some 2.5" portable hard drives. If you chose to build PARIDE
- support into your kernel, you may answer Y here to build in the
- protocol driver, otherwise you should answer M to build it as a
- loadable module. The module will be called ktti. You must also
- have a high-level driver for the type of device that you want to
- support.
-
-config PARIDE_ON20
- tristate "OnSpec 90c20 protocol"
- depends on PARIDE
- help
- This option enables support for the (obsolete) 90c20 parallel port
- IDE protocol from OnSpec (often marketed under the ValuStore brand
- name). If you chose to build PARIDE support into your kernel, you
- may answer Y here to build in the protocol driver, otherwise you
- should answer M to build it as a loadable module. The module will
- be called on20. You must also have a high-level driver for the
- type of device that you want to support.
-
-config PARIDE_ON26
- tristate "OnSpec 90c26 protocol"
- depends on PARIDE
- help
- This option enables support for the 90c26 parallel port IDE protocol
- from OnSpec Electronics (often marketed under the ValuStore brand
- name). If you chose to build PARIDE support into your kernel, you
- may answer Y here to build in the protocol driver, otherwise you
- should answer M to build it as a loadable module. The module will be
- called on26. You must also have a high-level driver for the type
- of device that you want to support.
-
-#
diff --git a/drivers/block/paride/Makefile b/drivers/block/paride/Makefile
deleted file mode 100644
index cf1742a8475e..000000000000
--- a/drivers/block/paride/Makefile
+++ /dev/null
@@ -1,29 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# Makefile for Parallel port IDE device drivers.
-#
-# 7 October 2000, Bartlomiej Zolnierkiewicz <bkz@linux-ide.org>
-# Rewritten to use lists instead of if-statements.
-#
-
-obj-$(CONFIG_PARIDE) += paride.o
-obj-$(CONFIG_PARIDE_ATEN) += aten.o
-obj-$(CONFIG_PARIDE_BPCK) += bpck.o
-obj-$(CONFIG_PARIDE_COMM) += comm.o
-obj-$(CONFIG_PARIDE_DSTR) += dstr.o
-obj-$(CONFIG_PARIDE_KBIC) += kbic.o
-obj-$(CONFIG_PARIDE_EPAT) += epat.o
-obj-$(CONFIG_PARIDE_EPIA) += epia.o
-obj-$(CONFIG_PARIDE_FRPW) += frpw.o
-obj-$(CONFIG_PARIDE_FRIQ) += friq.o
-obj-$(CONFIG_PARIDE_FIT2) += fit2.o
-obj-$(CONFIG_PARIDE_FIT3) += fit3.o
-obj-$(CONFIG_PARIDE_ON20) += on20.o
-obj-$(CONFIG_PARIDE_ON26) += on26.o
-obj-$(CONFIG_PARIDE_KTTI) += ktti.o
-obj-$(CONFIG_PARIDE_BPCK6) += bpck6.o
-obj-$(CONFIG_PARIDE_PD) += pd.o
-obj-$(CONFIG_PARIDE_PCD) += pcd.o
-obj-$(CONFIG_PARIDE_PF) += pf.o
-obj-$(CONFIG_PARIDE_PT) += pt.o
-obj-$(CONFIG_PARIDE_PG) += pg.o
diff --git a/drivers/block/paride/Transition-notes b/drivers/block/paride/Transition-notes
deleted file mode 100644
index 70374907c020..000000000000
--- a/drivers/block/paride/Transition-notes
+++ /dev/null
@@ -1,128 +0,0 @@
-Lemma 1:
- If ps_tq is scheduled, ps_tq_active is 1. ps_tq_int() can be called
- only when ps_tq_active is 1.
-Proof: All assignments to ps_tq_active and all scheduling of ps_tq happen
- under ps_spinlock. There are three places where that can happen:
- one in ps_set_intr() (A) and two in ps_tq_int() (B and C).
- Consider the sequnce of these events. A can not be preceded by
- anything except B, since it is under if (!ps_tq_active) under
- ps_spinlock. C is always preceded by B, since we can't reach it
- other than through B and we don't drop ps_spinlock between them.
- IOW, the sequence is A?(BA|BC|B)*. OTOH, number of B can not exceed
- the sum of numbers of A and C, since each call of ps_tq_int() is
- the result of ps_tq execution. Therefore, the sequence starts with
- A and each B is preceded by either A or C. Moments when we enter
- ps_tq_int() are sandwiched between {A,C} and B in that sequence,
- since at any time number of B can not exceed the number of these
- moments which, in turn, can not exceed the number of A and C.
- In other words, the sequence of events is (A or C set ps_tq_active to
- 1 and schedule ps_tq, ps_tq is executed, ps_tq_int() is entered,
- B resets ps_tq_active)*.
-
-
-consider the following area:
- * in do_pd_request1(): to calls of pi_do_claimed() and return in
- case when pd_req is NULL.
- * in next_request(): to call of do_pd_request1()
- * in do_pd_read(): to call of ps_set_intr()
- * in do_pd_read_start(): to calls of pi_do_claimed(), next_request()
-and ps_set_intr()
- * in do_pd_read_drq(): to calls of pi_do_claimed() and next_request()
- * in do_pd_write(): to call of ps_set_intr()
- * in do_pd_write_start(): to calls of pi_do_claimed(), next_request()
-and ps_set_intr()
- * in do_pd_write_done(): to calls of pi_do_claimed() and next_request()
- * in ps_set_intr(): to check for ps_tq_active and to scheduling
- ps_tq if ps_tq_active was 0.
- * in ps_tq_int(): from the moment when we get ps_spinlock() to the
- return, call of con() or scheduling ps_tq.
- * in pi_schedule_claimed() when called from pi_do_claimed() called from
- pd.c, everything until returning 1 or setting or setting ->claim_cont
- on the path that returns 0
- * in pi_do_claimed() when called from pd.c, everything until the call
- of pi_do_claimed() plus the everything until the call of cont() if
- pi_do_claimed() has returned 1.
- * in pi_wake_up() called for PIA that belongs to pd.c, everything from
- the moment when pi_spinlock has been acquired.
-
-Lemma 2:
- 1) at any time at most one thread of execution can be in that area or
- be preempted there.
- 2) When there is such a thread, pd_busy is set or pd_lock is held by
- that thread.
- 3) When there is such a thread, ps_tq_active is 0 or ps_spinlock is
- held by that thread.
- 4) When there is such a thread, all PIA belonging to pd.c have NULL
- ->claim_cont or pi_spinlock is held by thread in question.
-
-Proof: consider the first moment when the above is not true.
-
-(1) can become not true if some thread enters that area while another is there.
- a) do_pd_request1() can be called from next_request() or do_pd_request()
- In the first case the thread was already in the area. In the second,
- the thread was holding pd_lock and found pd_busy not set, which would
- mean that (2) was already not true.
- b) ps_set_intr() and pi_schedule_claimed() can be called only from the
- area.
- c) pi_do_claimed() is called by pd.c only from the area.
- d) ps_tq_int() can enter the area only when the thread is holding
- ps_spinlock and ps_tq_active is 1 (due to Lemma 1). It means that
- (3) was already not true.
- e) do_pd_{read,write}* could be called only from the area. The only
- case that needs consideration is call from pi_wake_up() and there
- we would have to be called for the PIA that got ->claimed_cont
- from pd.c. That could happen only if pi_do_claimed() had been
- called from pd.c for that PIA, which happens only for PIA belonging
- to pd.c.
- f) pi_wake_up() can enter the area only when the thread is holding
- pi_spinlock and ->claimed_cont is non-NULL for PIA belonging to
- pd.c. It means that (4) was already not true.
-
-(2) can become not true only when pd_lock is released by the thread in question.
- Indeed, pd_busy is reset only in the area and thread that resets
- it is holding pd_lock. The only place within the area where we
- release pd_lock is in pd_next_buf() (called from within the area).
- But that code does not reset pd_busy, so pd_busy would have to be
- 0 when pd_next_buf() had acquired pd_lock. If it become 0 while
- we were acquiring the lock, (1) would be already false, since
- the thread that had reset it would be in the area simulateously.
- If it was 0 before we tried to acquire pd_lock, (2) would be
- already false.
-
-For similar reasons, (3) can become not true only when ps_spinlock is released
-by the thread in question. However, all such places within the area are right
-after resetting ps_tq_active to 0.
-
-(4) is done the same way - all places where we release pi_spinlock within
-the area are either after resetting ->claimed_cont to NULL while holding
-pi_spinlock, or after not tocuhing ->claimed_cont since acquiring pi_spinlock
-also in the area. The only place where ->claimed_cont is made non-NULL is
-in the area, under pi_spinlock and we do not release it until after leaving
-the area.
-
-QED.
-
-
-Corollary 1: ps_tq_active can be killed. Indeed, the only place where we
-check its value is in ps_set_intr() and if it had been non-zero at that
-point, we would have violated either (2.1) (if it was set while ps_set_intr()
-was acquiring ps_spinlock) or (2.3) (if it was set when we started to
-acquire ps_spinlock).
-
-Corollary 2: ps_spinlock can be killed. Indeed, Lemma 1 and Lemma 2 show
-that the only possible contention is between scheduling ps_tq followed by
-immediate release of spinlock and beginning of execution of ps_tq on
-another CPU.
-
-Corollary 3: assignment to pd_busy in do_pd_read_start() and do_pd_write_start()
-can be killed. Indeed, we are not holding pd_lock and thus pd_busy is already
-1 here.
-
-Corollary 4: in ps_tq_int() uses of con can be replaced with uses of
-ps_continuation, since the latter is changed only from the area.
-We don't need to reset it to NULL, since we are guaranteed that there
-will be a call of ps_set_intr() before we look at ps_continuation again.
-We can remove the check for ps_continuation being NULL for the same
-reason - the value is guaranteed to be set by the last ps_set_intr() and
-we never pass it NULL. Assignements in the beginning of ps_set_intr()
-can be taken to callers as long as they remain within the area.
diff --git a/drivers/block/paride/mkd b/drivers/block/paride/mkd
deleted file mode 100644
index 6d0d802479ea..000000000000
--- a/drivers/block/paride/mkd
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/bin/bash
-# SPDX-License-Identifier: GPL-2.0
-#
-# mkd -- a script to create the device special files for the PARIDE subsystem
-#
-# block devices: pd (45), pcd (46), pf (47)
-# character devices: pt (96), pg (97)
-#
-function mkdev {
- mknod $1 $2 $3 $4 ; chmod 0660 $1 ; chown root:disk $1
-}
-#
-function pd {
- D=$( printf \\$( printf "x%03x" $[ $1 + 97 ] ) )
- mkdev pd$D b 45 $[ $1 * 16 ]
- for P in 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
- do mkdev pd$D$P b 45 $[ $1 * 16 + $P ]
- done
-}
-#
-cd /dev
-#
-for u in 0 1 2 3 ; do pd $u ; done
-for u in 0 1 2 3 ; do mkdev pcd$u b 46 $u ; done
-for u in 0 1 2 3 ; do mkdev pf$u b 47 $u ; done
-for u in 0 1 2 3 ; do mkdev pt$u c 96 $u ; done
-for u in 0 1 2 3 ; do mkdev npt$u c 96 $[ $u + 128 ] ; done
-for u in 0 1 2 3 ; do mkdev pg$u c 97 $u ; done
-#
-# end of mkd
-
diff --git a/drivers/block/paride/paride.c b/drivers/block/paride/paride.c
deleted file mode 100644
index 0e287993b778..000000000000
--- a/drivers/block/paride/paride.c
+++ /dev/null
@@ -1,479 +0,0 @@
-/*
- paride.c (c) 1997-8 Grant R. Guenther <grant@torque.net>
- Under the terms of the GNU General Public License.
-
- This is the base module for the family of device drivers
- that support parallel port IDE devices.
-
-*/
-
-/* Changes:
-
- 1.01 GRG 1998.05.03 Use spinlocks
- 1.02 GRG 1998.05.05 init_proto, release_proto, ktti
- 1.03 GRG 1998.08.15 eliminate compiler warning
- 1.04 GRG 1998.11.28 added support for FRIQ
- 1.05 TMW 2000.06.06 use parport_find_number instead of
- parport_enumerate
- 1.06 TMW 2001.03.26 more sane parport-or-not resource management
-*/
-
-#define PI_VERSION "1.06"
-
-#include <linux/module.h>
-#include <linux/kmod.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/ioport.h>
-#include <linux/string.h>
-#include <linux/spinlock.h>
-#include <linux/wait.h>
-#include <linux/sched.h> /* TASK_* */
-#include <linux/parport.h>
-#include <linux/slab.h>
-
-#include "paride.h"
-
-MODULE_LICENSE("GPL");
-
-#define MAX_PROTOS 32
-
-static struct pi_protocol *protocols[MAX_PROTOS];
-
-static DEFINE_SPINLOCK(pi_spinlock);
-
-void pi_write_regr(PIA * pi, int cont, int regr, int val)
-{
- pi->proto->write_regr(pi, cont, regr, val);
-}
-
-EXPORT_SYMBOL(pi_write_regr);
-
-int pi_read_regr(PIA * pi, int cont, int regr)
-{
- return pi->proto->read_regr(pi, cont, regr);
-}
-
-EXPORT_SYMBOL(pi_read_regr);
-
-void pi_write_block(PIA * pi, char *buf, int count)
-{
- pi->proto->write_block(pi, buf, count);
-}
-
-EXPORT_SYMBOL(pi_write_block);
-
-void pi_read_block(PIA * pi, char *buf, int count)
-{
- pi->proto->read_block(pi, buf, count);
-}
-
-EXPORT_SYMBOL(pi_read_block);
-
-static void pi_wake_up(void *p)
-{
- PIA *pi = (PIA *) p;
- unsigned long flags;
- void (*cont) (void) = NULL;
-
- spin_lock_irqsave(&pi_spinlock, flags);
-
- if (pi->claim_cont && !parport_claim(pi->pardev)) {
- cont = pi->claim_cont;
- pi->claim_cont = NULL;
- pi->claimed = 1;
- }
-
- spin_unlock_irqrestore(&pi_spinlock, flags);
-
- wake_up(&(pi->parq));
-
- if (cont)
- cont();
-}
-
-int pi_schedule_claimed(PIA * pi, void (*cont) (void))
-{
- unsigned long flags;
-
- spin_lock_irqsave(&pi_spinlock, flags);
- if (pi->pardev && parport_claim(pi->pardev)) {
- pi->claim_cont = cont;
- spin_unlock_irqrestore(&pi_spinlock, flags);
- return 0;
- }
- pi->claimed = 1;
- spin_unlock_irqrestore(&pi_spinlock, flags);
- return 1;
-}
-EXPORT_SYMBOL(pi_schedule_claimed);
-
-void pi_do_claimed(PIA * pi, void (*cont) (void))
-{
- if (pi_schedule_claimed(pi, cont))
- cont();
-}
-
-EXPORT_SYMBOL(pi_do_claimed);
-
-static void pi_claim(PIA * pi)
-{
- if (pi->claimed)
- return;
- pi->claimed = 1;
- if (pi->pardev)
- wait_event(pi->parq,
- !parport_claim((struct pardevice *) pi->pardev));
-}
-
-static void pi_unclaim(PIA * pi)
-{
- pi->claimed = 0;
- if (pi->pardev)
- parport_release((struct pardevice *) (pi->pardev));
-}
-
-void pi_connect(PIA * pi)
-{
- pi_claim(pi);
- pi->proto->connect(pi);
-}
-
-EXPORT_SYMBOL(pi_connect);
-
-void pi_disconnect(PIA * pi)
-{
- pi->proto->disconnect(pi);
- pi_unclaim(pi);
-}
-
-EXPORT_SYMBOL(pi_disconnect);
-
-static void pi_unregister_parport(PIA * pi)
-{
- if (pi->pardev) {
- parport_unregister_device((struct pardevice *) (pi->pardev));
- pi->pardev = NULL;
- }
-}
-
-void pi_release(PIA * pi)
-{
- pi_unregister_parport(pi);
- if (pi->proto->release_proto)
- pi->proto->release_proto(pi);
- module_put(pi->proto->owner);
-}
-
-EXPORT_SYMBOL(pi_release);
-
-static int default_test_proto(PIA * pi, char *scratch, int verbose)
-{
- int j, k;
- int e[2] = { 0, 0 };
-
- pi->proto->connect(pi);
-
- for (j = 0; j < 2; j++) {
- pi_write_regr(pi, 0, 6, 0xa0 + j * 0x10);
- for (k = 0; k < 256; k++) {
- pi_write_regr(pi, 0, 2, k ^ 0xaa);
- pi_write_regr(pi, 0, 3, k ^ 0x55);
- if (pi_read_regr(pi, 0, 2) != (k ^ 0xaa))
- e[j]++;
- }
- }
- pi->proto->disconnect(pi);
-
- if (verbose)
- printk("%s: %s: port 0x%x, mode %d, test=(%d,%d)\n",
- pi->device, pi->proto->name, pi->port,
- pi->mode, e[0], e[1]);
-
- return (e[0] && e[1]); /* not here if both > 0 */
-}
-
-static int pi_test_proto(PIA * pi, char *scratch, int verbose)
-{
- int res;
-
- pi_claim(pi);
- if (pi->proto->test_proto)
- res = pi->proto->test_proto(pi, scratch, verbose);
- else
- res = default_test_proto(pi, scratch, verbose);
- pi_unclaim(pi);
-
- return res;
-}
-
-int paride_register(PIP * pr)
-{
- int k;
-
- for (k = 0; k < MAX_PROTOS; k++)
- if (protocols[k] && !strcmp(pr->name, protocols[k]->name)) {
- printk("paride: %s protocol already registered\n",
- pr->name);
- return -1;
- }
- k = 0;
- while ((k < MAX_PROTOS) && (protocols[k]))
- k++;
- if (k == MAX_PROTOS) {
- printk("paride: protocol table full\n");
- return -1;
- }
- protocols[k] = pr;
- pr->index = k;
- printk("paride: %s registered as protocol %d\n", pr->name, k);
- return 0;
-}
-
-EXPORT_SYMBOL(paride_register);
-
-void paride_unregister(PIP * pr)
-{
- if (!pr)
- return;
- if (protocols[pr->index] != pr) {
- printk("paride: %s not registered\n", pr->name);
- return;
- }
- protocols[pr->index] = NULL;
-}
-
-EXPORT_SYMBOL(paride_unregister);
-
-static int pi_register_parport(PIA *pi, int verbose, int unit)
-{
- struct parport *port;
- struct pardev_cb par_cb;
-
- port = parport_find_base(pi->port);
- if (!port)
- return 0;
- memset(&par_cb, 0, sizeof(par_cb));
- par_cb.wakeup = pi_wake_up;
- par_cb.private = (void *)pi;
- pi->pardev = parport_register_dev_model(port, pi->device, &par_cb,
- unit);
- parport_put_port(port);
- if (!pi->pardev)
- return 0;
-
- init_waitqueue_head(&pi->parq);
-
- if (verbose)
- printk("%s: 0x%x is %s\n", pi->device, pi->port, port->name);
-
- pi->parname = (char *) port->name;
-
- return 1;
-}
-
-static int pi_probe_mode(PIA * pi, int max, char *scratch, int verbose)
-{
- int best, range;
-
- if (pi->mode != -1) {
- if (pi->mode >= max)
- return 0;
- range = 3;
- if (pi->mode >= pi->proto->epp_first)
- range = 8;
- if ((range == 8) && (pi->port % 8))
- return 0;
- pi->reserved = range;
- return (!pi_test_proto(pi, scratch, verbose));
- }
- best = -1;
- for (pi->mode = 0; pi->mode < max; pi->mode++) {
- range = 3;
- if (pi->mode >= pi->proto->epp_first)
- range = 8;
- if ((range == 8) && (pi->port % 8))
- break;
- pi->reserved = range;
- if (!pi_test_proto(pi, scratch, verbose))
- best = pi->mode;
- }
- pi->mode = best;
- return (best > -1);
-}
-
-static int pi_probe_unit(PIA * pi, int unit, char *scratch, int verbose)
-{
- int max, s, e;
-
- s = unit;
- e = s + 1;
-
- if (s == -1) {
- s = 0;
- e = pi->proto->max_units;
- }
-
- if (!pi_register_parport(pi, verbose, s))
- return 0;
-
- if (pi->proto->test_port) {
- pi_claim(pi);
- max = pi->proto->test_port(pi);
- pi_unclaim(pi);
- } else
- max = pi->proto->max_mode;
-
- if (pi->proto->probe_unit) {
- pi_claim(pi);
- for (pi->unit = s; pi->unit < e; pi->unit++)
- if (pi->proto->probe_unit(pi)) {
- pi_unclaim(pi);
- if (pi_probe_mode(pi, max, scratch, verbose))
- return 1;
- pi_unregister_parport(pi);
- return 0;
- }
- pi_unclaim(pi);
- pi_unregister_parport(pi);
- return 0;
- }
-
- if (!pi_probe_mode(pi, max, scratch, verbose)) {
- pi_unregister_parport(pi);
- return 0;
- }
- return 1;
-
-}
-
-int pi_init(PIA * pi, int autoprobe, int port, int mode,
- int unit, int protocol, int delay, char *scratch,
- int devtype, int verbose, char *device)
-{
- int p, k, s, e;
- int lpts[7] = { 0x3bc, 0x378, 0x278, 0x268, 0x27c, 0x26c, 0 };
-
- s = protocol;
- e = s + 1;
-
- if (!protocols[0])
- request_module("paride_protocol");
-
- if (autoprobe) {
- s = 0;
- e = MAX_PROTOS;
- } else if ((s < 0) || (s >= MAX_PROTOS) || (port <= 0) ||
- (!protocols[s]) || (unit < 0) ||
- (unit >= protocols[s]->max_units)) {
- printk("%s: Invalid parameters\n", device);
- return 0;
- }
-
- for (p = s; p < e; p++) {
- struct pi_protocol *proto = protocols[p];
- if (!proto)
- continue;
- /* still racy */
- if (!try_module_get(proto->owner))
- continue;
- pi->proto = proto;
- pi->private = 0;
- if (proto->init_proto && proto->init_proto(pi) < 0) {
- pi->proto = NULL;
- module_put(proto->owner);
- continue;
- }
- if (delay == -1)
- pi->delay = pi->proto->default_delay;
- else
- pi->delay = delay;
- pi->devtype = devtype;
- pi->device = device;
-
- pi->parname = NULL;
- pi->pardev = NULL;
- init_waitqueue_head(&pi->parq);
- pi->claimed = 0;
- pi->claim_cont = NULL;
-
- pi->mode = mode;
- if (port != -1) {
- pi->port = port;
- if (pi_probe_unit(pi, unit, scratch, verbose))
- break;
- pi->port = 0;
- } else {
- k = 0;
- while ((pi->port = lpts[k++]))
- if (pi_probe_unit
- (pi, unit, scratch, verbose))
- break;
- if (pi->port)
- break;
- }
- if (pi->proto->release_proto)
- pi->proto->release_proto(pi);
- module_put(proto->owner);
- }
-
- if (!pi->port) {
- if (autoprobe)
- printk("%s: Autoprobe failed\n", device);
- else
- printk("%s: Adapter not found\n", device);
- return 0;
- }
-
- if (pi->parname)
- printk("%s: Sharing %s at 0x%x\n", pi->device,
- pi->parname, pi->port);
-
- pi->proto->log_adapter(pi, scratch, verbose);
-
- return 1;
-}
-
-EXPORT_SYMBOL(pi_init);
-
-static int pi_probe(struct pardevice *par_dev)
-{
- struct device_driver *drv = par_dev->dev.driver;
- int len = strlen(drv->name);
-
- if (strncmp(par_dev->name, drv->name, len))
- return -ENODEV;
-
- return 0;
-}
-
-void *pi_register_driver(char *name)
-{
- struct parport_driver *parp_drv;
- int ret;
-
- parp_drv = kzalloc(sizeof(*parp_drv), GFP_KERNEL);
- if (!parp_drv)
- return NULL;
-
- parp_drv->name = name;
- parp_drv->probe = pi_probe;
- parp_drv->devmodel = true;
-
- ret = parport_register_driver(parp_drv);
- if (ret) {
- kfree(parp_drv);
- return NULL;
- }
- return (void *)parp_drv;
-}
-EXPORT_SYMBOL(pi_register_driver);
-
-void pi_unregister_driver(void *_drv)
-{
- struct parport_driver *drv = _drv;
-
- parport_unregister_driver(drv);
- kfree(drv);
-}
-EXPORT_SYMBOL(pi_unregister_driver);
diff --git a/drivers/block/paride/paride.h b/drivers/block/paride/paride.h
deleted file mode 100644
index ddb9e589da7f..000000000000
--- a/drivers/block/paride/paride.h
+++ /dev/null
@@ -1,172 +0,0 @@
-#ifndef __DRIVERS_PARIDE_H__
-#define __DRIVERS_PARIDE_H__
-
-/*
- paride.h (c) 1997-8 Grant R. Guenther <grant@torque.net>
- Under the terms of the GPL.
-
- This file defines the interface between the high-level parallel
- IDE device drivers (pd, pf, pcd, pt) and the adapter chips.
-
-*/
-
-/* Changes:
-
- 1.01 GRG 1998.05.05 init_proto, release_proto
-*/
-
-#define PARIDE_H_VERSION "1.01"
-
-/* Some adapters need to know what kind of device they are in
-
- Values for devtype:
-*/
-
-#define PI_PD 0 /* IDE disk */
-#define PI_PCD 1 /* ATAPI CDrom */
-#define PI_PF 2 /* ATAPI disk */
-#define PI_PT 3 /* ATAPI tape */
-#define PI_PG 4 /* ATAPI generic */
-
-/* The paride module contains no state, instead the drivers allocate
- a pi_adapter data structure and pass it to paride in every operation.
-
-*/
-
-struct pi_adapter {
-
- struct pi_protocol *proto; /* adapter protocol */
- int port; /* base address of parallel port */
- int mode; /* transfer mode in use */
- int delay; /* adapter delay setting */
- int devtype; /* device type: PI_PD etc. */
- char *device; /* name of driver */
- int unit; /* unit number for chained adapters */
- int saved_r0; /* saved port state */
- int saved_r2; /* saved port state */
- int reserved; /* number of ports reserved */
- unsigned long private; /* for protocol module */
-
- wait_queue_head_t parq; /* semaphore for parport sharing */
- void *pardev; /* pointer to pardevice */
- char *parname; /* parport name */
- int claimed; /* parport has already been claimed */
- void (*claim_cont)(void); /* continuation for parport wait */
-};
-
-typedef struct pi_adapter PIA;
-
-/* functions exported by paride to the high level drivers */
-
-extern int pi_init(PIA *pi,
- int autoprobe, /* 1 to autoprobe */
- int port, /* base port address */
- int mode, /* -1 for autoprobe */
- int unit, /* unit number, if supported */
- int protocol, /* protocol to use */
- int delay, /* -1 to use adapter specific default */
- char * scratch, /* address of 512 byte buffer */
- int devtype, /* device type: PI_PD, PI_PCD, etc ... */
- int verbose, /* log verbose data while probing */
- char *device /* name of the driver */
- ); /* returns 0 on failure, 1 on success */
-
-extern void pi_release(PIA *pi);
-
-/* registers are addressed as (cont,regr)
-
- cont: 0 for command register file, 1 for control register(s)
- regr: 0-7 for register number.
-
-*/
-
-extern void pi_write_regr(PIA *pi, int cont, int regr, int val);
-
-extern int pi_read_regr(PIA *pi, int cont, int regr);
-
-extern void pi_write_block(PIA *pi, char * buf, int count);
-
-extern void pi_read_block(PIA *pi, char * buf, int count);
-
-extern void pi_connect(PIA *pi);
-
-extern void pi_disconnect(PIA *pi);
-
-extern void pi_do_claimed(PIA *pi, void (*cont)(void));
-extern int pi_schedule_claimed(PIA *pi, void (*cont)(void));
-
-/* macros and functions exported to the protocol modules */
-
-#define delay_p (pi->delay?udelay(pi->delay):(void)0)
-#define out_p(offs,byte) outb(byte,pi->port+offs); delay_p;
-#define in_p(offs) (delay_p,inb(pi->port+offs))
-
-#define w0(byte) {out_p(0,byte);}
-#define r0() (in_p(0) & 0xff)
-#define w1(byte) {out_p(1,byte);}
-#define r1() (in_p(1) & 0xff)
-#define w2(byte) {out_p(2,byte);}
-#define r2() (in_p(2) & 0xff)
-#define w3(byte) {out_p(3,byte);}
-#define w4(byte) {out_p(4,byte);}
-#define r4() (in_p(4) & 0xff)
-#define w4w(data) {outw(data,pi->port+4); delay_p;}
-#define w4l(data) {outl(data,pi->port+4); delay_p;}
-#define r4w() (delay_p,inw(pi->port+4)&0xffff)
-#define r4l() (delay_p,inl(pi->port+4)&0xffffffff)
-
-static inline u16 pi_swab16( char *b, int k)
-
-{ union { u16 u; char t[2]; } r;
-
- r.t[0]=b[2*k+1]; r.t[1]=b[2*k];
- return r.u;
-}
-
-static inline u32 pi_swab32( char *b, int k)
-
-{ union { u32 u; char f[4]; } r;
-
- r.f[0]=b[4*k+1]; r.f[1]=b[4*k];
- r.f[2]=b[4*k+3]; r.f[3]=b[4*k+2];
- return r.u;
-}
-
-struct pi_protocol {
-
- char name[8]; /* name for this protocol */
- int index; /* index into protocol table */
-
- int max_mode; /* max mode number */
- int epp_first; /* modes >= this use 8 ports */
-
- int default_delay; /* delay parameter if not specified */
- int max_units; /* max chained units probed for */
-
- void (*write_regr)(PIA *,int,int,int);
- int (*read_regr)(PIA *,int,int);
- void (*write_block)(PIA *,char *,int);
- void (*read_block)(PIA *,char *,int);
-
- void (*connect)(PIA *);
- void (*disconnect)(PIA *);
-
- int (*test_port)(PIA *);
- int (*probe_unit)(PIA *);
- int (*test_proto)(PIA *,char *,int);
- void (*log_adapter)(PIA *,char *,int);
-
- int (*init_proto)(PIA *);
- void (*release_proto)(PIA *);
- struct module *owner;
-};
-
-typedef struct pi_protocol PIP;
-
-extern int paride_register( PIP * );
-extern void paride_unregister ( PIP * );
-void *pi_register_driver(char *);
-void pi_unregister_driver(void *);
-
-#endif /* __DRIVERS_PARIDE_H__ */
-/* end of paride.h */
diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c
deleted file mode 100644
index a5ab40784119..000000000000
--- a/drivers/block/paride/pcd.c
+++ /dev/null
@@ -1,1042 +0,0 @@
-/*
- pcd.c (c) 1997-8 Grant R. Guenther <grant@torque.net>
- Under the terms of the GNU General Public License.
-
- This is a high-level driver for parallel port ATAPI CD-ROM
- drives based on chips supported by the paride module.
-
- By default, the driver will autoprobe for a single parallel
- port ATAPI CD-ROM drive, but if their individual parameters are
- specified, the driver can handle up to 4 drives.
-
- The behaviour of the pcd driver can be altered by setting
- some parameters from the insmod command line. The following
- parameters are adjustable:
-
- drive0 These four arguments can be arrays of
- drive1 1-6 integers as follows:
- drive2
- drive3 <prt>,<pro>,<uni>,<mod>,<slv>,<dly>
-
- Where,
-
- <prt> is the base of the parallel port address for
- the corresponding drive. (required)
-
- <pro> is the protocol number for the adapter that
- supports this drive. These numbers are
- logged by 'paride' when the protocol modules
- are initialised. (0 if not given)
-
- <uni> for those adapters that support chained
- devices, this is the unit selector for the
- chain of devices on the given port. It should
- be zero for devices that don't support chaining.
- (0 if not given)
-
- <mod> this can be -1 to choose the best mode, or one
- of the mode numbers supported by the adapter.
- (-1 if not given)
-
- <slv> ATAPI CD-ROMs can be jumpered to master or slave.
- Set this to 0 to choose the master drive, 1 to
- choose the slave, -1 (the default) to choose the
- first drive found.
-
- <dly> some parallel ports require the driver to
- go more slowly. -1 sets a default value that
- should work with the chosen protocol. Otherwise,
- set this to a small integer, the larger it is
- the slower the port i/o. In some cases, setting
- this to zero will speed up the device. (default -1)
-
- major You may use this parameter to override the
- default major number (46) that this driver
- will use. Be sure to change the device
- name as well.
-
- name This parameter is a character string that
- contains the name the kernel will use for this
- device (in /proc output, for instance).
- (default "pcd")
-
- verbose This parameter controls the amount of logging
- that the driver will do. Set it to 0 for
- normal operation, 1 to see autoprobe progress
- messages, or 2 to see additional debugging
- output. (default 0)
-
- nice This parameter controls the driver's use of
- idle CPU time, at the expense of some speed.
-
- If this driver is built into the kernel, you can use the
- following kernel command line parameters, with the same values
- as the corresponding module parameters listed above:
-
- pcd.drive0
- pcd.drive1
- pcd.drive2
- pcd.drive3
- pcd.nice
-
- In addition, you can use the parameter pcd.disable to disable
- the driver entirely.
-
-*/
-
-/* Changes:
-
- 1.01 GRG 1998.01.24 Added test unit ready support
- 1.02 GRG 1998.05.06 Changes to pcd_completion, ready_wait,
- and loosen interpretation of ATAPI
- standard for clearing error status.
- Use spinlocks. Eliminate sti().
- 1.03 GRG 1998.06.16 Eliminated an Ugh
- 1.04 GRG 1998.08.15 Added extra debugging, improvements to
- pcd_completion, use HZ in loop timing
- 1.05 GRG 1998.08.16 Conformed to "Uniform CD-ROM" standard
- 1.06 GRG 1998.08.19 Added audio ioctl support
- 1.07 GRG 1998.09.24 Increased reset timeout, added jumbo support
-
-*/
-
-#define PCD_VERSION "1.07"
-#define PCD_MAJOR 46
-#define PCD_NAME "pcd"
-#define PCD_UNITS 4
-
-/* Here are things one can override from the insmod command.
- Most are autoprobed by paride unless set here. Verbose is off
- by default.
-
-*/
-
-static int verbose = 0;
-static int major = PCD_MAJOR;
-static char *name = PCD_NAME;
-static int nice = 0;
-static int disable = 0;
-
-static int drive0[6] = { 0, 0, 0, -1, -1, -1 };
-static int drive1[6] = { 0, 0, 0, -1, -1, -1 };
-static int drive2[6] = { 0, 0, 0, -1, -1, -1 };
-static int drive3[6] = { 0, 0, 0, -1, -1, -1 };
-
-static int (*drives[4])[6] = {&drive0, &drive1, &drive2, &drive3};
-static int pcd_drive_count;
-
-enum {D_PRT, D_PRO, D_UNI, D_MOD, D_SLV, D_DLY};
-
-/* end of parameters */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/errno.h>
-#include <linux/fs.h>
-#include <linux/kernel.h>
-#include <linux/delay.h>
-#include <linux/cdrom.h>
-#include <linux/spinlock.h>
-#include <linux/blk-mq.h>
-#include <linux/mutex.h>
-#include <linux/uaccess.h>
-
-static DEFINE_MUTEX(pcd_mutex);
-static DEFINE_SPINLOCK(pcd_lock);
-
-module_param(verbose, int, 0644);
-module_param(major, int, 0);
-module_param(name, charp, 0);
-module_param(nice, int, 0);
-module_param_array(drive0, int, NULL, 0);
-module_param_array(drive1, int, NULL, 0);
-module_param_array(drive2, int, NULL, 0);
-module_param_array(drive3, int, NULL, 0);
-
-#include "paride.h"
-#include "pseudo.h"
-
-#define PCD_RETRIES 5
-#define PCD_TMO 800 /* timeout in jiffies */
-#define PCD_DELAY 50 /* spin delay in uS */
-#define PCD_READY_TMO 20 /* in seconds */
-#define PCD_RESET_TMO 100 /* in tenths of a second */
-
-#define PCD_SPIN (1000000*PCD_TMO)/(HZ*PCD_DELAY)
-
-#define IDE_ERR 0x01
-#define IDE_DRQ 0x08
-#define IDE_READY 0x40
-#define IDE_BUSY 0x80
-
-static int pcd_open(struct cdrom_device_info *cdi, int purpose);
-static void pcd_release(struct cdrom_device_info *cdi);
-static int pcd_drive_status(struct cdrom_device_info *cdi, int slot_nr);
-static unsigned int pcd_check_events(struct cdrom_device_info *cdi,
- unsigned int clearing, int slot_nr);
-static int pcd_tray_move(struct cdrom_device_info *cdi, int position);
-static int pcd_lock_door(struct cdrom_device_info *cdi, int lock);
-static int pcd_drive_reset(struct cdrom_device_info *cdi);
-static int pcd_get_mcn(struct cdrom_device_info *cdi, struct cdrom_mcn *mcn);
-static int pcd_audio_ioctl(struct cdrom_device_info *cdi,
- unsigned int cmd, void *arg);
-static int pcd_packet(struct cdrom_device_info *cdi,
- struct packet_command *cgc);
-
-static void do_pcd_read_drq(void);
-static blk_status_t pcd_queue_rq(struct blk_mq_hw_ctx *hctx,
- const struct blk_mq_queue_data *bd);
-static void do_pcd_read(void);
-
-struct pcd_unit {
- struct pi_adapter pia; /* interface to paride layer */
- struct pi_adapter *pi;
- int drive; /* master/slave */
- int last_sense; /* result of last request sense */
- int changed; /* media change seen */
- int present; /* does this unit exist ? */
- char *name; /* pcd0, pcd1, etc */
- struct cdrom_device_info info; /* uniform cdrom interface */
- struct gendisk *disk;
- struct blk_mq_tag_set tag_set;
- struct list_head rq_list;
-};
-
-static struct pcd_unit pcd[PCD_UNITS];
-
-static char pcd_scratch[64];
-static char pcd_buffer[2048]; /* raw block buffer */
-static int pcd_bufblk = -1; /* block in buffer, in CD units,
- -1 for nothing there. See also
- pd_unit.
- */
-
-/* the variables below are used mainly in the I/O request engine, which
- processes only one request at a time.
-*/
-
-static struct pcd_unit *pcd_current; /* current request's drive */
-static struct request *pcd_req;
-static int pcd_retries; /* retries on current request */
-static int pcd_busy; /* request being processed ? */
-static int pcd_sector; /* address of next requested sector */
-static int pcd_count; /* number of blocks still to do */
-static char *pcd_buf; /* buffer for request in progress */
-static void *par_drv; /* reference of parport driver */
-
-/* kernel glue structures */
-
-static int pcd_block_open(struct block_device *bdev, fmode_t mode)
-{
- struct pcd_unit *cd = bdev->bd_disk->private_data;
- int ret;
-
- bdev_check_media_change(bdev);
-
- mutex_lock(&pcd_mutex);
- ret = cdrom_open(&cd->info, bdev, mode);
- mutex_unlock(&pcd_mutex);
-
- return ret;
-}
-
-static void pcd_block_release(struct gendisk *disk, fmode_t mode)
-{
- struct pcd_unit *cd = disk->private_data;
- mutex_lock(&pcd_mutex);
- cdrom_release(&cd->info, mode);
- mutex_unlock(&pcd_mutex);
-}
-
-static int pcd_block_ioctl(struct block_device *bdev, fmode_t mode,
- unsigned cmd, unsigned long arg)
-{
- struct pcd_unit *cd = bdev->bd_disk->private_data;
- int ret;
-
- mutex_lock(&pcd_mutex);
- ret = cdrom_ioctl(&cd->info, bdev, mode, cmd, arg);
- mutex_unlock(&pcd_mutex);
-
- return ret;
-}
-
-static unsigned int pcd_block_check_events(struct gendisk *disk,
- unsigned int clearing)
-{
- struct pcd_unit *cd = disk->private_data;
- return cdrom_check_events(&cd->info, clearing);
-}
-
-static const struct block_device_operations pcd_bdops = {
- .owner = THIS_MODULE,
- .open = pcd_block_open,
- .release = pcd_block_release,
- .ioctl = pcd_block_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = blkdev_compat_ptr_ioctl,
-#endif
- .check_events = pcd_block_check_events,
-};
-
-static const struct cdrom_device_ops pcd_dops = {
- .open = pcd_open,
- .release = pcd_release,
- .drive_status = pcd_drive_status,
- .check_events = pcd_check_events,
- .tray_move = pcd_tray_move,
- .lock_door = pcd_lock_door,
- .get_mcn = pcd_get_mcn,
- .reset = pcd_drive_reset,
- .audio_ioctl = pcd_audio_ioctl,
- .generic_packet = pcd_packet,
- .capability = CDC_CLOSE_TRAY | CDC_OPEN_TRAY | CDC_LOCK |
- CDC_MCN | CDC_MEDIA_CHANGED | CDC_RESET |
- CDC_PLAY_AUDIO | CDC_GENERIC_PACKET | CDC_CD_R |
- CDC_CD_RW,
-};
-
-static const struct blk_mq_ops pcd_mq_ops = {
- .queue_rq = pcd_queue_rq,
-};
-
-static int pcd_open(struct cdrom_device_info *cdi, int purpose)
-{
- struct pcd_unit *cd = cdi->handle;
- if (!cd->present)
- return -ENODEV;
- return 0;
-}
-
-static void pcd_release(struct cdrom_device_info *cdi)
-{
-}
-
-static inline int status_reg(struct pcd_unit *cd)
-{
- return pi_read_regr(cd->pi, 1, 6);
-}
-
-static inline int read_reg(struct pcd_unit *cd, int reg)
-{
- return pi_read_regr(cd->pi, 0, reg);
-}
-
-static inline void write_reg(struct pcd_unit *cd, int reg, int val)
-{
- pi_write_regr(cd->pi, 0, reg, val);
-}
-
-static int pcd_wait(struct pcd_unit *cd, int go, int stop, char *fun, char *msg)
-{
- int j, r, e, s, p;
-
- j = 0;
- while ((((r = status_reg(cd)) & go) || (stop && (!(r & stop))))
- && (j++ < PCD_SPIN))
- udelay(PCD_DELAY);
-
- if ((r & (IDE_ERR & stop)) || (j > PCD_SPIN)) {
- s = read_reg(cd, 7);
- e = read_reg(cd, 1);
- p = read_reg(cd, 2);
- if (j > PCD_SPIN)
- e |= 0x100;
- if (fun)
- printk("%s: %s %s: alt=0x%x stat=0x%x err=0x%x"
- " loop=%d phase=%d\n",
- cd->name, fun, msg, r, s, e, j, p);
- return (s << 8) + r;
- }
- return 0;
-}
-
-static int pcd_command(struct pcd_unit *cd, char *cmd, int dlen, char *fun)
-{
- pi_connect(cd->pi);
-
- write_reg(cd, 6, 0xa0 + 0x10 * cd->drive);
-
- if (pcd_wait(cd, IDE_BUSY | IDE_DRQ, 0, fun, "before command")) {
- pi_disconnect(cd->pi);
- return -1;
- }
-
- write_reg(cd, 4, dlen % 256);
- write_reg(cd, 5, dlen / 256);
- write_reg(cd, 7, 0xa0); /* ATAPI packet command */
-
- if (pcd_wait(cd, IDE_BUSY, IDE_DRQ, fun, "command DRQ")) {
- pi_disconnect(cd->pi);
- return -1;
- }
-
- if (read_reg(cd, 2) != 1) {
- printk("%s: %s: command phase error\n", cd->name, fun);
- pi_disconnect(cd->pi);
- return -1;
- }
-
- pi_write_block(cd->pi, cmd, 12);
-
- return 0;
-}
-
-static int pcd_completion(struct pcd_unit *cd, char *buf, char *fun)
-{
- int r, d, p, n, k, j;
-
- r = -1;
- k = 0;
- j = 0;
-
- if (!pcd_wait(cd, IDE_BUSY, IDE_DRQ | IDE_READY | IDE_ERR,
- fun, "completion")) {
- r = 0;
- while (read_reg(cd, 7) & IDE_DRQ) {
- d = read_reg(cd, 4) + 256 * read_reg(cd, 5);
- n = (d + 3) & 0xfffc;
- p = read_reg(cd, 2) & 3;
-
- if ((p == 2) && (n > 0) && (j == 0)) {
- pi_read_block(cd->pi, buf, n);
- if (verbose > 1)
- printk("%s: %s: Read %d bytes\n",
- cd->name, fun, n);
- r = 0;
- j++;
- } else {
- if (verbose > 1)
- printk
- ("%s: %s: Unexpected phase %d, d=%d, k=%d\n",
- cd->name, fun, p, d, k);
- if (verbose < 2)
- printk_once(
- "%s: WARNING: ATAPI phase errors\n",
- cd->name);
- mdelay(1);
- }
- if (k++ > PCD_TMO) {
- printk("%s: Stuck DRQ\n", cd->name);
- break;
- }
- if (pcd_wait
- (cd, IDE_BUSY, IDE_DRQ | IDE_READY | IDE_ERR, fun,
- "completion")) {
- r = -1;
- break;
- }
- }
- }
-
- pi_disconnect(cd->pi);
-
- return r;
-}
-
-static void pcd_req_sense(struct pcd_unit *cd, char *fun)
-{
- char rs_cmd[12] = { 0x03, 0, 0, 0, 16, 0, 0, 0, 0, 0, 0, 0 };
- char buf[16];
- int r, c;
-
- r = pcd_command(cd, rs_cmd, 16, "Request sense");
- mdelay(1);
- if (!r)
- pcd_completion(cd, buf, "Request sense");
-
- cd->last_sense = -1;
- c = 2;
- if (!r) {
- if (fun)
- printk("%s: %s: Sense key: %x, ASC: %x, ASQ: %x\n",
- cd->name, fun, buf[2] & 0xf, buf[12], buf[13]);
- c = buf[2] & 0xf;
- cd->last_sense =
- c | ((buf[12] & 0xff) << 8) | ((buf[13] & 0xff) << 16);
- }
- if ((c == 2) || (c == 6))
- cd->changed = 1;
-}
-
-static int pcd_atapi(struct pcd_unit *cd, char *cmd, int dlen, char *buf, char *fun)
-{
- int r;
-
- r = pcd_command(cd, cmd, dlen, fun);
- mdelay(1);
- if (!r)
- r = pcd_completion(cd, buf, fun);
- if (r)
- pcd_req_sense(cd, fun);
-
- return r;
-}
-
-static int pcd_packet(struct cdrom_device_info *cdi, struct packet_command *cgc)
-{
- return pcd_atapi(cdi->handle, cgc->cmd, cgc->buflen, cgc->buffer,
- "generic packet");
-}
-
-#define DBMSG(msg) ((verbose>1)?(msg):NULL)
-
-static unsigned int pcd_check_events(struct cdrom_device_info *cdi,
- unsigned int clearing, int slot_nr)
-{
- struct pcd_unit *cd = cdi->handle;
- int res = cd->changed;
- if (res)
- cd->changed = 0;
- return res ? DISK_EVENT_MEDIA_CHANGE : 0;
-}
-
-static int pcd_lock_door(struct cdrom_device_info *cdi, int lock)
-{
- char un_cmd[12] = { 0x1e, 0, 0, 0, lock, 0, 0, 0, 0, 0, 0, 0 };
-
- return pcd_atapi(cdi->handle, un_cmd, 0, pcd_scratch,
- lock ? "lock door" : "unlock door");
-}
-
-static int pcd_tray_move(struct cdrom_device_info *cdi, int position)
-{
- char ej_cmd[12] = { 0x1b, 0, 0, 0, 3 - position, 0, 0, 0, 0, 0, 0, 0 };
-
- return pcd_atapi(cdi->handle, ej_cmd, 0, pcd_scratch,
- position ? "eject" : "close tray");
-}
-
-static void pcd_sleep(int cs)
-{
- schedule_timeout_interruptible(cs);
-}
-
-static int pcd_reset(struct pcd_unit *cd)
-{
- int i, k, flg;
- int expect[5] = { 1, 1, 1, 0x14, 0xeb };
-
- pi_connect(cd->pi);
- write_reg(cd, 6, 0xa0 + 0x10 * cd->drive);
- write_reg(cd, 7, 8);
-
- pcd_sleep(20 * HZ / 1000); /* delay a bit */
-
- k = 0;
- while ((k++ < PCD_RESET_TMO) && (status_reg(cd) & IDE_BUSY))
- pcd_sleep(HZ / 10);
-
- flg = 1;
- for (i = 0; i < 5; i++)
- flg &= (read_reg(cd, i + 1) == expect[i]);
-
- if (verbose) {
- printk("%s: Reset (%d) signature = ", cd->name, k);
- for (i = 0; i < 5; i++)
- printk("%3x", read_reg(cd, i + 1));
- if (!flg)
- printk(" (incorrect)");
- printk("\n");
- }
-
- pi_disconnect(cd->pi);
- return flg - 1;
-}
-
-static int pcd_drive_reset(struct cdrom_device_info *cdi)
-{
- return pcd_reset(cdi->handle);
-}
-
-static int pcd_ready_wait(struct pcd_unit *cd, int tmo)
-{
- char tr_cmd[12] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
- int k, p;
-
- k = 0;
- while (k < tmo) {
- cd->last_sense = 0;
- pcd_atapi(cd, tr_cmd, 0, NULL, DBMSG("test unit ready"));
- p = cd->last_sense;
- if (!p)
- return 0;
- if (!(((p & 0xffff) == 0x0402) || ((p & 0xff) == 6)))
- return p;
- k++;
- pcd_sleep(HZ);
- }
- return 0x000020; /* timeout */
-}
-
-static int pcd_drive_status(struct cdrom_device_info *cdi, int slot_nr)
-{
- char rc_cmd[12] = { 0x25, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
- struct pcd_unit *cd = cdi->handle;
-
- if (pcd_ready_wait(cd, PCD_READY_TMO))
- return CDS_DRIVE_NOT_READY;
- if (pcd_atapi(cd, rc_cmd, 8, pcd_scratch, DBMSG("check media")))
- return CDS_NO_DISC;
- return CDS_DISC_OK;
-}
-
-static int pcd_identify(struct pcd_unit *cd)
-{
- char id_cmd[12] = { 0x12, 0, 0, 0, 36, 0, 0, 0, 0, 0, 0, 0 };
- char id[18];
- int k, s;
-
- pcd_bufblk = -1;
-
- s = pcd_atapi(cd, id_cmd, 36, pcd_buffer, "identify");
-
- if (s)
- return -1;
- if ((pcd_buffer[0] & 0x1f) != 5) {
- if (verbose)
- printk("%s: %s is not a CD-ROM\n",
- cd->name, cd->drive ? "Slave" : "Master");
- return -1;
- }
- memcpy(id, pcd_buffer + 16, 16);
- id[16] = 0;
- k = 16;
- while ((k >= 0) && (id[k] <= 0x20)) {
- id[k] = 0;
- k--;
- }
-
- printk("%s: %s: %s\n", cd->name, cd->drive ? "Slave" : "Master", id);
-
- return 0;
-}
-
-/*
- * returns 0, with id set if drive is detected, otherwise an error code.
- */
-static int pcd_probe(struct pcd_unit *cd, int ms)
-{
- if (ms == -1) {
- for (cd->drive = 0; cd->drive <= 1; cd->drive++)
- if (!pcd_reset(cd) && !pcd_identify(cd))
- return 0;
- } else {
- cd->drive = ms;
- if (!pcd_reset(cd) && !pcd_identify(cd))
- return 0;
- }
- return -ENODEV;
-}
-
-static int pcd_probe_capabilities(struct pcd_unit *cd)
-{
- char cmd[12] = { 0x5a, 1 << 3, 0x2a, 0, 0, 0, 0, 18, 0, 0, 0, 0 };
- char buffer[32];
- int ret;
-
- ret = pcd_atapi(cd, cmd, 18, buffer, "mode sense capabilities");
- if (ret)
- return ret;
-
- /* we should now have the cap page */
- if ((buffer[11] & 1) == 0)
- cd->info.mask |= CDC_CD_R;
- if ((buffer[11] & 2) == 0)
- cd->info.mask |= CDC_CD_RW;
- if ((buffer[12] & 1) == 0)
- cd->info.mask |= CDC_PLAY_AUDIO;
- if ((buffer[14] & 1) == 0)
- cd->info.mask |= CDC_LOCK;
- if ((buffer[14] & 8) == 0)
- cd->info.mask |= CDC_OPEN_TRAY;
- if ((buffer[14] >> 6) == 0)
- cd->info.mask |= CDC_CLOSE_TRAY;
-
- return 0;
-}
-
-/* I/O request processing */
-static int pcd_queue;
-
-static int set_next_request(void)
-{
- struct pcd_unit *cd;
- int old_pos = pcd_queue;
-
- do {
- cd = &pcd[pcd_queue];
- if (++pcd_queue == PCD_UNITS)
- pcd_queue = 0;
- if (cd->present && !list_empty(&cd->rq_list)) {
- pcd_req = list_first_entry(&cd->rq_list, struct request,
- queuelist);
- list_del_init(&pcd_req->queuelist);
- blk_mq_start_request(pcd_req);
- break;
- }
- } while (pcd_queue != old_pos);
-
- return pcd_req != NULL;
-}
-
-static void pcd_request(void)
-{
- struct pcd_unit *cd;
-
- if (pcd_busy)
- return;
-
- if (!pcd_req && !set_next_request())
- return;
-
- cd = pcd_req->q->disk->private_data;
- if (cd != pcd_current)
- pcd_bufblk = -1;
- pcd_current = cd;
- pcd_sector = blk_rq_pos(pcd_req);
- pcd_count = blk_rq_cur_sectors(pcd_req);
- pcd_buf = bio_data(pcd_req->bio);
- pcd_busy = 1;
- ps_set_intr(do_pcd_read, NULL, 0, nice);
-}
-
-static blk_status_t pcd_queue_rq(struct blk_mq_hw_ctx *hctx,
- const struct blk_mq_queue_data *bd)
-{
- struct pcd_unit *cd = hctx->queue->queuedata;
-
- if (rq_data_dir(bd->rq) != READ) {
- blk_mq_start_request(bd->rq);
- return BLK_STS_IOERR;
- }
-
- spin_lock_irq(&pcd_lock);
- list_add_tail(&bd->rq->queuelist, &cd->rq_list);
- pcd_request();
- spin_unlock_irq(&pcd_lock);
-
- return BLK_STS_OK;
-}
-
-static inline void next_request(blk_status_t err)
-{
- unsigned long saved_flags;
-
- spin_lock_irqsave(&pcd_lock, saved_flags);
- if (!blk_update_request(pcd_req, err, blk_rq_cur_bytes(pcd_req))) {
- __blk_mq_end_request(pcd_req, err);
- pcd_req = NULL;
- }
- pcd_busy = 0;
- pcd_request();
- spin_unlock_irqrestore(&pcd_lock, saved_flags);
-}
-
-static int pcd_ready(void)
-{
- return (((status_reg(pcd_current) & (IDE_BUSY | IDE_DRQ)) == IDE_DRQ));
-}
-
-static void pcd_transfer(void)
-{
-
- while (pcd_count && (pcd_sector / 4 == pcd_bufblk)) {
- int o = (pcd_sector % 4) * 512;
- memcpy(pcd_buf, pcd_buffer + o, 512);
- pcd_count--;
- pcd_buf += 512;
- pcd_sector++;
- }
-}
-
-static void pcd_start(void)
-{
- int b, i;
- char rd_cmd[12] = { 0xa8, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0 };
-
- pcd_bufblk = pcd_sector / 4;
- b = pcd_bufblk;
- for (i = 0; i < 4; i++) {
- rd_cmd[5 - i] = b & 0xff;
- b = b >> 8;
- }
-
- if (pcd_command(pcd_current, rd_cmd, 2048, "read block")) {
- pcd_bufblk = -1;
- next_request(BLK_STS_IOERR);
- return;
- }
-
- mdelay(1);
-
- ps_set_intr(do_pcd_read_drq, pcd_ready, PCD_TMO, nice);
-}
-
-static void do_pcd_read(void)
-{
- pcd_busy = 1;
- pcd_retries = 0;
- pcd_transfer();
- if (!pcd_count) {
- next_request(0);
- return;
- }
-
- pi_do_claimed(pcd_current->pi, pcd_start);
-}
-
-static void do_pcd_read_drq(void)
-{
- unsigned long saved_flags;
-
- if (pcd_completion(pcd_current, pcd_buffer, "read block")) {
- if (pcd_retries < PCD_RETRIES) {
- mdelay(1);
- pcd_retries++;
- pi_do_claimed(pcd_current->pi, pcd_start);
- return;
- }
- pcd_bufblk = -1;
- next_request(BLK_STS_IOERR);
- return;
- }
-
- do_pcd_read();
- spin_lock_irqsave(&pcd_lock, saved_flags);
- pcd_request();
- spin_unlock_irqrestore(&pcd_lock, saved_flags);
-}
-
-/* the audio_ioctl stuff is adapted from sr_ioctl.c */
-
-static int pcd_audio_ioctl(struct cdrom_device_info *cdi, unsigned int cmd, void *arg)
-{
- struct pcd_unit *cd = cdi->handle;
-
- switch (cmd) {
-
- case CDROMREADTOCHDR:
-
- {
- char cmd[12] =
- { GPCMD_READ_TOC_PMA_ATIP, 0, 0, 0, 0, 0, 0, 0, 12,
- 0, 0, 0 };
- struct cdrom_tochdr *tochdr =
- (struct cdrom_tochdr *) arg;
- char buffer[32];
- int r;
-
- r = pcd_atapi(cd, cmd, 12, buffer, "read toc header");
-
- tochdr->cdth_trk0 = buffer[2];
- tochdr->cdth_trk1 = buffer[3];
-
- return r ? -EIO : 0;
- }
-
- case CDROMREADTOCENTRY:
-
- {
- char cmd[12] =
- { GPCMD_READ_TOC_PMA_ATIP, 0, 0, 0, 0, 0, 0, 0, 12,
- 0, 0, 0 };
-
- struct cdrom_tocentry *tocentry =
- (struct cdrom_tocentry *) arg;
- unsigned char buffer[32];
- int r;
-
- cmd[1] =
- (tocentry->cdte_format == CDROM_MSF ? 0x02 : 0);
- cmd[6] = tocentry->cdte_track;
-
- r = pcd_atapi(cd, cmd, 12, buffer, "read toc entry");
-
- tocentry->cdte_ctrl = buffer[5] & 0xf;
- tocentry->cdte_adr = buffer[5] >> 4;
- tocentry->cdte_datamode =
- (tocentry->cdte_ctrl & 0x04) ? 1 : 0;
- if (tocentry->cdte_format == CDROM_MSF) {
- tocentry->cdte_addr.msf.minute = buffer[9];
- tocentry->cdte_addr.msf.second = buffer[10];
- tocentry->cdte_addr.msf.frame = buffer[11];
- } else
- tocentry->cdte_addr.lba =
- (((((buffer[8] << 8) + buffer[9]) << 8)
- + buffer[10]) << 8) + buffer[11];
-
- return r ? -EIO : 0;
- }
-
- default:
-
- return -ENOSYS;
- }
-}
-
-static int pcd_get_mcn(struct cdrom_device_info *cdi, struct cdrom_mcn *mcn)
-{
- char cmd[12] =
- { GPCMD_READ_SUBCHANNEL, 0, 0x40, 2, 0, 0, 0, 0, 24, 0, 0, 0 };
- char buffer[32];
-
- if (pcd_atapi(cdi->handle, cmd, 24, buffer, "get mcn"))
- return -EIO;
-
- memcpy(mcn->medium_catalog_number, buffer + 9, 13);
- mcn->medium_catalog_number[13] = 0;
-
- return 0;
-}
-
-static int pcd_init_unit(struct pcd_unit *cd, bool autoprobe, int port,
- int mode, int unit, int protocol, int delay, int ms)
-{
- struct gendisk *disk;
- int ret;
-
- ret = blk_mq_alloc_sq_tag_set(&cd->tag_set, &pcd_mq_ops, 1,
- BLK_MQ_F_SHOULD_MERGE);
- if (ret)
- return ret;
-
- disk = blk_mq_alloc_disk(&cd->tag_set, cd);
- if (IS_ERR(disk)) {
- ret = PTR_ERR(disk);
- goto out_free_tag_set;
- }
-
- INIT_LIST_HEAD(&cd->rq_list);
- blk_queue_bounce_limit(disk->queue, BLK_BOUNCE_HIGH);
- cd->disk = disk;
- cd->pi = &cd->pia;
- cd->present = 0;
- cd->last_sense = 0;
- cd->changed = 1;
- cd->drive = (*drives[cd - pcd])[D_SLV];
-
- cd->name = &cd->info.name[0];
- snprintf(cd->name, sizeof(cd->info.name), "%s%d", name, unit);
- cd->info.ops = &pcd_dops;
- cd->info.handle = cd;
- cd->info.speed = 0;
- cd->info.capacity = 1;
- cd->info.mask = 0;
- disk->major = major;
- disk->first_minor = unit;
- disk->minors = 1;
- strcpy(disk->disk_name, cd->name); /* umm... */
- disk->fops = &pcd_bdops;
- disk->flags |= GENHD_FL_NO_PART;
- disk->events = DISK_EVENT_MEDIA_CHANGE;
- disk->event_flags = DISK_EVENT_FLAG_BLOCK_ON_EXCL_WRITE;
-
- if (!pi_init(cd->pi, autoprobe, port, mode, unit, protocol, delay,
- pcd_buffer, PI_PCD, verbose, cd->name)) {
- ret = -ENODEV;
- goto out_free_disk;
- }
- ret = pcd_probe(cd, ms);
- if (ret)
- goto out_pi_release;
-
- cd->present = 1;
- pcd_probe_capabilities(cd);
- ret = register_cdrom(cd->disk, &cd->info);
- if (ret)
- goto out_pi_release;
- ret = add_disk(cd->disk);
- if (ret)
- goto out_unreg_cdrom;
- return 0;
-
-out_unreg_cdrom:
- unregister_cdrom(&cd->info);
-out_pi_release:
- pi_release(cd->pi);
-out_free_disk:
- put_disk(cd->disk);
-out_free_tag_set:
- blk_mq_free_tag_set(&cd->tag_set);
- return ret;
-}
-
-static int __init pcd_init(void)
-{
- int found = 0, unit;
-
- if (disable)
- return -EINVAL;
-
- if (register_blkdev(major, name))
- return -EBUSY;
-
- pr_info("%s: %s version %s, major %d, nice %d\n",
- name, name, PCD_VERSION, major, nice);
-
- par_drv = pi_register_driver(name);
- if (!par_drv) {
- pr_err("failed to register %s driver\n", name);
- goto out_unregister_blkdev;
- }
-
- for (unit = 0; unit < PCD_UNITS; unit++) {
- if ((*drives[unit])[D_PRT])
- pcd_drive_count++;
- }
-
- if (pcd_drive_count == 0) { /* nothing spec'd - so autoprobe for 1 */
- if (!pcd_init_unit(pcd, 1, -1, -1, -1, -1, -1, -1))
- found++;
- } else {
- for (unit = 0; unit < PCD_UNITS; unit++) {
- struct pcd_unit *cd = &pcd[unit];
- int *conf = *drives[unit];
-
- if (!conf[D_PRT])
- continue;
- if (!pcd_init_unit(cd, 0, conf[D_PRT], conf[D_MOD],
- conf[D_UNI], conf[D_PRO], conf[D_DLY],
- conf[D_SLV]))
- found++;
- }
- }
-
- if (!found) {
- pr_info("%s: No CD-ROM drive found\n", name);
- goto out_unregister_pi_driver;
- }
-
- return 0;
-
-out_unregister_pi_driver:
- pi_unregister_driver(par_drv);
-out_unregister_blkdev:
- unregister_blkdev(major, name);
- return -ENODEV;
-}
-
-static void __exit pcd_exit(void)
-{
- struct pcd_unit *cd;
- int unit;
-
- for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) {
- if (!cd->present)
- continue;
-
- unregister_cdrom(&cd->info);
- del_gendisk(cd->disk);
- pi_release(cd->pi);
- put_disk(cd->disk);
-
- blk_mq_free_tag_set(&cd->tag_set);
- }
- pi_unregister_driver(par_drv);
- unregister_blkdev(major, name);
-}
-
-MODULE_LICENSE("GPL");
-module_init(pcd_init)
-module_exit(pcd_exit)
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
deleted file mode 100644
index f8a75bc90f70..000000000000
--- a/drivers/block/paride/pd.c
+++ /dev/null
@@ -1,1032 +0,0 @@
-/*
- pd.c (c) 1997-8 Grant R. Guenther <grant@torque.net>
- Under the terms of the GNU General Public License.
-
- This is the high-level driver for parallel port IDE hard
- drives based on chips supported by the paride module.
-
- By default, the driver will autoprobe for a single parallel
- port IDE drive, but if their individual parameters are
- specified, the driver can handle up to 4 drives.
-
- The behaviour of the pd driver can be altered by setting
- some parameters from the insmod command line. The following
- parameters are adjustable:
-
- drive0 These four arguments can be arrays of
- drive1 1-8 integers as follows:
- drive2
- drive3 <prt>,<pro>,<uni>,<mod>,<geo>,<sby>,<dly>,<slv>
-
- Where,
-
- <prt> is the base of the parallel port address for
- the corresponding drive. (required)
-
- <pro> is the protocol number for the adapter that
- supports this drive. These numbers are
- logged by 'paride' when the protocol modules
- are initialised. (0 if not given)
-
- <uni> for those adapters that support chained
- devices, this is the unit selector for the
- chain of devices on the given port. It should
- be zero for devices that don't support chaining.
- (0 if not given)
-
- <mod> this can be -1 to choose the best mode, or one
- of the mode numbers supported by the adapter.
- (-1 if not given)
-
- <geo> this defaults to 0 to indicate that the driver
- should use the CHS geometry provided by the drive
- itself. If set to 1, the driver will provide
- a logical geometry with 64 heads and 32 sectors
- per track, to be consistent with most SCSI
- drivers. (0 if not given)
-
- <sby> set this to zero to disable the power saving
- standby mode, if needed. (1 if not given)
-
- <dly> some parallel ports require the driver to
- go more slowly. -1 sets a default value that
- should work with the chosen protocol. Otherwise,
- set this to a small integer, the larger it is
- the slower the port i/o. In some cases, setting
- this to zero will speed up the device. (default -1)
-
- <slv> IDE disks can be jumpered to master or slave.
- Set this to 0 to choose the master drive, 1 to
- choose the slave, -1 (the default) to choose the
- first drive found.
-
-
- major You may use this parameter to override the
- default major number (45) that this driver
- will use. Be sure to change the device
- name as well.
-
- name This parameter is a character string that
- contains the name the kernel will use for this
- device (in /proc output, for instance).
- (default "pd")
-
- cluster The driver will attempt to aggregate requests
- for adjacent blocks into larger multi-block
- clusters. The maximum cluster size (in 512
- byte sectors) is set with this parameter.
- (default 64)
-
- verbose This parameter controls the amount of logging
- that the driver will do. Set it to 0 for
- normal operation, 1 to see autoprobe progress
- messages, or 2 to see additional debugging
- output. (default 0)
-
- nice This parameter controls the driver's use of
- idle CPU time, at the expense of some speed.
-
- If this driver is built into the kernel, you can use kernel
- the following command line parameters, with the same values
- as the corresponding module parameters listed above:
-
- pd.drive0
- pd.drive1
- pd.drive2
- pd.drive3
- pd.cluster
- pd.nice
-
- In addition, you can use the parameter pd.disable to disable
- the driver entirely.
-
-*/
-
-/* Changes:
-
- 1.01 GRG 1997.01.24 Restored pd_reset()
- Added eject ioctl
- 1.02 GRG 1998.05.06 SMP spinlock changes,
- Added slave support
- 1.03 GRG 1998.06.16 Eliminate an Ugh.
- 1.04 GRG 1998.08.15 Extra debugging, use HZ in loop timing
- 1.05 GRG 1998.09.24 Added jumbo support
-
-*/
-
-#define PD_VERSION "1.05"
-#define PD_MAJOR 45
-#define PD_NAME "pd"
-#define PD_UNITS 4
-
-/* Here are things one can override from the insmod command.
- Most are autoprobed by paride unless set here. Verbose is off
- by default.
-
-*/
-#include <linux/types.h>
-
-static int verbose = 0;
-static int major = PD_MAJOR;
-static char *name = PD_NAME;
-static int cluster = 64;
-static int nice = 0;
-static int disable = 0;
-
-static int drive0[8] = { 0, 0, 0, -1, 0, 1, -1, -1 };
-static int drive1[8] = { 0, 0, 0, -1, 0, 1, -1, -1 };
-static int drive2[8] = { 0, 0, 0, -1, 0, 1, -1, -1 };
-static int drive3[8] = { 0, 0, 0, -1, 0, 1, -1, -1 };
-
-static int (*drives[4])[8] = {&drive0, &drive1, &drive2, &drive3};
-
-enum {D_PRT, D_PRO, D_UNI, D_MOD, D_GEO, D_SBY, D_DLY, D_SLV};
-
-/* end of parameters */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/gfp.h>
-#include <linux/fs.h>
-#include <linux/delay.h>
-#include <linux/hdreg.h>
-#include <linux/cdrom.h> /* for the eject ioctl */
-#include <linux/blk-mq.h>
-#include <linux/blkpg.h>
-#include <linux/kernel.h>
-#include <linux/mutex.h>
-#include <linux/uaccess.h>
-#include <linux/workqueue.h>
-
-static DEFINE_MUTEX(pd_mutex);
-static DEFINE_SPINLOCK(pd_lock);
-
-module_param(verbose, int, 0);
-module_param(major, int, 0);
-module_param(name, charp, 0);
-module_param(cluster, int, 0);
-module_param(nice, int, 0);
-module_param_array(drive0, int, NULL, 0);
-module_param_array(drive1, int, NULL, 0);
-module_param_array(drive2, int, NULL, 0);
-module_param_array(drive3, int, NULL, 0);
-
-#include "paride.h"
-
-#define PD_BITS 4
-
-/* numbers for "SCSI" geometry */
-
-#define PD_LOG_HEADS 64
-#define PD_LOG_SECTS 32
-
-#define PD_ID_OFF 54
-#define PD_ID_LEN 14
-
-#define PD_MAX_RETRIES 5
-#define PD_TMO 800 /* interrupt timeout in jiffies */
-#define PD_SPIN_DEL 50 /* spin delay in micro-seconds */
-
-#define PD_SPIN (1000000*PD_TMO)/(HZ*PD_SPIN_DEL)
-
-#define STAT_ERR 0x00001
-#define STAT_INDEX 0x00002
-#define STAT_ECC 0x00004
-#define STAT_DRQ 0x00008
-#define STAT_SEEK 0x00010
-#define STAT_WRERR 0x00020
-#define STAT_READY 0x00040
-#define STAT_BUSY 0x00080
-
-#define ERR_AMNF 0x00100
-#define ERR_TK0NF 0x00200
-#define ERR_ABRT 0x00400
-#define ERR_MCR 0x00800
-#define ERR_IDNF 0x01000
-#define ERR_MC 0x02000
-#define ERR_UNC 0x04000
-#define ERR_TMO 0x10000
-
-#define IDE_READ 0x20
-#define IDE_WRITE 0x30
-#define IDE_READ_VRFY 0x40
-#define IDE_INIT_DEV_PARMS 0x91
-#define IDE_STANDBY 0x96
-#define IDE_ACKCHANGE 0xdb
-#define IDE_DOORLOCK 0xde
-#define IDE_DOORUNLOCK 0xdf
-#define IDE_IDENTIFY 0xec
-#define IDE_EJECT 0xed
-
-#define PD_NAMELEN 8
-
-struct pd_unit {
- struct pi_adapter pia; /* interface to paride layer */
- struct pi_adapter *pi;
- int access; /* count of active opens ... */
- int capacity; /* Size of this volume in sectors */
- int heads; /* physical geometry */
- int sectors;
- int cylinders;
- int can_lba;
- int drive; /* master=0 slave=1 */
- int changed; /* Have we seen a disk change ? */
- int removable; /* removable media device ? */
- int standby;
- int alt_geom;
- char name[PD_NAMELEN]; /* pda, pdb, etc ... */
- struct gendisk *gd;
- struct blk_mq_tag_set tag_set;
- struct list_head rq_list;
-};
-
-static struct pd_unit pd[PD_UNITS];
-
-struct pd_req {
- /* for REQ_OP_DRV_IN: */
- enum action (*func)(struct pd_unit *disk);
-};
-
-static char pd_scratch[512]; /* scratch block buffer */
-
-static char *pd_errs[17] = { "ERR", "INDEX", "ECC", "DRQ", "SEEK", "WRERR",
- "READY", "BUSY", "AMNF", "TK0NF", "ABRT", "MCR",
- "IDNF", "MC", "UNC", "???", "TMO"
-};
-
-static void *par_drv; /* reference of parport driver */
-
-static inline int status_reg(struct pd_unit *disk)
-{
- return pi_read_regr(disk->pi, 1, 6);
-}
-
-static inline int read_reg(struct pd_unit *disk, int reg)
-{
- return pi_read_regr(disk->pi, 0, reg);
-}
-
-static inline void write_status(struct pd_unit *disk, int val)
-{
- pi_write_regr(disk->pi, 1, 6, val);
-}
-
-static inline void write_reg(struct pd_unit *disk, int reg, int val)
-{
- pi_write_regr(disk->pi, 0, reg, val);
-}
-
-static inline u8 DRIVE(struct pd_unit *disk)
-{
- return 0xa0+0x10*disk->drive;
-}
-
-/* ide command interface */
-
-static void pd_print_error(struct pd_unit *disk, char *msg, int status)
-{
- int i;
-
- printk("%s: %s: status = 0x%x =", disk->name, msg, status);
- for (i = 0; i < ARRAY_SIZE(pd_errs); i++)
- if (status & (1 << i))
- printk(" %s", pd_errs[i]);
- printk("\n");
-}
-
-static void pd_reset(struct pd_unit *disk)
-{ /* called only for MASTER drive */
- write_status(disk, 4);
- udelay(50);
- write_status(disk, 0);
- udelay(250);
-}
-
-#define DBMSG(msg) ((verbose>1)?(msg):NULL)
-
-static int pd_wait_for(struct pd_unit *disk, int w, char *msg)
-{ /* polled wait */
- int k, r, e;
-
- k = 0;
- while (k < PD_SPIN) {
- r = status_reg(disk);
- k++;
- if (((r & w) == w) && !(r & STAT_BUSY))
- break;
- udelay(PD_SPIN_DEL);
- }
- e = (read_reg(disk, 1) << 8) + read_reg(disk, 7);
- if (k >= PD_SPIN)
- e |= ERR_TMO;
- if ((e & (STAT_ERR | ERR_TMO)) && (msg != NULL))
- pd_print_error(disk, msg, e);
- return e;
-}
-
-static void pd_send_command(struct pd_unit *disk, int n, int s, int h, int c0, int c1, int func)
-{
- write_reg(disk, 6, DRIVE(disk) + h);
- write_reg(disk, 1, 0); /* the IDE task file */
- write_reg(disk, 2, n);
- write_reg(disk, 3, s);
- write_reg(disk, 4, c0);
- write_reg(disk, 5, c1);
- write_reg(disk, 7, func);
-
- udelay(1);
-}
-
-static void pd_ide_command(struct pd_unit *disk, int func, int block, int count)
-{
- int c1, c0, h, s;
-
- if (disk->can_lba) {
- s = block & 255;
- c0 = (block >>= 8) & 255;
- c1 = (block >>= 8) & 255;
- h = ((block >>= 8) & 15) + 0x40;
- } else {
- s = (block % disk->sectors) + 1;
- h = (block /= disk->sectors) % disk->heads;
- c0 = (block /= disk->heads) % 256;
- c1 = (block >>= 8);
- }
- pd_send_command(disk, count, s, h, c0, c1, func);
-}
-
-/* The i/o request engine */
-
-enum action {Fail = 0, Ok = 1, Hold, Wait};
-
-static struct request *pd_req; /* current request */
-static enum action (*phase)(void);
-
-static void run_fsm(void);
-
-static void ps_tq_int(struct work_struct *work);
-
-static DECLARE_DELAYED_WORK(fsm_tq, ps_tq_int);
-
-static void schedule_fsm(void)
-{
- if (!nice)
- schedule_delayed_work(&fsm_tq, 0);
- else
- schedule_delayed_work(&fsm_tq, nice-1);
-}
-
-static void ps_tq_int(struct work_struct *work)
-{
- run_fsm();
-}
-
-static enum action do_pd_io_start(void);
-static enum action pd_special(void);
-static enum action do_pd_read_start(void);
-static enum action do_pd_write_start(void);
-static enum action do_pd_read_drq(void);
-static enum action do_pd_write_done(void);
-
-static int pd_queue;
-static int pd_claimed;
-
-static struct pd_unit *pd_current; /* current request's drive */
-static PIA *pi_current; /* current request's PIA */
-
-static int set_next_request(void)
-{
- struct gendisk *disk;
- struct request_queue *q;
- int old_pos = pd_queue;
-
- do {
- disk = pd[pd_queue].gd;
- q = disk ? disk->queue : NULL;
- if (++pd_queue == PD_UNITS)
- pd_queue = 0;
- if (q) {
- struct pd_unit *disk = q->queuedata;
-
- if (list_empty(&disk->rq_list))
- continue;
-
- pd_req = list_first_entry(&disk->rq_list,
- struct request,
- queuelist);
- list_del_init(&pd_req->queuelist);
- blk_mq_start_request(pd_req);
- break;
- }
- } while (pd_queue != old_pos);
-
- return pd_req != NULL;
-}
-
-static void run_fsm(void)
-{
- while (1) {
- enum action res;
- int stop = 0;
-
- if (!phase) {
- pd_current = pd_req->q->disk->private_data;
- pi_current = pd_current->pi;
- phase = do_pd_io_start;
- }
-
- switch (pd_claimed) {
- case 0:
- pd_claimed = 1;
- if (!pi_schedule_claimed(pi_current, run_fsm))
- return;
- fallthrough;
- case 1:
- pd_claimed = 2;
- pi_current->proto->connect(pi_current);
- }
-
- switch(res = phase()) {
- case Ok: case Fail: {
- blk_status_t err;
-
- err = res == Ok ? 0 : BLK_STS_IOERR;
- pi_disconnect(pi_current);
- pd_claimed = 0;
- phase = NULL;
- spin_lock_irq(&pd_lock);
- if (!blk_update_request(pd_req, err,
- blk_rq_cur_bytes(pd_req))) {
- __blk_mq_end_request(pd_req, err);
- pd_req = NULL;
- stop = !set_next_request();
- }
- spin_unlock_irq(&pd_lock);
- if (stop)
- return;
- }
- fallthrough;
- case Hold:
- schedule_fsm();
- return;
- case Wait:
- pi_disconnect(pi_current);
- pd_claimed = 0;
- }
- }
-}
-
-static int pd_retries = 0; /* i/o error retry count */
-static int pd_block; /* address of next requested block */
-static int pd_count; /* number of blocks still to do */
-static int pd_run; /* sectors in current cluster */
-static char *pd_buf; /* buffer for request in progress */
-
-static enum action do_pd_io_start(void)
-{
- switch (req_op(pd_req)) {
- case REQ_OP_DRV_IN:
- phase = pd_special;
- return pd_special();
- case REQ_OP_READ:
- case REQ_OP_WRITE:
- pd_block = blk_rq_pos(pd_req);
- pd_count = blk_rq_cur_sectors(pd_req);
- if (pd_block + pd_count > get_capacity(pd_req->q->disk))
- return Fail;
- pd_run = blk_rq_sectors(pd_req);
- pd_buf = bio_data(pd_req->bio);
- pd_retries = 0;
- if (req_op(pd_req) == REQ_OP_READ)
- return do_pd_read_start();
- else
- return do_pd_write_start();
- default:
- break;
- }
- return Fail;
-}
-
-static enum action pd_special(void)
-{
- struct pd_req *req = blk_mq_rq_to_pdu(pd_req);
-
- return req->func(pd_current);
-}
-
-static int pd_next_buf(void)
-{
- unsigned long saved_flags;
-
- pd_count--;
- pd_run--;
- pd_buf += 512;
- pd_block++;
- if (!pd_run)
- return 1;
- if (pd_count)
- return 0;
- spin_lock_irqsave(&pd_lock, saved_flags);
- if (!blk_update_request(pd_req, 0, blk_rq_cur_bytes(pd_req))) {
- __blk_mq_end_request(pd_req, 0);
- pd_req = NULL;
- pd_count = 0;
- pd_buf = NULL;
- } else {
- pd_count = blk_rq_cur_sectors(pd_req);
- pd_buf = bio_data(pd_req->bio);
- }
- spin_unlock_irqrestore(&pd_lock, saved_flags);
- return !pd_count;
-}
-
-static unsigned long pd_timeout;
-
-static enum action do_pd_read_start(void)
-{
- if (pd_wait_for(pd_current, STAT_READY, "do_pd_read") & STAT_ERR) {
- if (pd_retries < PD_MAX_RETRIES) {
- pd_retries++;
- return Wait;
- }
- return Fail;
- }
- pd_ide_command(pd_current, IDE_READ, pd_block, pd_run);
- phase = do_pd_read_drq;
- pd_timeout = jiffies + PD_TMO;
- return Hold;
-}
-
-static enum action do_pd_write_start(void)
-{
- if (pd_wait_for(pd_current, STAT_READY, "do_pd_write") & STAT_ERR) {
- if (pd_retries < PD_MAX_RETRIES) {
- pd_retries++;
- return Wait;
- }
- return Fail;
- }
- pd_ide_command(pd_current, IDE_WRITE, pd_block, pd_run);
- while (1) {
- if (pd_wait_for(pd_current, STAT_DRQ, "do_pd_write_drq") & STAT_ERR) {
- if (pd_retries < PD_MAX_RETRIES) {
- pd_retries++;
- return Wait;
- }
- return Fail;
- }
- pi_write_block(pd_current->pi, pd_buf, 512);
- if (pd_next_buf())
- break;
- }
- phase = do_pd_write_done;
- pd_timeout = jiffies + PD_TMO;
- return Hold;
-}
-
-static inline int pd_ready(void)
-{
- return !(status_reg(pd_current) & STAT_BUSY);
-}
-
-static enum action do_pd_read_drq(void)
-{
- if (!pd_ready() && !time_after_eq(jiffies, pd_timeout))
- return Hold;
-
- while (1) {
- if (pd_wait_for(pd_current, STAT_DRQ, "do_pd_read_drq") & STAT_ERR) {
- if (pd_retries < PD_MAX_RETRIES) {
- pd_retries++;
- phase = do_pd_read_start;
- return Wait;
- }
- return Fail;
- }
- pi_read_block(pd_current->pi, pd_buf, 512);
- if (pd_next_buf())
- break;
- }
- return Ok;
-}
-
-static enum action do_pd_write_done(void)
-{
- if (!pd_ready() && !time_after_eq(jiffies, pd_timeout))
- return Hold;
-
- if (pd_wait_for(pd_current, STAT_READY, "do_pd_write_done") & STAT_ERR) {
- if (pd_retries < PD_MAX_RETRIES) {
- pd_retries++;
- phase = do_pd_write_start;
- return Wait;
- }
- return Fail;
- }
- return Ok;
-}
-
-/* special io requests */
-
-/* According to the ATA standard, the default CHS geometry should be
- available following a reset. Some Western Digital drives come up
- in a mode where only LBA addresses are accepted until the device
- parameters are initialised.
-*/
-
-static void pd_init_dev_parms(struct pd_unit *disk)
-{
- pd_wait_for(disk, 0, DBMSG("before init_dev_parms"));
- pd_send_command(disk, disk->sectors, 0, disk->heads - 1, 0, 0,
- IDE_INIT_DEV_PARMS);
- udelay(300);
- pd_wait_for(disk, 0, "Initialise device parameters");
-}
-
-static enum action pd_door_lock(struct pd_unit *disk)
-{
- if (!(pd_wait_for(disk, STAT_READY, "Lock") & STAT_ERR)) {
- pd_send_command(disk, 1, 0, 0, 0, 0, IDE_DOORLOCK);
- pd_wait_for(disk, STAT_READY, "Lock done");
- }
- return Ok;
-}
-
-static enum action pd_door_unlock(struct pd_unit *disk)
-{
- if (!(pd_wait_for(disk, STAT_READY, "Lock") & STAT_ERR)) {
- pd_send_command(disk, 1, 0, 0, 0, 0, IDE_DOORUNLOCK);
- pd_wait_for(disk, STAT_READY, "Lock done");
- }
- return Ok;
-}
-
-static enum action pd_eject(struct pd_unit *disk)
-{
- pd_wait_for(disk, 0, DBMSG("before unlock on eject"));
- pd_send_command(disk, 1, 0, 0, 0, 0, IDE_DOORUNLOCK);
- pd_wait_for(disk, 0, DBMSG("after unlock on eject"));
- pd_wait_for(disk, 0, DBMSG("before eject"));
- pd_send_command(disk, 0, 0, 0, 0, 0, IDE_EJECT);
- pd_wait_for(disk, 0, DBMSG("after eject"));
- return Ok;
-}
-
-static enum action pd_media_check(struct pd_unit *disk)
-{
- int r = pd_wait_for(disk, STAT_READY, DBMSG("before media_check"));
- if (!(r & STAT_ERR)) {
- pd_send_command(disk, 1, 1, 0, 0, 0, IDE_READ_VRFY);
- r = pd_wait_for(disk, STAT_READY, DBMSG("RDY after READ_VRFY"));
- } else
- disk->changed = 1; /* say changed if other error */
- if (r & ERR_MC) {
- disk->changed = 1;
- pd_send_command(disk, 1, 0, 0, 0, 0, IDE_ACKCHANGE);
- pd_wait_for(disk, STAT_READY, DBMSG("RDY after ACKCHANGE"));
- pd_send_command(disk, 1, 1, 0, 0, 0, IDE_READ_VRFY);
- r = pd_wait_for(disk, STAT_READY, DBMSG("RDY after VRFY"));
- }
- return Ok;
-}
-
-static void pd_standby_off(struct pd_unit *disk)
-{
- pd_wait_for(disk, 0, DBMSG("before STANDBY"));
- pd_send_command(disk, 0, 0, 0, 0, 0, IDE_STANDBY);
- pd_wait_for(disk, 0, DBMSG("after STANDBY"));
-}
-
-static enum action pd_identify(struct pd_unit *disk)
-{
- int j;
- char id[PD_ID_LEN + 1];
-
-/* WARNING: here there may be dragons. reset() applies to both drives,
- but we call it only on probing the MASTER. This should allow most
- common configurations to work, but be warned that a reset can clear
- settings on the SLAVE drive.
-*/
-
- if (disk->drive == 0)
- pd_reset(disk);
-
- write_reg(disk, 6, DRIVE(disk));
- pd_wait_for(disk, 0, DBMSG("before IDENT"));
- pd_send_command(disk, 1, 0, 0, 0, 0, IDE_IDENTIFY);
-
- if (pd_wait_for(disk, STAT_DRQ, DBMSG("IDENT DRQ")) & STAT_ERR)
- return Fail;
- pi_read_block(disk->pi, pd_scratch, 512);
- disk->can_lba = pd_scratch[99] & 2;
- disk->sectors = le16_to_cpu(*(__le16 *) (pd_scratch + 12));
- disk->heads = le16_to_cpu(*(__le16 *) (pd_scratch + 6));
- disk->cylinders = le16_to_cpu(*(__le16 *) (pd_scratch + 2));
- if (disk->can_lba)
- disk->capacity = le32_to_cpu(*(__le32 *) (pd_scratch + 120));
- else
- disk->capacity = disk->sectors * disk->heads * disk->cylinders;
-
- for (j = 0; j < PD_ID_LEN; j++)
- id[j ^ 1] = pd_scratch[j + PD_ID_OFF];
- j = PD_ID_LEN - 1;
- while ((j >= 0) && (id[j] <= 0x20))
- j--;
- j++;
- id[j] = 0;
-
- disk->removable = pd_scratch[0] & 0x80;
-
- printk("%s: %s, %s, %d blocks [%dM], (%d/%d/%d), %s media\n",
- disk->name, id,
- disk->drive ? "slave" : "master",
- disk->capacity, disk->capacity / 2048,
- disk->cylinders, disk->heads, disk->sectors,
- disk->removable ? "removable" : "fixed");
-
- if (disk->capacity)
- pd_init_dev_parms(disk);
- if (!disk->standby)
- pd_standby_off(disk);
-
- return Ok;
-}
-
-/* end of io request engine */
-
-static blk_status_t pd_queue_rq(struct blk_mq_hw_ctx *hctx,
- const struct blk_mq_queue_data *bd)
-{
- struct pd_unit *disk = hctx->queue->queuedata;
-
- spin_lock_irq(&pd_lock);
- if (!pd_req) {
- pd_req = bd->rq;
- blk_mq_start_request(pd_req);
- } else
- list_add_tail(&bd->rq->queuelist, &disk->rq_list);
- spin_unlock_irq(&pd_lock);
-
- run_fsm();
- return BLK_STS_OK;
-}
-
-static int pd_special_command(struct pd_unit *disk,
- enum action (*func)(struct pd_unit *disk))
-{
- struct request *rq;
- struct pd_req *req;
-
- rq = blk_mq_alloc_request(disk->gd->queue, REQ_OP_DRV_IN, 0);
- if (IS_ERR(rq))
- return PTR_ERR(rq);
- req = blk_mq_rq_to_pdu(rq);
-
- req->func = func;
- blk_execute_rq(rq, false);
- blk_mq_free_request(rq);
- return 0;
-}
-
-/* kernel glue structures */
-
-static int pd_open(struct block_device *bdev, fmode_t mode)
-{
- struct pd_unit *disk = bdev->bd_disk->private_data;
-
- mutex_lock(&pd_mutex);
- disk->access++;
-
- if (disk->removable) {
- pd_special_command(disk, pd_media_check);
- pd_special_command(disk, pd_door_lock);
- }
- mutex_unlock(&pd_mutex);
- return 0;
-}
-
-static int pd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
-{
- struct pd_unit *disk = bdev->bd_disk->private_data;
-
- if (disk->alt_geom) {
- geo->heads = PD_LOG_HEADS;
- geo->sectors = PD_LOG_SECTS;
- geo->cylinders = disk->capacity / (geo->heads * geo->sectors);
- } else {
- geo->heads = disk->heads;
- geo->sectors = disk->sectors;
- geo->cylinders = disk->cylinders;
- }
-
- return 0;
-}
-
-static int pd_ioctl(struct block_device *bdev, fmode_t mode,
- unsigned int cmd, unsigned long arg)
-{
- struct pd_unit *disk = bdev->bd_disk->private_data;
-
- switch (cmd) {
- case CDROMEJECT:
- mutex_lock(&pd_mutex);
- if (disk->access == 1)
- pd_special_command(disk, pd_eject);
- mutex_unlock(&pd_mutex);
- return 0;
- default:
- return -EINVAL;
- }
-}
-
-static void pd_release(struct gendisk *p, fmode_t mode)
-{
- struct pd_unit *disk = p->private_data;
-
- mutex_lock(&pd_mutex);
- if (!--disk->access && disk->removable)
- pd_special_command(disk, pd_door_unlock);
- mutex_unlock(&pd_mutex);
-}
-
-static unsigned int pd_check_events(struct gendisk *p, unsigned int clearing)
-{
- struct pd_unit *disk = p->private_data;
- int r;
- if (!disk->removable)
- return 0;
- pd_special_command(disk, pd_media_check);
- r = disk->changed;
- disk->changed = 0;
- return r ? DISK_EVENT_MEDIA_CHANGE : 0;
-}
-
-static const struct block_device_operations pd_fops = {
- .owner = THIS_MODULE,
- .open = pd_open,
- .release = pd_release,
- .ioctl = pd_ioctl,
- .compat_ioctl = pd_ioctl,
- .getgeo = pd_getgeo,
- .check_events = pd_check_events,
-};
-
-/* probing */
-
-static const struct blk_mq_ops pd_mq_ops = {
- .queue_rq = pd_queue_rq,
-};
-
-static int pd_probe_drive(struct pd_unit *disk, int autoprobe, int port,
- int mode, int unit, int protocol, int delay)
-{
- int index = disk - pd;
- int *parm = *drives[index];
- struct gendisk *p;
- int ret;
-
- disk->pi = &disk->pia;
- disk->access = 0;
- disk->changed = 1;
- disk->capacity = 0;
- disk->drive = parm[D_SLV];
- snprintf(disk->name, PD_NAMELEN, "%s%c", name, 'a' + index);
- disk->alt_geom = parm[D_GEO];
- disk->standby = parm[D_SBY];
- INIT_LIST_HEAD(&disk->rq_list);
-
- if (!pi_init(disk->pi, autoprobe, port, mode, unit, protocol, delay,
- pd_scratch, PI_PD, verbose, disk->name))
- return -ENXIO;
-
- memset(&disk->tag_set, 0, sizeof(disk->tag_set));
- disk->tag_set.ops = &pd_mq_ops;
- disk->tag_set.cmd_size = sizeof(struct pd_req);
- disk->tag_set.nr_hw_queues = 1;
- disk->tag_set.nr_maps = 1;
- disk->tag_set.queue_depth = 2;
- disk->tag_set.numa_node = NUMA_NO_NODE;
- disk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
- ret = blk_mq_alloc_tag_set(&disk->tag_set);
- if (ret)
- goto pi_release;
-
- p = blk_mq_alloc_disk(&disk->tag_set, disk);
- if (IS_ERR(p)) {
- ret = PTR_ERR(p);
- goto free_tag_set;
- }
- disk->gd = p;
-
- strcpy(p->disk_name, disk->name);
- p->fops = &pd_fops;
- p->major = major;
- p->first_minor = (disk - pd) << PD_BITS;
- p->minors = 1 << PD_BITS;
- p->events = DISK_EVENT_MEDIA_CHANGE;
- p->private_data = disk;
- blk_queue_max_hw_sectors(p->queue, cluster);
- blk_queue_bounce_limit(p->queue, BLK_BOUNCE_HIGH);
-
- if (disk->drive == -1) {
- for (disk->drive = 0; disk->drive <= 1; disk->drive++) {
- ret = pd_special_command(disk, pd_identify);
- if (ret == 0)
- break;
- }
- } else {
- ret = pd_special_command(disk, pd_identify);
- }
- if (ret)
- goto put_disk;
- set_capacity(disk->gd, disk->capacity);
- ret = add_disk(disk->gd);
- if (ret)
- goto cleanup_disk;
- return 0;
-cleanup_disk:
- put_disk(disk->gd);
-put_disk:
- put_disk(p);
- disk->gd = NULL;
-free_tag_set:
- blk_mq_free_tag_set(&disk->tag_set);
-pi_release:
- pi_release(disk->pi);
- return ret;
-}
-
-static int __init pd_init(void)
-{
- int found = 0, unit, pd_drive_count = 0;
- struct pd_unit *disk;
-
- if (disable)
- return -ENODEV;
-
- if (register_blkdev(major, name))
- return -ENODEV;
-
- printk("%s: %s version %s, major %d, cluster %d, nice %d\n",
- name, name, PD_VERSION, major, cluster, nice);
-
- par_drv = pi_register_driver(name);
- if (!par_drv) {
- pr_err("failed to register %s driver\n", name);
- goto out_unregister_blkdev;
- }
-
- for (unit = 0; unit < PD_UNITS; unit++) {
- int *parm = *drives[unit];
-
- if (parm[D_PRT])
- pd_drive_count++;
- }
-
- if (pd_drive_count == 0) { /* nothing spec'd - so autoprobe for 1 */
- if (!pd_probe_drive(pd, 1, -1, -1, -1, -1, -1))
- found++;
- } else {
- for (unit = 0, disk = pd; unit < PD_UNITS; unit++, disk++) {
- int *parm = *drives[unit];
- if (!parm[D_PRT])
- continue;
- if (!pd_probe_drive(disk, 0, parm[D_PRT], parm[D_MOD],
- parm[D_UNI], parm[D_PRO], parm[D_DLY]))
- found++;
- }
- }
- if (!found) {
- printk("%s: no valid drive found\n", name);
- goto out_pi_unregister_driver;
- }
-
- return 0;
-
-out_pi_unregister_driver:
- pi_unregister_driver(par_drv);
-out_unregister_blkdev:
- unregister_blkdev(major, name);
- return -ENODEV;
-}
-
-static void __exit pd_exit(void)
-{
- struct pd_unit *disk;
- int unit;
- unregister_blkdev(major, name);
- for (unit = 0, disk = pd; unit < PD_UNITS; unit++, disk++) {
- struct gendisk *p = disk->gd;
- if (p) {
- disk->gd = NULL;
- del_gendisk(p);
- put_disk(p);
- blk_mq_free_tag_set(&disk->tag_set);
- pi_release(disk->pi);
- }
- }
-}
-
-MODULE_LICENSE("GPL");
-module_init(pd_init)
-module_exit(pd_exit)
diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c
deleted file mode 100644
index eec1b9fde245..000000000000
--- a/drivers/block/paride/pf.c
+++ /dev/null
@@ -1,1057 +0,0 @@
-/*
- pf.c (c) 1997-8 Grant R. Guenther <grant@torque.net>
- Under the terms of the GNU General Public License.
-
- This is the high-level driver for parallel port ATAPI disk
- drives based on chips supported by the paride module.
-
- By default, the driver will autoprobe for a single parallel
- port ATAPI disk drive, but if their individual parameters are
- specified, the driver can handle up to 4 drives.
-
- The behaviour of the pf driver can be altered by setting
- some parameters from the insmod command line. The following
- parameters are adjustable:
-
- drive0 These four arguments can be arrays of
- drive1 1-7 integers as follows:
- drive2
- drive3 <prt>,<pro>,<uni>,<mod>,<slv>,<lun>,<dly>
-
- Where,
-
- <prt> is the base of the parallel port address for
- the corresponding drive. (required)
-
- <pro> is the protocol number for the adapter that
- supports this drive. These numbers are
- logged by 'paride' when the protocol modules
- are initialised. (0 if not given)
-
- <uni> for those adapters that support chained
- devices, this is the unit selector for the
- chain of devices on the given port. It should
- be zero for devices that don't support chaining.
- (0 if not given)
-
- <mod> this can be -1 to choose the best mode, or one
- of the mode numbers supported by the adapter.
- (-1 if not given)
-
- <slv> ATAPI CDroms can be jumpered to master or slave.
- Set this to 0 to choose the master drive, 1 to
- choose the slave, -1 (the default) to choose the
- first drive found.
-
- <lun> Some ATAPI devices support multiple LUNs.
- One example is the ATAPI PD/CD drive from
- Matshita/Panasonic. This device has a
- CD drive on LUN 0 and a PD drive on LUN 1.
- By default, the driver will search for the
- first LUN with a supported device. Set
- this parameter to force it to use a specific
- LUN. (default -1)
-
- <dly> some parallel ports require the driver to
- go more slowly. -1 sets a default value that
- should work with the chosen protocol. Otherwise,
- set this to a small integer, the larger it is
- the slower the port i/o. In some cases, setting
- this to zero will speed up the device. (default -1)
-
- major You may use this parameter to override the
- default major number (47) that this driver
- will use. Be sure to change the device
- name as well.
-
- name This parameter is a character string that
- contains the name the kernel will use for this
- device (in /proc output, for instance).
- (default "pf").
-
- cluster The driver will attempt to aggregate requests
- for adjacent blocks into larger multi-block
- clusters. The maximum cluster size (in 512
- byte sectors) is set with this parameter.
- (default 64)
-
- verbose This parameter controls the amount of logging
- that the driver will do. Set it to 0 for
- normal operation, 1 to see autoprobe progress
- messages, or 2 to see additional debugging
- output. (default 0)
-
- nice This parameter controls the driver's use of
- idle CPU time, at the expense of some speed.
-
- If this driver is built into the kernel, you can use the
- following command line parameters, with the same values
- as the corresponding module parameters listed above:
-
- pf.drive0
- pf.drive1
- pf.drive2
- pf.drive3
- pf.cluster
- pf.nice
-
- In addition, you can use the parameter pf.disable to disable
- the driver entirely.
-
-*/
-
-/* Changes:
-
- 1.01 GRG 1998.05.03 Changes for SMP. Eliminate sti().
- Fix for drives that don't clear STAT_ERR
- until after next CDB delivered.
- Small change in pf_completion to round
- up transfer size.
- 1.02 GRG 1998.06.16 Eliminated an Ugh
- 1.03 GRG 1998.08.16 Use HZ in loop timings, extra debugging
- 1.04 GRG 1998.09.24 Added jumbo support
-
-*/
-
-#define PF_VERSION "1.04"
-#define PF_MAJOR 47
-#define PF_NAME "pf"
-#define PF_UNITS 4
-
-#include <linux/types.h>
-
-/* Here are things one can override from the insmod command.
- Most are autoprobed by paride unless set here. Verbose is off
- by default.
-
-*/
-
-static bool verbose = 0;
-static int major = PF_MAJOR;
-static char *name = PF_NAME;
-static int cluster = 64;
-static int nice = 0;
-static int disable = 0;
-
-static int drive0[7] = { 0, 0, 0, -1, -1, -1, -1 };
-static int drive1[7] = { 0, 0, 0, -1, -1, -1, -1 };
-static int drive2[7] = { 0, 0, 0, -1, -1, -1, -1 };
-static int drive3[7] = { 0, 0, 0, -1, -1, -1, -1 };
-
-static int (*drives[4])[7] = {&drive0, &drive1, &drive2, &drive3};
-static int pf_drive_count;
-
-enum {D_PRT, D_PRO, D_UNI, D_MOD, D_SLV, D_LUN, D_DLY};
-
-/* end of parameters */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/fs.h>
-#include <linux/delay.h>
-#include <linux/hdreg.h>
-#include <linux/cdrom.h>
-#include <linux/spinlock.h>
-#include <linux/blk-mq.h>
-#include <linux/blkpg.h>
-#include <linux/mutex.h>
-#include <linux/uaccess.h>
-
-static DEFINE_MUTEX(pf_mutex);
-static DEFINE_SPINLOCK(pf_spin_lock);
-
-module_param(verbose, bool, 0644);
-module_param(major, int, 0);
-module_param(name, charp, 0);
-module_param(cluster, int, 0);
-module_param(nice, int, 0);
-module_param_array(drive0, int, NULL, 0);
-module_param_array(drive1, int, NULL, 0);
-module_param_array(drive2, int, NULL, 0);
-module_param_array(drive3, int, NULL, 0);
-
-#include "paride.h"
-#include "pseudo.h"
-
-/* constants for faking geometry numbers */
-
-#define PF_FD_MAX 8192 /* use FD geometry under this size */
-#define PF_FD_HDS 2
-#define PF_FD_SPT 18
-#define PF_HD_HDS 64
-#define PF_HD_SPT 32
-
-#define PF_MAX_RETRIES 5
-#define PF_TMO 800 /* interrupt timeout in jiffies */
-#define PF_SPIN_DEL 50 /* spin delay in micro-seconds */
-
-#define PF_SPIN (1000000*PF_TMO)/(HZ*PF_SPIN_DEL)
-
-#define STAT_ERR 0x00001
-#define STAT_INDEX 0x00002
-#define STAT_ECC 0x00004
-#define STAT_DRQ 0x00008
-#define STAT_SEEK 0x00010
-#define STAT_WRERR 0x00020
-#define STAT_READY 0x00040
-#define STAT_BUSY 0x00080
-
-#define ATAPI_REQ_SENSE 0x03
-#define ATAPI_LOCK 0x1e
-#define ATAPI_DOOR 0x1b
-#define ATAPI_MODE_SENSE 0x5a
-#define ATAPI_CAPACITY 0x25
-#define ATAPI_IDENTIFY 0x12
-#define ATAPI_READ_10 0x28
-#define ATAPI_WRITE_10 0x2a
-
-static int pf_open(struct block_device *bdev, fmode_t mode);
-static blk_status_t pf_queue_rq(struct blk_mq_hw_ctx *hctx,
- const struct blk_mq_queue_data *bd);
-static int pf_ioctl(struct block_device *bdev, fmode_t mode,
- unsigned int cmd, unsigned long arg);
-static int pf_getgeo(struct block_device *bdev, struct hd_geometry *geo);
-
-static void pf_release(struct gendisk *disk, fmode_t mode);
-
-static void do_pf_read(void);
-static void do_pf_read_start(void);
-static void do_pf_write(void);
-static void do_pf_write_start(void);
-static void do_pf_read_drq(void);
-static void do_pf_write_done(void);
-
-#define PF_NM 0
-#define PF_RO 1
-#define PF_RW 2
-
-#define PF_NAMELEN 8
-
-struct pf_unit {
- struct pi_adapter pia; /* interface to paride layer */
- struct pi_adapter *pi;
- int removable; /* removable media device ? */
- int media_status; /* media present ? WP ? */
- int drive; /* drive */
- int lun;
- int access; /* count of active opens ... */
- int present; /* device present ? */
- char name[PF_NAMELEN]; /* pf0, pf1, ... */
- struct gendisk *disk;
- struct blk_mq_tag_set tag_set;
- struct list_head rq_list;
-};
-
-static struct pf_unit units[PF_UNITS];
-
-static int pf_identify(struct pf_unit *pf);
-static void pf_lock(struct pf_unit *pf, int func);
-static void pf_eject(struct pf_unit *pf);
-static unsigned int pf_check_events(struct gendisk *disk,
- unsigned int clearing);
-
-static char pf_scratch[512]; /* scratch block buffer */
-
-/* the variables below are used mainly in the I/O request engine, which
- processes only one request at a time.
-*/
-
-static int pf_retries = 0; /* i/o error retry count */
-static int pf_busy = 0; /* request being processed ? */
-static struct request *pf_req; /* current request */
-static int pf_block; /* address of next requested block */
-static int pf_count; /* number of blocks still to do */
-static int pf_run; /* sectors in current cluster */
-static int pf_cmd; /* current command READ/WRITE */
-static struct pf_unit *pf_current;/* unit of current request */
-static int pf_mask; /* stopper for pseudo-int */
-static char *pf_buf; /* buffer for request in progress */
-static void *par_drv; /* reference of parport driver */
-
-/* kernel glue structures */
-
-static const struct block_device_operations pf_fops = {
- .owner = THIS_MODULE,
- .open = pf_open,
- .release = pf_release,
- .ioctl = pf_ioctl,
- .compat_ioctl = pf_ioctl,
- .getgeo = pf_getgeo,
- .check_events = pf_check_events,
-};
-
-static const struct blk_mq_ops pf_mq_ops = {
- .queue_rq = pf_queue_rq,
-};
-
-static int pf_open(struct block_device *bdev, fmode_t mode)
-{
- struct pf_unit *pf = bdev->bd_disk->private_data;
- int ret;
-
- mutex_lock(&pf_mutex);
- pf_identify(pf);
-
- ret = -ENODEV;
- if (pf->media_status == PF_NM)
- goto out;
-
- ret = -EROFS;
- if ((pf->media_status == PF_RO) && (mode & FMODE_WRITE))
- goto out;
-
- ret = 0;
- pf->access++;
- if (pf->removable)
- pf_lock(pf, 1);
-out:
- mutex_unlock(&pf_mutex);
- return ret;
-}
-
-static int pf_getgeo(struct block_device *bdev, struct hd_geometry *geo)
-{
- struct pf_unit *pf = bdev->bd_disk->private_data;
- sector_t capacity = get_capacity(pf->disk);
-
- if (capacity < PF_FD_MAX) {
- geo->cylinders = sector_div(capacity, PF_FD_HDS * PF_FD_SPT);
- geo->heads = PF_FD_HDS;
- geo->sectors = PF_FD_SPT;
- } else {
- geo->cylinders = sector_div(capacity, PF_HD_HDS * PF_HD_SPT);
- geo->heads = PF_HD_HDS;
- geo->sectors = PF_HD_SPT;
- }
-
- return 0;
-}
-
-static int pf_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg)
-{
- struct pf_unit *pf = bdev->bd_disk->private_data;
-
- if (cmd != CDROMEJECT)
- return -EINVAL;
-
- if (pf->access != 1)
- return -EBUSY;
- mutex_lock(&pf_mutex);
- pf_eject(pf);
- mutex_unlock(&pf_mutex);
-
- return 0;
-}
-
-static void pf_release(struct gendisk *disk, fmode_t mode)
-{
- struct pf_unit *pf = disk->private_data;
-
- mutex_lock(&pf_mutex);
- if (pf->access <= 0) {
- mutex_unlock(&pf_mutex);
- WARN_ON(1);
- return;
- }
-
- pf->access--;
-
- if (!pf->access && pf->removable)
- pf_lock(pf, 0);
-
- mutex_unlock(&pf_mutex);
-}
-
-static unsigned int pf_check_events(struct gendisk *disk, unsigned int clearing)
-{
- return DISK_EVENT_MEDIA_CHANGE;
-}
-
-static inline int status_reg(struct pf_unit *pf)
-{
- return pi_read_regr(pf->pi, 1, 6);
-}
-
-static inline int read_reg(struct pf_unit *pf, int reg)
-{
- return pi_read_regr(pf->pi, 0, reg);
-}
-
-static inline void write_reg(struct pf_unit *pf, int reg, int val)
-{
- pi_write_regr(pf->pi, 0, reg, val);
-}
-
-static int pf_wait(struct pf_unit *pf, int go, int stop, char *fun, char *msg)
-{
- int j, r, e, s, p;
-
- j = 0;
- while ((((r = status_reg(pf)) & go) || (stop && (!(r & stop))))
- && (j++ < PF_SPIN))
- udelay(PF_SPIN_DEL);
-
- if ((r & (STAT_ERR & stop)) || (j > PF_SPIN)) {
- s = read_reg(pf, 7);
- e = read_reg(pf, 1);
- p = read_reg(pf, 2);
- if (j > PF_SPIN)
- e |= 0x100;
- if (fun)
- printk("%s: %s %s: alt=0x%x stat=0x%x err=0x%x"
- " loop=%d phase=%d\n",
- pf->name, fun, msg, r, s, e, j, p);
- return (e << 8) + s;
- }
- return 0;
-}
-
-static int pf_command(struct pf_unit *pf, char *cmd, int dlen, char *fun)
-{
- pi_connect(pf->pi);
-
- write_reg(pf, 6, 0xa0+0x10*pf->drive);
-
- if (pf_wait(pf, STAT_BUSY | STAT_DRQ, 0, fun, "before command")) {
- pi_disconnect(pf->pi);
- return -1;
- }
-
- write_reg(pf, 4, dlen % 256);
- write_reg(pf, 5, dlen / 256);
- write_reg(pf, 7, 0xa0); /* ATAPI packet command */
-
- if (pf_wait(pf, STAT_BUSY, STAT_DRQ, fun, "command DRQ")) {
- pi_disconnect(pf->pi);
- return -1;
- }
-
- if (read_reg(pf, 2) != 1) {
- printk("%s: %s: command phase error\n", pf->name, fun);
- pi_disconnect(pf->pi);
- return -1;
- }
-
- pi_write_block(pf->pi, cmd, 12);
-
- return 0;
-}
-
-static int pf_completion(struct pf_unit *pf, char *buf, char *fun)
-{
- int r, s, n;
-
- r = pf_wait(pf, STAT_BUSY, STAT_DRQ | STAT_READY | STAT_ERR,
- fun, "completion");
-
- if ((read_reg(pf, 2) & 2) && (read_reg(pf, 7) & STAT_DRQ)) {
- n = (((read_reg(pf, 4) + 256 * read_reg(pf, 5)) +
- 3) & 0xfffc);
- pi_read_block(pf->pi, buf, n);
- }
-
- s = pf_wait(pf, STAT_BUSY, STAT_READY | STAT_ERR, fun, "data done");
-
- pi_disconnect(pf->pi);
-
- return (r ? r : s);
-}
-
-static void pf_req_sense(struct pf_unit *pf, int quiet)
-{
- char rs_cmd[12] =
- { ATAPI_REQ_SENSE, pf->lun << 5, 0, 0, 16, 0, 0, 0, 0, 0, 0, 0 };
- char buf[16];
- int r;
-
- r = pf_command(pf, rs_cmd, 16, "Request sense");
- mdelay(1);
- if (!r)
- pf_completion(pf, buf, "Request sense");
-
- if ((!r) && (!quiet))
- printk("%s: Sense key: %x, ASC: %x, ASQ: %x\n",
- pf->name, buf[2] & 0xf, buf[12], buf[13]);
-}
-
-static int pf_atapi(struct pf_unit *pf, char *cmd, int dlen, char *buf, char *fun)
-{
- int r;
-
- r = pf_command(pf, cmd, dlen, fun);
- mdelay(1);
- if (!r)
- r = pf_completion(pf, buf, fun);
- if (r)
- pf_req_sense(pf, !fun);
-
- return r;
-}
-
-static void pf_lock(struct pf_unit *pf, int func)
-{
- char lo_cmd[12] = { ATAPI_LOCK, pf->lun << 5, 0, 0, func, 0, 0, 0, 0, 0, 0, 0 };
-
- pf_atapi(pf, lo_cmd, 0, pf_scratch, func ? "lock" : "unlock");
-}
-
-static void pf_eject(struct pf_unit *pf)
-{
- char ej_cmd[12] = { ATAPI_DOOR, pf->lun << 5, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0 };
-
- pf_lock(pf, 0);
- pf_atapi(pf, ej_cmd, 0, pf_scratch, "eject");
-}
-
-#define PF_RESET_TMO 30 /* in tenths of a second */
-
-static void pf_sleep(int cs)
-{
- schedule_timeout_interruptible(cs);
-}
-
-/* the ATAPI standard actually specifies the contents of all 7 registers
- after a reset, but the specification is ambiguous concerning the last
- two bytes, and different drives interpret the standard differently.
- */
-
-static int pf_reset(struct pf_unit *pf)
-{
- int i, k, flg;
- int expect[5] = { 1, 1, 1, 0x14, 0xeb };
-
- pi_connect(pf->pi);
- write_reg(pf, 6, 0xa0+0x10*pf->drive);
- write_reg(pf, 7, 8);
-
- pf_sleep(20 * HZ / 1000);
-
- k = 0;
- while ((k++ < PF_RESET_TMO) && (status_reg(pf) & STAT_BUSY))
- pf_sleep(HZ / 10);
-
- flg = 1;
- for (i = 0; i < 5; i++)
- flg &= (read_reg(pf, i + 1) == expect[i]);
-
- if (verbose) {
- printk("%s: Reset (%d) signature = ", pf->name, k);
- for (i = 0; i < 5; i++)
- printk("%3x", read_reg(pf, i + 1));
- if (!flg)
- printk(" (incorrect)");
- printk("\n");
- }
-
- pi_disconnect(pf->pi);
- return flg - 1;
-}
-
-static void pf_mode_sense(struct pf_unit *pf)
-{
- char ms_cmd[12] =
- { ATAPI_MODE_SENSE, pf->lun << 5, 0, 0, 0, 0, 0, 0, 8, 0, 0, 0 };
- char buf[8];
-
- pf_atapi(pf, ms_cmd, 8, buf, "mode sense");
- pf->media_status = PF_RW;
- if (buf[3] & 0x80)
- pf->media_status = PF_RO;
-}
-
-static void xs(char *buf, char *targ, int offs, int len)
-{
- int j, k, l;
-
- j = 0;
- l = 0;
- for (k = 0; k < len; k++)
- if ((buf[k + offs] != 0x20) || (buf[k + offs] != l))
- l = targ[j++] = buf[k + offs];
- if (l == 0x20)
- j--;
- targ[j] = 0;
-}
-
-static int xl(char *buf, int offs)
-{
- int v, k;
-
- v = 0;
- for (k = 0; k < 4; k++)
- v = v * 256 + (buf[k + offs] & 0xff);
- return v;
-}
-
-static void pf_get_capacity(struct pf_unit *pf)
-{
- char rc_cmd[12] = { ATAPI_CAPACITY, pf->lun << 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
- char buf[8];
- int bs;
-
- if (pf_atapi(pf, rc_cmd, 8, buf, "get capacity")) {
- pf->media_status = PF_NM;
- return;
- }
- set_capacity(pf->disk, xl(buf, 0) + 1);
- bs = xl(buf, 4);
- if (bs != 512) {
- set_capacity(pf->disk, 0);
- if (verbose)
- printk("%s: Drive %d, LUN %d,"
- " unsupported block size %d\n",
- pf->name, pf->drive, pf->lun, bs);
- }
-}
-
-static int pf_identify(struct pf_unit *pf)
-{
- int dt, s;
- char *ms[2] = { "master", "slave" };
- char mf[10], id[18];
- char id_cmd[12] =
- { ATAPI_IDENTIFY, pf->lun << 5, 0, 0, 36, 0, 0, 0, 0, 0, 0, 0 };
- char buf[36];
-
- s = pf_atapi(pf, id_cmd, 36, buf, "identify");
- if (s)
- return -1;
-
- dt = buf[0] & 0x1f;
- if ((dt != 0) && (dt != 7)) {
- if (verbose)
- printk("%s: Drive %d, LUN %d, unsupported type %d\n",
- pf->name, pf->drive, pf->lun, dt);
- return -1;
- }
-
- xs(buf, mf, 8, 8);
- xs(buf, id, 16, 16);
-
- pf->removable = (buf[1] & 0x80);
-
- pf_mode_sense(pf);
- pf_mode_sense(pf);
- pf_mode_sense(pf);
-
- pf_get_capacity(pf);
-
- printk("%s: %s %s, %s LUN %d, type %d",
- pf->name, mf, id, ms[pf->drive], pf->lun, dt);
- if (pf->removable)
- printk(", removable");
- if (pf->media_status == PF_NM)
- printk(", no media\n");
- else {
- if (pf->media_status == PF_RO)
- printk(", RO");
- printk(", %llu blocks\n",
- (unsigned long long)get_capacity(pf->disk));
- }
- return 0;
-}
-
-/*
- * returns 0, with id set if drive is detected, otherwise an error code.
- */
-static int pf_probe(struct pf_unit *pf)
-{
- if (pf->drive == -1) {
- for (pf->drive = 0; pf->drive <= 1; pf->drive++)
- if (!pf_reset(pf)) {
- if (pf->lun != -1)
- return pf_identify(pf);
- else
- for (pf->lun = 0; pf->lun < 8; pf->lun++)
- if (!pf_identify(pf))
- return 0;
- }
- } else {
- if (pf_reset(pf))
- return -1;
- if (pf->lun != -1)
- return pf_identify(pf);
- for (pf->lun = 0; pf->lun < 8; pf->lun++)
- if (!pf_identify(pf))
- return 0;
- }
- return -ENODEV;
-}
-
-/* The i/o request engine */
-
-static int pf_start(struct pf_unit *pf, int cmd, int b, int c)
-{
- int i;
- char io_cmd[12] = { cmd, pf->lun << 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
-
- for (i = 0; i < 4; i++) {
- io_cmd[5 - i] = b & 0xff;
- b = b >> 8;
- }
-
- io_cmd[8] = c & 0xff;
- io_cmd[7] = (c >> 8) & 0xff;
-
- i = pf_command(pf, io_cmd, c * 512, "start i/o");
-
- mdelay(1);
-
- return i;
-}
-
-static int pf_ready(void)
-{
- return (((status_reg(pf_current) & (STAT_BUSY | pf_mask)) == pf_mask));
-}
-
-static int pf_queue;
-
-static int set_next_request(void)
-{
- struct pf_unit *pf;
- int old_pos = pf_queue;
-
- do {
- pf = &units[pf_queue];
- if (++pf_queue == PF_UNITS)
- pf_queue = 0;
- if (pf->present && !list_empty(&pf->rq_list)) {
- pf_req = list_first_entry(&pf->rq_list, struct request,
- queuelist);
- list_del_init(&pf_req->queuelist);
- blk_mq_start_request(pf_req);
- break;
- }
- } while (pf_queue != old_pos);
-
- return pf_req != NULL;
-}
-
-static void pf_end_request(blk_status_t err)
-{
- if (!pf_req)
- return;
- if (!blk_update_request(pf_req, err, blk_rq_cur_bytes(pf_req))) {
- __blk_mq_end_request(pf_req, err);
- pf_req = NULL;
- }
-}
-
-static void pf_request(void)
-{
- if (pf_busy)
- return;
-repeat:
- if (!pf_req && !set_next_request())
- return;
-
- pf_current = pf_req->q->disk->private_data;
- pf_block = blk_rq_pos(pf_req);
- pf_run = blk_rq_sectors(pf_req);
- pf_count = blk_rq_cur_sectors(pf_req);
-
- if (pf_block + pf_count > get_capacity(pf_req->q->disk)) {
- pf_end_request(BLK_STS_IOERR);
- goto repeat;
- }
-
- pf_cmd = rq_data_dir(pf_req);
- pf_buf = bio_data(pf_req->bio);
- pf_retries = 0;
-
- pf_busy = 1;
- if (pf_cmd == READ)
- pi_do_claimed(pf_current->pi, do_pf_read);
- else if (pf_cmd == WRITE)
- pi_do_claimed(pf_current->pi, do_pf_write);
- else {
- pf_busy = 0;
- pf_end_request(BLK_STS_IOERR);
- goto repeat;
- }
-}
-
-static blk_status_t pf_queue_rq(struct blk_mq_hw_ctx *hctx,
- const struct blk_mq_queue_data *bd)
-{
- struct pf_unit *pf = hctx->queue->queuedata;
-
- spin_lock_irq(&pf_spin_lock);
- list_add_tail(&bd->rq->queuelist, &pf->rq_list);
- pf_request();
- spin_unlock_irq(&pf_spin_lock);
-
- return BLK_STS_OK;
-}
-
-static int pf_next_buf(void)
-{
- unsigned long saved_flags;
-
- pf_count--;
- pf_run--;
- pf_buf += 512;
- pf_block++;
- if (!pf_run)
- return 1;
- if (!pf_count) {
- spin_lock_irqsave(&pf_spin_lock, saved_flags);
- pf_end_request(0);
- spin_unlock_irqrestore(&pf_spin_lock, saved_flags);
- if (!pf_req)
- return 1;
- pf_count = blk_rq_cur_sectors(pf_req);
- pf_buf = bio_data(pf_req->bio);
- }
- return 0;
-}
-
-static inline void next_request(blk_status_t err)
-{
- unsigned long saved_flags;
-
- spin_lock_irqsave(&pf_spin_lock, saved_flags);
- pf_end_request(err);
- pf_busy = 0;
- pf_request();
- spin_unlock_irqrestore(&pf_spin_lock, saved_flags);
-}
-
-/* detach from the calling context - in case the spinlock is held */
-static void do_pf_read(void)
-{
- ps_set_intr(do_pf_read_start, NULL, 0, nice);
-}
-
-static void do_pf_read_start(void)
-{
- pf_busy = 1;
-
- if (pf_start(pf_current, ATAPI_READ_10, pf_block, pf_run)) {
- pi_disconnect(pf_current->pi);
- if (pf_retries < PF_MAX_RETRIES) {
- pf_retries++;
- pi_do_claimed(pf_current->pi, do_pf_read_start);
- return;
- }
- next_request(BLK_STS_IOERR);
- return;
- }
- pf_mask = STAT_DRQ;
- ps_set_intr(do_pf_read_drq, pf_ready, PF_TMO, nice);
-}
-
-static void do_pf_read_drq(void)
-{
- while (1) {
- if (pf_wait(pf_current, STAT_BUSY, STAT_DRQ | STAT_ERR,
- "read block", "completion") & STAT_ERR) {
- pi_disconnect(pf_current->pi);
- if (pf_retries < PF_MAX_RETRIES) {
- pf_req_sense(pf_current, 0);
- pf_retries++;
- pi_do_claimed(pf_current->pi, do_pf_read_start);
- return;
- }
- next_request(BLK_STS_IOERR);
- return;
- }
- pi_read_block(pf_current->pi, pf_buf, 512);
- if (pf_next_buf())
- break;
- }
- pi_disconnect(pf_current->pi);
- next_request(0);
-}
-
-static void do_pf_write(void)
-{
- ps_set_intr(do_pf_write_start, NULL, 0, nice);
-}
-
-static void do_pf_write_start(void)
-{
- pf_busy = 1;
-
- if (pf_start(pf_current, ATAPI_WRITE_10, pf_block, pf_run)) {
- pi_disconnect(pf_current->pi);
- if (pf_retries < PF_MAX_RETRIES) {
- pf_retries++;
- pi_do_claimed(pf_current->pi, do_pf_write_start);
- return;
- }
- next_request(BLK_STS_IOERR);
- return;
- }
-
- while (1) {
- if (pf_wait(pf_current, STAT_BUSY, STAT_DRQ | STAT_ERR,
- "write block", "data wait") & STAT_ERR) {
- pi_disconnect(pf_current->pi);
- if (pf_retries < PF_MAX_RETRIES) {
- pf_retries++;
- pi_do_claimed(pf_current->pi, do_pf_write_start);
- return;
- }
- next_request(BLK_STS_IOERR);
- return;
- }
- pi_write_block(pf_current->pi, pf_buf, 512);
- if (pf_next_buf())
- break;
- }
- pf_mask = 0;
- ps_set_intr(do_pf_write_done, pf_ready, PF_TMO, nice);
-}
-
-static void do_pf_write_done(void)
-{
- if (pf_wait(pf_current, STAT_BUSY, 0, "write block", "done") & STAT_ERR) {
- pi_disconnect(pf_current->pi);
- if (pf_retries < PF_MAX_RETRIES) {
- pf_retries++;
- pi_do_claimed(pf_current->pi, do_pf_write_start);
- return;
- }
- next_request(BLK_STS_IOERR);
- return;
- }
- pi_disconnect(pf_current->pi);
- next_request(0);
-}
-
-static int __init pf_init_unit(struct pf_unit *pf, bool autoprobe, int port,
- int mode, int unit, int protocol, int delay, int ms)
-{
- struct gendisk *disk;
- int ret;
-
- ret = blk_mq_alloc_sq_tag_set(&pf->tag_set, &pf_mq_ops, 1,
- BLK_MQ_F_SHOULD_MERGE);
- if (ret)
- return ret;
-
- disk = blk_mq_alloc_disk(&pf->tag_set, pf);
- if (IS_ERR(disk)) {
- ret = PTR_ERR(disk);
- goto out_free_tag_set;
- }
- disk->major = major;
- disk->first_minor = pf - units;
- disk->minors = 1;
- strcpy(disk->disk_name, pf->name);
- disk->fops = &pf_fops;
- disk->flags |= GENHD_FL_NO_PART;
- disk->events = DISK_EVENT_MEDIA_CHANGE;
- disk->private_data = pf;
-
- blk_queue_max_segments(disk->queue, cluster);
- blk_queue_bounce_limit(disk->queue, BLK_BOUNCE_HIGH);
-
- INIT_LIST_HEAD(&pf->rq_list);
- pf->disk = disk;
- pf->pi = &pf->pia;
- pf->media_status = PF_NM;
- pf->drive = (*drives[disk->first_minor])[D_SLV];
- pf->lun = (*drives[disk->first_minor])[D_LUN];
- snprintf(pf->name, PF_NAMELEN, "%s%d", name, disk->first_minor);
-
- if (!pi_init(pf->pi, autoprobe, port, mode, unit, protocol, delay,
- pf_scratch, PI_PF, verbose, pf->name)) {
- ret = -ENODEV;
- goto out_free_disk;
- }
- ret = pf_probe(pf);
- if (ret)
- goto out_pi_release;
-
- ret = add_disk(disk);
- if (ret)
- goto out_pi_release;
- pf->present = 1;
- return 0;
-
-out_pi_release:
- pi_release(pf->pi);
-out_free_disk:
- put_disk(pf->disk);
-out_free_tag_set:
- blk_mq_free_tag_set(&pf->tag_set);
- return ret;
-}
-
-static int __init pf_init(void)
-{ /* preliminary initialisation */
- struct pf_unit *pf;
- int found = 0, unit;
-
- if (disable)
- return -EINVAL;
-
- if (register_blkdev(major, name))
- return -EBUSY;
-
- printk("%s: %s version %s, major %d, cluster %d, nice %d\n",
- name, name, PF_VERSION, major, cluster, nice);
-
- par_drv = pi_register_driver(name);
- if (!par_drv) {
- pr_err("failed to register %s driver\n", name);
- goto out_unregister_blkdev;
- }
-
- for (unit = 0; unit < PF_UNITS; unit++) {
- if (!(*drives[unit])[D_PRT])
- pf_drive_count++;
- }
-
- pf = units;
- if (pf_drive_count == 0) {
- if (pf_init_unit(pf, 1, -1, -1, -1, -1, -1, verbose))
- found++;
- } else {
- for (unit = 0; unit < PF_UNITS; unit++, pf++) {
- int *conf = *drives[unit];
- if (!conf[D_PRT])
- continue;
- if (pf_init_unit(pf, 0, conf[D_PRT], conf[D_MOD],
- conf[D_UNI], conf[D_PRO], conf[D_DLY],
- verbose))
- found++;
- }
- }
- if (!found) {
- printk("%s: No ATAPI disk detected\n", name);
- goto out_unregister_pi_driver;
- }
- pf_busy = 0;
- return 0;
-
-out_unregister_pi_driver:
- pi_unregister_driver(par_drv);
-out_unregister_blkdev:
- unregister_blkdev(major, name);
- return -ENODEV;
-}
-
-static void __exit pf_exit(void)
-{
- struct pf_unit *pf;
- int unit;
-
- for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) {
- if (!pf->present)
- continue;
- del_gendisk(pf->disk);
- put_disk(pf->disk);
- blk_mq_free_tag_set(&pf->tag_set);
- pi_release(pf->pi);
- }
-
- unregister_blkdev(major, name);
-}
-
-MODULE_LICENSE("GPL");
-module_init(pf_init)
-module_exit(pf_exit)
diff --git a/drivers/block/paride/pg.c b/drivers/block/paride/pg.c
deleted file mode 100644
index 3b5882bfb736..000000000000
--- a/drivers/block/paride/pg.c
+++ /dev/null
@@ -1,734 +0,0 @@
-/*
- pg.c (c) 1998 Grant R. Guenther <grant@torque.net>
- Under the terms of the GNU General Public License.
-
- The pg driver provides a simple character device interface for
- sending ATAPI commands to a device. With the exception of the
- ATAPI reset operation, all operations are performed by a pair
- of read and write operations to the appropriate /dev/pgN device.
- A write operation delivers a command and any outbound data in
- a single buffer. Normally, the write will succeed unless the
- device is offline or malfunctioning, or there is already another
- command pending. If the write succeeds, it should be followed
- immediately by a read operation, to obtain any returned data and
- status information. A read will fail if there is no operation
- in progress.
-
- As a special case, the device can be reset with a write operation,
- and in this case, no following read is expected, or permitted.
-
- There are no ioctl() operations. Any single operation
- may transfer at most PG_MAX_DATA bytes. Note that the driver must
- copy the data through an internal buffer. In keeping with all
- current ATAPI devices, command packets are assumed to be exactly
- 12 bytes in length.
-
- To permit future changes to this interface, the headers in the
- read and write buffers contain a single character "magic" flag.
- Currently this flag must be the character "P".
-
- By default, the driver will autoprobe for a single parallel
- port ATAPI device, but if their individual parameters are
- specified, the driver can handle up to 4 devices.
-
- To use this device, you must have the following device
- special files defined:
-
- /dev/pg0 c 97 0
- /dev/pg1 c 97 1
- /dev/pg2 c 97 2
- /dev/pg3 c 97 3
-
- (You'll need to change the 97 to something else if you use
- the 'major' parameter to install the driver on a different
- major number.)
-
- The behaviour of the pg driver can be altered by setting
- some parameters from the insmod command line. The following
- parameters are adjustable:
-
- drive0 These four arguments can be arrays of
- drive1 1-6 integers as follows:
- drive2
- drive3 <prt>,<pro>,<uni>,<mod>,<slv>,<dly>
-
- Where,
-
- <prt> is the base of the parallel port address for
- the corresponding drive. (required)
-
- <pro> is the protocol number for the adapter that
- supports this drive. These numbers are
- logged by 'paride' when the protocol modules
- are initialised. (0 if not given)
-
- <uni> for those adapters that support chained
- devices, this is the unit selector for the
- chain of devices on the given port. It should
- be zero for devices that don't support chaining.
- (0 if not given)
-
- <mod> this can be -1 to choose the best mode, or one
- of the mode numbers supported by the adapter.
- (-1 if not given)
-
- <slv> ATAPI devices can be jumpered to master or slave.
- Set this to 0 to choose the master drive, 1 to
- choose the slave, -1 (the default) to choose the
- first drive found.
-
- <dly> some parallel ports require the driver to
- go more slowly. -1 sets a default value that
- should work with the chosen protocol. Otherwise,
- set this to a small integer, the larger it is
- the slower the port i/o. In some cases, setting
- this to zero will speed up the device. (default -1)
-
- major You may use this parameter to override the
- default major number (97) that this driver
- will use. Be sure to change the device
- name as well.
-
- name This parameter is a character string that
- contains the name the kernel will use for this
- device (in /proc output, for instance).
- (default "pg").
-
- verbose This parameter controls the amount of logging
- that is done by the driver. Set it to 0 for
- quiet operation, to 1 to enable progress
- messages while the driver probes for devices,
- or to 2 for full debug logging. (default 0)
-
- If this driver is built into the kernel, you can use
- the following command line parameters, with the same values
- as the corresponding module parameters listed above:
-
- pg.drive0
- pg.drive1
- pg.drive2
- pg.drive3
-
- In addition, you can use the parameter pg.disable to disable
- the driver entirely.
-
-*/
-
-/* Changes:
-
- 1.01 GRG 1998.06.16 Bug fixes
- 1.02 GRG 1998.09.24 Added jumbo support
-
-*/
-
-#define PG_VERSION "1.02"
-#define PG_MAJOR 97
-#define PG_NAME "pg"
-#define PG_UNITS 4
-
-#ifndef PI_PG
-#define PI_PG 4
-#endif
-
-#include <linux/types.h>
-/* Here are things one can override from the insmod command.
- Most are autoprobed by paride unless set here. Verbose is 0
- by default.
-
-*/
-
-static int verbose;
-static int major = PG_MAJOR;
-static char *name = PG_NAME;
-static int disable = 0;
-
-static int drive0[6] = { 0, 0, 0, -1, -1, -1 };
-static int drive1[6] = { 0, 0, 0, -1, -1, -1 };
-static int drive2[6] = { 0, 0, 0, -1, -1, -1 };
-static int drive3[6] = { 0, 0, 0, -1, -1, -1 };
-
-static int (*drives[4])[6] = {&drive0, &drive1, &drive2, &drive3};
-static int pg_drive_count;
-
-enum {D_PRT, D_PRO, D_UNI, D_MOD, D_SLV, D_DLY};
-
-/* end of parameters */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/fs.h>
-#include <linux/delay.h>
-#include <linux/slab.h>
-#include <linux/mtio.h>
-#include <linux/pg.h>
-#include <linux/device.h>
-#include <linux/sched.h> /* current, TASK_* */
-#include <linux/mutex.h>
-#include <linux/jiffies.h>
-
-#include <linux/uaccess.h>
-
-module_param(verbose, int, 0644);
-module_param(major, int, 0);
-module_param(name, charp, 0);
-module_param_array(drive0, int, NULL, 0);
-module_param_array(drive1, int, NULL, 0);
-module_param_array(drive2, int, NULL, 0);
-module_param_array(drive3, int, NULL, 0);
-
-#include "paride.h"
-
-#define PG_SPIN_DEL 50 /* spin delay in micro-seconds */
-#define PG_SPIN 200
-#define PG_TMO HZ
-#define PG_RESET_TMO 10*HZ
-
-#define STAT_ERR 0x01
-#define STAT_INDEX 0x02
-#define STAT_ECC 0x04
-#define STAT_DRQ 0x08
-#define STAT_SEEK 0x10
-#define STAT_WRERR 0x20
-#define STAT_READY 0x40
-#define STAT_BUSY 0x80
-
-#define ATAPI_IDENTIFY 0x12
-
-static DEFINE_MUTEX(pg_mutex);
-static int pg_open(struct inode *inode, struct file *file);
-static int pg_release(struct inode *inode, struct file *file);
-static ssize_t pg_read(struct file *filp, char __user *buf,
- size_t count, loff_t * ppos);
-static ssize_t pg_write(struct file *filp, const char __user *buf,
- size_t count, loff_t * ppos);
-static int pg_detect(void);
-
-#define PG_NAMELEN 8
-
-struct pg {
- struct pi_adapter pia; /* interface to paride layer */
- struct pi_adapter *pi;
- int busy; /* write done, read expected */
- int start; /* jiffies at command start */
- int dlen; /* transfer size requested */
- unsigned long timeout; /* timeout requested */
- int status; /* last sense key */
- int drive; /* drive */
- unsigned long access; /* count of active opens ... */
- int present; /* device present ? */
- char *bufptr;
- char name[PG_NAMELEN]; /* pg0, pg1, ... */
-};
-
-static struct pg devices[PG_UNITS];
-
-static int pg_identify(struct pg *dev, int log);
-
-static char pg_scratch[512]; /* scratch block buffer */
-
-static struct class *pg_class;
-static void *par_drv; /* reference of parport driver */
-
-/* kernel glue structures */
-
-static const struct file_operations pg_fops = {
- .owner = THIS_MODULE,
- .read = pg_read,
- .write = pg_write,
- .open = pg_open,
- .release = pg_release,
- .llseek = noop_llseek,
-};
-
-static void pg_init_units(void)
-{
- int unit;
-
- pg_drive_count = 0;
- for (unit = 0; unit < PG_UNITS; unit++) {
- int *parm = *drives[unit];
- struct pg *dev = &devices[unit];
- dev->pi = &dev->pia;
- clear_bit(0, &dev->access);
- dev->busy = 0;
- dev->present = 0;
- dev->bufptr = NULL;
- dev->drive = parm[D_SLV];
- snprintf(dev->name, PG_NAMELEN, "%s%c", name, 'a'+unit);
- if (parm[D_PRT])
- pg_drive_count++;
- }
-}
-
-static inline int status_reg(struct pg *dev)
-{
- return pi_read_regr(dev->pi, 1, 6);
-}
-
-static inline int read_reg(struct pg *dev, int reg)
-{
- return pi_read_regr(dev->pi, 0, reg);
-}
-
-static inline void write_reg(struct pg *dev, int reg, int val)
-{
- pi_write_regr(dev->pi, 0, reg, val);
-}
-
-static inline u8 DRIVE(struct pg *dev)
-{
- return 0xa0+0x10*dev->drive;
-}
-
-static void pg_sleep(int cs)
-{
- schedule_timeout_interruptible(cs);
-}
-
-static int pg_wait(struct pg *dev, int go, int stop, unsigned long tmo, char *msg)
-{
- int j, r, e, s, p, to;
-
- dev->status = 0;
-
- j = 0;
- while ((((r = status_reg(dev)) & go) || (stop && (!(r & stop))))
- && time_before(jiffies, tmo)) {
- if (j++ < PG_SPIN)
- udelay(PG_SPIN_DEL);
- else
- pg_sleep(1);
- }
-
- to = time_after_eq(jiffies, tmo);
-
- if ((r & (STAT_ERR & stop)) || to) {
- s = read_reg(dev, 7);
- e = read_reg(dev, 1);
- p = read_reg(dev, 2);
- if (verbose > 1)
- printk("%s: %s: stat=0x%x err=0x%x phase=%d%s\n",
- dev->name, msg, s, e, p, to ? " timeout" : "");
- if (to)
- e |= 0x100;
- dev->status = (e >> 4) & 0xff;
- return -1;
- }
- return 0;
-}
-
-static int pg_command(struct pg *dev, char *cmd, int dlen, unsigned long tmo)
-{
- int k;
-
- pi_connect(dev->pi);
-
- write_reg(dev, 6, DRIVE(dev));
-
- if (pg_wait(dev, STAT_BUSY | STAT_DRQ, 0, tmo, "before command"))
- goto fail;
-
- write_reg(dev, 4, dlen % 256);
- write_reg(dev, 5, dlen / 256);
- write_reg(dev, 7, 0xa0); /* ATAPI packet command */
-
- if (pg_wait(dev, STAT_BUSY, STAT_DRQ, tmo, "command DRQ"))
- goto fail;
-
- if (read_reg(dev, 2) != 1) {
- printk("%s: command phase error\n", dev->name);
- goto fail;
- }
-
- pi_write_block(dev->pi, cmd, 12);
-
- if (verbose > 1) {
- printk("%s: Command sent, dlen=%d packet= ", dev->name, dlen);
- for (k = 0; k < 12; k++)
- printk("%02x ", cmd[k] & 0xff);
- printk("\n");
- }
- return 0;
-fail:
- pi_disconnect(dev->pi);
- return -1;
-}
-
-static int pg_completion(struct pg *dev, char *buf, unsigned long tmo)
-{
- int r, d, n, p;
-
- r = pg_wait(dev, STAT_BUSY, STAT_DRQ | STAT_READY | STAT_ERR,
- tmo, "completion");
-
- dev->dlen = 0;
-
- while (read_reg(dev, 7) & STAT_DRQ) {
- d = (read_reg(dev, 4) + 256 * read_reg(dev, 5));
- n = ((d + 3) & 0xfffc);
- p = read_reg(dev, 2) & 3;
- if (p == 0)
- pi_write_block(dev->pi, buf, n);
- if (p == 2)
- pi_read_block(dev->pi, buf, n);
- if (verbose > 1)
- printk("%s: %s %d bytes\n", dev->name,
- p ? "Read" : "Write", n);
- dev->dlen += (1 - p) * d;
- buf += d;
- r = pg_wait(dev, STAT_BUSY, STAT_DRQ | STAT_READY | STAT_ERR,
- tmo, "completion");
- }
-
- pi_disconnect(dev->pi);
-
- return r;
-}
-
-static int pg_reset(struct pg *dev)
-{
- int i, k, err;
- int expect[5] = { 1, 1, 1, 0x14, 0xeb };
- int got[5];
-
- pi_connect(dev->pi);
- write_reg(dev, 6, DRIVE(dev));
- write_reg(dev, 7, 8);
-
- pg_sleep(20 * HZ / 1000);
-
- k = 0;
- while ((k++ < PG_RESET_TMO) && (status_reg(dev) & STAT_BUSY))
- pg_sleep(1);
-
- for (i = 0; i < 5; i++)
- got[i] = read_reg(dev, i + 1);
-
- err = memcmp(expect, got, sizeof(got)) ? -1 : 0;
-
- if (verbose) {
- printk("%s: Reset (%d) signature = ", dev->name, k);
- for (i = 0; i < 5; i++)
- printk("%3x", got[i]);
- if (err)
- printk(" (incorrect)");
- printk("\n");
- }
-
- pi_disconnect(dev->pi);
- return err;
-}
-
-static void xs(char *buf, char *targ, int len)
-{
- char l = '\0';
- int k;
-
- for (k = 0; k < len; k++) {
- char c = *buf++;
- if (c != ' ' && c != l)
- l = *targ++ = c;
- }
- if (l == ' ')
- targ--;
- *targ = '\0';
-}
-
-static int pg_identify(struct pg *dev, int log)
-{
- int s;
- char *ms[2] = { "master", "slave" };
- char mf[10], id[18];
- char id_cmd[12] = { ATAPI_IDENTIFY, 0, 0, 0, 36, 0, 0, 0, 0, 0, 0, 0 };
- char buf[36];
-
- s = pg_command(dev, id_cmd, 36, jiffies + PG_TMO);
- if (s)
- return -1;
- s = pg_completion(dev, buf, jiffies + PG_TMO);
- if (s)
- return -1;
-
- if (log) {
- xs(buf + 8, mf, 8);
- xs(buf + 16, id, 16);
- printk("%s: %s %s, %s\n", dev->name, mf, id, ms[dev->drive]);
- }
-
- return 0;
-}
-
-/*
- * returns 0, with id set if drive is detected
- * -1, if drive detection failed
- */
-static int pg_probe(struct pg *dev)
-{
- if (dev->drive == -1) {
- for (dev->drive = 0; dev->drive <= 1; dev->drive++)
- if (!pg_reset(dev))
- return pg_identify(dev, 1);
- } else {
- if (!pg_reset(dev))
- return pg_identify(dev, 1);
- }
- return -1;
-}
-
-static int pg_detect(void)
-{
- struct pg *dev = &devices[0];
- int k, unit;
-
- printk("%s: %s version %s, major %d\n", name, name, PG_VERSION, major);
-
- par_drv = pi_register_driver(name);
- if (!par_drv) {
- pr_err("failed to register %s driver\n", name);
- return -1;
- }
-
- k = 0;
- if (pg_drive_count == 0) {
- if (pi_init(dev->pi, 1, -1, -1, -1, -1, -1, pg_scratch,
- PI_PG, verbose, dev->name)) {
- if (!pg_probe(dev)) {
- dev->present = 1;
- k++;
- } else
- pi_release(dev->pi);
- }
-
- } else
- for (unit = 0; unit < PG_UNITS; unit++, dev++) {
- int *parm = *drives[unit];
- if (!parm[D_PRT])
- continue;
- if (pi_init(dev->pi, 0, parm[D_PRT], parm[D_MOD],
- parm[D_UNI], parm[D_PRO], parm[D_DLY],
- pg_scratch, PI_PG, verbose, dev->name)) {
- if (!pg_probe(dev)) {
- dev->present = 1;
- k++;
- } else
- pi_release(dev->pi);
- }
- }
-
- if (k)
- return 0;
-
- pi_unregister_driver(par_drv);
- printk("%s: No ATAPI device detected\n", name);
- return -1;
-}
-
-static int pg_open(struct inode *inode, struct file *file)
-{
- int unit = iminor(inode) & 0x7f;
- struct pg *dev = &devices[unit];
- int ret = 0;
-
- mutex_lock(&pg_mutex);
- if ((unit >= PG_UNITS) || (!dev->present)) {
- ret = -ENODEV;
- goto out;
- }
-
- if (test_and_set_bit(0, &dev->access)) {
- ret = -EBUSY;
- goto out;
- }
-
- if (dev->busy) {
- pg_reset(dev);
- dev->busy = 0;
- }
-
- pg_identify(dev, (verbose > 1));
-
- dev->bufptr = kmalloc(PG_MAX_DATA, GFP_KERNEL);
- if (dev->bufptr == NULL) {
- clear_bit(0, &dev->access);
- printk("%s: buffer allocation failed\n", dev->name);
- ret = -ENOMEM;
- goto out;
- }
-
- file->private_data = dev;
-
-out:
- mutex_unlock(&pg_mutex);
- return ret;
-}
-
-static int pg_release(struct inode *inode, struct file *file)
-{
- struct pg *dev = file->private_data;
-
- kfree(dev->bufptr);
- dev->bufptr = NULL;
- clear_bit(0, &dev->access);
-
- return 0;
-}
-
-static ssize_t pg_write(struct file *filp, const char __user *buf, size_t count, loff_t *ppos)
-{
- struct pg *dev = filp->private_data;
- struct pg_write_hdr hdr;
- int hs = sizeof (hdr);
-
- if (dev->busy)
- return -EBUSY;
- if (count < hs)
- return -EINVAL;
-
- if (copy_from_user(&hdr, buf, hs))
- return -EFAULT;
-
- if (hdr.magic != PG_MAGIC)
- return -EINVAL;
- if (hdr.dlen < 0 || hdr.dlen > PG_MAX_DATA)
- return -EINVAL;
- if ((count - hs) > PG_MAX_DATA)
- return -EINVAL;
-
- if (hdr.func == PG_RESET) {
- if (count != hs)
- return -EINVAL;
- if (pg_reset(dev))
- return -EIO;
- return count;
- }
-
- if (hdr.func != PG_COMMAND)
- return -EINVAL;
-
- dev->start = jiffies;
- dev->timeout = hdr.timeout * HZ + HZ / 2 + jiffies;
-
- if (pg_command(dev, hdr.packet, hdr.dlen, jiffies + PG_TMO)) {
- if (dev->status & 0x10)
- return -ETIME;
- return -EIO;
- }
-
- dev->busy = 1;
-
- if (copy_from_user(dev->bufptr, buf + hs, count - hs))
- return -EFAULT;
- return count;
-}
-
-static ssize_t pg_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos)
-{
- struct pg *dev = filp->private_data;
- struct pg_read_hdr hdr;
- int hs = sizeof (hdr);
- int copy;
-
- if (!dev->busy)
- return -EINVAL;
- if (count < hs)
- return -EINVAL;
-
- dev->busy = 0;
-
- if (pg_completion(dev, dev->bufptr, dev->timeout))
- if (dev->status & 0x10)
- return -ETIME;
-
- memset(&hdr, 0, sizeof(hdr));
- hdr.magic = PG_MAGIC;
- hdr.dlen = dev->dlen;
- copy = 0;
-
- if (hdr.dlen < 0) {
- hdr.dlen = -1 * hdr.dlen;
- copy = hdr.dlen;
- if (copy > (count - hs))
- copy = count - hs;
- }
-
- hdr.duration = (jiffies - dev->start + HZ / 2) / HZ;
- hdr.scsi = dev->status & 0x0f;
-
- if (copy_to_user(buf, &hdr, hs))
- return -EFAULT;
- if (copy > 0)
- if (copy_to_user(buf + hs, dev->bufptr, copy))
- return -EFAULT;
- return copy + hs;
-}
-
-static int __init pg_init(void)
-{
- int unit;
- int err;
-
- if (disable){
- err = -EINVAL;
- goto out;
- }
-
- pg_init_units();
-
- if (pg_detect()) {
- err = -ENODEV;
- goto out;
- }
-
- err = register_chrdev(major, name, &pg_fops);
- if (err < 0) {
- printk("pg_init: unable to get major number %d\n", major);
- for (unit = 0; unit < PG_UNITS; unit++) {
- struct pg *dev = &devices[unit];
- if (dev->present)
- pi_release(dev->pi);
- }
- goto out;
- }
- major = err; /* In case the user specified `major=0' (dynamic) */
- pg_class = class_create(THIS_MODULE, "pg");
- if (IS_ERR(pg_class)) {
- err = PTR_ERR(pg_class);
- goto out_chrdev;
- }
- for (unit = 0; unit < PG_UNITS; unit++) {
- struct pg *dev = &devices[unit];
- if (dev->present)
- device_create(pg_class, NULL, MKDEV(major, unit), NULL,
- "pg%u", unit);
- }
- err = 0;
- goto out;
-
-out_chrdev:
- unregister_chrdev(major, "pg");
-out:
- return err;
-}
-
-static void __exit pg_exit(void)
-{
- int unit;
-
- for (unit = 0; unit < PG_UNITS; unit++) {
- struct pg *dev = &devices[unit];
- if (dev->present)
- device_destroy(pg_class, MKDEV(major, unit));
- }
- class_destroy(pg_class);
- unregister_chrdev(major, name);
-
- for (unit = 0; unit < PG_UNITS; unit++) {
- struct pg *dev = &devices[unit];
- if (dev->present)
- pi_release(dev->pi);
- }
-}
-
-MODULE_LICENSE("GPL");
-module_init(pg_init)
-module_exit(pg_exit)
diff --git a/drivers/block/paride/pseudo.h b/drivers/block/paride/pseudo.h
deleted file mode 100644
index bc3703294143..000000000000
--- a/drivers/block/paride/pseudo.h
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
- pseudo.h (c) 1997-8 Grant R. Guenther <grant@torque.net>
- Under the terms of the GNU General Public License.
-
- This is the "pseudo-interrupt" logic for parallel port drivers.
-
- This module is #included into each driver. It makes one
- function available:
-
- ps_set_intr( void (*continuation)(void),
- int (*ready)(void),
- int timeout,
- int nice )
-
- Which will arrange for ready() to be evaluated frequently and
- when either it returns true, or timeout jiffies have passed,
- continuation() will be invoked.
-
- If nice is 1, the test will done approximately once a
- jiffy. If nice is 0, the test will also be done whenever
- the scheduler runs (by adding it to a task queue). If
- nice is greater than 1, the test will be done once every
- (nice-1) jiffies.
-
-*/
-
-/* Changes:
-
- 1.01 1998.05.03 Switched from cli()/sti() to spinlocks
- 1.02 1998.12.14 Added support for nice > 1
-*/
-
-#define PS_VERSION "1.02"
-
-#include <linux/sched.h>
-#include <linux/workqueue.h>
-
-static void ps_tq_int(struct work_struct *work);
-
-static void (* ps_continuation)(void);
-static int (* ps_ready)(void);
-static unsigned long ps_timeout;
-static int ps_tq_active = 0;
-static int ps_nice = 0;
-
-static DEFINE_SPINLOCK(ps_spinlock __attribute__((unused)));
-
-static DECLARE_DELAYED_WORK(ps_tq, ps_tq_int);
-
-static void ps_set_intr(void (*continuation)(void),
- int (*ready)(void),
- int timeout, int nice)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&ps_spinlock,flags);
-
- ps_continuation = continuation;
- ps_ready = ready;
- ps_timeout = jiffies + timeout;
- ps_nice = nice;
-
- if (!ps_tq_active) {
- ps_tq_active = 1;
- if (!ps_nice)
- schedule_delayed_work(&ps_tq, 0);
- else
- schedule_delayed_work(&ps_tq, ps_nice-1);
- }
- spin_unlock_irqrestore(&ps_spinlock,flags);
-}
-
-static void ps_tq_int(struct work_struct *work)
-{
- void (*con)(void);
- unsigned long flags;
-
- spin_lock_irqsave(&ps_spinlock,flags);
-
- con = ps_continuation;
- ps_tq_active = 0;
-
- if (!con) {
- spin_unlock_irqrestore(&ps_spinlock,flags);
- return;
- }
- if (!ps_ready || ps_ready() || time_after_eq(jiffies, ps_timeout)) {
- ps_continuation = NULL;
- spin_unlock_irqrestore(&ps_spinlock,flags);
- con();
- return;
- }
- ps_tq_active = 1;
- if (!ps_nice)
- schedule_delayed_work(&ps_tq, 0);
- else
- schedule_delayed_work(&ps_tq, ps_nice-1);
- spin_unlock_irqrestore(&ps_spinlock,flags);
-}
-
-/* end of pseudo.h */
-
diff --git a/drivers/block/paride/pt.c b/drivers/block/paride/pt.c
deleted file mode 100644
index e815312a00ad..000000000000
--- a/drivers/block/paride/pt.c
+++ /dev/null
@@ -1,1024 +0,0 @@
-/*
- pt.c (c) 1998 Grant R. Guenther <grant@torque.net>
- Under the terms of the GNU General Public License.
-
- This is the high-level driver for parallel port ATAPI tape
- drives based on chips supported by the paride module.
-
- The driver implements both rewinding and non-rewinding
- devices, filemarks, and the rewind ioctl. It allocates
- a small internal "bounce buffer" for each open device, but
- otherwise expects buffering and blocking to be done at the
- user level. As with most block-structured tapes, short
- writes are padded to full tape blocks, so reading back a file
- may return more data than was actually written.
-
- By default, the driver will autoprobe for a single parallel
- port ATAPI tape drive, but if their individual parameters are
- specified, the driver can handle up to 4 drives.
-
- The rewinding devices are named /dev/pt0, /dev/pt1, ...
- while the non-rewinding devices are /dev/npt0, /dev/npt1, etc.
-
- The behaviour of the pt driver can be altered by setting
- some parameters from the insmod command line. The following
- parameters are adjustable:
-
- drive0 These four arguments can be arrays of
- drive1 1-6 integers as follows:
- drive2
- drive3 <prt>,<pro>,<uni>,<mod>,<slv>,<dly>
-
- Where,
-
- <prt> is the base of the parallel port address for
- the corresponding drive. (required)
-
- <pro> is the protocol number for the adapter that
- supports this drive. These numbers are
- logged by 'paride' when the protocol modules
- are initialised. (0 if not given)
-
- <uni> for those adapters that support chained
- devices, this is the unit selector for the
- chain of devices on the given port. It should
- be zero for devices that don't support chaining.
- (0 if not given)
-
- <mod> this can be -1 to choose the best mode, or one
- of the mode numbers supported by the adapter.
- (-1 if not given)
-
- <slv> ATAPI devices can be jumpered to master or slave.
- Set this to 0 to choose the master drive, 1 to
- choose the slave, -1 (the default) to choose the
- first drive found.
-
- <dly> some parallel ports require the driver to
- go more slowly. -1 sets a default value that
- should work with the chosen protocol. Otherwise,
- set this to a small integer, the larger it is
- the slower the port i/o. In some cases, setting
- this to zero will speed up the device. (default -1)
-
- major You may use this parameter to override the
- default major number (96) that this driver
- will use. Be sure to change the device
- name as well.
-
- name This parameter is a character string that
- contains the name the kernel will use for this
- device (in /proc output, for instance).
- (default "pt").
-
- verbose This parameter controls the amount of logging
- that the driver will do. Set it to 0 for
- normal operation, 1 to see autoprobe progress
- messages, or 2 to see additional debugging
- output. (default 0)
-
- If this driver is built into the kernel, you can use
- the following command line parameters, with the same values
- as the corresponding module parameters listed above:
-
- pt.drive0
- pt.drive1
- pt.drive2
- pt.drive3
-
- In addition, you can use the parameter pt.disable to disable
- the driver entirely.
-
-*/
-
-/* Changes:
-
- 1.01 GRG 1998.05.06 Round up transfer size, fix ready_wait,
- loosed interpretation of ATAPI standard
- for clearing error status.
- Eliminate sti();
- 1.02 GRG 1998.06.16 Eliminate an Ugh.
- 1.03 GRG 1998.08.15 Adjusted PT_TMO, use HZ in loop timing,
- extra debugging
- 1.04 GRG 1998.09.24 Repair minor coding error, added jumbo support
-
-*/
-
-#define PT_VERSION "1.04"
-#define PT_MAJOR 96
-#define PT_NAME "pt"
-#define PT_UNITS 4
-
-#include <linux/types.h>
-
-/* Here are things one can override from the insmod command.
- Most are autoprobed by paride unless set here. Verbose is on
- by default.
-
-*/
-
-static int verbose = 0;
-static int major = PT_MAJOR;
-static char *name = PT_NAME;
-static int disable = 0;
-
-static int drive0[6] = { 0, 0, 0, -1, -1, -1 };
-static int drive1[6] = { 0, 0, 0, -1, -1, -1 };
-static int drive2[6] = { 0, 0, 0, -1, -1, -1 };
-static int drive3[6] = { 0, 0, 0, -1, -1, -1 };
-
-static int (*drives[4])[6] = {&drive0, &drive1, &drive2, &drive3};
-
-#define D_PRT 0
-#define D_PRO 1
-#define D_UNI 2
-#define D_MOD 3
-#define D_SLV 4
-#define D_DLY 5
-
-#define DU (*drives[unit])
-
-/* end of parameters */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/fs.h>
-#include <linux/delay.h>
-#include <linux/slab.h>
-#include <linux/mtio.h>
-#include <linux/device.h>
-#include <linux/sched.h> /* current, TASK_*, schedule_timeout() */
-#include <linux/mutex.h>
-
-#include <linux/uaccess.h>
-
-module_param(verbose, int, 0);
-module_param(major, int, 0);
-module_param(name, charp, 0);
-module_param_array(drive0, int, NULL, 0);
-module_param_array(drive1, int, NULL, 0);
-module_param_array(drive2, int, NULL, 0);
-module_param_array(drive3, int, NULL, 0);
-
-#include "paride.h"
-
-#define PT_MAX_RETRIES 5
-#define PT_TMO 3000 /* interrupt timeout in jiffies */
-#define PT_SPIN_DEL 50 /* spin delay in micro-seconds */
-#define PT_RESET_TMO 30 /* 30 seconds */
-#define PT_READY_TMO 60 /* 60 seconds */
-#define PT_REWIND_TMO 1200 /* 20 minutes */
-
-#define PT_SPIN ((1000000/(HZ*PT_SPIN_DEL))*PT_TMO)
-
-#define STAT_ERR 0x00001
-#define STAT_INDEX 0x00002
-#define STAT_ECC 0x00004
-#define STAT_DRQ 0x00008
-#define STAT_SEEK 0x00010
-#define STAT_WRERR 0x00020
-#define STAT_READY 0x00040
-#define STAT_BUSY 0x00080
-#define STAT_SENSE 0x1f000
-
-#define ATAPI_TEST_READY 0x00
-#define ATAPI_REWIND 0x01
-#define ATAPI_REQ_SENSE 0x03
-#define ATAPI_READ_6 0x08
-#define ATAPI_WRITE_6 0x0a
-#define ATAPI_WFM 0x10
-#define ATAPI_IDENTIFY 0x12
-#define ATAPI_MODE_SENSE 0x1a
-#define ATAPI_LOG_SENSE 0x4d
-
-static DEFINE_MUTEX(pt_mutex);
-static int pt_open(struct inode *inode, struct file *file);
-static long pt_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
-static int pt_release(struct inode *inode, struct file *file);
-static ssize_t pt_read(struct file *filp, char __user *buf,
- size_t count, loff_t * ppos);
-static ssize_t pt_write(struct file *filp, const char __user *buf,
- size_t count, loff_t * ppos);
-static int pt_detect(void);
-
-/* bits in tape->flags */
-
-#define PT_MEDIA 1
-#define PT_WRITE_OK 2
-#define PT_REWIND 4
-#define PT_WRITING 8
-#define PT_READING 16
-#define PT_EOF 32
-
-#define PT_NAMELEN 8
-#define PT_BUFSIZE 16384
-
-struct pt_unit {
- struct pi_adapter pia; /* interface to paride layer */
- struct pi_adapter *pi;
- int flags; /* various state flags */
- int last_sense; /* result of last request sense */
- int drive; /* drive */
- atomic_t available; /* 1 if access is available 0 otherwise */
- int bs; /* block size */
- int capacity; /* Size of tape in KB */
- int present; /* device present ? */
- char *bufptr;
- char name[PT_NAMELEN]; /* pf0, pf1, ... */
-};
-
-static int pt_identify(struct pt_unit *tape);
-
-static struct pt_unit pt[PT_UNITS];
-
-static char pt_scratch[512]; /* scratch block buffer */
-static void *par_drv; /* reference of parport driver */
-
-/* kernel glue structures */
-
-static const struct file_operations pt_fops = {
- .owner = THIS_MODULE,
- .read = pt_read,
- .write = pt_write,
- .unlocked_ioctl = pt_ioctl,
- .open = pt_open,
- .release = pt_release,
- .llseek = noop_llseek,
-};
-
-/* sysfs class support */
-static struct class *pt_class;
-
-static inline int status_reg(struct pi_adapter *pi)
-{
- return pi_read_regr(pi, 1, 6);
-}
-
-static inline int read_reg(struct pi_adapter *pi, int reg)
-{
- return pi_read_regr(pi, 0, reg);
-}
-
-static inline void write_reg(struct pi_adapter *pi, int reg, int val)
-{
- pi_write_regr(pi, 0, reg, val);
-}
-
-static inline u8 DRIVE(struct pt_unit *tape)
-{
- return 0xa0+0x10*tape->drive;
-}
-
-static int pt_wait(struct pt_unit *tape, int go, int stop, char *fun, char *msg)
-{
- int j, r, e, s, p;
- struct pi_adapter *pi = tape->pi;
-
- j = 0;
- while ((((r = status_reg(pi)) & go) || (stop && (!(r & stop))))
- && (j++ < PT_SPIN))
- udelay(PT_SPIN_DEL);
-
- if ((r & (STAT_ERR & stop)) || (j > PT_SPIN)) {
- s = read_reg(pi, 7);
- e = read_reg(pi, 1);
- p = read_reg(pi, 2);
- if (j > PT_SPIN)
- e |= 0x100;
- if (fun)
- printk("%s: %s %s: alt=0x%x stat=0x%x err=0x%x"
- " loop=%d phase=%d\n",
- tape->name, fun, msg, r, s, e, j, p);
- return (e << 8) + s;
- }
- return 0;
-}
-
-static int pt_command(struct pt_unit *tape, char *cmd, int dlen, char *fun)
-{
- struct pi_adapter *pi = tape->pi;
- pi_connect(pi);
-
- write_reg(pi, 6, DRIVE(tape));
-
- if (pt_wait(tape, STAT_BUSY | STAT_DRQ, 0, fun, "before command")) {
- pi_disconnect(pi);
- return -1;
- }
-
- write_reg(pi, 4, dlen % 256);
- write_reg(pi, 5, dlen / 256);
- write_reg(pi, 7, 0xa0); /* ATAPI packet command */
-
- if (pt_wait(tape, STAT_BUSY, STAT_DRQ, fun, "command DRQ")) {
- pi_disconnect(pi);
- return -1;
- }
-
- if (read_reg(pi, 2) != 1) {
- printk("%s: %s: command phase error\n", tape->name, fun);
- pi_disconnect(pi);
- return -1;
- }
-
- pi_write_block(pi, cmd, 12);
-
- return 0;
-}
-
-static int pt_completion(struct pt_unit *tape, char *buf, char *fun)
-{
- struct pi_adapter *pi = tape->pi;
- int r, s, n, p;
-
- r = pt_wait(tape, STAT_BUSY, STAT_DRQ | STAT_READY | STAT_ERR,
- fun, "completion");
-
- if (read_reg(pi, 7) & STAT_DRQ) {
- n = (((read_reg(pi, 4) + 256 * read_reg(pi, 5)) +
- 3) & 0xfffc);
- p = read_reg(pi, 2) & 3;
- if (p == 0)
- pi_write_block(pi, buf, n);
- if (p == 2)
- pi_read_block(pi, buf, n);
- }
-
- s = pt_wait(tape, STAT_BUSY, STAT_READY | STAT_ERR, fun, "data done");
-
- pi_disconnect(pi);
-
- return (r ? r : s);
-}
-
-static void pt_req_sense(struct pt_unit *tape, int quiet)
-{
- char rs_cmd[12] = { ATAPI_REQ_SENSE, 0, 0, 0, 16, 0, 0, 0, 0, 0, 0, 0 };
- char buf[16];
- int r;
-
- r = pt_command(tape, rs_cmd, 16, "Request sense");
- mdelay(1);
- if (!r)
- pt_completion(tape, buf, "Request sense");
-
- tape->last_sense = -1;
- if (!r) {
- if (!quiet)
- printk("%s: Sense key: %x, ASC: %x, ASQ: %x\n",
- tape->name, buf[2] & 0xf, buf[12], buf[13]);
- tape->last_sense = (buf[2] & 0xf) | ((buf[12] & 0xff) << 8)
- | ((buf[13] & 0xff) << 16);
- }
-}
-
-static int pt_atapi(struct pt_unit *tape, char *cmd, int dlen, char *buf, char *fun)
-{
- int r;
-
- r = pt_command(tape, cmd, dlen, fun);
- mdelay(1);
- if (!r)
- r = pt_completion(tape, buf, fun);
- if (r)
- pt_req_sense(tape, !fun);
-
- return r;
-}
-
-static void pt_sleep(int cs)
-{
- schedule_timeout_interruptible(cs);
-}
-
-static int pt_poll_dsc(struct pt_unit *tape, int pause, int tmo, char *msg)
-{
- struct pi_adapter *pi = tape->pi;
- int k, e, s;
-
- k = 0;
- e = 0;
- s = 0;
- while (k < tmo) {
- pt_sleep(pause);
- k++;
- pi_connect(pi);
- write_reg(pi, 6, DRIVE(tape));
- s = read_reg(pi, 7);
- e = read_reg(pi, 1);
- pi_disconnect(pi);
- if (s & (STAT_ERR | STAT_SEEK))
- break;
- }
- if ((k >= tmo) || (s & STAT_ERR)) {
- if (k >= tmo)
- printk("%s: %s DSC timeout\n", tape->name, msg);
- else
- printk("%s: %s stat=0x%x err=0x%x\n", tape->name, msg, s,
- e);
- pt_req_sense(tape, 0);
- return 0;
- }
- return 1;
-}
-
-static void pt_media_access_cmd(struct pt_unit *tape, int tmo, char *cmd, char *fun)
-{
- if (pt_command(tape, cmd, 0, fun)) {
- pt_req_sense(tape, 0);
- return;
- }
- pi_disconnect(tape->pi);
- pt_poll_dsc(tape, HZ, tmo, fun);
-}
-
-static void pt_rewind(struct pt_unit *tape)
-{
- char rw_cmd[12] = { ATAPI_REWIND, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
-
- pt_media_access_cmd(tape, PT_REWIND_TMO, rw_cmd, "rewind");
-}
-
-static void pt_write_fm(struct pt_unit *tape)
-{
- char wm_cmd[12] = { ATAPI_WFM, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0 };
-
- pt_media_access_cmd(tape, PT_TMO, wm_cmd, "write filemark");
-}
-
-#define DBMSG(msg) ((verbose>1)?(msg):NULL)
-
-static int pt_reset(struct pt_unit *tape)
-{
- struct pi_adapter *pi = tape->pi;
- int i, k, flg;
- int expect[5] = { 1, 1, 1, 0x14, 0xeb };
-
- pi_connect(pi);
- write_reg(pi, 6, DRIVE(tape));
- write_reg(pi, 7, 8);
-
- pt_sleep(20 * HZ / 1000);
-
- k = 0;
- while ((k++ < PT_RESET_TMO) && (status_reg(pi) & STAT_BUSY))
- pt_sleep(HZ / 10);
-
- flg = 1;
- for (i = 0; i < 5; i++)
- flg &= (read_reg(pi, i + 1) == expect[i]);
-
- if (verbose) {
- printk("%s: Reset (%d) signature = ", tape->name, k);
- for (i = 0; i < 5; i++)
- printk("%3x", read_reg(pi, i + 1));
- if (!flg)
- printk(" (incorrect)");
- printk("\n");
- }
-
- pi_disconnect(pi);
- return flg - 1;
-}
-
-static int pt_ready_wait(struct pt_unit *tape, int tmo)
-{
- char tr_cmd[12] = { ATAPI_TEST_READY, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
- int k, p;
-
- k = 0;
- while (k < tmo) {
- tape->last_sense = 0;
- pt_atapi(tape, tr_cmd, 0, NULL, DBMSG("test unit ready"));
- p = tape->last_sense;
- if (!p)
- return 0;
- if (!(((p & 0xffff) == 0x0402) || ((p & 0xff) == 6)))
- return p;
- k++;
- pt_sleep(HZ);
- }
- return 0x000020; /* timeout */
-}
-
-static void xs(char *buf, char *targ, int offs, int len)
-{
- int j, k, l;
-
- j = 0;
- l = 0;
- for (k = 0; k < len; k++)
- if ((buf[k + offs] != 0x20) || (buf[k + offs] != l))
- l = targ[j++] = buf[k + offs];
- if (l == 0x20)
- j--;
- targ[j] = 0;
-}
-
-static int xn(char *buf, int offs, int size)
-{
- int v, k;
-
- v = 0;
- for (k = 0; k < size; k++)
- v = v * 256 + (buf[k + offs] & 0xff);
- return v;
-}
-
-static int pt_identify(struct pt_unit *tape)
-{
- int dt, s;
- char *ms[2] = { "master", "slave" };
- char mf[10], id[18];
- char id_cmd[12] = { ATAPI_IDENTIFY, 0, 0, 0, 36, 0, 0, 0, 0, 0, 0, 0 };
- char ms_cmd[12] =
- { ATAPI_MODE_SENSE, 0, 0x2a, 0, 36, 0, 0, 0, 0, 0, 0, 0 };
- char ls_cmd[12] =
- { ATAPI_LOG_SENSE, 0, 0x71, 0, 0, 0, 0, 0, 36, 0, 0, 0 };
- char buf[36];
-
- s = pt_atapi(tape, id_cmd, 36, buf, "identify");
- if (s)
- return -1;
-
- dt = buf[0] & 0x1f;
- if (dt != 1) {
- if (verbose)
- printk("%s: Drive %d, unsupported type %d\n",
- tape->name, tape->drive, dt);
- return -1;
- }
-
- xs(buf, mf, 8, 8);
- xs(buf, id, 16, 16);
-
- tape->flags = 0;
- tape->capacity = 0;
- tape->bs = 0;
-
- if (!pt_ready_wait(tape, PT_READY_TMO))
- tape->flags |= PT_MEDIA;
-
- if (!pt_atapi(tape, ms_cmd, 36, buf, "mode sense")) {
- if (!(buf[2] & 0x80))
- tape->flags |= PT_WRITE_OK;
- tape->bs = xn(buf, 10, 2);
- }
-
- if (!pt_atapi(tape, ls_cmd, 36, buf, "log sense"))
- tape->capacity = xn(buf, 24, 4);
-
- printk("%s: %s %s, %s", tape->name, mf, id, ms[tape->drive]);
- if (!(tape->flags & PT_MEDIA))
- printk(", no media\n");
- else {
- if (!(tape->flags & PT_WRITE_OK))
- printk(", RO");
- printk(", blocksize %d, %d MB\n", tape->bs, tape->capacity / 1024);
- }
-
- return 0;
-}
-
-
-/*
- * returns 0, with id set if drive is detected
- * -1, if drive detection failed
- */
-static int pt_probe(struct pt_unit *tape)
-{
- if (tape->drive == -1) {
- for (tape->drive = 0; tape->drive <= 1; tape->drive++)
- if (!pt_reset(tape))
- return pt_identify(tape);
- } else {
- if (!pt_reset(tape))
- return pt_identify(tape);
- }
- return -1;
-}
-
-static int pt_detect(void)
-{
- struct pt_unit *tape;
- int specified = 0, found = 0;
- int unit;
-
- printk("%s: %s version %s, major %d\n", name, name, PT_VERSION, major);
-
- par_drv = pi_register_driver(name);
- if (!par_drv) {
- pr_err("failed to register %s driver\n", name);
- return -1;
- }
-
- specified = 0;
- for (unit = 0; unit < PT_UNITS; unit++) {
- struct pt_unit *tape = &pt[unit];
- tape->pi = &tape->pia;
- atomic_set(&tape->available, 1);
- tape->flags = 0;
- tape->last_sense = 0;
- tape->present = 0;
- tape->bufptr = NULL;
- tape->drive = DU[D_SLV];
- snprintf(tape->name, PT_NAMELEN, "%s%d", name, unit);
- if (!DU[D_PRT])
- continue;
- specified++;
- if (pi_init(tape->pi, 0, DU[D_PRT], DU[D_MOD], DU[D_UNI],
- DU[D_PRO], DU[D_DLY], pt_scratch, PI_PT,
- verbose, tape->name)) {
- if (!pt_probe(tape)) {
- tape->present = 1;
- found++;
- } else
- pi_release(tape->pi);
- }
- }
- if (specified == 0) {
- tape = pt;
- if (pi_init(tape->pi, 1, -1, -1, -1, -1, -1, pt_scratch,
- PI_PT, verbose, tape->name)) {
- if (!pt_probe(tape)) {
- tape->present = 1;
- found++;
- } else
- pi_release(tape->pi);
- }
-
- }
- if (found)
- return 0;
-
- pi_unregister_driver(par_drv);
- printk("%s: No ATAPI tape drive detected\n", name);
- return -1;
-}
-
-static int pt_open(struct inode *inode, struct file *file)
-{
- int unit = iminor(inode) & 0x7F;
- struct pt_unit *tape = pt + unit;
- int err;
-
- mutex_lock(&pt_mutex);
- if (unit >= PT_UNITS || (!tape->present)) {
- mutex_unlock(&pt_mutex);
- return -ENODEV;
- }
-
- err = -EBUSY;
- if (!atomic_dec_and_test(&tape->available))
- goto out;
-
- pt_identify(tape);
-
- err = -ENODEV;
- if (!(tape->flags & PT_MEDIA))
- goto out;
-
- err = -EROFS;
- if ((!(tape->flags & PT_WRITE_OK)) && (file->f_mode & FMODE_WRITE))
- goto out;
-
- if (!(iminor(inode) & 128))
- tape->flags |= PT_REWIND;
-
- err = -ENOMEM;
- tape->bufptr = kmalloc(PT_BUFSIZE, GFP_KERNEL);
- if (tape->bufptr == NULL) {
- printk("%s: buffer allocation failed\n", tape->name);
- goto out;
- }
-
- file->private_data = tape;
- mutex_unlock(&pt_mutex);
- return 0;
-
-out:
- atomic_inc(&tape->available);
- mutex_unlock(&pt_mutex);
- return err;
-}
-
-static long pt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
- struct pt_unit *tape = file->private_data;
- struct mtop __user *p = (void __user *)arg;
- struct mtop mtop;
-
- switch (cmd) {
- case MTIOCTOP:
- if (copy_from_user(&mtop, p, sizeof(struct mtop)))
- return -EFAULT;
-
- switch (mtop.mt_op) {
-
- case MTREW:
- mutex_lock(&pt_mutex);
- pt_rewind(tape);
- mutex_unlock(&pt_mutex);
- return 0;
-
- case MTWEOF:
- mutex_lock(&pt_mutex);
- pt_write_fm(tape);
- mutex_unlock(&pt_mutex);
- return 0;
-
- default:
- /* FIXME: rate limit ?? */
- printk(KERN_DEBUG "%s: Unimplemented mt_op %d\n", tape->name,
- mtop.mt_op);
- return -EINVAL;
- }
-
- default:
- return -ENOTTY;
- }
-}
-
-static int
-pt_release(struct inode *inode, struct file *file)
-{
- struct pt_unit *tape = file->private_data;
-
- if (atomic_read(&tape->available) > 1)
- return -EINVAL;
-
- if (tape->flags & PT_WRITING)
- pt_write_fm(tape);
-
- if (tape->flags & PT_REWIND)
- pt_rewind(tape);
-
- kfree(tape->bufptr);
- tape->bufptr = NULL;
-
- atomic_inc(&tape->available);
-
- return 0;
-
-}
-
-static ssize_t pt_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
-{
- struct pt_unit *tape = filp->private_data;
- struct pi_adapter *pi = tape->pi;
- char rd_cmd[12] = { ATAPI_READ_6, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
- int k, n, r, p, s, t, b;
-
- if (!(tape->flags & (PT_READING | PT_WRITING))) {
- tape->flags |= PT_READING;
- if (pt_atapi(tape, rd_cmd, 0, NULL, "start read-ahead"))
- return -EIO;
- } else if (tape->flags & PT_WRITING)
- return -EIO;
-
- if (tape->flags & PT_EOF)
- return 0;
-
- t = 0;
-
- while (count > 0) {
-
- if (!pt_poll_dsc(tape, HZ / 100, PT_TMO, "read"))
- return -EIO;
-
- n = count;
- if (n > 32768)
- n = 32768; /* max per command */
- b = (n - 1 + tape->bs) / tape->bs;
- n = b * tape->bs; /* rounded up to even block */
-
- rd_cmd[4] = b;
-
- r = pt_command(tape, rd_cmd, n, "read");
-
- mdelay(1);
-
- if (r) {
- pt_req_sense(tape, 0);
- return -EIO;
- }
-
- while (1) {
-
- r = pt_wait(tape, STAT_BUSY,
- STAT_DRQ | STAT_ERR | STAT_READY,
- DBMSG("read DRQ"), "");
-
- if (r & STAT_SENSE) {
- pi_disconnect(pi);
- pt_req_sense(tape, 0);
- return -EIO;
- }
-
- if (r)
- tape->flags |= PT_EOF;
-
- s = read_reg(pi, 7);
-
- if (!(s & STAT_DRQ))
- break;
-
- n = (read_reg(pi, 4) + 256 * read_reg(pi, 5));
- p = (read_reg(pi, 2) & 3);
- if (p != 2) {
- pi_disconnect(pi);
- printk("%s: Phase error on read: %d\n", tape->name,
- p);
- return -EIO;
- }
-
- while (n > 0) {
- k = n;
- if (k > PT_BUFSIZE)
- k = PT_BUFSIZE;
- pi_read_block(pi, tape->bufptr, k);
- n -= k;
- b = k;
- if (b > count)
- b = count;
- if (copy_to_user(buf + t, tape->bufptr, b)) {
- pi_disconnect(pi);
- return -EFAULT;
- }
- t += b;
- count -= b;
- }
-
- }
- pi_disconnect(pi);
- if (tape->flags & PT_EOF)
- break;
- }
-
- return t;
-
-}
-
-static ssize_t pt_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
-{
- struct pt_unit *tape = filp->private_data;
- struct pi_adapter *pi = tape->pi;
- char wr_cmd[12] = { ATAPI_WRITE_6, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
- int k, n, r, p, s, t, b;
-
- if (!(tape->flags & PT_WRITE_OK))
- return -EROFS;
-
- if (!(tape->flags & (PT_READING | PT_WRITING))) {
- tape->flags |= PT_WRITING;
- if (pt_atapi
- (tape, wr_cmd, 0, NULL, "start buffer-available mode"))
- return -EIO;
- } else if (tape->flags & PT_READING)
- return -EIO;
-
- if (tape->flags & PT_EOF)
- return -ENOSPC;
-
- t = 0;
-
- while (count > 0) {
-
- if (!pt_poll_dsc(tape, HZ / 100, PT_TMO, "write"))
- return -EIO;
-
- n = count;
- if (n > 32768)
- n = 32768; /* max per command */
- b = (n - 1 + tape->bs) / tape->bs;
- n = b * tape->bs; /* rounded up to even block */
-
- wr_cmd[4] = b;
-
- r = pt_command(tape, wr_cmd, n, "write");
-
- mdelay(1);
-
- if (r) { /* error delivering command only */
- pt_req_sense(tape, 0);
- return -EIO;
- }
-
- while (1) {
-
- r = pt_wait(tape, STAT_BUSY,
- STAT_DRQ | STAT_ERR | STAT_READY,
- DBMSG("write DRQ"), NULL);
-
- if (r & STAT_SENSE) {
- pi_disconnect(pi);
- pt_req_sense(tape, 0);
- return -EIO;
- }
-
- if (r)
- tape->flags |= PT_EOF;
-
- s = read_reg(pi, 7);
-
- if (!(s & STAT_DRQ))
- break;
-
- n = (read_reg(pi, 4) + 256 * read_reg(pi, 5));
- p = (read_reg(pi, 2) & 3);
- if (p != 0) {
- pi_disconnect(pi);
- printk("%s: Phase error on write: %d \n",
- tape->name, p);
- return -EIO;
- }
-
- while (n > 0) {
- k = n;
- if (k > PT_BUFSIZE)
- k = PT_BUFSIZE;
- b = k;
- if (b > count)
- b = count;
- if (copy_from_user(tape->bufptr, buf + t, b)) {
- pi_disconnect(pi);
- return -EFAULT;
- }
- pi_write_block(pi, tape->bufptr, k);
- t += b;
- count -= b;
- n -= k;
- }
-
- }
- pi_disconnect(pi);
- if (tape->flags & PT_EOF)
- break;
- }
-
- return t;
-}
-
-static int __init pt_init(void)
-{
- int unit;
- int err;
-
- if (disable) {
- err = -EINVAL;
- goto out;
- }
-
- if (pt_detect()) {
- err = -ENODEV;
- goto out;
- }
-
- err = register_chrdev(major, name, &pt_fops);
- if (err < 0) {
- printk("pt_init: unable to get major number %d\n", major);
- for (unit = 0; unit < PT_UNITS; unit++)
- if (pt[unit].present)
- pi_release(pt[unit].pi);
- goto out;
- }
- major = err;
- pt_class = class_create(THIS_MODULE, "pt");
- if (IS_ERR(pt_class)) {
- err = PTR_ERR(pt_class);
- goto out_chrdev;
- }
-
- for (unit = 0; unit < PT_UNITS; unit++)
- if (pt[unit].present) {
- device_create(pt_class, NULL, MKDEV(major, unit), NULL,
- "pt%d", unit);
- device_create(pt_class, NULL, MKDEV(major, unit + 128),
- NULL, "pt%dn", unit);
- }
- goto out;
-
-out_chrdev:
- unregister_chrdev(major, "pt");
-out:
- return err;
-}
-
-static void __exit pt_exit(void)
-{
- int unit;
- for (unit = 0; unit < PT_UNITS; unit++)
- if (pt[unit].present) {
- device_destroy(pt_class, MKDEV(major, unit));
- device_destroy(pt_class, MKDEV(major, unit + 128));
- }
- class_destroy(pt_class);
- unregister_chrdev(major, name);
- for (unit = 0; unit < PT_UNITS; unit++)
- if (pt[unit].present)
- pi_release(pt[unit].pi);
-}
-
-MODULE_LICENSE("GPL");
-module_init(pt_init)
-module_exit(pt_exit)
diff --git a/drivers/block/ps3vram.c b/drivers/block/ps3vram.c
index 574e470b220b..38d42af01b25 100644
--- a/drivers/block/ps3vram.c
+++ b/drivers/block/ps3vram.c
@@ -586,10 +586,6 @@ static void ps3vram_submit_bio(struct bio *bio)
dev_dbg(&dev->core, "%s\n", __func__);
- bio = bio_split_to_limits(bio);
- if (!bio)
- return;
-
spin_lock_irq(&priv->lock);
busy = !bio_list_empty(&priv->list);
bio_list_add(&priv->list, bio);
@@ -749,9 +745,6 @@ static int ps3vram_probe(struct ps3_system_bus_device *dev)
gendisk->private_data = dev;
strscpy(gendisk->disk_name, DEVICE_NAME, sizeof(gendisk->disk_name));
set_capacity(gendisk, priv->size >> 9);
- blk_queue_max_segments(gendisk->queue, BLK_MAX_SEGMENTS);
- blk_queue_max_segment_size(gendisk->queue, BLK_MAX_SEGMENT_SIZE);
- blk_queue_max_hw_sectors(gendisk->queue, BLK_SAFE_MAX_SECTORS);
dev_info(&dev->core, "%s: Using %llu MiB of GPU memory\n",
gendisk->disk_name, get_capacity(gendisk) >> 11);
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 04453f4a319c..1faca7e07a4d 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -3068,13 +3068,12 @@ static int setup_copyup_bvecs(struct rbd_obj_request *obj_req, u64 obj_overlap)
for (i = 0; i < obj_req->copyup_bvec_count; i++) {
unsigned int len = min(obj_overlap, (u64)PAGE_SIZE);
+ struct page *page = alloc_page(GFP_NOIO);
- obj_req->copyup_bvecs[i].bv_page = alloc_page(GFP_NOIO);
- if (!obj_req->copyup_bvecs[i].bv_page)
+ if (!page)
return -ENOMEM;
- obj_req->copyup_bvecs[i].bv_offset = 0;
- obj_req->copyup_bvecs[i].bv_len = len;
+ bvec_set_page(&obj_req->copyup_bvecs[i], page, len, 0);
obj_overlap -= len;
}
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index e54693204630..b9c759cef00e 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -42,6 +42,7 @@
#include <linux/mm.h>
#include <asm/page.h>
#include <linux/task_work.h>
+#include <linux/namei.h>
#include <uapi/linux/ublk_cmd.h>
#define UBLK_MINORS (1U << MINORBITS)
@@ -51,10 +52,12 @@
| UBLK_F_URING_CMD_COMP_IN_TASK \
| UBLK_F_NEED_GET_DATA \
| UBLK_F_USER_RECOVERY \
- | UBLK_F_USER_RECOVERY_REISSUE)
+ | UBLK_F_USER_RECOVERY_REISSUE \
+ | UBLK_F_UNPRIVILEGED_DEV)
/* All UBLK_PARAM_TYPE_* should be included here */
-#define UBLK_PARAM_TYPE_ALL (UBLK_PARAM_TYPE_BASIC | UBLK_PARAM_TYPE_DISCARD)
+#define UBLK_PARAM_TYPE_ALL (UBLK_PARAM_TYPE_BASIC | \
+ UBLK_PARAM_TYPE_DISCARD | UBLK_PARAM_TYPE_DEVT)
struct ublk_rq_data {
struct llist_node node;
@@ -137,7 +140,7 @@ struct ublk_device {
char *__queues;
- unsigned short queue_size;
+ unsigned int queue_size;
struct ublksrv_ctrl_dev_info dev_info;
struct blk_mq_tag_set tag_set;
@@ -147,6 +150,7 @@ struct ublk_device {
#define UB_STATE_OPEN 0
#define UB_STATE_USED 1
+#define UB_STATE_DELETED 2
unsigned long state;
int ub_number;
@@ -159,7 +163,7 @@ struct ublk_device {
struct completion completion;
unsigned int nr_queues_ready;
- atomic_t nr_aborted_queues;
+ unsigned int nr_privileged_daemon;
/*
* Our ubq->daemon may be killed without any notification, so
@@ -185,6 +189,15 @@ static wait_queue_head_t ublk_idr_wq; /* wait until one idr is freed */
static DEFINE_MUTEX(ublk_ctl_mutex);
+/*
+ * Max ublk devices allowed to add
+ *
+ * It can be extended to one per-user limit in future or even controlled
+ * by cgroup.
+ */
+static unsigned int ublks_max = 64;
+static unsigned int ublks_added; /* protected by ublk_ctl_mutex */
+
static struct miscdevice ublk_misc;
static void ublk_dev_param_basic_apply(struct ublk_device *ub)
@@ -255,6 +268,10 @@ static int ublk_validate_params(const struct ublk_device *ub)
return -EINVAL;
}
+ /* dev_t is read-only */
+ if (ub->params.types & UBLK_PARAM_TYPE_DEVT)
+ return -EINVAL;
+
return 0;
}
@@ -306,7 +323,7 @@ static inline struct ublk_queue *ublk_get_queue(struct ublk_device *dev,
static inline bool ublk_rq_has_data(const struct request *rq)
{
- return rq->bio && bio_has_data(rq->bio);
+ return bio_has_data(rq->bio);
}
static inline struct ublksrv_io_desc *ublk_get_iod(struct ublk_queue *ubq,
@@ -361,8 +378,50 @@ static void ublk_free_disk(struct gendisk *disk)
put_device(&ub->cdev_dev);
}
+static void ublk_store_owner_uid_gid(unsigned int *owner_uid,
+ unsigned int *owner_gid)
+{
+ kuid_t uid;
+ kgid_t gid;
+
+ current_uid_gid(&uid, &gid);
+
+ *owner_uid = from_kuid(&init_user_ns, uid);
+ *owner_gid = from_kgid(&init_user_ns, gid);
+}
+
+static int ublk_open(struct block_device *bdev, fmode_t mode)
+{
+ struct ublk_device *ub = bdev->bd_disk->private_data;
+
+ if (capable(CAP_SYS_ADMIN))
+ return 0;
+
+ /*
+ * If it is one unprivileged device, only owner can open
+ * the disk. Otherwise it could be one trap made by one
+ * evil user who grants this disk's privileges to other
+ * users deliberately.
+ *
+ * This way is reasonable too given anyone can create
+ * unprivileged device, and no need other's grant.
+ */
+ if (ub->dev_info.flags & UBLK_F_UNPRIVILEGED_DEV) {
+ unsigned int curr_uid, curr_gid;
+
+ ublk_store_owner_uid_gid(&curr_uid, &curr_gid);
+
+ if (curr_uid != ub->dev_info.owner_uid || curr_gid !=
+ ub->dev_info.owner_gid)
+ return -EPERM;
+ }
+
+ return 0;
+}
+
static const struct block_device_operations ub_fops = {
.owner = THIS_MODULE,
+ .open = ublk_open,
.free_disk = ublk_free_disk,
};
@@ -607,7 +666,7 @@ static void ublk_complete_rq(struct request *req)
}
/*
- * FLUSH or DISCARD usually won't return bytes returned, so end them
+ * FLUSH, DISCARD or WRITE_ZEROES usually won't return bytes returned, so end them
* directly.
*
* Both the two needn't unmap.
@@ -1179,6 +1238,9 @@ static void ublk_mark_io_ready(struct ublk_device *ub, struct ublk_queue *ubq)
ubq->ubq_daemon = current;
get_task_struct(ubq->ubq_daemon);
ub->nr_queues_ready++;
+
+ if (capable(CAP_SYS_ADMIN))
+ ub->nr_privileged_daemon++;
}
if (ub->nr_queues_ready == ub->dev_info.nr_hw_queues)
complete_all(&ub->completion);
@@ -1203,6 +1265,7 @@ static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
u32 cmd_op = cmd->cmd_op;
unsigned tag = ub_cmd->tag;
int ret = -EINVAL;
+ struct request *req;
pr_devel("%s: received: cmd op %d queue %d tag %d result %d\n",
__func__, cmd->cmd_op, ub_cmd->q_id, tag,
@@ -1253,8 +1316,8 @@ static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
*/
if (io->flags & UBLK_IO_FLAG_OWNED_BY_SRV)
goto out;
- /* FETCH_RQ has to provide IO buffer */
- if (!ub_cmd->addr)
+ /* FETCH_RQ has to provide IO buffer if NEED GET DATA is not enabled */
+ if (!ub_cmd->addr && !ublk_need_get_data(ubq))
goto out;
io->cmd = cmd;
io->flags |= UBLK_IO_FLAG_ACTIVE;
@@ -1263,8 +1326,12 @@ static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
ublk_mark_io_ready(ub, ubq);
break;
case UBLK_IO_COMMIT_AND_FETCH_REQ:
- /* FETCH_RQ has to provide IO buffer */
- if (!ub_cmd->addr)
+ req = blk_mq_tag_to_rq(ub->tag_set.tags[ub_cmd->q_id], tag);
+ /*
+ * COMMIT_AND_FETCH_REQ has to provide IO buffer if NEED GET DATA is
+ * not enabled or it is Read IO.
+ */
+ if (!ub_cmd->addr && (!ublk_need_get_data(ubq) || req_op(req) == REQ_OP_READ))
goto out;
if (!(io->flags & UBLK_IO_FLAG_OWNED_BY_SRV))
goto out;
@@ -1433,6 +1500,8 @@ static int ublk_add_chdev(struct ublk_device *ub)
ret = cdev_device_add(&ub->cdev, dev);
if (ret)
goto fail;
+
+ ublks_added++;
return 0;
fail:
put_device(dev);
@@ -1475,6 +1544,7 @@ static void ublk_remove(struct ublk_device *ub)
cancel_work_sync(&ub->quiesce_work);
cdev_device_del(&ub->cdev, &ub->cdev_dev);
put_device(&ub->cdev_dev);
+ ublks_added--;
}
static struct ublk_device *ublk_get_device_from_id(int idx)
@@ -1493,21 +1563,16 @@ static struct ublk_device *ublk_get_device_from_id(int idx)
return ub;
}
-static int ublk_ctrl_start_dev(struct io_uring_cmd *cmd)
+static int ublk_ctrl_start_dev(struct ublk_device *ub, struct io_uring_cmd *cmd)
{
struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
int ublksrv_pid = (int)header->data[0];
- struct ublk_device *ub;
struct gendisk *disk;
int ret = -EINVAL;
if (ublksrv_pid <= 0)
return -EINVAL;
- ub = ublk_get_device_from_id(header->dev_id);
- if (!ub)
- return -EINVAL;
-
wait_for_completion_interruptible(&ub->completion);
schedule_delayed_work(&ub->monitor_work, UBLK_DAEMON_MONITOR_PERIOD);
@@ -1519,7 +1584,7 @@ static int ublk_ctrl_start_dev(struct io_uring_cmd *cmd)
goto out_unlock;
}
- disk = blk_mq_alloc_disk(&ub->tag_set, ub);
+ disk = blk_mq_alloc_disk(&ub->tag_set, NULL);
if (IS_ERR(disk)) {
ret = PTR_ERR(disk);
goto out_unlock;
@@ -1535,6 +1600,10 @@ static int ublk_ctrl_start_dev(struct io_uring_cmd *cmd)
if (ret)
goto out_put_disk;
+ /* don't probe partitions if any one ubq daemon is un-trusted */
+ if (ub->nr_privileged_daemon != ub->nr_queues_ready)
+ set_bit(GD_SUPPRESS_PART_SCAN, &disk->state);
+
get_device(&ub->cdev_dev);
ret = add_disk(disk);
if (ret) {
@@ -1552,21 +1621,20 @@ out_put_disk:
put_disk(disk);
out_unlock:
mutex_unlock(&ub->mutex);
- ublk_put_device(ub);
return ret;
}
-static int ublk_ctrl_get_queue_affinity(struct io_uring_cmd *cmd)
+static int ublk_ctrl_get_queue_affinity(struct ublk_device *ub,
+ struct io_uring_cmd *cmd)
{
struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
void __user *argp = (void __user *)(unsigned long)header->addr;
- struct ublk_device *ub;
cpumask_var_t cpumask;
unsigned long queue;
unsigned int retlen;
unsigned int i;
- int ret = -EINVAL;
-
+ int ret;
+
if (header->len * BITS_PER_BYTE < nr_cpu_ids)
return -EINVAL;
if (header->len & (sizeof(unsigned long)-1))
@@ -1574,17 +1642,12 @@ static int ublk_ctrl_get_queue_affinity(struct io_uring_cmd *cmd)
if (!header->addr)
return -EINVAL;
- ub = ublk_get_device_from_id(header->dev_id);
- if (!ub)
- return -EINVAL;
-
queue = header->data[0];
if (queue >= ub->dev_info.nr_hw_queues)
- goto out_put_device;
+ return -EINVAL;
- ret = -ENOMEM;
if (!zalloc_cpumask_var(&cpumask, GFP_KERNEL))
- goto out_put_device;
+ return -ENOMEM;
for_each_possible_cpu(i) {
if (ub->tag_set.map[HCTX_TYPE_DEFAULT].mq_map[i] == queue)
@@ -1602,8 +1665,6 @@ static int ublk_ctrl_get_queue_affinity(struct io_uring_cmd *cmd)
ret = 0;
out_free_cpumask:
free_cpumask_var(cpumask);
-out_put_device:
- ublk_put_device(ub);
return ret;
}
@@ -1630,19 +1691,34 @@ static int ublk_ctrl_add_dev(struct io_uring_cmd *cmd)
__func__, header->queue_id);
return -EINVAL;
}
+
if (copy_from_user(&info, argp, sizeof(info)))
return -EFAULT;
- ublk_dump_dev_info(&info);
+
+ if (capable(CAP_SYS_ADMIN))
+ info.flags &= ~UBLK_F_UNPRIVILEGED_DEV;
+ else if (!(info.flags & UBLK_F_UNPRIVILEGED_DEV))
+ return -EPERM;
+
+ /* the created device is always owned by current user */
+ ublk_store_owner_uid_gid(&info.owner_uid, &info.owner_gid);
+
if (header->dev_id != info.dev_id) {
pr_warn("%s: dev id not match %u %u\n",
__func__, header->dev_id, info.dev_id);
return -EINVAL;
}
+ ublk_dump_dev_info(&info);
+
ret = mutex_lock_killable(&ublk_ctl_mutex);
if (ret)
return ret;
+ ret = -EACCES;
+ if (ublks_added >= ublks_max)
+ goto out_unlock;
+
ret = -ENOMEM;
ub = kzalloc(sizeof(*ub), GFP_KERNEL);
if (!ub)
@@ -1724,33 +1800,43 @@ static inline bool ublk_idr_freed(int id)
return ptr == NULL;
}
-static int ublk_ctrl_del_dev(int idx)
+static int ublk_ctrl_del_dev(struct ublk_device **p_ub)
{
- struct ublk_device *ub;
+ struct ublk_device *ub = *p_ub;
+ int idx = ub->ub_number;
int ret;
ret = mutex_lock_killable(&ublk_ctl_mutex);
if (ret)
return ret;
- ub = ublk_get_device_from_id(idx);
- if (ub) {
+ if (!test_bit(UB_STATE_DELETED, &ub->state)) {
ublk_remove(ub);
- ublk_put_device(ub);
- ret = 0;
- } else {
- ret = -ENODEV;
+ set_bit(UB_STATE_DELETED, &ub->state);
}
+ /* Mark the reference as consumed */
+ *p_ub = NULL;
+ ublk_put_device(ub);
+ mutex_unlock(&ublk_ctl_mutex);
+
/*
* Wait until the idr is removed, then it can be reused after
* DEL_DEV command is returned.
+ *
+ * If we returns because of user interrupt, future delete command
+ * may come:
+ *
+ * - the device number isn't freed, this device won't or needn't
+ * be deleted again, since UB_STATE_DELETED is set, and device
+ * will be released after the last reference is dropped
+ *
+ * - the device number is freed already, we will not find this
+ * device via ublk_get_device_from_id()
*/
- if (!ret)
- wait_event(ublk_idr_wq, ublk_idr_freed(idx));
- mutex_unlock(&ublk_ctl_mutex);
+ wait_event_interruptible(ublk_idr_wq, ublk_idr_freed(idx));
- return ret;
+ return 0;
}
static inline void ublk_ctrl_cmd_dump(struct io_uring_cmd *cmd)
@@ -1762,50 +1848,52 @@ static inline void ublk_ctrl_cmd_dump(struct io_uring_cmd *cmd)
header->data[0], header->addr, header->len);
}
-static int ublk_ctrl_stop_dev(struct io_uring_cmd *cmd)
+static int ublk_ctrl_stop_dev(struct ublk_device *ub)
{
- struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
- struct ublk_device *ub;
-
- ub = ublk_get_device_from_id(header->dev_id);
- if (!ub)
- return -EINVAL;
-
ublk_stop_dev(ub);
cancel_work_sync(&ub->stop_work);
cancel_work_sync(&ub->quiesce_work);
- ublk_put_device(ub);
return 0;
}
-static int ublk_ctrl_get_dev_info(struct io_uring_cmd *cmd)
+static int ublk_ctrl_get_dev_info(struct ublk_device *ub,
+ struct io_uring_cmd *cmd)
{
struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
void __user *argp = (void __user *)(unsigned long)header->addr;
- struct ublk_device *ub;
- int ret = 0;
if (header->len < sizeof(struct ublksrv_ctrl_dev_info) || !header->addr)
return -EINVAL;
- ub = ublk_get_device_from_id(header->dev_id);
- if (!ub)
- return -EINVAL;
-
if (copy_to_user(argp, &ub->dev_info, sizeof(ub->dev_info)))
- ret = -EFAULT;
- ublk_put_device(ub);
+ return -EFAULT;
- return ret;
+ return 0;
+}
+
+/* TYPE_DEVT is readonly, so fill it up before returning to userspace */
+static void ublk_ctrl_fill_params_devt(struct ublk_device *ub)
+{
+ ub->params.devt.char_major = MAJOR(ub->cdev_dev.devt);
+ ub->params.devt.char_minor = MINOR(ub->cdev_dev.devt);
+
+ if (ub->ub_disk) {
+ ub->params.devt.disk_major = MAJOR(disk_devt(ub->ub_disk));
+ ub->params.devt.disk_minor = MINOR(disk_devt(ub->ub_disk));
+ } else {
+ ub->params.devt.disk_major = 0;
+ ub->params.devt.disk_minor = 0;
+ }
+ ub->params.types |= UBLK_PARAM_TYPE_DEVT;
}
-static int ublk_ctrl_get_params(struct io_uring_cmd *cmd)
+static int ublk_ctrl_get_params(struct ublk_device *ub,
+ struct io_uring_cmd *cmd)
{
struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
void __user *argp = (void __user *)(unsigned long)header->addr;
struct ublk_params_header ph;
- struct ublk_device *ub;
int ret;
if (header->len <= sizeof(ph) || !header->addr)
@@ -1820,27 +1908,23 @@ static int ublk_ctrl_get_params(struct io_uring_cmd *cmd)
if (ph.len > sizeof(struct ublk_params))
ph.len = sizeof(struct ublk_params);
- ub = ublk_get_device_from_id(header->dev_id);
- if (!ub)
- return -EINVAL;
-
mutex_lock(&ub->mutex);
+ ublk_ctrl_fill_params_devt(ub);
if (copy_to_user(argp, &ub->params, ph.len))
ret = -EFAULT;
else
ret = 0;
mutex_unlock(&ub->mutex);
- ublk_put_device(ub);
return ret;
}
-static int ublk_ctrl_set_params(struct io_uring_cmd *cmd)
+static int ublk_ctrl_set_params(struct ublk_device *ub,
+ struct io_uring_cmd *cmd)
{
struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
void __user *argp = (void __user *)(unsigned long)header->addr;
struct ublk_params_header ph;
- struct ublk_device *ub;
int ret = -EFAULT;
if (header->len <= sizeof(ph) || !header->addr)
@@ -1855,10 +1939,6 @@ static int ublk_ctrl_set_params(struct io_uring_cmd *cmd)
if (ph.len > sizeof(struct ublk_params))
ph.len = sizeof(struct ublk_params);
- ub = ublk_get_device_from_id(header->dev_id);
- if (!ub)
- return -EINVAL;
-
/* parameters can only be changed when device isn't live */
mutex_lock(&ub->mutex);
if (ub->dev_info.state == UBLK_S_DEV_LIVE) {
@@ -1871,7 +1951,6 @@ static int ublk_ctrl_set_params(struct io_uring_cmd *cmd)
ret = ublk_validate_params(ub);
}
mutex_unlock(&ub->mutex);
- ublk_put_device(ub);
return ret;
}
@@ -1898,17 +1977,13 @@ static void ublk_queue_reinit(struct ublk_device *ub, struct ublk_queue *ubq)
}
}
-static int ublk_ctrl_start_recovery(struct io_uring_cmd *cmd)
+static int ublk_ctrl_start_recovery(struct ublk_device *ub,
+ struct io_uring_cmd *cmd)
{
struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
- struct ublk_device *ub;
int ret = -EINVAL;
int i;
- ub = ublk_get_device_from_id(header->dev_id);
- if (!ub)
- return ret;
-
mutex_lock(&ub->mutex);
if (!ublk_can_use_recovery(ub))
goto out_unlock;
@@ -1936,25 +2011,21 @@ static int ublk_ctrl_start_recovery(struct io_uring_cmd *cmd)
/* set to NULL, otherwise new ubq_daemon cannot mmap the io_cmd_buf */
ub->mm = NULL;
ub->nr_queues_ready = 0;
+ ub->nr_privileged_daemon = 0;
init_completion(&ub->completion);
ret = 0;
out_unlock:
mutex_unlock(&ub->mutex);
- ublk_put_device(ub);
return ret;
}
-static int ublk_ctrl_end_recovery(struct io_uring_cmd *cmd)
+static int ublk_ctrl_end_recovery(struct ublk_device *ub,
+ struct io_uring_cmd *cmd)
{
struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
int ublksrv_pid = (int)header->data[0];
- struct ublk_device *ub;
int ret = -EINVAL;
- ub = ublk_get_device_from_id(header->dev_id);
- if (!ub)
- return ret;
-
pr_devel("%s: Waiting for new ubq_daemons(nr: %d) are ready, dev id %d...\n",
__func__, ub->dev_info.nr_hw_queues, header->dev_id);
/* wait until new ubq_daemon sending all FETCH_REQ */
@@ -1982,7 +2053,115 @@ static int ublk_ctrl_end_recovery(struct io_uring_cmd *cmd)
ret = 0;
out_unlock:
mutex_unlock(&ub->mutex);
- ublk_put_device(ub);
+ return ret;
+}
+
+/*
+ * All control commands are sent via /dev/ublk-control, so we have to check
+ * the destination device's permission
+ */
+static int ublk_char_dev_permission(struct ublk_device *ub,
+ const char *dev_path, int mask)
+{
+ int err;
+ struct path path;
+ struct kstat stat;
+
+ err = kern_path(dev_path, LOOKUP_FOLLOW, &path);
+ if (err)
+ return err;
+
+ err = vfs_getattr(&path, &stat, STATX_TYPE, AT_STATX_SYNC_AS_STAT);
+ if (err)
+ goto exit;
+
+ err = -EPERM;
+ if (stat.rdev != ub->cdev_dev.devt || !S_ISCHR(stat.mode))
+ goto exit;
+
+ err = inode_permission(&nop_mnt_idmap,
+ d_backing_inode(path.dentry), mask);
+exit:
+ path_put(&path);
+ return err;
+}
+
+static int ublk_ctrl_uring_cmd_permission(struct ublk_device *ub,
+ struct io_uring_cmd *cmd)
+{
+ struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
+ bool unprivileged = ub->dev_info.flags & UBLK_F_UNPRIVILEGED_DEV;
+ void __user *argp = (void __user *)(unsigned long)header->addr;
+ char *dev_path = NULL;
+ int ret = 0;
+ int mask;
+
+ if (!unprivileged) {
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ /*
+ * The new added command of UBLK_CMD_GET_DEV_INFO2 includes
+ * char_dev_path in payload too, since userspace may not
+ * know if the specified device is created as unprivileged
+ * mode.
+ */
+ if (cmd->cmd_op != UBLK_CMD_GET_DEV_INFO2)
+ return 0;
+ }
+
+ /*
+ * User has to provide the char device path for unprivileged ublk
+ *
+ * header->addr always points to the dev path buffer, and
+ * header->dev_path_len records length of dev path buffer.
+ */
+ if (!header->dev_path_len || header->dev_path_len > PATH_MAX)
+ return -EINVAL;
+
+ if (header->len < header->dev_path_len)
+ return -EINVAL;
+
+ dev_path = kmalloc(header->dev_path_len + 1, GFP_KERNEL);
+ if (!dev_path)
+ return -ENOMEM;
+
+ ret = -EFAULT;
+ if (copy_from_user(dev_path, argp, header->dev_path_len))
+ goto exit;
+ dev_path[header->dev_path_len] = 0;
+
+ ret = -EINVAL;
+ switch (cmd->cmd_op) {
+ case UBLK_CMD_GET_DEV_INFO:
+ case UBLK_CMD_GET_DEV_INFO2:
+ case UBLK_CMD_GET_QUEUE_AFFINITY:
+ case UBLK_CMD_GET_PARAMS:
+ mask = MAY_READ;
+ break;
+ case UBLK_CMD_START_DEV:
+ case UBLK_CMD_STOP_DEV:
+ case UBLK_CMD_ADD_DEV:
+ case UBLK_CMD_DEL_DEV:
+ case UBLK_CMD_SET_PARAMS:
+ case UBLK_CMD_START_USER_RECOVERY:
+ case UBLK_CMD_END_USER_RECOVERY:
+ mask = MAY_READ | MAY_WRITE;
+ break;
+ default:
+ goto exit;
+ }
+
+ ret = ublk_char_dev_permission(ub, dev_path, mask);
+ if (!ret) {
+ header->len -= header->dev_path_len;
+ header->addr += header->dev_path_len;
+ }
+ pr_devel("%s: dev id %d cmd_op %x uid %d gid %d path %s ret %d\n",
+ __func__, ub->ub_number, cmd->cmd_op,
+ ub->dev_info.owner_uid, ub->dev_info.owner_gid,
+ dev_path, ret);
+exit:
+ kfree(dev_path);
return ret;
}
@@ -1990,6 +2169,7 @@ static int ublk_ctrl_uring_cmd(struct io_uring_cmd *cmd,
unsigned int issue_flags)
{
struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
+ struct ublk_device *ub = NULL;
int ret = -EINVAL;
if (issue_flags & IO_URING_F_NONBLOCK)
@@ -2000,45 +2180,61 @@ static int ublk_ctrl_uring_cmd(struct io_uring_cmd *cmd,
if (!(issue_flags & IO_URING_F_SQE128))
goto out;
- ret = -EPERM;
- if (!capable(CAP_SYS_ADMIN))
- goto out;
+ if (cmd->cmd_op != UBLK_CMD_ADD_DEV) {
+ ret = -ENODEV;
+ ub = ublk_get_device_from_id(header->dev_id);
+ if (!ub)
+ goto out;
+
+ ret = ublk_ctrl_uring_cmd_permission(ub, cmd);
+ } else {
+ /* ADD_DEV permission check is done in command handler */
+ ret = 0;
+ }
+
+ if (ret)
+ goto put_dev;
- ret = -ENODEV;
switch (cmd->cmd_op) {
case UBLK_CMD_START_DEV:
- ret = ublk_ctrl_start_dev(cmd);
+ ret = ublk_ctrl_start_dev(ub, cmd);
break;
case UBLK_CMD_STOP_DEV:
- ret = ublk_ctrl_stop_dev(cmd);
+ ret = ublk_ctrl_stop_dev(ub);
break;
case UBLK_CMD_GET_DEV_INFO:
- ret = ublk_ctrl_get_dev_info(cmd);
+ case UBLK_CMD_GET_DEV_INFO2:
+ ret = ublk_ctrl_get_dev_info(ub, cmd);
break;
case UBLK_CMD_ADD_DEV:
ret = ublk_ctrl_add_dev(cmd);
break;
case UBLK_CMD_DEL_DEV:
- ret = ublk_ctrl_del_dev(header->dev_id);
+ ret = ublk_ctrl_del_dev(&ub);
break;
case UBLK_CMD_GET_QUEUE_AFFINITY:
- ret = ublk_ctrl_get_queue_affinity(cmd);
+ ret = ublk_ctrl_get_queue_affinity(ub, cmd);
break;
case UBLK_CMD_GET_PARAMS:
- ret = ublk_ctrl_get_params(cmd);
+ ret = ublk_ctrl_get_params(ub, cmd);
break;
case UBLK_CMD_SET_PARAMS:
- ret = ublk_ctrl_set_params(cmd);
+ ret = ublk_ctrl_set_params(ub, cmd);
break;
case UBLK_CMD_START_USER_RECOVERY:
- ret = ublk_ctrl_start_recovery(cmd);
+ ret = ublk_ctrl_start_recovery(ub, cmd);
break;
case UBLK_CMD_END_USER_RECOVERY:
- ret = ublk_ctrl_end_recovery(cmd);
+ ret = ublk_ctrl_end_recovery(ub, cmd);
break;
default:
+ ret = -ENOTSUPP;
break;
}
+
+ put_dev:
+ if (ub)
+ ublk_put_device(ub);
out:
io_uring_cmd_done(cmd, ret, 0);
pr_devel("%s: cmd done ret %d cmd_op %x, dev id %d qid %d\n",
@@ -2105,5 +2301,8 @@ static void __exit ublk_exit(void)
module_init(ublk_init);
module_exit(ublk_exit);
+module_param(ublks_max, int, 0444);
+MODULE_PARM_DESC(ublks_max, "max number of ublk devices allowed to add(default: 64)");
+
MODULE_AUTHOR("Ming Lei <ming.lei@redhat.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 6a77fa917428..dc6e9b989910 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -170,9 +170,7 @@ static int virtblk_setup_discard_write_zeroes_erase(struct request *req, bool un
WARN_ON_ONCE(n != segments);
- req->special_vec.bv_page = virt_to_page(range);
- req->special_vec.bv_offset = offset_in_page(range);
- req->special_vec.bv_len = sizeof(*range) * segments;
+ bvec_set_virt(&req->special_vec, range, sizeof(*range) * segments);
req->rq_flags |= RQF_SPECIAL_PAYLOAD;
return 0;
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index e290d6d97047..aa490da3cef2 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -190,7 +190,7 @@ static inline bool valid_io_request(struct zram *zram,
end = start + (size >> SECTOR_SHIFT);
bound = zram->disksize >> SECTOR_SHIFT;
- /* out of range range */
+ /* out of range */
if (unlikely(start >= bound || end > bound || start > end))
return false;
@@ -703,9 +703,7 @@ static ssize_t writeback_store(struct device *dev,
for (; nr_pages != 0; index++, nr_pages--) {
struct bio_vec bvec;
- bvec.bv_page = page;
- bvec.bv_len = PAGE_SIZE;
- bvec.bv_offset = 0;
+ bvec_set_page(&bvec, page, PAGE_SIZE, 0);
spin_lock(&zram->wb_limit_lock);
if (zram->wb_limit_enable && !zram->bd_wb_limit) {
@@ -1140,7 +1138,7 @@ static ssize_t recomp_algorithm_store(struct device *dev,
while (*args) {
args = next_arg(args, &param, &val);
- if (!*val)
+ if (!val || !*val)
return -EINVAL;
if (!strcmp(param, "algo")) {
@@ -1380,12 +1378,9 @@ out:
static int zram_bvec_read_from_bdev(struct zram *zram, struct page *page,
u32 index, struct bio *bio, bool partial_io)
{
- struct bio_vec bvec = {
- .bv_page = page,
- .bv_len = PAGE_SIZE,
- .bv_offset = 0,
- };
+ struct bio_vec bvec;
+ bvec_set_page(&bvec, page, PAGE_SIZE, 0);
return read_from_bdev(zram, &bvec, zram_get_element(zram, index), bio,
partial_io);
}
@@ -1453,10 +1448,6 @@ static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index,
/* Slot should be unlocked before the function call */
zram_slot_unlock(zram, index);
- /* A null bio means rw_page was used, we must fallback to bio */
- if (!bio)
- return -EOPNOTSUPP;
-
ret = zram_bvec_read_from_bdev(zram, page, index, bio,
partial_io);
}
@@ -1652,9 +1643,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
memcpy_from_bvec(dst + offset, bvec);
kunmap_atomic(dst);
- vec.bv_page = page;
- vec.bv_len = PAGE_SIZE;
- vec.bv_offset = 0;
+ bvec_set_page(&vec, page, PAGE_SIZE, 0);
}
ret = __zram_bvec_write(zram, &vec, index, bio);
@@ -1824,7 +1813,7 @@ static ssize_t recompress_store(struct device *dev,
while (*args) {
args = next_arg(args, &param, &val);
- if (!*val)
+ if (!val || !*val)
return -EINVAL;
if (!strcmp(param, "type")) {
@@ -2081,61 +2070,6 @@ static void zram_slot_free_notify(struct block_device *bdev,
zram_slot_unlock(zram, index);
}
-static int zram_rw_page(struct block_device *bdev, sector_t sector,
- struct page *page, enum req_op op)
-{
- int offset, ret;
- u32 index;
- struct zram *zram;
- struct bio_vec bv;
- unsigned long start_time;
-
- if (PageTransHuge(page))
- return -ENOTSUPP;
- zram = bdev->bd_disk->private_data;
-
- if (!valid_io_request(zram, sector, PAGE_SIZE)) {
- atomic64_inc(&zram->stats.invalid_io);
- ret = -EINVAL;
- goto out;
- }
-
- index = sector >> SECTORS_PER_PAGE_SHIFT;
- offset = (sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;
-
- bv.bv_page = page;
- bv.bv_len = PAGE_SIZE;
- bv.bv_offset = 0;
-
- start_time = bdev_start_io_acct(bdev->bd_disk->part0,
- SECTORS_PER_PAGE, op, jiffies);
- ret = zram_bvec_rw(zram, &bv, index, offset, op, NULL);
- bdev_end_io_acct(bdev->bd_disk->part0, op, start_time);
-out:
- /*
- * If I/O fails, just return error(ie, non-zero) without
- * calling page_endio.
- * It causes resubmit the I/O with bio request by upper functions
- * of rw_page(e.g., swap_readpage, __swap_writepage) and
- * bio->bi_end_io does things to handle the error
- * (e.g., SetPageError, set_page_dirty and extra works).
- */
- if (unlikely(ret < 0))
- return ret;
-
- switch (ret) {
- case 0:
- page_endio(page, op_is_write(op), 0);
- break;
- case 1:
- ret = 0;
- break;
- default:
- WARN_ON(1);
- }
- return ret;
-}
-
static void zram_destroy_comps(struct zram *zram)
{
u32 prio;
@@ -2290,7 +2224,6 @@ static const struct block_device_operations zram_devops = {
.open = zram_open,
.submit_bio = zram_submit_bio,
.swap_slot_free_notify = zram_slot_free_notify,
- .rw_page = zram_rw_page,
.owner = THIS_MODULE
};
@@ -2385,10 +2318,11 @@ static int zram_add(void)
zram->disk->private_data = zram;
snprintf(zram->disk->disk_name, 16, "zram%d", device_id);
- /* Actual capacity set using syfs (/sys/block/zram<id>/disksize */
+ /* Actual capacity set using sysfs (/sys/block/zram<id>/disksize */
set_capacity(zram->disk, 0);
/* zram devices sort of resembles non-rotational disks */
blk_queue_flag_set(QUEUE_FLAG_NONROT, zram->disk->queue);
+ blk_queue_flag_set(QUEUE_FLAG_SYNCHRONOUS, zram->disk->queue);
blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, zram->disk->queue);
/*
diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c
index d4e2cb9a4eb4..bede8b005594 100644
--- a/drivers/bluetooth/btintel.c
+++ b/drivers/bluetooth/btintel.c
@@ -9,6 +9,7 @@
#include <linux/module.h>
#include <linux/firmware.h>
#include <linux/regmap.h>
+#include <linux/acpi.h>
#include <asm/unaligned.h>
#include <net/bluetooth/bluetooth.h>
@@ -24,6 +25,9 @@
#define ECDSA_OFFSET 644
#define ECDSA_HEADER_LEN 320
+#define BTINTEL_PPAG_NAME "PPAG"
+#define BTINTEL_PPAG_PREFIX "\\_SB_.PCI0.XHCI.RHUB"
+
#define CMD_WRITE_BOOT_PARAMS 0xfc0e
struct cmd_write_boot_params {
__le32 boot_addr;
@@ -1278,6 +1282,63 @@ static int btintel_read_debug_features(struct hci_dev *hdev,
return 0;
}
+static acpi_status btintel_ppag_callback(acpi_handle handle, u32 lvl, void *data,
+ void **ret)
+{
+ acpi_status status;
+ size_t len;
+ struct btintel_ppag *ppag = data;
+ union acpi_object *p, *elements;
+ struct acpi_buffer string = {ACPI_ALLOCATE_BUFFER, NULL};
+ struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
+ struct hci_dev *hdev = ppag->hdev;
+
+ status = acpi_get_name(handle, ACPI_FULL_PATHNAME, &string);
+ if (ACPI_FAILURE(status)) {
+ bt_dev_warn(hdev, "ACPI Failure: %s", acpi_format_exception(status));
+ return status;
+ }
+
+ if (strncmp(BTINTEL_PPAG_PREFIX, string.pointer,
+ strlen(BTINTEL_PPAG_PREFIX))) {
+ kfree(string.pointer);
+ return AE_OK;
+ }
+
+ len = strlen(string.pointer);
+ if (strncmp((char *)string.pointer + len - 4, BTINTEL_PPAG_NAME, 4)) {
+ kfree(string.pointer);
+ return AE_OK;
+ }
+ kfree(string.pointer);
+
+ status = acpi_evaluate_object(handle, NULL, NULL, &buffer);
+ if (ACPI_FAILURE(status)) {
+ bt_dev_warn(hdev, "ACPI Failure: %s", acpi_format_exception(status));
+ return status;
+ }
+
+ p = buffer.pointer;
+ ppag = (struct btintel_ppag *)data;
+
+ if (p->type != ACPI_TYPE_PACKAGE || p->package.count != 2) {
+ kfree(buffer.pointer);
+ bt_dev_warn(hdev, "Invalid object type: %d or package count: %d",
+ p->type, p->package.count);
+ return AE_ERROR;
+ }
+
+ elements = p->package.elements;
+
+ /* PPAG table is located at element[1] */
+ p = &elements[1];
+
+ ppag->domain = (u32)p->package.elements[0].integer.value;
+ ppag->mode = (u32)p->package.elements[1].integer.value;
+ kfree(buffer.pointer);
+ return AE_CTRL_TERMINATE;
+}
+
static int btintel_set_debug_features(struct hci_dev *hdev,
const struct intel_debug_features *features)
{
@@ -2251,6 +2312,58 @@ error:
return err;
}
+static void btintel_set_ppag(struct hci_dev *hdev, struct intel_version_tlv *ver)
+{
+ acpi_status status;
+ struct btintel_ppag ppag;
+ struct sk_buff *skb;
+ struct btintel_loc_aware_reg ppag_cmd;
+
+ /* PPAG is not supported if CRF is HrP2, Jfp2, JfP1 */
+ switch (ver->cnvr_top & 0xFFF) {
+ case 0x504: /* Hrp2 */
+ case 0x202: /* Jfp2 */
+ case 0x201: /* Jfp1 */
+ return;
+ }
+
+ memset(&ppag, 0, sizeof(ppag));
+
+ ppag.hdev = hdev;
+ status = acpi_walk_namespace(ACPI_TYPE_ANY, ACPI_ROOT_OBJECT,
+ ACPI_UINT32_MAX, NULL,
+ btintel_ppag_callback, &ppag, NULL);
+
+ if (ACPI_FAILURE(status)) {
+ /* Do not log warning message if ACPI entry is not found */
+ if (status == AE_NOT_FOUND)
+ return;
+ bt_dev_warn(hdev, "PPAG: ACPI Failure: %s", acpi_format_exception(status));
+ return;
+ }
+
+ if (ppag.domain != 0x12) {
+ bt_dev_warn(hdev, "PPAG-BT Domain disabled");
+ return;
+ }
+
+ /* PPAG mode, BIT0 = 0 Disabled, BIT0 = 1 Enabled */
+ if (!(ppag.mode & BIT(0))) {
+ bt_dev_dbg(hdev, "PPAG disabled");
+ return;
+ }
+
+ ppag_cmd.mcc = cpu_to_le32(0);
+ ppag_cmd.sel = cpu_to_le32(0); /* 0 - Enable , 1 - Disable, 2 - Testing mode */
+ ppag_cmd.delta = cpu_to_le32(0);
+ skb = __hci_cmd_sync(hdev, 0xfe19, sizeof(ppag_cmd), &ppag_cmd, HCI_CMD_TIMEOUT);
+ if (IS_ERR(skb)) {
+ bt_dev_warn(hdev, "Failed to send PPAG Enable (%ld)", PTR_ERR(skb));
+ return;
+ }
+ kfree_skb(skb);
+}
+
static int btintel_bootloader_setup_tlv(struct hci_dev *hdev,
struct intel_version_tlv *ver)
{
@@ -2297,6 +2410,9 @@ static int btintel_bootloader_setup_tlv(struct hci_dev *hdev,
hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);
+ /* Set PPAG feature */
+ btintel_set_ppag(hdev, ver);
+
/* Read the Intel version information after loading the FW */
err = btintel_read_version_tlv(hdev, &new_ver);
if (err)
diff --git a/drivers/bluetooth/btintel.h b/drivers/bluetooth/btintel.h
index e0060e58573c..8e7da877efae 100644
--- a/drivers/bluetooth/btintel.h
+++ b/drivers/bluetooth/btintel.h
@@ -137,6 +137,19 @@ struct intel_offload_use_cases {
__u8 preset[8];
} __packed;
+/* structure to store the PPAG data read from ACPI table */
+struct btintel_ppag {
+ u32 domain;
+ u32 mode;
+ struct hci_dev *hdev;
+};
+
+struct btintel_loc_aware_reg {
+ __le32 mcc;
+ __le32 sel;
+ __le32 delta;
+} __packed;
+
#define INTEL_HW_PLATFORM(cnvx_bt) ((u8)(((cnvx_bt) & 0x0000ff00) >> 8))
#define INTEL_HW_VARIANT(cnvx_bt) ((u8)(((cnvx_bt) & 0x003f0000) >> 16))
#define INTEL_CNVX_TOP_TYPE(cnvx_top) ((cnvx_top) & 0x00000fff)
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 2ad4efdd9e40..18bc94718711 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -64,6 +64,7 @@ static struct usb_driver btusb_driver;
#define BTUSB_INTEL_BROKEN_SHUTDOWN_LED BIT(24)
#define BTUSB_INTEL_BROKEN_INITIAL_NCMD BIT(25)
#define BTUSB_INTEL_NO_WBS_SUPPORT BIT(26)
+#define BTUSB_ACTIONS_SEMI BIT(27)
static const struct usb_device_id btusb_table[] = {
/* Generic Bluetooth USB device */
@@ -492,6 +493,10 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_VENDOR_AND_INTERFACE_INFO(0x8087, 0xe0, 0x01, 0x01),
.driver_info = BTUSB_IGNORE },
+ /* Realtek 8821CE Bluetooth devices */
+ { USB_DEVICE(0x13d3, 0x3529), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
+
/* Realtek 8822CE Bluetooth devices */
{ USB_DEVICE(0x0bda, 0xb00c), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
@@ -566,6 +571,9 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x0489, 0xe0e0), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH |
BTUSB_VALID_LE_STATES },
+ { USB_DEVICE(0x0489, 0xe0f2), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH |
+ BTUSB_VALID_LE_STATES },
{ USB_DEVICE(0x04ca, 0x3802), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH |
BTUSB_VALID_LE_STATES },
@@ -677,6 +685,9 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x0cb5, 0xc547), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
+ /* Actions Semiconductor ATS2851 based devices */
+ { USB_DEVICE(0x10d7, 0xb012), .driver_info = BTUSB_ACTIONS_SEMI },
+
/* Silicon Wave based devices */
{ USB_DEVICE(0x0c10, 0x0000), .driver_info = BTUSB_SWAVE },
@@ -4098,6 +4109,11 @@ static int btusb_probe(struct usb_interface *intf,
set_bit(BTUSB_USE_ALT3_FOR_WBS, &data->flags);
}
+ if (id->driver_info & BTUSB_ACTIONS_SEMI) {
+ /* Support is advertised, but not implemented */
+ set_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks);
+ }
+
if (!reset)
set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index bbe9cf1cae27..3df8c3606e93 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -128,13 +128,13 @@ struct qca_memdump_event_hdr {
__u8 evt;
__u8 plen;
__u16 opcode;
- __u16 seq_no;
+ __le16 seq_no;
__u8 reserved;
} __packed;
struct qca_dump_size {
- u32 dump_size;
+ __le32 dump_size;
} __packed;
struct qca_data {
@@ -1588,10 +1588,11 @@ static bool qca_wakeup(struct hci_dev *hdev)
struct hci_uart *hu = hci_get_drvdata(hdev);
bool wakeup;
- /* UART driver handles the interrupt from BT SoC.So we need to use
- * device handle of UART driver to get the status of device may wakeup.
+ /* BT SoC attached through the serial bus is handled by the serdev driver.
+ * So we need to use the device handle of the serdev driver to get the
+ * status of device may wakeup.
*/
- wakeup = device_may_wakeup(hu->serdev->ctrl->dev.parent);
+ wakeup = device_may_wakeup(&hu->serdev->ctrl->dev);
bt_dev_dbg(hu->hdev, "wakeup status : %d", wakeup);
return wakeup;
diff --git a/drivers/bus/fsl-mc/fsl-mc-bus.c b/drivers/bus/fsl-mc/fsl-mc-bus.c
index 774f307844b4..36cb091a33b4 100644
--- a/drivers/bus/fsl-mc/fsl-mc-bus.c
+++ b/drivers/bus/fsl-mc/fsl-mc-bus.c
@@ -124,9 +124,9 @@ out:
/*
* fsl_mc_bus_uevent - callback invoked when a device is added
*/
-static int fsl_mc_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
+static int fsl_mc_bus_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
- struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
+ const struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
if (add_uevent_var(env, "MODALIAS=fsl-mc:v%08Xd%s",
mc_dev->obj_desc.vendor,
diff --git a/drivers/bus/mhi/Makefile b/drivers/bus/mhi/Makefile
index 46981331b38f..354204b0ef3a 100644
--- a/drivers/bus/mhi/Makefile
+++ b/drivers/bus/mhi/Makefile
@@ -1,5 +1,5 @@
# Host MHI stack
-obj-y += host/
+obj-$(CONFIG_MHI_BUS) += host/
# Endpoint MHI stack
-obj-y += ep/
+obj-$(CONFIG_MHI_BUS_EP) += ep/
diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c
index 1dc8a3557a46..a6a48e515478 100644
--- a/drivers/bus/mhi/ep/main.c
+++ b/drivers/bus/mhi/ep/main.c
@@ -123,6 +123,13 @@ static int mhi_ep_process_cmd_ring(struct mhi_ep_ring *ring, struct mhi_ring_ele
int ret;
ch_id = MHI_TRE_GET_CMD_CHID(el);
+
+ /* Check if the channel is supported by the controller */
+ if ((ch_id >= mhi_cntrl->max_chan) || !mhi_cntrl->mhi_chan[ch_id].name) {
+ dev_err(dev, "Channel (%u) not supported!\n", ch_id);
+ return -ENODEV;
+ }
+
mhi_chan = &mhi_cntrl->mhi_chan[ch_id];
ch_ring = &mhi_cntrl->mhi_chan[ch_id].ring;
@@ -196,9 +203,11 @@ static int mhi_ep_process_cmd_ring(struct mhi_ep_ring *ring, struct mhi_ring_ele
mhi_ep_mmio_disable_chdb(mhi_cntrl, ch_id);
/* Send channel disconnect status to client drivers */
- result.transaction_status = -ENOTCONN;
- result.bytes_xferd = 0;
- mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
+ if (mhi_chan->xfer_cb) {
+ result.transaction_status = -ENOTCONN;
+ result.bytes_xferd = 0;
+ mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
+ }
/* Set channel state to STOP */
mhi_chan->state = MHI_CH_STATE_STOP;
@@ -217,7 +226,7 @@ static int mhi_ep_process_cmd_ring(struct mhi_ep_ring *ring, struct mhi_ring_ele
mutex_unlock(&mhi_chan->lock);
break;
case MHI_PKT_TYPE_RESET_CHAN_CMD:
- dev_dbg(dev, "Received STOP command for channel (%u)\n", ch_id);
+ dev_dbg(dev, "Received RESET command for channel (%u)\n", ch_id);
if (!ch_ring->started) {
dev_err(dev, "Channel (%u) not opened\n", ch_id);
return -ENODEV;
@@ -228,9 +237,11 @@ static int mhi_ep_process_cmd_ring(struct mhi_ep_ring *ring, struct mhi_ring_ele
mhi_ep_ring_reset(mhi_cntrl, ch_ring);
/* Send channel disconnect status to client driver */
- result.transaction_status = -ENOTCONN;
- result.bytes_xferd = 0;
- mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
+ if (mhi_chan->xfer_cb) {
+ result.transaction_status = -ENOTCONN;
+ result.bytes_xferd = 0;
+ mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
+ }
/* Set channel state to DISABLED */
mhi_chan->state = MHI_CH_STATE_DISABLED;
@@ -719,24 +730,37 @@ static void mhi_ep_ch_ring_worker(struct work_struct *work)
list_del(&itr->node);
ring = itr->ring;
+ chan = &mhi_cntrl->mhi_chan[ring->ch_id];
+ mutex_lock(&chan->lock);
+
+ /*
+ * The ring could've stopped while we waited to grab the (chan->lock), so do
+ * a sanity check before going further.
+ */
+ if (!ring->started) {
+ mutex_unlock(&chan->lock);
+ kfree(itr);
+ continue;
+ }
+
/* Update the write offset for the ring */
ret = mhi_ep_update_wr_offset(ring);
if (ret) {
dev_err(dev, "Error updating write offset for ring\n");
+ mutex_unlock(&chan->lock);
kfree(itr);
continue;
}
/* Sanity check to make sure there are elements in the ring */
if (ring->rd_offset == ring->wr_offset) {
+ mutex_unlock(&chan->lock);
kfree(itr);
continue;
}
el = &ring->ring_cache[ring->rd_offset];
- chan = &mhi_cntrl->mhi_chan[ring->ch_id];
- mutex_lock(&chan->lock);
dev_dbg(dev, "Processing the ring for channel (%u)\n", ring->ch_id);
ret = mhi_ep_process_ch_ring(ring, el);
if (ret) {
@@ -973,44 +997,25 @@ static void mhi_ep_abort_transfer(struct mhi_ep_cntrl *mhi_cntrl)
static void mhi_ep_reset_worker(struct work_struct *work)
{
struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, reset_work);
- struct device *dev = &mhi_cntrl->mhi_dev->dev;
enum mhi_state cur_state;
- int ret;
- mhi_ep_abort_transfer(mhi_cntrl);
+ mhi_ep_power_down(mhi_cntrl);
+
+ mutex_lock(&mhi_cntrl->state_lock);
- spin_lock_bh(&mhi_cntrl->state_lock);
/* Reset MMIO to signal host that the MHI_RESET is completed in endpoint */
mhi_ep_mmio_reset(mhi_cntrl);
cur_state = mhi_cntrl->mhi_state;
- spin_unlock_bh(&mhi_cntrl->state_lock);
/*
* Only proceed further if the reset is due to SYS_ERR. The host will
* issue reset during shutdown also and we don't need to do re-init in
* that case.
*/
- if (cur_state == MHI_STATE_SYS_ERR) {
- mhi_ep_mmio_init(mhi_cntrl);
-
- /* Set AMSS EE before signaling ready state */
- mhi_ep_mmio_set_env(mhi_cntrl, MHI_EE_AMSS);
-
- /* All set, notify the host that we are ready */
- ret = mhi_ep_set_ready_state(mhi_cntrl);
- if (ret)
- return;
-
- dev_dbg(dev, "READY state notification sent to the host\n");
-
- ret = mhi_ep_enable(mhi_cntrl);
- if (ret) {
- dev_err(dev, "Failed to enable MHI endpoint: %d\n", ret);
- return;
- }
+ if (cur_state == MHI_STATE_SYS_ERR)
+ mhi_ep_power_up(mhi_cntrl);
- enable_irq(mhi_cntrl->irq);
- }
+ mutex_unlock(&mhi_cntrl->state_lock);
}
/*
@@ -1089,11 +1094,11 @@ EXPORT_SYMBOL_GPL(mhi_ep_power_up);
void mhi_ep_power_down(struct mhi_ep_cntrl *mhi_cntrl)
{
- if (mhi_cntrl->enabled)
+ if (mhi_cntrl->enabled) {
mhi_ep_abort_transfer(mhi_cntrl);
-
- kfree(mhi_cntrl->mhi_event);
- disable_irq(mhi_cntrl->irq);
+ kfree(mhi_cntrl->mhi_event);
+ disable_irq(mhi_cntrl->irq);
+ }
}
EXPORT_SYMBOL_GPL(mhi_ep_power_down);
@@ -1119,6 +1124,7 @@ void mhi_ep_suspend_channels(struct mhi_ep_cntrl *mhi_cntrl)
dev_dbg(&mhi_chan->mhi_dev->dev, "Suspending channel\n");
/* Set channel state to SUSPENDED */
+ mhi_chan->state = MHI_CH_STATE_SUSPENDED;
tmp &= ~CHAN_CTX_CHSTATE_MASK;
tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_SUSPENDED);
mhi_cntrl->ch_ctx_cache[i].chcfg = cpu_to_le32(tmp);
@@ -1148,6 +1154,7 @@ void mhi_ep_resume_channels(struct mhi_ep_cntrl *mhi_cntrl)
dev_dbg(&mhi_chan->mhi_dev->dev, "Resuming channel\n");
/* Set channel state to RUNNING */
+ mhi_chan->state = MHI_CH_STATE_RUNNING;
tmp &= ~CHAN_CTX_CHSTATE_MASK;
tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_RUNNING);
mhi_cntrl->ch_ctx_cache[i].chcfg = cpu_to_le32(tmp);
@@ -1381,8 +1388,8 @@ int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl,
INIT_LIST_HEAD(&mhi_cntrl->st_transition_list);
INIT_LIST_HEAD(&mhi_cntrl->ch_db_list);
- spin_lock_init(&mhi_cntrl->state_lock);
spin_lock_init(&mhi_cntrl->list_lock);
+ mutex_init(&mhi_cntrl->state_lock);
mutex_init(&mhi_cntrl->event_lock);
/* Set MHI version and AMSS EE before enumeration */
@@ -1543,9 +1550,9 @@ void mhi_ep_driver_unregister(struct mhi_ep_driver *mhi_drv)
}
EXPORT_SYMBOL_GPL(mhi_ep_driver_unregister);
-static int mhi_ep_uevent(struct device *dev, struct kobj_uevent_env *env)
+static int mhi_ep_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
- struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev);
+ const struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev);
return add_uevent_var(env, "MODALIAS=" MHI_EP_DEVICE_MODALIAS_FMT,
mhi_dev->name);
diff --git a/drivers/bus/mhi/ep/sm.c b/drivers/bus/mhi/ep/sm.c
index 3655c19e23c7..fd200b2ac0bb 100644
--- a/drivers/bus/mhi/ep/sm.c
+++ b/drivers/bus/mhi/ep/sm.c
@@ -63,24 +63,23 @@ int mhi_ep_set_m0_state(struct mhi_ep_cntrl *mhi_cntrl)
int ret;
/* If MHI is in M3, resume suspended channels */
- spin_lock_bh(&mhi_cntrl->state_lock);
+ mutex_lock(&mhi_cntrl->state_lock);
+
old_state = mhi_cntrl->mhi_state;
if (old_state == MHI_STATE_M3)
mhi_ep_resume_channels(mhi_cntrl);
ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_M0);
- spin_unlock_bh(&mhi_cntrl->state_lock);
-
if (ret) {
mhi_ep_handle_syserr(mhi_cntrl);
- return ret;
+ goto err_unlock;
}
/* Signal host that the device moved to M0 */
ret = mhi_ep_send_state_change_event(mhi_cntrl, MHI_STATE_M0);
if (ret) {
dev_err(dev, "Failed sending M0 state change event\n");
- return ret;
+ goto err_unlock;
}
if (old_state == MHI_STATE_READY) {
@@ -88,11 +87,14 @@ int mhi_ep_set_m0_state(struct mhi_ep_cntrl *mhi_cntrl)
ret = mhi_ep_send_ee_event(mhi_cntrl, MHI_EE_AMSS);
if (ret) {
dev_err(dev, "Failed sending AMSS EE event\n");
- return ret;
+ goto err_unlock;
}
}
- return 0;
+err_unlock:
+ mutex_unlock(&mhi_cntrl->state_lock);
+
+ return ret;
}
int mhi_ep_set_m3_state(struct mhi_ep_cntrl *mhi_cntrl)
@@ -100,13 +102,12 @@ int mhi_ep_set_m3_state(struct mhi_ep_cntrl *mhi_cntrl)
struct device *dev = &mhi_cntrl->mhi_dev->dev;
int ret;
- spin_lock_bh(&mhi_cntrl->state_lock);
- ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_M3);
- spin_unlock_bh(&mhi_cntrl->state_lock);
+ mutex_lock(&mhi_cntrl->state_lock);
+ ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_M3);
if (ret) {
mhi_ep_handle_syserr(mhi_cntrl);
- return ret;
+ goto err_unlock;
}
mhi_ep_suspend_channels(mhi_cntrl);
@@ -115,10 +116,13 @@ int mhi_ep_set_m3_state(struct mhi_ep_cntrl *mhi_cntrl)
ret = mhi_ep_send_state_change_event(mhi_cntrl, MHI_STATE_M3);
if (ret) {
dev_err(dev, "Failed sending M3 state change event\n");
- return ret;
+ goto err_unlock;
}
- return 0;
+err_unlock:
+ mutex_unlock(&mhi_cntrl->state_lock);
+
+ return ret;
}
int mhi_ep_set_ready_state(struct mhi_ep_cntrl *mhi_cntrl)
@@ -127,22 +131,24 @@ int mhi_ep_set_ready_state(struct mhi_ep_cntrl *mhi_cntrl)
enum mhi_state mhi_state;
int ret, is_ready;
- spin_lock_bh(&mhi_cntrl->state_lock);
+ mutex_lock(&mhi_cntrl->state_lock);
+
/* Ensure that the MHISTATUS is set to RESET by host */
mhi_state = mhi_ep_mmio_masked_read(mhi_cntrl, EP_MHISTATUS, MHISTATUS_MHISTATE_MASK);
is_ready = mhi_ep_mmio_masked_read(mhi_cntrl, EP_MHISTATUS, MHISTATUS_READY_MASK);
if (mhi_state != MHI_STATE_RESET || is_ready) {
dev_err(dev, "READY state transition failed. MHI host not in RESET state\n");
- spin_unlock_bh(&mhi_cntrl->state_lock);
- return -EIO;
+ ret = -EIO;
+ goto err_unlock;
}
ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_READY);
- spin_unlock_bh(&mhi_cntrl->state_lock);
-
if (ret)
mhi_ep_handle_syserr(mhi_cntrl);
+err_unlock:
+ mutex_unlock(&mhi_cntrl->state_lock);
+
return ret;
}
diff --git a/drivers/bus/mhi/host/init.c b/drivers/bus/mhi/host/init.c
index bf672de35131..3d779ee6396d 100644
--- a/drivers/bus/mhi/host/init.c
+++ b/drivers/bus/mhi/host/init.c
@@ -1395,9 +1395,9 @@ void mhi_driver_unregister(struct mhi_driver *mhi_drv)
}
EXPORT_SYMBOL_GPL(mhi_driver_unregister);
-static int mhi_uevent(struct device *dev, struct kobj_uevent_env *env)
+static int mhi_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
- struct mhi_device *mhi_dev = to_mhi_device(dev);
+ const struct mhi_device *mhi_dev = to_mhi_device(dev);
return add_uevent_var(env, "MODALIAS=" MHI_DEVICE_MODALIAS_FMT,
mhi_dev->name);
@@ -1449,4 +1449,4 @@ postcore_initcall(mhi_init);
module_exit(mhi_exit);
MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("MHI Host Interface");
+MODULE_DESCRIPTION("Modem Host Interface");
diff --git a/drivers/bus/mips_cdmm.c b/drivers/bus/mips_cdmm.c
index fca0d0669aa9..554e1992edd4 100644
--- a/drivers/bus/mips_cdmm.c
+++ b/drivers/bus/mips_cdmm.c
@@ -67,9 +67,9 @@ static int mips_cdmm_match(struct device *dev, struct device_driver *drv)
return mips_cdmm_lookup(cdrv->id_table, cdev) != NULL;
}
-static int mips_cdmm_uevent(struct device *dev, struct kobj_uevent_env *env)
+static int mips_cdmm_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
- struct mips_cdmm_device *cdev = to_mips_cdmm_device(dev);
+ const struct mips_cdmm_device *cdev = to_mips_cdmm_device(dev);
int retval = 0;
retval = add_uevent_var(env, "CDMM_CPU=%u", cdev->cpu);
diff --git a/drivers/bus/simple-pm-bus.c b/drivers/bus/simple-pm-bus.c
index 6b8d6257ed8a..7afe1947e1c0 100644
--- a/drivers/bus/simple-pm-bus.c
+++ b/drivers/bus/simple-pm-bus.c
@@ -8,17 +8,24 @@
* for more details.
*/
+#include <linux/clk.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+struct simple_pm_bus {
+ struct clk_bulk_data *clks;
+ int num_clks;
+};
+
static int simple_pm_bus_probe(struct platform_device *pdev)
{
const struct device *dev = &pdev->dev;
const struct of_dev_auxdata *lookup = dev_get_platdata(dev);
struct device_node *np = dev->of_node;
const struct of_device_id *match;
+ struct simple_pm_bus *bus;
/*
* Allow user to use driver_override to bind this driver to a
@@ -44,6 +51,16 @@ static int simple_pm_bus_probe(struct platform_device *pdev)
return -ENODEV;
}
+ bus = devm_kzalloc(&pdev->dev, sizeof(*bus), GFP_KERNEL);
+ if (!bus)
+ return -ENOMEM;
+
+ bus->num_clks = devm_clk_bulk_get_all(&pdev->dev, &bus->clks);
+ if (bus->num_clks < 0)
+ return dev_err_probe(&pdev->dev, bus->num_clks, "failed to get clocks\n");
+
+ dev_set_drvdata(&pdev->dev, bus);
+
dev_dbg(&pdev->dev, "%s\n", __func__);
pm_runtime_enable(&pdev->dev);
@@ -67,6 +84,34 @@ static int simple_pm_bus_remove(struct platform_device *pdev)
return 0;
}
+static int simple_pm_bus_runtime_suspend(struct device *dev)
+{
+ struct simple_pm_bus *bus = dev_get_drvdata(dev);
+
+ clk_bulk_disable_unprepare(bus->num_clks, bus->clks);
+
+ return 0;
+}
+
+static int simple_pm_bus_runtime_resume(struct device *dev)
+{
+ struct simple_pm_bus *bus = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_bulk_prepare_enable(bus->num_clks, bus->clks);
+ if (ret) {
+ dev_err(dev, "failed to enable clocks: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct dev_pm_ops simple_pm_bus_pm_ops = {
+ RUNTIME_PM_OPS(simple_pm_bus_runtime_suspend, simple_pm_bus_runtime_resume, NULL)
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
+};
+
#define ONLY_BUS ((void *) 1) /* Match if the device is only a bus. */
static const struct of_device_id simple_pm_bus_of_match[] = {
@@ -85,6 +130,7 @@ static struct platform_driver simple_pm_bus_driver = {
.driver = {
.name = "simple-pm-bus",
.of_match_table = simple_pm_bus_of_match,
+ .pm = pm_ptr(&simple_pm_bus_pm_ops),
},
};
diff --git a/drivers/bus/sunxi-rsb.c b/drivers/bus/sunxi-rsb.c
index 3aa91aed3bf7..696c0aefb0ca 100644
--- a/drivers/bus/sunxi-rsb.c
+++ b/drivers/bus/sunxi-rsb.c
@@ -172,12 +172,17 @@ static void sunxi_rsb_device_remove(struct device *dev)
drv->remove(to_sunxi_rsb_device(dev));
}
+static int sunxi_rsb_device_modalias(const struct device *dev, struct kobj_uevent_env *env)
+{
+ return of_device_uevent_modalias(dev, env);
+}
+
static struct bus_type sunxi_rsb_bus = {
.name = RSB_CTRL_NAME,
.match = sunxi_rsb_device_match,
.probe = sunxi_rsb_device_probe,
.remove = sunxi_rsb_device_remove,
- .uevent = of_device_uevent_modalias,
+ .uevent = sunxi_rsb_device_modalias,
};
static void sunxi_rsb_dev_release(struct device *dev)
@@ -857,7 +862,13 @@ static int __init sunxi_rsb_init(void)
return ret;
}
- return platform_driver_register(&sunxi_rsb_driver);
+ ret = platform_driver_register(&sunxi_rsb_driver);
+ if (ret) {
+ bus_unregister(&sunxi_rsb_bus);
+ return ret;
+ }
+
+ return 0;
}
module_init(sunxi_rsb_init);
diff --git a/drivers/char/agp/agp.h b/drivers/char/agp/agp.h
index bb09d64cd51e..8771dcc9b8e2 100644
--- a/drivers/char/agp/agp.h
+++ b/drivers/char/agp/agp.h
@@ -236,6 +236,12 @@ void agp3_generic_tlbflush(struct agp_memory *mem);
int agp3_generic_configure(void);
void agp3_generic_cleanup(void);
+/* GATT allocation. Returns/accepts GATT kernel virtual address. */
+#define alloc_gatt_pages(order) \
+ ((char *)__get_free_pages(GFP_KERNEL, (order)))
+#define free_gatt_pages(table, order) \
+ free_pages((unsigned long)(table), (order))
+
/* aperture sizes have been standardised since v3 */
#define AGP_GENERIC_SIZES_ENTRIES 11
extern const struct aper_size_info_16 agp3_generic_sizes[];
diff --git a/drivers/char/applicom.c b/drivers/char/applicom.c
index 36203d3fa6ea..69314532f38c 100644
--- a/drivers/char/applicom.c
+++ b/drivers/char/applicom.c
@@ -197,8 +197,10 @@ static int __init applicom_init(void)
if (!pci_match_id(applicom_pci_tbl, dev))
continue;
- if (pci_enable_device(dev))
+ if (pci_enable_device(dev)) {
+ pci_dev_put(dev);
return -EIO;
+ }
RamIO = ioremap(pci_resource_start(dev, 0), LEN_RAM_IO);
@@ -207,6 +209,7 @@ static int __init applicom_init(void)
"space at 0x%llx\n",
(unsigned long long)pci_resource_start(dev, 0));
pci_disable_device(dev);
+ pci_dev_put(dev);
return -EIO;
}
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index 3da8e85f8aae..4fdf07ae3c54 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -549,6 +549,16 @@ config HW_RANDOM_CN10K
To compile this driver as a module, choose M here.
The module will be called cn10k_rng. If unsure, say Y.
+config HW_RANDOM_JH7110
+ tristate "StarFive JH7110 Random Number Generator support"
+ depends on SOC_STARFIVE || COMPILE_TEST
+ help
+ This driver provides support for the True Random Number
+ Generator in StarFive JH7110 SoCs.
+
+ To compile this driver as a module, choose M here.
+ The module will be called jh7110-trng.
+
endif # HW_RANDOM
config UML_RANDOM
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
index 3e948cf04476..09bde4a0f971 100644
--- a/drivers/char/hw_random/Makefile
+++ b/drivers/char/hw_random/Makefile
@@ -47,3 +47,4 @@ obj-$(CONFIG_HW_RANDOM_XIPHERA) += xiphera-trng.o
obj-$(CONFIG_HW_RANDOM_ARM_SMCCC_TRNG) += arm_smccc_trng.o
obj-$(CONFIG_HW_RANDOM_CN10K) += cn10k-rng.o
obj-$(CONFIG_HW_RANDOM_POLARFIRE_SOC) += mpfs-rng.o
+obj-$(CONFIG_HW_RANDOM_JH7110) += jh7110-trng.o
diff --git a/drivers/char/hw_random/jh7110-trng.c b/drivers/char/hw_random/jh7110-trng.c
new file mode 100644
index 000000000000..38474d48a25e
--- /dev/null
+++ b/drivers/char/hw_random/jh7110-trng.c
@@ -0,0 +1,393 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * TRNG driver for the StarFive JH7110 SoC
+ *
+ * Copyright (C) 2022 StarFive Technology Co.
+ */
+
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/hw_random.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/random.h>
+#include <linux/reset.h>
+
+/* trng register offset */
+#define STARFIVE_CTRL 0x00
+#define STARFIVE_STAT 0x04
+#define STARFIVE_MODE 0x08
+#define STARFIVE_SMODE 0x0C
+#define STARFIVE_IE 0x10
+#define STARFIVE_ISTAT 0x14
+#define STARFIVE_RAND0 0x20
+#define STARFIVE_RAND1 0x24
+#define STARFIVE_RAND2 0x28
+#define STARFIVE_RAND3 0x2C
+#define STARFIVE_RAND4 0x30
+#define STARFIVE_RAND5 0x34
+#define STARFIVE_RAND6 0x38
+#define STARFIVE_RAND7 0x3C
+#define STARFIVE_AUTO_RQSTS 0x60
+#define STARFIVE_AUTO_AGE 0x64
+
+/* CTRL CMD */
+#define STARFIVE_CTRL_EXEC_NOP 0x0
+#define STARFIVE_CTRL_GENE_RANDNUM 0x1
+#define STARFIVE_CTRL_EXEC_RANDRESEED 0x2
+
+/* STAT */
+#define STARFIVE_STAT_NONCE_MODE BIT(2)
+#define STARFIVE_STAT_R256 BIT(3)
+#define STARFIVE_STAT_MISSION_MODE BIT(8)
+#define STARFIVE_STAT_SEEDED BIT(9)
+#define STARFIVE_STAT_LAST_RESEED(x) ((x) << 16)
+#define STARFIVE_STAT_SRVC_RQST BIT(27)
+#define STARFIVE_STAT_RAND_GENERATING BIT(30)
+#define STARFIVE_STAT_RAND_SEEDING BIT(31)
+
+/* MODE */
+#define STARFIVE_MODE_R256 BIT(3)
+
+/* SMODE */
+#define STARFIVE_SMODE_NONCE_MODE BIT(2)
+#define STARFIVE_SMODE_MISSION_MODE BIT(8)
+#define STARFIVE_SMODE_MAX_REJECTS(x) ((x) << 16)
+
+/* IE */
+#define STARFIVE_IE_RAND_RDY_EN BIT(0)
+#define STARFIVE_IE_SEED_DONE_EN BIT(1)
+#define STARFIVE_IE_LFSR_LOCKUP_EN BIT(4)
+#define STARFIVE_IE_GLBL_EN BIT(31)
+
+#define STARFIVE_IE_ALL (STARFIVE_IE_GLBL_EN | \
+ STARFIVE_IE_RAND_RDY_EN | \
+ STARFIVE_IE_SEED_DONE_EN | \
+ STARFIVE_IE_LFSR_LOCKUP_EN)
+
+/* ISTAT */
+#define STARFIVE_ISTAT_RAND_RDY BIT(0)
+#define STARFIVE_ISTAT_SEED_DONE BIT(1)
+#define STARFIVE_ISTAT_LFSR_LOCKUP BIT(4)
+
+#define STARFIVE_RAND_LEN sizeof(u32)
+
+#define to_trng(p) container_of(p, struct starfive_trng, rng)
+
+enum reseed {
+ RANDOM_RESEED,
+ NONCE_RESEED,
+};
+
+enum mode {
+ PRNG_128BIT,
+ PRNG_256BIT,
+};
+
+struct starfive_trng {
+ struct device *dev;
+ void __iomem *base;
+ struct clk *hclk;
+ struct clk *ahb;
+ struct reset_control *rst;
+ struct hwrng rng;
+ struct completion random_done;
+ struct completion reseed_done;
+ u32 mode;
+ u32 mission;
+ u32 reseed;
+ /* protects against concurrent write to ctrl register */
+ spinlock_t write_lock;
+};
+
+static u16 autoreq;
+module_param(autoreq, ushort, 0);
+MODULE_PARM_DESC(autoreq, "Auto-reseeding after random number requests by host reaches specified counter:\n"
+ " 0 - disable counter\n"
+ " other - reload value for internal counter");
+
+static u16 autoage;
+module_param(autoage, ushort, 0);
+MODULE_PARM_DESC(autoage, "Auto-reseeding after specified timer countdowns to 0:\n"
+ " 0 - disable timer\n"
+ " other - reload value for internal timer");
+
+static inline int starfive_trng_wait_idle(struct starfive_trng *trng)
+{
+ u32 stat;
+
+ return readl_relaxed_poll_timeout(trng->base + STARFIVE_STAT, stat,
+ !(stat & (STARFIVE_STAT_RAND_GENERATING |
+ STARFIVE_STAT_RAND_SEEDING)),
+ 10, 100000);
+}
+
+static inline void starfive_trng_irq_mask_clear(struct starfive_trng *trng)
+{
+ /* clear register: ISTAT */
+ u32 data = readl(trng->base + STARFIVE_ISTAT);
+
+ writel(data, trng->base + STARFIVE_ISTAT);
+}
+
+static int starfive_trng_cmd(struct starfive_trng *trng, u32 cmd, bool wait)
+{
+ int wait_time = 1000;
+
+ /* allow up to 40 us for wait == 0 */
+ if (!wait)
+ wait_time = 40;
+
+ switch (cmd) {
+ case STARFIVE_CTRL_GENE_RANDNUM:
+ reinit_completion(&trng->random_done);
+ spin_lock_irq(&trng->write_lock);
+ writel(cmd, trng->base + STARFIVE_CTRL);
+ spin_unlock_irq(&trng->write_lock);
+ if (!wait_for_completion_timeout(&trng->random_done, usecs_to_jiffies(wait_time)))
+ return -ETIMEDOUT;
+ break;
+ case STARFIVE_CTRL_EXEC_RANDRESEED:
+ reinit_completion(&trng->reseed_done);
+ spin_lock_irq(&trng->write_lock);
+ writel(cmd, trng->base + STARFIVE_CTRL);
+ spin_unlock_irq(&trng->write_lock);
+ if (!wait_for_completion_timeout(&trng->reseed_done, usecs_to_jiffies(wait_time)))
+ return -ETIMEDOUT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int starfive_trng_init(struct hwrng *rng)
+{
+ struct starfive_trng *trng = to_trng(rng);
+ u32 mode, intr = 0;
+
+ /* setup Auto Request/Age register */
+ writel(autoage, trng->base + STARFIVE_AUTO_AGE);
+ writel(autoreq, trng->base + STARFIVE_AUTO_RQSTS);
+
+ /* clear register: ISTAT */
+ starfive_trng_irq_mask_clear(trng);
+
+ intr |= STARFIVE_IE_ALL;
+ writel(intr, trng->base + STARFIVE_IE);
+
+ mode = readl(trng->base + STARFIVE_MODE);
+
+ switch (trng->mode) {
+ case PRNG_128BIT:
+ mode &= ~STARFIVE_MODE_R256;
+ break;
+ case PRNG_256BIT:
+ mode |= STARFIVE_MODE_R256;
+ break;
+ default:
+ mode |= STARFIVE_MODE_R256;
+ break;
+ }
+
+ writel(mode, trng->base + STARFIVE_MODE);
+
+ return starfive_trng_cmd(trng, STARFIVE_CTRL_EXEC_RANDRESEED, 1);
+}
+
+static irqreturn_t starfive_trng_irq(int irq, void *priv)
+{
+ u32 status;
+ struct starfive_trng *trng = (struct starfive_trng *)priv;
+
+ status = readl(trng->base + STARFIVE_ISTAT);
+ if (status & STARFIVE_ISTAT_RAND_RDY) {
+ writel(STARFIVE_ISTAT_RAND_RDY, trng->base + STARFIVE_ISTAT);
+ complete(&trng->random_done);
+ }
+
+ if (status & STARFIVE_ISTAT_SEED_DONE) {
+ writel(STARFIVE_ISTAT_SEED_DONE, trng->base + STARFIVE_ISTAT);
+ complete(&trng->reseed_done);
+ }
+
+ if (status & STARFIVE_ISTAT_LFSR_LOCKUP) {
+ writel(STARFIVE_ISTAT_LFSR_LOCKUP, trng->base + STARFIVE_ISTAT);
+ /* SEU occurred, reseeding required*/
+ spin_lock(&trng->write_lock);
+ writel(STARFIVE_CTRL_EXEC_RANDRESEED, trng->base + STARFIVE_CTRL);
+ spin_unlock(&trng->write_lock);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void starfive_trng_cleanup(struct hwrng *rng)
+{
+ struct starfive_trng *trng = to_trng(rng);
+
+ writel(0, trng->base + STARFIVE_CTRL);
+
+ reset_control_assert(trng->rst);
+ clk_disable_unprepare(trng->hclk);
+ clk_disable_unprepare(trng->ahb);
+}
+
+static int starfive_trng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
+{
+ struct starfive_trng *trng = to_trng(rng);
+ int ret;
+
+ pm_runtime_get_sync(trng->dev);
+
+ if (trng->mode == PRNG_256BIT)
+ max = min_t(size_t, max, (STARFIVE_RAND_LEN * 8));
+ else
+ max = min_t(size_t, max, (STARFIVE_RAND_LEN * 4));
+
+ if (wait) {
+ ret = starfive_trng_wait_idle(trng);
+ if (ret)
+ return -ETIMEDOUT;
+ }
+
+ ret = starfive_trng_cmd(trng, STARFIVE_CTRL_GENE_RANDNUM, wait);
+ if (ret)
+ return ret;
+
+ memcpy_fromio(buf, trng->base + STARFIVE_RAND0, max);
+
+ pm_runtime_put_sync_autosuspend(trng->dev);
+
+ return max;
+}
+
+static int starfive_trng_probe(struct platform_device *pdev)
+{
+ int ret;
+ int irq;
+ struct starfive_trng *trng;
+
+ trng = devm_kzalloc(&pdev->dev, sizeof(*trng), GFP_KERNEL);
+ if (!trng)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, trng);
+ trng->dev = &pdev->dev;
+
+ trng->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(trng->base))
+ return dev_err_probe(&pdev->dev, PTR_ERR(trng->base),
+ "Error remapping memory for platform device.\n");
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ init_completion(&trng->random_done);
+ init_completion(&trng->reseed_done);
+ spin_lock_init(&trng->write_lock);
+
+ ret = devm_request_irq(&pdev->dev, irq, starfive_trng_irq, 0, pdev->name,
+ (void *)trng);
+ if (ret)
+ return dev_err_probe(&pdev->dev, irq,
+ "Failed to register interrupt handler\n");
+
+ trng->hclk = devm_clk_get(&pdev->dev, "hclk");
+ if (IS_ERR(trng->hclk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(trng->hclk),
+ "Error getting hardware reference clock\n");
+
+ trng->ahb = devm_clk_get(&pdev->dev, "ahb");
+ if (IS_ERR(trng->ahb))
+ return dev_err_probe(&pdev->dev, PTR_ERR(trng->ahb),
+ "Error getting ahb reference clock\n");
+
+ trng->rst = devm_reset_control_get_shared(&pdev->dev, NULL);
+ if (IS_ERR(trng->rst))
+ return dev_err_probe(&pdev->dev, PTR_ERR(trng->rst),
+ "Error getting hardware reset line\n");
+
+ clk_prepare_enable(trng->hclk);
+ clk_prepare_enable(trng->ahb);
+ reset_control_deassert(trng->rst);
+
+ trng->rng.name = dev_driver_string(&pdev->dev);
+ trng->rng.init = starfive_trng_init;
+ trng->rng.cleanup = starfive_trng_cleanup;
+ trng->rng.read = starfive_trng_read;
+
+ trng->mode = PRNG_256BIT;
+ trng->mission = 1;
+ trng->reseed = RANDOM_RESEED;
+
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_set_autosuspend_delay(&pdev->dev, 100);
+ pm_runtime_enable(&pdev->dev);
+
+ ret = devm_hwrng_register(&pdev->dev, &trng->rng);
+ if (ret) {
+ pm_runtime_disable(&pdev->dev);
+
+ reset_control_assert(trng->rst);
+ clk_disable_unprepare(trng->ahb);
+ clk_disable_unprepare(trng->hclk);
+
+ return dev_err_probe(&pdev->dev, ret, "Failed to register hwrng\n");
+ }
+
+ return 0;
+}
+
+static int __maybe_unused starfive_trng_suspend(struct device *dev)
+{
+ struct starfive_trng *trng = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(trng->hclk);
+ clk_disable_unprepare(trng->ahb);
+
+ return 0;
+}
+
+static int __maybe_unused starfive_trng_resume(struct device *dev)
+{
+ struct starfive_trng *trng = dev_get_drvdata(dev);
+
+ clk_prepare_enable(trng->hclk);
+ clk_prepare_enable(trng->ahb);
+
+ return 0;
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(starfive_trng_pm_ops, starfive_trng_suspend,
+ starfive_trng_resume);
+
+static const struct of_device_id trng_dt_ids[] __maybe_unused = {
+ { .compatible = "starfive,jh7110-trng" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, trng_dt_ids);
+
+static struct platform_driver starfive_trng_driver = {
+ .probe = starfive_trng_probe,
+ .driver = {
+ .name = "jh7110-trng",
+ .pm = &starfive_trng_pm_ops,
+ .of_match_table = of_match_ptr(trng_dt_ids),
+ },
+};
+
+module_platform_driver(starfive_trng_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("StarFive True Random Number Generator");
diff --git a/drivers/char/ipmi/ipmi_ipmb.c b/drivers/char/ipmi/ipmi_ipmb.c
index 7c1aee5e11b7..3f1c9f1573e7 100644
--- a/drivers/char/ipmi/ipmi_ipmb.c
+++ b/drivers/char/ipmi/ipmi_ipmb.c
@@ -27,7 +27,7 @@ MODULE_PARM_DESC(bmcaddr, "Address to use for BMC.");
static unsigned int retry_time_ms = 250;
module_param(retry_time_ms, uint, 0644);
-MODULE_PARM_DESC(max_retries, "Timeout time between retries, in milliseconds.");
+MODULE_PARM_DESC(retry_time_ms, "Timeout time between retries, in milliseconds.");
static unsigned int max_retries = 1;
module_param(max_retries, uint, 0644);
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index 4bfd1e306616..a5ddebb1edea 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -74,7 +74,8 @@
/*
* Timer values
*/
-#define SSIF_MSG_USEC 60000 /* 60ms between message tries. */
+#define SSIF_MSG_USEC 60000 /* 60ms between message tries (T3). */
+#define SSIF_REQ_RETRY_USEC 60000 /* 60ms between send retries (T6). */
#define SSIF_MSG_PART_USEC 5000 /* 5ms for a message part */
/* How many times to we retry sending/receiving the message. */
@@ -82,7 +83,9 @@
#define SSIF_RECV_RETRIES 250
#define SSIF_MSG_MSEC (SSIF_MSG_USEC / 1000)
+#define SSIF_REQ_RETRY_MSEC (SSIF_REQ_RETRY_USEC / 1000)
#define SSIF_MSG_JIFFIES ((SSIF_MSG_USEC * 1000) / TICK_NSEC)
+#define SSIF_REQ_RETRY_JIFFIES ((SSIF_REQ_RETRY_USEC * 1000) / TICK_NSEC)
#define SSIF_MSG_PART_JIFFIES ((SSIF_MSG_PART_USEC * 1000) / TICK_NSEC)
/*
@@ -92,7 +95,7 @@
#define SSIF_WATCH_WATCHDOG_TIMEOUT msecs_to_jiffies(250)
enum ssif_intf_state {
- SSIF_NORMAL,
+ SSIF_IDLE,
SSIF_GETTING_FLAGS,
SSIF_GETTING_EVENTS,
SSIF_CLEARING_FLAGS,
@@ -100,8 +103,8 @@ enum ssif_intf_state {
/* FIXME - add watchdog stuff. */
};
-#define SSIF_IDLE(ssif) ((ssif)->ssif_state == SSIF_NORMAL \
- && (ssif)->curr_msg == NULL)
+#define IS_SSIF_IDLE(ssif) ((ssif)->ssif_state == SSIF_IDLE \
+ && (ssif)->curr_msg == NULL)
/*
* Indexes into stats[] in ssif_info below.
@@ -229,6 +232,9 @@ struct ssif_info {
bool got_alert;
bool waiting_alert;
+ /* Used to inform the timeout that it should do a resend. */
+ bool do_resend;
+
/*
* If set to true, this will request events the next time the
* state machine is idle.
@@ -241,12 +247,6 @@ struct ssif_info {
*/
bool req_flags;
- /*
- * Used to perform timer operations when run-to-completion
- * mode is on. This is a countdown timer.
- */
- int rtc_us_timer;
-
/* Used for sending/receiving data. +1 for the length. */
unsigned char data[IPMI_MAX_MSG_LENGTH + 1];
unsigned int data_len;
@@ -348,9 +348,9 @@ static void return_hosed_msg(struct ssif_info *ssif_info,
/*
* Must be called with the message lock held. This will release the
- * message lock. Note that the caller will check SSIF_IDLE and start a
- * new operation, so there is no need to check for new messages to
- * start in here.
+ * message lock. Note that the caller will check IS_SSIF_IDLE and
+ * start a new operation, so there is no need to check for new
+ * messages to start in here.
*/
static void start_clear_flags(struct ssif_info *ssif_info, unsigned long *flags)
{
@@ -367,7 +367,7 @@ static void start_clear_flags(struct ssif_info *ssif_info, unsigned long *flags)
if (start_send(ssif_info, msg, 3) != 0) {
/* Error, just go to normal state. */
- ssif_info->ssif_state = SSIF_NORMAL;
+ ssif_info->ssif_state = SSIF_IDLE;
}
}
@@ -382,7 +382,7 @@ static void start_flag_fetch(struct ssif_info *ssif_info, unsigned long *flags)
mb[0] = (IPMI_NETFN_APP_REQUEST << 2);
mb[1] = IPMI_GET_MSG_FLAGS_CMD;
if (start_send(ssif_info, mb, 2) != 0)
- ssif_info->ssif_state = SSIF_NORMAL;
+ ssif_info->ssif_state = SSIF_IDLE;
}
static void check_start_send(struct ssif_info *ssif_info, unsigned long *flags,
@@ -393,7 +393,7 @@ static void check_start_send(struct ssif_info *ssif_info, unsigned long *flags,
flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
ssif_info->curr_msg = NULL;
- ssif_info->ssif_state = SSIF_NORMAL;
+ ssif_info->ssif_state = SSIF_IDLE;
ipmi_ssif_unlock_cond(ssif_info, flags);
ipmi_free_smi_msg(msg);
}
@@ -407,7 +407,7 @@ static void start_event_fetch(struct ssif_info *ssif_info, unsigned long *flags)
msg = ipmi_alloc_smi_msg();
if (!msg) {
- ssif_info->ssif_state = SSIF_NORMAL;
+ ssif_info->ssif_state = SSIF_IDLE;
ipmi_ssif_unlock_cond(ssif_info, flags);
return;
}
@@ -430,7 +430,7 @@ static void start_recv_msg_fetch(struct ssif_info *ssif_info,
msg = ipmi_alloc_smi_msg();
if (!msg) {
- ssif_info->ssif_state = SSIF_NORMAL;
+ ssif_info->ssif_state = SSIF_IDLE;
ipmi_ssif_unlock_cond(ssif_info, flags);
return;
}
@@ -448,9 +448,9 @@ static void start_recv_msg_fetch(struct ssif_info *ssif_info,
/*
* Must be called with the message lock held. This will release the
- * message lock. Note that the caller will check SSIF_IDLE and start a
- * new operation, so there is no need to check for new messages to
- * start in here.
+ * message lock. Note that the caller will check IS_SSIF_IDLE and
+ * start a new operation, so there is no need to check for new
+ * messages to start in here.
*/
static void handle_flags(struct ssif_info *ssif_info, unsigned long *flags)
{
@@ -466,7 +466,7 @@ static void handle_flags(struct ssif_info *ssif_info, unsigned long *flags)
/* Events available. */
start_event_fetch(ssif_info, flags);
else {
- ssif_info->ssif_state = SSIF_NORMAL;
+ ssif_info->ssif_state = SSIF_IDLE;
ipmi_ssif_unlock_cond(ssif_info, flags);
}
}
@@ -530,7 +530,6 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
static void start_get(struct ssif_info *ssif_info)
{
- ssif_info->rtc_us_timer = 0;
ssif_info->multi_pos = 0;
ssif_i2c_send(ssif_info, msg_done_handler, I2C_SMBUS_READ,
@@ -538,22 +537,28 @@ static void start_get(struct ssif_info *ssif_info)
ssif_info->recv, I2C_SMBUS_BLOCK_DATA);
}
+static void start_resend(struct ssif_info *ssif_info);
+
static void retry_timeout(struct timer_list *t)
{
struct ssif_info *ssif_info = from_timer(ssif_info, t, retry_timer);
unsigned long oflags, *flags;
- bool waiting;
+ bool waiting, resend;
if (ssif_info->stopping)
return;
flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
+ resend = ssif_info->do_resend;
+ ssif_info->do_resend = false;
waiting = ssif_info->waiting_alert;
ssif_info->waiting_alert = false;
ipmi_ssif_unlock_cond(ssif_info, flags);
if (waiting)
start_get(ssif_info);
+ if (resend)
+ start_resend(ssif_info);
}
static void watch_timeout(struct timer_list *t)
@@ -568,7 +573,7 @@ static void watch_timeout(struct timer_list *t)
if (ssif_info->watch_timeout) {
mod_timer(&ssif_info->watch_timer,
jiffies + ssif_info->watch_timeout);
- if (SSIF_IDLE(ssif_info)) {
+ if (IS_SSIF_IDLE(ssif_info)) {
start_flag_fetch(ssif_info, flags); /* Releases lock */
return;
}
@@ -602,8 +607,6 @@ static void ssif_alert(struct i2c_client *client, enum i2c_alert_protocol type,
start_get(ssif_info);
}
-static int start_resend(struct ssif_info *ssif_info);
-
static void msg_done_handler(struct ssif_info *ssif_info, int result,
unsigned char *data, unsigned int len)
{
@@ -622,7 +625,6 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
ssif_info->waiting_alert = true;
- ssif_info->rtc_us_timer = SSIF_MSG_USEC;
if (!ssif_info->stopping)
mod_timer(&ssif_info->retry_timer,
jiffies + SSIF_MSG_JIFFIES);
@@ -756,7 +758,7 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
}
switch (ssif_info->ssif_state) {
- case SSIF_NORMAL:
+ case SSIF_IDLE:
ipmi_ssif_unlock_cond(ssif_info, flags);
if (!msg)
break;
@@ -774,7 +776,7 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
* Error fetching flags, or invalid length,
* just give up for now.
*/
- ssif_info->ssif_state = SSIF_NORMAL;
+ ssif_info->ssif_state = SSIF_IDLE;
ipmi_ssif_unlock_cond(ssif_info, flags);
dev_warn(&ssif_info->client->dev,
"Error getting flags: %d %d, %x\n",
@@ -809,7 +811,7 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
"Invalid response clearing flags: %x %x\n",
data[0], data[1]);
}
- ssif_info->ssif_state = SSIF_NORMAL;
+ ssif_info->ssif_state = SSIF_IDLE;
ipmi_ssif_unlock_cond(ssif_info, flags);
break;
@@ -887,7 +889,7 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
}
flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
- if (SSIF_IDLE(ssif_info) && !ssif_info->stopping) {
+ if (IS_SSIF_IDLE(ssif_info) && !ssif_info->stopping) {
if (ssif_info->req_events)
start_event_fetch(ssif_info, flags);
else if (ssif_info->req_flags)
@@ -909,31 +911,23 @@ static void msg_written_handler(struct ssif_info *ssif_info, int result,
if (result < 0) {
ssif_info->retries_left--;
if (ssif_info->retries_left > 0) {
- if (!start_resend(ssif_info)) {
- ssif_inc_stat(ssif_info, send_retries);
- return;
- }
- /* request failed, just return the error. */
- ssif_inc_stat(ssif_info, send_errors);
-
- if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
- dev_dbg(&ssif_info->client->dev,
- "%s: Out of retries\n", __func__);
- msg_done_handler(ssif_info, -EIO, NULL, 0);
+ /*
+ * Wait the retry timeout time per the spec,
+ * then redo the send.
+ */
+ ssif_info->do_resend = true;
+ mod_timer(&ssif_info->retry_timer,
+ jiffies + SSIF_REQ_RETRY_JIFFIES);
return;
}
ssif_inc_stat(ssif_info, send_errors);
- /*
- * Got an error on transmit, let the done routine
- * handle it.
- */
if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
dev_dbg(&ssif_info->client->dev,
- "%s: Error %d\n", __func__, result);
+ "%s: Out of retries\n", __func__);
- msg_done_handler(ssif_info, result, NULL, 0);
+ msg_done_handler(ssif_info, -EIO, NULL, 0);
return;
}
@@ -987,7 +981,6 @@ static void msg_written_handler(struct ssif_info *ssif_info, int result,
/* Wait a jiffie then request the next message */
ssif_info->waiting_alert = true;
ssif_info->retries_left = SSIF_RECV_RETRIES;
- ssif_info->rtc_us_timer = SSIF_MSG_PART_USEC;
if (!ssif_info->stopping)
mod_timer(&ssif_info->retry_timer,
jiffies + SSIF_MSG_PART_JIFFIES);
@@ -996,7 +989,7 @@ static void msg_written_handler(struct ssif_info *ssif_info, int result,
}
}
-static int start_resend(struct ssif_info *ssif_info)
+static void start_resend(struct ssif_info *ssif_info)
{
int command;
@@ -1021,7 +1014,6 @@ static int start_resend(struct ssif_info *ssif_info)
ssif_i2c_send(ssif_info, msg_written_handler, I2C_SMBUS_WRITE,
command, ssif_info->data, I2C_SMBUS_BLOCK_DATA);
- return 0;
}
static int start_send(struct ssif_info *ssif_info,
@@ -1036,7 +1028,8 @@ static int start_send(struct ssif_info *ssif_info,
ssif_info->retries_left = SSIF_SEND_RETRIES;
memcpy(ssif_info->data + 1, data, len);
ssif_info->data_len = len;
- return start_resend(ssif_info);
+ start_resend(ssif_info);
+ return 0;
}
/* Must be called with the message lock held. */
@@ -1046,7 +1039,7 @@ static void start_next_msg(struct ssif_info *ssif_info, unsigned long *flags)
unsigned long oflags;
restart:
- if (!SSIF_IDLE(ssif_info)) {
+ if (!IS_SSIF_IDLE(ssif_info)) {
ipmi_ssif_unlock_cond(ssif_info, flags);
return;
}
@@ -1269,7 +1262,7 @@ static void shutdown_ssif(void *send_info)
dev_set_drvdata(&ssif_info->client->dev, NULL);
/* make sure the driver is not looking for flags any more. */
- while (ssif_info->ssif_state != SSIF_NORMAL)
+ while (ssif_info->ssif_state != SSIF_IDLE)
schedule_timeout(1);
ssif_info->stopping = true;
@@ -1334,8 +1327,10 @@ static int do_cmd(struct i2c_client *client, int len, unsigned char *msg,
ret = i2c_smbus_write_block_data(client, SSIF_IPMI_REQUEST, len, msg);
if (ret) {
retry_cnt--;
- if (retry_cnt > 0)
+ if (retry_cnt > 0) {
+ msleep(SSIF_REQ_RETRY_MSEC);
goto retry1;
+ }
return -ENODEV;
}
@@ -1476,8 +1471,10 @@ retry_write:
32, msg);
if (ret) {
retry_cnt--;
- if (retry_cnt > 0)
+ if (retry_cnt > 0) {
+ msleep(SSIF_REQ_RETRY_MSEC);
goto retry_write;
+ }
dev_err(&client->dev, "Could not write multi-part start, though the BMC said it could handle it. Just limit sends to one part.\n");
return ret;
}
@@ -1839,7 +1836,7 @@ static int ssif_probe(struct i2c_client *client)
}
spin_lock_init(&ssif_info->lock);
- ssif_info->ssif_state = SSIF_NORMAL;
+ ssif_info->ssif_state = SSIF_IDLE;
timer_setup(&ssif_info->retry_timer, retry_timeout, 0);
timer_setup(&ssif_info->watch_timer, watch_timeout, 0);
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 83bf2a4dcb57..ffb101d349f0 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -343,7 +343,7 @@ static unsigned zero_mmap_capabilities(struct file *file)
/* can't do an in-place private mapping if there's no MMU */
static inline int private_mapping_ok(struct vm_area_struct *vma)
{
- return vma->vm_flags & VM_MAYSHARE;
+ return is_nommu_shared_mapping(vma->vm_flags);
}
#else
diff --git a/drivers/char/mspec.c b/drivers/char/mspec.c
index f8231e2e84be..b35f651837c8 100644
--- a/drivers/char/mspec.c
+++ b/drivers/char/mspec.c
@@ -206,7 +206,7 @@ mspec_mmap(struct file *file, struct vm_area_struct *vma,
refcount_set(&vdata->refcnt, 1);
vma->vm_private_data = vdata;
- vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
+ vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
if (vdata->type == MSPEC_UNCACHED)
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma->vm_ops = &mspec_vm_ops;
diff --git a/drivers/char/pcmcia/cm4000_cs.c b/drivers/char/pcmcia/cm4000_cs.c
index adaec8fd4b16..e656f42a28ac 100644
--- a/drivers/char/pcmcia/cm4000_cs.c
+++ b/drivers/char/pcmcia/cm4000_cs.c
@@ -529,7 +529,8 @@ static int set_protocol(struct cm4000_dev *dev, struct ptsreq *ptsreq)
DEBUGP(5, dev, "NumRecBytes is valid\n");
break;
}
- usleep_range(10000, 11000);
+ /* can not sleep as this is in atomic context */
+ mdelay(10);
}
if (i == 100) {
DEBUGP(5, dev, "Timeout waiting for NumRecBytes getting "
@@ -549,7 +550,8 @@ static int set_protocol(struct cm4000_dev *dev, struct ptsreq *ptsreq)
}
break;
}
- usleep_range(10000, 11000);
+ /* can not sleep as this is in atomic context */
+ mdelay(10);
}
/* check whether it is a short PTS reply? */
diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
index b2735be81ab2..6ddfeb2fe98f 100644
--- a/drivers/char/pcmcia/synclink_cs.c
+++ b/drivers/char/pcmcia/synclink_cs.c
@@ -377,8 +377,8 @@ static void async_mode(MGSLPC_INFO *info);
static void tx_timeout(struct timer_list *t);
-static int carrier_raised(struct tty_port *port);
-static void dtr_rts(struct tty_port *port, int onoff);
+static bool carrier_raised(struct tty_port *port);
+static void dtr_rts(struct tty_port *port, bool active);
#if SYNCLINK_GENERIC_HDLC
#define dev_to_port(D) (dev_to_hdlc(D)->priv)
@@ -483,7 +483,7 @@ static void* mgslpc_get_text_ptr(void)
return mgslpc_get_text_ptr;
}
-/**
+/*
* line discipline callback wrappers
*
* The wrappers maintain line discipline references
@@ -1309,7 +1309,7 @@ static int startup(MGSLPC_INFO * info, struct tty_struct *tty)
if (tty)
clear_bit(TTY_IO_ERROR, &tty->flags);
- tty_port_set_initialized(&info->port, 1);
+ tty_port_set_initialized(&info->port, true);
return 0;
}
@@ -1359,7 +1359,7 @@ static void shutdown(MGSLPC_INFO * info, struct tty_struct *tty)
if (tty)
set_bit(TTY_IO_ERROR, &tty->flags);
- tty_port_set_initialized(&info->port, 0);
+ tty_port_set_initialized(&info->port, false);
}
static void mgslpc_program_hw(MGSLPC_INFO *info, struct tty_struct *tty)
@@ -2430,7 +2430,7 @@ static void mgslpc_hangup(struct tty_struct *tty)
tty_port_hangup(&info->port);
}
-static int carrier_raised(struct tty_port *port)
+static bool carrier_raised(struct tty_port *port)
{
MGSLPC_INFO *info = container_of(port, MGSLPC_INFO, port);
unsigned long flags;
@@ -2439,18 +2439,16 @@ static int carrier_raised(struct tty_port *port)
get_signals(info);
spin_unlock_irqrestore(&info->lock, flags);
- if (info->serial_signals & SerialSignal_DCD)
- return 1;
- return 0;
+ return info->serial_signals & SerialSignal_DCD;
}
-static void dtr_rts(struct tty_port *port, int onoff)
+static void dtr_rts(struct tty_port *port, bool active)
{
MGSLPC_INFO *info = container_of(port, MGSLPC_INFO, port);
unsigned long flags;
spin_lock_irqsave(&info->lock, flags);
- if (onoff)
+ if (active)
info->serial_signals |= SerialSignal_RTS | SerialSignal_DTR;
else
info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
@@ -3857,7 +3855,7 @@ static void tx_timeout(struct timer_list *t)
#if SYNCLINK_GENERIC_HDLC
-/**
+/*
* called by generic HDLC layer when protocol selected (PPP, frame relay, etc.)
* set encoding and frame check sequence (FCS) options
*
@@ -3910,7 +3908,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
return 0;
}
-/**
+/*
* called by generic HDLC layer to send frame
*
* skb socket buffer containing HDLC frame
@@ -3955,7 +3953,7 @@ static netdev_tx_t hdlcdev_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
}
-/**
+/*
* called by network layer when interface enabled
* claim resources and initialize hardware
*
@@ -4018,7 +4016,7 @@ static int hdlcdev_open(struct net_device *dev)
return 0;
}
-/**
+/*
* called by network layer when interface is disabled
* shutdown hardware and release resources
*
@@ -4049,7 +4047,7 @@ static int hdlcdev_close(struct net_device *dev)
return 0;
}
-/**
+/*
* called by network layer to process IOCTL call to network device
*
* dev pointer to network device structure
@@ -4152,7 +4150,7 @@ static int hdlcdev_wan_ioctl(struct net_device *dev, struct if_settings *ifs)
}
}
-/**
+/*
* called by network layer when transmit timeout is detected
*
* dev pointer to network device structure
@@ -4175,7 +4173,7 @@ static void hdlcdev_tx_timeout(struct net_device *dev, unsigned int txqueue)
netif_wake_queue(dev);
}
-/**
+/*
* called by device driver when transmit completes
* reenable network layer transmit if stopped
*
@@ -4187,7 +4185,7 @@ static void hdlcdev_tx_done(MGSLPC_INFO *info)
netif_wake_queue(info->netdev);
}
-/**
+/*
* called by device driver when frame received
* pass frame to network layer
*
@@ -4227,7 +4225,7 @@ static const struct net_device_ops hdlcdev_ops = {
.ndo_tx_timeout = hdlcdev_tx_timeout,
};
-/**
+/*
* called by device driver when adding device instance
* do generic HDLC initialization
*
@@ -4275,7 +4273,7 @@ static int hdlcdev_init(MGSLPC_INFO *info)
return 0;
}
-/**
+/*
* called by device driver when removing device instance
* do generic HDLC cleanup
*
diff --git a/drivers/char/tpm/eventlog/acpi.c b/drivers/char/tpm/eventlog/acpi.c
index 0913d3eb8d51..40360e599bc3 100644
--- a/drivers/char/tpm/eventlog/acpi.c
+++ b/drivers/char/tpm/eventlog/acpi.c
@@ -14,6 +14,7 @@
* Access to the event log extended by the TCG BIOS of PC platform
*/
+#include <linux/device.h>
#include <linux/seq_file.h>
#include <linux/fs.h>
#include <linux/security.h>
@@ -135,7 +136,7 @@ int tpm_read_log_acpi(struct tpm_chip *chip)
}
/* malloc EventLog space */
- log->bios_event_log = kmalloc(len, GFP_KERNEL);
+ log->bios_event_log = devm_kmalloc(&chip->dev, len, GFP_KERNEL);
if (!log->bios_event_log)
return -ENOMEM;
@@ -160,7 +161,7 @@ int tpm_read_log_acpi(struct tpm_chip *chip)
return format;
err:
- kfree(log->bios_event_log);
+ devm_kfree(&chip->dev, log->bios_event_log);
log->bios_event_log = NULL;
return ret;
}
diff --git a/drivers/char/tpm/eventlog/efi.c b/drivers/char/tpm/eventlog/efi.c
index e6cb9d525e30..4e9d7c2bf32e 100644
--- a/drivers/char/tpm/eventlog/efi.c
+++ b/drivers/char/tpm/eventlog/efi.c
@@ -6,6 +6,7 @@
* Thiebaud Weksteen <tweek@google.com>
*/
+#include <linux/device.h>
#include <linux/efi.h>
#include <linux/tpm_eventlog.h>
@@ -55,7 +56,7 @@ int tpm_read_log_efi(struct tpm_chip *chip)
}
/* malloc EventLog space */
- log->bios_event_log = kmemdup(log_tbl->log, log_size, GFP_KERNEL);
+ log->bios_event_log = devm_kmemdup(&chip->dev, log_tbl->log, log_size, GFP_KERNEL);
if (!log->bios_event_log) {
ret = -ENOMEM;
goto out;
@@ -76,7 +77,7 @@ int tpm_read_log_efi(struct tpm_chip *chip)
MEMREMAP_WB);
if (!final_tbl) {
pr_err("Could not map UEFI TPM final log\n");
- kfree(log->bios_event_log);
+ devm_kfree(&chip->dev, log->bios_event_log);
ret = -ENOMEM;
goto out;
}
@@ -91,11 +92,11 @@ int tpm_read_log_efi(struct tpm_chip *chip)
* Allocate memory for the 'combined log' where we will append the
* 'final events log' to.
*/
- tmp = krealloc(log->bios_event_log,
- log_size + final_events_log_size,
- GFP_KERNEL);
+ tmp = devm_krealloc(&chip->dev, log->bios_event_log,
+ log_size + final_events_log_size,
+ GFP_KERNEL);
if (!tmp) {
- kfree(log->bios_event_log);
+ devm_kfree(&chip->dev, log->bios_event_log);
ret = -ENOMEM;
goto out;
}
diff --git a/drivers/char/tpm/eventlog/of.c b/drivers/char/tpm/eventlog/of.c
index a9ce66d09a75..930fe43d5daf 100644
--- a/drivers/char/tpm/eventlog/of.c
+++ b/drivers/char/tpm/eventlog/of.c
@@ -10,13 +10,44 @@
* Read the event log created by the firmware on PPC64
*/
+#include <linux/device.h>
#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_reserved_mem.h>
#include <linux/tpm_eventlog.h>
#include "../tpm.h"
#include "common.h"
+static int tpm_read_log_memory_region(struct tpm_chip *chip)
+{
+ struct device_node *node;
+ struct resource res;
+ int rc;
+
+ node = of_parse_phandle(chip->dev.parent->of_node, "memory-region", 0);
+ if (!node)
+ return -ENODEV;
+
+ rc = of_address_to_resource(node, 0, &res);
+ of_node_put(node);
+ if (rc)
+ return rc;
+
+ chip->log.bios_event_log = devm_memremap(&chip->dev, res.start, resource_size(&res),
+ MEMREMAP_WB);
+ if (IS_ERR(chip->log.bios_event_log))
+ return -ENOMEM;
+
+ chip->log.bios_event_log_end = chip->log.bios_event_log + resource_size(&res);
+
+ return chip->flags & TPM_CHIP_FLAG_TPM2 ? EFI_TCG2_EVENT_LOG_FORMAT_TCG_2 :
+ EFI_TCG2_EVENT_LOG_FORMAT_TCG_1_2;
+}
+
int tpm_read_log_of(struct tpm_chip *chip)
{
struct device_node *np;
@@ -38,7 +69,7 @@ int tpm_read_log_of(struct tpm_chip *chip)
sizep = of_get_property(np, "linux,sml-size", NULL);
basep = of_get_property(np, "linux,sml-base", NULL);
if (sizep == NULL && basep == NULL)
- return -ENODEV;
+ return tpm_read_log_memory_region(chip);
if (sizep == NULL || basep == NULL)
return -EIO;
@@ -65,7 +96,7 @@ int tpm_read_log_of(struct tpm_chip *chip)
return -EIO;
}
- log->bios_event_log = kmemdup(__va(base), size, GFP_KERNEL);
+ log->bios_event_log = devm_kmemdup(&chip->dev, __va(base), size, GFP_KERNEL);
if (!log->bios_event_log)
return -ENOMEM;
diff --git a/drivers/char/tpm/st33zp24/i2c.c b/drivers/char/tpm/st33zp24/i2c.c
index 8156bb2af78c..c4d0b744e3cc 100644
--- a/drivers/char/tpm/st33zp24/i2c.c
+++ b/drivers/char/tpm/st33zp24/i2c.c
@@ -101,8 +101,7 @@ static const struct st33zp24_phy_ops i2c_phy_ops = {
* @return: 0 in case of success.
* -1 in other case.
*/
-static int st33zp24_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int st33zp24_i2c_probe(struct i2c_client *client)
{
struct st33zp24_i2c_phy *phy;
@@ -161,7 +160,7 @@ static struct i2c_driver st33zp24_i2c_driver = {
.of_match_table = of_match_ptr(of_st33zp24_i2c_match),
.acpi_match_table = ACPI_PTR(st33zp24_i2c_acpi_match),
},
- .probe = st33zp24_i2c_probe,
+ .probe_new = st33zp24_i2c_probe,
.remove = st33zp24_i2c_remove,
.id_table = st33zp24_i2c_id
};
diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
index 741d8f3e8fb3..b99f55f2d4fd 100644
--- a/drivers/char/tpm/tpm-chip.c
+++ b/drivers/char/tpm/tpm-chip.c
@@ -267,7 +267,6 @@ static void tpm_dev_release(struct device *dev)
idr_remove(&dev_nums_idr, chip->dev_num);
mutex_unlock(&idr_lock);
- kfree(chip->log.bios_event_log);
kfree(chip->work_space.context_buf);
kfree(chip->work_space.session_buf);
kfree(chip->allocated_banks);
diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
index 65d03867e114..93545be190a5 100644
--- a/drivers/char/tpm/tpm2-cmd.c
+++ b/drivers/char/tpm/tpm2-cmd.c
@@ -777,10 +777,12 @@ out:
int tpm2_find_cc(struct tpm_chip *chip, u32 cc)
{
+ u32 cc_mask;
int i;
+ cc_mask = 1 << TPM2_CC_ATTR_VENDOR | GENMASK(15, 0);
for (i = 0; i < chip->nr_commands; i++)
- if (cc == (chip->cc_attrs_tbl[i] & GENMASK(15, 0)))
+ if (cc == (chip->cc_attrs_tbl[i] & cc_mask))
return i;
return -1;
diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c
index 7e9da671a0e8..d43a0d7b97a8 100644
--- a/drivers/char/tpm/tpm_crb.c
+++ b/drivers/char/tpm/tpm_crb.c
@@ -98,6 +98,8 @@ struct crb_priv {
u8 __iomem *rsp;
u32 cmd_size;
u32 smc_func_id;
+ u32 __iomem *pluton_start_addr;
+ u32 __iomem *pluton_reply_addr;
};
struct tpm2_crb_smc {
@@ -108,6 +110,11 @@ struct tpm2_crb_smc {
u32 smc_func_id;
};
+struct tpm2_crb_pluton {
+ u64 start_addr;
+ u64 reply_addr;
+};
+
static bool crb_wait_for_reg_32(u32 __iomem *reg, u32 mask, u32 value,
unsigned long timeout)
{
@@ -127,6 +134,25 @@ static bool crb_wait_for_reg_32(u32 __iomem *reg, u32 mask, u32 value,
return ((ioread32(reg) & mask) == value);
}
+static int crb_try_pluton_doorbell(struct crb_priv *priv, bool wait_for_complete)
+{
+ if (priv->sm != ACPI_TPM2_COMMAND_BUFFER_WITH_PLUTON)
+ return 0;
+
+ if (!crb_wait_for_reg_32(priv->pluton_reply_addr, ~0, 1, TPM2_TIMEOUT_C))
+ return -ETIME;
+
+ iowrite32(1, priv->pluton_start_addr);
+ if (wait_for_complete == false)
+ return 0;
+
+ if (!crb_wait_for_reg_32(priv->pluton_start_addr,
+ 0xffffffff, 0, 200))
+ return -ETIME;
+
+ return 0;
+}
+
/**
* __crb_go_idle - request tpm crb device to go the idle state
*
@@ -145,6 +171,8 @@ static bool crb_wait_for_reg_32(u32 __iomem *reg, u32 mask, u32 value,
*/
static int __crb_go_idle(struct device *dev, struct crb_priv *priv)
{
+ int rc;
+
if ((priv->sm == ACPI_TPM2_START_METHOD) ||
(priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD) ||
(priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC))
@@ -152,6 +180,10 @@ static int __crb_go_idle(struct device *dev, struct crb_priv *priv)
iowrite32(CRB_CTRL_REQ_GO_IDLE, &priv->regs_t->ctrl_req);
+ rc = crb_try_pluton_doorbell(priv, true);
+ if (rc)
+ return rc;
+
if (!crb_wait_for_reg_32(&priv->regs_t->ctrl_req,
CRB_CTRL_REQ_GO_IDLE/* mask */,
0, /* value */
@@ -188,12 +220,19 @@ static int crb_go_idle(struct tpm_chip *chip)
*/
static int __crb_cmd_ready(struct device *dev, struct crb_priv *priv)
{
+ int rc;
+
if ((priv->sm == ACPI_TPM2_START_METHOD) ||
(priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD) ||
(priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC))
return 0;
iowrite32(CRB_CTRL_REQ_CMD_READY, &priv->regs_t->ctrl_req);
+
+ rc = crb_try_pluton_doorbell(priv, true);
+ if (rc)
+ return rc;
+
if (!crb_wait_for_reg_32(&priv->regs_t->ctrl_req,
CRB_CTRL_REQ_CMD_READY /* mask */,
0, /* value */
@@ -371,6 +410,10 @@ static int crb_send(struct tpm_chip *chip, u8 *buf, size_t len)
return -E2BIG;
}
+ /* Seems to be necessary for every command */
+ if (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_PLUTON)
+ __crb_cmd_ready(&chip->dev, priv);
+
memcpy_toio(priv->cmd, buf, len);
/* Make sure that cmd is populated before issuing start. */
@@ -394,7 +437,10 @@ static int crb_send(struct tpm_chip *chip, u8 *buf, size_t len)
rc = tpm_crb_smc_start(&chip->dev, priv->smc_func_id);
}
- return rc;
+ if (rc)
+ return rc;
+
+ return crb_try_pluton_doorbell(priv, false);
}
static void crb_cancel(struct tpm_chip *chip)
@@ -524,15 +570,18 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
return ret;
acpi_dev_free_resource_list(&acpi_resource_list);
- if (resource_type(iores_array) != IORESOURCE_MEM) {
- dev_err(dev, FW_BUG "TPM2 ACPI table does not define a memory resource\n");
- return -EINVAL;
- } else if (resource_type(iores_array + TPM_CRB_MAX_RESOURCES) ==
- IORESOURCE_MEM) {
- dev_warn(dev, "TPM2 ACPI table defines too many memory resources\n");
- memset(iores_array + TPM_CRB_MAX_RESOURCES,
- 0, sizeof(*iores_array));
- iores_array[TPM_CRB_MAX_RESOURCES].flags = 0;
+ /* Pluton doesn't appear to define ACPI memory regions */
+ if (priv->sm != ACPI_TPM2_COMMAND_BUFFER_WITH_PLUTON) {
+ if (resource_type(iores_array) != IORESOURCE_MEM) {
+ dev_err(dev, FW_BUG "TPM2 ACPI table does not define a memory resource\n");
+ return -EINVAL;
+ } else if (resource_type(iores_array + TPM_CRB_MAX_RESOURCES) ==
+ IORESOURCE_MEM) {
+ dev_warn(dev, "TPM2 ACPI table defines too many memory resources\n");
+ memset(iores_array + TPM_CRB_MAX_RESOURCES,
+ 0, sizeof(*iores_array));
+ iores_array[TPM_CRB_MAX_RESOURCES].flags = 0;
+ }
}
iores = NULL;
@@ -656,6 +705,22 @@ out_relinquish_locality:
return ret;
}
+static int crb_map_pluton(struct device *dev, struct crb_priv *priv,
+ struct acpi_table_tpm2 *buf, struct tpm2_crb_pluton *crb_pluton)
+{
+ priv->pluton_start_addr = crb_map_res(dev, NULL, NULL,
+ crb_pluton->start_addr, 4);
+ if (IS_ERR(priv->pluton_start_addr))
+ return PTR_ERR(priv->pluton_start_addr);
+
+ priv->pluton_reply_addr = crb_map_res(dev, NULL, NULL,
+ crb_pluton->reply_addr, 4);
+ if (IS_ERR(priv->pluton_reply_addr))
+ return PTR_ERR(priv->pluton_reply_addr);
+
+ return 0;
+}
+
static int crb_acpi_add(struct acpi_device *device)
{
struct acpi_table_tpm2 *buf;
@@ -663,6 +728,7 @@ static int crb_acpi_add(struct acpi_device *device)
struct tpm_chip *chip;
struct device *dev = &device->dev;
struct tpm2_crb_smc *crb_smc;
+ struct tpm2_crb_pluton *crb_pluton;
acpi_status status;
u32 sm;
int rc;
@@ -700,6 +766,20 @@ static int crb_acpi_add(struct acpi_device *device)
priv->smc_func_id = crb_smc->smc_func_id;
}
+ if (sm == ACPI_TPM2_COMMAND_BUFFER_WITH_PLUTON) {
+ if (buf->header.length < (sizeof(*buf) + sizeof(*crb_pluton))) {
+ dev_err(dev,
+ FW_BUG "TPM2 ACPI table has wrong size %u for start method type %d\n",
+ buf->header.length,
+ ACPI_TPM2_COMMAND_BUFFER_WITH_PLUTON);
+ return -EINVAL;
+ }
+ crb_pluton = ACPI_ADD_PTR(struct tpm2_crb_pluton, buf, sizeof(*buf));
+ rc = crb_map_pluton(dev, priv, buf, crb_pluton);
+ if (rc)
+ return rc;
+ }
+
priv->sm = sm;
priv->hid = acpi_device_hid(device);
diff --git a/drivers/char/tpm/tpm_i2c_atmel.c b/drivers/char/tpm/tpm_i2c_atmel.c
index 4be3677c1463..8f77154e0550 100644
--- a/drivers/char/tpm/tpm_i2c_atmel.c
+++ b/drivers/char/tpm/tpm_i2c_atmel.c
@@ -146,8 +146,7 @@ static const struct tpm_class_ops i2c_atmel = {
.req_canceled = i2c_atmel_req_canceled,
};
-static int i2c_atmel_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int i2c_atmel_probe(struct i2c_client *client)
{
struct tpm_chip *chip;
struct device *dev = &client->dev;
@@ -204,7 +203,7 @@ static SIMPLE_DEV_PM_OPS(i2c_atmel_pm_ops, tpm_pm_suspend, tpm_pm_resume);
static struct i2c_driver i2c_atmel_driver = {
.id_table = i2c_atmel_id,
- .probe = i2c_atmel_probe,
+ .probe_new = i2c_atmel_probe,
.remove = i2c_atmel_remove,
.driver = {
.name = I2C_DRIVER_NAME,
diff --git a/drivers/char/tpm/tpm_i2c_infineon.c b/drivers/char/tpm/tpm_i2c_infineon.c
index fd3c3661e646..7cdaff52a96d 100644
--- a/drivers/char/tpm/tpm_i2c_infineon.c
+++ b/drivers/char/tpm/tpm_i2c_infineon.c
@@ -681,8 +681,7 @@ MODULE_DEVICE_TABLE(of, tpm_tis_i2c_of_match);
static SIMPLE_DEV_PM_OPS(tpm_tis_i2c_ops, tpm_pm_suspend, tpm_pm_resume);
-static int tpm_tis_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int tpm_tis_i2c_probe(struct i2c_client *client)
{
int rc;
struct device *dev = &(client->dev);
@@ -717,7 +716,7 @@ static void tpm_tis_i2c_remove(struct i2c_client *client)
static struct i2c_driver tpm_tis_i2c_driver = {
.id_table = tpm_tis_i2c_table,
- .probe = tpm_tis_i2c_probe,
+ .probe_new = tpm_tis_i2c_probe,
.remove = tpm_tis_i2c_remove,
.driver = {
.name = "tpm_i2c_infineon",
diff --git a/drivers/char/tpm/tpm_i2c_nuvoton.c b/drivers/char/tpm/tpm_i2c_nuvoton.c
index 95c37350cc8e..a026e98add50 100644
--- a/drivers/char/tpm/tpm_i2c_nuvoton.c
+++ b/drivers/char/tpm/tpm_i2c_nuvoton.c
@@ -522,9 +522,9 @@ static int get_vid(struct i2c_client *client, u32 *res)
return 0;
}
-static int i2c_nuvoton_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int i2c_nuvoton_probe(struct i2c_client *client)
{
+ const struct i2c_device_id *id = i2c_client_get_device_id(client);
int rc;
struct tpm_chip *chip;
struct device *dev = &client->dev;
@@ -650,7 +650,7 @@ static SIMPLE_DEV_PM_OPS(i2c_nuvoton_pm_ops, tpm_pm_suspend, tpm_pm_resume);
static struct i2c_driver i2c_nuvoton_driver = {
.id_table = i2c_nuvoton_id,
- .probe = i2c_nuvoton_probe,
+ .probe_new = i2c_nuvoton_probe,
.remove = i2c_nuvoton_remove,
.driver = {
.name = "tpm_i2c_nuvoton",
diff --git a/drivers/char/tpm/tpm_tis_i2c.c b/drivers/char/tpm/tpm_tis_i2c.c
index f3a7251c8e38..c8c34adc14c0 100644
--- a/drivers/char/tpm/tpm_tis_i2c.c
+++ b/drivers/char/tpm/tpm_tis_i2c.c
@@ -312,8 +312,7 @@ static const struct tpm_tis_phy_ops tpm_i2c_phy_ops = {
.verify_crc = tpm_tis_i2c_verify_crc,
};
-static int tpm_tis_i2c_probe(struct i2c_client *dev,
- const struct i2c_device_id *id)
+static int tpm_tis_i2c_probe(struct i2c_client *dev)
{
struct tpm_tis_i2c_phy *phy;
const u8 crc_enable = 1;
@@ -380,7 +379,7 @@ static struct i2c_driver tpm_tis_i2c_driver = {
.pm = &tpm_tis_pm,
.of_match_table = of_match_ptr(of_tis_i2c_match),
},
- .probe = tpm_tis_i2c_probe,
+ .probe_new = tpm_tis_i2c_probe,
.remove = tpm_tis_i2c_remove,
.id_table = tpm_tis_i2c_id,
};
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 6a821118d553..d5ac4d955bc8 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -1666,9 +1666,8 @@ static void handle_control_message(struct virtio_device *vdev,
"Not enough space to store port name\n");
break;
}
- strncpy(port->name, buf->buf + buf->offset + sizeof(*cpkt),
- name_size - 1);
- port->name[name_size - 1] = 0;
+ strscpy(port->name, buf->buf + buf->offset + sizeof(*cpkt),
+ name_size);
/*
* Since we only have one sysfs attribute, 'name',
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index e62552a75f08..f6d7c6a9a654 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -1055,12 +1055,12 @@ static void clk_core_disable(struct clk_core *core)
if (--core->enable_count > 0)
return;
- trace_clk_disable_rcuidle(core);
+ trace_clk_disable(core);
if (core->ops->disable)
core->ops->disable(core->hw);
- trace_clk_disable_complete_rcuidle(core);
+ trace_clk_disable_complete(core);
clk_core_disable(core->parent);
}
@@ -1114,12 +1114,12 @@ static int clk_core_enable(struct clk_core *core)
if (ret)
return ret;
- trace_clk_enable_rcuidle(core);
+ trace_clk_enable(core);
if (core->ops->enable)
ret = core->ops->enable(core->hw);
- trace_clk_enable_complete_rcuidle(core);
+ trace_clk_enable_complete(core);
if (ret) {
clk_core_disable(core->parent);
diff --git a/drivers/clk/davinci/Makefile b/drivers/clk/davinci/Makefile
index be6f55d37b49..5d0ae1ee72ec 100644
--- a/drivers/clk/davinci/Makefile
+++ b/drivers/clk/davinci/Makefile
@@ -6,12 +6,8 @@ obj-$(CONFIG_ARCH_DAVINCI_DA8XX) += da8xx-cfgchip.o
obj-y += pll.o
obj-$(CONFIG_ARCH_DAVINCI_DA830) += pll-da830.o
obj-$(CONFIG_ARCH_DAVINCI_DA850) += pll-da850.o
-obj-$(CONFIG_ARCH_DAVINCI_DM355) += pll-dm355.o
-obj-$(CONFIG_ARCH_DAVINCI_DM365) += pll-dm365.o
obj-y += psc.o
obj-$(CONFIG_ARCH_DAVINCI_DA830) += psc-da830.o
obj-$(CONFIG_ARCH_DAVINCI_DA850) += psc-da850.o
-obj-$(CONFIG_ARCH_DAVINCI_DM355) += psc-dm355.o
-obj-$(CONFIG_ARCH_DAVINCI_DM365) += psc-dm365.o
endif
diff --git a/drivers/clk/davinci/pll-dm355.c b/drivers/clk/davinci/pll-dm355.c
deleted file mode 100644
index 505aed80be9a..000000000000
--- a/drivers/clk/davinci/pll-dm355.c
+++ /dev/null
@@ -1,77 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * PLL clock descriptions for TI DM355
- *
- * Copyright (C) 2018 David Lechner <david@lechnology.com>
- */
-
-#include <linux/bitops.h>
-#include <linux/clk/davinci.h>
-#include <linux/clkdev.h>
-#include <linux/init.h>
-#include <linux/types.h>
-
-#include "pll.h"
-
-static const struct davinci_pll_clk_info dm355_pll1_info = {
- .name = "pll1",
- .pllm_mask = GENMASK(7, 0),
- .pllm_min = 92,
- .pllm_max = 184,
- .flags = PLL_HAS_CLKMODE | PLL_HAS_PREDIV | PLL_PREDIV_ALWAYS_ENABLED |
- PLL_PREDIV_FIXED8 | PLL_HAS_POSTDIV |
- PLL_POSTDIV_ALWAYS_ENABLED | PLL_POSTDIV_FIXED_DIV,
-};
-
-SYSCLK(1, pll1_sysclk1, pll1_pllen, 5, SYSCLK_FIXED_DIV | SYSCLK_ALWAYS_ENABLED);
-SYSCLK(2, pll1_sysclk2, pll1_pllen, 5, SYSCLK_FIXED_DIV | SYSCLK_ALWAYS_ENABLED);
-SYSCLK(3, pll1_sysclk3, pll1_pllen, 5, SYSCLK_ALWAYS_ENABLED);
-SYSCLK(4, pll1_sysclk4, pll1_pllen, 5, SYSCLK_ALWAYS_ENABLED);
-
-int dm355_pll1_init(struct device *dev, void __iomem *base, struct regmap *cfgchip)
-{
- struct clk *clk;
-
- davinci_pll_clk_register(dev, &dm355_pll1_info, "ref_clk", base, cfgchip);
-
- clk = davinci_pll_sysclk_register(dev, &pll1_sysclk1, base);
- clk_register_clkdev(clk, "pll1_sysclk1", "dm355-psc");
-
- clk = davinci_pll_sysclk_register(dev, &pll1_sysclk2, base);
- clk_register_clkdev(clk, "pll1_sysclk2", "dm355-psc");
-
- clk = davinci_pll_sysclk_register(dev, &pll1_sysclk3, base);
- clk_register_clkdev(clk, "pll1_sysclk3", "dm355-psc");
-
- clk = davinci_pll_sysclk_register(dev, &pll1_sysclk4, base);
- clk_register_clkdev(clk, "pll1_sysclk4", "dm355-psc");
-
- clk = davinci_pll_auxclk_register(dev, "pll1_auxclk", base);
- clk_register_clkdev(clk, "pll1_auxclk", "dm355-psc");
-
- davinci_pll_sysclkbp_clk_register(dev, "pll1_sysclkbp", base);
-
- return 0;
-}
-
-static const struct davinci_pll_clk_info dm355_pll2_info = {
- .name = "pll2",
- .pllm_mask = GENMASK(7, 0),
- .pllm_min = 92,
- .pllm_max = 184,
- .flags = PLL_HAS_PREDIV | PLL_PREDIV_ALWAYS_ENABLED | PLL_HAS_POSTDIV |
- PLL_POSTDIV_ALWAYS_ENABLED | PLL_POSTDIV_FIXED_DIV,
-};
-
-SYSCLK(1, pll2_sysclk1, pll2_pllen, 5, SYSCLK_FIXED_DIV | SYSCLK_ALWAYS_ENABLED);
-
-int dm355_pll2_init(struct device *dev, void __iomem *base, struct regmap *cfgchip)
-{
- davinci_pll_clk_register(dev, &dm355_pll2_info, "oscin", base, cfgchip);
-
- davinci_pll_sysclk_register(dev, &pll2_sysclk1, base);
-
- davinci_pll_sysclkbp_clk_register(dev, "pll2_sysclkbp", base);
-
- return 0;
-}
diff --git a/drivers/clk/davinci/pll-dm365.c b/drivers/clk/davinci/pll-dm365.c
deleted file mode 100644
index 2d29712753a3..000000000000
--- a/drivers/clk/davinci/pll-dm365.c
+++ /dev/null
@@ -1,146 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * PLL clock descriptions for TI DM365
- *
- * Copyright (C) 2018 David Lechner <david@lechnology.com>
- */
-
-#include <linux/bitops.h>
-#include <linux/clkdev.h>
-#include <linux/clk/davinci.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-
-#include "pll.h"
-
-#define OCSEL_OCSRC_ENABLE 0
-
-static const struct davinci_pll_clk_info dm365_pll1_info = {
- .name = "pll1",
- .pllm_mask = GENMASK(9, 0),
- .pllm_min = 1,
- .pllm_max = 1023,
- .flags = PLL_HAS_CLKMODE | PLL_HAS_PREDIV | PLL_HAS_POSTDIV |
- PLL_POSTDIV_ALWAYS_ENABLED | PLL_PLLM_2X,
-};
-
-SYSCLK(1, pll1_sysclk1, pll1_pllen, 5, SYSCLK_ALWAYS_ENABLED);
-SYSCLK(2, pll1_sysclk2, pll1_pllen, 5, SYSCLK_ALWAYS_ENABLED);
-SYSCLK(3, pll1_sysclk3, pll1_pllen, 5, SYSCLK_ALWAYS_ENABLED);
-SYSCLK(4, pll1_sysclk4, pll1_pllen, 5, SYSCLK_ALWAYS_ENABLED);
-SYSCLK(5, pll1_sysclk5, pll1_pllen, 5, SYSCLK_ALWAYS_ENABLED);
-SYSCLK(6, pll1_sysclk6, pll1_pllen, 5, SYSCLK_ALWAYS_ENABLED);
-SYSCLK(7, pll1_sysclk7, pll1_pllen, 5, SYSCLK_ALWAYS_ENABLED);
-SYSCLK(8, pll1_sysclk8, pll1_pllen, 5, SYSCLK_ALWAYS_ENABLED);
-SYSCLK(9, pll1_sysclk9, pll1_pllen, 5, SYSCLK_ALWAYS_ENABLED);
-
-/*
- * This is a bit of a hack to make OCSEL[OCSRC] on DM365 look like OCSEL[OCSRC]
- * on DA850. On DM365, OCSEL[OCSRC] is just an enable/disable bit instead of a
- * multiplexer. By modeling it as a single parent mux clock, the clock code will
- * still do the right thing in this case.
- */
-static const char * const dm365_pll_obsclk_parent_names[] = {
- "oscin",
-};
-
-static u32 dm365_pll_obsclk_table[] = {
- OCSEL_OCSRC_ENABLE,
-};
-
-static const struct davinci_pll_obsclk_info dm365_pll1_obsclk_info = {
- .name = "pll1_obsclk",
- .parent_names = dm365_pll_obsclk_parent_names,
- .num_parents = ARRAY_SIZE(dm365_pll_obsclk_parent_names),
- .table = dm365_pll_obsclk_table,
- .ocsrc_mask = BIT(4),
-};
-
-int dm365_pll1_init(struct device *dev, void __iomem *base, struct regmap *cfgchip)
-{
- struct clk *clk;
-
- davinci_pll_clk_register(dev, &dm365_pll1_info, "ref_clk", base, cfgchip);
-
- clk = davinci_pll_sysclk_register(dev, &pll1_sysclk1, base);
- clk_register_clkdev(clk, "pll1_sysclk1", "dm365-psc");
-
- clk = davinci_pll_sysclk_register(dev, &pll1_sysclk2, base);
- clk_register_clkdev(clk, "pll1_sysclk2", "dm365-psc");
-
- clk = davinci_pll_sysclk_register(dev, &pll1_sysclk3, base);
- clk_register_clkdev(clk, "pll1_sysclk3", "dm365-psc");
-
- clk = davinci_pll_sysclk_register(dev, &pll1_sysclk4, base);
- clk_register_clkdev(clk, "pll1_sysclk4", "dm365-psc");
-
- clk = davinci_pll_sysclk_register(dev, &pll1_sysclk5, base);
- clk_register_clkdev(clk, "pll1_sysclk5", "dm365-psc");
-
- davinci_pll_sysclk_register(dev, &pll1_sysclk6, base);
-
- davinci_pll_sysclk_register(dev, &pll1_sysclk7, base);
-
- clk = davinci_pll_sysclk_register(dev, &pll1_sysclk8, base);
- clk_register_clkdev(clk, "pll1_sysclk8", "dm365-psc");
-
- davinci_pll_sysclk_register(dev, &pll1_sysclk9, base);
-
- clk = davinci_pll_auxclk_register(dev, "pll1_auxclk", base);
- clk_register_clkdev(clk, "pll1_auxclk", "dm355-psc");
-
- davinci_pll_sysclkbp_clk_register(dev, "pll1_sysclkbp", base);
-
- davinci_pll_obsclk_register(dev, &dm365_pll1_obsclk_info, base);
-
- return 0;
-}
-
-static const struct davinci_pll_clk_info dm365_pll2_info = {
- .name = "pll2",
- .pllm_mask = GENMASK(9, 0),
- .pllm_min = 1,
- .pllm_max = 1023,
- .flags = PLL_HAS_PREDIV | PLL_HAS_POSTDIV | PLL_POSTDIV_ALWAYS_ENABLED |
- PLL_PLLM_2X,
-};
-
-SYSCLK(1, pll2_sysclk1, pll2_pllen, 5, SYSCLK_ALWAYS_ENABLED);
-SYSCLK(2, pll2_sysclk2, pll2_pllen, 5, SYSCLK_ALWAYS_ENABLED);
-SYSCLK(3, pll2_sysclk3, pll2_pllen, 5, SYSCLK_ALWAYS_ENABLED);
-SYSCLK(4, pll2_sysclk4, pll2_pllen, 5, SYSCLK_ALWAYS_ENABLED);
-SYSCLK(5, pll2_sysclk5, pll2_pllen, 5, SYSCLK_ALWAYS_ENABLED);
-
-static const struct davinci_pll_obsclk_info dm365_pll2_obsclk_info = {
- .name = "pll2_obsclk",
- .parent_names = dm365_pll_obsclk_parent_names,
- .num_parents = ARRAY_SIZE(dm365_pll_obsclk_parent_names),
- .table = dm365_pll_obsclk_table,
- .ocsrc_mask = BIT(4),
-};
-
-int dm365_pll2_init(struct device *dev, void __iomem *base, struct regmap *cfgchip)
-{
- struct clk *clk;
-
- davinci_pll_clk_register(dev, &dm365_pll2_info, "oscin", base, cfgchip);
-
- davinci_pll_sysclk_register(dev, &pll2_sysclk1, base);
-
- clk = davinci_pll_sysclk_register(dev, &pll2_sysclk2, base);
- clk_register_clkdev(clk, "pll1_sysclk2", "dm365-psc");
-
- davinci_pll_sysclk_register(dev, &pll2_sysclk3, base);
-
- clk = davinci_pll_sysclk_register(dev, &pll2_sysclk4, base);
- clk_register_clkdev(clk, "pll1_sysclk4", "dm365-psc");
-
- davinci_pll_sysclk_register(dev, &pll2_sysclk5, base);
-
- davinci_pll_auxclk_register(dev, "pll2_auxclk", base);
-
- davinci_pll_obsclk_register(dev, &dm365_pll2_obsclk_info, base);
-
- return 0;
-}
diff --git a/drivers/clk/davinci/pll.c b/drivers/clk/davinci/pll.c
index f862f5e2b3fc..87bdf8879045 100644
--- a/drivers/clk/davinci/pll.c
+++ b/drivers/clk/davinci/pll.c
@@ -882,14 +882,6 @@ static const struct platform_device_id davinci_pll_id_table[] = {
{ .name = "da850-pll0", .driver_data = (kernel_ulong_t)da850_pll0_init },
{ .name = "da850-pll1", .driver_data = (kernel_ulong_t)da850_pll1_init },
#endif
-#ifdef CONFIG_ARCH_DAVINCI_DM355
- { .name = "dm355-pll1", .driver_data = (kernel_ulong_t)dm355_pll1_init },
- { .name = "dm355-pll2", .driver_data = (kernel_ulong_t)dm355_pll2_init },
-#endif
-#ifdef CONFIG_ARCH_DAVINCI_DM365
- { .name = "dm365-pll1", .driver_data = (kernel_ulong_t)dm365_pll1_init },
- { .name = "dm365-pll2", .driver_data = (kernel_ulong_t)dm365_pll2_init },
-#endif
{ }
};
diff --git a/drivers/clk/davinci/pll.h b/drivers/clk/davinci/pll.h
index 1773277bc690..20bfcec2d3b5 100644
--- a/drivers/clk/davinci/pll.h
+++ b/drivers/clk/davinci/pll.h
@@ -122,13 +122,8 @@ int of_davinci_pll_init(struct device *dev, struct device_node *node,
/* Platform-specific callbacks */
-#ifdef CONFIG_ARCH_DAVINCI_DA850
int da850_pll1_init(struct device *dev, void __iomem *base, struct regmap *cfgchip);
void of_da850_pll0_init(struct device_node *node);
int of_da850_pll1_init(struct device *dev, void __iomem *base, struct regmap *cfgchip);
-#endif
-#ifdef CONFIG_ARCH_DAVINCI_DM355
-int dm355_pll2_init(struct device *dev, void __iomem *base, struct regmap *cfgchip);
-#endif
#endif /* __CLK_DAVINCI_PLL_H___ */
diff --git a/drivers/clk/davinci/psc-dm355.c b/drivers/clk/davinci/psc-dm355.c
deleted file mode 100644
index ddd250107c4e..000000000000
--- a/drivers/clk/davinci/psc-dm355.c
+++ /dev/null
@@ -1,89 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * PSC clock descriptions for TI DaVinci DM355
- *
- * Copyright (C) 2018 David Lechner <david@lechnology.com>
- */
-
-#include <linux/clk-provider.h>
-#include <linux/clk/davinci.h>
-#include <linux/clk.h>
-#include <linux/clkdev.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-
-#include "psc.h"
-
-LPSC_CLKDEV1(vpss_master_clkdev, "master", "vpss");
-LPSC_CLKDEV1(vpss_slave_clkdev, "slave", "vpss");
-LPSC_CLKDEV1(spi1_clkdev, NULL, "spi_davinci.1");
-LPSC_CLKDEV1(mmcsd1_clkdev, NULL, "dm6441-mmc.1");
-LPSC_CLKDEV1(mcbsp1_clkdev, NULL, "davinci-mcbsp.1");
-LPSC_CLKDEV1(usb_clkdev, "usb", NULL);
-LPSC_CLKDEV1(spi2_clkdev, NULL, "spi_davinci.2");
-LPSC_CLKDEV1(aemif_clkdev, "aemif", NULL);
-LPSC_CLKDEV1(mmcsd0_clkdev, NULL, "dm6441-mmc.0");
-LPSC_CLKDEV1(mcbsp0_clkdev, NULL, "davinci-mcbsp.0");
-LPSC_CLKDEV1(i2c_clkdev, NULL, "i2c_davinci.1");
-LPSC_CLKDEV1(uart0_clkdev, NULL, "serial8250.0");
-LPSC_CLKDEV1(uart1_clkdev, NULL, "serial8250.1");
-LPSC_CLKDEV1(uart2_clkdev, NULL, "serial8250.2");
-LPSC_CLKDEV1(spi0_clkdev, NULL, "spi_davinci.0");
-/* REVISIT: gpio-davinci.c should be modified to drop con_id */
-LPSC_CLKDEV1(gpio_clkdev, "gpio", NULL);
-LPSC_CLKDEV1(timer0_clkdev, "timer0", NULL);
-LPSC_CLKDEV1(timer2_clkdev, NULL, "davinci-wdt");
-LPSC_CLKDEV1(vpss_dac_clkdev, "vpss_dac", NULL);
-
-static const struct davinci_lpsc_clk_info dm355_psc_info[] = {
- LPSC(0, 0, vpss_master, pll1_sysclk4, vpss_master_clkdev, 0),
- LPSC(1, 0, vpss_slave, pll1_sysclk4, vpss_slave_clkdev, 0),
- LPSC(5, 0, timer3, pll1_auxclk, NULL, 0),
- LPSC(6, 0, spi1, pll1_sysclk2, spi1_clkdev, 0),
- LPSC(7, 0, mmcsd1, pll1_sysclk2, mmcsd1_clkdev, 0),
- LPSC(8, 0, asp1, pll1_sysclk2, mcbsp1_clkdev, 0),
- LPSC(9, 0, usb, pll1_sysclk2, usb_clkdev, 0),
- LPSC(10, 0, pwm3, pll1_auxclk, NULL, 0),
- LPSC(11, 0, spi2, pll1_sysclk2, spi2_clkdev, 0),
- LPSC(12, 0, rto, pll1_auxclk, NULL, 0),
- LPSC(14, 0, aemif, pll1_sysclk2, aemif_clkdev, 0),
- LPSC(15, 0, mmcsd0, pll1_sysclk2, mmcsd0_clkdev, 0),
- LPSC(17, 0, asp0, pll1_sysclk2, mcbsp0_clkdev, 0),
- LPSC(18, 0, i2c, pll1_auxclk, i2c_clkdev, 0),
- LPSC(19, 0, uart0, pll1_auxclk, uart0_clkdev, 0),
- LPSC(20, 0, uart1, pll1_auxclk, uart1_clkdev, 0),
- LPSC(21, 0, uart2, pll1_sysclk2, uart2_clkdev, 0),
- LPSC(22, 0, spi0, pll1_sysclk2, spi0_clkdev, 0),
- LPSC(23, 0, pwm0, pll1_auxclk, NULL, 0),
- LPSC(24, 0, pwm1, pll1_auxclk, NULL, 0),
- LPSC(25, 0, pwm2, pll1_auxclk, NULL, 0),
- LPSC(26, 0, gpio, pll1_sysclk2, gpio_clkdev, 0),
- LPSC(27, 0, timer0, pll1_auxclk, timer0_clkdev, LPSC_ALWAYS_ENABLED),
- LPSC(28, 0, timer1, pll1_auxclk, NULL, 0),
- /* REVISIT: why can't this be disabled? */
- LPSC(29, 0, timer2, pll1_auxclk, timer2_clkdev, LPSC_ALWAYS_ENABLED),
- LPSC(31, 0, arm, pll1_sysclk1, NULL, LPSC_ALWAYS_ENABLED),
- LPSC(40, 0, mjcp, pll1_sysclk1, NULL, 0),
- LPSC(41, 0, vpss_dac, pll1_sysclk3, vpss_dac_clkdev, 0),
- { }
-};
-
-int dm355_psc_init(struct device *dev, void __iomem *base)
-{
- return davinci_psc_register_clocks(dev, dm355_psc_info, 42, base);
-}
-
-static struct clk_bulk_data dm355_psc_parent_clks[] = {
- { .id = "pll1_sysclk1" },
- { .id = "pll1_sysclk2" },
- { .id = "pll1_sysclk3" },
- { .id = "pll1_sysclk4" },
- { .id = "pll1_auxclk" },
-};
-
-const struct davinci_psc_init_data dm355_psc_init_data = {
- .parent_clks = dm355_psc_parent_clks,
- .num_parent_clks = ARRAY_SIZE(dm355_psc_parent_clks),
- .psc_init = &dm355_psc_init,
-};
diff --git a/drivers/clk/davinci/psc-dm365.c b/drivers/clk/davinci/psc-dm365.c
deleted file mode 100644
index c75424f4ea3b..000000000000
--- a/drivers/clk/davinci/psc-dm365.c
+++ /dev/null
@@ -1,111 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * PSC clock descriptions for TI DaVinci DM365
- *
- * Copyright (C) 2018 David Lechner <david@lechnology.com>
- */
-
-#include <linux/clk-provider.h>
-#include <linux/clk/davinci.h>
-#include <linux/clk.h>
-#include <linux/clkdev.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-
-#include "psc.h"
-
-LPSC_CLKDEV1(vpss_slave_clkdev, "slave", "vpss");
-LPSC_CLKDEV1(spi1_clkdev, NULL, "spi_davinci.1");
-LPSC_CLKDEV1(mmcsd1_clkdev, NULL, "da830-mmc.1");
-LPSC_CLKDEV1(asp0_clkdev, NULL, "davinci-mcbsp");
-LPSC_CLKDEV1(usb_clkdev, "usb", NULL);
-LPSC_CLKDEV1(spi2_clkdev, NULL, "spi_davinci.2");
-LPSC_CLKDEV2(aemif_clkdev, "aemif", NULL,
- NULL, "ti-aemif");
-LPSC_CLKDEV1(mmcsd0_clkdev, NULL, "da830-mmc.0");
-LPSC_CLKDEV1(i2c_clkdev, NULL, "i2c_davinci.1");
-LPSC_CLKDEV1(uart0_clkdev, NULL, "serial8250.0");
-LPSC_CLKDEV1(uart1_clkdev, NULL, "serial8250.1");
-LPSC_CLKDEV1(spi0_clkdev, NULL, "spi_davinci.0");
-/* REVISIT: gpio-davinci.c should be modified to drop con_id */
-LPSC_CLKDEV1(gpio_clkdev, "gpio", NULL);
-LPSC_CLKDEV1(timer0_clkdev, "timer0", NULL);
-LPSC_CLKDEV1(timer2_clkdev, NULL, "davinci-wdt");
-LPSC_CLKDEV1(spi3_clkdev, NULL, "spi_davinci.3");
-LPSC_CLKDEV1(spi4_clkdev, NULL, "spi_davinci.4");
-LPSC_CLKDEV2(emac_clkdev, NULL, "davinci_emac.1",
- "fck", "davinci_mdio.0");
-LPSC_CLKDEV1(voice_codec_clkdev, NULL, "davinci_voicecodec");
-LPSC_CLKDEV1(vpss_dac_clkdev, "vpss_dac", NULL);
-LPSC_CLKDEV1(vpss_master_clkdev, "master", "vpss");
-
-static const struct davinci_lpsc_clk_info dm365_psc_info[] = {
- LPSC(1, 0, vpss_slave, pll1_sysclk5, vpss_slave_clkdev, 0),
- LPSC(5, 0, timer3, pll1_auxclk, NULL, 0),
- LPSC(6, 0, spi1, pll1_sysclk4, spi1_clkdev, 0),
- LPSC(7, 0, mmcsd1, pll1_sysclk4, mmcsd1_clkdev, 0),
- LPSC(8, 0, asp0, pll1_sysclk4, asp0_clkdev, 0),
- LPSC(9, 0, usb, pll1_auxclk, usb_clkdev, 0),
- LPSC(10, 0, pwm3, pll1_auxclk, NULL, 0),
- LPSC(11, 0, spi2, pll1_sysclk4, spi2_clkdev, 0),
- LPSC(12, 0, rto, pll1_sysclk4, NULL, 0),
- LPSC(14, 0, aemif, pll1_sysclk4, aemif_clkdev, 0),
- LPSC(15, 0, mmcsd0, pll1_sysclk8, mmcsd0_clkdev, 0),
- LPSC(18, 0, i2c, pll1_auxclk, i2c_clkdev, 0),
- LPSC(19, 0, uart0, pll1_auxclk, uart0_clkdev, 0),
- LPSC(20, 0, uart1, pll1_sysclk4, uart1_clkdev, 0),
- LPSC(22, 0, spi0, pll1_sysclk4, spi0_clkdev, 0),
- LPSC(23, 0, pwm0, pll1_auxclk, NULL, 0),
- LPSC(24, 0, pwm1, pll1_auxclk, NULL, 0),
- LPSC(25, 0, pwm2, pll1_auxclk, NULL, 0),
- LPSC(26, 0, gpio, pll1_sysclk4, gpio_clkdev, 0),
- LPSC(27, 0, timer0, pll1_auxclk, timer0_clkdev, LPSC_ALWAYS_ENABLED),
- LPSC(28, 0, timer1, pll1_auxclk, NULL, 0),
- /* REVISIT: why can't this be disabled? */
- LPSC(29, 0, timer2, pll1_auxclk, timer2_clkdev, LPSC_ALWAYS_ENABLED),
- LPSC(31, 0, arm, pll2_sysclk2, NULL, LPSC_ALWAYS_ENABLED),
- LPSC(38, 0, spi3, pll1_sysclk4, spi3_clkdev, 0),
- LPSC(39, 0, spi4, pll1_auxclk, spi4_clkdev, 0),
- LPSC(40, 0, emac, pll1_sysclk4, emac_clkdev, 0),
- /*
- * The TRM (ARM Subsystem User's Guide) shows two clocks input into
- * voice codec module (PLL2 SYSCLK4 with a DIV2 and PLL1 SYSCLK4). Its
- * not fully clear from documentation which clock should be considered
- * as parent for PSC. The clock chosen here is to maintain
- * compatibility with existing code in arch/arm/mach-davinci/dm365.c
- */
- LPSC(44, 0, voice_codec, pll2_sysclk4, voice_codec_clkdev, 0),
- /*
- * Its not fully clear from TRM (ARM Subsystem User's Guide) as to what
- * the parent of VPSS DAC LPSC should actually be. PLL1 SYSCLK3 feeds
- * into HDVICP and MJCP. The clock chosen here is to remain compatible
- * with code existing in arch/arm/mach-davinci/dm365.c
- */
- LPSC(46, 0, vpss_dac, pll1_sysclk3, vpss_dac_clkdev, 0),
- LPSC(47, 0, vpss_master, pll1_sysclk5, vpss_master_clkdev, 0),
- LPSC(50, 0, mjcp, pll1_sysclk3, NULL, 0),
- { }
-};
-
-int dm365_psc_init(struct device *dev, void __iomem *base)
-{
- return davinci_psc_register_clocks(dev, dm365_psc_info, 52, base);
-}
-
-static struct clk_bulk_data dm365_psc_parent_clks[] = {
- { .id = "pll1_sysclk1" },
- { .id = "pll1_sysclk3" },
- { .id = "pll1_sysclk4" },
- { .id = "pll1_sysclk5" },
- { .id = "pll1_sysclk8" },
- { .id = "pll2_sysclk2" },
- { .id = "pll2_sysclk4" },
- { .id = "pll1_auxclk" },
-};
-
-const struct davinci_psc_init_data dm365_psc_init_data = {
- .parent_clks = dm365_psc_parent_clks,
- .num_parent_clks = ARRAY_SIZE(dm365_psc_parent_clks),
- .psc_init = &dm365_psc_init,
-};
diff --git a/drivers/clk/davinci/psc.c b/drivers/clk/davinci/psc.c
index 42a59dbd49c8..cd85d9f158b0 100644
--- a/drivers/clk/davinci/psc.c
+++ b/drivers/clk/davinci/psc.c
@@ -511,12 +511,6 @@ static const struct platform_device_id davinci_psc_id_table[] = {
{ .name = "da850-psc0", .driver_data = (kernel_ulong_t)&da850_psc0_init_data },
{ .name = "da850-psc1", .driver_data = (kernel_ulong_t)&da850_psc1_init_data },
#endif
-#ifdef CONFIG_ARCH_DAVINCI_DM355
- { .name = "dm355-psc", .driver_data = (kernel_ulong_t)&dm355_psc_init_data },
-#endif
-#ifdef CONFIG_ARCH_DAVINCI_DM365
- { .name = "dm365-psc", .driver_data = (kernel_ulong_t)&dm365_psc_init_data },
-#endif
{ }
};
diff --git a/drivers/clk/davinci/psc.h b/drivers/clk/davinci/psc.h
index 5e382b675518..bd23f6fd56df 100644
--- a/drivers/clk/davinci/psc.h
+++ b/drivers/clk/davinci/psc.h
@@ -104,11 +104,4 @@ extern const struct davinci_psc_init_data da850_psc1_init_data;
extern const struct davinci_psc_init_data of_da850_psc0_init_data;
extern const struct davinci_psc_init_data of_da850_psc1_init_data;
#endif
-#ifdef CONFIG_ARCH_DAVINCI_DM355
-extern const struct davinci_psc_init_data dm355_psc_init_data;
-#endif
-#ifdef CONFIG_ARCH_DAVINCI_DM365
-extern const struct davinci_psc_init_data dm365_psc_init_data;
-#endif
-
#endif /* __CLK_DAVINCI_PSC_H__ */
diff --git a/drivers/clk/ingenic/jz4760-cgu.c b/drivers/clk/ingenic/jz4760-cgu.c
index ecd395ac8a28..e407f00bd594 100644
--- a/drivers/clk/ingenic/jz4760-cgu.c
+++ b/drivers/clk/ingenic/jz4760-cgu.c
@@ -58,7 +58,7 @@ jz4760_cgu_calc_m_n_od(const struct ingenic_cgu_pll_info *pll_info,
unsigned long rate, unsigned long parent_rate,
unsigned int *pm, unsigned int *pn, unsigned int *pod)
{
- unsigned int m, n, od, m_max = (1 << pll_info->m_bits) - 2;
+ unsigned int m, n, od, m_max = (1 << pll_info->m_bits) - 1;
/* The frequency after the N divider must be between 1 and 50 MHz. */
n = parent_rate / (1 * MHZ);
@@ -66,19 +66,17 @@ jz4760_cgu_calc_m_n_od(const struct ingenic_cgu_pll_info *pll_info,
/* The N divider must be >= 2. */
n = clamp_val(n, 2, 1 << pll_info->n_bits);
- for (;; n >>= 1) {
- od = (unsigned int)-1;
+ rate /= MHZ;
+ parent_rate /= MHZ;
- do {
- m = (rate / MHZ) * (1 << ++od) * n / (parent_rate / MHZ);
- } while ((m > m_max || m & 1) && (od < 4));
-
- if (od < 4 && m >= 4 && m <= m_max)
- break;
+ for (m = m_max; m >= m_max && n >= 2; n--) {
+ m = rate * n / parent_rate;
+ od = m & 1;
+ m <<= od;
}
*pm = m;
- *pn = n;
+ *pn = n + 1;
*pod = 1 << od;
}
diff --git a/drivers/clk/microchip/clk-mpfs-ccc.c b/drivers/clk/microchip/clk-mpfs-ccc.c
index 32aae880a14f..0ddc73e07be4 100644
--- a/drivers/clk/microchip/clk-mpfs-ccc.c
+++ b/drivers/clk/microchip/clk-mpfs-ccc.c
@@ -164,12 +164,11 @@ static int mpfs_ccc_register_outputs(struct device *dev, struct mpfs_ccc_out_hw_
for (unsigned int i = 0; i < num_clks; i++) {
struct mpfs_ccc_out_hw_clock *out_hw = &out_hws[i];
- char *name = devm_kzalloc(dev, 23, GFP_KERNEL);
+ char *name = devm_kasprintf(dev, GFP_KERNEL, "%s_out%u", parent->name, i);
if (!name)
return -ENOMEM;
- snprintf(name, 23, "%s_out%u", parent->name, i);
out_hw->divider.hw.init = CLK_HW_INIT_HW(name, &parent->hw, &clk_divider_ops, 0);
out_hw->divider.reg = data->pll_base[i / MPFS_CCC_OUTPUTS_PER_PLL] +
out_hw->reg_offset;
@@ -201,14 +200,13 @@ static int mpfs_ccc_register_plls(struct device *dev, struct mpfs_ccc_pll_hw_clo
for (unsigned int i = 0; i < num_clks; i++) {
struct mpfs_ccc_pll_hw_clock *pll_hw = &pll_hws[i];
- char *name = devm_kzalloc(dev, 18, GFP_KERNEL);
- if (!name)
+ pll_hw->name = devm_kasprintf(dev, GFP_KERNEL, "ccc%s_pll%u",
+ strchrnul(dev->of_node->full_name, '@'), i);
+ if (!pll_hw->name)
return -ENOMEM;
pll_hw->base = data->pll_base[i];
- snprintf(name, 18, "ccc%s_pll%u", strchrnul(dev->of_node->full_name, '@'), i);
- pll_hw->name = (const char *)name;
pll_hw->hw.init = CLK_HW_INIT_PARENTS_DATA_FIXED_SIZE(pll_hw->name,
pll_hw->parents,
&mpfs_ccc_pll_ops, 0);
diff --git a/drivers/clk/renesas/r9a06g032-clocks.c b/drivers/clk/renesas/r9a06g032-clocks.c
index 983faa5707b9..087146f2ee06 100644
--- a/drivers/clk/renesas/r9a06g032-clocks.c
+++ b/drivers/clk/renesas/r9a06g032-clocks.c
@@ -25,6 +25,8 @@
#include <linux/spinlock.h>
#include <dt-bindings/clock/r9a06g032-sysctrl.h>
+#define R9A06G032_SYSCTRL_USB 0x00
+#define R9A06G032_SYSCTRL_USB_H2MODE (1<<1)
#define R9A06G032_SYSCTRL_DMAMUX 0xA0
struct r9a06g032_gate {
@@ -918,6 +920,29 @@ static void r9a06g032_clocks_del_clk_provider(void *data)
of_clk_del_provider(data);
}
+static void __init r9a06g032_init_h2mode(struct r9a06g032_priv *clocks)
+{
+ struct device_node *usbf_np = NULL;
+ u32 usb;
+
+ while ((usbf_np = of_find_compatible_node(usbf_np, NULL,
+ "renesas,rzn1-usbf"))) {
+ if (of_device_is_available(usbf_np))
+ break;
+ }
+
+ usb = readl(clocks->reg + R9A06G032_SYSCTRL_USB);
+ if (usbf_np) {
+ /* 1 host and 1 device mode */
+ usb &= ~R9A06G032_SYSCTRL_USB_H2MODE;
+ of_node_put(usbf_np);
+ } else {
+ /* 2 hosts mode */
+ usb |= R9A06G032_SYSCTRL_USB_H2MODE;
+ }
+ writel(usb, clocks->reg + R9A06G032_SYSCTRL_USB);
+}
+
static int __init r9a06g032_clocks_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -947,6 +972,9 @@ static int __init r9a06g032_clocks_probe(struct platform_device *pdev)
clocks->reg = of_iomap(np, 0);
if (WARN_ON(!clocks->reg))
return -ENOMEM;
+
+ r9a06g032_init_h2mode(clocks);
+
for (i = 0; i < ARRAY_SIZE(r9a06g032_clocks); ++i) {
const struct r9a06g032_clkdesc *d = &r9a06g032_clocks[i];
const char *parent_name = d->source ?
diff --git a/drivers/clk/samsung/Kconfig b/drivers/clk/samsung/Kconfig
index 8e8245ab3fd1..c07bb50513bf 100644
--- a/drivers/clk/samsung/Kconfig
+++ b/drivers/clk/samsung/Kconfig
@@ -94,38 +94,6 @@ config EXYNOS_CLKOUT
status of the certains clocks from SoC, but it could also be tied to
other devices as an input clock.
-# For S3C24XX platforms, select following symbols:
-config S3C2410_COMMON_CLK
- bool "Samsung S3C2410 clock controller support" if COMPILE_TEST
- select COMMON_CLK_SAMSUNG
- help
- Support for the clock controller present on the Samsung
- S3C2410/S3C2440/S3C2442 SoCs. Choose Y here only if you build for
- this SoC.
-
-config S3C2410_COMMON_DCLK
- bool
- select COMMON_CLK_SAMSUNG
- select REGMAP_MMIO
- help
- Support for the dclk clock controller present on the Samsung
- S3C2410/S3C2412/S3C2440/S3C2443 SoCs. Choose Y here only if you build
- for this SoC.
-
-config S3C2412_COMMON_CLK
- bool "Samsung S3C2412 clock controller support" if COMPILE_TEST
- select COMMON_CLK_SAMSUNG
- help
- Support for the clock controller present on the Samsung S3C2412 SoCs.
- Choose Y here only if you build for this SoC.
-
-config S3C2443_COMMON_CLK
- bool "Samsung S3C2443 clock controller support" if COMPILE_TEST
- select COMMON_CLK_SAMSUNG
- help
- Support for the clock controller present on the Samsung
- S3C2416/S3C2443 SoCs. Choose Y here only if you build for this SoC.
-
config TESLA_FSD_COMMON_CLK
bool "Tesla FSD clock controller support" if COMPILE_TEST
depends on COMMON_CLK_SAMSUNG
diff --git a/drivers/clk/samsung/Makefile b/drivers/clk/samsung/Makefile
index 239d9eead77f..ebbeacabe88f 100644
--- a/drivers/clk/samsung/Makefile
+++ b/drivers/clk/samsung/Makefile
@@ -21,10 +21,6 @@ obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynos7.o
obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynos7885.o
obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynos850.o
obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynosautov9.o
-obj-$(CONFIG_S3C2410_COMMON_CLK)+= clk-s3c2410.o
-obj-$(CONFIG_S3C2410_COMMON_DCLK)+= clk-s3c2410-dclk.o
-obj-$(CONFIG_S3C2412_COMMON_CLK)+= clk-s3c2412.o
-obj-$(CONFIG_S3C2443_COMMON_CLK)+= clk-s3c2443.o
obj-$(CONFIG_S3C64XX_COMMON_CLK) += clk-s3c64xx.o
obj-$(CONFIG_S5PV210_COMMON_CLK) += clk-s5pv210.o clk-s5pv210-audss.o
obj-$(CONFIG_TESLA_FSD_COMMON_CLK) += clk-fsd.o
diff --git a/drivers/clk/samsung/clk-pll.c b/drivers/clk/samsung/clk-pll.c
index 0ff28938943f..df7812371d70 100644
--- a/drivers/clk/samsung/clk-pll.c
+++ b/drivers/clk/samsung/clk-pll.c
@@ -941,169 +941,6 @@ static const struct clk_ops samsung_pll6553_clk_ops = {
};
/*
- * PLL Clock Type of S3C24XX before S3C2443
- */
-
-#define PLLS3C2410_MDIV_MASK (0xff)
-#define PLLS3C2410_PDIV_MASK (0x1f)
-#define PLLS3C2410_SDIV_MASK (0x3)
-#define PLLS3C2410_MDIV_SHIFT (12)
-#define PLLS3C2410_PDIV_SHIFT (4)
-#define PLLS3C2410_SDIV_SHIFT (0)
-
-#define PLLS3C2410_ENABLE_REG_OFFSET 0x10
-
-static unsigned long samsung_s3c2410_pll_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
-{
- struct samsung_clk_pll *pll = to_clk_pll(hw);
- u32 pll_con, mdiv, pdiv, sdiv;
- u64 fvco = parent_rate;
-
- pll_con = readl_relaxed(pll->con_reg);
- mdiv = (pll_con >> PLLS3C2410_MDIV_SHIFT) & PLLS3C2410_MDIV_MASK;
- pdiv = (pll_con >> PLLS3C2410_PDIV_SHIFT) & PLLS3C2410_PDIV_MASK;
- sdiv = (pll_con >> PLLS3C2410_SDIV_SHIFT) & PLLS3C2410_SDIV_MASK;
-
- fvco *= (mdiv + 8);
- do_div(fvco, (pdiv + 2) << sdiv);
-
- return (unsigned int)fvco;
-}
-
-static unsigned long samsung_s3c2440_mpll_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
-{
- struct samsung_clk_pll *pll = to_clk_pll(hw);
- u32 pll_con, mdiv, pdiv, sdiv;
- u64 fvco = parent_rate;
-
- pll_con = readl_relaxed(pll->con_reg);
- mdiv = (pll_con >> PLLS3C2410_MDIV_SHIFT) & PLLS3C2410_MDIV_MASK;
- pdiv = (pll_con >> PLLS3C2410_PDIV_SHIFT) & PLLS3C2410_PDIV_MASK;
- sdiv = (pll_con >> PLLS3C2410_SDIV_SHIFT) & PLLS3C2410_SDIV_MASK;
-
- fvco *= (2 * (mdiv + 8));
- do_div(fvco, (pdiv + 2) << sdiv);
-
- return (unsigned int)fvco;
-}
-
-static int samsung_s3c2410_pll_set_rate(struct clk_hw *hw, unsigned long drate,
- unsigned long prate)
-{
- struct samsung_clk_pll *pll = to_clk_pll(hw);
- const struct samsung_pll_rate_table *rate;
- u32 tmp;
-
- /* Get required rate settings from table */
- rate = samsung_get_pll_settings(pll, drate);
- if (!rate) {
- pr_err("%s: Invalid rate : %lu for pll clk %s\n", __func__,
- drate, clk_hw_get_name(hw));
- return -EINVAL;
- }
-
- tmp = readl_relaxed(pll->con_reg);
-
- /* Change PLL PMS values */
- tmp &= ~((PLLS3C2410_MDIV_MASK << PLLS3C2410_MDIV_SHIFT) |
- (PLLS3C2410_PDIV_MASK << PLLS3C2410_PDIV_SHIFT) |
- (PLLS3C2410_SDIV_MASK << PLLS3C2410_SDIV_SHIFT));
- tmp |= (rate->mdiv << PLLS3C2410_MDIV_SHIFT) |
- (rate->pdiv << PLLS3C2410_PDIV_SHIFT) |
- (rate->sdiv << PLLS3C2410_SDIV_SHIFT);
- writel_relaxed(tmp, pll->con_reg);
-
- /* Time to settle according to the manual */
- udelay(300);
-
- return 0;
-}
-
-static int samsung_s3c2410_pll_enable(struct clk_hw *hw, int bit, bool enable)
-{
- struct samsung_clk_pll *pll = to_clk_pll(hw);
- u32 pll_en = readl_relaxed(pll->lock_reg + PLLS3C2410_ENABLE_REG_OFFSET);
- u32 pll_en_orig = pll_en;
-
- if (enable)
- pll_en &= ~BIT(bit);
- else
- pll_en |= BIT(bit);
-
- writel_relaxed(pll_en, pll->lock_reg + PLLS3C2410_ENABLE_REG_OFFSET);
-
- /* if we started the UPLL, then allow to settle */
- if (enable && (pll_en_orig & BIT(bit)))
- udelay(300);
-
- return 0;
-}
-
-static int samsung_s3c2410_mpll_enable(struct clk_hw *hw)
-{
- return samsung_s3c2410_pll_enable(hw, 5, true);
-}
-
-static void samsung_s3c2410_mpll_disable(struct clk_hw *hw)
-{
- samsung_s3c2410_pll_enable(hw, 5, false);
-}
-
-static int samsung_s3c2410_upll_enable(struct clk_hw *hw)
-{
- return samsung_s3c2410_pll_enable(hw, 7, true);
-}
-
-static void samsung_s3c2410_upll_disable(struct clk_hw *hw)
-{
- samsung_s3c2410_pll_enable(hw, 7, false);
-}
-
-static const struct clk_ops samsung_s3c2410_mpll_clk_min_ops = {
- .recalc_rate = samsung_s3c2410_pll_recalc_rate,
- .enable = samsung_s3c2410_mpll_enable,
- .disable = samsung_s3c2410_mpll_disable,
-};
-
-static const struct clk_ops samsung_s3c2410_upll_clk_min_ops = {
- .recalc_rate = samsung_s3c2410_pll_recalc_rate,
- .enable = samsung_s3c2410_upll_enable,
- .disable = samsung_s3c2410_upll_disable,
-};
-
-static const struct clk_ops samsung_s3c2440_mpll_clk_min_ops = {
- .recalc_rate = samsung_s3c2440_mpll_recalc_rate,
- .enable = samsung_s3c2410_mpll_enable,
- .disable = samsung_s3c2410_mpll_disable,
-};
-
-static const struct clk_ops samsung_s3c2410_mpll_clk_ops = {
- .recalc_rate = samsung_s3c2410_pll_recalc_rate,
- .enable = samsung_s3c2410_mpll_enable,
- .disable = samsung_s3c2410_mpll_disable,
- .round_rate = samsung_pll_round_rate,
- .set_rate = samsung_s3c2410_pll_set_rate,
-};
-
-static const struct clk_ops samsung_s3c2410_upll_clk_ops = {
- .recalc_rate = samsung_s3c2410_pll_recalc_rate,
- .enable = samsung_s3c2410_upll_enable,
- .disable = samsung_s3c2410_upll_disable,
- .round_rate = samsung_pll_round_rate,
- .set_rate = samsung_s3c2410_pll_set_rate,
-};
-
-static const struct clk_ops samsung_s3c2440_mpll_clk_ops = {
- .recalc_rate = samsung_s3c2440_mpll_recalc_rate,
- .enable = samsung_s3c2410_mpll_enable,
- .disable = samsung_s3c2410_mpll_disable,
- .round_rate = samsung_pll_round_rate,
- .set_rate = samsung_s3c2410_pll_set_rate,
-};
-
-/*
* PLL2550x Clock Type
*/
@@ -1530,24 +1367,6 @@ static void __init _samsung_clk_register_pll(struct samsung_clk_provider *ctx,
else
init.ops = &samsung_pll46xx_clk_ops;
break;
- case pll_s3c2410_mpll:
- if (!pll->rate_table)
- init.ops = &samsung_s3c2410_mpll_clk_min_ops;
- else
- init.ops = &samsung_s3c2410_mpll_clk_ops;
- break;
- case pll_s3c2410_upll:
- if (!pll->rate_table)
- init.ops = &samsung_s3c2410_upll_clk_min_ops;
- else
- init.ops = &samsung_s3c2410_upll_clk_ops;
- break;
- case pll_s3c2440_mpll:
- if (!pll->rate_table)
- init.ops = &samsung_s3c2440_mpll_clk_min_ops;
- else
- init.ops = &samsung_s3c2440_mpll_clk_ops;
- break;
case pll_2550x:
init.ops = &samsung_pll2550x_clk_ops;
break;
diff --git a/drivers/clk/samsung/clk-pll.h b/drivers/clk/samsung/clk-pll.h
index a9892c2d1f57..5d5a58d40e7e 100644
--- a/drivers/clk/samsung/clk-pll.h
+++ b/drivers/clk/samsung/clk-pll.h
@@ -25,9 +25,6 @@ enum samsung_pll_type {
pll_6552,
pll_6552_s3c2416,
pll_6553,
- pll_s3c2410_mpll,
- pll_s3c2410_upll,
- pll_s3c2440_mpll,
pll_2550x,
pll_2550xx,
pll_2650x,
@@ -56,24 +53,6 @@ enum samsung_pll_type {
.sdiv = (_s), \
}
-#define PLL_S3C2410_MPLL_RATE(_fin, _rate, _m, _p, _s) \
- { \
- .rate = PLL_VALID_RATE(_fin, _rate, \
- _m + 8, _p + 2, _s, 0, 16), \
- .mdiv = (_m), \
- .pdiv = (_p), \
- .sdiv = (_s), \
- }
-
-#define PLL_S3C2440_MPLL_RATE(_fin, _rate, _m, _p, _s) \
- { \
- .rate = PLL_VALID_RATE(_fin, _rate, \
- 2 * (_m + 8), _p + 2, _s, 0, 16), \
- .mdiv = (_m), \
- .pdiv = (_p), \
- .sdiv = (_s), \
- }
-
#define PLL_36XX_RATE(_fin, _rate, _m, _p, _s, _k) \
{ \
.rate = PLL_VALID_RATE(_fin, _rate, \
diff --git a/drivers/clk/samsung/clk-s3c2410-dclk.c b/drivers/clk/samsung/clk-s3c2410-dclk.c
deleted file mode 100644
index f5e0a6ba2d12..000000000000
--- a/drivers/clk/samsung/clk-s3c2410-dclk.c
+++ /dev/null
@@ -1,440 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2013 Heiko Stuebner <heiko@sntech.de>
- *
- * Common Clock Framework support for s3c24xx external clock output.
- */
-
-#include <linux/clkdev.h>
-#include <linux/slab.h>
-#include <linux/clk.h>
-#include <linux/clk-provider.h>
-#include <linux/io.h>
-#include <linux/platform_device.h>
-#include <linux/platform_data/clk-s3c2410.h>
-#include <linux/module.h>
-#include "clk.h"
-
-#define MUX_DCLK0 0
-#define MUX_DCLK1 1
-#define DIV_DCLK0 2
-#define DIV_DCLK1 3
-#define GATE_DCLK0 4
-#define GATE_DCLK1 5
-#define MUX_CLKOUT0 6
-#define MUX_CLKOUT1 7
-#define DCLK_MAX_CLKS (MUX_CLKOUT1 + 1)
-
-enum supported_socs {
- S3C2410,
- S3C2412,
- S3C2440,
- S3C2443,
-};
-
-struct s3c24xx_dclk_drv_data {
- const char **clkout0_parent_names;
- int clkout0_num_parents;
- const char **clkout1_parent_names;
- int clkout1_num_parents;
- const char **mux_parent_names;
- int mux_num_parents;
-};
-
-/*
- * Clock for output-parent selection in misccr
- */
-
-struct s3c24xx_clkout {
- struct clk_hw hw;
- u32 mask;
- u8 shift;
- unsigned int (*modify_misccr)(unsigned int clr, unsigned int chg);
-};
-
-#define to_s3c24xx_clkout(_hw) container_of(_hw, struct s3c24xx_clkout, hw)
-
-static u8 s3c24xx_clkout_get_parent(struct clk_hw *hw)
-{
- struct s3c24xx_clkout *clkout = to_s3c24xx_clkout(hw);
- int num_parents = clk_hw_get_num_parents(hw);
- u32 val;
-
- val = clkout->modify_misccr(0, 0) >> clkout->shift;
- val >>= clkout->shift;
- val &= clkout->mask;
-
- if (val >= num_parents)
- return -EINVAL;
-
- return val;
-}
-
-static int s3c24xx_clkout_set_parent(struct clk_hw *hw, u8 index)
-{
- struct s3c24xx_clkout *clkout = to_s3c24xx_clkout(hw);
-
- clkout->modify_misccr((clkout->mask << clkout->shift),
- (index << clkout->shift));
-
- return 0;
-}
-
-static const struct clk_ops s3c24xx_clkout_ops = {
- .get_parent = s3c24xx_clkout_get_parent,
- .set_parent = s3c24xx_clkout_set_parent,
- .determine_rate = __clk_mux_determine_rate,
-};
-
-static struct clk_hw *s3c24xx_register_clkout(struct device *dev,
- const char *name, const char **parent_names, u8 num_parents,
- u8 shift, u32 mask)
-{
- struct s3c2410_clk_platform_data *pdata = dev_get_platdata(dev);
- struct s3c24xx_clkout *clkout;
- struct clk_init_data init;
- int ret;
-
- if (!pdata)
- return ERR_PTR(-EINVAL);
-
- /* allocate the clkout */
- clkout = kzalloc(sizeof(*clkout), GFP_KERNEL);
- if (!clkout)
- return ERR_PTR(-ENOMEM);
-
- init.name = name;
- init.ops = &s3c24xx_clkout_ops;
- init.flags = 0;
- init.parent_names = parent_names;
- init.num_parents = num_parents;
-
- clkout->shift = shift;
- clkout->mask = mask;
- clkout->hw.init = &init;
- clkout->modify_misccr = pdata->modify_misccr;
-
- ret = clk_hw_register(dev, &clkout->hw);
- if (ret)
- return ERR_PTR(ret);
-
- return &clkout->hw;
-}
-
-/*
- * dclk and clkout init
- */
-
-struct s3c24xx_dclk {
- struct device *dev;
- void __iomem *base;
- struct notifier_block dclk0_div_change_nb;
- struct notifier_block dclk1_div_change_nb;
- spinlock_t dclk_lock;
- unsigned long reg_save;
- /* clk_data must be the last entry in the structure */
- struct clk_hw_onecell_data clk_data;
-};
-
-#define to_s3c24xx_dclk0(x) \
- container_of(x, struct s3c24xx_dclk, dclk0_div_change_nb)
-
-#define to_s3c24xx_dclk1(x) \
- container_of(x, struct s3c24xx_dclk, dclk1_div_change_nb)
-
-static const char *dclk_s3c2410_p[] = { "pclk", "uclk" };
-static const char *clkout0_s3c2410_p[] = { "mpll", "upll", "fclk", "hclk", "pclk",
- "gate_dclk0" };
-static const char *clkout1_s3c2410_p[] = { "mpll", "upll", "fclk", "hclk", "pclk",
- "gate_dclk1" };
-
-static const char *clkout0_s3c2412_p[] = { "mpll", "upll", "rtc_clkout",
- "hclk", "pclk", "gate_dclk0" };
-static const char *clkout1_s3c2412_p[] = { "xti", "upll", "fclk", "hclk", "pclk",
- "gate_dclk1" };
-
-static const char *clkout0_s3c2440_p[] = { "xti", "upll", "fclk", "hclk", "pclk",
- "gate_dclk0" };
-static const char *clkout1_s3c2440_p[] = { "mpll", "upll", "rtc_clkout",
- "hclk", "pclk", "gate_dclk1" };
-
-static const char *dclk_s3c2443_p[] = { "pclk", "epll" };
-static const char *clkout0_s3c2443_p[] = { "xti", "epll", "armclk", "hclk", "pclk",
- "gate_dclk0" };
-static const char *clkout1_s3c2443_p[] = { "dummy", "epll", "rtc_clkout",
- "hclk", "pclk", "gate_dclk1" };
-
-#define DCLKCON_DCLK_DIV_MASK 0xf
-#define DCLKCON_DCLK0_DIV_SHIFT 4
-#define DCLKCON_DCLK0_CMP_SHIFT 8
-#define DCLKCON_DCLK1_DIV_SHIFT 20
-#define DCLKCON_DCLK1_CMP_SHIFT 24
-
-static void s3c24xx_dclk_update_cmp(struct s3c24xx_dclk *s3c24xx_dclk,
- int div_shift, int cmp_shift)
-{
- unsigned long flags = 0;
- u32 dclk_con, div, cmp;
-
- spin_lock_irqsave(&s3c24xx_dclk->dclk_lock, flags);
-
- dclk_con = readl_relaxed(s3c24xx_dclk->base);
-
- div = ((dclk_con >> div_shift) & DCLKCON_DCLK_DIV_MASK) + 1;
- cmp = ((div + 1) / 2) - 1;
-
- dclk_con &= ~(DCLKCON_DCLK_DIV_MASK << cmp_shift);
- dclk_con |= (cmp << cmp_shift);
-
- writel_relaxed(dclk_con, s3c24xx_dclk->base);
-
- spin_unlock_irqrestore(&s3c24xx_dclk->dclk_lock, flags);
-}
-
-static int s3c24xx_dclk0_div_notify(struct notifier_block *nb,
- unsigned long event, void *data)
-{
- struct s3c24xx_dclk *s3c24xx_dclk = to_s3c24xx_dclk0(nb);
-
- if (event == POST_RATE_CHANGE) {
- s3c24xx_dclk_update_cmp(s3c24xx_dclk,
- DCLKCON_DCLK0_DIV_SHIFT, DCLKCON_DCLK0_CMP_SHIFT);
- }
-
- return NOTIFY_DONE;
-}
-
-static int s3c24xx_dclk1_div_notify(struct notifier_block *nb,
- unsigned long event, void *data)
-{
- struct s3c24xx_dclk *s3c24xx_dclk = to_s3c24xx_dclk1(nb);
-
- if (event == POST_RATE_CHANGE) {
- s3c24xx_dclk_update_cmp(s3c24xx_dclk,
- DCLKCON_DCLK1_DIV_SHIFT, DCLKCON_DCLK1_CMP_SHIFT);
- }
-
- return NOTIFY_DONE;
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int s3c24xx_dclk_suspend(struct device *dev)
-{
- struct s3c24xx_dclk *s3c24xx_dclk = dev_get_drvdata(dev);
-
- s3c24xx_dclk->reg_save = readl_relaxed(s3c24xx_dclk->base);
- return 0;
-}
-
-static int s3c24xx_dclk_resume(struct device *dev)
-{
- struct s3c24xx_dclk *s3c24xx_dclk = dev_get_drvdata(dev);
-
- writel_relaxed(s3c24xx_dclk->reg_save, s3c24xx_dclk->base);
- return 0;
-}
-#endif
-
-static SIMPLE_DEV_PM_OPS(s3c24xx_dclk_pm_ops,
- s3c24xx_dclk_suspend, s3c24xx_dclk_resume);
-
-static int s3c24xx_dclk_probe(struct platform_device *pdev)
-{
- struct s3c24xx_dclk *s3c24xx_dclk;
- struct s3c24xx_dclk_drv_data *dclk_variant;
- struct clk_hw **clk_table;
- int ret, i;
-
- s3c24xx_dclk = devm_kzalloc(&pdev->dev,
- struct_size(s3c24xx_dclk, clk_data.hws,
- DCLK_MAX_CLKS),
- GFP_KERNEL);
- if (!s3c24xx_dclk)
- return -ENOMEM;
-
- clk_table = s3c24xx_dclk->clk_data.hws;
-
- s3c24xx_dclk->dev = &pdev->dev;
- s3c24xx_dclk->clk_data.num = DCLK_MAX_CLKS;
- platform_set_drvdata(pdev, s3c24xx_dclk);
- spin_lock_init(&s3c24xx_dclk->dclk_lock);
-
- s3c24xx_dclk->base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(s3c24xx_dclk->base))
- return PTR_ERR(s3c24xx_dclk->base);
-
- dclk_variant = (struct s3c24xx_dclk_drv_data *)
- platform_get_device_id(pdev)->driver_data;
-
-
- clk_table[MUX_DCLK0] = clk_hw_register_mux(&pdev->dev, "mux_dclk0",
- dclk_variant->mux_parent_names,
- dclk_variant->mux_num_parents, 0,
- s3c24xx_dclk->base, 1, 1, 0,
- &s3c24xx_dclk->dclk_lock);
- clk_table[MUX_DCLK1] = clk_hw_register_mux(&pdev->dev, "mux_dclk1",
- dclk_variant->mux_parent_names,
- dclk_variant->mux_num_parents, 0,
- s3c24xx_dclk->base, 17, 1, 0,
- &s3c24xx_dclk->dclk_lock);
-
- clk_table[DIV_DCLK0] = clk_hw_register_divider(&pdev->dev, "div_dclk0",
- "mux_dclk0", 0, s3c24xx_dclk->base,
- 4, 4, 0, &s3c24xx_dclk->dclk_lock);
- clk_table[DIV_DCLK1] = clk_hw_register_divider(&pdev->dev, "div_dclk1",
- "mux_dclk1", 0, s3c24xx_dclk->base,
- 20, 4, 0, &s3c24xx_dclk->dclk_lock);
-
- clk_table[GATE_DCLK0] = clk_hw_register_gate(&pdev->dev, "gate_dclk0",
- "div_dclk0", CLK_SET_RATE_PARENT,
- s3c24xx_dclk->base, 0, 0,
- &s3c24xx_dclk->dclk_lock);
- clk_table[GATE_DCLK1] = clk_hw_register_gate(&pdev->dev, "gate_dclk1",
- "div_dclk1", CLK_SET_RATE_PARENT,
- s3c24xx_dclk->base, 16, 0,
- &s3c24xx_dclk->dclk_lock);
-
- clk_table[MUX_CLKOUT0] = s3c24xx_register_clkout(&pdev->dev,
- "clkout0", dclk_variant->clkout0_parent_names,
- dclk_variant->clkout0_num_parents, 4, 7);
- clk_table[MUX_CLKOUT1] = s3c24xx_register_clkout(&pdev->dev,
- "clkout1", dclk_variant->clkout1_parent_names,
- dclk_variant->clkout1_num_parents, 8, 7);
-
- for (i = 0; i < DCLK_MAX_CLKS; i++)
- if (IS_ERR(clk_table[i])) {
- dev_err(&pdev->dev, "clock %d failed to register\n", i);
- ret = PTR_ERR(clk_table[i]);
- goto err_clk_register;
- }
-
- ret = clk_hw_register_clkdev(clk_table[MUX_DCLK0], "dclk0", NULL);
- if (!ret)
- ret = clk_hw_register_clkdev(clk_table[MUX_DCLK1], "dclk1",
- NULL);
- if (!ret)
- ret = clk_hw_register_clkdev(clk_table[MUX_CLKOUT0],
- "clkout0", NULL);
- if (!ret)
- ret = clk_hw_register_clkdev(clk_table[MUX_CLKOUT1],
- "clkout1", NULL);
- if (ret) {
- dev_err(&pdev->dev, "failed to register aliases, %d\n", ret);
- goto err_clk_register;
- }
-
- s3c24xx_dclk->dclk0_div_change_nb.notifier_call =
- s3c24xx_dclk0_div_notify;
-
- s3c24xx_dclk->dclk1_div_change_nb.notifier_call =
- s3c24xx_dclk1_div_notify;
-
- ret = clk_notifier_register(clk_table[DIV_DCLK0]->clk,
- &s3c24xx_dclk->dclk0_div_change_nb);
- if (ret)
- goto err_clk_register;
-
- ret = clk_notifier_register(clk_table[DIV_DCLK1]->clk,
- &s3c24xx_dclk->dclk1_div_change_nb);
- if (ret)
- goto err_dclk_notify;
-
- return 0;
-
-err_dclk_notify:
- clk_notifier_unregister(clk_table[DIV_DCLK0]->clk,
- &s3c24xx_dclk->dclk0_div_change_nb);
-err_clk_register:
- for (i = 0; i < DCLK_MAX_CLKS; i++)
- if (clk_table[i] && !IS_ERR(clk_table[i]))
- clk_hw_unregister(clk_table[i]);
-
- return ret;
-}
-
-static int s3c24xx_dclk_remove(struct platform_device *pdev)
-{
- struct s3c24xx_dclk *s3c24xx_dclk = platform_get_drvdata(pdev);
- struct clk_hw **clk_table = s3c24xx_dclk->clk_data.hws;
- int i;
-
- clk_notifier_unregister(clk_table[DIV_DCLK1]->clk,
- &s3c24xx_dclk->dclk1_div_change_nb);
- clk_notifier_unregister(clk_table[DIV_DCLK0]->clk,
- &s3c24xx_dclk->dclk0_div_change_nb);
-
- for (i = 0; i < DCLK_MAX_CLKS; i++)
- clk_hw_unregister(clk_table[i]);
-
- return 0;
-}
-
-static struct s3c24xx_dclk_drv_data dclk_variants[] = {
- [S3C2410] = {
- .clkout0_parent_names = clkout0_s3c2410_p,
- .clkout0_num_parents = ARRAY_SIZE(clkout0_s3c2410_p),
- .clkout1_parent_names = clkout1_s3c2410_p,
- .clkout1_num_parents = ARRAY_SIZE(clkout1_s3c2410_p),
- .mux_parent_names = dclk_s3c2410_p,
- .mux_num_parents = ARRAY_SIZE(dclk_s3c2410_p),
- },
- [S3C2412] = {
- .clkout0_parent_names = clkout0_s3c2412_p,
- .clkout0_num_parents = ARRAY_SIZE(clkout0_s3c2412_p),
- .clkout1_parent_names = clkout1_s3c2412_p,
- .clkout1_num_parents = ARRAY_SIZE(clkout1_s3c2412_p),
- .mux_parent_names = dclk_s3c2410_p,
- .mux_num_parents = ARRAY_SIZE(dclk_s3c2410_p),
- },
- [S3C2440] = {
- .clkout0_parent_names = clkout0_s3c2440_p,
- .clkout0_num_parents = ARRAY_SIZE(clkout0_s3c2440_p),
- .clkout1_parent_names = clkout1_s3c2440_p,
- .clkout1_num_parents = ARRAY_SIZE(clkout1_s3c2440_p),
- .mux_parent_names = dclk_s3c2410_p,
- .mux_num_parents = ARRAY_SIZE(dclk_s3c2410_p),
- },
- [S3C2443] = {
- .clkout0_parent_names = clkout0_s3c2443_p,
- .clkout0_num_parents = ARRAY_SIZE(clkout0_s3c2443_p),
- .clkout1_parent_names = clkout1_s3c2443_p,
- .clkout1_num_parents = ARRAY_SIZE(clkout1_s3c2443_p),
- .mux_parent_names = dclk_s3c2443_p,
- .mux_num_parents = ARRAY_SIZE(dclk_s3c2443_p),
- },
-};
-
-static const struct platform_device_id s3c24xx_dclk_driver_ids[] = {
- {
- .name = "s3c2410-dclk",
- .driver_data = (kernel_ulong_t)&dclk_variants[S3C2410],
- }, {
- .name = "s3c2412-dclk",
- .driver_data = (kernel_ulong_t)&dclk_variants[S3C2412],
- }, {
- .name = "s3c2440-dclk",
- .driver_data = (kernel_ulong_t)&dclk_variants[S3C2440],
- }, {
- .name = "s3c2443-dclk",
- .driver_data = (kernel_ulong_t)&dclk_variants[S3C2443],
- },
- { }
-};
-
-MODULE_DEVICE_TABLE(platform, s3c24xx_dclk_driver_ids);
-
-static struct platform_driver s3c24xx_dclk_driver = {
- .driver = {
- .name = "s3c24xx-dclk",
- .pm = &s3c24xx_dclk_pm_ops,
- .suppress_bind_attrs = true,
- },
- .probe = s3c24xx_dclk_probe,
- .remove = s3c24xx_dclk_remove,
- .id_table = s3c24xx_dclk_driver_ids,
-};
-module_platform_driver(s3c24xx_dclk_driver);
-
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Heiko Stuebner <heiko@sntech.de>");
-MODULE_DESCRIPTION("Driver for the S3C24XX external clock outputs");
diff --git a/drivers/clk/samsung/clk-s3c2410.c b/drivers/clk/samsung/clk-s3c2410.c
deleted file mode 100644
index 3d152a46169b..000000000000
--- a/drivers/clk/samsung/clk-s3c2410.c
+++ /dev/null
@@ -1,446 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2013 Heiko Stuebner <heiko@sntech.de>
- *
- * Common Clock Framework support for S3C2410 and following SoCs.
- */
-
-#include <linux/clk-provider.h>
-#include <linux/clk/samsung.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-
-#include <dt-bindings/clock/s3c2410.h>
-
-#include "clk.h"
-#include "clk-pll.h"
-
-#define LOCKTIME 0x00
-#define MPLLCON 0x04
-#define UPLLCON 0x08
-#define CLKCON 0x0c
-#define CLKSLOW 0x10
-#define CLKDIVN 0x14
-#define CAMDIVN 0x18
-
-/* the soc types */
-enum supported_socs {
- S3C2410,
- S3C2440,
- S3C2442,
-};
-
-/* list of PLLs to be registered */
-enum s3c2410_plls {
- mpll, upll,
-};
-
-static void __iomem *reg_base;
-
-/*
- * list of controller registers to be saved and restored during a
- * suspend/resume cycle.
- */
-static unsigned long s3c2410_clk_regs[] __initdata = {
- LOCKTIME,
- MPLLCON,
- UPLLCON,
- CLKCON,
- CLKSLOW,
- CLKDIVN,
- CAMDIVN,
-};
-
-PNAME(fclk_p) = { "mpll", "div_slow" };
-
-static struct samsung_mux_clock s3c2410_common_muxes[] __initdata = {
- MUX(FCLK, "fclk", fclk_p, CLKSLOW, 4, 1),
-};
-
-static struct clk_div_table divslow_d[] = {
- { .val = 0, .div = 1 },
- { .val = 1, .div = 2 },
- { .val = 2, .div = 4 },
- { .val = 3, .div = 6 },
- { .val = 4, .div = 8 },
- { .val = 5, .div = 10 },
- { .val = 6, .div = 12 },
- { .val = 7, .div = 14 },
- { /* sentinel */ },
-};
-
-static struct samsung_div_clock s3c2410_common_dividers[] __initdata = {
- DIV_T(0, "div_slow", "xti", CLKSLOW, 0, 3, divslow_d),
- DIV(PCLK, "pclk", "hclk", CLKDIVN, 0, 1),
-};
-
-static struct samsung_gate_clock s3c2410_common_gates[] __initdata = {
- GATE(PCLK_SPI, "spi", "pclk", CLKCON, 18, 0, 0),
- GATE(PCLK_I2S, "i2s", "pclk", CLKCON, 17, 0, 0),
- GATE(PCLK_I2C, "i2c", "pclk", CLKCON, 16, 0, 0),
- GATE(PCLK_ADC, "adc", "pclk", CLKCON, 15, 0, 0),
- GATE(PCLK_RTC, "rtc", "pclk", CLKCON, 14, 0, 0),
- GATE(PCLK_GPIO, "gpio", "pclk", CLKCON, 13, CLK_IGNORE_UNUSED, 0),
- GATE(PCLK_UART2, "uart2", "pclk", CLKCON, 12, 0, 0),
- GATE(PCLK_UART1, "uart1", "pclk", CLKCON, 11, 0, 0),
- GATE(PCLK_UART0, "uart0", "pclk", CLKCON, 10, 0, 0),
- GATE(PCLK_SDI, "sdi", "pclk", CLKCON, 9, 0, 0),
- GATE(PCLK_PWM, "pwm", "pclk", CLKCON, 8, 0, 0),
- GATE(HCLK_USBD, "usb-device", "hclk", CLKCON, 7, 0, 0),
- GATE(HCLK_USBH, "usb-host", "hclk", CLKCON, 6, 0, 0),
- GATE(HCLK_LCD, "lcd", "hclk", CLKCON, 5, 0, 0),
- GATE(HCLK_NAND, "nand", "hclk", CLKCON, 4, 0, 0),
-};
-
-/* should be added _after_ the soc-specific clocks are created */
-static struct samsung_clock_alias s3c2410_common_aliases[] __initdata = {
- ALIAS(PCLK_I2C, "s3c2410-i2c.0", "i2c"),
- ALIAS(PCLK_ADC, NULL, "adc"),
- ALIAS(PCLK_RTC, NULL, "rtc"),
- ALIAS(PCLK_PWM, NULL, "timers"),
- ALIAS(HCLK_LCD, NULL, "lcd"),
- ALIAS(HCLK_USBD, NULL, "usb-device"),
- ALIAS(HCLK_USBH, NULL, "usb-host"),
- ALIAS(UCLK, NULL, "usb-bus-host"),
- ALIAS(UCLK, NULL, "usb-bus-gadget"),
- ALIAS(ARMCLK, NULL, "armclk"),
- ALIAS(UCLK, NULL, "uclk"),
- ALIAS(HCLK, NULL, "hclk"),
- ALIAS(MPLL, NULL, "mpll"),
- ALIAS(FCLK, NULL, "fclk"),
- ALIAS(PCLK, NULL, "watchdog"),
- ALIAS(PCLK_SDI, NULL, "sdi"),
- ALIAS(HCLK_NAND, NULL, "nand"),
- ALIAS(PCLK_I2S, NULL, "iis"),
- ALIAS(PCLK_I2C, NULL, "i2c"),
-};
-
-/* S3C2410 specific clocks */
-
-static struct samsung_pll_rate_table pll_s3c2410_12mhz_tbl[] __initdata = {
- /* sorted in descending order */
- /* 2410A extras */
- PLL_S3C2410_MPLL_RATE(12 * MHZ, 270000000, 127, 1, 1),
- PLL_S3C2410_MPLL_RATE(12 * MHZ, 268000000, 126, 1, 1),
- PLL_S3C2410_MPLL_RATE(12 * MHZ, 266000000, 125, 1, 1),
- PLL_S3C2410_MPLL_RATE(12 * MHZ, 226000000, 105, 1, 1),
- PLL_S3C2410_MPLL_RATE(12 * MHZ, 210000000, 132, 2, 1),
- /* 2410 common */
- PLL_S3C2410_MPLL_RATE(12 * MHZ, 202800000, 161, 3, 1),
- PLL_S3C2410_MPLL_RATE(12 * MHZ, 192000000, 88, 1, 1),
- PLL_S3C2410_MPLL_RATE(12 * MHZ, 186000000, 85, 1, 1),
- PLL_S3C2410_MPLL_RATE(12 * MHZ, 180000000, 82, 1, 1),
- PLL_S3C2410_MPLL_RATE(12 * MHZ, 170000000, 77, 1, 1),
- PLL_S3C2410_MPLL_RATE(12 * MHZ, 158000000, 71, 1, 1),
- PLL_S3C2410_MPLL_RATE(12 * MHZ, 152000000, 68, 1, 1),
- PLL_S3C2410_MPLL_RATE(12 * MHZ, 147000000, 90, 2, 1),
- PLL_S3C2410_MPLL_RATE(12 * MHZ, 135000000, 82, 2, 1),
- PLL_S3C2410_MPLL_RATE(12 * MHZ, 124000000, 116, 1, 2),
- PLL_S3C2410_MPLL_RATE(12 * MHZ, 118500000, 150, 2, 2),
- PLL_S3C2410_MPLL_RATE(12 * MHZ, 113000000, 105, 1, 2),
- PLL_S3C2410_MPLL_RATE(12 * MHZ, 101250000, 127, 2, 2),
- PLL_S3C2410_MPLL_RATE(12 * MHZ, 90000000, 112, 2, 2),
- PLL_S3C2410_MPLL_RATE(12 * MHZ, 84750000, 105, 2, 2),
- PLL_S3C2410_MPLL_RATE(12 * MHZ, 79000000, 71, 1, 2),
- PLL_S3C2410_MPLL_RATE(12 * MHZ, 67500000, 82, 2, 2),
- PLL_S3C2410_MPLL_RATE(12 * MHZ, 56250000, 142, 2, 3),
- PLL_S3C2410_MPLL_RATE(12 * MHZ, 48000000, 120, 2, 3),
- PLL_S3C2410_MPLL_RATE(12 * MHZ, 50700000, 161, 3, 3),
- PLL_S3C2410_MPLL_RATE(12 * MHZ, 45000000, 82, 1, 3),
- PLL_S3C2410_MPLL_RATE(12 * MHZ, 33750000, 82, 2, 3),
- { /* sentinel */ },
-};
-
-static struct samsung_pll_clock s3c2410_plls[] __initdata = {
- [mpll] = PLL(pll_s3c2410_mpll, MPLL, "mpll", "xti",
- LOCKTIME, MPLLCON, NULL),
- [upll] = PLL(pll_s3c2410_upll, UPLL, "upll", "xti",
- LOCKTIME, UPLLCON, NULL),
-};
-
-static struct samsung_div_clock s3c2410_dividers[] __initdata = {
- DIV(HCLK, "hclk", "mpll", CLKDIVN, 1, 1),
-};
-
-static struct samsung_fixed_factor_clock s3c2410_ffactor[] __initdata = {
- /*
- * armclk is directly supplied by the fclk, without
- * switching possibility like on the s3c244x below.
- */
- FFACTOR(ARMCLK, "armclk", "fclk", 1, 1, 0),
-
- /* uclk is fed from the unmodified upll */
- FFACTOR(UCLK, "uclk", "upll", 1, 1, 0),
-};
-
-static struct samsung_clock_alias s3c2410_aliases[] __initdata = {
- ALIAS(PCLK_UART0, "s3c2410-uart.0", "uart"),
- ALIAS(PCLK_UART1, "s3c2410-uart.1", "uart"),
- ALIAS(PCLK_UART2, "s3c2410-uart.2", "uart"),
- ALIAS(PCLK_UART0, "s3c2410-uart.0", "clk_uart_baud0"),
- ALIAS(PCLK_UART1, "s3c2410-uart.1", "clk_uart_baud0"),
- ALIAS(PCLK_UART2, "s3c2410-uart.2", "clk_uart_baud0"),
- ALIAS(UCLK, NULL, "clk_uart_baud1"),
-};
-
-/* S3C244x specific clocks */
-
-static struct samsung_pll_rate_table pll_s3c244x_12mhz_tbl[] __initdata = {
- /* sorted in descending order */
- PLL_S3C2440_MPLL_RATE(12 * MHZ, 400000000, 0x5c, 1, 1),
- PLL_S3C2440_MPLL_RATE(12 * MHZ, 390000000, 0x7a, 2, 1),
- PLL_S3C2440_MPLL_RATE(12 * MHZ, 380000000, 0x57, 1, 1),
- PLL_S3C2440_MPLL_RATE(12 * MHZ, 370000000, 0xb1, 4, 1),
- PLL_S3C2440_MPLL_RATE(12 * MHZ, 360000000, 0x70, 2, 1),
- PLL_S3C2440_MPLL_RATE(12 * MHZ, 350000000, 0xa7, 4, 1),
- PLL_S3C2440_MPLL_RATE(12 * MHZ, 340000000, 0x4d, 1, 1),
- PLL_S3C2440_MPLL_RATE(12 * MHZ, 330000000, 0x66, 2, 1),
- PLL_S3C2440_MPLL_RATE(12 * MHZ, 320000000, 0x98, 4, 1),
- PLL_S3C2440_MPLL_RATE(12 * MHZ, 310000000, 0x93, 4, 1),
- PLL_S3C2440_MPLL_RATE(12 * MHZ, 300000000, 0x75, 3, 1),
- PLL_S3C2440_MPLL_RATE(12 * MHZ, 240000000, 0x70, 1, 2),
- PLL_S3C2440_MPLL_RATE(12 * MHZ, 230000000, 0x6b, 1, 2),
- PLL_S3C2440_MPLL_RATE(12 * MHZ, 220000000, 0x66, 1, 2),
- PLL_S3C2440_MPLL_RATE(12 * MHZ, 210000000, 0x84, 2, 2),
- PLL_S3C2440_MPLL_RATE(12 * MHZ, 200000000, 0x5c, 1, 2),
- PLL_S3C2440_MPLL_RATE(12 * MHZ, 190000000, 0x57, 1, 2),
- PLL_S3C2440_MPLL_RATE(12 * MHZ, 180000000, 0x70, 2, 2),
- PLL_S3C2440_MPLL_RATE(12 * MHZ, 170000000, 0x4d, 1, 2),
- PLL_S3C2440_MPLL_RATE(12 * MHZ, 160000000, 0x98, 4, 2),
- PLL_S3C2440_MPLL_RATE(12 * MHZ, 150000000, 0x75, 3, 2),
- PLL_S3C2440_MPLL_RATE(12 * MHZ, 120000000, 0x70, 1, 3),
- PLL_S3C2440_MPLL_RATE(12 * MHZ, 110000000, 0x66, 1, 3),
- PLL_S3C2440_MPLL_RATE(12 * MHZ, 100000000, 0x5c, 1, 3),
- PLL_S3C2440_MPLL_RATE(12 * MHZ, 90000000, 0x70, 2, 3),
- PLL_S3C2440_MPLL_RATE(12 * MHZ, 80000000, 0x98, 4, 3),
- PLL_S3C2440_MPLL_RATE(12 * MHZ, 75000000, 0x75, 3, 3),
- { /* sentinel */ },
-};
-
-static struct samsung_pll_clock s3c244x_common_plls[] __initdata = {
- [mpll] = PLL(pll_s3c2440_mpll, MPLL, "mpll", "xti",
- LOCKTIME, MPLLCON, NULL),
- [upll] = PLL(pll_s3c2410_upll, UPLL, "upll", "xti",
- LOCKTIME, UPLLCON, NULL),
-};
-
-PNAME(hclk_p) = { "fclk", "div_hclk_2", "div_hclk_4", "div_hclk_3" };
-PNAME(armclk_p) = { "fclk", "hclk" };
-
-static struct samsung_mux_clock s3c244x_common_muxes[] __initdata = {
- MUX(HCLK, "hclk", hclk_p, CLKDIVN, 1, 2),
- MUX(ARMCLK, "armclk", armclk_p, CAMDIVN, 12, 1),
-};
-
-static struct samsung_fixed_factor_clock s3c244x_common_ffactor[] __initdata = {
- FFACTOR(0, "div_hclk_2", "fclk", 1, 2, 0),
- FFACTOR(0, "ff_cam", "div_cam", 2, 1, CLK_SET_RATE_PARENT),
-};
-
-static struct clk_div_table div_hclk_4_d[] = {
- { .val = 0, .div = 4 },
- { .val = 1, .div = 8 },
- { /* sentinel */ },
-};
-
-static struct clk_div_table div_hclk_3_d[] = {
- { .val = 0, .div = 3 },
- { .val = 1, .div = 6 },
- { /* sentinel */ },
-};
-
-static struct samsung_div_clock s3c244x_common_dividers[] __initdata = {
- DIV(UCLK, "uclk", "upll", CLKDIVN, 3, 1),
- DIV(0, "div_hclk", "fclk", CLKDIVN, 1, 1),
- DIV_T(0, "div_hclk_4", "fclk", CAMDIVN, 9, 1, div_hclk_4_d),
- DIV_T(0, "div_hclk_3", "fclk", CAMDIVN, 8, 1, div_hclk_3_d),
- DIV(0, "div_cam", "upll", CAMDIVN, 0, 3),
-};
-
-static struct samsung_gate_clock s3c244x_common_gates[] __initdata = {
- GATE(HCLK_CAM, "cam", "hclk", CLKCON, 19, 0, 0),
-};
-
-static struct samsung_clock_alias s3c244x_common_aliases[] __initdata = {
- ALIAS(PCLK_UART0, "s3c2440-uart.0", "uart"),
- ALIAS(PCLK_UART1, "s3c2440-uart.1", "uart"),
- ALIAS(PCLK_UART2, "s3c2440-uart.2", "uart"),
- ALIAS(PCLK_UART0, "s3c2440-uart.0", "clk_uart_baud2"),
- ALIAS(PCLK_UART1, "s3c2440-uart.1", "clk_uart_baud2"),
- ALIAS(PCLK_UART2, "s3c2440-uart.2", "clk_uart_baud2"),
- ALIAS(HCLK_CAM, NULL, "camif"),
- ALIAS(CAMIF, NULL, "camif-upll"),
-};
-
-/* S3C2440 specific clocks */
-
-PNAME(s3c2440_camif_p) = { "upll", "ff_cam" };
-
-static struct samsung_mux_clock s3c2440_muxes[] __initdata = {
- MUX(CAMIF, "camif", s3c2440_camif_p, CAMDIVN, 4, 1),
-};
-
-static struct samsung_gate_clock s3c2440_gates[] __initdata = {
- GATE(PCLK_AC97, "ac97", "pclk", CLKCON, 20, 0, 0),
-};
-
-/* S3C2442 specific clocks */
-
-static struct samsung_fixed_factor_clock s3c2442_ffactor[] __initdata = {
- FFACTOR(0, "upll_3", "upll", 1, 3, 0),
-};
-
-PNAME(s3c2442_camif_p) = { "upll", "ff_cam", "upll", "upll_3" };
-
-static struct samsung_mux_clock s3c2442_muxes[] __initdata = {
- MUX(CAMIF, "camif", s3c2442_camif_p, CAMDIVN, 4, 2),
-};
-
-/*
- * fixed rate clocks generated outside the soc
- * Only necessary until the devicetree-move is complete
- */
-#define XTI 1
-static struct samsung_fixed_rate_clock s3c2410_common_frate_clks[] __initdata = {
- FRATE(XTI, "xti", NULL, 0, 0),
-};
-
-static void __init s3c2410_common_clk_register_fixed_ext(
- struct samsung_clk_provider *ctx,
- unsigned long xti_f)
-{
- struct samsung_clock_alias xti_alias = ALIAS(XTI, NULL, "xtal");
-
- s3c2410_common_frate_clks[0].fixed_rate = xti_f;
- samsung_clk_register_fixed_rate(ctx, s3c2410_common_frate_clks,
- ARRAY_SIZE(s3c2410_common_frate_clks));
-
- samsung_clk_register_alias(ctx, &xti_alias, 1);
-}
-
-void __init s3c2410_common_clk_init(struct device_node *np, unsigned long xti_f,
- int current_soc,
- void __iomem *base)
-{
- struct samsung_clk_provider *ctx;
- struct clk_hw **hws;
- reg_base = base;
-
- if (np) {
- reg_base = of_iomap(np, 0);
- if (!reg_base)
- panic("%s: failed to map registers\n", __func__);
- }
-
- ctx = samsung_clk_init(np, reg_base, NR_CLKS);
- hws = ctx->clk_data.hws;
-
- /* Register external clocks only in non-dt cases */
- if (!np)
- s3c2410_common_clk_register_fixed_ext(ctx, xti_f);
-
- if (current_soc == S3C2410) {
- if (clk_hw_get_rate(hws[XTI]) == 12 * MHZ) {
- s3c2410_plls[mpll].rate_table = pll_s3c2410_12mhz_tbl;
- s3c2410_plls[upll].rate_table = pll_s3c2410_12mhz_tbl;
- }
-
- /* Register PLLs. */
- samsung_clk_register_pll(ctx, s3c2410_plls,
- ARRAY_SIZE(s3c2410_plls), reg_base);
-
- } else { /* S3C2440, S3C2442 */
- if (clk_hw_get_rate(hws[XTI]) == 12 * MHZ) {
- /*
- * plls follow different calculation schemes, with the
- * upll following the same scheme as the s3c2410 plls
- */
- s3c244x_common_plls[mpll].rate_table =
- pll_s3c244x_12mhz_tbl;
- s3c244x_common_plls[upll].rate_table =
- pll_s3c2410_12mhz_tbl;
- }
-
- /* Register PLLs. */
- samsung_clk_register_pll(ctx, s3c244x_common_plls,
- ARRAY_SIZE(s3c244x_common_plls), reg_base);
- }
-
- /* Register common internal clocks. */
- samsung_clk_register_mux(ctx, s3c2410_common_muxes,
- ARRAY_SIZE(s3c2410_common_muxes));
- samsung_clk_register_div(ctx, s3c2410_common_dividers,
- ARRAY_SIZE(s3c2410_common_dividers));
- samsung_clk_register_gate(ctx, s3c2410_common_gates,
- ARRAY_SIZE(s3c2410_common_gates));
-
- if (current_soc == S3C2440 || current_soc == S3C2442) {
- samsung_clk_register_div(ctx, s3c244x_common_dividers,
- ARRAY_SIZE(s3c244x_common_dividers));
- samsung_clk_register_gate(ctx, s3c244x_common_gates,
- ARRAY_SIZE(s3c244x_common_gates));
- samsung_clk_register_mux(ctx, s3c244x_common_muxes,
- ARRAY_SIZE(s3c244x_common_muxes));
- samsung_clk_register_fixed_factor(ctx, s3c244x_common_ffactor,
- ARRAY_SIZE(s3c244x_common_ffactor));
- }
-
- /* Register SoC-specific clocks. */
- switch (current_soc) {
- case S3C2410:
- samsung_clk_register_div(ctx, s3c2410_dividers,
- ARRAY_SIZE(s3c2410_dividers));
- samsung_clk_register_fixed_factor(ctx, s3c2410_ffactor,
- ARRAY_SIZE(s3c2410_ffactor));
- samsung_clk_register_alias(ctx, s3c2410_aliases,
- ARRAY_SIZE(s3c2410_aliases));
- break;
- case S3C2440:
- samsung_clk_register_mux(ctx, s3c2440_muxes,
- ARRAY_SIZE(s3c2440_muxes));
- samsung_clk_register_gate(ctx, s3c2440_gates,
- ARRAY_SIZE(s3c2440_gates));
- break;
- case S3C2442:
- samsung_clk_register_mux(ctx, s3c2442_muxes,
- ARRAY_SIZE(s3c2442_muxes));
- samsung_clk_register_fixed_factor(ctx, s3c2442_ffactor,
- ARRAY_SIZE(s3c2442_ffactor));
- break;
- }
-
- /*
- * Register common aliases at the end, as some of the aliased clocks
- * are SoC specific.
- */
- samsung_clk_register_alias(ctx, s3c2410_common_aliases,
- ARRAY_SIZE(s3c2410_common_aliases));
-
- if (current_soc == S3C2440 || current_soc == S3C2442) {
- samsung_clk_register_alias(ctx, s3c244x_common_aliases,
- ARRAY_SIZE(s3c244x_common_aliases));
- }
-
- samsung_clk_sleep_init(reg_base, s3c2410_clk_regs,
- ARRAY_SIZE(s3c2410_clk_regs));
-
- samsung_clk_of_add_provider(np, ctx);
-}
-
-static void __init s3c2410_clk_init(struct device_node *np)
-{
- s3c2410_common_clk_init(np, 0, S3C2410, NULL);
-}
-CLK_OF_DECLARE(s3c2410_clk, "samsung,s3c2410-clock", s3c2410_clk_init);
-
-static void __init s3c2440_clk_init(struct device_node *np)
-{
- s3c2410_common_clk_init(np, 0, S3C2440, NULL);
-}
-CLK_OF_DECLARE(s3c2440_clk, "samsung,s3c2440-clock", s3c2440_clk_init);
-
-static void __init s3c2442_clk_init(struct device_node *np)
-{
- s3c2410_common_clk_init(np, 0, S3C2442, NULL);
-}
-CLK_OF_DECLARE(s3c2442_clk, "samsung,s3c2442-clock", s3c2442_clk_init);
diff --git a/drivers/clk/samsung/clk-s3c2412.c b/drivers/clk/samsung/clk-s3c2412.c
deleted file mode 100644
index 724ef642f048..000000000000
--- a/drivers/clk/samsung/clk-s3c2412.c
+++ /dev/null
@@ -1,254 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2013 Heiko Stuebner <heiko@sntech.de>
- *
- * Common Clock Framework support for S3C2412 and S3C2413.
- */
-
-#include <linux/clk-provider.h>
-#include <linux/clk/samsung.h>
-#include <linux/io.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/reboot.h>
-
-#include <dt-bindings/clock/s3c2412.h>
-
-#include "clk.h"
-#include "clk-pll.h"
-
-#define LOCKTIME 0x00
-#define MPLLCON 0x04
-#define UPLLCON 0x08
-#define CLKCON 0x0c
-#define CLKDIVN 0x14
-#define CLKSRC 0x1c
-#define SWRST 0x30
-
-static void __iomem *reg_base;
-
-/*
- * list of controller registers to be saved and restored during a
- * suspend/resume cycle.
- */
-static unsigned long s3c2412_clk_regs[] __initdata = {
- LOCKTIME,
- MPLLCON,
- UPLLCON,
- CLKCON,
- CLKDIVN,
- CLKSRC,
-};
-
-static struct clk_div_table divxti_d[] = {
- { .val = 0, .div = 1 },
- { .val = 1, .div = 2 },
- { .val = 2, .div = 4 },
- { .val = 3, .div = 6 },
- { .val = 4, .div = 8 },
- { .val = 5, .div = 10 },
- { .val = 6, .div = 12 },
- { .val = 7, .div = 14 },
- { /* sentinel */ },
-};
-
-static struct samsung_div_clock s3c2412_dividers[] __initdata = {
- DIV_T(0, "div_xti", "xti", CLKSRC, 0, 3, divxti_d),
- DIV(0, "div_cam", "mux_cam", CLKDIVN, 16, 4),
- DIV(0, "div_i2s", "mux_i2s", CLKDIVN, 12, 4),
- DIV(0, "div_uart", "mux_uart", CLKDIVN, 8, 4),
- DIV(0, "div_usb", "mux_usb", CLKDIVN, 6, 1),
- DIV(0, "div_hclk_half", "hclk", CLKDIVN, 5, 1),
- DIV(ARMDIV, "armdiv", "msysclk", CLKDIVN, 3, 1),
- DIV(PCLK, "pclk", "hclk", CLKDIVN, 2, 1),
- DIV(HCLK, "hclk", "armdiv", CLKDIVN, 0, 2),
-};
-
-static struct samsung_fixed_factor_clock s3c2412_ffactor[] __initdata = {
- FFACTOR(0, "ff_hclk", "hclk", 2, 1, CLK_SET_RATE_PARENT),
-};
-
-/*
- * The first two use the OM[4] setting, which is not readable from
- * software, so assume it is set to xti.
- */
-PNAME(erefclk_p) = { "xti", "xti", "xti", "ext" };
-PNAME(urefclk_p) = { "xti", "xti", "xti", "ext" };
-
-PNAME(camclk_p) = { "usysclk", "hclk" };
-PNAME(usbclk_p) = { "usysclk", "hclk" };
-PNAME(i2sclk_p) = { "erefclk", "mpll" };
-PNAME(uartclk_p) = { "erefclk", "mpll" };
-PNAME(usysclk_p) = { "urefclk", "upll" };
-PNAME(msysclk_p) = { "mdivclk", "mpll" };
-PNAME(mdivclk_p) = { "xti", "div_xti" };
-PNAME(armclk_p) = { "armdiv", "hclk" };
-
-static struct samsung_mux_clock s3c2412_muxes[] __initdata = {
- MUX(0, "erefclk", erefclk_p, CLKSRC, 14, 2),
- MUX(0, "urefclk", urefclk_p, CLKSRC, 12, 2),
- MUX(0, "mux_cam", camclk_p, CLKSRC, 11, 1),
- MUX(0, "mux_usb", usbclk_p, CLKSRC, 10, 1),
- MUX(0, "mux_i2s", i2sclk_p, CLKSRC, 9, 1),
- MUX(0, "mux_uart", uartclk_p, CLKSRC, 8, 1),
- MUX(USYSCLK, "usysclk", usysclk_p, CLKSRC, 5, 1),
- MUX(MSYSCLK, "msysclk", msysclk_p, CLKSRC, 4, 1),
- MUX(MDIVCLK, "mdivclk", mdivclk_p, CLKSRC, 3, 1),
- MUX(ARMCLK, "armclk", armclk_p, CLKDIVN, 4, 1),
-};
-
-static struct samsung_pll_clock s3c2412_plls[] __initdata = {
- PLL(pll_s3c2440_mpll, MPLL, "mpll", "xti", LOCKTIME, MPLLCON, NULL),
- PLL(pll_s3c2410_upll, UPLL, "upll", "urefclk", LOCKTIME, UPLLCON, NULL),
-};
-
-static struct samsung_gate_clock s3c2412_gates[] __initdata = {
- GATE(PCLK_WDT, "wdt", "pclk", CLKCON, 28, 0, 0),
- GATE(PCLK_SPI, "spi", "pclk", CLKCON, 27, 0, 0),
- GATE(PCLK_I2S, "i2s", "pclk", CLKCON, 26, 0, 0),
- GATE(PCLK_I2C, "i2c", "pclk", CLKCON, 25, 0, 0),
- GATE(PCLK_ADC, "adc", "pclk", CLKCON, 24, 0, 0),
- GATE(PCLK_RTC, "rtc", "pclk", CLKCON, 23, 0, 0),
- GATE(PCLK_GPIO, "gpio", "pclk", CLKCON, 22, CLK_IGNORE_UNUSED, 0),
- GATE(PCLK_UART2, "uart2", "pclk", CLKCON, 21, 0, 0),
- GATE(PCLK_UART1, "uart1", "pclk", CLKCON, 20, 0, 0),
- GATE(PCLK_UART0, "uart0", "pclk", CLKCON, 19, 0, 0),
- GATE(PCLK_SDI, "sdi", "pclk", CLKCON, 18, 0, 0),
- GATE(PCLK_PWM, "pwm", "pclk", CLKCON, 17, 0, 0),
- GATE(PCLK_USBD, "usb-device", "pclk", CLKCON, 16, 0, 0),
- GATE(SCLK_CAM, "sclk_cam", "div_cam", CLKCON, 15, 0, 0),
- GATE(SCLK_UART, "sclk_uart", "div_uart", CLKCON, 14, 0, 0),
- GATE(SCLK_I2S, "sclk_i2s", "div_i2s", CLKCON, 13, 0, 0),
- GATE(SCLK_USBH, "sclk_usbh", "div_usb", CLKCON, 12, 0, 0),
- GATE(SCLK_USBD, "sclk_usbd", "div_usb", CLKCON, 11, 0, 0),
- GATE(HCLK_HALF, "hclk_half", "div_hclk_half", CLKCON, 10, CLK_IGNORE_UNUSED, 0),
- GATE(HCLK_X2, "hclkx2", "ff_hclk", CLKCON, 9, CLK_IGNORE_UNUSED, 0),
- GATE(HCLK_SDRAM, "sdram", "hclk", CLKCON, 8, CLK_IGNORE_UNUSED, 0),
- GATE(HCLK_USBH, "usb-host", "hclk", CLKCON, 6, 0, 0),
- GATE(HCLK_LCD, "lcd", "hclk", CLKCON, 5, 0, 0),
- GATE(HCLK_NAND, "nand", "hclk", CLKCON, 4, 0, 0),
- GATE(HCLK_DMA3, "dma3", "hclk", CLKCON, 3, CLK_IGNORE_UNUSED, 0),
- GATE(HCLK_DMA2, "dma2", "hclk", CLKCON, 2, CLK_IGNORE_UNUSED, 0),
- GATE(HCLK_DMA1, "dma1", "hclk", CLKCON, 1, CLK_IGNORE_UNUSED, 0),
- GATE(HCLK_DMA0, "dma0", "hclk", CLKCON, 0, CLK_IGNORE_UNUSED, 0),
-};
-
-static struct samsung_clock_alias s3c2412_aliases[] __initdata = {
- ALIAS(PCLK_UART0, "s3c2412-uart.0", "uart"),
- ALIAS(PCLK_UART1, "s3c2412-uart.1", "uart"),
- ALIAS(PCLK_UART2, "s3c2412-uart.2", "uart"),
- ALIAS(PCLK_UART0, "s3c2412-uart.0", "clk_uart_baud2"),
- ALIAS(PCLK_UART1, "s3c2412-uart.1", "clk_uart_baud2"),
- ALIAS(PCLK_UART2, "s3c2412-uart.2", "clk_uart_baud2"),
- ALIAS(SCLK_UART, NULL, "clk_uart_baud3"),
- ALIAS(PCLK_I2C, "s3c2410-i2c.0", "i2c"),
- ALIAS(PCLK_ADC, NULL, "adc"),
- ALIAS(PCLK_RTC, NULL, "rtc"),
- ALIAS(PCLK_PWM, NULL, "timers"),
- ALIAS(HCLK_LCD, NULL, "lcd"),
- ALIAS(PCLK_USBD, NULL, "usb-device"),
- ALIAS(SCLK_USBD, NULL, "usb-bus-gadget"),
- ALIAS(HCLK_USBH, NULL, "usb-host"),
- ALIAS(SCLK_USBH, NULL, "usb-bus-host"),
- ALIAS(ARMCLK, NULL, "armclk"),
- ALIAS(HCLK, NULL, "hclk"),
- ALIAS(MPLL, NULL, "mpll"),
- ALIAS(MSYSCLK, NULL, "fclk"),
-};
-
-static int s3c2412_restart(struct notifier_block *this,
- unsigned long mode, void *cmd)
-{
- /* errata "Watch-dog/Software Reset Problem" specifies that
- * this reset must be done with the SYSCLK sourced from
- * EXTCLK instead of FOUT to avoid a glitch in the reset
- * mechanism.
- *
- * See the watchdog section of the S3C2412 manual for more
- * information on this fix.
- */
-
- __raw_writel(0x00, reg_base + CLKSRC);
- __raw_writel(0x533C2412, reg_base + SWRST);
- return NOTIFY_DONE;
-}
-
-static struct notifier_block s3c2412_restart_handler = {
- .notifier_call = s3c2412_restart,
- .priority = 129,
-};
-
-/*
- * fixed rate clocks generated outside the soc
- * Only necessary until the devicetree-move is complete
- */
-#define XTI 1
-static struct samsung_fixed_rate_clock s3c2412_common_frate_clks[] __initdata = {
- FRATE(XTI, "xti", NULL, 0, 0),
- FRATE(0, "ext", NULL, 0, 0),
-};
-
-static void __init s3c2412_common_clk_register_fixed_ext(
- struct samsung_clk_provider *ctx,
- unsigned long xti_f, unsigned long ext_f)
-{
- /* xtal alias is necessary for the current cpufreq driver */
- struct samsung_clock_alias xti_alias = ALIAS(XTI, NULL, "xtal");
-
- s3c2412_common_frate_clks[0].fixed_rate = xti_f;
- s3c2412_common_frate_clks[1].fixed_rate = ext_f;
- samsung_clk_register_fixed_rate(ctx, s3c2412_common_frate_clks,
- ARRAY_SIZE(s3c2412_common_frate_clks));
-
- samsung_clk_register_alias(ctx, &xti_alias, 1);
-}
-
-void __init s3c2412_common_clk_init(struct device_node *np, unsigned long xti_f,
- unsigned long ext_f, void __iomem *base)
-{
- struct samsung_clk_provider *ctx;
- int ret;
- reg_base = base;
-
- if (np) {
- reg_base = of_iomap(np, 0);
- if (!reg_base)
- panic("%s: failed to map registers\n", __func__);
- }
-
- ctx = samsung_clk_init(np, reg_base, NR_CLKS);
-
- /* Register external clocks only in non-dt cases */
- if (!np)
- s3c2412_common_clk_register_fixed_ext(ctx, xti_f, ext_f);
-
- /* Register PLLs. */
- samsung_clk_register_pll(ctx, s3c2412_plls, ARRAY_SIZE(s3c2412_plls),
- reg_base);
-
- /* Register common internal clocks. */
- samsung_clk_register_mux(ctx, s3c2412_muxes, ARRAY_SIZE(s3c2412_muxes));
- samsung_clk_register_div(ctx, s3c2412_dividers,
- ARRAY_SIZE(s3c2412_dividers));
- samsung_clk_register_gate(ctx, s3c2412_gates,
- ARRAY_SIZE(s3c2412_gates));
- samsung_clk_register_fixed_factor(ctx, s3c2412_ffactor,
- ARRAY_SIZE(s3c2412_ffactor));
- samsung_clk_register_alias(ctx, s3c2412_aliases,
- ARRAY_SIZE(s3c2412_aliases));
-
- samsung_clk_sleep_init(reg_base, s3c2412_clk_regs,
- ARRAY_SIZE(s3c2412_clk_regs));
-
- samsung_clk_of_add_provider(np, ctx);
-
- ret = register_restart_handler(&s3c2412_restart_handler);
- if (ret)
- pr_warn("cannot register restart handler, %d\n", ret);
-}
-
-static void __init s3c2412_clk_init(struct device_node *np)
-{
- s3c2412_common_clk_init(np, 0, 0, NULL);
-}
-CLK_OF_DECLARE(s3c2412_clk, "samsung,s3c2412-clock", s3c2412_clk_init);
diff --git a/drivers/clk/samsung/clk-s3c2443.c b/drivers/clk/samsung/clk-s3c2443.c
deleted file mode 100644
index a827d63766d1..000000000000
--- a/drivers/clk/samsung/clk-s3c2443.c
+++ /dev/null
@@ -1,438 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2013 Heiko Stuebner <heiko@sntech.de>
- *
- * Common Clock Framework support for S3C2443 and following SoCs.
- */
-
-#include <linux/clk-provider.h>
-#include <linux/clk/samsung.h>
-#include <linux/io.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/reboot.h>
-
-#include <dt-bindings/clock/s3c2443.h>
-
-#include "clk.h"
-#include "clk-pll.h"
-
-/* S3C2416 clock controller register offsets */
-#define LOCKCON0 0x00
-#define LOCKCON1 0x04
-#define MPLLCON 0x10
-#define EPLLCON 0x18
-#define EPLLCON_K 0x1C
-#define CLKSRC 0x20
-#define CLKDIV0 0x24
-#define CLKDIV1 0x28
-#define CLKDIV2 0x2C
-#define HCLKCON 0x30
-#define PCLKCON 0x34
-#define SCLKCON 0x38
-#define SWRST 0x44
-
-/* the soc types */
-enum supported_socs {
- S3C2416,
- S3C2443,
- S3C2450,
-};
-
-static void __iomem *reg_base;
-
-/*
- * list of controller registers to be saved and restored during a
- * suspend/resume cycle.
- */
-static unsigned long s3c2443_clk_regs[] __initdata = {
- LOCKCON0,
- LOCKCON1,
- MPLLCON,
- EPLLCON,
- EPLLCON_K,
- CLKSRC,
- CLKDIV0,
- CLKDIV1,
- CLKDIV2,
- PCLKCON,
- HCLKCON,
- SCLKCON,
-};
-
-PNAME(epllref_p) = { "mpllref", "mpllref", "xti", "ext" };
-PNAME(esysclk_p) = { "epllref", "epll" };
-PNAME(mpllref_p) = { "xti", "mdivclk" };
-PNAME(msysclk_p) = { "mpllref", "mpll" };
-PNAME(armclk_p) = { "armdiv" , "hclk" };
-PNAME(i2s0_p) = { "div_i2s0", "ext_i2s", "epllref", "epllref" };
-
-static struct samsung_mux_clock s3c2443_common_muxes[] __initdata = {
- MUX(0, "epllref", epllref_p, CLKSRC, 7, 2),
- MUX(ESYSCLK, "esysclk", esysclk_p, CLKSRC, 6, 1),
- MUX(0, "mpllref", mpllref_p, CLKSRC, 3, 1),
- MUX(MSYSCLK, "msysclk", msysclk_p, CLKSRC, 4, 1),
- MUX(ARMCLK, "armclk", armclk_p, CLKDIV0, 13, 1),
- MUX(0, "mux_i2s0", i2s0_p, CLKSRC, 14, 2),
-};
-
-static struct clk_div_table hclk_d[] = {
- { .val = 0, .div = 1 },
- { .val = 1, .div = 2 },
- { .val = 3, .div = 4 },
- { /* sentinel */ },
-};
-
-static struct clk_div_table mdivclk_d[] = {
- { .val = 0, .div = 1 },
- { .val = 1, .div = 3 },
- { .val = 2, .div = 5 },
- { .val = 3, .div = 7 },
- { .val = 4, .div = 9 },
- { .val = 5, .div = 11 },
- { .val = 6, .div = 13 },
- { .val = 7, .div = 15 },
- { /* sentinel */ },
-};
-
-static struct samsung_div_clock s3c2443_common_dividers[] __initdata = {
- DIV_T(0, "mdivclk", "xti", CLKDIV0, 6, 3, mdivclk_d),
- DIV(0, "prediv", "msysclk", CLKDIV0, 4, 2),
- DIV_T(HCLK, "hclk", "prediv", CLKDIV0, 0, 2, hclk_d),
- DIV(PCLK, "pclk", "hclk", CLKDIV0, 2, 1),
- DIV(0, "div_hsspi0_epll", "esysclk", CLKDIV1, 24, 2),
- DIV(0, "div_fimd", "esysclk", CLKDIV1, 16, 8),
- DIV(0, "div_i2s0", "esysclk", CLKDIV1, 12, 4),
- DIV(0, "div_uart", "esysclk", CLKDIV1, 8, 4),
- DIV(0, "div_hsmmc1", "esysclk", CLKDIV1, 6, 2),
- DIV(0, "div_usbhost", "esysclk", CLKDIV1, 4, 2),
-};
-
-static struct samsung_gate_clock s3c2443_common_gates[] __initdata = {
- GATE(SCLK_HSMMC_EXT, "sclk_hsmmcext", "ext", SCLKCON, 13, 0, 0),
- GATE(SCLK_HSMMC1, "sclk_hsmmc1", "div_hsmmc1", SCLKCON, 12, 0, 0),
- GATE(SCLK_FIMD, "sclk_fimd", "div_fimd", SCLKCON, 10, 0, 0),
- GATE(SCLK_I2S0, "sclk_i2s0", "mux_i2s0", SCLKCON, 9, 0, 0),
- GATE(SCLK_UART, "sclk_uart", "div_uart", SCLKCON, 8, 0, 0),
- GATE(SCLK_USBH, "sclk_usbhost", "div_usbhost", SCLKCON, 1, 0, 0),
- GATE(HCLK_DRAM, "dram", "hclk", HCLKCON, 19, CLK_IGNORE_UNUSED, 0),
- GATE(HCLK_SSMC, "ssmc", "hclk", HCLKCON, 18, CLK_IGNORE_UNUSED, 0),
- GATE(HCLK_HSMMC1, "hsmmc1", "hclk", HCLKCON, 16, 0, 0),
- GATE(HCLK_USBD, "usb-device", "hclk", HCLKCON, 12, 0, 0),
- GATE(HCLK_USBH, "usb-host", "hclk", HCLKCON, 11, 0, 0),
- GATE(HCLK_LCD, "lcd", "hclk", HCLKCON, 9, 0, 0),
- GATE(HCLK_DMA5, "dma5", "hclk", HCLKCON, 5, CLK_IGNORE_UNUSED, 0),
- GATE(HCLK_DMA4, "dma4", "hclk", HCLKCON, 4, CLK_IGNORE_UNUSED, 0),
- GATE(HCLK_DMA3, "dma3", "hclk", HCLKCON, 3, CLK_IGNORE_UNUSED, 0),
- GATE(HCLK_DMA2, "dma2", "hclk", HCLKCON, 2, CLK_IGNORE_UNUSED, 0),
- GATE(HCLK_DMA1, "dma1", "hclk", HCLKCON, 1, CLK_IGNORE_UNUSED, 0),
- GATE(HCLK_DMA0, "dma0", "hclk", HCLKCON, 0, CLK_IGNORE_UNUSED, 0),
- GATE(PCLK_GPIO, "gpio", "pclk", PCLKCON, 13, CLK_IGNORE_UNUSED, 0),
- GATE(PCLK_RTC, "rtc", "pclk", PCLKCON, 12, 0, 0),
- GATE(PCLK_WDT, "wdt", "pclk", PCLKCON, 11, 0, 0),
- GATE(PCLK_PWM, "pwm", "pclk", PCLKCON, 10, 0, 0),
- GATE(PCLK_I2S0, "i2s0", "pclk", PCLKCON, 9, 0, 0),
- GATE(PCLK_AC97, "ac97", "pclk", PCLKCON, 8, 0, 0),
- GATE(PCLK_ADC, "adc", "pclk", PCLKCON, 7, 0, 0),
- GATE(PCLK_SPI0, "spi0", "pclk", PCLKCON, 6, 0, 0),
- GATE(PCLK_I2C0, "i2c0", "pclk", PCLKCON, 4, 0, 0),
- GATE(PCLK_UART3, "uart3", "pclk", PCLKCON, 3, 0, 0),
- GATE(PCLK_UART2, "uart2", "pclk", PCLKCON, 2, 0, 0),
- GATE(PCLK_UART1, "uart1", "pclk", PCLKCON, 1, 0, 0),
- GATE(PCLK_UART0, "uart0", "pclk", PCLKCON, 0, 0, 0),
-};
-
-static struct samsung_clock_alias s3c2443_common_aliases[] __initdata = {
- ALIAS(MSYSCLK, NULL, "msysclk"),
- ALIAS(ARMCLK, NULL, "armclk"),
- ALIAS(MPLL, NULL, "mpll"),
- ALIAS(EPLL, NULL, "epll"),
- ALIAS(HCLK, NULL, "hclk"),
- ALIAS(HCLK_SSMC, NULL, "nand"),
- ALIAS(PCLK_UART0, "s3c2440-uart.0", "uart"),
- ALIAS(PCLK_UART1, "s3c2440-uart.1", "uart"),
- ALIAS(PCLK_UART2, "s3c2440-uart.2", "uart"),
- ALIAS(PCLK_UART3, "s3c2440-uart.3", "uart"),
- ALIAS(PCLK_UART0, "s3c2440-uart.0", "clk_uart_baud2"),
- ALIAS(PCLK_UART1, "s3c2440-uart.1", "clk_uart_baud2"),
- ALIAS(PCLK_UART2, "s3c2440-uart.2", "clk_uart_baud2"),
- ALIAS(PCLK_UART3, "s3c2440-uart.3", "clk_uart_baud2"),
- ALIAS(SCLK_UART, NULL, "clk_uart_baud3"),
- ALIAS(PCLK_PWM, NULL, "timers"),
- ALIAS(PCLK_RTC, NULL, "rtc"),
- ALIAS(PCLK_WDT, NULL, "watchdog"),
- ALIAS(PCLK_ADC, NULL, "adc"),
- ALIAS(PCLK_I2C0, "s3c2410-i2c.0", "i2c"),
- ALIAS(HCLK_USBD, NULL, "usb-device"),
- ALIAS(HCLK_USBH, NULL, "usb-host"),
- ALIAS(SCLK_USBH, NULL, "usb-bus-host"),
- ALIAS(PCLK_SPI0, "s3c2443-spi.0", "spi"),
- ALIAS(PCLK_SPI0, "s3c2443-spi.0", "spi_busclk0"),
- ALIAS(HCLK_HSMMC1, "s3c-sdhci.1", "hsmmc"),
- ALIAS(HCLK_HSMMC1, "s3c-sdhci.1", "mmc_busclk.0"),
- ALIAS(PCLK_I2S0, "samsung-i2s.0", "iis"),
- ALIAS(SCLK_I2S0, NULL, "i2s-if"),
- ALIAS(HCLK_LCD, NULL, "lcd"),
- ALIAS(SCLK_FIMD, NULL, "sclk_fimd"),
-};
-
-/* S3C2416 specific clocks */
-
-static struct samsung_pll_clock s3c2416_pll_clks[] __initdata = {
- PLL(pll_6552_s3c2416, MPLL, "mpll", "mpllref", LOCKCON0, MPLLCON, NULL),
- PLL(pll_6553, EPLL, "epll", "epllref", LOCKCON1, EPLLCON, NULL),
-};
-
-PNAME(s3c2416_hsmmc0_p) = { "sclk_hsmmc0", "sclk_hsmmcext" };
-PNAME(s3c2416_hsmmc1_p) = { "sclk_hsmmc1", "sclk_hsmmcext" };
-PNAME(s3c2416_hsspi0_p) = { "hsspi0_epll", "hsspi0_mpll" };
-
-static struct clk_div_table armdiv_s3c2416_d[] = {
- { .val = 0, .div = 1 },
- { .val = 1, .div = 2 },
- { .val = 2, .div = 3 },
- { .val = 3, .div = 4 },
- { .val = 5, .div = 6 },
- { .val = 7, .div = 8 },
- { /* sentinel */ },
-};
-
-static struct samsung_div_clock s3c2416_dividers[] __initdata = {
- DIV_T(ARMDIV, "armdiv", "msysclk", CLKDIV0, 9, 3, armdiv_s3c2416_d),
- DIV(0, "div_hsspi0_mpll", "msysclk", CLKDIV2, 0, 4),
- DIV(0, "div_hsmmc0", "esysclk", CLKDIV2, 6, 2),
-};
-
-static struct samsung_mux_clock s3c2416_muxes[] __initdata = {
- MUX(MUX_HSMMC0, "mux_hsmmc0", s3c2416_hsmmc0_p, CLKSRC, 16, 1),
- MUX(MUX_HSMMC1, "mux_hsmmc1", s3c2416_hsmmc1_p, CLKSRC, 17, 1),
- MUX(MUX_HSSPI0, "mux_hsspi0", s3c2416_hsspi0_p, CLKSRC, 18, 1),
-};
-
-static struct samsung_gate_clock s3c2416_gates[] __initdata = {
- GATE(0, "hsspi0_mpll", "div_hsspi0_mpll", SCLKCON, 19, 0, 0),
- GATE(0, "hsspi0_epll", "div_hsspi0_epll", SCLKCON, 14, 0, 0),
- GATE(0, "sclk_hsmmc0", "div_hsmmc0", SCLKCON, 6, 0, 0),
- GATE(HCLK_2D, "2d", "hclk", HCLKCON, 20, 0, 0),
- GATE(HCLK_HSMMC0, "hsmmc0", "hclk", HCLKCON, 15, 0, 0),
- GATE(HCLK_IROM, "irom", "hclk", HCLKCON, 13, CLK_IGNORE_UNUSED, 0),
- GATE(PCLK_PCM, "pcm", "pclk", PCLKCON, 19, 0, 0),
-};
-
-static struct samsung_clock_alias s3c2416_aliases[] __initdata = {
- ALIAS(HCLK_HSMMC0, "s3c-sdhci.0", "hsmmc"),
- ALIAS(HCLK_HSMMC0, "s3c-sdhci.0", "mmc_busclk.0"),
- ALIAS(MUX_HSMMC0, "s3c-sdhci.0", "mmc_busclk.2"),
- ALIAS(MUX_HSMMC1, "s3c-sdhci.1", "mmc_busclk.2"),
- ALIAS(MUX_HSSPI0, "s3c2443-spi.0", "spi_busclk2"),
- ALIAS(ARMDIV, NULL, "armdiv"),
-};
-
-/* S3C2443 specific clocks */
-
-static struct samsung_pll_clock s3c2443_pll_clks[] __initdata = {
- PLL(pll_3000, MPLL, "mpll", "mpllref", LOCKCON0, MPLLCON, NULL),
- PLL(pll_2126, EPLL, "epll", "epllref", LOCKCON1, EPLLCON, NULL),
-};
-
-static struct clk_div_table armdiv_s3c2443_d[] = {
- { .val = 0, .div = 1 },
- { .val = 8, .div = 2 },
- { .val = 2, .div = 3 },
- { .val = 9, .div = 4 },
- { .val = 10, .div = 6 },
- { .val = 11, .div = 8 },
- { .val = 13, .div = 12 },
- { .val = 15, .div = 16 },
- { /* sentinel */ },
-};
-
-static struct samsung_div_clock s3c2443_dividers[] __initdata = {
- DIV_T(ARMDIV, "armdiv", "msysclk", CLKDIV0, 9, 4, armdiv_s3c2443_d),
- DIV(0, "div_cam", "esysclk", CLKDIV1, 26, 4),
-};
-
-static struct samsung_gate_clock s3c2443_gates[] __initdata = {
- GATE(SCLK_HSSPI0, "sclk_hsspi0", "div_hsspi0_epll", SCLKCON, 14, 0, 0),
- GATE(SCLK_CAM, "sclk_cam", "div_cam", SCLKCON, 11, 0, 0),
- GATE(HCLK_CFC, "cfc", "hclk", HCLKCON, 17, CLK_IGNORE_UNUSED, 0),
- GATE(HCLK_CAM, "cam", "hclk", HCLKCON, 8, 0, 0),
- GATE(PCLK_SPI1, "spi1", "pclk", PCLKCON, 15, 0, 0),
- GATE(PCLK_SDI, "sdi", "pclk", PCLKCON, 5, 0, 0),
-};
-
-static struct samsung_clock_alias s3c2443_aliases[] __initdata = {
- ALIAS(SCLK_HSSPI0, "s3c2443-spi.0", "spi_busclk2"),
- ALIAS(SCLK_HSMMC1, "s3c-sdhci.1", "mmc_busclk.2"),
- ALIAS(SCLK_CAM, NULL, "camif-upll"),
- ALIAS(PCLK_SPI1, "s3c2410-spi.0", "spi"),
- ALIAS(PCLK_SDI, NULL, "sdi"),
- ALIAS(HCLK_CFC, NULL, "cfc"),
- ALIAS(ARMDIV, NULL, "armdiv"),
-};
-
-/* S3C2450 specific clocks */
-
-PNAME(s3c2450_cam_p) = { "div_cam", "hclk" };
-PNAME(s3c2450_hsspi1_p) = { "hsspi1_epll", "hsspi1_mpll" };
-PNAME(i2s1_p) = { "div_i2s1", "ext_i2s", "epllref", "epllref" };
-
-static struct samsung_div_clock s3c2450_dividers[] __initdata = {
- DIV(0, "div_cam", "esysclk", CLKDIV1, 26, 4),
- DIV(0, "div_hsspi1_epll", "esysclk", CLKDIV2, 24, 2),
- DIV(0, "div_hsspi1_mpll", "msysclk", CLKDIV2, 16, 4),
- DIV(0, "div_i2s1", "esysclk", CLKDIV2, 12, 4),
-};
-
-static struct samsung_mux_clock s3c2450_muxes[] __initdata = {
- MUX(0, "mux_cam", s3c2450_cam_p, CLKSRC, 20, 1),
- MUX(MUX_HSSPI1, "mux_hsspi1", s3c2450_hsspi1_p, CLKSRC, 19, 1),
- MUX(0, "mux_i2s1", i2s1_p, CLKSRC, 12, 2),
-};
-
-static struct samsung_gate_clock s3c2450_gates[] __initdata = {
- GATE(SCLK_I2S1, "sclk_i2s1", "div_i2s1", SCLKCON, 5, 0, 0),
- GATE(HCLK_CFC, "cfc", "hclk", HCLKCON, 17, 0, 0),
- GATE(HCLK_CAM, "cam", "hclk", HCLKCON, 8, 0, 0),
- GATE(HCLK_DMA7, "dma7", "hclk", HCLKCON, 7, CLK_IGNORE_UNUSED, 0),
- GATE(HCLK_DMA6, "dma6", "hclk", HCLKCON, 6, CLK_IGNORE_UNUSED, 0),
- GATE(PCLK_I2S1, "i2s1", "pclk", PCLKCON, 17, 0, 0),
- GATE(PCLK_I2C1, "i2c1", "pclk", PCLKCON, 16, 0, 0),
- GATE(PCLK_SPI1, "spi1", "pclk", PCLKCON, 14, 0, 0),
-};
-
-static struct samsung_clock_alias s3c2450_aliases[] __initdata = {
- ALIAS(PCLK_SPI1, "s3c2443-spi.1", "spi"),
- ALIAS(PCLK_SPI1, "s3c2443-spi.1", "spi_busclk0"),
- ALIAS(MUX_HSSPI1, "s3c2443-spi.1", "spi_busclk2"),
- ALIAS(PCLK_I2C1, "s3c2410-i2c.1", "i2c"),
-};
-
-static int s3c2443_restart(struct notifier_block *this,
- unsigned long mode, void *cmd)
-{
- __raw_writel(0x533c2443, reg_base + SWRST);
- return NOTIFY_DONE;
-}
-
-static struct notifier_block s3c2443_restart_handler = {
- .notifier_call = s3c2443_restart,
- .priority = 129,
-};
-
-/*
- * fixed rate clocks generated outside the soc
- * Only necessary until the devicetree-move is complete
- */
-static struct samsung_fixed_rate_clock s3c2443_common_frate_clks[] __initdata = {
- FRATE(0, "xti", NULL, 0, 0),
- FRATE(0, "ext", NULL, 0, 0),
- FRATE(0, "ext_i2s", NULL, 0, 0),
- FRATE(0, "ext_uart", NULL, 0, 0),
-};
-
-static void __init s3c2443_common_clk_register_fixed_ext(
- struct samsung_clk_provider *ctx, unsigned long xti_f)
-{
- s3c2443_common_frate_clks[0].fixed_rate = xti_f;
- samsung_clk_register_fixed_rate(ctx, s3c2443_common_frate_clks,
- ARRAY_SIZE(s3c2443_common_frate_clks));
-}
-
-void __init s3c2443_common_clk_init(struct device_node *np, unsigned long xti_f,
- int current_soc,
- void __iomem *base)
-{
- struct samsung_clk_provider *ctx;
- int ret;
- reg_base = base;
-
- if (np) {
- reg_base = of_iomap(np, 0);
- if (!reg_base)
- panic("%s: failed to map registers\n", __func__);
- }
-
- ctx = samsung_clk_init(np, reg_base, NR_CLKS);
-
- /* Register external clocks only in non-dt cases */
- if (!np)
- s3c2443_common_clk_register_fixed_ext(ctx, xti_f);
-
- /* Register PLLs. */
- if (current_soc == S3C2416 || current_soc == S3C2450)
- samsung_clk_register_pll(ctx, s3c2416_pll_clks,
- ARRAY_SIZE(s3c2416_pll_clks), reg_base);
- else
- samsung_clk_register_pll(ctx, s3c2443_pll_clks,
- ARRAY_SIZE(s3c2443_pll_clks), reg_base);
-
- /* Register common internal clocks. */
- samsung_clk_register_mux(ctx, s3c2443_common_muxes,
- ARRAY_SIZE(s3c2443_common_muxes));
- samsung_clk_register_div(ctx, s3c2443_common_dividers,
- ARRAY_SIZE(s3c2443_common_dividers));
- samsung_clk_register_gate(ctx, s3c2443_common_gates,
- ARRAY_SIZE(s3c2443_common_gates));
- samsung_clk_register_alias(ctx, s3c2443_common_aliases,
- ARRAY_SIZE(s3c2443_common_aliases));
-
- /* Register SoC-specific clocks. */
- switch (current_soc) {
- case S3C2450:
- samsung_clk_register_div(ctx, s3c2450_dividers,
- ARRAY_SIZE(s3c2450_dividers));
- samsung_clk_register_mux(ctx, s3c2450_muxes,
- ARRAY_SIZE(s3c2450_muxes));
- samsung_clk_register_gate(ctx, s3c2450_gates,
- ARRAY_SIZE(s3c2450_gates));
- samsung_clk_register_alias(ctx, s3c2450_aliases,
- ARRAY_SIZE(s3c2450_aliases));
- fallthrough; /* as s3c2450 extends the s3c2416 clocks */
- case S3C2416:
- samsung_clk_register_div(ctx, s3c2416_dividers,
- ARRAY_SIZE(s3c2416_dividers));
- samsung_clk_register_mux(ctx, s3c2416_muxes,
- ARRAY_SIZE(s3c2416_muxes));
- samsung_clk_register_gate(ctx, s3c2416_gates,
- ARRAY_SIZE(s3c2416_gates));
- samsung_clk_register_alias(ctx, s3c2416_aliases,
- ARRAY_SIZE(s3c2416_aliases));
- break;
- case S3C2443:
- samsung_clk_register_div(ctx, s3c2443_dividers,
- ARRAY_SIZE(s3c2443_dividers));
- samsung_clk_register_gate(ctx, s3c2443_gates,
- ARRAY_SIZE(s3c2443_gates));
- samsung_clk_register_alias(ctx, s3c2443_aliases,
- ARRAY_SIZE(s3c2443_aliases));
- break;
- }
-
- samsung_clk_sleep_init(reg_base, s3c2443_clk_regs,
- ARRAY_SIZE(s3c2443_clk_regs));
-
- samsung_clk_of_add_provider(np, ctx);
-
- ret = register_restart_handler(&s3c2443_restart_handler);
- if (ret)
- pr_warn("cannot register restart handler, %d\n", ret);
-}
-
-static void __init s3c2416_clk_init(struct device_node *np)
-{
- s3c2443_common_clk_init(np, 0, S3C2416, NULL);
-}
-CLK_OF_DECLARE(s3c2416_clk, "samsung,s3c2416-clock", s3c2416_clk_init);
-
-static void __init s3c2443_clk_init(struct device_node *np)
-{
- s3c2443_common_clk_init(np, 0, S3C2443, NULL);
-}
-CLK_OF_DECLARE(s3c2443_clk, "samsung,s3c2443-clock", s3c2443_clk_init);
-
-static void __init s3c2450_clk_init(struct device_node *np)
-{
- s3c2443_common_clk_init(np, 0, S3C2450, NULL);
-}
-CLK_OF_DECLARE(s3c2450_clk, "samsung,s3c2450-clock", s3c2450_clk_init);
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 4469e7f555e9..5fc8f0e7fb38 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -441,7 +441,7 @@ config CLKSRC_EXYNOS_MCT
config CLKSRC_SAMSUNG_PWM
bool "PWM timer driver for Samsung S3C, S5P" if COMPILE_TEST
depends on HAS_IOMEM
- depends on ARCH_EXYNOS || ARCH_S3C24XX || ARCH_S3C64XX || ARCH_S5PV210 || COMPILE_TEST
+ depends on ARCH_EXYNOS || ARCH_S3C64XX || ARCH_S5PV210 || COMPILE_TEST
help
This is a new clocksource driver for the PWM timer found in
Samsung S3C, S5P and Exynos SoCs, replacing an earlier driver
@@ -706,7 +706,7 @@ config INGENIC_OST
config MICROCHIP_PIT64B
bool "Microchip PIT64B support"
- depends on OF || COMPILE_TEST
+ depends on OF && ARM
select TIMER_OF
help
This option enables Microchip PIT64B timer for Atmel
diff --git a/drivers/clocksource/acpi_pm.c b/drivers/clocksource/acpi_pm.c
index 279ddff81ab4..82338773602c 100644
--- a/drivers/clocksource/acpi_pm.c
+++ b/drivers/clocksource/acpi_pm.c
@@ -23,6 +23,7 @@
#include <linux/pci.h>
#include <linux/delay.h>
#include <asm/io.h>
+#include <asm/time.h>
/*
* The I/O port the PMTMR resides at.
@@ -210,8 +211,9 @@ static int __init init_acpi_pm_clocksource(void)
return -ENODEV;
}
- return clocksource_register_hz(&clocksource_acpi_pm,
- PMTMR_TICKS_PER_SEC);
+ if (tsc_clocksource_watchdog_disabled())
+ clocksource_acpi_pm.flags |= CLOCK_SOURCE_MUST_VERIFY;
+ return clocksource_register_hz(&clocksource_acpi_pm, PMTMR_TICKS_PER_SEC);
}
/* We use fs_initcall because we want the PCI fixups to have run
diff --git a/drivers/clocksource/em_sti.c b/drivers/clocksource/em_sti.c
index ab190dffb1ed..c04b47bd4868 100644
--- a/drivers/clocksource/em_sti.c
+++ b/drivers/clocksource/em_sti.c
@@ -333,11 +333,6 @@ static int em_sti_probe(struct platform_device *pdev)
return 0;
}
-static int em_sti_remove(struct platform_device *pdev)
-{
- return -EBUSY; /* cannot unregister clockevent and clocksource */
-}
-
static const struct of_device_id em_sti_dt_ids[] = {
{ .compatible = "renesas,em-sti", },
{},
@@ -346,10 +341,10 @@ MODULE_DEVICE_TABLE(of, em_sti_dt_ids);
static struct platform_driver em_sti_device_driver = {
.probe = em_sti_probe,
- .remove = em_sti_remove,
.driver = {
.name = "em_sti",
.of_match_table = em_sti_dt_ids,
+ .suppress_bind_attrs = true,
}
};
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
index 7b952aa52c0b..8b2e079d9df2 100644
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -1145,17 +1145,12 @@ static int sh_cmt_probe(struct platform_device *pdev)
return 0;
}
-static int sh_cmt_remove(struct platform_device *pdev)
-{
- return -EBUSY; /* cannot unregister clockevent and clocksource */
-}
-
static struct platform_driver sh_cmt_device_driver = {
.probe = sh_cmt_probe,
- .remove = sh_cmt_remove,
.driver = {
.name = "sh_cmt",
.of_match_table = of_match_ptr(sh_cmt_of_table),
+ .suppress_bind_attrs = true,
},
.id_table = sh_cmt_id_table,
};
diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c
index b00dec0655cb..932f31a7c5be 100644
--- a/drivers/clocksource/sh_tmu.c
+++ b/drivers/clocksource/sh_tmu.c
@@ -632,11 +632,6 @@ static int sh_tmu_probe(struct platform_device *pdev)
return 0;
}
-static int sh_tmu_remove(struct platform_device *pdev)
-{
- return -EBUSY; /* cannot unregister clockevent and clocksource */
-}
-
static const struct platform_device_id sh_tmu_id_table[] = {
{ "sh-tmu", SH_TMU },
{ "sh-tmu-sh3", SH_TMU_SH3 },
@@ -652,10 +647,10 @@ MODULE_DEVICE_TABLE(of, sh_tmu_of_table);
static struct platform_driver sh_tmu_device_driver = {
.probe = sh_tmu_probe,
- .remove = sh_tmu_remove,
.driver = {
.name = "sh_tmu",
.of_match_table = of_match_ptr(sh_tmu_of_table),
+ .suppress_bind_attrs = true,
},
.id_table = sh_tmu_id_table,
};
diff --git a/drivers/clocksource/timer-microchip-pit64b.c b/drivers/clocksource/timer-microchip-pit64b.c
index d5f1436f33d9..57209bb38c70 100644
--- a/drivers/clocksource/timer-microchip-pit64b.c
+++ b/drivers/clocksource/timer-microchip-pit64b.c
@@ -9,6 +9,7 @@
#include <linux/clk.h>
#include <linux/clockchips.h>
+#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
@@ -92,6 +93,8 @@ struct mchp_pit64b_clksrc {
static void __iomem *mchp_pit64b_cs_base;
/* Default cycles for clockevent timer. */
static u64 mchp_pit64b_ce_cycles;
+/* Delay timer. */
+static struct delay_timer mchp_pit64b_dt;
static inline u64 mchp_pit64b_cnt_read(void __iomem *base)
{
@@ -169,6 +172,11 @@ static u64 notrace mchp_pit64b_sched_read_clk(void)
return mchp_pit64b_cnt_read(mchp_pit64b_cs_base);
}
+static unsigned long notrace mchp_pit64b_dt_read(void)
+{
+ return mchp_pit64b_cnt_read(mchp_pit64b_cs_base);
+}
+
static int mchp_pit64b_clkevt_shutdown(struct clock_event_device *cedev)
{
struct mchp_pit64b_timer *timer = clkevt_to_mchp_pit64b_timer(cedev);
@@ -376,6 +384,10 @@ static int __init mchp_pit64b_init_clksrc(struct mchp_pit64b_timer *timer,
sched_clock_register(mchp_pit64b_sched_read_clk, 64, clk_rate);
+ mchp_pit64b_dt.read_current_timer = mchp_pit64b_dt_read;
+ mchp_pit64b_dt.freq = clk_rate;
+ register_current_timer_delay(&mchp_pit64b_dt);
+
return 0;
}
diff --git a/drivers/clocksource/timer-riscv.c b/drivers/clocksource/timer-riscv.c
index a0d66fabf073..5f0f10c7e222 100644
--- a/drivers/clocksource/timer-riscv.c
+++ b/drivers/clocksource/timer-riscv.c
@@ -28,6 +28,7 @@
#include <asm/timex.h>
static DEFINE_STATIC_KEY_FALSE(riscv_sstc_available);
+static bool riscv_timer_cannot_wake_cpu;
static int riscv_clock_next_event(unsigned long delta,
struct clock_event_device *ce)
@@ -73,10 +74,15 @@ static u64 notrace riscv_sched_clock(void)
static struct clocksource riscv_clocksource = {
.name = "riscv_clocksource",
- .rating = 300,
+ .rating = 400,
.mask = CLOCKSOURCE_MASK(64),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
.read = riscv_clocksource_rdtime,
+#if IS_ENABLED(CONFIG_GENERIC_GETTIMEOFDAY)
+ .vdso_clock_mode = VDSO_CLOCKMODE_ARCHTIMER,
+#else
+ .vdso_clock_mode = VDSO_CLOCKMODE_NONE,
+#endif
};
static int riscv_timer_starting_cpu(unsigned int cpu)
@@ -85,6 +91,8 @@ static int riscv_timer_starting_cpu(unsigned int cpu)
ce->cpumask = cpumask_of(cpu);
ce->irq = riscv_clock_event_irq;
+ if (riscv_timer_cannot_wake_cpu)
+ ce->features |= CLOCK_EVT_FEAT_C3STOP;
clockevents_config_and_register(ce, riscv_timebase, 100, 0x7fffffff);
enable_percpu_irq(riscv_clock_event_irq,
@@ -139,6 +147,13 @@ static int __init riscv_timer_init_dt(struct device_node *n)
if (cpuid != smp_processor_id())
return 0;
+ child = of_find_compatible_node(NULL, NULL, "riscv,timer");
+ if (child) {
+ riscv_timer_cannot_wake_cpu = of_property_read_bool(child,
+ "riscv,timer-cannot-wake-cpu");
+ of_node_put(child);
+ }
+
domain = NULL;
child = of_get_compatible_child(n, "riscv,cpu-intc");
if (!child) {
@@ -177,6 +192,11 @@ static int __init riscv_timer_init_dt(struct device_node *n)
return error;
}
+ if (riscv_isa_extension_available(NULL, SSTC)) {
+ pr_info("Timer interrupt in S-mode is available via sstc extension\n");
+ static_branch_enable(&riscv_sstc_available);
+ }
+
error = cpuhp_setup_state(CPUHP_AP_RISCV_TIMER_STARTING,
"clockevents/riscv/timer:starting",
riscv_timer_starting_cpu, riscv_timer_dying_cpu);
@@ -184,11 +204,6 @@ static int __init riscv_timer_init_dt(struct device_node *n)
pr_err("cpu hp setup state failed for RISCV timer [%d]\n",
error);
- if (riscv_isa_extension_available(NULL, SSTC)) {
- pr_info("Timer interrupt in S-mode is available via sstc extension\n");
- static_branch_enable(&riscv_sstc_available);
- }
-
return error;
}
diff --git a/drivers/clocksource/timer-sun4i.c b/drivers/clocksource/timer-sun4i.c
index e5a70aa1deb4..7bdcc60ad43c 100644
--- a/drivers/clocksource/timer-sun4i.c
+++ b/drivers/clocksource/timer-sun4i.c
@@ -144,7 +144,8 @@ static struct timer_of to = {
.clkevt = {
.name = "sun4i_tick",
.rating = 350,
- .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
+ .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT |
+ CLOCK_EVT_FEAT_DYNIRQ,
.set_state_shutdown = sun4i_clkevt_shutdown,
.set_state_periodic = sun4i_clkevt_set_periodic,
.set_state_oneshot = sun4i_clkevt_set_oneshot,
diff --git a/drivers/comedi/Kconfig b/drivers/comedi/Kconfig
index 3cb61fa2c5c3..9af280735cba 100644
--- a/drivers/comedi/Kconfig
+++ b/drivers/comedi/Kconfig
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-config COMEDI
+menuconfig COMEDI
tristate "Data acquisition support (comedi)"
help
Enable support for a wide range of data acquisition devices
diff --git a/drivers/comedi/comedi_fops.c b/drivers/comedi/comedi_fops.c
index e2114bcf815a..b982903aaa46 100644
--- a/drivers/comedi/comedi_fops.c
+++ b/drivers/comedi/comedi_fops.c
@@ -1215,6 +1215,7 @@ static int check_insn_config_length(struct comedi_insn *insn,
case INSN_CONFIG_GET_CLOCK_SRC:
case INSN_CONFIG_SET_OTHER_SRC:
case INSN_CONFIG_GET_COUNTER_STATUS:
+ case INSN_CONFIG_GET_PWM_OUTPUT:
case INSN_CONFIG_PWM_SET_H_BRIDGE:
case INSN_CONFIG_PWM_GET_H_BRIDGE:
case INSN_CONFIG_GET_HARDWARE_BUFFER_SIZE:
diff --git a/drivers/counter/Kconfig b/drivers/counter/Kconfig
index d388bf26f4dc..b5ba8fb02cf7 100644
--- a/drivers/counter/Kconfig
+++ b/drivers/counter/Kconfig
@@ -29,6 +29,28 @@ config 104_QUAD_8
array module parameter. The interrupt line numbers for the devices may
be configured via the irq array module parameter.
+config FTM_QUADDEC
+ tristate "Flex Timer Module Quadrature decoder driver"
+ depends on SOC_LS1021A || COMPILE_TEST
+ depends on HAS_IOMEM && OF
+ help
+ Select this option to enable the Flex Timer Quadrature decoder
+ driver.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ftm-quaddec.
+
+config INTEL_QEP
+ tristate "Intel Quadrature Encoder Peripheral driver"
+ depends on X86
+ depends on PCI
+ help
+ Select this option to enable the Intel Quadrature Encoder Peripheral
+ driver.
+
+ To compile this driver as a module, choose M here: the module
+ will be called intel-qep.
+
config INTERRUPT_CNT
tristate "Interrupt counter driver"
depends on GPIOLIB
@@ -39,15 +61,17 @@ config INTERRUPT_CNT
To compile this driver as a module, choose M here: the
module will be called interrupt-cnt.
-config STM32_TIMER_CNT
- tristate "STM32 Timer encoder counter driver"
- depends on MFD_STM32_TIMERS || COMPILE_TEST
+config MICROCHIP_TCB_CAPTURE
+ tristate "Microchip Timer Counter Capture driver"
+ depends on SOC_AT91SAM9 || SOC_SAM_V7 || COMPILE_TEST
+ depends on HAS_IOMEM && OF
+ select REGMAP_MMIO
help
- Select this option to enable STM32 Timer quadrature encoder
- and counter driver.
+ Select this option to enable the Microchip Timer Counter Block
+ capture driver.
To compile this driver as a module, choose M here: the
- module will be called stm32-timer-cnt.
+ module will be called microchip-tcb-capture.
config STM32_LPTIMER_CNT
tristate "STM32 LP Timer encoder counter driver"
@@ -59,47 +83,15 @@ config STM32_LPTIMER_CNT
To compile this driver as a module, choose M here: the
module will be called stm32-lptimer-cnt.
-config TI_EQEP
- tristate "TI eQEP counter driver"
- depends on (SOC_AM33XX || COMPILE_TEST)
- select REGMAP_MMIO
- help
- Select this option to enable the Texas Instruments Enhanced Quadrature
- Encoder Pulse (eQEP) counter driver.
-
- To compile this driver as a module, choose M here: the module will be
- called ti-eqep.
-
-config FTM_QUADDEC
- tristate "Flex Timer Module Quadrature decoder driver"
- depends on HAS_IOMEM && OF
- help
- Select this option to enable the Flex Timer Quadrature decoder
- driver.
-
- To compile this driver as a module, choose M here: the
- module will be called ftm-quaddec.
-
-config MICROCHIP_TCB_CAPTURE
- tristate "Microchip Timer Counter Capture driver"
- depends on HAS_IOMEM && OF
- select REGMAP_MMIO
+config STM32_TIMER_CNT
+ tristate "STM32 Timer encoder counter driver"
+ depends on MFD_STM32_TIMERS || COMPILE_TEST
help
- Select this option to enable the Microchip Timer Counter Block
- capture driver.
+ Select this option to enable STM32 Timer quadrature encoder
+ and counter driver.
To compile this driver as a module, choose M here: the
- module will be called microchip-tcb-capture.
-
-config INTEL_QEP
- tristate "Intel Quadrature Encoder Peripheral driver"
- depends on PCI
- help
- Select this option to enable the Intel Quadrature Encoder Peripheral
- driver.
-
- To compile this driver as a module, choose M here: the module
- will be called intel-qep.
+ module will be called stm32-timer-cnt.
config TI_ECAP_CAPTURE
tristate "TI eCAP capture driver"
@@ -116,4 +108,15 @@ config TI_ECAP_CAPTURE
To compile this driver as a module, choose M here: the module
will be called ti-ecap-capture.
+config TI_EQEP
+ tristate "TI eQEP counter driver"
+ depends on (SOC_AM33XX || COMPILE_TEST)
+ select REGMAP_MMIO
+ help
+ Select this option to enable the Texas Instruments Enhanced Quadrature
+ Encoder Pulse (eQEP) counter driver.
+
+ To compile this driver as a module, choose M here: the module will be
+ called ti-eqep.
+
endif # COUNTER
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index 2a84fc63371e..2c839bd2b051 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -3,7 +3,6 @@ menu "CPU Frequency scaling"
config CPU_FREQ
bool "CPU Frequency scaling"
- select SRCU
help
CPU Frequency scaling allows you to change the clock speed of
CPUs on the fly. This is a nice method to save power, because
@@ -37,7 +36,7 @@ config CPU_FREQ_STAT
choice
prompt "Default CPUFreq governor"
- default CPU_FREQ_DEFAULT_GOV_USERSPACE if ARM_SA1100_CPUFREQ || ARM_SA1110_CPUFREQ
+ default CPU_FREQ_DEFAULT_GOV_USERSPACE if ARM_SA1110_CPUFREQ
default CPU_FREQ_DEFAULT_GOV_SCHEDUTIL if ARM64 || ARM
default CPU_FREQ_DEFAULT_GOV_SCHEDUTIL if X86_INTEL_PSTATE && SMP
default CPU_FREQ_DEFAULT_GOV_PERFORMANCE
@@ -271,15 +270,6 @@ config LOONGSON2_CPUFREQ
Loongson2F and its successors support this feature.
If in doubt, say N.
-
-config LOONGSON1_CPUFREQ
- tristate "Loongson1 CPUFreq Driver"
- depends on LOONGSON1_LS1B
- help
- This option adds a CPUFreq driver for loongson1 processors which
- support software configurable cpu frequency.
-
- If in doubt, say N.
endif
if SPARC64
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 0a0352d8fa45..97acaa2136fd 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -189,84 +189,6 @@ config ARM_RASPBERRYPI_CPUFREQ
If in doubt, say N.
-config ARM_S3C_CPUFREQ
- bool
- help
- Internal configuration node for common cpufreq on Samsung SoC
-
-config ARM_S3C24XX_CPUFREQ
- bool "CPUfreq driver for Samsung S3C24XX series CPUs (EXPERIMENTAL)"
- depends on ARCH_S3C24XX
- select ARM_S3C_CPUFREQ
- help
- This enables the CPUfreq driver for the Samsung S3C24XX family
- of CPUs.
-
- For details, take a look at <file:Documentation/cpu-freq>.
-
- If in doubt, say N.
-
-config ARM_S3C24XX_CPUFREQ_DEBUG
- bool "Debug CPUfreq Samsung driver core"
- depends on ARM_S3C24XX_CPUFREQ
- help
- Enable s3c_freq_dbg for the Samsung S3C CPUfreq core
-
-config ARM_S3C24XX_CPUFREQ_IODEBUG
- bool "Debug CPUfreq Samsung driver IO timing"
- depends on ARM_S3C24XX_CPUFREQ
- help
- Enable s3c_freq_iodbg for the Samsung S3C CPUfreq core
-
-config ARM_S3C24XX_CPUFREQ_DEBUGFS
- bool "Export debugfs for CPUFreq"
- depends on ARM_S3C24XX_CPUFREQ && DEBUG_FS
- help
- Export status information via debugfs.
-
-config ARM_S3C2410_CPUFREQ
- bool
- depends on ARM_S3C24XX_CPUFREQ && CPU_S3C2410
- help
- CPU Frequency scaling support for S3C2410
-
-config ARM_S3C2412_CPUFREQ
- bool
- depends on ARM_S3C24XX_CPUFREQ && CPU_S3C2412
- default y
- select S3C2412_IOTIMING
- help
- CPU Frequency scaling support for S3C2412 and S3C2413 SoC CPUs.
-
-config ARM_S3C2416_CPUFREQ
- bool "S3C2416 CPU Frequency scaling support"
- depends on CPU_S3C2416
- help
- This adds the CPUFreq driver for the Samsung S3C2416 and
- S3C2450 SoC. The S3C2416 supports changing the rate of the
- armdiv clock source and also entering a so called dynamic
- voltage scaling mode in which it is possible to reduce the
- core voltage of the CPU.
-
- If in doubt, say N.
-
-config ARM_S3C2416_CPUFREQ_VCORESCALE
- bool "Allow voltage scaling for S3C2416 arm core"
- depends on ARM_S3C2416_CPUFREQ && REGULATOR
- help
- Enable CPU voltage scaling when entering the dvs mode.
- It uses information gathered through existing hardware and
- tests but not documented in any datasheet.
-
- If in doubt, say N.
-
-config ARM_S3C2440_CPUFREQ
- bool "S3C2440/S3C2442 CPU Frequency scaling support"
- depends on ARM_S3C24XX_CPUFREQ && (CPU_S3C2440 || CPU_S3C2442)
- default y
- help
- CPU Frequency scaling support for S3C2440 and S3C2442 SoC CPUs.
-
config ARM_S3C64XX_CPUFREQ
bool "Samsung S3C64XX"
depends on CPU_S3C6410
@@ -286,9 +208,6 @@ config ARM_S5PV210_CPUFREQ
If in doubt, say N.
-config ARM_SA1100_CPUFREQ
- bool
-
config ARM_SA1110_CPUFREQ
bool
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index 32a7029e25ed..ef8510774913 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -71,15 +71,8 @@ obj-$(CONFIG_PXA3xx) += pxa3xx-cpufreq.o
obj-$(CONFIG_ARM_QCOM_CPUFREQ_HW) += qcom-cpufreq-hw.o
obj-$(CONFIG_ARM_QCOM_CPUFREQ_NVMEM) += qcom-cpufreq-nvmem.o
obj-$(CONFIG_ARM_RASPBERRYPI_CPUFREQ) += raspberrypi-cpufreq.o
-obj-$(CONFIG_ARM_S3C2410_CPUFREQ) += s3c2410-cpufreq.o
-obj-$(CONFIG_ARM_S3C2412_CPUFREQ) += s3c2412-cpufreq.o
-obj-$(CONFIG_ARM_S3C2416_CPUFREQ) += s3c2416-cpufreq.o
-obj-$(CONFIG_ARM_S3C2440_CPUFREQ) += s3c2440-cpufreq.o
obj-$(CONFIG_ARM_S3C64XX_CPUFREQ) += s3c64xx-cpufreq.o
-obj-$(CONFIG_ARM_S3C24XX_CPUFREQ) += s3c24xx-cpufreq.o
-obj-$(CONFIG_ARM_S3C24XX_CPUFREQ_DEBUGFS) += s3c24xx-cpufreq-debugfs.o
obj-$(CONFIG_ARM_S5PV210_CPUFREQ) += s5pv210-cpufreq.o
-obj-$(CONFIG_ARM_SA1100_CPUFREQ) += sa1100-cpufreq.o
obj-$(CONFIG_ARM_SA1110_CPUFREQ) += sa1110-cpufreq.o
obj-$(CONFIG_ARM_SCMI_CPUFREQ) += scmi-cpufreq.o
obj-$(CONFIG_ARM_SCPI_CPUFREQ) += scpi-cpufreq.o
@@ -111,7 +104,6 @@ obj-$(CONFIG_POWERNV_CPUFREQ) += powernv-cpufreq.o
obj-$(CONFIG_BMIPS_CPUFREQ) += bmips-cpufreq.o
obj-$(CONFIG_IA64_ACPI_CPUFREQ) += ia64-acpi-cpufreq.o
obj-$(CONFIG_LOONGSON2_CPUFREQ) += loongson2_cpufreq.o
-obj-$(CONFIG_LOONGSON1_CPUFREQ) += loongson1-cpufreq.o
obj-$(CONFIG_SH_CPU_FREQ) += sh-cpufreq.o
obj-$(CONFIG_SPARC_US2E_CPUFREQ) += sparc-us2e-cpufreq.o
obj-$(CONFIG_SPARC_US3_CPUFREQ) += sparc-us3-cpufreq.o
diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
index c17bd845f5fc..45c88894fd8e 100644
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -59,8 +59,171 @@
* we disable it by default to go acpi-cpufreq on these processors and add a
* module parameter to be able to enable it manually for debugging.
*/
+static struct cpufreq_driver *current_pstate_driver;
static struct cpufreq_driver amd_pstate_driver;
-static int cppc_load __initdata;
+static struct cpufreq_driver amd_pstate_epp_driver;
+static int cppc_state = AMD_PSTATE_DISABLE;
+struct kobject *amd_pstate_kobj;
+
+/*
+ * AMD Energy Preference Performance (EPP)
+ * The EPP is used in the CCLK DPM controller to drive
+ * the frequency that a core is going to operate during
+ * short periods of activity. EPP values will be utilized for
+ * different OS profiles (balanced, performance, power savings)
+ * display strings corresponding to EPP index in the
+ * energy_perf_strings[]
+ * index String
+ *-------------------------------------
+ * 0 default
+ * 1 performance
+ * 2 balance_performance
+ * 3 balance_power
+ * 4 power
+ */
+enum energy_perf_value_index {
+ EPP_INDEX_DEFAULT = 0,
+ EPP_INDEX_PERFORMANCE,
+ EPP_INDEX_BALANCE_PERFORMANCE,
+ EPP_INDEX_BALANCE_POWERSAVE,
+ EPP_INDEX_POWERSAVE,
+};
+
+static const char * const energy_perf_strings[] = {
+ [EPP_INDEX_DEFAULT] = "default",
+ [EPP_INDEX_PERFORMANCE] = "performance",
+ [EPP_INDEX_BALANCE_PERFORMANCE] = "balance_performance",
+ [EPP_INDEX_BALANCE_POWERSAVE] = "balance_power",
+ [EPP_INDEX_POWERSAVE] = "power",
+ NULL
+};
+
+static unsigned int epp_values[] = {
+ [EPP_INDEX_DEFAULT] = 0,
+ [EPP_INDEX_PERFORMANCE] = AMD_CPPC_EPP_PERFORMANCE,
+ [EPP_INDEX_BALANCE_PERFORMANCE] = AMD_CPPC_EPP_BALANCE_PERFORMANCE,
+ [EPP_INDEX_BALANCE_POWERSAVE] = AMD_CPPC_EPP_BALANCE_POWERSAVE,
+ [EPP_INDEX_POWERSAVE] = AMD_CPPC_EPP_POWERSAVE,
+ };
+
+static inline int get_mode_idx_from_str(const char *str, size_t size)
+{
+ int i;
+
+ for (i=0; i < AMD_PSTATE_MAX; i++) {
+ if (!strncmp(str, amd_pstate_mode_string[i], size))
+ return i;
+ }
+ return -EINVAL;
+}
+
+static DEFINE_MUTEX(amd_pstate_limits_lock);
+static DEFINE_MUTEX(amd_pstate_driver_lock);
+
+static s16 amd_pstate_get_epp(struct amd_cpudata *cpudata, u64 cppc_req_cached)
+{
+ u64 epp;
+ int ret;
+
+ if (boot_cpu_has(X86_FEATURE_CPPC)) {
+ if (!cppc_req_cached) {
+ epp = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ,
+ &cppc_req_cached);
+ if (epp)
+ return epp;
+ }
+ epp = (cppc_req_cached >> 24) & 0xFF;
+ } else {
+ ret = cppc_get_epp_perf(cpudata->cpu, &epp);
+ if (ret < 0) {
+ pr_debug("Could not retrieve energy perf value (%d)\n", ret);
+ return -EIO;
+ }
+ }
+
+ return (s16)(epp & 0xff);
+}
+
+static int amd_pstate_get_energy_pref_index(struct amd_cpudata *cpudata)
+{
+ s16 epp;
+ int index = -EINVAL;
+
+ epp = amd_pstate_get_epp(cpudata, 0);
+ if (epp < 0)
+ return epp;
+
+ switch (epp) {
+ case AMD_CPPC_EPP_PERFORMANCE:
+ index = EPP_INDEX_PERFORMANCE;
+ break;
+ case AMD_CPPC_EPP_BALANCE_PERFORMANCE:
+ index = EPP_INDEX_BALANCE_PERFORMANCE;
+ break;
+ case AMD_CPPC_EPP_BALANCE_POWERSAVE:
+ index = EPP_INDEX_BALANCE_POWERSAVE;
+ break;
+ case AMD_CPPC_EPP_POWERSAVE:
+ index = EPP_INDEX_POWERSAVE;
+ break;
+ default:
+ break;
+ }
+
+ return index;
+}
+
+static int amd_pstate_set_epp(struct amd_cpudata *cpudata, u32 epp)
+{
+ int ret;
+ struct cppc_perf_ctrls perf_ctrls;
+
+ if (boot_cpu_has(X86_FEATURE_CPPC)) {
+ u64 value = READ_ONCE(cpudata->cppc_req_cached);
+
+ value &= ~GENMASK_ULL(31, 24);
+ value |= (u64)epp << 24;
+ WRITE_ONCE(cpudata->cppc_req_cached, value);
+
+ ret = wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
+ if (!ret)
+ cpudata->epp_cached = epp;
+ } else {
+ perf_ctrls.energy_perf = epp;
+ ret = cppc_set_epp_perf(cpudata->cpu, &perf_ctrls, 1);
+ if (ret) {
+ pr_debug("failed to set energy perf value (%d)\n", ret);
+ return ret;
+ }
+ cpudata->epp_cached = epp;
+ }
+
+ return ret;
+}
+
+static int amd_pstate_set_energy_pref_index(struct amd_cpudata *cpudata,
+ int pref_index)
+{
+ int epp = -EINVAL;
+ int ret;
+
+ if (!pref_index) {
+ pr_debug("EPP pref_index is invalid\n");
+ return -EINVAL;
+ }
+
+ if (epp == -EINVAL)
+ epp = epp_values[pref_index];
+
+ if (epp > 0 && cpudata->policy == CPUFREQ_POLICY_PERFORMANCE) {
+ pr_debug("EPP cannot be set under performance policy\n");
+ return -EBUSY;
+ }
+
+ ret = amd_pstate_set_epp(cpudata, epp);
+
+ return ret;
+}
static inline int pstate_enable(bool enable)
{
@@ -70,11 +233,21 @@ static inline int pstate_enable(bool enable)
static int cppc_enable(bool enable)
{
int cpu, ret = 0;
+ struct cppc_perf_ctrls perf_ctrls;
for_each_present_cpu(cpu) {
ret = cppc_set_enable(cpu, enable);
if (ret)
return ret;
+
+ /* Enable autonomous mode for EPP */
+ if (cppc_state == AMD_PSTATE_ACTIVE) {
+ /* Set desired perf as zero to allow EPP firmware control */
+ perf_ctrls.desired_perf = 0;
+ ret = cppc_set_perf(cpu, &perf_ctrls);
+ if (ret)
+ return ret;
+ }
}
return ret;
@@ -418,7 +591,7 @@ static void amd_pstate_boost_init(struct amd_cpudata *cpudata)
return;
cpudata->boost_supported = true;
- amd_pstate_driver.boost_enabled = true;
+ current_pstate_driver->boost_enabled = true;
}
static void amd_perf_ctl_reset(unsigned int cpu)
@@ -501,6 +674,8 @@ static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
policy->driver_data = cpudata;
amd_pstate_boost_init(cpudata);
+ if (!current_pstate_driver->adjust_perf)
+ current_pstate_driver->adjust_perf = amd_pstate_adjust_perf;
return 0;
@@ -561,7 +736,7 @@ static ssize_t show_amd_pstate_max_freq(struct cpufreq_policy *policy,
if (max_freq < 0)
return max_freq;
- return sprintf(&buf[0], "%u\n", max_freq);
+ return sysfs_emit(buf, "%u\n", max_freq);
}
static ssize_t show_amd_pstate_lowest_nonlinear_freq(struct cpufreq_policy *policy,
@@ -574,7 +749,7 @@ static ssize_t show_amd_pstate_lowest_nonlinear_freq(struct cpufreq_policy *poli
if (freq < 0)
return freq;
- return sprintf(&buf[0], "%u\n", freq);
+ return sysfs_emit(buf, "%u\n", freq);
}
/*
@@ -589,13 +764,151 @@ static ssize_t show_amd_pstate_highest_perf(struct cpufreq_policy *policy,
perf = READ_ONCE(cpudata->highest_perf);
- return sprintf(&buf[0], "%u\n", perf);
+ return sysfs_emit(buf, "%u\n", perf);
+}
+
+static ssize_t show_energy_performance_available_preferences(
+ struct cpufreq_policy *policy, char *buf)
+{
+ int i = 0;
+ int offset = 0;
+
+ while (energy_perf_strings[i] != NULL)
+ offset += sysfs_emit_at(buf, offset, "%s ", energy_perf_strings[i++]);
+
+ sysfs_emit_at(buf, offset, "\n");
+
+ return offset;
+}
+
+static ssize_t store_energy_performance_preference(
+ struct cpufreq_policy *policy, const char *buf, size_t count)
+{
+ struct amd_cpudata *cpudata = policy->driver_data;
+ char str_preference[21];
+ ssize_t ret;
+
+ ret = sscanf(buf, "%20s", str_preference);
+ if (ret != 1)
+ return -EINVAL;
+
+ ret = match_string(energy_perf_strings, -1, str_preference);
+ if (ret < 0)
+ return -EINVAL;
+
+ mutex_lock(&amd_pstate_limits_lock);
+ ret = amd_pstate_set_energy_pref_index(cpudata, ret);
+ mutex_unlock(&amd_pstate_limits_lock);
+
+ return ret ?: count;
+}
+
+static ssize_t show_energy_performance_preference(
+ struct cpufreq_policy *policy, char *buf)
+{
+ struct amd_cpudata *cpudata = policy->driver_data;
+ int preference;
+
+ preference = amd_pstate_get_energy_pref_index(cpudata);
+ if (preference < 0)
+ return preference;
+
+ return sysfs_emit(buf, "%s\n", energy_perf_strings[preference]);
+}
+
+static ssize_t amd_pstate_show_status(char *buf)
+{
+ if (!current_pstate_driver)
+ return sysfs_emit(buf, "disable\n");
+
+ return sysfs_emit(buf, "%s\n", amd_pstate_mode_string[cppc_state]);
+}
+
+static void amd_pstate_driver_cleanup(void)
+{
+ current_pstate_driver = NULL;
+}
+
+static int amd_pstate_update_status(const char *buf, size_t size)
+{
+ int ret = 0;
+ int mode_idx;
+
+ if (size > 7 || size < 6)
+ return -EINVAL;
+ mode_idx = get_mode_idx_from_str(buf, size);
+
+ switch(mode_idx) {
+ case AMD_PSTATE_DISABLE:
+ if (!current_pstate_driver)
+ return -EINVAL;
+ if (cppc_state == AMD_PSTATE_ACTIVE)
+ return -EBUSY;
+ cpufreq_unregister_driver(current_pstate_driver);
+ amd_pstate_driver_cleanup();
+ break;
+ case AMD_PSTATE_PASSIVE:
+ if (current_pstate_driver) {
+ if (current_pstate_driver == &amd_pstate_driver)
+ return 0;
+ cpufreq_unregister_driver(current_pstate_driver);
+ cppc_state = AMD_PSTATE_PASSIVE;
+ current_pstate_driver = &amd_pstate_driver;
+ }
+
+ ret = cpufreq_register_driver(current_pstate_driver);
+ break;
+ case AMD_PSTATE_ACTIVE:
+ if (current_pstate_driver) {
+ if (current_pstate_driver == &amd_pstate_epp_driver)
+ return 0;
+ cpufreq_unregister_driver(current_pstate_driver);
+ current_pstate_driver = &amd_pstate_epp_driver;
+ cppc_state = AMD_PSTATE_ACTIVE;
+ }
+
+ ret = cpufreq_register_driver(current_pstate_driver);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static ssize_t show_status(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ ssize_t ret;
+
+ mutex_lock(&amd_pstate_driver_lock);
+ ret = amd_pstate_show_status(buf);
+ mutex_unlock(&amd_pstate_driver_lock);
+
+ return ret;
+}
+
+static ssize_t store_status(struct kobject *a, struct kobj_attribute *b,
+ const char *buf, size_t count)
+{
+ char *p = memchr(buf, '\n', count);
+ int ret;
+
+ mutex_lock(&amd_pstate_driver_lock);
+ ret = amd_pstate_update_status(buf, p ? p - buf : count);
+ mutex_unlock(&amd_pstate_driver_lock);
+
+ return ret < 0 ? ret : count;
}
cpufreq_freq_attr_ro(amd_pstate_max_freq);
cpufreq_freq_attr_ro(amd_pstate_lowest_nonlinear_freq);
cpufreq_freq_attr_ro(amd_pstate_highest_perf);
+cpufreq_freq_attr_rw(energy_performance_preference);
+cpufreq_freq_attr_ro(energy_performance_available_preferences);
+define_one_global_rw(status);
static struct freq_attr *amd_pstate_attr[] = {
&amd_pstate_max_freq,
@@ -604,6 +917,313 @@ static struct freq_attr *amd_pstate_attr[] = {
NULL,
};
+static struct freq_attr *amd_pstate_epp_attr[] = {
+ &amd_pstate_max_freq,
+ &amd_pstate_lowest_nonlinear_freq,
+ &amd_pstate_highest_perf,
+ &energy_performance_preference,
+ &energy_performance_available_preferences,
+ NULL,
+};
+
+static struct attribute *pstate_global_attributes[] = {
+ &status.attr,
+ NULL
+};
+
+static const struct attribute_group amd_pstate_global_attr_group = {
+ .attrs = pstate_global_attributes,
+};
+
+static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
+{
+ int min_freq, max_freq, nominal_freq, lowest_nonlinear_freq, ret;
+ struct amd_cpudata *cpudata;
+ struct device *dev;
+ u64 value;
+
+ /*
+ * Resetting PERF_CTL_MSR will put the CPU in P0 frequency,
+ * which is ideal for initialization process.
+ */
+ amd_perf_ctl_reset(policy->cpu);
+ dev = get_cpu_device(policy->cpu);
+ if (!dev)
+ return -ENODEV;
+
+ cpudata = kzalloc(sizeof(*cpudata), GFP_KERNEL);
+ if (!cpudata)
+ return -ENOMEM;
+
+ cpudata->cpu = policy->cpu;
+ cpudata->epp_policy = 0;
+
+ ret = amd_pstate_init_perf(cpudata);
+ if (ret)
+ goto free_cpudata1;
+
+ min_freq = amd_get_min_freq(cpudata);
+ max_freq = amd_get_max_freq(cpudata);
+ nominal_freq = amd_get_nominal_freq(cpudata);
+ lowest_nonlinear_freq = amd_get_lowest_nonlinear_freq(cpudata);
+ if (min_freq < 0 || max_freq < 0 || min_freq > max_freq) {
+ dev_err(dev, "min_freq(%d) or max_freq(%d) value is incorrect\n",
+ min_freq, max_freq);
+ ret = -EINVAL;
+ goto free_cpudata1;
+ }
+
+ policy->cpuinfo.min_freq = min_freq;
+ policy->cpuinfo.max_freq = max_freq;
+ /* It will be updated by governor */
+ policy->cur = policy->cpuinfo.min_freq;
+
+ /* Initial processor data capability frequencies */
+ cpudata->max_freq = max_freq;
+ cpudata->min_freq = min_freq;
+ cpudata->nominal_freq = nominal_freq;
+ cpudata->lowest_nonlinear_freq = lowest_nonlinear_freq;
+
+ policy->driver_data = cpudata;
+
+ cpudata->epp_cached = amd_pstate_get_epp(cpudata, 0);
+
+ policy->min = policy->cpuinfo.min_freq;
+ policy->max = policy->cpuinfo.max_freq;
+
+ /*
+ * Set the policy to powersave to provide a valid fallback value in case
+ * the default cpufreq governor is neither powersave nor performance.
+ */
+ policy->policy = CPUFREQ_POLICY_POWERSAVE;
+
+ if (boot_cpu_has(X86_FEATURE_CPPC)) {
+ policy->fast_switch_possible = true;
+ ret = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, &value);
+ if (ret)
+ return ret;
+ WRITE_ONCE(cpudata->cppc_req_cached, value);
+
+ ret = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_CAP1, &value);
+ if (ret)
+ return ret;
+ WRITE_ONCE(cpudata->cppc_cap1_cached, value);
+ }
+ amd_pstate_boost_init(cpudata);
+
+ return 0;
+
+free_cpudata1:
+ kfree(cpudata);
+ return ret;
+}
+
+static int amd_pstate_epp_cpu_exit(struct cpufreq_policy *policy)
+{
+ pr_debug("CPU %d exiting\n", policy->cpu);
+ policy->fast_switch_possible = false;
+ return 0;
+}
+
+static void amd_pstate_epp_init(unsigned int cpu)
+{
+ struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+ struct amd_cpudata *cpudata = policy->driver_data;
+ u32 max_perf, min_perf;
+ u64 value;
+ s16 epp;
+
+ max_perf = READ_ONCE(cpudata->highest_perf);
+ min_perf = READ_ONCE(cpudata->lowest_perf);
+
+ value = READ_ONCE(cpudata->cppc_req_cached);
+
+ if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
+ min_perf = max_perf;
+
+ /* Initial min/max values for CPPC Performance Controls Register */
+ value &= ~AMD_CPPC_MIN_PERF(~0L);
+ value |= AMD_CPPC_MIN_PERF(min_perf);
+
+ value &= ~AMD_CPPC_MAX_PERF(~0L);
+ value |= AMD_CPPC_MAX_PERF(max_perf);
+
+ /* CPPC EPP feature require to set zero to the desire perf bit */
+ value &= ~AMD_CPPC_DES_PERF(~0L);
+ value |= AMD_CPPC_DES_PERF(0);
+
+ if (cpudata->epp_policy == cpudata->policy)
+ goto skip_epp;
+
+ cpudata->epp_policy = cpudata->policy;
+
+ /* Get BIOS pre-defined epp value */
+ epp = amd_pstate_get_epp(cpudata, value);
+ if (epp < 0) {
+ /**
+ * This return value can only be negative for shared_memory
+ * systems where EPP register read/write not supported.
+ */
+ goto skip_epp;
+ }
+
+ if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
+ epp = 0;
+
+ /* Set initial EPP value */
+ if (boot_cpu_has(X86_FEATURE_CPPC)) {
+ value &= ~GENMASK_ULL(31, 24);
+ value |= (u64)epp << 24;
+ }
+
+ WRITE_ONCE(cpudata->cppc_req_cached, value);
+ amd_pstate_set_epp(cpudata, epp);
+skip_epp:
+ cpufreq_cpu_put(policy);
+}
+
+static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy)
+{
+ struct amd_cpudata *cpudata = policy->driver_data;
+
+ if (!policy->cpuinfo.max_freq)
+ return -ENODEV;
+
+ pr_debug("set_policy: cpuinfo.max %u policy->max %u\n",
+ policy->cpuinfo.max_freq, policy->max);
+
+ cpudata->policy = policy->policy;
+
+ amd_pstate_epp_init(policy->cpu);
+
+ return 0;
+}
+
+static void amd_pstate_epp_reenable(struct amd_cpudata *cpudata)
+{
+ struct cppc_perf_ctrls perf_ctrls;
+ u64 value, max_perf;
+ int ret;
+
+ ret = amd_pstate_enable(true);
+ if (ret)
+ pr_err("failed to enable amd pstate during resume, return %d\n", ret);
+
+ value = READ_ONCE(cpudata->cppc_req_cached);
+ max_perf = READ_ONCE(cpudata->highest_perf);
+
+ if (boot_cpu_has(X86_FEATURE_CPPC)) {
+ wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
+ } else {
+ perf_ctrls.max_perf = max_perf;
+ perf_ctrls.energy_perf = AMD_CPPC_ENERGY_PERF_PREF(cpudata->epp_cached);
+ cppc_set_perf(cpudata->cpu, &perf_ctrls);
+ }
+}
+
+static int amd_pstate_epp_cpu_online(struct cpufreq_policy *policy)
+{
+ struct amd_cpudata *cpudata = policy->driver_data;
+
+ pr_debug("AMD CPU Core %d going online\n", cpudata->cpu);
+
+ if (cppc_state == AMD_PSTATE_ACTIVE) {
+ amd_pstate_epp_reenable(cpudata);
+ cpudata->suspended = false;
+ }
+
+ return 0;
+}
+
+static void amd_pstate_epp_offline(struct cpufreq_policy *policy)
+{
+ struct amd_cpudata *cpudata = policy->driver_data;
+ struct cppc_perf_ctrls perf_ctrls;
+ int min_perf;
+ u64 value;
+
+ min_perf = READ_ONCE(cpudata->lowest_perf);
+ value = READ_ONCE(cpudata->cppc_req_cached);
+
+ mutex_lock(&amd_pstate_limits_lock);
+ if (boot_cpu_has(X86_FEATURE_CPPC)) {
+ cpudata->epp_policy = CPUFREQ_POLICY_UNKNOWN;
+
+ /* Set max perf same as min perf */
+ value &= ~AMD_CPPC_MAX_PERF(~0L);
+ value |= AMD_CPPC_MAX_PERF(min_perf);
+ value &= ~AMD_CPPC_MIN_PERF(~0L);
+ value |= AMD_CPPC_MIN_PERF(min_perf);
+ wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
+ } else {
+ perf_ctrls.desired_perf = 0;
+ perf_ctrls.max_perf = min_perf;
+ perf_ctrls.energy_perf = AMD_CPPC_ENERGY_PERF_PREF(HWP_EPP_BALANCE_POWERSAVE);
+ cppc_set_perf(cpudata->cpu, &perf_ctrls);
+ }
+ mutex_unlock(&amd_pstate_limits_lock);
+}
+
+static int amd_pstate_epp_cpu_offline(struct cpufreq_policy *policy)
+{
+ struct amd_cpudata *cpudata = policy->driver_data;
+
+ pr_debug("AMD CPU Core %d going offline\n", cpudata->cpu);
+
+ if (cpudata->suspended)
+ return 0;
+
+ if (cppc_state == AMD_PSTATE_ACTIVE)
+ amd_pstate_epp_offline(policy);
+
+ return 0;
+}
+
+static int amd_pstate_epp_verify_policy(struct cpufreq_policy_data *policy)
+{
+ cpufreq_verify_within_cpu_limits(policy);
+ pr_debug("policy_max =%d, policy_min=%d\n", policy->max, policy->min);
+ return 0;
+}
+
+static int amd_pstate_epp_suspend(struct cpufreq_policy *policy)
+{
+ struct amd_cpudata *cpudata = policy->driver_data;
+ int ret;
+
+ /* avoid suspending when EPP is not enabled */
+ if (cppc_state != AMD_PSTATE_ACTIVE)
+ return 0;
+
+ /* set this flag to avoid setting core offline*/
+ cpudata->suspended = true;
+
+ /* disable CPPC in lowlevel firmware */
+ ret = amd_pstate_enable(false);
+ if (ret)
+ pr_err("failed to suspend, return %d\n", ret);
+
+ return 0;
+}
+
+static int amd_pstate_epp_resume(struct cpufreq_policy *policy)
+{
+ struct amd_cpudata *cpudata = policy->driver_data;
+
+ if (cpudata->suspended) {
+ mutex_lock(&amd_pstate_limits_lock);
+
+ /* enable amd pstate from suspend state*/
+ amd_pstate_epp_reenable(cpudata);
+
+ mutex_unlock(&amd_pstate_limits_lock);
+
+ cpudata->suspended = false;
+ }
+
+ return 0;
+}
+
static struct cpufreq_driver amd_pstate_driver = {
.flags = CPUFREQ_CONST_LOOPS | CPUFREQ_NEED_UPDATE_LIMITS,
.verify = amd_pstate_verify,
@@ -617,6 +1237,20 @@ static struct cpufreq_driver amd_pstate_driver = {
.attr = amd_pstate_attr,
};
+static struct cpufreq_driver amd_pstate_epp_driver = {
+ .flags = CPUFREQ_CONST_LOOPS,
+ .verify = amd_pstate_epp_verify_policy,
+ .setpolicy = amd_pstate_epp_set_policy,
+ .init = amd_pstate_epp_cpu_init,
+ .exit = amd_pstate_epp_cpu_exit,
+ .offline = amd_pstate_epp_cpu_offline,
+ .online = amd_pstate_epp_cpu_online,
+ .suspend = amd_pstate_epp_suspend,
+ .resume = amd_pstate_epp_resume,
+ .name = "amd_pstate_epp",
+ .attr = amd_pstate_epp_attr,
+};
+
static int __init amd_pstate_init(void)
{
int ret;
@@ -626,10 +1260,10 @@ static int __init amd_pstate_init(void)
/*
* by default the pstate driver is disabled to load
* enable the amd_pstate passive mode driver explicitly
- * with amd_pstate=passive in kernel command line
+ * with amd_pstate=passive or other modes in kernel command line
*/
- if (!cppc_load) {
- pr_debug("driver load is disabled, boot with amd_pstate=passive to enable this\n");
+ if (cppc_state == AMD_PSTATE_DISABLE) {
+ pr_debug("driver load is disabled, boot with specific mode to enable this\n");
return -ENODEV;
}
@@ -645,7 +1279,8 @@ static int __init amd_pstate_init(void)
/* capability check */
if (boot_cpu_has(X86_FEATURE_CPPC)) {
pr_debug("AMD CPPC MSR based functionality is supported\n");
- amd_pstate_driver.adjust_perf = amd_pstate_adjust_perf;
+ if (cppc_state == AMD_PSTATE_PASSIVE)
+ current_pstate_driver->adjust_perf = amd_pstate_adjust_perf;
} else {
pr_debug("AMD CPPC shared memory based functionality is supported\n");
static_call_update(amd_pstate_enable, cppc_enable);
@@ -656,31 +1291,63 @@ static int __init amd_pstate_init(void)
/* enable amd pstate feature */
ret = amd_pstate_enable(true);
if (ret) {
- pr_err("failed to enable amd-pstate with return %d\n", ret);
+ pr_err("failed to enable with return %d\n", ret);
return ret;
}
- ret = cpufreq_register_driver(&amd_pstate_driver);
+ ret = cpufreq_register_driver(current_pstate_driver);
if (ret)
- pr_err("failed to register amd_pstate_driver with return %d\n",
- ret);
+ pr_err("failed to register with return %d\n", ret);
+
+ amd_pstate_kobj = kobject_create_and_add("amd_pstate", &cpu_subsys.dev_root->kobj);
+ if (!amd_pstate_kobj) {
+ ret = -EINVAL;
+ pr_err("global sysfs registration failed.\n");
+ goto kobject_free;
+ }
+
+ ret = sysfs_create_group(amd_pstate_kobj, &amd_pstate_global_attr_group);
+ if (ret) {
+ pr_err("sysfs attribute export failed with error %d.\n", ret);
+ goto global_attr_free;
+ }
return ret;
+
+global_attr_free:
+ kobject_put(amd_pstate_kobj);
+kobject_free:
+ cpufreq_unregister_driver(current_pstate_driver);
+ return ret;
}
device_initcall(amd_pstate_init);
static int __init amd_pstate_param(char *str)
{
+ size_t size;
+ int mode_idx;
+
if (!str)
return -EINVAL;
- if (!strcmp(str, "disable")) {
- cppc_load = 0;
- pr_info("driver is explicitly disabled\n");
- } else if (!strcmp(str, "passive"))
- cppc_load = 1;
+ size = strlen(str);
+ mode_idx = get_mode_idx_from_str(str, size);
- return 0;
+ if (mode_idx >= AMD_PSTATE_DISABLE && mode_idx < AMD_PSTATE_MAX) {
+ cppc_state = mode_idx;
+ if (cppc_state == AMD_PSTATE_DISABLE)
+ pr_info("driver is explicitly disabled\n");
+
+ if (cppc_state == AMD_PSTATE_ACTIVE)
+ current_pstate_driver = &amd_pstate_epp_driver;
+
+ if (cppc_state == AMD_PSTATE_PASSIVE)
+ current_pstate_driver = &amd_pstate_driver;
+
+ return 0;
+ }
+
+ return -EINVAL;
}
early_param("amd_pstate", amd_pstate_param);
diff --git a/drivers/cpufreq/brcmstb-avs-cpufreq.c b/drivers/cpufreq/brcmstb-avs-cpufreq.c
index 4153150e20db..ffea6402189d 100644
--- a/drivers/cpufreq/brcmstb-avs-cpufreq.c
+++ b/drivers/cpufreq/brcmstb-avs-cpufreq.c
@@ -751,10 +751,7 @@ static int brcm_avs_cpufreq_probe(struct platform_device *pdev)
static int brcm_avs_cpufreq_remove(struct platform_device *pdev)
{
- int ret;
-
- ret = cpufreq_unregister_driver(&brcm_avs_driver);
- WARN_ON(ret);
+ cpufreq_unregister_driver(&brcm_avs_driver);
brcm_avs_prepare_uninit(pdev);
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 7e56a42750ea..6d8fd3b8dcb5 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -993,7 +993,7 @@ static const struct sysfs_ops sysfs_ops = {
.store = store,
};
-static struct kobj_type ktype_cpufreq = {
+static const struct kobj_type ktype_cpufreq = {
.sysfs_ops = &sysfs_ops,
.default_groups = cpufreq_groups,
.release = cpufreq_sysfs_release,
@@ -2904,12 +2904,12 @@ EXPORT_SYMBOL_GPL(cpufreq_register_driver);
* Returns zero if successful, and -EINVAL if the cpufreq_driver is
* currently not initialised.
*/
-int cpufreq_unregister_driver(struct cpufreq_driver *driver)
+void cpufreq_unregister_driver(struct cpufreq_driver *driver)
{
unsigned long flags;
- if (!cpufreq_driver || (driver != cpufreq_driver))
- return -EINVAL;
+ if (WARN_ON(!cpufreq_driver || (driver != cpufreq_driver)))
+ return;
pr_debug("unregistering driver %s\n", driver->name);
@@ -2926,8 +2926,6 @@ int cpufreq_unregister_driver(struct cpufreq_driver *driver)
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
cpus_read_unlock();
-
- return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
diff --git a/drivers/cpufreq/davinci-cpufreq.c b/drivers/cpufreq/davinci-cpufreq.c
index 9e97f60f8199..ebb3a8102681 100644
--- a/drivers/cpufreq/davinci-cpufreq.c
+++ b/drivers/cpufreq/davinci-cpufreq.c
@@ -133,12 +133,14 @@ static int __init davinci_cpufreq_probe(struct platform_device *pdev)
static int __exit davinci_cpufreq_remove(struct platform_device *pdev)
{
+ cpufreq_unregister_driver(&davinci_driver);
+
clk_put(cpufreq.armclk);
if (cpufreq.asyncclk)
clk_put(cpufreq.asyncclk);
- return cpufreq_unregister_driver(&davinci_driver);
+ return 0;
}
static struct platform_driver davinci_cpufreq_driver = {
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index fd73d6d2b808..cb4beec27555 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -452,20 +452,6 @@ static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
(u32) cpu->acpi_perf_data.states[i].control);
}
- /*
- * The _PSS table doesn't contain whole turbo frequency range.
- * This just contains +1 MHZ above the max non turbo frequency,
- * with control value corresponding to max turbo ratio. But
- * when cpufreq set policy is called, it will call with this
- * max frequency, which will cause a reduced performance as
- * this driver uses real max turbo frequency as the max
- * frequency. So correct this frequency in _PSS table to
- * correct max turbo frequency based on the turbo state.
- * Also need to convert to MHz as _PSS freq is in MHz.
- */
- if (!global.turbo_disabled)
- cpu->acpi_perf_data.states[0].core_frequency =
- policy->cpuinfo.max_freq / 1000;
cpu->valid_pss_table = true;
pr_debug("_PPC limits will be enforced\n");
diff --git a/drivers/cpufreq/loongson1-cpufreq.c b/drivers/cpufreq/loongson1-cpufreq.c
deleted file mode 100644
index fb72d709db56..000000000000
--- a/drivers/cpufreq/loongson1-cpufreq.c
+++ /dev/null
@@ -1,222 +0,0 @@
-/*
- * CPU Frequency Scaling for Loongson 1 SoC
- *
- * Copyright (C) 2014-2016 Zhang, Keguang <keguang.zhang@gmail.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#include <linux/clk.h>
-#include <linux/clk-provider.h>
-#include <linux/cpu.h>
-#include <linux/cpufreq.h>
-#include <linux/delay.h>
-#include <linux/io.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-
-#include <cpufreq.h>
-#include <loongson1.h>
-
-struct ls1x_cpufreq {
- struct device *dev;
- struct clk *clk; /* CPU clk */
- struct clk *mux_clk; /* MUX of CPU clk */
- struct clk *pll_clk; /* PLL clk */
- struct clk *osc_clk; /* OSC clk */
- unsigned int max_freq;
- unsigned int min_freq;
-};
-
-static struct ls1x_cpufreq *cpufreq;
-
-static int ls1x_cpufreq_notifier(struct notifier_block *nb,
- unsigned long val, void *data)
-{
- if (val == CPUFREQ_POSTCHANGE)
- current_cpu_data.udelay_val = loops_per_jiffy;
-
- return NOTIFY_OK;
-}
-
-static struct notifier_block ls1x_cpufreq_notifier_block = {
- .notifier_call = ls1x_cpufreq_notifier
-};
-
-static int ls1x_cpufreq_target(struct cpufreq_policy *policy,
- unsigned int index)
-{
- struct device *cpu_dev = get_cpu_device(policy->cpu);
- unsigned int old_freq, new_freq;
-
- old_freq = policy->cur;
- new_freq = policy->freq_table[index].frequency;
-
- /*
- * The procedure of reconfiguring CPU clk is as below.
- *
- * - Reparent CPU clk to OSC clk
- * - Reset CPU clock (very important)
- * - Reconfigure CPU DIV
- * - Reparent CPU clk back to CPU DIV clk
- */
-
- clk_set_parent(policy->clk, cpufreq->osc_clk);
- __raw_writel(__raw_readl(LS1X_CLK_PLL_DIV) | RST_CPU_EN | RST_CPU,
- LS1X_CLK_PLL_DIV);
- __raw_writel(__raw_readl(LS1X_CLK_PLL_DIV) & ~(RST_CPU_EN | RST_CPU),
- LS1X_CLK_PLL_DIV);
- clk_set_rate(cpufreq->mux_clk, new_freq * 1000);
- clk_set_parent(policy->clk, cpufreq->mux_clk);
- dev_dbg(cpu_dev, "%u KHz --> %u KHz\n", old_freq, new_freq);
-
- return 0;
-}
-
-static int ls1x_cpufreq_init(struct cpufreq_policy *policy)
-{
- struct device *cpu_dev = get_cpu_device(policy->cpu);
- struct cpufreq_frequency_table *freq_tbl;
- unsigned int pll_freq, freq;
- int steps, i;
-
- pll_freq = clk_get_rate(cpufreq->pll_clk) / 1000;
-
- steps = 1 << DIV_CPU_WIDTH;
- freq_tbl = kcalloc(steps, sizeof(*freq_tbl), GFP_KERNEL);
- if (!freq_tbl)
- return -ENOMEM;
-
- for (i = 0; i < (steps - 1); i++) {
- freq = pll_freq / (i + 1);
- if ((freq < cpufreq->min_freq) || (freq > cpufreq->max_freq))
- freq_tbl[i].frequency = CPUFREQ_ENTRY_INVALID;
- else
- freq_tbl[i].frequency = freq;
- dev_dbg(cpu_dev,
- "cpufreq table: index %d: frequency %d\n", i,
- freq_tbl[i].frequency);
- }
- freq_tbl[i].frequency = CPUFREQ_TABLE_END;
-
- policy->clk = cpufreq->clk;
- cpufreq_generic_init(policy, freq_tbl, 0);
-
- return 0;
-}
-
-static int ls1x_cpufreq_exit(struct cpufreq_policy *policy)
-{
- kfree(policy->freq_table);
- return 0;
-}
-
-static struct cpufreq_driver ls1x_cpufreq_driver = {
- .name = "cpufreq-ls1x",
- .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
- .verify = cpufreq_generic_frequency_table_verify,
- .target_index = ls1x_cpufreq_target,
- .get = cpufreq_generic_get,
- .init = ls1x_cpufreq_init,
- .exit = ls1x_cpufreq_exit,
- .attr = cpufreq_generic_attr,
-};
-
-static int ls1x_cpufreq_remove(struct platform_device *pdev)
-{
- cpufreq_unregister_notifier(&ls1x_cpufreq_notifier_block,
- CPUFREQ_TRANSITION_NOTIFIER);
- cpufreq_unregister_driver(&ls1x_cpufreq_driver);
-
- return 0;
-}
-
-static int ls1x_cpufreq_probe(struct platform_device *pdev)
-{
- struct plat_ls1x_cpufreq *pdata = dev_get_platdata(&pdev->dev);
- struct clk *clk;
- int ret;
-
- if (!pdata || !pdata->clk_name || !pdata->osc_clk_name) {
- dev_err(&pdev->dev, "platform data missing\n");
- return -EINVAL;
- }
-
- cpufreq =
- devm_kzalloc(&pdev->dev, sizeof(struct ls1x_cpufreq), GFP_KERNEL);
- if (!cpufreq)
- return -ENOMEM;
-
- cpufreq->dev = &pdev->dev;
-
- clk = devm_clk_get(&pdev->dev, pdata->clk_name);
- if (IS_ERR(clk)) {
- dev_err(&pdev->dev, "unable to get %s clock\n",
- pdata->clk_name);
- return PTR_ERR(clk);
- }
- cpufreq->clk = clk;
-
- clk = clk_get_parent(clk);
- if (IS_ERR(clk)) {
- dev_err(&pdev->dev, "unable to get parent of %s clock\n",
- __clk_get_name(cpufreq->clk));
- return PTR_ERR(clk);
- }
- cpufreq->mux_clk = clk;
-
- clk = clk_get_parent(clk);
- if (IS_ERR(clk)) {
- dev_err(&pdev->dev, "unable to get parent of %s clock\n",
- __clk_get_name(cpufreq->mux_clk));
- return PTR_ERR(clk);
- }
- cpufreq->pll_clk = clk;
-
- clk = devm_clk_get(&pdev->dev, pdata->osc_clk_name);
- if (IS_ERR(clk)) {
- dev_err(&pdev->dev, "unable to get %s clock\n",
- pdata->osc_clk_name);
- return PTR_ERR(clk);
- }
- cpufreq->osc_clk = clk;
-
- cpufreq->max_freq = pdata->max_freq;
- cpufreq->min_freq = pdata->min_freq;
-
- ret = cpufreq_register_driver(&ls1x_cpufreq_driver);
- if (ret) {
- dev_err(&pdev->dev,
- "failed to register CPUFreq driver: %d\n", ret);
- return ret;
- }
-
- ret = cpufreq_register_notifier(&ls1x_cpufreq_notifier_block,
- CPUFREQ_TRANSITION_NOTIFIER);
-
- if (ret) {
- dev_err(&pdev->dev,
- "failed to register CPUFreq notifier: %d\n",ret);
- cpufreq_unregister_driver(&ls1x_cpufreq_driver);
- }
-
- return ret;
-}
-
-static struct platform_driver ls1x_cpufreq_platdrv = {
- .probe = ls1x_cpufreq_probe,
- .remove = ls1x_cpufreq_remove,
- .driver = {
- .name = "ls1x-cpufreq",
- },
-};
-
-module_platform_driver(ls1x_cpufreq_platdrv);
-
-MODULE_ALIAS("platform:ls1x-cpufreq");
-MODULE_AUTHOR("Kelvin Cheung <keguang.zhang@gmail.com>");
-MODULE_DESCRIPTION("Loongson1 CPUFreq driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/mediatek-cpufreq-hw.c b/drivers/cpufreq/mediatek-cpufreq-hw.c
index f80339779084..b22f5cc8a463 100644
--- a/drivers/cpufreq/mediatek-cpufreq-hw.c
+++ b/drivers/cpufreq/mediatek-cpufreq-hw.c
@@ -317,13 +317,16 @@ static int mtk_cpufreq_hw_driver_probe(struct platform_device *pdev)
static int mtk_cpufreq_hw_driver_remove(struct platform_device *pdev)
{
- return cpufreq_unregister_driver(&cpufreq_mtk_hw_driver);
+ cpufreq_unregister_driver(&cpufreq_mtk_hw_driver);
+
+ return 0;
}
static const struct of_device_id mtk_cpufreq_hw_match[] = {
{ .compatible = "mediatek,cpufreq-hw", .data = &cpufreq_mtk_offsets },
{}
};
+MODULE_DEVICE_TABLE(of, mtk_cpufreq_hw_match);
static struct platform_driver mtk_cpufreq_hw_driver = {
.probe = mtk_cpufreq_hw_driver_probe,
diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c
index 1b50df06c6bc..81649a1969b6 100644
--- a/drivers/cpufreq/omap-cpufreq.c
+++ b/drivers/cpufreq/omap-cpufreq.c
@@ -184,7 +184,9 @@ static int omap_cpufreq_probe(struct platform_device *pdev)
static int omap_cpufreq_remove(struct platform_device *pdev)
{
- return cpufreq_unregister_driver(&omap_driver);
+ cpufreq_unregister_driver(&omap_driver);
+
+ return 0;
}
static struct platform_driver omap_cpufreq_platdrv = {
diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c
index 9505a812d6a1..2f581d2d617d 100644
--- a/drivers/cpufreq/qcom-cpufreq-hw.c
+++ b/drivers/cpufreq/qcom-cpufreq-hw.c
@@ -143,40 +143,42 @@ static unsigned long qcom_lmh_get_throttle_freq(struct qcom_cpufreq_data *data)
return lval * xo_rate;
}
-/* Get the current frequency of the CPU (after throttling) */
-static unsigned int qcom_cpufreq_hw_get(unsigned int cpu)
+/* Get the frequency requested by the cpufreq core for the CPU */
+static unsigned int qcom_cpufreq_get_freq(unsigned int cpu)
{
struct qcom_cpufreq_data *data;
+ const struct qcom_cpufreq_soc_data *soc_data;
struct cpufreq_policy *policy;
+ unsigned int index;
policy = cpufreq_cpu_get_raw(cpu);
if (!policy)
return 0;
data = policy->driver_data;
+ soc_data = qcom_cpufreq.soc_data;
- return qcom_lmh_get_throttle_freq(data) / HZ_PER_KHZ;
+ index = readl_relaxed(data->base + soc_data->reg_perf_state);
+ index = min(index, LUT_MAX_ENTRIES - 1);
+
+ return policy->freq_table[index].frequency;
}
-/* Get the frequency requested by the cpufreq core for the CPU */
-static unsigned int qcom_cpufreq_get_freq(unsigned int cpu)
+static unsigned int qcom_cpufreq_hw_get(unsigned int cpu)
{
struct qcom_cpufreq_data *data;
- const struct qcom_cpufreq_soc_data *soc_data;
struct cpufreq_policy *policy;
- unsigned int index;
policy = cpufreq_cpu_get_raw(cpu);
if (!policy)
return 0;
data = policy->driver_data;
- soc_data = qcom_cpufreq.soc_data;
- index = readl_relaxed(data->base + soc_data->reg_perf_state);
- index = min(index, LUT_MAX_ENTRIES - 1);
+ if (data->throttle_irq >= 0)
+ return qcom_lmh_get_throttle_freq(data) / HZ_PER_KHZ;
- return policy->freq_table[index].frequency;
+ return qcom_cpufreq_get_freq(cpu);
}
static unsigned int qcom_cpufreq_hw_fast_switch(struct cpufreq_policy *policy,
@@ -704,6 +706,8 @@ static int qcom_cpufreq_hw_driver_probe(struct platform_device *pdev)
return -ENOMEM;
qcom_cpufreq.soc_data = of_device_get_match_data(dev);
+ if (!qcom_cpufreq.soc_data)
+ return -ENODEV;
clk_data = devm_kzalloc(dev, struct_size(clk_data, hws, num_domains), GFP_KERNEL);
if (!clk_data)
@@ -766,7 +770,9 @@ of_exit:
static int qcom_cpufreq_hw_driver_remove(struct platform_device *pdev)
{
- return cpufreq_unregister_driver(&cpufreq_qcom_hw_driver);
+ cpufreq_unregister_driver(&cpufreq_qcom_hw_driver);
+
+ return 0;
}
static struct platform_driver qcom_cpufreq_hw_driver = {
diff --git a/drivers/cpufreq/s3c2410-cpufreq.c b/drivers/cpufreq/s3c2410-cpufreq.c
deleted file mode 100644
index 5dcfbf0bfb74..000000000000
--- a/drivers/cpufreq/s3c2410-cpufreq.c
+++ /dev/null
@@ -1,155 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2006-2008 Simtec Electronics
- * http://armlinux.simtec.co.uk/
- * Ben Dooks <ben@simtec.co.uk>
- *
- * S3C2410 CPU Frequency scaling
-*/
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/cpufreq.h>
-#include <linux/device.h>
-#include <linux/clk.h>
-#include <linux/err.h>
-#include <linux/io.h>
-#include <linux/soc/samsung/s3c-cpufreq-core.h>
-#include <linux/soc/samsung/s3c-pm.h>
-
-#include <asm/mach/arch.h>
-#include <asm/mach/map.h>
-
-#define S3C2410_CLKDIVN_PDIVN (1<<0)
-#define S3C2410_CLKDIVN_HDIVN (1<<1)
-
-/* Note, 2410A has an extra mode for 1:4:4 ratio, bit 2 of CLKDIV */
-
-static void s3c2410_cpufreq_setdivs(struct s3c_cpufreq_config *cfg)
-{
- u32 clkdiv = 0;
-
- if (cfg->divs.h_divisor == 2)
- clkdiv |= S3C2410_CLKDIVN_HDIVN;
-
- if (cfg->divs.p_divisor != cfg->divs.h_divisor)
- clkdiv |= S3C2410_CLKDIVN_PDIVN;
-
- s3c24xx_write_clkdivn(clkdiv);
-}
-
-static int s3c2410_cpufreq_calcdivs(struct s3c_cpufreq_config *cfg)
-{
- unsigned long hclk, fclk, pclk;
- unsigned int hdiv, pdiv;
- unsigned long hclk_max;
-
- fclk = cfg->freq.fclk;
- hclk_max = cfg->max.hclk;
-
- cfg->freq.armclk = fclk;
-
- s3c_freq_dbg("%s: fclk is %lu, max hclk %lu\n",
- __func__, fclk, hclk_max);
-
- hdiv = (fclk > cfg->max.hclk) ? 2 : 1;
- hclk = fclk / hdiv;
-
- if (hclk > cfg->max.hclk) {
- s3c_freq_dbg("%s: hclk too big\n", __func__);
- return -EINVAL;
- }
-
- pdiv = (hclk > cfg->max.pclk) ? 2 : 1;
- pclk = hclk / pdiv;
-
- if (pclk > cfg->max.pclk) {
- s3c_freq_dbg("%s: pclk too big\n", __func__);
- return -EINVAL;
- }
-
- pdiv *= hdiv;
-
- /* record the result */
- cfg->divs.p_divisor = pdiv;
- cfg->divs.h_divisor = hdiv;
-
- return 0;
-}
-
-static struct s3c_cpufreq_info s3c2410_cpufreq_info = {
- .max = {
- .fclk = 200000000,
- .hclk = 100000000,
- .pclk = 50000000,
- },
-
- /* transition latency is about 5ms worst-case, so
- * set 10ms to be sure */
- .latency = 10000000,
-
- .locktime_m = 150,
- .locktime_u = 150,
- .locktime_bits = 12,
-
- .need_pll = 1,
-
- .name = "s3c2410",
- .calc_iotiming = s3c2410_iotiming_calc,
- .set_iotiming = s3c2410_iotiming_set,
- .get_iotiming = s3c2410_iotiming_get,
-
- .set_fvco = s3c2410_set_fvco,
- .set_refresh = s3c2410_cpufreq_setrefresh,
- .set_divs = s3c2410_cpufreq_setdivs,
- .calc_divs = s3c2410_cpufreq_calcdivs,
-
- .debug_io_show = s3c_cpufreq_debugfs_call(s3c2410_iotiming_debugfs),
-};
-
-static int s3c2410_cpufreq_add(struct device *dev,
- struct subsys_interface *sif)
-{
- return s3c_cpufreq_register(&s3c2410_cpufreq_info);
-}
-
-static struct subsys_interface s3c2410_cpufreq_interface = {
- .name = "s3c2410_cpufreq",
- .subsys = &s3c2410_subsys,
- .add_dev = s3c2410_cpufreq_add,
-};
-
-static int __init s3c2410_cpufreq_init(void)
-{
- return subsys_interface_register(&s3c2410_cpufreq_interface);
-}
-arch_initcall(s3c2410_cpufreq_init);
-
-static int s3c2410a_cpufreq_add(struct device *dev,
- struct subsys_interface *sif)
-{
- /* alter the maximum freq settings for S3C2410A. If a board knows
- * it only has a maximum of 200, then it should register its own
- * limits. */
-
- s3c2410_cpufreq_info.max.fclk = 266000000;
- s3c2410_cpufreq_info.max.hclk = 133000000;
- s3c2410_cpufreq_info.max.pclk = 66500000;
- s3c2410_cpufreq_info.name = "s3c2410a";
-
- return s3c2410_cpufreq_add(dev, sif);
-}
-
-static struct subsys_interface s3c2410a_cpufreq_interface = {
- .name = "s3c2410a_cpufreq",
- .subsys = &s3c2410a_subsys,
- .add_dev = s3c2410a_cpufreq_add,
-};
-
-static int __init s3c2410a_cpufreq_init(void)
-{
- return subsys_interface_register(&s3c2410a_cpufreq_interface);
-}
-arch_initcall(s3c2410a_cpufreq_init);
diff --git a/drivers/cpufreq/s3c2412-cpufreq.c b/drivers/cpufreq/s3c2412-cpufreq.c
deleted file mode 100644
index 5945945ead7c..000000000000
--- a/drivers/cpufreq/s3c2412-cpufreq.c
+++ /dev/null
@@ -1,240 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright 2008 Simtec Electronics
- * http://armlinux.simtec.co.uk/
- * Ben Dooks <ben@simtec.co.uk>
- *
- * S3C2412 CPU Frequency scalling
-*/
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/cpufreq.h>
-#include <linux/device.h>
-#include <linux/delay.h>
-#include <linux/clk.h>
-#include <linux/err.h>
-#include <linux/io.h>
-#include <linux/soc/samsung/s3c-cpufreq-core.h>
-#include <linux/soc/samsung/s3c-pm.h>
-
-#include <asm/mach/arch.h>
-#include <asm/mach/map.h>
-
-#define S3C2412_CLKDIVN_PDIVN (1<<2)
-#define S3C2412_CLKDIVN_HDIVN_MASK (3<<0)
-#define S3C2412_CLKDIVN_ARMDIVN (1<<3)
-#define S3C2412_CLKDIVN_DVSEN (1<<4)
-#define S3C2412_CLKDIVN_HALFHCLK (1<<5)
-#define S3C2412_CLKDIVN_USB48DIV (1<<6)
-#define S3C2412_CLKDIVN_UARTDIV_MASK (15<<8)
-#define S3C2412_CLKDIVN_UARTDIV_SHIFT (8)
-#define S3C2412_CLKDIVN_I2SDIV_MASK (15<<12)
-#define S3C2412_CLKDIVN_I2SDIV_SHIFT (12)
-#define S3C2412_CLKDIVN_CAMDIV_MASK (15<<16)
-#define S3C2412_CLKDIVN_CAMDIV_SHIFT (16)
-
-/* our clock resources. */
-static struct clk *xtal;
-static struct clk *fclk;
-static struct clk *hclk;
-static struct clk *armclk;
-
-/* HDIV: 1, 2, 3, 4, 6, 8 */
-
-static int s3c2412_cpufreq_calcdivs(struct s3c_cpufreq_config *cfg)
-{
- unsigned int hdiv, pdiv, armdiv, dvs;
- unsigned long hclk, fclk, armclk, armdiv_clk;
- unsigned long hclk_max;
-
- fclk = cfg->freq.fclk;
- armclk = cfg->freq.armclk;
- hclk_max = cfg->max.hclk;
-
- /* We can't run hclk above armclk as at the best we have to
- * have armclk and hclk in dvs mode. */
-
- if (hclk_max > armclk)
- hclk_max = armclk;
-
- s3c_freq_dbg("%s: fclk=%lu, armclk=%lu, hclk_max=%lu\n",
- __func__, fclk, armclk, hclk_max);
- s3c_freq_dbg("%s: want f=%lu, arm=%lu, h=%lu, p=%lu\n",
- __func__, cfg->freq.fclk, cfg->freq.armclk,
- cfg->freq.hclk, cfg->freq.pclk);
-
- armdiv = fclk / armclk;
-
- if (armdiv < 1)
- armdiv = 1;
- if (armdiv > 2)
- armdiv = 2;
-
- cfg->divs.arm_divisor = armdiv;
- armdiv_clk = fclk / armdiv;
-
- hdiv = armdiv_clk / hclk_max;
- if (hdiv < 1)
- hdiv = 1;
-
- cfg->freq.hclk = hclk = armdiv_clk / hdiv;
-
- /* set dvs depending on whether we reached armclk or not. */
- cfg->divs.dvs = dvs = armclk < armdiv_clk;
-
- /* update the actual armclk we achieved. */
- cfg->freq.armclk = dvs ? hclk : armdiv_clk;
-
- s3c_freq_dbg("%s: armclk %lu, hclk %lu, armdiv %d, hdiv %d, dvs %d\n",
- __func__, armclk, hclk, armdiv, hdiv, cfg->divs.dvs);
-
- if (hdiv > 4)
- goto invalid;
-
- pdiv = (hclk > cfg->max.pclk) ? 2 : 1;
-
- if ((hclk / pdiv) > cfg->max.pclk)
- pdiv++;
-
- cfg->freq.pclk = hclk / pdiv;
-
- s3c_freq_dbg("%s: pdiv %d\n", __func__, pdiv);
-
- if (pdiv > 2)
- goto invalid;
-
- pdiv *= hdiv;
-
- /* store the result, and then return */
-
- cfg->divs.h_divisor = hdiv * armdiv;
- cfg->divs.p_divisor = pdiv * armdiv;
-
- return 0;
-
-invalid:
- return -EINVAL;
-}
-
-static void s3c2412_cpufreq_setdivs(struct s3c_cpufreq_config *cfg)
-{
- unsigned long clkdiv;
- unsigned long olddiv;
-
- olddiv = clkdiv = s3c24xx_read_clkdivn();
-
- /* clear off current clock info */
-
- clkdiv &= ~S3C2412_CLKDIVN_ARMDIVN;
- clkdiv &= ~S3C2412_CLKDIVN_HDIVN_MASK;
- clkdiv &= ~S3C2412_CLKDIVN_PDIVN;
-
- if (cfg->divs.arm_divisor == 2)
- clkdiv |= S3C2412_CLKDIVN_ARMDIVN;
-
- clkdiv |= ((cfg->divs.h_divisor / cfg->divs.arm_divisor) - 1);
-
- if (cfg->divs.p_divisor != cfg->divs.h_divisor)
- clkdiv |= S3C2412_CLKDIVN_PDIVN;
-
- s3c_freq_dbg("%s: div %08lx => %08lx\n", __func__, olddiv, clkdiv);
- s3c24xx_write_clkdivn(clkdiv);
-
- clk_set_parent(armclk, cfg->divs.dvs ? hclk : fclk);
-}
-
-/* set the default cpu frequency information, based on an 200MHz part
- * as we have no other way of detecting the speed rating in software.
- */
-
-static struct s3c_cpufreq_info s3c2412_cpufreq_info = {
- .max = {
- .fclk = 200000000,
- .hclk = 100000000,
- .pclk = 50000000,
- },
-
- .latency = 5000000, /* 5ms */
-
- .locktime_m = 150,
- .locktime_u = 150,
- .locktime_bits = 16,
-
- .name = "s3c2412",
- .set_refresh = s3c2412_cpufreq_setrefresh,
- .set_divs = s3c2412_cpufreq_setdivs,
- .calc_divs = s3c2412_cpufreq_calcdivs,
-
- .calc_iotiming = s3c2412_iotiming_calc,
- .set_iotiming = s3c2412_iotiming_set,
- .get_iotiming = s3c2412_iotiming_get,
-
- .debug_io_show = s3c_cpufreq_debugfs_call(s3c2412_iotiming_debugfs),
-};
-
-static int s3c2412_cpufreq_add(struct device *dev,
- struct subsys_interface *sif)
-{
- unsigned long fclk_rate;
-
- hclk = clk_get(NULL, "hclk");
- if (IS_ERR(hclk)) {
- pr_err("cannot find hclk clock\n");
- return -ENOENT;
- }
-
- fclk = clk_get(NULL, "fclk");
- if (IS_ERR(fclk)) {
- pr_err("cannot find fclk clock\n");
- goto err_fclk;
- }
-
- fclk_rate = clk_get_rate(fclk);
- if (fclk_rate > 200000000) {
- pr_info("fclk %ld MHz, assuming 266MHz capable part\n",
- fclk_rate / 1000000);
- s3c2412_cpufreq_info.max.fclk = 266000000;
- s3c2412_cpufreq_info.max.hclk = 133000000;
- s3c2412_cpufreq_info.max.pclk = 66000000;
- }
-
- armclk = clk_get(NULL, "armclk");
- if (IS_ERR(armclk)) {
- pr_err("cannot find arm clock\n");
- goto err_armclk;
- }
-
- xtal = clk_get(NULL, "xtal");
- if (IS_ERR(xtal)) {
- pr_err("cannot find xtal clock\n");
- goto err_xtal;
- }
-
- return s3c_cpufreq_register(&s3c2412_cpufreq_info);
-
-err_xtal:
- clk_put(armclk);
-err_armclk:
- clk_put(fclk);
-err_fclk:
- clk_put(hclk);
-
- return -ENOENT;
-}
-
-static struct subsys_interface s3c2412_cpufreq_interface = {
- .name = "s3c2412_cpufreq",
- .subsys = &s3c2412_subsys,
- .add_dev = s3c2412_cpufreq_add,
-};
-
-static int s3c2412_cpufreq_init(void)
-{
- return subsys_interface_register(&s3c2412_cpufreq_interface);
-}
-arch_initcall(s3c2412_cpufreq_init);
diff --git a/drivers/cpufreq/s3c2416-cpufreq.c b/drivers/cpufreq/s3c2416-cpufreq.c
deleted file mode 100644
index 5c221bc90210..000000000000
--- a/drivers/cpufreq/s3c2416-cpufreq.c
+++ /dev/null
@@ -1,492 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * S3C2416/2450 CPUfreq Support
- *
- * Copyright 2011 Heiko Stuebner <heiko@sntech.de>
- *
- * based on s3c64xx_cpufreq.c
- *
- * Copyright 2009 Wolfson Microelectronics plc
- */
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/init.h>
-#include <linux/cpufreq.h>
-#include <linux/clk.h>
-#include <linux/err.h>
-#include <linux/regulator/consumer.h>
-#include <linux/reboot.h>
-#include <linux/module.h>
-
-static DEFINE_MUTEX(cpufreq_lock);
-
-struct s3c2416_data {
- struct clk *armdiv;
- struct clk *armclk;
- struct clk *hclk;
-
- unsigned long regulator_latency;
-#ifdef CONFIG_ARM_S3C2416_CPUFREQ_VCORESCALE
- struct regulator *vddarm;
-#endif
-
- struct cpufreq_frequency_table *freq_table;
-
- bool is_dvs;
- bool disable_dvs;
-};
-
-static struct s3c2416_data s3c2416_cpufreq;
-
-struct s3c2416_dvfs {
- unsigned int vddarm_min;
- unsigned int vddarm_max;
-};
-
-/* pseudo-frequency for dvs mode */
-#define FREQ_DVS 132333
-
-/* frequency to sleep and reboot in
- * it's essential to leave dvs, as some boards do not reconfigure the
- * regulator on reboot
- */
-#define FREQ_SLEEP 133333
-
-/* Sources for the ARMCLK */
-#define SOURCE_HCLK 0
-#define SOURCE_ARMDIV 1
-
-#ifdef CONFIG_ARM_S3C2416_CPUFREQ_VCORESCALE
-/* S3C2416 only supports changing the voltage in the dvs-mode.
- * Voltages down to 1.0V seem to work, so we take what the regulator
- * can get us.
- */
-static struct s3c2416_dvfs s3c2416_dvfs_table[] = {
- [SOURCE_HCLK] = { 950000, 1250000 },
- [SOURCE_ARMDIV] = { 1250000, 1350000 },
-};
-#endif
-
-static struct cpufreq_frequency_table s3c2416_freq_table[] = {
- { 0, SOURCE_HCLK, FREQ_DVS },
- { 0, SOURCE_ARMDIV, 133333 },
- { 0, SOURCE_ARMDIV, 266666 },
- { 0, SOURCE_ARMDIV, 400000 },
- { 0, 0, CPUFREQ_TABLE_END },
-};
-
-static struct cpufreq_frequency_table s3c2450_freq_table[] = {
- { 0, SOURCE_HCLK, FREQ_DVS },
- { 0, SOURCE_ARMDIV, 133500 },
- { 0, SOURCE_ARMDIV, 267000 },
- { 0, SOURCE_ARMDIV, 534000 },
- { 0, 0, CPUFREQ_TABLE_END },
-};
-
-static unsigned int s3c2416_cpufreq_get_speed(unsigned int cpu)
-{
- struct s3c2416_data *s3c_freq = &s3c2416_cpufreq;
-
- if (cpu != 0)
- return 0;
-
- /* return our pseudo-frequency when in dvs mode */
- if (s3c_freq->is_dvs)
- return FREQ_DVS;
-
- return clk_get_rate(s3c_freq->armclk) / 1000;
-}
-
-static int s3c2416_cpufreq_set_armdiv(struct s3c2416_data *s3c_freq,
- unsigned int freq)
-{
- int ret;
-
- if (clk_get_rate(s3c_freq->armdiv) / 1000 != freq) {
- ret = clk_set_rate(s3c_freq->armdiv, freq * 1000);
- if (ret < 0) {
- pr_err("cpufreq: Failed to set armdiv rate %dkHz: %d\n",
- freq, ret);
- return ret;
- }
- }
-
- return 0;
-}
-
-static int s3c2416_cpufreq_enter_dvs(struct s3c2416_data *s3c_freq, int idx)
-{
-#ifdef CONFIG_ARM_S3C2416_CPUFREQ_VCORESCALE
- struct s3c2416_dvfs *dvfs;
-#endif
- int ret;
-
- if (s3c_freq->is_dvs) {
- pr_debug("cpufreq: already in dvs mode, nothing to do\n");
- return 0;
- }
-
- pr_debug("cpufreq: switching armclk to hclk (%lukHz)\n",
- clk_get_rate(s3c_freq->hclk) / 1000);
- ret = clk_set_parent(s3c_freq->armclk, s3c_freq->hclk);
- if (ret < 0) {
- pr_err("cpufreq: Failed to switch armclk to hclk: %d\n", ret);
- return ret;
- }
-
-#ifdef CONFIG_ARM_S3C2416_CPUFREQ_VCORESCALE
- /* changing the core voltage is only allowed when in dvs mode */
- if (s3c_freq->vddarm) {
- dvfs = &s3c2416_dvfs_table[idx];
-
- pr_debug("cpufreq: setting regulator to %d-%d\n",
- dvfs->vddarm_min, dvfs->vddarm_max);
- ret = regulator_set_voltage(s3c_freq->vddarm,
- dvfs->vddarm_min,
- dvfs->vddarm_max);
-
- /* when lowering the voltage failed, there is nothing to do */
- if (ret != 0)
- pr_err("cpufreq: Failed to set VDDARM: %d\n", ret);
- }
-#endif
-
- s3c_freq->is_dvs = 1;
-
- return 0;
-}
-
-static int s3c2416_cpufreq_leave_dvs(struct s3c2416_data *s3c_freq, int idx)
-{
-#ifdef CONFIG_ARM_S3C2416_CPUFREQ_VCORESCALE
- struct s3c2416_dvfs *dvfs;
-#endif
- int ret;
-
- if (!s3c_freq->is_dvs) {
- pr_debug("cpufreq: not in dvs mode, so can't leave\n");
- return 0;
- }
-
-#ifdef CONFIG_ARM_S3C2416_CPUFREQ_VCORESCALE
- if (s3c_freq->vddarm) {
- dvfs = &s3c2416_dvfs_table[idx];
-
- pr_debug("cpufreq: setting regulator to %d-%d\n",
- dvfs->vddarm_min, dvfs->vddarm_max);
- ret = regulator_set_voltage(s3c_freq->vddarm,
- dvfs->vddarm_min,
- dvfs->vddarm_max);
- if (ret != 0) {
- pr_err("cpufreq: Failed to set VDDARM: %d\n", ret);
- return ret;
- }
- }
-#endif
-
- /* force armdiv to hclk frequency for transition from dvs*/
- if (clk_get_rate(s3c_freq->armdiv) > clk_get_rate(s3c_freq->hclk)) {
- pr_debug("cpufreq: force armdiv to hclk frequency (%lukHz)\n",
- clk_get_rate(s3c_freq->hclk) / 1000);
- ret = s3c2416_cpufreq_set_armdiv(s3c_freq,
- clk_get_rate(s3c_freq->hclk) / 1000);
- if (ret < 0) {
- pr_err("cpufreq: Failed to set the armdiv to %lukHz: %d\n",
- clk_get_rate(s3c_freq->hclk) / 1000, ret);
- return ret;
- }
- }
-
- pr_debug("cpufreq: switching armclk parent to armdiv (%lukHz)\n",
- clk_get_rate(s3c_freq->armdiv) / 1000);
-
- ret = clk_set_parent(s3c_freq->armclk, s3c_freq->armdiv);
- if (ret < 0) {
- pr_err("cpufreq: Failed to switch armclk clock parent to armdiv: %d\n",
- ret);
- return ret;
- }
-
- s3c_freq->is_dvs = 0;
-
- return 0;
-}
-
-static int s3c2416_cpufreq_set_target(struct cpufreq_policy *policy,
- unsigned int index)
-{
- struct s3c2416_data *s3c_freq = &s3c2416_cpufreq;
- unsigned int new_freq;
- int idx, ret, to_dvs = 0;
-
- mutex_lock(&cpufreq_lock);
-
- idx = s3c_freq->freq_table[index].driver_data;
-
- if (idx == SOURCE_HCLK)
- to_dvs = 1;
-
- /* switching to dvs when it's not allowed */
- if (to_dvs && s3c_freq->disable_dvs) {
- pr_debug("cpufreq: entering dvs mode not allowed\n");
- ret = -EINVAL;
- goto out;
- }
-
- /* When leavin dvs mode, always switch the armdiv to the hclk rate
- * The S3C2416 has stability issues when switching directly to
- * higher frequencies.
- */
- new_freq = (s3c_freq->is_dvs && !to_dvs)
- ? clk_get_rate(s3c_freq->hclk) / 1000
- : s3c_freq->freq_table[index].frequency;
-
- if (to_dvs) {
- pr_debug("cpufreq: enter dvs\n");
- ret = s3c2416_cpufreq_enter_dvs(s3c_freq, idx);
- } else if (s3c_freq->is_dvs) {
- pr_debug("cpufreq: leave dvs\n");
- ret = s3c2416_cpufreq_leave_dvs(s3c_freq, idx);
- } else {
- pr_debug("cpufreq: change armdiv to %dkHz\n", new_freq);
- ret = s3c2416_cpufreq_set_armdiv(s3c_freq, new_freq);
- }
-
-out:
- mutex_unlock(&cpufreq_lock);
-
- return ret;
-}
-
-#ifdef CONFIG_ARM_S3C2416_CPUFREQ_VCORESCALE
-static void s3c2416_cpufreq_cfg_regulator(struct s3c2416_data *s3c_freq)
-{
- int count, v, i, found;
- struct cpufreq_frequency_table *pos;
- struct s3c2416_dvfs *dvfs;
-
- count = regulator_count_voltages(s3c_freq->vddarm);
- if (count < 0) {
- pr_err("cpufreq: Unable to check supported voltages\n");
- return;
- }
-
- if (!count)
- goto out;
-
- cpufreq_for_each_valid_entry(pos, s3c_freq->freq_table) {
- dvfs = &s3c2416_dvfs_table[pos->driver_data];
- found = 0;
-
- /* Check only the min-voltage, more is always ok on S3C2416 */
- for (i = 0; i < count; i++) {
- v = regulator_list_voltage(s3c_freq->vddarm, i);
- if (v >= dvfs->vddarm_min)
- found = 1;
- }
-
- if (!found) {
- pr_debug("cpufreq: %dkHz unsupported by regulator\n",
- pos->frequency);
- pos->frequency = CPUFREQ_ENTRY_INVALID;
- }
- }
-
-out:
- /* Guessed */
- s3c_freq->regulator_latency = 1 * 1000 * 1000;
-}
-#endif
-
-static int s3c2416_cpufreq_reboot_notifier_evt(struct notifier_block *this,
- unsigned long event, void *ptr)
-{
- struct s3c2416_data *s3c_freq = &s3c2416_cpufreq;
- int ret;
- struct cpufreq_policy *policy;
-
- mutex_lock(&cpufreq_lock);
-
- /* disable further changes */
- s3c_freq->disable_dvs = 1;
-
- mutex_unlock(&cpufreq_lock);
-
- /* some boards don't reconfigure the regulator on reboot, which
- * could lead to undervolting the cpu when the clock is reset.
- * Therefore we always leave the DVS mode on reboot.
- */
- if (s3c_freq->is_dvs) {
- pr_debug("cpufreq: leave dvs on reboot\n");
-
- policy = cpufreq_cpu_get(0);
- if (!policy) {
- pr_debug("cpufreq: get no policy for cpu0\n");
- return NOTIFY_BAD;
- }
-
- ret = cpufreq_driver_target(policy, FREQ_SLEEP, 0);
- cpufreq_cpu_put(policy);
-
- if (ret < 0)
- return NOTIFY_BAD;
- }
-
- return NOTIFY_DONE;
-}
-
-static struct notifier_block s3c2416_cpufreq_reboot_notifier = {
- .notifier_call = s3c2416_cpufreq_reboot_notifier_evt,
-};
-
-static int s3c2416_cpufreq_driver_init(struct cpufreq_policy *policy)
-{
- struct s3c2416_data *s3c_freq = &s3c2416_cpufreq;
- struct cpufreq_frequency_table *pos;
- struct clk *msysclk;
- unsigned long rate;
- int ret;
-
- if (policy->cpu != 0)
- return -EINVAL;
-
- msysclk = clk_get(NULL, "msysclk");
- if (IS_ERR(msysclk)) {
- ret = PTR_ERR(msysclk);
- pr_err("cpufreq: Unable to obtain msysclk: %d\n", ret);
- return ret;
- }
-
- /*
- * S3C2416 and S3C2450 share the same processor-ID and also provide no
- * other means to distinguish them other than through the rate of
- * msysclk. On S3C2416 msysclk runs at 800MHz and on S3C2450 at 533MHz.
- */
- rate = clk_get_rate(msysclk);
- if (rate == 800 * 1000 * 1000) {
- pr_info("cpufreq: msysclk running at %lukHz, using S3C2416 frequency table\n",
- rate / 1000);
- s3c_freq->freq_table = s3c2416_freq_table;
- policy->cpuinfo.max_freq = 400000;
- } else if (rate / 1000 == 534000) {
- pr_info("cpufreq: msysclk running at %lukHz, using S3C2450 frequency table\n",
- rate / 1000);
- s3c_freq->freq_table = s3c2450_freq_table;
- policy->cpuinfo.max_freq = 534000;
- }
-
- /* not needed anymore */
- clk_put(msysclk);
-
- if (s3c_freq->freq_table == NULL) {
- pr_err("cpufreq: No frequency information for this CPU, msysclk at %lukHz\n",
- rate / 1000);
- return -ENODEV;
- }
-
- s3c_freq->is_dvs = 0;
-
- s3c_freq->armdiv = clk_get(NULL, "armdiv");
- if (IS_ERR(s3c_freq->armdiv)) {
- ret = PTR_ERR(s3c_freq->armdiv);
- pr_err("cpufreq: Unable to obtain ARMDIV: %d\n", ret);
- return ret;
- }
-
- s3c_freq->hclk = clk_get(NULL, "hclk");
- if (IS_ERR(s3c_freq->hclk)) {
- ret = PTR_ERR(s3c_freq->hclk);
- pr_err("cpufreq: Unable to obtain HCLK: %d\n", ret);
- goto err_hclk;
- }
-
- /* chech hclk rate, we only support the common 133MHz for now
- * hclk could also run at 66MHz, but this not often used
- */
- rate = clk_get_rate(s3c_freq->hclk);
- if (rate < 133 * 1000 * 1000) {
- pr_err("cpufreq: HCLK not at 133MHz\n");
- ret = -EINVAL;
- goto err_armclk;
- }
-
- s3c_freq->armclk = clk_get(NULL, "armclk");
- if (IS_ERR(s3c_freq->armclk)) {
- ret = PTR_ERR(s3c_freq->armclk);
- pr_err("cpufreq: Unable to obtain ARMCLK: %d\n", ret);
- goto err_armclk;
- }
-
-#ifdef CONFIG_ARM_S3C2416_CPUFREQ_VCORESCALE
- s3c_freq->vddarm = regulator_get(NULL, "vddarm");
- if (IS_ERR(s3c_freq->vddarm)) {
- ret = PTR_ERR(s3c_freq->vddarm);
- pr_err("cpufreq: Failed to obtain VDDARM: %d\n", ret);
- goto err_vddarm;
- }
-
- s3c2416_cpufreq_cfg_regulator(s3c_freq);
-#else
- s3c_freq->regulator_latency = 0;
-#endif
-
- cpufreq_for_each_entry(pos, s3c_freq->freq_table) {
- /* special handling for dvs mode */
- if (pos->driver_data == 0) {
- if (!s3c_freq->hclk) {
- pr_debug("cpufreq: %dkHz unsupported as it would need unavailable dvs mode\n",
- pos->frequency);
- pos->frequency = CPUFREQ_ENTRY_INVALID;
- } else {
- continue;
- }
- }
-
- /* Check for frequencies we can generate */
- rate = clk_round_rate(s3c_freq->armdiv,
- pos->frequency * 1000);
- rate /= 1000;
- if (rate != pos->frequency) {
- pr_debug("cpufreq: %dkHz unsupported by clock (clk_round_rate return %lu)\n",
- pos->frequency, rate);
- pos->frequency = CPUFREQ_ENTRY_INVALID;
- }
- }
-
- /* Datasheet says PLL stabalisation time must be at least 300us,
- * so but add some fudge. (reference in LOCKCON0 register description)
- */
- cpufreq_generic_init(policy, s3c_freq->freq_table,
- (500 * 1000) + s3c_freq->regulator_latency);
- register_reboot_notifier(&s3c2416_cpufreq_reboot_notifier);
-
- return 0;
-
-#ifdef CONFIG_ARM_S3C2416_CPUFREQ_VCORESCALE
-err_vddarm:
- clk_put(s3c_freq->armclk);
-#endif
-err_armclk:
- clk_put(s3c_freq->hclk);
-err_hclk:
- clk_put(s3c_freq->armdiv);
-
- return ret;
-}
-
-static struct cpufreq_driver s3c2416_cpufreq_driver = {
- .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
- .verify = cpufreq_generic_frequency_table_verify,
- .target_index = s3c2416_cpufreq_set_target,
- .get = s3c2416_cpufreq_get_speed,
- .init = s3c2416_cpufreq_driver_init,
- .name = "s3c2416",
- .attr = cpufreq_generic_attr,
-};
-
-static int __init s3c2416_cpufreq_init(void)
-{
- return cpufreq_register_driver(&s3c2416_cpufreq_driver);
-}
-module_init(s3c2416_cpufreq_init);
diff --git a/drivers/cpufreq/s3c2440-cpufreq.c b/drivers/cpufreq/s3c2440-cpufreq.c
deleted file mode 100644
index 2011fb9c03a4..000000000000
--- a/drivers/cpufreq/s3c2440-cpufreq.c
+++ /dev/null
@@ -1,321 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2006-2009 Simtec Electronics
- * http://armlinux.simtec.co.uk/
- * Ben Dooks <ben@simtec.co.uk>
- * Vincent Sanders <vince@simtec.co.uk>
- *
- * S3C2440/S3C2442 CPU Frequency scaling
-*/
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/cpufreq.h>
-#include <linux/device.h>
-#include <linux/delay.h>
-#include <linux/clk.h>
-#include <linux/err.h>
-#include <linux/io.h>
-#include <linux/soc/samsung/s3c-cpufreq-core.h>
-#include <linux/soc/samsung/s3c-pm.h>
-
-#include <asm/mach/arch.h>
-#include <asm/mach/map.h>
-
-#define S3C2440_CLKDIVN_PDIVN (1<<0)
-#define S3C2440_CLKDIVN_HDIVN_MASK (3<<1)
-#define S3C2440_CLKDIVN_HDIVN_1 (0<<1)
-#define S3C2440_CLKDIVN_HDIVN_2 (1<<1)
-#define S3C2440_CLKDIVN_HDIVN_4_8 (2<<1)
-#define S3C2440_CLKDIVN_HDIVN_3_6 (3<<1)
-#define S3C2440_CLKDIVN_UCLK (1<<3)
-
-#define S3C2440_CAMDIVN_CAMCLK_MASK (0xf<<0)
-#define S3C2440_CAMDIVN_CAMCLK_SEL (1<<4)
-#define S3C2440_CAMDIVN_HCLK3_HALF (1<<8)
-#define S3C2440_CAMDIVN_HCLK4_HALF (1<<9)
-#define S3C2440_CAMDIVN_DVSEN (1<<12)
-
-#define S3C2442_CAMDIVN_CAMCLK_DIV3 (1<<5)
-
-static struct clk *xtal;
-static struct clk *fclk;
-static struct clk *hclk;
-static struct clk *armclk;
-
-/* HDIV: 1, 2, 3, 4, 6, 8 */
-
-static inline int within_khz(unsigned long a, unsigned long b)
-{
- long diff = a - b;
-
- return (diff >= -1000 && diff <= 1000);
-}
-
-/**
- * s3c2440_cpufreq_calcdivs - calculate divider settings
- * @cfg: The cpu frequency settings.
- *
- * Calcualte the divider values for the given frequency settings
- * specified in @cfg. The values are stored in @cfg for later use
- * by the relevant set routine if the request settings can be reached.
- */
-static int s3c2440_cpufreq_calcdivs(struct s3c_cpufreq_config *cfg)
-{
- unsigned int hdiv, pdiv;
- unsigned long hclk, fclk, armclk;
- unsigned long hclk_max;
-
- fclk = cfg->freq.fclk;
- armclk = cfg->freq.armclk;
- hclk_max = cfg->max.hclk;
-
- s3c_freq_dbg("%s: fclk is %lu, armclk %lu, max hclk %lu\n",
- __func__, fclk, armclk, hclk_max);
-
- if (armclk > fclk) {
- pr_warn("%s: armclk > fclk\n", __func__);
- armclk = fclk;
- }
-
- /* if we are in DVS, we need HCLK to be <= ARMCLK */
- if (armclk < fclk && armclk < hclk_max)
- hclk_max = armclk;
-
- for (hdiv = 1; hdiv < 9; hdiv++) {
- if (hdiv == 5 || hdiv == 7)
- hdiv++;
-
- hclk = (fclk / hdiv);
- if (hclk <= hclk_max || within_khz(hclk, hclk_max))
- break;
- }
-
- s3c_freq_dbg("%s: hclk %lu, div %d\n", __func__, hclk, hdiv);
-
- if (hdiv > 8)
- goto invalid;
-
- pdiv = (hclk > cfg->max.pclk) ? 2 : 1;
-
- if ((hclk / pdiv) > cfg->max.pclk)
- pdiv++;
-
- s3c_freq_dbg("%s: pdiv %d\n", __func__, pdiv);
-
- if (pdiv > 2)
- goto invalid;
-
- pdiv *= hdiv;
-
- /* calculate a valid armclk */
-
- if (armclk < hclk)
- armclk = hclk;
-
- /* if we're running armclk lower than fclk, this really means
- * that the system should go into dvs mode, which means that
- * armclk is connected to hclk. */
- if (armclk < fclk) {
- cfg->divs.dvs = 1;
- armclk = hclk;
- } else
- cfg->divs.dvs = 0;
-
- cfg->freq.armclk = armclk;
-
- /* store the result, and then return */
-
- cfg->divs.h_divisor = hdiv;
- cfg->divs.p_divisor = pdiv;
-
- return 0;
-
- invalid:
- return -EINVAL;
-}
-
-#define CAMDIVN_HCLK_HALF (S3C2440_CAMDIVN_HCLK3_HALF | \
- S3C2440_CAMDIVN_HCLK4_HALF)
-
-/**
- * s3c2440_cpufreq_setdivs - set the cpu frequency divider settings
- * @cfg: The cpu frequency settings.
- *
- * Set the divisors from the settings in @cfg, which where generated
- * during the calculation phase by s3c2440_cpufreq_calcdivs().
- */
-static void s3c2440_cpufreq_setdivs(struct s3c_cpufreq_config *cfg)
-{
- unsigned long clkdiv, camdiv;
-
- s3c_freq_dbg("%s: divisors: h=%d, p=%d\n", __func__,
- cfg->divs.h_divisor, cfg->divs.p_divisor);
-
- clkdiv = s3c24xx_read_clkdivn();
- camdiv = s3c2440_read_camdivn();
-
- clkdiv &= ~(S3C2440_CLKDIVN_HDIVN_MASK | S3C2440_CLKDIVN_PDIVN);
- camdiv &= ~CAMDIVN_HCLK_HALF;
-
- switch (cfg->divs.h_divisor) {
- case 1:
- clkdiv |= S3C2440_CLKDIVN_HDIVN_1;
- break;
-
- case 2:
- clkdiv |= S3C2440_CLKDIVN_HDIVN_2;
- break;
-
- case 6:
- camdiv |= S3C2440_CAMDIVN_HCLK3_HALF;
- fallthrough;
- case 3:
- clkdiv |= S3C2440_CLKDIVN_HDIVN_3_6;
- break;
-
- case 8:
- camdiv |= S3C2440_CAMDIVN_HCLK4_HALF;
- fallthrough;
- case 4:
- clkdiv |= S3C2440_CLKDIVN_HDIVN_4_8;
- break;
-
- default:
- BUG(); /* we don't expect to get here. */
- }
-
- if (cfg->divs.p_divisor != cfg->divs.h_divisor)
- clkdiv |= S3C2440_CLKDIVN_PDIVN;
-
- /* todo - set pclk. */
-
- /* Write the divisors first with hclk intentionally halved so that
- * when we write clkdiv we will under-frequency instead of over. We
- * then make a short delay and remove the hclk halving if necessary.
- */
-
- s3c2440_write_camdivn(camdiv | CAMDIVN_HCLK_HALF);
- s3c24xx_write_clkdivn(clkdiv);
-
- ndelay(20);
- s3c2440_write_camdivn(camdiv);
-
- clk_set_parent(armclk, cfg->divs.dvs ? hclk : fclk);
-}
-
-static int run_freq_for(unsigned long max_hclk, unsigned long fclk,
- int *divs,
- struct cpufreq_frequency_table *table,
- size_t table_size)
-{
- unsigned long freq;
- int index = 0;
- int div;
-
- for (div = *divs; div > 0; div = *divs++) {
- freq = fclk / div;
-
- if (freq > max_hclk && div != 1)
- continue;
-
- freq /= 1000; /* table is in kHz */
- index = s3c_cpufreq_addfreq(table, index, table_size, freq);
- if (index < 0)
- break;
- }
-
- return index;
-}
-
-static int hclk_divs[] = { 1, 2, 3, 4, 6, 8, -1 };
-
-static int s3c2440_cpufreq_calctable(struct s3c_cpufreq_config *cfg,
- struct cpufreq_frequency_table *table,
- size_t table_size)
-{
- int ret;
-
- WARN_ON(cfg->info == NULL);
- WARN_ON(cfg->board == NULL);
-
- ret = run_freq_for(cfg->info->max.hclk,
- cfg->info->max.fclk,
- hclk_divs,
- table, table_size);
-
- s3c_freq_dbg("%s: returning %d\n", __func__, ret);
-
- return ret;
-}
-
-static struct s3c_cpufreq_info s3c2440_cpufreq_info = {
- .max = {
- .fclk = 400000000,
- .hclk = 133333333,
- .pclk = 66666666,
- },
-
- .locktime_m = 300,
- .locktime_u = 300,
- .locktime_bits = 16,
-
- .name = "s3c244x",
- .calc_iotiming = s3c2410_iotiming_calc,
- .set_iotiming = s3c2410_iotiming_set,
- .get_iotiming = s3c2410_iotiming_get,
- .set_fvco = s3c2410_set_fvco,
-
- .set_refresh = s3c2410_cpufreq_setrefresh,
- .set_divs = s3c2440_cpufreq_setdivs,
- .calc_divs = s3c2440_cpufreq_calcdivs,
- .calc_freqtable = s3c2440_cpufreq_calctable,
-
- .debug_io_show = s3c_cpufreq_debugfs_call(s3c2410_iotiming_debugfs),
-};
-
-static int s3c2440_cpufreq_add(struct device *dev,
- struct subsys_interface *sif)
-{
- xtal = s3c_cpufreq_clk_get(NULL, "xtal");
- hclk = s3c_cpufreq_clk_get(NULL, "hclk");
- fclk = s3c_cpufreq_clk_get(NULL, "fclk");
- armclk = s3c_cpufreq_clk_get(NULL, "armclk");
-
- if (IS_ERR(xtal) || IS_ERR(hclk) || IS_ERR(fclk) || IS_ERR(armclk)) {
- pr_err("%s: failed to get clocks\n", __func__);
- return -ENOENT;
- }
-
- return s3c_cpufreq_register(&s3c2440_cpufreq_info);
-}
-
-static struct subsys_interface s3c2440_cpufreq_interface = {
- .name = "s3c2440_cpufreq",
- .subsys = &s3c2440_subsys,
- .add_dev = s3c2440_cpufreq_add,
-};
-
-static int s3c2440_cpufreq_init(void)
-{
- return subsys_interface_register(&s3c2440_cpufreq_interface);
-}
-
-/* arch_initcall adds the clocks we need, so use subsys_initcall. */
-subsys_initcall(s3c2440_cpufreq_init);
-
-static struct subsys_interface s3c2442_cpufreq_interface = {
- .name = "s3c2442_cpufreq",
- .subsys = &s3c2442_subsys,
- .add_dev = s3c2440_cpufreq_add,
-};
-
-static int s3c2442_cpufreq_init(void)
-{
- return subsys_interface_register(&s3c2442_cpufreq_interface);
-}
-subsys_initcall(s3c2442_cpufreq_init);
diff --git a/drivers/cpufreq/s3c24xx-cpufreq-debugfs.c b/drivers/cpufreq/s3c24xx-cpufreq-debugfs.c
deleted file mode 100644
index 93971dfe7c75..000000000000
--- a/drivers/cpufreq/s3c24xx-cpufreq-debugfs.c
+++ /dev/null
@@ -1,163 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2009 Simtec Electronics
- * http://armlinux.simtec.co.uk/
- * Ben Dooks <ben@simtec.co.uk>
- *
- * S3C24XX CPU Frequency scaling - debugfs status support
-*/
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/init.h>
-#include <linux/export.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/cpufreq.h>
-#include <linux/debugfs.h>
-#include <linux/seq_file.h>
-#include <linux/err.h>
-
-#include <linux/soc/samsung/s3c-cpufreq-core.h>
-
-static struct dentry *dbgfs_root;
-static struct dentry *dbgfs_file_io;
-static struct dentry *dbgfs_file_info;
-static struct dentry *dbgfs_file_board;
-
-#define print_ns(x) ((x) / 10), ((x) % 10)
-
-static void show_max(struct seq_file *seq, struct s3c_freq *f)
-{
- seq_printf(seq, "MAX: F=%lu, H=%lu, P=%lu, A=%lu\n",
- f->fclk, f->hclk, f->pclk, f->armclk);
-}
-
-static int board_show(struct seq_file *seq, void *p)
-{
- struct s3c_cpufreq_config *cfg;
- struct s3c_cpufreq_board *brd;
-
- cfg = s3c_cpufreq_getconfig();
- if (!cfg) {
- seq_printf(seq, "no configuration registered\n");
- return 0;
- }
-
- brd = cfg->board;
- if (!brd) {
- seq_printf(seq, "no board definition set?\n");
- return 0;
- }
-
- seq_printf(seq, "SDRAM refresh %u ns\n", brd->refresh);
- seq_printf(seq, "auto_io=%u\n", brd->auto_io);
- seq_printf(seq, "need_io=%u\n", brd->need_io);
-
- show_max(seq, &brd->max);
-
-
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(board);
-
-static int info_show(struct seq_file *seq, void *p)
-{
- struct s3c_cpufreq_config *cfg;
-
- cfg = s3c_cpufreq_getconfig();
- if (!cfg) {
- seq_printf(seq, "no configuration registered\n");
- return 0;
- }
-
- seq_printf(seq, " FCLK %ld Hz\n", cfg->freq.fclk);
- seq_printf(seq, " HCLK %ld Hz (%lu.%lu ns)\n",
- cfg->freq.hclk, print_ns(cfg->freq.hclk_tns));
- seq_printf(seq, " PCLK %ld Hz\n", cfg->freq.hclk);
- seq_printf(seq, "ARMCLK %ld Hz\n", cfg->freq.armclk);
- seq_printf(seq, "\n");
-
- show_max(seq, &cfg->max);
-
- seq_printf(seq, "Divisors: P=%d, H=%d, A=%d, dvs=%s\n",
- cfg->divs.h_divisor, cfg->divs.p_divisor,
- cfg->divs.arm_divisor, cfg->divs.dvs ? "on" : "off");
- seq_printf(seq, "\n");
-
- seq_printf(seq, "lock_pll=%u\n", cfg->lock_pll);
-
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(info);
-
-static int io_show(struct seq_file *seq, void *p)
-{
- void (*show_bank)(struct seq_file *, struct s3c_cpufreq_config *, union s3c_iobank *);
- struct s3c_cpufreq_config *cfg;
- struct s3c_iotimings *iot;
- union s3c_iobank *iob;
- int bank;
-
- cfg = s3c_cpufreq_getconfig();
- if (!cfg) {
- seq_printf(seq, "no configuration registered\n");
- return 0;
- }
-
- show_bank = cfg->info->debug_io_show;
- if (!show_bank) {
- seq_printf(seq, "no code to show bank timing\n");
- return 0;
- }
-
- iot = s3c_cpufreq_getiotimings();
- if (!iot) {
- seq_printf(seq, "no io timings registered\n");
- return 0;
- }
-
- seq_printf(seq, "hclk period is %lu.%lu ns\n", print_ns(cfg->freq.hclk_tns));
-
- for (bank = 0; bank < MAX_BANKS; bank++) {
- iob = &iot->bank[bank];
-
- seq_printf(seq, "bank %d: ", bank);
-
- if (!iob->io_2410) {
- seq_printf(seq, "nothing set\n");
- continue;
- }
-
- show_bank(seq, cfg, iob);
- }
-
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(io);
-
-static int __init s3c_freq_debugfs_init(void)
-{
- dbgfs_root = debugfs_create_dir("s3c-cpufreq", NULL);
- if (IS_ERR(dbgfs_root)) {
- pr_err("%s: error creating debugfs root\n", __func__);
- return PTR_ERR(dbgfs_root);
- }
-
- dbgfs_file_io = debugfs_create_file("io-timing", S_IRUGO, dbgfs_root,
- NULL, &io_fops);
-
- dbgfs_file_info = debugfs_create_file("info", S_IRUGO, dbgfs_root,
- NULL, &info_fops);
-
- dbgfs_file_board = debugfs_create_file("board", S_IRUGO, dbgfs_root,
- NULL, &board_fops);
-
- return 0;
-}
-
-late_initcall(s3c_freq_debugfs_init);
-
diff --git a/drivers/cpufreq/s3c24xx-cpufreq.c b/drivers/cpufreq/s3c24xx-cpufreq.c
deleted file mode 100644
index 7380c32b238e..000000000000
--- a/drivers/cpufreq/s3c24xx-cpufreq.c
+++ /dev/null
@@ -1,648 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2006-2008 Simtec Electronics
- * http://armlinux.simtec.co.uk/
- * Ben Dooks <ben@simtec.co.uk>
- *
- * S3C24XX CPU Frequency scaling
-*/
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/cpufreq.h>
-#include <linux/cpu.h>
-#include <linux/clk.h>
-#include <linux/err.h>
-#include <linux/io.h>
-#include <linux/device.h>
-#include <linux/sysfs.h>
-#include <linux/slab.h>
-#include <linux/soc/samsung/s3c-cpufreq-core.h>
-#include <linux/soc/samsung/s3c-pm.h>
-
-#include <asm/mach/arch.h>
-#include <asm/mach/map.h>
-
-/* note, cpufreq support deals in kHz, no Hz */
-static struct cpufreq_driver s3c24xx_driver;
-static struct s3c_cpufreq_config cpu_cur;
-static struct s3c_iotimings s3c24xx_iotiming;
-static struct cpufreq_frequency_table *pll_reg;
-static unsigned int last_target = ~0;
-static unsigned int ftab_size;
-static struct cpufreq_frequency_table *ftab;
-
-static struct clk *_clk_mpll;
-static struct clk *_clk_xtal;
-static struct clk *clk_fclk;
-static struct clk *clk_hclk;
-static struct clk *clk_pclk;
-static struct clk *clk_arm;
-
-#ifdef CONFIG_ARM_S3C24XX_CPUFREQ_DEBUGFS
-struct s3c_cpufreq_config *s3c_cpufreq_getconfig(void)
-{
- return &cpu_cur;
-}
-
-struct s3c_iotimings *s3c_cpufreq_getiotimings(void)
-{
- return &s3c24xx_iotiming;
-}
-#endif /* CONFIG_ARM_S3C24XX_CPUFREQ_DEBUGFS */
-
-static void s3c_cpufreq_getcur(struct s3c_cpufreq_config *cfg)
-{
- unsigned long fclk, pclk, hclk, armclk;
-
- cfg->freq.fclk = fclk = clk_get_rate(clk_fclk);
- cfg->freq.hclk = hclk = clk_get_rate(clk_hclk);
- cfg->freq.pclk = pclk = clk_get_rate(clk_pclk);
- cfg->freq.armclk = armclk = clk_get_rate(clk_arm);
-
- cfg->pll.driver_data = s3c24xx_read_mpllcon();
- cfg->pll.frequency = fclk;
-
- cfg->freq.hclk_tns = 1000000000 / (cfg->freq.hclk / 10);
-
- cfg->divs.h_divisor = fclk / hclk;
- cfg->divs.p_divisor = fclk / pclk;
-}
-
-static inline void s3c_cpufreq_calc(struct s3c_cpufreq_config *cfg)
-{
- unsigned long pll = cfg->pll.frequency;
-
- cfg->freq.fclk = pll;
- cfg->freq.hclk = pll / cfg->divs.h_divisor;
- cfg->freq.pclk = pll / cfg->divs.p_divisor;
-
- /* convert hclk into 10ths of nanoseconds for io calcs */
- cfg->freq.hclk_tns = 1000000000 / (cfg->freq.hclk / 10);
-}
-
-static inline int closer(unsigned int target, unsigned int n, unsigned int c)
-{
- int diff_cur = abs(target - c);
- int diff_new = abs(target - n);
-
- return (diff_new < diff_cur);
-}
-
-static void s3c_cpufreq_show(const char *pfx,
- struct s3c_cpufreq_config *cfg)
-{
- s3c_freq_dbg("%s: Fvco=%u, F=%lu, A=%lu, H=%lu (%u), P=%lu (%u)\n",
- pfx, cfg->pll.frequency, cfg->freq.fclk, cfg->freq.armclk,
- cfg->freq.hclk, cfg->divs.h_divisor,
- cfg->freq.pclk, cfg->divs.p_divisor);
-}
-
-/* functions to wrapper the driver info calls to do the cpu specific work */
-
-static void s3c_cpufreq_setio(struct s3c_cpufreq_config *cfg)
-{
- if (cfg->info->set_iotiming)
- (cfg->info->set_iotiming)(cfg, &s3c24xx_iotiming);
-}
-
-static int s3c_cpufreq_calcio(struct s3c_cpufreq_config *cfg)
-{
- if (cfg->info->calc_iotiming)
- return (cfg->info->calc_iotiming)(cfg, &s3c24xx_iotiming);
-
- return 0;
-}
-
-static void s3c_cpufreq_setrefresh(struct s3c_cpufreq_config *cfg)
-{
- (cfg->info->set_refresh)(cfg);
-}
-
-static void s3c_cpufreq_setdivs(struct s3c_cpufreq_config *cfg)
-{
- (cfg->info->set_divs)(cfg);
-}
-
-static int s3c_cpufreq_calcdivs(struct s3c_cpufreq_config *cfg)
-{
- return (cfg->info->calc_divs)(cfg);
-}
-
-static void s3c_cpufreq_setfvco(struct s3c_cpufreq_config *cfg)
-{
- cfg->mpll = _clk_mpll;
- (cfg->info->set_fvco)(cfg);
-}
-
-static inline void s3c_cpufreq_updateclk(struct clk *clk,
- unsigned int freq)
-{
- clk_set_rate(clk, freq);
-}
-
-static int s3c_cpufreq_settarget(struct cpufreq_policy *policy,
- unsigned int target_freq,
- struct cpufreq_frequency_table *pll)
-{
- struct s3c_cpufreq_freqs freqs;
- struct s3c_cpufreq_config cpu_new;
- unsigned long flags;
-
- cpu_new = cpu_cur; /* copy new from current */
-
- s3c_cpufreq_show("cur", &cpu_cur);
-
- /* TODO - check for DMA currently outstanding */
-
- cpu_new.pll = pll ? *pll : cpu_cur.pll;
-
- if (pll)
- freqs.pll_changing = 1;
-
- /* update our frequencies */
-
- cpu_new.freq.armclk = target_freq;
- cpu_new.freq.fclk = cpu_new.pll.frequency;
-
- if (s3c_cpufreq_calcdivs(&cpu_new) < 0) {
- pr_err("no divisors for %d\n", target_freq);
- goto err_notpossible;
- }
-
- s3c_freq_dbg("%s: got divs\n", __func__);
-
- s3c_cpufreq_calc(&cpu_new);
-
- s3c_freq_dbg("%s: calculated frequencies for new\n", __func__);
-
- if (cpu_new.freq.hclk != cpu_cur.freq.hclk) {
- if (s3c_cpufreq_calcio(&cpu_new) < 0) {
- pr_err("%s: no IO timings\n", __func__);
- goto err_notpossible;
- }
- }
-
- s3c_cpufreq_show("new", &cpu_new);
-
- /* setup our cpufreq parameters */
-
- freqs.old = cpu_cur.freq;
- freqs.new = cpu_new.freq;
-
- freqs.freqs.old = cpu_cur.freq.armclk / 1000;
- freqs.freqs.new = cpu_new.freq.armclk / 1000;
-
- /* update f/h/p clock settings before we issue the change
- * notification, so that drivers do not need to do anything
- * special if they want to recalculate on CPUFREQ_PRECHANGE. */
-
- s3c_cpufreq_updateclk(_clk_mpll, cpu_new.pll.frequency);
- s3c_cpufreq_updateclk(clk_fclk, cpu_new.freq.fclk);
- s3c_cpufreq_updateclk(clk_hclk, cpu_new.freq.hclk);
- s3c_cpufreq_updateclk(clk_pclk, cpu_new.freq.pclk);
-
- /* start the frequency change */
- cpufreq_freq_transition_begin(policy, &freqs.freqs);
-
- /* If hclk is staying the same, then we do not need to
- * re-write the IO or the refresh timings whilst we are changing
- * speed. */
-
- local_irq_save(flags);
-
- /* is our memory clock slowing down? */
- if (cpu_new.freq.hclk < cpu_cur.freq.hclk) {
- s3c_cpufreq_setrefresh(&cpu_new);
- s3c_cpufreq_setio(&cpu_new);
- }
-
- if (cpu_new.freq.fclk == cpu_cur.freq.fclk) {
- /* not changing PLL, just set the divisors */
-
- s3c_cpufreq_setdivs(&cpu_new);
- } else {
- if (cpu_new.freq.fclk < cpu_cur.freq.fclk) {
- /* slow the cpu down, then set divisors */
-
- s3c_cpufreq_setfvco(&cpu_new);
- s3c_cpufreq_setdivs(&cpu_new);
- } else {
- /* set the divisors, then speed up */
-
- s3c_cpufreq_setdivs(&cpu_new);
- s3c_cpufreq_setfvco(&cpu_new);
- }
- }
-
- /* did our memory clock speed up */
- if (cpu_new.freq.hclk > cpu_cur.freq.hclk) {
- s3c_cpufreq_setrefresh(&cpu_new);
- s3c_cpufreq_setio(&cpu_new);
- }
-
- /* update our current settings */
- cpu_cur = cpu_new;
-
- local_irq_restore(flags);
-
- /* notify everyone we've done this */
- cpufreq_freq_transition_end(policy, &freqs.freqs, 0);
-
- s3c_freq_dbg("%s: finished\n", __func__);
- return 0;
-
- err_notpossible:
- pr_err("no compatible settings for %d\n", target_freq);
- return -EINVAL;
-}
-
-/* s3c_cpufreq_target
- *
- * called by the cpufreq core to adjust the frequency that the CPU
- * is currently running at.
- */
-
-static int s3c_cpufreq_target(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
-{
- struct cpufreq_frequency_table *pll;
- unsigned int index;
-
- /* avoid repeated calls which cause a needless amout of duplicated
- * logging output (and CPU time as the calculation process is
- * done) */
- if (target_freq == last_target)
- return 0;
-
- last_target = target_freq;
-
- s3c_freq_dbg("%s: policy %p, target %u, relation %u\n",
- __func__, policy, target_freq, relation);
-
- if (ftab) {
- index = cpufreq_frequency_table_target(policy, target_freq,
- relation);
-
- s3c_freq_dbg("%s: adjust %d to entry %d (%u)\n", __func__,
- target_freq, index, ftab[index].frequency);
- target_freq = ftab[index].frequency;
- }
-
- target_freq *= 1000; /* convert target to Hz */
-
- /* find the settings for our new frequency */
-
- if (!pll_reg || cpu_cur.lock_pll) {
- /* either we've not got any PLL values, or we've locked
- * to the current one. */
- pll = NULL;
- } else {
- struct cpufreq_policy tmp_policy;
-
- /* we keep the cpu pll table in Hz, to ensure we get an
- * accurate value for the PLL output. */
-
- tmp_policy.min = policy->min * 1000;
- tmp_policy.max = policy->max * 1000;
- tmp_policy.cpu = policy->cpu;
- tmp_policy.freq_table = pll_reg;
-
- /* cpufreq_frequency_table_target returns the index
- * of the table entry, not the value of
- * the table entry's index field. */
-
- index = cpufreq_frequency_table_target(&tmp_policy, target_freq,
- relation);
- pll = pll_reg + index;
-
- s3c_freq_dbg("%s: target %u => %u\n",
- __func__, target_freq, pll->frequency);
-
- target_freq = pll->frequency;
- }
-
- return s3c_cpufreq_settarget(policy, target_freq, pll);
-}
-
-struct clk *s3c_cpufreq_clk_get(struct device *dev, const char *name)
-{
- struct clk *clk;
-
- clk = clk_get(dev, name);
- if (IS_ERR(clk))
- pr_err("failed to get clock '%s'\n", name);
-
- return clk;
-}
-
-static int s3c_cpufreq_init(struct cpufreq_policy *policy)
-{
- policy->clk = clk_arm;
- policy->cpuinfo.transition_latency = cpu_cur.info->latency;
- policy->freq_table = ftab;
-
- return 0;
-}
-
-static int __init s3c_cpufreq_initclks(void)
-{
- _clk_mpll = s3c_cpufreq_clk_get(NULL, "mpll");
- _clk_xtal = s3c_cpufreq_clk_get(NULL, "xtal");
- clk_fclk = s3c_cpufreq_clk_get(NULL, "fclk");
- clk_hclk = s3c_cpufreq_clk_get(NULL, "hclk");
- clk_pclk = s3c_cpufreq_clk_get(NULL, "pclk");
- clk_arm = s3c_cpufreq_clk_get(NULL, "armclk");
-
- if (IS_ERR(clk_fclk) || IS_ERR(clk_hclk) || IS_ERR(clk_pclk) ||
- IS_ERR(_clk_mpll) || IS_ERR(clk_arm) || IS_ERR(_clk_xtal)) {
- pr_err("%s: could not get clock(s)\n", __func__);
- return -ENOENT;
- }
-
- pr_info("%s: clocks f=%lu,h=%lu,p=%lu,a=%lu\n",
- __func__,
- clk_get_rate(clk_fclk) / 1000,
- clk_get_rate(clk_hclk) / 1000,
- clk_get_rate(clk_pclk) / 1000,
- clk_get_rate(clk_arm) / 1000);
-
- return 0;
-}
-
-#ifdef CONFIG_PM
-static struct cpufreq_frequency_table suspend_pll;
-static unsigned int suspend_freq;
-
-static int s3c_cpufreq_suspend(struct cpufreq_policy *policy)
-{
- suspend_pll.frequency = clk_get_rate(_clk_mpll);
- suspend_pll.driver_data = s3c24xx_read_mpllcon();
- suspend_freq = clk_get_rate(clk_arm);
-
- return 0;
-}
-
-static int s3c_cpufreq_resume(struct cpufreq_policy *policy)
-{
- int ret;
-
- s3c_freq_dbg("%s: resuming with policy %p\n", __func__, policy);
-
- last_target = ~0; /* invalidate last_target setting */
-
- /* whilst we will be called later on, we try and re-set the
- * cpu frequencies as soon as possible so that we do not end
- * up resuming devices and then immediately having to re-set
- * a number of settings once these devices have restarted.
- *
- * as a note, it is expected devices are not used until they
- * have been un-suspended and at that time they should have
- * used the updated clock settings.
- */
-
- ret = s3c_cpufreq_settarget(NULL, suspend_freq, &suspend_pll);
- if (ret) {
- pr_err("%s: failed to reset pll/freq\n", __func__);
- return ret;
- }
-
- return 0;
-}
-#else
-#define s3c_cpufreq_resume NULL
-#define s3c_cpufreq_suspend NULL
-#endif
-
-static struct cpufreq_driver s3c24xx_driver = {
- .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
- .target = s3c_cpufreq_target,
- .get = cpufreq_generic_get,
- .init = s3c_cpufreq_init,
- .suspend = s3c_cpufreq_suspend,
- .resume = s3c_cpufreq_resume,
- .name = "s3c24xx",
-};
-
-
-int s3c_cpufreq_register(struct s3c_cpufreq_info *info)
-{
- if (!info || !info->name) {
- pr_err("%s: failed to pass valid information\n", __func__);
- return -EINVAL;
- }
-
- pr_info("S3C24XX CPU Frequency driver, %s cpu support\n",
- info->name);
-
- /* check our driver info has valid data */
-
- BUG_ON(info->set_refresh == NULL);
- BUG_ON(info->set_divs == NULL);
- BUG_ON(info->calc_divs == NULL);
-
- /* info->set_fvco is optional, depending on whether there
- * is a need to set the clock code. */
-
- cpu_cur.info = info;
-
- /* Note, driver registering should probably update locktime */
-
- return 0;
-}
-
-int __init s3c_cpufreq_setboard(struct s3c_cpufreq_board *board)
-{
- struct s3c_cpufreq_board *ours;
-
- if (!board) {
- pr_info("%s: no board data\n", __func__);
- return -EINVAL;
- }
-
- /* Copy the board information so that each board can make this
- * initdata. */
-
- ours = kzalloc(sizeof(*ours), GFP_KERNEL);
- if (!ours)
- return -ENOMEM;
-
- *ours = *board;
- cpu_cur.board = ours;
-
- return 0;
-}
-
-static int __init s3c_cpufreq_auto_io(void)
-{
- int ret;
-
- if (!cpu_cur.info->get_iotiming) {
- pr_err("%s: get_iotiming undefined\n", __func__);
- return -ENOENT;
- }
-
- pr_info("%s: working out IO settings\n", __func__);
-
- ret = (cpu_cur.info->get_iotiming)(&cpu_cur, &s3c24xx_iotiming);
- if (ret)
- pr_err("%s: failed to get timings\n", __func__);
-
- return ret;
-}
-
-/* if one or is zero, then return the other, otherwise return the min */
-#define do_min(_a, _b) ((_a) == 0 ? (_b) : (_b) == 0 ? (_a) : min(_a, _b))
-
-/**
- * s3c_cpufreq_freq_min - find the minimum settings for the given freq.
- * @dst: The destination structure
- * @a: One argument.
- * @b: The other argument.
- *
- * Create a minimum of each frequency entry in the 'struct s3c_freq',
- * unless the entry is zero when it is ignored and the non-zero argument
- * used.
- */
-static void s3c_cpufreq_freq_min(struct s3c_freq *dst,
- struct s3c_freq *a, struct s3c_freq *b)
-{
- dst->fclk = do_min(a->fclk, b->fclk);
- dst->hclk = do_min(a->hclk, b->hclk);
- dst->pclk = do_min(a->pclk, b->pclk);
- dst->armclk = do_min(a->armclk, b->armclk);
-}
-
-static inline u32 calc_locktime(u32 freq, u32 time_us)
-{
- u32 result;
-
- result = freq * time_us;
- result = DIV_ROUND_UP(result, 1000 * 1000);
-
- return result;
-}
-
-static void s3c_cpufreq_update_loctkime(void)
-{
- unsigned int bits = cpu_cur.info->locktime_bits;
- u32 rate = (u32)clk_get_rate(_clk_xtal);
- u32 val;
-
- if (bits == 0) {
- WARN_ON(1);
- return;
- }
-
- val = calc_locktime(rate, cpu_cur.info->locktime_u) << bits;
- val |= calc_locktime(rate, cpu_cur.info->locktime_m);
-
- pr_info("%s: new locktime is 0x%08x\n", __func__, val);
- s3c24xx_write_locktime(val);
-}
-
-static int s3c_cpufreq_build_freq(void)
-{
- int size, ret;
-
- kfree(ftab);
-
- size = cpu_cur.info->calc_freqtable(&cpu_cur, NULL, 0);
- size++;
-
- ftab = kcalloc(size, sizeof(*ftab), GFP_KERNEL);
- if (!ftab)
- return -ENOMEM;
-
- ftab_size = size;
-
- ret = cpu_cur.info->calc_freqtable(&cpu_cur, ftab, size);
- s3c_cpufreq_addfreq(ftab, ret, size, CPUFREQ_TABLE_END);
-
- return 0;
-}
-
-static int __init s3c_cpufreq_initcall(void)
-{
- int ret = 0;
-
- if (cpu_cur.info && cpu_cur.board) {
- ret = s3c_cpufreq_initclks();
- if (ret)
- goto out;
-
- /* get current settings */
- s3c_cpufreq_getcur(&cpu_cur);
- s3c_cpufreq_show("cur", &cpu_cur);
-
- if (cpu_cur.board->auto_io) {
- ret = s3c_cpufreq_auto_io();
- if (ret) {
- pr_err("%s: failed to get io timing\n",
- __func__);
- goto out;
- }
- }
-
- if (cpu_cur.board->need_io && !cpu_cur.info->set_iotiming) {
- pr_err("%s: no IO support registered\n", __func__);
- ret = -EINVAL;
- goto out;
- }
-
- if (!cpu_cur.info->need_pll)
- cpu_cur.lock_pll = 1;
-
- s3c_cpufreq_update_loctkime();
-
- s3c_cpufreq_freq_min(&cpu_cur.max, &cpu_cur.board->max,
- &cpu_cur.info->max);
-
- if (cpu_cur.info->calc_freqtable)
- s3c_cpufreq_build_freq();
-
- ret = cpufreq_register_driver(&s3c24xx_driver);
- }
-
- out:
- return ret;
-}
-
-late_initcall(s3c_cpufreq_initcall);
-
-/**
- * s3c_plltab_register - register CPU PLL table.
- * @plls: The list of PLL entries.
- * @plls_no: The size of the PLL entries @plls.
- *
- * Register the given set of PLLs with the system.
- */
-int s3c_plltab_register(struct cpufreq_frequency_table *plls,
- unsigned int plls_no)
-{
- struct cpufreq_frequency_table *vals;
- unsigned int size;
-
- size = sizeof(*vals) * (plls_no + 1);
-
- vals = kzalloc(size, GFP_KERNEL);
- if (vals) {
- memcpy(vals, plls, size);
- pll_reg = vals;
-
- /* write a terminating entry, we don't store it in the
- * table that is stored in the kernel */
- vals += plls_no;
- vals->frequency = CPUFREQ_TABLE_END;
-
- pr_info("%d PLL entries\n", plls_no);
- } else
- pr_err("no memory for PLL tables\n");
-
- return vals ? 0 : -ENOMEM;
-}
diff --git a/drivers/cpufreq/sa1100-cpufreq.c b/drivers/cpufreq/sa1100-cpufreq.c
deleted file mode 100644
index 252b9fc26124..000000000000
--- a/drivers/cpufreq/sa1100-cpufreq.c
+++ /dev/null
@@ -1,206 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * cpu-sa1100.c: clock scaling for the SA1100
- *
- * Copyright (C) 2000 2001, The Delft University of Technology
- *
- * Authors:
- * - Johan Pouwelse (J.A.Pouwelse@its.tudelft.nl): initial version
- * - Erik Mouw (J.A.K.Mouw@its.tudelft.nl):
- * - major rewrite for linux-2.3.99
- * - rewritten for the more generic power management scheme in
- * linux-2.4.5-rmk1
- *
- * This software has been developed while working on the LART
- * computing board (http://www.lartmaker.nl/), which is
- * sponsored by the Mobile Multi-media Communications
- * (http://www.mobimedia.org/) and Ubiquitous Communications
- * (http://www.ubicom.tudelft.nl/) projects.
- *
- * The authors can be reached at:
- *
- * Erik Mouw
- * Information and Communication Theory Group
- * Faculty of Information Technology and Systems
- * Delft University of Technology
- * P.O. Box 5031
- * 2600 GA Delft
- * The Netherlands
- *
- * Theory of operations
- * ====================
- *
- * Clock scaling can be used to lower the power consumption of the CPU
- * core. This will give you a somewhat longer running time.
- *
- * The SA-1100 has a single register to change the core clock speed:
- *
- * PPCR 0x90020014 PLL config
- *
- * However, the DRAM timings are closely related to the core clock
- * speed, so we need to change these, too. The used registers are:
- *
- * MDCNFG 0xA0000000 DRAM config
- * MDCAS0 0xA0000004 Access waveform
- * MDCAS1 0xA0000008 Access waveform
- * MDCAS2 0xA000000C Access waveform
- *
- * Care must be taken to change the DRAM parameters the correct way,
- * because otherwise the DRAM becomes unusable and the kernel will
- * crash.
- *
- * The simple solution to avoid a kernel crash is to put the actual
- * clock change in ROM and jump to that code from the kernel. The main
- * disadvantage is that the ROM has to be modified, which is not
- * possible on all SA-1100 platforms. Another disadvantage is that
- * jumping to ROM makes clock switching unnecessary complicated.
- *
- * The idea behind this driver is that the memory configuration can be
- * changed while running from DRAM (even with interrupts turned on!)
- * as long as all re-configuration steps yield a valid DRAM
- * configuration. The advantages are clear: it will run on all SA-1100
- * platforms, and the code is very simple.
- *
- * If you really want to understand what is going on in
- * sa1100_update_dram_timings(), you'll have to read sections 8.2,
- * 9.5.7.3, and 10.2 from the "Intel StrongARM SA-1100 Microprocessor
- * Developers Manual" (available for free from Intel).
- */
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/init.h>
-#include <linux/cpufreq.h>
-#include <linux/io.h>
-
-#include <asm/cputype.h>
-
-#include <mach/generic.h>
-#include <mach/hardware.h>
-
-struct sa1100_dram_regs {
- int speed;
- u32 mdcnfg;
- u32 mdcas0;
- u32 mdcas1;
- u32 mdcas2;
-};
-
-
-static struct cpufreq_driver sa1100_driver;
-
-static struct sa1100_dram_regs sa1100_dram_settings[] = {
- /*speed, mdcnfg, mdcas0, mdcas1, mdcas2, clock freq */
- { 59000, 0x00dc88a3, 0xcccccccf, 0xfffffffc, 0xffffffff},/* 59.0 MHz */
- { 73700, 0x011490a3, 0xcccccccf, 0xfffffffc, 0xffffffff},/* 73.7 MHz */
- { 88500, 0x014e90a3, 0xcccccccf, 0xfffffffc, 0xffffffff},/* 88.5 MHz */
- {103200, 0x01889923, 0xcccccccf, 0xfffffffc, 0xffffffff},/* 103.2 MHz */
- {118000, 0x01c29923, 0x9999998f, 0xfffffff9, 0xffffffff},/* 118.0 MHz */
- {132700, 0x01fb2123, 0x9999998f, 0xfffffff9, 0xffffffff},/* 132.7 MHz */
- {147500, 0x02352123, 0x3333330f, 0xfffffff3, 0xffffffff},/* 147.5 MHz */
- {162200, 0x026b29a3, 0x38e38e1f, 0xfff8e38e, 0xffffffff},/* 162.2 MHz */
- {176900, 0x02a329a3, 0x71c71c1f, 0xfff1c71c, 0xffffffff},/* 176.9 MHz */
- {191700, 0x02dd31a3, 0xe38e383f, 0xffe38e38, 0xffffffff},/* 191.7 MHz */
- {206400, 0x03153223, 0xc71c703f, 0xffc71c71, 0xffffffff},/* 206.4 MHz */
- {221200, 0x034fba23, 0xc71c703f, 0xffc71c71, 0xffffffff},/* 221.2 MHz */
- {235900, 0x03853a23, 0xe1e1e07f, 0xe1e1e1e1, 0xffffffe1},/* 235.9 MHz */
- {250700, 0x03bf3aa3, 0xc3c3c07f, 0xc3c3c3c3, 0xffffffc3},/* 250.7 MHz */
- {265400, 0x03f7c2a3, 0xc3c3c07f, 0xc3c3c3c3, 0xffffffc3},/* 265.4 MHz */
- {280200, 0x0431c2a3, 0x878780ff, 0x87878787, 0xffffff87},/* 280.2 MHz */
- { 0, 0, 0, 0, 0 } /* last entry */
-};
-
-static void sa1100_update_dram_timings(int current_speed, int new_speed)
-{
- struct sa1100_dram_regs *settings = sa1100_dram_settings;
-
- /* find speed */
- while (settings->speed != 0) {
- if (new_speed == settings->speed)
- break;
-
- settings++;
- }
-
- if (settings->speed == 0) {
- panic("%s: couldn't find dram setting for speed %d\n",
- __func__, new_speed);
- }
-
- /* No risk, no fun: run with interrupts on! */
- if (new_speed > current_speed) {
- /* We're going FASTER, so first relax the memory
- * timings before changing the core frequency
- */
-
- /* Half the memory access clock */
- MDCNFG |= MDCNFG_CDB2;
-
- /* The order of these statements IS important, keep 8
- * pulses!!
- */
- MDCAS2 = settings->mdcas2;
- MDCAS1 = settings->mdcas1;
- MDCAS0 = settings->mdcas0;
- MDCNFG = settings->mdcnfg;
- } else {
- /* We're going SLOWER: first decrease the core
- * frequency and then tighten the memory settings.
- */
-
- /* Half the memory access clock */
- MDCNFG |= MDCNFG_CDB2;
-
- /* The order of these statements IS important, keep 8
- * pulses!!
- */
- MDCAS0 = settings->mdcas0;
- MDCAS1 = settings->mdcas1;
- MDCAS2 = settings->mdcas2;
- MDCNFG = settings->mdcnfg;
- }
-}
-
-static int sa1100_target(struct cpufreq_policy *policy, unsigned int ppcr)
-{
- unsigned int cur = sa11x0_getspeed(0);
- unsigned int new_freq;
-
- new_freq = sa11x0_freq_table[ppcr].frequency;
-
- if (new_freq > cur)
- sa1100_update_dram_timings(cur, new_freq);
-
- PPCR = ppcr;
-
- if (new_freq < cur)
- sa1100_update_dram_timings(cur, new_freq);
-
- return 0;
-}
-
-static int __init sa1100_cpu_init(struct cpufreq_policy *policy)
-{
- cpufreq_generic_init(policy, sa11x0_freq_table, 0);
- return 0;
-}
-
-static struct cpufreq_driver sa1100_driver __refdata = {
- .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK |
- CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING,
- .verify = cpufreq_generic_frequency_table_verify,
- .target_index = sa1100_target,
- .get = sa11x0_getspeed,
- .init = sa1100_cpu_init,
- .name = "sa1100",
-};
-
-static int __init sa1100_dram_init(void)
-{
- if (cpu_is_sa1100())
- return cpufreq_register_driver(&sa1100_driver);
- else
- return -ENODEV;
-}
-
-arch_initcall(sa1100_dram_init);
diff --git a/drivers/cpufreq/sa1110-cpufreq.c b/drivers/cpufreq/sa1110-cpufreq.c
index 1a83c8678a63..bb7f591a8b05 100644
--- a/drivers/cpufreq/sa1110-cpufreq.c
+++ b/drivers/cpufreq/sa1110-cpufreq.c
@@ -344,14 +344,8 @@ static int __init sa1110_clk_init(void)
if (!name[0]) {
if (machine_is_assabet())
name = "TC59SM716-CL3";
- if (machine_is_pt_system3())
- name = "K4S641632D";
- if (machine_is_h3100())
- name = "KM416S4030CT";
if (machine_is_jornada720() || machine_is_h3600())
name = "K4S281632B-1H";
- if (machine_is_nanoengine())
- name = "MT48LC8M16A2TG-75";
}
sdram = sa1110_find_sdram(name);
diff --git a/drivers/cpufreq/tegra194-cpufreq.c b/drivers/cpufreq/tegra194-cpufreq.c
index 4596c3e323aa..5890e25d7f77 100644
--- a/drivers/cpufreq/tegra194-cpufreq.c
+++ b/drivers/cpufreq/tegra194-cpufreq.c
@@ -411,7 +411,8 @@ static int tegra194_cpufreq_set_target(struct cpufreq_policy *policy,
static struct cpufreq_driver tegra194_cpufreq_driver = {
.name = "tegra194",
- .flags = CPUFREQ_CONST_LOOPS | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+ .flags = CPUFREQ_CONST_LOOPS | CPUFREQ_NEED_INITIAL_FREQ_CHECK |
+ CPUFREQ_IS_COOLING_DEV,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = tegra194_cpufreq_set_target,
.get = tegra194_get_speed,
diff --git a/drivers/cpuidle/Kconfig b/drivers/cpuidle/Kconfig
index ff71dd662880..cac5997dca50 100644
--- a/drivers/cpuidle/Kconfig
+++ b/drivers/cpuidle/Kconfig
@@ -74,6 +74,7 @@ endmenu
config HALTPOLL_CPUIDLE
tristate "Halt poll cpuidle driver"
depends on X86 && KVM_GUEST
+ select CPU_IDLE_GOV_HALTPOLL
default y
help
This option enables halt poll cpuidle driver, which allows to poll
diff --git a/drivers/cpuidle/Kconfig.arm b/drivers/cpuidle/Kconfig.arm
index 747aa537389b..a1ee475d180d 100644
--- a/drivers/cpuidle/Kconfig.arm
+++ b/drivers/cpuidle/Kconfig.arm
@@ -24,6 +24,14 @@ config ARM_PSCI_CPUIDLE
It provides an idle driver that is capable of detecting and
managing idle states through the PSCI firmware interface.
+ The driver has limitations when used with PREEMPT_RT:
+ - If the idle states are described with the non-hierarchical layout,
+ all idle states are still available.
+
+ - If the idle states are described with the hierarchical layout,
+ only the idle states defined per CPU are available, but not the ones
+ being shared among a group of CPUs (aka cluster idle states).
+
config ARM_PSCI_CPUIDLE_DOMAIN
bool "PSCI CPU idle Domain"
depends on ARM_PSCI_CPUIDLE
@@ -102,6 +110,7 @@ config ARM_MVEBU_V7_CPUIDLE
config ARM_TEGRA_CPUIDLE
bool "CPU Idle Driver for NVIDIA Tegra SoCs"
depends on (ARCH_TEGRA || COMPILE_TEST) && !ARM64 && MMU
+ depends on ARCH_SUSPEND_POSSIBLE
select ARCH_NEEDS_CPU_IDLE_COUPLED if SMP
select ARM_CPU_SUSPEND
help
@@ -110,6 +119,7 @@ config ARM_TEGRA_CPUIDLE
config ARM_QCOM_SPM_CPUIDLE
bool "CPU Idle Driver for Qualcomm Subsystem Power Manager (SPM)"
depends on (ARCH_QCOM || COMPILE_TEST) && !ARM64 && MMU
+ depends on ARCH_SUSPEND_POSSIBLE
select ARM_CPU_SUSPEND
select CPU_IDLE_MULTIPLE_DRIVERS
select DT_IDLE_STATES
diff --git a/drivers/cpuidle/cpuidle-arm.c b/drivers/cpuidle/cpuidle-arm.c
index 8c758920d699..7cfb980a357d 100644
--- a/drivers/cpuidle/cpuidle-arm.c
+++ b/drivers/cpuidle/cpuidle-arm.c
@@ -31,8 +31,8 @@
* Called from the CPUidle framework to program the device to the
* specified target state selected by the governor.
*/
-static int arm_enter_idle_state(struct cpuidle_device *dev,
- struct cpuidle_driver *drv, int idx)
+static __cpuidle int arm_enter_idle_state(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int idx)
{
/*
* Pass idle state index to arm_cpuidle_suspend which in turn
diff --git a/drivers/cpuidle/cpuidle-big_little.c b/drivers/cpuidle/cpuidle-big_little.c
index abe51185f243..74972deda0ea 100644
--- a/drivers/cpuidle/cpuidle-big_little.c
+++ b/drivers/cpuidle/cpuidle-big_little.c
@@ -64,7 +64,8 @@ static struct cpuidle_driver bl_idle_little_driver = {
.enter = bl_enter_powerdown,
.exit_latency = 700,
.target_residency = 2500,
- .flags = CPUIDLE_FLAG_TIMER_STOP,
+ .flags = CPUIDLE_FLAG_TIMER_STOP |
+ CPUIDLE_FLAG_RCU_IDLE,
.name = "C1",
.desc = "ARM little-cluster power down",
},
@@ -85,7 +86,8 @@ static struct cpuidle_driver bl_idle_big_driver = {
.enter = bl_enter_powerdown,
.exit_latency = 500,
.target_residency = 2000,
- .flags = CPUIDLE_FLAG_TIMER_STOP,
+ .flags = CPUIDLE_FLAG_TIMER_STOP |
+ CPUIDLE_FLAG_RCU_IDLE,
.name = "C1",
.desc = "ARM big-cluster power down",
},
@@ -120,15 +122,17 @@ static int notrace bl_powerdown_finisher(unsigned long arg)
* Called from the CPUidle framework to program the device to the
* specified target state selected by the governor.
*/
-static int bl_enter_powerdown(struct cpuidle_device *dev,
- struct cpuidle_driver *drv, int idx)
+static __cpuidle int bl_enter_powerdown(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int idx)
{
cpu_pm_enter();
+ ct_cpuidle_enter();
cpu_suspend(0, bl_powerdown_finisher);
/* signals the MCPM core that CPU is out of low power state */
mcpm_cpu_powered_up();
+ ct_cpuidle_exit();
cpu_pm_exit();
diff --git a/drivers/cpuidle/cpuidle-haltpoll.c b/drivers/cpuidle/cpuidle-haltpoll.c
index 3a39a7f48b77..e66df22f9695 100644
--- a/drivers/cpuidle/cpuidle-haltpoll.c
+++ b/drivers/cpuidle/cpuidle-haltpoll.c
@@ -32,7 +32,7 @@ static int default_enter_idle(struct cpuidle_device *dev,
local_irq_enable();
return index;
}
- default_idle();
+ arch_cpu_idle();
return index;
}
diff --git a/drivers/cpuidle/cpuidle-mvebu-v7.c b/drivers/cpuidle/cpuidle-mvebu-v7.c
index 01a856971f05..563dba609b98 100644
--- a/drivers/cpuidle/cpuidle-mvebu-v7.c
+++ b/drivers/cpuidle/cpuidle-mvebu-v7.c
@@ -25,9 +25,9 @@
static int (*mvebu_v7_cpu_suspend)(int);
-static int mvebu_v7_enter_idle(struct cpuidle_device *dev,
- struct cpuidle_driver *drv,
- int index)
+static __cpuidle int mvebu_v7_enter_idle(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv,
+ int index)
{
int ret;
bool deepidle = false;
@@ -36,7 +36,10 @@ static int mvebu_v7_enter_idle(struct cpuidle_device *dev,
if (drv->states[index].flags & MVEBU_V7_FLAG_DEEP_IDLE)
deepidle = true;
+ ct_cpuidle_enter();
ret = mvebu_v7_cpu_suspend(deepidle);
+ ct_cpuidle_exit();
+
cpu_pm_exit();
if (ret)
@@ -53,6 +56,7 @@ static struct cpuidle_driver armadaxp_idle_driver = {
.exit_latency = 100,
.power_usage = 50,
.target_residency = 1000,
+ .flags = CPUIDLE_FLAG_RCU_IDLE,
.name = "MV CPU IDLE",
.desc = "CPU power down",
},
@@ -61,7 +65,7 @@ static struct cpuidle_driver armadaxp_idle_driver = {
.exit_latency = 1000,
.power_usage = 5,
.target_residency = 10000,
- .flags = MVEBU_V7_FLAG_DEEP_IDLE,
+ .flags = MVEBU_V7_FLAG_DEEP_IDLE | CPUIDLE_FLAG_RCU_IDLE,
.name = "MV CPU DEEP IDLE",
.desc = "CPU and L2 Fabric power down",
},
@@ -76,7 +80,7 @@ static struct cpuidle_driver armada370_idle_driver = {
.exit_latency = 100,
.power_usage = 5,
.target_residency = 1000,
- .flags = MVEBU_V7_FLAG_DEEP_IDLE,
+ .flags = MVEBU_V7_FLAG_DEEP_IDLE | CPUIDLE_FLAG_RCU_IDLE,
.name = "Deep Idle",
.desc = "CPU and L2 Fabric power down",
},
@@ -91,6 +95,7 @@ static struct cpuidle_driver armada38x_idle_driver = {
.exit_latency = 10,
.power_usage = 5,
.target_residency = 100,
+ .flags = CPUIDLE_FLAG_RCU_IDLE,
.name = "Idle",
.desc = "CPU and SCU power down",
},
diff --git a/drivers/cpuidle/cpuidle-psci-domain.c b/drivers/cpuidle/cpuidle-psci-domain.c
index c80cf9ddabd8..6ad2954948a5 100644
--- a/drivers/cpuidle/cpuidle-psci-domain.c
+++ b/drivers/cpuidle/cpuidle-psci-domain.c
@@ -64,8 +64,11 @@ static int psci_pd_init(struct device_node *np, bool use_osi)
pd->flags |= GENPD_FLAG_IRQ_SAFE | GENPD_FLAG_CPU_DOMAIN;
- /* Allow power off when OSI has been successfully enabled. */
- if (use_osi)
+ /*
+ * Allow power off when OSI has been successfully enabled.
+ * PREEMPT_RT is not yet ready to enter domain idle states.
+ */
+ if (use_osi && !IS_ENABLED(CONFIG_PREEMPT_RT))
pd->power_off = psci_pd_power_off;
else
pd->flags |= GENPD_FLAG_ALWAYS_ON;
diff --git a/drivers/cpuidle/cpuidle-psci.c b/drivers/cpuidle/cpuidle-psci.c
index 57bc3e3ae391..6de027f9f6f5 100644
--- a/drivers/cpuidle/cpuidle-psci.c
+++ b/drivers/cpuidle/cpuidle-psci.c
@@ -49,14 +49,9 @@ static inline u32 psci_get_domain_state(void)
return __this_cpu_read(domain_state);
}
-static inline int psci_enter_state(int idx, u32 state)
-{
- return CPU_PM_CPU_IDLE_ENTER_PARAM(psci_cpu_suspend_enter, idx, state);
-}
-
-static int __psci_enter_domain_idle_state(struct cpuidle_device *dev,
- struct cpuidle_driver *drv, int idx,
- bool s2idle)
+static __cpuidle int __psci_enter_domain_idle_state(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int idx,
+ bool s2idle)
{
struct psci_cpuidle_data *data = this_cpu_ptr(&psci_cpuidle_data);
u32 *states = data->psci_states;
@@ -69,12 +64,10 @@ static int __psci_enter_domain_idle_state(struct cpuidle_device *dev,
return -1;
/* Do runtime PM to manage a hierarchical CPU toplogy. */
- ct_irq_enter_irqson();
if (s2idle)
dev_pm_genpd_suspend(pd_dev);
else
pm_runtime_put_sync_suspend(pd_dev);
- ct_irq_exit_irqson();
state = psci_get_domain_state();
if (!state)
@@ -82,12 +75,10 @@ static int __psci_enter_domain_idle_state(struct cpuidle_device *dev,
ret = psci_cpu_suspend_enter(state) ? -1 : idx;
- ct_irq_enter_irqson();
if (s2idle)
dev_pm_genpd_resume(pd_dev);
else
pm_runtime_get_sync(pd_dev);
- ct_irq_exit_irqson();
cpu_pm_exit();
@@ -192,12 +183,12 @@ static void psci_idle_init_cpuhp(void)
pr_warn("Failed %d while setup cpuhp state\n", err);
}
-static int psci_enter_idle_state(struct cpuidle_device *dev,
- struct cpuidle_driver *drv, int idx)
+static __cpuidle int psci_enter_idle_state(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int idx)
{
u32 *state = __this_cpu_read(psci_cpuidle_data.psci_states);
- return psci_enter_state(idx, state[idx]);
+ return CPU_PM_CPU_IDLE_ENTER_PARAM_RCU(psci_cpu_suspend_enter, idx, state[idx]);
}
static const struct of_device_id psci_idle_state_match[] = {
@@ -231,6 +222,9 @@ static int psci_dt_cpu_init_topology(struct cpuidle_driver *drv,
if (!psci_has_osi_support())
return 0;
+ if (IS_ENABLED(CONFIG_PREEMPT_RT))
+ return 0;
+
data->dev = psci_dt_attach_cpu(cpu);
if (IS_ERR_OR_NULL(data->dev))
return PTR_ERR_OR_ZERO(data->dev);
@@ -240,6 +234,7 @@ static int psci_dt_cpu_init_topology(struct cpuidle_driver *drv,
* of a shared state for the domain, assumes the domain states are all
* deeper states.
*/
+ drv->states[state_count - 1].flags |= CPUIDLE_FLAG_RCU_IDLE;
drv->states[state_count - 1].enter = psci_enter_domain_idle_state;
drv->states[state_count - 1].enter_s2idle = psci_enter_s2idle_domain_idle_state;
psci_cpuidle_use_cpuhp = true;
diff --git a/drivers/cpuidle/cpuidle-qcom-spm.c b/drivers/cpuidle/cpuidle-qcom-spm.c
index beedf22cbe78..326bca154ac7 100644
--- a/drivers/cpuidle/cpuidle-qcom-spm.c
+++ b/drivers/cpuidle/cpuidle-qcom-spm.c
@@ -58,8 +58,8 @@ static int qcom_cpu_spc(struct spm_driver_data *drv)
return ret;
}
-static int spm_enter_idle_state(struct cpuidle_device *dev,
- struct cpuidle_driver *drv, int idx)
+static __cpuidle int spm_enter_idle_state(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int idx)
{
struct cpuidle_qcom_spm_data *data = container_of(drv, struct cpuidle_qcom_spm_data,
cpuidle_driver);
diff --git a/drivers/cpuidle/cpuidle-riscv-sbi.c b/drivers/cpuidle/cpuidle-riscv-sbi.c
index 05fe2902df9a..be383f4b6855 100644
--- a/drivers/cpuidle/cpuidle-riscv-sbi.c
+++ b/drivers/cpuidle/cpuidle-riscv-sbi.c
@@ -93,8 +93,8 @@ static int sbi_suspend(u32 state)
return sbi_suspend_finisher(state, 0, 0);
}
-static int sbi_cpuidle_enter_state(struct cpuidle_device *dev,
- struct cpuidle_driver *drv, int idx)
+static __cpuidle int sbi_cpuidle_enter_state(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int idx)
{
u32 *states = __this_cpu_read(sbi_cpuidle_data.states);
u32 state = states[idx];
@@ -106,9 +106,9 @@ static int sbi_cpuidle_enter_state(struct cpuidle_device *dev,
idx, state);
}
-static int __sbi_enter_domain_idle_state(struct cpuidle_device *dev,
- struct cpuidle_driver *drv, int idx,
- bool s2idle)
+static __cpuidle int __sbi_enter_domain_idle_state(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int idx,
+ bool s2idle)
{
struct sbi_cpuidle_data *data = this_cpu_ptr(&sbi_cpuidle_data);
u32 *states = data->states;
@@ -121,12 +121,12 @@ static int __sbi_enter_domain_idle_state(struct cpuidle_device *dev,
return -1;
/* Do runtime PM to manage a hierarchical CPU toplogy. */
- ct_irq_enter_irqson();
if (s2idle)
dev_pm_genpd_suspend(pd_dev);
else
pm_runtime_put_sync_suspend(pd_dev);
- ct_irq_exit_irqson();
+
+ ct_cpuidle_enter();
if (sbi_is_domain_state_available())
state = sbi_get_domain_state();
@@ -135,12 +135,12 @@ static int __sbi_enter_domain_idle_state(struct cpuidle_device *dev,
ret = sbi_suspend(state) ? -1 : idx;
- ct_irq_enter_irqson();
+ ct_cpuidle_exit();
+
if (s2idle)
dev_pm_genpd_resume(pd_dev);
else
pm_runtime_get_sync(pd_dev);
- ct_irq_exit_irqson();
cpu_pm_exit();
@@ -251,6 +251,7 @@ static int sbi_dt_cpu_init_topology(struct cpuidle_driver *drv,
* of a shared state for the domain, assumes the domain states are all
* deeper states.
*/
+ drv->states[state_count - 1].flags |= CPUIDLE_FLAG_RCU_IDLE;
drv->states[state_count - 1].enter = sbi_enter_domain_idle_state;
drv->states[state_count - 1].enter_s2idle =
sbi_enter_s2idle_domain_idle_state;
diff --git a/drivers/cpuidle/cpuidle-tegra.c b/drivers/cpuidle/cpuidle-tegra.c
index 9845629aeb6d..b203a93deac5 100644
--- a/drivers/cpuidle/cpuidle-tegra.c
+++ b/drivers/cpuidle/cpuidle-tegra.c
@@ -160,8 +160,8 @@ static int tegra_cpuidle_coupled_barrier(struct cpuidle_device *dev)
return 0;
}
-static int tegra_cpuidle_state_enter(struct cpuidle_device *dev,
- int index, unsigned int cpu)
+static __cpuidle int tegra_cpuidle_state_enter(struct cpuidle_device *dev,
+ int index, unsigned int cpu)
{
int err;
@@ -180,9 +180,11 @@ static int tegra_cpuidle_state_enter(struct cpuidle_device *dev,
}
local_fiq_disable();
- RCU_NONIDLE(tegra_pm_set_cpu_in_lp2());
+ tegra_pm_set_cpu_in_lp2();
cpu_pm_enter();
+ ct_cpuidle_enter();
+
switch (index) {
case TEGRA_C7:
err = tegra_cpuidle_c7_enter();
@@ -197,8 +199,10 @@ static int tegra_cpuidle_state_enter(struct cpuidle_device *dev,
break;
}
+ ct_cpuidle_exit();
+
cpu_pm_exit();
- RCU_NONIDLE(tegra_pm_clear_cpu_in_lp2());
+ tegra_pm_clear_cpu_in_lp2();
local_fiq_enable();
return err ?: index;
@@ -222,10 +226,11 @@ static int tegra_cpuidle_adjust_state_index(int index, unsigned int cpu)
return index;
}
-static int tegra_cpuidle_enter(struct cpuidle_device *dev,
- struct cpuidle_driver *drv,
- int index)
+static __cpuidle int tegra_cpuidle_enter(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv,
+ int index)
{
+ bool do_rcu = drv->states[index].flags & CPUIDLE_FLAG_RCU_IDLE;
unsigned int cpu = cpu_logical_map(dev->cpu);
int ret;
@@ -233,9 +238,13 @@ static int tegra_cpuidle_enter(struct cpuidle_device *dev,
if (dev->states_usage[index].disable)
return -1;
- if (index == TEGRA_C1)
+ if (index == TEGRA_C1) {
+ if (do_rcu)
+ ct_cpuidle_enter();
ret = arm_cpuidle_simple_enter(dev, drv, index);
- else
+ if (do_rcu)
+ ct_cpuidle_exit();
+ } else
ret = tegra_cpuidle_state_enter(dev, index, cpu);
if (ret < 0) {
@@ -285,7 +294,8 @@ static struct cpuidle_driver tegra_idle_driver = {
.exit_latency = 2000,
.target_residency = 2200,
.power_usage = 100,
- .flags = CPUIDLE_FLAG_TIMER_STOP,
+ .flags = CPUIDLE_FLAG_TIMER_STOP |
+ CPUIDLE_FLAG_RCU_IDLE,
.name = "C7",
.desc = "CPU core powered off",
},
@@ -295,6 +305,7 @@ static struct cpuidle_driver tegra_idle_driver = {
.target_residency = 10000,
.power_usage = 0,
.flags = CPUIDLE_FLAG_TIMER_STOP |
+ CPUIDLE_FLAG_RCU_IDLE |
CPUIDLE_FLAG_COUPLED,
.name = "CC6",
.desc = "CPU cluster powered off",
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 6eceb1988243..0b00f21cefe3 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -14,6 +14,7 @@
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
+#include <linux/sched/idle.h>
#include <linux/notifier.h>
#include <linux/pm_qos.h>
#include <linux/cpu.h>
@@ -136,11 +137,13 @@ int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
}
#ifdef CONFIG_SUSPEND
-static void enter_s2idle_proper(struct cpuidle_driver *drv,
- struct cpuidle_device *dev, int index)
+static noinstr void enter_s2idle_proper(struct cpuidle_driver *drv,
+ struct cpuidle_device *dev, int index)
{
- ktime_t time_start, time_end;
struct cpuidle_state *target_state = &drv->states[index];
+ ktime_t time_start, time_end;
+
+ instrumentation_begin();
time_start = ns_to_ktime(local_clock());
@@ -151,13 +154,18 @@ static void enter_s2idle_proper(struct cpuidle_driver *drv,
* suspended is generally unsafe.
*/
stop_critical_timings();
- if (!(target_state->flags & CPUIDLE_FLAG_RCU_IDLE))
- ct_idle_enter();
+ if (!(target_state->flags & CPUIDLE_FLAG_RCU_IDLE)) {
+ ct_cpuidle_enter();
+ /* Annotate away the indirect call */
+ instrumentation_begin();
+ }
target_state->enter_s2idle(dev, drv, index);
if (WARN_ON_ONCE(!irqs_disabled()))
- local_irq_disable();
- if (!(target_state->flags & CPUIDLE_FLAG_RCU_IDLE))
- ct_idle_exit();
+ raw_local_irq_disable();
+ if (!(target_state->flags & CPUIDLE_FLAG_RCU_IDLE)) {
+ instrumentation_end();
+ ct_cpuidle_exit();
+ }
tick_unfreeze();
start_critical_timings();
@@ -165,6 +173,7 @@ static void enter_s2idle_proper(struct cpuidle_driver *drv,
dev->states_usage[index].s2idle_time += ktime_us_delta(time_end, time_start);
dev->states_usage[index].s2idle_usage++;
+ instrumentation_end();
}
/**
@@ -199,8 +208,9 @@ int cpuidle_enter_s2idle(struct cpuidle_driver *drv, struct cpuidle_device *dev)
* @drv: cpuidle driver for this cpu
* @index: index into the states table in @drv of the state to enter
*/
-int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
- int index)
+noinstr int cpuidle_enter_state(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv,
+ int index)
{
int entered_state;
@@ -208,6 +218,8 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
bool broadcast = !!(target_state->flags & CPUIDLE_FLAG_TIMER_STOP);
ktime_t time_start, time_end;
+ instrumentation_begin();
+
/*
* Tell the time framework to switch to a broadcast timer because our
* local timer will be shut down. If a local timer is used from another
@@ -234,11 +246,33 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
time_start = ns_to_ktime(local_clock());
stop_critical_timings();
- if (!(target_state->flags & CPUIDLE_FLAG_RCU_IDLE))
- ct_idle_enter();
+ if (!(target_state->flags & CPUIDLE_FLAG_RCU_IDLE)) {
+ ct_cpuidle_enter();
+ /* Annotate away the indirect call */
+ instrumentation_begin();
+ }
+
+ /*
+ * NOTE!!
+ *
+ * For cpuidle_state::enter() methods that do *NOT* set
+ * CPUIDLE_FLAG_RCU_IDLE RCU will be disabled here and these functions
+ * must be marked either noinstr or __cpuidle.
+ *
+ * For cpuidle_state::enter() methods that *DO* set
+ * CPUIDLE_FLAG_RCU_IDLE this isn't required, but they must mark the
+ * function calling ct_cpuidle_enter() as noinstr/__cpuidle and all
+ * functions called within the RCU-idle region.
+ */
entered_state = target_state->enter(dev, drv, index);
- if (!(target_state->flags & CPUIDLE_FLAG_RCU_IDLE))
- ct_idle_exit();
+
+ if (WARN_ONCE(!irqs_disabled(), "%ps leaked IRQ state", target_state->enter))
+ raw_local_irq_disable();
+
+ if (!(target_state->flags & CPUIDLE_FLAG_RCU_IDLE)) {
+ instrumentation_end();
+ ct_cpuidle_exit();
+ }
start_critical_timings();
sched_clock_idle_wakeup_event();
@@ -248,12 +282,8 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
/* The cpu is no longer idle or about to enter idle. */
sched_idle_set_state(NULL);
- if (broadcast) {
- if (WARN_ON_ONCE(!irqs_disabled()))
- local_irq_disable();
-
+ if (broadcast)
tick_broadcast_exit();
- }
if (!cpuidle_state_is_coupled(drv, index))
local_irq_enable();
@@ -305,6 +335,8 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
dev->states_usage[index].rejected++;
}
+ instrumentation_end();
+
return entered_state;
}
@@ -394,7 +426,7 @@ void cpuidle_reflect(struct cpuidle_device *dev, int index)
* @dev: the cpuidle device
*
*/
-u64 cpuidle_poll_time(struct cpuidle_driver *drv,
+__cpuidle u64 cpuidle_poll_time(struct cpuidle_driver *drv,
struct cpuidle_device *dev)
{
int i;
diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
index f70aa17e2a8e..d9cda7f6ccb9 100644
--- a/drivers/cpuidle/driver.c
+++ b/drivers/cpuidle/driver.c
@@ -183,11 +183,15 @@ static void __cpuidle_driver_init(struct cpuidle_driver *drv)
s->target_residency_ns = s->target_residency * NSEC_PER_USEC;
else if (s->target_residency_ns < 0)
s->target_residency_ns = 0;
+ else
+ s->target_residency = div_u64(s->target_residency_ns, NSEC_PER_USEC);
if (s->exit_latency > 0)
s->exit_latency_ns = s->exit_latency * NSEC_PER_USEC;
else if (s->exit_latency_ns < 0)
s->exit_latency_ns = 0;
+ else
+ s->exit_latency = div_u64(s->exit_latency_ns, NSEC_PER_USEC);
}
}
diff --git a/drivers/cpuidle/dt_idle_states.c b/drivers/cpuidle/dt_idle_states.c
index 7ca3d7d9b5ea..02aa0b39af9d 100644
--- a/drivers/cpuidle/dt_idle_states.c
+++ b/drivers/cpuidle/dt_idle_states.c
@@ -77,7 +77,7 @@ static int init_state_node(struct cpuidle_state *idle_state,
if (err)
desc = state_node->name;
- idle_state->flags = 0;
+ idle_state->flags = CPUIDLE_FLAG_RCU_IDLE;
if (of_property_read_bool(state_node, "local-timer-stop"))
idle_state->flags |= CPUIDLE_FLAG_TIMER_STOP;
/*
diff --git a/drivers/cpuidle/governors/teo.c b/drivers/cpuidle/governors/teo.c
index d9262db79cae..987fc5f3997d 100644
--- a/drivers/cpuidle/governors/teo.c
+++ b/drivers/cpuidle/governors/teo.c
@@ -2,8 +2,13 @@
/*
* Timer events oriented CPU idle governor
*
+ * TEO governor:
* Copyright (C) 2018 - 2021 Intel Corporation
* Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+ *
+ * Util-awareness mechanism:
+ * Copyright (C) 2022 Arm Ltd.
+ * Author: Kajetan Puchalski <kajetan.puchalski@arm.com>
*/
/**
@@ -99,15 +104,56 @@
* select the given idle state instead of the candidate one.
*
* 3. By default, select the candidate state.
+ *
+ * Util-awareness mechanism:
+ *
+ * The idea behind the util-awareness extension is that there are two distinct
+ * scenarios for the CPU which should result in two different approaches to idle
+ * state selection - utilized and not utilized.
+ *
+ * In this case, 'utilized' means that the average runqueue util of the CPU is
+ * above a certain threshold.
+ *
+ * When the CPU is utilized while going into idle, more likely than not it will
+ * be woken up to do more work soon and so a shallower idle state should be
+ * selected to minimise latency and maximise performance. When the CPU is not
+ * being utilized, the usual metrics-based approach to selecting the deepest
+ * available idle state should be preferred to take advantage of the power
+ * saving.
+ *
+ * In order to achieve this, the governor uses a utilization threshold.
+ * The threshold is computed per-CPU as a percentage of the CPU's capacity
+ * by bit shifting the capacity value. Based on testing, the shift of 6 (~1.56%)
+ * seems to be getting the best results.
+ *
+ * Before selecting the next idle state, the governor compares the current CPU
+ * util to the precomputed util threshold. If it's below, it defaults to the
+ * TEO metrics mechanism. If it's above, the closest shallower idle state will
+ * be selected instead, as long as is not a polling state.
*/
#include <linux/cpuidle.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
+#include <linux/sched.h>
#include <linux/sched/clock.h>
+#include <linux/sched/topology.h>
#include <linux/tick.h>
/*
+ * The number of bits to shift the CPU's capacity by in order to determine
+ * the utilized threshold.
+ *
+ * 6 was chosen based on testing as the number that achieved the best balance
+ * of power and performance on average.
+ *
+ * The resulting threshold is high enough to not be triggered by background
+ * noise and low enough to react quickly when activity starts to ramp up.
+ */
+#define UTIL_THRESHOLD_SHIFT 6
+
+
+/*
* The PULSE value is added to metrics when they grow and the DECAY_SHIFT value
* is used for decreasing metrics on a regular basis.
*/
@@ -137,9 +183,11 @@ struct teo_bin {
* @time_span_ns: Time between idle state selection and post-wakeup update.
* @sleep_length_ns: Time till the closest timer event (at the selection time).
* @state_bins: Idle state data bins for this CPU.
- * @total: Grand total of the "intercepts" and "hits" mertics for all bins.
+ * @total: Grand total of the "intercepts" and "hits" metrics for all bins.
* @next_recent_idx: Index of the next @recent_idx entry to update.
* @recent_idx: Indices of bins corresponding to recent "intercepts".
+ * @util_threshold: Threshold above which the CPU is considered utilized
+ * @utilized: Whether the last sleep on the CPU happened while utilized
*/
struct teo_cpu {
s64 time_span_ns;
@@ -148,11 +196,30 @@ struct teo_cpu {
unsigned int total;
int next_recent_idx;
int recent_idx[NR_RECENT];
+ unsigned long util_threshold;
+ bool utilized;
};
static DEFINE_PER_CPU(struct teo_cpu, teo_cpus);
/**
+ * teo_cpu_is_utilized - Check if the CPU's util is above the threshold
+ * @cpu: Target CPU
+ * @cpu_data: Governor CPU data for the target CPU
+ */
+#ifdef CONFIG_SMP
+static bool teo_cpu_is_utilized(int cpu, struct teo_cpu *cpu_data)
+{
+ return sched_cpu_util(cpu) > cpu_data->util_threshold;
+}
+#else
+static bool teo_cpu_is_utilized(int cpu, struct teo_cpu *cpu_data)
+{
+ return false;
+}
+#endif
+
+/**
* teo_update - Update CPU metrics after wakeup.
* @drv: cpuidle driver containing state data.
* @dev: Target CPU.
@@ -258,15 +325,17 @@ static s64 teo_middle_of_bin(int idx, struct cpuidle_driver *drv)
* @dev: Target CPU.
* @state_idx: Index of the capping idle state.
* @duration_ns: Idle duration value to match.
+ * @no_poll: Don't consider polling states.
*/
static int teo_find_shallower_state(struct cpuidle_driver *drv,
struct cpuidle_device *dev, int state_idx,
- s64 duration_ns)
+ s64 duration_ns, bool no_poll)
{
int i;
for (i = state_idx - 1; i >= 0; i--) {
- if (dev->states_usage[i].disable)
+ if (dev->states_usage[i].disable ||
+ (no_poll && drv->states[i].flags & CPUIDLE_FLAG_POLLING))
continue;
state_idx = i;
@@ -321,6 +390,22 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
goto end;
}
+ cpu_data->utilized = teo_cpu_is_utilized(dev->cpu, cpu_data);
+ /*
+ * If the CPU is being utilized over the threshold and there are only 2
+ * states to choose from, the metrics need not be considered, so choose
+ * the shallowest non-polling state and exit.
+ */
+ if (drv->state_count < 3 && cpu_data->utilized) {
+ for (i = 0; i < drv->state_count; ++i) {
+ if (!dev->states_usage[i].disable &&
+ !(drv->states[i].flags & CPUIDLE_FLAG_POLLING)) {
+ idx = i;
+ goto end;
+ }
+ }
+ }
+
/*
* Find the deepest idle state whose target residency does not exceed
* the current sleep length and the deepest idle state not deeper than
@@ -452,6 +537,13 @@ static int teo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
if (idx > constraint_idx)
idx = constraint_idx;
+ /*
+ * If the CPU is being utilized over the threshold, choose a shallower
+ * non-polling state to improve latency
+ */
+ if (cpu_data->utilized)
+ idx = teo_find_shallower_state(drv, dev, idx, duration_ns, true);
+
end:
/*
* Don't stop the tick if the selected state is a polling one or if the
@@ -469,7 +561,7 @@ end:
*/
if (idx > idx0 &&
drv->states[idx].target_residency_ns > delta_tick)
- idx = teo_find_shallower_state(drv, dev, idx, delta_tick);
+ idx = teo_find_shallower_state(drv, dev, idx, delta_tick, false);
}
return idx;
@@ -508,9 +600,11 @@ static int teo_enable_device(struct cpuidle_driver *drv,
struct cpuidle_device *dev)
{
struct teo_cpu *cpu_data = per_cpu_ptr(&teo_cpus, dev->cpu);
+ unsigned long max_capacity = arch_scale_cpu_capacity(dev->cpu);
int i;
memset(cpu_data, 0, sizeof(*cpu_data));
+ cpu_data->util_threshold = max_capacity >> UTIL_THRESHOLD_SHIFT;
for (i = 0; i < NR_RECENT; i++)
cpu_data->recent_idx[i] = -1;
diff --git a/drivers/cpuidle/poll_state.c b/drivers/cpuidle/poll_state.c
index f7e83613ae94..bdcfeaecd228 100644
--- a/drivers/cpuidle/poll_state.c
+++ b/drivers/cpuidle/poll_state.c
@@ -13,11 +13,13 @@
static int __cpuidle poll_idle(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index)
{
- u64 time_start = local_clock();
+ u64 time_start;
+
+ time_start = local_clock();
dev->poll_time_limit = false;
- local_irq_enable();
+ raw_local_irq_enable();
if (!current_set_polling_and_test()) {
unsigned int loop_count = 0;
u64 limit;
@@ -36,6 +38,8 @@ static int __cpuidle poll_idle(struct cpuidle_device *dev,
}
}
}
+ raw_local_irq_disable();
+
current_clr_polling();
return index;
diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
index 2b496a53cbca..48948b171749 100644
--- a/drivers/cpuidle/sysfs.c
+++ b/drivers/cpuidle/sysfs.c
@@ -200,7 +200,7 @@ static void cpuidle_sysfs_release(struct kobject *kobj)
complete(&kdev->kobj_unregister);
}
-static struct kobj_type ktype_cpuidle = {
+static const struct kobj_type ktype_cpuidle = {
.sysfs_ops = &cpuidle_sysfs_ops,
.release = cpuidle_sysfs_release,
};
@@ -447,7 +447,7 @@ static void cpuidle_state_sysfs_release(struct kobject *kobj)
complete(&state_obj->kobj_unregister);
}
-static struct kobj_type ktype_state_cpuidle = {
+static const struct kobj_type ktype_state_cpuidle = {
.sysfs_ops = &cpuidle_state_sysfs_ops,
.default_groups = cpuidle_state_default_groups,
.release = cpuidle_state_sysfs_release,
@@ -594,7 +594,7 @@ static struct attribute *cpuidle_driver_default_attrs[] = {
};
ATTRIBUTE_GROUPS(cpuidle_driver_default);
-static struct kobj_type ktype_driver_cpuidle = {
+static const struct kobj_type ktype_driver_cpuidle = {
.sysfs_ops = &cpuidle_driver_sysfs_ops,
.default_groups = cpuidle_driver_default_groups,
.release = cpuidle_driver_sysfs_release,
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index dfb103f81a64..3b2516d1433f 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -390,16 +390,6 @@ if CRYPTO_DEV_NX
source "drivers/crypto/nx/Kconfig"
endif
-config CRYPTO_DEV_UX500
- tristate "Driver for ST-Ericsson UX500 crypto hardware acceleration"
- depends on ARCH_U8500
- help
- Driver for ST-Ericsson UX500 crypto engine.
-
-if CRYPTO_DEV_UX500
- source "drivers/crypto/ux500/Kconfig"
-endif # if CRYPTO_DEV_UX500
-
config CRYPTO_DEV_ATMEL_AUTHENC
bool "Support for Atmel IPSEC/SSL hw accelerator"
depends on ARCH_AT91 || COMPILE_TEST
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index fa8bf1be1a8c..476f1a25ca32 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -43,7 +43,6 @@ obj-$(CONFIG_CRYPTO_DEV_SAHARA) += sahara.o
obj-$(CONFIG_CRYPTO_DEV_SL3516) += gemini/
obj-y += stm32/
obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o
-obj-$(CONFIG_CRYPTO_DEV_UX500) += ux500/
obj-$(CONFIG_CRYPTO_DEV_VIRTIO) += virtio/
obj-$(CONFIG_CRYPTO_DEV_VMX) += vmx/
obj-$(CONFIG_CRYPTO_DEV_BCM_SPU) += bcm/
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
index 9f6594699835..a6865ff4d400 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
@@ -118,6 +118,7 @@ static const struct ce_variant ce_d1_variant = {
{ "bus", 0, 200000000 },
{ "mod", 300000000, 0 },
{ "ram", 0, 400000000 },
+ { "trng", 0, 0 },
},
.esr = ESR_D1,
.prng = CE_ALG_PRNG,
diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
index 8177aaba4434..27029fb77e29 100644
--- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
+++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce.h
@@ -105,7 +105,7 @@
#define MAX_SG 8
-#define CE_MAX_CLOCKS 3
+#define CE_MAX_CLOCKS 4
#define MAXFLOW 4
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
index 902f6be057ec..83c6dfad77e1 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
@@ -452,7 +452,7 @@ int sun8i_ss_aes_setkey(struct crypto_skcipher *tfm, const u8 *key,
}
kfree_sensitive(op->key);
op->keylen = keylen;
- op->key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
+ op->key = kmemdup(key, keylen, GFP_KERNEL);
if (!op->key)
return -ENOMEM;
@@ -475,7 +475,7 @@ int sun8i_ss_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
kfree_sensitive(op->key);
op->keylen = keylen;
- op->key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
+ op->key = kmemdup(key, keylen, GFP_KERNEL);
if (!op->key)
return -ENOMEM;
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
index ac2329e2b0e5..c9dc06f97857 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
@@ -16,6 +16,7 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
+#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
@@ -527,7 +528,7 @@ static int allocate_flows(struct sun8i_ss_dev *ss)
init_completion(&ss->flows[i].complete);
ss->flows[i].biv = devm_kmalloc(ss->dev, AES_BLOCK_SIZE,
- GFP_KERNEL | GFP_DMA);
+ GFP_KERNEL);
if (!ss->flows[i].biv) {
err = -ENOMEM;
goto error_engine;
@@ -535,7 +536,7 @@ static int allocate_flows(struct sun8i_ss_dev *ss)
for (j = 0; j < MAX_SG; j++) {
ss->flows[i].iv[j] = devm_kmalloc(ss->dev, AES_BLOCK_SIZE,
- GFP_KERNEL | GFP_DMA);
+ GFP_KERNEL);
if (!ss->flows[i].iv[j]) {
err = -ENOMEM;
goto error_engine;
@@ -544,13 +545,15 @@ static int allocate_flows(struct sun8i_ss_dev *ss)
/* the padding could be up to two block. */
ss->flows[i].pad = devm_kmalloc(ss->dev, MAX_PAD_SIZE,
- GFP_KERNEL | GFP_DMA);
+ GFP_KERNEL);
if (!ss->flows[i].pad) {
err = -ENOMEM;
goto error_engine;
}
- ss->flows[i].result = devm_kmalloc(ss->dev, SHA256_DIGEST_SIZE,
- GFP_KERNEL | GFP_DMA);
+ ss->flows[i].result =
+ devm_kmalloc(ss->dev, max(SHA256_DIGEST_SIZE,
+ dma_get_cache_alignment()),
+ GFP_KERNEL);
if (!ss->flows[i].result) {
err = -ENOMEM;
goto error_engine;
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
index 36a82b22953c..577bf636f7fb 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
@@ -79,10 +79,10 @@ int sun8i_ss_hmac_setkey(struct crypto_ahash *ahash, const u8 *key,
memcpy(tfmctx->key, key, keylen);
}
- tfmctx->ipad = kzalloc(bs, GFP_KERNEL | GFP_DMA);
+ tfmctx->ipad = kzalloc(bs, GFP_KERNEL);
if (!tfmctx->ipad)
return -ENOMEM;
- tfmctx->opad = kzalloc(bs, GFP_KERNEL | GFP_DMA);
+ tfmctx->opad = kzalloc(bs, GFP_KERNEL);
if (!tfmctx->opad) {
ret = -ENOMEM;
goto err_opad;
diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c
index dd677e9ed06f..70c7b5d571b8 100644
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c
@@ -11,6 +11,8 @@
*/
#include "sun8i-ss.h"
#include <linux/dma-mapping.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
#include <linux/pm_runtime.h>
#include <crypto/internal/rng.h>
@@ -25,7 +27,7 @@ int sun8i_ss_prng_seed(struct crypto_rng *tfm, const u8 *seed,
ctx->seed = NULL;
}
if (!ctx->seed)
- ctx->seed = kmalloc(slen, GFP_KERNEL | GFP_DMA);
+ ctx->seed = kmalloc(slen, GFP_KERNEL);
if (!ctx->seed)
return -ENOMEM;
@@ -58,6 +60,7 @@ int sun8i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src,
struct sun8i_ss_rng_tfm_ctx *ctx = crypto_rng_ctx(tfm);
struct rng_alg *alg = crypto_rng_alg(tfm);
struct sun8i_ss_alg_template *algt;
+ unsigned int todo_with_padding;
struct sun8i_ss_dev *ss;
dma_addr_t dma_iv, dma_dst;
unsigned int todo;
@@ -81,7 +84,11 @@ int sun8i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src,
todo = dlen + PRNG_SEED_SIZE + PRNG_DATA_SIZE;
todo -= todo % PRNG_DATA_SIZE;
- d = kzalloc(todo, GFP_KERNEL | GFP_DMA);
+ todo_with_padding = ALIGN(todo, dma_get_cache_alignment());
+ if (todo_with_padding < todo || todo < dlen)
+ return -EOVERFLOW;
+
+ d = kzalloc(todo_with_padding, GFP_KERNEL);
if (!d)
return -ENOMEM;
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index 280f4b0e7133..50dc783821b6 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -522,7 +522,6 @@ static void crypto4xx_cipher_done(struct crypto4xx_device *dev,
{
struct skcipher_request *req;
struct scatterlist *dst;
- dma_addr_t addr;
req = skcipher_request_cast(pd_uinfo->async_req);
@@ -531,8 +530,8 @@ static void crypto4xx_cipher_done(struct crypto4xx_device *dev,
req->cryptlen, req->dst);
} else {
dst = pd_uinfo->dest_va;
- addr = dma_map_page(dev->core_dev->device, sg_page(dst),
- dst->offset, dst->length, DMA_FROM_DEVICE);
+ dma_unmap_page(dev->core_dev->device, pd->dest, dst->length,
+ DMA_FROM_DEVICE);
}
if (pd_uinfo->sa_va->sa_command_0.bf.save_iv == SA_SAVE_IV) {
@@ -557,10 +556,9 @@ static void crypto4xx_ahash_done(struct crypto4xx_device *dev,
struct ahash_request *ahash_req;
ahash_req = ahash_request_cast(pd_uinfo->async_req);
- ctx = crypto_tfm_ctx(ahash_req->base.tfm);
+ ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(ahash_req));
- crypto4xx_copy_digest_to_dst(ahash_req->result, pd_uinfo,
- crypto_tfm_ctx(ahash_req->base.tfm));
+ crypto4xx_copy_digest_to_dst(ahash_req->result, pd_uinfo, ctx);
crypto4xx_ret_sg_desc(dev, pd_uinfo);
if (pd_uinfo->state & PD_ENTRY_BUSY)
diff --git a/drivers/crypto/aspeed/Kconfig b/drivers/crypto/aspeed/Kconfig
index ae2710ae8d8f..db6c5b4cdc40 100644
--- a/drivers/crypto/aspeed/Kconfig
+++ b/drivers/crypto/aspeed/Kconfig
@@ -46,3 +46,14 @@ config CRYPTO_DEV_ASPEED_HACE_CRYPTO
crypto driver.
Supports AES/DES symmetric-key encryption and decryption
with ECB/CBC/CFB/OFB/CTR options.
+
+config CRYPTO_DEV_ASPEED_ACRY
+ bool "Enable Aspeed ACRY RSA Engine"
+ depends on CRYPTO_DEV_ASPEED
+ select CRYPTO_ENGINE
+ select CRYPTO_RSA
+ help
+ Select here to enable Aspeed ECC/RSA Engine (ACRY)
+ RSA driver.
+ Supports 256 bits to 4096 bits RSA encryption/decryption
+ and signature/verification.
diff --git a/drivers/crypto/aspeed/Makefile b/drivers/crypto/aspeed/Makefile
index a0ed40ddaad1..15862752c053 100644
--- a/drivers/crypto/aspeed/Makefile
+++ b/drivers/crypto/aspeed/Makefile
@@ -5,3 +5,7 @@ obj-$(CONFIG_CRYPTO_DEV_ASPEED) += aspeed_crypto.o
aspeed_crypto-objs := aspeed-hace.o \
$(hace-hash-y) \
$(hace-crypto-y)
+
+aspeed_acry-$(CONFIG_CRYPTO_DEV_ASPEED_ACRY) += aspeed-acry.o
+
+obj-$(CONFIG_CRYPTO_DEV_ASPEED) += $(aspeed_acry-y)
diff --git a/drivers/crypto/aspeed/aspeed-acry.c b/drivers/crypto/aspeed/aspeed-acry.c
new file mode 100644
index 000000000000..1f77ebd73489
--- /dev/null
+++ b/drivers/crypto/aspeed/aspeed-acry.c
@@ -0,0 +1,828 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2021 Aspeed Technology Inc.
+ */
+#include <crypto/akcipher.h>
+#include <crypto/algapi.h>
+#include <crypto/engine.h>
+#include <crypto/internal/akcipher.h>
+#include <crypto/internal/rsa.h>
+#include <crypto/scatterwalk.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/mfd/syscon.h>
+#include <linux/interrupt.h>
+#include <linux/count_zeros.h>
+#include <linux/err.h>
+#include <linux/dma-mapping.h>
+#include <linux/regmap.h>
+
+#ifdef CONFIG_CRYPTO_DEV_ASPEED_DEBUG
+#define ACRY_DBG(d, fmt, ...) \
+ dev_info((d)->dev, "%s() " fmt, __func__, ##__VA_ARGS__)
+#else
+#define ACRY_DBG(d, fmt, ...) \
+ dev_dbg((d)->dev, "%s() " fmt, __func__, ##__VA_ARGS__)
+#endif
+
+/*****************************
+ * *
+ * ACRY register definitions *
+ * *
+ * ***************************/
+#define ASPEED_ACRY_TRIGGER 0x000 /* ACRY Engine Control: trigger */
+#define ASPEED_ACRY_DMA_CMD 0x048 /* ACRY Engine Control: Command */
+#define ASPEED_ACRY_DMA_SRC_BASE 0x04C /* ACRY DRAM base address for DMA */
+#define ASPEED_ACRY_DMA_LEN 0x050 /* ACRY Data Length of DMA */
+#define ASPEED_ACRY_RSA_KEY_LEN 0x058 /* ACRY RSA Exp/Mod Key Length (Bits) */
+#define ASPEED_ACRY_INT_MASK 0x3F8 /* ACRY Interrupt Mask */
+#define ASPEED_ACRY_STATUS 0x3FC /* ACRY Interrupt Status */
+
+/* rsa trigger */
+#define ACRY_CMD_RSA_TRIGGER BIT(0)
+#define ACRY_CMD_DMA_RSA_TRIGGER BIT(1)
+
+/* rsa dma cmd */
+#define ACRY_CMD_DMA_SRAM_MODE_RSA (0x3 << 4)
+#define ACRY_CMD_DMEM_AHB BIT(8)
+#define ACRY_CMD_DMA_SRAM_AHB_ENGINE 0
+
+/* rsa key len */
+#define RSA_E_BITS_LEN(x) ((x) << 16)
+#define RSA_M_BITS_LEN(x) (x)
+
+/* acry isr */
+#define ACRY_RSA_ISR BIT(1)
+
+#define ASPEED_ACRY_BUFF_SIZE 0x1800 /* DMA buffer size */
+#define ASPEED_ACRY_SRAM_MAX_LEN 2048 /* ACRY SRAM maximum length (Bytes) */
+#define ASPEED_ACRY_RSA_MAX_KEY_LEN 512 /* ACRY RSA maximum key length (Bytes) */
+
+#define CRYPTO_FLAGS_BUSY BIT(1)
+#define BYTES_PER_DWORD 4
+
+/*****************************
+ * *
+ * AHBC register definitions *
+ * *
+ * ***************************/
+#define AHBC_REGION_PROT 0x240
+#define REGION_ACRYM BIT(23)
+
+#define ast_acry_write(acry, val, offset) \
+ writel((val), (acry)->regs + (offset))
+
+#define ast_acry_read(acry, offset) \
+ readl((acry)->regs + (offset))
+
+struct aspeed_acry_dev;
+
+typedef int (*aspeed_acry_fn_t)(struct aspeed_acry_dev *);
+
+struct aspeed_acry_dev {
+ void __iomem *regs;
+ struct device *dev;
+ int irq;
+ struct clk *clk;
+ struct regmap *ahbc;
+
+ struct akcipher_request *req;
+ struct tasklet_struct done_task;
+ aspeed_acry_fn_t resume;
+ unsigned long flags;
+
+ /* ACRY output SRAM buffer */
+ void __iomem *acry_sram;
+
+ /* ACRY input DMA buffer */
+ void *buf_addr;
+ dma_addr_t buf_dma_addr;
+
+ struct crypto_engine *crypt_engine_rsa;
+
+ /* ACRY SRAM memory mapped */
+ int exp_dw_mapping[ASPEED_ACRY_RSA_MAX_KEY_LEN];
+ int mod_dw_mapping[ASPEED_ACRY_RSA_MAX_KEY_LEN];
+ int data_byte_mapping[ASPEED_ACRY_SRAM_MAX_LEN];
+};
+
+struct aspeed_acry_ctx {
+ struct crypto_engine_ctx enginectx;
+ struct aspeed_acry_dev *acry_dev;
+
+ struct rsa_key key;
+ int enc;
+ u8 *n;
+ u8 *e;
+ u8 *d;
+ size_t n_sz;
+ size_t e_sz;
+ size_t d_sz;
+
+ aspeed_acry_fn_t trigger;
+
+ struct crypto_akcipher *fallback_tfm;
+};
+
+struct aspeed_acry_alg {
+ struct aspeed_acry_dev *acry_dev;
+ struct akcipher_alg akcipher;
+};
+
+enum aspeed_rsa_key_mode {
+ ASPEED_RSA_EXP_MODE = 0,
+ ASPEED_RSA_MOD_MODE,
+ ASPEED_RSA_DATA_MODE,
+};
+
+static inline struct akcipher_request *
+ akcipher_request_cast(struct crypto_async_request *req)
+{
+ return container_of(req, struct akcipher_request, base);
+}
+
+static int aspeed_acry_do_fallback(struct akcipher_request *req)
+{
+ struct crypto_akcipher *cipher = crypto_akcipher_reqtfm(req);
+ struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(cipher);
+ int err;
+
+ akcipher_request_set_tfm(req, ctx->fallback_tfm);
+
+ if (ctx->enc)
+ err = crypto_akcipher_encrypt(req);
+ else
+ err = crypto_akcipher_decrypt(req);
+
+ akcipher_request_set_tfm(req, cipher);
+
+ return err;
+}
+
+static bool aspeed_acry_need_fallback(struct akcipher_request *req)
+{
+ struct crypto_akcipher *cipher = crypto_akcipher_reqtfm(req);
+ struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(cipher);
+
+ return ctx->key.n_sz > ASPEED_ACRY_RSA_MAX_KEY_LEN;
+}
+
+static int aspeed_acry_handle_queue(struct aspeed_acry_dev *acry_dev,
+ struct akcipher_request *req)
+{
+ if (aspeed_acry_need_fallback(req)) {
+ ACRY_DBG(acry_dev, "SW fallback\n");
+ return aspeed_acry_do_fallback(req);
+ }
+
+ return crypto_transfer_akcipher_request_to_engine(acry_dev->crypt_engine_rsa, req);
+}
+
+static int aspeed_acry_do_request(struct crypto_engine *engine, void *areq)
+{
+ struct akcipher_request *req = akcipher_request_cast(areq);
+ struct crypto_akcipher *cipher = crypto_akcipher_reqtfm(req);
+ struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(cipher);
+ struct aspeed_acry_dev *acry_dev = ctx->acry_dev;
+
+ acry_dev->req = req;
+ acry_dev->flags |= CRYPTO_FLAGS_BUSY;
+
+ return ctx->trigger(acry_dev);
+}
+
+static int aspeed_acry_complete(struct aspeed_acry_dev *acry_dev, int err)
+{
+ struct akcipher_request *req = acry_dev->req;
+
+ acry_dev->flags &= ~CRYPTO_FLAGS_BUSY;
+
+ crypto_finalize_akcipher_request(acry_dev->crypt_engine_rsa, req, err);
+
+ return err;
+}
+
+/*
+ * Copy Data to DMA buffer for engine used.
+ */
+static void aspeed_acry_rsa_sg_copy_to_buffer(struct aspeed_acry_dev *acry_dev,
+ u8 *buf, struct scatterlist *src,
+ size_t nbytes)
+{
+ static u8 dram_buffer[ASPEED_ACRY_SRAM_MAX_LEN];
+ int i = 0, j;
+ int data_idx;
+
+ ACRY_DBG(acry_dev, "\n");
+
+ scatterwalk_map_and_copy(dram_buffer, src, 0, nbytes, 0);
+
+ for (j = nbytes - 1; j >= 0; j--) {
+ data_idx = acry_dev->data_byte_mapping[i];
+ buf[data_idx] = dram_buffer[j];
+ i++;
+ }
+
+ for (; i < ASPEED_ACRY_SRAM_MAX_LEN; i++) {
+ data_idx = acry_dev->data_byte_mapping[i];
+ buf[data_idx] = 0;
+ }
+}
+
+/*
+ * Copy Exp/Mod to DMA buffer for engine used.
+ *
+ * Params:
+ * - mode 0 : Exponential
+ * - mode 1 : Modulus
+ *
+ * Example:
+ * - DRAM memory layout:
+ * D[0], D[4], D[8], D[12]
+ * - ACRY SRAM memory layout should reverse the order of source data:
+ * D[12], D[8], D[4], D[0]
+ */
+static int aspeed_acry_rsa_ctx_copy(struct aspeed_acry_dev *acry_dev, void *buf,
+ const void *xbuf, size_t nbytes,
+ enum aspeed_rsa_key_mode mode)
+{
+ const u8 *src = xbuf;
+ __le32 *dw_buf = buf;
+ int nbits, ndw;
+ int i, j, idx;
+ u32 data = 0;
+
+ ACRY_DBG(acry_dev, "nbytes:%zu, mode:%d\n", nbytes, mode);
+
+ if (nbytes > ASPEED_ACRY_RSA_MAX_KEY_LEN)
+ return -ENOMEM;
+
+ /* Remove the leading zeros */
+ while (nbytes > 0 && src[0] == 0) {
+ src++;
+ nbytes--;
+ }
+
+ nbits = nbytes * 8;
+ if (nbytes > 0)
+ nbits -= count_leading_zeros(src[0]) - (BITS_PER_LONG - 8);
+
+ /* double-world alignment */
+ ndw = DIV_ROUND_UP(nbytes, BYTES_PER_DWORD);
+
+ if (nbytes > 0) {
+ i = BYTES_PER_DWORD - nbytes % BYTES_PER_DWORD;
+ i %= BYTES_PER_DWORD;
+
+ for (j = ndw; j > 0; j--) {
+ for (; i < BYTES_PER_DWORD; i++) {
+ data <<= 8;
+ data |= *src++;
+ }
+
+ i = 0;
+
+ if (mode == ASPEED_RSA_EXP_MODE)
+ idx = acry_dev->exp_dw_mapping[j - 1];
+ else if (mode == ASPEED_RSA_MOD_MODE)
+ idx = acry_dev->mod_dw_mapping[j - 1];
+
+ dw_buf[idx] = cpu_to_le32(data);
+ }
+ }
+
+ return nbits;
+}
+
+static int aspeed_acry_rsa_transfer(struct aspeed_acry_dev *acry_dev)
+{
+ struct akcipher_request *req = acry_dev->req;
+ u8 __iomem *sram_buffer = acry_dev->acry_sram;
+ struct scatterlist *out_sg = req->dst;
+ static u8 dram_buffer[ASPEED_ACRY_SRAM_MAX_LEN];
+ int leading_zero = 1;
+ int result_nbytes;
+ int i = 0, j;
+ int data_idx;
+
+ /* Set Data Memory to AHB(CPU) Access Mode */
+ ast_acry_write(acry_dev, ACRY_CMD_DMEM_AHB, ASPEED_ACRY_DMA_CMD);
+
+ /* Disable ACRY SRAM protection */
+ regmap_update_bits(acry_dev->ahbc, AHBC_REGION_PROT,
+ REGION_ACRYM, 0);
+
+ result_nbytes = ASPEED_ACRY_SRAM_MAX_LEN;
+
+ for (j = ASPEED_ACRY_SRAM_MAX_LEN - 1; j >= 0; j--) {
+ data_idx = acry_dev->data_byte_mapping[j];
+ if (readb(sram_buffer + data_idx) == 0 && leading_zero) {
+ result_nbytes--;
+ } else {
+ leading_zero = 0;
+ dram_buffer[i] = readb(sram_buffer + data_idx);
+ i++;
+ }
+ }
+
+ ACRY_DBG(acry_dev, "result_nbytes:%d, req->dst_len:%d\n",
+ result_nbytes, req->dst_len);
+
+ if (result_nbytes <= req->dst_len) {
+ scatterwalk_map_and_copy(dram_buffer, out_sg, 0, result_nbytes,
+ 1);
+ req->dst_len = result_nbytes;
+
+ } else {
+ dev_err(acry_dev->dev, "RSA engine error!\n");
+ }
+
+ memzero_explicit(acry_dev->buf_addr, ASPEED_ACRY_BUFF_SIZE);
+
+ return aspeed_acry_complete(acry_dev, 0);
+}
+
+static int aspeed_acry_rsa_trigger(struct aspeed_acry_dev *acry_dev)
+{
+ struct akcipher_request *req = acry_dev->req;
+ struct crypto_akcipher *cipher = crypto_akcipher_reqtfm(req);
+ struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(cipher);
+ int ne, nm;
+
+ if (!ctx->n || !ctx->n_sz) {
+ dev_err(acry_dev->dev, "%s: key n is not set\n", __func__);
+ return -EINVAL;
+ }
+
+ memzero_explicit(acry_dev->buf_addr, ASPEED_ACRY_BUFF_SIZE);
+
+ /* Copy source data to DMA buffer */
+ aspeed_acry_rsa_sg_copy_to_buffer(acry_dev, acry_dev->buf_addr,
+ req->src, req->src_len);
+
+ nm = aspeed_acry_rsa_ctx_copy(acry_dev, acry_dev->buf_addr, ctx->n,
+ ctx->n_sz, ASPEED_RSA_MOD_MODE);
+ if (ctx->enc) {
+ if (!ctx->e || !ctx->e_sz) {
+ dev_err(acry_dev->dev, "%s: key e is not set\n",
+ __func__);
+ return -EINVAL;
+ }
+ /* Copy key e to DMA buffer */
+ ne = aspeed_acry_rsa_ctx_copy(acry_dev, acry_dev->buf_addr,
+ ctx->e, ctx->e_sz,
+ ASPEED_RSA_EXP_MODE);
+ } else {
+ if (!ctx->d || !ctx->d_sz) {
+ dev_err(acry_dev->dev, "%s: key d is not set\n",
+ __func__);
+ return -EINVAL;
+ }
+ /* Copy key d to DMA buffer */
+ ne = aspeed_acry_rsa_ctx_copy(acry_dev, acry_dev->buf_addr,
+ ctx->key.d, ctx->key.d_sz,
+ ASPEED_RSA_EXP_MODE);
+ }
+
+ ast_acry_write(acry_dev, acry_dev->buf_dma_addr,
+ ASPEED_ACRY_DMA_SRC_BASE);
+ ast_acry_write(acry_dev, (ne << 16) + nm,
+ ASPEED_ACRY_RSA_KEY_LEN);
+ ast_acry_write(acry_dev, ASPEED_ACRY_BUFF_SIZE,
+ ASPEED_ACRY_DMA_LEN);
+
+ acry_dev->resume = aspeed_acry_rsa_transfer;
+
+ /* Enable ACRY SRAM protection */
+ regmap_update_bits(acry_dev->ahbc, AHBC_REGION_PROT,
+ REGION_ACRYM, REGION_ACRYM);
+
+ ast_acry_write(acry_dev, ACRY_RSA_ISR, ASPEED_ACRY_INT_MASK);
+ ast_acry_write(acry_dev, ACRY_CMD_DMA_SRAM_MODE_RSA |
+ ACRY_CMD_DMA_SRAM_AHB_ENGINE, ASPEED_ACRY_DMA_CMD);
+
+ /* Trigger RSA engines */
+ ast_acry_write(acry_dev, ACRY_CMD_RSA_TRIGGER |
+ ACRY_CMD_DMA_RSA_TRIGGER, ASPEED_ACRY_TRIGGER);
+
+ return 0;
+}
+
+static int aspeed_acry_rsa_enc(struct akcipher_request *req)
+{
+ struct crypto_akcipher *cipher = crypto_akcipher_reqtfm(req);
+ struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(cipher);
+ struct aspeed_acry_dev *acry_dev = ctx->acry_dev;
+
+ ctx->trigger = aspeed_acry_rsa_trigger;
+ ctx->enc = 1;
+
+ return aspeed_acry_handle_queue(acry_dev, req);
+}
+
+static int aspeed_acry_rsa_dec(struct akcipher_request *req)
+{
+ struct crypto_akcipher *cipher = crypto_akcipher_reqtfm(req);
+ struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(cipher);
+ struct aspeed_acry_dev *acry_dev = ctx->acry_dev;
+
+ ctx->trigger = aspeed_acry_rsa_trigger;
+ ctx->enc = 0;
+
+ return aspeed_acry_handle_queue(acry_dev, req);
+}
+
+static u8 *aspeed_rsa_key_copy(u8 *src, size_t len)
+{
+ return kmemdup(src, len, GFP_KERNEL);
+}
+
+static int aspeed_rsa_set_n(struct aspeed_acry_ctx *ctx, u8 *value,
+ size_t len)
+{
+ ctx->n_sz = len;
+ ctx->n = aspeed_rsa_key_copy(value, len);
+ if (!ctx->n)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int aspeed_rsa_set_e(struct aspeed_acry_ctx *ctx, u8 *value,
+ size_t len)
+{
+ ctx->e_sz = len;
+ ctx->e = aspeed_rsa_key_copy(value, len);
+ if (!ctx->e)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int aspeed_rsa_set_d(struct aspeed_acry_ctx *ctx, u8 *value,
+ size_t len)
+{
+ ctx->d_sz = len;
+ ctx->d = aspeed_rsa_key_copy(value, len);
+ if (!ctx->d)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void aspeed_rsa_key_free(struct aspeed_acry_ctx *ctx)
+{
+ kfree_sensitive(ctx->n);
+ kfree_sensitive(ctx->e);
+ kfree_sensitive(ctx->d);
+ ctx->n_sz = 0;
+ ctx->e_sz = 0;
+ ctx->d_sz = 0;
+}
+
+static int aspeed_acry_rsa_setkey(struct crypto_akcipher *tfm, const void *key,
+ unsigned int keylen, int priv)
+{
+ struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct aspeed_acry_dev *acry_dev = ctx->acry_dev;
+ int ret;
+
+ if (priv)
+ ret = rsa_parse_priv_key(&ctx->key, key, keylen);
+ else
+ ret = rsa_parse_pub_key(&ctx->key, key, keylen);
+
+ if (ret) {
+ dev_err(acry_dev->dev, "rsa parse key failed, ret:0x%x\n",
+ ret);
+ return ret;
+ }
+
+ /* Aspeed engine supports up to 4096 bits,
+ * Use software fallback instead.
+ */
+ if (ctx->key.n_sz > ASPEED_ACRY_RSA_MAX_KEY_LEN)
+ return 0;
+
+ ret = aspeed_rsa_set_n(ctx, (u8 *)ctx->key.n, ctx->key.n_sz);
+ if (ret)
+ goto err;
+
+ ret = aspeed_rsa_set_e(ctx, (u8 *)ctx->key.e, ctx->key.e_sz);
+ if (ret)
+ goto err;
+
+ if (priv) {
+ ret = aspeed_rsa_set_d(ctx, (u8 *)ctx->key.d, ctx->key.d_sz);
+ if (ret)
+ goto err;
+ }
+
+ return 0;
+
+err:
+ dev_err(acry_dev->dev, "rsa set key failed\n");
+ aspeed_rsa_key_free(ctx);
+
+ return ret;
+}
+
+static int aspeed_acry_rsa_set_pub_key(struct crypto_akcipher *tfm,
+ const void *key,
+ unsigned int keylen)
+{
+ struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(tfm);
+ int ret;
+
+ ret = crypto_akcipher_set_pub_key(ctx->fallback_tfm, key, keylen);
+ if (ret)
+ return ret;
+
+ return aspeed_acry_rsa_setkey(tfm, key, keylen, 0);
+}
+
+static int aspeed_acry_rsa_set_priv_key(struct crypto_akcipher *tfm,
+ const void *key,
+ unsigned int keylen)
+{
+ struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(tfm);
+ int ret;
+
+ ret = crypto_akcipher_set_priv_key(ctx->fallback_tfm, key, keylen);
+ if (ret)
+ return ret;
+
+ return aspeed_acry_rsa_setkey(tfm, key, keylen, 1);
+}
+
+static unsigned int aspeed_acry_rsa_max_size(struct crypto_akcipher *tfm)
+{
+ struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(tfm);
+
+ if (ctx->key.n_sz > ASPEED_ACRY_RSA_MAX_KEY_LEN)
+ return crypto_akcipher_maxsize(ctx->fallback_tfm);
+
+ return ctx->n_sz;
+}
+
+static int aspeed_acry_rsa_init_tfm(struct crypto_akcipher *tfm)
+{
+ struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(tfm);
+ struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
+ const char *name = crypto_tfm_alg_name(&tfm->base);
+ struct aspeed_acry_alg *acry_alg;
+
+ acry_alg = container_of(alg, struct aspeed_acry_alg, akcipher);
+
+ ctx->acry_dev = acry_alg->acry_dev;
+
+ ctx->fallback_tfm = crypto_alloc_akcipher(name, 0, CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(ctx->fallback_tfm)) {
+ dev_err(ctx->acry_dev->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
+ name, PTR_ERR(ctx->fallback_tfm));
+ return PTR_ERR(ctx->fallback_tfm);
+ }
+
+ ctx->enginectx.op.do_one_request = aspeed_acry_do_request;
+ ctx->enginectx.op.prepare_request = NULL;
+ ctx->enginectx.op.unprepare_request = NULL;
+
+ return 0;
+}
+
+static void aspeed_acry_rsa_exit_tfm(struct crypto_akcipher *tfm)
+{
+ struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(tfm);
+
+ crypto_free_akcipher(ctx->fallback_tfm);
+}
+
+static struct aspeed_acry_alg aspeed_acry_akcipher_algs[] = {
+ {
+ .akcipher = {
+ .encrypt = aspeed_acry_rsa_enc,
+ .decrypt = aspeed_acry_rsa_dec,
+ .sign = aspeed_acry_rsa_dec,
+ .verify = aspeed_acry_rsa_enc,
+ .set_pub_key = aspeed_acry_rsa_set_pub_key,
+ .set_priv_key = aspeed_acry_rsa_set_priv_key,
+ .max_size = aspeed_acry_rsa_max_size,
+ .init = aspeed_acry_rsa_init_tfm,
+ .exit = aspeed_acry_rsa_exit_tfm,
+ .base = {
+ .cra_name = "rsa",
+ .cra_driver_name = "aspeed-rsa",
+ .cra_priority = 300,
+ .cra_flags = CRYPTO_ALG_TYPE_AKCIPHER |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .cra_module = THIS_MODULE,
+ .cra_ctxsize = sizeof(struct aspeed_acry_ctx),
+ },
+ },
+ },
+};
+
+static void aspeed_acry_register(struct aspeed_acry_dev *acry_dev)
+{
+ int i, rc;
+
+ for (i = 0; i < ARRAY_SIZE(aspeed_acry_akcipher_algs); i++) {
+ aspeed_acry_akcipher_algs[i].acry_dev = acry_dev;
+ rc = crypto_register_akcipher(&aspeed_acry_akcipher_algs[i].akcipher);
+ if (rc) {
+ ACRY_DBG(acry_dev, "Failed to register %s\n",
+ aspeed_acry_akcipher_algs[i].akcipher.base.cra_name);
+ }
+ }
+}
+
+static void aspeed_acry_unregister(struct aspeed_acry_dev *acry_dev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(aspeed_acry_akcipher_algs); i++)
+ crypto_unregister_akcipher(&aspeed_acry_akcipher_algs[i].akcipher);
+}
+
+/* ACRY interrupt service routine. */
+static irqreturn_t aspeed_acry_irq(int irq, void *dev)
+{
+ struct aspeed_acry_dev *acry_dev = (struct aspeed_acry_dev *)dev;
+ u32 sts;
+
+ sts = ast_acry_read(acry_dev, ASPEED_ACRY_STATUS);
+ ast_acry_write(acry_dev, sts, ASPEED_ACRY_STATUS);
+
+ ACRY_DBG(acry_dev, "irq sts:0x%x\n", sts);
+
+ if (sts & ACRY_RSA_ISR) {
+ /* Stop RSA engine */
+ ast_acry_write(acry_dev, 0, ASPEED_ACRY_TRIGGER);
+
+ if (acry_dev->flags & CRYPTO_FLAGS_BUSY)
+ tasklet_schedule(&acry_dev->done_task);
+ else
+ dev_err(acry_dev->dev, "RSA no active requests.\n");
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * ACRY SRAM has its own memory layout.
+ * Set the DRAM to SRAM indexing for future used.
+ */
+static void aspeed_acry_sram_mapping(struct aspeed_acry_dev *acry_dev)
+{
+ int i, j = 0;
+
+ for (i = 0; i < (ASPEED_ACRY_SRAM_MAX_LEN / BYTES_PER_DWORD); i++) {
+ acry_dev->exp_dw_mapping[i] = j;
+ acry_dev->mod_dw_mapping[i] = j + 4;
+ acry_dev->data_byte_mapping[(i * 4)] = (j + 8) * 4;
+ acry_dev->data_byte_mapping[(i * 4) + 1] = (j + 8) * 4 + 1;
+ acry_dev->data_byte_mapping[(i * 4) + 2] = (j + 8) * 4 + 2;
+ acry_dev->data_byte_mapping[(i * 4) + 3] = (j + 8) * 4 + 3;
+ j++;
+ j = j % 4 ? j : j + 8;
+ }
+}
+
+static void aspeed_acry_done_task(unsigned long data)
+{
+ struct aspeed_acry_dev *acry_dev = (struct aspeed_acry_dev *)data;
+
+ (void)acry_dev->resume(acry_dev);
+}
+
+static const struct of_device_id aspeed_acry_of_matches[] = {
+ { .compatible = "aspeed,ast2600-acry", },
+ {},
+};
+
+static int aspeed_acry_probe(struct platform_device *pdev)
+{
+ struct aspeed_acry_dev *acry_dev;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ int rc;
+
+ acry_dev = devm_kzalloc(dev, sizeof(struct aspeed_acry_dev),
+ GFP_KERNEL);
+ if (!acry_dev)
+ return -ENOMEM;
+
+ acry_dev->dev = dev;
+
+ platform_set_drvdata(pdev, acry_dev);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ acry_dev->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(acry_dev->regs))
+ return PTR_ERR(acry_dev->regs);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ acry_dev->acry_sram = devm_ioremap_resource(dev, res);
+ if (IS_ERR(acry_dev->acry_sram))
+ return PTR_ERR(acry_dev->acry_sram);
+
+ /* Get irq number and register it */
+ acry_dev->irq = platform_get_irq(pdev, 0);
+ if (acry_dev->irq < 0)
+ return -ENXIO;
+
+ rc = devm_request_irq(dev, acry_dev->irq, aspeed_acry_irq, 0,
+ dev_name(dev), acry_dev);
+ if (rc) {
+ dev_err(dev, "Failed to request irq.\n");
+ return rc;
+ }
+
+ acry_dev->clk = devm_clk_get_enabled(dev, NULL);
+ if (IS_ERR(acry_dev->clk)) {
+ dev_err(dev, "Failed to get acry clk\n");
+ return PTR_ERR(acry_dev->clk);
+ }
+
+ acry_dev->ahbc = syscon_regmap_lookup_by_phandle(dev->of_node,
+ "aspeed,ahbc");
+ if (IS_ERR(acry_dev->ahbc)) {
+ dev_err(dev, "Failed to get AHBC regmap\n");
+ return -ENODEV;
+ }
+
+ /* Initialize crypto hardware engine structure for RSA */
+ acry_dev->crypt_engine_rsa = crypto_engine_alloc_init(dev, true);
+ if (!acry_dev->crypt_engine_rsa) {
+ rc = -ENOMEM;
+ goto clk_exit;
+ }
+
+ rc = crypto_engine_start(acry_dev->crypt_engine_rsa);
+ if (rc)
+ goto err_engine_rsa_start;
+
+ tasklet_init(&acry_dev->done_task, aspeed_acry_done_task,
+ (unsigned long)acry_dev);
+
+ /* Set Data Memory to AHB(CPU) Access Mode */
+ ast_acry_write(acry_dev, ACRY_CMD_DMEM_AHB, ASPEED_ACRY_DMA_CMD);
+
+ /* Initialize ACRY SRAM index */
+ aspeed_acry_sram_mapping(acry_dev);
+
+ acry_dev->buf_addr = dmam_alloc_coherent(dev, ASPEED_ACRY_BUFF_SIZE,
+ &acry_dev->buf_dma_addr,
+ GFP_KERNEL);
+ memzero_explicit(acry_dev->buf_addr, ASPEED_ACRY_BUFF_SIZE);
+
+ aspeed_acry_register(acry_dev);
+
+ dev_info(dev, "Aspeed ACRY Accelerator successfully registered\n");
+
+ return 0;
+
+err_engine_rsa_start:
+ crypto_engine_exit(acry_dev->crypt_engine_rsa);
+clk_exit:
+ clk_disable_unprepare(acry_dev->clk);
+
+ return rc;
+}
+
+static int aspeed_acry_remove(struct platform_device *pdev)
+{
+ struct aspeed_acry_dev *acry_dev = platform_get_drvdata(pdev);
+
+ aspeed_acry_unregister(acry_dev);
+ crypto_engine_exit(acry_dev->crypt_engine_rsa);
+ tasklet_kill(&acry_dev->done_task);
+ clk_disable_unprepare(acry_dev->clk);
+
+ return 0;
+}
+
+MODULE_DEVICE_TABLE(of, aspeed_acry_of_matches);
+
+static struct platform_driver aspeed_acry_driver = {
+ .probe = aspeed_acry_probe,
+ .remove = aspeed_acry_remove,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = aspeed_acry_of_matches,
+ },
+};
+
+module_platform_driver(aspeed_acry_driver);
+
+MODULE_AUTHOR("Neal Liu <neal_liu@aspeedtech.com>");
+MODULE_DESCRIPTION("ASPEED ACRY driver for hardware RSA Engine");
+MODULE_LICENSE("GPL");
diff --git a/drivers/crypto/aspeed/aspeed-hace.c b/drivers/crypto/aspeed/aspeed-hace.c
index 656cb92c8bb6..d2871e1de9c2 100644
--- a/drivers/crypto/aspeed/aspeed-hace.c
+++ b/drivers/crypto/aspeed/aspeed-hace.c
@@ -99,7 +99,6 @@ static int aspeed_hace_probe(struct platform_device *pdev)
const struct of_device_id *hace_dev_id;
struct aspeed_engine_hash *hash_engine;
struct aspeed_hace_dev *hace_dev;
- struct resource *res;
int rc;
hace_dev = devm_kzalloc(&pdev->dev, sizeof(struct aspeed_hace_dev),
@@ -118,11 +117,9 @@ static int aspeed_hace_probe(struct platform_device *pdev)
hash_engine = &hace_dev->hash_engine;
crypto_engine = &hace_dev->crypto_engine;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
platform_set_drvdata(pdev, hace_dev);
- hace_dev->regs = devm_ioremap_resource(&pdev->dev, res);
+ hace_dev->regs = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
if (IS_ERR(hace_dev->regs))
return PTR_ERR(hace_dev->regs);
diff --git a/drivers/crypto/aspeed/aspeed-hace.h b/drivers/crypto/aspeed/aspeed-hace.h
index f2cde23b56ae..05d0a15d546d 100644
--- a/drivers/crypto/aspeed/aspeed-hace.h
+++ b/drivers/crypto/aspeed/aspeed-hace.h
@@ -183,7 +183,7 @@ struct aspeed_sham_ctx {
struct aspeed_hace_dev *hace_dev;
unsigned long flags; /* hmac flag */
- struct aspeed_sha_hmac_ctx base[0];
+ struct aspeed_sha_hmac_ctx base[];
};
struct aspeed_sham_reqctx {
diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
index 886bf258544c..ed10f2ae4523 100644
--- a/drivers/crypto/atmel-aes.c
+++ b/drivers/crypto/atmel-aes.c
@@ -554,7 +554,7 @@ static inline int atmel_aes_complete(struct atmel_aes_dev *dd, int err)
}
if (dd->is_async)
- dd->areq->complete(dd->areq, err);
+ crypto_request_complete(dd->areq, err);
tasklet_schedule(&dd->queue_task);
@@ -955,7 +955,7 @@ static int atmel_aes_handle_queue(struct atmel_aes_dev *dd,
return ret;
if (backlog)
- backlog->complete(backlog, -EINPROGRESS);
+ crypto_request_complete(backlog, -EINPROGRESS);
ctx = crypto_tfm_ctx(areq->tfm);
@@ -1879,7 +1879,7 @@ static int atmel_aes_xts_setkey(struct crypto_skcipher *tfm, const u8 *key,
struct atmel_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
int err;
- err = xts_check_key(crypto_skcipher_tfm(tfm), key, keylen);
+ err = xts_verify_key(tfm, key, keylen);
if (err)
return err;
@@ -2510,6 +2510,7 @@ static void atmel_aes_get_cap(struct atmel_aes_dev *dd)
/* keep only major version number */
switch (dd->hw_version & 0xff0) {
case 0x700:
+ case 0x600:
case 0x500:
dd->caps.has_dualbuff = 1;
dd->caps.has_cfb64 = 1;
diff --git a/drivers/crypto/atmel-ecc.c b/drivers/crypto/atmel-ecc.c
index 12205e2b53b4..aac64b555204 100644
--- a/drivers/crypto/atmel-ecc.c
+++ b/drivers/crypto/atmel-ecc.c
@@ -313,11 +313,10 @@ static struct kpp_alg atmel_ecdh_nist_p256 = {
static int atmel_ecc_probe(struct i2c_client *client)
{
- const struct i2c_device_id *id = i2c_client_get_device_id(client);
struct atmel_i2c_client_priv *i2c_priv;
int ret;
- ret = atmel_i2c_probe(client, id);
+ ret = atmel_i2c_probe(client);
if (ret)
return ret;
diff --git a/drivers/crypto/atmel-i2c.c b/drivers/crypto/atmel-i2c.c
index 55bff1e13142..83a9093eff25 100644
--- a/drivers/crypto/atmel-i2c.c
+++ b/drivers/crypto/atmel-i2c.c
@@ -59,7 +59,7 @@ void atmel_i2c_init_read_cmd(struct atmel_i2c_cmd *cmd)
* Read the word from Configuration zone that contains the lock bytes
* (UserExtra, Selector, LockValue, LockConfig).
*/
- cmd->param1 = CONFIG_ZONE;
+ cmd->param1 = CONFIGURATION_ZONE;
cmd->param2 = cpu_to_le16(DEVICE_LOCK_ADDR);
cmd->count = READ_COUNT;
@@ -324,7 +324,7 @@ free_cmd:
return ret;
}
-int atmel_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id)
+int atmel_i2c_probe(struct i2c_client *client)
{
struct atmel_i2c_client_priv *i2c_priv;
struct device *dev = &client->dev;
diff --git a/drivers/crypto/atmel-i2c.h b/drivers/crypto/atmel-i2c.h
index 35f7857a7f7c..c0bd429ee2c7 100644
--- a/drivers/crypto/atmel-i2c.h
+++ b/drivers/crypto/atmel-i2c.h
@@ -63,7 +63,7 @@ struct atmel_i2c_cmd {
#define STATUS_WAKE_SUCCESSFUL 0x11
/* Definitions for eeprom organization */
-#define CONFIG_ZONE 0
+#define CONFIGURATION_ZONE 0
/* Definitions for Indexes common to all commands */
#define RSP_DATA_IDX 1 /* buffer index of data in response */
@@ -167,7 +167,7 @@ struct atmel_i2c_work_data {
struct atmel_i2c_cmd cmd;
};
-int atmel_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id);
+int atmel_i2c_probe(struct i2c_client *client);
void atmel_i2c_enqueue(struct atmel_i2c_work_data *work_data,
void (*cbk)(struct atmel_i2c_work_data *work_data,
diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
index ca4b01926d1b..e7c1db2739ec 100644
--- a/drivers/crypto/atmel-sha.c
+++ b/drivers/crypto/atmel-sha.c
@@ -292,7 +292,7 @@ static inline int atmel_sha_complete(struct atmel_sha_dev *dd, int err)
clk_disable(dd->iclk);
if ((dd->is_async || dd->force_complete) && req->base.complete)
- req->base.complete(&req->base, err);
+ ahash_request_complete(req, err);
/* handle new request */
tasklet_schedule(&dd->queue_task);
@@ -1080,7 +1080,7 @@ static int atmel_sha_handle_queue(struct atmel_sha_dev *dd,
return ret;
if (backlog)
- backlog->complete(backlog, -EINPROGRESS);
+ crypto_request_complete(backlog, -EINPROGRESS);
ctx = crypto_tfm_ctx(async_req->tfm);
@@ -2099,10 +2099,9 @@ struct atmel_sha_authenc_reqctx {
unsigned int digestlen;
};
-static void atmel_sha_authenc_complete(struct crypto_async_request *areq,
- int err)
+static void atmel_sha_authenc_complete(void *data, int err)
{
- struct ahash_request *req = areq->data;
+ struct ahash_request *req = data;
struct atmel_sha_authenc_reqctx *authctx = ahash_request_ctx(req);
authctx->cb(authctx->aes_dev, err, authctx->base.dd->is_async);
@@ -2509,6 +2508,7 @@ static void atmel_sha_get_cap(struct atmel_sha_dev *dd)
/* keep only major version number */
switch (dd->hw_version & 0xff0) {
case 0x700:
+ case 0x600:
case 0x510:
dd->caps.has_dma = 1;
dd->caps.has_dualbuff = 1;
diff --git a/drivers/crypto/atmel-sha204a.c b/drivers/crypto/atmel-sha204a.c
index 272a06f0b588..4403dbb0f0b1 100644
--- a/drivers/crypto/atmel-sha204a.c
+++ b/drivers/crypto/atmel-sha204a.c
@@ -93,11 +93,10 @@ static int atmel_sha204a_rng_read(struct hwrng *rng, void *data, size_t max,
static int atmel_sha204a_probe(struct i2c_client *client)
{
- const struct i2c_device_id *id = i2c_client_get_device_id(client);
struct atmel_i2c_client_priv *i2c_priv;
int ret;
- ret = atmel_i2c_probe(client, id);
+ ret = atmel_i2c_probe(client);
if (ret)
return ret;
diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c
index 8b7bc1076e0d..b2d48c1649b9 100644
--- a/drivers/crypto/atmel-tdes.c
+++ b/drivers/crypto/atmel-tdes.c
@@ -590,7 +590,7 @@ static void atmel_tdes_finish_req(struct atmel_tdes_dev *dd, int err)
if (!err && (rctx->mode & TDES_FLAGS_OPMODE_MASK) != TDES_FLAGS_ECB)
atmel_tdes_set_iv_as_last_ciphertext_block(dd);
- req->base.complete(&req->base, err);
+ skcipher_request_complete(req, err);
}
static int atmel_tdes_handle_queue(struct atmel_tdes_dev *dd,
@@ -619,7 +619,7 @@ static int atmel_tdes_handle_queue(struct atmel_tdes_dev *dd,
return ret;
if (backlog)
- backlog->complete(backlog, -EINPROGRESS);
+ crypto_request_complete(backlog, -EINPROGRESS);
req = skcipher_request_cast(async_req);
diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c
index 51c66afbe677..8493a45e1bd4 100644
--- a/drivers/crypto/axis/artpec6_crypto.c
+++ b/drivers/crypto/axis/artpec6_crypto.c
@@ -1621,7 +1621,7 @@ artpec6_crypto_xts_set_key(struct crypto_skcipher *cipher, const u8 *key,
crypto_skcipher_ctx(cipher);
int ret;
- ret = xts_check_key(&cipher->base, key, keylen);
+ ret = xts_verify_key(cipher, key, keylen);
if (ret)
return ret;
@@ -2143,13 +2143,13 @@ static void artpec6_crypto_task(unsigned long data)
list_for_each_entry_safe(req, n, &complete_in_progress,
complete_in_progress) {
- req->req->complete(req->req, -EINPROGRESS);
+ crypto_request_complete(req->req, -EINPROGRESS);
}
}
static void artpec6_crypto_complete_crypto(struct crypto_async_request *req)
{
- req->complete(req, 0);
+ crypto_request_complete(req, 0);
}
static void
@@ -2161,7 +2161,7 @@ artpec6_crypto_complete_cbc_decrypt(struct crypto_async_request *req)
scatterwalk_map_and_copy(cipher_req->iv, cipher_req->src,
cipher_req->cryptlen - AES_BLOCK_SIZE,
AES_BLOCK_SIZE, 0);
- req->complete(req, 0);
+ skcipher_request_complete(cipher_req, 0);
}
static void
@@ -2173,7 +2173,7 @@ artpec6_crypto_complete_cbc_encrypt(struct crypto_async_request *req)
scatterwalk_map_and_copy(cipher_req->iv, cipher_req->dst,
cipher_req->cryptlen - AES_BLOCK_SIZE,
AES_BLOCK_SIZE, 0);
- req->complete(req, 0);
+ skcipher_request_complete(cipher_req, 0);
}
static void artpec6_crypto_complete_aead(struct crypto_async_request *req)
@@ -2211,12 +2211,12 @@ static void artpec6_crypto_complete_aead(struct crypto_async_request *req)
}
}
- req->complete(req, result);
+ aead_request_complete(areq, result);
}
static void artpec6_crypto_complete_hash(struct crypto_async_request *req)
{
- req->complete(req, 0);
+ crypto_request_complete(req, 0);
}
diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c
index c8c799428fe0..70b911baab26 100644
--- a/drivers/crypto/bcm/cipher.c
+++ b/drivers/crypto/bcm/cipher.c
@@ -1614,7 +1614,7 @@ static void finish_req(struct iproc_reqctx_s *rctx, int err)
spu_chunk_cleanup(rctx);
if (areq)
- areq->complete(areq, err);
+ crypto_request_complete(areq, err);
}
/**
@@ -2570,66 +2570,29 @@ static int aead_need_fallback(struct aead_request *req)
return payload_len > ctx->max_payload;
}
-static void aead_complete(struct crypto_async_request *areq, int err)
-{
- struct aead_request *req =
- container_of(areq, struct aead_request, base);
- struct iproc_reqctx_s *rctx = aead_request_ctx(req);
- struct crypto_aead *aead = crypto_aead_reqtfm(req);
-
- flow_log("%s() err:%d\n", __func__, err);
-
- areq->tfm = crypto_aead_tfm(aead);
-
- areq->complete = rctx->old_complete;
- areq->data = rctx->old_data;
-
- areq->complete(areq, err);
-}
-
static int aead_do_fallback(struct aead_request *req, bool is_encrypt)
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct crypto_tfm *tfm = crypto_aead_tfm(aead);
struct iproc_reqctx_s *rctx = aead_request_ctx(req);
struct iproc_ctx_s *ctx = crypto_tfm_ctx(tfm);
- int err;
- u32 req_flags;
+ struct aead_request *subreq;
flow_log("%s() enc:%u\n", __func__, is_encrypt);
- if (ctx->fallback_cipher) {
- /* Store the cipher tfm and then use the fallback tfm */
- rctx->old_tfm = tfm;
- aead_request_set_tfm(req, ctx->fallback_cipher);
- /*
- * Save the callback and chain ourselves in, so we can restore
- * the tfm
- */
- rctx->old_complete = req->base.complete;
- rctx->old_data = req->base.data;
- req_flags = aead_request_flags(req);
- aead_request_set_callback(req, req_flags, aead_complete, req);
- err = is_encrypt ? crypto_aead_encrypt(req) :
- crypto_aead_decrypt(req);
-
- if (err == 0) {
- /*
- * fallback was synchronous (did not return
- * -EINPROGRESS). So restore request state here.
- */
- aead_request_set_callback(req, req_flags,
- rctx->old_complete, req);
- req->base.data = rctx->old_data;
- aead_request_set_tfm(req, aead);
- flow_log("%s() fallback completed successfully\n\n",
- __func__);
- }
- } else {
- err = -EINVAL;
- }
+ if (!ctx->fallback_cipher)
+ return -EINVAL;
- return err;
+ subreq = &rctx->req;
+ aead_request_set_tfm(subreq, ctx->fallback_cipher);
+ aead_request_set_callback(subreq, aead_request_flags(req),
+ req->base.complete, req->base.data);
+ aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
+ req->iv);
+ aead_request_set_ad(subreq, req->assoclen);
+
+ return is_encrypt ? crypto_aead_encrypt(req) :
+ crypto_aead_decrypt(req);
}
static int aead_enqueue(struct aead_request *req, bool is_encrypt)
@@ -4243,6 +4206,7 @@ static int ahash_cra_init(struct crypto_tfm *tfm)
static int aead_cra_init(struct crypto_aead *aead)
{
+ unsigned int reqsize = sizeof(struct iproc_reqctx_s);
struct crypto_tfm *tfm = crypto_aead_tfm(aead);
struct iproc_ctx_s *ctx = crypto_tfm_ctx(tfm);
struct crypto_alg *alg = tfm->__crt_alg;
@@ -4254,7 +4218,6 @@ static int aead_cra_init(struct crypto_aead *aead)
flow_log("%s()\n", __func__);
- crypto_aead_set_reqsize(aead, sizeof(struct iproc_reqctx_s));
ctx->is_esp = false;
ctx->salt_len = 0;
ctx->salt_offset = 0;
@@ -4263,22 +4226,29 @@ static int aead_cra_init(struct crypto_aead *aead)
get_random_bytes(ctx->iv, MAX_IV_SIZE);
flow_dump(" iv: ", ctx->iv, MAX_IV_SIZE);
- if (!err) {
- if (alg->cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
- flow_log("%s() creating fallback cipher\n", __func__);
-
- ctx->fallback_cipher =
- crypto_alloc_aead(alg->cra_name, 0,
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_NEED_FALLBACK);
- if (IS_ERR(ctx->fallback_cipher)) {
- pr_err("%s() Error: failed to allocate fallback for %s\n",
- __func__, alg->cra_name);
- return PTR_ERR(ctx->fallback_cipher);
- }
- }
+ if (err)
+ goto out;
+
+ if (!(alg->cra_flags & CRYPTO_ALG_NEED_FALLBACK))
+ goto reqsize;
+
+ flow_log("%s() creating fallback cipher\n", __func__);
+
+ ctx->fallback_cipher = crypto_alloc_aead(alg->cra_name, 0,
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(ctx->fallback_cipher)) {
+ pr_err("%s() Error: failed to allocate fallback for %s\n",
+ __func__, alg->cra_name);
+ return PTR_ERR(ctx->fallback_cipher);
}
+ reqsize += crypto_aead_reqsize(ctx->fallback_cipher);
+
+reqsize:
+ crypto_aead_set_reqsize(aead, reqsize);
+
+out:
return err;
}
diff --git a/drivers/crypto/bcm/cipher.h b/drivers/crypto/bcm/cipher.h
index d6d87332140a..e36881c983cf 100644
--- a/drivers/crypto/bcm/cipher.h
+++ b/drivers/crypto/bcm/cipher.h
@@ -339,15 +339,12 @@ struct iproc_reqctx_s {
/* hmac context */
bool is_sw_hmac;
- /* aead context */
- struct crypto_tfm *old_tfm;
- crypto_completion_t old_complete;
- void *old_data;
-
gfp_t gfp;
/* Buffers used to build SPU request and response messages */
struct spu_msg_buf msg_buf;
+
+ struct aead_request req;
};
/*
diff --git a/drivers/crypto/caam/blob_gen.c b/drivers/crypto/caam/blob_gen.c
index f46b161d2cda..87781c1534ee 100644
--- a/drivers/crypto/caam/blob_gen.c
+++ b/drivers/crypto/caam/blob_gen.c
@@ -83,7 +83,7 @@ int caam_process_blob(struct caam_blob_priv *priv,
output_len = info->input_len - CAAM_BLOB_OVERHEAD;
}
- desc = kzalloc(CAAM_BLOB_DESC_BYTES_MAX, GFP_KERNEL | GFP_DMA);
+ desc = kzalloc(CAAM_BLOB_DESC_BYTES_MAX, GFP_KERNEL);
if (!desc)
return -ENOMEM;
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index ecc15bc521db..4a9b998a8d26 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -59,6 +59,8 @@
#include <crypto/engine.h>
#include <crypto/xts.h>
#include <asm/unaligned.h>
+#include <linux/dma-mapping.h>
+#include <linux/kernel.h>
/*
* crypto alg
@@ -1379,8 +1381,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
/* allocate space for base edesc and hw desc commands, link tables */
- edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes,
- GFP_DMA | flags);
+ edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes, flags);
if (!edesc) {
caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
0, 0, 0);
@@ -1608,6 +1609,7 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
u8 *iv;
int ivsize = crypto_skcipher_ivsize(skcipher);
int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes;
+ unsigned int aligned_size;
src_nents = sg_nents_for_len(req->src, req->cryptlen);
if (unlikely(src_nents < 0)) {
@@ -1681,15 +1683,18 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
/*
* allocate space for base edesc and hw desc commands, link tables, IV
*/
- edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes + ivsize,
- GFP_DMA | flags);
- if (!edesc) {
+ aligned_size = ALIGN(ivsize, __alignof__(*edesc));
+ aligned_size += sizeof(*edesc) + desc_bytes + sec4_sg_bytes;
+ aligned_size = ALIGN(aligned_size, dma_get_cache_alignment());
+ iv = kzalloc(aligned_size, flags);
+ if (!iv) {
dev_err(jrdev, "could not allocate extended descriptor\n");
caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
0, 0, 0);
return ERR_PTR(-ENOMEM);
}
+ edesc = (void *)(iv + ALIGN(ivsize, __alignof__(*edesc)));
edesc->src_nents = src_nents;
edesc->dst_nents = dst_nents;
edesc->mapped_src_nents = mapped_src_nents;
@@ -1701,7 +1706,6 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
/* Make sure IV is located in a DMAable area */
if (ivsize) {
- iv = (u8 *)edesc->sec4_sg + sec4_sg_bytes;
memcpy(iv, req->iv, ivsize);
iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_BIDIRECTIONAL);
diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
index c37b67be0492..5e218bf20d5b 100644
--- a/drivers/crypto/caam/caamalg_qi.c
+++ b/drivers/crypto/caam/caamalg_qi.c
@@ -20,6 +20,8 @@
#include "caamalg_desc.h"
#include <crypto/xts.h>
#include <asm/unaligned.h>
+#include <linux/dma-mapping.h>
+#include <linux/kernel.h>
/*
* crypto alg
@@ -959,7 +961,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
return (struct aead_edesc *)drv_ctx;
/* allocate space for base edesc and hw desc commands, link tables */
- edesc = qi_cache_alloc(GFP_DMA | flags);
+ edesc = qi_cache_alloc(flags);
if (unlikely(!edesc)) {
dev_err(qidev, "could not allocate extended descriptor\n");
return ERR_PTR(-ENOMEM);
@@ -1317,8 +1319,9 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
qm_sg_ents = 1 + pad_sg_nents(qm_sg_ents);
qm_sg_bytes = qm_sg_ents * sizeof(struct qm_sg_entry);
- if (unlikely(offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes +
- ivsize > CAAM_QI_MEMCACHE_SIZE)) {
+ if (unlikely(ALIGN(ivsize, __alignof__(*edesc)) +
+ offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes >
+ CAAM_QI_MEMCACHE_SIZE)) {
dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
qm_sg_ents, ivsize);
caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
@@ -1327,17 +1330,18 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
}
/* allocate space for base edesc, link tables and IV */
- edesc = qi_cache_alloc(GFP_DMA | flags);
- if (unlikely(!edesc)) {
+ iv = qi_cache_alloc(flags);
+ if (unlikely(!iv)) {
dev_err(qidev, "could not allocate extended descriptor\n");
caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
0, DMA_NONE, 0, 0);
return ERR_PTR(-ENOMEM);
}
+ edesc = (void *)(iv + ALIGN(ivsize, __alignof__(*edesc)));
+
/* Make sure IV is located in a DMAable area */
sg_table = &edesc->sgt[0];
- iv = (u8 *)(sg_table + qm_sg_ents);
memcpy(iv, req->iv, ivsize);
iv_dma = dma_map_single(qidev, iv, ivsize, DMA_BIDIRECTIONAL);
diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c
index 1b0dd742c53f..5c8d35edaa1c 100644
--- a/drivers/crypto/caam/caamalg_qi2.c
+++ b/drivers/crypto/caam/caamalg_qi2.c
@@ -16,7 +16,9 @@
#include "caamalg_desc.h"
#include "caamhash_desc.h"
#include "dpseci-debugfs.h"
+#include <linux/dma-mapping.h>
#include <linux/fsl/mc.h>
+#include <linux/kernel.h>
#include <soc/fsl/dpaa2-io.h>
#include <soc/fsl/dpaa2-fd.h>
#include <crypto/xts.h>
@@ -370,7 +372,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
struct dpaa2_sg_entry *sg_table;
/* allocate space for base edesc, link tables and IV */
- edesc = qi_cache_zalloc(GFP_DMA | flags);
+ edesc = qi_cache_zalloc(flags);
if (unlikely(!edesc)) {
dev_err(dev, "could not allocate extended descriptor\n");
return ERR_PTR(-ENOMEM);
@@ -1189,7 +1191,7 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req)
}
/* allocate space for base edesc, link tables and IV */
- edesc = qi_cache_zalloc(GFP_DMA | flags);
+ edesc = qi_cache_zalloc(flags);
if (unlikely(!edesc)) {
dev_err(dev, "could not allocate extended descriptor\n");
caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
@@ -3220,14 +3222,14 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
int ret = -ENOMEM;
struct dpaa2_fl_entry *in_fle, *out_fle;
- req_ctx = kzalloc(sizeof(*req_ctx), GFP_KERNEL | GFP_DMA);
+ req_ctx = kzalloc(sizeof(*req_ctx), GFP_KERNEL);
if (!req_ctx)
return -ENOMEM;
in_fle = &req_ctx->fd_flt[1];
out_fle = &req_ctx->fd_flt[0];
- flc = kzalloc(sizeof(*flc), GFP_KERNEL | GFP_DMA);
+ flc = kzalloc(sizeof(*flc), GFP_KERNEL);
if (!flc)
goto err_flc;
@@ -3316,7 +3318,13 @@ static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key,
dev_dbg(ctx->dev, "keylen %d blocksize %d\n", keylen, blocksize);
if (keylen > blocksize) {
- hashed_key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
+ unsigned int aligned_len =
+ ALIGN(keylen, dma_get_cache_alignment());
+
+ if (aligned_len < keylen)
+ return -EOVERFLOW;
+
+ hashed_key = kmemdup(key, aligned_len, GFP_KERNEL);
if (!hashed_key)
return -ENOMEM;
ret = hash_digest_key(ctx, &keylen, hashed_key, digestsize);
@@ -3411,7 +3419,7 @@ static void ahash_done(void *cbk_ctx, u32 status)
DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
ctx->ctx_len, 1);
- req->base.complete(&req->base, ecode);
+ ahash_request_complete(req, ecode);
}
static void ahash_done_bi(void *cbk_ctx, u32 status)
@@ -3449,7 +3457,7 @@ static void ahash_done_bi(void *cbk_ctx, u32 status)
DUMP_PREFIX_ADDRESS, 16, 4, req->result,
crypto_ahash_digestsize(ahash), 1);
- req->base.complete(&req->base, ecode);
+ ahash_request_complete(req, ecode);
}
static void ahash_done_ctx_src(void *cbk_ctx, u32 status)
@@ -3476,7 +3484,7 @@ static void ahash_done_ctx_src(void *cbk_ctx, u32 status)
DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
ctx->ctx_len, 1);
- req->base.complete(&req->base, ecode);
+ ahash_request_complete(req, ecode);
}
static void ahash_done_ctx_dst(void *cbk_ctx, u32 status)
@@ -3514,7 +3522,7 @@ static void ahash_done_ctx_dst(void *cbk_ctx, u32 status)
DUMP_PREFIX_ADDRESS, 16, 4, req->result,
crypto_ahash_digestsize(ahash), 1);
- req->base.complete(&req->base, ecode);
+ ahash_request_complete(req, ecode);
}
static int ahash_update_ctx(struct ahash_request *req)
@@ -3560,7 +3568,7 @@ static int ahash_update_ctx(struct ahash_request *req)
}
/* allocate space for base edesc and link tables */
- edesc = qi_cache_zalloc(GFP_DMA | flags);
+ edesc = qi_cache_zalloc(flags);
if (!edesc) {
dma_unmap_sg(ctx->dev, req->src, src_nents,
DMA_TO_DEVICE);
@@ -3654,7 +3662,7 @@ static int ahash_final_ctx(struct ahash_request *req)
int ret;
/* allocate space for base edesc and link tables */
- edesc = qi_cache_zalloc(GFP_DMA | flags);
+ edesc = qi_cache_zalloc(flags);
if (!edesc)
return -ENOMEM;
@@ -3743,7 +3751,7 @@ static int ahash_finup_ctx(struct ahash_request *req)
}
/* allocate space for base edesc and link tables */
- edesc = qi_cache_zalloc(GFP_DMA | flags);
+ edesc = qi_cache_zalloc(flags);
if (!edesc) {
dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
return -ENOMEM;
@@ -3836,7 +3844,7 @@ static int ahash_digest(struct ahash_request *req)
}
/* allocate space for base edesc and link tables */
- edesc = qi_cache_zalloc(GFP_DMA | flags);
+ edesc = qi_cache_zalloc(flags);
if (!edesc) {
dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
return ret;
@@ -3913,7 +3921,7 @@ static int ahash_final_no_ctx(struct ahash_request *req)
int ret = -ENOMEM;
/* allocate space for base edesc and link tables */
- edesc = qi_cache_zalloc(GFP_DMA | flags);
+ edesc = qi_cache_zalloc(flags);
if (!edesc)
return ret;
@@ -4012,7 +4020,7 @@ static int ahash_update_no_ctx(struct ahash_request *req)
}
/* allocate space for base edesc and link tables */
- edesc = qi_cache_zalloc(GFP_DMA | flags);
+ edesc = qi_cache_zalloc(flags);
if (!edesc) {
dma_unmap_sg(ctx->dev, req->src, src_nents,
DMA_TO_DEVICE);
@@ -4125,7 +4133,7 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
}
/* allocate space for base edesc and link tables */
- edesc = qi_cache_zalloc(GFP_DMA | flags);
+ edesc = qi_cache_zalloc(flags);
if (!edesc) {
dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
return ret;
@@ -4230,7 +4238,7 @@ static int ahash_update_first(struct ahash_request *req)
}
/* allocate space for base edesc and link tables */
- edesc = qi_cache_zalloc(GFP_DMA | flags);
+ edesc = qi_cache_zalloc(flags);
if (!edesc) {
dma_unmap_sg(ctx->dev, req->src, src_nents,
DMA_TO_DEVICE);
@@ -4926,6 +4934,7 @@ static int dpaa2_dpseci_congestion_setup(struct dpaa2_caam_priv *priv,
{
struct dpseci_congestion_notification_cfg cong_notif_cfg = { 0 };
struct device *dev = priv->dev;
+ unsigned int alignmask;
int err;
/*
@@ -4936,13 +4945,14 @@ static int dpaa2_dpseci_congestion_setup(struct dpaa2_caam_priv *priv,
!(priv->dpseci_attr.options & DPSECI_OPT_HAS_CG))
return 0;
- priv->cscn_mem = kzalloc(DPAA2_CSCN_SIZE + DPAA2_CSCN_ALIGN,
- GFP_KERNEL | GFP_DMA);
+ alignmask = DPAA2_CSCN_ALIGN - 1;
+ alignmask |= dma_get_cache_alignment() - 1;
+ priv->cscn_mem = kzalloc(ALIGN(DPAA2_CSCN_SIZE, alignmask + 1),
+ GFP_KERNEL);
if (!priv->cscn_mem)
return -ENOMEM;
- priv->cscn_mem_aligned = PTR_ALIGN(priv->cscn_mem, DPAA2_CSCN_ALIGN);
- priv->cscn_dma = dma_map_single(dev, priv->cscn_mem_aligned,
+ priv->cscn_dma = dma_map_single(dev, priv->cscn_mem,
DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
if (dma_mapping_error(dev, priv->cscn_dma)) {
dev_err(dev, "Error mapping CSCN memory area\n");
@@ -5174,7 +5184,7 @@ static int dpaa2_caam_probe(struct fsl_mc_device *dpseci_dev)
priv->domain = iommu_get_domain_for_dev(dev);
qi_cache = kmem_cache_create("dpaa2_caamqicache", CAAM_QI_MEMCACHE_SIZE,
- 0, SLAB_CACHE_DMA, NULL);
+ 0, 0, NULL);
if (!qi_cache) {
dev_err(dev, "Can't allocate SEC cache\n");
return -ENOMEM;
@@ -5451,7 +5461,7 @@ int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req)
dma_sync_single_for_cpu(priv->dev, priv->cscn_dma,
DPAA2_CSCN_SIZE,
DMA_FROM_DEVICE);
- if (unlikely(dpaa2_cscn_state_congested(priv->cscn_mem_aligned))) {
+ if (unlikely(dpaa2_cscn_state_congested(priv->cscn_mem))) {
dev_dbg_ratelimited(dev, "Dropping request\n");
return -EBUSY;
}
diff --git a/drivers/crypto/caam/caamalg_qi2.h b/drivers/crypto/caam/caamalg_qi2.h
index d35253407ade..abb502bb675c 100644
--- a/drivers/crypto/caam/caamalg_qi2.h
+++ b/drivers/crypto/caam/caamalg_qi2.h
@@ -7,13 +7,14 @@
#ifndef _CAAMALG_QI2_H_
#define _CAAMALG_QI2_H_
+#include <crypto/internal/skcipher.h>
+#include <linux/compiler_attributes.h>
#include <soc/fsl/dpaa2-io.h>
#include <soc/fsl/dpaa2-fd.h>
#include <linux/threads.h>
#include <linux/netdevice.h>
#include "dpseci.h"
#include "desc_constr.h"
-#include <crypto/skcipher.h>
#define DPAA2_CAAM_STORE_SIZE 16
/* NAPI weight *must* be a multiple of the store size. */
@@ -36,8 +37,6 @@
* @tx_queue_attr: array of Tx queue attributes
* @cscn_mem: pointer to memory region containing the congestion SCN
* it's size is larger than to accommodate alignment
- * @cscn_mem_aligned: pointer to congestion SCN; it is computed as
- * PTR_ALIGN(cscn_mem, DPAA2_CSCN_ALIGN)
* @cscn_dma: dma address used by the QMAN to write CSCN messages
* @dev: device associated with the DPSECI object
* @mc_io: pointer to MC portal's I/O object
@@ -58,7 +57,6 @@ struct dpaa2_caam_priv {
/* congestion */
void *cscn_mem;
- void *cscn_mem_aligned;
dma_addr_t cscn_dma;
struct device *dev;
@@ -158,7 +156,7 @@ struct ahash_edesc {
struct caam_flc {
u32 flc[16];
u32 sh_desc[MAX_SDLEN];
-} ____cacheline_aligned;
+} __aligned(CRYPTO_DMA_ALIGN);
enum optype {
ENCRYPT = 0,
@@ -180,7 +178,7 @@ enum optype {
* @edesc: extended descriptor; points to one of {skcipher,aead}_edesc
*/
struct caam_request {
- struct dpaa2_fl_entry fd_flt[2];
+ struct dpaa2_fl_entry fd_flt[2] __aligned(CRYPTO_DMA_ALIGN);
dma_addr_t fd_flt_dma;
struct caam_flc *flc;
dma_addr_t flc_dma;
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 1050e965a438..82d3c730a502 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -66,6 +66,8 @@
#include "key_gen.h"
#include "caamhash_desc.h"
#include <crypto/engine.h>
+#include <linux/dma-mapping.h>
+#include <linux/kernel.h>
#define CAAM_CRA_PRIORITY 3000
@@ -365,7 +367,7 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
dma_addr_t key_dma;
int ret;
- desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
+ desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL);
if (!desc) {
dev_err(jrdev, "unable to allocate key input memory\n");
return -ENOMEM;
@@ -432,7 +434,13 @@ static int ahash_setkey(struct crypto_ahash *ahash,
dev_dbg(jrdev, "keylen %d\n", keylen);
if (keylen > blocksize) {
- hashed_key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
+ unsigned int aligned_len =
+ ALIGN(keylen, dma_get_cache_alignment());
+
+ if (aligned_len < keylen)
+ return -EOVERFLOW;
+
+ hashed_key = kmemdup(key, keylen, GFP_KERNEL);
if (!hashed_key)
return -ENOMEM;
ret = hash_digest_key(ctx, &keylen, hashed_key, digestsize);
@@ -606,7 +614,7 @@ static inline void ahash_done_cpy(struct device *jrdev, u32 *desc, u32 err,
* by CAAM, not crypto engine.
*/
if (!has_bklog)
- req->base.complete(&req->base, ecode);
+ ahash_request_complete(req, ecode);
else
crypto_finalize_hash_request(jrp->engine, req, ecode);
}
@@ -668,7 +676,7 @@ static inline void ahash_done_switch(struct device *jrdev, u32 *desc, u32 err,
* by CAAM, not crypto engine.
*/
if (!has_bklog)
- req->base.complete(&req->base, ecode);
+ ahash_request_complete(req, ecode);
else
crypto_finalize_hash_request(jrp->engine, req, ecode);
@@ -702,7 +710,7 @@ static struct ahash_edesc *ahash_edesc_alloc(struct ahash_request *req,
struct ahash_edesc *edesc;
unsigned int sg_size = sg_num * sizeof(struct sec4_sg_entry);
- edesc = kzalloc(sizeof(*edesc) + sg_size, GFP_DMA | flags);
+ edesc = kzalloc(sizeof(*edesc) + sg_size, flags);
if (!edesc) {
dev_err(ctx->jrdev, "could not allocate extended descriptor\n");
return NULL;
diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c
index aef031946f33..e40614fef39d 100644
--- a/drivers/crypto/caam/caampkc.c
+++ b/drivers/crypto/caam/caampkc.c
@@ -16,6 +16,8 @@
#include "desc_constr.h"
#include "sg_sw_sec4.h"
#include "caampkc.h"
+#include <linux/dma-mapping.h>
+#include <linux/kernel.h>
#define DESC_RSA_PUB_LEN (2 * CAAM_CMD_SZ + SIZEOF_RSA_PUB_PDB)
#define DESC_RSA_PRIV_F1_LEN (2 * CAAM_CMD_SZ + \
@@ -310,8 +312,7 @@ static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry);
/* allocate space for base edesc, hw desc commands and link tables */
- edesc = kzalloc(sizeof(*edesc) + desclen + sec4_sg_bytes,
- GFP_DMA | flags);
+ edesc = kzalloc(sizeof(*edesc) + desclen + sec4_sg_bytes, flags);
if (!edesc)
goto dst_fail;
@@ -898,7 +899,7 @@ static u8 *caam_read_rsa_crt(const u8 *ptr, size_t nbytes, size_t dstlen)
if (!nbytes)
return NULL;
- dst = kzalloc(dstlen, GFP_DMA | GFP_KERNEL);
+ dst = kzalloc(dstlen, GFP_KERNEL);
if (!dst)
return NULL;
@@ -910,7 +911,7 @@ static u8 *caam_read_rsa_crt(const u8 *ptr, size_t nbytes, size_t dstlen)
/**
* caam_read_raw_data - Read a raw byte stream as a positive integer.
* The function skips buffer's leading zeros, copies the remained data
- * to a buffer allocated in the GFP_DMA | GFP_KERNEL zone and returns
+ * to a buffer allocated in the GFP_KERNEL zone and returns
* the address of the new buffer.
*
* @buf : The data to read
@@ -923,7 +924,7 @@ static inline u8 *caam_read_raw_data(const u8 *buf, size_t *nbytes)
if (!*nbytes)
return NULL;
- return kmemdup(buf, *nbytes, GFP_DMA | GFP_KERNEL);
+ return kmemdup(buf, *nbytes, GFP_KERNEL);
}
static int caam_rsa_check_key_length(unsigned int len)
@@ -949,13 +950,13 @@ static int caam_rsa_set_pub_key(struct crypto_akcipher *tfm, const void *key,
return ret;
/* Copy key in DMA zone */
- rsa_key->e = kmemdup(raw_key.e, raw_key.e_sz, GFP_DMA | GFP_KERNEL);
+ rsa_key->e = kmemdup(raw_key.e, raw_key.e_sz, GFP_KERNEL);
if (!rsa_key->e)
goto err;
/*
* Skip leading zeros and copy the positive integer to a buffer
- * allocated in the GFP_DMA | GFP_KERNEL zone. The decryption descriptor
+ * allocated in the GFP_KERNEL zone. The decryption descriptor
* expects a positive integer for the RSA modulus and uses its length as
* decryption output length.
*/
@@ -983,6 +984,7 @@ static void caam_rsa_set_priv_key_form(struct caam_rsa_ctx *ctx,
struct caam_rsa_key *rsa_key = &ctx->key;
size_t p_sz = raw_key->p_sz;
size_t q_sz = raw_key->q_sz;
+ unsigned aligned_size;
rsa_key->p = caam_read_raw_data(raw_key->p, &p_sz);
if (!rsa_key->p)
@@ -994,11 +996,13 @@ static void caam_rsa_set_priv_key_form(struct caam_rsa_ctx *ctx,
goto free_p;
rsa_key->q_sz = q_sz;
- rsa_key->tmp1 = kzalloc(raw_key->p_sz, GFP_DMA | GFP_KERNEL);
+ aligned_size = ALIGN(raw_key->p_sz, dma_get_cache_alignment());
+ rsa_key->tmp1 = kzalloc(aligned_size, GFP_KERNEL);
if (!rsa_key->tmp1)
goto free_q;
- rsa_key->tmp2 = kzalloc(raw_key->q_sz, GFP_DMA | GFP_KERNEL);
+ aligned_size = ALIGN(raw_key->q_sz, dma_get_cache_alignment());
+ rsa_key->tmp2 = kzalloc(aligned_size, GFP_KERNEL);
if (!rsa_key->tmp2)
goto free_tmp1;
@@ -1051,17 +1055,17 @@ static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key,
return ret;
/* Copy key in DMA zone */
- rsa_key->d = kmemdup(raw_key.d, raw_key.d_sz, GFP_DMA | GFP_KERNEL);
+ rsa_key->d = kmemdup(raw_key.d, raw_key.d_sz, GFP_KERNEL);
if (!rsa_key->d)
goto err;
- rsa_key->e = kmemdup(raw_key.e, raw_key.e_sz, GFP_DMA | GFP_KERNEL);
+ rsa_key->e = kmemdup(raw_key.e, raw_key.e_sz, GFP_KERNEL);
if (!rsa_key->e)
goto err;
/*
* Skip leading zeros and copy the positive integer to a buffer
- * allocated in the GFP_DMA | GFP_KERNEL zone. The decryption descriptor
+ * allocated in the GFP_KERNEL zone. The decryption descriptor
* expects a positive integer for the RSA modulus and uses its length as
* decryption output length.
*/
@@ -1185,8 +1189,7 @@ int caam_pkc_init(struct device *ctrldev)
return 0;
/* allocate zero buffer, used for padding input */
- zero_buffer = kzalloc(CAAM_RSA_MAX_INPUT_SIZE - 1, GFP_DMA |
- GFP_KERNEL);
+ zero_buffer = kzalloc(CAAM_RSA_MAX_INPUT_SIZE - 1, GFP_KERNEL);
if (!zero_buffer)
return -ENOMEM;
diff --git a/drivers/crypto/caam/caamprng.c b/drivers/crypto/caam/caamprng.c
index 4839e66300a2..6e4c1191cb28 100644
--- a/drivers/crypto/caam/caamprng.c
+++ b/drivers/crypto/caam/caamprng.c
@@ -8,6 +8,8 @@
#include <linux/completion.h>
#include <crypto/internal/rng.h>
+#include <linux/dma-mapping.h>
+#include <linux/kernel.h>
#include "compat.h"
#include "regs.h"
#include "intern.h"
@@ -75,6 +77,7 @@ static int caam_prng_generate(struct crypto_rng *tfm,
const u8 *src, unsigned int slen,
u8 *dst, unsigned int dlen)
{
+ unsigned int aligned_dlen = ALIGN(dlen, dma_get_cache_alignment());
struct caam_prng_ctx ctx;
struct device *jrdev;
dma_addr_t dst_dma;
@@ -82,7 +85,10 @@ static int caam_prng_generate(struct crypto_rng *tfm,
u8 *buf;
int ret;
- buf = kzalloc(dlen, GFP_KERNEL);
+ if (aligned_dlen < dlen)
+ return -EOVERFLOW;
+
+ buf = kzalloc(aligned_dlen, GFP_KERNEL);
if (!buf)
return -ENOMEM;
@@ -94,7 +100,7 @@ static int caam_prng_generate(struct crypto_rng *tfm,
return ret;
}
- desc = kzalloc(CAAM_PRNG_MAX_DESC_LEN, GFP_KERNEL | GFP_DMA);
+ desc = kzalloc(CAAM_PRNG_MAX_DESC_LEN, GFP_KERNEL);
if (!desc) {
ret = -ENOMEM;
goto out1;
@@ -156,7 +162,7 @@ static int caam_prng_seed(struct crypto_rng *tfm,
return ret;
}
- desc = kzalloc(CAAM_PRNG_MAX_DESC_LEN, GFP_KERNEL | GFP_DMA);
+ desc = kzalloc(CAAM_PRNG_MAX_DESC_LEN, GFP_KERNEL);
if (!desc) {
caam_jr_free(jrdev);
return -ENOMEM;
diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c
index 1f0e82050976..1fd8ff965006 100644
--- a/drivers/crypto/caam/caamrng.c
+++ b/drivers/crypto/caam/caamrng.c
@@ -12,6 +12,8 @@
#include <linux/hw_random.h>
#include <linux/completion.h>
#include <linux/atomic.h>
+#include <linux/dma-mapping.h>
+#include <linux/kernel.h>
#include <linux/kfifo.h>
#include "compat.h"
@@ -176,17 +178,18 @@ static int caam_init(struct hwrng *rng)
int err;
ctx->desc_sync = devm_kzalloc(ctx->ctrldev, CAAM_RNG_DESC_LEN,
- GFP_DMA | GFP_KERNEL);
+ GFP_KERNEL);
if (!ctx->desc_sync)
return -ENOMEM;
ctx->desc_async = devm_kzalloc(ctx->ctrldev, CAAM_RNG_DESC_LEN,
- GFP_DMA | GFP_KERNEL);
+ GFP_KERNEL);
if (!ctx->desc_async)
return -ENOMEM;
- if (kfifo_alloc(&ctx->fifo, CAAM_RNG_MAX_FIFO_STORE_SIZE,
- GFP_DMA | GFP_KERNEL))
+ if (kfifo_alloc(&ctx->fifo, ALIGN(CAAM_RNG_MAX_FIFO_STORE_SIZE,
+ dma_get_cache_alignment()),
+ GFP_KERNEL))
return -ENOMEM;
INIT_WORK(&ctx->worker, caam_rng_worker);
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index 32253a064d0f..6278afb951c3 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -199,7 +199,7 @@ static int deinstantiate_rng(struct device *ctrldev, int state_handle_mask)
u32 *desc, status;
int sh_idx, ret = 0;
- desc = kmalloc(CAAM_CMD_SZ * 3, GFP_KERNEL | GFP_DMA);
+ desc = kmalloc(CAAM_CMD_SZ * 3, GFP_KERNEL);
if (!desc)
return -ENOMEM;
@@ -276,7 +276,7 @@ static int instantiate_rng(struct device *ctrldev, int state_handle_mask,
int ret = 0, sh_idx;
ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl;
- desc = kmalloc(CAAM_CMD_SZ * 7, GFP_KERNEL | GFP_DMA);
+ desc = kmalloc(CAAM_CMD_SZ * 7, GFP_KERNEL);
if (!desc)
return -ENOMEM;
diff --git a/drivers/crypto/caam/desc_constr.h b/drivers/crypto/caam/desc_constr.h
index 62ce6421bb3f..824c94d44f94 100644
--- a/drivers/crypto/caam/desc_constr.h
+++ b/drivers/crypto/caam/desc_constr.h
@@ -163,7 +163,8 @@ static inline void append_data(u32 * const desc, const void *data, int len)
{
u32 *offset = desc_end(desc);
- if (len) /* avoid sparse warning: memcpy with byte count of 0 */
+ /* Avoid gcc warning: memcpy with data == NULL */
+ if (!IS_ENABLED(CONFIG_CRYPTO_DEV_FSL_CAAM_DEBUG) || data)
memcpy(offset, data, len);
(*desc) = cpu_to_caam32(caam32_to_cpu(*desc) +
diff --git a/drivers/crypto/caam/key_gen.c b/drivers/crypto/caam/key_gen.c
index b0e8a4939b4f..88cc4fe2a585 100644
--- a/drivers/crypto/caam/key_gen.c
+++ b/drivers/crypto/caam/key_gen.c
@@ -64,7 +64,7 @@ int gen_split_key(struct device *jrdev, u8 *key_out,
if (local_max > max_keylen)
return -EINVAL;
- desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
+ desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL);
if (!desc) {
dev_err(jrdev, "unable to allocate key input memory\n");
return ret;
diff --git a/drivers/crypto/caam/qi.c b/drivers/crypto/caam/qi.c
index c36f27376d7e..4c52c9365558 100644
--- a/drivers/crypto/caam/qi.c
+++ b/drivers/crypto/caam/qi.c
@@ -614,7 +614,7 @@ static int alloc_rsp_fq_cpu(struct device *qidev, unsigned int cpu)
struct qman_fq *fq;
int ret;
- fq = kzalloc(sizeof(*fq), GFP_KERNEL | GFP_DMA);
+ fq = kzalloc(sizeof(*fq), GFP_KERNEL);
if (!fq)
return -ENOMEM;
@@ -756,7 +756,7 @@ int caam_qi_init(struct platform_device *caam_pdev)
}
qi_cache = kmem_cache_create("caamqicache", CAAM_QI_MEMCACHE_SIZE, 0,
- SLAB_CACHE_DMA, NULL);
+ 0, NULL);
if (!qi_cache) {
dev_err(qidev, "Can't allocate CAAM cache\n");
free_rsp_fqs();
diff --git a/drivers/crypto/caam/qi.h b/drivers/crypto/caam/qi.h
index 5894f16f8fe3..a96e3d213c06 100644
--- a/drivers/crypto/caam/qi.h
+++ b/drivers/crypto/caam/qi.h
@@ -9,6 +9,8 @@
#ifndef __QI_H__
#define __QI_H__
+#include <crypto/algapi.h>
+#include <linux/compiler_attributes.h>
#include <soc/fsl/qman.h>
#include "compat.h"
#include "desc.h"
@@ -58,8 +60,10 @@ enum optype {
* @qidev: device pointer for CAAM/QI backend
*/
struct caam_drv_ctx {
- u32 prehdr[2];
- u32 sh_desc[MAX_SDLEN];
+ struct {
+ u32 prehdr[2];
+ u32 sh_desc[MAX_SDLEN];
+ } __aligned(CRYPTO_DMA_ALIGN);
dma_addr_t context_a;
struct qman_fq *req_fq;
struct qman_fq *rsp_fq;
@@ -67,7 +71,7 @@ struct caam_drv_ctx {
int cpu;
enum optype op_type;
struct device *qidev;
-} ____cacheline_aligned;
+};
/**
* caam_drv_req - The request structure the driver application should fill while
@@ -88,7 +92,7 @@ struct caam_drv_req {
struct caam_drv_ctx *drv_ctx;
caam_qi_cbk cbk;
void *app_ctx;
-} ____cacheline_aligned;
+} __aligned(CRYPTO_DMA_ALIGN);
/**
* caam_drv_ctx_init - Initialise a CAAM/QI driver context
diff --git a/drivers/crypto/cavium/cpt/cptvf_algs.c b/drivers/crypto/cavium/cpt/cptvf_algs.c
index 9eca0c302186..ee476c6c7f82 100644
--- a/drivers/crypto/cavium/cpt/cptvf_algs.c
+++ b/drivers/crypto/cavium/cpt/cptvf_algs.c
@@ -28,7 +28,7 @@ static void cvm_callback(u32 status, void *arg)
{
struct crypto_async_request *req = (struct crypto_async_request *)arg;
- req->complete(req, !status);
+ crypto_request_complete(req, !status);
}
static inline void update_input_iv(struct cpt_request_info *req_info,
@@ -232,13 +232,12 @@ static int cvm_decrypt(struct skcipher_request *req)
static int cvm_xts_setkey(struct crypto_skcipher *cipher, const u8 *key,
u32 keylen)
{
- struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
- struct cvm_enc_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct cvm_enc_ctx *ctx = crypto_skcipher_ctx(cipher);
int err;
const u8 *key1 = key;
const u8 *key2 = key + (keylen / 2);
- err = xts_check_key(tfm, key, keylen);
+ err = xts_verify_key(cipher, key, keylen);
if (err)
return err;
ctx->key_len = keylen;
@@ -289,8 +288,7 @@ static int cvm_validate_keylen(struct cvm_enc_ctx *ctx, u32 keylen)
static int cvm_setkey(struct crypto_skcipher *cipher, const u8 *key,
u32 keylen, u8 cipher_type)
{
- struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
- struct cvm_enc_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct cvm_enc_ctx *ctx = crypto_skcipher_ctx(cipher);
ctx->cipher_type = cipher_type;
if (!cvm_validate_keylen(ctx, keylen)) {
diff --git a/drivers/crypto/cavium/nitrox/nitrox_aead.c b/drivers/crypto/cavium/nitrox/nitrox_aead.c
index 0653484df23f..b0e53034164a 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_aead.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_aead.c
@@ -199,7 +199,7 @@ static void nitrox_aead_callback(void *arg, int err)
err = -EINVAL;
}
- areq->base.complete(&areq->base, err);
+ aead_request_complete(areq, err);
}
static inline bool nitrox_aes_gcm_assoclen_supported(unsigned int assoclen)
@@ -434,7 +434,7 @@ static void nitrox_rfc4106_callback(void *arg, int err)
err = -EINVAL;
}
- areq->base.complete(&areq->base, err);
+ aead_request_complete(areq, err);
}
static int nitrox_rfc4106_enc(struct aead_request *areq)
diff --git a/drivers/crypto/cavium/nitrox/nitrox_skcipher.c b/drivers/crypto/cavium/nitrox/nitrox_skcipher.c
index 248b4fff1c72..138261dcd032 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_skcipher.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_skcipher.c
@@ -337,12 +337,11 @@ static int nitrox_3des_decrypt(struct skcipher_request *skreq)
static int nitrox_aes_xts_setkey(struct crypto_skcipher *cipher,
const u8 *key, unsigned int keylen)
{
- struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
- struct nitrox_crypto_ctx *nctx = crypto_tfm_ctx(tfm);
+ struct nitrox_crypto_ctx *nctx = crypto_skcipher_ctx(cipher);
struct flexi_crypto_context *fctx;
int aes_keylen, ret;
- ret = xts_check_key(tfm, key, keylen);
+ ret = xts_verify_key(cipher, key, keylen);
if (ret)
return ret;
@@ -362,8 +361,7 @@ static int nitrox_aes_xts_setkey(struct crypto_skcipher *cipher,
static int nitrox_aes_ctr_rfc3686_setkey(struct crypto_skcipher *cipher,
const u8 *key, unsigned int keylen)
{
- struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
- struct nitrox_crypto_ctx *nctx = crypto_tfm_ctx(tfm);
+ struct nitrox_crypto_ctx *nctx = crypto_skcipher_ctx(cipher);
struct flexi_crypto_context *fctx;
int aes_keylen;
diff --git a/drivers/crypto/ccp/ccp-crypto-main.c b/drivers/crypto/ccp/ccp-crypto-main.c
index 73442a382f68..ecd58b38c46e 100644
--- a/drivers/crypto/ccp/ccp-crypto-main.c
+++ b/drivers/crypto/ccp/ccp-crypto-main.c
@@ -146,7 +146,7 @@ static void ccp_crypto_complete(void *data, int err)
/* Only propagate the -EINPROGRESS if necessary */
if (crypto_cmd->ret == -EBUSY) {
crypto_cmd->ret = -EINPROGRESS;
- req->complete(req, -EINPROGRESS);
+ crypto_request_complete(req, -EINPROGRESS);
}
return;
@@ -159,18 +159,18 @@ static void ccp_crypto_complete(void *data, int err)
held = ccp_crypto_cmd_complete(crypto_cmd, &backlog);
if (backlog) {
backlog->ret = -EINPROGRESS;
- backlog->req->complete(backlog->req, -EINPROGRESS);
+ crypto_request_complete(backlog->req, -EINPROGRESS);
}
/* Transition the state from -EBUSY to -EINPROGRESS first */
if (crypto_cmd->ret == -EBUSY)
- req->complete(req, -EINPROGRESS);
+ crypto_request_complete(req, -EINPROGRESS);
/* Completion callbacks */
ret = err;
if (ctx->complete)
ret = ctx->complete(req, ret);
- req->complete(req, ret);
+ crypto_request_complete(req, ret);
/* Submit the next cmd */
while (held) {
@@ -186,12 +186,12 @@ static void ccp_crypto_complete(void *data, int err)
ctx = crypto_tfm_ctx_dma(held->req->tfm);
if (ctx->complete)
ret = ctx->complete(held->req, ret);
- held->req->complete(held->req, ret);
+ crypto_request_complete(held->req, ret);
next = ccp_crypto_cmd_complete(held, &backlog);
if (backlog) {
backlog->ret = -EINPROGRESS;
- backlog->req->complete(backlog->req, -EINPROGRESS);
+ crypto_request_complete(backlog->req, -EINPROGRESS);
}
kfree(held);
diff --git a/drivers/crypto/ccp/ccp-dmaengine.c b/drivers/crypto/ccp/ccp-dmaengine.c
index 9f753cb4f5f1..b386a7063818 100644
--- a/drivers/crypto/ccp/ccp-dmaengine.c
+++ b/drivers/crypto/ccp/ccp-dmaengine.c
@@ -642,14 +642,26 @@ static void ccp_dma_release(struct ccp_device *ccp)
chan = ccp->ccp_dma_chan + i;
dma_chan = &chan->dma_chan;
- if (dma_chan->client_count)
- dma_release_channel(dma_chan);
-
tasklet_kill(&chan->cleanup_tasklet);
list_del_rcu(&dma_chan->device_node);
}
}
+static void ccp_dma_release_channels(struct ccp_device *ccp)
+{
+ struct ccp_dma_chan *chan;
+ struct dma_chan *dma_chan;
+ unsigned int i;
+
+ for (i = 0; i < ccp->cmd_q_count; i++) {
+ chan = ccp->ccp_dma_chan + i;
+ dma_chan = &chan->dma_chan;
+
+ if (dma_chan->client_count)
+ dma_release_channel(dma_chan);
+ }
+}
+
int ccp_dmaengine_register(struct ccp_device *ccp)
{
struct ccp_dma_chan *chan;
@@ -770,8 +782,9 @@ void ccp_dmaengine_unregister(struct ccp_device *ccp)
if (!dmaengine)
return;
- ccp_dma_release(ccp);
+ ccp_dma_release_channels(ccp);
dma_async_device_unregister(dma_dev);
+ ccp_dma_release(ccp);
kmem_cache_destroy(ccp->dma_desc_cache);
kmem_cache_destroy(ccp->dma_cmd_cache);
diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
index 06fc7156c04f..e2f25926eb51 100644
--- a/drivers/crypto/ccp/sev-dev.c
+++ b/drivers/crypto/ccp/sev-dev.c
@@ -26,6 +26,7 @@
#include <linux/fs_struct.h>
#include <asm/smp.h>
+#include <asm/cacheflush.h>
#include "psp-dev.h"
#include "sev-dev.h"
@@ -56,6 +57,7 @@ MODULE_PARM_DESC(psp_init_on_probe, " if true, the PSP will be initialized on m
MODULE_FIRMWARE("amd/amd_sev_fam17h_model0xh.sbin"); /* 1st gen EPYC */
MODULE_FIRMWARE("amd/amd_sev_fam17h_model3xh.sbin"); /* 2nd gen EPYC */
MODULE_FIRMWARE("amd/amd_sev_fam19h_model0xh.sbin"); /* 3rd gen EPYC */
+MODULE_FIRMWARE("amd/amd_sev_fam19h_model1xh.sbin"); /* 4th gen EPYC */
static bool psp_dead;
static int psp_timeout;
@@ -881,7 +883,14 @@ static int sev_ioctl_do_get_id2(struct sev_issue_cmd *argp)
input_address = (void __user *)input.address;
if (input.address && input.length) {
- id_blob = kzalloc(input.length, GFP_KERNEL);
+ /*
+ * The length of the ID shouldn't be assumed by software since
+ * it may change in the future. The allocation size is limited
+ * to 1 << (PAGE_SHIFT + MAX_ORDER - 1) by the page allocator.
+ * If the allocation fails, simply return ENOMEM rather than
+ * warning in the kernel log.
+ */
+ id_blob = kzalloc(input.length, GFP_KERNEL | __GFP_NOWARN);
if (!id_blob)
return -ENOMEM;
@@ -1327,7 +1336,10 @@ void sev_pci_init(void)
/* Obtain the TMR memory area for SEV-ES use */
sev_es_tmr = sev_fw_alloc(SEV_ES_TMR_SIZE);
- if (!sev_es_tmr)
+ if (sev_es_tmr)
+ /* Must flush the cache before giving it to the firmware */
+ clflush_cache_range(sev_es_tmr, SEV_ES_TMR_SIZE);
+ else
dev_warn(sev->dev,
"SEV: TMR allocation failed, SEV-ES support unavailable\n");
diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c
index 084d052fddcc..cde33b2ac71b 100644
--- a/drivers/crypto/ccp/sp-pci.c
+++ b/drivers/crypto/ccp/sp-pci.c
@@ -342,52 +342,52 @@ static int __maybe_unused sp_pci_resume(struct device *dev)
#ifdef CONFIG_CRYPTO_DEV_SP_PSP
static const struct sev_vdata sevv1 = {
- .cmdresp_reg = 0x10580,
- .cmdbuff_addr_lo_reg = 0x105e0,
- .cmdbuff_addr_hi_reg = 0x105e4,
+ .cmdresp_reg = 0x10580, /* C2PMSG_32 */
+ .cmdbuff_addr_lo_reg = 0x105e0, /* C2PMSG_56 */
+ .cmdbuff_addr_hi_reg = 0x105e4, /* C2PMSG_57 */
};
static const struct sev_vdata sevv2 = {
- .cmdresp_reg = 0x10980,
- .cmdbuff_addr_lo_reg = 0x109e0,
- .cmdbuff_addr_hi_reg = 0x109e4,
+ .cmdresp_reg = 0x10980, /* C2PMSG_32 */
+ .cmdbuff_addr_lo_reg = 0x109e0, /* C2PMSG_56 */
+ .cmdbuff_addr_hi_reg = 0x109e4, /* C2PMSG_57 */
};
static const struct tee_vdata teev1 = {
- .cmdresp_reg = 0x10544,
- .cmdbuff_addr_lo_reg = 0x10548,
- .cmdbuff_addr_hi_reg = 0x1054c,
- .ring_wptr_reg = 0x10550,
- .ring_rptr_reg = 0x10554,
+ .cmdresp_reg = 0x10544, /* C2PMSG_17 */
+ .cmdbuff_addr_lo_reg = 0x10548, /* C2PMSG_18 */
+ .cmdbuff_addr_hi_reg = 0x1054c, /* C2PMSG_19 */
+ .ring_wptr_reg = 0x10550, /* C2PMSG_20 */
+ .ring_rptr_reg = 0x10554, /* C2PMSG_21 */
};
static const struct psp_vdata pspv1 = {
.sev = &sevv1,
- .feature_reg = 0x105fc,
- .inten_reg = 0x10610,
- .intsts_reg = 0x10614,
+ .feature_reg = 0x105fc, /* C2PMSG_63 */
+ .inten_reg = 0x10610, /* P2CMSG_INTEN */
+ .intsts_reg = 0x10614, /* P2CMSG_INTSTS */
};
static const struct psp_vdata pspv2 = {
.sev = &sevv2,
- .feature_reg = 0x109fc,
- .inten_reg = 0x10690,
- .intsts_reg = 0x10694,
+ .feature_reg = 0x109fc, /* C2PMSG_63 */
+ .inten_reg = 0x10690, /* P2CMSG_INTEN */
+ .intsts_reg = 0x10694, /* P2CMSG_INTSTS */
};
static const struct psp_vdata pspv3 = {
.tee = &teev1,
- .feature_reg = 0x109fc,
- .inten_reg = 0x10690,
- .intsts_reg = 0x10694,
+ .feature_reg = 0x109fc, /* C2PMSG_63 */
+ .inten_reg = 0x10690, /* P2CMSG_INTEN */
+ .intsts_reg = 0x10694, /* P2CMSG_INTSTS */
};
static const struct psp_vdata pspv4 = {
.sev = &sevv2,
.tee = &teev1,
- .feature_reg = 0x109fc,
- .inten_reg = 0x10690,
- .intsts_reg = 0x10694,
+ .feature_reg = 0x109fc, /* C2PMSG_63 */
+ .inten_reg = 0x10690, /* P2CMSG_INTEN */
+ .intsts_reg = 0x10694, /* P2CMSG_INTSTS */
};
#endif
diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c
index 309da6334a0a..2cd44d7457a4 100644
--- a/drivers/crypto/ccree/cc_cipher.c
+++ b/drivers/crypto/ccree/cc_cipher.c
@@ -460,7 +460,7 @@ static int cc_cipher_setkey(struct crypto_skcipher *sktfm, const u8 *key,
}
if (ctx_p->cipher_mode == DRV_CIPHER_XTS &&
- xts_check_key(tfm, key, keylen)) {
+ xts_verify_key(sktfm, key, keylen)) {
dev_dbg(dev, "weak XTS key");
return -EINVAL;
}
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index 68d65773ef2b..0eade4fa6695 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -220,7 +220,7 @@ static inline int chcr_handle_aead_resp(struct aead_request *req,
reqctx->verify = VERIFY_HW;
}
chcr_dec_wrcount(dev);
- req->base.complete(&req->base, err);
+ aead_request_complete(req, err);
return err;
}
@@ -1235,7 +1235,7 @@ complete:
complete(&ctx->cbc_aes_aio_done);
}
chcr_dec_wrcount(dev);
- req->base.complete(&req->base, err);
+ skcipher_request_complete(req, err);
return err;
}
@@ -2132,7 +2132,7 @@ unmap:
out:
chcr_dec_wrcount(dev);
- req->base.complete(&req->base, err);
+ ahash_request_complete(req, err);
}
/*
diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
index 7e7a8f01ea6b..5a7f6611803c 100644
--- a/drivers/crypto/hifn_795x.c
+++ b/drivers/crypto/hifn_795x.c
@@ -1705,7 +1705,7 @@ static void hifn_process_ready(struct skcipher_request *req, int error)
hifn_cipher_walk_exit(&rctx->walk);
}
- req->base.complete(&req->base, error);
+ skcipher_request_complete(req, error);
}
static void hifn_clear_rings(struct hifn_device *dev, int error)
@@ -2054,7 +2054,7 @@ static int hifn_process_queue(struct hifn_device *dev)
break;
if (backlog)
- backlog->complete(backlog, -EINPROGRESS);
+ crypto_request_complete(backlog, -EINPROGRESS);
req = skcipher_request_cast(async_req);
diff --git a/drivers/crypto/hisilicon/Kconfig b/drivers/crypto/hisilicon/Kconfig
index 743ce4fc3158..4137a8bf131f 100644
--- a/drivers/crypto/hisilicon/Kconfig
+++ b/drivers/crypto/hisilicon/Kconfig
@@ -27,7 +27,7 @@ config CRYPTO_DEV_HISI_SEC2
select CRYPTO_SHA256
select CRYPTO_SHA512
select CRYPTO_SM4_GENERIC
- depends on PCI && PCI_MSI
+ depends on PCI_MSI
depends on UACCE || UACCE=n
depends on ARM64 || (COMPILE_TEST && 64BIT)
depends on ACPI
@@ -42,7 +42,7 @@ config CRYPTO_DEV_HISI_SEC2
config CRYPTO_DEV_HISI_QM
tristate
depends on ARM64 || COMPILE_TEST
- depends on PCI && PCI_MSI
+ depends on PCI_MSI
depends on UACCE || UACCE=n
depends on ACPI
help
@@ -51,7 +51,7 @@ config CRYPTO_DEV_HISI_QM
config CRYPTO_DEV_HISI_ZIP
tristate "Support for HiSilicon ZIP accelerator"
- depends on PCI && PCI_MSI
+ depends on PCI_MSI
depends on ARM64 || (COMPILE_TEST && 64BIT)
depends on !CPU_BIG_ENDIAN || COMPILE_TEST
depends on UACCE || UACCE=n
@@ -62,7 +62,7 @@ config CRYPTO_DEV_HISI_ZIP
config CRYPTO_DEV_HISI_HPRE
tristate "Support for HISI HPRE accelerator"
- depends on PCI && PCI_MSI
+ depends on PCI_MSI
depends on UACCE || UACCE=n
depends on ARM64 || (COMPILE_TEST && 64BIT)
depends on ACPI
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
index 007ac7a69ce7..e4c84433a88a 100644
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@ -95,8 +95,6 @@
#define QM_VFT_CFG_RDY 0x10006c
#define QM_VFT_CFG_OP_WR 0x100058
#define QM_VFT_CFG_TYPE 0x10005c
-#define QM_SQC_VFT 0x0
-#define QM_CQC_VFT 0x1
#define QM_VFT_CFG 0x100060
#define QM_VFT_CFG_OP_ENABLE 0x100054
#define QM_PM_CTRL 0x100148
@@ -118,7 +116,7 @@
#define QM_SQC_VFT_BASE_SHIFT_V2 28
#define QM_SQC_VFT_BASE_MASK_V2 GENMASK(15, 0)
#define QM_SQC_VFT_NUM_SHIFT_V2 45
-#define QM_SQC_VFT_NUM_MASK_v2 GENMASK(9, 0)
+#define QM_SQC_VFT_NUM_MASK_V2 GENMASK(9, 0)
#define QM_ABNORMAL_INT_SOURCE 0x100000
#define QM_ABNORMAL_INT_MASK 0x100004
@@ -164,7 +162,6 @@
/* interfunction communication */
#define QM_IFC_READY_STATUS 0x100128
-#define QM_IFC_C_STS_M 0x10012C
#define QM_IFC_INT_SET_P 0x100130
#define QM_IFC_INT_CFG 0x100134
#define QM_IFC_INT_SOURCE_P 0x100138
@@ -198,7 +195,6 @@
#define PCI_BAR_2 2
#define PCI_BAR_4 4
-#define QM_SQE_DATA_ALIGN_MASK GENMASK(6, 0)
#define QMC_ALIGN(sz) ALIGN(sz, 32)
#define QM_DBG_READ_LEN 256
@@ -212,8 +208,6 @@
#define QM_DRIVER_REMOVING 0
#define QM_RST_SCHED 1
#define QM_QOS_PARAM_NUM 2
-#define QM_QOS_VAL_NUM 1
-#define QM_QOS_BDF_PARAM_NUM 4
#define QM_QOS_MAX_VAL 1000
#define QM_QOS_RATE 100
#define QM_QOS_EXPAND_RATE 1000
@@ -225,38 +219,34 @@
#define QM_SHAPER_FACTOR_CBS_B_SHIFT 15
#define QM_SHAPER_FACTOR_CBS_S_SHIFT 19
#define QM_SHAPER_CBS_B 1
-#define QM_SHAPER_CBS_S 16
#define QM_SHAPER_VFT_OFFSET 6
-#define WAIT_FOR_QOS_VF 100
#define QM_QOS_MIN_ERROR_RATE 5
-#define QM_QOS_TYPICAL_NUM 8
#define QM_SHAPER_MIN_CBS_S 8
#define QM_QOS_TICK 0x300U
#define QM_QOS_DIVISOR_CLK 0x1f40U
#define QM_QOS_MAX_CIR_B 200
#define QM_QOS_MIN_CIR_B 100
#define QM_QOS_MAX_CIR_U 6
-#define QM_QOS_MAX_CIR_S 11
#define QM_AUTOSUSPEND_DELAY 3000
#define QM_MK_CQC_DW3_V1(hop_num, pg_sz, buf_sz, cqe_sz) \
- (((hop_num) << QM_CQ_HOP_NUM_SHIFT) | \
- ((pg_sz) << QM_CQ_PAGE_SIZE_SHIFT) | \
- ((buf_sz) << QM_CQ_BUF_SIZE_SHIFT) | \
+ (((hop_num) << QM_CQ_HOP_NUM_SHIFT) | \
+ ((pg_sz) << QM_CQ_PAGE_SIZE_SHIFT) | \
+ ((buf_sz) << QM_CQ_BUF_SIZE_SHIFT) | \
((cqe_sz) << QM_CQ_CQE_SIZE_SHIFT))
#define QM_MK_CQC_DW3_V2(cqe_sz, cq_depth) \
((((u32)cq_depth) - 1) | ((cqe_sz) << QM_CQ_CQE_SIZE_SHIFT))
#define QM_MK_SQC_W13(priority, orders, alg_type) \
- (((priority) << QM_SQ_PRIORITY_SHIFT) | \
- ((orders) << QM_SQ_ORDERS_SHIFT) | \
+ (((priority) << QM_SQ_PRIORITY_SHIFT) | \
+ ((orders) << QM_SQ_ORDERS_SHIFT) | \
(((alg_type) & QM_SQ_TYPE_MASK) << QM_SQ_TYPE_SHIFT))
#define QM_MK_SQC_DW3_V1(hop_num, pg_sz, buf_sz, sqe_sz) \
- (((hop_num) << QM_SQ_HOP_NUM_SHIFT) | \
- ((pg_sz) << QM_SQ_PAGE_SIZE_SHIFT) | \
- ((buf_sz) << QM_SQ_BUF_SIZE_SHIFT) | \
+ (((hop_num) << QM_SQ_HOP_NUM_SHIFT) | \
+ ((pg_sz) << QM_SQ_PAGE_SIZE_SHIFT) | \
+ ((buf_sz) << QM_SQ_BUF_SIZE_SHIFT) | \
((u32)ilog2(sqe_sz) << QM_SQ_SQE_SIZE_SHIFT))
#define QM_MK_SQC_DW3_V2(sqe_sz, sq_depth) \
@@ -367,6 +357,16 @@ struct hisi_qm_resource {
struct list_head list;
};
+/**
+ * struct qm_hw_err - Structure describing the device errors
+ * @list: hardware error list
+ * @timestamp: timestamp when the error occurred
+ */
+struct qm_hw_err {
+ struct list_head list;
+ unsigned long long timestamp;
+};
+
struct hisi_qm_hw_ops {
int (*get_vft)(struct hisi_qm *qm, u32 *base, u32 *number);
void (*qm_db)(struct hisi_qm *qm, u16 qn,
@@ -706,7 +706,7 @@ static void qm_db_v2(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority)
doorbell = qn | ((u64)cmd << QM_DB_CMD_SHIFT_V2) |
((u64)randata << QM_DB_RAND_SHIFT_V2) |
- ((u64)index << QM_DB_INDEX_SHIFT_V2) |
+ ((u64)index << QM_DB_INDEX_SHIFT_V2) |
((u64)priority << QM_DB_PRIORITY_SHIFT_V2);
writeq(doorbell, io_base);
@@ -905,7 +905,7 @@ static void qm_work_process(struct work_struct *work)
}
}
-static bool do_qm_irq(struct hisi_qm *qm)
+static bool do_qm_eq_irq(struct hisi_qm *qm)
{
struct qm_eqe *eqe = qm->eqe + qm->status.eq_head;
struct hisi_qm_poll_data *poll_data;
@@ -925,12 +925,12 @@ static bool do_qm_irq(struct hisi_qm *qm)
return false;
}
-static irqreturn_t qm_irq(int irq, void *data)
+static irqreturn_t qm_eq_irq(int irq, void *data)
{
struct hisi_qm *qm = data;
bool ret;
- ret = do_qm_irq(qm);
+ ret = do_qm_eq_irq(qm);
if (ret)
return IRQ_HANDLED;
@@ -1304,7 +1304,7 @@ static int qm_get_vft_v2(struct hisi_qm *qm, u32 *base, u32 *number)
sqc_vft = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) |
((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) << 32);
*base = QM_SQC_VFT_BASE_MASK_V2 & (sqc_vft >> QM_SQC_VFT_BASE_SHIFT_V2);
- *number = (QM_SQC_VFT_NUM_MASK_v2 &
+ *number = (QM_SQC_VFT_NUM_MASK_V2 &
(sqc_vft >> QM_SQC_VFT_NUM_SHIFT_V2)) + 1;
return 0;
@@ -1892,8 +1892,7 @@ static struct hisi_qp *qm_create_qp_nolock(struct hisi_qm *qm, u8 alg_type)
* @qm: The qm we create a qp from.
* @alg_type: Accelerator specific algorithm type in sqc.
*
- * return created qp, -EBUSY if all qps in qm allocated, -ENOMEM if allocating
- * qp memory fails.
+ * Return created qp, negative error code if failed.
*/
static struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, u8 alg_type)
{
@@ -2062,7 +2061,7 @@ static int qm_start_qp_nolock(struct hisi_qp *qp, unsigned long arg)
* @arg: Accelerator specific argument.
*
* After this function, qp can receive request from user. Return 0 if
- * successful, Return -EBUSY if failed.
+ * successful, negative error code if failed.
*/
int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg)
{
@@ -2363,7 +2362,7 @@ static int hisi_qm_uacce_mmap(struct uacce_queue *q,
return -EINVAL;
}
- vma->vm_flags |= VM_IO;
+ vm_flags_set(vma, VM_IO);
return remap_pfn_range(vma, vma->vm_start,
phys_base >> PAGE_SHIFT,
@@ -2469,6 +2468,113 @@ static long hisi_qm_uacce_ioctl(struct uacce_queue *q, unsigned int cmd,
return -EINVAL;
}
+/**
+ * qm_hw_err_isolate() - Try to set the isolation status of the uacce device
+ * according to user's configuration of error threshold.
+ * @qm: the uacce device
+ */
+static int qm_hw_err_isolate(struct hisi_qm *qm)
+{
+ struct qm_hw_err *err, *tmp, *hw_err;
+ struct qm_err_isolate *isolate;
+ u32 count = 0;
+
+ isolate = &qm->isolate_data;
+
+#define SECONDS_PER_HOUR 3600
+
+ /* All the hw errs are processed by PF driver */
+ if (qm->uacce->is_vf || isolate->is_isolate || !isolate->err_threshold)
+ return 0;
+
+ hw_err = kzalloc(sizeof(*hw_err), GFP_KERNEL);
+ if (!hw_err)
+ return -ENOMEM;
+
+ /*
+ * Time-stamp every slot AER error. Then check the AER error log when the
+ * next device AER error occurred. if the device slot AER error count exceeds
+ * the setting error threshold in one hour, the isolated state will be set
+ * to true. And the AER error logs that exceed one hour will be cleared.
+ */
+ mutex_lock(&isolate->isolate_lock);
+ hw_err->timestamp = jiffies;
+ list_for_each_entry_safe(err, tmp, &isolate->qm_hw_errs, list) {
+ if ((hw_err->timestamp - err->timestamp) / HZ >
+ SECONDS_PER_HOUR) {
+ list_del(&err->list);
+ kfree(err);
+ } else {
+ count++;
+ }
+ }
+ list_add(&hw_err->list, &isolate->qm_hw_errs);
+ mutex_unlock(&isolate->isolate_lock);
+
+ if (count >= isolate->err_threshold)
+ isolate->is_isolate = true;
+
+ return 0;
+}
+
+static void qm_hw_err_destroy(struct hisi_qm *qm)
+{
+ struct qm_hw_err *err, *tmp;
+
+ mutex_lock(&qm->isolate_data.isolate_lock);
+ list_for_each_entry_safe(err, tmp, &qm->isolate_data.qm_hw_errs, list) {
+ list_del(&err->list);
+ kfree(err);
+ }
+ mutex_unlock(&qm->isolate_data.isolate_lock);
+}
+
+static enum uacce_dev_state hisi_qm_get_isolate_state(struct uacce_device *uacce)
+{
+ struct hisi_qm *qm = uacce->priv;
+ struct hisi_qm *pf_qm;
+
+ if (uacce->is_vf)
+ pf_qm = pci_get_drvdata(pci_physfn(qm->pdev));
+ else
+ pf_qm = qm;
+
+ return pf_qm->isolate_data.is_isolate ?
+ UACCE_DEV_ISOLATE : UACCE_DEV_NORMAL;
+}
+
+static int hisi_qm_isolate_threshold_write(struct uacce_device *uacce, u32 num)
+{
+ struct hisi_qm *qm = uacce->priv;
+
+ /* Must be set by PF */
+ if (uacce->is_vf)
+ return -EPERM;
+
+ if (qm->isolate_data.is_isolate)
+ return -EPERM;
+
+ qm->isolate_data.err_threshold = num;
+
+ /* After the policy is updated, need to reset the hardware err list */
+ qm_hw_err_destroy(qm);
+
+ return 0;
+}
+
+static u32 hisi_qm_isolate_threshold_read(struct uacce_device *uacce)
+{
+ struct hisi_qm *qm = uacce->priv;
+ struct hisi_qm *pf_qm;
+
+ if (uacce->is_vf) {
+ pf_qm = pci_get_drvdata(pci_physfn(qm->pdev));
+ return pf_qm->isolate_data.err_threshold;
+ }
+
+ return qm->isolate_data.err_threshold;
+}
+
static const struct uacce_ops uacce_qm_ops = {
.get_available_instances = hisi_qm_get_available_instances,
.get_queue = hisi_qm_uacce_get_queue,
@@ -2478,8 +2584,22 @@ static const struct uacce_ops uacce_qm_ops = {
.mmap = hisi_qm_uacce_mmap,
.ioctl = hisi_qm_uacce_ioctl,
.is_q_updated = hisi_qm_is_q_updated,
+ .get_isolate_state = hisi_qm_get_isolate_state,
+ .isolate_err_threshold_write = hisi_qm_isolate_threshold_write,
+ .isolate_err_threshold_read = hisi_qm_isolate_threshold_read,
};
+static void qm_remove_uacce(struct hisi_qm *qm)
+{
+ struct uacce_device *uacce = qm->uacce;
+
+ if (qm->use_sva) {
+ qm_hw_err_destroy(qm);
+ uacce_remove(uacce);
+ qm->uacce = NULL;
+ }
+}
+
static int qm_alloc_uacce(struct hisi_qm *qm)
{
struct pci_dev *pdev = qm->pdev;
@@ -2506,8 +2626,7 @@ static int qm_alloc_uacce(struct hisi_qm *qm)
qm->use_sva = true;
} else {
/* only consider sva case */
- uacce_remove(uacce);
- qm->uacce = NULL;
+ qm_remove_uacce(qm);
return -EINVAL;
}
@@ -2540,6 +2659,8 @@ static int qm_alloc_uacce(struct hisi_qm *qm)
uacce->qf_pg_num[UACCE_QFRT_DUS] = dus_page_nr;
qm->uacce = uacce;
+ INIT_LIST_HEAD(&qm->isolate_data.qm_hw_errs);
+ mutex_init(&qm->isolate_data.isolate_lock);
return 0;
}
@@ -3074,7 +3195,6 @@ static int qm_stop_started_qp(struct hisi_qm *qm)
return 0;
}
-
/**
* qm_clear_queues() - Clear all queues memory in a qm.
* @qm: The qm in which the queues will be cleared.
@@ -3371,7 +3491,7 @@ static int qm_vf_q_assign(struct hisi_qm *qm, u32 num_vfs)
act_q_num = q_num;
}
- act_q_num = min_t(int, act_q_num, max_qp_num);
+ act_q_num = min(act_q_num, max_qp_num);
ret = hisi_qm_set_vft(qm, i, q_base, act_q_num);
if (ret) {
for (j = num_vfs; j > i; j--)
@@ -3558,7 +3678,7 @@ static ssize_t qm_algqos_read(struct file *filp, char __user *buf,
qos_val = ir / QM_QOS_RATE;
ret = scnprintf(tbuf, QM_DBG_READ_LEN, "%u\n", qos_val);
- ret = simple_read_from_buffer(buf, count, pos, tbuf, ret);
+ ret = simple_read_from_buffer(buf, count, pos, tbuf, ret);
err_get_status:
clear_bit(QM_RESETTING, &qm->misc_ctl);
@@ -4029,6 +4149,12 @@ static int qm_controller_reset_prepare(struct hisi_qm *qm)
return ret;
}
+ if (qm->use_sva) {
+ ret = qm_hw_err_isolate(qm);
+ if (ret)
+ pci_err(pdev, "failed to isolate hw err!\n");
+ }
+
ret = qm_wait_vf_prepare_finish(qm);
if (ret)
pci_err(pdev, "failed to stop by vfs in soft reset!\n");
@@ -4049,13 +4175,10 @@ static void qm_dev_ecc_mbit_handle(struct hisi_qm *qm)
if (!qm->err_status.is_dev_ecc_mbit &&
qm->err_status.is_qm_ecc_mbit &&
qm->err_ini->close_axi_master_ooo) {
-
qm->err_ini->close_axi_master_ooo(qm);
-
} else if (qm->err_status.is_dev_ecc_mbit &&
!qm->err_status.is_qm_ecc_mbit &&
!qm->err_ini->close_axi_master_ooo) {
-
nfe_enb = readl(qm->io_base + QM_RAS_NFE_ENABLE);
writel(nfe_enb & QM_RAS_NFE_MBIT_DISABLE,
qm->io_base + QM_RAS_NFE_ENABLE);
@@ -4336,21 +4459,25 @@ static int qm_controller_reset(struct hisi_qm *qm)
qm->err_ini->show_last_dfx_regs(qm);
ret = qm_soft_reset(qm);
- if (ret) {
- pci_err(pdev, "Controller reset failed (%d)\n", ret);
- qm_reset_bit_clear(qm);
- return ret;
- }
+ if (ret)
+ goto err_reset;
ret = qm_controller_reset_done(qm);
- if (ret) {
- qm_reset_bit_clear(qm);
- return ret;
- }
+ if (ret)
+ goto err_reset;
pci_info(pdev, "Controller reset complete\n");
return 0;
+
+err_reset:
+ pci_err(pdev, "Controller reset failed (%d)\n", ret);
+ qm_reset_bit_clear(qm);
+
+ /* if resetting fails, isolate the device */
+ if (qm->use_sva)
+ qm->isolate_data.is_isolate = true;
+ return ret;
}
/**
@@ -4499,7 +4626,6 @@ static irqreturn_t qm_abnormal_irq(int irq, void *data)
return IRQ_HANDLED;
}
-
/**
* hisi_qm_dev_shutdown() - Shutdown device.
* @pdev: The device will be shutdown.
@@ -4903,7 +5029,7 @@ static int qm_register_eq_irq(struct hisi_qm *qm)
return 0;
irq_vector = val & QM_IRQ_VECTOR_MASK;
- ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_irq, 0, qm->dev_name, qm);
+ ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_eq_irq, 0, qm->dev_name, qm);
if (ret)
dev_err(&pdev->dev, "failed to request eq irq, ret = %d", ret);
@@ -5271,10 +5397,7 @@ int hisi_qm_init(struct hisi_qm *qm)
err_free_qm_memory:
hisi_qm_memory_uninit(qm);
err_alloc_uacce:
- if (qm->use_sva) {
- uacce_remove(qm->uacce);
- qm->uacce = NULL;
- }
+ qm_remove_uacce(qm);
err_irq_register:
qm_irqs_unregister(qm);
err_pci_init:
diff --git a/drivers/crypto/hisilicon/sec/sec_algs.c b/drivers/crypto/hisilicon/sec/sec_algs.c
index 490e1542305e..1189effcdad0 100644
--- a/drivers/crypto/hisilicon/sec/sec_algs.c
+++ b/drivers/crypto/hisilicon/sec/sec_algs.c
@@ -504,8 +504,8 @@ static void sec_skcipher_alg_callback(struct sec_bd_info *sec_resp,
kfifo_avail(&ctx->queue->softqueue) >
backlog_req->num_elements)) {
sec_send_request(backlog_req, ctx->queue);
- backlog_req->req_base->complete(backlog_req->req_base,
- -EINPROGRESS);
+ crypto_request_complete(backlog_req->req_base,
+ -EINPROGRESS);
list_del(&backlog_req->backlog_head);
}
}
@@ -534,7 +534,7 @@ static void sec_skcipher_alg_callback(struct sec_bd_info *sec_resp,
if (skreq->src != skreq->dst)
dma_unmap_sg(dev, skreq->dst, sec_req->len_out,
DMA_BIDIRECTIONAL);
- skreq->base.complete(&skreq->base, sec_req->err);
+ skcipher_request_complete(skreq, sec_req->err);
}
}
diff --git a/drivers/crypto/hisilicon/sec2/sec_crypto.c b/drivers/crypto/hisilicon/sec2/sec_crypto.c
index f5bfc9755a4a..074e50ef512c 100644
--- a/drivers/crypto/hisilicon/sec2/sec_crypto.c
+++ b/drivers/crypto/hisilicon/sec2/sec_crypto.c
@@ -1459,12 +1459,11 @@ static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req,
break;
backlog_sk_req = backlog_req->c_req.sk_req;
- backlog_sk_req->base.complete(&backlog_sk_req->base,
- -EINPROGRESS);
+ skcipher_request_complete(backlog_sk_req, -EINPROGRESS);
atomic64_inc(&ctx->sec->debug.dfx.recv_busy_cnt);
}
- sk_req->base.complete(&sk_req->base, err);
+ skcipher_request_complete(sk_req, err);
}
static void set_aead_auth_iv(struct sec_ctx *ctx, struct sec_req *req)
@@ -1736,12 +1735,11 @@ static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err)
break;
backlog_aead_req = backlog_req->aead_req.aead_req;
- backlog_aead_req->base.complete(&backlog_aead_req->base,
- -EINPROGRESS);
+ aead_request_complete(backlog_aead_req, -EINPROGRESS);
atomic64_inc(&c->sec->debug.dfx.recv_busy_cnt);
}
- a_req->base.complete(&a_req->base, err);
+ aead_request_complete(a_req, err);
}
static void sec_request_uninit(struct sec_ctx *ctx, struct sec_req *req)
diff --git a/drivers/crypto/hisilicon/sgl.c b/drivers/crypto/hisilicon/sgl.c
index 2b6f2281cfd6..09586a837b1e 100644
--- a/drivers/crypto/hisilicon/sgl.c
+++ b/drivers/crypto/hisilicon/sgl.c
@@ -124,9 +124,8 @@ err_free_mem:
for (j = 0; j < i; j++) {
dma_free_coherent(dev, block_size, block[j].sgl,
block[j].sgl_dma);
- memset(block + j, 0, sizeof(*block));
}
- kfree(pool);
+ kfree_sensitive(pool);
return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL_GPL(hisi_acc_create_sgl_pool);
@@ -250,7 +249,6 @@ hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev,
dev_err(dev, "Get SGL error!\n");
dma_unmap_sg(dev, sgl, sg_n, DMA_BIDIRECTIONAL);
return ERR_PTR(-ENOMEM);
-
}
curr_hw_sgl->entry_length_in_sgl = cpu_to_le16(pool->sge_nr);
curr_hw_sge = curr_hw_sgl->sge_entries;
diff --git a/drivers/crypto/img-hash.c b/drivers/crypto/img-hash.c
index 9629e98bd68b..fe93d19e3044 100644
--- a/drivers/crypto/img-hash.c
+++ b/drivers/crypto/img-hash.c
@@ -157,9 +157,9 @@ static inline void img_hash_write(struct img_hash_dev *hdev,
writel_relaxed(value, hdev->io_base + offset);
}
-static inline u32 img_hash_read_result_queue(struct img_hash_dev *hdev)
+static inline __be32 img_hash_read_result_queue(struct img_hash_dev *hdev)
{
- return be32_to_cpu(img_hash_read(hdev, CR_RESULT_QUEUE));
+ return cpu_to_be32(img_hash_read(hdev, CR_RESULT_QUEUE));
}
static void img_hash_start(struct img_hash_dev *hdev, bool dma)
@@ -283,10 +283,10 @@ static int img_hash_finish(struct ahash_request *req)
static void img_hash_copy_hash(struct ahash_request *req)
{
struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
- u32 *hash = (u32 *)ctx->digest;
+ __be32 *hash = (__be32 *)ctx->digest;
int i;
- for (i = (ctx->digsize / sizeof(u32)) - 1; i >= 0; i--)
+ for (i = (ctx->digsize / sizeof(*hash)) - 1; i >= 0; i--)
hash[i] = img_hash_read_result_queue(ctx->hdev);
}
@@ -308,7 +308,7 @@ static void img_hash_finish_req(struct ahash_request *req, int err)
DRIVER_FLAGS_CPU | DRIVER_FLAGS_BUSY | DRIVER_FLAGS_FINAL);
if (req->base.complete)
- req->base.complete(&req->base, err);
+ ahash_request_complete(req, err);
}
static int img_hash_write_via_dma(struct img_hash_dev *hdev)
@@ -526,7 +526,7 @@ static int img_hash_handle_queue(struct img_hash_dev *hdev,
return res;
if (backlog)
- backlog->complete(backlog, -EINPROGRESS);
+ crypto_request_complete(backlog, -EINPROGRESS);
req = ahash_request_cast(async_req);
hdev->req = req;
diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
index ae6110376e21..6858753af6b3 100644
--- a/drivers/crypto/inside-secure/safexcel.c
+++ b/drivers/crypto/inside-secure/safexcel.c
@@ -850,7 +850,7 @@ handle_req:
goto request_failed;
if (backlog)
- backlog->complete(backlog, -EINPROGRESS);
+ crypto_request_complete(backlog, -EINPROGRESS);
/* In case the send() helper did not issue any command to push
* to the engine because the input data was cached, continue to
@@ -970,17 +970,6 @@ void safexcel_complete(struct safexcel_crypto_priv *priv, int ring)
} while (!cdesc->last_seg);
}
-void safexcel_inv_complete(struct crypto_async_request *req, int error)
-{
- struct safexcel_inv_result *result = req->data;
-
- if (error == -EINPROGRESS)
- return;
-
- result->error = error;
- complete(&result->completion);
-}
-
int safexcel_invalidate_cache(struct crypto_async_request *async,
struct safexcel_crypto_priv *priv,
dma_addr_t ctxr_dma, int ring)
@@ -1050,7 +1039,7 @@ handle_results:
if (should_complete) {
local_bh_disable();
- req->complete(req, ret);
+ crypto_request_complete(req, ret);
local_bh_enable();
}
diff --git a/drivers/crypto/inside-secure/safexcel.h b/drivers/crypto/inside-secure/safexcel.h
index 6c2fc662f64f..47ef6c7cd02c 100644
--- a/drivers/crypto/inside-secure/safexcel.h
+++ b/drivers/crypto/inside-secure/safexcel.h
@@ -884,11 +884,6 @@ struct safexcel_alg_template {
} alg;
};
-struct safexcel_inv_result {
- struct completion completion;
- int error;
-};
-
void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring);
int safexcel_rdesc_check_errors(struct safexcel_crypto_priv *priv,
void *rdp);
@@ -927,7 +922,6 @@ void safexcel_rdr_req_set(struct safexcel_crypto_priv *priv,
struct crypto_async_request *req);
inline struct crypto_async_request *
safexcel_rdr_req_get(struct safexcel_crypto_priv *priv, int ring);
-void safexcel_inv_complete(struct crypto_async_request *req, int error);
int safexcel_hmac_setkey(struct safexcel_context *base, const u8 *key,
unsigned int keylen, const char *alg,
unsigned int state_sz);
diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c
index 32a37e3850c5..272c28b5a088 100644
--- a/drivers/crypto/inside-secure/safexcel_cipher.c
+++ b/drivers/crypto/inside-secure/safexcel_cipher.c
@@ -1091,13 +1091,12 @@ static int safexcel_aead_send(struct crypto_async_request *async, int ring,
static int safexcel_cipher_exit_inv(struct crypto_tfm *tfm,
struct crypto_async_request *base,
struct safexcel_cipher_req *sreq,
- struct safexcel_inv_result *result)
+ struct crypto_wait *result)
{
struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
struct safexcel_crypto_priv *priv = ctx->base.priv;
int ring = ctx->base.ring;
-
- init_completion(&result->completion);
+ int err;
ctx = crypto_tfm_ctx(base->tfm);
ctx->base.exit_inv = true;
@@ -1110,13 +1109,13 @@ static int safexcel_cipher_exit_inv(struct crypto_tfm *tfm,
queue_work(priv->ring[ring].workqueue,
&priv->ring[ring].work_data.work);
- wait_for_completion(&result->completion);
+ err = crypto_wait_req(-EINPROGRESS, result);
- if (result->error) {
+ if (err) {
dev_warn(priv->dev,
"cipher: sync: invalidate: completion error %d\n",
- result->error);
- return result->error;
+ err);
+ return err;
}
return 0;
@@ -1126,12 +1125,12 @@ static int safexcel_skcipher_exit_inv(struct crypto_tfm *tfm)
{
EIP197_REQUEST_ON_STACK(req, skcipher, EIP197_SKCIPHER_REQ_SIZE);
struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
- struct safexcel_inv_result result = {};
+ DECLARE_CRYPTO_WAIT(result);
memset(req, 0, sizeof(struct skcipher_request));
skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- safexcel_inv_complete, &result);
+ crypto_req_done, &result);
skcipher_request_set_tfm(req, __crypto_skcipher_cast(tfm));
return safexcel_cipher_exit_inv(tfm, &req->base, sreq, &result);
@@ -1141,12 +1140,12 @@ static int safexcel_aead_exit_inv(struct crypto_tfm *tfm)
{
EIP197_REQUEST_ON_STACK(req, aead, EIP197_AEAD_REQ_SIZE);
struct safexcel_cipher_req *sreq = aead_request_ctx(req);
- struct safexcel_inv_result result = {};
+ DECLARE_CRYPTO_WAIT(result);
memset(req, 0, sizeof(struct aead_request));
aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- safexcel_inv_complete, &result);
+ crypto_req_done, &result);
aead_request_set_tfm(req, __crypto_aead_cast(tfm));
return safexcel_cipher_exit_inv(tfm, &req->base, sreq, &result);
diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c
index ca46328472d4..e17577b785c3 100644
--- a/drivers/crypto/inside-secure/safexcel_hash.c
+++ b/drivers/crypto/inside-secure/safexcel_hash.c
@@ -625,15 +625,16 @@ static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm)
struct safexcel_crypto_priv *priv = ctx->base.priv;
EIP197_REQUEST_ON_STACK(req, ahash, EIP197_AHASH_REQ_SIZE);
struct safexcel_ahash_req *rctx = ahash_request_ctx_dma(req);
- struct safexcel_inv_result result = {};
+ DECLARE_CRYPTO_WAIT(result);
int ring = ctx->base.ring;
+ int err;
memset(req, 0, EIP197_AHASH_REQ_SIZE);
/* create invalidation request */
init_completion(&result.completion);
ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- safexcel_inv_complete, &result);
+ crypto_req_done, &result);
ahash_request_set_tfm(req, __crypto_ahash_cast(tfm));
ctx = crypto_tfm_ctx(req->base.tfm);
@@ -647,12 +648,11 @@ static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm)
queue_work(priv->ring[ring].workqueue,
&priv->ring[ring].work_data.work);
- wait_for_completion(&result.completion);
+ err = crypto_wait_req(-EINPROGRESS, &result);
- if (result.error) {
- dev_warn(priv->dev, "hash: completion error (%d)\n",
- result.error);
- return result.error;
+ if (err) {
+ dev_warn(priv->dev, "hash: completion error (%d)\n", err);
+ return err;
}
return 0;
@@ -1042,27 +1042,11 @@ static int safexcel_hmac_sha1_digest(struct ahash_request *areq)
return safexcel_ahash_finup(areq);
}
-struct safexcel_ahash_result {
- struct completion completion;
- int error;
-};
-
-static void safexcel_ahash_complete(struct crypto_async_request *req, int error)
-{
- struct safexcel_ahash_result *result = req->data;
-
- if (error == -EINPROGRESS)
- return;
-
- result->error = error;
- complete(&result->completion);
-}
-
static int safexcel_hmac_init_pad(struct ahash_request *areq,
unsigned int blocksize, const u8 *key,
unsigned int keylen, u8 *ipad, u8 *opad)
{
- struct safexcel_ahash_result result;
+ DECLARE_CRYPTO_WAIT(result);
struct scatterlist sg;
int ret, i;
u8 *keydup;
@@ -1075,16 +1059,12 @@ static int safexcel_hmac_init_pad(struct ahash_request *areq,
return -ENOMEM;
ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_BACKLOG,
- safexcel_ahash_complete, &result);
+ crypto_req_done, &result);
sg_init_one(&sg, keydup, keylen);
ahash_request_set_crypt(areq, &sg, ipad, keylen);
- init_completion(&result.completion);
ret = crypto_ahash_digest(areq);
- if (ret == -EINPROGRESS || ret == -EBUSY) {
- wait_for_completion_interruptible(&result.completion);
- ret = result.error;
- }
+ ret = crypto_wait_req(ret, &result);
/* Avoid leaking */
kfree_sensitive(keydup);
@@ -1109,16 +1089,15 @@ static int safexcel_hmac_init_pad(struct ahash_request *areq,
static int safexcel_hmac_init_iv(struct ahash_request *areq,
unsigned int blocksize, u8 *pad, void *state)
{
- struct safexcel_ahash_result result;
struct safexcel_ahash_req *req;
+ DECLARE_CRYPTO_WAIT(result);
struct scatterlist sg;
int ret;
ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_BACKLOG,
- safexcel_ahash_complete, &result);
+ crypto_req_done, &result);
sg_init_one(&sg, pad, blocksize);
ahash_request_set_crypt(areq, &sg, pad, blocksize);
- init_completion(&result.completion);
ret = crypto_ahash_init(areq);
if (ret)
@@ -1129,14 +1108,9 @@ static int safexcel_hmac_init_iv(struct ahash_request *areq,
req->last_req = true;
ret = crypto_ahash_update(areq);
- if (ret && ret != -EINPROGRESS && ret != -EBUSY)
- return ret;
-
- wait_for_completion_interruptible(&result.completion);
- if (result.error)
- return result.error;
+ ret = crypto_wait_req(ret, &result);
- return crypto_ahash_export(areq, state);
+ return ret ?: crypto_ahash_export(areq, state);
}
static int __safexcel_hmac_setkey(const char *alg, const u8 *key,
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index 984b3cc0237c..b63e2359a133 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -382,7 +382,7 @@ static void one_packet(dma_addr_t phys)
if (req_ctx->hmac_virt)
finish_scattered_hmac(crypt);
- req->base.complete(&req->base, failed);
+ aead_request_complete(req, failed);
break;
}
case CTL_FLAG_PERFORM_ABLK: {
@@ -407,7 +407,7 @@ static void one_packet(dma_addr_t phys)
free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
free_buf_chain(dev, req_ctx->src, crypt->src_buf);
- req->base.complete(&req->base, failed);
+ skcipher_request_complete(req, failed);
break;
}
case CTL_FLAG_GEN_ICV:
diff --git a/drivers/crypto/marvell/cesa/cesa.c b/drivers/crypto/marvell/cesa/cesa.c
index 5cd332880653..b61e35b932e5 100644
--- a/drivers/crypto/marvell/cesa/cesa.c
+++ b/drivers/crypto/marvell/cesa/cesa.c
@@ -66,7 +66,7 @@ static void mv_cesa_rearm_engine(struct mv_cesa_engine *engine)
return;
if (backlog)
- backlog->complete(backlog, -EINPROGRESS);
+ crypto_request_complete(backlog, -EINPROGRESS);
ctx = crypto_tfm_ctx(req->tfm);
ctx->ops->step(req);
@@ -106,7 +106,7 @@ mv_cesa_complete_req(struct mv_cesa_ctx *ctx, struct crypto_async_request *req,
{
ctx->ops->cleanup(req);
local_bh_disable();
- req->complete(req, res);
+ crypto_request_complete(req, res);
local_bh_enable();
}
diff --git a/drivers/crypto/marvell/cesa/hash.c b/drivers/crypto/marvell/cesa/hash.c
index c72b0672fc71..8d84ad45571c 100644
--- a/drivers/crypto/marvell/cesa/hash.c
+++ b/drivers/crypto/marvell/cesa/hash.c
@@ -1104,47 +1104,27 @@ struct ahash_alg mv_sha256_alg = {
}
};
-struct mv_cesa_ahash_result {
- struct completion completion;
- int error;
-};
-
-static void mv_cesa_hmac_ahash_complete(struct crypto_async_request *req,
- int error)
-{
- struct mv_cesa_ahash_result *result = req->data;
-
- if (error == -EINPROGRESS)
- return;
-
- result->error = error;
- complete(&result->completion);
-}
-
static int mv_cesa_ahmac_iv_state_init(struct ahash_request *req, u8 *pad,
void *state, unsigned int blocksize)
{
- struct mv_cesa_ahash_result result;
+ DECLARE_CRYPTO_WAIT(result);
struct scatterlist sg;
int ret;
ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- mv_cesa_hmac_ahash_complete, &result);
+ crypto_req_done, &result);
sg_init_one(&sg, pad, blocksize);
ahash_request_set_crypt(req, &sg, pad, blocksize);
- init_completion(&result.completion);
ret = crypto_ahash_init(req);
if (ret)
return ret;
ret = crypto_ahash_update(req);
- if (ret && ret != -EINPROGRESS)
- return ret;
+ ret = crypto_wait_req(ret, &result);
- wait_for_completion_interruptible(&result.completion);
- if (result.error)
- return result.error;
+ if (ret)
+ return ret;
ret = crypto_ahash_export(req, state);
if (ret)
@@ -1158,7 +1138,7 @@ static int mv_cesa_ahmac_pad_init(struct ahash_request *req,
u8 *ipad, u8 *opad,
unsigned int blocksize)
{
- struct mv_cesa_ahash_result result;
+ DECLARE_CRYPTO_WAIT(result);
struct scatterlist sg;
int ret;
int i;
@@ -1172,17 +1152,12 @@ static int mv_cesa_ahmac_pad_init(struct ahash_request *req,
return -ENOMEM;
ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- mv_cesa_hmac_ahash_complete,
- &result);
+ crypto_req_done, &result);
sg_init_one(&sg, keydup, keylen);
ahash_request_set_crypt(req, &sg, ipad, keylen);
- init_completion(&result.completion);
ret = crypto_ahash_digest(req);
- if (ret == -EINPROGRESS) {
- wait_for_completion_interruptible(&result.completion);
- ret = result.error;
- }
+ ret = crypto_wait_req(ret, &result);
/* Set the memory region to 0 to avoid any leak. */
kfree_sensitive(keydup);
diff --git a/drivers/crypto/marvell/cesa/tdma.c b/drivers/crypto/marvell/cesa/tdma.c
index f0b5537038c2..388a06e180d6 100644
--- a/drivers/crypto/marvell/cesa/tdma.c
+++ b/drivers/crypto/marvell/cesa/tdma.c
@@ -168,7 +168,7 @@ int mv_cesa_tdma_process(struct mv_cesa_engine *engine, u32 status)
req);
if (backlog)
- backlog->complete(backlog, -EINPROGRESS);
+ crypto_request_complete(backlog, -EINPROGRESS);
}
if (res || tdma->cur_dma == tdma_cur)
diff --git a/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c b/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c
index 80ba77c793a7..1c2c870e887a 100644
--- a/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c
+++ b/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c
@@ -138,7 +138,7 @@ static void otx_cpt_aead_callback(int status, void *arg1, void *arg2)
complete:
if (areq)
- areq->complete(areq, status);
+ crypto_request_complete(areq, status);
}
static void output_iv_copyback(struct crypto_async_request *areq)
@@ -188,7 +188,7 @@ static void otx_cpt_skcipher_callback(int status, void *arg1, void *arg2)
pdev = cpt_info->pdev;
do_request_cleanup(pdev, cpt_info);
}
- areq->complete(areq, status);
+ crypto_request_complete(areq, status);
}
}
@@ -398,7 +398,7 @@ static int otx_cpt_skcipher_xts_setkey(struct crypto_skcipher *tfm,
const u8 *key1 = key;
int ret;
- ret = xts_check_key(crypto_skcipher_tfm(tfm), key, keylen);
+ ret = xts_verify_key(tfm, key, keylen);
if (ret)
return ret;
ctx->key_len = keylen;
diff --git a/drivers/crypto/marvell/octeontx2/Makefile b/drivers/crypto/marvell/octeontx2/Makefile
index 965297e96954..f0f2942c1d27 100644
--- a/drivers/crypto/marvell/octeontx2/Makefile
+++ b/drivers/crypto/marvell/octeontx2/Makefile
@@ -1,11 +1,10 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_CRYPTO_DEV_OCTEONTX2_CPT) += rvu_cptpf.o rvu_cptvf.o
+obj-$(CONFIG_CRYPTO_DEV_OCTEONTX2_CPT) += rvu_cptcommon.o rvu_cptpf.o rvu_cptvf.o
+rvu_cptcommon-objs := cn10k_cpt.o otx2_cptlf.o otx2_cpt_mbox_common.o
rvu_cptpf-objs := otx2_cptpf_main.o otx2_cptpf_mbox.o \
- otx2_cpt_mbox_common.o otx2_cptpf_ucode.o otx2_cptlf.o \
- cn10k_cpt.o otx2_cpt_devlink.o
-rvu_cptvf-objs := otx2_cptvf_main.o otx2_cptvf_mbox.o otx2_cptlf.o \
- otx2_cpt_mbox_common.o otx2_cptvf_reqmgr.o \
- otx2_cptvf_algs.o cn10k_cpt.o
+ otx2_cptpf_ucode.o otx2_cpt_devlink.o
+rvu_cptvf-objs := otx2_cptvf_main.o otx2_cptvf_mbox.o \
+ otx2_cptvf_reqmgr.o otx2_cptvf_algs.o
ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af
diff --git a/drivers/crypto/marvell/octeontx2/cn10k_cpt.c b/drivers/crypto/marvell/octeontx2/cn10k_cpt.c
index 1499ef75b5c2..93d22b328991 100644
--- a/drivers/crypto/marvell/octeontx2/cn10k_cpt.c
+++ b/drivers/crypto/marvell/octeontx2/cn10k_cpt.c
@@ -7,6 +7,9 @@
#include "otx2_cptlf.h"
#include "cn10k_cpt.h"
+static void cn10k_cpt_send_cmd(union otx2_cpt_inst_s *cptinst, u32 insts_num,
+ struct otx2_cptlf_info *lf);
+
static struct cpt_hw_ops otx2_hw_ops = {
.send_cmd = otx2_cpt_send_cmd,
.cpt_get_compcode = otx2_cpt_get_compcode,
@@ -19,8 +22,8 @@ static struct cpt_hw_ops cn10k_hw_ops = {
.cpt_get_uc_compcode = cn10k_cpt_get_uc_compcode,
};
-void cn10k_cpt_send_cmd(union otx2_cpt_inst_s *cptinst, u32 insts_num,
- struct otx2_cptlf_info *lf)
+static void cn10k_cpt_send_cmd(union otx2_cpt_inst_s *cptinst, u32 insts_num,
+ struct otx2_cptlf_info *lf)
{
void __iomem *lmtline = lf->lmtline;
u64 val = (lf->slot & 0x7FF);
@@ -68,6 +71,7 @@ int cn10k_cptpf_lmtst_init(struct otx2_cptpf_dev *cptpf)
return 0;
}
+EXPORT_SYMBOL_NS_GPL(cn10k_cptpf_lmtst_init, CRYPTO_DEV_OCTEONTX2_CPT);
int cn10k_cptvf_lmtst_init(struct otx2_cptvf_dev *cptvf)
{
@@ -91,3 +95,4 @@ int cn10k_cptvf_lmtst_init(struct otx2_cptvf_dev *cptvf)
return 0;
}
+EXPORT_SYMBOL_NS_GPL(cn10k_cptvf_lmtst_init, CRYPTO_DEV_OCTEONTX2_CPT);
diff --git a/drivers/crypto/marvell/octeontx2/cn10k_cpt.h b/drivers/crypto/marvell/octeontx2/cn10k_cpt.h
index c091392b47e0..aaefc7e38e06 100644
--- a/drivers/crypto/marvell/octeontx2/cn10k_cpt.h
+++ b/drivers/crypto/marvell/octeontx2/cn10k_cpt.h
@@ -28,8 +28,6 @@ static inline u8 otx2_cpt_get_uc_compcode(union otx2_cpt_res_s *result)
return ((struct cn9k_cpt_res_s *)result)->uc_compcode;
}
-void cn10k_cpt_send_cmd(union otx2_cpt_inst_s *cptinst, u32 insts_num,
- struct otx2_cptlf_info *lf);
int cn10k_cptpf_lmtst_init(struct otx2_cptpf_dev *cptpf);
int cn10k_cptvf_lmtst_init(struct otx2_cptvf_dev *cptvf);
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h b/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h
index 5012b7e669f0..6019066a6451 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h
+++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_common.h
@@ -145,8 +145,6 @@ int otx2_cpt_send_mbox_msg(struct otx2_mbox *mbox, struct pci_dev *pdev);
int otx2_cpt_send_af_reg_requests(struct otx2_mbox *mbox,
struct pci_dev *pdev);
-int otx2_cpt_add_read_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
- u64 reg, u64 *val, int blkaddr);
int otx2_cpt_add_write_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
u64 reg, u64 val, int blkaddr);
int otx2_cpt_read_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c b/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c
index a317319696ef..115997475beb 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cpt_mbox_common.c
@@ -19,6 +19,7 @@ int otx2_cpt_send_mbox_msg(struct otx2_mbox *mbox, struct pci_dev *pdev)
}
return ret;
}
+EXPORT_SYMBOL_NS_GPL(otx2_cpt_send_mbox_msg, CRYPTO_DEV_OCTEONTX2_CPT);
int otx2_cpt_send_ready_msg(struct otx2_mbox *mbox, struct pci_dev *pdev)
{
@@ -36,14 +37,17 @@ int otx2_cpt_send_ready_msg(struct otx2_mbox *mbox, struct pci_dev *pdev)
return otx2_cpt_send_mbox_msg(mbox, pdev);
}
+EXPORT_SYMBOL_NS_GPL(otx2_cpt_send_ready_msg, CRYPTO_DEV_OCTEONTX2_CPT);
int otx2_cpt_send_af_reg_requests(struct otx2_mbox *mbox, struct pci_dev *pdev)
{
return otx2_cpt_send_mbox_msg(mbox, pdev);
}
+EXPORT_SYMBOL_NS_GPL(otx2_cpt_send_af_reg_requests, CRYPTO_DEV_OCTEONTX2_CPT);
-int otx2_cpt_add_read_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
- u64 reg, u64 *val, int blkaddr)
+static int otx2_cpt_add_read_af_reg(struct otx2_mbox *mbox,
+ struct pci_dev *pdev, u64 reg,
+ u64 *val, int blkaddr)
{
struct cpt_rd_wr_reg_msg *reg_msg;
@@ -91,6 +95,7 @@ int otx2_cpt_add_write_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
return 0;
}
+EXPORT_SYMBOL_NS_GPL(otx2_cpt_add_write_af_reg, CRYPTO_DEV_OCTEONTX2_CPT);
int otx2_cpt_read_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
u64 reg, u64 *val, int blkaddr)
@@ -103,6 +108,7 @@ int otx2_cpt_read_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
return otx2_cpt_send_mbox_msg(mbox, pdev);
}
+EXPORT_SYMBOL_NS_GPL(otx2_cpt_read_af_reg, CRYPTO_DEV_OCTEONTX2_CPT);
int otx2_cpt_write_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
u64 reg, u64 val, int blkaddr)
@@ -115,6 +121,7 @@ int otx2_cpt_write_af_reg(struct otx2_mbox *mbox, struct pci_dev *pdev,
return otx2_cpt_send_mbox_msg(mbox, pdev);
}
+EXPORT_SYMBOL_NS_GPL(otx2_cpt_write_af_reg, CRYPTO_DEV_OCTEONTX2_CPT);
int otx2_cpt_attach_rscrs_msg(struct otx2_cptlfs_info *lfs)
{
@@ -170,6 +177,7 @@ int otx2_cpt_detach_rsrcs_msg(struct otx2_cptlfs_info *lfs)
return ret;
}
+EXPORT_SYMBOL_NS_GPL(otx2_cpt_detach_rsrcs_msg, CRYPTO_DEV_OCTEONTX2_CPT);
int otx2_cpt_msix_offset_msg(struct otx2_cptlfs_info *lfs)
{
@@ -202,6 +210,7 @@ int otx2_cpt_msix_offset_msg(struct otx2_cptlfs_info *lfs)
}
return ret;
}
+EXPORT_SYMBOL_NS_GPL(otx2_cpt_msix_offset_msg, CRYPTO_DEV_OCTEONTX2_CPT);
int otx2_cpt_sync_mbox_msg(struct otx2_mbox *mbox)
{
@@ -216,3 +225,4 @@ int otx2_cpt_sync_mbox_msg(struct otx2_mbox *mbox)
return otx2_mbox_check_rsp_msgs(mbox, 0);
}
+EXPORT_SYMBOL_NS_GPL(otx2_cpt_sync_mbox_msg, CRYPTO_DEV_OCTEONTX2_CPT);
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptlf.c b/drivers/crypto/marvell/octeontx2/otx2_cptlf.c
index c8350fcd60fa..71e5f79431af 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptlf.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptlf.c
@@ -274,6 +274,8 @@ void otx2_cptlf_unregister_interrupts(struct otx2_cptlfs_info *lfs)
}
cptlf_disable_intrs(lfs);
}
+EXPORT_SYMBOL_NS_GPL(otx2_cptlf_unregister_interrupts,
+ CRYPTO_DEV_OCTEONTX2_CPT);
static int cptlf_do_register_interrrupts(struct otx2_cptlfs_info *lfs,
int lf_num, int irq_offset,
@@ -321,6 +323,7 @@ free_irq:
otx2_cptlf_unregister_interrupts(lfs);
return ret;
}
+EXPORT_SYMBOL_NS_GPL(otx2_cptlf_register_interrupts, CRYPTO_DEV_OCTEONTX2_CPT);
void otx2_cptlf_free_irqs_affinity(struct otx2_cptlfs_info *lfs)
{
@@ -334,6 +337,7 @@ void otx2_cptlf_free_irqs_affinity(struct otx2_cptlfs_info *lfs)
free_cpumask_var(lfs->lf[slot].affinity_mask);
}
}
+EXPORT_SYMBOL_NS_GPL(otx2_cptlf_free_irqs_affinity, CRYPTO_DEV_OCTEONTX2_CPT);
int otx2_cptlf_set_irqs_affinity(struct otx2_cptlfs_info *lfs)
{
@@ -366,6 +370,7 @@ free_affinity_mask:
otx2_cptlf_free_irqs_affinity(lfs);
return ret;
}
+EXPORT_SYMBOL_NS_GPL(otx2_cptlf_set_irqs_affinity, CRYPTO_DEV_OCTEONTX2_CPT);
int otx2_cptlf_init(struct otx2_cptlfs_info *lfs, u8 eng_grp_mask, int pri,
int lfs_num)
@@ -422,6 +427,7 @@ clear_lfs_num:
lfs->lfs_num = 0;
return ret;
}
+EXPORT_SYMBOL_NS_GPL(otx2_cptlf_init, CRYPTO_DEV_OCTEONTX2_CPT);
void otx2_cptlf_shutdown(struct otx2_cptlfs_info *lfs)
{
@@ -431,3 +437,8 @@ void otx2_cptlf_shutdown(struct otx2_cptlfs_info *lfs)
/* Send request to detach LFs */
otx2_cpt_detach_rsrcs_msg(lfs);
}
+EXPORT_SYMBOL_NS_GPL(otx2_cptlf_shutdown, CRYPTO_DEV_OCTEONTX2_CPT);
+
+MODULE_AUTHOR("Marvell");
+MODULE_DESCRIPTION("Marvell RVU CPT Common module");
+MODULE_LICENSE("GPL");
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c
index a402ccfac557..ddf6e913c1c4 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_main.c
@@ -831,6 +831,8 @@ static struct pci_driver otx2_cpt_pci_driver = {
module_pci_driver(otx2_cpt_pci_driver);
+MODULE_IMPORT_NS(CRYPTO_DEV_OCTEONTX2_CPT);
+
MODULE_AUTHOR("Marvell");
MODULE_DESCRIPTION(OTX2_CPT_DRV_STRING);
MODULE_LICENSE("GPL v2");
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c
index 30b423605c9c..e27ddd3c4e55 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c
@@ -120,7 +120,7 @@ static void otx2_cpt_aead_callback(int status, void *arg1, void *arg2)
otx2_cpt_info_destroy(pdev, inst_info);
}
if (areq)
- areq->complete(areq, status);
+ crypto_request_complete(areq, status);
}
static void output_iv_copyback(struct crypto_async_request *areq)
@@ -170,7 +170,7 @@ static void otx2_cpt_skcipher_callback(int status, void *arg1, void *arg2)
pdev = inst_info->pdev;
otx2_cpt_info_destroy(pdev, inst_info);
}
- areq->complete(areq, status);
+ crypto_request_complete(areq, status);
}
}
@@ -412,7 +412,7 @@ static int otx2_cpt_skcipher_xts_setkey(struct crypto_skcipher *tfm,
const u8 *key1 = key;
int ret;
- ret = xts_check_key(crypto_skcipher_tfm(tfm), key, keylen);
+ ret = xts_verify_key(tfm, key, keylen);
if (ret)
return ret;
ctx->key_len = keylen;
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c
index 3411e664cf50..392e9fee05e8 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_main.c
@@ -429,6 +429,8 @@ static struct pci_driver otx2_cptvf_pci_driver = {
module_pci_driver(otx2_cptvf_pci_driver);
+MODULE_IMPORT_NS(CRYPTO_DEV_OCTEONTX2_CPT);
+
MODULE_AUTHOR("Marvell");
MODULE_DESCRIPTION("Marvell RVU CPT Virtual Function Driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c
index d6f9e2fe863d..1c11946a4f0b 100644
--- a/drivers/crypto/mxs-dcp.c
+++ b/drivers/crypto/mxs-dcp.c
@@ -413,11 +413,11 @@ static int dcp_chan_thread_aes(void *data)
set_current_state(TASK_RUNNING);
if (backlog)
- backlog->complete(backlog, -EINPROGRESS);
+ crypto_request_complete(backlog, -EINPROGRESS);
if (arq) {
ret = mxs_dcp_aes_block_crypt(arq);
- arq->complete(arq, ret);
+ crypto_request_complete(arq, ret);
}
}
@@ -709,11 +709,11 @@ static int dcp_chan_thread_sha(void *data)
set_current_state(TASK_RUNNING);
if (backlog)
- backlog->complete(backlog, -EINPROGRESS);
+ crypto_request_complete(backlog, -EINPROGRESS);
if (arq) {
ret = dcp_sha_req_to_buf(arq);
- arq->complete(arq, ret);
+ crypto_request_complete(arq, ret);
}
}
diff --git a/drivers/crypto/nx/nx-common-powernv.c b/drivers/crypto/nx/nx-common-powernv.c
index f34c75a862f2..8c859872c183 100644
--- a/drivers/crypto/nx/nx-common-powernv.c
+++ b/drivers/crypto/nx/nx-common-powernv.c
@@ -72,7 +72,7 @@ static int (*nx842_powernv_exec)(const unsigned char *in,
unsigned int inlen, unsigned char *out,
unsigned int *outlenp, void *workmem, int fc);
-/**
+/*
* setup_indirect_dde - Setup an indirect DDE
*
* The DDE is setup with the DDE count, byte count, and address of
@@ -89,7 +89,7 @@ static void setup_indirect_dde(struct data_descriptor_entry *dde,
dde->address = cpu_to_be64(nx842_get_pa(ddl));
}
-/**
+/*
* setup_direct_dde - Setup single DDE from buffer
*
* The DDE is setup with the buffer and length. The buffer must be properly
@@ -111,7 +111,7 @@ static unsigned int setup_direct_dde(struct data_descriptor_entry *dde,
return l;
}
-/**
+/*
* setup_ddl - Setup DDL from buffer
*
* Returns:
@@ -181,9 +181,6 @@ static int setup_ddl(struct data_descriptor_entry *dde,
CSB_ERR(csb, msg " at %lx", ##__VA_ARGS__, \
(unsigned long)be64_to_cpu((csb)->address))
-/**
- * wait_for_csb
- */
static int wait_for_csb(struct nx842_workmem *wmem,
struct coprocessor_status_block *csb)
{
@@ -632,8 +629,8 @@ static int nx842_exec_vas(const unsigned char *in, unsigned int inlen,
* @inlen: input buffer size
* @out: output buffer pointer
* @outlenp: output buffer size pointer
- * @workmem: working memory buffer pointer, size determined by
- * nx842_powernv_driver.workmem_size
+ * @wmem: working memory buffer pointer, size determined by
+ * nx842_powernv_driver.workmem_size
*
* Returns: see @nx842_powernv_exec()
*/
diff --git a/drivers/crypto/nx/nx-common-pseries.c b/drivers/crypto/nx/nx-common-pseries.c
index 3ea334b7f820..35f2d0d8507e 100644
--- a/drivers/crypto/nx/nx-common-pseries.c
+++ b/drivers/crypto/nx/nx-common-pseries.c
@@ -123,14 +123,16 @@ struct ibm_nx842_counters {
atomic64_t decomp_times[32];
};
-static struct nx842_devdata {
+struct nx842_devdata {
struct vio_dev *vdev;
struct device *dev;
struct ibm_nx842_counters *counters;
unsigned int max_sg_len;
unsigned int max_sync_size;
unsigned int max_sync_sg;
-} __rcu *devdata;
+};
+
+static struct nx842_devdata __rcu *devdata;
static DEFINE_SPINLOCK(devdata_mutex);
#define NX842_COUNTER_INC(_x) \
diff --git a/drivers/crypto/qat/qat_common/adf_transport_access_macros.h b/drivers/crypto/qat/qat_common/adf_transport_access_macros.h
index 3b6b0267bbec..d3667dbd9826 100644
--- a/drivers/crypto/qat/qat_common/adf_transport_access_macros.h
+++ b/drivers/crypto/qat/qat_common/adf_transport_access_macros.h
@@ -37,7 +37,7 @@
#define ADF_SIZE_TO_RING_SIZE_IN_BYTES(SIZE) ((1 << (SIZE - 1)) << 7)
#define ADF_RING_SIZE_IN_BYTES_TO_SIZE(SIZE) ((1 << (SIZE - 1)) >> 7)
-/* Minimum ring bufer size for memory allocation */
+/* Minimum ring buffer size for memory allocation */
#define ADF_RING_SIZE_BYTES_MIN(SIZE) \
((SIZE < ADF_SIZE_TO_RING_SIZE_IN_BYTES(ADF_RING_SIZE_4K)) ? \
ADF_SIZE_TO_RING_SIZE_IN_BYTES(ADF_RING_SIZE_4K) : SIZE)
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index b4b9f0aa59b9..538dcbfbcd26 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -435,8 +435,8 @@ static void qat_alg_skcipher_init_com(struct qat_alg_skcipher_ctx *ctx,
} else if (aes_v2_capable && mode == ICP_QAT_HW_CIPHER_CTR_MODE) {
ICP_QAT_FW_LA_SLICE_TYPE_SET(header->serv_specif_flags,
ICP_QAT_FW_LA_USE_UCS_SLICE_TYPE);
- keylen = round_up(keylen, 16);
memcpy(cd->ucs_aes.key, key, keylen);
+ keylen = round_up(keylen, 16);
} else {
memcpy(cd->aes.key, key, keylen);
}
@@ -676,7 +676,7 @@ static void qat_aead_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
qat_bl_free_bufl(inst->accel_dev, &qat_req->buf);
if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
res = -EBADMSG;
- areq->base.complete(&areq->base, res);
+ aead_request_complete(areq, res);
}
static void qat_alg_update_iv_ctr_mode(struct qat_crypto_request *qat_req)
@@ -752,7 +752,7 @@ static void qat_skcipher_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
memcpy(sreq->iv, qat_req->iv, AES_BLOCK_SIZE);
- sreq->base.complete(&sreq->base, res);
+ skcipher_request_complete(sreq, res);
}
void qat_alg_callback(void *resp)
diff --git a/drivers/crypto/qat/qat_common/qat_algs_send.c b/drivers/crypto/qat/qat_common/qat_algs_send.c
index ff5b4347f783..bb80455b3e81 100644
--- a/drivers/crypto/qat/qat_common/qat_algs_send.c
+++ b/drivers/crypto/qat/qat_common/qat_algs_send.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2022 Intel Corporation */
+#include <crypto/algapi.h>
#include "adf_transport.h"
#include "qat_algs_send.h"
#include "qat_crypto.h"
@@ -34,7 +35,7 @@ void qat_alg_send_backlog(struct qat_instance_backlog *backlog)
break;
}
list_del(&req->list);
- req->base->complete(req->base, -EINPROGRESS);
+ crypto_request_complete(req->base, -EINPROGRESS);
}
spin_unlock_bh(&backlog->lock);
}
diff --git a/drivers/crypto/qat/qat_common/qat_bl.c b/drivers/crypto/qat/qat_common/qat_bl.c
index 2e89ff08041b..76baed0a76c0 100644
--- a/drivers/crypto/qat/qat_common/qat_bl.c
+++ b/drivers/crypto/qat/qat_common/qat_bl.c
@@ -26,8 +26,8 @@ void qat_bl_free_bufl(struct adf_accel_dev *accel_dev,
bl_dma_dir = blp != blpout ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
for (i = 0; i < bl->num_bufs; i++)
- dma_unmap_single(dev, bl->bufers[i].addr,
- bl->bufers[i].len, bl_dma_dir);
+ dma_unmap_single(dev, bl->buffers[i].addr,
+ bl->buffers[i].len, bl_dma_dir);
dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
@@ -36,8 +36,8 @@ void qat_bl_free_bufl(struct adf_accel_dev *accel_dev,
if (blp != blpout) {
for (i = 0; i < blout->num_mapped_bufs; i++) {
- dma_unmap_single(dev, blout->bufers[i].addr,
- blout->bufers[i].len,
+ dma_unmap_single(dev, blout->buffers[i].addr,
+ blout->buffers[i].len,
DMA_FROM_DEVICE);
}
dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE);
@@ -53,6 +53,8 @@ static int __qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
struct qat_request_buffs *buf,
dma_addr_t extra_dst_buff,
size_t sz_extra_dst_buff,
+ unsigned int sskip,
+ unsigned int dskip,
gfp_t flags)
{
struct device *dev = &GET_DEV(accel_dev);
@@ -63,8 +65,9 @@ static int __qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
dma_addr_t blp = DMA_MAPPING_ERROR;
dma_addr_t bloutp = DMA_MAPPING_ERROR;
struct scatterlist *sg;
- size_t sz_out, sz = struct_size(bufl, bufers, n);
+ size_t sz_out, sz = struct_size(bufl, buffers, n);
int node = dev_to_node(&GET_DEV(accel_dev));
+ unsigned int left;
int bufl_dma_dir;
if (unlikely(!n))
@@ -86,7 +89,9 @@ static int __qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
bufl_dma_dir = sgl != sglout ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
for (i = 0; i < n; i++)
- bufl->bufers[i].addr = DMA_MAPPING_ERROR;
+ bufl->buffers[i].addr = DMA_MAPPING_ERROR;
+
+ left = sskip;
for_each_sg(sgl, sg, n, i) {
int y = sg_nctr;
@@ -94,13 +99,21 @@ static int __qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
if (!sg->length)
continue;
- bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg),
- sg->length,
- bufl_dma_dir);
- bufl->bufers[y].len = sg->length;
- if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr)))
+ if (left >= sg->length) {
+ left -= sg->length;
+ continue;
+ }
+ bufl->buffers[y].addr = dma_map_single(dev, sg_virt(sg) + left,
+ sg->length - left,
+ bufl_dma_dir);
+ bufl->buffers[y].len = sg->length;
+ if (unlikely(dma_mapping_error(dev, bufl->buffers[y].addr)))
goto err_in;
sg_nctr++;
+ if (left) {
+ bufl->buffers[y].len -= left;
+ left = 0;
+ }
}
bufl->num_bufs = sg_nctr;
blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
@@ -111,12 +124,14 @@ static int __qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
buf->sz = sz;
/* Handle out of place operation */
if (sgl != sglout) {
- struct qat_alg_buf *bufers;
+ struct qat_alg_buf *buffers;
int extra_buff = extra_dst_buff ? 1 : 0;
int n_sglout = sg_nents(sglout);
n = n_sglout + extra_buff;
- sz_out = struct_size(buflout, bufers, n);
+ sz_out = struct_size(buflout, buffers, n);
+ left = dskip;
+
sg_nctr = 0;
if (n > QAT_MAX_BUFF_DESC) {
@@ -129,9 +144,9 @@ static int __qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
buf->sgl_dst_valid = true;
}
- bufers = buflout->bufers;
+ buffers = buflout->buffers;
for (i = 0; i < n; i++)
- bufers[i].addr = DMA_MAPPING_ERROR;
+ buffers[i].addr = DMA_MAPPING_ERROR;
for_each_sg(sglout, sg, n_sglout, i) {
int y = sg_nctr;
@@ -139,17 +154,25 @@ static int __qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
if (!sg->length)
continue;
- bufers[y].addr = dma_map_single(dev, sg_virt(sg),
- sg->length,
- DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(dev, bufers[y].addr)))
+ if (left >= sg->length) {
+ left -= sg->length;
+ continue;
+ }
+ buffers[y].addr = dma_map_single(dev, sg_virt(sg) + left,
+ sg->length - left,
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(dev, buffers[y].addr)))
goto err_out;
- bufers[y].len = sg->length;
+ buffers[y].len = sg->length;
sg_nctr++;
+ if (left) {
+ buffers[y].len -= left;
+ left = 0;
+ }
}
if (extra_buff) {
- bufers[sg_nctr].addr = extra_dst_buff;
- bufers[sg_nctr].len = sz_extra_dst_buff;
+ buffers[sg_nctr].addr = extra_dst_buff;
+ buffers[sg_nctr].len = sz_extra_dst_buff;
}
buflout->num_bufs = sg_nctr;
@@ -174,11 +197,11 @@ err_out:
n = sg_nents(sglout);
for (i = 0; i < n; i++) {
- if (buflout->bufers[i].addr == extra_dst_buff)
+ if (buflout->buffers[i].addr == extra_dst_buff)
break;
- if (!dma_mapping_error(dev, buflout->bufers[i].addr))
- dma_unmap_single(dev, buflout->bufers[i].addr,
- buflout->bufers[i].len,
+ if (!dma_mapping_error(dev, buflout->buffers[i].addr))
+ dma_unmap_single(dev, buflout->buffers[i].addr,
+ buflout->buffers[i].len,
DMA_FROM_DEVICE);
}
@@ -191,9 +214,9 @@ err_in:
n = sg_nents(sgl);
for (i = 0; i < n; i++)
- if (!dma_mapping_error(dev, bufl->bufers[i].addr))
- dma_unmap_single(dev, bufl->bufers[i].addr,
- bufl->bufers[i].len,
+ if (!dma_mapping_error(dev, bufl->buffers[i].addr))
+ dma_unmap_single(dev, bufl->buffers[i].addr,
+ bufl->buffers[i].len,
bufl_dma_dir);
if (!buf->sgl_src_valid)
@@ -212,15 +235,19 @@ int qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
{
dma_addr_t extra_dst_buff = 0;
size_t sz_extra_dst_buff = 0;
+ unsigned int sskip = 0;
+ unsigned int dskip = 0;
if (params) {
extra_dst_buff = params->extra_dst_buff;
sz_extra_dst_buff = params->sz_extra_dst_buff;
+ sskip = params->sskip;
+ dskip = params->dskip;
}
return __qat_bl_sgl_to_bufl(accel_dev, sgl, sglout, buf,
extra_dst_buff, sz_extra_dst_buff,
- flags);
+ sskip, dskip, flags);
}
static void qat_bl_sgl_unmap(struct adf_accel_dev *accel_dev,
@@ -231,9 +258,9 @@ static void qat_bl_sgl_unmap(struct adf_accel_dev *accel_dev,
int i;
for (i = 0; i < n; i++)
- if (!dma_mapping_error(dev, bl->bufers[i].addr))
- dma_unmap_single(dev, bl->bufers[i].addr,
- bl->bufers[i].len, DMA_FROM_DEVICE);
+ if (!dma_mapping_error(dev, bl->buffers[i].addr))
+ dma_unmap_single(dev, bl->buffers[i].addr,
+ bl->buffers[i].len, DMA_FROM_DEVICE);
}
static int qat_bl_sgl_map(struct adf_accel_dev *accel_dev,
@@ -248,13 +275,13 @@ static int qat_bl_sgl_map(struct adf_accel_dev *accel_dev,
size_t sz;
n = sg_nents(sgl);
- sz = struct_size(bufl, bufers, n);
+ sz = struct_size(bufl, buffers, n);
bufl = kzalloc_node(sz, GFP_KERNEL, node);
if (unlikely(!bufl))
return -ENOMEM;
for (i = 0; i < n; i++)
- bufl->bufers[i].addr = DMA_MAPPING_ERROR;
+ bufl->buffers[i].addr = DMA_MAPPING_ERROR;
sg_nctr = 0;
for_each_sg(sgl, sg, n, i) {
@@ -263,11 +290,11 @@ static int qat_bl_sgl_map(struct adf_accel_dev *accel_dev,
if (!sg->length)
continue;
- bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg),
- sg->length,
- DMA_FROM_DEVICE);
- bufl->bufers[y].len = sg->length;
- if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr)))
+ bufl->buffers[y].addr = dma_map_single(dev, sg_virt(sg),
+ sg->length,
+ DMA_FROM_DEVICE);
+ bufl->buffers[y].len = sg->length;
+ if (unlikely(dma_mapping_error(dev, bufl->buffers[y].addr)))
goto err_map;
sg_nctr++;
}
@@ -280,9 +307,9 @@ static int qat_bl_sgl_map(struct adf_accel_dev *accel_dev,
err_map:
for (i = 0; i < n; i++)
- if (!dma_mapping_error(dev, bufl->bufers[i].addr))
- dma_unmap_single(dev, bufl->bufers[i].addr,
- bufl->bufers[i].len,
+ if (!dma_mapping_error(dev, bufl->buffers[i].addr))
+ dma_unmap_single(dev, bufl->buffers[i].addr,
+ bufl->buffers[i].len,
DMA_FROM_DEVICE);
kfree(bufl);
*bl = NULL;
@@ -351,7 +378,7 @@ int qat_bl_realloc_map_new_dst(struct adf_accel_dev *accel_dev,
if (ret)
return ret;
- new_bl_size = struct_size(new_bl, bufers, new_bl->num_bufs);
+ new_bl_size = struct_size(new_bl, buffers, new_bl->num_bufs);
/* Map new firmware SGL descriptor */
new_blp = dma_map_single(dev, new_bl, new_bl_size, DMA_TO_DEVICE);
diff --git a/drivers/crypto/qat/qat_common/qat_bl.h b/drivers/crypto/qat/qat_common/qat_bl.h
index 8ca5e52ee9e2..d87e4f35ac39 100644
--- a/drivers/crypto/qat/qat_common/qat_bl.h
+++ b/drivers/crypto/qat/qat_common/qat_bl.h
@@ -18,7 +18,7 @@ struct qat_alg_buf_list {
u64 resrvd;
u32 num_bufs;
u32 num_mapped_bufs;
- struct qat_alg_buf bufers[];
+ struct qat_alg_buf buffers[];
} __packed;
struct qat_alg_fixed_buf_list {
@@ -42,6 +42,8 @@ struct qat_request_buffs {
struct qat_sgl_to_bufl_params {
dma_addr_t extra_dst_buff;
size_t sz_extra_dst_buff;
+ unsigned int sskip;
+ unsigned int dskip;
};
void qat_bl_free_bufl(struct adf_accel_dev *accel_dev,
diff --git a/drivers/crypto/qat/qat_common/qat_comp_algs.c b/drivers/crypto/qat/qat_common/qat_comp_algs.c
index 1480d36a8d2b..b533984906ec 100644
--- a/drivers/crypto/qat/qat_common/qat_comp_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_comp_algs.c
@@ -13,6 +13,15 @@
#include "qat_compression.h"
#include "qat_algs_send.h"
+#define QAT_RFC_1950_HDR_SIZE 2
+#define QAT_RFC_1950_FOOTER_SIZE 4
+#define QAT_RFC_1950_CM_DEFLATE 8
+#define QAT_RFC_1950_CM_DEFLATE_CINFO_32K 7
+#define QAT_RFC_1950_CM_MASK 0x0f
+#define QAT_RFC_1950_CM_OFFSET 4
+#define QAT_RFC_1950_DICT_MASK 0x20
+#define QAT_RFC_1950_COMP_HDR 0x785e
+
static DEFINE_MUTEX(algs_lock);
static unsigned int active_devs;
@@ -21,9 +30,12 @@ enum direction {
COMPRESSION = 1,
};
+struct qat_compression_req;
+
struct qat_compression_ctx {
u8 comp_ctx[QAT_COMP_CTX_SIZE];
struct qat_compression_instance *inst;
+ int (*qat_comp_callback)(struct qat_compression_req *qat_req, void *resp);
};
struct qat_dst {
@@ -94,7 +106,70 @@ static void qat_comp_resubmit(struct work_struct *work)
err:
qat_bl_free_bufl(accel_dev, qat_bufs);
- areq->base.complete(&areq->base, ret);
+ acomp_request_complete(areq, ret);
+}
+
+static int parse_zlib_header(u16 zlib_h)
+{
+ int ret = -EINVAL;
+ __be16 header;
+ u8 *header_p;
+ u8 cmf, flg;
+
+ header = cpu_to_be16(zlib_h);
+ header_p = (u8 *)&header;
+
+ flg = header_p[0];
+ cmf = header_p[1];
+
+ if (cmf >> QAT_RFC_1950_CM_OFFSET > QAT_RFC_1950_CM_DEFLATE_CINFO_32K)
+ return ret;
+
+ if ((cmf & QAT_RFC_1950_CM_MASK) != QAT_RFC_1950_CM_DEFLATE)
+ return ret;
+
+ if (flg & QAT_RFC_1950_DICT_MASK)
+ return ret;
+
+ return 0;
+}
+
+static int qat_comp_rfc1950_callback(struct qat_compression_req *qat_req,
+ void *resp)
+{
+ struct acomp_req *areq = qat_req->acompress_req;
+ enum direction dir = qat_req->dir;
+ __be32 qat_produced_adler;
+
+ qat_produced_adler = cpu_to_be32(qat_comp_get_produced_adler32(resp));
+
+ if (dir == COMPRESSION) {
+ __be16 zlib_header;
+
+ zlib_header = cpu_to_be16(QAT_RFC_1950_COMP_HDR);
+ scatterwalk_map_and_copy(&zlib_header, areq->dst, 0, QAT_RFC_1950_HDR_SIZE, 1);
+ areq->dlen += QAT_RFC_1950_HDR_SIZE;
+
+ scatterwalk_map_and_copy(&qat_produced_adler, areq->dst, areq->dlen,
+ QAT_RFC_1950_FOOTER_SIZE, 1);
+ areq->dlen += QAT_RFC_1950_FOOTER_SIZE;
+ } else {
+ __be32 decomp_adler;
+ int footer_offset;
+ int consumed;
+
+ consumed = qat_comp_get_consumed_ctr(resp);
+ footer_offset = consumed + QAT_RFC_1950_HDR_SIZE;
+ if (footer_offset + QAT_RFC_1950_FOOTER_SIZE > areq->slen)
+ return -EBADMSG;
+
+ scatterwalk_map_and_copy(&decomp_adler, areq->src, footer_offset,
+ QAT_RFC_1950_FOOTER_SIZE, 0);
+
+ if (qat_produced_adler != decomp_adler)
+ return -EBADMSG;
+ }
+ return 0;
}
static void qat_comp_generic_callback(struct qat_compression_req *qat_req,
@@ -167,9 +242,12 @@ static void qat_comp_generic_callback(struct qat_compression_req *qat_req,
res = 0;
areq->dlen = produced;
+ if (ctx->qat_comp_callback)
+ res = ctx->qat_comp_callback(qat_req, resp);
+
end:
qat_bl_free_bufl(accel_dev, &qat_req->buf);
- areq->base.complete(&areq->base, res);
+ acomp_request_complete(areq, res);
}
void qat_comp_alg_callback(void *resp)
@@ -215,24 +293,39 @@ static void qat_comp_alg_exit_tfm(struct crypto_acomp *acomp_tfm)
memset(ctx, 0, sizeof(*ctx));
}
-static int qat_comp_alg_compress_decompress(struct acomp_req *areq,
- enum direction dir)
+static int qat_comp_alg_rfc1950_init_tfm(struct crypto_acomp *acomp_tfm)
+{
+ struct crypto_tfm *tfm = crypto_acomp_tfm(acomp_tfm);
+ struct qat_compression_ctx *ctx = crypto_tfm_ctx(tfm);
+ int ret;
+
+ ret = qat_comp_alg_init_tfm(acomp_tfm);
+ ctx->qat_comp_callback = &qat_comp_rfc1950_callback;
+
+ return ret;
+}
+
+static int qat_comp_alg_compress_decompress(struct acomp_req *areq, enum direction dir,
+ unsigned int shdr, unsigned int sftr,
+ unsigned int dhdr, unsigned int dftr)
{
struct qat_compression_req *qat_req = acomp_request_ctx(areq);
struct crypto_acomp *acomp_tfm = crypto_acomp_reqtfm(areq);
struct crypto_tfm *tfm = crypto_acomp_tfm(acomp_tfm);
struct qat_compression_ctx *ctx = crypto_tfm_ctx(tfm);
struct qat_compression_instance *inst = ctx->inst;
- struct qat_sgl_to_bufl_params *p_params = NULL;
gfp_t f = qat_algs_alloc_flags(&areq->base);
- struct qat_sgl_to_bufl_params params;
- unsigned int slen = areq->slen;
- unsigned int dlen = areq->dlen;
+ struct qat_sgl_to_bufl_params params = {0};
+ int slen = areq->slen - shdr - sftr;
+ int dlen = areq->dlen - dhdr - dftr;
dma_addr_t sfbuf, dfbuf;
u8 *req = qat_req->req;
size_t ovf_buff_sz;
int ret;
+ params.sskip = shdr;
+ params.dskip = dhdr;
+
if (!areq->src || !slen)
return -EINVAL;
@@ -254,6 +347,7 @@ static int qat_comp_alg_compress_decompress(struct acomp_req *areq,
if (!areq->dst)
return -ENOMEM;
+ dlen -= dhdr + dftr;
areq->dlen = dlen;
qat_req->dst.resubmitted = false;
}
@@ -262,11 +356,10 @@ static int qat_comp_alg_compress_decompress(struct acomp_req *areq,
params.extra_dst_buff = inst->dc_data->ovf_buff_p;
ovf_buff_sz = inst->dc_data->ovf_buff_sz;
params.sz_extra_dst_buff = ovf_buff_sz;
- p_params = &params;
}
ret = qat_bl_sgl_to_bufl(ctx->inst->accel_dev, areq->src, areq->dst,
- &qat_req->buf, p_params, f);
+ &qat_req->buf, &params, f);
if (unlikely(ret))
return ret;
@@ -299,12 +392,49 @@ static int qat_comp_alg_compress_decompress(struct acomp_req *areq,
static int qat_comp_alg_compress(struct acomp_req *req)
{
- return qat_comp_alg_compress_decompress(req, COMPRESSION);
+ return qat_comp_alg_compress_decompress(req, COMPRESSION, 0, 0, 0, 0);
}
static int qat_comp_alg_decompress(struct acomp_req *req)
{
- return qat_comp_alg_compress_decompress(req, DECOMPRESSION);
+ return qat_comp_alg_compress_decompress(req, DECOMPRESSION, 0, 0, 0, 0);
+}
+
+static int qat_comp_alg_rfc1950_compress(struct acomp_req *req)
+{
+ if (!req->dst && req->dlen != 0)
+ return -EINVAL;
+
+ if (req->dst && req->dlen <= QAT_RFC_1950_HDR_SIZE + QAT_RFC_1950_FOOTER_SIZE)
+ return -EINVAL;
+
+ return qat_comp_alg_compress_decompress(req, COMPRESSION, 0, 0,
+ QAT_RFC_1950_HDR_SIZE,
+ QAT_RFC_1950_FOOTER_SIZE);
+}
+
+static int qat_comp_alg_rfc1950_decompress(struct acomp_req *req)
+{
+ struct crypto_acomp *acomp_tfm = crypto_acomp_reqtfm(req);
+ struct crypto_tfm *tfm = crypto_acomp_tfm(acomp_tfm);
+ struct qat_compression_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct adf_accel_dev *accel_dev = ctx->inst->accel_dev;
+ u16 zlib_header;
+ int ret;
+
+ if (req->slen <= QAT_RFC_1950_HDR_SIZE + QAT_RFC_1950_FOOTER_SIZE)
+ return -EBADMSG;
+
+ scatterwalk_map_and_copy(&zlib_header, req->src, 0, QAT_RFC_1950_HDR_SIZE, 0);
+
+ ret = parse_zlib_header(zlib_header);
+ if (ret) {
+ dev_dbg(&GET_DEV(accel_dev), "Error parsing zlib header\n");
+ return ret;
+ }
+
+ return qat_comp_alg_compress_decompress(req, DECOMPRESSION, QAT_RFC_1950_HDR_SIZE,
+ QAT_RFC_1950_FOOTER_SIZE, 0, 0);
}
static struct acomp_alg qat_acomp[] = { {
@@ -322,6 +452,21 @@ static struct acomp_alg qat_acomp[] = { {
.decompress = qat_comp_alg_decompress,
.dst_free = sgl_free,
.reqsize = sizeof(struct qat_compression_req),
+}, {
+ .base = {
+ .cra_name = "zlib-deflate",
+ .cra_driver_name = "qat_zlib_deflate",
+ .cra_priority = 4001,
+ .cra_flags = CRYPTO_ALG_ASYNC,
+ .cra_ctxsize = sizeof(struct qat_compression_ctx),
+ .cra_module = THIS_MODULE,
+ },
+ .init = qat_comp_alg_rfc1950_init_tfm,
+ .exit = qat_comp_alg_exit_tfm,
+ .compress = qat_comp_alg_rfc1950_compress,
+ .decompress = qat_comp_alg_rfc1950_decompress,
+ .dst_free = sgl_free,
+ .reqsize = sizeof(struct qat_compression_req),
} };
int qat_comp_algs_register(void)
diff --git a/drivers/crypto/qat/qat_common/qat_compression.c b/drivers/crypto/qat/qat_common/qat_compression.c
index 9fd10f4242f8..3f1f35283266 100644
--- a/drivers/crypto/qat/qat_common/qat_compression.c
+++ b/drivers/crypto/qat/qat_common/qat_compression.c
@@ -72,7 +72,7 @@ struct qat_compression_instance *qat_compression_get_instance_node(int node)
}
if (!accel_dev) {
- pr_info("QAT: Could not find a device on node %d\n", node);
+ pr_debug_ratelimited("QAT: Could not find a device on node %d\n", node);
/* Get any started device */
list_for_each(itr, adf_devmgr_get_head()) {
struct adf_accel_dev *tmp_dev;
diff --git a/drivers/crypto/qat/qat_common/qat_crypto.c b/drivers/crypto/qat/qat_common/qat_crypto.c
index e31199eade5b..40c8e74d1cf9 100644
--- a/drivers/crypto/qat/qat_common/qat_crypto.c
+++ b/drivers/crypto/qat/qat_common/qat_crypto.c
@@ -70,7 +70,7 @@ struct qat_crypto_instance *qat_crypto_get_instance_node(int node)
}
if (!accel_dev) {
- pr_info("QAT: Could not find a device on node %d\n", node);
+ pr_debug_ratelimited("QAT: Could not find a device on node %d\n", node);
/* Get any started device */
list_for_each_entry(tmp_dev, adf_devmgr_get_head(), list) {
if (adf_dev_started(tmp_dev) &&
diff --git a/drivers/crypto/qce/core.c b/drivers/crypto/qce/core.c
index d3780be44a76..74deca4f96e0 100644
--- a/drivers/crypto/qce/core.c
+++ b/drivers/crypto/qce/core.c
@@ -107,7 +107,7 @@ static int qce_handle_queue(struct qce_device *qce,
if (backlog) {
spin_lock_bh(&qce->lock);
- backlog->complete(backlog, -EINPROGRESS);
+ crypto_request_complete(backlog, -EINPROGRESS);
spin_unlock_bh(&qce->lock);
}
@@ -132,7 +132,7 @@ static void qce_tasklet_req_done(unsigned long data)
spin_unlock_irqrestore(&qce->lock, flags);
if (req)
- req->complete(req, qce->result);
+ crypto_request_complete(req, qce->result);
qce_handle_queue(qce, NULL);
}
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
index b79e49aa724f..1c4d5fb05d69 100644
--- a/drivers/crypto/s5p-sss.c
+++ b/drivers/crypto/s5p-sss.c
@@ -499,7 +499,7 @@ static void s5p_sg_done(struct s5p_aes_dev *dev)
/* Calls the completion. Cannot be called with dev->lock hold. */
static void s5p_aes_complete(struct skcipher_request *req, int err)
{
- req->base.complete(&req->base, err);
+ skcipher_request_complete(req, err);
}
static void s5p_unset_outdata(struct s5p_aes_dev *dev)
@@ -1355,7 +1355,7 @@ static void s5p_hash_finish_req(struct ahash_request *req, int err)
spin_unlock_irqrestore(&dd->hash_lock, flags);
if (req->base.complete)
- req->base.complete(&req->base, err);
+ ahash_request_complete(req, err);
}
/**
@@ -1397,7 +1397,7 @@ retry:
return ret;
if (backlog)
- backlog->complete(backlog, -EINPROGRESS);
+ crypto_request_complete(backlog, -EINPROGRESS);
req = ahash_request_cast(async_req);
dd->hash_req = req;
@@ -1991,7 +1991,7 @@ static void s5p_tasklet_cb(unsigned long data)
spin_unlock_irqrestore(&dev->lock, flags);
if (backlog)
- backlog->complete(backlog, -EINPROGRESS);
+ crypto_request_complete(backlog, -EINPROGRESS);
dev->req = skcipher_request_cast(async_req);
dev->ctx = crypto_tfm_ctx(dev->req->base.tfm);
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index 7ab20fb95166..dd4c703cd855 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -1049,7 +1049,7 @@ static int sahara_queue_manage(void *data)
spin_unlock_bh(&dev->queue_spinlock);
if (backlog)
- backlog->complete(backlog, -EINPROGRESS);
+ crypto_request_complete(backlog, -EINPROGRESS);
if (async_req) {
if (crypto_tfm_alg_type(async_req->tfm) ==
@@ -1065,7 +1065,7 @@ static int sahara_queue_manage(void *data)
ret = sahara_aes_process(req);
}
- async_req->complete(async_req, ret);
+ crypto_request_complete(async_req, ret);
continue;
}
diff --git a/drivers/crypto/stm32/stm32-cryp.c b/drivers/crypto/stm32/stm32-cryp.c
index 4208338e72b6..6b8d731092a4 100644
--- a/drivers/crypto/stm32/stm32-cryp.c
+++ b/drivers/crypto/stm32/stm32-cryp.c
@@ -597,7 +597,6 @@ static void stm32_crypt_gcmccm_end_header(struct stm32_cryp *cryp)
static void stm32_cryp_write_ccm_first_header(struct stm32_cryp *cryp)
{
- unsigned int i;
size_t written;
size_t len;
u32 alen = cryp->areq->assoclen;
@@ -623,8 +622,8 @@ static void stm32_cryp_write_ccm_first_header(struct stm32_cryp *cryp)
written = min_t(size_t, AES_BLOCK_SIZE - len, alen);
scatterwalk_copychunks((char *)block + len, &cryp->in_walk, written, 0);
- for (i = 0; i < AES_BLOCK_32; i++)
- stm32_cryp_write(cryp, cryp->caps->din, block[i]);
+
+ writesl(cryp->regs + cryp->caps->din, block, AES_BLOCK_32);
cryp->header_in -= written;
@@ -1363,18 +1362,14 @@ static int stm32_cryp_read_auth_tag(struct stm32_cryp *cryp)
u32 out_tag[AES_BLOCK_32];
/* Get and write tag */
- for (i = 0; i < AES_BLOCK_32; i++)
- out_tag[i] = stm32_cryp_read(cryp, cryp->caps->dout);
-
+ readsl(cryp->regs + cryp->caps->dout, out_tag, AES_BLOCK_32);
scatterwalk_copychunks(out_tag, &cryp->out_walk, cryp->authsize, 1);
} else {
/* Get and check tag */
u32 in_tag[AES_BLOCK_32], out_tag[AES_BLOCK_32];
scatterwalk_copychunks(in_tag, &cryp->in_walk, cryp->authsize, 0);
-
- for (i = 0; i < AES_BLOCK_32; i++)
- out_tag[i] = stm32_cryp_read(cryp, cryp->caps->dout);
+ readsl(cryp->regs + cryp->caps->dout, out_tag, AES_BLOCK_32);
if (crypto_memneq(in_tag, out_tag, cryp->authsize))
ret = -EBADMSG;
@@ -1415,12 +1410,9 @@ static void stm32_cryp_check_ctr_counter(struct stm32_cryp *cryp)
static void stm32_cryp_irq_read_data(struct stm32_cryp *cryp)
{
- unsigned int i;
u32 block[AES_BLOCK_32];
- for (i = 0; i < cryp->hw_blocksize / sizeof(u32); i++)
- block[i] = stm32_cryp_read(cryp, cryp->caps->dout);
-
+ readsl(cryp->regs + cryp->caps->dout, block, cryp->hw_blocksize / sizeof(u32));
scatterwalk_copychunks(block, &cryp->out_walk, min_t(size_t, cryp->hw_blocksize,
cryp->payload_out), 1);
cryp->payload_out -= min_t(size_t, cryp->hw_blocksize,
@@ -1429,14 +1421,11 @@ static void stm32_cryp_irq_read_data(struct stm32_cryp *cryp)
static void stm32_cryp_irq_write_block(struct stm32_cryp *cryp)
{
- unsigned int i;
u32 block[AES_BLOCK_32] = {0};
scatterwalk_copychunks(block, &cryp->in_walk, min_t(size_t, cryp->hw_blocksize,
cryp->payload_in), 0);
- for (i = 0; i < cryp->hw_blocksize / sizeof(u32); i++)
- stm32_cryp_write(cryp, cryp->caps->din, block[i]);
-
+ writesl(cryp->regs + cryp->caps->din, block, cryp->hw_blocksize / sizeof(u32));
cryp->payload_in -= min_t(size_t, cryp->hw_blocksize, cryp->payload_in);
}
@@ -1480,8 +1469,7 @@ static void stm32_cryp_irq_write_gcm_padded_data(struct stm32_cryp *cryp)
* Same code as stm32_cryp_irq_read_data(), but we want to store
* block value
*/
- for (i = 0; i < cryp->hw_blocksize / sizeof(u32); i++)
- block[i] = stm32_cryp_read(cryp, cryp->caps->dout);
+ readsl(cryp->regs + cryp->caps->dout, block, cryp->hw_blocksize / sizeof(u32));
scatterwalk_copychunks(block, &cryp->out_walk, min_t(size_t, cryp->hw_blocksize,
cryp->payload_out), 1);
@@ -1499,8 +1487,7 @@ static void stm32_cryp_irq_write_gcm_padded_data(struct stm32_cryp *cryp)
stm32_cryp_write(cryp, cryp->caps->cr, cfg);
/* f) write padded data */
- for (i = 0; i < AES_BLOCK_32; i++)
- stm32_cryp_write(cryp, cryp->caps->din, block[i]);
+ writesl(cryp->regs + cryp->caps->din, block, AES_BLOCK_32);
/* g) Empty fifo out */
err = stm32_cryp_wait_output(cryp);
@@ -1580,8 +1567,7 @@ static void stm32_cryp_irq_write_ccm_padded_data(struct stm32_cryp *cryp)
* Same code as stm32_cryp_irq_read_data(), but we want to store
* block value
*/
- for (i = 0; i < cryp->hw_blocksize / sizeof(u32); i++)
- block[i] = stm32_cryp_read(cryp, cryp->caps->dout);
+ readsl(cryp->regs + cryp->caps->dout, block, cryp->hw_blocksize / sizeof(u32));
scatterwalk_copychunks(block, &cryp->out_walk, min_t(size_t, cryp->hw_blocksize,
cryp->payload_out), 1);
@@ -1660,15 +1646,14 @@ static void stm32_cryp_irq_write_data(struct stm32_cryp *cryp)
static void stm32_cryp_irq_write_gcmccm_header(struct stm32_cryp *cryp)
{
- unsigned int i;
u32 block[AES_BLOCK_32] = {0};
size_t written;
written = min_t(size_t, AES_BLOCK_SIZE, cryp->header_in);
scatterwalk_copychunks(block, &cryp->in_walk, written, 0);
- for (i = 0; i < AES_BLOCK_32; i++)
- stm32_cryp_write(cryp, cryp->caps->din, block[i]);
+
+ writesl(cryp->regs + cryp->caps->din, block, AES_BLOCK_32);
cryp->header_in -= written;
diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c
index d33006d43f76..7bf805563ac2 100644
--- a/drivers/crypto/stm32/stm32-hash.c
+++ b/drivers/crypto/stm32/stm32-hash.c
@@ -32,6 +32,7 @@
#define HASH_CR 0x00
#define HASH_DIN 0x04
#define HASH_STR 0x08
+#define HASH_UX500_HREG(x) (0x0c + ((x) * 0x04))
#define HASH_IMR 0x20
#define HASH_SR 0x24
#define HASH_CSR(x) (0x0F8 + ((x) * 0x04))
@@ -54,6 +55,10 @@
#define HASH_CR_ALGO_SHA224 0x40000
#define HASH_CR_ALGO_SHA256 0x40080
+#define HASH_CR_UX500_EMPTYMSG BIT(20)
+#define HASH_CR_UX500_ALGO_SHA1 BIT(7)
+#define HASH_CR_UX500_ALGO_SHA256 0x0
+
/* Interrupt */
#define HASH_DINIE BIT(0)
#define HASH_DCIE BIT(1)
@@ -115,6 +120,7 @@ enum stm32_hash_data_format {
struct stm32_hash_ctx {
struct crypto_engine_ctx enginectx;
struct stm32_hash_dev *hdev;
+ struct crypto_shash *xtfm;
unsigned long flags;
u8 key[HASH_MAX_KEY_SIZE];
@@ -157,6 +163,10 @@ struct stm32_hash_algs_info {
struct stm32_hash_pdata {
struct stm32_hash_algs_info *algs_info;
size_t algs_info_size;
+ bool has_sr;
+ bool has_mdmat;
+ bool broken_emptymsg;
+ bool ux500;
};
struct stm32_hash_dev {
@@ -168,6 +178,7 @@ struct stm32_hash_dev {
phys_addr_t phys_base;
u32 dma_mode;
u32 dma_maxburst;
+ bool polled;
struct ahash_request *req;
struct crypto_engine *engine;
@@ -208,6 +219,11 @@ static inline int stm32_hash_wait_busy(struct stm32_hash_dev *hdev)
{
u32 status;
+ /* The Ux500 lacks the special status register, we poll the DCAL bit instead */
+ if (!hdev->pdata->has_sr)
+ return readl_relaxed_poll_timeout(hdev->io_base + HASH_STR, status,
+ !(status & HASH_STR_DCAL), 10, 10000);
+
return readl_relaxed_poll_timeout(hdev->io_base + HASH_SR, status,
!(status & HASH_SR_BUSY), 10, 10000);
}
@@ -249,7 +265,7 @@ static int stm32_hash_write_key(struct stm32_hash_dev *hdev)
return 0;
}
-static void stm32_hash_write_ctrl(struct stm32_hash_dev *hdev)
+static void stm32_hash_write_ctrl(struct stm32_hash_dev *hdev, int bufcnt)
{
struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req);
@@ -263,13 +279,19 @@ static void stm32_hash_write_ctrl(struct stm32_hash_dev *hdev)
reg |= HASH_CR_ALGO_MD5;
break;
case HASH_FLAGS_SHA1:
- reg |= HASH_CR_ALGO_SHA1;
+ if (hdev->pdata->ux500)
+ reg |= HASH_CR_UX500_ALGO_SHA1;
+ else
+ reg |= HASH_CR_ALGO_SHA1;
break;
case HASH_FLAGS_SHA224:
reg |= HASH_CR_ALGO_SHA224;
break;
case HASH_FLAGS_SHA256:
- reg |= HASH_CR_ALGO_SHA256;
+ if (hdev->pdata->ux500)
+ reg |= HASH_CR_UX500_ALGO_SHA256;
+ else
+ reg |= HASH_CR_ALGO_SHA256;
break;
default:
reg |= HASH_CR_ALGO_MD5;
@@ -284,7 +306,15 @@ static void stm32_hash_write_ctrl(struct stm32_hash_dev *hdev)
reg |= HASH_CR_LKEY;
}
- stm32_hash_write(hdev, HASH_IMR, HASH_DCIE);
+ /*
+ * On the Ux500 we need to set a special flag to indicate that
+ * the message is zero length.
+ */
+ if (hdev->pdata->ux500 && bufcnt == 0)
+ reg |= HASH_CR_UX500_EMPTYMSG;
+
+ if (!hdev->polled)
+ stm32_hash_write(hdev, HASH_IMR, HASH_DCIE);
stm32_hash_write(hdev, HASH_CR, reg);
@@ -345,7 +375,7 @@ static int stm32_hash_xmit_cpu(struct stm32_hash_dev *hdev,
hdev->flags |= HASH_FLAGS_CPU;
- stm32_hash_write_ctrl(hdev);
+ stm32_hash_write_ctrl(hdev, length);
if (stm32_hash_wait_busy(hdev))
return -ETIMEDOUT;
@@ -362,6 +392,9 @@ static int stm32_hash_xmit_cpu(struct stm32_hash_dev *hdev,
stm32_hash_write(hdev, HASH_DIN, buffer[count]);
if (final) {
+ if (stm32_hash_wait_busy(hdev))
+ return -ETIMEDOUT;
+
stm32_hash_set_nblw(hdev, length);
reg = stm32_hash_read(hdev, HASH_STR);
reg |= HASH_STR_DCAL;
@@ -399,8 +432,15 @@ static int stm32_hash_update_cpu(struct stm32_hash_dev *hdev)
if (final) {
bufcnt = rctx->bufcnt;
rctx->bufcnt = 0;
- err = stm32_hash_xmit_cpu(hdev, rctx->buffer, bufcnt,
- (rctx->flags & HASH_FLAGS_FINUP));
+ err = stm32_hash_xmit_cpu(hdev, rctx->buffer, bufcnt, 1);
+
+ /* If we have an IRQ, wait for that, else poll for completion */
+ if (hdev->polled) {
+ if (stm32_hash_wait_busy(hdev))
+ return -ETIMEDOUT;
+ hdev->flags |= HASH_FLAGS_OUTPUT_READY;
+ err = 0;
+ }
}
return err;
@@ -431,11 +471,12 @@ static int stm32_hash_xmit_dma(struct stm32_hash_dev *hdev,
reg = stm32_hash_read(hdev, HASH_CR);
- if (mdma)
- reg |= HASH_CR_MDMAT;
- else
- reg &= ~HASH_CR_MDMAT;
-
+ if (!hdev->pdata->has_mdmat) {
+ if (mdma)
+ reg |= HASH_CR_MDMAT;
+ else
+ reg &= ~HASH_CR_MDMAT;
+ }
reg |= HASH_CR_DMAE;
stm32_hash_write(hdev, HASH_CR, reg);
@@ -556,7 +597,7 @@ static int stm32_hash_dma_send(struct stm32_hash_dev *hdev)
if (rctx->nents < 0)
return -EINVAL;
- stm32_hash_write_ctrl(hdev);
+ stm32_hash_write_ctrl(hdev, rctx->total);
if (hdev->flags & HASH_FLAGS_HMAC) {
err = stm32_hash_hmac_dma_send(hdev);
@@ -743,16 +784,57 @@ static int stm32_hash_final_req(struct stm32_hash_dev *hdev)
else
err = stm32_hash_xmit_cpu(hdev, rctx->buffer, buflen, 1);
+ /* If we have an IRQ, wait for that, else poll for completion */
+ if (hdev->polled) {
+ if (stm32_hash_wait_busy(hdev))
+ return -ETIMEDOUT;
+ hdev->flags |= HASH_FLAGS_OUTPUT_READY;
+ /* Caller will call stm32_hash_finish_req() */
+ err = 0;
+ }
return err;
}
+static void stm32_hash_emptymsg_fallback(struct ahash_request *req)
+{
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct stm32_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
+ struct stm32_hash_dev *hdev = rctx->hdev;
+ int ret;
+
+ dev_dbg(hdev->dev, "use fallback message size 0 key size %d\n",
+ ctx->keylen);
+
+ if (!ctx->xtfm) {
+ dev_err(hdev->dev, "no fallback engine\n");
+ return;
+ }
+
+ if (ctx->keylen) {
+ ret = crypto_shash_setkey(ctx->xtfm, ctx->key, ctx->keylen);
+ if (ret) {
+ dev_err(hdev->dev, "failed to set key ret=%d\n", ret);
+ return;
+ }
+ }
+
+ ret = crypto_shash_tfm_digest(ctx->xtfm, NULL, 0, rctx->digest);
+ if (ret)
+ dev_err(hdev->dev, "shash digest error\n");
+}
+
static void stm32_hash_copy_hash(struct ahash_request *req)
{
struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req);
+ struct stm32_hash_dev *hdev = rctx->hdev;
__be32 *hash = (void *)rctx->digest;
unsigned int i, hashsize;
+ if (hdev->pdata->broken_emptymsg && !req->nbytes)
+ return stm32_hash_emptymsg_fallback(req);
+
switch (rctx->flags & HASH_FLAGS_ALGO_MASK) {
case HASH_FLAGS_MD5:
hashsize = MD5_DIGEST_SIZE;
@@ -770,9 +852,14 @@ static void stm32_hash_copy_hash(struct ahash_request *req)
return;
}
- for (i = 0; i < hashsize / sizeof(u32); i++)
- hash[i] = cpu_to_be32(stm32_hash_read(rctx->hdev,
- HASH_HREG(i)));
+ for (i = 0; i < hashsize / sizeof(u32); i++) {
+ if (hdev->pdata->ux500)
+ hash[i] = cpu_to_be32(stm32_hash_read(hdev,
+ HASH_UX500_HREG(i)));
+ else
+ hash[i] = cpu_to_be32(stm32_hash_read(hdev,
+ HASH_HREG(i)));
+ }
}
static int stm32_hash_finish(struct ahash_request *req)
@@ -961,11 +1048,13 @@ static int stm32_hash_export(struct ahash_request *req, void *out)
struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
u32 *preg;
unsigned int i;
+ int ret;
pm_runtime_get_sync(hdev->dev);
- while ((stm32_hash_read(hdev, HASH_SR) & HASH_SR_BUSY))
- cpu_relax();
+ ret = stm32_hash_wait_busy(hdev);
+ if (ret)
+ return ret;
rctx->hw_context = kmalloc_array(3 + HASH_CSR_REGISTER_NUMBER,
sizeof(u32),
@@ -973,7 +1062,8 @@ static int stm32_hash_export(struct ahash_request *req, void *out)
preg = rctx->hw_context;
- *preg++ = stm32_hash_read(hdev, HASH_IMR);
+ if (!hdev->pdata->ux500)
+ *preg++ = stm32_hash_read(hdev, HASH_IMR);
*preg++ = stm32_hash_read(hdev, HASH_STR);
*preg++ = stm32_hash_read(hdev, HASH_CR);
for (i = 0; i < HASH_CSR_REGISTER_NUMBER; i++)
@@ -1002,7 +1092,8 @@ static int stm32_hash_import(struct ahash_request *req, const void *in)
pm_runtime_get_sync(hdev->dev);
- stm32_hash_write(hdev, HASH_IMR, *preg++);
+ if (!hdev->pdata->ux500)
+ stm32_hash_write(hdev, HASH_IMR, *preg++);
stm32_hash_write(hdev, HASH_STR, *preg++);
stm32_hash_write(hdev, HASH_CR, *preg);
reg = *preg++ | HASH_CR_INIT;
@@ -1034,6 +1125,29 @@ static int stm32_hash_setkey(struct crypto_ahash *tfm,
return 0;
}
+static int stm32_hash_init_fallback(struct crypto_tfm *tfm)
+{
+ struct stm32_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx);
+ const char *name = crypto_tfm_alg_name(tfm);
+ struct crypto_shash *xtfm;
+
+ /* The fallback is only needed on Ux500 */
+ if (!hdev->pdata->ux500)
+ return 0;
+
+ xtfm = crypto_alloc_shash(name, 0, CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(xtfm)) {
+ dev_err(hdev->dev, "failed to allocate %s fallback\n",
+ name);
+ return PTR_ERR(xtfm);
+ }
+ dev_info(hdev->dev, "allocated %s fallback\n", name);
+ ctx->xtfm = xtfm;
+
+ return 0;
+}
+
static int stm32_hash_cra_init_algs(struct crypto_tfm *tfm,
const char *algs_hmac_name)
{
@@ -1050,7 +1164,8 @@ static int stm32_hash_cra_init_algs(struct crypto_tfm *tfm,
ctx->enginectx.op.do_one_request = stm32_hash_one_request;
ctx->enginectx.op.prepare_request = stm32_hash_prepare_req;
ctx->enginectx.op.unprepare_request = NULL;
- return 0;
+
+ return stm32_hash_init_fallback(tfm);
}
static int stm32_hash_cra_init(struct crypto_tfm *tfm)
@@ -1078,6 +1193,14 @@ static int stm32_hash_cra_sha256_init(struct crypto_tfm *tfm)
return stm32_hash_cra_init_algs(tfm, "sha256");
}
+static void stm32_hash_cra_exit(struct crypto_tfm *tfm)
+{
+ struct stm32_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ if (ctx->xtfm)
+ crypto_free_shash(ctx->xtfm);
+}
+
static irqreturn_t stm32_hash_irq_thread(int irq, void *dev_id)
{
struct stm32_hash_dev *hdev = dev_id;
@@ -1121,7 +1244,7 @@ static irqreturn_t stm32_hash_irq_handler(int irq, void *dev_id)
return IRQ_NONE;
}
-static struct ahash_alg algs_md5_sha1[] = {
+static struct ahash_alg algs_md5[] = {
{
.init = stm32_hash_init,
.update = stm32_hash_update,
@@ -1143,6 +1266,7 @@ static struct ahash_alg algs_md5_sha1[] = {
.cra_ctxsize = sizeof(struct stm32_hash_ctx),
.cra_alignmask = 3,
.cra_init = stm32_hash_cra_init,
+ .cra_exit = stm32_hash_cra_exit,
.cra_module = THIS_MODULE,
}
}
@@ -1169,10 +1293,14 @@ static struct ahash_alg algs_md5_sha1[] = {
.cra_ctxsize = sizeof(struct stm32_hash_ctx),
.cra_alignmask = 3,
.cra_init = stm32_hash_cra_md5_init,
+ .cra_exit = stm32_hash_cra_exit,
.cra_module = THIS_MODULE,
}
}
},
+};
+
+static struct ahash_alg algs_sha1[] = {
{
.init = stm32_hash_init,
.update = stm32_hash_update,
@@ -1194,6 +1322,7 @@ static struct ahash_alg algs_md5_sha1[] = {
.cra_ctxsize = sizeof(struct stm32_hash_ctx),
.cra_alignmask = 3,
.cra_init = stm32_hash_cra_init,
+ .cra_exit = stm32_hash_cra_exit,
.cra_module = THIS_MODULE,
}
}
@@ -1220,13 +1349,14 @@ static struct ahash_alg algs_md5_sha1[] = {
.cra_ctxsize = sizeof(struct stm32_hash_ctx),
.cra_alignmask = 3,
.cra_init = stm32_hash_cra_sha1_init,
+ .cra_exit = stm32_hash_cra_exit,
.cra_module = THIS_MODULE,
}
}
},
};
-static struct ahash_alg algs_sha224_sha256[] = {
+static struct ahash_alg algs_sha224[] = {
{
.init = stm32_hash_init,
.update = stm32_hash_update,
@@ -1248,6 +1378,7 @@ static struct ahash_alg algs_sha224_sha256[] = {
.cra_ctxsize = sizeof(struct stm32_hash_ctx),
.cra_alignmask = 3,
.cra_init = stm32_hash_cra_init,
+ .cra_exit = stm32_hash_cra_exit,
.cra_module = THIS_MODULE,
}
}
@@ -1274,10 +1405,14 @@ static struct ahash_alg algs_sha224_sha256[] = {
.cra_ctxsize = sizeof(struct stm32_hash_ctx),
.cra_alignmask = 3,
.cra_init = stm32_hash_cra_sha224_init,
+ .cra_exit = stm32_hash_cra_exit,
.cra_module = THIS_MODULE,
}
}
},
+};
+
+static struct ahash_alg algs_sha256[] = {
{
.init = stm32_hash_init,
.update = stm32_hash_update,
@@ -1299,6 +1434,7 @@ static struct ahash_alg algs_sha224_sha256[] = {
.cra_ctxsize = sizeof(struct stm32_hash_ctx),
.cra_alignmask = 3,
.cra_init = stm32_hash_cra_init,
+ .cra_exit = stm32_hash_cra_exit,
.cra_module = THIS_MODULE,
}
}
@@ -1325,6 +1461,7 @@ static struct ahash_alg algs_sha224_sha256[] = {
.cra_ctxsize = sizeof(struct stm32_hash_ctx),
.cra_alignmask = 3,
.cra_init = stm32_hash_cra_sha256_init,
+ .cra_exit = stm32_hash_cra_exit,
.cra_module = THIS_MODULE,
}
}
@@ -1370,36 +1507,74 @@ static int stm32_hash_unregister_algs(struct stm32_hash_dev *hdev)
return 0;
}
+static struct stm32_hash_algs_info stm32_hash_algs_info_ux500[] = {
+ {
+ .algs_list = algs_sha1,
+ .size = ARRAY_SIZE(algs_sha1),
+ },
+ {
+ .algs_list = algs_sha256,
+ .size = ARRAY_SIZE(algs_sha256),
+ },
+};
+
+static const struct stm32_hash_pdata stm32_hash_pdata_ux500 = {
+ .algs_info = stm32_hash_algs_info_ux500,
+ .algs_info_size = ARRAY_SIZE(stm32_hash_algs_info_ux500),
+ .broken_emptymsg = true,
+ .ux500 = true,
+};
+
static struct stm32_hash_algs_info stm32_hash_algs_info_stm32f4[] = {
{
- .algs_list = algs_md5_sha1,
- .size = ARRAY_SIZE(algs_md5_sha1),
+ .algs_list = algs_md5,
+ .size = ARRAY_SIZE(algs_md5),
+ },
+ {
+ .algs_list = algs_sha1,
+ .size = ARRAY_SIZE(algs_sha1),
},
};
static const struct stm32_hash_pdata stm32_hash_pdata_stm32f4 = {
.algs_info = stm32_hash_algs_info_stm32f4,
.algs_info_size = ARRAY_SIZE(stm32_hash_algs_info_stm32f4),
+ .has_sr = true,
+ .has_mdmat = true,
};
static struct stm32_hash_algs_info stm32_hash_algs_info_stm32f7[] = {
{
- .algs_list = algs_md5_sha1,
- .size = ARRAY_SIZE(algs_md5_sha1),
+ .algs_list = algs_md5,
+ .size = ARRAY_SIZE(algs_md5),
+ },
+ {
+ .algs_list = algs_sha1,
+ .size = ARRAY_SIZE(algs_sha1),
+ },
+ {
+ .algs_list = algs_sha224,
+ .size = ARRAY_SIZE(algs_sha224),
},
{
- .algs_list = algs_sha224_sha256,
- .size = ARRAY_SIZE(algs_sha224_sha256),
+ .algs_list = algs_sha256,
+ .size = ARRAY_SIZE(algs_sha256),
},
};
static const struct stm32_hash_pdata stm32_hash_pdata_stm32f7 = {
.algs_info = stm32_hash_algs_info_stm32f7,
.algs_info_size = ARRAY_SIZE(stm32_hash_algs_info_stm32f7),
+ .has_sr = true,
+ .has_mdmat = true,
};
static const struct of_device_id stm32_hash_of_match[] = {
{
+ .compatible = "stericsson,ux500-hash",
+ .data = &stm32_hash_pdata_ux500,
+ },
+ {
.compatible = "st,stm32f456-hash",
.data = &stm32_hash_pdata_stm32f4,
},
@@ -1452,16 +1627,23 @@ static int stm32_hash_probe(struct platform_device *pdev)
if (ret)
return ret;
- irq = platform_get_irq(pdev, 0);
- if (irq < 0)
+ irq = platform_get_irq_optional(pdev, 0);
+ if (irq < 0 && irq != -ENXIO)
return irq;
- ret = devm_request_threaded_irq(dev, irq, stm32_hash_irq_handler,
- stm32_hash_irq_thread, IRQF_ONESHOT,
- dev_name(dev), hdev);
- if (ret) {
- dev_err(dev, "Cannot grab IRQ\n");
- return ret;
+ if (irq > 0) {
+ ret = devm_request_threaded_irq(dev, irq,
+ stm32_hash_irq_handler,
+ stm32_hash_irq_thread,
+ IRQF_ONESHOT,
+ dev_name(dev), hdev);
+ if (ret) {
+ dev_err(dev, "Cannot grab IRQ\n");
+ return ret;
+ }
+ } else {
+ dev_info(dev, "No IRQ, use polling mode\n");
+ hdev->polled = true;
}
hdev->clk = devm_clk_get(&pdev->dev, NULL);
@@ -1503,9 +1685,11 @@ static int stm32_hash_probe(struct platform_device *pdev)
case 0:
break;
case -ENOENT:
- dev_dbg(dev, "DMA mode not available\n");
+ case -ENODEV:
+ dev_info(dev, "DMA mode not available\n");
break;
default:
+ dev_err(dev, "DMA init error %d\n", ret);
goto err_dma;
}
@@ -1524,7 +1708,11 @@ static int stm32_hash_probe(struct platform_device *pdev)
if (ret)
goto err_engine_start;
- hdev->dma_mode = stm32_hash_read(hdev, HASH_HWCFGR);
+ if (hdev->pdata->ux500)
+ /* FIXME: implement DMA mode for Ux500 */
+ hdev->dma_mode = 0;
+ else
+ hdev->dma_mode = stm32_hash_read(hdev, HASH_HWCFGR);
/* Register algos */
ret = stm32_hash_register_algs(hdev);
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 71db6450b6aa..bb27f011cf31 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -1393,7 +1393,7 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
alloc_len += sizeof(struct talitos_desc);
alloc_len += ivsize;
- edesc = kmalloc(alloc_len, GFP_DMA | flags);
+ edesc = kmalloc(ALIGN(alloc_len, dma_get_cache_alignment()), flags);
if (!edesc)
return ERR_PTR(-ENOMEM);
if (ivsize) {
@@ -1560,7 +1560,7 @@ static void skcipher_done(struct device *dev,
kfree(edesc);
- areq->base.complete(&areq->base, err);
+ skcipher_request_complete(areq, err);
}
static int common_nonsnoop(struct talitos_edesc *edesc,
@@ -1759,7 +1759,7 @@ static void ahash_done(struct device *dev,
kfree(edesc);
- areq->base.complete(&areq->base, err);
+ ahash_request_complete(areq, err);
}
/*
diff --git a/drivers/crypto/ux500/Kconfig b/drivers/crypto/ux500/Kconfig
deleted file mode 100644
index dcbd7404768f..000000000000
--- a/drivers/crypto/ux500/Kconfig
+++ /dev/null
@@ -1,22 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# Copyright (C) ST-Ericsson SA 2010
-# Author: Shujuan Chen (shujuan.chen@stericsson.com)
-#
-
-config CRYPTO_DEV_UX500_HASH
- tristate "UX500 crypto driver for HASH block"
- depends on CRYPTO_DEV_UX500
- select CRYPTO_HASH
- select CRYPTO_SHA1
- select CRYPTO_SHA256
- help
- This selects the hash driver for the UX500_HASH hardware.
- Depends on UX500/STM DMA if running in DMA mode.
-
-config CRYPTO_DEV_UX500_DEBUG
- bool "Activate ux500 platform debug-mode for crypto and hash block"
- depends on CRYPTO_DEV_UX500_CRYP || CRYPTO_DEV_UX500_HASH
- help
- Say Y if you want to add debug prints to ux500_hash and
- ux500_cryp devices.
diff --git a/drivers/crypto/ux500/Makefile b/drivers/crypto/ux500/Makefile
deleted file mode 100644
index f1aa4edf66f4..000000000000
--- a/drivers/crypto/ux500/Makefile
+++ /dev/null
@@ -1,7 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# Copyright (C) ST-Ericsson SA 2010
-# Author: Shujuan Chen (shujuan.chen@stericsson.com)
-#
-
-obj-$(CONFIG_CRYPTO_DEV_UX500_HASH) += hash/
diff --git a/drivers/crypto/ux500/hash/Makefile b/drivers/crypto/ux500/hash/Makefile
deleted file mode 100644
index a8f088724772..000000000000
--- a/drivers/crypto/ux500/hash/Makefile
+++ /dev/null
@@ -1,11 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# Copyright (C) ST-Ericsson SA 2010
-# Author: Shujuan Chen (shujuan.chen@stericsson.com)
-#
-ifdef CONFIG_CRYPTO_DEV_UX500_DEBUG
-CFLAGS_hash_core.o := -DDEBUG
-endif
-
-obj-$(CONFIG_CRYPTO_DEV_UX500_HASH) += ux500_hash.o
-ux500_hash-objs := hash_core.o
diff --git a/drivers/crypto/ux500/hash/hash_alg.h b/drivers/crypto/ux500/hash/hash_alg.h
deleted file mode 100644
index 7c9bcc15125f..000000000000
--- a/drivers/crypto/ux500/hash/hash_alg.h
+++ /dev/null
@@ -1,398 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) ST-Ericsson SA 2010
- * Author: Shujuan Chen (shujuan.chen@stericsson.com)
- * Author: Joakim Bech (joakim.xx.bech@stericsson.com)
- * Author: Berne Hebark (berne.hebark@stericsson.com))
- */
-#ifndef _HASH_ALG_H
-#define _HASH_ALG_H
-
-#include <linux/bitops.h>
-
-#define HASH_BLOCK_SIZE 64
-#define HASH_DMA_FIFO 4
-#define HASH_DMA_ALIGN_SIZE 4
-#define HASH_DMA_PERFORMANCE_MIN_SIZE 1024
-#define HASH_BYTES_PER_WORD 4
-
-/* Maximum value of the length's high word */
-#define HASH_HIGH_WORD_MAX_VAL 0xFFFFFFFFUL
-
-/* Power on Reset values HASH registers */
-#define HASH_RESET_CR_VALUE 0x0
-#define HASH_RESET_STR_VALUE 0x0
-
-/* Number of context swap registers */
-#define HASH_CSR_COUNT 52
-
-#define HASH_RESET_CSRX_REG_VALUE 0x0
-#define HASH_RESET_CSFULL_REG_VALUE 0x0
-#define HASH_RESET_CSDATAIN_REG_VALUE 0x0
-
-#define HASH_RESET_INDEX_VAL 0x0
-#define HASH_RESET_BIT_INDEX_VAL 0x0
-#define HASH_RESET_BUFFER_VAL 0x0
-#define HASH_RESET_LEN_HIGH_VAL 0x0
-#define HASH_RESET_LEN_LOW_VAL 0x0
-
-/* Control register bitfields */
-#define HASH_CR_RESUME_MASK 0x11FCF
-
-#define HASH_CR_SWITCHON_POS 31
-#define HASH_CR_SWITCHON_MASK BIT(31)
-
-#define HASH_CR_EMPTYMSG_POS 20
-#define HASH_CR_EMPTYMSG_MASK BIT(20)
-
-#define HASH_CR_DINF_POS 12
-#define HASH_CR_DINF_MASK BIT(12)
-
-#define HASH_CR_NBW_POS 8
-#define HASH_CR_NBW_MASK 0x00000F00UL
-
-#define HASH_CR_LKEY_POS 16
-#define HASH_CR_LKEY_MASK BIT(16)
-
-#define HASH_CR_ALGO_POS 7
-#define HASH_CR_ALGO_MASK BIT(7)
-
-#define HASH_CR_MODE_POS 6
-#define HASH_CR_MODE_MASK BIT(6)
-
-#define HASH_CR_DATAFORM_POS 4
-#define HASH_CR_DATAFORM_MASK (BIT(4) | BIT(5))
-
-#define HASH_CR_DMAE_POS 3
-#define HASH_CR_DMAE_MASK BIT(3)
-
-#define HASH_CR_INIT_POS 2
-#define HASH_CR_INIT_MASK BIT(2)
-
-#define HASH_CR_PRIVN_POS 1
-#define HASH_CR_PRIVN_MASK BIT(1)
-
-#define HASH_CR_SECN_POS 0
-#define HASH_CR_SECN_MASK BIT(0)
-
-/* Start register bitfields */
-#define HASH_STR_DCAL_POS 8
-#define HASH_STR_DCAL_MASK BIT(8)
-#define HASH_STR_DEFAULT 0x0
-
-#define HASH_STR_NBLW_POS 0
-#define HASH_STR_NBLW_MASK 0x0000001FUL
-
-#define HASH_NBLW_MAX_VAL 0x1F
-
-/* PrimeCell IDs */
-#define HASH_P_ID0 0xE0
-#define HASH_P_ID1 0x05
-#define HASH_P_ID2 0x38
-#define HASH_P_ID3 0x00
-#define HASH_CELL_ID0 0x0D
-#define HASH_CELL_ID1 0xF0
-#define HASH_CELL_ID2 0x05
-#define HASH_CELL_ID3 0xB1
-
-#define HASH_SET_BITS(reg_name, mask) \
- writel_relaxed((readl_relaxed(reg_name) | mask), reg_name)
-
-#define HASH_CLEAR_BITS(reg_name, mask) \
- writel_relaxed((readl_relaxed(reg_name) & ~mask), reg_name)
-
-#define HASH_PUT_BITS(reg, val, shift, mask) \
- writel_relaxed(((readl(reg) & ~(mask)) | \
- (((u32)val << shift) & (mask))), reg)
-
-#define HASH_SET_DIN(val, len) writesl(&device_data->base->din, (val), (len))
-
-#define HASH_INITIALIZE \
- HASH_PUT_BITS( \
- &device_data->base->cr, \
- 0x01, HASH_CR_INIT_POS, \
- HASH_CR_INIT_MASK)
-
-#define HASH_SET_DATA_FORMAT(data_format) \
- HASH_PUT_BITS( \
- &device_data->base->cr, \
- (u32) (data_format), HASH_CR_DATAFORM_POS, \
- HASH_CR_DATAFORM_MASK)
-#define HASH_SET_NBLW(val) \
- HASH_PUT_BITS( \
- &device_data->base->str, \
- (u32) (val), HASH_STR_NBLW_POS, \
- HASH_STR_NBLW_MASK)
-#define HASH_SET_DCAL \
- HASH_PUT_BITS( \
- &device_data->base->str, \
- 0x01, HASH_STR_DCAL_POS, \
- HASH_STR_DCAL_MASK)
-
-/* Hardware access method */
-enum hash_mode {
- HASH_MODE_CPU,
- HASH_MODE_DMA
-};
-
-/**
- * struct uint64 - Structure to handle 64 bits integers.
- * @high_word: Most significant bits.
- * @low_word: Least significant bits.
- *
- * Used to handle 64 bits integers.
- */
-struct uint64 {
- u32 high_word;
- u32 low_word;
-};
-
-/**
- * struct hash_register - Contains all registers in ux500 hash hardware.
- * @cr: HASH control register (0x000).
- * @din: HASH data input register (0x004).
- * @str: HASH start register (0x008).
- * @hx: HASH digest register 0..7 (0x00c-0x01C).
- * @padding0: Reserved (0x02C).
- * @itcr: Integration test control register (0x080).
- * @itip: Integration test input register (0x084).
- * @itop: Integration test output register (0x088).
- * @padding1: Reserved (0x08C).
- * @csfull: HASH context full register (0x0F8).
- * @csdatain: HASH context swap data input register (0x0FC).
- * @csrx: HASH context swap register 0..51 (0x100-0x1CC).
- * @padding2: Reserved (0x1D0).
- * @periphid0: HASH peripheral identification register 0 (0xFE0).
- * @periphid1: HASH peripheral identification register 1 (0xFE4).
- * @periphid2: HASH peripheral identification register 2 (0xFE8).
- * @periphid3: HASH peripheral identification register 3 (0xFEC).
- * @cellid0: HASH PCell identification register 0 (0xFF0).
- * @cellid1: HASH PCell identification register 1 (0xFF4).
- * @cellid2: HASH PCell identification register 2 (0xFF8).
- * @cellid3: HASH PCell identification register 3 (0xFFC).
- *
- * The device communicates to the HASH via 32-bit-wide control registers
- * accessible via the 32-bit width AMBA rev. 2.0 AHB Bus. Below is a structure
- * with the registers used.
- */
-struct hash_register {
- u32 cr;
- u32 din;
- u32 str;
- u32 hx[8];
-
- u32 padding0[(0x080 - 0x02C) / sizeof(u32)];
-
- u32 itcr;
- u32 itip;
- u32 itop;
-
- u32 padding1[(0x0F8 - 0x08C) / sizeof(u32)];
-
- u32 csfull;
- u32 csdatain;
- u32 csrx[HASH_CSR_COUNT];
-
- u32 padding2[(0xFE0 - 0x1D0) / sizeof(u32)];
-
- u32 periphid0;
- u32 periphid1;
- u32 periphid2;
- u32 periphid3;
-
- u32 cellid0;
- u32 cellid1;
- u32 cellid2;
- u32 cellid3;
-};
-
-/**
- * struct hash_state - Hash context state.
- * @temp_cr: Temporary HASH Control Register.
- * @str_reg: HASH Start Register.
- * @din_reg: HASH Data Input Register.
- * @csr[52]: HASH Context Swap Registers 0-39.
- * @csfull: HASH Context Swap Registers 40 ie Status flags.
- * @csdatain: HASH Context Swap Registers 41 ie Input data.
- * @buffer: Working buffer for messages going to the hardware.
- * @length: Length of the part of message hashed so far (floor(N/64) * 64).
- * @index: Valid number of bytes in buffer (N % 64).
- * @bit_index: Valid number of bits in buffer (N % 8).
- *
- * This structure is used between context switches, i.e. when ongoing jobs are
- * interupted with new jobs. When this happens we need to store intermediate
- * results in software.
- *
- * WARNING: "index" is the member of the structure, to be sure that "buffer"
- * is aligned on a 4-bytes boundary. This is highly implementation dependent
- * and MUST be checked whenever this code is ported on new platforms.
- */
-struct hash_state {
- u32 temp_cr;
- u32 str_reg;
- u32 din_reg;
- u32 csr[52];
- u32 csfull;
- u32 csdatain;
- u32 buffer[HASH_BLOCK_SIZE / sizeof(u32)];
- struct uint64 length;
- u8 index;
- u8 bit_index;
-};
-
-/**
- * enum hash_device_id - HASH device ID.
- * @HASH_DEVICE_ID_0: Hash hardware with ID 0
- * @HASH_DEVICE_ID_1: Hash hardware with ID 1
- */
-enum hash_device_id {
- HASH_DEVICE_ID_0 = 0,
- HASH_DEVICE_ID_1 = 1
-};
-
-/**
- * enum hash_data_format - HASH data format.
- * @HASH_DATA_32_BITS: 32 bits data format
- * @HASH_DATA_16_BITS: 16 bits data format
- * @HASH_DATA_8_BITS: 8 bits data format.
- * @HASH_DATA_1_BITS: 1 bit data format.
- */
-enum hash_data_format {
- HASH_DATA_32_BITS = 0x0,
- HASH_DATA_16_BITS = 0x1,
- HASH_DATA_8_BITS = 0x2,
- HASH_DATA_1_BIT = 0x3
-};
-
-/**
- * enum hash_algo - Enumeration for selecting between SHA1 or SHA2 algorithm.
- * @HASH_ALGO_SHA1: Indicates that SHA1 is used.
- * @HASH_ALGO_SHA2: Indicates that SHA2 (SHA256) is used.
- */
-enum hash_algo {
- HASH_ALGO_SHA1 = 0x0,
- HASH_ALGO_SHA256 = 0x1
-};
-
-/**
- * enum hash_op - Enumeration for selecting between HASH or HMAC mode.
- * @HASH_OPER_MODE_HASH: Indicates usage of normal HASH mode.
- * @HASH_OPER_MODE_HMAC: Indicates usage of HMAC.
- */
-enum hash_op {
- HASH_OPER_MODE_HASH = 0x0,
- HASH_OPER_MODE_HMAC = 0x1
-};
-
-/**
- * struct hash_config - Configuration data for the hardware.
- * @data_format: Format of data entered into the hash data in register.
- * @algorithm: Algorithm selection bit.
- * @oper_mode: Operating mode selection bit.
- */
-struct hash_config {
- int data_format;
- int algorithm;
- int oper_mode;
-};
-
-/**
- * struct hash_dma - Structure used for dma.
- * @mask: DMA capabilities bitmap mask.
- * @complete: Used to maintain state for a "completion".
- * @chan_mem2hash: DMA channel.
- * @cfg_mem2hash: DMA channel configuration.
- * @sg_len: Scatterlist length.
- * @sg: Scatterlist.
- * @nents: Number of sg entries.
- */
-struct hash_dma {
- dma_cap_mask_t mask;
- struct completion complete;
- struct dma_chan *chan_mem2hash;
- void *cfg_mem2hash;
- int sg_len;
- struct scatterlist *sg;
- int nents;
-};
-
-/**
- * struct hash_ctx - The context used for hash calculations.
- * @key: The key used in the operation.
- * @keylen: The length of the key.
- * @state: The state of the current calculations.
- * @config: The current configuration.
- * @digestsize: The size of current digest.
- * @device: Pointer to the device structure.
- */
-struct hash_ctx {
- u8 *key;
- u32 keylen;
- struct hash_config config;
- int digestsize;
- struct hash_device_data *device;
-};
-
-/**
- * struct hash_ctx - The request context used for hash calculations.
- * @state: The state of the current calculations.
- * @dma_mode: Used in special cases (workaround), e.g. need to change to
- * cpu mode, if not supported/working in dma mode.
- * @updated: Indicates if hardware is initialized for new operations.
- */
-struct hash_req_ctx {
- struct hash_state state;
- bool dma_mode;
- u8 updated;
-};
-
-/**
- * struct hash_device_data - structure for a hash device.
- * @base: Pointer to virtual base address of the hash device.
- * @phybase: Pointer to physical memory location of the hash device.
- * @list_node: For inclusion in klist.
- * @dev: Pointer to the device dev structure.
- * @ctx_lock: Spinlock for current_ctx.
- * @current_ctx: Pointer to the currently allocated context.
- * @power_state: TRUE = power state on, FALSE = power state off.
- * @power_state_lock: Spinlock for power_state.
- * @regulator: Pointer to the device's power control.
- * @clk: Pointer to the device's clock control.
- * @restore_dev_state: TRUE = saved state, FALSE = no saved state.
- * @dma: Structure used for dma.
- */
-struct hash_device_data {
- struct hash_register __iomem *base;
- phys_addr_t phybase;
- struct klist_node list_node;
- struct device *dev;
- spinlock_t ctx_lock;
- struct hash_ctx *current_ctx;
- bool power_state;
- spinlock_t power_state_lock;
- struct regulator *regulator;
- struct clk *clk;
- bool restore_dev_state;
- struct hash_state state; /* Used for saving and resuming state */
- struct hash_dma dma;
-};
-
-int hash_check_hw(struct hash_device_data *device_data);
-
-int hash_setconfiguration(struct hash_device_data *device_data,
- struct hash_config *config);
-
-void hash_begin(struct hash_device_data *device_data, struct hash_ctx *ctx);
-
-void hash_get_digest(struct hash_device_data *device_data,
- u8 *digest, int algorithm);
-
-int hash_hw_update(struct ahash_request *req);
-
-int hash_save_state(struct hash_device_data *device_data,
- struct hash_state *state);
-
-int hash_resume_state(struct hash_device_data *device_data,
- const struct hash_state *state);
-
-#endif
diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c
deleted file mode 100644
index f104e8a43036..000000000000
--- a/drivers/crypto/ux500/hash/hash_core.c
+++ /dev/null
@@ -1,1966 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Cryptographic API.
- * Support for Nomadik hardware crypto engine.
-
- * Copyright (C) ST-Ericsson SA 2010
- * Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson
- * Author: Joakim Bech <joakim.xx.bech@stericsson.com> for ST-Ericsson
- * Author: Berne Hebark <berne.herbark@stericsson.com> for ST-Ericsson.
- * Author: Niklas Hernaeus <niklas.hernaeus@stericsson.com> for ST-Ericsson.
- * Author: Andreas Westin <andreas.westin@stericsson.com> for ST-Ericsson.
- */
-
-#define pr_fmt(fmt) "hashX hashX: " fmt
-
-#include <linux/clk.h>
-#include <linux/device.h>
-#include <linux/dma-mapping.h>
-#include <linux/err.h>
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/klist.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/mod_devicetable.h>
-#include <linux/platform_device.h>
-#include <linux/crypto.h>
-
-#include <linux/regulator/consumer.h>
-#include <linux/dmaengine.h>
-#include <linux/bitops.h>
-
-#include <crypto/internal/hash.h>
-#include <crypto/sha1.h>
-#include <crypto/sha2.h>
-#include <crypto/scatterwalk.h>
-#include <crypto/algapi.h>
-
-#include <linux/platform_data/crypto-ux500.h>
-
-#include "hash_alg.h"
-
-static int hash_mode;
-module_param(hash_mode, int, 0);
-MODULE_PARM_DESC(hash_mode, "CPU or DMA mode. CPU = 0 (default), DMA = 1");
-
-/* HMAC-SHA1, no key */
-static const u8 zero_message_hmac_sha1[SHA1_DIGEST_SIZE] = {
- 0xfb, 0xdb, 0x1d, 0x1b, 0x18, 0xaa, 0x6c, 0x08,
- 0x32, 0x4b, 0x7d, 0x64, 0xb7, 0x1f, 0xb7, 0x63,
- 0x70, 0x69, 0x0e, 0x1d
-};
-
-/* HMAC-SHA256, no key */
-static const u8 zero_message_hmac_sha256[SHA256_DIGEST_SIZE] = {
- 0xb6, 0x13, 0x67, 0x9a, 0x08, 0x14, 0xd9, 0xec,
- 0x77, 0x2f, 0x95, 0xd7, 0x78, 0xc3, 0x5f, 0xc5,
- 0xff, 0x16, 0x97, 0xc4, 0x93, 0x71, 0x56, 0x53,
- 0xc6, 0xc7, 0x12, 0x14, 0x42, 0x92, 0xc5, 0xad
-};
-
-/**
- * struct hash_driver_data - data specific to the driver.
- *
- * @device_list: A list of registered devices to choose from.
- * @device_allocation: A semaphore initialized with number of devices.
- */
-struct hash_driver_data {
- struct klist device_list;
- struct semaphore device_allocation;
-};
-
-static struct hash_driver_data driver_data;
-
-/* Declaration of functions */
-/**
- * hash_messagepad - Pads a message and write the nblw bits.
- * @device_data: Structure for the hash device.
- * @message: Last word of a message
- * @index_bytes: The number of bytes in the last message
- *
- * This function manages the final part of the digest calculation, when less
- * than 512 bits (64 bytes) remain in message. This means index_bytes < 64.
- *
- */
-static void hash_messagepad(struct hash_device_data *device_data,
- const u32 *message, u8 index_bytes);
-
-/**
- * release_hash_device - Releases a previously allocated hash device.
- * @device_data: Structure for the hash device.
- *
- */
-static void release_hash_device(struct hash_device_data *device_data)
-{
- spin_lock(&device_data->ctx_lock);
- device_data->current_ctx->device = NULL;
- device_data->current_ctx = NULL;
- spin_unlock(&device_data->ctx_lock);
-
- /*
- * The down_interruptible part for this semaphore is called in
- * cryp_get_device_data.
- */
- up(&driver_data.device_allocation);
-}
-
-static void hash_dma_setup_channel(struct hash_device_data *device_data,
- struct device *dev)
-{
- struct hash_platform_data *platform_data = dev->platform_data;
- struct dma_slave_config conf = {
- .direction = DMA_MEM_TO_DEV,
- .dst_addr = device_data->phybase + HASH_DMA_FIFO,
- .dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES,
- .dst_maxburst = 16,
- };
-
- dma_cap_zero(device_data->dma.mask);
- dma_cap_set(DMA_SLAVE, device_data->dma.mask);
-
- device_data->dma.cfg_mem2hash = platform_data->mem_to_engine;
- device_data->dma.chan_mem2hash =
- dma_request_channel(device_data->dma.mask,
- platform_data->dma_filter,
- device_data->dma.cfg_mem2hash);
-
- dmaengine_slave_config(device_data->dma.chan_mem2hash, &conf);
-
- init_completion(&device_data->dma.complete);
-}
-
-static void hash_dma_callback(void *data)
-{
- struct hash_ctx *ctx = data;
-
- complete(&ctx->device->dma.complete);
-}
-
-static int hash_set_dma_transfer(struct hash_ctx *ctx, struct scatterlist *sg,
- int len, enum dma_data_direction direction)
-{
- struct dma_async_tx_descriptor *desc = NULL;
- struct dma_chan *channel = NULL;
-
- if (direction != DMA_TO_DEVICE) {
- dev_err(ctx->device->dev, "%s: Invalid DMA direction\n",
- __func__);
- return -EFAULT;
- }
-
- sg->length = ALIGN(sg->length, HASH_DMA_ALIGN_SIZE);
-
- channel = ctx->device->dma.chan_mem2hash;
- ctx->device->dma.sg = sg;
- ctx->device->dma.sg_len = dma_map_sg(channel->device->dev,
- ctx->device->dma.sg, ctx->device->dma.nents,
- direction);
-
- if (!ctx->device->dma.sg_len) {
- dev_err(ctx->device->dev, "%s: Could not map the sg list (TO_DEVICE)\n",
- __func__);
- return -EFAULT;
- }
-
- dev_dbg(ctx->device->dev, "%s: Setting up DMA for buffer (TO_DEVICE)\n",
- __func__);
- desc = dmaengine_prep_slave_sg(channel,
- ctx->device->dma.sg, ctx->device->dma.sg_len,
- DMA_MEM_TO_DEV, DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
- if (!desc) {
- dev_err(ctx->device->dev,
- "%s: dmaengine_prep_slave_sg() failed!\n", __func__);
- return -EFAULT;
- }
-
- desc->callback = hash_dma_callback;
- desc->callback_param = ctx;
-
- dmaengine_submit(desc);
- dma_async_issue_pending(channel);
-
- return 0;
-}
-
-static void hash_dma_done(struct hash_ctx *ctx)
-{
- struct dma_chan *chan;
-
- chan = ctx->device->dma.chan_mem2hash;
- dmaengine_terminate_all(chan);
- dma_unmap_sg(chan->device->dev, ctx->device->dma.sg,
- ctx->device->dma.nents, DMA_TO_DEVICE);
-}
-
-static int hash_dma_write(struct hash_ctx *ctx,
- struct scatterlist *sg, int len)
-{
- int error = hash_set_dma_transfer(ctx, sg, len, DMA_TO_DEVICE);
- if (error) {
- dev_dbg(ctx->device->dev,
- "%s: hash_set_dma_transfer() failed\n", __func__);
- return error;
- }
-
- return len;
-}
-
-/**
- * get_empty_message_digest - Returns a pre-calculated digest for
- * the empty message.
- * @device_data: Structure for the hash device.
- * @zero_hash: Buffer to return the empty message digest.
- * @zero_hash_size: Hash size of the empty message digest.
- * @zero_digest: True if zero_digest returned.
- */
-static int get_empty_message_digest(
- struct hash_device_data *device_data,
- u8 *zero_hash, u32 *zero_hash_size, bool *zero_digest)
-{
- int ret = 0;
- struct hash_ctx *ctx = device_data->current_ctx;
- *zero_digest = false;
-
- /**
- * Caller responsible for ctx != NULL.
- */
-
- if (HASH_OPER_MODE_HASH == ctx->config.oper_mode) {
- if (HASH_ALGO_SHA1 == ctx->config.algorithm) {
- memcpy(zero_hash, &sha1_zero_message_hash[0],
- SHA1_DIGEST_SIZE);
- *zero_hash_size = SHA1_DIGEST_SIZE;
- *zero_digest = true;
- } else if (HASH_ALGO_SHA256 ==
- ctx->config.algorithm) {
- memcpy(zero_hash, &sha256_zero_message_hash[0],
- SHA256_DIGEST_SIZE);
- *zero_hash_size = SHA256_DIGEST_SIZE;
- *zero_digest = true;
- } else {
- dev_err(device_data->dev, "%s: Incorrect algorithm!\n",
- __func__);
- ret = -EINVAL;
- goto out;
- }
- } else if (HASH_OPER_MODE_HMAC == ctx->config.oper_mode) {
- if (!ctx->keylen) {
- if (HASH_ALGO_SHA1 == ctx->config.algorithm) {
- memcpy(zero_hash, &zero_message_hmac_sha1[0],
- SHA1_DIGEST_SIZE);
- *zero_hash_size = SHA1_DIGEST_SIZE;
- *zero_digest = true;
- } else if (HASH_ALGO_SHA256 == ctx->config.algorithm) {
- memcpy(zero_hash, &zero_message_hmac_sha256[0],
- SHA256_DIGEST_SIZE);
- *zero_hash_size = SHA256_DIGEST_SIZE;
- *zero_digest = true;
- } else {
- dev_err(device_data->dev, "%s: Incorrect algorithm!\n",
- __func__);
- ret = -EINVAL;
- goto out;
- }
- } else {
- dev_dbg(device_data->dev,
- "%s: Continue hash calculation, since hmac key available\n",
- __func__);
- }
- }
-out:
-
- return ret;
-}
-
-/**
- * hash_disable_power - Request to disable power and clock.
- * @device_data: Structure for the hash device.
- * @save_device_state: If true, saves the current hw state.
- *
- * This function request for disabling power (regulator) and clock,
- * and could also save current hw state.
- */
-static int hash_disable_power(struct hash_device_data *device_data,
- bool save_device_state)
-{
- int ret = 0;
- struct device *dev = device_data->dev;
-
- spin_lock(&device_data->power_state_lock);
- if (!device_data->power_state)
- goto out;
-
- if (save_device_state) {
- hash_save_state(device_data,
- &device_data->state);
- device_data->restore_dev_state = true;
- }
-
- clk_disable(device_data->clk);
- ret = regulator_disable(device_data->regulator);
- if (ret)
- dev_err(dev, "%s: regulator_disable() failed!\n", __func__);
-
- device_data->power_state = false;
-
-out:
- spin_unlock(&device_data->power_state_lock);
-
- return ret;
-}
-
-/**
- * hash_enable_power - Request to enable power and clock.
- * @device_data: Structure for the hash device.
- * @restore_device_state: If true, restores a previous saved hw state.
- *
- * This function request for enabling power (regulator) and clock,
- * and could also restore a previously saved hw state.
- */
-static int hash_enable_power(struct hash_device_data *device_data,
- bool restore_device_state)
-{
- int ret = 0;
- struct device *dev = device_data->dev;
-
- spin_lock(&device_data->power_state_lock);
- if (!device_data->power_state) {
- ret = regulator_enable(device_data->regulator);
- if (ret) {
- dev_err(dev, "%s: regulator_enable() failed!\n",
- __func__);
- goto out;
- }
- ret = clk_enable(device_data->clk);
- if (ret) {
- dev_err(dev, "%s: clk_enable() failed!\n", __func__);
- ret = regulator_disable(
- device_data->regulator);
- goto out;
- }
- device_data->power_state = true;
- }
-
- if (device_data->restore_dev_state) {
- if (restore_device_state) {
- device_data->restore_dev_state = false;
- hash_resume_state(device_data, &device_data->state);
- }
- }
-out:
- spin_unlock(&device_data->power_state_lock);
-
- return ret;
-}
-
-/**
- * hash_get_device_data - Checks for an available hash device and return it.
- * @ctx: Structure for the hash context.
- * @device_data: Structure for the hash device.
- *
- * This function check for an available hash device and return it to
- * the caller.
- * Note! Caller need to release the device, calling up().
- */
-static int hash_get_device_data(struct hash_ctx *ctx,
- struct hash_device_data **device_data)
-{
- int ret;
- struct klist_iter device_iterator;
- struct klist_node *device_node;
- struct hash_device_data *local_device_data = NULL;
-
- /* Wait until a device is available */
- ret = down_interruptible(&driver_data.device_allocation);
- if (ret)
- return ret; /* Interrupted */
-
- /* Select a device */
- klist_iter_init(&driver_data.device_list, &device_iterator);
- device_node = klist_next(&device_iterator);
- while (device_node) {
- local_device_data = container_of(device_node,
- struct hash_device_data, list_node);
- spin_lock(&local_device_data->ctx_lock);
- /* current_ctx allocates a device, NULL = unallocated */
- if (local_device_data->current_ctx) {
- device_node = klist_next(&device_iterator);
- } else {
- local_device_data->current_ctx = ctx;
- ctx->device = local_device_data;
- spin_unlock(&local_device_data->ctx_lock);
- break;
- }
- spin_unlock(&local_device_data->ctx_lock);
- }
- klist_iter_exit(&device_iterator);
-
- if (!device_node) {
- /**
- * No free device found.
- * Since we allocated a device with down_interruptible, this
- * should not be able to happen.
- * Number of available devices, which are contained in
- * device_allocation, is therefore decremented by not doing
- * an up(device_allocation).
- */
- return -EBUSY;
- }
-
- *device_data = local_device_data;
-
- return 0;
-}
-
-/**
- * hash_hw_write_key - Writes the key to the hardware registries.
- *
- * @device_data: Structure for the hash device.
- * @key: Key to be written.
- * @keylen: The lengt of the key.
- *
- * Note! This function DOES NOT write to the NBLW registry, even though
- * specified in the hw design spec. Either due to incorrect info in the
- * spec or due to a bug in the hw.
- */
-static void hash_hw_write_key(struct hash_device_data *device_data,
- const u8 *key, unsigned int keylen)
-{
- u32 word = 0;
- int nwords = 1;
-
- HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK);
-
- while (keylen >= 4) {
- u32 *key_word = (u32 *)key;
-
- HASH_SET_DIN(key_word, nwords);
- keylen -= 4;
- key += 4;
- }
-
- /* Take care of the remaining bytes in the last word */
- if (keylen) {
- word = 0;
- while (keylen) {
- word |= (key[keylen - 1] << (8 * (keylen - 1)));
- keylen--;
- }
-
- HASH_SET_DIN(&word, nwords);
- }
-
- while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
- cpu_relax();
-
- HASH_SET_DCAL;
-
- while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
- cpu_relax();
-}
-
-/**
- * init_hash_hw - Initialise the hash hardware for a new calculation.
- * @device_data: Structure for the hash device.
- * @ctx: The hash context.
- *
- * This function will enable the bits needed to clear and start a new
- * calculation.
- */
-static int init_hash_hw(struct hash_device_data *device_data,
- struct hash_ctx *ctx)
-{
- int ret = 0;
-
- ret = hash_setconfiguration(device_data, &ctx->config);
- if (ret) {
- dev_err(device_data->dev, "%s: hash_setconfiguration() failed!\n",
- __func__);
- return ret;
- }
-
- hash_begin(device_data, ctx);
-
- if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC)
- hash_hw_write_key(device_data, ctx->key, ctx->keylen);
-
- return ret;
-}
-
-/**
- * hash_get_nents - Return number of entries (nents) in scatterlist (sg).
- *
- * @sg: Scatterlist.
- * @size: Size in bytes.
- * @aligned: True if sg data aligned to work in DMA mode.
- *
- */
-static int hash_get_nents(struct scatterlist *sg, int size, bool *aligned)
-{
- int nents = 0;
- bool aligned_data = true;
-
- while (size > 0 && sg) {
- nents++;
- size -= sg->length;
-
- /* hash_set_dma_transfer will align last nent */
- if ((aligned && !IS_ALIGNED(sg->offset, HASH_DMA_ALIGN_SIZE)) ||
- (!IS_ALIGNED(sg->length, HASH_DMA_ALIGN_SIZE) && size > 0))
- aligned_data = false;
-
- sg = sg_next(sg);
- }
-
- if (aligned)
- *aligned = aligned_data;
-
- if (size != 0)
- return -EFAULT;
-
- return nents;
-}
-
-/**
- * hash_dma_valid_data - checks for dma valid sg data.
- * @sg: Scatterlist.
- * @datasize: Datasize in bytes.
- *
- * NOTE! This function checks for dma valid sg data, since dma
- * only accept datasizes of even wordsize.
- */
-static bool hash_dma_valid_data(struct scatterlist *sg, int datasize)
-{
- bool aligned;
-
- /* Need to include at least one nent, else error */
- if (hash_get_nents(sg, datasize, &aligned) < 1)
- return false;
-
- return aligned;
-}
-
-/**
- * ux500_hash_init - Common hash init function for SHA1/SHA2 (SHA256).
- * @req: The hash request for the job.
- *
- * Initialize structures.
- */
-static int ux500_hash_init(struct ahash_request *req)
-{
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
- struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
-
- if (!ctx->key)
- ctx->keylen = 0;
-
- memset(&req_ctx->state, 0, sizeof(struct hash_state));
- req_ctx->updated = 0;
- if (hash_mode == HASH_MODE_DMA) {
- if (req->nbytes < HASH_DMA_ALIGN_SIZE) {
- req_ctx->dma_mode = false; /* Don't use DMA */
-
- pr_debug("%s: DMA mode, but direct to CPU mode for data size < %d\n",
- __func__, HASH_DMA_ALIGN_SIZE);
- } else {
- if (req->nbytes >= HASH_DMA_PERFORMANCE_MIN_SIZE &&
- hash_dma_valid_data(req->src, req->nbytes)) {
- req_ctx->dma_mode = true;
- } else {
- req_ctx->dma_mode = false;
- pr_debug("%s: DMA mode, but use CPU mode for datalength < %d or non-aligned data, except in last nent\n",
- __func__,
- HASH_DMA_PERFORMANCE_MIN_SIZE);
- }
- }
- }
- return 0;
-}
-
-/**
- * hash_processblock - This function processes a single block of 512 bits (64
- * bytes), word aligned, starting at message.
- * @device_data: Structure for the hash device.
- * @message: Block (512 bits) of message to be written to
- * the HASH hardware.
- * @length: Message length
- *
- */
-static void hash_processblock(struct hash_device_data *device_data,
- const u32 *message, int length)
-{
- int len = length / HASH_BYTES_PER_WORD;
- /*
- * NBLW bits. Reset the number of bits in last word (NBLW).
- */
- HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK);
-
- /*
- * Write message data to the HASH_DIN register.
- */
- HASH_SET_DIN(message, len);
-}
-
-/**
- * hash_messagepad - Pads a message and write the nblw bits.
- * @device_data: Structure for the hash device.
- * @message: Last word of a message.
- * @index_bytes: The number of bytes in the last message.
- *
- * This function manages the final part of the digest calculation, when less
- * than 512 bits (64 bytes) remain in message. This means index_bytes < 64.
- *
- */
-static void hash_messagepad(struct hash_device_data *device_data,
- const u32 *message, u8 index_bytes)
-{
- int nwords = 1;
-
- /*
- * Clear hash str register, only clear NBLW
- * since DCAL will be reset by hardware.
- */
- HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK);
-
- /* Main loop */
- while (index_bytes >= 4) {
- HASH_SET_DIN(message, nwords);
- index_bytes -= 4;
- message++;
- }
-
- if (index_bytes)
- HASH_SET_DIN(message, nwords);
-
- while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
- cpu_relax();
-
- /* num_of_bytes == 0 => NBLW <- 0 (32 bits valid in DATAIN) */
- HASH_SET_NBLW(index_bytes * 8);
- dev_dbg(device_data->dev, "%s: DIN=0x%08x NBLW=%lu\n",
- __func__, readl_relaxed(&device_data->base->din),
- readl_relaxed(&device_data->base->str) & HASH_STR_NBLW_MASK);
- HASH_SET_DCAL;
- dev_dbg(device_data->dev, "%s: after dcal -> DIN=0x%08x NBLW=%lu\n",
- __func__, readl_relaxed(&device_data->base->din),
- readl_relaxed(&device_data->base->str) & HASH_STR_NBLW_MASK);
-
- while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
- cpu_relax();
-}
-
-/**
- * hash_incrementlength - Increments the length of the current message.
- * @ctx: Hash context
- * @incr: Length of message processed already
- *
- * Overflow cannot occur, because conditions for overflow are checked in
- * hash_hw_update.
- */
-static void hash_incrementlength(struct hash_req_ctx *ctx, u32 incr)
-{
- ctx->state.length.low_word += incr;
-
- /* Check for wrap-around */
- if (ctx->state.length.low_word < incr)
- ctx->state.length.high_word++;
-}
-
-/**
- * hash_setconfiguration - Sets the required configuration for the hash
- * hardware.
- * @device_data: Structure for the hash device.
- * @config: Pointer to a configuration structure.
- */
-int hash_setconfiguration(struct hash_device_data *device_data,
- struct hash_config *config)
-{
- int ret = 0;
-
- if (config->algorithm != HASH_ALGO_SHA1 &&
- config->algorithm != HASH_ALGO_SHA256)
- return -EPERM;
-
- /*
- * DATAFORM bits. Set the DATAFORM bits to 0b11, which means the data
- * to be written to HASH_DIN is considered as 32 bits.
- */
- HASH_SET_DATA_FORMAT(config->data_format);
-
- /*
- * ALGO bit. Set to 0b1 for SHA-1 and 0b0 for SHA-256
- */
- switch (config->algorithm) {
- case HASH_ALGO_SHA1:
- HASH_SET_BITS(&device_data->base->cr, HASH_CR_ALGO_MASK);
- break;
-
- case HASH_ALGO_SHA256:
- HASH_CLEAR_BITS(&device_data->base->cr, HASH_CR_ALGO_MASK);
- break;
-
- default:
- dev_err(device_data->dev, "%s: Incorrect algorithm\n",
- __func__);
- return -EPERM;
- }
-
- /*
- * MODE bit. This bit selects between HASH or HMAC mode for the
- * selected algorithm. 0b0 = HASH and 0b1 = HMAC.
- */
- if (HASH_OPER_MODE_HASH == config->oper_mode)
- HASH_CLEAR_BITS(&device_data->base->cr,
- HASH_CR_MODE_MASK);
- else if (HASH_OPER_MODE_HMAC == config->oper_mode) {
- HASH_SET_BITS(&device_data->base->cr, HASH_CR_MODE_MASK);
- if (device_data->current_ctx->keylen > HASH_BLOCK_SIZE) {
- /* Truncate key to blocksize */
- dev_dbg(device_data->dev, "%s: LKEY set\n", __func__);
- HASH_SET_BITS(&device_data->base->cr,
- HASH_CR_LKEY_MASK);
- } else {
- dev_dbg(device_data->dev, "%s: LKEY cleared\n",
- __func__);
- HASH_CLEAR_BITS(&device_data->base->cr,
- HASH_CR_LKEY_MASK);
- }
- } else { /* Wrong hash mode */
- ret = -EPERM;
- dev_err(device_data->dev, "%s: HASH_INVALID_PARAMETER!\n",
- __func__);
- }
- return ret;
-}
-
-/**
- * hash_begin - This routine resets some globals and initializes the hash
- * hardware.
- * @device_data: Structure for the hash device.
- * @ctx: Hash context.
- */
-void hash_begin(struct hash_device_data *device_data, struct hash_ctx *ctx)
-{
- /* HW and SW initializations */
- /* Note: there is no need to initialize buffer and digest members */
-
- while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
- cpu_relax();
-
- /*
- * INIT bit. Set this bit to 0b1 to reset the HASH processor core and
- * prepare the initialize the HASH accelerator to compute the message
- * digest of a new message.
- */
- HASH_INITIALIZE;
-
- /*
- * NBLW bits. Reset the number of bits in last word (NBLW).
- */
- HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK);
-}
-
-static int hash_process_data(struct hash_device_data *device_data,
- struct hash_ctx *ctx, struct hash_req_ctx *req_ctx,
- int msg_length, u8 *data_buffer, u8 *buffer,
- u8 *index)
-{
- int ret = 0;
- u32 count;
-
- do {
- if ((*index + msg_length) < HASH_BLOCK_SIZE) {
- for (count = 0; count < msg_length; count++) {
- buffer[*index + count] =
- *(data_buffer + count);
- }
- *index += msg_length;
- msg_length = 0;
- } else {
- if (req_ctx->updated) {
- ret = hash_resume_state(device_data,
- &device_data->state);
- memmove(req_ctx->state.buffer,
- device_data->state.buffer,
- HASH_BLOCK_SIZE);
- if (ret) {
- dev_err(device_data->dev,
- "%s: hash_resume_state() failed!\n",
- __func__);
- goto out;
- }
- } else {
- ret = init_hash_hw(device_data, ctx);
- if (ret) {
- dev_err(device_data->dev,
- "%s: init_hash_hw() failed!\n",
- __func__);
- goto out;
- }
- req_ctx->updated = 1;
- }
- /*
- * If 'data_buffer' is four byte aligned and
- * local buffer does not have any data, we can
- * write data directly from 'data_buffer' to
- * HW peripheral, otherwise we first copy data
- * to a local buffer
- */
- if (IS_ALIGNED((unsigned long)data_buffer, 4) &&
- (0 == *index))
- hash_processblock(device_data,
- (const u32 *)data_buffer,
- HASH_BLOCK_SIZE);
- else {
- for (count = 0;
- count < (u32)(HASH_BLOCK_SIZE - *index);
- count++) {
- buffer[*index + count] =
- *(data_buffer + count);
- }
- hash_processblock(device_data,
- (const u32 *)buffer,
- HASH_BLOCK_SIZE);
- }
- hash_incrementlength(req_ctx, HASH_BLOCK_SIZE);
- data_buffer += (HASH_BLOCK_SIZE - *index);
-
- msg_length -= (HASH_BLOCK_SIZE - *index);
- *index = 0;
-
- ret = hash_save_state(device_data,
- &device_data->state);
-
- memmove(device_data->state.buffer,
- req_ctx->state.buffer,
- HASH_BLOCK_SIZE);
- if (ret) {
- dev_err(device_data->dev, "%s: hash_save_state() failed!\n",
- __func__);
- goto out;
- }
- }
- } while (msg_length != 0);
-out:
-
- return ret;
-}
-
-/**
- * hash_dma_final - The hash dma final function for SHA1/SHA256.
- * @req: The hash request for the job.
- */
-static int hash_dma_final(struct ahash_request *req)
-{
- int ret = 0;
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
- struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
- struct hash_device_data *device_data;
- u8 digest[SHA256_DIGEST_SIZE];
- int bytes_written = 0;
-
- ret = hash_get_device_data(ctx, &device_data);
- if (ret)
- return ret;
-
- dev_dbg(device_data->dev, "%s: (ctx=0x%lx)!\n", __func__,
- (unsigned long)ctx);
-
- if (req_ctx->updated) {
- ret = hash_resume_state(device_data, &device_data->state);
-
- if (ret) {
- dev_err(device_data->dev, "%s: hash_resume_state() failed!\n",
- __func__);
- goto out;
- }
- } else {
- ret = hash_setconfiguration(device_data, &ctx->config);
- if (ret) {
- dev_err(device_data->dev,
- "%s: hash_setconfiguration() failed!\n",
- __func__);
- goto out;
- }
-
- /* Enable DMA input */
- if (hash_mode != HASH_MODE_DMA || !req_ctx->dma_mode) {
- HASH_CLEAR_BITS(&device_data->base->cr,
- HASH_CR_DMAE_MASK);
- } else {
- HASH_SET_BITS(&device_data->base->cr,
- HASH_CR_DMAE_MASK);
- HASH_SET_BITS(&device_data->base->cr,
- HASH_CR_PRIVN_MASK);
- }
-
- HASH_INITIALIZE;
-
- if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC)
- hash_hw_write_key(device_data, ctx->key, ctx->keylen);
-
- /* Number of bits in last word = (nbytes * 8) % 32 */
- HASH_SET_NBLW((req->nbytes * 8) % 32);
- req_ctx->updated = 1;
- }
-
- /* Store the nents in the dma struct. */
- ctx->device->dma.nents = hash_get_nents(req->src, req->nbytes, NULL);
- if (!ctx->device->dma.nents) {
- dev_err(device_data->dev, "%s: ctx->device->dma.nents = 0\n",
- __func__);
- ret = ctx->device->dma.nents;
- goto out;
- }
-
- bytes_written = hash_dma_write(ctx, req->src, req->nbytes);
- if (bytes_written != req->nbytes) {
- dev_err(device_data->dev, "%s: hash_dma_write() failed!\n",
- __func__);
- ret = bytes_written;
- goto out;
- }
-
- wait_for_completion(&ctx->device->dma.complete);
- hash_dma_done(ctx);
-
- while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
- cpu_relax();
-
- if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC && ctx->key) {
- unsigned int keylen = ctx->keylen;
- u8 *key = ctx->key;
-
- dev_dbg(device_data->dev, "%s: keylen: %d\n",
- __func__, ctx->keylen);
- hash_hw_write_key(device_data, key, keylen);
- }
-
- hash_get_digest(device_data, digest, ctx->config.algorithm);
- memcpy(req->result, digest, ctx->digestsize);
-
-out:
- release_hash_device(device_data);
-
- /**
- * Allocated in setkey, and only used in HMAC.
- */
- kfree(ctx->key);
-
- return ret;
-}
-
-/**
- * hash_hw_final - The final hash calculation function
- * @req: The hash request for the job.
- */
-static int hash_hw_final(struct ahash_request *req)
-{
- int ret = 0;
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
- struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
- struct hash_device_data *device_data;
- u8 digest[SHA256_DIGEST_SIZE];
-
- ret = hash_get_device_data(ctx, &device_data);
- if (ret)
- return ret;
-
- dev_dbg(device_data->dev, "%s: (ctx=0x%lx)!\n", __func__,
- (unsigned long)ctx);
-
- if (req_ctx->updated) {
- ret = hash_resume_state(device_data, &device_data->state);
-
- if (ret) {
- dev_err(device_data->dev,
- "%s: hash_resume_state() failed!\n", __func__);
- goto out;
- }
- } else if (req->nbytes == 0 && ctx->keylen == 0) {
- u8 zero_hash[SHA256_DIGEST_SIZE];
- u32 zero_hash_size = 0;
- bool zero_digest = false;
- /**
- * Use a pre-calculated empty message digest
- * (workaround since hw return zeroes, hw bug!?)
- */
- ret = get_empty_message_digest(device_data, &zero_hash[0],
- &zero_hash_size, &zero_digest);
- if (!ret && likely(zero_hash_size == ctx->digestsize) &&
- zero_digest) {
- memcpy(req->result, &zero_hash[0], ctx->digestsize);
- goto out;
- } else if (!ret && !zero_digest) {
- dev_dbg(device_data->dev,
- "%s: HMAC zero msg with key, continue...\n",
- __func__);
- } else {
- dev_err(device_data->dev,
- "%s: ret=%d, or wrong digest size? %s\n",
- __func__, ret,
- zero_hash_size == ctx->digestsize ?
- "true" : "false");
- /* Return error */
- goto out;
- }
- } else if (req->nbytes == 0 && ctx->keylen > 0) {
- ret = -EPERM;
- dev_err(device_data->dev, "%s: Empty message with keylength > 0, NOT supported\n",
- __func__);
- goto out;
- }
-
- if (!req_ctx->updated) {
- ret = init_hash_hw(device_data, ctx);
- if (ret) {
- dev_err(device_data->dev,
- "%s: init_hash_hw() failed!\n", __func__);
- goto out;
- }
- }
-
- if (req_ctx->state.index) {
- hash_messagepad(device_data, req_ctx->state.buffer,
- req_ctx->state.index);
- } else {
- HASH_SET_DCAL;
- while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
- cpu_relax();
- }
-
- if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC && ctx->key) {
- unsigned int keylen = ctx->keylen;
- u8 *key = ctx->key;
-
- dev_dbg(device_data->dev, "%s: keylen: %d\n",
- __func__, ctx->keylen);
- hash_hw_write_key(device_data, key, keylen);
- }
-
- hash_get_digest(device_data, digest, ctx->config.algorithm);
- memcpy(req->result, digest, ctx->digestsize);
-
-out:
- release_hash_device(device_data);
-
- /**
- * Allocated in setkey, and only used in HMAC.
- */
- kfree(ctx->key);
-
- return ret;
-}
-
-/**
- * hash_hw_update - Updates current HASH computation hashing another part of
- * the message.
- * @req: Byte array containing the message to be hashed (caller
- * allocated).
- */
-int hash_hw_update(struct ahash_request *req)
-{
- int ret = 0;
- u8 index = 0;
- u8 *buffer;
- struct hash_device_data *device_data;
- u8 *data_buffer;
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
- struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
- struct crypto_hash_walk walk;
- int msg_length;
-
- index = req_ctx->state.index;
- buffer = (u8 *)req_ctx->state.buffer;
-
- ret = hash_get_device_data(ctx, &device_data);
- if (ret)
- return ret;
-
- msg_length = crypto_hash_walk_first(req, &walk);
-
- /* Empty message ("") is correct indata */
- if (msg_length == 0) {
- ret = 0;
- goto release_dev;
- }
-
- /* Check if ctx->state.length + msg_length
- overflows */
- if (msg_length > (req_ctx->state.length.low_word + msg_length) &&
- HASH_HIGH_WORD_MAX_VAL == req_ctx->state.length.high_word) {
- pr_err("%s: HASH_MSG_LENGTH_OVERFLOW!\n", __func__);
- ret = crypto_hash_walk_done(&walk, -EPERM);
- goto release_dev;
- }
-
- /* Main loop */
- while (0 != msg_length) {
- data_buffer = walk.data;
- ret = hash_process_data(device_data, ctx, req_ctx, msg_length,
- data_buffer, buffer, &index);
-
- if (ret) {
- dev_err(device_data->dev, "%s: hash_internal_hw_update() failed!\n",
- __func__);
- crypto_hash_walk_done(&walk, ret);
- goto release_dev;
- }
-
- msg_length = crypto_hash_walk_done(&walk, 0);
- }
-
- req_ctx->state.index = index;
- dev_dbg(device_data->dev, "%s: indata length=%d, bin=%d\n",
- __func__, req_ctx->state.index, req_ctx->state.bit_index);
-
-release_dev:
- release_hash_device(device_data);
-
- return ret;
-}
-
-/**
- * hash_resume_state - Function that resumes the state of an calculation.
- * @device_data: Pointer to the device structure.
- * @device_state: The state to be restored in the hash hardware
- */
-int hash_resume_state(struct hash_device_data *device_data,
- const struct hash_state *device_state)
-{
- u32 temp_cr;
- s32 count;
- int hash_mode = HASH_OPER_MODE_HASH;
-
- if (NULL == device_state) {
- dev_err(device_data->dev, "%s: HASH_INVALID_PARAMETER!\n",
- __func__);
- return -EPERM;
- }
-
- /* Check correctness of index and length members */
- if (device_state->index > HASH_BLOCK_SIZE ||
- (device_state->length.low_word % HASH_BLOCK_SIZE) != 0) {
- dev_err(device_data->dev, "%s: HASH_INVALID_PARAMETER!\n",
- __func__);
- return -EPERM;
- }
-
- /*
- * INIT bit. Set this bit to 0b1 to reset the HASH processor core and
- * prepare the initialize the HASH accelerator to compute the message
- * digest of a new message.
- */
- HASH_INITIALIZE;
-
- temp_cr = device_state->temp_cr;
- writel_relaxed(temp_cr & HASH_CR_RESUME_MASK, &device_data->base->cr);
-
- if (readl(&device_data->base->cr) & HASH_CR_MODE_MASK)
- hash_mode = HASH_OPER_MODE_HMAC;
- else
- hash_mode = HASH_OPER_MODE_HASH;
-
- for (count = 0; count < HASH_CSR_COUNT; count++) {
- if ((count >= 36) && (hash_mode == HASH_OPER_MODE_HASH))
- break;
-
- writel_relaxed(device_state->csr[count],
- &device_data->base->csrx[count]);
- }
-
- writel_relaxed(device_state->csfull, &device_data->base->csfull);
- writel_relaxed(device_state->csdatain, &device_data->base->csdatain);
-
- writel_relaxed(device_state->str_reg, &device_data->base->str);
- writel_relaxed(temp_cr, &device_data->base->cr);
-
- return 0;
-}
-
-/**
- * hash_save_state - Function that saves the state of hardware.
- * @device_data: Pointer to the device structure.
- * @device_state: The strucure where the hardware state should be saved.
- */
-int hash_save_state(struct hash_device_data *device_data,
- struct hash_state *device_state)
-{
- u32 temp_cr;
- u32 count;
- int hash_mode = HASH_OPER_MODE_HASH;
-
- if (NULL == device_state) {
- dev_err(device_data->dev, "%s: HASH_INVALID_PARAMETER!\n",
- __func__);
- return -ENOTSUPP;
- }
-
- /* Write dummy value to force digest intermediate calculation. This
- * actually makes sure that there isn't any ongoing calculation in the
- * hardware.
- */
- while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
- cpu_relax();
-
- temp_cr = readl_relaxed(&device_data->base->cr);
-
- device_state->str_reg = readl_relaxed(&device_data->base->str);
-
- device_state->din_reg = readl_relaxed(&device_data->base->din);
-
- if (readl(&device_data->base->cr) & HASH_CR_MODE_MASK)
- hash_mode = HASH_OPER_MODE_HMAC;
- else
- hash_mode = HASH_OPER_MODE_HASH;
-
- for (count = 0; count < HASH_CSR_COUNT; count++) {
- if ((count >= 36) && (hash_mode == HASH_OPER_MODE_HASH))
- break;
-
- device_state->csr[count] =
- readl_relaxed(&device_data->base->csrx[count]);
- }
-
- device_state->csfull = readl_relaxed(&device_data->base->csfull);
- device_state->csdatain = readl_relaxed(&device_data->base->csdatain);
-
- device_state->temp_cr = temp_cr;
-
- return 0;
-}
-
-/**
- * hash_check_hw - This routine checks for peripheral Ids and PCell Ids.
- * @device_data:
- *
- */
-int hash_check_hw(struct hash_device_data *device_data)
-{
- /* Checking Peripheral Ids */
- if (HASH_P_ID0 == readl_relaxed(&device_data->base->periphid0) &&
- HASH_P_ID1 == readl_relaxed(&device_data->base->periphid1) &&
- HASH_P_ID2 == readl_relaxed(&device_data->base->periphid2) &&
- HASH_P_ID3 == readl_relaxed(&device_data->base->periphid3) &&
- HASH_CELL_ID0 == readl_relaxed(&device_data->base->cellid0) &&
- HASH_CELL_ID1 == readl_relaxed(&device_data->base->cellid1) &&
- HASH_CELL_ID2 == readl_relaxed(&device_data->base->cellid2) &&
- HASH_CELL_ID3 == readl_relaxed(&device_data->base->cellid3)) {
- return 0;
- }
-
- dev_err(device_data->dev, "%s: HASH_UNSUPPORTED_HW!\n", __func__);
- return -ENOTSUPP;
-}
-
-/**
- * hash_get_digest - Gets the digest.
- * @device_data: Pointer to the device structure.
- * @digest: User allocated byte array for the calculated digest.
- * @algorithm: The algorithm in use.
- */
-void hash_get_digest(struct hash_device_data *device_data,
- u8 *digest, int algorithm)
-{
- u32 temp_hx_val, count;
- int loop_ctr;
-
- if (algorithm != HASH_ALGO_SHA1 && algorithm != HASH_ALGO_SHA256) {
- dev_err(device_data->dev, "%s: Incorrect algorithm %d\n",
- __func__, algorithm);
- return;
- }
-
- if (algorithm == HASH_ALGO_SHA1)
- loop_ctr = SHA1_DIGEST_SIZE / sizeof(u32);
- else
- loop_ctr = SHA256_DIGEST_SIZE / sizeof(u32);
-
- dev_dbg(device_data->dev, "%s: digest array:(0x%lx)\n",
- __func__, (unsigned long)digest);
-
- /* Copy result into digest array */
- for (count = 0; count < loop_ctr; count++) {
- temp_hx_val = readl_relaxed(&device_data->base->hx[count]);
- digest[count * 4] = (u8) ((temp_hx_val >> 24) & 0xFF);
- digest[count * 4 + 1] = (u8) ((temp_hx_val >> 16) & 0xFF);
- digest[count * 4 + 2] = (u8) ((temp_hx_val >> 8) & 0xFF);
- digest[count * 4 + 3] = (u8) ((temp_hx_val >> 0) & 0xFF);
- }
-}
-
-/**
- * ahash_update - The hash update function for SHA1/SHA2 (SHA256).
- * @req: The hash request for the job.
- */
-static int ahash_update(struct ahash_request *req)
-{
- int ret = 0;
- struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
-
- if (hash_mode != HASH_MODE_DMA || !req_ctx->dma_mode)
- ret = hash_hw_update(req);
- /* Skip update for DMA, all data will be passed to DMA in final */
-
- if (ret) {
- pr_err("%s: hash_hw_update() failed!\n", __func__);
- }
-
- return ret;
-}
-
-/**
- * ahash_final - The hash final function for SHA1/SHA2 (SHA256).
- * @req: The hash request for the job.
- */
-static int ahash_final(struct ahash_request *req)
-{
- int ret = 0;
- struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
-
- pr_debug("%s: data size: %d\n", __func__, req->nbytes);
-
- if ((hash_mode == HASH_MODE_DMA) && req_ctx->dma_mode)
- ret = hash_dma_final(req);
- else
- ret = hash_hw_final(req);
-
- if (ret) {
- pr_err("%s: hash_hw/dma_final() failed\n", __func__);
- }
-
- return ret;
-}
-
-static int hash_setkey(struct crypto_ahash *tfm,
- const u8 *key, unsigned int keylen, int alg)
-{
- int ret = 0;
- struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
-
- /**
- * Freed in final.
- */
- ctx->key = kmemdup(key, keylen, GFP_KERNEL);
- if (!ctx->key) {
- pr_err("%s: Failed to allocate ctx->key for %d\n",
- __func__, alg);
- return -ENOMEM;
- }
- ctx->keylen = keylen;
-
- return ret;
-}
-
-static int ahash_sha1_init(struct ahash_request *req)
-{
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
-
- ctx->config.data_format = HASH_DATA_8_BITS;
- ctx->config.algorithm = HASH_ALGO_SHA1;
- ctx->config.oper_mode = HASH_OPER_MODE_HASH;
- ctx->digestsize = SHA1_DIGEST_SIZE;
-
- return ux500_hash_init(req);
-}
-
-static int ahash_sha256_init(struct ahash_request *req)
-{
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
-
- ctx->config.data_format = HASH_DATA_8_BITS;
- ctx->config.algorithm = HASH_ALGO_SHA256;
- ctx->config.oper_mode = HASH_OPER_MODE_HASH;
- ctx->digestsize = SHA256_DIGEST_SIZE;
-
- return ux500_hash_init(req);
-}
-
-static int ahash_sha1_digest(struct ahash_request *req)
-{
- int ret2, ret1;
-
- ret1 = ahash_sha1_init(req);
- if (ret1)
- goto out;
-
- ret1 = ahash_update(req);
- ret2 = ahash_final(req);
-
-out:
- return ret1 ? ret1 : ret2;
-}
-
-static int ahash_sha256_digest(struct ahash_request *req)
-{
- int ret2, ret1;
-
- ret1 = ahash_sha256_init(req);
- if (ret1)
- goto out;
-
- ret1 = ahash_update(req);
- ret2 = ahash_final(req);
-
-out:
- return ret1 ? ret1 : ret2;
-}
-
-static int ahash_noimport(struct ahash_request *req, const void *in)
-{
- return -ENOSYS;
-}
-
-static int ahash_noexport(struct ahash_request *req, void *out)
-{
- return -ENOSYS;
-}
-
-static int hmac_sha1_init(struct ahash_request *req)
-{
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
-
- ctx->config.data_format = HASH_DATA_8_BITS;
- ctx->config.algorithm = HASH_ALGO_SHA1;
- ctx->config.oper_mode = HASH_OPER_MODE_HMAC;
- ctx->digestsize = SHA1_DIGEST_SIZE;
-
- return ux500_hash_init(req);
-}
-
-static int hmac_sha256_init(struct ahash_request *req)
-{
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
-
- ctx->config.data_format = HASH_DATA_8_BITS;
- ctx->config.algorithm = HASH_ALGO_SHA256;
- ctx->config.oper_mode = HASH_OPER_MODE_HMAC;
- ctx->digestsize = SHA256_DIGEST_SIZE;
-
- return ux500_hash_init(req);
-}
-
-static int hmac_sha1_digest(struct ahash_request *req)
-{
- int ret2, ret1;
-
- ret1 = hmac_sha1_init(req);
- if (ret1)
- goto out;
-
- ret1 = ahash_update(req);
- ret2 = ahash_final(req);
-
-out:
- return ret1 ? ret1 : ret2;
-}
-
-static int hmac_sha256_digest(struct ahash_request *req)
-{
- int ret2, ret1;
-
- ret1 = hmac_sha256_init(req);
- if (ret1)
- goto out;
-
- ret1 = ahash_update(req);
- ret2 = ahash_final(req);
-
-out:
- return ret1 ? ret1 : ret2;
-}
-
-static int hmac_sha1_setkey(struct crypto_ahash *tfm,
- const u8 *key, unsigned int keylen)
-{
- return hash_setkey(tfm, key, keylen, HASH_ALGO_SHA1);
-}
-
-static int hmac_sha256_setkey(struct crypto_ahash *tfm,
- const u8 *key, unsigned int keylen)
-{
- return hash_setkey(tfm, key, keylen, HASH_ALGO_SHA256);
-}
-
-struct hash_algo_template {
- struct hash_config conf;
- struct ahash_alg hash;
-};
-
-static int hash_cra_init(struct crypto_tfm *tfm)
-{
- struct hash_ctx *ctx = crypto_tfm_ctx(tfm);
- struct crypto_alg *alg = tfm->__crt_alg;
- struct hash_algo_template *hash_alg;
-
- hash_alg = container_of(__crypto_ahash_alg(alg),
- struct hash_algo_template,
- hash);
-
- crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
- sizeof(struct hash_req_ctx));
-
- ctx->config.data_format = HASH_DATA_8_BITS;
- ctx->config.algorithm = hash_alg->conf.algorithm;
- ctx->config.oper_mode = hash_alg->conf.oper_mode;
-
- ctx->digestsize = hash_alg->hash.halg.digestsize;
-
- return 0;
-}
-
-static struct hash_algo_template hash_algs[] = {
- {
- .conf.algorithm = HASH_ALGO_SHA1,
- .conf.oper_mode = HASH_OPER_MODE_HASH,
- .hash = {
- .init = ux500_hash_init,
- .update = ahash_update,
- .final = ahash_final,
- .digest = ahash_sha1_digest,
- .export = ahash_noexport,
- .import = ahash_noimport,
- .halg.digestsize = SHA1_DIGEST_SIZE,
- .halg.statesize = sizeof(struct hash_ctx),
- .halg.base = {
- .cra_name = "sha1",
- .cra_driver_name = "sha1-ux500",
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = SHA1_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct hash_ctx),
- .cra_init = hash_cra_init,
- .cra_module = THIS_MODULE,
- }
- }
- },
- {
- .conf.algorithm = HASH_ALGO_SHA256,
- .conf.oper_mode = HASH_OPER_MODE_HASH,
- .hash = {
- .init = ux500_hash_init,
- .update = ahash_update,
- .final = ahash_final,
- .digest = ahash_sha256_digest,
- .export = ahash_noexport,
- .import = ahash_noimport,
- .halg.digestsize = SHA256_DIGEST_SIZE,
- .halg.statesize = sizeof(struct hash_ctx),
- .halg.base = {
- .cra_name = "sha256",
- .cra_driver_name = "sha256-ux500",
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = SHA256_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct hash_ctx),
- .cra_init = hash_cra_init,
- .cra_module = THIS_MODULE,
- }
- }
- },
- {
- .conf.algorithm = HASH_ALGO_SHA1,
- .conf.oper_mode = HASH_OPER_MODE_HMAC,
- .hash = {
- .init = ux500_hash_init,
- .update = ahash_update,
- .final = ahash_final,
- .digest = hmac_sha1_digest,
- .setkey = hmac_sha1_setkey,
- .export = ahash_noexport,
- .import = ahash_noimport,
- .halg.digestsize = SHA1_DIGEST_SIZE,
- .halg.statesize = sizeof(struct hash_ctx),
- .halg.base = {
- .cra_name = "hmac(sha1)",
- .cra_driver_name = "hmac-sha1-ux500",
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = SHA1_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct hash_ctx),
- .cra_init = hash_cra_init,
- .cra_module = THIS_MODULE,
- }
- }
- },
- {
- .conf.algorithm = HASH_ALGO_SHA256,
- .conf.oper_mode = HASH_OPER_MODE_HMAC,
- .hash = {
- .init = ux500_hash_init,
- .update = ahash_update,
- .final = ahash_final,
- .digest = hmac_sha256_digest,
- .setkey = hmac_sha256_setkey,
- .export = ahash_noexport,
- .import = ahash_noimport,
- .halg.digestsize = SHA256_DIGEST_SIZE,
- .halg.statesize = sizeof(struct hash_ctx),
- .halg.base = {
- .cra_name = "hmac(sha256)",
- .cra_driver_name = "hmac-sha256-ux500",
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = SHA256_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct hash_ctx),
- .cra_init = hash_cra_init,
- .cra_module = THIS_MODULE,
- }
- }
- }
-};
-
-static int ahash_algs_register_all(struct hash_device_data *device_data)
-{
- int ret;
- int i;
- int count;
-
- for (i = 0; i < ARRAY_SIZE(hash_algs); i++) {
- ret = crypto_register_ahash(&hash_algs[i].hash);
- if (ret) {
- count = i;
- dev_err(device_data->dev, "%s: alg registration failed\n",
- hash_algs[i].hash.halg.base.cra_driver_name);
- goto unreg;
- }
- }
- return 0;
-unreg:
- for (i = 0; i < count; i++)
- crypto_unregister_ahash(&hash_algs[i].hash);
- return ret;
-}
-
-static void ahash_algs_unregister_all(struct hash_device_data *device_data)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(hash_algs); i++)
- crypto_unregister_ahash(&hash_algs[i].hash);
-}
-
-/**
- * ux500_hash_probe - Function that probes the hash hardware.
- * @pdev: The platform device.
- */
-static int ux500_hash_probe(struct platform_device *pdev)
-{
- int ret = 0;
- struct resource *res = NULL;
- struct hash_device_data *device_data;
- struct device *dev = &pdev->dev;
-
- device_data = devm_kzalloc(dev, sizeof(*device_data), GFP_KERNEL);
- if (!device_data) {
- ret = -ENOMEM;
- goto out;
- }
-
- device_data->dev = dev;
- device_data->current_ctx = NULL;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_dbg(dev, "%s: platform_get_resource() failed!\n", __func__);
- ret = -ENODEV;
- goto out;
- }
-
- device_data->phybase = res->start;
- device_data->base = devm_ioremap_resource(dev, res);
- if (IS_ERR(device_data->base)) {
- ret = PTR_ERR(device_data->base);
- goto out;
- }
- spin_lock_init(&device_data->ctx_lock);
- spin_lock_init(&device_data->power_state_lock);
-
- /* Enable power for HASH1 hardware block */
- device_data->regulator = regulator_get(dev, "v-ape");
- if (IS_ERR(device_data->regulator)) {
- dev_err(dev, "%s: regulator_get() failed!\n", __func__);
- ret = PTR_ERR(device_data->regulator);
- device_data->regulator = NULL;
- goto out;
- }
-
- /* Enable the clock for HASH1 hardware block */
- device_data->clk = devm_clk_get(dev, NULL);
- if (IS_ERR(device_data->clk)) {
- dev_err(dev, "%s: clk_get() failed!\n", __func__);
- ret = PTR_ERR(device_data->clk);
- goto out_regulator;
- }
-
- ret = clk_prepare(device_data->clk);
- if (ret) {
- dev_err(dev, "%s: clk_prepare() failed!\n", __func__);
- goto out_regulator;
- }
-
- /* Enable device power (and clock) */
- ret = hash_enable_power(device_data, false);
- if (ret) {
- dev_err(dev, "%s: hash_enable_power() failed!\n", __func__);
- goto out_clk_unprepare;
- }
-
- ret = hash_check_hw(device_data);
- if (ret) {
- dev_err(dev, "%s: hash_check_hw() failed!\n", __func__);
- goto out_power;
- }
-
- if (hash_mode == HASH_MODE_DMA)
- hash_dma_setup_channel(device_data, dev);
-
- platform_set_drvdata(pdev, device_data);
-
- /* Put the new device into the device list... */
- klist_add_tail(&device_data->list_node, &driver_data.device_list);
- /* ... and signal that a new device is available. */
- up(&driver_data.device_allocation);
-
- ret = ahash_algs_register_all(device_data);
- if (ret) {
- dev_err(dev, "%s: ahash_algs_register_all() failed!\n",
- __func__);
- goto out_power;
- }
-
- dev_info(dev, "successfully registered\n");
- return 0;
-
-out_power:
- hash_disable_power(device_data, false);
-
-out_clk_unprepare:
- clk_unprepare(device_data->clk);
-
-out_regulator:
- regulator_put(device_data->regulator);
-
-out:
- return ret;
-}
-
-/**
- * ux500_hash_remove - Function that removes the hash device from the platform.
- * @pdev: The platform device.
- */
-static int ux500_hash_remove(struct platform_device *pdev)
-{
- struct hash_device_data *device_data;
- struct device *dev = &pdev->dev;
-
- device_data = platform_get_drvdata(pdev);
- if (!device_data) {
- dev_err(dev, "%s: platform_get_drvdata() failed!\n", __func__);
- return -ENOMEM;
- }
-
- /* Try to decrease the number of available devices. */
- if (down_trylock(&driver_data.device_allocation))
- return -EBUSY;
-
- /* Check that the device is free */
- spin_lock(&device_data->ctx_lock);
- /* current_ctx allocates a device, NULL = unallocated */
- if (device_data->current_ctx) {
- /* The device is busy */
- spin_unlock(&device_data->ctx_lock);
- /* Return the device to the pool. */
- up(&driver_data.device_allocation);
- return -EBUSY;
- }
-
- spin_unlock(&device_data->ctx_lock);
-
- /* Remove the device from the list */
- if (klist_node_attached(&device_data->list_node))
- klist_remove(&device_data->list_node);
-
- /* If this was the last device, remove the services */
- if (list_empty(&driver_data.device_list.k_list))
- ahash_algs_unregister_all(device_data);
-
- if (hash_disable_power(device_data, false))
- dev_err(dev, "%s: hash_disable_power() failed\n",
- __func__);
-
- clk_unprepare(device_data->clk);
- regulator_put(device_data->regulator);
-
- return 0;
-}
-
-/**
- * ux500_hash_shutdown - Function that shutdown the hash device.
- * @pdev: The platform device
- */
-static void ux500_hash_shutdown(struct platform_device *pdev)
-{
- struct hash_device_data *device_data;
-
- device_data = platform_get_drvdata(pdev);
- if (!device_data) {
- dev_err(&pdev->dev, "%s: platform_get_drvdata() failed!\n",
- __func__);
- return;
- }
-
- /* Check that the device is free */
- spin_lock(&device_data->ctx_lock);
- /* current_ctx allocates a device, NULL = unallocated */
- if (!device_data->current_ctx) {
- if (down_trylock(&driver_data.device_allocation))
- dev_dbg(&pdev->dev, "%s: Cryp still in use! Shutting down anyway...\n",
- __func__);
- /**
- * (Allocate the device)
- * Need to set this to non-null (dummy) value,
- * to avoid usage if context switching.
- */
- device_data->current_ctx++;
- }
- spin_unlock(&device_data->ctx_lock);
-
- /* Remove the device from the list */
- if (klist_node_attached(&device_data->list_node))
- klist_remove(&device_data->list_node);
-
- /* If this was the last device, remove the services */
- if (list_empty(&driver_data.device_list.k_list))
- ahash_algs_unregister_all(device_data);
-
- if (hash_disable_power(device_data, false))
- dev_err(&pdev->dev, "%s: hash_disable_power() failed\n",
- __func__);
-}
-
-#ifdef CONFIG_PM_SLEEP
-/**
- * ux500_hash_suspend - Function that suspends the hash device.
- * @dev: Device to suspend.
- */
-static int ux500_hash_suspend(struct device *dev)
-{
- int ret;
- struct hash_device_data *device_data;
- struct hash_ctx *temp_ctx = NULL;
-
- device_data = dev_get_drvdata(dev);
- if (!device_data) {
- dev_err(dev, "%s: platform_get_drvdata() failed!\n", __func__);
- return -ENOMEM;
- }
-
- spin_lock(&device_data->ctx_lock);
- if (!device_data->current_ctx)
- device_data->current_ctx++;
- spin_unlock(&device_data->ctx_lock);
-
- if (device_data->current_ctx == ++temp_ctx) {
- if (down_interruptible(&driver_data.device_allocation))
- dev_dbg(dev, "%s: down_interruptible() failed\n",
- __func__);
- ret = hash_disable_power(device_data, false);
-
- } else {
- ret = hash_disable_power(device_data, true);
- }
-
- if (ret)
- dev_err(dev, "%s: hash_disable_power()\n", __func__);
-
- return ret;
-}
-
-/**
- * ux500_hash_resume - Function that resume the hash device.
- * @dev: Device to resume.
- */
-static int ux500_hash_resume(struct device *dev)
-{
- int ret = 0;
- struct hash_device_data *device_data;
- struct hash_ctx *temp_ctx = NULL;
-
- device_data = dev_get_drvdata(dev);
- if (!device_data) {
- dev_err(dev, "%s: platform_get_drvdata() failed!\n", __func__);
- return -ENOMEM;
- }
-
- spin_lock(&device_data->ctx_lock);
- if (device_data->current_ctx == ++temp_ctx)
- device_data->current_ctx = NULL;
- spin_unlock(&device_data->ctx_lock);
-
- if (!device_data->current_ctx)
- up(&driver_data.device_allocation);
- else
- ret = hash_enable_power(device_data, true);
-
- if (ret)
- dev_err(dev, "%s: hash_enable_power() failed!\n", __func__);
-
- return ret;
-}
-#endif
-
-static SIMPLE_DEV_PM_OPS(ux500_hash_pm, ux500_hash_suspend, ux500_hash_resume);
-
-static const struct of_device_id ux500_hash_match[] = {
- { .compatible = "stericsson,ux500-hash" },
- { },
-};
-MODULE_DEVICE_TABLE(of, ux500_hash_match);
-
-static struct platform_driver hash_driver = {
- .probe = ux500_hash_probe,
- .remove = ux500_hash_remove,
- .shutdown = ux500_hash_shutdown,
- .driver = {
- .name = "hash1",
- .of_match_table = ux500_hash_match,
- .pm = &ux500_hash_pm,
- }
-};
-
-/**
- * ux500_hash_mod_init - The kernel module init function.
- */
-static int __init ux500_hash_mod_init(void)
-{
- klist_init(&driver_data.device_list, NULL, NULL);
- /* Initialize the semaphore to 0 devices (locked state) */
- sema_init(&driver_data.device_allocation, 0);
-
- return platform_driver_register(&hash_driver);
-}
-
-/**
- * ux500_hash_mod_fini - The kernel module exit function.
- */
-static void __exit ux500_hash_mod_fini(void)
-{
- platform_driver_unregister(&hash_driver);
-}
-
-module_init(ux500_hash_mod_init);
-module_exit(ux500_hash_mod_fini);
-
-MODULE_DESCRIPTION("Driver for ST-Ericsson UX500 HASH engine.");
-MODULE_LICENSE("GPL");
-
-MODULE_ALIAS_CRYPTO("sha1-all");
-MODULE_ALIAS_CRYPTO("sha256-all");
-MODULE_ALIAS_CRYPTO("hmac-sha1-all");
-MODULE_ALIAS_CRYPTO("hmac-sha256-all");
diff --git a/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c b/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c
index b2979be613b8..6963344f6a3a 100644
--- a/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c
+++ b/drivers/crypto/virtio/virtio_crypto_akcipher_algs.c
@@ -116,7 +116,7 @@ static int virtio_crypto_alg_akcipher_init_session(struct virtio_crypto_akcipher
struct virtio_crypto_session_input *input;
struct virtio_crypto_ctrl_request *vc_ctrl_req;
- pkey = kmemdup(key, keylen, GFP_ATOMIC);
+ pkey = kmemdup(key, keylen, GFP_KERNEL);
if (!pkey)
return -ENOMEM;
diff --git a/drivers/cxl/core/memdev.c b/drivers/cxl/core/memdev.c
index 0af8856936dc..28a05f2fe32d 100644
--- a/drivers/cxl/core/memdev.c
+++ b/drivers/cxl/core/memdev.c
@@ -27,7 +27,7 @@ static void cxl_memdev_release(struct device *dev)
kfree(cxlmd);
}
-static char *cxl_memdev_devnode(struct device *dev, umode_t *mode, kuid_t *uid,
+static char *cxl_memdev_devnode(const struct device *dev, umode_t *mode, kuid_t *uid,
kgid_t *gid)
{
return kasprintf(GFP_KERNEL, "cxl/%s", dev_name(dev));
@@ -162,7 +162,7 @@ static const struct device_type cxl_memdev_type = {
.groups = cxl_memdev_attribute_groups,
};
-bool is_cxl_memdev(struct device *dev)
+bool is_cxl_memdev(const struct device *dev)
{
return dev->type == &cxl_memdev_type;
}
diff --git a/drivers/cxl/core/port.c b/drivers/cxl/core/port.c
index 724190418698..8ee6b6e2e2a4 100644
--- a/drivers/cxl/core/port.c
+++ b/drivers/cxl/core/port.c
@@ -38,7 +38,7 @@ static ssize_t devtype_show(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR_RO(devtype);
-static int cxl_device_id(struct device *dev)
+static int cxl_device_id(const struct device *dev)
{
if (dev->type == &cxl_nvdimm_bridge_type)
return CXL_DEVICE_NVDIMM_BRIDGE;
@@ -530,13 +530,13 @@ static const struct device_type cxl_port_type = {
.groups = cxl_port_attribute_groups,
};
-bool is_cxl_port(struct device *dev)
+bool is_cxl_port(const struct device *dev)
{
return dev->type == &cxl_port_type;
}
EXPORT_SYMBOL_NS_GPL(is_cxl_port, CXL);
-struct cxl_port *to_cxl_port(struct device *dev)
+struct cxl_port *to_cxl_port(const struct device *dev)
{
if (dev_WARN_ONCE(dev, dev->type != &cxl_port_type,
"not a cxl_port device\n"))
@@ -1869,7 +1869,7 @@ void cxl_driver_unregister(struct cxl_driver *cxl_drv)
}
EXPORT_SYMBOL_NS_GPL(cxl_driver_unregister, CXL);
-static int cxl_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
+static int cxl_bus_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
return add_uevent_var(env, "MODALIAS=" CXL_MODALIAS_FMT,
cxl_device_id(dev));
diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
index d853a0238ad7..f2b0962a552d 100644
--- a/drivers/cxl/cxl.h
+++ b/drivers/cxl/cxl.h
@@ -649,8 +649,8 @@ static inline bool is_cxl_root(struct cxl_port *port)
return port->uport == port->dev.parent;
}
-bool is_cxl_port(struct device *dev);
-struct cxl_port *to_cxl_port(struct device *dev);
+bool is_cxl_port(const struct device *dev);
+struct cxl_port *to_cxl_port(const struct device *dev);
struct pci_bus;
int devm_cxl_register_pci_bus(struct device *host, struct device *uport,
struct pci_bus *bus);
diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h
index ccbafc05a636..090acebba4fa 100644
--- a/drivers/cxl/cxlmem.h
+++ b/drivers/cxl/cxlmem.h
@@ -75,7 +75,7 @@ cxled_to_memdev(struct cxl_endpoint_decoder *cxled)
return to_cxl_memdev(port->uport);
}
-bool is_cxl_memdev(struct device *dev);
+bool is_cxl_memdev(const struct device *dev);
static inline bool is_cxl_endpoint(struct cxl_port *port)
{
return is_cxl_memdev(port->uport);
diff --git a/drivers/dax/Kconfig b/drivers/dax/Kconfig
index 7e1008d756b8..a88744244149 100644
--- a/drivers/dax/Kconfig
+++ b/drivers/dax/Kconfig
@@ -1,7 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
menuconfig DAX
tristate "DAX: direct access to differentiated memory"
- select SRCU
default m if NVDIMM_DAX
if DAX
diff --git a/drivers/dax/bus.c b/drivers/dax/bus.c
index 67a64f4c472d..227800053309 100644
--- a/drivers/dax/bus.c
+++ b/drivers/dax/bus.c
@@ -18,7 +18,7 @@ struct dax_id {
char dev_name[DAX_NAME_LEN];
};
-static int dax_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
+static int dax_bus_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
/*
* We only ever expect to handle device-dax instances, i.e. the
diff --git a/drivers/dax/device.c b/drivers/dax/device.c
index ecdff79e31f2..af9930c03c9c 100644
--- a/drivers/dax/device.c
+++ b/drivers/dax/device.c
@@ -308,7 +308,7 @@ static int dax_mmap(struct file *filp, struct vm_area_struct *vma)
return rc;
vma->vm_ops = &dax_vm_ops;
- vma->vm_flags |= VM_HUGEPAGE;
+ vm_flags_set(vma, VM_HUGEPAGE);
return 0;
}
diff --git a/drivers/dax/super.c b/drivers/dax/super.c
index da4438f3188c..c4c4728a36e4 100644
--- a/drivers/dax/super.c
+++ b/drivers/dax/super.c
@@ -475,7 +475,7 @@ EXPORT_SYMBOL_GPL(put_dax);
/**
* dax_holder() - obtain the holder of a dax device
* @dax_dev: a dax_device instance
-
+ *
* Return: the holder's data which represents the holder if registered,
* otherwize NULL.
*/
diff --git a/drivers/dma-buf/dma-buf-sysfs-stats.c b/drivers/dma-buf/dma-buf-sysfs-stats.c
index fbf725fae7c1..6cfbbf0720bd 100644
--- a/drivers/dma-buf/dma-buf-sysfs-stats.c
+++ b/drivers/dma-buf/dma-buf-sysfs-stats.c
@@ -112,7 +112,7 @@ static void dma_buf_sysfs_release(struct kobject *kobj)
kfree(sysfs_entry);
}
-static struct kobj_type dma_buf_ktype = {
+static const struct kobj_type dma_buf_ktype = {
.sysfs_ops = &dma_buf_stats_sysfs_ops,
.release = dma_buf_sysfs_release,
.default_groups = dma_buf_stats_default_groups,
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index e6528767efc7..757c0fb77a6c 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -1257,7 +1257,7 @@ EXPORT_SYMBOL_NS_GPL(dma_buf_unmap_attachment_unlocked, DMA_BUF);
*
* @dmabuf: [in] buffer which is moving
*
- * Informs all attachmenst that they need to destroy and recreated all their
+ * Informs all attachments that they need to destroy and recreate all their
* mappings.
*/
void dma_buf_move_notify(struct dma_buf *dmabuf)
@@ -1275,11 +1275,11 @@ EXPORT_SYMBOL_NS_GPL(dma_buf_move_notify, DMA_BUF);
/**
* DOC: cpu access
*
- * There are mutliple reasons for supporting CPU access to a dma buffer object:
+ * There are multiple reasons for supporting CPU access to a dma buffer object:
*
* - Fallback operations in the kernel, for example when a device is connected
* over USB and the kernel needs to shuffle the data around first before
- * sending it away. Cache coherency is handled by braketing any transactions
+ * sending it away. Cache coherency is handled by bracketing any transactions
* with calls to dma_buf_begin_cpu_access() and dma_buf_end_cpu_access()
* access.
*
@@ -1306,7 +1306,7 @@ EXPORT_SYMBOL_NS_GPL(dma_buf_move_notify, DMA_BUF);
* replace ION buffers mmap support was needed.
*
* There is no special interfaces, userspace simply calls mmap on the dma-buf
- * fd. But like for CPU access there's a need to braket the actual access,
+ * fd. But like for CPU access there's a need to bracket the actual access,
* which is handled by the ioctl (DMA_BUF_IOCTL_SYNC). Note that
* DMA_BUF_IOCTL_SYNC can fail with -EAGAIN or -EINTR, in which case it must
* be restarted.
@@ -1380,10 +1380,10 @@ static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
* preparations. Coherency is only guaranteed in the specified range for the
* specified access direction.
* @dmabuf: [in] buffer to prepare cpu access for.
- * @direction: [in] length of range for cpu access.
+ * @direction: [in] direction of access.
*
* After the cpu access is complete the caller should call
- * dma_buf_end_cpu_access(). Only when cpu access is braketed by both calls is
+ * dma_buf_end_cpu_access(). Only when cpu access is bracketed by both calls is
* it guaranteed to be coherent with other DMA access.
*
* This function will also wait for any DMA transactions tracked through
@@ -1423,7 +1423,7 @@ EXPORT_SYMBOL_NS_GPL(dma_buf_begin_cpu_access, DMA_BUF);
* actions. Coherency is only guaranteed in the specified range for the
* specified access direction.
* @dmabuf: [in] buffer to complete cpu access for.
- * @direction: [in] length of range for cpu access.
+ * @direction: [in] direction of access.
*
* This terminates CPU access started with dma_buf_begin_cpu_access().
*
diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c
index 406b4e26f538..0de0482cd36e 100644
--- a/drivers/dma-buf/dma-fence.c
+++ b/drivers/dma-buf/dma-fence.c
@@ -167,7 +167,7 @@ struct dma_fence *dma_fence_allocate_private_stub(void)
0, 0);
set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
- &dma_fence_stub.flags);
+ &fence->flags);
dma_fence_signal(fence);
diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c
index 283816fbd72f..740d6e426ee9 100644
--- a/drivers/dma-buf/udmabuf.c
+++ b/drivers/dma-buf/udmabuf.c
@@ -13,6 +13,8 @@
#include <linux/slab.h>
#include <linux/udmabuf.h>
#include <linux/hugetlb.h>
+#include <linux/vmalloc.h>
+#include <linux/iosys-map.h>
static int list_limit = 1024;
module_param(list_limit, int, 0644);
@@ -60,6 +62,30 @@ static int mmap_udmabuf(struct dma_buf *buf, struct vm_area_struct *vma)
return 0;
}
+static int vmap_udmabuf(struct dma_buf *buf, struct iosys_map *map)
+{
+ struct udmabuf *ubuf = buf->priv;
+ void *vaddr;
+
+ dma_resv_assert_held(buf->resv);
+
+ vaddr = vm_map_ram(ubuf->pages, ubuf->pagecount, -1);
+ if (!vaddr)
+ return -EINVAL;
+
+ iosys_map_set_vaddr(map, vaddr);
+ return 0;
+}
+
+static void vunmap_udmabuf(struct dma_buf *buf, struct iosys_map *map)
+{
+ struct udmabuf *ubuf = buf->priv;
+
+ dma_resv_assert_held(buf->resv);
+
+ vm_unmap_ram(map->vaddr, ubuf->pagecount);
+}
+
static struct sg_table *get_sg_table(struct device *dev, struct dma_buf *buf,
enum dma_data_direction direction)
{
@@ -162,6 +188,8 @@ static const struct dma_buf_ops udmabuf_ops = {
.unmap_dma_buf = unmap_udmabuf,
.release = release_udmabuf,
.mmap = mmap_udmabuf,
+ .vmap = vmap_udmabuf,
+ .vunmap = vunmap_udmabuf,
.begin_cpu_access = begin_cpu_udmabuf,
.end_cpu_access = end_cpu_udmabuf,
};
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index b6d48d54f42f..fb7073fc034f 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -245,7 +245,7 @@ config FSL_RAID
config HISI_DMA
tristate "HiSilicon DMA Engine support"
- depends on ARM64 || COMPILE_TEST
+ depends on ARCH_HISI || COMPILE_TEST
depends on PCI_MSI
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
@@ -610,18 +610,6 @@ config SPRD_DMA
help
Enable support for the on-chip DMA controller on Spreadtrum platform.
-config S3C24XX_DMAC
- bool "Samsung S3C24XX DMA support"
- depends on ARCH_S3C24XX || COMPILE_TEST
- select DMA_ENGINE
- select DMA_VIRTUAL_CHANNELS
- help
- Support for the Samsung S3C24XX DMA controller driver. The
- DMA controller is having multiple DMA channels which can be
- configured for different peripherals like audio, UART, SPI.
- The DMA controller can transfer data from memory to peripheral,
- periphal to memory, periphal to periphal and memory to memory.
-
config TXX9_DMAC
tristate "Toshiba TXx9 SoC DMA support"
depends on MACH_TX49XX
@@ -728,6 +716,20 @@ config XILINX_DMA
the scatter gather interface with multiple channels independent
configuration support.
+config XILINX_XDMA
+ tristate "Xilinx DMA/Bridge Subsystem DMA Engine"
+ depends on HAS_IOMEM
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ select REGMAP_MMIO
+ help
+ Enable support for Xilinx DMA/Bridge Subsystem DMA engine. The DMA
+ provides high performance block data movement between Host memory
+ and the DMA subsystem. These direct memory transfers can be both in
+ the Host to Card (H2C) and Card to Host (C2H) transfers.
+ The core also provides up to 16 user interrupt wires that generate
+ interrupts to the host.
+
config XILINX_ZYNQMP_DMA
tristate "Xilinx ZynqMP DMA Engine"
depends on ARCH_ZYNQ || MICROBLAZE || ARM64 || COMPILE_TEST
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 5b55ada052a7..a4fd1ce29510 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -70,7 +70,6 @@ obj-$(CONFIG_STM32_DMA) += stm32-dma.o
obj-$(CONFIG_STM32_DMAMUX) += stm32-dmamux.o
obj-$(CONFIG_STM32_MDMA) += stm32-mdma.o
obj-$(CONFIG_SPRD_DMA) += sprd-dma.o
-obj-$(CONFIG_S3C24XX_DMAC) += s3c24xx-dma.o
obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o
obj-$(CONFIG_TEGRA186_GPC_DMA) += tegra186-gpc-dma.o
obj-$(CONFIG_TEGRA20_APB_DMA) += tegra20-apb-dma.o
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index d6c9781cd46a..1f0fab180f8f 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -21,6 +21,7 @@
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
+#include <linux/pm_runtime.h>
#include "dmaengine.h"
@@ -240,6 +241,7 @@ struct at_xdmac_chan {
struct at_xdmac {
struct dma_device dma;
void __iomem *regs;
+ struct device *dev;
int irq;
struct clk *clk;
u32 save_gim;
@@ -361,13 +363,65 @@ MODULE_PARM_DESC(init_nr_desc_per_channel,
"initial descriptors per channel (default: 64)");
+static void at_xdmac_runtime_suspend_descriptors(struct at_xdmac_chan *atchan)
+{
+ struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
+ struct at_xdmac_desc *desc, *_desc;
+
+ list_for_each_entry_safe(desc, _desc, &atchan->xfers_list, xfer_node) {
+ if (!desc->active_xfer)
+ continue;
+
+ pm_runtime_mark_last_busy(atxdmac->dev);
+ pm_runtime_put_autosuspend(atxdmac->dev);
+ }
+}
+
+static int at_xdmac_runtime_resume_descriptors(struct at_xdmac_chan *atchan)
+{
+ struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
+ struct at_xdmac_desc *desc, *_desc;
+ int ret;
+
+ list_for_each_entry_safe(desc, _desc, &atchan->xfers_list, xfer_node) {
+ if (!desc->active_xfer)
+ continue;
+
+ ret = pm_runtime_resume_and_get(atxdmac->dev);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
static bool at_xdmac_chan_is_enabled(struct at_xdmac_chan *atchan)
{
- return at_xdmac_chan_read(atchan, AT_XDMAC_GS) & atchan->mask;
+ struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
+ int ret;
+
+ ret = pm_runtime_resume_and_get(atxdmac->dev);
+ if (ret < 0)
+ return false;
+
+ ret = !!(at_xdmac_chan_read(atchan, AT_XDMAC_GS) & atchan->mask);
+
+ pm_runtime_mark_last_busy(atxdmac->dev);
+ pm_runtime_put_autosuspend(atxdmac->dev);
+
+ return ret;
}
static void at_xdmac_off(struct at_xdmac *atxdmac)
{
+ struct dma_chan *chan, *_chan;
+ struct at_xdmac_chan *atchan;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(atxdmac->dev);
+ if (ret < 0)
+ return;
+
at_xdmac_write(atxdmac, AT_XDMAC_GD, -1L);
/* Wait that all chans are disabled. */
@@ -375,6 +429,18 @@ static void at_xdmac_off(struct at_xdmac *atxdmac)
cpu_relax();
at_xdmac_write(atxdmac, AT_XDMAC_GID, -1L);
+
+ /* Decrement runtime PM ref counter for each active descriptor. */
+ if (!list_empty(&atxdmac->dma.channels)) {
+ list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels,
+ device_node) {
+ atchan = to_at_xdmac_chan(chan);
+ at_xdmac_runtime_suspend_descriptors(atchan);
+ }
+ }
+
+ pm_runtime_mark_last_busy(atxdmac->dev);
+ pm_runtime_put_autosuspend(atxdmac->dev);
}
/* Call with lock hold. */
@@ -383,6 +449,11 @@ static void at_xdmac_start_xfer(struct at_xdmac_chan *atchan,
{
struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
u32 reg;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(atxdmac->dev);
+ if (ret < 0)
+ return;
dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, first);
@@ -462,7 +533,6 @@ static void at_xdmac_start_xfer(struct at_xdmac_chan *atchan,
at_xdmac_chan_read(atchan, AT_XDMAC_CSA),
at_xdmac_chan_read(atchan, AT_XDMAC_CDA),
at_xdmac_chan_read(atchan, AT_XDMAC_CUBC));
-
}
static dma_cookie_t at_xdmac_tx_submit(struct dma_async_tx_descriptor *tx)
@@ -1456,14 +1526,14 @@ at_xdmac_prep_dma_memset_sg(struct dma_chan *chan, struct scatterlist *sgl,
static enum dma_status
at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
- struct dma_tx_state *txstate)
+ struct dma_tx_state *txstate)
{
struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
struct at_xdmac_desc *desc, *_desc, *iter;
struct list_head *descs_list;
enum dma_status ret;
- int residue, retry;
+ int residue, retry, pm_status;
u32 cur_nda, check_nda, cur_ubc, mask, value;
u8 dwidth = 0;
unsigned long flags;
@@ -1473,6 +1543,10 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
if (ret == DMA_COMPLETE || !txstate)
return ret;
+ pm_status = pm_runtime_resume_and_get(atxdmac->dev);
+ if (pm_status < 0)
+ return DMA_ERROR;
+
spin_lock_irqsave(&atchan->lock, flags);
desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc, xfer_node);
@@ -1590,6 +1664,8 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
spin_unlock:
spin_unlock_irqrestore(&atchan->lock, flags);
+ pm_runtime_mark_last_busy(atxdmac->dev);
+ pm_runtime_put_autosuspend(atxdmac->dev);
return ret;
}
@@ -1636,6 +1712,11 @@ static void at_xdmac_handle_error(struct at_xdmac_chan *atchan)
{
struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
struct at_xdmac_desc *bad_desc;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(atxdmac->dev);
+ if (ret < 0)
+ return;
/*
* The descriptor currently at the head of the active list is
@@ -1665,12 +1746,16 @@ static void at_xdmac_handle_error(struct at_xdmac_chan *atchan)
__func__, &bad_desc->lld.mbr_sa, &bad_desc->lld.mbr_da,
bad_desc->lld.mbr_ubc);
+ pm_runtime_mark_last_busy(atxdmac->dev);
+ pm_runtime_put_autosuspend(atxdmac->dev);
+
/* Then continue with usual descriptor management */
}
static void at_xdmac_tasklet(struct tasklet_struct *t)
{
struct at_xdmac_chan *atchan = from_tasklet(atchan, t, tasklet);
+ struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
struct at_xdmac_desc *desc;
struct dma_async_tx_descriptor *txd;
u32 error_mask;
@@ -1720,6 +1805,13 @@ static void at_xdmac_tasklet(struct tasklet_struct *t)
list_splice_tail_init(&desc->descs_list, &atchan->free_descs_list);
at_xdmac_advance_work(atchan);
spin_unlock_irq(&atchan->lock);
+
+ /*
+ * Decrement runtime PM ref counter incremented in
+ * at_xdmac_start_xfer().
+ */
+ pm_runtime_mark_last_busy(atxdmac->dev);
+ pm_runtime_put_autosuspend(atxdmac->dev);
}
static irqreturn_t at_xdmac_interrupt(int irq, void *dev_id)
@@ -1811,19 +1903,31 @@ static int at_xdmac_device_pause(struct dma_chan *chan)
struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
unsigned long flags;
+ int ret;
dev_dbg(chan2dev(chan), "%s\n", __func__);
if (test_and_set_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status))
return 0;
+ ret = pm_runtime_resume_and_get(atxdmac->dev);
+ if (ret < 0)
+ return ret;
+
spin_lock_irqsave(&atchan->lock, flags);
at_xdmac_write(atxdmac, atxdmac->layout->grws, atchan->mask);
while (at_xdmac_chan_read(atchan, AT_XDMAC_CC)
& (AT_XDMAC_CC_WRIP | AT_XDMAC_CC_RDIP))
cpu_relax();
+
+ /* Decrement runtime PM ref counter for each active descriptor. */
+ at_xdmac_runtime_suspend_descriptors(atchan);
+
spin_unlock_irqrestore(&atchan->lock, flags);
+ pm_runtime_mark_last_busy(atxdmac->dev);
+ pm_runtime_put_autosuspend(atxdmac->dev);
+
return 0;
}
@@ -1832,20 +1936,32 @@ static int at_xdmac_device_resume(struct dma_chan *chan)
struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
unsigned long flags;
+ int ret;
dev_dbg(chan2dev(chan), "%s\n", __func__);
+ ret = pm_runtime_resume_and_get(atxdmac->dev);
+ if (ret < 0)
+ return ret;
+
spin_lock_irqsave(&atchan->lock, flags);
- if (!at_xdmac_chan_is_paused(atchan)) {
- spin_unlock_irqrestore(&atchan->lock, flags);
- return 0;
- }
+ if (!at_xdmac_chan_is_paused(atchan))
+ goto unlock;
+
+ /* Increment runtime PM ref counter for each active descriptor. */
+ ret = at_xdmac_runtime_resume_descriptors(atchan);
+ if (ret < 0)
+ goto unlock;
at_xdmac_write(atxdmac, atxdmac->layout->grwr, atchan->mask);
clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
+
+unlock:
spin_unlock_irqrestore(&atchan->lock, flags);
+ pm_runtime_mark_last_busy(atxdmac->dev);
+ pm_runtime_put_autosuspend(atxdmac->dev);
- return 0;
+ return ret;
}
static int at_xdmac_device_terminate_all(struct dma_chan *chan)
@@ -1854,9 +1970,14 @@ static int at_xdmac_device_terminate_all(struct dma_chan *chan)
struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
unsigned long flags;
+ int ret;
dev_dbg(chan2dev(chan), "%s\n", __func__);
+ ret = pm_runtime_resume_and_get(atxdmac->dev);
+ if (ret < 0)
+ return ret;
+
spin_lock_irqsave(&atchan->lock, flags);
at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
while (at_xdmac_read(atxdmac, AT_XDMAC_GS) & atchan->mask)
@@ -1867,12 +1988,24 @@ static int at_xdmac_device_terminate_all(struct dma_chan *chan)
list_del(&desc->xfer_node);
list_splice_tail_init(&desc->descs_list,
&atchan->free_descs_list);
+ /*
+ * We incremented the runtime PM reference count on
+ * at_xdmac_start_xfer() for this descriptor. Now it's time
+ * to release it.
+ */
+ if (desc->active_xfer) {
+ pm_runtime_put_autosuspend(atxdmac->dev);
+ pm_runtime_mark_last_busy(atxdmac->dev);
+ }
}
clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
clear_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status);
spin_unlock_irqrestore(&atchan->lock, flags);
+ pm_runtime_mark_last_busy(atxdmac->dev);
+ pm_runtime_put_autosuspend(atxdmac->dev);
+
return 0;
}
@@ -1974,6 +2107,11 @@ static int __maybe_unused atmel_xdmac_suspend(struct device *dev)
{
struct at_xdmac *atxdmac = dev_get_drvdata(dev);
struct dma_chan *chan, *_chan;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(atxdmac->dev);
+ if (ret < 0)
+ return ret;
list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) {
struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
@@ -1986,12 +2124,13 @@ static int __maybe_unused atmel_xdmac_suspend(struct device *dev)
atchan->save_cnda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA);
atchan->save_cndc = at_xdmac_chan_read(atchan, AT_XDMAC_CNDC);
}
+
+ at_xdmac_runtime_suspend_descriptors(atchan);
}
atxdmac->save_gim = at_xdmac_read(atxdmac, AT_XDMAC_GIM);
at_xdmac_off(atxdmac);
- clk_disable_unprepare(atxdmac->clk);
- return 0;
+ return pm_runtime_force_suspend(atxdmac->dev);
}
static int __maybe_unused atmel_xdmac_resume(struct device *dev)
@@ -2003,8 +2142,8 @@ static int __maybe_unused atmel_xdmac_resume(struct device *dev)
int i;
int ret;
- ret = clk_prepare_enable(atxdmac->clk);
- if (ret)
+ ret = pm_runtime_force_resume(atxdmac->dev);
+ if (ret < 0)
return ret;
at_xdmac_axi_config(pdev);
@@ -2019,6 +2158,11 @@ static int __maybe_unused atmel_xdmac_resume(struct device *dev)
at_xdmac_write(atxdmac, AT_XDMAC_GIE, atxdmac->save_gim);
list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) {
atchan = to_at_xdmac_chan(chan);
+
+ ret = at_xdmac_runtime_resume_descriptors(atchan);
+ if (ret < 0)
+ return ret;
+
at_xdmac_chan_write(atchan, AT_XDMAC_CC, atchan->save_cc);
if (at_xdmac_chan_is_cyclic(atchan)) {
if (at_xdmac_chan_is_paused(atchan))
@@ -2030,9 +2174,29 @@ static int __maybe_unused atmel_xdmac_resume(struct device *dev)
at_xdmac_write(atxdmac, AT_XDMAC_GE, atchan->mask);
}
}
+
+ pm_runtime_mark_last_busy(atxdmac->dev);
+ pm_runtime_put_autosuspend(atxdmac->dev);
+
+ return 0;
+}
+
+static int __maybe_unused atmel_xdmac_runtime_suspend(struct device *dev)
+{
+ struct at_xdmac *atxdmac = dev_get_drvdata(dev);
+
+ clk_disable(atxdmac->clk);
+
return 0;
}
+static int __maybe_unused atmel_xdmac_runtime_resume(struct device *dev)
+{
+ struct at_xdmac *atxdmac = dev_get_drvdata(dev);
+
+ return clk_enable(atxdmac->clk);
+}
+
static int at_xdmac_probe(struct platform_device *pdev)
{
struct at_xdmac *atxdmac;
@@ -2071,6 +2235,7 @@ static int at_xdmac_probe(struct platform_device *pdev)
atxdmac->regs = base;
atxdmac->irq = irq;
+ atxdmac->dev = &pdev->dev;
atxdmac->layout = of_device_get_match_data(&pdev->dev);
if (!atxdmac->layout)
@@ -2135,11 +2300,20 @@ static int at_xdmac_probe(struct platform_device *pdev)
atxdmac->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
atxdmac->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
- /* Disable all chans and interrupts. */
- at_xdmac_off(atxdmac);
+ platform_set_drvdata(pdev, atxdmac);
+
+ pm_runtime_set_autosuspend_delay(&pdev->dev, 500);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_get_noresume(&pdev->dev);
/* Init channels. */
INIT_LIST_HEAD(&atxdmac->dma.channels);
+
+ /* Disable all chans and interrupts. */
+ at_xdmac_off(atxdmac);
+
for (i = 0; i < nr_channels; i++) {
struct at_xdmac_chan *atchan = &atxdmac->chan[i];
@@ -2159,12 +2333,11 @@ static int at_xdmac_probe(struct platform_device *pdev)
while (at_xdmac_chan_read(atchan, AT_XDMAC_CIS))
cpu_relax();
}
- platform_set_drvdata(pdev, atxdmac);
ret = dma_async_device_register(&atxdmac->dma);
if (ret) {
dev_err(&pdev->dev, "fail to register DMA engine device\n");
- goto err_clk_disable;
+ goto err_pm_disable;
}
ret = of_dma_controller_register(pdev->dev.of_node,
@@ -2179,10 +2352,18 @@ static int at_xdmac_probe(struct platform_device *pdev)
at_xdmac_axi_config(pdev);
+ pm_runtime_mark_last_busy(&pdev->dev);
+ pm_runtime_put_autosuspend(&pdev->dev);
+
return 0;
err_dma_unregister:
dma_async_device_unregister(&atxdmac->dma);
+err_pm_disable:
+ pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
err_clk_disable:
clk_disable_unprepare(atxdmac->clk);
err_free_irq:
@@ -2198,6 +2379,9 @@ static int at_xdmac_remove(struct platform_device *pdev)
at_xdmac_off(atxdmac);
of_dma_controller_free(pdev->dev.of_node);
dma_async_device_unregister(&atxdmac->dma);
+ pm_runtime_disable(atxdmac->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
clk_disable_unprepare(atxdmac->clk);
free_irq(atxdmac->irq, atxdmac);
@@ -2215,6 +2399,8 @@ static int at_xdmac_remove(struct platform_device *pdev)
static const struct dev_pm_ops __maybe_unused atmel_xdmac_dev_pm_ops = {
.prepare = atmel_xdmac_prepare,
SET_LATE_SYSTEM_SLEEP_PM_OPS(atmel_xdmac_suspend, atmel_xdmac_resume)
+ SET_RUNTIME_PM_OPS(atmel_xdmac_runtime_suspend,
+ atmel_xdmac_runtime_resume, NULL)
};
static const struct of_device_id atmel_xdmac_dt_ids[] = {
diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c
index 630dfbb01a40..0807fb9eb262 100644
--- a/drivers/dma/bcm2835-dma.c
+++ b/drivers/dma/bcm2835-dma.c
@@ -878,7 +878,6 @@ static struct dma_chan *bcm2835_dma_xlate(struct of_phandle_args *spec,
static int bcm2835_dma_probe(struct platform_device *pdev)
{
struct bcm2835_dmadev *od;
- struct resource *res;
void __iomem *base;
int rc;
int i, j;
@@ -902,8 +901,7 @@ static int bcm2835_dma_probe(struct platform_device *pdev)
dma_set_max_seg_size(&pdev->dev, 0x3FFFFFFF);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c
index f30dabc99795..a812b9b00e6b 100644
--- a/drivers/dma/dma-axi-dmac.c
+++ b/drivers/dma/dma-axi-dmac.c
@@ -910,7 +910,6 @@ static int axi_dmac_probe(struct platform_device *pdev)
{
struct dma_device *dma_dev;
struct axi_dmac *dmac;
- struct resource *res;
struct regmap *regmap;
unsigned int version;
int ret;
@@ -925,8 +924,7 @@ static int axi_dmac_probe(struct platform_device *pdev)
if (dmac->irq == 0)
return -EINVAL;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- dmac->base = devm_ioremap_resource(&pdev->dev, res);
+ dmac->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dmac->base))
return PTR_ERR(dmac->base);
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 8a6e6b60d66f..c24bca210104 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -172,7 +172,7 @@ static ssize_t memcpy_count_show(struct device *dev,
if (chan) {
for_each_possible_cpu(i)
count += per_cpu_ptr(chan->local, i)->memcpy_count;
- err = sprintf(buf, "%lu\n", count);
+ err = sysfs_emit(buf, "%lu\n", count);
} else
err = -ENODEV;
mutex_unlock(&dma_list_mutex);
@@ -194,7 +194,7 @@ static ssize_t bytes_transferred_show(struct device *dev,
if (chan) {
for_each_possible_cpu(i)
count += per_cpu_ptr(chan->local, i)->bytes_transferred;
- err = sprintf(buf, "%lu\n", count);
+ err = sysfs_emit(buf, "%lu\n", count);
} else
err = -ENODEV;
mutex_unlock(&dma_list_mutex);
@@ -212,7 +212,7 @@ static ssize_t in_use_show(struct device *dev, struct device_attribute *attr,
mutex_lock(&dma_list_mutex);
chan = dev_to_dma_chan(dev);
if (chan)
- err = sprintf(buf, "%d\n", chan->client_count);
+ err = sysfs_emit(buf, "%d\n", chan->client_count);
else
err = -ENODEV;
mutex_unlock(&dma_list_mutex);
@@ -1323,11 +1323,8 @@ void dma_async_device_unregister(struct dma_device *device)
}
EXPORT_SYMBOL(dma_async_device_unregister);
-static void dmam_device_release(struct device *dev, void *res)
+static void dmaenginem_async_device_unregister(void *device)
{
- struct dma_device *device;
-
- device = *(struct dma_device **)res;
dma_async_device_unregister(device);
}
@@ -1339,22 +1336,13 @@ static void dmam_device_release(struct device *dev, void *res)
*/
int dmaenginem_async_device_register(struct dma_device *device)
{
- void *p;
int ret;
- p = devres_alloc(dmam_device_release, sizeof(void *), GFP_KERNEL);
- if (!p)
- return -ENOMEM;
-
ret = dma_async_device_register(device);
- if (!ret) {
- *(struct dma_device **)p = device;
- devres_add(device->dev, p);
- } else {
- devres_free(p);
- }
+ if (ret)
+ return ret;
- return ret;
+ return devm_add_action(device->dev, dmaenginem_async_device_unregister, device);
}
EXPORT_SYMBOL(dmaenginem_async_device_register);
diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
index bf85aa0979ec..4169e1d7d5ca 100644
--- a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
+++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
@@ -325,8 +325,6 @@ dma_chan_tx_status(struct dma_chan *dchan, dma_cookie_t cookie,
len = vd_to_axi_desc(vdesc)->hw_desc[0].len;
completed_length = completed_blocks * len;
bytes = length - completed_length;
- } else {
- bytes = vd_to_axi_desc(vdesc)->length;
}
spin_unlock_irqrestore(&chan->vc.lock, flags);
@@ -1371,7 +1369,6 @@ static int dw_probe(struct platform_device *pdev)
{
struct device_node *node = pdev->dev.of_node;
struct axi_dma_chip *chip;
- struct resource *mem;
struct dw_axi_dma *dw;
struct dw_axi_dma_hcfg *hdata;
u32 i;
@@ -1397,8 +1394,7 @@ static int dw_probe(struct platform_device *pdev)
if (chip->irq < 0)
return chip->irq;
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- chip->regs = devm_ioremap_resource(chip->dev, mem);
+ chip->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(chip->regs))
return PTR_ERR(chip->regs);
diff --git a/drivers/dma/dw-edma/Kconfig b/drivers/dma/dw-edma/Kconfig
index 7ff17b2db6a1..2b6f2679508d 100644
--- a/drivers/dma/dw-edma/Kconfig
+++ b/drivers/dma/dw-edma/Kconfig
@@ -9,11 +9,14 @@ config DW_EDMA
Support the Synopsys DesignWare eDMA controller, normally
implemented on endpoints SoCs.
+if DW_EDMA
+
config DW_EDMA_PCIE
tristate "Synopsys DesignWare eDMA PCIe driver"
depends on PCI && PCI_MSI
- select DW_EDMA
help
Provides a glue-logic between the Synopsys DesignWare
eDMA controller and an endpoint PCIe device. This also serves
as a reference design to whom desires to use this IP.
+
+endif # DW_EDMA
diff --git a/drivers/dma/dw-edma/dw-edma-core.c b/drivers/dma/dw-edma/dw-edma-core.c
index c54b24ff5206..1906a836f0aa 100644
--- a/drivers/dma/dw-edma/dw-edma-core.c
+++ b/drivers/dma/dw-edma/dw-edma-core.c
@@ -39,6 +39,17 @@ struct dw_edma_desc *vd2dw_edma_desc(struct virt_dma_desc *vd)
return container_of(vd, struct dw_edma_desc, vd);
}
+static inline
+u64 dw_edma_get_pci_address(struct dw_edma_chan *chan, phys_addr_t cpu_addr)
+{
+ struct dw_edma_chip *chip = chan->dw->chip;
+
+ if (chip->ops->pci_address)
+ return chip->ops->pci_address(chip->dev, cpu_addr);
+
+ return cpu_addr;
+}
+
static struct dw_edma_burst *dw_edma_alloc_burst(struct dw_edma_chunk *chunk)
{
struct dw_edma_burst *burst;
@@ -197,6 +208,24 @@ static void dw_edma_start_transfer(struct dw_edma_chan *chan)
desc->chunks_alloc--;
}
+static void dw_edma_device_caps(struct dma_chan *dchan,
+ struct dma_slave_caps *caps)
+{
+ struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
+
+ if (chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL) {
+ if (chan->dir == EDMA_DIR_READ)
+ caps->directions = BIT(DMA_DEV_TO_MEM);
+ else
+ caps->directions = BIT(DMA_MEM_TO_DEV);
+ } else {
+ if (chan->dir == EDMA_DIR_WRITE)
+ caps->directions = BIT(DMA_DEV_TO_MEM);
+ else
+ caps->directions = BIT(DMA_MEM_TO_DEV);
+ }
+}
+
static int dw_edma_device_config(struct dma_chan *dchan,
struct dma_slave_config *config)
{
@@ -327,11 +356,12 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer)
{
struct dw_edma_chan *chan = dchan2dw_edma_chan(xfer->dchan);
enum dma_transfer_direction dir = xfer->direction;
- phys_addr_t src_addr, dst_addr;
struct scatterlist *sg = NULL;
struct dw_edma_chunk *chunk;
struct dw_edma_burst *burst;
struct dw_edma_desc *desc;
+ u64 src_addr, dst_addr;
+ size_t fsz = 0;
u32 cnt = 0;
int i;
@@ -381,9 +411,9 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer)
if (xfer->xfer.sg.len < 1)
return NULL;
} else if (xfer->type == EDMA_XFER_INTERLEAVED) {
- if (!xfer->xfer.il->numf)
+ if (!xfer->xfer.il->numf || xfer->xfer.il->frame_size < 1)
return NULL;
- if (xfer->xfer.il->numf > 0 && xfer->xfer.il->frame_size > 0)
+ if (!xfer->xfer.il->src_inc || !xfer->xfer.il->dst_inc)
return NULL;
} else {
return NULL;
@@ -405,16 +435,19 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer)
dst_addr = chan->config.dst_addr;
}
+ if (dir == DMA_DEV_TO_MEM)
+ src_addr = dw_edma_get_pci_address(chan, (phys_addr_t)src_addr);
+ else
+ dst_addr = dw_edma_get_pci_address(chan, (phys_addr_t)dst_addr);
+
if (xfer->type == EDMA_XFER_CYCLIC) {
cnt = xfer->xfer.cyclic.cnt;
} else if (xfer->type == EDMA_XFER_SCATTER_GATHER) {
cnt = xfer->xfer.sg.len;
sg = xfer->xfer.sg.sgl;
} else if (xfer->type == EDMA_XFER_INTERLEAVED) {
- if (xfer->xfer.il->numf > 0)
- cnt = xfer->xfer.il->numf;
- else
- cnt = xfer->xfer.il->frame_size;
+ cnt = xfer->xfer.il->numf * xfer->xfer.il->frame_size;
+ fsz = xfer->xfer.il->frame_size;
}
for (i = 0; i < cnt; i++) {
@@ -436,7 +469,7 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer)
else if (xfer->type == EDMA_XFER_SCATTER_GATHER)
burst->sz = sg_dma_len(sg);
else if (xfer->type == EDMA_XFER_INTERLEAVED)
- burst->sz = xfer->xfer.il->sgl[i].size;
+ burst->sz = xfer->xfer.il->sgl[i % fsz].size;
chunk->ll_region.sz += burst->sz;
desc->alloc_sz += burst->sz;
@@ -455,6 +488,8 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer)
* and destination addresses are increased
* by the same portion (data length)
*/
+ } else if (xfer->type == EDMA_XFER_INTERLEAVED) {
+ burst->dar = dst_addr;
}
} else {
burst->dar = dst_addr;
@@ -470,25 +505,24 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer)
* and destination addresses are increased
* by the same portion (data length)
*/
+ } else if (xfer->type == EDMA_XFER_INTERLEAVED) {
+ burst->sar = src_addr;
}
}
if (xfer->type == EDMA_XFER_SCATTER_GATHER) {
sg = sg_next(sg);
- } else if (xfer->type == EDMA_XFER_INTERLEAVED &&
- xfer->xfer.il->frame_size > 0) {
+ } else if (xfer->type == EDMA_XFER_INTERLEAVED) {
struct dma_interleaved_template *il = xfer->xfer.il;
- struct data_chunk *dc = &il->sgl[i];
+ struct data_chunk *dc = &il->sgl[i % fsz];
- if (il->src_sgl) {
- src_addr += burst->sz;
+ src_addr += burst->sz;
+ if (il->src_sgl)
src_addr += dmaengine_get_src_icg(il, dc);
- }
- if (il->dst_sgl) {
- dst_addr += burst->sz;
+ dst_addr += burst->sz;
+ if (il->dst_sgl)
dst_addr += dmaengine_get_dst_icg(il, dc);
- }
}
}
@@ -701,92 +735,76 @@ static void dw_edma_free_chan_resources(struct dma_chan *dchan)
}
}
-static int dw_edma_channel_setup(struct dw_edma *dw, bool write,
- u32 wr_alloc, u32 rd_alloc)
+static int dw_edma_channel_setup(struct dw_edma *dw, u32 wr_alloc, u32 rd_alloc)
{
struct dw_edma_chip *chip = dw->chip;
- struct dw_edma_region *dt_region;
struct device *dev = chip->dev;
struct dw_edma_chan *chan;
struct dw_edma_irq *irq;
struct dma_device *dma;
- u32 alloc, off_alloc;
- u32 i, j, cnt;
- int err = 0;
+ u32 i, ch_cnt;
u32 pos;
- if (write) {
- i = 0;
- cnt = dw->wr_ch_cnt;
- dma = &dw->wr_edma;
- alloc = wr_alloc;
- off_alloc = 0;
- } else {
- i = dw->wr_ch_cnt;
- cnt = dw->rd_ch_cnt;
- dma = &dw->rd_edma;
- alloc = rd_alloc;
- off_alloc = wr_alloc;
- }
+ ch_cnt = dw->wr_ch_cnt + dw->rd_ch_cnt;
+ dma = &dw->dma;
INIT_LIST_HEAD(&dma->channels);
- for (j = 0; (alloc || dw->nr_irqs == 1) && j < cnt; j++, i++) {
+
+ for (i = 0; i < ch_cnt; i++) {
chan = &dw->chan[i];
- dt_region = devm_kzalloc(dev, sizeof(*dt_region), GFP_KERNEL);
- if (!dt_region)
- return -ENOMEM;
+ chan->dw = dw;
- chan->vc.chan.private = dt_region;
+ if (i < dw->wr_ch_cnt) {
+ chan->id = i;
+ chan->dir = EDMA_DIR_WRITE;
+ } else {
+ chan->id = i - dw->wr_ch_cnt;
+ chan->dir = EDMA_DIR_READ;
+ }
- chan->dw = dw;
- chan->id = j;
- chan->dir = write ? EDMA_DIR_WRITE : EDMA_DIR_READ;
chan->configured = false;
chan->request = EDMA_REQ_NONE;
chan->status = EDMA_ST_IDLE;
- if (write)
- chan->ll_max = (chip->ll_region_wr[j].sz / EDMA_LL_SZ);
+ if (chan->dir == EDMA_DIR_WRITE)
+ chan->ll_max = (chip->ll_region_wr[chan->id].sz / EDMA_LL_SZ);
else
- chan->ll_max = (chip->ll_region_rd[j].sz / EDMA_LL_SZ);
+ chan->ll_max = (chip->ll_region_rd[chan->id].sz / EDMA_LL_SZ);
chan->ll_max -= 1;
dev_vdbg(dev, "L. List:\tChannel %s[%u] max_cnt=%u\n",
- write ? "write" : "read", j, chan->ll_max);
+ chan->dir == EDMA_DIR_WRITE ? "write" : "read",
+ chan->id, chan->ll_max);
if (dw->nr_irqs == 1)
pos = 0;
+ else if (chan->dir == EDMA_DIR_WRITE)
+ pos = chan->id % wr_alloc;
else
- pos = off_alloc + (j % alloc);
+ pos = wr_alloc + chan->id % rd_alloc;
irq = &dw->irq[pos];
- if (write)
- irq->wr_mask |= BIT(j);
+ if (chan->dir == EDMA_DIR_WRITE)
+ irq->wr_mask |= BIT(chan->id);
else
- irq->rd_mask |= BIT(j);
+ irq->rd_mask |= BIT(chan->id);
irq->dw = dw;
memcpy(&chan->msi, &irq->msi, sizeof(chan->msi));
dev_vdbg(dev, "MSI:\t\tChannel %s[%u] addr=0x%.8x%.8x, data=0x%.8x\n",
- write ? "write" : "read", j,
+ chan->dir == EDMA_DIR_WRITE ? "write" : "read", chan->id,
chan->msi.address_hi, chan->msi.address_lo,
chan->msi.data);
chan->vc.desc_free = vchan_free_desc;
- vchan_init(&chan->vc, dma);
+ chan->vc.chan.private = chan->dir == EDMA_DIR_WRITE ?
+ &dw->chip->dt_region_wr[chan->id] :
+ &dw->chip->dt_region_rd[chan->id];
- if (write) {
- dt_region->paddr = chip->dt_region_wr[j].paddr;
- dt_region->vaddr = chip->dt_region_wr[j].vaddr;
- dt_region->sz = chip->dt_region_wr[j].sz;
- } else {
- dt_region->paddr = chip->dt_region_rd[j].paddr;
- dt_region->vaddr = chip->dt_region_rd[j].vaddr;
- dt_region->sz = chip->dt_region_rd[j].sz;
- }
+ vchan_init(&chan->vc, dma);
dw_edma_v0_core_device_config(chan);
}
@@ -797,16 +815,16 @@ static int dw_edma_channel_setup(struct dw_edma *dw, bool write,
dma_cap_set(DMA_CYCLIC, dma->cap_mask);
dma_cap_set(DMA_PRIVATE, dma->cap_mask);
dma_cap_set(DMA_INTERLEAVE, dma->cap_mask);
- dma->directions = BIT(write ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV);
+ dma->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
dma->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
dma->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
dma->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
- dma->chancnt = cnt;
/* Set DMA channel callbacks */
dma->dev = chip->dev;
dma->device_alloc_chan_resources = dw_edma_alloc_chan_resources;
dma->device_free_chan_resources = dw_edma_free_chan_resources;
+ dma->device_caps = dw_edma_device_caps;
dma->device_config = dw_edma_device_config;
dma->device_pause = dw_edma_device_pause;
dma->device_resume = dw_edma_device_resume;
@@ -820,9 +838,7 @@ static int dw_edma_channel_setup(struct dw_edma *dw, bool write,
dma_set_max_seg_size(dma->dev, U32_MAX);
/* Register DMA device */
- err = dma_async_device_register(dma);
-
- return err;
+ return dma_async_device_register(dma);
}
static inline void dw_edma_dec_irq_alloc(int *nr_irqs, u32 *alloc, u16 cnt)
@@ -893,10 +909,8 @@ static int dw_edma_irq_request(struct dw_edma *dw,
dw_edma_interrupt_read,
IRQF_SHARED, dw->name,
&dw->irq[i]);
- if (err) {
- dw->nr_irqs = i;
- return err;
- }
+ if (err)
+ goto err_irq_free;
if (irq_get_msi_desc(irq))
get_cached_msi_msg(irq, &dw->irq[i].msi);
@@ -905,6 +919,14 @@ static int dw_edma_irq_request(struct dw_edma *dw,
dw->nr_irqs = i;
}
+ return 0;
+
+err_irq_free:
+ for (i--; i >= 0; i--) {
+ irq = chip->ops->irq_vector(dev, i);
+ free_irq(irq, &dw->irq[i]);
+ }
+
return err;
}
@@ -951,7 +973,8 @@ int dw_edma_probe(struct dw_edma_chip *chip)
if (!dw->chan)
return -ENOMEM;
- snprintf(dw->name, sizeof(dw->name), "dw-edma-core:%d", chip->id);
+ snprintf(dw->name, sizeof(dw->name), "dw-edma-core:%s",
+ dev_name(chip->dev));
/* Disable eDMA, only to establish the ideal initial conditions */
dw_edma_v0_core_off(dw);
@@ -961,13 +984,8 @@ int dw_edma_probe(struct dw_edma_chip *chip)
if (err)
return err;
- /* Setup write channels */
- err = dw_edma_channel_setup(dw, true, wr_alloc, rd_alloc);
- if (err)
- goto err_irq_free;
-
- /* Setup read channels */
- err = dw_edma_channel_setup(dw, false, wr_alloc, rd_alloc);
+ /* Setup write/read channels */
+ err = dw_edma_channel_setup(dw, wr_alloc, rd_alloc);
if (err)
goto err_irq_free;
@@ -993,6 +1011,10 @@ int dw_edma_remove(struct dw_edma_chip *chip)
struct dw_edma *dw = chip->dw;
int i;
+ /* Skip removal if no private data found */
+ if (!dw)
+ return -ENODEV;
+
/* Disable eDMA */
dw_edma_v0_core_off(dw);
@@ -1001,23 +1023,13 @@ int dw_edma_remove(struct dw_edma_chip *chip)
free_irq(chip->ops->irq_vector(dev, i), &dw->irq[i]);
/* Deregister eDMA device */
- dma_async_device_unregister(&dw->wr_edma);
- list_for_each_entry_safe(chan, _chan, &dw->wr_edma.channels,
+ dma_async_device_unregister(&dw->dma);
+ list_for_each_entry_safe(chan, _chan, &dw->dma.channels,
vc.chan.device_node) {
tasklet_kill(&chan->vc.task);
list_del(&chan->vc.chan.device_node);
}
- dma_async_device_unregister(&dw->rd_edma);
- list_for_each_entry_safe(chan, _chan, &dw->rd_edma.channels,
- vc.chan.device_node) {
- tasklet_kill(&chan->vc.task);
- list_del(&chan->vc.chan.device_node);
- }
-
- /* Turn debugfs off */
- dw_edma_v0_core_debugfs_off(dw);
-
return 0;
}
EXPORT_SYMBOL_GPL(dw_edma_remove);
diff --git a/drivers/dma/dw-edma/dw-edma-core.h b/drivers/dma/dw-edma/dw-edma-core.h
index 85df2d511907..0ab2b6dba880 100644
--- a/drivers/dma/dw-edma/dw-edma-core.h
+++ b/drivers/dma/dw-edma/dw-edma-core.h
@@ -96,12 +96,11 @@ struct dw_edma_irq {
};
struct dw_edma {
- char name[20];
+ char name[32];
- struct dma_device wr_edma;
- u16 wr_ch_cnt;
+ struct dma_device dma;
- struct dma_device rd_edma;
+ u16 wr_ch_cnt;
u16 rd_ch_cnt;
struct dw_edma_irq *irq;
@@ -112,9 +111,6 @@ struct dw_edma {
raw_spinlock_t lock; /* Only for legacy */
struct dw_edma_chip *chip;
-#ifdef CONFIG_DEBUG_FS
- struct dentry *debugfs;
-#endif /* CONFIG_DEBUG_FS */
};
struct dw_edma_sg {
diff --git a/drivers/dma/dw-edma/dw-edma-pcie.c b/drivers/dma/dw-edma/dw-edma-pcie.c
index d6b5e2463884..2b40f2b44f5e 100644
--- a/drivers/dma/dw-edma/dw-edma-pcie.c
+++ b/drivers/dma/dw-edma/dw-edma-pcie.c
@@ -95,8 +95,23 @@ static int dw_edma_pcie_irq_vector(struct device *dev, unsigned int nr)
return pci_irq_vector(to_pci_dev(dev), nr);
}
+static u64 dw_edma_pcie_address(struct device *dev, phys_addr_t cpu_addr)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct pci_bus_region region;
+ struct resource res = {
+ .flags = IORESOURCE_MEM,
+ .start = cpu_addr,
+ .end = cpu_addr,
+ };
+
+ pcibios_resource_to_bus(pdev->bus, &region, &res);
+ return region.start;
+}
+
static const struct dw_edma_core_ops dw_edma_pcie_core_ops = {
.irq_vector = dw_edma_pcie_irq_vector,
+ .pci_address = dw_edma_pcie_address,
};
static void dw_edma_pcie_get_vsec_dma_data(struct pci_dev *pdev,
@@ -207,7 +222,6 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
/* Data structure initialization */
chip->dev = dev;
- chip->id = pdev->devfn;
chip->mf = vsec_data.mf;
chip->nr_irqs = nr_irqs;
@@ -226,21 +240,21 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
struct dw_edma_block *ll_block = &vsec_data.ll_wr[i];
struct dw_edma_block *dt_block = &vsec_data.dt_wr[i];
- ll_region->vaddr = pcim_iomap_table(pdev)[ll_block->bar];
- if (!ll_region->vaddr)
+ ll_region->vaddr.io = pcim_iomap_table(pdev)[ll_block->bar];
+ if (!ll_region->vaddr.io)
return -ENOMEM;
- ll_region->vaddr += ll_block->off;
- ll_region->paddr = pdev->resource[ll_block->bar].start;
+ ll_region->vaddr.io += ll_block->off;
+ ll_region->paddr = pci_bus_address(pdev, ll_block->bar);
ll_region->paddr += ll_block->off;
ll_region->sz = ll_block->sz;
- dt_region->vaddr = pcim_iomap_table(pdev)[dt_block->bar];
- if (!dt_region->vaddr)
+ dt_region->vaddr.io = pcim_iomap_table(pdev)[dt_block->bar];
+ if (!dt_region->vaddr.io)
return -ENOMEM;
- dt_region->vaddr += dt_block->off;
- dt_region->paddr = pdev->resource[dt_block->bar].start;
+ dt_region->vaddr.io += dt_block->off;
+ dt_region->paddr = pci_bus_address(pdev, dt_block->bar);
dt_region->paddr += dt_block->off;
dt_region->sz = dt_block->sz;
}
@@ -251,21 +265,21 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
struct dw_edma_block *ll_block = &vsec_data.ll_rd[i];
struct dw_edma_block *dt_block = &vsec_data.dt_rd[i];
- ll_region->vaddr = pcim_iomap_table(pdev)[ll_block->bar];
- if (!ll_region->vaddr)
+ ll_region->vaddr.io = pcim_iomap_table(pdev)[ll_block->bar];
+ if (!ll_region->vaddr.io)
return -ENOMEM;
- ll_region->vaddr += ll_block->off;
- ll_region->paddr = pdev->resource[ll_block->bar].start;
+ ll_region->vaddr.io += ll_block->off;
+ ll_region->paddr = pci_bus_address(pdev, ll_block->bar);
ll_region->paddr += ll_block->off;
ll_region->sz = ll_block->sz;
- dt_region->vaddr = pcim_iomap_table(pdev)[dt_block->bar];
- if (!dt_region->vaddr)
+ dt_region->vaddr.io = pcim_iomap_table(pdev)[dt_block->bar];
+ if (!dt_region->vaddr.io)
return -ENOMEM;
- dt_region->vaddr += dt_block->off;
- dt_region->paddr = pdev->resource[dt_block->bar].start;
+ dt_region->vaddr.io += dt_block->off;
+ dt_region->paddr = pci_bus_address(pdev, dt_block->bar);
dt_region->paddr += dt_block->off;
dt_region->sz = dt_block->sz;
}
@@ -289,24 +303,24 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
pci_dbg(pdev, "L. List:\tWRITE CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
i, vsec_data.ll_wr[i].bar,
vsec_data.ll_wr[i].off, chip->ll_region_wr[i].sz,
- chip->ll_region_wr[i].vaddr, &chip->ll_region_wr[i].paddr);
+ chip->ll_region_wr[i].vaddr.io, &chip->ll_region_wr[i].paddr);
pci_dbg(pdev, "Data:\tWRITE CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
i, vsec_data.dt_wr[i].bar,
vsec_data.dt_wr[i].off, chip->dt_region_wr[i].sz,
- chip->dt_region_wr[i].vaddr, &chip->dt_region_wr[i].paddr);
+ chip->dt_region_wr[i].vaddr.io, &chip->dt_region_wr[i].paddr);
}
for (i = 0; i < chip->ll_rd_cnt; i++) {
pci_dbg(pdev, "L. List:\tREAD CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
i, vsec_data.ll_rd[i].bar,
vsec_data.ll_rd[i].off, chip->ll_region_rd[i].sz,
- chip->ll_region_rd[i].vaddr, &chip->ll_region_rd[i].paddr);
+ chip->ll_region_rd[i].vaddr.io, &chip->ll_region_rd[i].paddr);
pci_dbg(pdev, "Data:\tREAD CH%.2u, BAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%p, p=%pa)\n",
i, vsec_data.dt_rd[i].bar,
vsec_data.dt_rd[i].off, chip->dt_region_rd[i].sz,
- chip->dt_region_rd[i].vaddr, &chip->dt_region_rd[i].paddr);
+ chip->dt_region_rd[i].vaddr.io, &chip->dt_region_rd[i].paddr);
}
pci_dbg(pdev, "Nr. IRQs:\t%u\n", chip->nr_irqs);
diff --git a/drivers/dma/dw-edma/dw-edma-v0-core.c b/drivers/dma/dw-edma/dw-edma-v0-core.c
index 77e6cfe52e0a..72e79a0c0a4e 100644
--- a/drivers/dma/dw-edma/dw-edma-v0-core.c
+++ b/drivers/dma/dw-edma/dw-edma-v0-core.c
@@ -8,6 +8,8 @@
#include <linux/bitfield.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+
#include "dw-edma-core.h"
#include "dw-edma-v0-core.h"
#include "dw-edma-v0-regs.h"
@@ -53,8 +55,6 @@ static inline struct dw_edma_v0_regs __iomem *__dw_regs(struct dw_edma *dw)
SET_32(dw, rd_##name, value); \
} while (0)
-#ifdef CONFIG_64BIT
-
#define SET_64(dw, name, value) \
writeq(value, &(__dw_regs(dw)->name))
@@ -80,8 +80,6 @@ static inline struct dw_edma_v0_regs __iomem *__dw_regs(struct dw_edma *dw)
SET_64(dw, rd_##name, value); \
} while (0)
-#endif /* CONFIG_64BIT */
-
#define SET_COMPAT(dw, name, value) \
writel(value, &(__dw_regs(dw)->type.unroll.name))
@@ -161,11 +159,6 @@ static inline u32 readl_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,
#define GET_CH_32(dw, dir, ch, name) \
readl_ch(dw, dir, ch, &(__dw_ch_regs(dw, dir, ch)->name))
-#define SET_LL_32(ll, value) \
- writel(value, ll)
-
-#ifdef CONFIG_64BIT
-
static inline void writeq_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,
u64 value, void __iomem *addr)
{
@@ -192,7 +185,7 @@ static inline void writeq_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,
static inline u64 readq_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,
const void __iomem *addr)
{
- u32 value;
+ u64 value;
if (dw->chip->mf == EDMA_MF_EDMA_LEGACY) {
u32 viewport_sel;
@@ -222,11 +215,6 @@ static inline u64 readq_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,
#define GET_CH_64(dw, dir, ch, name) \
readq_ch(dw, dir, ch, &(__dw_ch_regs(dw, dir, ch)->name))
-#define SET_LL_64(ll, value) \
- writeq(value, ll)
-
-#endif /* CONFIG_64BIT */
-
/* eDMA management callbacks */
void dw_edma_v0_core_off(struct dw_edma *dw)
{
@@ -298,17 +286,53 @@ u32 dw_edma_v0_core_status_abort_int(struct dw_edma *dw, enum dw_edma_dir dir)
GET_RW_32(dw, dir, int_status));
}
+static void dw_edma_v0_write_ll_data(struct dw_edma_chunk *chunk, int i,
+ u32 control, u32 size, u64 sar, u64 dar)
+{
+ ptrdiff_t ofs = i * sizeof(struct dw_edma_v0_lli);
+
+ if (chunk->chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL) {
+ struct dw_edma_v0_lli *lli = chunk->ll_region.vaddr.mem + ofs;
+
+ lli->control = control;
+ lli->transfer_size = size;
+ lli->sar.reg = sar;
+ lli->dar.reg = dar;
+ } else {
+ struct dw_edma_v0_lli __iomem *lli = chunk->ll_region.vaddr.io + ofs;
+
+ writel(control, &lli->control);
+ writel(size, &lli->transfer_size);
+ writeq(sar, &lli->sar.reg);
+ writeq(dar, &lli->dar.reg);
+ }
+}
+
+static void dw_edma_v0_write_ll_link(struct dw_edma_chunk *chunk,
+ int i, u32 control, u64 pointer)
+{
+ ptrdiff_t ofs = i * sizeof(struct dw_edma_v0_lli);
+
+ if (chunk->chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL) {
+ struct dw_edma_v0_llp *llp = chunk->ll_region.vaddr.mem + ofs;
+
+ llp->control = control;
+ llp->llp.reg = pointer;
+ } else {
+ struct dw_edma_v0_llp __iomem *llp = chunk->ll_region.vaddr.io + ofs;
+
+ writel(control, &llp->control);
+ writeq(pointer, &llp->llp.reg);
+ }
+}
+
static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
{
struct dw_edma_burst *child;
struct dw_edma_chan *chan = chunk->chan;
- struct dw_edma_v0_lli __iomem *lli;
- struct dw_edma_v0_llp __iomem *llp;
u32 control = 0, i = 0;
int j;
- lli = chunk->ll_region.vaddr;
-
if (chunk->cb)
control = DW_EDMA_V0_CB;
@@ -320,41 +344,16 @@ static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
if (!(chan->dw->chip->flags & DW_EDMA_CHIP_LOCAL))
control |= DW_EDMA_V0_RIE;
}
- /* Channel control */
- SET_LL_32(&lli[i].control, control);
- /* Transfer size */
- SET_LL_32(&lli[i].transfer_size, child->sz);
- /* SAR */
- #ifdef CONFIG_64BIT
- SET_LL_64(&lli[i].sar.reg, child->sar);
- #else /* CONFIG_64BIT */
- SET_LL_32(&lli[i].sar.lsb, lower_32_bits(child->sar));
- SET_LL_32(&lli[i].sar.msb, upper_32_bits(child->sar));
- #endif /* CONFIG_64BIT */
- /* DAR */
- #ifdef CONFIG_64BIT
- SET_LL_64(&lli[i].dar.reg, child->dar);
- #else /* CONFIG_64BIT */
- SET_LL_32(&lli[i].dar.lsb, lower_32_bits(child->dar));
- SET_LL_32(&lli[i].dar.msb, upper_32_bits(child->dar));
- #endif /* CONFIG_64BIT */
- i++;
+
+ dw_edma_v0_write_ll_data(chunk, i++, control, child->sz,
+ child->sar, child->dar);
}
- llp = (void __iomem *)&lli[i];
control = DW_EDMA_V0_LLP | DW_EDMA_V0_TCB;
if (!chunk->cb)
control |= DW_EDMA_V0_CB;
- /* Channel control */
- SET_LL_32(&llp->control, control);
- /* Linked list */
- #ifdef CONFIG_64BIT
- SET_LL_64(&llp->llp.reg, chunk->ll_region.paddr);
- #else /* CONFIG_64BIT */
- SET_LL_32(&llp->llp.lsb, lower_32_bits(chunk->ll_region.paddr));
- SET_LL_32(&llp->llp.msb, upper_32_bits(chunk->ll_region.paddr));
- #endif /* CONFIG_64BIT */
+ dw_edma_v0_write_ll_link(chunk, i, control, chunk->ll_region.paddr);
}
void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
@@ -504,8 +503,3 @@ void dw_edma_v0_core_debugfs_on(struct dw_edma *dw)
{
dw_edma_v0_debugfs_on(dw);
}
-
-void dw_edma_v0_core_debugfs_off(struct dw_edma *dw)
-{
- dw_edma_v0_debugfs_off(dw);
-}
diff --git a/drivers/dma/dw-edma/dw-edma-v0-core.h b/drivers/dma/dw-edma/dw-edma-v0-core.h
index 75aec6d31b21..ab96a1f48080 100644
--- a/drivers/dma/dw-edma/dw-edma-v0-core.h
+++ b/drivers/dma/dw-edma/dw-edma-v0-core.h
@@ -23,6 +23,5 @@ void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first);
int dw_edma_v0_core_device_config(struct dw_edma_chan *chan);
/* eDMA debug fs callbacks */
void dw_edma_v0_core_debugfs_on(struct dw_edma *dw);
-void dw_edma_v0_core_debugfs_off(struct dw_edma *dw);
#endif /* _DW_EDMA_V0_CORE_H */
diff --git a/drivers/dma/dw-edma/dw-edma-v0-debugfs.c b/drivers/dma/dw-edma/dw-edma-v0-debugfs.c
index 5226c9014703..0745d9e7d259 100644
--- a/drivers/dma/dw-edma/dw-edma-v0-debugfs.c
+++ b/drivers/dma/dw-edma/dw-edma-v0-debugfs.c
@@ -13,76 +13,79 @@
#include "dw-edma-v0-regs.h"
#include "dw-edma-core.h"
-#define REGS_ADDR(name) \
- ((void __force *)&regs->name)
-#define REGISTER(name) \
- { #name, REGS_ADDR(name) }
-
-#define WR_REGISTER(name) \
- { #name, REGS_ADDR(wr_##name) }
-#define RD_REGISTER(name) \
- { #name, REGS_ADDR(rd_##name) }
-
-#define WR_REGISTER_LEGACY(name) \
- { #name, REGS_ADDR(type.legacy.wr_##name) }
+#define REGS_ADDR(dw, name) \
+ ({ \
+ struct dw_edma_v0_regs __iomem *__regs = (dw)->chip->reg_base; \
+ \
+ (void __iomem *)&__regs->name; \
+ })
+
+#define REGS_CH_ADDR(dw, name, _dir, _ch) \
+ ({ \
+ struct dw_edma_v0_ch_regs __iomem *__ch_regs; \
+ \
+ if ((dw)->chip->mf == EDMA_MF_EDMA_LEGACY) \
+ __ch_regs = REGS_ADDR(dw, type.legacy.ch); \
+ else if (_dir == EDMA_DIR_READ) \
+ __ch_regs = REGS_ADDR(dw, type.unroll.ch[_ch].rd); \
+ else \
+ __ch_regs = REGS_ADDR(dw, type.unroll.ch[_ch].wr); \
+ \
+ (void __iomem *)&__ch_regs->name; \
+ })
+
+#define REGISTER(dw, name) \
+ { dw, #name, REGS_ADDR(dw, name) }
+
+#define CTX_REGISTER(dw, name, dir, ch) \
+ { dw, #name, REGS_CH_ADDR(dw, name, dir, ch), dir, ch }
+
+#define WR_REGISTER(dw, name) \
+ { dw, #name, REGS_ADDR(dw, wr_##name) }
+#define RD_REGISTER(dw, name) \
+ { dw, #name, REGS_ADDR(dw, rd_##name) }
+
+#define WR_REGISTER_LEGACY(dw, name) \
+ { dw, #name, REGS_ADDR(dw, type.legacy.wr_##name) }
#define RD_REGISTER_LEGACY(name) \
- { #name, REGS_ADDR(type.legacy.rd_##name) }
+ { dw, #name, REGS_ADDR(dw, type.legacy.rd_##name) }
-#define WR_REGISTER_UNROLL(name) \
- { #name, REGS_ADDR(type.unroll.wr_##name) }
-#define RD_REGISTER_UNROLL(name) \
- { #name, REGS_ADDR(type.unroll.rd_##name) }
+#define WR_REGISTER_UNROLL(dw, name) \
+ { dw, #name, REGS_ADDR(dw, type.unroll.wr_##name) }
+#define RD_REGISTER_UNROLL(dw, name) \
+ { dw, #name, REGS_ADDR(dw, type.unroll.rd_##name) }
#define WRITE_STR "write"
#define READ_STR "read"
#define CHANNEL_STR "channel"
#define REGISTERS_STR "registers"
-static struct dw_edma *dw;
-static struct dw_edma_v0_regs __iomem *regs;
-
-static struct {
- void __iomem *start;
- void __iomem *end;
-} lim[2][EDMA_V0_MAX_NR_CH];
-
-struct debugfs_entries {
+struct dw_edma_debugfs_entry {
+ struct dw_edma *dw;
const char *name;
- dma_addr_t *reg;
+ void __iomem *reg;
+ enum dw_edma_dir dir;
+ u16 ch;
};
static int dw_edma_debugfs_u32_get(void *data, u64 *val)
{
- void __iomem *reg = (void __force __iomem *)data;
+ struct dw_edma_debugfs_entry *entry = data;
+ struct dw_edma *dw = entry->dw;
+ void __iomem *reg = entry->reg;
+
if (dw->chip->mf == EDMA_MF_EDMA_LEGACY &&
- reg >= (void __iomem *)&regs->type.legacy.ch) {
- void __iomem *ptr = &regs->type.legacy.ch;
- u32 viewport_sel = 0;
+ reg >= REGS_ADDR(dw, type.legacy.ch)) {
unsigned long flags;
- u16 ch;
-
- for (ch = 0; ch < dw->wr_ch_cnt; ch++)
- if (lim[0][ch].start >= reg && reg < lim[0][ch].end) {
- ptr += (reg - lim[0][ch].start);
- goto legacy_sel_wr;
- }
-
- for (ch = 0; ch < dw->rd_ch_cnt; ch++)
- if (lim[1][ch].start >= reg && reg < lim[1][ch].end) {
- ptr += (reg - lim[1][ch].start);
- goto legacy_sel_rd;
- }
-
- return 0;
-legacy_sel_rd:
- viewport_sel = BIT(31);
-legacy_sel_wr:
- viewport_sel |= FIELD_PREP(EDMA_V0_VIEWPORT_MASK, ch);
+ u32 viewport_sel;
+
+ viewport_sel = entry->dir == EDMA_DIR_READ ? BIT(31) : 0;
+ viewport_sel |= FIELD_PREP(EDMA_V0_VIEWPORT_MASK, entry->ch);
raw_spin_lock_irqsave(&dw->lock, flags);
- writel(viewport_sel, &regs->type.legacy.viewport_sel);
- *val = readl(ptr);
+ writel(viewport_sel, REGS_ADDR(dw, type.legacy.viewport_sel));
+ *val = readl(reg);
raw_spin_unlock_irqrestore(&dw->lock, flags);
} else {
@@ -93,222 +96,197 @@ legacy_sel_wr:
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_x32, dw_edma_debugfs_u32_get, NULL, "0x%08llx\n");
-static void dw_edma_debugfs_create_x32(const struct debugfs_entries entries[],
- int nr_entries, struct dentry *dir)
+static void dw_edma_debugfs_create_x32(struct dw_edma *dw,
+ const struct dw_edma_debugfs_entry ini[],
+ int nr_entries, struct dentry *dent)
{
+ struct dw_edma_debugfs_entry *entries;
int i;
+ entries = devm_kcalloc(dw->chip->dev, nr_entries, sizeof(*entries),
+ GFP_KERNEL);
+ if (!entries)
+ return;
+
for (i = 0; i < nr_entries; i++) {
- if (!debugfs_create_file_unsafe(entries[i].name, 0444, dir,
- entries[i].reg, &fops_x32))
- break;
+ entries[i] = ini[i];
+
+ debugfs_create_file_unsafe(entries[i].name, 0444, dent,
+ &entries[i], &fops_x32);
}
}
-static void dw_edma_debugfs_regs_ch(struct dw_edma_v0_ch_regs __iomem *regs,
- struct dentry *dir)
+static void dw_edma_debugfs_regs_ch(struct dw_edma *dw, enum dw_edma_dir dir,
+ u16 ch, struct dentry *dent)
{
- int nr_entries;
- const struct debugfs_entries debugfs_regs[] = {
- REGISTER(ch_control1),
- REGISTER(ch_control2),
- REGISTER(transfer_size),
- REGISTER(sar.lsb),
- REGISTER(sar.msb),
- REGISTER(dar.lsb),
- REGISTER(dar.msb),
- REGISTER(llp.lsb),
- REGISTER(llp.msb),
+ struct dw_edma_debugfs_entry debugfs_regs[] = {
+ CTX_REGISTER(dw, ch_control1, dir, ch),
+ CTX_REGISTER(dw, ch_control2, dir, ch),
+ CTX_REGISTER(dw, transfer_size, dir, ch),
+ CTX_REGISTER(dw, sar.lsb, dir, ch),
+ CTX_REGISTER(dw, sar.msb, dir, ch),
+ CTX_REGISTER(dw, dar.lsb, dir, ch),
+ CTX_REGISTER(dw, dar.msb, dir, ch),
+ CTX_REGISTER(dw, llp.lsb, dir, ch),
+ CTX_REGISTER(dw, llp.msb, dir, ch),
};
+ int nr_entries;
nr_entries = ARRAY_SIZE(debugfs_regs);
- dw_edma_debugfs_create_x32(debugfs_regs, nr_entries, dir);
+ dw_edma_debugfs_create_x32(dw, debugfs_regs, nr_entries, dent);
}
-static void dw_edma_debugfs_regs_wr(struct dentry *dir)
+static noinline_for_stack void
+dw_edma_debugfs_regs_wr(struct dw_edma *dw, struct dentry *dent)
{
- const struct debugfs_entries debugfs_regs[] = {
+ const struct dw_edma_debugfs_entry debugfs_regs[] = {
/* eDMA global registers */
- WR_REGISTER(engine_en),
- WR_REGISTER(doorbell),
- WR_REGISTER(ch_arb_weight.lsb),
- WR_REGISTER(ch_arb_weight.msb),
+ WR_REGISTER(dw, engine_en),
+ WR_REGISTER(dw, doorbell),
+ WR_REGISTER(dw, ch_arb_weight.lsb),
+ WR_REGISTER(dw, ch_arb_weight.msb),
/* eDMA interrupts registers */
- WR_REGISTER(int_status),
- WR_REGISTER(int_mask),
- WR_REGISTER(int_clear),
- WR_REGISTER(err_status),
- WR_REGISTER(done_imwr.lsb),
- WR_REGISTER(done_imwr.msb),
- WR_REGISTER(abort_imwr.lsb),
- WR_REGISTER(abort_imwr.msb),
- WR_REGISTER(ch01_imwr_data),
- WR_REGISTER(ch23_imwr_data),
- WR_REGISTER(ch45_imwr_data),
- WR_REGISTER(ch67_imwr_data),
- WR_REGISTER(linked_list_err_en),
+ WR_REGISTER(dw, int_status),
+ WR_REGISTER(dw, int_mask),
+ WR_REGISTER(dw, int_clear),
+ WR_REGISTER(dw, err_status),
+ WR_REGISTER(dw, done_imwr.lsb),
+ WR_REGISTER(dw, done_imwr.msb),
+ WR_REGISTER(dw, abort_imwr.lsb),
+ WR_REGISTER(dw, abort_imwr.msb),
+ WR_REGISTER(dw, ch01_imwr_data),
+ WR_REGISTER(dw, ch23_imwr_data),
+ WR_REGISTER(dw, ch45_imwr_data),
+ WR_REGISTER(dw, ch67_imwr_data),
+ WR_REGISTER(dw, linked_list_err_en),
};
- const struct debugfs_entries debugfs_unroll_regs[] = {
+ const struct dw_edma_debugfs_entry debugfs_unroll_regs[] = {
/* eDMA channel context grouping */
- WR_REGISTER_UNROLL(engine_chgroup),
- WR_REGISTER_UNROLL(engine_hshake_cnt.lsb),
- WR_REGISTER_UNROLL(engine_hshake_cnt.msb),
- WR_REGISTER_UNROLL(ch0_pwr_en),
- WR_REGISTER_UNROLL(ch1_pwr_en),
- WR_REGISTER_UNROLL(ch2_pwr_en),
- WR_REGISTER_UNROLL(ch3_pwr_en),
- WR_REGISTER_UNROLL(ch4_pwr_en),
- WR_REGISTER_UNROLL(ch5_pwr_en),
- WR_REGISTER_UNROLL(ch6_pwr_en),
- WR_REGISTER_UNROLL(ch7_pwr_en),
+ WR_REGISTER_UNROLL(dw, engine_chgroup),
+ WR_REGISTER_UNROLL(dw, engine_hshake_cnt.lsb),
+ WR_REGISTER_UNROLL(dw, engine_hshake_cnt.msb),
+ WR_REGISTER_UNROLL(dw, ch0_pwr_en),
+ WR_REGISTER_UNROLL(dw, ch1_pwr_en),
+ WR_REGISTER_UNROLL(dw, ch2_pwr_en),
+ WR_REGISTER_UNROLL(dw, ch3_pwr_en),
+ WR_REGISTER_UNROLL(dw, ch4_pwr_en),
+ WR_REGISTER_UNROLL(dw, ch5_pwr_en),
+ WR_REGISTER_UNROLL(dw, ch6_pwr_en),
+ WR_REGISTER_UNROLL(dw, ch7_pwr_en),
};
- struct dentry *regs_dir, *ch_dir;
+ struct dentry *regs_dent, *ch_dent;
int nr_entries, i;
char name[16];
- regs_dir = debugfs_create_dir(WRITE_STR, dir);
- if (!regs_dir)
- return;
+ regs_dent = debugfs_create_dir(WRITE_STR, dent);
nr_entries = ARRAY_SIZE(debugfs_regs);
- dw_edma_debugfs_create_x32(debugfs_regs, nr_entries, regs_dir);
+ dw_edma_debugfs_create_x32(dw, debugfs_regs, nr_entries, regs_dent);
if (dw->chip->mf == EDMA_MF_HDMA_COMPAT) {
nr_entries = ARRAY_SIZE(debugfs_unroll_regs);
- dw_edma_debugfs_create_x32(debugfs_unroll_regs, nr_entries,
- regs_dir);
+ dw_edma_debugfs_create_x32(dw, debugfs_unroll_regs, nr_entries,
+ regs_dent);
}
for (i = 0; i < dw->wr_ch_cnt; i++) {
snprintf(name, sizeof(name), "%s:%d", CHANNEL_STR, i);
- ch_dir = debugfs_create_dir(name, regs_dir);
- if (!ch_dir)
- return;
-
- dw_edma_debugfs_regs_ch(&regs->type.unroll.ch[i].wr, ch_dir);
+ ch_dent = debugfs_create_dir(name, regs_dent);
- lim[0][i].start = &regs->type.unroll.ch[i].wr;
- lim[0][i].end = &regs->type.unroll.ch[i].padding_1[0];
+ dw_edma_debugfs_regs_ch(dw, EDMA_DIR_WRITE, i, ch_dent);
}
}
-static void dw_edma_debugfs_regs_rd(struct dentry *dir)
+static noinline_for_stack void dw_edma_debugfs_regs_rd(struct dw_edma *dw,
+ struct dentry *dent)
{
- const struct debugfs_entries debugfs_regs[] = {
+ const struct dw_edma_debugfs_entry debugfs_regs[] = {
/* eDMA global registers */
- RD_REGISTER(engine_en),
- RD_REGISTER(doorbell),
- RD_REGISTER(ch_arb_weight.lsb),
- RD_REGISTER(ch_arb_weight.msb),
+ RD_REGISTER(dw, engine_en),
+ RD_REGISTER(dw, doorbell),
+ RD_REGISTER(dw, ch_arb_weight.lsb),
+ RD_REGISTER(dw, ch_arb_weight.msb),
/* eDMA interrupts registers */
- RD_REGISTER(int_status),
- RD_REGISTER(int_mask),
- RD_REGISTER(int_clear),
- RD_REGISTER(err_status.lsb),
- RD_REGISTER(err_status.msb),
- RD_REGISTER(linked_list_err_en),
- RD_REGISTER(done_imwr.lsb),
- RD_REGISTER(done_imwr.msb),
- RD_REGISTER(abort_imwr.lsb),
- RD_REGISTER(abort_imwr.msb),
- RD_REGISTER(ch01_imwr_data),
- RD_REGISTER(ch23_imwr_data),
- RD_REGISTER(ch45_imwr_data),
- RD_REGISTER(ch67_imwr_data),
+ RD_REGISTER(dw, int_status),
+ RD_REGISTER(dw, int_mask),
+ RD_REGISTER(dw, int_clear),
+ RD_REGISTER(dw, err_status.lsb),
+ RD_REGISTER(dw, err_status.msb),
+ RD_REGISTER(dw, linked_list_err_en),
+ RD_REGISTER(dw, done_imwr.lsb),
+ RD_REGISTER(dw, done_imwr.msb),
+ RD_REGISTER(dw, abort_imwr.lsb),
+ RD_REGISTER(dw, abort_imwr.msb),
+ RD_REGISTER(dw, ch01_imwr_data),
+ RD_REGISTER(dw, ch23_imwr_data),
+ RD_REGISTER(dw, ch45_imwr_data),
+ RD_REGISTER(dw, ch67_imwr_data),
};
- const struct debugfs_entries debugfs_unroll_regs[] = {
+ const struct dw_edma_debugfs_entry debugfs_unroll_regs[] = {
/* eDMA channel context grouping */
- RD_REGISTER_UNROLL(engine_chgroup),
- RD_REGISTER_UNROLL(engine_hshake_cnt.lsb),
- RD_REGISTER_UNROLL(engine_hshake_cnt.msb),
- RD_REGISTER_UNROLL(ch0_pwr_en),
- RD_REGISTER_UNROLL(ch1_pwr_en),
- RD_REGISTER_UNROLL(ch2_pwr_en),
- RD_REGISTER_UNROLL(ch3_pwr_en),
- RD_REGISTER_UNROLL(ch4_pwr_en),
- RD_REGISTER_UNROLL(ch5_pwr_en),
- RD_REGISTER_UNROLL(ch6_pwr_en),
- RD_REGISTER_UNROLL(ch7_pwr_en),
+ RD_REGISTER_UNROLL(dw, engine_chgroup),
+ RD_REGISTER_UNROLL(dw, engine_hshake_cnt.lsb),
+ RD_REGISTER_UNROLL(dw, engine_hshake_cnt.msb),
+ RD_REGISTER_UNROLL(dw, ch0_pwr_en),
+ RD_REGISTER_UNROLL(dw, ch1_pwr_en),
+ RD_REGISTER_UNROLL(dw, ch2_pwr_en),
+ RD_REGISTER_UNROLL(dw, ch3_pwr_en),
+ RD_REGISTER_UNROLL(dw, ch4_pwr_en),
+ RD_REGISTER_UNROLL(dw, ch5_pwr_en),
+ RD_REGISTER_UNROLL(dw, ch6_pwr_en),
+ RD_REGISTER_UNROLL(dw, ch7_pwr_en),
};
- struct dentry *regs_dir, *ch_dir;
+ struct dentry *regs_dent, *ch_dent;
int nr_entries, i;
char name[16];
- regs_dir = debugfs_create_dir(READ_STR, dir);
- if (!regs_dir)
- return;
+ regs_dent = debugfs_create_dir(READ_STR, dent);
nr_entries = ARRAY_SIZE(debugfs_regs);
- dw_edma_debugfs_create_x32(debugfs_regs, nr_entries, regs_dir);
+ dw_edma_debugfs_create_x32(dw, debugfs_regs, nr_entries, regs_dent);
if (dw->chip->mf == EDMA_MF_HDMA_COMPAT) {
nr_entries = ARRAY_SIZE(debugfs_unroll_regs);
- dw_edma_debugfs_create_x32(debugfs_unroll_regs, nr_entries,
- regs_dir);
+ dw_edma_debugfs_create_x32(dw, debugfs_unroll_regs, nr_entries,
+ regs_dent);
}
for (i = 0; i < dw->rd_ch_cnt; i++) {
snprintf(name, sizeof(name), "%s:%d", CHANNEL_STR, i);
- ch_dir = debugfs_create_dir(name, regs_dir);
- if (!ch_dir)
- return;
-
- dw_edma_debugfs_regs_ch(&regs->type.unroll.ch[i].rd, ch_dir);
+ ch_dent = debugfs_create_dir(name, regs_dent);
- lim[1][i].start = &regs->type.unroll.ch[i].rd;
- lim[1][i].end = &regs->type.unroll.ch[i].padding_2[0];
+ dw_edma_debugfs_regs_ch(dw, EDMA_DIR_READ, i, ch_dent);
}
}
-static void dw_edma_debugfs_regs(void)
+static void dw_edma_debugfs_regs(struct dw_edma *dw)
{
- const struct debugfs_entries debugfs_regs[] = {
- REGISTER(ctrl_data_arb_prior),
- REGISTER(ctrl),
+ const struct dw_edma_debugfs_entry debugfs_regs[] = {
+ REGISTER(dw, ctrl_data_arb_prior),
+ REGISTER(dw, ctrl),
};
- struct dentry *regs_dir;
+ struct dentry *regs_dent;
int nr_entries;
- regs_dir = debugfs_create_dir(REGISTERS_STR, dw->debugfs);
- if (!regs_dir)
- return;
+ regs_dent = debugfs_create_dir(REGISTERS_STR, dw->dma.dbg_dev_root);
nr_entries = ARRAY_SIZE(debugfs_regs);
- dw_edma_debugfs_create_x32(debugfs_regs, nr_entries, regs_dir);
+ dw_edma_debugfs_create_x32(dw, debugfs_regs, nr_entries, regs_dent);
- dw_edma_debugfs_regs_wr(regs_dir);
- dw_edma_debugfs_regs_rd(regs_dir);
+ dw_edma_debugfs_regs_wr(dw, regs_dent);
+ dw_edma_debugfs_regs_rd(dw, regs_dent);
}
-void dw_edma_v0_debugfs_on(struct dw_edma *_dw)
+void dw_edma_v0_debugfs_on(struct dw_edma *dw)
{
- dw = _dw;
- if (!dw)
- return;
-
- regs = dw->chip->reg_base;
- if (!regs)
- return;
-
- dw->debugfs = debugfs_create_dir(dw->name, NULL);
- if (!dw->debugfs)
+ if (!debugfs_initialized())
return;
- debugfs_create_u32("mf", 0444, dw->debugfs, &dw->chip->mf);
- debugfs_create_u16("wr_ch_cnt", 0444, dw->debugfs, &dw->wr_ch_cnt);
- debugfs_create_u16("rd_ch_cnt", 0444, dw->debugfs, &dw->rd_ch_cnt);
-
- dw_edma_debugfs_regs();
-}
-
-void dw_edma_v0_debugfs_off(struct dw_edma *_dw)
-{
- dw = _dw;
- if (!dw)
- return;
+ debugfs_create_u32("mf", 0444, dw->dma.dbg_dev_root, &dw->chip->mf);
+ debugfs_create_u16("wr_ch_cnt", 0444, dw->dma.dbg_dev_root, &dw->wr_ch_cnt);
+ debugfs_create_u16("rd_ch_cnt", 0444, dw->dma.dbg_dev_root, &dw->rd_ch_cnt);
- debugfs_remove_recursive(dw->debugfs);
- dw->debugfs = NULL;
+ dw_edma_debugfs_regs(dw);
}
diff --git a/drivers/dma/dw-edma/dw-edma-v0-debugfs.h b/drivers/dma/dw-edma/dw-edma-v0-debugfs.h
index 3391b86edf5a..fb3342d97d6d 100644
--- a/drivers/dma/dw-edma/dw-edma-v0-debugfs.h
+++ b/drivers/dma/dw-edma/dw-edma-v0-debugfs.h
@@ -13,15 +13,10 @@
#ifdef CONFIG_DEBUG_FS
void dw_edma_v0_debugfs_on(struct dw_edma *dw);
-void dw_edma_v0_debugfs_off(struct dw_edma *dw);
#else
static inline void dw_edma_v0_debugfs_on(struct dw_edma *dw)
{
}
-
-static inline void dw_edma_v0_debugfs_off(struct dw_edma *dw)
-{
-}
#endif /* CONFIG_DEBUG_FS */
#endif /* _DW_EDMA_V0_DEBUG_FS_H */
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index 97ba3bfc10b1..5f7d690e3dba 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -889,7 +889,8 @@ static struct dw_desc *dwc_find_desc(struct dw_dma_chan *dwc, dma_cookie_t c)
return NULL;
}
-static u32 dwc_get_residue(struct dw_dma_chan *dwc, dma_cookie_t cookie)
+static u32 dwc_get_residue_and_status(struct dw_dma_chan *dwc, dma_cookie_t cookie,
+ enum dma_status *status)
{
struct dw_desc *desc;
unsigned long flags;
@@ -903,6 +904,8 @@ static u32 dwc_get_residue(struct dw_dma_chan *dwc, dma_cookie_t cookie)
residue = desc->residue;
if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags) && residue)
residue -= dwc_get_sent(dwc);
+ if (test_bit(DW_DMA_IS_PAUSED, &dwc->flags))
+ *status = DMA_PAUSED;
} else {
residue = desc->total_len;
}
@@ -932,11 +935,7 @@ dwc_tx_status(struct dma_chan *chan,
if (ret == DMA_COMPLETE)
return ret;
- dma_set_residue(txstate, dwc_get_residue(dwc, cookie));
-
- if (test_bit(DW_DMA_IS_PAUSED, &dwc->flags) && ret == DMA_IN_PROGRESS)
- return DMA_PAUSED;
-
+ dma_set_residue(txstate, dwc_get_residue_and_status(dwc, cookie, &ret));
return ret;
}
diff --git a/drivers/dma/fsl-edma.c b/drivers/dma/fsl-edma.c
index 76cbf54aec58..e40769666e39 100644
--- a/drivers/dma/fsl-edma.c
+++ b/drivers/dma/fsl-edma.c
@@ -272,7 +272,6 @@ static int fsl_edma_probe(struct platform_device *pdev)
const struct fsl_edma_drvdata *drvdata = NULL;
struct fsl_edma_chan *fsl_chan;
struct edma_regs *regs;
- struct resource *res;
int len, chans;
int ret, i;
@@ -298,8 +297,7 @@ static int fsl_edma_probe(struct platform_device *pdev)
fsl_edma->n_chans = chans;
mutex_init(&fsl_edma->fsl_edma_mutex);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- fsl_edma->membase = devm_ioremap_resource(&pdev->dev, res);
+ fsl_edma->membase = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(fsl_edma->membase))
return PTR_ERR(fsl_edma->membase);
@@ -323,8 +321,8 @@ static int fsl_edma_probe(struct platform_device *pdev)
for (i = 0; i < fsl_edma->drvdata->dmamuxs; i++) {
char clkname[32];
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1 + i);
- fsl_edma->muxbase[i] = devm_ioremap_resource(&pdev->dev, res);
+ fsl_edma->muxbase[i] = devm_platform_ioremap_resource(pdev,
+ 1 + i);
if (IS_ERR(fsl_edma->muxbase[i])) {
/* on error: disable all previously enabled clks */
fsl_disable_clocks(fsl_edma, i);
diff --git a/drivers/dma/fsl-qdma.c b/drivers/dma/fsl-qdma.c
index 045ead46ec8f..eddb2688f234 100644
--- a/drivers/dma/fsl-qdma.c
+++ b/drivers/dma/fsl-qdma.c
@@ -1119,7 +1119,6 @@ static int fsl_qdma_probe(struct platform_device *pdev)
int ret, i;
int blk_num, blk_off;
u32 len, chans, queues;
- struct resource *res;
struct fsl_qdma_chan *fsl_chan;
struct fsl_qdma_engine *fsl_qdma;
struct device_node *np = pdev->dev.of_node;
@@ -1183,18 +1182,15 @@ static int fsl_qdma_probe(struct platform_device *pdev)
if (!fsl_qdma->status[i])
return -ENOMEM;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- fsl_qdma->ctrl_base = devm_ioremap_resource(&pdev->dev, res);
+ fsl_qdma->ctrl_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(fsl_qdma->ctrl_base))
return PTR_ERR(fsl_qdma->ctrl_base);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- fsl_qdma->status_base = devm_ioremap_resource(&pdev->dev, res);
+ fsl_qdma->status_base = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(fsl_qdma->status_base))
return PTR_ERR(fsl_qdma->status_base);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
- fsl_qdma->block_base = devm_ioremap_resource(&pdev->dev, res);
+ fsl_qdma->block_base = devm_platform_ioremap_resource(pdev, 2);
if (IS_ERR(fsl_qdma->block_base))
return PTR_ERR(fsl_qdma->block_base);
fsl_qdma->queue = fsl_qdma_alloc_queue_resources(pdev, fsl_qdma);
diff --git a/drivers/dma/idma64.c b/drivers/dma/idma64.c
index c33087c5cd02..0ac634a51c5e 100644
--- a/drivers/dma/idma64.c
+++ b/drivers/dma/idma64.c
@@ -137,8 +137,11 @@ static void idma64_chan_irq(struct idma64 *idma64, unsigned short c,
u32 status_err, u32 status_xfer)
{
struct idma64_chan *idma64c = &idma64->chan[c];
+ struct dma_chan_percpu *stat;
struct idma64_desc *desc;
+ stat = this_cpu_ptr(idma64c->vchan.chan.local);
+
spin_lock(&idma64c->vchan.lock);
desc = idma64c->desc;
if (desc) {
@@ -149,6 +152,7 @@ static void idma64_chan_irq(struct idma64 *idma64, unsigned short c,
dma_writel(idma64, CLEAR(XFER), idma64c->mask);
desc->status = DMA_COMPLETE;
vchan_cookie_complete(&desc->vdesc);
+ stat->bytes_transferred += desc->length;
idma64_start_transfer(idma64c);
}
@@ -627,7 +631,6 @@ static int idma64_platform_probe(struct platform_device *pdev)
struct idma64_chip *chip;
struct device *dev = &pdev->dev;
struct device *sysdev = dev->parent;
- struct resource *mem;
int ret;
chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
@@ -638,8 +641,7 @@ static int idma64_platform_probe(struct platform_device *pdev)
if (chip->irq < 0)
return chip->irq;
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- chip->regs = devm_ioremap_resource(dev, mem);
+ chip->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(chip->regs))
return PTR_ERR(chip->regs);
diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c
index e13e92609943..674bfefca088 100644
--- a/drivers/dma/idxd/cdev.c
+++ b/drivers/dma/idxd/cdev.c
@@ -201,7 +201,7 @@ static int idxd_cdev_mmap(struct file *filp, struct vm_area_struct *vma)
if (rc < 0)
return rc;
- vma->vm_flags |= VM_DONTCOPY;
+ vm_flags_set(vma, VM_DONTCOPY);
pfn = (base + idxd_get_wq_portal_full_offset(wq->id,
IDXD_PORTAL_LIMITED)) >> PAGE_SHIFT;
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
index 29dbb0f52e18..5f321f3b4242 100644
--- a/drivers/dma/idxd/device.c
+++ b/drivers/dma/idxd/device.c
@@ -699,9 +699,13 @@ static void idxd_groups_clear_state(struct idxd_device *idxd)
group->num_engines = 0;
group->num_wqs = 0;
group->use_rdbuf_limit = false;
- group->rdbufs_allowed = 0;
+ /*
+ * The default value is the same as the value of
+ * total read buffers in GRPCAP.
+ */
+ group->rdbufs_allowed = idxd->max_rdbufs;
group->rdbufs_reserved = 0;
- if (idxd->hw.version < DEVICE_VERSION_2 && !tc_override) {
+ if (idxd->hw.version <= DEVICE_VERSION_2 && !tc_override) {
group->tc_a = 1;
group->tc_b = 1;
} else {
@@ -934,11 +938,7 @@ static void idxd_group_flags_setup(struct idxd_device *idxd)
group->grpcfg.flags.tc_b = group->tc_b;
group->grpcfg.flags.use_rdbuf_limit = group->use_rdbuf_limit;
group->grpcfg.flags.rdbufs_reserved = group->rdbufs_reserved;
- if (group->rdbufs_allowed)
- group->grpcfg.flags.rdbufs_allowed = group->rdbufs_allowed;
- else
- group->grpcfg.flags.rdbufs_allowed = idxd->max_rdbufs;
-
+ group->grpcfg.flags.rdbufs_allowed = group->rdbufs_allowed;
group->grpcfg.flags.desc_progress_limit = group->desc_progress_limit;
group->grpcfg.flags.batch_progress_limit = group->batch_progress_limit;
}
diff --git a/drivers/dma/idxd/dma.c b/drivers/dma/idxd/dma.c
index e0874cb4721c..eb35ca313684 100644
--- a/drivers/dma/idxd/dma.c
+++ b/drivers/dma/idxd/dma.c
@@ -63,12 +63,6 @@ static void op_flag_setup(unsigned long flags, u32 *desc_flags)
*desc_flags |= IDXD_OP_FLAG_RCI;
}
-static inline void set_completion_address(struct idxd_desc *desc,
- u64 *compl_addr)
-{
- *compl_addr = desc->compl_dma;
-}
-
static inline void idxd_prep_desc_common(struct idxd_wq *wq,
struct dsa_hw_desc *hw, char opcode,
u64 addr_f1, u64 addr_f2, u64 len,
diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
index 529ea09c9094..640d3048368e 100644
--- a/drivers/dma/idxd/init.c
+++ b/drivers/dma/idxd/init.c
@@ -295,13 +295,18 @@ static int idxd_setup_groups(struct idxd_device *idxd)
}
idxd->groups[i] = group;
- if (idxd->hw.version < DEVICE_VERSION_2 && !tc_override) {
+ if (idxd->hw.version <= DEVICE_VERSION_2 && !tc_override) {
group->tc_a = 1;
group->tc_b = 1;
} else {
group->tc_a = -1;
group->tc_b = -1;
}
+ /*
+ * The default value is the same as the value of
+ * total read buffers in GRPCAP.
+ */
+ group->rdbufs_allowed = idxd->max_rdbufs;
}
return 0;
diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c
index 3229dfc78650..18cd8151dee0 100644
--- a/drivers/dma/idxd/sysfs.c
+++ b/drivers/dma/idxd/sysfs.c
@@ -387,7 +387,7 @@ static ssize_t group_traffic_class_a_store(struct device *dev,
if (idxd->state == IDXD_DEV_ENABLED)
return -EPERM;
- if (idxd->hw.version < DEVICE_VERSION_2 && !tc_override)
+ if (idxd->hw.version <= DEVICE_VERSION_2 && !tc_override)
return -EPERM;
if (val < 0 || val > 7)
@@ -429,7 +429,7 @@ static ssize_t group_traffic_class_b_store(struct device *dev,
if (idxd->state == IDXD_DEV_ENABLED)
return -EPERM;
- if (idxd->hw.version < DEVICE_VERSION_2 && !tc_override)
+ if (idxd->hw.version <= DEVICE_VERSION_2 && !tc_override)
return -EPERM;
if (val < 0 || val > 7)
diff --git a/drivers/dma/img-mdc-dma.c b/drivers/dma/img-mdc-dma.c
index e4ea107ce78c..ad084552640f 100644
--- a/drivers/dma/img-mdc-dma.c
+++ b/drivers/dma/img-mdc-dma.c
@@ -886,7 +886,6 @@ static int img_mdc_runtime_resume(struct device *dev)
static int mdc_dma_probe(struct platform_device *pdev)
{
struct mdc_dma *mdma;
- struct resource *res;
unsigned int i;
u32 val;
int ret;
@@ -898,8 +897,7 @@ static int mdc_dma_probe(struct platform_device *pdev)
mdma->soc = of_device_get_match_data(&pdev->dev);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mdma->regs = devm_ioremap_resource(&pdev->dev, res);
+ mdma->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mdma->regs))
return PTR_ERR(mdma->regs);
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index 65c6094ce063..80086977973f 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -1038,7 +1038,6 @@ static struct dma_chan *imxdma_xlate(struct of_phandle_args *dma_spec,
static int __init imxdma_probe(struct platform_device *pdev)
{
struct imxdma_engine *imxdma;
- struct resource *res;
int ret, i;
int irq, irq_err;
@@ -1049,8 +1048,7 @@ static int __init imxdma_probe(struct platform_device *pdev)
imxdma->dev = &pdev->dev;
imxdma->devtype = (uintptr_t)of_device_get_match_data(&pdev->dev);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- imxdma->base = devm_ioremap_resource(&pdev->dev, res);
+ imxdma->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(imxdma->base))
return PTR_ERR(imxdma->base);
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index b926abe4fa43..7a912f90c2a9 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -954,7 +954,10 @@ static irqreturn_t sdma_int_handler(int irq, void *dev_id)
desc = sdmac->desc;
if (desc) {
if (sdmac->flags & IMX_DMA_SG_LOOP) {
- sdma_update_channel_loop(sdmac);
+ if (sdmac->peripheral_type != IMX_DMATYPE_HDMI)
+ sdma_update_channel_loop(sdmac);
+ else
+ vchan_cyclic_callback(&desc->vd);
} else {
mxc_sdma_handle_channel_normal(sdmac);
vchan_cookie_complete(&desc->vd);
@@ -1074,6 +1077,10 @@ static int sdma_get_pc(struct sdma_channel *sdmac,
per_2_emi = sdma->script_addrs->sai_2_mcu_addr;
emi_2_per = sdma->script_addrs->mcu_2_sai_addr;
break;
+ case IMX_DMATYPE_HDMI:
+ emi_2_per = sdma->script_addrs->hdmi_dma_addr;
+ sdmac->is_ram_script = true;
+ break;
default:
dev_err(sdma->dev, "Unsupported transfer type %d\n",
peripheral_type);
@@ -1125,11 +1132,16 @@ static int sdma_load_context(struct sdma_channel *sdmac)
/* Send by context the event mask,base address for peripheral
* and watermark level
*/
- context->gReg[0] = sdmac->event_mask[1];
- context->gReg[1] = sdmac->event_mask[0];
- context->gReg[2] = sdmac->per_addr;
- context->gReg[6] = sdmac->shp_addr;
- context->gReg[7] = sdmac->watermark_level;
+ if (sdmac->peripheral_type == IMX_DMATYPE_HDMI) {
+ context->gReg[4] = sdmac->per_addr;
+ context->gReg[6] = sdmac->shp_addr;
+ } else {
+ context->gReg[0] = sdmac->event_mask[1];
+ context->gReg[1] = sdmac->event_mask[0];
+ context->gReg[2] = sdmac->per_addr;
+ context->gReg[6] = sdmac->shp_addr;
+ context->gReg[7] = sdmac->watermark_level;
+ }
bd0->mode.command = C0_SETDM;
bd0->mode.status = BD_DONE | BD_WRAP | BD_EXTD;
@@ -1513,7 +1525,7 @@ static struct sdma_desc *sdma_transfer_init(struct sdma_channel *sdmac,
desc->sdmac = sdmac;
desc->num_bd = bds;
- if (sdma_alloc_bd(desc))
+ if (bds && sdma_alloc_bd(desc))
goto err_desc_out;
/* No slave_config called in MEMCPY case, so do here */
@@ -1680,13 +1692,16 @@ static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
{
struct sdma_channel *sdmac = to_sdma_chan(chan);
struct sdma_engine *sdma = sdmac->sdma;
- int num_periods = buf_len / period_len;
+ int num_periods = 0;
int channel = sdmac->channel;
int i = 0, buf = 0;
struct sdma_desc *desc;
dev_dbg(sdma->dev, "%s channel: %d\n", __func__, channel);
+ if (sdmac->peripheral_type != IMX_DMATYPE_HDMI)
+ num_periods = buf_len / period_len;
+
sdma_config_write(chan, &sdmac->slave_config, direction);
desc = sdma_transfer_init(sdmac, direction, num_periods);
@@ -1703,6 +1718,9 @@ static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
goto err_bd_out;
}
+ if (sdmac->peripheral_type == IMX_DMATYPE_HDMI)
+ return vchan_tx_prep(&sdmac->vc, &desc->vd, flags);
+
while (buf < buf_len) {
struct sdma_buffer_descriptor *bd = &desc->bd[i];
int param;
@@ -1763,6 +1781,10 @@ static int sdma_config_write(struct dma_chan *chan,
sdmac->watermark_level |= (dmaengine_cfg->dst_maxburst << 16) &
SDMA_WATERMARK_LEVEL_HWML;
sdmac->word_size = dmaengine_cfg->dst_addr_width;
+ } else if (sdmac->peripheral_type == IMX_DMATYPE_HDMI) {
+ sdmac->per_address = dmaengine_cfg->dst_addr;
+ sdmac->per_address2 = dmaengine_cfg->src_addr;
+ sdmac->watermark_level = 0;
} else {
sdmac->per_address = dmaengine_cfg->dst_addr;
sdmac->watermark_level = dmaengine_cfg->dst_maxburst *
@@ -2169,7 +2191,6 @@ static int sdma_probe(struct platform_device *pdev)
const char *fw_name;
int ret;
int irq;
- struct resource *iores;
struct resource spba_res;
int i;
struct sdma_engine *sdma;
@@ -2192,8 +2213,7 @@ static int sdma_probe(struct platform_device *pdev)
if (irq < 0)
return irq;
- iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- sdma->regs = devm_ioremap_resource(&pdev->dev, iores);
+ sdma->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(sdma->regs))
return PTR_ERR(sdma->regs);
@@ -2234,6 +2254,7 @@ static int sdma_probe(struct platform_device *pdev)
dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask);
dma_cap_set(DMA_CYCLIC, sdma->dma_device.cap_mask);
dma_cap_set(DMA_MEMCPY, sdma->dma_device.cap_mask);
+ dma_cap_set(DMA_PRIVATE, sdma->dma_device.cap_mask);
INIT_LIST_HEAD(&sdma->dma_device.channels);
/* Initialize channel parameters */
diff --git a/drivers/dma/mcf-edma.c b/drivers/dma/mcf-edma.c
index e12b754e6398..ebd8733f72ad 100644
--- a/drivers/dma/mcf-edma.c
+++ b/drivers/dma/mcf-edma.c
@@ -182,7 +182,6 @@ static int mcf_edma_probe(struct platform_device *pdev)
struct fsl_edma_engine *mcf_edma;
struct fsl_edma_chan *mcf_chan;
struct edma_regs *regs;
- struct resource *res;
int ret, i, len, chans;
pdata = dev_get_platdata(&pdev->dev);
@@ -210,9 +209,7 @@ static int mcf_edma_probe(struct platform_device *pdev)
mutex_init(&mcf_edma->fsl_edma_mutex);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
- mcf_edma->membase = devm_ioremap_resource(&pdev->dev, res);
+ mcf_edma->membase = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mcf_edma->membase))
return PTR_ERR(mcf_edma->membase);
diff --git a/drivers/dma/mediatek/mtk-hsdma.c b/drivers/dma/mediatek/mtk-hsdma.c
index f7717c44b887..69cc61c0b262 100644
--- a/drivers/dma/mediatek/mtk-hsdma.c
+++ b/drivers/dma/mediatek/mtk-hsdma.c
@@ -896,7 +896,6 @@ static int mtk_hsdma_probe(struct platform_device *pdev)
struct mtk_hsdma_device *hsdma;
struct mtk_hsdma_vchan *vc;
struct dma_device *dd;
- struct resource *res;
int i, err;
hsdma = devm_kzalloc(&pdev->dev, sizeof(*hsdma), GFP_KERNEL);
@@ -905,8 +904,7 @@ static int mtk_hsdma_probe(struct platform_device *pdev)
dd = &hsdma->ddev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- hsdma->base = devm_ioremap_resource(&pdev->dev, res);
+ hsdma->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(hsdma->base))
return PTR_ERR(hsdma->base);
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
index e8d71b35593e..ebdfdcbb4f7a 100644
--- a/drivers/dma/mmp_pdma.c
+++ b/drivers/dma/mmp_pdma.c
@@ -1022,7 +1022,6 @@ static int mmp_pdma_probe(struct platform_device *op)
struct mmp_pdma_device *pdev;
const struct of_device_id *of_id;
struct mmp_dma_platdata *pdata = dev_get_platdata(&op->dev);
- struct resource *iores;
int i, ret, irq = 0;
int dma_channels = 0, irq_num = 0;
const enum dma_slave_buswidth widths =
@@ -1037,8 +1036,7 @@ static int mmp_pdma_probe(struct platform_device *op)
spin_lock_init(&pdev->phy_lock);
- iores = platform_get_resource(op, IORESOURCE_MEM, 0);
- pdev->base = devm_ioremap_resource(pdev->dev, iores);
+ pdev->base = devm_platform_ioremap_resource(op, 0);
if (IS_ERR(pdev->base))
return PTR_ERR(pdev->base);
diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c
index a262e0eb4cc9..d49fa6bc6775 100644
--- a/drivers/dma/mmp_tdma.c
+++ b/drivers/dma/mmp_tdma.c
@@ -15,7 +15,7 @@
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/device.h>
-#include <linux/platform_data/dma-mmp_tdma.h>
+#include <linux/genalloc.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
@@ -639,7 +639,6 @@ static int mmp_tdma_probe(struct platform_device *pdev)
enum mmp_tdma_type type;
const struct of_device_id *of_id;
struct mmp_tdma_device *tdev;
- struct resource *iores;
int i, ret;
int irq = 0, irq_num = 0;
int chan_num = TDMA_CHANNEL_NUM;
@@ -663,17 +662,13 @@ static int mmp_tdma_probe(struct platform_device *pdev)
irq_num++;
}
- iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- tdev->base = devm_ioremap_resource(&pdev->dev, iores);
+ tdev->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(tdev->base))
return PTR_ERR(tdev->base);
INIT_LIST_HEAD(&tdev->device.channels);
- if (pdev->dev.of_node)
- pool = of_gen_pool_get(pdev->dev.of_node, "asram", 0);
- else
- pool = sram_get_gpool("asram");
+ pool = of_gen_pool_get(pdev->dev.of_node, "asram", 0);
if (!pool) {
dev_err(&pdev->dev, "asram pool not available\n");
return -ENOMEM;
diff --git a/drivers/dma/moxart-dma.c b/drivers/dma/moxart-dma.c
index 7459382a8353..7565ad98ba66 100644
--- a/drivers/dma/moxart-dma.c
+++ b/drivers/dma/moxart-dma.c
@@ -563,7 +563,6 @@ static int moxart_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *node = dev->of_node;
- struct resource *res;
void __iomem *dma_base_addr;
int ret, i;
unsigned int irq;
@@ -580,8 +579,7 @@ static int moxart_probe(struct platform_device *pdev)
return -EINVAL;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- dma_base_addr = devm_ioremap_resource(dev, res);
+ dma_base_addr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dma_base_addr))
return PTR_ERR(dma_base_addr);
diff --git a/drivers/dma/mv_xor_v2.c b/drivers/dma/mv_xor_v2.c
index 113834e1167b..89790beba305 100644
--- a/drivers/dma/mv_xor_v2.c
+++ b/drivers/dma/mv_xor_v2.c
@@ -714,7 +714,6 @@ static int mv_xor_v2_resume(struct platform_device *dev)
static int mv_xor_v2_probe(struct platform_device *pdev)
{
struct mv_xor_v2_device *xor_dev;
- struct resource *res;
int i, ret = 0;
struct dma_device *dma_dev;
struct mv_xor_v2_sw_desc *sw_desc;
@@ -726,13 +725,11 @@ static int mv_xor_v2_probe(struct platform_device *pdev)
if (!xor_dev)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- xor_dev->dma_base = devm_ioremap_resource(&pdev->dev, res);
+ xor_dev->dma_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(xor_dev->dma_base))
return PTR_ERR(xor_dev->dma_base);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- xor_dev->glob_base = devm_ioremap_resource(&pdev->dev, res);
+ xor_dev->glob_base = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(xor_dev->glob_base))
return PTR_ERR(xor_dev->glob_base);
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c
index dc147cc2436e..acc4d53e4630 100644
--- a/drivers/dma/mxs-dma.c
+++ b/drivers/dma/mxs-dma.c
@@ -746,7 +746,6 @@ static int mxs_dma_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
const struct mxs_dma_type *dma_type;
struct mxs_dma_engine *mxs_dma;
- struct resource *iores;
int ret, i;
mxs_dma = devm_kzalloc(&pdev->dev, sizeof(*mxs_dma), GFP_KERNEL);
@@ -763,8 +762,7 @@ static int mxs_dma_probe(struct platform_device *pdev)
mxs_dma->type = dma_type->type;
mxs_dma->dev_id = dma_type->id;
- iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mxs_dma->base = devm_ioremap_resource(&pdev->dev, iores);
+ mxs_dma->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mxs_dma->base))
return PTR_ERR(mxs_dma->base);
diff --git a/drivers/dma/nbpfaxi.c b/drivers/dma/nbpfaxi.c
index a7063e9cd551..e72e8c10355e 100644
--- a/drivers/dma/nbpfaxi.c
+++ b/drivers/dma/nbpfaxi.c
@@ -1294,7 +1294,6 @@ static int nbpf_probe(struct platform_device *pdev)
struct device_node *np = dev->of_node;
struct nbpf_device *nbpf;
struct dma_device *dma_dev;
- struct resource *iomem;
const struct nbpf_config *cfg;
int num_channels;
int ret, irq, eirq, i;
@@ -1318,8 +1317,7 @@ static int nbpf_probe(struct platform_device *pdev)
dma_dev = &nbpf->dma_dev;
dma_dev->dev = dev;
- iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- nbpf->base = devm_ioremap_resource(dev, iomem);
+ nbpf->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(nbpf->base))
return PTR_ERR(nbpf->base);
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c
index 6b5e91f26afc..686c270ef710 100644
--- a/drivers/dma/ppc4xx/adma.c
+++ b/drivers/dma/ppc4xx/adma.c
@@ -4299,9 +4299,8 @@ static ssize_t devices_show(struct device_driver *dev, char *buf)
for (i = 0; i < PPC440SPE_ADMA_ENGINES_NUM; i++) {
if (ppc440spe_adma_devices[i] == -1)
continue;
- size += scnprintf(buf + size, PAGE_SIZE - size,
- "PPC440SP(E)-ADMA.%d: %s\n", i,
- ppc_adma_errors[ppc440spe_adma_devices[i]]);
+ size += sysfs_emit_at(buf, size, "PPC440SP(E)-ADMA.%d: %s\n",
+ i, ppc_adma_errors[ppc440spe_adma_devices[i]]);
}
return size;
}
@@ -4309,9 +4308,8 @@ static DRIVER_ATTR_RO(devices);
static ssize_t enable_show(struct device_driver *dev, char *buf)
{
- return snprintf(buf, PAGE_SIZE,
- "PPC440SP(e) RAID-6 capabilities are %sABLED.\n",
- ppc440spe_r6_enabled ? "EN" : "DIS");
+ return sysfs_emit(buf, "PPC440SP(e) RAID-6 capabilities are %sABLED.\n",
+ ppc440spe_r6_enabled ? "EN" : "DIS");
}
static ssize_t enable_store(struct device_driver *dev, const char *buf,
@@ -4362,7 +4360,7 @@ static ssize_t poly_show(struct device_driver *dev, char *buf)
reg &= 0xFF;
#endif
- size = snprintf(buf, PAGE_SIZE, "PPC440SP(e) RAID-6 driver "
+ size = sysfs_emit(buf, "PPC440SP(e) RAID-6 driver "
"uses 0x1%02x polynomial.\n", reg);
return size;
}
diff --git a/drivers/dma/ptdma/ptdma-dmaengine.c b/drivers/dma/ptdma/ptdma-dmaengine.c
index cc22d162ce25..1aa65e5de0f3 100644
--- a/drivers/dma/ptdma/ptdma-dmaengine.c
+++ b/drivers/dma/ptdma/ptdma-dmaengine.c
@@ -254,7 +254,7 @@ static void pt_issue_pending(struct dma_chan *dma_chan)
spin_unlock_irqrestore(&chan->vc.lock, flags);
/* If there was nothing active, start processing */
- if (engine_is_idle)
+ if (engine_is_idle && desc)
pt_cmd_callback(desc, 0);
}
diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c
index 22a392fe6d32..1b046d9a3a26 100644
--- a/drivers/dma/pxa_dma.c
+++ b/drivers/dma/pxa_dma.c
@@ -1346,7 +1346,6 @@ static int pxad_probe(struct platform_device *op)
const struct of_device_id *of_id;
const struct dma_slave_map *slave_map = NULL;
struct mmp_dma_platdata *pdata = dev_get_platdata(&op->dev);
- struct resource *iores;
int ret, dma_channels = 0, nb_requestors = 0, slave_map_cnt = 0;
const enum dma_slave_buswidth widths =
DMA_SLAVE_BUSWIDTH_1_BYTE | DMA_SLAVE_BUSWIDTH_2_BYTES |
@@ -1358,8 +1357,7 @@ static int pxad_probe(struct platform_device *op)
spin_lock_init(&pdev->phy_lock);
- iores = platform_get_resource(op, IORESOURCE_MEM, 0);
- pdev->base = devm_ioremap_resource(&op->dev, iores);
+ pdev->base = devm_platform_ioremap_resource(op, 0);
if (IS_ERR(pdev->base))
return PTR_ERR(pdev->base);
diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c
index 2ff787df513e..1e47d27e1f81 100644
--- a/drivers/dma/qcom/bam_dma.c
+++ b/drivers/dma/qcom/bam_dma.c
@@ -1237,7 +1237,6 @@ static int bam_dma_probe(struct platform_device *pdev)
{
struct bam_device *bdev;
const struct of_device_id *match;
- struct resource *iores;
int ret, i;
bdev = devm_kzalloc(&pdev->dev, sizeof(*bdev), GFP_KERNEL);
@@ -1254,8 +1253,7 @@ static int bam_dma_probe(struct platform_device *pdev)
bdev->layout = match->data;
- iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- bdev->regs = devm_ioremap_resource(&pdev->dev, iores);
+ bdev->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(bdev->regs))
return PTR_ERR(bdev->regs);
diff --git a/drivers/dma/s3c24xx-dma.c b/drivers/dma/s3c24xx-dma.c
deleted file mode 100644
index a09eeb545f7d..000000000000
--- a/drivers/dma/s3c24xx-dma.c
+++ /dev/null
@@ -1,1428 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * S3C24XX DMA handling
- *
- * Copyright (c) 2013 Heiko Stuebner <heiko@sntech.de>
- *
- * based on amba-pl08x.c
- *
- * Copyright (c) 2006 ARM Ltd.
- * Copyright (c) 2010 ST-Ericsson SA
- *
- * Author: Peter Pearse <peter.pearse@arm.com>
- * Author: Linus Walleij <linus.walleij@stericsson.com>
- *
- * The DMA controllers in S3C24XX SoCs have a varying number of DMA signals
- * that can be routed to any of the 4 to 8 hardware-channels.
- *
- * Therefore on these DMA controllers the number of channels
- * and the number of incoming DMA signals are two totally different things.
- * It is usually not possible to theoretically handle all physical signals,
- * so a multiplexing scheme with possible denial of use is necessary.
- *
- * Open items:
- * - bursts
- */
-
-#include <linux/platform_device.h>
-#include <linux/types.h>
-#include <linux/dmaengine.h>
-#include <linux/dma-mapping.h>
-#include <linux/interrupt.h>
-#include <linux/clk.h>
-#include <linux/module.h>
-#include <linux/mod_devicetable.h>
-#include <linux/slab.h>
-#include <linux/platform_data/dma-s3c24xx.h>
-
-#include "dmaengine.h"
-#include "virt-dma.h"
-
-#define MAX_DMA_CHANNELS 8
-
-#define S3C24XX_DISRC 0x00
-#define S3C24XX_DISRCC 0x04
-#define S3C24XX_DISRCC_INC_INCREMENT 0
-#define S3C24XX_DISRCC_INC_FIXED BIT(0)
-#define S3C24XX_DISRCC_LOC_AHB 0
-#define S3C24XX_DISRCC_LOC_APB BIT(1)
-
-#define S3C24XX_DIDST 0x08
-#define S3C24XX_DIDSTC 0x0c
-#define S3C24XX_DIDSTC_INC_INCREMENT 0
-#define S3C24XX_DIDSTC_INC_FIXED BIT(0)
-#define S3C24XX_DIDSTC_LOC_AHB 0
-#define S3C24XX_DIDSTC_LOC_APB BIT(1)
-#define S3C24XX_DIDSTC_INT_TC0 0
-#define S3C24XX_DIDSTC_INT_RELOAD BIT(2)
-
-#define S3C24XX_DCON 0x10
-
-#define S3C24XX_DCON_TC_MASK 0xfffff
-#define S3C24XX_DCON_DSZ_BYTE (0 << 20)
-#define S3C24XX_DCON_DSZ_HALFWORD (1 << 20)
-#define S3C24XX_DCON_DSZ_WORD (2 << 20)
-#define S3C24XX_DCON_DSZ_MASK (3 << 20)
-#define S3C24XX_DCON_DSZ_SHIFT 20
-#define S3C24XX_DCON_AUTORELOAD 0
-#define S3C24XX_DCON_NORELOAD BIT(22)
-#define S3C24XX_DCON_HWTRIG BIT(23)
-#define S3C24XX_DCON_HWSRC_SHIFT 24
-#define S3C24XX_DCON_SERV_SINGLE 0
-#define S3C24XX_DCON_SERV_WHOLE BIT(27)
-#define S3C24XX_DCON_TSZ_UNIT 0
-#define S3C24XX_DCON_TSZ_BURST4 BIT(28)
-#define S3C24XX_DCON_INT BIT(29)
-#define S3C24XX_DCON_SYNC_PCLK 0
-#define S3C24XX_DCON_SYNC_HCLK BIT(30)
-#define S3C24XX_DCON_DEMAND 0
-#define S3C24XX_DCON_HANDSHAKE BIT(31)
-
-#define S3C24XX_DSTAT 0x14
-#define S3C24XX_DSTAT_STAT_BUSY BIT(20)
-#define S3C24XX_DSTAT_CURRTC_MASK 0xfffff
-
-#define S3C24XX_DMASKTRIG 0x20
-#define S3C24XX_DMASKTRIG_SWTRIG BIT(0)
-#define S3C24XX_DMASKTRIG_ON BIT(1)
-#define S3C24XX_DMASKTRIG_STOP BIT(2)
-
-#define S3C24XX_DMAREQSEL 0x24
-#define S3C24XX_DMAREQSEL_HW BIT(0)
-
-/*
- * S3C2410, S3C2440 and S3C2442 SoCs cannot select any physical channel
- * for a DMA source. Instead only specific channels are valid.
- * All of these SoCs have 4 physical channels and the number of request
- * source bits is 3. Additionally we also need 1 bit to mark the channel
- * as valid.
- * Therefore we separate the chansel element of the channel data into 4
- * parts of 4 bits each, to hold the information if the channel is valid
- * and the hw request source to use.
- *
- * Example:
- * SDI is valid on channels 0, 2 and 3 - with varying hw request sources.
- * For it the chansel field would look like
- *
- * ((BIT(3) | 1) << 3 * 4) | // channel 3, with request source 1
- * ((BIT(3) | 2) << 2 * 4) | // channel 2, with request source 2
- * ((BIT(3) | 2) << 0 * 4) // channel 0, with request source 2
- */
-#define S3C24XX_CHANSEL_WIDTH 4
-#define S3C24XX_CHANSEL_VALID BIT(3)
-#define S3C24XX_CHANSEL_REQ_MASK 7
-
-/*
- * struct soc_data - vendor-specific config parameters for individual SoCs
- * @stride: spacing between the registers of each channel
- * @has_reqsel: does the controller use the newer requestselection mechanism
- * @has_clocks: are controllable dma-clocks present
- */
-struct soc_data {
- int stride;
- bool has_reqsel;
- bool has_clocks;
-};
-
-/*
- * enum s3c24xx_dma_chan_state - holds the virtual channel states
- * @S3C24XX_DMA_CHAN_IDLE: the channel is idle
- * @S3C24XX_DMA_CHAN_RUNNING: the channel has allocated a physical transport
- * channel and is running a transfer on it
- * @S3C24XX_DMA_CHAN_WAITING: the channel is waiting for a physical transport
- * channel to become available (only pertains to memcpy channels)
- */
-enum s3c24xx_dma_chan_state {
- S3C24XX_DMA_CHAN_IDLE,
- S3C24XX_DMA_CHAN_RUNNING,
- S3C24XX_DMA_CHAN_WAITING,
-};
-
-/*
- * struct s3c24xx_sg - structure containing data per sg
- * @src_addr: src address of sg
- * @dst_addr: dst address of sg
- * @len: transfer len in bytes
- * @node: node for txd's dsg_list
- */
-struct s3c24xx_sg {
- dma_addr_t src_addr;
- dma_addr_t dst_addr;
- size_t len;
- struct list_head node;
-};
-
-/*
- * struct s3c24xx_txd - wrapper for struct dma_async_tx_descriptor
- * @vd: virtual DMA descriptor
- * @dsg_list: list of children sg's
- * @at: sg currently being transfered
- * @width: transfer width
- * @disrcc: value for source control register
- * @didstc: value for destination control register
- * @dcon: base value for dcon register
- * @cyclic: indicate cyclic transfer
- */
-struct s3c24xx_txd {
- struct virt_dma_desc vd;
- struct list_head dsg_list;
- struct list_head *at;
- u8 width;
- u32 disrcc;
- u32 didstc;
- u32 dcon;
- bool cyclic;
-};
-
-struct s3c24xx_dma_chan;
-
-/*
- * struct s3c24xx_dma_phy - holder for the physical channels
- * @id: physical index to this channel
- * @valid: does the channel have all required elements
- * @base: virtual memory base (remapped) for the this channel
- * @irq: interrupt for this channel
- * @clk: clock for this channel
- * @lock: a lock to use when altering an instance of this struct
- * @serving: virtual channel currently being served by this physicalchannel
- * @host: a pointer to the host (internal use)
- */
-struct s3c24xx_dma_phy {
- unsigned int id;
- bool valid;
- void __iomem *base;
- int irq;
- struct clk *clk;
- spinlock_t lock;
- struct s3c24xx_dma_chan *serving;
- struct s3c24xx_dma_engine *host;
-};
-
-/*
- * struct s3c24xx_dma_chan - this structure wraps a DMA ENGINE channel
- * @id: the id of the channel
- * @name: name of the channel
- * @vc: wrapped virtual channel
- * @phy: the physical channel utilized by this channel, if there is one
- * @runtime_addr: address for RX/TX according to the runtime config
- * @at: active transaction on this channel
- * @lock: a lock for this channel data
- * @host: a pointer to the host (internal use)
- * @state: whether the channel is idle, running etc
- * @slave: whether this channel is a device (slave) or for memcpy
- */
-struct s3c24xx_dma_chan {
- int id;
- const char *name;
- struct virt_dma_chan vc;
- struct s3c24xx_dma_phy *phy;
- struct dma_slave_config cfg;
- struct s3c24xx_txd *at;
- struct s3c24xx_dma_engine *host;
- enum s3c24xx_dma_chan_state state;
- bool slave;
-};
-
-/*
- * struct s3c24xx_dma_engine - the local state holder for the S3C24XX
- * @pdev: the corresponding platform device
- * @pdata: platform data passed in from the platform/machine
- * @base: virtual memory base (remapped)
- * @slave: slave engine for this instance
- * @memcpy: memcpy engine for this instance
- * @phy_chans: array of data for the physical channels
- */
-struct s3c24xx_dma_engine {
- struct platform_device *pdev;
- const struct s3c24xx_dma_platdata *pdata;
- struct soc_data *sdata;
- void __iomem *base;
- struct dma_device slave;
- struct dma_device memcpy;
- struct s3c24xx_dma_phy *phy_chans;
-};
-
-/*
- * Physical channel handling
- */
-
-/*
- * Check whether a certain channel is busy or not.
- */
-static int s3c24xx_dma_phy_busy(struct s3c24xx_dma_phy *phy)
-{
- unsigned int val = readl(phy->base + S3C24XX_DSTAT);
- return val & S3C24XX_DSTAT_STAT_BUSY;
-}
-
-static bool s3c24xx_dma_phy_valid(struct s3c24xx_dma_chan *s3cchan,
- struct s3c24xx_dma_phy *phy)
-{
- struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
- const struct s3c24xx_dma_platdata *pdata = s3cdma->pdata;
- struct s3c24xx_dma_channel *cdata = &pdata->channels[s3cchan->id];
- int phyvalid;
-
- /* every phy is valid for memcopy channels */
- if (!s3cchan->slave)
- return true;
-
- /* On newer variants all phys can be used for all virtual channels */
- if (s3cdma->sdata->has_reqsel)
- return true;
-
- phyvalid = (cdata->chansel >> (phy->id * S3C24XX_CHANSEL_WIDTH));
- return (phyvalid & S3C24XX_CHANSEL_VALID) ? true : false;
-}
-
-/*
- * Allocate a physical channel for a virtual channel
- *
- * Try to locate a physical channel to be used for this transfer. If all
- * are taken return NULL and the requester will have to cope by using
- * some fallback PIO mode or retrying later.
- */
-static
-struct s3c24xx_dma_phy *s3c24xx_dma_get_phy(struct s3c24xx_dma_chan *s3cchan)
-{
- struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
- struct s3c24xx_dma_phy *phy = NULL;
- unsigned long flags;
- int i;
- int ret;
-
- for (i = 0; i < s3cdma->pdata->num_phy_channels; i++) {
- phy = &s3cdma->phy_chans[i];
-
- if (!phy->valid)
- continue;
-
- if (!s3c24xx_dma_phy_valid(s3cchan, phy))
- continue;
-
- spin_lock_irqsave(&phy->lock, flags);
-
- if (!phy->serving) {
- phy->serving = s3cchan;
- spin_unlock_irqrestore(&phy->lock, flags);
- break;
- }
-
- spin_unlock_irqrestore(&phy->lock, flags);
- }
-
- /* No physical channel available, cope with it */
- if (i == s3cdma->pdata->num_phy_channels) {
- dev_warn(&s3cdma->pdev->dev, "no phy channel available\n");
- return NULL;
- }
-
- /* start the phy clock */
- if (s3cdma->sdata->has_clocks) {
- ret = clk_enable(phy->clk);
- if (ret) {
- dev_err(&s3cdma->pdev->dev, "could not enable clock for channel %d, err %d\n",
- phy->id, ret);
- phy->serving = NULL;
- return NULL;
- }
- }
-
- return phy;
-}
-
-/*
- * Mark the physical channel as free.
- *
- * This drops the link between the physical and virtual channel.
- */
-static inline void s3c24xx_dma_put_phy(struct s3c24xx_dma_phy *phy)
-{
- struct s3c24xx_dma_engine *s3cdma = phy->host;
-
- if (s3cdma->sdata->has_clocks)
- clk_disable(phy->clk);
-
- phy->serving = NULL;
-}
-
-/*
- * Stops the channel by writing the stop bit.
- * This should not be used for an on-going transfer, but as a method of
- * shutting down a channel (eg, when it's no longer used) or terminating a
- * transfer.
- */
-static void s3c24xx_dma_terminate_phy(struct s3c24xx_dma_phy *phy)
-{
- writel(S3C24XX_DMASKTRIG_STOP, phy->base + S3C24XX_DMASKTRIG);
-}
-
-/*
- * Virtual channel handling
- */
-
-static inline
-struct s3c24xx_dma_chan *to_s3c24xx_dma_chan(struct dma_chan *chan)
-{
- return container_of(chan, struct s3c24xx_dma_chan, vc.chan);
-}
-
-static u32 s3c24xx_dma_getbytes_chan(struct s3c24xx_dma_chan *s3cchan)
-{
- struct s3c24xx_dma_phy *phy = s3cchan->phy;
- struct s3c24xx_txd *txd = s3cchan->at;
- u32 tc = readl(phy->base + S3C24XX_DSTAT) & S3C24XX_DSTAT_CURRTC_MASK;
-
- return tc * txd->width;
-}
-
-static int s3c24xx_dma_set_runtime_config(struct dma_chan *chan,
- struct dma_slave_config *config)
-{
- struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
- unsigned long flags;
- int ret = 0;
-
- /* Reject definitely invalid configurations */
- if (config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
- config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
- return -EINVAL;
-
- spin_lock_irqsave(&s3cchan->vc.lock, flags);
-
- if (!s3cchan->slave) {
- ret = -EINVAL;
- goto out;
- }
-
- s3cchan->cfg = *config;
-
-out:
- spin_unlock_irqrestore(&s3cchan->vc.lock, flags);
- return ret;
-}
-
-/*
- * Transfer handling
- */
-
-static inline
-struct s3c24xx_txd *to_s3c24xx_txd(struct dma_async_tx_descriptor *tx)
-{
- return container_of(tx, struct s3c24xx_txd, vd.tx);
-}
-
-static struct s3c24xx_txd *s3c24xx_dma_get_txd(void)
-{
- struct s3c24xx_txd *txd = kzalloc(sizeof(*txd), GFP_NOWAIT);
-
- if (txd) {
- INIT_LIST_HEAD(&txd->dsg_list);
- txd->dcon = S3C24XX_DCON_INT | S3C24XX_DCON_NORELOAD;
- }
-
- return txd;
-}
-
-static void s3c24xx_dma_free_txd(struct s3c24xx_txd *txd)
-{
- struct s3c24xx_sg *dsg, *_dsg;
-
- list_for_each_entry_safe(dsg, _dsg, &txd->dsg_list, node) {
- list_del(&dsg->node);
- kfree(dsg);
- }
-
- kfree(txd);
-}
-
-static void s3c24xx_dma_start_next_sg(struct s3c24xx_dma_chan *s3cchan,
- struct s3c24xx_txd *txd)
-{
- struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
- struct s3c24xx_dma_phy *phy = s3cchan->phy;
- const struct s3c24xx_dma_platdata *pdata = s3cdma->pdata;
- struct s3c24xx_sg *dsg = list_entry(txd->at, struct s3c24xx_sg, node);
- u32 dcon = txd->dcon;
- u32 val;
-
- /* transfer-size and -count from len and width */
- switch (txd->width) {
- case 1:
- dcon |= S3C24XX_DCON_DSZ_BYTE | dsg->len;
- break;
- case 2:
- dcon |= S3C24XX_DCON_DSZ_HALFWORD | (dsg->len / 2);
- break;
- case 4:
- dcon |= S3C24XX_DCON_DSZ_WORD | (dsg->len / 4);
- break;
- }
-
- if (s3cchan->slave) {
- struct s3c24xx_dma_channel *cdata =
- &pdata->channels[s3cchan->id];
-
- if (s3cdma->sdata->has_reqsel) {
- writel_relaxed((cdata->chansel << 1) |
- S3C24XX_DMAREQSEL_HW,
- phy->base + S3C24XX_DMAREQSEL);
- } else {
- int csel = cdata->chansel >> (phy->id *
- S3C24XX_CHANSEL_WIDTH);
-
- csel &= S3C24XX_CHANSEL_REQ_MASK;
- dcon |= csel << S3C24XX_DCON_HWSRC_SHIFT;
- dcon |= S3C24XX_DCON_HWTRIG;
- }
- } else {
- if (s3cdma->sdata->has_reqsel)
- writel_relaxed(0, phy->base + S3C24XX_DMAREQSEL);
- }
-
- writel_relaxed(dsg->src_addr, phy->base + S3C24XX_DISRC);
- writel_relaxed(txd->disrcc, phy->base + S3C24XX_DISRCC);
- writel_relaxed(dsg->dst_addr, phy->base + S3C24XX_DIDST);
- writel_relaxed(txd->didstc, phy->base + S3C24XX_DIDSTC);
- writel_relaxed(dcon, phy->base + S3C24XX_DCON);
-
- val = readl_relaxed(phy->base + S3C24XX_DMASKTRIG);
- val &= ~S3C24XX_DMASKTRIG_STOP;
- val |= S3C24XX_DMASKTRIG_ON;
-
- /* trigger the dma operation for memcpy transfers */
- if (!s3cchan->slave)
- val |= S3C24XX_DMASKTRIG_SWTRIG;
-
- writel(val, phy->base + S3C24XX_DMASKTRIG);
-}
-
-/*
- * Set the initial DMA register values and start first sg.
- */
-static void s3c24xx_dma_start_next_txd(struct s3c24xx_dma_chan *s3cchan)
-{
- struct s3c24xx_dma_phy *phy = s3cchan->phy;
- struct virt_dma_desc *vd = vchan_next_desc(&s3cchan->vc);
- struct s3c24xx_txd *txd = to_s3c24xx_txd(&vd->tx);
-
- list_del(&txd->vd.node);
-
- s3cchan->at = txd;
-
- /* Wait for channel inactive */
- while (s3c24xx_dma_phy_busy(phy))
- cpu_relax();
-
- /* point to the first element of the sg list */
- txd->at = txd->dsg_list.next;
- s3c24xx_dma_start_next_sg(s3cchan, txd);
-}
-
-/*
- * Try to allocate a physical channel. When successful, assign it to
- * this virtual channel, and initiate the next descriptor. The
- * virtual channel lock must be held at this point.
- */
-static void s3c24xx_dma_phy_alloc_and_start(struct s3c24xx_dma_chan *s3cchan)
-{
- struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
- struct s3c24xx_dma_phy *phy;
-
- phy = s3c24xx_dma_get_phy(s3cchan);
- if (!phy) {
- dev_dbg(&s3cdma->pdev->dev, "no physical channel available for xfer on %s\n",
- s3cchan->name);
- s3cchan->state = S3C24XX_DMA_CHAN_WAITING;
- return;
- }
-
- dev_dbg(&s3cdma->pdev->dev, "allocated physical channel %d for xfer on %s\n",
- phy->id, s3cchan->name);
-
- s3cchan->phy = phy;
- s3cchan->state = S3C24XX_DMA_CHAN_RUNNING;
-
- s3c24xx_dma_start_next_txd(s3cchan);
-}
-
-static void s3c24xx_dma_phy_reassign_start(struct s3c24xx_dma_phy *phy,
- struct s3c24xx_dma_chan *s3cchan)
-{
- struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
-
- dev_dbg(&s3cdma->pdev->dev, "reassigned physical channel %d for xfer on %s\n",
- phy->id, s3cchan->name);
-
- /*
- * We do this without taking the lock; we're really only concerned
- * about whether this pointer is NULL or not, and we're guaranteed
- * that this will only be called when it _already_ is non-NULL.
- */
- phy->serving = s3cchan;
- s3cchan->phy = phy;
- s3cchan->state = S3C24XX_DMA_CHAN_RUNNING;
- s3c24xx_dma_start_next_txd(s3cchan);
-}
-
-/*
- * Free a physical DMA channel, potentially reallocating it to another
- * virtual channel if we have any pending.
- */
-static void s3c24xx_dma_phy_free(struct s3c24xx_dma_chan *s3cchan)
-{
- struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
- struct s3c24xx_dma_chan *p, *next;
-
-retry:
- next = NULL;
-
- /* Find a waiting virtual channel for the next transfer. */
- list_for_each_entry(p, &s3cdma->memcpy.channels, vc.chan.device_node)
- if (p->state == S3C24XX_DMA_CHAN_WAITING) {
- next = p;
- break;
- }
-
- if (!next) {
- list_for_each_entry(p, &s3cdma->slave.channels,
- vc.chan.device_node)
- if (p->state == S3C24XX_DMA_CHAN_WAITING &&
- s3c24xx_dma_phy_valid(p, s3cchan->phy)) {
- next = p;
- break;
- }
- }
-
- /* Ensure that the physical channel is stopped */
- s3c24xx_dma_terminate_phy(s3cchan->phy);
-
- if (next) {
- bool success;
-
- /*
- * Eww. We know this isn't going to deadlock
- * but lockdep probably doesn't.
- */
- spin_lock(&next->vc.lock);
- /* Re-check the state now that we have the lock */
- success = next->state == S3C24XX_DMA_CHAN_WAITING;
- if (success)
- s3c24xx_dma_phy_reassign_start(s3cchan->phy, next);
- spin_unlock(&next->vc.lock);
-
- /* If the state changed, try to find another channel */
- if (!success)
- goto retry;
- } else {
- /* No more jobs, so free up the physical channel */
- s3c24xx_dma_put_phy(s3cchan->phy);
- }
-
- s3cchan->phy = NULL;
- s3cchan->state = S3C24XX_DMA_CHAN_IDLE;
-}
-
-static void s3c24xx_dma_desc_free(struct virt_dma_desc *vd)
-{
- struct s3c24xx_txd *txd = to_s3c24xx_txd(&vd->tx);
- struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(vd->tx.chan);
-
- if (!s3cchan->slave)
- dma_descriptor_unmap(&vd->tx);
-
- s3c24xx_dma_free_txd(txd);
-}
-
-static irqreturn_t s3c24xx_dma_irq(int irq, void *data)
-{
- struct s3c24xx_dma_phy *phy = data;
- struct s3c24xx_dma_chan *s3cchan = phy->serving;
- struct s3c24xx_txd *txd;
-
- dev_dbg(&phy->host->pdev->dev, "interrupt on channel %d\n", phy->id);
-
- /*
- * Interrupts happen to notify the completion of a transfer and the
- * channel should have moved into its stop state already on its own.
- * Therefore interrupts on channels not bound to a virtual channel
- * should never happen. Nevertheless send a terminate command to the
- * channel if the unlikely case happens.
- */
- if (unlikely(!s3cchan)) {
- dev_err(&phy->host->pdev->dev, "interrupt on unused channel %d\n",
- phy->id);
-
- s3c24xx_dma_terminate_phy(phy);
-
- return IRQ_HANDLED;
- }
-
- spin_lock(&s3cchan->vc.lock);
- txd = s3cchan->at;
- if (txd) {
- /* when more sg's are in this txd, start the next one */
- if (!list_is_last(txd->at, &txd->dsg_list)) {
- txd->at = txd->at->next;
- if (txd->cyclic)
- vchan_cyclic_callback(&txd->vd);
- s3c24xx_dma_start_next_sg(s3cchan, txd);
- } else if (!txd->cyclic) {
- s3cchan->at = NULL;
- vchan_cookie_complete(&txd->vd);
-
- /*
- * And start the next descriptor (if any),
- * otherwise free this channel.
- */
- if (vchan_next_desc(&s3cchan->vc))
- s3c24xx_dma_start_next_txd(s3cchan);
- else
- s3c24xx_dma_phy_free(s3cchan);
- } else {
- vchan_cyclic_callback(&txd->vd);
-
- /* Cyclic: reset at beginning */
- txd->at = txd->dsg_list.next;
- s3c24xx_dma_start_next_sg(s3cchan, txd);
- }
- }
- spin_unlock(&s3cchan->vc.lock);
-
- return IRQ_HANDLED;
-}
-
-/*
- * The DMA ENGINE API
- */
-
-static int s3c24xx_dma_terminate_all(struct dma_chan *chan)
-{
- struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
- struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
- LIST_HEAD(head);
- unsigned long flags;
- int ret;
-
- spin_lock_irqsave(&s3cchan->vc.lock, flags);
-
- if (!s3cchan->phy && !s3cchan->at) {
- dev_err(&s3cdma->pdev->dev, "trying to terminate already stopped channel %d\n",
- s3cchan->id);
- ret = -EINVAL;
- goto unlock;
- }
-
- s3cchan->state = S3C24XX_DMA_CHAN_IDLE;
-
- /* Mark physical channel as free */
- if (s3cchan->phy)
- s3c24xx_dma_phy_free(s3cchan);
-
- /* Dequeue current job */
- if (s3cchan->at) {
- vchan_terminate_vdesc(&s3cchan->at->vd);
- s3cchan->at = NULL;
- }
-
- /* Dequeue jobs not yet fired as well */
-
- vchan_get_all_descriptors(&s3cchan->vc, &head);
-
- spin_unlock_irqrestore(&s3cchan->vc.lock, flags);
-
- vchan_dma_desc_free_list(&s3cchan->vc, &head);
-
- return 0;
-
-unlock:
- spin_unlock_irqrestore(&s3cchan->vc.lock, flags);
-
- return ret;
-}
-
-static void s3c24xx_dma_synchronize(struct dma_chan *chan)
-{
- struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
-
- vchan_synchronize(&s3cchan->vc);
-}
-
-static void s3c24xx_dma_free_chan_resources(struct dma_chan *chan)
-{
- /* Ensure all queued descriptors are freed */
- vchan_free_chan_resources(to_virt_chan(chan));
-}
-
-static enum dma_status s3c24xx_dma_tx_status(struct dma_chan *chan,
- dma_cookie_t cookie, struct dma_tx_state *txstate)
-{
- struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
- struct s3c24xx_txd *txd;
- struct s3c24xx_sg *dsg;
- struct virt_dma_desc *vd;
- unsigned long flags;
- enum dma_status ret;
- size_t bytes = 0;
-
- spin_lock_irqsave(&s3cchan->vc.lock, flags);
- ret = dma_cookie_status(chan, cookie, txstate);
-
- /*
- * There's no point calculating the residue if there's
- * no txstate to store the value.
- */
- if (ret == DMA_COMPLETE || !txstate) {
- spin_unlock_irqrestore(&s3cchan->vc.lock, flags);
- return ret;
- }
-
- vd = vchan_find_desc(&s3cchan->vc, cookie);
- if (vd) {
- /* On the issued list, so hasn't been processed yet */
- txd = to_s3c24xx_txd(&vd->tx);
-
- list_for_each_entry(dsg, &txd->dsg_list, node)
- bytes += dsg->len;
- } else {
- /*
- * Currently running, so sum over the pending sg's and
- * the currently active one.
- */
- txd = s3cchan->at;
-
- dsg = list_entry(txd->at, struct s3c24xx_sg, node);
- list_for_each_entry_from(dsg, &txd->dsg_list, node)
- bytes += dsg->len;
-
- bytes += s3c24xx_dma_getbytes_chan(s3cchan);
- }
- spin_unlock_irqrestore(&s3cchan->vc.lock, flags);
-
- /*
- * This cookie not complete yet
- * Get number of bytes left in the active transactions and queue
- */
- dma_set_residue(txstate, bytes);
-
- /* Whether waiting or running, we're in progress */
- return ret;
-}
-
-/*
- * Initialize a descriptor to be used by memcpy submit
- */
-static struct dma_async_tx_descriptor *s3c24xx_dma_prep_memcpy(
- struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
- size_t len, unsigned long flags)
-{
- struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
- struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
- struct s3c24xx_txd *txd;
- struct s3c24xx_sg *dsg;
- int src_mod, dest_mod;
-
- dev_dbg(&s3cdma->pdev->dev, "prepare memcpy of %zu bytes from %s\n",
- len, s3cchan->name);
-
- if ((len & S3C24XX_DCON_TC_MASK) != len) {
- dev_err(&s3cdma->pdev->dev, "memcpy size %zu to large\n", len);
- return NULL;
- }
-
- txd = s3c24xx_dma_get_txd();
- if (!txd)
- return NULL;
-
- dsg = kzalloc(sizeof(*dsg), GFP_NOWAIT);
- if (!dsg) {
- s3c24xx_dma_free_txd(txd);
- return NULL;
- }
- list_add_tail(&dsg->node, &txd->dsg_list);
-
- dsg->src_addr = src;
- dsg->dst_addr = dest;
- dsg->len = len;
-
- /*
- * Determine a suitable transfer width.
- * The DMA controller cannot fetch/store information which is not
- * naturally aligned on the bus, i.e., a 4 byte fetch must start at
- * an address divisible by 4 - more generally addr % width must be 0.
- */
- src_mod = src % 4;
- dest_mod = dest % 4;
- switch (len % 4) {
- case 0:
- txd->width = (src_mod == 0 && dest_mod == 0) ? 4 : 1;
- break;
- case 2:
- txd->width = ((src_mod == 2 || src_mod == 0) &&
- (dest_mod == 2 || dest_mod == 0)) ? 2 : 1;
- break;
- default:
- txd->width = 1;
- break;
- }
-
- txd->disrcc = S3C24XX_DISRCC_LOC_AHB | S3C24XX_DISRCC_INC_INCREMENT;
- txd->didstc = S3C24XX_DIDSTC_LOC_AHB | S3C24XX_DIDSTC_INC_INCREMENT;
- txd->dcon |= S3C24XX_DCON_DEMAND | S3C24XX_DCON_SYNC_HCLK |
- S3C24XX_DCON_SERV_WHOLE;
-
- return vchan_tx_prep(&s3cchan->vc, &txd->vd, flags);
-}
-
-static struct dma_async_tx_descriptor *s3c24xx_dma_prep_dma_cyclic(
- struct dma_chan *chan, dma_addr_t addr, size_t size, size_t period,
- enum dma_transfer_direction direction, unsigned long flags)
-{
- struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
- struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
- const struct s3c24xx_dma_platdata *pdata = s3cdma->pdata;
- struct s3c24xx_dma_channel *cdata = &pdata->channels[s3cchan->id];
- struct s3c24xx_txd *txd;
- struct s3c24xx_sg *dsg;
- unsigned sg_len;
- dma_addr_t slave_addr;
- u32 hwcfg = 0;
- int i;
-
- dev_dbg(&s3cdma->pdev->dev,
- "prepare cyclic transaction of %zu bytes with period %zu from %s\n",
- size, period, s3cchan->name);
-
- if (!is_slave_direction(direction)) {
- dev_err(&s3cdma->pdev->dev,
- "direction %d unsupported\n", direction);
- return NULL;
- }
-
- txd = s3c24xx_dma_get_txd();
- if (!txd)
- return NULL;
-
- txd->cyclic = 1;
-
- if (cdata->handshake)
- txd->dcon |= S3C24XX_DCON_HANDSHAKE;
-
- switch (cdata->bus) {
- case S3C24XX_DMA_APB:
- txd->dcon |= S3C24XX_DCON_SYNC_PCLK;
- hwcfg |= S3C24XX_DISRCC_LOC_APB;
- break;
- case S3C24XX_DMA_AHB:
- txd->dcon |= S3C24XX_DCON_SYNC_HCLK;
- hwcfg |= S3C24XX_DISRCC_LOC_AHB;
- break;
- }
-
- /*
- * Always assume our peripheral desintation is a fixed
- * address in memory.
- */
- hwcfg |= S3C24XX_DISRCC_INC_FIXED;
-
- /*
- * Individual dma operations are requested by the slave,
- * so serve only single atomic operations (S3C24XX_DCON_SERV_SINGLE).
- */
- txd->dcon |= S3C24XX_DCON_SERV_SINGLE;
-
- if (direction == DMA_MEM_TO_DEV) {
- txd->disrcc = S3C24XX_DISRCC_LOC_AHB |
- S3C24XX_DISRCC_INC_INCREMENT;
- txd->didstc = hwcfg;
- slave_addr = s3cchan->cfg.dst_addr;
- txd->width = s3cchan->cfg.dst_addr_width;
- } else {
- txd->disrcc = hwcfg;
- txd->didstc = S3C24XX_DIDSTC_LOC_AHB |
- S3C24XX_DIDSTC_INC_INCREMENT;
- slave_addr = s3cchan->cfg.src_addr;
- txd->width = s3cchan->cfg.src_addr_width;
- }
-
- sg_len = size / period;
-
- for (i = 0; i < sg_len; i++) {
- dsg = kzalloc(sizeof(*dsg), GFP_NOWAIT);
- if (!dsg) {
- s3c24xx_dma_free_txd(txd);
- return NULL;
- }
- list_add_tail(&dsg->node, &txd->dsg_list);
-
- dsg->len = period;
- /* Check last period length */
- if (i == sg_len - 1)
- dsg->len = size - period * i;
- if (direction == DMA_MEM_TO_DEV) {
- dsg->src_addr = addr + period * i;
- dsg->dst_addr = slave_addr;
- } else { /* DMA_DEV_TO_MEM */
- dsg->src_addr = slave_addr;
- dsg->dst_addr = addr + period * i;
- }
- }
-
- return vchan_tx_prep(&s3cchan->vc, &txd->vd, flags);
-}
-
-static struct dma_async_tx_descriptor *s3c24xx_dma_prep_slave_sg(
- struct dma_chan *chan, struct scatterlist *sgl,
- unsigned int sg_len, enum dma_transfer_direction direction,
- unsigned long flags, void *context)
-{
- struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
- struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
- const struct s3c24xx_dma_platdata *pdata = s3cdma->pdata;
- struct s3c24xx_dma_channel *cdata = &pdata->channels[s3cchan->id];
- struct s3c24xx_txd *txd;
- struct s3c24xx_sg *dsg;
- struct scatterlist *sg;
- dma_addr_t slave_addr;
- u32 hwcfg = 0;
- int tmp;
-
- dev_dbg(&s3cdma->pdev->dev, "prepare transaction of %d bytes from %s\n",
- sg_dma_len(sgl), s3cchan->name);
-
- txd = s3c24xx_dma_get_txd();
- if (!txd)
- return NULL;
-
- if (cdata->handshake)
- txd->dcon |= S3C24XX_DCON_HANDSHAKE;
-
- switch (cdata->bus) {
- case S3C24XX_DMA_APB:
- txd->dcon |= S3C24XX_DCON_SYNC_PCLK;
- hwcfg |= S3C24XX_DISRCC_LOC_APB;
- break;
- case S3C24XX_DMA_AHB:
- txd->dcon |= S3C24XX_DCON_SYNC_HCLK;
- hwcfg |= S3C24XX_DISRCC_LOC_AHB;
- break;
- }
-
- /*
- * Always assume our peripheral desintation is a fixed
- * address in memory.
- */
- hwcfg |= S3C24XX_DISRCC_INC_FIXED;
-
- /*
- * Individual dma operations are requested by the slave,
- * so serve only single atomic operations (S3C24XX_DCON_SERV_SINGLE).
- */
- txd->dcon |= S3C24XX_DCON_SERV_SINGLE;
-
- if (direction == DMA_MEM_TO_DEV) {
- txd->disrcc = S3C24XX_DISRCC_LOC_AHB |
- S3C24XX_DISRCC_INC_INCREMENT;
- txd->didstc = hwcfg;
- slave_addr = s3cchan->cfg.dst_addr;
- txd->width = s3cchan->cfg.dst_addr_width;
- } else if (direction == DMA_DEV_TO_MEM) {
- txd->disrcc = hwcfg;
- txd->didstc = S3C24XX_DIDSTC_LOC_AHB |
- S3C24XX_DIDSTC_INC_INCREMENT;
- slave_addr = s3cchan->cfg.src_addr;
- txd->width = s3cchan->cfg.src_addr_width;
- } else {
- s3c24xx_dma_free_txd(txd);
- dev_err(&s3cdma->pdev->dev,
- "direction %d unsupported\n", direction);
- return NULL;
- }
-
- for_each_sg(sgl, sg, sg_len, tmp) {
- dsg = kzalloc(sizeof(*dsg), GFP_NOWAIT);
- if (!dsg) {
- s3c24xx_dma_free_txd(txd);
- return NULL;
- }
- list_add_tail(&dsg->node, &txd->dsg_list);
-
- dsg->len = sg_dma_len(sg);
- if (direction == DMA_MEM_TO_DEV) {
- dsg->src_addr = sg_dma_address(sg);
- dsg->dst_addr = slave_addr;
- } else { /* DMA_DEV_TO_MEM */
- dsg->src_addr = slave_addr;
- dsg->dst_addr = sg_dma_address(sg);
- }
- }
-
- return vchan_tx_prep(&s3cchan->vc, &txd->vd, flags);
-}
-
-/*
- * Slave transactions callback to the slave device to allow
- * synchronization of slave DMA signals with the DMAC enable
- */
-static void s3c24xx_dma_issue_pending(struct dma_chan *chan)
-{
- struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
- unsigned long flags;
-
- spin_lock_irqsave(&s3cchan->vc.lock, flags);
- if (vchan_issue_pending(&s3cchan->vc)) {
- if (!s3cchan->phy && s3cchan->state != S3C24XX_DMA_CHAN_WAITING)
- s3c24xx_dma_phy_alloc_and_start(s3cchan);
- }
- spin_unlock_irqrestore(&s3cchan->vc.lock, flags);
-}
-
-/*
- * Bringup and teardown
- */
-
-/*
- * Initialise the DMAC memcpy/slave channels.
- * Make a local wrapper to hold required data
- */
-static int s3c24xx_dma_init_virtual_channels(struct s3c24xx_dma_engine *s3cdma,
- struct dma_device *dmadev, unsigned int channels, bool slave)
-{
- struct s3c24xx_dma_chan *chan;
- int i;
-
- INIT_LIST_HEAD(&dmadev->channels);
-
- /*
- * Register as many memcpy as we have physical channels,
- * we won't always be able to use all but the code will have
- * to cope with that situation.
- */
- for (i = 0; i < channels; i++) {
- chan = devm_kzalloc(dmadev->dev, sizeof(*chan), GFP_KERNEL);
- if (!chan)
- return -ENOMEM;
-
- chan->id = i;
- chan->host = s3cdma;
- chan->state = S3C24XX_DMA_CHAN_IDLE;
-
- if (slave) {
- chan->slave = true;
- chan->name = kasprintf(GFP_KERNEL, "slave%d", i);
- if (!chan->name)
- return -ENOMEM;
- } else {
- chan->name = kasprintf(GFP_KERNEL, "memcpy%d", i);
- if (!chan->name)
- return -ENOMEM;
- }
- dev_dbg(dmadev->dev,
- "initialize virtual channel \"%s\"\n",
- chan->name);
-
- chan->vc.desc_free = s3c24xx_dma_desc_free;
- vchan_init(&chan->vc, dmadev);
- }
- dev_info(dmadev->dev, "initialized %d virtual %s channels\n",
- i, slave ? "slave" : "memcpy");
- return i;
-}
-
-static void s3c24xx_dma_free_virtual_channels(struct dma_device *dmadev)
-{
- struct s3c24xx_dma_chan *chan = NULL;
- struct s3c24xx_dma_chan *next;
-
- list_for_each_entry_safe(chan,
- next, &dmadev->channels, vc.chan.device_node) {
- list_del(&chan->vc.chan.device_node);
- tasklet_kill(&chan->vc.task);
- }
-}
-
-/* s3c2410, s3c2440 and s3c2442 have a 0x40 stride without separate clocks */
-static struct soc_data soc_s3c2410 = {
- .stride = 0x40,
- .has_reqsel = false,
- .has_clocks = false,
-};
-
-/* s3c2412 and s3c2413 have a 0x40 stride and dmareqsel mechanism */
-static struct soc_data soc_s3c2412 = {
- .stride = 0x40,
- .has_reqsel = true,
- .has_clocks = true,
-};
-
-/* s3c2443 and following have a 0x100 stride and dmareqsel mechanism */
-static struct soc_data soc_s3c2443 = {
- .stride = 0x100,
- .has_reqsel = true,
- .has_clocks = true,
-};
-
-static const struct platform_device_id s3c24xx_dma_driver_ids[] = {
- {
- .name = "s3c2410-dma",
- .driver_data = (kernel_ulong_t)&soc_s3c2410,
- }, {
- .name = "s3c2412-dma",
- .driver_data = (kernel_ulong_t)&soc_s3c2412,
- }, {
- .name = "s3c2443-dma",
- .driver_data = (kernel_ulong_t)&soc_s3c2443,
- },
- { },
-};
-
-static struct soc_data *s3c24xx_dma_get_soc_data(struct platform_device *pdev)
-{
- return (struct soc_data *)
- platform_get_device_id(pdev)->driver_data;
-}
-
-static int s3c24xx_dma_probe(struct platform_device *pdev)
-{
- const struct s3c24xx_dma_platdata *pdata = dev_get_platdata(&pdev->dev);
- struct s3c24xx_dma_engine *s3cdma;
- struct soc_data *sdata;
- struct resource *res;
- int ret;
- int i;
-
- if (!pdata) {
- dev_err(&pdev->dev, "platform data missing\n");
- return -ENODEV;
- }
-
- /* Basic sanity check */
- if (pdata->num_phy_channels > MAX_DMA_CHANNELS) {
- dev_err(&pdev->dev, "too many dma channels %d, max %d\n",
- pdata->num_phy_channels, MAX_DMA_CHANNELS);
- return -EINVAL;
- }
-
- sdata = s3c24xx_dma_get_soc_data(pdev);
- if (!sdata)
- return -EINVAL;
-
- s3cdma = devm_kzalloc(&pdev->dev, sizeof(*s3cdma), GFP_KERNEL);
- if (!s3cdma)
- return -ENOMEM;
-
- s3cdma->pdev = pdev;
- s3cdma->pdata = pdata;
- s3cdma->sdata = sdata;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- s3cdma->base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(s3cdma->base))
- return PTR_ERR(s3cdma->base);
-
- s3cdma->phy_chans = devm_kcalloc(&pdev->dev,
- pdata->num_phy_channels,
- sizeof(struct s3c24xx_dma_phy),
- GFP_KERNEL);
- if (!s3cdma->phy_chans)
- return -ENOMEM;
-
- /* acquire irqs and clocks for all physical channels */
- for (i = 0; i < pdata->num_phy_channels; i++) {
- struct s3c24xx_dma_phy *phy = &s3cdma->phy_chans[i];
- char clk_name[6];
-
- phy->id = i;
- phy->base = s3cdma->base + (i * sdata->stride);
- phy->host = s3cdma;
-
- phy->irq = platform_get_irq(pdev, i);
- if (phy->irq < 0)
- continue;
-
- ret = devm_request_irq(&pdev->dev, phy->irq, s3c24xx_dma_irq,
- 0, pdev->name, phy);
- if (ret) {
- dev_err(&pdev->dev, "Unable to request irq for channel %d, error %d\n",
- i, ret);
- continue;
- }
-
- if (sdata->has_clocks) {
- sprintf(clk_name, "dma.%d", i);
- phy->clk = devm_clk_get(&pdev->dev, clk_name);
- if (IS_ERR(phy->clk) && sdata->has_clocks) {
- dev_err(&pdev->dev, "unable to acquire clock for channel %d, error %lu\n",
- i, PTR_ERR(phy->clk));
- continue;
- }
-
- ret = clk_prepare(phy->clk);
- if (ret) {
- dev_err(&pdev->dev, "clock for phy %d failed, error %d\n",
- i, ret);
- continue;
- }
- }
-
- spin_lock_init(&phy->lock);
- phy->valid = true;
-
- dev_dbg(&pdev->dev, "physical channel %d is %s\n",
- i, s3c24xx_dma_phy_busy(phy) ? "BUSY" : "FREE");
- }
-
- /* Initialize memcpy engine */
- dma_cap_set(DMA_MEMCPY, s3cdma->memcpy.cap_mask);
- dma_cap_set(DMA_PRIVATE, s3cdma->memcpy.cap_mask);
- s3cdma->memcpy.dev = &pdev->dev;
- s3cdma->memcpy.device_free_chan_resources =
- s3c24xx_dma_free_chan_resources;
- s3cdma->memcpy.device_prep_dma_memcpy = s3c24xx_dma_prep_memcpy;
- s3cdma->memcpy.device_tx_status = s3c24xx_dma_tx_status;
- s3cdma->memcpy.device_issue_pending = s3c24xx_dma_issue_pending;
- s3cdma->memcpy.device_config = s3c24xx_dma_set_runtime_config;
- s3cdma->memcpy.device_terminate_all = s3c24xx_dma_terminate_all;
- s3cdma->memcpy.device_synchronize = s3c24xx_dma_synchronize;
-
- /* Initialize slave engine for SoC internal dedicated peripherals */
- dma_cap_set(DMA_SLAVE, s3cdma->slave.cap_mask);
- dma_cap_set(DMA_CYCLIC, s3cdma->slave.cap_mask);
- dma_cap_set(DMA_PRIVATE, s3cdma->slave.cap_mask);
- s3cdma->slave.dev = &pdev->dev;
- s3cdma->slave.device_free_chan_resources =
- s3c24xx_dma_free_chan_resources;
- s3cdma->slave.device_tx_status = s3c24xx_dma_tx_status;
- s3cdma->slave.device_issue_pending = s3c24xx_dma_issue_pending;
- s3cdma->slave.device_prep_slave_sg = s3c24xx_dma_prep_slave_sg;
- s3cdma->slave.device_prep_dma_cyclic = s3c24xx_dma_prep_dma_cyclic;
- s3cdma->slave.device_config = s3c24xx_dma_set_runtime_config;
- s3cdma->slave.device_terminate_all = s3c24xx_dma_terminate_all;
- s3cdma->slave.device_synchronize = s3c24xx_dma_synchronize;
- s3cdma->slave.filter.map = pdata->slave_map;
- s3cdma->slave.filter.mapcnt = pdata->slavecnt;
- s3cdma->slave.filter.fn = s3c24xx_dma_filter;
-
- /* Register as many memcpy channels as there are physical channels */
- ret = s3c24xx_dma_init_virtual_channels(s3cdma, &s3cdma->memcpy,
- pdata->num_phy_channels, false);
- if (ret <= 0) {
- dev_warn(&pdev->dev,
- "%s failed to enumerate memcpy channels - %d\n",
- __func__, ret);
- goto err_memcpy;
- }
-
- /* Register slave channels */
- ret = s3c24xx_dma_init_virtual_channels(s3cdma, &s3cdma->slave,
- pdata->num_channels, true);
- if (ret <= 0) {
- dev_warn(&pdev->dev,
- "%s failed to enumerate slave channels - %d\n",
- __func__, ret);
- goto err_slave;
- }
-
- ret = dma_async_device_register(&s3cdma->memcpy);
- if (ret) {
- dev_warn(&pdev->dev,
- "%s failed to register memcpy as an async device - %d\n",
- __func__, ret);
- goto err_memcpy_reg;
- }
-
- ret = dma_async_device_register(&s3cdma->slave);
- if (ret) {
- dev_warn(&pdev->dev,
- "%s failed to register slave as an async device - %d\n",
- __func__, ret);
- goto err_slave_reg;
- }
-
- platform_set_drvdata(pdev, s3cdma);
- dev_info(&pdev->dev, "Loaded dma driver with %d physical channels\n",
- pdata->num_phy_channels);
-
- return 0;
-
-err_slave_reg:
- dma_async_device_unregister(&s3cdma->memcpy);
-err_memcpy_reg:
- s3c24xx_dma_free_virtual_channels(&s3cdma->slave);
-err_slave:
- s3c24xx_dma_free_virtual_channels(&s3cdma->memcpy);
-err_memcpy:
- if (sdata->has_clocks)
- for (i = 0; i < pdata->num_phy_channels; i++) {
- struct s3c24xx_dma_phy *phy = &s3cdma->phy_chans[i];
- if (phy->valid)
- clk_unprepare(phy->clk);
- }
-
- return ret;
-}
-
-static void s3c24xx_dma_free_irq(struct platform_device *pdev,
- struct s3c24xx_dma_engine *s3cdma)
-{
- int i;
-
- for (i = 0; i < s3cdma->pdata->num_phy_channels; i++) {
- struct s3c24xx_dma_phy *phy = &s3cdma->phy_chans[i];
-
- devm_free_irq(&pdev->dev, phy->irq, phy);
- }
-}
-
-static int s3c24xx_dma_remove(struct platform_device *pdev)
-{
- const struct s3c24xx_dma_platdata *pdata = dev_get_platdata(&pdev->dev);
- struct s3c24xx_dma_engine *s3cdma = platform_get_drvdata(pdev);
- struct soc_data *sdata = s3c24xx_dma_get_soc_data(pdev);
- int i;
-
- dma_async_device_unregister(&s3cdma->slave);
- dma_async_device_unregister(&s3cdma->memcpy);
-
- s3c24xx_dma_free_irq(pdev, s3cdma);
-
- s3c24xx_dma_free_virtual_channels(&s3cdma->slave);
- s3c24xx_dma_free_virtual_channels(&s3cdma->memcpy);
-
- if (sdata->has_clocks)
- for (i = 0; i < pdata->num_phy_channels; i++) {
- struct s3c24xx_dma_phy *phy = &s3cdma->phy_chans[i];
- if (phy->valid)
- clk_unprepare(phy->clk);
- }
-
- return 0;
-}
-
-static struct platform_driver s3c24xx_dma_driver = {
- .driver = {
- .name = "s3c24xx-dma",
- },
- .id_table = s3c24xx_dma_driver_ids,
- .probe = s3c24xx_dma_probe,
- .remove = s3c24xx_dma_remove,
-};
-
-module_platform_driver(s3c24xx_dma_driver);
-
-bool s3c24xx_dma_filter(struct dma_chan *chan, void *param)
-{
- struct s3c24xx_dma_chan *s3cchan;
-
- if (chan->device->dev->driver != &s3c24xx_dma_driver.driver)
- return false;
-
- s3cchan = to_s3c24xx_dma_chan(chan);
-
- return s3cchan->id == (uintptr_t)param;
-}
-EXPORT_SYMBOL(s3c24xx_dma_filter);
-
-MODULE_DESCRIPTION("S3C24XX DMA Driver");
-MODULE_AUTHOR("Heiko Stuebner");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/sf-pdma/sf-pdma.c b/drivers/dma/sf-pdma/sf-pdma.c
index 6b524eb6bcf3..d1c6956af452 100644
--- a/drivers/dma/sf-pdma/sf-pdma.c
+++ b/drivers/dma/sf-pdma/sf-pdma.c
@@ -96,7 +96,6 @@ sf_pdma_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dest, dma_addr_t src,
if (!desc)
return NULL;
- desc->in_use = true;
desc->dirn = DMA_MEM_TO_MEM;
desc->async_tx = vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
@@ -290,7 +289,7 @@ static void sf_pdma_free_desc(struct virt_dma_desc *vdesc)
struct sf_pdma_desc *desc;
desc = to_sf_pdma_desc(vdesc);
- desc->in_use = false;
+ kfree(desc);
}
static void sf_pdma_donebh_tasklet(struct tasklet_struct *t)
@@ -494,7 +493,6 @@ static void sf_pdma_setup_chans(struct sf_pdma *pdma)
static int sf_pdma_probe(struct platform_device *pdev)
{
struct sf_pdma *pdma;
- struct resource *res;
int ret, n_chans;
const enum dma_slave_buswidth widths =
DMA_SLAVE_BUSWIDTH_1_BYTE | DMA_SLAVE_BUSWIDTH_2_BYTES |
@@ -519,8 +517,7 @@ static int sf_pdma_probe(struct platform_device *pdev)
pdma->n_chans = n_chans;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pdma->membase = devm_ioremap_resource(&pdev->dev, res);
+ pdma->membase = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pdma->membase))
return PTR_ERR(pdma->membase);
diff --git a/drivers/dma/sf-pdma/sf-pdma.h b/drivers/dma/sf-pdma/sf-pdma.h
index dcb3687bd5da..5c398a83b491 100644
--- a/drivers/dma/sf-pdma/sf-pdma.h
+++ b/drivers/dma/sf-pdma/sf-pdma.h
@@ -78,7 +78,6 @@ struct sf_pdma_desc {
u64 src_addr;
struct virt_dma_desc vdesc;
struct sf_pdma_chan *chan;
- bool in_use;
enum dma_transfer_direction dirn;
struct dma_async_tx_descriptor *async_tx;
};
diff --git a/drivers/dma/sh/usb-dmac.c b/drivers/dma/sh/usb-dmac.c
index 5edaeb89d1e6..b14cf350b669 100644
--- a/drivers/dma/sh/usb-dmac.c
+++ b/drivers/dma/sh/usb-dmac.c
@@ -768,7 +768,6 @@ static int usb_dmac_probe(struct platform_device *pdev)
const enum dma_slave_buswidth widths = USB_DMAC_SLAVE_BUSWIDTH;
struct dma_device *engine;
struct usb_dmac *dmac;
- struct resource *mem;
unsigned int i;
int ret;
@@ -789,8 +788,7 @@ static int usb_dmac_probe(struct platform_device *pdev)
return -ENOMEM;
/* Request resources. */
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- dmac->iomem = devm_ioremap_resource(&pdev->dev, mem);
+ dmac->iomem = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dmac->iomem))
return PTR_ERR(dmac->iomem);
diff --git a/drivers/dma/stm32-dmamux.c b/drivers/dma/stm32-dmamux.c
index ee3cbbf51006..46b884d46188 100644
--- a/drivers/dma/stm32-dmamux.c
+++ b/drivers/dma/stm32-dmamux.c
@@ -179,7 +179,6 @@ static int stm32_dmamux_probe(struct platform_device *pdev)
const struct of_device_id *match;
struct device_node *dma_node;
struct stm32_dmamux_data *stm32_dmamux;
- struct resource *res;
void __iomem *iomem;
struct reset_control *rst;
int i, count, ret;
@@ -238,8 +237,7 @@ static int stm32_dmamux_probe(struct platform_device *pdev)
}
pm_runtime_get_noresume(&pdev->dev);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- iomem = devm_ioremap_resource(&pdev->dev, res);
+ iomem = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(iomem))
return PTR_ERR(iomem);
diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c
index b9d4c843635f..84e7f4f4a800 100644
--- a/drivers/dma/stm32-mdma.c
+++ b/drivers/dma/stm32-mdma.c
@@ -1580,7 +1580,6 @@ static int stm32_mdma_probe(struct platform_device *pdev)
struct stm32_mdma_device *dmadev;
struct dma_device *dd;
struct device_node *of_node;
- struct resource *res;
struct reset_control *rst;
u32 nr_channels, nr_requests;
int i, count, ret;
@@ -1622,8 +1621,7 @@ static int stm32_mdma_probe(struct platform_device *pdev)
count);
dmadev->nr_ahb_addr_masks = count;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- dmadev->base = devm_ioremap_resource(&pdev->dev, res);
+ dmadev->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dmadev->base))
return PTR_ERR(dmadev->base);
diff --git a/drivers/dma/sun4i-dma.c b/drivers/dma/sun4i-dma.c
index f291b1b4db32..e86c8829513a 100644
--- a/drivers/dma/sun4i-dma.c
+++ b/drivers/dma/sun4i-dma.c
@@ -1144,15 +1144,13 @@ handle_pending:
static int sun4i_dma_probe(struct platform_device *pdev)
{
struct sun4i_dma_dev *priv;
- struct resource *res;
int i, j, ret;
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->base = devm_ioremap_resource(&pdev->dev, res);
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
diff --git a/drivers/dma/sun6i-dma.c b/drivers/dma/sun6i-dma.c
index b7557f437936..ebfd29888b2f 100644
--- a/drivers/dma/sun6i-dma.c
+++ b/drivers/dma/sun6i-dma.c
@@ -9,6 +9,7 @@
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
@@ -1283,7 +1284,6 @@ static int sun6i_dma_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct sun6i_dma_dev *sdc;
- struct resource *res;
int ret, i;
sdc = devm_kzalloc(&pdev->dev, sizeof(*sdc), GFP_KERNEL);
@@ -1294,8 +1294,7 @@ static int sun6i_dma_probe(struct platform_device *pdev)
if (!sdc->cfg)
return -ENODEV;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- sdc->base = devm_ioremap_resource(&pdev->dev, res);
+ sdc->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(sdc->base))
return PTR_ERR(sdc->base);
@@ -1334,6 +1333,8 @@ static int sun6i_dma_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&sdc->pending);
spin_lock_init(&sdc->lock);
+ dma_set_max_seg_size(&pdev->dev, SZ_32M - 1);
+
dma_cap_set(DMA_PRIVATE, sdc->slave.cap_mask);
dma_cap_set(DMA_MEMCPY, sdc->slave.cap_mask);
dma_cap_set(DMA_SLAVE, sdc->slave.cap_mask);
diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c
index 79da93cc77b6..b97004036071 100644
--- a/drivers/dma/tegra210-adma.c
+++ b/drivers/dma/tegra210-adma.c
@@ -837,7 +837,6 @@ static int tegra_adma_probe(struct platform_device *pdev)
{
const struct tegra_adma_chip_data *cdata;
struct tegra_adma *tdma;
- struct resource *res;
int ret, i;
cdata = of_device_get_match_data(&pdev->dev);
@@ -857,8 +856,7 @@ static int tegra_adma_probe(struct platform_device *pdev)
tdma->nr_channels = cdata->nr_channels;
platform_set_drvdata(pdev, tdma);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- tdma->base_addr = devm_ioremap_resource(&pdev->dev, res);
+ tdma->base_addr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(tdma->base_addr))
return PTR_ERR(tdma->base_addr);
diff --git a/drivers/dma/ti/Makefile b/drivers/dma/ti/Makefile
index b53d05b11ca5..bd1e07fda559 100644
--- a/drivers/dma/ti/Makefile
+++ b/drivers/dma/ti/Makefile
@@ -10,6 +10,7 @@ k3-psil-lib-objs := k3-psil.o \
k3-psil-j7200.o \
k3-psil-am64.o \
k3-psil-j721s2.o \
- k3-psil-am62.o
+ k3-psil-am62.o \
+ k3-psil-am62a.o
obj-$(CONFIG_TI_K3_PSIL) += k3-psil-lib.o
obj-$(CONFIG_TI_DMA_CROSSBAR) += dma-crossbar.o
diff --git a/drivers/dma/ti/cppi41.c b/drivers/dma/ti/cppi41.c
index 695915dba707..c3555cfb0681 100644
--- a/drivers/dma/ti/cppi41.c
+++ b/drivers/dma/ti/cppi41.c
@@ -1039,7 +1039,6 @@ static int cppi41_dma_probe(struct platform_device *pdev)
struct cppi41_dd *cdd;
struct device *dev = &pdev->dev;
const struct cppi_glue_infos *glue_info;
- struct resource *mem;
int index;
int irq;
int ret;
@@ -1072,18 +1071,15 @@ static int cppi41_dma_probe(struct platform_device *pdev)
if (index < 0)
return index;
- mem = platform_get_resource(pdev, IORESOURCE_MEM, index);
- cdd->ctrl_mem = devm_ioremap_resource(dev, mem);
+ cdd->ctrl_mem = devm_platform_ioremap_resource(pdev, index);
if (IS_ERR(cdd->ctrl_mem))
return PTR_ERR(cdd->ctrl_mem);
- mem = platform_get_resource(pdev, IORESOURCE_MEM, index + 1);
- cdd->sched_mem = devm_ioremap_resource(dev, mem);
+ cdd->sched_mem = devm_platform_ioremap_resource(pdev, index + 1);
if (IS_ERR(cdd->sched_mem))
return PTR_ERR(cdd->sched_mem);
- mem = platform_get_resource(pdev, IORESOURCE_MEM, index + 2);
- cdd->qmgr_mem = devm_ioremap_resource(dev, mem);
+ cdd->qmgr_mem = devm_platform_ioremap_resource(pdev, index + 2);
if (IS_ERR(cdd->qmgr_mem))
return PTR_ERR(cdd->qmgr_mem);
diff --git a/drivers/dma/ti/k3-psil-am62a.c b/drivers/dma/ti/k3-psil-am62a.c
new file mode 100644
index 000000000000..ca9d71f91422
--- /dev/null
+++ b/drivers/dma/ti/k3-psil-am62a.c
@@ -0,0 +1,196 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022 Texas Instruments Incorporated - https://www.ti.com
+ */
+
+#include <linux/kernel.h>
+
+#include "k3-psil-priv.h"
+
+#define PSIL_PDMA_XY_TR(x) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_PDMA_XY, \
+ .mapped_channel_id = -1, \
+ .default_flow_id = -1, \
+ }, \
+ }
+
+#define PSIL_PDMA_XY_PKT(x) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_PDMA_XY, \
+ .mapped_channel_id = -1, \
+ .default_flow_id = -1, \
+ .pkt_mode = 1, \
+ }, \
+ }
+
+#define PSIL_ETHERNET(x, ch, flow_base, flow_cnt) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_NATIVE, \
+ .pkt_mode = 1, \
+ .needs_epib = 1, \
+ .psd_size = 16, \
+ .mapped_channel_id = ch, \
+ .flow_start = flow_base, \
+ .flow_num = flow_cnt, \
+ .default_flow_id = flow_base, \
+ }, \
+ }
+
+#define PSIL_SAUL(x, ch, flow_base, flow_cnt, default_flow, tx) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_NATIVE, \
+ .pkt_mode = 1, \
+ .needs_epib = 1, \
+ .psd_size = 64, \
+ .mapped_channel_id = ch, \
+ .flow_start = flow_base, \
+ .flow_num = flow_cnt, \
+ .default_flow_id = default_flow, \
+ .notdpkt = tx, \
+ }, \
+ }
+
+#define PSIL_PDMA_MCASP(x) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_PDMA_XY, \
+ .pdma_acc32 = 1, \
+ .pdma_burst = 1, \
+ }, \
+ }
+
+#define PSIL_CSI2RX(x) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_NATIVE, \
+ }, \
+ }
+
+/* PSI-L source thread IDs, used for RX (DMA_DEV_TO_MEM) */
+static struct psil_ep am62a_src_ep_map[] = {
+ /* SAUL */
+ PSIL_SAUL(0x7504, 20, 35, 8, 35, 0),
+ PSIL_SAUL(0x7505, 21, 35, 8, 36, 0),
+ PSIL_SAUL(0x7506, 22, 43, 8, 43, 0),
+ PSIL_SAUL(0x7507, 23, 43, 8, 44, 0),
+ /* PDMA_MAIN0 - SPI0-3 */
+ PSIL_PDMA_XY_PKT(0x4302),
+ PSIL_PDMA_XY_PKT(0x4303),
+ PSIL_PDMA_XY_PKT(0x4304),
+ PSIL_PDMA_XY_PKT(0x4305),
+ PSIL_PDMA_XY_PKT(0x4306),
+ PSIL_PDMA_XY_PKT(0x4307),
+ PSIL_PDMA_XY_PKT(0x4308),
+ PSIL_PDMA_XY_PKT(0x4309),
+ PSIL_PDMA_XY_PKT(0x430a),
+ PSIL_PDMA_XY_PKT(0x430b),
+ PSIL_PDMA_XY_PKT(0x430c),
+ PSIL_PDMA_XY_PKT(0x430d),
+ /* PDMA_MAIN1 - UART0-6 */
+ PSIL_PDMA_XY_PKT(0x4400),
+ PSIL_PDMA_XY_PKT(0x4401),
+ PSIL_PDMA_XY_PKT(0x4402),
+ PSIL_PDMA_XY_PKT(0x4403),
+ PSIL_PDMA_XY_PKT(0x4404),
+ PSIL_PDMA_XY_PKT(0x4405),
+ PSIL_PDMA_XY_PKT(0x4406),
+ /* PDMA_MAIN2 - MCASP0-2 */
+ PSIL_PDMA_MCASP(0x4500),
+ PSIL_PDMA_MCASP(0x4501),
+ PSIL_PDMA_MCASP(0x4502),
+ /* CPSW3G */
+ PSIL_ETHERNET(0x4600, 19, 19, 16),
+ /* CSI2RX */
+ PSIL_CSI2RX(0x5000),
+ PSIL_CSI2RX(0x5001),
+ PSIL_CSI2RX(0x5002),
+ PSIL_CSI2RX(0x5003),
+ PSIL_CSI2RX(0x5004),
+ PSIL_CSI2RX(0x5005),
+ PSIL_CSI2RX(0x5006),
+ PSIL_CSI2RX(0x5007),
+ PSIL_CSI2RX(0x5008),
+ PSIL_CSI2RX(0x5009),
+ PSIL_CSI2RX(0x500a),
+ PSIL_CSI2RX(0x500b),
+ PSIL_CSI2RX(0x500c),
+ PSIL_CSI2RX(0x500d),
+ PSIL_CSI2RX(0x500e),
+ PSIL_CSI2RX(0x500f),
+ PSIL_CSI2RX(0x5010),
+ PSIL_CSI2RX(0x5011),
+ PSIL_CSI2RX(0x5012),
+ PSIL_CSI2RX(0x5013),
+ PSIL_CSI2RX(0x5014),
+ PSIL_CSI2RX(0x5015),
+ PSIL_CSI2RX(0x5016),
+ PSIL_CSI2RX(0x5017),
+ PSIL_CSI2RX(0x5018),
+ PSIL_CSI2RX(0x5019),
+ PSIL_CSI2RX(0x501a),
+ PSIL_CSI2RX(0x501b),
+ PSIL_CSI2RX(0x501c),
+ PSIL_CSI2RX(0x501d),
+ PSIL_CSI2RX(0x501e),
+ PSIL_CSI2RX(0x501f),
+};
+
+/* PSI-L destination thread IDs, used for TX (DMA_MEM_TO_DEV) */
+static struct psil_ep am62a_dst_ep_map[] = {
+ /* SAUL */
+ PSIL_SAUL(0xf500, 27, 83, 8, 83, 1),
+ PSIL_SAUL(0xf501, 28, 91, 8, 91, 1),
+ /* PDMA_MAIN0 - SPI0-3 */
+ PSIL_PDMA_XY_PKT(0xc302),
+ PSIL_PDMA_XY_PKT(0xc303),
+ PSIL_PDMA_XY_PKT(0xc304),
+ PSIL_PDMA_XY_PKT(0xc305),
+ PSIL_PDMA_XY_PKT(0xc306),
+ PSIL_PDMA_XY_PKT(0xc307),
+ PSIL_PDMA_XY_PKT(0xc308),
+ PSIL_PDMA_XY_PKT(0xc309),
+ PSIL_PDMA_XY_PKT(0xc30a),
+ PSIL_PDMA_XY_PKT(0xc30b),
+ PSIL_PDMA_XY_PKT(0xc30c),
+ PSIL_PDMA_XY_PKT(0xc30d),
+ /* PDMA_MAIN1 - UART0-6 */
+ PSIL_PDMA_XY_PKT(0xc400),
+ PSIL_PDMA_XY_PKT(0xc401),
+ PSIL_PDMA_XY_PKT(0xc402),
+ PSIL_PDMA_XY_PKT(0xc403),
+ PSIL_PDMA_XY_PKT(0xc404),
+ PSIL_PDMA_XY_PKT(0xc405),
+ PSIL_PDMA_XY_PKT(0xc406),
+ /* PDMA_MAIN2 - MCASP0-2 */
+ PSIL_PDMA_MCASP(0xc500),
+ PSIL_PDMA_MCASP(0xc501),
+ PSIL_PDMA_MCASP(0xc502),
+ /* CPSW3G */
+ PSIL_ETHERNET(0xc600, 19, 19, 8),
+ PSIL_ETHERNET(0xc601, 20, 27, 8),
+ PSIL_ETHERNET(0xc602, 21, 35, 8),
+ PSIL_ETHERNET(0xc603, 22, 43, 8),
+ PSIL_ETHERNET(0xc604, 23, 51, 8),
+ PSIL_ETHERNET(0xc605, 24, 59, 8),
+ PSIL_ETHERNET(0xc606, 25, 67, 8),
+ PSIL_ETHERNET(0xc607, 26, 75, 8),
+};
+
+struct psil_ep_map am62a_ep_map = {
+ .name = "am62a",
+ .src = am62a_src_ep_map,
+ .src_count = ARRAY_SIZE(am62a_src_ep_map),
+ .dst = am62a_dst_ep_map,
+ .dst_count = ARRAY_SIZE(am62a_dst_ep_map),
+};
diff --git a/drivers/dma/ti/k3-psil-priv.h b/drivers/dma/ti/k3-psil-priv.h
index 74fa9ec02968..abd650bb7600 100644
--- a/drivers/dma/ti/k3-psil-priv.h
+++ b/drivers/dma/ti/k3-psil-priv.h
@@ -43,5 +43,6 @@ extern struct psil_ep_map j7200_ep_map;
extern struct psil_ep_map am64_ep_map;
extern struct psil_ep_map j721s2_ep_map;
extern struct psil_ep_map am62_ep_map;
+extern struct psil_ep_map am62a_ep_map;
#endif /* K3_PSIL_PRIV_H_ */
diff --git a/drivers/dma/ti/k3-psil.c b/drivers/dma/ti/k3-psil.c
index 8b6533a1eeeb..2da6988a0e7b 100644
--- a/drivers/dma/ti/k3-psil.c
+++ b/drivers/dma/ti/k3-psil.c
@@ -24,6 +24,7 @@ static const struct soc_device_attribute k3_soc_devices[] = {
{ .family = "AM64X", .data = &am64_ep_map },
{ .family = "J721S2", .data = &j721s2_ep_map },
{ .family = "AM62X", .data = &am62_ep_map },
+ { .family = "AM62AX", .data = &am62a_ep_map },
{ /* sentinel */ }
};
diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c
index 4c62274e0b33..7e23a6fdef95 100644
--- a/drivers/dma/ti/k3-udma.c
+++ b/drivers/dma/ti/k3-udma.c
@@ -135,6 +135,7 @@ struct udma_match_data {
u32 flags;
u32 statictr_z_mask;
u8 burst_size[3];
+ struct udma_soc_data *soc_data;
};
struct udma_soc_data {
@@ -4296,6 +4297,25 @@ static struct udma_match_data j721e_mcu_data = {
},
};
+static struct udma_soc_data am62a_dmss_csi_soc_data = {
+ .oes = {
+ .bcdma_rchan_data = 0xe00,
+ .bcdma_rchan_ring = 0x1000,
+ },
+};
+
+static struct udma_match_data am62a_bcdma_csirx_data = {
+ .type = DMA_TYPE_BCDMA,
+ .psil_base = 0x3100,
+ .enable_memcpy_support = false,
+ .burst_size = {
+ TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
+ 0, /* No H Channels */
+ 0, /* No UH Channels */
+ },
+ .soc_data = &am62a_dmss_csi_soc_data,
+};
+
static struct udma_match_data am64_bcdma_data = {
.type = DMA_TYPE_BCDMA,
.psil_base = 0x2000, /* for tchan and rchan, not applicable to bchan */
@@ -4345,6 +4365,10 @@ static const struct of_device_id udma_of_match[] = {
.compatible = "ti,am64-dmss-pktdma",
.data = &am64_pktdma_data,
},
+ {
+ .compatible = "ti,am62a-dmss-bcdma-csirx",
+ .data = &am62a_bcdma_csirx_data,
+ },
{ /* Sentinel */ },
};
@@ -4387,6 +4411,7 @@ static const struct soc_device_attribute k3_soc_devices[] = {
{ .family = "AM64X", .data = &am64_soc_data },
{ .family = "J721S2", .data = &j721e_soc_data},
{ .family = "AM62X", .data = &am64_soc_data },
+ { .family = "AM62AX", .data = &am64_soc_data },
{ /* sentinel */ }
};
@@ -4775,7 +4800,10 @@ static int bcdma_setup_resources(struct udma_dev *ud)
irq_res.desc[i].num = rm_res->desc[i].num;
}
}
+ } else {
+ i = 0;
}
+
if (ud->tchan_cnt) {
rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
if (IS_ERR(rm_res)) {
@@ -5271,12 +5299,15 @@ static int udma_probe(struct platform_device *pdev)
}
ud->match_data = match->data;
- soc = soc_device_match(k3_soc_devices);
- if (!soc) {
- dev_err(dev, "No compatible SoC found\n");
- return -ENODEV;
+ ud->soc_data = ud->match_data->soc_data;
+ if (!ud->soc_data) {
+ soc = soc_device_match(k3_soc_devices);
+ if (!soc) {
+ dev_err(dev, "No compatible SoC found\n");
+ return -ENODEV;
+ }
+ ud->soc_data = soc->data;
}
- ud->soc_data = soc->data;
ret = udma_get_mmrs(pdev, ud);
if (ret)
@@ -5345,7 +5376,6 @@ static int udma_probe(struct platform_device *pdev)
dev->msi.domain = of_msi_get_domain(dev, dev->of_node,
DOMAIN_BUS_TI_SCI_INTA_MSI);
if (!dev->msi.domain) {
- dev_err(dev, "Failed to get MSI domain\n");
return -EPROBE_DEFER;
}
diff --git a/drivers/dma/ti/omap-dma.c b/drivers/dma/ti/omap-dma.c
index 27f5019bdc1e..02e1c08c596d 100644
--- a/drivers/dma/ti/omap-dma.c
+++ b/drivers/dma/ti/omap-dma.c
@@ -1658,7 +1658,6 @@ static int omap_dma_probe(struct platform_device *pdev)
{
const struct omap_dma_config *conf;
struct omap_dmadev *od;
- struct resource *res;
int rc, i, irq;
u32 val;
@@ -1666,8 +1665,7 @@ static int omap_dma_probe(struct platform_device *pdev)
if (!od)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- od->base = devm_ioremap_resource(&pdev->dev, res);
+ od->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(od->base))
return PTR_ERR(od->base);
diff --git a/drivers/dma/xilinx/Makefile b/drivers/dma/xilinx/Makefile
index 767bb45f641f..ebaa93644c94 100644
--- a/drivers/dma/xilinx/Makefile
+++ b/drivers/dma/xilinx/Makefile
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_XILINX_DMA) += xilinx_dma.o
+obj-$(CONFIG_XILINX_XDMA) += xdma.o
obj-$(CONFIG_XILINX_ZYNQMP_DMA) += zynqmp_dma.o
obj-$(CONFIG_XILINX_ZYNQMP_DPDMA) += xilinx_dpdma.o
diff --git a/drivers/dma/xilinx/xdma-regs.h b/drivers/dma/xilinx/xdma-regs.h
new file mode 100644
index 000000000000..dd98b4526b90
--- /dev/null
+++ b/drivers/dma/xilinx/xdma-regs.h
@@ -0,0 +1,166 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2017-2020 Xilinx, Inc. All rights reserved.
+ * Copyright (C) 2022, Advanced Micro Devices, Inc.
+ */
+
+#ifndef __DMA_XDMA_REGS_H
+#define __DMA_XDMA_REGS_H
+
+/* The length of register space exposed to host */
+#define XDMA_REG_SPACE_LEN 65536
+
+/*
+ * maximum number of DMA channels for each direction:
+ * Host to Card (H2C) or Card to Host (C2H)
+ */
+#define XDMA_MAX_CHANNELS 4
+
+/*
+ * macros to define the number of descriptor blocks can be used in one
+ * DMA transfer request.
+ * the DMA engine uses a linked list of descriptor blocks that specify the
+ * source, destination, and length of the DMA transfers.
+ */
+#define XDMA_DESC_BLOCK_NUM BIT(7)
+#define XDMA_DESC_BLOCK_MASK (XDMA_DESC_BLOCK_NUM - 1)
+
+/* descriptor definitions */
+#define XDMA_DESC_ADJACENT 32
+#define XDMA_DESC_ADJACENT_MASK (XDMA_DESC_ADJACENT - 1)
+#define XDMA_DESC_ADJACENT_BITS GENMASK(13, 8)
+#define XDMA_DESC_MAGIC 0xad4bUL
+#define XDMA_DESC_MAGIC_BITS GENMASK(31, 16)
+#define XDMA_DESC_FLAGS_BITS GENMASK(7, 0)
+#define XDMA_DESC_STOPPED BIT(0)
+#define XDMA_DESC_COMPLETED BIT(1)
+#define XDMA_DESC_BLEN_BITS 28
+#define XDMA_DESC_BLEN_MAX (BIT(XDMA_DESC_BLEN_BITS) - PAGE_SIZE)
+
+/* macros to construct the descriptor control word */
+#define XDMA_DESC_CONTROL(adjacent, flag) \
+ (FIELD_PREP(XDMA_DESC_MAGIC_BITS, XDMA_DESC_MAGIC) | \
+ FIELD_PREP(XDMA_DESC_ADJACENT_BITS, (adjacent) - 1) | \
+ FIELD_PREP(XDMA_DESC_FLAGS_BITS, (flag)))
+#define XDMA_DESC_CONTROL_LAST \
+ XDMA_DESC_CONTROL(1, XDMA_DESC_STOPPED | XDMA_DESC_COMPLETED)
+
+/*
+ * Descriptor for a single contiguous memory block transfer.
+ *
+ * Multiple descriptors are linked by means of the next pointer. An additional
+ * extra adjacent number gives the amount of extra contiguous descriptors.
+ *
+ * The descriptors are in root complex memory, and the bytes in the 32-bit
+ * words must be in little-endian byte ordering.
+ */
+struct xdma_hw_desc {
+ __le32 control;
+ __le32 bytes;
+ __le64 src_addr;
+ __le64 dst_addr;
+ __le64 next_desc;
+};
+
+#define XDMA_DESC_SIZE sizeof(struct xdma_hw_desc)
+#define XDMA_DESC_BLOCK_SIZE (XDMA_DESC_SIZE * XDMA_DESC_ADJACENT)
+#define XDMA_DESC_BLOCK_ALIGN 4096
+
+/*
+ * Channel registers
+ */
+#define XDMA_CHAN_IDENTIFIER 0x0
+#define XDMA_CHAN_CONTROL 0x4
+#define XDMA_CHAN_CONTROL_W1S 0x8
+#define XDMA_CHAN_CONTROL_W1C 0xc
+#define XDMA_CHAN_STATUS 0x40
+#define XDMA_CHAN_COMPLETED_DESC 0x48
+#define XDMA_CHAN_ALIGNMENTS 0x4c
+#define XDMA_CHAN_INTR_ENABLE 0x90
+#define XDMA_CHAN_INTR_ENABLE_W1S 0x94
+#define XDMA_CHAN_INTR_ENABLE_W1C 0x9c
+
+#define XDMA_CHAN_STRIDE 0x100
+#define XDMA_CHAN_H2C_OFFSET 0x0
+#define XDMA_CHAN_C2H_OFFSET 0x1000
+#define XDMA_CHAN_H2C_TARGET 0x0
+#define XDMA_CHAN_C2H_TARGET 0x1
+
+/* macro to check if channel is available */
+#define XDMA_CHAN_MAGIC 0x1fc0
+#define XDMA_CHAN_CHECK_TARGET(id, target) \
+ (((u32)(id) >> 16) == XDMA_CHAN_MAGIC + (target))
+
+/* bits of the channel control register */
+#define CHAN_CTRL_RUN_STOP BIT(0)
+#define CHAN_CTRL_IE_DESC_STOPPED BIT(1)
+#define CHAN_CTRL_IE_DESC_COMPLETED BIT(2)
+#define CHAN_CTRL_IE_DESC_ALIGN_MISMATCH BIT(3)
+#define CHAN_CTRL_IE_MAGIC_STOPPED BIT(4)
+#define CHAN_CTRL_IE_IDLE_STOPPED BIT(6)
+#define CHAN_CTRL_IE_READ_ERROR GENMASK(13, 9)
+#define CHAN_CTRL_IE_DESC_ERROR GENMASK(23, 19)
+#define CHAN_CTRL_NON_INCR_ADDR BIT(25)
+#define CHAN_CTRL_POLL_MODE_WB BIT(26)
+
+#define CHAN_CTRL_START (CHAN_CTRL_RUN_STOP | \
+ CHAN_CTRL_IE_DESC_STOPPED | \
+ CHAN_CTRL_IE_DESC_COMPLETED | \
+ CHAN_CTRL_IE_DESC_ALIGN_MISMATCH | \
+ CHAN_CTRL_IE_MAGIC_STOPPED | \
+ CHAN_CTRL_IE_READ_ERROR | \
+ CHAN_CTRL_IE_DESC_ERROR)
+
+/* bits of the channel interrupt enable mask */
+#define CHAN_IM_DESC_ERROR BIT(19)
+#define CHAN_IM_READ_ERROR BIT(9)
+#define CHAN_IM_IDLE_STOPPED BIT(6)
+#define CHAN_IM_MAGIC_STOPPED BIT(4)
+#define CHAN_IM_DESC_COMPLETED BIT(2)
+#define CHAN_IM_DESC_STOPPED BIT(1)
+
+#define CHAN_IM_ALL (CHAN_IM_DESC_ERROR | CHAN_IM_READ_ERROR | \
+ CHAN_IM_IDLE_STOPPED | CHAN_IM_MAGIC_STOPPED | \
+ CHAN_IM_DESC_COMPLETED | CHAN_IM_DESC_STOPPED)
+
+/*
+ * Channel SGDMA registers
+ */
+#define XDMA_SGDMA_IDENTIFIER 0x4000
+#define XDMA_SGDMA_DESC_LO 0x4080
+#define XDMA_SGDMA_DESC_HI 0x4084
+#define XDMA_SGDMA_DESC_ADJ 0x4088
+#define XDMA_SGDMA_DESC_CREDIT 0x408c
+
+/* bits of the SG DMA control register */
+#define XDMA_CTRL_RUN_STOP BIT(0)
+#define XDMA_CTRL_IE_DESC_STOPPED BIT(1)
+#define XDMA_CTRL_IE_DESC_COMPLETED BIT(2)
+#define XDMA_CTRL_IE_DESC_ALIGN_MISMATCH BIT(3)
+#define XDMA_CTRL_IE_MAGIC_STOPPED BIT(4)
+#define XDMA_CTRL_IE_IDLE_STOPPED BIT(6)
+#define XDMA_CTRL_IE_READ_ERROR GENMASK(13, 9)
+#define XDMA_CTRL_IE_DESC_ERROR GENMASK(23, 19)
+#define XDMA_CTRL_NON_INCR_ADDR BIT(25)
+#define XDMA_CTRL_POLL_MODE_WB BIT(26)
+
+/*
+ * interrupt registers
+ */
+#define XDMA_IRQ_IDENTIFIER 0x2000
+#define XDMA_IRQ_USER_INT_EN 0x2004
+#define XDMA_IRQ_USER_INT_EN_W1S 0x2008
+#define XDMA_IRQ_USER_INT_EN_W1C 0x200c
+#define XDMA_IRQ_CHAN_INT_EN 0x2010
+#define XDMA_IRQ_CHAN_INT_EN_W1S 0x2014
+#define XDMA_IRQ_CHAN_INT_EN_W1C 0x2018
+#define XDMA_IRQ_USER_INT_REQ 0x2040
+#define XDMA_IRQ_CHAN_INT_REQ 0x2044
+#define XDMA_IRQ_USER_INT_PEND 0x2048
+#define XDMA_IRQ_CHAN_INT_PEND 0x204c
+#define XDMA_IRQ_USER_VEC_NUM 0x2080
+#define XDMA_IRQ_CHAN_VEC_NUM 0x20a0
+
+#define XDMA_IRQ_VEC_SHIFT 8
+
+#endif /* __DMA_XDMA_REGS_H */
diff --git a/drivers/dma/xilinx/xdma.c b/drivers/dma/xilinx/xdma.c
new file mode 100644
index 000000000000..462109c61653
--- /dev/null
+++ b/drivers/dma/xilinx/xdma.c
@@ -0,0 +1,974 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * DMA driver for Xilinx DMA/Bridge Subsystem
+ *
+ * Copyright (C) 2017-2020 Xilinx, Inc. All rights reserved.
+ * Copyright (C) 2022, Advanced Micro Devices, Inc.
+ */
+
+/*
+ * The DMA/Bridge Subsystem for PCI Express allows for the movement of data
+ * between Host memory and the DMA subsystem. It does this by operating on
+ * 'descriptors' that contain information about the source, destination and
+ * amount of data to transfer. These direct memory transfers can be both in
+ * the Host to Card (H2C) and Card to Host (C2H) transfers. The DMA can be
+ * configured to have a single AXI4 Master interface shared by all channels
+ * or one AXI4-Stream interface for each channel enabled. Memory transfers are
+ * specified on a per-channel basis in descriptor linked lists, which the DMA
+ * fetches from host memory and processes. Events such as descriptor completion
+ * and errors are signaled using interrupts. The core also provides up to 16
+ * user interrupt wires that generate interrupts to the host.
+ */
+
+#include <linux/mod_devicetable.h>
+#include <linux/bitfield.h>
+#include <linux/dmapool.h>
+#include <linux/regmap.h>
+#include <linux/dmaengine.h>
+#include <linux/dma/amd_xdma.h>
+#include <linux/platform_device.h>
+#include <linux/platform_data/amd_xdma.h>
+#include <linux/dma-mapping.h>
+#include <linux/pci.h>
+#include "../virt-dma.h"
+#include "xdma-regs.h"
+
+/* mmio regmap config for all XDMA registers */
+static const struct regmap_config xdma_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = XDMA_REG_SPACE_LEN,
+};
+
+/**
+ * struct xdma_desc_block - Descriptor block
+ * @virt_addr: Virtual address of block start
+ * @dma_addr: DMA address of block start
+ */
+struct xdma_desc_block {
+ void *virt_addr;
+ dma_addr_t dma_addr;
+};
+
+/**
+ * struct xdma_chan - Driver specific DMA channel structure
+ * @vchan: Virtual channel
+ * @xdev_hdl: Pointer to DMA device structure
+ * @base: Offset of channel registers
+ * @desc_pool: Descriptor pool
+ * @busy: Busy flag of the channel
+ * @dir: Transferring direction of the channel
+ * @cfg: Transferring config of the channel
+ * @irq: IRQ assigned to the channel
+ */
+struct xdma_chan {
+ struct virt_dma_chan vchan;
+ void *xdev_hdl;
+ u32 base;
+ struct dma_pool *desc_pool;
+ bool busy;
+ enum dma_transfer_direction dir;
+ struct dma_slave_config cfg;
+ u32 irq;
+};
+
+/**
+ * struct xdma_desc - DMA desc structure
+ * @vdesc: Virtual DMA descriptor
+ * @chan: DMA channel pointer
+ * @dir: Transferring direction of the request
+ * @dev_addr: Physical address on DMA device side
+ * @desc_blocks: Hardware descriptor blocks
+ * @dblk_num: Number of hardware descriptor blocks
+ * @desc_num: Number of hardware descriptors
+ * @completed_desc_num: Completed hardware descriptors
+ */
+struct xdma_desc {
+ struct virt_dma_desc vdesc;
+ struct xdma_chan *chan;
+ enum dma_transfer_direction dir;
+ u64 dev_addr;
+ struct xdma_desc_block *desc_blocks;
+ u32 dblk_num;
+ u32 desc_num;
+ u32 completed_desc_num;
+};
+
+#define XDMA_DEV_STATUS_REG_DMA BIT(0)
+#define XDMA_DEV_STATUS_INIT_MSIX BIT(1)
+
+/**
+ * struct xdma_device - DMA device structure
+ * @pdev: Platform device pointer
+ * @dma_dev: DMA device structure
+ * @rmap: MMIO regmap for DMA registers
+ * @h2c_chans: Host to Card channels
+ * @c2h_chans: Card to Host channels
+ * @h2c_chan_num: Number of H2C channels
+ * @c2h_chan_num: Number of C2H channels
+ * @irq_start: Start IRQ assigned to device
+ * @irq_num: Number of IRQ assigned to device
+ * @status: Initialization status
+ */
+struct xdma_device {
+ struct platform_device *pdev;
+ struct dma_device dma_dev;
+ struct regmap *rmap;
+ struct xdma_chan *h2c_chans;
+ struct xdma_chan *c2h_chans;
+ u32 h2c_chan_num;
+ u32 c2h_chan_num;
+ u32 irq_start;
+ u32 irq_num;
+ u32 status;
+};
+
+#define xdma_err(xdev, fmt, args...) \
+ dev_err(&(xdev)->pdev->dev, fmt, ##args)
+#define XDMA_CHAN_NUM(_xd) ({ \
+ typeof(_xd) (xd) = (_xd); \
+ ((xd)->h2c_chan_num + (xd)->c2h_chan_num); })
+
+/* Get the last desc in a desc block */
+static inline void *xdma_blk_last_desc(struct xdma_desc_block *block)
+{
+ return block->virt_addr + (XDMA_DESC_ADJACENT - 1) * XDMA_DESC_SIZE;
+}
+
+/**
+ * xdma_link_desc_blocks - Link descriptor blocks for DMA transfer
+ * @sw_desc: Tx descriptor pointer
+ */
+static void xdma_link_desc_blocks(struct xdma_desc *sw_desc)
+{
+ struct xdma_desc_block *block;
+ u32 last_blk_desc, desc_control;
+ struct xdma_hw_desc *desc;
+ int i;
+
+ desc_control = XDMA_DESC_CONTROL(XDMA_DESC_ADJACENT, 0);
+ for (i = 1; i < sw_desc->dblk_num; i++) {
+ block = &sw_desc->desc_blocks[i - 1];
+ desc = xdma_blk_last_desc(block);
+
+ if (!(i & XDMA_DESC_BLOCK_MASK)) {
+ desc->control = cpu_to_le32(XDMA_DESC_CONTROL_LAST);
+ continue;
+ }
+ desc->control = cpu_to_le32(desc_control);
+ desc->next_desc = cpu_to_le64(block[1].dma_addr);
+ }
+
+ /* update the last block */
+ last_blk_desc = (sw_desc->desc_num - 1) & XDMA_DESC_ADJACENT_MASK;
+ if (((sw_desc->dblk_num - 1) & XDMA_DESC_BLOCK_MASK) > 0) {
+ block = &sw_desc->desc_blocks[sw_desc->dblk_num - 2];
+ desc = xdma_blk_last_desc(block);
+ desc_control = XDMA_DESC_CONTROL(last_blk_desc + 1, 0);
+ desc->control = cpu_to_le32(desc_control);
+ }
+
+ block = &sw_desc->desc_blocks[sw_desc->dblk_num - 1];
+ desc = block->virt_addr + last_blk_desc * XDMA_DESC_SIZE;
+ desc->control = cpu_to_le32(XDMA_DESC_CONTROL_LAST);
+}
+
+static inline struct xdma_chan *to_xdma_chan(struct dma_chan *chan)
+{
+ return container_of(chan, struct xdma_chan, vchan.chan);
+}
+
+static inline struct xdma_desc *to_xdma_desc(struct virt_dma_desc *vdesc)
+{
+ return container_of(vdesc, struct xdma_desc, vdesc);
+}
+
+/**
+ * xdma_channel_init - Initialize DMA channel registers
+ * @chan: DMA channel pointer
+ */
+static int xdma_channel_init(struct xdma_chan *chan)
+{
+ struct xdma_device *xdev = chan->xdev_hdl;
+ int ret;
+
+ ret = regmap_write(xdev->rmap, chan->base + XDMA_CHAN_CONTROL_W1C,
+ CHAN_CTRL_NON_INCR_ADDR);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(xdev->rmap, chan->base + XDMA_CHAN_INTR_ENABLE,
+ CHAN_IM_ALL);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/**
+ * xdma_free_desc - Free descriptor
+ * @vdesc: Virtual DMA descriptor
+ */
+static void xdma_free_desc(struct virt_dma_desc *vdesc)
+{
+ struct xdma_desc *sw_desc;
+ int i;
+
+ sw_desc = to_xdma_desc(vdesc);
+ for (i = 0; i < sw_desc->dblk_num; i++) {
+ if (!sw_desc->desc_blocks[i].virt_addr)
+ break;
+ dma_pool_free(sw_desc->chan->desc_pool,
+ sw_desc->desc_blocks[i].virt_addr,
+ sw_desc->desc_blocks[i].dma_addr);
+ }
+ kfree(sw_desc->desc_blocks);
+ kfree(sw_desc);
+}
+
+/**
+ * xdma_alloc_desc - Allocate descriptor
+ * @chan: DMA channel pointer
+ * @desc_num: Number of hardware descriptors
+ */
+static struct xdma_desc *
+xdma_alloc_desc(struct xdma_chan *chan, u32 desc_num)
+{
+ struct xdma_desc *sw_desc;
+ struct xdma_hw_desc *desc;
+ dma_addr_t dma_addr;
+ u32 dblk_num;
+ void *addr;
+ int i, j;
+
+ sw_desc = kzalloc(sizeof(*sw_desc), GFP_NOWAIT);
+ if (!sw_desc)
+ return NULL;
+
+ sw_desc->chan = chan;
+ sw_desc->desc_num = desc_num;
+ dblk_num = DIV_ROUND_UP(desc_num, XDMA_DESC_ADJACENT);
+ sw_desc->desc_blocks = kcalloc(dblk_num, sizeof(*sw_desc->desc_blocks),
+ GFP_NOWAIT);
+ if (!sw_desc->desc_blocks)
+ goto failed;
+
+ sw_desc->dblk_num = dblk_num;
+ for (i = 0; i < sw_desc->dblk_num; i++) {
+ addr = dma_pool_alloc(chan->desc_pool, GFP_NOWAIT, &dma_addr);
+ if (!addr)
+ goto failed;
+
+ sw_desc->desc_blocks[i].virt_addr = addr;
+ sw_desc->desc_blocks[i].dma_addr = dma_addr;
+ for (j = 0, desc = addr; j < XDMA_DESC_ADJACENT; j++)
+ desc[j].control = cpu_to_le32(XDMA_DESC_CONTROL(1, 0));
+ }
+
+ xdma_link_desc_blocks(sw_desc);
+
+ return sw_desc;
+
+failed:
+ xdma_free_desc(&sw_desc->vdesc);
+ return NULL;
+}
+
+/**
+ * xdma_xfer_start - Start DMA transfer
+ * @xdma_chan: DMA channel pointer
+ */
+static int xdma_xfer_start(struct xdma_chan *xchan)
+{
+ struct virt_dma_desc *vd = vchan_next_desc(&xchan->vchan);
+ struct xdma_device *xdev = xchan->xdev_hdl;
+ struct xdma_desc_block *block;
+ u32 val, completed_blocks;
+ struct xdma_desc *desc;
+ int ret;
+
+ /*
+ * check if there is not any submitted descriptor or channel is busy.
+ * vchan lock should be held where this function is called.
+ */
+ if (!vd || xchan->busy)
+ return -EINVAL;
+
+ /* clear run stop bit to get ready for transfer */
+ ret = regmap_write(xdev->rmap, xchan->base + XDMA_CHAN_CONTROL_W1C,
+ CHAN_CTRL_RUN_STOP);
+ if (ret)
+ return ret;
+
+ desc = to_xdma_desc(vd);
+ if (desc->dir != xchan->dir) {
+ xdma_err(xdev, "incorrect request direction");
+ return -EINVAL;
+ }
+
+ /* set DMA engine to the first descriptor block */
+ completed_blocks = desc->completed_desc_num / XDMA_DESC_ADJACENT;
+ block = &desc->desc_blocks[completed_blocks];
+ val = lower_32_bits(block->dma_addr);
+ ret = regmap_write(xdev->rmap, xchan->base + XDMA_SGDMA_DESC_LO, val);
+ if (ret)
+ return ret;
+
+ val = upper_32_bits(block->dma_addr);
+ ret = regmap_write(xdev->rmap, xchan->base + XDMA_SGDMA_DESC_HI, val);
+ if (ret)
+ return ret;
+
+ if (completed_blocks + 1 == desc->dblk_num)
+ val = (desc->desc_num - 1) & XDMA_DESC_ADJACENT_MASK;
+ else
+ val = XDMA_DESC_ADJACENT - 1;
+ ret = regmap_write(xdev->rmap, xchan->base + XDMA_SGDMA_DESC_ADJ, val);
+ if (ret)
+ return ret;
+
+ /* kick off DMA transfer */
+ ret = regmap_write(xdev->rmap, xchan->base + XDMA_CHAN_CONTROL,
+ CHAN_CTRL_START);
+ if (ret)
+ return ret;
+
+ xchan->busy = true;
+ return 0;
+}
+
+/**
+ * xdma_alloc_channels - Detect and allocate DMA channels
+ * @xdev: DMA device pointer
+ * @dir: Channel direction
+ */
+static int xdma_alloc_channels(struct xdma_device *xdev,
+ enum dma_transfer_direction dir)
+{
+ struct xdma_platdata *pdata = dev_get_platdata(&xdev->pdev->dev);
+ struct xdma_chan **chans, *xchan;
+ u32 base, identifier, target;
+ u32 *chan_num;
+ int i, j, ret;
+
+ if (dir == DMA_MEM_TO_DEV) {
+ base = XDMA_CHAN_H2C_OFFSET;
+ target = XDMA_CHAN_H2C_TARGET;
+ chans = &xdev->h2c_chans;
+ chan_num = &xdev->h2c_chan_num;
+ } else if (dir == DMA_DEV_TO_MEM) {
+ base = XDMA_CHAN_C2H_OFFSET;
+ target = XDMA_CHAN_C2H_TARGET;
+ chans = &xdev->c2h_chans;
+ chan_num = &xdev->c2h_chan_num;
+ } else {
+ xdma_err(xdev, "invalid direction specified");
+ return -EINVAL;
+ }
+
+ /* detect number of available DMA channels */
+ for (i = 0, *chan_num = 0; i < pdata->max_dma_channels; i++) {
+ ret = regmap_read(xdev->rmap, base + i * XDMA_CHAN_STRIDE,
+ &identifier);
+ if (ret)
+ return ret;
+
+ /* check if it is available DMA channel */
+ if (XDMA_CHAN_CHECK_TARGET(identifier, target))
+ (*chan_num)++;
+ }
+
+ if (!*chan_num) {
+ xdma_err(xdev, "does not probe any channel");
+ return -EINVAL;
+ }
+
+ *chans = devm_kcalloc(&xdev->pdev->dev, *chan_num, sizeof(**chans),
+ GFP_KERNEL);
+ if (!*chans)
+ return -ENOMEM;
+
+ for (i = 0, j = 0; i < pdata->max_dma_channels; i++) {
+ ret = regmap_read(xdev->rmap, base + i * XDMA_CHAN_STRIDE,
+ &identifier);
+ if (ret)
+ return ret;
+
+ if (!XDMA_CHAN_CHECK_TARGET(identifier, target))
+ continue;
+
+ if (j == *chan_num) {
+ xdma_err(xdev, "invalid channel number");
+ return -EIO;
+ }
+
+ /* init channel structure and hardware */
+ xchan = &(*chans)[j];
+ xchan->xdev_hdl = xdev;
+ xchan->base = base + i * XDMA_CHAN_STRIDE;
+ xchan->dir = dir;
+
+ ret = xdma_channel_init(xchan);
+ if (ret)
+ return ret;
+ xchan->vchan.desc_free = xdma_free_desc;
+ vchan_init(&xchan->vchan, &xdev->dma_dev);
+
+ j++;
+ }
+
+ dev_info(&xdev->pdev->dev, "configured %d %s channels", j,
+ (dir == DMA_MEM_TO_DEV) ? "H2C" : "C2H");
+
+ return 0;
+}
+
+/**
+ * xdma_issue_pending - Issue pending transactions
+ * @chan: DMA channel pointer
+ */
+static void xdma_issue_pending(struct dma_chan *chan)
+{
+ struct xdma_chan *xdma_chan = to_xdma_chan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&xdma_chan->vchan.lock, flags);
+ if (vchan_issue_pending(&xdma_chan->vchan))
+ xdma_xfer_start(xdma_chan);
+ spin_unlock_irqrestore(&xdma_chan->vchan.lock, flags);
+}
+
+/**
+ * xdma_prep_device_sg - prepare a descriptor for a DMA transaction
+ * @chan: DMA channel pointer
+ * @sgl: Transfer scatter gather list
+ * @sg_len: Length of scatter gather list
+ * @dir: Transfer direction
+ * @flags: transfer ack flags
+ * @context: APP words of the descriptor
+ */
+static struct dma_async_tx_descriptor *
+xdma_prep_device_sg(struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sg_len, enum dma_transfer_direction dir,
+ unsigned long flags, void *context)
+{
+ struct xdma_chan *xdma_chan = to_xdma_chan(chan);
+ struct dma_async_tx_descriptor *tx_desc;
+ u32 desc_num = 0, i, len, rest;
+ struct xdma_desc_block *dblk;
+ struct xdma_hw_desc *desc;
+ struct xdma_desc *sw_desc;
+ u64 dev_addr, *src, *dst;
+ struct scatterlist *sg;
+ u64 addr;
+
+ for_each_sg(sgl, sg, sg_len, i)
+ desc_num += DIV_ROUND_UP(sg_dma_len(sg), XDMA_DESC_BLEN_MAX);
+
+ sw_desc = xdma_alloc_desc(xdma_chan, desc_num);
+ if (!sw_desc)
+ return NULL;
+ sw_desc->dir = dir;
+
+ if (dir == DMA_MEM_TO_DEV) {
+ dev_addr = xdma_chan->cfg.dst_addr;
+ src = &addr;
+ dst = &dev_addr;
+ } else {
+ dev_addr = xdma_chan->cfg.src_addr;
+ src = &dev_addr;
+ dst = &addr;
+ }
+
+ dblk = sw_desc->desc_blocks;
+ desc = dblk->virt_addr;
+ desc_num = 1;
+ for_each_sg(sgl, sg, sg_len, i) {
+ addr = sg_dma_address(sg);
+ rest = sg_dma_len(sg);
+
+ do {
+ len = min_t(u32, rest, XDMA_DESC_BLEN_MAX);
+ /* set hardware descriptor */
+ desc->bytes = cpu_to_le32(len);
+ desc->src_addr = cpu_to_le64(*src);
+ desc->dst_addr = cpu_to_le64(*dst);
+
+ if (!(desc_num & XDMA_DESC_ADJACENT_MASK)) {
+ dblk++;
+ desc = dblk->virt_addr;
+ } else {
+ desc++;
+ }
+
+ desc_num++;
+ dev_addr += len;
+ addr += len;
+ rest -= len;
+ } while (rest);
+ }
+
+ tx_desc = vchan_tx_prep(&xdma_chan->vchan, &sw_desc->vdesc, flags);
+ if (!tx_desc)
+ goto failed;
+
+ return tx_desc;
+
+failed:
+ xdma_free_desc(&sw_desc->vdesc);
+
+ return NULL;
+}
+
+/**
+ * xdma_device_config - Configure the DMA channel
+ * @chan: DMA channel
+ * @cfg: channel configuration
+ */
+static int xdma_device_config(struct dma_chan *chan,
+ struct dma_slave_config *cfg)
+{
+ struct xdma_chan *xdma_chan = to_xdma_chan(chan);
+
+ memcpy(&xdma_chan->cfg, cfg, sizeof(*cfg));
+
+ return 0;
+}
+
+/**
+ * xdma_free_chan_resources - Free channel resources
+ * @chan: DMA channel
+ */
+static void xdma_free_chan_resources(struct dma_chan *chan)
+{
+ struct xdma_chan *xdma_chan = to_xdma_chan(chan);
+
+ vchan_free_chan_resources(&xdma_chan->vchan);
+ dma_pool_destroy(xdma_chan->desc_pool);
+ xdma_chan->desc_pool = NULL;
+}
+
+/**
+ * xdma_alloc_chan_resources - Allocate channel resources
+ * @chan: DMA channel
+ */
+static int xdma_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct xdma_chan *xdma_chan = to_xdma_chan(chan);
+ struct xdma_device *xdev = xdma_chan->xdev_hdl;
+ struct device *dev = xdev->dma_dev.dev;
+
+ while (dev && !dev_is_pci(dev))
+ dev = dev->parent;
+ if (!dev) {
+ xdma_err(xdev, "unable to find pci device");
+ return -EINVAL;
+ }
+
+ xdma_chan->desc_pool = dma_pool_create(dma_chan_name(chan),
+ dev, XDMA_DESC_BLOCK_SIZE,
+ XDMA_DESC_BLOCK_ALIGN, 0);
+ if (!xdma_chan->desc_pool) {
+ xdma_err(xdev, "unable to allocate descriptor pool");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/**
+ * xdma_channel_isr - XDMA channel interrupt handler
+ * @irq: IRQ number
+ * @dev_id: Pointer to the DMA channel structure
+ */
+static irqreturn_t xdma_channel_isr(int irq, void *dev_id)
+{
+ struct xdma_chan *xchan = dev_id;
+ u32 complete_desc_num = 0;
+ struct xdma_device *xdev;
+ struct virt_dma_desc *vd;
+ struct xdma_desc *desc;
+ int ret;
+
+ spin_lock(&xchan->vchan.lock);
+
+ /* get submitted request */
+ vd = vchan_next_desc(&xchan->vchan);
+ if (!vd)
+ goto out;
+
+ xchan->busy = false;
+ desc = to_xdma_desc(vd);
+ xdev = xchan->xdev_hdl;
+
+ ret = regmap_read(xdev->rmap, xchan->base + XDMA_CHAN_COMPLETED_DESC,
+ &complete_desc_num);
+ if (ret)
+ goto out;
+
+ desc->completed_desc_num += complete_desc_num;
+ /*
+ * if all data blocks are transferred, remove and complete the request
+ */
+ if (desc->completed_desc_num == desc->desc_num) {
+ list_del(&vd->node);
+ vchan_cookie_complete(vd);
+ goto out;
+ }
+
+ if (desc->completed_desc_num > desc->desc_num ||
+ complete_desc_num != XDMA_DESC_BLOCK_NUM * XDMA_DESC_ADJACENT)
+ goto out;
+
+ /* transfer the rest of data */
+ xdma_xfer_start(xchan);
+
+out:
+ spin_unlock(&xchan->vchan.lock);
+ return IRQ_HANDLED;
+}
+
+/**
+ * xdma_irq_fini - Uninitialize IRQ
+ * @xdev: DMA device pointer
+ */
+static void xdma_irq_fini(struct xdma_device *xdev)
+{
+ int i;
+
+ /* disable interrupt */
+ regmap_write(xdev->rmap, XDMA_IRQ_CHAN_INT_EN_W1C, ~0);
+
+ /* free irq handler */
+ for (i = 0; i < xdev->h2c_chan_num; i++)
+ free_irq(xdev->h2c_chans[i].irq, &xdev->h2c_chans[i]);
+
+ for (i = 0; i < xdev->c2h_chan_num; i++)
+ free_irq(xdev->c2h_chans[i].irq, &xdev->c2h_chans[i]);
+}
+
+/**
+ * xdma_set_vector_reg - configure hardware IRQ registers
+ * @xdev: DMA device pointer
+ * @vec_tbl_start: Start of IRQ registers
+ * @irq_start: Start of IRQ
+ * @irq_num: Number of IRQ
+ */
+static int xdma_set_vector_reg(struct xdma_device *xdev, u32 vec_tbl_start,
+ u32 irq_start, u32 irq_num)
+{
+ u32 shift, i, val = 0;
+ int ret;
+
+ /* Each IRQ register is 32 bit and contains 4 IRQs */
+ while (irq_num > 0) {
+ for (i = 0; i < 4; i++) {
+ shift = XDMA_IRQ_VEC_SHIFT * i;
+ val |= irq_start << shift;
+ irq_start++;
+ irq_num--;
+ }
+
+ /* write IRQ register */
+ ret = regmap_write(xdev->rmap, vec_tbl_start, val);
+ if (ret)
+ return ret;
+ vec_tbl_start += sizeof(u32);
+ val = 0;
+ }
+
+ return 0;
+}
+
+/**
+ * xdma_irq_init - initialize IRQs
+ * @xdev: DMA device pointer
+ */
+static int xdma_irq_init(struct xdma_device *xdev)
+{
+ u32 irq = xdev->irq_start;
+ u32 user_irq_start;
+ int i, j, ret;
+
+ /* return failure if there are not enough IRQs */
+ if (xdev->irq_num < XDMA_CHAN_NUM(xdev)) {
+ xdma_err(xdev, "not enough irq");
+ return -EINVAL;
+ }
+
+ /* setup H2C interrupt handler */
+ for (i = 0; i < xdev->h2c_chan_num; i++) {
+ ret = request_irq(irq, xdma_channel_isr, 0,
+ "xdma-h2c-channel", &xdev->h2c_chans[i]);
+ if (ret) {
+ xdma_err(xdev, "H2C channel%d request irq%d failed: %d",
+ i, irq, ret);
+ goto failed_init_h2c;
+ }
+ xdev->h2c_chans[i].irq = irq;
+ irq++;
+ }
+
+ /* setup C2H interrupt handler */
+ for (j = 0; j < xdev->c2h_chan_num; j++) {
+ ret = request_irq(irq, xdma_channel_isr, 0,
+ "xdma-c2h-channel", &xdev->c2h_chans[j]);
+ if (ret) {
+ xdma_err(xdev, "H2C channel%d request irq%d failed: %d",
+ j, irq, ret);
+ goto failed_init_c2h;
+ }
+ xdev->c2h_chans[j].irq = irq;
+ irq++;
+ }
+
+ /* config hardware IRQ registers */
+ ret = xdma_set_vector_reg(xdev, XDMA_IRQ_CHAN_VEC_NUM, 0,
+ XDMA_CHAN_NUM(xdev));
+ if (ret) {
+ xdma_err(xdev, "failed to set channel vectors: %d", ret);
+ goto failed_init_c2h;
+ }
+
+ /* config user IRQ registers if needed */
+ user_irq_start = XDMA_CHAN_NUM(xdev);
+ if (xdev->irq_num > user_irq_start) {
+ ret = xdma_set_vector_reg(xdev, XDMA_IRQ_USER_VEC_NUM,
+ user_irq_start,
+ xdev->irq_num - user_irq_start);
+ if (ret) {
+ xdma_err(xdev, "failed to set user vectors: %d", ret);
+ goto failed_init_c2h;
+ }
+ }
+
+ /* enable interrupt */
+ ret = regmap_write(xdev->rmap, XDMA_IRQ_CHAN_INT_EN_W1S, ~0);
+ if (ret)
+ goto failed_init_c2h;
+
+ return 0;
+
+failed_init_c2h:
+ while (j--)
+ free_irq(xdev->c2h_chans[j].irq, &xdev->c2h_chans[j]);
+failed_init_h2c:
+ while (i--)
+ free_irq(xdev->h2c_chans[i].irq, &xdev->h2c_chans[i]);
+
+ return ret;
+}
+
+static bool xdma_filter_fn(struct dma_chan *chan, void *param)
+{
+ struct xdma_chan *xdma_chan = to_xdma_chan(chan);
+ struct xdma_chan_info *chan_info = param;
+
+ return chan_info->dir == xdma_chan->dir;
+}
+
+/**
+ * xdma_disable_user_irq - Disable user interrupt
+ * @pdev: Pointer to the platform_device structure
+ * @irq_num: System IRQ number
+ */
+void xdma_disable_user_irq(struct platform_device *pdev, u32 irq_num)
+{
+ struct xdma_device *xdev = platform_get_drvdata(pdev);
+ u32 index;
+
+ index = irq_num - xdev->irq_start;
+ if (index < XDMA_CHAN_NUM(xdev) || index >= xdev->irq_num) {
+ xdma_err(xdev, "invalid user irq number");
+ return;
+ }
+ index -= XDMA_CHAN_NUM(xdev);
+
+ regmap_write(xdev->rmap, XDMA_IRQ_USER_INT_EN_W1C, 1 << index);
+}
+EXPORT_SYMBOL(xdma_disable_user_irq);
+
+/**
+ * xdma_enable_user_irq - Enable user logic interrupt
+ * @pdev: Pointer to the platform_device structure
+ * @irq_num: System IRQ number
+ */
+int xdma_enable_user_irq(struct platform_device *pdev, u32 irq_num)
+{
+ struct xdma_device *xdev = platform_get_drvdata(pdev);
+ u32 index;
+ int ret;
+
+ index = irq_num - xdev->irq_start;
+ if (index < XDMA_CHAN_NUM(xdev) || index >= xdev->irq_num) {
+ xdma_err(xdev, "invalid user irq number");
+ return -EINVAL;
+ }
+ index -= XDMA_CHAN_NUM(xdev);
+
+ ret = regmap_write(xdev->rmap, XDMA_IRQ_USER_INT_EN_W1S, 1 << index);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL(xdma_enable_user_irq);
+
+/**
+ * xdma_get_user_irq - Get system IRQ number
+ * @pdev: Pointer to the platform_device structure
+ * @user_irq_index: User logic IRQ wire index
+ *
+ * Return: The system IRQ number allocated for the given wire index.
+ */
+int xdma_get_user_irq(struct platform_device *pdev, u32 user_irq_index)
+{
+ struct xdma_device *xdev = platform_get_drvdata(pdev);
+
+ if (XDMA_CHAN_NUM(xdev) + user_irq_index >= xdev->irq_num) {
+ xdma_err(xdev, "invalid user irq index");
+ return -EINVAL;
+ }
+
+ return xdev->irq_start + XDMA_CHAN_NUM(xdev) + user_irq_index;
+}
+EXPORT_SYMBOL(xdma_get_user_irq);
+
+/**
+ * xdma_remove - Driver remove function
+ * @pdev: Pointer to the platform_device structure
+ */
+static int xdma_remove(struct platform_device *pdev)
+{
+ struct xdma_device *xdev = platform_get_drvdata(pdev);
+
+ if (xdev->status & XDMA_DEV_STATUS_INIT_MSIX)
+ xdma_irq_fini(xdev);
+
+ if (xdev->status & XDMA_DEV_STATUS_REG_DMA)
+ dma_async_device_unregister(&xdev->dma_dev);
+
+ return 0;
+}
+
+/**
+ * xdma_probe - Driver probe function
+ * @pdev: Pointer to the platform_device structure
+ */
+static int xdma_probe(struct platform_device *pdev)
+{
+ struct xdma_platdata *pdata = dev_get_platdata(&pdev->dev);
+ struct xdma_device *xdev;
+ void __iomem *reg_base;
+ struct resource *res;
+ int ret = -ENODEV;
+
+ if (pdata->max_dma_channels > XDMA_MAX_CHANNELS) {
+ dev_err(&pdev->dev, "invalid max dma channels %d",
+ pdata->max_dma_channels);
+ return -EINVAL;
+ }
+
+ xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
+ if (!xdev)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, xdev);
+ xdev->pdev = pdev;
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res) {
+ xdma_err(xdev, "failed to get irq resource");
+ goto failed;
+ }
+ xdev->irq_start = res->start;
+ xdev->irq_num = res->end - res->start + 1;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ xdma_err(xdev, "failed to get io resource");
+ goto failed;
+ }
+
+ reg_base = devm_ioremap_resource(&pdev->dev, res);
+ if (!reg_base) {
+ xdma_err(xdev, "ioremap failed");
+ goto failed;
+ }
+
+ xdev->rmap = devm_regmap_init_mmio(&pdev->dev, reg_base,
+ &xdma_regmap_config);
+ if (!xdev->rmap) {
+ xdma_err(xdev, "config regmap failed: %d", ret);
+ goto failed;
+ }
+ INIT_LIST_HEAD(&xdev->dma_dev.channels);
+
+ ret = xdma_alloc_channels(xdev, DMA_MEM_TO_DEV);
+ if (ret) {
+ xdma_err(xdev, "config H2C channels failed: %d", ret);
+ goto failed;
+ }
+
+ ret = xdma_alloc_channels(xdev, DMA_DEV_TO_MEM);
+ if (ret) {
+ xdma_err(xdev, "config C2H channels failed: %d", ret);
+ goto failed;
+ }
+
+ dma_cap_set(DMA_SLAVE, xdev->dma_dev.cap_mask);
+ dma_cap_set(DMA_PRIVATE, xdev->dma_dev.cap_mask);
+
+ xdev->dma_dev.dev = &pdev->dev;
+ xdev->dma_dev.device_free_chan_resources = xdma_free_chan_resources;
+ xdev->dma_dev.device_alloc_chan_resources = xdma_alloc_chan_resources;
+ xdev->dma_dev.device_tx_status = dma_cookie_status;
+ xdev->dma_dev.device_prep_slave_sg = xdma_prep_device_sg;
+ xdev->dma_dev.device_config = xdma_device_config;
+ xdev->dma_dev.device_issue_pending = xdma_issue_pending;
+ xdev->dma_dev.filter.map = pdata->device_map;
+ xdev->dma_dev.filter.mapcnt = pdata->device_map_cnt;
+ xdev->dma_dev.filter.fn = xdma_filter_fn;
+
+ ret = dma_async_device_register(&xdev->dma_dev);
+ if (ret) {
+ xdma_err(xdev, "failed to register Xilinx XDMA: %d", ret);
+ goto failed;
+ }
+ xdev->status |= XDMA_DEV_STATUS_REG_DMA;
+
+ ret = xdma_irq_init(xdev);
+ if (ret) {
+ xdma_err(xdev, "failed to init msix: %d", ret);
+ goto failed;
+ }
+ xdev->status |= XDMA_DEV_STATUS_INIT_MSIX;
+
+ return 0;
+
+failed:
+ xdma_remove(pdev);
+
+ return ret;
+}
+
+static const struct platform_device_id xdma_id_table[] = {
+ { "xdma", 0},
+ { },
+};
+
+static struct platform_driver xdma_driver = {
+ .driver = {
+ .name = "xdma",
+ },
+ .id_table = xdma_id_table,
+ .probe = xdma_probe,
+ .remove = xdma_remove,
+};
+
+module_platform_driver(xdma_driver);
+
+MODULE_DESCRIPTION("AMD XDMA driver");
+MODULE_AUTHOR("XRT Team <runtimeca39d@amd.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c
index 21472a5d7636..ce359058c638 100644
--- a/drivers/dma/xilinx/zynqmp_dma.c
+++ b/drivers/dma/xilinx/zynqmp_dma.c
@@ -890,7 +890,6 @@ static int zynqmp_dma_chan_probe(struct zynqmp_dma_device *zdev,
struct platform_device *pdev)
{
struct zynqmp_dma_chan *chan;
- struct resource *res;
struct device_node *node = pdev->dev.of_node;
int err;
@@ -900,8 +899,7 @@ static int zynqmp_dma_chan_probe(struct zynqmp_dma_device *zdev,
chan->dev = zdev->dev;
chan->zdev = zdev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- chan->regs = devm_ioremap_resource(&pdev->dev, res);
+ chan->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(chan->regs))
return PTR_ERR(chan->regs);
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index 4cfdefbd744d..68f576700911 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -542,4 +542,12 @@ config EDAC_DMC520
Support for error detection and correction on the
SoCs with ARM DMC-520 DRAM controller.
+config EDAC_ZYNQMP
+ tristate "Xilinx ZynqMP OCM Controller"
+ depends on ARCH_ZYNQMP || COMPILE_TEST
+ help
+ This driver supports error detection and correction for the
+ Xilinx ZynqMP OCM (On Chip Memory) controller. It can also be
+ built as a module. In that case it will be called zynqmp_edac.
+
endif # EDAC
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile
index 2d1641a27a28..9b025c5b3061 100644
--- a/drivers/edac/Makefile
+++ b/drivers/edac/Makefile
@@ -84,3 +84,4 @@ obj-$(CONFIG_EDAC_QCOM) += qcom_edac.o
obj-$(CONFIG_EDAC_ASPEED) += aspeed_edac.o
obj-$(CONFIG_EDAC_BLUEFIELD) += bluefield_edac.o
obj-$(CONFIG_EDAC_DMC520) += dmc520_edac.o
+obj-$(CONFIG_EDAC_ZYNQMP) += zynqmp_edac.o
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index e3318e5575a3..5b42533f306a 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -182,21 +182,6 @@ static inline int amd64_read_dct_pci_cfg(struct amd64_pvt *pvt, u8 dct,
* other archs, we might not have access to the caches directly.
*/
-static inline void __f17h_set_scrubval(struct amd64_pvt *pvt, u32 scrubval)
-{
- /*
- * Fam17h supports scrub values between 0x5 and 0x14. Also, the values
- * are shifted down by 0x5, so scrubval 0x5 is written to the register
- * as 0x0, scrubval 0x6 as 0x1, etc.
- */
- if (scrubval >= 0x5 && scrubval <= 0x14) {
- scrubval -= 0x5;
- pci_write_bits32(pvt->F6, F17H_SCR_LIMIT_ADDR, scrubval, 0xF);
- pci_write_bits32(pvt->F6, F17H_SCR_BASE_ADDR, 1, 0x1);
- } else {
- pci_write_bits32(pvt->F6, F17H_SCR_BASE_ADDR, 0, 0x1);
- }
-}
/*
* Scan the scrub rate mapping table for a close or matching bandwidth value to
* issue. If requested is too big, then use last maximum value found.
@@ -229,9 +214,7 @@ static int __set_scrub_rate(struct amd64_pvt *pvt, u32 new_bw, u32 min_rate)
scrubval = scrubrates[i].scrubval;
- if (pvt->umc) {
- __f17h_set_scrubval(pvt, scrubval);
- } else if (pvt->fam == 0x15 && pvt->model == 0x60) {
+ if (pvt->fam == 0x15 && pvt->model == 0x60) {
f15h_select_dct(pvt, 0);
pci_write_bits32(pvt->F2, F15H_M60H_SCRCTRL, scrubval, 0x001F);
f15h_select_dct(pvt, 1);
@@ -271,16 +254,7 @@ static int get_scrub_rate(struct mem_ctl_info *mci)
int i, retval = -EINVAL;
u32 scrubval = 0;
- if (pvt->umc) {
- amd64_read_pci_cfg(pvt->F6, F17H_SCR_BASE_ADDR, &scrubval);
- if (scrubval & BIT(0)) {
- amd64_read_pci_cfg(pvt->F6, F17H_SCR_LIMIT_ADDR, &scrubval);
- scrubval &= 0xF;
- scrubval += 0x5;
- } else {
- scrubval = 0;
- }
- } else if (pvt->fam == 0x15) {
+ if (pvt->fam == 0x15) {
/* Erratum #505 */
if (pvt->model < 0x10)
f15h_select_dct(pvt, 0);
@@ -1454,9 +1428,6 @@ static void __dump_misc_regs_df(struct amd64_pvt *pvt)
debug_display_dimm_sizes_df(pvt, i);
}
-
- edac_dbg(1, "F0x104 (DRAM Hole Address): 0x%08x, base: 0x%08x\n",
- pvt->dhar, dhar_base(pvt));
}
/* Display and decode various NB registers for debug purposes. */
@@ -1491,6 +1462,8 @@ static void __dump_misc_regs(struct amd64_pvt *pvt)
/* Only if NOT ganged does dclr1 have valid info */
if (!dct_ganging_enabled(pvt))
debug_dump_dramcfg_low(pvt, pvt->dclr1, 1);
+
+ edac_dbg(1, " DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no");
}
/* Display and decode various NB registers for debug purposes. */
@@ -1501,8 +1474,6 @@ static void dump_misc_regs(struct amd64_pvt *pvt)
else
__dump_misc_regs(pvt);
- edac_dbg(1, " DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no");
-
amd64_info("using x%u syndromes.\n", pvt->ecc_sym_sz);
}
@@ -1732,24 +1703,6 @@ ddr3:
pvt->dram_type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3;
}
-/* Get the number of DCT channels the memory controller is using. */
-static int k8_early_channel_count(struct amd64_pvt *pvt)
-{
- int flag;
-
- if (pvt->ext_model >= K8_REV_F)
- /* RevF (NPT) and later */
- flag = pvt->dclr0 & WIDTH_128;
- else
- /* RevE and earlier */
- flag = pvt->dclr0 & REVE_WIDTH_128;
-
- /* not used */
- pvt->dclr1 = 0;
-
- return (flag) ? 2 : 1;
-}
-
/* On F10h and later ErrAddr is MC4_ADDR[47:1] */
static u64 get_error_address(struct amd64_pvt *pvt, struct mce *m)
{
@@ -2001,69 +1954,6 @@ static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
}
}
-/*
- * Get the number of DCT channels in use.
- *
- * Return:
- * number of Memory Channels in operation
- * Pass back:
- * contents of the DCL0_LOW register
- */
-static int f1x_early_channel_count(struct amd64_pvt *pvt)
-{
- int i, j, channels = 0;
-
- /* On F10h, if we are in 128 bit mode, then we are using 2 channels */
- if (pvt->fam == 0x10 && (pvt->dclr0 & WIDTH_128))
- return 2;
-
- /*
- * Need to check if in unganged mode: In such, there are 2 channels,
- * but they are not in 128 bit mode and thus the above 'dclr0' status
- * bit will be OFF.
- *
- * Need to check DCT0[0] and DCT1[0] to see if only one of them has
- * their CSEnable bit on. If so, then SINGLE DIMM case.
- */
- edac_dbg(0, "Data width is not 128 bits - need more decoding\n");
-
- /*
- * Check DRAM Bank Address Mapping values for each DIMM to see if there
- * is more than just one DIMM present in unganged mode. Need to check
- * both controllers since DIMMs can be placed in either one.
- */
- for (i = 0; i < 2; i++) {
- u32 dbam = (i ? pvt->dbam1 : pvt->dbam0);
-
- for (j = 0; j < 4; j++) {
- if (DBAM_DIMM(j, dbam) > 0) {
- channels++;
- break;
- }
- }
- }
-
- if (channels > 2)
- channels = 2;
-
- amd64_info("MCT channel count: %d\n", channels);
-
- return channels;
-}
-
-static int f17_early_channel_count(struct amd64_pvt *pvt)
-{
- int i, channels = 0;
-
- /* SDP Control bit 31 (SdpInit) is clear for unused UMC channels */
- for_each_umc(i)
- channels += !!(pvt->umc[i].sdp_ctrl & UMC_SDP_INIT);
-
- amd64_info("MCT channel count: %d\n", channels);
-
- return channels;
-}
-
static int ddr3_cs_size(unsigned i, bool dct_width)
{
unsigned shift = 0;
@@ -2858,7 +2748,6 @@ static struct amd64_family_type family_types[] = {
.f2_id = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
.max_mcs = 2,
.ops = {
- .early_channel_count = k8_early_channel_count,
.map_sysaddr_to_csrow = k8_map_sysaddr_to_csrow,
.dbam_to_cs = k8_dbam_to_chip_select,
}
@@ -2869,7 +2758,6 @@ static struct amd64_family_type family_types[] = {
.f2_id = PCI_DEVICE_ID_AMD_10H_NB_DRAM,
.max_mcs = 2,
.ops = {
- .early_channel_count = f1x_early_channel_count,
.map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
.dbam_to_cs = f10_dbam_to_chip_select,
}
@@ -2880,7 +2768,6 @@ static struct amd64_family_type family_types[] = {
.f2_id = PCI_DEVICE_ID_AMD_15H_NB_F2,
.max_mcs = 2,
.ops = {
- .early_channel_count = f1x_early_channel_count,
.map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
.dbam_to_cs = f15_dbam_to_chip_select,
}
@@ -2891,7 +2778,6 @@ static struct amd64_family_type family_types[] = {
.f2_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F2,
.max_mcs = 2,
.ops = {
- .early_channel_count = f1x_early_channel_count,
.map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
.dbam_to_cs = f16_dbam_to_chip_select,
}
@@ -2902,7 +2788,6 @@ static struct amd64_family_type family_types[] = {
.f2_id = PCI_DEVICE_ID_AMD_15H_M60H_NB_F2,
.max_mcs = 2,
.ops = {
- .early_channel_count = f1x_early_channel_count,
.map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
.dbam_to_cs = f15_m60h_dbam_to_chip_select,
}
@@ -2913,7 +2798,6 @@ static struct amd64_family_type family_types[] = {
.f2_id = PCI_DEVICE_ID_AMD_16H_NB_F2,
.max_mcs = 2,
.ops = {
- .early_channel_count = f1x_early_channel_count,
.map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
.dbam_to_cs = f16_dbam_to_chip_select,
}
@@ -2924,89 +2808,64 @@ static struct amd64_family_type family_types[] = {
.f2_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F2,
.max_mcs = 2,
.ops = {
- .early_channel_count = f1x_early_channel_count,
.map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
.dbam_to_cs = f16_dbam_to_chip_select,
}
},
[F17_CPUS] = {
.ctl_name = "F17h",
- .f0_id = PCI_DEVICE_ID_AMD_17H_DF_F0,
- .f6_id = PCI_DEVICE_ID_AMD_17H_DF_F6,
.max_mcs = 2,
.ops = {
- .early_channel_count = f17_early_channel_count,
.dbam_to_cs = f17_addr_mask_to_cs_size,
}
},
[F17_M10H_CPUS] = {
.ctl_name = "F17h_M10h",
- .f0_id = PCI_DEVICE_ID_AMD_17H_M10H_DF_F0,
- .f6_id = PCI_DEVICE_ID_AMD_17H_M10H_DF_F6,
.max_mcs = 2,
.ops = {
- .early_channel_count = f17_early_channel_count,
.dbam_to_cs = f17_addr_mask_to_cs_size,
}
},
[F17_M30H_CPUS] = {
.ctl_name = "F17h_M30h",
- .f0_id = PCI_DEVICE_ID_AMD_17H_M30H_DF_F0,
- .f6_id = PCI_DEVICE_ID_AMD_17H_M30H_DF_F6,
.max_mcs = 8,
.ops = {
- .early_channel_count = f17_early_channel_count,
.dbam_to_cs = f17_addr_mask_to_cs_size,
}
},
[F17_M60H_CPUS] = {
.ctl_name = "F17h_M60h",
- .f0_id = PCI_DEVICE_ID_AMD_17H_M60H_DF_F0,
- .f6_id = PCI_DEVICE_ID_AMD_17H_M60H_DF_F6,
.max_mcs = 2,
.ops = {
- .early_channel_count = f17_early_channel_count,
.dbam_to_cs = f17_addr_mask_to_cs_size,
}
},
[F17_M70H_CPUS] = {
.ctl_name = "F17h_M70h",
- .f0_id = PCI_DEVICE_ID_AMD_17H_M70H_DF_F0,
- .f6_id = PCI_DEVICE_ID_AMD_17H_M70H_DF_F6,
.max_mcs = 2,
.ops = {
- .early_channel_count = f17_early_channel_count,
.dbam_to_cs = f17_addr_mask_to_cs_size,
}
},
[F19_CPUS] = {
.ctl_name = "F19h",
- .f0_id = PCI_DEVICE_ID_AMD_19H_DF_F0,
- .f6_id = PCI_DEVICE_ID_AMD_19H_DF_F6,
.max_mcs = 8,
.ops = {
- .early_channel_count = f17_early_channel_count,
.dbam_to_cs = f17_addr_mask_to_cs_size,
}
},
[F19_M10H_CPUS] = {
.ctl_name = "F19h_M10h",
- .f0_id = PCI_DEVICE_ID_AMD_19H_M10H_DF_F0,
- .f6_id = PCI_DEVICE_ID_AMD_19H_M10H_DF_F6,
.max_mcs = 12,
.flags.zn_regs_v2 = 1,
.ops = {
- .early_channel_count = f17_early_channel_count,
.dbam_to_cs = f17_addr_mask_to_cs_size,
}
},
[F19_M50H_CPUS] = {
.ctl_name = "F19h_M50h",
- .f0_id = PCI_DEVICE_ID_AMD_19H_M50H_DF_F0,
- .f6_id = PCI_DEVICE_ID_AMD_19H_M50H_DF_F6,
.max_mcs = 2,
.ops = {
- .early_channel_count = f17_early_channel_count,
.dbam_to_cs = f17_addr_mask_to_cs_size,
}
},
@@ -3316,36 +3175,12 @@ log_error:
/*
* Use pvt->F3 which contains the F3 CPU PCI device to get the related
* F1 (AddrMap) and F2 (Dct) devices. Return negative value on error.
- * Reserve F0 and F6 on systems with a UMC.
*/
static int
reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 pci_id1, u16 pci_id2)
{
- if (pvt->umc) {
- pvt->F0 = pci_get_related_function(pvt->F3->vendor, pci_id1, pvt->F3);
- if (!pvt->F0) {
- edac_dbg(1, "F0 not found, device 0x%x\n", pci_id1);
- return -ENODEV;
- }
-
- pvt->F6 = pci_get_related_function(pvt->F3->vendor, pci_id2, pvt->F3);
- if (!pvt->F6) {
- pci_dev_put(pvt->F0);
- pvt->F0 = NULL;
-
- edac_dbg(1, "F6 not found: device 0x%x\n", pci_id2);
- return -ENODEV;
- }
-
- if (!pci_ctl_dev)
- pci_ctl_dev = &pvt->F0->dev;
-
- edac_dbg(1, "F0: %s\n", pci_name(pvt->F0));
- edac_dbg(1, "F3: %s\n", pci_name(pvt->F3));
- edac_dbg(1, "F6: %s\n", pci_name(pvt->F6));
-
+ if (pvt->umc)
return 0;
- }
/* Reserve the ADDRESS MAP Device */
pvt->F1 = pci_get_related_function(pvt->F3->vendor, pci_id1, pvt->F3);
@@ -3377,8 +3212,7 @@ reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 pci_id1, u16 pci_id2)
static void free_mc_sibling_devs(struct amd64_pvt *pvt)
{
if (pvt->umc) {
- pci_dev_put(pvt->F0);
- pci_dev_put(pvt->F6);
+ return;
} else {
pci_dev_put(pvt->F1);
pci_dev_put(pvt->F2);
@@ -3468,7 +3302,6 @@ static void read_mc_regs(struct amd64_pvt *pvt)
if (pvt->umc) {
__read_mc_regs_df(pvt);
- amd64_read_pci_cfg(pvt->F0, DF_DHAR, &pvt->dhar);
goto skip;
}
@@ -3691,7 +3524,7 @@ static int init_csrows(struct mem_ctl_info *mci)
: EDAC_SECDED;
}
- for (j = 0; j < pvt->channel_count; j++) {
+ for (j = 0; j < fam_type->max_mcs; j++) {
dimm = csrow->channels[j]->dimm;
dimm->mtype = pvt->dram_type;
dimm->edac_mode = edac_mode;
@@ -3967,6 +3800,9 @@ static void setup_mci_misc_attrs(struct mem_ctl_info *mci)
mci->dev_name = pci_name(pvt->F3);
mci->ctl_page_to_phys = NULL;
+ if (pvt->fam >= 0x17)
+ return;
+
/* memory scrubber interface */
mci->set_sdram_scrub_rate = set_scrub_rate;
mci->get_sdram_scrub_rate = get_scrub_rate;
@@ -4092,16 +3928,13 @@ static const struct attribute_group *amd64_edac_attr_groups[] = {
static int hw_info_get(struct amd64_pvt *pvt)
{
- u16 pci_id1, pci_id2;
+ u16 pci_id1 = 0, pci_id2 = 0;
int ret;
if (pvt->fam >= 0x17) {
pvt->umc = kcalloc(fam_type->max_mcs, sizeof(struct amd64_umc), GFP_KERNEL);
if (!pvt->umc)
return -ENOMEM;
-
- pci_id1 = fam_type->f0_id;
- pci_id2 = fam_type->f6_id;
} else {
pci_id1 = fam_type->f1_id;
pci_id2 = fam_type->f2_id;
@@ -4118,7 +3951,7 @@ static int hw_info_get(struct amd64_pvt *pvt)
static void hw_info_put(struct amd64_pvt *pvt)
{
- if (pvt->F0 || pvt->F1)
+ if (pvt->F1)
free_mc_sibling_devs(pvt);
kfree(pvt->umc);
@@ -4128,28 +3961,12 @@ static int init_one_instance(struct amd64_pvt *pvt)
{
struct mem_ctl_info *mci = NULL;
struct edac_mc_layer layers[2];
- int ret = -EINVAL;
-
- /*
- * We need to determine how many memory channels there are. Then use
- * that information for calculating the size of the dynamic instance
- * tables in the 'mci' structure.
- */
- pvt->channel_count = pvt->ops->early_channel_count(pvt);
- if (pvt->channel_count < 0)
- return ret;
+ int ret = -ENOMEM;
- ret = -ENOMEM;
layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
layers[0].size = pvt->csels[0].b_cnt;
layers[0].is_virt_csrow = true;
layers[1].type = EDAC_MC_LAYER_CHANNEL;
-
- /*
- * Always allocate two channels since we can have setups with DIMMs on
- * only one channel. Also, this simplifies handling later for the price
- * of a couple of KBs tops.
- */
layers[1].size = fam_type->max_mcs;
layers[1].is_virt_csrow = false;
@@ -4370,12 +4187,12 @@ static int __init amd64_edac_init(void)
}
/* register stuff with EDAC MCE */
- if (boot_cpu_data.x86 >= 0x17)
+ if (boot_cpu_data.x86 >= 0x17) {
amd_register_ecc_decoder(decode_umc_error);
- else
+ } else {
amd_register_ecc_decoder(decode_bus_error);
-
- setup_pci_device();
+ setup_pci_device();
+ }
#ifdef CONFIG_X86_32
amd64_err("%s on 32-bit is unsupported. USE AT YOUR OWN RISK!\n", EDAC_MOD_STR);
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
index 38e5ad95d010..e4329dff8cf2 100644
--- a/drivers/edac/amd64_edac.h
+++ b/drivers/edac/amd64_edac.h
@@ -114,22 +114,6 @@
#define PCI_DEVICE_ID_AMD_16H_NB_F2 0x1532
#define PCI_DEVICE_ID_AMD_16H_M30H_NB_F1 0x1581
#define PCI_DEVICE_ID_AMD_16H_M30H_NB_F2 0x1582
-#define PCI_DEVICE_ID_AMD_17H_DF_F0 0x1460
-#define PCI_DEVICE_ID_AMD_17H_DF_F6 0x1466
-#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F0 0x15e8
-#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F6 0x15ee
-#define PCI_DEVICE_ID_AMD_17H_M30H_DF_F0 0x1490
-#define PCI_DEVICE_ID_AMD_17H_M30H_DF_F6 0x1496
-#define PCI_DEVICE_ID_AMD_17H_M60H_DF_F0 0x1448
-#define PCI_DEVICE_ID_AMD_17H_M60H_DF_F6 0x144e
-#define PCI_DEVICE_ID_AMD_17H_M70H_DF_F0 0x1440
-#define PCI_DEVICE_ID_AMD_17H_M70H_DF_F6 0x1446
-#define PCI_DEVICE_ID_AMD_19H_DF_F0 0x1650
-#define PCI_DEVICE_ID_AMD_19H_DF_F6 0x1656
-#define PCI_DEVICE_ID_AMD_19H_M10H_DF_F0 0x14ad
-#define PCI_DEVICE_ID_AMD_19H_M10H_DF_F6 0x14b3
-#define PCI_DEVICE_ID_AMD_19H_M50H_DF_F0 0x166a
-#define PCI_DEVICE_ID_AMD_19H_M50H_DF_F6 0x1670
/*
* Function 1 - Address Map
@@ -215,8 +199,6 @@
#define DCT_SEL_HI 0x114
#define F15H_M60H_SCRCTRL 0x1C8
-#define F17H_SCR_BASE_ADDR 0x48
-#define F17H_SCR_LIMIT_ADDR 0x4C
/*
* Function 3 - Misc Control
@@ -356,7 +338,7 @@ struct amd64_pvt {
struct low_ops *ops;
/* pci_device handles which we utilize */
- struct pci_dev *F0, *F1, *F2, *F3, *F6;
+ struct pci_dev *F1, *F2, *F3;
u16 mc_node_id; /* MC index of this MC node */
u8 fam; /* CPU family */
@@ -364,7 +346,6 @@ struct amd64_pvt {
u8 stepping; /* ... stepping */
int ext_model; /* extended model value of this node */
- int channel_count;
/* Raw registers */
u32 dclr0; /* DRAM Configuration Low DCT0 reg */
@@ -484,7 +465,6 @@ struct ecc_settings {
* functions and per device encoding/decoding logic.
*/
struct low_ops {
- int (*early_channel_count) (struct amd64_pvt *pvt);
void (*map_sysaddr_to_csrow) (struct mem_ctl_info *mci, u64 sys_addr,
struct err_info *);
int (*dbam_to_cs) (struct amd64_pvt *pvt, u8 dct,
@@ -503,7 +483,7 @@ struct amd64_family_flags {
struct amd64_family_type {
const char *ctl_name;
- u16 f0_id, f1_id, f2_id, f6_id;
+ u16 f1_id, f2_id;
/* Maximum number of memory controllers per die/node. */
u8 max_mcs;
struct amd64_family_flags flags;
diff --git a/drivers/edac/i10nm_base.c b/drivers/edac/i10nm_base.c
index 65aeea53e2df..0a4691792801 100644
--- a/drivers/edac/i10nm_base.c
+++ b/drivers/edac/i10nm_base.c
@@ -13,7 +13,7 @@
#include "edac_module.h"
#include "skx_common.h"
-#define I10NM_REVISION "v0.0.5"
+#define I10NM_REVISION "v0.0.6"
#define EDAC_MOD_STR "i10nm_edac"
/* Debug macros */
@@ -22,25 +22,34 @@
#define I10NM_GET_SCK_BAR(d, reg) \
pci_read_config_dword((d)->uracu, 0xd0, &(reg))
-#define I10NM_GET_IMC_BAR(d, i, reg) \
- pci_read_config_dword((d)->uracu, 0xd8 + (i) * 4, &(reg))
+#define I10NM_GET_IMC_BAR(d, i, reg) \
+ pci_read_config_dword((d)->uracu, \
+ (res_cfg->type == GNR ? 0xd4 : 0xd8) + (i) * 4, &(reg))
#define I10NM_GET_SAD(d, offset, i, reg)\
- pci_read_config_dword((d)->sad_all, (offset) + (i) * 8, &(reg))
+ pci_read_config_dword((d)->sad_all, (offset) + (i) * \
+ (res_cfg->type == GNR ? 12 : 8), &(reg))
#define I10NM_GET_HBM_IMC_BAR(d, reg) \
pci_read_config_dword((d)->uracu, 0xd4, &(reg))
#define I10NM_GET_CAPID3_CFG(d, reg) \
- pci_read_config_dword((d)->pcu_cr3, 0x90, &(reg))
+ pci_read_config_dword((d)->pcu_cr3, \
+ res_cfg->type == GNR ? 0x290 : 0x90, &(reg))
+#define I10NM_GET_CAPID5_CFG(d, reg) \
+ pci_read_config_dword((d)->pcu_cr3, \
+ res_cfg->type == GNR ? 0x298 : 0x98, &(reg))
#define I10NM_GET_DIMMMTR(m, i, j) \
- readl((m)->mbase + ((m)->hbm_mc ? 0x80c : 0x2080c) + \
+ readl((m)->mbase + ((m)->hbm_mc ? 0x80c : \
+ (res_cfg->type == GNR ? 0xc0c : 0x2080c)) + \
(i) * (m)->chan_mmio_sz + (j) * 4)
#define I10NM_GET_MCDDRTCFG(m, i) \
readl((m)->mbase + ((m)->hbm_mc ? 0x970 : 0x20970) + \
(i) * (m)->chan_mmio_sz)
#define I10NM_GET_MCMTR(m, i) \
- readl((m)->mbase + ((m)->hbm_mc ? 0xef8 : 0x20ef8) + \
+ readl((m)->mbase + ((m)->hbm_mc ? 0xef8 : \
+ (res_cfg->type == GNR ? 0xaf8 : 0x20ef8)) + \
(i) * (m)->chan_mmio_sz)
#define I10NM_GET_AMAP(m, i) \
- readl((m)->mbase + ((m)->hbm_mc ? 0x814 : 0x20814) + \
+ readl((m)->mbase + ((m)->hbm_mc ? 0x814 : \
+ (res_cfg->type == GNR ? 0xc14 : 0x20814)) + \
(i) * (m)->chan_mmio_sz)
#define I10NM_GET_REG32(m, i, offset) \
readl((m)->mbase + (i) * (m)->chan_mmio_sz + (offset))
@@ -56,7 +65,10 @@
#define I10NM_GET_HBM_IMC_MMIO_OFFSET(reg) \
((GET_BITFIELD(reg, 0, 10) << 12) + 0x140000)
+#define I10NM_GNR_IMC_MMIO_OFFSET 0x24c000
+#define I10NM_GNR_IMC_MMIO_SIZE 0x4000
#define I10NM_HBM_IMC_MMIO_SIZE 0x9000
+#define I10NM_DDR_IMC_CH_CNT(reg) GET_BITFIELD(reg, 21, 24)
#define I10NM_IS_HBM_PRESENT(reg) GET_BITFIELD(reg, 27, 30)
#define I10NM_IS_HBM_IMC(reg) GET_BITFIELD(reg, 29, 29)
@@ -148,35 +160,47 @@ static void __enable_retry_rd_err_log(struct skx_imc *imc, int chan, bool enable
static void enable_retry_rd_err_log(bool enable)
{
+ int i, j, imc_num, chan_num;
struct skx_imc *imc;
struct skx_dev *d;
- int i, j;
edac_dbg(2, "\n");
- list_for_each_entry(d, i10nm_edac_list, list)
- for (i = 0; i < I10NM_NUM_IMC; i++) {
+ list_for_each_entry(d, i10nm_edac_list, list) {
+ imc_num = res_cfg->ddr_imc_num;
+ chan_num = res_cfg->ddr_chan_num;
+
+ for (i = 0; i < imc_num; i++) {
imc = &d->imc[i];
if (!imc->mbase)
continue;
- for (j = 0; j < I10NM_NUM_CHANNELS; j++) {
- if (imc->hbm_mc) {
- __enable_retry_rd_err_log(imc, j, enable,
- res_cfg->offsets_scrub_hbm0,
- res_cfg->offsets_demand_hbm0,
- NULL);
- __enable_retry_rd_err_log(imc, j, enable,
- res_cfg->offsets_scrub_hbm1,
- res_cfg->offsets_demand_hbm1,
- NULL);
- } else {
- __enable_retry_rd_err_log(imc, j, enable,
- res_cfg->offsets_scrub,
- res_cfg->offsets_demand,
- res_cfg->offsets_demand2);
- }
+ for (j = 0; j < chan_num; j++)
+ __enable_retry_rd_err_log(imc, j, enable,
+ res_cfg->offsets_scrub,
+ res_cfg->offsets_demand,
+ res_cfg->offsets_demand2);
+ }
+
+ imc_num += res_cfg->hbm_imc_num;
+ chan_num = res_cfg->hbm_chan_num;
+
+ for (; i < imc_num; i++) {
+ imc = &d->imc[i];
+ if (!imc->mbase || !imc->hbm_mc)
+ continue;
+
+ for (j = 0; j < chan_num; j++) {
+ __enable_retry_rd_err_log(imc, j, enable,
+ res_cfg->offsets_scrub_hbm0,
+ res_cfg->offsets_demand_hbm0,
+ NULL);
+ __enable_retry_rd_err_log(imc, j, enable,
+ res_cfg->offsets_scrub_hbm1,
+ res_cfg->offsets_demand_hbm1,
+ NULL);
}
+ }
}
}
@@ -311,6 +335,79 @@ static struct pci_dev *pci_get_dev_wrapper(int dom, unsigned int bus,
return pdev;
}
+/**
+ * i10nm_get_imc_num() - Get the number of present DDR memory controllers.
+ *
+ * @cfg : The pointer to the structure of EDAC resource configurations.
+ *
+ * For Granite Rapids CPUs, the number of present DDR memory controllers read
+ * at runtime overwrites the value statically configured in @cfg->ddr_imc_num.
+ * For other CPUs, the number of present DDR memory controllers is statically
+ * configured in @cfg->ddr_imc_num.
+ *
+ * RETURNS : 0 on success, < 0 on failure.
+ */
+static int i10nm_get_imc_num(struct res_config *cfg)
+{
+ int n, imc_num, chan_num = 0;
+ struct skx_dev *d;
+ u32 reg;
+
+ list_for_each_entry(d, i10nm_edac_list, list) {
+ d->pcu_cr3 = pci_get_dev_wrapper(d->seg, d->bus[res_cfg->pcu_cr3_bdf.bus],
+ res_cfg->pcu_cr3_bdf.dev,
+ res_cfg->pcu_cr3_bdf.fun);
+ if (!d->pcu_cr3)
+ continue;
+
+ if (I10NM_GET_CAPID5_CFG(d, reg))
+ continue;
+
+ n = I10NM_DDR_IMC_CH_CNT(reg);
+
+ if (!chan_num) {
+ chan_num = n;
+ edac_dbg(2, "Get DDR CH number: %d\n", chan_num);
+ } else if (chan_num != n) {
+ i10nm_printk(KERN_NOTICE, "Get DDR CH numbers: %d, %d\n", chan_num, n);
+ }
+ }
+
+ switch (cfg->type) {
+ case GNR:
+ /*
+ * One channel per DDR memory controller for Granite Rapids CPUs.
+ */
+ imc_num = chan_num;
+
+ if (!imc_num) {
+ i10nm_printk(KERN_ERR, "Invalid DDR MC number\n");
+ return -ENODEV;
+ }
+
+ if (imc_num > I10NM_NUM_DDR_IMC) {
+ i10nm_printk(KERN_ERR, "Need to make I10NM_NUM_DDR_IMC >= %d\n", imc_num);
+ return -EINVAL;
+ }
+
+ if (cfg->ddr_imc_num != imc_num) {
+ /*
+ * Store the number of present DDR memory controllers.
+ */
+ cfg->ddr_imc_num = imc_num;
+ edac_dbg(2, "Set DDR MC number: %d", imc_num);
+ }
+
+ return 0;
+ default:
+ /*
+ * For other CPUs, the number of present DDR memory controllers
+ * is statically pre-configured in cfg->ddr_imc_num.
+ */
+ return 0;
+ }
+}
+
static bool i10nm_check_2lm(struct res_config *cfg)
{
struct skx_dev *d;
@@ -318,9 +415,9 @@ static bool i10nm_check_2lm(struct res_config *cfg)
int i;
list_for_each_entry(d, i10nm_edac_list, list) {
- d->sad_all = pci_get_dev_wrapper(d->seg, d->bus[1],
- PCI_SLOT(cfg->sad_all_devfn),
- PCI_FUNC(cfg->sad_all_devfn));
+ d->sad_all = pci_get_dev_wrapper(d->seg, d->bus[res_cfg->sad_all_bdf.bus],
+ res_cfg->sad_all_bdf.dev,
+ res_cfg->sad_all_bdf.fun);
if (!d->sad_all)
continue;
@@ -337,20 +434,39 @@ static bool i10nm_check_2lm(struct res_config *cfg)
}
/*
- * Check whether the error comes from DDRT by ICX/Tremont model specific error code.
- * Refer to SDM vol3B 16.11.3 Intel IMC MC error codes for IA32_MCi_STATUS.
+ * Check whether the error comes from DDRT by ICX/Tremont/SPR model specific error code.
+ * Refer to SDM vol3B 17.11.3/17.13.2 Intel IMC MC error codes for IA32_MCi_STATUS.
*/
static bool i10nm_mscod_is_ddrt(u32 mscod)
{
- switch (mscod) {
- case 0x0106: case 0x0107:
- case 0x0800: case 0x0804:
- case 0x0806 ... 0x0808:
- case 0x080a ... 0x080e:
- case 0x0810: case 0x0811:
- case 0x0816: case 0x081e:
- case 0x081f:
- return true;
+ switch (res_cfg->type) {
+ case I10NM:
+ switch (mscod) {
+ case 0x0106: case 0x0107:
+ case 0x0800: case 0x0804:
+ case 0x0806 ... 0x0808:
+ case 0x080a ... 0x080e:
+ case 0x0810: case 0x0811:
+ case 0x0816: case 0x081e:
+ case 0x081f:
+ return true;
+ }
+
+ break;
+ case SPR:
+ switch (mscod) {
+ case 0x0800: case 0x0804:
+ case 0x0806 ... 0x0808:
+ case 0x080a ... 0x080e:
+ case 0x0810: case 0x0811:
+ case 0x0816: case 0x081e:
+ case 0x081f:
+ return true;
+ }
+
+ break;
+ default:
+ return false;
}
return false;
@@ -358,6 +474,7 @@ static bool i10nm_mscod_is_ddrt(u32 mscod)
static bool i10nm_mc_decode_available(struct mce *mce)
{
+#define ICX_IMCx_CHy 0x06666000
u8 bank;
if (!decoding_via_mca || mem_cfg_2lm)
@@ -371,21 +488,26 @@ static bool i10nm_mc_decode_available(struct mce *mce)
switch (res_cfg->type) {
case I10NM:
- if (bank < 13 || bank > 26)
- return false;
-
- /* DDRT errors can't be decoded from MCA bank registers */
- if (MCI_MISC_ECC_MODE(mce->misc) == MCI_MISC_ECC_DDRT)
+ /* Check whether the bank is one of {13,14,17,18,21,22,25,26} */
+ if (!(ICX_IMCx_CHy & (1 << bank)))
return false;
-
- if (i10nm_mscod_is_ddrt(MCI_STATUS_MSCOD(mce->status)))
+ break;
+ case SPR:
+ if (bank < 13 || bank > 20)
return false;
-
- /* Check whether one of {13,14,17,18,21,22,25,26} */
- return ((bank - 13) & BIT(1)) == 0;
+ break;
default:
return false;
}
+
+ /* DDRT errors can't be decoded from MCA bank registers */
+ if (MCI_MISC_ECC_MODE(mce->misc) == MCI_MISC_ECC_DDRT)
+ return false;
+
+ if (i10nm_mscod_is_ddrt(MCI_STATUS_MSCOD(mce->status)))
+ return false;
+
+ return true;
}
static bool i10nm_mc_decode(struct decoded_addr *res)
@@ -407,9 +529,29 @@ static bool i10nm_mc_decode(struct decoded_addr *res)
switch (res_cfg->type) {
case I10NM:
- bank = m->bank - 13;
- res->imc = bank / 4;
- res->channel = bank % 2;
+ bank = m->bank - 13;
+ res->imc = bank / 4;
+ res->channel = bank % 2;
+ res->column = GET_BITFIELD(m->misc, 9, 18) << 2;
+ res->row = GET_BITFIELD(m->misc, 19, 39);
+ res->bank_group = GET_BITFIELD(m->misc, 40, 41);
+ res->bank_address = GET_BITFIELD(m->misc, 42, 43);
+ res->bank_group |= GET_BITFIELD(m->misc, 44, 44) << 2;
+ res->rank = GET_BITFIELD(m->misc, 56, 58);
+ res->dimm = res->rank >> 2;
+ res->rank = res->rank % 4;
+ break;
+ case SPR:
+ bank = m->bank - 13;
+ res->imc = bank / 2;
+ res->channel = bank % 2;
+ res->column = GET_BITFIELD(m->misc, 9, 18) << 2;
+ res->row = GET_BITFIELD(m->misc, 19, 36);
+ res->bank_group = GET_BITFIELD(m->misc, 37, 38);
+ res->bank_address = GET_BITFIELD(m->misc, 39, 40);
+ res->bank_group |= GET_BITFIELD(m->misc, 41, 41) << 2;
+ res->rank = GET_BITFIELD(m->misc, 57, 57);
+ res->dimm = GET_BITFIELD(m->misc, 58, 58);
break;
default:
return false;
@@ -421,18 +563,101 @@ static bool i10nm_mc_decode(struct decoded_addr *res)
return false;
}
- res->column = GET_BITFIELD(m->misc, 9, 18) << 2;
- res->row = GET_BITFIELD(m->misc, 19, 39);
- res->bank_group = GET_BITFIELD(m->misc, 40, 41);
- res->bank_address = GET_BITFIELD(m->misc, 42, 43);
- res->bank_group |= GET_BITFIELD(m->misc, 44, 44) << 2;
- res->rank = GET_BITFIELD(m->misc, 56, 58);
- res->dimm = res->rank >> 2;
- res->rank = res->rank % 4;
-
return true;
}
+/**
+ * get_gnr_mdev() - Get the PCI device of the @logical_idx-th DDR memory controller.
+ *
+ * @d : The pointer to the structure of CPU socket EDAC device.
+ * @logical_idx : The logical index of the present memory controller (0 ~ max present MC# - 1).
+ * @physical_idx : To store the corresponding physical index of @logical_idx.
+ *
+ * RETURNS : The PCI device of the @logical_idx-th DDR memory controller, NULL on failure.
+ */
+static struct pci_dev *get_gnr_mdev(struct skx_dev *d, int logical_idx, int *physical_idx)
+{
+#define GNR_MAX_IMC_PCI_CNT 28
+
+ struct pci_dev *mdev;
+ int i, logical = 0;
+
+ /*
+ * Detect present memory controllers from { PCI device: 8-5, function 7-1 }
+ */
+ for (i = 0; i < GNR_MAX_IMC_PCI_CNT; i++) {
+ mdev = pci_get_dev_wrapper(d->seg,
+ d->bus[res_cfg->ddr_mdev_bdf.bus],
+ res_cfg->ddr_mdev_bdf.dev + i / 7,
+ res_cfg->ddr_mdev_bdf.fun + i % 7);
+
+ if (mdev) {
+ if (logical == logical_idx) {
+ *physical_idx = i;
+ return mdev;
+ }
+
+ pci_dev_put(mdev);
+ logical++;
+ }
+ }
+
+ return NULL;
+}
+
+/**
+ * get_ddr_munit() - Get the resource of the i-th DDR memory controller.
+ *
+ * @d : The pointer to the structure of CPU socket EDAC device.
+ * @i : The index of the CPU socket relative DDR memory controller.
+ * @offset : To store the MMIO offset of the i-th DDR memory controller.
+ * @size : To store the MMIO size of the i-th DDR memory controller.
+ *
+ * RETURNS : The PCI device of the i-th DDR memory controller, NULL on failure.
+ */
+static struct pci_dev *get_ddr_munit(struct skx_dev *d, int i, u32 *offset, unsigned long *size)
+{
+ struct pci_dev *mdev;
+ int physical_idx;
+ u32 reg;
+
+ switch (res_cfg->type) {
+ case GNR:
+ if (I10NM_GET_IMC_BAR(d, 0, reg)) {
+ i10nm_printk(KERN_ERR, "Failed to get mc0 bar\n");
+ return NULL;
+ }
+
+ mdev = get_gnr_mdev(d, i, &physical_idx);
+ if (!mdev)
+ return NULL;
+
+ *offset = I10NM_GET_IMC_MMIO_OFFSET(reg) +
+ I10NM_GNR_IMC_MMIO_OFFSET +
+ physical_idx * I10NM_GNR_IMC_MMIO_SIZE;
+ *size = I10NM_GNR_IMC_MMIO_SIZE;
+
+ break;
+ default:
+ if (I10NM_GET_IMC_BAR(d, i, reg)) {
+ i10nm_printk(KERN_ERR, "Failed to get mc%d bar\n", i);
+ return NULL;
+ }
+
+ mdev = pci_get_dev_wrapper(d->seg,
+ d->bus[res_cfg->ddr_mdev_bdf.bus],
+ res_cfg->ddr_mdev_bdf.dev + i,
+ res_cfg->ddr_mdev_bdf.fun);
+ if (!mdev)
+ return NULL;
+
+ *offset = I10NM_GET_IMC_MMIO_OFFSET(reg);
+ *size = I10NM_GET_IMC_MMIO_SIZE(reg);
+ }
+
+ return mdev;
+}
+
static int i10nm_get_ddr_munits(void)
{
struct pci_dev *mdev;
@@ -444,11 +669,15 @@ static int i10nm_get_ddr_munits(void)
u64 base;
list_for_each_entry(d, i10nm_edac_list, list) {
- d->util_all = pci_get_dev_wrapper(d->seg, d->bus[1], 29, 1);
+ d->util_all = pci_get_dev_wrapper(d->seg, d->bus[res_cfg->util_all_bdf.bus],
+ res_cfg->util_all_bdf.dev,
+ res_cfg->util_all_bdf.fun);
if (!d->util_all)
return -ENODEV;
- d->uracu = pci_get_dev_wrapper(d->seg, d->bus[0], 0, 1);
+ d->uracu = pci_get_dev_wrapper(d->seg, d->bus[res_cfg->uracu_bdf.bus],
+ res_cfg->uracu_bdf.dev,
+ res_cfg->uracu_bdf.fun);
if (!d->uracu)
return -ENODEV;
@@ -461,9 +690,9 @@ static int i10nm_get_ddr_munits(void)
edac_dbg(2, "socket%d mmio base 0x%llx (reg 0x%x)\n",
j++, base, reg);
- for (i = 0; i < I10NM_NUM_DDR_IMC; i++) {
- mdev = pci_get_dev_wrapper(d->seg, d->bus[0],
- 12 + i, 0);
+ for (i = 0; i < res_cfg->ddr_imc_num; i++) {
+ mdev = get_ddr_munit(d, i, &off, &size);
+
if (i == 0 && !mdev) {
i10nm_printk(KERN_ERR, "No IMC found\n");
return -ENODEV;
@@ -473,13 +702,6 @@ static int i10nm_get_ddr_munits(void)
d->imc[i].mdev = mdev;
- if (I10NM_GET_IMC_BAR(d, i, reg)) {
- i10nm_printk(KERN_ERR, "Failed to get mc bar\n");
- return -ENODEV;
- }
-
- off = I10NM_GET_IMC_MMIO_OFFSET(reg);
- size = I10NM_GET_IMC_MMIO_SIZE(reg);
edac_dbg(2, "mc%d mmio base 0x%llx size 0x%lx (reg 0x%x)\n",
i, base + off, size, reg);
@@ -519,7 +741,6 @@ static int i10nm_get_hbm_munits(void)
u64 base;
list_for_each_entry(d, i10nm_edac_list, list) {
- d->pcu_cr3 = pci_get_dev_wrapper(d->seg, d->bus[1], 30, 3);
if (!d->pcu_cr3)
return -ENODEV;
@@ -540,11 +761,13 @@ static int i10nm_get_hbm_munits(void)
}
base += I10NM_GET_HBM_IMC_MMIO_OFFSET(reg);
- lmc = I10NM_NUM_DDR_IMC;
+ lmc = res_cfg->ddr_imc_num;
+
+ for (i = 0; i < res_cfg->hbm_imc_num; i++) {
+ mdev = pci_get_dev_wrapper(d->seg, d->bus[res_cfg->hbm_mdev_bdf.bus],
+ res_cfg->hbm_mdev_bdf.dev + i / 4,
+ res_cfg->hbm_mdev_bdf.fun + i % 4);
- for (i = 0; i < I10NM_NUM_HBM_IMC; i++) {
- mdev = pci_get_dev_wrapper(d->seg, d->bus[0],
- 12 + i / 4, 1 + i % 4);
if (i == 0 && !mdev) {
i10nm_printk(KERN_ERR, "No hbm mc found\n");
return -ENODEV;
@@ -594,8 +817,16 @@ static struct res_config i10nm_cfg0 = {
.type = I10NM,
.decs_did = 0x3452,
.busno_cfg_offset = 0xcc,
+ .ddr_imc_num = 4,
+ .ddr_chan_num = 2,
+ .ddr_dimm_num = 2,
.ddr_chan_mmio_sz = 0x4000,
- .sad_all_devfn = PCI_DEVFN(29, 0),
+ .sad_all_bdf = {1, 29, 0},
+ .pcu_cr3_bdf = {1, 30, 3},
+ .util_all_bdf = {1, 29, 1},
+ .uracu_bdf = {0, 0, 1},
+ .ddr_mdev_bdf = {0, 12, 0},
+ .hbm_mdev_bdf = {0, 12, 1},
.sad_all_offset = 0x108,
.offsets_scrub = offsets_scrub_icx,
.offsets_demand = offsets_demand_icx,
@@ -605,8 +836,16 @@ static struct res_config i10nm_cfg1 = {
.type = I10NM,
.decs_did = 0x3452,
.busno_cfg_offset = 0xd0,
+ .ddr_imc_num = 4,
+ .ddr_chan_num = 2,
+ .ddr_dimm_num = 2,
.ddr_chan_mmio_sz = 0x4000,
- .sad_all_devfn = PCI_DEVFN(29, 0),
+ .sad_all_bdf = {1, 29, 0},
+ .pcu_cr3_bdf = {1, 30, 3},
+ .util_all_bdf = {1, 29, 1},
+ .uracu_bdf = {0, 0, 1},
+ .ddr_mdev_bdf = {0, 12, 0},
+ .hbm_mdev_bdf = {0, 12, 1},
.sad_all_offset = 0x108,
.offsets_scrub = offsets_scrub_icx,
.offsets_demand = offsets_demand_icx,
@@ -616,10 +855,21 @@ static struct res_config spr_cfg = {
.type = SPR,
.decs_did = 0x3252,
.busno_cfg_offset = 0xd0,
+ .ddr_imc_num = 4,
+ .ddr_chan_num = 2,
+ .ddr_dimm_num = 2,
+ .hbm_imc_num = 16,
+ .hbm_chan_num = 2,
+ .hbm_dimm_num = 1,
.ddr_chan_mmio_sz = 0x8000,
.hbm_chan_mmio_sz = 0x4000,
.support_ddr5 = true,
- .sad_all_devfn = PCI_DEVFN(10, 0),
+ .sad_all_bdf = {1, 10, 0},
+ .pcu_cr3_bdf = {1, 30, 3},
+ .util_all_bdf = {1, 29, 1},
+ .uracu_bdf = {0, 0, 1},
+ .ddr_mdev_bdf = {0, 12, 0},
+ .hbm_mdev_bdf = {0, 12, 1},
.sad_all_offset = 0x300,
.offsets_scrub = offsets_scrub_spr,
.offsets_scrub_hbm0 = offsets_scrub_spr_hbm0,
@@ -630,6 +880,23 @@ static struct res_config spr_cfg = {
.offsets_demand_hbm1 = offsets_demand_spr_hbm1,
};
+static struct res_config gnr_cfg = {
+ .type = GNR,
+ .decs_did = 0x3252,
+ .busno_cfg_offset = 0xd0,
+ .ddr_imc_num = 12,
+ .ddr_chan_num = 1,
+ .ddr_dimm_num = 2,
+ .ddr_chan_mmio_sz = 0x4000,
+ .support_ddr5 = true,
+ .sad_all_bdf = {0, 13, 0},
+ .pcu_cr3_bdf = {0, 5, 0},
+ .util_all_bdf = {0, 13, 1},
+ .uracu_bdf = {0, 0, 1},
+ .ddr_mdev_bdf = {0, 5, 1},
+ .sad_all_offset = 0x300,
+};
+
static const struct x86_cpu_id i10nm_cpuids[] = {
X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(ATOM_TREMONT_D, X86_STEPPINGS(0x0, 0x3), &i10nm_cfg0),
X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(ATOM_TREMONT_D, X86_STEPPINGS(0x4, 0xf), &i10nm_cfg1),
@@ -637,6 +904,8 @@ static const struct x86_cpu_id i10nm_cpuids[] = {
X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(ICELAKE_X, X86_STEPPINGS(0x4, 0xf), &i10nm_cfg1),
X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(ICELAKE_D, X86_STEPPINGS(0x0, 0xf), &i10nm_cfg1),
X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(SAPPHIRERAPIDS_X, X86_STEPPINGS(0x0, 0xf), &spr_cfg),
+ X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(EMERALDRAPIDS_X, X86_STEPPINGS(0x0, 0xf), &spr_cfg),
+ X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(GRANITERAPIDS_X, X86_STEPPINGS(0x0, 0xf), &gnr_cfg),
{}
};
MODULE_DEVICE_TABLE(x86cpu, i10nm_cpuids);
@@ -656,7 +925,7 @@ static int i10nm_get_dimm_config(struct mem_ctl_info *mci,
{
struct skx_pvt *pvt = mci->pvt_info;
struct skx_imc *imc = pvt->imc;
- u32 mtr, amap, mcddrtcfg;
+ u32 mtr, amap, mcddrtcfg = 0;
struct dimm_info *dimm;
int i, j, ndimms;
@@ -666,7 +935,10 @@ static int i10nm_get_dimm_config(struct mem_ctl_info *mci,
ndimms = 0;
amap = I10NM_GET_AMAP(imc, i);
- mcddrtcfg = I10NM_GET_MCDDRTCFG(imc, i);
+
+ if (res_cfg->type != GNR)
+ mcddrtcfg = I10NM_GET_MCDDRTCFG(imc, i);
+
for (j = 0; j < imc->num_dimms; j++) {
dimm = edac_get_dimm(mci, i, j, 0);
mtr = I10NM_GET_DIMMMTR(imc, i, j);
@@ -752,6 +1024,7 @@ static int __init i10nm_init(void)
struct skx_dev *d;
int rc, i, off[3] = {0xd0, 0xc8, 0xcc};
u64 tolm, tohm;
+ int imc_num;
edac_dbg(2, "\n");
@@ -784,6 +1057,10 @@ static int __init i10nm_init(void)
return -ENODEV;
}
+ rc = i10nm_get_imc_num(cfg);
+ if (rc < 0)
+ goto fail;
+
mem_cfg_2lm = i10nm_check_2lm(cfg);
skx_set_mem_cfg(mem_cfg_2lm);
@@ -792,6 +1069,8 @@ static int __init i10nm_init(void)
if (i10nm_get_hbm_munits() && rc)
goto fail;
+ imc_num = res_cfg->ddr_imc_num + res_cfg->hbm_imc_num;
+
list_for_each_entry(d, i10nm_edac_list, list) {
rc = skx_get_src_id(d, 0xf8, &src_id);
if (rc < 0)
@@ -802,7 +1081,7 @@ static int __init i10nm_init(void)
goto fail;
edac_dbg(2, "src_id = %d node_id = %d\n", src_id, node_id);
- for (i = 0; i < I10NM_NUM_IMC; i++) {
+ for (i = 0; i < imc_num; i++) {
if (!d->imc[i].mdev)
continue;
@@ -812,12 +1091,12 @@ static int __init i10nm_init(void)
d->imc[i].node_id = node_id;
if (d->imc[i].hbm_mc) {
d->imc[i].chan_mmio_sz = cfg->hbm_chan_mmio_sz;
- d->imc[i].num_channels = I10NM_NUM_HBM_CHANNELS;
- d->imc[i].num_dimms = I10NM_NUM_HBM_DIMMS;
+ d->imc[i].num_channels = cfg->hbm_chan_num;
+ d->imc[i].num_dimms = cfg->hbm_dimm_num;
} else {
d->imc[i].chan_mmio_sz = cfg->ddr_chan_mmio_sz;
- d->imc[i].num_channels = I10NM_NUM_DDR_CHANNELS;
- d->imc[i].num_dimms = I10NM_NUM_DDR_DIMMS;
+ d->imc[i].num_channels = cfg->ddr_chan_num;
+ d->imc[i].num_dimms = cfg->ddr_dimm_num;
}
rc = skx_register_mci(&d->imc[i], d->imc[i].mdev,
diff --git a/drivers/edac/qcom_edac.c b/drivers/edac/qcom_edac.c
index c45519f59dc1..3256254c3722 100644
--- a/drivers/edac/qcom_edac.c
+++ b/drivers/edac/qcom_edac.c
@@ -396,12 +396,19 @@ static int qcom_llcc_edac_remove(struct platform_device *pdev)
return 0;
}
+static const struct platform_device_id qcom_llcc_edac_id_table[] = {
+ { .name = "qcom_llcc_edac" },
+ {}
+};
+MODULE_DEVICE_TABLE(platform, qcom_llcc_edac_id_table);
+
static struct platform_driver qcom_llcc_edac_driver = {
.probe = qcom_llcc_edac_probe,
.remove = qcom_llcc_edac_remove,
.driver = {
.name = "qcom_llcc_edac",
},
+ .id_table = qcom_llcc_edac_id_table,
};
module_platform_driver(qcom_llcc_edac_driver);
diff --git a/drivers/edac/skx_common.c b/drivers/edac/skx_common.c
index f0f8e98f6efb..ce3e0069e028 100644
--- a/drivers/edac/skx_common.c
+++ b/drivers/edac/skx_common.c
@@ -560,44 +560,28 @@ static void skx_mce_output_error(struct mem_ctl_info *mci,
tp_event = HW_EVENT_ERR_CORRECTED;
}
- /*
- * According to Intel Architecture spec vol 3B,
- * Table 15-10 "IA32_MCi_Status [15:0] Compound Error Code Encoding"
- * memory errors should fit one of these masks:
- * 000f 0000 1mmm cccc (binary)
- * 000f 0010 1mmm cccc (binary) [RAM used as cache]
- * where:
- * f = Correction Report Filtering Bit. If 1, subsequent errors
- * won't be shown
- * mmm = error type
- * cccc = channel
- * If the mask doesn't match, report an error to the parsing logic
- */
- if (!((errcode & 0xef80) == 0x80 || (errcode & 0xef80) == 0x280)) {
- optype = "Can't parse: it is not a mem";
- } else {
- switch (optypenum) {
- case 0:
- optype = "generic undef request error";
- break;
- case 1:
- optype = "memory read error";
- break;
- case 2:
- optype = "memory write error";
- break;
- case 3:
- optype = "addr/cmd error";
- break;
- case 4:
- optype = "memory scrubbing error";
- scrub_err = true;
- break;
- default:
- optype = "reserved";
- break;
- }
+ switch (optypenum) {
+ case 0:
+ optype = "generic undef request error";
+ break;
+ case 1:
+ optype = "memory read error";
+ break;
+ case 2:
+ optype = "memory write error";
+ break;
+ case 3:
+ optype = "addr/cmd error";
+ break;
+ case 4:
+ optype = "memory scrubbing error";
+ scrub_err = true;
+ break;
+ default:
+ optype = "reserved";
+ break;
}
+
if (res->decoded_by_adxl) {
len = snprintf(skx_msg, MSG_SIZE, "%s%s err_code:0x%04x:0x%04x %s",
overflow ? " OVERFLOW" : "",
@@ -632,12 +616,18 @@ static bool skx_error_in_1st_level_mem(const struct mce *m)
if (!skx_mem_cfg_2lm)
return false;
- errcode = GET_BITFIELD(m->status, 0, 15);
+ errcode = GET_BITFIELD(m->status, 0, 15) & MCACOD_MEM_ERR_MASK;
- if ((errcode & 0xef80) != 0x280)
- return false;
+ return errcode == MCACOD_EXT_MEM_ERR;
+}
- return true;
+static bool skx_error_in_mem(const struct mce *m)
+{
+ u32 errcode;
+
+ errcode = GET_BITFIELD(m->status, 0, 15) & MCACOD_MEM_ERR_MASK;
+
+ return (errcode == MCACOD_MEM_CTL_ERR || errcode == MCACOD_EXT_MEM_ERR);
}
int skx_mce_check_error(struct notifier_block *nb, unsigned long val,
@@ -651,13 +641,13 @@ int skx_mce_check_error(struct notifier_block *nb, unsigned long val,
if (mce->kflags & MCE_HANDLED_CEC)
return NOTIFY_DONE;
- /* ignore unless this is memory related with an address */
- if ((mce->status & 0xefff) >> 7 != 1 || !(mce->status & MCI_STATUS_ADDRV))
+ /* Ignore unless this is memory related with an address */
+ if (!skx_error_in_mem(mce) || !(mce->status & MCI_STATUS_ADDRV))
return NOTIFY_DONE;
memset(&res, 0, sizeof(res));
res.mce = mce;
- res.addr = mce->addr;
+ res.addr = mce->addr & MCI_ADDR_PHYSADDR;
/* Try driver decoder first */
if (!(driver_decode && driver_decode(&res))) {
diff --git a/drivers/edac/skx_common.h b/drivers/edac/skx_common.h
index 0cbadd3d2cd3..b6d3607dffe2 100644
--- a/drivers/edac/skx_common.h
+++ b/drivers/edac/skx_common.h
@@ -33,7 +33,7 @@
#define SKX_NUM_CHANNELS 3 /* Channels per memory controller */
#define SKX_NUM_DIMMS 2 /* Max DIMMS per channel */
-#define I10NM_NUM_DDR_IMC 4
+#define I10NM_NUM_DDR_IMC 12
#define I10NM_NUM_DDR_CHANNELS 2
#define I10NM_NUM_DDR_DIMMS 2
@@ -57,6 +57,30 @@
#define MCI_MISC_ECC_DDRT 8 /* read from DDRT */
/*
+ * According to Intel Architecture spec vol 3B,
+ * Table 15-10 "IA32_MCi_Status [15:0] Compound Error Code Encoding"
+ * memory errors should fit one of these masks:
+ * 000f 0000 1mmm cccc (binary)
+ * 000f 0010 1mmm cccc (binary) [RAM used as cache]
+ * where:
+ * f = Correction Report Filtering Bit. If 1, subsequent errors
+ * won't be shown
+ * mmm = error type
+ * cccc = channel
+ */
+#define MCACOD_MEM_ERR_MASK 0xef80
+/*
+ * Errors from either the memory of the 1-level memory system or the
+ * 2nd level memory (the slow "far" memory) of the 2-level memory system.
+ */
+#define MCACOD_MEM_CTL_ERR 0x80
+/*
+ * Errors from the 1st level memory (the fast "near" memory as cache)
+ * of the 2-level memory system.
+ */
+#define MCACOD_EXT_MEM_ERR 0x280
+
+/*
* Each cpu socket contains some pci devices that provide global
* information, and also some that are local to each of the two
* memory controllers on the die.
@@ -105,7 +129,8 @@ struct skx_pvt {
enum type {
SKX,
I10NM,
- SPR
+ SPR,
+ GNR
};
enum {
@@ -149,19 +174,47 @@ struct decoded_addr {
bool decoded_by_adxl;
};
+struct pci_bdf {
+ u32 bus : 8;
+ u32 dev : 5;
+ u32 fun : 3;
+};
+
struct res_config {
enum type type;
/* Configuration agent device ID */
unsigned int decs_did;
/* Default bus number configuration register offset */
int busno_cfg_offset;
+ /* DDR memory controllers per socket */
+ int ddr_imc_num;
+ /* DDR channels per DDR memory controller */
+ int ddr_chan_num;
+ /* DDR DIMMs per DDR memory channel */
+ int ddr_dimm_num;
/* Per DDR channel memory-mapped I/O size */
int ddr_chan_mmio_sz;
+ /* HBM memory controllers per socket */
+ int hbm_imc_num;
+ /* HBM channels per HBM memory controller */
+ int hbm_chan_num;
+ /* HBM DIMMs per HBM memory channel */
+ int hbm_dimm_num;
/* Per HBM channel memory-mapped I/O size */
int hbm_chan_mmio_sz;
bool support_ddr5;
- /* SAD device number and function number */
- unsigned int sad_all_devfn;
+ /* SAD device BDF */
+ struct pci_bdf sad_all_bdf;
+ /* PCU device BDF */
+ struct pci_bdf pcu_cr3_bdf;
+ /* UTIL device BDF */
+ struct pci_bdf util_all_bdf;
+ /* URACU device BDF */
+ struct pci_bdf uracu_bdf;
+ /* DDR mdev device BDF */
+ struct pci_bdf ddr_mdev_bdf;
+ /* HBM mdev device BDF */
+ struct pci_bdf hbm_mdev_bdf;
int sad_all_offset;
/* Offsets of retry_rd_err_log registers */
u32 *offsets_scrub;
diff --git a/drivers/edac/zynqmp_edac.c b/drivers/edac/zynqmp_edac.c
new file mode 100644
index 000000000000..ac7d1e0b324c
--- /dev/null
+++ b/drivers/edac/zynqmp_edac.c
@@ -0,0 +1,467 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx ZynqMP OCM ECC Driver
+ *
+ * Copyright (C) 2022 Advanced Micro Devices, Inc.
+ */
+
+#include <linux/edac.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+
+#include "edac_module.h"
+
+#define ZYNQMP_OCM_EDAC_MSG_SIZE 256
+
+#define ZYNQMP_OCM_EDAC_STRING "zynqmp_ocm"
+
+/* Error/Interrupt registers */
+#define ERR_CTRL_OFST 0x0
+#define OCM_ISR_OFST 0x04
+#define OCM_IMR_OFST 0x08
+#define OCM_IEN_OFST 0x0C
+#define OCM_IDS_OFST 0x10
+
+/* ECC control register */
+#define ECC_CTRL_OFST 0x14
+
+/* Correctable error info registers */
+#define CE_FFA_OFST 0x1C
+#define CE_FFD0_OFST 0x20
+#define CE_FFD1_OFST 0x24
+#define CE_FFD2_OFST 0x28
+#define CE_FFD3_OFST 0x2C
+#define CE_FFE_OFST 0x30
+
+/* Uncorrectable error info registers */
+#define UE_FFA_OFST 0x34
+#define UE_FFD0_OFST 0x38
+#define UE_FFD1_OFST 0x3C
+#define UE_FFD2_OFST 0x40
+#define UE_FFD3_OFST 0x44
+#define UE_FFE_OFST 0x48
+
+/* ECC control register bit field definitions */
+#define ECC_CTRL_CLR_CE_ERR 0x40
+#define ECC_CTRL_CLR_UE_ERR 0x80
+
+/* Fault injection data and count registers */
+#define OCM_FID0_OFST 0x4C
+#define OCM_FID1_OFST 0x50
+#define OCM_FID2_OFST 0x54
+#define OCM_FID3_OFST 0x58
+#define OCM_FIC_OFST 0x74
+
+#define UE_MAX_BITPOS_LOWER 31
+#define UE_MIN_BITPOS_UPPER 32
+#define UE_MAX_BITPOS_UPPER 63
+
+/* Interrupt masks */
+#define OCM_CEINTR_MASK BIT(6)
+#define OCM_UEINTR_MASK BIT(7)
+#define OCM_ECC_ENABLE_MASK BIT(0)
+
+#define OCM_FICOUNT_MASK GENMASK(23, 0)
+#define OCM_NUM_UE_BITPOS 2
+#define OCM_BASEVAL 0xFFFC0000
+#define EDAC_DEVICE "ZynqMP-OCM"
+
+/**
+ * struct ecc_error_info - ECC error log information
+ * @addr: Fault generated at this address
+ * @fault_lo: Generated fault data (lower 32-bit)
+ * @fault_hi: Generated fault data (upper 32-bit)
+ */
+struct ecc_error_info {
+ u32 addr;
+ u32 fault_lo;
+ u32 fault_hi;
+};
+
+/**
+ * struct ecc_status - ECC status information to report
+ * @ce_cnt: Correctable error count
+ * @ue_cnt: Uncorrectable error count
+ * @ceinfo: Correctable error log information
+ * @ueinfo: Uncorrectable error log information
+ */
+struct ecc_status {
+ u32 ce_cnt;
+ u32 ue_cnt;
+ struct ecc_error_info ceinfo;
+ struct ecc_error_info ueinfo;
+};
+
+/**
+ * struct edac_priv - OCM private instance data
+ * @baseaddr: Base address of the OCM
+ * @message: Buffer for framing the event specific info
+ * @stat: ECC status information
+ * @ce_cnt: Correctable Error count
+ * @ue_cnt: Uncorrectable Error count
+ * @debugfs_dir: Directory entry for debugfs
+ * @ce_bitpos: Bit position for Correctable Error
+ * @ue_bitpos: Array to store UnCorrectable Error bit positions
+ * @fault_injection_cnt: Fault Injection Counter value
+ */
+struct edac_priv {
+ void __iomem *baseaddr;
+ char message[ZYNQMP_OCM_EDAC_MSG_SIZE];
+ struct ecc_status stat;
+ u32 ce_cnt;
+ u32 ue_cnt;
+#ifdef CONFIG_EDAC_DEBUG
+ struct dentry *debugfs_dir;
+ u8 ce_bitpos;
+ u8 ue_bitpos[OCM_NUM_UE_BITPOS];
+ u32 fault_injection_cnt;
+#endif
+};
+
+/**
+ * get_error_info - Get the current ECC error info
+ * @base: Pointer to the base address of the OCM
+ * @p: Pointer to the OCM ECC status structure
+ * @mask: Status register mask value
+ *
+ * Determines there is any ECC error or not
+ *
+ */
+static void get_error_info(void __iomem *base, struct ecc_status *p, int mask)
+{
+ if (mask & OCM_CEINTR_MASK) {
+ p->ce_cnt++;
+ p->ceinfo.fault_lo = readl(base + CE_FFD0_OFST);
+ p->ceinfo.fault_hi = readl(base + CE_FFD1_OFST);
+ p->ceinfo.addr = (OCM_BASEVAL | readl(base + CE_FFA_OFST));
+ writel(ECC_CTRL_CLR_CE_ERR, base + OCM_ISR_OFST);
+ } else if (mask & OCM_UEINTR_MASK) {
+ p->ue_cnt++;
+ p->ueinfo.fault_lo = readl(base + UE_FFD0_OFST);
+ p->ueinfo.fault_hi = readl(base + UE_FFD1_OFST);
+ p->ueinfo.addr = (OCM_BASEVAL | readl(base + UE_FFA_OFST));
+ writel(ECC_CTRL_CLR_UE_ERR, base + OCM_ISR_OFST);
+ }
+}
+
+/**
+ * handle_error - Handle error types CE and UE
+ * @dci: Pointer to the EDAC device instance
+ * @p: Pointer to the OCM ECC status structure
+ *
+ * Handles correctable and uncorrectable errors.
+ */
+static void handle_error(struct edac_device_ctl_info *dci, struct ecc_status *p)
+{
+ struct edac_priv *priv = dci->pvt_info;
+ struct ecc_error_info *pinf;
+
+ if (p->ce_cnt) {
+ pinf = &p->ceinfo;
+ snprintf(priv->message, ZYNQMP_OCM_EDAC_MSG_SIZE,
+ "\nOCM ECC error type :%s\nAddr: [0x%x]\nFault Data[0x%08x%08x]",
+ "CE", pinf->addr, pinf->fault_hi, pinf->fault_lo);
+ edac_device_handle_ce(dci, 0, 0, priv->message);
+ }
+
+ if (p->ue_cnt) {
+ pinf = &p->ueinfo;
+ snprintf(priv->message, ZYNQMP_OCM_EDAC_MSG_SIZE,
+ "\nOCM ECC error type :%s\nAddr: [0x%x]\nFault Data[0x%08x%08x]",
+ "UE", pinf->addr, pinf->fault_hi, pinf->fault_lo);
+ edac_device_handle_ue(dci, 0, 0, priv->message);
+ }
+
+ memset(p, 0, sizeof(*p));
+}
+
+/**
+ * intr_handler - ISR routine
+ * @irq: irq number
+ * @dev_id: device id pointer
+ *
+ * Return: IRQ_NONE, if CE/UE interrupt not set or IRQ_HANDLED otherwise
+ */
+static irqreturn_t intr_handler(int irq, void *dev_id)
+{
+ struct edac_device_ctl_info *dci = dev_id;
+ struct edac_priv *priv = dci->pvt_info;
+ int regval;
+
+ regval = readl(priv->baseaddr + OCM_ISR_OFST);
+ if (!(regval & (OCM_CEINTR_MASK | OCM_UEINTR_MASK))) {
+ WARN_ONCE(1, "Unhandled IRQ%d, ISR: 0x%x", irq, regval);
+ return IRQ_NONE;
+ }
+
+ get_error_info(priv->baseaddr, &priv->stat, regval);
+
+ priv->ce_cnt += priv->stat.ce_cnt;
+ priv->ue_cnt += priv->stat.ue_cnt;
+ handle_error(dci, &priv->stat);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * get_eccstate - Return the ECC status
+ * @base: Pointer to the OCM base address
+ *
+ * Get the ECC enable/disable status
+ *
+ * Return: ECC status 0/1.
+ */
+static bool get_eccstate(void __iomem *base)
+{
+ return readl(base + ECC_CTRL_OFST) & OCM_ECC_ENABLE_MASK;
+}
+
+#ifdef CONFIG_EDAC_DEBUG
+/**
+ * write_fault_count - write fault injection count
+ * @priv: Pointer to the EDAC private struct
+ *
+ * Update the fault injection count register, once the counter reaches
+ * zero, it injects errors
+ */
+static void write_fault_count(struct edac_priv *priv)
+{
+ u32 ficount = priv->fault_injection_cnt;
+
+ if (ficount & ~OCM_FICOUNT_MASK) {
+ ficount &= OCM_FICOUNT_MASK;
+ edac_printk(KERN_INFO, EDAC_DEVICE,
+ "Fault injection count value truncated to %d\n", ficount);
+ }
+
+ writel(ficount, priv->baseaddr + OCM_FIC_OFST);
+}
+
+/*
+ * To get the Correctable Error injected, the following steps are needed:
+ * - Setup the optional Fault Injection Count:
+ * echo <fault_count val> > /sys/kernel/debug/edac/ocm/inject_fault_count
+ * - Write the Correctable Error bit position value:
+ * echo <bit_pos val> > /sys/kernel/debug/edac/ocm/inject_ce_bitpos
+ */
+static ssize_t inject_ce_write(struct file *file, const char __user *data,
+ size_t count, loff_t *ppos)
+{
+ struct edac_device_ctl_info *edac_dev = file->private_data;
+ struct edac_priv *priv = edac_dev->pvt_info;
+ int ret;
+
+ if (!data)
+ return -EFAULT;
+
+ ret = kstrtou8_from_user(data, count, 0, &priv->ce_bitpos);
+ if (ret)
+ return ret;
+
+ if (priv->ce_bitpos > UE_MAX_BITPOS_UPPER)
+ return -EINVAL;
+
+ if (priv->ce_bitpos <= UE_MAX_BITPOS_LOWER) {
+ writel(BIT(priv->ce_bitpos), priv->baseaddr + OCM_FID0_OFST);
+ writel(0, priv->baseaddr + OCM_FID1_OFST);
+ } else {
+ writel(BIT(priv->ce_bitpos - UE_MIN_BITPOS_UPPER),
+ priv->baseaddr + OCM_FID1_OFST);
+ writel(0, priv->baseaddr + OCM_FID0_OFST);
+ }
+
+ write_fault_count(priv);
+
+ return count;
+}
+
+static const struct file_operations inject_ce_fops = {
+ .open = simple_open,
+ .write = inject_ce_write,
+ .llseek = generic_file_llseek,
+};
+
+/*
+ * To get the Uncorrectable Error injected, the following steps are needed:
+ * - Setup the optional Fault Injection Count:
+ * echo <fault_count val> > /sys/kernel/debug/edac/ocm/inject_fault_count
+ * - Write the Uncorrectable Error bit position values:
+ * echo <bit_pos0 val>,<bit_pos1 val> > /sys/kernel/debug/edac/ocm/inject_ue_bitpos
+ */
+static ssize_t inject_ue_write(struct file *file, const char __user *data,
+ size_t count, loff_t *ppos)
+{
+ struct edac_device_ctl_info *edac_dev = file->private_data;
+ struct edac_priv *priv = edac_dev->pvt_info;
+ char buf[6], *pbuf, *token[2];
+ u64 ue_bitpos;
+ int i, ret;
+ u8 len;
+
+ if (!data)
+ return -EFAULT;
+
+ len = min_t(size_t, count, sizeof(buf));
+ if (copy_from_user(buf, data, len))
+ return -EFAULT;
+
+ buf[len] = '\0';
+ pbuf = &buf[0];
+ for (i = 0; i < OCM_NUM_UE_BITPOS; i++)
+ token[i] = strsep(&pbuf, ",");
+
+ ret = kstrtou8(token[0], 0, &priv->ue_bitpos[0]);
+ if (ret)
+ return ret;
+
+ ret = kstrtou8(token[1], 0, &priv->ue_bitpos[1]);
+ if (ret)
+ return ret;
+
+ if (priv->ue_bitpos[0] > UE_MAX_BITPOS_UPPER ||
+ priv->ue_bitpos[1] > UE_MAX_BITPOS_UPPER)
+ return -EINVAL;
+
+ if (priv->ue_bitpos[0] == priv->ue_bitpos[1]) {
+ edac_printk(KERN_ERR, EDAC_DEVICE, "Bit positions should not be equal\n");
+ return -EINVAL;
+ }
+
+ ue_bitpos = BIT(priv->ue_bitpos[0]) | BIT(priv->ue_bitpos[1]);
+
+ writel((u32)ue_bitpos, priv->baseaddr + OCM_FID0_OFST);
+ writel((u32)(ue_bitpos >> 32), priv->baseaddr + OCM_FID1_OFST);
+
+ write_fault_count(priv);
+
+ return count;
+}
+
+static const struct file_operations inject_ue_fops = {
+ .open = simple_open,
+ .write = inject_ue_write,
+ .llseek = generic_file_llseek,
+};
+
+static void setup_debugfs(struct edac_device_ctl_info *edac_dev)
+{
+ struct edac_priv *priv = edac_dev->pvt_info;
+
+ priv->debugfs_dir = edac_debugfs_create_dir("ocm");
+ if (!priv->debugfs_dir)
+ return;
+
+ edac_debugfs_create_x32("inject_fault_count", 0644, priv->debugfs_dir,
+ &priv->fault_injection_cnt);
+ edac_debugfs_create_file("inject_ue_bitpos", 0644, priv->debugfs_dir,
+ edac_dev, &inject_ue_fops);
+ edac_debugfs_create_file("inject_ce_bitpos", 0644, priv->debugfs_dir,
+ edac_dev, &inject_ce_fops);
+}
+#endif
+
+static int edac_probe(struct platform_device *pdev)
+{
+ struct edac_device_ctl_info *dci;
+ struct edac_priv *priv;
+ void __iomem *baseaddr;
+ struct resource *res;
+ int irq, ret;
+
+ baseaddr = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(baseaddr))
+ return PTR_ERR(baseaddr);
+
+ if (!get_eccstate(baseaddr)) {
+ edac_printk(KERN_INFO, EDAC_DEVICE, "ECC not enabled\n");
+ return -ENXIO;
+ }
+
+ dci = edac_device_alloc_ctl_info(sizeof(*priv), ZYNQMP_OCM_EDAC_STRING,
+ 1, ZYNQMP_OCM_EDAC_STRING, 1, 0, NULL, 0,
+ edac_device_alloc_index());
+ if (!dci)
+ return -ENOMEM;
+
+ priv = dci->pvt_info;
+ platform_set_drvdata(pdev, dci);
+ dci->dev = &pdev->dev;
+ priv->baseaddr = baseaddr;
+ dci->mod_name = pdev->dev.driver->name;
+ dci->ctl_name = ZYNQMP_OCM_EDAC_STRING;
+ dci->dev_name = dev_name(&pdev->dev);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ ret = irq;
+ goto free_dev_ctl;
+ }
+
+ ret = devm_request_irq(&pdev->dev, irq, intr_handler, 0,
+ dev_name(&pdev->dev), dci);
+ if (ret) {
+ edac_printk(KERN_ERR, EDAC_DEVICE, "Failed to request Irq\n");
+ goto free_dev_ctl;
+ }
+
+ /* Enable UE, CE interrupts */
+ writel((OCM_CEINTR_MASK | OCM_UEINTR_MASK), priv->baseaddr + OCM_IEN_OFST);
+
+#ifdef CONFIG_EDAC_DEBUG
+ setup_debugfs(dci);
+#endif
+
+ ret = edac_device_add_device(dci);
+ if (ret)
+ goto free_dev_ctl;
+
+ return 0;
+
+free_dev_ctl:
+ edac_device_free_ctl_info(dci);
+
+ return ret;
+}
+
+static int edac_remove(struct platform_device *pdev)
+{
+ struct edac_device_ctl_info *dci = platform_get_drvdata(pdev);
+ struct edac_priv *priv = dci->pvt_info;
+
+ /* Disable UE, CE interrupts */
+ writel((OCM_CEINTR_MASK | OCM_UEINTR_MASK), priv->baseaddr + OCM_IDS_OFST);
+
+#ifdef CONFIG_EDAC_DEBUG
+ debugfs_remove_recursive(priv->debugfs_dir);
+#endif
+
+ edac_device_del_device(&pdev->dev);
+ edac_device_free_ctl_info(dci);
+
+ return 0;
+}
+
+static const struct of_device_id zynqmp_ocm_edac_match[] = {
+ { .compatible = "xlnx,zynqmp-ocmc-1.0"},
+ { /* end of table */ }
+};
+
+MODULE_DEVICE_TABLE(of, zynqmp_ocm_edac_match);
+
+static struct platform_driver zynqmp_ocm_edac_driver = {
+ .driver = {
+ .name = "zynqmp-ocm-edac",
+ .of_match_table = zynqmp_ocm_edac_match,
+ },
+ .probe = edac_probe,
+ .remove = edac_remove,
+};
+
+module_platform_driver(zynqmp_ocm_edac_driver);
+
+MODULE_AUTHOR("Advanced Micro Devices, Inc");
+MODULE_DESCRIPTION("Xilinx ZynqMP OCM ECC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/eisa/eisa-bus.c b/drivers/eisa/eisa-bus.c
index 65bffde137e3..713582cc27d1 100644
--- a/drivers/eisa/eisa-bus.c
+++ b/drivers/eisa/eisa-bus.c
@@ -127,9 +127,9 @@ static int eisa_bus_match(struct device *dev, struct device_driver *drv)
return 0;
}
-static int eisa_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
+static int eisa_bus_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
- struct eisa_device *edev = to_eisa_device(dev);
+ const struct eisa_device *edev = to_eisa_device(dev);
add_uevent_var(env, "MODALIAS=" EISA_DEVICE_MODALIAS_FMT, edev->id.sig);
return 0;
diff --git a/drivers/extcon/extcon-intel-cht-wc.c b/drivers/extcon/extcon-intel-cht-wc.c
index 89a6449e3f4a..2c55f06ba699 100644
--- a/drivers/extcon/extcon-intel-cht-wc.c
+++ b/drivers/extcon/extcon-intel-cht-wc.c
@@ -537,6 +537,7 @@ static int cht_wc_extcon_probe(struct platform_device *pdev)
cht_wc_extcon_set_5v_boost(ext, false);
break;
case INTEL_CHT_WC_LENOVO_YOGABOOK1:
+ case INTEL_CHT_WC_LENOVO_YT3_X90:
/* Do this first, as it may very well return -EPROBE_DEFER. */
ret = cht_wc_extcon_get_role_sw_and_regulator(ext);
if (ret)
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index 9c89f7d53e99..2c16ee8fd842 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -111,6 +111,7 @@ struct inbound_transaction_resource {
struct client_resource resource;
struct fw_card *card;
struct fw_request *request;
+ bool is_fcp;
void *data;
size_t length;
};
@@ -643,19 +644,14 @@ static int ioctl_send_request(struct client *client, union ioctl_arg *arg)
client->device->max_speed);
}
-static inline bool is_fcp_request(struct fw_request *request)
-{
- return request == NULL;
-}
-
static void release_request(struct client *client,
struct client_resource *resource)
{
struct inbound_transaction_resource *r = container_of(resource,
struct inbound_transaction_resource, resource);
- if (is_fcp_request(r->request))
- kfree(r->data);
+ if (r->is_fcp)
+ fw_request_put(r->request);
else
fw_send_response(r->card, r->request, RCODE_CONFLICT_ERROR);
@@ -669,15 +665,20 @@ static void handle_request(struct fw_card *card, struct fw_request *request,
void *payload, size_t length, void *callback_data)
{
struct address_handler_resource *handler = callback_data;
+ bool is_fcp = is_in_fcp_region(offset, length);
struct inbound_transaction_resource *r;
struct inbound_transaction_event *e;
size_t event_size0;
- void *fcp_frame = NULL;
int ret;
/* card may be different from handler->client->device->card */
fw_card_get(card);
+ // Extend the lifetime of data for request so that its payload is safely accessible in
+ // the process context for the client.
+ if (is_fcp)
+ fw_request_get(request);
+
r = kmalloc(sizeof(*r), GFP_ATOMIC);
e = kmalloc(sizeof(*e), GFP_ATOMIC);
if (r == NULL || e == NULL)
@@ -685,21 +686,10 @@ static void handle_request(struct fw_card *card, struct fw_request *request,
r->card = card;
r->request = request;
+ r->is_fcp = is_fcp;
r->data = payload;
r->length = length;
- if (is_fcp_request(request)) {
- /*
- * FIXME: Let core-transaction.c manage a
- * single reference-counted copy?
- */
- fcp_frame = kmemdup(payload, length, GFP_ATOMIC);
- if (fcp_frame == NULL)
- goto failed;
-
- r->data = fcp_frame;
- }
-
r->resource.release = release_request;
ret = add_client_resource(handler->client, &r->resource, GFP_ATOMIC);
if (ret < 0)
@@ -741,10 +731,11 @@ static void handle_request(struct fw_card *card, struct fw_request *request,
failed:
kfree(r);
kfree(e);
- kfree(fcp_frame);
- if (!is_fcp_request(request))
+ if (!is_fcp)
fw_send_response(card, request, RCODE_CONFLICT_ERROR);
+ else
+ fw_request_put(request);
fw_card_put(card);
}
@@ -819,17 +810,19 @@ static int ioctl_send_response(struct client *client, union ioctl_arg *arg)
r = container_of(resource, struct inbound_transaction_resource,
resource);
- if (is_fcp_request(r->request))
+ if (r->is_fcp) {
+ fw_request_put(r->request);
goto out;
+ }
if (a->length != fw_get_response_length(r->request)) {
ret = -EINVAL;
- kfree(r->request);
+ fw_request_put(r->request);
goto out;
}
if (copy_from_user(r->data, u64_to_uptr(a->data), a->length)) {
ret = -EFAULT;
- kfree(r->request);
+ fw_request_put(r->request);
goto out;
}
fw_send_response(r->card, r->request, a->rcode);
diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
index adddd8c45d0c..aa597cda0d88 100644
--- a/drivers/firewire/core-device.c
+++ b/drivers/firewire/core-device.c
@@ -133,7 +133,7 @@ static void get_ids(const u32 *directory, int *id)
}
}
-static void get_modalias_ids(struct fw_unit *unit, int *id)
+static void get_modalias_ids(const struct fw_unit *unit, int *id)
{
get_ids(&fw_parent_device(unit)->config_rom[5], id);
get_ids(unit->directory, id);
@@ -195,7 +195,7 @@ static void fw_unit_remove(struct device *dev)
driver->remove(fw_unit(dev));
}
-static int get_modalias(struct fw_unit *unit, char *buffer, size_t buffer_size)
+static int get_modalias(const struct fw_unit *unit, char *buffer, size_t buffer_size)
{
int id[] = {0, 0, 0, 0};
@@ -206,9 +206,9 @@ static int get_modalias(struct fw_unit *unit, char *buffer, size_t buffer_size)
id[0], id[1], id[2], id[3]);
}
-static int fw_unit_uevent(struct device *dev, struct kobj_uevent_env *env)
+static int fw_unit_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
- struct fw_unit *unit = fw_unit(dev);
+ const struct fw_unit *unit = fw_unit(dev);
char modalias[64];
get_modalias(unit, modalias, sizeof(modalias));
diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
index af498d767702..a9f70c96323e 100644
--- a/drivers/firewire/core-transaction.c
+++ b/drivers/firewire/core-transaction.c
@@ -535,12 +535,6 @@ const struct fw_address_region fw_unit_space_region =
{ .start = 0xfffff0000900ULL, .end = 0x1000000000000ULL, };
#endif /* 0 */
-static bool is_in_fcp_region(u64 offset, size_t length)
-{
- return offset >= (CSR_REGISTER_BASE | CSR_FCP_COMMAND) &&
- offset + length <= (CSR_REGISTER_BASE | CSR_FCP_END);
-}
-
/**
* fw_core_add_address_handler() - register for incoming requests
* @handler: callback
@@ -617,6 +611,7 @@ void fw_core_remove_address_handler(struct fw_address_handler *handler)
EXPORT_SYMBOL(fw_core_remove_address_handler);
struct fw_request {
+ struct kref kref;
struct fw_packet response;
u32 request_header[4];
int ack;
@@ -625,13 +620,33 @@ struct fw_request {
u32 data[];
};
+void fw_request_get(struct fw_request *request)
+{
+ kref_get(&request->kref);
+}
+
+static void release_request(struct kref *kref)
+{
+ struct fw_request *request = container_of(kref, struct fw_request, kref);
+
+ kfree(request);
+}
+
+void fw_request_put(struct fw_request *request)
+{
+ kref_put(&request->kref, release_request);
+}
+
static void free_response_callback(struct fw_packet *packet,
struct fw_card *card, int status)
{
- struct fw_request *request;
+ struct fw_request *request = container_of(packet, struct fw_request, response);
- request = container_of(packet, struct fw_request, response);
- kfree(request);
+ // Decrease the reference count since not at in-flight.
+ fw_request_put(request);
+
+ // Decrease the reference count to release the object.
+ fw_request_put(request);
}
int fw_get_response_length(struct fw_request *r)
@@ -782,6 +797,7 @@ static struct fw_request *allocate_request(struct fw_card *card,
request = kmalloc(sizeof(*request) + length, GFP_ATOMIC);
if (request == NULL)
return NULL;
+ kref_init(&request->kref);
request->response.speed = p->speed;
request->response.timestamp =
@@ -800,16 +816,22 @@ static struct fw_request *allocate_request(struct fw_card *card,
return request;
}
+/**
+ * fw_send_response: - send response packet for asynchronous transaction.
+ * @card: interface to send the response at.
+ * @request: firewire request data for the transaction.
+ * @rcode: response code to send.
+ *
+ * Submit a response packet into the asynchronous response transmission queue. The @request
+ * is going to be released when the transmission successfully finishes later.
+ */
void fw_send_response(struct fw_card *card,
struct fw_request *request, int rcode)
{
- if (WARN_ONCE(!request, "invalid for FCP address handlers"))
- return;
-
/* unified transaction or broadcast transaction: don't respond */
if (request->ack != ACK_PENDING ||
HEADER_DESTINATION_IS_BROADCAST(request->request_header[0])) {
- kfree(request);
+ fw_request_put(request);
return;
}
@@ -821,6 +843,9 @@ void fw_send_response(struct fw_card *card,
fw_fill_response(&request->response, request->request_header,
rcode, NULL, 0);
+ // Increase the reference count so that the object is kept during in-flight.
+ fw_request_get(request);
+
card->driver->send_response(card, &request->response);
}
EXPORT_SYMBOL(fw_send_response);
@@ -910,7 +935,7 @@ static void handle_fcp_region_request(struct fw_card *card,
rcu_read_lock();
list_for_each_entry_rcu(handler, &address_handler_list, link) {
if (is_enclosing_handler(handler, offset, request->length))
- handler->address_callback(card, NULL, tcode,
+ handler->address_callback(card, request, tcode,
destination, source,
p->generation, offset,
request->data,
diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
index 71d5f16f311c..eafa4eaae737 100644
--- a/drivers/firewire/core.h
+++ b/drivers/firewire/core.h
@@ -244,6 +244,9 @@ int fw_get_response_length(struct fw_request *request);
void fw_fill_response(struct fw_packet *response, u32 *request_header,
int rcode, void *payload, size_t length);
+void fw_request_get(struct fw_request *request);
+void fw_request_put(struct fw_request *request);
+
#define FW_PHY_CONFIG_NO_NODE_ID -1
#define FW_PHY_CONFIG_CURRENT_GAP_COUNT -1
void fw_send_phy_config(struct fw_card *card,
@@ -254,4 +257,10 @@ static inline bool is_ping_packet(u32 *data)
return (data[0] & 0xc0ffffff) == 0 && ~data[0] == data[1];
}
+static inline bool is_in_fcp_region(u64 offset, size_t length)
+{
+ return offset >= (CSR_REGISTER_BASE | CSR_FCP_COMMAND) &&
+ offset + length <= (CSR_REGISTER_BASE | CSR_FCP_END);
+}
+
#endif /* _FIREWIRE_CORE_H */
diff --git a/drivers/firmware/arm_ffa/bus.c b/drivers/firmware/arm_ffa/bus.c
index 99d439480612..f29d77ecf72d 100644
--- a/drivers/firmware/arm_ffa/bus.c
+++ b/drivers/firmware/arm_ffa/bus.c
@@ -56,9 +56,9 @@ static void ffa_device_remove(struct device *dev)
ffa_drv->remove(to_ffa_dev(dev));
}
-static int ffa_device_uevent(struct device *dev, struct kobj_uevent_env *env)
+static int ffa_device_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
- struct ffa_device *ffa_dev = to_ffa_dev(dev);
+ const struct ffa_device *ffa_dev = to_ffa_dev(dev);
return add_uevent_var(env, "MODALIAS=arm_ffa:%04x:%pUb",
ffa_dev->vm_id, &ffa_dev->uuid);
diff --git a/drivers/firmware/arm_scmi/bus.c b/drivers/firmware/arm_scmi/bus.c
index 35bb70724d44..cc2eba067575 100644
--- a/drivers/firmware/arm_scmi/bus.c
+++ b/drivers/firmware/arm_scmi/bus.c
@@ -12,6 +12,7 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/device.h>
+#include <linux/of.h>
#include "common.h"
@@ -191,7 +192,7 @@ scmi_device_create(struct device_node *np, struct device *parent, int protocol,
scmi_dev->id = id;
scmi_dev->protocol_id = protocol;
scmi_dev->dev.parent = parent;
- scmi_dev->dev.of_node = np;
+ device_set_node(&scmi_dev->dev, of_fwnode_handle(np));
scmi_dev->dev.bus = &scmi_bus_type;
scmi_dev->dev.release = scmi_device_release;
dev_set_name(&scmi_dev->dev, "scmi_dev.%d", id);
diff --git a/drivers/firmware/dmi-sysfs.c b/drivers/firmware/dmi-sysfs.c
index 66727ad3361b..ed5aff0a4204 100644
--- a/drivers/firmware/dmi-sysfs.c
+++ b/drivers/firmware/dmi-sysfs.c
@@ -418,10 +418,10 @@ static ssize_t dmi_sel_raw_read_helper(struct dmi_sysfs_entry *entry,
return dmi_sel_raw_read_phys32(entry, &sel, state->buf,
state->pos, state->count);
case DMI_SEL_ACCESS_METHOD_GPNV:
- pr_info("dmi-sysfs: GPNV support missing.\n");
+ pr_info_ratelimited("dmi-sysfs: GPNV support missing.\n");
return -EIO;
default:
- pr_info("dmi-sysfs: Unknown access method %02x\n",
+ pr_info_ratelimited("dmi-sysfs: Unknown access method %02x\n",
sel.access_method);
return -EIO;
}
@@ -603,16 +603,16 @@ static void __init dmi_sysfs_register_handle(const struct dmi_header *dh,
*ret = kobject_init_and_add(&entry->kobj, &dmi_sysfs_entry_ktype, NULL,
"%d-%d", dh->type, entry->instance);
- if (*ret) {
- kobject_put(&entry->kobj);
- return;
- }
-
/* Thread on the global list for cleanup */
spin_lock(&entry_list_lock);
list_add_tail(&entry->list, &entry_list);
spin_unlock(&entry_list_lock);
+ if (*ret) {
+ kobject_put(&entry->kobj);
+ return;
+ }
+
/* Handle specializations by type */
switch (dh->type) {
case DMI_ENTRY_SYSTEM_EVENT_LOG:
diff --git a/drivers/firmware/efi/cper_cxl.c b/drivers/firmware/efi/cper_cxl.c
index 53e435c4f310..a55771b99a97 100644
--- a/drivers/firmware/efi/cper_cxl.c
+++ b/drivers/firmware/efi/cper_cxl.c
@@ -9,7 +9,6 @@
#include <linux/cper.h>
#include "cper_cxl.h"
-#include <linux/cxl_err.h>
#define PROT_ERR_VALID_AGENT_TYPE BIT_ULL(0)
#define PROT_ERR_VALID_AGENT_ADDRESS BIT_ULL(1)
@@ -19,6 +18,17 @@
#define PROT_ERR_VALID_DVSEC BIT_ULL(5)
#define PROT_ERR_VALID_ERROR_LOG BIT_ULL(6)
+/* CXL RAS Capability Structure, CXL v3.0 sec 8.2.4.16 */
+struct cxl_ras_capability_regs {
+ u32 uncor_status;
+ u32 uncor_mask;
+ u32 uncor_severity;
+ u32 cor_status;
+ u32 cor_mask;
+ u32 cap_control;
+ u32 header_log[16];
+};
+
static const char * const prot_err_agent_type_strs[] = {
"Restricted CXL Device",
"Restricted CXL Host Downstream Port",
diff --git a/drivers/firmware/efi/earlycon.c b/drivers/firmware/efi/earlycon.c
index 4d6c5327471a..f54e6fdf08e2 100644
--- a/drivers/firmware/efi/earlycon.c
+++ b/drivers/firmware/efi/earlycon.c
@@ -10,11 +10,14 @@
#include <linux/kernel.h>
#include <linux/serial_core.h>
#include <linux/screen_info.h>
+#include <linux/string.h>
#include <asm/early_ioremap.h>
static const struct console *earlycon_console __initdata;
static const struct font_desc *font;
+static u16 cur_line_y, max_line_y;
+static u32 efi_x_array[1024];
static u32 efi_x, efi_y;
static u64 fb_base;
static bool fb_wb;
@@ -85,9 +88,17 @@ static void efi_earlycon_clear_scanline(unsigned int y)
static void efi_earlycon_scroll_up(void)
{
unsigned long *dst, *src;
+ u16 maxlen = 0;
u16 len;
u32 i, height;
+ /* Find the cached maximum x coordinate */
+ for (i = 0; i < max_line_y; i++) {
+ if (efi_x_array[i] > maxlen)
+ maxlen = efi_x_array[i];
+ }
+ maxlen *= 4;
+
len = screen_info.lfb_linelength;
height = screen_info.lfb_height;
@@ -102,7 +113,7 @@ static void efi_earlycon_scroll_up(void)
return;
}
- memmove(dst, src, len);
+ memmove(dst, src, maxlen);
efi_earlycon_unmap(src, len);
efi_earlycon_unmap(dst, len);
@@ -135,6 +146,7 @@ static void
efi_earlycon_write(struct console *con, const char *str, unsigned int num)
{
struct screen_info *si;
+ u32 cur_efi_x = efi_x;
unsigned int len;
const char *s;
void *dst;
@@ -143,16 +155,10 @@ efi_earlycon_write(struct console *con, const char *str, unsigned int num)
len = si->lfb_linelength;
while (num) {
- unsigned int linemax;
- unsigned int h, count = 0;
-
- for (s = str; *s && *s != '\n'; s++) {
- if (count == num)
- break;
- count++;
- }
+ unsigned int linemax = (si->lfb_width - efi_x) / font->width;
+ unsigned int h, count;
- linemax = (si->lfb_width - efi_x) / font->width;
+ count = strnchrnul(str, num, '\n') - str;
if (count > linemax)
count = linemax;
@@ -181,6 +187,7 @@ efi_earlycon_write(struct console *con, const char *str, unsigned int num)
str += count;
if (num > 0 && *s == '\n') {
+ cur_efi_x = efi_x;
efi_x = 0;
efi_y += font->height;
str++;
@@ -188,6 +195,7 @@ efi_earlycon_write(struct console *con, const char *str, unsigned int num)
}
if (efi_x + font->width > si->lfb_width) {
+ cur_efi_x = efi_x;
efi_x = 0;
efi_y += font->height;
}
@@ -195,6 +203,9 @@ efi_earlycon_write(struct console *con, const char *str, unsigned int num)
if (efi_y + font->height > si->lfb_height) {
u32 i;
+ efi_x_array[cur_line_y] = cur_efi_x;
+ cur_line_y = (cur_line_y + 1) % max_line_y;
+
efi_y -= font->height;
efi_earlycon_scroll_up();
@@ -235,7 +246,15 @@ static int __init efi_earlycon_setup(struct earlycon_device *device,
if (!font)
return -ENODEV;
- efi_y = rounddown(yres, font->height) - font->height;
+ /* Fill the cache with maximum possible value of x coordinate */
+ memset32(efi_x_array, rounddown(xres, font->width), ARRAY_SIZE(efi_x_array));
+ efi_y = rounddown(yres, font->height);
+
+ /* Make sure we have cache for the x coordinate for the full screen */
+ max_line_y = efi_y / font->height + 1;
+ cur_line_y = 0;
+
+ efi_y -= font->height;
for (i = 0; i < (yres - efi_y) / font->height; i++)
efi_earlycon_scroll_up();
diff --git a/drivers/firmware/efi/efi-init.c b/drivers/firmware/efi/efi-init.c
index 1639159493e3..2c16080e1f71 100644
--- a/drivers/firmware/efi/efi-init.c
+++ b/drivers/firmware/efi/efi-init.c
@@ -92,7 +92,7 @@ static int __init uefi_init(u64 efi_system_table)
if (IS_ENABLED(CONFIG_64BIT))
set_bit(EFI_64BIT, &efi.flags);
- retval = efi_systab_check_header(&systab->hdr, 2);
+ retval = efi_systab_check_header(&systab->hdr);
if (retval)
goto out;
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index a2b0cbc8741c..abeff7dc0b58 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -187,8 +187,27 @@ static const struct attribute_group efi_subsys_attr_group = {
static struct efivars generic_efivars;
static struct efivar_operations generic_ops;
+static bool generic_ops_supported(void)
+{
+ unsigned long name_size;
+ efi_status_t status;
+ efi_char16_t name;
+ efi_guid_t guid;
+
+ name_size = sizeof(name);
+
+ status = efi.get_next_variable(&name_size, &name, &guid);
+ if (status == EFI_UNSUPPORTED)
+ return false;
+
+ return true;
+}
+
static int generic_ops_register(void)
{
+ if (!generic_ops_supported())
+ return 0;
+
generic_ops.get_variable = efi.get_variable;
generic_ops.get_next_variable = efi.get_next_variable;
generic_ops.query_variable_store = efi_query_variable_store;
@@ -197,11 +216,14 @@ static int generic_ops_register(void)
generic_ops.set_variable = efi.set_variable;
generic_ops.set_variable_nonblocking = efi.set_variable_nonblocking;
}
- return efivars_register(&generic_efivars, &generic_ops, efi_kobj);
+ return efivars_register(&generic_efivars, &generic_ops);
}
static void generic_ops_unregister(void)
{
+ if (!generic_ops.get_variable)
+ return;
+
efivars_unregister(&generic_efivars);
}
@@ -481,7 +503,7 @@ void __init efi_find_mirror(void)
* and if so, populate the supplied memory descriptor with the appropriate
* data.
*/
-int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
+int __efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
{
efi_memory_desc_t *md;
@@ -499,6 +521,12 @@ int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
u64 size;
u64 end;
+ /* skip bogus entries (including empty ones) */
+ if ((md->phys_addr & (EFI_PAGE_SIZE - 1)) ||
+ (md->num_pages <= 0) ||
+ (md->num_pages > (U64_MAX - md->phys_addr) >> EFI_PAGE_SHIFT))
+ continue;
+
size = md->num_pages << EFI_PAGE_SHIFT;
end = md->phys_addr + size;
if (phys_addr >= md->phys_addr && phys_addr < end) {
@@ -509,6 +537,9 @@ int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
return -ENOENT;
}
+extern int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
+ __weak __alias(__efi_mem_desc_lookup);
+
/*
* Calculate the highest address of an efi memory descriptor.
*/
@@ -535,6 +566,10 @@ void __init __weak efi_arch_mem_reserve(phys_addr_t addr, u64 size) {}
*/
void __init efi_mem_reserve(phys_addr_t addr, u64 size)
{
+ /* efi_mem_reserve() does not work under Xen */
+ if (WARN_ON_ONCE(efi_enabled(EFI_PARAVIRT)))
+ return;
+
if (!memblock_is_region_reserved(addr, size))
memblock_reserve(addr, size);
@@ -583,13 +618,20 @@ static __init int match_config_table(const efi_guid_t *guid,
int i;
for (i = 0; efi_guidcmp(table_types[i].guid, NULL_GUID); i++) {
- if (!efi_guidcmp(*guid, table_types[i].guid)) {
- *(table_types[i].ptr) = table;
+ if (efi_guidcmp(*guid, table_types[i].guid))
+ continue;
+
+ if (!efi_config_table_is_usable(guid, table)) {
if (table_types[i].name[0])
- pr_cont("%s=0x%lx ",
+ pr_cont("(%s=0x%lx unusable) ",
table_types[i].name, table);
return 1;
}
+
+ *(table_types[i].ptr) = table;
+ if (table_types[i].name[0])
+ pr_cont("%s=0x%lx ", table_types[i].name, table);
+ return 1;
}
return 0;
@@ -720,20 +762,13 @@ int __init efi_config_parse_tables(const efi_config_table_t *config_tables,
return 0;
}
-int __init efi_systab_check_header(const efi_table_hdr_t *systab_hdr,
- int min_major_version)
+int __init efi_systab_check_header(const efi_table_hdr_t *systab_hdr)
{
if (systab_hdr->signature != EFI_SYSTEM_TABLE_SIGNATURE) {
pr_err("System table signature incorrect!\n");
return -EINVAL;
}
- if ((systab_hdr->revision >> 16) < min_major_version)
- pr_err("Warning: System table version %d.%02d, expected %d.00 or greater!\n",
- systab_hdr->revision >> 16,
- systab_hdr->revision & 0xffff,
- min_major_version);
-
return 0;
}
@@ -764,6 +799,7 @@ void __init efi_systab_report_header(const efi_table_hdr_t *systab_hdr,
char vendor[100] = "unknown";
const efi_char16_t *c16;
size_t i;
+ u16 rev;
c16 = map_fw_vendor(fw_vendor, sizeof(vendor) * sizeof(efi_char16_t));
if (c16) {
@@ -774,10 +810,14 @@ void __init efi_systab_report_header(const efi_table_hdr_t *systab_hdr,
unmap_fw_vendor(c16, sizeof(vendor) * sizeof(efi_char16_t));
}
- pr_info("EFI v%u.%.02u by %s\n",
- systab_hdr->revision >> 16,
- systab_hdr->revision & 0xffff,
- vendor);
+ rev = (u16)systab_hdr->revision;
+ pr_info("EFI v%u.%u", systab_hdr->revision >> 16, rev / 10);
+
+ rev %= 10;
+ if (rev)
+ pr_cont(".%u", rev);
+
+ pr_cont(" by %s\n", vendor);
if (IS_ENABLED(CONFIG_X86_64) &&
systab_hdr->revision > EFI_1_10_SYSTEM_TABLE_REVISION &&
@@ -1007,6 +1047,8 @@ int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size)
/* first try to find a slot in an existing linked list entry */
for (prsv = efi_memreserve_root->next; prsv; ) {
rsv = memremap(prsv, sizeof(*rsv), MEMREMAP_WB);
+ if (!rsv)
+ return -ENOMEM;
index = atomic_fetch_add_unless(&rsv->count, 1, rsv->size);
if (index < rsv->size) {
rsv->entry[index].base = addr;
diff --git a/drivers/firmware/efi/esrt.c b/drivers/firmware/efi/esrt.c
index 2a2f52b017e7..87729c365be1 100644
--- a/drivers/firmware/efi/esrt.c
+++ b/drivers/firmware/efi/esrt.c
@@ -247,7 +247,7 @@ void __init efi_esrt_init(void)
int rc;
phys_addr_t end;
- if (!efi_enabled(EFI_MEMMAP))
+ if (!efi_enabled(EFI_MEMMAP) && !efi_enabled(EFI_PARAVIRT))
return;
pr_debug("esrt-init: loading.\n");
@@ -258,20 +258,15 @@ void __init efi_esrt_init(void)
if (rc < 0 ||
(!(md.attribute & EFI_MEMORY_RUNTIME) &&
md.type != EFI_BOOT_SERVICES_DATA &&
- md.type != EFI_RUNTIME_SERVICES_DATA)) {
+ md.type != EFI_RUNTIME_SERVICES_DATA &&
+ md.type != EFI_ACPI_RECLAIM_MEMORY &&
+ md.type != EFI_ACPI_MEMORY_NVS)) {
pr_warn("ESRT header is not in the memory map.\n");
return;
}
- max = efi_mem_desc_end(&md);
- if (max < efi.esrt) {
- pr_err("EFI memory descriptor is invalid. (esrt: %p max: %p)\n",
- (void *)efi.esrt, (void *)max);
- return;
- }
-
+ max = efi_mem_desc_end(&md) - efi.esrt;
size = sizeof(*esrt);
- max -= efi.esrt;
if (max < size) {
pr_err("ESRT header doesn't fit on single memory map entry. (size: %zu max: %zu)\n",
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index be8b8c6e8b40..80d85a5169fb 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -87,7 +87,7 @@ lib-$(CONFIG_EFI_GENERIC_STUB) += efi-stub.o string.o intrinsics.o systable.o \
screen_info.o efi-stub-entry.o
lib-$(CONFIG_ARM) += arm32-stub.o
-lib-$(CONFIG_ARM64) += arm64.o arm64-stub.o arm64-entry.o smbios.o
+lib-$(CONFIG_ARM64) += arm64.o arm64-stub.o smbios.o
lib-$(CONFIG_X86) += x86-stub.o
lib-$(CONFIG_RISCV) += riscv.o riscv-stub.o
lib-$(CONFIG_LOONGARCH) += loongarch.o loongarch-stub.o
@@ -141,7 +141,7 @@ STUBCOPY_RELOC-$(CONFIG_ARM) := R_ARM_ABS
#
STUBCOPY_FLAGS-$(CONFIG_ARM64) += --prefix-alloc-sections=.init \
--prefix-symbols=__efistub_
-STUBCOPY_RELOC-$(CONFIG_ARM64) := R_AARCH64_ABS64
+STUBCOPY_RELOC-$(CONFIG_ARM64) := R_AARCH64_ABS
# For RISC-V, we don't need anything special other than arm64. Keep all the
# symbols in .init section and make sure that no absolute symbols references
diff --git a/drivers/firmware/efi/libstub/arm64-entry.S b/drivers/firmware/efi/libstub/arm64-entry.S
deleted file mode 100644
index b5c17e89a4fc..000000000000
--- a/drivers/firmware/efi/libstub/arm64-entry.S
+++ /dev/null
@@ -1,67 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * EFI entry point.
- *
- * Copyright (C) 2013, 2014 Red Hat, Inc.
- * Author: Mark Salter <msalter@redhat.com>
- */
-#include <linux/linkage.h>
-#include <asm/assembler.h>
-
- /*
- * The entrypoint of a arm64 bare metal image is at offset #0 of the
- * image, so this is a reasonable default for primary_entry_offset.
- * Only when the EFI stub is integrated into the core kernel, it is not
- * guaranteed that the PE/COFF header has been copied to memory too, so
- * in this case, primary_entry_offset should be overridden by the
- * linker and point to primary_entry() directly.
- */
- .weak primary_entry_offset
-
-SYM_CODE_START(efi_enter_kernel)
- /*
- * efi_pe_entry() will have copied the kernel image if necessary and we
- * end up here with device tree address in x1 and the kernel entry
- * point stored in x0. Save those values in registers which are
- * callee preserved.
- */
- ldr w2, =primary_entry_offset
- add x19, x0, x2 // relocated Image entrypoint
-
- mov x0, x1 // DTB address
- mov x1, xzr
- mov x2, xzr
- mov x3, xzr
-
- /*
- * Clean the remainder of this routine to the PoC
- * so that we can safely disable the MMU and caches.
- */
- adr x4, 1f
- dc civac, x4
- dsb sy
-
- /* Turn off Dcache and MMU */
- mrs x4, CurrentEL
- cmp x4, #CurrentEL_EL2
- mrs x4, sctlr_el1
- b.ne 0f
- mrs x4, sctlr_el2
-0: bic x4, x4, #SCTLR_ELx_M
- bic x4, x4, #SCTLR_ELx_C
- b.eq 1f
- b 2f
-
- .balign 32
-1: pre_disable_mmu_workaround
- msr sctlr_el2, x4
- isb
- br x19 // jump to kernel entrypoint
-
-2: pre_disable_mmu_workaround
- msr sctlr_el1, x4
- isb
- br x19 // jump to kernel entrypoint
-
- .org 1b + 32
-SYM_CODE_END(efi_enter_kernel)
diff --git a/drivers/firmware/efi/libstub/arm64-stub.c b/drivers/firmware/efi/libstub/arm64-stub.c
index 7327b98d8e3f..d4a6b12a8741 100644
--- a/drivers/firmware/efi/libstub/arm64-stub.c
+++ b/drivers/firmware/efi/libstub/arm64-stub.c
@@ -58,7 +58,7 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
efi_handle_t image_handle)
{
efi_status_t status;
- unsigned long kernel_size, kernel_memsize = 0;
+ unsigned long kernel_size, kernel_codesize, kernel_memsize;
u32 phys_seed = 0;
u64 min_kimg_align = efi_get_kimg_min_align();
@@ -93,6 +93,7 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
SEGMENT_ALIGN >> 10);
kernel_size = _edata - _text;
+ kernel_codesize = __inittext_end - _text;
kernel_memsize = kernel_size + (_end - _edata);
*reserve_size = kernel_memsize;
@@ -121,7 +122,7 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
*/
*image_addr = (u64)_text;
*reserve_size = 0;
- goto clean_image_to_poc;
+ return EFI_SUCCESS;
}
status = efi_allocate_pages_aligned(*reserve_size, reserve_addr,
@@ -137,14 +138,21 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
*image_addr = *reserve_addr;
memcpy((void *)*image_addr, _text, kernel_size);
+ caches_clean_inval_pou(*image_addr, *image_addr + kernel_codesize);
-clean_image_to_poc:
+ return EFI_SUCCESS;
+}
+
+asmlinkage void primary_entry(void);
+
+unsigned long primary_entry_offset(void)
+{
/*
- * Clean the copied Image to the PoC, and ensure it is not shadowed by
- * stale icache entries from before relocation.
+ * When built as part of the kernel, the EFI stub cannot branch to the
+ * kernel proper via the image header, as the PE/COFF header is
+ * strictly not part of the in-memory presentation of the image, only
+ * of the file representation. So instead, we need to jump to the
+ * actual entrypoint in the .text region of the image.
*/
- dcache_clean_poc(*image_addr, *image_addr + kernel_size);
- asm("ic ialluis");
-
- return EFI_SUCCESS;
+ return (char *)primary_entry - _text;
}
diff --git a/drivers/firmware/efi/libstub/arm64.c b/drivers/firmware/efi/libstub/arm64.c
index ff2d18c42ee7..399770266372 100644
--- a/drivers/firmware/efi/libstub/arm64.c
+++ b/drivers/firmware/efi/libstub/arm64.c
@@ -19,10 +19,13 @@ static bool system_needs_vamap(void)
const u8 *type1_family = efi_get_smbios_string(1, family);
/*
- * Ampere Altra machines crash in SetTime() if SetVirtualAddressMap()
- * has not been called prior.
+ * Ampere eMAG, Altra, and Altra Max machines crash in SetTime() if
+ * SetVirtualAddressMap() has not been called prior.
*/
- if (!type1_family || strcmp(type1_family, "Altra"))
+ if (!type1_family || (
+ strcmp(type1_family, "eMAG") &&
+ strcmp(type1_family, "Altra") &&
+ strcmp(type1_family, "Altra Max")))
return false;
efi_warn("Working around broken SetVirtualAddressMap()\n");
@@ -56,6 +59,12 @@ efi_status_t check_platform_features(void)
return EFI_SUCCESS;
}
+#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
+#define DCTYPE "civac"
+#else
+#define DCTYPE "cvau"
+#endif
+
void efi_cache_sync_image(unsigned long image_base,
unsigned long alloc_size,
unsigned long code_size)
@@ -64,13 +73,38 @@ void efi_cache_sync_image(unsigned long image_base,
u64 lsize = 4 << cpuid_feature_extract_unsigned_field(ctr,
CTR_EL0_DminLine_SHIFT);
- do {
- asm("dc civac, %0" :: "r"(image_base));
- image_base += lsize;
- alloc_size -= lsize;
- } while (alloc_size >= lsize);
+ /* only perform the cache maintenance if needed for I/D coherency */
+ if (!(ctr & BIT(CTR_EL0_IDC_SHIFT))) {
+ do {
+ asm("dc " DCTYPE ", %0" :: "r"(image_base));
+ image_base += lsize;
+ code_size -= lsize;
+ } while (code_size >= lsize);
+ }
asm("ic ialluis");
dsb(ish);
isb();
}
+
+unsigned long __weak primary_entry_offset(void)
+{
+ /*
+ * By default, we can invoke the kernel via the branch instruction in
+ * the image header, so offset #0. This will be overridden by the EFI
+ * stub build that is linked into the core kernel, as in that case, the
+ * image header may not have been loaded into memory, or may be mapped
+ * with non-executable permissions.
+ */
+ return 0;
+}
+
+void __noreturn efi_enter_kernel(unsigned long entrypoint,
+ unsigned long fdt_addr,
+ unsigned long fdt_size)
+{
+ void (* __noreturn enter_kernel)(u64, u64, u64, u64);
+
+ enter_kernel = (void *)entrypoint + primary_entry_offset();
+ enter_kernel(fdt_addr, 0, 0, 0);
+}
diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c
index f5a4bdacac64..1e0203d74691 100644
--- a/drivers/firmware/efi/libstub/efi-stub-helper.c
+++ b/drivers/firmware/efi/libstub/efi-stub-helper.c
@@ -651,3 +651,70 @@ efi_status_t efi_wait_for_key(unsigned long usec, efi_input_key_t *key)
return status;
}
+
+/**
+ * efi_remap_image - Remap a loaded image with the appropriate permissions
+ * for code and data
+ *
+ * @image_base: the base of the image in memory
+ * @alloc_size: the size of the area in memory occupied by the image
+ * @code_size: the size of the leading part of the image containing code
+ * and read-only data
+ *
+ * efi_remap_image() uses the EFI memory attribute protocol to remap the code
+ * region of the loaded image read-only/executable, and the remainder
+ * read-write/non-executable. The code region is assumed to start at the base
+ * of the image, and will therefore cover the PE/COFF header as well.
+ */
+void efi_remap_image(unsigned long image_base, unsigned alloc_size,
+ unsigned long code_size)
+{
+ efi_guid_t guid = EFI_MEMORY_ATTRIBUTE_PROTOCOL_GUID;
+ efi_memory_attribute_protocol_t *memattr;
+ efi_status_t status;
+ u64 attr;
+
+ /*
+ * If the firmware implements the EFI_MEMORY_ATTRIBUTE_PROTOCOL, let's
+ * invoke it to remap the text/rodata region of the decompressed image
+ * as read-only and the data/bss region as non-executable.
+ */
+ status = efi_bs_call(locate_protocol, &guid, NULL, (void **)&memattr);
+ if (status != EFI_SUCCESS)
+ return;
+
+ // Get the current attributes for the entire region
+ status = memattr->get_memory_attributes(memattr, image_base,
+ alloc_size, &attr);
+ if (status != EFI_SUCCESS) {
+ efi_warn("Failed to retrieve memory attributes for image region: 0x%lx\n",
+ status);
+ return;
+ }
+
+ // Mark the code region as read-only
+ status = memattr->set_memory_attributes(memattr, image_base, code_size,
+ EFI_MEMORY_RO);
+ if (status != EFI_SUCCESS) {
+ efi_warn("Failed to remap code region read-only\n");
+ return;
+ }
+
+ // If the entire region was already mapped as non-exec, clear the
+ // attribute from the code region. Otherwise, set it on the data
+ // region.
+ if (attr & EFI_MEMORY_XP) {
+ status = memattr->clear_memory_attributes(memattr, image_base,
+ code_size,
+ EFI_MEMORY_XP);
+ if (status != EFI_SUCCESS)
+ efi_warn("Failed to remap code region executable\n");
+ } else {
+ status = memattr->set_memory_attributes(memattr,
+ image_base + code_size,
+ alloc_size - code_size,
+ EFI_MEMORY_XP);
+ if (status != EFI_SUCCESS)
+ efi_warn("Failed to remap data region non-executable\n");
+ }
+}
diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h
index 5b8f2c411ed8..6bd3bb86d967 100644
--- a/drivers/firmware/efi/libstub/efistub.h
+++ b/drivers/firmware/efi/libstub/efistub.h
@@ -442,6 +442,26 @@ union efi_dxe_services_table {
} mixed_mode;
};
+typedef union efi_memory_attribute_protocol efi_memory_attribute_protocol_t;
+
+union efi_memory_attribute_protocol {
+ struct {
+ efi_status_t (__efiapi *get_memory_attributes)(
+ efi_memory_attribute_protocol_t *, efi_physical_addr_t, u64, u64 *);
+
+ efi_status_t (__efiapi *set_memory_attributes)(
+ efi_memory_attribute_protocol_t *, efi_physical_addr_t, u64, u64);
+
+ efi_status_t (__efiapi *clear_memory_attributes)(
+ efi_memory_attribute_protocol_t *, efi_physical_addr_t, u64, u64);
+ };
+ struct {
+ u32 get_memory_attributes;
+ u32 set_memory_attributes;
+ u32 clear_memory_attributes;
+ } mixed_mode;
+};
+
typedef union efi_uga_draw_protocol efi_uga_draw_protocol_t;
union efi_uga_draw_protocol {
@@ -1076,4 +1096,7 @@ struct efi_smbios_type1_record {
const u8 *__efi_get_smbios_string(u8 type, int offset, int recsize);
+void efi_remap_image(unsigned long image_base, unsigned alloc_size,
+ unsigned long code_size);
+
#endif
diff --git a/drivers/firmware/efi/libstub/zboot.c b/drivers/firmware/efi/libstub/zboot.c
index 66be5fdc6b58..ba234e062a1a 100644
--- a/drivers/firmware/efi/libstub/zboot.c
+++ b/drivers/firmware/efi/libstub/zboot.c
@@ -137,6 +137,8 @@ efi_zboot_entry(efi_handle_t handle, efi_system_table_t *systab)
efi_cache_sync_image(image_base, alloc_size, code_size);
+ efi_remap_image(image_base, alloc_size, code_size);
+
status = efi_stub_common(handle, image, image_base, cmdline_ptr);
free_image:
diff --git a/drivers/firmware/efi/memattr.c b/drivers/firmware/efi/memattr.c
index 0a9aba5f9cef..ab85bf8e165a 100644
--- a/drivers/firmware/efi/memattr.c
+++ b/drivers/firmware/efi/memattr.c
@@ -33,7 +33,7 @@ int __init efi_memattr_init(void)
return -ENOMEM;
}
- if (tbl->version > 1) {
+ if (tbl->version > 2) {
pr_warn("Unexpected EFI Memory Attributes table version %d\n",
tbl->version);
goto unmap;
@@ -129,6 +129,7 @@ int __init efi_memattr_apply_permissions(struct mm_struct *mm,
efi_memattr_perm_setter fn)
{
efi_memory_attributes_table_t *tbl;
+ bool has_bti = false;
int i, ret;
if (tbl_size <= sizeof(*tbl))
@@ -150,6 +151,10 @@ int __init efi_memattr_apply_permissions(struct mm_struct *mm,
return -ENOMEM;
}
+ if (tbl->version > 1 &&
+ (tbl->flags & EFI_MEMORY_ATTRIBUTES_FLAGS_RT_FORWARD_CONTROL_FLOW_GUARD))
+ has_bti = true;
+
if (efi_enabled(EFI_DBG))
pr_info("Processing EFI Memory Attributes table:\n");
@@ -169,7 +174,7 @@ int __init efi_memattr_apply_permissions(struct mm_struct *mm,
efi_md_typeattr_format(buf, sizeof(buf), &md));
if (valid) {
- ret = fn(mm, &md);
+ ret = fn(mm, &md, has_bti);
if (ret)
pr_err("Error updating mappings, skipping subsequent md's\n");
}
diff --git a/drivers/firmware/efi/sysfb_efi.c b/drivers/firmware/efi/sysfb_efi.c
index 7882d4b3f2be..f06fdacc9bc8 100644
--- a/drivers/firmware/efi/sysfb_efi.c
+++ b/drivers/firmware/efi/sysfb_efi.c
@@ -264,6 +264,14 @@ static const struct dmi_system_id efifb_dmi_swap_width_height[] __initconst = {
"Lenovo ideapad D330-10IGM"),
},
},
+ {
+ /* Lenovo IdeaPad Duet 3 10IGL5 with 1200x1920 portrait screen */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_VERSION,
+ "IdeaPad Duet 3 10IGL5"),
+ },
+ },
{},
};
diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c
index 0ba9f18312f5..bd75b87f5fc1 100644
--- a/drivers/firmware/efi/vars.c
+++ b/drivers/firmware/efi/vars.c
@@ -6,6 +6,8 @@
* Copyright (C) 2004 Intel Corporation <matthew.e.tolentino@intel.com>
*/
+#define pr_fmt(fmt) "efivars: " fmt
+
#include <linux/types.h>
#include <linux/sizes.h>
#include <linux/errno.h>
@@ -40,45 +42,47 @@ static efi_status_t check_var_size(bool nonblocking, u32 attributes,
}
/**
- * efivars_kobject - get the kobject for the registered efivars
+ * efivar_is_available - check if efivars is available
*
- * If efivars_register() has not been called we return NULL,
- * otherwise return the kobject used at registration time.
+ * @return true iff evivars is currently registered
*/
-struct kobject *efivars_kobject(void)
+bool efivar_is_available(void)
{
- if (!__efivars)
- return NULL;
-
- return __efivars->kobject;
+ return __efivars != NULL;
}
-EXPORT_SYMBOL_GPL(efivars_kobject);
+EXPORT_SYMBOL_GPL(efivar_is_available);
/**
* efivars_register - register an efivars
* @efivars: efivars to register
* @ops: efivars operations
- * @kobject: @efivars-specific kobject
*
* Only a single efivars can be registered at any time.
*/
int efivars_register(struct efivars *efivars,
- const struct efivar_operations *ops,
- struct kobject *kobject)
+ const struct efivar_operations *ops)
{
+ int rv;
+
if (down_interruptible(&efivars_lock))
return -EINTR;
+ if (__efivars) {
+ pr_warn("efivars already registered\n");
+ rv = -EBUSY;
+ goto out;
+ }
+
efivars->ops = ops;
- efivars->kobject = kobject;
__efivars = efivars;
pr_info("Registered efivars operations\n");
-
+ rv = 0;
+out:
up(&efivars_lock);
- return 0;
+ return rv;
}
EXPORT_SYMBOL_GPL(efivars_register);
@@ -97,7 +101,7 @@ int efivars_unregister(struct efivars *efivars)
return -EINTR;
if (!__efivars) {
- printk(KERN_ERR "efivars not registered\n");
+ pr_err("efivars not registered\n");
rv = -EINVAL;
goto out;
}
@@ -117,7 +121,7 @@ out:
}
EXPORT_SYMBOL_GPL(efivars_unregister);
-int efivar_supports_writes(void)
+bool efivar_supports_writes(void)
{
return __efivars && __efivars->ops->set_variable;
}
diff --git a/drivers/firmware/google/Kconfig b/drivers/firmware/google/Kconfig
index 9f190eab43ed..1bc7cbf2f65d 100644
--- a/drivers/firmware/google/Kconfig
+++ b/drivers/firmware/google/Kconfig
@@ -44,14 +44,6 @@ config GOOGLE_COREBOOT_TABLE
device tree node /firmware/coreboot.
If unsure say N.
-config GOOGLE_COREBOOT_TABLE_ACPI
- tristate
- select GOOGLE_COREBOOT_TABLE
-
-config GOOGLE_COREBOOT_TABLE_OF
- tristate
- select GOOGLE_COREBOOT_TABLE
-
config GOOGLE_MEMCONSOLE
tristate
depends on GOOGLE_MEMCONSOLE_X86_LEGACY || GOOGLE_MEMCONSOLE_COREBOOT
diff --git a/drivers/firmware/google/framebuffer-coreboot.c b/drivers/firmware/google/framebuffer-coreboot.c
index c6dcc1ef93ac..c323a818805c 100644
--- a/drivers/firmware/google/framebuffer-coreboot.c
+++ b/drivers/firmware/google/framebuffer-coreboot.c
@@ -43,9 +43,7 @@ static int framebuffer_probe(struct coreboot_device *dev)
fb->green_mask_pos == formats[i].green.offset &&
fb->green_mask_size == formats[i].green.length &&
fb->blue_mask_pos == formats[i].blue.offset &&
- fb->blue_mask_size == formats[i].blue.length &&
- fb->reserved_mask_pos == formats[i].transp.offset &&
- fb->reserved_mask_size == formats[i].transp.length)
+ fb->blue_mask_size == formats[i].blue.length)
pdata.format = formats[i].name;
}
if (!pdata.format)
diff --git a/drivers/firmware/google/gsmi.c b/drivers/firmware/google/gsmi.c
index 871bedf533a8..96ea1fa76d35 100644
--- a/drivers/firmware/google/gsmi.c
+++ b/drivers/firmware/google/gsmi.c
@@ -1030,7 +1030,7 @@ static __init int gsmi_init(void)
}
#ifdef CONFIG_EFI
- ret = efivars_register(&efivars, &efivar_ops, gsmi_kobj);
+ ret = efivars_register(&efivars, &efivar_ops);
if (ret) {
printk(KERN_INFO "gsmi: Failed to register efivars\n");
sysfs_remove_files(gsmi_kobj, gsmi_attrs);
diff --git a/drivers/firmware/psci/psci.c b/drivers/firmware/psci/psci.c
index 447ee4ea5c90..29619f49873a 100644
--- a/drivers/firmware/psci/psci.c
+++ b/drivers/firmware/psci/psci.c
@@ -108,9 +108,10 @@ bool psci_power_state_is_valid(u32 state)
return !(state & ~valid_mask);
}
-static unsigned long __invoke_psci_fn_hvc(unsigned long function_id,
- unsigned long arg0, unsigned long arg1,
- unsigned long arg2)
+static __always_inline unsigned long
+__invoke_psci_fn_hvc(unsigned long function_id,
+ unsigned long arg0, unsigned long arg1,
+ unsigned long arg2)
{
struct arm_smccc_res res;
@@ -118,9 +119,10 @@ static unsigned long __invoke_psci_fn_hvc(unsigned long function_id,
return res.a0;
}
-static unsigned long __invoke_psci_fn_smc(unsigned long function_id,
- unsigned long arg0, unsigned long arg1,
- unsigned long arg2)
+static __always_inline unsigned long
+__invoke_psci_fn_smc(unsigned long function_id,
+ unsigned long arg0, unsigned long arg1,
+ unsigned long arg2)
{
struct arm_smccc_res res;
@@ -128,7 +130,7 @@ static unsigned long __invoke_psci_fn_smc(unsigned long function_id,
return res.a0;
}
-static int psci_to_linux_errno(int errno)
+static __always_inline int psci_to_linux_errno(int errno)
{
switch (errno) {
case PSCI_RET_SUCCESS:
@@ -169,7 +171,8 @@ int psci_set_osi_mode(bool enable)
return psci_to_linux_errno(err);
}
-static int __psci_cpu_suspend(u32 fn, u32 state, unsigned long entry_point)
+static __always_inline int
+__psci_cpu_suspend(u32 fn, u32 state, unsigned long entry_point)
{
int err;
@@ -177,13 +180,15 @@ static int __psci_cpu_suspend(u32 fn, u32 state, unsigned long entry_point)
return psci_to_linux_errno(err);
}
-static int psci_0_1_cpu_suspend(u32 state, unsigned long entry_point)
+static __always_inline int
+psci_0_1_cpu_suspend(u32 state, unsigned long entry_point)
{
return __psci_cpu_suspend(psci_0_1_function_ids.cpu_suspend,
state, entry_point);
}
-static int psci_0_2_cpu_suspend(u32 state, unsigned long entry_point)
+static __always_inline int
+psci_0_2_cpu_suspend(u32 state, unsigned long entry_point)
{
return __psci_cpu_suspend(PSCI_FN_NATIVE(0_2, CPU_SUSPEND),
state, entry_point);
@@ -450,10 +455,12 @@ late_initcall(psci_debugfs_init)
#endif
#ifdef CONFIG_CPU_IDLE
-static int psci_suspend_finisher(unsigned long state)
+static noinstr int psci_suspend_finisher(unsigned long state)
{
u32 power_state = state;
- phys_addr_t pa_cpu_resume = __pa_symbol(cpu_resume);
+ phys_addr_t pa_cpu_resume;
+
+ pa_cpu_resume = __pa_symbol_nodebug((unsigned long)cpu_resume);
return psci_ops.cpu_suspend(power_state, pa_cpu_resume);
}
@@ -465,11 +472,22 @@ int psci_cpu_suspend_enter(u32 state)
if (!psci_power_state_loses_context(state)) {
struct arm_cpuidle_irq_context context;
+ ct_cpuidle_enter();
arm_cpuidle_save_irq_context(&context);
ret = psci_ops.cpu_suspend(state, 0);
arm_cpuidle_restore_irq_context(&context);
+ ct_cpuidle_exit();
} else {
+ /*
+ * ARM64 cpu_suspend() wants to do ct_cpuidle_*() itself.
+ */
+ if (!IS_ENABLED(CONFIG_ARM64))
+ ct_cpuidle_enter();
+
ret = cpu_suspend(state, psci_suspend_finisher);
+
+ if (!IS_ENABLED(CONFIG_ARM64))
+ ct_cpuidle_exit();
}
return ret;
diff --git a/drivers/firmware/stratix10-svc.c b/drivers/firmware/stratix10-svc.c
index b4081f4d88a3..bde1f543f529 100644
--- a/drivers/firmware/stratix10-svc.c
+++ b/drivers/firmware/stratix10-svc.c
@@ -1138,13 +1138,17 @@ static int stratix10_svc_drv_probe(struct platform_device *pdev)
/* allocate service controller and supporting channel */
controller = devm_kzalloc(dev, sizeof(*controller), GFP_KERNEL);
- if (!controller)
- return -ENOMEM;
+ if (!controller) {
+ ret = -ENOMEM;
+ goto err_destroy_pool;
+ }
chans = devm_kmalloc_array(dev, SVC_NUM_CHANNEL,
sizeof(*chans), GFP_KERNEL | __GFP_ZERO);
- if (!chans)
- return -ENOMEM;
+ if (!chans) {
+ ret = -ENOMEM;
+ goto err_destroy_pool;
+ }
controller->dev = dev;
controller->num_chans = SVC_NUM_CHANNEL;
@@ -1159,7 +1163,7 @@ static int stratix10_svc_drv_probe(struct platform_device *pdev)
ret = kfifo_alloc(&controller->svc_fifo, fifo_size, GFP_KERNEL);
if (ret) {
dev_err(dev, "failed to allocate FIFO\n");
- return ret;
+ goto err_destroy_pool;
}
spin_lock_init(&controller->svc_fifo_lock);
@@ -1198,19 +1202,20 @@ static int stratix10_svc_drv_probe(struct platform_device *pdev)
ret = platform_device_add(svc->stratix10_svc_rsu);
if (ret) {
platform_device_put(svc->stratix10_svc_rsu);
- return ret;
+ goto err_free_kfifo;
}
svc->intel_svc_fcs = platform_device_alloc(INTEL_FCS, 1);
if (!svc->intel_svc_fcs) {
dev_err(dev, "failed to allocate %s device\n", INTEL_FCS);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err_unregister_dev;
}
ret = platform_device_add(svc->intel_svc_fcs);
if (ret) {
platform_device_put(svc->intel_svc_fcs);
- return ret;
+ goto err_unregister_dev;
}
dev_set_drvdata(dev, svc);
@@ -1219,8 +1224,12 @@ static int stratix10_svc_drv_probe(struct platform_device *pdev)
return 0;
+err_unregister_dev:
+ platform_device_unregister(svc->stratix10_svc_rsu);
err_free_kfifo:
kfifo_free(&controller->svc_fifo);
+err_destroy_pool:
+ gen_pool_destroy(genpool);
return ret;
}
diff --git a/drivers/firmware/sysfb_simplefb.c b/drivers/firmware/sysfb_simplefb.c
index a353e27f83f5..ce9c007ed66f 100644
--- a/drivers/firmware/sysfb_simplefb.c
+++ b/drivers/firmware/sysfb_simplefb.c
@@ -27,25 +27,56 @@ static const struct simplefb_format formats[] = SIMPLEFB_FORMATS;
__init bool sysfb_parse_mode(const struct screen_info *si,
struct simplefb_platform_data *mode)
{
- const struct simplefb_format *f;
__u8 type;
+ u32 bits_per_pixel;
unsigned int i;
type = si->orig_video_isVGA;
if (type != VIDEO_TYPE_VLFB && type != VIDEO_TYPE_EFI)
return false;
+ /*
+ * The meaning of depth and bpp for direct-color formats is
+ * inconsistent:
+ *
+ * - DRM format info specifies depth as the number of color
+ * bits; including alpha, but not including filler bits.
+ * - Linux' EFI platform code computes lfb_depth from the
+ * individual color channels, including the reserved bits.
+ * - VBE 1.1 defines lfb_depth for XRGB1555 as 16, but later
+ * versions use 15.
+ * - On the kernel command line, 'bpp' of 32 is usually
+ * XRGB8888 including the filler bits, but 15 is XRGB1555
+ * not including the filler bit.
+ *
+ * It's not easily possible to fix this in struct screen_info,
+ * as this could break UAPI. The best solution is to compute
+ * bits_per_pixel here and ignore lfb_depth. In the loop below,
+ * ignore simplefb formats with alpha bits, as EFI and VESA
+ * don't specify alpha channels.
+ */
+ if (si->lfb_depth > 8) {
+ bits_per_pixel = max(max3(si->red_size + si->red_pos,
+ si->green_size + si->green_pos,
+ si->blue_size + si->blue_pos),
+ si->rsvd_size + si->rsvd_pos);
+ } else {
+ bits_per_pixel = si->lfb_depth;
+ }
+
for (i = 0; i < ARRAY_SIZE(formats); ++i) {
- f = &formats[i];
- if (si->lfb_depth == f->bits_per_pixel &&
+ const struct simplefb_format *f = &formats[i];
+
+ if (f->transp.length)
+ continue; /* transparent formats are unsupported by VESA/EFI */
+
+ if (bits_per_pixel == f->bits_per_pixel &&
si->red_size == f->red.length &&
si->red_pos == f->red.offset &&
si->green_size == f->green.length &&
si->green_pos == f->green.offset &&
si->blue_size == f->blue.length &&
- si->blue_pos == f->blue.offset &&
- si->rsvd_size == f->transp.length &&
- si->rsvd_pos == f->transp.offset) {
+ si->blue_pos == f->blue.offset) {
mode->format = f->name;
mode->width = si->lfb_width;
mode->height = si->lfb_height;
diff --git a/drivers/fpga/Kconfig b/drivers/fpga/Kconfig
index 6ce143dafd04..0a00763b9f28 100644
--- a/drivers/fpga/Kconfig
+++ b/drivers/fpga/Kconfig
@@ -246,7 +246,7 @@ config FPGA_MGR_VERSAL_FPGA
config FPGA_M10_BMC_SEC_UPDATE
tristate "Intel MAX10 BMC Secure Update driver"
- depends on MFD_INTEL_M10_BMC
+ depends on MFD_INTEL_M10_BMC_CORE
select FW_LOADER
select FW_UPLOAD
help
diff --git a/drivers/fpga/dfl-afu-region.c b/drivers/fpga/dfl-afu-region.c
index 0804b7a0c298..2e7b41629406 100644
--- a/drivers/fpga/dfl-afu-region.c
+++ b/drivers/fpga/dfl-afu-region.c
@@ -39,6 +39,7 @@ static struct dfl_afu_mmio_region *get_region_by_index(struct dfl_afu *afu,
/**
* afu_mmio_region_add - add a mmio region to given feature dev.
*
+ * @pdata: afu platform device's pdata.
* @region_index: region index.
* @region_size: region size.
* @phys: region's physical address of this region.
diff --git a/drivers/fpga/dfl-afu.h b/drivers/fpga/dfl-afu.h
index e5020e2b1f3d..674e9772f0ea 100644
--- a/drivers/fpga/dfl-afu.h
+++ b/drivers/fpga/dfl-afu.h
@@ -41,7 +41,7 @@ struct dfl_afu_mmio_region {
};
/**
- * struct fpga_afu_dma_region - afu DMA region data structure
+ * struct dfl_afu_dma_region - afu DMA region data structure
*
* @user_addr: region userspace virtual address.
* @length: region length.
diff --git a/drivers/fpga/dfl-fme-perf.c b/drivers/fpga/dfl-fme-perf.c
index 587c82be12f7..7422d2bc6f37 100644
--- a/drivers/fpga/dfl-fme-perf.c
+++ b/drivers/fpga/dfl-fme-perf.c
@@ -141,7 +141,7 @@
* @fab_port_id: used to indicate current working mode of fabric counters.
* @fab_lock: lock to protect fabric counters working mode.
* @cpu: active CPU to which the PMU is bound for accesses.
- * @cpuhp_node: node for CPU hotplug notifier link.
+ * @node: node for CPU hotplug notifier link.
* @cpuhp_state: state for CPU hotplug notification;
*/
struct fme_perf_priv {
diff --git a/drivers/fpga/dfl-fme-pr.c b/drivers/fpga/dfl-fme-pr.c
index d61ce9a18879..cdcf6dea4cc9 100644
--- a/drivers/fpga/dfl-fme-pr.c
+++ b/drivers/fpga/dfl-fme-pr.c
@@ -164,7 +164,7 @@ free_exit:
/**
* dfl_fme_create_mgr - create fpga mgr platform device as child device
- *
+ * @feature: sub feature info
* @pdata: fme platform_device's pdata
*
* Return: mgr platform device if successful, and error code otherwise.
@@ -273,7 +273,7 @@ static void dfl_fme_destroy_bridge(struct dfl_fme_bridge *fme_br)
}
/**
- * dfl_fme_destroy_bridge - destroy all fpga bridge platform device
+ * dfl_fme_destroy_bridges - destroy all fpga bridge platform device
* @pdata: fme platform device's pdata
*/
static void dfl_fme_destroy_bridges(struct dfl_feature_platform_data *pdata)
diff --git a/drivers/fpga/dfl-fme-pr.h b/drivers/fpga/dfl-fme-pr.h
index 096a699089d3..761f80f63312 100644
--- a/drivers/fpga/dfl-fme-pr.h
+++ b/drivers/fpga/dfl-fme-pr.h
@@ -58,7 +58,7 @@ struct dfl_fme_bridge {
};
/**
- * struct dfl_fme_bridge_pdata - platform data for FME bridge platform device.
+ * struct dfl_fme_br_pdata - platform data for FME bridge platform device.
*
* @cdev: container device.
* @port_id: port id.
diff --git a/drivers/fpga/dfl.c b/drivers/fpga/dfl.c
index b9aae85ba930..dd7a783d53b5 100644
--- a/drivers/fpga/dfl.c
+++ b/drivers/fpga/dfl.c
@@ -13,6 +13,7 @@
#include <linux/dfl.h>
#include <linux/fpga-dfl.h>
#include <linux/module.h>
+#include <linux/overflow.h>
#include <linux/uaccess.h>
#include "dfl.h"
@@ -45,7 +46,7 @@ static const char *dfl_pdata_key_strings[DFL_ID_MAX] = {
};
/**
- * dfl_dev_info - dfl feature device information.
+ * struct dfl_dev_info - dfl feature device information.
* @name: name string of the feature platform device.
* @dfh_id: id value in Device Feature Header (DFH) register by DFL spec.
* @id: idr id of the feature dev.
@@ -67,7 +68,7 @@ static struct dfl_dev_info dfl_devs[] = {
};
/**
- * dfl_chardev_info - chardev information of dfl feature device
+ * struct dfl_chardev_info - chardev information of dfl feature device
* @name: nmae string of the char device.
* @devt: devt of the char device.
*/
@@ -293,9 +294,9 @@ static void dfl_bus_remove(struct device *dev)
ddrv->remove(ddev);
}
-static int dfl_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
+static int dfl_bus_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
- struct dfl_device *ddev = to_dfl_dev(dev);
+ const struct dfl_device *ddev = to_dfl_dev(dev);
return add_uevent_var(env, "MODALIAS=dfl:t%04Xf%04X",
ddev->type, ddev->feature_id);
@@ -342,6 +343,8 @@ static void release_dfl_dev(struct device *dev)
if (ddev->mmio_res.parent)
release_resource(&ddev->mmio_res);
+ kfree(ddev->params);
+
ida_free(&dfl_device_ida, ddev->id);
kfree(ddev->irqs);
kfree(ddev);
@@ -380,7 +383,16 @@ dfl_dev_add(struct dfl_feature_platform_data *pdata,
ddev->type = feature_dev_id_type(pdev);
ddev->feature_id = feature->id;
ddev->revision = feature->revision;
+ ddev->dfh_version = feature->dfh_version;
ddev->cdev = pdata->dfl_cdev;
+ if (feature->param_size) {
+ ddev->params = kmemdup(feature->params, feature->param_size, GFP_KERNEL);
+ if (!ddev->params) {
+ ret = -ENOMEM;
+ goto put_dev;
+ }
+ ddev->param_size = feature->param_size;
+ }
/* add mmio resource */
parent_res = &pdev->resource[feature->resource_index];
@@ -708,20 +720,27 @@ struct build_feature_devs_info {
* struct dfl_feature_info - sub feature info collected during feature dev build
*
* @fid: id of this sub feature.
+ * @revision: revision of this sub feature
+ * @dfh_version: version of Device Feature Header (DFH)
* @mmio_res: mmio resource of this sub feature.
* @ioaddr: mapped base address of mmio resource.
* @node: node in sub_features linked list.
* @irq_base: start of irq index in this sub feature.
* @nr_irqs: number of irqs of this sub feature.
+ * @param_size: size DFH parameters.
+ * @params: DFH parameter data.
*/
struct dfl_feature_info {
u16 fid;
u8 revision;
+ u8 dfh_version;
struct resource mmio_res;
void __iomem *ioaddr;
struct list_head node;
unsigned int irq_base;
unsigned int nr_irqs;
+ unsigned int param_size;
+ u64 params[];
};
static void dfl_fpga_cdev_add_port_dev(struct dfl_fpga_cdev *cdev,
@@ -797,7 +816,17 @@ static int build_info_commit_dev(struct build_feature_devs_info *binfo)
feature->dev = fdev;
feature->id = finfo->fid;
feature->revision = finfo->revision;
+ feature->dfh_version = finfo->dfh_version;
+
+ if (finfo->param_size) {
+ feature->params = devm_kmemdup(binfo->dev,
+ finfo->params, finfo->param_size,
+ GFP_KERNEL);
+ if (!feature->params)
+ return -ENOMEM;
+ feature->param_size = finfo->param_size;
+ }
/*
* the FIU header feature has some fundamental functions (sriov
* set, port enable/disable) needed for the dfl bus device and
@@ -934,56 +963,115 @@ static u16 feature_id(u64 value)
return 0;
}
+static u64 *find_param(u64 *params, resource_size_t max, int param_id)
+{
+ u64 *end = params + max / sizeof(u64);
+ u64 v, next;
+
+ while (params < end) {
+ v = *params;
+ if (param_id == FIELD_GET(DFHv1_PARAM_HDR_ID, v))
+ return params;
+
+ if (FIELD_GET(DFHv1_PARAM_HDR_NEXT_EOP, v))
+ break;
+
+ next = FIELD_GET(DFHv1_PARAM_HDR_NEXT_OFFSET, v);
+ params += next;
+ }
+
+ return NULL;
+}
+
+/**
+ * dfh_find_param() - find parameter block for the given parameter id
+ * @dfl_dev: dfl device
+ * @param_id: id of dfl parameter
+ * @psize: destination to store size of parameter data in bytes
+ *
+ * Return: pointer to start of parameter data, PTR_ERR otherwise.
+ */
+void *dfh_find_param(struct dfl_device *dfl_dev, int param_id, size_t *psize)
+{
+ u64 *phdr = find_param(dfl_dev->params, dfl_dev->param_size, param_id);
+
+ if (!phdr)
+ return ERR_PTR(-ENOENT);
+
+ if (psize)
+ *psize = (FIELD_GET(DFHv1_PARAM_HDR_NEXT_OFFSET, *phdr) - 1) * sizeof(u64);
+
+ return phdr + 1;
+}
+EXPORT_SYMBOL_GPL(dfh_find_param);
+
static int parse_feature_irqs(struct build_feature_devs_info *binfo,
- resource_size_t ofst, u16 fid,
- unsigned int *irq_base, unsigned int *nr_irqs)
+ resource_size_t ofst, struct dfl_feature_info *finfo)
{
void __iomem *base = binfo->ioaddr + ofst;
unsigned int i, ibase, inr = 0;
+ void *params = finfo->params;
enum dfl_id_type type;
+ u16 fid = finfo->fid;
int virq;
+ u64 *p;
u64 v;
- type = feature_dev_id_type(binfo->feature_dev);
+ switch (finfo->dfh_version) {
+ case 0:
+ /*
+ * DFHv0 only provides MMIO resource information for each feature
+ * in the DFL header. There is no generic interrupt information.
+ * Instead, features with interrupt functionality provide
+ * the information in feature specific registers.
+ */
+ type = feature_dev_id_type(binfo->feature_dev);
+ if (type == PORT_ID) {
+ switch (fid) {
+ case PORT_FEATURE_ID_UINT:
+ v = readq(base + PORT_UINT_CAP);
+ ibase = FIELD_GET(PORT_UINT_CAP_FST_VECT, v);
+ inr = FIELD_GET(PORT_UINT_CAP_INT_NUM, v);
+ break;
+ case PORT_FEATURE_ID_ERROR:
+ v = readq(base + PORT_ERROR_CAP);
+ ibase = FIELD_GET(PORT_ERROR_CAP_INT_VECT, v);
+ inr = FIELD_GET(PORT_ERROR_CAP_SUPP_INT, v);
+ break;
+ }
+ } else if (type == FME_ID) {
+ switch (fid) {
+ case FME_FEATURE_ID_GLOBAL_ERR:
+ v = readq(base + FME_ERROR_CAP);
+ ibase = FIELD_GET(FME_ERROR_CAP_INT_VECT, v);
+ inr = FIELD_GET(FME_ERROR_CAP_SUPP_INT, v);
+ break;
+ }
+ }
+ break;
- /*
- * Ideally DFL framework should only read info from DFL header, but
- * current version DFL only provides mmio resources information for
- * each feature in DFL Header, no field for interrupt resources.
- * Interrupt resource information is provided by specific mmio
- * registers of each private feature which supports interrupt. So in
- * order to parse and assign irq resources, DFL framework has to look
- * into specific capability registers of these private features.
- *
- * Once future DFL version supports generic interrupt resource
- * information in common DFL headers, the generic interrupt parsing
- * code will be added. But in order to be compatible to old version
- * DFL, the driver may still fall back to these quirks.
- */
- if (type == PORT_ID) {
- switch (fid) {
- case PORT_FEATURE_ID_UINT:
- v = readq(base + PORT_UINT_CAP);
- ibase = FIELD_GET(PORT_UINT_CAP_FST_VECT, v);
- inr = FIELD_GET(PORT_UINT_CAP_INT_NUM, v);
- break;
- case PORT_FEATURE_ID_ERROR:
- v = readq(base + PORT_ERROR_CAP);
- ibase = FIELD_GET(PORT_ERROR_CAP_INT_VECT, v);
- inr = FIELD_GET(PORT_ERROR_CAP_SUPP_INT, v);
+ case 1:
+ /*
+ * DFHv1 provides interrupt resource information in DFHv1
+ * parameter blocks.
+ */
+ p = find_param(params, finfo->param_size, DFHv1_PARAM_ID_MSI_X);
+ if (!p)
break;
- }
- } else if (type == FME_ID) {
- if (fid == FME_FEATURE_ID_GLOBAL_ERR) {
- v = readq(base + FME_ERROR_CAP);
- ibase = FIELD_GET(FME_ERROR_CAP_INT_VECT, v);
- inr = FIELD_GET(FME_ERROR_CAP_SUPP_INT, v);
- }
+
+ p++;
+ ibase = FIELD_GET(DFHv1_PARAM_MSI_X_STARTV, *p);
+ inr = FIELD_GET(DFHv1_PARAM_MSI_X_NUMV, *p);
+ break;
+
+ default:
+ dev_warn(binfo->dev, "unexpected DFH version %d\n", finfo->dfh_version);
+ break;
}
if (!inr) {
- *irq_base = 0;
- *nr_irqs = 0;
+ finfo->irq_base = 0;
+ finfo->nr_irqs = 0;
return 0;
}
@@ -1006,12 +1094,37 @@ static int parse_feature_irqs(struct build_feature_devs_info *binfo,
}
}
- *irq_base = ibase;
- *nr_irqs = inr;
+ finfo->irq_base = ibase;
+ finfo->nr_irqs = inr;
return 0;
}
+static int dfh_get_param_size(void __iomem *dfh_base, resource_size_t max)
+{
+ int size = 0;
+ u64 v, next;
+
+ if (!FIELD_GET(DFHv1_CSR_SIZE_GRP_HAS_PARAMS,
+ readq(dfh_base + DFHv1_CSR_SIZE_GRP)))
+ return 0;
+
+ while (size + DFHv1_PARAM_HDR < max) {
+ v = readq(dfh_base + DFHv1_PARAM_HDR + size);
+
+ next = FIELD_GET(DFHv1_PARAM_HDR_NEXT_OFFSET, v);
+ if (!next)
+ return -EINVAL;
+
+ size += next * sizeof(u64);
+
+ if (FIELD_GET(DFHv1_PARAM_HDR_NEXT_EOP, v))
+ return size;
+ }
+
+ return -ENOENT;
+}
+
/*
* when create sub feature instances, for private features, it doesn't need
* to provide resource size and feature id as they could be read from DFH
@@ -1023,39 +1136,69 @@ static int
create_feature_instance(struct build_feature_devs_info *binfo,
resource_size_t ofst, resource_size_t size, u16 fid)
{
- unsigned int irq_base, nr_irqs;
struct dfl_feature_info *finfo;
+ resource_size_t start, end;
+ int dfh_psize = 0;
u8 revision = 0;
+ u64 v, addr_off;
+ u8 dfh_ver = 0;
int ret;
- u64 v;
if (fid != FEATURE_ID_AFU) {
v = readq(binfo->ioaddr + ofst);
revision = FIELD_GET(DFH_REVISION, v);
-
+ dfh_ver = FIELD_GET(DFH_VERSION, v);
/* read feature size and id if inputs are invalid */
size = size ? size : feature_size(v);
fid = fid ? fid : feature_id(v);
+ if (dfh_ver == 1) {
+ dfh_psize = dfh_get_param_size(binfo->ioaddr + ofst, size);
+ if (dfh_psize < 0) {
+ dev_err(binfo->dev,
+ "failed to read size of DFHv1 parameters %d\n",
+ dfh_psize);
+ return dfh_psize;
+ }
+ dev_dbg(binfo->dev, "dfhv1_psize %d\n", dfh_psize);
+ }
}
if (binfo->len - ofst < size)
return -EINVAL;
- ret = parse_feature_irqs(binfo, ofst, fid, &irq_base, &nr_irqs);
- if (ret)
- return ret;
-
- finfo = kzalloc(sizeof(*finfo), GFP_KERNEL);
+ finfo = kzalloc(struct_size(finfo, params, dfh_psize / sizeof(u64)), GFP_KERNEL);
if (!finfo)
return -ENOMEM;
+ memcpy_fromio(finfo->params, binfo->ioaddr + ofst + DFHv1_PARAM_HDR, dfh_psize);
+ finfo->param_size = dfh_psize;
+
finfo->fid = fid;
finfo->revision = revision;
- finfo->mmio_res.start = binfo->start + ofst;
- finfo->mmio_res.end = finfo->mmio_res.start + size - 1;
+ finfo->dfh_version = dfh_ver;
+ if (dfh_ver == 1) {
+ v = readq(binfo->ioaddr + ofst + DFHv1_CSR_ADDR);
+ addr_off = FIELD_GET(DFHv1_CSR_ADDR_MASK, v);
+ if (FIELD_GET(DFHv1_CSR_ADDR_REL, v))
+ start = addr_off << 1;
+ else
+ start = binfo->start + ofst + addr_off;
+
+ v = readq(binfo->ioaddr + ofst + DFHv1_CSR_SIZE_GRP);
+ end = start + FIELD_GET(DFHv1_CSR_SIZE_GRP_SIZE, v) - 1;
+ } else {
+ start = binfo->start + ofst;
+ end = start + size - 1;
+ }
finfo->mmio_res.flags = IORESOURCE_MEM;
- finfo->irq_base = irq_base;
- finfo->nr_irqs = nr_irqs;
+ finfo->mmio_res.start = start;
+ finfo->mmio_res.end = end;
+
+ ret = parse_feature_irqs(binfo, ofst, finfo);
+ if (ret) {
+ kfree(finfo);
+ return ret;
+ }
list_add_tail(&finfo->node, &binfo->sub_features);
binfo->feature_num++;
diff --git a/drivers/fpga/dfl.h b/drivers/fpga/dfl.h
index 06cfcd5e84bb..1d724a28f00a 100644
--- a/drivers/fpga/dfl.h
+++ b/drivers/fpga/dfl.h
@@ -74,11 +74,47 @@
#define DFH_REVISION GENMASK_ULL(15, 12) /* Feature revision */
#define DFH_NEXT_HDR_OFST GENMASK_ULL(39, 16) /* Offset to next DFH */
#define DFH_EOL BIT_ULL(40) /* End of list */
+#define DFH_VERSION GENMASK_ULL(59, 52) /* DFH version */
#define DFH_TYPE GENMASK_ULL(63, 60) /* Feature type */
#define DFH_TYPE_AFU 1
#define DFH_TYPE_PRIVATE 3
#define DFH_TYPE_FIU 4
+/*
+ * DFHv1 Register Offset definitons
+ * In DHFv1, DFH + GUID + CSR_START + CSR_SIZE_GROUP + PARAM_HDR + PARAM_DATA
+ * as common header registers
+ */
+#define DFHv1_CSR_ADDR 0x18 /* CSR Register start address */
+#define DFHv1_CSR_SIZE_GRP 0x20 /* Size of Reg Block and Group/tag */
+#define DFHv1_PARAM_HDR 0x28 /* Optional First Param header */
+
+/*
+ * CSR Rel Bit, 1'b0 = relative (offset from feature DFH start),
+ * 1'b1 = absolute (ARM or other non-PCIe use)
+ */
+#define DFHv1_CSR_ADDR_REL BIT_ULL(0)
+
+/* CSR Header Register Bit Definitions */
+#define DFHv1_CSR_ADDR_MASK GENMASK_ULL(63, 1) /* 63:1 of CSR address */
+
+/* CSR SIZE Goup Register Bit Definitions */
+#define DFHv1_CSR_SIZE_GRP_INSTANCE_ID GENMASK_ULL(15, 0) /* Enumeration instantiated IP */
+#define DFHv1_CSR_SIZE_GRP_GROUPING_ID GENMASK_ULL(30, 16) /* Group Features/interfaces */
+#define DFHv1_CSR_SIZE_GRP_HAS_PARAMS BIT_ULL(31) /* Presence of Parameters */
+#define DFHv1_CSR_SIZE_GRP_SIZE GENMASK_ULL(63, 32) /* Size of CSR Block in bytes */
+
+/* PARAM Header Register Bit Definitions */
+#define DFHv1_PARAM_HDR_ID GENMASK_ULL(15, 0) /* Id of this Param */
+#define DFHv1_PARAM_HDR_VER GENMASK_ULL(31, 16) /* Version Param */
+#define DFHv1_PARAM_HDR_NEXT_OFFSET GENMASK_ULL(63, 35) /* Offset of next Param */
+#define DFHv1_PARAM_HDR_NEXT_EOP BIT_ULL(32)
+#define DFHv1_PARAM_DATA 0x08 /* Offset of Param data from Param header */
+
+#define DFHv1_PARAM_ID_MSI_X 0x1
+#define DFHv1_PARAM_MSI_X_NUMV GENMASK_ULL(63, 32)
+#define DFHv1_PARAM_MSI_X_STARTV GENMASK_ULL(31, 0)
+
/* Next AFU Register Bitfield */
#define NEXT_AFU_NEXT_DFH_OFST GENMASK_ULL(23, 0) /* Offset to next AFU */
@@ -231,6 +267,7 @@ struct dfl_feature_irq_ctx {
*
* @dev: ptr to pdev of the feature device which has the sub feature.
* @id: sub feature id.
+ * @revision: revision of this sub feature.
* @resource_index: each sub feature has one mmio resource for its registers.
* this index is used to find its mmio resource from the
* feature dev (platform device)'s resources.
@@ -240,6 +277,9 @@ struct dfl_feature_irq_ctx {
* @ops: ops of this sub feature.
* @ddev: ptr to the dfl device of this sub feature.
* @priv: priv data of this feature.
+ * @dfh_version: version of the DFH
+ * @param_size: size of dfh parameters
+ * @params: point to memory copy of dfh parameters
*/
struct dfl_feature {
struct platform_device *dev;
@@ -252,6 +292,9 @@ struct dfl_feature {
const struct dfl_feature_ops *ops;
struct dfl_device *ddev;
void *priv;
+ u8 dfh_version;
+ unsigned int param_size;
+ void *params;
};
#define FEATURE_DEV_ID_UNUSED (-1)
diff --git a/drivers/fpga/fpga-bridge.c b/drivers/fpga/fpga-bridge.c
index 727704431f61..5cd40acab5bf 100644
--- a/drivers/fpga/fpga-bridge.c
+++ b/drivers/fpga/fpga-bridge.c
@@ -293,12 +293,15 @@ static ssize_t state_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct fpga_bridge *bridge = to_fpga_bridge(dev);
- int enable = 1;
+ int state = 1;
- if (bridge->br_ops && bridge->br_ops->enable_show)
- enable = bridge->br_ops->enable_show(bridge);
+ if (bridge->br_ops && bridge->br_ops->enable_show) {
+ state = bridge->br_ops->enable_show(bridge);
+ if (state < 0)
+ return state;
+ }
- return sprintf(buf, "%s\n", enable ? "enabled" : "disabled");
+ return sysfs_emit(buf, "%s\n", state ? "enabled" : "disabled");
}
static DEVICE_ATTR_RO(name);
diff --git a/drivers/fpga/intel-m10-bmc-sec-update.c b/drivers/fpga/intel-m10-bmc-sec-update.c
index 79d48852825e..f0acedc80182 100644
--- a/drivers/fpga/intel-m10-bmc-sec-update.c
+++ b/drivers/fpga/intel-m10-bmc-sec-update.c
@@ -14,6 +14,12 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
+struct m10bmc_sec;
+
+struct m10bmc_sec_ops {
+ int (*rsu_status)(struct m10bmc_sec *sec);
+};
+
struct m10bmc_sec {
struct device *dev;
struct intel_m10bmc *m10bmc;
@@ -21,6 +27,7 @@ struct m10bmc_sec {
char *fw_name;
u32 fw_name_id;
bool cancel_request;
+ const struct m10bmc_sec_ops *ops;
};
static DEFINE_XARRAY_ALLOC(fw_upload_xa);
@@ -31,6 +38,71 @@ static DEFINE_XARRAY_ALLOC(fw_upload_xa);
#define REH_MAGIC GENMASK(15, 0)
#define REH_SHA_NUM_BYTES GENMASK(31, 16)
+static int m10bmc_sec_write(struct m10bmc_sec *sec, const u8 *buf, u32 offset, u32 size)
+{
+ struct intel_m10bmc *m10bmc = sec->m10bmc;
+ unsigned int stride = regmap_get_reg_stride(m10bmc->regmap);
+ u32 write_count = size / stride;
+ u32 leftover_offset = write_count * stride;
+ u32 leftover_size = size - leftover_offset;
+ u32 leftover_tmp = 0;
+ int ret;
+
+ if (sec->m10bmc->flash_bulk_ops)
+ return sec->m10bmc->flash_bulk_ops->write(m10bmc, buf, offset, size);
+
+ if (WARN_ON_ONCE(stride > sizeof(leftover_tmp)))
+ return -EINVAL;
+
+ ret = regmap_bulk_write(m10bmc->regmap, M10BMC_STAGING_BASE + offset,
+ buf + offset, write_count);
+ if (ret)
+ return ret;
+
+ /* If size is not aligned to stride, handle the remainder bytes with regmap_write() */
+ if (leftover_size) {
+ memcpy(&leftover_tmp, buf + leftover_offset, leftover_size);
+ ret = regmap_write(m10bmc->regmap, M10BMC_STAGING_BASE + offset + leftover_offset,
+ leftover_tmp);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int m10bmc_sec_read(struct m10bmc_sec *sec, u8 *buf, u32 addr, u32 size)
+{
+ struct intel_m10bmc *m10bmc = sec->m10bmc;
+ unsigned int stride = regmap_get_reg_stride(m10bmc->regmap);
+ u32 read_count = size / stride;
+ u32 leftover_offset = read_count * stride;
+ u32 leftover_size = size - leftover_offset;
+ u32 leftover_tmp;
+ int ret;
+
+ if (sec->m10bmc->flash_bulk_ops)
+ return sec->m10bmc->flash_bulk_ops->read(m10bmc, buf, addr, size);
+
+ if (WARN_ON_ONCE(stride > sizeof(leftover_tmp)))
+ return -EINVAL;
+
+ ret = regmap_bulk_read(m10bmc->regmap, addr, buf, read_count);
+ if (ret)
+ return ret;
+
+ /* If size is not aligned to stride, handle the remainder bytes with regmap_read() */
+ if (leftover_size) {
+ ret = regmap_read(m10bmc->regmap, addr + leftover_offset, &leftover_tmp);
+ if (ret)
+ return ret;
+ memcpy(buf + leftover_offset, &leftover_tmp, leftover_size);
+ }
+
+ return 0;
+}
+
+
static ssize_t
show_root_entry_hash(struct device *dev, u32 exp_magic,
u32 prog_addr, u32 reh_addr, char *buf)
@@ -38,11 +110,9 @@ show_root_entry_hash(struct device *dev, u32 exp_magic,
struct m10bmc_sec *sec = dev_get_drvdata(dev);
int sha_num_bytes, i, ret, cnt = 0;
u8 hash[REH_SHA384_SIZE];
- unsigned int stride;
u32 magic;
- stride = regmap_get_reg_stride(sec->m10bmc->regmap);
- ret = m10bmc_raw_read(sec->m10bmc, prog_addr, &magic);
+ ret = m10bmc_sec_read(sec, (u8 *)&magic, prog_addr, sizeof(magic));
if (ret)
return ret;
@@ -50,19 +120,16 @@ show_root_entry_hash(struct device *dev, u32 exp_magic,
return sysfs_emit(buf, "hash not programmed\n");
sha_num_bytes = FIELD_GET(REH_SHA_NUM_BYTES, magic) / 8;
- if ((sha_num_bytes % stride) ||
- (sha_num_bytes != REH_SHA256_SIZE &&
- sha_num_bytes != REH_SHA384_SIZE)) {
+ if (sha_num_bytes != REH_SHA256_SIZE &&
+ sha_num_bytes != REH_SHA384_SIZE) {
dev_err(sec->dev, "%s bad sha num bytes %d\n", __func__,
sha_num_bytes);
return -EINVAL;
}
- ret = regmap_bulk_read(sec->m10bmc->regmap, reh_addr,
- hash, sha_num_bytes / stride);
+ ret = m10bmc_sec_read(sec, hash, reh_addr, sha_num_bytes);
if (ret) {
- dev_err(dev, "failed to read root entry hash: %x cnt %x: %d\n",
- reh_addr, sha_num_bytes / stride, ret);
+ dev_err(dev, "failed to read root entry hash\n");
return ret;
}
@@ -73,16 +140,24 @@ show_root_entry_hash(struct device *dev, u32 exp_magic,
return cnt;
}
-#define DEVICE_ATTR_SEC_REH_RO(_name, _magic, _prog_addr, _reh_addr) \
+#define DEVICE_ATTR_SEC_REH_RO(_name) \
static ssize_t _name##_root_entry_hash_show(struct device *dev, \
struct device_attribute *attr, \
char *buf) \
-{ return show_root_entry_hash(dev, _magic, _prog_addr, _reh_addr, buf); } \
+{ \
+ struct m10bmc_sec *sec = dev_get_drvdata(dev); \
+ const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map; \
+ \
+ return show_root_entry_hash(dev, csr_map->_name##_magic, \
+ csr_map->_name##_prog_addr, \
+ csr_map->_name##_reh_addr, \
+ buf); \
+} \
static DEVICE_ATTR_RO(_name##_root_entry_hash)
-DEVICE_ATTR_SEC_REH_RO(bmc, BMC_PROG_MAGIC, BMC_PROG_ADDR, BMC_REH_ADDR);
-DEVICE_ATTR_SEC_REH_RO(sr, SR_PROG_MAGIC, SR_PROG_ADDR, SR_REH_ADDR);
-DEVICE_ATTR_SEC_REH_RO(pr, PR_PROG_MAGIC, PR_PROG_ADDR, PR_REH_ADDR);
+DEVICE_ATTR_SEC_REH_RO(bmc);
+DEVICE_ATTR_SEC_REH_RO(sr);
+DEVICE_ATTR_SEC_REH_RO(pr);
#define CSK_BIT_LEN 128U
#define CSK_32ARRAY_SIZE DIV_ROUND_UP(CSK_BIT_LEN, 32)
@@ -90,27 +165,16 @@ DEVICE_ATTR_SEC_REH_RO(pr, PR_PROG_MAGIC, PR_PROG_ADDR, PR_REH_ADDR);
static ssize_t
show_canceled_csk(struct device *dev, u32 addr, char *buf)
{
- unsigned int i, stride, size = CSK_32ARRAY_SIZE * sizeof(u32);
+ unsigned int i, size = CSK_32ARRAY_SIZE * sizeof(u32);
struct m10bmc_sec *sec = dev_get_drvdata(dev);
DECLARE_BITMAP(csk_map, CSK_BIT_LEN);
__le32 csk_le32[CSK_32ARRAY_SIZE];
u32 csk32[CSK_32ARRAY_SIZE];
int ret;
- stride = regmap_get_reg_stride(sec->m10bmc->regmap);
- if (size % stride) {
- dev_err(sec->dev,
- "CSK vector size (0x%x) not aligned to stride (0x%x)\n",
- size, stride);
- WARN_ON_ONCE(1);
- return -EINVAL;
- }
-
- ret = regmap_bulk_read(sec->m10bmc->regmap, addr, csk_le32,
- size / stride);
+ ret = m10bmc_sec_read(sec, (u8 *)&csk_le32, addr, size);
if (ret) {
- dev_err(sec->dev, "failed to read CSK vector: %x cnt %x: %d\n",
- addr, size / stride, ret);
+ dev_err(sec->dev, "failed to read CSK vector\n");
return ret;
}
@@ -122,18 +186,25 @@ show_canceled_csk(struct device *dev, u32 addr, char *buf)
return bitmap_print_to_pagebuf(1, buf, csk_map, CSK_BIT_LEN);
}
-#define DEVICE_ATTR_SEC_CSK_RO(_name, _addr) \
+#define DEVICE_ATTR_SEC_CSK_RO(_name) \
static ssize_t _name##_canceled_csks_show(struct device *dev, \
struct device_attribute *attr, \
char *buf) \
-{ return show_canceled_csk(dev, _addr, buf); } \
+{ \
+ struct m10bmc_sec *sec = dev_get_drvdata(dev); \
+ const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map; \
+ \
+ return show_canceled_csk(dev, \
+ csr_map->_name##_prog_addr + CSK_VEC_OFFSET, \
+ buf); \
+} \
static DEVICE_ATTR_RO(_name##_canceled_csks)
#define CSK_VEC_OFFSET 0x34
-DEVICE_ATTR_SEC_CSK_RO(bmc, BMC_PROG_ADDR + CSK_VEC_OFFSET);
-DEVICE_ATTR_SEC_CSK_RO(sr, SR_PROG_ADDR + CSK_VEC_OFFSET);
-DEVICE_ATTR_SEC_CSK_RO(pr, PR_PROG_ADDR + CSK_VEC_OFFSET);
+DEVICE_ATTR_SEC_CSK_RO(bmc);
+DEVICE_ATTR_SEC_CSK_RO(sr);
+DEVICE_ATTR_SEC_CSK_RO(pr);
#define FLASH_COUNT_SIZE 4096 /* count stored as inverted bit vector */
@@ -141,31 +212,21 @@ static ssize_t flash_count_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct m10bmc_sec *sec = dev_get_drvdata(dev);
- unsigned int stride, num_bits;
+ const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
+ unsigned int num_bits;
u8 *flash_buf;
int cnt, ret;
- stride = regmap_get_reg_stride(sec->m10bmc->regmap);
num_bits = FLASH_COUNT_SIZE * 8;
- if (FLASH_COUNT_SIZE % stride) {
- dev_err(sec->dev,
- "FLASH_COUNT_SIZE (0x%x) not aligned to stride (0x%x)\n",
- FLASH_COUNT_SIZE, stride);
- WARN_ON_ONCE(1);
- return -EINVAL;
- }
-
flash_buf = kmalloc(FLASH_COUNT_SIZE, GFP_KERNEL);
if (!flash_buf)
return -ENOMEM;
- ret = regmap_bulk_read(sec->m10bmc->regmap, STAGING_FLASH_COUNT,
- flash_buf, FLASH_COUNT_SIZE / stride);
+ ret = m10bmc_sec_read(sec, flash_buf, csr_map->rsu_update_counter,
+ FLASH_COUNT_SIZE);
if (ret) {
- dev_err(sec->dev,
- "failed to read flash count: %x cnt %x: %d\n",
- STAGING_FLASH_COUNT, FLASH_COUNT_SIZE / stride, ret);
+ dev_err(sec->dev, "failed to read flash count\n");
goto exit_free;
}
cnt = num_bits - bitmap_weight((unsigned long *)flash_buf, num_bits);
@@ -200,25 +261,94 @@ static const struct attribute_group *m10bmc_sec_attr_groups[] = {
static void log_error_regs(struct m10bmc_sec *sec, u32 doorbell)
{
+ const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
u32 auth_result;
- dev_err(sec->dev, "RSU error status: 0x%08x\n", doorbell);
+ dev_err(sec->dev, "Doorbell: 0x%08x\n", doorbell);
- if (!m10bmc_sys_read(sec->m10bmc, M10BMC_AUTH_RESULT, &auth_result))
+ if (!m10bmc_sys_read(sec->m10bmc, csr_map->auth_result, &auth_result))
dev_err(sec->dev, "RSU auth result: 0x%08x\n", auth_result);
}
+static int m10bmc_sec_n3000_rsu_status(struct m10bmc_sec *sec)
+{
+ const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
+ u32 doorbell;
+ int ret;
+
+ ret = m10bmc_sys_read(sec->m10bmc, csr_map->doorbell, &doorbell);
+ if (ret)
+ return ret;
+
+ return FIELD_GET(DRBL_RSU_STATUS, doorbell);
+}
+
+static int m10bmc_sec_n6000_rsu_status(struct m10bmc_sec *sec)
+{
+ const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
+ u32 auth_result;
+ int ret;
+
+ ret = m10bmc_sys_read(sec->m10bmc, csr_map->auth_result, &auth_result);
+ if (ret)
+ return ret;
+
+ return FIELD_GET(AUTH_RESULT_RSU_STATUS, auth_result);
+}
+
+static bool rsu_status_ok(u32 status)
+{
+ return (status == RSU_STAT_NORMAL ||
+ status == RSU_STAT_NIOS_OK ||
+ status == RSU_STAT_USER_OK ||
+ status == RSU_STAT_FACTORY_OK);
+}
+
+static bool rsu_progress_done(u32 progress)
+{
+ return (progress == RSU_PROG_IDLE ||
+ progress == RSU_PROG_RSU_DONE);
+}
+
+static bool rsu_progress_busy(u32 progress)
+{
+ return (progress == RSU_PROG_AUTHENTICATING ||
+ progress == RSU_PROG_COPYING ||
+ progress == RSU_PROG_UPDATE_CANCEL ||
+ progress == RSU_PROG_PROGRAM_KEY_HASH);
+}
+
+static int m10bmc_sec_progress_status(struct m10bmc_sec *sec, u32 *doorbell_reg,
+ u32 *progress, u32 *status)
+{
+ const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
+ int ret;
+
+ ret = m10bmc_sys_read(sec->m10bmc, csr_map->doorbell, doorbell_reg);
+ if (ret)
+ return ret;
+
+ ret = sec->ops->rsu_status(sec);
+ if (ret < 0)
+ return ret;
+
+ *status = ret;
+ *progress = rsu_prog(*doorbell_reg);
+
+ return 0;
+}
+
static enum fw_upload_err rsu_check_idle(struct m10bmc_sec *sec)
{
+ const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
u32 doorbell;
int ret;
- ret = m10bmc_sys_read(sec->m10bmc, M10BMC_DOORBELL, &doorbell);
+ ret = m10bmc_sys_read(sec->m10bmc, csr_map->doorbell, &doorbell);
if (ret)
return FW_UPLOAD_ERR_RW_ERROR;
- if (rsu_prog(doorbell) != RSU_PROG_IDLE &&
- rsu_prog(doorbell) != RSU_PROG_RSU_DONE) {
+ if (!rsu_progress_done(rsu_prog(doorbell))) {
log_error_regs(sec, doorbell);
return FW_UPLOAD_ERR_BUSY;
}
@@ -226,19 +356,15 @@ static enum fw_upload_err rsu_check_idle(struct m10bmc_sec *sec)
return FW_UPLOAD_ERR_NONE;
}
-static inline bool rsu_start_done(u32 doorbell)
+static inline bool rsu_start_done(u32 doorbell_reg, u32 progress, u32 status)
{
- u32 status, progress;
-
- if (doorbell & DRBL_RSU_REQUEST)
+ if (doorbell_reg & DRBL_RSU_REQUEST)
return false;
- status = rsu_stat(doorbell);
if (status == RSU_STAT_ERASE_FAIL || status == RSU_STAT_WEAROUT)
return true;
- progress = rsu_prog(doorbell);
- if (progress != RSU_PROG_IDLE && progress != RSU_PROG_RSU_DONE)
+ if (!rsu_progress_done(progress))
return true;
return false;
@@ -246,11 +372,12 @@ static inline bool rsu_start_done(u32 doorbell)
static enum fw_upload_err rsu_update_init(struct m10bmc_sec *sec)
{
- u32 doorbell, status;
- int ret;
+ const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
+ u32 doorbell_reg, progress, status;
+ int ret, err;
ret = regmap_update_bits(sec->m10bmc->regmap,
- M10BMC_SYS_BASE + M10BMC_DOORBELL,
+ csr_map->base + csr_map->doorbell,
DRBL_RSU_REQUEST | DRBL_HOST_STATUS,
DRBL_RSU_REQUEST |
FIELD_PREP(DRBL_HOST_STATUS,
@@ -258,26 +385,25 @@ static enum fw_upload_err rsu_update_init(struct m10bmc_sec *sec)
if (ret)
return FW_UPLOAD_ERR_RW_ERROR;
- ret = regmap_read_poll_timeout(sec->m10bmc->regmap,
- M10BMC_SYS_BASE + M10BMC_DOORBELL,
- doorbell,
- rsu_start_done(doorbell),
- NIOS_HANDSHAKE_INTERVAL_US,
- NIOS_HANDSHAKE_TIMEOUT_US);
+ ret = read_poll_timeout(m10bmc_sec_progress_status, err,
+ err < 0 || rsu_start_done(doorbell_reg, progress, status),
+ NIOS_HANDSHAKE_INTERVAL_US,
+ NIOS_HANDSHAKE_TIMEOUT_US,
+ false,
+ sec, &doorbell_reg, &progress, &status);
if (ret == -ETIMEDOUT) {
- log_error_regs(sec, doorbell);
+ log_error_regs(sec, doorbell_reg);
return FW_UPLOAD_ERR_TIMEOUT;
- } else if (ret) {
+ } else if (err) {
return FW_UPLOAD_ERR_RW_ERROR;
}
- status = rsu_stat(doorbell);
if (status == RSU_STAT_WEAROUT) {
dev_warn(sec->dev, "Excessive flash update count detected\n");
return FW_UPLOAD_ERR_WEAROUT;
} else if (status == RSU_STAT_ERASE_FAIL) {
- log_error_regs(sec, doorbell);
+ log_error_regs(sec, doorbell_reg);
return FW_UPLOAD_ERR_HW_ERROR;
}
@@ -286,11 +412,12 @@ static enum fw_upload_err rsu_update_init(struct m10bmc_sec *sec)
static enum fw_upload_err rsu_prog_ready(struct m10bmc_sec *sec)
{
+ const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
unsigned long poll_timeout;
u32 doorbell, progress;
int ret;
- ret = m10bmc_sys_read(sec->m10bmc, M10BMC_DOORBELL, &doorbell);
+ ret = m10bmc_sys_read(sec->m10bmc, csr_map->doorbell, &doorbell);
if (ret)
return FW_UPLOAD_ERR_RW_ERROR;
@@ -300,7 +427,7 @@ static enum fw_upload_err rsu_prog_ready(struct m10bmc_sec *sec)
if (time_after(jiffies, poll_timeout))
break;
- ret = m10bmc_sys_read(sec->m10bmc, M10BMC_DOORBELL, &doorbell);
+ ret = m10bmc_sys_read(sec->m10bmc, csr_map->doorbell, &doorbell);
if (ret)
return FW_UPLOAD_ERR_RW_ERROR;
}
@@ -319,11 +446,12 @@ static enum fw_upload_err rsu_prog_ready(struct m10bmc_sec *sec)
static enum fw_upload_err rsu_send_data(struct m10bmc_sec *sec)
{
- u32 doorbell;
+ const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
+ u32 doorbell_reg, status;
int ret;
ret = regmap_update_bits(sec->m10bmc->regmap,
- M10BMC_SYS_BASE + M10BMC_DOORBELL,
+ csr_map->base + csr_map->doorbell,
DRBL_HOST_STATUS,
FIELD_PREP(DRBL_HOST_STATUS,
HOST_STATUS_WRITE_DONE));
@@ -331,68 +459,58 @@ static enum fw_upload_err rsu_send_data(struct m10bmc_sec *sec)
return FW_UPLOAD_ERR_RW_ERROR;
ret = regmap_read_poll_timeout(sec->m10bmc->regmap,
- M10BMC_SYS_BASE + M10BMC_DOORBELL,
- doorbell,
- rsu_prog(doorbell) != RSU_PROG_READY,
+ csr_map->base + csr_map->doorbell,
+ doorbell_reg,
+ rsu_prog(doorbell_reg) != RSU_PROG_READY,
NIOS_HANDSHAKE_INTERVAL_US,
NIOS_HANDSHAKE_TIMEOUT_US);
if (ret == -ETIMEDOUT) {
- log_error_regs(sec, doorbell);
+ log_error_regs(sec, doorbell_reg);
return FW_UPLOAD_ERR_TIMEOUT;
} else if (ret) {
return FW_UPLOAD_ERR_RW_ERROR;
}
- switch (rsu_stat(doorbell)) {
- case RSU_STAT_NORMAL:
- case RSU_STAT_NIOS_OK:
- case RSU_STAT_USER_OK:
- case RSU_STAT_FACTORY_OK:
- break;
- default:
- log_error_regs(sec, doorbell);
+ ret = sec->ops->rsu_status(sec);
+ if (ret < 0)
+ return ret;
+ status = ret;
+
+ if (!rsu_status_ok(status)) {
+ log_error_regs(sec, doorbell_reg);
return FW_UPLOAD_ERR_HW_ERROR;
}
return FW_UPLOAD_ERR_NONE;
}
-static int rsu_check_complete(struct m10bmc_sec *sec, u32 *doorbell)
+static int rsu_check_complete(struct m10bmc_sec *sec, u32 *doorbell_reg)
{
- if (m10bmc_sys_read(sec->m10bmc, M10BMC_DOORBELL, doorbell))
+ u32 progress, status;
+
+ if (m10bmc_sec_progress_status(sec, doorbell_reg, &progress, &status))
return -EIO;
- switch (rsu_stat(*doorbell)) {
- case RSU_STAT_NORMAL:
- case RSU_STAT_NIOS_OK:
- case RSU_STAT_USER_OK:
- case RSU_STAT_FACTORY_OK:
- break;
- default:
+ if (!rsu_status_ok(status))
return -EINVAL;
- }
- switch (rsu_prog(*doorbell)) {
- case RSU_PROG_IDLE:
- case RSU_PROG_RSU_DONE:
+ if (rsu_progress_done(progress))
return 0;
- case RSU_PROG_AUTHENTICATING:
- case RSU_PROG_COPYING:
- case RSU_PROG_UPDATE_CANCEL:
- case RSU_PROG_PROGRAM_KEY_HASH:
+
+ if (rsu_progress_busy(progress))
return -EAGAIN;
- default:
- return -EINVAL;
- }
+
+ return -EINVAL;
}
static enum fw_upload_err rsu_cancel(struct m10bmc_sec *sec)
{
+ const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
u32 doorbell;
int ret;
- ret = m10bmc_sys_read(sec->m10bmc, M10BMC_DOORBELL, &doorbell);
+ ret = m10bmc_sys_read(sec->m10bmc, csr_map->doorbell, &doorbell);
if (ret)
return FW_UPLOAD_ERR_RW_ERROR;
@@ -400,7 +518,7 @@ static enum fw_upload_err rsu_cancel(struct m10bmc_sec *sec)
return FW_UPLOAD_ERR_BUSY;
ret = regmap_update_bits(sec->m10bmc->regmap,
- M10BMC_SYS_BASE + M10BMC_DOORBELL,
+ csr_map->base + csr_map->doorbell,
DRBL_HOST_STATUS,
FIELD_PREP(DRBL_HOST_STATUS,
HOST_STATUS_ABORT_RSU));
@@ -421,39 +539,50 @@ static enum fw_upload_err m10bmc_sec_prepare(struct fw_upload *fwl,
if (!size || size > M10BMC_STAGING_SIZE)
return FW_UPLOAD_ERR_INVALID_SIZE;
+ if (sec->m10bmc->flash_bulk_ops)
+ if (sec->m10bmc->flash_bulk_ops->lock_write(sec->m10bmc))
+ return FW_UPLOAD_ERR_BUSY;
+
ret = rsu_check_idle(sec);
if (ret != FW_UPLOAD_ERR_NONE)
- return ret;
+ goto unlock_flash;
ret = rsu_update_init(sec);
if (ret != FW_UPLOAD_ERR_NONE)
- return ret;
+ goto unlock_flash;
ret = rsu_prog_ready(sec);
if (ret != FW_UPLOAD_ERR_NONE)
- return ret;
+ goto unlock_flash;
- if (sec->cancel_request)
- return rsu_cancel(sec);
+ if (sec->cancel_request) {
+ ret = rsu_cancel(sec);
+ goto unlock_flash;
+ }
return FW_UPLOAD_ERR_NONE;
+
+unlock_flash:
+ if (sec->m10bmc->flash_bulk_ops)
+ sec->m10bmc->flash_bulk_ops->unlock_write(sec->m10bmc);
+ return ret;
}
#define WRITE_BLOCK_SIZE 0x4000 /* Default write-block size is 0x4000 bytes */
-static enum fw_upload_err m10bmc_sec_write(struct fw_upload *fwl, const u8 *data,
- u32 offset, u32 size, u32 *written)
+static enum fw_upload_err m10bmc_sec_fw_write(struct fw_upload *fwl, const u8 *data,
+ u32 offset, u32 size, u32 *written)
{
struct m10bmc_sec *sec = fwl->dd_handle;
- u32 blk_size, doorbell, extra_offset;
- unsigned int stride, extra = 0;
+ const struct m10bmc_csr_map *csr_map = sec->m10bmc->info->csr_map;
+ struct intel_m10bmc *m10bmc = sec->m10bmc;
+ u32 blk_size, doorbell;
int ret;
- stride = regmap_get_reg_stride(sec->m10bmc->regmap);
if (sec->cancel_request)
return rsu_cancel(sec);
- ret = m10bmc_sys_read(sec->m10bmc, M10BMC_DOORBELL, &doorbell);
+ ret = m10bmc_sys_read(m10bmc, csr_map->doorbell, &doorbell);
if (ret) {
return FW_UPLOAD_ERR_RW_ERROR;
} else if (rsu_prog(doorbell) != RSU_PROG_READY) {
@@ -461,28 +590,12 @@ static enum fw_upload_err m10bmc_sec_write(struct fw_upload *fwl, const u8 *data
return FW_UPLOAD_ERR_HW_ERROR;
}
- WARN_ON_ONCE(WRITE_BLOCK_SIZE % stride);
+ WARN_ON_ONCE(WRITE_BLOCK_SIZE % regmap_get_reg_stride(m10bmc->regmap));
blk_size = min_t(u32, WRITE_BLOCK_SIZE, size);
- ret = regmap_bulk_write(sec->m10bmc->regmap,
- M10BMC_STAGING_BASE + offset,
- (void *)data + offset,
- blk_size / stride);
+ ret = m10bmc_sec_write(sec, data, offset, blk_size);
if (ret)
return FW_UPLOAD_ERR_RW_ERROR;
- /*
- * If blk_size is not aligned to stride, then handle the extra
- * bytes with regmap_write.
- */
- if (blk_size % stride) {
- extra_offset = offset + ALIGN_DOWN(blk_size, stride);
- memcpy(&extra, (u8 *)(data + extra_offset), blk_size % stride);
- ret = regmap_write(sec->m10bmc->regmap,
- M10BMC_STAGING_BASE + extra_offset, extra);
- if (ret)
- return FW_UPLOAD_ERR_RW_ERROR;
- }
-
*written = blk_size;
return FW_UPLOAD_ERR_NONE;
}
@@ -539,16 +652,27 @@ static void m10bmc_sec_cleanup(struct fw_upload *fwl)
struct m10bmc_sec *sec = fwl->dd_handle;
(void)rsu_cancel(sec);
+
+ if (sec->m10bmc->flash_bulk_ops)
+ sec->m10bmc->flash_bulk_ops->unlock_write(sec->m10bmc);
}
static const struct fw_upload_ops m10bmc_ops = {
.prepare = m10bmc_sec_prepare,
- .write = m10bmc_sec_write,
+ .write = m10bmc_sec_fw_write,
.poll_complete = m10bmc_sec_poll_complete,
.cancel = m10bmc_sec_cancel,
.cleanup = m10bmc_sec_cleanup,
};
+static const struct m10bmc_sec_ops m10sec_n3000_ops = {
+ .rsu_status = m10bmc_sec_n3000_rsu_status,
+};
+
+static const struct m10bmc_sec_ops m10sec_n6000_ops = {
+ .rsu_status = m10bmc_sec_n6000_rsu_status,
+};
+
#define SEC_UPDATE_LEN_MAX 32
static int m10bmc_sec_probe(struct platform_device *pdev)
{
@@ -564,6 +688,7 @@ static int m10bmc_sec_probe(struct platform_device *pdev)
sec->dev = &pdev->dev;
sec->m10bmc = dev_get_drvdata(pdev->dev.parent);
+ sec->ops = (struct m10bmc_sec_ops *)platform_get_device_id(pdev)->driver_data;
dev_set_drvdata(&pdev->dev, sec);
ret = xa_alloc(&fw_upload_xa, &sec->fw_name_id, sec,
@@ -574,20 +699,27 @@ static int m10bmc_sec_probe(struct platform_device *pdev)
len = scnprintf(buf, SEC_UPDATE_LEN_MAX, "secure-update%d",
sec->fw_name_id);
sec->fw_name = kmemdup_nul(buf, len, GFP_KERNEL);
- if (!sec->fw_name)
- return -ENOMEM;
+ if (!sec->fw_name) {
+ ret = -ENOMEM;
+ goto fw_name_fail;
+ }
fwl = firmware_upload_register(THIS_MODULE, sec->dev, sec->fw_name,
&m10bmc_ops, sec);
if (IS_ERR(fwl)) {
dev_err(sec->dev, "Firmware Upload driver failed to start\n");
- kfree(sec->fw_name);
- xa_erase(&fw_upload_xa, sec->fw_name_id);
- return PTR_ERR(fwl);
+ ret = PTR_ERR(fwl);
+ goto fw_uploader_fail;
}
sec->fwl = fwl;
return 0;
+
+fw_uploader_fail:
+ kfree(sec->fw_name);
+fw_name_fail:
+ xa_erase(&fw_upload_xa, sec->fw_name_id);
+ return ret;
}
static int m10bmc_sec_remove(struct platform_device *pdev)
@@ -604,9 +736,15 @@ static int m10bmc_sec_remove(struct platform_device *pdev)
static const struct platform_device_id intel_m10bmc_sec_ids[] = {
{
.name = "n3000bmc-sec-update",
+ .driver_data = (kernel_ulong_t)&m10sec_n3000_ops,
},
{
.name = "d5005bmc-sec-update",
+ .driver_data = (kernel_ulong_t)&m10sec_n3000_ops,
+ },
+ {
+ .name = "n6000bmc-sec-update",
+ .driver_data = (kernel_ulong_t)&m10sec_n6000_ops,
},
{ }
};
diff --git a/drivers/fpga/microchip-spi.c b/drivers/fpga/microchip-spi.c
index 7436976ea904..d6070e7f5205 100644
--- a/drivers/fpga/microchip-spi.c
+++ b/drivers/fpga/microchip-spi.c
@@ -6,6 +6,7 @@
#include <asm/unaligned.h>
#include <linux/delay.h>
#include <linux/fpga/fpga-mgr.h>
+#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/spi/spi.h>
@@ -33,7 +34,7 @@
#define MPF_BITS_PER_COMPONENT_SIZE 22
-#define MPF_STATUS_POLL_RETRIES 10000
+#define MPF_STATUS_POLL_TIMEOUT (2 * USEC_PER_SEC)
#define MPF_STATUS_BUSY BIT(0)
#define MPF_STATUS_READY BIT(1)
#define MPF_STATUS_SPI_VIOLATION BIT(2)
@@ -42,46 +43,55 @@
struct mpf_priv {
struct spi_device *spi;
bool program_mode;
+ u8 tx __aligned(ARCH_KMALLOC_MINALIGN);
+ u8 rx;
};
-static int mpf_read_status(struct spi_device *spi)
+static int mpf_read_status(struct mpf_priv *priv)
{
- u8 status = 0, status_command = MPF_SPI_READ_STATUS;
- struct spi_transfer xfers[2] = { 0 };
- int ret;
-
/*
* HW status is returned on MISO in the first byte after CS went
* active. However, first reading can be inadequate, so we submit
* two identical SPI transfers and use result of the later one.
*/
- xfers[0].tx_buf = &status_command;
- xfers[1].tx_buf = &status_command;
- xfers[0].rx_buf = &status;
- xfers[1].rx_buf = &status;
- xfers[0].len = 1;
- xfers[1].len = 1;
- xfers[0].cs_change = 1;
+ struct spi_transfer xfers[2] = {
+ {
+ .tx_buf = &priv->tx,
+ .rx_buf = &priv->rx,
+ .len = 1,
+ .cs_change = 1,
+ }, {
+ .tx_buf = &priv->tx,
+ .rx_buf = &priv->rx,
+ .len = 1,
+ },
+ };
+ u8 status;
+ int ret;
- ret = spi_sync_transfer(spi, xfers, 2);
+ priv->tx = MPF_SPI_READ_STATUS;
+
+ ret = spi_sync_transfer(priv->spi, xfers, 2);
+ if (ret)
+ return ret;
+
+ status = priv->rx;
if ((status & MPF_STATUS_SPI_VIOLATION) ||
(status & MPF_STATUS_SPI_ERROR))
- ret = -EIO;
+ return -EIO;
- return ret ? : status;
+ return status;
}
static enum fpga_mgr_states mpf_ops_state(struct fpga_manager *mgr)
{
struct mpf_priv *priv = mgr->priv;
- struct spi_device *spi;
bool program_mode;
int status;
- spi = priv->spi;
program_mode = priv->program_mode;
- status = mpf_read_status(spi);
+ status = mpf_read_status(priv);
if (!program_mode && !status)
return FPGA_MGR_STATE_OPERATING;
@@ -185,52 +195,53 @@ static int mpf_ops_parse_header(struct fpga_manager *mgr,
return 0;
}
-/* Poll HW status until busy bit is cleared and mask bits are set. */
-static int mpf_poll_status(struct spi_device *spi, u8 mask)
+static int mpf_poll_status(struct mpf_priv *priv, u8 mask)
{
- int status, retries = MPF_STATUS_POLL_RETRIES;
-
- while (retries--) {
- status = mpf_read_status(spi);
- if (status < 0)
- return status;
+ int ret, status;
- if (status & MPF_STATUS_BUSY)
- continue;
-
- if (!mask || (status & mask))
- return status;
- }
+ /*
+ * Busy poll HW status. Polling stops if any of the following
+ * conditions are met:
+ * - timeout is reached
+ * - mpf_read_status() returns an error
+ * - busy bit is cleared AND mask bits are set
+ */
+ ret = read_poll_timeout(mpf_read_status, status,
+ (status < 0) ||
+ ((status & (MPF_STATUS_BUSY | mask)) == mask),
+ 0, MPF_STATUS_POLL_TIMEOUT, false, priv);
+ if (ret < 0)
+ return ret;
- return -EBUSY;
+ return status;
}
-static int mpf_spi_write(struct spi_device *spi, const void *buf, size_t buf_size)
+static int mpf_spi_write(struct mpf_priv *priv, const void *buf, size_t buf_size)
{
- int status = mpf_poll_status(spi, 0);
+ int status = mpf_poll_status(priv, 0);
if (status < 0)
return status;
- return spi_write(spi, buf, buf_size);
+ return spi_write_then_read(priv->spi, buf, buf_size, NULL, 0);
}
-static int mpf_spi_write_then_read(struct spi_device *spi,
+static int mpf_spi_write_then_read(struct mpf_priv *priv,
const void *txbuf, size_t txbuf_size,
void *rxbuf, size_t rxbuf_size)
{
const u8 read_command[] = { MPF_SPI_READ_DATA };
int ret;
- ret = mpf_spi_write(spi, txbuf, txbuf_size);
+ ret = mpf_spi_write(priv, txbuf, txbuf_size);
if (ret)
return ret;
- ret = mpf_poll_status(spi, MPF_STATUS_READY);
+ ret = mpf_poll_status(priv, MPF_STATUS_READY);
if (ret < 0)
return ret;
- return spi_write_then_read(spi, read_command, sizeof(read_command),
+ return spi_write_then_read(priv->spi, read_command, sizeof(read_command),
rxbuf, rxbuf_size);
}
@@ -242,7 +253,6 @@ static int mpf_ops_write_init(struct fpga_manager *mgr,
const u8 isc_en_command[] = { MPF_SPI_ISC_ENABLE };
struct mpf_priv *priv = mgr->priv;
struct device *dev = &mgr->dev;
- struct spi_device *spi;
u32 isc_ret = 0;
int ret;
@@ -251,9 +261,7 @@ static int mpf_ops_write_init(struct fpga_manager *mgr,
return -EOPNOTSUPP;
}
- spi = priv->spi;
-
- ret = mpf_spi_write_then_read(spi, isc_en_command, sizeof(isc_en_command),
+ ret = mpf_spi_write_then_read(priv, isc_en_command, sizeof(isc_en_command),
&isc_ret, sizeof(isc_ret));
if (ret || isc_ret) {
dev_err(dev, "Failed to enable ISC: spi_ret %d, isc_ret %u\n",
@@ -261,7 +269,7 @@ static int mpf_ops_write_init(struct fpga_manager *mgr,
return -EFAULT;
}
- ret = mpf_spi_write(spi, program_mode, sizeof(program_mode));
+ ret = mpf_spi_write(priv, program_mode, sizeof(program_mode));
if (ret) {
dev_err(dev, "Failed to enter program mode: %d\n", ret);
return ret;
@@ -272,13 +280,32 @@ static int mpf_ops_write_init(struct fpga_manager *mgr,
return 0;
}
+static int mpf_spi_frame_write(struct mpf_priv *priv, const char *buf)
+{
+ struct spi_transfer xfers[2] = {
+ {
+ .tx_buf = &priv->tx,
+ .len = 1,
+ }, {
+ .tx_buf = buf,
+ .len = MPF_SPI_FRAME_SIZE,
+ },
+ };
+ int ret;
+
+ ret = mpf_poll_status(priv, 0);
+ if (ret < 0)
+ return ret;
+
+ priv->tx = MPF_SPI_FRAME;
+
+ return spi_sync_transfer(priv->spi, xfers, ARRAY_SIZE(xfers));
+}
+
static int mpf_ops_write(struct fpga_manager *mgr, const char *buf, size_t count)
{
- u8 spi_frame_command[] = { MPF_SPI_FRAME };
- struct spi_transfer xfers[2] = { 0 };
struct mpf_priv *priv = mgr->priv;
struct device *dev = &mgr->dev;
- struct spi_device *spi;
int ret, i;
if (count % MPF_SPI_FRAME_SIZE) {
@@ -287,19 +314,8 @@ static int mpf_ops_write(struct fpga_manager *mgr, const char *buf, size_t count
return -EINVAL;
}
- spi = priv->spi;
-
- xfers[0].tx_buf = spi_frame_command;
- xfers[0].len = sizeof(spi_frame_command);
-
for (i = 0; i < count / MPF_SPI_FRAME_SIZE; i++) {
- xfers[1].tx_buf = buf + i * MPF_SPI_FRAME_SIZE;
- xfers[1].len = MPF_SPI_FRAME_SIZE;
-
- ret = mpf_poll_status(spi, 0);
- if (ret >= 0)
- ret = spi_sync_transfer(spi, xfers, ARRAY_SIZE(xfers));
-
+ ret = mpf_spi_frame_write(priv, buf + i * MPF_SPI_FRAME_SIZE);
if (ret) {
dev_err(dev, "Failed to write bitstream frame %d/%zu\n",
i, count / MPF_SPI_FRAME_SIZE);
@@ -317,12 +333,9 @@ static int mpf_ops_write_complete(struct fpga_manager *mgr,
const u8 release_command[] = { MPF_SPI_RELEASE };
struct mpf_priv *priv = mgr->priv;
struct device *dev = &mgr->dev;
- struct spi_device *spi;
int ret;
- spi = priv->spi;
-
- ret = mpf_spi_write(spi, isc_dis_command, sizeof(isc_dis_command));
+ ret = mpf_spi_write(priv, isc_dis_command, sizeof(isc_dis_command));
if (ret) {
dev_err(dev, "Failed to disable ISC: %d\n", ret);
return ret;
@@ -330,7 +343,7 @@ static int mpf_ops_write_complete(struct fpga_manager *mgr,
usleep_range(1000, 2000);
- ret = mpf_spi_write(spi, release_command, sizeof(release_command));
+ ret = mpf_spi_write(priv, release_command, sizeof(release_command));
if (ret) {
dev_err(dev, "Failed to exit program mode: %d\n", ret);
return ret;
diff --git a/drivers/fpga/stratix10-soc.c b/drivers/fpga/stratix10-soc.c
index 357cea58ec98..f7f01982a512 100644
--- a/drivers/fpga/stratix10-soc.c
+++ b/drivers/fpga/stratix10-soc.c
@@ -213,9 +213,9 @@ static int s10_ops_write_init(struct fpga_manager *mgr,
/* Allocate buffers from the service layer's pool. */
for (i = 0; i < NUM_SVC_BUFS; i++) {
kbuf = stratix10_svc_allocate_memory(priv->chan, SVC_BUF_SIZE);
- if (!kbuf) {
+ if (IS_ERR(kbuf)) {
s10_free_buffers(mgr);
- ret = -ENOMEM;
+ ret = PTR_ERR(kbuf);
goto init_done;
}
diff --git a/drivers/fsi/fsi-core.c b/drivers/fsi/fsi-core.c
index 694e80c06665..0b927c9f4267 100644
--- a/drivers/fsi/fsi-core.c
+++ b/drivers/fsi/fsi-core.c
@@ -897,10 +897,10 @@ static const struct attribute_group *cfam_attr_groups[] = {
NULL,
};
-static char *cfam_devnode(struct device *dev, umode_t *mode,
+static char *cfam_devnode(const struct device *dev, umode_t *mode,
kuid_t *uid, kgid_t *gid)
{
- struct fsi_slave *slave = to_fsi_slave(dev);
+ const struct fsi_slave *slave = to_fsi_slave(dev);
#ifdef CONFIG_FSI_NEW_DEV_NODE
return kasprintf(GFP_KERNEL, "fsi/cfam%d", slave->cdev_idx);
@@ -915,7 +915,7 @@ static const struct device_type cfam_type = {
.groups = cfam_attr_groups
};
-static char *fsi_cdev_devnode(struct device *dev, umode_t *mode,
+static char *fsi_cdev_devnode(const struct device *dev, umode_t *mode,
kuid_t *uid, kgid_t *gid)
{
#ifdef CONFIG_FSI_NEW_DEV_NODE
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index ec7cfd4f52b1..13be729710f2 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -354,16 +354,6 @@ config GPIO_IMX_SCU
def_bool y
depends on IMX_SCU
-config GPIO_IOP
- tristate "Intel IOP GPIO"
- depends on ARCH_IOP32X || COMPILE_TEST
- select GPIO_GENERIC
- help
- Say yes here to support the GPIO functionality of a number of Intel
- IOP32X or IOP33X series of chips.
-
- If unsure, say N.
-
config GPIO_IXP4XX
bool "Intel IXP4xx GPIO"
depends on ARCH_IXP4XX
@@ -762,7 +752,7 @@ config GPIO_XTENSA
config GPIO_ZEVIO
bool "LSI ZEVIO SoC memory mapped GPIOs"
- depends on ARM && OF_GPIO
+ depends on ARM
help
Say yes here to support the GPIO controller in LSI ZEVIO SoCs.
@@ -831,6 +821,7 @@ menu "Port-mapped I/O GPIO drivers"
config GPIO_I8255
tristate
+ select GPIO_REGMAP
help
Enables support for the i8255 interface library functions. The i8255
interface library provides functions to facilitate communication with
@@ -845,6 +836,8 @@ config GPIO_104_DIO_48E
tristate "ACCES 104-DIO-48E GPIO support"
depends on PC104
select ISA_BUS_API
+ select REGMAP_MMIO
+ select REGMAP_IRQ
select GPIOLIB_IRQCHIP
select GPIO_I8255
help
@@ -870,8 +863,10 @@ config GPIO_104_IDI_48
tristate "ACCES 104-IDI-48 GPIO support"
depends on PC104
select ISA_BUS_API
+ select REGMAP_MMIO
+ select REGMAP_IRQ
select GPIOLIB_IRQCHIP
- select GPIO_I8255
+ select GPIO_REGMAP
help
Enables GPIO support for the ACCES 104-IDI-48 family (104-IDI-48A,
104-IDI-48AC, 104-IDI-48B, 104-IDI-48BC). The base port addresses for
@@ -893,6 +888,7 @@ config GPIO_GPIO_MM
tristate "Diamond Systems GPIO-MM GPIO support"
depends on PC104
select ISA_BUS_API
+ select REGMAP_MMIO
select GPIO_I8255
help
Enables GPIO support for the Diamond Systems GPIO-MM and GPIO-MM-12.
@@ -1438,13 +1434,6 @@ config GPIO_TWL6040
Say yes here to access the GPO signals of twl6040
audio chip from Texas Instruments.
-config GPIO_UCB1400
- tristate "Philips UCB1400 GPIO"
- depends on UCB1400_CORE
- help
- This enables support for the Philips UCB1400 GPIO pins.
- The UCB1400 is an AC97 audio codec.
-
config GPIO_WHISKEY_COVE
tristate "GPIO support for Whiskey Cove PMIC"
depends on (X86 || COMPILE_TEST) && INTEL_SOC_PMIC_BXTWC
@@ -1531,6 +1520,7 @@ config GPIO_MLXBF2
tristate "Mellanox BlueField 2 SoC GPIO"
depends on (MELLANOX_PLATFORM && ARM64 && ACPI) || (64BIT && COMPILE_TEST)
select GPIO_GENERIC
+ select GPIOLIB_IRQCHIP
help
Say Y here if you want GPIO support on Mellanox BlueField 2 SoC.
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 010587025fc8..c048ba003367 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -72,7 +72,6 @@ obj-$(CONFIG_GPIO_ICH) += gpio-ich.o
obj-$(CONFIG_GPIO_IDIO_16) += gpio-idio-16.o
obj-$(CONFIG_GPIO_IDT3243X) += gpio-idt3243x.o
obj-$(CONFIG_GPIO_IMX_SCU) += gpio-imx-scu.o
-obj-$(CONFIG_GPIO_IOP) += gpio-iop.o
obj-$(CONFIG_GPIO_IT87) += gpio-it87.o
obj-$(CONFIG_GPIO_IXP4XX) += gpio-ixp4xx.o
obj-$(CONFIG_GPIO_JANZ_TTL) += gpio-janz-ttl.o
@@ -166,7 +165,6 @@ obj-$(CONFIG_GPIO_TS4900) += gpio-ts4900.o
obj-$(CONFIG_GPIO_TS5500) += gpio-ts5500.o
obj-$(CONFIG_GPIO_TWL4030) += gpio-twl4030.o
obj-$(CONFIG_GPIO_TWL6040) += gpio-twl6040.o
-obj-$(CONFIG_GPIO_UCB1400) += gpio-ucb1400.o
obj-$(CONFIG_GPIO_UNIPHIER) += gpio-uniphier.o
obj-$(CONFIG_GPIO_VF610) += gpio-vf610.o
obj-$(CONFIG_GPIO_VIPERBOARD) += gpio-viperboard.o
diff --git a/drivers/gpio/TODO b/drivers/gpio/TODO
index 76560744587a..68ada1066941 100644
--- a/drivers/gpio/TODO
+++ b/drivers/gpio/TODO
@@ -61,8 +61,8 @@ Work items:
- Get rid of struct of_mm_gpio_chip altogether: use the generic MMIO
GPIO for all current users (see below). Delete struct of_mm_gpio_chip,
- to_of_mm_gpio_chip(), of_mm_gpiochip_add_data(), of_mm_gpiochip_add()
- of_mm_gpiochip_remove() from the kernel.
+ to_of_mm_gpio_chip(), of_mm_gpiochip_add_data(), of_mm_gpiochip_remove()
+ from the kernel.
- Change all consumer drivers that #include <linux/of_gpio.h> to
#include <linux/gpio/consumer.h> and stop doing custom parsing of the
diff --git a/drivers/gpio/gpio-104-dio-48e.c b/drivers/gpio/gpio-104-dio-48e.c
index 7b8829c8e423..a3846faf3780 100644
--- a/drivers/gpio/gpio-104-dio-48e.c
+++ b/drivers/gpio/gpio-104-dio-48e.c
@@ -8,17 +8,14 @@
*/
#include <linux/bits.h>
#include <linux/device.h>
-#include <linux/errno.h>
-#include <linux/gpio/driver.h>
-#include <linux/io.h>
+#include <linux/err.h>
#include <linux/ioport.h>
-#include <linux/interrupt.h>
-#include <linux/irqdesc.h>
+#include <linux/irq.h>
#include <linux/isa.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
-#include <linux/spinlock.h>
+#include <linux/regmap.h>
#include <linux/types.h>
#include "gpio-i8255.h"
@@ -38,212 +35,101 @@ static unsigned int num_irq;
module_param_hw_array(irq, uint, irq, &num_irq, 0);
MODULE_PARM_DESC(irq, "ACCES 104-DIO-48E interrupt line numbers");
+#define DIO48E_ENABLE_INTERRUPT 0xB
+#define DIO48E_DISABLE_INTERRUPT DIO48E_ENABLE_INTERRUPT
+#define DIO48E_CLEAR_INTERRUPT 0xF
+
#define DIO48E_NUM_PPI 2
-/**
- * struct dio48e_reg - device register structure
- * @ppi: Programmable Peripheral Interface groups
- * @enable_buffer: Enable/Disable Buffer groups
- * @unused1: Unused
- * @enable_interrupt: Write: Enable Interrupt
- * Read: Disable Interrupt
- * @unused2: Unused
- * @enable_counter: Write: Enable Counter/Timer Addressing
- * Read: Disable Counter/Timer Addressing
- * @unused3: Unused
- * @clear_interrupt: Clear Interrupt
- */
-struct dio48e_reg {
- struct i8255 ppi[DIO48E_NUM_PPI];
- u8 enable_buffer[DIO48E_NUM_PPI];
- u8 unused1;
- u8 enable_interrupt;
- u8 unused2;
- u8 enable_counter;
- u8 unused3;
- u8 clear_interrupt;
+static const struct regmap_range dio48e_wr_ranges[] = {
+ regmap_reg_range(0x0, 0x9), regmap_reg_range(0xB, 0xB),
+ regmap_reg_range(0xD, 0xD), regmap_reg_range(0xF, 0xF),
};
-
-/**
- * struct dio48e_gpio - GPIO device private data structure
- * @chip: instance of the gpio_chip
- * @ppi_state: PPI device states
- * @lock: synchronization lock to prevent I/O race conditions
- * @reg: I/O address offset for the device registers
- * @irq_mask: I/O bits affected by interrupts
- */
-struct dio48e_gpio {
- struct gpio_chip chip;
- struct i8255_state ppi_state[DIO48E_NUM_PPI];
- raw_spinlock_t lock;
- struct dio48e_reg __iomem *reg;
- unsigned char irq_mask;
+static const struct regmap_range dio48e_rd_ranges[] = {
+ regmap_reg_range(0x0, 0x2), regmap_reg_range(0x4, 0x6),
+ regmap_reg_range(0xB, 0xB), regmap_reg_range(0xD, 0xD),
+ regmap_reg_range(0xF, 0xF),
+};
+static const struct regmap_range dio48e_volatile_ranges[] = {
+ i8255_volatile_regmap_range(0x0), i8255_volatile_regmap_range(0x4),
+ regmap_reg_range(0xB, 0xB), regmap_reg_range(0xD, 0xD),
+ regmap_reg_range(0xF, 0xF),
+};
+static const struct regmap_range dio48e_precious_ranges[] = {
+ regmap_reg_range(0xB, 0xB), regmap_reg_range(0xD, 0xD),
+ regmap_reg_range(0xF, 0xF),
+};
+static const struct regmap_access_table dio48e_wr_table = {
+ .yes_ranges = dio48e_wr_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dio48e_wr_ranges),
+};
+static const struct regmap_access_table dio48e_rd_table = {
+ .yes_ranges = dio48e_rd_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dio48e_rd_ranges),
+};
+static const struct regmap_access_table dio48e_volatile_table = {
+ .yes_ranges = dio48e_volatile_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dio48e_volatile_ranges),
+};
+static const struct regmap_access_table dio48e_precious_table = {
+ .yes_ranges = dio48e_precious_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dio48e_precious_ranges),
+};
+static const struct regmap_config dio48e_regmap_config = {
+ .reg_bits = 8,
+ .reg_stride = 1,
+ .val_bits = 8,
+ .io_port = true,
+ .max_register = 0xF,
+ .wr_table = &dio48e_wr_table,
+ .rd_table = &dio48e_rd_table,
+ .volatile_table = &dio48e_volatile_table,
+ .precious_table = &dio48e_precious_table,
+ .cache_type = REGCACHE_FLAT,
};
-static int dio48e_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
-{
- struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip);
-
- if (i8255_get_direction(dio48egpio->ppi_state, offset))
- return GPIO_LINE_DIRECTION_IN;
-
- return GPIO_LINE_DIRECTION_OUT;
-}
-
-static int dio48e_gpio_direction_input(struct gpio_chip *chip, unsigned int offset)
-{
- struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip);
-
- i8255_direction_input(dio48egpio->reg->ppi, dio48egpio->ppi_state,
- offset);
-
- return 0;
-}
-
-static int dio48e_gpio_direction_output(struct gpio_chip *chip, unsigned int offset,
- int value)
-{
- struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip);
-
- i8255_direction_output(dio48egpio->reg->ppi, dio48egpio->ppi_state,
- offset, value);
-
- return 0;
-}
-
-static int dio48e_gpio_get(struct gpio_chip *chip, unsigned int offset)
-{
- struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip);
-
- return i8255_get(dio48egpio->reg->ppi, offset);
-}
-
-static int dio48e_gpio_get_multiple(struct gpio_chip *chip, unsigned long *mask,
- unsigned long *bits)
-{
- struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip);
-
- i8255_get_multiple(dio48egpio->reg->ppi, mask, bits, chip->ngpio);
-
- return 0;
-}
-
-static void dio48e_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
-{
- struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip);
-
- i8255_set(dio48egpio->reg->ppi, dio48egpio->ppi_state, offset, value);
-}
-
-static void dio48e_gpio_set_multiple(struct gpio_chip *chip,
- unsigned long *mask, unsigned long *bits)
-{
- struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip);
-
- i8255_set_multiple(dio48egpio->reg->ppi, dio48egpio->ppi_state, mask,
- bits, chip->ngpio);
-}
-
-static void dio48e_irq_ack(struct irq_data *data)
-{
-}
-
-static void dio48e_irq_mask(struct irq_data *data)
-{
- struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
- struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip);
- const unsigned long offset = irqd_to_hwirq(data);
- unsigned long flags;
-
- /* only bit 3 on each respective Port C supports interrupts */
- if (offset != 19 && offset != 43)
- return;
-
- raw_spin_lock_irqsave(&dio48egpio->lock, flags);
-
- if (offset == 19)
- dio48egpio->irq_mask &= ~BIT(0);
- else
- dio48egpio->irq_mask &= ~BIT(1);
- gpiochip_disable_irq(chip, offset);
-
- if (!dio48egpio->irq_mask)
- /* disable interrupts */
- ioread8(&dio48egpio->reg->enable_interrupt);
-
- raw_spin_unlock_irqrestore(&dio48egpio->lock, flags);
-}
-
-static void dio48e_irq_unmask(struct irq_data *data)
-{
- struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
- struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip);
- const unsigned long offset = irqd_to_hwirq(data);
- unsigned long flags;
-
- /* only bit 3 on each respective Port C supports interrupts */
- if (offset != 19 && offset != 43)
- return;
-
- raw_spin_lock_irqsave(&dio48egpio->lock, flags);
-
- if (!dio48egpio->irq_mask) {
- /* enable interrupts */
- iowrite8(0x00, &dio48egpio->reg->clear_interrupt);
- iowrite8(0x00, &dio48egpio->reg->enable_interrupt);
+/* only bit 3 on each respective Port C supports interrupts */
+#define DIO48E_REGMAP_IRQ(_ppi) \
+ [19 + (_ppi) * 24] = { \
+ .mask = BIT(_ppi), \
+ .type = { .types_supported = IRQ_TYPE_EDGE_RISING }, \
}
- gpiochip_enable_irq(chip, offset);
- if (offset == 19)
- dio48egpio->irq_mask |= BIT(0);
- else
- dio48egpio->irq_mask |= BIT(1);
-
- raw_spin_unlock_irqrestore(&dio48egpio->lock, flags);
-}
-
-static int dio48e_irq_set_type(struct irq_data *data, unsigned int flow_type)
-{
- const unsigned long offset = irqd_to_hwirq(data);
-
- /* only bit 3 on each respective Port C supports interrupts */
- if (offset != 19 && offset != 43)
- return -EINVAL;
-
- if (flow_type != IRQ_TYPE_NONE && flow_type != IRQ_TYPE_EDGE_RISING)
- return -EINVAL;
-
- return 0;
-}
-
-static const struct irq_chip dio48e_irqchip = {
- .name = "104-dio-48e",
- .irq_ack = dio48e_irq_ack,
- .irq_mask = dio48e_irq_mask,
- .irq_unmask = dio48e_irq_unmask,
- .irq_set_type = dio48e_irq_set_type,
- .flags = IRQCHIP_IMMUTABLE,
- GPIOCHIP_IRQ_RESOURCE_HELPERS,
+static const struct regmap_irq dio48e_regmap_irqs[] = {
+ DIO48E_REGMAP_IRQ(0), DIO48E_REGMAP_IRQ(1),
};
-static irqreturn_t dio48e_irq_handler(int irq, void *dev_id)
+static int dio48e_handle_mask_sync(struct regmap *const map, const int index,
+ const unsigned int mask_buf_def,
+ const unsigned int mask_buf,
+ void *const irq_drv_data)
{
- struct dio48e_gpio *const dio48egpio = dev_id;
- struct gpio_chip *const chip = &dio48egpio->chip;
- const unsigned long irq_mask = dio48egpio->irq_mask;
- unsigned long gpio;
+ unsigned int *const irq_mask = irq_drv_data;
+ const unsigned int prev_mask = *irq_mask;
+ const unsigned int all_masked = GENMASK(1, 0);
+ int err;
+ unsigned int val;
- for_each_set_bit(gpio, &irq_mask, 2)
- generic_handle_domain_irq(chip->irq.domain,
- 19 + gpio*24);
+ /* exit early if no change since the previous mask */
+ if (mask_buf == prev_mask)
+ return 0;
- raw_spin_lock(&dio48egpio->lock);
+ /* remember the current mask for the next mask sync */
+ *irq_mask = mask_buf;
- iowrite8(0x00, &dio48egpio->reg->clear_interrupt);
+ /* if all previously masked, enable interrupts when unmasking */
+ if (prev_mask == all_masked) {
+ err = regmap_write(map, DIO48E_CLEAR_INTERRUPT, 0x00);
+ if (err)
+ return err;
+ return regmap_write(map, DIO48E_ENABLE_INTERRUPT, 0x00);
+ }
- raw_spin_unlock(&dio48egpio->lock);
+ /* if all are currently masked, disable interrupts */
+ if (mask_buf == all_masked)
+ return regmap_read(map, DIO48E_DISABLE_INTERRUPT, &val);
- return IRQ_HANDLED;
+ return 0;
}
#define DIO48E_NGPIO 48
@@ -266,41 +152,24 @@ static const char *dio48e_names[DIO48E_NGPIO] = {
"PPI Group 1 Port C 5", "PPI Group 1 Port C 6", "PPI Group 1 Port C 7"
};
-static int dio48e_irq_init_hw(struct gpio_chip *gc)
+static int dio48e_irq_init_hw(struct regmap *const map)
{
- struct dio48e_gpio *const dio48egpio = gpiochip_get_data(gc);
+ unsigned int val;
/* Disable IRQ by default */
- ioread8(&dio48egpio->reg->enable_interrupt);
-
- return 0;
-}
-
-static void dio48e_init_ppi(struct i8255 __iomem *const ppi,
- struct i8255_state *const ppi_state)
-{
- const unsigned long ngpio = 24;
- const unsigned long mask = GENMASK(ngpio - 1, 0);
- const unsigned long bits = 0;
- unsigned long i;
-
- /* Initialize all GPIO to output 0 */
- for (i = 0; i < DIO48E_NUM_PPI; i++) {
- i8255_mode0_output(&ppi[i]);
- i8255_set_multiple(&ppi[i], &ppi_state[i], &mask, &bits, ngpio);
- }
+ return regmap_read(map, DIO48E_DISABLE_INTERRUPT, &val);
}
static int dio48e_probe(struct device *dev, unsigned int id)
{
- struct dio48e_gpio *dio48egpio;
const char *const name = dev_name(dev);
- struct gpio_irq_chip *girq;
+ struct i8255_regmap_config config = {};
+ void __iomem *regs;
+ struct regmap *map;
int err;
-
- dio48egpio = devm_kzalloc(dev, sizeof(*dio48egpio), GFP_KERNEL);
- if (!dio48egpio)
- return -ENOMEM;
+ struct regmap_irq_chip *chip;
+ unsigned int irq_mask;
+ struct regmap_irq_chip_data *chip_data;
if (!devm_request_region(dev, base[id], DIO48E_EXTENT, name)) {
dev_err(dev, "Unable to lock port addresses (0x%X-0x%X)\n",
@@ -308,53 +177,52 @@ static int dio48e_probe(struct device *dev, unsigned int id)
return -EBUSY;
}
- dio48egpio->reg = devm_ioport_map(dev, base[id], DIO48E_EXTENT);
- if (!dio48egpio->reg)
+ regs = devm_ioport_map(dev, base[id], DIO48E_EXTENT);
+ if (!regs)
return -ENOMEM;
- dio48egpio->chip.label = name;
- dio48egpio->chip.parent = dev;
- dio48egpio->chip.owner = THIS_MODULE;
- dio48egpio->chip.base = -1;
- dio48egpio->chip.ngpio = DIO48E_NGPIO;
- dio48egpio->chip.names = dio48e_names;
- dio48egpio->chip.get_direction = dio48e_gpio_get_direction;
- dio48egpio->chip.direction_input = dio48e_gpio_direction_input;
- dio48egpio->chip.direction_output = dio48e_gpio_direction_output;
- dio48egpio->chip.get = dio48e_gpio_get;
- dio48egpio->chip.get_multiple = dio48e_gpio_get_multiple;
- dio48egpio->chip.set = dio48e_gpio_set;
- dio48egpio->chip.set_multiple = dio48e_gpio_set_multiple;
-
- girq = &dio48egpio->chip.irq;
- gpio_irq_chip_set_chip(girq, &dio48e_irqchip);
- /* This will let us handle the parent IRQ in the driver */
- girq->parent_handler = NULL;
- girq->num_parents = 0;
- girq->parents = NULL;
- girq->default_type = IRQ_TYPE_NONE;
- girq->handler = handle_edge_irq;
- girq->init_hw = dio48e_irq_init_hw;
-
- raw_spin_lock_init(&dio48egpio->lock);
-
- i8255_state_init(dio48egpio->ppi_state, DIO48E_NUM_PPI);
- dio48e_init_ppi(dio48egpio->reg->ppi, dio48egpio->ppi_state);
-
- err = devm_gpiochip_add_data(dev, &dio48egpio->chip, dio48egpio);
- if (err) {
- dev_err(dev, "GPIO registering failed (%d)\n", err);
- return err;
- }
+ map = devm_regmap_init_mmio(dev, regs, &dio48e_regmap_config);
+ if (IS_ERR(map))
+ return dev_err_probe(dev, PTR_ERR(map),
+ "Unable to initialize register map\n");
- err = devm_request_irq(dev, irq[id], dio48e_irq_handler, 0, name,
- dio48egpio);
- if (err) {
- dev_err(dev, "IRQ handler registering failed (%d)\n", err);
+ chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ chip->irq_drv_data = devm_kzalloc(dev, sizeof(irq_mask), GFP_KERNEL);
+ if (!chip->irq_drv_data)
+ return -ENOMEM;
+
+ chip->name = name;
+ /* No IRQ status register so use CLEAR_INTERRUPT register instead */
+ chip->status_base = DIO48E_CLEAR_INTERRUPT;
+ chip->mask_base = DIO48E_ENABLE_INTERRUPT;
+ chip->ack_base = DIO48E_CLEAR_INTERRUPT;
+ /* CLEAR_INTERRUPT doubles as status register so we need it cleared */
+ chip->clear_ack = true;
+ chip->status_invert = true;
+ chip->num_regs = 1;
+ chip->irqs = dio48e_regmap_irqs;
+ chip->num_irqs = ARRAY_SIZE(dio48e_regmap_irqs);
+ chip->handle_mask_sync = dio48e_handle_mask_sync;
+
+ /* Initialize to prevent spurious interrupts before we're ready */
+ err = dio48e_irq_init_hw(map);
+ if (err)
return err;
- }
- return 0;
+ err = devm_regmap_add_irq_chip(dev, map, irq[id], 0, 0, chip, &chip_data);
+ if (err)
+ return dev_err_probe(dev, err, "IRQ registration failed\n");
+
+ config.parent = dev;
+ config.map = map;
+ config.num_ppi = DIO48E_NUM_PPI;
+ config.names = dio48e_names;
+ config.domain = regmap_irq_get_domain(chip_data);
+
+ return devm_i8255_regmap_register(dev, &config);
}
static struct isa_driver dio48e_driver = {
diff --git a/drivers/gpio/gpio-104-idi-48.c b/drivers/gpio/gpio-104-idi-48.c
index c5e231fde1af..ca2175b84e24 100644
--- a/drivers/gpio/gpio-104-idi-48.c
+++ b/drivers/gpio/gpio-104-idi-48.c
@@ -8,23 +8,18 @@
*/
#include <linux/bits.h>
#include <linux/device.h>
-#include <linux/errno.h>
-#include <linux/gpio/driver.h>
-#include <linux/io.h>
-#include <linux/ioport.h>
+#include <linux/err.h>
+#include <linux/gpio/regmap.h>
#include <linux/interrupt.h>
-#include <linux/irqdesc.h>
+#include <linux/ioport.h>
+#include <linux/irq.h>
#include <linux/isa.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
-#include <linux/spinlock.h>
+#include <linux/regmap.h>
#include <linux/types.h>
-#include "gpio-i8255.h"
-
-MODULE_IMPORT_NS(I8255);
-
#define IDI_48_EXTENT 8
#define MAX_NUM_IDI_48 max_num_isa_dev(IDI_48_EXTENT)
@@ -38,185 +33,83 @@ static unsigned int num_irq;
module_param_hw_array(irq, uint, irq, &num_irq, 0);
MODULE_PARM_DESC(irq, "ACCES 104-IDI-48 interrupt line numbers");
-/**
- * struct idi_48_reg - device register structure
- * @port0: Port 0 Inputs
- * @unused: Unused
- * @port1: Port 1 Inputs
- * @irq: Read: IRQ Status Register/IRQ Clear
- * Write: IRQ Enable/Disable
- */
-struct idi_48_reg {
- u8 port0[3];
- u8 unused;
- u8 port1[3];
- u8 irq;
-};
+#define IDI48_IRQ_STATUS 0x7
+#define IDI48_IRQ_ENABLE IDI48_IRQ_STATUS
-/**
- * struct idi_48_gpio - GPIO device private data structure
- * @chip: instance of the gpio_chip
- * @lock: synchronization lock to prevent I/O race conditions
- * @irq_mask: input bits affected by interrupts
- * @reg: I/O address offset for the device registers
- * @cos_enb: Change-Of-State IRQ enable boundaries mask
- */
-struct idi_48_gpio {
- struct gpio_chip chip;
- spinlock_t lock;
- unsigned char irq_mask[6];
- struct idi_48_reg __iomem *reg;
- unsigned char cos_enb;
-};
-
-static int idi_48_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
+static int idi_48_reg_mask_xlate(struct gpio_regmap *gpio, unsigned int base,
+ unsigned int offset, unsigned int *reg,
+ unsigned int *mask)
{
- return GPIO_LINE_DIRECTION_IN;
-}
+ const unsigned int line = offset % 8;
+ const unsigned int stride = offset / 8;
+ const unsigned int port = (stride / 3) * 4;
+ const unsigned int port_stride = stride % 3;
-static int idi_48_gpio_direction_input(struct gpio_chip *chip, unsigned int offset)
-{
- return 0;
-}
-
-static int idi_48_gpio_get(struct gpio_chip *chip, unsigned int offset)
-{
- struct idi_48_gpio *const idi48gpio = gpiochip_get_data(chip);
- void __iomem *const ppi = idi48gpio->reg;
-
- return i8255_get(ppi, offset);
-}
-
-static int idi_48_gpio_get_multiple(struct gpio_chip *chip, unsigned long *mask,
- unsigned long *bits)
-{
- struct idi_48_gpio *const idi48gpio = gpiochip_get_data(chip);
- void __iomem *const ppi = idi48gpio->reg;
-
- i8255_get_multiple(ppi, mask, bits, chip->ngpio);
+ *reg = base + port + port_stride;
+ *mask = BIT(line);
return 0;
}
-static void idi_48_irq_ack(struct irq_data *data)
-{
-}
-
-static void idi_48_irq_mask(struct irq_data *data)
-{
- struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
- struct idi_48_gpio *const idi48gpio = gpiochip_get_data(chip);
- const unsigned int offset = irqd_to_hwirq(data);
- const unsigned long boundary = offset / 8;
- const unsigned long mask = BIT(offset % 8);
- unsigned long flags;
-
- spin_lock_irqsave(&idi48gpio->lock, flags);
-
- idi48gpio->irq_mask[boundary] &= ~mask;
- gpiochip_disable_irq(chip, offset);
-
- /* Exit early if there are still input lines with IRQ unmasked */
- if (idi48gpio->irq_mask[boundary])
- goto exit;
-
- idi48gpio->cos_enb &= ~BIT(boundary);
-
- iowrite8(idi48gpio->cos_enb, &idi48gpio->reg->irq);
-
-exit:
- spin_unlock_irqrestore(&idi48gpio->lock, flags);
-}
-
-static void idi_48_irq_unmask(struct irq_data *data)
-{
- struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
- struct idi_48_gpio *const idi48gpio = gpiochip_get_data(chip);
- const unsigned int offset = irqd_to_hwirq(data);
- const unsigned long boundary = offset / 8;
- const unsigned long mask = BIT(offset % 8);
- unsigned int prev_irq_mask;
- unsigned long flags;
-
- spin_lock_irqsave(&idi48gpio->lock, flags);
-
- prev_irq_mask = idi48gpio->irq_mask[boundary];
-
- gpiochip_enable_irq(chip, offset);
- idi48gpio->irq_mask[boundary] |= mask;
-
- /* Exit early if IRQ was already unmasked for this boundary */
- if (prev_irq_mask)
- goto exit;
-
- idi48gpio->cos_enb |= BIT(boundary);
-
- iowrite8(idi48gpio->cos_enb, &idi48gpio->reg->irq);
-
-exit:
- spin_unlock_irqrestore(&idi48gpio->lock, flags);
-}
-
-static int idi_48_irq_set_type(struct irq_data *data, unsigned int flow_type)
-{
- /* The only valid irq types are none and both-edges */
- if (flow_type != IRQ_TYPE_NONE &&
- (flow_type & IRQ_TYPE_EDGE_BOTH) != IRQ_TYPE_EDGE_BOTH)
- return -EINVAL;
-
- return 0;
-}
-
-static const struct irq_chip idi_48_irqchip = {
- .name = "104-idi-48",
- .irq_ack = idi_48_irq_ack,
- .irq_mask = idi_48_irq_mask,
- .irq_unmask = idi_48_irq_unmask,
- .irq_set_type = idi_48_irq_set_type,
- .flags = IRQCHIP_IMMUTABLE,
- GPIOCHIP_IRQ_RESOURCE_HELPERS,
+static const struct regmap_range idi_48_wr_ranges[] = {
+ regmap_reg_range(0x0, 0x6),
+};
+static const struct regmap_range idi_48_rd_ranges[] = {
+ regmap_reg_range(0x0, 0x2), regmap_reg_range(0x4, 0x7),
+};
+static const struct regmap_range idi_48_precious_ranges[] = {
+ regmap_reg_range(0x7, 0x7),
+};
+static const struct regmap_access_table idi_48_wr_table = {
+ .no_ranges = idi_48_wr_ranges,
+ .n_no_ranges = ARRAY_SIZE(idi_48_wr_ranges),
+};
+static const struct regmap_access_table idi_48_rd_table = {
+ .yes_ranges = idi_48_rd_ranges,
+ .n_yes_ranges = ARRAY_SIZE(idi_48_rd_ranges),
+};
+static const struct regmap_access_table idi_48_precious_table = {
+ .yes_ranges = idi_48_precious_ranges,
+ .n_yes_ranges = ARRAY_SIZE(idi_48_precious_ranges),
+};
+static const struct regmap_config idi48_regmap_config = {
+ .reg_bits = 8,
+ .reg_stride = 1,
+ .val_bits = 8,
+ .io_port = true,
+ .max_register = 0x6,
+ .wr_table = &idi_48_wr_table,
+ .rd_table = &idi_48_rd_table,
+ .precious_table = &idi_48_precious_table,
};
-static irqreturn_t idi_48_irq_handler(int irq, void *dev_id)
-{
- struct idi_48_gpio *const idi48gpio = dev_id;
- unsigned long cos_status;
- unsigned long boundary;
- unsigned long irq_mask;
- unsigned long bit_num;
- unsigned long gpio;
- struct gpio_chip *const chip = &idi48gpio->chip;
-
- spin_lock(&idi48gpio->lock);
-
- cos_status = ioread8(&idi48gpio->reg->irq);
-
- /* IRQ Status (bit 6) is active low (0 = IRQ generated by device) */
- if (cos_status & BIT(6)) {
- spin_unlock(&idi48gpio->lock);
- return IRQ_NONE;
- }
-
- /* Bit 0-5 indicate which Change-Of-State boundary triggered the IRQ */
- cos_status &= 0x3F;
-
- for_each_set_bit(boundary, &cos_status, 6) {
- irq_mask = idi48gpio->irq_mask[boundary];
-
- for_each_set_bit(bit_num, &irq_mask, 8) {
- gpio = bit_num + boundary * 8;
+#define IDI48_NGPIO 48
- generic_handle_domain_irq(chip->irq.domain,
- gpio);
- }
+#define IDI48_REGMAP_IRQ(_id) \
+ [_id] = { \
+ .mask = BIT((_id) / 8), \
+ .type = { .types_supported = IRQ_TYPE_EDGE_BOTH }, \
}
- spin_unlock(&idi48gpio->lock);
-
- return IRQ_HANDLED;
-}
+static const struct regmap_irq idi48_regmap_irqs[IDI48_NGPIO] = {
+ IDI48_REGMAP_IRQ(0), IDI48_REGMAP_IRQ(1), IDI48_REGMAP_IRQ(2), /* 0-2 */
+ IDI48_REGMAP_IRQ(3), IDI48_REGMAP_IRQ(4), IDI48_REGMAP_IRQ(5), /* 3-5 */
+ IDI48_REGMAP_IRQ(6), IDI48_REGMAP_IRQ(7), IDI48_REGMAP_IRQ(8), /* 6-8 */
+ IDI48_REGMAP_IRQ(9), IDI48_REGMAP_IRQ(10), IDI48_REGMAP_IRQ(11), /* 9-11 */
+ IDI48_REGMAP_IRQ(12), IDI48_REGMAP_IRQ(13), IDI48_REGMAP_IRQ(14), /* 12-14 */
+ IDI48_REGMAP_IRQ(15), IDI48_REGMAP_IRQ(16), IDI48_REGMAP_IRQ(17), /* 15-17 */
+ IDI48_REGMAP_IRQ(18), IDI48_REGMAP_IRQ(19), IDI48_REGMAP_IRQ(20), /* 18-20 */
+ IDI48_REGMAP_IRQ(21), IDI48_REGMAP_IRQ(22), IDI48_REGMAP_IRQ(23), /* 21-23 */
+ IDI48_REGMAP_IRQ(24), IDI48_REGMAP_IRQ(25), IDI48_REGMAP_IRQ(26), /* 24-26 */
+ IDI48_REGMAP_IRQ(27), IDI48_REGMAP_IRQ(28), IDI48_REGMAP_IRQ(29), /* 27-29 */
+ IDI48_REGMAP_IRQ(30), IDI48_REGMAP_IRQ(31), IDI48_REGMAP_IRQ(32), /* 30-32 */
+ IDI48_REGMAP_IRQ(33), IDI48_REGMAP_IRQ(34), IDI48_REGMAP_IRQ(35), /* 33-35 */
+ IDI48_REGMAP_IRQ(36), IDI48_REGMAP_IRQ(37), IDI48_REGMAP_IRQ(38), /* 36-38 */
+ IDI48_REGMAP_IRQ(39), IDI48_REGMAP_IRQ(40), IDI48_REGMAP_IRQ(41), /* 39-41 */
+ IDI48_REGMAP_IRQ(42), IDI48_REGMAP_IRQ(43), IDI48_REGMAP_IRQ(44), /* 42-44 */
+ IDI48_REGMAP_IRQ(45), IDI48_REGMAP_IRQ(46), IDI48_REGMAP_IRQ(47), /* 45-47 */
+};
-#define IDI48_NGPIO 48
static const char *idi48_names[IDI48_NGPIO] = {
"Bit 0 A", "Bit 1 A", "Bit 2 A", "Bit 3 A", "Bit 4 A", "Bit 5 A",
"Bit 6 A", "Bit 7 A", "Bit 8 A", "Bit 9 A", "Bit 10 A", "Bit 11 A",
@@ -228,75 +121,58 @@ static const char *idi48_names[IDI48_NGPIO] = {
"Bit 18 B", "Bit 19 B", "Bit 20 B", "Bit 21 B", "Bit 22 B", "Bit 23 B"
};
-static int idi_48_irq_init_hw(struct gpio_chip *gc)
-{
- struct idi_48_gpio *const idi48gpio = gpiochip_get_data(gc);
-
- /* Disable IRQ by default */
- iowrite8(0, &idi48gpio->reg->irq);
- ioread8(&idi48gpio->reg->irq);
-
- return 0;
-}
-
static int idi_48_probe(struct device *dev, unsigned int id)
{
- struct idi_48_gpio *idi48gpio;
const char *const name = dev_name(dev);
- struct gpio_irq_chip *girq;
+ struct gpio_regmap_config config = {};
+ void __iomem *regs;
+ struct regmap *map;
+ struct regmap_irq_chip *chip;
+ struct regmap_irq_chip_data *chip_data;
int err;
- idi48gpio = devm_kzalloc(dev, sizeof(*idi48gpio), GFP_KERNEL);
- if (!idi48gpio)
- return -ENOMEM;
-
if (!devm_request_region(dev, base[id], IDI_48_EXTENT, name)) {
dev_err(dev, "Unable to lock port addresses (0x%X-0x%X)\n",
base[id], base[id] + IDI_48_EXTENT);
return -EBUSY;
}
- idi48gpio->reg = devm_ioport_map(dev, base[id], IDI_48_EXTENT);
- if (!idi48gpio->reg)
+ regs = devm_ioport_map(dev, base[id], IDI_48_EXTENT);
+ if (!regs)
return -ENOMEM;
- idi48gpio->chip.label = name;
- idi48gpio->chip.parent = dev;
- idi48gpio->chip.owner = THIS_MODULE;
- idi48gpio->chip.base = -1;
- idi48gpio->chip.ngpio = IDI48_NGPIO;
- idi48gpio->chip.names = idi48_names;
- idi48gpio->chip.get_direction = idi_48_gpio_get_direction;
- idi48gpio->chip.direction_input = idi_48_gpio_direction_input;
- idi48gpio->chip.get = idi_48_gpio_get;
- idi48gpio->chip.get_multiple = idi_48_gpio_get_multiple;
-
- girq = &idi48gpio->chip.irq;
- gpio_irq_chip_set_chip(girq, &idi_48_irqchip);
- /* This will let us handle the parent IRQ in the driver */
- girq->parent_handler = NULL;
- girq->num_parents = 0;
- girq->parents = NULL;
- girq->default_type = IRQ_TYPE_NONE;
- girq->handler = handle_edge_irq;
- girq->init_hw = idi_48_irq_init_hw;
-
- spin_lock_init(&idi48gpio->lock);
+ map = devm_regmap_init_mmio(dev, regs, &idi48_regmap_config);
+ if (IS_ERR(map))
+ return dev_err_probe(dev, PTR_ERR(map),
+ "Unable to initialize register map\n");
- err = devm_gpiochip_add_data(dev, &idi48gpio->chip, idi48gpio);
- if (err) {
- dev_err(dev, "GPIO registering failed (%d)\n", err);
- return err;
- }
-
- err = devm_request_irq(dev, irq[id], idi_48_irq_handler, IRQF_SHARED,
- name, idi48gpio);
- if (err) {
- dev_err(dev, "IRQ handler registering failed (%d)\n", err);
- return err;
- }
+ chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
- return 0;
+ chip->name = name;
+ chip->status_base = IDI48_IRQ_STATUS;
+ chip->unmask_base = IDI48_IRQ_ENABLE;
+ chip->clear_on_unmask = true;
+ chip->num_regs = 1;
+ chip->irqs = idi48_regmap_irqs;
+ chip->num_irqs = ARRAY_SIZE(idi48_regmap_irqs);
+
+ err = devm_regmap_add_irq_chip(dev, map, irq[id], IRQF_SHARED, 0, chip,
+ &chip_data);
+ if (err)
+ return dev_err_probe(dev, err, "IRQ registration failed\n");
+
+ config.parent = dev;
+ config.regmap = map;
+ config.ngpio = IDI48_NGPIO;
+ config.names = idi48_names;
+ config.reg_dat_base = GPIO_REGMAP_ADDR(0x0);
+ config.ngpio_per_reg = 8;
+ config.reg_mask_xlate = idi_48_reg_mask_xlate;
+ config.irq_domain = regmap_irq_get_domain(chip_data);
+
+ return PTR_ERR_OR_ZERO(devm_gpio_regmap_register(dev, &config));
}
static struct isa_driver idi_48_driver = {
diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c
index fa51a91afa54..26b1f7465e09 100644
--- a/drivers/gpio/gpio-davinci.c
+++ b/drivers/gpio/gpio-davinci.c
@@ -252,7 +252,6 @@ static int davinci_gpio_probe(struct platform_device *pdev)
chips->chip.base = pdata->no_auto_base ? pdata->base : -1;
#ifdef CONFIG_OF_GPIO
- chips->chip.of_gpio_n_cells = 2;
chips->chip.parent = dev;
chips->chip.request = gpiochip_generic_request;
chips->chip.free = gpiochip_generic_free;
@@ -534,7 +533,7 @@ static int davinci_gpio_irq_setup(struct platform_device *pdev)
}
/*
- * Arrange gpio_to_irq() support, handling either direct IRQs or
+ * Arrange gpiod_to_irq() support, handling either direct IRQs or
* banked IRQs. Having GPIOs in the first GPIO bank use direct
* IRQs, while the others use banked IRQs, would need some setup
* tweaks to recognize hardware which can do that.
diff --git a/drivers/gpio/gpio-ge.c b/drivers/gpio/gpio-ge.c
index f6a3de99f7db..7bd4c2a4cc11 100644
--- a/drivers/gpio/gpio-ge.c
+++ b/drivers/gpio/gpio-ge.c
@@ -81,7 +81,6 @@ static int __init gef_gpio_probe(struct platform_device *pdev)
gc->base = -1;
gc->ngpio = (u16)(uintptr_t)of_device_get_match_data(&pdev->dev);
- gc->of_gpio_n_cells = 2;
/* This function adds a memory mapped GPIO chip */
ret = devm_gpiochip_add_data(&pdev->dev, gc, NULL);
diff --git a/drivers/gpio/gpio-gpio-mm.c b/drivers/gpio/gpio-gpio-mm.c
index 2689671b6b01..43d823a56e59 100644
--- a/drivers/gpio/gpio-gpio-mm.c
+++ b/drivers/gpio/gpio-gpio-mm.c
@@ -8,13 +8,13 @@
*/
#include <linux/device.h>
#include <linux/errno.h>
-#include <linux/gpio/driver.h>
-#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/isa.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
+#include <linux/regmap.h>
+#include <linux/types.h>
#include "gpio-i8255.h"
@@ -30,83 +30,22 @@ MODULE_PARM_DESC(base, "Diamond Systems GPIO-MM base addresses");
#define GPIOMM_NUM_PPI 2
-/**
- * struct gpiomm_gpio - GPIO device private data structure
- * @chip: instance of the gpio_chip
- * @ppi_state: Programmable Peripheral Interface group states
- * @ppi: Programmable Peripheral Interface groups
- */
-struct gpiomm_gpio {
- struct gpio_chip chip;
- struct i8255_state ppi_state[GPIOMM_NUM_PPI];
- struct i8255 __iomem *ppi;
+static const struct regmap_range gpiomm_volatile_ranges[] = {
+ i8255_volatile_regmap_range(0x0), i8255_volatile_regmap_range(0x4),
+};
+static const struct regmap_access_table gpiomm_volatile_table = {
+ .yes_ranges = gpiomm_volatile_ranges,
+ .n_yes_ranges = ARRAY_SIZE(gpiomm_volatile_ranges),
+};
+static const struct regmap_config gpiomm_regmap_config = {
+ .reg_bits = 8,
+ .reg_stride = 1,
+ .val_bits = 8,
+ .io_port = true,
+ .max_register = 0x7,
+ .volatile_table = &gpiomm_volatile_table,
+ .cache_type = REGCACHE_FLAT,
};
-
-static int gpiomm_gpio_get_direction(struct gpio_chip *chip,
- unsigned int offset)
-{
- struct gpiomm_gpio *const gpiommgpio = gpiochip_get_data(chip);
-
- if (i8255_get_direction(gpiommgpio->ppi_state, offset))
- return GPIO_LINE_DIRECTION_IN;
-
- return GPIO_LINE_DIRECTION_OUT;
-}
-
-static int gpiomm_gpio_direction_input(struct gpio_chip *chip,
- unsigned int offset)
-{
- struct gpiomm_gpio *const gpiommgpio = gpiochip_get_data(chip);
-
- i8255_direction_input(gpiommgpio->ppi, gpiommgpio->ppi_state, offset);
-
- return 0;
-}
-
-static int gpiomm_gpio_direction_output(struct gpio_chip *chip,
- unsigned int offset, int value)
-{
- struct gpiomm_gpio *const gpiommgpio = gpiochip_get_data(chip);
-
- i8255_direction_output(gpiommgpio->ppi, gpiommgpio->ppi_state, offset,
- value);
-
- return 0;
-}
-
-static int gpiomm_gpio_get(struct gpio_chip *chip, unsigned int offset)
-{
- struct gpiomm_gpio *const gpiommgpio = gpiochip_get_data(chip);
-
- return i8255_get(gpiommgpio->ppi, offset);
-}
-
-static int gpiomm_gpio_get_multiple(struct gpio_chip *chip, unsigned long *mask,
- unsigned long *bits)
-{
- struct gpiomm_gpio *const gpiommgpio = gpiochip_get_data(chip);
-
- i8255_get_multiple(gpiommgpio->ppi, mask, bits, chip->ngpio);
-
- return 0;
-}
-
-static void gpiomm_gpio_set(struct gpio_chip *chip, unsigned int offset,
- int value)
-{
- struct gpiomm_gpio *const gpiommgpio = gpiochip_get_data(chip);
-
- i8255_set(gpiommgpio->ppi, gpiommgpio->ppi_state, offset, value);
-}
-
-static void gpiomm_gpio_set_multiple(struct gpio_chip *chip,
- unsigned long *mask, unsigned long *bits)
-{
- struct gpiomm_gpio *const gpiommgpio = gpiochip_get_data(chip);
-
- i8255_set_multiple(gpiommgpio->ppi, gpiommgpio->ppi_state, mask, bits,
- chip->ngpio);
-}
#define GPIOMM_NGPIO 48
static const char *gpiomm_names[GPIOMM_NGPIO] = {
@@ -120,30 +59,11 @@ static const char *gpiomm_names[GPIOMM_NGPIO] = {
"Port 2C2", "Port 2C3", "Port 2C4", "Port 2C5", "Port 2C6", "Port 2C7",
};
-static void gpiomm_init_dio(struct i8255 __iomem *const ppi,
- struct i8255_state *const ppi_state)
-{
- const unsigned long ngpio = 24;
- const unsigned long mask = GENMASK(ngpio - 1, 0);
- const unsigned long bits = 0;
- unsigned long i;
-
- /* Initialize all GPIO to output 0 */
- for (i = 0; i < GPIOMM_NUM_PPI; i++) {
- i8255_mode0_output(&ppi[i]);
- i8255_set_multiple(&ppi[i], &ppi_state[i], &mask, &bits, ngpio);
- }
-}
-
static int gpiomm_probe(struct device *dev, unsigned int id)
{
- struct gpiomm_gpio *gpiommgpio;
const char *const name = dev_name(dev);
- int err;
-
- gpiommgpio = devm_kzalloc(dev, sizeof(*gpiommgpio), GFP_KERNEL);
- if (!gpiommgpio)
- return -ENOMEM;
+ struct i8255_regmap_config config = {};
+ void __iomem *regs;
if (!devm_request_region(dev, base[id], GPIOMM_EXTENT, name)) {
dev_err(dev, "Unable to lock port addresses (0x%X-0x%X)\n",
@@ -151,34 +71,20 @@ static int gpiomm_probe(struct device *dev, unsigned int id)
return -EBUSY;
}
- gpiommgpio->ppi = devm_ioport_map(dev, base[id], GPIOMM_EXTENT);
- if (!gpiommgpio->ppi)
+ regs = devm_ioport_map(dev, base[id], GPIOMM_EXTENT);
+ if (!regs)
return -ENOMEM;
- gpiommgpio->chip.label = name;
- gpiommgpio->chip.parent = dev;
- gpiommgpio->chip.owner = THIS_MODULE;
- gpiommgpio->chip.base = -1;
- gpiommgpio->chip.ngpio = GPIOMM_NGPIO;
- gpiommgpio->chip.names = gpiomm_names;
- gpiommgpio->chip.get_direction = gpiomm_gpio_get_direction;
- gpiommgpio->chip.direction_input = gpiomm_gpio_direction_input;
- gpiommgpio->chip.direction_output = gpiomm_gpio_direction_output;
- gpiommgpio->chip.get = gpiomm_gpio_get;
- gpiommgpio->chip.get_multiple = gpiomm_gpio_get_multiple;
- gpiommgpio->chip.set = gpiomm_gpio_set;
- gpiommgpio->chip.set_multiple = gpiomm_gpio_set_multiple;
-
- i8255_state_init(gpiommgpio->ppi_state, GPIOMM_NUM_PPI);
- gpiomm_init_dio(gpiommgpio->ppi, gpiommgpio->ppi_state);
-
- err = devm_gpiochip_add_data(dev, &gpiommgpio->chip, gpiommgpio);
- if (err) {
- dev_err(dev, "GPIO registering failed (%d)\n", err);
- return err;
- }
+ config.map = devm_regmap_init_mmio(dev, regs, &gpiomm_regmap_config);
+ if (IS_ERR(config.map))
+ return dev_err_probe(dev, PTR_ERR(config.map),
+ "Unable to initialize register map\n");
+
+ config.parent = dev;
+ config.num_ppi = GPIOMM_NUM_PPI;
+ config.names = gpiomm_names;
- return 0;
+ return devm_i8255_regmap_register(dev, &config);
}
static struct isa_driver gpiomm_driver = {
diff --git a/drivers/gpio/gpio-i8255.c b/drivers/gpio/gpio-i8255.c
index 9b97db418df1..64ab80fc4a1e 100644
--- a/drivers/gpio/gpio-i8255.c
+++ b/drivers/gpio/gpio-i8255.c
@@ -3,48 +3,43 @@
* Intel 8255 Programmable Peripheral Interface
* Copyright (C) 2022 William Breathitt Gray
*/
-#include <linux/bitmap.h>
+#include <linux/bits.h>
+#include <linux/device.h>
#include <linux/err.h>
#include <linux/export.h>
-#include <linux/io.h>
+#include <linux/gpio/regmap.h>
#include <linux/module.h>
-#include <linux/spinlock.h>
-#include <linux/types.h>
+#include <linux/regmap.h>
#include "gpio-i8255.h"
+#define I8255_NGPIO 24
+#define I8255_NGPIO_PER_REG 8
#define I8255_CONTROL_PORTC_LOWER_DIRECTION BIT(0)
#define I8255_CONTROL_PORTB_DIRECTION BIT(1)
#define I8255_CONTROL_PORTC_UPPER_DIRECTION BIT(3)
#define I8255_CONTROL_PORTA_DIRECTION BIT(4)
#define I8255_CONTROL_MODE_SET BIT(7)
-#define I8255_PORTA 0
-#define I8255_PORTB 1
-#define I8255_PORTC 2
-
-static int i8255_get_port(struct i8255 __iomem *const ppi,
- const unsigned long io_port, const unsigned long mask)
-{
- const unsigned long bank = io_port / 3;
- const unsigned long ppi_port = io_port % 3;
-
- return ioread8(&ppi[bank].port[ppi_port]) & mask;
-}
-
-static u8 i8255_direction_mask(const unsigned long offset)
+#define I8255_PORTA 0x0
+#define I8255_PORTB 0x1
+#define I8255_PORTC 0x2
+#define I8255_CONTROL 0x3
+#define I8255_REG_DAT_BASE I8255_PORTA
+#define I8255_REG_DIR_IN_BASE I8255_CONTROL
+
+static int i8255_direction_mask(const unsigned int offset)
{
- const unsigned long port_offset = offset % 8;
- const unsigned long io_port = offset / 8;
- const unsigned long ppi_port = io_port % 3;
+ const unsigned int stride = offset / I8255_NGPIO_PER_REG;
+ const unsigned int line = offset % I8255_NGPIO_PER_REG;
- switch (ppi_port) {
+ switch (stride) {
case I8255_PORTA:
return I8255_CONTROL_PORTA_DIRECTION;
case I8255_PORTB:
return I8255_CONTROL_PORTB_DIRECTION;
case I8255_PORTC:
/* Port C can be configured by nibble */
- if (port_offset >= 4)
+ if (line >= 4)
return I8255_CONTROL_PORTC_UPPER_DIRECTION;
return I8255_CONTROL_PORTC_LOWER_DIRECTION;
default:
@@ -53,234 +48,93 @@ static u8 i8255_direction_mask(const unsigned long offset)
}
}
-static void i8255_set_port(struct i8255 __iomem *const ppi,
- struct i8255_state *const state,
- const unsigned long io_port,
- const unsigned long mask, const unsigned long bits)
+static int i8255_ppi_init(struct regmap *const map, const unsigned int base)
{
- const unsigned long bank = io_port / 3;
- const unsigned long ppi_port = io_port % 3;
- unsigned long flags;
- unsigned long out_state;
-
- spin_lock_irqsave(&state[bank].lock, flags);
-
- out_state = ioread8(&ppi[bank].port[ppi_port]);
- out_state = (out_state & ~mask) | (bits & mask);
- iowrite8(out_state, &ppi[bank].port[ppi_port]);
-
- spin_unlock_irqrestore(&state[bank].lock, flags);
+ int err;
+
+ /* Configure all ports to MODE 0 output mode */
+ err = regmap_write(map, base + I8255_CONTROL, I8255_CONTROL_MODE_SET);
+ if (err)
+ return err;
+
+ /* Initialize all GPIO to output 0 */
+ err = regmap_write(map, base + I8255_PORTA, 0x00);
+ if (err)
+ return err;
+ err = regmap_write(map, base + I8255_PORTB, 0x00);
+ if (err)
+ return err;
+ return regmap_write(map, base + I8255_PORTC, 0x00);
}
-/**
- * i8255_direction_input - configure signal offset as input
- * @ppi: Intel 8255 Programmable Peripheral Interface banks
- * @state: devices states of the respective PPI banks
- * @offset: signal offset to configure as input
- *
- * Configures a signal @offset as input for the respective Intel 8255
- * Programmable Peripheral Interface (@ppi) banks. The @state control_state
- * values are updated to reflect the new configuration.
- */
-void i8255_direction_input(struct i8255 __iomem *const ppi,
- struct i8255_state *const state,
- const unsigned long offset)
+static int i8255_reg_mask_xlate(struct gpio_regmap *gpio, unsigned int base,
+ unsigned int offset, unsigned int *reg,
+ unsigned int *mask)
{
- const unsigned long io_port = offset / 8;
- const unsigned long bank = io_port / 3;
- unsigned long flags;
-
- spin_lock_irqsave(&state[bank].lock, flags);
-
- state[bank].control_state |= I8255_CONTROL_MODE_SET;
- state[bank].control_state |= i8255_direction_mask(offset);
-
- iowrite8(state[bank].control_state, &ppi[bank].control);
-
- spin_unlock_irqrestore(&state[bank].lock, flags);
-}
-EXPORT_SYMBOL_NS_GPL(i8255_direction_input, I8255);
-
-/**
- * i8255_direction_output - configure signal offset as output
- * @ppi: Intel 8255 Programmable Peripheral Interface banks
- * @state: devices states of the respective PPI banks
- * @offset: signal offset to configure as output
- * @value: signal value to output
- *
- * Configures a signal @offset as output for the respective Intel 8255
- * Programmable Peripheral Interface (@ppi) banks and sets the respective signal
- * output to the desired @value. The @state control_state values are updated to
- * reflect the new configuration.
- */
-void i8255_direction_output(struct i8255 __iomem *const ppi,
- struct i8255_state *const state,
- const unsigned long offset,
- const unsigned long value)
-{
- const unsigned long io_port = offset / 8;
- const unsigned long bank = io_port / 3;
- unsigned long flags;
-
- spin_lock_irqsave(&state[bank].lock, flags);
-
- state[bank].control_state |= I8255_CONTROL_MODE_SET;
- state[bank].control_state &= ~i8255_direction_mask(offset);
-
- iowrite8(state[bank].control_state, &ppi[bank].control);
-
- spin_unlock_irqrestore(&state[bank].lock, flags);
-
- i8255_set(ppi, state, offset, value);
-}
-EXPORT_SYMBOL_NS_GPL(i8255_direction_output, I8255);
-
-/**
- * i8255_get - get signal value at signal offset
- * @ppi: Intel 8255 Programmable Peripheral Interface banks
- * @offset: offset of signal to get
- *
- * Returns the signal value (0=low, 1=high) for the signal at @offset for the
- * respective Intel 8255 Programmable Peripheral Interface (@ppi) banks.
- */
-int i8255_get(struct i8255 __iomem *const ppi, const unsigned long offset)
-{
- const unsigned long io_port = offset / 8;
- const unsigned long offset_mask = BIT(offset % 8);
-
- return !!i8255_get_port(ppi, io_port, offset_mask);
-}
-EXPORT_SYMBOL_NS_GPL(i8255_get, I8255);
-
-/**
- * i8255_get_direction - get the I/O direction for a signal offset
- * @state: devices states of the respective PPI banks
- * @offset: offset of signal to get direction
- *
- * Returns the signal direction (0=output, 1=input) for the signal at @offset.
- */
-int i8255_get_direction(const struct i8255_state *const state,
- const unsigned long offset)
-{
- const unsigned long io_port = offset / 8;
- const unsigned long bank = io_port / 3;
-
- return !!(state[bank].control_state & i8255_direction_mask(offset));
-}
-EXPORT_SYMBOL_NS_GPL(i8255_get_direction, I8255);
-
-/**
- * i8255_get_multiple - get multiple signal values at multiple signal offsets
- * @ppi: Intel 8255 Programmable Peripheral Interface banks
- * @mask: mask of signals to get
- * @bits: bitmap to store signal values
- * @ngpio: number of GPIO signals of the respective PPI banks
- *
- * Stores in @bits the values (0=low, 1=high) for the signals defined by @mask
- * for the respective Intel 8255 Programmable Peripheral Interface (@ppi) banks.
- */
-void i8255_get_multiple(struct i8255 __iomem *const ppi,
- const unsigned long *const mask,
- unsigned long *const bits, const unsigned long ngpio)
-{
- unsigned long offset;
- unsigned long port_mask;
- unsigned long io_port;
- unsigned long port_state;
-
- bitmap_zero(bits, ngpio);
-
- for_each_set_clump8(offset, port_mask, mask, ngpio) {
- io_port = offset / 8;
- port_state = i8255_get_port(ppi, io_port, port_mask);
-
- bitmap_set_value8(bits, port_state, offset);
+ const unsigned int ppi = offset / I8255_NGPIO;
+ const unsigned int ppi_offset = offset % I8255_NGPIO;
+ const unsigned int stride = ppi_offset / I8255_NGPIO_PER_REG;
+ const unsigned int line = ppi_offset % I8255_NGPIO_PER_REG;
+
+ switch (base) {
+ case I8255_REG_DAT_BASE:
+ *reg = base + stride + ppi * 4;
+ *mask = BIT(line);
+ return 0;
+ case I8255_REG_DIR_IN_BASE:
+ *reg = base + ppi * 4;
+ *mask = i8255_direction_mask(ppi_offset);
+ return 0;
+ default:
+ /* Should never reach this path */
+ return -EINVAL;
}
}
-EXPORT_SYMBOL_NS_GPL(i8255_get_multiple, I8255);
/**
- * i8255_mode0_output - configure all PPI ports to MODE 0 output mode
- * @ppi: Intel 8255 Programmable Peripheral Interface bank
+ * devm_i8255_regmap_register - Register an i8255 GPIO controller
+ * @dev: device that is registering this i8255 GPIO device
+ * @config: configuration for i8255_regmap_config
*
- * Configures all Intel 8255 Programmable Peripheral Interface (@ppi) ports to
- * MODE 0 (Basic Input/Output) output mode.
+ * Registers an Intel 8255 Programmable Peripheral Interface GPIO controller.
+ * Returns 0 on success and negative error number on failure.
*/
-void i8255_mode0_output(struct i8255 __iomem *const ppi)
+int devm_i8255_regmap_register(struct device *const dev,
+ const struct i8255_regmap_config *const config)
{
- iowrite8(I8255_CONTROL_MODE_SET, &ppi->control);
-}
-EXPORT_SYMBOL_NS_GPL(i8255_mode0_output, I8255);
+ struct gpio_regmap_config gpio_config = {0};
+ unsigned long i;
+ int err;
-/**
- * i8255_set - set signal value at signal offset
- * @ppi: Intel 8255 Programmable Peripheral Interface banks
- * @state: devices states of the respective PPI banks
- * @offset: offset of signal to set
- * @value: value of signal to set
- *
- * Assigns output @value for the signal at @offset for the respective Intel 8255
- * Programmable Peripheral Interface (@ppi) banks.
- */
-void i8255_set(struct i8255 __iomem *const ppi, struct i8255_state *const state,
- const unsigned long offset, const unsigned long value)
-{
- const unsigned long io_port = offset / 8;
- const unsigned long port_offset = offset % 8;
- const unsigned long mask = BIT(port_offset);
- const unsigned long bits = value << port_offset;
+ if (!config->parent)
+ return -EINVAL;
- i8255_set_port(ppi, state, io_port, mask, bits);
-}
-EXPORT_SYMBOL_NS_GPL(i8255_set, I8255);
+ if (!config->map)
+ return -EINVAL;
-/**
- * i8255_set_multiple - set signal values at multiple signal offsets
- * @ppi: Intel 8255 Programmable Peripheral Interface banks
- * @state: devices states of the respective PPI banks
- * @mask: mask of signals to set
- * @bits: bitmap of signal output values
- * @ngpio: number of GPIO signals of the respective PPI banks
- *
- * Assigns output values defined by @bits for the signals defined by @mask for
- * the respective Intel 8255 Programmable Peripheral Interface (@ppi) banks.
- */
-void i8255_set_multiple(struct i8255 __iomem *const ppi,
- struct i8255_state *const state,
- const unsigned long *const mask,
- const unsigned long *const bits,
- const unsigned long ngpio)
-{
- unsigned long offset;
- unsigned long port_mask;
- unsigned long io_port;
- unsigned long value;
+ if (!config->num_ppi)
+ return -EINVAL;
- for_each_set_clump8(offset, port_mask, mask, ngpio) {
- io_port = offset / 8;
- value = bitmap_get_value8(bits, offset);
- i8255_set_port(ppi, state, io_port, port_mask, value);
+ for (i = 0; i < config->num_ppi; i++) {
+ err = i8255_ppi_init(config->map, i * 4);
+ if (err)
+ return err;
}
-}
-EXPORT_SYMBOL_NS_GPL(i8255_set_multiple, I8255);
-
-/**
- * i8255_state_init - initialize i8255_state structure
- * @state: devices states of the respective PPI banks
- * @nbanks: number of Intel 8255 Programmable Peripheral Interface banks
- *
- * Initializes the @state of each Intel 8255 Programmable Peripheral Interface
- * bank for use in i8255 library functions.
- */
-void i8255_state_init(struct i8255_state *const state,
- const unsigned long nbanks)
-{
- unsigned long bank;
- for (bank = 0; bank < nbanks; bank++)
- spin_lock_init(&state[bank].lock);
+ gpio_config.parent = config->parent;
+ gpio_config.regmap = config->map;
+ gpio_config.ngpio = I8255_NGPIO * config->num_ppi;
+ gpio_config.names = config->names;
+ gpio_config.reg_dat_base = GPIO_REGMAP_ADDR(I8255_REG_DAT_BASE);
+ gpio_config.reg_set_base = GPIO_REGMAP_ADDR(I8255_REG_DAT_BASE);
+ gpio_config.reg_dir_in_base = GPIO_REGMAP_ADDR(I8255_REG_DIR_IN_BASE);
+ gpio_config.ngpio_per_reg = I8255_NGPIO_PER_REG;
+ gpio_config.irq_domain = config->domain;
+ gpio_config.reg_mask_xlate = i8255_reg_mask_xlate;
+
+ return PTR_ERR_OR_ZERO(devm_gpio_regmap_register(dev, &gpio_config));
}
-EXPORT_SYMBOL_NS_GPL(i8255_state_init, I8255);
+EXPORT_SYMBOL_NS_GPL(devm_i8255_regmap_register, I8255);
MODULE_AUTHOR("William Breathitt Gray");
MODULE_DESCRIPTION("Intel 8255 Programmable Peripheral Interface");
diff --git a/drivers/gpio/gpio-i8255.h b/drivers/gpio/gpio-i8255.h
index d9084aae9446..9dcf639b94df 100644
--- a/drivers/gpio/gpio-i8255.h
+++ b/drivers/gpio/gpio-i8255.h
@@ -3,44 +3,32 @@
#ifndef _I8255_H_
#define _I8255_H_
-#include <linux/spinlock.h>
-#include <linux/types.h>
+struct device;
+struct irq_domain;
+struct regmap;
-/**
- * struct i8255 - Intel 8255 register structure
- * @port: Port A, B, and C
- * @control: Control register
- */
-struct i8255 {
- u8 port[3];
- u8 control;
-};
+#define i8255_volatile_regmap_range(_base) regmap_reg_range(_base, _base + 0x2)
/**
- * struct i8255_state - Intel 8255 state structure
- * @lock: synchronization lock for accessing device state
- * @control_state: Control register state
+ * struct i8255_regmap_config - Configuration for the register map of an i8255
+ * @parent: parent device
+ * @map: regmap for the i8255
+ * @num_ppi: number of i8255 Programmable Peripheral Interface
+ * @names: (optional) array of names for gpios
+ * @domain: (optional) IRQ domain if the controller is interrupt-capable
+ *
+ * Note: The regmap is expected to have cache enabled and i8255 control
+ * registers not marked as volatile.
*/
-struct i8255_state {
- spinlock_t lock;
- u8 control_state;
+struct i8255_regmap_config {
+ struct device *parent;
+ struct regmap *map;
+ int num_ppi;
+ const char *const *names;
+ struct irq_domain *domain;
};
-void i8255_direction_input(struct i8255 __iomem *ppi, struct i8255_state *state,
- unsigned long offset);
-void i8255_direction_output(struct i8255 __iomem *ppi,
- struct i8255_state *state, unsigned long offset,
- unsigned long value);
-int i8255_get(struct i8255 __iomem *ppi, unsigned long offset);
-int i8255_get_direction(const struct i8255_state *state, unsigned long offset);
-void i8255_get_multiple(struct i8255 __iomem *ppi, const unsigned long *mask,
- unsigned long *bits, unsigned long ngpio);
-void i8255_mode0_output(struct i8255 __iomem *const ppi);
-void i8255_set(struct i8255 __iomem *ppi, struct i8255_state *state,
- unsigned long offset, unsigned long value);
-void i8255_set_multiple(struct i8255 __iomem *ppi, struct i8255_state *state,
- const unsigned long *mask, const unsigned long *bits,
- unsigned long ngpio);
-void i8255_state_init(struct i8255_state *const state, unsigned long nbanks);
+int devm_i8255_regmap_register(struct device *dev,
+ const struct i8255_regmap_config *config);
#endif /* _I8255_H_ */
diff --git a/drivers/gpio/gpio-iop.c b/drivers/gpio/gpio-iop.c
deleted file mode 100644
index 7390b5ca09e3..000000000000
--- a/drivers/gpio/gpio-iop.c
+++ /dev/null
@@ -1,59 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * arch/arm/plat-iop/gpio.c
- * GPIO handling for Intel IOP3xx processors.
- *
- * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
- */
-
-#include <linux/err.h>
-#include <linux/module.h>
-#include <linux/gpio/driver.h>
-#include <linux/platform_device.h>
-
-#define IOP3XX_GPOE 0x0000
-#define IOP3XX_GPID 0x0004
-#define IOP3XX_GPOD 0x0008
-
-static int iop3xx_gpio_probe(struct platform_device *pdev)
-{
- struct gpio_chip *gc;
- void __iomem *base;
- int err;
-
- gc = devm_kzalloc(&pdev->dev, sizeof(*gc), GFP_KERNEL);
- if (!gc)
- return -ENOMEM;
-
- base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(base))
- return PTR_ERR(base);
-
- err = bgpio_init(gc, &pdev->dev, 1, base + IOP3XX_GPID,
- base + IOP3XX_GPOD, NULL, NULL, base + IOP3XX_GPOE, 0);
- if (err)
- return err;
-
- gc->base = 0;
- gc->owner = THIS_MODULE;
- gc->label = "gpio-iop";
-
- return devm_gpiochip_add_data(&pdev->dev, gc, NULL);
-}
-
-static struct platform_driver iop3xx_gpio_driver = {
- .driver = {
- .name = "gpio-iop",
- },
- .probe = iop3xx_gpio_probe,
-};
-
-static int __init iop3xx_gpio_init(void)
-{
- return platform_driver_register(&iop3xx_gpio_driver);
-}
-arch_initcall(iop3xx_gpio_init);
-
-MODULE_DESCRIPTION("GPIO handling for Intel IOP3xx processors");
-MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>");
-MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpio-msc313.c b/drivers/gpio/gpio-msc313.c
index 52d7b8d99170..b0773e5652fa 100644
--- a/drivers/gpio/gpio-msc313.c
+++ b/drivers/gpio/gpio-msc313.c
@@ -655,11 +655,6 @@ static int msc313_gpio_probe(struct platform_device *pdev)
return devm_gpiochip_add_data(dev, gpiochip, gpio);
}
-static int msc313_gpio_remove(struct platform_device *pdev)
-{
- return 0;
-}
-
static const struct of_device_id msc313_gpio_of_match[] = {
#ifdef CONFIG_MACH_INFINITY
{
@@ -710,6 +705,5 @@ static struct platform_driver msc313_gpio_driver = {
.pm = &msc313_gpio_ops,
},
.probe = msc313_gpio_probe,
- .remove = msc313_gpio_remove,
};
builtin_platform_driver(msc313_gpio_driver);
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index 91a4232ee58c..a68f682aec01 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -1002,7 +1002,7 @@ static int mvebu_gpio_suspend(struct platform_device *pdev, pm_message_t state)
BUG();
}
- if (IS_ENABLED(CONFIG_PWM))
+ if (IS_REACHABLE(CONFIG_PWM))
mvebu_pwm_suspend(mvchip);
return 0;
@@ -1054,7 +1054,7 @@ static int mvebu_gpio_resume(struct platform_device *pdev)
BUG();
}
- if (IS_ENABLED(CONFIG_PWM))
+ if (IS_REACHABLE(CONFIG_PWM))
mvebu_pwm_resume(mvchip);
return 0;
@@ -1228,7 +1228,7 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
devm_gpiochip_add_data(&pdev->dev, &mvchip->chip, mvchip);
/* Some MVEBU SoCs have simple PWM support for GPIO lines */
- if (IS_ENABLED(CONFIG_PWM)) {
+ if (IS_REACHABLE(CONFIG_PWM)) {
err = mvebu_pwm_probe(pdev, mvchip, id);
if (err)
return err;
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index 80ddc43fd875..f5f3d4b22452 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -1020,7 +1020,7 @@ static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc,
if (!label)
return -ENOMEM;
bank->chip.label = label;
- bank->chip.base = gpio;
+ bank->chip.base = -1;
}
bank->chip.ngpio = bank->width;
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index 5299e5bb76d6..1286b22ef23a 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -306,34 +306,31 @@ static bool pca953x_check_register(struct pca953x_chip *chip, unsigned int reg,
static bool pcal6534_check_register(struct pca953x_chip *chip, unsigned int reg,
u32 checkbank)
{
+ int bank_shift;
int bank;
int offset;
- if (reg >= 0x30) {
- /*
- * Reserved block between 14h and 2Fh does not align on
- * expected bank boundaries like other devices.
- */
- int temp = reg - 0x30;
-
- bank = temp / NBANK(chip);
- offset = temp - (bank * NBANK(chip));
- bank += 8;
- } else if (reg >= 0x54) {
+ if (reg >= 0x54) {
/*
* Handle lack of reserved registers after output port
* configuration register to form a bank.
*/
- int temp = reg - 0x54;
-
- bank = temp / NBANK(chip);
- offset = temp - (bank * NBANK(chip));
- bank += 16;
+ reg -= 0x54;
+ bank_shift = 16;
+ } else if (reg >= 0x30) {
+ /*
+ * Reserved block between 14h and 2Fh does not align on
+ * expected bank boundaries like other devices.
+ */
+ reg -= 0x30;
+ bank_shift = 8;
} else {
- bank = reg / NBANK(chip);
- offset = reg - (bank * NBANK(chip));
+ bank_shift = 0;
}
+ bank = bank_shift + reg / NBANK(chip);
+ offset = reg % NBANK(chip);
+
/* Register is not in the matching bank. */
if (!(BIT(bank) & checkbank))
return false;
@@ -464,7 +461,6 @@ static u8 pcal6534_recalc_addr(struct pca953x_chip *chip, int reg, int off)
case PCAL953X_PULL_SEL:
case PCAL953X_INT_MASK:
case PCAL953X_INT_STAT:
- case PCAL953X_OUT_CONF:
pinctrl = ((reg & PCAL_PINCTRL_MASK) >> 1) + 0x20;
break;
case PCAL6524_INT_EDGE:
diff --git a/drivers/gpio/gpio-pca9570.c b/drivers/gpio/gpio-pca9570.c
index 6c07a8811a7a..6a5a8e593ed5 100644
--- a/drivers/gpio/gpio-pca9570.c
+++ b/drivers/gpio/gpio-pca9570.c
@@ -18,11 +18,11 @@
#define SLG7XL45106_GPO_REG 0xDB
/**
- * struct pca9570_platform_data - GPIO platformdata
+ * struct pca9570_chip_data - GPIO platformdata
* @ngpio: no of gpios
* @command: Command to be sent
*/
-struct pca9570_platform_data {
+struct pca9570_chip_data {
u16 ngpio;
u32 command;
};
@@ -36,7 +36,7 @@ struct pca9570_platform_data {
*/
struct pca9570 {
struct gpio_chip chip;
- const struct pca9570_platform_data *p_data;
+ const struct pca9570_chip_data *chip_data;
struct mutex lock;
u8 out;
};
@@ -46,8 +46,8 @@ static int pca9570_read(struct pca9570 *gpio, u8 *value)
struct i2c_client *client = to_i2c_client(gpio->chip.parent);
int ret;
- if (gpio->p_data->command != 0)
- ret = i2c_smbus_read_byte_data(client, gpio->p_data->command);
+ if (gpio->chip_data->command != 0)
+ ret = i2c_smbus_read_byte_data(client, gpio->chip_data->command);
else
ret = i2c_smbus_read_byte(client);
@@ -62,8 +62,8 @@ static int pca9570_write(struct pca9570 *gpio, u8 value)
{
struct i2c_client *client = to_i2c_client(gpio->chip.parent);
- if (gpio->p_data->command != 0)
- return i2c_smbus_write_byte_data(client, gpio->p_data->command, value);
+ if (gpio->chip_data->command != 0)
+ return i2c_smbus_write_byte_data(client, gpio->chip_data->command, value);
return i2c_smbus_write_byte(client, value);
}
@@ -127,8 +127,8 @@ static int pca9570_probe(struct i2c_client *client)
gpio->chip.get = pca9570_get;
gpio->chip.set = pca9570_set;
gpio->chip.base = -1;
- gpio->p_data = device_get_match_data(&client->dev);
- gpio->chip.ngpio = gpio->p_data->ngpio;
+ gpio->chip_data = device_get_match_data(&client->dev);
+ gpio->chip.ngpio = gpio->chip_data->ngpio;
gpio->chip.can_sleep = true;
mutex_init(&gpio->lock);
@@ -141,15 +141,15 @@ static int pca9570_probe(struct i2c_client *client)
return devm_gpiochip_add_data(&client->dev, &gpio->chip, gpio);
}
-static const struct pca9570_platform_data pca9570_gpio = {
+static const struct pca9570_chip_data pca9570_gpio = {
.ngpio = 4,
};
-static const struct pca9570_platform_data pca9571_gpio = {
+static const struct pca9570_chip_data pca9571_gpio = {
.ngpio = 8,
};
-static const struct pca9570_platform_data slg7xl45106_gpio = {
+static const struct pca9570_chip_data slg7xl45106_gpio = {
.ngpio = 8,
.command = SLG7XL45106_GPO_REG,
};
diff --git a/drivers/gpio/gpio-pcf857x.c b/drivers/gpio/gpio-pcf857x.c
index cec2f2c78255..3de1d3ad7472 100644
--- a/drivers/gpio/gpio-pcf857x.c
+++ b/drivers/gpio/gpio-pcf857x.c
@@ -7,18 +7,16 @@
#include <linux/gpio/driver.h>
#include <linux/i2c.h>
-#include <linux/platform_data/pcf857x.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/property.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
-
static const struct i2c_device_id pcf857x_id[] = {
{ "pcf8574", 8 },
{ "pcf8574a", 8 },
@@ -37,7 +35,6 @@ static const struct i2c_device_id pcf857x_id[] = {
};
MODULE_DEVICE_TABLE(i2c, pcf857x_id);
-#ifdef CONFIG_OF
static const struct of_device_id pcf857x_of_table[] = {
{ .compatible = "nxp,pcf8574" },
{ .compatible = "nxp,pcf8574a" },
@@ -55,7 +52,6 @@ static const struct of_device_id pcf857x_of_table[] = {
{ }
};
MODULE_DEVICE_TABLE(of, pcf857x_of_table);
-#endif
/*
* The pcf857x, pca857x, and pca967x chips only expose one read and one
@@ -73,11 +69,11 @@ struct pcf857x {
struct gpio_chip chip;
struct i2c_client *client;
struct mutex lock; /* protect 'out' */
- unsigned out; /* software latch */
- unsigned status; /* current status */
- unsigned irq_enabled; /* enabled irqs */
+ unsigned int out; /* software latch */
+ unsigned int status; /* current status */
+ unsigned int irq_enabled; /* enabled irqs */
- int (*write)(struct i2c_client *client, unsigned data);
+ int (*write)(struct i2c_client *client, unsigned int data);
int (*read)(struct i2c_client *client);
};
@@ -85,19 +81,19 @@ struct pcf857x {
/* Talk to 8-bit I/O expander */
-static int i2c_write_le8(struct i2c_client *client, unsigned data)
+static int i2c_write_le8(struct i2c_client *client, unsigned int data)
{
return i2c_smbus_write_byte(client, data);
}
static int i2c_read_le8(struct i2c_client *client)
{
- return (int)i2c_smbus_read_byte(client);
+ return i2c_smbus_read_byte(client);
}
/* Talk to 16-bit I/O expander */
-static int i2c_write_le16(struct i2c_client *client, unsigned word)
+static int i2c_write_le16(struct i2c_client *client, unsigned int word)
{
u8 buf[2] = { word & 0xff, word >> 8, };
int status;
@@ -119,10 +115,10 @@ static int i2c_read_le16(struct i2c_client *client)
/*-------------------------------------------------------------------------*/
-static int pcf857x_input(struct gpio_chip *chip, unsigned offset)
+static int pcf857x_input(struct gpio_chip *chip, unsigned int offset)
{
- struct pcf857x *gpio = gpiochip_get_data(chip);
- int status;
+ struct pcf857x *gpio = gpiochip_get_data(chip);
+ int status;
mutex_lock(&gpio->lock);
gpio->out |= (1 << offset);
@@ -132,20 +128,35 @@ static int pcf857x_input(struct gpio_chip *chip, unsigned offset)
return status;
}
-static int pcf857x_get(struct gpio_chip *chip, unsigned offset)
+static int pcf857x_get(struct gpio_chip *chip, unsigned int offset)
{
- struct pcf857x *gpio = gpiochip_get_data(chip);
- int value;
+ struct pcf857x *gpio = gpiochip_get_data(chip);
+ int value;
value = gpio->read(gpio->client);
return (value < 0) ? value : !!(value & (1 << offset));
}
-static int pcf857x_output(struct gpio_chip *chip, unsigned offset, int value)
+static int pcf857x_get_multiple(struct gpio_chip *chip, unsigned long *mask,
+ unsigned long *bits)
{
- struct pcf857x *gpio = gpiochip_get_data(chip);
- unsigned bit = 1 << offset;
- int status;
+ struct pcf857x *gpio = gpiochip_get_data(chip);
+ int value = gpio->read(gpio->client);
+
+ if (value < 0)
+ return value;
+
+ *bits &= ~*mask;
+ *bits |= value & *mask;
+
+ return 0;
+}
+
+static int pcf857x_output(struct gpio_chip *chip, unsigned int offset, int value)
+{
+ struct pcf857x *gpio = gpiochip_get_data(chip);
+ unsigned int bit = 1 << offset;
+ int status;
mutex_lock(&gpio->lock);
if (value)
@@ -158,16 +169,28 @@ static int pcf857x_output(struct gpio_chip *chip, unsigned offset, int value)
return status;
}
-static void pcf857x_set(struct gpio_chip *chip, unsigned offset, int value)
+static void pcf857x_set(struct gpio_chip *chip, unsigned int offset, int value)
{
pcf857x_output(chip, offset, value);
}
+static void pcf857x_set_multiple(struct gpio_chip *chip, unsigned long *mask,
+ unsigned long *bits)
+{
+ struct pcf857x *gpio = gpiochip_get_data(chip);
+
+ mutex_lock(&gpio->lock);
+ gpio->out &= ~*mask;
+ gpio->out |= *bits & *mask;
+ gpio->write(gpio->client, gpio->out);
+ mutex_unlock(&gpio->lock);
+}
+
/*-------------------------------------------------------------------------*/
static irqreturn_t pcf857x_irq(int irq, void *data)
{
- struct pcf857x *gpio = data;
+ struct pcf857x *gpio = data;
unsigned long change, i, status;
status = gpio->read(gpio->client);
@@ -250,18 +273,11 @@ static const struct irq_chip pcf857x_irq_chip = {
static int pcf857x_probe(struct i2c_client *client)
{
const struct i2c_device_id *id = i2c_client_get_device_id(client);
- struct pcf857x_platform_data *pdata = dev_get_platdata(&client->dev);
- struct device_node *np = client->dev.of_node;
- struct pcf857x *gpio;
- unsigned int n_latch = 0;
- int status;
-
- if (IS_ENABLED(CONFIG_OF) && np)
- of_property_read_u32(np, "lines-initial-states", &n_latch);
- else if (pdata)
- n_latch = pdata->n_latch;
- else
- dev_dbg(&client->dev, "no platform data\n");
+ struct pcf857x *gpio;
+ unsigned int n_latch = 0;
+ int status;
+
+ device_property_read_u32(&client->dev, "lines-initial-states", &n_latch);
/* Allocate, initialize, and register this gpio_chip. */
gpio = devm_kzalloc(&client->dev, sizeof(*gpio), GFP_KERNEL);
@@ -270,12 +286,14 @@ static int pcf857x_probe(struct i2c_client *client)
mutex_init(&gpio->lock);
- gpio->chip.base = pdata ? pdata->gpio_base : -1;
+ gpio->chip.base = -1;
gpio->chip.can_sleep = true;
gpio->chip.parent = &client->dev;
gpio->chip.owner = THIS_MODULE;
gpio->chip.get = pcf857x_get;
+ gpio->chip.get_multiple = pcf857x_get_multiple;
gpio->chip.set = pcf857x_set;
+ gpio->chip.set_multiple = pcf857x_set_multiple;
gpio->chip.direction_input = pcf857x_input;
gpio->chip.direction_output = pcf857x_output;
gpio->chip.ngpio = id->driver_data;
@@ -377,17 +395,6 @@ static int pcf857x_probe(struct i2c_client *client)
if (status < 0)
goto fail;
- /* Let platform code set up the GPIOs and their users.
- * Now is the first time anyone could use them.
- */
- if (pdata && pdata->setup) {
- status = pdata->setup(client,
- gpio->chip.base, gpio->chip.ngpio,
- pdata->context);
- if (status < 0)
- dev_warn(&client->dev, "setup --> %d\n", status);
- }
-
dev_info(&client->dev, "probed\n");
return 0;
@@ -399,16 +406,6 @@ fail:
return status;
}
-static void pcf857x_remove(struct i2c_client *client)
-{
- struct pcf857x_platform_data *pdata = dev_get_platdata(&client->dev);
- struct pcf857x *gpio = i2c_get_clientdata(client);
-
- if (pdata && pdata->teardown)
- pdata->teardown(client, gpio->chip.base, gpio->chip.ngpio,
- pdata->context);
-}
-
static void pcf857x_shutdown(struct i2c_client *client)
{
struct pcf857x *gpio = i2c_get_clientdata(client);
@@ -420,10 +417,9 @@ static void pcf857x_shutdown(struct i2c_client *client)
static struct i2c_driver pcf857x_driver = {
.driver = {
.name = "pcf857x",
- .of_match_table = of_match_ptr(pcf857x_of_table),
+ .of_match_table = pcf857x_of_table,
},
.probe_new = pcf857x_probe,
- .remove = pcf857x_remove,
.shutdown = pcf857x_shutdown,
.id_table = pcf857x_id,
};
diff --git a/drivers/gpio/gpio-regmap.c b/drivers/gpio/gpio-regmap.c
index 6383136cbe59..fca17d478984 100644
--- a/drivers/gpio/gpio-regmap.c
+++ b/drivers/gpio/gpio-regmap.c
@@ -111,6 +111,11 @@ static int gpio_regmap_get_direction(struct gpio_chip *chip,
unsigned int base, val, reg, mask;
int invert, ret;
+ if (gpio->reg_dat_base && !gpio->reg_set_base)
+ return GPIO_LINE_DIRECTION_IN;
+ if (gpio->reg_set_base && !gpio->reg_dat_base)
+ return GPIO_LINE_DIRECTION_OUT;
+
if (gpio->reg_dir_out_base) {
base = gpio_regmap_addr(gpio->reg_dir_out_base);
invert = 0;
@@ -249,15 +254,7 @@ struct gpio_regmap *gpio_regmap_register(const struct gpio_regmap_config *config
chip->ngpio = config->ngpio;
chip->names = config->names;
chip->label = config->label ?: dev_name(config->parent);
-
- /*
- * If our regmap is fast_io we should probably set can_sleep to false.
- * Right now, the regmap doesn't save this property, nor is there any
- * access function for it.
- * The only regmap type which uses fast_io is regmap-mmio. For now,
- * assume a safe default of true here.
- */
- chip->can_sleep = true;
+ chip->can_sleep = regmap_might_sleep(config->regmap);
chip->get = gpio_regmap_get;
if (gpio->reg_set_base && gpio->reg_clr_base)
@@ -265,8 +262,8 @@ struct gpio_regmap *gpio_regmap_register(const struct gpio_regmap_config *config
else if (gpio->reg_set_base)
chip->set = gpio_regmap_set;
+ chip->get_direction = gpio_regmap_get_direction;
if (gpio->reg_dir_in_base || gpio->reg_dir_out_base) {
- chip->get_direction = gpio_regmap_get_direction;
chip->direction_input = gpio_regmap_direction_input;
chip->direction_output = gpio_regmap_direction_output;
}
diff --git a/drivers/gpio/gpio-rockchip.c b/drivers/gpio/gpio-rockchip.c
index 200e43a6f4b4..e5de15a2ab9a 100644
--- a/drivers/gpio/gpio-rockchip.c
+++ b/drivers/gpio/gpio-rockchip.c
@@ -299,7 +299,7 @@ static int rockchip_gpio_set_config(struct gpio_chip *gc, unsigned int offset,
}
/*
- * gpiolib gpio_to_irq callback function. Creates a mapping between a GPIO pin
+ * gpiod_to_irq() callback function. Creates a mapping between a GPIO pin
* and a virtual IRQ, if not already present.
*/
static int rockchip_gpio_to_irq(struct gpio_chip *gc, unsigned int offset)
diff --git a/drivers/gpio/gpio-sim.c b/drivers/gpio/gpio-sim.c
index 60514bc5454f..e5dfd636c63c 100644
--- a/drivers/gpio/gpio-sim.c
+++ b/drivers/gpio/gpio-sim.c
@@ -377,8 +377,8 @@ static int gpio_sim_add_bank(struct fwnode_handle *swnode, struct device *dev)
ret = fwnode_property_read_string(swnode, "gpio-sim,label", &label);
if (ret) {
- label = devm_kasprintf(dev, GFP_KERNEL, "%s-%s",
- dev_name(dev), fwnode_get_name(swnode));
+ label = devm_kasprintf(dev, GFP_KERNEL, "%s-%pfwP",
+ dev_name(dev), swnode);
if (!label)
return -ENOMEM;
}
@@ -736,7 +736,7 @@ static void gpio_sim_remove_hogs(struct gpio_sim_device *dev)
gpiod_remove_hogs(dev->hogs);
- for (hog = dev->hogs; !hog->chip_label; hog++) {
+ for (hog = dev->hogs; hog->chip_label; hog++) {
kfree(hog->chip_label);
kfree(hog->line_name);
}
@@ -784,10 +784,9 @@ static int gpio_sim_add_hogs(struct gpio_sim_device *dev)
GFP_KERNEL);
else
hog->chip_label = kasprintf(GFP_KERNEL,
- "gpio-sim.%u-%s",
+ "gpio-sim.%u-%pfwP",
dev->id,
- fwnode_get_name(
- bank->swnode));
+ bank->swnode);
if (!hog->chip_label) {
gpio_sim_remove_hogs(dev);
return -ENOMEM;
diff --git a/drivers/gpio/gpio-tegra186.c b/drivers/gpio/gpio-tegra186.c
index fdc5bdcd5638..14c872b6ad05 100644
--- a/drivers/gpio/gpio-tegra186.c
+++ b/drivers/gpio/gpio-tegra186.c
@@ -670,13 +670,14 @@ static unsigned int tegra186_gpio_child_offset_to_irq(struct gpio_chip *chip,
static const struct of_device_id tegra186_pmc_of_match[] = {
{ .compatible = "nvidia,tegra186-pmc" },
{ .compatible = "nvidia,tegra194-pmc" },
+ { .compatible = "nvidia,tegra234-pmc" },
{ /* sentinel */ }
};
static void tegra186_gpio_init_route_mapping(struct tegra_gpio *gpio)
{
struct device *dev = gpio->gpio.parent;
- unsigned int i, j;
+ unsigned int i;
u32 value;
for (i = 0; i < gpio->soc->num_ports; i++) {
@@ -698,27 +699,23 @@ static void tegra186_gpio_init_route_mapping(struct tegra_gpio *gpio)
* On Tegra194 and later, each pin can be routed to one or more
* interrupts.
*/
- for (j = 0; j < gpio->num_irqs_per_bank; j++) {
- dev_dbg(dev, "programming default interrupt routing for port %s\n",
- port->name);
-
- offset = TEGRA186_GPIO_INT_ROUTE_MAPPING(p, j);
-
- /*
- * By default we only want to route GPIO pins to IRQ 0. This works
- * only under the assumption that we're running as the host kernel
- * and hence all GPIO pins are owned by Linux.
- *
- * For cases where Linux is the guest OS, the hypervisor will have
- * to configure the interrupt routing and pass only the valid
- * interrupts via device tree.
- */
- if (j == 0) {
- value = readl(base + offset);
- value = BIT(port->pins) - 1;
- writel(value, base + offset);
- }
- }
+ dev_dbg(dev, "programming default interrupt routing for port %s\n",
+ port->name);
+
+ offset = TEGRA186_GPIO_INT_ROUTE_MAPPING(p, 0);
+
+ /*
+ * By default we only want to route GPIO pins to IRQ 0. This works
+ * only under the assumption that we're running as the host kernel
+ * and hence all GPIO pins are owned by Linux.
+ *
+ * For cases where Linux is the guest OS, the hypervisor will have
+ * to configure the interrupt routing and pass only the valid
+ * interrupts via device tree.
+ */
+ value = readl(base + offset);
+ value = BIT(port->pins) - 1;
+ writel(value, base + offset);
}
}
}
diff --git a/drivers/gpio/gpio-ucb1400.c b/drivers/gpio/gpio-ucb1400.c
deleted file mode 100644
index 676adf1f198a..000000000000
--- a/drivers/gpio/gpio-ucb1400.c
+++ /dev/null
@@ -1,85 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Philips UCB1400 GPIO driver
- *
- * Author: Marek Vasut <marek.vasut@gmail.com>
- */
-
-#include <linux/module.h>
-#include <linux/ucb1400.h>
-#include <linux/gpio/driver.h>
-
-static int ucb1400_gpio_dir_in(struct gpio_chip *gc, unsigned off)
-{
- struct ucb1400_gpio *gpio;
- gpio = gpiochip_get_data(gc);
- ucb1400_gpio_set_direction(gpio->ac97, off, 0);
- return 0;
-}
-
-static int ucb1400_gpio_dir_out(struct gpio_chip *gc, unsigned off, int val)
-{
- struct ucb1400_gpio *gpio;
- gpio = gpiochip_get_data(gc);
- ucb1400_gpio_set_direction(gpio->ac97, off, 1);
- ucb1400_gpio_set_value(gpio->ac97, off, val);
- return 0;
-}
-
-static int ucb1400_gpio_get(struct gpio_chip *gc, unsigned off)
-{
- struct ucb1400_gpio *gpio;
-
- gpio = gpiochip_get_data(gc);
- return !!ucb1400_gpio_get_value(gpio->ac97, off);
-}
-
-static void ucb1400_gpio_set(struct gpio_chip *gc, unsigned off, int val)
-{
- struct ucb1400_gpio *gpio;
- gpio = gpiochip_get_data(gc);
- ucb1400_gpio_set_value(gpio->ac97, off, val);
-}
-
-static int ucb1400_gpio_probe(struct platform_device *dev)
-{
- struct ucb1400_gpio *ucb = dev_get_platdata(&dev->dev);
- int err = 0;
-
- if (!(ucb && ucb->gpio_offset)) {
- err = -EINVAL;
- goto err;
- }
-
- platform_set_drvdata(dev, ucb);
-
- ucb->gc.label = "ucb1400_gpio";
- ucb->gc.base = ucb->gpio_offset;
- ucb->gc.ngpio = 10;
- ucb->gc.owner = THIS_MODULE;
-
- ucb->gc.direction_input = ucb1400_gpio_dir_in;
- ucb->gc.direction_output = ucb1400_gpio_dir_out;
- ucb->gc.get = ucb1400_gpio_get;
- ucb->gc.set = ucb1400_gpio_set;
- ucb->gc.can_sleep = true;
-
- err = devm_gpiochip_add_data(&dev->dev, &ucb->gc, ucb);
-
-err:
- return err;
-
-}
-
-static struct platform_driver ucb1400_gpio_driver = {
- .probe = ucb1400_gpio_probe,
- .driver = {
- .name = "ucb1400_gpio"
- },
-};
-
-module_platform_driver(ucb1400_gpio_driver);
-
-MODULE_DESCRIPTION("Philips UCB1400 GPIO driver");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:ucb1400_gpio");
diff --git a/drivers/gpio/gpio-vf610.c b/drivers/gpio/gpio-vf610.c
index 9db42f6a2043..d3f3a69d4907 100644
--- a/drivers/gpio/gpio-vf610.c
+++ b/drivers/gpio/gpio-vf610.c
@@ -30,7 +30,6 @@ struct fsl_gpio_soc_data {
struct vf610_gpio_port {
struct gpio_chip gc;
- struct irq_chip ic;
void __iomem *base;
void __iomem *gpio_base;
const struct fsl_gpio_soc_data *sdata;
@@ -207,20 +206,24 @@ static int vf610_gpio_irq_set_type(struct irq_data *d, u32 type)
static void vf610_gpio_irq_mask(struct irq_data *d)
{
- struct vf610_gpio_port *port =
- gpiochip_get_data(irq_data_get_irq_chip_data(d));
- void __iomem *pcr_base = port->base + PORT_PCR(d->hwirq);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct vf610_gpio_port *port = gpiochip_get_data(gc);
+ irq_hw_number_t gpio_num = irqd_to_hwirq(d);
+ void __iomem *pcr_base = port->base + PORT_PCR(gpio_num);
vf610_gpio_writel(0, pcr_base);
+ gpiochip_disable_irq(gc, gpio_num);
}
static void vf610_gpio_irq_unmask(struct irq_data *d)
{
- struct vf610_gpio_port *port =
- gpiochip_get_data(irq_data_get_irq_chip_data(d));
- void __iomem *pcr_base = port->base + PORT_PCR(d->hwirq);
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct vf610_gpio_port *port = gpiochip_get_data(gc);
+ irq_hw_number_t gpio_num = irqd_to_hwirq(d);
+ void __iomem *pcr_base = port->base + PORT_PCR(gpio_num);
- vf610_gpio_writel(port->irqc[d->hwirq] << PORT_PCR_IRQC_OFFSET,
+ gpiochip_enable_irq(gc, gpio_num);
+ vf610_gpio_writel(port->irqc[gpio_num] << PORT_PCR_IRQC_OFFSET,
pcr_base);
}
@@ -237,6 +240,17 @@ static int vf610_gpio_irq_set_wake(struct irq_data *d, u32 enable)
return 0;
}
+static const struct irq_chip vf610_irqchip = {
+ .name = "gpio-vf610",
+ .irq_ack = vf610_gpio_irq_ack,
+ .irq_mask = vf610_gpio_irq_mask,
+ .irq_unmask = vf610_gpio_irq_unmask,
+ .irq_set_type = vf610_gpio_irq_set_type,
+ .irq_set_wake = vf610_gpio_irq_set_wake,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
+};
+
static void vf610_gpio_disable_clk(void *data)
{
clk_disable_unprepare(data);
@@ -249,7 +263,6 @@ static int vf610_gpio_probe(struct platform_device *pdev)
struct vf610_gpio_port *port;
struct gpio_chip *gc;
struct gpio_irq_chip *girq;
- struct irq_chip *ic;
int i;
int ret;
@@ -304,7 +317,7 @@ static int vf610_gpio_probe(struct platform_device *pdev)
gc = &port->gc;
gc->parent = dev;
- gc->label = "vf610-gpio";
+ gc->label = dev_name(dev);
gc->ngpio = VF610_GPIO_PER_PORT;
gc->base = of_alias_get_id(np, "gpio") * VF610_GPIO_PER_PORT;
@@ -315,14 +328,6 @@ static int vf610_gpio_probe(struct platform_device *pdev)
gc->direction_output = vf610_gpio_direction_output;
gc->set = vf610_gpio_set;
- ic = &port->ic;
- ic->name = "gpio-vf610";
- ic->irq_ack = vf610_gpio_irq_ack;
- ic->irq_mask = vf610_gpio_irq_mask;
- ic->irq_unmask = vf610_gpio_irq_unmask;
- ic->irq_set_type = vf610_gpio_irq_set_type;
- ic->irq_set_wake = vf610_gpio_irq_set_wake;
-
/* Mask all GPIO interrupts */
for (i = 0; i < gc->ngpio; i++)
vf610_gpio_writel(0, port->base + PORT_PCR(i));
@@ -331,7 +336,7 @@ static int vf610_gpio_probe(struct platform_device *pdev)
vf610_gpio_writel(~0, port->base + PORT_ISFR);
girq = &gc->irq;
- girq->chip = ic;
+ gpio_irq_chip_set_chip(girq, &vf610_irqchip);
girq->parent_handler = vf610_gpio_irq_handler;
girq->num_parents = 1;
girq->parents = devm_kcalloc(&pdev->dev, 1,
diff --git a/drivers/gpio/gpio-wcd934x.c b/drivers/gpio/gpio-wcd934x.c
index 97e6caedf1f3..817750e4e033 100644
--- a/drivers/gpio/gpio-wcd934x.c
+++ b/drivers/gpio/gpio-wcd934x.c
@@ -98,7 +98,6 @@ static int wcd_gpio_probe(struct platform_device *pdev)
chip->base = -1;
chip->ngpio = WCD934X_NPINS;
chip->label = dev_name(dev);
- chip->of_gpio_n_cells = 2;
chip->can_sleep = false;
return devm_gpiochip_add_data(dev, chip, data);
diff --git a/drivers/gpio/gpio-xilinx.c b/drivers/gpio/gpio-xilinx.c
index 2fc6b6ff7f16..e248809965ca 100644
--- a/drivers/gpio/gpio-xilinx.c
+++ b/drivers/gpio/gpio-xilinx.c
@@ -558,7 +558,6 @@ static int xgpio_probe(struct platform_device *pdev)
int status = 0;
struct device_node *np = pdev->dev.of_node;
u32 is_dual = 0;
- u32 cells = 2;
u32 width[2];
u32 state[2];
u32 dir[2];
@@ -591,15 +590,6 @@ static int xgpio_probe(struct platform_device *pdev)
bitmap_from_arr32(chip->dir, dir, 64);
- /* Update cells with gpio-cells value */
- if (of_property_read_u32(np, "#gpio-cells", &cells))
- dev_dbg(&pdev->dev, "Missing gpio-cells property\n");
-
- if (cells != 2) {
- dev_err(&pdev->dev, "#gpio-cells mismatch\n");
- return -EINVAL;
- }
-
/*
* Check device node and parent device node for device width
* and assume default width of 32
@@ -630,7 +620,6 @@ static int xgpio_probe(struct platform_device *pdev)
chip->gc.parent = &pdev->dev;
chip->gc.direction_input = xgpio_dir_in;
chip->gc.direction_output = xgpio_dir_out;
- chip->gc.of_gpio_n_cells = cells;
chip->gc.get = xgpio_get;
chip->gc.set = xgpio_set;
chip->gc.request = xgpio_request;
diff --git a/drivers/gpio/gpio-zevio.c b/drivers/gpio/gpio-zevio.c
index ce9d1282165c..f0f571b323f2 100644
--- a/drivers/gpio/gpio-zevio.c
+++ b/drivers/gpio/gpio-zevio.c
@@ -5,13 +5,15 @@
* Author: Fabian Vogt <fabian@ritter-vogt.de>
*/
-#include <linux/spinlock.h>
+#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/init.h>
-#include <linux/bitops.h>
#include <linux/io.h>
-#include <linux/of_device.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
#include <linux/slab.h>
+#include <linux/spinlock.h>
+
#include <linux/gpio/driver.h>
/*
@@ -162,7 +164,6 @@ static const struct gpio_chip zevio_gpio_chip = {
.base = 0,
.owner = THIS_MODULE,
.ngpio = 32,
- .of_gpio_n_cells = 2,
};
/* Initialization */
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index 17c53f484280..d8a421ce26a8 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -1388,16 +1388,6 @@ void acpi_gpiochip_remove(struct gpio_chip *chip)
kfree(acpi_gpio);
}
-void acpi_gpio_dev_init(struct gpio_chip *gc, struct gpio_device *gdev)
-{
- /* Set default fwnode to parent's one if present */
- if (gc->parent)
- ACPI_COMPANION_SET(&gdev->dev, ACPI_COMPANION(gc->parent));
-
- if (gc->fwnode)
- device_set_node(&gdev->dev, gc->fwnode);
-}
-
static int acpi_gpio_package_count(const union acpi_object *obj)
{
const union acpi_object *element = obj->package.elements;
@@ -1637,6 +1627,18 @@ static const struct dmi_system_id gpiolib_acpi_quirks[] __initconst = {
.ignore_wake = "ELAN0415:00@9",
},
},
+ {
+ /*
+ * Spurious wakeups from TP_ATTN# pin
+ * Found in BIOS 1.7.7
+ */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "NH5xAx"),
+ },
+ .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+ .ignore_wake = "SYNA1202:00@16",
+ },
+ },
{} /* Terminating entry */
};
diff --git a/drivers/gpio/gpiolib-acpi.h b/drivers/gpio/gpiolib-acpi.h
index 9475f99a9694..90fd6b04f24d 100644
--- a/drivers/gpio/gpiolib-acpi.h
+++ b/drivers/gpio/gpiolib-acpi.h
@@ -14,7 +14,6 @@
#include <linux/gpio/consumer.h>
-struct acpi_device;
struct device;
struct fwnode_handle;
@@ -26,8 +25,6 @@ struct gpio_device;
void acpi_gpiochip_add(struct gpio_chip *chip);
void acpi_gpiochip_remove(struct gpio_chip *chip);
-void acpi_gpio_dev_init(struct gpio_chip *gc, struct gpio_device *gdev);
-
void acpi_gpiochip_request_interrupts(struct gpio_chip *chip);
void acpi_gpiochip_free_interrupts(struct gpio_chip *chip);
@@ -42,8 +39,6 @@ int acpi_gpio_count(struct device *dev, const char *con_id);
static inline void acpi_gpiochip_add(struct gpio_chip *chip) { }
static inline void acpi_gpiochip_remove(struct gpio_chip *chip) { }
-static inline void acpi_gpio_dev_init(struct gpio_chip *gc, struct gpio_device *gdev) { }
-
static inline void
acpi_gpiochip_request_interrupts(struct gpio_chip *chip) { }
diff --git a/drivers/gpio/gpiolib-cdev.c b/drivers/gpio/gpiolib-cdev.c
index e878e3f22b0e..0a33971c964c 100644
--- a/drivers/gpio/gpiolib-cdev.c
+++ b/drivers/gpio/gpiolib-cdev.c
@@ -321,7 +321,7 @@ static void linehandle_free(struct linehandle_state *lh)
if (lh->descs[i])
gpiod_free(lh->descs[i]);
kfree(lh->label);
- put_device(&lh->gdev->dev);
+ gpio_device_put(lh->gdev);
kfree(lh);
}
@@ -363,8 +363,7 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
lh = kzalloc(sizeof(*lh), GFP_KERNEL);
if (!lh)
return -ENOMEM;
- lh->gdev = gdev;
- get_device(&gdev->dev);
+ lh->gdev = gpio_device_get(gdev);
if (handlereq.consumer_label[0] != '\0') {
/* label is only initialized if consumer_label is set */
@@ -1576,7 +1575,7 @@ static void linereq_free(struct linereq *lr)
}
kfifo_free(&lr->events);
kfree(lr->label);
- put_device(&lr->gdev->dev);
+ gpio_device_put(lr->gdev);
kfree(lr);
}
@@ -1646,8 +1645,7 @@ static int linereq_create(struct gpio_device *gdev, void __user *ip)
if (!lr)
return -ENOMEM;
- lr->gdev = gdev;
- get_device(&gdev->dev);
+ lr->gdev = gpio_device_get(gdev);
for (i = 0; i < ulr.num_lines; i++) {
lr->lines[i].req = lr;
@@ -1916,7 +1914,7 @@ static void lineevent_free(struct lineevent_state *le)
if (le->desc)
gpiod_free(le->desc);
kfree(le->label);
- put_device(&le->gdev->dev);
+ gpio_device_put(le->gdev);
kfree(le);
}
@@ -2094,8 +2092,7 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
le = kzalloc(sizeof(*le), GFP_KERNEL);
if (!le)
return -ENOMEM;
- le->gdev = gdev;
- get_device(&gdev->dev);
+ le->gdev = gpio_device_get(gdev);
if (eventreq.consumer_label[0] != '\0') {
/* label is only initialized if consumer_label is set */
@@ -2671,7 +2668,7 @@ static int gpio_chrdev_open(struct inode *inode, struct file *file)
init_waitqueue_head(&cdev->wait);
INIT_KFIFO(cdev->events);
- cdev->gdev = gdev;
+ cdev->gdev = gpio_device_get(gdev);
cdev->lineinfo_changed_nb.notifier_call = lineinfo_changed_notify;
ret = blocking_notifier_chain_register(&gdev->notifier,
@@ -2679,7 +2676,6 @@ static int gpio_chrdev_open(struct inode *inode, struct file *file)
if (ret)
goto out_free_bitmap;
- get_device(&gdev->dev);
file->private_data = cdev;
ret = nonseekable_open(inode, file);
@@ -2694,6 +2690,7 @@ out_unregister_notifier:
blocking_notifier_chain_unregister(&gdev->notifier,
&cdev->lineinfo_changed_nb);
out_free_bitmap:
+ gpio_device_put(gdev);
bitmap_free(cdev->watched_lines);
out_free_cdev:
kfree(cdev);
@@ -2716,7 +2713,7 @@ static int gpio_chrdev_release(struct inode *inode, struct file *file)
bitmap_free(cdev->watched_lines);
blocking_notifier_chain_unregister(&gdev->notifier,
&cdev->lineinfo_changed_nb);
- put_device(&gdev->dev);
+ gpio_device_put(gdev);
kfree(cdev);
return 0;
diff --git a/drivers/gpio/gpiolib-devres.c b/drivers/gpio/gpiolib-devres.c
index 16a696249229..fe9ce6b19f15 100644
--- a/drivers/gpio/gpiolib-devres.c
+++ b/drivers/gpio/gpiolib-devres.c
@@ -130,61 +130,6 @@ struct gpio_desc *__must_check devm_gpiod_get_index(struct device *dev,
EXPORT_SYMBOL_GPL(devm_gpiod_get_index);
/**
- * devm_gpiod_get_from_of_node() - obtain a GPIO from an OF node
- * @dev: device for lifecycle management
- * @node: handle of the OF node
- * @propname: name of the DT property representing the GPIO
- * @index: index of the GPIO to obtain for the consumer
- * @dflags: GPIO initialization flags
- * @label: label to attach to the requested GPIO
- *
- * Returns:
- * On successful request the GPIO pin is configured in accordance with
- * provided @dflags.
- *
- * In case of error an ERR_PTR() is returned.
- */
-struct gpio_desc *devm_gpiod_get_from_of_node(struct device *dev,
- const struct device_node *node,
- const char *propname, int index,
- enum gpiod_flags dflags,
- const char *label)
-{
- struct gpio_desc **dr;
- struct gpio_desc *desc;
-
- desc = gpiod_get_from_of_node(node, propname, index, dflags, label);
- if (IS_ERR(desc))
- return desc;
-
- /*
- * For non-exclusive GPIO descriptors, check if this descriptor is
- * already under resource management by this device.
- */
- if (dflags & GPIOD_FLAGS_BIT_NONEXCLUSIVE) {
- struct devres *dres;
-
- dres = devres_find(dev, devm_gpiod_release,
- devm_gpiod_match, &desc);
- if (dres)
- return desc;
- }
-
- dr = devres_alloc(devm_gpiod_release, sizeof(struct gpio_desc *),
- GFP_KERNEL);
- if (!dr) {
- gpiod_put(desc);
- return ERR_PTR(-ENOMEM);
- }
-
- *dr = desc;
- devres_add(dev, dr);
-
- return desc;
-}
-EXPORT_SYMBOL_GPL(devm_gpiod_get_from_of_node);
-
-/**
* devm_fwnode_gpiod_get_index - get a GPIO descriptor from a given node
* @dev: GPIO consumer
* @fwnode: firmware node containing GPIO reference
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index 4fff7258ee41..266352b1a966 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -23,6 +23,47 @@
#include "gpiolib.h"
#include "gpiolib-of.h"
+/*
+ * This is Linux-specific flags. By default controllers' and Linux' mapping
+ * match, but GPIO controllers are free to translate their own flags to
+ * Linux-specific in their .xlate callback. Though, 1:1 mapping is recommended.
+ */
+enum of_gpio_flags {
+ OF_GPIO_ACTIVE_LOW = 0x1,
+ OF_GPIO_SINGLE_ENDED = 0x2,
+ OF_GPIO_OPEN_DRAIN = 0x4,
+ OF_GPIO_TRANSITORY = 0x8,
+ OF_GPIO_PULL_UP = 0x10,
+ OF_GPIO_PULL_DOWN = 0x20,
+ OF_GPIO_PULL_DISABLE = 0x40,
+};
+
+/**
+ * of_gpio_named_count() - Count GPIOs for a device
+ * @np: device node to count GPIOs for
+ * @propname: property name containing gpio specifier(s)
+ *
+ * The function returns the count of GPIOs specified for a node.
+ * Note that the empty GPIO specifiers count too. Returns either
+ * Number of gpios defined in property,
+ * -EINVAL for an incorrectly formed gpios property, or
+ * -ENOENT for a missing gpios property
+ *
+ * Example:
+ * gpios = <0
+ * &gpio1 1 2
+ * 0
+ * &gpio2 3 4>;
+ *
+ * The above example defines four GPIOs, two of which are not specified.
+ * This function will return '4'
+ */
+static int of_gpio_named_count(const struct device_node *np,
+ const char *propname)
+{
+ return of_count_phandle_with_args(np, propname, "#gpio-cells");
+}
+
/**
* of_gpio_spi_cs_get_count() - special GPIO counting for SPI
* @dev: Consuming device
@@ -50,12 +91,6 @@ static int of_gpio_spi_cs_get_count(struct device *dev, const char *con_id)
return of_gpio_named_count(np, "gpios");
}
-/*
- * This is used by external users of of_gpio_count() from <linux/of_gpio.h>
- *
- * FIXME: get rid of those external users by converting them to GPIO
- * descriptors and let them all use gpiod_count()
- */
int of_gpio_get_count(struct device *dev, const char *con_id)
{
int ret;
@@ -345,19 +380,28 @@ out:
return desc;
}
-int of_get_named_gpio_flags(const struct device_node *np, const char *list_name,
- int index, enum of_gpio_flags *flags)
+/**
+ * of_get_named_gpio() - Get a GPIO number to use with GPIO API
+ * @np: device node to get GPIO from
+ * @propname: Name of property containing gpio specifier(s)
+ * @index: index of the GPIO
+ *
+ * Returns GPIO number to use with Linux generic GPIO API, or one of the errno
+ * value on the error condition.
+ */
+int of_get_named_gpio(const struct device_node *np, const char *propname,
+ int index)
{
struct gpio_desc *desc;
- desc = of_get_named_gpiod_flags(np, list_name, index, flags);
+ desc = of_get_named_gpiod_flags(np, propname, index, NULL);
if (IS_ERR(desc))
return PTR_ERR(desc);
else
return desc_to_gpio(desc);
}
-EXPORT_SYMBOL_GPL(of_get_named_gpio_flags);
+EXPORT_SYMBOL_GPL(of_get_named_gpio);
/* Converts gpio_lookup_flags into bitmask of GPIO_* values */
static unsigned long of_convert_gpio_flags(enum of_gpio_flags flags)
@@ -389,52 +433,6 @@ static unsigned long of_convert_gpio_flags(enum of_gpio_flags flags)
return lflags;
}
-/**
- * gpiod_get_from_of_node() - obtain a GPIO from an OF node
- * @node: handle of the OF node
- * @propname: name of the DT property representing the GPIO
- * @index: index of the GPIO to obtain for the consumer
- * @dflags: GPIO initialization flags
- * @label: label to attach to the requested GPIO
- *
- * Returns:
- * On successful request the GPIO pin is configured in accordance with
- * provided @dflags.
- *
- * In case of error an ERR_PTR() is returned.
- */
-struct gpio_desc *gpiod_get_from_of_node(const struct device_node *node,
- const char *propname, int index,
- enum gpiod_flags dflags,
- const char *label)
-{
- unsigned long lflags;
- struct gpio_desc *desc;
- enum of_gpio_flags of_flags;
- int ret;
-
- desc = of_get_named_gpiod_flags(node, propname, index, &of_flags);
- if (!desc || IS_ERR(desc))
- return desc;
-
- ret = gpiod_request(desc, label);
- if (ret == -EBUSY && (dflags & GPIOD_FLAGS_BIT_NONEXCLUSIVE))
- return desc;
- if (ret)
- return ERR_PTR(ret);
-
- lflags = of_convert_gpio_flags(of_flags);
-
- ret = gpiod_configure_flags(desc, propname, lflags, dflags);
- if (ret < 0) {
- gpiod_put(desc);
- return ERR_PTR(ret);
- }
-
- return desc;
-}
-EXPORT_SYMBOL_GPL(gpiod_get_from_of_node);
-
static struct gpio_desc *of_find_gpio_rename(struct device_node *np,
const char *con_id,
unsigned int idx,
@@ -668,7 +666,7 @@ static struct gpio_desc *of_parse_own_gpio(struct device_node *np,
u32 tmp;
int ret;
- chip_np = chip->of_node;
+ chip_np = dev_of_node(&chip->gpiodev->dev);
if (!chip_np)
return ERR_PTR(-EINVAL);
@@ -760,7 +758,7 @@ static int of_gpiochip_scan_gpios(struct gpio_chip *chip)
struct device_node *np;
int ret;
- for_each_available_child_of_node(chip->of_node, np) {
+ for_each_available_child_of_node(dev_of_node(&chip->gpiodev->dev), np) {
if (!of_property_read_bool(np, "gpio-hog"))
continue;
@@ -970,22 +968,18 @@ EXPORT_SYMBOL_GPL(of_mm_gpiochip_remove);
#ifdef CONFIG_PINCTRL
static int of_gpiochip_add_pin_range(struct gpio_chip *chip)
{
- struct device_node *np = chip->of_node;
struct of_phandle_args pinspec;
struct pinctrl_dev *pctldev;
+ struct device_node *np;
int index = 0, ret;
const char *name;
static const char group_names_propname[] = "gpio-ranges-group-names";
struct property *group_names;
+ np = dev_of_node(&chip->gpiodev->dev);
if (!np)
return 0;
- if (!of_property_read_bool(np, "gpio-ranges") &&
- chip->of_gpio_ranges_fallback) {
- return chip->of_gpio_ranges_fallback(chip, np);
- }
-
group_names = of_find_property(np, group_names_propname, NULL);
for (;; index++) {
@@ -1063,7 +1057,7 @@ int of_gpiochip_add(struct gpio_chip *chip)
struct device_node *np;
int ret;
- np = to_of_node(dev_fwnode(&chip->gpiodev->dev));
+ np = dev_of_node(&chip->gpiodev->dev);
if (!np)
return 0;
@@ -1092,19 +1086,3 @@ void of_gpiochip_remove(struct gpio_chip *chip)
{
fwnode_handle_put(chip->fwnode);
}
-
-void of_gpio_dev_init(struct gpio_chip *gc, struct gpio_device *gdev)
-{
- /* Set default OF node to parent's one if present */
- if (gc->parent)
- gdev->dev.of_node = gc->parent->of_node;
-
- if (gc->fwnode)
- gc->of_node = to_of_node(gc->fwnode);
-
- /* If the gpiochip has an assigned OF node this takes precedence */
- if (gc->of_node)
- gdev->dev.of_node = gc->of_node;
- else
- gc->of_node = gdev->dev.of_node;
-}
diff --git a/drivers/gpio/gpiolib-of.h b/drivers/gpio/gpiolib-of.h
index a6c593e6766c..e5bb065d82ef 100644
--- a/drivers/gpio/gpiolib-of.h
+++ b/drivers/gpio/gpiolib-of.h
@@ -23,7 +23,6 @@ struct gpio_desc *of_find_gpio(struct device_node *np,
int of_gpiochip_add(struct gpio_chip *gc);
void of_gpiochip_remove(struct gpio_chip *gc);
int of_gpio_get_count(struct device *dev, const char *con_id);
-void of_gpio_dev_init(struct gpio_chip *gc, struct gpio_device *gdev);
#else
static inline struct gpio_desc *of_find_gpio(struct device_node *np,
const char *con_id,
@@ -38,10 +37,6 @@ static inline int of_gpio_get_count(struct device *dev, const char *con_id)
{
return 0;
}
-static inline void of_gpio_dev_init(struct gpio_chip *gc,
- struct gpio_device *gdev)
-{
-}
#endif /* CONFIG_OF_GPIO */
extern struct notifier_block gpio_of_notifier;
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 939c776b9488..19bd23044b01 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -1,34 +1,35 @@
// SPDX-License-Identifier: GPL-2.0
+#include <linux/acpi.h>
#include <linux/bitmap.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/spinlock.h>
-#include <linux/list.h>
+#include <linux/compat.h>
+#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/err.h>
-#include <linux/debugfs.h>
-#include <linux/seq_file.h>
+#include <linux/file.h>
+#include <linux/fs.h>
#include <linux/gpio.h>
-#include <linux/idr.h>
-#include <linux/slab.h>
-#include <linux/acpi.h>
#include <linux/gpio/driver.h>
#include <linux/gpio/machine.h>
+#include <linux/idr.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
#include <linux/pinctrl/consumer.h>
-#include <linux/fs.h>
-#include <linux/compat.h>
-#include <linux/file.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
#include <uapi/linux/gpio.h>
-#include "gpiolib.h"
-#include "gpiolib-of.h"
#include "gpiolib-acpi.h"
-#include "gpiolib-swnode.h"
#include "gpiolib-cdev.h"
+#include "gpiolib-of.h"
+#include "gpiolib-swnode.h"
#include "gpiolib-sysfs.h"
+#include "gpiolib.h"
#define CREATE_TRACE_POINTS
#include <trace/events/gpio.h>
@@ -531,6 +532,14 @@ static void gpiochip_free_valid_mask(struct gpio_chip *gc)
static int gpiochip_add_pin_ranges(struct gpio_chip *gc)
{
+ /*
+ * Device Tree platforms are supposed to use "gpio-ranges"
+ * property. This check ensures that the ->add_pin_ranges()
+ * won't be called for them.
+ */
+ if (device_property_present(&gc->gpiodev->dev, "gpio-ranges"))
+ return 0;
+
if (gc->add_pin_ranges)
return gc->add_pin_ranges(gc);
@@ -578,6 +587,13 @@ static int gpiochip_setup_dev(struct gpio_device *gdev)
{
int ret;
+ /*
+ * If fwnode doesn't belong to another device, it's safe to clear its
+ * initialized flag.
+ */
+ if (gdev->dev.fwnode && !gdev->dev.fwnode->dev)
+ fwnode_dev_initialized(gdev->dev.fwnode, false);
+
ret = gcdev_register(gdev, gpio_devt);
if (ret)
return ret;
@@ -659,10 +675,12 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
int base = 0;
int ret = 0;
+ /* If the calling driver did not initialize firmware node, do it here */
if (gc->fwnode)
fwnode = gc->fwnode;
else if (gc->parent)
fwnode = dev_fwnode(gc->parent);
+ gc->fwnode = fwnode;
/*
* First: allocate and populate the internal stat container, and
@@ -676,14 +694,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
gdev->chip = gc;
gc->gpiodev = gdev;
- of_gpio_dev_init(gc, gdev);
- acpi_gpio_dev_init(gc, gdev);
-
- /*
- * Assign fwnode depending on the result of the previous calls,
- * if none of them succeed, assign it to the parent's one.
- */
- gc->fwnode = gdev->dev.fwnode = dev_fwnode(&gdev->dev) ?: fwnode;
+ device_set_node(&gdev->dev, gc->fwnode);
gdev->id = ida_alloc(&gpio_ida, GFP_KERNEL);
if (gdev->id < 0) {
@@ -882,7 +893,7 @@ err_free_gpiochip_mask:
gpiochip_free_valid_mask(gc);
if (gdev->dev.release) {
/* release() has been registered by gpiochip_setup_dev() */
- put_device(&gdev->dev);
+ gpio_device_put(gdev);
goto err_print_message;
}
err_remove_from_list:
@@ -972,7 +983,7 @@ void gpiochip_remove(struct gpio_chip *gc)
*/
gcdev_unregister(gdev);
up_write(&gdev->sem);
- put_device(&gdev->dev);
+ gpio_device_put(gdev);
}
EXPORT_SYMBOL_GPL(gpiochip_remove);
@@ -1126,14 +1137,8 @@ static void gpiochip_set_hierarchical_irqchip(struct gpio_chip *gc,
/* Just pick something */
fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
fwspec.param_count = 2;
- ret = __irq_domain_alloc_irqs(gc->irq.domain,
- /* just pick something */
- -1,
- 1,
- NUMA_NO_NODE,
- &fwspec,
- false,
- NULL);
+ ret = irq_domain_alloc_irqs(gc->irq.domain, 1,
+ NUMA_NO_NODE, &fwspec);
if (ret < 0) {
chip_err(gc,
"can not allocate irq for GPIO line %d parent hwirq %d in hierarchy domain: %d\n",
@@ -2063,17 +2068,15 @@ static int validate_desc(const struct gpio_desc *desc, const char *func)
int gpiod_request(struct gpio_desc *desc, const char *label)
{
int ret = -EPROBE_DEFER;
- struct gpio_device *gdev;
VALIDATE_DESC(desc);
- gdev = desc->gdev;
- if (try_module_get(gdev->owner)) {
+ if (try_module_get(desc->gdev->owner)) {
ret = gpiod_request_commit(desc, label);
if (ret)
- module_put(gdev->owner);
+ module_put(desc->gdev->owner);
else
- get_device(&gdev->dev);
+ gpio_device_get(desc->gdev);
}
if (ret)
@@ -2134,7 +2137,7 @@ void gpiod_free(struct gpio_desc *desc)
{
if (desc && desc->gdev && gpiod_free_commit(desc)) {
module_put(desc->gdev->owner);
- put_device(&desc->gdev->dev);
+ gpio_device_put(desc->gdev);
} else {
WARN_ON(extra_checks);
}
diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h
index b3c2db6eba80..cca81375f127 100644
--- a/drivers/gpio/gpiolib.h
+++ b/drivers/gpio/gpiolib.h
@@ -82,6 +82,16 @@ static inline struct gpio_device *to_gpio_device(struct device *dev)
return container_of(dev, struct gpio_device, dev);
}
+static inline struct gpio_device *gpio_device_get(struct gpio_device *gdev)
+{
+ return to_gpio_device(get_device(&gdev->dev));
+}
+
+static inline void gpio_device_put(struct gpio_device *gdev)
+{
+ put_device(&gdev->dev);
+}
+
/* gpio suffixes used for ACPI and device tree lookup */
static __maybe_unused const char * const gpio_suffixes[] = { "gpios", "gpio" };
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 315cbdf61979..dc0f94f02a82 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -12,7 +12,6 @@ menuconfig DRM
select HDMI
select FB_CMDLINE
select I2C
- select I2C_ALGOBIT
select DMA_SHARED_BUFFER
select SYNC_FILE
# gallium uses SYS_kcmp for os_same_file_description() to de-duplicate
@@ -53,7 +52,8 @@ config DRM_DEBUG_MM
config DRM_USE_DYNAMIC_DEBUG
bool "use dynamic debug to implement drm.debug"
- default y
+ default n
+ depends on BROKEN
depends on DRM
depends on DYNAMIC_DEBUG || DYNAMIC_DEBUG_CORE
depends on JUMP_LABEL
@@ -63,6 +63,12 @@ config DRM_USE_DYNAMIC_DEBUG
bytes per callsite, the .data costs can be substantial, and
are therefore configurable.
+config DRM_KUNIT_TEST_HELPERS
+ tristate
+ depends on DRM && KUNIT
+ help
+ KUnit Helpers for KMS drivers.
+
config DRM_KUNIT_TEST
tristate "KUnit tests for DRM" if !KUNIT_ALL_TESTS
depends on DRM && KUNIT
@@ -73,6 +79,7 @@ config DRM_KUNIT_TEST
select DRM_KMS_HELPER
select DRM_BUDDY
select DRM_EXPORT_FOR_TESTS if m
+ select DRM_KUNIT_TEST_HELPERS
default KUNIT_ALL_TESTS
help
This builds unit tests for DRM. This option is not useful for
@@ -391,64 +398,7 @@ menuconfig DRM_LEGACY
Unless you have strong reasons to go rogue, say "N".
if DRM_LEGACY
-
-config DRM_TDFX
- tristate "3dfx Banshee/Voodoo3+"
- depends on DRM && PCI
- help
- Choose this option if you have a 3dfx Banshee or Voodoo3 (or later),
- graphics card. If M is selected, the module will be called tdfx.
-
-config DRM_R128
- tristate "ATI Rage 128"
- depends on DRM && PCI
- select FW_LOADER
- help
- Choose this option if you have an ATI Rage 128 graphics card. If M
- is selected, the module will be called r128. AGP support for
- this card is strongly suggested (unless you have a PCI version).
-
-config DRM_I810
- tristate "Intel I810"
- # !PREEMPTION because of missing ioctl locking
- depends on DRM && AGP && AGP_INTEL && (!PREEMPTION || BROKEN)
- help
- Choose this option if you have an Intel I810 graphics card. If M is
- selected, the module will be called i810. AGP support is required
- for this driver to work.
-
-config DRM_MGA
- tristate "Matrox g200/g400"
- depends on DRM && PCI
- select FW_LOADER
- help
- Choose this option if you have a Matrox G200, G400 or G450 graphics
- card. If M is selected, the module will be called mga. AGP
- support is required for this driver to work.
-
-config DRM_SIS
- tristate "SiS video cards"
- depends on DRM && AGP
- depends on FB_SIS || FB_SIS=n
- help
- Choose this option if you have a SiS 630 or compatible video
- chipset. If M is selected the module will be called sis. AGP
- support is required for this driver to work.
-
-config DRM_VIA
- tristate "Via unichrome video cards"
- depends on DRM && PCI
- help
- Choose this option if you have a Via unichrome or compatible video
- chipset. If M is selected the module will be called via.
-
-config DRM_SAVAGE
- tristate "Savage video cards"
- depends on DRM && PCI
- help
- Choose this option if you have a Savage3D/4/SuperSavage/Pro/Twister
- chipset. If M is selected the module will be called savage.
-
+# leave here to list legacy drivers
endif # DRM_LEGACY
config DRM_EXPORT_FOR_TESTS
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index cc637343d87b..ab4460fcd63f 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -126,7 +126,7 @@ obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o
# Drivers and the rest
#
-obj-$(CONFIG_DRM_KUNIT_TEST) += tests/
+obj-y += tests/
obj-$(CONFIG_DRM_MIPI_DBI) += drm_mipi_dbi.o
obj-$(CONFIG_DRM_MIPI_DSI) += drm_mipi_dsi.o
@@ -134,21 +134,14 @@ obj-y += arm/
obj-y += display/
obj-$(CONFIG_DRM_TTM) += ttm/
obj-$(CONFIG_DRM_SCHED) += scheduler/
-obj-$(CONFIG_DRM_TDFX) += tdfx/
-obj-$(CONFIG_DRM_R128) += r128/
obj-$(CONFIG_DRM_RADEON)+= radeon/
obj-$(CONFIG_DRM_AMDGPU)+= amd/amdgpu/
-obj-$(CONFIG_DRM_MGA) += mga/
-obj-$(CONFIG_DRM_I810) += i810/
obj-$(CONFIG_DRM_I915) += i915/
obj-$(CONFIG_DRM_KMB_DISPLAY) += kmb/
obj-$(CONFIG_DRM_MGAG200) += mgag200/
obj-$(CONFIG_DRM_V3D) += v3d/
obj-$(CONFIG_DRM_VC4) += vc4/
-obj-$(CONFIG_DRM_SIS) += sis/
-obj-$(CONFIG_DRM_SAVAGE)+= savage/
obj-$(CONFIG_DRM_VMWGFX)+= vmwgfx/
-obj-$(CONFIG_DRM_VIA) +=via/
obj-$(CONFIG_DRM_VGEM) += vgem/
obj-$(CONFIG_DRM_VKMS) += vkms/
obj-$(CONFIG_DRM_NOUVEAU) +=nouveau/
diff --git a/drivers/gpu/drm/amd/amdgpu/Kconfig b/drivers/gpu/drm/amd/amdgpu/Kconfig
index 5fcd510f1abb..5341b6b242c3 100644
--- a/drivers/gpu/drm/amd/amdgpu/Kconfig
+++ b/drivers/gpu/drm/amd/amdgpu/Kconfig
@@ -13,6 +13,8 @@ config DRM_AMDGPU
select DRM_TTM_HELPER
select POWER_SUPPLY
select HWMON
+ select I2C
+ select I2C_ALGOBIT
select BACKLIGHT_CLASS_DEVICE
select INTERVAL_TREE
select DRM_BUDDY
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 798d0e9a60b7..1d72cbc85348 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -34,6 +34,7 @@ ccflags-y := -I$(FULL_AMD_PATH)/include/asic_reg \
-I$(FULL_AMD_PATH)/acp/include \
-I$(FULL_AMD_DISPLAY_PATH) \
-I$(FULL_AMD_DISPLAY_PATH)/include \
+ -I$(FULL_AMD_DISPLAY_PATH)/modules/inc \
-I$(FULL_AMD_DISPLAY_PATH)/dc \
-I$(FULL_AMD_DISPLAY_PATH)/amdgpu_dm \
-I$(FULL_AMD_PATH)/amdkfd
@@ -76,12 +77,13 @@ amdgpu-y += \
vi.o mxgpu_vi.o nbio_v6_1.o soc15.o emu_soc.o mxgpu_ai.o nbio_v7_0.o vega10_reg_init.o \
vega20_reg_init.o nbio_v7_4.o nbio_v2_3.o nv.o arct_reg_init.o mxgpu_nv.o \
nbio_v7_2.o hdp_v4_0.o hdp_v5_0.o aldebaran_reg_init.o aldebaran.o soc21.o \
- sienna_cichlid.o nbio_v4_3.o hdp_v6_0.o nbio_v7_7.o hdp_v5_2.o lsdma_v6_0.o
+ sienna_cichlid.o smu_v13_0_10.o nbio_v4_3.o hdp_v6_0.o nbio_v7_7.o hdp_v5_2.o lsdma_v6_0.o
# add DF block
amdgpu-y += \
df_v1_7.o \
- df_v3_6.o
+ df_v3_6.o \
+ df_v4_3.o
# add GMC block
amdgpu-y += \
@@ -136,6 +138,7 @@ amdgpu-y += \
gfx_v10_0.o \
imu_v11_0.o \
gfx_v11_0.o \
+ gfx_v11_0_3.o \
imu_v11_0_3.o
# add async DMA block
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index e3e2e6e3b485..164141bc8b4a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -52,8 +52,7 @@
#include <linux/pci.h>
#include <linux/aer.h>
-#include <drm/ttm/ttm_bo_api.h>
-#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_execbuf_util.h>
@@ -150,7 +149,7 @@ struct amdgpu_watchdog_timer
* Modules parameters.
*/
extern int amdgpu_modeset;
-extern int amdgpu_vram_limit;
+extern unsigned int amdgpu_vram_limit;
extern int amdgpu_vis_vram_limit;
extern int amdgpu_gart_size;
extern int amdgpu_gtt_size;
@@ -243,6 +242,7 @@ extern int amdgpu_num_kcq;
#define AMDGPU_VCNFW_LOG_SIZE (32 * 1024)
extern int amdgpu_vcnfw_log;
+extern int amdgpu_sg_display;
#define AMDGPU_VM_MAX_NUM_CTX 4096
#define AMDGPU_SG_THRESHOLD (256*1024*1024)
@@ -609,7 +609,7 @@ int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
/* VRAM scratch page for HDP bug, default vram page */
-struct amdgpu_vram_scratch {
+struct amdgpu_mem_scratch {
struct amdgpu_bo *robj;
volatile uint32_t *ptr;
u64 gpu_addr;
@@ -756,6 +756,11 @@ struct amdgpu_mqd {
#define AMDGPU_PRODUCT_NAME_LEN 64
struct amdgpu_reset_domain;
+/*
+ * Non-zero (true) if the GPU has VRAM. Zero (false) otherwise.
+ */
+#define AMDGPU_HAS_VRAM(_adev) ((_adev)->gmc.real_vram_size)
+
struct amdgpu_device {
struct device *dev;
struct pci_dev *pdev;
@@ -849,7 +854,7 @@ struct amdgpu_device {
/* memory management */
struct amdgpu_mman mman;
- struct amdgpu_vram_scratch vram_scratch;
+ struct amdgpu_mem_scratch mem_scratch;
struct amdgpu_wb wb;
atomic64_t num_bytes_moved;
atomic64_t num_evictions;
@@ -871,7 +876,7 @@ struct amdgpu_device {
struct amdgpu_vkms_output *amdgpu_vkms_output;
struct amdgpu_mode_info mode_info;
/* For pre-DCE11. DCE11 and later are in "struct amdgpu_device->dm" */
- struct work_struct hotplug_work;
+ struct delayed_work hotplug_work;
struct amdgpu_irq_src crtc_irq;
struct amdgpu_irq_src vline0_irq;
struct amdgpu_irq_src vupdate_irq;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index 57b5e11446c6..458362e4ea01 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -24,6 +24,7 @@
#include <linux/pci.h>
#include <linux/acpi.h>
+#include <linux/backlight.h>
#include <linux/slab.h>
#include <linux/power_supply.h>
#include <linux/pm_runtime.h>
@@ -31,7 +32,6 @@
#include <acpi/video.h>
#include <acpi/actbl.h>
-#include <drm/drm_crtc_helper.h>
#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_display.h"
@@ -1079,20 +1079,16 @@ bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev)
* S0ix even though the system is suspending to idle, so return false
* in that case.
*/
- if (!(acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0)) {
+ if (!(acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0))
dev_warn_once(adev->dev,
"Power consumption will be higher as BIOS has not been configured for suspend-to-idle.\n"
"To use suspend-to-idle change the sleep mode in BIOS setup.\n");
- return false;
- }
#if !IS_ENABLED(CONFIG_AMD_PMC)
dev_warn_once(adev->dev,
"Power consumption will be higher as the kernel has not been compiled with CONFIG_AMD_PMC.\n");
- return false;
-#else
- return true;
#endif /* CONFIG_AMD_PMC */
+ return true;
}
#endif /* CONFIG_SUSPEND */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index 0040deaf8a83..333780491867 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -97,7 +97,7 @@ struct amdgpu_amdkfd_fence {
struct amdgpu_kfd_dev {
struct kfd_dev *dev;
- uint64_t vram_used;
+ int64_t vram_used;
uint64_t vram_used_aligned;
bool init_complete;
struct work_struct reset_work;
@@ -271,9 +271,9 @@ int amdgpu_amdkfd_get_pcie_bandwidth_mbytes(struct amdgpu_device *adev, bool is_
((struct drm_file *)(drm_priv))->driver_priv)->vm)
int amdgpu_amdkfd_gpuvm_set_vm_pasid(struct amdgpu_device *adev,
- struct file *filp, u32 pasid);
+ struct amdgpu_vm *avm, u32 pasid);
int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct amdgpu_device *adev,
- struct file *filp,
+ struct amdgpu_vm *avm,
void **process_info,
struct dma_fence **ef);
void amdgpu_amdkfd_gpuvm_release_process_vm(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 3b5c53712d31..d6320c836251 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -25,6 +25,7 @@
#include <linux/pagemap.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
+#include <drm/ttm/ttm_tt.h>
#include "amdgpu_object.h"
#include "amdgpu_gem.h"
@@ -1430,18 +1431,11 @@ static void amdgpu_amdkfd_gpuvm_unpin_bo(struct amdgpu_bo *bo)
}
int amdgpu_amdkfd_gpuvm_set_vm_pasid(struct amdgpu_device *adev,
- struct file *filp, u32 pasid)
+ struct amdgpu_vm *avm, u32 pasid)
{
- struct amdgpu_fpriv *drv_priv;
- struct amdgpu_vm *avm;
int ret;
- ret = amdgpu_file_to_fpriv(filp, &drv_priv);
- if (ret)
- return ret;
- avm = &drv_priv->vm;
-
/* Free the original amdgpu allocated pasid,
* will be replaced with kfd allocated pasid.
*/
@@ -1458,19 +1452,12 @@ int amdgpu_amdkfd_gpuvm_set_vm_pasid(struct amdgpu_device *adev,
}
int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct amdgpu_device *adev,
- struct file *filp,
+ struct amdgpu_vm *avm,
void **process_info,
struct dma_fence **ef)
{
- struct amdgpu_fpriv *drv_priv;
- struct amdgpu_vm *avm;
int ret;
- ret = amdgpu_file_to_fpriv(filp, &drv_priv);
- if (ret)
- return ret;
- avm = &drv_priv->vm;
-
/* Already a compute VM? */
if (avm->process_info)
return -EINVAL;
@@ -1612,6 +1599,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
struct amdgpu_bo *bo;
struct drm_gem_object *gobj = NULL;
u32 domain, alloc_domain;
+ uint64_t aligned_size;
u64 alloc_flags;
int ret;
@@ -1667,22 +1655,23 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
* the memory.
*/
if ((*mem)->aql_queue)
- size = size >> 1;
+ size >>= 1;
+ aligned_size = PAGE_ALIGN(size);
(*mem)->alloc_flags = flags;
amdgpu_sync_create(&(*mem)->sync);
- ret = amdgpu_amdkfd_reserve_mem_limit(adev, size, flags);
+ ret = amdgpu_amdkfd_reserve_mem_limit(adev, aligned_size, flags);
if (ret) {
pr_debug("Insufficient memory\n");
goto err_reserve_limit;
}
pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s\n",
- va, size, domain_string(alloc_domain));
+ va, (*mem)->aql_queue ? size << 1 : size, domain_string(alloc_domain));
- ret = amdgpu_gem_object_create(adev, size, 1, alloc_domain, alloc_flags,
+ ret = amdgpu_gem_object_create(adev, aligned_size, 1, alloc_domain, alloc_flags,
bo_type, NULL, &gobj);
if (ret) {
pr_debug("Failed to create BO on domain %s. ret %d\n",
@@ -1739,7 +1728,7 @@ err_node_allow:
/* Don't unreserve system mem limit twice */
goto err_reserve_limit;
err_bo_create:
- amdgpu_amdkfd_unreserve_mem_limit(adev, size, flags);
+ amdgpu_amdkfd_unreserve_mem_limit(adev, aligned_size, flags);
err_reserve_limit:
mutex_destroy(&(*mem)->lock);
if (gobj)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
index e4d78491bcc7..ededdc01ca28 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
@@ -28,6 +28,8 @@
struct hmm_range;
+struct drm_file;
+
struct amdgpu_device;
struct amdgpu_bo;
struct amdgpu_bo_va;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index f1a050379190..456e385333b6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -411,17 +411,10 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
return -EINVAL;
}
- err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
- if (err) {
- DRM_ERROR("Failed to request firmware\n");
- return err;
- }
-
- err = amdgpu_ucode_validate(adev->pm.fw);
+ err = amdgpu_ucode_request(adev, &adev->pm.fw, fw_name);
if (err) {
DRM_ERROR("Failed to load firmware \"%s\"", fw_name);
- release_firmware(adev->pm.fw);
- adev->pm.fw = NULL;
+ amdgpu_ucode_release(&adev->pm.fw);
return err;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index 2ebbc6382a06..6be30dcb029d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -25,7 +25,9 @@
*/
#include <drm/display/drm_dp_helper.h>
+#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
+#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_probe_helper.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
@@ -996,13 +998,33 @@ amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force)
}
}
+ if (amdgpu_connector->detected_hpd_without_ddc) {
+ force = true;
+ amdgpu_connector->detected_hpd_without_ddc = false;
+ }
+
if (!force && amdgpu_connector_check_hpd_status_unchanged(connector)) {
ret = connector->status;
goto exit;
}
- if (amdgpu_connector->ddc_bus)
+ if (amdgpu_connector->ddc_bus) {
dret = amdgpu_display_ddc_probe(amdgpu_connector, false);
+
+ /* Sometimes the pins required for the DDC probe on DVI
+ * connectors don't make contact at the same time that the ones
+ * for HPD do. If the DDC probe fails even though we had an HPD
+ * signal, try again later
+ */
+ if (!dret && !force &&
+ amdgpu_display_hpd_sense(adev, amdgpu_connector->hpd.hpd)) {
+ DRM_DEBUG_KMS("hpd detected without ddc, retrying in 1 second\n");
+ amdgpu_connector->detected_hpd_without_ddc = true;
+ schedule_delayed_work(&adev->hotplug_work,
+ msecs_to_jiffies(1000));
+ goto exit;
+ }
+ }
if (dret) {
amdgpu_connector->detected_by_load = false;
amdgpu_connector_free_edid(connector);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 7b5ce00f0602..08eced097bd8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -32,6 +32,8 @@
#include <drm/amdgpu_drm.h>
#include <drm/drm_syncobj.h>
+#include <drm/ttm/ttm_tt.h>
+
#include "amdgpu_cs.h"
#include "amdgpu.h"
#include "amdgpu_trace.h"
@@ -1220,10 +1222,13 @@ static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
* next job actually sees the results from the previous one
* before we start executing on the same scheduler ring.
*/
- if (!s_fence || s_fence->sched != sched)
+ if (!s_fence || s_fence->sched != sched) {
+ dma_fence_put(fence);
continue;
+ }
r = amdgpu_sync_fence(&p->gang_leader->explicit_sync, fence);
+ dma_fence_put(fence);
if (r)
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.h
index 113f39510a72..fb3e3d56d427 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.h
@@ -23,6 +23,8 @@
#ifndef __AMDGPU_CS_H__
#define __AMDGPU_CS_H__
+#include <linux/ww_mutex.h>
+
#include "amdgpu_job.h"
#include "amdgpu_bo_list.h"
#include "amdgpu_ring.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index 0f16d3c09309..f60753f97ac5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -1717,7 +1717,7 @@ no_preempt:
static int amdgpu_debugfs_ib_preempt(void *data, u64 val)
{
- int r, resched, length;
+ int r, length;
struct amdgpu_ring *ring;
struct dma_fence **fences = NULL;
struct amdgpu_device *adev = (struct amdgpu_device *)data;
@@ -1747,8 +1747,6 @@ static int amdgpu_debugfs_ib_preempt(void *data, u64 val)
/* stop the scheduler */
kthread_park(ring->sched.thread);
- resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
-
/* preempt the IB */
r = amdgpu_ring_preempt_ib(ring);
if (r) {
@@ -1785,8 +1783,6 @@ failure:
up_read(&adev->reset_domain->sem);
- ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);
-
pro_end:
kfree(fences);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 2f28a8c02f64..c4a4e2fe6681 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -38,6 +38,7 @@
#include <drm/drm_aperture.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/amdgpu_drm.h>
@@ -163,7 +164,7 @@ static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);
*
* The amdgpu driver provides a sysfs API for reporting the product name
* for the device
- * The file serial_number is used for this and returns the product name
+ * The file product_name is used for this and returns the product name
* as returned from the FRU.
* NOTE: This is only available for certain server cards
*/
@@ -185,7 +186,7 @@ static DEVICE_ATTR(product_name, S_IRUGO,
*
* The amdgpu driver provides a sysfs API for reporting the part number
* for the device
- * The file serial_number is used for this and returns the part number
+ * The file product_number is used for this and returns the part number
* as returned from the FRU.
* NOTE: This is only available for certain server cards
*/
@@ -927,32 +928,33 @@ static int amdgpu_device_asic_init(struct amdgpu_device *adev)
}
/**
- * amdgpu_device_vram_scratch_init - allocate the VRAM scratch page
+ * amdgpu_device_mem_scratch_init - allocate the VRAM scratch page
*
* @adev: amdgpu_device pointer
*
* Allocates a scratch page of VRAM for use by various things in the
* driver.
*/
-static int amdgpu_device_vram_scratch_init(struct amdgpu_device *adev)
+static int amdgpu_device_mem_scratch_init(struct amdgpu_device *adev)
{
- return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
- PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
- &adev->vram_scratch.robj,
- &adev->vram_scratch.gpu_addr,
- (void **)&adev->vram_scratch.ptr);
+ return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
+ &adev->mem_scratch.robj,
+ &adev->mem_scratch.gpu_addr,
+ (void **)&adev->mem_scratch.ptr);
}
/**
- * amdgpu_device_vram_scratch_fini - Free the VRAM scratch page
+ * amdgpu_device_mem_scratch_fini - Free the VRAM scratch page
*
* @adev: amdgpu_device pointer
*
* Frees the VRAM scratch page.
*/
-static void amdgpu_device_vram_scratch_fini(struct amdgpu_device *adev)
+static void amdgpu_device_mem_scratch_fini(struct amdgpu_device *adev)
{
- amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL);
+ amdgpu_bo_free_kernel(&adev->mem_scratch.robj, NULL, NULL);
}
/**
@@ -1984,17 +1986,10 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
}
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
- err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev);
+ err = amdgpu_ucode_request(adev, &adev->firmware.gpu_info_fw, fw_name);
if (err) {
dev_err(adev->dev,
- "Failed to load gpu_info firmware \"%s\"\n",
- fw_name);
- goto out;
- }
- err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw);
- if (err) {
- dev_err(adev->dev,
- "Failed to validate gpu_info firmware \"%s\"\n",
+ "Failed to get gpu_info firmware \"%s\"\n",
fw_name);
goto out;
}
@@ -2081,6 +2076,7 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
struct drm_device *dev = adev_to_drm(adev);
struct pci_dev *parent;
int i, r;
+ bool total;
amdgpu_device_enable_virtual_display(adev);
@@ -2164,6 +2160,7 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID)
adev->pm.pp_feature &= ~PP_OVERDRIVE_MASK;
+ total = true;
for (i = 0; i < adev->num_ip_blocks; i++) {
if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
DRM_ERROR("disabled ip block: %d <%s>\n",
@@ -2177,7 +2174,7 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
} else if (r) {
DRM_ERROR("early_init of IP block <%s> failed %d\n",
adev->ip_blocks[i].version->funcs->name, r);
- return r;
+ total = false;
} else {
adev->ip_blocks[i].status.valid = true;
}
@@ -2208,6 +2205,8 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
}
}
+ if (!total)
+ return -ENODEV;
adev->cg_flags &= amdgpu_cg_mask;
adev->pg_flags &= amdgpu_pg_mask;
@@ -2393,9 +2392,9 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
if (amdgpu_sriov_vf(adev))
amdgpu_virt_exchange_data(adev);
- r = amdgpu_device_vram_scratch_init(adev);
+ r = amdgpu_device_mem_scratch_init(adev);
if (r) {
- DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
+ DRM_ERROR("amdgpu_mem_scratch_init failed %d\n", r);
goto init_failed;
}
r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
@@ -2413,8 +2412,9 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
/* right after GMC hw init, we create CSA */
if (amdgpu_mcbp) {
r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
- AMDGPU_GEM_DOMAIN_VRAM,
- AMDGPU_CSA_SIZE);
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
+ AMDGPU_CSA_SIZE);
if (r) {
DRM_ERROR("allocate CSA failed %d\n", r);
goto init_failed;
@@ -2584,9 +2584,10 @@ int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
if (!adev->ip_blocks[i].status.late_initialized)
continue;
- /* skip CG for GFX on S0ix */
+ /* skip CG for GFX, SDMA on S0ix */
if (adev->in_s0ix &&
- adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX)
+ (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
+ adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA))
continue;
/* skip CG for VCE/UVD, it's handled specially */
if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
@@ -2620,9 +2621,10 @@ int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
if (!adev->ip_blocks[i].status.late_initialized)
continue;
- /* skip PG for GFX on S0ix */
+ /* skip PG for GFX, SDMA on S0ix */
if (adev->in_s0ix &&
- adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX)
+ (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
+ adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA))
continue;
/* skip CG for VCE/UVD, it's handled specially */
if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
@@ -2874,7 +2876,7 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
amdgpu_ucode_free_bo(adev);
amdgpu_free_static_csa(&adev->virt.csa_obj);
amdgpu_device_wb_fini(adev);
- amdgpu_device_vram_scratch_fini(adev);
+ amdgpu_device_mem_scratch_fini(adev);
amdgpu_ib_pool_fini(adev);
}
@@ -3030,6 +3032,24 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_MES))
continue;
+ /* SDMA 5.x+ is part of GFX power domain so it's covered by GFXOFF */
+ if (adev->in_s0ix &&
+ (adev->ip_versions[SDMA0_HWIP][0] >= IP_VERSION(5, 0, 0)) &&
+ (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SDMA))
+ continue;
+
+ /* Once swPSP provides the IMU, RLC FW binaries to TOS during cold-boot.
+ * These are in TMR, hence are expected to be reused by PSP-TOS to reload
+ * from this location and RLC Autoload automatically also gets loaded
+ * from here based on PMFW -> PSP message during re-init sequence.
+ * Therefore, the psp suspend & resume should be skipped to avoid destroy
+ * the TMR and reload FWs again for IMU enabled APU ASICs.
+ */
+ if (amdgpu_in_reset(adev) &&
+ (adev->flags & AMD_IS_APU) && adev->gfx.imu.funcs &&
+ adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
+ continue;
+
/* XXX handle errors */
r = adev->ip_blocks[i].version->funcs->suspend(adev);
/* XXX handle errors */
@@ -3230,15 +3250,6 @@ static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
return r;
}
adev->ip_blocks[i].status.hw = true;
-
- if (adev->in_s0ix && adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
- /* disable gfxoff for IP resume. The gfxoff will be re-enabled in
- * amdgpu_device_resume() after IP resume.
- */
- amdgpu_gfx_off_ctrl(adev, false);
- DRM_DEBUG("will disable gfxoff for re-initializing other blocks\n");
- }
-
}
return 0;
@@ -3997,10 +4008,8 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
}
amdgpu_fence_driver_hw_fini(adev);
- if (adev->mman.initialized) {
- flush_delayed_work(&adev->mman.bdev.wq);
- ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
- }
+ if (adev->mman.initialized)
+ drain_workqueue(adev->mman.bdev.wq);
if (adev->pm_sysfs_en)
amdgpu_pm_sysfs_fini(adev);
@@ -4022,7 +4031,8 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
amdgpu_gart_dummy_page_fini(adev);
- amdgpu_device_unmap_mmio(adev);
+ if (drm_dev_is_unplugged(adev_to_drm(adev)))
+ amdgpu_device_unmap_mmio(adev);
}
@@ -4032,8 +4042,7 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev)
amdgpu_fence_driver_sw_fini(adev);
amdgpu_device_ip_fini(adev);
- release_firmware(adev->firmware.gpu_info_fw);
- adev->firmware.gpu_info_fw = NULL;
+ amdgpu_ucode_release(&adev->firmware.gpu_info_fw);
adev->accel_working = false;
dma_fence_put(rcu_dereference_protected(adev->gang_submit, true));
@@ -4231,13 +4240,6 @@ exit:
/* Make sure IB tests flushed */
flush_delayed_work(&adev->delayed_init_work);
- if (adev->in_s0ix) {
- /* re-enable gfxoff after IP resume. This re-enables gfxoff after
- * it was disabled for IP resume in amdgpu_device_ip_resume_phase2().
- */
- amdgpu_gfx_off_ctrl(adev, true);
- DRM_DEBUG("will enable gfxoff for the mission mode\n");
- }
if (fbcon)
drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, false);
@@ -4268,6 +4270,9 @@ exit:
}
adev->in_suspend = false;
+ if (adev->enable_mes)
+ amdgpu_mes_self_test(adev);
+
if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D0))
DRM_WARN("smart shift update failed\n");
@@ -4618,11 +4623,6 @@ bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
if (!amdgpu_ras_is_poison_mode_supported(adev))
return true;
- if (!amdgpu_device_ip_check_soft_reset(adev)) {
- dev_info(adev->dev,"Timeout, but no hardware hang detected.\n");
- return false;
- }
-
if (amdgpu_sriov_vf(adev))
return true;
@@ -4747,7 +4747,8 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
if (!need_full_reset)
need_full_reset = amdgpu_device_ip_need_full_reset(adev);
- if (!need_full_reset && amdgpu_gpu_recovery) {
+ if (!need_full_reset && amdgpu_gpu_recovery &&
+ amdgpu_device_ip_check_soft_reset(adev)) {
amdgpu_device_ip_pre_soft_reset(adev);
r = amdgpu_device_ip_soft_reset(adev);
amdgpu_device_ip_post_soft_reset(adev);
@@ -5873,8 +5874,8 @@ void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev,
int amdgpu_in_reset(struct amdgpu_device *adev)
{
return atomic_read(&adev->reset_domain->in_gpu_reset);
- }
-
+}
+
/**
* amdgpu_device_halt() - bring hardware to some kind of halt state
*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
index 1bbd56029a4f..b719852daa07 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
@@ -33,6 +33,7 @@
#include "gmc_v9_0.h"
#include "df_v1_7.h"
#include "df_v3_6.h"
+#include "df_v4_3.h"
#include "nbio_v6_1.h"
#include "nbio_v7_0.h"
#include "nbio_v7_4.h"
@@ -2329,6 +2330,9 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(3, 5, 2):
adev->df.funcs = &df_v1_7_funcs;
break;
+ case IP_VERSION(4, 3, 0):
+ adev->df.funcs = &df_v4_3_funcs;
+ break;
default:
break;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index b22471b3bd63..503f89a766c3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -42,6 +42,7 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_modeset_helper.h>
#include <drm/drm_vblank.h>
/**
@@ -63,7 +64,7 @@
void amdgpu_display_hotplug_work_func(struct work_struct *work)
{
struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
- hotplug_work);
+ hotplug_work.work);
struct drm_device *dev = adev_to_drm(adev);
struct drm_mode_config *mode_config = &dev->mode_config;
struct drm_connector *connector;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index 271e30e34d93..0c001bb8fc2b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -37,6 +37,7 @@
#include "amdgpu_dma_buf.h"
#include "amdgpu_xgmi.h"
#include <drm/amdgpu_drm.h>
+#include <drm/ttm/ttm_tt.h>
#include <linux/dma-buf.h>
#include <linux/dma-fence-array.h>
#include <linux/pci-p2pdma.h>
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index cd4caaa29528..86fbb4138285 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -38,7 +38,6 @@
#include <linux/mmu_notifier.h>
#include <linux/suspend.h>
#include <linux/cc_platform.h>
-#include <linux/fb.h>
#include <linux/dynamic_debug.h>
#include "amdgpu.h"
@@ -104,13 +103,16 @@
* - 3.46.0 - To enable hot plug amdgpu tests in libdrm
* - 3.47.0 - Add AMDGPU_GEM_CREATE_DISCARDABLE and AMDGPU_VM_NOALLOC flags
* - 3.48.0 - Add IP discovery version info to HW INFO
- * 3.49.0 - Add gang submit into CS IOCTL
+ * - 3.49.0 - Add gang submit into CS IOCTL
+ * - 3.50.0 - Update AMDGPU_INFO_DEV_INFO IOCTL for minimum engine and memory clock
+ * Update AMDGPU_INFO_SENSOR IOCTL for PEAK_PSTATE engine and memory clock
+ * 3.51.0 - Return the PCIe gen and lanes from the INFO ioctl
*/
#define KMS_DRIVER_MAJOR 3
-#define KMS_DRIVER_MINOR 49
+#define KMS_DRIVER_MINOR 51
#define KMS_DRIVER_PATCHLEVEL 0
-int amdgpu_vram_limit;
+unsigned int amdgpu_vram_limit = UINT_MAX;
int amdgpu_vis_vram_limit;
int amdgpu_gart_size = -1; /* auto */
int amdgpu_gtt_size = -1; /* auto */
@@ -186,6 +188,7 @@ int amdgpu_num_kcq = -1;
int amdgpu_smartshift_bias;
int amdgpu_use_xgmi_p2p = 1;
int amdgpu_vcnfw_log;
+int amdgpu_sg_display = -1; /* auto */
static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work);
@@ -932,6 +935,16 @@ MODULE_PARM_DESC(vcnfw_log, "Enable vcnfw log(0 = disable (default value), 1 = e
module_param_named(vcnfw_log, amdgpu_vcnfw_log, int, 0444);
/**
+ * DOC: sg_display (int)
+ * Disable S/G (scatter/gather) display (i.e., display from system memory).
+ * This option is only relevant on APUs. Set this option to 0 to disable
+ * S/G display if you experience flickering or other issues under memory
+ * pressure and report the issue.
+ */
+MODULE_PARM_DESC(sg_display, "S/G Display (-1 = auto (default), 0 = disable)");
+module_param_named(sg_display, amdgpu_sg_display, int, 0444);
+
+/**
* DOC: smu_pptable_id (int)
* Used to override pptable id. id = 0 use VBIOS pptable.
* id > 0 use the soft pptable with specicfied id.
@@ -2225,6 +2238,8 @@ amdgpu_pci_remove(struct pci_dev *pdev)
struct drm_device *dev = pci_get_drvdata(pdev);
struct amdgpu_device *adev = drm_to_adev(dev);
+ drm_dev_unplug(dev);
+
if (adev->pm.rpm_mode != AMDGPU_RUNPM_NONE) {
pm_runtime_get_sync(dev->dev);
pm_runtime_forbid(dev->dev);
@@ -2264,8 +2279,6 @@ amdgpu_pci_remove(struct pci_dev *pdev)
amdgpu_driver_unload_kms(dev);
- drm_dev_unplug(dev);
-
/*
* Flush any in flight DMA operations from device.
* Clear the Bus Master Enable bit and then wait on the PCIe Device
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c
index c96e458ed088..27a782a9dc72 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c
@@ -24,7 +24,6 @@
* Alex Deucher
*/
-#include <drm/drm_crtc_helper.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_connectors.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.h
index 41a4c7056729..e86834bfea1d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.h
@@ -30,7 +30,6 @@
#include <linux/rbtree.h>
#include <drm/gpu_scheduler.h>
#include <drm/drm_file.h>
-#include <drm/ttm/ttm_bo_driver.h>
#include <linux/sched/mm.h>
#include "amdgpu_sync.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 00444203220d..faff4a3f96e6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -618,7 +618,13 @@ void amdgpu_fence_driver_sw_fini(struct amdgpu_device *adev)
if (!ring || !ring->fence_drv.initialized)
continue;
- if (!ring->no_scheduler)
+ /*
+ * Notice we check for sched.ops since there's some
+ * override on the meaning of sched.ready by amdgpu.
+ * The natural check would be sched.ready, which is
+ * set as drm_sched_init() finishes...
+ */
+ if (ring->sched.ops)
drm_sched_fini(&ring->sched);
for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index bb7350ea1d75..d8e683688daa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -34,6 +34,7 @@
#include <drm/amdgpu_drm.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_ttm_helper.h>
+#include <drm/ttm/ttm_tt.h>
#include "amdgpu.h"
#include "amdgpu_display.h"
@@ -61,10 +62,10 @@ static vm_fault_t amdgpu_gem_fault(struct vm_fault *vmf)
goto unlock;
}
- ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
- TTM_BO_VM_NUM_PREFAULT);
+ ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
+ TTM_BO_VM_NUM_PREFAULT);
- drm_dev_exit(idx);
+ drm_dev_exit(idx);
} else {
ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot);
}
@@ -257,7 +258,7 @@ static int amdgpu_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_str
*/
if (is_cow_mapping(vma->vm_flags) &&
!(vma->vm_flags & VM_ACCESS_FLAGS))
- vma->vm_flags &= ~VM_MAYWRITE;
+ vm_flags_clear(vma, VM_MAYWRITE);
return drm_gem_ttm_mmap(obj, vma);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index 3380daf42da8..35ed46b9249c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -375,8 +375,11 @@ int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev,
* KIQ MQD no matter SRIOV or Bare-metal
*/
r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM, &ring->mqd_obj,
- &ring->mqd_gpu_addr, &ring->mqd_ptr);
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
+ &ring->mqd_obj,
+ &ring->mqd_gpu_addr,
+ &ring->mqd_ptr);
if (r) {
dev_warn(adev->dev, "failed to create ring mqd ob (%d)", r);
return r;
@@ -696,6 +699,50 @@ late_fini:
return r;
}
+int amdgpu_gfx_ras_sw_init(struct amdgpu_device *adev)
+{
+ int err = 0;
+ struct amdgpu_gfx_ras *ras = NULL;
+
+ /* adev->gfx.ras is NULL, which means gfx does not
+ * support ras function, then do nothing here.
+ */
+ if (!adev->gfx.ras)
+ return 0;
+
+ ras = adev->gfx.ras;
+
+ err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
+ if (err) {
+ dev_err(adev->dev, "Failed to register gfx ras block!\n");
+ return err;
+ }
+
+ strcpy(ras->ras_block.ras_comm.name, "gfx");
+ ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__GFX;
+ ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
+ adev->gfx.ras_if = &ras->ras_block.ras_comm;
+
+ /* If not define special ras_late_init function, use gfx default ras_late_init */
+ if (!ras->ras_block.ras_late_init)
+ ras->ras_block.ras_late_init = amdgpu_ras_block_late_init;
+
+ /* If not defined special ras_cb function, use default ras_cb */
+ if (!ras->ras_block.ras_cb)
+ ras->ras_block.ras_cb = amdgpu_gfx_process_ras_data_cb;
+
+ return 0;
+}
+
+int amdgpu_gfx_poison_consumption_handler(struct amdgpu_device *adev,
+ struct amdgpu_iv_entry *entry)
+{
+ if (adev->gfx.ras && adev->gfx.ras->poison_consumption_handler)
+ return adev->gfx.ras->poison_consumption_handler(adev, entry);
+
+ return 0;
+}
+
int amdgpu_gfx_process_ras_data_cb(struct amdgpu_device *adev,
void *err_data,
struct amdgpu_iv_entry *entry)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index b3df4787877e..86ec9d0d12c8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -210,6 +210,11 @@ struct amdgpu_gfx_ras {
struct amdgpu_ras_block_object ras_block;
void (*enable_watchdog_timer)(struct amdgpu_device *adev);
bool (*query_utcl2_poison_status)(struct amdgpu_device *adev);
+ int (*rlc_gc_fed_irq)(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry);
+ int (*poison_consumption_handler)(struct amdgpu_device *adev,
+ struct amdgpu_iv_entry *entry);
};
struct amdgpu_gfx_funcs {
@@ -323,6 +328,7 @@ struct amdgpu_gfx {
struct amdgpu_irq_src priv_inst_irq;
struct amdgpu_irq_src cp_ecc_error_irq;
struct amdgpu_irq_src sq_irq;
+ struct amdgpu_irq_src rlc_gc_fed_irq;
struct sq_work sq_work;
/* gfx status */
@@ -432,4 +438,7 @@ void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v);
int amdgpu_gfx_get_num_kcq(struct amdgpu_device *adev);
void amdgpu_gfx_cp_init_microcode(struct amdgpu_device *adev, uint32_t ucode_id);
+int amdgpu_gfx_ras_sw_init(struct amdgpu_device *adev);
+int amdgpu_gfx_poison_consumption_handler(struct amdgpu_device *adev,
+ struct amdgpu_iv_entry *entry);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index 02a4c93673ce..94f10ac0eef7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -35,6 +35,7 @@
#include "amdgpu_xgmi.h"
#include <drm/drm_drv.h>
+#include <drm/ttm/ttm_tt.h>
/**
* amdgpu_gmc_pdb0_alloc - allocate vram for pdb0
@@ -201,13 +202,20 @@ uint64_t amdgpu_gmc_agp_addr(struct ttm_buffer_object *bo)
void amdgpu_gmc_vram_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc,
u64 base)
{
+ uint64_t vis_limit = (uint64_t)amdgpu_vis_vram_limit << 20;
uint64_t limit = (uint64_t)amdgpu_vram_limit << 20;
mc->vram_start = base;
mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
- if (limit && limit < mc->real_vram_size)
+ if (limit < mc->real_vram_size)
mc->real_vram_size = limit;
+ if (vis_limit && vis_limit < mc->visible_vram_size)
+ mc->visible_vram_size = vis_limit;
+
+ if (mc->real_vram_size < mc->visible_vram_size)
+ mc->visible_vram_size = mc->real_vram_size;
+
if (mc->xgmi.num_physical_nodes == 0) {
mc->fb_start = mc->vram_start;
mc->fb_end = mc->vram_end;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index a6aef488a822..d0a1cc88832c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -45,7 +45,6 @@
#include <linux/irq.h>
#include <linux/pci.h>
-#include <drm/drm_crtc_helper.h>
#include <drm/drm_vblank.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_drv.h>
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 7aa7e52ca784..ca945055e683 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -43,6 +43,7 @@
#include "amdgpu_gem.h"
#include "amdgpu_display.h"
#include "amdgpu_ras.h"
+#include "amd_pcie.h"
void amdgpu_unregister_gpu_instance(struct amdgpu_device *adev)
{
@@ -767,6 +768,7 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
case AMDGPU_INFO_DEV_INFO: {
struct drm_amdgpu_info_device *dev_info;
uint64_t vm_size;
+ uint32_t pcie_gen_mask;
int ret;
dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL);
@@ -785,15 +787,20 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
if (adev->pm.dpm_enabled) {
dev_info->max_engine_clock = amdgpu_dpm_get_sclk(adev, false) * 10;
dev_info->max_memory_clock = amdgpu_dpm_get_mclk(adev, false) * 10;
+ dev_info->min_engine_clock = amdgpu_dpm_get_sclk(adev, true) * 10;
+ dev_info->min_memory_clock = amdgpu_dpm_get_mclk(adev, true) * 10;
} else {
- dev_info->max_engine_clock = adev->clock.default_sclk * 10;
- dev_info->max_memory_clock = adev->clock.default_mclk * 10;
+ dev_info->max_engine_clock =
+ dev_info->min_engine_clock =
+ adev->clock.default_sclk * 10;
+ dev_info->max_memory_clock =
+ dev_info->min_memory_clock =
+ adev->clock.default_mclk * 10;
}
dev_info->enabled_rb_pipes_mask = adev->gfx.config.backend_enable_mask;
dev_info->num_rb_pipes = adev->gfx.config.max_backends_per_se *
adev->gfx.config.max_shader_engines;
dev_info->num_hw_gfx_contexts = adev->gfx.config.max_hw_contexts;
- dev_info->_pad = 0;
dev_info->ids_flags = 0;
if (adev->flags & AMD_IS_APU)
dev_info->ids_flags |= AMDGPU_IDS_FLAGS_FUSION;
@@ -847,6 +854,17 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
dev_info->tcc_disabled_mask = adev->gfx.config.tcc_disabled_mask;
+ /* Combine the chip gen mask with the platform (CPU/mobo) mask. */
+ pcie_gen_mask = adev->pm.pcie_gen_mask & (adev->pm.pcie_gen_mask >> 16);
+ dev_info->pcie_gen = fls(pcie_gen_mask);
+ dev_info->pcie_num_lanes =
+ adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 ? 32 :
+ adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 ? 16 :
+ adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 ? 12 :
+ adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 ? 8 :
+ adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 ? 4 :
+ adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 ? 2 : 1;
+
ret = copy_to_user(out, dev_info,
min((size_t)size, sizeof(*dev_info))) ? -EFAULT : 0;
kfree(dev_info);
@@ -1014,6 +1032,24 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
}
ui32 /= 100;
break;
+ case AMDGPU_INFO_SENSOR_PEAK_PSTATE_GFX_SCLK:
+ /* get peak pstate sclk in Mhz */
+ if (amdgpu_dpm_read_sensor(adev,
+ AMDGPU_PP_SENSOR_PEAK_PSTATE_SCLK,
+ (void *)&ui32, &ui32_size)) {
+ return -EINVAL;
+ }
+ ui32 /= 100;
+ break;
+ case AMDGPU_INFO_SENSOR_PEAK_PSTATE_GFX_MCLK:
+ /* get peak pstate mclk in Mhz */
+ if (amdgpu_dpm_read_sensor(adev,
+ AMDGPU_PP_SENSOR_PEAK_PSTATE_MCLK,
+ (void *)&ui32, &ui32_size)) {
+ return -EINVAL;
+ }
+ ui32 /= 100;
+ break;
default:
DRM_DEBUG_KMS("Invalid request %d\n",
info->sensor_info.type);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
index 0c546245793b..82e27bd4f038 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
@@ -21,6 +21,8 @@
*
*/
+#include <linux/firmware.h>
+
#include "amdgpu_mes.h"
#include "amdgpu.h"
#include "soc15_common.h"
@@ -1423,3 +1425,60 @@ error_pasid:
kfree(vm);
return 0;
}
+
+int amdgpu_mes_init_microcode(struct amdgpu_device *adev, int pipe)
+{
+ const struct mes_firmware_header_v1_0 *mes_hdr;
+ struct amdgpu_firmware_info *info;
+ char ucode_prefix[30];
+ char fw_name[40];
+ int r;
+
+ amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes%s.bin",
+ ucode_prefix,
+ pipe == AMDGPU_MES_SCHED_PIPE ? "" : "1");
+ r = amdgpu_ucode_request(adev, &adev->mes.fw[pipe], fw_name);
+ if (r)
+ goto out;
+
+ mes_hdr = (const struct mes_firmware_header_v1_0 *)
+ adev->mes.fw[pipe]->data;
+ adev->mes.uc_start_addr[pipe] =
+ le32_to_cpu(mes_hdr->mes_uc_start_addr_lo) |
+ ((uint64_t)(le32_to_cpu(mes_hdr->mes_uc_start_addr_hi)) << 32);
+ adev->mes.data_start_addr[pipe] =
+ le32_to_cpu(mes_hdr->mes_data_start_addr_lo) |
+ ((uint64_t)(le32_to_cpu(mes_hdr->mes_data_start_addr_hi)) << 32);
+
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ int ucode, ucode_data;
+
+ if (pipe == AMDGPU_MES_SCHED_PIPE) {
+ ucode = AMDGPU_UCODE_ID_CP_MES;
+ ucode_data = AMDGPU_UCODE_ID_CP_MES_DATA;
+ } else {
+ ucode = AMDGPU_UCODE_ID_CP_MES1;
+ ucode_data = AMDGPU_UCODE_ID_CP_MES1_DATA;
+ }
+
+ info = &adev->firmware.ucode[ucode];
+ info->ucode_id = ucode;
+ info->fw = adev->mes.fw[pipe];
+ adev->firmware.fw_size +=
+ ALIGN(le32_to_cpu(mes_hdr->mes_ucode_size_bytes),
+ PAGE_SIZE);
+
+ info = &adev->firmware.ucode[ucode_data];
+ info->ucode_id = ucode_data;
+ info->fw = adev->mes.fw[pipe];
+ adev->firmware.fw_size +=
+ ALIGN(le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes),
+ PAGE_SIZE);
+ }
+
+ return 0;
+out:
+ amdgpu_ucode_release(&adev->mes.fw[pipe]);
+ return r;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
index 97c05d08a551..547ec35691fa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
@@ -306,6 +306,7 @@ struct amdgpu_mes_funcs {
int amdgpu_mes_ctx_get_offs(struct amdgpu_ring *ring, unsigned int id_offs);
+int amdgpu_mes_init_microcode(struct amdgpu_device *adev, int pipe);
int amdgpu_mes_init(struct amdgpu_device *adev);
void amdgpu_mes_fini(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index 8a39300b1a84..32fe05c810c6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -35,7 +35,6 @@
#include <drm/drm_edid.h>
#include <drm/drm_encoder.h>
#include <drm/drm_fixed.h>
-#include <drm/drm_crtc_helper.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_probe_helper.h>
#include <linux/i2c.h>
@@ -534,6 +533,7 @@ struct amdgpu_connector {
void *con_priv;
bool dac_load_detect;
bool detected_by_load; /* if the connection status was determined by load */
+ bool detected_hpd_without_ddc; /* if an HPD signal was detected on DVI, but ddc probing failed */
uint16_t connector_object_id;
struct amdgpu_hpd hpd;
struct amdgpu_router router;
@@ -549,8 +549,8 @@ struct amdgpu_mst_connector {
struct drm_dp_mst_topology_mgr mst_mgr;
struct amdgpu_dm_dp_aux dm_dp_aux;
- struct drm_dp_mst_port *port;
- struct amdgpu_connector *mst_port;
+ struct drm_dp_mst_port *mst_output_port;
+ struct amdgpu_connector *mst_root;
bool is_mst_connector;
struct amdgpu_encoder *mst_encoder;
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 25a68d8888e0..981010de0a28 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -1574,9 +1574,9 @@ u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m)
attachment = READ_ONCE(bo->tbo.base.import_attach);
if (attachment)
- seq_printf(m, " imported from %p", dma_buf);
+ seq_printf(m, " imported from ino:%lu", file_inode(dma_buf->file)->i_ino);
else if (dma_buf)
- seq_printf(m, " exported as %p", dma_buf);
+ seq_printf(m, " exported as ino:%lu", file_inode(dma_buf->file)->i_ino);
amdgpu_bo_print_flag(m, bo, CPU_ACCESS_REQUIRED);
amdgpu_bo_print_flag(m, bo, NO_CPU_ACCESS);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 7a2fc920739b..15e601f09648 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -66,7 +66,8 @@ static int psp_ring_init(struct psp_context *psp,
/* allocate 4k Page of Local Frame Buffer memory for ring */
ring->ring_size = 0x1000;
ret = amdgpu_bo_create_kernel(adev, ring->ring_size, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
&adev->firmware.rbuf,
&ring->ring_mem_mc_addr,
(void **)&ring->ring_mem);
@@ -122,6 +123,38 @@ static void psp_check_pmfw_centralized_cstate_management(struct psp_context *psp
}
}
+static int psp_init_sriov_microcode(struct psp_context *psp)
+{
+ struct amdgpu_device *adev = psp->adev;
+ char ucode_prefix[30];
+ int ret = 0;
+
+ amdgpu_ucode_ip_version_decode(adev, MP0_HWIP, ucode_prefix, sizeof(ucode_prefix));
+
+ switch (adev->ip_versions[MP0_HWIP][0]) {
+ case IP_VERSION(9, 0, 0):
+ case IP_VERSION(11, 0, 7):
+ case IP_VERSION(11, 0, 9):
+ adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
+ ret = psp_init_cap_microcode(psp, ucode_prefix);
+ break;
+ case IP_VERSION(13, 0, 2):
+ adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
+ ret = psp_init_cap_microcode(psp, ucode_prefix);
+ ret &= psp_init_ta_microcode(psp, ucode_prefix);
+ break;
+ case IP_VERSION(13, 0, 0):
+ adev->virt.autoload_ucode_id = 0;
+ break;
+ case IP_VERSION(13, 0, 10):
+ adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MES1_DATA;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return ret;
+}
+
static int psp_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -192,7 +225,10 @@ static int psp_early_init(void *handle)
psp_check_pmfw_centralized_cstate_management(psp);
- return 0;
+ if (amdgpu_sriov_vf(adev))
+ return psp_init_sriov_microcode(psp);
+ else
+ return psp_init_microcode(psp);
}
void psp_ta_free_shared_buf(struct ta_mem_context *mem_ctx)
@@ -300,7 +336,7 @@ static bool psp_get_runtime_db_entry(struct amdgpu_device *adev,
if (db_header.cookie != PSP_RUNTIME_DB_COOKIE_ID) {
/* runtime db doesn't exist, exit */
- dev_warn(adev->dev, "PSP runtime database doesn't exist\n");
+ dev_dbg(adev->dev, "PSP runtime database doesn't exist\n");
return false;
}
@@ -350,42 +386,6 @@ static bool psp_get_runtime_db_entry(struct amdgpu_device *adev,
return ret;
}
-static int psp_init_sriov_microcode(struct psp_context *psp)
-{
- struct amdgpu_device *adev = psp->adev;
- int ret = 0;
-
- switch (adev->ip_versions[MP0_HWIP][0]) {
- case IP_VERSION(9, 0, 0):
- adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
- ret = psp_init_cap_microcode(psp, "vega10");
- break;
- case IP_VERSION(11, 0, 9):
- adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
- ret = psp_init_cap_microcode(psp, "navi12");
- break;
- case IP_VERSION(11, 0, 7):
- adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
- ret = psp_init_cap_microcode(psp, "sienna_cichlid");
- break;
- case IP_VERSION(13, 0, 2):
- adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MEC2;
- ret = psp_init_cap_microcode(psp, "aldebaran");
- ret &= psp_init_ta_microcode(psp, "aldebaran");
- break;
- case IP_VERSION(13, 0, 0):
- adev->virt.autoload_ucode_id = 0;
- break;
- case IP_VERSION(13, 0, 10):
- adev->virt.autoload_ucode_id = AMDGPU_UCODE_ID_CP_MES1_DATA;
- break;
- default:
- BUG();
- break;
- }
- return ret;
-}
-
static int psp_sw_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -401,15 +401,6 @@ static int psp_sw_init(void *handle)
ret = -ENOMEM;
}
- if (amdgpu_sriov_vf(adev))
- ret = psp_init_sriov_microcode(psp);
- else
- ret = psp_init_microcode(psp);
- if (ret) {
- DRM_ERROR("Failed to load psp firmware!\n");
- return ret;
- }
-
adev->psp.xgmi_context.supports_extended_data =
!adev->gmc.xgmi.connected_to_cpu &&
adev->ip_versions[MP0_HWIP][0] == IP_VERSION(13, 0, 2);
@@ -514,20 +505,11 @@ static int psp_sw_fini(void *handle)
psp_memory_training_fini(psp);
- release_firmware(psp->sos_fw);
- psp->sos_fw = NULL;
-
- release_firmware(psp->asd_fw);
- psp->asd_fw = NULL;
-
- release_firmware(psp->ta_fw);
- psp->ta_fw = NULL;
-
- release_firmware(psp->cap_fw);
- psp->cap_fw = NULL;
-
- release_firmware(psp->toc_fw);
- psp->toc_fw = NULL;
+ amdgpu_ucode_release(&psp->sos_fw);
+ amdgpu_ucode_release(&psp->asd_fw);
+ amdgpu_ucode_release(&psp->ta_fw);
+ amdgpu_ucode_release(&psp->cap_fw);
+ amdgpu_ucode_release(&psp->toc_fw);
if (adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 0) ||
adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 7))
@@ -624,12 +606,22 @@ psp_cmd_submit_buf(struct psp_context *psp,
int timeout = 20000;
bool ras_intr = false;
bool skip_unsupport = false;
+ bool dev_entered;
if (psp->adev->no_hw_access)
return 0;
- if (!drm_dev_enter(adev_to_drm(psp->adev), &idx))
- return 0;
+ dev_entered = drm_dev_enter(adev_to_drm(psp->adev), &idx);
+ /*
+ * We allow sending PSP messages LOAD_ASD and UNLOAD_TA without acquiring
+ * a lock in drm_dev_enter during driver unload because we must call
+ * drm_dev_unplug as the beginning of unload driver sequence . It is very
+ * crucial that userspace can't access device instances anymore.
+ */
+ if (!dev_entered)
+ WARN_ON(psp->cmd_buf_mem->cmd_id != GFX_CMD_ID_LOAD_ASD &&
+ psp->cmd_buf_mem->cmd_id != GFX_CMD_ID_UNLOAD_TA &&
+ psp->cmd_buf_mem->cmd_id != GFX_CMD_ID_INVOKE_CMD);
memset(psp->cmd_buf_mem, 0, PSP_CMD_BUFFER_SIZE);
@@ -694,7 +686,8 @@ psp_cmd_submit_buf(struct psp_context *psp,
}
exit:
- drm_dev_exit(idx);
+ if (dev_entered)
+ drm_dev_exit(idx);
return ret;
}
@@ -797,9 +790,13 @@ static int psp_tmr_init(struct psp_context *psp)
if (!psp->tmr_bo) {
pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
- ret = amdgpu_bo_create_kernel(psp->adev, tmr_size, PSP_TMR_ALIGNMENT,
- AMDGPU_GEM_DOMAIN_VRAM,
- &psp->tmr_bo, &psp->tmr_mc_addr, pptr);
+ ret = amdgpu_bo_create_kernel(psp->adev, tmr_size,
+ PSP_TMR_ALIGNMENT,
+ AMDGPU_HAS_VRAM(psp->adev) ?
+ AMDGPU_GEM_DOMAIN_VRAM :
+ AMDGPU_GEM_DOMAIN_GTT,
+ &psp->tmr_bo, &psp->tmr_mc_addr,
+ pptr);
}
return ret;
@@ -1092,7 +1089,8 @@ int psp_ta_init_shared_buf(struct psp_context *psp,
* physical) for ta to host memory
*/
return amdgpu_bo_create_kernel(psp->adev, mem_ctx->shared_mem_size,
- PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
&mem_ctx->shared_bo,
&mem_ctx->shared_mc_addr,
&mem_ctx->shared_buf);
@@ -1901,7 +1899,7 @@ out_unlock:
static int psp_securedisplay_initialize(struct psp_context *psp)
{
int ret;
- struct securedisplay_cmd *securedisplay_cmd;
+ struct ta_securedisplay_cmd *securedisplay_cmd;
/*
* TODO: bypass the initialize in sriov for now
@@ -2908,25 +2906,15 @@ int psp_ring_cmd_submit(struct psp_context *psp,
return 0;
}
-int psp_init_asd_microcode(struct psp_context *psp,
- const char *chip_name)
+int psp_init_asd_microcode(struct psp_context *psp, const char *chip_name)
{
struct amdgpu_device *adev = psp->adev;
char fw_name[PSP_FW_NAME_LEN];
const struct psp_firmware_header_v1_0 *asd_hdr;
int err = 0;
- if (!chip_name) {
- dev_err(adev->dev, "invalid chip name for asd microcode\n");
- return -EINVAL;
- }
-
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_asd.bin", chip_name);
- err = request_firmware(&adev->psp.asd_fw, fw_name, adev->dev);
- if (err)
- goto out;
-
- err = amdgpu_ucode_validate(adev->psp.asd_fw);
+ err = amdgpu_ucode_request(adev, &adev->psp.asd_fw, fw_name);
if (err)
goto out;
@@ -2938,31 +2926,19 @@ int psp_init_asd_microcode(struct psp_context *psp,
le32_to_cpu(asd_hdr->header.ucode_array_offset_bytes);
return 0;
out:
- dev_err(adev->dev, "fail to initialize asd microcode\n");
- release_firmware(adev->psp.asd_fw);
- adev->psp.asd_fw = NULL;
+ amdgpu_ucode_release(&adev->psp.asd_fw);
return err;
}
-int psp_init_toc_microcode(struct psp_context *psp,
- const char *chip_name)
+int psp_init_toc_microcode(struct psp_context *psp, const char *chip_name)
{
struct amdgpu_device *adev = psp->adev;
char fw_name[PSP_FW_NAME_LEN];
const struct psp_firmware_header_v1_0 *toc_hdr;
int err = 0;
- if (!chip_name) {
- dev_err(adev->dev, "invalid chip name for toc microcode\n");
- return -EINVAL;
- }
-
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_toc.bin", chip_name);
- err = request_firmware(&adev->psp.toc_fw, fw_name, adev->dev);
- if (err)
- goto out;
-
- err = amdgpu_ucode_validate(adev->psp.toc_fw);
+ err = amdgpu_ucode_request(adev, &adev->psp.toc_fw, fw_name);
if (err)
goto out;
@@ -2974,9 +2950,7 @@ int psp_init_toc_microcode(struct psp_context *psp,
le32_to_cpu(toc_hdr->header.ucode_array_offset_bytes);
return 0;
out:
- dev_err(adev->dev, "fail to request/validate toc microcode\n");
- release_firmware(adev->psp.toc_fw);
- adev->psp.toc_fw = NULL;
+ amdgpu_ucode_release(&adev->psp.toc_fw);
return err;
}
@@ -3107,8 +3081,7 @@ static int psp_init_sos_base_fw(struct amdgpu_device *adev)
return 0;
}
-int psp_init_sos_microcode(struct psp_context *psp,
- const char *chip_name)
+int psp_init_sos_microcode(struct psp_context *psp, const char *chip_name)
{
struct amdgpu_device *adev = psp->adev;
char fw_name[PSP_FW_NAME_LEN];
@@ -3121,17 +3094,8 @@ int psp_init_sos_microcode(struct psp_context *psp,
uint8_t *ucode_array_start_addr;
int fw_index = 0;
- if (!chip_name) {
- dev_err(adev->dev, "invalid chip name for sos microcode\n");
- return -EINVAL;
- }
-
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sos.bin", chip_name);
- err = request_firmware(&adev->psp.sos_fw, fw_name, adev->dev);
- if (err)
- goto out;
-
- err = amdgpu_ucode_validate(adev->psp.sos_fw);
+ err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, fw_name);
if (err)
goto out;
@@ -3203,10 +3167,7 @@ int psp_init_sos_microcode(struct psp_context *psp,
return 0;
out:
- dev_err(adev->dev,
- "failed to init sos firmware\n");
- release_firmware(adev->psp.sos_fw);
- adev->psp.sos_fw = NULL;
+ amdgpu_ucode_release(&adev->psp.sos_fw);
return err;
}
@@ -3272,41 +3233,76 @@ static int parse_ta_bin_descriptor(struct psp_context *psp,
return 0;
}
-int psp_init_ta_microcode(struct psp_context *psp,
- const char *chip_name)
+static int parse_ta_v1_microcode(struct psp_context *psp)
{
+ const struct ta_firmware_header_v1_0 *ta_hdr;
struct amdgpu_device *adev = psp->adev;
- char fw_name[PSP_FW_NAME_LEN];
- const struct ta_firmware_header_v2_0 *ta_hdr;
- int err = 0;
- int ta_index = 0;
- if (!chip_name) {
- dev_err(adev->dev, "invalid chip name for ta microcode\n");
+ ta_hdr = (const struct ta_firmware_header_v1_0 *) adev->psp.ta_fw->data;
+
+ if (le16_to_cpu(ta_hdr->header.header_version_major) != 1)
return -EINVAL;
- }
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name);
- err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev);
- if (err)
- goto out;
+ adev->psp.xgmi_context.context.bin_desc.fw_version =
+ le32_to_cpu(ta_hdr->xgmi.fw_version);
+ adev->psp.xgmi_context.context.bin_desc.size_bytes =
+ le32_to_cpu(ta_hdr->xgmi.size_bytes);
+ adev->psp.xgmi_context.context.bin_desc.start_addr =
+ (uint8_t *)ta_hdr +
+ le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
+
+ adev->psp.ras_context.context.bin_desc.fw_version =
+ le32_to_cpu(ta_hdr->ras.fw_version);
+ adev->psp.ras_context.context.bin_desc.size_bytes =
+ le32_to_cpu(ta_hdr->ras.size_bytes);
+ adev->psp.ras_context.context.bin_desc.start_addr =
+ (uint8_t *)adev->psp.xgmi_context.context.bin_desc.start_addr +
+ le32_to_cpu(ta_hdr->ras.offset_bytes);
+
+ adev->psp.hdcp_context.context.bin_desc.fw_version =
+ le32_to_cpu(ta_hdr->hdcp.fw_version);
+ adev->psp.hdcp_context.context.bin_desc.size_bytes =
+ le32_to_cpu(ta_hdr->hdcp.size_bytes);
+ adev->psp.hdcp_context.context.bin_desc.start_addr =
+ (uint8_t *)ta_hdr +
+ le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
+
+ adev->psp.dtm_context.context.bin_desc.fw_version =
+ le32_to_cpu(ta_hdr->dtm.fw_version);
+ adev->psp.dtm_context.context.bin_desc.size_bytes =
+ le32_to_cpu(ta_hdr->dtm.size_bytes);
+ adev->psp.dtm_context.context.bin_desc.start_addr =
+ (uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr +
+ le32_to_cpu(ta_hdr->dtm.offset_bytes);
+
+ adev->psp.securedisplay_context.context.bin_desc.fw_version =
+ le32_to_cpu(ta_hdr->securedisplay.fw_version);
+ adev->psp.securedisplay_context.context.bin_desc.size_bytes =
+ le32_to_cpu(ta_hdr->securedisplay.size_bytes);
+ adev->psp.securedisplay_context.context.bin_desc.start_addr =
+ (uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr +
+ le32_to_cpu(ta_hdr->securedisplay.offset_bytes);
+
+ adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version);
- err = amdgpu_ucode_validate(adev->psp.ta_fw);
- if (err)
- goto out;
+ return 0;
+}
+
+static int parse_ta_v2_microcode(struct psp_context *psp)
+{
+ const struct ta_firmware_header_v2_0 *ta_hdr;
+ struct amdgpu_device *adev = psp->adev;
+ int err = 0;
+ int ta_index = 0;
ta_hdr = (const struct ta_firmware_header_v2_0 *)adev->psp.ta_fw->data;
- if (le16_to_cpu(ta_hdr->header.header_version_major) != 2) {
- dev_err(adev->dev, "unsupported TA header version\n");
- err = -EINVAL;
- goto out;
- }
+ if (le16_to_cpu(ta_hdr->header.header_version_major) != 2)
+ return -EINVAL;
if (le32_to_cpu(ta_hdr->ta_fw_bin_count) >= UCODE_MAX_PSP_PACKAGING) {
dev_err(adev->dev, "packed TA count exceeds maximum limit\n");
- err = -EINVAL;
- goto out;
+ return -EINVAL;
}
for (ta_index = 0; ta_index < le32_to_cpu(ta_hdr->ta_fw_bin_count); ta_index++) {
@@ -3314,19 +3310,44 @@ int psp_init_ta_microcode(struct psp_context *psp,
&ta_hdr->ta_fw_bin[ta_index],
ta_hdr);
if (err)
- goto out;
+ return err;
}
return 0;
-out:
- dev_err(adev->dev, "fail to initialize ta microcode\n");
- release_firmware(adev->psp.ta_fw);
- adev->psp.ta_fw = NULL;
+}
+
+int psp_init_ta_microcode(struct psp_context *psp, const char *chip_name)
+{
+ const struct common_firmware_header *hdr;
+ struct amdgpu_device *adev = psp->adev;
+ char fw_name[PSP_FW_NAME_LEN];
+ int err;
+
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name);
+ err = amdgpu_ucode_request(adev, &adev->psp.ta_fw, fw_name);
+ if (err)
+ return err;
+
+ hdr = (const struct common_firmware_header *)adev->psp.ta_fw->data;
+ switch (le16_to_cpu(hdr->header_version_major)) {
+ case 1:
+ err = parse_ta_v1_microcode(psp);
+ break;
+ case 2:
+ err = parse_ta_v2_microcode(psp);
+ break;
+ default:
+ dev_err(adev->dev, "unsupported TA header version\n");
+ err = -EINVAL;
+ }
+
+ if (err)
+ amdgpu_ucode_release(&adev->psp.ta_fw);
+
return err;
}
-int psp_init_cap_microcode(struct psp_context *psp,
- const char *chip_name)
+int psp_init_cap_microcode(struct psp_context *psp, const char *chip_name)
{
struct amdgpu_device *adev = psp->adev;
char fw_name[PSP_FW_NAME_LEN];
@@ -3334,28 +3355,20 @@ int psp_init_cap_microcode(struct psp_context *psp,
struct amdgpu_firmware_info *info = NULL;
int err = 0;
- if (!chip_name) {
- dev_err(adev->dev, "invalid chip name for cap microcode\n");
- return -EINVAL;
- }
-
if (!amdgpu_sriov_vf(adev)) {
dev_err(adev->dev, "cap microcode should only be loaded under SRIOV\n");
return -EINVAL;
}
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_cap.bin", chip_name);
- err = request_firmware(&adev->psp.cap_fw, fw_name, adev->dev);
- if (err) {
- dev_warn(adev->dev, "cap microcode does not exist, skip\n");
- err = 0;
- goto out;
- }
-
- err = amdgpu_ucode_validate(adev->psp.cap_fw);
+ err = amdgpu_ucode_request(adev, &adev->psp.cap_fw, fw_name);
if (err) {
+ if (err == -ENODEV) {
+ dev_warn(adev->dev, "cap microcode does not exist, skip\n");
+ err = 0;
+ goto out;
+ }
dev_err(adev->dev, "fail to initialize cap microcode\n");
- goto out;
}
info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CAP];
@@ -3372,8 +3385,7 @@ int psp_init_cap_microcode(struct psp_context *psp,
return 0;
out:
- release_firmware(adev->psp.cap_fw);
- adev->psp.cap_fw = NULL;
+ amdgpu_ucode_release(&adev->psp.cap_fw);
return err;
}
@@ -3444,10 +3456,10 @@ static ssize_t psp_usbc_pd_fw_sysfs_write(struct device *dev,
/* LFB address which is aligned to 1MB boundary per PSP request */
ret = amdgpu_bo_create_kernel(adev, usbc_pd_fw->size, 0x100000,
- AMDGPU_GEM_DOMAIN_VRAM,
- &fw_buf_bo,
- &fw_pri_mc_addr,
- &fw_pri_cpu_addr);
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
+ &fw_buf_bo, &fw_pri_mc_addr,
+ &fw_pri_cpu_addr);
if (ret)
goto rel_buf;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index ad490c1e2f57..6e543558386d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -706,13 +706,23 @@ static int __amdgpu_ras_feature_enable(struct amdgpu_device *adev,
return 0;
}
+static int amdgpu_ras_check_feature_allowed(struct amdgpu_device *adev,
+ struct ras_common_if *head)
+{
+ if (amdgpu_ras_is_feature_allowed(adev, head) ||
+ amdgpu_ras_is_poison_mode_supported(adev))
+ return 1;
+ else
+ return 0;
+}
+
/* wrapper of psp_ras_enable_features */
int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
struct ras_common_if *head, bool enable)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
union ta_ras_cmd_input *info;
- int ret;
+ int ret = 0;
if (!con)
return -EINVAL;
@@ -736,7 +746,8 @@ int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
}
/* Do not enable if it is not allowed. */
- WARN_ON(enable && !amdgpu_ras_is_feature_allowed(adev, head));
+ if (enable && !amdgpu_ras_check_feature_allowed(adev, head))
+ goto out;
/* Only enable ras feature operation handle on host side */
if (head->block == AMDGPU_RAS_BLOCK__GFX &&
@@ -754,7 +765,6 @@ int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
/* setup the obj */
__amdgpu_ras_feature_enable(adev, head, enable);
- ret = 0;
out:
if (head->block == AMDGPU_RAS_BLOCK__GFX)
kfree(info);
@@ -910,9 +920,6 @@ static struct amdgpu_ras_block_object *amdgpu_ras_get_ras_block(struct amdgpu_de
if (block >= AMDGPU_RAS_BLOCK__LAST)
return NULL;
- if (!amdgpu_ras_is_supported(adev, block))
- return NULL;
-
list_for_each_entry_safe(node, tmp, &adev->ras_list, node) {
if (!node->ras_obj) {
dev_warn(adev->dev, "Warning: abnormal ras list node.\n");
@@ -1087,6 +1094,10 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev,
info->head.block,
info->head.sub_block_index);
+ /* inject on guest isn't allowed, return success directly */
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
if (!obj)
return -EINVAL;
@@ -1122,11 +1133,54 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev,
}
/**
- * amdgpu_ras_query_error_count -- Get error counts of all IPs
+ * amdgpu_ras_query_error_count_helper -- Get error counter for specific IP
+ * @adev: pointer to AMD GPU device
+ * @ce_count: pointer to an integer to be set to the count of correctible errors.
+ * @ue_count: pointer to an integer to be set to the count of uncorrectible errors.
+ * @query_info: pointer to ras_query_if
+ *
+ * Return 0 for query success or do nothing, otherwise return an error
+ * on failures
+ */
+static int amdgpu_ras_query_error_count_helper(struct amdgpu_device *adev,
+ unsigned long *ce_count,
+ unsigned long *ue_count,
+ struct ras_query_if *query_info)
+{
+ int ret;
+
+ if (!query_info)
+ /* do nothing if query_info is not specified */
+ return 0;
+
+ ret = amdgpu_ras_query_error_status(adev, query_info);
+ if (ret)
+ return ret;
+
+ *ce_count += query_info->ce_count;
+ *ue_count += query_info->ue_count;
+
+ /* some hardware/IP supports read to clear
+ * no need to explictly reset the err status after the query call */
+ if (adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) &&
+ adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4)) {
+ if (amdgpu_ras_reset_error_status(adev, query_info->head.block))
+ dev_warn(adev->dev,
+ "Failed to reset error counter and error status\n");
+ }
+
+ return 0;
+}
+
+/**
+ * amdgpu_ras_query_error_count -- Get error counts of all IPs or specific IP
* @adev: pointer to AMD GPU device
* @ce_count: pointer to an integer to be set to the count of correctible errors.
* @ue_count: pointer to an integer to be set to the count of uncorrectible
* errors.
+ * @query_info: pointer to ras_query_if if the query request is only for
+ * specific ip block; if info is NULL, then the qurey request is for
+ * all the ip blocks that support query ras error counters/status
*
* If set, @ce_count or @ue_count, count and return the corresponding
* error counts in those integer pointers. Return 0 if the device
@@ -1134,11 +1188,13 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev,
*/
int amdgpu_ras_query_error_count(struct amdgpu_device *adev,
unsigned long *ce_count,
- unsigned long *ue_count)
+ unsigned long *ue_count,
+ struct ras_query_if *query_info)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
struct ras_manager *obj;
unsigned long ce, ue;
+ int ret;
if (!adev->ras_enabled || !con)
return -EOPNOTSUPP;
@@ -1150,26 +1206,23 @@ int amdgpu_ras_query_error_count(struct amdgpu_device *adev,
ce = 0;
ue = 0;
- list_for_each_entry(obj, &con->head, node) {
- struct ras_query_if info = {
- .head = obj->head,
- };
- int res;
-
- res = amdgpu_ras_query_error_status(adev, &info);
- if (res)
- return res;
+ if (!query_info) {
+ /* query all the ip blocks that support ras query interface */
+ list_for_each_entry(obj, &con->head, node) {
+ struct ras_query_if info = {
+ .head = obj->head,
+ };
- if (adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) &&
- adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4)) {
- if (amdgpu_ras_reset_error_status(adev, info.head.block))
- dev_warn(adev->dev, "Failed to reset error counter and error status");
+ ret = amdgpu_ras_query_error_count_helper(adev, &ce, &ue, &info);
}
-
- ce += info.ce_count;
- ue += info.ue_count;
+ } else {
+ /* query specific ip block */
+ ret = amdgpu_ras_query_error_count_helper(adev, &ce, &ue, query_info);
}
+ if (ret)
+ return ret;
+
if (ce_count)
*ce_count = ce;
@@ -1564,14 +1617,14 @@ static void amdgpu_ras_interrupt_poison_consumption_handler(struct ras_manager *
struct amdgpu_ras_block_object *block_obj =
amdgpu_ras_get_ras_block(adev, obj->head.block, 0);
- if (!block_obj || !block_obj->hw_ops)
+ if (!block_obj)
return;
/* both query_poison_status and handle_poison_consumption are optional,
* but at least one of them should be implemented if we need poison
* consumption handler
*/
- if (block_obj->hw_ops->query_poison_status) {
+ if (block_obj->hw_ops && block_obj->hw_ops->query_poison_status) {
poison_stat = block_obj->hw_ops->query_poison_status(adev);
if (!poison_stat) {
/* Not poison consumption interrupt, no need to handle it */
@@ -1585,7 +1638,7 @@ static void amdgpu_ras_interrupt_poison_consumption_handler(struct ras_manager *
if (!adev->gmc.xgmi.connected_to_cpu)
amdgpu_umc_poison_handler(adev, false);
- if (block_obj->hw_ops->handle_poison_consumption)
+ if (block_obj->hw_ops && block_obj->hw_ops->handle_poison_consumption)
poison_stat = block_obj->hw_ops->handle_poison_consumption(adev);
/* gpu reset is fallback for failed and default cases */
@@ -1593,6 +1646,8 @@ static void amdgpu_ras_interrupt_poison_consumption_handler(struct ras_manager *
dev_info(adev->dev, "GPU reset for %s RAS poison consumption is issued!\n",
block_obj->ras_comm.name);
amdgpu_ras_reset_gpu(adev);
+ } else {
+ amdgpu_gfx_poison_consumption_handler(adev, entry);
}
}
@@ -2344,22 +2399,24 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev)
if (amdgpu_atomfirmware_sram_ecc_supported(adev)) {
dev_info(adev->dev, "SRAM ECC is active.\n");
- if (!amdgpu_sriov_vf(adev)) {
+ if (!amdgpu_sriov_vf(adev))
adev->ras_hw_enabled |= ~(1 << AMDGPU_RAS_BLOCK__UMC |
1 << AMDGPU_RAS_BLOCK__DF);
-
- if (adev->ip_versions[VCN_HWIP][0] == IP_VERSION(2, 6, 0) ||
- adev->ip_versions[VCN_HWIP][0] == IP_VERSION(4, 0, 0))
- adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__VCN |
- 1 << AMDGPU_RAS_BLOCK__JPEG);
- else
- adev->ras_hw_enabled &= ~(1 << AMDGPU_RAS_BLOCK__VCN |
- 1 << AMDGPU_RAS_BLOCK__JPEG);
- } else {
+ else
adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__PCIE_BIF |
1 << AMDGPU_RAS_BLOCK__SDMA |
1 << AMDGPU_RAS_BLOCK__GFX);
- }
+
+ /* VCN/JPEG RAS can be supported on both bare metal and
+ * SRIOV environment
+ */
+ if (adev->ip_versions[VCN_HWIP][0] == IP_VERSION(2, 6, 0) ||
+ adev->ip_versions[VCN_HWIP][0] == IP_VERSION(4, 0, 0))
+ adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__VCN |
+ 1 << AMDGPU_RAS_BLOCK__JPEG);
+ else
+ adev->ras_hw_enabled &= ~(1 << AMDGPU_RAS_BLOCK__VCN |
+ 1 << AMDGPU_RAS_BLOCK__JPEG);
} else {
dev_info(adev->dev, "SRAM ECC is not presented.\n");
}
@@ -2395,7 +2452,7 @@ static void amdgpu_ras_counte_dw(struct work_struct *work)
/* Cache new values.
*/
- if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count) == 0) {
+ if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count, NULL) == 0) {
atomic_set(&con->ras_ce_count, ce_count);
atomic_set(&con->ras_ue_count, ue_count);
}
@@ -2405,11 +2462,42 @@ Out:
pm_runtime_put_autosuspend(dev->dev);
}
+static void amdgpu_ras_query_poison_mode(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ bool df_poison, umc_poison;
+
+ /* poison setting is useless on SRIOV guest */
+ if (amdgpu_sriov_vf(adev) || !con)
+ return;
+
+ /* Init poison supported flag, the default value is false */
+ if (adev->gmc.xgmi.connected_to_cpu) {
+ /* enabled by default when GPU is connected to CPU */
+ con->poison_supported = true;
+ } else if (adev->df.funcs &&
+ adev->df.funcs->query_ras_poison_mode &&
+ adev->umc.ras &&
+ adev->umc.ras->query_ras_poison_mode) {
+ df_poison =
+ adev->df.funcs->query_ras_poison_mode(adev);
+ umc_poison =
+ adev->umc.ras->query_ras_poison_mode(adev);
+
+ /* Only poison is set in both DF and UMC, we can support it */
+ if (df_poison && umc_poison)
+ con->poison_supported = true;
+ else if (df_poison != umc_poison)
+ dev_warn(adev->dev,
+ "Poison setting is inconsistent in DF/UMC(%d:%d)!\n",
+ df_poison, umc_poison);
+ }
+}
+
int amdgpu_ras_init(struct amdgpu_device *adev)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
int r;
- bool df_poison, umc_poison;
if (con)
return 0;
@@ -2484,26 +2572,7 @@ int amdgpu_ras_init(struct amdgpu_device *adev)
goto release_con;
}
- /* Init poison supported flag, the default value is false */
- if (adev->gmc.xgmi.connected_to_cpu) {
- /* enabled by default when GPU is connected to CPU */
- con->poison_supported = true;
- }
- else if (adev->df.funcs &&
- adev->df.funcs->query_ras_poison_mode &&
- adev->umc.ras &&
- adev->umc.ras->query_ras_poison_mode) {
- df_poison =
- adev->df.funcs->query_ras_poison_mode(adev);
- umc_poison =
- adev->umc.ras->query_ras_poison_mode(adev);
- /* Only poison is set in both DF and UMC, we can support it */
- if (df_poison && umc_poison)
- con->poison_supported = true;
- else if (df_poison != umc_poison)
- dev_warn(adev->dev, "Poison setting is inconsistent in DF/UMC(%d:%d)!\n",
- df_poison, umc_poison);
- }
+ amdgpu_ras_query_poison_mode(adev);
if (amdgpu_ras_fs_init(adev)) {
r = -EINVAL;
@@ -2564,6 +2633,7 @@ int amdgpu_ras_block_late_init(struct amdgpu_device *adev,
{
struct amdgpu_ras_block_object *ras_obj = NULL;
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_query_if *query_info;
unsigned long ue_count, ce_count;
int r;
@@ -2605,11 +2675,17 @@ int amdgpu_ras_block_late_init(struct amdgpu_device *adev,
/* Those are the cached values at init.
*/
- if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count) == 0) {
+ query_info = kzalloc(sizeof(struct ras_query_if), GFP_KERNEL);
+ if (!query_info)
+ return -ENOMEM;
+ memcpy(&query_info->head, ras_block, sizeof(struct ras_common_if));
+
+ if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count, query_info) == 0) {
atomic_set(&con->ras_ce_count, ce_count);
atomic_set(&con->ras_ue_count, ue_count);
}
+ kfree(query_info);
return 0;
interrupt:
@@ -2946,11 +3022,26 @@ int amdgpu_ras_set_context(struct amdgpu_device *adev, struct amdgpu_ras *ras_co
int amdgpu_ras_is_supported(struct amdgpu_device *adev,
unsigned int block)
{
+ int ret = 0;
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
if (block >= AMDGPU_RAS_BLOCK_COUNT)
return 0;
- return ras && (adev->ras_enabled & (1 << block));
+
+ ret = ras && (adev->ras_enabled & (1 << block));
+
+ /* For the special asic with mem ecc enabled but sram ecc
+ * not enabled, even if the ras block is not supported on
+ * .ras_enabled, if the asic supports poison mode and the
+ * ras block has ras configuration, it can be considered
+ * that the ras block supports ras function.
+ */
+ if (!ret &&
+ amdgpu_ras_is_poison_mode_supported(adev) &&
+ amdgpu_ras_get_ras_block(adev, block, 0))
+ ret = 1;
+
+ return ret;
}
int amdgpu_ras_reset_gpu(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
index bf5a95104ec1..f2ad999993f6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
@@ -540,7 +540,8 @@ void amdgpu_ras_suspend(struct amdgpu_device *adev);
int amdgpu_ras_query_error_count(struct amdgpu_device *adev,
unsigned long *ce_count,
- unsigned long *ue_count);
+ unsigned long *ue_count,
+ struct ras_query_if *query_info);
/* error handling functions */
int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c
index f778466bb9db..6437ead87e5f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c
@@ -24,6 +24,7 @@
#include "amdgpu_reset.h"
#include "aldebaran.h"
#include "sienna_cichlid.h"
+#include "smu_v13_0_10.h"
int amdgpu_reset_add_handler(struct amdgpu_reset_control *reset_ctl,
struct amdgpu_reset_handler *handler)
@@ -44,6 +45,9 @@ int amdgpu_reset_init(struct amdgpu_device *adev)
case IP_VERSION(11, 0, 7):
ret = sienna_cichlid_reset_init(adev);
break;
+ case IP_VERSION(13, 0, 10):
+ ret = smu_v13_0_10_reset_init(adev);
+ break;
default:
break;
}
@@ -62,6 +66,9 @@ int amdgpu_reset_fini(struct amdgpu_device *adev)
case IP_VERSION(11, 0, 7):
ret = sienna_cichlid_reset_fini(adev);
break;
+ case IP_VERSION(13, 0, 10):
+ ret = smu_v13_0_10_reset_fini(adev);
+ break;
default:
break;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index f752c7ae7f60..3989e755a5b4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -295,7 +295,7 @@ struct amdgpu_ring {
#define amdgpu_ring_parse_cs(r, p, job, ib) ((r)->funcs->parse_cs((p), (job), (ib)))
#define amdgpu_ring_patch_cs_in_place(r, p, job, ib) ((r)->funcs->patch_cs_in_place((p), (job), (ib)))
#define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r))
-#define amdgpu_ring_test_ib(r, t) (r)->funcs->test_ib((r), (t))
+#define amdgpu_ring_test_ib(r, t) ((r)->funcs->test_ib ? (r)->funcs->test_ib((r), (t)) : 0)
#define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r))
#define amdgpu_ring_get_wptr(r) (r)->funcs->get_wptr((r))
#define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r))
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
index 012b72d00e04..85fb730d9fc8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
@@ -93,7 +93,8 @@ int amdgpu_gfx_rlc_init_sr(struct amdgpu_device *adev, u32 dws)
/* allocate save restore block */
r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
&adev->gfx.rlc.save_restore_obj,
&adev->gfx.rlc.save_restore_gpu_addr,
(void **)&adev->gfx.rlc.sr_ptr);
@@ -130,7 +131,8 @@ int amdgpu_gfx_rlc_init_csb(struct amdgpu_device *adev)
/* allocate clear state block */
adev->gfx.rlc.clear_state_size = dws = adev->gfx.rlc.funcs->get_csb_size(adev);
r = amdgpu_bo_create_kernel(adev, dws * 4, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
&adev->gfx.rlc.clear_state_obj,
&adev->gfx.rlc.clear_state_gpu_addr,
(void **)&adev->gfx.rlc.cs_ptr);
@@ -156,7 +158,8 @@ int amdgpu_gfx_rlc_init_cpt(struct amdgpu_device *adev)
int r;
r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size,
- PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
&adev->gfx.rlc.cp_table_obj,
&adev->gfx.rlc.cp_table_gpu_addr,
(void **)&adev->gfx.rlc.cp_table_ptr);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
index ea5278f094c0..231ca06bc9c7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
@@ -154,16 +154,11 @@ int amdgpu_sdma_process_ecc_irq(struct amdgpu_device *adev,
static int amdgpu_sdma_init_inst_ctx(struct amdgpu_sdma_instance *sdma_inst)
{
- int err = 0;
uint16_t version_major;
const struct common_firmware_header *header = NULL;
const struct sdma_firmware_header_v1_0 *hdr;
const struct sdma_firmware_header_v2_0 *hdr_v2;
- err = amdgpu_ucode_validate(sdma_inst->fw);
- if (err)
- return err;
-
header = (const struct common_firmware_header *)
sdma_inst->fw->data;
version_major = le16_to_cpu(header->header_version_major);
@@ -195,7 +190,7 @@ void amdgpu_sdma_destroy_inst_ctx(struct amdgpu_device *adev,
int i;
for (i = 0; i < adev->sdma.num_instances; i++) {
- release_firmware(adev->sdma.instance[i].fw);
+ amdgpu_ucode_release(&adev->sdma.instance[i].fw);
if (duplicate)
break;
}
@@ -205,16 +200,22 @@ void amdgpu_sdma_destroy_inst_ctx(struct amdgpu_device *adev,
}
int amdgpu_sdma_init_microcode(struct amdgpu_device *adev,
- char *fw_name, u32 instance,
- bool duplicate)
+ u32 instance, bool duplicate)
{
struct amdgpu_firmware_info *info = NULL;
const struct common_firmware_header *header = NULL;
- int err = 0, i;
+ int err, i;
const struct sdma_firmware_header_v2_0 *sdma_hdr;
uint16_t version_major;
-
- err = request_firmware(&adev->sdma.instance[instance].fw, fw_name, adev->dev);
+ char ucode_prefix[30];
+ char fw_name[40];
+
+ amdgpu_ucode_ip_version_decode(adev, SDMA0_HWIP, ucode_prefix, sizeof(ucode_prefix));
+ if (instance == 0)
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", ucode_prefix);
+ else
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s%d.bin", ucode_prefix, instance);
+ err = amdgpu_ucode_request(adev, &adev->sdma.instance[instance].fw, fw_name);
if (err)
goto out;
@@ -279,10 +280,8 @@ int amdgpu_sdma_init_microcode(struct amdgpu_device *adev,
}
out:
- if (err) {
- DRM_ERROR("SDMA: Failed to init firmware \"%s\"\n", fw_name);
+ if (err)
amdgpu_sdma_destroy_inst_ctx(adev, duplicate);
- }
return err;
}
@@ -306,3 +305,38 @@ void amdgpu_sdma_unset_buffer_funcs_helper(struct amdgpu_device *adev)
}
}
}
+
+int amdgpu_sdma_ras_sw_init(struct amdgpu_device *adev)
+{
+ int err = 0;
+ struct amdgpu_sdma_ras *ras = NULL;
+
+ /* adev->sdma.ras is NULL, which means sdma does not
+ * support ras function, then do nothing here.
+ */
+ if (!adev->sdma.ras)
+ return 0;
+
+ ras = adev->sdma.ras;
+
+ err = amdgpu_ras_register_ras_block(adev, &ras->ras_block);
+ if (err) {
+ dev_err(adev->dev, "Failed to register sdma ras block!\n");
+ return err;
+ }
+
+ strcpy(ras->ras_block.ras_comm.name, "sdma");
+ ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__SDMA;
+ ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
+ adev->sdma.ras_if = &ras->ras_block.ras_comm;
+
+ /* If not define special ras_late_init function, use default ras_late_init */
+ if (!ras->ras_block.ras_late_init)
+ ras->ras_block.ras_late_init = amdgpu_sdma_ras_late_init;
+
+ /* If not defined special ras_cb function, use default ras_cb */
+ if (!ras->ras_block.ras_cb)
+ ras->ras_block.ras_cb = amdgpu_sdma_process_ras_data_cb;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
index 7d99205c2e01..fc8528812598 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
@@ -124,10 +124,11 @@ int amdgpu_sdma_process_ras_data_cb(struct amdgpu_device *adev,
int amdgpu_sdma_process_ecc_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry);
-int amdgpu_sdma_init_microcode(struct amdgpu_device *adev,
- char *fw_name, u32 instance, bool duplicate);
+int amdgpu_sdma_init_microcode(struct amdgpu_device *adev, u32 instance,
+ bool duplicate);
void amdgpu_sdma_destroy_inst_ctx(struct amdgpu_device *adev,
bool duplicate);
void amdgpu_sdma_unset_buffer_funcs_helper(struct amdgpu_device *adev);
+int amdgpu_sdma_ras_sw_init(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c
index 2c1d82fc4c34..8ed0e073656f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c
@@ -77,11 +77,11 @@ void psp_securedisplay_parse_resp_status(struct psp_context *psp,
}
}
-void psp_prep_securedisplay_cmd_buf(struct psp_context *psp, struct securedisplay_cmd **cmd,
+void psp_prep_securedisplay_cmd_buf(struct psp_context *psp, struct ta_securedisplay_cmd **cmd,
enum ta_securedisplay_command command_id)
{
- *cmd = (struct securedisplay_cmd *)psp->securedisplay_context.context.mem_context.shared_buf;
- memset(*cmd, 0, sizeof(struct securedisplay_cmd));
+ *cmd = (struct ta_securedisplay_cmd *)psp->securedisplay_context.context.mem_context.shared_buf;
+ memset(*cmd, 0, sizeof(struct ta_securedisplay_cmd));
(*cmd)->status = TA_SECUREDISPLAY_STATUS__GENERIC_FAILURE;
(*cmd)->cmd_id = command_id;
}
@@ -93,7 +93,7 @@ static ssize_t amdgpu_securedisplay_debugfs_write(struct file *f, const char __u
{
struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
struct psp_context *psp = &adev->psp;
- struct securedisplay_cmd *securedisplay_cmd;
+ struct ta_securedisplay_cmd *securedisplay_cmd;
struct drm_device *dev = adev_to_drm(adev);
uint32_t phy_id;
uint32_t op;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.h
index fe98574748f4..456ad68ed4b2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.h
@@ -30,7 +30,7 @@
void amdgpu_securedisplay_debugfs_init(struct amdgpu_device *adev);
void psp_securedisplay_parse_resp_status(struct psp_context *psp,
enum ta_securedisplay_status status);
-void psp_prep_securedisplay_cmd_buf(struct psp_context *psp, struct securedisplay_cmd **cmd,
+void psp_prep_securedisplay_cmd_buf(struct psp_context *psp, struct ta_securedisplay_cmd **cmd,
enum ta_securedisplay_command command_id);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index 677ad2016976..98d91ebf5c26 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -153,10 +153,10 @@ TRACE_EVENT(amdgpu_cs,
TP_fast_assign(
__entry->bo_list = p->bo_list;
- __entry->ring = to_amdgpu_ring(job->base.sched)->idx;
+ __entry->ring = to_amdgpu_ring(job->base.entity->rq->sched)->idx;
__entry->dw = ib->length_dw;
__entry->fences = amdgpu_fence_count_emitted(
- to_amdgpu_ring(job->base.sched));
+ to_amdgpu_ring(job->base.entity->rq->sched));
),
TP_printk("bo_list=%p, ring=%u, dw=%u, fences=%u",
__entry->bo_list, __entry->ring, __entry->dw,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 55e0284b2bdd..c5ef7f7bdc15 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -44,10 +44,10 @@
#include <linux/module.h>
#include <drm/drm_drv.h>
-#include <drm/ttm/ttm_bo_api.h>
-#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_range_manager.h>
+#include <drm/ttm/ttm_tt.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_drv.h>
@@ -1679,10 +1679,10 @@ static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev)
/* reserve vram for mem train according to TMR location */
amdgpu_ttm_training_data_block_init(adev);
ret = amdgpu_bo_create_kernel_at(adev,
- ctx->c2p_train_data_offset,
- ctx->train_data_size,
- &ctx->c2p_bo,
- NULL);
+ ctx->c2p_train_data_offset,
+ ctx->train_data_size,
+ &ctx->c2p_bo,
+ NULL);
if (ret) {
DRM_ERROR("alloc c2p_bo failed(%d)!\n", ret);
amdgpu_ttm_training_reserve_vram_fini(adev);
@@ -1692,10 +1692,10 @@ static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev)
}
ret = amdgpu_bo_create_kernel_at(adev,
- adev->gmc.real_vram_size - adev->mman.discovery_tmr_size,
- adev->mman.discovery_tmr_size,
- &adev->mman.discovery_memory,
- NULL);
+ adev->gmc.real_vram_size - adev->mman.discovery_tmr_size,
+ adev->mman.discovery_tmr_size,
+ &adev->mman.discovery_memory,
+ NULL);
if (ret) {
DRM_ERROR("alloc tmr failed(%d)!\n", ret);
amdgpu_bo_free_kernel(&adev->mman.discovery_memory, NULL, NULL);
@@ -1718,7 +1718,6 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
{
uint64_t gtt_size;
int r;
- u64 vis_vram_limit;
mutex_init(&adev->mman.gtt_window_lock);
@@ -1741,12 +1740,6 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
return r;
}
- /* Reduce size of CPU-visible VRAM if requested */
- vis_vram_limit = (u64)amdgpu_vis_vram_limit * 1024 * 1024;
- if (amdgpu_vis_vram_limit > 0 &&
- vis_vram_limit <= adev->gmc.visible_vram_size)
- adev->gmc.visible_vram_size = vis_vram_limit;
-
/* Change the size here instead of the init above so only lpfn is affected */
amdgpu_ttm_set_buffer_funcs_status(adev, false);
#ifdef CONFIG_64BIT
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
index 5cb62e6249c2..380b89114341 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
@@ -504,7 +504,7 @@ void amdgpu_ucode_print_gpu_info_hdr(const struct common_firmware_header *hdr)
}
}
-int amdgpu_ucode_validate(const struct firmware *fw)
+static int amdgpu_ucode_validate(const struct firmware *fw)
{
const struct common_firmware_header *hdr =
(const struct common_firmware_header *)fw->data;
@@ -1059,12 +1059,229 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
return 0;
}
+static const char *amdgpu_ucode_legacy_naming(struct amdgpu_device *adev, int block_type)
+{
+ if (block_type == MP0_HWIP) {
+ switch (adev->ip_versions[MP0_HWIP][0]) {
+ case IP_VERSION(9, 0, 0):
+ switch (adev->asic_type) {
+ case CHIP_VEGA10:
+ return "vega10";
+ case CHIP_VEGA12:
+ return "vega12";
+ default:
+ return NULL;
+ }
+ case IP_VERSION(10, 0, 0):
+ case IP_VERSION(10, 0, 1):
+ if (adev->asic_type == CHIP_RAVEN) {
+ if (adev->apu_flags & AMD_APU_IS_RAVEN2)
+ return "raven2";
+ else if (adev->apu_flags & AMD_APU_IS_PICASSO)
+ return "picasso";
+ return "raven";
+ }
+ break;
+ case IP_VERSION(11, 0, 0):
+ return "navi10";
+ case IP_VERSION(11, 0, 2):
+ return "vega20";
+ case IP_VERSION(11, 0, 3):
+ return "renoir";
+ case IP_VERSION(11, 0, 4):
+ return "arcturus";
+ case IP_VERSION(11, 0, 5):
+ return "navi14";
+ case IP_VERSION(11, 0, 7):
+ return "sienna_cichlid";
+ case IP_VERSION(11, 0, 9):
+ return "navi12";
+ case IP_VERSION(11, 0, 11):
+ return "navy_flounder";
+ case IP_VERSION(11, 0, 12):
+ return "dimgrey_cavefish";
+ case IP_VERSION(11, 0, 13):
+ return "beige_goby";
+ case IP_VERSION(11, 5, 0):
+ return "vangogh";
+ case IP_VERSION(12, 0, 1):
+ return "green_sardine";
+ case IP_VERSION(13, 0, 2):
+ return "aldebaran";
+ case IP_VERSION(13, 0, 1):
+ case IP_VERSION(13, 0, 3):
+ return "yellow_carp";
+ }
+ } else if (block_type == MP1_HWIP) {
+ switch (adev->ip_versions[MP1_HWIP][0]) {
+ case IP_VERSION(9, 0, 0):
+ case IP_VERSION(10, 0, 0):
+ case IP_VERSION(10, 0, 1):
+ case IP_VERSION(11, 0, 2):
+ if (adev->asic_type == CHIP_ARCTURUS)
+ return "arcturus_smc";
+ return NULL;
+ case IP_VERSION(11, 0, 0):
+ return "navi10_smc";
+ case IP_VERSION(11, 0, 5):
+ return "navi14_smc";
+ case IP_VERSION(11, 0, 9):
+ return "navi12_smc";
+ case IP_VERSION(11, 0, 7):
+ return "sienna_cichlid_smc";
+ case IP_VERSION(11, 0, 11):
+ return "navy_flounder_smc";
+ case IP_VERSION(11, 0, 12):
+ return "dimgrey_cavefish_smc";
+ case IP_VERSION(11, 0, 13):
+ return "beige_goby_smc";
+ case IP_VERSION(13, 0, 2):
+ return "aldebaran_smc";
+ }
+ } else if (block_type == SDMA0_HWIP) {
+ switch (adev->ip_versions[SDMA0_HWIP][0]) {
+ case IP_VERSION(4, 0, 0):
+ return "vega10_sdma";
+ case IP_VERSION(4, 0, 1):
+ return "vega12_sdma";
+ case IP_VERSION(4, 1, 0):
+ case IP_VERSION(4, 1, 1):
+ if (adev->apu_flags & AMD_APU_IS_RAVEN2)
+ return "raven2_sdma";
+ else if (adev->apu_flags & AMD_APU_IS_PICASSO)
+ return "picasso_sdma";
+ return "raven_sdma";
+ case IP_VERSION(4, 1, 2):
+ if (adev->apu_flags & AMD_APU_IS_RENOIR)
+ return "renoir_sdma";
+ return "green_sardine_sdma";
+ case IP_VERSION(4, 2, 0):
+ return "vega20_sdma";
+ case IP_VERSION(4, 2, 2):
+ return "arcturus_sdma";
+ case IP_VERSION(4, 4, 0):
+ return "aldebaran_sdma";
+ case IP_VERSION(5, 0, 0):
+ return "navi10_sdma";
+ case IP_VERSION(5, 0, 1):
+ return "cyan_skillfish2_sdma";
+ case IP_VERSION(5, 0, 2):
+ return "navi14_sdma";
+ case IP_VERSION(5, 0, 5):
+ return "navi12_sdma";
+ case IP_VERSION(5, 2, 0):
+ return "sienna_cichlid_sdma";
+ case IP_VERSION(5, 2, 2):
+ return "navy_flounder_sdma";
+ case IP_VERSION(5, 2, 4):
+ return "dimgrey_cavefish_sdma";
+ case IP_VERSION(5, 2, 5):
+ return "beige_goby_sdma";
+ case IP_VERSION(5, 2, 3):
+ return "yellow_carp_sdma";
+ case IP_VERSION(5, 2, 1):
+ return "vangogh_sdma";
+ }
+ } else if (block_type == UVD_HWIP) {
+ switch (adev->ip_versions[UVD_HWIP][0]) {
+ case IP_VERSION(1, 0, 0):
+ case IP_VERSION(1, 0, 1):
+ if (adev->apu_flags & AMD_APU_IS_RAVEN2)
+ return "raven2_vcn";
+ else if (adev->apu_flags & AMD_APU_IS_PICASSO)
+ return "picasso_vcn";
+ return "raven_vcn";
+ case IP_VERSION(2, 5, 0):
+ return "arcturus_vcn";
+ case IP_VERSION(2, 2, 0):
+ if (adev->apu_flags & AMD_APU_IS_RENOIR)
+ return "renoir_vcn";
+ return "green_sardine_vcn";
+ case IP_VERSION(2, 6, 0):
+ return "aldebaran_vcn";
+ case IP_VERSION(2, 0, 0):
+ return "navi10_vcn";
+ case IP_VERSION(2, 0, 2):
+ if (adev->asic_type == CHIP_NAVI12)
+ return "navi12_vcn";
+ return "navi14_vcn";
+ case IP_VERSION(3, 0, 0):
+ case IP_VERSION(3, 0, 64):
+ case IP_VERSION(3, 0, 192):
+ if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0))
+ return "sienna_cichlid_vcn";
+ return "navy_flounder_vcn";
+ case IP_VERSION(3, 0, 2):
+ return "vangogh_vcn";
+ case IP_VERSION(3, 0, 16):
+ return "dimgrey_cavefish_vcn";
+ case IP_VERSION(3, 0, 33):
+ return "beige_goby_vcn";
+ case IP_VERSION(3, 1, 1):
+ return "yellow_carp_vcn";
+ }
+ } else if (block_type == GC_HWIP) {
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(9, 0, 1):
+ return "vega10";
+ case IP_VERSION(9, 2, 1):
+ return "vega12";
+ case IP_VERSION(9, 4, 0):
+ return "vega20";
+ case IP_VERSION(9, 2, 2):
+ case IP_VERSION(9, 1, 0):
+ if (adev->apu_flags & AMD_APU_IS_RAVEN2)
+ return "raven2";
+ else if (adev->apu_flags & AMD_APU_IS_PICASSO)
+ return "picasso";
+ return "raven";
+ case IP_VERSION(9, 4, 1):
+ return "arcturus";
+ case IP_VERSION(9, 3, 0):
+ if (adev->apu_flags & AMD_APU_IS_RENOIR)
+ return "renoir";
+ return "green_sardine";
+ case IP_VERSION(9, 4, 2):
+ return "aldebaran";
+ case IP_VERSION(10, 1, 10):
+ return "navi10";
+ case IP_VERSION(10, 1, 1):
+ return "navi14";
+ case IP_VERSION(10, 1, 2):
+ return "navi12";
+ case IP_VERSION(10, 3, 0):
+ return "sienna_cichlid";
+ case IP_VERSION(10, 3, 2):
+ return "navy_flounder";
+ case IP_VERSION(10, 3, 1):
+ return "vangogh";
+ case IP_VERSION(10, 3, 4):
+ return "dimgrey_cavefish";
+ case IP_VERSION(10, 3, 5):
+ return "beige_goby";
+ case IP_VERSION(10, 3, 3):
+ return "yellow_carp";
+ case IP_VERSION(10, 1, 3):
+ case IP_VERSION(10, 1, 4):
+ return "cyan_skillfish2";
+ }
+ }
+ return NULL;
+}
+
void amdgpu_ucode_ip_version_decode(struct amdgpu_device *adev, int block_type, char *ucode_prefix, int len)
{
int maj, min, rev;
char *ip_name;
+ const char *legacy;
uint32_t version = adev->ip_versions[block_type][0];
+ legacy = amdgpu_ucode_legacy_naming(adev, block_type);
+ if (legacy) {
+ snprintf(ucode_prefix, len, "%s", legacy);
+ return;
+ }
+
switch (block_type) {
case GC_HWIP:
ip_name = "gc";
@@ -1091,3 +1308,39 @@ void amdgpu_ucode_ip_version_decode(struct amdgpu_device *adev, int block_type,
snprintf(ucode_prefix, len, "%s_%d_%d_%d", ip_name, maj, min, rev);
}
+
+/*
+ * amdgpu_ucode_request - Fetch and validate amdgpu microcode
+ *
+ * @adev: amdgpu device
+ * @fw: pointer to load firmware to
+ * @fw_name: firmware to load
+ *
+ * This is a helper that will use request_firmware and amdgpu_ucode_validate
+ * to load and run basic validation on firmware. If the load fails, remap
+ * the error code to -ENODEV, so that early_init functions will fail to load.
+ */
+int amdgpu_ucode_request(struct amdgpu_device *adev, const struct firmware **fw,
+ const char *fw_name)
+{
+ int err = request_firmware(fw, fw_name, adev->dev);
+
+ if (err)
+ return -ENODEV;
+ err = amdgpu_ucode_validate(*fw);
+ if (err)
+ dev_dbg(adev->dev, "\"%s\" failed to validate\n", fw_name);
+
+ return err;
+}
+
+/*
+ * amdgpu_ucode_release - Release firmware microcode
+ *
+ * @fw: pointer to firmware to release
+ */
+void amdgpu_ucode_release(const struct firmware **fw)
+{
+ release_firmware(*fw);
+ *fw = NULL;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
index 552e06929229..b03321e7d2d8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
@@ -538,12 +538,15 @@ struct amdgpu_firmware {
void amdgpu_ucode_print_mc_hdr(const struct common_firmware_header *hdr);
void amdgpu_ucode_print_smc_hdr(const struct common_firmware_header *hdr);
+void amdgpu_ucode_print_imu_hdr(const struct common_firmware_header *hdr);
void amdgpu_ucode_print_gfx_hdr(const struct common_firmware_header *hdr);
void amdgpu_ucode_print_rlc_hdr(const struct common_firmware_header *hdr);
void amdgpu_ucode_print_sdma_hdr(const struct common_firmware_header *hdr);
void amdgpu_ucode_print_psp_hdr(const struct common_firmware_header *hdr);
void amdgpu_ucode_print_gpu_info_hdr(const struct common_firmware_header *hdr);
-int amdgpu_ucode_validate(const struct firmware *fw);
+int amdgpu_ucode_request(struct amdgpu_device *adev, const struct firmware **fw,
+ const char *fw_name);
+void amdgpu_ucode_release(const struct firmware **fw);
bool amdgpu_ucode_hdr_version(union amdgpu_firmware_header *hdr,
uint16_t hdr_major, uint16_t hdr_minor);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
index f76c19fc0392..1c7fcb4f2380 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
@@ -169,25 +169,33 @@ int amdgpu_umc_poison_handler(struct amdgpu_device *adev, bool reset)
{
int ret = AMDGPU_RAS_SUCCESS;
- if (!adev->gmc.xgmi.connected_to_cpu) {
- struct ras_err_data err_data = {0, 0, 0, NULL};
- struct ras_common_if head = {
- .block = AMDGPU_RAS_BLOCK__UMC,
- };
- struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head);
-
- ret = amdgpu_umc_do_page_retirement(adev, &err_data, NULL, reset);
-
- if (ret == AMDGPU_RAS_SUCCESS && obj) {
- obj->err_data.ue_count += err_data.ue_count;
- obj->err_data.ce_count += err_data.ce_count;
+ if (!amdgpu_sriov_vf(adev)) {
+ if (!adev->gmc.xgmi.connected_to_cpu) {
+ struct ras_err_data err_data = {0, 0, 0, NULL};
+ struct ras_common_if head = {
+ .block = AMDGPU_RAS_BLOCK__UMC,
+ };
+ struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head);
+
+ ret = amdgpu_umc_do_page_retirement(adev, &err_data, NULL, reset);
+
+ if (ret == AMDGPU_RAS_SUCCESS && obj) {
+ obj->err_data.ue_count += err_data.ue_count;
+ obj->err_data.ce_count += err_data.ce_count;
+ }
+ } else if (reset) {
+ /* MCA poison handler is only responsible for GPU reset,
+ * let MCA notifier do page retirement.
+ */
+ kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
+ amdgpu_ras_reset_gpu(adev);
}
- } else if (reset) {
- /* MCA poison handler is only responsible for GPU reset,
- * let MCA notifier do page retirement.
- */
- kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
- amdgpu_ras_reset_gpu(adev);
+ } else {
+ if (adev->virt.ops && adev->virt.ops->ras_poison_handler)
+ adev->virt.ops->ras_poison_handler(adev);
+ else
+ dev_warn(adev->dev,
+ "No ras_poison_handler interface in SRIOV!\n");
}
return ret;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index e00bb654e24b..632a6ded5735 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -260,19 +260,11 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
return -EINVAL;
}
- r = request_firmware(&adev->uvd.fw, fw_name, adev->dev);
- if (r) {
- dev_err(adev->dev, "amdgpu_uvd: Can't load firmware \"%s\"\n",
- fw_name);
- return r;
- }
-
- r = amdgpu_ucode_validate(adev->uvd.fw);
+ r = amdgpu_ucode_request(adev, &adev->uvd.fw, fw_name);
if (r) {
dev_err(adev->dev, "amdgpu_uvd: Can't validate firmware \"%s\"\n",
fw_name);
- release_firmware(adev->uvd.fw);
- adev->uvd.fw = NULL;
+ amdgpu_ucode_release(&adev->uvd.fw);
return r;
}
@@ -331,8 +323,11 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
if (adev->uvd.harvest_config & (1 << j))
continue;
r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM, &adev->uvd.inst[j].vcpu_bo,
- &adev->uvd.inst[j].gpu_addr, &adev->uvd.inst[j].cpu_addr);
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
+ &adev->uvd.inst[j].vcpu_bo,
+ &adev->uvd.inst[j].gpu_addr,
+ &adev->uvd.inst[j].cpu_addr);
if (r) {
dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r);
return r;
@@ -394,7 +389,7 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
amdgpu_ring_fini(&adev->uvd.inst[j].ring_enc[i]);
}
amdgpu_bo_free_kernel(&adev->uvd.ib_bo, NULL, &addr);
- release_firmware(adev->uvd.fw);
+ amdgpu_ucode_release(&adev->uvd.fw);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index b239e874f2d5..2fb61410b1c0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -158,19 +158,11 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
return -EINVAL;
}
- r = request_firmware(&adev->vce.fw, fw_name, adev->dev);
- if (r) {
- dev_err(adev->dev, "amdgpu_vce: Can't load firmware \"%s\"\n",
- fw_name);
- return r;
- }
-
- r = amdgpu_ucode_validate(adev->vce.fw);
+ r = amdgpu_ucode_request(adev, &adev->vce.fw, fw_name);
if (r) {
dev_err(adev->dev, "amdgpu_vce: Can't validate firmware \"%s\"\n",
fw_name);
- release_firmware(adev->vce.fw);
- adev->vce.fw = NULL;
+ amdgpu_ucode_release(&adev->vce.fw);
return r;
}
@@ -186,7 +178,9 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
(binary_id << 8));
r = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM, &adev->vce.vcpu_bo,
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
+ &adev->vce.vcpu_bo,
&adev->vce.gpu_addr, &adev->vce.cpu_addr);
if (r) {
dev_err(adev->dev, "(%d) failed to allocate VCE bo\n", r);
@@ -226,7 +220,7 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
for (i = 0; i < adev->vce.num_rings; i++)
amdgpu_ring_fini(&adev->vce.ring[i]);
- release_firmware(adev->vce.fw);
+ amdgpu_ucode_release(&adev->vce.fw);
mutex_destroy(&adev->vce.idle_mutex);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index b1622ac9949f..25217b05c0ea 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -36,26 +36,26 @@
#include "soc15d.h"
/* Firmware Names */
-#define FIRMWARE_RAVEN "amdgpu/raven_vcn.bin"
-#define FIRMWARE_PICASSO "amdgpu/picasso_vcn.bin"
-#define FIRMWARE_RAVEN2 "amdgpu/raven2_vcn.bin"
-#define FIRMWARE_ARCTURUS "amdgpu/arcturus_vcn.bin"
-#define FIRMWARE_RENOIR "amdgpu/renoir_vcn.bin"
-#define FIRMWARE_GREEN_SARDINE "amdgpu/green_sardine_vcn.bin"
-#define FIRMWARE_NAVI10 "amdgpu/navi10_vcn.bin"
-#define FIRMWARE_NAVI14 "amdgpu/navi14_vcn.bin"
-#define FIRMWARE_NAVI12 "amdgpu/navi12_vcn.bin"
-#define FIRMWARE_SIENNA_CICHLID "amdgpu/sienna_cichlid_vcn.bin"
-#define FIRMWARE_NAVY_FLOUNDER "amdgpu/navy_flounder_vcn.bin"
-#define FIRMWARE_VANGOGH "amdgpu/vangogh_vcn.bin"
+#define FIRMWARE_RAVEN "amdgpu/raven_vcn.bin"
+#define FIRMWARE_PICASSO "amdgpu/picasso_vcn.bin"
+#define FIRMWARE_RAVEN2 "amdgpu/raven2_vcn.bin"
+#define FIRMWARE_ARCTURUS "amdgpu/arcturus_vcn.bin"
+#define FIRMWARE_RENOIR "amdgpu/renoir_vcn.bin"
+#define FIRMWARE_GREEN_SARDINE "amdgpu/green_sardine_vcn.bin"
+#define FIRMWARE_NAVI10 "amdgpu/navi10_vcn.bin"
+#define FIRMWARE_NAVI14 "amdgpu/navi14_vcn.bin"
+#define FIRMWARE_NAVI12 "amdgpu/navi12_vcn.bin"
+#define FIRMWARE_SIENNA_CICHLID "amdgpu/sienna_cichlid_vcn.bin"
+#define FIRMWARE_NAVY_FLOUNDER "amdgpu/navy_flounder_vcn.bin"
+#define FIRMWARE_VANGOGH "amdgpu/vangogh_vcn.bin"
#define FIRMWARE_DIMGREY_CAVEFISH "amdgpu/dimgrey_cavefish_vcn.bin"
-#define FIRMWARE_ALDEBARAN "amdgpu/aldebaran_vcn.bin"
-#define FIRMWARE_BEIGE_GOBY "amdgpu/beige_goby_vcn.bin"
-#define FIRMWARE_YELLOW_CARP "amdgpu/yellow_carp_vcn.bin"
-#define FIRMWARE_VCN_3_1_2 "amdgpu/vcn_3_1_2.bin"
-#define FIRMWARE_VCN4_0_0 "amdgpu/vcn_4_0_0.bin"
-#define FIRMWARE_VCN4_0_2 "amdgpu/vcn_4_0_2.bin"
-#define FIRMWARE_VCN4_0_4 "amdgpu/vcn_4_0_4.bin"
+#define FIRMWARE_ALDEBARAN "amdgpu/aldebaran_vcn.bin"
+#define FIRMWARE_BEIGE_GOBY "amdgpu/beige_goby_vcn.bin"
+#define FIRMWARE_YELLOW_CARP "amdgpu/yellow_carp_vcn.bin"
+#define FIRMWARE_VCN_3_1_2 "amdgpu/vcn_3_1_2.bin"
+#define FIRMWARE_VCN4_0_0 "amdgpu/vcn_4_0_0.bin"
+#define FIRMWARE_VCN4_0_2 "amdgpu/vcn_4_0_2.bin"
+#define FIRMWARE_VCN4_0_4 "amdgpu/vcn_4_0_4.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN);
MODULE_FIRMWARE(FIRMWARE_PICASSO);
@@ -80,10 +80,24 @@ MODULE_FIRMWARE(FIRMWARE_VCN4_0_4);
static void amdgpu_vcn_idle_work_handler(struct work_struct *work);
+int amdgpu_vcn_early_init(struct amdgpu_device *adev)
+{
+ char ucode_prefix[30];
+ char fw_name[40];
+ int r;
+
+ amdgpu_ucode_ip_version_decode(adev, UVD_HWIP, ucode_prefix, sizeof(ucode_prefix));
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", ucode_prefix);
+ r = amdgpu_ucode_request(adev, &adev->vcn.fw, fw_name);
+ if (r)
+ amdgpu_ucode_release(&adev->vcn.fw);
+
+ return r;
+}
+
int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
{
unsigned long bo_size;
- const char *fw_name;
const struct common_firmware_header *hdr;
unsigned char fw_check;
unsigned int fw_shared_size, log_offset;
@@ -96,131 +110,9 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
for (i = 0; i < adev->vcn.num_vcn_inst; i++)
atomic_set(&adev->vcn.inst[i].dpg_enc_submission_cnt, 0);
- switch (adev->ip_versions[UVD_HWIP][0]) {
- case IP_VERSION(1, 0, 0):
- case IP_VERSION(1, 0, 1):
- if (adev->apu_flags & AMD_APU_IS_RAVEN2)
- fw_name = FIRMWARE_RAVEN2;
- else if (adev->apu_flags & AMD_APU_IS_PICASSO)
- fw_name = FIRMWARE_PICASSO;
- else
- fw_name = FIRMWARE_RAVEN;
- break;
- case IP_VERSION(2, 5, 0):
- fw_name = FIRMWARE_ARCTURUS;
- if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
- (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
- adev->vcn.indirect_sram = true;
- break;
- case IP_VERSION(2, 2, 0):
- if (adev->apu_flags & AMD_APU_IS_RENOIR)
- fw_name = FIRMWARE_RENOIR;
- else
- fw_name = FIRMWARE_GREEN_SARDINE;
-
- if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
- (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
- adev->vcn.indirect_sram = true;
- break;
- case IP_VERSION(2, 6, 0):
- fw_name = FIRMWARE_ALDEBARAN;
- if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
- (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
- adev->vcn.indirect_sram = true;
- break;
- case IP_VERSION(2, 0, 0):
- fw_name = FIRMWARE_NAVI10;
- if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
- (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
- adev->vcn.indirect_sram = true;
- break;
- case IP_VERSION(2, 0, 2):
- if (adev->asic_type == CHIP_NAVI12)
- fw_name = FIRMWARE_NAVI12;
- else
- fw_name = FIRMWARE_NAVI14;
- if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
- (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
- adev->vcn.indirect_sram = true;
- break;
- case IP_VERSION(3, 0, 0):
- case IP_VERSION(3, 0, 64):
- case IP_VERSION(3, 0, 192):
- if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0))
- fw_name = FIRMWARE_SIENNA_CICHLID;
- else
- fw_name = FIRMWARE_NAVY_FLOUNDER;
- if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
- (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
- adev->vcn.indirect_sram = true;
- break;
- case IP_VERSION(3, 0, 2):
- fw_name = FIRMWARE_VANGOGH;
- if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
- (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
- adev->vcn.indirect_sram = true;
- break;
- case IP_VERSION(3, 0, 16):
- fw_name = FIRMWARE_DIMGREY_CAVEFISH;
- if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
- (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
- adev->vcn.indirect_sram = true;
- break;
- case IP_VERSION(3, 0, 33):
- fw_name = FIRMWARE_BEIGE_GOBY;
- if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
- (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
- adev->vcn.indirect_sram = true;
- break;
- case IP_VERSION(3, 1, 1):
- fw_name = FIRMWARE_YELLOW_CARP;
- if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
- (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
- adev->vcn.indirect_sram = true;
- break;
- case IP_VERSION(3, 1, 2):
- fw_name = FIRMWARE_VCN_3_1_2;
- if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
- (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
- adev->vcn.indirect_sram = true;
- break;
- case IP_VERSION(4, 0, 0):
- fw_name = FIRMWARE_VCN4_0_0;
- if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
- (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
- adev->vcn.indirect_sram = true;
- break;
- case IP_VERSION(4, 0, 2):
- fw_name = FIRMWARE_VCN4_0_2;
- if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
- (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
- adev->vcn.indirect_sram = true;
- break;
- case IP_VERSION(4, 0, 4):
- fw_name = FIRMWARE_VCN4_0_4;
- if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
- (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
- adev->vcn.indirect_sram = true;
- break;
- default:
- return -EINVAL;
- }
-
- r = request_firmware(&adev->vcn.fw, fw_name, adev->dev);
- if (r) {
- dev_err(adev->dev, "amdgpu_vcn: Can't load firmware \"%s\"\n",
- fw_name);
- return r;
- }
-
- r = amdgpu_ucode_validate(adev->vcn.fw);
- if (r) {
- dev_err(adev->dev, "amdgpu_vcn: Can't validate firmware \"%s\"\n",
- fw_name);
- release_firmware(adev->vcn.fw);
- adev->vcn.fw = NULL;
- return r;
- }
+ if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
+ (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
+ adev->vcn.indirect_sram = true;
hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);
@@ -274,8 +166,11 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
continue;
r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.inst[i].vcpu_bo,
- &adev->vcn.inst[i].gpu_addr, &adev->vcn.inst[i].cpu_addr);
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
+ &adev->vcn.inst[i].vcpu_bo,
+ &adev->vcn.inst[i].gpu_addr,
+ &adev->vcn.inst[i].cpu_addr);
if (r) {
dev_err(adev->dev, "(%d) failed to allocate vcn bo\n", r);
return r;
@@ -296,8 +191,11 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
if (adev->vcn.indirect_sram) {
r = amdgpu_bo_create_kernel(adev, 64 * 2 * 4, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.inst[i].dpg_sram_bo,
- &adev->vcn.inst[i].dpg_sram_gpu_addr, &adev->vcn.inst[i].dpg_sram_cpu_addr);
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
+ &adev->vcn.inst[i].dpg_sram_bo,
+ &adev->vcn.inst[i].dpg_sram_gpu_addr,
+ &adev->vcn.inst[i].dpg_sram_cpu_addr);
if (r) {
dev_err(adev->dev, "VCN %d (%d) failed to allocate DPG bo\n", i, r);
return r;
@@ -333,7 +231,7 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
amdgpu_ring_fini(&adev->vcn.inst[j].ring_enc[i]);
}
- release_firmware(adev->vcn.fw);
+ amdgpu_ucode_release(&adev->vcn.fw);
mutex_destroy(&adev->vcn.vcn1_jpeg1_workaround);
mutex_destroy(&adev->vcn.vcn_pg_lock);
@@ -1250,8 +1148,16 @@ int amdgpu_vcn_process_poison_irq(struct amdgpu_device *adev,
if (!ras_if)
return 0;
- ih_data.head = *ras_if;
- amdgpu_ras_interrupt_dispatch(adev, &ih_data);
+ if (!amdgpu_sriov_vf(adev)) {
+ ih_data.head = *ras_if;
+ amdgpu_ras_interrupt_dispatch(adev, &ih_data);
+ } else {
+ if (adev->virt.ops && adev->virt.ops->ras_poison_handler)
+ adev->virt.ops->ras_poison_handler(adev);
+ else
+ dev_warn(adev->dev,
+ "No ras_poison_handler interface in SRIOV for VCN!\n");
+ }
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index dbb8d68a30c6..d3e2af902907 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -369,6 +369,7 @@ enum vcn_ring_type {
VCN_UNIFIED_RING,
};
+int amdgpu_vcn_early_init(struct amdgpu_device *adev);
int amdgpu_vcn_sw_init(struct amdgpu_device *adev);
int amdgpu_vcn_sw_fini(struct amdgpu_device *adev);
int amdgpu_vcn_suspend(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index 2994b9db196f..f2e2cbaa7fde 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -232,7 +232,8 @@ int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev)
return 0;
r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
&adev->virt.mm_table.bo,
&adev->virt.mm_table.gpu_addr,
(void *)&adev->virt.mm_table.cpu_addr);
@@ -982,11 +983,13 @@ static u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v
if (offset == reg_access_ctrl->grbm_cntl) {
/* if the target reg offset is grbm_cntl, write to scratch_reg2 */
writel(v, scratch_reg2);
- writel(v, ((void __iomem *)adev->rmmio) + (offset * 4));
+ if (flag == AMDGPU_RLCG_GC_WRITE_LEGACY)
+ writel(v, ((void __iomem *)adev->rmmio) + (offset * 4));
} else if (offset == reg_access_ctrl->grbm_idx) {
/* if the target reg offset is grbm_idx, write to scratch_reg3 */
writel(v, scratch_reg3);
- writel(v, ((void __iomem *)adev->rmmio) + (offset * 4));
+ if (flag == AMDGPU_RLCG_GC_WRITE_LEGACY)
+ writel(v, ((void __iomem *)adev->rmmio) + (offset * 4));
} else {
/*
* SCRATCH_REG0 = read/write value
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index 2b9d806e23af..b9e9480448af 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -88,6 +88,7 @@ struct amdgpu_virt_ops {
int (*wait_reset)(struct amdgpu_device *adev);
void (*trans_msg)(struct amdgpu_device *adev, enum idh_request req,
u32 data1, u32 data2, u32 data3);
+ void (*ras_poison_handler)(struct amdgpu_device *adev);
};
/*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index dc379dc22c77..b9441ab457ea 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -33,6 +33,7 @@
#include <drm/amdgpu_drm.h>
#include <drm/drm_drv.h>
+#include <drm/ttm/ttm_tt.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 094bb4807303..856a64bc7a89 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -29,7 +29,7 @@
#include <linux/rbtree.h>
#include <drm/gpu_scheduler.h>
#include <drm/drm_file.h>
-#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_bo.h>
#include <linux/sched/mm.h>
#include "amdgpu_sync.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
index b5f3bba851db..01e42bdd8e4e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
@@ -974,7 +974,7 @@ int amdgpu_vm_ptes_update(struct amdgpu_vm_update_params *params,
trace_amdgpu_vm_update_ptes(params, frag_start, upd_end,
min(nptes, 32u), dst, incr,
upd_flags,
- vm->task_info.pid,
+ vm->task_info.tgid,
vm->immediate.fence_context);
amdgpu_vm_pte_update_flags(params, to_amdgpu_bo_vm(pt),
cursor.level, pe_start, dst,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
index 4b9e7b050ccd..4340d08f7607 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
@@ -29,13 +29,16 @@
#include "df/df_3_6_offset.h"
#include "xgmi/xgmi_4_0_0_smn.h"
#include "xgmi/xgmi_4_0_0_sh_mask.h"
+#include "xgmi/xgmi_6_1_0_sh_mask.h"
#include "wafl/wafl2_4_0_0_smn.h"
#include "wafl/wafl2_4_0_0_sh_mask.h"
#include "amdgpu_reset.h"
#define smnPCS_XGMI3X16_PCS_ERROR_STATUS 0x11a0020c
+#define smnPCS_XGMI3X16_PCS_ERROR_NONCORRECTABLE_MASK 0x11a00218
#define smnPCS_GOPX1_PCS_ERROR_STATUS 0x12200210
+#define smnPCS_GOPX1_PCS_ERROR_NONCORRECTABLE_MASK 0x12200218
static DEFINE_MUTEX(xgmi_mutex);
@@ -79,11 +82,27 @@ static const int xgmi3x16_pcs_err_status_reg_aldebaran[] = {
smnPCS_XGMI3X16_PCS_ERROR_STATUS + 0x700000
};
+static const int xgmi3x16_pcs_err_noncorrectable_mask_reg_aldebaran[] = {
+ smnPCS_XGMI3X16_PCS_ERROR_NONCORRECTABLE_MASK,
+ smnPCS_XGMI3X16_PCS_ERROR_NONCORRECTABLE_MASK + 0x100000,
+ smnPCS_XGMI3X16_PCS_ERROR_NONCORRECTABLE_MASK + 0x200000,
+ smnPCS_XGMI3X16_PCS_ERROR_NONCORRECTABLE_MASK + 0x300000,
+ smnPCS_XGMI3X16_PCS_ERROR_NONCORRECTABLE_MASK + 0x400000,
+ smnPCS_XGMI3X16_PCS_ERROR_NONCORRECTABLE_MASK + 0x500000,
+ smnPCS_XGMI3X16_PCS_ERROR_NONCORRECTABLE_MASK + 0x600000,
+ smnPCS_XGMI3X16_PCS_ERROR_NONCORRECTABLE_MASK + 0x700000
+};
+
static const int walf_pcs_err_status_reg_aldebaran[] = {
smnPCS_GOPX1_PCS_ERROR_STATUS,
smnPCS_GOPX1_PCS_ERROR_STATUS + 0x100000
};
+static const int walf_pcs_err_noncorrectable_mask_reg_aldebaran[] = {
+ smnPCS_GOPX1_PCS_ERROR_NONCORRECTABLE_MASK,
+ smnPCS_GOPX1_PCS_ERROR_NONCORRECTABLE_MASK + 0x100000
+};
+
static const struct amdgpu_pcs_ras_field xgmi_pcs_ras_fields[] = {
{"XGMI PCS DataLossErr",
SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, DataLossErr)},
@@ -162,6 +181,67 @@ static const struct amdgpu_pcs_ras_field wafl_pcs_ras_fields[] = {
SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, RecoveryRelockAttemptErr)},
};
+static const struct amdgpu_pcs_ras_field xgmi3x16_pcs_ras_fields[] = {
+ {"XGMI3X16 PCS DataLossErr",
+ SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, DataLossErr)},
+ {"XGMI3X16 PCS TrainingErr",
+ SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, TrainingErr)},
+ {"XGMI3X16 PCS FlowCtrlAckErr",
+ SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, FlowCtrlAckErr)},
+ {"XGMI3X16 PCS RxFifoUnderflowErr",
+ SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, RxFifoUnderflowErr)},
+ {"XGMI3X16 PCS RxFifoOverflowErr",
+ SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, RxFifoOverflowErr)},
+ {"XGMI3X16 PCS CRCErr",
+ SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, CRCErr)},
+ {"XGMI3X16 PCS BERExceededErr",
+ SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, BERExceededErr)},
+ {"XGMI3X16 PCS TxVcidDataErr",
+ SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, TxVcidDataErr)},
+ {"XGMI3X16 PCS ReplayBufParityErr",
+ SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, ReplayBufParityErr)},
+ {"XGMI3X16 PCS DataParityErr",
+ SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, DataParityErr)},
+ {"XGMI3X16 PCS ReplayFifoOverflowErr",
+ SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, ReplayFifoOverflowErr)},
+ {"XGMI3X16 PCS ReplayFifoUnderflowErr",
+ SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, ReplayFifoUnderflowErr)},
+ {"XGMI3X16 PCS ElasticFifoOverflowErr",
+ SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, ElasticFifoOverflowErr)},
+ {"XGMI3X16 PCS DeskewErr",
+ SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, DeskewErr)},
+ {"XGMI3X16 PCS FlowCtrlCRCErr",
+ SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, FlowCtrlCRCErr)},
+ {"XGMI3X16 PCS DataStartupLimitErr",
+ SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, DataStartupLimitErr)},
+ {"XGMI3X16 PCS FCInitTimeoutErr",
+ SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, FCInitTimeoutErr)},
+ {"XGMI3X16 PCS RecoveryTimeoutErr",
+ SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, RecoveryTimeoutErr)},
+ {"XGMI3X16 PCS ReadySerialTimeoutErr",
+ SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, ReadySerialTimeoutErr)},
+ {"XGMI3X16 PCS ReadySerialAttemptErr",
+ SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, ReadySerialAttemptErr)},
+ {"XGMI3X16 PCS RecoveryAttemptErr",
+ SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, RecoveryAttemptErr)},
+ {"XGMI3X16 PCS RecoveryRelockAttemptErr",
+ SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, RecoveryRelockAttemptErr)},
+ {"XGMI3X16 PCS ReplayAttemptErr",
+ SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, ReplayAttemptErr)},
+ {"XGMI3X16 PCS SyncHdrErr",
+ SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, SyncHdrErr)},
+ {"XGMI3X16 PCS TxReplayTimeoutErr",
+ SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, TxReplayTimeoutErr)},
+ {"XGMI3X16 PCS RxReplayTimeoutErr",
+ SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, RxReplayTimeoutErr)},
+ {"XGMI3X16 PCS LinkSubTxTimeoutErr",
+ SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, LinkSubTxTimeoutErr)},
+ {"XGMI3X16 PCS LinkSubRxTimeoutErr",
+ SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, LinkSubRxTimeoutErr)},
+ {"XGMI3X16 PCS RxCMDPktErr",
+ SOC15_REG_FIELD(PCS_XGMI3X16_PCS_ERROR_STATUS, RxCMDPktErr)},
+};
+
/**
* DOC: AMDGPU XGMI Support
*
@@ -809,39 +889,47 @@ static void amdgpu_xgmi_reset_ras_error_count(struct amdgpu_device *adev)
static int amdgpu_xgmi_query_pcs_error_status(struct amdgpu_device *adev,
uint32_t value,
+ uint32_t mask_value,
uint32_t *ue_count,
uint32_t *ce_count,
- bool is_xgmi_pcs)
+ bool is_xgmi_pcs,
+ bool check_mask)
{
int i;
- int ue_cnt;
+ int ue_cnt = 0;
+ const struct amdgpu_pcs_ras_field *pcs_ras_fields = NULL;
+ uint32_t field_array_size = 0;
if (is_xgmi_pcs) {
- /* query xgmi pcs error status,
- * only ue is supported */
- for (i = 0; i < ARRAY_SIZE(xgmi_pcs_ras_fields); i ++) {
- ue_cnt = (value &
- xgmi_pcs_ras_fields[i].pcs_err_mask) >>
- xgmi_pcs_ras_fields[i].pcs_err_shift;
- if (ue_cnt) {
- dev_info(adev->dev, "%s detected\n",
- xgmi_pcs_ras_fields[i].err_name);
- *ue_count += ue_cnt;
- }
+ if (adev->ip_versions[XGMI_HWIP][0] == IP_VERSION(6, 1, 0)) {
+ pcs_ras_fields = &xgmi3x16_pcs_ras_fields[0];
+ field_array_size = ARRAY_SIZE(xgmi3x16_pcs_ras_fields);
+ } else {
+ pcs_ras_fields = &xgmi_pcs_ras_fields[0];
+ field_array_size = ARRAY_SIZE(xgmi_pcs_ras_fields);
}
} else {
- /* query wafl pcs error status,
- * only ue is supported */
- for (i = 0; i < ARRAY_SIZE(wafl_pcs_ras_fields); i++) {
- ue_cnt = (value &
- wafl_pcs_ras_fields[i].pcs_err_mask) >>
- wafl_pcs_ras_fields[i].pcs_err_shift;
- if (ue_cnt) {
- dev_info(adev->dev, "%s detected\n",
- wafl_pcs_ras_fields[i].err_name);
- *ue_count += ue_cnt;
- }
+ pcs_ras_fields = &wafl_pcs_ras_fields[0];
+ field_array_size = ARRAY_SIZE(wafl_pcs_ras_fields);
+ }
+
+ if (check_mask)
+ value = value & ~mask_value;
+
+ /* query xgmi/walf pcs error status,
+ * only ue is supported */
+ for (i = 0; value && i < field_array_size; i++) {
+ ue_cnt = (value &
+ pcs_ras_fields[i].pcs_err_mask) >>
+ pcs_ras_fields[i].pcs_err_shift;
+ if (ue_cnt) {
+ dev_info(adev->dev, "%s detected\n",
+ pcs_ras_fields[i].err_name);
+ *ue_count += ue_cnt;
}
+
+ /* reset bit value if the bit is checked */
+ value &= ~(pcs_ras_fields[i].pcs_err_mask);
}
return 0;
@@ -852,7 +940,7 @@ static void amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev,
{
struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
int i;
- uint32_t data;
+ uint32_t data, mask_data = 0;
uint32_t ue_cnt = 0, ce_cnt = 0;
if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__XGMI_WAFL))
@@ -867,15 +955,15 @@ static void amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev,
for (i = 0; i < ARRAY_SIZE(xgmi_pcs_err_status_reg_arct); i++) {
data = RREG32_PCIE(xgmi_pcs_err_status_reg_arct[i]);
if (data)
- amdgpu_xgmi_query_pcs_error_status(adev,
- data, &ue_cnt, &ce_cnt, true);
+ amdgpu_xgmi_query_pcs_error_status(adev, data,
+ mask_data, &ue_cnt, &ce_cnt, true, false);
}
/* check wafl pcs error */
for (i = 0; i < ARRAY_SIZE(wafl_pcs_err_status_reg_arct); i++) {
data = RREG32_PCIE(wafl_pcs_err_status_reg_arct[i]);
if (data)
- amdgpu_xgmi_query_pcs_error_status(adev,
- data, &ue_cnt, &ce_cnt, false);
+ amdgpu_xgmi_query_pcs_error_status(adev, data,
+ mask_data, &ue_cnt, &ce_cnt, false, false);
}
break;
case CHIP_VEGA20:
@@ -883,31 +971,35 @@ static void amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev,
for (i = 0; i < ARRAY_SIZE(xgmi_pcs_err_status_reg_vg20); i++) {
data = RREG32_PCIE(xgmi_pcs_err_status_reg_vg20[i]);
if (data)
- amdgpu_xgmi_query_pcs_error_status(adev,
- data, &ue_cnt, &ce_cnt, true);
+ amdgpu_xgmi_query_pcs_error_status(adev, data,
+ mask_data, &ue_cnt, &ce_cnt, true, false);
}
/* check wafl pcs error */
for (i = 0; i < ARRAY_SIZE(wafl_pcs_err_status_reg_vg20); i++) {
data = RREG32_PCIE(wafl_pcs_err_status_reg_vg20[i]);
if (data)
- amdgpu_xgmi_query_pcs_error_status(adev,
- data, &ue_cnt, &ce_cnt, false);
+ amdgpu_xgmi_query_pcs_error_status(adev, data,
+ mask_data, &ue_cnt, &ce_cnt, false, false);
}
break;
case CHIP_ALDEBARAN:
/* check xgmi3x16 pcs error */
for (i = 0; i < ARRAY_SIZE(xgmi3x16_pcs_err_status_reg_aldebaran); i++) {
data = RREG32_PCIE(xgmi3x16_pcs_err_status_reg_aldebaran[i]);
+ mask_data =
+ RREG32_PCIE(xgmi3x16_pcs_err_noncorrectable_mask_reg_aldebaran[i]);
if (data)
- amdgpu_xgmi_query_pcs_error_status(adev,
- data, &ue_cnt, &ce_cnt, true);
+ amdgpu_xgmi_query_pcs_error_status(adev, data,
+ mask_data, &ue_cnt, &ce_cnt, true, true);
}
/* check wafl pcs error */
for (i = 0; i < ARRAY_SIZE(walf_pcs_err_status_reg_aldebaran); i++) {
data = RREG32_PCIE(walf_pcs_err_status_reg_aldebaran[i]);
+ mask_data =
+ RREG32_PCIE(walf_pcs_err_noncorrectable_mask_reg_aldebaran[i]);
if (data)
- amdgpu_xgmi_query_pcs_error_status(adev,
- data, &ue_cnt, &ce_cnt, false);
+ amdgpu_xgmi_query_pcs_error_status(adev, data,
+ mask_data, &ue_cnt, &ce_cnt, false, true);
}
break;
default:
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
index afad094f84c2..10098fdd33fc 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
@@ -24,7 +24,6 @@
* Alex Deucher
*/
-#include <drm/drm_crtc_helper.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_fixed.h>
#include "amdgpu.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
index 18ae9433e463..d95b2dc78063 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
@@ -28,7 +28,6 @@
#include <acpi/video.h>
-#include <drm/drm_crtc_helper.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_connectors.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index cbca9866645c..67d16236b216 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -73,10 +73,9 @@ u32 amdgpu_cik_gpu_check_soft_reset(struct amdgpu_device *adev);
static void cik_sdma_free_microcode(struct amdgpu_device *adev)
{
int i;
- for (i = 0; i < adev->sdma.num_instances; i++) {
- release_firmware(adev->sdma.instance[i].fw);
- adev->sdma.instance[i].fw = NULL;
- }
+
+ for (i = 0; i < adev->sdma.num_instances; i++)
+ amdgpu_ucode_release(&adev->sdma.instance[i].fw);
}
/*
@@ -137,18 +136,15 @@ static int cik_sdma_init_microcode(struct amdgpu_device *adev)
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name);
else
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma1.bin", chip_name);
- err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev);
+ err = amdgpu_ucode_request(adev, &adev->sdma.instance[i].fw, fw_name);
if (err)
goto out;
- err = amdgpu_ucode_validate(adev->sdma.instance[i].fw);
}
out:
if (err) {
pr_err("cik_sdma: Failed to load firmware \"%s\"\n", fw_name);
- for (i = 0; i < adev->sdma.num_instances; i++) {
- release_firmware(adev->sdma.instance[i].fw);
- adev->sdma.instance[i].fw = NULL;
- }
+ for (i = 0; i < adev->sdma.num_instances; i++)
+ amdgpu_ucode_release(&adev->sdma.instance[i].fw);
}
return err;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 248f1a4e915f..9a24ed463abd 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -21,8 +21,9 @@
*
*/
-#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_modeset_helper.h>
+#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_vblank.h>
#include "amdgpu.h"
@@ -2837,7 +2838,7 @@ static int dce_v10_0_sw_init(void *handle)
if (r)
return r;
- INIT_WORK(&adev->hotplug_work,
+ INIT_DELAYED_WORK(&adev->hotplug_work,
amdgpu_display_hotplug_work_func);
drm_kms_helper_poll_init(adev_to_drm(adev));
@@ -2902,7 +2903,7 @@ static int dce_v10_0_hw_fini(void *handle)
dce_v10_0_pageflip_interrupt_fini(adev);
- flush_work(&adev->hotplug_work);
+ flush_delayed_work(&adev->hotplug_work);
return 0;
}
@@ -3302,7 +3303,7 @@ static int dce_v10_0_hpd_irq(struct amdgpu_device *adev,
if (disp_int & mask) {
dce_v10_0_hpd_int_ack(adev, hpd);
- schedule_work(&adev->hotplug_work);
+ schedule_delayed_work(&adev->hotplug_work, 0);
DRM_DEBUG("IH: HPD%d\n", hpd + 1);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index cd9c19060d89..c14b70350a51 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -21,8 +21,9 @@
*
*/
-#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_modeset_helper.h>
+#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_vblank.h>
#include "amdgpu.h"
@@ -2956,7 +2957,7 @@ static int dce_v11_0_sw_init(void *handle)
if (r)
return r;
- INIT_WORK(&adev->hotplug_work,
+ INIT_DELAYED_WORK(&adev->hotplug_work,
amdgpu_display_hotplug_work_func);
drm_kms_helper_poll_init(adev_to_drm(adev));
@@ -3032,7 +3033,7 @@ static int dce_v11_0_hw_fini(void *handle)
dce_v11_0_pageflip_interrupt_fini(adev);
- flush_work(&adev->hotplug_work);
+ flush_delayed_work(&adev->hotplug_work);
return 0;
}
@@ -3426,7 +3427,7 @@ static int dce_v11_0_hpd_irq(struct amdgpu_device *adev,
if (disp_int & mask) {
dce_v11_0_hpd_int_ack(adev, hpd);
- schedule_work(&adev->hotplug_work);
+ schedule_delayed_work(&adev->hotplug_work, 0);
DRM_DEBUG("IH: HPD%d\n", hpd + 1);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index 76323deecc58..7f85ba5b726f 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -23,8 +23,9 @@
#include <linux/pci.h>
-#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_modeset_helper.h>
+#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_vblank.h>
#include "amdgpu.h"
@@ -2715,7 +2716,7 @@ static int dce_v6_0_sw_init(void *handle)
return r;
/* Pre-DCE11 */
- INIT_WORK(&adev->hotplug_work,
+ INIT_DELAYED_WORK(&adev->hotplug_work,
amdgpu_display_hotplug_work_func);
drm_kms_helper_poll_init(adev_to_drm(adev));
@@ -2776,7 +2777,7 @@ static int dce_v6_0_hw_fini(void *handle)
dce_v6_0_pageflip_interrupt_fini(adev);
- flush_work(&adev->hotplug_work);
+ flush_delayed_work(&adev->hotplug_work);
return 0;
}
@@ -3103,7 +3104,7 @@ static int dce_v6_0_hpd_irq(struct amdgpu_device *adev,
tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;
WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
- schedule_work(&adev->hotplug_work);
+ schedule_delayed_work(&adev->hotplug_work, 0);
DRM_DEBUG("IH: HPD%d\n", hpd + 1);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index 01cf3ab111cb..d421a268c9ff 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -21,8 +21,9 @@
*
*/
-#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_modeset_helper.h>
+#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_vblank.h>
#include "amdgpu.h"
@@ -2739,7 +2740,7 @@ static int dce_v8_0_sw_init(void *handle)
return r;
/* Pre-DCE11 */
- INIT_WORK(&adev->hotplug_work,
+ INIT_DELAYED_WORK(&adev->hotplug_work,
amdgpu_display_hotplug_work_func);
drm_kms_helper_poll_init(adev_to_drm(adev));
@@ -2802,7 +2803,7 @@ static int dce_v8_0_hw_fini(void *handle)
dce_v8_0_pageflip_interrupt_fini(adev);
- flush_work(&adev->hotplug_work);
+ flush_delayed_work(&adev->hotplug_work);
return 0;
}
@@ -3195,7 +3196,7 @@ static int dce_v8_0_hpd_irq(struct amdgpu_device *adev,
tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;
WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
- schedule_work(&adev->hotplug_work);
+ schedule_delayed_work(&adev->hotplug_work, 0);
DRM_DEBUG("IH: HPD%d\n", hpd + 1);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/df_v1_7.c b/drivers/gpu/drm/amd/amdgpu/df_v1_7.c
index b991609f46c1..5dfab80ffff2 100644
--- a/drivers/gpu/drm/amd/amdgpu/df_v1_7.c
+++ b/drivers/gpu/drm/amd/amdgpu/df_v1_7.c
@@ -94,7 +94,7 @@ static void df_v1_7_update_medium_grain_clock_gating(struct amdgpu_device *adev,
WREG32_SOC15(DF, 0, mmDF_PIE_AON0_DfGlobalClkGater, tmp);
}
- /* Exit boradcast mode */
+ /* Exit broadcast mode */
adev->df.funcs->enable_broadcast_mode(adev, false);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/df_v4_3.c b/drivers/gpu/drm/amd/amdgpu/df_v4_3.c
new file mode 100644
index 000000000000..e8b9e19ede2e
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/df_v4_3.c
@@ -0,0 +1,61 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "df_v4_3.h"
+
+#include "df/df_4_3_offset.h"
+#include "df/df_4_3_sh_mask.h"
+
+static bool df_v4_3_query_ras_poison_mode(struct amdgpu_device *adev)
+{
+ uint32_t hw_assert_msklo, hw_assert_mskhi;
+ uint32_t v0, v1, v28, v31;
+
+ hw_assert_msklo = RREG32_SOC15(DF, 0,
+ regDF_CS_UMC_AON0_HardwareAssertMaskLow);
+ hw_assert_mskhi = RREG32_SOC15(DF, 0,
+ regDF_NCS_PG0_HardwareAssertMaskHigh);
+
+ v0 = REG_GET_FIELD(hw_assert_msklo,
+ DF_CS_UMC_AON0_HardwareAssertMaskLow, HWAssertMsk0);
+ v1 = REG_GET_FIELD(hw_assert_msklo,
+ DF_CS_UMC_AON0_HardwareAssertMaskLow, HWAssertMsk1);
+ v28 = REG_GET_FIELD(hw_assert_mskhi,
+ DF_NCS_PG0_HardwareAssertMaskHigh, HWAssertMsk28);
+ v31 = REG_GET_FIELD(hw_assert_mskhi,
+ DF_NCS_PG0_HardwareAssertMaskHigh, HWAssertMsk31);
+
+ if (v0 && v1 && v28 && v31)
+ return true;
+ else if (!v0 && !v1 && !v28 && !v31)
+ return false;
+ else {
+ dev_warn(adev->dev, "DF poison setting is inconsistent(%d:%d:%d:%d)!\n",
+ v0, v1, v28, v31);
+ return false;
+ }
+}
+
+const struct amdgpu_df_funcs df_v4_3_funcs = {
+ .query_ras_poison_mode = df_v4_3_query_ras_poison_mode,
+};
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_frl.h b/drivers/gpu/drm/amd/amdgpu/df_v4_3.h
index ea8d9760132f..06ef0724edd3 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_frl.h
+++ b/drivers/gpu/drm/amd/amdgpu/df_v4_3.h
@@ -19,16 +19,13 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
- * Authors: AMD
- *
*/
-#ifndef __LINK_HWSS_HPO_FRL_H__
-#define __LINK_HWSS_HPO_FRL_H__
-#include "link_hwss.h"
+#ifndef __DF_V4_3_H__
+#define __DF_V4_3_H__
+
+#include "soc15_common.h"
-bool can_use_hpo_frl_link_hwss(const struct dc_link *link,
- const struct link_resource *link_res);
-const struct link_hwss *get_hpo_frl_link_hwss(void);
+extern const struct amdgpu_df_funcs df_v4_3_funcs;
-#endif /* __LINK_HWSS_HPO_FRL_H__ */
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index 49d34c7bbf20..6983acc456b2 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -3891,18 +3891,12 @@ err1:
static void gfx_v10_0_free_microcode(struct amdgpu_device *adev)
{
- release_firmware(adev->gfx.pfp_fw);
- adev->gfx.pfp_fw = NULL;
- release_firmware(adev->gfx.me_fw);
- adev->gfx.me_fw = NULL;
- release_firmware(adev->gfx.ce_fw);
- adev->gfx.ce_fw = NULL;
- release_firmware(adev->gfx.rlc_fw);
- adev->gfx.rlc_fw = NULL;
- release_firmware(adev->gfx.mec_fw);
- adev->gfx.mec_fw = NULL;
- release_firmware(adev->gfx.mec2_fw);
- adev->gfx.mec2_fw = NULL;
+ amdgpu_ucode_release(&adev->gfx.pfp_fw);
+ amdgpu_ucode_release(&adev->gfx.me_fw);
+ amdgpu_ucode_release(&adev->gfx.ce_fw);
+ amdgpu_ucode_release(&adev->gfx.rlc_fw);
+ amdgpu_ucode_release(&adev->gfx.mec_fw);
+ amdgpu_ucode_release(&adev->gfx.mec2_fw);
kfree(adev->gfx.rlc.register_list_format);
}
@@ -3974,9 +3968,9 @@ static void gfx_v10_0_check_gfxoff_flag(struct amdgpu_device *adev)
static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
{
- const char *chip_name;
char fw_name[40];
- char *wks = "";
+ char ucode_prefix[30];
+ const char *wks = "";
int err;
const struct rlc_firmware_header_v2_0 *rlc_hdr;
uint16_t version_major;
@@ -3984,90 +3978,40 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
DRM_DEBUG("\n");
- switch (adev->ip_versions[GC_HWIP][0]) {
- case IP_VERSION(10, 1, 10):
- chip_name = "navi10";
- break;
- case IP_VERSION(10, 1, 1):
- chip_name = "navi14";
- if (!(adev->pdev->device == 0x7340 &&
- adev->pdev->revision != 0x00))
- wks = "_wks";
- break;
- case IP_VERSION(10, 1, 2):
- chip_name = "navi12";
- break;
- case IP_VERSION(10, 3, 0):
- chip_name = "sienna_cichlid";
- break;
- case IP_VERSION(10, 3, 2):
- chip_name = "navy_flounder";
- break;
- case IP_VERSION(10, 3, 1):
- chip_name = "vangogh";
- break;
- case IP_VERSION(10, 3, 4):
- chip_name = "dimgrey_cavefish";
- break;
- case IP_VERSION(10, 3, 5):
- chip_name = "beige_goby";
- break;
- case IP_VERSION(10, 3, 3):
- chip_name = "yellow_carp";
- break;
- case IP_VERSION(10, 3, 6):
- chip_name = "gc_10_3_6";
- break;
- case IP_VERSION(10, 1, 3):
- case IP_VERSION(10, 1, 4):
- chip_name = "cyan_skillfish2";
- break;
- case IP_VERSION(10, 3, 7):
- chip_name = "gc_10_3_7";
- break;
- default:
- BUG();
- }
+ if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 1) &&
+ (!(adev->pdev->device == 0x7340 && adev->pdev->revision != 0x00)))
+ wks = "_wks";
+ amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp%s.bin", chip_name, wks);
- err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
- if (err)
- goto out;
- err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp%s.bin", ucode_prefix, wks);
+ err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw, fw_name);
if (err)
goto out;
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_PFP);
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me%s.bin", chip_name, wks);
- err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
- if (err)
- goto out;
- err = amdgpu_ucode_validate(adev->gfx.me_fw);
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me%s.bin", ucode_prefix, wks);
+ err = amdgpu_ucode_request(adev, &adev->gfx.me_fw, fw_name);
if (err)
goto out;
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_ME);
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce%s.bin", chip_name, wks);
- err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
- if (err)
- goto out;
- err = amdgpu_ucode_validate(adev->gfx.ce_fw);
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce%s.bin", ucode_prefix, wks);
+ err = amdgpu_ucode_request(adev, &adev->gfx.ce_fw, fw_name);
if (err)
goto out;
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_CE);
if (!amdgpu_sriov_vf(adev)) {
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
- err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
- if (err)
- goto out;
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", ucode_prefix);
+ err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw, fw_name);
/* don't check this. There are apparently firmwares in the wild with
* incorrect size in the header
*/
- err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
+ if (err == -ENODEV)
+ goto out;
if (err)
dev_dbg(adev->dev,
- "gfx10: amdgpu_ucode_validate() failed \"%s\"\n",
+ "gfx10: amdgpu_ucode_request() failed \"%s\"\n",
fw_name);
rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
version_major = le16_to_cpu(rlc_hdr->header.header_version_major);
@@ -4077,47 +4021,34 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
goto out;
}
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec%s.bin", chip_name, wks);
- err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
- if (err)
- goto out;
- err = amdgpu_ucode_validate(adev->gfx.mec_fw);
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec%s.bin", ucode_prefix, wks);
+ err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw, fw_name);
if (err)
goto out;
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1);
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1_JT);
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2%s.bin", chip_name, wks);
- err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2%s.bin", ucode_prefix, wks);
+ err = amdgpu_ucode_request(adev, &adev->gfx.mec2_fw, fw_name);
if (!err) {
- err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
- if (err)
- goto out;
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC2);
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC2_JT);
} else {
err = 0;
adev->gfx.mec2_fw = NULL;
}
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC2);
+ amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC2_JT);
gfx_v10_0_check_fw_write_wait(adev);
out:
if (err) {
- dev_err(adev->dev,
- "gfx10: Failed to init firmware \"%s\"\n",
- fw_name);
- release_firmware(adev->gfx.pfp_fw);
- adev->gfx.pfp_fw = NULL;
- release_firmware(adev->gfx.me_fw);
- adev->gfx.me_fw = NULL;
- release_firmware(adev->gfx.ce_fw);
- adev->gfx.ce_fw = NULL;
- release_firmware(adev->gfx.rlc_fw);
- adev->gfx.rlc_fw = NULL;
- release_firmware(adev->gfx.mec_fw);
- adev->gfx.mec_fw = NULL;
- release_firmware(adev->gfx.mec2_fw);
- adev->gfx.mec2_fw = NULL;
+ amdgpu_ucode_release(&adev->gfx.pfp_fw);
+ amdgpu_ucode_release(&adev->gfx.me_fw);
+ amdgpu_ucode_release(&adev->gfx.ce_fw);
+ amdgpu_ucode_release(&adev->gfx.rlc_fw);
+ amdgpu_ucode_release(&adev->gfx.mec_fw);
+ amdgpu_ucode_release(&adev->gfx.mec2_fw);
}
gfx_v10_0_check_gfxoff_flag(adev);
@@ -4270,19 +4201,11 @@ static void gfx_v10_0_mec_fini(struct amdgpu_device *adev)
amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_obj, NULL, NULL);
}
-static int gfx_v10_0_me_init(struct amdgpu_device *adev)
+static void gfx_v10_0_me_init(struct amdgpu_device *adev)
{
- int r;
-
bitmap_zero(adev->gfx.me.queue_bitmap, AMDGPU_MAX_GFX_QUEUES);
amdgpu_gfx_graphics_queue_acquire(adev);
-
- r = gfx_v10_0_init_microcode(adev);
- if (r)
- DRM_ERROR("Failed to load gfx firmware!\n");
-
- return r;
}
static int gfx_v10_0_mec_init(struct amdgpu_device *adev)
@@ -4650,9 +4573,7 @@ static int gfx_v10_0_sw_init(void *handle)
adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;
- r = gfx_v10_0_me_init(adev);
- if (r)
- return r;
+ gfx_v10_0_me_init(adev);
if (adev->gfx.rlc.funcs) {
if (adev->gfx.rlc.funcs->init) {
@@ -7630,7 +7551,7 @@ static int gfx_v10_0_early_init(void *handle)
/* init rlcg reg access ctrl */
gfx_v10_0_init_rlcg_reg_access_ctrl(adev);
- return 0;
+ return gfx_v10_0_init_microcode(adev);
}
static int gfx_v10_0_late_init(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
index b9b57a66e113..8ad8a0bffcac 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
@@ -46,6 +46,7 @@
#include "clearstate_gfx11.h"
#include "v11_structs.h"
#include "gfx_v11_0.h"
+#include "gfx_v11_0_3.h"
#include "nbio_v4_3.h"
#include "mes_v11_0.h"
@@ -431,18 +432,37 @@ err1:
static void gfx_v11_0_free_microcode(struct amdgpu_device *adev)
{
- release_firmware(adev->gfx.pfp_fw);
- adev->gfx.pfp_fw = NULL;
- release_firmware(adev->gfx.me_fw);
- adev->gfx.me_fw = NULL;
- release_firmware(adev->gfx.rlc_fw);
- adev->gfx.rlc_fw = NULL;
- release_firmware(adev->gfx.mec_fw);
- adev->gfx.mec_fw = NULL;
+ amdgpu_ucode_release(&adev->gfx.pfp_fw);
+ amdgpu_ucode_release(&adev->gfx.me_fw);
+ amdgpu_ucode_release(&adev->gfx.rlc_fw);
+ amdgpu_ucode_release(&adev->gfx.mec_fw);
kfree(adev->gfx.rlc.register_list_format);
}
+static int gfx_v11_0_init_toc_microcode(struct amdgpu_device *adev, const char *ucode_prefix)
+{
+ const struct psp_firmware_header_v1_0 *toc_hdr;
+ int err = 0;
+ char fw_name[40];
+
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_toc.bin", ucode_prefix);
+ err = amdgpu_ucode_request(adev, &adev->psp.toc_fw, fw_name);
+ if (err)
+ goto out;
+
+ toc_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.toc_fw->data;
+ adev->psp.toc.fw_version = le32_to_cpu(toc_hdr->header.ucode_version);
+ adev->psp.toc.feature_version = le32_to_cpu(toc_hdr->sos.fw_version);
+ adev->psp.toc.size_bytes = le32_to_cpu(toc_hdr->header.ucode_size_bytes);
+ adev->psp.toc.start_addr = (uint8_t *)toc_hdr +
+ le32_to_cpu(toc_hdr->header.ucode_array_offset_bytes);
+ return 0;
+out:
+ amdgpu_ucode_release(&adev->psp.toc_fw);
+ return err;
+}
+
static int gfx_v11_0_init_microcode(struct amdgpu_device *adev)
{
char fw_name[40];
@@ -457,10 +477,7 @@ static int gfx_v11_0_init_microcode(struct amdgpu_device *adev)
amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", ucode_prefix);
- err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
- if (err)
- goto out;
- err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
+ err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw, fw_name);
if (err)
goto out;
/* check pfp fw hdr version to decide if enable rs64 for gfx11.*/
@@ -477,10 +494,7 @@ static int gfx_v11_0_init_microcode(struct amdgpu_device *adev)
}
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", ucode_prefix);
- err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
- if (err)
- goto out;
- err = amdgpu_ucode_validate(adev->gfx.me_fw);
+ err = amdgpu_ucode_request(adev, &adev->gfx.me_fw, fw_name);
if (err)
goto out;
if (adev->gfx.rs64_enable) {
@@ -493,10 +507,7 @@ static int gfx_v11_0_init_microcode(struct amdgpu_device *adev)
if (!amdgpu_sriov_vf(adev)) {
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", ucode_prefix);
- err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
- if (err)
- goto out;
- err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
+ err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw, fw_name);
if (err)
goto out;
rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
@@ -508,10 +519,7 @@ static int gfx_v11_0_init_microcode(struct amdgpu_device *adev)
}
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", ucode_prefix);
- err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
- if (err)
- goto out;
- err = amdgpu_ucode_validate(adev->gfx.mec_fw);
+ err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw, fw_name);
if (err)
goto out;
if (adev->gfx.rs64_enable) {
@@ -525,59 +533,23 @@ static int gfx_v11_0_init_microcode(struct amdgpu_device *adev)
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1_JT);
}
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO)
+ err = gfx_v11_0_init_toc_microcode(adev, ucode_prefix);
+
/* only one MEC for gfx 11.0.0. */
adev->gfx.mec2_fw = NULL;
out:
if (err) {
- dev_err(adev->dev,
- "gfx11: Failed to init firmware \"%s\"\n",
- fw_name);
- release_firmware(adev->gfx.pfp_fw);
- adev->gfx.pfp_fw = NULL;
- release_firmware(adev->gfx.me_fw);
- adev->gfx.me_fw = NULL;
- release_firmware(adev->gfx.rlc_fw);
- adev->gfx.rlc_fw = NULL;
- release_firmware(adev->gfx.mec_fw);
- adev->gfx.mec_fw = NULL;
+ amdgpu_ucode_release(&adev->gfx.pfp_fw);
+ amdgpu_ucode_release(&adev->gfx.me_fw);
+ amdgpu_ucode_release(&adev->gfx.rlc_fw);
+ amdgpu_ucode_release(&adev->gfx.mec_fw);
}
return err;
}
-static int gfx_v11_0_init_toc_microcode(struct amdgpu_device *adev)
-{
- const struct psp_firmware_header_v1_0 *toc_hdr;
- int err = 0;
- char fw_name[40];
- char ucode_prefix[30];
-
- amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));
-
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_toc.bin", ucode_prefix);
- err = request_firmware(&adev->psp.toc_fw, fw_name, adev->dev);
- if (err)
- goto out;
-
- err = amdgpu_ucode_validate(adev->psp.toc_fw);
- if (err)
- goto out;
-
- toc_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.toc_fw->data;
- adev->psp.toc.fw_version = le32_to_cpu(toc_hdr->header.ucode_version);
- adev->psp.toc.feature_version = le32_to_cpu(toc_hdr->sos.fw_version);
- adev->psp.toc.size_bytes = le32_to_cpu(toc_hdr->header.ucode_size_bytes);
- adev->psp.toc.start_addr = (uint8_t *)toc_hdr +
- le32_to_cpu(toc_hdr->header.ucode_array_offset_bytes);
- return 0;
-out:
- dev_err(adev->dev, "Failed to load TOC microcode\n");
- release_firmware(adev->psp.toc_fw);
- adev->psp.toc_fw = NULL;
- return err;
-}
-
static u32 gfx_v11_0_get_csb_size(struct amdgpu_device *adev)
{
u32 count = 0;
@@ -714,19 +686,11 @@ static void gfx_v11_0_mec_fini(struct amdgpu_device *adev)
amdgpu_bo_free_kernel(&adev->gfx.mec.mec_fw_data_obj, NULL, NULL);
}
-static int gfx_v11_0_me_init(struct amdgpu_device *adev)
+static void gfx_v11_0_me_init(struct amdgpu_device *adev)
{
- int r;
-
bitmap_zero(adev->gfx.me.queue_bitmap, AMDGPU_MAX_GFX_QUEUES);
amdgpu_gfx_graphics_queue_acquire(adev);
-
- r = gfx_v11_0_init_microcode(adev);
- if (r)
- DRM_ERROR("Failed to load gfx firmware!\n");
-
- return r;
}
static int gfx_v11_0_mec_init(struct amdgpu_device *adev)
@@ -790,8 +754,8 @@ static void gfx_v11_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd,
* zero here */
WARN_ON(simd != 0);
- /* type 2 wave data */
- dst[(*no_fields)++] = 2;
+ /* type 3 wave data */
+ dst[(*no_fields)++] = 3;
dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_STATUS);
dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_PC_LO);
dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_PC_HI);
@@ -852,7 +816,14 @@ static int gfx_v11_0_gpu_early_init(struct amdgpu_device *adev)
switch (adev->ip_versions[GC_HWIP][0]) {
case IP_VERSION(11, 0, 0):
case IP_VERSION(11, 0, 2):
+ adev->gfx.config.max_hw_contexts = 8;
+ adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
+ adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
+ adev->gfx.config.sc_hiz_tile_fifo_size = 0;
+ adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
+ break;
case IP_VERSION(11, 0, 3):
+ adev->gfx.ras = &gfx_v11_0_3_ras;
adev->gfx.config.max_hw_contexts = 8;
adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
@@ -987,10 +958,11 @@ static int gfx_v11_0_rlc_autoload_buffer_init(struct amdgpu_device *adev)
total_size = gfx_v11_0_calc_toc_total_size(adev);
r = amdgpu_bo_create_reserved(adev, total_size, 64 * 1024,
- AMDGPU_GEM_DOMAIN_VRAM,
- &adev->gfx.rlc.rlc_autoload_bo,
- &adev->gfx.rlc.rlc_autoload_gpu_addr,
- (void **)&adev->gfx.rlc.rlc_autoload_ptr);
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
+ &adev->gfx.rlc.rlc_autoload_bo,
+ &adev->gfx.rlc.rlc_autoload_gpu_addr,
+ (void **)&adev->gfx.rlc.rlc_autoload_ptr);
if (r) {
dev_err(adev->dev, "(%d) failed to create fw autoload bo\n", r);
@@ -1336,6 +1308,20 @@ static int gfx_v11_0_sw_init(void *handle)
if (r)
return r;
+ /* ECC error */
+ r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GRBM_CP,
+ GFX_11_0_0__SRCID__CP_ECC_ERROR,
+ &adev->gfx.cp_ecc_error_irq);
+ if (r)
+ return r;
+
+ /* FED error */
+ r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GFX,
+ GFX_11_0_0__SRCID__RLC_GC_FED_INTERRUPT,
+ &adev->gfx.rlc_gc_fed_irq);
+ if (r)
+ return r;
+
adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;
if (adev->gfx.imu.funcs) {
@@ -1346,9 +1332,7 @@ static int gfx_v11_0_sw_init(void *handle)
}
}
- r = gfx_v11_0_me_init(adev);
- if (r)
- return r;
+ gfx_v11_0_me_init(adev);
r = gfx_v11_0_rlc_init(adev);
if (r) {
@@ -1416,9 +1400,6 @@ static int gfx_v11_0_sw_init(void *handle)
/* allocate visible FB for rlc auto-loading fw */
if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) {
- r = gfx_v11_0_init_toc_microcode(adev);
- if (r)
- dev_err(adev->dev, "Failed to load toc firmware!\n");
r = gfx_v11_0_rlc_autoload_buffer_init(adev);
if (r)
return r;
@@ -1428,6 +1409,11 @@ static int gfx_v11_0_sw_init(void *handle)
if (r)
return r;
+ if (amdgpu_gfx_ras_sw_init(adev)) {
+ dev_err(adev->dev, "Failed to initialize gfx ras block!\n");
+ return -EINVAL;
+ }
+
return 0;
}
@@ -2656,7 +2642,9 @@ static int gfx_v11_0_cp_gfx_load_pfp_microcode_rs64(struct amdgpu_device *adev)
/* 64kb align */
r = amdgpu_bo_create_reserved(adev, fw_ucode_size,
- 64 * 1024, AMDGPU_GEM_DOMAIN_VRAM,
+ 64 * 1024,
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
&adev->gfx.pfp.pfp_fw_obj,
&adev->gfx.pfp.pfp_fw_gpu_addr,
(void **)&adev->gfx.pfp.pfp_fw_ptr);
@@ -2667,7 +2655,9 @@ static int gfx_v11_0_cp_gfx_load_pfp_microcode_rs64(struct amdgpu_device *adev)
}
r = amdgpu_bo_create_reserved(adev, fw_data_size,
- 64 * 1024, AMDGPU_GEM_DOMAIN_VRAM,
+ 64 * 1024,
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
&adev->gfx.pfp.pfp_fw_data_obj,
&adev->gfx.pfp.pfp_fw_data_gpu_addr,
(void **)&adev->gfx.pfp.pfp_fw_data_ptr);
@@ -2870,7 +2860,9 @@ static int gfx_v11_0_cp_gfx_load_me_microcode_rs64(struct amdgpu_device *adev)
/* 64kb align*/
r = amdgpu_bo_create_reserved(adev, fw_ucode_size,
- 64 * 1024, AMDGPU_GEM_DOMAIN_VRAM,
+ 64 * 1024,
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
&adev->gfx.me.me_fw_obj,
&adev->gfx.me.me_fw_gpu_addr,
(void **)&adev->gfx.me.me_fw_ptr);
@@ -2881,7 +2873,9 @@ static int gfx_v11_0_cp_gfx_load_me_microcode_rs64(struct amdgpu_device *adev)
}
r = amdgpu_bo_create_reserved(adev, fw_data_size,
- 64 * 1024, AMDGPU_GEM_DOMAIN_VRAM,
+ 64 * 1024,
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
&adev->gfx.me.me_fw_data_obj,
&adev->gfx.me.me_fw_data_gpu_addr,
(void **)&adev->gfx.me.me_fw_data_ptr);
@@ -3387,7 +3381,9 @@ static int gfx_v11_0_cp_compute_load_microcode_rs64(struct amdgpu_device *adev)
fw_data_size = le32_to_cpu(mec_hdr->data_size_bytes);
r = amdgpu_bo_create_reserved(adev, fw_ucode_size,
- 64 * 1024, AMDGPU_GEM_DOMAIN_VRAM,
+ 64 * 1024,
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
&adev->gfx.mec.mec_fw_obj,
&adev->gfx.mec.mec_fw_gpu_addr,
(void **)&fw_ucode_ptr);
@@ -3398,7 +3394,9 @@ static int gfx_v11_0_cp_compute_load_microcode_rs64(struct amdgpu_device *adev)
}
r = amdgpu_bo_create_reserved(adev, fw_data_size,
- 64 * 1024, AMDGPU_GEM_DOMAIN_VRAM,
+ 64 * 1024,
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
&adev->gfx.mec.mec_fw_data_obj,
&adev->gfx.mec.mec_fw_data_gpu_addr,
(void **)&fw_data_ptr);
@@ -4408,6 +4406,7 @@ static int gfx_v11_0_hw_fini(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int r;
+ amdgpu_irq_put(adev, &adev->gfx.cp_ecc_error_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
@@ -4687,7 +4686,7 @@ static int gfx_v11_0_early_init(void *handle)
gfx_v11_0_init_rlcg_reg_access_ctrl(adev);
- return 0;
+ return gfx_v11_0_init_microcode(adev);
}
static int gfx_v11_0_ras_late_init(void *handle)
@@ -5839,6 +5838,36 @@ static void gfx_v11_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev
}
}
+#define CP_ME1_PIPE_INST_ADDR_INTERVAL 0x1
+#define SET_ECC_ME_PIPE_STATE(reg_addr, state) \
+ do { \
+ uint32_t tmp = RREG32_SOC15_IP(GC, reg_addr); \
+ tmp = REG_SET_FIELD(tmp, CP_ME1_PIPE0_INT_CNTL, CP_ECC_ERROR_INT_ENABLE, state); \
+ WREG32_SOC15_IP(GC, reg_addr, tmp); \
+ } while (0)
+
+static int gfx_v11_0_set_cp_ecc_error_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ uint32_t ecc_irq_state = 0;
+ uint32_t pipe0_int_cntl_addr = 0;
+ int i = 0;
+
+ ecc_irq_state = (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0;
+
+ pipe0_int_cntl_addr = SOC15_REG_OFFSET(GC, 0, regCP_ME1_PIPE0_INT_CNTL);
+
+ WREG32_FIELD15_PREREG(GC, 0, CP_INT_CNTL_RING0, CP_ECC_ERROR_INT_ENABLE, ecc_irq_state);
+
+ for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++)
+ SET_ECC_ME_PIPE_STATE(pipe0_int_cntl_addr + i * CP_ME1_PIPE_INST_ADDR_INTERVAL,
+ ecc_irq_state);
+
+ return 0;
+}
+
static int gfx_v11_0_set_eop_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
unsigned type,
@@ -6015,6 +6044,16 @@ static int gfx_v11_0_priv_inst_irq(struct amdgpu_device *adev,
return 0;
}
+static int gfx_v11_0_rlc_gc_fed_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ if (adev->gfx.ras && adev->gfx.ras->rlc_gc_fed_irq)
+ return adev->gfx.ras->rlc_gc_fed_irq(adev, source, entry);
+
+ return 0;
+}
+
#if 0
static int gfx_v11_0_kiq_set_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
@@ -6245,6 +6284,15 @@ static const struct amdgpu_irq_src_funcs gfx_v11_0_priv_inst_irq_funcs = {
.process = gfx_v11_0_priv_inst_irq,
};
+static const struct amdgpu_irq_src_funcs gfx_v11_0_cp_ecc_error_irq_funcs = {
+ .set = gfx_v11_0_set_cp_ecc_error_state,
+ .process = amdgpu_gfx_cp_ecc_error_irq,
+};
+
+static const struct amdgpu_irq_src_funcs gfx_v11_0_rlc_gc_fed_irq_funcs = {
+ .process = gfx_v11_0_rlc_gc_fed_irq,
+};
+
static void gfx_v11_0_set_irq_funcs(struct amdgpu_device *adev)
{
adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
@@ -6255,6 +6303,13 @@ static void gfx_v11_0_set_irq_funcs(struct amdgpu_device *adev)
adev->gfx.priv_inst_irq.num_types = 1;
adev->gfx.priv_inst_irq.funcs = &gfx_v11_0_priv_inst_irq_funcs;
+
+ adev->gfx.cp_ecc_error_irq.num_types = 1; /* CP ECC error */
+ adev->gfx.cp_ecc_error_irq.funcs = &gfx_v11_0_cp_ecc_error_irq_funcs;
+
+ adev->gfx.rlc_gc_fed_irq.num_types = 1; /* 0x80 FED error */
+ adev->gfx.rlc_gc_fed_irq.funcs = &gfx_v11_0_rlc_gc_fed_irq_funcs;
+
}
static void gfx_v11_0_set_imu_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0_3.c
new file mode 100644
index 000000000000..b07a72ca25d9
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0_3.c
@@ -0,0 +1,88 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "amdgpu.h"
+#include "soc21.h"
+#include "gc/gc_11_0_3_offset.h"
+#include "gc/gc_11_0_3_sh_mask.h"
+#include "ivsrcid/gfx/irqsrcs_gfx_11_0_0.h"
+#include "soc15.h"
+#include "soc15d.h"
+#include "gfx_v11_0.h"
+
+
+static int gfx_v11_0_3_rlc_gc_fed_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ uint32_t rlc_status0 = 0, rlc_status1 = 0;
+ struct ras_common_if *ras_if = NULL;
+ struct ras_dispatch_if ih_data = {
+ .entry = entry,
+ };
+
+ rlc_status0 = RREG32(SOC15_REG_OFFSET(GC, 0, regRLC_RLCS_FED_STATUS_0));
+ rlc_status1 = RREG32(SOC15_REG_OFFSET(GC, 0, regRLC_RLCS_FED_STATUS_1));
+
+ if (!rlc_status0 && !rlc_status1) {
+ dev_warn(adev->dev, "RLC_GC_FED irq is generated, but rlc_status0 and rlc_status1 are empty!\n");
+ return 0;
+ }
+
+ /* Use RLC_RLCS_FED_STATUS_0/1 to distinguish FED error block. */
+ if (REG_GET_FIELD(rlc_status0, RLC_RLCS_FED_STATUS_0, SDMA0_FED_ERR) ||
+ REG_GET_FIELD(rlc_status0, RLC_RLCS_FED_STATUS_0, SDMA1_FED_ERR))
+ ras_if = adev->sdma.ras_if;
+ else
+ ras_if = adev->gfx.ras_if;
+
+ if (!ras_if) {
+ dev_err(adev->dev, "Gfx or sdma ras block not initialized, rlc_status0:0x%x.\n",
+ rlc_status0);
+ return -EINVAL;
+ }
+
+ ih_data.head = *ras_if;
+
+ dev_warn(adev->dev, "RLC %s FED IRQ\n", ras_if->name);
+ amdgpu_ras_interrupt_dispatch(adev, &ih_data);
+
+ return 0;
+}
+
+static int gfx_v11_0_3_poison_consumption_handler(struct amdgpu_device *adev,
+ struct amdgpu_iv_entry *entry)
+{
+ /* Workaround: when vmid and pasid are both zero, trigger gpu reset in KGD. */
+ if (entry && (entry->client_id == SOC21_IH_CLIENTID_GFX) &&
+ (entry->src_id == GFX_11_0_0__SRCID__RLC_GC_FED_INTERRUPT) &&
+ !entry->vmid && !entry->pasid)
+ amdgpu_ras_reset_gpu(adev);
+
+ return 0;
+}
+
+struct amdgpu_gfx_ras gfx_v11_0_3_ras = {
+ .rlc_gc_fed_irq = gfx_v11_0_3_rlc_gc_fed_irq,
+ .poison_consumption_handler = gfx_v11_0_3_poison_consumption_handler,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0_3.h b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0_3.h
new file mode 100644
index 000000000000..672c7920b3d0
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0_3.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __GFX_V11_0_3_H__
+#define __GFX_V11_0_3_H__
+
+extern struct amdgpu_gfx_ras gfx_v11_0_3_ras;
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
index 204b246f0e3f..c41219e23151 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
@@ -338,10 +338,7 @@ static int gfx_v6_0_init_microcode(struct amdgpu_device *adev)
}
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
- err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
- if (err)
- goto out;
- err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
+ err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw, fw_name);
if (err)
goto out;
cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
@@ -349,10 +346,7 @@ static int gfx_v6_0_init_microcode(struct amdgpu_device *adev)
adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
- err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
- if (err)
- goto out;
- err = amdgpu_ucode_validate(adev->gfx.me_fw);
+ err = amdgpu_ucode_request(adev, &adev->gfx.me_fw, fw_name);
if (err)
goto out;
cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
@@ -360,10 +354,7 @@ static int gfx_v6_0_init_microcode(struct amdgpu_device *adev)
adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
- err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
- if (err)
- goto out;
- err = amdgpu_ucode_validate(adev->gfx.ce_fw);
+ err = amdgpu_ucode_request(adev, &adev->gfx.ce_fw, fw_name);
if (err)
goto out;
cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
@@ -371,10 +362,9 @@ static int gfx_v6_0_init_microcode(struct amdgpu_device *adev)
adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
- err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
+ err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw, fw_name);
if (err)
goto out;
- err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
rlc_hdr = (const struct rlc_firmware_header_v1_0 *)adev->gfx.rlc_fw->data;
adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version);
adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version);
@@ -382,14 +372,10 @@ static int gfx_v6_0_init_microcode(struct amdgpu_device *adev)
out:
if (err) {
pr_err("gfx6: Failed to load firmware \"%s\"\n", fw_name);
- release_firmware(adev->gfx.pfp_fw);
- adev->gfx.pfp_fw = NULL;
- release_firmware(adev->gfx.me_fw);
- adev->gfx.me_fw = NULL;
- release_firmware(adev->gfx.ce_fw);
- adev->gfx.ce_fw = NULL;
- release_firmware(adev->gfx.rlc_fw);
- adev->gfx.rlc_fw = NULL;
+ amdgpu_ucode_release(&adev->gfx.pfp_fw);
+ amdgpu_ucode_release(&adev->gfx.me_fw);
+ amdgpu_ucode_release(&adev->gfx.ce_fw);
+ amdgpu_ucode_release(&adev->gfx.rlc_fw);
}
return err;
}
@@ -2375,7 +2361,8 @@ static int gfx_v6_0_rlc_init(struct amdgpu_device *adev)
dws = adev->gfx.rlc.clear_state_size + (256 / 4);
r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
&adev->gfx.rlc.clear_state_obj,
&adev->gfx.rlc.clear_state_gpu_addr,
(void **)&adev->gfx.rlc.cs_ptr);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 0f2976507e48..9d5c1e29b4a3 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -887,6 +887,16 @@ static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev, volatile u32 *bu
static void gfx_v7_0_init_pg(struct amdgpu_device *adev);
static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev);
+static void gfx_v7_0_free_microcode(struct amdgpu_device *adev)
+{
+ amdgpu_ucode_release(&adev->gfx.pfp_fw);
+ amdgpu_ucode_release(&adev->gfx.me_fw);
+ amdgpu_ucode_release(&adev->gfx.ce_fw);
+ amdgpu_ucode_release(&adev->gfx.mec_fw);
+ amdgpu_ucode_release(&adev->gfx.mec2_fw);
+ amdgpu_ucode_release(&adev->gfx.rlc_fw);
+}
+
/*
* Core functions
*/
@@ -927,88 +937,44 @@ static int gfx_v7_0_init_microcode(struct amdgpu_device *adev)
}
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
- err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
- if (err)
- goto out;
- err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
+ err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw, fw_name);
if (err)
goto out;
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
- err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
- if (err)
- goto out;
- err = amdgpu_ucode_validate(adev->gfx.me_fw);
+ err = amdgpu_ucode_request(adev, &adev->gfx.me_fw, fw_name);
if (err)
goto out;
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
- err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
- if (err)
- goto out;
- err = amdgpu_ucode_validate(adev->gfx.ce_fw);
+ err = amdgpu_ucode_request(adev, &adev->gfx.ce_fw, fw_name);
if (err)
goto out;
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
- err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
- if (err)
- goto out;
- err = amdgpu_ucode_validate(adev->gfx.mec_fw);
+ err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw, fw_name);
if (err)
goto out;
if (adev->asic_type == CHIP_KAVERI) {
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
- err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
- if (err)
- goto out;
- err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
+ err = amdgpu_ucode_request(adev, &adev->gfx.mec2_fw, fw_name);
if (err)
goto out;
}
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
- err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
+ err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw, fw_name);
if (err)
goto out;
- err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
-
out:
if (err) {
pr_err("gfx7: Failed to load firmware \"%s\"\n", fw_name);
- release_firmware(adev->gfx.pfp_fw);
- adev->gfx.pfp_fw = NULL;
- release_firmware(adev->gfx.me_fw);
- adev->gfx.me_fw = NULL;
- release_firmware(adev->gfx.ce_fw);
- adev->gfx.ce_fw = NULL;
- release_firmware(adev->gfx.mec_fw);
- adev->gfx.mec_fw = NULL;
- release_firmware(adev->gfx.mec2_fw);
- adev->gfx.mec2_fw = NULL;
- release_firmware(adev->gfx.rlc_fw);
- adev->gfx.rlc_fw = NULL;
+ gfx_v7_0_free_microcode(adev);
}
return err;
}
-static void gfx_v7_0_free_microcode(struct amdgpu_device *adev)
-{
- release_firmware(adev->gfx.pfp_fw);
- adev->gfx.pfp_fw = NULL;
- release_firmware(adev->gfx.me_fw);
- adev->gfx.me_fw = NULL;
- release_firmware(adev->gfx.ce_fw);
- adev->gfx.ce_fw = NULL;
- release_firmware(adev->gfx.mec_fw);
- adev->gfx.mec_fw = NULL;
- release_firmware(adev->gfx.mec2_fw);
- adev->gfx.mec2_fw = NULL;
- release_firmware(adev->gfx.rlc_fw);
- adev->gfx.rlc_fw = NULL;
-}
-
/**
* gfx_v7_0_tiling_mode_table_init - init the hw tiling table
*
@@ -2772,7 +2738,8 @@ static int gfx_v7_0_mec_init(struct amdgpu_device *adev)
* GFX7_MEC_HPD_SIZE * 2;
r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
&adev->gfx.mec.hpd_eop_obj,
&adev->gfx.mec.hpd_eop_gpu_addr,
(void **)&hpd);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index d47135606e3e..b1f2684d854a 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -924,20 +924,14 @@ err1:
static void gfx_v8_0_free_microcode(struct amdgpu_device *adev)
{
- release_firmware(adev->gfx.pfp_fw);
- adev->gfx.pfp_fw = NULL;
- release_firmware(adev->gfx.me_fw);
- adev->gfx.me_fw = NULL;
- release_firmware(adev->gfx.ce_fw);
- adev->gfx.ce_fw = NULL;
- release_firmware(adev->gfx.rlc_fw);
- adev->gfx.rlc_fw = NULL;
- release_firmware(adev->gfx.mec_fw);
- adev->gfx.mec_fw = NULL;
+ amdgpu_ucode_release(&adev->gfx.pfp_fw);
+ amdgpu_ucode_release(&adev->gfx.me_fw);
+ amdgpu_ucode_release(&adev->gfx.ce_fw);
+ amdgpu_ucode_release(&adev->gfx.rlc_fw);
+ amdgpu_ucode_release(&adev->gfx.mec_fw);
if ((adev->asic_type != CHIP_STONEY) &&
(adev->asic_type != CHIP_TOPAZ))
- release_firmware(adev->gfx.mec2_fw);
- adev->gfx.mec2_fw = NULL;
+ amdgpu_ucode_release(&adev->gfx.mec2_fw);
kfree(adev->gfx.rlc.register_list_format);
}
@@ -989,40 +983,34 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp_2.bin", chip_name);
- err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
- if (err == -ENOENT) {
+ err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw, fw_name);
+ if (err == -ENODEV) {
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
- err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
+ err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw, fw_name);
}
} else {
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
- err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
+ err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw, fw_name);
}
if (err)
goto out;
- err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
- if (err)
- goto out;
cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me_2.bin", chip_name);
- err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
- if (err == -ENOENT) {
+ err = amdgpu_ucode_request(adev, &adev->gfx.me_fw, fw_name);
+ if (err == -ENODEV) {
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
- err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
+ err = amdgpu_ucode_request(adev, &adev->gfx.me_fw, fw_name);
}
} else {
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
- err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
+ err = amdgpu_ucode_request(adev, &adev->gfx.me_fw, fw_name);
}
if (err)
goto out;
- err = amdgpu_ucode_validate(adev->gfx.me_fw);
- if (err)
- goto out;
cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
@@ -1030,20 +1018,17 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce_2.bin", chip_name);
- err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
- if (err == -ENOENT) {
+ err = amdgpu_ucode_request(adev, &adev->gfx.ce_fw, fw_name);
+ if (err == -ENODEV) {
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
- err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
+ err = amdgpu_ucode_request(adev, &adev->gfx.ce_fw, fw_name);
}
} else {
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
- err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
+ err = amdgpu_ucode_request(adev, &adev->gfx.ce_fw, fw_name);
}
if (err)
goto out;
- err = amdgpu_ucode_validate(adev->gfx.ce_fw);
- if (err)
- goto out;
cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
@@ -1060,10 +1045,9 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
adev->virt.chained_ib_support = false;
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
- err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
+ err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw, fw_name);
if (err)
goto out;
- err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version);
adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version);
@@ -1110,20 +1094,17 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec_2.bin", chip_name);
- err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
- if (err == -ENOENT) {
+ err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw, fw_name);
+ if (err == -ENODEV) {
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
- err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
+ err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw, fw_name);
}
} else {
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
- err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
+ err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw, fw_name);
}
if (err)
goto out;
- err = amdgpu_ucode_validate(adev->gfx.mec_fw);
- if (err)
- goto out;
cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
@@ -1132,19 +1113,16 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
(adev->asic_type != CHIP_TOPAZ)) {
if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2_2.bin", chip_name);
- err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
- if (err == -ENOENT) {
+ err = amdgpu_ucode_request(adev, &adev->gfx.mec2_fw, fw_name);
+ if (err == -ENODEV) {
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
- err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
+ err = amdgpu_ucode_request(adev, &adev->gfx.mec2_fw, fw_name);
}
} else {
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
- err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
+ err = amdgpu_ucode_request(adev, &adev->gfx.mec2_fw, fw_name);
}
if (!err) {
- err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
- if (err)
- goto out;
cp_hdr = (const struct gfx_firmware_header_v1_0 *)
adev->gfx.mec2_fw->data;
adev->gfx.mec2_fw_version =
@@ -1219,18 +1197,12 @@ out:
dev_err(adev->dev,
"gfx8: Failed to load firmware \"%s\"\n",
fw_name);
- release_firmware(adev->gfx.pfp_fw);
- adev->gfx.pfp_fw = NULL;
- release_firmware(adev->gfx.me_fw);
- adev->gfx.me_fw = NULL;
- release_firmware(adev->gfx.ce_fw);
- adev->gfx.ce_fw = NULL;
- release_firmware(adev->gfx.rlc_fw);
- adev->gfx.rlc_fw = NULL;
- release_firmware(adev->gfx.mec_fw);
- adev->gfx.mec_fw = NULL;
- release_firmware(adev->gfx.mec2_fw);
- adev->gfx.mec2_fw = NULL;
+ amdgpu_ucode_release(&adev->gfx.pfp_fw);
+ amdgpu_ucode_release(&adev->gfx.me_fw);
+ amdgpu_ucode_release(&adev->gfx.ce_fw);
+ amdgpu_ucode_release(&adev->gfx.rlc_fw);
+ amdgpu_ucode_release(&adev->gfx.mec_fw);
+ amdgpu_ucode_release(&adev->gfx.mec2_fw);
}
return err;
}
@@ -1340,7 +1312,8 @@ static int gfx_v8_0_mec_init(struct amdgpu_device *adev)
mec_hpd_size = adev->gfx.num_compute_rings * GFX8_MEC_HPD_SIZE;
if (mec_hpd_size) {
r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
&adev->gfx.mec.hpd_eop_obj,
&adev->gfx.mec.hpd_eop_gpu_addr,
(void **)&hpd);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index f202b45c413c..ae09fc1cfe6b 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -1078,18 +1078,12 @@ err1:
static void gfx_v9_0_free_microcode(struct amdgpu_device *adev)
{
- release_firmware(adev->gfx.pfp_fw);
- adev->gfx.pfp_fw = NULL;
- release_firmware(adev->gfx.me_fw);
- adev->gfx.me_fw = NULL;
- release_firmware(adev->gfx.ce_fw);
- adev->gfx.ce_fw = NULL;
- release_firmware(adev->gfx.rlc_fw);
- adev->gfx.rlc_fw = NULL;
- release_firmware(adev->gfx.mec_fw);
- adev->gfx.mec_fw = NULL;
- release_firmware(adev->gfx.mec2_fw);
- adev->gfx.mec2_fw = NULL;
+ amdgpu_ucode_release(&adev->gfx.pfp_fw);
+ amdgpu_ucode_release(&adev->gfx.me_fw);
+ amdgpu_ucode_release(&adev->gfx.ce_fw);
+ amdgpu_ucode_release(&adev->gfx.rlc_fw);
+ amdgpu_ucode_release(&adev->gfx.mec_fw);
+ amdgpu_ucode_release(&adev->gfx.mec2_fw);
kfree(adev->gfx.rlc.register_list_format);
}
@@ -1251,55 +1245,40 @@ static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev)
}
static int gfx_v9_0_init_cp_gfx_microcode(struct amdgpu_device *adev,
- const char *chip_name)
+ char *chip_name)
{
char fw_name[30];
int err;
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
- err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
- if (err)
- goto out;
- err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
+ err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw, fw_name);
if (err)
goto out;
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_PFP);
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
- err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
- if (err)
- goto out;
- err = amdgpu_ucode_validate(adev->gfx.me_fw);
+ err = amdgpu_ucode_request(adev, &adev->gfx.me_fw, fw_name);
if (err)
goto out;
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_ME);
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
- err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
- if (err)
- goto out;
- err = amdgpu_ucode_validate(adev->gfx.ce_fw);
+ err = amdgpu_ucode_request(adev, &adev->gfx.ce_fw, fw_name);
if (err)
goto out;
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_CE);
out:
if (err) {
- dev_err(adev->dev,
- "gfx9: Failed to init firmware \"%s\"\n",
- fw_name);
- release_firmware(adev->gfx.pfp_fw);
- adev->gfx.pfp_fw = NULL;
- release_firmware(adev->gfx.me_fw);
- adev->gfx.me_fw = NULL;
- release_firmware(adev->gfx.ce_fw);
- adev->gfx.ce_fw = NULL;
+ amdgpu_ucode_release(&adev->gfx.pfp_fw);
+ amdgpu_ucode_release(&adev->gfx.me_fw);
+ amdgpu_ucode_release(&adev->gfx.ce_fw);
}
return err;
}
static int gfx_v9_0_init_rlc_microcode(struct amdgpu_device *adev,
- const char *chip_name)
+ char *chip_name)
{
char fw_name[30];
int err;
@@ -1328,10 +1307,7 @@ static int gfx_v9_0_init_rlc_microcode(struct amdgpu_device *adev,
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_kicker_rlc.bin", chip_name);
else
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
- err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
- if (err)
- goto out;
- err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
+ err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw, fw_name);
if (err)
goto out;
rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
@@ -1340,13 +1316,9 @@ static int gfx_v9_0_init_rlc_microcode(struct amdgpu_device *adev,
version_minor = le16_to_cpu(rlc_hdr->header.header_version_minor);
err = amdgpu_gfx_rlc_init_microcode(adev, version_major, version_minor);
out:
- if (err) {
- dev_err(adev->dev,
- "gfx9: Failed to init firmware \"%s\"\n",
- fw_name);
- release_firmware(adev->gfx.rlc_fw);
- adev->gfx.rlc_fw = NULL;
- }
+ if (err)
+ amdgpu_ucode_release(&adev->gfx.rlc_fw);
+
return err;
}
@@ -1361,7 +1333,7 @@ static bool gfx_v9_0_load_mec2_fw_bin_support(struct amdgpu_device *adev)
}
static int gfx_v9_0_init_cp_compute_microcode(struct amdgpu_device *adev,
- const char *chip_name)
+ char *chip_name)
{
char fw_name[30];
int err;
@@ -1371,10 +1343,7 @@ static int gfx_v9_0_init_cp_compute_microcode(struct amdgpu_device *adev,
else
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
- err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
- if (err)
- goto out;
- err = amdgpu_ucode_validate(adev->gfx.mec_fw);
+ err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw, fw_name);
if (err)
goto out;
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC1);
@@ -1386,91 +1355,49 @@ static int gfx_v9_0_init_cp_compute_microcode(struct amdgpu_device *adev,
else
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
- err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
+ /* ignore failures to load */
+ err = amdgpu_ucode_request(adev, &adev->gfx.mec2_fw, fw_name);
if (!err) {
- err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
- if (err)
- goto out;
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC2);
amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_MEC2_JT);
} else {
err = 0;
- adev->gfx.mec2_fw = NULL;
+ amdgpu_ucode_release(&adev->gfx.mec2_fw);
}
} else {
adev->gfx.mec2_fw_version = adev->gfx.mec_fw_version;
adev->gfx.mec2_feature_version = adev->gfx.mec_feature_version;
}
-out:
gfx_v9_0_check_if_need_gfxoff(adev);
gfx_v9_0_check_fw_write_wait(adev);
- if (err) {
- dev_err(adev->dev,
- "gfx9: Failed to init firmware \"%s\"\n",
- fw_name);
- release_firmware(adev->gfx.mec_fw);
- adev->gfx.mec_fw = NULL;
- release_firmware(adev->gfx.mec2_fw);
- adev->gfx.mec2_fw = NULL;
- }
+
+out:
+ if (err)
+ amdgpu_ucode_release(&adev->gfx.mec_fw);
return err;
}
static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
{
- const char *chip_name;
+ char ucode_prefix[30];
int r;
DRM_DEBUG("\n");
-
- switch (adev->ip_versions[GC_HWIP][0]) {
- case IP_VERSION(9, 0, 1):
- chip_name = "vega10";
- break;
- case IP_VERSION(9, 2, 1):
- chip_name = "vega12";
- break;
- case IP_VERSION(9, 4, 0):
- chip_name = "vega20";
- break;
- case IP_VERSION(9, 2, 2):
- case IP_VERSION(9, 1, 0):
- if (adev->apu_flags & AMD_APU_IS_RAVEN2)
- chip_name = "raven2";
- else if (adev->apu_flags & AMD_APU_IS_PICASSO)
- chip_name = "picasso";
- else
- chip_name = "raven";
- break;
- case IP_VERSION(9, 4, 1):
- chip_name = "arcturus";
- break;
- case IP_VERSION(9, 3, 0):
- if (adev->apu_flags & AMD_APU_IS_RENOIR)
- chip_name = "renoir";
- else
- chip_name = "green_sardine";
- break;
- case IP_VERSION(9, 4, 2):
- chip_name = "aldebaran";
- break;
- default:
- BUG();
- }
+ amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));
/* No CPG in Arcturus */
if (adev->gfx.num_gfx_rings) {
- r = gfx_v9_0_init_cp_gfx_microcode(adev, chip_name);
+ r = gfx_v9_0_init_cp_gfx_microcode(adev, ucode_prefix);
if (r)
return r;
}
- r = gfx_v9_0_init_rlc_microcode(adev, chip_name);
+ r = gfx_v9_0_init_rlc_microcode(adev, ucode_prefix);
if (r)
return r;
- r = gfx_v9_0_init_cp_compute_microcode(adev, chip_name);
+ r = gfx_v9_0_init_cp_compute_microcode(adev, ucode_prefix);
if (r)
return r;
@@ -1783,7 +1710,8 @@ static int gfx_v9_0_mec_init(struct amdgpu_device *adev)
mec_hpd_size = adev->gfx.num_compute_rings * GFX9_MEC_HPD_SIZE;
if (mec_hpd_size) {
r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
&adev->gfx.mec.hpd_eop_obj,
&adev->gfx.mec.hpd_eop_gpu_addr,
(void **)&hpd);
@@ -2008,27 +1936,6 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
break;
}
- if (adev->gfx.ras) {
- err = amdgpu_ras_register_ras_block(adev, &adev->gfx.ras->ras_block);
- if (err) {
- DRM_ERROR("Failed to register gfx ras block!\n");
- return err;
- }
-
- strcpy(adev->gfx.ras->ras_block.ras_comm.name, "gfx");
- adev->gfx.ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__GFX;
- adev->gfx.ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
- adev->gfx.ras_if = &adev->gfx.ras->ras_block.ras_comm;
-
- /* If not define special ras_late_init function, use gfx default ras_late_init */
- if (!adev->gfx.ras->ras_block.ras_late_init)
- adev->gfx.ras->ras_block.ras_late_init = amdgpu_gfx_ras_late_init;
-
- /* If not defined special ras_cb function, use default ras_cb */
- if (!adev->gfx.ras->ras_block.ras_cb)
- adev->gfx.ras->ras_block.ras_cb = amdgpu_gfx_process_ras_data_cb;
- }
-
adev->gfx.config.gb_addr_config = gb_addr_config;
adev->gfx.config.gb_addr_config_fields.num_pipes = 1 <<
@@ -2158,12 +2065,6 @@ static int gfx_v9_0_sw_init(void *handle)
adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;
- r = gfx_v9_0_init_microcode(adev);
- if (r) {
- DRM_ERROR("Failed to load gfx firmware!\n");
- return r;
- }
-
if (adev->gfx.rlc.funcs) {
if (adev->gfx.rlc.funcs->init) {
r = adev->gfx.rlc.funcs->init(adev);
@@ -2276,6 +2177,11 @@ static int gfx_v9_0_sw_init(void *handle)
if (r)
return r;
+ if (amdgpu_gfx_ras_sw_init(adev)) {
+ dev_err(adev->dev, "Failed to initialize gfx ras block!\n");
+ return -EINVAL;
+ }
+
return 0;
}
@@ -4605,7 +4511,7 @@ static int gfx_v9_0_early_init(void *handle)
/* init rlcg reg access ctrl */
gfx_v9_0_init_rlcg_reg_access_ctrl(adev);
- return 0;
+ return gfx_v9_0_init_microcode(adev);
}
static int gfx_v9_0_ecc_late_init(void *handle)
@@ -6877,7 +6783,6 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = {
.emit_gds_switch = gfx_v9_0_ring_emit_gds_switch,
.emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
.test_ring = gfx_v9_0_ring_test_ring,
- .test_ib = gfx_v9_0_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
.emit_switch_buffer = gfx_v9_ring_emit_sb,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
index ec4d5e15b766..ab2325f6c7ac 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
@@ -120,7 +120,7 @@ static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
/* Set default page address. */
- value = amdgpu_gmc_vram_mc2pa(adev, adev->vram_scratch.gpu_addr);
+ value = amdgpu_gmc_vram_mc2pa(adev, adev->mem_scratch.gpu_addr);
WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
(u32)(value >> 12));
WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c
index 34513e8e1519..9b3a02527318 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c
@@ -165,7 +165,7 @@ static void gfxhub_v2_0_init_system_aperture_regs(struct amdgpu_device *adev)
max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
/* Set default page address. */
- value = amdgpu_gmc_vram_mc2pa(adev, adev->vram_scratch.gpu_addr);
+ value = amdgpu_gmc_vram_mc2pa(adev, adev->mem_scratch.gpu_addr);
WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
(u32)(value >> 12));
WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c
index 3f8676d23a5e..4aacbbec31e2 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c
@@ -167,7 +167,7 @@ static void gfxhub_v2_1_init_system_aperture_regs(struct amdgpu_device *adev)
max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
/* Set default page address. */
- value = amdgpu_gmc_vram_mc2pa(adev, adev->vram_scratch.gpu_addr);
+ value = amdgpu_gmc_vram_mc2pa(adev, adev->mem_scratch.gpu_addr);
WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
(u32)(value >> 12));
WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0.c
index 0e13370c2057..be0d0f47415e 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0.c
@@ -151,19 +151,20 @@ static void gfxhub_v3_0_init_system_aperture_regs(struct amdgpu_device *adev)
{
uint64_t value;
- /* Disable AGP. */
+ /* Program the AGP BAR */
WREG32_SOC15(GC, 0, regGCMC_VM_AGP_BASE, 0);
- WREG32_SOC15(GC, 0, regGCMC_VM_AGP_TOP, 0);
- WREG32_SOC15(GC, 0, regGCMC_VM_AGP_BOT, 0x00FFFFFF);
+ WREG32_SOC15(GC, 0, regGCMC_VM_AGP_BOT, adev->gmc.agp_start >> 24);
+ WREG32_SOC15(GC, 0, regGCMC_VM_AGP_TOP, adev->gmc.agp_end >> 24);
+
/* Program the system aperture low logical page number. */
WREG32_SOC15(GC, 0, regGCMC_VM_SYSTEM_APERTURE_LOW_ADDR,
- adev->gmc.vram_start >> 18);
+ min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
WREG32_SOC15(GC, 0, regGCMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
- adev->gmc.vram_end >> 18);
+ max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
/* Set default page address. */
- value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start
+ value = adev->mem_scratch.gpu_addr - adev->gmc.vram_start
+ adev->vm_manager.vram_base_offset;
WREG32_SOC15(GC, 0, regGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
(u32)(value >> 12));
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0_3.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0_3.c
index 080ff11ca305..6e0bd628c889 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v3_0_3.c
@@ -159,17 +159,17 @@ static void gfxhub_v3_0_3_init_system_aperture_regs(struct amdgpu_device *adev)
/* Disable AGP. */
WREG32_SOC15(GC, 0, regGCMC_VM_AGP_BASE, 0);
- WREG32_SOC15(GC, 0, regGCMC_VM_AGP_TOP, 0);
- WREG32_SOC15(GC, 0, regGCMC_VM_AGP_BOT, 0x00FFFFFF);
+ WREG32_SOC15(GC, 0, regGCMC_VM_AGP_BOT, adev->gmc.agp_start >> 24);
+ WREG32_SOC15(GC, 0, regGCMC_VM_AGP_TOP, adev->gmc.agp_end >> 24);
/* Program the system aperture low logical page number. */
WREG32_SOC15(GC, 0, regGCMC_VM_SYSTEM_APERTURE_LOW_ADDR,
- adev->gmc.vram_start >> 18);
+ min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
WREG32_SOC15(GC, 0, regGCMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
- adev->gmc.vram_end >> 18);
+ max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
/* Set default page address. */
- value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start
+ value = adev->mem_scratch.gpu_addr - adev->gmc.vram_start
+ adev->vm_manager.vram_base_offset;
WREG32_SOC15(GC, 0, regGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
(u32)(value >> 12));
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index 21e46817d82d..7db1f1a7e33c 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -78,13 +78,25 @@ gmc_v10_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
/* MM HUB */
amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB_0, false);
/* GFX HUB */
- amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, false);
+ /* This works because this interrupt is only
+ * enabled at init/resume and disabled in
+ * fini/suspend, so the overall state doesn't
+ * change over the course of suspend/resume.
+ */
+ if (!adev->in_s0ix)
+ amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, false);
break;
case AMDGPU_IRQ_STATE_ENABLE:
/* MM HUB */
amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB_0, true);
/* GFX HUB */
- amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, true);
+ /* This works because this interrupt is only
+ * enabled at init/resume and disabled in
+ * fini/suspend, so the overall state doesn't
+ * change over the course of suspend/resume.
+ */
+ if (!adev->in_s0ix)
+ amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, true);
break;
default:
break;
@@ -835,10 +847,7 @@ static int gmc_v10_0_mc_init(struct amdgpu_device *adev)
}
#endif
- /* In case the PCI BAR is larger than the actual amount of vram */
adev->gmc.visible_vram_size = adev->gmc.aper_size;
- if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
- adev->gmc.visible_vram_size = adev->gmc.real_vram_size;
/* set the gart size */
if (amdgpu_gart_size == -1) {
@@ -1061,9 +1070,12 @@ static int gmc_v10_0_gart_enable(struct amdgpu_device *adev)
}
amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
- r = adev->gfxhub.funcs->gart_enable(adev);
- if (r)
- return r;
+
+ if (!adev->in_s0ix) {
+ r = adev->gfxhub.funcs->gart_enable(adev);
+ if (r)
+ return r;
+ }
r = adev->mmhub.funcs->gart_enable(adev);
if (r)
@@ -1077,10 +1089,12 @@ static int gmc_v10_0_gart_enable(struct amdgpu_device *adev)
value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
false : true;
- adev->gfxhub.funcs->set_fault_enable_default(adev, value);
+ if (!adev->in_s0ix)
+ adev->gfxhub.funcs->set_fault_enable_default(adev, value);
adev->mmhub.funcs->set_fault_enable_default(adev, value);
gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_MMHUB_0, 0);
- gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB_0, 0);
+ if (!adev->in_s0ix)
+ gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB_0, 0);
DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
(unsigned)(adev->gmc.gart_size >> 20),
@@ -1101,7 +1115,7 @@ static int gmc_v10_0_hw_init(void *handle)
* harvestable groups in gc_utcl2 need to be programmed before any GFX block
* register setup within GMC, or else system hang when harvesting SA.
*/
- if (adev->gfxhub.funcs && adev->gfxhub.funcs->utcl2_harvest)
+ if (!adev->in_s0ix && adev->gfxhub.funcs && adev->gfxhub.funcs->utcl2_harvest)
adev->gfxhub.funcs->utcl2_harvest(adev);
r = gmc_v10_0_gart_enable(adev);
@@ -1129,7 +1143,8 @@ static int gmc_v10_0_hw_init(void *handle)
*/
static void gmc_v10_0_gart_disable(struct amdgpu_device *adev)
{
- adev->gfxhub.funcs->gart_disable(adev);
+ if (!adev->in_s0ix)
+ adev->gfxhub.funcs->gart_disable(adev);
adev->mmhub.funcs->gart_disable(adev);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
index 4326078689cd..0a31a341aa43 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
@@ -64,13 +64,25 @@ gmc_v11_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
/* MM HUB */
amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB_0, false);
/* GFX HUB */
- amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, false);
+ /* This works because this interrupt is only
+ * enabled at init/resume and disabled in
+ * fini/suspend, so the overall state doesn't
+ * change over the course of suspend/resume.
+ */
+ if (!adev->in_s0ix)
+ amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, false);
break;
case AMDGPU_IRQ_STATE_ENABLE:
/* MM HUB */
amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_MMHUB_0, true);
/* GFX HUB */
- amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, true);
+ /* This works because this interrupt is only
+ * enabled at init/resume and disabled in
+ * fini/suspend, so the overall state doesn't
+ * change over the course of suspend/resume.
+ */
+ if (!adev->in_s0ix)
+ amdgpu_gmc_set_vm_fault_masks(adev, AMDGPU_GFXHUB_0, true);
break;
default:
break;
@@ -661,6 +673,7 @@ static void gmc_v11_0_vram_gtt_location(struct amdgpu_device *adev,
amdgpu_gmc_vram_location(adev, &adev->gmc, base);
amdgpu_gmc_gart_location(adev, mc);
+ amdgpu_gmc_agp_location(adev, mc);
/* base offset of vram pages */
if (amdgpu_sriov_vf(adev))
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index ec291d28edff..b7dad4e67813 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -131,19 +131,12 @@ static int gmc_v6_0_init_microcode(struct amdgpu_device *adev)
snprintf(fw_name, sizeof(fw_name), "amdgpu/si58_mc.bin");
else
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
- err = request_firmware(&adev->gmc.fw, fw_name, adev->dev);
- if (err)
- goto out;
-
- err = amdgpu_ucode_validate(adev->gmc.fw);
-
-out:
+ err = amdgpu_ucode_request(adev, &adev->gmc.fw, fw_name);
if (err) {
dev_err(adev->dev,
"si_mc: Failed to load firmware \"%s\"\n",
fw_name);
- release_firmware(adev->gmc.fw);
- adev->gmc.fw = NULL;
+ amdgpu_ucode_release(&adev->gmc.fw);
}
return err;
}
@@ -258,7 +251,7 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
adev->gmc.vram_end >> 12);
WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
- adev->vram_scratch.gpu_addr >> 12);
+ adev->mem_scratch.gpu_addr >> 12);
WREG32(mmMC_VM_AGP_BASE, 0);
WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF);
WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);
@@ -894,8 +887,7 @@ static int gmc_v6_0_sw_fini(void *handle)
amdgpu_vm_manager_fini(adev);
amdgpu_gart_table_vram_free(adev);
amdgpu_bo_fini(adev);
- release_firmware(adev->gmc.fw);
- adev->gmc.fw = NULL;
+ amdgpu_ucode_release(&adev->gmc.fw);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 979da6f510e8..402960b0174e 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -156,16 +156,10 @@ static int gmc_v7_0_init_microcode(struct amdgpu_device *adev)
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
- err = request_firmware(&adev->gmc.fw, fw_name, adev->dev);
- if (err)
- goto out;
- err = amdgpu_ucode_validate(adev->gmc.fw);
-
-out:
+ err = amdgpu_ucode_request(adev, &adev->gmc.fw, fw_name);
if (err) {
pr_err("cik_mc: Failed to load firmware \"%s\"\n", fw_name);
- release_firmware(adev->gmc.fw);
- adev->gmc.fw = NULL;
+ amdgpu_ucode_release(&adev->gmc.fw);
}
return err;
}
@@ -292,7 +286,7 @@ static void gmc_v7_0_mc_program(struct amdgpu_device *adev)
WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
adev->gmc.vram_end >> 12);
WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
- adev->vram_scratch.gpu_addr >> 12);
+ adev->mem_scratch.gpu_addr >> 12);
WREG32(mmMC_VM_AGP_BASE, 0);
WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF);
WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);
@@ -389,10 +383,7 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev)
}
#endif
- /* In case the PCI BAR is larger than the actual amount of vram */
adev->gmc.visible_vram_size = adev->gmc.aper_size;
- if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
- adev->gmc.visible_vram_size = adev->gmc.real_vram_size;
/* set the gart size */
if (amdgpu_gart_size == -1) {
@@ -1081,8 +1072,7 @@ static int gmc_v7_0_sw_fini(void *handle)
kfree(adev->gmc.vm_fault_info);
amdgpu_gart_table_vram_free(adev);
amdgpu_bo_fini(adev);
- release_firmware(adev->gmc.fw);
- adev->gmc.fw = NULL;
+ amdgpu_ucode_release(&adev->gmc.fw);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 382dde1ce74c..504c1b34dab7 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -264,16 +264,10 @@ static int gmc_v8_0_init_microcode(struct amdgpu_device *adev)
}
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
- err = request_firmware(&adev->gmc.fw, fw_name, adev->dev);
- if (err)
- goto out;
- err = amdgpu_ucode_validate(adev->gmc.fw);
-
-out:
+ err = amdgpu_ucode_request(adev, &adev->gmc.fw, fw_name);
if (err) {
pr_err("mc: Failed to load firmware \"%s\"\n", fw_name);
- release_firmware(adev->gmc.fw);
- adev->gmc.fw = NULL;
+ amdgpu_ucode_release(&adev->gmc.fw);
}
return err;
}
@@ -474,7 +468,7 @@ static void gmc_v8_0_mc_program(struct amdgpu_device *adev)
WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
adev->gmc.vram_end >> 12);
WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
- adev->vram_scratch.gpu_addr >> 12);
+ adev->mem_scratch.gpu_addr >> 12);
if (amdgpu_sriov_vf(adev)) {
tmp = ((adev->gmc.vram_end >> 24) & 0xFFFF) << 16;
@@ -587,10 +581,7 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
}
#endif
- /* In case the PCI BAR is larger than the actual amount of vram */
adev->gmc.visible_vram_size = adev->gmc.aper_size;
- if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
- adev->gmc.visible_vram_size = adev->gmc.real_vram_size;
/* set the gart size */
if (amdgpu_gart_size == -1) {
@@ -1203,8 +1194,7 @@ static int gmc_v8_0_sw_fini(void *handle)
kfree(adev->gmc.vm_fault_info);
amdgpu_gart_table_vram_free(adev);
amdgpu_bo_fini(adev);
- release_firmware(adev->gmc.fw);
- adev->gmc.fw = NULL;
+ amdgpu_ucode_release(&adev->gmc.fw);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 08d6cf79fb15..d65c6cea3445 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -484,6 +484,14 @@ static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
for (i = 0; i < 16; i++) {
reg = hub->vm_context0_cntl + i;
+ /* This works because this interrupt is only
+ * enabled at init/resume and disabled in
+ * fini/suspend, so the overall state doesn't
+ * change over the course of suspend/resume.
+ */
+ if (adev->in_s0ix && (j == AMDGPU_GFXHUB_0))
+ continue;
+
if (j == AMDGPU_GFXHUB_0)
tmp = RREG32_SOC15_IP(GC, reg);
else
@@ -504,6 +512,14 @@ static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
for (i = 0; i < 16; i++) {
reg = hub->vm_context0_cntl + i;
+ /* This works because this interrupt is only
+ * enabled at init/resume and disabled in
+ * fini/suspend, so the overall state doesn't
+ * change over the course of suspend/resume.
+ */
+ if (adev->in_s0ix && (j == AMDGPU_GFXHUB_0))
+ continue;
+
if (j == AMDGPU_GFXHUB_0)
tmp = RREG32_SOC15_IP(GC, reg);
else
@@ -1536,10 +1552,7 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
}
#endif
- /* In case the PCI BAR is larger than the actual amount of vram */
adev->gmc.visible_vram_size = adev->gmc.aper_size;
- if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
- adev->gmc.visible_vram_size = adev->gmc.real_vram_size;
/* set the gart size */
if (amdgpu_gart_size == -1) {
@@ -1862,9 +1875,12 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
}
amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
- r = adev->gfxhub.funcs->gart_enable(adev);
- if (r)
- return r;
+
+ if (!adev->in_s0ix) {
+ r = adev->gfxhub.funcs->gart_enable(adev);
+ if (r)
+ return r;
+ }
r = adev->mmhub.funcs->gart_enable(adev);
if (r)
@@ -1911,11 +1927,15 @@ static int gmc_v9_0_hw_init(void *handle)
value = true;
if (!amdgpu_sriov_vf(adev)) {
- adev->gfxhub.funcs->set_fault_enable_default(adev, value);
+ if (!adev->in_s0ix)
+ adev->gfxhub.funcs->set_fault_enable_default(adev, value);
adev->mmhub.funcs->set_fault_enable_default(adev, value);
}
- for (i = 0; i < adev->num_vmhubs; ++i)
+ for (i = 0; i < adev->num_vmhubs; ++i) {
+ if (adev->in_s0ix && (i == AMDGPU_GFXHUB_0))
+ continue;
gmc_v9_0_flush_gpu_tlb(adev, 0, i, 0);
+ }
if (adev->umc.funcs && adev->umc.funcs->init_registers)
adev->umc.funcs->init_registers(adev);
@@ -1939,7 +1959,8 @@ static int gmc_v9_0_hw_init(void *handle)
*/
static void gmc_v9_0_gart_disable(struct amdgpu_device *adev)
{
- adev->gfxhub.funcs->gart_disable(adev);
+ if (!adev->in_s0ix)
+ adev->gfxhub.funcs->gart_disable(adev);
adev->mmhub.funcs->gart_disable(adev);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c b/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c
index 077c53c6cc08..4ab90c7852c3 100644
--- a/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c
@@ -50,10 +50,7 @@ static int imu_v11_0_init_microcode(struct amdgpu_device *adev)
amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_imu.bin", ucode_prefix);
- err = request_firmware(&adev->gfx.imu_fw, fw_name, adev->dev);
- if (err)
- goto out;
- err = amdgpu_ucode_validate(adev->gfx.imu_fw);
+ err = amdgpu_ucode_request(adev, &adev->gfx.imu_fw, fw_name);
if (err)
goto out;
imu_hdr = (const struct imu_firmware_header_v1_0 *)adev->gfx.imu_fw->data;
@@ -78,7 +75,7 @@ out:
dev_err(adev->dev,
"gfx11: Failed to load firmware \"%s\"\n",
fw_name);
- release_firmware(adev->gfx.imu_fw);
+ amdgpu_ucode_release(&adev->gfx.imu_fw);
}
return err;
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c b/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c
index 614394118a53..2e2062636d5f 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c
@@ -379,89 +379,6 @@ static const struct amdgpu_mes_funcs mes_v10_1_funcs = {
.resume_gang = mes_v10_1_resume_gang,
};
-static int mes_v10_1_init_microcode(struct amdgpu_device *adev,
- enum admgpu_mes_pipe pipe)
-{
- const char *chip_name;
- char fw_name[30];
- int err;
- const struct mes_firmware_header_v1_0 *mes_hdr;
- struct amdgpu_firmware_info *info;
-
- switch (adev->ip_versions[GC_HWIP][0]) {
- case IP_VERSION(10, 1, 10):
- chip_name = "navi10";
- break;
- case IP_VERSION(10, 3, 0):
- chip_name = "sienna_cichlid";
- break;
- default:
- BUG();
- }
-
- if (pipe == AMDGPU_MES_SCHED_PIPE)
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes.bin",
- chip_name);
- else
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes1.bin",
- chip_name);
-
- err = request_firmware(&adev->mes.fw[pipe], fw_name, adev->dev);
- if (err)
- return err;
-
- err = amdgpu_ucode_validate(adev->mes.fw[pipe]);
- if (err) {
- release_firmware(adev->mes.fw[pipe]);
- adev->mes.fw[pipe] = NULL;
- return err;
- }
-
- mes_hdr = (const struct mes_firmware_header_v1_0 *)
- adev->mes.fw[pipe]->data;
- adev->mes.uc_start_addr[pipe] =
- le32_to_cpu(mes_hdr->mes_uc_start_addr_lo) |
- ((uint64_t)(le32_to_cpu(mes_hdr->mes_uc_start_addr_hi)) << 32);
- adev->mes.data_start_addr[pipe] =
- le32_to_cpu(mes_hdr->mes_data_start_addr_lo) |
- ((uint64_t)(le32_to_cpu(mes_hdr->mes_data_start_addr_hi)) << 32);
-
- if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
- int ucode, ucode_data;
-
- if (pipe == AMDGPU_MES_SCHED_PIPE) {
- ucode = AMDGPU_UCODE_ID_CP_MES;
- ucode_data = AMDGPU_UCODE_ID_CP_MES_DATA;
- } else {
- ucode = AMDGPU_UCODE_ID_CP_MES1;
- ucode_data = AMDGPU_UCODE_ID_CP_MES1_DATA;
- }
-
- info = &adev->firmware.ucode[ucode];
- info->ucode_id = ucode;
- info->fw = adev->mes.fw[pipe];
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(mes_hdr->mes_ucode_size_bytes),
- PAGE_SIZE);
-
- info = &adev->firmware.ucode[ucode_data];
- info->ucode_id = ucode_data;
- info->fw = adev->mes.fw[pipe];
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes),
- PAGE_SIZE);
- }
-
- return 0;
-}
-
-static void mes_v10_1_free_microcode(struct amdgpu_device *adev,
- enum admgpu_mes_pipe pipe)
-{
- release_firmware(adev->mes.fw[pipe]);
- adev->mes.fw[pipe] = NULL;
-}
-
static int mes_v10_1_allocate_ucode_buffer(struct amdgpu_device *adev,
enum admgpu_mes_pipe pipe)
{
@@ -1007,7 +924,6 @@ static int mes_v10_1_sw_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int pipe, r;
- adev->mes.adev = adev;
adev->mes.funcs = &mes_v10_1_funcs;
adev->mes.kiq_hw_init = &mes_v10_1_kiq_hw_init;
@@ -1019,10 +935,6 @@ static int mes_v10_1_sw_init(void *handle)
if (!adev->enable_mes_kiq && pipe == AMDGPU_MES_KIQ_PIPE)
continue;
- r = mes_v10_1_init_microcode(adev, pipe);
- if (r)
- return r;
-
r = mes_v10_1_allocate_eop_buf(adev, pipe);
if (r)
return r;
@@ -1059,8 +971,7 @@ static int mes_v10_1_sw_fini(void *handle)
amdgpu_bo_free_kernel(&adev->mes.eop_gpu_obj[pipe],
&adev->mes.eop_gpu_addr[pipe],
NULL);
-
- mes_v10_1_free_microcode(adev, pipe);
+ amdgpu_ucode_release(&adev->mes.fw[pipe]);
}
amdgpu_bo_free_kernel(&adev->gfx.kiq.ring.mqd_obj,
@@ -1229,6 +1140,22 @@ static int mes_v10_1_resume(void *handle)
return amdgpu_mes_resume(adev);
}
+static int mes_v10_0_early_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int pipe, r;
+
+ for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) {
+ if (!adev->enable_mes_kiq && pipe == AMDGPU_MES_KIQ_PIPE)
+ continue;
+ r = amdgpu_mes_init_microcode(adev, pipe);
+ if (r)
+ return r;
+ }
+
+ return 0;
+}
+
static int mes_v10_0_late_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -1241,6 +1168,7 @@ static int mes_v10_0_late_init(void *handle)
static const struct amd_ip_funcs mes_v10_1_ip_funcs = {
.name = "mes_v10_1",
+ .early_init = mes_v10_0_early_init,
.late_init = mes_v10_0_late_init,
.sw_init = mes_v10_1_sw_init,
.sw_fini = mes_v10_1_sw_fini,
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
index 5dff79e8f301..5826eac270d7 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
@@ -460,80 +460,6 @@ static const struct amdgpu_mes_funcs mes_v11_0_funcs = {
.misc_op = mes_v11_0_misc_op,
};
-static int mes_v11_0_init_microcode(struct amdgpu_device *adev,
- enum admgpu_mes_pipe pipe)
-{
- char fw_name[30];
- char ucode_prefix[30];
- int err;
- const struct mes_firmware_header_v1_0 *mes_hdr;
- struct amdgpu_firmware_info *info;
-
- amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix));
-
- if (pipe == AMDGPU_MES_SCHED_PIPE)
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes.bin",
- ucode_prefix);
- else
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mes1.bin",
- ucode_prefix);
-
- err = request_firmware(&adev->mes.fw[pipe], fw_name, adev->dev);
- if (err)
- return err;
-
- err = amdgpu_ucode_validate(adev->mes.fw[pipe]);
- if (err) {
- release_firmware(adev->mes.fw[pipe]);
- adev->mes.fw[pipe] = NULL;
- return err;
- }
-
- mes_hdr = (const struct mes_firmware_header_v1_0 *)
- adev->mes.fw[pipe]->data;
- adev->mes.uc_start_addr[pipe] =
- le32_to_cpu(mes_hdr->mes_uc_start_addr_lo) |
- ((uint64_t)(le32_to_cpu(mes_hdr->mes_uc_start_addr_hi)) << 32);
- adev->mes.data_start_addr[pipe] =
- le32_to_cpu(mes_hdr->mes_data_start_addr_lo) |
- ((uint64_t)(le32_to_cpu(mes_hdr->mes_data_start_addr_hi)) << 32);
-
- if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
- int ucode, ucode_data;
-
- if (pipe == AMDGPU_MES_SCHED_PIPE) {
- ucode = AMDGPU_UCODE_ID_CP_MES;
- ucode_data = AMDGPU_UCODE_ID_CP_MES_DATA;
- } else {
- ucode = AMDGPU_UCODE_ID_CP_MES1;
- ucode_data = AMDGPU_UCODE_ID_CP_MES1_DATA;
- }
-
- info = &adev->firmware.ucode[ucode];
- info->ucode_id = ucode;
- info->fw = adev->mes.fw[pipe];
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(mes_hdr->mes_ucode_size_bytes),
- PAGE_SIZE);
-
- info = &adev->firmware.ucode[ucode_data];
- info->ucode_id = ucode_data;
- info->fw = adev->mes.fw[pipe];
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes),
- PAGE_SIZE);
- }
-
- return 0;
-}
-
-static void mes_v11_0_free_microcode(struct amdgpu_device *adev,
- enum admgpu_mes_pipe pipe)
-{
- release_firmware(adev->mes.fw[pipe]);
- adev->mes.fw[pipe] = NULL;
-}
-
static int mes_v11_0_allocate_ucode_buffer(struct amdgpu_device *adev,
enum admgpu_mes_pipe pipe)
{
@@ -550,7 +476,9 @@ static int mes_v11_0_allocate_ucode_buffer(struct amdgpu_device *adev,
fw_size = le32_to_cpu(mes_hdr->mes_ucode_size_bytes);
r = amdgpu_bo_create_reserved(adev, fw_size,
- PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
+ PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
&adev->mes.ucode_fw_obj[pipe],
&adev->mes.ucode_fw_gpu_addr[pipe],
(void **)&adev->mes.ucode_fw_ptr[pipe]);
@@ -583,7 +511,9 @@ static int mes_v11_0_allocate_ucode_data_buffer(struct amdgpu_device *adev,
fw_size = le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes);
r = amdgpu_bo_create_reserved(adev, fw_size,
- 64 * 1024, AMDGPU_GEM_DOMAIN_VRAM,
+ 64 * 1024,
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
&adev->mes.data_fw_obj[pipe],
&adev->mes.data_fw_gpu_addr[pipe],
(void **)&adev->mes.data_fw_ptr[pipe]);
@@ -1088,7 +1018,6 @@ static int mes_v11_0_sw_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int pipe, r;
- adev->mes.adev = adev;
adev->mes.funcs = &mes_v11_0_funcs;
adev->mes.kiq_hw_init = &mes_v11_0_kiq_hw_init;
adev->mes.kiq_hw_fini = &mes_v11_0_kiq_hw_fini;
@@ -1101,10 +1030,6 @@ static int mes_v11_0_sw_init(void *handle)
if (!adev->enable_mes_kiq && pipe == AMDGPU_MES_KIQ_PIPE)
continue;
- r = mes_v11_0_init_microcode(adev, pipe);
- if (r)
- return r;
-
r = mes_v11_0_allocate_eop_buf(adev, pipe);
if (r)
return r;
@@ -1141,8 +1066,7 @@ static int mes_v11_0_sw_fini(void *handle)
amdgpu_bo_free_kernel(&adev->mes.eop_gpu_obj[pipe],
&adev->mes.eop_gpu_addr[pipe],
NULL);
-
- mes_v11_0_free_microcode(adev, pipe);
+ amdgpu_ucode_release(&adev->mes.fw[pipe]);
}
amdgpu_bo_free_kernel(&adev->gfx.kiq.ring.mqd_obj,
@@ -1339,12 +1263,28 @@ static int mes_v11_0_resume(void *handle)
return amdgpu_mes_resume(adev);
}
+static int mes_v11_0_early_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int pipe, r;
+
+ for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) {
+ if (!adev->enable_mes_kiq && pipe == AMDGPU_MES_KIQ_PIPE)
+ continue;
+ r = amdgpu_mes_init_microcode(adev, pipe);
+ if (r)
+ return r;
+ }
+
+ return 0;
+}
+
static int mes_v11_0_late_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/* it's only intended for use in mes_self_test case, not for s0ix and reset */
- if (!amdgpu_in_reset(adev) && !adev->in_s0ix &&
+ if (!amdgpu_in_reset(adev) && !adev->in_s0ix && !adev->in_suspend &&
(adev->ip_versions[GC_HWIP][0] != IP_VERSION(11, 0, 3)))
amdgpu_mes_self_test(adev);
@@ -1353,6 +1293,7 @@ static int mes_v11_0_late_init(void *handle)
static const struct amd_ip_funcs mes_v11_0_ip_funcs = {
.name = "mes_v11_0",
+ .early_init = mes_v11_0_early_init,
.late_init = mes_v11_0_late_init,
.sw_init = mes_v11_0_sw_init,
.sw_fini = mes_v11_0_sw_fini,
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
index 3e51e773f92b..15e7cbeae75b 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
@@ -114,7 +114,7 @@ static void mmhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
return;
/* Set default page address. */
- value = amdgpu_gmc_vram_mc2pa(adev, adev->vram_scratch.gpu_addr);
+ value = amdgpu_gmc_vram_mc2pa(adev, adev->mem_scratch.gpu_addr);
WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
(u32)(value >> 12));
WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c
index 6fa7090bc6cb..73afbf2facc9 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c
@@ -134,7 +134,7 @@ static void mmhub_v1_7_init_system_aperture_regs(struct amdgpu_device *adev)
}
/* Set default page address. */
- value = amdgpu_gmc_vram_mc2pa(adev, adev->vram_scratch.gpu_addr);
+ value = amdgpu_gmc_vram_mc2pa(adev, adev->mem_scratch.gpu_addr);
WREG32_SOC15(MMHUB, 0, regMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
(u32)(value >> 12));
WREG32_SOC15(MMHUB, 0, regMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
index 0e664d0cc8d5..278e32db878d 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
@@ -234,7 +234,7 @@ static void mmhub_v2_0_init_system_aperture_regs(struct amdgpu_device *adev)
}
/* Set default page address. */
- value = amdgpu_gmc_vram_mc2pa(adev, adev->vram_scratch.gpu_addr);
+ value = amdgpu_gmc_vram_mc2pa(adev, adev->mem_scratch.gpu_addr);
WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
(u32)(value >> 12));
WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c
index 4638ea7c2eec..fcf2813e70db 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c
@@ -164,7 +164,7 @@ static void mmhub_v2_3_init_system_aperture_regs(struct amdgpu_device *adev)
max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
/* Set default page address. */
- value = amdgpu_gmc_vram_mc2pa(adev, adev->vram_scratch.gpu_addr);
+ value = amdgpu_gmc_vram_mc2pa(adev, adev->mem_scratch.gpu_addr);
WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
(u32)(value >> 12));
WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0.c
index 16cc82215e2e..164948c50ac3 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0.c
@@ -169,26 +169,27 @@ static void mmhub_v3_0_init_system_aperture_regs(struct amdgpu_device *adev)
uint64_t value;
uint32_t tmp;
- if (!amdgpu_sriov_vf(adev)) {
- /*
- * the new L1 policy will block SRIOV guest from writing
- * these regs, and they will be programed at host.
- * so skip programing these regs.
- */
- /* Disable AGP. */
- WREG32_SOC15(MMHUB, 0, regMMMC_VM_AGP_BASE, 0);
- WREG32_SOC15(MMHUB, 0, regMMMC_VM_AGP_TOP, 0);
- WREG32_SOC15(MMHUB, 0, regMMMC_VM_AGP_BOT, 0x00FFFFFF);
-
- /* Program the system aperture low logical page number. */
- WREG32_SOC15(MMHUB, 0, regMMMC_VM_SYSTEM_APERTURE_LOW_ADDR,
- adev->gmc.vram_start >> 18);
- WREG32_SOC15(MMHUB, 0, regMMMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
- adev->gmc.vram_end >> 18);
- }
+ if (amdgpu_sriov_vf(adev))
+ return;
+
+ /*
+ * the new L1 policy will block SRIOV guest from writing
+ * these regs, and they will be programed at host.
+ * so skip programing these regs.
+ */
+ /* Program the AGP BAR */
+ WREG32_SOC15(MMHUB, 0, regMMMC_VM_AGP_BASE, 0);
+ WREG32_SOC15(MMHUB, 0, regMMMC_VM_AGP_BOT, adev->gmc.agp_start >> 24);
+ WREG32_SOC15(MMHUB, 0, regMMMC_VM_AGP_TOP, adev->gmc.agp_end >> 24);
+
+ /* Program the system aperture low logical page number. */
+ WREG32_SOC15(MMHUB, 0, regMMMC_VM_SYSTEM_APERTURE_LOW_ADDR,
+ min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
+ WREG32_SOC15(MMHUB, 0, regMMMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+ max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
/* Set default page address. */
- value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start +
+ value = adev->mem_scratch.gpu_addr - adev->gmc.vram_start +
adev->vm_manager.vram_base_offset;
WREG32_SOC15(MMHUB, 0, regMMMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
(u32)(value >> 12));
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c
index 6bdf2ef0298d..26509b6b8c24 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c
@@ -183,12 +183,12 @@ static void mmhub_v3_0_1_init_system_aperture_regs(struct amdgpu_device *adev)
*/
/* Program the system aperture low logical page number. */
WREG32_SOC15(MMHUB, 0, regMMMC_VM_SYSTEM_APERTURE_LOW_ADDR,
- adev->gmc.vram_start >> 18);
+ min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
WREG32_SOC15(MMHUB, 0, regMMMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
- adev->gmc.vram_end >> 18);
+ max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
/* Set default page address. */
- value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start +
+ value = adev->mem_scratch.gpu_addr - adev->gmc.vram_start +
adev->vm_manager.vram_base_offset;
WREG32_SOC15(MMHUB, 0, regMMMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
(u32)(value >> 12));
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_2.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_2.c
index 45465acaa943..26abbc6a47ab 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_2.c
@@ -162,10 +162,10 @@ static void mmhub_v3_0_2_init_system_aperture_regs(struct amdgpu_device *adev)
uint64_t value;
uint32_t tmp;
- /* Disable AGP. */
+ /* Program the AGP BAR */
WREG32_SOC15(MMHUB, 0, regMMMC_VM_AGP_BASE, 0);
- WREG32_SOC15(MMHUB, 0, regMMMC_VM_AGP_TOP, 0);
- WREG32_SOC15(MMHUB, 0, regMMMC_VM_AGP_BOT, 0x00FFFFFF);
+ WREG32_SOC15(MMHUB, 0, regMMMC_VM_AGP_BOT, adev->gmc.agp_start >> 24);
+ WREG32_SOC15(MMHUB, 0, regMMMC_VM_AGP_TOP, adev->gmc.agp_end >> 24);
if (!amdgpu_sriov_vf(adev)) {
/*
@@ -175,13 +175,13 @@ static void mmhub_v3_0_2_init_system_aperture_regs(struct amdgpu_device *adev)
*/
/* Program the system aperture low logical page number. */
WREG32_SOC15(MMHUB, 0, regMMMC_VM_SYSTEM_APERTURE_LOW_ADDR,
- adev->gmc.vram_start >> 18);
+ min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
WREG32_SOC15(MMHUB, 0, regMMMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
- adev->gmc.vram_end >> 18);
+ max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
}
/* Set default page address. */
- value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start +
+ value = adev->mem_scratch.gpu_addr - adev->gmc.vram_start +
adev->vm_manager.vram_base_offset;
WREG32_SOC15(MMHUB, 0, regMMMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
(u32)(value >> 12));
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
index 445cb06b9d26..72083e96222f 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
@@ -136,7 +136,7 @@ static void mmhub_v9_4_init_system_aperture_regs(struct amdgpu_device *adev,
max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
/* Set default page address. */
- value = amdgpu_gmc_vram_mc2pa(adev, adev->vram_scratch.gpu_addr);
+ value = amdgpu_gmc_vram_mc2pa(adev, adev->mem_scratch.gpu_addr);
WREG32_SOC15_OFFSET(
MMHUB, 0,
mmVMSHAREDPF0_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
index 12906ba74462..63725b2ebc03 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
@@ -404,6 +404,11 @@ static int xgpu_ai_request_init_data(struct amdgpu_device *adev)
return xgpu_ai_send_access_requests(adev, IDH_REQ_GPU_INIT_DATA);
}
+static void xgpu_ai_ras_poison_handler(struct amdgpu_device *adev)
+{
+ xgpu_ai_send_access_requests(adev, IDH_RAS_POISON);
+}
+
const struct amdgpu_virt_ops xgpu_ai_virt_ops = {
.req_full_gpu = xgpu_ai_request_full_gpu_access,
.rel_full_gpu = xgpu_ai_release_full_gpu_access,
@@ -411,4 +416,5 @@ const struct amdgpu_virt_ops xgpu_ai_virt_ops = {
.wait_reset = NULL,
.trans_msg = xgpu_ai_mailbox_trans_msg,
.req_init_data = xgpu_ai_request_init_data,
+ .ras_poison_handler = xgpu_ai_ras_poison_handler,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
index fa7e13e0459e..af1a784696bd 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
@@ -39,6 +39,7 @@ enum idh_request {
IDH_LOG_VF_ERROR = 200,
IDH_READY_TO_RESET = 201,
+ IDH_RAS_POISON = 202,
};
enum idh_event {
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
index e07757eea7ad..cae1aaa4ddb6 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
@@ -426,6 +426,11 @@ void xgpu_nv_mailbox_put_irq(struct amdgpu_device *adev)
amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
}
+static void xgpu_nv_ras_poison_handler(struct amdgpu_device *adev)
+{
+ xgpu_nv_send_access_requests(adev, IDH_RAS_POISON);
+}
+
const struct amdgpu_virt_ops xgpu_nv_virt_ops = {
.req_full_gpu = xgpu_nv_request_full_gpu_access,
.rel_full_gpu = xgpu_nv_release_full_gpu_access,
@@ -433,4 +438,5 @@ const struct amdgpu_virt_ops xgpu_nv_virt_ops = {
.reset_gpu = xgpu_nv_request_reset,
.wait_reset = NULL,
.trans_msg = xgpu_nv_mailbox_trans_msg,
+ .ras_poison_handler = xgpu_nv_ras_poison_handler,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h
index 73887b0aa1d6..d0221ce08769 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h
@@ -39,6 +39,7 @@ enum idh_request {
IDH_LOG_VF_ERROR = 200,
IDH_READY_TO_RESET = 201,
+ IDH_RAS_POISON = 202,
};
enum idh_event {
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v4_3.c b/drivers/gpu/drm/amd/amdgpu/nbio_v4_3.c
index 15eb3658d70e..09fdcd20cb91 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v4_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v4_3.c
@@ -337,7 +337,13 @@ const struct nbio_hdp_flush_reg nbio_v4_3_hdp_flush_reg = {
static void nbio_v4_3_init_registers(struct amdgpu_device *adev)
{
- return;
+ if (adev->ip_versions[NBIO_HWIP][0] == IP_VERSION(4, 3, 0)) {
+ uint32_t data;
+
+ data = RREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF2_STRAP2);
+ data &= ~RCC_DEV0_EPF2_STRAP2__STRAP_NO_SOFT_RESET_DEV0_F2_MASK;
+ WREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF2_STRAP2, data);
+ }
}
static u32 nbio_v4_3_get_rom_offset(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
index 6853b93ac82e..d972025f0d20 100644
--- a/drivers/gpu/drm/amd/amdgpu/nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/nv.c
@@ -98,7 +98,7 @@ static const struct amdgpu_video_codecs nv_video_codecs_decode =
};
/* Sienna Cichlid */
-static const struct amdgpu_video_codec_info sc_video_codecs_decode_array[] =
+static const struct amdgpu_video_codec_info sc_video_codecs_decode_array_vcn0[] =
{
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
@@ -110,10 +110,27 @@ static const struct amdgpu_video_codec_info sc_video_codecs_decode_array[] =
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
};
-static const struct amdgpu_video_codecs sc_video_codecs_decode =
+static const struct amdgpu_video_codec_info sc_video_codecs_decode_array_vcn1[] =
{
- .codec_count = ARRAY_SIZE(sc_video_codecs_decode_array),
- .codec_array = sc_video_codecs_decode_array,
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
+};
+
+static const struct amdgpu_video_codecs sc_video_codecs_decode_vcn0 =
+{
+ .codec_count = ARRAY_SIZE(sc_video_codecs_decode_array_vcn0),
+ .codec_array = sc_video_codecs_decode_array_vcn0,
+};
+
+static const struct amdgpu_video_codecs sc_video_codecs_decode_vcn1 =
+{
+ .codec_count = ARRAY_SIZE(sc_video_codecs_decode_array_vcn1),
+ .codec_array = sc_video_codecs_decode_array_vcn1,
};
/* SRIOV Sienna Cichlid, not const since data is controlled by host */
@@ -123,7 +140,7 @@ static struct amdgpu_video_codec_info sriov_sc_video_codecs_encode_array[] =
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
};
-static struct amdgpu_video_codec_info sriov_sc_video_codecs_decode_array[] =
+static struct amdgpu_video_codec_info sriov_sc_video_codecs_decode_array_vcn0[] =
{
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
@@ -135,16 +152,33 @@ static struct amdgpu_video_codec_info sriov_sc_video_codecs_decode_array[] =
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
};
+static struct amdgpu_video_codec_info sriov_sc_video_codecs_decode_array_vcn1[] =
+{
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
+};
+
static struct amdgpu_video_codecs sriov_sc_video_codecs_encode =
{
.codec_count = ARRAY_SIZE(sriov_sc_video_codecs_encode_array),
.codec_array = sriov_sc_video_codecs_encode_array,
};
-static struct amdgpu_video_codecs sriov_sc_video_codecs_decode =
+static struct amdgpu_video_codecs sriov_sc_video_codecs_decode_vcn0 =
{
- .codec_count = ARRAY_SIZE(sriov_sc_video_codecs_decode_array),
- .codec_array = sriov_sc_video_codecs_decode_array,
+ .codec_count = ARRAY_SIZE(sriov_sc_video_codecs_decode_array_vcn0),
+ .codec_array = sriov_sc_video_codecs_decode_array_vcn0,
+};
+
+static struct amdgpu_video_codecs sriov_sc_video_codecs_decode_vcn1 =
+{
+ .codec_count = ARRAY_SIZE(sriov_sc_video_codecs_decode_array_vcn1),
+ .codec_array = sriov_sc_video_codecs_decode_array_vcn1,
};
/* Beige Goby*/
@@ -181,20 +215,37 @@ static const struct amdgpu_video_codecs yc_video_codecs_decode = {
static int nv_query_video_codecs(struct amdgpu_device *adev, bool encode,
const struct amdgpu_video_codecs **codecs)
{
+ if (adev->vcn.num_vcn_inst == hweight8(adev->vcn.harvest_config))
+ return -EINVAL;
+
switch (adev->ip_versions[UVD_HWIP][0]) {
case IP_VERSION(3, 0, 0):
case IP_VERSION(3, 0, 64):
case IP_VERSION(3, 0, 192):
if (amdgpu_sriov_vf(adev)) {
- if (encode)
- *codecs = &sriov_sc_video_codecs_encode;
- else
- *codecs = &sriov_sc_video_codecs_decode;
+ if (adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0) {
+ if (encode)
+ *codecs = &sriov_sc_video_codecs_encode;
+ else
+ *codecs = &sriov_sc_video_codecs_decode_vcn1;
+ } else {
+ if (encode)
+ *codecs = &sriov_sc_video_codecs_encode;
+ else
+ *codecs = &sriov_sc_video_codecs_decode_vcn0;
+ }
} else {
- if (encode)
- *codecs = &nv_video_codecs_encode;
- else
- *codecs = &sc_video_codecs_decode;
+ if (adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0) {
+ if (encode)
+ *codecs = &nv_video_codecs_encode;
+ else
+ *codecs = &sc_video_codecs_decode_vcn1;
+ } else {
+ if (encode)
+ *codecs = &nv_video_codecs_encode;
+ else
+ *codecs = &sc_video_codecs_decode_vcn0;
+ }
}
return 0;
case IP_VERSION(3, 0, 16):
@@ -202,7 +253,7 @@ static int nv_query_video_codecs(struct amdgpu_device *adev, bool encode,
if (encode)
*codecs = &nv_video_codecs_encode;
else
- *codecs = &sc_video_codecs_decode;
+ *codecs = &sc_video_codecs_decode_vcn0;
return 0;
case IP_VERSION(3, 1, 1):
case IP_VERSION(3, 1, 2):
@@ -993,9 +1044,19 @@ static int nv_common_late_init(void *handle)
if (amdgpu_sriov_vf(adev)) {
xgpu_nv_mailbox_get_irq(adev);
- amdgpu_virt_update_sriov_video_codec(adev,
- sriov_sc_video_codecs_encode_array, ARRAY_SIZE(sriov_sc_video_codecs_encode_array),
- sriov_sc_video_codecs_decode_array, ARRAY_SIZE(sriov_sc_video_codecs_decode_array));
+ if (adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0) {
+ amdgpu_virt_update_sriov_video_codec(adev,
+ sriov_sc_video_codecs_encode_array,
+ ARRAY_SIZE(sriov_sc_video_codecs_encode_array),
+ sriov_sc_video_codecs_decode_array_vcn1,
+ ARRAY_SIZE(sriov_sc_video_codecs_decode_array_vcn1));
+ } else {
+ amdgpu_virt_update_sriov_video_codec(adev,
+ sriov_sc_video_codecs_encode_array,
+ ARRAY_SIZE(sriov_sc_video_codecs_encode_array),
+ sriov_sc_video_codecs_decode_array_vcn1,
+ ARRAY_SIZE(sriov_sc_video_codecs_decode_array_vcn1));
+ }
}
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
index 9de46fa8f46c..e1b7fca09666 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
@@ -47,83 +47,17 @@ MODULE_FIRMWARE("amdgpu/raven_ta.bin");
static int psp_v10_0_init_microcode(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
- const char *chip_name;
- char fw_name[30];
+ char ucode_prefix[30];
int err = 0;
- const struct ta_firmware_header_v1_0 *ta_hdr;
DRM_DEBUG("\n");
- switch (adev->asic_type) {
- case CHIP_RAVEN:
- if (adev->apu_flags & AMD_APU_IS_RAVEN2)
- chip_name = "raven2";
- else if (adev->apu_flags & AMD_APU_IS_PICASSO)
- chip_name = "picasso";
- else
- chip_name = "raven";
- break;
- default: BUG();
- }
-
- err = psp_init_asd_microcode(psp, chip_name);
+ amdgpu_ucode_ip_version_decode(adev, MP0_HWIP, ucode_prefix, sizeof(ucode_prefix));
+
+ err = psp_init_asd_microcode(psp, ucode_prefix);
if (err)
- goto out;
-
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name);
- err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev);
- if (err) {
- release_firmware(adev->psp.ta_fw);
- adev->psp.ta_fw = NULL;
- dev_info(adev->dev,
- "psp v10.0: Failed to load firmware \"%s\"\n",
- fw_name);
- } else {
- err = amdgpu_ucode_validate(adev->psp.ta_fw);
- if (err)
- goto out2;
-
- ta_hdr = (const struct ta_firmware_header_v1_0 *)
- adev->psp.ta_fw->data;
- adev->psp.hdcp_context.context.bin_desc.fw_version =
- le32_to_cpu(ta_hdr->hdcp.fw_version);
- adev->psp.hdcp_context.context.bin_desc.size_bytes =
- le32_to_cpu(ta_hdr->hdcp.size_bytes);
- adev->psp.hdcp_context.context.bin_desc.start_addr =
- (uint8_t *)ta_hdr +
- le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
-
- adev->psp.dtm_context.context.bin_desc.fw_version =
- le32_to_cpu(ta_hdr->dtm.fw_version);
- adev->psp.dtm_context.context.bin_desc.size_bytes =
- le32_to_cpu(ta_hdr->dtm.size_bytes);
- adev->psp.dtm_context.context.bin_desc.start_addr =
- (uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr +
- le32_to_cpu(ta_hdr->dtm.offset_bytes);
-
- adev->psp.securedisplay_context.context.bin_desc.fw_version =
- le32_to_cpu(ta_hdr->securedisplay.fw_version);
- adev->psp.securedisplay_context.context.bin_desc.size_bytes =
- le32_to_cpu(ta_hdr->securedisplay.size_bytes);
- adev->psp.securedisplay_context.context.bin_desc.start_addr =
- (uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr +
- le32_to_cpu(ta_hdr->securedisplay.offset_bytes);
-
- adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version);
- }
-
- return 0;
-
-out2:
- release_firmware(adev->psp.ta_fw);
- adev->psp.ta_fw = NULL;
-out:
- if (err) {
- dev_err(adev->dev,
- "psp v10.0: Failed to load firmware \"%s\"\n",
- fw_name);
- }
-
- return err;
+ return err;
+
+ return psp_init_ta_microcode(psp, ucode_prefix);
}
static int psp_v10_0_ring_create(struct psp_context *psp,
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
index bd3e3e23a939..8f84fe40abbb 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
@@ -88,159 +88,56 @@ MODULE_FIRMWARE("amdgpu/beige_goby_ta.bin");
static int psp_v11_0_init_microcode(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
- const char *chip_name;
- char fw_name[PSP_FW_NAME_LEN];
+ char ucode_prefix[30];
int err = 0;
- const struct ta_firmware_header_v1_0 *ta_hdr;
DRM_DEBUG("\n");
- switch (adev->ip_versions[MP0_HWIP][0]) {
- case IP_VERSION(11, 0, 2):
- chip_name = "vega20";
- break;
- case IP_VERSION(11, 0, 0):
- chip_name = "navi10";
- break;
- case IP_VERSION(11, 0, 5):
- chip_name = "navi14";
- break;
- case IP_VERSION(11, 0, 9):
- chip_name = "navi12";
- break;
- case IP_VERSION(11, 0, 4):
- chip_name = "arcturus";
- break;
- case IP_VERSION(11, 0, 7):
- chip_name = "sienna_cichlid";
- break;
- case IP_VERSION(11, 0, 11):
- chip_name = "navy_flounder";
- break;
- case IP_VERSION(11, 5, 0):
- chip_name = "vangogh";
- break;
- case IP_VERSION(11, 0, 12):
- chip_name = "dimgrey_cavefish";
- break;
- case IP_VERSION(11, 0, 13):
- chip_name = "beige_goby";
- break;
- default:
- BUG();
- }
-
+ amdgpu_ucode_ip_version_decode(adev, MP0_HWIP, ucode_prefix, sizeof(ucode_prefix));
switch (adev->ip_versions[MP0_HWIP][0]) {
case IP_VERSION(11, 0, 2):
case IP_VERSION(11, 0, 4):
- err = psp_init_sos_microcode(psp, chip_name);
+ err = psp_init_sos_microcode(psp, ucode_prefix);
if (err)
return err;
- err = psp_init_asd_microcode(psp, chip_name);
+ err = psp_init_asd_microcode(psp, ucode_prefix);
if (err)
return err;
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name);
- err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev);
- if (err) {
- release_firmware(adev->psp.ta_fw);
- adev->psp.ta_fw = NULL;
- dev_info(adev->dev,
- "psp v11.0: Failed to load firmware \"%s\"\n", fw_name);
- } else {
- err = amdgpu_ucode_validate(adev->psp.ta_fw);
- if (err)
- goto out2;
-
- ta_hdr = (const struct ta_firmware_header_v1_0 *)adev->psp.ta_fw->data;
- adev->psp.xgmi_context.context.bin_desc.fw_version =
- le32_to_cpu(ta_hdr->xgmi.fw_version);
- adev->psp.xgmi_context.context.bin_desc.size_bytes =
- le32_to_cpu(ta_hdr->xgmi.size_bytes);
- adev->psp.xgmi_context.context.bin_desc.start_addr =
- (uint8_t *)ta_hdr +
- le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
- adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version);
- adev->psp.ras_context.context.bin_desc.fw_version =
- le32_to_cpu(ta_hdr->ras.fw_version);
- adev->psp.ras_context.context.bin_desc.size_bytes =
- le32_to_cpu(ta_hdr->ras.size_bytes);
- adev->psp.ras_context.context.bin_desc.start_addr =
- (uint8_t *)adev->psp.xgmi_context.context.bin_desc.start_addr +
- le32_to_cpu(ta_hdr->ras.offset_bytes);
- }
+ err = psp_init_ta_microcode(psp, ucode_prefix);
+ adev->psp.securedisplay_context.context.bin_desc.size_bytes = 0;
break;
case IP_VERSION(11, 0, 0):
case IP_VERSION(11, 0, 5):
case IP_VERSION(11, 0, 9):
- err = psp_init_sos_microcode(psp, chip_name);
+ err = psp_init_sos_microcode(psp, ucode_prefix);
if (err)
return err;
- err = psp_init_asd_microcode(psp, chip_name);
+ err = psp_init_asd_microcode(psp, ucode_prefix);
if (err)
return err;
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name);
- err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev);
- if (err) {
- release_firmware(adev->psp.ta_fw);
- adev->psp.ta_fw = NULL;
- dev_info(adev->dev,
- "psp v11.0: Failed to load firmware \"%s\"\n", fw_name);
- } else {
- err = amdgpu_ucode_validate(adev->psp.ta_fw);
- if (err)
- goto out2;
-
- ta_hdr = (const struct ta_firmware_header_v1_0 *)adev->psp.ta_fw->data;
- adev->psp.hdcp_context.context.bin_desc.fw_version =
- le32_to_cpu(ta_hdr->hdcp.fw_version);
- adev->psp.hdcp_context.context.bin_desc.size_bytes =
- le32_to_cpu(ta_hdr->hdcp.size_bytes);
- adev->psp.hdcp_context.context.bin_desc.start_addr =
- (uint8_t *)ta_hdr +
- le32_to_cpu(
- ta_hdr->header.ucode_array_offset_bytes);
-
- adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version);
-
- adev->psp.dtm_context.context.bin_desc.fw_version =
- le32_to_cpu(ta_hdr->dtm.fw_version);
- adev->psp.dtm_context.context.bin_desc.size_bytes =
- le32_to_cpu(ta_hdr->dtm.size_bytes);
- adev->psp.dtm_context.context.bin_desc.start_addr =
- (uint8_t *)adev->psp.hdcp_context.context
- .bin_desc.start_addr +
- le32_to_cpu(ta_hdr->dtm.offset_bytes);
- }
+ err = psp_init_ta_microcode(psp, ucode_prefix);
+ adev->psp.securedisplay_context.context.bin_desc.size_bytes = 0;
break;
case IP_VERSION(11, 0, 7):
case IP_VERSION(11, 0, 11):
case IP_VERSION(11, 0, 12):
case IP_VERSION(11, 0, 13):
- err = psp_init_sos_microcode(psp, chip_name);
- if (err)
- return err;
- err = psp_init_ta_microcode(psp, chip_name);
+ err = psp_init_sos_microcode(psp, ucode_prefix);
if (err)
return err;
+ err = psp_init_ta_microcode(psp, ucode_prefix);
break;
case IP_VERSION(11, 5, 0):
- err = psp_init_asd_microcode(psp, chip_name);
- if (err)
- return err;
- err = psp_init_toc_microcode(psp, chip_name);
+ err = psp_init_asd_microcode(psp, ucode_prefix);
if (err)
return err;
+ err = psp_init_toc_microcode(psp, ucode_prefix);
break;
default:
BUG();
}
- return 0;
-
-out2:
- release_firmware(adev->psp.ta_fw);
- adev->psp.ta_fw = NULL;
return err;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
index 8ed2281b6557..fcd708eae75c 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
@@ -48,83 +48,25 @@ MODULE_FIRMWARE("amdgpu/green_sardine_ta.bin");
static int psp_v12_0_init_microcode(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
- const char *chip_name;
- char fw_name[30];
+ char ucode_prefix[30];
int err = 0;
- const struct ta_firmware_header_v1_0 *ta_hdr;
DRM_DEBUG("\n");
- switch (adev->asic_type) {
- case CHIP_RENOIR:
- if (adev->apu_flags & AMD_APU_IS_RENOIR)
- chip_name = "renoir";
- else
- chip_name = "green_sardine";
- break;
- default:
- BUG();
- }
+ amdgpu_ucode_ip_version_decode(adev, MP0_HWIP, ucode_prefix, sizeof(ucode_prefix));
- err = psp_init_asd_microcode(psp, chip_name);
+ err = psp_init_asd_microcode(psp, ucode_prefix);
if (err)
return err;
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name);
- err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev);
- if (err) {
- release_firmware(adev->psp.ta_fw);
- adev->psp.ta_fw = NULL;
- dev_info(adev->dev,
- "psp v12.0: Failed to load firmware \"%s\"\n",
- fw_name);
- } else {
- err = amdgpu_ucode_validate(adev->psp.ta_fw);
- if (err)
- goto out;
-
- ta_hdr = (const struct ta_firmware_header_v1_0 *)
- adev->psp.ta_fw->data;
- adev->psp.hdcp_context.context.bin_desc.fw_version =
- le32_to_cpu(ta_hdr->hdcp.fw_version);
- adev->psp.hdcp_context.context.bin_desc.size_bytes =
- le32_to_cpu(ta_hdr->hdcp.size_bytes);
- adev->psp.hdcp_context.context.bin_desc.start_addr =
- (uint8_t *)ta_hdr +
- le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
-
- adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version);
-
- adev->psp.dtm_context.context.bin_desc.fw_version =
- le32_to_cpu(ta_hdr->dtm.fw_version);
- adev->psp.dtm_context.context.bin_desc.size_bytes =
- le32_to_cpu(ta_hdr->dtm.size_bytes);
- adev->psp.dtm_context.context.bin_desc.start_addr =
- (uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr +
- le32_to_cpu(ta_hdr->dtm.offset_bytes);
-
- if (adev->apu_flags & AMD_APU_IS_RENOIR) {
- adev->psp.securedisplay_context.context.bin_desc.fw_version =
- le32_to_cpu(ta_hdr->securedisplay.fw_version);
- adev->psp.securedisplay_context.context.bin_desc.size_bytes =
- le32_to_cpu(ta_hdr->securedisplay.size_bytes);
- adev->psp.securedisplay_context.context.bin_desc.start_addr =
- (uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr +
- le32_to_cpu(ta_hdr->securedisplay.offset_bytes);
- }
- }
-
- return 0;
+ err = psp_init_ta_microcode(psp, ucode_prefix);
+ if (err)
+ return err;
-out:
- release_firmware(adev->psp.ta_fw);
- adev->psp.ta_fw = NULL;
- if (err) {
- dev_err(adev->dev,
- "psp v12.0: Failed to load firmware \"%s\"\n",
- fw_name);
- }
+ /* only supported on renoir */
+ if (!(adev->apu_flags & AMD_APU_IS_RENOIR))
+ adev->psp.securedisplay_context.context.bin_desc.size_bytes = 0;
- return err;
+ return 0;
}
static int psp_v12_0_bootloader_load_sysdrv(struct psp_context *psp)
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
index e6a26a7e5e5e..d62fcc77af95 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
@@ -70,32 +70,19 @@ MODULE_FIRMWARE("amdgpu/psp_13_0_11_ta.bin");
static int psp_v13_0_init_microcode(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
- const char *chip_name;
char ucode_prefix[30];
int err = 0;
- switch (adev->ip_versions[MP0_HWIP][0]) {
- case IP_VERSION(13, 0, 2):
- chip_name = "aldebaran";
- break;
- case IP_VERSION(13, 0, 1):
- case IP_VERSION(13, 0, 3):
- chip_name = "yellow_carp";
- break;
- default:
- amdgpu_ucode_ip_version_decode(adev, MP0_HWIP, ucode_prefix, sizeof(ucode_prefix));
- chip_name = ucode_prefix;
- break;
- }
+ amdgpu_ucode_ip_version_decode(adev, MP0_HWIP, ucode_prefix, sizeof(ucode_prefix));
switch (adev->ip_versions[MP0_HWIP][0]) {
case IP_VERSION(13, 0, 2):
- err = psp_init_sos_microcode(psp, chip_name);
+ err = psp_init_sos_microcode(psp, ucode_prefix);
if (err)
return err;
/* It's not necessary to load ras ta on Guest side */
if (!amdgpu_sriov_vf(adev)) {
- err = psp_init_ta_microcode(&adev->psp, chip_name);
+ err = psp_init_ta_microcode(psp, ucode_prefix);
if (err)
return err;
}
@@ -105,21 +92,21 @@ static int psp_v13_0_init_microcode(struct psp_context *psp)
case IP_VERSION(13, 0, 5):
case IP_VERSION(13, 0, 8):
case IP_VERSION(13, 0, 11):
- err = psp_init_toc_microcode(psp, chip_name);
+ err = psp_init_toc_microcode(psp, ucode_prefix);
if (err)
return err;
- err = psp_init_ta_microcode(psp, chip_name);
+ err = psp_init_ta_microcode(psp, ucode_prefix);
if (err)
return err;
break;
case IP_VERSION(13, 0, 0):
case IP_VERSION(13, 0, 7):
case IP_VERSION(13, 0, 10):
- err = psp_init_sos_microcode(psp, chip_name);
+ err = psp_init_sos_microcode(psp, ucode_prefix);
if (err)
return err;
/* It's not necessary to load ras ta on Guest side */
- err = psp_init_ta_microcode(psp, chip_name);
+ err = psp_init_ta_microcode(psp, ucode_prefix);
if (err)
return err;
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0_4.c b/drivers/gpu/drm/amd/amdgpu/psp_v13_0_4.c
index 9d4e24e518e8..d5ba58eba3e2 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v13_0_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0_4.c
@@ -35,25 +35,17 @@ MODULE_FIRMWARE("amdgpu/psp_13_0_4_ta.bin");
static int psp_v13_0_4_init_microcode(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
- const char *chip_name;
char ucode_prefix[30];
int err = 0;
- switch (adev->ip_versions[MP0_HWIP][0]) {
- case IP_VERSION(13, 0, 4):
- amdgpu_ucode_ip_version_decode(adev, MP0_HWIP, ucode_prefix, sizeof(ucode_prefix));
- chip_name = ucode_prefix;
- break;
- default:
- BUG();
- }
+ amdgpu_ucode_ip_version_decode(adev, MP0_HWIP, ucode_prefix, sizeof(ucode_prefix));
switch (adev->ip_versions[MP0_HWIP][0]) {
case IP_VERSION(13, 0, 4):
- err = psp_init_toc_microcode(psp, chip_name);
+ err = psp_init_toc_microcode(psp, ucode_prefix);
if (err)
return err;
- err = psp_init_ta_microcode(psp, chip_name);
+ err = psp_init_ta_microcode(psp, ucode_prefix);
if (err)
return err;
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
index 157147c6c94e..f6b75e3e47ff 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
@@ -57,26 +57,18 @@ static int psp_v3_1_ring_stop(struct psp_context *psp,
static int psp_v3_1_init_microcode(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
- const char *chip_name;
+ char ucode_prefix[30];
int err = 0;
DRM_DEBUG("\n");
- switch (adev->asic_type) {
- case CHIP_VEGA10:
- chip_name = "vega10";
- break;
- case CHIP_VEGA12:
- chip_name = "vega12";
- break;
- default: BUG();
- }
+ amdgpu_ucode_ip_version_decode(adev, MP0_HWIP, ucode_prefix, sizeof(ucode_prefix));
- err = psp_init_sos_microcode(psp, chip_name);
+ err = psp_init_sos_microcode(psp, ucode_prefix);
if (err)
return err;
- err = psp_init_asd_microcode(psp, chip_name);
+ err = psp_init_asd_microcode(psp, ucode_prefix);
if (err)
return err;
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index c52d246a1d96..fd2a7b66ac56 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -113,10 +113,9 @@ static void sdma_v2_4_init_golden_registers(struct amdgpu_device *adev)
static void sdma_v2_4_free_microcode(struct amdgpu_device *adev)
{
int i;
- for (i = 0; i < adev->sdma.num_instances; i++) {
- release_firmware(adev->sdma.instance[i].fw);
- adev->sdma.instance[i].fw = NULL;
- }
+
+ for (i = 0; i < adev->sdma.num_instances; i++)
+ amdgpu_ucode_release(&adev->sdma.instance[i].fw);
}
/**
@@ -151,10 +150,7 @@ static int sdma_v2_4_init_microcode(struct amdgpu_device *adev)
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name);
else
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma1.bin", chip_name);
- err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev);
- if (err)
- goto out;
- err = amdgpu_ucode_validate(adev->sdma.instance[i].fw);
+ err = amdgpu_ucode_request(adev, &adev->sdma.instance[i].fw, fw_name);
if (err)
goto out;
hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
@@ -176,10 +172,8 @@ static int sdma_v2_4_init_microcode(struct amdgpu_device *adev)
out:
if (err) {
pr_err("sdma_v2_4: Failed to load firmware \"%s\"\n", fw_name);
- for (i = 0; i < adev->sdma.num_instances; i++) {
- release_firmware(adev->sdma.instance[i].fw);
- adev->sdma.instance[i].fw = NULL;
- }
+ for (i = 0; i < adev->sdma.num_instances; i++)
+ amdgpu_ucode_release(&adev->sdma.instance[i].fw);
}
return err;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index 486d9b5c1b9e..e572389089d2 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -250,10 +250,9 @@ static void sdma_v3_0_init_golden_registers(struct amdgpu_device *adev)
static void sdma_v3_0_free_microcode(struct amdgpu_device *adev)
{
int i;
- for (i = 0; i < adev->sdma.num_instances; i++) {
- release_firmware(adev->sdma.instance[i].fw);
- adev->sdma.instance[i].fw = NULL;
- }
+
+ for (i = 0; i < adev->sdma.num_instances; i++)
+ amdgpu_ucode_release(&adev->sdma.instance[i].fw);
}
/**
@@ -309,10 +308,7 @@ static int sdma_v3_0_init_microcode(struct amdgpu_device *adev)
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name);
else
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma1.bin", chip_name);
- err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev);
- if (err)
- goto out;
- err = amdgpu_ucode_validate(adev->sdma.instance[i].fw);
+ err = amdgpu_ucode_request(adev, &adev->sdma.instance[i].fw, fw_name);
if (err)
goto out;
hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
@@ -332,10 +328,8 @@ static int sdma_v3_0_init_microcode(struct amdgpu_device *adev)
out:
if (err) {
pr_err("sdma_v3_0: Failed to load firmware \"%s\"\n", fw_name);
- for (i = 0; i < adev->sdma.num_instances; i++) {
- release_firmware(adev->sdma.instance[i].fw);
- adev->sdma.instance[i].fw = NULL;
- }
+ for (i = 0; i < adev->sdma.num_instances; i++)
+ amdgpu_ucode_release(&adev->sdma.instance[i].fw);
}
return err;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 4d780e4430e7..b5affba22156 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -575,60 +575,17 @@ static void sdma_v4_0_setup_ulv(struct amdgpu_device *adev)
// vega10 real chip need to use PSP to load firmware
static int sdma_v4_0_init_microcode(struct amdgpu_device *adev)
{
- const char *chip_name;
- char fw_name[30];
int ret, i;
- DRM_DEBUG("\n");
-
- switch (adev->ip_versions[SDMA0_HWIP][0]) {
- case IP_VERSION(4, 0, 0):
- chip_name = "vega10";
- break;
- case IP_VERSION(4, 0, 1):
- chip_name = "vega12";
- break;
- case IP_VERSION(4, 2, 0):
- chip_name = "vega20";
- break;
- case IP_VERSION(4, 1, 0):
- case IP_VERSION(4, 1, 1):
- if (adev->apu_flags & AMD_APU_IS_RAVEN2)
- chip_name = "raven2";
- else if (adev->apu_flags & AMD_APU_IS_PICASSO)
- chip_name = "picasso";
- else
- chip_name = "raven";
- break;
- case IP_VERSION(4, 2, 2):
- chip_name = "arcturus";
- break;
- case IP_VERSION(4, 1, 2):
- if (adev->apu_flags & AMD_APU_IS_RENOIR)
- chip_name = "renoir";
- else
- chip_name = "green_sardine";
- break;
- case IP_VERSION(4, 4, 0):
- chip_name = "aldebaran";
- break;
- default:
- BUG();
- }
-
for (i = 0; i < adev->sdma.num_instances; i++) {
- if (i == 0)
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name);
- else
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma%d.bin", chip_name, i);
if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 2) ||
adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 4, 0)) {
/* Acturus & Aldebaran will leverage the same FW memory
for every SDMA instance */
- ret = amdgpu_sdma_init_microcode(adev, fw_name, 0, true);
+ ret = amdgpu_sdma_init_microcode(adev, 0, true);
break;
} else {
- ret = amdgpu_sdma_init_microcode(adev, fw_name, i, false);
+ ret = amdgpu_sdma_init_microcode(adev, i, false);
if (ret)
return ret;
}
@@ -1894,6 +1851,11 @@ static int sdma_v4_0_sw_init(void *handle)
}
}
+ if (amdgpu_sdma_ras_sw_init(adev)) {
+ dev_err(adev->dev, "Failed to initialize sdma ras block!\n");
+ return -EINVAL;
+ }
+
return r;
}
@@ -2731,22 +2693,6 @@ static void sdma_v4_0_set_ras_funcs(struct amdgpu_device *adev)
break;
}
- if (adev->sdma.ras) {
- amdgpu_ras_register_ras_block(adev, &adev->sdma.ras->ras_block);
-
- strcpy(adev->sdma.ras->ras_block.ras_comm.name, "sdma");
- adev->sdma.ras->ras_block.ras_comm.block = AMDGPU_RAS_BLOCK__SDMA;
- adev->sdma.ras->ras_block.ras_comm.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
- adev->sdma.ras_if = &adev->sdma.ras->ras_block.ras_comm;
-
- /* If don't define special ras_late_init function, use default ras_late_init */
- if (!adev->sdma.ras->ras_block.ras_late_init)
- adev->sdma.ras->ras_block.ras_late_init = amdgpu_sdma_ras_late_init;
-
- /* If not defined special ras_cb function, use default ras_cb */
- if (!adev->sdma.ras->ras_block.ras_cb)
- adev->sdma.ras->ras_block.ras_cb = amdgpu_sdma_process_ras_data_cb;
- }
}
const struct amdgpu_ip_block_version sdma_v4_0_ip_block = {
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
index d4d9f196db83..1941b3b7c5d9 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
@@ -237,39 +237,13 @@ static void sdma_v5_0_init_golden_registers(struct amdgpu_device *adev)
// emulation only, won't work on real chip
// navi10 real chip need to use PSP to load firmware
static int sdma_v5_0_init_microcode(struct amdgpu_device *adev)
-{
- const char *chip_name;
- char fw_name[40];
- int ret, i;
+{ int ret, i;
if (amdgpu_sriov_vf(adev) && (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(5, 0, 5)))
return 0;
- DRM_DEBUG("\n");
-
- switch (adev->ip_versions[SDMA0_HWIP][0]) {
- case IP_VERSION(5, 0, 0):
- chip_name = "navi10";
- break;
- case IP_VERSION(5, 0, 2):
- chip_name = "navi14";
- break;
- case IP_VERSION(5, 0, 5):
- chip_name = "navi12";
- break;
- case IP_VERSION(5, 0, 1):
- chip_name = "cyan_skillfish2";
- break;
- default:
- BUG();
- }
-
for (i = 0; i < adev->sdma.num_instances; i++) {
- if (i == 0)
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name);
- else
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma1.bin", chip_name);
- ret = amdgpu_sdma_init_microcode(adev, fw_name, i, false);
+ ret = amdgpu_sdma_init_microcode(adev, i, false);
if (ret)
return ret;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
index 809eca54fc61..8e445eb9dd49 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
@@ -89,59 +89,6 @@ static u32 sdma_v5_2_get_reg_offset(struct amdgpu_device *adev, u32 instance, u3
return base + internal_offset;
}
-/**
- * sdma_v5_2_init_microcode - load ucode images from disk
- *
- * @adev: amdgpu_device pointer
- *
- * Use the firmware interface to load the ucode images into
- * the driver (not loaded into hw).
- * Returns 0 on success, error on failure.
- */
-
-// emulation only, won't work on real chip
-// navi10 real chip need to use PSP to load firmware
-static int sdma_v5_2_init_microcode(struct amdgpu_device *adev)
-{
- const char *chip_name;
- char fw_name[40];
-
- DRM_DEBUG("\n");
-
- switch (adev->ip_versions[SDMA0_HWIP][0]) {
- case IP_VERSION(5, 2, 0):
- chip_name = "sienna_cichlid_sdma";
- break;
- case IP_VERSION(5, 2, 2):
- chip_name = "navy_flounder_sdma";
- break;
- case IP_VERSION(5, 2, 1):
- chip_name = "vangogh_sdma";
- break;
- case IP_VERSION(5, 2, 4):
- chip_name = "dimgrey_cavefish_sdma";
- break;
- case IP_VERSION(5, 2, 5):
- chip_name = "beige_goby_sdma";
- break;
- case IP_VERSION(5, 2, 3):
- chip_name = "yellow_carp_sdma";
- break;
- case IP_VERSION(5, 2, 6):
- chip_name = "sdma_5_2_6";
- break;
- case IP_VERSION(5, 2, 7):
- chip_name = "sdma_5_2_7";
- break;
- default:
- BUG();
- }
-
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", chip_name);
-
- return amdgpu_sdma_init_microcode(adev, fw_name, 0, true);
-}
-
static unsigned sdma_v5_2_ring_init_cond_exec(struct amdgpu_ring *ring)
{
unsigned ret;
@@ -809,12 +756,6 @@ static int sdma_v5_2_start(struct amdgpu_device *adev)
msleep(1000);
}
- /* TODO: check whether can submit a doorbell request to raise
- * a doorbell fence to exit gfxoff.
- */
- if (adev->in_s0ix)
- amdgpu_gfx_off_ctrl(adev, false);
-
sdma_v5_2_soft_reset(adev);
/* unhalt the MEs */
sdma_v5_2_enable(adev, true);
@@ -823,8 +764,6 @@ static int sdma_v5_2_start(struct amdgpu_device *adev)
/* start the gfx rings and rlc compute queues */
r = sdma_v5_2_gfx_resume(adev);
- if (adev->in_s0ix)
- amdgpu_gfx_off_ctrl(adev, true);
if (r)
return r;
r = sdma_v5_2_rlc_resume(adev);
@@ -1296,7 +1235,7 @@ static int sdma_v5_2_sw_init(void *handle)
return r;
}
- r = sdma_v5_2_init_microcode(adev);
+ r = amdgpu_sdma_init_microcode(adev, 0, true);
if (r) {
DRM_ERROR("Failed to load sdma firmware!\n");
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
index 049c26a45d85..40e6b22daa22 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c
@@ -78,29 +78,6 @@ static u32 sdma_v6_0_get_reg_offset(struct amdgpu_device *adev, u32 instance, u3
return base + internal_offset;
}
-/**
- * sdma_v6_0_init_microcode - load ucode images from disk
- *
- * @adev: amdgpu_device pointer
- *
- * Use the firmware interface to load the ucode images into
- * the driver (not loaded into hw).
- * Returns 0 on success, error on failure.
- */
-static int sdma_v6_0_init_microcode(struct amdgpu_device *adev)
-{
- char fw_name[30];
- char ucode_prefix[30];
-
- DRM_DEBUG("\n");
-
- amdgpu_ucode_ip_version_decode(adev, SDMA0_HWIP, ucode_prefix, sizeof(ucode_prefix));
-
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", ucode_prefix);
-
- return amdgpu_sdma_init_microcode(adev, fw_name, 0, true);
-}
-
static unsigned sdma_v6_0_ring_init_cond_exec(struct amdgpu_ring *ring)
{
unsigned ret;
@@ -296,8 +273,6 @@ static void sdma_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
* sdma_v6_0_ring_emit_mem_sync - flush the IB by graphics cache rinse
*
* @ring: amdgpu ring pointer
- * @job: job to retrieve vmid from
- * @ib: IB object to schedule
*
* flush the IB by graphics cache rinse.
*/
@@ -349,7 +324,9 @@ static void sdma_v6_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
* sdma_v6_0_ring_emit_fence - emit a fence on the DMA ring
*
* @ring: amdgpu ring pointer
- * @fence: amdgpu fence object
+ * @addr: address
+ * @seq: fence seq number
+ * @flags: fence flags
*
* Add a DMA fence packet to the ring to write
* the fence seq number and DMA trap packet to generate
@@ -1083,10 +1060,9 @@ static void sdma_v6_0_vm_copy_pte(struct amdgpu_ib *ib,
*
* @ib: indirect buffer to fill with commands
* @pe: addr of the page entry
- * @addr: dst addr to write into pe
+ * @value: dst addr to write into pe
* @count: number of page entries to update
* @incr: increase next addr by incr bytes
- * @flags: access flags
*
* Update PTEs by writing them manually using sDMA.
*/
@@ -1190,7 +1166,6 @@ static void sdma_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
* sdma_v6_0_ring_emit_vm_flush - vm flush using sDMA
*
* @ring: amdgpu_ring pointer
- * @vm: amdgpu_vm pointer
*
* Update the page table base and flush the VM TLB
* using sDMA.
@@ -1234,6 +1209,24 @@ static void sdma_v6_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
amdgpu_ring_emit_reg_wait(ring, reg1, mask, mask);
}
+static struct amdgpu_sdma_ras sdma_v6_0_3_ras = {
+ .ras_block = {
+ .ras_late_init = amdgpu_ras_block_late_init,
+ },
+};
+
+static void sdma_v6_0_set_ras_funcs(struct amdgpu_device *adev)
+{
+ switch (adev->ip_versions[SDMA0_HWIP][0]) {
+ case IP_VERSION(6, 0, 3):
+ adev->sdma.ras = &sdma_v6_0_3_ras;
+ break;
+ default:
+ break;
+ }
+
+}
+
static int sdma_v6_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -1243,6 +1236,7 @@ static int sdma_v6_0_early_init(void *handle)
sdma_v6_0_set_vm_pte_funcs(adev);
sdma_v6_0_set_irq_funcs(adev);
sdma_v6_0_set_mqd_funcs(adev);
+ sdma_v6_0_set_ras_funcs(adev);
return 0;
}
@@ -1260,7 +1254,7 @@ static int sdma_v6_0_sw_init(void *handle)
if (r)
return r;
- r = sdma_v6_0_init_microcode(adev);
+ r = amdgpu_sdma_init_microcode(adev, 0, true);
if (r) {
DRM_ERROR("Failed to load sdma firmware!\n");
return r;
@@ -1287,6 +1281,11 @@ static int sdma_v6_0_sw_init(void *handle)
return r;
}
+ if (amdgpu_sdma_ras_sw_init(adev)) {
+ dev_err(adev->dev, "Failed to initialize sdma ras block!\n");
+ return -EINVAL;
+ }
+
return r;
}
@@ -1426,10 +1425,12 @@ static int sdma_v6_0_set_trap_irq_state(struct amdgpu_device *adev,
u32 reg_offset = sdma_v6_0_get_reg_offset(adev, type, regSDMA0_CNTL);
- sdma_cntl = RREG32(reg_offset);
- sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE,
- state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
- WREG32(reg_offset, sdma_cntl);
+ if (!amdgpu_sriov_vf(adev)) {
+ sdma_cntl = RREG32(reg_offset);
+ sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE,
+ state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+ WREG32(reg_offset, sdma_cntl);
+ }
return 0;
}
@@ -1588,10 +1589,11 @@ static void sdma_v6_0_set_irq_funcs(struct amdgpu_device *adev)
/**
* sdma_v6_0_emit_copy_buffer - copy buffer using the sDMA engine
*
- * @ring: amdgpu_ring structure holding ring information
+ * @ib: indirect buffer to fill with commands
* @src_offset: src GPU address
* @dst_offset: dst GPU address
* @byte_count: number of bytes to xfer
+ * @tmz: if a secure copy should be used
*
* Copy GPU buffers using the DMA engine.
* Used by the amdgpu ttm implementation to move pages if
@@ -1617,7 +1619,7 @@ static void sdma_v6_0_emit_copy_buffer(struct amdgpu_ib *ib,
/**
* sdma_v6_0_emit_fill_buffer - fill buffer using the sDMA engine
*
- * @ring: amdgpu_ring structure holding ring information
+ * @ib: indirect buffer to fill
* @src_data: value to write to buffer
* @dst_offset: dst GPU address
* @byte_count: number of bytes to xfer
diff --git a/drivers/gpu/drm/amd/amdgpu/smu_v13_0_10.c b/drivers/gpu/drm/amd/amdgpu/smu_v13_0_10.c
new file mode 100644
index 000000000000..ae29620b1ea4
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/smu_v13_0_10.c
@@ -0,0 +1,303 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "smu_v13_0_10.h"
+#include "amdgpu_reset.h"
+#include "amdgpu_dpm.h"
+#include "amdgpu_job.h"
+#include "amdgpu_ring.h"
+#include "amdgpu_ras.h"
+#include "amdgpu_psp.h"
+
+static bool smu_v13_0_10_is_mode2_default(struct amdgpu_reset_control *reset_ctl)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)reset_ctl->handle;
+ if (adev->pm.fw_version >= 0x00502005 && !amdgpu_sriov_vf(adev))
+ return true;
+
+ return false;
+}
+
+static struct amdgpu_reset_handler *
+smu_v13_0_10_get_reset_handler(struct amdgpu_reset_control *reset_ctl,
+ struct amdgpu_reset_context *reset_context)
+{
+ struct amdgpu_reset_handler *handler;
+ struct amdgpu_device *adev = (struct amdgpu_device *)reset_ctl->handle;
+
+ if (reset_context->method != AMD_RESET_METHOD_NONE) {
+ list_for_each_entry(handler, &reset_ctl->reset_handlers,
+ handler_list) {
+ if (handler->reset_method == reset_context->method)
+ return handler;
+ }
+ }
+
+ if (smu_v13_0_10_is_mode2_default(reset_ctl) &&
+ amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_MODE2) {
+ list_for_each_entry (handler, &reset_ctl->reset_handlers,
+ handler_list) {
+ if (handler->reset_method == AMD_RESET_METHOD_MODE2)
+ return handler;
+ }
+ }
+
+ return NULL;
+}
+
+static int smu_v13_0_10_mode2_suspend_ip(struct amdgpu_device *adev)
+{
+ int r, i;
+
+ amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
+ amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
+
+ for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
+ if (!(adev->ip_blocks[i].version->type ==
+ AMD_IP_BLOCK_TYPE_GFX ||
+ adev->ip_blocks[i].version->type ==
+ AMD_IP_BLOCK_TYPE_SDMA ||
+ adev->ip_blocks[i].version->type ==
+ AMD_IP_BLOCK_TYPE_MES))
+ continue;
+
+ r = adev->ip_blocks[i].version->funcs->suspend(adev);
+
+ if (r) {
+ dev_err(adev->dev,
+ "suspend of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
+ return r;
+ }
+ adev->ip_blocks[i].status.hw = false;
+ }
+
+ return r;
+}
+
+static int
+smu_v13_0_10_mode2_prepare_hwcontext(struct amdgpu_reset_control *reset_ctl,
+ struct amdgpu_reset_context *reset_context)
+{
+ int r = 0;
+ struct amdgpu_device *adev = (struct amdgpu_device *)reset_ctl->handle;
+
+ if (!amdgpu_sriov_vf(adev))
+ r = smu_v13_0_10_mode2_suspend_ip(adev);
+
+ return r;
+}
+
+static int smu_v13_0_10_mode2_reset(struct amdgpu_device *adev)
+{
+ return amdgpu_dpm_mode2_reset(adev);
+}
+
+static void smu_v13_0_10_async_reset(struct work_struct *work)
+{
+ struct amdgpu_reset_handler *handler;
+ struct amdgpu_reset_control *reset_ctl =
+ container_of(work, struct amdgpu_reset_control, reset_work);
+ struct amdgpu_device *adev = (struct amdgpu_device *)reset_ctl->handle;
+
+ list_for_each_entry(handler, &reset_ctl->reset_handlers,
+ handler_list) {
+ if (handler->reset_method == reset_ctl->active_reset) {
+ dev_dbg(adev->dev, "Resetting device\n");
+ handler->do_reset(adev);
+ break;
+ }
+ }
+}
+static int
+smu_v13_0_10_mode2_perform_reset(struct amdgpu_reset_control *reset_ctl,
+ struct amdgpu_reset_context *reset_context)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)reset_ctl->handle;
+ int r;
+
+ r = smu_v13_0_10_mode2_reset(adev);
+ if (r) {
+ dev_err(adev->dev,
+ "ASIC reset failed with error, %d ", r);
+ }
+ return r;
+}
+
+static int smu_v13_0_10_mode2_restore_ip(struct amdgpu_device *adev)
+{
+ int i, r;
+ struct psp_context *psp = &adev->psp;
+ struct amdgpu_firmware_info *ucode;
+ struct amdgpu_firmware_info *ucode_list[2];
+ int ucode_count = 0;
+
+ for (i = 0; i < adev->firmware.max_ucodes; i++) {
+ ucode = &adev->firmware.ucode[i];
+
+ switch (ucode->ucode_id) {
+ case AMDGPU_UCODE_ID_IMU_I:
+ case AMDGPU_UCODE_ID_IMU_D:
+ ucode_list[ucode_count++] = ucode;
+ break;
+ default:
+ break;
+ }
+ }
+
+ r = psp_load_fw_list(psp, ucode_list, ucode_count);
+ if (r) {
+ dev_err(adev->dev, "IMU ucode load failed after mode2 reset\n");
+ return r;
+ }
+
+ r = psp_rlc_autoload_start(psp);
+ if (r) {
+ DRM_ERROR("Failed to start rlc autoload after mode2 reset\n");
+ return r;
+ }
+
+ amdgpu_dpm_enable_gfx_features(adev);
+
+ for (i = 0; i < adev->num_ip_blocks; i++) {
+ if (!(adev->ip_blocks[i].version->type ==
+ AMD_IP_BLOCK_TYPE_GFX ||
+ adev->ip_blocks[i].version->type ==
+ AMD_IP_BLOCK_TYPE_MES ||
+ adev->ip_blocks[i].version->type ==
+ AMD_IP_BLOCK_TYPE_SDMA))
+ continue;
+ r = adev->ip_blocks[i].version->funcs->resume(adev);
+ if (r) {
+ dev_err(adev->dev,
+ "resume of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
+ return r;
+ }
+
+ adev->ip_blocks[i].status.hw = true;
+ }
+
+ for (i = 0; i < adev->num_ip_blocks; i++) {
+ if (!(adev->ip_blocks[i].version->type ==
+ AMD_IP_BLOCK_TYPE_GFX ||
+ adev->ip_blocks[i].version->type ==
+ AMD_IP_BLOCK_TYPE_MES ||
+ adev->ip_blocks[i].version->type ==
+ AMD_IP_BLOCK_TYPE_SDMA))
+ continue;
+
+ if (adev->ip_blocks[i].version->funcs->late_init) {
+ r = adev->ip_blocks[i].version->funcs->late_init(
+ (void *)adev);
+ if (r) {
+ dev_err(adev->dev,
+ "late_init of IP block <%s> failed %d after reset\n",
+ adev->ip_blocks[i].version->funcs->name,
+ r);
+ return r;
+ }
+ }
+ adev->ip_blocks[i].status.late_initialized = true;
+ }
+
+ amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
+ amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
+
+ return r;
+}
+
+static int
+smu_v13_0_10_mode2_restore_hwcontext(struct amdgpu_reset_control *reset_ctl,
+ struct amdgpu_reset_context *reset_context)
+{
+ int r;
+ struct amdgpu_device *tmp_adev = (struct amdgpu_device *)reset_ctl->handle;
+
+ dev_info(tmp_adev->dev,
+ "GPU reset succeeded, trying to resume\n");
+ r = smu_v13_0_10_mode2_restore_ip(tmp_adev);
+ if (r)
+ goto end;
+
+ amdgpu_register_gpu_instance(tmp_adev);
+
+ /* Resume RAS */
+ amdgpu_ras_resume(tmp_adev);
+
+ amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
+
+ r = amdgpu_ib_ring_tests(tmp_adev);
+ if (r) {
+ dev_err(tmp_adev->dev,
+ "ib ring test failed (%d).\n", r);
+ r = -EAGAIN;
+ goto end;
+ }
+
+end:
+ if (r)
+ return -EAGAIN;
+ else
+ return r;
+}
+
+static struct amdgpu_reset_handler smu_v13_0_10_mode2_handler = {
+ .reset_method = AMD_RESET_METHOD_MODE2,
+ .prepare_env = NULL,
+ .prepare_hwcontext = smu_v13_0_10_mode2_prepare_hwcontext,
+ .perform_reset = smu_v13_0_10_mode2_perform_reset,
+ .restore_hwcontext = smu_v13_0_10_mode2_restore_hwcontext,
+ .restore_env = NULL,
+ .do_reset = smu_v13_0_10_mode2_reset,
+};
+
+int smu_v13_0_10_reset_init(struct amdgpu_device *adev)
+{
+ struct amdgpu_reset_control *reset_ctl;
+
+ reset_ctl = kzalloc(sizeof(*reset_ctl), GFP_KERNEL);
+ if (!reset_ctl)
+ return -ENOMEM;
+
+ reset_ctl->handle = adev;
+ reset_ctl->async_reset = smu_v13_0_10_async_reset;
+ reset_ctl->active_reset = AMD_RESET_METHOD_NONE;
+ reset_ctl->get_reset_handler = smu_v13_0_10_get_reset_handler;
+
+ INIT_LIST_HEAD(&reset_ctl->reset_handlers);
+ INIT_WORK(&reset_ctl->reset_work, reset_ctl->async_reset);
+ /* Only mode2 is handled through reset control now */
+ amdgpu_reset_add_handler(reset_ctl, &smu_v13_0_10_mode2_handler);
+
+ adev->reset_cntl = reset_ctl;
+
+ return 0;
+}
+
+int smu_v13_0_10_reset_fini(struct amdgpu_device *adev)
+{
+ kfree(adev->reset_cntl);
+ adev->reset_cntl = NULL;
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/smu_v13_0_10.h b/drivers/gpu/drm/amd/amdgpu/smu_v13_0_10.h
new file mode 100644
index 000000000000..e0cb72a0eec6
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/smu_v13_0_10.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __SMU_V13_0_10_H__
+#define __SMU_V13_0_10_H__
+
+#include "amdgpu.h"
+
+int smu_v13_0_10_reset_init(struct amdgpu_device *adev);
+int smu_v13_0_10_reset_fini(struct amdgpu_device *adev);
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c
index 5562670b7b52..620f7409825d 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc21.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc21.c
@@ -48,19 +48,32 @@
static const struct amd_ip_funcs soc21_common_ip_funcs;
/* SOC21 */
-static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_encode_array[] =
+static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_encode_array_vcn0[] =
{
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
};
-static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_encode =
+static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_encode_array_vcn1[] =
{
- .codec_count = ARRAY_SIZE(vcn_4_0_0_video_codecs_encode_array),
- .codec_array = vcn_4_0_0_video_codecs_encode_array,
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)},
};
-static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_decode_array[] =
+static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_encode_vcn0 =
+{
+ .codec_count = ARRAY_SIZE(vcn_4_0_0_video_codecs_encode_array_vcn0),
+ .codec_array = vcn_4_0_0_video_codecs_encode_array_vcn0,
+};
+
+static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_encode_vcn1 =
+{
+ .codec_count = ARRAY_SIZE(vcn_4_0_0_video_codecs_encode_array_vcn1),
+ .codec_array = vcn_4_0_0_video_codecs_encode_array_vcn1,
+};
+
+static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_decode_array_vcn0[] =
{
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
@@ -69,23 +82,46 @@ static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_decode_array[
{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)},
};
-static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_decode =
+static const struct amdgpu_video_codec_info vcn_4_0_0_video_codecs_decode_array_vcn1[] =
{
- .codec_count = ARRAY_SIZE(vcn_4_0_0_video_codecs_decode_array),
- .codec_array = vcn_4_0_0_video_codecs_decode_array,
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)},
+ {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)},
+};
+
+static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_decode_vcn0 =
+{
+ .codec_count = ARRAY_SIZE(vcn_4_0_0_video_codecs_decode_array_vcn0),
+ .codec_array = vcn_4_0_0_video_codecs_decode_array_vcn0,
+};
+
+static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_decode_vcn1 =
+{
+ .codec_count = ARRAY_SIZE(vcn_4_0_0_video_codecs_decode_array_vcn1),
+ .codec_array = vcn_4_0_0_video_codecs_decode_array_vcn1,
};
static int soc21_query_video_codecs(struct amdgpu_device *adev, bool encode,
const struct amdgpu_video_codecs **codecs)
{
- switch (adev->ip_versions[UVD_HWIP][0]) {
+ if (adev->vcn.num_vcn_inst == hweight8(adev->vcn.harvest_config))
+ return -EINVAL;
+ switch (adev->ip_versions[UVD_HWIP][0]) {
case IP_VERSION(4, 0, 0):
case IP_VERSION(4, 0, 2):
- if (encode)
- *codecs = &vcn_4_0_0_video_codecs_encode;
- else
- *codecs = &vcn_4_0_0_video_codecs_decode;
+ if (adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0) {
+ if (encode)
+ *codecs = &vcn_4_0_0_video_codecs_encode_vcn1;
+ else
+ *codecs = &vcn_4_0_0_video_codecs_decode_vcn1;
+ } else {
+ if (encode)
+ *codecs = &vcn_4_0_0_video_codecs_encode_vcn0;
+ else
+ *codecs = &vcn_4_0_0_video_codecs_decode_vcn0;
+ }
return 0;
default:
return -EINVAL;
@@ -640,7 +676,10 @@ static int soc21_common_early_init(void *handle)
AMD_CG_SUPPORT_GFX_CGCG |
AMD_CG_SUPPORT_GFX_CGLS |
AMD_CG_SUPPORT_REPEATER_FGCG |
- AMD_CG_SUPPORT_GFX_MGCG;
+ AMD_CG_SUPPORT_GFX_MGCG |
+ AMD_CG_SUPPORT_HDP_SD |
+ AMD_CG_SUPPORT_ATHUB_MGCG |
+ AMD_CG_SUPPORT_ATHUB_LS;
adev->pg_flags = AMD_PG_SUPPORT_VCN |
AMD_PG_SUPPORT_VCN_DPG |
AMD_PG_SUPPORT_JPEG;
diff --git a/drivers/gpu/drm/amd/amdgpu/ta_secureDisplay_if.h b/drivers/gpu/drm/amd/amdgpu/ta_secureDisplay_if.h
index cf8ff064dc72..00d8bdb8254f 100644
--- a/drivers/gpu/drm/amd/amdgpu/ta_secureDisplay_if.h
+++ b/drivers/gpu/drm/amd/amdgpu/ta_secureDisplay_if.h
@@ -55,10 +55,10 @@ enum ta_securedisplay_status {
TA_SECUREDISPLAY_STATUS__MAX = 0x7FFFFFFF,/* Maximum Value for status*/
};
-/** @enum ta_securedisplay_max_phy
+/** @enum ta_securedisplay_phy_ID
* Physical ID number to use for reading corresponding DIO Scratch register for ROI
*/
-enum ta_securedisplay_max_phy {
+enum ta_securedisplay_phy_ID {
TA_SECUREDISPLAY_PHY0 = 0,
TA_SECUREDISPLAY_PHY1 = 1,
TA_SECUREDISPLAY_PHY2 = 2,
@@ -139,16 +139,16 @@ union ta_securedisplay_cmd_output {
uint32_t reserved[4];
};
-/** @struct securedisplay_cmd
- * Secure Display Command which is shared buffer memory
- */
-struct securedisplay_cmd {
- uint32_t cmd_id; /* +0 Bytes Command ID */
- enum ta_securedisplay_status status; /* +4 Bytes Status of Secure Display TA */
- uint32_t reserved[2]; /* +8 Bytes Reserved */
- union ta_securedisplay_cmd_input securedisplay_in_message; /* +16 Bytes Input Buffer */
- union ta_securedisplay_cmd_output securedisplay_out_message;/* +32 Bytes Output Buffer */
- /**@note Total 48 Bytes */
+/** @struct ta_securedisplay_cmd
+* Secure display command which is shared buffer memory
+*/
+struct ta_securedisplay_cmd {
+ uint32_t cmd_id; /**< +0 Bytes Command ID */
+ enum ta_securedisplay_status status; /**< +4 Bytes Status code returned by the secure display TA */
+ uint32_t reserved[2]; /**< +8 Bytes Reserved */
+ union ta_securedisplay_cmd_input securedisplay_in_message; /**< +16 Bytes Command input buffer */
+ union ta_securedisplay_cmd_output securedisplay_out_message; /**< +32 Bytes Command output buffer */
+ /**@note Total 48 Bytes */
};
#endif //_TA_SECUREDISPLAY_IF_H
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c b/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c
index 72fd963f178b..e08e25a3a1a9 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c
@@ -57,13 +57,6 @@ static inline uint32_t get_umc_v6_7_reg_offset(struct amdgpu_device *adev,
return adev->umc.channel_offs * ch_inst + UMC_V6_7_INST_DIST * umc_inst;
}
-static inline uint32_t get_umc_v6_7_channel_index(struct amdgpu_device *adev,
- uint32_t umc_inst,
- uint32_t ch_inst)
-{
- return adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst];
-}
-
static void umc_v6_7_query_error_status_helper(struct amdgpu_device *adev,
uint64_t mc_umc_status, uint32_t umc_reg_offset)
{
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v8_10.c b/drivers/gpu/drm/amd/amdgpu/umc_v8_10.c
index b7da4528cf0a..da394bc06bba 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v8_10.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v8_10.c
@@ -340,29 +340,13 @@ static void umc_v8_10_err_cnt_init(struct amdgpu_device *adev)
}
}
-static uint32_t umc_v8_10_query_ras_poison_mode_per_channel(
- struct amdgpu_device *adev,
- uint32_t umc_reg_offset)
-{
- uint32_t ecc_ctrl_addr, ecc_ctrl;
-
- ecc_ctrl_addr =
- SOC15_REG_OFFSET(UMC, 0, regUMCCH0_0_GeccCtrl);
- ecc_ctrl = RREG32_PCIE((ecc_ctrl_addr +
- umc_reg_offset) * 4);
-
- return REG_GET_FIELD(ecc_ctrl, UMCCH0_0_GeccCtrl, UCFatalEn);
-}
-
static bool umc_v8_10_query_ras_poison_mode(struct amdgpu_device *adev)
{
- uint32_t umc_reg_offset = 0;
-
- /* Enabling fatal error in umc node0 instance0 channel0 will be
- * considered as fatal error mode
+ /*
+ * Force return true, because UMCCH0_0_GeccCtrl
+ * is not accessible from host side
*/
- umc_reg_offset = get_umc_v8_10_reg_offset(adev, 0, 0, 0);
- return !umc_v8_10_query_ras_poison_mode_per_channel(adev, umc_reg_offset);
+ return true;
}
const struct amdgpu_ras_block_hw_ops umc_v8_10_ras_hw_ops = {
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index f0fbcda76f5e..c305b2cb8490 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -57,11 +57,12 @@ static void vcn_v1_0_idle_work_handler(struct work_struct *work);
static void vcn_v1_0_ring_begin_use(struct amdgpu_ring *ring);
/**
- * vcn_v1_0_early_init - set function pointers
+ * vcn_v1_0_early_init - set function pointers and load microcode
*
* @handle: amdgpu_device pointer
*
* Set ring and irq function pointers
+ * Load microcode from filesystem
*/
static int vcn_v1_0_early_init(void *handle)
{
@@ -75,7 +76,7 @@ static int vcn_v1_0_early_init(void *handle)
jpeg_v1_0_early_init(handle);
- return 0;
+ return amdgpu_vcn_early_init(adev);
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
index 08871bad9994..4b4cd88414e0 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
@@ -62,11 +62,12 @@ static int vcn_v2_0_pause_dpg_mode(struct amdgpu_device *adev,
int inst_idx, struct dpg_pause_state *new_state);
static int vcn_v2_0_start_sriov(struct amdgpu_device *adev);
/**
- * vcn_v2_0_early_init - set function pointers
+ * vcn_v2_0_early_init - set function pointers and load microcode
*
* @handle: amdgpu_device pointer
*
* Set ring and irq function pointers
+ * Load microcode from filesystem
*/
static int vcn_v2_0_early_init(void *handle)
{
@@ -81,7 +82,7 @@ static int vcn_v2_0_early_init(void *handle)
vcn_v2_0_set_enc_ring_funcs(adev);
vcn_v2_0_set_irq_funcs(adev);
- return 0;
+ return amdgpu_vcn_early_init(adev);
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
index ec87b00f2e05..b0b0e69c6a94 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
@@ -71,11 +71,12 @@ static int amdgpu_ih_clientid_vcns[] = {
};
/**
- * vcn_v2_5_early_init - set function pointers
+ * vcn_v2_5_early_init - set function pointers and load microcode
*
* @handle: amdgpu_device pointer
*
* Set ring and irq function pointers
+ * Load microcode from filesystem
*/
static int vcn_v2_5_early_init(void *handle)
{
@@ -107,7 +108,7 @@ static int vcn_v2_5_early_init(void *handle)
vcn_v2_5_set_irq_funcs(adev);
vcn_v2_5_set_ras_funcs(adev);
- return 0;
+ return amdgpu_vcn_early_init(adev);
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
index 9c8b5fd99037..66439388faee 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
@@ -78,11 +78,12 @@ static void vcn_v3_0_dec_ring_set_wptr(struct amdgpu_ring *ring);
static void vcn_v3_0_enc_ring_set_wptr(struct amdgpu_ring *ring);
/**
- * vcn_v3_0_early_init - set function pointers
+ * vcn_v3_0_early_init - set function pointers and load microcode
*
* @handle: amdgpu_device pointer
*
* Set ring and irq function pointers
+ * Load microcode from filesystem
*/
static int vcn_v3_0_early_init(void *handle)
{
@@ -109,7 +110,7 @@ static int vcn_v3_0_early_init(void *handle)
vcn_v3_0_set_enc_ring_funcs(adev);
vcn_v3_0_set_irq_funcs(adev);
- return 0;
+ return amdgpu_vcn_early_init(adev);
}
/**
@@ -1770,6 +1771,10 @@ static int vcn_v3_0_limit_sched(struct amdgpu_cs_parser *p,
if (atomic_read(&job->base.entity->fence_seq))
return -EINVAL;
+ /* if VCN0 is harvested, we can't support AV1 */
+ if (p->adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0)
+ return -EINVAL;
+
scheds = p->adev->gpu_sched[AMDGPU_HW_IP_VCN_DEC]
[AMDGPU_RING_PRIO_DEFAULT].sched;
drm_sched_entity_modify_sched(job->base.entity, scheds, 1);
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
index 1e2b22299975..22a41766a8c7 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
@@ -68,11 +68,12 @@ static void vcn_v4_0_unified_ring_set_wptr(struct amdgpu_ring *ring);
static void vcn_v4_0_set_ras_funcs(struct amdgpu_device *adev);
/**
- * vcn_v4_0_early_init - set function pointers
+ * vcn_v4_0_early_init - set function pointers and load microcode
*
* @handle: amdgpu_device pointer
*
* Set ring and irq function pointers
+ * Load microcode from filesystem
*/
static int vcn_v4_0_early_init(void *handle)
{
@@ -88,7 +89,7 @@ static int vcn_v4_0_early_init(void *handle)
vcn_v4_0_set_irq_funcs(adev);
vcn_v4_0_set_ras_funcs(adev);
- return 0;
+ return amdgpu_vcn_early_init(adev);
}
/**
@@ -1631,6 +1632,10 @@ static int vcn_v4_0_limit_sched(struct amdgpu_cs_parser *p,
if (atomic_read(&job->base.entity->fence_seq))
return -EINVAL;
+ /* if VCN0 is harvested, we can't support AV1 */
+ if (p->adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0)
+ return -EINVAL;
+
scheds = p->adev->gpu_sched[AMDGPU_HW_IP_VCN_ENC]
[AMDGPU_RING_PRIO_0].sched;
drm_sched_entity_modify_sched(job->base.entity, scheds, 1);
@@ -1705,7 +1710,7 @@ static int vcn_v4_0_dec_msg(struct amdgpu_cs_parser *p, struct amdgpu_job *job,
create = ptr + addr + offset - start;
- /* H246, HEVC and VP9 can run on any instance */
+ /* H264, HEVC and VP9 can run on any instance */
if (create[0] == 0x7 || create[0] == 0x10 || create[0] == 0x11)
continue;
@@ -1719,7 +1724,29 @@ out:
return r;
}
-#define RADEON_VCN_ENGINE_TYPE_DECODE (0x00000003)
+#define RADEON_VCN_ENGINE_TYPE_ENCODE (0x00000002)
+#define RADEON_VCN_ENGINE_TYPE_DECODE (0x00000003)
+
+#define RADEON_VCN_ENGINE_INFO (0x30000001)
+#define RADEON_VCN_ENGINE_INFO_MAX_OFFSET 16
+
+#define RENCODE_ENCODE_STANDARD_AV1 2
+#define RENCODE_IB_PARAM_SESSION_INIT 0x00000003
+#define RENCODE_IB_PARAM_SESSION_INIT_MAX_OFFSET 64
+
+/* return the offset in ib if id is found, -1 otherwise
+ * to speed up the searching we only search upto max_offset
+ */
+static int vcn_v4_0_enc_find_ib_param(struct amdgpu_ib *ib, uint32_t id, int max_offset)
+{
+ int i;
+
+ for (i = 0; i < ib->length_dw && i < max_offset && ib->ptr[i] >= 8; i += ib->ptr[i]/4) {
+ if (ib->ptr[i + 1] == id)
+ return i;
+ }
+ return -1;
+}
static int vcn_v4_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
struct amdgpu_job *job,
@@ -1729,27 +1756,35 @@ static int vcn_v4_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
struct amdgpu_vcn_decode_buffer *decode_buffer;
uint64_t addr;
uint32_t val;
+ int idx;
/* The first instance can decode anything */
if (!ring->me)
return 0;
- /* unified queue ib header has 8 double words. */
- if (ib->length_dw < 8)
+ /* RADEON_VCN_ENGINE_INFO is at the top of ib block */
+ idx = vcn_v4_0_enc_find_ib_param(ib, RADEON_VCN_ENGINE_INFO,
+ RADEON_VCN_ENGINE_INFO_MAX_OFFSET);
+ if (idx < 0) /* engine info is missing */
return 0;
- val = amdgpu_ib_get_value(ib, 6); //RADEON_VCN_ENGINE_TYPE
- if (val != RADEON_VCN_ENGINE_TYPE_DECODE)
- return 0;
-
- decode_buffer = (struct amdgpu_vcn_decode_buffer *)&ib->ptr[10];
-
- if (!(decode_buffer->valid_buf_flag & 0x1))
- return 0;
-
- addr = ((u64)decode_buffer->msg_buffer_address_hi) << 32 |
- decode_buffer->msg_buffer_address_lo;
- return vcn_v4_0_dec_msg(p, job, addr);
+ val = amdgpu_ib_get_value(ib, idx + 2); /* RADEON_VCN_ENGINE_TYPE */
+ if (val == RADEON_VCN_ENGINE_TYPE_DECODE) {
+ decode_buffer = (struct amdgpu_vcn_decode_buffer *)&ib->ptr[idx + 6];
+
+ if (!(decode_buffer->valid_buf_flag & 0x1))
+ return 0;
+
+ addr = ((u64)decode_buffer->msg_buffer_address_hi) << 32 |
+ decode_buffer->msg_buffer_address_lo;
+ return vcn_v4_0_dec_msg(p, job, addr);
+ } else if (val == RADEON_VCN_ENGINE_TYPE_ENCODE) {
+ idx = vcn_v4_0_enc_find_ib_param(ib, RENCODE_IB_PARAM_SESSION_INIT,
+ RENCODE_IB_PARAM_SESSION_INIT_MAX_OFFSET);
+ if (idx >= 0 && ib->ptr[idx + 2] == RENCODE_ENCODE_STANDARD_AV1)
+ return vcn_v4_0_limit_sched(p, job);
+ }
+ return 0;
}
static const struct amdgpu_ring_funcs vcn_v4_0_unified_ring_vm_funcs = {
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 6d291aa6386b..a0e30f21e12e 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -1065,6 +1065,20 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
mutex_unlock(&p->svms.lock);
return -EADDRINUSE;
}
+
+ /* When register user buffer check if it has been registered by svm by
+ * buffer cpu virtual address.
+ */
+ if ((flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) &&
+ interval_tree_iter_first(&p->svms.objects,
+ args->mmap_offset >> PAGE_SHIFT,
+ (args->mmap_offset + args->size - 1) >> PAGE_SHIFT)) {
+ pr_err("User Buffer Address: 0x%llx already allocated by SVM\n",
+ args->mmap_offset);
+ mutex_unlock(&p->svms.lock);
+ return -EADDRINUSE;
+ }
+
mutex_unlock(&p->svms.lock);
#endif
mutex_lock(&p->mutex);
@@ -1127,8 +1141,13 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
}
/* Update the VRAM usage count */
- if (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM)
- WRITE_ONCE(pdd->vram_usage, pdd->vram_usage + args->size);
+ if (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
+ uint64_t size = args->size;
+
+ if (flags & KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM)
+ size >>= 1;
+ WRITE_ONCE(pdd->vram_usage, pdd->vram_usage + PAGE_ALIGN(size));
+ }
mutex_unlock(&p->mutex);
@@ -2879,8 +2898,8 @@ static int kfd_mmio_mmap(struct kfd_dev *dev, struct kfd_process *process,
address = dev->adev->rmmio_remap.bus_addr;
- vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE |
- VM_DONTDUMP | VM_PFNMAP;
+ vm_flags_set(vma, VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE |
+ VM_DONTDUMP | VM_PFNMAP);
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index b8936340742b..3de7f616a001 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -262,23 +262,12 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
f2g = &gfx_v8_kfd2kgd;
break;
case CHIP_FIJI:
- gfx_target_version = 80003;
- f2g = &gfx_v8_kfd2kgd;
- break;
case CHIP_POLARIS10:
gfx_target_version = 80003;
f2g = &gfx_v8_kfd2kgd;
break;
case CHIP_POLARIS11:
- gfx_target_version = 80003;
- if (!vf)
- f2g = &gfx_v8_kfd2kgd;
- break;
case CHIP_POLARIS12:
- gfx_target_version = 80003;
- if (!vf)
- f2g = &gfx_v8_kfd2kgd;
- break;
case CHIP_VEGAM:
gfx_target_version = 80003;
if (!vf)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c
index d119070956fb..8b2dd2670ab7 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c
@@ -59,30 +59,27 @@ static int update_qpd_v9(struct device_queue_manager *dqm,
/* check if sh_mem_config register already configured */
if (qpd->sh_mem_config == 0) {
- qpd->sh_mem_config =
- SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
+ qpd->sh_mem_config = SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;
- if (KFD_GC_VERSION(dqm->dev) == IP_VERSION(9, 4, 2)) {
- /* Aldebaran can safely support different XNACK modes
- * per process
- */
- if (!pdd->process->xnack_enabled)
- qpd->sh_mem_config |=
- 1 << SH_MEM_CONFIG__RETRY_DISABLE__SHIFT;
- } else if (dqm->dev->noretry &&
- !dqm->dev->use_iommu_v2) {
- qpd->sh_mem_config |=
- 1 << SH_MEM_CONFIG__RETRY_DISABLE__SHIFT;
- }
+ if (dqm->dev->noretry && !dqm->dev->use_iommu_v2)
+ qpd->sh_mem_config |= 1 << SH_MEM_CONFIG__RETRY_DISABLE__SHIFT;
qpd->sh_mem_ape1_limit = 0;
qpd->sh_mem_ape1_base = 0;
}
+ if (KFD_SUPPORT_XNACK_PER_PROCESS(dqm->dev)) {
+ if (!pdd->process->xnack_enabled)
+ qpd->sh_mem_config |= 1 << SH_MEM_CONFIG__RETRY_DISABLE__SHIFT;
+ else
+ qpd->sh_mem_config &= ~(1 << SH_MEM_CONFIG__RETRY_DISABLE__SHIFT);
+ }
+
qpd->sh_mem_bases = compute_sh_mem_bases_64bit(pdd);
- pr_debug("sh_mem_bases 0x%X\n", qpd->sh_mem_bases);
+ pr_debug("sh_mem_bases 0x%X sh_mem_config 0x%X\n", qpd->sh_mem_bases,
+ qpd->sh_mem_config);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
index cd4e61bf0493..cbef2e147da5 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
@@ -159,8 +159,8 @@ int kfd_doorbell_mmap(struct kfd_dev *dev, struct kfd_process *process,
address = kfd_get_process_doorbells(pdd);
if (!address)
return -ENOMEM;
- vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE |
- VM_DONTDUMP | VM_PFNMAP;
+ vm_flags_set(vma, VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE |
+ VM_DONTDUMP | VM_PFNMAP);
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
index 729d26d648af..dd0436bf349a 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
@@ -1052,8 +1052,8 @@ int kfd_event_mmap(struct kfd_process *p, struct vm_area_struct *vma)
pfn = __pa(page->kernel_address);
pfn >>= PAGE_SHIFT;
- vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE
- | VM_DONTDUMP | VM_PFNMAP;
+ vm_flags_set(vma, VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE
+ | VM_DONTDUMP | VM_PFNMAP);
pr_debug("Mapping signal page\n");
pr_debug(" start user address == 0x%08lx\n", vma->vm_start);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
index 10048ce16aea..de8ce72344fc 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
@@ -1027,8 +1027,7 @@ int svm_migrate_init(struct amdgpu_device *adev)
/* Disable SVM support capability */
pgmap->type = 0;
if (pgmap->type == MEMORY_DEVICE_PRIVATE)
- devm_release_mem_region(adev->dev, res->start,
- res->end - res->start + 1);
+ devm_release_mem_region(adev->dev, res->start, resource_size(res));
return PTR_ERR(r);
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index 552c3ac85a13..bfa30d12406b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -206,6 +206,8 @@ enum cache_policy {
#define KFD_GC_VERSION(dev) ((dev)->adev->ip_versions[GC_HWIP][0])
#define KFD_IS_SOC15(dev) ((KFD_GC_VERSION(dev)) >= (IP_VERSION(9, 0, 1)))
+#define KFD_SUPPORT_XNACK_PER_PROCESS(dev)\
+ (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2))
struct kfd_event_interrupt_class {
bool (*interrupt_isr)(struct kfd_dev *dev,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 51b1683ac5c1..7acd55a814b2 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -1330,7 +1330,7 @@ bool kfd_process_xnack_mode(struct kfd_process *p, bool supported)
* per-process XNACK mode selection. But let the dev->noretry
* setting still influence the default XNACK mode.
*/
- if (supported && KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2))
+ if (supported && KFD_SUPPORT_XNACK_PER_PROCESS(dev))
continue;
/* GFXv10 and later GPUs do not support shader preemption
@@ -1563,6 +1563,8 @@ err_free_pdd:
int kfd_process_device_init_vm(struct kfd_process_device *pdd,
struct file *drm_file)
{
+ struct amdgpu_fpriv *drv_priv;
+ struct amdgpu_vm *avm;
struct kfd_process *p;
struct kfd_dev *dev;
int ret;
@@ -1573,10 +1575,15 @@ int kfd_process_device_init_vm(struct kfd_process_device *pdd,
if (pdd->drm_priv)
return -EBUSY;
+ ret = amdgpu_file_to_fpriv(drm_file, &drv_priv);
+ if (ret)
+ return ret;
+ avm = &drv_priv->vm;
+
p = pdd->process;
dev = pdd->dev;
- ret = amdgpu_amdkfd_gpuvm_acquire_process_vm(dev->adev, drm_file,
+ ret = amdgpu_amdkfd_gpuvm_acquire_process_vm(dev->adev, avm,
&p->kgd_process_info,
&p->ef);
if (ret) {
@@ -1593,7 +1600,7 @@ int kfd_process_device_init_vm(struct kfd_process_device *pdd,
if (ret)
goto err_init_cwsr;
- ret = amdgpu_amdkfd_gpuvm_set_vm_pasid(dev->adev, drm_file, p->pasid);
+ ret = amdgpu_amdkfd_gpuvm_set_vm_pasid(dev->adev, avm, p->pasid);
if (ret)
goto err_set_pasid;
@@ -1607,6 +1614,7 @@ err_init_cwsr:
kfd_process_device_destroy_ib_mem(pdd);
err_reserve_ib_mem:
pdd->drm_priv = NULL;
+ amdgpu_amdkfd_gpuvm_destroy_cb(dev->adev, avm);
return ret;
}
@@ -1978,8 +1986,8 @@ int kfd_reserved_mem_mmap(struct kfd_dev *dev, struct kfd_process *process,
return -ENOMEM;
}
- vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND
- | VM_NORESERVE | VM_DONTDUMP | VM_PFNMAP;
+ vm_flags_set(vma, VM_IO | VM_DONTCOPY | VM_DONTEXPAND
+ | VM_NORESERVE | VM_DONTDUMP | VM_PFNMAP);
/* Mapping pages to user process */
return remap_pfn_range(vma, vma->vm_start,
PFN_DOWN(__pa(qpd->cwsr_kaddr)),
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index b94d2c1422ad..dc6fd6967050 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -23,6 +23,7 @@
#include <linux/types.h>
#include <linux/sched/task.h>
+#include <drm/ttm/ttm_tt.h>
#include "amdgpu_sync.h"
#include "amdgpu_object.h"
#include "amdgpu_vm.h"
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index af37bc6ed1f5..c420bce47acb 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -28,7 +28,6 @@
#include "dm_services_types.h"
#include "dc.h"
-#include "dc_link_dp.h"
#include "link_enc_cfg.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
@@ -39,6 +38,9 @@
#include "dc/dc_edid_parser.h"
#include "dc/dc_stat.h"
#include "amdgpu_dm_trace.h"
+#include "dpcd_defs.h"
+#include "link/protocols/link_dpcd.h"
+#include "link_service_types.h"
#include "vid.h"
#include "amdgpu.h"
@@ -66,7 +68,7 @@
#include "ivsrcid/ivsrcid_vislands30.h"
-#include "i2caux_interface.h"
+#include <linux/backlight.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
@@ -104,7 +106,6 @@
#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
-#include "modules/inc/mod_info_packet.h"
#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
@@ -210,7 +211,7 @@ static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
struct amdgpu_dm_connector *amdgpu_dm_connector,
- uint32_t link_index,
+ u32 link_index,
struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
struct amdgpu_encoder *aencoder,
@@ -262,7 +263,7 @@ static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
u32 *vbl, u32 *position)
{
- uint32_t v_blank_start, v_blank_end, h_position, v_position;
+ u32 v_blank_start, v_blank_end, h_position, v_position;
if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
return -EINVAL;
@@ -361,7 +362,7 @@ static void dm_pflip_high_irq(void *interrupt_params)
struct amdgpu_device *adev = irq_params->adev;
unsigned long flags;
struct drm_pending_vblank_event *e;
- uint32_t vpos, hpos, v_blank_start, v_blank_end;
+ u32 vpos, hpos, v_blank_start, v_blank_end;
bool vrr_active;
amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
@@ -648,7 +649,7 @@ static void dmub_hpd_callback(struct amdgpu_device *adev,
struct drm_connector *connector;
struct drm_connector_list_iter iter;
struct dc_link *link;
- uint8_t link_index = 0;
+ u8 link_index = 0;
struct drm_device *dev;
if (adev == NULL)
@@ -749,7 +750,7 @@ static void dm_dmub_outbox1_low_irq(void *interrupt_params)
struct amdgpu_device *adev = irq_params->adev;
struct amdgpu_display_manager *dm = &adev->dm;
struct dmcub_trace_buf_entry entry = { 0 };
- uint32_t count = 0;
+ u32 count = 0;
struct dmub_hpd_work *dmub_hpd_wrk;
struct dc_link *plink = NULL;
@@ -1015,7 +1016,7 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
struct dmub_srv_hw_params hw_params;
enum dmub_status status;
const unsigned char *fw_inst_const, *fw_bss_data;
- uint32_t i, fw_inst_const_size, fw_bss_data_size;
+ u32 i, fw_inst_const_size, fw_bss_data_size;
bool has_hw_support;
if (!dmub_srv)
@@ -1176,32 +1177,46 @@ static void dm_dmub_hw_resume(struct amdgpu_device *adev)
static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
{
- uint64_t pt_base;
- uint32_t logical_addr_low;
- uint32_t logical_addr_high;
- uint32_t agp_base, agp_bot, agp_top;
+ u64 pt_base;
+ u32 logical_addr_low;
+ u32 logical_addr_high;
+ u32 agp_base, agp_bot, agp_top;
PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
memset(pa_config, 0, sizeof(*pa_config));
- logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
- pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
-
- if (adev->apu_flags & AMD_APU_IS_RAVEN2)
- /*
- * Raven2 has a HW issue that it is unable to use the vram which
- * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
- * workaround that increase system aperture high address (add 1)
- * to get rid of the VM fault and hardware hang.
- */
- logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
- else
- logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
-
agp_base = 0;
agp_bot = adev->gmc.agp_start >> 24;
agp_top = adev->gmc.agp_end >> 24;
+ /* AGP aperture is disabled */
+ if (agp_bot == agp_top) {
+ logical_addr_low = adev->gmc.fb_start >> 18;
+ if (adev->apu_flags & AMD_APU_IS_RAVEN2)
+ /*
+ * Raven2 has a HW issue that it is unable to use the vram which
+ * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
+ * workaround that increase system aperture high address (add 1)
+ * to get rid of the VM fault and hardware hang.
+ */
+ logical_addr_high = (adev->gmc.fb_end >> 18) + 0x1;
+ else
+ logical_addr_high = adev->gmc.fb_end >> 18;
+ } else {
+ logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
+ if (adev->apu_flags & AMD_APU_IS_RAVEN2)
+ /*
+ * Raven2 has a HW issue that it is unable to use the vram which
+ * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
+ * workaround that increase system aperture high address (add 1)
+ * to get rid of the VM fault and hardware hang.
+ */
+ logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
+ else
+ logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
+ }
+
+ pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
@@ -1225,10 +1240,25 @@ static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_
pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;
- pa_config->is_hvm_enabled = 0;
+ pa_config->is_hvm_enabled = adev->mode_info.gpu_vm_support;
}
+static void force_connector_state(
+ struct amdgpu_dm_connector *aconnector,
+ enum drm_connector_force force_state)
+{
+ struct drm_connector *connector = &aconnector->base;
+
+ mutex_lock(&connector->dev->mode_config.mutex);
+ aconnector->base.force = force_state;
+ mutex_unlock(&connector->dev->mode_config.mutex);
+
+ mutex_lock(&aconnector->hpd_lock);
+ drm_kms_helper_connector_hotplug_event(connector);
+ mutex_unlock(&aconnector->hpd_lock);
+}
+
static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
{
struct hpd_rx_irq_offload_work *offload_work;
@@ -1237,6 +1267,9 @@ static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
struct amdgpu_device *adev;
enum dc_connection_type new_connection_type = dc_connection_none;
unsigned long flags;
+ union test_response test_response;
+
+ memset(&test_response, 0, sizeof(test_response));
offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
aconnector = offload_work->offload_wq->aconnector;
@@ -1250,7 +1283,7 @@ static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
dc_link = aconnector->dc_link;
mutex_lock(&aconnector->hpd_lock);
- if (!dc_link_detect_sink(dc_link, &new_connection_type))
+ if (!dc_link_detect_connection_type(dc_link, &new_connection_type))
DRM_ERROR("KMS: Failed to detect connector\n");
mutex_unlock(&aconnector->hpd_lock);
@@ -1261,15 +1294,49 @@ static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
goto skip;
mutex_lock(&adev->dm.dc_lock);
- if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST)
+ if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
dc_link_dp_handle_automated_test(dc_link);
+
+ if (aconnector->timing_changed) {
+ /* force connector disconnect and reconnect */
+ force_connector_state(aconnector, DRM_FORCE_OFF);
+ msleep(100);
+ force_connector_state(aconnector, DRM_FORCE_UNSPECIFIED);
+ }
+
+ test_response.bits.ACK = 1;
+
+ core_link_write_dpcd(
+ dc_link,
+ DP_TEST_RESPONSE,
+ &test_response.raw,
+ sizeof(test_response));
+ }
else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
- hpd_rx_irq_check_link_loss_status(dc_link, &offload_work->data) &&
+ dc_link_check_link_loss_status(dc_link, &offload_work->data) &&
dc_link_dp_allow_hpd_rx_irq(dc_link)) {
- dc_link_dp_handle_link_loss(dc_link);
+ /* offload_work->data is from handle_hpd_rx_irq->
+ * schedule_hpd_rx_offload_work.this is defer handle
+ * for hpd short pulse. upon here, link status may be
+ * changed, need get latest link status from dpcd
+ * registers. if link status is good, skip run link
+ * training again.
+ */
+ union hpd_irq_data irq_data;
+
+ memset(&irq_data, 0, sizeof(irq_data));
+
+ /* before dc_link_dp_handle_link_loss, allow new link lost handle
+ * request be added to work queue if link lost at end of dc_link_
+ * dp_handle_link_loss
+ */
spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
offload_work->offload_wq->is_handling_link_loss = false;
spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
+
+ if ((dc_link_dp_read_hpd_rx_irq_data(dc_link, &irq_data) == DC_OK) &&
+ dc_link_check_link_loss_status(dc_link, &irq_data))
+ dc_link_dp_handle_link_loss(dc_link);
}
mutex_unlock(&adev->dm.dc_lock);
@@ -1503,6 +1570,8 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
case IP_VERSION(3, 0, 1):
case IP_VERSION(3, 1, 2):
case IP_VERSION(3, 1, 3):
+ case IP_VERSION(3, 1, 4):
+ case IP_VERSION(3, 1, 5):
case IP_VERSION(3, 1, 6):
init_data.flags.gpu_vm_support = true;
break;
@@ -1511,6 +1580,9 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
}
break;
}
+ if (init_data.flags.gpu_vm_support &&
+ (amdgpu_sg_display == 0))
+ init_data.flags.gpu_vm_support = false;
if (init_data.flags.gpu_vm_support)
adev->mode_info.gpu_vm_support = true;
@@ -1532,6 +1604,11 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP2_0)
init_data.flags.allow_lttpr_non_transparent_mode.bits.DP2_0 = true;
+ /* Disable SubVP + DRR config by default */
+ init_data.flags.disable_subvp_drr = true;
+ if (amdgpu_dc_feature_mask & DC_ENABLE_SUBVP_DRR)
+ init_data.flags.disable_subvp_drr = false;
+
init_data.flags.seamless_boot_edp_requested = false;
if (check_seamless_boot_capability(adev)) {
@@ -1587,6 +1664,26 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
/* TODO: Remove after DP2 receiver gets proper support of Cable ID feature */
adev->dm.dc->debug.ignore_cable_id = true;
+ /* TODO: There is a new drm mst change where the freedom of
+ * vc_next_start_slot update is revoked/moved into drm, instead of in
+ * driver. This forces us to make sure to get vc_next_start_slot updated
+ * in drm function each time without considering if mst_state is active
+ * or not. Otherwise, next time hotplug will give wrong start_slot
+ * number. We are implementing a temporary solution to even notify drm
+ * mst deallocation when link is no longer of MST type when uncommitting
+ * the stream so we will have more time to work on a proper solution.
+ * Ideally when dm_helpers_dp_mst_stop_top_mgr message is triggered, we
+ * should notify drm to do a complete "reset" of its states and stop
+ * calling further drm mst functions when link is no longer of an MST
+ * type. This could happen when we unplug an MST hubs/displays. When
+ * uncommit stream comes later after unplug, we should just reset
+ * hardware states only.
+ */
+ adev->dm.dc->debug.temp_mst_deallocation_sequence = true;
+
+ if (adev->dm.dc->caps.dp_hdmi21_pcon_support)
+ DRM_INFO("DP-HDMI FRL PCON supported\n");
+
r = dm_dmub_hw_init(adev);
if (r) {
DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
@@ -1640,7 +1737,10 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
}
#endif
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
- adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
+ adev->dm.secure_display_ctxs = amdgpu_dm_crtc_secure_display_create_contexts(adev);
+ if (!adev->dm.secure_display_ctxs) {
+ DRM_ERROR("amdgpu: failed to initialize secure_display_ctxs.\n");
+ }
#endif
if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
init_completion(&adev->dm.dmub_aux_transfer_done);
@@ -1731,10 +1831,15 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
amdgpu_dm_destroy_drm_device(&adev->dm);
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
- if (adev->dm.crc_rd_wrk) {
- flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
- kfree(adev->dm.crc_rd_wrk);
- adev->dm.crc_rd_wrk = NULL;
+ if (adev->dm.secure_display_ctxs) {
+ for (i = 0; i < adev->mode_info.num_crtc; i++) {
+ if (adev->dm.secure_display_ctxs[i].crtc) {
+ flush_work(&adev->dm.secure_display_ctxs[i].notify_ta_work);
+ flush_work(&adev->dm.secure_display_ctxs[i].forward_roi_work);
+ }
+ }
+ kfree(adev->dm.secure_display_ctxs);
+ adev->dm.secure_display_ctxs = NULL;
}
#endif
#ifdef CONFIG_DRM_AMD_DC_HDCP
@@ -1869,25 +1974,17 @@ static int load_dmcu_fw(struct amdgpu_device *adev)
return 0;
}
- r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
- if (r == -ENOENT) {
+ r = amdgpu_ucode_request(adev, &adev->dm.fw_dmcu, fw_name_dmcu);
+ if (r == -ENODEV) {
/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
adev->dm.fw_dmcu = NULL;
return 0;
}
if (r) {
- dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
- fw_name_dmcu);
- return r;
- }
-
- r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
- if (r) {
dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
fw_name_dmcu);
- release_firmware(adev->dm.fw_dmcu);
- adev->dm.fw_dmcu = NULL;
+ amdgpu_ucode_release(&adev->dm.fw_dmcu);
return r;
}
@@ -1933,7 +2030,6 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
struct dmub_srv_fb_info *fb_info;
struct dmub_srv *dmub_srv;
const struct dmcub_firmware_header_v1_0 *hdr;
- const char *fw_name_dmub;
enum dmub_asic dmub_asic;
enum dmub_status status;
int r;
@@ -1941,73 +2037,43 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
switch (adev->ip_versions[DCE_HWIP][0]) {
case IP_VERSION(2, 1, 0):
dmub_asic = DMUB_ASIC_DCN21;
- fw_name_dmub = FIRMWARE_RENOIR_DMUB;
- if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
- fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
break;
case IP_VERSION(3, 0, 0):
- if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) {
- dmub_asic = DMUB_ASIC_DCN30;
- fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
- } else {
- dmub_asic = DMUB_ASIC_DCN30;
- fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
- }
+ dmub_asic = DMUB_ASIC_DCN30;
break;
case IP_VERSION(3, 0, 1):
dmub_asic = DMUB_ASIC_DCN301;
- fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
break;
case IP_VERSION(3, 0, 2):
dmub_asic = DMUB_ASIC_DCN302;
- fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
break;
case IP_VERSION(3, 0, 3):
dmub_asic = DMUB_ASIC_DCN303;
- fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
break;
case IP_VERSION(3, 1, 2):
case IP_VERSION(3, 1, 3):
dmub_asic = (adev->external_rev_id == YELLOW_CARP_B0) ? DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31;
- fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
break;
case IP_VERSION(3, 1, 4):
dmub_asic = DMUB_ASIC_DCN314;
- fw_name_dmub = FIRMWARE_DCN_314_DMUB;
break;
case IP_VERSION(3, 1, 5):
dmub_asic = DMUB_ASIC_DCN315;
- fw_name_dmub = FIRMWARE_DCN_315_DMUB;
break;
case IP_VERSION(3, 1, 6):
dmub_asic = DMUB_ASIC_DCN316;
- fw_name_dmub = FIRMWARE_DCN316_DMUB;
break;
case IP_VERSION(3, 2, 0):
dmub_asic = DMUB_ASIC_DCN32;
- fw_name_dmub = FIRMWARE_DCN_V3_2_0_DMCUB;
break;
case IP_VERSION(3, 2, 1):
dmub_asic = DMUB_ASIC_DCN321;
- fw_name_dmub = FIRMWARE_DCN_V3_2_1_DMCUB;
break;
default:
/* ASIC doesn't support DMUB. */
return 0;
}
- r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
- if (r) {
- DRM_ERROR("DMUB firmware loading failed: %d\n", r);
- return 0;
- }
-
- r = amdgpu_ucode_validate(adev->dm.dmub_fw);
- if (r) {
- DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
- return 0;
- }
-
hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
@@ -2074,7 +2140,9 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
* TODO: Move this into GART.
*/
r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
+ &adev->dm.dmub_bo,
&adev->dm.dmub_bo_gpu_addr,
&adev->dm.dmub_bo_cpu_addr);
if (r)
@@ -2129,11 +2197,8 @@ static int dm_sw_fini(void *handle)
adev->dm.dmub_srv = NULL;
}
- release_firmware(adev->dm.dmub_fw);
- adev->dm.dmub_fw = NULL;
-
- release_firmware(adev->dm.fw_dmcu);
- adev->dm.fw_dmcu = NULL;
+ amdgpu_ucode_release(&adev->dm.dmub_fw);
+ amdgpu_ucode_release(&adev->dm.fw_dmcu);
return 0;
}
@@ -2159,6 +2224,8 @@ static int detect_mst_link_for_all_connectors(struct drm_device *dev)
DRM_ERROR("DM_MST: Failed to start MST\n");
aconnector->dc_link->type =
dc_connection_single;
+ ret = dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx,
+ aconnector->dc_link);
break;
}
}
@@ -2227,7 +2294,7 @@ static void s3_handle_mst(struct drm_device *dev, bool suspend)
drm_for_each_connector_iter(connector, &iter) {
aconnector = to_amdgpu_dm_connector(connector);
if (aconnector->dc_link->type != dc_connection_mst_branch ||
- aconnector->mst_port)
+ aconnector->mst_root)
continue;
mgr = &aconnector->mst_mgr;
@@ -2480,7 +2547,7 @@ struct amdgpu_dm_connector *
amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
struct drm_crtc *crtc)
{
- uint32_t i;
+ u32 i;
struct drm_connector_state *new_con_state;
struct drm_connector *connector;
struct drm_crtc *crtc_from_state;
@@ -2728,16 +2795,18 @@ static int dm_resume(void *handle)
drm_for_each_connector_iter(connector, &iter) {
aconnector = to_amdgpu_dm_connector(connector);
+ if (!aconnector->dc_link)
+ continue;
+
/*
* this is the case when traversing through already created
* MST connectors, should be skipped
*/
- if (aconnector->dc_link &&
- aconnector->dc_link->type == dc_connection_mst_branch)
+ if (aconnector->dc_link->type == dc_connection_mst_branch)
continue;
mutex_lock(&aconnector->hpd_lock);
- if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
+ if (!dc_link_detect_connection_type(aconnector->dc_link, &new_connection_type))
DRM_ERROR("KMS: Failed to detect connector\n");
if (aconnector->base.force && new_connection_type == dc_connection_none) {
@@ -3015,6 +3084,10 @@ void amdgpu_dm_update_connector_after_detect(
aconnector->edid);
}
+ aconnector->timing_requested = kzalloc(sizeof(struct dc_crtc_timing), GFP_KERNEL);
+ if (!aconnector->timing_requested)
+ dm_error("%s: failed to create aconnector->requested_timing\n", __func__);
+
drm_connector_update_edid_property(connector, aconnector->edid);
amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
update_connector_ext_caps(aconnector);
@@ -3026,6 +3099,8 @@ void amdgpu_dm_update_connector_after_detect(
dc_sink_release(aconnector->dc_sink);
aconnector->dc_sink = NULL;
aconnector->edid = NULL;
+ kfree(aconnector->timing_requested);
+ aconnector->timing_requested = NULL;
#ifdef CONFIG_DRM_AMD_DC_HDCP
/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
@@ -3070,7 +3145,9 @@ static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
if (aconnector->fake_enable)
aconnector->fake_enable = false;
- if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
+ aconnector->timing_changed = false;
+
+ if (!dc_link_detect_connection_type(aconnector->dc_link, &new_connection_type))
DRM_ERROR("KMS: Failed to detect connector\n");
if (aconnector->base.force && new_connection_type == dc_connection_none) {
@@ -3111,8 +3188,8 @@ static void handle_hpd_irq(void *param)
static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
{
- uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
- uint8_t dret;
+ u8 esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
+ u8 dret;
bool new_irq_handled = false;
int dpcd_addr;
int dpcd_bytes_to_read;
@@ -3140,7 +3217,7 @@ static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
while (dret == dpcd_bytes_to_read &&
process_count < max_process_count) {
- uint8_t retry;
+ u8 retry;
dret = 0;
process_count++;
@@ -3159,7 +3236,7 @@ static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
dpcd_bytes_to_read - 1;
for (retry = 0; retry < 3; retry++) {
- uint8_t wret;
+ u8 wret;
wret = drm_dp_dpcd_write(
&aconnector->dm_dp_aux.aux,
@@ -3219,7 +3296,7 @@ static void handle_hpd_rx_irq(void *param)
union hpd_irq_data hpd_irq_data;
bool link_loss = false;
bool has_left_work = false;
- int idx = aconnector->base.index;
+ int idx = dc_link->link_index;
struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];
memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
@@ -3273,7 +3350,7 @@ static void handle_hpd_rx_irq(void *param)
out:
if (result && !is_mst_root_connector) {
/* Downstream Port status changed. */
- if (!dc_link_detect_sink(dc_link, &new_connection_type))
+ if (!dc_link_detect_connection_type(dc_link, &new_connection_type))
DRM_ERROR("KMS: Failed to detect connector\n");
if (aconnector->base.force && new_connection_type == dc_connection_none) {
@@ -3361,7 +3438,7 @@ static void register_hpd_handlers(struct amdgpu_device *adev)
(void *) aconnector);
if (adev->dm.hpd_rx_offload_wq)
- adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
+ adev->dm.hpd_rx_offload_wq[dc_link->link_index].aconnector =
aconnector;
}
}
@@ -4173,15 +4250,16 @@ static void amdgpu_set_panel_orientation(struct drm_connector *connector);
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
{
struct amdgpu_display_manager *dm = &adev->dm;
- int32_t i;
+ s32 i;
struct amdgpu_dm_connector *aconnector = NULL;
struct amdgpu_encoder *aencoder = NULL;
struct amdgpu_mode_info *mode_info = &adev->mode_info;
- uint32_t link_cnt;
- int32_t primary_planes;
+ u32 link_cnt;
+ s32 primary_planes;
enum dc_connection_type new_connection_type = dc_connection_none;
const struct dc_plane_cap *plane;
bool psr_feature_enabled = false;
+ int max_overlay = dm->dc->caps.max_slave_planes;
dm->display_indexes_num = dm->dc->caps.max_streams;
/* Update the actual used number of crtc */
@@ -4236,14 +4314,14 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
if (!plane->pixel_format_support.argb8888)
continue;
+ if (max_overlay-- == 0)
+ break;
+
if (initialize_plane(dm, NULL, primary_planes + i,
DRM_PLANE_TYPE_OVERLAY, plane)) {
DRM_ERROR("KMS: Failed to initialize overlay plane\n");
goto fail;
}
-
- /* Only create one overlay plane. */
- break;
}
for (i = 0; i < dm->dc->caps.max_streams; i++)
@@ -4322,7 +4400,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
link = dc_get_link_at_index(dm->dc, i);
- if (!dc_link_detect_sink(link, &new_connection_type))
+ if (!dc_link_detect_connection_type(link, &new_connection_type))
DRM_ERROR("KMS: Failed to detect connector\n");
if (aconnector->base.force && new_connection_type == dc_connection_none) {
@@ -4498,9 +4576,75 @@ DEVICE_ATTR_WO(s3_debug);
#endif
+static int dm_init_microcode(struct amdgpu_device *adev)
+{
+ char *fw_name_dmub;
+ int r;
+
+ switch (adev->ip_versions[DCE_HWIP][0]) {
+ case IP_VERSION(2, 1, 0):
+ fw_name_dmub = FIRMWARE_RENOIR_DMUB;
+ if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
+ fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
+ break;
+ case IP_VERSION(3, 0, 0):
+ if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0))
+ fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
+ else
+ fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
+ break;
+ case IP_VERSION(3, 0, 1):
+ fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
+ break;
+ case IP_VERSION(3, 0, 2):
+ fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
+ break;
+ case IP_VERSION(3, 0, 3):
+ fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
+ break;
+ case IP_VERSION(3, 1, 2):
+ case IP_VERSION(3, 1, 3):
+ fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
+ break;
+ case IP_VERSION(3, 1, 4):
+ fw_name_dmub = FIRMWARE_DCN_314_DMUB;
+ break;
+ case IP_VERSION(3, 1, 5):
+ fw_name_dmub = FIRMWARE_DCN_315_DMUB;
+ break;
+ case IP_VERSION(3, 1, 6):
+ fw_name_dmub = FIRMWARE_DCN316_DMUB;
+ break;
+ case IP_VERSION(3, 2, 0):
+ fw_name_dmub = FIRMWARE_DCN_V3_2_0_DMCUB;
+ break;
+ case IP_VERSION(3, 2, 1):
+ fw_name_dmub = FIRMWARE_DCN_V3_2_1_DMCUB;
+ break;
+ default:
+ /* ASIC doesn't support DMUB. */
+ return 0;
+ }
+ r = amdgpu_ucode_request(adev, &adev->dm.dmub_fw, fw_name_dmub);
+ if (r)
+ DRM_ERROR("DMUB firmware loading failed: %d\n", r);
+ return r;
+}
+
static int dm_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_mode_info *mode_info = &adev->mode_info;
+ struct atom_context *ctx = mode_info->atom_context;
+ int index = GetIndexIntoMasterTable(DATA, Object_Header);
+ u16 data_offset;
+
+ /* if there is no object header, skip DM */
+ if (!amdgpu_atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) {
+ adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
+ dev_info(adev->dev, "No object header, skipping DM\n");
+ return -ENOENT;
+ }
switch (adev->asic_type) {
#if defined(CONFIG_DRM_AMD_DC_SI)
@@ -4630,7 +4774,7 @@ static int dm_early_init(void *handle)
#endif
adev->dc_enabled = true;
- return 0;
+ return dm_init_microcode(adev);
}
static bool modereset_required(struct drm_crtc_state *crtc_state)
@@ -4695,7 +4839,7 @@ fill_plane_color_attributes(const struct drm_plane_state *plane_state,
static int
fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
const struct drm_plane_state *plane_state,
- const uint64_t tiling_flags,
+ const u64 tiling_flags,
struct dc_plane_info *plane_info,
struct dc_plane_address *address,
bool tmz_surface,
@@ -4870,7 +5014,7 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev,
static inline void fill_dc_dirty_rect(struct drm_plane *plane,
struct rect *dirty_rect, int32_t x,
- int32_t y, int32_t width, int32_t height,
+ s32 y, s32 width, s32 height,
int *i, bool ffu)
{
if (*i > DC_MAX_DIRTY_RECTS)
@@ -4906,6 +5050,7 @@ out:
* @new_plane_state: New state of @plane
* @crtc_state: New state of CRTC connected to the @plane
* @flip_addrs: DC flip tracking struct, which also tracts dirty rects
+ * @dirty_regions_changed: dirty regions changed
*
* For PSR SU, DC informs the DMUB uController of dirty rectangle regions
* (referred to as "damage clips" in DRM nomenclature) that require updating on
@@ -4922,15 +5067,17 @@ static void fill_dc_dirty_rects(struct drm_plane *plane,
struct drm_plane_state *old_plane_state,
struct drm_plane_state *new_plane_state,
struct drm_crtc_state *crtc_state,
- struct dc_flip_addrs *flip_addrs)
+ struct dc_flip_addrs *flip_addrs,
+ bool *dirty_regions_changed)
{
struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
struct rect *dirty_rects = flip_addrs->dirty_rects;
- uint32_t num_clips;
+ u32 num_clips;
struct drm_mode_rect *clips;
bool bb_changed;
bool fb_changed;
- uint32_t i = 0;
+ u32 i = 0;
+ *dirty_regions_changed = false;
/*
* Cursor plane has it's own dirty rect update interface. See
@@ -4975,6 +5122,8 @@ static void fill_dc_dirty_rects(struct drm_plane *plane,
new_plane_state->plane->base.id,
bb_changed, fb_changed, num_clips);
+ *dirty_regions_changed = bb_changed;
+
if (bb_changed) {
fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[i],
new_plane_state->crtc_x,
@@ -5076,7 +5225,7 @@ static enum dc_color_depth
convert_color_depth_from_display_info(const struct drm_connector *connector,
bool is_y420, int requested_bpc)
{
- uint8_t bpc;
+ u8 bpc;
if (is_y420) {
bpc = 8;
@@ -5620,8 +5769,8 @@ static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
uint32_t max_dsc_target_bpp_limit_override)
{
const struct dc_link_settings *verified_link_cap = NULL;
- uint32_t link_bw_in_kbps;
- uint32_t edp_min_bpp_x16, edp_max_bpp_x16;
+ u32 link_bw_in_kbps;
+ u32 edp_min_bpp_x16, edp_max_bpp_x16;
struct dc *dc = sink->ctx->dc;
struct dc_dsc_bw_range bw_range = {0};
struct dc_dsc_config dsc_cfg = {0};
@@ -5678,11 +5827,11 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
struct dsc_dec_dpcd_caps *dsc_caps)
{
struct drm_connector *drm_connector = &aconnector->base;
- uint32_t link_bandwidth_kbps;
+ u32 link_bandwidth_kbps;
struct dc *dc = sink->ctx->dc;
- uint32_t max_supported_bw_in_kbps, timing_bw_in_kbps;
- uint32_t dsc_max_supported_bw_in_kbps;
- uint32_t max_dsc_target_bpp_limit_override =
+ u32 max_supported_bw_in_kbps, timing_bw_in_kbps;
+ u32 dsc_max_supported_bw_in_kbps;
+ u32 max_dsc_target_bpp_limit_override =
drm_connector->display_info.max_dsc_bpp;
link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
@@ -5861,6 +6010,14 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
stream, &mode, &aconnector->base, con_state, old_stream,
requested_bpc);
+ if (aconnector->timing_changed) {
+ DC_LOG_DEBUG("%s: overriding timing for automated test, bpc %d, changing to %d\n",
+ __func__,
+ stream->timing.display_color_depth,
+ aconnector->timing_requested->display_color_depth);
+ stream->timing = *aconnector->timing_requested;
+ }
+
#if defined(CONFIG_DRM_AMD_DC_DCN)
/* SST DSC determination policy */
update_dsc_caps(aconnector, sink, stream, &dsc_caps);
@@ -6053,15 +6210,12 @@ static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
if (aconnector->mst_mgr.dev)
drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
-#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
- defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
for (i = 0; i < dm->num_of_edps; i++) {
if ((link == dm->backlight_link[i]) && dm->backlight_dev[i]) {
backlight_device_unregister(dm->backlight_dev[i]);
dm->backlight_dev[i] = NULL;
}
}
-#endif
if (aconnector->dc_em_sink)
dc_sink_release(aconnector->dc_em_sink);
@@ -6255,7 +6409,6 @@ static enum dc_status dm_validate_stream_and_context(struct dc *dc,
dc_plane_state->plane_size.surface_size.width = stream->src.width;
dc_plane_state->plane_size.chroma_size.height = stream->src.height;
dc_plane_state->plane_size.chroma_size.width = stream->src.width;
- dc_plane_state->tiling_info.gfx9.swizzle = DC_SW_UNKNOWN;
dc_plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
dc_plane_state->tiling_info.gfx9.swizzle = DC_SW_UNKNOWN;
dc_plane_state->rotation = ROTATION_ANGLE_0;
@@ -6553,11 +6706,11 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
int clock, bpp = 0;
bool is_y420 = false;
- if (!aconnector->port || !aconnector->dc_sink)
+ if (!aconnector->mst_output_port || !aconnector->dc_sink)
return 0;
- mst_port = aconnector->port;
- mst_mgr = &aconnector->mst_port->mst_mgr;
+ mst_port = aconnector->mst_output_port;
+ mst_mgr = &aconnector->mst_root->mst_mgr;
if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
return 0;
@@ -6567,7 +6720,7 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
return PTR_ERR(mst_state);
if (!mst_state->pbn_div)
- mst_state->pbn_div = dm_mst_get_pbn_divider(aconnector->mst_port->dc_link);
+ mst_state->pbn_div = dm_mst_get_pbn_divider(aconnector->mst_root->dc_link);
if (!state->duplicated) {
int max_bpc = conn_state->max_requested_bpc;
@@ -6613,7 +6766,7 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
aconnector = to_amdgpu_dm_connector(connector);
- if (!aconnector->port)
+ if (!aconnector->mst_output_port)
continue;
if (!new_con_state || !new_con_state->crtc)
@@ -6653,7 +6806,7 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
dm_conn_state->pbn = pbn;
dm_conn_state->vcpi_slots = slot_num;
- ret = drm_dp_mst_atomic_enable_dsc(state, aconnector->port,
+ ret = drm_dp_mst_atomic_enable_dsc(state, aconnector->mst_output_port,
dm_conn_state->pbn, false);
if (ret < 0)
return ret;
@@ -6661,7 +6814,7 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
continue;
}
- vcpi = drm_dp_mst_atomic_enable_dsc(state, aconnector->port, pbn, true);
+ vcpi = drm_dp_mst_atomic_enable_dsc(state, aconnector->mst_output_port, pbn, true);
if (vcpi < 0)
return vcpi;
@@ -6904,7 +7057,7 @@ static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
const struct drm_display_mode *m;
struct drm_display_mode *new_mode;
uint i;
- uint32_t new_modes_count = 0;
+ u32 new_modes_count = 0;
/* Standard FPS values
*
@@ -6918,7 +7071,7 @@ static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
* 60 - Commonly used
* 48,72,96,120 - Multiples of 24
*/
- static const uint32_t common_rates[] = {
+ static const u32 common_rates[] = {
23976, 24000, 25000, 29970, 30000,
48000, 50000, 60000, 72000, 96000, 120000
};
@@ -6934,8 +7087,8 @@ static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
return 0;
for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
- uint64_t target_vtotal, target_vtotal_diff;
- uint64_t num, den;
+ u64 target_vtotal, target_vtotal_diff;
+ u64 num, den;
if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
continue;
@@ -7034,6 +7187,9 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
aconnector->base.dpms = DRM_MODE_DPMS_OFF;
aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
aconnector->audio_inst = -1;
+ aconnector->pack_sdp_v1_3 = false;
+ aconnector->as_type = ADAPTIVE_SYNC_TYPE_NONE;
+ memset(&aconnector->vsdb_info, 0, sizeof(aconnector->vsdb_info));
mutex_init(&aconnector->hpd_lock);
/*
@@ -7075,7 +7231,7 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
adev->mode_info.underscan_vborder_property,
0);
- if (!aconnector->mst_port)
+ if (!aconnector->mst_root)
drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
/* This defaults to the max in the range, but we want 8bpc for non-edp. */
@@ -7093,7 +7249,7 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
connector_type == DRM_MODE_CONNECTOR_eDP) {
drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
- if (!aconnector->mst_port)
+ if (!aconnector->mst_root)
drm_connector_attach_vrr_capable_property(&aconnector->base);
#ifdef CONFIG_DRM_AMD_DC_HDCP
@@ -7177,7 +7333,7 @@ create_i2c(struct ddc_service *ddc_service,
*/
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
struct amdgpu_dm_connector *aconnector,
- uint32_t link_index,
+ u32 link_index,
struct amdgpu_encoder *aencoder)
{
int res = 0;
@@ -7362,27 +7518,55 @@ is_scaling_state_different(const struct dm_connector_state *dm_state,
}
#ifdef CONFIG_DRM_AMD_DC_HDCP
-static bool is_content_protection_different(struct drm_connector_state *state,
- const struct drm_connector_state *old_state,
- const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
+static bool is_content_protection_different(struct drm_crtc_state *new_crtc_state,
+ struct drm_crtc_state *old_crtc_state,
+ struct drm_connector_state *new_conn_state,
+ struct drm_connector_state *old_conn_state,
+ const struct drm_connector *connector,
+ struct hdcp_workqueue *hdcp_w)
{
struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
- /* Handle: Type0/1 change */
- if (old_state->hdcp_content_type != state->hdcp_content_type &&
- state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
- state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+ pr_debug("[HDCP_DM] connector->index: %x connect_status: %x dpms: %x\n",
+ connector->index, connector->status, connector->dpms);
+ pr_debug("[HDCP_DM] state protection old: %x new: %x\n",
+ old_conn_state->content_protection, new_conn_state->content_protection);
+
+ if (old_crtc_state)
+ pr_debug("[HDCP_DM] old crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
+ old_crtc_state->enable,
+ old_crtc_state->active,
+ old_crtc_state->mode_changed,
+ old_crtc_state->active_changed,
+ old_crtc_state->connectors_changed);
+
+ if (new_crtc_state)
+ pr_debug("[HDCP_DM] NEW crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
+ new_crtc_state->enable,
+ new_crtc_state->active,
+ new_crtc_state->mode_changed,
+ new_crtc_state->active_changed,
+ new_crtc_state->connectors_changed);
+
+ /* hdcp content type change */
+ if (old_conn_state->hdcp_content_type != new_conn_state->hdcp_content_type &&
+ new_conn_state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
+ new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+ pr_debug("[HDCP_DM] Type0/1 change %s :true\n", __func__);
return true;
}
- /* CP is being re enabled, ignore this
- *
- * Handles: ENABLED -> DESIRED
- */
- if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
- state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
- state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
+ /* CP is being re enabled, ignore this */
+ if (old_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
+ new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
+ if (new_crtc_state && new_crtc_state->mode_changed) {
+ new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+ pr_debug("[HDCP_DM] ENABLED->DESIRED & mode_changed %s :true\n", __func__);
+ return true;
+ }
+ new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
+ pr_debug("[HDCP_DM] ENABLED -> DESIRED %s :false\n", __func__);
return false;
}
@@ -7390,9 +7574,9 @@ static bool is_content_protection_different(struct drm_connector_state *state,
*
* Handles: UNDESIRED -> ENABLED
*/
- if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
- state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
- state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+ if (old_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
+ new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
+ new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
/* Stream removed and re-enabled
*
@@ -7402,10 +7586,12 @@ static bool is_content_protection_different(struct drm_connector_state *state,
*
* Handles: DESIRED -> DESIRED (Special case)
*/
- if (!(old_state->crtc && old_state->crtc->enabled) &&
- state->crtc && state->crtc->enabled &&
+ if (!(old_conn_state->crtc && old_conn_state->crtc->enabled) &&
+ new_conn_state->crtc && new_conn_state->crtc->enabled &&
connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
dm_con_state->update_hdcp = false;
+ pr_debug("[HDCP_DM] DESIRED->DESIRED (Stream removed and re-enabled) %s :true\n",
+ __func__);
return true;
}
@@ -7417,35 +7603,42 @@ static bool is_content_protection_different(struct drm_connector_state *state,
*
* Handles: DESIRED -> DESIRED (Special case)
*/
- if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
- connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
+ if (dm_con_state->update_hdcp &&
+ new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
+ connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
dm_con_state->update_hdcp = false;
+ pr_debug("[HDCP_DM] DESIRED->DESIRED (Hot-plug, headless s3, dpms) %s :true\n",
+ __func__);
return true;
}
- /*
- * Handles: UNDESIRED -> UNDESIRED
- * DESIRED -> DESIRED
- * ENABLED -> ENABLED
- */
- if (old_state->content_protection == state->content_protection)
+ if (old_conn_state->content_protection == new_conn_state->content_protection) {
+ if (new_conn_state->content_protection >= DRM_MODE_CONTENT_PROTECTION_DESIRED) {
+ if (new_crtc_state && new_crtc_state->mode_changed) {
+ pr_debug("[HDCP_DM] DESIRED->DESIRED or ENABLE->ENABLE mode_change %s :true\n",
+ __func__);
+ return true;
+ }
+ pr_debug("[HDCP_DM] DESIRED->DESIRED & ENABLE->ENABLE %s :false\n",
+ __func__);
+ return false;
+ }
+
+ pr_debug("[HDCP_DM] UNDESIRED->UNDESIRED %s :false\n", __func__);
return false;
+ }
- /*
- * Handles: UNDESIRED -> DESIRED
- * DESIRED -> UNDESIRED
- * ENABLED -> UNDESIRED
- */
- if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
+ if (new_conn_state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED) {
+ pr_debug("[HDCP_DM] UNDESIRED->DESIRED or DESIRED->UNDESIRED or ENABLED->UNDESIRED %s :true\n",
+ __func__);
return true;
+ }
- /*
- * Handles: DESIRED -> ENABLED
- */
+ pr_debug("[HDCP_DM] DESIRED->ENABLED %s :false\n", __func__);
return false;
}
-
#endif
+
static void remove_stream(struct amdgpu_device *adev,
struct amdgpu_crtc *acrtc,
struct dc_stream_state *stream)
@@ -7487,6 +7680,8 @@ static void update_freesync_state_on_stream(
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
unsigned long flags;
bool pack_sdp_v1_3 = false;
+ struct amdgpu_dm_connector *aconn;
+ enum vrr_packet_type packet_type = PACKET_TYPE_VRR;
if (!new_stream)
return;
@@ -7522,11 +7717,27 @@ static void update_freesync_state_on_stream(
}
}
+ aconn = (struct amdgpu_dm_connector *)new_stream->dm_stream_context;
+
+ if (aconn && aconn->as_type == FREESYNC_TYPE_PCON_IN_WHITELIST) {
+ pack_sdp_v1_3 = aconn->pack_sdp_v1_3;
+
+ if (aconn->vsdb_info.amd_vsdb_version == 1)
+ packet_type = PACKET_TYPE_FS_V1;
+ else if (aconn->vsdb_info.amd_vsdb_version == 2)
+ packet_type = PACKET_TYPE_FS_V2;
+ else if (aconn->vsdb_info.amd_vsdb_version == 3)
+ packet_type = PACKET_TYPE_FS_V3;
+
+ mod_build_adaptive_sync_infopacket(new_stream, aconn->as_type, NULL,
+ &new_stream->adaptive_sync_infopacket);
+ }
+
mod_freesync_build_vrr_infopacket(
dm->freesync_module,
new_stream,
&vrr_params,
- PACKET_TYPE_VRR,
+ packet_type,
TRANSFER_FUNC_UNKNOWN,
&vrr_infopacket,
pack_sdp_v1_3);
@@ -7540,6 +7751,7 @@ static void update_freesync_state_on_stream(
new_crtc_state->vrr_infopacket = vrr_infopacket;
new_stream->vrr_infopacket = vrr_infopacket;
+ new_stream->allow_freesync = mod_freesync_get_freesync_enabled(&vrr_params);
if (new_crtc_state->freesync_vrr_info_changed)
DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
@@ -7661,8 +7873,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
struct drm_crtc *pcrtc,
bool wait_for_vblank)
{
- uint32_t i;
- uint64_t timestamp_ns;
+ u32 i;
+ u64 timestamp_ns = ktime_get_ns();
struct drm_plane *plane;
struct drm_plane_state *old_plane_state, *new_plane_state;
struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
@@ -7673,10 +7885,11 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
int planes_count = 0, vpos, hpos;
unsigned long flags;
- uint32_t target_vblank, last_flip_vblank;
+ u32 target_vblank, last_flip_vblank;
bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
bool cursor_update = false;
bool pflip_present = false;
+ bool dirty_rects_changed = false;
struct {
struct dc_surface_update surface_updates[MAX_SURFACES];
struct dc_plane_info plane_infos[MAX_SURFACES];
@@ -7764,10 +7977,32 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
bundle->surface_updates[planes_count].plane_info =
&bundle->plane_infos[planes_count];
- if (acrtc_state->stream->link->psr_settings.psr_feature_enabled)
+ if (acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
fill_dc_dirty_rects(plane, old_plane_state,
new_plane_state, new_crtc_state,
- &bundle->flip_addrs[planes_count]);
+ &bundle->flip_addrs[planes_count],
+ &dirty_rects_changed);
+
+ /*
+ * If the dirty regions changed, PSR-SU need to be disabled temporarily
+ * and enabled it again after dirty regions are stable to avoid video glitch.
+ * PSR-SU will be enabled in vblank_control_worker() if user pause the video
+ * during the PSR-SU was disabled.
+ */
+ if (acrtc_state->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 &&
+ acrtc_attach->dm_irq_params.allow_psr_entry &&
+#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
+ !amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) &&
+#endif
+ dirty_rects_changed) {
+ mutex_lock(&dm->dc_lock);
+ acrtc_state->stream->link->psr_settings.psr_dirty_rects_change_timestamp_ns =
+ timestamp_ns;
+ if (acrtc_state->stream->link->psr_settings.psr_allow_active)
+ amdgpu_dm_psr_disable(acrtc_state->stream);
+ mutex_unlock(&dm->dc_lock);
+ }
+ }
/*
* Only allow immediate flips for fast updates that don't
@@ -7986,7 +8221,10 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
!amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) &&
#endif
- !acrtc_state->stream->link->psr_settings.psr_allow_active)
+ !acrtc_state->stream->link->psr_settings.psr_allow_active &&
+ (timestamp_ns -
+ acrtc_state->stream->link->psr_settings.psr_dirty_rects_change_timestamp_ns) >
+ 500000000)
amdgpu_dm_psr_enable(acrtc_state->stream);
} else {
acrtc_attach->dm_irq_params.allow_psr_entry = false;
@@ -8111,7 +8349,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
struct amdgpu_display_manager *dm = &adev->dm;
struct dm_atomic_state *dm_state;
struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
- uint32_t i, j;
+ u32 i, j;
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
unsigned long flags;
@@ -8285,10 +8523,61 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+ pr_debug("[HDCP_DM] -------------- i : %x ----------\n", i);
+
+ if (!connector)
+ continue;
+
+ pr_debug("[HDCP_DM] connector->index: %x connect_status: %x dpms: %x\n",
+ connector->index, connector->status, connector->dpms);
+ pr_debug("[HDCP_DM] state protection old: %x new: %x\n",
+ old_con_state->content_protection, new_con_state->content_protection);
+
+ if (aconnector->dc_sink) {
+ if (aconnector->dc_sink->sink_signal != SIGNAL_TYPE_VIRTUAL &&
+ aconnector->dc_sink->sink_signal != SIGNAL_TYPE_NONE) {
+ pr_debug("[HDCP_DM] pipe_ctx dispname=%s\n",
+ aconnector->dc_sink->edid_caps.display_name);
+ }
+ }
+
+ new_crtc_state = NULL;
+ old_crtc_state = NULL;
+
+ if (acrtc) {
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
+ old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
+ }
+
+ if (old_crtc_state)
+ pr_debug("old crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
+ old_crtc_state->enable,
+ old_crtc_state->active,
+ old_crtc_state->mode_changed,
+ old_crtc_state->active_changed,
+ old_crtc_state->connectors_changed);
+
+ if (new_crtc_state)
+ pr_debug("NEW crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
+ new_crtc_state->enable,
+ new_crtc_state->active,
+ new_crtc_state->mode_changed,
+ new_crtc_state->active_changed,
+ new_crtc_state->connectors_changed);
+ }
+
+ for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
+ struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+
new_crtc_state = NULL;
+ old_crtc_state = NULL;
- if (acrtc)
+ if (acrtc) {
new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
+ old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
+ }
dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
@@ -8300,11 +8589,44 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
continue;
}
- if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
+ if (is_content_protection_different(new_crtc_state, old_crtc_state, new_con_state,
+ old_con_state, connector, adev->dm.hdcp_workqueue)) {
+ /* when display is unplugged from mst hub, connctor will
+ * be destroyed within dm_dp_mst_connector_destroy. connector
+ * hdcp perperties, like type, undesired, desired, enabled,
+ * will be lost. So, save hdcp properties into hdcp_work within
+ * amdgpu_dm_atomic_commit_tail. if the same display is
+ * plugged back with same display index, its hdcp properties
+ * will be retrieved from hdcp_work within dm_dp_mst_get_modes
+ */
+
+ bool enable_encryption = false;
+
+ if (new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED)
+ enable_encryption = true;
+
+ if (aconnector->dc_link && aconnector->dc_sink &&
+ aconnector->dc_link->type == dc_connection_mst_branch) {
+ struct hdcp_workqueue *hdcp_work = adev->dm.hdcp_workqueue;
+ struct hdcp_workqueue *hdcp_w =
+ &hdcp_work[aconnector->dc_link->link_index];
+
+ hdcp_w->hdcp_content_type[connector->index] =
+ new_con_state->hdcp_content_type;
+ hdcp_w->content_protection[connector->index] =
+ new_con_state->content_protection;
+ }
+
+ if (new_crtc_state && new_crtc_state->mode_changed &&
+ new_con_state->content_protection >= DRM_MODE_CONTENT_PROTECTION_DESIRED)
+ enable_encryption = true;
+
+ DRM_INFO("[HDCP_DM] hdcp_update_display enable_encryption = %x\n", enable_encryption);
+
hdcp_update_display(
adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
- new_con_state->hdcp_content_type,
- new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
+ new_con_state->hdcp_content_type, enable_encryption);
+ }
}
#endif
@@ -8402,9 +8724,6 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
#ifdef CONFIG_DEBUG_FS
enum amdgpu_dm_pipe_crc_source cur_crc_src;
-#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
- struct crc_rd_work *crc_rd_wrk;
-#endif
#endif
/* Count number of newly disabled CRTCs for dropping PM refs later. */
if (old_crtc_state->active && !new_crtc_state->active)
@@ -8417,9 +8736,6 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
update_stream_irq_parameters(dm, dm_new_crtc_state);
#ifdef CONFIG_DEBUG_FS
-#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
- crc_rd_wrk = dm->crc_rd_wrk;
-#endif
spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
cur_crc_src = acrtc->dm_irq_params.crc_src;
spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
@@ -8448,10 +8764,12 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
if (amdgpu_dm_crc_window_is_activated(crtc)) {
spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
acrtc->dm_irq_params.window_param.update_win = true;
+
+ /**
+ * It takes 2 frames for HW to stably generate CRC when
+ * resuming from suspend, so we set skip_frame_cnt 2.
+ */
acrtc->dm_irq_params.window_param.skip_frame_cnt = 2;
- spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
- crc_rd_wrk->crtc = crtc;
- spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
}
#endif
@@ -8742,7 +9060,7 @@ is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
}
static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state) {
- uint64_t num, den, res;
+ u64 num, den, res;
struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
@@ -9198,7 +9516,8 @@ static int dm_update_plane_state(struct dc *dc,
struct drm_plane_state *old_plane_state,
struct drm_plane_state *new_plane_state,
bool enable,
- bool *lock_and_validation_needed)
+ bool *lock_and_validation_needed,
+ bool *is_top_most_overlay)
{
struct dm_atomic_state *dm_state = NULL;
@@ -9306,6 +9625,14 @@ static int dm_update_plane_state(struct dc *dc,
if (!dc_new_plane_state)
return -ENOMEM;
+ /* Block top most plane from being a video plane */
+ if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
+ if (is_video_format(new_plane_state->fb->format->format) && *is_top_most_overlay)
+ return -EINVAL;
+ else
+ *is_top_most_overlay = false;
+ }
+
DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
plane->base.id, new_plane_crtc->base.id);
@@ -9449,7 +9776,7 @@ static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm
continue;
aconnector = to_amdgpu_dm_connector(connector);
- if (!aconnector->port || !aconnector->mst_port)
+ if (!aconnector->mst_output_port || !aconnector->mst_root)
aconnector = NULL;
else
break;
@@ -9458,7 +9785,7 @@ static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm
if (!aconnector)
return 0;
- return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
+ return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_root->mst_mgr);
}
#endif
@@ -9502,6 +9829,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
enum dc_status status;
int ret, i;
bool lock_and_validation_needed = false;
+ bool is_top_most_overlay = true;
struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
#if defined(CONFIG_DRM_AMD_DC_DCN)
struct drm_dp_mst_topology_mgr *mgr;
@@ -9628,7 +9956,11 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
* `dcn10_can_pipe_disable_cursor`). By now, all modified planes are in
* atomic state, so call drm helper to normalize zpos.
*/
- drm_atomic_normalize_zpos(dev, state);
+ ret = drm_atomic_normalize_zpos(dev, state);
+ if (ret) {
+ drm_dbg(dev, "drm_atomic_normalize_zpos() failed\n");
+ goto fail;
+ }
/* Remove exiting planes if they are modified */
for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
@@ -9636,7 +9968,8 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
old_plane_state,
new_plane_state,
false,
- &lock_and_validation_needed);
+ &lock_and_validation_needed,
+ &is_top_most_overlay);
if (ret) {
DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
goto fail;
@@ -9675,7 +10008,8 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
old_plane_state,
new_plane_state,
true,
- &lock_and_validation_needed);
+ &lock_and_validation_needed,
+ &is_top_most_overlay);
if (ret) {
DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
goto fail;
@@ -9910,7 +10244,7 @@ fail:
static bool is_dp_capable_without_timing_msa(struct dc *dc,
struct amdgpu_dm_connector *amdgpu_dm_connector)
{
- uint8_t dpcd_data;
+ u8 dpcd_data;
bool capable = false;
if (amdgpu_dm_connector->dc_link &&
@@ -9929,7 +10263,7 @@ static bool is_dp_capable_without_timing_msa(struct dc *dc,
static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
unsigned int offset,
unsigned int total_length,
- uint8_t *data,
+ u8 *data,
unsigned int length,
struct amdgpu_hdmi_vsdb_info *vsdb)
{
@@ -9984,7 +10318,7 @@ static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
}
static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
- uint8_t *edid_ext, int len,
+ u8 *edid_ext, int len,
struct amdgpu_hdmi_vsdb_info *vsdb_info)
{
int i;
@@ -10025,7 +10359,7 @@ static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
}
static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
- uint8_t *edid_ext, int len,
+ u8 *edid_ext, int len,
struct amdgpu_hdmi_vsdb_info *vsdb_info)
{
int i;
@@ -10041,21 +10375,25 @@ static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
}
static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
- uint8_t *edid_ext, int len,
+ u8 *edid_ext, int len,
struct amdgpu_hdmi_vsdb_info *vsdb_info)
{
struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
+ bool ret;
+ mutex_lock(&adev->dm.dc_lock);
if (adev->dm.dmub_srv)
- return parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
+ ret = parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
else
- return parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
+ ret = parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
+ mutex_unlock(&adev->dm.dc_lock);
+ return ret;
}
static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
{
- uint8_t *edid_ext = NULL;
+ u8 *edid_ext = NULL;
int i;
bool valid_vsdb_found = false;
@@ -10110,6 +10448,7 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
bool freesync_capable = false;
+ enum adaptive_sync_type as_type = ADAPTIVE_SYNC_TYPE_NONE;
if (!connector->state) {
DRM_ERROR("%s - Connector has no state", __func__);
@@ -10202,6 +10541,26 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
}
}
+ as_type = dm_get_adaptive_sync_support_type(amdgpu_dm_connector->dc_link);
+
+ if (as_type == FREESYNC_TYPE_PCON_IN_WHITELIST) {
+ i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
+ if (i >= 0 && vsdb_info.freesync_supported && vsdb_info.amd_vsdb_version > 0) {
+
+ amdgpu_dm_connector->pack_sdp_v1_3 = true;
+ amdgpu_dm_connector->as_type = as_type;
+ amdgpu_dm_connector->vsdb_info = vsdb_info;
+
+ amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
+ amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
+ if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
+ freesync_capable = true;
+
+ connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
+ connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
+ }
+ }
+
update:
if (dm_con_state)
dm_con_state->freesync_capable = freesync_capable;
@@ -10231,7 +10590,7 @@ void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
}
void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
- uint32_t value, const char *func_name)
+ u32 value, const char *func_name)
{
#ifdef DM_CHECK_ADDR_0
if (address == 0) {
@@ -10246,7 +10605,7 @@ void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
const char *func_name)
{
- uint32_t value;
+ u32 value;
#ifdef DM_CHECK_ADDR_0
if (address == 0) {
DC_ERR("invalid register read; address = 0\n");
@@ -10325,6 +10684,7 @@ int amdgpu_dm_process_dmub_aux_transfer_sync(
ret = p_notify->aux_reply.length;
*operation_result = p_notify->result;
out:
+ reinit_completion(&adev->dm.dmub_aux_transfer_done);
mutex_unlock(&adev->dm.dpia_aux_lock);
return ret;
}
@@ -10352,6 +10712,8 @@ int amdgpu_dm_process_dmub_set_config_sync(
*operation_result = SET_CONFIG_UNKNOWN_ERROR;
}
+ if (!is_cmd_complete)
+ reinit_completion(&adev->dm.dmub_aux_transfer_done);
mutex_unlock(&adev->dm.dpia_aux_lock);
return ret;
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index df3c25e32c65..ed5cbe9da40c 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -31,6 +31,7 @@
#include <drm/drm_connector.h>
#include <drm/drm_crtc.h>
#include <drm/drm_plane.h>
+#include "link_service_types.h"
/*
* This file contains the definition for amdgpu_display_manager
@@ -58,6 +59,7 @@
#include "irq_types.h"
#include "signal_types.h"
#include "amdgpu_dm_crc.h"
+#include "mod_info_packet.h"
struct aux_payload;
struct set_config_cmd_payload;
enum aux_return_code_type;
@@ -494,11 +496,12 @@ struct amdgpu_display_manager {
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
/**
- * @crc_rd_wrk:
+ * @secure_display_ctxs:
*
- * Work to be executed in a separate thread to communicate with PSP.
+ * Store the ROI information and the work_struct to command dmub and psp for
+ * all crtcs.
*/
- struct crc_rd_work *crc_rd_wrk;
+ struct secure_display_context *secure_display_ctxs;
#endif
/**
* @hpd_rx_offload_wq:
@@ -575,6 +578,36 @@ enum mst_progress_status {
MST_CLEAR_ALLOCATED_PAYLOAD = BIT(3),
};
+/**
+ * struct amdgpu_hdmi_vsdb_info - Keep track of the VSDB info
+ *
+ * AMDGPU supports FreeSync over HDMI by using the VSDB section, and this
+ * struct is useful to keep track of the display-specific information about
+ * FreeSync.
+ */
+struct amdgpu_hdmi_vsdb_info {
+ /**
+ * @amd_vsdb_version: Vendor Specific Data Block Version, should be
+ * used to determine which Vendor Specific InfoFrame (VSIF) to send.
+ */
+ unsigned int amd_vsdb_version;
+
+ /**
+ * @freesync_supported: FreeSync Supported.
+ */
+ bool freesync_supported;
+
+ /**
+ * @min_refresh_rate_hz: FreeSync Minimum Refresh Rate in Hz.
+ */
+ unsigned int min_refresh_rate_hz;
+
+ /**
+ * @max_refresh_rate_hz: FreeSync Maximum Refresh Rate in Hz
+ */
+ unsigned int max_refresh_rate_hz;
+};
+
struct amdgpu_dm_connector {
struct drm_connector base;
@@ -603,8 +636,8 @@ struct amdgpu_dm_connector {
/* DM only */
struct drm_dp_mst_topology_mgr mst_mgr;
struct amdgpu_dm_dp_aux dm_dp_aux;
- struct drm_dp_mst_port *port;
- struct amdgpu_dm_connector *mst_port;
+ struct drm_dp_mst_port *mst_output_port;
+ struct amdgpu_dm_connector *mst_root;
struct drm_dp_aux *dsc_aux;
/* TODO see if we can merge with ddc_bus or make a dm_connector */
struct amdgpu_i2c_adapter *i2c;
@@ -643,6 +676,15 @@ struct amdgpu_dm_connector {
/* Record progress status of mst*/
uint8_t mst_status;
+
+ /* Automated testing */
+ bool timing_changed;
+ struct dc_crtc_timing *timing_requested;
+
+ /* Adaptive Sync */
+ bool pack_sdp_v1_3;
+ enum adaptive_sync_type as_type;
+ struct amdgpu_hdmi_vsdb_info vsdb_info;
};
static inline void amdgpu_dm_set_mst_status(uint8_t *status,
@@ -713,37 +755,6 @@ struct dm_connector_state {
uint64_t pbn;
};
-/**
- * struct amdgpu_hdmi_vsdb_info - Keep track of the VSDB info
- *
- * AMDGPU supports FreeSync over HDMI by using the VSDB section, and this
- * struct is useful to keep track of the display-specific information about
- * FreeSync.
- */
-struct amdgpu_hdmi_vsdb_info {
- /**
- * @amd_vsdb_version: Vendor Specific Data Block Version, should be
- * used to determine which Vendor Specific InfoFrame (VSIF) to send.
- */
- unsigned int amd_vsdb_version;
-
- /**
- * @freesync_supported: FreeSync Supported.
- */
- bool freesync_supported;
-
- /**
- * @min_refresh_rate_hz: FreeSync Minimum Refresh Rate in Hz.
- */
- unsigned int min_refresh_rate_hz;
-
- /**
- * @max_refresh_rate_hz: FreeSync Maximum Refresh Rate in Hz
- */
- unsigned int max_refresh_rate_hz;
-};
-
-
#define to_dm_connector_state(x)\
container_of((x), struct dm_connector_state, base)
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
index 66df2394d7e4..27711743c22c 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
@@ -101,35 +101,44 @@ static void amdgpu_dm_set_crc_window_default(struct drm_crtc *crtc)
static void amdgpu_dm_crtc_notify_ta_to_read(struct work_struct *work)
{
- struct crc_rd_work *crc_rd_wrk;
- struct amdgpu_device *adev;
+ struct secure_display_context *secure_display_ctx;
struct psp_context *psp;
- struct securedisplay_cmd *securedisplay_cmd;
+ struct ta_securedisplay_cmd *securedisplay_cmd;
struct drm_crtc *crtc;
- uint8_t phy_id;
+ struct dc_stream_state *stream;
+ uint8_t phy_inst;
int ret;
- crc_rd_wrk = container_of(work, struct crc_rd_work, notify_ta_work);
- spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
- crtc = crc_rd_wrk->crtc;
+ secure_display_ctx = container_of(work, struct secure_display_context, notify_ta_work);
+ crtc = secure_display_ctx->crtc;
if (!crtc) {
- spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
return;
}
- adev = drm_to_adev(crtc->dev);
- psp = &adev->psp;
- phy_id = crc_rd_wrk->phy_inst;
- spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
+ psp = &drm_to_adev(crtc->dev)->psp;
+
+ if (!psp->securedisplay_context.context.initialized) {
+ DRM_DEBUG_DRIVER("Secure Display fails to notify PSP TA\n");
+ return;
+ }
+
+ stream = to_amdgpu_crtc(crtc)->dm_irq_params.stream;
+ phy_inst = stream->link->link_enc_hw_inst;
+ /* need lock for multiple crtcs to use the command buffer */
mutex_lock(&psp->securedisplay_context.mutex);
psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd,
TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC);
- securedisplay_cmd->securedisplay_in_message.send_roi_crc.phy_id =
- phy_id;
+
+ securedisplay_cmd->securedisplay_in_message.send_roi_crc.phy_id = phy_inst;
+
+ /* PSP TA is expected to finish data transmission over I2C within current frame,
+ * even there are up to 4 crtcs request to send in this frame.
+ */
ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC);
+
if (!ret) {
if (securedisplay_cmd->status != TA_SECUREDISPLAY_STATUS__SUCCESS) {
psp_securedisplay_parse_resp_status(psp, securedisplay_cmd->status);
@@ -142,17 +151,23 @@ static void amdgpu_dm_crtc_notify_ta_to_read(struct work_struct *work)
static void
amdgpu_dm_forward_crc_window(struct work_struct *work)
{
- struct crc_fw_work *crc_fw_wrk;
+ struct secure_display_context *secure_display_ctx;
struct amdgpu_display_manager *dm;
+ struct drm_crtc *crtc;
+ struct dc_stream_state *stream;
- crc_fw_wrk = container_of(work, struct crc_fw_work, forward_roi_work);
- dm = crc_fw_wrk->dm;
+ secure_display_ctx = container_of(work, struct secure_display_context, forward_roi_work);
+ crtc = secure_display_ctx->crtc;
+
+ if (!crtc)
+ return;
+
+ dm = &drm_to_adev(crtc->dev)->dm;
+ stream = to_amdgpu_crtc(crtc)->dm_irq_params.stream;
mutex_lock(&dm->dc_lock);
- dc_stream_forward_crc_window(dm->dc, &crc_fw_wrk->rect, crc_fw_wrk->stream, crc_fw_wrk->is_stop_cmd);
+ dc_stream_forward_crc_window(stream, &secure_display_ctx->rect, false);
mutex_unlock(&dm->dc_lock);
-
- kfree(crc_fw_wrk);
}
bool amdgpu_dm_crc_window_is_activated(struct drm_crtc *crtc)
@@ -189,6 +204,9 @@ int amdgpu_dm_crtc_configure_crc_source(struct drm_crtc *crtc,
struct dm_crtc_state *dm_crtc_state,
enum amdgpu_dm_pipe_crc_source source)
{
+#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+ int i;
+#endif
struct amdgpu_device *adev = drm_to_adev(crtc->dev);
struct dc_stream_state *stream_state = dm_crtc_state->stream;
bool enable = amdgpu_dm_is_valid_crc_source(source);
@@ -200,21 +218,18 @@ int amdgpu_dm_crtc_configure_crc_source(struct drm_crtc *crtc,
mutex_lock(&adev->dm.dc_lock);
- /* Enable CRTC CRC generation if necessary. */
+ /* Enable or disable CRTC CRC generation */
if (dm_is_crc_source_crtc(source) || source == AMDGPU_DM_PIPE_CRC_SOURCE_NONE) {
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+ /* Disable secure_display if it was enabled */
if (!enable) {
- if (adev->dm.crc_rd_wrk) {
- flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
- spin_lock_irq(&adev->dm.crc_rd_wrk->crc_rd_work_lock);
-
- if (adev->dm.crc_rd_wrk->crtc == crtc) {
+ for (i = 0; i < adev->mode_info.num_crtc; i++) {
+ if (adev->dm.secure_display_ctxs[i].crtc == crtc) {
/* stop ROI update on this crtc */
- dc_stream_forward_crc_window(stream_state->ctx->dc,
- NULL, stream_state, true);
- adev->dm.crc_rd_wrk->crtc = NULL;
+ flush_work(&adev->dm.secure_display_ctxs[i].notify_ta_work);
+ flush_work(&adev->dm.secure_display_ctxs[i].forward_roi_work);
+ dc_stream_forward_crc_window(stream_state, NULL, true);
}
- spin_unlock_irq(&adev->dm.crc_rd_wrk->crc_rd_work_lock);
}
}
#endif
@@ -329,7 +344,7 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
goto cleanup;
}
- aux = (aconn->port) ? &aconn->port->aux : &aconn->dm_dp_aux.aux;
+ aux = (aconn->mst_output_port) ? &aconn->mst_output_port->aux : &aconn->dm_dp_aux.aux;
if (!aux) {
DRM_DEBUG_DRIVER("No dp aux for amd connector\n");
@@ -347,6 +362,7 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
}
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+ /* Reset secure_display when we change crc source from debugfs */
amdgpu_dm_set_crc_window_default(crtc);
#endif
@@ -456,14 +472,12 @@ void amdgpu_dm_crtc_handle_crc_irq(struct drm_crtc *crtc)
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
void amdgpu_dm_crtc_handle_crc_window_irq(struct drm_crtc *crtc)
{
- struct dc_stream_state *stream_state;
struct drm_device *drm_dev = NULL;
enum amdgpu_dm_pipe_crc_source cur_crc_src;
struct amdgpu_crtc *acrtc = NULL;
struct amdgpu_device *adev = NULL;
- struct crc_rd_work *crc_rd_wrk;
- struct crc_fw_work *crc_fw_wrk;
- unsigned long flags1, flags2;
+ struct secure_display_context *secure_display_ctx = NULL;
+ unsigned long flags1;
if (crtc == NULL)
return;
@@ -473,75 +487,76 @@ void amdgpu_dm_crtc_handle_crc_window_irq(struct drm_crtc *crtc)
drm_dev = crtc->dev;
spin_lock_irqsave(&drm_dev->event_lock, flags1);
- stream_state = acrtc->dm_irq_params.stream;
cur_crc_src = acrtc->dm_irq_params.crc_src;
/* Early return if CRC capture is not enabled. */
- if (!amdgpu_dm_is_valid_crc_source(cur_crc_src))
+ if (!amdgpu_dm_is_valid_crc_source(cur_crc_src) ||
+ !dm_is_crc_source_crtc(cur_crc_src))
goto cleanup;
- if (!dm_is_crc_source_crtc(cur_crc_src))
+ if (!acrtc->dm_irq_params.window_param.activated)
goto cleanup;
- if (!acrtc->dm_irq_params.window_param.activated)
+ if (acrtc->dm_irq_params.window_param.skip_frame_cnt) {
+ acrtc->dm_irq_params.window_param.skip_frame_cnt -= 1;
goto cleanup;
+ }
- if (acrtc->dm_irq_params.window_param.update_win) {
- if (acrtc->dm_irq_params.window_param.skip_frame_cnt) {
- acrtc->dm_irq_params.window_param.skip_frame_cnt -= 1;
- goto cleanup;
- }
+ secure_display_ctx = &adev->dm.secure_display_ctxs[acrtc->crtc_id];
+ if (WARN_ON(secure_display_ctx->crtc != crtc)) {
+ /* We have set the crtc when creating secure_display_context,
+ * don't expect it to be changed here.
+ */
+ secure_display_ctx->crtc = crtc;
+ }
+ if (acrtc->dm_irq_params.window_param.update_win) {
/* prepare work for dmub to update ROI */
- crc_fw_wrk = kzalloc(sizeof(*crc_fw_wrk), GFP_ATOMIC);
- if (!crc_fw_wrk)
- goto cleanup;
-
- INIT_WORK(&crc_fw_wrk->forward_roi_work, amdgpu_dm_forward_crc_window);
- crc_fw_wrk->dm = &adev->dm;
- crc_fw_wrk->stream = stream_state;
- crc_fw_wrk->rect.x = acrtc->dm_irq_params.window_param.x_start;
- crc_fw_wrk->rect.y = acrtc->dm_irq_params.window_param.y_start;
- crc_fw_wrk->rect.width = acrtc->dm_irq_params.window_param.x_end -
+ secure_display_ctx->rect.x = acrtc->dm_irq_params.window_param.x_start;
+ secure_display_ctx->rect.y = acrtc->dm_irq_params.window_param.y_start;
+ secure_display_ctx->rect.width = acrtc->dm_irq_params.window_param.x_end -
acrtc->dm_irq_params.window_param.x_start;
- crc_fw_wrk->rect.height = acrtc->dm_irq_params.window_param.y_end -
+ secure_display_ctx->rect.height = acrtc->dm_irq_params.window_param.y_end -
acrtc->dm_irq_params.window_param.y_start;
- schedule_work(&crc_fw_wrk->forward_roi_work);
+ schedule_work(&secure_display_ctx->forward_roi_work);
acrtc->dm_irq_params.window_param.update_win = false;
+
+ /* Statically skip 1 frame, because we may need to wait below things
+ * before sending ROI to dmub:
+ * 1. We defer the work by using system workqueue.
+ * 2. We may need to wait for dc_lock before accessing dmub.
+ */
acrtc->dm_irq_params.window_param.skip_frame_cnt = 1;
} else {
- if (acrtc->dm_irq_params.window_param.skip_frame_cnt) {
- acrtc->dm_irq_params.window_param.skip_frame_cnt -= 1;
- goto cleanup;
- }
-
- if (adev->dm.crc_rd_wrk) {
- crc_rd_wrk = adev->dm.crc_rd_wrk;
- spin_lock_irqsave(&crc_rd_wrk->crc_rd_work_lock, flags2);
- crc_rd_wrk->phy_inst = stream_state->link->link_enc_hw_inst;
- spin_unlock_irqrestore(&crc_rd_wrk->crc_rd_work_lock, flags2);
- schedule_work(&crc_rd_wrk->notify_ta_work);
- }
+ /* prepare work for psp to read ROI/CRC and send to I2C */
+ schedule_work(&secure_display_ctx->notify_ta_work);
}
cleanup:
spin_unlock_irqrestore(&drm_dev->event_lock, flags1);
}
-struct crc_rd_work *amdgpu_dm_crtc_secure_display_create_work(void)
+struct secure_display_context *
+amdgpu_dm_crtc_secure_display_create_contexts(struct amdgpu_device *adev)
{
- struct crc_rd_work *crc_rd_wrk = NULL;
+ struct secure_display_context *secure_display_ctxs = NULL;
+ int i;
- crc_rd_wrk = kzalloc(sizeof(*crc_rd_wrk), GFP_KERNEL);
+ secure_display_ctxs = kcalloc(adev->mode_info.num_crtc,
+ sizeof(struct secure_display_context),
+ GFP_KERNEL);
- if (!crc_rd_wrk)
+ if (!secure_display_ctxs)
return NULL;
- spin_lock_init(&crc_rd_wrk->crc_rd_work_lock);
- INIT_WORK(&crc_rd_wrk->notify_ta_work, amdgpu_dm_crtc_notify_ta_to_read);
+ for (i = 0; i < adev->mode_info.num_crtc; i++) {
+ INIT_WORK(&secure_display_ctxs[i].forward_roi_work, amdgpu_dm_forward_crc_window);
+ INIT_WORK(&secure_display_ctxs[i].notify_ta_work, amdgpu_dm_crtc_notify_ta_to_read);
+ secure_display_ctxs[i].crtc = &adev->mode_info.crtcs[i]->base;
+ }
- return crc_rd_wrk;
+ return secure_display_ctxs;
}
#endif
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h
index 71bce608d751..935adca6f048 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h
@@ -45,7 +45,7 @@ struct crc_window_param {
uint16_t y_start;
uint16_t x_end;
uint16_t y_end;
- /* CRC windwo is activated or not*/
+ /* CRC window is activated or not*/
bool activated;
/* Update crc window during vertical blank or not */
bool update_win;
@@ -53,22 +53,17 @@ struct crc_window_param {
int skip_frame_cnt;
};
-/* read_work for driver to call PSP to read */
-struct crc_rd_work {
+struct secure_display_context {
+ /* work to notify PSP TA*/
struct work_struct notify_ta_work;
- /* To protect crc_rd_work carried fields*/
- spinlock_t crc_rd_work_lock;
- struct drm_crtc *crtc;
- uint8_t phy_inst;
-};
-/* forward_work for driver to forward ROI to dmu */
-struct crc_fw_work {
+ /* work to forward ROI to dmcu/dmub */
struct work_struct forward_roi_work;
- struct amdgpu_display_manager *dm;
- struct dc_stream_state *stream;
+
+ struct drm_crtc *crtc;
+
+ /* Region of Interest (ROI) */
struct rect rect;
- bool is_stop_cmd;
};
#endif
@@ -100,11 +95,12 @@ void amdgpu_dm_crtc_handle_crc_irq(struct drm_crtc *crtc);
#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
bool amdgpu_dm_crc_window_is_activated(struct drm_crtc *crtc);
void amdgpu_dm_crtc_handle_crc_window_irq(struct drm_crtc *crtc);
-struct crc_rd_work *amdgpu_dm_crtc_secure_display_create_work(void);
+struct secure_display_context *amdgpu_dm_crtc_secure_display_create_contexts(
+ struct amdgpu_device *adev);
#else
#define amdgpu_dm_crc_window_is_activated(x)
#define amdgpu_dm_crtc_handle_crc_window_irq(x)
-#define amdgpu_dm_crtc_secure_display_create_work()
+#define amdgpu_dm_crtc_secure_display_create_contexts()
#endif
#endif /* AMD_DAL_DEV_AMDGPU_DM_AMDGPU_DM_CRC_H_ */
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
index 22125daf9dcf..dc4f37240beb 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
@@ -77,6 +77,9 @@ int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
struct amdgpu_device *adev = drm_to_adev(crtc->dev);
int rc;
+ if (acrtc->otg_inst == -1)
+ return 0;
+
irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
@@ -105,8 +108,7 @@ static void vblank_control_worker(struct work_struct *work)
else if (dm->active_vblank_irq_count)
dm->active_vblank_irq_count--;
- dc_allow_idle_optimizations(
- dm->dc, dm->active_vblank_irq_count == 0 ? true : false);
+ dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);
DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
@@ -152,6 +154,9 @@ static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
struct vblank_control_work *work;
int rc = 0;
+ if (acrtc->otg_inst == -1)
+ goto skip;
+
if (enable) {
/* vblank irq on -> Only need vupdate irq in vrr mode */
if (amdgpu_dm_vrr_active(acrtc_state))
@@ -169,6 +174,7 @@ static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
return -EBUSY;
+skip:
if (amdgpu_in_reset(adev))
return 0;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index 461037a3dd75..09a3efa517da 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -34,9 +34,9 @@
#include "dmub/dmub_srv.h"
#include "resource.h"
#include "dsc.h"
-#include "dc_link_dp.h"
#include "link_hwss.h"
#include "dc/dc_dmub_srv.h"
+#include "link/protocols/link_dp_capability.h"
#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
#include "amdgpu_dm_psr.h"
@@ -419,67 +419,38 @@ static ssize_t dp_phy_settings_read(struct file *f, char __user *buf,
return result;
}
-static int dp_lttpr_status_show(struct seq_file *m, void *d)
+static int dp_lttpr_status_show(struct seq_file *m, void *unused)
{
- char *data;
- struct amdgpu_dm_connector *connector = file_inode(m->file)->i_private;
- struct dc_link *link = connector->dc_link;
- uint32_t read_size = 1;
- uint8_t repeater_count = 0;
+ struct drm_connector *connector = m->private;
+ struct amdgpu_dm_connector *aconnector =
+ to_amdgpu_dm_connector(connector);
+ struct dc_lttpr_caps caps = aconnector->dc_link->dpcd_caps.lttpr_caps;
- data = kzalloc(read_size, GFP_KERNEL);
- if (!data)
- return 0;
+ if (connector->status != connector_status_connected)
+ return -ENODEV;
- dm_helpers_dp_read_dpcd(link->ctx, link, 0xF0002, data, read_size);
+ seq_printf(m, "phy repeater count: %u (raw: 0x%x)\n",
+ dp_parse_lttpr_repeater_count(caps.phy_repeater_cnt),
+ caps.phy_repeater_cnt);
- switch ((uint8_t)*data) {
- case 0x80:
- repeater_count = 1;
- break;
- case 0x40:
- repeater_count = 2;
- break;
- case 0x20:
- repeater_count = 3;
- break;
- case 0x10:
- repeater_count = 4;
- break;
- case 0x8:
- repeater_count = 5;
- break;
- case 0x4:
- repeater_count = 6;
- break;
- case 0x2:
- repeater_count = 7;
+ seq_puts(m, "phy repeater mode: ");
+
+ switch (caps.mode) {
+ case DP_PHY_REPEATER_MODE_TRANSPARENT:
+ seq_puts(m, "transparent");
break;
- case 0x1:
- repeater_count = 8;
+ case DP_PHY_REPEATER_MODE_NON_TRANSPARENT:
+ seq_puts(m, "non-transparent");
break;
- case 0x0:
- repeater_count = 0;
+ case 0x00:
+ seq_puts(m, "non lttpr");
break;
default:
- repeater_count = (uint8_t)*data;
+ seq_printf(m, "read error (raw: 0x%x)", caps.mode);
break;
}
- seq_printf(m, "phy repeater count: %d\n", repeater_count);
-
- dm_helpers_dp_read_dpcd(link->ctx, link, 0xF0003, data, read_size);
-
- if ((uint8_t)*data == 0x55)
- seq_printf(m, "phy repeater mode: transparent\n");
- else if ((uint8_t)*data == 0xAA)
- seq_printf(m, "phy repeater mode: non-transparent\n");
- else if ((uint8_t)*data == 0x00)
- seq_printf(m, "phy repeater mode: non lttpr\n");
- else
- seq_printf(m, "phy repeater mode: read error\n");
-
- kfree(data);
+ seq_puts(m, "\n");
return 0;
}
@@ -1192,7 +1163,7 @@ static int dp_dsc_fec_support_show(struct seq_file *m, void *data)
break;
}
dpcd_caps = aconnector->dc_link->dpcd_caps;
- if (aconnector->port) {
+ if (aconnector->mst_output_port) {
/* aconnector sets dsc_aux during get_modes call
* if MST connector has it means it can either
* enable DSC on the sink device or on MST branch
@@ -1279,14 +1250,14 @@ static ssize_t trigger_hotplug(struct file *f, const char __user *buf,
mutex_lock(&aconnector->hpd_lock);
/* Don't support for mst end device*/
- if (aconnector->mst_port) {
+ if (aconnector->mst_root) {
mutex_unlock(&aconnector->hpd_lock);
return -EINVAL;
}
if (param[0] == 1) {
- if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type) &&
+ if (!dc_link_detect_connection_type(aconnector->dc_link, &new_connection_type) &&
new_connection_type != dc_connection_none)
goto unlock;
@@ -1323,7 +1294,7 @@ static ssize_t trigger_hotplug(struct file *f, const char __user *buf,
/* If the aconnector is the root node in mst topology */
if (aconnector->mst_mgr.mst_state == true)
- reset_cur_dp_mst_topology(link);
+ dc_link_reset_cur_dp_mst_topology(link);
drm_modeset_lock_all(dev);
dm_restore_drm_connector_state(dev, connector);
@@ -1375,16 +1346,11 @@ static ssize_t dp_dsc_clock_en_read(struct file *f, char __user *buf,
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
- if (pipe_ctx && pipe_ctx->stream &&
+ if (pipe_ctx->stream &&
pipe_ctx->stream->link == aconnector->dc_link)
break;
}
- if (!pipe_ctx) {
- kfree(rd_buf);
- return -ENXIO;
- }
-
dsc = pipe_ctx->stream_res.dsc;
if (dsc)
dsc->funcs->dsc_read_state(dsc, &dsc_state);
@@ -1481,12 +1447,12 @@ static ssize_t dp_dsc_clock_en_write(struct file *f, const char __user *buf,
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
- if (pipe_ctx && pipe_ctx->stream &&
+ if (pipe_ctx->stream &&
pipe_ctx->stream->link == aconnector->dc_link)
break;
}
- if (!pipe_ctx || !pipe_ctx->stream)
+ if (!pipe_ctx->stream)
goto done;
// Get CRTC state
@@ -1566,16 +1532,11 @@ static ssize_t dp_dsc_slice_width_read(struct file *f, char __user *buf,
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
- if (pipe_ctx && pipe_ctx->stream &&
+ if (pipe_ctx->stream &&
pipe_ctx->stream->link == aconnector->dc_link)
break;
}
- if (!pipe_ctx) {
- kfree(rd_buf);
- return -ENXIO;
- }
-
dsc = pipe_ctx->stream_res.dsc;
if (dsc)
dsc->funcs->dsc_read_state(dsc, &dsc_state);
@@ -1670,12 +1631,12 @@ static ssize_t dp_dsc_slice_width_write(struct file *f, const char __user *buf,
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
- if (pipe_ctx && pipe_ctx->stream &&
+ if (pipe_ctx->stream &&
pipe_ctx->stream->link == aconnector->dc_link)
break;
}
- if (!pipe_ctx || !pipe_ctx->stream)
+ if (!pipe_ctx->stream)
goto done;
// Safely get CRTC state
@@ -1755,16 +1716,11 @@ static ssize_t dp_dsc_slice_height_read(struct file *f, char __user *buf,
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
- if (pipe_ctx && pipe_ctx->stream &&
+ if (pipe_ctx->stream &&
pipe_ctx->stream->link == aconnector->dc_link)
break;
}
- if (!pipe_ctx) {
- kfree(rd_buf);
- return -ENXIO;
- }
-
dsc = pipe_ctx->stream_res.dsc;
if (dsc)
dsc->funcs->dsc_read_state(dsc, &dsc_state);
@@ -1859,12 +1815,12 @@ static ssize_t dp_dsc_slice_height_write(struct file *f, const char __user *buf,
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
- if (pipe_ctx && pipe_ctx->stream &&
+ if (pipe_ctx->stream &&
pipe_ctx->stream->link == aconnector->dc_link)
break;
}
- if (!pipe_ctx || !pipe_ctx->stream)
+ if (!pipe_ctx->stream)
goto done;
// Get CRTC state
@@ -1940,16 +1896,11 @@ static ssize_t dp_dsc_bits_per_pixel_read(struct file *f, char __user *buf,
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
- if (pipe_ctx && pipe_ctx->stream &&
+ if (pipe_ctx->stream &&
pipe_ctx->stream->link == aconnector->dc_link)
break;
}
- if (!pipe_ctx) {
- kfree(rd_buf);
- return -ENXIO;
- }
-
dsc = pipe_ctx->stream_res.dsc;
if (dsc)
dsc->funcs->dsc_read_state(dsc, &dsc_state);
@@ -2041,12 +1992,12 @@ static ssize_t dp_dsc_bits_per_pixel_write(struct file *f, const char __user *bu
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
- if (pipe_ctx && pipe_ctx->stream &&
+ if (pipe_ctx->stream &&
pipe_ctx->stream->link == aconnector->dc_link)
break;
}
- if (!pipe_ctx || !pipe_ctx->stream)
+ if (!pipe_ctx->stream)
goto done;
// Get CRTC state
@@ -2120,16 +2071,11 @@ static ssize_t dp_dsc_pic_width_read(struct file *f, char __user *buf,
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
- if (pipe_ctx && pipe_ctx->stream &&
+ if (pipe_ctx->stream &&
pipe_ctx->stream->link == aconnector->dc_link)
break;
}
- if (!pipe_ctx) {
- kfree(rd_buf);
- return -ENXIO;
- }
-
dsc = pipe_ctx->stream_res.dsc;
if (dsc)
dsc->funcs->dsc_read_state(dsc, &dsc_state);
@@ -2181,16 +2127,11 @@ static ssize_t dp_dsc_pic_height_read(struct file *f, char __user *buf,
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
- if (pipe_ctx && pipe_ctx->stream &&
+ if (pipe_ctx->stream &&
pipe_ctx->stream->link == aconnector->dc_link)
break;
}
- if (!pipe_ctx) {
- kfree(rd_buf);
- return -ENXIO;
- }
-
dsc = pipe_ctx->stream_res.dsc;
if (dsc)
dsc->funcs->dsc_read_state(dsc, &dsc_state);
@@ -2257,16 +2198,11 @@ static ssize_t dp_dsc_chunk_size_read(struct file *f, char __user *buf,
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
- if (pipe_ctx && pipe_ctx->stream &&
+ if (pipe_ctx->stream &&
pipe_ctx->stream->link == aconnector->dc_link)
break;
}
- if (!pipe_ctx) {
- kfree(rd_buf);
- return -ENXIO;
- }
-
dsc = pipe_ctx->stream_res.dsc;
if (dsc)
dsc->funcs->dsc_read_state(dsc, &dsc_state);
@@ -2333,16 +2269,11 @@ static ssize_t dp_dsc_slice_bpg_offset_read(struct file *f, char __user *buf,
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
- if (pipe_ctx && pipe_ctx->stream &&
+ if (pipe_ctx->stream &&
pipe_ctx->stream->link == aconnector->dc_link)
break;
}
- if (!pipe_ctx) {
- kfree(rd_buf);
- return -ENXIO;
- }
-
dsc = pipe_ctx->stream_res.dsc;
if (dsc)
dsc->funcs->dsc_read_state(dsc, &dsc_state);
@@ -2578,13 +2509,13 @@ static int dp_is_mst_connector_show(struct seq_file *m, void *unused)
if (aconnector->mst_mgr.mst_state) {
role = "root";
- } else if (aconnector->mst_port &&
- aconnector->mst_port->mst_mgr.mst_state) {
+ } else if (aconnector->mst_root &&
+ aconnector->mst_root->mst_mgr.mst_state) {
role = "end";
- mgr = &aconnector->mst_port->mst_mgr;
- port = aconnector->port;
+ mgr = &aconnector->mst_root->mst_mgr;
+ port = aconnector->mst_output_port;
drm_modeset_lock(&mgr->base.lock, NULL);
if (port->pdt == DP_PEER_DEVICE_MST_BRANCHING &&
@@ -3245,46 +3176,24 @@ DEFINE_DEBUGFS_ATTRIBUTE(crc_win_y_end_fops, crc_win_y_end_get,
*/
static int crc_win_update_set(void *data, u64 val)
{
- struct drm_crtc *new_crtc = data;
- struct drm_crtc *old_crtc = NULL;
- struct amdgpu_crtc *new_acrtc, *old_acrtc;
- struct amdgpu_device *adev = drm_to_adev(new_crtc->dev);
- struct crc_rd_work *crc_rd_wrk = adev->dm.crc_rd_wrk;
-
- if (!crc_rd_wrk)
- return 0;
+ struct drm_crtc *crtc = data;
+ struct amdgpu_crtc *acrtc;
+ struct amdgpu_device *adev = drm_to_adev(crtc->dev);
if (val) {
- new_acrtc = to_amdgpu_crtc(new_crtc);
+ acrtc = to_amdgpu_crtc(crtc);
mutex_lock(&adev->dm.dc_lock);
/* PSR may write to OTG CRC window control register,
* so close it before starting secure_display.
*/
- amdgpu_dm_psr_disable(new_acrtc->dm_irq_params.stream);
+ amdgpu_dm_psr_disable(acrtc->dm_irq_params.stream);
spin_lock_irq(&adev_to_drm(adev)->event_lock);
- spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
- if (crc_rd_wrk->crtc) {
- old_crtc = crc_rd_wrk->crtc;
- old_acrtc = to_amdgpu_crtc(old_crtc);
- }
- if (old_crtc && old_crtc != new_crtc) {
- old_acrtc->dm_irq_params.window_param.activated = false;
- old_acrtc->dm_irq_params.window_param.update_win = false;
- old_acrtc->dm_irq_params.window_param.skip_frame_cnt = 0;
+ acrtc->dm_irq_params.window_param.activated = true;
+ acrtc->dm_irq_params.window_param.update_win = true;
+ acrtc->dm_irq_params.window_param.skip_frame_cnt = 0;
- new_acrtc->dm_irq_params.window_param.activated = true;
- new_acrtc->dm_irq_params.window_param.update_win = true;
- new_acrtc->dm_irq_params.window_param.skip_frame_cnt = 0;
- crc_rd_wrk->crtc = new_crtc;
- } else {
- new_acrtc->dm_irq_params.window_param.activated = true;
- new_acrtc->dm_irq_params.window_param.update_win = true;
- new_acrtc->dm_irq_params.window_param.skip_frame_cnt = 0;
- crc_rd_wrk->crtc = new_crtc;
- }
- spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
spin_unlock_irq(&adev_to_drm(adev)->event_lock);
mutex_unlock(&adev->dm.dc_lock);
}
@@ -3453,12 +3362,12 @@ static int trigger_hpd_mst_set(void *data, u64 val)
if (!aconnector->dc_link)
continue;
- if (!aconnector->mst_port)
+ if (!aconnector->mst_root)
continue;
link = aconnector->dc_link;
- dp_receiver_power_ctrl(link, false);
- drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_port->mst_mgr, false);
+ dc_link_dp_receiver_power_ctrl(link, false);
+ drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_root->mst_mgr, false);
link->mst_stream_alloc_table.stream_count = 0;
memset(link->mst_stream_alloc_table.stream_allocations, 0,
sizeof(link->mst_stream_alloc_table.stream_allocations));
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
index a7fd98f57f94..8e572f07ec47 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
@@ -170,9 +170,10 @@ void hdcp_update_display(struct hdcp_workqueue *hdcp_work,
struct mod_hdcp_display *display = &hdcp_work[link_index].display;
struct mod_hdcp_link *link = &hdcp_work[link_index].link;
struct mod_hdcp_display_query query;
+ unsigned int conn_index = aconnector->base.index;
mutex_lock(&hdcp_w->mutex);
- hdcp_w->aconnector = aconnector;
+ hdcp_w->aconnector[conn_index] = aconnector;
query.display = NULL;
mod_hdcp_query_display(&hdcp_w->hdcp, aconnector->base.index, &query);
@@ -204,7 +205,7 @@ void hdcp_update_display(struct hdcp_workqueue *hdcp_work,
msecs_to_jiffies(DRM_HDCP_CHECK_PERIOD_MS));
} else {
display->adjust.disable = MOD_HDCP_DISPLAY_DISABLE_AUTHENTICATION;
- hdcp_w->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
+ hdcp_w->encryption_status[conn_index] = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
cancel_delayed_work(&hdcp_w->property_validate_dwork);
}
@@ -223,9 +224,10 @@ static void hdcp_remove_display(struct hdcp_workqueue *hdcp_work,
{
struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index];
struct drm_connector_state *conn_state = aconnector->base.state;
+ unsigned int conn_index = aconnector->base.index;
mutex_lock(&hdcp_w->mutex);
- hdcp_w->aconnector = aconnector;
+ hdcp_w->aconnector[conn_index] = aconnector;
/* the removal of display will invoke auth reset -> hdcp destroy and
* we'd expect the Content Protection (CP) property changed back to
@@ -247,13 +249,18 @@ static void hdcp_remove_display(struct hdcp_workqueue *hdcp_work,
void hdcp_reset_display(struct hdcp_workqueue *hdcp_work, unsigned int link_index)
{
struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index];
+ unsigned int conn_index;
mutex_lock(&hdcp_w->mutex);
mod_hdcp_reset_connection(&hdcp_w->hdcp, &hdcp_w->output);
cancel_delayed_work(&hdcp_w->property_validate_dwork);
- hdcp_w->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
+
+ for (conn_index = 0; conn_index < AMDGPU_DM_MAX_DISPLAY_INDEX; conn_index++) {
+ hdcp_w->encryption_status[conn_index] =
+ MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
+ }
process_output(hdcp_w);
@@ -290,49 +297,80 @@ static void event_callback(struct work_struct *work)
}
+
static void event_property_update(struct work_struct *work)
{
-
struct hdcp_workqueue *hdcp_work = container_of(work, struct hdcp_workqueue, property_update_work);
- struct amdgpu_dm_connector *aconnector = hdcp_work->aconnector;
- struct drm_device *dev = hdcp_work->aconnector->base.dev;
+ struct amdgpu_dm_connector *aconnector = NULL;
+ struct drm_device *dev;
long ret;
+ unsigned int conn_index;
+ struct drm_connector *connector;
+ struct drm_connector_state *conn_state;
- drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
- mutex_lock(&hdcp_work->mutex);
+ for (conn_index = 0; conn_index < AMDGPU_DM_MAX_DISPLAY_INDEX; conn_index++) {
+ aconnector = hdcp_work->aconnector[conn_index];
+ if (!aconnector)
+ continue;
- if (aconnector->base.state && aconnector->base.state->commit) {
- ret = wait_for_completion_interruptible_timeout(&aconnector->base.state->commit->hw_done, 10 * HZ);
+ connector = &aconnector->base;
- if (ret == 0) {
- DRM_ERROR("HDCP state unknown! Setting it to DESIRED");
- hdcp_work->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
- }
- }
+ /* check if display connected */
+ if (connector->status != connector_status_connected)
+ continue;
- if (aconnector->base.state) {
- if (hdcp_work->encryption_status != MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF) {
- if (aconnector->base.state->hdcp_content_type ==
+ conn_state = aconnector->base.state;
+
+ if (!conn_state)
+ continue;
+
+ dev = connector->dev;
+
+ if (!dev)
+ continue;
+
+ drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+ mutex_lock(&hdcp_work->mutex);
+
+ if (conn_state->commit) {
+ ret = wait_for_completion_interruptible_timeout(
+ &conn_state->commit->hw_done, 10 * HZ);
+ if (ret == 0) {
+ DRM_ERROR(
+ "HDCP state unknown! Setting it to DESIRED");
+ hdcp_work->encryption_status[conn_index] =
+ MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
+ }
+ }
+ if (hdcp_work->encryption_status[conn_index] !=
+ MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF) {
+ if (conn_state->hdcp_content_type ==
DRM_MODE_HDCP_CONTENT_TYPE0 &&
- hdcp_work->encryption_status <=
- MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE0_ON)
- drm_hdcp_update_content_protection(&aconnector->base,
+ hdcp_work->encryption_status[conn_index] <=
+ MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE0_ON) {
+
+ DRM_DEBUG_DRIVER("[HDCP_DM] DRM_MODE_CONTENT_PROTECTION_ENABLED\n");
+ drm_hdcp_update_content_protection(
+ connector,
DRM_MODE_CONTENT_PROTECTION_ENABLED);
- else if (aconnector->base.state->hdcp_content_type ==
+ } else if (conn_state->hdcp_content_type ==
DRM_MODE_HDCP_CONTENT_TYPE1 &&
- hdcp_work->encryption_status ==
- MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE1_ON)
- drm_hdcp_update_content_protection(&aconnector->base,
+ hdcp_work->encryption_status[conn_index] ==
+ MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE1_ON) {
+ drm_hdcp_update_content_protection(
+ connector,
DRM_MODE_CONTENT_PROTECTION_ENABLED);
+ }
} else {
- drm_hdcp_update_content_protection(&aconnector->base,
- DRM_MODE_CONTENT_PROTECTION_DESIRED);
+ DRM_DEBUG_DRIVER("[HDCP_DM] DRM_MODE_CONTENT_PROTECTION_DESIRED\n");
+ drm_hdcp_update_content_protection(
+ connector, DRM_MODE_CONTENT_PROTECTION_DESIRED);
+
}
+ mutex_unlock(&hdcp_work->mutex);
+ drm_modeset_unlock(&dev->mode_config.connection_mutex);
}
-
- mutex_unlock(&hdcp_work->mutex);
- drm_modeset_unlock(&dev->mode_config.connection_mutex);
}
static void event_property_validate(struct work_struct *work)
@@ -340,19 +378,47 @@ static void event_property_validate(struct work_struct *work)
struct hdcp_workqueue *hdcp_work =
container_of(to_delayed_work(work), struct hdcp_workqueue, property_validate_dwork);
struct mod_hdcp_display_query query;
- struct amdgpu_dm_connector *aconnector = hdcp_work->aconnector;
-
- if (!aconnector)
- return;
+ struct amdgpu_dm_connector *aconnector;
+ unsigned int conn_index;
mutex_lock(&hdcp_work->mutex);
- query.encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
- mod_hdcp_query_display(&hdcp_work->hdcp, aconnector->base.index, &query);
+ for (conn_index = 0; conn_index < AMDGPU_DM_MAX_DISPLAY_INDEX;
+ conn_index++) {
+ aconnector = hdcp_work->aconnector[conn_index];
+
+ if (!aconnector)
+ continue;
+
+ /* check if display connected */
+ if (aconnector->base.status != connector_status_connected)
+ continue;
- if (query.encryption_status != hdcp_work->encryption_status) {
- hdcp_work->encryption_status = query.encryption_status;
- schedule_work(&hdcp_work->property_update_work);
+ if (!aconnector->base.state)
+ continue;
+
+ query.encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
+ mod_hdcp_query_display(&hdcp_work->hdcp, aconnector->base.index,
+ &query);
+
+ DRM_DEBUG_DRIVER("[HDCP_DM] disp %d, connector->CP %u, (query, work): (%d, %d)\n",
+ aconnector->base.index,
+ aconnector->base.state->content_protection,
+ query.encryption_status,
+ hdcp_work->encryption_status[conn_index]);
+
+ if (query.encryption_status !=
+ hdcp_work->encryption_status[conn_index]) {
+ DRM_DEBUG_DRIVER("[HDCP_DM] encryption_status change from %x to %x\n",
+ hdcp_work->encryption_status[conn_index], query.encryption_status);
+
+ hdcp_work->encryption_status[conn_index] =
+ query.encryption_status;
+
+ DRM_DEBUG_DRIVER("[HDCP_DM] trigger property_update_work\n");
+
+ schedule_work(&hdcp_work->property_update_work);
+ }
}
mutex_unlock(&hdcp_work->mutex);
@@ -686,6 +752,13 @@ struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev, struct
hdcp_work[i].hdcp.config.ddc.funcs.read_i2c = lp_read_i2c;
hdcp_work[i].hdcp.config.ddc.funcs.write_dpcd = lp_write_dpcd;
hdcp_work[i].hdcp.config.ddc.funcs.read_dpcd = lp_read_dpcd;
+
+ memset(hdcp_work[i].aconnector, 0,
+ sizeof(struct amdgpu_dm_connector *) *
+ AMDGPU_DM_MAX_DISPLAY_INDEX);
+ memset(hdcp_work[i].encryption_status, 0,
+ sizeof(enum mod_hdcp_encryption_status) *
+ AMDGPU_DM_MAX_DISPLAY_INDEX);
}
cp_psp->funcs.update_stream_config = update_config;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h
index 09294ff122fe..69b445b011c8 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h
@@ -43,7 +43,7 @@ struct hdcp_workqueue {
struct delayed_work callback_dwork;
struct delayed_work watchdog_timer_dwork;
struct delayed_work property_validate_dwork;
- struct amdgpu_dm_connector *aconnector;
+ struct amdgpu_dm_connector *aconnector[AMDGPU_DM_MAX_DISPLAY_INDEX];
struct mutex mutex;
struct mod_hdcp hdcp;
@@ -51,7 +51,20 @@ struct hdcp_workqueue {
struct mod_hdcp_display display;
struct mod_hdcp_link link;
- enum mod_hdcp_encryption_status encryption_status;
+ enum mod_hdcp_encryption_status encryption_status[AMDGPU_DM_MAX_DISPLAY_INDEX];
+ /* when display is unplugged from mst hub, connctor will be
+ * destroyed within dm_dp_mst_connector_destroy. connector
+ * hdcp perperties, like type, undesired, desired, enabled,
+ * will be lost. So, save hdcp properties into hdcp_work within
+ * amdgpu_dm_atomic_commit_tail. if the same display is
+ * plugged back with same display index, its hdcp properties
+ * will be retrieved from hdcp_work within dm_dp_mst_get_modes
+ */
+ /* un-desired, desired, enabled */
+ unsigned int content_protection[AMDGPU_DM_MAX_DISPLAY_INDEX];
+ /* hdcp1.x, hdcp2.x */
+ unsigned int hdcp_content_type[AMDGPU_DM_MAX_DISPLAY_INDEX];
+
uint8_t max_link;
uint8_t *srm;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index 5cff56bb8f56..6fdc2027c2b4 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -38,6 +38,8 @@
#include "amdgpu_dm.h"
#include "amdgpu_dm_irq.h"
#include "amdgpu_dm_mst_types.h"
+#include "dpcd_defs.h"
+#include "dc/inc/core_types.h"
#include "dm_helpers.h"
#include "ddc_service_types.h"
@@ -195,18 +197,18 @@ bool dm_helpers_dp_mst_write_payload_allocation_table(
* that blocks before commit guaranteeing that the state
* is not gonna be swapped while still in use in commit tail */
- if (!aconnector || !aconnector->mst_port)
+ if (!aconnector || !aconnector->mst_root)
return false;
- mst_mgr = &aconnector->mst_port->mst_mgr;
+ mst_mgr = &aconnector->mst_root->mst_mgr;
mst_state = to_drm_dp_mst_topology_state(mst_mgr->base.state);
/* It's OK for this to fail */
- payload = drm_atomic_get_mst_payload_state(mst_state, aconnector->port);
+ payload = drm_atomic_get_mst_payload_state(mst_state, aconnector->mst_output_port);
if (enable)
drm_dp_add_payload_part1(mst_mgr, mst_state, payload);
else
- drm_dp_remove_payload(mst_mgr, mst_state, payload);
+ drm_dp_remove_payload(mst_mgr, mst_state, payload, payload);
/* mst_mgr->->payloads are VC payload notify MST branch using DPCD or
* AUX message. The sequence is slot 1-63 allocated sequence for each
@@ -247,10 +249,10 @@ enum act_return_status dm_helpers_dp_mst_poll_for_allocation_change_trigger(
aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
- if (!aconnector || !aconnector->mst_port)
+ if (!aconnector || !aconnector->mst_root)
return ACT_FAILED;
- mst_mgr = &aconnector->mst_port->mst_mgr;
+ mst_mgr = &aconnector->mst_root->mst_mgr;
if (!mst_mgr->mst_state)
return ACT_FAILED;
@@ -274,22 +276,27 @@ bool dm_helpers_dp_mst_send_payload_allocation(
struct drm_dp_mst_atomic_payload *payload;
enum mst_progress_status set_flag = MST_ALLOCATE_NEW_PAYLOAD;
enum mst_progress_status clr_flag = MST_CLEAR_ALLOCATED_PAYLOAD;
+ int ret = 0;
aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
- if (!aconnector || !aconnector->mst_port)
+ if (!aconnector || !aconnector->mst_root)
return false;
- mst_mgr = &aconnector->mst_port->mst_mgr;
+ mst_mgr = &aconnector->mst_root->mst_mgr;
mst_state = to_drm_dp_mst_topology_state(mst_mgr->base.state);
- payload = drm_atomic_get_mst_payload_state(mst_state, aconnector->port);
+ payload = drm_atomic_get_mst_payload_state(mst_state, aconnector->mst_output_port);
+
if (!enable) {
set_flag = MST_CLEAR_ALLOCATED_PAYLOAD;
clr_flag = MST_ALLOCATE_NEW_PAYLOAD;
}
- if (enable && drm_dp_add_payload_part2(mst_mgr, mst_state->base.state, payload)) {
+ if (enable)
+ ret = drm_dp_add_payload_part2(mst_mgr, mst_state->base.state, payload);
+
+ if (ret) {
amdgpu_dm_set_mst_status(&aconnector->mst_status,
set_flag, false);
} else {
@@ -396,6 +403,7 @@ bool dm_helpers_dp_mst_start_top_mgr(
bool boot)
{
struct amdgpu_dm_connector *aconnector = link->priv;
+ int ret;
if (!aconnector) {
DRM_ERROR("Failed to find connector for link!");
@@ -411,7 +419,16 @@ bool dm_helpers_dp_mst_start_top_mgr(
DRM_INFO("DM_MST: starting TM on aconnector: %p [id: %d]\n",
aconnector, aconnector->base.base.id);
- return (drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true) == 0);
+ ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
+ if (ret < 0) {
+ DRM_ERROR("DM_MST: Failed to set the device into MST mode!");
+ return false;
+ }
+
+ DRM_INFO("DM_MST: DP%x, %d-lane link detected\n", aconnector->mst_mgr.dpcd[0],
+ aconnector->mst_mgr.dpcd[2] & DP_MAX_LANE_COUNT_MASK);
+
+ return true;
}
bool dm_helpers_dp_mst_stop_top_mgr(
@@ -710,7 +727,7 @@ bool dm_helpers_dp_write_dsc_enable(
aconnector->dsc_aux, stream, enable_dsc);
#endif
- port = aconnector->port;
+ port = aconnector->mst_output_port;
if (enable) {
if (port->passthrough_aux) {
@@ -987,6 +1004,128 @@ void dm_helpers_mst_enable_stream_features(const struct dc_stream_state *stream)
sizeof(new_downspread));
}
+bool dm_helpers_dp_handle_test_pattern_request(
+ struct dc_context *ctx,
+ const struct dc_link *link,
+ union link_test_pattern dpcd_test_pattern,
+ union test_misc dpcd_test_params)
+{
+ enum dp_test_pattern test_pattern;
+ enum dp_test_pattern_color_space test_pattern_color_space =
+ DP_TEST_PATTERN_COLOR_SPACE_UNDEFINED;
+ enum dc_color_depth requestColorDepth = COLOR_DEPTH_UNDEFINED;
+ enum dc_pixel_encoding requestPixelEncoding = PIXEL_ENCODING_UNDEFINED;
+ struct pipe_ctx *pipes = link->dc->current_state->res_ctx.pipe_ctx;
+ struct pipe_ctx *pipe_ctx = NULL;
+ struct amdgpu_dm_connector *aconnector = link->priv;
+ int i;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (pipes[i].stream == NULL)
+ continue;
+
+ if (pipes[i].stream->link == link && !pipes[i].top_pipe &&
+ !pipes[i].prev_odm_pipe) {
+ pipe_ctx = &pipes[i];
+ break;
+ }
+ }
+
+ if (pipe_ctx == NULL)
+ return false;
+
+ switch (dpcd_test_pattern.bits.PATTERN) {
+ case LINK_TEST_PATTERN_COLOR_RAMP:
+ test_pattern = DP_TEST_PATTERN_COLOR_RAMP;
+ break;
+ case LINK_TEST_PATTERN_VERTICAL_BARS:
+ test_pattern = DP_TEST_PATTERN_VERTICAL_BARS;
+ break; /* black and white */
+ case LINK_TEST_PATTERN_COLOR_SQUARES:
+ test_pattern = (dpcd_test_params.bits.DYN_RANGE ==
+ TEST_DYN_RANGE_VESA ?
+ DP_TEST_PATTERN_COLOR_SQUARES :
+ DP_TEST_PATTERN_COLOR_SQUARES_CEA);
+ break;
+ default:
+ test_pattern = DP_TEST_PATTERN_VIDEO_MODE;
+ break;
+ }
+
+ if (dpcd_test_params.bits.CLR_FORMAT == 0)
+ test_pattern_color_space = DP_TEST_PATTERN_COLOR_SPACE_RGB;
+ else
+ test_pattern_color_space = dpcd_test_params.bits.YCBCR_COEFS ?
+ DP_TEST_PATTERN_COLOR_SPACE_YCBCR709 :
+ DP_TEST_PATTERN_COLOR_SPACE_YCBCR601;
+
+ switch (dpcd_test_params.bits.BPC) {
+ case 0: // 6 bits
+ requestColorDepth = COLOR_DEPTH_666;
+ break;
+ case 1: // 8 bits
+ requestColorDepth = COLOR_DEPTH_888;
+ break;
+ case 2: // 10 bits
+ requestColorDepth = COLOR_DEPTH_101010;
+ break;
+ case 3: // 12 bits
+ requestColorDepth = COLOR_DEPTH_121212;
+ break;
+ default:
+ break;
+ }
+
+ switch (dpcd_test_params.bits.CLR_FORMAT) {
+ case 0:
+ requestPixelEncoding = PIXEL_ENCODING_RGB;
+ break;
+ case 1:
+ requestPixelEncoding = PIXEL_ENCODING_YCBCR422;
+ break;
+ case 2:
+ requestPixelEncoding = PIXEL_ENCODING_YCBCR444;
+ break;
+ default:
+ requestPixelEncoding = PIXEL_ENCODING_RGB;
+ break;
+ }
+
+ if ((requestColorDepth != COLOR_DEPTH_UNDEFINED
+ && pipe_ctx->stream->timing.display_color_depth != requestColorDepth)
+ || (requestPixelEncoding != PIXEL_ENCODING_UNDEFINED
+ && pipe_ctx->stream->timing.pixel_encoding != requestPixelEncoding)) {
+ DC_LOG_DEBUG("%s: original bpc %d pix encoding %d, changing to %d %d\n",
+ __func__,
+ pipe_ctx->stream->timing.display_color_depth,
+ pipe_ctx->stream->timing.pixel_encoding,
+ requestColorDepth,
+ requestPixelEncoding);
+ pipe_ctx->stream->timing.display_color_depth = requestColorDepth;
+ pipe_ctx->stream->timing.pixel_encoding = requestPixelEncoding;
+
+ dc_link_update_dsc_config(pipe_ctx);
+
+ aconnector->timing_changed = true;
+ /* store current timing */
+ if (aconnector->timing_requested)
+ *aconnector->timing_requested = pipe_ctx->stream->timing;
+ else
+ DC_LOG_ERROR("%s: timing storage failed\n", __func__);
+
+ }
+
+ dc_link_dp_set_test_pattern(
+ (struct dc_link *) link,
+ test_pattern,
+ test_pattern_color_space,
+ NULL,
+ NULL,
+ 0);
+
+ return false;
+}
+
void dm_set_phyd32clk(struct dc_context *ctx, int freq_khz)
{
// TODO
@@ -1004,3 +1143,36 @@ void dm_helpers_dp_mst_update_branch_bandwidth(
// TODO
}
+static bool dm_is_freesync_pcon_whitelist(const uint32_t branch_dev_id)
+{
+ bool ret_val = false;
+
+ switch (branch_dev_id) {
+ case DP_BRANCH_DEVICE_ID_0060AD:
+ ret_val = true;
+ break;
+ default:
+ break;
+ }
+
+ return ret_val;
+}
+
+enum adaptive_sync_type dm_get_adaptive_sync_support_type(struct dc_link *link)
+{
+ struct dpcd_caps *dpcd_caps = &link->dpcd_caps;
+ enum adaptive_sync_type as_type = ADAPTIVE_SYNC_TYPE_NONE;
+
+ switch (dpcd_caps->dongle_type) {
+ case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
+ if (dpcd_caps->adaptive_sync_caps.dp_adap_sync_caps.bits.ADAPTIVE_SYNC_SDP_SUPPORT == true &&
+ dpcd_caps->allow_invalid_MSA_timing_param == true &&
+ dm_is_freesync_pcon_whitelist(dpcd_caps->branch_dev_id))
+ as_type = FREESYNC_TYPE_PCON_IN_WHITELIST;
+ break;
+ default:
+ break;
+ }
+
+ return as_type;
+}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index abdbd4352f6f..e25e1b2bf194 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -32,15 +32,16 @@
#include "amdgpu_dm.h"
#include "amdgpu_dm_mst_types.h"
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+#include "amdgpu_dm_hdcp.h"
+#endif
+
#include "dc.h"
#include "dm_helpers.h"
-#include "dc_link_ddc.h"
-#include "dc_link_dp.h"
#include "ddc_service_types.h"
#include "dpcd_defs.h"
-#include "i2caux_interface.h"
#include "dmub_cmd.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
@@ -49,7 +50,7 @@
#include "dc/dcn20/dcn20_resource.h"
bool is_timing_changed(struct dc_stream_state *cur_stream,
struct dc_stream_state *new_stream);
-
+#define PEAK_FACTOR_X1000 1006
static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
struct drm_dp_aux_msg *msg)
@@ -132,7 +133,7 @@ dm_dp_mst_connector_destroy(struct drm_connector *connector)
kfree(aconnector->edid);
drm_connector_cleanup(connector);
- drm_dp_mst_put_port_malloc(aconnector->port);
+ drm_dp_mst_put_port_malloc(aconnector->mst_output_port);
kfree(aconnector);
}
@@ -144,7 +145,7 @@ amdgpu_dm_mst_connector_late_register(struct drm_connector *connector)
int r;
r = drm_dp_mst_connector_late_register(connector,
- amdgpu_dm_connector->port);
+ amdgpu_dm_connector->mst_output_port);
if (r < 0)
return r;
@@ -160,8 +161,8 @@ amdgpu_dm_mst_connector_early_unregister(struct drm_connector *connector)
{
struct amdgpu_dm_connector *aconnector =
to_amdgpu_dm_connector(connector);
- struct drm_dp_mst_port *port = aconnector->port;
- struct amdgpu_dm_connector *root = aconnector->mst_port;
+ struct drm_dp_mst_port *port = aconnector->mst_output_port;
+ struct amdgpu_dm_connector *root = aconnector->mst_root;
struct dc_link *dc_link = aconnector->dc_link;
struct dc_sink *dc_sink = aconnector->dc_sink;
@@ -176,6 +177,9 @@ amdgpu_dm_mst_connector_early_unregister(struct drm_connector *connector)
if (dc_link->sink_count)
dc_link_remove_remote_sink(dc_link, dc_sink);
+ DC_LOG_MST("DM_MST: remove remote sink 0x%p, %d remaining\n",
+ dc_sink, dc_link->sink_count);
+
dc_sink_release(dc_sink);
aconnector->dc_sink = NULL;
aconnector->edid = NULL;
@@ -211,7 +215,7 @@ bool needs_dsc_aux_workaround(struct dc_link *link)
static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnector)
{
struct dc_sink *dc_sink = aconnector->dc_sink;
- struct drm_dp_mst_port *port = aconnector->port;
+ struct drm_dp_mst_port *port = aconnector->mst_output_port;
u8 dsc_caps[16] = { 0 };
u8 dsc_branch_dec_caps_raw[3] = { 0 }; // DSC branch decoder caps 0xA0 ~ 0xA2
u8 *dsc_branch_dec_caps = NULL;
@@ -229,7 +233,7 @@ static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnecto
*/
if (!aconnector->dsc_aux && !port->parent->port_parent &&
needs_dsc_aux_workaround(aconnector->dc_link))
- aconnector->dsc_aux = &aconnector->mst_port->dm_dp_aux.aux;
+ aconnector->dsc_aux = &aconnector->mst_root->dm_dp_aux.aux;
if (!aconnector->dsc_aux)
return false;
@@ -279,7 +283,7 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
if (!aconnector->edid) {
struct edid *edid;
- edid = drm_dp_mst_get_edid(connector, &aconnector->mst_port->mst_mgr, aconnector->port);
+ edid = drm_dp_mst_get_edid(connector, &aconnector->mst_root->mst_mgr, aconnector->mst_output_port);
if (!edid) {
amdgpu_dm_set_mst_status(&aconnector->mst_status,
@@ -307,6 +311,9 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
return 0;
}
+ DC_LOG_MST("DM_MST: add remote sink 0x%p, %d remaining\n",
+ dc_sink, aconnector->dc_link->sink_count);
+
dc_sink->priv = aconnector;
aconnector->dc_sink = dc_sink;
}
@@ -340,10 +347,35 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
return 0;
}
+ DC_LOG_MST("DM_MST: add remote sink 0x%p, %d remaining\n",
+ dc_sink, aconnector->dc_link->sink_count);
+
dc_sink->priv = aconnector;
/* dc_link_add_remote_sink returns a new reference */
aconnector->dc_sink = dc_sink;
+ /* when display is unplugged from mst hub, connctor will be
+ * destroyed within dm_dp_mst_connector_destroy. connector
+ * hdcp perperties, like type, undesired, desired, enabled,
+ * will be lost. So, save hdcp properties into hdcp_work within
+ * amdgpu_dm_atomic_commit_tail. if the same display is
+ * plugged back with same display index, its hdcp properties
+ * will be retrieved from hdcp_work within dm_dp_mst_get_modes
+ */
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+ if (aconnector->dc_sink && connector->state) {
+ struct drm_device *dev = connector->dev;
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ struct hdcp_workqueue *hdcp_work = adev->dm.hdcp_workqueue;
+ struct hdcp_workqueue *hdcp_w = &hdcp_work[aconnector->dc_link->link_index];
+
+ connector->state->hdcp_content_type =
+ hdcp_w->hdcp_content_type[connector->index];
+ connector->state->content_protection =
+ hdcp_w->content_protection[connector->index];
+ }
+#endif
+
if (aconnector->dc_sink) {
amdgpu_dm_update_freesync_caps(
connector, aconnector->edid);
@@ -386,15 +418,15 @@ dm_dp_mst_detect(struct drm_connector *connector,
struct drm_modeset_acquire_ctx *ctx, bool force)
{
struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
- struct amdgpu_dm_connector *master = aconnector->mst_port;
- struct drm_dp_mst_port *port = aconnector->port;
+ struct amdgpu_dm_connector *master = aconnector->mst_root;
+ struct drm_dp_mst_port *port = aconnector->mst_output_port;
int connection_status;
if (drm_connector_is_unregistered(connector))
return connector_status_disconnected;
connection_status = drm_dp_mst_detect_port(connector, ctx, &master->mst_mgr,
- aconnector->port);
+ aconnector->mst_output_port);
if (port->pdt != DP_PEER_DEVICE_NONE && !port->dpcd_rev) {
uint8_t dpcd_rev;
@@ -435,6 +467,9 @@ dm_dp_mst_detect(struct drm_connector *connector,
if (aconnector->dc_link->sink_count)
dc_link_remove_remote_sink(aconnector->dc_link, aconnector->dc_sink);
+ DC_LOG_MST("DM_MST: remove remote sink 0x%p, %d remaining\n",
+ aconnector->dc_link, aconnector->dc_link->sink_count);
+
dc_sink_release(aconnector->dc_sink);
aconnector->dc_sink = NULL;
aconnector->edid = NULL;
@@ -451,8 +486,8 @@ static int dm_dp_mst_atomic_check(struct drm_connector *connector,
struct drm_atomic_state *state)
{
struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
- struct drm_dp_mst_topology_mgr *mst_mgr = &aconnector->mst_port->mst_mgr;
- struct drm_dp_mst_port *mst_port = aconnector->port;
+ struct drm_dp_mst_topology_mgr *mst_mgr = &aconnector->mst_root->mst_mgr;
+ struct drm_dp_mst_port *mst_port = aconnector->mst_output_port;
return drm_dp_atomic_release_time_slots(state, mst_mgr, mst_port);
}
@@ -514,8 +549,8 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
return NULL;
connector = &aconnector->base;
- aconnector->port = port;
- aconnector->mst_port = master;
+ aconnector->mst_output_port = port;
+ aconnector->mst_root = master;
amdgpu_dm_set_mst_status(&aconnector->mst_status,
MST_PROBE, true);
@@ -916,7 +951,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
if (!aconnector)
continue;
- if (!aconnector->port)
+ if (!aconnector->mst_output_port)
continue;
stream->timing.flags.DSC = 0;
@@ -924,7 +959,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
params[count].timing = &stream->timing;
params[count].sink = stream->sink;
params[count].aconnector = aconnector;
- params[count].port = aconnector->port;
+ params[count].port = aconnector->mst_output_port;
params[count].clock_force_enable = aconnector->dsc_settings.dsc_force_enable;
if (params[count].clock_force_enable == DSC_CLK_FORCE_ENABLE)
debugfs_overwrite = true;
@@ -1133,7 +1168,7 @@ int compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
- if (!aconnector || !aconnector->dc_sink || !aconnector->port)
+ if (!aconnector || !aconnector->dc_sink || !aconnector->mst_output_port)
continue;
if (!aconnector->dc_sink->dsc_caps.dsc_dec_caps.is_dsc_supported)
@@ -1148,7 +1183,7 @@ int compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
if (!is_dsc_need_re_compute(state, dc_state, stream->link))
continue;
- mst_mgr = aconnector->port->mgr;
+ mst_mgr = aconnector->mst_output_port->mgr;
ret = compute_mst_dsc_configs_for_link(state, dc_state, stream->link, vars, mst_mgr,
&link_vars_start_index);
if (ret != 0)
@@ -1194,7 +1229,7 @@ static int pre_compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
- if (!aconnector || !aconnector->dc_sink || !aconnector->port)
+ if (!aconnector || !aconnector->dc_sink || !aconnector->mst_output_port)
continue;
if (!aconnector->dc_sink->dsc_caps.dsc_dec_caps.is_dsc_supported)
@@ -1206,7 +1241,7 @@ static int pre_compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
if (!is_dsc_need_re_compute(state, dc_state, stream->link))
continue;
- mst_mgr = aconnector->port->mgr;
+ mst_mgr = aconnector->mst_output_port->mgr;
ret = compute_mst_dsc_configs_for_link(state, dc_state, stream->link, vars, mst_mgr,
&link_vars_start_index);
if (ret != 0)
@@ -1421,8 +1456,8 @@ enum dc_status dm_dp_mst_is_port_support_mode(
* with DSC enabled.
*/
if (is_dsc_common_config_possible(stream, &bw_range) &&
- aconnector->port->passthrough_aux) {
- mst_mgr = aconnector->port->mgr;
+ aconnector->mst_output_port->passthrough_aux) {
+ mst_mgr = aconnector->mst_output_port->mgr;
mutex_lock(&mst_mgr->lock);
cur_link_settings = stream->link->verified_link_cap;
@@ -1430,7 +1465,7 @@ enum dc_status dm_dp_mst_is_port_support_mode(
upper_link_bw_in_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
&cur_link_settings
);
- down_link_bw_in_kbps = kbps_from_pbn(aconnector->port->full_pbn);
+ down_link_bw_in_kbps = kbps_from_pbn(aconnector->mst_output_port->full_pbn);
/* pick the bottleneck */
end_to_end_bw_in_kbps = min(upper_link_bw_in_kbps,
@@ -1454,7 +1489,7 @@ enum dc_status dm_dp_mst_is_port_support_mode(
bpp = convert_dc_color_depth_into_bpc(stream->timing.display_color_depth) * 3;
pbn = drm_dp_calc_pbn_mode(stream->timing.pix_clk_100hz / 10, bpp, false);
- if (pbn > aconnector->port->full_pbn)
+ if (pbn > aconnector->mst_output_port->full_pbn)
return DC_FAIL_BANDWIDTH_VALIDATE;
#if defined(CONFIG_DRM_AMD_DC_DCN)
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
index 3c50b3ff7954..28fb1f02591a 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
@@ -67,7 +67,16 @@ static const uint32_t overlay_formats[] = {
DRM_FORMAT_RGBA8888,
DRM_FORMAT_XBGR8888,
DRM_FORMAT_ABGR8888,
- DRM_FORMAT_RGB565
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_NV21,
+ DRM_FORMAT_NV12,
+ DRM_FORMAT_P010
+};
+
+static const uint32_t video_formats[] = {
+ DRM_FORMAT_NV21,
+ DRM_FORMAT_NV12,
+ DRM_FORMAT_P010
};
static const u32 cursor_formats[] = {
@@ -1616,3 +1625,14 @@ int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
return 0;
}
+bool is_video_format(uint32_t format)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(video_formats); i++)
+ if (format == video_formats[i])
+ return true;
+
+ return false;
+}
+
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h
index 286981a2dd40..a4bee8528a51 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h
@@ -62,4 +62,5 @@ void fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
bool *per_pixel_alpha, bool *pre_multiplied_alpha,
bool *global_alpha, int *global_alpha_value);
+bool is_video_format(uint32_t format);
#endif
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
index 26291db0a3cf..d647f68fd563 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
@@ -122,6 +122,9 @@ bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
psr_config.allow_multi_disp_optimizations =
(amdgpu_dc_feature_mask & DC_PSR_ALLOW_MULTI_DISP_OPT);
+ if (!psr_su_set_dsc_slice_height(dc, link, stream, &psr_config))
+ return false;
+
ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
}
diff --git a/drivers/gpu/drm/amd/display/dc/Makefile b/drivers/gpu/drm/amd/display/dc/Makefile
index b9effadfc4bb..94f156d57220 100644
--- a/drivers/gpu/drm/amd/display/dc/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/Makefile
@@ -64,9 +64,8 @@ AMD_DC = $(addsuffix /Makefile, $(addprefix $(FULL_AMD_DISPLAY_PATH)/dc/,$(DC_LI
include $(AMD_DC)
-DISPLAY_CORE = dc.o dc_stat.o dc_link.o dc_resource.o dc_hw_sequencer.o dc_sink.o \
-dc_surface.o dc_link_dp.o dc_link_ddc.o dc_debug.o dc_stream.o \
-dc_link_enc_cfg.o dc_link_dpia.o dc_link_dpcd.o
+DISPLAY_CORE = dc.o dc_stat.o dc_resource.o dc_hw_sequencer.o dc_sink.o \
+dc_surface.o dc_debug.o dc_stream.o dc_link_enc_cfg.o dc_link_exports.o
DISPLAY_CORE += dc_vm_helper.o
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
index a1a00f432168..27af9d3c2b73 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
@@ -33,7 +33,6 @@
#include "include/gpio_service_interface.h"
#include "include/grph_object_ctrl_defs.h"
#include "include/bios_parser_interface.h"
-#include "include/i2caux_interface.h"
#include "include/logger_interface.h"
#include "command_table.h"
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
index 074e70a5c458..e381de2429fa 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
@@ -32,7 +32,6 @@
#include "dc_bios_types.h"
#include "include/grph_object_ctrl_defs.h"
#include "include/bios_parser_interface.h"
-#include "include/i2caux_interface.h"
#include "include/logger_interface.h"
#include "command_table2.h"
@@ -1698,14 +1697,15 @@ static enum bp_result bios_parser_enable_disp_power_gating(
static enum bp_result bios_parser_enable_lvtma_control(
struct dc_bios *dcb,
uint8_t uc_pwr_on,
- uint8_t panel_instance)
+ uint8_t panel_instance,
+ uint8_t bypass_panel_control_wait)
{
struct bios_parser *bp = BP_FROM_DCB(dcb);
if (!bp->cmd_tbl.enable_lvtma_control)
return BP_RESULT_FAILURE;
- return bp->cmd_tbl.enable_lvtma_control(bp, uc_pwr_on, panel_instance);
+ return bp->cmd_tbl.enable_lvtma_control(bp, uc_pwr_on, panel_instance, bypass_panel_control_wait);
}
static bool bios_parser_is_accelerated_mode(
@@ -2929,7 +2929,6 @@ static enum bp_result construct_integrated_info(
struct atom_common_table_header *header;
struct atom_data_revision revision;
- struct clock_voltage_caps temp = {0, 0};
uint32_t i;
uint32_t j;
@@ -3032,14 +3031,8 @@ static enum bp_result construct_integrated_info(
for (i = 1; i < NUMBER_OF_DISP_CLK_VOLTAGE; ++i) {
for (j = i; j > 0; --j) {
if (info->disp_clk_voltage[j].max_supported_clk <
- info->disp_clk_voltage[j-1].max_supported_clk
- ) {
- /* swap j and j - 1*/
- temp = info->disp_clk_voltage[j-1];
- info->disp_clk_voltage[j-1] =
- info->disp_clk_voltage[j];
- info->disp_clk_voltage[j] = temp;
- }
+ info->disp_clk_voltage[j-1].max_supported_clk)
+ swap(info->disp_clk_voltage[j-1], info->disp_clk_voltage[j]);
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
index f52f7ff7ead4..1ef9e4053bb7 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
@@ -986,7 +986,8 @@ static unsigned int get_smu_clock_info_v3_1(struct bios_parser *bp, uint8_t id)
static enum bp_result enable_lvtma_control(
struct bios_parser *bp,
uint8_t uc_pwr_on,
- uint8_t panel_instance);
+ uint8_t panel_instance,
+ uint8_t bypass_panel_control_wait);
static void init_enable_lvtma_control(struct bios_parser *bp)
{
@@ -998,7 +999,8 @@ static void init_enable_lvtma_control(struct bios_parser *bp)
static void enable_lvtma_control_dmcub(
struct dc_dmub_srv *dmcub,
uint8_t uc_pwr_on,
- uint8_t panel_instance)
+ uint8_t panel_instance,
+ uint8_t bypass_panel_control_wait)
{
union dmub_rb_cmd cmd;
@@ -1012,6 +1014,8 @@ static void enable_lvtma_control_dmcub(
uc_pwr_on;
cmd.lvtma_control.data.panel_inst =
panel_instance;
+ cmd.lvtma_control.data.bypass_panel_control_wait =
+ bypass_panel_control_wait;
dc_dmub_srv_cmd_queue(dmcub, &cmd);
dc_dmub_srv_cmd_execute(dmcub);
dc_dmub_srv_wait_idle(dmcub);
@@ -1021,7 +1025,8 @@ static void enable_lvtma_control_dmcub(
static enum bp_result enable_lvtma_control(
struct bios_parser *bp,
uint8_t uc_pwr_on,
- uint8_t panel_instance)
+ uint8_t panel_instance,
+ uint8_t bypass_panel_control_wait)
{
enum bp_result result = BP_RESULT_FAILURE;
@@ -1029,7 +1034,8 @@ static enum bp_result enable_lvtma_control(
bp->base.ctx->dc->debug.dmub_command_table) {
enable_lvtma_control_dmcub(bp->base.ctx->dmub_srv,
uc_pwr_on,
- panel_instance);
+ panel_instance,
+ bypass_panel_control_wait);
return BP_RESULT_OK;
}
return result;
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.h b/drivers/gpu/drm/amd/display/dc/bios/command_table2.h
index be060b4b87db..b6d09bf6cf72 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.h
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.h
@@ -96,7 +96,8 @@ struct cmd_tbl {
struct bios_parser *bp, uint8_t id);
enum bp_result (*enable_lvtma_control)(struct bios_parser *bp,
uint8_t uc_pwr_on,
- uint8_t panel_instance);
+ uint8_t panel_instance,
+ uint8_t bypass_panel_control_wait);
};
void dal_firmware_parser_init_cmd_tbl(struct bios_parser *bp);
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
index f276abb63bcd..69691daf4dbb 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
@@ -29,6 +29,7 @@
#include "dc_types.h"
#include "dccg.h"
#include "clk_mgr_internal.h"
+#include "link.h"
#include "dce100/dce_clk_mgr.h"
#include "dce110/dce110_clk_mgr.h"
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
index 3ce0ee0d012f..694a9d3d92ae 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
@@ -577,8 +577,7 @@ void dcn3_clk_mgr_construct(
void dcn3_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr)
{
- if (clk_mgr->base.bw_params)
- kfree(clk_mgr->base.bw_params);
+ kfree(clk_mgr->base.bw_params);
if (clk_mgr->wm_range_table)
dm_helpers_free_gpu_mem(clk_mgr->base.ctx, DC_MEM_ALLOC_TYPE_GART,
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
index 1c0569b1dc8f..f9e2e0c3095e 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
@@ -47,6 +47,7 @@
#include "dcn30/dcn30_clk_mgr.h"
#include "dc_dmub_srv.h"
+#include "link.h"
#include "logger_types.h"
#undef DC_LOGGER
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
index 20a06c04e4a1..89df7244b272 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
@@ -48,7 +48,7 @@
#include "dcn31/dcn31_clk_mgr.h"
#include "dc_dmub_srv.h"
-#include "dc_link_dp.h"
+#include "link.h"
#include "dcn314_smu.h"
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c
index f47cfe6b42bd..0765334f0825 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_smu.c
@@ -146,6 +146,9 @@ static int dcn314_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr,
if (msg_id == VBIOSSMC_MSG_TransferTableDram2Smu &&
param == TABLE_WATERMARKS)
DC_LOG_WARNING("Watermarks table not configured properly by SMU");
+ else if (msg_id == VBIOSSMC_MSG_SetHardMinDcfclkByFreq ||
+ msg_id == VBIOSSMC_MSG_SetMinDeepSleepDcfclk)
+ DC_LOG_WARNING("DCFCLK_DPM is not enabled by BIOS");
else
ASSERT(0);
REG_WRITE(MP1_SMN_C2PMSG_91, VBIOSSMC_Result_OK);
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
index 07edd9777edf..a737782b2840 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
@@ -46,7 +46,7 @@
#define DC_LOGGER \
clk_mgr->base.base.ctx->logger
-#include "dc_link_dp.h"
+#include "link.h"
#define TO_CLK_MGR_DCN315(clk_mgr)\
container_of(clk_mgr, struct clk_mgr_dcn315, base)
@@ -87,6 +87,16 @@ static int dcn315_get_active_display_cnt_wa(
return display_count;
}
+static bool should_disable_otg(struct pipe_ctx *pipe)
+{
+ bool ret = true;
+
+ if (pipe->stream->link->link_enc && pipe->stream->link->link_enc->funcs->is_dig_enabled &&
+ pipe->stream->link->link_enc->funcs->is_dig_enabled(pipe->stream->link->link_enc))
+ ret = false;
+ return ret;
+}
+
static void dcn315_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context, bool disable)
{
struct dc *dc = clk_mgr_base->ctx->dc;
@@ -98,12 +108,16 @@ static void dcn315_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state
if (pipe->top_pipe || pipe->prev_odm_pipe)
continue;
if (pipe->stream && (pipe->stream->dpms_off || pipe->plane_state == NULL ||
- dc_is_virtual_signal(pipe->stream->signal))) {
- if (disable) {
- pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
- reset_sync_context_for_pipe(dc, context, i);
- } else
- pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg);
+ dc_is_virtual_signal(pipe->stream->signal))) {
+
+ /* This w/a should not trigger when we have a dig active */
+ if (should_disable_otg(pipe)) {
+ if (disable) {
+ pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
+ reset_sync_context_for_pipe(dc, context, i);
+ } else
+ pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg);
+ }
}
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
index 3edc81e2d417..93db4dbee713 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
@@ -39,7 +39,7 @@
#include "dcn316_smu.h"
#include "dm_helpers.h"
#include "dc_dmub_srv.h"
-#include "dc_link_dp.h"
+#include "link.h"
// DCN316 this is CLK1 instance
#define MAX_INSTANCE 7
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
index 200fcec19186..61768bf726f8 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
@@ -33,7 +33,7 @@
#include "reg_helper.h"
#include "core_types.h"
#include "dm_helpers.h"
-#include "dc_link_dp.h"
+#include "link.h"
#include "atomfirmware.h"
#include "smu13_driver_if.h"
@@ -255,6 +255,94 @@ static void dcn32_update_dppclk_dispclk_freq(struct clk_mgr_internal *clk_mgr, s
}
}
+static void dcn32_update_clocks_update_dentist(
+ struct clk_mgr_internal *clk_mgr,
+ struct dc_state *context,
+ uint32_t old_dispclk_khz)
+{
+ uint32_t new_disp_divider = 0;
+ uint32_t old_disp_divider = 0;
+ uint32_t new_dispclk_wdivider = 0;
+ uint32_t old_dispclk_wdivider = 0;
+ uint32_t i;
+
+ if (old_dispclk_khz == 0 || clk_mgr->base.clks.dispclk_khz == 0)
+ return;
+
+ new_disp_divider = DENTIST_DIVIDER_RANGE_SCALE_FACTOR
+ * clk_mgr->base.dentist_vco_freq_khz / clk_mgr->base.clks.dispclk_khz;
+ old_disp_divider = DENTIST_DIVIDER_RANGE_SCALE_FACTOR
+ * clk_mgr->base.dentist_vco_freq_khz / old_dispclk_khz;
+
+ new_dispclk_wdivider = dentist_get_did_from_divider(new_disp_divider);
+ old_dispclk_wdivider = dentist_get_did_from_divider(old_disp_divider);
+
+ /* When changing divider to or from 127, some extra programming is required to prevent corruption */
+ if (old_dispclk_wdivider == 127 && new_dispclk_wdivider != 127) {
+ for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+ uint32_t fifo_level;
+ struct dccg *dccg = clk_mgr->base.ctx->dc->res_pool->dccg;
+ struct stream_encoder *stream_enc = pipe_ctx->stream_res.stream_enc;
+ int32_t N;
+ int32_t j;
+
+ if (!pipe_ctx->stream)
+ continue;
+ /* Virtual encoders don't have this function */
+ if (!stream_enc->funcs->get_fifo_cal_average_level)
+ continue;
+ fifo_level = stream_enc->funcs->get_fifo_cal_average_level(
+ stream_enc);
+ N = fifo_level / 4;
+ dccg->funcs->set_fifo_errdet_ovr_en(
+ dccg,
+ true);
+ for (j = 0; j < N - 4; j++)
+ dccg->funcs->otg_drop_pixel(
+ dccg,
+ pipe_ctx->stream_res.tg->inst);
+ dccg->funcs->set_fifo_errdet_ovr_en(
+ dccg,
+ false);
+ }
+ } else if (new_dispclk_wdivider == 127 && old_dispclk_wdivider != 127) {
+ /* request clock with 126 divider first */
+ uint32_t temp_disp_divider = dentist_get_divider_from_did(126);
+ uint32_t temp_dispclk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR * clk_mgr->base.dentist_vco_freq_khz) / temp_disp_divider;
+
+ if (clk_mgr->smu_present)
+ dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DISPCLK, khz_to_mhz_ceil(temp_dispclk_khz));
+
+ for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+ struct dccg *dccg = clk_mgr->base.ctx->dc->res_pool->dccg;
+ struct stream_encoder *stream_enc = pipe_ctx->stream_res.stream_enc;
+ uint32_t fifo_level;
+ int32_t N;
+ int32_t j;
+
+ if (!pipe_ctx->stream)
+ continue;
+ /* Virtual encoders don't have this function */
+ if (!stream_enc->funcs->get_fifo_cal_average_level)
+ continue;
+ fifo_level = stream_enc->funcs->get_fifo_cal_average_level(
+ stream_enc);
+ N = fifo_level / 4;
+ dccg->funcs->set_fifo_errdet_ovr_en(dccg, true);
+ for (j = 0; j < 12 - N; j++)
+ dccg->funcs->otg_add_pixel(dccg,
+ pipe_ctx->stream_res.tg->inst);
+ dccg->funcs->set_fifo_errdet_ovr_en(dccg, false);
+ }
+ }
+
+ /* do requested DISPCLK updates*/
+ if (clk_mgr->smu_present)
+ dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DISPCLK, khz_to_mhz_ceil(clk_mgr->base.clks.dispclk_khz));
+}
+
static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base,
struct dc_state *context,
bool safe_to_lower)
@@ -273,6 +361,7 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base,
bool p_state_change_support;
bool fclk_p_state_change_support;
int total_plane_count;
+ int old_dispclk_khz = clk_mgr_base->clks.dispclk_khz;
if (dc->work_arounds.skip_clock_update)
return;
@@ -396,9 +485,6 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base,
if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
- if (clk_mgr->smu_present)
- dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DISPCLK, khz_to_mhz_ceil(clk_mgr_base->clks.dispclk_khz));
-
update_dispclk = true;
}
@@ -418,13 +504,13 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base,
if (dpp_clock_lowered) {
/* if clock is being lowered, increase DTO before lowering refclk */
dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
- dcn20_update_clocks_update_dentist(clk_mgr, context);
+ dcn32_update_clocks_update_dentist(clk_mgr, context, old_dispclk_khz);
if (clk_mgr->smu_present)
dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DPPCLK, khz_to_mhz_ceil(clk_mgr_base->clks.dppclk_khz));
} else {
/* if clock is being raised, increase refclk before lowering DTO */
if (update_dppclk || update_dispclk)
- dcn20_update_clocks_update_dentist(clk_mgr, context);
+ dcn32_update_clocks_update_dentist(clk_mgr, context, old_dispclk_khz);
/* There is a check inside dcn20_update_clocks_update_dpp_dto which ensures
* that we do not lower dto when it is not safe to lower. We do not need to
* compare the current and new dppclk before calling this function.
@@ -783,8 +869,7 @@ void dcn32_clk_mgr_construct(
void dcn32_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr)
{
- if (clk_mgr->base.bw_params)
- kfree(clk_mgr->base.bw_params);
+ kfree(clk_mgr->base.bw_params);
if (clk_mgr->wm_range_table)
dm_helpers_free_gpu_mem(clk_mgr->base.ctx, DC_MEM_ALLOC_TYPE_GART,
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 0cb8d1f934d1..1c218c526650 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -33,6 +33,7 @@
#include "resource.h"
+#include "gpio_service_interface.h"
#include "clk_mgr.h"
#include "clock_source.h"
#include "dc_bios_types.h"
@@ -53,11 +54,10 @@
#include "link_enc_cfg.h"
#include "dc_link.h"
-#include "dc_link_ddc.h"
+#include "link.h"
#include "dm_helpers.h"
#include "mem_input.h"
-#include "dc_link_dp.h"
#include "dc_dmub_srv.h"
#include "dsc.h"
@@ -68,8 +68,6 @@
#include "dmub/dmub_srv.h"
-#include "i2caux_interface.h"
-
#include "dce/dmub_psr.h"
#include "dce/dmub_hw_lock_mgr.h"
@@ -382,16 +380,18 @@ static void dc_perf_trace_destroy(struct dc_perf_trace **perf_trace)
}
/**
- * dc_stream_adjust_vmin_vmax:
+ * dc_stream_adjust_vmin_vmax - look up pipe context & update parts of DRR
+ * @dc: dc reference
+ * @stream: Initial dc stream state
+ * @adjust: Updated parameters for vertical_total_min and vertical_total_max
*
* Looks up the pipe context of dc_stream_state and updates the
* vertical_total_min and vertical_total_max of the DRR, Dynamic Refresh
* Rate, which is a power-saving feature that targets reducing panel
* refresh rate while the screen is static
*
- * @dc: dc reference
- * @stream: Initial dc stream state
- * @adjust: Updated parameters for vertical_total_min and vertical_total_max
+ * Return: %true if the pipe context is found and adjusted;
+ * %false if the pipe context is not found.
*/
bool dc_stream_adjust_vmin_vmax(struct dc *dc,
struct dc_stream_state *stream,
@@ -419,14 +419,17 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc,
}
/**
- * dc_stream_get_last_used_drr_vtotal - dc_stream_get_last_vrr_vtotal
+ * dc_stream_get_last_used_drr_vtotal - Looks up the pipe context of
+ * dc_stream_state and gets the last VTOTAL used by DRR (Dynamic Refresh Rate)
*
* @dc: [in] dc reference
* @stream: [in] Initial dc stream state
- * @adjust: [in] Updated parameters for vertical_total_min and
+ * @refresh_rate: [in] new refresh_rate
*
- * Looks up the pipe context of dc_stream_state and gets the last VTOTAL used
- * by DRR (Dynamic Refresh Rate)
+ * Return: %true if the pipe context is found and there is an associated
+ * timing_generator for the DC;
+ * %false if the pipe context is not found or there is no
+ * timing_generator for the DC.
*/
bool dc_stream_get_last_used_drr_vtotal(struct dc *dc,
struct dc_stream_state *stream,
@@ -518,14 +521,15 @@ dc_stream_forward_dmcu_crc_window(struct dmcu *dmcu,
}
bool
-dc_stream_forward_crc_window(struct dc *dc,
- struct rect *rect, struct dc_stream_state *stream, bool is_stop)
+dc_stream_forward_crc_window(struct dc_stream_state *stream,
+ struct rect *rect, bool is_stop)
{
struct dmcu *dmcu;
struct dc_dmub_srv *dmub_srv;
struct otg_phy_mux mux_mapping;
struct pipe_ctx *pipe;
int i;
+ struct dc *dc = stream->ctx->dc;
for (i = 0; i < MAX_PIPES; i++) {
pipe = &dc->current_state->res_ctx.pipe_ctx[i];
@@ -566,7 +570,10 @@ dc_stream_forward_crc_window(struct dc *dc,
* once.
*
* By default, only CRC0 is configured, and the entire frame is used to
- * calculate the crc.
+ * calculate the CRC.
+ *
+ * Return: %false if the stream is not found or CRC capture is not supported;
+ * %true if the stream has been configured.
*/
bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
struct crc_params *crc_window, bool enable, bool continuous)
@@ -635,7 +642,7 @@ bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
* dc_stream_configure_crc needs to be called beforehand to enable CRCs.
*
* Return:
- * false if stream is not found, or if CRCs are not enabled.
+ * %false if stream is not found, or if CRCs are not enabled.
*/
bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream,
uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb)
@@ -862,6 +869,7 @@ static bool dc_construct_ctx(struct dc *dc,
dc_ctx->perf_trace = dc_perf_trace_create();
if (!dc_ctx->perf_trace) {
+ kfree(dc_ctx);
ASSERT_CRITICAL(false);
return false;
}
@@ -1191,7 +1199,7 @@ static void disable_vbios_mode_if_required(
pipe->stream_res.pix_clk_params.requested_pix_clk_100hz;
if (pix_clk_100hz != requested_pix_clk_100hz) {
- core_link_disable_stream(pipe);
+ link_set_dpms_off(pipe);
pipe->stream->dpms_off = false;
}
}
@@ -1299,7 +1307,7 @@ static void detect_edp_presence(struct dc *dc)
if (dc->config.edp_not_connected) {
edp_link->edp_sink_present = false;
} else {
- dc_link_detect_sink(edp_link, &type);
+ dc_link_detect_connection_type(edp_link, &type);
edp_link->edp_sink_present = (type != dc_connection_none);
}
}
@@ -1650,7 +1658,7 @@ bool dc_validate_boot_timing(const struct dc *dc,
return false;
}
- if (is_edp_ilr_optimization_required(link, crtc_timing)) {
+ if (link_is_edp_ilr_optimization_required(link, crtc_timing)) {
DC_LOG_EVENT_LINK_TRAINING("Seamless boot disabled to optimize eDP link rate\n");
return false;
}
@@ -1740,6 +1748,8 @@ void dc_z10_save_init(struct dc *dc)
*
* Applies given context to the hardware and copy it into current context.
* It's up to the user to release the src context afterwards.
+ *
+ * Return: an enum dc_status result code for the operation
*/
static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *context)
{
@@ -2007,8 +2017,9 @@ bool dc_commit_state(struct dc *dc, struct dc_state *context)
return result == DC_OK;
}
- if (!streams_changed(dc, context->streams, context->stream_count))
+ if (!streams_changed(dc, context->streams, context->stream_count)) {
return DC_OK;
+ }
DC_LOG_DC("%s: %d streams\n",
__func__, context->stream_count);
@@ -2948,6 +2959,9 @@ static void copy_stream_update_to_stream(struct dc *dc,
if (update->vsp_infopacket)
stream->vsp_infopacket = *update->vsp_infopacket;
+ if (update->adaptive_sync_infopacket)
+ stream->adaptive_sync_infopacket = *update->adaptive_sync_infopacket;
+
if (update->dither_option)
stream->dither_option = *update->dither_option;
@@ -3153,12 +3167,13 @@ static void commit_planes_do_stream_update(struct dc *dc,
stream_update->vsc_infopacket ||
stream_update->vsp_infopacket ||
stream_update->hfvsif_infopacket ||
+ stream_update->adaptive_sync_infopacket ||
stream_update->vtem_infopacket) {
resource_build_info_frame(pipe_ctx);
dc->hwss.update_info_frame(pipe_ctx);
if (dc_is_dp_signal(pipe_ctx->stream->signal))
- dp_source_sequence_trace(pipe_ctx->stream->link, DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME);
+ link_dp_source_sequence_trace(pipe_ctx->stream->link, DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME);
}
if (stream_update->hdr_static_metadata &&
@@ -3194,14 +3209,14 @@ static void commit_planes_do_stream_update(struct dc *dc,
continue;
if (stream_update->dsc_config)
- dp_update_dsc_config(pipe_ctx);
+ link_update_dsc_config(pipe_ctx);
if (stream_update->mst_bw_update) {
if (stream_update->mst_bw_update->is_increase)
- dc_link_increase_mst_payload(pipe_ctx, stream_update->mst_bw_update->mst_stream_bw);
- else
- dc_link_reduce_mst_payload(pipe_ctx, stream_update->mst_bw_update->mst_stream_bw);
- }
+ link_increase_mst_payload(pipe_ctx, stream_update->mst_bw_update->mst_stream_bw);
+ else
+ link_reduce_mst_payload(pipe_ctx, stream_update->mst_bw_update->mst_stream_bw);
+ }
if (stream_update->pending_test_pattern) {
dc_link_dp_set_test_pattern(stream->link,
@@ -3214,7 +3229,7 @@ static void commit_planes_do_stream_update(struct dc *dc,
if (stream_update->dpms_off) {
if (*stream_update->dpms_off) {
- core_link_disable_stream(pipe_ctx);
+ link_set_dpms_off(pipe_ctx);
/* for dpms, keep acquired resources*/
if (pipe_ctx->stream_res.audio && !dc->debug.az_endpoint_mute_only)
pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
@@ -3224,7 +3239,7 @@ static void commit_planes_do_stream_update(struct dc *dc,
} else {
if (get_seamless_boot_stream_count(context) == 0)
dc->hwss.prepare_bandwidth(dc, dc->current_state);
- core_link_enable_stream(dc->current_state, pipe_ctx);
+ link_set_dpms_on(dc->current_state, pipe_ctx);
}
}
@@ -3325,6 +3340,7 @@ static void commit_planes_for_stream(struct dc *dc,
struct pipe_ctx *top_pipe_to_program = NULL;
bool should_lock_all_pipes = (update_type != UPDATE_TYPE_FAST);
bool subvp_prev_use = false;
+ bool subvp_curr_use = false;
// Once we apply the new subvp context to hardware it won't be in the
// dc->current_state anymore, so we have to cache it before we apply
@@ -3334,6 +3350,21 @@ static void commit_planes_for_stream(struct dc *dc,
dc_z10_restore(dc);
+ if (update_type == UPDATE_TYPE_FULL) {
+ /* wait for all double-buffer activity to clear on all pipes */
+ int pipe_idx;
+
+ for (pipe_idx = 0; pipe_idx < dc->res_pool->pipe_count; pipe_idx++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[pipe_idx];
+
+ if (!pipe_ctx->stream)
+ continue;
+
+ if (pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear)
+ pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear(pipe_ctx->stream_res.tg);
+ }
+ }
+
if (get_seamless_boot_stream_count(context) > 0 && surface_count > 0) {
/* Optimize seamless boot flag keeps clocks and watermarks high until
* first flip. After first flip, optimization is required to lower
@@ -3381,6 +3412,15 @@ static void commit_planes_for_stream(struct dc *dc,
break;
}
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
+ subvp_curr_use = true;
+ break;
+ }
+ }
+
if (stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE) {
struct pipe_ctx *mpcc_pipe;
struct pipe_ctx *odm_pipe;
@@ -3652,42 +3692,22 @@ static void commit_planes_for_stream(struct dc *dc,
top_pipe_to_program->stream_res.tg);
}
- /* For phantom pipe OTG enable, it has to be done after any previous pipe
- * that was in use has already been programmed at gotten its double buffer
- * update for "disable".
- */
- if (update_type != UPDATE_TYPE_FAST) {
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
- struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
-
- /* If an active, non-phantom pipe is being transitioned into a phantom
- * pipe, wait for the double buffer update to complete first before we do
- * ANY phantom pipe programming.
- */
- if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM &&
- old_pipe->stream && old_pipe->stream->mall_stream_config.type != SUBVP_PHANTOM) {
- old_pipe->stream_res.tg->funcs->wait_for_state(
- old_pipe->stream_res.tg,
- CRTC_STATE_VBLANK);
- old_pipe->stream_res.tg->funcs->wait_for_state(
- old_pipe->stream_res.tg,
- CRTC_STATE_VACTIVE);
- }
+ if (subvp_curr_use) {
+ /* If enabling subvp or transitioning from subvp->subvp, enable the
+ * phantom streams before we program front end for the phantom pipes.
+ */
+ if (update_type != UPDATE_TYPE_FAST) {
+ if (dc->hwss.enable_phantom_streams)
+ dc->hwss.enable_phantom_streams(dc, context);
}
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
+ }
- if ((new_pipe->stream && new_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) ||
- subvp_prev_use) {
- // If old context or new context has phantom pipes, apply
- // the phantom timings now. We can't change the phantom
- // pipe configuration safely without driver acquiring
- // the DMCUB lock first.
- dc->hwss.apply_ctx_to_hw(dc, context);
- break;
- }
- }
+ if (subvp_prev_use && !subvp_curr_use) {
+ /* If disabling subvp, disable phantom streams after front end
+ * programming has completed (we turn on phantom OTG in order
+ * to complete the plane disable for phantom pipes).
+ */
+ dc->hwss.apply_ctx_to_hw(dc, context);
}
if (update_type != UPDATE_TYPE_FAST)
@@ -4285,7 +4305,7 @@ void dc_resume(struct dc *dc)
uint32_t i;
for (i = 0; i < dc->link_count; i++)
- core_link_resume(dc->links[i]);
+ link_resume(dc->links[i]);
}
bool dc_is_dmcu_initialized(struct dc *dc)
@@ -4704,7 +4724,7 @@ bool dc_enable_dmub_notifications(struct dc *dc)
/**
* dc_enable_dmub_outbox - Enables DMUB unsolicited notification
*
- * dc: [in] dc structure
+ * @dc: [in] dc structure
*
* Enables DMUB unsolicited notifications to x86 via outbox.
*/
@@ -4905,8 +4925,8 @@ enum dc_status dc_process_dmub_set_mst_slots(const struct dc *dc,
/**
* dc_process_dmub_dpia_hpd_int_enable - Submits DPIA DPD interruption
*
- * @dc [in]: dc structure
- * @hpd_int_enable [in]: 1 for hpd int enable, 0 to disable
+ * @dc: [in] dc structure
+ * @hpd_int_enable: [in] 1 for hpd int enable, 0 to disable
*
* Submits dpia hpd int enable command to dmub via inbox message
*/
@@ -4987,7 +5007,7 @@ void dc_notify_vsync_int_state(struct dc *dc, struct dc_stream_state *stream, bo
}
/**
- * dc_extended_blank_supported 0 Decide whether extended blank is supported
+ * dc_extended_blank_supported - Decide whether extended blank is supported
*
* @dc: [in] Current DC state
*
@@ -4996,7 +5016,7 @@ void dc_notify_vsync_int_state(struct dc *dc, struct dc_stream_state *stream, bo
* ability to enter z9/z10.
*
* Return:
- * Indicate whether extended blank is supported (true or false)
+ * Indicate whether extended blank is supported (%true or %false)
*/
bool dc_extended_blank_supported(struct dc *dc)
{
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index c88f044666fe..c26e7258a91c 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -23,4949 +23,5 @@
*
*/
-#include <linux/slab.h>
-
-#include "dm_services.h"
-#include "atomfirmware.h"
-#include "dm_helpers.h"
-#include "dc.h"
-#include "grph_object_id.h"
-#include "gpio_service_interface.h"
-#include "core_status.h"
-#include "dc_link_dp.h"
-#include "dc_link_dpia.h"
-#include "dc_link_ddc.h"
-#include "link_hwss.h"
-#include "opp.h"
-
-#include "link_encoder.h"
-#include "hw_sequencer.h"
-#include "resource.h"
-#include "abm.h"
-#include "fixed31_32.h"
-#include "dpcd_defs.h"
-#include "dmcu.h"
-#include "hw/clk_mgr.h"
-#include "dce/dmub_psr.h"
-#include "dmub/dmub_srv.h"
-#include "inc/hw/panel_cntl.h"
-#include "inc/link_enc_cfg.h"
-#include "inc/link_dpcd.h"
-#include "link/link_dp_trace.h"
-
-#include "dc/dcn30/dcn30_vpg.h"
-
-#define DC_LOGGER_INIT(logger)
-
-#define LINK_INFO(...) \
- DC_LOG_HW_HOTPLUG( \
- __VA_ARGS__)
-
-#define RETIMER_REDRIVER_INFO(...) \
- DC_LOG_RETIMER_REDRIVER( \
- __VA_ARGS__)
-
-/*******************************************************************************
- * Private functions
- ******************************************************************************/
-static void dc_link_destruct(struct dc_link *link)
-{
- int i;
-
- if (link->hpd_gpio) {
- dal_gpio_destroy_irq(&link->hpd_gpio);
- link->hpd_gpio = NULL;
- }
-
- if (link->ddc)
- dal_ddc_service_destroy(&link->ddc);
-
- if (link->panel_cntl)
- link->panel_cntl->funcs->destroy(&link->panel_cntl);
-
- if (link->link_enc) {
- /* Update link encoder resource tracking variables. These are used for
- * the dynamic assignment of link encoders to streams. Virtual links
- * are not assigned encoder resources on creation.
- */
- if (link->link_id.id != CONNECTOR_ID_VIRTUAL) {
- link->dc->res_pool->link_encoders[link->eng_id - ENGINE_ID_DIGA] = NULL;
- link->dc->res_pool->dig_link_enc_count--;
- }
- link->link_enc->funcs->destroy(&link->link_enc);
- }
-
- if (link->local_sink)
- dc_sink_release(link->local_sink);
-
- for (i = 0; i < link->sink_count; ++i)
- dc_sink_release(link->remote_sinks[i]);
-}
-
-struct gpio *get_hpd_gpio(struct dc_bios *dcb,
- struct graphics_object_id link_id,
- struct gpio_service *gpio_service)
-{
- enum bp_result bp_result;
- struct graphics_object_hpd_info hpd_info;
- struct gpio_pin_info pin_info;
-
- if (dcb->funcs->get_hpd_info(dcb, link_id, &hpd_info) != BP_RESULT_OK)
- return NULL;
-
- bp_result = dcb->funcs->get_gpio_pin_info(dcb,
- hpd_info.hpd_int_gpio_uid, &pin_info);
-
- if (bp_result != BP_RESULT_OK) {
- ASSERT(bp_result == BP_RESULT_NORECORD);
- return NULL;
- }
-
- return dal_gpio_service_create_irq(gpio_service,
- pin_info.offset,
- pin_info.mask);
-}
-
-/*
- * Function: program_hpd_filter
- *
- * @brief
- * Programs HPD filter on associated HPD line
- *
- * @param [in] delay_on_connect_in_ms: Connect filter timeout
- * @param [in] delay_on_disconnect_in_ms: Disconnect filter timeout
- *
- * @return
- * true on success, false otherwise
- */
-static bool program_hpd_filter(const struct dc_link *link)
-{
- bool result = false;
- struct gpio *hpd;
- int delay_on_connect_in_ms = 0;
- int delay_on_disconnect_in_ms = 0;
-
- if (link->is_hpd_filter_disabled)
- return false;
- /* Verify feature is supported */
- switch (link->connector_signal) {
- case SIGNAL_TYPE_DVI_SINGLE_LINK:
- case SIGNAL_TYPE_DVI_DUAL_LINK:
- case SIGNAL_TYPE_HDMI_TYPE_A:
- /* Program hpd filter */
- delay_on_connect_in_ms = 500;
- delay_on_disconnect_in_ms = 100;
- break;
- case SIGNAL_TYPE_DISPLAY_PORT:
- case SIGNAL_TYPE_DISPLAY_PORT_MST:
- /* Program hpd filter to allow DP signal to settle */
- /* 500: not able to detect MST <-> SST switch as HPD is low for
- * only 100ms on DELL U2413
- * 0: some passive dongle still show aux mode instead of i2c
- * 20-50: not enough to hide bouncing HPD with passive dongle.
- * also see intermittent i2c read issues.
- */
- delay_on_connect_in_ms = 80;
- delay_on_disconnect_in_ms = 0;
- break;
- case SIGNAL_TYPE_LVDS:
- case SIGNAL_TYPE_EDP:
- default:
- /* Don't program hpd filter */
- return false;
- }
-
- /* Obtain HPD handle */
- hpd = get_hpd_gpio(link->ctx->dc_bios, link->link_id,
- link->ctx->gpio_service);
-
- if (!hpd)
- return result;
-
- /* Setup HPD filtering */
- if (dal_gpio_open(hpd, GPIO_MODE_INTERRUPT) == GPIO_RESULT_OK) {
- struct gpio_hpd_config config;
-
- config.delay_on_connect = delay_on_connect_in_ms;
- config.delay_on_disconnect = delay_on_disconnect_in_ms;
-
- dal_irq_setup_hpd_filter(hpd, &config);
-
- dal_gpio_close(hpd);
-
- result = true;
- } else {
- ASSERT_CRITICAL(false);
- }
-
- /* Release HPD handle */
- dal_gpio_destroy_irq(&hpd);
-
- return result;
-}
-
-bool dc_link_wait_for_t12(struct dc_link *link)
-{
- if (link->connector_signal == SIGNAL_TYPE_EDP && link->dc->hwss.edp_wait_for_T12) {
- link->dc->hwss.edp_wait_for_T12(link);
-
- return true;
- }
-
- return false;
-}
-
-/**
- * dc_link_detect_sink() - Determine if there is a sink connected
- *
- * @link: pointer to the dc link
- * @type: Returned connection type
- * Does not detect downstream devices, such as MST sinks
- * or display connected through active dongles
- */
-bool dc_link_detect_sink(struct dc_link *link, enum dc_connection_type *type)
-{
- uint32_t is_hpd_high = 0;
- struct gpio *hpd_pin;
-
- if (link->connector_signal == SIGNAL_TYPE_LVDS) {
- *type = dc_connection_single;
- return true;
- }
-
- if (link->connector_signal == SIGNAL_TYPE_EDP) {
- /*in case it is not on*/
- if (!link->dc->config.edp_no_power_sequencing)
- link->dc->hwss.edp_power_control(link, true);
- link->dc->hwss.edp_wait_for_hpd_ready(link, true);
- }
-
- /* Link may not have physical HPD pin. */
- if (link->ep_type != DISPLAY_ENDPOINT_PHY) {
- if (link->is_hpd_pending || !dc_link_dpia_query_hpd_status(link))
- *type = dc_connection_none;
- else
- *type = dc_connection_single;
-
- return true;
- }
-
- /* todo: may need to lock gpio access */
- hpd_pin = get_hpd_gpio(link->ctx->dc_bios, link->link_id,
- link->ctx->gpio_service);
- if (!hpd_pin)
- goto hpd_gpio_failure;
-
- dal_gpio_open(hpd_pin, GPIO_MODE_INTERRUPT);
- dal_gpio_get_value(hpd_pin, &is_hpd_high);
- dal_gpio_close(hpd_pin);
- dal_gpio_destroy_irq(&hpd_pin);
-
- if (is_hpd_high) {
- *type = dc_connection_single;
- /* TODO: need to do the actual detection */
- } else {
- *type = dc_connection_none;
- }
-
- return true;
-
-hpd_gpio_failure:
- return false;
-}
-
-static enum ddc_transaction_type get_ddc_transaction_type(enum signal_type sink_signal)
-{
- enum ddc_transaction_type transaction_type = DDC_TRANSACTION_TYPE_NONE;
-
- switch (sink_signal) {
- case SIGNAL_TYPE_DVI_SINGLE_LINK:
- case SIGNAL_TYPE_DVI_DUAL_LINK:
- case SIGNAL_TYPE_HDMI_TYPE_A:
- case SIGNAL_TYPE_LVDS:
- case SIGNAL_TYPE_RGB:
- transaction_type = DDC_TRANSACTION_TYPE_I2C;
- break;
-
- case SIGNAL_TYPE_DISPLAY_PORT:
- case SIGNAL_TYPE_EDP:
- transaction_type = DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
- break;
-
- case SIGNAL_TYPE_DISPLAY_PORT_MST:
- /* MST does not use I2COverAux, but there is the
- * SPECIAL use case for "immediate dwnstrm device
- * access" (EPR#370830).
- */
- transaction_type = DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
- break;
-
- default:
- break;
- }
-
- return transaction_type;
-}
-
-static enum signal_type get_basic_signal_type(struct graphics_object_id encoder,
- struct graphics_object_id downstream)
-{
- if (downstream.type == OBJECT_TYPE_CONNECTOR) {
- switch (downstream.id) {
- case CONNECTOR_ID_SINGLE_LINK_DVII:
- switch (encoder.id) {
- case ENCODER_ID_INTERNAL_DAC1:
- case ENCODER_ID_INTERNAL_KLDSCP_DAC1:
- case ENCODER_ID_INTERNAL_DAC2:
- case ENCODER_ID_INTERNAL_KLDSCP_DAC2:
- return SIGNAL_TYPE_RGB;
- default:
- return SIGNAL_TYPE_DVI_SINGLE_LINK;
- }
- break;
- case CONNECTOR_ID_DUAL_LINK_DVII:
- {
- switch (encoder.id) {
- case ENCODER_ID_INTERNAL_DAC1:
- case ENCODER_ID_INTERNAL_KLDSCP_DAC1:
- case ENCODER_ID_INTERNAL_DAC2:
- case ENCODER_ID_INTERNAL_KLDSCP_DAC2:
- return SIGNAL_TYPE_RGB;
- default:
- return SIGNAL_TYPE_DVI_DUAL_LINK;
- }
- }
- break;
- case CONNECTOR_ID_SINGLE_LINK_DVID:
- return SIGNAL_TYPE_DVI_SINGLE_LINK;
- case CONNECTOR_ID_DUAL_LINK_DVID:
- return SIGNAL_TYPE_DVI_DUAL_LINK;
- case CONNECTOR_ID_VGA:
- return SIGNAL_TYPE_RGB;
- case CONNECTOR_ID_HDMI_TYPE_A:
- return SIGNAL_TYPE_HDMI_TYPE_A;
- case CONNECTOR_ID_LVDS:
- return SIGNAL_TYPE_LVDS;
- case CONNECTOR_ID_DISPLAY_PORT:
- case CONNECTOR_ID_USBC:
- return SIGNAL_TYPE_DISPLAY_PORT;
- case CONNECTOR_ID_EDP:
- return SIGNAL_TYPE_EDP;
- default:
- return SIGNAL_TYPE_NONE;
- }
- } else if (downstream.type == OBJECT_TYPE_ENCODER) {
- switch (downstream.id) {
- case ENCODER_ID_EXTERNAL_NUTMEG:
- case ENCODER_ID_EXTERNAL_TRAVIS:
- return SIGNAL_TYPE_DISPLAY_PORT;
- default:
- return SIGNAL_TYPE_NONE;
- }
- }
-
- return SIGNAL_TYPE_NONE;
-}
-
-/*
- * dc_link_is_dp_sink_present() - Check if there is a native DP
- * or passive DP-HDMI dongle connected
- */
-bool dc_link_is_dp_sink_present(struct dc_link *link)
-{
- enum gpio_result gpio_result;
- uint32_t clock_pin = 0;
- uint8_t retry = 0;
- struct ddc *ddc;
-
- enum connector_id connector_id =
- dal_graphics_object_id_get_connector_id(link->link_id);
-
- bool present =
- ((connector_id == CONNECTOR_ID_DISPLAY_PORT) ||
- (connector_id == CONNECTOR_ID_EDP) ||
- (connector_id == CONNECTOR_ID_USBC));
-
- ddc = dal_ddc_service_get_ddc_pin(link->ddc);
-
- if (!ddc) {
- BREAK_TO_DEBUGGER();
- return present;
- }
-
- /* Open GPIO and set it to I2C mode */
- /* Note: this GpioMode_Input will be converted
- * to GpioConfigType_I2cAuxDualMode in GPIO component,
- * which indicates we need additional delay
- */
-
- if (dal_ddc_open(ddc, GPIO_MODE_INPUT,
- GPIO_DDC_CONFIG_TYPE_MODE_I2C) != GPIO_RESULT_OK) {
- dal_ddc_close(ddc);
-
- return present;
- }
-
- /*
- * Read GPIO: DP sink is present if both clock and data pins are zero
- *
- * [W/A] plug-unplug DP cable, sometimes customer board has
- * one short pulse on clk_pin(1V, < 1ms). DP will be config to HDMI/DVI
- * then monitor can't br light up. Add retry 3 times
- * But in real passive dongle, it need additional 3ms to detect
- */
- do {
- gpio_result = dal_gpio_get_value(ddc->pin_clock, &clock_pin);
- ASSERT(gpio_result == GPIO_RESULT_OK);
- if (clock_pin)
- udelay(1000);
- else
- break;
- } while (retry++ < 3);
-
- present = (gpio_result == GPIO_RESULT_OK) && !clock_pin;
-
- dal_ddc_close(ddc);
-
- return present;
-}
-
-/*
- * @brief
- * Detect output sink type
- */
-static enum signal_type link_detect_sink(struct dc_link *link,
- enum dc_detect_reason reason)
-{
- enum signal_type result;
- struct graphics_object_id enc_id;
-
- if (link->is_dig_mapping_flexible)
- enc_id = (struct graphics_object_id){.id = ENCODER_ID_UNKNOWN};
- else
- enc_id = link->link_enc->id;
- result = get_basic_signal_type(enc_id, link->link_id);
-
- /* Use basic signal type for link without physical connector. */
- if (link->ep_type != DISPLAY_ENDPOINT_PHY)
- return result;
-
- /* Internal digital encoder will detect only dongles
- * that require digital signal
- */
-
- /* Detection mechanism is different
- * for different native connectors.
- * LVDS connector supports only LVDS signal;
- * PCIE is a bus slot, the actual connector needs to be detected first;
- * eDP connector supports only eDP signal;
- * HDMI should check straps for audio
- */
-
- /* PCIE detects the actual connector on add-on board */
- if (link->link_id.id == CONNECTOR_ID_PCIE) {
- /* ZAZTODO implement PCIE add-on card detection */
- }
-
- switch (link->link_id.id) {
- case CONNECTOR_ID_HDMI_TYPE_A: {
- /* check audio support:
- * if native HDMI is not supported, switch to DVI
- */
- struct audio_support *aud_support =
- &link->dc->res_pool->audio_support;
-
- if (!aud_support->hdmi_audio_native)
- if (link->link_id.id == CONNECTOR_ID_HDMI_TYPE_A)
- result = SIGNAL_TYPE_DVI_SINGLE_LINK;
- }
- break;
- case CONNECTOR_ID_DISPLAY_PORT:
- case CONNECTOR_ID_USBC: {
- /* DP HPD short pulse. Passive DP dongle will not
- * have short pulse
- */
- if (reason != DETECT_REASON_HPDRX) {
- /* Check whether DP signal detected: if not -
- * we assume signal is DVI; it could be corrected
- * to HDMI after dongle detection
- */
- if (!dm_helpers_is_dp_sink_present(link))
- result = SIGNAL_TYPE_DVI_SINGLE_LINK;
- }
- }
- break;
- default:
- break;
- }
-
- return result;
-}
-
-static enum signal_type decide_signal_from_strap_and_dongle_type(enum display_dongle_type dongle_type,
- struct audio_support *audio_support)
-{
- enum signal_type signal = SIGNAL_TYPE_NONE;
-
- switch (dongle_type) {
- case DISPLAY_DONGLE_DP_HDMI_DONGLE:
- if (audio_support->hdmi_audio_on_dongle)
- signal = SIGNAL_TYPE_HDMI_TYPE_A;
- else
- signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
- break;
- case DISPLAY_DONGLE_DP_DVI_DONGLE:
- signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
- break;
- case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
- if (audio_support->hdmi_audio_native)
- signal = SIGNAL_TYPE_HDMI_TYPE_A;
- else
- signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
- break;
- default:
- signal = SIGNAL_TYPE_NONE;
- break;
- }
-
- return signal;
-}
-
-static enum signal_type dp_passive_dongle_detection(struct ddc_service *ddc,
- struct display_sink_capability *sink_cap,
- struct audio_support *audio_support)
-{
- dal_ddc_service_i2c_query_dp_dual_mode_adaptor(ddc, sink_cap);
-
- return decide_signal_from_strap_and_dongle_type(sink_cap->dongle_type,
- audio_support);
-}
-
-static void link_disconnect_sink(struct dc_link *link)
-{
- if (link->local_sink) {
- dc_sink_release(link->local_sink);
- link->local_sink = NULL;
- }
-
- link->dpcd_sink_count = 0;
- //link->dpcd_caps.dpcd_rev.raw = 0;
-}
-
-static void link_disconnect_remap(struct dc_sink *prev_sink, struct dc_link *link)
-{
- dc_sink_release(link->local_sink);
- link->local_sink = prev_sink;
-}
-
-#if defined(CONFIG_DRM_AMD_DC_HDCP)
-bool dc_link_is_hdcp14(struct dc_link *link, enum signal_type signal)
-{
- bool ret = false;
-
- switch (signal) {
- case SIGNAL_TYPE_DISPLAY_PORT:
- case SIGNAL_TYPE_DISPLAY_PORT_MST:
- ret = link->hdcp_caps.bcaps.bits.HDCP_CAPABLE;
- break;
- case SIGNAL_TYPE_DVI_SINGLE_LINK:
- case SIGNAL_TYPE_DVI_DUAL_LINK:
- case SIGNAL_TYPE_HDMI_TYPE_A:
- /* HDMI doesn't tell us its HDCP(1.4) capability, so assume to always be capable,
- * we can poll for bksv but some displays have an issue with this. Since its so rare
- * for a display to not be 1.4 capable, this assumtion is ok
- */
- ret = true;
- break;
- default:
- break;
- }
- return ret;
-}
-
-bool dc_link_is_hdcp22(struct dc_link *link, enum signal_type signal)
-{
- bool ret = false;
-
- switch (signal) {
- case SIGNAL_TYPE_DISPLAY_PORT:
- case SIGNAL_TYPE_DISPLAY_PORT_MST:
- ret = (link->hdcp_caps.bcaps.bits.HDCP_CAPABLE &&
- link->hdcp_caps.rx_caps.fields.byte0.hdcp_capable &&
- (link->hdcp_caps.rx_caps.fields.version == 0x2)) ? 1 : 0;
- break;
- case SIGNAL_TYPE_DVI_SINGLE_LINK:
- case SIGNAL_TYPE_DVI_DUAL_LINK:
- case SIGNAL_TYPE_HDMI_TYPE_A:
- ret = (link->hdcp_caps.rx_caps.fields.version == 0x4) ? 1:0;
- break;
- default:
- break;
- }
-
- return ret;
-}
-
-static void query_hdcp_capability(enum signal_type signal, struct dc_link *link)
-{
- struct hdcp_protection_message msg22;
- struct hdcp_protection_message msg14;
-
- memset(&msg22, 0, sizeof(struct hdcp_protection_message));
- memset(&msg14, 0, sizeof(struct hdcp_protection_message));
- memset(link->hdcp_caps.rx_caps.raw, 0,
- sizeof(link->hdcp_caps.rx_caps.raw));
-
- if ((link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT &&
- link->ddc->transaction_type ==
- DDC_TRANSACTION_TYPE_I2C_OVER_AUX) ||
- link->connector_signal == SIGNAL_TYPE_EDP) {
- msg22.data = link->hdcp_caps.rx_caps.raw;
- msg22.length = sizeof(link->hdcp_caps.rx_caps.raw);
- msg22.msg_id = HDCP_MESSAGE_ID_RX_CAPS;
- } else {
- msg22.data = &link->hdcp_caps.rx_caps.fields.version;
- msg22.length = sizeof(link->hdcp_caps.rx_caps.fields.version);
- msg22.msg_id = HDCP_MESSAGE_ID_HDCP2VERSION;
- }
- msg22.version = HDCP_VERSION_22;
- msg22.link = HDCP_LINK_PRIMARY;
- msg22.max_retries = 5;
- dc_process_hdcp_msg(signal, link, &msg22);
-
- if (signal == SIGNAL_TYPE_DISPLAY_PORT || signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
- msg14.data = &link->hdcp_caps.bcaps.raw;
- msg14.length = sizeof(link->hdcp_caps.bcaps.raw);
- msg14.msg_id = HDCP_MESSAGE_ID_READ_BCAPS;
- msg14.version = HDCP_VERSION_14;
- msg14.link = HDCP_LINK_PRIMARY;
- msg14.max_retries = 5;
-
- dc_process_hdcp_msg(signal, link, &msg14);
- }
-
-}
-#endif
-
-static void read_current_link_settings_on_detect(struct dc_link *link)
-{
- union lane_count_set lane_count_set = {0};
- uint8_t link_bw_set;
- uint8_t link_rate_set;
- uint32_t read_dpcd_retry_cnt = 10;
- enum dc_status status = DC_ERROR_UNEXPECTED;
- int i;
- union max_down_spread max_down_spread = {0};
-
- // Read DPCD 00101h to find out the number of lanes currently set
- for (i = 0; i < read_dpcd_retry_cnt; i++) {
- status = core_link_read_dpcd(link,
- DP_LANE_COUNT_SET,
- &lane_count_set.raw,
- sizeof(lane_count_set));
- /* First DPCD read after VDD ON can fail if the particular board
- * does not have HPD pin wired correctly. So if DPCD read fails,
- * which it should never happen, retry a few times. Target worst
- * case scenario of 80 ms.
- */
- if (status == DC_OK) {
- link->cur_link_settings.lane_count =
- lane_count_set.bits.LANE_COUNT_SET;
- break;
- }
-
- msleep(8);
- }
-
- // Read DPCD 00100h to find if standard link rates are set
- core_link_read_dpcd(link, DP_LINK_BW_SET,
- &link_bw_set, sizeof(link_bw_set));
-
- if (link_bw_set == 0) {
- if (link->connector_signal == SIGNAL_TYPE_EDP) {
- /* If standard link rates are not being used,
- * Read DPCD 00115h to find the edp link rate set used
- */
- core_link_read_dpcd(link, DP_LINK_RATE_SET,
- &link_rate_set, sizeof(link_rate_set));
-
- // edp_supported_link_rates_count = 0 for DP
- if (link_rate_set < link->dpcd_caps.edp_supported_link_rates_count) {
- link->cur_link_settings.link_rate =
- link->dpcd_caps.edp_supported_link_rates[link_rate_set];
- link->cur_link_settings.link_rate_set = link_rate_set;
- link->cur_link_settings.use_link_rate_set = true;
- }
- } else {
- // Link Rate not found. Seamless boot may not work.
- ASSERT(false);
- }
- } else {
- link->cur_link_settings.link_rate = link_bw_set;
- link->cur_link_settings.use_link_rate_set = false;
- }
- // Read DPCD 00003h to find the max down spread.
- core_link_read_dpcd(link, DP_MAX_DOWNSPREAD,
- &max_down_spread.raw, sizeof(max_down_spread));
- link->cur_link_settings.link_spread =
- max_down_spread.bits.MAX_DOWN_SPREAD ?
- LINK_SPREAD_05_DOWNSPREAD_30KHZ : LINK_SPREAD_DISABLED;
-}
-
-static bool detect_dp(struct dc_link *link,
- struct display_sink_capability *sink_caps,
- enum dc_detect_reason reason)
-{
- struct audio_support *audio_support = &link->dc->res_pool->audio_support;
-
- sink_caps->signal = link_detect_sink(link, reason);
- sink_caps->transaction_type =
- get_ddc_transaction_type(sink_caps->signal);
-
- if (sink_caps->transaction_type == DDC_TRANSACTION_TYPE_I2C_OVER_AUX) {
- sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT;
- if (!detect_dp_sink_caps(link))
- return false;
-
- if (is_dp_branch_device(link))
- /* DP SST branch */
- link->type = dc_connection_sst_branch;
- } else {
- /* DP passive dongles */
- sink_caps->signal = dp_passive_dongle_detection(link->ddc,
- sink_caps,
- audio_support);
- link->dpcd_caps.dongle_type = sink_caps->dongle_type;
- link->dpcd_caps.is_dongle_type_one = sink_caps->is_dongle_type_one;
- link->dpcd_caps.dpcd_rev.raw = 0;
- }
-
- return true;
-}
-
-static bool is_same_edid(struct dc_edid *old_edid, struct dc_edid *new_edid)
-{
- if (old_edid->length != new_edid->length)
- return false;
-
- if (new_edid->length == 0)
- return false;
-
- return (memcmp(old_edid->raw_edid,
- new_edid->raw_edid, new_edid->length) == 0);
-}
-
-static bool wait_for_entering_dp_alt_mode(struct dc_link *link)
-{
- /**
- * something is terribly wrong if time out is > 200ms. (5Hz)
- * 500 microseconds * 400 tries us 200 ms
- **/
- unsigned int sleep_time_in_microseconds = 500;
- unsigned int tries_allowed = 400;
- bool is_in_alt_mode;
- unsigned long long enter_timestamp;
- unsigned long long finish_timestamp;
- unsigned long long time_taken_in_ns;
- int tries_taken;
-
- DC_LOGGER_INIT(link->ctx->logger);
-
- if (!link->link_enc->funcs->is_in_alt_mode)
- return true;
-
- is_in_alt_mode = link->link_enc->funcs->is_in_alt_mode(link->link_enc);
- DC_LOG_WARNING("DP Alt mode state on HPD: %d\n", is_in_alt_mode);
-
- if (is_in_alt_mode)
- return true;
-
- enter_timestamp = dm_get_timestamp(link->ctx);
-
- for (tries_taken = 0; tries_taken < tries_allowed; tries_taken++) {
- udelay(sleep_time_in_microseconds);
- /* ask the link if alt mode is enabled, if so return ok */
- if (link->link_enc->funcs->is_in_alt_mode(link->link_enc)) {
- finish_timestamp = dm_get_timestamp(link->ctx);
- time_taken_in_ns =
- dm_get_elapse_time_in_ns(link->ctx,
- finish_timestamp,
- enter_timestamp);
- DC_LOG_WARNING("Alt mode entered finished after %llu ms\n",
- div_u64(time_taken_in_ns, 1000000));
- return true;
- }
- }
- finish_timestamp = dm_get_timestamp(link->ctx);
- time_taken_in_ns = dm_get_elapse_time_in_ns(link->ctx, finish_timestamp,
- enter_timestamp);
- DC_LOG_WARNING("Alt mode has timed out after %llu ms\n",
- div_u64(time_taken_in_ns, 1000000));
- return false;
-}
-
-static void apply_dpia_mst_dsc_always_on_wa(struct dc_link *link)
-{
- /* Apply work around for tunneled MST on certain USB4 docks. Always use DSC if dock
- * reports DSC support.
- */
- if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA &&
- link->type == dc_connection_mst_branch &&
- link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 &&
- link->dpcd_caps.branch_hw_revision == DP_BRANCH_HW_REV_20 &&
- link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT &&
- !link->dc->debug.dpia_debug.bits.disable_mst_dsc_work_around)
- link->wa_flags.dpia_mst_dsc_always_on = true;
-}
-
-static void revert_dpia_mst_dsc_always_on_wa(struct dc_link *link)
-{
- /* Disable work around which keeps DSC on for tunneled MST on certain USB4 docks. */
- if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
- link->wa_flags.dpia_mst_dsc_always_on = false;
-}
-
-static bool discover_dp_mst_topology(struct dc_link *link, enum dc_detect_reason reason)
-{
- DC_LOGGER_INIT(link->ctx->logger);
-
- LINK_INFO("link=%d, mst branch is now Connected\n",
- link->link_index);
-
- link->type = dc_connection_mst_branch;
- apply_dpia_mst_dsc_always_on_wa(link);
-
- dm_helpers_dp_update_branch_info(link->ctx, link);
- if (dm_helpers_dp_mst_start_top_mgr(link->ctx,
- link, (reason == DETECT_REASON_BOOT || reason == DETECT_REASON_RESUMEFROMS3S4))) {
- link_disconnect_sink(link);
- } else {
- link->type = dc_connection_sst_branch;
- }
-
- return link->type == dc_connection_mst_branch;
-}
-
-bool reset_cur_dp_mst_topology(struct dc_link *link)
-{
- DC_LOGGER_INIT(link->ctx->logger);
-
- LINK_INFO("link=%d, mst branch is now Disconnected\n",
- link->link_index);
-
- revert_dpia_mst_dsc_always_on_wa(link);
- return dm_helpers_dp_mst_stop_top_mgr(link->ctx, link);
-}
-
-static bool should_prepare_phy_clocks_for_link_verification(const struct dc *dc,
- enum dc_detect_reason reason)
-{
- int i;
- bool can_apply_seamless_boot = false;
-
- for (i = 0; i < dc->current_state->stream_count; i++) {
- if (dc->current_state->streams[i]->apply_seamless_boot_optimization) {
- can_apply_seamless_boot = true;
- break;
- }
- }
-
- return !can_apply_seamless_boot && reason != DETECT_REASON_BOOT;
-}
-
-static void prepare_phy_clocks_for_destructive_link_verification(const struct dc *dc)
-{
- dc_z10_restore(dc);
- clk_mgr_exit_optimized_pwr_state(dc, dc->clk_mgr);
-}
-
-static void restore_phy_clocks_for_destructive_link_verification(const struct dc *dc)
-{
- clk_mgr_optimize_pwr_state(dc, dc->clk_mgr);
-}
-
-static void set_all_streams_dpms_off_for_link(struct dc_link *link)
-{
- int i;
- struct pipe_ctx *pipe_ctx;
- struct dc_stream_update stream_update;
- bool dpms_off = true;
- struct link_resource link_res = {0};
-
- memset(&stream_update, 0, sizeof(stream_update));
- stream_update.dpms_off = &dpms_off;
-
- for (i = 0; i < MAX_PIPES; i++) {
- pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
- if (pipe_ctx && pipe_ctx->stream && !pipe_ctx->stream->dpms_off &&
- pipe_ctx->stream->link == link && !pipe_ctx->prev_odm_pipe) {
- stream_update.stream = pipe_ctx->stream;
- dc_commit_updates_for_stream(link->ctx->dc, NULL, 0,
- pipe_ctx->stream, &stream_update,
- link->ctx->dc->current_state);
- }
- }
-
- /* link can be also enabled by vbios. In this case it is not recorded
- * in pipe_ctx. Disable link phy here to make sure it is completely off
- */
- dp_disable_link_phy(link, &link_res, link->connector_signal);
-}
-
-static void verify_link_capability_destructive(struct dc_link *link,
- struct dc_sink *sink,
- enum dc_detect_reason reason)
-{
- bool should_prepare_phy_clocks =
- should_prepare_phy_clocks_for_link_verification(link->dc, reason);
-
- if (should_prepare_phy_clocks)
- prepare_phy_clocks_for_destructive_link_verification(link->dc);
-
- if (dc_is_dp_signal(link->local_sink->sink_signal)) {
- struct dc_link_settings known_limit_link_setting =
- dp_get_max_link_cap(link);
- set_all_streams_dpms_off_for_link(link);
- dp_verify_link_cap_with_retries(
- link, &known_limit_link_setting,
- LINK_TRAINING_MAX_VERIFY_RETRY);
- } else {
- ASSERT(0);
- }
-
- if (should_prepare_phy_clocks)
- restore_phy_clocks_for_destructive_link_verification(link->dc);
-}
-
-static void verify_link_capability_non_destructive(struct dc_link *link)
-{
- if (dc_is_dp_signal(link->local_sink->sink_signal)) {
- if (dc_is_embedded_signal(link->local_sink->sink_signal) ||
- link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
- /* TODO - should we check link encoder's max link caps here?
- * How do we know which link encoder to check from?
- */
- link->verified_link_cap = link->reported_link_cap;
- else
- link->verified_link_cap = dp_get_max_link_cap(link);
- }
-}
-
-static bool should_verify_link_capability_destructively(struct dc_link *link,
- enum dc_detect_reason reason)
-{
- bool destrictive = false;
- struct dc_link_settings max_link_cap;
- bool is_link_enc_unavailable = link->link_enc &&
- link->dc->res_pool->funcs->link_encs_assign &&
- !link_enc_cfg_is_link_enc_avail(
- link->ctx->dc,
- link->link_enc->preferred_engine,
- link);
-
- if (dc_is_dp_signal(link->local_sink->sink_signal)) {
- max_link_cap = dp_get_max_link_cap(link);
- destrictive = true;
-
- if (link->dc->debug.skip_detection_link_training ||
- dc_is_embedded_signal(link->local_sink->sink_signal) ||
- link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) {
- destrictive = false;
- } else if (dp_get_link_encoding_format(&max_link_cap) ==
- DP_8b_10b_ENCODING) {
- if (link->dpcd_caps.is_mst_capable ||
- is_link_enc_unavailable) {
- destrictive = false;
- }
- }
- }
-
- return destrictive;
-}
-
-static void verify_link_capability(struct dc_link *link, struct dc_sink *sink,
- enum dc_detect_reason reason)
-{
- if (should_verify_link_capability_destructively(link, reason))
- verify_link_capability_destructive(link, sink, reason);
- else
- verify_link_capability_non_destructive(link);
-}
-
-
-/**
- * detect_link_and_local_sink() - Detect if a sink is attached to a given link
- *
- * link->local_sink is created or destroyed as needed.
- *
- * This does not create remote sinks.
- */
-static bool detect_link_and_local_sink(struct dc_link *link,
- enum dc_detect_reason reason)
-{
- struct dc_sink_init_data sink_init_data = { 0 };
- struct display_sink_capability sink_caps = { 0 };
- uint32_t i;
- bool converter_disable_audio = false;
- struct audio_support *aud_support = &link->dc->res_pool->audio_support;
- bool same_edid = false;
- enum dc_edid_status edid_status;
- struct dc_context *dc_ctx = link->ctx;
- struct dc *dc = dc_ctx->dc;
- struct dc_sink *sink = NULL;
- struct dc_sink *prev_sink = NULL;
- struct dpcd_caps prev_dpcd_caps;
- enum dc_connection_type new_connection_type = dc_connection_none;
- const uint32_t post_oui_delay = 30; // 30ms
-
- DC_LOGGER_INIT(link->ctx->logger);
-
- if (dc_is_virtual_signal(link->connector_signal))
- return false;
-
- if (((link->connector_signal == SIGNAL_TYPE_LVDS ||
- link->connector_signal == SIGNAL_TYPE_EDP) &&
- (!link->dc->config.allow_edp_hotplug_detection)) &&
- link->local_sink) {
- // need to re-write OUI and brightness in resume case
- if (link->connector_signal == SIGNAL_TYPE_EDP &&
- (link->dpcd_sink_ext_caps.bits.oled == 1)) {
- dpcd_set_source_specific_data(link);
- msleep(post_oui_delay);
- dc_link_set_default_brightness_aux(link);
- //TODO: use cached
- }
-
- return true;
- }
-
- if (!dc_link_detect_sink(link, &new_connection_type)) {
- BREAK_TO_DEBUGGER();
- return false;
- }
-
- prev_sink = link->local_sink;
- if (prev_sink) {
- dc_sink_retain(prev_sink);
- memcpy(&prev_dpcd_caps, &link->dpcd_caps, sizeof(struct dpcd_caps));
- }
-
- link_disconnect_sink(link);
- if (new_connection_type != dc_connection_none) {
- link->type = new_connection_type;
- link->link_state_valid = false;
-
- /* From Disconnected-to-Connected. */
- switch (link->connector_signal) {
- case SIGNAL_TYPE_HDMI_TYPE_A: {
- sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
- if (aud_support->hdmi_audio_native)
- sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
- else
- sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
- break;
- }
-
- case SIGNAL_TYPE_DVI_SINGLE_LINK: {
- sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
- sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
- break;
- }
-
- case SIGNAL_TYPE_DVI_DUAL_LINK: {
- sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
- sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
- break;
- }
-
- case SIGNAL_TYPE_LVDS: {
- sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
- sink_caps.signal = SIGNAL_TYPE_LVDS;
- break;
- }
-
- case SIGNAL_TYPE_EDP: {
- read_current_link_settings_on_detect(link);
-
- detect_edp_sink_caps(link);
- read_current_link_settings_on_detect(link);
-
- /* Disable power sequence on MIPI panel + converter
- */
- if (dc->config.enable_mipi_converter_optimization &&
- dc_ctx->dce_version == DCN_VERSION_3_01 &&
- link->dpcd_caps.sink_dev_id == DP_BRANCH_DEVICE_ID_0022B9 &&
- memcmp(&link->dpcd_caps.branch_dev_name, DP_SINK_BRANCH_DEV_NAME_7580,
- sizeof(link->dpcd_caps.branch_dev_name)) == 0) {
- dc->config.edp_no_power_sequencing = true;
-
- if (!link->dpcd_caps.set_power_state_capable_edp)
- link->wa_flags.dp_keep_receiver_powered = true;
- }
-
- sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
- sink_caps.signal = SIGNAL_TYPE_EDP;
- break;
- }
-
- case SIGNAL_TYPE_DISPLAY_PORT: {
- /* wa HPD high coming too early*/
- if (link->ep_type == DISPLAY_ENDPOINT_PHY &&
- link->link_enc->features.flags.bits.DP_IS_USB_C == 1) {
- /* if alt mode times out, return false */
- if (!wait_for_entering_dp_alt_mode(link))
- return false;
- }
-
- if (!detect_dp(link, &sink_caps, reason)) {
- if (prev_sink)
- dc_sink_release(prev_sink);
- return false;
- }
-
- /* Active SST downstream branch device unplug*/
- if (link->type == dc_connection_sst_branch &&
- link->dpcd_caps.sink_count.bits.SINK_COUNT == 0) {
- if (prev_sink)
- /* Downstream unplug */
- dc_sink_release(prev_sink);
- return true;
- }
-
- /* disable audio for non DP to HDMI active sst converter */
- if (link->type == dc_connection_sst_branch &&
- is_dp_active_dongle(link) &&
- (link->dpcd_caps.dongle_type !=
- DISPLAY_DONGLE_DP_HDMI_CONVERTER))
- converter_disable_audio = true;
- break;
- }
-
- default:
- DC_ERROR("Invalid connector type! signal:%d\n",
- link->connector_signal);
- if (prev_sink)
- dc_sink_release(prev_sink);
- return false;
- } /* switch() */
-
- if (link->dpcd_caps.sink_count.bits.SINK_COUNT)
- link->dpcd_sink_count =
- link->dpcd_caps.sink_count.bits.SINK_COUNT;
- else
- link->dpcd_sink_count = 1;
-
- dal_ddc_service_set_transaction_type(link->ddc,
- sink_caps.transaction_type);
-
- link->aux_mode =
- dal_ddc_service_is_in_aux_transaction_mode(link->ddc);
-
- sink_init_data.link = link;
- sink_init_data.sink_signal = sink_caps.signal;
-
- sink = dc_sink_create(&sink_init_data);
- if (!sink) {
- DC_ERROR("Failed to create sink!\n");
- if (prev_sink)
- dc_sink_release(prev_sink);
- return false;
- }
-
- sink->link->dongle_max_pix_clk = sink_caps.max_hdmi_pixel_clock;
- sink->converter_disable_audio = converter_disable_audio;
-
- /* dc_sink_create returns a new reference */
- link->local_sink = sink;
-
- edid_status = dm_helpers_read_local_edid(link->ctx,
- link, sink);
-
- switch (edid_status) {
- case EDID_BAD_CHECKSUM:
- DC_LOG_ERROR("EDID checksum invalid.\n");
- break;
- case EDID_PARTIAL_VALID:
- DC_LOG_ERROR("Partial EDID valid, abandon invalid blocks.\n");
- break;
- case EDID_NO_RESPONSE:
- DC_LOG_ERROR("No EDID read.\n");
- /*
- * Abort detection for non-DP connectors if we have
- * no EDID
- *
- * DP needs to report as connected if HDP is high
- * even if we have no EDID in order to go to
- * fail-safe mode
- */
- if (dc_is_hdmi_signal(link->connector_signal) ||
- dc_is_dvi_signal(link->connector_signal)) {
- if (prev_sink)
- dc_sink_release(prev_sink);
-
- return false;
- }
-
- if (link->type == dc_connection_sst_branch &&
- link->dpcd_caps.dongle_type ==
- DISPLAY_DONGLE_DP_VGA_CONVERTER &&
- reason == DETECT_REASON_HPDRX) {
- /* Abort detection for DP-VGA adapters when EDID
- * can't be read and detection reason is VGA-side
- * hotplug
- */
- if (prev_sink)
- dc_sink_release(prev_sink);
- link_disconnect_sink(link);
-
- return true;
- }
-
- break;
- default:
- break;
- }
-
- // Check if edid is the same
- if ((prev_sink) &&
- (edid_status == EDID_THE_SAME || edid_status == EDID_OK))
- same_edid = is_same_edid(&prev_sink->dc_edid,
- &sink->dc_edid);
-
- if (sink->edid_caps.panel_patch.skip_scdc_overwrite)
- link->ctx->dc->debug.hdmi20_disable = true;
-
- if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT &&
- sink_caps.transaction_type ==
- DDC_TRANSACTION_TYPE_I2C_OVER_AUX) {
- /*
- * TODO debug why Dell 2413 doesn't like
- * two link trainings
- */
-#if defined(CONFIG_DRM_AMD_DC_HDCP)
- query_hdcp_capability(sink->sink_signal, link);
-#endif
- } else {
- // If edid is the same, then discard new sink and revert back to original sink
- if (same_edid) {
- link_disconnect_remap(prev_sink, link);
- sink = prev_sink;
- prev_sink = NULL;
- }
-#if defined(CONFIG_DRM_AMD_DC_HDCP)
- query_hdcp_capability(sink->sink_signal, link);
-#endif
- }
-
- /* HDMI-DVI Dongle */
- if (sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A &&
- !sink->edid_caps.edid_hdmi)
- sink->sink_signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
-
- if (link->local_sink && dc_is_dp_signal(sink_caps.signal))
- dp_trace_init(link);
-
- /* Connectivity log: detection */
- for (i = 0; i < sink->dc_edid.length / DC_EDID_BLOCK_SIZE; i++) {
- CONN_DATA_DETECT(link,
- &sink->dc_edid.raw_edid[i * DC_EDID_BLOCK_SIZE],
- DC_EDID_BLOCK_SIZE,
- "%s: [Block %d] ", sink->edid_caps.display_name, i);
- }
-
- DC_LOG_DETECTION_EDID_PARSER("%s: "
- "manufacturer_id = %X, "
- "product_id = %X, "
- "serial_number = %X, "
- "manufacture_week = %d, "
- "manufacture_year = %d, "
- "display_name = %s, "
- "speaker_flag = %d, "
- "audio_mode_count = %d\n",
- __func__,
- sink->edid_caps.manufacturer_id,
- sink->edid_caps.product_id,
- sink->edid_caps.serial_number,
- sink->edid_caps.manufacture_week,
- sink->edid_caps.manufacture_year,
- sink->edid_caps.display_name,
- sink->edid_caps.speaker_flags,
- sink->edid_caps.audio_mode_count);
-
- for (i = 0; i < sink->edid_caps.audio_mode_count; i++) {
- DC_LOG_DETECTION_EDID_PARSER("%s: mode number = %d, "
- "format_code = %d, "
- "channel_count = %d, "
- "sample_rate = %d, "
- "sample_size = %d\n",
- __func__,
- i,
- sink->edid_caps.audio_modes[i].format_code,
- sink->edid_caps.audio_modes[i].channel_count,
- sink->edid_caps.audio_modes[i].sample_rate,
- sink->edid_caps.audio_modes[i].sample_size);
- }
-
- if (link->connector_signal == SIGNAL_TYPE_EDP) {
- /* Init dc_panel_config by HW config */
- if (dc_ctx->dc->res_pool->funcs->get_panel_config_defaults)
- dc_ctx->dc->res_pool->funcs->get_panel_config_defaults(&link->panel_config);
- /* Pickup base DM settings */
- dm_helpers_init_panel_settings(dc_ctx, &link->panel_config, sink);
- // Override dc_panel_config if system has specific settings
- dm_helpers_override_panel_settings(dc_ctx, &link->panel_config);
- }
-
- } else {
- /* From Connected-to-Disconnected. */
- link->type = dc_connection_none;
- sink_caps.signal = SIGNAL_TYPE_NONE;
- /* When we unplug a passive DP-HDMI dongle connection, dongle_max_pix_clk
- * is not cleared. If we emulate a DP signal on this connection, it thinks
- * the dongle is still there and limits the number of modes we can emulate.
- * Clear dongle_max_pix_clk on disconnect to fix this
- */
- link->dongle_max_pix_clk = 0;
-
- dc_link_clear_dprx_states(link);
- dp_trace_reset(link);
- }
-
- LINK_INFO("link=%d, dc_sink_in=%p is now %s prev_sink=%p edid same=%d\n",
- link->link_index, sink,
- (sink_caps.signal ==
- SIGNAL_TYPE_NONE ? "Disconnected" : "Connected"),
- prev_sink, same_edid);
-
- if (prev_sink)
- dc_sink_release(prev_sink);
-
- return true;
-}
-
-bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
-{
- bool is_local_sink_detect_success;
- bool is_delegated_to_mst_top_mgr = false;
- enum dc_connection_type pre_link_type = link->type;
-
- is_local_sink_detect_success = detect_link_and_local_sink(link, reason);
-
- if (is_local_sink_detect_success && link->local_sink)
- verify_link_capability(link, link->local_sink, reason);
-
- if (is_local_sink_detect_success && link->local_sink &&
- dc_is_dp_signal(link->local_sink->sink_signal) &&
- link->dpcd_caps.is_mst_capable)
- is_delegated_to_mst_top_mgr = discover_dp_mst_topology(link, reason);
-
- if (is_local_sink_detect_success &&
- pre_link_type == dc_connection_mst_branch &&
- link->type != dc_connection_mst_branch)
- is_delegated_to_mst_top_mgr = reset_cur_dp_mst_topology(link);
-
- return is_local_sink_detect_success && !is_delegated_to_mst_top_mgr;
-}
-
-bool dc_link_get_hpd_state(struct dc_link *dc_link)
-{
- uint32_t state;
-
- dal_gpio_lock_pin(dc_link->hpd_gpio);
- dal_gpio_get_value(dc_link->hpd_gpio, &state);
- dal_gpio_unlock_pin(dc_link->hpd_gpio);
-
- return state;
-}
-
-static enum hpd_source_id get_hpd_line(struct dc_link *link)
-{
- struct gpio *hpd;
- enum hpd_source_id hpd_id;
-
- hpd_id = HPD_SOURCEID_UNKNOWN;
-
- hpd = get_hpd_gpio(link->ctx->dc_bios, link->link_id,
- link->ctx->gpio_service);
-
- if (hpd) {
- switch (dal_irq_get_source(hpd)) {
- case DC_IRQ_SOURCE_HPD1:
- hpd_id = HPD_SOURCEID1;
- break;
- case DC_IRQ_SOURCE_HPD2:
- hpd_id = HPD_SOURCEID2;
- break;
- case DC_IRQ_SOURCE_HPD3:
- hpd_id = HPD_SOURCEID3;
- break;
- case DC_IRQ_SOURCE_HPD4:
- hpd_id = HPD_SOURCEID4;
- break;
- case DC_IRQ_SOURCE_HPD5:
- hpd_id = HPD_SOURCEID5;
- break;
- case DC_IRQ_SOURCE_HPD6:
- hpd_id = HPD_SOURCEID6;
- break;
- default:
- BREAK_TO_DEBUGGER();
- break;
- }
-
- dal_gpio_destroy_irq(&hpd);
- }
-
- return hpd_id;
-}
-
-static enum channel_id get_ddc_line(struct dc_link *link)
-{
- struct ddc *ddc;
- enum channel_id channel;
-
- channel = CHANNEL_ID_UNKNOWN;
-
- ddc = dal_ddc_service_get_ddc_pin(link->ddc);
-
- if (ddc) {
- switch (dal_ddc_get_line(ddc)) {
- case GPIO_DDC_LINE_DDC1:
- channel = CHANNEL_ID_DDC1;
- break;
- case GPIO_DDC_LINE_DDC2:
- channel = CHANNEL_ID_DDC2;
- break;
- case GPIO_DDC_LINE_DDC3:
- channel = CHANNEL_ID_DDC3;
- break;
- case GPIO_DDC_LINE_DDC4:
- channel = CHANNEL_ID_DDC4;
- break;
- case GPIO_DDC_LINE_DDC5:
- channel = CHANNEL_ID_DDC5;
- break;
- case GPIO_DDC_LINE_DDC6:
- channel = CHANNEL_ID_DDC6;
- break;
- case GPIO_DDC_LINE_DDC_VGA:
- channel = CHANNEL_ID_DDC_VGA;
- break;
- case GPIO_DDC_LINE_I2C_PAD:
- channel = CHANNEL_ID_I2C_PAD;
- break;
- default:
- BREAK_TO_DEBUGGER();
- break;
- }
- }
-
- return channel;
-}
-
-static enum transmitter translate_encoder_to_transmitter(struct graphics_object_id encoder)
-{
- switch (encoder.id) {
- case ENCODER_ID_INTERNAL_UNIPHY:
- switch (encoder.enum_id) {
- case ENUM_ID_1:
- return TRANSMITTER_UNIPHY_A;
- case ENUM_ID_2:
- return TRANSMITTER_UNIPHY_B;
- default:
- return TRANSMITTER_UNKNOWN;
- }
- break;
- case ENCODER_ID_INTERNAL_UNIPHY1:
- switch (encoder.enum_id) {
- case ENUM_ID_1:
- return TRANSMITTER_UNIPHY_C;
- case ENUM_ID_2:
- return TRANSMITTER_UNIPHY_D;
- default:
- return TRANSMITTER_UNKNOWN;
- }
- break;
- case ENCODER_ID_INTERNAL_UNIPHY2:
- switch (encoder.enum_id) {
- case ENUM_ID_1:
- return TRANSMITTER_UNIPHY_E;
- case ENUM_ID_2:
- return TRANSMITTER_UNIPHY_F;
- default:
- return TRANSMITTER_UNKNOWN;
- }
- break;
- case ENCODER_ID_INTERNAL_UNIPHY3:
- switch (encoder.enum_id) {
- case ENUM_ID_1:
- return TRANSMITTER_UNIPHY_G;
- default:
- return TRANSMITTER_UNKNOWN;
- }
- break;
- case ENCODER_ID_EXTERNAL_NUTMEG:
- switch (encoder.enum_id) {
- case ENUM_ID_1:
- return TRANSMITTER_NUTMEG_CRT;
- default:
- return TRANSMITTER_UNKNOWN;
- }
- break;
- case ENCODER_ID_EXTERNAL_TRAVIS:
- switch (encoder.enum_id) {
- case ENUM_ID_1:
- return TRANSMITTER_TRAVIS_CRT;
- case ENUM_ID_2:
- return TRANSMITTER_TRAVIS_LCD;
- default:
- return TRANSMITTER_UNKNOWN;
- }
- break;
- default:
- return TRANSMITTER_UNKNOWN;
- }
-}
-
-static bool dc_link_construct_legacy(struct dc_link *link,
- const struct link_init_data *init_params)
-{
- uint8_t i;
- struct ddc_service_init_data ddc_service_init_data = { 0 };
- struct dc_context *dc_ctx = init_params->ctx;
- struct encoder_init_data enc_init_data = { 0 };
- struct panel_cntl_init_data panel_cntl_init_data = { 0 };
- struct integrated_info *info;
- struct dc_bios *bios = init_params->dc->ctx->dc_bios;
- const struct dc_vbios_funcs *bp_funcs = bios->funcs;
- struct bp_disp_connector_caps_info disp_connect_caps_info = { 0 };
-
- DC_LOGGER_INIT(dc_ctx->logger);
-
- info = kzalloc(sizeof(*info), GFP_KERNEL);
- if (!info)
- goto create_fail;
-
- link->irq_source_hpd = DC_IRQ_SOURCE_INVALID;
- link->irq_source_hpd_rx = DC_IRQ_SOURCE_INVALID;
-
- link->link_status.dpcd_caps = &link->dpcd_caps;
-
- link->dc = init_params->dc;
- link->ctx = dc_ctx;
- link->link_index = init_params->link_index;
-
- memset(&link->preferred_training_settings, 0,
- sizeof(struct dc_link_training_overrides));
- memset(&link->preferred_link_setting, 0,
- sizeof(struct dc_link_settings));
-
- link->link_id =
- bios->funcs->get_connector_id(bios, init_params->connector_index);
-
- link->ep_type = DISPLAY_ENDPOINT_PHY;
-
- DC_LOG_DC("BIOS object table - link_id: %d", link->link_id.id);
-
- if (bios->funcs->get_disp_connector_caps_info) {
- bios->funcs->get_disp_connector_caps_info(bios, link->link_id, &disp_connect_caps_info);
- link->is_internal_display = disp_connect_caps_info.INTERNAL_DISPLAY;
- DC_LOG_DC("BIOS object table - is_internal_display: %d", link->is_internal_display);
- }
-
- if (link->link_id.type != OBJECT_TYPE_CONNECTOR) {
- dm_output_to_console("%s: Invalid Connector ObjectID from Adapter Service for connector index:%d! type %d expected %d\n",
- __func__, init_params->connector_index,
- link->link_id.type, OBJECT_TYPE_CONNECTOR);
- goto create_fail;
- }
-
- if (link->dc->res_pool->funcs->link_init)
- link->dc->res_pool->funcs->link_init(link);
-
- link->hpd_gpio = get_hpd_gpio(link->ctx->dc_bios, link->link_id,
- link->ctx->gpio_service);
-
- if (link->hpd_gpio) {
- dal_gpio_open(link->hpd_gpio, GPIO_MODE_INTERRUPT);
- dal_gpio_unlock_pin(link->hpd_gpio);
- link->irq_source_hpd = dal_irq_get_source(link->hpd_gpio);
-
- DC_LOG_DC("BIOS object table - hpd_gpio id: %d", link->hpd_gpio->id);
- DC_LOG_DC("BIOS object table - hpd_gpio en: %d", link->hpd_gpio->en);
- }
-
- switch (link->link_id.id) {
- case CONNECTOR_ID_HDMI_TYPE_A:
- link->connector_signal = SIGNAL_TYPE_HDMI_TYPE_A;
-
- break;
- case CONNECTOR_ID_SINGLE_LINK_DVID:
- case CONNECTOR_ID_SINGLE_LINK_DVII:
- link->connector_signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
- break;
- case CONNECTOR_ID_DUAL_LINK_DVID:
- case CONNECTOR_ID_DUAL_LINK_DVII:
- link->connector_signal = SIGNAL_TYPE_DVI_DUAL_LINK;
- break;
- case CONNECTOR_ID_DISPLAY_PORT:
- case CONNECTOR_ID_USBC:
- link->connector_signal = SIGNAL_TYPE_DISPLAY_PORT;
-
- if (link->hpd_gpio)
- link->irq_source_hpd_rx =
- dal_irq_get_rx_source(link->hpd_gpio);
-
- break;
- case CONNECTOR_ID_EDP:
- link->connector_signal = SIGNAL_TYPE_EDP;
-
- if (link->hpd_gpio) {
- if (!link->dc->config.allow_edp_hotplug_detection)
- link->irq_source_hpd = DC_IRQ_SOURCE_INVALID;
-
- switch (link->dc->config.allow_edp_hotplug_detection) {
- case 1: // only the 1st eDP handles hotplug
- if (link->link_index == 0)
- link->irq_source_hpd_rx =
- dal_irq_get_rx_source(link->hpd_gpio);
- else
- link->irq_source_hpd = DC_IRQ_SOURCE_INVALID;
- break;
- case 2: // only the 2nd eDP handles hotplug
- if (link->link_index == 1)
- link->irq_source_hpd_rx =
- dal_irq_get_rx_source(link->hpd_gpio);
- else
- link->irq_source_hpd = DC_IRQ_SOURCE_INVALID;
- break;
- default:
- break;
- }
- }
-
- break;
- case CONNECTOR_ID_LVDS:
- link->connector_signal = SIGNAL_TYPE_LVDS;
- break;
- default:
- DC_LOG_WARNING("Unsupported Connector type:%d!\n",
- link->link_id.id);
- goto create_fail;
- }
-
- /* TODO: #DAL3 Implement id to str function.*/
- LINK_INFO("Connector[%d] description:"
- "signal %d\n",
- init_params->connector_index,
- link->connector_signal);
-
- ddc_service_init_data.ctx = link->ctx;
- ddc_service_init_data.id = link->link_id;
- ddc_service_init_data.link = link;
- link->ddc = dal_ddc_service_create(&ddc_service_init_data);
-
- if (!link->ddc) {
- DC_ERROR("Failed to create ddc_service!\n");
- goto ddc_create_fail;
- }
-
- if (!link->ddc->ddc_pin) {
- DC_ERROR("Failed to get I2C info for connector!\n");
- goto ddc_create_fail;
- }
-
- link->ddc_hw_inst =
- dal_ddc_get_line(dal_ddc_service_get_ddc_pin(link->ddc));
-
-
- if (link->dc->res_pool->funcs->panel_cntl_create &&
- (link->link_id.id == CONNECTOR_ID_EDP ||
- link->link_id.id == CONNECTOR_ID_LVDS)) {
- panel_cntl_init_data.ctx = dc_ctx;
- panel_cntl_init_data.inst =
- panel_cntl_init_data.ctx->dc_edp_id_count;
- link->panel_cntl =
- link->dc->res_pool->funcs->panel_cntl_create(
- &panel_cntl_init_data);
- panel_cntl_init_data.ctx->dc_edp_id_count++;
-
- if (link->panel_cntl == NULL) {
- DC_ERROR("Failed to create link panel_cntl!\n");
- goto panel_cntl_create_fail;
- }
- }
-
- enc_init_data.ctx = dc_ctx;
- bp_funcs->get_src_obj(dc_ctx->dc_bios, link->link_id, 0,
- &enc_init_data.encoder);
- enc_init_data.connector = link->link_id;
- enc_init_data.channel = get_ddc_line(link);
- enc_init_data.hpd_source = get_hpd_line(link);
-
- link->hpd_src = enc_init_data.hpd_source;
-
- enc_init_data.transmitter =
- translate_encoder_to_transmitter(enc_init_data.encoder);
- link->link_enc =
- link->dc->res_pool->funcs->link_enc_create(dc_ctx, &enc_init_data);
-
- if (!link->link_enc) {
- DC_ERROR("Failed to create link encoder!\n");
- goto link_enc_create_fail;
- }
-
- DC_LOG_DC("BIOS object table - DP_IS_USB_C: %d", link->link_enc->features.flags.bits.DP_IS_USB_C);
- DC_LOG_DC("BIOS object table - IS_DP2_CAPABLE: %d", link->link_enc->features.flags.bits.IS_DP2_CAPABLE);
-
- /* Update link encoder tracking variables. These are used for the dynamic
- * assignment of link encoders to streams.
- */
- link->eng_id = link->link_enc->preferred_engine;
- link->dc->res_pool->link_encoders[link->eng_id - ENGINE_ID_DIGA] = link->link_enc;
- link->dc->res_pool->dig_link_enc_count++;
-
- link->link_enc_hw_inst = link->link_enc->transmitter;
-
- for (i = 0; i < 4; i++) {
- if (bp_funcs->get_device_tag(dc_ctx->dc_bios,
- link->link_id, i,
- &link->device_tag) != BP_RESULT_OK) {
- DC_ERROR("Failed to find device tag!\n");
- goto device_tag_fail;
- }
-
- /* Look for device tag that matches connector signal,
- * CRT for rgb, LCD for other supported signal tyes
- */
- if (!bp_funcs->is_device_id_supported(dc_ctx->dc_bios,
- link->device_tag.dev_id))
- continue;
- if (link->device_tag.dev_id.device_type == DEVICE_TYPE_CRT &&
- link->connector_signal != SIGNAL_TYPE_RGB)
- continue;
- if (link->device_tag.dev_id.device_type == DEVICE_TYPE_LCD &&
- link->connector_signal == SIGNAL_TYPE_RGB)
- continue;
-
- DC_LOG_DC("BIOS object table - device_tag.acpi_device: %d", link->device_tag.acpi_device);
- DC_LOG_DC("BIOS object table - device_tag.dev_id.device_type: %d", link->device_tag.dev_id.device_type);
- DC_LOG_DC("BIOS object table - device_tag.dev_id.enum_id: %d", link->device_tag.dev_id.enum_id);
- break;
- }
-
- if (bios->integrated_info)
- memcpy(info, bios->integrated_info, sizeof(*info));
-
- /* Look for channel mapping corresponding to connector and device tag */
- for (i = 0; i < MAX_NUMBER_OF_EXT_DISPLAY_PATH; i++) {
- struct external_display_path *path =
- &info->ext_disp_conn_info.path[i];
-
- if (path->device_connector_id.enum_id == link->link_id.enum_id &&
- path->device_connector_id.id == link->link_id.id &&
- path->device_connector_id.type == link->link_id.type) {
- if (link->device_tag.acpi_device != 0 &&
- path->device_acpi_enum == link->device_tag.acpi_device) {
- link->ddi_channel_mapping = path->channel_mapping;
- link->chip_caps = path->caps;
- DC_LOG_DC("BIOS object table - ddi_channel_mapping: 0x%04X", link->ddi_channel_mapping.raw);
- DC_LOG_DC("BIOS object table - chip_caps: %d", link->chip_caps);
- } else if (path->device_tag ==
- link->device_tag.dev_id.raw_device_tag) {
- link->ddi_channel_mapping = path->channel_mapping;
- link->chip_caps = path->caps;
- DC_LOG_DC("BIOS object table - ddi_channel_mapping: 0x%04X", link->ddi_channel_mapping.raw);
- DC_LOG_DC("BIOS object table - chip_caps: %d", link->chip_caps);
- }
-
- if (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) {
- link->bios_forced_drive_settings.VOLTAGE_SWING =
- (info->ext_disp_conn_info.fixdpvoltageswing & 0x3);
- link->bios_forced_drive_settings.PRE_EMPHASIS =
- ((info->ext_disp_conn_info.fixdpvoltageswing >> 2) & 0x3);
- }
-
- break;
- }
- }
-
- if (bios->funcs->get_atom_dc_golden_table)
- bios->funcs->get_atom_dc_golden_table(bios);
-
- /*
- * TODO check if GPIO programmed correctly
- *
- * If GPIO isn't programmed correctly HPD might not rise or drain
- * fast enough, leading to bounces.
- */
- program_hpd_filter(link);
-
- link->psr_settings.psr_vtotal_control_support = false;
- link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
-
- DC_LOG_DC("BIOS object table - %s finished successfully.\n", __func__);
- kfree(info);
- return true;
-device_tag_fail:
- link->link_enc->funcs->destroy(&link->link_enc);
-link_enc_create_fail:
- if (link->panel_cntl != NULL)
- link->panel_cntl->funcs->destroy(&link->panel_cntl);
-panel_cntl_create_fail:
- dal_ddc_service_destroy(&link->ddc);
-ddc_create_fail:
-create_fail:
-
- if (link->hpd_gpio) {
- dal_gpio_destroy_irq(&link->hpd_gpio);
- link->hpd_gpio = NULL;
- }
-
- DC_LOG_DC("BIOS object table - %s failed.\n", __func__);
- kfree(info);
-
- return false;
-}
-
-static bool dc_link_construct_dpia(struct dc_link *link,
- const struct link_init_data *init_params)
-{
- struct ddc_service_init_data ddc_service_init_data = { 0 };
- struct dc_context *dc_ctx = init_params->ctx;
-
- DC_LOGGER_INIT(dc_ctx->logger);
-
- /* Initialized irq source for hpd and hpd rx */
- link->irq_source_hpd = DC_IRQ_SOURCE_INVALID;
- link->irq_source_hpd_rx = DC_IRQ_SOURCE_INVALID;
- link->link_status.dpcd_caps = &link->dpcd_caps;
-
- link->dc = init_params->dc;
- link->ctx = dc_ctx;
- link->link_index = init_params->link_index;
-
- memset(&link->preferred_training_settings, 0,
- sizeof(struct dc_link_training_overrides));
- memset(&link->preferred_link_setting, 0,
- sizeof(struct dc_link_settings));
-
- /* Dummy Init for linkid */
- link->link_id.type = OBJECT_TYPE_CONNECTOR;
- link->link_id.id = CONNECTOR_ID_DISPLAY_PORT;
- link->link_id.enum_id = ENUM_ID_1 + init_params->connector_index;
- link->is_internal_display = false;
- link->connector_signal = SIGNAL_TYPE_DISPLAY_PORT;
- LINK_INFO("Connector[%d] description:signal %d\n",
- init_params->connector_index,
- link->connector_signal);
-
- link->ep_type = DISPLAY_ENDPOINT_USB4_DPIA;
- link->is_dig_mapping_flexible = true;
-
- /* TODO: Initialize link : funcs->link_init */
-
- ddc_service_init_data.ctx = link->ctx;
- ddc_service_init_data.id = link->link_id;
- ddc_service_init_data.link = link;
- /* Set indicator for dpia link so that ddc won't be created */
- ddc_service_init_data.is_dpia_link = true;
-
- link->ddc = dal_ddc_service_create(&ddc_service_init_data);
- if (!link->ddc) {
- DC_ERROR("Failed to create ddc_service!\n");
- goto ddc_create_fail;
- }
-
- /* Set dpia port index : 0 to number of dpia ports */
- link->ddc_hw_inst = init_params->connector_index;
-
- /* TODO: Create link encoder */
-
- link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
-
- /* Some docks seem to NAK I2C writes to segment pointer with mot=0. */
- link->wa_flags.dp_mot_reset_segment = true;
-
- return true;
-
-ddc_create_fail:
- return false;
-}
-
-static bool dc_link_construct(struct dc_link *link,
- const struct link_init_data *init_params)
-{
- /* Handle dpia case */
- if (init_params->is_dpia_link)
- return dc_link_construct_dpia(link, init_params);
- else
- return dc_link_construct_legacy(link, init_params);
-}
-/*******************************************************************************
- * Public functions
- ******************************************************************************/
-struct dc_link *link_create(const struct link_init_data *init_params)
-{
- struct dc_link *link =
- kzalloc(sizeof(*link), GFP_KERNEL);
-
- if (NULL == link)
- goto alloc_fail;
-
- if (false == dc_link_construct(link, init_params))
- goto construct_fail;
-
- /*
- * Must use preferred_link_setting, not reported_link_cap or verified_link_cap,
- * since struct preferred_link_setting won't be reset after S3.
- */
- link->preferred_link_setting.dpcd_source_device_specific_field_support = true;
-
- return link;
-
-construct_fail:
- kfree(link);
-
-alloc_fail:
- return NULL;
-}
-
-void link_destroy(struct dc_link **link)
-{
- dc_link_destruct(*link);
- kfree(*link);
- *link = NULL;
-}
-
-static void enable_stream_features(struct pipe_ctx *pipe_ctx)
-{
- struct dc_stream_state *stream = pipe_ctx->stream;
-
- if (pipe_ctx->stream->signal != SIGNAL_TYPE_DISPLAY_PORT_MST) {
- struct dc_link *link = stream->link;
- union down_spread_ctrl old_downspread;
- union down_spread_ctrl new_downspread;
-
- memset(&old_downspread, 0, sizeof(old_downspread));
-
- core_link_read_dpcd(link, DP_DOWNSPREAD_CTRL,
- &old_downspread.raw, sizeof(old_downspread));
-
- new_downspread.raw = old_downspread.raw;
-
- new_downspread.bits.IGNORE_MSA_TIMING_PARAM =
- (stream->ignore_msa_timing_param) ? 1 : 0;
-
- if (new_downspread.raw != old_downspread.raw) {
- core_link_write_dpcd(link, DP_DOWNSPREAD_CTRL,
- &new_downspread.raw, sizeof(new_downspread));
- }
-
- } else {
- dm_helpers_mst_enable_stream_features(stream);
- }
-}
-
-static enum dc_status enable_link_dp(struct dc_state *state,
- struct pipe_ctx *pipe_ctx)
-{
- struct dc_stream_state *stream = pipe_ctx->stream;
- enum dc_status status;
- bool skip_video_pattern;
- struct dc_link *link = stream->link;
- const struct dc_link_settings *link_settings =
- &pipe_ctx->link_config.dp_link_settings;
- bool fec_enable;
- int i;
- bool apply_seamless_boot_optimization = false;
- uint32_t bl_oled_enable_delay = 50; // in ms
- uint32_t post_oui_delay = 30; // 30ms
- /* Reduce link bandwidth between failed link training attempts. */
- bool do_fallback = false;
-
- // check for seamless boot
- for (i = 0; i < state->stream_count; i++) {
- if (state->streams[i]->apply_seamless_boot_optimization) {
- apply_seamless_boot_optimization = true;
- break;
- }
- }
-
- /* Train with fallback when enabling DPIA link. Conventional links are
- * trained with fallback during sink detection.
- */
- if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
- do_fallback = true;
-
- /*
- * Temporary w/a to get DP2.0 link rates to work with SST.
- * TODO DP2.0 - Workaround: Remove w/a if and when the issue is resolved.
- */
- if (dp_get_link_encoding_format(link_settings) == DP_128b_132b_ENCODING &&
- pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT &&
- link->dc->debug.set_mst_en_for_sst) {
- dp_enable_mst_on_sink(link, true);
- }
-
- if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP) {
- /*in case it is not on*/
- if (!link->dc->config.edp_no_power_sequencing)
- link->dc->hwss.edp_power_control(link, true);
- link->dc->hwss.edp_wait_for_hpd_ready(link, true);
- }
-
- if (dp_get_link_encoding_format(link_settings) == DP_128b_132b_ENCODING) {
- /* TODO - DP2.0 HW: calculate 32 symbol clock for HPO encoder */
- } else {
- pipe_ctx->stream_res.pix_clk_params.requested_sym_clk =
- link_settings->link_rate * LINK_RATE_REF_FREQ_IN_KHZ;
- if (state->clk_mgr && !apply_seamless_boot_optimization)
- state->clk_mgr->funcs->update_clocks(state->clk_mgr,
- state, false);
- }
-
- // during mode switch we do DP_SET_POWER off then on, and OUI is lost
- dpcd_set_source_specific_data(link);
- if (link->dpcd_sink_ext_caps.raw != 0) {
- post_oui_delay += link->panel_config.pps.extra_post_OUI_ms;
- msleep(post_oui_delay);
- }
-
- // similarly, mode switch can cause loss of cable ID
- dpcd_write_cable_id_to_dprx(link);
-
- skip_video_pattern = true;
-
- if (link_settings->link_rate == LINK_RATE_LOW)
- skip_video_pattern = false;
-
- if (perform_link_training_with_retries(link_settings,
- skip_video_pattern,
- LINK_TRAINING_ATTEMPTS,
- pipe_ctx,
- pipe_ctx->stream->signal,
- do_fallback)) {
- status = DC_OK;
- } else {
- status = DC_FAIL_DP_LINK_TRAINING;
- }
-
- if (link->preferred_training_settings.fec_enable)
- fec_enable = *link->preferred_training_settings.fec_enable;
- else
- fec_enable = true;
-
- if (dp_get_link_encoding_format(link_settings) == DP_8b_10b_ENCODING)
- dp_set_fec_enable(link, fec_enable);
-
- // during mode set we do DP_SET_POWER off then on, aux writes are lost
- if (link->dpcd_sink_ext_caps.bits.oled == 1 ||
- link->dpcd_sink_ext_caps.bits.sdr_aux_backlight_control == 1 ||
- link->dpcd_sink_ext_caps.bits.hdr_aux_backlight_control == 1) {
- dc_link_set_default_brightness_aux(link); // TODO: use cached if known
- if (link->dpcd_sink_ext_caps.bits.oled == 1)
- msleep(bl_oled_enable_delay);
- dc_link_backlight_enable_aux(link, true);
- }
-
- return status;
-}
-
-static enum dc_status enable_link_edp(
- struct dc_state *state,
- struct pipe_ctx *pipe_ctx)
-{
- return enable_link_dp(state, pipe_ctx);
-}
-
-static enum dc_status enable_link_dp_mst(
- struct dc_state *state,
- struct pipe_ctx *pipe_ctx)
-{
- struct dc_link *link = pipe_ctx->stream->link;
-
- /* sink signal type after MST branch is MST. Multiple MST sinks
- * share one link. Link DP PHY is enable or training only once.
- */
- if (link->link_status.link_active)
- return DC_OK;
-
- /* clear payload table */
- dm_helpers_dp_mst_clear_payload_allocation_table(link->ctx, link);
-
- /* to make sure the pending down rep can be processed
- * before enabling the link
- */
- dm_helpers_dp_mst_poll_pending_down_reply(link->ctx, link);
-
- /* set the sink to MST mode before enabling the link */
- dp_enable_mst_on_sink(link, true);
-
- return enable_link_dp(state, pipe_ctx);
-}
-
-void dc_link_blank_all_dp_displays(struct dc *dc)
-{
- unsigned int i;
- uint8_t dpcd_power_state = '\0';
- enum dc_status status = DC_ERROR_UNEXPECTED;
-
- for (i = 0; i < dc->link_count; i++) {
- if ((dc->links[i]->connector_signal != SIGNAL_TYPE_DISPLAY_PORT) ||
- (dc->links[i]->priv == NULL) || (dc->links[i]->local_sink == NULL))
- continue;
-
- /* DP 2.0 spec requires that we read LTTPR caps first */
- dp_retrieve_lttpr_cap(dc->links[i]);
- /* if any of the displays are lit up turn them off */
- status = core_link_read_dpcd(dc->links[i], DP_SET_POWER,
- &dpcd_power_state, sizeof(dpcd_power_state));
-
- if (status == DC_OK && dpcd_power_state == DP_POWER_STATE_D0)
- dc_link_blank_dp_stream(dc->links[i], true);
- }
-
-}
-
-void dc_link_blank_all_edp_displays(struct dc *dc)
-{
- unsigned int i;
- uint8_t dpcd_power_state = '\0';
- enum dc_status status = DC_ERROR_UNEXPECTED;
-
- for (i = 0; i < dc->link_count; i++) {
- if ((dc->links[i]->connector_signal != SIGNAL_TYPE_EDP) ||
- (!dc->links[i]->edp_sink_present))
- continue;
-
- /* if any of the displays are lit up turn them off */
- status = core_link_read_dpcd(dc->links[i], DP_SET_POWER,
- &dpcd_power_state, sizeof(dpcd_power_state));
-
- if (status == DC_OK && dpcd_power_state == DP_POWER_STATE_D0)
- dc_link_blank_dp_stream(dc->links[i], true);
- }
-}
-
-void dc_link_blank_dp_stream(struct dc_link *link, bool hw_init)
-{
- unsigned int j;
- struct dc *dc = link->ctx->dc;
- enum signal_type signal = link->connector_signal;
-
- if ((signal == SIGNAL_TYPE_EDP) ||
- (signal == SIGNAL_TYPE_DISPLAY_PORT)) {
- if (link->ep_type == DISPLAY_ENDPOINT_PHY &&
- link->link_enc->funcs->get_dig_frontend &&
- link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
- unsigned int fe = link->link_enc->funcs->get_dig_frontend(link->link_enc);
-
- if (fe != ENGINE_ID_UNKNOWN)
- for (j = 0; j < dc->res_pool->stream_enc_count; j++) {
- if (fe == dc->res_pool->stream_enc[j]->id) {
- dc->res_pool->stream_enc[j]->funcs->dp_blank(link,
- dc->res_pool->stream_enc[j]);
- break;
- }
- }
- }
-
- if ((!link->wa_flags.dp_keep_receiver_powered) || hw_init)
- dp_receiver_power_ctrl(link, false);
- }
-}
-
-static bool get_ext_hdmi_settings(struct pipe_ctx *pipe_ctx,
- enum engine_id eng_id,
- struct ext_hdmi_settings *settings)
-{
- bool result = false;
- int i = 0;
- struct integrated_info *integrated_info =
- pipe_ctx->stream->ctx->dc_bios->integrated_info;
-
- if (integrated_info == NULL)
- return false;
-
- /*
- * Get retimer settings from sbios for passing SI eye test for DCE11
- * The setting values are varied based on board revision and port id
- * Therefore the setting values of each ports is passed by sbios.
- */
-
- // Check if current bios contains ext Hdmi settings
- if (integrated_info->gpu_cap_info & 0x20) {
- switch (eng_id) {
- case ENGINE_ID_DIGA:
- settings->slv_addr = integrated_info->dp0_ext_hdmi_slv_addr;
- settings->reg_num = integrated_info->dp0_ext_hdmi_6g_reg_num;
- settings->reg_num_6g = integrated_info->dp0_ext_hdmi_6g_reg_num;
- memmove(settings->reg_settings,
- integrated_info->dp0_ext_hdmi_reg_settings,
- sizeof(integrated_info->dp0_ext_hdmi_reg_settings));
- memmove(settings->reg_settings_6g,
- integrated_info->dp0_ext_hdmi_6g_reg_settings,
- sizeof(integrated_info->dp0_ext_hdmi_6g_reg_settings));
- result = true;
- break;
- case ENGINE_ID_DIGB:
- settings->slv_addr = integrated_info->dp1_ext_hdmi_slv_addr;
- settings->reg_num = integrated_info->dp1_ext_hdmi_6g_reg_num;
- settings->reg_num_6g = integrated_info->dp1_ext_hdmi_6g_reg_num;
- memmove(settings->reg_settings,
- integrated_info->dp1_ext_hdmi_reg_settings,
- sizeof(integrated_info->dp1_ext_hdmi_reg_settings));
- memmove(settings->reg_settings_6g,
- integrated_info->dp1_ext_hdmi_6g_reg_settings,
- sizeof(integrated_info->dp1_ext_hdmi_6g_reg_settings));
- result = true;
- break;
- case ENGINE_ID_DIGC:
- settings->slv_addr = integrated_info->dp2_ext_hdmi_slv_addr;
- settings->reg_num = integrated_info->dp2_ext_hdmi_6g_reg_num;
- settings->reg_num_6g = integrated_info->dp2_ext_hdmi_6g_reg_num;
- memmove(settings->reg_settings,
- integrated_info->dp2_ext_hdmi_reg_settings,
- sizeof(integrated_info->dp2_ext_hdmi_reg_settings));
- memmove(settings->reg_settings_6g,
- integrated_info->dp2_ext_hdmi_6g_reg_settings,
- sizeof(integrated_info->dp2_ext_hdmi_6g_reg_settings));
- result = true;
- break;
- case ENGINE_ID_DIGD:
- settings->slv_addr = integrated_info->dp3_ext_hdmi_slv_addr;
- settings->reg_num = integrated_info->dp3_ext_hdmi_6g_reg_num;
- settings->reg_num_6g = integrated_info->dp3_ext_hdmi_6g_reg_num;
- memmove(settings->reg_settings,
- integrated_info->dp3_ext_hdmi_reg_settings,
- sizeof(integrated_info->dp3_ext_hdmi_reg_settings));
- memmove(settings->reg_settings_6g,
- integrated_info->dp3_ext_hdmi_6g_reg_settings,
- sizeof(integrated_info->dp3_ext_hdmi_6g_reg_settings));
- result = true;
- break;
- default:
- break;
- }
-
- if (result == true) {
- // Validate settings from bios integrated info table
- if (settings->slv_addr == 0)
- return false;
- if (settings->reg_num > 9)
- return false;
- if (settings->reg_num_6g > 3)
- return false;
-
- for (i = 0; i < settings->reg_num; i++) {
- if (settings->reg_settings[i].i2c_reg_index > 0x20)
- return false;
- }
-
- for (i = 0; i < settings->reg_num_6g; i++) {
- if (settings->reg_settings_6g[i].i2c_reg_index > 0x20)
- return false;
- }
- }
- }
-
- return result;
-}
-
-static bool i2c_write(struct pipe_ctx *pipe_ctx,
- uint8_t address, uint8_t *buffer, uint32_t length)
-{
- struct i2c_command cmd = {0};
- struct i2c_payload payload = {0};
-
- memset(&payload, 0, sizeof(payload));
- memset(&cmd, 0, sizeof(cmd));
-
- cmd.number_of_payloads = 1;
- cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
- cmd.speed = pipe_ctx->stream->ctx->dc->caps.i2c_speed_in_khz;
-
- payload.address = address;
- payload.data = buffer;
- payload.length = length;
- payload.write = true;
- cmd.payloads = &payload;
-
- if (dm_helpers_submit_i2c(pipe_ctx->stream->ctx,
- pipe_ctx->stream->link, &cmd))
- return true;
-
- return false;
-}
-
-static void write_i2c_retimer_setting(
- struct pipe_ctx *pipe_ctx,
- bool is_vga_mode,
- bool is_over_340mhz,
- struct ext_hdmi_settings *settings)
-{
- uint8_t slave_address = (settings->slv_addr >> 1);
- uint8_t buffer[2];
- const uint8_t apply_rx_tx_change = 0x4;
- uint8_t offset = 0xA;
- uint8_t value = 0;
- int i = 0;
- bool i2c_success = false;
- DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
-
- memset(&buffer, 0, sizeof(buffer));
-
- /* Start Ext-Hdmi programming*/
-
- for (i = 0; i < settings->reg_num; i++) {
- /* Apply 3G settings */
- if (settings->reg_settings[i].i2c_reg_index <= 0x20) {
-
- buffer[0] = settings->reg_settings[i].i2c_reg_index;
- buffer[1] = settings->reg_settings[i].i2c_reg_val;
- i2c_success = i2c_write(pipe_ctx, slave_address,
- buffer, sizeof(buffer));
- RETIMER_REDRIVER_INFO("retimer write to slave_address = 0x%x,\
- offset = 0x%x, reg_val= 0x%x, i2c_success = %d\n",
- slave_address, buffer[0], buffer[1], i2c_success?1:0);
-
- if (!i2c_success)
- goto i2c_write_fail;
-
- /* Based on DP159 specs, APPLY_RX_TX_CHANGE bit in 0x0A
- * needs to be set to 1 on every 0xA-0xC write.
- */
- if (settings->reg_settings[i].i2c_reg_index == 0xA ||
- settings->reg_settings[i].i2c_reg_index == 0xB ||
- settings->reg_settings[i].i2c_reg_index == 0xC) {
-
- /* Query current value from offset 0xA */
- if (settings->reg_settings[i].i2c_reg_index == 0xA)
- value = settings->reg_settings[i].i2c_reg_val;
- else {
- i2c_success =
- dal_ddc_service_query_ddc_data(
- pipe_ctx->stream->link->ddc,
- slave_address, &offset, 1, &value, 1);
- if (!i2c_success)
- goto i2c_write_fail;
- }
-
- buffer[0] = offset;
- /* Set APPLY_RX_TX_CHANGE bit to 1 */
- buffer[1] = value | apply_rx_tx_change;
- i2c_success = i2c_write(pipe_ctx, slave_address,
- buffer, sizeof(buffer));
- RETIMER_REDRIVER_INFO("retimer write to slave_address = 0x%x,\
- offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
- slave_address, buffer[0], buffer[1], i2c_success?1:0);
- if (!i2c_success)
- goto i2c_write_fail;
- }
- }
- }
-
- /* Apply 3G settings */
- if (is_over_340mhz) {
- for (i = 0; i < settings->reg_num_6g; i++) {
- /* Apply 3G settings */
- if (settings->reg_settings[i].i2c_reg_index <= 0x20) {
-
- buffer[0] = settings->reg_settings_6g[i].i2c_reg_index;
- buffer[1] = settings->reg_settings_6g[i].i2c_reg_val;
- i2c_success = i2c_write(pipe_ctx, slave_address,
- buffer, sizeof(buffer));
- RETIMER_REDRIVER_INFO("above 340Mhz: retimer write to slave_address = 0x%x,\
- offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
- slave_address, buffer[0], buffer[1], i2c_success?1:0);
-
- if (!i2c_success)
- goto i2c_write_fail;
-
- /* Based on DP159 specs, APPLY_RX_TX_CHANGE bit in 0x0A
- * needs to be set to 1 on every 0xA-0xC write.
- */
- if (settings->reg_settings_6g[i].i2c_reg_index == 0xA ||
- settings->reg_settings_6g[i].i2c_reg_index == 0xB ||
- settings->reg_settings_6g[i].i2c_reg_index == 0xC) {
-
- /* Query current value from offset 0xA */
- if (settings->reg_settings_6g[i].i2c_reg_index == 0xA)
- value = settings->reg_settings_6g[i].i2c_reg_val;
- else {
- i2c_success =
- dal_ddc_service_query_ddc_data(
- pipe_ctx->stream->link->ddc,
- slave_address, &offset, 1, &value, 1);
- if (!i2c_success)
- goto i2c_write_fail;
- }
-
- buffer[0] = offset;
- /* Set APPLY_RX_TX_CHANGE bit to 1 */
- buffer[1] = value | apply_rx_tx_change;
- i2c_success = i2c_write(pipe_ctx, slave_address,
- buffer, sizeof(buffer));
- RETIMER_REDRIVER_INFO("retimer write to slave_address = 0x%x,\
- offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
- slave_address, buffer[0], buffer[1], i2c_success?1:0);
- if (!i2c_success)
- goto i2c_write_fail;
- }
- }
- }
- }
-
- if (is_vga_mode) {
- /* Program additional settings if using 640x480 resolution */
-
- /* Write offset 0xFF to 0x01 */
- buffer[0] = 0xff;
- buffer[1] = 0x01;
- i2c_success = i2c_write(pipe_ctx, slave_address,
- buffer, sizeof(buffer));
- RETIMER_REDRIVER_INFO("retimer write to slave_address = 0x%x,\
- offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
- slave_address, buffer[0], buffer[1], i2c_success?1:0);
- if (!i2c_success)
- goto i2c_write_fail;
-
- /* Write offset 0x00 to 0x23 */
- buffer[0] = 0x00;
- buffer[1] = 0x23;
- i2c_success = i2c_write(pipe_ctx, slave_address,
- buffer, sizeof(buffer));
- RETIMER_REDRIVER_INFO("retimer write to slave_address = 0x%x,\
- offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
- slave_address, buffer[0], buffer[1], i2c_success?1:0);
- if (!i2c_success)
- goto i2c_write_fail;
-
- /* Write offset 0xff to 0x00 */
- buffer[0] = 0xff;
- buffer[1] = 0x00;
- i2c_success = i2c_write(pipe_ctx, slave_address,
- buffer, sizeof(buffer));
- RETIMER_REDRIVER_INFO("retimer write to slave_address = 0x%x,\
- offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
- slave_address, buffer[0], buffer[1], i2c_success?1:0);
- if (!i2c_success)
- goto i2c_write_fail;
-
- }
-
- return;
-
-i2c_write_fail:
- DC_LOG_DEBUG("Set retimer failed");
-}
-
-static void write_i2c_default_retimer_setting(
- struct pipe_ctx *pipe_ctx,
- bool is_vga_mode,
- bool is_over_340mhz)
-{
- uint8_t slave_address = (0xBA >> 1);
- uint8_t buffer[2];
- bool i2c_success = false;
- DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
-
- memset(&buffer, 0, sizeof(buffer));
-
- /* Program Slave Address for tuning single integrity */
- /* Write offset 0x0A to 0x13 */
- buffer[0] = 0x0A;
- buffer[1] = 0x13;
- i2c_success = i2c_write(pipe_ctx, slave_address,
- buffer, sizeof(buffer));
- RETIMER_REDRIVER_INFO("retimer writes default setting to slave_address = 0x%x,\
- offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
- slave_address, buffer[0], buffer[1], i2c_success?1:0);
- if (!i2c_success)
- goto i2c_write_fail;
-
- /* Write offset 0x0A to 0x17 */
- buffer[0] = 0x0A;
- buffer[1] = 0x17;
- i2c_success = i2c_write(pipe_ctx, slave_address,
- buffer, sizeof(buffer));
- RETIMER_REDRIVER_INFO("retimer write to slave_addr = 0x%x,\
- offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
- slave_address, buffer[0], buffer[1], i2c_success?1:0);
- if (!i2c_success)
- goto i2c_write_fail;
-
- /* Write offset 0x0B to 0xDA or 0xD8 */
- buffer[0] = 0x0B;
- buffer[1] = is_over_340mhz ? 0xDA : 0xD8;
- i2c_success = i2c_write(pipe_ctx, slave_address,
- buffer, sizeof(buffer));
- RETIMER_REDRIVER_INFO("retimer write to slave_addr = 0x%x,\
- offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
- slave_address, buffer[0], buffer[1], i2c_success?1:0);
- if (!i2c_success)
- goto i2c_write_fail;
-
- /* Write offset 0x0A to 0x17 */
- buffer[0] = 0x0A;
- buffer[1] = 0x17;
- i2c_success = i2c_write(pipe_ctx, slave_address,
- buffer, sizeof(buffer));
- RETIMER_REDRIVER_INFO("retimer write to slave_addr = 0x%x,\
- offset = 0x%x, reg_val= 0x%x, i2c_success = %d\n",
- slave_address, buffer[0], buffer[1], i2c_success?1:0);
- if (!i2c_success)
- goto i2c_write_fail;
-
- /* Write offset 0x0C to 0x1D or 0x91 */
- buffer[0] = 0x0C;
- buffer[1] = is_over_340mhz ? 0x1D : 0x91;
- i2c_success = i2c_write(pipe_ctx, slave_address,
- buffer, sizeof(buffer));
- RETIMER_REDRIVER_INFO("retimer write to slave_addr = 0x%x,\
- offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
- slave_address, buffer[0], buffer[1], i2c_success?1:0);
- if (!i2c_success)
- goto i2c_write_fail;
-
- /* Write offset 0x0A to 0x17 */
- buffer[0] = 0x0A;
- buffer[1] = 0x17;
- i2c_success = i2c_write(pipe_ctx, slave_address,
- buffer, sizeof(buffer));
- RETIMER_REDRIVER_INFO("retimer write to slave_addr = 0x%x,\
- offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
- slave_address, buffer[0], buffer[1], i2c_success?1:0);
- if (!i2c_success)
- goto i2c_write_fail;
-
-
- if (is_vga_mode) {
- /* Program additional settings if using 640x480 resolution */
-
- /* Write offset 0xFF to 0x01 */
- buffer[0] = 0xff;
- buffer[1] = 0x01;
- i2c_success = i2c_write(pipe_ctx, slave_address,
- buffer, sizeof(buffer));
- RETIMER_REDRIVER_INFO("retimer write to slave_addr = 0x%x,\
- offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
- slave_address, buffer[0], buffer[1], i2c_success?1:0);
- if (!i2c_success)
- goto i2c_write_fail;
-
- /* Write offset 0x00 to 0x23 */
- buffer[0] = 0x00;
- buffer[1] = 0x23;
- i2c_success = i2c_write(pipe_ctx, slave_address,
- buffer, sizeof(buffer));
- RETIMER_REDRIVER_INFO("retimer write to slave_addr = 0x%x,\
- offset = 0x%x, reg_val= 0x%x, i2c_success = %d\n",
- slave_address, buffer[0], buffer[1], i2c_success?1:0);
- if (!i2c_success)
- goto i2c_write_fail;
-
- /* Write offset 0xff to 0x00 */
- buffer[0] = 0xff;
- buffer[1] = 0x00;
- i2c_success = i2c_write(pipe_ctx, slave_address,
- buffer, sizeof(buffer));
- RETIMER_REDRIVER_INFO("retimer write default setting to slave_addr = 0x%x,\
- offset = 0x%x, reg_val= 0x%x, i2c_success = %d end here\n",
- slave_address, buffer[0], buffer[1], i2c_success?1:0);
- if (!i2c_success)
- goto i2c_write_fail;
- }
-
- return;
-
-i2c_write_fail:
- DC_LOG_DEBUG("Set default retimer failed");
-}
-
-static void write_i2c_redriver_setting(
- struct pipe_ctx *pipe_ctx,
- bool is_over_340mhz)
-{
- uint8_t slave_address = (0xF0 >> 1);
- uint8_t buffer[16];
- bool i2c_success = false;
- DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
-
- memset(&buffer, 0, sizeof(buffer));
-
- // Program Slave Address for tuning single integrity
- buffer[3] = 0x4E;
- buffer[4] = 0x4E;
- buffer[5] = 0x4E;
- buffer[6] = is_over_340mhz ? 0x4E : 0x4A;
-
- i2c_success = i2c_write(pipe_ctx, slave_address,
- buffer, sizeof(buffer));
- RETIMER_REDRIVER_INFO("redriver write 0 to all 16 reg offset expect following:\n\
- \t slave_addr = 0x%x, offset[3] = 0x%x, offset[4] = 0x%x,\
- offset[5] = 0x%x,offset[6] is_over_340mhz = 0x%x,\
- i2c_success = %d\n",
- slave_address, buffer[3], buffer[4], buffer[5], buffer[6], i2c_success?1:0);
-
- if (!i2c_success)
- DC_LOG_DEBUG("Set redriver failed");
-}
-
-static void disable_link(struct dc_link *link, const struct link_resource *link_res,
- enum signal_type signal)
-{
- /*
- * TODO: implement call for dp_set_hw_test_pattern
- * it is needed for compliance testing
- */
-
- /* Here we need to specify that encoder output settings
- * need to be calculated as for the set mode,
- * it will lead to querying dynamic link capabilities
- * which should be done before enable output
- */
-
- if (dc_is_dp_signal(signal)) {
- /* SST DP, eDP */
- struct dc_link_settings link_settings = link->cur_link_settings;
- if (dc_is_dp_sst_signal(signal))
- dp_disable_link_phy(link, link_res, signal);
- else
- dp_disable_link_phy_mst(link, link_res, signal);
-
- if (dc_is_dp_sst_signal(signal) ||
- link->mst_stream_alloc_table.stream_count == 0) {
- if (dp_get_link_encoding_format(&link_settings) == DP_8b_10b_ENCODING) {
- dp_set_fec_enable(link, false);
- dp_set_fec_ready(link, link_res, false);
- }
- }
- } else if (signal != SIGNAL_TYPE_VIRTUAL) {
- link->dc->hwss.disable_link_output(link, link_res, signal);
- }
-
- if (signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
- /* MST disable link only when no stream use the link */
- if (link->mst_stream_alloc_table.stream_count <= 0)
- link->link_status.link_active = false;
- } else {
- link->link_status.link_active = false;
- }
-}
-
-static void enable_link_hdmi(struct pipe_ctx *pipe_ctx)
-{
- struct dc_stream_state *stream = pipe_ctx->stream;
- struct dc_link *link = stream->link;
- enum dc_color_depth display_color_depth;
- enum engine_id eng_id;
- struct ext_hdmi_settings settings = {0};
- bool is_over_340mhz = false;
- bool is_vga_mode = (stream->timing.h_addressable == 640)
- && (stream->timing.v_addressable == 480);
- struct dc *dc = pipe_ctx->stream->ctx->dc;
-
- if (stream->phy_pix_clk == 0)
- stream->phy_pix_clk = stream->timing.pix_clk_100hz / 10;
- if (stream->phy_pix_clk > 340000)
- is_over_340mhz = true;
-
- if (dc_is_hdmi_signal(pipe_ctx->stream->signal)) {
- unsigned short masked_chip_caps = pipe_ctx->stream->link->chip_caps &
- EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK;
- if (masked_chip_caps == EXT_DISPLAY_PATH_CAPS__HDMI20_TISN65DP159RSBT) {
- /* DP159, Retimer settings */
- eng_id = pipe_ctx->stream_res.stream_enc->id;
-
- if (get_ext_hdmi_settings(pipe_ctx, eng_id, &settings)) {
- write_i2c_retimer_setting(pipe_ctx,
- is_vga_mode, is_over_340mhz, &settings);
- } else {
- write_i2c_default_retimer_setting(pipe_ctx,
- is_vga_mode, is_over_340mhz);
- }
- } else if (masked_chip_caps == EXT_DISPLAY_PATH_CAPS__HDMI20_PI3EQX1204) {
- /* PI3EQX1204, Redriver settings */
- write_i2c_redriver_setting(pipe_ctx, is_over_340mhz);
- }
- }
-
- if (dc_is_hdmi_signal(pipe_ctx->stream->signal))
- dal_ddc_service_write_scdc_data(
- stream->link->ddc,
- stream->phy_pix_clk,
- stream->timing.flags.LTE_340MCSC_SCRAMBLE);
-
- memset(&stream->link->cur_link_settings, 0,
- sizeof(struct dc_link_settings));
-
- display_color_depth = stream->timing.display_color_depth;
- if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR422)
- display_color_depth = COLOR_DEPTH_888;
-
- dc->hwss.enable_tmds_link_output(
- link,
- &pipe_ctx->link_res,
- pipe_ctx->stream->signal,
- pipe_ctx->clock_source->id,
- display_color_depth,
- stream->phy_pix_clk);
-
- if (dc_is_hdmi_signal(pipe_ctx->stream->signal))
- dal_ddc_service_read_scdc_data(link->ddc);
-}
-
-static void enable_link_lvds(struct pipe_ctx *pipe_ctx)
-{
- struct dc_stream_state *stream = pipe_ctx->stream;
- struct dc_link *link = stream->link;
- struct dc *dc = stream->ctx->dc;
-
- if (stream->phy_pix_clk == 0)
- stream->phy_pix_clk = stream->timing.pix_clk_100hz / 10;
-
- memset(&stream->link->cur_link_settings, 0,
- sizeof(struct dc_link_settings));
- dc->hwss.enable_lvds_link_output(
- link,
- &pipe_ctx->link_res,
- pipe_ctx->clock_source->id,
- stream->phy_pix_clk);
-
-}
-
-bool dc_power_alpm_dpcd_enable(struct dc_link *link, bool enable)
-{
- bool ret = false;
- union dpcd_alpm_configuration alpm_config;
-
- if (link->psr_settings.psr_version == DC_PSR_VERSION_SU_1) {
- memset(&alpm_config, 0, sizeof(alpm_config));
-
- alpm_config.bits.ENABLE = (enable ? true : false);
- ret = dm_helpers_dp_write_dpcd(link->ctx, link,
- DP_RECEIVER_ALPM_CONFIG, &alpm_config.raw,
- sizeof(alpm_config.raw));
- }
- return ret;
-}
-
-/****************************enable_link***********************************/
-static enum dc_status enable_link(
- struct dc_state *state,
- struct pipe_ctx *pipe_ctx)
-{
- enum dc_status status = DC_ERROR_UNEXPECTED;
- struct dc_stream_state *stream = pipe_ctx->stream;
- struct dc_link *link = stream->link;
-
- /* There's some scenarios where driver is unloaded with display
- * still enabled. When driver is reloaded, it may cause a display
- * to not light up if there is a mismatch between old and new
- * link settings. Need to call disable first before enabling at
- * new link settings.
- */
- if (link->link_status.link_active) {
- disable_link(link, &pipe_ctx->link_res, pipe_ctx->stream->signal);
- }
-
- switch (pipe_ctx->stream->signal) {
- case SIGNAL_TYPE_DISPLAY_PORT:
- status = enable_link_dp(state, pipe_ctx);
- break;
- case SIGNAL_TYPE_EDP:
- status = enable_link_edp(state, pipe_ctx);
- break;
- case SIGNAL_TYPE_DISPLAY_PORT_MST:
- status = enable_link_dp_mst(state, pipe_ctx);
- msleep(200);
- break;
- case SIGNAL_TYPE_DVI_SINGLE_LINK:
- case SIGNAL_TYPE_DVI_DUAL_LINK:
- case SIGNAL_TYPE_HDMI_TYPE_A:
- enable_link_hdmi(pipe_ctx);
- status = DC_OK;
- break;
- case SIGNAL_TYPE_LVDS:
- enable_link_lvds(pipe_ctx);
- status = DC_OK;
- break;
- case SIGNAL_TYPE_VIRTUAL:
- status = DC_OK;
- break;
- default:
- break;
- }
-
- if (status == DC_OK)
- pipe_ctx->stream->link->link_status.link_active = true;
-
- return status;
-}
-
-static uint32_t get_timing_pixel_clock_100hz(const struct dc_crtc_timing *timing)
-{
-
- uint32_t pxl_clk = timing->pix_clk_100hz;
-
- if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
- pxl_clk /= 2;
- else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422)
- pxl_clk = pxl_clk * 2 / 3;
-
- if (timing->display_color_depth == COLOR_DEPTH_101010)
- pxl_clk = pxl_clk * 10 / 8;
- else if (timing->display_color_depth == COLOR_DEPTH_121212)
- pxl_clk = pxl_clk * 12 / 8;
-
- return pxl_clk;
-}
-
-static bool dp_active_dongle_validate_timing(
- const struct dc_crtc_timing *timing,
- const struct dpcd_caps *dpcd_caps)
-{
- const struct dc_dongle_caps *dongle_caps = &dpcd_caps->dongle_caps;
-
- switch (dpcd_caps->dongle_type) {
- case DISPLAY_DONGLE_DP_VGA_CONVERTER:
- case DISPLAY_DONGLE_DP_DVI_CONVERTER:
- case DISPLAY_DONGLE_DP_DVI_DONGLE:
- if (timing->pixel_encoding == PIXEL_ENCODING_RGB)
- return true;
- else
- return false;
- default:
- break;
- }
-
- if (dpcd_caps->dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER &&
- dongle_caps->extendedCapValid == true) {
- /* Check Pixel Encoding */
- switch (timing->pixel_encoding) {
- case PIXEL_ENCODING_RGB:
- case PIXEL_ENCODING_YCBCR444:
- break;
- case PIXEL_ENCODING_YCBCR422:
- if (!dongle_caps->is_dp_hdmi_ycbcr422_pass_through)
- return false;
- break;
- case PIXEL_ENCODING_YCBCR420:
- if (!dongle_caps->is_dp_hdmi_ycbcr420_pass_through)
- return false;
- break;
- default:
- /* Invalid Pixel Encoding*/
- return false;
- }
-
- switch (timing->display_color_depth) {
- case COLOR_DEPTH_666:
- case COLOR_DEPTH_888:
- /*888 and 666 should always be supported*/
- break;
- case COLOR_DEPTH_101010:
- if (dongle_caps->dp_hdmi_max_bpc < 10)
- return false;
- break;
- case COLOR_DEPTH_121212:
- if (dongle_caps->dp_hdmi_max_bpc < 12)
- return false;
- break;
- case COLOR_DEPTH_141414:
- case COLOR_DEPTH_161616:
- default:
- /* These color depths are currently not supported */
- return false;
- }
-
- /* Check 3D format */
- switch (timing->timing_3d_format) {
- case TIMING_3D_FORMAT_NONE:
- case TIMING_3D_FORMAT_FRAME_ALTERNATE:
- /*Only frame alternate 3D is supported on active dongle*/
- break;
- default:
- /*other 3D formats are not supported due to bad infoframe translation */
- return false;
- }
-
-#if defined(CONFIG_DRM_AMD_DC_DCN)
- if (dongle_caps->dp_hdmi_frl_max_link_bw_in_kbps > 0) { // DP to HDMI FRL converter
- struct dc_crtc_timing outputTiming = *timing;
-
- if (timing->flags.DSC && !timing->dsc_cfg.is_frl)
- /* DP input has DSC, HDMI FRL output doesn't have DSC, remove DSC from output timing */
- outputTiming.flags.DSC = 0;
- if (dc_bandwidth_in_kbps_from_timing(&outputTiming) > dongle_caps->dp_hdmi_frl_max_link_bw_in_kbps)
- return false;
- } else { // DP to HDMI TMDS converter
- if (get_timing_pixel_clock_100hz(timing) > (dongle_caps->dp_hdmi_max_pixel_clk_in_khz * 10))
- return false;
- }
-#else
- if (get_timing_pixel_clock_100hz(timing) > (dongle_caps->dp_hdmi_max_pixel_clk_in_khz * 10))
- return false;
-#endif
- }
-
- if (dpcd_caps->channel_coding_cap.bits.DP_128b_132b_SUPPORTED == 0 &&
- dpcd_caps->dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_PASSTHROUGH_SUPPORT == 0 &&
- dongle_caps->dfp_cap_ext.supported) {
-
- if (dongle_caps->dfp_cap_ext.max_pixel_rate_in_mps < (timing->pix_clk_100hz / 10000))
- return false;
-
- if (dongle_caps->dfp_cap_ext.max_video_h_active_width < timing->h_addressable)
- return false;
-
- if (dongle_caps->dfp_cap_ext.max_video_v_active_height < timing->v_addressable)
- return false;
-
- if (timing->pixel_encoding == PIXEL_ENCODING_RGB) {
- if (!dongle_caps->dfp_cap_ext.encoding_format_caps.support_rgb)
- return false;
- if (timing->display_color_depth == COLOR_DEPTH_666 &&
- !dongle_caps->dfp_cap_ext.rgb_color_depth_caps.support_6bpc)
- return false;
- else if (timing->display_color_depth == COLOR_DEPTH_888 &&
- !dongle_caps->dfp_cap_ext.rgb_color_depth_caps.support_8bpc)
- return false;
- else if (timing->display_color_depth == COLOR_DEPTH_101010 &&
- !dongle_caps->dfp_cap_ext.rgb_color_depth_caps.support_10bpc)
- return false;
- else if (timing->display_color_depth == COLOR_DEPTH_121212 &&
- !dongle_caps->dfp_cap_ext.rgb_color_depth_caps.support_12bpc)
- return false;
- else if (timing->display_color_depth == COLOR_DEPTH_161616 &&
- !dongle_caps->dfp_cap_ext.rgb_color_depth_caps.support_16bpc)
- return false;
- } else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR444) {
- if (!dongle_caps->dfp_cap_ext.encoding_format_caps.support_rgb)
- return false;
- if (timing->display_color_depth == COLOR_DEPTH_888 &&
- !dongle_caps->dfp_cap_ext.ycbcr444_color_depth_caps.support_8bpc)
- return false;
- else if (timing->display_color_depth == COLOR_DEPTH_101010 &&
- !dongle_caps->dfp_cap_ext.ycbcr444_color_depth_caps.support_10bpc)
- return false;
- else if (timing->display_color_depth == COLOR_DEPTH_121212 &&
- !dongle_caps->dfp_cap_ext.ycbcr444_color_depth_caps.support_12bpc)
- return false;
- else if (timing->display_color_depth == COLOR_DEPTH_161616 &&
- !dongle_caps->dfp_cap_ext.ycbcr444_color_depth_caps.support_16bpc)
- return false;
- } else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422) {
- if (!dongle_caps->dfp_cap_ext.encoding_format_caps.support_rgb)
- return false;
- if (timing->display_color_depth == COLOR_DEPTH_888 &&
- !dongle_caps->dfp_cap_ext.ycbcr422_color_depth_caps.support_8bpc)
- return false;
- else if (timing->display_color_depth == COLOR_DEPTH_101010 &&
- !dongle_caps->dfp_cap_ext.ycbcr422_color_depth_caps.support_10bpc)
- return false;
- else if (timing->display_color_depth == COLOR_DEPTH_121212 &&
- !dongle_caps->dfp_cap_ext.ycbcr422_color_depth_caps.support_12bpc)
- return false;
- else if (timing->display_color_depth == COLOR_DEPTH_161616 &&
- !dongle_caps->dfp_cap_ext.ycbcr422_color_depth_caps.support_16bpc)
- return false;
- } else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) {
- if (!dongle_caps->dfp_cap_ext.encoding_format_caps.support_rgb)
- return false;
- if (timing->display_color_depth == COLOR_DEPTH_888 &&
- !dongle_caps->dfp_cap_ext.ycbcr420_color_depth_caps.support_8bpc)
- return false;
- else if (timing->display_color_depth == COLOR_DEPTH_101010 &&
- !dongle_caps->dfp_cap_ext.ycbcr420_color_depth_caps.support_10bpc)
- return false;
- else if (timing->display_color_depth == COLOR_DEPTH_121212 &&
- !dongle_caps->dfp_cap_ext.ycbcr420_color_depth_caps.support_12bpc)
- return false;
- else if (timing->display_color_depth == COLOR_DEPTH_161616 &&
- !dongle_caps->dfp_cap_ext.ycbcr420_color_depth_caps.support_16bpc)
- return false;
- }
- }
-
- return true;
-}
-
-enum dc_status dc_link_validate_mode_timing(
- const struct dc_stream_state *stream,
- struct dc_link *link,
- const struct dc_crtc_timing *timing)
-{
- uint32_t max_pix_clk = stream->link->dongle_max_pix_clk * 10;
- struct dpcd_caps *dpcd_caps = &link->dpcd_caps;
-
- /* A hack to avoid failing any modes for EDID override feature on
- * topology change such as lower quality cable for DP or different dongle
- */
- if (link->remote_sinks[0] && link->remote_sinks[0]->sink_signal == SIGNAL_TYPE_VIRTUAL)
- return DC_OK;
-
- /* Passive Dongle */
- if (max_pix_clk != 0 && get_timing_pixel_clock_100hz(timing) > max_pix_clk)
- return DC_EXCEED_DONGLE_CAP;
-
- /* Active Dongle*/
- if (!dp_active_dongle_validate_timing(timing, dpcd_caps))
- return DC_EXCEED_DONGLE_CAP;
-
- switch (stream->signal) {
- case SIGNAL_TYPE_EDP:
- case SIGNAL_TYPE_DISPLAY_PORT:
- if (!dp_validate_mode_timing(
- link,
- timing))
- return DC_NO_DP_LINK_BANDWIDTH;
- break;
-
- default:
- break;
- }
-
- return DC_OK;
-}
-
-static struct abm *get_abm_from_stream_res(const struct dc_link *link)
-{
- int i;
- struct dc *dc = NULL;
- struct abm *abm = NULL;
-
- if (!link || !link->ctx)
- return NULL;
-
- dc = link->ctx->dc;
-
- for (i = 0; i < MAX_PIPES; i++) {
- struct pipe_ctx pipe_ctx = dc->current_state->res_ctx.pipe_ctx[i];
- struct dc_stream_state *stream = pipe_ctx.stream;
-
- if (stream && stream->link == link) {
- abm = pipe_ctx.stream_res.abm;
- break;
- }
- }
- return abm;
-}
-
-int dc_link_get_backlight_level(const struct dc_link *link)
-{
- struct abm *abm = get_abm_from_stream_res(link);
- struct panel_cntl *panel_cntl = link->panel_cntl;
- struct dc *dc = link->ctx->dc;
- struct dmcu *dmcu = dc->res_pool->dmcu;
- bool fw_set_brightness = true;
-
- if (dmcu)
- fw_set_brightness = dmcu->funcs->is_dmcu_initialized(dmcu);
-
- if (!fw_set_brightness && panel_cntl->funcs->get_current_backlight)
- return panel_cntl->funcs->get_current_backlight(panel_cntl);
- else if (abm != NULL && abm->funcs->get_current_backlight != NULL)
- return (int) abm->funcs->get_current_backlight(abm);
- else
- return DC_ERROR_UNEXPECTED;
-}
-
-int dc_link_get_target_backlight_pwm(const struct dc_link *link)
-{
- struct abm *abm = get_abm_from_stream_res(link);
-
- if (abm == NULL || abm->funcs->get_target_backlight == NULL)
- return DC_ERROR_UNEXPECTED;
-
- return (int) abm->funcs->get_target_backlight(abm);
-}
-
-static struct pipe_ctx *get_pipe_from_link(const struct dc_link *link)
-{
- int i;
- struct dc *dc = link->ctx->dc;
- struct pipe_ctx *pipe_ctx = NULL;
-
- for (i = 0; i < MAX_PIPES; i++) {
- if (dc->current_state->res_ctx.pipe_ctx[i].stream) {
- if (dc->current_state->res_ctx.pipe_ctx[i].stream->link == link) {
- pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
- break;
- }
- }
- }
-
- return pipe_ctx;
-}
-
-bool dc_link_set_backlight_level(const struct dc_link *link,
- uint32_t backlight_pwm_u16_16,
- uint32_t frame_ramp)
-{
- struct dc *dc = link->ctx->dc;
-
- DC_LOGGER_INIT(link->ctx->logger);
- DC_LOG_BACKLIGHT("New Backlight level: %d (0x%X)\n",
- backlight_pwm_u16_16, backlight_pwm_u16_16);
-
- if (dc_is_embedded_signal(link->connector_signal)) {
- struct pipe_ctx *pipe_ctx = get_pipe_from_link(link);
-
- if (pipe_ctx) {
- /* Disable brightness ramping when the display is blanked
- * as it can hang the DMCU
- */
- if (pipe_ctx->plane_state == NULL)
- frame_ramp = 0;
- } else {
- return false;
- }
-
- dc->hwss.set_backlight_level(
- pipe_ctx,
- backlight_pwm_u16_16,
- frame_ramp);
- }
- return true;
-}
-
-bool dc_link_set_psr_allow_active(struct dc_link *link, const bool *allow_active,
- bool wait, bool force_static, const unsigned int *power_opts)
-{
- struct dc *dc = link->ctx->dc;
- struct dmcu *dmcu = dc->res_pool->dmcu;
- struct dmub_psr *psr = dc->res_pool->psr;
- unsigned int panel_inst;
-
- if (psr == NULL && force_static)
- return false;
-
- if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst))
- return false;
-
- if ((allow_active != NULL) && (*allow_active == true) && (link->type == dc_connection_none)) {
- // Don't enter PSR if panel is not connected
- return false;
- }
-
- /* Set power optimization flag */
- if (power_opts && link->psr_settings.psr_power_opt != *power_opts) {
- link->psr_settings.psr_power_opt = *power_opts;
-
- if (psr != NULL && link->psr_settings.psr_feature_enabled && psr->funcs->psr_set_power_opt)
- psr->funcs->psr_set_power_opt(psr, link->psr_settings.psr_power_opt, panel_inst);
- }
-
- if (psr != NULL && link->psr_settings.psr_feature_enabled &&
- force_static && psr->funcs->psr_force_static)
- psr->funcs->psr_force_static(psr, panel_inst);
-
- /* Enable or Disable PSR */
- if (allow_active && link->psr_settings.psr_allow_active != *allow_active) {
- link->psr_settings.psr_allow_active = *allow_active;
-
- if (!link->psr_settings.psr_allow_active)
- dc_z10_restore(dc);
-
- if (psr != NULL && link->psr_settings.psr_feature_enabled) {
- psr->funcs->psr_enable(psr, link->psr_settings.psr_allow_active, wait, panel_inst);
- } else if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu)) &&
- link->psr_settings.psr_feature_enabled)
- dmcu->funcs->set_psr_enable(dmcu, link->psr_settings.psr_allow_active, wait);
- else
- return false;
- }
-
- return true;
-}
-
-bool dc_link_get_psr_state(const struct dc_link *link, enum dc_psr_state *state)
-{
- struct dc *dc = link->ctx->dc;
- struct dmcu *dmcu = dc->res_pool->dmcu;
- struct dmub_psr *psr = dc->res_pool->psr;
- unsigned int panel_inst;
-
- if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst))
- return false;
-
- if (psr != NULL && link->psr_settings.psr_feature_enabled)
- psr->funcs->psr_get_state(psr, state, panel_inst);
- else if (dmcu != NULL && link->psr_settings.psr_feature_enabled)
- dmcu->funcs->get_psr_state(dmcu, state);
-
- return true;
-}
-
-static inline enum physical_phy_id
-transmitter_to_phy_id(enum transmitter transmitter_value)
-{
- switch (transmitter_value) {
- case TRANSMITTER_UNIPHY_A:
- return PHYLD_0;
- case TRANSMITTER_UNIPHY_B:
- return PHYLD_1;
- case TRANSMITTER_UNIPHY_C:
- return PHYLD_2;
- case TRANSMITTER_UNIPHY_D:
- return PHYLD_3;
- case TRANSMITTER_UNIPHY_E:
- return PHYLD_4;
- case TRANSMITTER_UNIPHY_F:
- return PHYLD_5;
- case TRANSMITTER_NUTMEG_CRT:
- return PHYLD_6;
- case TRANSMITTER_TRAVIS_CRT:
- return PHYLD_7;
- case TRANSMITTER_TRAVIS_LCD:
- return PHYLD_8;
- case TRANSMITTER_UNIPHY_G:
- return PHYLD_9;
- case TRANSMITTER_COUNT:
- return PHYLD_COUNT;
- case TRANSMITTER_UNKNOWN:
- return PHYLD_UNKNOWN;
- default:
- WARN_ONCE(1, "Unknown transmitter value %d\n",
- transmitter_value);
- return PHYLD_UNKNOWN;
- }
-}
-
-bool dc_link_setup_psr(struct dc_link *link,
- const struct dc_stream_state *stream, struct psr_config *psr_config,
- struct psr_context *psr_context)
-{
- struct dc *dc;
- struct dmcu *dmcu;
- struct dmub_psr *psr;
- int i;
- unsigned int panel_inst;
- /* updateSinkPsrDpcdConfig*/
- union dpcd_psr_configuration psr_configuration;
- union dpcd_sink_active_vtotal_control_mode vtotal_control = {0};
-
- psr_context->controllerId = CONTROLLER_ID_UNDEFINED;
-
- if (!link)
- return false;
-
- dc = link->ctx->dc;
- dmcu = dc->res_pool->dmcu;
- psr = dc->res_pool->psr;
-
- if (!dmcu && !psr)
- return false;
-
- if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst))
- return false;
-
-
- memset(&psr_configuration, 0, sizeof(psr_configuration));
-
- psr_configuration.bits.ENABLE = 1;
- psr_configuration.bits.CRC_VERIFICATION = 1;
- psr_configuration.bits.FRAME_CAPTURE_INDICATION =
- psr_config->psr_frame_capture_indication_req;
-
- /* Check for PSR v2*/
- if (link->psr_settings.psr_version == DC_PSR_VERSION_SU_1) {
- /* For PSR v2 selective update.
- * Indicates whether sink should start capturing
- * immediately following active scan line,
- * or starting with the 2nd active scan line.
- */
- psr_configuration.bits.LINE_CAPTURE_INDICATION = 0;
- /*For PSR v2, determines whether Sink should generate
- * IRQ_HPD when CRC mismatch is detected.
- */
- psr_configuration.bits.IRQ_HPD_WITH_CRC_ERROR = 1;
- /* For PSR v2, set the bit when the Source device will
- * be enabling PSR2 operation.
- */
- psr_configuration.bits.ENABLE_PSR2 = 1;
- /* For PSR v2, the Sink device must be able to receive
- * SU region updates early in the frame time.
- */
- psr_configuration.bits.EARLY_TRANSPORT_ENABLE = 1;
- }
-
- dm_helpers_dp_write_dpcd(
- link->ctx,
- link,
- 368,
- &psr_configuration.raw,
- sizeof(psr_configuration.raw));
-
- if (link->psr_settings.psr_version == DC_PSR_VERSION_SU_1) {
- dc_power_alpm_dpcd_enable(link, true);
- psr_context->su_granularity_required =
- psr_config->su_granularity_required;
- psr_context->su_y_granularity =
- psr_config->su_y_granularity;
- psr_context->line_time_in_us =
- psr_config->line_time_in_us;
-
- if (link->psr_settings.psr_vtotal_control_support) {
- psr_context->rate_control_caps = psr_config->rate_control_caps;
- vtotal_control.bits.ENABLE = true;
- core_link_write_dpcd(link, DP_SINK_PSR_ACTIVE_VTOTAL_CONTROL_MODE,
- &vtotal_control.raw, sizeof(vtotal_control.raw));
- }
- }
-
- psr_context->channel = link->ddc->ddc_pin->hw_info.ddc_channel;
- psr_context->transmitterId = link->link_enc->transmitter;
- psr_context->engineId = link->link_enc->preferred_engine;
-
- for (i = 0; i < MAX_PIPES; i++) {
- if (dc->current_state->res_ctx.pipe_ctx[i].stream
- == stream) {
- /* dmcu -1 for all controller id values,
- * therefore +1 here
- */
- psr_context->controllerId =
- dc->current_state->res_ctx.
- pipe_ctx[i].stream_res.tg->inst + 1;
- break;
- }
- }
-
- /* Hardcoded for now. Can be Pcie or Uniphy (or Unknown)*/
- psr_context->phyType = PHY_TYPE_UNIPHY;
- /*PhyId is associated with the transmitter id*/
- psr_context->smuPhyId =
- transmitter_to_phy_id(link->link_enc->transmitter);
-
- psr_context->crtcTimingVerticalTotal = stream->timing.v_total;
- psr_context->vsync_rate_hz = div64_u64(div64_u64((stream->
- timing.pix_clk_100hz * 100),
- stream->timing.v_total),
- stream->timing.h_total);
-
- psr_context->psrSupportedDisplayConfig = true;
- psr_context->psrExitLinkTrainingRequired =
- psr_config->psr_exit_link_training_required;
- psr_context->sdpTransmitLineNumDeadline =
- psr_config->psr_sdp_transmit_line_num_deadline;
- psr_context->psrFrameCaptureIndicationReq =
- psr_config->psr_frame_capture_indication_req;
-
- psr_context->skipPsrWaitForPllLock = 0; /* only = 1 in KV */
-
- psr_context->numberOfControllers =
- link->dc->res_pool->timing_generator_count;
-
- psr_context->rfb_update_auto_en = true;
-
- /* 2 frames before enter PSR. */
- psr_context->timehyst_frames = 2;
- /* half a frame
- * (units in 100 lines, i.e. a value of 1 represents 100 lines)
- */
- psr_context->hyst_lines = stream->timing.v_total / 2 / 100;
- psr_context->aux_repeats = 10;
-
- psr_context->psr_level.u32all = 0;
-
- /*skip power down the single pipe since it blocks the cstate*/
-#if defined(CONFIG_DRM_AMD_DC_DCN)
- if (link->ctx->asic_id.chip_family >= FAMILY_RV) {
- switch(link->ctx->asic_id.chip_family) {
- case FAMILY_YELLOW_CARP:
- case AMDGPU_FAMILY_GC_10_3_6:
- case AMDGPU_FAMILY_GC_11_0_1:
- if (dc->debug.disable_z10 || dc->debug.psr_skip_crtc_disable)
- psr_context->psr_level.bits.SKIP_CRTC_DISABLE = true;
- break;
- default:
- psr_context->psr_level.bits.SKIP_CRTC_DISABLE = true;
- break;
- }
- }
-#else
- if (link->ctx->asic_id.chip_family >= FAMILY_RV)
- psr_context->psr_level.bits.SKIP_CRTC_DISABLE = true;
-#endif
-
- /* SMU will perform additional powerdown sequence.
- * For unsupported ASICs, set psr_level flag to skip PSR
- * static screen notification to SMU.
- * (Always set for DAL2, did not check ASIC)
- */
- psr_context->allow_smu_optimizations = psr_config->allow_smu_optimizations;
- psr_context->allow_multi_disp_optimizations = psr_config->allow_multi_disp_optimizations;
-
- /* Complete PSR entry before aborting to prevent intermittent
- * freezes on certain eDPs
- */
- psr_context->psr_level.bits.DISABLE_PSR_ENTRY_ABORT = 1;
-
- /* enable ALPM */
- psr_context->psr_level.bits.DISABLE_ALPM = 0;
- psr_context->psr_level.bits.ALPM_DEFAULT_PD_MODE = 1;
-
- /* Controls additional delay after remote frame capture before
- * continuing power down, default = 0
- */
- psr_context->frame_delay = 0;
-
- if (psr) {
- link->psr_settings.psr_feature_enabled = psr->funcs->psr_copy_settings(psr,
- link, psr_context, panel_inst);
- link->psr_settings.psr_power_opt = 0;
- link->psr_settings.psr_allow_active = 0;
- }
- else
- link->psr_settings.psr_feature_enabled = dmcu->funcs->setup_psr(dmcu, link, psr_context);
-
- /* psr_enabled == 0 indicates setup_psr did not succeed, but this
- * should not happen since firmware should be running at this point
- */
- if (link->psr_settings.psr_feature_enabled == 0)
- ASSERT(0);
-
- return true;
-
-}
-
-void dc_link_get_psr_residency(const struct dc_link *link, uint32_t *residency)
-{
- struct dc *dc = link->ctx->dc;
- struct dmub_psr *psr = dc->res_pool->psr;
- unsigned int panel_inst;
-
- if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst))
- return;
-
- /* PSR residency measurements only supported on DMCUB */
- if (psr != NULL && link->psr_settings.psr_feature_enabled)
- psr->funcs->psr_get_residency(psr, residency, panel_inst);
- else
- *residency = 0;
-}
-
-bool dc_link_set_sink_vtotal_in_psr_active(const struct dc_link *link, uint16_t psr_vtotal_idle, uint16_t psr_vtotal_su)
-{
- struct dc *dc = link->ctx->dc;
- struct dmub_psr *psr = dc->res_pool->psr;
-
- if (psr == NULL || !link->psr_settings.psr_feature_enabled || !link->psr_settings.psr_vtotal_control_support)
- return false;
-
- psr->funcs->psr_set_sink_vtotal_in_psr_active(psr, psr_vtotal_idle, psr_vtotal_su);
-
- return true;
-}
-
-const struct dc_link_status *dc_link_get_status(const struct dc_link *link)
-{
- return &link->link_status;
-}
-
-void core_link_resume(struct dc_link *link)
-{
- if (link->connector_signal != SIGNAL_TYPE_VIRTUAL)
- program_hpd_filter(link);
-}
-
-static struct fixed31_32 get_pbn_per_slot(struct dc_stream_state *stream)
-{
- struct fixed31_32 mbytes_per_sec;
- uint32_t link_rate_in_mbytes_per_sec = dc_link_bandwidth_kbps(stream->link,
- &stream->link->cur_link_settings);
- link_rate_in_mbytes_per_sec /= 8000; /* Kbits to MBytes */
-
- mbytes_per_sec = dc_fixpt_from_int(link_rate_in_mbytes_per_sec);
-
- return dc_fixpt_div_int(mbytes_per_sec, 54);
-}
-
-static struct fixed31_32 get_pbn_from_bw_in_kbps(uint64_t kbps)
-{
- struct fixed31_32 peak_kbps;
- uint32_t numerator = 0;
- uint32_t denominator = 1;
-
- /*
- * margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006
- * The unit of 54/64Mbytes/sec is an arbitrary unit chosen based on
- * common multiplier to render an integer PBN for all link rate/lane
- * counts combinations
- * calculate
- * peak_kbps *= (1006/1000)
- * peak_kbps *= (64/54)
- * peak_kbps *= 8 convert to bytes
- */
-
- numerator = 64 * PEAK_FACTOR_X1000;
- denominator = 54 * 8 * 1000 * 1000;
- kbps *= numerator;
- peak_kbps = dc_fixpt_from_fraction(kbps, denominator);
-
- return peak_kbps;
-}
-
-static struct fixed31_32 get_pbn_from_timing(struct pipe_ctx *pipe_ctx)
-{
- uint64_t kbps;
-
- kbps = dc_bandwidth_in_kbps_from_timing(&pipe_ctx->stream->timing);
- return get_pbn_from_bw_in_kbps(kbps);
-}
-
-static void update_mst_stream_alloc_table(
- struct dc_link *link,
- struct stream_encoder *stream_enc,
- struct hpo_dp_stream_encoder *hpo_dp_stream_enc, // TODO: Rename stream_enc to dio_stream_enc?
- const struct dc_dp_mst_stream_allocation_table *proposed_table)
-{
- struct link_mst_stream_allocation work_table[MAX_CONTROLLER_NUM] = { 0 };
- struct link_mst_stream_allocation *dc_alloc;
-
- int i;
- int j;
-
- /* if DRM proposed_table has more than one new payload */
- ASSERT(proposed_table->stream_count -
- link->mst_stream_alloc_table.stream_count < 2);
-
- /* copy proposed_table to link, add stream encoder */
- for (i = 0; i < proposed_table->stream_count; i++) {
-
- for (j = 0; j < link->mst_stream_alloc_table.stream_count; j++) {
- dc_alloc =
- &link->mst_stream_alloc_table.stream_allocations[j];
-
- if (dc_alloc->vcp_id ==
- proposed_table->stream_allocations[i].vcp_id) {
-
- work_table[i] = *dc_alloc;
- work_table[i].slot_count = proposed_table->stream_allocations[i].slot_count;
- break; /* exit j loop */
- }
- }
-
- /* new vcp_id */
- if (j == link->mst_stream_alloc_table.stream_count) {
- work_table[i].vcp_id =
- proposed_table->stream_allocations[i].vcp_id;
- work_table[i].slot_count =
- proposed_table->stream_allocations[i].slot_count;
- work_table[i].stream_enc = stream_enc;
- work_table[i].hpo_dp_stream_enc = hpo_dp_stream_enc;
- }
- }
-
- /* update link->mst_stream_alloc_table with work_table */
- link->mst_stream_alloc_table.stream_count =
- proposed_table->stream_count;
- for (i = 0; i < MAX_CONTROLLER_NUM; i++)
- link->mst_stream_alloc_table.stream_allocations[i] =
- work_table[i];
-}
-
-static void remove_stream_from_alloc_table(
- struct dc_link *link,
- struct stream_encoder *dio_stream_enc,
- struct hpo_dp_stream_encoder *hpo_dp_stream_enc)
-{
- int i = 0;
- struct link_mst_stream_allocation_table *table =
- &link->mst_stream_alloc_table;
-
- if (hpo_dp_stream_enc) {
- for (; i < table->stream_count; i++)
- if (hpo_dp_stream_enc == table->stream_allocations[i].hpo_dp_stream_enc)
- break;
- } else {
- for (; i < table->stream_count; i++)
- if (dio_stream_enc == table->stream_allocations[i].stream_enc)
- break;
- }
-
- if (i < table->stream_count) {
- i++;
- for (; i < table->stream_count; i++)
- table->stream_allocations[i-1] = table->stream_allocations[i];
- memset(&table->stream_allocations[table->stream_count-1], 0,
- sizeof(struct link_mst_stream_allocation));
- table->stream_count--;
- }
-}
-
-static void dc_log_vcp_x_y(const struct dc_link *link, struct fixed31_32 avg_time_slots_per_mtp)
-{
- const uint32_t VCP_Y_PRECISION = 1000;
- uint64_t vcp_x, vcp_y;
-
- // Add 0.5*(1/VCP_Y_PRECISION) to round up to decimal precision
- avg_time_slots_per_mtp = dc_fixpt_add(
- avg_time_slots_per_mtp, dc_fixpt_from_fraction(1, 2 * VCP_Y_PRECISION));
-
- vcp_x = dc_fixpt_floor(avg_time_slots_per_mtp);
- vcp_y = dc_fixpt_floor(
- dc_fixpt_mul_int(
- dc_fixpt_sub_int(avg_time_slots_per_mtp, dc_fixpt_floor(avg_time_slots_per_mtp)),
- VCP_Y_PRECISION));
-
- if (link->type == dc_connection_mst_branch)
- DC_LOG_DP2("MST Update Payload: set_throttled_vcp_size slot X.Y for MST stream "
- "X: %lld Y: %lld/%d", vcp_x, vcp_y, VCP_Y_PRECISION);
- else
- DC_LOG_DP2("SST Update Payload: set_throttled_vcp_size slot X.Y for SST stream "
- "X: %lld Y: %lld/%d", vcp_x, vcp_y, VCP_Y_PRECISION);
-}
-
-/*
- * Payload allocation/deallocation for SST introduced in DP2.0
- */
-static enum dc_status dc_link_update_sst_payload(struct pipe_ctx *pipe_ctx,
- bool allocate)
-{
- struct dc_stream_state *stream = pipe_ctx->stream;
- struct dc_link *link = stream->link;
- struct link_mst_stream_allocation_table proposed_table = {0};
- struct fixed31_32 avg_time_slots_per_mtp;
- const struct dc_link_settings empty_link_settings = {0};
- const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
- DC_LOGGER_INIT(link->ctx->logger);
-
- /* slot X.Y for SST payload deallocate */
- if (!allocate) {
- avg_time_slots_per_mtp = dc_fixpt_from_int(0);
-
- dc_log_vcp_x_y(link, avg_time_slots_per_mtp);
-
- if (link_hwss->ext.set_throttled_vcp_size)
- link_hwss->ext.set_throttled_vcp_size(pipe_ctx,
- avg_time_slots_per_mtp);
- if (link_hwss->ext.set_hblank_min_symbol_width)
- link_hwss->ext.set_hblank_min_symbol_width(pipe_ctx,
- &empty_link_settings,
- avg_time_slots_per_mtp);
- }
-
- /* calculate VC payload and update branch with new payload allocation table*/
- if (!dpcd_write_128b_132b_sst_payload_allocation_table(
- stream,
- link,
- &proposed_table,
- allocate)) {
- DC_LOG_ERROR("SST Update Payload: Failed to update "
- "allocation table for "
- "pipe idx: %d\n",
- pipe_ctx->pipe_idx);
- return DC_FAIL_DP_PAYLOAD_ALLOCATION;
- }
-
- proposed_table.stream_allocations[0].hpo_dp_stream_enc = pipe_ctx->stream_res.hpo_dp_stream_enc;
-
- ASSERT(proposed_table.stream_count == 1);
-
- //TODO - DP2.0 Logging: Instead of hpo_dp_stream_enc pointer, log instance id
- DC_LOG_DP2("SST Update Payload: hpo_dp_stream_enc: %p "
- "vcp_id: %d "
- "slot_count: %d\n",
- (void *) proposed_table.stream_allocations[0].hpo_dp_stream_enc,
- proposed_table.stream_allocations[0].vcp_id,
- proposed_table.stream_allocations[0].slot_count);
-
- /* program DP source TX for payload */
- link_hwss->ext.update_stream_allocation_table(link, &pipe_ctx->link_res,
- &proposed_table);
-
- /* poll for ACT handled */
- if (!dpcd_poll_for_allocation_change_trigger(link)) {
- // Failures will result in blackscreen and errors logged
- BREAK_TO_DEBUGGER();
- }
-
- /* slot X.Y for SST payload allocate */
- if (allocate && dp_get_link_encoding_format(&link->cur_link_settings) ==
- DP_128b_132b_ENCODING) {
- avg_time_slots_per_mtp = calculate_sst_avg_time_slots_per_mtp(stream, link);
-
- dc_log_vcp_x_y(link, avg_time_slots_per_mtp);
-
- if (link_hwss->ext.set_throttled_vcp_size)
- link_hwss->ext.set_throttled_vcp_size(pipe_ctx,
- avg_time_slots_per_mtp);
- if (link_hwss->ext.set_hblank_min_symbol_width)
- link_hwss->ext.set_hblank_min_symbol_width(pipe_ctx,
- &link->cur_link_settings,
- avg_time_slots_per_mtp);
- }
-
- /* Always return DC_OK.
- * If part of sequence fails, log failure(s) and show blackscreen
- */
- return DC_OK;
-}
-
-/* convert link_mst_stream_alloc_table to dm dp_mst_stream_alloc_table
- * because stream_encoder is not exposed to dm
- */
-enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx)
-{
- struct dc_stream_state *stream = pipe_ctx->stream;
- struct dc_link *link = stream->link;
- struct dc_dp_mst_stream_allocation_table proposed_table = {0};
- struct fixed31_32 avg_time_slots_per_mtp;
- struct fixed31_32 pbn;
- struct fixed31_32 pbn_per_slot;
- int i;
- enum act_return_status ret;
- const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
- DC_LOGGER_INIT(link->ctx->logger);
-
- /* enable_link_dp_mst already check link->enabled_stream_count
- * and stream is in link->stream[]. This is called during set mode,
- * stream_enc is available.
- */
-
- /* get calculate VC payload for stream: stream_alloc */
- if (dm_helpers_dp_mst_write_payload_allocation_table(
- stream->ctx,
- stream,
- &proposed_table,
- true))
- update_mst_stream_alloc_table(
- link,
- pipe_ctx->stream_res.stream_enc,
- pipe_ctx->stream_res.hpo_dp_stream_enc,
- &proposed_table);
- else
- DC_LOG_WARNING("Failed to update"
- "MST allocation table for"
- "pipe idx:%d\n",
- pipe_ctx->pipe_idx);
-
- DC_LOG_MST("%s "
- "stream_count: %d: \n ",
- __func__,
- link->mst_stream_alloc_table.stream_count);
-
- for (i = 0; i < MAX_CONTROLLER_NUM; i++) {
- DC_LOG_MST("stream_enc[%d]: %p "
- "stream[%d].hpo_dp_stream_enc: %p "
- "stream[%d].vcp_id: %d "
- "stream[%d].slot_count: %d\n",
- i,
- (void *) link->mst_stream_alloc_table.stream_allocations[i].stream_enc,
- i,
- (void *) link->mst_stream_alloc_table.stream_allocations[i].hpo_dp_stream_enc,
- i,
- link->mst_stream_alloc_table.stream_allocations[i].vcp_id,
- i,
- link->mst_stream_alloc_table.stream_allocations[i].slot_count);
- }
-
- ASSERT(proposed_table.stream_count > 0);
-
- /* program DP source TX for payload */
- if (link_hwss->ext.update_stream_allocation_table == NULL ||
- dp_get_link_encoding_format(&link->cur_link_settings) == DP_UNKNOWN_ENCODING) {
- DC_LOG_ERROR("Failure: unknown encoding format\n");
- return DC_ERROR_UNEXPECTED;
- }
-
- link_hwss->ext.update_stream_allocation_table(link,
- &pipe_ctx->link_res,
- &link->mst_stream_alloc_table);
-
- /* send down message */
- ret = dm_helpers_dp_mst_poll_for_allocation_change_trigger(
- stream->ctx,
- stream);
-
- if (ret != ACT_LINK_LOST) {
- dm_helpers_dp_mst_send_payload_allocation(
- stream->ctx,
- stream,
- true);
- }
-
- /* slot X.Y for only current stream */
- pbn_per_slot = get_pbn_per_slot(stream);
- if (pbn_per_slot.value == 0) {
- DC_LOG_ERROR("Failure: pbn_per_slot==0 not allowed. Cannot continue, returning DC_UNSUPPORTED_VALUE.\n");
- return DC_UNSUPPORTED_VALUE;
- }
- pbn = get_pbn_from_timing(pipe_ctx);
- avg_time_slots_per_mtp = dc_fixpt_div(pbn, pbn_per_slot);
-
- dc_log_vcp_x_y(link, avg_time_slots_per_mtp);
-
- if (link_hwss->ext.set_throttled_vcp_size)
- link_hwss->ext.set_throttled_vcp_size(pipe_ctx, avg_time_slots_per_mtp);
- if (link_hwss->ext.set_hblank_min_symbol_width)
- link_hwss->ext.set_hblank_min_symbol_width(pipe_ctx,
- &link->cur_link_settings,
- avg_time_slots_per_mtp);
-
- return DC_OK;
-
-}
-
-enum dc_status dc_link_reduce_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t bw_in_kbps)
-{
- struct dc_stream_state *stream = pipe_ctx->stream;
- struct dc_link *link = stream->link;
- struct fixed31_32 avg_time_slots_per_mtp;
- struct fixed31_32 pbn;
- struct fixed31_32 pbn_per_slot;
- struct dc_dp_mst_stream_allocation_table proposed_table = {0};
- uint8_t i;
- const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
- DC_LOGGER_INIT(link->ctx->logger);
-
- /* decrease throttled vcp size */
- pbn_per_slot = get_pbn_per_slot(stream);
- pbn = get_pbn_from_bw_in_kbps(bw_in_kbps);
- avg_time_slots_per_mtp = dc_fixpt_div(pbn, pbn_per_slot);
-
- if (link_hwss->ext.set_throttled_vcp_size)
- link_hwss->ext.set_throttled_vcp_size(pipe_ctx, avg_time_slots_per_mtp);
- if (link_hwss->ext.set_hblank_min_symbol_width)
- link_hwss->ext.set_hblank_min_symbol_width(pipe_ctx,
- &link->cur_link_settings,
- avg_time_slots_per_mtp);
-
- /* send ALLOCATE_PAYLOAD sideband message with updated pbn */
- dm_helpers_dp_mst_send_payload_allocation(
- stream->ctx,
- stream,
- true);
-
- /* notify immediate branch device table update */
- if (dm_helpers_dp_mst_write_payload_allocation_table(
- stream->ctx,
- stream,
- &proposed_table,
- true)) {
- /* update mst stream allocation table software state */
- update_mst_stream_alloc_table(
- link,
- pipe_ctx->stream_res.stream_enc,
- pipe_ctx->stream_res.hpo_dp_stream_enc,
- &proposed_table);
- } else {
- DC_LOG_WARNING("Failed to update"
- "MST allocation table for"
- "pipe idx:%d\n",
- pipe_ctx->pipe_idx);
- }
-
- DC_LOG_MST("%s "
- "stream_count: %d: \n ",
- __func__,
- link->mst_stream_alloc_table.stream_count);
-
- for (i = 0; i < MAX_CONTROLLER_NUM; i++) {
- DC_LOG_MST("stream_enc[%d]: %p "
- "stream[%d].hpo_dp_stream_enc: %p "
- "stream[%d].vcp_id: %d "
- "stream[%d].slot_count: %d\n",
- i,
- (void *) link->mst_stream_alloc_table.stream_allocations[i].stream_enc,
- i,
- (void *) link->mst_stream_alloc_table.stream_allocations[i].hpo_dp_stream_enc,
- i,
- link->mst_stream_alloc_table.stream_allocations[i].vcp_id,
- i,
- link->mst_stream_alloc_table.stream_allocations[i].slot_count);
- }
-
- ASSERT(proposed_table.stream_count > 0);
-
- /* update mst stream allocation table hardware state */
- if (link_hwss->ext.update_stream_allocation_table == NULL ||
- dp_get_link_encoding_format(&link->cur_link_settings) == DP_UNKNOWN_ENCODING) {
- DC_LOG_ERROR("Failure: unknown encoding format\n");
- return DC_ERROR_UNEXPECTED;
- }
-
- link_hwss->ext.update_stream_allocation_table(link, &pipe_ctx->link_res,
- &link->mst_stream_alloc_table);
-
- /* poll for immediate branch device ACT handled */
- dm_helpers_dp_mst_poll_for_allocation_change_trigger(
- stream->ctx,
- stream);
-
- return DC_OK;
-}
-
-enum dc_status dc_link_increase_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t bw_in_kbps)
-{
- struct dc_stream_state *stream = pipe_ctx->stream;
- struct dc_link *link = stream->link;
- struct fixed31_32 avg_time_slots_per_mtp;
- struct fixed31_32 pbn;
- struct fixed31_32 pbn_per_slot;
- struct dc_dp_mst_stream_allocation_table proposed_table = {0};
- uint8_t i;
- enum act_return_status ret;
- const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
- DC_LOGGER_INIT(link->ctx->logger);
-
- /* notify immediate branch device table update */
- if (dm_helpers_dp_mst_write_payload_allocation_table(
- stream->ctx,
- stream,
- &proposed_table,
- true)) {
- /* update mst stream allocation table software state */
- update_mst_stream_alloc_table(
- link,
- pipe_ctx->stream_res.stream_enc,
- pipe_ctx->stream_res.hpo_dp_stream_enc,
- &proposed_table);
- }
-
- DC_LOG_MST("%s "
- "stream_count: %d: \n ",
- __func__,
- link->mst_stream_alloc_table.stream_count);
-
- for (i = 0; i < MAX_CONTROLLER_NUM; i++) {
- DC_LOG_MST("stream_enc[%d]: %p "
- "stream[%d].hpo_dp_stream_enc: %p "
- "stream[%d].vcp_id: %d "
- "stream[%d].slot_count: %d\n",
- i,
- (void *) link->mst_stream_alloc_table.stream_allocations[i].stream_enc,
- i,
- (void *) link->mst_stream_alloc_table.stream_allocations[i].hpo_dp_stream_enc,
- i,
- link->mst_stream_alloc_table.stream_allocations[i].vcp_id,
- i,
- link->mst_stream_alloc_table.stream_allocations[i].slot_count);
- }
-
- ASSERT(proposed_table.stream_count > 0);
-
- /* update mst stream allocation table hardware state */
- if (link_hwss->ext.update_stream_allocation_table == NULL ||
- dp_get_link_encoding_format(&link->cur_link_settings) == DP_UNKNOWN_ENCODING) {
- DC_LOG_ERROR("Failure: unknown encoding format\n");
- return DC_ERROR_UNEXPECTED;
- }
-
- link_hwss->ext.update_stream_allocation_table(link, &pipe_ctx->link_res,
- &link->mst_stream_alloc_table);
-
- /* poll for immediate branch device ACT handled */
- ret = dm_helpers_dp_mst_poll_for_allocation_change_trigger(
- stream->ctx,
- stream);
-
- if (ret != ACT_LINK_LOST) {
- /* send ALLOCATE_PAYLOAD sideband message with updated pbn */
- dm_helpers_dp_mst_send_payload_allocation(
- stream->ctx,
- stream,
- true);
- }
-
- /* increase throttled vcp size */
- pbn = get_pbn_from_bw_in_kbps(bw_in_kbps);
- pbn_per_slot = get_pbn_per_slot(stream);
- avg_time_slots_per_mtp = dc_fixpt_div(pbn, pbn_per_slot);
-
- if (link_hwss->ext.set_throttled_vcp_size)
- link_hwss->ext.set_throttled_vcp_size(pipe_ctx, avg_time_slots_per_mtp);
- if (link_hwss->ext.set_hblank_min_symbol_width)
- link_hwss->ext.set_hblank_min_symbol_width(pipe_ctx,
- &link->cur_link_settings,
- avg_time_slots_per_mtp);
-
- return DC_OK;
-}
-
-static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
-{
- struct dc_stream_state *stream = pipe_ctx->stream;
- struct dc_link *link = stream->link;
- struct dc_dp_mst_stream_allocation_table proposed_table = {0};
- struct fixed31_32 avg_time_slots_per_mtp = dc_fixpt_from_int(0);
- int i;
- bool mst_mode = (link->type == dc_connection_mst_branch);
- /* adjust for drm changes*/
- bool update_drm_mst_state = true;
- const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
- const struct dc_link_settings empty_link_settings = {0};
- DC_LOGGER_INIT(link->ctx->logger);
-
-
- /* deallocate_mst_payload is called before disable link. When mode or
- * disable/enable monitor, new stream is created which is not in link
- * stream[] yet. For this, payload is not allocated yet, so de-alloc
- * should not done. For new mode set, map_resources will get engine
- * for new stream, so stream_enc->id should be validated until here.
- */
-
- /* slot X.Y */
- if (link_hwss->ext.set_throttled_vcp_size)
- link_hwss->ext.set_throttled_vcp_size(pipe_ctx, avg_time_slots_per_mtp);
- if (link_hwss->ext.set_hblank_min_symbol_width)
- link_hwss->ext.set_hblank_min_symbol_width(pipe_ctx,
- &empty_link_settings,
- avg_time_slots_per_mtp);
-
- if (mst_mode || update_drm_mst_state) {
- /* when link is in mst mode, reply on mst manager to remove
- * payload
- */
- if (dm_helpers_dp_mst_write_payload_allocation_table(
- stream->ctx,
- stream,
- &proposed_table,
- false))
-
- update_mst_stream_alloc_table(
- link,
- pipe_ctx->stream_res.stream_enc,
- pipe_ctx->stream_res.hpo_dp_stream_enc,
- &proposed_table);
- else
- DC_LOG_WARNING("Failed to update"
- "MST allocation table for"
- "pipe idx:%d\n",
- pipe_ctx->pipe_idx);
- } else {
- /* when link is no longer in mst mode (mst hub unplugged),
- * remove payload with default dc logic
- */
- remove_stream_from_alloc_table(link, pipe_ctx->stream_res.stream_enc,
- pipe_ctx->stream_res.hpo_dp_stream_enc);
- }
-
- DC_LOG_MST("%s"
- "stream_count: %d: ",
- __func__,
- link->mst_stream_alloc_table.stream_count);
-
- for (i = 0; i < MAX_CONTROLLER_NUM; i++) {
- DC_LOG_MST("stream_enc[%d]: %p "
- "stream[%d].hpo_dp_stream_enc: %p "
- "stream[%d].vcp_id: %d "
- "stream[%d].slot_count: %d\n",
- i,
- (void *) link->mst_stream_alloc_table.stream_allocations[i].stream_enc,
- i,
- (void *) link->mst_stream_alloc_table.stream_allocations[i].hpo_dp_stream_enc,
- i,
- link->mst_stream_alloc_table.stream_allocations[i].vcp_id,
- i,
- link->mst_stream_alloc_table.stream_allocations[i].slot_count);
- }
-
- /* update mst stream allocation table hardware state */
- if (link_hwss->ext.update_stream_allocation_table == NULL ||
- dp_get_link_encoding_format(&link->cur_link_settings) == DP_UNKNOWN_ENCODING) {
- DC_LOG_DEBUG("Unknown encoding format\n");
- return DC_ERROR_UNEXPECTED;
- }
-
- link_hwss->ext.update_stream_allocation_table(link, &pipe_ctx->link_res,
- &link->mst_stream_alloc_table);
-
- if (mst_mode) {
- dm_helpers_dp_mst_poll_for_allocation_change_trigger(
- stream->ctx,
- stream);
-
- if (!update_drm_mst_state)
- dm_helpers_dp_mst_send_payload_allocation(
- stream->ctx,
- stream,
- false);
- }
-
- if (update_drm_mst_state)
- dm_helpers_dp_mst_send_payload_allocation(
- stream->ctx,
- stream,
- false);
-
- return DC_OK;
-}
-
-
-#if defined(CONFIG_DRM_AMD_DC_HDCP)
-static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off)
-{
- struct cp_psp *cp_psp = &pipe_ctx->stream->ctx->cp_psp;
- struct link_encoder *link_enc = NULL;
- struct cp_psp_stream_config config = {0};
- enum dp_panel_mode panel_mode =
- dp_get_panel_mode(pipe_ctx->stream->link);
-
- if (cp_psp == NULL || cp_psp->funcs.update_stream_config == NULL)
- return;
-
- link_enc = link_enc_cfg_get_link_enc(pipe_ctx->stream->link);
- ASSERT(link_enc);
- if (link_enc == NULL)
- return;
-
- /* otg instance */
- config.otg_inst = (uint8_t) pipe_ctx->stream_res.tg->inst;
-
- /* dig front end */
- config.dig_fe = (uint8_t) pipe_ctx->stream_res.stream_enc->stream_enc_inst;
-
- /* stream encoder index */
- config.stream_enc_idx = pipe_ctx->stream_res.stream_enc->id - ENGINE_ID_DIGA;
- if (is_dp_128b_132b_signal(pipe_ctx))
- config.stream_enc_idx =
- pipe_ctx->stream_res.hpo_dp_stream_enc->id - ENGINE_ID_HPO_DP_0;
-
- /* dig back end */
- config.dig_be = pipe_ctx->stream->link->link_enc_hw_inst;
-
- /* link encoder index */
- config.link_enc_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A;
- if (is_dp_128b_132b_signal(pipe_ctx))
- config.link_enc_idx = pipe_ctx->link_res.hpo_dp_link_enc->inst;
-
- /* dio output index is dpia index for DPIA endpoint & dcio index by default */
- if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
- config.dio_output_idx = pipe_ctx->stream->link->link_id.enum_id - ENUM_ID_1;
- else
- config.dio_output_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A;
-
-
- /* phy index */
- config.phy_idx = resource_transmitter_to_phy_idx(
- pipe_ctx->stream->link->dc, link_enc->transmitter);
- if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
- /* USB4 DPIA doesn't use PHY in our soc, initialize it to 0 */
- config.phy_idx = 0;
-
- /* stream properties */
- config.assr_enabled = (panel_mode == DP_PANEL_MODE_EDP) ? 1 : 0;
- config.mst_enabled = (pipe_ctx->stream->signal ==
- SIGNAL_TYPE_DISPLAY_PORT_MST) ? 1 : 0;
- config.dp2_enabled = is_dp_128b_132b_signal(pipe_ctx) ? 1 : 0;
- config.usb4_enabled = (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) ?
- 1 : 0;
- config.dpms_off = dpms_off;
-
- /* dm stream context */
- config.dm_stream_ctx = pipe_ctx->stream->dm_stream_context;
-
- cp_psp->funcs.update_stream_config(cp_psp->handle, &config);
-}
-#endif
-
-static void fpga_dp_hpo_enable_link_and_stream(struct dc_state *state, struct pipe_ctx *pipe_ctx)
-{
- struct dc *dc = pipe_ctx->stream->ctx->dc;
- struct dc_stream_state *stream = pipe_ctx->stream;
- struct link_mst_stream_allocation_table proposed_table = {0};
- struct fixed31_32 avg_time_slots_per_mtp;
- uint8_t req_slot_count = 0;
- uint8_t vc_id = 1; /// VC ID always 1 for SST
- struct dc_link_settings link_settings = pipe_ctx->link_config.dp_link_settings;
- const struct link_hwss *link_hwss = get_link_hwss(stream->link, &pipe_ctx->link_res);
- DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
-
- stream->link->cur_link_settings = link_settings;
-
- if (link_hwss->ext.enable_dp_link_output)
- link_hwss->ext.enable_dp_link_output(stream->link, &pipe_ctx->link_res,
- stream->signal, pipe_ctx->clock_source->id,
- &link_settings);
-
-#ifdef DIAGS_BUILD
- /* Workaround for FPGA HPO capture DP link data:
- * HPO capture will set link to active mode
- * This workaround is required to get a capture from start of frame
- */
- if (!dc->debug.fpga_hpo_capture_en) {
- struct encoder_set_dp_phy_pattern_param params = {0};
- params.dp_phy_pattern = DP_TEST_PATTERN_VIDEO_MODE;
-
- /* Set link active */
- stream->link->hpo_dp_link_enc->funcs->set_link_test_pattern(
- stream->link->hpo_dp_link_enc,
- &params);
- }
-#endif
-
- /* Enable DP_STREAM_ENC */
- dc->hwss.enable_stream(pipe_ctx);
-
- /* Set DPS PPS SDP (AKA "info frames") */
- if (pipe_ctx->stream->timing.flags.DSC) {
- dp_set_dsc_pps_sdp(pipe_ctx, true, true);
- }
-
- /* Allocate Payload */
- if ((stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) && (state->stream_count > 1)) {
- // MST case
- uint8_t i;
-
- proposed_table.stream_count = state->stream_count;
- for (i = 0; i < state->stream_count; i++) {
- avg_time_slots_per_mtp = calculate_sst_avg_time_slots_per_mtp(state->streams[i], state->streams[i]->link);
- req_slot_count = dc_fixpt_ceil(avg_time_slots_per_mtp);
- proposed_table.stream_allocations[i].slot_count = req_slot_count;
- proposed_table.stream_allocations[i].vcp_id = i+1;
- /* NOTE: This makes assumption that pipe_ctx index is same as stream index */
- proposed_table.stream_allocations[i].hpo_dp_stream_enc = state->res_ctx.pipe_ctx[i].stream_res.hpo_dp_stream_enc;
- }
- } else {
- // SST case
- avg_time_slots_per_mtp = calculate_sst_avg_time_slots_per_mtp(stream, stream->link);
- req_slot_count = dc_fixpt_ceil(avg_time_slots_per_mtp);
- proposed_table.stream_count = 1; /// Always 1 stream for SST
- proposed_table.stream_allocations[0].slot_count = req_slot_count;
- proposed_table.stream_allocations[0].vcp_id = vc_id;
- proposed_table.stream_allocations[0].hpo_dp_stream_enc = pipe_ctx->stream_res.hpo_dp_stream_enc;
- }
-
- link_hwss->ext.update_stream_allocation_table(stream->link,
- &pipe_ctx->link_res,
- &proposed_table);
-
- if (link_hwss->ext.set_throttled_vcp_size)
- link_hwss->ext.set_throttled_vcp_size(pipe_ctx, avg_time_slots_per_mtp);
-
- dc->hwss.unblank_stream(pipe_ctx, &stream->link->cur_link_settings);
- dc->hwss.enable_audio_stream(pipe_ctx);
-}
-
-void core_link_enable_stream(
- struct dc_state *state,
- struct pipe_ctx *pipe_ctx)
-{
- struct dc *dc = pipe_ctx->stream->ctx->dc;
- struct dc_stream_state *stream = pipe_ctx->stream;
- struct dc_link *link = stream->sink->link;
- enum dc_status status;
- struct link_encoder *link_enc;
- enum otg_out_mux_dest otg_out_dest = OUT_MUX_DIO;
- struct vpg *vpg = pipe_ctx->stream_res.stream_enc->vpg;
- const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
-
- if (is_dp_128b_132b_signal(pipe_ctx))
- vpg = pipe_ctx->stream_res.hpo_dp_stream_enc->vpg;
-
- DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
-
- if (pipe_ctx->stream->sink) {
- if (pipe_ctx->stream->sink->sink_signal != SIGNAL_TYPE_VIRTUAL &&
- pipe_ctx->stream->sink->sink_signal != SIGNAL_TYPE_NONE) {
- DC_LOG_DC("%s pipe_ctx dispname=%s signal=%x\n", __func__,
- pipe_ctx->stream->sink->edid_caps.display_name,
- pipe_ctx->stream->signal);
- }
- }
-
- if (!IS_DIAG_DC(dc->ctx->dce_environment) &&
- dc_is_virtual_signal(pipe_ctx->stream->signal))
- return;
-
- link_enc = link_enc_cfg_get_link_enc(link);
- ASSERT(link_enc);
-
- if (!dc_is_virtual_signal(pipe_ctx->stream->signal)
- && !is_dp_128b_132b_signal(pipe_ctx)) {
- if (link_enc)
- link_enc->funcs->setup(
- link_enc,
- pipe_ctx->stream->signal);
- }
-
- pipe_ctx->stream->link->link_state_valid = true;
-
- if (pipe_ctx->stream_res.tg->funcs->set_out_mux) {
- if (is_dp_128b_132b_signal(pipe_ctx))
- otg_out_dest = OUT_MUX_HPO_DP;
- else
- otg_out_dest = OUT_MUX_DIO;
- pipe_ctx->stream_res.tg->funcs->set_out_mux(pipe_ctx->stream_res.tg, otg_out_dest);
- }
-
- link_hwss->setup_stream_attribute(pipe_ctx);
-
- if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
- bool apply_edp_fast_boot_optimization =
- pipe_ctx->stream->apply_edp_fast_boot_optimization;
-
- pipe_ctx->stream->apply_edp_fast_boot_optimization = false;
-
- // Enable VPG before building infoframe
- if (vpg && vpg->funcs->vpg_poweron)
- vpg->funcs->vpg_poweron(vpg);
-
- resource_build_info_frame(pipe_ctx);
- dc->hwss.update_info_frame(pipe_ctx);
-
- if (dc_is_dp_signal(pipe_ctx->stream->signal))
- dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME);
-
- /* Do not touch link on seamless boot optimization. */
- if (pipe_ctx->stream->apply_seamless_boot_optimization) {
- pipe_ctx->stream->dpms_off = false;
-
- /* Still enable stream features & audio on seamless boot for DP external displays */
- if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT) {
- enable_stream_features(pipe_ctx);
- dc->hwss.enable_audio_stream(pipe_ctx);
- }
-
-#if defined(CONFIG_DRM_AMD_DC_HDCP)
- update_psp_stream_config(pipe_ctx, false);
-#endif
- return;
- }
-
- /* eDP lit up by bios already, no need to enable again. */
- if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP &&
- apply_edp_fast_boot_optimization &&
- !pipe_ctx->stream->timing.flags.DSC &&
- !pipe_ctx->next_odm_pipe) {
- pipe_ctx->stream->dpms_off = false;
-#if defined(CONFIG_DRM_AMD_DC_HDCP)
- update_psp_stream_config(pipe_ctx, false);
-#endif
- return;
- }
-
- if (pipe_ctx->stream->dpms_off)
- return;
-
- /* Have to setup DSC before DIG FE and BE are connected (which happens before the
- * link training). This is to make sure the bandwidth sent to DIG BE won't be
- * bigger than what the link and/or DIG BE can handle. VBID[6]/CompressedStream_flag
- * will be automatically set at a later time when the video is enabled
- * (DP_VID_STREAM_EN = 1).
- */
- if (pipe_ctx->stream->timing.flags.DSC) {
- if (dc_is_dp_signal(pipe_ctx->stream->signal) ||
- dc_is_virtual_signal(pipe_ctx->stream->signal))
- dp_set_dsc_enable(pipe_ctx, true);
-
- }
-
- status = enable_link(state, pipe_ctx);
-
- if (status != DC_OK) {
- DC_LOG_WARNING("enabling link %u failed: %d\n",
- pipe_ctx->stream->link->link_index,
- status);
-
- /* Abort stream enable *unless* the failure was due to
- * DP link training - some DP monitors will recover and
- * show the stream anyway. But MST displays can't proceed
- * without link training.
- */
- if (status != DC_FAIL_DP_LINK_TRAINING ||
- pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
- if (false == stream->link->link_status.link_active)
- disable_link(stream->link, &pipe_ctx->link_res,
- pipe_ctx->stream->signal);
- BREAK_TO_DEBUGGER();
- return;
- }
- }
-
- /* turn off otg test pattern if enable */
- if (pipe_ctx->stream_res.tg->funcs->set_test_pattern)
- pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg,
- CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
- COLOR_DEPTH_UNDEFINED);
-
- /* This second call is needed to reconfigure the DIG
- * as a workaround for the incorrect value being applied
- * from transmitter control.
- */
- if (!(dc_is_virtual_signal(pipe_ctx->stream->signal) ||
- is_dp_128b_132b_signal(pipe_ctx)))
- if (link_enc)
- link_enc->funcs->setup(
- link_enc,
- pipe_ctx->stream->signal);
-
- dc->hwss.enable_stream(pipe_ctx);
-
- /* Set DPS PPS SDP (AKA "info frames") */
- if (pipe_ctx->stream->timing.flags.DSC) {
- if (dc_is_dp_signal(pipe_ctx->stream->signal) ||
- dc_is_virtual_signal(pipe_ctx->stream->signal)) {
- dp_set_dsc_on_rx(pipe_ctx, true);
- dp_set_dsc_pps_sdp(pipe_ctx, true, true);
- }
- }
-
- if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
- dc_link_allocate_mst_payload(pipe_ctx);
- else if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT &&
- is_dp_128b_132b_signal(pipe_ctx))
- dc_link_update_sst_payload(pipe_ctx, true);
-
- dc->hwss.unblank_stream(pipe_ctx,
- &pipe_ctx->stream->link->cur_link_settings);
-
- if (stream->sink_patches.delay_ignore_msa > 0)
- msleep(stream->sink_patches.delay_ignore_msa);
-
- if (dc_is_dp_signal(pipe_ctx->stream->signal))
- enable_stream_features(pipe_ctx);
-#if defined(CONFIG_DRM_AMD_DC_HDCP)
- update_psp_stream_config(pipe_ctx, false);
-#endif
-
- dc->hwss.enable_audio_stream(pipe_ctx);
-
- } else { // if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
- if (is_dp_128b_132b_signal(pipe_ctx))
- fpga_dp_hpo_enable_link_and_stream(state, pipe_ctx);
- if (dc_is_dp_signal(pipe_ctx->stream->signal) ||
- dc_is_virtual_signal(pipe_ctx->stream->signal))
- dp_set_dsc_enable(pipe_ctx, true);
- }
-
- if (dc_is_hdmi_signal(pipe_ctx->stream->signal)) {
- core_link_set_avmute(pipe_ctx, false);
- }
-}
-
-void core_link_disable_stream(struct pipe_ctx *pipe_ctx)
-{
- struct dc *dc = pipe_ctx->stream->ctx->dc;
- struct dc_stream_state *stream = pipe_ctx->stream;
- struct dc_link *link = stream->sink->link;
- struct vpg *vpg = pipe_ctx->stream_res.stream_enc->vpg;
-
- if (is_dp_128b_132b_signal(pipe_ctx))
- vpg = pipe_ctx->stream_res.hpo_dp_stream_enc->vpg;
-
- DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
-
- if (pipe_ctx->stream->sink) {
- if (pipe_ctx->stream->sink->sink_signal != SIGNAL_TYPE_VIRTUAL &&
- pipe_ctx->stream->sink->sink_signal != SIGNAL_TYPE_NONE) {
- DC_LOG_DC("%s pipe_ctx dispname=%s signal=%x\n", __func__,
- pipe_ctx->stream->sink->edid_caps.display_name,
- pipe_ctx->stream->signal);
- }
- }
-
- if (!IS_DIAG_DC(dc->ctx->dce_environment) &&
- dc_is_virtual_signal(pipe_ctx->stream->signal))
- return;
-
- if (!pipe_ctx->stream->sink->edid_caps.panel_patch.skip_avmute) {
- if (dc_is_hdmi_signal(pipe_ctx->stream->signal))
- core_link_set_avmute(pipe_ctx, true);
- }
-
- dc->hwss.disable_audio_stream(pipe_ctx);
-
-#if defined(CONFIG_DRM_AMD_DC_HDCP)
- update_psp_stream_config(pipe_ctx, true);
-#endif
- dc->hwss.blank_stream(pipe_ctx);
-
- if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
- deallocate_mst_payload(pipe_ctx);
- else if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT &&
- is_dp_128b_132b_signal(pipe_ctx))
- dc_link_update_sst_payload(pipe_ctx, false);
-
- if (dc_is_hdmi_signal(pipe_ctx->stream->signal)) {
- struct ext_hdmi_settings settings = {0};
- enum engine_id eng_id = pipe_ctx->stream_res.stream_enc->id;
-
- unsigned short masked_chip_caps = link->chip_caps &
- EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK;
- //Need to inform that sink is going to use legacy HDMI mode.
- dal_ddc_service_write_scdc_data(
- link->ddc,
- 165000,//vbios only handles 165Mhz.
- false);
- if (masked_chip_caps == EXT_DISPLAY_PATH_CAPS__HDMI20_TISN65DP159RSBT) {
- /* DP159, Retimer settings */
- if (get_ext_hdmi_settings(pipe_ctx, eng_id, &settings))
- write_i2c_retimer_setting(pipe_ctx,
- false, false, &settings);
- else
- write_i2c_default_retimer_setting(pipe_ctx,
- false, false);
- } else if (masked_chip_caps == EXT_DISPLAY_PATH_CAPS__HDMI20_PI3EQX1204) {
- /* PI3EQX1204, Redriver settings */
- write_i2c_redriver_setting(pipe_ctx, false);
- }
- }
-
- if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT &&
- !is_dp_128b_132b_signal(pipe_ctx)) {
-
- /* In DP1.x SST mode, our encoder will go to TPS1
- * when link is on but stream is off.
- * Disabling link before stream will avoid exposing TPS1 pattern
- * during the disable sequence as it will confuse some receivers
- * state machine.
- * In DP2 or MST mode, our encoder will stay video active
- */
- disable_link(pipe_ctx->stream->link, &pipe_ctx->link_res, pipe_ctx->stream->signal);
- dc->hwss.disable_stream(pipe_ctx);
- } else {
- dc->hwss.disable_stream(pipe_ctx);
- disable_link(pipe_ctx->stream->link, &pipe_ctx->link_res, pipe_ctx->stream->signal);
- }
-
- if (pipe_ctx->stream->timing.flags.DSC) {
- if (dc_is_dp_signal(pipe_ctx->stream->signal))
- dp_set_dsc_enable(pipe_ctx, false);
- }
- if (is_dp_128b_132b_signal(pipe_ctx)) {
- if (pipe_ctx->stream_res.tg->funcs->set_out_mux)
- pipe_ctx->stream_res.tg->funcs->set_out_mux(pipe_ctx->stream_res.tg, OUT_MUX_DIO);
- }
-
- if (vpg && vpg->funcs->vpg_powerdown)
- vpg->funcs->vpg_powerdown(vpg);
-}
-
-void core_link_set_avmute(struct pipe_ctx *pipe_ctx, bool enable)
-{
- struct dc *dc = pipe_ctx->stream->ctx->dc;
-
- if (!dc_is_hdmi_signal(pipe_ctx->stream->signal))
- return;
-
- dc->hwss.set_avmute(pipe_ctx, enable);
-}
-
-/**
- * dc_link_enable_hpd_filter:
- * If enable is true, programs HPD filter on associated HPD line using
- * delay_on_disconnect/delay_on_connect values dependent on
- * link->connector_signal
- *
- * If enable is false, programs HPD filter on associated HPD line with no
- * delays on connect or disconnect
- *
- * @link: pointer to the dc link
- * @enable: boolean specifying whether to enable hbd
- */
-void dc_link_enable_hpd_filter(struct dc_link *link, bool enable)
-{
- struct gpio *hpd;
-
- if (enable) {
- link->is_hpd_filter_disabled = false;
- program_hpd_filter(link);
- } else {
- link->is_hpd_filter_disabled = true;
- /* Obtain HPD handle */
- hpd = get_hpd_gpio(link->ctx->dc_bios, link->link_id, link->ctx->gpio_service);
-
- if (!hpd)
- return;
-
- /* Setup HPD filtering */
- if (dal_gpio_open(hpd, GPIO_MODE_INTERRUPT) == GPIO_RESULT_OK) {
- struct gpio_hpd_config config;
-
- config.delay_on_connect = 0;
- config.delay_on_disconnect = 0;
-
- dal_irq_setup_hpd_filter(hpd, &config);
-
- dal_gpio_close(hpd);
- } else {
- ASSERT_CRITICAL(false);
- }
- /* Release HPD handle */
- dal_gpio_destroy_irq(&hpd);
- }
-}
-
-void dc_link_set_drive_settings(struct dc *dc,
- struct link_training_settings *lt_settings,
- const struct dc_link *link)
-{
-
- int i;
- struct link_resource link_res;
-
- for (i = 0; i < dc->link_count; i++)
- if (dc->links[i] == link)
- break;
-
- if (i >= dc->link_count)
- ASSERT_CRITICAL(false);
-
- dc_link_get_cur_link_res(link, &link_res);
- dc_link_dp_set_drive_settings(dc->links[i], &link_res, lt_settings);
-}
-
-void dc_link_set_preferred_link_settings(struct dc *dc,
- struct dc_link_settings *link_setting,
- struct dc_link *link)
-{
- int i;
- struct pipe_ctx *pipe;
- struct dc_stream_state *link_stream;
- struct dc_link_settings store_settings = *link_setting;
-
- link->preferred_link_setting = store_settings;
-
- /* Retrain with preferred link settings only relevant for
- * DP signal type
- * Check for non-DP signal or if passive dongle present
- */
- if (!dc_is_dp_signal(link->connector_signal) ||
- link->dongle_max_pix_clk > 0)
- return;
-
- for (i = 0; i < MAX_PIPES; i++) {
- pipe = &dc->current_state->res_ctx.pipe_ctx[i];
- if (pipe->stream && pipe->stream->link) {
- if (pipe->stream->link == link) {
- link_stream = pipe->stream;
- break;
- }
- }
- }
-
- /* Stream not found */
- if (i == MAX_PIPES)
- return;
-
- /* Cannot retrain link if backend is off */
- if (link_stream->dpms_off)
- return;
-
- if (decide_link_settings(link_stream, &store_settings))
- dp_retrain_link_dp_test(link, &store_settings, false);
-}
-
-void dc_link_set_preferred_training_settings(struct dc *dc,
- struct dc_link_settings *link_setting,
- struct dc_link_training_overrides *lt_overrides,
- struct dc_link *link,
- bool skip_immediate_retrain)
-{
- if (lt_overrides != NULL)
- link->preferred_training_settings = *lt_overrides;
- else
- memset(&link->preferred_training_settings, 0, sizeof(link->preferred_training_settings));
-
- if (link_setting != NULL) {
- link->preferred_link_setting = *link_setting;
- if (dp_get_link_encoding_format(link_setting) == DP_128b_132b_ENCODING)
- /* TODO: add dc update for acquiring link res */
- skip_immediate_retrain = true;
- } else {
- link->preferred_link_setting.lane_count = LANE_COUNT_UNKNOWN;
- link->preferred_link_setting.link_rate = LINK_RATE_UNKNOWN;
- }
-
- if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT &&
- link->type == dc_connection_mst_branch)
- dm_helpers_dp_mst_update_branch_bandwidth(dc->ctx, link);
-
- /* Retrain now, or wait until next stream update to apply */
- if (skip_immediate_retrain == false)
- dc_link_set_preferred_link_settings(dc, &link->preferred_link_setting, link);
-}
-
-void dc_link_enable_hpd(const struct dc_link *link)
-{
- dc_link_dp_enable_hpd(link);
-}
-
-void dc_link_disable_hpd(const struct dc_link *link)
-{
- dc_link_dp_disable_hpd(link);
-}
-
-void dc_link_set_test_pattern(struct dc_link *link,
- enum dp_test_pattern test_pattern,
- enum dp_test_pattern_color_space test_pattern_color_space,
- const struct link_training_settings *p_link_settings,
- const unsigned char *p_custom_pattern,
- unsigned int cust_pattern_size)
-{
- if (link != NULL)
- dc_link_dp_set_test_pattern(
- link,
- test_pattern,
- test_pattern_color_space,
- p_link_settings,
- p_custom_pattern,
- cust_pattern_size);
-}
-
-uint32_t dc_link_bandwidth_kbps(
- const struct dc_link *link,
- const struct dc_link_settings *link_setting)
-{
- uint32_t total_data_bw_efficiency_x10000 = 0;
- uint32_t link_rate_per_lane_kbps = 0;
-
- switch (dp_get_link_encoding_format(link_setting)) {
- case DP_8b_10b_ENCODING:
- /* For 8b/10b encoding:
- * link rate is defined in the unit of LINK_RATE_REF_FREQ_IN_KHZ per DP byte per lane.
- * data bandwidth efficiency is 80% with additional 3% overhead if FEC is supported.
- */
- link_rate_per_lane_kbps = link_setting->link_rate * LINK_RATE_REF_FREQ_IN_KHZ * BITS_PER_DP_BYTE;
- total_data_bw_efficiency_x10000 = DATA_EFFICIENCY_8b_10b_x10000;
- if (dc_link_should_enable_fec(link)) {
- total_data_bw_efficiency_x10000 /= 100;
- total_data_bw_efficiency_x10000 *= DATA_EFFICIENCY_8b_10b_FEC_EFFICIENCY_x100;
- }
- break;
- case DP_128b_132b_ENCODING:
- /* For 128b/132b encoding:
- * link rate is defined in the unit of 10mbps per lane.
- * total data bandwidth efficiency is always 96.71%.
- */
- link_rate_per_lane_kbps = link_setting->link_rate * 10000;
- total_data_bw_efficiency_x10000 = DATA_EFFICIENCY_128b_132b_x10000;
- break;
- default:
- break;
- }
-
- /* overall effective link bandwidth = link rate per lane * lane count * total data bandwidth efficiency */
- return link_rate_per_lane_kbps * link_setting->lane_count / 10000 * total_data_bw_efficiency_x10000;
-}
-
-const struct dc_link_settings *dc_link_get_link_cap(
- const struct dc_link *link)
-{
- if (link->preferred_link_setting.lane_count != LANE_COUNT_UNKNOWN &&
- link->preferred_link_setting.link_rate != LINK_RATE_UNKNOWN)
- return &link->preferred_link_setting;
- return &link->verified_link_cap;
-}
-
-void dc_link_overwrite_extended_receiver_cap(
- struct dc_link *link)
-{
- dp_overwrite_extended_receiver_cap(link);
-}
-
-bool dc_link_is_fec_supported(const struct dc_link *link)
-{
- /* TODO - use asic cap instead of link_enc->features
- * we no longer know which link enc to use for this link before commit
- */
- struct link_encoder *link_enc = NULL;
-
- link_enc = link_enc_cfg_get_link_enc(link);
- ASSERT(link_enc);
-
- return (dc_is_dp_signal(link->connector_signal) && link_enc &&
- link_enc->features.fec_supported &&
- link->dpcd_caps.fec_cap.bits.FEC_CAPABLE &&
- !IS_FPGA_MAXIMUS_DC(link->ctx->dce_environment));
-}
-
-bool dc_link_should_enable_fec(const struct dc_link *link)
-{
- bool force_disable = false;
-
- if (link->fec_state == dc_link_fec_enabled)
- force_disable = false;
- else if (link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT_MST &&
- link->local_sink &&
- link->local_sink->edid_caps.panel_patch.disable_fec)
- force_disable = true;
- else if (link->connector_signal == SIGNAL_TYPE_EDP
- && (link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.
- dsc_support.DSC_SUPPORT == false
- || link->panel_config.dsc.disable_dsc_edp
- || !link->dc->caps.edp_dsc_support))
- force_disable = true;
-
- return !force_disable && dc_link_is_fec_supported(link);
-}
-
-uint32_t dc_bandwidth_in_kbps_from_timing(
- const struct dc_crtc_timing *timing)
-{
- uint32_t bits_per_channel = 0;
- uint32_t kbps;
-
-#if defined(CONFIG_DRM_AMD_DC_DCN)
- if (timing->flags.DSC)
- return dc_dsc_stream_bandwidth_in_kbps(timing,
- timing->dsc_cfg.bits_per_pixel,
- timing->dsc_cfg.num_slices_h,
- timing->dsc_cfg.is_dp);
-#endif /* CONFIG_DRM_AMD_DC_DCN */
-
- switch (timing->display_color_depth) {
- case COLOR_DEPTH_666:
- bits_per_channel = 6;
- break;
- case COLOR_DEPTH_888:
- bits_per_channel = 8;
- break;
- case COLOR_DEPTH_101010:
- bits_per_channel = 10;
- break;
- case COLOR_DEPTH_121212:
- bits_per_channel = 12;
- break;
- case COLOR_DEPTH_141414:
- bits_per_channel = 14;
- break;
- case COLOR_DEPTH_161616:
- bits_per_channel = 16;
- break;
- default:
- ASSERT(bits_per_channel != 0);
- bits_per_channel = 8;
- break;
- }
-
- kbps = timing->pix_clk_100hz / 10;
- kbps *= bits_per_channel;
-
- if (timing->flags.Y_ONLY != 1) {
- /*Only YOnly make reduce bandwidth by 1/3 compares to RGB*/
- kbps *= 3;
- if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
- kbps /= 2;
- else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422)
- kbps = kbps * 2 / 3;
- }
-
- return kbps;
-
-}
-
-void dc_link_get_cur_link_res(const struct dc_link *link,
- struct link_resource *link_res)
-{
- int i;
- struct pipe_ctx *pipe = NULL;
-
- memset(link_res, 0, sizeof(*link_res));
-
- for (i = 0; i < MAX_PIPES; i++) {
- pipe = &link->dc->current_state->res_ctx.pipe_ctx[i];
- if (pipe->stream && pipe->stream->link && pipe->top_pipe == NULL) {
- if (pipe->stream->link == link) {
- *link_res = pipe->link_res;
- break;
- }
- }
- }
-
-}
-
-/**
- * dc_get_cur_link_res_map() - take a snapshot of current link resource allocation state
- * @dc: pointer to dc of the dm calling this
- * @map: a dc link resource snapshot defined internally to dc.
- *
- * DM needs to capture a snapshot of current link resource allocation mapping
- * and store it in its persistent storage.
- *
- * Some of the link resource is using first come first serve policy.
- * The allocation mapping depends on original hotplug order. This information
- * is lost after driver is loaded next time. The snapshot is used in order to
- * restore link resource to its previous state so user will get consistent
- * link capability allocation across reboot.
- *
- * Return: none (void function)
- *
- */
-void dc_get_cur_link_res_map(const struct dc *dc, uint32_t *map)
-{
- struct dc_link *link;
- uint32_t i;
- uint32_t hpo_dp_recycle_map = 0;
-
- *map = 0;
-
- if (dc->caps.dp_hpo) {
- for (i = 0; i < dc->caps.max_links; i++) {
- link = dc->links[i];
- if (link->link_status.link_active &&
- dp_get_link_encoding_format(&link->reported_link_cap) == DP_128b_132b_ENCODING &&
- dp_get_link_encoding_format(&link->cur_link_settings) != DP_128b_132b_ENCODING)
- /* hpo dp link encoder is considered as recycled, when RX reports 128b/132b encoding capability
- * but current link doesn't use it.
- */
- hpo_dp_recycle_map |= (1 << i);
- }
- *map |= (hpo_dp_recycle_map << LINK_RES_HPO_DP_REC_MAP__SHIFT);
- }
-}
-
-/**
- * dc_restore_link_res_map() - restore link resource allocation state from a snapshot
- * @dc: pointer to dc of the dm calling this
- * @map: a dc link resource snapshot defined internally to dc.
- *
- * DM needs to call this function after initial link detection on boot and
- * before first commit streams to restore link resource allocation state
- * from previous boot session.
- *
- * Some of the link resource is using first come first serve policy.
- * The allocation mapping depends on original hotplug order. This information
- * is lost after driver is loaded next time. The snapshot is used in order to
- * restore link resource to its previous state so user will get consistent
- * link capability allocation across reboot.
- *
- * Return: none (void function)
- *
- */
-void dc_restore_link_res_map(const struct dc *dc, uint32_t *map)
-{
- struct dc_link *link;
- uint32_t i;
- unsigned int available_hpo_dp_count;
- uint32_t hpo_dp_recycle_map = (*map & LINK_RES_HPO_DP_REC_MAP__MASK)
- >> LINK_RES_HPO_DP_REC_MAP__SHIFT;
-
- if (dc->caps.dp_hpo) {
- available_hpo_dp_count = dc->res_pool->hpo_dp_link_enc_count;
- /* remove excess 128b/132b encoding support for not recycled links */
- for (i = 0; i < dc->caps.max_links; i++) {
- if ((hpo_dp_recycle_map & (1 << i)) == 0) {
- link = dc->links[i];
- if (link->type != dc_connection_none &&
- dp_get_link_encoding_format(&link->verified_link_cap) == DP_128b_132b_ENCODING) {
- if (available_hpo_dp_count > 0)
- available_hpo_dp_count--;
- else
- /* remove 128b/132b encoding capability by limiting verified link rate to HBR3 */
- link->verified_link_cap.link_rate = LINK_RATE_HIGH3;
- }
- }
- }
- /* remove excess 128b/132b encoding support for recycled links */
- for (i = 0; i < dc->caps.max_links; i++) {
- if ((hpo_dp_recycle_map & (1 << i)) != 0) {
- link = dc->links[i];
- if (link->type != dc_connection_none &&
- dp_get_link_encoding_format(&link->verified_link_cap) == DP_128b_132b_ENCODING) {
- if (available_hpo_dp_count > 0)
- available_hpo_dp_count--;
- else
- /* remove 128b/132b encoding capability by limiting verified link rate to HBR3 */
- link->verified_link_cap.link_rate = LINK_RATE_HIGH3;
- }
- }
- }
- }
-}
+// TODO - remove this file after external build dependencies is resolved.
+/* NOTE: This file is pending to be removed, do not add new code to this file */ \ No newline at end of file
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
deleted file mode 100644
index dedd1246ce58..000000000000
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ /dev/null
@@ -1,7553 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: AMD
- */
-#include "dm_services.h"
-#include "dc.h"
-#include "dc_link_dp.h"
-#include "dm_helpers.h"
-#include "opp.h"
-#include "dsc.h"
-#include "clk_mgr.h"
-#include "resource.h"
-
-#include "inc/core_types.h"
-#include "link_hwss.h"
-#include "dc_link_ddc.h"
-#include "core_status.h"
-#include "dpcd_defs.h"
-#include "dc_dmub_srv.h"
-#include "dce/dmub_hw_lock_mgr.h"
-#include "inc/dc_link_dpia.h"
-#include "inc/link_enc_cfg.h"
-#include "link/link_dp_trace.h"
-
-/*Travis*/
-static const uint8_t DP_VGA_LVDS_CONVERTER_ID_2[] = "sivarT";
-/*Nutmeg*/
-static const uint8_t DP_VGA_LVDS_CONVERTER_ID_3[] = "dnomlA";
-
-#define DC_LOGGER \
- link->ctx->logger
-#define DC_TRACE_LEVEL_MESSAGE(...) /* do nothing */
-
-#include "link_dpcd.h"
-
-#ifndef MAX
-#define MAX(X, Y) ((X) > (Y) ? (X) : (Y))
-#endif
-#ifndef MIN
-#define MIN(X, Y) ((X) < (Y) ? (X) : (Y))
-#endif
-
- /* maximum pre emphasis level allowed for each voltage swing level*/
- static const enum dc_pre_emphasis
- voltage_swing_to_pre_emphasis[] = { PRE_EMPHASIS_LEVEL3,
- PRE_EMPHASIS_LEVEL2,
- PRE_EMPHASIS_LEVEL1,
- PRE_EMPHASIS_DISABLED };
-
-enum {
- POST_LT_ADJ_REQ_LIMIT = 6,
- POST_LT_ADJ_REQ_TIMEOUT = 200
-};
-
-struct dp_lt_fallback_entry {
- enum dc_lane_count lane_count;
- enum dc_link_rate link_rate;
-};
-
-static const struct dp_lt_fallback_entry dp_lt_fallbacks[] = {
- /* This link training fallback array is ordered by
- * link bandwidth from highest to lowest.
- * DP specs makes it a normative policy to always
- * choose the next highest link bandwidth during
- * link training fallback.
- */
- {LANE_COUNT_FOUR, LINK_RATE_UHBR20},
- {LANE_COUNT_FOUR, LINK_RATE_UHBR13_5},
- {LANE_COUNT_TWO, LINK_RATE_UHBR20},
- {LANE_COUNT_FOUR, LINK_RATE_UHBR10},
- {LANE_COUNT_TWO, LINK_RATE_UHBR13_5},
- {LANE_COUNT_FOUR, LINK_RATE_HIGH3},
- {LANE_COUNT_ONE, LINK_RATE_UHBR20},
- {LANE_COUNT_TWO, LINK_RATE_UHBR10},
- {LANE_COUNT_FOUR, LINK_RATE_HIGH2},
- {LANE_COUNT_ONE, LINK_RATE_UHBR13_5},
- {LANE_COUNT_TWO, LINK_RATE_HIGH3},
- {LANE_COUNT_ONE, LINK_RATE_UHBR10},
- {LANE_COUNT_TWO, LINK_RATE_HIGH2},
- {LANE_COUNT_FOUR, LINK_RATE_HIGH},
- {LANE_COUNT_ONE, LINK_RATE_HIGH3},
- {LANE_COUNT_FOUR, LINK_RATE_LOW},
- {LANE_COUNT_ONE, LINK_RATE_HIGH2},
- {LANE_COUNT_TWO, LINK_RATE_HIGH},
- {LANE_COUNT_TWO, LINK_RATE_LOW},
- {LANE_COUNT_ONE, LINK_RATE_HIGH},
- {LANE_COUNT_ONE, LINK_RATE_LOW},
-};
-
-static const struct dc_link_settings fail_safe_link_settings = {
- .lane_count = LANE_COUNT_ONE,
- .link_rate = LINK_RATE_LOW,
- .link_spread = LINK_SPREAD_DISABLED,
-};
-
-static bool decide_fallback_link_setting(
- struct dc_link *link,
- struct dc_link_settings *max,
- struct dc_link_settings *cur,
- enum link_training_result training_result);
-static void maximize_lane_settings(const struct link_training_settings *lt_settings,
- struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX]);
-static void override_lane_settings(const struct link_training_settings *lt_settings,
- struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX]);
-
-static uint32_t get_cr_training_aux_rd_interval(struct dc_link *link,
- const struct dc_link_settings *link_settings)
-{
- union training_aux_rd_interval training_rd_interval;
- uint32_t wait_in_micro_secs = 100;
-
- memset(&training_rd_interval, 0, sizeof(training_rd_interval));
- if (dp_get_link_encoding_format(link_settings) == DP_8b_10b_ENCODING &&
- link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_12) {
- core_link_read_dpcd(
- link,
- DP_TRAINING_AUX_RD_INTERVAL,
- (uint8_t *)&training_rd_interval,
- sizeof(training_rd_interval));
- if (training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL)
- wait_in_micro_secs = training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL * 4000;
- }
-
- return wait_in_micro_secs;
-}
-
-static uint32_t get_eq_training_aux_rd_interval(
- struct dc_link *link,
- const struct dc_link_settings *link_settings)
-{
- union training_aux_rd_interval training_rd_interval;
-
- memset(&training_rd_interval, 0, sizeof(training_rd_interval));
- if (dp_get_link_encoding_format(link_settings) == DP_128b_132b_ENCODING) {
- core_link_read_dpcd(
- link,
- DP_128b_132b_TRAINING_AUX_RD_INTERVAL,
- (uint8_t *)&training_rd_interval,
- sizeof(training_rd_interval));
- } else if (dp_get_link_encoding_format(link_settings) == DP_8b_10b_ENCODING &&
- link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_12) {
- core_link_read_dpcd(
- link,
- DP_TRAINING_AUX_RD_INTERVAL,
- (uint8_t *)&training_rd_interval,
- sizeof(training_rd_interval));
- }
-
- switch (training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL) {
- case 0: return 400;
- case 1: return 4000;
- case 2: return 8000;
- case 3: return 12000;
- case 4: return 16000;
- case 5: return 32000;
- case 6: return 64000;
- default: return 400;
- }
-}
-
-void dp_wait_for_training_aux_rd_interval(
- struct dc_link *link,
- uint32_t wait_in_micro_secs)
-{
- if (wait_in_micro_secs > 1000)
- msleep(wait_in_micro_secs/1000);
- else
- udelay(wait_in_micro_secs);
-
- DC_LOG_HW_LINK_TRAINING("%s:\n wait = %d\n",
- __func__,
- wait_in_micro_secs);
-}
-
-enum dpcd_training_patterns
- dc_dp_training_pattern_to_dpcd_training_pattern(
- struct dc_link *link,
- enum dc_dp_training_pattern pattern)
-{
- enum dpcd_training_patterns dpcd_tr_pattern =
- DPCD_TRAINING_PATTERN_VIDEOIDLE;
-
- switch (pattern) {
- case DP_TRAINING_PATTERN_SEQUENCE_1:
- dpcd_tr_pattern = DPCD_TRAINING_PATTERN_1;
- break;
- case DP_TRAINING_PATTERN_SEQUENCE_2:
- dpcd_tr_pattern = DPCD_TRAINING_PATTERN_2;
- break;
- case DP_TRAINING_PATTERN_SEQUENCE_3:
- dpcd_tr_pattern = DPCD_TRAINING_PATTERN_3;
- break;
- case DP_TRAINING_PATTERN_SEQUENCE_4:
- dpcd_tr_pattern = DPCD_TRAINING_PATTERN_4;
- break;
- case DP_128b_132b_TPS1:
- dpcd_tr_pattern = DPCD_128b_132b_TPS1;
- break;
- case DP_128b_132b_TPS2:
- dpcd_tr_pattern = DPCD_128b_132b_TPS2;
- break;
- case DP_128b_132b_TPS2_CDS:
- dpcd_tr_pattern = DPCD_128b_132b_TPS2_CDS;
- break;
- case DP_TRAINING_PATTERN_VIDEOIDLE:
- dpcd_tr_pattern = DPCD_TRAINING_PATTERN_VIDEOIDLE;
- break;
- default:
- ASSERT(0);
- DC_LOG_HW_LINK_TRAINING("%s: Invalid HW Training pattern: %d\n",
- __func__, pattern);
- break;
- }
-
- return dpcd_tr_pattern;
-}
-
-static void dpcd_set_training_pattern(
- struct dc_link *link,
- enum dc_dp_training_pattern training_pattern)
-{
- union dpcd_training_pattern dpcd_pattern = {0};
-
- dpcd_pattern.v1_4.TRAINING_PATTERN_SET =
- dc_dp_training_pattern_to_dpcd_training_pattern(
- link, training_pattern);
-
- core_link_write_dpcd(
- link,
- DP_TRAINING_PATTERN_SET,
- &dpcd_pattern.raw,
- 1);
-
- DC_LOG_HW_LINK_TRAINING("%s\n %x pattern = %x\n",
- __func__,
- DP_TRAINING_PATTERN_SET,
- dpcd_pattern.v1_4.TRAINING_PATTERN_SET);
-}
-
-static enum dc_dp_training_pattern decide_cr_training_pattern(
- const struct dc_link_settings *link_settings)
-{
- switch (dp_get_link_encoding_format(link_settings)) {
- case DP_8b_10b_ENCODING:
- default:
- return DP_TRAINING_PATTERN_SEQUENCE_1;
- case DP_128b_132b_ENCODING:
- return DP_128b_132b_TPS1;
- }
-}
-
-static enum dc_dp_training_pattern decide_eq_training_pattern(struct dc_link *link,
- const struct dc_link_settings *link_settings)
-{
- struct link_encoder *link_enc;
- struct encoder_feature_support *enc_caps;
- struct dpcd_caps *rx_caps = &link->dpcd_caps;
- enum dc_dp_training_pattern pattern = DP_TRAINING_PATTERN_SEQUENCE_2;
-
- link_enc = link_enc_cfg_get_link_enc(link);
- ASSERT(link_enc);
- enc_caps = &link_enc->features;
-
- switch (dp_get_link_encoding_format(link_settings)) {
- case DP_8b_10b_ENCODING:
- if (enc_caps->flags.bits.IS_TPS4_CAPABLE &&
- rx_caps->max_down_spread.bits.TPS4_SUPPORTED)
- pattern = DP_TRAINING_PATTERN_SEQUENCE_4;
- else if (enc_caps->flags.bits.IS_TPS3_CAPABLE &&
- rx_caps->max_ln_count.bits.TPS3_SUPPORTED)
- pattern = DP_TRAINING_PATTERN_SEQUENCE_3;
- else
- pattern = DP_TRAINING_PATTERN_SEQUENCE_2;
- break;
- case DP_128b_132b_ENCODING:
- pattern = DP_128b_132b_TPS2;
- break;
- default:
- pattern = DP_TRAINING_PATTERN_SEQUENCE_2;
- break;
- }
- return pattern;
-}
-
-static uint8_t get_dpcd_link_rate(const struct dc_link_settings *link_settings)
-{
- uint8_t link_rate = 0;
- enum dp_link_encoding encoding = dp_get_link_encoding_format(link_settings);
-
- if (encoding == DP_128b_132b_ENCODING)
- switch (link_settings->link_rate) {
- case LINK_RATE_UHBR10:
- link_rate = 0x1;
- break;
- case LINK_RATE_UHBR20:
- link_rate = 0x2;
- break;
- case LINK_RATE_UHBR13_5:
- link_rate = 0x4;
- break;
- default:
- link_rate = 0;
- break;
- }
- else if (encoding == DP_8b_10b_ENCODING)
- link_rate = (uint8_t) link_settings->link_rate;
- else
- link_rate = 0;
-
- return link_rate;
-}
-
-static void dp_fixed_vs_pe_read_lane_adjust(
- struct dc_link *link,
- union dpcd_training_lane dpcd_lane_adjust[LANE_COUNT_DP_MAX])
-{
- const uint8_t vendor_lttpr_write_data_vs[3] = {0x0, 0x53, 0x63};
- const uint8_t vendor_lttpr_write_data_pe[3] = {0x0, 0x54, 0x63};
- const uint8_t offset = dp_convert_to_count(
- link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
- uint32_t vendor_lttpr_write_address = 0xF004F;
- uint32_t vendor_lttpr_read_address = 0xF0053;
- uint8_t dprx_vs = 0;
- uint8_t dprx_pe = 0;
- uint8_t lane;
-
- if (offset != 0xFF) {
- vendor_lttpr_write_address +=
- ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
- vendor_lttpr_read_address +=
- ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
- }
-
- /* W/A to read lane settings requested by DPRX */
- core_link_write_dpcd(
- link,
- vendor_lttpr_write_address,
- &vendor_lttpr_write_data_vs[0],
- sizeof(vendor_lttpr_write_data_vs));
- core_link_read_dpcd(
- link,
- vendor_lttpr_read_address,
- &dprx_vs,
- 1);
- core_link_write_dpcd(
- link,
- vendor_lttpr_write_address,
- &vendor_lttpr_write_data_pe[0],
- sizeof(vendor_lttpr_write_data_pe));
- core_link_read_dpcd(
- link,
- vendor_lttpr_read_address,
- &dprx_pe,
- 1);
-
- for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) {
- dpcd_lane_adjust[lane].bits.VOLTAGE_SWING_SET = (dprx_vs >> (2 * lane)) & 0x3;
- dpcd_lane_adjust[lane].bits.PRE_EMPHASIS_SET = (dprx_pe >> (2 * lane)) & 0x3;
- }
-}
-
-static void dp_fixed_vs_pe_set_retimer_lane_settings(
- struct dc_link *link,
- const union dpcd_training_lane dpcd_lane_adjust[LANE_COUNT_DP_MAX],
- uint8_t lane_count)
-{
- const uint8_t offset = dp_convert_to_count(
- link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
- const uint8_t vendor_lttpr_write_data_reset[4] = {0x1, 0x50, 0x63, 0xFF};
- uint32_t vendor_lttpr_write_address = 0xF004F;
- uint8_t vendor_lttpr_write_data_vs[4] = {0x1, 0x51, 0x63, 0x0};
- uint8_t vendor_lttpr_write_data_pe[4] = {0x1, 0x52, 0x63, 0x0};
- uint8_t lane = 0;
-
- if (offset != 0xFF) {
- vendor_lttpr_write_address +=
- ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
- }
-
- for (lane = 0; lane < lane_count; lane++) {
- vendor_lttpr_write_data_vs[3] |=
- dpcd_lane_adjust[lane].bits.VOLTAGE_SWING_SET << (2 * lane);
- vendor_lttpr_write_data_pe[3] |=
- dpcd_lane_adjust[lane].bits.PRE_EMPHASIS_SET << (2 * lane);
- }
-
- /* Force LTTPR to output desired VS and PE */
- core_link_write_dpcd(
- link,
- vendor_lttpr_write_address,
- &vendor_lttpr_write_data_reset[0],
- sizeof(vendor_lttpr_write_data_reset));
- core_link_write_dpcd(
- link,
- vendor_lttpr_write_address,
- &vendor_lttpr_write_data_vs[0],
- sizeof(vendor_lttpr_write_data_vs));
- core_link_write_dpcd(
- link,
- vendor_lttpr_write_address,
- &vendor_lttpr_write_data_pe[0],
- sizeof(vendor_lttpr_write_data_pe));
-}
-
-enum dc_status dpcd_set_link_settings(
- struct dc_link *link,
- const struct link_training_settings *lt_settings)
-{
- uint8_t rate;
- enum dc_status status;
-
- union down_spread_ctrl downspread = {0};
- union lane_count_set lane_count_set = {0};
-
- downspread.raw = (uint8_t)
- (lt_settings->link_settings.link_spread);
-
- lane_count_set.bits.LANE_COUNT_SET =
- lt_settings->link_settings.lane_count;
-
- lane_count_set.bits.ENHANCED_FRAMING = lt_settings->enhanced_framing;
- lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED = 0;
-
-
- if (link->ep_type == DISPLAY_ENDPOINT_PHY &&
- lt_settings->pattern_for_eq < DP_TRAINING_PATTERN_SEQUENCE_4) {
- lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED =
- link->dpcd_caps.max_ln_count.bits.POST_LT_ADJ_REQ_SUPPORTED;
- }
-
- status = core_link_write_dpcd(link, DP_DOWNSPREAD_CTRL,
- &downspread.raw, sizeof(downspread));
-
- status = core_link_write_dpcd(link, DP_LANE_COUNT_SET,
- &lane_count_set.raw, 1);
-
- if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_13 &&
- lt_settings->link_settings.use_link_rate_set == true) {
- rate = 0;
- /* WA for some MUX chips that will power down with eDP and lose supported
- * link rate set for eDP 1.4. Source reads DPCD 0x010 again to ensure
- * MUX chip gets link rate set back before link training.
- */
- if (link->connector_signal == SIGNAL_TYPE_EDP) {
- uint8_t supported_link_rates[16];
-
- core_link_read_dpcd(link, DP_SUPPORTED_LINK_RATES,
- supported_link_rates, sizeof(supported_link_rates));
- }
- status = core_link_write_dpcd(link, DP_LINK_BW_SET, &rate, 1);
- status = core_link_write_dpcd(link, DP_LINK_RATE_SET,
- &lt_settings->link_settings.link_rate_set, 1);
- } else {
- rate = get_dpcd_link_rate(&lt_settings->link_settings);
-
- status = core_link_write_dpcd(link, DP_LINK_BW_SET, &rate, 1);
- }
-
- if (rate) {
- DC_LOG_HW_LINK_TRAINING("%s\n %x rate = %x\n %x lane = %x framing = %x\n %x spread = %x\n",
- __func__,
- DP_LINK_BW_SET,
- lt_settings->link_settings.link_rate,
- DP_LANE_COUNT_SET,
- lt_settings->link_settings.lane_count,
- lt_settings->enhanced_framing,
- DP_DOWNSPREAD_CTRL,
- lt_settings->link_settings.link_spread);
- } else {
- DC_LOG_HW_LINK_TRAINING("%s\n %x rate set = %x\n %x lane = %x framing = %x\n %x spread = %x\n",
- __func__,
- DP_LINK_RATE_SET,
- lt_settings->link_settings.link_rate_set,
- DP_LANE_COUNT_SET,
- lt_settings->link_settings.lane_count,
- lt_settings->enhanced_framing,
- DP_DOWNSPREAD_CTRL,
- lt_settings->link_settings.link_spread);
- }
-
- return status;
-}
-
-uint8_t dc_dp_initialize_scrambling_data_symbols(
- struct dc_link *link,
- enum dc_dp_training_pattern pattern)
-{
- uint8_t disable_scrabled_data_symbols = 0;
-
- switch (pattern) {
- case DP_TRAINING_PATTERN_SEQUENCE_1:
- case DP_TRAINING_PATTERN_SEQUENCE_2:
- case DP_TRAINING_PATTERN_SEQUENCE_3:
- disable_scrabled_data_symbols = 1;
- break;
- case DP_TRAINING_PATTERN_SEQUENCE_4:
- case DP_128b_132b_TPS1:
- case DP_128b_132b_TPS2:
- disable_scrabled_data_symbols = 0;
- break;
- default:
- ASSERT(0);
- DC_LOG_HW_LINK_TRAINING("%s: Invalid HW Training pattern: %d\n",
- __func__, pattern);
- break;
- }
- return disable_scrabled_data_symbols;
-}
-
-static inline bool is_repeater(const struct link_training_settings *lt_settings, uint32_t offset)
-{
- return (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) && (offset != 0);
-}
-
-static void dpcd_set_lt_pattern_and_lane_settings(
- struct dc_link *link,
- const struct link_training_settings *lt_settings,
- enum dc_dp_training_pattern pattern,
- uint32_t offset)
-{
- uint32_t dpcd_base_lt_offset;
-
- uint8_t dpcd_lt_buffer[5] = {0};
- union dpcd_training_pattern dpcd_pattern = {0};
- uint32_t size_in_bytes;
- bool edp_workaround = false; /* TODO link_prop.INTERNAL */
- dpcd_base_lt_offset = DP_TRAINING_PATTERN_SET;
-
- if (is_repeater(lt_settings, offset))
- dpcd_base_lt_offset = DP_TRAINING_PATTERN_SET_PHY_REPEATER1 +
- ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
-
- /*****************************************************************
- * DpcdAddress_TrainingPatternSet
- *****************************************************************/
- dpcd_pattern.v1_4.TRAINING_PATTERN_SET =
- dc_dp_training_pattern_to_dpcd_training_pattern(link, pattern);
-
- dpcd_pattern.v1_4.SCRAMBLING_DISABLE =
- dc_dp_initialize_scrambling_data_symbols(link, pattern);
-
- dpcd_lt_buffer[DP_TRAINING_PATTERN_SET - DP_TRAINING_PATTERN_SET]
- = dpcd_pattern.raw;
-
- if (is_repeater(lt_settings, offset)) {
- DC_LOG_HW_LINK_TRAINING("%s\n LTTPR Repeater ID: %d\n 0x%X pattern = %x\n",
- __func__,
- offset,
- dpcd_base_lt_offset,
- dpcd_pattern.v1_4.TRAINING_PATTERN_SET);
- } else {
- DC_LOG_HW_LINK_TRAINING("%s\n 0x%X pattern = %x\n",
- __func__,
- dpcd_base_lt_offset,
- dpcd_pattern.v1_4.TRAINING_PATTERN_SET);
- }
-
- /* concatenate everything into one buffer*/
- size_in_bytes = lt_settings->link_settings.lane_count *
- sizeof(lt_settings->dpcd_lane_settings[0]);
-
- // 0x00103 - 0x00102
- memmove(
- &dpcd_lt_buffer[DP_TRAINING_LANE0_SET - DP_TRAINING_PATTERN_SET],
- lt_settings->dpcd_lane_settings,
- size_in_bytes);
-
- if (is_repeater(lt_settings, offset)) {
- if (dp_get_link_encoding_format(&lt_settings->link_settings) ==
- DP_128b_132b_ENCODING)
- DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n"
- " 0x%X TX_FFE_PRESET_VALUE = %x\n",
- __func__,
- offset,
- dpcd_base_lt_offset,
- lt_settings->dpcd_lane_settings[0].tx_ffe.PRESET_VALUE);
- else if (dp_get_link_encoding_format(&lt_settings->link_settings) ==
- DP_8b_10b_ENCODING)
- DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n"
- " 0x%X VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n",
- __func__,
- offset,
- dpcd_base_lt_offset,
- lt_settings->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET,
- lt_settings->dpcd_lane_settings[0].bits.PRE_EMPHASIS_SET,
- lt_settings->dpcd_lane_settings[0].bits.MAX_SWING_REACHED,
- lt_settings->dpcd_lane_settings[0].bits.MAX_PRE_EMPHASIS_REACHED);
- } else {
- if (dp_get_link_encoding_format(&lt_settings->link_settings) ==
- DP_128b_132b_ENCODING)
- DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X TX_FFE_PRESET_VALUE = %x\n",
- __func__,
- dpcd_base_lt_offset,
- lt_settings->dpcd_lane_settings[0].tx_ffe.PRESET_VALUE);
- else if (dp_get_link_encoding_format(&lt_settings->link_settings) ==
- DP_8b_10b_ENCODING)
- DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n",
- __func__,
- dpcd_base_lt_offset,
- lt_settings->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET,
- lt_settings->dpcd_lane_settings[0].bits.PRE_EMPHASIS_SET,
- lt_settings->dpcd_lane_settings[0].bits.MAX_SWING_REACHED,
- lt_settings->dpcd_lane_settings[0].bits.MAX_PRE_EMPHASIS_REACHED);
- }
- if (edp_workaround) {
- /* for eDP write in 2 parts because the 5-byte burst is
- * causing issues on some eDP panels (EPR#366724)
- */
- core_link_write_dpcd(
- link,
- DP_TRAINING_PATTERN_SET,
- &dpcd_pattern.raw,
- sizeof(dpcd_pattern.raw));
-
- core_link_write_dpcd(
- link,
- DP_TRAINING_LANE0_SET,
- (uint8_t *)(lt_settings->dpcd_lane_settings),
- size_in_bytes);
-
- } else if (dp_get_link_encoding_format(&lt_settings->link_settings) ==
- DP_128b_132b_ENCODING) {
- core_link_write_dpcd(
- link,
- dpcd_base_lt_offset,
- dpcd_lt_buffer,
- sizeof(dpcd_lt_buffer));
- } else
- /* write it all in (1 + number-of-lanes)-byte burst*/
- core_link_write_dpcd(
- link,
- dpcd_base_lt_offset,
- dpcd_lt_buffer,
- size_in_bytes + sizeof(dpcd_pattern.raw));
-}
-
-bool dp_is_cr_done(enum dc_lane_count ln_count,
- union lane_status *dpcd_lane_status)
-{
- uint32_t lane;
- /*LANEx_CR_DONE bits All 1's?*/
- for (lane = 0; lane < (uint32_t)(ln_count); lane++) {
- if (!dpcd_lane_status[lane].bits.CR_DONE_0)
- return false;
- }
- return true;
-}
-
-bool dp_is_ch_eq_done(enum dc_lane_count ln_count,
- union lane_status *dpcd_lane_status)
-{
- bool done = true;
- uint32_t lane;
- for (lane = 0; lane < (uint32_t)(ln_count); lane++)
- if (!dpcd_lane_status[lane].bits.CHANNEL_EQ_DONE_0)
- done = false;
- return done;
-}
-
-bool dp_is_symbol_locked(enum dc_lane_count ln_count,
- union lane_status *dpcd_lane_status)
-{
- bool locked = true;
- uint32_t lane;
- for (lane = 0; lane < (uint32_t)(ln_count); lane++)
- if (!dpcd_lane_status[lane].bits.SYMBOL_LOCKED_0)
- locked = false;
- return locked;
-}
-
-bool dp_is_interlane_aligned(union lane_align_status_updated align_status)
-{
- return align_status.bits.INTERLANE_ALIGN_DONE == 1;
-}
-
-void dp_hw_to_dpcd_lane_settings(
- const struct link_training_settings *lt_settings,
- const struct dc_lane_settings hw_lane_settings[LANE_COUNT_DP_MAX],
- union dpcd_training_lane dpcd_lane_settings[])
-{
- uint8_t lane = 0;
-
- for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) {
- if (dp_get_link_encoding_format(&lt_settings->link_settings) ==
- DP_8b_10b_ENCODING) {
- dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET =
- (uint8_t)(hw_lane_settings[lane].VOLTAGE_SWING);
- dpcd_lane_settings[lane].bits.PRE_EMPHASIS_SET =
- (uint8_t)(hw_lane_settings[lane].PRE_EMPHASIS);
- dpcd_lane_settings[lane].bits.MAX_SWING_REACHED =
- (hw_lane_settings[lane].VOLTAGE_SWING ==
- VOLTAGE_SWING_MAX_LEVEL ? 1 : 0);
- dpcd_lane_settings[lane].bits.MAX_PRE_EMPHASIS_REACHED =
- (hw_lane_settings[lane].PRE_EMPHASIS ==
- PRE_EMPHASIS_MAX_LEVEL ? 1 : 0);
- }
- else if (dp_get_link_encoding_format(&lt_settings->link_settings) ==
- DP_128b_132b_ENCODING) {
- dpcd_lane_settings[lane].tx_ffe.PRESET_VALUE =
- hw_lane_settings[lane].FFE_PRESET.settings.level;
- }
- }
-}
-
-void dp_decide_lane_settings(
- const struct link_training_settings *lt_settings,
- const union lane_adjust ln_adjust[LANE_COUNT_DP_MAX],
- struct dc_lane_settings hw_lane_settings[LANE_COUNT_DP_MAX],
- union dpcd_training_lane dpcd_lane_settings[])
-{
- uint32_t lane;
-
- for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) {
- if (dp_get_link_encoding_format(&lt_settings->link_settings) ==
- DP_8b_10b_ENCODING) {
- hw_lane_settings[lane].VOLTAGE_SWING =
- (enum dc_voltage_swing)(ln_adjust[lane].bits.
- VOLTAGE_SWING_LANE);
- hw_lane_settings[lane].PRE_EMPHASIS =
- (enum dc_pre_emphasis)(ln_adjust[lane].bits.
- PRE_EMPHASIS_LANE);
- }
- else if (dp_get_link_encoding_format(&lt_settings->link_settings) ==
- DP_128b_132b_ENCODING) {
- hw_lane_settings[lane].FFE_PRESET.raw =
- ln_adjust[lane].tx_ffe.PRESET_VALUE;
- }
- }
- dp_hw_to_dpcd_lane_settings(lt_settings, hw_lane_settings, dpcd_lane_settings);
-
- if (lt_settings->disallow_per_lane_settings) {
- /* we find the maximum of the requested settings across all lanes*/
- /* and set this maximum for all lanes*/
- maximize_lane_settings(lt_settings, hw_lane_settings);
- override_lane_settings(lt_settings, hw_lane_settings);
-
- if (lt_settings->always_match_dpcd_with_hw_lane_settings)
- dp_hw_to_dpcd_lane_settings(lt_settings, hw_lane_settings, dpcd_lane_settings);
- }
-
-}
-
-static uint8_t get_nibble_at_index(const uint8_t *buf,
- uint32_t index)
-{
- uint8_t nibble;
- nibble = buf[index / 2];
-
- if (index % 2)
- nibble >>= 4;
- else
- nibble &= 0x0F;
-
- return nibble;
-}
-
-static enum dc_pre_emphasis get_max_pre_emphasis_for_voltage_swing(
- enum dc_voltage_swing voltage)
-{
- enum dc_pre_emphasis pre_emphasis;
- pre_emphasis = PRE_EMPHASIS_MAX_LEVEL;
-
- if (voltage <= VOLTAGE_SWING_MAX_LEVEL)
- pre_emphasis = voltage_swing_to_pre_emphasis[voltage];
-
- return pre_emphasis;
-
-}
-
-static void maximize_lane_settings(const struct link_training_settings *lt_settings,
- struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX])
-{
- uint32_t lane;
- struct dc_lane_settings max_requested;
-
- max_requested.VOLTAGE_SWING = lane_settings[0].VOLTAGE_SWING;
- max_requested.PRE_EMPHASIS = lane_settings[0].PRE_EMPHASIS;
- max_requested.FFE_PRESET = lane_settings[0].FFE_PRESET;
-
- /* Determine what the maximum of the requested settings are*/
- for (lane = 1; lane < lt_settings->link_settings.lane_count; lane++) {
- if (lane_settings[lane].VOLTAGE_SWING > max_requested.VOLTAGE_SWING)
- max_requested.VOLTAGE_SWING = lane_settings[lane].VOLTAGE_SWING;
-
- if (lane_settings[lane].PRE_EMPHASIS > max_requested.PRE_EMPHASIS)
- max_requested.PRE_EMPHASIS = lane_settings[lane].PRE_EMPHASIS;
- if (lane_settings[lane].FFE_PRESET.settings.level >
- max_requested.FFE_PRESET.settings.level)
- max_requested.FFE_PRESET.settings.level =
- lane_settings[lane].FFE_PRESET.settings.level;
- }
-
- /* make sure the requested settings are
- * not higher than maximum settings*/
- if (max_requested.VOLTAGE_SWING > VOLTAGE_SWING_MAX_LEVEL)
- max_requested.VOLTAGE_SWING = VOLTAGE_SWING_MAX_LEVEL;
-
- if (max_requested.PRE_EMPHASIS > PRE_EMPHASIS_MAX_LEVEL)
- max_requested.PRE_EMPHASIS = PRE_EMPHASIS_MAX_LEVEL;
- if (max_requested.FFE_PRESET.settings.level > DP_FFE_PRESET_MAX_LEVEL)
- max_requested.FFE_PRESET.settings.level = DP_FFE_PRESET_MAX_LEVEL;
-
- /* make sure the pre-emphasis matches the voltage swing*/
- if (max_requested.PRE_EMPHASIS >
- get_max_pre_emphasis_for_voltage_swing(
- max_requested.VOLTAGE_SWING))
- max_requested.PRE_EMPHASIS =
- get_max_pre_emphasis_for_voltage_swing(
- max_requested.VOLTAGE_SWING);
-
- for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) {
- lane_settings[lane].VOLTAGE_SWING = max_requested.VOLTAGE_SWING;
- lane_settings[lane].PRE_EMPHASIS = max_requested.PRE_EMPHASIS;
- lane_settings[lane].FFE_PRESET = max_requested.FFE_PRESET;
- }
-}
-
-static void override_lane_settings(const struct link_training_settings *lt_settings,
- struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX])
-{
- uint32_t lane;
-
- if (lt_settings->voltage_swing == NULL &&
- lt_settings->pre_emphasis == NULL &&
- lt_settings->ffe_preset == NULL &&
- lt_settings->post_cursor2 == NULL)
-
- return;
-
- for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) {
- if (lt_settings->voltage_swing)
- lane_settings[lane].VOLTAGE_SWING = *lt_settings->voltage_swing;
- if (lt_settings->pre_emphasis)
- lane_settings[lane].PRE_EMPHASIS = *lt_settings->pre_emphasis;
- if (lt_settings->post_cursor2)
- lane_settings[lane].POST_CURSOR2 = *lt_settings->post_cursor2;
- if (lt_settings->ffe_preset)
- lane_settings[lane].FFE_PRESET = *lt_settings->ffe_preset;
- }
-}
-
-enum dc_status dp_get_lane_status_and_lane_adjust(
- struct dc_link *link,
- const struct link_training_settings *link_training_setting,
- union lane_status ln_status[LANE_COUNT_DP_MAX],
- union lane_align_status_updated *ln_align,
- union lane_adjust ln_adjust[LANE_COUNT_DP_MAX],
- uint32_t offset)
-{
- unsigned int lane01_status_address = DP_LANE0_1_STATUS;
- uint8_t lane_adjust_offset = 4;
- unsigned int lane01_adjust_address;
- uint8_t dpcd_buf[6] = {0};
- uint32_t lane;
- enum dc_status status;
-
- if (is_repeater(link_training_setting, offset)) {
- lane01_status_address =
- DP_LANE0_1_STATUS_PHY_REPEATER1 +
- ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
- lane_adjust_offset = 3;
- }
-
- status = core_link_read_dpcd(
- link,
- lane01_status_address,
- (uint8_t *)(dpcd_buf),
- sizeof(dpcd_buf));
-
- if (status != DC_OK) {
- DC_LOG_HW_LINK_TRAINING("%s:\n Failed to read from address 0x%X,"
- " keep current lane status and lane adjust unchanged",
- __func__,
- lane01_status_address);
- return status;
- }
-
- for (lane = 0; lane <
- (uint32_t)(link_training_setting->link_settings.lane_count);
- lane++) {
-
- ln_status[lane].raw =
- get_nibble_at_index(&dpcd_buf[0], lane);
- ln_adjust[lane].raw =
- get_nibble_at_index(&dpcd_buf[lane_adjust_offset], lane);
- }
-
- ln_align->raw = dpcd_buf[2];
-
- if (is_repeater(link_training_setting, offset)) {
- DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n"
- " 0x%X Lane01Status = %x\n 0x%X Lane23Status = %x\n ",
- __func__,
- offset,
- lane01_status_address, dpcd_buf[0],
- lane01_status_address + 1, dpcd_buf[1]);
-
- lane01_adjust_address = DP_ADJUST_REQUEST_LANE0_1_PHY_REPEATER1 +
- ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
-
- DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n"
- " 0x%X Lane01AdjustRequest = %x\n 0x%X Lane23AdjustRequest = %x\n",
- __func__,
- offset,
- lane01_adjust_address,
- dpcd_buf[lane_adjust_offset],
- lane01_adjust_address + 1,
- dpcd_buf[lane_adjust_offset + 1]);
- } else {
- DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X Lane01Status = %x\n 0x%X Lane23Status = %x\n ",
- __func__,
- lane01_status_address, dpcd_buf[0],
- lane01_status_address + 1, dpcd_buf[1]);
-
- lane01_adjust_address = DP_ADJUST_REQUEST_LANE0_1;
-
- DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X Lane01AdjustRequest = %x\n 0x%X Lane23AdjustRequest = %x\n",
- __func__,
- lane01_adjust_address,
- dpcd_buf[lane_adjust_offset],
- lane01_adjust_address + 1,
- dpcd_buf[lane_adjust_offset + 1]);
- }
-
- return status;
-}
-
-static enum dc_status dpcd_128b_132b_set_lane_settings(
- struct dc_link *link,
- const struct link_training_settings *link_training_setting)
-{
- enum dc_status status = core_link_write_dpcd(link,
- DP_TRAINING_LANE0_SET,
- (uint8_t *)(link_training_setting->dpcd_lane_settings),
- sizeof(link_training_setting->dpcd_lane_settings));
-
- DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X TX_FFE_PRESET_VALUE = %x\n",
- __func__,
- DP_TRAINING_LANE0_SET,
- link_training_setting->dpcd_lane_settings[0].tx_ffe.PRESET_VALUE);
- return status;
-}
-
-
-enum dc_status dpcd_set_lane_settings(
- struct dc_link *link,
- const struct link_training_settings *link_training_setting,
- uint32_t offset)
-{
- unsigned int lane0_set_address;
- enum dc_status status;
-
- lane0_set_address = DP_TRAINING_LANE0_SET;
-
- if (is_repeater(link_training_setting, offset))
- lane0_set_address = DP_TRAINING_LANE0_SET_PHY_REPEATER1 +
- ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
-
- status = core_link_write_dpcd(link,
- lane0_set_address,
- (uint8_t *)(link_training_setting->dpcd_lane_settings),
- link_training_setting->link_settings.lane_count);
-
- if (is_repeater(link_training_setting, offset)) {
- DC_LOG_HW_LINK_TRAINING("%s\n LTTPR Repeater ID: %d\n"
- " 0x%X VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n",
- __func__,
- offset,
- lane0_set_address,
- link_training_setting->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET,
- link_training_setting->dpcd_lane_settings[0].bits.PRE_EMPHASIS_SET,
- link_training_setting->dpcd_lane_settings[0].bits.MAX_SWING_REACHED,
- link_training_setting->dpcd_lane_settings[0].bits.MAX_PRE_EMPHASIS_REACHED);
-
- } else {
- DC_LOG_HW_LINK_TRAINING("%s\n 0x%X VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n",
- __func__,
- lane0_set_address,
- link_training_setting->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET,
- link_training_setting->dpcd_lane_settings[0].bits.PRE_EMPHASIS_SET,
- link_training_setting->dpcd_lane_settings[0].bits.MAX_SWING_REACHED,
- link_training_setting->dpcd_lane_settings[0].bits.MAX_PRE_EMPHASIS_REACHED);
- }
-
- return status;
-}
-
-bool dp_is_max_vs_reached(
- const struct link_training_settings *lt_settings)
-{
- uint32_t lane;
- for (lane = 0; lane <
- (uint32_t)(lt_settings->link_settings.lane_count);
- lane++) {
- if (lt_settings->dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET
- == VOLTAGE_SWING_MAX_LEVEL)
- return true;
- }
- return false;
-
-}
-
-static bool perform_post_lt_adj_req_sequence(
- struct dc_link *link,
- const struct link_resource *link_res,
- struct link_training_settings *lt_settings)
-{
- enum dc_lane_count lane_count =
- lt_settings->link_settings.lane_count;
-
- uint32_t adj_req_count;
- uint32_t adj_req_timer;
- bool req_drv_setting_changed;
- uint32_t lane;
- union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0};
- union lane_align_status_updated dpcd_lane_status_updated = {0};
- union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0};
-
- req_drv_setting_changed = false;
- for (adj_req_count = 0; adj_req_count < POST_LT_ADJ_REQ_LIMIT;
- adj_req_count++) {
-
- req_drv_setting_changed = false;
-
- for (adj_req_timer = 0;
- adj_req_timer < POST_LT_ADJ_REQ_TIMEOUT;
- adj_req_timer++) {
-
- dp_get_lane_status_and_lane_adjust(
- link,
- lt_settings,
- dpcd_lane_status,
- &dpcd_lane_status_updated,
- dpcd_lane_adjust,
- DPRX);
-
- if (dpcd_lane_status_updated.bits.
- POST_LT_ADJ_REQ_IN_PROGRESS == 0)
- return true;
-
- if (!dp_is_cr_done(lane_count, dpcd_lane_status))
- return false;
-
- if (!dp_is_ch_eq_done(lane_count, dpcd_lane_status) ||
- !dp_is_symbol_locked(lane_count, dpcd_lane_status) ||
- !dp_is_interlane_aligned(dpcd_lane_status_updated))
- return false;
-
- for (lane = 0; lane < (uint32_t)(lane_count); lane++) {
-
- if (lt_settings->
- dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET !=
- dpcd_lane_adjust[lane].bits.VOLTAGE_SWING_LANE ||
- lt_settings->dpcd_lane_settings[lane].bits.PRE_EMPHASIS_SET !=
- dpcd_lane_adjust[lane].bits.PRE_EMPHASIS_LANE) {
-
- req_drv_setting_changed = true;
- break;
- }
- }
-
- if (req_drv_setting_changed) {
- dp_decide_lane_settings(lt_settings, dpcd_lane_adjust,
- lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
-
- dc_link_dp_set_drive_settings(link,
- link_res,
- lt_settings);
- break;
- }
-
- msleep(1);
- }
-
- if (!req_drv_setting_changed) {
- DC_LOG_WARNING("%s: Post Link Training Adjust Request Timed out\n",
- __func__);
-
- ASSERT(0);
- return true;
- }
- }
- DC_LOG_WARNING("%s: Post Link Training Adjust Request limit reached\n",
- __func__);
-
- ASSERT(0);
- return true;
-
-}
-
-/* Only used for channel equalization */
-uint32_t dp_translate_training_aux_read_interval(uint32_t dpcd_aux_read_interval)
-{
- unsigned int aux_rd_interval_us = 400;
-
- switch (dpcd_aux_read_interval) {
- case 0x01:
- aux_rd_interval_us = 4000;
- break;
- case 0x02:
- aux_rd_interval_us = 8000;
- break;
- case 0x03:
- aux_rd_interval_us = 12000;
- break;
- case 0x04:
- aux_rd_interval_us = 16000;
- break;
- case 0x05:
- aux_rd_interval_us = 32000;
- break;
- case 0x06:
- aux_rd_interval_us = 64000;
- break;
- default:
- break;
- }
-
- return aux_rd_interval_us;
-}
-
-enum link_training_result dp_get_cr_failure(enum dc_lane_count ln_count,
- union lane_status *dpcd_lane_status)
-{
- enum link_training_result result = LINK_TRAINING_SUCCESS;
-
- if (ln_count >= LANE_COUNT_ONE && !dpcd_lane_status[0].bits.CR_DONE_0)
- result = LINK_TRAINING_CR_FAIL_LANE0;
- else if (ln_count >= LANE_COUNT_TWO && !dpcd_lane_status[1].bits.CR_DONE_0)
- result = LINK_TRAINING_CR_FAIL_LANE1;
- else if (ln_count >= LANE_COUNT_FOUR && !dpcd_lane_status[2].bits.CR_DONE_0)
- result = LINK_TRAINING_CR_FAIL_LANE23;
- else if (ln_count >= LANE_COUNT_FOUR && !dpcd_lane_status[3].bits.CR_DONE_0)
- result = LINK_TRAINING_CR_FAIL_LANE23;
- return result;
-}
-
-static enum link_training_result perform_channel_equalization_sequence(
- struct dc_link *link,
- const struct link_resource *link_res,
- struct link_training_settings *lt_settings,
- uint32_t offset)
-{
- enum dc_dp_training_pattern tr_pattern;
- uint32_t retries_ch_eq;
- uint32_t wait_time_microsec;
- enum dc_lane_count lane_count = lt_settings->link_settings.lane_count;
- union lane_align_status_updated dpcd_lane_status_updated = {0};
- union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0};
- union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0};
-
- /* Note: also check that TPS4 is a supported feature*/
- tr_pattern = lt_settings->pattern_for_eq;
-
- if (is_repeater(lt_settings, offset) && dp_get_link_encoding_format(&lt_settings->link_settings) == DP_8b_10b_ENCODING)
- tr_pattern = DP_TRAINING_PATTERN_SEQUENCE_4;
-
- dp_set_hw_training_pattern(link, link_res, tr_pattern, offset);
-
- for (retries_ch_eq = 0; retries_ch_eq <= LINK_TRAINING_MAX_RETRY_COUNT;
- retries_ch_eq++) {
-
- dp_set_hw_lane_settings(link, link_res, lt_settings, offset);
-
- /* 2. update DPCD*/
- if (!retries_ch_eq)
- /* EPR #361076 - write as a 5-byte burst,
- * but only for the 1-st iteration
- */
-
- dpcd_set_lt_pattern_and_lane_settings(
- link,
- lt_settings,
- tr_pattern, offset);
- else
- dpcd_set_lane_settings(link, lt_settings, offset);
-
- /* 3. wait for receiver to lock-on*/
- wait_time_microsec = lt_settings->eq_pattern_time;
-
- if (is_repeater(lt_settings, offset))
- wait_time_microsec =
- dp_translate_training_aux_read_interval(
- link->dpcd_caps.lttpr_caps.aux_rd_interval[offset - 1]);
-
- dp_wait_for_training_aux_rd_interval(
- link,
- wait_time_microsec);
-
- /* 4. Read lane status and requested
- * drive settings as set by the sink*/
-
- dp_get_lane_status_and_lane_adjust(
- link,
- lt_settings,
- dpcd_lane_status,
- &dpcd_lane_status_updated,
- dpcd_lane_adjust,
- offset);
-
- /* 5. check CR done*/
- if (!dp_is_cr_done(lane_count, dpcd_lane_status))
- return dpcd_lane_status[0].bits.CR_DONE_0 ?
- LINK_TRAINING_EQ_FAIL_CR_PARTIAL :
- LINK_TRAINING_EQ_FAIL_CR;
-
- /* 6. check CHEQ done*/
- if (dp_is_ch_eq_done(lane_count, dpcd_lane_status) &&
- dp_is_symbol_locked(lane_count, dpcd_lane_status) &&
- dp_is_interlane_aligned(dpcd_lane_status_updated))
- return LINK_TRAINING_SUCCESS;
-
- /* 7. update VS/PE/PC2 in lt_settings*/
- dp_decide_lane_settings(lt_settings, dpcd_lane_adjust,
- lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
- }
-
- return LINK_TRAINING_EQ_FAIL_EQ;
-
-}
-
-static void start_clock_recovery_pattern_early(struct dc_link *link,
- const struct link_resource *link_res,
- struct link_training_settings *lt_settings,
- uint32_t offset)
-{
- DC_LOG_HW_LINK_TRAINING("%s\n GPU sends TPS1. Wait 400us.\n",
- __func__);
- dp_set_hw_training_pattern(link, link_res, lt_settings->pattern_for_cr, offset);
- dp_set_hw_lane_settings(link, link_res, lt_settings, offset);
- udelay(400);
-}
-
-static enum link_training_result perform_clock_recovery_sequence(
- struct dc_link *link,
- const struct link_resource *link_res,
- struct link_training_settings *lt_settings,
- uint32_t offset)
-{
- uint32_t retries_cr;
- uint32_t retry_count;
- uint32_t wait_time_microsec;
- enum dc_lane_count lane_count = lt_settings->link_settings.lane_count;
- union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX];
- union lane_align_status_updated dpcd_lane_status_updated;
- union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0};
-
- retries_cr = 0;
- retry_count = 0;
-
- memset(&dpcd_lane_status, '\0', sizeof(dpcd_lane_status));
- memset(&dpcd_lane_status_updated, '\0',
- sizeof(dpcd_lane_status_updated));
-
- if (!link->ctx->dc->work_arounds.lt_early_cr_pattern)
- dp_set_hw_training_pattern(link, link_res, lt_settings->pattern_for_cr, offset);
-
- /* najeeb - The synaptics MST hub can put the LT in
- * infinite loop by switching the VS
- */
- /* between level 0 and level 1 continuously, here
- * we try for CR lock for LinkTrainingMaxCRRetry count*/
- while ((retries_cr < LINK_TRAINING_MAX_RETRY_COUNT) &&
- (retry_count < LINK_TRAINING_MAX_CR_RETRY)) {
-
-
- /* 1. call HWSS to set lane settings*/
- dp_set_hw_lane_settings(
- link,
- link_res,
- lt_settings,
- offset);
-
- /* 2. update DPCD of the receiver*/
- if (!retry_count)
- /* EPR #361076 - write as a 5-byte burst,
- * but only for the 1-st iteration.*/
- dpcd_set_lt_pattern_and_lane_settings(
- link,
- lt_settings,
- lt_settings->pattern_for_cr,
- offset);
- else
- dpcd_set_lane_settings(
- link,
- lt_settings,
- offset);
-
- /* 3. wait receiver to lock-on*/
- wait_time_microsec = lt_settings->cr_pattern_time;
-
- dp_wait_for_training_aux_rd_interval(
- link,
- wait_time_microsec);
-
- /* 4. Read lane status and requested drive
- * settings as set by the sink
- */
- dp_get_lane_status_and_lane_adjust(
- link,
- lt_settings,
- dpcd_lane_status,
- &dpcd_lane_status_updated,
- dpcd_lane_adjust,
- offset);
-
- /* 5. check CR done*/
- if (dp_is_cr_done(lane_count, dpcd_lane_status))
- return LINK_TRAINING_SUCCESS;
-
- /* 6. max VS reached*/
- if ((dp_get_link_encoding_format(&lt_settings->link_settings) ==
- DP_8b_10b_ENCODING) &&
- dp_is_max_vs_reached(lt_settings))
- break;
-
- /* 7. same lane settings*/
- /* Note: settings are the same for all lanes,
- * so comparing first lane is sufficient*/
- if ((dp_get_link_encoding_format(&lt_settings->link_settings) == DP_8b_10b_ENCODING) &&
- lt_settings->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET ==
- dpcd_lane_adjust[0].bits.VOLTAGE_SWING_LANE)
- retries_cr++;
- else if ((dp_get_link_encoding_format(&lt_settings->link_settings) == DP_128b_132b_ENCODING) &&
- lt_settings->dpcd_lane_settings[0].tx_ffe.PRESET_VALUE ==
- dpcd_lane_adjust[0].tx_ffe.PRESET_VALUE)
- retries_cr++;
- else
- retries_cr = 0;
-
- /* 8. update VS/PE/PC2 in lt_settings*/
- dp_decide_lane_settings(lt_settings, dpcd_lane_adjust,
- lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
- retry_count++;
- }
-
- if (retry_count >= LINK_TRAINING_MAX_CR_RETRY) {
- ASSERT(0);
- DC_LOG_ERROR("%s: Link Training Error, could not get CR after %d tries. Possibly voltage swing issue",
- __func__,
- LINK_TRAINING_MAX_CR_RETRY);
-
- }
-
- return dp_get_cr_failure(lane_count, dpcd_lane_status);
-}
-
-static inline enum link_training_result dp_transition_to_video_idle(
- struct dc_link *link,
- const struct link_resource *link_res,
- struct link_training_settings *lt_settings,
- enum link_training_result status)
-{
- union lane_count_set lane_count_set = {0};
-
- /* 4. mainlink output idle pattern*/
- dp_set_hw_test_pattern(link, link_res, DP_TEST_PATTERN_VIDEO_MODE, NULL, 0);
-
- /*
- * 5. post training adjust if required
- * If the upstream DPTX and downstream DPRX both support TPS4,
- * TPS4 must be used instead of POST_LT_ADJ_REQ.
- */
- if (link->dpcd_caps.max_ln_count.bits.POST_LT_ADJ_REQ_SUPPORTED != 1 ||
- lt_settings->pattern_for_eq >= DP_TRAINING_PATTERN_SEQUENCE_4) {
- /* delay 5ms after Main Link output idle pattern and then check
- * DPCD 0202h.
- */
- if (link->connector_signal != SIGNAL_TYPE_EDP && status == LINK_TRAINING_SUCCESS) {
- msleep(5);
- status = dp_check_link_loss_status(link, lt_settings);
- }
- return status;
- }
-
- if (status == LINK_TRAINING_SUCCESS &&
- perform_post_lt_adj_req_sequence(link, link_res, lt_settings) == false)
- status = LINK_TRAINING_LQA_FAIL;
-
- lane_count_set.bits.LANE_COUNT_SET = lt_settings->link_settings.lane_count;
- lane_count_set.bits.ENHANCED_FRAMING = lt_settings->enhanced_framing;
- lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED = 0;
-
- core_link_write_dpcd(
- link,
- DP_LANE_COUNT_SET,
- &lane_count_set.raw,
- sizeof(lane_count_set));
-
- return status;
-}
-
-enum link_training_result dp_check_link_loss_status(
- struct dc_link *link,
- const struct link_training_settings *link_training_setting)
-{
- enum link_training_result status = LINK_TRAINING_SUCCESS;
- union lane_status lane_status;
- uint8_t dpcd_buf[6] = {0};
- uint32_t lane;
-
- core_link_read_dpcd(
- link,
- DP_SINK_COUNT,
- (uint8_t *)(dpcd_buf),
- sizeof(dpcd_buf));
-
- /*parse lane status*/
- for (lane = 0; lane < link->cur_link_settings.lane_count; lane++) {
- /*
- * check lanes status
- */
- lane_status.raw = get_nibble_at_index(&dpcd_buf[2], lane);
-
- if (!lane_status.bits.CHANNEL_EQ_DONE_0 ||
- !lane_status.bits.CR_DONE_0 ||
- !lane_status.bits.SYMBOL_LOCKED_0) {
- /* if one of the channel equalization, clock
- * recovery or symbol lock is dropped
- * consider it as (link has been
- * dropped) dp sink status has changed
- */
- status = LINK_TRAINING_LINK_LOSS;
- break;
- }
- }
-
- return status;
-}
-
-static inline void decide_8b_10b_training_settings(
- struct dc_link *link,
- const struct dc_link_settings *link_setting,
- struct link_training_settings *lt_settings)
-{
- memset(lt_settings, '\0', sizeof(struct link_training_settings));
-
- /* Initialize link settings */
- lt_settings->link_settings.use_link_rate_set = link_setting->use_link_rate_set;
- lt_settings->link_settings.link_rate_set = link_setting->link_rate_set;
- lt_settings->link_settings.link_rate = link_setting->link_rate;
- lt_settings->link_settings.lane_count = link_setting->lane_count;
- /* TODO hard coded to SS for now
- * lt_settings.link_settings.link_spread =
- * dal_display_path_is_ss_supported(
- * path_mode->display_path) ?
- * LINK_SPREAD_05_DOWNSPREAD_30KHZ :
- * LINK_SPREAD_DISABLED;
- */
- lt_settings->link_settings.link_spread = link->dp_ss_off ?
- LINK_SPREAD_DISABLED : LINK_SPREAD_05_DOWNSPREAD_30KHZ;
- lt_settings->cr_pattern_time = get_cr_training_aux_rd_interval(link, link_setting);
- lt_settings->eq_pattern_time = get_eq_training_aux_rd_interval(link, link_setting);
- lt_settings->pattern_for_cr = decide_cr_training_pattern(link_setting);
- lt_settings->pattern_for_eq = decide_eq_training_pattern(link, link_setting);
- lt_settings->enhanced_framing = 1;
- lt_settings->should_set_fec_ready = true;
- lt_settings->disallow_per_lane_settings = true;
- lt_settings->always_match_dpcd_with_hw_lane_settings = true;
- lt_settings->lttpr_mode = dp_decide_8b_10b_lttpr_mode(link);
- dp_hw_to_dpcd_lane_settings(lt_settings, lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
-}
-
-static inline void decide_128b_132b_training_settings(struct dc_link *link,
- const struct dc_link_settings *link_settings,
- struct link_training_settings *lt_settings)
-{
- memset(lt_settings, 0, sizeof(*lt_settings));
-
- lt_settings->link_settings = *link_settings;
- /* TODO: should decide link spread when populating link_settings */
- lt_settings->link_settings.link_spread = link->dp_ss_off ? LINK_SPREAD_DISABLED :
- LINK_SPREAD_05_DOWNSPREAD_30KHZ;
-
- lt_settings->pattern_for_cr = decide_cr_training_pattern(link_settings);
- lt_settings->pattern_for_eq = decide_eq_training_pattern(link, link_settings);
- lt_settings->eq_pattern_time = 2500;
- lt_settings->eq_wait_time_limit = 400000;
- lt_settings->eq_loop_count_limit = 20;
- lt_settings->pattern_for_cds = DP_128b_132b_TPS2_CDS;
- lt_settings->cds_pattern_time = 2500;
- lt_settings->cds_wait_time_limit = (dp_convert_to_count(
- link->dpcd_caps.lttpr_caps.phy_repeater_cnt) + 1) * 20000;
- lt_settings->disallow_per_lane_settings = true;
- lt_settings->lttpr_mode = dp_decide_128b_132b_lttpr_mode(link);
- dp_hw_to_dpcd_lane_settings(lt_settings,
- lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
-}
-
-void dp_decide_training_settings(
- struct dc_link *link,
- const struct dc_link_settings *link_settings,
- struct link_training_settings *lt_settings)
-{
- if (dp_get_link_encoding_format(link_settings) == DP_8b_10b_ENCODING)
- decide_8b_10b_training_settings(link, link_settings, lt_settings);
- else if (dp_get_link_encoding_format(link_settings) == DP_128b_132b_ENCODING)
- decide_128b_132b_training_settings(link, link_settings, lt_settings);
-}
-
-static void override_training_settings(
- struct dc_link *link,
- const struct dc_link_training_overrides *overrides,
- struct link_training_settings *lt_settings)
-{
- uint32_t lane;
-
- /* Override link spread */
- if (!link->dp_ss_off && overrides->downspread != NULL)
- lt_settings->link_settings.link_spread = *overrides->downspread ?
- LINK_SPREAD_05_DOWNSPREAD_30KHZ
- : LINK_SPREAD_DISABLED;
-
- /* Override lane settings */
- if (overrides->voltage_swing != NULL)
- lt_settings->voltage_swing = overrides->voltage_swing;
- if (overrides->pre_emphasis != NULL)
- lt_settings->pre_emphasis = overrides->pre_emphasis;
- if (overrides->post_cursor2 != NULL)
- lt_settings->post_cursor2 = overrides->post_cursor2;
- if (overrides->ffe_preset != NULL)
- lt_settings->ffe_preset = overrides->ffe_preset;
- /* Override HW lane settings with BIOS forced values if present */
- if (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN &&
- lt_settings->lttpr_mode == LTTPR_MODE_TRANSPARENT) {
- lt_settings->voltage_swing = &link->bios_forced_drive_settings.VOLTAGE_SWING;
- lt_settings->pre_emphasis = &link->bios_forced_drive_settings.PRE_EMPHASIS;
- lt_settings->always_match_dpcd_with_hw_lane_settings = false;
- }
- for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) {
- lt_settings->hw_lane_settings[lane].VOLTAGE_SWING =
- lt_settings->voltage_swing != NULL ?
- *lt_settings->voltage_swing :
- VOLTAGE_SWING_LEVEL0;
- lt_settings->hw_lane_settings[lane].PRE_EMPHASIS =
- lt_settings->pre_emphasis != NULL ?
- *lt_settings->pre_emphasis
- : PRE_EMPHASIS_DISABLED;
- lt_settings->hw_lane_settings[lane].POST_CURSOR2 =
- lt_settings->post_cursor2 != NULL ?
- *lt_settings->post_cursor2
- : POST_CURSOR2_DISABLED;
- }
-
- if (lt_settings->always_match_dpcd_with_hw_lane_settings)
- dp_hw_to_dpcd_lane_settings(lt_settings,
- lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
-
- /* Initialize training timings */
- if (overrides->cr_pattern_time != NULL)
- lt_settings->cr_pattern_time = *overrides->cr_pattern_time;
-
- if (overrides->eq_pattern_time != NULL)
- lt_settings->eq_pattern_time = *overrides->eq_pattern_time;
-
- if (overrides->pattern_for_cr != NULL)
- lt_settings->pattern_for_cr = *overrides->pattern_for_cr;
- if (overrides->pattern_for_eq != NULL)
- lt_settings->pattern_for_eq = *overrides->pattern_for_eq;
-
- if (overrides->enhanced_framing != NULL)
- lt_settings->enhanced_framing = *overrides->enhanced_framing;
-
- if (link->preferred_training_settings.fec_enable != NULL)
- lt_settings->should_set_fec_ready = *link->preferred_training_settings.fec_enable;
-
- #if defined(CONFIG_DRM_AMD_DC_DCN)
- /* Check DP tunnel LTTPR mode debug option. */
- if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && link->dc->debug.dpia_debug.bits.force_non_lttpr)
- lt_settings->lttpr_mode = LTTPR_MODE_NON_LTTPR;
-
-#endif
- dp_get_lttpr_mode_override(link, &lt_settings->lttpr_mode);
-
-}
-
-uint8_t dp_convert_to_count(uint8_t lttpr_repeater_count)
-{
- switch (lttpr_repeater_count) {
- case 0x80: // 1 lttpr repeater
- return 1;
- case 0x40: // 2 lttpr repeaters
- return 2;
- case 0x20: // 3 lttpr repeaters
- return 3;
- case 0x10: // 4 lttpr repeaters
- return 4;
- case 0x08: // 5 lttpr repeaters
- return 5;
- case 0x04: // 6 lttpr repeaters
- return 6;
- case 0x02: // 7 lttpr repeaters
- return 7;
- case 0x01: // 8 lttpr repeaters
- return 8;
- default:
- break;
- }
- return 0; // invalid value
-}
-
-static enum dc_status configure_lttpr_mode_transparent(struct dc_link *link)
-{
- uint8_t repeater_mode = DP_PHY_REPEATER_MODE_TRANSPARENT;
-
- DC_LOG_HW_LINK_TRAINING("%s\n Set LTTPR to Transparent Mode\n", __func__);
- return core_link_write_dpcd(link,
- DP_PHY_REPEATER_MODE,
- (uint8_t *)&repeater_mode,
- sizeof(repeater_mode));
-}
-
-static enum dc_status configure_lttpr_mode_non_transparent(
- struct dc_link *link,
- const struct link_training_settings *lt_settings)
-{
- /* aux timeout is already set to extended */
- /* RESET/SET lttpr mode to enable non transparent mode */
- uint8_t repeater_cnt;
- uint32_t aux_interval_address;
- uint8_t repeater_id;
- enum dc_status result = DC_ERROR_UNEXPECTED;
- uint8_t repeater_mode = DP_PHY_REPEATER_MODE_TRANSPARENT;
-
- enum dp_link_encoding encoding = dp_get_link_encoding_format(&lt_settings->link_settings);
-
- if (encoding == DP_8b_10b_ENCODING) {
- DC_LOG_HW_LINK_TRAINING("%s\n Set LTTPR to Transparent Mode\n", __func__);
- result = core_link_write_dpcd(link,
- DP_PHY_REPEATER_MODE,
- (uint8_t *)&repeater_mode,
- sizeof(repeater_mode));
-
- }
-
- if (result == DC_OK) {
- link->dpcd_caps.lttpr_caps.mode = repeater_mode;
- }
-
- if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) {
-
- DC_LOG_HW_LINK_TRAINING("%s\n Set LTTPR to Non Transparent Mode\n", __func__);
-
- repeater_mode = DP_PHY_REPEATER_MODE_NON_TRANSPARENT;
- result = core_link_write_dpcd(link,
- DP_PHY_REPEATER_MODE,
- (uint8_t *)&repeater_mode,
- sizeof(repeater_mode));
-
- if (result == DC_OK) {
- link->dpcd_caps.lttpr_caps.mode = repeater_mode;
- }
-
- if (encoding == DP_8b_10b_ENCODING) {
- repeater_cnt = dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
-
- /* Driver does not need to train the first hop. Skip DPCD read and clear
- * AUX_RD_INTERVAL for DPTX-to-DPIA hop.
- */
- if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
- link->dpcd_caps.lttpr_caps.aux_rd_interval[--repeater_cnt] = 0;
-
- for (repeater_id = repeater_cnt; repeater_id > 0; repeater_id--) {
- aux_interval_address = DP_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER1 +
- ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (repeater_id - 1));
- core_link_read_dpcd(
- link,
- aux_interval_address,
- (uint8_t *)&link->dpcd_caps.lttpr_caps.aux_rd_interval[repeater_id - 1],
- sizeof(link->dpcd_caps.lttpr_caps.aux_rd_interval[repeater_id - 1]));
- link->dpcd_caps.lttpr_caps.aux_rd_interval[repeater_id - 1] &= 0x7F;
- }
- }
- }
-
- return result;
-}
-
-static void repeater_training_done(struct dc_link *link, uint32_t offset)
-{
- union dpcd_training_pattern dpcd_pattern = {0};
-
- const uint32_t dpcd_base_lt_offset =
- DP_TRAINING_PATTERN_SET_PHY_REPEATER1 +
- ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
- /* Set training not in progress*/
- dpcd_pattern.v1_4.TRAINING_PATTERN_SET = DPCD_TRAINING_PATTERN_VIDEOIDLE;
-
- core_link_write_dpcd(
- link,
- dpcd_base_lt_offset,
- &dpcd_pattern.raw,
- 1);
-
- DC_LOG_HW_LINK_TRAINING("%s\n LTTPR Id: %d 0x%X pattern = %x\n",
- __func__,
- offset,
- dpcd_base_lt_offset,
- dpcd_pattern.v1_4.TRAINING_PATTERN_SET);
-}
-
-static void print_status_message(
- struct dc_link *link,
- const struct link_training_settings *lt_settings,
- enum link_training_result status)
-{
- char *link_rate = "Unknown";
- char *lt_result = "Unknown";
- char *lt_spread = "Disabled";
-
- switch (lt_settings->link_settings.link_rate) {
- case LINK_RATE_LOW:
- link_rate = "RBR";
- break;
- case LINK_RATE_RATE_2:
- link_rate = "R2";
- break;
- case LINK_RATE_RATE_3:
- link_rate = "R3";
- break;
- case LINK_RATE_HIGH:
- link_rate = "HBR";
- break;
- case LINK_RATE_RBR2:
- link_rate = "RBR2";
- break;
- case LINK_RATE_RATE_6:
- link_rate = "R6";
- break;
- case LINK_RATE_HIGH2:
- link_rate = "HBR2";
- break;
- case LINK_RATE_HIGH3:
- link_rate = "HBR3";
- break;
- case LINK_RATE_UHBR10:
- link_rate = "UHBR10";
- break;
- case LINK_RATE_UHBR13_5:
- link_rate = "UHBR13.5";
- break;
- case LINK_RATE_UHBR20:
- link_rate = "UHBR20";
- break;
- default:
- break;
- }
-
- switch (status) {
- case LINK_TRAINING_SUCCESS:
- lt_result = "pass";
- break;
- case LINK_TRAINING_CR_FAIL_LANE0:
- lt_result = "CR failed lane0";
- break;
- case LINK_TRAINING_CR_FAIL_LANE1:
- lt_result = "CR failed lane1";
- break;
- case LINK_TRAINING_CR_FAIL_LANE23:
- lt_result = "CR failed lane23";
- break;
- case LINK_TRAINING_EQ_FAIL_CR:
- lt_result = "CR failed in EQ";
- break;
- case LINK_TRAINING_EQ_FAIL_CR_PARTIAL:
- lt_result = "CR failed in EQ partially";
- break;
- case LINK_TRAINING_EQ_FAIL_EQ:
- lt_result = "EQ failed";
- break;
- case LINK_TRAINING_LQA_FAIL:
- lt_result = "LQA failed";
- break;
- case LINK_TRAINING_LINK_LOSS:
- lt_result = "Link loss";
- break;
- case DP_128b_132b_LT_FAILED:
- lt_result = "LT_FAILED received";
- break;
- case DP_128b_132b_MAX_LOOP_COUNT_REACHED:
- lt_result = "max loop count reached";
- break;
- case DP_128b_132b_CHANNEL_EQ_DONE_TIMEOUT:
- lt_result = "channel EQ timeout";
- break;
- case DP_128b_132b_CDS_DONE_TIMEOUT:
- lt_result = "CDS timeout";
- break;
- default:
- break;
- }
-
- switch (lt_settings->link_settings.link_spread) {
- case LINK_SPREAD_DISABLED:
- lt_spread = "Disabled";
- break;
- case LINK_SPREAD_05_DOWNSPREAD_30KHZ:
- lt_spread = "0.5% 30KHz";
- break;
- case LINK_SPREAD_05_DOWNSPREAD_33KHZ:
- lt_spread = "0.5% 33KHz";
- break;
- default:
- break;
- }
-
- /* Connectivity log: link training */
-
- /* TODO - DP2.0 Log: add connectivity log for FFE PRESET */
-
- CONN_MSG_LT(link, "%sx%d %s VS=%d, PE=%d, DS=%s",
- link_rate,
- lt_settings->link_settings.lane_count,
- lt_result,
- lt_settings->hw_lane_settings[0].VOLTAGE_SWING,
- lt_settings->hw_lane_settings[0].PRE_EMPHASIS,
- lt_spread);
-}
-
-void dc_link_dp_set_drive_settings(
- struct dc_link *link,
- const struct link_resource *link_res,
- struct link_training_settings *lt_settings)
-{
- /* program ASIC PHY settings*/
- dp_set_hw_lane_settings(link, link_res, lt_settings, DPRX);
-
- dp_hw_to_dpcd_lane_settings(lt_settings,
- lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
-
- /* Notify DP sink the PHY settings from source */
- dpcd_set_lane_settings(link, lt_settings, DPRX);
-}
-
-bool dc_link_dp_perform_link_training_skip_aux(
- struct dc_link *link,
- const struct link_resource *link_res,
- const struct dc_link_settings *link_setting)
-{
- struct link_training_settings lt_settings = {0};
-
- dp_decide_training_settings(
- link,
- link_setting,
- &lt_settings);
- override_training_settings(
- link,
- &link->preferred_training_settings,
- &lt_settings);
-
- /* 1. Perform_clock_recovery_sequence. */
-
- /* transmit training pattern for clock recovery */
- dp_set_hw_training_pattern(link, link_res, lt_settings.pattern_for_cr, DPRX);
-
- /* call HWSS to set lane settings*/
- dp_set_hw_lane_settings(link, link_res, &lt_settings, DPRX);
-
- /* wait receiver to lock-on*/
- dp_wait_for_training_aux_rd_interval(link, lt_settings.cr_pattern_time);
-
- /* 2. Perform_channel_equalization_sequence. */
-
- /* transmit training pattern for channel equalization. */
- dp_set_hw_training_pattern(link, link_res, lt_settings.pattern_for_eq, DPRX);
-
- /* call HWSS to set lane settings*/
- dp_set_hw_lane_settings(link, link_res, &lt_settings, DPRX);
-
- /* wait receiver to lock-on. */
- dp_wait_for_training_aux_rd_interval(link, lt_settings.eq_pattern_time);
-
- /* 3. Perform_link_training_int. */
-
- /* Mainlink output idle pattern. */
- dp_set_hw_test_pattern(link, link_res, DP_TEST_PATTERN_VIDEO_MODE, NULL, 0);
-
- print_status_message(link, &lt_settings, LINK_TRAINING_SUCCESS);
-
- return true;
-}
-
-enum dc_status dpcd_configure_lttpr_mode(struct dc_link *link, struct link_training_settings *lt_settings)
-{
- enum dc_status status = DC_OK;
-
- if (lt_settings->lttpr_mode == LTTPR_MODE_TRANSPARENT)
- status = configure_lttpr_mode_transparent(link);
-
- else if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT)
- status = configure_lttpr_mode_non_transparent(link, lt_settings);
-
- return status;
-}
-
-static void dpcd_exit_training_mode(struct dc_link *link, enum dp_link_encoding encoding)
-{
- uint8_t sink_status = 0;
- uint8_t i;
-
- /* clear training pattern set */
- dpcd_set_training_pattern(link, DP_TRAINING_PATTERN_VIDEOIDLE);
-
- if (encoding == DP_128b_132b_ENCODING) {
- /* poll for intra-hop disable */
- for (i = 0; i < 10; i++) {
- if ((core_link_read_dpcd(link, DP_SINK_STATUS, &sink_status, 1) == DC_OK) &&
- (sink_status & DP_INTRA_HOP_AUX_REPLY_INDICATION) == 0)
- break;
- udelay(1000);
- }
- }
-}
-
-enum dc_status dpcd_configure_channel_coding(struct dc_link *link,
- struct link_training_settings *lt_settings)
-{
- enum dp_link_encoding encoding =
- dp_get_link_encoding_format(
- &lt_settings->link_settings);
- enum dc_status status;
-
- status = core_link_write_dpcd(
- link,
- DP_MAIN_LINK_CHANNEL_CODING_SET,
- (uint8_t *) &encoding,
- 1);
- DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X MAIN_LINK_CHANNEL_CODING_SET = %x\n",
- __func__,
- DP_MAIN_LINK_CHANNEL_CODING_SET,
- encoding);
-
- return status;
-}
-
-static void dpcd_128b_132b_get_aux_rd_interval(struct dc_link *link,
- uint32_t *interval_in_us)
-{
- union dp_128b_132b_training_aux_rd_interval dpcd_interval;
- uint32_t interval_unit = 0;
-
- dpcd_interval.raw = 0;
- core_link_read_dpcd(link, DP_128b_132b_TRAINING_AUX_RD_INTERVAL,
- &dpcd_interval.raw, sizeof(dpcd_interval.raw));
- interval_unit = dpcd_interval.bits.UNIT ? 1 : 2; /* 0b = 2 ms, 1b = 1 ms */
- /* (128b/132b_TRAINING_AUX_RD_INTERVAL value + 1) *
- * INTERVAL_UNIT. The maximum is 256 ms
- */
- *interval_in_us = (dpcd_interval.bits.VALUE + 1) * interval_unit * 1000;
-}
-
-static enum link_training_result dp_perform_128b_132b_channel_eq_done_sequence(
- struct dc_link *link,
- const struct link_resource *link_res,
- struct link_training_settings *lt_settings)
-{
- uint8_t loop_count;
- uint32_t aux_rd_interval = 0;
- uint32_t wait_time = 0;
- union lane_align_status_updated dpcd_lane_status_updated = {0};
- union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0};
- enum dc_status status = DC_OK;
- enum link_training_result result = LINK_TRAINING_SUCCESS;
- union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0};
-
- /* Transmit 128b/132b_TPS1 over Main-Link */
- dp_set_hw_training_pattern(link, link_res, lt_settings->pattern_for_cr, DPRX);
- /* Set TRAINING_PATTERN_SET to 01h */
- dpcd_set_training_pattern(link, lt_settings->pattern_for_cr);
-
- /* Adjust TX_FFE_PRESET_VALUE and Transmit 128b/132b_TPS2 over Main-Link */
- dpcd_128b_132b_get_aux_rd_interval(link, &aux_rd_interval);
- dp_get_lane_status_and_lane_adjust(link, lt_settings, dpcd_lane_status,
- &dpcd_lane_status_updated, dpcd_lane_adjust, DPRX);
- dp_decide_lane_settings(lt_settings, dpcd_lane_adjust,
- lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
- dp_set_hw_lane_settings(link, link_res, lt_settings, DPRX);
- dp_set_hw_training_pattern(link, link_res, lt_settings->pattern_for_eq, DPRX);
-
- /* Set loop counter to start from 1 */
- loop_count = 1;
-
- /* Set TRAINING_PATTERN_SET to 02h and TX_FFE_PRESET_VALUE in one AUX transaction */
- dpcd_set_lt_pattern_and_lane_settings(link, lt_settings,
- lt_settings->pattern_for_eq, DPRX);
-
- /* poll for channel EQ done */
- while (result == LINK_TRAINING_SUCCESS) {
- dp_wait_for_training_aux_rd_interval(link, aux_rd_interval);
- wait_time += aux_rd_interval;
- status = dp_get_lane_status_and_lane_adjust(link, lt_settings, dpcd_lane_status,
- &dpcd_lane_status_updated, dpcd_lane_adjust, DPRX);
- dp_decide_lane_settings(lt_settings, dpcd_lane_adjust,
- lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
- dpcd_128b_132b_get_aux_rd_interval(link, &aux_rd_interval);
- if (status != DC_OK) {
- result = LINK_TRAINING_ABORT;
- } else if (dp_is_ch_eq_done(lt_settings->link_settings.lane_count,
- dpcd_lane_status)) {
- /* pass */
- break;
- } else if (loop_count >= lt_settings->eq_loop_count_limit) {
- result = DP_128b_132b_MAX_LOOP_COUNT_REACHED;
- } else if (dpcd_lane_status_updated.bits.LT_FAILED_128b_132b) {
- result = DP_128b_132b_LT_FAILED;
- } else {
- dp_set_hw_lane_settings(link, link_res, lt_settings, DPRX);
- dpcd_128b_132b_set_lane_settings(link, lt_settings);
- }
- loop_count++;
- }
-
- /* poll for EQ interlane align done */
- while (result == LINK_TRAINING_SUCCESS) {
- if (status != DC_OK) {
- result = LINK_TRAINING_ABORT;
- } else if (dpcd_lane_status_updated.bits.EQ_INTERLANE_ALIGN_DONE_128b_132b) {
- /* pass */
- break;
- } else if (wait_time >= lt_settings->eq_wait_time_limit) {
- result = DP_128b_132b_CHANNEL_EQ_DONE_TIMEOUT;
- } else if (dpcd_lane_status_updated.bits.LT_FAILED_128b_132b) {
- result = DP_128b_132b_LT_FAILED;
- } else {
- dp_wait_for_training_aux_rd_interval(link,
- lt_settings->eq_pattern_time);
- wait_time += lt_settings->eq_pattern_time;
- status = dp_get_lane_status_and_lane_adjust(link, lt_settings, dpcd_lane_status,
- &dpcd_lane_status_updated, dpcd_lane_adjust, DPRX);
- }
- }
-
- return result;
-}
-
-static enum link_training_result dp_perform_128b_132b_cds_done_sequence(
- struct dc_link *link,
- const struct link_resource *link_res,
- struct link_training_settings *lt_settings)
-{
- /* Assumption: assume hardware has transmitted eq pattern */
- enum dc_status status = DC_OK;
- enum link_training_result result = LINK_TRAINING_SUCCESS;
- union lane_align_status_updated dpcd_lane_status_updated = {0};
- union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0};
- union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0};
- uint32_t wait_time = 0;
-
- /* initiate CDS done sequence */
- dpcd_set_training_pattern(link, lt_settings->pattern_for_cds);
-
- /* poll for CDS interlane align done and symbol lock */
- while (result == LINK_TRAINING_SUCCESS) {
- dp_wait_for_training_aux_rd_interval(link,
- lt_settings->cds_pattern_time);
- wait_time += lt_settings->cds_pattern_time;
- status = dp_get_lane_status_and_lane_adjust(link, lt_settings, dpcd_lane_status,
- &dpcd_lane_status_updated, dpcd_lane_adjust, DPRX);
- if (status != DC_OK) {
- result = LINK_TRAINING_ABORT;
- } else if (dp_is_symbol_locked(lt_settings->link_settings.lane_count, dpcd_lane_status) &&
- dpcd_lane_status_updated.bits.CDS_INTERLANE_ALIGN_DONE_128b_132b) {
- /* pass */
- break;
- } else if (dpcd_lane_status_updated.bits.LT_FAILED_128b_132b) {
- result = DP_128b_132b_LT_FAILED;
- } else if (wait_time >= lt_settings->cds_wait_time_limit) {
- result = DP_128b_132b_CDS_DONE_TIMEOUT;
- }
- }
-
- return result;
-}
-
-static enum link_training_result dp_perform_8b_10b_link_training(
- struct dc_link *link,
- const struct link_resource *link_res,
- struct link_training_settings *lt_settings)
-{
- enum link_training_result status = LINK_TRAINING_SUCCESS;
-
- uint8_t repeater_cnt;
- uint8_t repeater_id;
- uint8_t lane = 0;
-
- if (link->ctx->dc->work_arounds.lt_early_cr_pattern)
- start_clock_recovery_pattern_early(link, link_res, lt_settings, DPRX);
-
- /* 1. set link rate, lane count and spread. */
- dpcd_set_link_settings(link, lt_settings);
-
- if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) {
-
- /* 2. perform link training (set link training done
- * to false is done as well)
- */
- repeater_cnt = dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
-
- for (repeater_id = repeater_cnt; (repeater_id > 0 && status == LINK_TRAINING_SUCCESS);
- repeater_id--) {
- status = perform_clock_recovery_sequence(link, link_res, lt_settings, repeater_id);
-
- if (status != LINK_TRAINING_SUCCESS) {
- repeater_training_done(link, repeater_id);
- break;
- }
-
- status = perform_channel_equalization_sequence(link,
- link_res,
- lt_settings,
- repeater_id);
-
- repeater_training_done(link, repeater_id);
-
- if (status != LINK_TRAINING_SUCCESS)
- break;
-
- for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) {
- lt_settings->dpcd_lane_settings[lane].raw = 0;
- lt_settings->hw_lane_settings[lane].VOLTAGE_SWING = 0;
- lt_settings->hw_lane_settings[lane].PRE_EMPHASIS = 0;
- }
- }
- }
-
- if (status == LINK_TRAINING_SUCCESS) {
- status = perform_clock_recovery_sequence(link, link_res, lt_settings, DPRX);
- if (status == LINK_TRAINING_SUCCESS) {
- status = perform_channel_equalization_sequence(link,
- link_res,
- lt_settings,
- DPRX);
- }
- }
-
- return status;
-}
-
-static enum link_training_result dp_perform_128b_132b_link_training(
- struct dc_link *link,
- const struct link_resource *link_res,
- struct link_training_settings *lt_settings)
-{
- enum link_training_result result = LINK_TRAINING_SUCCESS;
-
- /* TODO - DP2.0 Link: remove legacy_dp2_lt logic */
- if (link->dc->debug.legacy_dp2_lt) {
- struct link_training_settings legacy_settings;
-
- decide_8b_10b_training_settings(link,
- &lt_settings->link_settings,
- &legacy_settings);
- return dp_perform_8b_10b_link_training(link, link_res, &legacy_settings);
- }
-
- dpcd_set_link_settings(link, lt_settings);
-
- if (result == LINK_TRAINING_SUCCESS)
- result = dp_perform_128b_132b_channel_eq_done_sequence(link, link_res, lt_settings);
-
- if (result == LINK_TRAINING_SUCCESS)
- result = dp_perform_128b_132b_cds_done_sequence(link, link_res, lt_settings);
-
- return result;
-}
-
-static enum link_training_result perform_fixed_vs_pe_nontransparent_training_sequence(
- struct dc_link *link,
- const struct link_resource *link_res,
- struct link_training_settings *lt_settings)
-{
- enum link_training_result status = LINK_TRAINING_SUCCESS;
- uint8_t lane = 0;
- uint8_t toggle_rate = 0x6;
- uint8_t target_rate = 0x6;
- bool apply_toggle_rate_wa = false;
- uint8_t repeater_cnt;
- uint8_t repeater_id;
-
- /* Fixed VS/PE specific: Force CR AUX RD Interval to at least 16ms */
- if (lt_settings->cr_pattern_time < 16000)
- lt_settings->cr_pattern_time = 16000;
-
- /* Fixed VS/PE specific: Toggle link rate */
- apply_toggle_rate_wa = (link->vendor_specific_lttpr_link_rate_wa == target_rate);
- target_rate = get_dpcd_link_rate(&lt_settings->link_settings);
- toggle_rate = (target_rate == 0x6) ? 0xA : 0x6;
-
- if (apply_toggle_rate_wa)
- lt_settings->link_settings.link_rate = toggle_rate;
-
- if (link->ctx->dc->work_arounds.lt_early_cr_pattern)
- start_clock_recovery_pattern_early(link, link_res, lt_settings, DPRX);
-
- /* 1. set link rate, lane count and spread. */
- dpcd_set_link_settings(link, lt_settings);
-
- /* Fixed VS/PE specific: Toggle link rate back*/
- if (apply_toggle_rate_wa) {
- core_link_write_dpcd(
- link,
- DP_LINK_BW_SET,
- &target_rate,
- 1);
- }
-
- link->vendor_specific_lttpr_link_rate_wa = target_rate;
-
- if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) {
-
- /* 2. perform link training (set link training done
- * to false is done as well)
- */
- repeater_cnt = dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
-
- for (repeater_id = repeater_cnt; (repeater_id > 0 && status == LINK_TRAINING_SUCCESS);
- repeater_id--) {
- status = perform_clock_recovery_sequence(link, link_res, lt_settings, repeater_id);
-
- if (status != LINK_TRAINING_SUCCESS) {
- repeater_training_done(link, repeater_id);
- break;
- }
-
- status = perform_channel_equalization_sequence(link,
- link_res,
- lt_settings,
- repeater_id);
-
- repeater_training_done(link, repeater_id);
-
- if (status != LINK_TRAINING_SUCCESS)
- break;
-
- for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) {
- lt_settings->dpcd_lane_settings[lane].raw = 0;
- lt_settings->hw_lane_settings[lane].VOLTAGE_SWING = 0;
- lt_settings->hw_lane_settings[lane].PRE_EMPHASIS = 0;
- }
- }
- }
-
- if (status == LINK_TRAINING_SUCCESS) {
- status = perform_clock_recovery_sequence(link, link_res, lt_settings, DPRX);
- if (status == LINK_TRAINING_SUCCESS) {
- status = perform_channel_equalization_sequence(link,
- link_res,
- lt_settings,
- DPRX);
- }
- }
-
- return status;
-}
-
-static enum link_training_result dp_perform_fixed_vs_pe_training_sequence(
- struct dc_link *link,
- const struct link_resource *link_res,
- struct link_training_settings *lt_settings)
-{
- const uint8_t vendor_lttpr_write_data_reset[4] = {0x1, 0x50, 0x63, 0xFF};
- const uint8_t offset = dp_convert_to_count(
- link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
- const uint8_t vendor_lttpr_write_data_intercept_en[4] = {0x1, 0x55, 0x63, 0x0};
- const uint8_t vendor_lttpr_write_data_intercept_dis[4] = {0x1, 0x55, 0x63, 0x68};
- uint32_t pre_disable_intercept_delay_ms = link->dc->debug.fixed_vs_aux_delay_config_wa;
- uint8_t vendor_lttpr_write_data_vs[4] = {0x1, 0x51, 0x63, 0x0};
- uint8_t vendor_lttpr_write_data_pe[4] = {0x1, 0x52, 0x63, 0x0};
- uint32_t vendor_lttpr_write_address = 0xF004F;
- enum link_training_result status = LINK_TRAINING_SUCCESS;
- uint8_t lane = 0;
- union down_spread_ctrl downspread = {0};
- union lane_count_set lane_count_set = {0};
- uint8_t toggle_rate;
- uint8_t rate;
-
- /* Only 8b/10b is supported */
- ASSERT(dp_get_link_encoding_format(&lt_settings->link_settings) ==
- DP_8b_10b_ENCODING);
-
- if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) {
- status = perform_fixed_vs_pe_nontransparent_training_sequence(link, link_res, lt_settings);
- return status;
- }
-
- if (offset != 0xFF) {
- vendor_lttpr_write_address +=
- ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
-
- /* Certain display and cable configuration require extra delay */
- if (offset > 2)
- pre_disable_intercept_delay_ms = link->dc->debug.fixed_vs_aux_delay_config_wa * 2;
- }
-
- /* Vendor specific: Reset lane settings */
- core_link_write_dpcd(
- link,
- vendor_lttpr_write_address,
- &vendor_lttpr_write_data_reset[0],
- sizeof(vendor_lttpr_write_data_reset));
- core_link_write_dpcd(
- link,
- vendor_lttpr_write_address,
- &vendor_lttpr_write_data_vs[0],
- sizeof(vendor_lttpr_write_data_vs));
- core_link_write_dpcd(
- link,
- vendor_lttpr_write_address,
- &vendor_lttpr_write_data_pe[0],
- sizeof(vendor_lttpr_write_data_pe));
-
- /* Vendor specific: Enable intercept */
- core_link_write_dpcd(
- link,
- vendor_lttpr_write_address,
- &vendor_lttpr_write_data_intercept_en[0],
- sizeof(vendor_lttpr_write_data_intercept_en));
-
- /* 1. set link rate, lane count and spread. */
-
- downspread.raw = (uint8_t)(lt_settings->link_settings.link_spread);
-
- lane_count_set.bits.LANE_COUNT_SET =
- lt_settings->link_settings.lane_count;
-
- lane_count_set.bits.ENHANCED_FRAMING = lt_settings->enhanced_framing;
- lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED = 0;
-
-
- if (lt_settings->pattern_for_eq < DP_TRAINING_PATTERN_SEQUENCE_4) {
- lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED =
- link->dpcd_caps.max_ln_count.bits.POST_LT_ADJ_REQ_SUPPORTED;
- }
-
- core_link_write_dpcd(link, DP_DOWNSPREAD_CTRL,
- &downspread.raw, sizeof(downspread));
-
- core_link_write_dpcd(link, DP_LANE_COUNT_SET,
- &lane_count_set.raw, 1);
-
- rate = get_dpcd_link_rate(&lt_settings->link_settings);
-
- /* Vendor specific: Toggle link rate */
- toggle_rate = (rate == 0x6) ? 0xA : 0x6;
-
- if (link->vendor_specific_lttpr_link_rate_wa == rate) {
- core_link_write_dpcd(
- link,
- DP_LINK_BW_SET,
- &toggle_rate,
- 1);
- }
-
- link->vendor_specific_lttpr_link_rate_wa = rate;
-
- core_link_write_dpcd(link, DP_LINK_BW_SET, &rate, 1);
-
- DC_LOG_HW_LINK_TRAINING("%s\n %x rate = %x\n %x lane = %x framing = %x\n %x spread = %x\n",
- __func__,
- DP_LINK_BW_SET,
- lt_settings->link_settings.link_rate,
- DP_LANE_COUNT_SET,
- lt_settings->link_settings.lane_count,
- lt_settings->enhanced_framing,
- DP_DOWNSPREAD_CTRL,
- lt_settings->link_settings.link_spread);
-
- /* 2. Perform link training */
-
- /* Perform Clock Recovery Sequence */
- if (status == LINK_TRAINING_SUCCESS) {
- const uint8_t max_vendor_dpcd_retries = 10;
- uint32_t retries_cr;
- uint32_t retry_count;
- uint32_t wait_time_microsec;
- enum dc_lane_count lane_count = lt_settings->link_settings.lane_count;
- union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX];
- union lane_align_status_updated dpcd_lane_status_updated;
- union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0};
- enum dc_status dpcd_status = DC_OK;
- uint8_t i = 0;
-
- retries_cr = 0;
- retry_count = 0;
-
- memset(&dpcd_lane_status, '\0', sizeof(dpcd_lane_status));
- memset(&dpcd_lane_status_updated, '\0',
- sizeof(dpcd_lane_status_updated));
-
- while ((retries_cr < LINK_TRAINING_MAX_RETRY_COUNT) &&
- (retry_count < LINK_TRAINING_MAX_CR_RETRY)) {
-
-
- /* 1. call HWSS to set lane settings */
- dp_set_hw_lane_settings(
- link,
- link_res,
- lt_settings,
- 0);
-
- /* 2. update DPCD of the receiver */
- if (!retry_count) {
- /* EPR #361076 - write as a 5-byte burst,
- * but only for the 1-st iteration.
- */
- dpcd_set_lt_pattern_and_lane_settings(
- link,
- lt_settings,
- lt_settings->pattern_for_cr,
- 0);
- /* Vendor specific: Disable intercept */
- for (i = 0; i < max_vendor_dpcd_retries; i++) {
- msleep(pre_disable_intercept_delay_ms);
- dpcd_status = core_link_write_dpcd(
- link,
- vendor_lttpr_write_address,
- &vendor_lttpr_write_data_intercept_dis[0],
- sizeof(vendor_lttpr_write_data_intercept_dis));
-
- if (dpcd_status == DC_OK)
- break;
-
- core_link_write_dpcd(
- link,
- vendor_lttpr_write_address,
- &vendor_lttpr_write_data_intercept_en[0],
- sizeof(vendor_lttpr_write_data_intercept_en));
- }
- } else {
- vendor_lttpr_write_data_vs[3] = 0;
- vendor_lttpr_write_data_pe[3] = 0;
-
- for (lane = 0; lane < lane_count; lane++) {
- vendor_lttpr_write_data_vs[3] |=
- lt_settings->dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET << (2 * lane);
- vendor_lttpr_write_data_pe[3] |=
- lt_settings->dpcd_lane_settings[lane].bits.PRE_EMPHASIS_SET << (2 * lane);
- }
-
- /* Vendor specific: Update VS and PE to DPRX requested value */
- core_link_write_dpcd(
- link,
- vendor_lttpr_write_address,
- &vendor_lttpr_write_data_vs[0],
- sizeof(vendor_lttpr_write_data_vs));
- core_link_write_dpcd(
- link,
- vendor_lttpr_write_address,
- &vendor_lttpr_write_data_pe[0],
- sizeof(vendor_lttpr_write_data_pe));
-
- dpcd_set_lane_settings(
- link,
- lt_settings,
- 0);
- }
-
- /* 3. wait receiver to lock-on*/
- wait_time_microsec = lt_settings->cr_pattern_time;
-
- dp_wait_for_training_aux_rd_interval(
- link,
- wait_time_microsec);
-
- /* 4. Read lane status and requested drive
- * settings as set by the sink
- */
- dp_get_lane_status_and_lane_adjust(
- link,
- lt_settings,
- dpcd_lane_status,
- &dpcd_lane_status_updated,
- dpcd_lane_adjust,
- 0);
-
- /* 5. check CR done*/
- if (dp_is_cr_done(lane_count, dpcd_lane_status)) {
- status = LINK_TRAINING_SUCCESS;
- break;
- }
-
- /* 6. max VS reached*/
- if (dp_is_max_vs_reached(lt_settings))
- break;
-
- /* 7. same lane settings */
- /* Note: settings are the same for all lanes,
- * so comparing first lane is sufficient
- */
- if (lt_settings->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET ==
- dpcd_lane_adjust[0].bits.VOLTAGE_SWING_LANE)
- retries_cr++;
- else
- retries_cr = 0;
-
- /* 8. update VS/PE/PC2 in lt_settings*/
- dp_decide_lane_settings(lt_settings, dpcd_lane_adjust,
- lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
- retry_count++;
- }
-
- if (retry_count >= LINK_TRAINING_MAX_CR_RETRY) {
- ASSERT(0);
- DC_LOG_ERROR("%s: Link Training Error, could not get CR after %d tries. Possibly voltage swing issue",
- __func__,
- LINK_TRAINING_MAX_CR_RETRY);
-
- }
-
- status = dp_get_cr_failure(lane_count, dpcd_lane_status);
- }
-
- /* Perform Channel EQ Sequence */
- if (status == LINK_TRAINING_SUCCESS) {
- enum dc_dp_training_pattern tr_pattern;
- uint32_t retries_ch_eq;
- uint32_t wait_time_microsec;
- enum dc_lane_count lane_count = lt_settings->link_settings.lane_count;
- union lane_align_status_updated dpcd_lane_status_updated = {0};
- union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0};
- union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0};
-
- /* Note: also check that TPS4 is a supported feature*/
- tr_pattern = lt_settings->pattern_for_eq;
-
- dp_set_hw_training_pattern(link, link_res, tr_pattern, 0);
-
- status = LINK_TRAINING_EQ_FAIL_EQ;
-
- for (retries_ch_eq = 0; retries_ch_eq <= LINK_TRAINING_MAX_RETRY_COUNT;
- retries_ch_eq++) {
-
- dp_set_hw_lane_settings(link, link_res, lt_settings, 0);
-
- vendor_lttpr_write_data_vs[3] = 0;
- vendor_lttpr_write_data_pe[3] = 0;
-
- for (lane = 0; lane < lane_count; lane++) {
- vendor_lttpr_write_data_vs[3] |=
- lt_settings->dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET << (2 * lane);
- vendor_lttpr_write_data_pe[3] |=
- lt_settings->dpcd_lane_settings[lane].bits.PRE_EMPHASIS_SET << (2 * lane);
- }
-
- /* Vendor specific: Update VS and PE to DPRX requested value */
- core_link_write_dpcd(
- link,
- vendor_lttpr_write_address,
- &vendor_lttpr_write_data_vs[0],
- sizeof(vendor_lttpr_write_data_vs));
- core_link_write_dpcd(
- link,
- vendor_lttpr_write_address,
- &vendor_lttpr_write_data_pe[0],
- sizeof(vendor_lttpr_write_data_pe));
-
- /* 2. update DPCD*/
- if (!retries_ch_eq)
- /* EPR #361076 - write as a 5-byte burst,
- * but only for the 1-st iteration
- */
-
- dpcd_set_lt_pattern_and_lane_settings(
- link,
- lt_settings,
- tr_pattern, 0);
- else
- dpcd_set_lane_settings(link, lt_settings, 0);
-
- /* 3. wait for receiver to lock-on*/
- wait_time_microsec = lt_settings->eq_pattern_time;
-
- dp_wait_for_training_aux_rd_interval(
- link,
- wait_time_microsec);
-
- /* 4. Read lane status and requested
- * drive settings as set by the sink
- */
- dp_get_lane_status_and_lane_adjust(
- link,
- lt_settings,
- dpcd_lane_status,
- &dpcd_lane_status_updated,
- dpcd_lane_adjust,
- 0);
-
- /* 5. check CR done*/
- if (!dp_is_cr_done(lane_count, dpcd_lane_status)) {
- status = LINK_TRAINING_EQ_FAIL_CR;
- break;
- }
-
- /* 6. check CHEQ done*/
- if (dp_is_ch_eq_done(lane_count, dpcd_lane_status) &&
- dp_is_symbol_locked(lane_count, dpcd_lane_status) &&
- dp_is_interlane_aligned(dpcd_lane_status_updated)) {
- status = LINK_TRAINING_SUCCESS;
- break;
- }
-
- /* 7. update VS/PE/PC2 in lt_settings*/
- dp_decide_lane_settings(lt_settings, dpcd_lane_adjust,
- lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
- }
- }
-
- return status;
-}
-
-
-enum link_training_result dc_link_dp_perform_link_training(
- struct dc_link *link,
- const struct link_resource *link_res,
- const struct dc_link_settings *link_settings,
- bool skip_video_pattern)
-{
- enum link_training_result status = LINK_TRAINING_SUCCESS;
- struct link_training_settings lt_settings = {0};
- enum dp_link_encoding encoding =
- dp_get_link_encoding_format(link_settings);
-
- /* decide training settings */
- dp_decide_training_settings(
- link,
- link_settings,
- &lt_settings);
-
- override_training_settings(
- link,
- &link->preferred_training_settings,
- &lt_settings);
-
- /* reset previous training states */
- dpcd_exit_training_mode(link, encoding);
-
- /* configure link prior to entering training mode */
- dpcd_configure_lttpr_mode(link, &lt_settings);
- dp_set_fec_ready(link, link_res, lt_settings.should_set_fec_ready);
- dpcd_configure_channel_coding(link, &lt_settings);
-
- /* enter training mode:
- * Per DP specs starting from here, DPTX device shall not issue
- * Non-LT AUX transactions inside training mode.
- */
- if (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN && encoding == DP_8b_10b_ENCODING)
- status = dp_perform_fixed_vs_pe_training_sequence(link, link_res, &lt_settings);
- else if (encoding == DP_8b_10b_ENCODING)
- status = dp_perform_8b_10b_link_training(link, link_res, &lt_settings);
- else if (encoding == DP_128b_132b_ENCODING)
- status = dp_perform_128b_132b_link_training(link, link_res, &lt_settings);
- else
- ASSERT(0);
-
- /* exit training mode */
- dpcd_exit_training_mode(link, encoding);
-
- /* switch to video idle */
- if ((status == LINK_TRAINING_SUCCESS) || !skip_video_pattern)
- status = dp_transition_to_video_idle(link,
- link_res,
- &lt_settings,
- status);
-
- /* dump debug data */
- print_status_message(link, &lt_settings, status);
- if (status != LINK_TRAINING_SUCCESS)
- link->ctx->dc->debug_data.ltFailCount++;
- return status;
-}
-
-bool perform_link_training_with_retries(
- const struct dc_link_settings *link_setting,
- bool skip_video_pattern,
- int attempts,
- struct pipe_ctx *pipe_ctx,
- enum signal_type signal,
- bool do_fallback)
-{
- int j;
- uint8_t delay_between_attempts = LINK_TRAINING_RETRY_DELAY;
- struct dc_stream_state *stream = pipe_ctx->stream;
- struct dc_link *link = stream->link;
- enum dp_panel_mode panel_mode = dp_get_panel_mode(link);
- enum link_training_result status = LINK_TRAINING_CR_FAIL_LANE0;
- struct dc_link_settings cur_link_settings = *link_setting;
- struct dc_link_settings max_link_settings = *link_setting;
- const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
- int fail_count = 0;
- bool is_link_bw_low = false; /* link bandwidth < stream bandwidth */
- bool is_link_bw_min = /* RBR x 1 */
- (cur_link_settings.link_rate <= LINK_RATE_LOW) &&
- (cur_link_settings.lane_count <= LANE_COUNT_ONE);
-
- dp_trace_commit_lt_init(link);
-
- if (dp_get_link_encoding_format(&cur_link_settings) == DP_8b_10b_ENCODING)
- /* We need to do this before the link training to ensure the idle
- * pattern in SST mode will be sent right after the link training
- */
- link_hwss->setup_stream_encoder(pipe_ctx);
-
- dp_trace_set_lt_start_timestamp(link, false);
- j = 0;
- while (j < attempts && fail_count < (attempts * 10)) {
-
- DC_LOG_HW_LINK_TRAINING("%s: Beginning link(%d) training attempt %u of %d @ rate(%d) x lane(%d)\n",
- __func__, link->link_index, (unsigned int)j + 1, attempts, cur_link_settings.link_rate,
- cur_link_settings.lane_count);
-
- dp_enable_link_phy(
- link,
- &pipe_ctx->link_res,
- signal,
- pipe_ctx->clock_source->id,
- &cur_link_settings);
-
- if (stream->sink_patches.dppowerup_delay > 0) {
- int delay_dp_power_up_in_ms = stream->sink_patches.dppowerup_delay;
-
- msleep(delay_dp_power_up_in_ms);
- }
-
-#ifdef CONFIG_DRM_AMD_DC_HDCP
- if (panel_mode == DP_PANEL_MODE_EDP) {
- struct cp_psp *cp_psp = &stream->ctx->cp_psp;
-
- if (cp_psp && cp_psp->funcs.enable_assr)
- /* ASSR is bound to fail with unsigned PSP
- * verstage used during devlopment phase.
- * Report and continue with eDP panel mode to
- * perform eDP link training with right settings
- */
- cp_psp->funcs.enable_assr(cp_psp->handle, link);
- }
-#endif
-
- dp_set_panel_mode(link, panel_mode);
-
- if (link->aux_access_disabled) {
- dc_link_dp_perform_link_training_skip_aux(link, &pipe_ctx->link_res, &cur_link_settings);
- return true;
- } else {
- /** @todo Consolidate USB4 DP and DPx.x training. */
- if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) {
- status = dc_link_dpia_perform_link_training(link,
- &pipe_ctx->link_res,
- &cur_link_settings,
- skip_video_pattern);
-
- /* Transmit idle pattern once training successful. */
- if (status == LINK_TRAINING_SUCCESS && !is_link_bw_low) {
- dp_set_hw_test_pattern(link, &pipe_ctx->link_res, DP_TEST_PATTERN_VIDEO_MODE, NULL, 0);
- /* Update verified link settings to current one
- * Because DPIA LT might fallback to lower link setting.
- */
- if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
- link->verified_link_cap.link_rate = link->cur_link_settings.link_rate;
- link->verified_link_cap.lane_count = link->cur_link_settings.lane_count;
- dm_helpers_dp_mst_update_branch_bandwidth(link->ctx, link);
- }
- }
- } else {
- status = dc_link_dp_perform_link_training(link,
- &pipe_ctx->link_res,
- &cur_link_settings,
- skip_video_pattern);
- }
-
- dp_trace_lt_total_count_increment(link, false);
- dp_trace_lt_result_update(link, status, false);
- dp_trace_set_lt_end_timestamp(link, false);
- if (status == LINK_TRAINING_SUCCESS && !is_link_bw_low)
- return true;
- }
-
- fail_count++;
- dp_trace_lt_fail_count_update(link, fail_count, false);
- if (link->ep_type == DISPLAY_ENDPOINT_PHY) {
- /* latest link training still fail or link training is aborted
- * skip delay and keep PHY on
- */
- if (j == (attempts - 1) || (status == LINK_TRAINING_ABORT))
- break;
- }
-
- DC_LOG_WARNING("%s: Link(%d) training attempt %u of %d failed @ rate(%d) x lane(%d) : fail reason:(%d)\n",
- __func__, link->link_index, (unsigned int)j + 1, attempts, cur_link_settings.link_rate,
- cur_link_settings.lane_count, status);
-
- dp_disable_link_phy(link, &pipe_ctx->link_res, signal);
-
- /* Abort link training if failure due to sink being unplugged. */
- if (status == LINK_TRAINING_ABORT) {
- enum dc_connection_type type = dc_connection_none;
-
- dc_link_detect_sink(link, &type);
- if (type == dc_connection_none) {
- DC_LOG_HW_LINK_TRAINING("%s: Aborting training because sink unplugged\n", __func__);
- break;
- }
- }
-
- /* Try to train again at original settings if:
- * - not falling back between training attempts;
- * - aborted previous attempt due to reasons other than sink unplug;
- * - successfully trained but at a link rate lower than that required by stream;
- * - reached minimum link bandwidth.
- */
- if (!do_fallback || (status == LINK_TRAINING_ABORT) ||
- (status == LINK_TRAINING_SUCCESS && is_link_bw_low) ||
- is_link_bw_min) {
- j++;
- cur_link_settings = *link_setting;
- delay_between_attempts += LINK_TRAINING_RETRY_DELAY;
- is_link_bw_low = false;
- is_link_bw_min = (cur_link_settings.link_rate <= LINK_RATE_LOW) &&
- (cur_link_settings.lane_count <= LANE_COUNT_ONE);
-
- } else if (do_fallback) { /* Try training at lower link bandwidth if doing fallback. */
- uint32_t req_bw;
- uint32_t link_bw;
-
- decide_fallback_link_setting(link, &max_link_settings,
- &cur_link_settings, status);
- /* Fail link training if reduced link bandwidth no longer meets
- * stream requirements.
- */
- req_bw = dc_bandwidth_in_kbps_from_timing(&stream->timing);
- link_bw = dc_link_bandwidth_kbps(link, &cur_link_settings);
- is_link_bw_low = (req_bw > link_bw);
- is_link_bw_min = ((cur_link_settings.link_rate <= LINK_RATE_LOW) &&
- (cur_link_settings.lane_count <= LANE_COUNT_ONE));
- if (is_link_bw_low)
- DC_LOG_WARNING(
- "%s: Link(%d) bandwidth too low after fallback req_bw(%d) > link_bw(%d)\n",
- __func__, link->link_index, req_bw, link_bw);
- }
-
- msleep(delay_between_attempts);
- }
- return false;
-}
-
-static enum clock_source_id get_clock_source_id(struct dc_link *link)
-{
- enum clock_source_id dp_cs_id = CLOCK_SOURCE_ID_UNDEFINED;
- struct clock_source *dp_cs = link->dc->res_pool->dp_clock_source;
-
- if (dp_cs != NULL) {
- dp_cs_id = dp_cs->id;
- } else {
- /*
- * dp clock source is not initialized for some reason.
- * Should not happen, CLOCK_SOURCE_ID_EXTERNAL will be used
- */
- ASSERT(dp_cs);
- }
-
- return dp_cs_id;
-}
-
-static void set_dp_mst_mode(struct dc_link *link, const struct link_resource *link_res,
- bool mst_enable)
-{
- if (mst_enable == false &&
- link->type == dc_connection_mst_branch) {
- /* Disable MST on link. Use only local sink. */
- dp_disable_link_phy_mst(link, link_res, link->connector_signal);
-
- link->type = dc_connection_single;
- link->local_sink = link->remote_sinks[0];
- link->local_sink->sink_signal = SIGNAL_TYPE_DISPLAY_PORT;
- dc_sink_retain(link->local_sink);
- dm_helpers_dp_mst_stop_top_mgr(link->ctx, link);
- } else if (mst_enable == true &&
- link->type == dc_connection_single &&
- link->remote_sinks[0] != NULL) {
- /* Re-enable MST on link. */
- dp_disable_link_phy(link, link_res, link->connector_signal);
- dp_enable_mst_on_sink(link, true);
-
- link->type = dc_connection_mst_branch;
- link->local_sink->sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST;
- }
-}
-
-bool dc_link_dp_sync_lt_begin(struct dc_link *link)
-{
- /* Begin Sync LT. During this time,
- * DPCD:600h must not be powered down.
- */
- link->sync_lt_in_progress = true;
-
- /*Clear any existing preferred settings.*/
- memset(&link->preferred_training_settings, 0,
- sizeof(struct dc_link_training_overrides));
- memset(&link->preferred_link_setting, 0,
- sizeof(struct dc_link_settings));
-
- return true;
-}
-
-enum link_training_result dc_link_dp_sync_lt_attempt(
- struct dc_link *link,
- const struct link_resource *link_res,
- struct dc_link_settings *link_settings,
- struct dc_link_training_overrides *lt_overrides)
-{
- struct link_training_settings lt_settings = {0};
- enum link_training_result lt_status = LINK_TRAINING_SUCCESS;
- enum dp_panel_mode panel_mode = DP_PANEL_MODE_DEFAULT;
- enum clock_source_id dp_cs_id = CLOCK_SOURCE_ID_EXTERNAL;
- bool fec_enable = false;
-
- dp_decide_training_settings(
- link,
- link_settings,
- &lt_settings);
- override_training_settings(
- link,
- lt_overrides,
- &lt_settings);
- /* Setup MST Mode */
- if (lt_overrides->mst_enable)
- set_dp_mst_mode(link, link_res, *lt_overrides->mst_enable);
-
- /* Disable link */
- dp_disable_link_phy(link, link_res, link->connector_signal);
-
- /* Enable link */
- dp_cs_id = get_clock_source_id(link);
- dp_enable_link_phy(link, link_res, link->connector_signal,
- dp_cs_id, link_settings);
-
- /* Set FEC enable */
- if (dp_get_link_encoding_format(link_settings) == DP_8b_10b_ENCODING) {
- fec_enable = lt_overrides->fec_enable && *lt_overrides->fec_enable;
- dp_set_fec_ready(link, NULL, fec_enable);
- }
-
- if (lt_overrides->alternate_scrambler_reset) {
- if (*lt_overrides->alternate_scrambler_reset)
- panel_mode = DP_PANEL_MODE_EDP;
- else
- panel_mode = DP_PANEL_MODE_DEFAULT;
- } else
- panel_mode = dp_get_panel_mode(link);
-
- dp_set_panel_mode(link, panel_mode);
-
- /* Attempt to train with given link training settings */
- if (link->ctx->dc->work_arounds.lt_early_cr_pattern)
- start_clock_recovery_pattern_early(link, link_res, &lt_settings, DPRX);
-
- /* Set link rate, lane count and spread. */
- dpcd_set_link_settings(link, &lt_settings);
-
- /* 2. perform link training (set link training done
- * to false is done as well)
- */
- lt_status = perform_clock_recovery_sequence(link, link_res, &lt_settings, DPRX);
- if (lt_status == LINK_TRAINING_SUCCESS) {
- lt_status = perform_channel_equalization_sequence(link,
- link_res,
- &lt_settings,
- DPRX);
- }
-
- /* 3. Sync LT must skip TRAINING_PATTERN_SET:0 (video pattern)*/
- /* 4. print status message*/
- print_status_message(link, &lt_settings, lt_status);
-
- return lt_status;
-}
-
-bool dc_link_dp_sync_lt_end(struct dc_link *link, bool link_down)
-{
- /* If input parameter is set, shut down phy.
- * Still shouldn't turn off dp_receiver (DPCD:600h)
- */
- if (link_down == true) {
- struct dc_link_settings link_settings = link->cur_link_settings;
- dp_disable_link_phy(link, NULL, link->connector_signal);
- if (dp_get_link_encoding_format(&link_settings) == DP_8b_10b_ENCODING)
- dp_set_fec_ready(link, NULL, false);
- }
-
- link->sync_lt_in_progress = false;
- return true;
-}
-
-static enum dc_link_rate get_lttpr_max_link_rate(struct dc_link *link)
-{
- enum dc_link_rate lttpr_max_link_rate = link->dpcd_caps.lttpr_caps.max_link_rate;
-
- if (link->dpcd_caps.lttpr_caps.supported_128b_132b_rates.bits.UHBR20)
- lttpr_max_link_rate = LINK_RATE_UHBR20;
- else if (link->dpcd_caps.lttpr_caps.supported_128b_132b_rates.bits.UHBR13_5)
- lttpr_max_link_rate = LINK_RATE_UHBR13_5;
- else if (link->dpcd_caps.lttpr_caps.supported_128b_132b_rates.bits.UHBR10)
- lttpr_max_link_rate = LINK_RATE_UHBR10;
-
- return lttpr_max_link_rate;
-}
-
-static enum dc_link_rate get_cable_max_link_rate(struct dc_link *link)
-{
- enum dc_link_rate cable_max_link_rate = LINK_RATE_UNKNOWN;
-
- if (link->dpcd_caps.cable_id.bits.UHBR10_20_CAPABILITY & DP_UHBR20)
- cable_max_link_rate = LINK_RATE_UHBR20;
- else if (link->dpcd_caps.cable_id.bits.UHBR13_5_CAPABILITY)
- cable_max_link_rate = LINK_RATE_UHBR13_5;
- else if (link->dpcd_caps.cable_id.bits.UHBR10_20_CAPABILITY & DP_UHBR10)
- cable_max_link_rate = LINK_RATE_UHBR10;
-
- return cable_max_link_rate;
-}
-
-bool dc_link_dp_get_max_link_enc_cap(const struct dc_link *link, struct dc_link_settings *max_link_enc_cap)
-{
- struct link_encoder *link_enc = NULL;
-
- if (!max_link_enc_cap) {
- DC_LOG_ERROR("%s: Could not return max link encoder caps", __func__);
- return false;
- }
-
- link_enc = link_enc_cfg_get_link_enc(link);
- ASSERT(link_enc);
-
- if (link_enc && link_enc->funcs->get_max_link_cap) {
- link_enc->funcs->get_max_link_cap(link_enc, max_link_enc_cap);
- return true;
- }
-
- DC_LOG_ERROR("%s: Max link encoder caps unknown", __func__);
- max_link_enc_cap->lane_count = 1;
- max_link_enc_cap->link_rate = 6;
- return false;
-}
-
-
-struct dc_link_settings dp_get_max_link_cap(struct dc_link *link)
-{
- struct dc_link_settings max_link_cap = {0};
- enum dc_link_rate lttpr_max_link_rate;
- enum dc_link_rate cable_max_link_rate;
- struct link_encoder *link_enc = NULL;
-
-
- link_enc = link_enc_cfg_get_link_enc(link);
- ASSERT(link_enc);
-
- /* get max link encoder capability */
- if (link_enc)
- link_enc->funcs->get_max_link_cap(link_enc, &max_link_cap);
-
- /* Lower link settings based on sink's link cap */
- if (link->reported_link_cap.lane_count < max_link_cap.lane_count)
- max_link_cap.lane_count =
- link->reported_link_cap.lane_count;
- if (link->reported_link_cap.link_rate < max_link_cap.link_rate)
- max_link_cap.link_rate =
- link->reported_link_cap.link_rate;
- if (link->reported_link_cap.link_spread <
- max_link_cap.link_spread)
- max_link_cap.link_spread =
- link->reported_link_cap.link_spread;
-
- /* Lower link settings based on cable attributes
- * Cable ID is a DP2 feature to identify max certified link rate that
- * a cable can carry. The cable identification method requires both
- * cable and display hardware support. Since the specs comes late, it is
- * anticipated that the first round of DP2 cables and displays may not
- * be fully compatible to reliably return cable ID data. Therefore the
- * decision of our cable id policy is that if the cable can return non
- * zero cable id data, we will take cable's link rate capability into
- * account. However if we get zero data, the cable link rate capability
- * is considered inconclusive. In this case, we will not take cable's
- * capability into account to avoid of over limiting hardware capability
- * from users. The max overall link rate capability is still determined
- * after actual dp pre-training. Cable id is considered as an auxiliary
- * method of determining max link bandwidth capability.
- */
- cable_max_link_rate = get_cable_max_link_rate(link);
-
- if (!link->dc->debug.ignore_cable_id &&
- cable_max_link_rate != LINK_RATE_UNKNOWN &&
- cable_max_link_rate < max_link_cap.link_rate)
- max_link_cap.link_rate = cable_max_link_rate;
-
- /* account for lttpr repeaters cap
- * notes: repeaters do not snoop in the DPRX Capabilities addresses (3.6.3).
- */
- if (dp_is_lttpr_present(link)) {
- if (link->dpcd_caps.lttpr_caps.max_lane_count < max_link_cap.lane_count)
- max_link_cap.lane_count = link->dpcd_caps.lttpr_caps.max_lane_count;
- lttpr_max_link_rate = get_lttpr_max_link_rate(link);
-
- if (lttpr_max_link_rate < max_link_cap.link_rate)
- max_link_cap.link_rate = lttpr_max_link_rate;
-
- DC_LOG_HW_LINK_TRAINING("%s\n Training with LTTPR, max_lane count %d max_link rate %d \n",
- __func__,
- max_link_cap.lane_count,
- max_link_cap.link_rate);
- }
-
- if (dp_get_link_encoding_format(&max_link_cap) == DP_128b_132b_ENCODING &&
- link->dc->debug.disable_uhbr)
- max_link_cap.link_rate = LINK_RATE_HIGH3;
-
- return max_link_cap;
-}
-
-static enum dc_status read_hpd_rx_irq_data(
- struct dc_link *link,
- union hpd_irq_data *irq_data)
-{
- static enum dc_status retval;
-
- /* The HW reads 16 bytes from 200h on HPD,
- * but if we get an AUX_DEFER, the HW cannot retry
- * and this causes the CTS tests 4.3.2.1 - 3.2.4 to
- * fail, so we now explicitly read 6 bytes which is
- * the req from the above mentioned test cases.
- *
- * For DP 1.4 we need to read those from 2002h range.
- */
- if (link->dpcd_caps.dpcd_rev.raw < DPCD_REV_14)
- retval = core_link_read_dpcd(
- link,
- DP_SINK_COUNT,
- irq_data->raw,
- sizeof(union hpd_irq_data));
- else {
- /* Read 14 bytes in a single read and then copy only the required fields.
- * This is more efficient than doing it in two separate AUX reads. */
-
- uint8_t tmp[DP_SINK_STATUS_ESI - DP_SINK_COUNT_ESI + 1];
-
- retval = core_link_read_dpcd(
- link,
- DP_SINK_COUNT_ESI,
- tmp,
- sizeof(tmp));
-
- if (retval != DC_OK)
- return retval;
-
- irq_data->bytes.sink_cnt.raw = tmp[DP_SINK_COUNT_ESI - DP_SINK_COUNT_ESI];
- irq_data->bytes.device_service_irq.raw = tmp[DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0 - DP_SINK_COUNT_ESI];
- irq_data->bytes.lane01_status.raw = tmp[DP_LANE0_1_STATUS_ESI - DP_SINK_COUNT_ESI];
- irq_data->bytes.lane23_status.raw = tmp[DP_LANE2_3_STATUS_ESI - DP_SINK_COUNT_ESI];
- irq_data->bytes.lane_status_updated.raw = tmp[DP_LANE_ALIGN_STATUS_UPDATED_ESI - DP_SINK_COUNT_ESI];
- irq_data->bytes.sink_status.raw = tmp[DP_SINK_STATUS_ESI - DP_SINK_COUNT_ESI];
- }
-
- return retval;
-}
-
-bool hpd_rx_irq_check_link_loss_status(
- struct dc_link *link,
- union hpd_irq_data *hpd_irq_dpcd_data)
-{
- uint8_t irq_reg_rx_power_state = 0;
- enum dc_status dpcd_result = DC_ERROR_UNEXPECTED;
- union lane_status lane_status;
- uint32_t lane;
- bool sink_status_changed;
- bool return_code;
-
- sink_status_changed = false;
- return_code = false;
-
- if (link->cur_link_settings.lane_count == 0)
- return return_code;
-
- /*1. Check that Link Status changed, before re-training.*/
-
- /*parse lane status*/
- for (lane = 0; lane < link->cur_link_settings.lane_count; lane++) {
- /* check status of lanes 0,1
- * changed DpcdAddress_Lane01Status (0x202)
- */
- lane_status.raw = get_nibble_at_index(
- &hpd_irq_dpcd_data->bytes.lane01_status.raw,
- lane);
-
- if (!lane_status.bits.CHANNEL_EQ_DONE_0 ||
- !lane_status.bits.CR_DONE_0 ||
- !lane_status.bits.SYMBOL_LOCKED_0) {
- /* if one of the channel equalization, clock
- * recovery or symbol lock is dropped
- * consider it as (link has been
- * dropped) dp sink status has changed
- */
- sink_status_changed = true;
- break;
- }
- }
-
- /* Check interlane align.*/
- if (sink_status_changed ||
- !hpd_irq_dpcd_data->bytes.lane_status_updated.bits.INTERLANE_ALIGN_DONE) {
-
- DC_LOG_HW_HPD_IRQ("%s: Link Status changed.\n", __func__);
-
- return_code = true;
-
- /*2. Check that we can handle interrupt: Not in FS DOS,
- * Not in "Display Timeout" state, Link is trained.
- */
- dpcd_result = core_link_read_dpcd(link,
- DP_SET_POWER,
- &irq_reg_rx_power_state,
- sizeof(irq_reg_rx_power_state));
-
- if (dpcd_result != DC_OK) {
- DC_LOG_HW_HPD_IRQ("%s: DPCD read failed to obtain power state.\n",
- __func__);
- } else {
- if (irq_reg_rx_power_state != DP_SET_POWER_D0)
- return_code = false;
- }
- }
-
- return return_code;
-}
-
-static bool dp_verify_link_cap(
- struct dc_link *link,
- struct dc_link_settings *known_limit_link_setting,
- int *fail_count)
-{
- struct dc_link_settings cur_link_settings = {0};
- struct dc_link_settings max_link_settings = *known_limit_link_setting;
- bool success = false;
- bool skip_video_pattern;
- enum clock_source_id dp_cs_id = get_clock_source_id(link);
- enum link_training_result status = LINK_TRAINING_SUCCESS;
- union hpd_irq_data irq_data;
- struct link_resource link_res;
-
- memset(&irq_data, 0, sizeof(irq_data));
- cur_link_settings = max_link_settings;
-
- /* Grant extended timeout request */
- if (dp_is_lttpr_present(link) && link->dpcd_caps.lttpr_caps.max_ext_timeout > 0) {
- uint8_t grant = link->dpcd_caps.lttpr_caps.max_ext_timeout & 0x80;
-
- core_link_write_dpcd(link, DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT, &grant, sizeof(grant));
- }
-
- do {
- if (!get_temp_dp_link_res(link, &link_res, &cur_link_settings))
- continue;
-
- skip_video_pattern = cur_link_settings.link_rate != LINK_RATE_LOW;
- dp_enable_link_phy(
- link,
- &link_res,
- link->connector_signal,
- dp_cs_id,
- &cur_link_settings);
-
- status = dc_link_dp_perform_link_training(
- link,
- &link_res,
- &cur_link_settings,
- skip_video_pattern);
-
- if (status == LINK_TRAINING_SUCCESS) {
- success = true;
- udelay(1000);
- if (read_hpd_rx_irq_data(link, &irq_data) == DC_OK &&
- hpd_rx_irq_check_link_loss_status(
- link,
- &irq_data))
- (*fail_count)++;
-
- } else {
- (*fail_count)++;
- }
- dp_trace_lt_total_count_increment(link, true);
- dp_trace_lt_result_update(link, status, true);
- dp_disable_link_phy(link, &link_res, link->connector_signal);
- } while (!success && decide_fallback_link_setting(link,
- &max_link_settings, &cur_link_settings, status));
-
- link->verified_link_cap = success ?
- cur_link_settings : fail_safe_link_settings;
- return success;
-}
-
-static void apply_usbc_combo_phy_reset_wa(struct dc_link *link,
- struct dc_link_settings *link_settings)
-{
- /* Temporary Renoir-specific workaround PHY will sometimes be in bad
- * state on hotplugging display from certain USB-C dongle, so add extra
- * cycle of enabling and disabling the PHY before first link training.
- */
- struct link_resource link_res = {0};
- enum clock_source_id dp_cs_id = get_clock_source_id(link);
-
- dp_enable_link_phy(link, &link_res, link->connector_signal,
- dp_cs_id, link_settings);
- dp_disable_link_phy(link, &link_res, link->connector_signal);
-}
-
-bool dp_verify_link_cap_with_retries(
- struct dc_link *link,
- struct dc_link_settings *known_limit_link_setting,
- int attempts)
-{
- int i = 0;
- bool success = false;
- int fail_count = 0;
-
- dp_trace_detect_lt_init(link);
-
- if (link->link_enc && link->link_enc->features.flags.bits.DP_IS_USB_C &&
- link->dc->debug.usbc_combo_phy_reset_wa)
- apply_usbc_combo_phy_reset_wa(link, known_limit_link_setting);
-
- dp_trace_set_lt_start_timestamp(link, false);
- for (i = 0; i < attempts; i++) {
- enum dc_connection_type type = dc_connection_none;
-
- memset(&link->verified_link_cap, 0,
- sizeof(struct dc_link_settings));
- if (!dc_link_detect_sink(link, &type) || type == dc_connection_none) {
- link->verified_link_cap = fail_safe_link_settings;
- break;
- } else if (dp_verify_link_cap(link, known_limit_link_setting,
- &fail_count) && fail_count == 0) {
- success = true;
- break;
- }
- msleep(10);
- }
-
- dp_trace_lt_fail_count_update(link, fail_count, true);
- dp_trace_set_lt_end_timestamp(link, true);
-
- return success;
-}
-
-/* in DP compliance test, DPR-120 may have
- * a random value in its MAX_LINK_BW dpcd field.
- * We map it to the maximum supported link rate that
- * is smaller than MAX_LINK_BW in this case.
- */
-static enum dc_link_rate get_link_rate_from_max_link_bw(
- uint8_t max_link_bw)
-{
- enum dc_link_rate link_rate;
-
- if (max_link_bw >= LINK_RATE_HIGH3) {
- link_rate = LINK_RATE_HIGH3;
- } else if (max_link_bw < LINK_RATE_HIGH3
- && max_link_bw >= LINK_RATE_HIGH2) {
- link_rate = LINK_RATE_HIGH2;
- } else if (max_link_bw < LINK_RATE_HIGH2
- && max_link_bw >= LINK_RATE_HIGH) {
- link_rate = LINK_RATE_HIGH;
- } else if (max_link_bw < LINK_RATE_HIGH
- && max_link_bw >= LINK_RATE_LOW) {
- link_rate = LINK_RATE_LOW;
- } else {
- link_rate = LINK_RATE_UNKNOWN;
- }
-
- return link_rate;
-}
-
-static inline bool reached_minimum_lane_count(enum dc_lane_count lane_count)
-{
- return lane_count <= LANE_COUNT_ONE;
-}
-
-static inline bool reached_minimum_link_rate(enum dc_link_rate link_rate)
-{
- return link_rate <= LINK_RATE_LOW;
-}
-
-static enum dc_lane_count reduce_lane_count(enum dc_lane_count lane_count)
-{
- switch (lane_count) {
- case LANE_COUNT_FOUR:
- return LANE_COUNT_TWO;
- case LANE_COUNT_TWO:
- return LANE_COUNT_ONE;
- case LANE_COUNT_ONE:
- return LANE_COUNT_UNKNOWN;
- default:
- return LANE_COUNT_UNKNOWN;
- }
-}
-
-static enum dc_link_rate reduce_link_rate(enum dc_link_rate link_rate)
-{
- switch (link_rate) {
- case LINK_RATE_UHBR20:
- return LINK_RATE_UHBR13_5;
- case LINK_RATE_UHBR13_5:
- return LINK_RATE_UHBR10;
- case LINK_RATE_UHBR10:
- return LINK_RATE_HIGH3;
- case LINK_RATE_HIGH3:
- return LINK_RATE_HIGH2;
- case LINK_RATE_HIGH2:
- return LINK_RATE_HIGH;
- case LINK_RATE_HIGH:
- return LINK_RATE_LOW;
- case LINK_RATE_LOW:
- return LINK_RATE_UNKNOWN;
- default:
- return LINK_RATE_UNKNOWN;
- }
-}
-
-static enum dc_lane_count increase_lane_count(enum dc_lane_count lane_count)
-{
- switch (lane_count) {
- case LANE_COUNT_ONE:
- return LANE_COUNT_TWO;
- case LANE_COUNT_TWO:
- return LANE_COUNT_FOUR;
- default:
- return LANE_COUNT_UNKNOWN;
- }
-}
-
-static enum dc_link_rate increase_link_rate(struct dc_link *link,
- enum dc_link_rate link_rate)
-{
- switch (link_rate) {
- case LINK_RATE_LOW:
- return LINK_RATE_HIGH;
- case LINK_RATE_HIGH:
- return LINK_RATE_HIGH2;
- case LINK_RATE_HIGH2:
- return LINK_RATE_HIGH3;
- case LINK_RATE_HIGH3:
- return LINK_RATE_UHBR10;
- case LINK_RATE_UHBR10:
- /* upto DP2.x specs UHBR13.5 is the only link rate that could be
- * not supported by DPRX when higher link rate is supported.
- * so we treat it as a special case for code simplicity. When we
- * have new specs with more link rates like this, we should
- * consider a more generic solution to handle discrete link
- * rate capabilities.
- */
- return link->dpcd_caps.dp_128b_132b_supported_link_rates.bits.UHBR13_5 ?
- LINK_RATE_UHBR13_5 : LINK_RATE_UHBR20;
- case LINK_RATE_UHBR13_5:
- return LINK_RATE_UHBR20;
- default:
- return LINK_RATE_UNKNOWN;
- }
-}
-
-static bool decide_fallback_link_setting_max_bw_policy(
- struct dc_link *link,
- const struct dc_link_settings *max,
- struct dc_link_settings *cur,
- enum link_training_result training_result)
-{
- uint8_t cur_idx = 0, next_idx;
- bool found = false;
-
- if (training_result == LINK_TRAINING_ABORT)
- return false;
-
- while (cur_idx < ARRAY_SIZE(dp_lt_fallbacks))
- /* find current index */
- if (dp_lt_fallbacks[cur_idx].lane_count == cur->lane_count &&
- dp_lt_fallbacks[cur_idx].link_rate == cur->link_rate)
- break;
- else
- cur_idx++;
-
- next_idx = cur_idx + 1;
-
- while (next_idx < ARRAY_SIZE(dp_lt_fallbacks))
- /* find next index */
- if (dp_lt_fallbacks[next_idx].lane_count > max->lane_count ||
- dp_lt_fallbacks[next_idx].link_rate > max->link_rate)
- next_idx++;
- else if (dp_lt_fallbacks[next_idx].link_rate == LINK_RATE_UHBR13_5 &&
- link->dpcd_caps.dp_128b_132b_supported_link_rates.bits.UHBR13_5 == 0)
- /* upto DP2.x specs UHBR13.5 is the only link rate that
- * could be not supported by DPRX when higher link rate
- * is supported. so we treat it as a special case for
- * code simplicity. When we have new specs with more
- * link rates like this, we should consider a more
- * generic solution to handle discrete link rate
- * capabilities.
- */
- next_idx++;
- else
- break;
-
- if (next_idx < ARRAY_SIZE(dp_lt_fallbacks)) {
- cur->lane_count = dp_lt_fallbacks[next_idx].lane_count;
- cur->link_rate = dp_lt_fallbacks[next_idx].link_rate;
- found = true;
- }
-
- return found;
-}
-
-/*
- * function: set link rate and lane count fallback based
- * on current link setting and last link training result
- * return value:
- * true - link setting could be set
- * false - has reached minimum setting
- * and no further fallback could be done
- */
-static bool decide_fallback_link_setting(
- struct dc_link *link,
- struct dc_link_settings *max,
- struct dc_link_settings *cur,
- enum link_training_result training_result)
-{
- if (dp_get_link_encoding_format(max) == DP_128b_132b_ENCODING ||
- link->dc->debug.force_dp2_lt_fallback_method)
- return decide_fallback_link_setting_max_bw_policy(link, max, cur,
- training_result);
-
- switch (training_result) {
- case LINK_TRAINING_CR_FAIL_LANE0:
- case LINK_TRAINING_CR_FAIL_LANE1:
- case LINK_TRAINING_CR_FAIL_LANE23:
- case LINK_TRAINING_LQA_FAIL:
- {
- if (!reached_minimum_link_rate(cur->link_rate)) {
- cur->link_rate = reduce_link_rate(cur->link_rate);
- } else if (!reached_minimum_lane_count(cur->lane_count)) {
- cur->link_rate = max->link_rate;
- if (training_result == LINK_TRAINING_CR_FAIL_LANE0)
- return false;
- else if (training_result == LINK_TRAINING_CR_FAIL_LANE1)
- cur->lane_count = LANE_COUNT_ONE;
- else if (training_result == LINK_TRAINING_CR_FAIL_LANE23)
- cur->lane_count = LANE_COUNT_TWO;
- else
- cur->lane_count = reduce_lane_count(cur->lane_count);
- } else {
- return false;
- }
- break;
- }
- case LINK_TRAINING_EQ_FAIL_EQ:
- case LINK_TRAINING_EQ_FAIL_CR_PARTIAL:
- {
- if (!reached_minimum_lane_count(cur->lane_count)) {
- cur->lane_count = reduce_lane_count(cur->lane_count);
- } else if (!reached_minimum_link_rate(cur->link_rate)) {
- cur->link_rate = reduce_link_rate(cur->link_rate);
- /* Reduce max link rate to avoid potential infinite loop.
- * Needed so that any subsequent CR_FAIL fallback can't
- * re-set the link rate higher than the link rate from
- * the latest EQ_FAIL fallback.
- */
- max->link_rate = cur->link_rate;
- cur->lane_count = max->lane_count;
- } else {
- return false;
- }
- break;
- }
- case LINK_TRAINING_EQ_FAIL_CR:
- {
- if (!reached_minimum_link_rate(cur->link_rate)) {
- cur->link_rate = reduce_link_rate(cur->link_rate);
- /* Reduce max link rate to avoid potential infinite loop.
- * Needed so that any subsequent CR_FAIL fallback can't
- * re-set the link rate higher than the link rate from
- * the latest EQ_FAIL fallback.
- */
- max->link_rate = cur->link_rate;
- cur->lane_count = max->lane_count;
- } else {
- return false;
- }
- break;
- }
- default:
- return false;
- }
- return true;
-}
-
-bool dp_validate_mode_timing(
- struct dc_link *link,
- const struct dc_crtc_timing *timing)
-{
- uint32_t req_bw;
- uint32_t max_bw;
-
- const struct dc_link_settings *link_setting;
-
- /* According to spec, VSC SDP should be used if pixel format is YCbCr420 */
- if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420 &&
- !link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED &&
- dal_graphics_object_id_get_connector_id(link->link_id) != CONNECTOR_ID_VIRTUAL)
- return false;
-
- /*always DP fail safe mode*/
- if ((timing->pix_clk_100hz / 10) == (uint32_t) 25175 &&
- timing->h_addressable == (uint32_t) 640 &&
- timing->v_addressable == (uint32_t) 480)
- return true;
-
- link_setting = dc_link_get_link_cap(link);
-
- /* TODO: DYNAMIC_VALIDATION needs to be implemented */
- /*if (flags.DYNAMIC_VALIDATION == 1 &&
- link->verified_link_cap.lane_count != LANE_COUNT_UNKNOWN)
- link_setting = &link->verified_link_cap;
- */
-
- req_bw = dc_bandwidth_in_kbps_from_timing(timing);
- max_bw = dc_link_bandwidth_kbps(link, link_setting);
-
- if (req_bw <= max_bw) {
- /* remember the biggest mode here, during
- * initial link training (to get
- * verified_link_cap), LS sends event about
- * cannot train at reported cap to upper
- * layer and upper layer will re-enumerate modes.
- * this is not necessary if the lower
- * verified_link_cap is enough to drive
- * all the modes */
-
- /* TODO: DYNAMIC_VALIDATION needs to be implemented */
- /* if (flags.DYNAMIC_VALIDATION == 1)
- dpsst->max_req_bw_for_verified_linkcap = dal_max(
- dpsst->max_req_bw_for_verified_linkcap, req_bw); */
- return true;
- } else
- return false;
-}
-
-static bool decide_dp_link_settings(struct dc_link *link, struct dc_link_settings *link_setting, uint32_t req_bw)
-{
- struct dc_link_settings initial_link_setting = {
- LANE_COUNT_ONE, LINK_RATE_LOW, LINK_SPREAD_DISABLED, false, 0};
- struct dc_link_settings current_link_setting =
- initial_link_setting;
- uint32_t link_bw;
-
- if (req_bw > dc_link_bandwidth_kbps(link, &link->verified_link_cap))
- return false;
-
- /* search for the minimum link setting that:
- * 1. is supported according to the link training result
- * 2. could support the b/w requested by the timing
- */
- while (current_link_setting.link_rate <=
- link->verified_link_cap.link_rate) {
- link_bw = dc_link_bandwidth_kbps(
- link,
- &current_link_setting);
- if (req_bw <= link_bw) {
- *link_setting = current_link_setting;
- return true;
- }
-
- if (current_link_setting.lane_count <
- link->verified_link_cap.lane_count) {
- current_link_setting.lane_count =
- increase_lane_count(
- current_link_setting.lane_count);
- } else {
- current_link_setting.link_rate =
- increase_link_rate(link,
- current_link_setting.link_rate);
- current_link_setting.lane_count =
- initial_link_setting.lane_count;
- }
- }
-
- return false;
-}
-
-bool decide_edp_link_settings(struct dc_link *link, struct dc_link_settings *link_setting, uint32_t req_bw)
-{
- struct dc_link_settings initial_link_setting;
- struct dc_link_settings current_link_setting;
- uint32_t link_bw;
-
- /*
- * edp_supported_link_rates_count is only valid for eDP v1.4 or higher.
- * Per VESA eDP spec, "The DPCD revision for eDP v1.4 is 13h"
- */
- if (link->dpcd_caps.dpcd_rev.raw < DPCD_REV_13 ||
- link->dpcd_caps.edp_supported_link_rates_count == 0) {
- *link_setting = link->verified_link_cap;
- return true;
- }
-
- memset(&initial_link_setting, 0, sizeof(initial_link_setting));
- initial_link_setting.lane_count = LANE_COUNT_ONE;
- initial_link_setting.link_rate = link->dpcd_caps.edp_supported_link_rates[0];
- initial_link_setting.link_spread = LINK_SPREAD_DISABLED;
- initial_link_setting.use_link_rate_set = true;
- initial_link_setting.link_rate_set = 0;
- current_link_setting = initial_link_setting;
-
- /* search for the minimum link setting that:
- * 1. is supported according to the link training result
- * 2. could support the b/w requested by the timing
- */
- while (current_link_setting.link_rate <=
- link->verified_link_cap.link_rate) {
- link_bw = dc_link_bandwidth_kbps(
- link,
- &current_link_setting);
- if (req_bw <= link_bw) {
- *link_setting = current_link_setting;
- return true;
- }
-
- if (current_link_setting.lane_count <
- link->verified_link_cap.lane_count) {
- current_link_setting.lane_count =
- increase_lane_count(
- current_link_setting.lane_count);
- } else {
- if (current_link_setting.link_rate_set < link->dpcd_caps.edp_supported_link_rates_count) {
- current_link_setting.link_rate_set++;
- current_link_setting.link_rate =
- link->dpcd_caps.edp_supported_link_rates[current_link_setting.link_rate_set];
- current_link_setting.lane_count =
- initial_link_setting.lane_count;
- } else
- break;
- }
- }
- return false;
-}
-
-static bool decide_edp_link_settings_with_dsc(struct dc_link *link,
- struct dc_link_settings *link_setting,
- uint32_t req_bw,
- enum dc_link_rate max_link_rate)
-{
- struct dc_link_settings initial_link_setting;
- struct dc_link_settings current_link_setting;
- uint32_t link_bw;
-
- unsigned int policy = 0;
-
- policy = link->panel_config.dsc.force_dsc_edp_policy;
- if (max_link_rate == LINK_RATE_UNKNOWN)
- max_link_rate = link->verified_link_cap.link_rate;
- /*
- * edp_supported_link_rates_count is only valid for eDP v1.4 or higher.
- * Per VESA eDP spec, "The DPCD revision for eDP v1.4 is 13h"
- */
- if ((link->dpcd_caps.dpcd_rev.raw < DPCD_REV_13 ||
- link->dpcd_caps.edp_supported_link_rates_count == 0)) {
- /* for DSC enabled case, we search for minimum lane count */
- memset(&initial_link_setting, 0, sizeof(initial_link_setting));
- initial_link_setting.lane_count = LANE_COUNT_ONE;
- initial_link_setting.link_rate = LINK_RATE_LOW;
- initial_link_setting.link_spread = LINK_SPREAD_DISABLED;
- initial_link_setting.use_link_rate_set = false;
- initial_link_setting.link_rate_set = 0;
- current_link_setting = initial_link_setting;
- if (req_bw > dc_link_bandwidth_kbps(link, &link->verified_link_cap))
- return false;
-
- /* search for the minimum link setting that:
- * 1. is supported according to the link training result
- * 2. could support the b/w requested by the timing
- */
- while (current_link_setting.link_rate <=
- max_link_rate) {
- link_bw = dc_link_bandwidth_kbps(
- link,
- &current_link_setting);
- if (req_bw <= link_bw) {
- *link_setting = current_link_setting;
- return true;
- }
- if (policy) {
- /* minimize lane */
- if (current_link_setting.link_rate < max_link_rate) {
- current_link_setting.link_rate =
- increase_link_rate(link,
- current_link_setting.link_rate);
- } else {
- if (current_link_setting.lane_count <
- link->verified_link_cap.lane_count) {
- current_link_setting.lane_count =
- increase_lane_count(
- current_link_setting.lane_count);
- current_link_setting.link_rate = initial_link_setting.link_rate;
- } else
- break;
- }
- } else {
- /* minimize link rate */
- if (current_link_setting.lane_count <
- link->verified_link_cap.lane_count) {
- current_link_setting.lane_count =
- increase_lane_count(
- current_link_setting.lane_count);
- } else {
- current_link_setting.link_rate =
- increase_link_rate(link,
- current_link_setting.link_rate);
- current_link_setting.lane_count =
- initial_link_setting.lane_count;
- }
- }
- }
- return false;
- }
-
- /* if optimize edp link is supported */
- memset(&initial_link_setting, 0, sizeof(initial_link_setting));
- initial_link_setting.lane_count = LANE_COUNT_ONE;
- initial_link_setting.link_rate = link->dpcd_caps.edp_supported_link_rates[0];
- initial_link_setting.link_spread = LINK_SPREAD_DISABLED;
- initial_link_setting.use_link_rate_set = true;
- initial_link_setting.link_rate_set = 0;
- current_link_setting = initial_link_setting;
-
- /* search for the minimum link setting that:
- * 1. is supported according to the link training result
- * 2. could support the b/w requested by the timing
- */
- while (current_link_setting.link_rate <=
- max_link_rate) {
- link_bw = dc_link_bandwidth_kbps(
- link,
- &current_link_setting);
- if (req_bw <= link_bw) {
- *link_setting = current_link_setting;
- return true;
- }
- if (policy) {
- /* minimize lane */
- if (current_link_setting.link_rate_set <
- link->dpcd_caps.edp_supported_link_rates_count
- && current_link_setting.link_rate < max_link_rate) {
- current_link_setting.link_rate_set++;
- current_link_setting.link_rate =
- link->dpcd_caps.edp_supported_link_rates[current_link_setting.link_rate_set];
- } else {
- if (current_link_setting.lane_count < link->verified_link_cap.lane_count) {
- current_link_setting.lane_count =
- increase_lane_count(
- current_link_setting.lane_count);
- current_link_setting.link_rate_set = initial_link_setting.link_rate_set;
- current_link_setting.link_rate =
- link->dpcd_caps.edp_supported_link_rates[current_link_setting.link_rate_set];
- } else
- break;
- }
- } else {
- /* minimize link rate */
- if (current_link_setting.lane_count <
- link->verified_link_cap.lane_count) {
- current_link_setting.lane_count =
- increase_lane_count(
- current_link_setting.lane_count);
- } else {
- if (current_link_setting.link_rate_set < link->dpcd_caps.edp_supported_link_rates_count) {
- current_link_setting.link_rate_set++;
- current_link_setting.link_rate =
- link->dpcd_caps.edp_supported_link_rates[current_link_setting.link_rate_set];
- current_link_setting.lane_count =
- initial_link_setting.lane_count;
- } else
- break;
- }
- }
- }
- return false;
-}
-
-static bool decide_mst_link_settings(const struct dc_link *link, struct dc_link_settings *link_setting)
-{
- *link_setting = link->verified_link_cap;
- return true;
-}
-
-bool decide_link_settings(struct dc_stream_state *stream,
- struct dc_link_settings *link_setting)
-{
- struct dc_link *link = stream->link;
- uint32_t req_bw = dc_bandwidth_in_kbps_from_timing(&stream->timing);
-
- memset(link_setting, 0, sizeof(*link_setting));
-
- /* if preferred is specified through AMDDP, use it, if it's enough
- * to drive the mode
- */
- if (link->preferred_link_setting.lane_count !=
- LANE_COUNT_UNKNOWN &&
- link->preferred_link_setting.link_rate !=
- LINK_RATE_UNKNOWN) {
- *link_setting = link->preferred_link_setting;
- return true;
- }
-
- /* MST doesn't perform link training for now
- * TODO: add MST specific link training routine
- */
- if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
- decide_mst_link_settings(link, link_setting);
- } else if (link->connector_signal == SIGNAL_TYPE_EDP) {
- /* enable edp link optimization for DSC eDP case */
- if (stream->timing.flags.DSC) {
- enum dc_link_rate max_link_rate = LINK_RATE_UNKNOWN;
-
- if (link->panel_config.dsc.force_dsc_edp_policy) {
- /* calculate link max link rate cap*/
- struct dc_link_settings tmp_link_setting;
- struct dc_crtc_timing tmp_timing = stream->timing;
- uint32_t orig_req_bw;
-
- tmp_link_setting.link_rate = LINK_RATE_UNKNOWN;
- tmp_timing.flags.DSC = 0;
- orig_req_bw = dc_bandwidth_in_kbps_from_timing(&tmp_timing);
- decide_edp_link_settings(link, &tmp_link_setting, orig_req_bw);
- max_link_rate = tmp_link_setting.link_rate;
- }
- decide_edp_link_settings_with_dsc(link, link_setting, req_bw, max_link_rate);
- } else {
- decide_edp_link_settings(link, link_setting, req_bw);
- }
- } else {
- decide_dp_link_settings(link, link_setting, req_bw);
- }
-
- return link_setting->lane_count != LANE_COUNT_UNKNOWN &&
- link_setting->link_rate != LINK_RATE_UNKNOWN;
-}
-
-/*************************Short Pulse IRQ***************************/
-bool dc_link_dp_allow_hpd_rx_irq(const struct dc_link *link)
-{
- /*
- * Don't handle RX IRQ unless one of following is met:
- * 1) The link is established (cur_link_settings != unknown)
- * 2) We know we're dealing with a branch device, SST or MST
- */
-
- if ((link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
- is_dp_branch_device(link))
- return true;
-
- return false;
-}
-
-static bool handle_hpd_irq_psr_sink(struct dc_link *link)
-{
- union dpcd_psr_configuration psr_configuration;
-
- if (!link->psr_settings.psr_feature_enabled)
- return false;
-
- dm_helpers_dp_read_dpcd(
- link->ctx,
- link,
- 368,/*DpcdAddress_PSR_Enable_Cfg*/
- &psr_configuration.raw,
- sizeof(psr_configuration.raw));
-
- if (psr_configuration.bits.ENABLE) {
- unsigned char dpcdbuf[3] = {0};
- union psr_error_status psr_error_status;
- union psr_sink_psr_status psr_sink_psr_status;
-
- dm_helpers_dp_read_dpcd(
- link->ctx,
- link,
- 0x2006, /*DpcdAddress_PSR_Error_Status*/
- (unsigned char *) dpcdbuf,
- sizeof(dpcdbuf));
-
- /*DPCD 2006h ERROR STATUS*/
- psr_error_status.raw = dpcdbuf[0];
- /*DPCD 2008h SINK PANEL SELF REFRESH STATUS*/
- psr_sink_psr_status.raw = dpcdbuf[2];
-
- if (psr_error_status.bits.LINK_CRC_ERROR ||
- psr_error_status.bits.RFB_STORAGE_ERROR ||
- psr_error_status.bits.VSC_SDP_ERROR) {
- bool allow_active;
-
- /* Acknowledge and clear error bits */
- dm_helpers_dp_write_dpcd(
- link->ctx,
- link,
- 8198,/*DpcdAddress_PSR_Error_Status*/
- &psr_error_status.raw,
- sizeof(psr_error_status.raw));
-
- /* PSR error, disable and re-enable PSR */
- if (link->psr_settings.psr_allow_active) {
- allow_active = false;
- dc_link_set_psr_allow_active(link, &allow_active, true, false, NULL);
- allow_active = true;
- dc_link_set_psr_allow_active(link, &allow_active, true, false, NULL);
- }
-
- return true;
- } else if (psr_sink_psr_status.bits.SINK_SELF_REFRESH_STATUS ==
- PSR_SINK_STATE_ACTIVE_DISPLAY_FROM_SINK_RFB){
- /* No error is detect, PSR is active.
- * We should return with IRQ_HPD handled without
- * checking for loss of sync since PSR would have
- * powered down main link.
- */
- return true;
- }
- }
- return false;
-}
-
-static enum dc_link_rate get_link_rate_from_test_link_rate(uint8_t test_rate)
-{
- switch (test_rate) {
- case DP_TEST_LINK_RATE_RBR:
- return LINK_RATE_LOW;
- case DP_TEST_LINK_RATE_HBR:
- return LINK_RATE_HIGH;
- case DP_TEST_LINK_RATE_HBR2:
- return LINK_RATE_HIGH2;
- case DP_TEST_LINK_RATE_HBR3:
- return LINK_RATE_HIGH3;
- case DP_TEST_LINK_RATE_UHBR10:
- return LINK_RATE_UHBR10;
- case DP_TEST_LINK_RATE_UHBR20:
- return LINK_RATE_UHBR20;
- case DP_TEST_LINK_RATE_UHBR13_5:
- return LINK_RATE_UHBR13_5;
- default:
- return LINK_RATE_UNKNOWN;
- }
-}
-
-static void dp_test_send_link_training(struct dc_link *link)
-{
- struct dc_link_settings link_settings = {0};
- uint8_t test_rate = 0;
-
- core_link_read_dpcd(
- link,
- DP_TEST_LANE_COUNT,
- (unsigned char *)(&link_settings.lane_count),
- 1);
- core_link_read_dpcd(
- link,
- DP_TEST_LINK_RATE,
- &test_rate,
- 1);
- link_settings.link_rate = get_link_rate_from_test_link_rate(test_rate);
-
- /* Set preferred link settings */
- link->verified_link_cap.lane_count = link_settings.lane_count;
- link->verified_link_cap.link_rate = link_settings.link_rate;
-
- dp_retrain_link_dp_test(link, &link_settings, false);
-}
-
-/* TODO Raven hbr2 compliance eye output is unstable
- * (toggling on and off) with debugger break
- * This caueses intermittent PHY automation failure
- * Need to look into the root cause */
-static void dp_test_send_phy_test_pattern(struct dc_link *link)
-{
- union phy_test_pattern dpcd_test_pattern;
- union lane_adjust dpcd_lane_adjustment[2];
- unsigned char dpcd_post_cursor_2_adjustment = 0;
- unsigned char test_pattern_buffer[
- (DP_TEST_264BIT_CUSTOM_PATTERN_263_256 -
- DP_TEST_264BIT_CUSTOM_PATTERN_7_0)+1] = {0};
- unsigned int test_pattern_size = 0;
- enum dp_test_pattern test_pattern;
- union lane_adjust dpcd_lane_adjust;
- unsigned int lane;
- struct link_training_settings link_training_settings;
-
- dpcd_test_pattern.raw = 0;
- memset(dpcd_lane_adjustment, 0, sizeof(dpcd_lane_adjustment));
- memset(&link_training_settings, 0, sizeof(link_training_settings));
-
- /* get phy test pattern and pattern parameters from DP receiver */
- core_link_read_dpcd(
- link,
- DP_PHY_TEST_PATTERN,
- &dpcd_test_pattern.raw,
- sizeof(dpcd_test_pattern));
- core_link_read_dpcd(
- link,
- DP_ADJUST_REQUEST_LANE0_1,
- &dpcd_lane_adjustment[0].raw,
- sizeof(dpcd_lane_adjustment));
-
- /* prepare link training settings */
- link_training_settings.link_settings = link->cur_link_settings;
-
- link_training_settings.lttpr_mode = dp_decide_lttpr_mode(link, &link->cur_link_settings);
-
- if ((link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
- link_training_settings.lttpr_mode == LTTPR_MODE_TRANSPARENT)
- dp_fixed_vs_pe_read_lane_adjust(
- link,
- link_training_settings.dpcd_lane_settings);
-
- /*get post cursor 2 parameters
- * For DP 1.1a or eariler, this DPCD register's value is 0
- * For DP 1.2 or later:
- * Bits 1:0 = POST_CURSOR2_LANE0; Bits 3:2 = POST_CURSOR2_LANE1
- * Bits 5:4 = POST_CURSOR2_LANE2; Bits 7:6 = POST_CURSOR2_LANE3
- */
- core_link_read_dpcd(
- link,
- DP_ADJUST_REQUEST_POST_CURSOR2,
- &dpcd_post_cursor_2_adjustment,
- sizeof(dpcd_post_cursor_2_adjustment));
-
- /* translate request */
- switch (dpcd_test_pattern.bits.PATTERN) {
- case PHY_TEST_PATTERN_D10_2:
- test_pattern = DP_TEST_PATTERN_D102;
- break;
- case PHY_TEST_PATTERN_SYMBOL_ERROR:
- test_pattern = DP_TEST_PATTERN_SYMBOL_ERROR;
- break;
- case PHY_TEST_PATTERN_PRBS7:
- test_pattern = DP_TEST_PATTERN_PRBS7;
- break;
- case PHY_TEST_PATTERN_80BIT_CUSTOM:
- test_pattern = DP_TEST_PATTERN_80BIT_CUSTOM;
- break;
- case PHY_TEST_PATTERN_CP2520_1:
- /* CP2520 pattern is unstable, temporarily use TPS4 instead */
- test_pattern = (link->dc->caps.force_dp_tps4_for_cp2520 == 1) ?
- DP_TEST_PATTERN_TRAINING_PATTERN4 :
- DP_TEST_PATTERN_HBR2_COMPLIANCE_EYE;
- break;
- case PHY_TEST_PATTERN_CP2520_2:
- /* CP2520 pattern is unstable, temporarily use TPS4 instead */
- test_pattern = (link->dc->caps.force_dp_tps4_for_cp2520 == 1) ?
- DP_TEST_PATTERN_TRAINING_PATTERN4 :
- DP_TEST_PATTERN_HBR2_COMPLIANCE_EYE;
- break;
- case PHY_TEST_PATTERN_CP2520_3:
- test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN4;
- break;
- case PHY_TEST_PATTERN_128b_132b_TPS1:
- test_pattern = DP_TEST_PATTERN_128b_132b_TPS1;
- break;
- case PHY_TEST_PATTERN_128b_132b_TPS2:
- test_pattern = DP_TEST_PATTERN_128b_132b_TPS2;
- break;
- case PHY_TEST_PATTERN_PRBS9:
- test_pattern = DP_TEST_PATTERN_PRBS9;
- break;
- case PHY_TEST_PATTERN_PRBS11:
- test_pattern = DP_TEST_PATTERN_PRBS11;
- break;
- case PHY_TEST_PATTERN_PRBS15:
- test_pattern = DP_TEST_PATTERN_PRBS15;
- break;
- case PHY_TEST_PATTERN_PRBS23:
- test_pattern = DP_TEST_PATTERN_PRBS23;
- break;
- case PHY_TEST_PATTERN_PRBS31:
- test_pattern = DP_TEST_PATTERN_PRBS31;
- break;
- case PHY_TEST_PATTERN_264BIT_CUSTOM:
- test_pattern = DP_TEST_PATTERN_264BIT_CUSTOM;
- break;
- case PHY_TEST_PATTERN_SQUARE_PULSE:
- test_pattern = DP_TEST_PATTERN_SQUARE_PULSE;
- break;
- default:
- test_pattern = DP_TEST_PATTERN_VIDEO_MODE;
- break;
- }
-
- if (test_pattern == DP_TEST_PATTERN_80BIT_CUSTOM) {
- test_pattern_size = (DP_TEST_80BIT_CUSTOM_PATTERN_79_72 -
- DP_TEST_80BIT_CUSTOM_PATTERN_7_0) + 1;
- core_link_read_dpcd(
- link,
- DP_TEST_80BIT_CUSTOM_PATTERN_7_0,
- test_pattern_buffer,
- test_pattern_size);
- }
-
- if (test_pattern == DP_TEST_PATTERN_SQUARE_PULSE) {
- test_pattern_size = 1; // Square pattern data is 1 byte (DP spec)
- core_link_read_dpcd(
- link,
- DP_PHY_SQUARE_PATTERN,
- test_pattern_buffer,
- test_pattern_size);
- }
-
- if (test_pattern == DP_TEST_PATTERN_264BIT_CUSTOM) {
- test_pattern_size = (DP_TEST_264BIT_CUSTOM_PATTERN_263_256-
- DP_TEST_264BIT_CUSTOM_PATTERN_7_0) + 1;
- core_link_read_dpcd(
- link,
- DP_TEST_264BIT_CUSTOM_PATTERN_7_0,
- test_pattern_buffer,
- test_pattern_size);
- }
-
- for (lane = 0; lane <
- (unsigned int)(link->cur_link_settings.lane_count);
- lane++) {
- dpcd_lane_adjust.raw =
- get_nibble_at_index(&dpcd_lane_adjustment[0].raw, lane);
- if (dp_get_link_encoding_format(&link->cur_link_settings) ==
- DP_8b_10b_ENCODING) {
- link_training_settings.hw_lane_settings[lane].VOLTAGE_SWING =
- (enum dc_voltage_swing)
- (dpcd_lane_adjust.bits.VOLTAGE_SWING_LANE);
- link_training_settings.hw_lane_settings[lane].PRE_EMPHASIS =
- (enum dc_pre_emphasis)
- (dpcd_lane_adjust.bits.PRE_EMPHASIS_LANE);
- link_training_settings.hw_lane_settings[lane].POST_CURSOR2 =
- (enum dc_post_cursor2)
- ((dpcd_post_cursor_2_adjustment >> (lane * 2)) & 0x03);
- } else if (dp_get_link_encoding_format(&link->cur_link_settings) ==
- DP_128b_132b_ENCODING) {
- link_training_settings.hw_lane_settings[lane].FFE_PRESET.raw =
- dpcd_lane_adjust.tx_ffe.PRESET_VALUE;
- }
- }
-
- dp_hw_to_dpcd_lane_settings(&link_training_settings,
- link_training_settings.hw_lane_settings,
- link_training_settings.dpcd_lane_settings);
- /*Usage: Measure DP physical lane signal
- * by DP SI test equipment automatically.
- * PHY test pattern request is generated by equipment via HPD interrupt.
- * HPD needs to be active all the time. HPD should be active
- * all the time. Do not touch it.
- * forward request to DS
- */
- dc_link_dp_set_test_pattern(
- link,
- test_pattern,
- DP_TEST_PATTERN_COLOR_SPACE_UNDEFINED,
- &link_training_settings,
- test_pattern_buffer,
- test_pattern_size);
-}
-
-static void dp_test_send_link_test_pattern(struct dc_link *link)
-{
- union link_test_pattern dpcd_test_pattern;
- union test_misc dpcd_test_params;
- enum dp_test_pattern test_pattern;
- enum dp_test_pattern_color_space test_pattern_color_space =
- DP_TEST_PATTERN_COLOR_SPACE_UNDEFINED;
- enum dc_color_depth requestColorDepth = COLOR_DEPTH_UNDEFINED;
- struct pipe_ctx *pipes = link->dc->current_state->res_ctx.pipe_ctx;
- struct pipe_ctx *pipe_ctx = NULL;
- int i;
-
- memset(&dpcd_test_pattern, 0, sizeof(dpcd_test_pattern));
- memset(&dpcd_test_params, 0, sizeof(dpcd_test_params));
-
- for (i = 0; i < MAX_PIPES; i++) {
- if (pipes[i].stream == NULL)
- continue;
-
- if (pipes[i].stream->link == link && !pipes[i].top_pipe && !pipes[i].prev_odm_pipe) {
- pipe_ctx = &pipes[i];
- break;
- }
- }
-
- if (pipe_ctx == NULL)
- return;
-
- /* get link test pattern and pattern parameters */
- core_link_read_dpcd(
- link,
- DP_TEST_PATTERN,
- &dpcd_test_pattern.raw,
- sizeof(dpcd_test_pattern));
- core_link_read_dpcd(
- link,
- DP_TEST_MISC0,
- &dpcd_test_params.raw,
- sizeof(dpcd_test_params));
-
- switch (dpcd_test_pattern.bits.PATTERN) {
- case LINK_TEST_PATTERN_COLOR_RAMP:
- test_pattern = DP_TEST_PATTERN_COLOR_RAMP;
- break;
- case LINK_TEST_PATTERN_VERTICAL_BARS:
- test_pattern = DP_TEST_PATTERN_VERTICAL_BARS;
- break; /* black and white */
- case LINK_TEST_PATTERN_COLOR_SQUARES:
- test_pattern = (dpcd_test_params.bits.DYN_RANGE ==
- TEST_DYN_RANGE_VESA ?
- DP_TEST_PATTERN_COLOR_SQUARES :
- DP_TEST_PATTERN_COLOR_SQUARES_CEA);
- break;
- default:
- test_pattern = DP_TEST_PATTERN_VIDEO_MODE;
- break;
- }
-
- if (dpcd_test_params.bits.CLR_FORMAT == 0)
- test_pattern_color_space = DP_TEST_PATTERN_COLOR_SPACE_RGB;
- else
- test_pattern_color_space = dpcd_test_params.bits.YCBCR_COEFS ?
- DP_TEST_PATTERN_COLOR_SPACE_YCBCR709 :
- DP_TEST_PATTERN_COLOR_SPACE_YCBCR601;
-
- switch (dpcd_test_params.bits.BPC) {
- case 0: // 6 bits
- requestColorDepth = COLOR_DEPTH_666;
- break;
- case 1: // 8 bits
- requestColorDepth = COLOR_DEPTH_888;
- break;
- case 2: // 10 bits
- requestColorDepth = COLOR_DEPTH_101010;
- break;
- case 3: // 12 bits
- requestColorDepth = COLOR_DEPTH_121212;
- break;
- default:
- break;
- }
-
- switch (dpcd_test_params.bits.CLR_FORMAT) {
- case 0:
- pipe_ctx->stream->timing.pixel_encoding = PIXEL_ENCODING_RGB;
- break;
- case 1:
- pipe_ctx->stream->timing.pixel_encoding = PIXEL_ENCODING_YCBCR422;
- break;
- case 2:
- pipe_ctx->stream->timing.pixel_encoding = PIXEL_ENCODING_YCBCR444;
- break;
- default:
- pipe_ctx->stream->timing.pixel_encoding = PIXEL_ENCODING_RGB;
- break;
- }
-
-
- if (requestColorDepth != COLOR_DEPTH_UNDEFINED
- && pipe_ctx->stream->timing.display_color_depth != requestColorDepth) {
- DC_LOG_DEBUG("%s: original bpc %d, changing to %d\n",
- __func__,
- pipe_ctx->stream->timing.display_color_depth,
- requestColorDepth);
- pipe_ctx->stream->timing.display_color_depth = requestColorDepth;
- }
-
- dp_update_dsc_config(pipe_ctx);
-
- dc_link_dp_set_test_pattern(
- link,
- test_pattern,
- test_pattern_color_space,
- NULL,
- NULL,
- 0);
-}
-
-static void dp_test_get_audio_test_data(struct dc_link *link, bool disable_video)
-{
- union audio_test_mode dpcd_test_mode = {0};
- struct audio_test_pattern_type dpcd_pattern_type = {0};
- union audio_test_pattern_period dpcd_pattern_period[AUDIO_CHANNELS_COUNT] = {0};
- enum dp_test_pattern test_pattern = DP_TEST_PATTERN_AUDIO_OPERATOR_DEFINED;
-
- struct pipe_ctx *pipes = link->dc->current_state->res_ctx.pipe_ctx;
- struct pipe_ctx *pipe_ctx = &pipes[0];
- unsigned int channel_count;
- unsigned int channel = 0;
- unsigned int modes = 0;
- unsigned int sampling_rate_in_hz = 0;
-
- // get audio test mode and test pattern parameters
- core_link_read_dpcd(
- link,
- DP_TEST_AUDIO_MODE,
- &dpcd_test_mode.raw,
- sizeof(dpcd_test_mode));
-
- core_link_read_dpcd(
- link,
- DP_TEST_AUDIO_PATTERN_TYPE,
- &dpcd_pattern_type.value,
- sizeof(dpcd_pattern_type));
-
- channel_count = min(dpcd_test_mode.bits.channel_count + 1, AUDIO_CHANNELS_COUNT);
-
- // read pattern periods for requested channels when sawTooth pattern is requested
- if (dpcd_pattern_type.value == AUDIO_TEST_PATTERN_SAWTOOTH ||
- dpcd_pattern_type.value == AUDIO_TEST_PATTERN_OPERATOR_DEFINED) {
-
- test_pattern = (dpcd_pattern_type.value == AUDIO_TEST_PATTERN_SAWTOOTH) ?
- DP_TEST_PATTERN_AUDIO_SAWTOOTH : DP_TEST_PATTERN_AUDIO_OPERATOR_DEFINED;
- // read period for each channel
- for (channel = 0; channel < channel_count; channel++) {
- core_link_read_dpcd(
- link,
- DP_TEST_AUDIO_PERIOD_CH1 + channel,
- &dpcd_pattern_period[channel].raw,
- sizeof(dpcd_pattern_period[channel]));
- }
- }
-
- // translate sampling rate
- switch (dpcd_test_mode.bits.sampling_rate) {
- case AUDIO_SAMPLING_RATE_32KHZ:
- sampling_rate_in_hz = 32000;
- break;
- case AUDIO_SAMPLING_RATE_44_1KHZ:
- sampling_rate_in_hz = 44100;
- break;
- case AUDIO_SAMPLING_RATE_48KHZ:
- sampling_rate_in_hz = 48000;
- break;
- case AUDIO_SAMPLING_RATE_88_2KHZ:
- sampling_rate_in_hz = 88200;
- break;
- case AUDIO_SAMPLING_RATE_96KHZ:
- sampling_rate_in_hz = 96000;
- break;
- case AUDIO_SAMPLING_RATE_176_4KHZ:
- sampling_rate_in_hz = 176400;
- break;
- case AUDIO_SAMPLING_RATE_192KHZ:
- sampling_rate_in_hz = 192000;
- break;
- default:
- sampling_rate_in_hz = 0;
- break;
- }
-
- link->audio_test_data.flags.test_requested = 1;
- link->audio_test_data.flags.disable_video = disable_video;
- link->audio_test_data.sampling_rate = sampling_rate_in_hz;
- link->audio_test_data.channel_count = channel_count;
- link->audio_test_data.pattern_type = test_pattern;
-
- if (test_pattern == DP_TEST_PATTERN_AUDIO_SAWTOOTH) {
- for (modes = 0; modes < pipe_ctx->stream->audio_info.mode_count; modes++) {
- link->audio_test_data.pattern_period[modes] = dpcd_pattern_period[modes].bits.pattern_period;
- }
- }
-}
-
-void dc_link_dp_handle_automated_test(struct dc_link *link)
-{
- union test_request test_request;
- union test_response test_response;
-
- memset(&test_request, 0, sizeof(test_request));
- memset(&test_response, 0, sizeof(test_response));
-
- core_link_read_dpcd(
- link,
- DP_TEST_REQUEST,
- &test_request.raw,
- sizeof(union test_request));
- if (test_request.bits.LINK_TRAINING) {
- /* ACK first to let DP RX test box monitor LT sequence */
- test_response.bits.ACK = 1;
- core_link_write_dpcd(
- link,
- DP_TEST_RESPONSE,
- &test_response.raw,
- sizeof(test_response));
- dp_test_send_link_training(link);
- /* no acknowledge request is needed again */
- test_response.bits.ACK = 0;
- }
- if (test_request.bits.LINK_TEST_PATTRN) {
- dp_test_send_link_test_pattern(link);
- test_response.bits.ACK = 1;
- }
-
- if (test_request.bits.AUDIO_TEST_PATTERN) {
- dp_test_get_audio_test_data(link, test_request.bits.TEST_AUDIO_DISABLED_VIDEO);
- test_response.bits.ACK = 1;
- }
-
- if (test_request.bits.PHY_TEST_PATTERN) {
- dp_test_send_phy_test_pattern(link);
- test_response.bits.ACK = 1;
- }
-
- /* send request acknowledgment */
- if (test_response.bits.ACK)
- core_link_write_dpcd(
- link,
- DP_TEST_RESPONSE,
- &test_response.raw,
- sizeof(test_response));
-}
-
-void dc_link_dp_handle_link_loss(struct dc_link *link)
-{
- int i;
- struct pipe_ctx *pipe_ctx;
-
- for (i = 0; i < MAX_PIPES; i++) {
- pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
- if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link == link)
- break;
- }
-
- if (pipe_ctx == NULL || pipe_ctx->stream == NULL)
- return;
-
- for (i = 0; i < MAX_PIPES; i++) {
- pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
- if (pipe_ctx && pipe_ctx->stream && !pipe_ctx->stream->dpms_off &&
- pipe_ctx->stream->link == link && !pipe_ctx->prev_odm_pipe)
- core_link_disable_stream(pipe_ctx);
- }
-
- for (i = 0; i < MAX_PIPES; i++) {
- pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
- if (pipe_ctx && pipe_ctx->stream && !pipe_ctx->stream->dpms_off
- && pipe_ctx->stream->link == link && !pipe_ctx->prev_odm_pipe) {
- // Always use max settings here for DP 1.4a LL Compliance CTS
- if (link->is_automated) {
- pipe_ctx->link_config.dp_link_settings.lane_count =
- link->verified_link_cap.lane_count;
- pipe_ctx->link_config.dp_link_settings.link_rate =
- link->verified_link_cap.link_rate;
- pipe_ctx->link_config.dp_link_settings.link_spread =
- link->verified_link_cap.link_spread;
- }
- core_link_enable_stream(link->dc->current_state, pipe_ctx);
- }
- }
-}
-
-bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd_irq_dpcd_data, bool *out_link_loss,
- bool defer_handling, bool *has_left_work)
-{
- union hpd_irq_data hpd_irq_dpcd_data = {0};
- union device_service_irq device_service_clear = {0};
- enum dc_status result;
- bool status = false;
-
- if (out_link_loss)
- *out_link_loss = false;
-
- if (has_left_work)
- *has_left_work = false;
- /* For use cases related to down stream connection status change,
- * PSR and device auto test, refer to function handle_sst_hpd_irq
- * in DAL2.1*/
-
- DC_LOG_HW_HPD_IRQ("%s: Got short pulse HPD on link %d\n",
- __func__, link->link_index);
-
-
- /* All the "handle_hpd_irq_xxx()" methods
- * should be called only after
- * dal_dpsst_ls_read_hpd_irq_data
- * Order of calls is important too
- */
- result = read_hpd_rx_irq_data(link, &hpd_irq_dpcd_data);
- if (out_hpd_irq_dpcd_data)
- *out_hpd_irq_dpcd_data = hpd_irq_dpcd_data;
-
- if (result != DC_OK) {
- DC_LOG_HW_HPD_IRQ("%s: DPCD read failed to obtain irq data\n",
- __func__);
- return false;
- }
-
- if (hpd_irq_dpcd_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
- // Workaround for DP 1.4a LL Compliance CTS as USB4 has to share encoders unlike DP and USBC
- link->is_automated = true;
- device_service_clear.bits.AUTOMATED_TEST = 1;
- core_link_write_dpcd(
- link,
- DP_DEVICE_SERVICE_IRQ_VECTOR,
- &device_service_clear.raw,
- sizeof(device_service_clear.raw));
- device_service_clear.raw = 0;
- if (defer_handling && has_left_work)
- *has_left_work = true;
- else
- dc_link_dp_handle_automated_test(link);
- return false;
- }
-
- if (!dc_link_dp_allow_hpd_rx_irq(link)) {
- DC_LOG_HW_HPD_IRQ("%s: skipping HPD handling on %d\n",
- __func__, link->link_index);
- return false;
- }
-
- if (handle_hpd_irq_psr_sink(link))
- /* PSR-related error was detected and handled */
- return true;
-
- /* If PSR-related error handled, Main link may be off,
- * so do not handle as a normal sink status change interrupt.
- */
-
- if (hpd_irq_dpcd_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) {
- if (defer_handling && has_left_work)
- *has_left_work = true;
- return true;
- }
-
- /* check if we have MST msg and return since we poll for it */
- if (hpd_irq_dpcd_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
- if (defer_handling && has_left_work)
- *has_left_work = true;
- return false;
- }
-
- /* For now we only handle 'Downstream port status' case.
- * If we got sink count changed it means
- * Downstream port status changed,
- * then DM should call DC to do the detection.
- * NOTE: Do not handle link loss on eDP since it is internal link*/
- if ((link->connector_signal != SIGNAL_TYPE_EDP) &&
- hpd_rx_irq_check_link_loss_status(
- link,
- &hpd_irq_dpcd_data)) {
- /* Connectivity log: link loss */
- CONN_DATA_LINK_LOSS(link,
- hpd_irq_dpcd_data.raw,
- sizeof(hpd_irq_dpcd_data),
- "Status: ");
-
- if (defer_handling && has_left_work)
- *has_left_work = true;
- else
- dc_link_dp_handle_link_loss(link);
-
- status = false;
- if (out_link_loss)
- *out_link_loss = true;
-
- dp_trace_link_loss_increment(link);
- }
-
- if (link->type == dc_connection_sst_branch &&
- hpd_irq_dpcd_data.bytes.sink_cnt.bits.SINK_COUNT
- != link->dpcd_sink_count)
- status = true;
-
- /* reasons for HPD RX:
- * 1. Link Loss - ie Re-train the Link
- * 2. MST sideband message
- * 3. Automated Test - ie. Internal Commit
- * 4. CP (copy protection) - (not interesting for DM???)
- * 5. DRR
- * 6. Downstream Port status changed
- * -ie. Detect - this the only one
- * which is interesting for DM because
- * it must call dc_link_detect.
- */
- return status;
-}
-
-/*query dpcd for version and mst cap addresses*/
-bool is_mst_supported(struct dc_link *link)
-{
- bool mst = false;
- enum dc_status st = DC_OK;
- union dpcd_rev rev;
- union mstm_cap cap;
-
- if (link->preferred_training_settings.mst_enable &&
- *link->preferred_training_settings.mst_enable == false) {
- return false;
- }
-
- rev.raw = 0;
- cap.raw = 0;
-
- st = core_link_read_dpcd(link, DP_DPCD_REV, &rev.raw,
- sizeof(rev));
-
- if (st == DC_OK && rev.raw >= DPCD_REV_12) {
-
- st = core_link_read_dpcd(link, DP_MSTM_CAP,
- &cap.raw, sizeof(cap));
- if (st == DC_OK && cap.bits.MST_CAP == 1)
- mst = true;
- }
- return mst;
-
-}
-
-bool is_dp_active_dongle(const struct dc_link *link)
-{
- return (link->dpcd_caps.dongle_type >= DISPLAY_DONGLE_DP_VGA_CONVERTER) &&
- (link->dpcd_caps.dongle_type <= DISPLAY_DONGLE_DP_HDMI_CONVERTER);
-}
-
-bool is_dp_branch_device(const struct dc_link *link)
-{
- return link->dpcd_caps.is_branch_dev;
-}
-
-static int translate_dpcd_max_bpc(enum dpcd_downstream_port_max_bpc bpc)
-{
- switch (bpc) {
- case DOWN_STREAM_MAX_8BPC:
- return 8;
- case DOWN_STREAM_MAX_10BPC:
- return 10;
- case DOWN_STREAM_MAX_12BPC:
- return 12;
- case DOWN_STREAM_MAX_16BPC:
- return 16;
- default:
- break;
- }
-
- return -1;
-}
-
-#if defined(CONFIG_DRM_AMD_DC_DCN)
-uint32_t dc_link_bw_kbps_from_raw_frl_link_rate_data(uint8_t bw)
-{
- switch (bw) {
- case 0b001:
- return 9000000;
- case 0b010:
- return 18000000;
- case 0b011:
- return 24000000;
- case 0b100:
- return 32000000;
- case 0b101:
- return 40000000;
- case 0b110:
- return 48000000;
- }
-
- return 0;
-}
-
-/*
- * Return PCON's post FRL link training supported BW if its non-zero, otherwise return max_supported_frl_bw.
- */
-static uint32_t intersect_frl_link_bw_support(
- const uint32_t max_supported_frl_bw_in_kbps,
- const union hdmi_encoded_link_bw hdmi_encoded_link_bw)
-{
- uint32_t supported_bw_in_kbps = max_supported_frl_bw_in_kbps;
-
- // HDMI_ENCODED_LINK_BW bits are only valid if HDMI Link Configuration bit is 1 (FRL mode)
- if (hdmi_encoded_link_bw.bits.FRL_MODE) {
- if (hdmi_encoded_link_bw.bits.BW_48Gbps)
- supported_bw_in_kbps = 48000000;
- else if (hdmi_encoded_link_bw.bits.BW_40Gbps)
- supported_bw_in_kbps = 40000000;
- else if (hdmi_encoded_link_bw.bits.BW_32Gbps)
- supported_bw_in_kbps = 32000000;
- else if (hdmi_encoded_link_bw.bits.BW_24Gbps)
- supported_bw_in_kbps = 24000000;
- else if (hdmi_encoded_link_bw.bits.BW_18Gbps)
- supported_bw_in_kbps = 18000000;
- else if (hdmi_encoded_link_bw.bits.BW_9Gbps)
- supported_bw_in_kbps = 9000000;
- }
-
- return supported_bw_in_kbps;
-}
-#endif
-
-static void read_dp_device_vendor_id(struct dc_link *link)
-{
- struct dp_device_vendor_id dp_id;
-
- /* read IEEE branch device id */
- core_link_read_dpcd(
- link,
- DP_BRANCH_OUI,
- (uint8_t *)&dp_id,
- sizeof(dp_id));
-
- link->dpcd_caps.branch_dev_id =
- (dp_id.ieee_oui[0] << 16) +
- (dp_id.ieee_oui[1] << 8) +
- dp_id.ieee_oui[2];
-
- memmove(
- link->dpcd_caps.branch_dev_name,
- dp_id.ieee_device_id,
- sizeof(dp_id.ieee_device_id));
-}
-
-
-
-static void get_active_converter_info(
- uint8_t data, struct dc_link *link)
-{
- union dp_downstream_port_present ds_port = { .byte = data };
- memset(&link->dpcd_caps.dongle_caps, 0, sizeof(link->dpcd_caps.dongle_caps));
-
- /* decode converter info*/
- if (!ds_port.fields.PORT_PRESENT) {
- link->dpcd_caps.dongle_type = DISPLAY_DONGLE_NONE;
- ddc_service_set_dongle_type(link->ddc,
- link->dpcd_caps.dongle_type);
- link->dpcd_caps.is_branch_dev = false;
- return;
- }
-
- /* DPCD 0x5 bit 0 = 1, it indicate it's branch device */
- link->dpcd_caps.is_branch_dev = ds_port.fields.PORT_PRESENT;
-
- switch (ds_port.fields.PORT_TYPE) {
- case DOWNSTREAM_VGA:
- link->dpcd_caps.dongle_type = DISPLAY_DONGLE_DP_VGA_CONVERTER;
- break;
- case DOWNSTREAM_DVI_HDMI_DP_PLUS_PLUS:
- /* At this point we don't know is it DVI or HDMI or DP++,
- * assume DVI.*/
- link->dpcd_caps.dongle_type = DISPLAY_DONGLE_DP_DVI_CONVERTER;
- break;
- default:
- link->dpcd_caps.dongle_type = DISPLAY_DONGLE_NONE;
- break;
- }
-
- if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_11) {
- uint8_t det_caps[16]; /* CTS 4.2.2.7 expects source to read Detailed Capabilities Info : 00080h-0008F.*/
- union dwnstream_port_caps_byte0 *port_caps =
- (union dwnstream_port_caps_byte0 *)det_caps;
- if (core_link_read_dpcd(link, DP_DOWNSTREAM_PORT_0,
- det_caps, sizeof(det_caps)) == DC_OK) {
-
- switch (port_caps->bits.DWN_STRM_PORTX_TYPE) {
- /*Handle DP case as DONGLE_NONE*/
- case DOWN_STREAM_DETAILED_DP:
- link->dpcd_caps.dongle_type = DISPLAY_DONGLE_NONE;
- break;
- case DOWN_STREAM_DETAILED_VGA:
- link->dpcd_caps.dongle_type =
- DISPLAY_DONGLE_DP_VGA_CONVERTER;
- break;
- case DOWN_STREAM_DETAILED_DVI:
- link->dpcd_caps.dongle_type =
- DISPLAY_DONGLE_DP_DVI_CONVERTER;
- break;
- case DOWN_STREAM_DETAILED_HDMI:
- case DOWN_STREAM_DETAILED_DP_PLUS_PLUS:
- /*Handle DP++ active converter case, process DP++ case as HDMI case according DP1.4 spec*/
- link->dpcd_caps.dongle_type =
- DISPLAY_DONGLE_DP_HDMI_CONVERTER;
-
- link->dpcd_caps.dongle_caps.dongle_type = link->dpcd_caps.dongle_type;
- if (ds_port.fields.DETAILED_CAPS) {
-
- union dwnstream_port_caps_byte3_hdmi
- hdmi_caps = {.raw = det_caps[3] };
- union dwnstream_port_caps_byte2
- hdmi_color_caps = {.raw = det_caps[2] };
- link->dpcd_caps.dongle_caps.dp_hdmi_max_pixel_clk_in_khz =
- det_caps[1] * 2500;
-
- link->dpcd_caps.dongle_caps.is_dp_hdmi_s3d_converter =
- hdmi_caps.bits.FRAME_SEQ_TO_FRAME_PACK;
- /*YCBCR capability only for HDMI case*/
- if (port_caps->bits.DWN_STRM_PORTX_TYPE
- == DOWN_STREAM_DETAILED_HDMI) {
- link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr422_pass_through =
- hdmi_caps.bits.YCrCr422_PASS_THROUGH;
- link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr420_pass_through =
- hdmi_caps.bits.YCrCr420_PASS_THROUGH;
- link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr422_converter =
- hdmi_caps.bits.YCrCr422_CONVERSION;
- link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr420_converter =
- hdmi_caps.bits.YCrCr420_CONVERSION;
- }
-
- link->dpcd_caps.dongle_caps.dp_hdmi_max_bpc =
- translate_dpcd_max_bpc(
- hdmi_color_caps.bits.MAX_BITS_PER_COLOR_COMPONENT);
-
-#if defined(CONFIG_DRM_AMD_DC_DCN)
- if (link->dc->caps.dp_hdmi21_pcon_support) {
- union hdmi_encoded_link_bw hdmi_encoded_link_bw;
-
- link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps =
- dc_link_bw_kbps_from_raw_frl_link_rate_data(
- hdmi_color_caps.bits.MAX_ENCODED_LINK_BW_SUPPORT);
-
- // Intersect reported max link bw support with the supported link rate post FRL link training
- if (core_link_read_dpcd(link, DP_PCON_HDMI_POST_FRL_STATUS,
- &hdmi_encoded_link_bw.raw, sizeof(hdmi_encoded_link_bw)) == DC_OK) {
- link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps = intersect_frl_link_bw_support(
- link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps,
- hdmi_encoded_link_bw);
- }
-
- if (link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps > 0)
- link->dpcd_caps.dongle_caps.extendedCapValid = true;
- }
-#endif
-
- if (link->dpcd_caps.dongle_caps.dp_hdmi_max_pixel_clk_in_khz != 0)
- link->dpcd_caps.dongle_caps.extendedCapValid = true;
- }
-
- break;
- }
- }
- }
-
- ddc_service_set_dongle_type(link->ddc, link->dpcd_caps.dongle_type);
-
- {
- struct dp_sink_hw_fw_revision dp_hw_fw_revision;
-
- core_link_read_dpcd(
- link,
- DP_BRANCH_REVISION_START,
- (uint8_t *)&dp_hw_fw_revision,
- sizeof(dp_hw_fw_revision));
-
- link->dpcd_caps.branch_hw_revision =
- dp_hw_fw_revision.ieee_hw_rev;
-
- memmove(
- link->dpcd_caps.branch_fw_revision,
- dp_hw_fw_revision.ieee_fw_rev,
- sizeof(dp_hw_fw_revision.ieee_fw_rev));
- }
- if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_14 &&
- link->dpcd_caps.dongle_type != DISPLAY_DONGLE_NONE) {
- union dp_dfp_cap_ext dfp_cap_ext;
- memset(&dfp_cap_ext, '\0', sizeof (dfp_cap_ext));
- core_link_read_dpcd(
- link,
- DP_DFP_CAPABILITY_EXTENSION_SUPPORT,
- dfp_cap_ext.raw,
- sizeof(dfp_cap_ext.raw));
- link->dpcd_caps.dongle_caps.dfp_cap_ext.supported = dfp_cap_ext.fields.supported;
- link->dpcd_caps.dongle_caps.dfp_cap_ext.max_pixel_rate_in_mps =
- dfp_cap_ext.fields.max_pixel_rate_in_mps[0] +
- (dfp_cap_ext.fields.max_pixel_rate_in_mps[1] << 8);
- link->dpcd_caps.dongle_caps.dfp_cap_ext.max_video_h_active_width =
- dfp_cap_ext.fields.max_video_h_active_width[0] +
- (dfp_cap_ext.fields.max_video_h_active_width[1] << 8);
- link->dpcd_caps.dongle_caps.dfp_cap_ext.max_video_v_active_height =
- dfp_cap_ext.fields.max_video_v_active_height[0] +
- (dfp_cap_ext.fields.max_video_v_active_height[1] << 8);
- link->dpcd_caps.dongle_caps.dfp_cap_ext.encoding_format_caps =
- dfp_cap_ext.fields.encoding_format_caps;
- link->dpcd_caps.dongle_caps.dfp_cap_ext.rgb_color_depth_caps =
- dfp_cap_ext.fields.rgb_color_depth_caps;
- link->dpcd_caps.dongle_caps.dfp_cap_ext.ycbcr444_color_depth_caps =
- dfp_cap_ext.fields.ycbcr444_color_depth_caps;
- link->dpcd_caps.dongle_caps.dfp_cap_ext.ycbcr422_color_depth_caps =
- dfp_cap_ext.fields.ycbcr422_color_depth_caps;
- link->dpcd_caps.dongle_caps.dfp_cap_ext.ycbcr420_color_depth_caps =
- dfp_cap_ext.fields.ycbcr420_color_depth_caps;
- DC_LOG_DP2("DFP capability extension is read at link %d", link->link_index);
- DC_LOG_DP2("\tdfp_cap_ext.supported = %s", link->dpcd_caps.dongle_caps.dfp_cap_ext.supported ? "true" : "false");
- DC_LOG_DP2("\tdfp_cap_ext.max_pixel_rate_in_mps = %d", link->dpcd_caps.dongle_caps.dfp_cap_ext.max_pixel_rate_in_mps);
- DC_LOG_DP2("\tdfp_cap_ext.max_video_h_active_width = %d", link->dpcd_caps.dongle_caps.dfp_cap_ext.max_video_h_active_width);
- DC_LOG_DP2("\tdfp_cap_ext.max_video_v_active_height = %d", link->dpcd_caps.dongle_caps.dfp_cap_ext.max_video_v_active_height);
- }
-}
-
-static void dp_wa_power_up_0010FA(struct dc_link *link, uint8_t *dpcd_data,
- int length)
-{
- int retry = 0;
-
- if (!link->dpcd_caps.dpcd_rev.raw) {
- do {
- dp_receiver_power_ctrl(link, true);
- core_link_read_dpcd(link, DP_DPCD_REV,
- dpcd_data, length);
- link->dpcd_caps.dpcd_rev.raw = dpcd_data[
- DP_DPCD_REV -
- DP_DPCD_REV];
- } while (retry++ < 4 && !link->dpcd_caps.dpcd_rev.raw);
- }
-
- if (link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_VGA_CONVERTER) {
- switch (link->dpcd_caps.branch_dev_id) {
- /* 0010FA active dongles (DP-VGA, DP-DLDVI converters) power down
- * all internal circuits including AUX communication preventing
- * reading DPCD table and EDID (spec violation).
- * Encoder will skip DP RX power down on disable_output to
- * keep receiver powered all the time.*/
- case DP_BRANCH_DEVICE_ID_0010FA:
- case DP_BRANCH_DEVICE_ID_0080E1:
- case DP_BRANCH_DEVICE_ID_00E04C:
- link->wa_flags.dp_keep_receiver_powered = true;
- break;
-
- /* TODO: May need work around for other dongles. */
- default:
- link->wa_flags.dp_keep_receiver_powered = false;
- break;
- }
- } else
- link->wa_flags.dp_keep_receiver_powered = false;
-}
-
-/* Read additional sink caps defined in source specific DPCD area
- * This function currently only reads from SinkCapability address (DP_SOURCE_SINK_CAP)
- */
-static bool dpcd_read_sink_ext_caps(struct dc_link *link)
-{
- uint8_t dpcd_data;
-
- if (!link)
- return false;
-
- if (core_link_read_dpcd(link, DP_SOURCE_SINK_CAP, &dpcd_data, 1) != DC_OK)
- return false;
-
- link->dpcd_sink_ext_caps.raw = dpcd_data;
- return true;
-}
-
-enum dc_status dp_retrieve_lttpr_cap(struct dc_link *link)
-{
- uint8_t lttpr_dpcd_data[8];
- enum dc_status status = DC_ERROR_UNEXPECTED;
- bool is_lttpr_present = false;
-
- /* Logic to determine LTTPR support*/
- bool vbios_lttpr_interop = link->dc->caps.vbios_lttpr_aware;
-
- if (!vbios_lttpr_interop || !link->dc->caps.extended_aux_timeout_support)
- return false;
-
- /* By reading LTTPR capability, RX assumes that we will enable
- * LTTPR extended aux timeout if LTTPR is present.
- */
- status = core_link_read_dpcd(link,
- DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV,
- lttpr_dpcd_data,
- sizeof(lttpr_dpcd_data));
-
- link->dpcd_caps.lttpr_caps.revision.raw =
- lttpr_dpcd_data[DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV -
- DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
-
- link->dpcd_caps.lttpr_caps.max_link_rate =
- lttpr_dpcd_data[DP_MAX_LINK_RATE_PHY_REPEATER -
- DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
-
- link->dpcd_caps.lttpr_caps.phy_repeater_cnt =
- lttpr_dpcd_data[DP_PHY_REPEATER_CNT -
- DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
-
- link->dpcd_caps.lttpr_caps.max_lane_count =
- lttpr_dpcd_data[DP_MAX_LANE_COUNT_PHY_REPEATER -
- DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
-
- link->dpcd_caps.lttpr_caps.mode =
- lttpr_dpcd_data[DP_PHY_REPEATER_MODE -
- DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
-
- link->dpcd_caps.lttpr_caps.max_ext_timeout =
- lttpr_dpcd_data[DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT -
- DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
- link->dpcd_caps.lttpr_caps.main_link_channel_coding.raw =
- lttpr_dpcd_data[DP_MAIN_LINK_CHANNEL_CODING_PHY_REPEATER -
- DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
-
- link->dpcd_caps.lttpr_caps.supported_128b_132b_rates.raw =
- lttpr_dpcd_data[DP_PHY_REPEATER_128B132B_RATES -
- DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
-
- /* If this chip cap is set, at least one retimer must exist in the chain
- * Override count to 1 if we receive a known bad count (0 or an invalid value)
- */
- if (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN &&
- (dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) == 0)) {
- ASSERT(0);
- link->dpcd_caps.lttpr_caps.phy_repeater_cnt = 0x80;
- DC_LOG_DC("lttpr_caps forced phy_repeater_cnt = %d\n", link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
- }
-
- /* Attempt to train in LTTPR transparent mode if repeater count exceeds 8. */
- is_lttpr_present = dp_is_lttpr_present(link);
-
- if (is_lttpr_present)
- CONN_DATA_DETECT(link, lttpr_dpcd_data, sizeof(lttpr_dpcd_data), "LTTPR Caps: ");
-
- DC_LOG_DC("is_lttpr_present = %d\n", is_lttpr_present);
- return status;
-}
-
-bool dp_is_lttpr_present(struct dc_link *link)
-{
- return (dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) != 0 &&
- link->dpcd_caps.lttpr_caps.max_lane_count > 0 &&
- link->dpcd_caps.lttpr_caps.max_lane_count <= 4 &&
- link->dpcd_caps.lttpr_caps.revision.raw >= 0x14);
-}
-
-enum lttpr_mode dp_decide_lttpr_mode(struct dc_link *link, struct dc_link_settings *link_setting)
-{
- enum dp_link_encoding encoding = dp_get_link_encoding_format(link_setting);
-
- if (encoding == DP_8b_10b_ENCODING)
- return dp_decide_8b_10b_lttpr_mode(link);
- else if (encoding == DP_128b_132b_ENCODING)
- return dp_decide_128b_132b_lttpr_mode(link);
-
- ASSERT(0);
- return LTTPR_MODE_NON_LTTPR;
-}
-
-void dp_get_lttpr_mode_override(struct dc_link *link, enum lttpr_mode *override)
-{
- if (!dp_is_lttpr_present(link))
- return;
-
- if (link->dc->debug.lttpr_mode_override == LTTPR_MODE_TRANSPARENT) {
- *override = LTTPR_MODE_TRANSPARENT;
- } else if (link->dc->debug.lttpr_mode_override == LTTPR_MODE_NON_TRANSPARENT) {
- *override = LTTPR_MODE_NON_TRANSPARENT;
- } else if (link->dc->debug.lttpr_mode_override == LTTPR_MODE_NON_LTTPR) {
- *override = LTTPR_MODE_NON_LTTPR;
- }
- DC_LOG_DC("lttpr_mode_override chose LTTPR_MODE = %d\n", (uint8_t)(*override));
-}
-
-enum lttpr_mode dp_decide_8b_10b_lttpr_mode(struct dc_link *link)
-{
- bool is_lttpr_present = dp_is_lttpr_present(link);
- bool vbios_lttpr_force_non_transparent = link->dc->caps.vbios_lttpr_enable;
- bool vbios_lttpr_aware = link->dc->caps.vbios_lttpr_aware;
-
- if (!is_lttpr_present)
- return LTTPR_MODE_NON_LTTPR;
-
- if (vbios_lttpr_aware) {
- if (vbios_lttpr_force_non_transparent) {
- DC_LOG_DC("chose LTTPR_MODE_NON_TRANSPARENT due to VBIOS DCE_INFO_CAPS_LTTPR_SUPPORT_ENABLE set to 1.\n");
- return LTTPR_MODE_NON_TRANSPARENT;
- } else {
- DC_LOG_DC("chose LTTPR_MODE_NON_TRANSPARENT by default due to VBIOS not set DCE_INFO_CAPS_LTTPR_SUPPORT_ENABLE set to 1.\n");
- return LTTPR_MODE_TRANSPARENT;
- }
- }
-
- if (link->dc->config.allow_lttpr_non_transparent_mode.bits.DP1_4A &&
- link->dc->caps.extended_aux_timeout_support) {
- DC_LOG_DC("chose LTTPR_MODE_NON_TRANSPARENT by default and dc->config.allow_lttpr_non_transparent_mode.bits.DP1_4A set to 1.\n");
- return LTTPR_MODE_NON_TRANSPARENT;
- }
-
- DC_LOG_DC("chose LTTPR_MODE_NON_LTTPR.\n");
- return LTTPR_MODE_NON_LTTPR;
-}
-
-enum lttpr_mode dp_decide_128b_132b_lttpr_mode(struct dc_link *link)
-{
- enum lttpr_mode mode = LTTPR_MODE_NON_LTTPR;
-
- if (dp_is_lttpr_present(link))
- mode = LTTPR_MODE_NON_TRANSPARENT;
-
- DC_LOG_DC("128b_132b chose LTTPR_MODE %d.\n", mode);
- return mode;
-}
-
-static bool get_usbc_cable_id(struct dc_link *link, union dp_cable_id *cable_id)
-{
- union dmub_rb_cmd cmd;
-
- if (!link->ctx->dmub_srv ||
- link->ep_type != DISPLAY_ENDPOINT_PHY ||
- link->link_enc->features.flags.bits.DP_IS_USB_C == 0)
- return false;
-
- memset(&cmd, 0, sizeof(cmd));
- cmd.cable_id.header.type = DMUB_CMD_GET_USBC_CABLE_ID;
- cmd.cable_id.header.payload_bytes = sizeof(cmd.cable_id.data);
- cmd.cable_id.data.input.phy_inst = resource_transmitter_to_phy_idx(
- link->dc, link->link_enc->transmitter);
- if (dc_dmub_srv_cmd_with_reply_data(link->ctx->dmub_srv, &cmd) &&
- cmd.cable_id.header.ret_status == 1) {
- cable_id->raw = cmd.cable_id.data.output_raw;
- DC_LOG_DC("usbc_cable_id = %d.\n", cable_id->raw);
- }
- return cmd.cable_id.header.ret_status == 1;
-}
-
-static union dp_cable_id intersect_cable_id(
- union dp_cable_id *a, union dp_cable_id *b)
-{
- union dp_cable_id out;
-
- out.bits.UHBR10_20_CAPABILITY = MIN(a->bits.UHBR10_20_CAPABILITY,
- b->bits.UHBR10_20_CAPABILITY);
- out.bits.UHBR13_5_CAPABILITY = MIN(a->bits.UHBR13_5_CAPABILITY,
- b->bits.UHBR13_5_CAPABILITY);
- out.bits.CABLE_TYPE = MAX(a->bits.CABLE_TYPE, b->bits.CABLE_TYPE);
-
- return out;
-}
-
-static void retrieve_cable_id(struct dc_link *link)
-{
- union dp_cable_id usbc_cable_id;
-
- link->dpcd_caps.cable_id.raw = 0;
- core_link_read_dpcd(link, DP_CABLE_ATTRIBUTES_UPDATED_BY_DPRX,
- &link->dpcd_caps.cable_id.raw, sizeof(uint8_t));
-
- if (get_usbc_cable_id(link, &usbc_cable_id))
- link->dpcd_caps.cable_id = intersect_cable_id(
- &link->dpcd_caps.cable_id, &usbc_cable_id);
-}
-
-static enum dc_status wake_up_aux_channel(struct dc_link *link)
-{
- enum dc_status status = DC_ERROR_UNEXPECTED;
- uint32_t aux_channel_retry_cnt = 0;
- uint8_t dpcd_power_state = '\0';
-
- while (status != DC_OK && aux_channel_retry_cnt < 10) {
- status = core_link_read_dpcd(link, DP_SET_POWER,
- &dpcd_power_state, sizeof(dpcd_power_state));
-
- /* Delay 1 ms if AUX CH is in power down state. Based on spec
- * section 2.3.1.2, if AUX CH may be powered down due to
- * write to DPCD 600h = 2. Sink AUX CH is monitoring differential
- * signal and may need up to 1 ms before being able to reply.
- */
- if (status != DC_OK || dpcd_power_state == DP_SET_POWER_D3) {
- udelay(1000);
- aux_channel_retry_cnt++;
- }
- }
-
- if (status != DC_OK) {
- dpcd_power_state = DP_SET_POWER_D0;
- status = core_link_write_dpcd(
- link,
- DP_SET_POWER,
- &dpcd_power_state,
- sizeof(dpcd_power_state));
-
- dpcd_power_state = DP_SET_POWER_D3;
- status = core_link_write_dpcd(
- link,
- DP_SET_POWER,
- &dpcd_power_state,
- sizeof(dpcd_power_state));
- return DC_ERROR_UNEXPECTED;
- }
-
- return DC_OK;
-}
-
-static bool retrieve_link_cap(struct dc_link *link)
-{
- /* DP_ADAPTER_CAP - DP_DPCD_REV + 1 == 16 and also DP_DSC_BITS_PER_PIXEL_INC - DP_DSC_SUPPORT + 1 == 16,
- * which means size 16 will be good for both of those DPCD register block reads
- */
- uint8_t dpcd_data[16];
- /*Only need to read 1 byte starting from DP_DPRX_FEATURE_ENUMERATION_LIST.
- */
- uint8_t dpcd_dprx_data = '\0';
-
- struct dp_device_vendor_id sink_id;
- union down_stream_port_count down_strm_port_count;
- union edp_configuration_cap edp_config_cap;
- union dp_downstream_port_present ds_port = { 0 };
- enum dc_status status = DC_ERROR_UNEXPECTED;
- uint32_t read_dpcd_retry_cnt = 3;
- int i;
- struct dp_sink_hw_fw_revision dp_hw_fw_revision;
- const uint32_t post_oui_delay = 30; // 30ms
-
- memset(dpcd_data, '\0', sizeof(dpcd_data));
- memset(&down_strm_port_count,
- '\0', sizeof(union down_stream_port_count));
- memset(&edp_config_cap, '\0',
- sizeof(union edp_configuration_cap));
-
- /* if extended timeout is supported in hardware,
- * default to LTTPR timeout (3.2ms) first as a W/A for DP link layer
- * CTS 4.2.1.1 regression introduced by CTS specs requirement update.
- */
- dc_link_aux_try_to_configure_timeout(link->ddc,
- LINK_AUX_DEFAULT_LTTPR_TIMEOUT_PERIOD);
-
- status = dp_retrieve_lttpr_cap(link);
-
- if (status != DC_OK) {
- status = wake_up_aux_channel(link);
- if (status == DC_OK)
- dp_retrieve_lttpr_cap(link);
- else
- return false;
- }
-
- if (dp_is_lttpr_present(link))
- configure_lttpr_mode_transparent(link);
-
- /* Read DP tunneling information. */
- status = dpcd_get_tunneling_device_data(link);
-
- dpcd_set_source_specific_data(link);
- /* Sink may need to configure internals based on vendor, so allow some
- * time before proceeding with possibly vendor specific transactions
- */
- msleep(post_oui_delay);
-
- for (i = 0; i < read_dpcd_retry_cnt; i++) {
- status = core_link_read_dpcd(
- link,
- DP_DPCD_REV,
- dpcd_data,
- sizeof(dpcd_data));
- if (status == DC_OK)
- break;
- }
-
- if (status != DC_OK) {
- dm_error("%s: Read receiver caps dpcd data failed.\n", __func__);
- return false;
- }
-
- if (!dp_is_lttpr_present(link))
- dc_link_aux_try_to_configure_timeout(link->ddc, LINK_AUX_DEFAULT_TIMEOUT_PERIOD);
-
- {
- union training_aux_rd_interval aux_rd_interval;
-
- aux_rd_interval.raw =
- dpcd_data[DP_TRAINING_AUX_RD_INTERVAL];
-
- link->dpcd_caps.ext_receiver_cap_field_present =
- aux_rd_interval.bits.EXT_RECEIVER_CAP_FIELD_PRESENT == 1;
-
- if (aux_rd_interval.bits.EXT_RECEIVER_CAP_FIELD_PRESENT == 1) {
- uint8_t ext_cap_data[16];
-
- memset(ext_cap_data, '\0', sizeof(ext_cap_data));
- for (i = 0; i < read_dpcd_retry_cnt; i++) {
- status = core_link_read_dpcd(
- link,
- DP_DP13_DPCD_REV,
- ext_cap_data,
- sizeof(ext_cap_data));
- if (status == DC_OK) {
- memcpy(dpcd_data, ext_cap_data, sizeof(dpcd_data));
- break;
- }
- }
- if (status != DC_OK)
- dm_error("%s: Read extend caps data failed, use cap from dpcd 0.\n", __func__);
- }
- }
-
- link->dpcd_caps.dpcd_rev.raw =
- dpcd_data[DP_DPCD_REV - DP_DPCD_REV];
-
- if (link->dpcd_caps.ext_receiver_cap_field_present) {
- for (i = 0; i < read_dpcd_retry_cnt; i++) {
- status = core_link_read_dpcd(
- link,
- DP_DPRX_FEATURE_ENUMERATION_LIST,
- &dpcd_dprx_data,
- sizeof(dpcd_dprx_data));
- if (status == DC_OK)
- break;
- }
-
- link->dpcd_caps.dprx_feature.raw = dpcd_dprx_data;
-
- if (status != DC_OK)
- dm_error("%s: Read DPRX caps data failed.\n", __func__);
- }
-
- else {
- link->dpcd_caps.dprx_feature.raw = 0;
- }
-
-
- /* Error condition checking...
- * It is impossible for Sink to report Max Lane Count = 0.
- * It is possible for Sink to report Max Link Rate = 0, if it is
- * an eDP device that is reporting specialized link rates in the
- * SUPPORTED_LINK_RATE table.
- */
- if (dpcd_data[DP_MAX_LANE_COUNT - DP_DPCD_REV] == 0)
- return false;
-
- ds_port.byte = dpcd_data[DP_DOWNSTREAMPORT_PRESENT -
- DP_DPCD_REV];
-
- read_dp_device_vendor_id(link);
-
- /* TODO - decouple raw mst capability from policy decision */
- link->dpcd_caps.is_mst_capable = is_mst_supported(link);
-
- get_active_converter_info(ds_port.byte, link);
-
- dp_wa_power_up_0010FA(link, dpcd_data, sizeof(dpcd_data));
-
- down_strm_port_count.raw = dpcd_data[DP_DOWN_STREAM_PORT_COUNT -
- DP_DPCD_REV];
-
- link->dpcd_caps.allow_invalid_MSA_timing_param =
- down_strm_port_count.bits.IGNORE_MSA_TIMING_PARAM;
-
- link->dpcd_caps.max_ln_count.raw = dpcd_data[
- DP_MAX_LANE_COUNT - DP_DPCD_REV];
-
- link->dpcd_caps.max_down_spread.raw = dpcd_data[
- DP_MAX_DOWNSPREAD - DP_DPCD_REV];
-
- link->reported_link_cap.lane_count =
- link->dpcd_caps.max_ln_count.bits.MAX_LANE_COUNT;
- link->reported_link_cap.link_rate = get_link_rate_from_max_link_bw(
- dpcd_data[DP_MAX_LINK_RATE - DP_DPCD_REV]);
- link->reported_link_cap.link_spread =
- link->dpcd_caps.max_down_spread.bits.MAX_DOWN_SPREAD ?
- LINK_SPREAD_05_DOWNSPREAD_30KHZ : LINK_SPREAD_DISABLED;
-
- edp_config_cap.raw = dpcd_data[
- DP_EDP_CONFIGURATION_CAP - DP_DPCD_REV];
- link->dpcd_caps.panel_mode_edp =
- edp_config_cap.bits.ALT_SCRAMBLER_RESET;
- link->dpcd_caps.dpcd_display_control_capable =
- edp_config_cap.bits.DPCD_DISPLAY_CONTROL_CAPABLE;
- link->dpcd_caps.channel_coding_cap.raw =
- dpcd_data[DP_MAIN_LINK_CHANNEL_CODING - DP_DPCD_REV];
- link->test_pattern_enabled = false;
- link->compliance_test_state.raw = 0;
-
- /* read sink count */
- core_link_read_dpcd(link,
- DP_SINK_COUNT,
- &link->dpcd_caps.sink_count.raw,
- sizeof(link->dpcd_caps.sink_count.raw));
-
- /* read sink ieee oui */
- core_link_read_dpcd(link,
- DP_SINK_OUI,
- (uint8_t *)(&sink_id),
- sizeof(sink_id));
-
- link->dpcd_caps.sink_dev_id =
- (sink_id.ieee_oui[0] << 16) +
- (sink_id.ieee_oui[1] << 8) +
- (sink_id.ieee_oui[2]);
-
- memmove(
- link->dpcd_caps.sink_dev_id_str,
- sink_id.ieee_device_id,
- sizeof(sink_id.ieee_device_id));
-
- /* Quirk Apple MBP 2017 15" Retina panel: Wrong DP_MAX_LINK_RATE */
- {
- uint8_t str_mbp_2017[] = { 101, 68, 21, 101, 98, 97 };
-
- if ((link->dpcd_caps.sink_dev_id == 0x0010fa) &&
- !memcmp(link->dpcd_caps.sink_dev_id_str, str_mbp_2017,
- sizeof(str_mbp_2017))) {
- link->reported_link_cap.link_rate = 0x0c;
- }
- }
-
- core_link_read_dpcd(
- link,
- DP_SINK_HW_REVISION_START,
- (uint8_t *)&dp_hw_fw_revision,
- sizeof(dp_hw_fw_revision));
-
- link->dpcd_caps.sink_hw_revision =
- dp_hw_fw_revision.ieee_hw_rev;
-
- memmove(
- link->dpcd_caps.sink_fw_revision,
- dp_hw_fw_revision.ieee_fw_rev,
- sizeof(dp_hw_fw_revision.ieee_fw_rev));
-
- /* Quirk for Apple MBP 2018 15" Retina panels: wrong DP_MAX_LINK_RATE */
- {
- uint8_t str_mbp_2018[] = { 101, 68, 21, 103, 98, 97 };
- uint8_t fwrev_mbp_2018[] = { 7, 4 };
- uint8_t fwrev_mbp_2018_vega[] = { 8, 4 };
-
- /* We also check for the firmware revision as 16,1 models have an
- * identical device id and are incorrectly quirked otherwise.
- */
- if ((link->dpcd_caps.sink_dev_id == 0x0010fa) &&
- !memcmp(link->dpcd_caps.sink_dev_id_str, str_mbp_2018,
- sizeof(str_mbp_2018)) &&
- (!memcmp(link->dpcd_caps.sink_fw_revision, fwrev_mbp_2018,
- sizeof(fwrev_mbp_2018)) ||
- !memcmp(link->dpcd_caps.sink_fw_revision, fwrev_mbp_2018_vega,
- sizeof(fwrev_mbp_2018_vega)))) {
- link->reported_link_cap.link_rate = LINK_RATE_RBR2;
- }
- }
-
- memset(&link->dpcd_caps.dsc_caps, '\0',
- sizeof(link->dpcd_caps.dsc_caps));
- memset(&link->dpcd_caps.fec_cap, '\0', sizeof(link->dpcd_caps.fec_cap));
- /* Read DSC and FEC sink capabilities if DP revision is 1.4 and up */
- if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_14) {
- status = core_link_read_dpcd(
- link,
- DP_FEC_CAPABILITY,
- &link->dpcd_caps.fec_cap.raw,
- sizeof(link->dpcd_caps.fec_cap.raw));
- status = core_link_read_dpcd(
- link,
- DP_DSC_SUPPORT,
- link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
- sizeof(link->dpcd_caps.dsc_caps.dsc_basic_caps.raw));
- if (link->dpcd_caps.dongle_type != DISPLAY_DONGLE_NONE) {
- status = core_link_read_dpcd(
- link,
- DP_DSC_BRANCH_OVERALL_THROUGHPUT_0,
- link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
- sizeof(link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw));
- DC_LOG_DSC("DSC branch decoder capability is read at link %d", link->link_index);
- DC_LOG_DSC("\tBRANCH_OVERALL_THROUGHPUT_0 = 0x%02x",
- link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.fields.BRANCH_OVERALL_THROUGHPUT_0);
- DC_LOG_DSC("\tBRANCH_OVERALL_THROUGHPUT_1 = 0x%02x",
- link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.fields.BRANCH_OVERALL_THROUGHPUT_1);
- DC_LOG_DSC("\tBRANCH_MAX_LINE_WIDTH 0x%02x",
- link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.fields.BRANCH_MAX_LINE_WIDTH);
- }
-
- /* Apply work around to disable FEC and DSC for USB4 tunneling in TBT3 compatibility mode
- * only if required.
- */
- if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA &&
- link->dc->debug.dpia_debug.bits.enable_force_tbt3_work_around &&
- link->dpcd_caps.is_branch_dev &&
- link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 &&
- link->dpcd_caps.branch_hw_revision == DP_BRANCH_HW_REV_10 &&
- (link->dpcd_caps.fec_cap.bits.FEC_CAPABLE ||
- link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT)) {
- /* A TBT3 device is expected to report no support for FEC or DSC to a USB4 DPIA.
- * Clear FEC and DSC capabilities as a work around if that is not the case.
- */
- link->wa_flags.dpia_forced_tbt3_mode = true;
- memset(&link->dpcd_caps.dsc_caps, '\0', sizeof(link->dpcd_caps.dsc_caps));
- memset(&link->dpcd_caps.fec_cap, '\0', sizeof(link->dpcd_caps.fec_cap));
- DC_LOG_DSC("Clear DSC SUPPORT for USB4 link(%d) in TBT3 compatibility mode", link->link_index);
- } else
- link->wa_flags.dpia_forced_tbt3_mode = false;
- }
-
- if (!dpcd_read_sink_ext_caps(link))
- link->dpcd_sink_ext_caps.raw = 0;
-
- if (link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED) {
- DC_LOG_DP2("128b/132b encoding is supported at link %d", link->link_index);
-
- core_link_read_dpcd(link,
- DP_128b_132b_SUPPORTED_LINK_RATES,
- &link->dpcd_caps.dp_128b_132b_supported_link_rates.raw,
- sizeof(link->dpcd_caps.dp_128b_132b_supported_link_rates.raw));
- if (link->dpcd_caps.dp_128b_132b_supported_link_rates.bits.UHBR20)
- link->reported_link_cap.link_rate = LINK_RATE_UHBR20;
- else if (link->dpcd_caps.dp_128b_132b_supported_link_rates.bits.UHBR13_5)
- link->reported_link_cap.link_rate = LINK_RATE_UHBR13_5;
- else if (link->dpcd_caps.dp_128b_132b_supported_link_rates.bits.UHBR10)
- link->reported_link_cap.link_rate = LINK_RATE_UHBR10;
- else
- dm_error("%s: Invalid RX 128b_132b_supported_link_rates\n", __func__);
- DC_LOG_DP2("128b/132b supported link rates is read at link %d", link->link_index);
- DC_LOG_DP2("\tmax 128b/132b link rate support is %d.%d GHz",
- link->reported_link_cap.link_rate / 100,
- link->reported_link_cap.link_rate % 100);
-
- core_link_read_dpcd(link,
- DP_SINK_VIDEO_FALLBACK_FORMATS,
- &link->dpcd_caps.fallback_formats.raw,
- sizeof(link->dpcd_caps.fallback_formats.raw));
- DC_LOG_DP2("sink video fallback format is read at link %d", link->link_index);
- if (link->dpcd_caps.fallback_formats.bits.dp_1920x1080_60Hz_24bpp_support)
- DC_LOG_DP2("\t1920x1080@60Hz 24bpp fallback format supported");
- if (link->dpcd_caps.fallback_formats.bits.dp_1280x720_60Hz_24bpp_support)
- DC_LOG_DP2("\t1280x720@60Hz 24bpp fallback format supported");
- if (link->dpcd_caps.fallback_formats.bits.dp_1024x768_60Hz_24bpp_support)
- DC_LOG_DP2("\t1024x768@60Hz 24bpp fallback format supported");
- if (link->dpcd_caps.fallback_formats.raw == 0) {
- DC_LOG_DP2("\tno supported fallback formats, assume 1920x1080@60Hz 24bpp is supported");
- link->dpcd_caps.fallback_formats.bits.dp_1920x1080_60Hz_24bpp_support = 1;
- }
-
- core_link_read_dpcd(link,
- DP_FEC_CAPABILITY_1,
- &link->dpcd_caps.fec_cap1.raw,
- sizeof(link->dpcd_caps.fec_cap1.raw));
- DC_LOG_DP2("FEC CAPABILITY 1 is read at link %d", link->link_index);
- if (link->dpcd_caps.fec_cap1.bits.AGGREGATED_ERROR_COUNTERS_CAPABLE)
- DC_LOG_DP2("\tFEC aggregated error counters are supported");
- }
-
- retrieve_cable_id(link);
- dpcd_write_cable_id_to_dprx(link);
-
- /* Connectivity log: detection */
- CONN_DATA_DETECT(link, dpcd_data, sizeof(dpcd_data), "Rx Caps: ");
-
- return true;
-}
-
-bool dp_overwrite_extended_receiver_cap(struct dc_link *link)
-{
- uint8_t dpcd_data[16];
- uint32_t read_dpcd_retry_cnt = 3;
- enum dc_status status = DC_ERROR_UNEXPECTED;
- union dp_downstream_port_present ds_port = { 0 };
- union down_stream_port_count down_strm_port_count;
- union edp_configuration_cap edp_config_cap;
-
- int i;
-
- for (i = 0; i < read_dpcd_retry_cnt; i++) {
- status = core_link_read_dpcd(
- link,
- DP_DPCD_REV,
- dpcd_data,
- sizeof(dpcd_data));
- if (status == DC_OK)
- break;
- }
-
- link->dpcd_caps.dpcd_rev.raw =
- dpcd_data[DP_DPCD_REV - DP_DPCD_REV];
-
- if (dpcd_data[DP_MAX_LANE_COUNT - DP_DPCD_REV] == 0)
- return false;
-
- ds_port.byte = dpcd_data[DP_DOWNSTREAMPORT_PRESENT -
- DP_DPCD_REV];
-
- get_active_converter_info(ds_port.byte, link);
-
- down_strm_port_count.raw = dpcd_data[DP_DOWN_STREAM_PORT_COUNT -
- DP_DPCD_REV];
-
- link->dpcd_caps.allow_invalid_MSA_timing_param =
- down_strm_port_count.bits.IGNORE_MSA_TIMING_PARAM;
-
- link->dpcd_caps.max_ln_count.raw = dpcd_data[
- DP_MAX_LANE_COUNT - DP_DPCD_REV];
-
- link->dpcd_caps.max_down_spread.raw = dpcd_data[
- DP_MAX_DOWNSPREAD - DP_DPCD_REV];
-
- link->reported_link_cap.lane_count =
- link->dpcd_caps.max_ln_count.bits.MAX_LANE_COUNT;
- link->reported_link_cap.link_rate = dpcd_data[
- DP_MAX_LINK_RATE - DP_DPCD_REV];
- link->reported_link_cap.link_spread =
- link->dpcd_caps.max_down_spread.bits.MAX_DOWN_SPREAD ?
- LINK_SPREAD_05_DOWNSPREAD_30KHZ : LINK_SPREAD_DISABLED;
-
- edp_config_cap.raw = dpcd_data[
- DP_EDP_CONFIGURATION_CAP - DP_DPCD_REV];
- link->dpcd_caps.panel_mode_edp =
- edp_config_cap.bits.ALT_SCRAMBLER_RESET;
- link->dpcd_caps.dpcd_display_control_capable =
- edp_config_cap.bits.DPCD_DISPLAY_CONTROL_CAPABLE;
-
- return true;
-}
-
-bool detect_dp_sink_caps(struct dc_link *link)
-{
- return retrieve_link_cap(link);
-}
-
-static enum dc_link_rate linkRateInKHzToLinkRateMultiplier(uint32_t link_rate_in_khz)
-{
- enum dc_link_rate link_rate;
- // LinkRate is normally stored as a multiplier of 0.27 Gbps per lane. Do the translation.
- switch (link_rate_in_khz) {
- case 1620000:
- link_rate = LINK_RATE_LOW; // Rate_1 (RBR) - 1.62 Gbps/Lane
- break;
- case 2160000:
- link_rate = LINK_RATE_RATE_2; // Rate_2 - 2.16 Gbps/Lane
- break;
- case 2430000:
- link_rate = LINK_RATE_RATE_3; // Rate_3 - 2.43 Gbps/Lane
- break;
- case 2700000:
- link_rate = LINK_RATE_HIGH; // Rate_4 (HBR) - 2.70 Gbps/Lane
- break;
- case 3240000:
- link_rate = LINK_RATE_RBR2; // Rate_5 (RBR2) - 3.24 Gbps/Lane
- break;
- case 4320000:
- link_rate = LINK_RATE_RATE_6; // Rate_6 - 4.32 Gbps/Lane
- break;
- case 5400000:
- link_rate = LINK_RATE_HIGH2; // Rate_7 (HBR2) - 5.40 Gbps/Lane
- break;
- case 8100000:
- link_rate = LINK_RATE_HIGH3; // Rate_8 (HBR3) - 8.10 Gbps/Lane
- break;
- default:
- link_rate = LINK_RATE_UNKNOWN;
- break;
- }
- return link_rate;
-}
-
-void detect_edp_sink_caps(struct dc_link *link)
-{
- uint8_t supported_link_rates[16];
- uint32_t entry;
- uint32_t link_rate_in_khz;
- enum dc_link_rate link_rate = LINK_RATE_UNKNOWN;
- uint8_t backlight_adj_cap;
- uint8_t general_edp_cap;
-
- retrieve_link_cap(link);
- link->dpcd_caps.edp_supported_link_rates_count = 0;
- memset(supported_link_rates, 0, sizeof(supported_link_rates));
-
- /*
- * edp_supported_link_rates_count is only valid for eDP v1.4 or higher.
- * Per VESA eDP spec, "The DPCD revision for eDP v1.4 is 13h"
- */
- if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_13 &&
- (link->panel_config.ilr.optimize_edp_link_rate ||
- link->reported_link_cap.link_rate == LINK_RATE_UNKNOWN)) {
- // Read DPCD 00010h - 0001Fh 16 bytes at one shot
- core_link_read_dpcd(link, DP_SUPPORTED_LINK_RATES,
- supported_link_rates, sizeof(supported_link_rates));
-
- for (entry = 0; entry < 16; entry += 2) {
- // DPCD register reports per-lane link rate = 16-bit link rate capability
- // value X 200 kHz. Need multiplier to find link rate in kHz.
- link_rate_in_khz = (supported_link_rates[entry+1] * 0x100 +
- supported_link_rates[entry]) * 200;
-
- if (link_rate_in_khz != 0) {
- link_rate = linkRateInKHzToLinkRateMultiplier(link_rate_in_khz);
- link->dpcd_caps.edp_supported_link_rates[link->dpcd_caps.edp_supported_link_rates_count] = link_rate;
- link->dpcd_caps.edp_supported_link_rates_count++;
-
- if (link->reported_link_cap.link_rate < link_rate)
- link->reported_link_cap.link_rate = link_rate;
- }
- }
- }
- core_link_read_dpcd(link, DP_EDP_BACKLIGHT_ADJUSTMENT_CAP,
- &backlight_adj_cap, sizeof(backlight_adj_cap));
-
- link->dpcd_caps.dynamic_backlight_capable_edp =
- (backlight_adj_cap & DP_EDP_DYNAMIC_BACKLIGHT_CAP) ? true:false;
-
- core_link_read_dpcd(link, DP_EDP_GENERAL_CAP_1,
- &general_edp_cap, sizeof(general_edp_cap));
-
- link->dpcd_caps.set_power_state_capable_edp =
- (general_edp_cap & DP_EDP_SET_POWER_CAP) ? true:false;
-
- dc_link_set_default_brightness_aux(link);
-
- core_link_read_dpcd(link, DP_EDP_DPCD_REV,
- &link->dpcd_caps.edp_rev,
- sizeof(link->dpcd_caps.edp_rev));
- /*
- * PSR is only valid for eDP v1.3 or higher.
- */
- if (link->dpcd_caps.edp_rev >= DP_EDP_13) {
- core_link_read_dpcd(link, DP_PSR_SUPPORT,
- &link->dpcd_caps.psr_info.psr_version,
- sizeof(link->dpcd_caps.psr_info.psr_version));
- if (link->dpcd_caps.sink_dev_id == DP_BRANCH_DEVICE_ID_001CF8)
- core_link_read_dpcd(link, DP_FORCE_PSRSU_CAPABILITY,
- &link->dpcd_caps.psr_info.force_psrsu_cap,
- sizeof(link->dpcd_caps.psr_info.force_psrsu_cap));
- core_link_read_dpcd(link, DP_PSR_CAPS,
- &link->dpcd_caps.psr_info.psr_dpcd_caps.raw,
- sizeof(link->dpcd_caps.psr_info.psr_dpcd_caps.raw));
- if (link->dpcd_caps.psr_info.psr_dpcd_caps.bits.Y_COORDINATE_REQUIRED) {
- core_link_read_dpcd(link, DP_PSR2_SU_Y_GRANULARITY,
- &link->dpcd_caps.psr_info.psr2_su_y_granularity_cap,
- sizeof(link->dpcd_caps.psr_info.psr2_su_y_granularity_cap));
- }
- }
-
- /*
- * ALPM is only valid for eDP v1.4 or higher.
- */
- if (link->dpcd_caps.dpcd_rev.raw >= DP_EDP_14)
- core_link_read_dpcd(link, DP_RECEIVER_ALPM_CAP,
- &link->dpcd_caps.alpm_caps.raw,
- sizeof(link->dpcd_caps.alpm_caps.raw));
-}
-
-void dc_link_dp_enable_hpd(const struct dc_link *link)
-{
- struct link_encoder *encoder = link->link_enc;
-
- if (encoder != NULL && encoder->funcs->enable_hpd != NULL)
- encoder->funcs->enable_hpd(encoder);
-}
-
-void dc_link_dp_disable_hpd(const struct dc_link *link)
-{
- struct link_encoder *encoder = link->link_enc;
-
- if (encoder != NULL && encoder->funcs->enable_hpd != NULL)
- encoder->funcs->disable_hpd(encoder);
-}
-
-static bool is_dp_phy_pattern(enum dp_test_pattern test_pattern)
-{
- if ((DP_TEST_PATTERN_PHY_PATTERN_BEGIN <= test_pattern &&
- test_pattern <= DP_TEST_PATTERN_PHY_PATTERN_END) ||
- test_pattern == DP_TEST_PATTERN_VIDEO_MODE)
- return true;
- else
- return false;
-}
-
-static void set_crtc_test_pattern(struct dc_link *link,
- struct pipe_ctx *pipe_ctx,
- enum dp_test_pattern test_pattern,
- enum dp_test_pattern_color_space test_pattern_color_space)
-{
- enum controller_dp_test_pattern controller_test_pattern;
- enum dc_color_depth color_depth = pipe_ctx->
- stream->timing.display_color_depth;
- struct bit_depth_reduction_params params;
- struct output_pixel_processor *opp = pipe_ctx->stream_res.opp;
- int width = pipe_ctx->stream->timing.h_addressable +
- pipe_ctx->stream->timing.h_border_left +
- pipe_ctx->stream->timing.h_border_right;
- int height = pipe_ctx->stream->timing.v_addressable +
- pipe_ctx->stream->timing.v_border_bottom +
- pipe_ctx->stream->timing.v_border_top;
-
- memset(&params, 0, sizeof(params));
-
- switch (test_pattern) {
- case DP_TEST_PATTERN_COLOR_SQUARES:
- controller_test_pattern =
- CONTROLLER_DP_TEST_PATTERN_COLORSQUARES;
- break;
- case DP_TEST_PATTERN_COLOR_SQUARES_CEA:
- controller_test_pattern =
- CONTROLLER_DP_TEST_PATTERN_COLORSQUARES_CEA;
- break;
- case DP_TEST_PATTERN_VERTICAL_BARS:
- controller_test_pattern =
- CONTROLLER_DP_TEST_PATTERN_VERTICALBARS;
- break;
- case DP_TEST_PATTERN_HORIZONTAL_BARS:
- controller_test_pattern =
- CONTROLLER_DP_TEST_PATTERN_HORIZONTALBARS;
- break;
- case DP_TEST_PATTERN_COLOR_RAMP:
- controller_test_pattern =
- CONTROLLER_DP_TEST_PATTERN_COLORRAMP;
- break;
- default:
- controller_test_pattern =
- CONTROLLER_DP_TEST_PATTERN_VIDEOMODE;
- break;
- }
-
- switch (test_pattern) {
- case DP_TEST_PATTERN_COLOR_SQUARES:
- case DP_TEST_PATTERN_COLOR_SQUARES_CEA:
- case DP_TEST_PATTERN_VERTICAL_BARS:
- case DP_TEST_PATTERN_HORIZONTAL_BARS:
- case DP_TEST_PATTERN_COLOR_RAMP:
- {
- /* disable bit depth reduction */
- pipe_ctx->stream->bit_depth_params = params;
- opp->funcs->opp_program_bit_depth_reduction(opp, &params);
- if (pipe_ctx->stream_res.tg->funcs->set_test_pattern)
- pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg,
- controller_test_pattern, color_depth);
- else if (link->dc->hwss.set_disp_pattern_generator) {
- struct pipe_ctx *odm_pipe;
- enum controller_dp_color_space controller_color_space;
- int opp_cnt = 1;
- int offset = 0;
- int dpg_width = width;
-
- switch (test_pattern_color_space) {
- case DP_TEST_PATTERN_COLOR_SPACE_RGB:
- controller_color_space = CONTROLLER_DP_COLOR_SPACE_RGB;
- break;
- case DP_TEST_PATTERN_COLOR_SPACE_YCBCR601:
- controller_color_space = CONTROLLER_DP_COLOR_SPACE_YCBCR601;
- break;
- case DP_TEST_PATTERN_COLOR_SPACE_YCBCR709:
- controller_color_space = CONTROLLER_DP_COLOR_SPACE_YCBCR709;
- break;
- case DP_TEST_PATTERN_COLOR_SPACE_UNDEFINED:
- default:
- controller_color_space = CONTROLLER_DP_COLOR_SPACE_UDEFINED;
- DC_LOG_ERROR("%s: Color space must be defined for test pattern", __func__);
- ASSERT(0);
- break;
- }
-
- for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
- opp_cnt++;
- dpg_width = width / opp_cnt;
- offset = dpg_width;
-
- link->dc->hwss.set_disp_pattern_generator(link->dc,
- pipe_ctx,
- controller_test_pattern,
- controller_color_space,
- color_depth,
- NULL,
- dpg_width,
- height,
- 0);
-
- for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
- struct output_pixel_processor *odm_opp = odm_pipe->stream_res.opp;
-
- odm_opp->funcs->opp_program_bit_depth_reduction(odm_opp, &params);
- link->dc->hwss.set_disp_pattern_generator(link->dc,
- odm_pipe,
- controller_test_pattern,
- controller_color_space,
- color_depth,
- NULL,
- dpg_width,
- height,
- offset);
- offset += offset;
- }
- }
- }
- break;
- case DP_TEST_PATTERN_VIDEO_MODE:
- {
- /* restore bitdepth reduction */
- resource_build_bit_depth_reduction_params(pipe_ctx->stream, &params);
- pipe_ctx->stream->bit_depth_params = params;
- opp->funcs->opp_program_bit_depth_reduction(opp, &params);
- if (pipe_ctx->stream_res.tg->funcs->set_test_pattern)
- pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg,
- CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
- color_depth);
- else if (link->dc->hwss.set_disp_pattern_generator) {
- struct pipe_ctx *odm_pipe;
- int opp_cnt = 1;
- int dpg_width;
-
- for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
- opp_cnt++;
-
- dpg_width = width / opp_cnt;
- for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
- struct output_pixel_processor *odm_opp = odm_pipe->stream_res.opp;
-
- odm_opp->funcs->opp_program_bit_depth_reduction(odm_opp, &params);
- link->dc->hwss.set_disp_pattern_generator(link->dc,
- odm_pipe,
- CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
- CONTROLLER_DP_COLOR_SPACE_UDEFINED,
- color_depth,
- NULL,
- dpg_width,
- height,
- 0);
- }
- link->dc->hwss.set_disp_pattern_generator(link->dc,
- pipe_ctx,
- CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
- CONTROLLER_DP_COLOR_SPACE_UDEFINED,
- color_depth,
- NULL,
- dpg_width,
- height,
- 0);
- }
- }
- break;
-
- default:
- break;
- }
-}
-
-bool dc_link_dp_set_test_pattern(
- struct dc_link *link,
- enum dp_test_pattern test_pattern,
- enum dp_test_pattern_color_space test_pattern_color_space,
- const struct link_training_settings *p_link_settings,
- const unsigned char *p_custom_pattern,
- unsigned int cust_pattern_size)
-{
- struct pipe_ctx *pipes = link->dc->current_state->res_ctx.pipe_ctx;
- struct pipe_ctx *pipe_ctx = NULL;
- unsigned int lane;
- unsigned int i;
- unsigned char link_qual_pattern[LANE_COUNT_DP_MAX] = {0};
- union dpcd_training_pattern training_pattern;
- enum dpcd_phy_test_patterns pattern;
-
- memset(&training_pattern, 0, sizeof(training_pattern));
-
- for (i = 0; i < MAX_PIPES; i++) {
- if (pipes[i].stream == NULL)
- continue;
-
- if (pipes[i].stream->link == link && !pipes[i].top_pipe && !pipes[i].prev_odm_pipe) {
- pipe_ctx = &pipes[i];
- break;
- }
- }
-
- if (pipe_ctx == NULL)
- return false;
-
- /* Reset CRTC Test Pattern if it is currently running and request is VideoMode */
- if (link->test_pattern_enabled && test_pattern ==
- DP_TEST_PATTERN_VIDEO_MODE) {
- /* Set CRTC Test Pattern */
- set_crtc_test_pattern(link, pipe_ctx, test_pattern, test_pattern_color_space);
- dp_set_hw_test_pattern(link, &pipe_ctx->link_res, test_pattern,
- (uint8_t *)p_custom_pattern,
- (uint32_t)cust_pattern_size);
-
- /* Unblank Stream */
- link->dc->hwss.unblank_stream(
- pipe_ctx,
- &link->verified_link_cap);
- /* TODO:m_pHwss->MuteAudioEndpoint
- * (pPathMode->pDisplayPath, false);
- */
-
- /* Reset Test Pattern state */
- link->test_pattern_enabled = false;
-
- return true;
- }
-
- /* Check for PHY Test Patterns */
- if (is_dp_phy_pattern(test_pattern)) {
- /* Set DPCD Lane Settings before running test pattern */
- if (p_link_settings != NULL) {
- if ((link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
- p_link_settings->lttpr_mode == LTTPR_MODE_TRANSPARENT) {
- dp_fixed_vs_pe_set_retimer_lane_settings(
- link,
- p_link_settings->dpcd_lane_settings,
- p_link_settings->link_settings.lane_count);
- } else {
- dp_set_hw_lane_settings(link, &pipe_ctx->link_res, p_link_settings, DPRX);
- }
- dpcd_set_lane_settings(link, p_link_settings, DPRX);
- }
-
- /* Blank stream if running test pattern */
- if (test_pattern != DP_TEST_PATTERN_VIDEO_MODE) {
- /*TODO:
- * m_pHwss->
- * MuteAudioEndpoint(pPathMode->pDisplayPath, true);
- */
- /* Blank stream */
- pipes->stream_res.stream_enc->funcs->dp_blank(link, pipe_ctx->stream_res.stream_enc);
- }
-
- dp_set_hw_test_pattern(link, &pipe_ctx->link_res, test_pattern,
- (uint8_t *)p_custom_pattern,
- (uint32_t)cust_pattern_size);
-
- if (test_pattern != DP_TEST_PATTERN_VIDEO_MODE) {
- /* Set Test Pattern state */
- link->test_pattern_enabled = true;
- if (p_link_settings != NULL)
- dpcd_set_link_settings(link,
- p_link_settings);
- }
-
- switch (test_pattern) {
- case DP_TEST_PATTERN_VIDEO_MODE:
- pattern = PHY_TEST_PATTERN_NONE;
- break;
- case DP_TEST_PATTERN_D102:
- pattern = PHY_TEST_PATTERN_D10_2;
- break;
- case DP_TEST_PATTERN_SYMBOL_ERROR:
- pattern = PHY_TEST_PATTERN_SYMBOL_ERROR;
- break;
- case DP_TEST_PATTERN_PRBS7:
- pattern = PHY_TEST_PATTERN_PRBS7;
- break;
- case DP_TEST_PATTERN_80BIT_CUSTOM:
- pattern = PHY_TEST_PATTERN_80BIT_CUSTOM;
- break;
- case DP_TEST_PATTERN_CP2520_1:
- pattern = PHY_TEST_PATTERN_CP2520_1;
- break;
- case DP_TEST_PATTERN_CP2520_2:
- pattern = PHY_TEST_PATTERN_CP2520_2;
- break;
- case DP_TEST_PATTERN_CP2520_3:
- pattern = PHY_TEST_PATTERN_CP2520_3;
- break;
- case DP_TEST_PATTERN_128b_132b_TPS1:
- pattern = PHY_TEST_PATTERN_128b_132b_TPS1;
- break;
- case DP_TEST_PATTERN_128b_132b_TPS2:
- pattern = PHY_TEST_PATTERN_128b_132b_TPS2;
- break;
- case DP_TEST_PATTERN_PRBS9:
- pattern = PHY_TEST_PATTERN_PRBS9;
- break;
- case DP_TEST_PATTERN_PRBS11:
- pattern = PHY_TEST_PATTERN_PRBS11;
- break;
- case DP_TEST_PATTERN_PRBS15:
- pattern = PHY_TEST_PATTERN_PRBS15;
- break;
- case DP_TEST_PATTERN_PRBS23:
- pattern = PHY_TEST_PATTERN_PRBS23;
- break;
- case DP_TEST_PATTERN_PRBS31:
- pattern = PHY_TEST_PATTERN_PRBS31;
- break;
- case DP_TEST_PATTERN_264BIT_CUSTOM:
- pattern = PHY_TEST_PATTERN_264BIT_CUSTOM;
- break;
- case DP_TEST_PATTERN_SQUARE_PULSE:
- pattern = PHY_TEST_PATTERN_SQUARE_PULSE;
- break;
- default:
- return false;
- }
-
- if (test_pattern == DP_TEST_PATTERN_VIDEO_MODE
- /*TODO:&& !pPathMode->pDisplayPath->IsTargetPoweredOn()*/)
- return false;
-
- if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_12) {
-#if defined(CONFIG_DRM_AMD_DC_DCN)
- if (test_pattern == DP_TEST_PATTERN_SQUARE_PULSE)
- core_link_write_dpcd(link,
- DP_LINK_SQUARE_PATTERN,
- p_custom_pattern,
- 1);
-
-#endif
- /* tell receiver that we are sending qualification
- * pattern DP 1.2 or later - DP receiver's link quality
- * pattern is set using DPCD LINK_QUAL_LANEx_SET
- * register (0x10B~0x10E)\
- */
- for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++)
- link_qual_pattern[lane] =
- (unsigned char)(pattern);
-
- core_link_write_dpcd(link,
- DP_LINK_QUAL_LANE0_SET,
- link_qual_pattern,
- sizeof(link_qual_pattern));
- } else if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_10 ||
- link->dpcd_caps.dpcd_rev.raw == 0) {
- /* tell receiver that we are sending qualification
- * pattern DP 1.1a or earlier - DP receiver's link
- * quality pattern is set using
- * DPCD TRAINING_PATTERN_SET -> LINK_QUAL_PATTERN_SET
- * register (0x102). We will use v_1.3 when we are
- * setting test pattern for DP 1.1.
- */
- core_link_read_dpcd(link, DP_TRAINING_PATTERN_SET,
- &training_pattern.raw,
- sizeof(training_pattern));
- training_pattern.v1_3.LINK_QUAL_PATTERN_SET = pattern;
- core_link_write_dpcd(link, DP_TRAINING_PATTERN_SET,
- &training_pattern.raw,
- sizeof(training_pattern));
- }
- } else {
- enum dc_color_space color_space = COLOR_SPACE_UNKNOWN;
-
- switch (test_pattern_color_space) {
- case DP_TEST_PATTERN_COLOR_SPACE_RGB:
- color_space = COLOR_SPACE_SRGB;
- if (test_pattern == DP_TEST_PATTERN_COLOR_SQUARES_CEA)
- color_space = COLOR_SPACE_SRGB_LIMITED;
- break;
-
- case DP_TEST_PATTERN_COLOR_SPACE_YCBCR601:
- color_space = COLOR_SPACE_YCBCR601;
- if (test_pattern == DP_TEST_PATTERN_COLOR_SQUARES_CEA)
- color_space = COLOR_SPACE_YCBCR601_LIMITED;
- break;
- case DP_TEST_PATTERN_COLOR_SPACE_YCBCR709:
- color_space = COLOR_SPACE_YCBCR709;
- if (test_pattern == DP_TEST_PATTERN_COLOR_SQUARES_CEA)
- color_space = COLOR_SPACE_YCBCR709_LIMITED;
- break;
- default:
- break;
- }
-
- if (pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_enable) {
- if (pipe_ctx->stream && should_use_dmub_lock(pipe_ctx->stream->link)) {
- union dmub_hw_lock_flags hw_locks = { 0 };
- struct dmub_hw_lock_inst_flags inst_flags = { 0 };
-
- hw_locks.bits.lock_dig = 1;
- inst_flags.dig_inst = pipe_ctx->stream_res.tg->inst;
-
- dmub_hw_lock_mgr_cmd(link->ctx->dmub_srv,
- true,
- &hw_locks,
- &inst_flags);
- } else
- pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_enable(
- pipe_ctx->stream_res.tg);
- }
-
- pipe_ctx->stream_res.tg->funcs->lock(pipe_ctx->stream_res.tg);
- /* update MSA to requested color space */
- pipe_ctx->stream_res.stream_enc->funcs->dp_set_stream_attribute(pipe_ctx->stream_res.stream_enc,
- &pipe_ctx->stream->timing,
- color_space,
- pipe_ctx->stream->use_vsc_sdp_for_colorimetry,
- link->dpcd_caps.dprx_feature.bits.SST_SPLIT_SDP_CAP);
-
- if (pipe_ctx->stream->use_vsc_sdp_for_colorimetry) {
- if (test_pattern == DP_TEST_PATTERN_COLOR_SQUARES_CEA)
- pipe_ctx->stream->vsc_infopacket.sb[17] |= (1 << 7); // sb17 bit 7 Dynamic Range: 0 = VESA range, 1 = CTA range
- else
- pipe_ctx->stream->vsc_infopacket.sb[17] &= ~(1 << 7);
- resource_build_info_frame(pipe_ctx);
- link->dc->hwss.update_info_frame(pipe_ctx);
- }
-
- /* CRTC Patterns */
- set_crtc_test_pattern(link, pipe_ctx, test_pattern, test_pattern_color_space);
- pipe_ctx->stream_res.tg->funcs->unlock(pipe_ctx->stream_res.tg);
- pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg,
- CRTC_STATE_VACTIVE);
- pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg,
- CRTC_STATE_VBLANK);
- pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg,
- CRTC_STATE_VACTIVE);
-
- if (pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_disable) {
- if (pipe_ctx->stream && should_use_dmub_lock(pipe_ctx->stream->link)) {
- union dmub_hw_lock_flags hw_locks = { 0 };
- struct dmub_hw_lock_inst_flags inst_flags = { 0 };
-
- hw_locks.bits.lock_dig = 1;
- inst_flags.dig_inst = pipe_ctx->stream_res.tg->inst;
-
- dmub_hw_lock_mgr_cmd(link->ctx->dmub_srv,
- false,
- &hw_locks,
- &inst_flags);
- } else
- pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_disable(
- pipe_ctx->stream_res.tg);
- }
-
- /* Set Test Pattern state */
- link->test_pattern_enabled = true;
- }
-
- return true;
-}
-
-void dp_enable_mst_on_sink(struct dc_link *link, bool enable)
-{
- unsigned char mstmCntl;
-
- core_link_read_dpcd(link, DP_MSTM_CTRL, &mstmCntl, 1);
- if (enable)
- mstmCntl |= DP_MST_EN;
- else
- mstmCntl &= (~DP_MST_EN);
-
- core_link_write_dpcd(link, DP_MSTM_CTRL, &mstmCntl, 1);
-}
-
-void dp_set_panel_mode(struct dc_link *link, enum dp_panel_mode panel_mode)
-{
- union dpcd_edp_config edp_config_set;
- bool panel_mode_edp = false;
-
- memset(&edp_config_set, '\0', sizeof(union dpcd_edp_config));
-
- if (panel_mode != DP_PANEL_MODE_DEFAULT) {
-
- switch (panel_mode) {
- case DP_PANEL_MODE_EDP:
- case DP_PANEL_MODE_SPECIAL:
- panel_mode_edp = true;
- break;
-
- default:
- break;
- }
-
- /*set edp panel mode in receiver*/
- core_link_read_dpcd(
- link,
- DP_EDP_CONFIGURATION_SET,
- &edp_config_set.raw,
- sizeof(edp_config_set.raw));
-
- if (edp_config_set.bits.PANEL_MODE_EDP
- != panel_mode_edp) {
- enum dc_status result;
-
- edp_config_set.bits.PANEL_MODE_EDP =
- panel_mode_edp;
- result = core_link_write_dpcd(
- link,
- DP_EDP_CONFIGURATION_SET,
- &edp_config_set.raw,
- sizeof(edp_config_set.raw));
-
- ASSERT(result == DC_OK);
- }
- }
- DC_LOG_DETECTION_DP_CAPS("Link: %d eDP panel mode supported: %d "
- "eDP panel mode enabled: %d \n",
- link->link_index,
- link->dpcd_caps.panel_mode_edp,
- panel_mode_edp);
-}
-
-enum dp_panel_mode dp_get_panel_mode(struct dc_link *link)
-{
- /* We need to explicitly check that connector
- * is not DP. Some Travis_VGA get reported
- * by video bios as DP.
- */
- if (link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT) {
-
- switch (link->dpcd_caps.branch_dev_id) {
- case DP_BRANCH_DEVICE_ID_0022B9:
- /* alternate scrambler reset is required for Travis
- * for the case when external chip does not
- * provide sink device id, alternate scrambler
- * scheme will be overriden later by querying
- * Encoder features
- */
- if (strncmp(
- link->dpcd_caps.branch_dev_name,
- DP_VGA_LVDS_CONVERTER_ID_2,
- sizeof(
- link->dpcd_caps.
- branch_dev_name)) == 0) {
- return DP_PANEL_MODE_SPECIAL;
- }
- break;
- case DP_BRANCH_DEVICE_ID_00001A:
- /* alternate scrambler reset is required for Travis
- * for the case when external chip does not provide
- * sink device id, alternate scrambler scheme will
- * be overriden later by querying Encoder feature
- */
- if (strncmp(link->dpcd_caps.branch_dev_name,
- DP_VGA_LVDS_CONVERTER_ID_3,
- sizeof(
- link->dpcd_caps.
- branch_dev_name)) == 0) {
- return DP_PANEL_MODE_SPECIAL;
- }
- break;
- default:
- break;
- }
- }
-
- if (link->dpcd_caps.panel_mode_edp &&
- (link->connector_signal == SIGNAL_TYPE_EDP ||
- (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT &&
- link->is_internal_display))) {
- return DP_PANEL_MODE_EDP;
- }
-
- return DP_PANEL_MODE_DEFAULT;
-}
-
-enum dc_status dp_set_fec_ready(struct dc_link *link, const struct link_resource *link_res, bool ready)
-{
- /* FEC has to be "set ready" before the link training.
- * The policy is to always train with FEC
- * if the sink supports it and leave it enabled on link.
- * If FEC is not supported, disable it.
- */
- struct link_encoder *link_enc = NULL;
- enum dc_status status = DC_OK;
- uint8_t fec_config = 0;
-
- link_enc = link_enc_cfg_get_link_enc(link);
- ASSERT(link_enc);
-
- if (!dc_link_should_enable_fec(link))
- return status;
-
- if (link_enc->funcs->fec_set_ready &&
- link->dpcd_caps.fec_cap.bits.FEC_CAPABLE) {
- if (ready) {
- fec_config = 1;
- status = core_link_write_dpcd(link,
- DP_FEC_CONFIGURATION,
- &fec_config,
- sizeof(fec_config));
- if (status == DC_OK) {
- link_enc->funcs->fec_set_ready(link_enc, true);
- link->fec_state = dc_link_fec_ready;
- } else {
- link_enc->funcs->fec_set_ready(link_enc, false);
- link->fec_state = dc_link_fec_not_ready;
- dm_error("dpcd write failed to set fec_ready");
- }
- } else if (link->fec_state == dc_link_fec_ready) {
- fec_config = 0;
- status = core_link_write_dpcd(link,
- DP_FEC_CONFIGURATION,
- &fec_config,
- sizeof(fec_config));
- link_enc->funcs->fec_set_ready(link_enc, false);
- link->fec_state = dc_link_fec_not_ready;
- }
- }
-
- return status;
-}
-
-void dp_set_fec_enable(struct dc_link *link, bool enable)
-{
- struct link_encoder *link_enc = NULL;
-
- link_enc = link_enc_cfg_get_link_enc(link);
- ASSERT(link_enc);
-
- if (!dc_link_should_enable_fec(link))
- return;
-
- if (link_enc->funcs->fec_set_enable &&
- link->dpcd_caps.fec_cap.bits.FEC_CAPABLE) {
- if (link->fec_state == dc_link_fec_ready && enable) {
- /* Accord to DP spec, FEC enable sequence can first
- * be transmitted anytime after 1000 LL codes have
- * been transmitted on the link after link training
- * completion. Using 1 lane RBR should have the maximum
- * time for transmitting 1000 LL codes which is 6.173 us.
- * So use 7 microseconds delay instead.
- */
- udelay(7);
- link_enc->funcs->fec_set_enable(link_enc, true);
- link->fec_state = dc_link_fec_enabled;
- } else if (link->fec_state == dc_link_fec_enabled && !enable) {
- link_enc->funcs->fec_set_enable(link_enc, false);
- link->fec_state = dc_link_fec_ready;
- }
- }
-}
-
-void dpcd_set_source_specific_data(struct dc_link *link)
-{
- if (!link->dc->vendor_signature.is_valid) {
- enum dc_status __maybe_unused result_write_min_hblank = DC_NOT_SUPPORTED;
- struct dpcd_amd_signature amd_signature = {0};
- struct dpcd_amd_device_id amd_device_id = {0};
-
- amd_device_id.device_id_byte1 =
- (uint8_t)(link->ctx->asic_id.chip_id);
- amd_device_id.device_id_byte2 =
- (uint8_t)(link->ctx->asic_id.chip_id >> 8);
- amd_device_id.dce_version =
- (uint8_t)(link->ctx->dce_version);
- amd_device_id.dal_version_byte1 = 0x0; // needed? where to get?
- amd_device_id.dal_version_byte2 = 0x0; // needed? where to get?
-
- core_link_read_dpcd(link, DP_SOURCE_OUI,
- (uint8_t *)(&amd_signature),
- sizeof(amd_signature));
-
- if (!((amd_signature.AMD_IEEE_TxSignature_byte1 == 0x0) &&
- (amd_signature.AMD_IEEE_TxSignature_byte2 == 0x0) &&
- (amd_signature.AMD_IEEE_TxSignature_byte3 == 0x1A))) {
-
- amd_signature.AMD_IEEE_TxSignature_byte1 = 0x0;
- amd_signature.AMD_IEEE_TxSignature_byte2 = 0x0;
- amd_signature.AMD_IEEE_TxSignature_byte3 = 0x1A;
-
- core_link_write_dpcd(link, DP_SOURCE_OUI,
- (uint8_t *)(&amd_signature),
- sizeof(amd_signature));
- }
-
- core_link_write_dpcd(link, DP_SOURCE_OUI+0x03,
- (uint8_t *)(&amd_device_id),
- sizeof(amd_device_id));
-
- if (link->ctx->dce_version >= DCN_VERSION_2_0 &&
- link->dc->caps.min_horizontal_blanking_period != 0) {
-
- uint8_t hblank_size = (uint8_t)link->dc->caps.min_horizontal_blanking_period;
-
- if (link->preferred_link_setting.dpcd_source_device_specific_field_support) {
- result_write_min_hblank = core_link_write_dpcd(link,
- DP_SOURCE_MINIMUM_HBLANK_SUPPORTED, (uint8_t *)(&hblank_size),
- sizeof(hblank_size));
-
- if (result_write_min_hblank == DC_ERROR_UNEXPECTED)
- link->preferred_link_setting.dpcd_source_device_specific_field_support = false;
- } else {
- DC_LOG_DC("Sink device does not support 00340h DPCD write. Skipping on purpose.\n");
- }
- }
-
- DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION,
- WPP_BIT_FLAG_DC_DETECTION_DP_CAPS,
- "result=%u link_index=%u enum dce_version=%d DPCD=0x%04X min_hblank=%u branch_dev_id=0x%x branch_dev_name='%c%c%c%c%c%c'",
- result_write_min_hblank,
- link->link_index,
- link->ctx->dce_version,
- DP_SOURCE_MINIMUM_HBLANK_SUPPORTED,
- link->dc->caps.min_horizontal_blanking_period,
- link->dpcd_caps.branch_dev_id,
- link->dpcd_caps.branch_dev_name[0],
- link->dpcd_caps.branch_dev_name[1],
- link->dpcd_caps.branch_dev_name[2],
- link->dpcd_caps.branch_dev_name[3],
- link->dpcd_caps.branch_dev_name[4],
- link->dpcd_caps.branch_dev_name[5]);
- } else {
- core_link_write_dpcd(link, DP_SOURCE_OUI,
- link->dc->vendor_signature.data.raw,
- sizeof(link->dc->vendor_signature.data.raw));
- }
-}
-
-void dpcd_write_cable_id_to_dprx(struct dc_link *link)
-{
- if (!link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED ||
- link->dpcd_caps.cable_id.raw == 0 ||
- link->dprx_states.cable_id_written)
- return;
-
- core_link_write_dpcd(link, DP_CABLE_ATTRIBUTES_UPDATED_BY_DPTX,
- &link->dpcd_caps.cable_id.raw,
- sizeof(link->dpcd_caps.cable_id.raw));
-
- link->dprx_states.cable_id_written = 1;
-}
-
-bool dc_link_set_backlight_level_nits(struct dc_link *link,
- bool isHDR,
- uint32_t backlight_millinits,
- uint32_t transition_time_in_ms)
-{
- struct dpcd_source_backlight_set dpcd_backlight_set;
- uint8_t backlight_control = isHDR ? 1 : 0;
-
- if (!link || (link->connector_signal != SIGNAL_TYPE_EDP &&
- link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT))
- return false;
-
- // OLEDs have no PWM, they can only use AUX
- if (link->dpcd_sink_ext_caps.bits.oled == 1)
- backlight_control = 1;
-
- *(uint32_t *)&dpcd_backlight_set.backlight_level_millinits = backlight_millinits;
- *(uint16_t *)&dpcd_backlight_set.backlight_transition_time_ms = (uint16_t)transition_time_in_ms;
-
-
- if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_LEVEL,
- (uint8_t *)(&dpcd_backlight_set),
- sizeof(dpcd_backlight_set)) != DC_OK)
- return false;
-
- if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_CONTROL,
- &backlight_control, 1) != DC_OK)
- return false;
-
- return true;
-}
-
-bool dc_link_get_backlight_level_nits(struct dc_link *link,
- uint32_t *backlight_millinits_avg,
- uint32_t *backlight_millinits_peak)
-{
- union dpcd_source_backlight_get dpcd_backlight_get;
-
- memset(&dpcd_backlight_get, 0, sizeof(union dpcd_source_backlight_get));
-
- if (!link || (link->connector_signal != SIGNAL_TYPE_EDP &&
- link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT))
- return false;
-
- if (core_link_read_dpcd(link, DP_SOURCE_BACKLIGHT_CURRENT_PEAK,
- dpcd_backlight_get.raw,
- sizeof(union dpcd_source_backlight_get)) != DC_OK)
- return false;
-
- *backlight_millinits_avg =
- dpcd_backlight_get.bytes.backlight_millinits_avg;
- *backlight_millinits_peak =
- dpcd_backlight_get.bytes.backlight_millinits_peak;
-
- /* On non-supported panels dpcd_read usually succeeds with 0 returned */
- if (*backlight_millinits_avg == 0 ||
- *backlight_millinits_avg > *backlight_millinits_peak)
- return false;
-
- return true;
-}
-
-bool dc_link_backlight_enable_aux(struct dc_link *link, bool enable)
-{
- uint8_t backlight_enable = enable ? 1 : 0;
-
- if (!link || (link->connector_signal != SIGNAL_TYPE_EDP &&
- link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT))
- return false;
-
- if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_ENABLE,
- &backlight_enable, 1) != DC_OK)
- return false;
-
- return true;
-}
-
-// we read default from 0x320 because we expect BIOS wrote it there
-// regular get_backlight_nit reads from panel set at 0x326
-bool dc_link_read_default_bl_aux(struct dc_link *link, uint32_t *backlight_millinits)
-{
- if (!link || (link->connector_signal != SIGNAL_TYPE_EDP &&
- link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT))
- return false;
-
- if (core_link_read_dpcd(link, DP_SOURCE_BACKLIGHT_LEVEL,
- (uint8_t *) backlight_millinits,
- sizeof(uint32_t)) != DC_OK)
- return false;
-
- return true;
-}
-
-bool dc_link_set_default_brightness_aux(struct dc_link *link)
-{
- uint32_t default_backlight;
-
- if (link && link->dpcd_sink_ext_caps.bits.oled == 1) {
- if (!dc_link_read_default_bl_aux(link, &default_backlight))
- default_backlight = 150000;
- // if < 5 nits or > 5000, it might be wrong readback
- if (default_backlight < 5000 || default_backlight > 5000000)
- default_backlight = 150000; //
-
- return dc_link_set_backlight_level_nits(link, true,
- default_backlight, 0);
- }
- return false;
-}
-
-bool is_edp_ilr_optimization_required(struct dc_link *link, struct dc_crtc_timing *crtc_timing)
-{
- struct dc_link_settings link_setting;
- uint8_t link_bw_set;
- uint8_t link_rate_set;
- uint32_t req_bw;
- union lane_count_set lane_count_set = {0};
-
- ASSERT(link || crtc_timing); // invalid input
-
- if (link->dpcd_caps.edp_supported_link_rates_count == 0 ||
- !link->panel_config.ilr.optimize_edp_link_rate)
- return false;
-
-
- // Read DPCD 00100h to find if standard link rates are set
- core_link_read_dpcd(link, DP_LINK_BW_SET,
- &link_bw_set, sizeof(link_bw_set));
-
- if (link_bw_set) {
- DC_LOG_EVENT_LINK_TRAINING("eDP ILR: Optimization required, VBIOS used link_bw_set\n");
- return true;
- }
-
- // Read DPCD 00115h to find the edp link rate set used
- core_link_read_dpcd(link, DP_LINK_RATE_SET,
- &link_rate_set, sizeof(link_rate_set));
-
- // Read DPCD 00101h to find out the number of lanes currently set
- core_link_read_dpcd(link, DP_LANE_COUNT_SET,
- &lane_count_set.raw, sizeof(lane_count_set));
-
- req_bw = dc_bandwidth_in_kbps_from_timing(crtc_timing);
-
- if (!crtc_timing->flags.DSC)
- decide_edp_link_settings(link, &link_setting, req_bw);
- else
- decide_edp_link_settings_with_dsc(link, &link_setting, req_bw, LINK_RATE_UNKNOWN);
-
- if (link->dpcd_caps.edp_supported_link_rates[link_rate_set] != link_setting.link_rate ||
- lane_count_set.bits.LANE_COUNT_SET != link_setting.lane_count) {
- DC_LOG_EVENT_LINK_TRAINING("eDP ILR: Optimization required, VBIOS link_rate_set not optimal\n");
- return true;
- }
-
- DC_LOG_EVENT_LINK_TRAINING("eDP ILR: No optimization required, VBIOS set optimal link_rate_set\n");
- return false;
-}
-
-enum dp_link_encoding dp_get_link_encoding_format(const struct dc_link_settings *link_settings)
-{
- if ((link_settings->link_rate >= LINK_RATE_LOW) &&
- (link_settings->link_rate <= LINK_RATE_HIGH3))
- return DP_8b_10b_ENCODING;
- else if ((link_settings->link_rate >= LINK_RATE_UHBR10) &&
- (link_settings->link_rate <= LINK_RATE_UHBR20))
- return DP_128b_132b_ENCODING;
- return DP_UNKNOWN_ENCODING;
-}
-
-enum dp_link_encoding dc_link_dp_mst_decide_link_encoding_format(const struct dc_link *link)
-{
- struct dc_link_settings link_settings = {0};
-
- if (!dc_is_dp_signal(link->connector_signal))
- return DP_UNKNOWN_ENCODING;
-
- if (link->preferred_link_setting.lane_count !=
- LANE_COUNT_UNKNOWN &&
- link->preferred_link_setting.link_rate !=
- LINK_RATE_UNKNOWN) {
- link_settings = link->preferred_link_setting;
- } else {
- decide_mst_link_settings(link, &link_settings);
- }
-
- return dp_get_link_encoding_format(&link_settings);
-}
-
-// TODO - DP2.0 Link: Fix get_lane_status to handle LTTPR offset (SST and MST)
-static void get_lane_status(
- struct dc_link *link,
- uint32_t lane_count,
- union lane_status *status,
- union lane_align_status_updated *status_updated)
-{
- unsigned int lane;
- uint8_t dpcd_buf[3] = {0};
-
- if (status == NULL || status_updated == NULL) {
- return;
- }
-
- core_link_read_dpcd(
- link,
- DP_LANE0_1_STATUS,
- dpcd_buf,
- sizeof(dpcd_buf));
-
- for (lane = 0; lane < lane_count; lane++) {
- status[lane].raw = get_nibble_at_index(&dpcd_buf[0], lane);
- }
-
- status_updated->raw = dpcd_buf[2];
-}
-
-bool dpcd_write_128b_132b_sst_payload_allocation_table(
- const struct dc_stream_state *stream,
- struct dc_link *link,
- struct link_mst_stream_allocation_table *proposed_table,
- bool allocate)
-{
- const uint8_t vc_id = 1; /// VC ID always 1 for SST
- const uint8_t start_time_slot = 0; /// Always start at time slot 0 for SST
- bool result = false;
- uint8_t req_slot_count = 0;
- struct fixed31_32 avg_time_slots_per_mtp = { 0 };
- union payload_table_update_status update_status = { 0 };
- const uint32_t max_retries = 30;
- uint32_t retries = 0;
-
- if (allocate) {
- avg_time_slots_per_mtp = calculate_sst_avg_time_slots_per_mtp(stream, link);
- req_slot_count = dc_fixpt_ceil(avg_time_slots_per_mtp);
- /// Validation should filter out modes that exceed link BW
- ASSERT(req_slot_count <= MAX_MTP_SLOT_COUNT);
- if (req_slot_count > MAX_MTP_SLOT_COUNT)
- return false;
- } else {
- /// Leave req_slot_count = 0 if allocate is false.
- }
-
- proposed_table->stream_count = 1; /// Always 1 stream for SST
- proposed_table->stream_allocations[0].slot_count = req_slot_count;
- proposed_table->stream_allocations[0].vcp_id = vc_id;
-
- if (link->aux_access_disabled)
- return true;
-
- /// Write DPCD 2C0 = 1 to start updating
- update_status.bits.VC_PAYLOAD_TABLE_UPDATED = 1;
- core_link_write_dpcd(
- link,
- DP_PAYLOAD_TABLE_UPDATE_STATUS,
- &update_status.raw,
- 1);
-
- /// Program the changes in DPCD 1C0 - 1C2
- ASSERT(vc_id == 1);
- core_link_write_dpcd(
- link,
- DP_PAYLOAD_ALLOCATE_SET,
- &vc_id,
- 1);
-
- ASSERT(start_time_slot == 0);
- core_link_write_dpcd(
- link,
- DP_PAYLOAD_ALLOCATE_START_TIME_SLOT,
- &start_time_slot,
- 1);
-
- core_link_write_dpcd(
- link,
- DP_PAYLOAD_ALLOCATE_TIME_SLOT_COUNT,
- &req_slot_count,
- 1);
-
- /// Poll till DPCD 2C0 read 1
- /// Try for at least 150ms (30 retries, with 5ms delay after each attempt)
-
- while (retries < max_retries) {
- if (core_link_read_dpcd(
- link,
- DP_PAYLOAD_TABLE_UPDATE_STATUS,
- &update_status.raw,
- 1) == DC_OK) {
- if (update_status.bits.VC_PAYLOAD_TABLE_UPDATED == 1) {
- DC_LOG_DP2("SST Update Payload: downstream payload table updated.");
- result = true;
- break;
- }
- } else {
- union dpcd_rev dpcdRev;
-
- if (core_link_read_dpcd(
- link,
- DP_DPCD_REV,
- &dpcdRev.raw,
- 1) != DC_OK) {
- DC_LOG_ERROR("SST Update Payload: Unable to read DPCD revision "
- "of sink while polling payload table "
- "updated status bit.");
- break;
- }
- }
- retries++;
- msleep(5);
- }
-
- if (!result && retries == max_retries) {
- DC_LOG_ERROR("SST Update Payload: Payload table not updated after retries, "
- "continue on. Something is wrong with the branch.");
- // TODO - DP2.0 Payload: Read and log the payload table from downstream branch
- }
-
- return result;
-}
-
-bool dpcd_poll_for_allocation_change_trigger(struct dc_link *link)
-{
- /*
- * wait for ACT handled
- */
- int i;
- const int act_retries = 30;
- enum act_return_status result = ACT_FAILED;
- union payload_table_update_status update_status = {0};
- union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX];
- union lane_align_status_updated lane_status_updated;
-
- if (link->aux_access_disabled)
- return true;
- for (i = 0; i < act_retries; i++) {
- get_lane_status(link, link->cur_link_settings.lane_count, dpcd_lane_status, &lane_status_updated);
-
- if (!dp_is_cr_done(link->cur_link_settings.lane_count, dpcd_lane_status) ||
- !dp_is_ch_eq_done(link->cur_link_settings.lane_count, dpcd_lane_status) ||
- !dp_is_symbol_locked(link->cur_link_settings.lane_count, dpcd_lane_status) ||
- !dp_is_interlane_aligned(lane_status_updated)) {
- DC_LOG_ERROR("SST Update Payload: Link loss occurred while "
- "polling for ACT handled.");
- result = ACT_LINK_LOST;
- break;
- }
- core_link_read_dpcd(
- link,
- DP_PAYLOAD_TABLE_UPDATE_STATUS,
- &update_status.raw,
- 1);
-
- if (update_status.bits.ACT_HANDLED == 1) {
- DC_LOG_DP2("SST Update Payload: ACT handled by downstream.");
- result = ACT_SUCCESS;
- break;
- }
-
- msleep(5);
- }
-
- if (result == ACT_FAILED) {
- DC_LOG_ERROR("SST Update Payload: ACT still not handled after retries, "
- "continue on. Something is wrong with the branch.");
- }
-
- return (result == ACT_SUCCESS);
-}
-
-struct fixed31_32 calculate_sst_avg_time_slots_per_mtp(
- const struct dc_stream_state *stream,
- const struct dc_link *link)
-{
- struct fixed31_32 link_bw_effective =
- dc_fixpt_from_int(
- dc_link_bandwidth_kbps(link, &link->cur_link_settings));
- struct fixed31_32 timeslot_bw_effective =
- dc_fixpt_div_int(link_bw_effective, MAX_MTP_SLOT_COUNT);
- struct fixed31_32 timing_bw =
- dc_fixpt_from_int(
- dc_bandwidth_in_kbps_from_timing(&stream->timing));
- struct fixed31_32 avg_time_slots_per_mtp =
- dc_fixpt_div(timing_bw, timeslot_bw_effective);
-
- return avg_time_slots_per_mtp;
-}
-
-bool is_dp_128b_132b_signal(struct pipe_ctx *pipe_ctx)
-{
- /* If this assert is hit then we have a link encoder dynamic management issue */
- ASSERT(pipe_ctx->stream_res.hpo_dp_stream_enc ? pipe_ctx->link_res.hpo_dp_link_enc != NULL : true);
- return (pipe_ctx->stream_res.hpo_dp_stream_enc &&
- pipe_ctx->link_res.hpo_dp_link_enc &&
- dc_is_dp_signal(pipe_ctx->stream->signal));
-}
-
-void edp_panel_backlight_power_on(struct dc_link *link, bool wait_for_hpd)
-{
- if (link->connector_signal != SIGNAL_TYPE_EDP)
- return;
-
- link->dc->hwss.edp_power_control(link, true);
- if (wait_for_hpd)
- link->dc->hwss.edp_wait_for_hpd_ready(link, true);
- if (link->dc->hwss.edp_backlight_control)
- link->dc->hwss.edp_backlight_control(link, true);
-}
-
-void dc_link_clear_dprx_states(struct dc_link *link)
-{
- memset(&link->dprx_states, 0, sizeof(link->dprx_states));
-}
-
-void dp_receiver_power_ctrl(struct dc_link *link, bool on)
-{
- uint8_t state;
-
- state = on ? DP_POWER_STATE_D0 : DP_POWER_STATE_D3;
-
- if (link->sync_lt_in_progress)
- return;
-
- core_link_write_dpcd(link, DP_SET_POWER, &state,
- sizeof(state));
-
-}
-
-void dp_source_sequence_trace(struct dc_link *link, uint8_t dp_test_mode)
-{
- if (link != NULL && link->dc->debug.enable_driver_sequence_debug)
- core_link_write_dpcd(link, DP_SOURCE_SEQUENCE,
- &dp_test_mode, sizeof(dp_test_mode));
-}
-
-
-static uint8_t convert_to_count(uint8_t lttpr_repeater_count)
-{
- switch (lttpr_repeater_count) {
- case 0x80: // 1 lttpr repeater
- return 1;
- case 0x40: // 2 lttpr repeaters
- return 2;
- case 0x20: // 3 lttpr repeaters
- return 3;
- case 0x10: // 4 lttpr repeaters
- return 4;
- case 0x08: // 5 lttpr repeaters
- return 5;
- case 0x04: // 6 lttpr repeaters
- return 6;
- case 0x02: // 7 lttpr repeaters
- return 7;
- case 0x01: // 8 lttpr repeaters
- return 8;
- default:
- break;
- }
- return 0; // invalid value
-}
-
-static inline bool is_immediate_downstream(struct dc_link *link, uint32_t offset)
-{
- return (convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) == offset);
-}
-
-void dp_enable_link_phy(
- struct dc_link *link,
- const struct link_resource *link_res,
- enum signal_type signal,
- enum clock_source_id clock_source,
- const struct dc_link_settings *link_settings)
-{
- link->cur_link_settings = *link_settings;
- link->dc->hwss.enable_dp_link_output(link, link_res, signal,
- clock_source, link_settings);
- dp_receiver_power_ctrl(link, true);
-}
-
-void edp_add_delay_for_T9(struct dc_link *link)
-{
- if (link && link->panel_config.pps.extra_delay_backlight_off > 0)
- udelay(link->panel_config.pps.extra_delay_backlight_off * 1000);
-}
-
-bool edp_receiver_ready_T9(struct dc_link *link)
-{
- unsigned int tries = 0;
- unsigned char sinkstatus = 0;
- unsigned char edpRev = 0;
- enum dc_status result = DC_OK;
-
- result = core_link_read_dpcd(link, DP_EDP_DPCD_REV, &edpRev, sizeof(edpRev));
-
- /* start from eDP version 1.2, SINK_STAUS indicate the sink is ready.*/
- if (result == DC_OK && edpRev >= DP_EDP_12) {
- do {
- sinkstatus = 1;
- result = core_link_read_dpcd(link, DP_SINK_STATUS, &sinkstatus, sizeof(sinkstatus));
- if (sinkstatus == 0)
- break;
- if (result != DC_OK)
- break;
- udelay(100); //MAx T9
- } while (++tries < 50);
- }
-
- return result;
-}
-bool edp_receiver_ready_T7(struct dc_link *link)
-{
- unsigned char sinkstatus = 0;
- unsigned char edpRev = 0;
- enum dc_status result = DC_OK;
-
- /* use absolute time stamp to constrain max T7*/
- unsigned long long enter_timestamp = 0;
- unsigned long long finish_timestamp = 0;
- unsigned long long time_taken_in_ns = 0;
-
- result = core_link_read_dpcd(link, DP_EDP_DPCD_REV, &edpRev, sizeof(edpRev));
-
- if (result == DC_OK && edpRev >= DP_EDP_12) {
- /* start from eDP version 1.2, SINK_STAUS indicate the sink is ready.*/
- enter_timestamp = dm_get_timestamp(link->ctx);
- do {
- sinkstatus = 0;
- result = core_link_read_dpcd(link, DP_SINK_STATUS, &sinkstatus, sizeof(sinkstatus));
- if (sinkstatus == 1)
- break;
- if (result != DC_OK)
- break;
- udelay(25);
- finish_timestamp = dm_get_timestamp(link->ctx);
- time_taken_in_ns = dm_get_elapse_time_in_ns(link->ctx, finish_timestamp, enter_timestamp);
- } while (time_taken_in_ns < 50 * 1000000); //MAx T7 is 50ms
- }
-
- if (link && link->panel_config.pps.extra_t7_ms > 0)
- udelay(link->panel_config.pps.extra_t7_ms * 1000);
-
- return result;
-}
-
-void dp_disable_link_phy(struct dc_link *link, const struct link_resource *link_res,
- enum signal_type signal)
-{
- struct dc *dc = link->ctx->dc;
-
- if (!link->wa_flags.dp_keep_receiver_powered)
- dp_receiver_power_ctrl(link, false);
-
- dc->hwss.disable_link_output(link, link_res, signal);
- /* Clear current link setting.*/
- memset(&link->cur_link_settings, 0,
- sizeof(link->cur_link_settings));
-
- if (dc->clk_mgr->funcs->notify_link_rate_change)
- dc->clk_mgr->funcs->notify_link_rate_change(dc->clk_mgr, link);
-}
-
-void dp_disable_link_phy_mst(struct dc_link *link, const struct link_resource *link_res,
- enum signal_type signal)
-{
- /* MST disable link only when no stream use the link */
- if (link->mst_stream_alloc_table.stream_count > 0)
- return;
-
- dp_disable_link_phy(link, link_res, signal);
-
- /* set the sink to SST mode after disabling the link */
- dp_enable_mst_on_sink(link, false);
-}
-
-bool dp_set_hw_training_pattern(
- struct dc_link *link,
- const struct link_resource *link_res,
- enum dc_dp_training_pattern pattern,
- uint32_t offset)
-{
- enum dp_test_pattern test_pattern = DP_TEST_PATTERN_UNSUPPORTED;
-
- switch (pattern) {
- case DP_TRAINING_PATTERN_SEQUENCE_1:
- test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN1;
- break;
- case DP_TRAINING_PATTERN_SEQUENCE_2:
- test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN2;
- break;
- case DP_TRAINING_PATTERN_SEQUENCE_3:
- test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN3;
- break;
- case DP_TRAINING_PATTERN_SEQUENCE_4:
- test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN4;
- break;
- case DP_128b_132b_TPS1:
- test_pattern = DP_TEST_PATTERN_128b_132b_TPS1_TRAINING_MODE;
- break;
- case DP_128b_132b_TPS2:
- test_pattern = DP_TEST_PATTERN_128b_132b_TPS2_TRAINING_MODE;
- break;
- default:
- break;
- }
-
- dp_set_hw_test_pattern(link, link_res, test_pattern, NULL, 0);
-
- return true;
-}
-
-void dp_set_hw_lane_settings(
- struct dc_link *link,
- const struct link_resource *link_res,
- const struct link_training_settings *link_settings,
- uint32_t offset)
-{
- const struct link_hwss *link_hwss = get_link_hwss(link, link_res);
-
- if ((link_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) && !is_immediate_downstream(link, offset))
- return;
-
- if (link_hwss->ext.set_dp_lane_settings)
- link_hwss->ext.set_dp_lane_settings(link, link_res,
- &link_settings->link_settings,
- link_settings->hw_lane_settings);
-
- memmove(link->cur_lane_setting,
- link_settings->hw_lane_settings,
- sizeof(link->cur_lane_setting));
-}
-
-void dp_set_hw_test_pattern(
- struct dc_link *link,
- const struct link_resource *link_res,
- enum dp_test_pattern test_pattern,
- uint8_t *custom_pattern,
- uint32_t custom_pattern_size)
-{
- const struct link_hwss *link_hwss = get_link_hwss(link, link_res);
- struct encoder_set_dp_phy_pattern_param pattern_param = {0};
-
- pattern_param.dp_phy_pattern = test_pattern;
- pattern_param.custom_pattern = custom_pattern;
- pattern_param.custom_pattern_size = custom_pattern_size;
- pattern_param.dp_panel_mode = dp_get_panel_mode(link);
-
- if (link_hwss->ext.set_dp_link_test_pattern)
- link_hwss->ext.set_dp_link_test_pattern(link, link_res, &pattern_param);
-}
-
-void dp_retrain_link_dp_test(struct dc_link *link,
- struct dc_link_settings *link_setting,
- bool skip_video_pattern)
-{
- struct pipe_ctx *pipes =
- &link->dc->current_state->res_ctx.pipe_ctx[0];
- unsigned int i;
- bool do_fallback = false;
-
-
- for (i = 0; i < MAX_PIPES; i++) {
- if (pipes[i].stream != NULL &&
- !pipes[i].top_pipe && !pipes[i].prev_odm_pipe &&
- pipes[i].stream->link != NULL &&
- pipes[i].stream_res.stream_enc != NULL &&
- pipes[i].stream->link == link) {
- udelay(100);
-
- pipes[i].stream_res.stream_enc->funcs->dp_blank(link,
- pipes[i].stream_res.stream_enc);
-
- /* disable any test pattern that might be active */
- dp_set_hw_test_pattern(link, &pipes[i].link_res,
- DP_TEST_PATTERN_VIDEO_MODE, NULL, 0);
-
- dp_receiver_power_ctrl(link, false);
-
- link->dc->hwss.disable_stream(&pipes[i]);
- if ((&pipes[i])->stream_res.audio && !link->dc->debug.az_endpoint_mute_only)
- (&pipes[i])->stream_res.audio->funcs->az_disable((&pipes[i])->stream_res.audio);
-
- if (link->link_enc)
- link->link_enc->funcs->disable_output(
- link->link_enc,
- SIGNAL_TYPE_DISPLAY_PORT);
-
- /* Clear current link setting. */
- memset(&link->cur_link_settings, 0,
- sizeof(link->cur_link_settings));
-
- if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
- do_fallback = true;
-
- perform_link_training_with_retries(
- link_setting,
- skip_video_pattern,
- LINK_TRAINING_ATTEMPTS,
- &pipes[i],
- SIGNAL_TYPE_DISPLAY_PORT,
- do_fallback);
-
- link->dc->hwss.enable_stream(&pipes[i]);
-
- link->dc->hwss.unblank_stream(&pipes[i],
- link_setting);
-
- link->dc->hwss.enable_audio_stream(&pipes[i]);
- }
- }
-}
-
-#undef DC_LOGGER
-#define DC_LOGGER \
- dsc->ctx->logger
-static void dsc_optc_config_log(struct display_stream_compressor *dsc,
- struct dsc_optc_config *config)
-{
- uint32_t precision = 1 << 28;
- uint32_t bytes_per_pixel_int = config->bytes_per_pixel / precision;
- uint32_t bytes_per_pixel_mod = config->bytes_per_pixel % precision;
- uint64_t ll_bytes_per_pix_fraq = bytes_per_pixel_mod;
-
- /* 7 fractional digits decimal precision for bytes per pixel is enough because DSC
- * bits per pixel precision is 1/16th of a pixel, which means bytes per pixel precision is
- * 1/16/8 = 1/128 of a byte, or 0.0078125 decimal
- */
- ll_bytes_per_pix_fraq *= 10000000;
- ll_bytes_per_pix_fraq /= precision;
-
- DC_LOG_DSC("\tbytes_per_pixel 0x%08x (%d.%07d)",
- config->bytes_per_pixel, bytes_per_pixel_int, (uint32_t)ll_bytes_per_pix_fraq);
- DC_LOG_DSC("\tis_pixel_format_444 %d", config->is_pixel_format_444);
- DC_LOG_DSC("\tslice_width %d", config->slice_width);
-}
-
-bool dp_set_dsc_on_rx(struct pipe_ctx *pipe_ctx, bool enable)
-{
- struct dc *dc = pipe_ctx->stream->ctx->dc;
- struct dc_stream_state *stream = pipe_ctx->stream;
- bool result = false;
-
- if (dc_is_virtual_signal(stream->signal) || IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
- result = true;
- else
- result = dm_helpers_dp_write_dsc_enable(dc->ctx, stream, enable);
- return result;
-}
-
-/* The stream with these settings can be sent (unblanked) only after DSC was enabled on RX first,
- * i.e. after dp_enable_dsc_on_rx() had been called
- */
-void dp_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
-{
- struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc;
- struct dc *dc = pipe_ctx->stream->ctx->dc;
- struct dc_stream_state *stream = pipe_ctx->stream;
- struct pipe_ctx *odm_pipe;
- int opp_cnt = 1;
-
- for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
- opp_cnt++;
-
- if (enable) {
- struct dsc_config dsc_cfg;
- struct dsc_optc_config dsc_optc_cfg;
- enum optc_dsc_mode optc_dsc_mode;
-
- /* Enable DSC hw block */
- dsc_cfg.pic_width = (stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right) / opp_cnt;
- dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom;
- dsc_cfg.pixel_encoding = stream->timing.pixel_encoding;
- dsc_cfg.color_depth = stream->timing.display_color_depth;
- dsc_cfg.is_odm = pipe_ctx->next_odm_pipe ? true : false;
- dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg;
- ASSERT(dsc_cfg.dc_dsc_cfg.num_slices_h % opp_cnt == 0);
- dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt;
-
- dsc->funcs->dsc_set_config(dsc, &dsc_cfg, &dsc_optc_cfg);
- dsc->funcs->dsc_enable(dsc, pipe_ctx->stream_res.opp->inst);
- for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
- struct display_stream_compressor *odm_dsc = odm_pipe->stream_res.dsc;
-
- odm_dsc->funcs->dsc_set_config(odm_dsc, &dsc_cfg, &dsc_optc_cfg);
- odm_dsc->funcs->dsc_enable(odm_dsc, odm_pipe->stream_res.opp->inst);
- }
- dsc_cfg.dc_dsc_cfg.num_slices_h *= opp_cnt;
- dsc_cfg.pic_width *= opp_cnt;
-
- optc_dsc_mode = dsc_optc_cfg.is_pixel_format_444 ? OPTC_DSC_ENABLED_444 : OPTC_DSC_ENABLED_NATIVE_SUBSAMPLED;
-
- /* Enable DSC in encoder */
- if (dc_is_dp_signal(stream->signal) && !IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)
- && !is_dp_128b_132b_signal(pipe_ctx)) {
- DC_LOG_DSC("Setting stream encoder DSC config for engine %d:", (int)pipe_ctx->stream_res.stream_enc->id);
- dsc_optc_config_log(dsc, &dsc_optc_cfg);
- pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_config(pipe_ctx->stream_res.stream_enc,
- optc_dsc_mode,
- dsc_optc_cfg.bytes_per_pixel,
- dsc_optc_cfg.slice_width);
-
- /* PPS SDP is set elsewhere because it has to be done after DIG FE is connected to DIG BE */
- }
-
- /* Enable DSC in OPTC */
- DC_LOG_DSC("Setting optc DSC config for tg instance %d:", pipe_ctx->stream_res.tg->inst);
- dsc_optc_config_log(dsc, &dsc_optc_cfg);
- pipe_ctx->stream_res.tg->funcs->set_dsc_config(pipe_ctx->stream_res.tg,
- optc_dsc_mode,
- dsc_optc_cfg.bytes_per_pixel,
- dsc_optc_cfg.slice_width);
- } else {
- /* disable DSC in OPTC */
- pipe_ctx->stream_res.tg->funcs->set_dsc_config(
- pipe_ctx->stream_res.tg,
- OPTC_DSC_DISABLED, 0, 0);
-
- /* disable DSC in stream encoder */
- if (dc_is_dp_signal(stream->signal)) {
- if (is_dp_128b_132b_signal(pipe_ctx))
- pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_set_dsc_pps_info_packet(
- pipe_ctx->stream_res.hpo_dp_stream_enc,
- false,
- NULL,
- true);
- else if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
- pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_config(
- pipe_ctx->stream_res.stream_enc,
- OPTC_DSC_DISABLED, 0, 0);
- pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_pps_info_packet(
- pipe_ctx->stream_res.stream_enc, false, NULL, true);
- }
- }
-
- /* disable DSC block */
- pipe_ctx->stream_res.dsc->funcs->dsc_disable(pipe_ctx->stream_res.dsc);
- for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
- odm_pipe->stream_res.dsc->funcs->dsc_disable(odm_pipe->stream_res.dsc);
- }
-}
-
-bool dp_set_dsc_enable(struct pipe_ctx *pipe_ctx, bool enable)
-{
- struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc;
- bool result = false;
-
- if (!pipe_ctx->stream->timing.flags.DSC)
- goto out;
- if (!dsc)
- goto out;
-
- if (enable) {
- {
- dp_set_dsc_on_stream(pipe_ctx, true);
- result = true;
- }
- } else {
- dp_set_dsc_on_rx(pipe_ctx, false);
- dp_set_dsc_on_stream(pipe_ctx, false);
- result = true;
- }
-out:
- return result;
-}
-
-/*
- * For dynamic bpp change case, dsc is programmed with MASTER_UPDATE_LOCK enabled;
- * hence PPS info packet update need to use frame update instead of immediate update.
- * Added parameter immediate_update for this purpose.
- * The decision to use frame update is hard-coded in function dp_update_dsc_config(),
- * which is the only place where a "false" would be passed in for param immediate_update.
- *
- * immediate_update is only applicable when DSC is enabled.
- */
-bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable, bool immediate_update)
-{
- struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc;
- struct dc_stream_state *stream = pipe_ctx->stream;
-
- if (!pipe_ctx->stream->timing.flags.DSC || !dsc)
- return false;
-
- if (enable) {
- struct dsc_config dsc_cfg;
- uint8_t dsc_packed_pps[128];
-
- memset(&dsc_cfg, 0, sizeof(dsc_cfg));
- memset(dsc_packed_pps, 0, 128);
-
- /* Enable DSC hw block */
- dsc_cfg.pic_width = stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right;
- dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom;
- dsc_cfg.pixel_encoding = stream->timing.pixel_encoding;
- dsc_cfg.color_depth = stream->timing.display_color_depth;
- dsc_cfg.is_odm = pipe_ctx->next_odm_pipe ? true : false;
- dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg;
-
- DC_LOG_DSC(" ");
- dsc->funcs->dsc_get_packed_pps(dsc, &dsc_cfg, &dsc_packed_pps[0]);
- memcpy(&stream->dsc_packed_pps[0], &dsc_packed_pps[0], sizeof(stream->dsc_packed_pps));
- if (dc_is_dp_signal(stream->signal)) {
- DC_LOG_DSC("Setting stream encoder DSC PPS SDP for engine %d\n", (int)pipe_ctx->stream_res.stream_enc->id);
- if (is_dp_128b_132b_signal(pipe_ctx))
- pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_set_dsc_pps_info_packet(
- pipe_ctx->stream_res.hpo_dp_stream_enc,
- true,
- &dsc_packed_pps[0],
- immediate_update);
- else
- pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_pps_info_packet(
- pipe_ctx->stream_res.stream_enc,
- true,
- &dsc_packed_pps[0],
- immediate_update);
- }
- } else {
- /* disable DSC PPS in stream encoder */
- memset(&stream->dsc_packed_pps[0], 0, sizeof(stream->dsc_packed_pps));
- if (dc_is_dp_signal(stream->signal)) {
- if (is_dp_128b_132b_signal(pipe_ctx))
- pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_set_dsc_pps_info_packet(
- pipe_ctx->stream_res.hpo_dp_stream_enc,
- false,
- NULL,
- true);
- else
- pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_pps_info_packet(
- pipe_ctx->stream_res.stream_enc, false, NULL, true);
- }
- }
-
- return true;
-}
-
-
-bool dp_update_dsc_config(struct pipe_ctx *pipe_ctx)
-{
- struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc;
-
- if (!pipe_ctx->stream->timing.flags.DSC)
- return false;
- if (!dsc)
- return false;
-
- dp_set_dsc_on_stream(pipe_ctx, true);
- dp_set_dsc_pps_sdp(pipe_ctx, true, false);
- return true;
-}
-
-#undef DC_LOGGER
-#define DC_LOGGER \
- link->ctx->logger
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c
index 614f022d1cff..74e465ba158d 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c
@@ -24,7 +24,7 @@
#include "link_enc_cfg.h"
#include "resource.h"
-#include "dc_link_dp.h"
+#include "link.h"
#define DC_LOGGER dc->ctx->logger
@@ -48,7 +48,7 @@ static bool is_dig_link_enc_stream(struct dc_stream_state *stream)
/* DIGs do not support DP2.0 streams with 128b/132b encoding. */
struct dc_link_settings link_settings = {0};
- decide_link_settings(stream, &link_settings);
+ link_decide_link_settings(stream, &link_settings);
if ((link_settings.link_rate >= LINK_RATE_LOW) &&
link_settings.link_rate <= LINK_RATE_HIGH3) {
is_dig_stream = true;
@@ -305,15 +305,17 @@ void link_enc_cfg_link_encs_assign(
for (i = 0; i < stream_count; i++) {
struct dc_stream_state *stream = streams[i];
+ /* skip it if the link is mappable endpoint. */
+ if (stream->link->is_dig_mapping_flexible)
+ continue;
+
/* Skip stream if not supported by DIG link encoder. */
if (!is_dig_link_enc_stream(stream))
continue;
/* Physical endpoints have a fixed mapping to DIG link encoders. */
- if (!stream->link->is_dig_mapping_flexible) {
- eng_id = stream->link->eng_id;
- add_link_enc_assignment(state, stream, eng_id);
- }
+ eng_id = stream->link->eng_id;
+ add_link_enc_assignment(state, stream, eng_id);
}
/* (b) Retain previous assignments for mappable endpoints if encoders still available. */
@@ -325,11 +327,12 @@ void link_enc_cfg_link_encs_assign(
for (i = 0; i < stream_count; i++) {
struct dc_stream_state *stream = state->streams[i];
- /* Skip stream if not supported by DIG link encoder. */
- if (!is_dig_link_enc_stream(stream))
+ /* Skip it if the link is NOT mappable endpoint. */
+ if (!stream->link->is_dig_mapping_flexible)
continue;
- if (!stream->link->is_dig_mapping_flexible)
+ /* Skip stream if not supported by DIG link encoder. */
+ if (!is_dig_link_enc_stream(stream))
continue;
for (j = 0; j < prev_state->stream_count; j++) {
@@ -338,6 +341,7 @@ void link_enc_cfg_link_encs_assign(
if (stream == prev_stream && stream->link == prev_stream->link &&
prev_state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[j].valid) {
eng_id = prev_state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[j].eng_id;
+
if (is_avail_link_enc(state, eng_id, stream))
add_link_enc_assignment(state, stream, eng_id);
}
@@ -350,6 +354,15 @@ void link_enc_cfg_link_encs_assign(
for (i = 0; i < stream_count; i++) {
struct dc_stream_state *stream = streams[i];
+ struct link_encoder *link_enc = NULL;
+
+ /* Skip it if the link is NOT mappable endpoint. */
+ if (!stream->link->is_dig_mapping_flexible)
+ continue;
+
+ /* Skip if encoder assignment retained in step (b) above. */
+ if (stream->link_enc)
+ continue;
/* Skip stream if not supported by DIG link encoder. */
if (!is_dig_link_enc_stream(stream)) {
@@ -358,24 +371,18 @@ void link_enc_cfg_link_encs_assign(
}
/* Mappable endpoints have a flexible mapping to DIG link encoders. */
- if (stream->link->is_dig_mapping_flexible) {
- struct link_encoder *link_enc = NULL;
- /* Skip if encoder assignment retained in step (b) above. */
- if (stream->link_enc)
- continue;
+ /* For MST, multiple streams will share the same link / display
+ * endpoint. These streams should use the same link encoder
+ * assigned to that endpoint.
+ */
+ link_enc = get_link_enc_used_by_link(state, stream->link);
+ if (link_enc == NULL)
+ eng_id = find_first_avail_link_enc(stream->ctx, state);
+ else
+ eng_id = link_enc->preferred_engine;
- /* For MST, multiple streams will share the same link / display
- * endpoint. These streams should use the same link encoder
- * assigned to that endpoint.
- */
- link_enc = get_link_enc_used_by_link(state, stream->link);
- if (link_enc == NULL)
- eng_id = find_first_avail_link_enc(stream->ctx, state);
- else
- eng_id = link_enc->preferred_engine;
- add_link_enc_assignment(state, stream, eng_id);
- }
+ add_link_enc_assignment(state, stream, eng_id);
}
link_enc_cfg_validate(dc, state);
@@ -420,10 +427,6 @@ void link_enc_cfg_link_enc_unassign(
{
enum engine_id eng_id = ENGINE_ID_UNKNOWN;
- /* Only DIG link encoders. */
- if (!is_dig_link_enc_stream(stream))
- return;
-
if (stream->link_enc)
eng_id = stream->link_enc->preferred_engine;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c
new file mode 100644
index 000000000000..a951e10416ee
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c
@@ -0,0 +1,103 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+/* FILE POLICY AND INTENDED USAGE:
+ * This file provides single entrance to link functionality declared in dc
+ * public headers. The file is intended to be used as a thin translation layer
+ * that directly calls link internal functions without adding new functional
+ * behavior.
+ *
+ * When exporting a new link related dc function, add function declaration in
+ * dc.h with detail interface documentation, then add function implementation
+ * in this file which calls link functions.
+ */
+#include "link.h"
+
+bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
+{
+ return link_detect(link, reason);
+}
+
+bool dc_link_detect_connection_type(struct dc_link *link,
+ enum dc_connection_type *type)
+{
+ return link_detect_connection_type(link, type);
+}
+
+const struct dc_link_status *dc_link_get_status(const struct dc_link *link)
+{
+ return link_get_status(link);
+}
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+
+/* return true if the connected receiver supports the hdcp version */
+bool dc_link_is_hdcp14(struct dc_link *link, enum signal_type signal)
+{
+ return link_is_hdcp14(link, signal);
+}
+
+bool dc_link_is_hdcp22(struct dc_link *link, enum signal_type signal)
+{
+ return link_is_hdcp22(link, signal);
+}
+#endif
+
+void dc_link_clear_dprx_states(struct dc_link *link)
+{
+ link_clear_dprx_states(link);
+}
+
+bool dc_link_reset_cur_dp_mst_topology(struct dc_link *link)
+{
+ return link_reset_cur_dp_mst_topology(link);
+}
+
+uint32_t dc_link_bandwidth_kbps(
+ const struct dc_link *link,
+ const struct dc_link_settings *link_settings)
+{
+ return dp_link_bandwidth_kbps(link, link_settings);
+}
+
+uint32_t dc_bandwidth_in_kbps_from_timing(
+ const struct dc_crtc_timing *timing)
+{
+ return link_timing_bandwidth_kbps(timing);
+}
+
+void dc_get_cur_link_res_map(const struct dc *dc, uint32_t *map)
+{
+ link_get_cur_res_map(dc, map);
+}
+
+void dc_restore_link_res_map(const struct dc *dc, uint32_t *map)
+{
+ link_restore_res_map(dc, map);
+}
+
+bool dc_link_update_dsc_config(struct pipe_ctx *pipe_ctx)
+{
+ return link_update_dsc_config(pipe_ctx);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index da164685547d..d9f2ef242b0f 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -40,11 +40,11 @@
#include "virtual/virtual_stream_encoder.h"
#include "dpcd_defs.h"
#include "link_enc_cfg.h"
-#include "dc_link_dp.h"
+#include "link.h"
#include "virtual/virtual_link_hwss.h"
-#include "link/link_hwss_dio.h"
-#include "link/link_hwss_dpia.h"
-#include "link/link_hwss_hpo_dp.h"
+#include "link/hwss/link_hwss_dio.h"
+#include "link/hwss/link_hwss_dpia.h"
+#include "link/hwss/link_hwss_hpo_dp.h"
#if defined(CONFIG_DRM_AMD_DC_SI)
#include "dce60/dce60_resource.h"
@@ -2213,7 +2213,7 @@ enum dc_status dc_remove_stream_from_ctx(
del_pipe->stream_res.stream_enc,
false);
- if (is_dp_128b_132b_signal(del_pipe)) {
+ if (link_is_dp_128b_132b_signal(del_pipe)) {
update_hpo_dp_stream_engine_usage(
&new_ctx->res_ctx, dc->res_pool,
del_pipe->stream_res.hpo_dp_stream_enc,
@@ -2513,9 +2513,9 @@ enum dc_status resource_map_pool_resources(
* and link settings
*/
if (dc_is_dp_signal(stream->signal)) {
- if (!decide_link_settings(stream, &pipe_ctx->link_config.dp_link_settings))
+ if (!link_decide_link_settings(stream, &pipe_ctx->link_config.dp_link_settings))
return DC_FAIL_DP_LINK_BANDWIDTH;
- if (dp_get_link_encoding_format(&pipe_ctx->link_config.dp_link_settings) == DP_128b_132b_ENCODING) {
+ if (link_dp_get_encoding_format(&pipe_ctx->link_config.dp_link_settings) == DP_128b_132b_ENCODING) {
pipe_ctx->stream_res.hpo_dp_stream_enc =
find_first_free_match_hpo_dp_stream_enc_for_link(
&context->res_ctx, pool, stream);
@@ -3269,6 +3269,50 @@ static void set_hfvs_info_packet(
*info_packet = stream->hfvsif_infopacket;
}
+static void adaptive_sync_override_dp_info_packets_sdp_line_num(
+ const struct dc_crtc_timing *timing,
+ struct enc_sdp_line_num *sdp_line_num,
+ struct _vcs_dpi_display_pipe_dest_params_st *pipe_dlg_param)
+{
+ uint32_t asic_blank_start = 0;
+ uint32_t asic_blank_end = 0;
+ uint32_t v_update = 0;
+
+ const struct dc_crtc_timing *tg = timing;
+
+ /* blank_start = frame end - front porch */
+ asic_blank_start = tg->v_total - tg->v_front_porch;
+
+ /* blank_end = blank_start - active */
+ asic_blank_end = (asic_blank_start - tg->v_border_bottom -
+ tg->v_addressable - tg->v_border_top);
+
+ if (pipe_dlg_param->vstartup_start > asic_blank_end) {
+ v_update = (tg->v_total - (pipe_dlg_param->vstartup_start - asic_blank_end));
+ sdp_line_num->adaptive_sync_line_num_valid = true;
+ sdp_line_num->adaptive_sync_line_num = (tg->v_total - v_update - 1);
+ } else {
+ sdp_line_num->adaptive_sync_line_num_valid = false;
+ sdp_line_num->adaptive_sync_line_num = 0;
+ }
+}
+
+static void set_adaptive_sync_info_packet(
+ struct dc_info_packet *info_packet,
+ const struct dc_stream_state *stream,
+ struct encoder_info_frame *info_frame,
+ struct _vcs_dpi_display_pipe_dest_params_st *pipe_dlg_param)
+{
+ if (!stream->adaptive_sync_infopacket.valid)
+ return;
+
+ adaptive_sync_override_dp_info_packets_sdp_line_num(
+ &stream->timing,
+ &info_frame->sdp_line_num,
+ pipe_dlg_param);
+
+ *info_packet = stream->adaptive_sync_infopacket;
+}
static void set_vtem_info_packet(
struct dc_info_packet *info_packet,
@@ -3361,6 +3405,7 @@ void resource_build_info_frame(struct pipe_ctx *pipe_ctx)
info->vsc.valid = false;
info->hfvsif.valid = false;
info->vtem.valid = false;
+ info->adaptive_sync.valid = false;
signal = pipe_ctx->stream->signal;
/* HDMi and DP have different info packets*/
@@ -3381,6 +3426,10 @@ void resource_build_info_frame(struct pipe_ctx *pipe_ctx)
set_spd_info_packet(&info->spd, pipe_ctx->stream);
set_hdr_static_info_packet(&info->hdrsmd, pipe_ctx->stream);
+ set_adaptive_sync_info_packet(&info->adaptive_sync,
+ pipe_ctx->stream,
+ info,
+ &pipe_ctx->pipe_dlg_param);
}
patch_gamut_packet_checksum(&info->gamut);
@@ -3636,7 +3685,7 @@ enum dc_status dc_validate_stream(struct dc *dc, struct dc_stream_state *stream)
/* TODO: validate audio ASIC caps, encoder */
if (res == DC_OK)
- res = dc_link_validate_mode_timing(stream,
+ res = link_validate_mode_timing(stream,
link,
&stream->timing);
@@ -3763,7 +3812,7 @@ bool get_temp_dp_link_res(struct dc_link *link,
memset(link_res, 0, sizeof(*link_res));
- if (dp_get_link_encoding_format(link_settings) == DP_128b_132b_ENCODING) {
+ if (link_dp_get_encoding_format(link_settings) == DP_128b_132b_ENCODING) {
link_res->hpo_dp_link_enc = get_temp_hpo_dp_link_enc(res_ctx,
dc->res_pool, link);
if (!link_res->hpo_dp_link_enc)
@@ -3820,9 +3869,20 @@ void check_syncd_pipes_for_disabled_master_pipe(struct dc *dc,
pipe_ctx_check = &context->res_ctx.pipe_ctx[i];
if ((GET_PIPE_SYNCD_FROM_PIPE(pipe_ctx_check) == disabled_master_pipe_idx) &&
- IS_PIPE_SYNCD_VALID(pipe_ctx_check) && (i != disabled_master_pipe_idx))
+ IS_PIPE_SYNCD_VALID(pipe_ctx_check) && (i != disabled_master_pipe_idx)) {
+ struct pipe_ctx *first_pipe = pipe_ctx_check;
+
+ while (first_pipe->prev_odm_pipe)
+ first_pipe = first_pipe->prev_odm_pipe;
+ /* When ODM combine is enabled, this case is expected. If the disabled pipe
+ * is part of the ODM tree, then we should not print an error.
+ * */
+ if (first_pipe->pipe_idx == disabled_master_pipe_idx)
+ continue;
+
DC_ERR("DC: Failure: pipe_idx[%d] syncd with disabled master pipe_idx[%d]\n",
- i, disabled_master_pipe_idx);
+ i, disabled_master_pipe_idx);
+ }
}
}
@@ -3981,3 +4041,42 @@ bool dc_resource_acquire_secondary_pipe_for_mpc_odm(
return true;
}
+
+enum dc_status update_dp_encoder_resources_for_test_harness(const struct dc *dc,
+ struct dc_state *context,
+ struct pipe_ctx *pipe_ctx)
+{
+ if (link_dp_get_encoding_format(&pipe_ctx->link_config.dp_link_settings) == DP_128b_132b_ENCODING) {
+ if (pipe_ctx->stream_res.hpo_dp_stream_enc == NULL) {
+ pipe_ctx->stream_res.hpo_dp_stream_enc =
+ find_first_free_match_hpo_dp_stream_enc_for_link(
+ &context->res_ctx, dc->res_pool, pipe_ctx->stream);
+
+ if (!pipe_ctx->stream_res.hpo_dp_stream_enc)
+ return DC_NO_STREAM_ENC_RESOURCE;
+
+ update_hpo_dp_stream_engine_usage(
+ &context->res_ctx, dc->res_pool,
+ pipe_ctx->stream_res.hpo_dp_stream_enc,
+ true);
+ }
+
+ if (pipe_ctx->link_res.hpo_dp_link_enc == NULL) {
+ if (!add_hpo_dp_link_enc_to_ctx(&context->res_ctx, dc->res_pool, pipe_ctx, pipe_ctx->stream))
+ return DC_NO_LINK_ENC_RESOURCE;
+ }
+ } else {
+ if (pipe_ctx->stream_res.hpo_dp_stream_enc) {
+ update_hpo_dp_stream_engine_usage(
+ &context->res_ctx, dc->res_pool,
+ pipe_ctx->stream_res.hpo_dp_stream_enc,
+ false);
+ pipe_ctx->stream_res.hpo_dp_stream_enc = NULL;
+ }
+ if (pipe_ctx->link_res.hpo_dp_link_enc)
+ remove_hpo_dp_link_enc_from_ctx(&context->res_ctx, pipe_ctx, pipe_ctx->stream);
+ }
+
+ return DC_OK;
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stat.c b/drivers/gpu/drm/amd/display/dc/core/dc_stat.c
index 4b372aa52801..6c06587dd88c 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stat.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stat.c
@@ -65,6 +65,7 @@ void dc_stat_get_dmub_notification(const struct dc *dc, struct dmub_notification
/* For HPD/HPD RX, convert dpia port index into link index */
if (notify->type == DMUB_NOTIFICATION_HPD ||
notify->type == DMUB_NOTIFICATION_HPD_IRQ ||
+ notify->type == DMUB_NOTIFICATION_DPIA_NOTIFICATION ||
notify->type == DMUB_NOTIFICATION_SET_CONFIG_REPLY) {
notify->link_index =
get_link_index_from_dpia_port_index(dc, notify->link_index);
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index 20e534f73513..72b261ad9587 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -408,7 +408,7 @@ bool dc_stream_set_cursor_position(
struct dc_stream_state *stream,
const struct dc_cursor_position *position)
{
- struct dc *dc = stream->ctx->dc;
+ struct dc *dc;
bool reset_idle_optimizations = false;
if (NULL == stream) {
@@ -481,6 +481,7 @@ bool dc_stream_add_writeback(struct dc *dc,
}
if (!isDrc) {
+ ASSERT(stream->num_wb_info + 1 <= MAX_DWB_PIPES);
stream->writeback_info[stream->num_wb_info++] = *wb_info;
}
@@ -526,6 +527,11 @@ bool dc_stream_remove_writeback(struct dc *dc,
return false;
}
+ if (stream->num_wb_info > MAX_DWB_PIPES) {
+ dm_error("DC: num_wb_info is invalid!\n");
+ return false;
+ }
+
// stream->writeback_info[dwb_pipe_inst].wb_enabled = false;
for (i = 0; i < stream->num_wb_info; i++) {
/*dynamic update*/
@@ -540,7 +546,8 @@ bool dc_stream_remove_writeback(struct dc *dc,
if (stream->writeback_info[i].wb_enabled) {
if (j < i)
/* trim the array */
- stream->writeback_info[j] = stream->writeback_info[i];
+ memcpy(&stream->writeback_info[j], &stream->writeback_info[i],
+ sizeof(struct dc_writeback_info));
j++;
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 85ebeaa2de18..1fde43378689 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -47,12 +47,11 @@ struct aux_payload;
struct set_config_cmd_payload;
struct dmub_notification;
-#define DC_VER "3.2.215"
+#define DC_VER "3.2.223"
#define MAX_SURFACES 3
#define MAX_PLANES 6
#define MAX_STREAMS 6
-#define MAX_SINKS_PER_LINK 4
#define MIN_VIEWPORT_SIZE 12
#define MAX_NUM_EDP 2
@@ -410,7 +409,7 @@ struct dc_config {
bool force_bios_enable_lttpr;
uint8_t force_bios_fixed_vs;
int sdpif_request_limit_words_per_umc;
-
+ bool disable_subvp_drr;
};
enum visual_confirm {
@@ -872,6 +871,9 @@ struct dc_debug_options {
enum lttpr_mode lttpr_mode_override;
unsigned int dsc_delay_factor_wa_x1000;
unsigned int min_prefetch_in_strobe_ns;
+ bool disable_unbounded_requesting;
+ bool dig_fifo_off_in_blank;
+ bool temp_mst_deallocation_sequence;
};
struct gpu_info_soc_bounding_box_v1_0;
@@ -1369,108 +1371,128 @@ struct dc_state *dc_copy_state(struct dc_state *src_ctx);
void dc_retain_state(struct dc_state *context);
void dc_release_state(struct dc_state *context);
+struct dc_plane_state *dc_get_surface_for_mpcc(struct dc *dc,
+ struct dc_stream_state *stream,
+ int mpcc_inst);
+
+
+uint32_t dc_get_opp_for_plane(struct dc *dc, struct dc_plane_state *plane);
+
/* Link Interfaces */
+/* TODO: remove this after resolving external dependencies */
+#include "dc_link.h"
-struct dpcd_caps {
- union dpcd_rev dpcd_rev;
- union max_lane_count max_ln_count;
- union max_down_spread max_down_spread;
- union dprx_feature dprx_feature;
-
- /* valid only for eDP v1.4 or higher*/
- uint8_t edp_supported_link_rates_count;
- enum dc_link_rate edp_supported_link_rates[8];
-
- /* dongle type (DP converter, CV smart dongle) */
- enum display_dongle_type dongle_type;
- bool is_dongle_type_one;
- /* branch device or sink device */
- bool is_branch_dev;
- /* Dongle's downstream count. */
- union sink_count sink_count;
- bool is_mst_capable;
- /* If dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER,
- indicates 'Frame Sequential-to-lllFrame Pack' conversion capability.*/
- struct dc_dongle_caps dongle_caps;
-
- uint32_t sink_dev_id;
- int8_t sink_dev_id_str[6];
- int8_t sink_hw_revision;
- int8_t sink_fw_revision[2];
-
- uint32_t branch_dev_id;
- int8_t branch_dev_name[6];
- int8_t branch_hw_revision;
- int8_t branch_fw_revision[2];
-
- bool allow_invalid_MSA_timing_param;
- bool panel_mode_edp;
- bool dpcd_display_control_capable;
- bool ext_receiver_cap_field_present;
- bool set_power_state_capable_edp;
- bool dynamic_backlight_capable_edp;
- union dpcd_fec_capability fec_cap;
- struct dpcd_dsc_capabilities dsc_caps;
- struct dc_lttpr_caps lttpr_caps;
- struct dpcd_usb4_dp_tunneling_info usb4_dp_tun_info;
-
- union dp_128b_132b_supported_link_rates dp_128b_132b_supported_link_rates;
- union dp_main_line_channel_coding_cap channel_coding_cap;
- union dp_sink_video_fallback_formats fallback_formats;
- union dp_fec_capability1 fec_cap1;
- union dp_cable_id cable_id;
- uint8_t edp_rev;
- union edp_alpm_caps alpm_caps;
- struct edp_psr_info psr_info;
-};
-
-union dpcd_sink_ext_caps {
- struct {
- /* 0 - Sink supports backlight adjust via PWM during SDR/HDR mode
- * 1 - Sink supports backlight adjust via AUX during SDR/HDR mode.
- */
- uint8_t sdr_aux_backlight_control : 1;
- uint8_t hdr_aux_backlight_control : 1;
- uint8_t reserved_1 : 2;
- uint8_t oled : 1;
- uint8_t reserved : 3;
- } bits;
- uint8_t raw;
-};
+/* The function initiates detection handshake over the given link. It first
+ * determines if there are display connections over the link. If so it initiates
+ * detection protocols supported by the connected receiver device. The function
+ * contains protocol specific handshake sequences which are sometimes mandatory
+ * to establish a proper connection between TX and RX. So it is always
+ * recommended to call this function as the first link operation upon HPD event
+ * or power up event. Upon completion, the function will update link structure
+ * in place based on latest RX capabilities. The function may also cause dpms
+ * to be reset to off for all currently enabled streams to the link. It is DM's
+ * responsibility to serialize detection and DPMS updates.
+ *
+ * @reason - Indicate which event triggers this detection. dc may customize
+ * detection flow depending on the triggering events.
+ * return false - if detection is not fully completed. This could happen when
+ * there is an unrecoverable error during detection or detection is partially
+ * completed (detection has been delegated to dm mst manager ie.
+ * link->connection_type == dc_connection_mst_branch when returning false).
+ * return true - detection is completed, link has been fully updated with latest
+ * detection result.
+ */
+bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason);
-#if defined(CONFIG_DRM_AMD_DC_HDCP)
-union hdcp_rx_caps {
- struct {
- uint8_t version;
- uint8_t reserved;
- struct {
- uint8_t repeater : 1;
- uint8_t hdcp_capable : 1;
- uint8_t reserved : 6;
- } byte0;
- } fields;
- uint8_t raw[3];
-};
+/* determine if there is a sink connected to the link
+ *
+ * @type - dc_connection_single if connected, dc_connection_none otherwise.
+ * return - false if an unexpected error occurs, true otherwise.
+ *
+ * NOTE: This function doesn't detect downstream sink connections i.e
+ * dc_connection_mst_branch, dc_connection_sst_branch. In this case, it will
+ * return dc_connection_single if the branch device is connected despite of
+ * downstream sink's connection status.
+ */
+bool dc_link_detect_connection_type(struct dc_link *link,
+ enum dc_connection_type *type);
-union hdcp_bcaps {
- struct {
- uint8_t HDCP_CAPABLE:1;
- uint8_t REPEATER:1;
- uint8_t RESERVED:6;
- } bits;
- uint8_t raw;
-};
+/* Getter for cached link status from given link */
+const struct dc_link_status *dc_link_get_status(const struct dc_link *link);
-struct hdcp_caps {
- union hdcp_rx_caps rx_caps;
- union hdcp_bcaps bcaps;
-};
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+/* return true if the connected receiver supports the hdcp version */
+bool dc_link_is_hdcp14(struct dc_link *link, enum signal_type signal);
+bool dc_link_is_hdcp22(struct dc_link *link, enum signal_type signal);
#endif
-#include "dc_link.h"
+/* The function clears recorded DP RX states in the link. DM should call this
+ * function when it is resuming from S3 power state to previously connected links.
+ *
+ * TODO - in the future we should consider to expand link resume interface to
+ * support clearing previous rx states. So we don't have to rely on dm to call
+ * this interface explicitly.
+ */
+void dc_link_clear_dprx_states(struct dc_link *link);
-uint32_t dc_get_opp_for_plane(struct dc *dc, struct dc_plane_state *plane);
+/* Destruct the mst topology of the link and reset the allocated payload table
+ *
+ * NOTE: this should only be called if DM chooses not to call dc_link_detect but
+ * still wants to reset MST topology on an unplug event */
+bool dc_link_reset_cur_dp_mst_topology(struct dc_link *link);
+
+/* The function calculates effective DP link bandwidth when a given link is
+ * using the given link settings.
+ *
+ * return - total effective link bandwidth in kbps.
+ */
+uint32_t dc_link_bandwidth_kbps(
+ const struct dc_link *link,
+ const struct dc_link_settings *link_setting);
+
+/* The function returns minimum bandwidth required to drive a given timing
+ * return - minimum required timing bandwidth in kbps.
+ */
+uint32_t dc_bandwidth_in_kbps_from_timing(
+ const struct dc_crtc_timing *timing);
+/* The function takes a snapshot of current link resource allocation state
+ * @dc: pointer to dc of the dm calling this
+ * @map: a dc link resource snapshot defined internally to dc.
+ *
+ * DM needs to capture a snapshot of current link resource allocation mapping
+ * and store it in its persistent storage.
+ *
+ * Some of the link resource is using first come first serve policy.
+ * The allocation mapping depends on original hotplug order. This information
+ * is lost after driver is loaded next time. The snapshot is used in order to
+ * restore link resource to its previous state so user will get consistent
+ * link capability allocation across reboot.
+ *
+ */
+void dc_get_cur_link_res_map(const struct dc *dc, uint32_t *map);
+
+/* This function restores link resource allocation state from a snapshot
+ * @dc: pointer to dc of the dm calling this
+ * @map: a dc link resource snapshot defined internally to dc.
+ *
+ * DM needs to call this function after initial link detection on boot and
+ * before first commit streams to restore link resource allocation state
+ * from previous boot session.
+ *
+ * Some of the link resource is using first come first serve policy.
+ * The allocation mapping depends on original hotplug order. This information
+ * is lost after driver is loaded next time. The snapshot is used in order to
+ * restore link resource to its previous state so user will get consistent
+ * link capability allocation across reboot.
+ *
+ */
+void dc_restore_link_res_map(const struct dc *dc, uint32_t *map);
+
+/* TODO: this is not meant to be exposed to DM. Should switch to stream update
+ * interface i.e stream_update->dsc_config
+ */
+bool dc_link_update_dsc_config(struct pipe_ctx *pipe_ctx);
/* Sink Interfaces - A sink corresponds to a display output device */
struct dc_container_id {
@@ -1502,6 +1524,11 @@ struct dc_sink_fec_caps {
bool is_topology_fec_supported;
};
+struct scdc_caps {
+ union hdmi_scdc_manufacturer_OUI_data manufacturer_OUI;
+ union hdmi_scdc_device_id_data device_id;
+};
+
/*
* The sink structure contains EDID and other display device properties
*/
@@ -1515,6 +1542,7 @@ struct dc_sink {
struct stereo_3d_features features_3d[TIMING_3D_FORMAT_MAX];
bool converter_disable_audio;
+ struct scdc_caps scdc_caps;
struct dc_sink_dsc_caps dsc_caps;
struct dc_sink_fec_caps fec_caps;
diff --git a/drivers/gpu/drm/amd/display/dc/dc_bios_types.h b/drivers/gpu/drm/amd/display/dc/dc_bios_types.h
index 260ac4458870..be9aa1a71847 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_bios_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_bios_types.h
@@ -140,7 +140,8 @@ struct dc_vbios_funcs {
enum bp_result (*enable_lvtma_control)(
struct dc_bios *bios,
uint8_t uc_pwr_on,
- uint8_t panel_instance);
+ uint8_t panel_instance,
+ uint8_t bypass_panel_control_wait);
enum bp_result (*get_soc_bb_info)(
struct dc_bios *dcb,
diff --git a/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h b/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h
index 7769bd099a5a..428e3a9ab65a 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h
@@ -77,6 +77,32 @@ struct aux_reply_transaction_data {
uint8_t *data;
};
+struct aux_payload {
+ /* set following flag to read/write I2C data,
+ * reset it to read/write DPCD data */
+ bool i2c_over_aux;
+ /* set following flag to write data,
+ * reset it to read data */
+ bool write;
+ bool mot;
+ bool write_status_update;
+
+ uint32_t address;
+ uint32_t length;
+ uint8_t *data;
+ /*
+ * used to return the reply type of the transaction
+ * ignored if NULL
+ */
+ uint8_t *reply;
+ /* expressed in milliseconds
+ * zero means "use default value"
+ */
+ uint32_t defer_delay;
+
+};
+#define DEFAULT_AUX_MAX_DATA_SIZE 16
+
struct i2c_payload {
bool write;
uint8_t address;
@@ -90,6 +116,8 @@ enum i2c_command_engine {
I2C_COMMAND_ENGINE_HW
};
+#define DDC_I2C_COMMAND_ENGINE I2C_COMMAND_ENGINE_SW
+
struct i2c_command {
struct i2c_payload *payloads;
uint8_t number_of_payloads;
@@ -150,6 +178,9 @@ enum display_dongle_type {
DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE,
};
+#define DC_MAX_EDID_BUFFER_SIZE 2048
+#define DC_EDID_BLOCK_SIZE 128
+
struct ddc_service {
struct ddc *ddc_pin;
struct ddc_flags flags;
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
index 6ccf477d1c4d..c2092775ca88 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
@@ -698,7 +698,7 @@ static void populate_subvp_cmd_pipe_info(struct dc *dc,
*
* @dc: [in] current dc state
* @context: [in] new dc state
- * @cmd: [in] DMUB cmd to be populated with SubVP info
+ * @enable: [in] if true enables the pipes population
*
* This function loops through each pipe and populates the DMUB SubVP CMD info
* based on the pipe (e.g. SubVP, VBLANK).
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
index 2c54b6e0498b..809a1851f196 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
@@ -27,6 +27,7 @@
#define DC_DP_TYPES_H
#include "os_types.h"
+#include "dc_ddc_types.h"
enum dc_lane_count {
LANE_COUNT_UNKNOWN = 0,
@@ -149,7 +150,6 @@ struct dc_link_settings {
enum dc_link_spread link_spread;
bool use_link_rate_set;
uint8_t link_rate_set;
- bool dpcd_source_device_specific_field_support;
};
union dc_dp_ffe_preset {
@@ -362,14 +362,10 @@ enum dpcd_downstream_port_detailed_type {
union dwnstream_port_caps_byte2 {
struct {
uint8_t MAX_BITS_PER_COLOR_COMPONENT:2;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
uint8_t MAX_ENCODED_LINK_BW_SUPPORT:3;
uint8_t SOURCE_CONTROL_MODE_SUPPORT:1;
uint8_t CONCURRENT_LINK_BRING_UP_SEQ_SUPPORT:1;
uint8_t RESERVED:1;
-#else
- uint8_t RESERVED:6;
-#endif
} bits;
uint8_t raw;
};
@@ -407,7 +403,6 @@ union dwnstream_port_caps_byte3_hdmi {
uint8_t raw;
};
-#if defined(CONFIG_DRM_AMD_DC_DCN)
union hdmi_sink_encoded_link_bw_support {
struct {
uint8_t HDMI_SINK_ENCODED_LINK_BW_SUPPORT:3;
@@ -429,7 +424,6 @@ union hdmi_encoded_link_bw {
} bits;
uint8_t raw;
};
-#endif
/*4-byte structure for detailed capabilities of a down-stream port
(DP-to-TMDS converter).*/
@@ -509,7 +503,11 @@ union down_spread_ctrl {
1 = Main link signal is downspread <= 0.5%
with frequency in the range of 30kHz ~ 33kHz*/
uint8_t SPREAD_AMP:1;
- uint8_t RESERVED2:2;/*Bit 6:5 = RESERVED. Read all 0s*/
+ uint8_t RESERVED2:1;/*Bit 5 = RESERVED. Read all 0s*/
+ /* Bit 6 = FIXED_VTOTAL_AS_SDP_EN_IN_PR_ACTIVE.
+ 0 = FIXED_VTOTAL_AS_SDP_EN_IN_PR_ACTIVE is not enabled by the Source device (default)
+ 1 = FIXED_VTOTAL_AS_SDP_EN_IN_PR_ACTIVE is enabled by Source device */
+ uint8_t FIXED_VTOTAL_AS_SDP_EN_IN_PR_ACTIVE:1;
/*Bit 7 = MSA_TIMING_PAR_IGNORE_EN
0 = Source device will send valid data for the MSA Timing Params
1 = Source device may send invalid data for these MSA Timing Params*/
@@ -865,6 +863,21 @@ struct psr_caps {
unsigned int psr_power_opt_flag;
};
+union dpcd_dprx_feature_enumeration_list_cont_1 {
+ struct {
+ uint8_t ADAPTIVE_SYNC_SDP_SUPPORT:1;
+ uint8_t AS_SDP_FIRST_HALF_LINE_OR_3840_PIXEL_CYCLE_WINDOW_NOT_SUPPORTED: 1;
+ uint8_t RESERVED0: 2;
+ uint8_t VSC_EXT_SDP_VER1_SUPPORT: 1;
+ uint8_t RESERVED1: 3;
+ } bits;
+ uint8_t raw;
+};
+
+struct adaptive_sync_caps {
+ union dpcd_dprx_feature_enumeration_list_cont_1 dp_adap_sync_caps;
+};
+
/* Length of router topology ID read from DPCD in bytes. */
#define DPCD_USB4_TOPOLOGY_ID_LEN 5
@@ -926,6 +939,9 @@ struct dpcd_usb4_dp_tunneling_info {
#ifndef DP_128b_132b_TRAINING_AUX_RD_INTERVAL
#define DP_128b_132b_TRAINING_AUX_RD_INTERVAL 0x2216
#endif
+#ifndef DP_LINK_SQUARE_PATTERN
+#define DP_LINK_SQUARE_PATTERN 0x10F
+#endif
#ifndef DP_CABLE_ATTRIBUTES_UPDATED_BY_DPRX
#define DP_CABLE_ATTRIBUTES_UPDATED_BY_DPRX 0x2217
#endif
@@ -973,6 +989,9 @@ struct dpcd_usb4_dp_tunneling_info {
/* TODO - Use DRM header to replace above once available */
#endif // DP_INTRA_HOP_AUX_REPLY_INDICATION
+#ifndef DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE
+#define DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE 0x50
+#endif
union dp_main_line_channel_coding_cap {
struct {
uint8_t DP_8b_10b_SUPPORTED :1;
@@ -1107,4 +1126,139 @@ struct edp_psr_info {
uint8_t force_psrsu_cap;
};
+struct dprx_states {
+ bool cable_id_written;
+};
+
+enum dpcd_downstream_port_max_bpc {
+ DOWN_STREAM_MAX_8BPC = 0,
+ DOWN_STREAM_MAX_10BPC,
+ DOWN_STREAM_MAX_12BPC,
+ DOWN_STREAM_MAX_16BPC
+};
+
+enum link_training_offset {
+ DPRX = 0,
+ LTTPR_PHY_REPEATER1 = 1,
+ LTTPR_PHY_REPEATER2 = 2,
+ LTTPR_PHY_REPEATER3 = 3,
+ LTTPR_PHY_REPEATER4 = 4,
+ LTTPR_PHY_REPEATER5 = 5,
+ LTTPR_PHY_REPEATER6 = 6,
+ LTTPR_PHY_REPEATER7 = 7,
+ LTTPR_PHY_REPEATER8 = 8
+};
+
+#define MAX_REPEATER_CNT 8
+
+struct dc_lttpr_caps {
+ union dpcd_rev revision;
+ uint8_t mode;
+ uint8_t max_lane_count;
+ uint8_t max_link_rate;
+ uint8_t phy_repeater_cnt;
+ uint8_t max_ext_timeout;
+ union dp_main_link_channel_coding_lttpr_cap main_link_channel_coding;
+ union dp_128b_132b_supported_lttpr_link_rates supported_128b_132b_rates;
+ uint8_t aux_rd_interval[MAX_REPEATER_CNT - 1];
+};
+
+struct dc_dongle_dfp_cap_ext {
+ bool supported;
+ uint16_t max_pixel_rate_in_mps;
+ uint16_t max_video_h_active_width;
+ uint16_t max_video_v_active_height;
+ struct dp_encoding_format_caps encoding_format_caps;
+ struct dp_color_depth_caps rgb_color_depth_caps;
+ struct dp_color_depth_caps ycbcr444_color_depth_caps;
+ struct dp_color_depth_caps ycbcr422_color_depth_caps;
+ struct dp_color_depth_caps ycbcr420_color_depth_caps;
+};
+
+struct dc_dongle_caps {
+ /* dongle type (DP converter, CV smart dongle) */
+ enum display_dongle_type dongle_type;
+ bool extendedCapValid;
+ /* If dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER,
+ indicates 'Frame Sequential-to-lllFrame Pack' conversion capability.*/
+ bool is_dp_hdmi_s3d_converter;
+ bool is_dp_hdmi_ycbcr422_pass_through;
+ bool is_dp_hdmi_ycbcr420_pass_through;
+ bool is_dp_hdmi_ycbcr422_converter;
+ bool is_dp_hdmi_ycbcr420_converter;
+ uint32_t dp_hdmi_max_bpc;
+ uint32_t dp_hdmi_max_pixel_clk_in_khz;
+ uint32_t dp_hdmi_frl_max_link_bw_in_kbps;
+ struct dc_dongle_dfp_cap_ext dfp_cap_ext;
+};
+
+struct dpcd_caps {
+ union dpcd_rev dpcd_rev;
+ union max_lane_count max_ln_count;
+ union max_down_spread max_down_spread;
+ union dprx_feature dprx_feature;
+
+ /* valid only for eDP v1.4 or higher*/
+ uint8_t edp_supported_link_rates_count;
+ enum dc_link_rate edp_supported_link_rates[8];
+
+ /* dongle type (DP converter, CV smart dongle) */
+ enum display_dongle_type dongle_type;
+ bool is_dongle_type_one;
+ /* branch device or sink device */
+ bool is_branch_dev;
+ /* Dongle's downstream count. */
+ union sink_count sink_count;
+ bool is_mst_capable;
+ /* If dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER,
+ indicates 'Frame Sequential-to-lllFrame Pack' conversion capability.*/
+ struct dc_dongle_caps dongle_caps;
+
+ uint32_t sink_dev_id;
+ int8_t sink_dev_id_str[6];
+ int8_t sink_hw_revision;
+ int8_t sink_fw_revision[2];
+
+ uint32_t branch_dev_id;
+ int8_t branch_dev_name[6];
+ int8_t branch_hw_revision;
+ int8_t branch_fw_revision[2];
+
+ bool allow_invalid_MSA_timing_param;
+ bool panel_mode_edp;
+ bool dpcd_display_control_capable;
+ bool ext_receiver_cap_field_present;
+ bool set_power_state_capable_edp;
+ bool dynamic_backlight_capable_edp;
+ union dpcd_fec_capability fec_cap;
+ struct dpcd_dsc_capabilities dsc_caps;
+ struct dc_lttpr_caps lttpr_caps;
+ struct adaptive_sync_caps adaptive_sync_caps;
+ struct dpcd_usb4_dp_tunneling_info usb4_dp_tun_info;
+
+ union dp_128b_132b_supported_link_rates dp_128b_132b_supported_link_rates;
+ union dp_main_line_channel_coding_cap channel_coding_cap;
+ union dp_sink_video_fallback_formats fallback_formats;
+ union dp_fec_capability1 fec_cap1;
+ union dp_cable_id cable_id;
+ uint8_t edp_rev;
+ union edp_alpm_caps alpm_caps;
+ struct edp_psr_info psr_info;
+};
+
+union dpcd_sink_ext_caps {
+ struct {
+ /* 0 - Sink supports backlight adjust via PWM during SDR/HDR mode
+ * 1 - Sink supports backlight adjust via AUX during SDR/HDR mode.
+ */
+ uint8_t sdr_aux_backlight_control : 1;
+ uint8_t hdr_aux_backlight_control : 1;
+ uint8_t reserved_1 : 2;
+ uint8_t oled : 1;
+ uint8_t reserved_2 : 1;
+ uint8_t miniled : 1;
+ uint8_t reserved : 1;
+ } bits;
+ uint8_t raw;
+};
#endif /* DC_DP_TYPES_H */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_hdmi_types.h b/drivers/gpu/drm/amd/display/dc/dc_hdmi_types.h
new file mode 100644
index 000000000000..c364744b4c83
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dc_hdmi_types.h
@@ -0,0 +1,134 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef DC_HDMI_TYPES_H
+#define DC_HDMI_TYPES_H
+
+#include "os_types.h"
+
+/* Address range from 0x00 to 0x1F.*/
+#define DP_ADAPTOR_TYPE2_SIZE 0x20
+#define DP_ADAPTOR_TYPE2_REG_ID 0x10
+#define DP_ADAPTOR_TYPE2_REG_MAX_TMDS_CLK 0x1D
+/* Identifies adaptor as Dual-mode adaptor */
+#define DP_ADAPTOR_TYPE2_ID 0xA0
+/* MHz*/
+#define DP_ADAPTOR_TYPE2_MAX_TMDS_CLK 600
+/* MHz*/
+#define DP_ADAPTOR_TYPE2_MIN_TMDS_CLK 25
+/* kHZ*/
+#define DP_ADAPTOR_DVI_MAX_TMDS_CLK 165000
+/* kHZ*/
+#define DP_ADAPTOR_HDMI_SAFE_MAX_TMDS_CLK 165000
+
+struct dp_hdmi_dongle_signature_data {
+ int8_t id[15];/* "DP-HDMI ADAPTOR"*/
+ uint8_t eot;/* end of transmition '\x4' */
+};
+
+/* DP-HDMI dongle slave address for retrieving dongle signature*/
+#define DP_HDMI_DONGLE_ADDRESS 0x40
+static const uint8_t dp_hdmi_dongle_signature_str[] = "DP-HDMI ADAPTOR";
+#define DP_HDMI_DONGLE_SIGNATURE_EOT 0x04
+
+
+/* SCDC Address defines (HDMI 2.0)*/
+#define HDMI_SCDC_WRITE_UPDATE_0_ARRAY 3
+#define HDMI_SCDC_ADDRESS 0x54
+#define HDMI_SCDC_SINK_VERSION 0x01
+#define HDMI_SCDC_SOURCE_VERSION 0x02
+#define HDMI_SCDC_UPDATE_0 0x10
+#define HDMI_SCDC_TMDS_CONFIG 0x20
+#define HDMI_SCDC_SCRAMBLER_STATUS 0x21
+#define HDMI_SCDC_CONFIG_0 0x30
+#define HDMI_SCDC_CONFIG_1 0x31
+#define HDMI_SCDC_SOURCE_TEST_REQ 0x35
+#define HDMI_SCDC_STATUS_FLAGS 0x40
+#define HDMI_SCDC_ERR_DETECT 0x50
+#define HDMI_SCDC_TEST_CONFIG 0xC0
+
+#define HDMI_SCDC_MANUFACTURER_OUI 0xD0
+#define HDMI_SCDC_DEVICE_ID 0xDB
+
+union hdmi_scdc_update_read_data {
+ uint8_t byte[2];
+ struct {
+ uint8_t STATUS_UPDATE:1;
+ uint8_t CED_UPDATE:1;
+ uint8_t RR_TEST:1;
+ uint8_t RESERVED:5;
+ uint8_t RESERVED2:8;
+ } fields;
+};
+
+union hdmi_scdc_status_flags_data {
+ uint8_t byte;
+ struct {
+ uint8_t CLOCK_DETECTED:1;
+ uint8_t CH0_LOCKED:1;
+ uint8_t CH1_LOCKED:1;
+ uint8_t CH2_LOCKED:1;
+ uint8_t RESERVED:4;
+ } fields;
+};
+
+union hdmi_scdc_ced_data {
+ uint8_t byte[11];
+ struct {
+ uint8_t CH0_8LOW:8;
+ uint8_t CH0_7HIGH:7;
+ uint8_t CH0_VALID:1;
+ uint8_t CH1_8LOW:8;
+ uint8_t CH1_7HIGH:7;
+ uint8_t CH1_VALID:1;
+ uint8_t CH2_8LOW:8;
+ uint8_t CH2_7HIGH:7;
+ uint8_t CH2_VALID:1;
+ uint8_t CHECKSUM:8;
+ uint8_t RESERVED:8;
+ uint8_t RESERVED2:8;
+ uint8_t RESERVED3:8;
+ uint8_t RESERVED4:4;
+ } fields;
+};
+
+union hdmi_scdc_manufacturer_OUI_data {
+ uint8_t byte[3];
+ struct {
+ uint8_t Manufacturer_OUI_1:8;
+ uint8_t Manufacturer_OUI_2:8;
+ uint8_t Manufacturer_OUI_3:8;
+ } fields;
+};
+
+union hdmi_scdc_device_id_data {
+ uint8_t byte;
+ struct {
+ uint8_t Hardware_Minor_Rev:4;
+ uint8_t Hardware_Major_Rev:4;
+ } fields;
+};
+
+#endif /* DC_HDMI_TYPES_H */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
index 848db8676adf..cc3d6fb39364 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
@@ -797,6 +797,29 @@ enum dc_timing_3d_format {
TIMING_3D_FORMAT_MAX,
};
+#define DC_DSC_QP_SET_SIZE 15
+#define DC_DSC_RC_BUF_THRESH_SIZE 14
+struct dc_dsc_rc_params_override {
+ int32_t rc_model_size;
+ int32_t rc_buf_thresh[DC_DSC_RC_BUF_THRESH_SIZE];
+ int32_t rc_minqp[DC_DSC_QP_SET_SIZE];
+ int32_t rc_maxqp[DC_DSC_QP_SET_SIZE];
+ int32_t rc_offset[DC_DSC_QP_SET_SIZE];
+
+ int32_t rc_tgt_offset_hi;
+ int32_t rc_tgt_offset_lo;
+ int32_t rc_edge_factor;
+ int32_t rc_quant_incr_limit0;
+ int32_t rc_quant_incr_limit1;
+
+ int32_t initial_fullness_offset;
+ int32_t initial_delay;
+
+ int32_t flatness_min_qp;
+ int32_t flatness_max_qp;
+ int32_t flatness_det_thresh;
+};
+
struct dc_dsc_config {
uint32_t num_slices_h; /* Number of DSC slices - horizontal */
uint32_t num_slices_v; /* Number of DSC slices - vertical */
@@ -811,6 +834,7 @@ struct dc_dsc_config {
#endif
bool is_dp; /* indicate if DSC is applied based on DP's capability */
uint32_t mst_pbn; /* pbn of display on dsc mst hub */
+ const struct dc_dsc_rc_params_override *rc_params_ovrd; /* DM owned memory. If not NULL, apply custom dsc rc params */
};
/**
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
index 2e18bcf6b11a..cecd807f5ed8 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
@@ -31,6 +31,7 @@
#include "grph_object_defs.h"
struct link_resource;
+enum aux_return_code_type;
enum dc_link_fec_state {
dc_link_fec_not_ready,
@@ -38,15 +39,6 @@ enum dc_link_fec_state {
dc_link_fec_enabled
};
-struct dc_link_status {
- bool link_active;
- struct dpcd_caps *dpcd_caps;
-};
-
-struct dprx_states {
- bool cable_id_written;
-};
-
/* DP MST stream allocation (payload bandwidth number) */
struct link_mst_stream_allocation {
/* DIG front */
@@ -101,6 +93,7 @@ struct psr_settings {
bool psr_allow_active; // PSR is currently active
enum dc_psr_version psr_version; // Internal PSR version, determined based on DPCD
bool psr_vtotal_control_support; // Vtotal control is supported by sink
+ unsigned long long psr_dirty_rects_change_timestamp_ns; // for delay of enabling PSR-SU
/* These parameters are calculated in Driver,
* based on display timing and Sink capabilities.
@@ -158,13 +151,15 @@ struct dc_panel_config {
struct dc_dpia_bw_alloc {
int sink_verified_bw; // The Verified BW that sink can allocated and use that has been verified already
int sink_allocated_bw; // The Actual Allocated BW that sink currently allocated
- int padding_bw; // The Padding "Un-used" BW allocated by CM for padding reasons
int sink_max_bw; // The Max BW that sink can require/support
int estimated_bw; // The estimated available BW for this DPIA
int bw_granularity; // BW Granularity
bool bw_alloc_enabled; // The BW Alloc Mode Support is turned ON for all 3: DP-Tx & Dpia & CM
+ bool response_ready; // Response ready from the CM side
};
+#define MAX_SINKS_PER_LINK 4
+
/*
* A link contains one or more sinks and their connected status.
* The currently active signal type (HDMI, DP-SST, DP-MST) is also reported.
@@ -279,6 +274,7 @@ struct dc_link {
bool dp_keep_receiver_powered;
bool dp_skip_DID2;
bool dp_skip_reset_segment;
+ bool dp_skip_fs_144hz;
bool dp_mot_reset_segment;
/* Some USB4 docks do not handle turning off MST DSC once it has been enabled. */
bool dpia_mst_dsc_always_on;
@@ -293,11 +289,12 @@ struct dc_link {
struct gpio *hpd_gpio;
enum dc_link_fec_state fec_state;
+ bool link_powered_externally; // Used to bypass hardware sequencing delays when panel is powered down forcibly
+
struct dc_panel_config panel_config;
struct phy_state phy_state;
};
-const struct dc_link_status *dc_link_get_status(const struct dc_link *dc_link);
/**
* dc_get_link_at_index() - Return an enumerated dc_link.
@@ -335,15 +332,17 @@ static inline bool dc_get_edp_link_panel_inst(const struct dc *dc,
unsigned int *inst_out)
{
struct dc_link *edp_links[MAX_NUM_EDP];
- int edp_num;
+ int edp_num, i;
+ *inst_out = 0;
if (link->connector_signal != SIGNAL_TYPE_EDP)
return false;
get_edp_links(dc, edp_links, &edp_num);
- if ((edp_num > 1) && (link->link_index > edp_links[0]->link_index))
- *inst_out = 1;
- else
- *inst_out = 0;
+ for (i = 0; i < edp_num; i++) {
+ if (link == edp_links[i])
+ break;
+ (*inst_out)++;
+ }
return true;
}
@@ -365,11 +364,6 @@ bool dc_link_get_backlight_level_nits(struct dc_link *link,
uint32_t *backlight_millinits,
uint32_t *backlight_millinits_peak);
-bool dc_link_backlight_enable_aux(struct dc_link *link, bool enable);
-
-bool dc_link_read_default_bl_aux(struct dc_link *link, uint32_t *backlight_millinits);
-bool dc_link_set_default_brightness_aux(struct dc_link *link);
-
int dc_link_get_backlight_level(const struct dc_link *dc_link);
int dc_link_get_target_backlight_pwm(const struct dc_link *link);
@@ -383,38 +377,7 @@ bool dc_link_setup_psr(struct dc_link *dc_link,
const struct dc_stream_state *stream, struct psr_config *psr_config,
struct psr_context *psr_context);
-bool dc_power_alpm_dpcd_enable(struct dc_link *link, bool enable);
-
-void dc_link_get_psr_residency(const struct dc_link *link, uint32_t *residency);
-
-void dc_link_blank_all_dp_displays(struct dc *dc);
-void dc_link_blank_all_edp_displays(struct dc *dc);
-
-void dc_link_blank_dp_stream(struct dc_link *link, bool hw_init);
-bool dc_link_set_sink_vtotal_in_psr_active(const struct dc_link *link,
- uint16_t psr_vtotal_idle, uint16_t psr_vtotal_su);
-
-/* Request DC to detect if there is a Panel connected.
- * boot - If this call is during initial boot.
- * Return false for any type of detection failure or MST detection
- * true otherwise. True meaning further action is required (status update
- * and OS notification).
- */
-enum dc_detect_reason {
- DETECT_REASON_BOOT,
- DETECT_REASON_RESUMEFROMS3S4,
- DETECT_REASON_HPD,
- DETECT_REASON_HPDRX,
- DETECT_REASON_FALLBACK,
- DETECT_REASON_RETRAIN,
- DETECT_REASON_TDR,
-};
-
-bool dc_link_detect(struct dc_link *dc_link, enum dc_detect_reason reason);
bool dc_link_get_hpd_state(struct dc_link *dc_link);
-enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx);
-enum dc_status dc_link_reduce_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t req_pbn);
-enum dc_status dc_link_increase_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t req_pbn);
/* Notify DC about DP RX Interrupt (aka Short Pulse Interrupt).
* Return:
@@ -436,7 +399,11 @@ bool dc_link_wait_for_t12(struct dc_link *link);
void dc_link_dp_handle_automated_test(struct dc_link *link);
void dc_link_dp_handle_link_loss(struct dc_link *link);
bool dc_link_dp_allow_hpd_rx_irq(const struct dc_link *link);
-
+bool dc_link_check_link_loss_status(struct dc_link *link,
+ union hpd_irq_data *hpd_irq_dpcd_data);
+enum dc_status dc_link_dp_read_hpd_rx_irq_data(
+ struct dc_link *link,
+ union hpd_irq_data *irq_data);
struct dc_sink_init_data;
struct dc_sink *dc_link_add_remote_sink(
@@ -451,36 +418,6 @@ void dc_link_remove_remote_sink(
/* Used by diagnostics for virtual link at the moment */
-void dc_link_dp_set_drive_settings(
- struct dc_link *link,
- const struct link_resource *link_res,
- struct link_training_settings *lt_settings);
-
-bool dc_link_dp_perform_link_training_skip_aux(
- struct dc_link *link,
- const struct link_resource *link_res,
- const struct dc_link_settings *link_setting);
-
-enum link_training_result dc_link_dp_perform_link_training(
- struct dc_link *link,
- const struct link_resource *link_res,
- const struct dc_link_settings *link_settings,
- bool skip_video_pattern);
-
-bool dc_link_dp_sync_lt_begin(struct dc_link *link);
-
-enum link_training_result dc_link_dp_sync_lt_attempt(
- struct dc_link *link,
- const struct link_resource *link_res,
- struct dc_link_settings *link_setting,
- struct dc_link_training_overrides *lt_settings);
-
-bool dc_link_dp_sync_lt_end(struct dc_link *link, bool link_down);
-
-void dc_link_dp_enable_hpd(const struct dc_link *link);
-
-void dc_link_dp_disable_hpd(const struct dc_link *link);
-
bool dc_link_dp_set_test_pattern(
struct dc_link *link,
enum dp_test_pattern test_pattern,
@@ -491,19 +428,28 @@ bool dc_link_dp_set_test_pattern(
bool dc_link_dp_get_max_link_enc_cap(const struct dc_link *link, struct dc_link_settings *max_link_enc_cap);
+/**
+ *****************************************************************************
+ * Function: dc_link_enable_hpd_filter
+ *
+ * @brief
+ * If enable is true, programs HPD filter on associated HPD line to default
+ * values dependent on link->connector_signal
+ *
+ * If enable is false, programs HPD filter on associated HPD line with no
+ * delays on connect or disconnect
+ *
+ * @param [in] link: pointer to the dc link
+ * @param [in] enable: boolean specifying whether to enable hbd
+ *****************************************************************************
+ */
void dc_link_enable_hpd_filter(struct dc_link *link, bool enable);
bool dc_link_is_dp_sink_present(struct dc_link *link);
-
-bool dc_link_detect_sink(struct dc_link *link, enum dc_connection_type *type);
/*
* DPCD access interfaces
*/
-#ifdef CONFIG_DRM_AMD_DC_HDCP
-bool dc_link_is_hdcp14(struct dc_link *link, enum signal_type signal);
-bool dc_link_is_hdcp22(struct dc_link *link, enum signal_type signal);
-#endif
void dc_link_set_drive_settings(struct dc *dc,
struct link_training_settings *lt_settings,
const struct dc_link *link);
@@ -523,9 +469,6 @@ void dc_link_set_test_pattern(struct dc_link *link,
const struct link_training_settings *p_link_settings,
const unsigned char *p_custom_pattern,
unsigned int cust_pattern_size);
-uint32_t dc_link_bandwidth_kbps(
- const struct dc_link *link,
- const struct dc_link_settings *link_setting);
const struct dc_link_settings *dc_link_get_link_cap(
const struct dc_link *link);
@@ -547,25 +490,16 @@ bool dc_submit_i2c_oem(
struct dc *dc,
struct i2c_command *cmd);
-uint32_t dc_bandwidth_in_kbps_from_timing(
- const struct dc_crtc_timing *timing);
-
bool dc_link_is_fec_supported(const struct dc_link *link);
bool dc_link_should_enable_fec(const struct dc_link *link);
uint32_t dc_link_bw_kbps_from_raw_frl_link_rate_data(uint8_t bw);
enum dp_link_encoding dc_link_dp_mst_decide_link_encoding_format(const struct dc_link *link);
-void dc_link_get_cur_link_res(const struct dc_link *link,
- struct link_resource *link_res);
/* take a snapshot of current link resource allocation state */
void dc_get_cur_link_res_map(const struct dc *dc, uint32_t *map);
/* restore link resource allocation state from a snapshot */
void dc_restore_link_res_map(const struct dc *dc, uint32_t *map);
-void dc_link_clear_dprx_states(struct dc_link *link);
-struct gpio *get_hpd_gpio(struct dc_bios *dcb,
- struct graphics_object_id link_id,
- struct gpio_service *gpio_service);
void dp_trace_reset(struct dc_link *link);
bool dc_dp_trace_is_initialized(struct dc_link *link);
unsigned long long dc_dp_trace_get_lt_end_timestamp(struct dc_link *link,
@@ -579,6 +513,65 @@ struct dp_trace_lt_counts *dc_dp_trace_get_lt_counts(struct dc_link *link,
bool in_detection);
unsigned int dc_dp_trace_get_link_loss_count(struct dc_link *link);
-/* Destruct the mst topology of the link and reset the allocated payload table */
-bool reset_cur_dp_mst_topology(struct dc_link *link);
+/* Attempt to transfer the given aux payload. This function does not perform
+ * retries or handle error states. The reply is returned in the payload->reply
+ * and the result through operation_result. Returns the number of bytes
+ * transferred,or -1 on a failure.
+ */
+int dc_link_aux_transfer_raw(struct ddc_service *ddc,
+ struct aux_payload *payload,
+ enum aux_return_code_type *operation_result);
+
+enum lttpr_mode dc_link_decide_lttpr_mode(struct dc_link *link,
+ struct dc_link_settings *link_setting);
+void dc_link_dp_receiver_power_ctrl(struct dc_link *link, bool on);
+bool dc_link_decide_edp_link_settings(struct dc_link *link,
+ struct dc_link_settings *link_setting,
+ uint32_t req_bw);
+void dc_link_edp_panel_backlight_power_on(struct dc_link *link,
+ bool wait_for_hpd);
+
+/*
+ * USB4 DPIA BW ALLOCATION PUBLIC FUNCTIONS
+ */
+/*
+ * Send a request from DP-Tx requesting to allocate BW remotely after
+ * allocating it locally. This will get processed by CM and a CB function
+ * will be called.
+ *
+ * @link: pointer to the dc_link struct instance
+ * @req_bw: The requested bw in Kbyte to allocated
+ *
+ * return: none
+ */
+void dc_link_set_usb4_req_bw_req(struct dc_link *link, int req_bw);
+
+/*
+ * CB function for when the status of the Req above is complete. We will
+ * find out the result of allocating on CM and update structs accordingly
+ *
+ * @link: pointer to the dc_link struct instance
+ * @bw: Allocated or Estimated BW depending on the result
+ * @result: Response type
+ *
+ * return: none
+ */
+void dc_link_get_usb4_req_bw_resp(struct dc_link *link, uint8_t bw, uint8_t result);
+
+/*
+ * Handle the USB4 BW Allocation related functionality here:
+ * Plug => Try to allocate max bw from timing parameters supported by the sink
+ * Unplug => de-allocate bw
+ *
+ * @link: pointer to the dc_link struct instance
+ * @peak_bw: Peak bw used by the link/sink
+ *
+ * return: allocated bw else return 0
+ */
+int dc_link_dp_dpia_handle_usb4_bandwidth_allocation_for_link(struct dc_link *link, int peak_bw);
+
+/* TODO: this is not meant to be exposed to DM. Should switch to stream update
+ * interface i.e stream_update->dsc_config
+ */
+bool dc_link_update_dsc_config(struct pipe_ctx *pipe_ctx);
#endif /* DC_LINK_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index dfd3df1d2f7e..567452599659 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -190,6 +190,7 @@ struct dc_stream_state {
struct dc_info_packet vsp_infopacket;
struct dc_info_packet hfvsif_infopacket;
struct dc_info_packet vtem_infopacket;
+ struct dc_info_packet adaptive_sync_infopacket;
uint8_t dsc_packed_pps[128];
struct rect src; /* composition area */
struct rect dst; /* stream addressable area */
@@ -313,6 +314,7 @@ struct dc_stream_update {
struct dc_info_packet *vsp_infopacket;
struct dc_info_packet *hfvsif_infopacket;
struct dc_info_packet *vtem_infopacket;
+ struct dc_info_packet *adaptive_sync_infopacket;
bool *dpms_off;
bool integer_scaling_update;
bool *allow_freesync;
@@ -543,9 +545,8 @@ bool dc_stream_get_crtc_position(struct dc *dc,
unsigned int *nom_v_pos);
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
-bool dc_stream_forward_crc_window(struct dc *dc,
+bool dc_stream_forward_crc_window(struct dc_stream_state *stream,
struct rect *rect,
- struct dc_stream_state *stream,
bool is_stop);
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index dc78e2404b48..27d0242d6cbd 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -32,7 +32,9 @@
#include "os_types.h"
#include "fixed31_32.h"
#include "irq_types.h"
+#include "dc_ddc_types.h"
#include "dc_dp_types.h"
+#include "dc_hdmi_types.h"
#include "dc_hw_types.h"
#include "dal_types.h"
#include "grph_object_defs.h"
@@ -82,13 +84,8 @@ struct dc_perf_trace {
unsigned long last_entry_write;
};
-#define DC_MAX_EDID_BUFFER_SIZE 2048
-#define DC_EDID_BLOCK_SIZE 128
#define MAX_SURFACE_NUM 4
#define NUM_PIXEL_FORMATS 10
-#define MAX_REPEATER_CNT 8
-
-#include "dc_ddc_types.h"
enum tiling_mode {
TILING_MODE_INVALID,
@@ -374,66 +371,6 @@ struct dc_csc_adjustments {
struct fixed31_32 hue;
};
-enum dpcd_downstream_port_max_bpc {
- DOWN_STREAM_MAX_8BPC = 0,
- DOWN_STREAM_MAX_10BPC,
- DOWN_STREAM_MAX_12BPC,
- DOWN_STREAM_MAX_16BPC
-};
-
-
-enum link_training_offset {
- DPRX = 0,
- LTTPR_PHY_REPEATER1 = 1,
- LTTPR_PHY_REPEATER2 = 2,
- LTTPR_PHY_REPEATER3 = 3,
- LTTPR_PHY_REPEATER4 = 4,
- LTTPR_PHY_REPEATER5 = 5,
- LTTPR_PHY_REPEATER6 = 6,
- LTTPR_PHY_REPEATER7 = 7,
- LTTPR_PHY_REPEATER8 = 8
-};
-
-struct dc_lttpr_caps {
- union dpcd_rev revision;
- uint8_t mode;
- uint8_t max_lane_count;
- uint8_t max_link_rate;
- uint8_t phy_repeater_cnt;
- uint8_t max_ext_timeout;
- union dp_main_link_channel_coding_lttpr_cap main_link_channel_coding;
- union dp_128b_132b_supported_lttpr_link_rates supported_128b_132b_rates;
- uint8_t aux_rd_interval[MAX_REPEATER_CNT - 1];
-};
-
-struct dc_dongle_dfp_cap_ext {
- bool supported;
- uint16_t max_pixel_rate_in_mps;
- uint16_t max_video_h_active_width;
- uint16_t max_video_v_active_height;
- struct dp_encoding_format_caps encoding_format_caps;
- struct dp_color_depth_caps rgb_color_depth_caps;
- struct dp_color_depth_caps ycbcr444_color_depth_caps;
- struct dp_color_depth_caps ycbcr422_color_depth_caps;
- struct dp_color_depth_caps ycbcr420_color_depth_caps;
-};
-
-struct dc_dongle_caps {
- /* dongle type (DP converter, CV smart dongle) */
- enum display_dongle_type dongle_type;
- bool extendedCapValid;
- /* If dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER,
- indicates 'Frame Sequential-to-lllFrame Pack' conversion capability.*/
- bool is_dp_hdmi_s3d_converter;
- bool is_dp_hdmi_ycbcr422_pass_through;
- bool is_dp_hdmi_ycbcr420_pass_through;
- bool is_dp_hdmi_ycbcr422_converter;
- bool is_dp_hdmi_ycbcr420_converter;
- uint32_t dp_hdmi_max_bpc;
- uint32_t dp_hdmi_max_pixel_clk_in_khz;
- uint32_t dp_hdmi_frl_max_link_bw_in_kbps;
- struct dc_dongle_dfp_cap_ext dfp_cap_ext;
-};
/* Scaling format */
enum scaling_transformation {
SCALING_TRANSFORMATION_UNINITIALIZED,
@@ -690,6 +627,7 @@ struct psr_config {
uint8_t su_y_granularity;
unsigned int line_time_in_us;
uint8_t rate_control_caps;
+ uint16_t dsc_slice_height;
};
union dmcu_psr_level {
@@ -801,6 +739,7 @@ struct psr_context {
uint8_t su_y_granularity;
unsigned int line_time_in_us;
uint8_t rate_control_caps;
+ uint16_t dsc_slice_height;
};
struct colorspace_transform {
@@ -1000,4 +939,47 @@ struct otg_phy_mux {
};
#endif
+enum dc_detect_reason {
+ DETECT_REASON_BOOT,
+ DETECT_REASON_RESUMEFROMS3S4,
+ DETECT_REASON_HPD,
+ DETECT_REASON_HPDRX,
+ DETECT_REASON_FALLBACK,
+ DETECT_REASON_RETRAIN,
+ DETECT_REASON_TDR,
+};
+
+struct dc_link_status {
+ bool link_active;
+ struct dpcd_caps *dpcd_caps;
+};
+
+#if defined(CONFIG_DRM_AMD_DC_HDCP)
+union hdcp_rx_caps {
+ struct {
+ uint8_t version;
+ uint8_t reserved;
+ struct {
+ uint8_t repeater : 1;
+ uint8_t hdcp_capable : 1;
+ uint8_t reserved : 6;
+ } byte0;
+ } fields;
+ uint8_t raw[3];
+};
+
+union hdcp_bcaps {
+ struct {
+ uint8_t HDCP_CAPABLE:1;
+ uint8_t REPEATER:1;
+ uint8_t RESERVED:6;
+ } bits;
+ uint8_t raw;
+};
+
+struct hdcp_caps {
+ union hdcp_rx_caps rx_caps;
+ union hdcp_bcaps bcaps;
+};
+#endif
#endif /* DC_TYPES_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h
index e69f1899fbf0..c850ed49281f 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h
@@ -26,7 +26,7 @@
#ifndef __DAL_AUX_ENGINE_DCE110_H__
#define __DAL_AUX_ENGINE_DCE110_H__
-#include "i2caux_interface.h"
+#include "gpio_service_interface.h"
#include "inc/hw/aux_engine.h"
enum aux_return_code_type;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
index 09260c23c3bd..fa314493ffc5 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
@@ -29,7 +29,6 @@
#include "link_encoder.h"
#include "dce_link_encoder.h"
#include "stream_encoder.h"
-#include "i2caux_interface.h"
#include "dc_bios_types.h"
#include "gpio_service_interface.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
index 2d3201b77d6a..1e2d2cbe2c37 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
@@ -417,6 +417,7 @@ static bool dmub_psr_copy_settings(struct dmub_psr *dmub,
copy_settings_data->relock_delay_frame_cnt = 0;
if (link->dpcd_caps.sink_dev_id == DP_BRANCH_DEVICE_ID_001CF8)
copy_settings_data->relock_delay_frame_cnt = 2;
+ copy_settings_data->dsc_slice_height = psr_context->dsc_slice_height;
dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
dc_dmub_srv_cmd_execute(dc->dmub_srv);
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 913a1fe6b3da..fb3fd5b7c78b 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -46,7 +46,7 @@
#include "link_encoder.h"
#include "link_enc_cfg.h"
#include "link_hwss.h"
-#include "dc_link_dp.h"
+#include "link.h"
#include "dccg.h"
#include "clock_source.h"
#include "clk_mgr.h"
@@ -54,7 +54,6 @@
#include "audio.h"
#include "reg_helper.h"
#include "panel_cntl.h"
-#include "inc/link_dpcd.h"
#include "dpcd_defs.h"
/* include DCE11 register header files */
#include "dce/dce_11_0_d.h"
@@ -65,7 +64,6 @@
#include "dcn10/dcn10_hw_sequencer.h"
-#include "link/link_dp_trace.h"
#include "dce110_hw_sequencer.h"
#define GAMMA_HW_POINTS_NUM 256
@@ -653,10 +651,16 @@ void dce110_update_info_frame(struct pipe_ctx *pipe_ctx)
pipe_ctx->stream_res.stream_enc->funcs->update_hdmi_info_packets(
pipe_ctx->stream_res.stream_enc,
&pipe_ctx->stream_res.encoder_info_frame);
- else
+ else {
+ if (pipe_ctx->stream_res.stream_enc->funcs->update_dp_info_packets_sdp_line_num)
+ pipe_ctx->stream_res.stream_enc->funcs->update_dp_info_packets_sdp_line_num(
+ pipe_ctx->stream_res.stream_enc,
+ &pipe_ctx->stream_res.encoder_info_frame);
+
pipe_ctx->stream_res.stream_enc->funcs->update_dp_info_packets(
pipe_ctx->stream_res.stream_enc,
&pipe_ctx->stream_res.encoder_info_frame);
+ }
}
void dce110_enable_stream(struct pipe_ctx *pipe_ctx)
@@ -737,7 +741,7 @@ void dce110_edp_wait_for_hpd_ready(
/* obtain HPD */
/* TODO what to do with this? */
- hpd = get_hpd_gpio(ctx->dc_bios, connector, ctx->gpio_service);
+ hpd = link_get_hpd_gpio(ctx->dc_bios, connector, ctx->gpio_service);
if (!hpd) {
BREAK_TO_DEBUGGER();
@@ -807,19 +811,19 @@ void dce110_edp_power_control(
div64_u64(dm_get_elapse_time_in_ns(
ctx,
current_ts,
- dp_trace_get_edp_poweroff_timestamp(link)), 1000000);
+ link_dp_trace_get_edp_poweroff_timestamp(link)), 1000000);
unsigned long long time_since_edp_poweron_ms =
div64_u64(dm_get_elapse_time_in_ns(
ctx,
current_ts,
- dp_trace_get_edp_poweron_timestamp(link)), 1000000);
+ link_dp_trace_get_edp_poweron_timestamp(link)), 1000000);
DC_LOG_HW_RESUME_S3(
"%s: transition: power_up=%d current_ts=%llu edp_poweroff=%llu edp_poweron=%llu time_since_edp_poweroff_ms=%llu time_since_edp_poweron_ms=%llu",
__func__,
power_up,
current_ts,
- dp_trace_get_edp_poweroff_timestamp(link),
- dp_trace_get_edp_poweron_timestamp(link),
+ link_dp_trace_get_edp_poweroff_timestamp(link),
+ link_dp_trace_get_edp_poweron_timestamp(link),
time_since_edp_poweroff_ms,
time_since_edp_poweron_ms);
@@ -834,7 +838,7 @@ void dce110_edp_power_control(
link->panel_config.pps.extra_t12_ms;
/* Adjust remaining_min_edp_poweroff_time_ms if this is not the first time. */
- if (dp_trace_get_edp_poweroff_timestamp(link) != 0) {
+ if (link_dp_trace_get_edp_poweroff_timestamp(link) != 0) {
if (time_since_edp_poweroff_ms < remaining_min_edp_poweroff_time_ms)
remaining_min_edp_poweroff_time_ms =
remaining_min_edp_poweroff_time_ms - time_since_edp_poweroff_ms;
@@ -875,14 +879,16 @@ void dce110_edp_power_control(
if (ctx->dc->ctx->dmub_srv &&
ctx->dc->debug.dmub_command_table) {
- if (cntl.action == TRANSMITTER_CONTROL_POWER_ON)
+
+ if (cntl.action == TRANSMITTER_CONTROL_POWER_ON) {
bp_result = ctx->dc_bios->funcs->enable_lvtma_control(ctx->dc_bios,
LVTMA_CONTROL_POWER_ON,
- panel_instance);
- else
+ panel_instance, link->link_powered_externally);
+ } else {
bp_result = ctx->dc_bios->funcs->enable_lvtma_control(ctx->dc_bios,
LVTMA_CONTROL_POWER_OFF,
- panel_instance);
+ panel_instance, link->link_powered_externally);
+ }
}
bp_result = link_transmitter_control(ctx->dc_bios, &cntl);
@@ -892,13 +898,13 @@ void dce110_edp_power_control(
__func__, (power_up ? "On":"Off"),
bp_result);
- dp_trace_set_edp_power_timestamp(link, power_up);
+ link_dp_trace_set_edp_power_timestamp(link, power_up);
DC_LOG_HW_RESUME_S3(
"%s: updated values: edp_poweroff=%llu edp_poweron=%llu\n",
__func__,
- dp_trace_get_edp_poweroff_timestamp(link),
- dp_trace_get_edp_poweron_timestamp(link));
+ link_dp_trace_get_edp_poweroff_timestamp(link),
+ link_dp_trace_get_edp_poweron_timestamp(link));
if (bp_result != BP_RESULT_OK)
DC_LOG_ERROR(
@@ -926,14 +932,14 @@ void dce110_edp_wait_for_T12(
return;
if (!link->panel_cntl->funcs->is_panel_powered_on(link->panel_cntl) &&
- dp_trace_get_edp_poweroff_timestamp(link) != 0) {
+ link_dp_trace_get_edp_poweroff_timestamp(link) != 0) {
unsigned int t12_duration = 500; // Default T12 as per spec
unsigned long long current_ts = dm_get_timestamp(ctx);
unsigned long long time_since_edp_poweroff_ms =
div64_u64(dm_get_elapse_time_in_ns(
ctx,
current_ts,
- dp_trace_get_edp_poweroff_timestamp(link)), 1000000);
+ link_dp_trace_get_edp_poweroff_timestamp(link)), 1000000);
t12_duration += link->panel_config.pps.extra_t12_ms; // Add extra T12
@@ -941,7 +947,6 @@ void dce110_edp_wait_for_T12(
msleep(t12_duration - time_since_edp_poweroff_ms);
}
}
-
/*todo: cloned in stream enc, fix*/
/*
* @brief
@@ -1015,21 +1020,25 @@ void dce110_edp_backlight_control(
* we shouldn't be doing power-sequencing, hence we can skip
* waiting for T7-ready.
*/
- edp_receiver_ready_T7(link);
+ link_edp_receiver_ready_T7(link);
else
DC_LOG_DC("edp_receiver_ready_T7 skipped\n");
}
+ /* Setting link_powered_externally will bypass delays in the backlight
+ * as they are not required if the link is being powered by a different
+ * source.
+ */
if (ctx->dc->ctx->dmub_srv &&
ctx->dc->debug.dmub_command_table) {
if (cntl.action == TRANSMITTER_CONTROL_BACKLIGHT_ON)
ctx->dc_bios->funcs->enable_lvtma_control(ctx->dc_bios,
LVTMA_CONTROL_LCD_BLON,
- panel_instance);
+ panel_instance, link->link_powered_externally);
else
ctx->dc_bios->funcs->enable_lvtma_control(ctx->dc_bios,
LVTMA_CONTROL_LCD_BLOFF,
- panel_instance);
+ panel_instance, link->link_powered_externally);
}
link_transmitter_control(ctx->dc_bios, &cntl);
@@ -1042,7 +1051,7 @@ void dce110_edp_backlight_control(
if (link->dpcd_sink_ext_caps.bits.oled ||
link->dpcd_sink_ext_caps.bits.hdr_aux_backlight_control == 1 ||
link->dpcd_sink_ext_caps.bits.sdr_aux_backlight_control == 1)
- dc_link_backlight_enable_aux(link, enable);
+ link_backlight_enable_aux(link, enable);
/*edp 1.2*/
if (cntl.action == TRANSMITTER_CONTROL_BACKLIGHT_OFF) {
@@ -1054,7 +1063,7 @@ void dce110_edp_backlight_control(
* we shouldn't be doing power-sequencing, hence we can skip
* waiting for T9-ready.
*/
- edp_add_delay_for_T9(link);
+ link_edp_add_delay_for_T9(link);
else
DC_LOG_DC("edp_receiver_ready_T9 skipped\n");
}
@@ -1142,6 +1151,10 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx)
struct dc_link *link = stream->link;
struct dc *dc = pipe_ctx->stream->ctx->dc;
const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
+ struct dccg *dccg = dc->res_pool->dccg;
+ struct timing_generator *tg = pipe_ctx->stream_res.tg;
+ struct dtbclk_dto_params dto_params = {0};
+ int dp_hpo_inst;
if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal)) {
pipe_ctx->stream_res.stream_enc->funcs->stop_hdmi_info_packets(
@@ -1150,7 +1163,7 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx)
pipe_ctx->stream_res.stream_enc);
}
- if (is_dp_128b_132b_signal(pipe_ctx)) {
+ if (link_is_dp_128b_132b_signal(pipe_ctx)) {
pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->stop_dp_info_packets(
pipe_ctx->stream_res.hpo_dp_stream_enc);
} else if (dc_is_dp_signal(pipe_ctx->stream->signal))
@@ -1161,7 +1174,16 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx)
link_hwss->reset_stream_encoder(pipe_ctx);
- if (is_dp_128b_132b_signal(pipe_ctx)) {
+ if (link_is_dp_128b_132b_signal(pipe_ctx)) {
+ dto_params.otg_inst = tg->inst;
+ dto_params.timing = &pipe_ctx->stream->timing;
+ dp_hpo_inst = pipe_ctx->stream_res.hpo_dp_stream_enc->inst;
+ dccg->funcs->set_dtbclk_dto(dccg, &dto_params);
+ dccg->funcs->disable_symclk32_se(dccg, dp_hpo_inst);
+ dccg->funcs->set_dpstreamclk(dccg, REFCLK, tg->inst, dp_hpo_inst);
+ }
+
+ if (link_is_dp_128b_132b_signal(pipe_ctx)) {
/* TODO: This looks like a bug to me as we are disabling HPO IO when
* we are just disabling a single HPO stream. Shouldn't we disable HPO
* HW control only when HPOs for all streams are disabled?
@@ -1203,7 +1225,7 @@ void dce110_blank_stream(struct pipe_ctx *pipe_ctx)
link->dc->hwss.set_abm_immediate_disable(pipe_ctx);
}
- if (is_dp_128b_132b_signal(pipe_ctx)) {
+ if (link_is_dp_128b_132b_signal(pipe_ctx)) {
/* TODO - DP2.0 HW: Set ODM mode in dp hpo encoder here */
pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_blank(
pipe_ctx->stream_res.hpo_dp_stream_enc);
@@ -1225,7 +1247,7 @@ void dce110_blank_stream(struct pipe_ctx *pipe_ctx)
* we shouldn't be doing power-sequencing, hence we can skip
* waiting for T9-ready.
*/
- edp_receiver_ready_T9(link);
+ link_edp_receiver_ready_T9(link);
}
}
}
@@ -1408,7 +1430,7 @@ static enum dc_status dce110_enable_stream_timing(
if (false == pipe_ctx->clock_source->funcs->program_pix_clk(
pipe_ctx->clock_source,
&pipe_ctx->stream_res.pix_clk_params,
- dp_get_link_encoding_format(&pipe_ctx->link_config.dp_link_settings),
+ link_dp_get_encoding_format(&pipe_ctx->link_config.dp_link_settings),
&pipe_ctx->pll_settings)) {
BREAK_TO_DEBUGGER();
return DC_ERROR_UNEXPECTED;
@@ -1512,7 +1534,7 @@ static enum dc_status apply_single_controller_ctx_to_hw(
* To do so, move calling function enable_stream_timing to only be done AFTER calling
* function core_link_enable_stream
*/
- if (!(hws->wa.dp_hpo_and_otg_sequence && is_dp_128b_132b_signal(pipe_ctx)))
+ if (!(hws->wa.dp_hpo_and_otg_sequence && link_is_dp_128b_132b_signal(pipe_ctx)))
/* */
/* Do not touch stream timing on seamless boot optimization. */
if (!pipe_ctx->stream->apply_seamless_boot_optimization)
@@ -1544,17 +1566,17 @@ static enum dc_status apply_single_controller_ctx_to_hw(
pipe_ctx->stream_res.tg->inst);
if (dc_is_dp_signal(pipe_ctx->stream->signal))
- dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_CONNECT_DIG_FE_OTG);
+ link_dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_CONNECT_DIG_FE_OTG);
if (!stream->dpms_off)
- core_link_enable_stream(context, pipe_ctx);
+ link_set_dpms_on(context, pipe_ctx);
/* DCN3.1 FPGA Workaround
* Need to enable HPO DP Stream Encoder before setting OTG master enable.
* To do so, move calling function enable_stream_timing to only be done AFTER calling
* function core_link_enable_stream
*/
- if (hws->wa.dp_hpo_and_otg_sequence && is_dp_128b_132b_signal(pipe_ctx)) {
+ if (hws->wa.dp_hpo_and_otg_sequence && link_is_dp_128b_132b_signal(pipe_ctx)) {
if (!pipe_ctx->stream->apply_seamless_boot_optimization)
hws->funcs.enable_stream_timing(pipe_ctx, context, dc);
}
@@ -1580,7 +1602,7 @@ static void power_down_encoders(struct dc *dc)
for (i = 0; i < dc->link_count; i++) {
enum signal_type signal = dc->links[i]->connector_signal;
- dc_link_blank_dp_stream(dc->links[i], false);
+ link_blank_dp_stream(dc->links[i], false);
if (signal != SIGNAL_TYPE_EDP)
signal = SIGNAL_TYPE_NONE;
@@ -2063,7 +2085,7 @@ static void dce110_reset_hw_ctx_wrap(
* disabled already, no need to disable again.
*/
if (!pipe_ctx->stream || !pipe_ctx->stream->dpms_off) {
- core_link_disable_stream(pipe_ctx_old);
+ link_set_dpms_off(pipe_ctx_old);
/* free acquired resources*/
if (pipe_ctx_old->stream_res.audio) {
@@ -3034,13 +3056,13 @@ void dce110_enable_dp_link_output(
pipes[i].clock_source->funcs->program_pix_clk(
pipes[i].clock_source,
&pipes[i].stream_res.pix_clk_params,
- dp_get_link_encoding_format(link_settings),
+ link_dp_get_encoding_format(link_settings),
&pipes[i].pll_settings);
}
}
}
- if (dp_get_link_encoding_format(link_settings) == DP_8b_10b_ENCODING) {
+ if (link_dp_get_encoding_format(link_settings) == DP_8b_10b_ENCODING) {
if (dc->clk_mgr->funcs->notify_link_rate_change)
dc->clk_mgr->funcs->notify_link_rate_change(dc->clk_mgr, link);
}
@@ -3057,7 +3079,7 @@ void dce110_enable_dp_link_output(
if (dmcu != NULL && dmcu->funcs->unlock_phy)
dmcu->funcs->unlock_phy(dmcu);
- dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_LINK_PHY);
+ link_dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_LINK_PHY);
}
void dce110_disable_link_output(struct dc_link *link,
@@ -3082,7 +3104,7 @@ void dce110_disable_link_output(struct dc_link *link,
link->dc->hwss.edp_power_control(link, false);
else if (dmcu != NULL && dmcu->funcs->lock_phy)
dmcu->funcs->unlock_phy(dmcu);
- dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY);
+ link_dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY);
}
static const struct hw_sequencer_funcs dce110_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
index 758f4b3b0087..394d83a97f33 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
@@ -71,7 +71,7 @@ void dce110_optimize_bandwidth(
struct dc *dc,
struct dc_state *context);
-void dp_receiver_power_ctrl(struct dc_link *link, bool on);
+void dc_link_dp_receiver_power_ctrl(struct dc_link *link, bool on);
void dce110_edp_power_control(
struct dc_link *link,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
index f607a0e28f14..f62368da875d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
@@ -581,7 +581,7 @@ static void dpp1_dscl_set_manual_ratio_init(
* dpp1_dscl_set_recout - Set the first pixel of RECOUT in the OTG active area
*
* @dpp: DPP data struct
- * @recount: Rectangle information
+ * @recout: Rectangle information
*
* This function sets the MPC RECOUT_START and RECOUT_SIZE registers based on
* the values specified in the recount parameter.
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
index ba1c0621f0f8..e8752077571a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
@@ -172,6 +172,10 @@ struct dcn_hubbub_registers {
uint32_t DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_C;
uint32_t DCHUBBUB_ARB_FCLK_PSTATE_CHANGE_WATERMARK_D;
uint32_t SDPIF_REQUEST_RATE_LIMIT;
+ uint32_t DCHUBBUB_SDPIF_CFG0;
+ uint32_t DCHUBBUB_SDPIF_CFG1;
+ uint32_t DCHUBBUB_CLOCK_CNTL;
+ uint32_t DCHUBBUB_MEM_PWR_MODE_CTRL;
};
#define HUBBUB_REG_FIELD_LIST_DCN32(type) \
@@ -362,7 +366,13 @@ struct dcn_hubbub_registers {
type DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_C;\
type DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_D;\
type DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D;\
- type SDPIF_REQUEST_RATE_LIMIT
+ type SDPIF_REQUEST_RATE_LIMIT;\
+ type DISPCLK_R_DCHUBBUB_GATE_DIS;\
+ type DCFCLK_R_DCHUBBUB_GATE_DIS;\
+ type SDPIF_MAX_NUM_OUTSTANDING;\
+ type DCHUBBUB_ARB_MAX_REQ_OUTSTAND;\
+ type SDPIF_PORT_CONTROL;\
+ type DET_MEM_PWR_LS_MODE
struct dcn_hubbub_shift {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index fe2023f18b7d..a1a29c508394 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -45,7 +45,6 @@
#include "dcn10_hubp.h"
#include "dcn10_hubbub.h"
#include "dcn10_cm_common.h"
-#include "dc_link_dp.h"
#include "dccg.h"
#include "clk_mgr.h"
#include "link_hwss.h"
@@ -56,8 +55,7 @@
#include "dce/dmub_hw_lock_mgr.h"
#include "dc_trace.h"
#include "dce/dmub_outbox.h"
-#include "inc/dc_link_dp.h"
-#include "inc/link_dpcd.h"
+#include "link.h"
#define DC_LOGGER_INIT(logger)
@@ -921,7 +919,7 @@ enum dc_status dcn10_enable_stream_timing(
if (false == pipe_ctx->clock_source->funcs->program_pix_clk(
pipe_ctx->clock_source,
&pipe_ctx->stream_res.pix_clk_params,
- dp_get_link_encoding_format(&pipe_ctx->link_config.dp_link_settings),
+ link_dp_get_encoding_format(&pipe_ctx->link_config.dp_link_settings),
&pipe_ctx->pll_settings)) {
BREAK_TO_DEBUGGER();
return DC_ERROR_UNEXPECTED;
@@ -1019,7 +1017,7 @@ static void dcn10_reset_back_end_for_pipe(
* VBIOS lit up eDP, so check link status too.
*/
if (!pipe_ctx->stream->dpms_off || link->link_status.link_active)
- core_link_disable_stream(pipe_ctx);
+ link_set_dpms_off(pipe_ctx);
else if (pipe_ctx->stream_res.audio)
dc->hwss.disable_audio_stream(pipe_ctx);
@@ -1566,7 +1564,7 @@ void dcn10_init_hw(struct dc *dc)
}
/* we want to turn off all dp displays before doing detection */
- dc_link_blank_all_dp_displays(dc);
+ link_blank_all_dp_displays(dc);
if (hws->funcs.enable_power_gating_plane)
hws->funcs.enable_power_gating_plane(dc->hwseq, true);
@@ -2901,7 +2899,7 @@ void dcn10_blank_pixel_data(
dc->hwss.set_pipe(pipe_ctx);
stream_res->abm->funcs->set_abm_level(stream_res->abm, stream->abm_level);
}
- } else if (blank) {
+ } else {
dc->hwss.set_abm_immediate_disable(pipe_ctx);
if (stream_res->tg->funcs->set_blank) {
stream_res->tg->funcs->wait_for_state(stream_res->tg, CRTC_STATE_VBLANK);
@@ -3225,12 +3223,16 @@ static void dcn10_config_stereo_parameters(
timing_3d_format == TIMING_3D_FORMAT_INBAND_FA ||
timing_3d_format == TIMING_3D_FORMAT_DP_HDMI_INBAND_FA ||
timing_3d_format == TIMING_3D_FORMAT_SIDEBAND_FA) {
- enum display_dongle_type dongle = \
- stream->link->ddc->dongle_type;
- if (dongle == DISPLAY_DONGLE_DP_VGA_CONVERTER ||
- dongle == DISPLAY_DONGLE_DP_DVI_CONVERTER ||
- dongle == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
- flags->DISABLE_STEREO_DP_SYNC = 1;
+
+ if (stream->link && stream->link->ddc) {
+ enum display_dongle_type dongle = \
+ stream->link->ddc->dongle_type;
+
+ if (dongle == DISPLAY_DONGLE_DP_VGA_CONVERTER ||
+ dongle == DISPLAY_DONGLE_DP_DVI_CONVERTER ||
+ dongle == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
+ flags->DISABLE_STEREO_DP_SYNC = 1;
+ }
}
flags->RIGHT_EYE_POLARITY =\
stream->timing.flags.RIGHT_EYE_3D_POLARITY;
@@ -3626,7 +3628,7 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
(int)hubp->curs_attr.width || pos_cpy.x
<= (int)hubp->curs_attr.width +
pipe_ctx->plane_state->src_rect.x) {
- pos_cpy.x = temp_x + viewport_width;
+ pos_cpy.x = 2 * viewport_width - temp_x;
}
}
} else {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
index fbccb7263ad2..c4287147b853 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
@@ -29,7 +29,6 @@
#include "link_encoder.h"
#include "dcn10_link_encoder.h"
#include "stream_encoder.h"
-#include "i2caux_interface.h"
#include "dc_bios_types.h"
#include "gpio_service_interface.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
index 88ac5f6f4c96..0b37bb0e184b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
@@ -519,7 +519,8 @@ struct dcn_optc_registers {
type OTG_CRC_DATA_STREAM_COMBINE_MODE;\
type OTG_CRC_DATA_STREAM_SPLIT_MODE;\
type OTG_CRC_DATA_FORMAT;\
- type OTG_V_TOTAL_LAST_USED_BY_DRR;
+ type OTG_V_TOTAL_LAST_USED_BY_DRR;\
+ type OTG_DRR_TIMING_DBUF_UPDATE_PENDING;
#define TG_REG_FIELD_LIST_DCN3_2(type) \
type OTG_H_TIMING_DIV_MODE_MANUAL;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
index 484e7cdf00b8..3c451ab5d3ca 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
@@ -28,7 +28,7 @@
#include "dcn10_stream_encoder.h"
#include "reg_helper.h"
#include "hw_shared.h"
-#include "inc/link_dpcd.h"
+#include "link.h"
#include "dpcd_defs.h"
#include "dcn30/dcn30_afmt.h"
@@ -753,12 +753,19 @@ void enc1_stream_encoder_update_dp_info_packets(
* use other packetIndex (such as 5,6) for other info packet
*/
+ if (info_frame->adaptive_sync.valid)
+ enc1_update_generic_info_packet(
+ enc1,
+ 5, /* packetIndex */
+ &info_frame->adaptive_sync);
+
/* enable/disable transmission of packet(s).
* If enabled, packet transmission begins on the next frame
*/
REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP0_ENABLE, info_frame->vsc.valid);
REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP2_ENABLE, info_frame->spd.valid);
REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP3_ENABLE, info_frame->hdrsmd.valid);
+ REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP5_ENABLE, info_frame->adaptive_sync.valid);
/* This bit is the master enable bit.
* When enabling secondary stream engine,
@@ -926,7 +933,7 @@ void enc1_stream_encoder_dp_blank(
/* disable DP stream */
REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, 0);
- dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_DP_VID_STREAM);
+ link_dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_DP_VID_STREAM);
/* the encoder stops sending the video stream
* at the start of the vertical blanking.
@@ -945,7 +952,7 @@ void enc1_stream_encoder_dp_blank(
REG_UPDATE(DP_STEER_FIFO, DP_STEER_FIFO_RESET, true);
- dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_FIFO_STEER_RESET);
+ link_dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_FIFO_STEER_RESET);
}
/* output video stream to link encoder */
@@ -1018,7 +1025,7 @@ void enc1_stream_encoder_dp_unblank(
REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, true);
- dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_DP_VID_STREAM);
+ link_dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_DP_VID_STREAM);
}
void enc1_stream_encoder_set_avmute(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c
index 784a8b6f360d..42344aec60d6 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c
@@ -28,6 +28,7 @@
#include "reg_helper.h"
#include "dcn20_dsc.h"
#include "dsc/dscc_types.h"
+#include "dsc/rc_calc.h"
static void dsc_log_pps(struct display_stream_compressor *dsc, struct drm_dsc_config *pps);
static bool dsc_prepare_config(const struct dsc_config *dsc_cfg, struct dsc_reg_values *dsc_reg_vals,
@@ -200,7 +201,6 @@ static void dsc2_set_config(struct display_stream_compressor *dsc, const struct
bool is_config_ok;
struct dcn20_dsc *dsc20 = TO_DCN20_DSC(dsc);
- DC_LOG_DSC(" ");
DC_LOG_DSC("Setting DSC Config at DSC inst %d", dsc->inst);
dsc_config_log(dsc, dsc_cfg);
is_config_ok = dsc_prepare_config(dsc_cfg, &dsc20->reg_vals, dsc_optc_cfg);
@@ -345,10 +345,38 @@ static void dsc_log_pps(struct display_stream_compressor *dsc, struct drm_dsc_co
}
}
+static void dsc_override_rc_params(struct rc_params *rc, const struct dc_dsc_rc_params_override *override)
+{
+ uint8_t i;
+
+ rc->rc_model_size = override->rc_model_size;
+ for (i = 0; i < DC_DSC_RC_BUF_THRESH_SIZE; i++)
+ rc->rc_buf_thresh[i] = override->rc_buf_thresh[i];
+ for (i = 0; i < DC_DSC_QP_SET_SIZE; i++) {
+ rc->qp_min[i] = override->rc_minqp[i];
+ rc->qp_max[i] = override->rc_maxqp[i];
+ rc->ofs[i] = override->rc_offset[i];
+ }
+
+ rc->rc_tgt_offset_hi = override->rc_tgt_offset_hi;
+ rc->rc_tgt_offset_lo = override->rc_tgt_offset_lo;
+ rc->rc_edge_factor = override->rc_edge_factor;
+ rc->rc_quant_incr_limit0 = override->rc_quant_incr_limit0;
+ rc->rc_quant_incr_limit1 = override->rc_quant_incr_limit1;
+
+ rc->initial_fullness_offset = override->initial_fullness_offset;
+ rc->initial_xmit_delay = override->initial_delay;
+
+ rc->flatness_min_qp = override->flatness_min_qp;
+ rc->flatness_max_qp = override->flatness_max_qp;
+ rc->flatness_det_thresh = override->flatness_det_thresh;
+}
+
static bool dsc_prepare_config(const struct dsc_config *dsc_cfg, struct dsc_reg_values *dsc_reg_vals,
struct dsc_optc_config *dsc_optc_cfg)
{
struct dsc_parameters dsc_params;
+ struct rc_params rc;
/* Validate input parameters */
ASSERT(dsc_cfg->dc_dsc_cfg.num_slices_h);
@@ -413,7 +441,12 @@ static bool dsc_prepare_config(const struct dsc_config *dsc_cfg, struct dsc_reg_
dsc_reg_vals->pps.native_420 = (dsc_reg_vals->pixel_format == DSC_PIXFMT_NATIVE_YCBCR420);
dsc_reg_vals->pps.simple_422 = (dsc_reg_vals->pixel_format == DSC_PIXFMT_SIMPLE_YCBCR422);
- if (dscc_compute_dsc_parameters(&dsc_reg_vals->pps, &dsc_params)) {
+ calc_rc_params(&rc, &dsc_reg_vals->pps);
+
+ if (dsc_cfg->dc_dsc_cfg.rc_params_ovrd)
+ dsc_override_rc_params(&rc, dsc_cfg->dc_dsc_cfg.rc_params_ovrd);
+
+ if (dscc_compute_dsc_parameters(&dsc_reg_vals->pps, &rc, &dsc_params)) {
dm_output_to_console("%s: DSC config failed\n", __func__);
return false;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index 6291a241158a..b83873a3a534 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -46,16 +46,15 @@
#include "dchubbub.h"
#include "reg_helper.h"
#include "dcn10/dcn10_cm_common.h"
-#include "dc_link_dp.h"
#include "vm_helper.h"
#include "dccg.h"
#include "dc_dmub_srv.h"
#include "dce/dmub_hw_lock_mgr.h"
#include "hw_sequencer.h"
-#include "inc/link_dpcd.h"
#include "dpcd_defs.h"
#include "inc/link_enc_cfg.h"
#include "link_hwss.h"
+#include "link.h"
#define DC_LOGGER_INIT(logger)
@@ -582,6 +581,9 @@ void dcn20_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
if (pipe_ctx->stream_res.gsl_group != 0)
dcn20_setup_gsl_group_as_lock(dc, pipe_ctx, false);
+ if (hubp->funcs->hubp_update_mall_sel)
+ hubp->funcs->hubp_update_mall_sel(hubp, 0, false);
+
dc->hwss.set_flip_control_gsl(pipe_ctx, false);
hubp->funcs->hubp_clk_cntl(hubp, false);
@@ -605,6 +607,9 @@ void dcn20_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
void dcn20_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
+ bool is_phantom = pipe_ctx->plane_state && pipe_ctx->plane_state->is_phantom;
+ struct timing_generator *tg = is_phantom ? pipe_ctx->stream_res.tg : NULL;
+
DC_LOGGER_INIT(dc->ctx->logger);
if (!pipe_ctx->plane_res.hubp || pipe_ctx->plane_res.hubp->power_gated)
@@ -612,6 +617,12 @@ void dcn20_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx)
dcn20_plane_atomic_disable(dc, pipe_ctx);
+ /* Turn back off the phantom OTG after the phantom plane is fully disabled
+ */
+ if (is_phantom)
+ if (tg && tg->funcs->disable_phantom_crtc)
+ tg->funcs->disable_phantom_crtc(tg);
+
DC_LOG_DC("Power down front end %d\n",
pipe_ctx->pipe_idx);
}
@@ -700,7 +711,7 @@ enum dc_status dcn20_enable_stream_timing(
if (false == pipe_ctx->clock_source->funcs->program_pix_clk(
pipe_ctx->clock_source,
&pipe_ctx->stream_res.pix_clk_params,
- dp_get_link_encoding_format(&pipe_ctx->link_config.dp_link_settings),
+ link_dp_get_encoding_format(&pipe_ctx->link_config.dp_link_settings),
&pipe_ctx->pll_settings)) {
BREAK_TO_DEBUGGER();
return DC_ERROR_UNEXPECTED;
@@ -1766,6 +1777,15 @@ static void dcn20_program_pipe(
&pipe_ctx->stream->bit_depth_params,
&pipe_ctx->stream->clamping);
}
+
+ /* Set ABM pipe after other pipe configurations done */
+ if (pipe_ctx->plane_state->visible) {
+ if (pipe_ctx->stream_res.abm) {
+ dc->hwss.set_pipe(pipe_ctx);
+ pipe_ctx->stream_res.abm->funcs->set_abm_level(pipe_ctx->stream_res.abm,
+ pipe_ctx->stream->abm_level);
+ }
+ }
}
void dcn20_program_front_end_for_ctx(
@@ -1803,6 +1823,20 @@ void dcn20_program_front_end_for_ctx(
dcn20_detect_pipe_changes(&dc->current_state->res_ctx.pipe_ctx[i],
&context->res_ctx.pipe_ctx[i]);
+ /* When disabling phantom pipes, turn on phantom OTG first (so we can get double
+ * buffer updates properly)
+ */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct dc_stream_state *stream = dc->current_state->res_ctx.pipe_ctx[i].stream;
+
+ if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable && stream &&
+ dc->current_state->res_ctx.pipe_ctx[i].stream->mall_stream_config.type == SUBVP_PHANTOM) {
+ struct timing_generator *tg = dc->current_state->res_ctx.pipe_ctx[i].stream_res.tg;
+
+ if (tg->funcs->enable_crtc)
+ tg->funcs->enable_crtc(tg);
+ }
+ }
/* OTG blank before disabling all front ends */
for (i = 0; i < dc->res_pool->pipe_count; i++)
if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable
@@ -1999,8 +2033,11 @@ void dcn20_prepare_bandwidth(
}
}
- /* program dchubbub watermarks */
- dc->wm_optimized_required = hubbub->funcs->program_watermarks(hubbub,
+ /* program dchubbub watermarks:
+ * For assigning wm_optimized_required, use |= operator since we don't want
+ * to clear the value if the optimize has not happened yet
+ */
+ dc->wm_optimized_required |= hubbub->funcs->program_watermarks(hubbub,
&context->bw_ctx.bw.dcn.watermarks,
dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
false);
@@ -2359,7 +2396,7 @@ void dcn20_unblank_stream(struct pipe_ctx *pipe_ctx,
params.link_settings.link_rate = link_settings->link_rate;
- if (is_dp_128b_132b_signal(pipe_ctx)) {
+ if (link_is_dp_128b_132b_signal(pipe_ctx)) {
/* TODO - DP2.0 HW: Set ODM mode in dp hpo encoder here */
pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_unblank(
pipe_ctx->stream_res.hpo_dp_stream_enc,
@@ -2412,7 +2449,7 @@ static void dcn20_reset_back_end_for_pipe(
* VBIOS lit up eDP, so check link status too.
*/
if (!pipe_ctx->stream->dpms_off || link->link_status.link_active)
- core_link_disable_stream(pipe_ctx);
+ link_set_dpms_off(pipe_ctx);
else if (pipe_ctx->stream_res.audio)
dc->hwss.disable_audio_stream(pipe_ctx);
@@ -2432,7 +2469,7 @@ static void dcn20_reset_back_end_for_pipe(
}
}
else if (pipe_ctx->stream_res.dsc) {
- dp_set_dsc_enable(pipe_ctx, false);
+ link_set_dsc_enable(pipe_ctx, false);
}
/* by upper caller loop, parent pipe: pipe0, will be reset last.
@@ -2615,6 +2652,37 @@ void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
hubp->mpcc_id = mpcc_id;
}
+static enum phyd32clk_clock_source get_phyd32clk_src(struct dc_link *link)
+{
+ switch (link->link_enc->transmitter) {
+ case TRANSMITTER_UNIPHY_A:
+ return PHYD32CLKA;
+ case TRANSMITTER_UNIPHY_B:
+ return PHYD32CLKB;
+ case TRANSMITTER_UNIPHY_C:
+ return PHYD32CLKC;
+ case TRANSMITTER_UNIPHY_D:
+ return PHYD32CLKD;
+ case TRANSMITTER_UNIPHY_E:
+ return PHYD32CLKE;
+ default:
+ return PHYD32CLKA;
+ }
+}
+
+static int get_odm_segment_count(struct pipe_ctx *pipe_ctx)
+{
+ struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe;
+ int count = 1;
+
+ while (odm_pipe != NULL) {
+ count++;
+ odm_pipe = odm_pipe->next_odm_pipe;
+ }
+
+ return count;
+}
+
void dcn20_enable_stream(struct pipe_ctx *pipe_ctx)
{
enum dc_lane_count lane_count =
@@ -2628,12 +2696,43 @@ void dcn20_enable_stream(struct pipe_ctx *pipe_ctx)
struct timing_generator *tg = pipe_ctx->stream_res.tg;
const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
struct dc *dc = pipe_ctx->stream->ctx->dc;
+ struct dtbclk_dto_params dto_params = {0};
+ struct dccg *dccg = dc->res_pool->dccg;
+ enum phyd32clk_clock_source phyd32clk;
+ int dp_hpo_inst;
+ struct dce_hwseq *hws = dc->hwseq;
+ unsigned int k1_div = PIXEL_RATE_DIV_NA;
+ unsigned int k2_div = PIXEL_RATE_DIV_NA;
- if (is_dp_128b_132b_signal(pipe_ctx)) {
+ if (link_is_dp_128b_132b_signal(pipe_ctx)) {
if (dc->hwseq->funcs.setup_hpo_hw_control)
dc->hwseq->funcs.setup_hpo_hw_control(dc->hwseq, true);
}
+ if (link_is_dp_128b_132b_signal(pipe_ctx)) {
+ dp_hpo_inst = pipe_ctx->stream_res.hpo_dp_stream_enc->inst;
+ dccg->funcs->set_dpstreamclk(dccg, DTBCLK0, tg->inst, dp_hpo_inst);
+
+ phyd32clk = get_phyd32clk_src(link);
+ dccg->funcs->enable_symclk32_se(dccg, dp_hpo_inst, phyd32clk);
+
+ dto_params.otg_inst = tg->inst;
+ dto_params.pixclk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10;
+ dto_params.num_odm_segments = get_odm_segment_count(pipe_ctx);
+ dto_params.timing = &pipe_ctx->stream->timing;
+ dto_params.ref_dtbclk_khz = dc->clk_mgr->funcs->get_dtb_ref_clk_frequency(dc->clk_mgr);
+ dccg->funcs->set_dtbclk_dto(dccg, &dto_params);
+ }
+
+ if (hws->funcs.calculate_dccg_k1_k2_values && dc->res_pool->dccg->funcs->set_pixel_rate_div) {
+ hws->funcs.calculate_dccg_k1_k2_values(pipe_ctx, &k1_div, &k2_div);
+
+ dc->res_pool->dccg->funcs->set_pixel_rate_div(
+ dc->res_pool->dccg,
+ pipe_ctx->stream_res.tg->inst,
+ k1_div, k2_div);
+ }
+
link_hwss->setup_stream_encoder(pipe_ctx);
if (pipe_ctx->plane_state && pipe_ctx->plane_state->flip_immediate != 1) {
@@ -2644,7 +2743,7 @@ void dcn20_enable_stream(struct pipe_ctx *pipe_ctx)
dc->hwss.update_info_frame(pipe_ctx);
if (dc_is_dp_signal(pipe_ctx->stream->signal))
- dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME);
+ link_dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME);
/* enable early control to avoid corruption on DP monitor*/
active_total_with_borders =
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c
index 2f9bfaeaba8d..51a57dae1811 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c
@@ -29,7 +29,6 @@
#include "link_encoder.h"
#include "dcn20_link_encoder.h"
#include "stream_encoder.h"
-#include "i2caux_interface.h"
#include "dc_bios_types.h"
#include "gpio_service_interface.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index 8a0dd0d7134b..3af24ef9cb2d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -62,7 +62,6 @@
#include "dml/display_mode_vba.h"
#include "dcn20_dccg.h"
#include "dcn20_vmid.h"
-#include "dc_link_ddc.h"
#include "dce/dce_panel_cntl.h"
#include "navi10_ip_offset.h"
@@ -90,6 +89,7 @@
#include "amdgpu_socbb.h"
+#include "link.h"
#define DC_LOGGER_INIT(logger)
#ifndef mmDP0_DP_DPHY_INTERNAL_CTRL
@@ -1214,7 +1214,7 @@ static void dcn20_resource_destruct(struct dcn20_resource_pool *pool)
dcn20_pp_smu_destroy(&pool->base.pp_smu);
if (pool->base.oem_device != NULL)
- dal_ddc_service_destroy(&pool->base.oem_device);
+ link_destroy_ddc_service(&pool->base.oem_device);
}
struct hubp *dcn20_hubp_create(
@@ -1389,6 +1389,9 @@ enum dc_status dcn20_add_dsc_to_stream_resource(struct dc *dc,
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx = &dc_ctx->res_ctx.pipe_ctx[i];
+ if (pipe_ctx->top_pipe)
+ continue;
+
if (pipe_ctx->stream != dc_stream)
continue;
@@ -2222,14 +2225,10 @@ enum dc_status dcn20_patch_unknown_plane_state(struct dc_plane_state *plane_stat
enum surface_pixel_format surf_pix_format = plane_state->format;
unsigned int bpp = resource_pixel_format_to_bpp(surf_pix_format);
- enum swizzle_mode_values swizzle = DC_SW_LINEAR;
-
+ plane_state->tiling_info.gfx9.swizzle = DC_SW_64KB_S;
if (bpp == 64)
- swizzle = DC_SW_64KB_D;
- else
- swizzle = DC_SW_64KB_S;
+ plane_state->tiling_info.gfx9.swizzle = DC_SW_64KB_D;
- plane_state->tiling_info.gfx9.swizzle = swizzle;
return DC_OK;
}
@@ -2766,7 +2765,7 @@ static bool dcn20_resource_construct(
ddc_init_data.id.id = dc->ctx->dc_bios->fw_info.oem_i2c_obj_id;
ddc_init_data.id.enum_id = 0;
ddc_init_data.id.type = OBJECT_TYPE_GENERIC;
- pool->base.oem_device = dal_ddc_service_create(&ddc_init_data);
+ pool->base.oem_device = link_create_ddc_service(&ddc_init_data);
} else {
pool->base.oem_device = NULL;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
index b40489e678f9..42865d6c0cdd 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
@@ -29,7 +29,7 @@
#include "dcn20_stream_encoder.h"
#include "reg_helper.h"
#include "hw_shared.h"
-#include "inc/link_dpcd.h"
+#include "link.h"
#include "dpcd_defs.h"
#define DC_LOGGER \
@@ -423,6 +423,22 @@ void enc2_set_dynamic_metadata(struct stream_encoder *enc,
}
}
+static void enc2_stream_encoder_update_dp_info_packets_sdp_line_num(
+ struct stream_encoder *enc,
+ struct encoder_info_frame *info_frame)
+{
+ struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+
+ if (info_frame->adaptive_sync.valid == true &&
+ info_frame->sdp_line_num.adaptive_sync_line_num_valid == true) {
+ //00: REFER_TO_DP_SOF, 01: REFER_TO_OTG_SOF
+ REG_UPDATE(DP_SEC_CNTL1, DP_SEC_GSP5_LINE_REFERENCE, 1);
+
+ REG_UPDATE(DP_SEC_CNTL5, DP_SEC_GSP5_LINE_NUM,
+ info_frame->sdp_line_num.adaptive_sync_line_num);
+ }
+}
+
static void enc2_stream_encoder_update_dp_info_packets(
struct stream_encoder *enc,
const struct encoder_info_frame *info_frame)
@@ -530,7 +546,7 @@ void enc2_stream_encoder_dp_unblank(
REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, true);
- dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_DP_VID_STREAM);
+ link_dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_DP_VID_STREAM);
}
static void enc2_dp_set_odm_combine(
@@ -587,6 +603,8 @@ static const struct stream_encoder_funcs dcn20_str_enc_funcs = {
enc2_stream_encoder_update_hdmi_info_packets,
.stop_hdmi_info_packets =
enc2_stream_encoder_stop_hdmi_info_packets,
+ .update_dp_info_packets_sdp_line_num =
+ enc2_stream_encoder_update_dp_info_packets_sdp_line_num,
.update_dp_info_packets =
enc2_stream_encoder_update_dp_info_packets,
.send_immediate_sdp_message =
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_link_encoder.c
index 7f9ec59ef443..8d31fa131cd6 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_link_encoder.c
@@ -29,7 +29,6 @@
#include "link_encoder.h"
#include "dcn201_link_encoder.h"
#include "stream_encoder.h"
-#include "i2caux_interface.h"
#include "dc_bios_types.h"
#include "gpio_service_interface.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c
index 69cc192a7e71..15475c7e2cf9 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c
@@ -35,7 +35,7 @@
#include "hw/clk_mgr.h"
#include "dc_dmub_srv.h"
#include "abm.h"
-
+#include "link.h"
#define DC_LOGGER_INIT(logger)
@@ -132,8 +132,8 @@ void dcn21_PLAT_58856_wa(struct dc_state *context, struct pipe_ctx *pipe_ctx)
return;
pipe_ctx->stream->dpms_off = false;
- core_link_enable_stream(context, pipe_ctx);
- core_link_disable_stream(pipe_ctx);
+ link_set_dpms_on(context, pipe_ctx);
+ link_set_dpms_off(pipe_ctx);
pipe_ctx->stream->dpms_off = true;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c
index 0a1ba6e7081c..eb9abb9f9698 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c
@@ -31,7 +31,6 @@
#include "dcn21_link_encoder.h"
#include "stream_encoder.h"
-#include "i2caux_interface.h"
#include "dc_bios_types.h"
#include "gpio_service_interface.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
index fbcf0afeae0d..8f9244fe5c86 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
@@ -1393,15 +1393,13 @@ static uint32_t read_pipe_fuses(struct dc_context *ctx)
static enum dc_status dcn21_patch_unknown_plane_state(struct dc_plane_state *plane_state)
{
- enum dc_status result = DC_OK;
-
if (plane_state->ctx->dc->debug.disable_dcc == DCC_ENABLE) {
plane_state->dcc.enable = 1;
/* align to our worst case block width */
plane_state->dcc.meta_pitch = ((plane_state->src_rect.width + 1023) / 1024) * 1024;
}
- result = dcn20_patch_unknown_plane_state(plane_state);
- return result;
+
+ return dcn20_patch_unknown_plane_state(plane_state);
}
static const struct resource_funcs dcn21_res_pool_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.c
index 6f3c2fb60790..1fb8fd7afc95 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.c
@@ -29,7 +29,6 @@
#include "link_encoder.h"
#include "dcn30_dio_link_encoder.h"
#include "stream_encoder.h"
-#include "i2caux_interface.h"
#include "dc_bios_types.h"
/* #include "dcn3ag/dcn3ag_phy_fw.h" */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c
index 17df53793c92..5f9079d3943a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c
@@ -404,6 +404,22 @@ static void enc3_read_state(struct stream_encoder *enc, struct enc_state *s)
}
}
+void enc3_stream_encoder_update_dp_info_packets_sdp_line_num(
+ struct stream_encoder *enc,
+ struct encoder_info_frame *info_frame)
+{
+ struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+
+ if (info_frame->adaptive_sync.valid == true &&
+ info_frame->sdp_line_num.adaptive_sync_line_num_valid == true) {
+ //00: REFER_TO_DP_SOF, 01: REFER_TO_OTG_SOF
+ REG_UPDATE(DP_SEC_CNTL1, DP_SEC_GSP5_LINE_REFERENCE, 1);
+
+ REG_UPDATE(DP_SEC_CNTL5, DP_SEC_GSP5_LINE_NUM,
+ info_frame->sdp_line_num.adaptive_sync_line_num);
+ }
+}
+
void enc3_stream_encoder_update_dp_info_packets(
struct stream_encoder *enc,
const struct encoder_info_frame *info_frame)
@@ -452,12 +468,20 @@ void enc3_stream_encoder_update_dp_info_packets(
* use other packetIndex (such as 5,6) for other info packet
*/
+ if (info_frame->adaptive_sync.valid)
+ enc->vpg->funcs->update_generic_info_packet(
+ enc->vpg,
+ 5, /* packetIndex */
+ &info_frame->adaptive_sync,
+ true);
+
/* enable/disable transmission of packet(s).
* If enabled, packet transmission begins on the next frame
*/
REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP0_ENABLE, info_frame->vsc.valid);
REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP2_ENABLE, info_frame->spd.valid);
REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP3_ENABLE, info_frame->hdrsmd.valid);
+ REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP5_ENABLE, info_frame->adaptive_sync.valid);
/* This bit is the master enable bit.
* When enabling secondary stream engine,
@@ -803,6 +827,8 @@ static const struct stream_encoder_funcs dcn30_str_enc_funcs = {
enc3_stream_encoder_update_hdmi_info_packets,
.stop_hdmi_info_packets =
enc3_stream_encoder_stop_hdmi_info_packets,
+ .update_dp_info_packets_sdp_line_num =
+ enc3_stream_encoder_update_dp_info_packets_sdp_line_num,
.update_dp_info_packets =
enc3_stream_encoder_update_dp_info_packets,
.stop_dp_info_packets =
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.h
index 54ee230e7f98..06310973ded2 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.h
@@ -292,6 +292,10 @@ void enc3_stream_encoder_update_hdmi_info_packets(
void enc3_stream_encoder_stop_hdmi_info_packets(
struct stream_encoder *enc);
+void enc3_stream_encoder_update_dp_info_packets_sdp_line_num(
+ struct stream_encoder *enc,
+ struct encoder_info_frame *info_frame);
+
void enc3_stream_encoder_update_dp_info_packets(
struct stream_encoder *enc,
const struct encoder_info_frame *info_frame);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
index 8c5045711264..df787fcf8e86 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
@@ -50,8 +50,7 @@
#include "dpcd_defs.h"
#include "../dcn20/dcn20_hwseq.h"
#include "dcn30_resource.h"
-#include "inc/dc_link_dp.h"
-#include "inc/link_dpcd.h"
+#include "link.h"
@@ -91,8 +90,8 @@ bool dcn30_set_blend_lut(
return result;
}
-static bool dcn30_set_mpc_shaper_3dlut(
- struct pipe_ctx *pipe_ctx, const struct dc_stream_state *stream)
+static bool dcn30_set_mpc_shaper_3dlut(struct pipe_ctx *pipe_ctx,
+ const struct dc_stream_state *stream)
{
struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
int mpcc_id = pipe_ctx->plane_res.hubp->inst;
@@ -104,19 +103,18 @@ static bool dcn30_set_mpc_shaper_3dlut(
const struct pwl_params *shaper_lut = NULL;
//get the shaper lut params
if (stream->func_shaper) {
- if (stream->func_shaper->type == TF_TYPE_HWPWL)
+ if (stream->func_shaper->type == TF_TYPE_HWPWL) {
shaper_lut = &stream->func_shaper->pwl;
- else if (stream->func_shaper->type == TF_TYPE_DISTRIBUTED_POINTS) {
- cm_helper_translate_curve_to_hw_format(
- stream->func_shaper,
- &dpp_base->shaper_params, true);
+ } else if (stream->func_shaper->type == TF_TYPE_DISTRIBUTED_POINTS) {
+ cm_helper_translate_curve_to_hw_format(stream->func_shaper,
+ &dpp_base->shaper_params, true);
shaper_lut = &dpp_base->shaper_params;
}
}
if (stream->lut3d_func &&
- stream->lut3d_func->state.bits.initialized == 1 &&
- stream->lut3d_func->state.bits.rmu_idx_valid == 1) {
+ stream->lut3d_func->state.bits.initialized == 1 &&
+ stream->lut3d_func->state.bits.rmu_idx_valid == 1) {
if (stream->lut3d_func->state.bits.rmu_mux_num == 0)
mpcc_id_projected = stream->lut3d_func->state.bits.mpc_rmu0_mux;
else if (stream->lut3d_func->state.bits.rmu_mux_num == 1)
@@ -125,20 +123,22 @@ static bool dcn30_set_mpc_shaper_3dlut(
mpcc_id_projected = stream->lut3d_func->state.bits.mpc_rmu2_mux;
if (mpcc_id_projected != mpcc_id)
BREAK_TO_DEBUGGER();
- /*find the reason why logical layer assigned a differant mpcc_id into acquire_post_bldn_3dlut*/
+ /* find the reason why logical layer assigned a different
+ * mpcc_id into acquire_post_bldn_3dlut
+ */
acquired_rmu = mpc->funcs->acquire_rmu(mpc, mpcc_id,
- stream->lut3d_func->state.bits.rmu_mux_num);
+ stream->lut3d_func->state.bits.rmu_mux_num);
if (acquired_rmu != stream->lut3d_func->state.bits.rmu_mux_num)
BREAK_TO_DEBUGGER();
- result = mpc->funcs->program_3dlut(mpc,
- &stream->lut3d_func->lut_3d,
- stream->lut3d_func->state.bits.rmu_mux_num);
+
+ result = mpc->funcs->program_3dlut(mpc, &stream->lut3d_func->lut_3d,
+ stream->lut3d_func->state.bits.rmu_mux_num);
result = mpc->funcs->program_shaper(mpc, shaper_lut,
- stream->lut3d_func->state.bits.rmu_mux_num);
- } else
- /*loop through the available mux and release the requested mpcc_id*/
+ stream->lut3d_func->state.bits.rmu_mux_num);
+ } else {
+ // loop through the available mux and release the requested mpcc_id
mpc->funcs->release_rmu(mpc, mpcc_id);
-
+ }
return result;
}
@@ -540,7 +540,7 @@ void dcn30_init_hw(struct dc *dc)
hws->funcs.dsc_pg_control(hws, res_pool->dscs[i]->inst, false);
/* we want to turn off all dp displays before doing detection */
- dc_link_blank_all_dp_displays(dc);
+ link_blank_all_dp_displays(dc);
if (hws->funcs.enable_power_gating_plane)
hws->funcs.enable_power_gating_plane(dc->hwseq, true);
@@ -675,10 +675,16 @@ void dcn30_update_info_frame(struct pipe_ctx *pipe_ctx)
pipe_ctx->stream_res.stream_enc->funcs->update_hdmi_info_packets(
pipe_ctx->stream_res.stream_enc,
&pipe_ctx->stream_res.encoder_info_frame);
- else
+ else {
+ if (pipe_ctx->stream_res.stream_enc->funcs->update_dp_info_packets_sdp_line_num)
+ pipe_ctx->stream_res.stream_enc->funcs->update_dp_info_packets_sdp_line_num(
+ pipe_ctx->stream_res.stream_enc,
+ &pipe_ctx->stream_res.encoder_info_frame);
+
pipe_ctx->stream_res.stream_enc->funcs->update_dp_info_packets(
pipe_ctx->stream_res.stream_enc,
&pipe_ctx->stream_res.encoder_info_frame);
+ }
}
void dcn30_program_dmdata_engine(struct pipe_ctx *pipe_ctx)
@@ -992,8 +998,5 @@ void dcn30_prepare_bandwidth(struct dc *dc,
dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz);
dcn20_prepare_bandwidth(dc, context);
-
- dc_dmub_srv_p_state_delegate(dc,
- context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching, context);
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
index 867d60151aeb..08b92715e2e6 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
@@ -291,6 +291,14 @@ static void optc3_set_timing_double_buffer(struct timing_generator *optc, bool e
OTG_DRR_TIMING_DBUF_UPDATE_MODE, mode);
}
+void optc3_wait_drr_doublebuffer_pending_clear(struct timing_generator *optc)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+ REG_WAIT(OTG_DOUBLE_BUFFER_CONTROL, OTG_DRR_TIMING_DBUF_UPDATE_PENDING, 0, 2, 100000); /* 1 vupdate at 5hz */
+
+}
+
void optc3_set_vtotal_min_max(struct timing_generator *optc, int vtotal_min, int vtotal_max)
{
optc1_set_vtotal_min_max(optc, vtotal_min, vtotal_max);
@@ -360,6 +368,7 @@ static struct timing_generator_funcs dcn30_tg_funcs = {
.program_manual_trigger = optc2_program_manual_trigger,
.setup_manual_trigger = optc2_setup_manual_trigger,
.get_hw_timing = optc1_get_hw_timing,
+ .wait_drr_doublebuffer_pending_clear = optc3_wait_drr_doublebuffer_pending_clear,
};
void dcn30_timing_generator_init(struct optc *optc1)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.h
index dd45a5499b07..fb06dc9a4893 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.h
@@ -279,6 +279,7 @@
SF(OTG0_OTG_DRR_TRIGGER_WINDOW, OTG_DRR_TRIGGER_WINDOW_END_X, mask_sh),\
SF(OTG0_OTG_DRR_V_TOTAL_CHANGE, OTG_DRR_V_TOTAL_CHANGE_LIMIT, mask_sh),\
SF(OTG0_OTG_H_TIMING_CNTL, OTG_H_TIMING_DIV_BY2, mask_sh),\
+ SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_DRR_TIMING_DBUF_UPDATE_PENDING, mask_sh),\
SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_DRR_TIMING_DBUF_UPDATE_MODE, mask_sh),\
SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_BLANK_DATA_DOUBLE_BUFFER_EN, mask_sh)
@@ -317,6 +318,7 @@
SF(OTG0_OTG_DRR_TRIGGER_WINDOW, OTG_DRR_TRIGGER_WINDOW_END_X, mask_sh),\
SF(OTG0_OTG_DRR_V_TOTAL_CHANGE, OTG_DRR_V_TOTAL_CHANGE_LIMIT, mask_sh),\
SF(OTG0_OTG_H_TIMING_CNTL, OTG_H_TIMING_DIV_MODE, mask_sh),\
+ SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_DRR_TIMING_DBUF_UPDATE_PENDING, mask_sh),\
SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_DRR_TIMING_DBUF_UPDATE_MODE, mask_sh)
void dcn30_timing_generator_init(struct optc *optc1);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
index c18c52a60100..b5b5320c7bef 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
@@ -60,7 +60,7 @@
#include "dml/display_mode_vba.h"
#include "dcn30/dcn30_dccg.h"
#include "dcn10/dcn10_resource.h"
-#include "dc_link_ddc.h"
+#include "link.h"
#include "dce/dce_panel_cntl.h"
#include "dcn30/dcn30_dwb.h"
@@ -1208,7 +1208,7 @@ static void dcn30_resource_destruct(struct dcn30_resource_pool *pool)
dcn_dccg_destroy(&pool->base.dccg);
if (pool->base.oem_device != NULL)
- dal_ddc_service_destroy(&pool->base.oem_device);
+ link_destroy_ddc_service(&pool->base.oem_device);
}
static struct hubp *dcn30_hubp_create(
@@ -1477,8 +1477,8 @@ bool dcn30_acquire_post_bldn_3dlut(
state->bits.mpc_rmu2_mux = mpcc_id;
ret = true;
break;
- }
}
+ }
return ret;
}
@@ -1648,7 +1648,8 @@ noinline bool dcn30_internal_validate_bw(
display_e2e_pipe_params_st *pipes,
int *pipe_cnt_out,
int *vlevel_out,
- bool fast_validate)
+ bool fast_validate,
+ bool allow_self_refresh_only)
{
bool out = false;
bool repopulate_pipes = false;
@@ -1675,7 +1676,7 @@ noinline bool dcn30_internal_validate_bw(
dml_log_pipe_params(&context->bw_ctx.dml, pipes, pipe_cnt);
- if (!fast_validate) {
+ if (!fast_validate || !allow_self_refresh_only) {
/*
* DML favors voltage over p-state, but we're more interested in
* supporting p-state over voltage. We can't support p-state in
@@ -1688,11 +1689,12 @@ noinline bool dcn30_internal_validate_bw(
if (vlevel < context->bw_ctx.dml.soc.num_states)
vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split, merge);
}
- if (fast_validate || vlevel == context->bw_ctx.dml.soc.num_states ||
- vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported) {
+ if (allow_self_refresh_only &&
+ (fast_validate || vlevel == context->bw_ctx.dml.soc.num_states ||
+ vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported)) {
/*
- * If mode is unsupported or there's still no p-state support then
- * fall back to favoring voltage.
+ * If mode is unsupported or there's still no p-state support
+ * then fall back to favoring voltage.
*
* We don't actually support prefetch mode 2, so require that we
* at least support prefetch mode 1.
@@ -2063,7 +2065,7 @@ bool dcn30_validate_bandwidth(struct dc *dc,
BW_VAL_TRACE_COUNT();
DC_FP_START();
- out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate);
+ out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate, true);
DC_FP_END();
if (pipe_cnt == 0)
@@ -2590,7 +2592,7 @@ static bool dcn30_resource_construct(
ddc_init_data.id.id = dc->ctx->dc_bios->fw_info.oem_i2c_obj_id;
ddc_init_data.id.enum_id = 0;
ddc_init_data.id.type = OBJECT_TYPE_GENERIC;
- pool->base.oem_device = dal_ddc_service_create(&ddc_init_data);
+ pool->base.oem_device = link_create_ddc_service(&ddc_init_data);
} else {
pool->base.oem_device = NULL;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h
index 7d063c7d6a4b..8e6b8b7368fd 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h
@@ -64,7 +64,8 @@ bool dcn30_internal_validate_bw(
display_e2e_pipe_params_st *pipes,
int *pipe_cnt_out,
int *vlevel_out,
- bool fast_validate);
+ bool fast_validate,
+ bool allow_self_refresh_only);
void dcn30_calculate_wm_and_dlg(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_dio_link_encoder.c
index c9fbaed23965..1b39a6e8a1ac 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_dio_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_dio_link_encoder.c
@@ -29,7 +29,6 @@
#include "link_encoder.h"
#include "dcn301_dio_link_encoder.h"
#include "stream_encoder.h"
-#include "i2caux_interface.h"
#include "dc_bios_types.h"
#include "gpio_service_interface.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
index 8cf10351f271..ee62ae3eb98f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
@@ -1414,7 +1414,8 @@ static struct resource_funcs dcn301_res_pool_funcs = {
.find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link,
.acquire_post_bldn_3dlut = dcn30_acquire_post_bldn_3dlut,
.release_post_bldn_3dlut = dcn30_release_post_bldn_3dlut,
- .update_bw_bounding_box = dcn301_update_bw_bounding_box
+ .update_bw_bounding_box = dcn301_update_bw_bounding_box,
+ .patch_unknown_plane_state = dcn20_patch_unknown_plane_state
};
static bool dcn301_resource_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
index 47cffd0e6830..03ddf4f5f065 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
@@ -47,6 +47,7 @@
#include "dcn10/dcn10_resource.h"
+#include "link.h"
#include "dce/dce_abm.h"
#include "dce/dce_audio.h"
#include "dce/dce_aux.h"
@@ -1125,6 +1126,9 @@ static void dcn302_resource_destruct(struct resource_pool *pool)
if (pool->dccg != NULL)
dcn_dccg_destroy(&pool->dccg);
+
+ if (pool->oem_device != NULL)
+ link_destroy_ddc_service(&pool->oem_device);
}
static void dcn302_destroy_resource_pool(struct resource_pool **pool)
@@ -1216,6 +1220,7 @@ static bool dcn302_resource_construct(
int i;
struct dc_context *ctx = dc->ctx;
struct irq_service_init_data init_data;
+ struct ddc_service_init_data ddc_init_data = {0};
ctx->dc_bios->regs = &bios_regs;
@@ -1497,6 +1502,17 @@ static bool dcn302_resource_construct(
dc->cap_funcs = cap_funcs;
+ if (dc->ctx->dc_bios->fw_info.oem_i2c_present) {
+ ddc_init_data.ctx = dc->ctx;
+ ddc_init_data.link = NULL;
+ ddc_init_data.id.id = dc->ctx->dc_bios->fw_info.oem_i2c_obj_id;
+ ddc_init_data.id.enum_id = 0;
+ ddc_init_data.id.type = OBJECT_TYPE_GENERIC;
+ pool->oem_device = link_create_ddc_service(&ddc_init_data);
+ } else {
+ pool->oem_device = NULL;
+ }
+
return true;
create_fail:
diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
index c14d35894b2e..31e212064168 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
@@ -29,7 +29,7 @@
#include "dcn10/dcn10_resource.h"
-#include "dc_link_ddc.h"
+#include "link.h"
#include "dce/dce_abm.h"
#include "dce/dce_audio.h"
@@ -1054,7 +1054,7 @@ static void dcn303_resource_destruct(struct resource_pool *pool)
dcn_dccg_destroy(&pool->dccg);
if (pool->oem_device != NULL)
- dal_ddc_service_destroy(&pool->oem_device);
+ link_destroy_ddc_service(&pool->oem_device);
}
static void dcn303_destroy_resource_pool(struct resource_pool **pool)
@@ -1421,7 +1421,7 @@ static bool dcn303_resource_construct(
ddc_init_data.id.id = dc->ctx->dc_bios->fw_info.oem_i2c_obj_id;
ddc_init_data.id.enum_id = 0;
ddc_init_data.id.type = OBJECT_TYPE_GENERIC;
- pool->oem_device = dal_ddc_service_create(&ddc_init_data);
+ pool->oem_device = link_create_ddc_service(&ddc_init_data);
} else {
pool->oem_device = NULL;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c
index ab70ebd8f223..275e78c06dee 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c
@@ -30,7 +30,6 @@
#include "link_encoder.h"
#include "dcn31_dio_link_encoder.h"
#include "stream_encoder.h"
-#include "i2caux_interface.h"
#include "dc_bios_types.h"
#include "gpio_service_interface.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c
index 80dfaa4d4d81..0b317ed31f91 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c
@@ -242,7 +242,10 @@ void dcn31_hpo_dp_link_enc_set_link_test_pattern(
REG_UPDATE(DP_DPHY_SYM32_CONTROL,
MODE, DP2_TEST_PATTERN);
break;
- case DP_TEST_PATTERN_SQUARE_PULSE:
+ case DP_TEST_PATTERN_SQUARE:
+ case DP_TEST_PATTERN_SQUARE_PRESHOOT_DISABLED:
+ case DP_TEST_PATTERN_SQUARE_DEEMPHASIS_DISABLED:
+ case DP_TEST_PATTERN_SQUARE_PRESHOOT_DEEMPHASIS_DISABLED:
REG_SET(DP_DPHY_SYM32_TP_SQ_PULSE, 0,
TP_SQ_PULSE_WIDTH, tp_params->custom_pattern[0]);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c
index 16639bd03adf..d76f55a12eb4 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c
@@ -430,6 +430,22 @@ static void dcn31_hpo_dp_stream_enc_set_stream_attribute(
MSA_DATA_LANE_3, 0);
}
+static void dcn31_hpo_dp_stream_enc_update_dp_info_packets_sdp_line_num(
+ struct hpo_dp_stream_encoder *enc,
+ struct encoder_info_frame *info_frame)
+{
+ struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc);
+
+ if (info_frame->adaptive_sync.valid == true &&
+ info_frame->sdp_line_num.adaptive_sync_line_num_valid == true) {
+ //00: REFER_TO_DP_SOF, 01: REFER_TO_OTG_SOF
+ REG_UPDATE(DP_SYM32_ENC_SDP_GSP_CONTROL5, GSP_SOF_REFERENCE, 1);
+
+ REG_UPDATE(DP_SYM32_ENC_SDP_GSP_CONTROL5, GSP_TRANSMISSION_LINE_NUMBER,
+ info_frame->sdp_line_num.adaptive_sync_line_num);
+ }
+}
+
static void dcn31_hpo_dp_stream_enc_update_dp_info_packets(
struct hpo_dp_stream_encoder *enc,
const struct encoder_info_frame *info_frame)
@@ -458,12 +474,20 @@ static void dcn31_hpo_dp_stream_enc_update_dp_info_packets(
&info_frame->hdrsmd,
true);
+ if (info_frame->adaptive_sync.valid)
+ enc->vpg->funcs->update_generic_info_packet(
+ enc->vpg,
+ 5, /* packetIndex */
+ &info_frame->adaptive_sync,
+ true);
+
/* enable/disable transmission of packet(s).
* If enabled, packet transmission begins on the next frame
*/
REG_UPDATE(DP_SYM32_ENC_SDP_GSP_CONTROL0, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, info_frame->vsc.valid);
REG_UPDATE(DP_SYM32_ENC_SDP_GSP_CONTROL2, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, info_frame->spd.valid);
REG_UPDATE(DP_SYM32_ENC_SDP_GSP_CONTROL3, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, info_frame->hdrsmd.valid);
+ REG_UPDATE(DP_SYM32_ENC_SDP_GSP_CONTROL5, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, info_frame->adaptive_sync.valid);
/* check if dynamic metadata packet transmission is enabled */
REG_GET(DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL,
@@ -714,6 +738,7 @@ static const struct hpo_dp_stream_encoder_funcs dcn30_str_enc_funcs = {
.dp_blank = dcn31_hpo_dp_stream_enc_dp_blank,
.disable = dcn31_hpo_dp_stream_enc_disable,
.set_stream_attribute = dcn31_hpo_dp_stream_enc_set_stream_attribute,
+ .update_dp_info_packets_sdp_line_num = dcn31_hpo_dp_stream_enc_update_dp_info_packets_sdp_line_num,
.update_dp_info_packets = dcn31_hpo_dp_stream_enc_update_dp_info_packets,
.stop_dp_info_packets = dcn31_hpo_dp_stream_enc_stop_dp_info_packets,
.dp_set_dsc_pps_info_packet = dcn31_hpo_dp_stream_enc_set_dsc_pps_info_packet,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c
index 6360dc9502e7..7e7cd5b64e6a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c
@@ -1008,6 +1008,24 @@ static bool hubbub31_verify_allow_pstate_change_high(struct hubbub *hubbub)
return false;
}
+void hubbub31_init(struct hubbub *hubbub)
+{
+ struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
+
+ /*Enable clock gate*/
+ if (hubbub->ctx->dc->debug.disable_clock_gate) {
+ /*done in hwseq*/
+ /*REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);*/
+ REG_UPDATE_2(DCHUBBUB_CLOCK_CNTL,
+ DISPCLK_R_DCHUBBUB_GATE_DIS, 0,
+ DCFCLK_R_DCHUBBUB_GATE_DIS, 0);
+ }
+
+ /*
+ only the DCN will determine when to connect the SDP port
+ */
+ REG_UPDATE(DCHUBBUB_SDPIF_CFG0, SDPIF_PORT_CONTROL, 1);
+}
static const struct hubbub_funcs hubbub31_funcs = {
.update_dchub = hubbub2_update_dchub,
.init_dchub_sys_ctx = hubbub31_init_dchub_sys_ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.h
index 70c60de448ac..89d6208287b5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.h
@@ -42,6 +42,10 @@
SR(DCHUBBUB_COMPBUF_CTRL),\
SR(COMPBUF_RESERVED_SPACE),\
SR(DCHUBBUB_DEBUG_CTRL_0),\
+ SR(DCHUBBUB_CLOCK_CNTL),\
+ SR(DCHUBBUB_SDPIF_CFG0),\
+ SR(DCHUBBUB_SDPIF_CFG1),\
+ SR(DCHUBBUB_MEM_PWR_MODE_CTRL),\
SR(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_A),\
SR(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_A),\
SR(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_B),\
@@ -120,11 +124,17 @@
HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_VMID, mask_sh), \
HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_TABLE_LEVEL, mask_sh), \
HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_PIPE, mask_sh), \
- HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_INTERRUPT_STATUS, mask_sh)
+ HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_INTERRUPT_STATUS, mask_sh),\
+ HUBBUB_SF(DCHUBBUB_CLOCK_CNTL, DISPCLK_R_DCHUBBUB_GATE_DIS, mask_sh),\
+ HUBBUB_SF(DCHUBBUB_CLOCK_CNTL, DCFCLK_R_DCHUBBUB_GATE_DIS, mask_sh),\
+ HUBBUB_SF(DCHUBBUB_SDPIF_CFG0, SDPIF_PORT_CONTROL, mask_sh),\
+ HUBBUB_SF(DCHUBBUB_MEM_PWR_MODE_CTRL, DET_MEM_PWR_LS_MODE, mask_sh)
int hubbub31_init_dchub_sys_ctx(struct hubbub *hubbub,
struct dcn_hubbub_phys_addr_config *pa_config);
+void hubbub31_init(struct hubbub *hubbub);
+
void hubbub31_construct(struct dcn20_hubbub *hubbub3,
struct dc_context *ctx,
const struct dcn_hubbub_registers *hubbub_regs,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
index 4226a051df41..d13e46eeee3c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
@@ -45,8 +45,7 @@
#include "link_hwss.h"
#include "dpcd_defs.h"
#include "dce/dmub_outbox.h"
-#include "dc_link_dp.h"
-#include "inc/link_dpcd.h"
+#include "link.h"
#include "dcn10/dcn10_hw_sequencer.h"
#include "inc/link_enc_cfg.h"
#include "dcn30/dcn30_vpg.h"
@@ -203,7 +202,7 @@ void dcn31_init_hw(struct dc *dc)
dmub_enable_outbox_notification(dc->ctx->dmub_srv);
/* we want to turn off all dp displays before doing detection */
- dc_link_blank_all_dp_displays(dc);
+ link_blank_all_dp_displays(dc);
if (hws->funcs.enable_power_gating_plane)
hws->funcs.enable_power_gating_plane(dc->hwseq, true);
@@ -231,7 +230,7 @@ void dcn31_init_hw(struct dc *dc)
}
if (num_opps > 1) {
- dc_link_blank_all_edp_displays(dc);
+ link_blank_all_edp_displays(dc);
break;
}
}
@@ -415,7 +414,17 @@ void dcn31_update_info_frame(struct pipe_ctx *pipe_ctx)
pipe_ctx->stream_res.stream_enc->funcs->update_hdmi_info_packets(
pipe_ctx->stream_res.stream_enc,
&pipe_ctx->stream_res.encoder_info_frame);
- else {
+ else if (link_is_dp_128b_132b_signal(pipe_ctx)) {
+ pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->update_dp_info_packets(
+ pipe_ctx->stream_res.hpo_dp_stream_enc,
+ &pipe_ctx->stream_res.encoder_info_frame);
+ return;
+ } else {
+ if (pipe_ctx->stream_res.stream_enc->funcs->update_dp_info_packets_sdp_line_num)
+ pipe_ctx->stream_res.stream_enc->funcs->update_dp_info_packets_sdp_line_num(
+ pipe_ctx->stream_res.stream_enc,
+ &pipe_ctx->stream_res.encoder_info_frame);
+
pipe_ctx->stream_res.stream_enc->funcs->update_dp_info_packets(
pipe_ctx->stream_res.stream_enc,
&pipe_ctx->stream_res.encoder_info_frame);
@@ -556,7 +565,7 @@ static void dcn31_reset_back_end_for_pipe(
* VBIOS lit up eDP, so check link status too.
*/
if (!pipe_ctx->stream->dpms_off || link->link_status.link_active)
- core_link_disable_stream(pipe_ctx);
+ link_set_dpms_off(pipe_ctx);
else if (pipe_ctx->stream_res.audio)
dc->hwss.disable_audio_stream(pipe_ctx);
@@ -575,7 +584,7 @@ static void dcn31_reset_back_end_for_pipe(
}
}
} else if (pipe_ctx->stream_res.dsc) {
- dp_set_dsc_enable(pipe_ctx, false);
+ link_set_dsc_enable(pipe_ctx, false);
}
pipe_ctx->stream = NULL;
@@ -623,43 +632,3 @@ void dcn31_setup_hpo_hw_control(const struct dce_hwseq *hws, bool enable)
if (hws->ctx->dc->debug.hpo_optimization)
REG_UPDATE(HPO_TOP_HW_CONTROL, HPO_IO_EN, !!enable);
}
-void dcn31_set_drr(struct pipe_ctx **pipe_ctx,
- int num_pipes, struct dc_crtc_timing_adjust adjust)
-{
- int i = 0;
- struct drr_params params = {0};
- unsigned int event_triggers = 0x2;/*Bit[1]: OTG_TRIG_A*/
- unsigned int num_frames = 2;
- params.vertical_total_max = adjust.v_total_max;
- params.vertical_total_min = adjust.v_total_min;
- params.vertical_total_mid = adjust.v_total_mid;
- params.vertical_total_mid_frame_num = adjust.v_total_mid_frame_num;
- for (i = 0; i < num_pipes; i++) {
- if ((pipe_ctx[i]->stream_res.tg != NULL) && pipe_ctx[i]->stream_res.tg->funcs) {
- if (pipe_ctx[i]->stream_res.tg->funcs->set_drr)
- pipe_ctx[i]->stream_res.tg->funcs->set_drr(
- pipe_ctx[i]->stream_res.tg, &params);
- if (adjust.v_total_max != 0 && adjust.v_total_min != 0)
- if (pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control)
- pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control(
- pipe_ctx[i]->stream_res.tg,
- event_triggers, num_frames);
- }
- }
-}
-void dcn31_set_static_screen_control(struct pipe_ctx **pipe_ctx,
- int num_pipes, const struct dc_static_screen_params *params)
-{
- unsigned int i;
- unsigned int triggers = 0;
- if (params->triggers.surface_update)
- triggers |= 0x600;/*bit 9 and bit10 : 110 0000 0000*/
- if (params->triggers.cursor_update)
- triggers |= 0x10;/*bit4*/
- if (params->triggers.force_trigger)
- triggers |= 0x1;
- for (i = 0; i < num_pipes; i++)
- pipe_ctx[i]->stream_res.tg->funcs->
- set_static_screen_control(pipe_ctx[i]->stream_res.tg,
- triggers, params->num_frames);
-}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.h
index e7e03a8722e0..edfc01d6ad73 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.h
@@ -56,8 +56,4 @@ bool dcn31_is_abm_supported(struct dc *dc,
void dcn31_init_pipes(struct dc *dc, struct dc_state *context);
void dcn31_setup_hpo_hw_control(const struct dce_hwseq *hws, bool enable);
-void dcn31_set_static_screen_control(struct pipe_ctx **pipe_ctx,
- int num_pipes, const struct dc_static_screen_params *params);
-void dcn31_set_drr(struct pipe_ctx **pipe_ctx,
- int num_pipes, struct dc_crtc_timing_adjust adjust);
#endif /* __DC_HWSS_DCN31_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
index 7c2da70ffe21..3a32810bbe38 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
@@ -64,9 +64,9 @@ static const struct hw_sequencer_funcs dcn31_funcs = {
.prepare_bandwidth = dcn20_prepare_bandwidth,
.optimize_bandwidth = dcn20_optimize_bandwidth,
.update_bandwidth = dcn20_update_bandwidth,
- .set_drr = dcn31_set_drr,
+ .set_drr = dcn10_set_drr,
.get_position = dcn10_get_position,
- .set_static_screen_control = dcn31_set_static_screen_control,
+ .set_static_screen_control = dcn10_set_static_screen_control,
.setup_stereo = dcn10_setup_stereo,
.set_avmute = dcn30_set_avmute,
.log_hw_state = dcn10_log_hw_state,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c
index fe449f7aa771..63a677c8ee27 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c
@@ -40,7 +40,6 @@
#define FN(reg_name, field_name) \
optc1->tg_shift->field_name, optc1->tg_mask->field_name
-#define STATIC_SCREEN_EVENT_MASK_DRR_DOUBLE_BUFFER_UPDATE_EN 0x2000 /*bit 13*/
static void optc31_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_cnt,
struct dc_crtc_timing *timing)
{
@@ -232,32 +231,6 @@ void optc3_init_odm(struct timing_generator *optc)
OPTC_MEM_SEL, 0);
optc1->opp_count = 1;
}
-void optc31_set_static_screen_control(
- struct timing_generator *optc,
- uint32_t event_triggers,
- uint32_t num_frames)
-{
- struct optc *optc1 = DCN10TG_FROM_TG(optc);
- uint32_t framecount;
- uint32_t events;
-
- if (num_frames > 0xFF)
- num_frames = 0xFF;
- REG_GET_2(OTG_STATIC_SCREEN_CONTROL,
- OTG_STATIC_SCREEN_EVENT_MASK, &events,
- OTG_STATIC_SCREEN_FRAME_COUNT, &framecount);
-
- if (events == event_triggers && num_frames == framecount)
- return;
- if ((event_triggers & STATIC_SCREEN_EVENT_MASK_DRR_DOUBLE_BUFFER_UPDATE_EN)
- != 0)
- event_triggers = event_triggers &
- ~STATIC_SCREEN_EVENT_MASK_DRR_DOUBLE_BUFFER_UPDATE_EN;
-
- REG_UPDATE_2(OTG_STATIC_SCREEN_CONTROL,
- OTG_STATIC_SCREEN_EVENT_MASK, event_triggers,
- OTG_STATIC_SCREEN_FRAME_COUNT, num_frames);
-}
static struct timing_generator_funcs dcn31_tg_funcs = {
.validate_timing = optc1_validate_timing,
@@ -293,7 +266,7 @@ static struct timing_generator_funcs dcn31_tg_funcs = {
.set_drr = optc31_set_drr,
.get_last_used_drr_vtotal = optc2_get_last_used_drr_vtotal,
.set_vtotal_min_max = optc1_set_vtotal_min_max,
- .set_static_screen_control = optc31_set_static_screen_control,
+ .set_static_screen_control = optc1_set_static_screen_control,
.program_stereo = optc1_program_stereo,
.is_stereo_left_eye = optc1_is_stereo_left_eye,
.tg_init = optc3_tg_init,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.h
index 5fc6c63580d7..30b81a448ce2 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.h
@@ -263,8 +263,5 @@ bool optc31_immediate_disable_crtc(struct timing_generator *optc);
void optc31_set_drr(struct timing_generator *optc, const struct drr_params *params);
void optc3_init_odm(struct timing_generator *optc);
-void optc31_set_static_screen_control(
- struct timing_generator *optc,
- uint32_t event_triggers,
- uint32_t num_frames);
+
#endif /* __DC_OPTC_DCN31_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
index 3ca517dcc82d..d3918a10773a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
@@ -1795,7 +1795,7 @@ bool dcn31_validate_bandwidth(struct dc *dc,
BW_VAL_TRACE_COUNT();
DC_FP_START();
- out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate);
+ out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate, true);
DC_FP_END();
// Disable fast_validate to set min dcfclk in alculate_wm_and_dlg
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c
index 38842f938bed..962a2c02b422 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c
@@ -30,7 +30,7 @@
#include "dcn314_dio_stream_encoder.h"
#include "reg_helper.h"
#include "hw_shared.h"
-#include "inc/link_dpcd.h"
+#include "link.h"
#include "dpcd_defs.h"
#define DC_LOGGER \
@@ -278,10 +278,11 @@ static void enc314_stream_encoder_dp_blank(
struct dc_link *link,
struct stream_encoder *enc)
{
- /* New to DCN314 - disable the FIFO before VID stream disable. */
- enc314_disable_fifo(enc);
-
enc1_stream_encoder_dp_blank(link, enc);
+
+ /* Disable FIFO after the DP vid stream is disabled to avoid corruption. */
+ if (enc->ctx->dc->debug.dig_fifo_off_in_blank)
+ enc314_disable_fifo(enc);
}
static void enc314_stream_encoder_dp_unblank(
@@ -365,7 +366,7 @@ static void enc314_stream_encoder_dp_unblank(
*/
enc314_enable_fifo(enc);
- dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_DP_VID_STREAM);
+ link_dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_DP_VID_STREAM);
}
/* Set DSC-related configuration.
@@ -428,6 +429,8 @@ static const struct stream_encoder_funcs dcn314_str_enc_funcs = {
enc3_stream_encoder_update_hdmi_info_packets,
.stop_hdmi_info_packets =
enc3_stream_encoder_stop_hdmi_info_packets,
+ .update_dp_info_packets_sdp_line_num =
+ enc3_stream_encoder_update_dp_info_packets_sdp_line_num,
.update_dp_info_packets =
enc3_stream_encoder_update_dp_info_packets,
.stop_dp_info_packets =
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.h
index 33dfdf8b4100..ed0772387903 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.h
@@ -280,6 +280,10 @@ void enc3_stream_encoder_update_hdmi_info_packets(
void enc3_stream_encoder_stop_hdmi_info_packets(
struct stream_encoder *enc);
+void enc3_stream_encoder_update_dp_info_packets_sdp_line_num(
+ struct stream_encoder *enc,
+ struct encoder_info_frame *info_frame);
+
void enc3_stream_encoder_update_dp_info_packets(
struct stream_encoder *enc,
const struct encoder_info_frame *info_frame);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c
index a0741794db62..575d3501c848 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.c
@@ -46,9 +46,7 @@
#include "link_hwss.h"
#include "dpcd_defs.h"
#include "dce/dmub_outbox.h"
-#include "dc_link_dp.h"
-#include "inc/dc_link_dp.h"
-#include "inc/link_dpcd.h"
+#include "link.h"
#include "dcn10/dcn10_hw_sequencer.h"
#include "inc/link_enc_cfg.h"
#include "dcn30/dcn30_vpg.h"
@@ -348,7 +346,7 @@ unsigned int dcn314_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsig
two_pix_per_container = optc2_is_two_pixels_per_containter(&stream->timing);
odm_combine_factor = get_odm_config(pipe_ctx, NULL);
- if (is_dp_128b_132b_signal(pipe_ctx)) {
+ if (link_is_dp_128b_132b_signal(pipe_ctx)) {
*k1_div = PIXEL_RATE_DIV_BY_1;
*k2_div = PIXEL_RATE_DIV_BY_1;
} else if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal) || dc_is_dvi_signal(pipe_ctx->stream->signal)) {
@@ -391,3 +389,27 @@ void dcn314_set_pixels_per_cycle(struct pipe_ctx *pipe_ctx)
pipe_ctx->stream_res.stream_enc->funcs->set_input_mode(pipe_ctx->stream_res.stream_enc,
pix_per_cycle);
}
+
+void dcn314_hubp_pg_control(struct dce_hwseq *hws, unsigned int hubp_inst, bool power_on)
+{
+ struct dc_context *ctx = hws->ctx;
+ union dmub_rb_cmd cmd;
+
+ if (hws->ctx->dc->debug.disable_hubp_power_gate)
+ return;
+
+ PERF_TRACE();
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.domain_control.header.type = DMUB_CMD__VBIOS;
+ cmd.domain_control.header.sub_type = DMUB_CMD__VBIOS_DOMAIN_CONTROL;
+ cmd.domain_control.header.payload_bytes = sizeof(cmd.domain_control.data);
+ cmd.domain_control.data.inst = hubp_inst;
+ cmd.domain_control.data.power_gate = !power_on;
+
+ dc_dmub_srv_cmd_queue(ctx->dmub_srv, &cmd);
+ dc_dmub_srv_cmd_execute(ctx->dmub_srv);
+ dc_dmub_srv_wait_idle(ctx->dmub_srv);
+
+ PERF_TRACE();
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h
index 244280298212..c419d3dbdfee 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_hwseq.h
@@ -41,4 +41,6 @@ unsigned int dcn314_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsig
void dcn314_set_pixels_per_cycle(struct pipe_ctx *pipe_ctx);
+void dcn314_hubp_pg_control(struct dce_hwseq *hws, unsigned int hubp_inst, bool power_on);
+
#endif /* __DC_HWSS_DCN314_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c
index 31feb4b0edee..343f4d9dd5e3 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c
@@ -66,9 +66,9 @@ static const struct hw_sequencer_funcs dcn314_funcs = {
.prepare_bandwidth = dcn20_prepare_bandwidth,
.optimize_bandwidth = dcn20_optimize_bandwidth,
.update_bandwidth = dcn20_update_bandwidth,
- .set_drr = dcn31_set_drr,
+ .set_drr = dcn10_set_drr,
.get_position = dcn10_get_position,
- .set_static_screen_control = dcn31_set_static_screen_control,
+ .set_static_screen_control = dcn10_set_static_screen_control,
.setup_stereo = dcn10_setup_stereo,
.set_avmute = dcn30_set_avmute,
.log_hw_state = dcn10_log_hw_state,
@@ -137,7 +137,7 @@ static const struct hwseq_private_funcs dcn314_private_funcs = {
.plane_atomic_disable = dcn20_plane_atomic_disable,
.plane_atomic_power_down = dcn10_plane_atomic_power_down,
.enable_power_gating_plane = dcn314_enable_power_gating_plane,
- .hubp_pg_control = dcn31_hubp_pg_control,
+ .hubp_pg_control = dcn314_hubp_pg_control,
.program_all_writeback_pipes_in_tree = dcn30_program_all_writeback_pipes_in_tree,
.update_odm = dcn314_update_odm,
.dsc_pg_control = dcn314_dsc_pg_control,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_optc.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_optc.c
index 41edbd64ea21..0086cafb0f7a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_optc.c
@@ -228,7 +228,7 @@ static struct timing_generator_funcs dcn314_tg_funcs = {
.set_drr = optc31_set_drr,
.get_last_used_drr_vtotal = optc2_get_last_used_drr_vtotal,
.set_vtotal_min_max = optc1_set_vtotal_min_max,
- .set_static_screen_control = optc31_set_static_screen_control,
+ .set_static_screen_control = optc1_set_static_screen_control,
.program_stereo = optc1_program_stereo,
.is_stereo_left_eye = optc1_is_stereo_left_eye,
.tg_init = optc3_tg_init,
@@ -241,7 +241,6 @@ static struct timing_generator_funcs dcn314_tg_funcs = {
.set_dsc_config = optc3_set_dsc_config,
.get_dsc_status = optc2_get_dsc_status,
.set_dwb_source = NULL,
- .set_odm_combine = optc314_set_odm_combine,
.get_optc_source = optc2_get_optc_source,
.set_out_mux = optc3_set_out_mux,
.set_drr_trigger_window = optc3_set_drr_trigger_window,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
index f9ea1e86707f..54ed3de869d3 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
@@ -874,8 +874,9 @@ static const struct dc_plane_cap plane_cap = {
},
// 6:1 downscaling ratio: 1000/6 = 166.666
+ // 4:1 downscaling ratio for ARGB888 to prevent underflow during P010 playback: 1000/4 = 250
.max_downscale_factor = {
- .argb8888 = 167,
+ .argb8888 = 250,
.nv12 = 167,
.fp16 = 167
},
@@ -891,6 +892,8 @@ static const struct dc_debug_options debug_defaults_drv = {
.force_abm_enable = false,
.timing_trace = false,
.clock_trace = true,
+ .disable_dpp_power_gate = true,
+ .disable_hubp_power_gate = true,
.disable_pplib_clock_request = false,
.pipe_split_policy = MPC_SPLIT_DYNAMIC,
.force_single_disp_pipe_split = false,
@@ -900,7 +903,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.max_downscale_src_width = 4096,/*upto true 4k*/
.disable_pplib_wm_range = false,
.scl_reset_length10 = true,
- .sanity_checks = false,
+ .sanity_checks = true,
.underflow_assert_delay_us = 0xFFFFFFFF,
.dwb_fi_phase = -1, // -1 = disable,
.dmub_command_table = true,
@@ -1694,6 +1697,61 @@ static void dcn314_get_panel_config_defaults(struct dc_panel_config *panel_confi
*panel_config = panel_config_defaults;
}
+bool dcn314_validate_bandwidth(struct dc *dc,
+ struct dc_state *context,
+ bool fast_validate)
+{
+ bool out = false;
+
+ BW_VAL_TRACE_SETUP();
+
+ int vlevel = 0;
+ int pipe_cnt = 0;
+ display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_KERNEL);
+ DC_LOGGER_INIT(dc->ctx->logger);
+
+ BW_VAL_TRACE_COUNT();
+
+ DC_FP_START();
+ // do not support self refresh only
+ out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate, false);
+ DC_FP_END();
+
+ // Disable fast_validate to set min dcfclk in calculate_wm_and_dlg
+ if (pipe_cnt == 0)
+ fast_validate = false;
+
+ if (!out)
+ goto validate_fail;
+
+ BW_VAL_TRACE_END_VOLTAGE_LEVEL();
+
+ if (fast_validate) {
+ BW_VAL_TRACE_SKIP(fast);
+ goto validate_out;
+ }
+
+ dc->res_pool->funcs->calculate_wm_and_dlg(dc, context, pipes, pipe_cnt, vlevel);
+
+ BW_VAL_TRACE_END_WATERMARKS();
+
+ goto validate_out;
+
+validate_fail:
+ DC_LOG_WARNING("Mode Validation Warning: %s failed validation.\n",
+ dml_get_status_message(context->bw_ctx.dml.vba.ValidationStatus[context->bw_ctx.dml.vba.soc.num_states]));
+
+ BW_VAL_TRACE_SKIP(fail);
+ out = false;
+
+validate_out:
+ kfree(pipes);
+
+ BW_VAL_TRACE_FINISH();
+
+ return out;
+}
+
static struct resource_funcs dcn314_res_pool_funcs = {
.destroy = dcn314_destroy_resource_pool,
.link_enc_create = dcn31_link_encoder_create,
@@ -1701,7 +1759,7 @@ static struct resource_funcs dcn314_res_pool_funcs = {
.link_encs_assign = link_enc_cfg_link_encs_assign,
.link_enc_unassign = link_enc_cfg_link_enc_unassign,
.panel_cntl_create = dcn31_panel_cntl_create,
- .validate_bandwidth = dcn31_validate_bandwidth,
+ .validate_bandwidth = dcn314_validate_bandwidth,
.calculate_wm_and_dlg = dcn31_calculate_wm_and_dlg,
.update_soc_for_wm_a = dcn31_update_soc_for_wm_a,
.populate_dml_pipes = dcn314_populate_dml_pipes_from_context,
@@ -1763,7 +1821,7 @@ static bool dcn314_resource_construct(
pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
pool->base.pipe_count = pool->base.res_cap->num_timing_generator;
pool->base.mpcc_count = pool->base.res_cap->num_timing_generator;
- dc->caps.max_downscale_ratio = 600;
+ dc->caps.max_downscale_ratio = 400;
dc->caps.i2c_speed_in_khz = 100;
dc->caps.i2c_speed_in_khz_hdcp = 100;
dc->caps.max_cursor_size = 256;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.h b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.h
index 0dd3153aa5c1..49ffe71018df 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.h
@@ -39,6 +39,10 @@ struct dcn314_resource_pool {
struct resource_pool base;
};
+bool dcn314_validate_bandwidth(struct dc *dc,
+ struct dc_state *context,
+ bool fast_validate);
+
struct resource_pool *dcn314_create_resource_pool(
const struct dc_init_data *init_data,
struct dc *dc);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c b/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c
index b4d5076e124c..dc0b49506275 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c
@@ -1776,7 +1776,7 @@ static bool dcn316_resource_construct(
pool->base.mpcc_count = pool->base.res_cap->num_timing_generator;
dc->caps.max_downscale_ratio = 600;
dc->caps.i2c_speed_in_khz = 100;
- dc->caps.i2c_speed_in_khz_hdcp = 100;
+ dc->caps.i2c_speed_in_khz_hdcp = 5; /*1.5 w/a applied by default*/
dc->caps.max_cursor_size = 256;
dc->caps.min_horizontal_blanking_period = 80;
dc->caps.dmdata_alloc_size = 2048;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c
index 076969d928af..501388014855 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c
@@ -31,7 +31,6 @@
#include "dcn31/dcn31_dio_link_encoder.h"
#include "dcn32_dio_link_encoder.h"
#include "stream_encoder.h"
-#include "i2caux_interface.h"
#include "dc_bios_types.h"
#include "link_enc_cfg.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.c
index d19fc93dbc75..36e6f5657942 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.c
@@ -29,7 +29,7 @@
#include "dcn32_dio_stream_encoder.h"
#include "reg_helper.h"
#include "hw_shared.h"
-#include "inc/link_dpcd.h"
+#include "link.h"
#include "dpcd_defs.h"
#define DC_LOGGER \
@@ -373,7 +373,7 @@ static void enc32_stream_encoder_dp_unblank(
REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, true);
- dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_DP_VID_STREAM);
+ link_dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_DP_VID_STREAM);
}
/* Set DSC-related configuration.
@@ -421,6 +421,33 @@ static void enc32_set_dig_input_mode(struct stream_encoder *enc, unsigned int pi
REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_OUTPUT_PIXEL_MODE, pix_per_container == 2 ? 0x1 : 0x0);
}
+static void enc32_reset_fifo(struct stream_encoder *enc, bool reset)
+{
+ struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+ uint32_t reset_val = reset ? 1 : 0;
+ uint32_t is_symclk_on;
+
+ REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_RESET, reset_val);
+ REG_GET(DIG_FE_CNTL, DIG_SYMCLK_FE_ON, &is_symclk_on);
+
+ if (is_symclk_on)
+ REG_WAIT(DIG_FIFO_CTRL0, DIG_FIFO_RESET_DONE, reset_val, 10, 5000);
+ else
+ udelay(10);
+}
+
+static void enc32_enable_fifo(struct stream_encoder *enc)
+{
+ struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
+
+ REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_READ_START_LEVEL, 0x7);
+
+ enc32_reset_fifo(enc, true);
+ enc32_reset_fifo(enc, false);
+
+ REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_ENABLE, 1);
+}
+
static const struct stream_encoder_funcs dcn32_str_enc_funcs = {
.dp_set_odm_combine =
enc32_dp_set_odm_combine,
@@ -436,6 +463,8 @@ static const struct stream_encoder_funcs dcn32_str_enc_funcs = {
enc3_stream_encoder_update_hdmi_info_packets,
.stop_hdmi_info_packets =
enc3_stream_encoder_stop_hdmi_info_packets,
+ .update_dp_info_packets_sdp_line_num =
+ enc3_stream_encoder_update_dp_info_packets_sdp_line_num,
.update_dp_info_packets =
enc3_stream_encoder_update_dp_info_packets,
.stop_dp_info_packets =
@@ -466,6 +495,7 @@ static const struct stream_encoder_funcs dcn32_str_enc_funcs = {
.hdmi_reset_stream_attribute = enc1_reset_hdmi_stream_attribute,
.set_input_mode = enc32_set_dig_input_mode,
+ .enable_fifo = enc32_enable_fifo,
};
void dcn32_dio_stream_encoder_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c
index 9501403a48a9..eb08ccc38e79 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.c
@@ -945,6 +945,35 @@ void hubbub32_force_wm_propagate_to_pipes(struct hubbub *hubbub)
DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value);
}
+void hubbub32_init(struct hubbub *hubbub)
+{
+ struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
+
+ /* Enable clock gate*/
+ if (hubbub->ctx->dc->debug.disable_clock_gate) {
+ /*done in hwseq*/
+ /*REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);*/
+
+ REG_UPDATE_2(DCHUBBUB_CLOCK_CNTL,
+ DISPCLK_R_DCHUBBUB_GATE_DIS, 0,
+ DCFCLK_R_DCHUBBUB_GATE_DIS, 0);
+ }
+ /*
+ ignore the "df_pre_cstate_req" from the SDP port control.
+ only the DCN will determine when to connect the SDP port
+ */
+ REG_UPDATE(DCHUBBUB_SDPIF_CFG0,
+ SDPIF_PORT_CONTROL, 1);
+ /*Set SDP's max outstanding request to 512
+ must set the register back to 0 (max outstanding = 256) in zero frame buffer mode*/
+ REG_UPDATE(DCHUBBUB_SDPIF_CFG1,
+ SDPIF_MAX_NUM_OUTSTANDING, 1);
+ /*must set the registers back to 256 in zero frame buffer mode*/
+ REG_UPDATE_2(DCHUBBUB_ARB_DF_REQ_OUTSTAND,
+ DCHUBBUB_ARB_MAX_REQ_OUTSTAND, 512,
+ DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 512);
+}
+
static const struct hubbub_funcs hubbub32_funcs = {
.update_dchub = hubbub2_update_dchub,
.init_dchub_sys_ctx = hubbub3_init_dchub_sys_ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.h
index 786f9ce07f92..b20eb04724bb 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubbub.h
@@ -83,7 +83,12 @@
SR(DCN_VM_FAULT_ADDR_LSB),\
SR(DCN_VM_FAULT_CNTL),\
SR(DCN_VM_FAULT_STATUS),\
- SR(SDPIF_REQUEST_RATE_LIMIT)
+ SR(SDPIF_REQUEST_RATE_LIMIT),\
+ SR(DCHUBBUB_CLOCK_CNTL),\
+ SR(DCHUBBUB_SDPIF_CFG0),\
+ SR(DCHUBBUB_SDPIF_CFG1),\
+ SR(DCHUBBUB_MEM_PWR_MODE_CTRL)
+
#define HUBBUB_MASK_SH_LIST_DCN32(mask_sh)\
HUBBUB_SF(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, mask_sh), \
@@ -96,6 +101,7 @@
HUBBUB_SF(DCHUBBUB_ARB_DRAM_STATE_CNTL, DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE, mask_sh), \
HUBBUB_SF(DCHUBBUB_ARB_SAT_LEVEL, DCHUBBUB_ARB_SAT_LEVEL, mask_sh), \
HUBBUB_SF(DCHUBBUB_ARB_DF_REQ_OUTSTAND, DCHUBBUB_ARB_MIN_REQ_OUTSTAND, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_DF_REQ_OUTSTAND, DCHUBBUB_ARB_MAX_REQ_OUTSTAND, mask_sh), \
HUBBUB_SF(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, mask_sh), \
HUBBUB_SF(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, mask_sh), \
HUBBUB_SF(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, mask_sh), \
@@ -161,7 +167,14 @@
HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_TABLE_LEVEL, mask_sh), \
HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_PIPE, mask_sh), \
HUBBUB_SF(DCN_VM_FAULT_STATUS, DCN_VM_ERROR_INTERRUPT_STATUS, mask_sh),\
- HUBBUB_SF(SDPIF_REQUEST_RATE_LIMIT, SDPIF_REQUEST_RATE_LIMIT, mask_sh)
+ HUBBUB_SF(SDPIF_REQUEST_RATE_LIMIT, SDPIF_REQUEST_RATE_LIMIT, mask_sh),\
+ HUBBUB_SF(DCHUBBUB_CLOCK_CNTL, DISPCLK_R_DCHUBBUB_GATE_DIS, mask_sh),\
+ HUBBUB_SF(DCHUBBUB_CLOCK_CNTL, DCFCLK_R_DCHUBBUB_GATE_DIS, mask_sh),\
+ HUBBUB_SF(DCHUBBUB_SDPIF_CFG0, SDPIF_PORT_CONTROL, mask_sh),\
+ HUBBUB_SF(DCHUBBUB_SDPIF_CFG1, SDPIF_MAX_NUM_OUTSTANDING, mask_sh),\
+ HUBBUB_SF(DCHUBBUB_MEM_PWR_MODE_CTRL, DET_MEM_PWR_LS_MODE, mask_sh)
+
+
bool hubbub32_program_urgent_watermarks(
struct hubbub *hubbub,
@@ -191,6 +204,8 @@ void hubbub32_force_usr_retraining_allow(struct hubbub *hubbub, bool allow);
void hubbub32_force_wm_propagate_to_pipes(struct hubbub *hubbub);
+void hubbub32_init(struct hubbub *hubbub);
+
void dcn32_program_det_size(struct hubbub *hubbub, int hubp_inst, unsigned int det_buffer_size_in_kbyte);
void hubbub32_construct(struct dcn20_hubbub *hubbub2,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c
index ac1c6458dd55..fe0cd177744c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c
@@ -155,7 +155,11 @@ void hubp32_cursor_set_attributes(
else
REG_UPDATE(DCHUBP_MALL_CONFIG, USE_MALL_FOR_CURSOR, false);
}
-
+void hubp32_init(struct hubp *hubp)
+{
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+ REG_WRITE(HUBPREQ_DEBUG_DB, 1 << 8);
+}
static struct hubp_funcs dcn32_hubp_funcs = {
.hubp_enable_tripleBuffer = hubp2_enable_triplebuffer,
.hubp_is_triplebuffer_enabled = hubp2_is_triplebuffer_enabled,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.h
index 56ef71151536..4cdbf63c952b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.h
@@ -61,6 +61,8 @@ void hubp32_phantom_hubp_post_enable(struct hubp *hubp);
void hubp32_cursor_set_attributes(struct hubp *hubp,
const struct dc_cursor_attributes *attr);
+void hubp32_init(struct hubp *hubp);
+
bool hubp32_construct(
struct dcn20_hubp *hubp2,
struct dc_context *ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
index b8767be1e4c5..16f892125b6f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
@@ -50,7 +50,7 @@
#include "dmub_subvp_state.h"
#include "dce/dmub_hw_lock_mgr.h"
#include "dcn32_resource.h"
-#include "dc_link_dp.h"
+#include "link.h"
#include "dmub/inc/dmub_subvp_state.h"
#define DC_LOGGER_INIT(logger)
@@ -188,7 +188,8 @@ static bool dcn32_check_no_memory_request_for_cab(struct dc *dc)
/* First, check no-memory-request case */
for (i = 0; i < dc->current_state->stream_count; i++) {
- if (dc->current_state->stream_status[i].plane_count)
+ if ((dc->current_state->stream_status[i].plane_count) &&
+ (dc->current_state->streams[i]->link->psr_settings.psr_version == DC_PSR_VERSION_UNSUPPORTED))
/* Fail eligibility on a visible stream */
break;
}
@@ -206,151 +207,31 @@ static bool dcn32_check_no_memory_request_for_cab(struct dc *dc)
*/
static uint32_t dcn32_calculate_cab_allocation(struct dc *dc, struct dc_state *ctx)
{
- int i, j;
- struct dc_stream_state *stream = NULL;
- struct dc_plane_state *plane = NULL;
- uint32_t cursor_size = 0;
- uint32_t total_lines = 0;
- uint32_t lines_per_way = 0;
+ int i;
uint8_t num_ways = 0;
- uint8_t bytes_per_pixel = 0;
- uint8_t cursor_bpp = 0;
- uint16_t mblk_width = 0;
- uint16_t mblk_height = 0;
- uint16_t mall_alloc_width_blk_aligned = 0;
- uint16_t mall_alloc_height_blk_aligned = 0;
- uint16_t num_mblks = 0;
- uint32_t bytes_in_mall = 0;
- uint32_t cache_lines_used = 0;
- uint32_t cache_lines_per_plane = 0;
+ uint32_t mall_ss_size_bytes = 0;
+ mall_ss_size_bytes = ctx->bw_ctx.bw.dcn.mall_ss_size_bytes;
+ // TODO add additional logic for PSR active stream exclusion optimization
+ // mall_ss_psr_active_size_bytes = ctx->bw_ctx.bw.dcn.mall_ss_psr_active_size_bytes;
+
+ // Include cursor size for CAB allocation
for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *pipe = &ctx->res_ctx.pipe_ctx[i];
- /* If PSR is supported on an eDP panel that's connected, but that panel is
- * not in PSR at the time of trying to enter MALL SS, we have to include it
- * in the static screen CAB calculation
- */
- if (!pipe->stream || !pipe->plane_state ||
- (pipe->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
- pipe->stream->link->psr_settings.psr_allow_active) ||
- pipe->stream->mall_stream_config.type == SUBVP_PHANTOM)
+ if (!pipe->stream || !pipe->plane_state)
continue;
- bytes_per_pixel = pipe->plane_state->format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 ? 8 : 4;
- mblk_width = DCN3_2_MBLK_WIDTH;
- mblk_height = bytes_per_pixel == 4 ? DCN3_2_MBLK_HEIGHT_4BPE : DCN3_2_MBLK_HEIGHT_8BPE;
-
- /* full_vp_width_blk_aligned = FLOOR(vp_x_start + full_vp_width + blk_width - 1, blk_width) -
- * FLOOR(vp_x_start, blk_width)
- *
- * mall_alloc_width_blk_aligned_l/c = full_vp_width_blk_aligned_l/c
- */
- mall_alloc_width_blk_aligned = ((pipe->plane_res.scl_data.viewport.x +
- pipe->plane_res.scl_data.viewport.width + mblk_width - 1) / mblk_width * mblk_width) -
- (pipe->plane_res.scl_data.viewport.x / mblk_width * mblk_width);
-
- /* full_vp_height_blk_aligned = FLOOR(vp_y_start + full_vp_height + blk_height - 1, blk_height) -
- * FLOOR(vp_y_start, blk_height)
- *
- * mall_alloc_height_blk_aligned_l/c = full_vp_height_blk_aligned_l/c
- */
- mall_alloc_height_blk_aligned = ((pipe->plane_res.scl_data.viewport.y +
- pipe->plane_res.scl_data.viewport.height + mblk_height - 1) / mblk_height * mblk_height) -
- (pipe->plane_res.scl_data.viewport.y / mblk_height * mblk_height);
-
- num_mblks = ((mall_alloc_width_blk_aligned + mblk_width - 1) / mblk_width) *
- ((mall_alloc_height_blk_aligned + mblk_height - 1) / mblk_height);
-
- /*For DCC:
- * meta_num_mblk = CEILING(meta_pitch*full_vp_height*Bpe/256/mblk_bytes, 1)
- */
- if (pipe->plane_state->dcc.enable)
- num_mblks += (pipe->plane_state->dcc.meta_pitch * pipe->plane_res.scl_data.viewport.height * bytes_per_pixel +
- (256 * DCN3_2_MALL_MBLK_SIZE_BYTES) - 1) / (256 * DCN3_2_MALL_MBLK_SIZE_BYTES);
-
- bytes_in_mall = num_mblks * DCN3_2_MALL_MBLK_SIZE_BYTES;
-
- /* (cache lines used is total bytes / cache_line size. Add +2 for worst case alignment
- * (MALL is 64-byte aligned)
- */
- cache_lines_per_plane = bytes_in_mall / dc->caps.cache_line_size + 2;
- cache_lines_used += cache_lines_per_plane;
- }
-
- // Include cursor size for CAB allocation
- for (j = 0; j < dc->res_pool->pipe_count; j++) {
- struct pipe_ctx *pipe = &ctx->res_ctx.pipe_ctx[j];
- struct hubp *hubp = pipe->plane_res.hubp;
-
- if (pipe->stream && pipe->plane_state && hubp)
- /* Find the cursor plane and use the exact size instead of
- using the max for calculation */
-
- if (hubp->curs_attr.width > 0) {
- cursor_size = hubp->curs_attr.pitch * hubp->curs_attr.height;
-
- switch (pipe->stream->cursor_attributes.color_format) {
- case CURSOR_MODE_MONO:
- cursor_size /= 2;
- cursor_bpp = 4;
- break;
- case CURSOR_MODE_COLOR_1BIT_AND:
- case CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA:
- case CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA:
- cursor_size *= 4;
- cursor_bpp = 4;
- break;
-
- case CURSOR_MODE_COLOR_64BIT_FP_PRE_MULTIPLIED:
- case CURSOR_MODE_COLOR_64BIT_FP_UN_PRE_MULTIPLIED:
- cursor_size *= 8;
- cursor_bpp = 8;
- break;
- }
-
- if (pipe->stream->cursor_position.enable && !dc->debug.alloc_extra_way_for_cursor &&
- cursor_size > 16384) {
- /* cursor_num_mblk = CEILING(num_cursors*cursor_width*cursor_width*cursor_Bpe/mblk_bytes, 1)
- */
- cache_lines_used += (((cursor_size + DCN3_2_MALL_MBLK_SIZE_BYTES - 1) /
- DCN3_2_MALL_MBLK_SIZE_BYTES) * DCN3_2_MALL_MBLK_SIZE_BYTES) /
- dc->caps.cache_line_size + 2;
- break;
- }
- }
+ mall_ss_size_bytes += dcn32_helper_calculate_mall_bytes_for_cursor(dc, pipe, false);
}
// Convert number of cache lines required to number of ways
- total_lines = dc->caps.max_cab_allocation_bytes / dc->caps.cache_line_size;
- lines_per_way = total_lines / dc->caps.cache_num_ways;
- num_ways = cache_lines_used / lines_per_way;
-
- if (cache_lines_used % lines_per_way > 0)
- num_ways++;
-
- for (i = 0; i < ctx->stream_count; i++) {
- stream = ctx->streams[i];
- for (j = 0; j < ctx->stream_status[i].plane_count; j++) {
- plane = ctx->stream_status[i].plane_states[j];
-
- if (stream->cursor_position.enable && plane &&
- dc->debug.alloc_extra_way_for_cursor &&
- cursor_size > 16384) {
- /* Cursor caching is not supported since it won't be on the same line.
- * So we need an extra line to accommodate it. With large cursors and a single 4k monitor
- * this case triggers corruption. If we're at the edge, then dont trigger display refresh
- * from MALL. We only need to cache cursor if its greater that 64x64 at 4 bpp.
- */
- num_ways++;
- /* We only expect one cursor plane */
- break;
- }
- }
- }
if (dc->debug.force_mall_ss_num_ways > 0) {
num_ways = dc->debug.force_mall_ss_num_ways;
+ } else {
+ num_ways = dcn32_helper_mall_bytes_to_ways(dc, mall_ss_size_bytes);
}
+
return num_ways;
}
@@ -365,6 +246,13 @@ bool dcn32_apply_idle_power_optimizations(struct dc *dc, bool enable)
if (!dc->ctx->dmub_srv)
return false;
+ for (i = 0; i < dc->current_state->stream_count; i++) {
+ /* MALL SS messaging is not supported with PSR at this time */
+ if (dc->current_state->streams[i] != NULL &&
+ dc->current_state->streams[i]->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED)
+ return false;
+ }
+
if (enable) {
if (dc->current_state) {
@@ -803,6 +691,26 @@ void dcn32_program_mall_pipe_config(struct dc *dc, struct dc_state *context)
}
}
+static void dcn32_initialize_min_clocks(struct dc *dc)
+{
+ struct dc_clocks *clocks = &dc->current_state->bw_ctx.bw.dcn.clk;
+
+ clocks->dcfclk_deep_sleep_khz = DCN3_2_DCFCLK_DS_INIT_KHZ;
+ clocks->dcfclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].dcfclk_mhz * 1000;
+ clocks->socclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].socclk_mhz * 1000;
+ clocks->dramclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].memclk_mhz * 1000;
+ clocks->dppclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].dppclk_mhz * 1000;
+ clocks->dispclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].dispclk_mhz * 1000;
+ clocks->ref_dtbclk_khz = dc->clk_mgr->bw_params->clk_table.entries[0].dtbclk_mhz * 1000;
+ clocks->fclk_p_state_change_support = true;
+ clocks->p_state_change_support = true;
+
+ dc->clk_mgr->funcs->update_clocks(
+ dc->clk_mgr,
+ dc->current_state,
+ true);
+}
+
void dcn32_init_hw(struct dc *dc)
{
struct abm **abms = dc->res_pool->multiple_abms;
@@ -884,7 +792,7 @@ void dcn32_init_hw(struct dc *dc)
hws->funcs.dsc_pg_control(hws, res_pool->dscs[i]->inst, false);
/* we want to turn off all dp displays before doing detection */
- dc_link_blank_all_dp_displays(dc);
+ link_blank_all_dp_displays(dc);
/* If taking control over from VBIOS, we may want to optimize our first
* mode set, so we need to skip powering down pipes until we know which
@@ -897,6 +805,18 @@ void dcn32_init_hw(struct dc *dc)
if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);
+
+ dcn32_initialize_min_clocks(dc);
+
+ /* On HW init, allow idle optimizations after pipes have been turned off.
+ *
+ * In certain D3 cases (i.e. BOCO / BOMACO) it's possible that hardware state
+ * is reset (i.e. not in idle at the time hw init is called), but software state
+ * still has idle_optimizations = true, so we must disable idle optimizations first
+ * (i.e. set false), then re-enable (set true).
+ */
+ dc_allow_idle_optimizations(dc, false);
+ dc_allow_idle_optimizations(dc, true);
}
/* In headless boot cases, DIG may be turned
@@ -1175,16 +1095,16 @@ unsigned int dcn32_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsign
two_pix_per_container = optc2_is_two_pixels_per_containter(&stream->timing);
odm_combine_factor = get_odm_config(pipe_ctx, NULL);
- if (is_dp_128b_132b_signal(pipe_ctx)) {
+ if (link_is_dp_128b_132b_signal(pipe_ctx)) {
*k1_div = PIXEL_RATE_DIV_BY_1;
*k2_div = PIXEL_RATE_DIV_BY_1;
- } else if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal) || dc_is_dvi_signal(pipe_ctx->stream->signal)) {
+ } else if (dc_is_hdmi_tmds_signal(stream->signal) || dc_is_dvi_signal(stream->signal)) {
*k1_div = PIXEL_RATE_DIV_BY_1;
if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
*k2_div = PIXEL_RATE_DIV_BY_2;
else
*k2_div = PIXEL_RATE_DIV_BY_4;
- } else if (dc_is_dp_signal(pipe_ctx->stream->signal) || dc_is_virtual_signal(pipe_ctx->stream->signal)) {
+ } else if (dc_is_dp_signal(stream->signal) || dc_is_virtual_signal(stream->signal)) {
if (two_pix_per_container) {
*k1_div = PIXEL_RATE_DIV_BY_1;
*k2_div = PIXEL_RATE_DIV_BY_2;
@@ -1239,7 +1159,7 @@ void dcn32_unblank_stream(struct pipe_ctx *pipe_ctx,
params.link_settings.link_rate = link_settings->link_rate;
- if (is_dp_128b_132b_signal(pipe_ctx)) {
+ if (link_is_dp_128b_132b_signal(pipe_ctx)) {
/* TODO - DP2.0 HW: Set ODM mode in dp hpo encoder here */
pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_unblank(
pipe_ctx->stream_res.hpo_dp_stream_enc,
@@ -1266,7 +1186,7 @@ bool dcn32_is_dp_dig_pixel_rate_div_policy(struct pipe_ctx *pipe_ctx)
if (!is_h_timing_divisible_by_2(pipe_ctx->stream))
return false;
- if (dc_is_dp_signal(pipe_ctx->stream->signal) && !is_dp_128b_132b_signal(pipe_ctx) &&
+ if (dc_is_dp_signal(pipe_ctx->stream->signal) && !link_is_dp_128b_132b_signal(pipe_ctx) &&
dc->debug.enable_dp_dig_pixel_rate_div_policy)
return true;
return false;
@@ -1300,7 +1220,7 @@ static void apply_symclk_on_tx_off_wa(struct dc_link *link)
pipe_ctx->clock_source->funcs->program_pix_clk(
pipe_ctx->clock_source,
&pipe_ctx->stream_res.pix_clk_params,
- dp_get_link_encoding_format(&pipe_ctx->link_config.dp_link_settings),
+ link_dp_get_encoding_format(&pipe_ctx->link_config.dp_link_settings),
&pipe_ctx->pll_settings);
link->phy_state.symclk_state = SYMCLK_ON_TX_OFF;
break;
@@ -1332,7 +1252,7 @@ void dcn32_disable_link_output(struct dc_link *link,
else if (dmcu != NULL && dmcu->funcs->lock_phy)
dmcu->funcs->unlock_phy(dmcu);
- dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY);
+ link_dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY);
apply_symclk_on_tx_off_wa(link);
}
@@ -1450,3 +1370,39 @@ void dcn32_update_dsc_pg(struct dc *dc,
}
}
}
+
+void dcn32_enable_phantom_streams(struct dc *dc, struct dc_state *context)
+{
+ unsigned int i;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ /* If an active, non-phantom pipe is being transitioned into a phantom
+ * pipe, wait for the double buffer update to complete first before we do
+ * ANY phantom pipe programming.
+ */
+ if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM &&
+ old_pipe->stream && old_pipe->stream->mall_stream_config.type != SUBVP_PHANTOM) {
+ old_pipe->stream_res.tg->funcs->wait_for_state(
+ old_pipe->stream_res.tg,
+ CRTC_STATE_VBLANK);
+ old_pipe->stream_res.tg->funcs->wait_for_state(
+ old_pipe->stream_res.tg,
+ CRTC_STATE_VACTIVE);
+ }
+ }
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (new_pipe->stream && new_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
+ // If old context or new context has phantom pipes, apply
+ // the phantom timings now. We can't change the phantom
+ // pipe configuration safely without driver acquiring
+ // the DMCUB lock first.
+ dc->hwss.apply_ctx_to_hw(dc, context);
+ break;
+ }
+ }
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h
index 7de36529cf99..e9e9534f3668 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.h
@@ -102,4 +102,6 @@ void dcn32_update_dsc_pg(struct dc *dc,
struct dc_state *context,
bool safe_to_disable);
+void dcn32_enable_phantom_streams(struct dc *dc, struct dc_state *context);
+
#endif /* __DC_HWSS_DCN32_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c
index dc4649458567..0694fa3a3680 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c
@@ -30,6 +30,7 @@
#include "dcn30/dcn30_hwseq.h"
#include "dcn31/dcn31_hwseq.h"
#include "dcn32_hwseq.h"
+#include "dcn32_init.h"
static const struct hw_sequencer_funcs dcn32_funcs = {
.program_gamut_remap = dcn10_program_gamut_remap,
@@ -94,7 +95,7 @@ static const struct hw_sequencer_funcs dcn32_funcs = {
.get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
.calc_vupdate_position = dcn10_calc_vupdate_position,
.apply_idle_power_optimizations = dcn32_apply_idle_power_optimizations,
- .does_plane_fit_in_mall = dcn30_does_plane_fit_in_mall,
+ .does_plane_fit_in_mall = NULL,
.set_backlight_level = dcn21_set_backlight_level,
.set_abm_immediate_disable = dcn21_set_abm_immediate_disable,
.hardware_release = dcn30_hardware_release,
@@ -106,6 +107,7 @@ static const struct hw_sequencer_funcs dcn32_funcs = {
.set_disp_pattern_generator = dcn30_set_disp_pattern_generator,
.get_dcc_en_bits = dcn10_get_dcc_en_bits,
.commit_subvp_config = dcn32_commit_subvp_config,
+ .enable_phantom_streams = dcn32_enable_phantom_streams,
.subvp_pipe_control_lock = dcn32_subvp_pipe_control_lock,
.update_visual_confirm_color = dcn20_update_visual_confirm_color,
.update_phantom_vp_position = dcn32_update_phantom_vp_position,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
index e4dbc8353ea3..74e50c09bb62 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
@@ -57,7 +57,6 @@
#include "dcn31/dcn31_hpo_dp_stream_encoder.h"
#include "dcn31/dcn31_hpo_dp_link_encoder.h"
#include "dcn32/dcn32_hpo_dp_link_encoder.h"
-#include "dc_link_dp.h"
#include "dcn31/dcn31_apg.h"
#include "dcn31/dcn31_dio_link_encoder.h"
#include "dcn32/dcn32_dio_link_encoder.h"
@@ -69,7 +68,7 @@
#include "dml/display_mode_vba.h"
#include "dcn32/dcn32_dccg.h"
#include "dcn10/dcn10_resource.h"
-#include "dc_link_ddc.h"
+#include "link.h"
#include "dcn31/dcn31_panel_cntl.h"
#include "dcn30/dcn30_dwb.h"
@@ -726,6 +725,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.allow_sw_cursor_fallback = false, // Linux can't do SW cursor "fallback"
.alloc_extra_way_for_cursor = true,
.min_prefetch_in_strobe_ns = 60000, // 60us
+ .disable_unbounded_requesting = false,
};
static const struct dc_debug_options debug_defaults_diags = {
@@ -1507,7 +1507,7 @@ static void dcn32_resource_destruct(struct dcn32_resource_pool *pool)
dcn_dccg_destroy(&pool->base.dccg);
if (pool->base.oem_device != NULL)
- dal_ddc_service_destroy(&pool->base.oem_device);
+ link_destroy_ddc_service(&pool->base.oem_device);
}
@@ -2149,13 +2149,19 @@ static bool dcn32_resource_construct(
dc->caps.max_cursor_size = 64;
dc->caps.min_horizontal_blanking_period = 80;
dc->caps.dmdata_alloc_size = 2048;
- dc->caps.mall_size_per_mem_channel = 0;
+ dc->caps.mall_size_per_mem_channel = 4;
dc->caps.mall_size_total = 0;
dc->caps.cursor_cache_size = dc->caps.max_cursor_size * dc->caps.max_cursor_size * 8;
dc->caps.cache_line_size = 64;
dc->caps.cache_num_ways = 16;
- dc->caps.max_cab_allocation_bytes = 67108864; // 64MB = 1024 * 1024 * 64
+
+ /* Calculate the available MALL space */
+ dc->caps.max_cab_allocation_bytes = dcn32_calc_num_avail_chans_for_mall(
+ dc, dc->ctx->dc_bios->vram_info.num_chans) *
+ dc->caps.mall_size_per_mem_channel * 1024 * 1024;
+ dc->caps.mall_size_total = dc->caps.max_cab_allocation_bytes;
+
dc->caps.subvp_fw_processing_delay_us = 15;
dc->caps.subvp_drr_max_vblank_margin_us = 40;
dc->caps.subvp_prefetch_end_to_mall_start_us = 15;
@@ -2449,7 +2455,7 @@ static bool dcn32_resource_construct(
ddc_init_data.id.id = dc->ctx->dc_bios->fw_info.oem_i2c_obj_id;
ddc_init_data.id.enum_id = 0;
ddc_init_data.id.type = OBJECT_TYPE_GENERIC;
- pool->base.oem_device = dal_ddc_service_create(&ddc_init_data);
+ pool->base.oem_device = link_create_ddc_service(&ddc_init_data);
} else {
pool->base.oem_device = NULL;
}
@@ -2592,3 +2598,55 @@ struct pipe_ctx *dcn32_acquire_idle_pipe_for_head_pipe_in_layer(
return idle_pipe;
}
+
+unsigned int dcn32_calc_num_avail_chans_for_mall(struct dc *dc, int num_chans)
+{
+ /*
+ * DCN32 and DCN321 SKUs may have different sizes for MALL
+ * but we may not be able to access all the MALL space.
+ * If the num_chans is power of 2, then we can access all
+ * of the available MALL space. Otherwise, we can only
+ * access:
+ *
+ * max_cab_size_in_bytes = total_cache_size_in_bytes *
+ * ((2^floor(log2(num_chans)))/num_chans)
+ *
+ * Calculating the MALL sizes for all available SKUs, we
+ * have come up with the follow simplified check.
+ * - we have max_chans which provides the max MALL size.
+ * Each chans supports 4MB of MALL so:
+ *
+ * total_cache_size_in_bytes = max_chans * 4 MB
+ *
+ * - we have avail_chans which shows the number of channels
+ * we can use if we can't access the entire MALL space.
+ * It is generally half of max_chans
+ * - so we use the following checks:
+ *
+ * if (num_chans == max_chans), return max_chans
+ * if (num_chans < max_chans), return avail_chans
+ *
+ * - exception is GC_11_0_0 where we can't access max_chans,
+ * so we define max_avail_chans as the maximum available
+ * MALL space
+ *
+ */
+ int gc_11_0_0_max_chans = 48;
+ int gc_11_0_0_max_avail_chans = 32;
+ int gc_11_0_0_avail_chans = 16;
+ int gc_11_0_3_max_chans = 16;
+ int gc_11_0_3_avail_chans = 8;
+ int gc_11_0_2_max_chans = 8;
+ int gc_11_0_2_avail_chans = 4;
+
+ if (ASICREV_IS_GC_11_0_0(dc->ctx->asic_id.hw_internal_rev)) {
+ return (num_chans == gc_11_0_0_max_chans) ?
+ gc_11_0_0_max_avail_chans : gc_11_0_0_avail_chans;
+ } else if (ASICREV_IS_GC_11_0_2(dc->ctx->asic_id.hw_internal_rev)) {
+ return (num_chans == gc_11_0_2_max_chans) ?
+ gc_11_0_2_max_chans : gc_11_0_2_avail_chans;
+ } else { // if (ASICREV_IS_GC_11_0_3(dc->ctx->asic_id.hw_internal_rev)) {
+ return (num_chans == gc_11_0_3_max_chans) ?
+ gc_11_0_3_max_chans : gc_11_0_3_avail_chans;
+ }
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h
index 13fbc574910b..aca928edc4e3 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h
@@ -38,6 +38,7 @@
#define DCN3_2_MBLK_HEIGHT_4BPE 128
#define DCN3_2_MBLK_HEIGHT_8BPE 64
#define DCN3_2_VMIN_DISPCLK_HZ 717000000
+#define DCN3_2_DCFCLK_DS_INIT_KHZ 10000 // Choose 10Mhz for init DCFCLK DS freq
#define TO_DCN32_RES_POOL(pool)\
container_of(pool, struct dcn32_resource_pool, base)
@@ -96,8 +97,17 @@ void dcn32_calculate_wm_and_dlg(
int pipe_cnt,
int vlevel);
-uint32_t dcn32_helper_calculate_num_ways_for_subvp
- (struct dc *dc,
+uint32_t dcn32_helper_mall_bytes_to_ways(
+ struct dc *dc,
+ uint32_t total_size_in_mall_bytes);
+
+uint32_t dcn32_helper_calculate_mall_bytes_for_cursor(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ bool ignore_cursor_buf);
+
+uint32_t dcn32_helper_calculate_num_ways_for_subvp(
+ struct dc *dc,
struct dc_state *context);
void dcn32_merge_pipes_for_subvp(struct dc *dc,
@@ -112,6 +122,8 @@ bool dcn32_subvp_in_use(struct dc *dc,
bool dcn32_mpo_in_use(struct dc_state *context);
bool dcn32_any_surfaces_rotated(struct dc *dc, struct dc_state *context);
+bool dcn32_is_center_timing(struct pipe_ctx *pipe);
+bool dcn32_is_psr_capable(struct pipe_ctx *pipe);
struct pipe_ctx *dcn32_acquire_idle_pipe_for_head_pipe_in_layer(
struct dc_state *state,
@@ -134,6 +146,12 @@ void dcn32_restore_mall_state(struct dc *dc,
struct dc_state *context,
struct mall_temp_config *temp_config);
+bool dcn32_allow_subvp_with_active_margin(struct pipe_ctx *pipe);
+
+unsigned int dcn32_calc_num_avail_chans_for_mall(struct dc *dc, int num_chans);
+
+double dcn32_determine_max_vratio_prefetch(struct dc *dc, struct dc_state *context);
+
/* definitions for run time init of reg offsets */
/* CLK SRC */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
index 783935c4e664..3a2d7bcc4b6d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
@@ -33,13 +33,75 @@ static bool is_dual_plane(enum surface_pixel_format format)
return format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN || format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA;
}
+
+uint32_t dcn32_helper_mall_bytes_to_ways(
+ struct dc *dc,
+ uint32_t total_size_in_mall_bytes)
+{
+ uint32_t cache_lines_used, lines_per_way, total_cache_lines, num_ways;
+
+ /* add 2 lines for worst case alignment */
+ cache_lines_used = total_size_in_mall_bytes / dc->caps.cache_line_size + 2;
+
+ total_cache_lines = dc->caps.max_cab_allocation_bytes / dc->caps.cache_line_size;
+ lines_per_way = total_cache_lines / dc->caps.cache_num_ways;
+ num_ways = cache_lines_used / lines_per_way;
+ if (cache_lines_used % lines_per_way > 0)
+ num_ways++;
+
+ return num_ways;
+}
+
+uint32_t dcn32_helper_calculate_mall_bytes_for_cursor(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ bool ignore_cursor_buf)
+{
+ struct hubp *hubp = pipe_ctx->plane_res.hubp;
+ uint32_t cursor_size = hubp->curs_attr.pitch * hubp->curs_attr.height;
+ uint32_t cursor_bpp = 4;
+ uint32_t cursor_mall_size_bytes = 0;
+
+ switch (pipe_ctx->stream->cursor_attributes.color_format) {
+ case CURSOR_MODE_MONO:
+ cursor_size /= 2;
+ cursor_bpp = 4;
+ break;
+ case CURSOR_MODE_COLOR_1BIT_AND:
+ case CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA:
+ case CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA:
+ cursor_size *= 4;
+ cursor_bpp = 4;
+ break;
+
+ case CURSOR_MODE_COLOR_64BIT_FP_PRE_MULTIPLIED:
+ case CURSOR_MODE_COLOR_64BIT_FP_UN_PRE_MULTIPLIED:
+ cursor_size *= 8;
+ cursor_bpp = 8;
+ break;
+ }
+
+ /* only count if cursor is enabled, and if additional allocation needed outside of the
+ * DCN cursor buffer
+ */
+ if (pipe_ctx->stream->cursor_position.enable && (ignore_cursor_buf ||
+ cursor_size > 16384)) {
+ /* cursor_num_mblk = CEILING(num_cursors*cursor_width*cursor_width*cursor_Bpe/mblk_bytes, 1)
+ * Note: add 1 mblk in case of cursor misalignment
+ */
+ cursor_mall_size_bytes = ((cursor_size + DCN3_2_MALL_MBLK_SIZE_BYTES - 1) /
+ DCN3_2_MALL_MBLK_SIZE_BYTES + 1) * DCN3_2_MALL_MBLK_SIZE_BYTES;
+ }
+
+ return cursor_mall_size_bytes;
+}
+
/**
* ********************************************************************************************
* dcn32_helper_calculate_num_ways_for_subvp: Calculate number of ways needed for SubVP
*
- * This function first checks the bytes required per pixel on the SubVP pipe, then calculates
- * the total number of pixels required in the SubVP MALL region. These are used to calculate
- * the number of cache lines used (then number of ways required) for SubVP MCLK switching.
+ * Gets total allocation required for the phantom viewport calculated by DML in bytes and
+ * converts to number of cache ways.
*
* @param [in] dc: current dc state
* @param [in] context: new dc state
@@ -48,106 +110,19 @@ static bool is_dual_plane(enum surface_pixel_format format)
*
* ********************************************************************************************
*/
-uint32_t dcn32_helper_calculate_num_ways_for_subvp(struct dc *dc, struct dc_state *context)
+uint32_t dcn32_helper_calculate_num_ways_for_subvp(
+ struct dc *dc,
+ struct dc_state *context)
{
- uint32_t num_ways = 0;
- uint32_t bytes_per_pixel = 0;
- uint32_t cache_lines_used = 0;
- uint32_t lines_per_way = 0;
- uint32_t total_cache_lines = 0;
- uint32_t bytes_in_mall = 0;
- uint32_t num_mblks = 0;
- uint32_t cache_lines_per_plane = 0;
- uint32_t i = 0, j = 0;
- uint16_t mblk_width = 0;
- uint16_t mblk_height = 0;
- uint32_t full_vp_width_blk_aligned = 0;
- uint32_t full_vp_height_blk_aligned = 0;
- uint32_t mall_alloc_width_blk_aligned = 0;
- uint32_t mall_alloc_height_blk_aligned = 0;
- uint16_t full_vp_height = 0;
- bool subvp_in_use = false;
-
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
-
- /* Find the phantom pipes.
- * - For pipe split case we need to loop through the bottom and next ODM
- * pipes or only half the viewport size is counted
- */
- if (pipe->stream && pipe->plane_state &&
- pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
- struct pipe_ctx *main_pipe = NULL;
-
- subvp_in_use = true;
- /* Get full viewport height from main pipe (required for MBLK calculation) */
- for (j = 0; j < dc->res_pool->pipe_count; j++) {
- main_pipe = &context->res_ctx.pipe_ctx[j];
- if (main_pipe->stream == pipe->stream->mall_stream_config.paired_stream) {
- full_vp_height = main_pipe->plane_res.scl_data.viewport.height;
- break;
- }
- }
-
- bytes_per_pixel = pipe->plane_state->format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 ? 8 : 4;
- mblk_width = DCN3_2_MBLK_WIDTH;
- mblk_height = bytes_per_pixel == 4 ? DCN3_2_MBLK_HEIGHT_4BPE : DCN3_2_MBLK_HEIGHT_8BPE;
-
- /* full_vp_width_blk_aligned = FLOOR(vp_x_start + full_vp_width + blk_width - 1, blk_width) -
- * FLOOR(vp_x_start, blk_width)
- */
- full_vp_width_blk_aligned = ((pipe->plane_res.scl_data.viewport.x +
- pipe->plane_res.scl_data.viewport.width + mblk_width - 1) / mblk_width * mblk_width) -
- (pipe->plane_res.scl_data.viewport.x / mblk_width * mblk_width);
-
- /* full_vp_height_blk_aligned = FLOOR(vp_y_start + full_vp_height + blk_height - 1, blk_height) -
- * FLOOR(vp_y_start, blk_height)
- */
- full_vp_height_blk_aligned = ((pipe->plane_res.scl_data.viewport.y +
- full_vp_height + mblk_height - 1) / mblk_height * mblk_height) -
- (pipe->plane_res.scl_data.viewport.y / mblk_height * mblk_height);
-
- /* mall_alloc_width_blk_aligned_l/c = full_vp_width_blk_aligned_l/c */
- mall_alloc_width_blk_aligned = full_vp_width_blk_aligned;
-
- /* mall_alloc_height_blk_aligned_l/c = CEILING(sub_vp_height_l/c - 1, blk_height_l/c) + blk_height_l/c */
- mall_alloc_height_blk_aligned = (pipe->plane_res.scl_data.viewport.height - 1 + mblk_height - 1) /
- mblk_height * mblk_height + mblk_height;
-
- /* full_mblk_width_ub_l/c = mall_alloc_width_blk_aligned_l/c;
- * full_mblk_height_ub_l/c = mall_alloc_height_blk_aligned_l/c;
- * num_mblk_l/c = (full_mblk_width_ub_l/c / mblk_width_l/c) * (full_mblk_height_ub_l/c / mblk_height_l/c);
- * (Should be divisible, but round up if not)
- */
- num_mblks = ((mall_alloc_width_blk_aligned + mblk_width - 1) / mblk_width) *
- ((mall_alloc_height_blk_aligned + mblk_height - 1) / mblk_height);
-
- /*For DCC:
- * meta_num_mblk = CEILING(meta_pitch*full_vp_height*Bpe/256/mblk_bytes, 1)
- */
- if (pipe->plane_state->dcc.enable)
- num_mblks += (pipe->plane_state->dcc.meta_pitch * pipe->plane_res.scl_data.viewport.height * bytes_per_pixel +
- (256 * DCN3_2_MALL_MBLK_SIZE_BYTES) - 1) / (256 * DCN3_2_MALL_MBLK_SIZE_BYTES);
-
- bytes_in_mall = num_mblks * DCN3_2_MALL_MBLK_SIZE_BYTES;
- // cache lines used is total bytes / cache_line size. Add +2 for worst case alignment
- // (MALL is 64-byte aligned)
- cache_lines_per_plane = bytes_in_mall / dc->caps.cache_line_size + 2;
-
- cache_lines_used += cache_lines_per_plane;
+ if (context->bw_ctx.bw.dcn.mall_subvp_size_bytes > 0) {
+ if (dc->debug.force_subvp_num_ways) {
+ return dc->debug.force_subvp_num_ways;
+ } else {
+ return dcn32_helper_mall_bytes_to_ways(dc, context->bw_ctx.bw.dcn.mall_subvp_size_bytes);
}
+ } else {
+ return 0;
}
-
- total_cache_lines = dc->caps.max_cab_allocation_bytes / dc->caps.cache_line_size;
- lines_per_way = total_cache_lines / dc->caps.cache_num_ways;
- num_ways = cache_lines_used / lines_per_way;
- if (cache_lines_used % lines_per_way > 0)
- num_ways++;
-
- if (subvp_in_use && dc->debug.force_subvp_num_ways > 0)
- num_ways = dc->debug.force_subvp_num_ways;
-
- return num_ways;
}
void dcn32_merge_pipes_for_subvp(struct dc *dc,
@@ -255,6 +230,37 @@ bool dcn32_any_surfaces_rotated(struct dc *dc, struct dc_state *context)
return false;
}
+bool dcn32_is_center_timing(struct pipe_ctx *pipe)
+{
+ bool is_center_timing = false;
+
+ if (pipe->stream) {
+ if (pipe->stream->timing.v_addressable != pipe->stream->dst.height ||
+ pipe->stream->timing.v_addressable != pipe->stream->src.height) {
+ is_center_timing = true;
+ }
+ }
+
+ if (pipe->plane_state) {
+ if (pipe->stream->timing.v_addressable != pipe->plane_state->dst_rect.height &&
+ pipe->stream->timing.v_addressable != pipe->plane_state->src_rect.height) {
+ is_center_timing = true;
+ }
+ }
+
+ return is_center_timing;
+}
+
+bool dcn32_is_psr_capable(struct pipe_ctx *pipe)
+{
+ bool psr_capable = false;
+
+ if (pipe->stream && pipe->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED) {
+ psr_capable = true;
+ }
+ return psr_capable;
+}
+
/**
* *******************************************************************************************
* dcn32_determine_det_override: Determine DET allocation for each pipe
@@ -357,6 +363,7 @@ void dcn32_set_det_allocations(struct dc *dc, struct dc_state *context,
int i, pipe_cnt;
struct resource_context *res_ctx = &context->res_ctx;
struct pipe_ctx *pipe;
+ bool disable_unbounded_requesting = dc->debug.disable_z9_mpc || dc->debug.disable_unbounded_requesting;
for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
@@ -373,7 +380,7 @@ void dcn32_set_det_allocations(struct dc *dc, struct dc_state *context,
*/
if (pipe_cnt == 1) {
pipes[0].pipe.src.det_size_override = DCN3_2_MAX_DET_SIZE;
- if (pipe->plane_state && !dc->debug.disable_z9_mpc && pipe->plane_state->tiling_info.gfx9.swizzle != DC_SW_LINEAR) {
+ if (pipe->plane_state && !disable_unbounded_requesting && pipe->plane_state->tiling_info.gfx9.swizzle != DC_SW_LINEAR) {
if (!is_dual_plane(pipe->plane_state->format)) {
pipes[0].pipe.src.det_size_override = DCN3_2_DEFAULT_DET_SIZE;
pipes[0].pipe.src.unbounded_req_mode = true;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_dio_link_encoder.c
index fa9b6603cfd3..13be5f06d987 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_dio_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_dio_link_encoder.c
@@ -31,7 +31,6 @@
#include "dcn321_dio_link_encoder.h"
#include "dcn31/dcn31_dio_link_encoder.h"
#include "stream_encoder.h"
-#include "i2caux_interface.h"
#include "dc_bios_types.h"
#include "gpio_service_interface.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
index d1f36df03c2e..55f918b44077 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
@@ -60,7 +60,6 @@
#include "dcn31/dcn31_hpo_dp_stream_encoder.h"
#include "dcn31/dcn31_hpo_dp_link_encoder.h"
#include "dcn32/dcn32_hpo_dp_link_encoder.h"
-#include "dc_link_dp.h"
#include "dcn31/dcn31_apg.h"
#include "dcn31/dcn31_dio_link_encoder.h"
#include "dcn32/dcn32_dio_link_encoder.h"
@@ -73,7 +72,7 @@
#include "dml/display_mode_vba.h"
#include "dcn32/dcn32_dccg.h"
#include "dcn10/dcn10_resource.h"
-#include "dc_link_ddc.h"
+#include "link.h"
#include "dcn31/dcn31_panel_cntl.h"
#include "dcn30/dcn30_dwb.h"
@@ -724,6 +723,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.allow_sw_cursor_fallback = false, // Linux can't do SW cursor "fallback"
.alloc_extra_way_for_cursor = true,
.min_prefetch_in_strobe_ns = 60000, // 60us
+ .disable_unbounded_requesting = false,
};
static const struct dc_debug_options debug_defaults_diags = {
@@ -1492,7 +1492,7 @@ static void dcn321_resource_destruct(struct dcn321_resource_pool *pool)
dcn_dccg_destroy(&pool->base.dccg);
if (pool->base.oem_device != NULL)
- dal_ddc_service_destroy(&pool->base.oem_device);
+ link_destroy_ddc_service(&pool->base.oem_device);
}
@@ -1702,12 +1702,18 @@ static bool dcn321_resource_construct(
dc->caps.max_cursor_size = 64;
dc->caps.min_horizontal_blanking_period = 80;
dc->caps.dmdata_alloc_size = 2048;
- dc->caps.mall_size_per_mem_channel = 0;
+ dc->caps.mall_size_per_mem_channel = 4;
dc->caps.mall_size_total = 0;
dc->caps.cursor_cache_size = dc->caps.max_cursor_size * dc->caps.max_cursor_size * 8;
dc->caps.cache_line_size = 64;
dc->caps.cache_num_ways = 16;
- dc->caps.max_cab_allocation_bytes = 33554432; // 32MB = 1024 * 1024 * 32
+
+ /* Calculate the available MALL space */
+ dc->caps.max_cab_allocation_bytes = dcn32_calc_num_avail_chans_for_mall(
+ dc, dc->ctx->dc_bios->vram_info.num_chans) *
+ dc->caps.mall_size_per_mem_channel * 1024 * 1024;
+ dc->caps.mall_size_total = dc->caps.max_cab_allocation_bytes;
+
dc->caps.subvp_fw_processing_delay_us = 15;
dc->caps.subvp_drr_max_vblank_margin_us = 40;
dc->caps.subvp_prefetch_end_to_mall_start_us = 15;
@@ -1990,7 +1996,7 @@ static bool dcn321_resource_construct(
ddc_init_data.id.id = dc->ctx->dc_bios->fw_info.oem_i2c_obj_id;
ddc_init_data.id.enum_id = 0;
ddc_init_data.id.type = OBJECT_TYPE_GENERIC;
- pool->base.oem_device = dal_ddc_service_create(&ddc_init_data);
+ pool->base.oem_device = link_create_ddc_service(&ddc_init_data);
} else {
pool->base.oem_device = NULL;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dm_helpers.h b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
index af1c50ed905a..7ce9a5b6c33b 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_helpers.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
@@ -161,6 +161,12 @@ enum dc_edid_status dm_helpers_read_local_edid(
struct dc_link *link,
struct dc_sink *sink);
+bool dm_helpers_dp_handle_test_pattern_request(
+ struct dc_context *ctx,
+ const struct dc_link *link,
+ union link_test_pattern dpcd_test_pattern,
+ union test_misc dpcd_test_params);
+
void dm_set_dcn_clocks(
struct dc_context *ctx,
struct dc_clocks *clks);
@@ -193,6 +199,7 @@ int dm_helpers_dmub_set_config_sync(struct dc_context *ctx,
const struct dc_link *link,
struct set_config_cmd_payload *payload,
enum set_config_status *operation_result);
+enum adaptive_sync_type dm_get_adaptive_sync_support_type(struct dc_link *link);
enum dc_edid_status dm_helpers_get_sbios_edid(struct dc_link *link, struct dc_edid *edid);
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
index c26da3bb2892..d3ba65efe1d2 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
@@ -26,12 +26,12 @@
#include "resource.h"
#include "clk_mgr.h"
-#include "dc_link_dp.h"
#include "dchubbub.h"
#include "dcn20/dcn20_resource.h"
#include "dcn21/dcn21_resource.h"
#include "clk_mgr/dcn21/rn_clk_mgr.h"
+#include "link.h"
#include "dcn20_fpu.h"
#define DC_LOGGER_INIT(logger)
@@ -938,7 +938,7 @@ static bool is_dtbclk_required(struct dc *dc, struct dc_state *context)
for (i = 0; i < dc->res_pool->pipe_count; i++) {
if (!context->res_ctx.pipe_ctx[i].stream)
continue;
- if (is_dp_128b_132b_signal(&context->res_ctx.pipe_ctx[i]))
+ if (link_is_dp_128b_132b_signal(&context->res_ctx.pipe_ctx[i]))
return true;
}
return false;
@@ -949,7 +949,6 @@ static enum dcn_zstate_support_state decide_zstate_support(struct dc *dc, struc
int plane_count;
int i;
unsigned int optimized_min_dst_y_next_start_us;
- bool allow_z8 = context->bw_ctx.dml.vba.StutterPeriod > 1000.0;
plane_count = 0;
optimized_min_dst_y_next_start_us = 0;
@@ -974,6 +973,8 @@ static enum dcn_zstate_support_state decide_zstate_support(struct dc *dc, struc
else if (context->stream_count == 1 && context->streams[0]->signal == SIGNAL_TYPE_EDP) {
struct dc_link *link = context->streams[0]->sink->link;
struct dc_stream_status *stream_status = &context->stream_status[0];
+ bool allow_z8 = context->bw_ctx.dml.vba.StutterPeriod > 1000.0;
+ bool is_pwrseq0 = link->link_index == 0;
if (dc_extended_blank_supported(dc)) {
for (i = 0; i < dc->res_pool->pipe_count; i++) {
@@ -986,23 +987,55 @@ static enum dcn_zstate_support_state decide_zstate_support(struct dc *dc, struc
}
}
}
- /* zstate only supported on PWRSEQ0 and when there's <2 planes*/
- if (link->link_index != 0 || stream_status->plane_count > 1)
+
+ /* Don't support multi-plane configurations */
+ if (stream_status->plane_count > 1)
return DCN_ZSTATE_SUPPORT_DISALLOW;
- if (context->bw_ctx.dml.vba.StutterPeriod > 5000.0 || optimized_min_dst_y_next_start_us > 5000)
+ if (is_pwrseq0 && (context->bw_ctx.dml.vba.StutterPeriod > 5000.0 || optimized_min_dst_y_next_start_us > 5000))
return DCN_ZSTATE_SUPPORT_ALLOW;
- else if (link->psr_settings.psr_version == DC_PSR_VERSION_1 && !link->panel_config.psr.disable_psr)
+ else if (is_pwrseq0 && link->psr_settings.psr_version == DC_PSR_VERSION_1 && !link->panel_config.psr.disable_psr)
return allow_z8 ? DCN_ZSTATE_SUPPORT_ALLOW_Z8_Z10_ONLY : DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY;
else
return allow_z8 ? DCN_ZSTATE_SUPPORT_ALLOW_Z8_ONLY : DCN_ZSTATE_SUPPORT_DISALLOW;
- } else if (allow_z8) {
- return DCN_ZSTATE_SUPPORT_ALLOW_Z8_ONLY;
} else {
return DCN_ZSTATE_SUPPORT_DISALLOW;
}
}
+static void dcn20_adjust_freesync_v_startup(
+ const struct dc_crtc_timing *dc_crtc_timing, int *vstartup_start)
+{
+ struct dc_crtc_timing patched_crtc_timing;
+ uint32_t asic_blank_end = 0;
+ uint32_t asic_blank_start = 0;
+ uint32_t newVstartup = 0;
+
+ patched_crtc_timing = *dc_crtc_timing;
+
+ if (patched_crtc_timing.flags.INTERLACE == 1) {
+ if (patched_crtc_timing.v_front_porch < 2)
+ patched_crtc_timing.v_front_porch = 2;
+ } else {
+ if (patched_crtc_timing.v_front_porch < 1)
+ patched_crtc_timing.v_front_porch = 1;
+ }
+
+ /* blank_start = frame end - front porch */
+ asic_blank_start = patched_crtc_timing.v_total -
+ patched_crtc_timing.v_front_porch;
+
+ /* blank_end = blank_start - active */
+ asic_blank_end = asic_blank_start -
+ patched_crtc_timing.v_border_bottom -
+ patched_crtc_timing.v_addressable -
+ patched_crtc_timing.v_border_top;
+
+ newVstartup = asic_blank_end + (patched_crtc_timing.v_total - asic_blank_start);
+
+ *vstartup_start = ((newVstartup > *vstartup_start) ? newVstartup : *vstartup_start);
+}
+
void dcn20_calculate_dlg_params(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
@@ -1062,6 +1095,11 @@ void dcn20_calculate_dlg_params(
context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz =
pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000;
context->res_ctx.pipe_ctx[i].pipe_dlg_param = pipes[pipe_idx].pipe.dest;
+ if (context->res_ctx.pipe_ctx[i].stream->adaptive_sync_infopacket.valid)
+ dcn20_adjust_freesync_v_startup(
+ &context->res_ctx.pipe_ctx[i].stream->timing,
+ &context->res_ctx.pipe_ctx[i].pipe_dlg_param.vstartup_start);
+
pipe_idx++;
}
/*save a original dppclock copy*/
@@ -1302,7 +1340,7 @@ int dcn20_populate_dml_pipes_from_context(
case SIGNAL_TYPE_DISPLAY_PORT_MST:
case SIGNAL_TYPE_DISPLAY_PORT:
pipes[pipe_cnt].dout.output_type = dm_dp;
- if (is_dp_128b_132b_signal(&res_ctx->pipe_ctx[i]))
+ if (link_is_dp_128b_132b_signal(&res_ctx->pipe_ctx[i]))
pipes[pipe_cnt].dout.output_type = dm_dp2p0;
break;
case SIGNAL_TYPE_EDP:
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c
index d3b5b6fedf04..6266b0788387 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c
@@ -3897,14 +3897,14 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine = mode_lib->vba.PixelClock[k] / 2
* (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0);
- locals->ODMCombineEnablePerState[i][k] = false;
+ locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_disabled;
mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine;
if (mode_lib->vba.ODMCapability) {
if (locals->PlaneRequiredDISPCLKWithoutODMCombine > mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity) {
- locals->ODMCombineEnablePerState[i][k] = true;
+ locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1;
mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine;
} else if (locals->HActive[k] > DCN20_MAX_420_IMAGE_WIDTH && locals->OutputFormat[k] == dm_420) {
- locals->ODMCombineEnablePerState[i][k] = true;
+ locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1;
mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine;
}
}
@@ -3957,7 +3957,7 @@ void dml20_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
locals->RequiredDISPCLK[i][j] = 0.0;
locals->DISPCLK_DPPCLK_Support[i][j] = true;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
- locals->ODMCombineEnablePerState[i][k] = false;
+ locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_disabled;
if (locals->SwathWidthYSingleDPP[k] <= locals->MaximumSwathWidth[k]) {
locals->NoOfDPP[i][j][k] = 1;
locals->RequiredDPPCLK[i][j][k] = locals->MinDPPCLKUsingSingleDPP[k]
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
index edd098c7eb92..989d83ee3842 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
@@ -4008,17 +4008,17 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine = mode_lib->vba.PixelClock[k] / 2
* (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0);
- locals->ODMCombineEnablePerState[i][k] = false;
+ locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_disabled;
mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine;
if (mode_lib->vba.ODMCapability) {
if (locals->PlaneRequiredDISPCLKWithoutODMCombine > MaxMaxDispclkRoundedDown) {
- locals->ODMCombineEnablePerState[i][k] = true;
+ locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1;
mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine;
} else if (locals->DSCEnabled[k] && (locals->HActive[k] > DCN20_MAX_DSC_IMAGE_WIDTH)) {
- locals->ODMCombineEnablePerState[i][k] = true;
+ locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1;
mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine;
} else if (locals->HActive[k] > DCN20_MAX_420_IMAGE_WIDTH && locals->OutputFormat[k] == dm_420) {
- locals->ODMCombineEnablePerState[i][k] = true;
+ locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1;
mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine;
}
}
@@ -4071,7 +4071,7 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode
locals->RequiredDISPCLK[i][j] = 0.0;
locals->DISPCLK_DPPCLK_Support[i][j] = true;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
- locals->ODMCombineEnablePerState[i][k] = false;
+ locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_disabled;
if (locals->SwathWidthYSingleDPP[k] <= locals->MaximumSwathWidth[k]) {
locals->NoOfDPP[i][j][k] = 1;
locals->RequiredDPPCLK[i][j][k] = locals->MinDPPCLKUsingSingleDPP[k]
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
index 1d84ae50311d..b7c2844d0cbe 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
@@ -4102,17 +4102,17 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine = mode_lib->vba.PixelClock[k] / 2
* (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0);
- locals->ODMCombineEnablePerState[i][k] = false;
+ locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_disabled;
mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine;
if (mode_lib->vba.ODMCapability) {
if (locals->PlaneRequiredDISPCLKWithoutODMCombine > MaxMaxDispclkRoundedDown) {
- locals->ODMCombineEnablePerState[i][k] = true;
+ locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1;
mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine;
} else if (locals->DSCEnabled[k] && (locals->HActive[k] > DCN21_MAX_DSC_IMAGE_WIDTH)) {
- locals->ODMCombineEnablePerState[i][k] = true;
+ locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1;
mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine;
} else if (locals->HActive[k] > DCN21_MAX_420_IMAGE_WIDTH && locals->OutputFormat[k] == dm_420) {
- locals->ODMCombineEnablePerState[i][k] = true;
+ locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1;
mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine;
}
}
@@ -4165,7 +4165,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
locals->RequiredDISPCLK[i][j] = 0.0;
locals->DISPCLK_DPPCLK_Support[i][j] = true;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
- locals->ODMCombineEnablePerState[i][k] = false;
+ locals->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_disabled;
if (locals->SwathWidthYSingleDPP[k] <= locals->MaximumSwathWidth[k]) {
locals->NoOfDPP[i][j][k] = 1;
locals->RequiredDPPCLK[i][j][k] = locals->MinDPPCLKUsingSingleDPP[k]
@@ -5230,7 +5230,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.ODMCombineEnabled[k] =
locals->ODMCombineEnablePerState[mode_lib->vba.VoltageLevel][k];
} else {
- mode_lib->vba.ODMCombineEnabled[k] = false;
+ mode_lib->vba.ODMCombineEnabled[k] = dm_odm_combine_mode_disabled;
}
mode_lib->vba.DSCEnabled[k] =
locals->RequiresDSC[mode_lib->vba.VoltageLevel][k];
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c
index d4c0f9cdac8e..4fa636364793 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c
@@ -634,7 +634,7 @@ int dcn30_find_dummy_latency_index_for_fw_based_mclk_switch(struct dc *dc,
while (dummy_latency_index < max_latency_table_entries) {
context->bw_ctx.dml.soc.dram_clock_change_latency_us =
dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dummy_pstate_latency_us;
- dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false);
+ dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false, true);
if (context->bw_ctx.dml.soc.allow_dram_self_refresh_or_dram_clock_change_in_vblank ==
dm_allow_self_refresh_and_mclk_switch)
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
index ec351c8418cb..27f488405335 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
@@ -878,7 +878,9 @@ static bool CalculatePrefetchSchedule(
double DSTTotalPixelsAfterScaler;
double LineTime;
double dst_y_prefetch_equ;
+#ifdef __DML_VBA_DEBUG__
double Tsw_oto;
+#endif
double prefetch_bw_oto;
double prefetch_bw_pr;
double Tvm_oto;
@@ -1060,7 +1062,9 @@ static bool CalculatePrefetchSchedule(
min_Lsw = dml_max(1, dml_max(PrefetchSourceLinesY, PrefetchSourceLinesC) / max_vratio_pre);
Lsw_oto = dml_ceil(4 * dml_max(prefetch_sw_bytes / prefetch_bw_oto / LineTime, min_Lsw), 1) / 4;
+#ifdef __DML_VBA_DEBUG__
Tsw_oto = Lsw_oto * LineTime;
+#endif
#ifdef __DML_VBA_DEBUG__
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
index 6a1cf6adea77..acda3e1babd4 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
@@ -149,8 +149,8 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_14_soc = {
.num_states = 5,
.sr_exit_time_us = 16.5,
.sr_enter_plus_exit_time_us = 18.5,
- .sr_exit_z8_time_us = 280.0,
- .sr_enter_plus_exit_z8_time_us = 350.0,
+ .sr_exit_z8_time_us = 210.0,
+ .sr_enter_plus_exit_z8_time_us = 310.0,
.writeback_latency_us = 12.0,
.dram_channel_width_bytes = 4,
.round_trip_ping_latency_dcfclk_cycles = 106,
@@ -346,7 +346,8 @@ int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *c
context->bw_ctx.dml.ip.det_buffer_size_kbytes = DCN3_14_DEFAULT_DET_SIZE;
dc->config.enable_4to1MPC = false;
- if (pipe_cnt == 1 && pipe->plane_state && !dc->debug.disable_z9_mpc) {
+ if (pipe_cnt == 1 && pipe->plane_state
+ && pipe->plane_state->rotation == ROTATION_ANGLE_0 && !dc->debug.disable_z9_mpc) {
if (is_dual_plane(pipe->plane_state->format)
&& pipe->plane_state->src_rect.width <= 1920 && pipe->plane_state->src_rect.height <= 1080) {
dc->config.enable_4to1MPC = true;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c
index 950669f2c10d..c843b394aeb4 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c
@@ -900,7 +900,9 @@ static bool CalculatePrefetchSchedule(
double DSTTotalPixelsAfterScaler;
double LineTime;
double dst_y_prefetch_equ;
+#ifdef __DML_VBA_DEBUG__
double Tsw_oto;
+#endif
double prefetch_bw_oto;
double prefetch_bw_pr;
double Tvm_oto;
@@ -1082,7 +1084,9 @@ static bool CalculatePrefetchSchedule(
min_Lsw = dml_max(1, dml_max(PrefetchSourceLinesY, PrefetchSourceLinesC) / max_vratio_pre);
Lsw_oto = dml_ceil(4 * dml_max(prefetch_sw_bytes / prefetch_bw_oto / LineTime, min_Lsw), 1) / 4;
+#ifdef __DML_VBA_DEBUG__
Tsw_oto = Lsw_oto * LineTime;
+#endif
#ifdef __DML_VBA_DEBUG__
@@ -3183,7 +3187,7 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
} else {
v->MIN_DST_Y_NEXT_START[k] = v->VTotal[k] - v->VFrontPorch[k] + v->VTotal[k] - v->VActive[k] - v->VStartup[k];
}
- v->MIN_DST_Y_NEXT_START[k] += dml_floor(4.0 * v->TSetup[k] / (double)v->HTotal[k] / v->PixelClock[k], 1.0) / 4.0;
+ v->MIN_DST_Y_NEXT_START[k] += dml_floor(4.0 * v->TSetup[k] / ((double)v->HTotal[k] / v->PixelClock[k]), 1.0) / 4.0;
if (((v->VUpdateOffsetPix[k] + v->VUpdateWidthPix[k] + v->VReadyOffsetPix[k]) / v->HTotal[k])
<= (isInterlaceTiming ?
dml_floor((v->VTotal[k] - v->VActive[k] - v->VFrontPorch[k] - v->VStartup[k]) / 2.0, 1.0) :
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c
index 61ee9ba063a7..6576b897a512 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_rq_dlg_calc_314.c
@@ -51,7 +51,7 @@ static bool CalculateBytePerPixelAnd256BBlockSizes(
*BytePerPixelDETC = 0;
*BytePerPixelY = 4;
*BytePerPixelC = 0;
- } else if (SourcePixelFormat == dm_444_16 || SourcePixelFormat == dm_444_16) {
+ } else if (SourcePixelFormat == dm_444_16) {
*BytePerPixelDETY = 2;
*BytePerPixelDETC = 0;
*BytePerPixelY = 2;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
index f94abd124021..e47828e3b6d5 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
@@ -24,13 +24,14 @@
*
*/
#include "dcn32_fpu.h"
-#include "dc_link_dp.h"
#include "dcn32/dcn32_resource.h"
#include "dcn20/dcn20_resource.h"
#include "display_mode_vba_util_32.h"
+#include "dml/dcn32/display_mode_vba_32.h"
// We need this includes for WATERMARKS_* defines
#include "clk_mgr/dcn32/dcn32_smu13_driver_if.h"
#include "dcn30/dcn30_resource.h"
+#include "link.h"
#define DC_LOGGER_INIT(logger)
@@ -691,9 +692,11 @@ static bool dcn32_assign_subvp_pipe(struct dc *dc,
* to combine this with SubVP can cause issues with the scheduling).
* - Not TMZ surface
*/
- if (pipe->plane_state && !pipe->top_pipe &&
+ if (pipe->plane_state && !pipe->top_pipe && !dcn32_is_center_timing(pipe) && !dcn32_is_psr_capable(pipe) &&
pipe->stream->mall_stream_config.type == SUBVP_NONE && refresh_rate < 120 && !pipe->plane_state->address.tmz_surface &&
- vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] <= 0) {
+ (vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] <= 0 ||
+ (vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] > 0 &&
+ dcn32_allow_subvp_with_active_margin(pipe)))) {
while (pipe) {
num_pipes++;
pipe = pipe->bottom_pipe;
@@ -877,6 +880,10 @@ static bool subvp_drr_schedulable(struct dc *dc, struct dc_state *context, struc
int16_t stretched_drr_us = 0;
int16_t drr_stretched_vblank_us = 0;
int16_t max_vblank_mallregion = 0;
+ const struct dc_config *config = &dc->config;
+
+ if (config->disable_subvp_drr)
+ return false;
// Find SubVP pipe
for (i = 0; i < dc->res_pool->pipe_count; i++) {
@@ -977,10 +984,12 @@ static bool subvp_vblank_schedulable(struct dc *dc, struct dc_state *context)
if (!subvp_pipe && pipe->stream->mall_stream_config.type == SUBVP_MAIN)
subvp_pipe = pipe;
}
- // Use ignore_msa_timing_param flag to identify as DRR
- if (found && context->res_ctx.pipe_ctx[vblank_index].stream->ignore_msa_timing_param) {
- // SUBVP + DRR case
- schedulable = subvp_drr_schedulable(dc, context, &context->res_ctx.pipe_ctx[vblank_index]);
+ // Use ignore_msa_timing_param and VRR active, or Freesync flag to identify as DRR On
+ if (found && context->res_ctx.pipe_ctx[vblank_index].stream->ignore_msa_timing_param &&
+ (context->res_ctx.pipe_ctx[vblank_index].stream->allow_freesync ||
+ context->res_ctx.pipe_ctx[vblank_index].stream->vrr_active_variable)) {
+ // SUBVP + DRR case -- only allowed if run through DRR validation path
+ schedulable = false;
} else if (found) {
main_timing = &subvp_pipe->stream->timing;
phantom_timing = &subvp_pipe->stream->mall_stream_config.paired_stream->timing;
@@ -1084,12 +1093,12 @@ static void dcn32_full_validate_bw_helper(struct dc *dc,
{
struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
unsigned int dc_pipe_idx = 0;
+ int i = 0;
bool found_supported_config = false;
struct pipe_ctx *pipe = NULL;
uint32_t non_subvp_pipes = 0;
bool drr_pipe_found = false;
uint32_t drr_pipe_index = 0;
- uint32_t i = 0;
dc_assert_fp_enabled();
@@ -1169,15 +1178,25 @@ static void dcn32_full_validate_bw_helper(struct dc *dc,
pipes[0].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, *pipe_cnt, 0);
*vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, *pipe_cnt);
+ /* Check that vlevel requested supports pstate or not
+ * if not, select the lowest vlevel that supports it
+ */
+ for (i = *vlevel; i < context->bw_ctx.dml.soc.num_states; i++) {
+ if (vba->DRAMClockChangeSupport[i][vba->maxMpcComb] != dm_dram_clock_change_unsupported) {
+ *vlevel = i;
+ break;
+ }
+ }
+
if (*vlevel < context->bw_ctx.dml.soc.num_states &&
vba->DRAMClockChangeSupport[*vlevel][vba->maxMpcComb] != dm_dram_clock_change_unsupported
&& subvp_validate_static_schedulability(dc, context, *vlevel)) {
found_supported_config = true;
- } else if (*vlevel < context->bw_ctx.dml.soc.num_states &&
- vba->DRAMClockChangeSupport[*vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported) {
- /* Case where 1 SubVP is added, and DML reports MCLK unsupported. This handles
- * the case for SubVP + DRR, where the DRR display does not support MCLK switch
- * at it's native refresh rate / timing.
+ } else if (*vlevel < context->bw_ctx.dml.soc.num_states) {
+ /* Case where 1 SubVP is added, and DML reports MCLK unsupported or DRR is allowed.
+ * This handles the case for SubVP + DRR, where the DRR display does not support MCLK
+ * switch at it's native refresh rate / timing, or DRR is allowed for the non-subvp
+ * display.
*/
for (i = 0; i < dc->res_pool->pipe_count; i++) {
pipe = &context->res_ctx.pipe_ctx[i];
@@ -1185,7 +1204,7 @@ static void dcn32_full_validate_bw_helper(struct dc *dc,
pipe->stream->mall_stream_config.type == SUBVP_NONE) {
non_subvp_pipes++;
// Use ignore_msa_timing_param flag to identify as DRR
- if (pipe->stream->ignore_msa_timing_param) {
+ if (pipe->stream->ignore_msa_timing_param && pipe->stream->allow_freesync) {
drr_pipe_found = true;
drr_pipe_index = i;
}
@@ -1194,6 +1213,15 @@ static void dcn32_full_validate_bw_helper(struct dc *dc,
// If there is only 1 remaining non SubVP pipe that is DRR, check static
// schedulability for SubVP + DRR.
if (non_subvp_pipes == 1 && drr_pipe_found) {
+ /* find lowest vlevel that supports the config */
+ for (i = *vlevel; i >= 0; i--) {
+ if (vba->ModeSupport[i][vba->maxMpcComb]) {
+ *vlevel = i;
+ } else {
+ break;
+ }
+ }
+
found_supported_config = subvp_drr_schedulable(dc, context,
&context->res_ctx.pipe_ctx[drr_pipe_index]);
}
@@ -1242,12 +1270,44 @@ static bool is_dtbclk_required(struct dc *dc, struct dc_state *context)
for (i = 0; i < dc->res_pool->pipe_count; i++) {
if (!context->res_ctx.pipe_ctx[i].stream)
continue;
- if (is_dp_128b_132b_signal(&context->res_ctx.pipe_ctx[i]))
+ if (link_is_dp_128b_132b_signal(&context->res_ctx.pipe_ctx[i]))
return true;
}
return false;
}
+static void dcn20_adjust_freesync_v_startup(const struct dc_crtc_timing *dc_crtc_timing, int *vstartup_start)
+{
+ struct dc_crtc_timing patched_crtc_timing;
+ uint32_t asic_blank_end = 0;
+ uint32_t asic_blank_start = 0;
+ uint32_t newVstartup = 0;
+
+ patched_crtc_timing = *dc_crtc_timing;
+
+ if (patched_crtc_timing.flags.INTERLACE == 1) {
+ if (patched_crtc_timing.v_front_porch < 2)
+ patched_crtc_timing.v_front_porch = 2;
+ } else {
+ if (patched_crtc_timing.v_front_porch < 1)
+ patched_crtc_timing.v_front_porch = 1;
+ }
+
+ /* blank_start = frame end - front porch */
+ asic_blank_start = patched_crtc_timing.v_total -
+ patched_crtc_timing.v_front_porch;
+
+ /* blank_end = blank_start - active */
+ asic_blank_end = asic_blank_start -
+ patched_crtc_timing.v_border_bottom -
+ patched_crtc_timing.v_addressable -
+ patched_crtc_timing.v_border_top;
+
+ newVstartup = asic_blank_end + (patched_crtc_timing.v_total - asic_blank_start);
+
+ *vstartup_start = ((newVstartup > *vstartup_start) ? newVstartup : *vstartup_start);
+}
+
static void dcn32_calculate_dlg_params(struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
int pipe_cnt, int vlevel)
@@ -1270,7 +1330,6 @@ static void dcn32_calculate_dlg_params(struct dc *dc, struct dc_state *context,
context->bw_ctx.bw.dcn.clk.p_state_change_support =
context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb]
!= dm_dram_clock_change_unsupported;
- context->bw_ctx.bw.dcn.clk.num_ways = dcn32_helper_calculate_num_ways_for_subvp(dc, context);
context->bw_ctx.bw.dcn.clk.dppclk_khz = 0;
context->bw_ctx.bw.dcn.clk.dtbclk_en = is_dtbclk_required(dc, context);
@@ -1294,6 +1353,10 @@ static void dcn32_calculate_dlg_params(struct dc *dc, struct dc_state *context,
unbounded_req_enabled = false;
}
+ context->bw_ctx.bw.dcn.mall_ss_size_bytes = 0;
+ context->bw_ctx.bw.dcn.mall_ss_psr_active_size_bytes = 0;
+ context->bw_ctx.bw.dcn.mall_subvp_size_bytes = 0;
+
for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
if (!context->res_ctx.pipe_ctx[i].stream)
continue;
@@ -1325,6 +1388,34 @@ static void dcn32_calculate_dlg_params(struct dc *dc, struct dc_state *context,
else
context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz = 0;
context->res_ctx.pipe_ctx[i].pipe_dlg_param = pipes[pipe_idx].pipe.dest;
+
+ context->res_ctx.pipe_ctx[i].surface_size_in_mall_bytes = get_surface_size_in_mall(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
+
+ /* MALL Allocation Sizes */
+ /* count from active, top pipes per plane only */
+ if (context->res_ctx.pipe_ctx[i].stream && context->res_ctx.pipe_ctx[i].plane_state &&
+ (context->res_ctx.pipe_ctx[i].top_pipe == NULL ||
+ context->res_ctx.pipe_ctx[i].plane_state != context->res_ctx.pipe_ctx[i].top_pipe->plane_state) &&
+ context->res_ctx.pipe_ctx[i].prev_odm_pipe == NULL) {
+ /* SS: all active surfaces stored in MALL */
+ if (context->res_ctx.pipe_ctx[i].stream->mall_stream_config.type != SUBVP_PHANTOM) {
+ context->bw_ctx.bw.dcn.mall_ss_size_bytes += context->res_ctx.pipe_ctx[i].surface_size_in_mall_bytes;
+
+ if (context->res_ctx.pipe_ctx[i].stream->link->psr_settings.psr_version == DC_PSR_VERSION_UNSUPPORTED) {
+ /* SS PSR On: all active surfaces part of streams not supporting PSR stored in MALL */
+ context->bw_ctx.bw.dcn.mall_ss_psr_active_size_bytes += context->res_ctx.pipe_ctx[i].surface_size_in_mall_bytes;
+ }
+ } else {
+ /* SUBVP: phantom surfaces only stored in MALL */
+ context->bw_ctx.bw.dcn.mall_subvp_size_bytes += context->res_ctx.pipe_ctx[i].surface_size_in_mall_bytes;
+ }
+ }
+
+ if (context->res_ctx.pipe_ctx[i].stream->adaptive_sync_infopacket.valid)
+ dcn20_adjust_freesync_v_startup(
+ &context->res_ctx.pipe_ctx[i].stream->timing,
+ &context->res_ctx.pipe_ctx[i].pipe_dlg_param.vstartup_start);
+
pipe_idx++;
}
/* If DCN isn't making memory requests we can allow pstate change and lower clocks */
@@ -1345,6 +1436,8 @@ static void dcn32_calculate_dlg_params(struct dc *dc, struct dc_state *context,
context->bw_ctx.bw.dcn.clk.max_supported_dispclk_khz = context->bw_ctx.dml.soc.clock_limits[vlevel].dispclk_mhz
* 1000;
+ context->bw_ctx.bw.dcn.clk.num_ways = dcn32_helper_calculate_num_ways_for_subvp(dc, context);
+
context->bw_ctx.bw.dcn.compbuf_size_kb = context->bw_ctx.dml.ip.config_return_buffer_size_in_kbytes;
for (i = 0; i < dc->res_pool->pipe_count; i++) {
@@ -1530,6 +1623,7 @@ bool dcn32_internal_validate_bw(struct dc *dc,
}
dml_log_pipe_params(&context->bw_ctx.dml, pipes, pipe_cnt);
+ context->bw_ctx.dml.soc.max_vratio_pre = dcn32_determine_max_vratio_prefetch(dc, context);
if (!fast_validate)
dcn32_full_validate_bw_helper(dc, context, pipes, &vlevel, split, merge, &pipe_cnt);
@@ -1549,16 +1643,12 @@ bool dcn32_internal_validate_bw(struct dc *dc,
* to support with Prefetch mode 1 (dm_prefetch_support_fclk_and_stutter == 2)
*/
context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final =
- dm_prefetch_support_fclk_and_stutter;
+ dm_prefetch_support_none;
+ context->bw_ctx.dml.validate_max_state = fast_validate;
vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt);
- /* Last attempt with Prefetch mode 2 (dm_prefetch_support_stutter == 3) */
- if (vlevel == context->bw_ctx.dml.soc.num_states) {
- context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final =
- dm_prefetch_support_stutter;
- vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt);
- }
+ context->bw_ctx.dml.validate_max_state = false;
if (vlevel < context->bw_ctx.dml.soc.num_states) {
memset(split, 0, sizeof(split));
@@ -1645,6 +1735,7 @@ bool dcn32_internal_validate_bw(struct dc *dc,
dcn20_release_dsc(&context->res_ctx, dc->res_pool, &pipe->stream_res.dsc);
memset(&pipe->plane_res, 0, sizeof(pipe->plane_res));
memset(&pipe->stream_res, 0, sizeof(pipe->stream_res));
+ memset(&pipe->link_res, 0, sizeof(pipe->link_res));
repopulate_pipes = true;
} else if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state) {
struct pipe_ctx *top_pipe = pipe->top_pipe;
@@ -1660,6 +1751,7 @@ bool dcn32_internal_validate_bw(struct dc *dc,
pipe->stream = NULL;
memset(&pipe->plane_res, 0, sizeof(pipe->plane_res));
memset(&pipe->stream_res, 0, sizeof(pipe->stream_res));
+ memset(&pipe->link_res, 0, sizeof(pipe->link_res));
repopulate_pipes = true;
} else
ASSERT(0); /* Should never try to merge master pipe */
@@ -1834,7 +1926,7 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
bool subvp_in_use = dcn32_subvp_in_use(dc, context);
unsigned int min_dram_speed_mts_margin;
bool need_fclk_lat_as_dummy = false;
- bool is_subvp_p_drr = true;
+ bool is_subvp_p_drr = false;
dc_assert_fp_enabled();
@@ -1842,7 +1934,8 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
if (subvp_in_use) {
/* Override DRAMClockChangeSupport for SubVP + DRR case where the DRR cannot switch without stretching it's VBLANK */
if (!pstate_en) {
- context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] = dm_dram_clock_change_vblank_w_mall_sub_vp;
+ context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][maxMpcComb] = dm_dram_clock_change_vblank_w_mall_sub_vp;
+ context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final = dm_prefetch_support_fclk_and_stutter;
pstate_en = true;
is_subvp_p_drr = true;
}
@@ -1860,8 +1953,9 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
context->bw_ctx.dml.soc.dram_clock_change_latency_us =
dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false);
+ maxMpcComb = context->bw_ctx.dml.vba.maxMpcComb;
if (is_subvp_p_drr) {
- context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] = dm_dram_clock_change_vblank_w_mall_sub_vp;
+ context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][maxMpcComb] = dm_dram_clock_change_vblank_w_mall_sub_vp;
}
}
@@ -2038,6 +2132,10 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
*/
context->bw_ctx.bw.dcn.watermarks.a = context->bw_ctx.bw.dcn.watermarks.c;
context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = 0;
+ /* Calculate FCLK p-state change watermark based on FCLK pstate change latency in case
+ * UCLK p-state is not supported, to avoid underflow in case FCLK pstate is supported
+ */
+ context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.fclk_pstate_change_ns = get_fclk_watermark(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
} else {
/* Set A:
* All clocks min.
@@ -2443,8 +2541,11 @@ void dcn32_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_pa
}
/* Override from VBIOS for num_chan */
- if (dc->ctx->dc_bios->vram_info.num_chans)
+ if (dc->ctx->dc_bios->vram_info.num_chans) {
dcn3_2_soc.num_chans = dc->ctx->dc_bios->vram_info.num_chans;
+ dcn3_2_soc.mall_allocated_for_dcn_mbytes = (double)(dcn32_calc_num_avail_chans_for_mall(dc,
+ dc->ctx->dc_bios->vram_info.num_chans) * dc->caps.mall_size_per_mem_channel);
+ }
if (dc->ctx->dc_bios->vram_info.dram_channel_width_bytes)
dcn3_2_soc.dram_channel_width_bytes = dc->ctx->dc_bios->vram_info.dram_channel_width_bytes;
@@ -2622,3 +2723,60 @@ void dcn32_zero_pipe_dcc_fraction(display_e2e_pipe_params_st *pipes,
pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_luma = 0;
pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_chroma = 0;
}
+
+bool dcn32_allow_subvp_with_active_margin(struct pipe_ctx *pipe)
+{
+ bool allow = false;
+ uint32_t refresh_rate = 0;
+
+ /* Allow subvp on displays that have active margin for 2560x1440@60hz displays
+ * only for now. There must be no scaling as well.
+ *
+ * For now we only enable on 2560x1440@60hz displays to enable 4K60 + 1440p60 configs
+ * for p-state switching.
+ */
+ if (pipe->stream && pipe->plane_state) {
+ refresh_rate = (pipe->stream->timing.pix_clk_100hz * 100 +
+ pipe->stream->timing.v_total * pipe->stream->timing.h_total - 1)
+ / (double)(pipe->stream->timing.v_total * pipe->stream->timing.h_total);
+ if (pipe->stream->timing.v_addressable == 1440 &&
+ pipe->stream->timing.h_addressable == 2560 &&
+ refresh_rate >= 55 && refresh_rate <= 65 &&
+ pipe->plane_state->src_rect.height == 1440 &&
+ pipe->plane_state->src_rect.width == 2560 &&
+ pipe->plane_state->dst_rect.height == 1440 &&
+ pipe->plane_state->dst_rect.width == 2560)
+ allow = true;
+ }
+ return allow;
+}
+
+/**
+ * *******************************************************************************************
+ * dcn32_determine_max_vratio_prefetch: Determine max Vratio for prefetch by driver policy
+ *
+ * @param [in]: dc: Current DC state
+ * @param [in]: context: New DC state to be programmed
+ *
+ * @return: Max vratio for prefetch
+ *
+ * *******************************************************************************************
+ */
+double dcn32_determine_max_vratio_prefetch(struct dc *dc, struct dc_state *context)
+{
+ double max_vratio_pre = __DML_MAX_BW_RATIO_PRE__; // Default value is 4
+ int i;
+
+ /* For single display MPO configs, allow the max vratio to be 8
+ * if any plane is YUV420 format
+ */
+ if (context->stream_count == 1 && context->stream_status[0].plane_count > 1) {
+ for (i = 0; i < context->stream_status[0].plane_count; i++) {
+ if (context->stream_status[0].plane_states[i]->format == SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr ||
+ context->stream_status[0].plane_states[i]->format == SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb) {
+ max_vratio_pre = __DML_MAX_VRATIO_PRE__;
+ }
+ }
+ }
+ return max_vratio_pre;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
index 4b8f5fa0f0ad..3b2a014ccf8f 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
@@ -387,6 +387,7 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
mode_lib->vba.NumberOfActiveSurfaces,
mode_lib->vba.MALLAllocatedForDCNFinal,
mode_lib->vba.UseMALLForStaticScreen,
+ mode_lib->vba.UsesMALLForPStateChange,
mode_lib->vba.DCCEnable,
mode_lib->vba.ViewportStationary,
mode_lib->vba.ViewportXStartY,
@@ -411,6 +412,8 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
v->BlockWidthC,
v->BlockHeightY,
v->BlockHeightC,
+ mode_lib->vba.DCCMetaPitchY,
+ mode_lib->vba.DCCMetaPitchC,
/* Output */
v->SurfaceSizeInMALL,
@@ -893,8 +896,8 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
if (v->DestinationLinesForPrefetch[k] < 2)
DestinationLineTimesForPrefetchLessThan2 = true;
- if (v->VRatioPrefetchY[k] > __DML_MAX_VRATIO_PRE__
- || v->VRatioPrefetchC[k] > __DML_MAX_VRATIO_PRE__)
+ if (v->VRatioPrefetchY[k] > v->MaxVRatioPre
+ || v->VRatioPrefetchC[k] > v->MaxVRatioPre)
VRatioPrefetchMoreThanMax = true;
//bool DestinationLinesToRequestVMInVBlankEqualOrMoreThan32 = false;
@@ -939,6 +942,9 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
v->UrgBurstFactorLumaPre,
v->UrgBurstFactorChromaPre,
v->UrgBurstFactorCursorPre,
+ v->PrefetchBandwidth,
+ v->VRatio,
+ v->MaxVRatioPre,
/* output */
&MaxTotalRDBandwidth,
@@ -969,6 +975,9 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.dummy_unit_vector,
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.dummy_unit_vector,
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.dummy_unit_vector,
+ v->PrefetchBandwidth,
+ v->VRatio,
+ v->MaxVRatioPre,
/* output */
&v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.dummy_single[0],
@@ -1636,9 +1645,14 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
static void mode_support_configuration(struct vba_vars_st *v,
struct display_mode_lib *mode_lib)
{
- int i, j;
+ int i, j, start_state;
+
+ if (mode_lib->validate_max_state)
+ start_state = v->soc.num_states - 1;
+ else
+ start_state = 0;
- for (i = v->soc.num_states - 1; i >= 0; i--) {
+ for (i = v->soc.num_states - 1; i >= start_state; i--) {
for (j = 0; j < 2; j++) {
if (mode_lib->vba.ScaleRatioAndTapsSupport == true
&& mode_lib->vba.SourceFormatPixelAndScanSupport == true
@@ -1707,7 +1721,7 @@ static void mode_support_configuration(struct vba_vars_st *v,
void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_lib)
{
struct vba_vars_st *v = &mode_lib->vba;
- int i, j;
+ int i, j, start_state;
unsigned int k, m;
unsigned int MaximumMPCCombine;
unsigned int NumberOfNonCombinedSurfaceOfMaximumBandwidth;
@@ -1720,6 +1734,10 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
#endif
/*MODE SUPPORT, VOLTAGE STATE AND SOC CONFIGURATION*/
+ if (mode_lib->validate_max_state)
+ start_state = v->soc.num_states - 1;
+ else
+ start_state = 0;
/*Scale Ratio, taps Support Check*/
@@ -2009,7 +2027,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.MPCCombineMethodIncompatible = v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.MPCCombineMethodAsNeededForPStateChangeAndVoltage
&& v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.MPCCombineMethodAsPossible;
- for (i = 0; i < v->soc.num_states; i++) {
+ for (i = start_state; i < v->soc.num_states; i++) {
for (j = 0; j < 2; j++) {
mode_lib->vba.TotalNumberOfActiveDPP[i][j] = 0;
mode_lib->vba.TotalAvailablePipesSupport[i][j] = true;
@@ -2286,7 +2304,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
}
}
- for (i = 0; i < v->soc.num_states; ++i) {
+ for (i = start_state; i < v->soc.num_states; ++i) {
mode_lib->vba.ExceededMultistreamSlots[i] = false;
for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) {
if (mode_lib->vba.OutputMultistreamEn[k] == true && mode_lib->vba.OutputMultistreamId[k] == k) {
@@ -2335,8 +2353,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
if (mode_lib->vba.DSCEnable[k] && mode_lib->vba.ForcedOutputLinkBPP[k] != 0)
mode_lib->vba.DSCOnlyIfNecessaryWithBPP = true;
- if ((mode_lib->vba.DSCEnable[k] || mode_lib->vba.DSCEnable[k])
- && mode_lib->vba.OutputFormat[k] == dm_n422
+ if (mode_lib->vba.DSCEnable[k] && mode_lib->vba.OutputFormat[k] == dm_n422
&& !mode_lib->vba.DSC422NativeSupport)
mode_lib->vba.DSC422NativeNotSupported = true;
@@ -2386,7 +2403,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
}
}
- for (i = 0; i < v->soc.num_states; ++i) {
+ for (i = start_state; i < v->soc.num_states; ++i) {
mode_lib->vba.DTBCLKRequiredMoreThanSupported[i] = false;
for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) {
if (mode_lib->vba.BlendingAndTiming[k] == k
@@ -2403,7 +2420,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
}
}
- for (i = 0; i < v->soc.num_states; ++i) {
+ for (i = start_state; i < v->soc.num_states; ++i) {
mode_lib->vba.ODMCombine2To1SupportCheckOK[i] = true;
mode_lib->vba.ODMCombine4To1SupportCheckOK[i] = true;
for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) {
@@ -2421,7 +2438,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
}
}
- for (i = 0; i < v->soc.num_states; i++) {
+ for (i = start_state; i < v->soc.num_states; i++) {
mode_lib->vba.DSCCLKRequiredMoreThanSupported[i] = false;
for (k = 0; k <= mode_lib->vba.NumberOfActiveSurfaces - 1; k++) {
if (mode_lib->vba.BlendingAndTiming[k] == k) {
@@ -2458,7 +2475,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
/* Check DSC Unit and Slices Support */
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.TotalDSCUnitsRequired = 0;
- for (i = 0; i < v->soc.num_states; ++i) {
+ for (i = start_state; i < v->soc.num_states; ++i) {
mode_lib->vba.NotEnoughDSCUnits[i] = false;
mode_lib->vba.NotEnoughDSCSlices[i] = false;
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.TotalDSCUnitsRequired = 0;
@@ -2493,7 +2510,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
}
/*DSC Delay per state*/
- for (i = 0; i < v->soc.num_states; ++i) {
+ for (i = start_state; i < v->soc.num_states; ++i) {
for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) {
mode_lib->vba.DSCDelayPerState[i][k] = dml32_DSCDelayRequirement(
mode_lib->vba.RequiresDSC[i][k], mode_lib->vba.ODMCombineEnablePerState[i][k],
@@ -2520,7 +2537,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
//Calculate Swath, DET Configuration, DCFCLKDeepSleep
//
- for (i = 0; i < (int) v->soc.num_states; ++i) {
+ for (i = start_state; i < (int) v->soc.num_states; ++i) {
for (j = 0; j <= 1; ++j) {
for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) {
mode_lib->vba.RequiredDPPCLKThisState[k] = mode_lib->vba.RequiredDPPCLK[i][j][k];
@@ -2626,6 +2643,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.NumberOfActiveSurfaces,
mode_lib->vba.MALLAllocatedForDCNFinal,
mode_lib->vba.UseMALLForStaticScreen,
+ mode_lib->vba.UsesMALLForPStateChange,
mode_lib->vba.DCCEnable,
mode_lib->vba.ViewportStationary,
mode_lib->vba.ViewportXStartY,
@@ -2650,12 +2668,14 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.MacroTileWidthC,
mode_lib->vba.MacroTileHeightY,
mode_lib->vba.MacroTileHeightC,
+ mode_lib->vba.DCCMetaPitchY,
+ mode_lib->vba.DCCMetaPitchC,
/* Output */
mode_lib->vba.SurfaceSizeInMALL,
&mode_lib->vba.ExceededMALLSize);
- for (i = 0; i < v->soc.num_states; i++) {
+ for (i = start_state; i < v->soc.num_states; i++) {
for (j = 0; j < 2; j++) {
for (k = 0; k <= mode_lib->vba.NumberOfActiveSurfaces - 1; k++) {
mode_lib->vba.swath_width_luma_ub_this_state[k] =
@@ -2882,7 +2902,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
}
//Calculate Return BW
- for (i = 0; i < (int) v->soc.num_states; ++i) {
+ for (i = start_state; i < (int) v->soc.num_states; ++i) {
for (j = 0; j <= 1; ++j) {
for (k = 0; k <= mode_lib->vba.NumberOfActiveSurfaces - 1; k++) {
if (mode_lib->vba.BlendingAndTiming[k] == k) {
@@ -2961,7 +2981,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
&mode_lib->vba.MinPrefetchMode,
&mode_lib->vba.MaxPrefetchMode);
- for (i = 0; i < (int) v->soc.num_states; ++i) {
+ for (i = start_state; i < (int) v->soc.num_states; ++i) {
for (j = 0; j <= 1; ++j)
mode_lib->vba.DCFCLKState[i][j] = mode_lib->vba.DCFCLKPerState[i];
}
@@ -3083,7 +3103,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.DCFCLKState);
} // UseMinimumRequiredDCFCLK == true
- for (i = 0; i < (int) v->soc.num_states; ++i) {
+ for (i = start_state; i < (int) v->soc.num_states; ++i) {
for (j = 0; j <= 1; ++j) {
mode_lib->vba.ReturnBWPerState[i][j] = dml32_get_return_bw_mbps(&mode_lib->vba.soc, i,
mode_lib->vba.HostVMEnable, mode_lib->vba.DCFCLKState[i][j],
@@ -3092,7 +3112,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
}
//Re-ordering Buffer Support Check
- for (i = 0; i < (int) v->soc.num_states; ++i) {
+ for (i = start_state; i < (int) v->soc.num_states; ++i) {
for (j = 0; j <= 1; ++j) {
if ((mode_lib->vba.ROBBufferSizeInKByte - mode_lib->vba.PixelChunkSizeInKByte) * 1024
/ mode_lib->vba.ReturnBWPerState[i][j]
@@ -3114,7 +3134,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
+ mode_lib->vba.ReadBandwidthChroma[k];
}
- for (i = 0; i < (int) v->soc.num_states; ++i) {
+ for (i = start_state; i < (int) v->soc.num_states; ++i) {
for (j = 0; j <= 1; ++j) {
mode_lib->vba.MaxTotalVerticalActiveAvailableBandwidth[i][j] =
dml_min3(mode_lib->vba.ReturnBusWidth * mode_lib->vba.DCFCLKState[i][j]
@@ -3138,7 +3158,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
/* Prefetch Check */
- for (i = 0; i < (int) v->soc.num_states; ++i) {
+ for (i = start_state; i < (int) v->soc.num_states; ++i) {
for (j = 0; j <= 1; ++j) {
mode_lib->vba.TimeCalc = 24 / mode_lib->vba.ProjectedDCFCLKDeepSleep[i][j];
@@ -3358,6 +3378,9 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.UrgentBurstFactorLumaPre,
mode_lib->vba.UrgentBurstFactorChromaPre,
mode_lib->vba.UrgentBurstFactorCursorPre,
+ v->PrefetchBW,
+ v->VRatio,
+ v->MaxVRatioPre,
/* output */
&v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.dummy_single[0], // Single *PrefetchBandwidth
@@ -3382,8 +3405,8 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.VRatioInPrefetchSupported[i][j] = true;
for (k = 0; k <= mode_lib->vba.NumberOfActiveSurfaces - 1; k++) {
- if (mode_lib->vba.VRatioPreY[i][j][k] > __DML_MAX_VRATIO_PRE__
- || mode_lib->vba.VRatioPreC[i][j][k] > __DML_MAX_VRATIO_PRE__
+ if (mode_lib->vba.VRatioPreY[i][j][k] > mode_lib->vba.MaxVRatioPre
+ || mode_lib->vba.VRatioPreC[i][j][k] > mode_lib->vba.MaxVRatioPre
|| mode_lib->vba.NoTimeForPrefetch[i][j][k] == true) {
mode_lib->vba.VRatioInPrefetchSupported[i][j] = false;
}
@@ -3639,7 +3662,6 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
if (mode_lib->vba.SourcePixelFormat[k] != dm_444_64
&& mode_lib->vba.SourcePixelFormat[k] != dm_444_32
&& mode_lib->vba.SourcePixelFormat[k] != dm_444_16
- && mode_lib->vba.SourcePixelFormat[k] != dm_444_16
&& mode_lib->vba.SourcePixelFormat[k] != dm_444_8
&& mode_lib->vba.SourcePixelFormat[k] != dm_rgbe) {
if (mode_lib->vba.ViewportWidthChroma[k] > mode_lib->vba.SurfaceWidthC[k]
@@ -3656,7 +3678,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
MaximumMPCCombine = 0;
- for (i = v->soc.num_states; i >= 0; i--) {
+ for (i = v->soc.num_states; i >= start_state; i--) {
if (i == v->soc.num_states || mode_lib->vba.ModeSupport[i][0] == true ||
mode_lib->vba.ModeSupport[i][1] == true) {
mode_lib->vba.VoltageLevel = i;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h
index c8b28c83ddf4..500b3dd6052d 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h
@@ -44,7 +44,8 @@
#define __DML_MIN_DCFCLK_FACTOR__ 1.15
// Prefetch schedule max vratio
-#define __DML_MAX_VRATIO_PRE__ 4.0
+#define __DML_MAX_VRATIO_PRE__ 7.9
+#define __DML_MAX_BW_RATIO_PRE__ 4.0
#define __DML_VBA_MAX_DST_Y_PRE__ 63.75
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
index b53feeaf5cf1..d1000aa4c481 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
@@ -1772,6 +1772,7 @@ void dml32_CalculateSurfaceSizeInMall(
unsigned int NumberOfActiveSurfaces,
unsigned int MALLAllocatedForDCN,
enum dm_use_mall_for_static_screen_mode UseMALLForStaticScreen[],
+ enum dm_use_mall_for_pstate_change_mode UsesMALLForPStateChange[],
bool DCCEnable[],
bool ViewportStationary[],
unsigned int ViewportXStartY[],
@@ -1796,13 +1797,17 @@ void dml32_CalculateSurfaceSizeInMall(
unsigned int ReadBlockWidthC[],
unsigned int ReadBlockHeightY[],
unsigned int ReadBlockHeightC[],
+ unsigned int DCCMetaPitchY[],
+ unsigned int DCCMetaPitchC[],
/* Output */
unsigned int SurfaceSizeInMALL[],
bool *ExceededMALLSize)
{
- unsigned int TotalSurfaceSizeInMALL = 0;
unsigned int k;
+ unsigned int TotalSurfaceSizeInMALLForSS = 0;
+ unsigned int TotalSurfaceSizeInMALLForSubVP = 0;
+ unsigned int MALLAllocatedForDCNInBytes = MALLAllocatedForDCN * 1024 * 1024;
for (k = 0; k < NumberOfActiveSurfaces; ++k) {
if (ViewportStationary[k]) {
@@ -1828,18 +1833,18 @@ void dml32_CalculateSurfaceSizeInMall(
}
if (DCCEnable[k] == true) {
SurfaceSizeInMALL[k] = SurfaceSizeInMALL[k] +
- dml_min(dml_ceil(SurfaceWidthY[k], 8 * Read256BytesBlockWidthY[k]),
+ (dml_min(dml_ceil(DCCMetaPitchY[k], 8 * Read256BytesBlockWidthY[k]),
dml_floor(ViewportXStartY[k] + ViewportWidthY[k] + 8 *
Read256BytesBlockWidthY[k] - 1, 8 * Read256BytesBlockWidthY[k])
- dml_floor(ViewportXStartY[k], 8 * Read256BytesBlockWidthY[k]))
* dml_min(dml_ceil(SurfaceHeightY[k], 8 *
Read256BytesBlockHeightY[k]), dml_floor(ViewportYStartY[k] +
ViewportHeightY[k] + 8 * Read256BytesBlockHeightY[k] - 1, 8 *
- Read256BytesBlockHeightY[k]) - dml_floor(ViewportYStartY[k], 8
- * Read256BytesBlockHeightY[k])) * BytesPerPixelY[k] / 256;
+ Read256BytesBlockHeightY[k]) - dml_floor(ViewportYStartY[k], 8 *
+ Read256BytesBlockHeightY[k])) * BytesPerPixelY[k] / 256) + (64 * 1024);
if (Read256BytesBlockWidthC[k] > 0) {
SurfaceSizeInMALL[k] = SurfaceSizeInMALL[k] +
- dml_min(dml_ceil(SurfaceWidthC[k], 8 *
+ dml_min(dml_ceil(DCCMetaPitchC[k], 8 *
Read256BytesBlockWidthC[k]),
dml_floor(ViewportXStartC[k] + ViewportWidthC[k] + 8
* Read256BytesBlockWidthC[k] - 1, 8 *
@@ -1872,16 +1877,16 @@ void dml32_CalculateSurfaceSizeInMall(
}
if (DCCEnable[k] == true) {
SurfaceSizeInMALL[k] = SurfaceSizeInMALL[k] +
- dml_ceil(dml_min(SurfaceWidthY[k], ViewportWidthY[k] + 8 *
+ (dml_ceil(dml_min(DCCMetaPitchY[k], ViewportWidthY[k] + 8 *
Read256BytesBlockWidthY[k] - 1), 8 *
Read256BytesBlockWidthY[k]) *
dml_ceil(dml_min(SurfaceHeightY[k], ViewportHeightY[k] + 8 *
Read256BytesBlockHeightY[k] - 1), 8 *
- Read256BytesBlockHeightY[k]) * BytesPerPixelY[k] / 256;
+ Read256BytesBlockHeightY[k]) * BytesPerPixelY[k] / 256) + (64 * 1024);
if (Read256BytesBlockWidthC[k] > 0) {
SurfaceSizeInMALL[k] = SurfaceSizeInMALL[k] +
- dml_ceil(dml_min(SurfaceWidthC[k], ViewportWidthC[k] + 8 *
+ dml_ceil(dml_min(DCCMetaPitchC[k], ViewportWidthC[k] + 8 *
Read256BytesBlockWidthC[k] - 1), 8 *
Read256BytesBlockWidthC[k]) *
dml_ceil(dml_min(SurfaceHeightC[k], ViewportHeightC[k] + 8 *
@@ -1894,10 +1899,14 @@ void dml32_CalculateSurfaceSizeInMall(
}
for (k = 0; k < NumberOfActiveSurfaces; ++k) {
- if (UseMALLForStaticScreen[k] == dm_use_mall_static_screen_enable)
- TotalSurfaceSizeInMALL = TotalSurfaceSizeInMALL + SurfaceSizeInMALL[k];
+ /* SS and Subvp counted separate as they are never used at the same time */
+ if (UsesMALLForPStateChange[k] == dm_use_mall_pstate_change_phantom_pipe)
+ TotalSurfaceSizeInMALLForSubVP = TotalSurfaceSizeInMALLForSubVP + SurfaceSizeInMALL[k];
+ else if (UseMALLForStaticScreen[k] == dm_use_mall_static_screen_enable)
+ TotalSurfaceSizeInMALLForSS = TotalSurfaceSizeInMALLForSS + SurfaceSizeInMALL[k];
}
- *ExceededMALLSize = (TotalSurfaceSizeInMALL > MALLAllocatedForDCN * 1024 * 1024);
+ *ExceededMALLSize = (TotalSurfaceSizeInMALLForSS > MALLAllocatedForDCNInBytes) ||
+ (TotalSurfaceSizeInMALLForSubVP > MALLAllocatedForDCNInBytes);
} // CalculateSurfaceSizeInMall
void dml32_CalculateVMRowAndSwath(
@@ -3471,7 +3480,7 @@ bool dml32_CalculatePrefetchSchedule(
double prefetch_sw_bytes;
double bytes_pp;
double dep_bytes;
- unsigned int max_vratio_pre = __DML_MAX_VRATIO_PRE__;
+ unsigned int max_vratio_pre = v->MaxVRatioPre;
double min_Lsw;
double Tsw_est1 = 0;
double Tsw_est3 = 0;
@@ -6134,29 +6143,46 @@ void dml32_CalculatePrefetchBandwithSupport(unsigned int NumberOfActiveSurfaces,
double UrgentBurstFactorLumaPre[],
double UrgentBurstFactorChromaPre[],
double UrgentBurstFactorCursorPre[],
+ double PrefetchBW[],
+ double VRatio[],
+ double MaxVRatioPre,
/* output */
- double *PrefetchBandwidth,
+ double *MaxPrefetchBandwidth,
double *FractionOfUrgentBandwidth,
bool *PrefetchBandwidthSupport)
{
unsigned int k;
+ double ActiveBandwidthPerSurface;
bool NotEnoughUrgentLatencyHiding = false;
+ double TotalActiveBandwidth = 0;
+ double TotalPrefetchBandwidth = 0;
+
for (k = 0; k < NumberOfActiveSurfaces; ++k) {
if (NotUrgentLatencyHiding[k]) {
NotEnoughUrgentLatencyHiding = true;
}
}
- *PrefetchBandwidth = 0;
+ *MaxPrefetchBandwidth = 0;
for (k = 0; k < NumberOfActiveSurfaces; ++k) {
- *PrefetchBandwidth = *PrefetchBandwidth + dml_max3(NumberOfDPP[k] * prefetch_vmrow_bw[k],
- ReadBandwidthLuma[k] * UrgentBurstFactorLuma[k] + ReadBandwidthChroma[k] * UrgentBurstFactorChroma[k] + cursor_bw[k] * UrgentBurstFactorCursor[k] + NumberOfDPP[k] * (meta_row_bandwidth[k] + dpte_row_bandwidth[k]),
+ ActiveBandwidthPerSurface = ReadBandwidthLuma[k] * UrgentBurstFactorLuma[k] + ReadBandwidthChroma[k] * UrgentBurstFactorChroma[k] + cursor_bw[k] * UrgentBurstFactorCursor[k] + NumberOfDPP[k] * (meta_row_bandwidth[k] + dpte_row_bandwidth[k]);
+
+ TotalActiveBandwidth += ActiveBandwidthPerSurface;
+
+ TotalPrefetchBandwidth = TotalPrefetchBandwidth + PrefetchBW[k] * VRatio[k];
+
+ *MaxPrefetchBandwidth = *MaxPrefetchBandwidth + dml_max3(NumberOfDPP[k] * prefetch_vmrow_bw[k],
+ ActiveBandwidthPerSurface,
NumberOfDPP[k] * (PrefetchBandwidthLuma[k] * UrgentBurstFactorLumaPre[k] + PrefetchBandwidthChroma[k] * UrgentBurstFactorChromaPre[k]) + cursor_bw_pre[k] * UrgentBurstFactorCursorPre[k]);
}
- *PrefetchBandwidthSupport = (*PrefetchBandwidth <= ReturnBW) && !NotEnoughUrgentLatencyHiding;
- *FractionOfUrgentBandwidth = *PrefetchBandwidth / ReturnBW;
+ if (MaxVRatioPre == __DML_MAX_VRATIO_PRE__)
+ *PrefetchBandwidthSupport = (*MaxPrefetchBandwidth <= ReturnBW) && (TotalPrefetchBandwidth <= TotalActiveBandwidth * __DML_MAX_BW_RATIO_PRE__) && !NotEnoughUrgentLatencyHiding;
+ else
+ *PrefetchBandwidthSupport = (*MaxPrefetchBandwidth <= ReturnBW) && !NotEnoughUrgentLatencyHiding;
+
+ *FractionOfUrgentBandwidth = *MaxPrefetchBandwidth / ReturnBW;
}
double dml32_CalculateBandwidthAvailableForImmediateFlip(unsigned int NumberOfActiveSurfaces,
@@ -6245,7 +6271,7 @@ bool dml32_CalculateDETSwathFillLatencyHiding(unsigned int NumberOfActiveSurface
double PixelClock[],
double VRatioY[],
double VRatioC[],
- enum dm_use_mall_for_pstate_change_mode UsesMALLForPStateChange[DC__NUM_DPP__MAX])
+ enum dm_use_mall_for_pstate_change_mode UsesMALLForPStateChange[])
{
int k;
double SwathSizeAllSurfaces = 0;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h
index 779c6805f599..9ba792c633a5 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h
@@ -334,6 +334,7 @@ void dml32_CalculateSurfaceSizeInMall(
unsigned int NumberOfActiveSurfaces,
unsigned int MALLAllocatedForDCN,
enum dm_use_mall_for_static_screen_mode UseMALLForStaticScreen[],
+ enum dm_use_mall_for_pstate_change_mode UsesMALLForPStateChange[],
bool DCCEnable[],
bool ViewportStationary[],
unsigned int ViewportXStartY[],
@@ -358,6 +359,8 @@ void dml32_CalculateSurfaceSizeInMall(
unsigned int ReadBlockWidthC[],
unsigned int ReadBlockHeightY[],
unsigned int ReadBlockHeightC[],
+ unsigned int DCCMetaPitchY[],
+ unsigned int DCCMetaPitchC[],
/* Output */
unsigned int SurfaceSizeInMALL[],
@@ -1093,9 +1096,12 @@ void dml32_CalculatePrefetchBandwithSupport(unsigned int NumberOfActiveSurfaces,
double UrgentBurstFactorLumaPre[],
double UrgentBurstFactorChromaPre[],
double UrgentBurstFactorCursorPre[],
+ double PrefetchBW[],
+ double VRatio[],
+ double MaxVRatioPre,
/* output */
- double *PrefetchBandwidth,
+ double *MaxPrefetchBandwidth,
double *FractionOfUrgentBandwidth,
bool *PrefetchBandwidthSupport);
@@ -1157,6 +1163,6 @@ bool dml32_CalculateDETSwathFillLatencyHiding(unsigned int NumberOfActiveSurface
double PixelClock[],
double VRatioY[],
double VRatioC[],
- enum dm_use_mall_for_pstate_change_mode UsesMALLForPStateChange[DC__NUM_DPP__MAX]);
+ enum dm_use_mall_for_pstate_change_mode UsesMALLForPStateChange[]);
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
index f4b176599be7..b80cef70fa60 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c
@@ -136,7 +136,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_21_soc = {
.urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
.urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
- .pct_ideal_sdp_bw_after_urgent = 100.0,
+ .pct_ideal_sdp_bw_after_urgent = 90.0,
.pct_ideal_fabric_bw_after_urgent = 67.0,
.pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 20.0,
.pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 60.0, // N/A, for now keep as is until DML implemented
@@ -534,8 +534,11 @@ void dcn321_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_p
}
/* Override from VBIOS for num_chan */
- if (dc->ctx->dc_bios->vram_info.num_chans)
+ if (dc->ctx->dc_bios->vram_info.num_chans) {
dcn3_21_soc.num_chans = dc->ctx->dc_bios->vram_info.num_chans;
+ dcn3_21_soc.mall_allocated_for_dcn_mbytes = (double)(dcn32_calc_num_avail_chans_for_mall(dc,
+ dc->ctx->dc_bios->vram_info.num_chans) * dc->caps.mall_size_per_mem_channel);
+ }
if (dc->ctx->dc_bios->vram_info.dram_channel_width_bytes)
dcn3_21_soc.dram_channel_width_bytes = dc->ctx->dc_bios->vram_info.dram_channel_width_bytes;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
index 3d643d50c3eb..a9d49ef58fb5 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
@@ -91,6 +91,7 @@ struct display_mode_lib {
struct dal_logger *logger;
struct dml_funcs funcs;
struct _vcs_dpi_display_e2e_pipe_params_st dml_pipe_state[6];
+ bool validate_max_state;
};
void dml_init_instance(struct display_mode_lib *lib,
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
index 64d602e6412f..3c077164f362 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
@@ -246,6 +246,7 @@ struct _vcs_dpi_soc_bounding_box_st {
bool disable_dram_clock_change_vactive_support;
bool allow_dram_clock_one_display_vactive;
enum self_refresh_affinity allow_dram_self_refresh_or_dram_clock_change_in_vblank;
+ double max_vratio_pre;
};
/**
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
index 8e6585dab20e..f9653f511baa 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
@@ -202,6 +202,7 @@ dml_get_pipe_attr_func(vm_group_size_in_bytes, mode_lib->vba.vm_group_bytes);
dml_get_pipe_attr_func(dpte_row_height_linear_l, mode_lib->vba.dpte_row_height_linear);
dml_get_pipe_attr_func(pte_buffer_mode, mode_lib->vba.PTE_BUFFER_MODE);
dml_get_pipe_attr_func(subviewport_lines_needed_in_mall, mode_lib->vba.SubViewportLinesNeededInMALL);
+dml_get_pipe_attr_func(surface_size_in_mall, mode_lib->vba.SurfaceSizeInMALL)
double get_total_immediate_flip_bytes(
struct display_mode_lib *mode_lib,
@@ -411,6 +412,7 @@ static void fetch_socbb_params(struct display_mode_lib *mode_lib)
soc->urgent_latency_adjustment_fabric_clock_component_us;
mode_lib->vba.UrgentLatencyAdjustmentFabricClockReference =
soc->urgent_latency_adjustment_fabric_clock_reference_mhz;
+ mode_lib->vba.MaxVRatioPre = soc->max_vratio_pre;
}
static void fetch_ip_params(struct display_mode_lib *mode_lib)
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
index 81e53e67cd0b..07993741f5e6 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
@@ -143,6 +143,7 @@ dml_get_pipe_attr_decl(vready_at_or_after_vsync);
dml_get_pipe_attr_decl(min_dst_y_next_start);
dml_get_pipe_attr_decl(vstartup_calculated);
dml_get_pipe_attr_decl(subviewport_lines_needed_in_mall);
+dml_get_pipe_attr_decl(surface_size_in_mall);
double get_total_immediate_flip_bytes(
struct display_mode_lib *mode_lib,
@@ -262,6 +263,7 @@ struct vba_vars_st {
int maxMpcComb;
bool UseMaximumVStartup;
+ double MaxVRatioPre;
double WritebackDISPCLK;
double DPPCLKUsingSingleDPPLuma;
double DPPCLKUsingSingleDPPChroma;
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dscc_types.h b/drivers/gpu/drm/amd/display/dc/dsc/dscc_types.h
index ad80bde9bc0f..31574940ccc7 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dscc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dscc_types.h
@@ -46,7 +46,10 @@ struct dsc_parameters {
uint32_t rc_buffer_model_size;
};
-int dscc_compute_dsc_parameters(const struct drm_dsc_config *pps, struct dsc_parameters *dsc_params);
+struct rc_params;
+int dscc_compute_dsc_parameters(const struct drm_dsc_config *pps,
+ const struct rc_params *rc,
+ struct dsc_parameters *dsc_params);
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c
index f0aea988fef0..36d6c1646a51 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c
@@ -95,19 +95,19 @@ static void copy_rc_to_cfg(struct drm_dsc_config *dsc_cfg, const struct rc_param
dsc_cfg->rc_buf_thresh[i] = rc->rc_buf_thresh[i];
}
-int dscc_compute_dsc_parameters(const struct drm_dsc_config *pps, struct dsc_parameters *dsc_params)
+int dscc_compute_dsc_parameters(const struct drm_dsc_config *pps,
+ const struct rc_params *rc,
+ struct dsc_parameters *dsc_params)
{
int ret;
- struct rc_params rc;
struct drm_dsc_config dsc_cfg;
unsigned long long tmp;
- calc_rc_params(&rc, pps);
dsc_params->pps = *pps;
- dsc_params->pps.initial_scale_value = 8 * rc.rc_model_size / (rc.rc_model_size - rc.initial_fullness_offset);
+ dsc_params->pps.initial_scale_value = 8 * rc->rc_model_size / (rc->rc_model_size - rc->initial_fullness_offset);
copy_pps_fields(&dsc_cfg, &dsc_params->pps);
- copy_rc_to_cfg(&dsc_cfg, &rc);
+ copy_rc_to_cfg(&dsc_cfg, rc);
dsc_cfg.mux_word_size = dsc_params->pps.bits_per_component <= 10 ? 48 : 64;
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.c b/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.c
index 9b63c6c0cc84..e0bd0c722e00 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.c
@@ -138,7 +138,8 @@ static const struct ddc_sh_mask ddc_shift[] = {
DDC_MASK_SH_LIST_DCN2(__SHIFT, 3),
DDC_MASK_SH_LIST_DCN2(__SHIFT, 4),
DDC_MASK_SH_LIST_DCN2(__SHIFT, 5),
- DDC_MASK_SH_LIST_DCN2(__SHIFT, 6)
+ DDC_MASK_SH_LIST_DCN2(__SHIFT, 6),
+ DDC_MASK_SH_LIST_DCN2_VGA(__SHIFT)
};
static const struct ddc_sh_mask ddc_mask[] = {
@@ -147,7 +148,8 @@ static const struct ddc_sh_mask ddc_mask[] = {
DDC_MASK_SH_LIST_DCN2(_MASK, 3),
DDC_MASK_SH_LIST_DCN2(_MASK, 4),
DDC_MASK_SH_LIST_DCN2(_MASK, 5),
- DDC_MASK_SH_LIST_DCN2(_MASK, 6)
+ DDC_MASK_SH_LIST_DCN2(_MASK, 6),
+ DDC_MASK_SH_LIST_DCN2_VGA(_MASK)
};
#include "../generic_regs.h"
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn30/hw_factory_dcn30.c b/drivers/gpu/drm/amd/display/dc/gpio/dcn30/hw_factory_dcn30.c
index 687d4f128480..36a5736c58c9 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/dcn30/hw_factory_dcn30.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn30/hw_factory_dcn30.c
@@ -145,7 +145,8 @@ static const struct ddc_sh_mask ddc_shift[] = {
DDC_MASK_SH_LIST_DCN2(__SHIFT, 3),
DDC_MASK_SH_LIST_DCN2(__SHIFT, 4),
DDC_MASK_SH_LIST_DCN2(__SHIFT, 5),
- DDC_MASK_SH_LIST_DCN2(__SHIFT, 6)
+ DDC_MASK_SH_LIST_DCN2(__SHIFT, 6),
+ DDC_MASK_SH_LIST_DCN2_VGA(__SHIFT)
};
static const struct ddc_sh_mask ddc_mask[] = {
@@ -154,7 +155,8 @@ static const struct ddc_sh_mask ddc_mask[] = {
DDC_MASK_SH_LIST_DCN2(_MASK, 3),
DDC_MASK_SH_LIST_DCN2(_MASK, 4),
DDC_MASK_SH_LIST_DCN2(_MASK, 5),
- DDC_MASK_SH_LIST_DCN2(_MASK, 6)
+ DDC_MASK_SH_LIST_DCN2(_MASK, 6),
+ DDC_MASK_SH_LIST_DCN2_VGA(_MASK)
};
#include "../generic_regs.h"
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn32/hw_factory_dcn32.c b/drivers/gpu/drm/amd/display/dc/gpio/dcn32/hw_factory_dcn32.c
index 9fd8b269dd79..985f10b39750 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/dcn32/hw_factory_dcn32.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn32/hw_factory_dcn32.c
@@ -149,7 +149,8 @@ static const struct ddc_sh_mask ddc_shift[] = {
DDC_MASK_SH_LIST_DCN2(__SHIFT, 3),
DDC_MASK_SH_LIST_DCN2(__SHIFT, 4),
DDC_MASK_SH_LIST_DCN2(__SHIFT, 5),
- DDC_MASK_SH_LIST_DCN2(__SHIFT, 6)
+ DDC_MASK_SH_LIST_DCN2(__SHIFT, 6),
+ DDC_MASK_SH_LIST_DCN2_VGA(__SHIFT)
};
static const struct ddc_sh_mask ddc_mask[] = {
@@ -158,7 +159,8 @@ static const struct ddc_sh_mask ddc_mask[] = {
DDC_MASK_SH_LIST_DCN2(_MASK, 3),
DDC_MASK_SH_LIST_DCN2(_MASK, 4),
DDC_MASK_SH_LIST_DCN2(_MASK, 5),
- DDC_MASK_SH_LIST_DCN2(_MASK, 6)
+ DDC_MASK_SH_LIST_DCN2(_MASK, 6),
+ DDC_MASK_SH_LIST_DCN2_VGA(_MASK)
};
#include "../generic_regs.h"
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h b/drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h
index 308a543178a5..59884ef651b3 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h
+++ b/drivers/gpu/drm/amd/display/dc/gpio/ddc_regs.h
@@ -113,6 +113,13 @@
(PHY_AUX_CNTL__AUX## cd ##_PAD_RXSEL## mask_sh),\
(DC_GPIO_AUX_CTRL_5__DDC_PAD## cd ##_I2CMODE## mask_sh)}
+#define DDC_MASK_SH_LIST_DCN2_VGA(mask_sh) \
+ {DDC_MASK_SH_LIST_COMMON(mask_sh),\
+ 0,\
+ 0,\
+ 0,\
+ 0}
+
struct ddc_registers {
struct gpio_registers gpio;
uint32_t ddc_setup;
diff --git a/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c b/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c
index 4233955e3c47..e1422e5e86c9 100644
--- a/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c
+++ b/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c
@@ -28,12 +28,11 @@
#include "dm_services.h"
#include "dm_helpers.h"
#include "include/hdcp_types.h"
-#include "include/i2caux_interface.h"
#include "include/signal_types.h"
#include "core_types.h"
-#include "dc_link_ddc.h"
+#include "link.h"
#include "link_hwss.h"
-#include "inc/link_dpcd.h"
+#include "link/protocols/link_dpcd.h"
#define DC_LOGGER \
link->ctx->logger
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index 525f8f0b8732..ed3c03108da6 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -56,33 +56,6 @@ void enable_surface_flip_reporting(struct dc_plane_state *plane_state,
#endif
#include "link_hwss.h"
-/************ link *****************/
-struct link_init_data {
- const struct dc *dc;
- struct dc_context *ctx; /* TODO: remove 'dal' when DC is complete. */
- uint32_t connector_index; /* this will be mapped to the HPD pins */
- uint32_t link_index; /* this is mapped to DAL display_index
- TODO: remove it when DC is complete. */
- bool is_dpia_link;
-};
-
-struct dc_link *link_create(const struct link_init_data *init_params);
-void link_destroy(struct dc_link **link);
-
-enum dc_status dc_link_validate_mode_timing(
- const struct dc_stream_state *stream,
- struct dc_link *link,
- const struct dc_crtc_timing *timing);
-
-void core_link_resume(struct dc_link *link);
-
-void core_link_enable_stream(
- struct dc_state *state,
- struct pipe_ctx *pipe_ctx);
-
-void core_link_disable_stream(struct pipe_ctx *pipe_ctx);
-
-void core_link_set_avmute(struct pipe_ctx *pipe_ctx, bool enable);
/********** DAL Core*********************/
#include "transform.h"
#include "dpp.h"
@@ -450,10 +423,11 @@ struct pipe_ctx {
struct _vcs_dpi_display_e2e_pipe_params_st dml_input;
int det_buffer_size_kb;
bool unbounded_req;
+ unsigned int surface_size_in_mall_bytes;
- union pipe_update_flags update_flags;
struct dwbc *dwbc;
struct mcif_wb *mcif_wb;
+ union pipe_update_flags update_flags;
};
/* Data used for dynamic link encoder assignment.
@@ -507,6 +481,9 @@ struct dcn_bw_output {
struct dcn_watermark_set watermarks;
struct dcn_bw_writeback bw_writeback;
int compbuf_size_kb;
+ unsigned int mall_ss_size_bytes;
+ unsigned int mall_ss_psr_active_size_bytes;
+ unsigned int mall_subvp_size_bytes;
unsigned int legacy_svp_drr_stream_index;
bool legacy_svp_drr_stream_index_valid;
};
@@ -547,15 +524,6 @@ struct dc_state {
struct resource_context res_ctx;
/**
- * @bw_ctx: The output from bandwidth and watermark calculations and the DML
- *
- * Each context must have its own instance of VBA, and in order to
- * initialize and obtain IP and SOC, the base DML instance from DC is
- * initially copied into every context.
- */
- struct bw_context bw_ctx;
-
- /**
* @pp_display_cfg: PowerPlay clocks and settings
* Note: this is a big struct, do *not* put on stack!
*/
@@ -570,6 +538,15 @@ struct dc_state {
struct clk_mgr *clk_mgr;
/**
+ * @bw_ctx: The output from bandwidth and watermark calculations and the DML
+ *
+ * Each context must have its own instance of VBA, and in order to
+ * initialize and obtain IP and SOC, the base DML instance from DC is
+ * initially copied into every context.
+ */
+ struct bw_context bw_ctx;
+
+ /**
* @refcount: refcount reference
*
* Notice that dc_state is used around the code to capture the current
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
deleted file mode 100644
index 95fb61d62778..000000000000
--- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
+++ /dev/null
@@ -1,133 +0,0 @@
-/*
- * Copyright 2012-15 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: AMD
- *
- */
-
-#ifndef __DAL_DDC_SERVICE_H__
-#define __DAL_DDC_SERVICE_H__
-
-#include "include/ddc_service_types.h"
-#include "include/i2caux_interface.h"
-
-#define EDID_SEGMENT_SIZE 256
-
-/* Address range from 0x00 to 0x1F.*/
-#define DP_ADAPTOR_TYPE2_SIZE 0x20
-#define DP_ADAPTOR_TYPE2_REG_ID 0x10
-#define DP_ADAPTOR_TYPE2_REG_MAX_TMDS_CLK 0x1D
-/* Identifies adaptor as Dual-mode adaptor */
-#define DP_ADAPTOR_TYPE2_ID 0xA0
-/* MHz*/
-#define DP_ADAPTOR_TYPE2_MAX_TMDS_CLK 600
-/* MHz*/
-#define DP_ADAPTOR_TYPE2_MIN_TMDS_CLK 25
-/* kHZ*/
-#define DP_ADAPTOR_DVI_MAX_TMDS_CLK 165000
-/* kHZ*/
-#define DP_ADAPTOR_HDMI_SAFE_MAX_TMDS_CLK 165000
-
-#define DDC_I2C_COMMAND_ENGINE I2C_COMMAND_ENGINE_SW
-
-struct ddc_service;
-struct graphics_object_id;
-enum ddc_result;
-struct av_sync_data;
-struct dp_receiver_id_info;
-
-struct i2c_payloads;
-struct aux_payloads;
-enum aux_return_code_type;
-
-void dal_ddc_i2c_payloads_add(
- struct i2c_payloads *payloads,
- uint32_t address,
- uint32_t len,
- uint8_t *data,
- bool write);
-
-struct ddc_service_init_data {
- struct graphics_object_id id;
- struct dc_context *ctx;
- struct dc_link *link;
- bool is_dpia_link;
-};
-
-struct ddc_service *dal_ddc_service_create(
- struct ddc_service_init_data *ddc_init_data);
-
-void dal_ddc_service_destroy(struct ddc_service **ddc);
-
-enum ddc_service_type dal_ddc_service_get_type(struct ddc_service *ddc);
-
-void dal_ddc_service_set_transaction_type(
- struct ddc_service *ddc,
- enum ddc_transaction_type type);
-
-bool dal_ddc_service_is_in_aux_transaction_mode(struct ddc_service *ddc);
-
-void dal_ddc_service_i2c_query_dp_dual_mode_adaptor(
- struct ddc_service *ddc,
- struct display_sink_capability *sink_cap);
-
-bool dal_ddc_service_query_ddc_data(
- struct ddc_service *ddc,
- uint32_t address,
- uint8_t *write_buf,
- uint32_t write_size,
- uint8_t *read_buf,
- uint32_t read_size);
-
-bool dal_ddc_submit_aux_command(struct ddc_service *ddc,
- struct aux_payload *payload);
-
-int dc_link_aux_transfer_raw(struct ddc_service *ddc,
- struct aux_payload *payload,
- enum aux_return_code_type *operation_result);
-
-bool dc_link_aux_transfer_with_retries(struct ddc_service *ddc,
- struct aux_payload *payload);
-
-bool dc_link_aux_try_to_configure_timeout(struct ddc_service *ddc,
- uint32_t timeout);
-
-void dal_ddc_service_write_scdc_data(
- struct ddc_service *ddc_service,
- uint32_t pix_clk,
- bool lte_340_scramble);
-
-void dal_ddc_service_read_scdc_data(
- struct ddc_service *ddc_service);
-
-void ddc_service_set_dongle_type(struct ddc_service *ddc,
- enum display_dongle_type dongle_type);
-
-void dal_ddc_service_set_ddc_pin(
- struct ddc_service *ddc_service,
- struct ddc *ddc);
-
-struct ddc *dal_ddc_service_get_ddc_pin(struct ddc_service *ddc_service);
-
-uint32_t get_defer_delay(struct ddc_service *ddc);
-
-#endif /* __DAL_DDC_SERVICE_H__ */
-
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
deleted file mode 100644
index e8d8c5cb1309..000000000000
--- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
+++ /dev/null
@@ -1,267 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: AMD
- *
- */
-
-#ifndef __DC_LINK_DP_H__
-#define __DC_LINK_DP_H__
-
-#define LINK_TRAINING_ATTEMPTS 4
-#define LINK_TRAINING_RETRY_DELAY 50 /* ms */
-#define LINK_AUX_DEFAULT_LTTPR_TIMEOUT_PERIOD 3200 /*us*/
-#define LINK_AUX_DEFAULT_TIMEOUT_PERIOD 552 /*us*/
-#define MAX_MTP_SLOT_COUNT 64
-#define DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE 0x50
-#define TRAINING_AUX_RD_INTERVAL 100 //us
-#define LINK_AUX_WAKE_TIMEOUT_MS 1500 // Timeout when trying to wake unresponsive DPRX.
-
-struct dc_link;
-struct dc_stream_state;
-struct dc_link_settings;
-
-enum {
- LINK_TRAINING_MAX_RETRY_COUNT = 5,
- /* to avoid infinite loop where-in the receiver
- * switches between different VS
- */
- LINK_TRAINING_MAX_CR_RETRY = 100,
- /*
- * Some receivers fail to train on first try and are good
- * on subsequent tries. 2 retries should be plenty. If we
- * don't have a successful training then we don't expect to
- * ever get one.
- */
- LINK_TRAINING_MAX_VERIFY_RETRY = 2,
- PEAK_FACTOR_X1000 = 1006,
-};
-
-struct dc_link_settings dp_get_max_link_cap(struct dc_link *link);
-
-bool dp_verify_link_cap_with_retries(
- struct dc_link *link,
- struct dc_link_settings *known_limit_link_setting,
- int attempts);
-
-bool dp_validate_mode_timing(
- struct dc_link *link,
- const struct dc_crtc_timing *timing);
-
-bool decide_edp_link_settings(struct dc_link *link,
- struct dc_link_settings *link_setting,
- uint32_t req_bw);
-
-bool decide_link_settings(
- struct dc_stream_state *stream,
- struct dc_link_settings *link_setting);
-
-bool perform_link_training_with_retries(
- const struct dc_link_settings *link_setting,
- bool skip_video_pattern,
- int attempts,
- struct pipe_ctx *pipe_ctx,
- enum signal_type signal,
- bool do_fallback);
-
-bool hpd_rx_irq_check_link_loss_status(
- struct dc_link *link,
- union hpd_irq_data *hpd_irq_dpcd_data);
-
-bool is_mst_supported(struct dc_link *link);
-
-bool detect_dp_sink_caps(struct dc_link *link);
-
-void detect_edp_sink_caps(struct dc_link *link);
-
-bool is_dp_active_dongle(const struct dc_link *link);
-
-bool is_dp_branch_device(const struct dc_link *link);
-
-bool is_edp_ilr_optimization_required(struct dc_link *link, struct dc_crtc_timing *crtc_timing);
-
-void dp_enable_mst_on_sink(struct dc_link *link, bool enable);
-
-enum dp_panel_mode dp_get_panel_mode(struct dc_link *link);
-void dp_set_panel_mode(struct dc_link *link, enum dp_panel_mode panel_mode);
-
-bool dp_overwrite_extended_receiver_cap(struct dc_link *link);
-
-void dpcd_set_source_specific_data(struct dc_link *link);
-
-void dpcd_write_cable_id_to_dprx(struct dc_link *link);
-
-/* Write DPCD link configuration data. */
-enum dc_status dpcd_set_link_settings(
- struct dc_link *link,
- const struct link_training_settings *lt_settings);
-/* Write DPCD drive settings. */
-enum dc_status dpcd_set_lane_settings(
- struct dc_link *link,
- const struct link_training_settings *link_training_setting,
- uint32_t offset);
-/* Read training status and adjustment requests from DPCD. */
-enum dc_status dp_get_lane_status_and_lane_adjust(
- struct dc_link *link,
- const struct link_training_settings *link_training_setting,
- union lane_status ln_status[LANE_COUNT_DP_MAX],
- union lane_align_status_updated *ln_align,
- union lane_adjust ln_adjust[LANE_COUNT_DP_MAX],
- uint32_t offset);
-
-void dp_wait_for_training_aux_rd_interval(
- struct dc_link *link,
- uint32_t wait_in_micro_secs);
-
-bool dp_is_cr_done(enum dc_lane_count ln_count,
- union lane_status *dpcd_lane_status);
-
-enum link_training_result dp_get_cr_failure(enum dc_lane_count ln_count,
- union lane_status *dpcd_lane_status);
-
-bool dp_is_ch_eq_done(enum dc_lane_count ln_count,
- union lane_status *dpcd_lane_status);
-bool dp_is_symbol_locked(enum dc_lane_count ln_count,
- union lane_status *dpcd_lane_status);
-bool dp_is_interlane_aligned(union lane_align_status_updated align_status);
-
-bool dp_is_max_vs_reached(
- const struct link_training_settings *lt_settings);
-void dp_hw_to_dpcd_lane_settings(
- const struct link_training_settings *lt_settings,
- const struct dc_lane_settings hw_lane_settings[LANE_COUNT_DP_MAX],
- union dpcd_training_lane dpcd_lane_settings[]);
-void dp_decide_lane_settings(
- const struct link_training_settings *lt_settings,
- const union lane_adjust ln_adjust[LANE_COUNT_DP_MAX],
- struct dc_lane_settings hw_lane_settings[LANE_COUNT_DP_MAX],
- union dpcd_training_lane dpcd_lane_settings[]);
-
-uint32_t dp_translate_training_aux_read_interval(uint32_t dpcd_aux_read_interval);
-
-enum dpcd_training_patterns
- dc_dp_training_pattern_to_dpcd_training_pattern(
- struct dc_link *link,
- enum dc_dp_training_pattern pattern);
-
-uint8_t dc_dp_initialize_scrambling_data_symbols(
- struct dc_link *link,
- enum dc_dp_training_pattern pattern);
-
-enum dc_status dp_set_fec_ready(struct dc_link *link, const struct link_resource *link_res, bool ready);
-void dp_set_fec_enable(struct dc_link *link, bool enable);
-bool dp_set_dsc_enable(struct pipe_ctx *pipe_ctx, bool enable);
-bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable, bool immediate_update);
-void dp_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable);
-bool dp_update_dsc_config(struct pipe_ctx *pipe_ctx);
-bool dp_set_dsc_on_rx(struct pipe_ctx *pipe_ctx, bool enable);
-
-/* Initialize output parameter lt_settings. */
-void dp_decide_training_settings(
- struct dc_link *link,
- const struct dc_link_settings *link_setting,
- struct link_training_settings *lt_settings);
-
-/* Convert PHY repeater count read from DPCD uint8_t. */
-uint8_t dp_convert_to_count(uint8_t lttpr_repeater_count);
-
-/* Check DPCD training status registers to detect link loss. */
-enum link_training_result dp_check_link_loss_status(
- struct dc_link *link,
- const struct link_training_settings *link_training_setting);
-
-enum dc_status dpcd_configure_lttpr_mode(
- struct dc_link *link,
- struct link_training_settings *lt_settings);
-
-enum dp_link_encoding dp_get_link_encoding_format(const struct dc_link_settings *link_settings);
-enum dc_status dp_retrieve_lttpr_cap(struct dc_link *link);
-bool dp_is_lttpr_present(struct dc_link *link);
-enum lttpr_mode dp_decide_lttpr_mode(struct dc_link *link, struct dc_link_settings *link_setting);
-void dp_get_lttpr_mode_override(struct dc_link *link, enum lttpr_mode *override);
-enum lttpr_mode dp_decide_8b_10b_lttpr_mode(struct dc_link *link);
-enum lttpr_mode dp_decide_128b_132b_lttpr_mode(struct dc_link *link);
-bool dpcd_write_128b_132b_sst_payload_allocation_table(
- const struct dc_stream_state *stream,
- struct dc_link *link,
- struct link_mst_stream_allocation_table *proposed_table,
- bool allocate);
-
-enum dc_status dpcd_configure_channel_coding(
- struct dc_link *link,
- struct link_training_settings *lt_settings);
-
-bool dpcd_poll_for_allocation_change_trigger(struct dc_link *link);
-
-struct fixed31_32 calculate_sst_avg_time_slots_per_mtp(
- const struct dc_stream_state *stream,
- const struct dc_link *link);
-void enable_dp_hpo_output(struct dc_link *link,
- const struct link_resource *link_res,
- const struct dc_link_settings *link_settings);
-void disable_dp_hpo_output(struct dc_link *link,
- const struct link_resource *link_res,
- enum signal_type signal);
-
-void setup_dp_hpo_stream(struct pipe_ctx *pipe_ctx, bool enable);
-bool is_dp_128b_132b_signal(struct pipe_ctx *pipe_ctx);
-void edp_panel_backlight_power_on(struct dc_link *link, bool wait_for_hpd);
-void dp_receiver_power_ctrl(struct dc_link *link, bool on);
-void dp_source_sequence_trace(struct dc_link *link, uint8_t dp_test_mode);
-void dp_enable_link_phy(
- struct dc_link *link,
- const struct link_resource *link_res,
- enum signal_type signal,
- enum clock_source_id clock_source,
- const struct dc_link_settings *link_settings);
-void edp_add_delay_for_T9(struct dc_link *link);
-bool edp_receiver_ready_T9(struct dc_link *link);
-bool edp_receiver_ready_T7(struct dc_link *link);
-
-void dp_disable_link_phy(struct dc_link *link, const struct link_resource *link_res,
- enum signal_type signal);
-
-void dp_disable_link_phy_mst(struct dc_link *link, const struct link_resource *link_res,
- enum signal_type signal);
-
-bool dp_set_hw_training_pattern(
- struct dc_link *link,
- const struct link_resource *link_res,
- enum dc_dp_training_pattern pattern,
- uint32_t offset);
-
-void dp_set_hw_lane_settings(
- struct dc_link *link,
- const struct link_resource *link_res,
- const struct link_training_settings *link_settings,
- uint32_t offset);
-
-void dp_set_hw_test_pattern(
- struct dc_link *link,
- const struct link_resource *link_res,
- enum dp_test_pattern test_pattern,
- uint8_t *custom_pattern,
- uint32_t custom_pattern_size);
-
-void dp_retrain_link_dp_test(struct dc_link *link,
- struct dc_link_settings *link_setting,
- bool skip_video_pattern);
-#endif /* __DC_LINK_DP_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dpia.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dpia.h
deleted file mode 100644
index 39c1d1d07357..000000000000
--- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dpia.h
+++ /dev/null
@@ -1,105 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright 2021 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: AMD
- *
- */
-
-#ifndef __DC_LINK_DPIA_H__
-#define __DC_LINK_DPIA_H__
-
-/* This module implements functionality for training DPIA links. */
-
-struct dc_link;
-struct dc_link_settings;
-
-/* The approximate time (us) it takes to transmit 9 USB4 DP clock sync packets. */
-#define DPIA_CLK_SYNC_DELAY 16000
-
-/* Extend interval between training status checks for manual testing. */
-#define DPIA_DEBUG_EXTENDED_AUX_RD_INTERVAL_US 60000000
-
-/** @note Can remove once DP tunneling registers in upstream include/drm/drm_dp_helper.h */
-/* DPCD DP Tunneling over USB4 */
-#define DP_TUNNELING_CAPABILITIES_SUPPORT 0xe000d
-#define DP_IN_ADAPTER_INFO 0xe000e
-#define DP_USB4_DRIVER_ID 0xe000f
-#define DP_USB4_ROUTER_TOPOLOGY_ID 0xe001b
-
-/* SET_CONFIG message types sent by driver. */
-enum dpia_set_config_type {
- DPIA_SET_CFG_SET_LINK = 0x01,
- DPIA_SET_CFG_SET_PHY_TEST_MODE = 0x05,
- DPIA_SET_CFG_SET_TRAINING = 0x18,
- DPIA_SET_CFG_SET_VSPE = 0x19
-};
-
-/* Training stages (TS) in SET_CONFIG(SET_TRAINING) message. */
-enum dpia_set_config_ts {
- DPIA_TS_DPRX_DONE = 0x00, /* Done training DPRX. */
- DPIA_TS_TPS1 = 0x01,
- DPIA_TS_TPS2 = 0x02,
- DPIA_TS_TPS3 = 0x03,
- DPIA_TS_TPS4 = 0x07,
- DPIA_TS_UFP_DONE = 0xff /* Done training DPTX-to-DPIA hop. */
-};
-
-/* SET_CONFIG message data associated with messages sent by driver. */
-union dpia_set_config_data {
- struct {
- uint8_t mode : 1;
- uint8_t reserved : 7;
- } set_link;
- struct {
- uint8_t stage;
- } set_training;
- struct {
- uint8_t swing : 2;
- uint8_t max_swing_reached : 1;
- uint8_t pre_emph : 2;
- uint8_t max_pre_emph_reached : 1;
- uint8_t reserved : 2;
- } set_vspe;
- uint8_t raw;
-};
-
-/* Read tunneling device capability from DPCD and update link capability
- * accordingly.
- */
-enum dc_status dpcd_get_tunneling_device_data(struct dc_link *link);
-
-/* Query hot plug status of USB4 DP tunnel.
- * Returns true if HPD high.
- */
-bool dc_link_dpia_query_hpd_status(struct dc_link *link);
-
-/* Train DP tunneling link for USB4 DPIA display endpoint.
- * DPIA equivalent of dc_link_dp_perfrorm_link_training.
- * Aborts link training upon detection of sink unplug.
- */
-enum link_training_result dc_link_dpia_perform_link_training(
- struct dc_link *link,
- const struct link_resource *link_res,
- const struct dc_link_settings *link_setting,
- bool skip_video_pattern);
-
-#endif /* __DC_LINK_DPIA_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h b/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h
index 2ae630bf2aee..7254182b7c72 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h
@@ -27,7 +27,6 @@
#define __DAL_AUX_ENGINE_H__
#include "dc_ddc_types.h"
-#include "include/i2caux_interface.h"
enum aux_return_code_type;
@@ -81,7 +80,12 @@ enum i2c_default_speed {
I2CAUX_DEFAULT_I2C_SW_SPEED = 50
};
-union aux_config;
+union aux_config {
+ struct {
+ uint32_t ALLOW_AUX_WHEN_HPD_LOW:1;
+ } bits;
+ uint32_t raw;
+};
struct aux_engine {
uint32_t inst;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
index 5b0265c0df61..beb26dc8a07f 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
@@ -187,6 +187,7 @@ struct hubbub_funcs {
void (*init_crb)(struct hubbub *hubbub);
void (*force_usr_retraining_allow)(struct hubbub *hubbub, bool allow);
void (*set_request_limit)(struct hubbub *hubbub, int memory_channel_count, int words_per_channel);
+ void (*dchubbub_init)(struct hubbub *hubbub);
};
struct hubbub {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
index 42db4b7b79fd..bb5ad70d4266 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
@@ -72,6 +72,12 @@ enum dynamic_metadata_mode {
dmdata_dolby_vision
};
+struct enc_sdp_line_num {
+ /* Adaptive Sync SDP */
+ bool adaptive_sync_line_num_valid;
+ uint32_t adaptive_sync_line_num;
+};
+
struct encoder_info_frame {
/* auxiliary video information */
struct dc_info_packet avi;
@@ -85,6 +91,9 @@ struct encoder_info_frame {
struct dc_info_packet vsc;
/* HDR Static MetaData */
struct dc_info_packet hdrsmd;
+ /* Adaptive Sync SDP*/
+ struct dc_info_packet adaptive_sync;
+ struct enc_sdp_line_num sdp_line_num;
};
struct encoder_unblank_param {
@@ -154,6 +163,10 @@ struct stream_encoder_funcs {
void (*stop_hdmi_info_packets)(
struct stream_encoder *enc);
+ void (*update_dp_info_packets_sdp_line_num)(
+ struct stream_encoder *enc,
+ struct encoder_info_frame *info_frame);
+
void (*update_dp_info_packets)(
struct stream_encoder *enc,
const struct encoder_info_frame *info_frame);
@@ -302,6 +315,10 @@ struct hpo_dp_stream_encoder_funcs {
bool compressed_format,
bool double_buffer_en);
+ void (*update_dp_info_packets_sdp_line_num)(
+ struct hpo_dp_stream_encoder *enc,
+ struct encoder_info_frame *info_frame);
+
void (*update_dp_info_packets)(
struct hpo_dp_stream_encoder *enc,
const struct encoder_info_frame *info_frame);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
index 0e42e721dd15..1d9f9c53d2bd 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
@@ -331,6 +331,7 @@ struct timing_generator_funcs {
uint32_t vtotal_change_limit);
void (*init_odm)(struct timing_generator *tg);
+ void (*wait_drr_doublebuffer_pending_clear)(struct timing_generator *tg);
};
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
index c43523f9ff6d..88ac723d10aa 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
@@ -266,6 +266,7 @@ struct hw_sequencer_funcs {
void (*apply_update_flags_for_phantom)(struct pipe_ctx *phantom_pipe);
void (*commit_subvp_config)(struct dc *dc, struct dc_state *context);
+ void (*enable_phantom_streams)(struct dc *dc, struct dc_state *context);
void (*subvp_pipe_control_lock)(struct dc *dc,
struct dc_state *context,
bool lock,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/link.h b/drivers/gpu/drm/amd/display/dc/inc/link.h
new file mode 100644
index 000000000000..e70fa0059223
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/link.h
@@ -0,0 +1,157 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_LINK_H__
+#define __DC_LINK_H__
+
+/* FILE POLICY AND INTENDED USAGE:
+ *
+ * This header declares link functions exposed to dc. All functions must have
+ * "link_" as prefix. For example link_run_my_function. This header is strictly
+ * private in dc and should never be included in other header files. dc
+ * components should include this header in their .c files in order to access
+ * functions in link folder. This file should never include any header files in
+ * link folder. If there is a need to expose a function declared in one of
+ * header files in side link folder, you need to move the function declaration
+ * into this file and prefix it with "link_".
+ */
+#include "core_types.h"
+#include "dc_link.h"
+
+struct link_init_data {
+ const struct dc *dc;
+ struct dc_context *ctx; /* TODO: remove 'dal' when DC is complete. */
+ uint32_t connector_index; /* this will be mapped to the HPD pins */
+ uint32_t link_index; /* this is mapped to DAL display_index
+ TODO: remove it when DC is complete. */
+ bool is_dpia_link;
+};
+
+struct dc_link *link_create(const struct link_init_data *init_params);
+void link_destroy(struct dc_link **link);
+
+// TODO - convert any function declarations below to function pointers
+struct gpio *link_get_hpd_gpio(struct dc_bios *dcb,
+ struct graphics_object_id link_id,
+ struct gpio_service *gpio_service);
+
+struct ddc_service_init_data {
+ struct graphics_object_id id;
+ struct dc_context *ctx;
+ struct dc_link *link;
+ bool is_dpia_link;
+};
+
+struct ddc_service *link_create_ddc_service(
+ struct ddc_service_init_data *ddc_init_data);
+
+void link_destroy_ddc_service(struct ddc_service **ddc);
+
+bool link_is_in_aux_transaction_mode(struct ddc_service *ddc);
+
+bool link_query_ddc_data(
+ struct ddc_service *ddc,
+ uint32_t address,
+ uint8_t *write_buf,
+ uint32_t write_size,
+ uint8_t *read_buf,
+ uint32_t read_size);
+
+
+/* Attempt to submit an aux payload, retrying on timeouts, defers, and busy
+ * states as outlined in the DP spec. Returns true if the request was
+ * successful.
+ *
+ * NOTE: The function requires explicit mutex on DM side in order to prevent
+ * potential race condition. DC components should call the dpcd read/write
+ * function in dm_helpers in order to access dpcd safely
+ */
+bool link_aux_transfer_with_retries_no_mutex(struct ddc_service *ddc,
+ struct aux_payload *payload);
+
+uint32_t link_get_aux_defer_delay(struct ddc_service *ddc);
+
+bool link_is_dp_128b_132b_signal(struct pipe_ctx *pipe_ctx);
+
+enum dp_link_encoding link_dp_get_encoding_format(
+ const struct dc_link_settings *link_settings);
+
+bool link_decide_link_settings(
+ struct dc_stream_state *stream,
+ struct dc_link_settings *link_setting);
+
+void link_dp_trace_set_edp_power_timestamp(struct dc_link *link,
+ bool power_up);
+uint64_t link_dp_trace_get_edp_poweron_timestamp(struct dc_link *link);
+uint64_t link_dp_trace_get_edp_poweroff_timestamp(struct dc_link *link);
+
+bool link_is_edp_ilr_optimization_required(struct dc_link *link,
+ struct dc_crtc_timing *crtc_timing);
+
+bool link_backlight_enable_aux(struct dc_link *link, bool enable);
+void link_edp_add_delay_for_T9(struct dc_link *link);
+bool link_edp_receiver_ready_T9(struct dc_link *link);
+bool link_edp_receiver_ready_T7(struct dc_link *link);
+bool link_power_alpm_dpcd_enable(struct dc_link *link, bool enable);
+bool link_set_sink_vtotal_in_psr_active(const struct dc_link *link,
+ uint16_t psr_vtotal_idle, uint16_t psr_vtotal_su);
+void link_get_psr_residency(const struct dc_link *link, uint32_t *residency);
+enum dc_status link_increase_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t req_pbn);
+enum dc_status link_reduce_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t req_pbn);
+void link_blank_all_dp_displays(struct dc *dc);
+void link_blank_all_edp_displays(struct dc *dc);
+void link_blank_dp_stream(struct dc_link *link, bool hw_init);
+void link_resume(struct dc_link *link);
+void link_set_dpms_on(
+ struct dc_state *state,
+ struct pipe_ctx *pipe_ctx);
+void link_set_dpms_off(struct pipe_ctx *pipe_ctx);
+void link_dp_source_sequence_trace(struct dc_link *link, uint8_t dp_test_mode);
+void link_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable);
+bool link_set_dsc_enable(struct pipe_ctx *pipe_ctx, bool enable);
+bool link_update_dsc_config(struct pipe_ctx *pipe_ctx);
+enum dc_status link_validate_mode_timing(
+ const struct dc_stream_state *stream,
+ struct dc_link *link,
+ const struct dc_crtc_timing *timing);
+bool link_detect(struct dc_link *link, enum dc_detect_reason reason);
+bool link_detect_connection_type(struct dc_link *link,
+ enum dc_connection_type *type);
+const struct dc_link_status *link_get_status(const struct dc_link *link);
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+/* return true if the connected receiver supports the hdcp version */
+bool link_is_hdcp14(struct dc_link *link, enum signal_type signal);
+bool link_is_hdcp22(struct dc_link *link, enum signal_type signal);
+#endif
+void link_clear_dprx_states(struct dc_link *link);
+bool link_reset_cur_dp_mst_topology(struct dc_link *link);
+uint32_t dp_link_bandwidth_kbps(
+ const struct dc_link *link,
+ const struct dc_link_settings *link_settings);
+uint32_t link_timing_bandwidth_kbps(const struct dc_crtc_timing *timing);
+void link_get_cur_res_map(const struct dc *dc, uint32_t *map);
+void link_restore_res_map(const struct dc *dc, uint32_t *map);
+
+#endif /* __DC_LINK_HPD_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h
index 5040836f404d..fa6da93caa88 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/resource.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h
@@ -165,10 +165,6 @@ bool resource_validate_attach_surfaces(
struct dc_state *context,
const struct resource_pool *pool);
-void resource_validate_ctx_update_pointer_after_copy(
- const struct dc_state *src_ctx,
- struct dc_state *dst_ctx);
-
enum dc_status resource_map_clock_resources(
const struct dc *dc,
struct dc_state *context,
@@ -236,4 +232,13 @@ bool dc_resource_acquire_secondary_pipe_for_mpc_odm(
struct pipe_ctx *pri_pipe,
struct pipe_ctx *sec_pipe,
bool odm);
+
+/* A test harness interface that modifies dp encoder resources in the given dc
+ * state and bypasses the need to revalidate. The interface assumes that the
+ * test harness interface is called with pre-validated link config stored in the
+ * pipe_ctx and updates dp encoder resources according to the link config.
+ */
+enum dc_status update_dp_encoder_resources_for_test_harness(const struct dc *dc,
+ struct dc_state *context,
+ struct pipe_ctx *pipe_ctx);
#endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_RESOURCE_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c b/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c
index 5f4f6dd79511..3c7cb3dc046b 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c
@@ -37,7 +37,7 @@
#include "soc15_hw_ip.h"
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
-enum dc_irq_source to_dal_irq_source_dcn201(
+static enum dc_irq_source to_dal_irq_source_dcn201(
struct irq_service *irq_service,
uint32_t src_id,
uint32_t ext_id)
@@ -136,11 +136,6 @@ static const struct irq_source_info_funcs vupdate_no_lock_irq_info_funcs = {
.ack = NULL
};
-static const struct irq_source_info_funcs dmub_outbox_irq_info_funcs = {
- .set = NULL,
- .ack = NULL
-};
-
#undef BASE_INNER
#define BASE_INNER(seg) DMU_BASE__INST0_SEG ## seg
diff --git a/drivers/gpu/drm/amd/display/dc/link/Makefile b/drivers/gpu/drm/amd/display/dc/link/Makefile
index 054c2a727eb2..40352d8d7648 100644
--- a/drivers/gpu/drm/amd/display/dc/link/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/link/Makefile
@@ -23,8 +23,41 @@
# It abstracts the control and status of back end pipe such as DIO, HPO, DPIA,
# PHY, HPD, DDC and etc).
-LINK = link_hwss_dio.o link_hwss_dpia.o link_hwss_hpo_dp.o link_dp_trace.o
+LINK = link_detection.o link_dpms.o link_factory.o link_resource.o \
+link_validation.o
-AMD_DAL_LINK = $(addprefix $(AMDDALPATH)/dc/link/,$(LINK))
+AMD_DAL_LINK = $(addprefix $(AMDDALPATH)/dc/link/, \
+$(LINK))
AMD_DISPLAY_FILES += $(AMD_DAL_LINK)
+###############################################################################
+# accessories
+###############################################################################
+LINK_ACCESSORIES = link_dp_trace.o link_dp_cts.o link_fpga.o
+
+AMD_DAL_LINK_ACCESSORIES = $(addprefix $(AMDDALPATH)/dc/link/accessories/, \
+$(LINK_ACCESSORIES))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_LINK_ACCESSORIES)
+###############################################################################
+# hwss
+###############################################################################
+LINK_HWSS = link_hwss_dio.o link_hwss_dpia.o link_hwss_hpo_dp.o
+
+AMD_DAL_LINK_HWSS = $(addprefix $(AMDDALPATH)/dc/link/hwss/, \
+$(LINK_HWSS))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_LINK_HWSS)
+###############################################################################
+# protocols
+###############################################################################
+LINK_PROTOCOLS = link_hpd.o link_ddc.o link_dpcd.o link_dp_dpia.o \
+link_dp_training.o link_dp_training_8b_10b.o link_dp_training_128b_132b.o \
+link_dp_training_dpia.o link_dp_training_auxless.o \
+link_dp_training_fixed_vs_pe_retimer.o link_dp_phy.o link_dp_capability.o \
+link_edp_panel_control.o link_dp_irq_handler.o
+
+AMD_DAL_LINK_PROTOCOLS = $(addprefix $(AMDDALPATH)/dc/link/protocols/, \
+$(LINK_PROTOCOLS))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_LINK_PROTOCOLS) \ No newline at end of file
diff --git a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c
new file mode 100644
index 000000000000..942300e0bd92
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c
@@ -0,0 +1,1046 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#include "link_dp_cts.h"
+#include "link/link_resource.h"
+#include "link/protocols/link_dpcd.h"
+#include "link/protocols/link_dp_training.h"
+#include "link/protocols/link_dp_phy.h"
+#include "link/protocols/link_dp_training_fixed_vs_pe_retimer.h"
+#include "link/link_dpms.h"
+#include "resource.h"
+#include "dm_helpers.h"
+#include "dc_dmub_srv.h"
+#include "dce/dmub_hw_lock_mgr.h"
+
+#define DC_LOGGER \
+ link->ctx->logger
+
+static enum dc_link_rate get_link_rate_from_test_link_rate(uint8_t test_rate)
+{
+ switch (test_rate) {
+ case DP_TEST_LINK_RATE_RBR:
+ return LINK_RATE_LOW;
+ case DP_TEST_LINK_RATE_HBR:
+ return LINK_RATE_HIGH;
+ case DP_TEST_LINK_RATE_HBR2:
+ return LINK_RATE_HIGH2;
+ case DP_TEST_LINK_RATE_HBR3:
+ return LINK_RATE_HIGH3;
+ case DP_TEST_LINK_RATE_UHBR10:
+ return LINK_RATE_UHBR10;
+ case DP_TEST_LINK_RATE_UHBR20:
+ return LINK_RATE_UHBR20;
+ case DP_TEST_LINK_RATE_UHBR13_5:
+ return LINK_RATE_UHBR13_5;
+ default:
+ return LINK_RATE_UNKNOWN;
+ }
+}
+
+static bool is_dp_phy_sqaure_pattern(enum dp_test_pattern test_pattern)
+{
+ return (DP_TEST_PATTERN_SQUARE_BEGIN <= test_pattern &&
+ test_pattern <= DP_TEST_PATTERN_SQUARE_END);
+}
+
+static bool is_dp_phy_pattern(enum dp_test_pattern test_pattern)
+{
+ if ((DP_TEST_PATTERN_PHY_PATTERN_BEGIN <= test_pattern &&
+ test_pattern <= DP_TEST_PATTERN_PHY_PATTERN_END) ||
+ test_pattern == DP_TEST_PATTERN_VIDEO_MODE)
+ return true;
+ else
+ return false;
+}
+
+void dp_retrain_link_dp_test(struct dc_link *link,
+ struct dc_link_settings *link_setting,
+ bool skip_video_pattern)
+{
+ struct pipe_ctx *pipes[MAX_PIPES];
+ struct dc_state *state = link->dc->current_state;
+ uint8_t count;
+ int i;
+
+ udelay(100);
+
+ link_get_master_pipes_with_dpms_on(link, state, &count, pipes);
+
+ for (i = 0; i < count; i++) {
+ link_set_dpms_off(pipes[i]);
+ pipes[i]->link_config.dp_link_settings = *link_setting;
+ update_dp_encoder_resources_for_test_harness(
+ link->dc,
+ state,
+ pipes[i]);
+ }
+
+ for (i = count-1; i >= 0; i--)
+ link_set_dpms_on(state, pipes[i]);
+}
+
+static void dp_test_send_link_training(struct dc_link *link)
+{
+ struct dc_link_settings link_settings = {0};
+ uint8_t test_rate = 0;
+
+ core_link_read_dpcd(
+ link,
+ DP_TEST_LANE_COUNT,
+ (unsigned char *)(&link_settings.lane_count),
+ 1);
+ core_link_read_dpcd(
+ link,
+ DP_TEST_LINK_RATE,
+ &test_rate,
+ 1);
+ link_settings.link_rate = get_link_rate_from_test_link_rate(test_rate);
+
+ /* Set preferred link settings */
+ link->verified_link_cap.lane_count = link_settings.lane_count;
+ link->verified_link_cap.link_rate = link_settings.link_rate;
+
+ dp_retrain_link_dp_test(link, &link_settings, false);
+}
+
+static void dp_test_get_audio_test_data(struct dc_link *link, bool disable_video)
+{
+ union audio_test_mode dpcd_test_mode = {0};
+ struct audio_test_pattern_type dpcd_pattern_type = {0};
+ union audio_test_pattern_period dpcd_pattern_period[AUDIO_CHANNELS_COUNT] = {0};
+ enum dp_test_pattern test_pattern = DP_TEST_PATTERN_AUDIO_OPERATOR_DEFINED;
+
+ struct pipe_ctx *pipes = link->dc->current_state->res_ctx.pipe_ctx;
+ struct pipe_ctx *pipe_ctx = &pipes[0];
+ unsigned int channel_count;
+ unsigned int channel = 0;
+ unsigned int modes = 0;
+ unsigned int sampling_rate_in_hz = 0;
+
+ // get audio test mode and test pattern parameters
+ core_link_read_dpcd(
+ link,
+ DP_TEST_AUDIO_MODE,
+ &dpcd_test_mode.raw,
+ sizeof(dpcd_test_mode));
+
+ core_link_read_dpcd(
+ link,
+ DP_TEST_AUDIO_PATTERN_TYPE,
+ &dpcd_pattern_type.value,
+ sizeof(dpcd_pattern_type));
+
+ channel_count = min(dpcd_test_mode.bits.channel_count + 1, AUDIO_CHANNELS_COUNT);
+
+ // read pattern periods for requested channels when sawTooth pattern is requested
+ if (dpcd_pattern_type.value == AUDIO_TEST_PATTERN_SAWTOOTH ||
+ dpcd_pattern_type.value == AUDIO_TEST_PATTERN_OPERATOR_DEFINED) {
+
+ test_pattern = (dpcd_pattern_type.value == AUDIO_TEST_PATTERN_SAWTOOTH) ?
+ DP_TEST_PATTERN_AUDIO_SAWTOOTH : DP_TEST_PATTERN_AUDIO_OPERATOR_DEFINED;
+ // read period for each channel
+ for (channel = 0; channel < channel_count; channel++) {
+ core_link_read_dpcd(
+ link,
+ DP_TEST_AUDIO_PERIOD_CH1 + channel,
+ &dpcd_pattern_period[channel].raw,
+ sizeof(dpcd_pattern_period[channel]));
+ }
+ }
+
+ // translate sampling rate
+ switch (dpcd_test_mode.bits.sampling_rate) {
+ case AUDIO_SAMPLING_RATE_32KHZ:
+ sampling_rate_in_hz = 32000;
+ break;
+ case AUDIO_SAMPLING_RATE_44_1KHZ:
+ sampling_rate_in_hz = 44100;
+ break;
+ case AUDIO_SAMPLING_RATE_48KHZ:
+ sampling_rate_in_hz = 48000;
+ break;
+ case AUDIO_SAMPLING_RATE_88_2KHZ:
+ sampling_rate_in_hz = 88200;
+ break;
+ case AUDIO_SAMPLING_RATE_96KHZ:
+ sampling_rate_in_hz = 96000;
+ break;
+ case AUDIO_SAMPLING_RATE_176_4KHZ:
+ sampling_rate_in_hz = 176400;
+ break;
+ case AUDIO_SAMPLING_RATE_192KHZ:
+ sampling_rate_in_hz = 192000;
+ break;
+ default:
+ sampling_rate_in_hz = 0;
+ break;
+ }
+
+ link->audio_test_data.flags.test_requested = 1;
+ link->audio_test_data.flags.disable_video = disable_video;
+ link->audio_test_data.sampling_rate = sampling_rate_in_hz;
+ link->audio_test_data.channel_count = channel_count;
+ link->audio_test_data.pattern_type = test_pattern;
+
+ if (test_pattern == DP_TEST_PATTERN_AUDIO_SAWTOOTH) {
+ for (modes = 0; modes < pipe_ctx->stream->audio_info.mode_count; modes++) {
+ link->audio_test_data.pattern_period[modes] = dpcd_pattern_period[modes].bits.pattern_period;
+ }
+ }
+}
+
+/* TODO Raven hbr2 compliance eye output is unstable
+ * (toggling on and off) with debugger break
+ * This caueses intermittent PHY automation failure
+ * Need to look into the root cause */
+static void dp_test_send_phy_test_pattern(struct dc_link *link)
+{
+ union phy_test_pattern dpcd_test_pattern;
+ union lane_adjust dpcd_lane_adjustment[2];
+ unsigned char dpcd_post_cursor_2_adjustment = 0;
+ unsigned char test_pattern_buffer[
+ (DP_TEST_264BIT_CUSTOM_PATTERN_263_256 -
+ DP_TEST_264BIT_CUSTOM_PATTERN_7_0)+1] = {0};
+ unsigned int test_pattern_size = 0;
+ enum dp_test_pattern test_pattern;
+ union lane_adjust dpcd_lane_adjust;
+ unsigned int lane;
+ struct link_training_settings link_training_settings;
+ unsigned char no_preshoot = 0;
+ unsigned char no_deemphasis = 0;
+
+ dpcd_test_pattern.raw = 0;
+ memset(dpcd_lane_adjustment, 0, sizeof(dpcd_lane_adjustment));
+ memset(&link_training_settings, 0, sizeof(link_training_settings));
+
+ /* get phy test pattern and pattern parameters from DP receiver */
+ core_link_read_dpcd(
+ link,
+ DP_PHY_TEST_PATTERN,
+ &dpcd_test_pattern.raw,
+ sizeof(dpcd_test_pattern));
+ core_link_read_dpcd(
+ link,
+ DP_ADJUST_REQUEST_LANE0_1,
+ &dpcd_lane_adjustment[0].raw,
+ sizeof(dpcd_lane_adjustment));
+
+ /* prepare link training settings */
+ link_training_settings.link_settings = link->cur_link_settings;
+
+ link_training_settings.lttpr_mode = dc_link_decide_lttpr_mode(link, &link->cur_link_settings);
+
+ if ((link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
+ link_training_settings.lttpr_mode == LTTPR_MODE_TRANSPARENT)
+ dp_fixed_vs_pe_read_lane_adjust(
+ link,
+ link_training_settings.dpcd_lane_settings);
+
+ /*get post cursor 2 parameters
+ * For DP 1.1a or eariler, this DPCD register's value is 0
+ * For DP 1.2 or later:
+ * Bits 1:0 = POST_CURSOR2_LANE0; Bits 3:2 = POST_CURSOR2_LANE1
+ * Bits 5:4 = POST_CURSOR2_LANE2; Bits 7:6 = POST_CURSOR2_LANE3
+ */
+ core_link_read_dpcd(
+ link,
+ DP_ADJUST_REQUEST_POST_CURSOR2,
+ &dpcd_post_cursor_2_adjustment,
+ sizeof(dpcd_post_cursor_2_adjustment));
+
+ /* translate request */
+ switch (dpcd_test_pattern.bits.PATTERN) {
+ case PHY_TEST_PATTERN_D10_2:
+ test_pattern = DP_TEST_PATTERN_D102;
+ break;
+ case PHY_TEST_PATTERN_SYMBOL_ERROR:
+ test_pattern = DP_TEST_PATTERN_SYMBOL_ERROR;
+ break;
+ case PHY_TEST_PATTERN_PRBS7:
+ test_pattern = DP_TEST_PATTERN_PRBS7;
+ break;
+ case PHY_TEST_PATTERN_80BIT_CUSTOM:
+ test_pattern = DP_TEST_PATTERN_80BIT_CUSTOM;
+ break;
+ case PHY_TEST_PATTERN_CP2520_1:
+ /* CP2520 pattern is unstable, temporarily use TPS4 instead */
+ test_pattern = (link->dc->caps.force_dp_tps4_for_cp2520 == 1) ?
+ DP_TEST_PATTERN_TRAINING_PATTERN4 :
+ DP_TEST_PATTERN_HBR2_COMPLIANCE_EYE;
+ break;
+ case PHY_TEST_PATTERN_CP2520_2:
+ /* CP2520 pattern is unstable, temporarily use TPS4 instead */
+ test_pattern = (link->dc->caps.force_dp_tps4_for_cp2520 == 1) ?
+ DP_TEST_PATTERN_TRAINING_PATTERN4 :
+ DP_TEST_PATTERN_HBR2_COMPLIANCE_EYE;
+ break;
+ case PHY_TEST_PATTERN_CP2520_3:
+ test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN4;
+ break;
+ case PHY_TEST_PATTERN_128b_132b_TPS1:
+ test_pattern = DP_TEST_PATTERN_128b_132b_TPS1;
+ break;
+ case PHY_TEST_PATTERN_128b_132b_TPS2:
+ test_pattern = DP_TEST_PATTERN_128b_132b_TPS2;
+ break;
+ case PHY_TEST_PATTERN_PRBS9:
+ test_pattern = DP_TEST_PATTERN_PRBS9;
+ break;
+ case PHY_TEST_PATTERN_PRBS11:
+ test_pattern = DP_TEST_PATTERN_PRBS11;
+ break;
+ case PHY_TEST_PATTERN_PRBS15:
+ test_pattern = DP_TEST_PATTERN_PRBS15;
+ break;
+ case PHY_TEST_PATTERN_PRBS23:
+ test_pattern = DP_TEST_PATTERN_PRBS23;
+ break;
+ case PHY_TEST_PATTERN_PRBS31:
+ test_pattern = DP_TEST_PATTERN_PRBS31;
+ break;
+ case PHY_TEST_PATTERN_264BIT_CUSTOM:
+ test_pattern = DP_TEST_PATTERN_264BIT_CUSTOM;
+ break;
+ case PHY_TEST_PATTERN_SQUARE:
+ test_pattern = DP_TEST_PATTERN_SQUARE;
+ break;
+ case PHY_TEST_PATTERN_SQUARE_PRESHOOT_DISABLED:
+ test_pattern = DP_TEST_PATTERN_SQUARE_PRESHOOT_DISABLED;
+ no_preshoot = 1;
+ break;
+ case PHY_TEST_PATTERN_SQUARE_DEEMPHASIS_DISABLED:
+ test_pattern = DP_TEST_PATTERN_SQUARE_DEEMPHASIS_DISABLED;
+ no_deemphasis = 1;
+ break;
+ case PHY_TEST_PATTERN_SQUARE_PRESHOOT_DEEMPHASIS_DISABLED:
+ test_pattern = DP_TEST_PATTERN_SQUARE_PRESHOOT_DEEMPHASIS_DISABLED;
+ no_preshoot = 1;
+ no_deemphasis = 1;
+ break;
+ default:
+ test_pattern = DP_TEST_PATTERN_VIDEO_MODE;
+ break;
+ }
+
+ if (test_pattern == DP_TEST_PATTERN_80BIT_CUSTOM) {
+ test_pattern_size = (DP_TEST_80BIT_CUSTOM_PATTERN_79_72 -
+ DP_TEST_80BIT_CUSTOM_PATTERN_7_0) + 1;
+ core_link_read_dpcd(
+ link,
+ DP_TEST_80BIT_CUSTOM_PATTERN_7_0,
+ test_pattern_buffer,
+ test_pattern_size);
+ }
+
+ if (is_dp_phy_sqaure_pattern(test_pattern)) {
+ test_pattern_size = 1; // Square pattern data is 1 byte (DP spec)
+ core_link_read_dpcd(
+ link,
+ DP_PHY_SQUARE_PATTERN,
+ test_pattern_buffer,
+ test_pattern_size);
+ }
+
+ if (test_pattern == DP_TEST_PATTERN_264BIT_CUSTOM) {
+ test_pattern_size = (DP_TEST_264BIT_CUSTOM_PATTERN_263_256-
+ DP_TEST_264BIT_CUSTOM_PATTERN_7_0) + 1;
+ core_link_read_dpcd(
+ link,
+ DP_TEST_264BIT_CUSTOM_PATTERN_7_0,
+ test_pattern_buffer,
+ test_pattern_size);
+ }
+
+ for (lane = 0; lane <
+ (unsigned int)(link->cur_link_settings.lane_count);
+ lane++) {
+ dpcd_lane_adjust.raw =
+ dp_get_nibble_at_index(&dpcd_lane_adjustment[0].raw, lane);
+ if (link_dp_get_encoding_format(&link->cur_link_settings) ==
+ DP_8b_10b_ENCODING) {
+ link_training_settings.hw_lane_settings[lane].VOLTAGE_SWING =
+ (enum dc_voltage_swing)
+ (dpcd_lane_adjust.bits.VOLTAGE_SWING_LANE);
+ link_training_settings.hw_lane_settings[lane].PRE_EMPHASIS =
+ (enum dc_pre_emphasis)
+ (dpcd_lane_adjust.bits.PRE_EMPHASIS_LANE);
+ link_training_settings.hw_lane_settings[lane].POST_CURSOR2 =
+ (enum dc_post_cursor2)
+ ((dpcd_post_cursor_2_adjustment >> (lane * 2)) & 0x03);
+ } else if (link_dp_get_encoding_format(&link->cur_link_settings) ==
+ DP_128b_132b_ENCODING) {
+ link_training_settings.hw_lane_settings[lane].FFE_PRESET.settings.level =
+ dpcd_lane_adjust.tx_ffe.PRESET_VALUE;
+ link_training_settings.hw_lane_settings[lane].FFE_PRESET.settings.no_preshoot = no_preshoot;
+ link_training_settings.hw_lane_settings[lane].FFE_PRESET.settings.no_deemphasis = no_deemphasis;
+ }
+ }
+
+ dp_hw_to_dpcd_lane_settings(&link_training_settings,
+ link_training_settings.hw_lane_settings,
+ link_training_settings.dpcd_lane_settings);
+ /*Usage: Measure DP physical lane signal
+ * by DP SI test equipment automatically.
+ * PHY test pattern request is generated by equipment via HPD interrupt.
+ * HPD needs to be active all the time. HPD should be active
+ * all the time. Do not touch it.
+ * forward request to DS
+ */
+ dc_link_dp_set_test_pattern(
+ link,
+ test_pattern,
+ DP_TEST_PATTERN_COLOR_SPACE_UNDEFINED,
+ &link_training_settings,
+ test_pattern_buffer,
+ test_pattern_size);
+}
+
+static void set_crtc_test_pattern(struct dc_link *link,
+ struct pipe_ctx *pipe_ctx,
+ enum dp_test_pattern test_pattern,
+ enum dp_test_pattern_color_space test_pattern_color_space)
+{
+ enum controller_dp_test_pattern controller_test_pattern;
+ enum dc_color_depth color_depth = pipe_ctx->
+ stream->timing.display_color_depth;
+ struct bit_depth_reduction_params params;
+ struct output_pixel_processor *opp = pipe_ctx->stream_res.opp;
+ int width = pipe_ctx->stream->timing.h_addressable +
+ pipe_ctx->stream->timing.h_border_left +
+ pipe_ctx->stream->timing.h_border_right;
+ int height = pipe_ctx->stream->timing.v_addressable +
+ pipe_ctx->stream->timing.v_border_bottom +
+ pipe_ctx->stream->timing.v_border_top;
+
+ memset(&params, 0, sizeof(params));
+
+ switch (test_pattern) {
+ case DP_TEST_PATTERN_COLOR_SQUARES:
+ controller_test_pattern =
+ CONTROLLER_DP_TEST_PATTERN_COLORSQUARES;
+ break;
+ case DP_TEST_PATTERN_COLOR_SQUARES_CEA:
+ controller_test_pattern =
+ CONTROLLER_DP_TEST_PATTERN_COLORSQUARES_CEA;
+ break;
+ case DP_TEST_PATTERN_VERTICAL_BARS:
+ controller_test_pattern =
+ CONTROLLER_DP_TEST_PATTERN_VERTICALBARS;
+ break;
+ case DP_TEST_PATTERN_HORIZONTAL_BARS:
+ controller_test_pattern =
+ CONTROLLER_DP_TEST_PATTERN_HORIZONTALBARS;
+ break;
+ case DP_TEST_PATTERN_COLOR_RAMP:
+ controller_test_pattern =
+ CONTROLLER_DP_TEST_PATTERN_COLORRAMP;
+ break;
+ default:
+ controller_test_pattern =
+ CONTROLLER_DP_TEST_PATTERN_VIDEOMODE;
+ break;
+ }
+
+ switch (test_pattern) {
+ case DP_TEST_PATTERN_COLOR_SQUARES:
+ case DP_TEST_PATTERN_COLOR_SQUARES_CEA:
+ case DP_TEST_PATTERN_VERTICAL_BARS:
+ case DP_TEST_PATTERN_HORIZONTAL_BARS:
+ case DP_TEST_PATTERN_COLOR_RAMP:
+ {
+ /* disable bit depth reduction */
+ pipe_ctx->stream->bit_depth_params = params;
+ opp->funcs->opp_program_bit_depth_reduction(opp, &params);
+ if (pipe_ctx->stream_res.tg->funcs->set_test_pattern)
+ pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg,
+ controller_test_pattern, color_depth);
+ else if (link->dc->hwss.set_disp_pattern_generator) {
+ struct pipe_ctx *odm_pipe;
+ enum controller_dp_color_space controller_color_space;
+ int opp_cnt = 1;
+ int offset = 0;
+ int dpg_width = width;
+
+ switch (test_pattern_color_space) {
+ case DP_TEST_PATTERN_COLOR_SPACE_RGB:
+ controller_color_space = CONTROLLER_DP_COLOR_SPACE_RGB;
+ break;
+ case DP_TEST_PATTERN_COLOR_SPACE_YCBCR601:
+ controller_color_space = CONTROLLER_DP_COLOR_SPACE_YCBCR601;
+ break;
+ case DP_TEST_PATTERN_COLOR_SPACE_YCBCR709:
+ controller_color_space = CONTROLLER_DP_COLOR_SPACE_YCBCR709;
+ break;
+ case DP_TEST_PATTERN_COLOR_SPACE_UNDEFINED:
+ default:
+ controller_color_space = CONTROLLER_DP_COLOR_SPACE_UDEFINED;
+ DC_LOG_ERROR("%s: Color space must be defined for test pattern", __func__);
+ ASSERT(0);
+ break;
+ }
+
+ for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
+ opp_cnt++;
+ dpg_width = width / opp_cnt;
+ offset = dpg_width;
+
+ link->dc->hwss.set_disp_pattern_generator(link->dc,
+ pipe_ctx,
+ controller_test_pattern,
+ controller_color_space,
+ color_depth,
+ NULL,
+ dpg_width,
+ height,
+ 0);
+
+ for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
+ struct output_pixel_processor *odm_opp = odm_pipe->stream_res.opp;
+
+ odm_opp->funcs->opp_program_bit_depth_reduction(odm_opp, &params);
+ link->dc->hwss.set_disp_pattern_generator(link->dc,
+ odm_pipe,
+ controller_test_pattern,
+ controller_color_space,
+ color_depth,
+ NULL,
+ dpg_width,
+ height,
+ offset);
+ offset += offset;
+ }
+ }
+ }
+ break;
+ case DP_TEST_PATTERN_VIDEO_MODE:
+ {
+ /* restore bitdepth reduction */
+ resource_build_bit_depth_reduction_params(pipe_ctx->stream, &params);
+ pipe_ctx->stream->bit_depth_params = params;
+ opp->funcs->opp_program_bit_depth_reduction(opp, &params);
+ if (pipe_ctx->stream_res.tg->funcs->set_test_pattern)
+ pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg,
+ CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
+ color_depth);
+ else if (link->dc->hwss.set_disp_pattern_generator) {
+ struct pipe_ctx *odm_pipe;
+ int opp_cnt = 1;
+ int dpg_width;
+
+ for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
+ opp_cnt++;
+
+ dpg_width = width / opp_cnt;
+ for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
+ struct output_pixel_processor *odm_opp = odm_pipe->stream_res.opp;
+
+ odm_opp->funcs->opp_program_bit_depth_reduction(odm_opp, &params);
+ link->dc->hwss.set_disp_pattern_generator(link->dc,
+ odm_pipe,
+ CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
+ CONTROLLER_DP_COLOR_SPACE_UDEFINED,
+ color_depth,
+ NULL,
+ dpg_width,
+ height,
+ 0);
+ }
+ link->dc->hwss.set_disp_pattern_generator(link->dc,
+ pipe_ctx,
+ CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
+ CONTROLLER_DP_COLOR_SPACE_UDEFINED,
+ color_depth,
+ NULL,
+ dpg_width,
+ height,
+ 0);
+ }
+ }
+ break;
+
+ default:
+ break;
+ }
+}
+
+void dc_link_dp_handle_automated_test(struct dc_link *link)
+{
+ union test_request test_request;
+ union test_response test_response;
+
+ memset(&test_request, 0, sizeof(test_request));
+ memset(&test_response, 0, sizeof(test_response));
+
+ core_link_read_dpcd(
+ link,
+ DP_TEST_REQUEST,
+ &test_request.raw,
+ sizeof(union test_request));
+ if (test_request.bits.LINK_TRAINING) {
+ /* ACK first to let DP RX test box monitor LT sequence */
+ test_response.bits.ACK = 1;
+ core_link_write_dpcd(
+ link,
+ DP_TEST_RESPONSE,
+ &test_response.raw,
+ sizeof(test_response));
+ dp_test_send_link_training(link);
+ /* no acknowledge request is needed again */
+ test_response.bits.ACK = 0;
+ }
+ if (test_request.bits.LINK_TEST_PATTRN) {
+ union test_misc dpcd_test_params;
+ union link_test_pattern dpcd_test_pattern;
+
+ memset(&dpcd_test_pattern, 0, sizeof(dpcd_test_pattern));
+ memset(&dpcd_test_params, 0, sizeof(dpcd_test_params));
+
+ /* get link test pattern and pattern parameters */
+ core_link_read_dpcd(
+ link,
+ DP_TEST_PATTERN,
+ &dpcd_test_pattern.raw,
+ sizeof(dpcd_test_pattern));
+ core_link_read_dpcd(
+ link,
+ DP_TEST_MISC0,
+ &dpcd_test_params.raw,
+ sizeof(dpcd_test_params));
+ test_response.bits.ACK = dm_helpers_dp_handle_test_pattern_request(link->ctx, link,
+ dpcd_test_pattern, dpcd_test_params) ? 1 : 0;
+ }
+
+ if (test_request.bits.AUDIO_TEST_PATTERN) {
+ dp_test_get_audio_test_data(link, test_request.bits.TEST_AUDIO_DISABLED_VIDEO);
+ test_response.bits.ACK = 1;
+ }
+
+ if (test_request.bits.PHY_TEST_PATTERN) {
+ dp_test_send_phy_test_pattern(link);
+ test_response.bits.ACK = 1;
+ }
+
+ /* send request acknowledgment */
+ if (test_response.bits.ACK)
+ core_link_write_dpcd(
+ link,
+ DP_TEST_RESPONSE,
+ &test_response.raw,
+ sizeof(test_response));
+}
+
+bool dc_link_dp_set_test_pattern(
+ struct dc_link *link,
+ enum dp_test_pattern test_pattern,
+ enum dp_test_pattern_color_space test_pattern_color_space,
+ const struct link_training_settings *p_link_settings,
+ const unsigned char *p_custom_pattern,
+ unsigned int cust_pattern_size)
+{
+ struct pipe_ctx *pipes = link->dc->current_state->res_ctx.pipe_ctx;
+ struct pipe_ctx *pipe_ctx = NULL;
+ unsigned int lane;
+ unsigned int i;
+ unsigned char link_qual_pattern[LANE_COUNT_DP_MAX] = {0};
+ union dpcd_training_pattern training_pattern;
+ enum dpcd_phy_test_patterns pattern;
+
+ memset(&training_pattern, 0, sizeof(training_pattern));
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (pipes[i].stream == NULL)
+ continue;
+
+ if (pipes[i].stream->link == link && !pipes[i].top_pipe && !pipes[i].prev_odm_pipe) {
+ pipe_ctx = &pipes[i];
+ break;
+ }
+ }
+
+ if (pipe_ctx == NULL)
+ return false;
+
+ /* Reset CRTC Test Pattern if it is currently running and request is VideoMode */
+ if (link->test_pattern_enabled && test_pattern ==
+ DP_TEST_PATTERN_VIDEO_MODE) {
+ /* Set CRTC Test Pattern */
+ set_crtc_test_pattern(link, pipe_ctx, test_pattern, test_pattern_color_space);
+ dp_set_hw_test_pattern(link, &pipe_ctx->link_res, test_pattern,
+ (uint8_t *)p_custom_pattern,
+ (uint32_t)cust_pattern_size);
+
+ /* Unblank Stream */
+ link->dc->hwss.unblank_stream(
+ pipe_ctx,
+ &link->verified_link_cap);
+ /* TODO:m_pHwss->MuteAudioEndpoint
+ * (pPathMode->pDisplayPath, false);
+ */
+
+ /* Reset Test Pattern state */
+ link->test_pattern_enabled = false;
+
+ return true;
+ }
+
+ /* Check for PHY Test Patterns */
+ if (is_dp_phy_pattern(test_pattern)) {
+ /* Set DPCD Lane Settings before running test pattern */
+ if (p_link_settings != NULL) {
+ if ((link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
+ p_link_settings->lttpr_mode == LTTPR_MODE_TRANSPARENT) {
+ dp_fixed_vs_pe_set_retimer_lane_settings(
+ link,
+ p_link_settings->dpcd_lane_settings,
+ p_link_settings->link_settings.lane_count);
+ } else {
+ dp_set_hw_lane_settings(link, &pipe_ctx->link_res, p_link_settings, DPRX);
+ }
+ dpcd_set_lane_settings(link, p_link_settings, DPRX);
+ }
+
+ /* Blank stream if running test pattern */
+ if (test_pattern != DP_TEST_PATTERN_VIDEO_MODE) {
+ /*TODO:
+ * m_pHwss->
+ * MuteAudioEndpoint(pPathMode->pDisplayPath, true);
+ */
+ /* Blank stream */
+ link->dc->hwss.blank_stream(pipe_ctx);
+ }
+
+ dp_set_hw_test_pattern(link, &pipe_ctx->link_res, test_pattern,
+ (uint8_t *)p_custom_pattern,
+ (uint32_t)cust_pattern_size);
+
+ if (test_pattern != DP_TEST_PATTERN_VIDEO_MODE) {
+ /* Set Test Pattern state */
+ link->test_pattern_enabled = true;
+ if (p_link_settings != NULL)
+ dpcd_set_link_settings(link,
+ p_link_settings);
+ }
+
+ switch (test_pattern) {
+ case DP_TEST_PATTERN_VIDEO_MODE:
+ pattern = PHY_TEST_PATTERN_NONE;
+ break;
+ case DP_TEST_PATTERN_D102:
+ pattern = PHY_TEST_PATTERN_D10_2;
+ break;
+ case DP_TEST_PATTERN_SYMBOL_ERROR:
+ pattern = PHY_TEST_PATTERN_SYMBOL_ERROR;
+ break;
+ case DP_TEST_PATTERN_PRBS7:
+ pattern = PHY_TEST_PATTERN_PRBS7;
+ break;
+ case DP_TEST_PATTERN_80BIT_CUSTOM:
+ pattern = PHY_TEST_PATTERN_80BIT_CUSTOM;
+ break;
+ case DP_TEST_PATTERN_CP2520_1:
+ pattern = PHY_TEST_PATTERN_CP2520_1;
+ break;
+ case DP_TEST_PATTERN_CP2520_2:
+ pattern = PHY_TEST_PATTERN_CP2520_2;
+ break;
+ case DP_TEST_PATTERN_CP2520_3:
+ pattern = PHY_TEST_PATTERN_CP2520_3;
+ break;
+ case DP_TEST_PATTERN_128b_132b_TPS1:
+ pattern = PHY_TEST_PATTERN_128b_132b_TPS1;
+ break;
+ case DP_TEST_PATTERN_128b_132b_TPS2:
+ pattern = PHY_TEST_PATTERN_128b_132b_TPS2;
+ break;
+ case DP_TEST_PATTERN_PRBS9:
+ pattern = PHY_TEST_PATTERN_PRBS9;
+ break;
+ case DP_TEST_PATTERN_PRBS11:
+ pattern = PHY_TEST_PATTERN_PRBS11;
+ break;
+ case DP_TEST_PATTERN_PRBS15:
+ pattern = PHY_TEST_PATTERN_PRBS15;
+ break;
+ case DP_TEST_PATTERN_PRBS23:
+ pattern = PHY_TEST_PATTERN_PRBS23;
+ break;
+ case DP_TEST_PATTERN_PRBS31:
+ pattern = PHY_TEST_PATTERN_PRBS31;
+ break;
+ case DP_TEST_PATTERN_264BIT_CUSTOM:
+ pattern = PHY_TEST_PATTERN_264BIT_CUSTOM;
+ break;
+ case DP_TEST_PATTERN_SQUARE:
+ pattern = PHY_TEST_PATTERN_SQUARE;
+ break;
+ case DP_TEST_PATTERN_SQUARE_PRESHOOT_DISABLED:
+ pattern = PHY_TEST_PATTERN_SQUARE_PRESHOOT_DISABLED;
+ break;
+ case DP_TEST_PATTERN_SQUARE_DEEMPHASIS_DISABLED:
+ pattern = PHY_TEST_PATTERN_SQUARE_DEEMPHASIS_DISABLED;
+ break;
+ case DP_TEST_PATTERN_SQUARE_PRESHOOT_DEEMPHASIS_DISABLED:
+ pattern = PHY_TEST_PATTERN_SQUARE_PRESHOOT_DEEMPHASIS_DISABLED;
+ break;
+ default:
+ return false;
+ }
+
+ if (test_pattern == DP_TEST_PATTERN_VIDEO_MODE
+ /*TODO:&& !pPathMode->pDisplayPath->IsTargetPoweredOn()*/)
+ return false;
+
+ if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_12) {
+ if (is_dp_phy_sqaure_pattern(test_pattern))
+ core_link_write_dpcd(link,
+ DP_LINK_SQUARE_PATTERN,
+ p_custom_pattern,
+ 1);
+
+ /* tell receiver that we are sending qualification
+ * pattern DP 1.2 or later - DP receiver's link quality
+ * pattern is set using DPCD LINK_QUAL_LANEx_SET
+ * register (0x10B~0x10E)\
+ */
+ for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++)
+ link_qual_pattern[lane] =
+ (unsigned char)(pattern);
+
+ core_link_write_dpcd(link,
+ DP_LINK_QUAL_LANE0_SET,
+ link_qual_pattern,
+ sizeof(link_qual_pattern));
+ } else if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_10 ||
+ link->dpcd_caps.dpcd_rev.raw == 0) {
+ /* tell receiver that we are sending qualification
+ * pattern DP 1.1a or earlier - DP receiver's link
+ * quality pattern is set using
+ * DPCD TRAINING_PATTERN_SET -> LINK_QUAL_PATTERN_SET
+ * register (0x102). We will use v_1.3 when we are
+ * setting test pattern for DP 1.1.
+ */
+ core_link_read_dpcd(link, DP_TRAINING_PATTERN_SET,
+ &training_pattern.raw,
+ sizeof(training_pattern));
+ training_pattern.v1_3.LINK_QUAL_PATTERN_SET = pattern;
+ core_link_write_dpcd(link, DP_TRAINING_PATTERN_SET,
+ &training_pattern.raw,
+ sizeof(training_pattern));
+ }
+ } else {
+ enum dc_color_space color_space = COLOR_SPACE_UNKNOWN;
+
+ switch (test_pattern_color_space) {
+ case DP_TEST_PATTERN_COLOR_SPACE_RGB:
+ color_space = COLOR_SPACE_SRGB;
+ if (test_pattern == DP_TEST_PATTERN_COLOR_SQUARES_CEA)
+ color_space = COLOR_SPACE_SRGB_LIMITED;
+ break;
+
+ case DP_TEST_PATTERN_COLOR_SPACE_YCBCR601:
+ color_space = COLOR_SPACE_YCBCR601;
+ if (test_pattern == DP_TEST_PATTERN_COLOR_SQUARES_CEA)
+ color_space = COLOR_SPACE_YCBCR601_LIMITED;
+ break;
+ case DP_TEST_PATTERN_COLOR_SPACE_YCBCR709:
+ color_space = COLOR_SPACE_YCBCR709;
+ if (test_pattern == DP_TEST_PATTERN_COLOR_SQUARES_CEA)
+ color_space = COLOR_SPACE_YCBCR709_LIMITED;
+ break;
+ default:
+ break;
+ }
+
+ if (pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_enable) {
+ if (pipe_ctx->stream && should_use_dmub_lock(pipe_ctx->stream->link)) {
+ union dmub_hw_lock_flags hw_locks = { 0 };
+ struct dmub_hw_lock_inst_flags inst_flags = { 0 };
+
+ hw_locks.bits.lock_dig = 1;
+ inst_flags.dig_inst = pipe_ctx->stream_res.tg->inst;
+
+ dmub_hw_lock_mgr_cmd(link->ctx->dmub_srv,
+ true,
+ &hw_locks,
+ &inst_flags);
+ } else
+ pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_enable(
+ pipe_ctx->stream_res.tg);
+ }
+
+ pipe_ctx->stream_res.tg->funcs->lock(pipe_ctx->stream_res.tg);
+ /* update MSA to requested color space */
+ pipe_ctx->stream_res.stream_enc->funcs->dp_set_stream_attribute(pipe_ctx->stream_res.stream_enc,
+ &pipe_ctx->stream->timing,
+ color_space,
+ pipe_ctx->stream->use_vsc_sdp_for_colorimetry,
+ link->dpcd_caps.dprx_feature.bits.SST_SPLIT_SDP_CAP);
+
+ if (pipe_ctx->stream->use_vsc_sdp_for_colorimetry) {
+ if (test_pattern == DP_TEST_PATTERN_COLOR_SQUARES_CEA)
+ pipe_ctx->stream->vsc_infopacket.sb[17] |= (1 << 7); // sb17 bit 7 Dynamic Range: 0 = VESA range, 1 = CTA range
+ else
+ pipe_ctx->stream->vsc_infopacket.sb[17] &= ~(1 << 7);
+ resource_build_info_frame(pipe_ctx);
+ link->dc->hwss.update_info_frame(pipe_ctx);
+ }
+
+ /* CRTC Patterns */
+ set_crtc_test_pattern(link, pipe_ctx, test_pattern, test_pattern_color_space);
+ pipe_ctx->stream_res.tg->funcs->unlock(pipe_ctx->stream_res.tg);
+ pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg,
+ CRTC_STATE_VACTIVE);
+ pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg,
+ CRTC_STATE_VBLANK);
+ pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg,
+ CRTC_STATE_VACTIVE);
+
+ if (pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_disable) {
+ if (pipe_ctx->stream && should_use_dmub_lock(pipe_ctx->stream->link)) {
+ union dmub_hw_lock_flags hw_locks = { 0 };
+ struct dmub_hw_lock_inst_flags inst_flags = { 0 };
+
+ hw_locks.bits.lock_dig = 1;
+ inst_flags.dig_inst = pipe_ctx->stream_res.tg->inst;
+
+ dmub_hw_lock_mgr_cmd(link->ctx->dmub_srv,
+ false,
+ &hw_locks,
+ &inst_flags);
+ } else
+ pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_disable(
+ pipe_ctx->stream_res.tg);
+ }
+
+ /* Set Test Pattern state */
+ link->test_pattern_enabled = true;
+ }
+
+ return true;
+}
+
+void dc_link_set_drive_settings(struct dc *dc,
+ struct link_training_settings *lt_settings,
+ const struct dc_link *link)
+{
+
+ int i;
+ struct link_resource link_res;
+
+ for (i = 0; i < dc->link_count; i++)
+ if (dc->links[i] == link)
+ break;
+
+ if (i >= dc->link_count)
+ ASSERT_CRITICAL(false);
+
+ link_get_cur_link_res(link, &link_res);
+ dp_set_drive_settings(dc->links[i], &link_res, lt_settings);
+}
+
+void dc_link_set_preferred_link_settings(struct dc *dc,
+ struct dc_link_settings *link_setting,
+ struct dc_link *link)
+{
+ int i;
+ struct pipe_ctx *pipe;
+ struct dc_stream_state *link_stream;
+ struct dc_link_settings store_settings = *link_setting;
+
+ link->preferred_link_setting = store_settings;
+
+ /* Retrain with preferred link settings only relevant for
+ * DP signal type
+ * Check for non-DP signal or if passive dongle present
+ */
+ if (!dc_is_dp_signal(link->connector_signal) ||
+ link->dongle_max_pix_clk > 0)
+ return;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+ if (pipe->stream && pipe->stream->link) {
+ if (pipe->stream->link == link) {
+ link_stream = pipe->stream;
+ break;
+ }
+ }
+ }
+
+ /* Stream not found */
+ if (i == MAX_PIPES)
+ return;
+
+ /* Cannot retrain link if backend is off */
+ if (link_stream->dpms_off)
+ return;
+
+ if (link_decide_link_settings(link_stream, &store_settings))
+ dp_retrain_link_dp_test(link, &store_settings, false);
+}
+
+void dc_link_set_preferred_training_settings(struct dc *dc,
+ struct dc_link_settings *link_setting,
+ struct dc_link_training_overrides *lt_overrides,
+ struct dc_link *link,
+ bool skip_immediate_retrain)
+{
+ if (lt_overrides != NULL)
+ link->preferred_training_settings = *lt_overrides;
+ else
+ memset(&link->preferred_training_settings, 0, sizeof(link->preferred_training_settings));
+
+ if (link_setting != NULL) {
+ link->preferred_link_setting = *link_setting;
+ } else {
+ link->preferred_link_setting.lane_count = LANE_COUNT_UNKNOWN;
+ link->preferred_link_setting.link_rate = LINK_RATE_UNKNOWN;
+ }
+
+ if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT &&
+ link->type == dc_connection_mst_branch)
+ dm_helpers_dp_mst_update_branch_bandwidth(dc->ctx, link);
+
+ /* Retrain now, or wait until next stream update to apply */
+ if (skip_immediate_retrain == false)
+ dc_link_set_preferred_link_settings(dc, &link->preferred_link_setting, link);
+}
+
+void dc_link_set_test_pattern(struct dc_link *link,
+ enum dp_test_pattern test_pattern,
+ enum dp_test_pattern_color_space test_pattern_color_space,
+ const struct link_training_settings *p_link_settings,
+ const unsigned char *p_custom_pattern,
+ unsigned int cust_pattern_size)
+{
+ if (link != NULL)
+ dc_link_dp_set_test_pattern(
+ link,
+ test_pattern,
+ test_pattern_color_space,
+ p_link_settings,
+ p_custom_pattern,
+ cust_pattern_size);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.h b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.h
new file mode 100644
index 000000000000..7f17838b653b
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#ifndef __LINK_DP_CTS_H__
+#define __LINK_DP_CTS_H__
+#include "link.h"
+
+void dp_retrain_link_dp_test(struct dc_link *link,
+ struct dc_link_settings *link_setting,
+ bool skip_video_pattern);
+
+#endif /* __LINK_DP_CTS_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dp_trace.c b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.c
index 2c1a3bfcdb50..459b362ed374 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_dp_trace.c
+++ b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.c
@@ -22,8 +22,9 @@
* Authors: AMD
*
*/
-#include "dc_link.h"
#include "link_dp_trace.h"
+#include "link/protocols/link_dpcd.h"
+#include "link.h"
void dp_trace_init(struct dc_link *link)
{
@@ -145,7 +146,7 @@ unsigned int dc_dp_trace_get_link_loss_count(struct dc_link *link)
return link->dp_trace.link_loss_count;
}
-void dp_trace_set_edp_power_timestamp(struct dc_link *link,
+void link_dp_trace_set_edp_power_timestamp(struct dc_link *link,
bool power_up)
{
if (!power_up)
@@ -155,12 +156,19 @@ void dp_trace_set_edp_power_timestamp(struct dc_link *link,
link->dp_trace.edp_trace_power_timestamps.poweron = dm_get_timestamp(link->dc->ctx);
}
-uint64_t dp_trace_get_edp_poweron_timestamp(struct dc_link *link)
+uint64_t link_dp_trace_get_edp_poweron_timestamp(struct dc_link *link)
{
return link->dp_trace.edp_trace_power_timestamps.poweron;
}
-uint64_t dp_trace_get_edp_poweroff_timestamp(struct dc_link *link)
+uint64_t link_dp_trace_get_edp_poweroff_timestamp(struct dc_link *link)
{
return link->dp_trace.edp_trace_power_timestamps.poweroff;
-} \ No newline at end of file
+}
+
+void link_dp_source_sequence_trace(struct dc_link *link, uint8_t dp_test_mode)
+{
+ if (link != NULL && link->dc->debug.enable_driver_sequence_debug)
+ core_link_write_dpcd(link, DP_SOURCE_SEQUENCE,
+ &dp_test_mode, sizeof(dp_test_mode));
+}
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dp_trace.h b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.h
index 26700e3cd65e..89feea1b2692 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_dp_trace.h
+++ b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.h
@@ -24,6 +24,7 @@
*/
#ifndef __LINK_DP_TRACE_H__
#define __LINK_DP_TRACE_H__
+#include "link.h"
void dp_trace_init(struct dc_link *link);
void dp_trace_reset(struct dc_link *link);
@@ -54,9 +55,4 @@ struct dp_trace_lt_counts *dc_dp_trace_get_lt_counts(struct dc_link *link,
bool in_detection);
unsigned int dc_dp_trace_get_link_loss_count(struct dc_link *link);
-void dp_trace_set_edp_power_timestamp(struct dc_link *link,
- bool power_up);
-uint64_t dp_trace_get_edp_poweron_timestamp(struct dc_link *link);
-uint64_t dp_trace_get_edp_poweroff_timestamp(struct dc_link *link);
-
#endif /* __LINK_DP_TRACE_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/link/accessories/link_fpga.c b/drivers/gpu/drm/amd/display/dc/link/accessories/link_fpga.c
new file mode 100644
index 000000000000..d3cc604eed67
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/link/accessories/link_fpga.c
@@ -0,0 +1,95 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#include "link_fpga.h"
+#include "link/link_dpms.h"
+#include "dm_helpers.h"
+#include "link_hwss.h"
+#include "dccg.h"
+#include "resource.h"
+
+#define DC_LOGGER_INIT(logger)
+
+void dp_fpga_hpo_enable_link_and_stream(struct dc_state *state, struct pipe_ctx *pipe_ctx)
+{
+ struct dc *dc = pipe_ctx->stream->ctx->dc;
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ struct link_mst_stream_allocation_table proposed_table = {0};
+ struct fixed31_32 avg_time_slots_per_mtp;
+ uint8_t req_slot_count = 0;
+ uint8_t vc_id = 1; /// VC ID always 1 for SST
+ struct dc_link_settings link_settings = pipe_ctx->link_config.dp_link_settings;
+ const struct link_hwss *link_hwss = get_link_hwss(stream->link, &pipe_ctx->link_res);
+ DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
+
+ stream->link->cur_link_settings = link_settings;
+
+ if (link_hwss->ext.enable_dp_link_output)
+ link_hwss->ext.enable_dp_link_output(stream->link, &pipe_ctx->link_res,
+ stream->signal, pipe_ctx->clock_source->id,
+ &link_settings);
+
+ /* Enable DP_STREAM_ENC */
+ dc->hwss.enable_stream(pipe_ctx);
+
+ /* Set DPS PPS SDP (AKA "info frames") */
+ if (pipe_ctx->stream->timing.flags.DSC) {
+ link_set_dsc_pps_packet(pipe_ctx, true, true);
+ }
+
+ /* Allocate Payload */
+ if ((stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) && (state->stream_count > 1)) {
+ // MST case
+ uint8_t i;
+
+ proposed_table.stream_count = state->stream_count;
+ for (i = 0; i < state->stream_count; i++) {
+ avg_time_slots_per_mtp = link_calculate_sst_avg_time_slots_per_mtp(state->streams[i], state->streams[i]->link);
+ req_slot_count = dc_fixpt_ceil(avg_time_slots_per_mtp);
+ proposed_table.stream_allocations[i].slot_count = req_slot_count;
+ proposed_table.stream_allocations[i].vcp_id = i+1;
+ /* NOTE: This makes assumption that pipe_ctx index is same as stream index */
+ proposed_table.stream_allocations[i].hpo_dp_stream_enc = state->res_ctx.pipe_ctx[i].stream_res.hpo_dp_stream_enc;
+ }
+ } else {
+ // SST case
+ avg_time_slots_per_mtp = link_calculate_sst_avg_time_slots_per_mtp(stream, stream->link);
+ req_slot_count = dc_fixpt_ceil(avg_time_slots_per_mtp);
+ proposed_table.stream_count = 1; /// Always 1 stream for SST
+ proposed_table.stream_allocations[0].slot_count = req_slot_count;
+ proposed_table.stream_allocations[0].vcp_id = vc_id;
+ proposed_table.stream_allocations[0].hpo_dp_stream_enc = pipe_ctx->stream_res.hpo_dp_stream_enc;
+ }
+
+ link_hwss->ext.update_stream_allocation_table(stream->link,
+ &pipe_ctx->link_res,
+ &proposed_table);
+
+ if (link_hwss->ext.set_throttled_vcp_size)
+ link_hwss->ext.set_throttled_vcp_size(pipe_ctx, avg_time_slots_per_mtp);
+
+ dc->hwss.unblank_stream(pipe_ctx, &stream->link->cur_link_settings);
+ dc->hwss.enable_audio_stream(pipe_ctx);
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/link/accessories/link_fpga.h b/drivers/gpu/drm/amd/display/dc/link/accessories/link_fpga.h
new file mode 100644
index 000000000000..3a80f5595943
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/link/accessories/link_fpga.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#ifndef __LINK_FPGA_H__
+#define __LINK_FPGA_H__
+#include "link.h"
+void dp_fpga_hpo_enable_link_and_stream(struct dc_state *state,
+ struct pipe_ctx *pipe_ctx);
+#endif /* __LINK_FPGA_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_hwss_dio.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c
index 33148b753c03..b092b00b3599 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_hwss_dio.c
+++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c
@@ -24,7 +24,6 @@
*/
#include "link_hwss_dio.h"
#include "core_types.h"
-#include "dc_link_dp.h"
#include "link_enc_cfg.h"
void set_dio_throttled_vcp_size(struct pipe_ctx *pipe_ctx,
@@ -45,7 +44,7 @@ void setup_dio_stream_encoder(struct pipe_ctx *pipe_ctx)
link_enc->funcs->connect_dig_be_to_fe(link_enc,
pipe_ctx->stream_res.stream_enc->id, true);
if (dc_is_dp_signal(pipe_ctx->stream->signal))
- dp_source_sequence_trace(pipe_ctx->stream->link,
+ link_dp_source_sequence_trace(pipe_ctx->stream->link,
DPCD_SOURCE_SEQ_AFTER_CONNECT_DIG_FE_BE);
if (stream_enc->funcs->enable_fifo)
stream_enc->funcs->enable_fifo(stream_enc);
@@ -64,7 +63,7 @@ void reset_dio_stream_encoder(struct pipe_ctx *pipe_ctx)
pipe_ctx->stream_res.stream_enc->id,
false);
if (dc_is_dp_signal(pipe_ctx->stream->signal))
- dp_source_sequence_trace(pipe_ctx->stream->link,
+ link_dp_source_sequence_trace(pipe_ctx->stream->link,
DPCD_SOURCE_SEQ_AFTER_DISCONNECT_DIG_FE_BE);
}
@@ -106,7 +105,7 @@ void setup_dio_stream_attribute(struct pipe_ctx *pipe_ctx)
&stream->timing);
if (dc_is_dp_signal(stream->signal))
- dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DP_STREAM_ATTR);
+ link_dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DP_STREAM_ATTR);
}
void enable_dio_dp_link_output(struct dc_link *link,
@@ -127,7 +126,7 @@ void enable_dio_dp_link_output(struct dc_link *link,
link_enc,
link_settings,
clock_source);
- dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_LINK_PHY);
+ link_dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_LINK_PHY);
}
void disable_dio_link_output(struct dc_link *link,
@@ -137,7 +136,7 @@ void disable_dio_link_output(struct dc_link *link,
struct link_encoder *link_enc = link_enc_cfg_get_link_enc(link);
link_enc->funcs->disable_output(link_enc, signal);
- dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY);
+ link_dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY);
}
void set_dio_dp_link_test_pattern(struct dc_link *link,
@@ -147,7 +146,7 @@ void set_dio_dp_link_test_pattern(struct dc_link *link,
struct link_encoder *link_enc = link_enc_cfg_get_link_enc(link);
link_enc->funcs->dp_set_phy_pattern(link_enc, tp_params);
- dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_SET_SOURCE_PATTERN);
+ link_dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_SET_SOURCE_PATTERN);
}
void set_dio_dp_lane_settings(struct dc_link *link,
@@ -196,7 +195,7 @@ void enable_dio_audio_packet(struct pipe_ctx *pipe_ctx)
pipe_ctx->stream_res.stream_enc, false);
if (dc_is_dp_signal(pipe_ctx->stream->signal))
- dp_source_sequence_trace(pipe_ctx->stream->link,
+ link_dp_source_sequence_trace(pipe_ctx->stream->link,
DPCD_SOURCE_SEQ_AFTER_ENABLE_AUDIO_STREAM);
}
@@ -215,7 +214,7 @@ void disable_dio_audio_packet(struct pipe_ctx *pipe_ctx)
}
if (dc_is_dp_signal(pipe_ctx->stream->signal))
- dp_source_sequence_trace(pipe_ctx->stream->link,
+ link_dp_source_sequence_trace(pipe_ctx->stream->link,
DPCD_SOURCE_SEQ_AFTER_DISABLE_AUDIO_STREAM);
}
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_hwss_dio.h b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h
index 9a108c3d7831..8b8a099feeb0 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_hwss_dio.h
+++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h
@@ -26,6 +26,7 @@
#define __LINK_HWSS_DIO_H__
#include "link_hwss.h"
+#include "link.h"
const struct link_hwss *get_dio_link_hwss(void);
bool can_use_dio_link_hwss(const struct dc_link *link,
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_hwss_dpia.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.c
index 861f3cd5b356..861f3cd5b356 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_hwss_dpia.c
+++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.c
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_hwss_dpia.h b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.h
index ad16ec5d9bb7..ad16ec5d9bb7 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_hwss_dpia.h
+++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.h
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.c
index 2f46e1ac4ce0..aa1c5e253b43 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.c
@@ -26,7 +26,6 @@
#include "dm_helpers.h"
#include "core_types.h"
#include "dccg.h"
-#include "dc_link_dp.h"
#include "clk_mgr.h"
static enum phyd32clk_clock_source get_phyd32clk_src(struct dc_link *link)
@@ -87,57 +86,20 @@ static void set_hpo_dp_hblank_min_symbol_width(struct pipe_ctx *pipe_ctx,
hblank_min_symbol_width);
}
-static int get_odm_segment_count(struct pipe_ctx *pipe_ctx)
-{
- struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe;
- int count = 1;
-
- while (odm_pipe != NULL) {
- count++;
- odm_pipe = odm_pipe->next_odm_pipe;
- }
-
- return count;
-}
-
static void setup_hpo_dp_stream_encoder(struct pipe_ctx *pipe_ctx)
{
- struct dc *dc = pipe_ctx->stream->ctx->dc;
struct hpo_dp_stream_encoder *stream_enc = pipe_ctx->stream_res.hpo_dp_stream_enc;
struct hpo_dp_link_encoder *link_enc = pipe_ctx->link_res.hpo_dp_link_enc;
- struct dccg *dccg = dc->res_pool->dccg;
- struct timing_generator *tg = pipe_ctx->stream_res.tg;
- struct dtbclk_dto_params dto_params = {0};
- enum phyd32clk_clock_source phyd32clk = get_phyd32clk_src(pipe_ctx->stream->link);
- dto_params.otg_inst = tg->inst;
- dto_params.pixclk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10;
- dto_params.num_odm_segments = get_odm_segment_count(pipe_ctx);
- dto_params.timing = &pipe_ctx->stream->timing;
- dto_params.ref_dtbclk_khz = dc->clk_mgr->funcs->get_dtb_ref_clk_frequency(dc->clk_mgr);
-
- dccg->funcs->set_dpstreamclk(dccg, DTBCLK0, tg->inst, stream_enc->inst);
- dccg->funcs->enable_symclk32_se(dccg, stream_enc->inst, phyd32clk);
- dccg->funcs->set_dtbclk_dto(dccg, &dto_params);
stream_enc->funcs->enable_stream(stream_enc);
stream_enc->funcs->map_stream_to_link(stream_enc, stream_enc->inst, link_enc->inst);
}
static void reset_hpo_dp_stream_encoder(struct pipe_ctx *pipe_ctx)
{
- struct dc *dc = pipe_ctx->stream->ctx->dc;
struct hpo_dp_stream_encoder *stream_enc = pipe_ctx->stream_res.hpo_dp_stream_enc;
- struct dccg *dccg = dc->res_pool->dccg;
- struct timing_generator *tg = pipe_ctx->stream_res.tg;
- struct dtbclk_dto_params dto_params = {0};
-
- dto_params.otg_inst = tg->inst;
- dto_params.timing = &pipe_ctx->stream->timing;
stream_enc->funcs->disable(stream_enc);
- dccg->funcs->set_dtbclk_dto(dccg, &dto_params);
- dccg->funcs->disable_symclk32_se(dccg, stream_enc->inst);
- dccg->funcs->set_dpstreamclk(dccg, REFCLK, tg->inst, stream_enc->inst);
}
static void setup_hpo_dp_stream_attribute(struct pipe_ctx *pipe_ctx)
@@ -153,7 +115,7 @@ static void setup_hpo_dp_stream_attribute(struct pipe_ctx *pipe_ctx)
stream->use_vsc_sdp_for_colorimetry,
stream->timing.flags.DSC,
false);
- dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DP_STREAM_ATTR);
+ link_dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DP_STREAM_ATTR);
}
static void enable_hpo_dp_fpga_link_output(struct dc_link *link,
@@ -239,7 +201,7 @@ static void set_hpo_dp_link_test_pattern(struct dc_link *link,
{
link_res->hpo_dp_link_enc->funcs->set_link_test_pattern(
link_res->hpo_dp_link_enc, tp_params);
- dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_SET_SOURCE_PATTERN);
+ link_dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_SET_SOURCE_PATTERN);
}
static void set_hpo_dp_lane_settings(struct dc_link *link,
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.h b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.h
index 57d447ec27b8..3cbb94b41a23 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.h
+++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.h
@@ -26,6 +26,7 @@
#define __LINK_HWSS_HPO_DP_H__
#include "link_hwss.h"
+#include "link.h"
bool can_use_hpo_dp_link_hwss(const struct dc_link *link,
const struct link_resource *link_res);
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_detection.c b/drivers/gpu/drm/amd/display/dc/link/link_detection.c
new file mode 100644
index 000000000000..38216c789d77
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/link/link_detection.c
@@ -0,0 +1,1323 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+/* FILE POLICY AND INTENDED USAGE:
+ * This file manages link detection states and receiver states by using various
+ * link protocols. It also provides helper functions to interpret certain
+ * capabilities or status based on the states it manages or retrieve them
+ * directly from connected receivers.
+ */
+
+#include "link_dpms.h"
+#include "link_detection.h"
+#include "link_hwss.h"
+#include "protocols/link_edp_panel_control.h"
+#include "protocols/link_ddc.h"
+#include "protocols/link_hpd.h"
+#include "protocols/link_dpcd.h"
+#include "protocols/link_dp_capability.h"
+#include "protocols/link_dp_dpia.h"
+#include "protocols/link_dp_phy.h"
+#include "protocols/link_dp_training.h"
+#include "accessories/link_dp_trace.h"
+
+#include "link_enc_cfg.h"
+#include "dm_helpers.h"
+#include "clk_mgr.h"
+
+#define DC_LOGGER_INIT(logger)
+
+#define LINK_INFO(...) \
+ DC_LOG_HW_HOTPLUG( \
+ __VA_ARGS__)
+/*
+ * Some receivers fail to train on first try and are good
+ * on subsequent tries. 2 retries should be plenty. If we
+ * don't have a successful training then we don't expect to
+ * ever get one.
+ */
+#define LINK_TRAINING_MAX_VERIFY_RETRY 2
+
+static enum ddc_transaction_type get_ddc_transaction_type(enum signal_type sink_signal)
+{
+ enum ddc_transaction_type transaction_type = DDC_TRANSACTION_TYPE_NONE;
+
+ switch (sink_signal) {
+ case SIGNAL_TYPE_DVI_SINGLE_LINK:
+ case SIGNAL_TYPE_DVI_DUAL_LINK:
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+ case SIGNAL_TYPE_LVDS:
+ case SIGNAL_TYPE_RGB:
+ transaction_type = DDC_TRANSACTION_TYPE_I2C;
+ break;
+
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ case SIGNAL_TYPE_EDP:
+ transaction_type = DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
+ break;
+
+ case SIGNAL_TYPE_DISPLAY_PORT_MST:
+ /* MST does not use I2COverAux, but there is the
+ * SPECIAL use case for "immediate dwnstrm device
+ * access" (EPR#370830).
+ */
+ transaction_type = DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
+ break;
+
+ default:
+ break;
+ }
+
+ return transaction_type;
+}
+
+static enum signal_type get_basic_signal_type(struct graphics_object_id encoder,
+ struct graphics_object_id downstream)
+{
+ if (downstream.type == OBJECT_TYPE_CONNECTOR) {
+ switch (downstream.id) {
+ case CONNECTOR_ID_SINGLE_LINK_DVII:
+ switch (encoder.id) {
+ case ENCODER_ID_INTERNAL_DAC1:
+ case ENCODER_ID_INTERNAL_KLDSCP_DAC1:
+ case ENCODER_ID_INTERNAL_DAC2:
+ case ENCODER_ID_INTERNAL_KLDSCP_DAC2:
+ return SIGNAL_TYPE_RGB;
+ default:
+ return SIGNAL_TYPE_DVI_SINGLE_LINK;
+ }
+ break;
+ case CONNECTOR_ID_DUAL_LINK_DVII:
+ {
+ switch (encoder.id) {
+ case ENCODER_ID_INTERNAL_DAC1:
+ case ENCODER_ID_INTERNAL_KLDSCP_DAC1:
+ case ENCODER_ID_INTERNAL_DAC2:
+ case ENCODER_ID_INTERNAL_KLDSCP_DAC2:
+ return SIGNAL_TYPE_RGB;
+ default:
+ return SIGNAL_TYPE_DVI_DUAL_LINK;
+ }
+ }
+ break;
+ case CONNECTOR_ID_SINGLE_LINK_DVID:
+ return SIGNAL_TYPE_DVI_SINGLE_LINK;
+ case CONNECTOR_ID_DUAL_LINK_DVID:
+ return SIGNAL_TYPE_DVI_DUAL_LINK;
+ case CONNECTOR_ID_VGA:
+ return SIGNAL_TYPE_RGB;
+ case CONNECTOR_ID_HDMI_TYPE_A:
+ return SIGNAL_TYPE_HDMI_TYPE_A;
+ case CONNECTOR_ID_LVDS:
+ return SIGNAL_TYPE_LVDS;
+ case CONNECTOR_ID_DISPLAY_PORT:
+ case CONNECTOR_ID_USBC:
+ return SIGNAL_TYPE_DISPLAY_PORT;
+ case CONNECTOR_ID_EDP:
+ return SIGNAL_TYPE_EDP;
+ default:
+ return SIGNAL_TYPE_NONE;
+ }
+ } else if (downstream.type == OBJECT_TYPE_ENCODER) {
+ switch (downstream.id) {
+ case ENCODER_ID_EXTERNAL_NUTMEG:
+ case ENCODER_ID_EXTERNAL_TRAVIS:
+ return SIGNAL_TYPE_DISPLAY_PORT;
+ default:
+ return SIGNAL_TYPE_NONE;
+ }
+ }
+
+ return SIGNAL_TYPE_NONE;
+}
+
+/*
+ * @brief
+ * Detect output sink type
+ */
+static enum signal_type link_detect_sink_signal_type(struct dc_link *link,
+ enum dc_detect_reason reason)
+{
+ enum signal_type result;
+ struct graphics_object_id enc_id;
+
+ if (link->is_dig_mapping_flexible)
+ enc_id = (struct graphics_object_id){.id = ENCODER_ID_UNKNOWN};
+ else
+ enc_id = link->link_enc->id;
+ result = get_basic_signal_type(enc_id, link->link_id);
+
+ /* Use basic signal type for link without physical connector. */
+ if (link->ep_type != DISPLAY_ENDPOINT_PHY)
+ return result;
+
+ /* Internal digital encoder will detect only dongles
+ * that require digital signal
+ */
+
+ /* Detection mechanism is different
+ * for different native connectors.
+ * LVDS connector supports only LVDS signal;
+ * PCIE is a bus slot, the actual connector needs to be detected first;
+ * eDP connector supports only eDP signal;
+ * HDMI should check straps for audio
+ */
+
+ /* PCIE detects the actual connector on add-on board */
+ if (link->link_id.id == CONNECTOR_ID_PCIE) {
+ /* ZAZTODO implement PCIE add-on card detection */
+ }
+
+ switch (link->link_id.id) {
+ case CONNECTOR_ID_HDMI_TYPE_A: {
+ /* check audio support:
+ * if native HDMI is not supported, switch to DVI
+ */
+ struct audio_support *aud_support =
+ &link->dc->res_pool->audio_support;
+
+ if (!aud_support->hdmi_audio_native)
+ if (link->link_id.id == CONNECTOR_ID_HDMI_TYPE_A)
+ result = SIGNAL_TYPE_DVI_SINGLE_LINK;
+ }
+ break;
+ case CONNECTOR_ID_DISPLAY_PORT:
+ case CONNECTOR_ID_USBC: {
+ /* DP HPD short pulse. Passive DP dongle will not
+ * have short pulse
+ */
+ if (reason != DETECT_REASON_HPDRX) {
+ /* Check whether DP signal detected: if not -
+ * we assume signal is DVI; it could be corrected
+ * to HDMI after dongle detection
+ */
+ if (!dm_helpers_is_dp_sink_present(link))
+ result = SIGNAL_TYPE_DVI_SINGLE_LINK;
+ }
+ }
+ break;
+ default:
+ break;
+ }
+
+ return result;
+}
+
+static enum signal_type decide_signal_from_strap_and_dongle_type(enum display_dongle_type dongle_type,
+ struct audio_support *audio_support)
+{
+ enum signal_type signal = SIGNAL_TYPE_NONE;
+
+ switch (dongle_type) {
+ case DISPLAY_DONGLE_DP_HDMI_DONGLE:
+ if (audio_support->hdmi_audio_on_dongle)
+ signal = SIGNAL_TYPE_HDMI_TYPE_A;
+ else
+ signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
+ break;
+ case DISPLAY_DONGLE_DP_DVI_DONGLE:
+ signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
+ break;
+ case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
+ if (audio_support->hdmi_audio_native)
+ signal = SIGNAL_TYPE_HDMI_TYPE_A;
+ else
+ signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
+ break;
+ default:
+ signal = SIGNAL_TYPE_NONE;
+ break;
+ }
+
+ return signal;
+}
+
+static void read_scdc_caps(struct ddc_service *ddc_service,
+ struct dc_sink *sink)
+{
+ uint8_t slave_address = HDMI_SCDC_ADDRESS;
+ uint8_t offset = HDMI_SCDC_MANUFACTURER_OUI;
+
+ link_query_ddc_data(ddc_service, slave_address, &offset,
+ sizeof(offset), sink->scdc_caps.manufacturer_OUI.byte,
+ sizeof(sink->scdc_caps.manufacturer_OUI.byte));
+
+ offset = HDMI_SCDC_DEVICE_ID;
+
+ link_query_ddc_data(ddc_service, slave_address, &offset,
+ sizeof(offset), &(sink->scdc_caps.device_id.byte),
+ sizeof(sink->scdc_caps.device_id.byte));
+}
+
+static bool i2c_read(
+ struct ddc_service *ddc,
+ uint32_t address,
+ uint8_t *buffer,
+ uint32_t len)
+{
+ uint8_t offs_data = 0;
+ struct i2c_payload payloads[2] = {
+ {
+ .write = true,
+ .address = address,
+ .length = 1,
+ .data = &offs_data },
+ {
+ .write = false,
+ .address = address,
+ .length = len,
+ .data = buffer } };
+
+ struct i2c_command command = {
+ .payloads = payloads,
+ .number_of_payloads = 2,
+ .engine = DDC_I2C_COMMAND_ENGINE,
+ .speed = ddc->ctx->dc->caps.i2c_speed_in_khz };
+
+ return dm_helpers_submit_i2c(
+ ddc->ctx,
+ ddc->link,
+ &command);
+}
+
+enum {
+ DP_SINK_CAP_SIZE =
+ DP_EDP_CONFIGURATION_CAP - DP_DPCD_REV + 1
+};
+
+static void query_dp_dual_mode_adaptor(
+ struct ddc_service *ddc,
+ struct display_sink_capability *sink_cap)
+{
+ uint8_t i;
+ bool is_valid_hdmi_signature;
+ enum display_dongle_type *dongle = &sink_cap->dongle_type;
+ uint8_t type2_dongle_buf[DP_ADAPTOR_TYPE2_SIZE];
+ bool is_type2_dongle = false;
+ int retry_count = 2;
+ struct dp_hdmi_dongle_signature_data *dongle_signature;
+
+ /* Assume we have no valid DP passive dongle connected */
+ *dongle = DISPLAY_DONGLE_NONE;
+ sink_cap->max_hdmi_pixel_clock = DP_ADAPTOR_HDMI_SAFE_MAX_TMDS_CLK;
+
+ /* Read DP-HDMI dongle I2c (no response interpreted as DP-DVI dongle)*/
+ if (!i2c_read(
+ ddc,
+ DP_HDMI_DONGLE_ADDRESS,
+ type2_dongle_buf,
+ sizeof(type2_dongle_buf))) {
+ /* Passive HDMI dongles can sometimes fail here without retrying*/
+ while (retry_count > 0) {
+ if (i2c_read(ddc,
+ DP_HDMI_DONGLE_ADDRESS,
+ type2_dongle_buf,
+ sizeof(type2_dongle_buf)))
+ break;
+ retry_count--;
+ }
+ if (retry_count == 0) {
+ *dongle = DISPLAY_DONGLE_DP_DVI_DONGLE;
+ sink_cap->max_hdmi_pixel_clock = DP_ADAPTOR_DVI_MAX_TMDS_CLK;
+
+ CONN_DATA_DETECT(ddc->link, type2_dongle_buf, sizeof(type2_dongle_buf),
+ "DP-DVI passive dongle %dMhz: ",
+ DP_ADAPTOR_DVI_MAX_TMDS_CLK / 1000);
+ return;
+ }
+ }
+
+ /* Check if Type 2 dongle.*/
+ if (type2_dongle_buf[DP_ADAPTOR_TYPE2_REG_ID] == DP_ADAPTOR_TYPE2_ID)
+ is_type2_dongle = true;
+
+ dongle_signature =
+ (struct dp_hdmi_dongle_signature_data *)type2_dongle_buf;
+
+ is_valid_hdmi_signature = true;
+
+ /* Check EOT */
+ if (dongle_signature->eot != DP_HDMI_DONGLE_SIGNATURE_EOT) {
+ is_valid_hdmi_signature = false;
+ }
+
+ /* Check signature */
+ for (i = 0; i < sizeof(dongle_signature->id); ++i) {
+ /* If its not the right signature,
+ * skip mismatch in subversion byte.*/
+ if (dongle_signature->id[i] !=
+ dp_hdmi_dongle_signature_str[i] && i != 3) {
+
+ if (is_type2_dongle) {
+ is_valid_hdmi_signature = false;
+ break;
+ }
+
+ }
+ }
+
+ if (is_type2_dongle) {
+ uint32_t max_tmds_clk =
+ type2_dongle_buf[DP_ADAPTOR_TYPE2_REG_MAX_TMDS_CLK];
+
+ max_tmds_clk = max_tmds_clk * 2 + max_tmds_clk / 2;
+
+ if (0 == max_tmds_clk ||
+ max_tmds_clk < DP_ADAPTOR_TYPE2_MIN_TMDS_CLK ||
+ max_tmds_clk > DP_ADAPTOR_TYPE2_MAX_TMDS_CLK) {
+ *dongle = DISPLAY_DONGLE_DP_DVI_DONGLE;
+
+ CONN_DATA_DETECT(ddc->link, type2_dongle_buf,
+ sizeof(type2_dongle_buf),
+ "DP-DVI passive dongle %dMhz: ",
+ DP_ADAPTOR_DVI_MAX_TMDS_CLK / 1000);
+ } else {
+ if (is_valid_hdmi_signature == true) {
+ *dongle = DISPLAY_DONGLE_DP_HDMI_DONGLE;
+
+ CONN_DATA_DETECT(ddc->link, type2_dongle_buf,
+ sizeof(type2_dongle_buf),
+ "Type 2 DP-HDMI passive dongle %dMhz: ",
+ max_tmds_clk);
+ } else {
+ *dongle = DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE;
+
+ CONN_DATA_DETECT(ddc->link, type2_dongle_buf,
+ sizeof(type2_dongle_buf),
+ "Type 2 DP-HDMI passive dongle (no signature) %dMhz: ",
+ max_tmds_clk);
+
+ }
+
+ /* Multiply by 1000 to convert to kHz. */
+ sink_cap->max_hdmi_pixel_clock =
+ max_tmds_clk * 1000;
+ }
+ sink_cap->is_dongle_type_one = false;
+
+ } else {
+ if (is_valid_hdmi_signature == true) {
+ *dongle = DISPLAY_DONGLE_DP_HDMI_DONGLE;
+
+ CONN_DATA_DETECT(ddc->link, type2_dongle_buf,
+ sizeof(type2_dongle_buf),
+ "Type 1 DP-HDMI passive dongle %dMhz: ",
+ sink_cap->max_hdmi_pixel_clock / 1000);
+ } else {
+ *dongle = DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE;
+
+ CONN_DATA_DETECT(ddc->link, type2_dongle_buf,
+ sizeof(type2_dongle_buf),
+ "Type 1 DP-HDMI passive dongle (no signature) %dMhz: ",
+ sink_cap->max_hdmi_pixel_clock / 1000);
+ }
+ sink_cap->is_dongle_type_one = true;
+ }
+
+ return;
+}
+
+static enum signal_type dp_passive_dongle_detection(struct ddc_service *ddc,
+ struct display_sink_capability *sink_cap,
+ struct audio_support *audio_support)
+{
+ query_dp_dual_mode_adaptor(ddc, sink_cap);
+
+ return decide_signal_from_strap_and_dongle_type(sink_cap->dongle_type,
+ audio_support);
+}
+
+static void link_disconnect_sink(struct dc_link *link)
+{
+ if (link->local_sink) {
+ dc_sink_release(link->local_sink);
+ link->local_sink = NULL;
+ }
+
+ link->dpcd_sink_count = 0;
+ //link->dpcd_caps.dpcd_rev.raw = 0;
+}
+
+static void link_disconnect_remap(struct dc_sink *prev_sink, struct dc_link *link)
+{
+ dc_sink_release(link->local_sink);
+ link->local_sink = prev_sink;
+}
+
+#if defined(CONFIG_DRM_AMD_DC_HDCP)
+static void query_hdcp_capability(enum signal_type signal, struct dc_link *link)
+{
+ struct hdcp_protection_message msg22;
+ struct hdcp_protection_message msg14;
+
+ memset(&msg22, 0, sizeof(struct hdcp_protection_message));
+ memset(&msg14, 0, sizeof(struct hdcp_protection_message));
+ memset(link->hdcp_caps.rx_caps.raw, 0,
+ sizeof(link->hdcp_caps.rx_caps.raw));
+
+ if ((link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT &&
+ link->ddc->transaction_type ==
+ DDC_TRANSACTION_TYPE_I2C_OVER_AUX) ||
+ link->connector_signal == SIGNAL_TYPE_EDP) {
+ msg22.data = link->hdcp_caps.rx_caps.raw;
+ msg22.length = sizeof(link->hdcp_caps.rx_caps.raw);
+ msg22.msg_id = HDCP_MESSAGE_ID_RX_CAPS;
+ } else {
+ msg22.data = &link->hdcp_caps.rx_caps.fields.version;
+ msg22.length = sizeof(link->hdcp_caps.rx_caps.fields.version);
+ msg22.msg_id = HDCP_MESSAGE_ID_HDCP2VERSION;
+ }
+ msg22.version = HDCP_VERSION_22;
+ msg22.link = HDCP_LINK_PRIMARY;
+ msg22.max_retries = 5;
+ dc_process_hdcp_msg(signal, link, &msg22);
+
+ if (signal == SIGNAL_TYPE_DISPLAY_PORT || signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+ enum hdcp_message_status status = HDCP_MESSAGE_UNSUPPORTED;
+
+ msg14.data = &link->hdcp_caps.bcaps.raw;
+ msg14.length = sizeof(link->hdcp_caps.bcaps.raw);
+ msg14.msg_id = HDCP_MESSAGE_ID_READ_BCAPS;
+ msg14.version = HDCP_VERSION_14;
+ msg14.link = HDCP_LINK_PRIMARY;
+ msg14.max_retries = 5;
+
+ status = dc_process_hdcp_msg(signal, link, &msg14);
+ }
+
+}
+#endif // CONFIG_DRM_AMD_DC_HDCP
+static void read_current_link_settings_on_detect(struct dc_link *link)
+{
+ union lane_count_set lane_count_set = {0};
+ uint8_t link_bw_set;
+ uint8_t link_rate_set;
+ uint32_t read_dpcd_retry_cnt = 10;
+ enum dc_status status = DC_ERROR_UNEXPECTED;
+ int i;
+ union max_down_spread max_down_spread = {0};
+
+ // Read DPCD 00101h to find out the number of lanes currently set
+ for (i = 0; i < read_dpcd_retry_cnt; i++) {
+ status = core_link_read_dpcd(link,
+ DP_LANE_COUNT_SET,
+ &lane_count_set.raw,
+ sizeof(lane_count_set));
+ /* First DPCD read after VDD ON can fail if the particular board
+ * does not have HPD pin wired correctly. So if DPCD read fails,
+ * which it should never happen, retry a few times. Target worst
+ * case scenario of 80 ms.
+ */
+ if (status == DC_OK) {
+ link->cur_link_settings.lane_count =
+ lane_count_set.bits.LANE_COUNT_SET;
+ break;
+ }
+
+ msleep(8);
+ }
+
+ // Read DPCD 00100h to find if standard link rates are set
+ core_link_read_dpcd(link, DP_LINK_BW_SET,
+ &link_bw_set, sizeof(link_bw_set));
+
+ if (link_bw_set == 0) {
+ if (link->connector_signal == SIGNAL_TYPE_EDP) {
+ /* If standard link rates are not being used,
+ * Read DPCD 00115h to find the edp link rate set used
+ */
+ core_link_read_dpcd(link, DP_LINK_RATE_SET,
+ &link_rate_set, sizeof(link_rate_set));
+
+ // edp_supported_link_rates_count = 0 for DP
+ if (link_rate_set < link->dpcd_caps.edp_supported_link_rates_count) {
+ link->cur_link_settings.link_rate =
+ link->dpcd_caps.edp_supported_link_rates[link_rate_set];
+ link->cur_link_settings.link_rate_set = link_rate_set;
+ link->cur_link_settings.use_link_rate_set = true;
+ }
+ } else {
+ // Link Rate not found. Seamless boot may not work.
+ ASSERT(false);
+ }
+ } else {
+ link->cur_link_settings.link_rate = link_bw_set;
+ link->cur_link_settings.use_link_rate_set = false;
+ }
+ // Read DPCD 00003h to find the max down spread.
+ core_link_read_dpcd(link, DP_MAX_DOWNSPREAD,
+ &max_down_spread.raw, sizeof(max_down_spread));
+ link->cur_link_settings.link_spread =
+ max_down_spread.bits.MAX_DOWN_SPREAD ?
+ LINK_SPREAD_05_DOWNSPREAD_30KHZ : LINK_SPREAD_DISABLED;
+}
+
+static bool detect_dp(struct dc_link *link,
+ struct display_sink_capability *sink_caps,
+ enum dc_detect_reason reason)
+{
+ struct audio_support *audio_support = &link->dc->res_pool->audio_support;
+
+ sink_caps->signal = link_detect_sink_signal_type(link, reason);
+ sink_caps->transaction_type =
+ get_ddc_transaction_type(sink_caps->signal);
+
+ if (sink_caps->transaction_type == DDC_TRANSACTION_TYPE_I2C_OVER_AUX) {
+ sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT;
+ if (!detect_dp_sink_caps(link))
+ return false;
+
+ if (is_dp_branch_device(link))
+ /* DP SST branch */
+ link->type = dc_connection_sst_branch;
+ } else {
+ /* DP passive dongles */
+ sink_caps->signal = dp_passive_dongle_detection(link->ddc,
+ sink_caps,
+ audio_support);
+ link->dpcd_caps.dongle_type = sink_caps->dongle_type;
+ link->dpcd_caps.is_dongle_type_one = sink_caps->is_dongle_type_one;
+ link->dpcd_caps.dpcd_rev.raw = 0;
+ }
+
+ return true;
+}
+
+static bool is_same_edid(struct dc_edid *old_edid, struct dc_edid *new_edid)
+{
+ if (old_edid->length != new_edid->length)
+ return false;
+
+ if (new_edid->length == 0)
+ return false;
+
+ return (memcmp(old_edid->raw_edid,
+ new_edid->raw_edid, new_edid->length) == 0);
+}
+
+static bool wait_for_entering_dp_alt_mode(struct dc_link *link)
+{
+
+ /**
+ * something is terribly wrong if time out is > 200ms. (5Hz)
+ * 500 microseconds * 400 tries us 200 ms
+ **/
+ unsigned int sleep_time_in_microseconds = 500;
+ unsigned int tries_allowed = 400;
+ bool is_in_alt_mode;
+ unsigned long long enter_timestamp;
+ unsigned long long finish_timestamp;
+ unsigned long long time_taken_in_ns;
+ int tries_taken;
+
+ DC_LOGGER_INIT(link->ctx->logger);
+
+ /**
+ * this function will only exist if we are on dcn21 (is_in_alt_mode is a
+ * function pointer, so checking to see if it is equal to 0 is the same
+ * as checking to see if it is null
+ **/
+ if (!link->link_enc->funcs->is_in_alt_mode)
+ return true;
+
+ is_in_alt_mode = link->link_enc->funcs->is_in_alt_mode(link->link_enc);
+ DC_LOG_DC("DP Alt mode state on HPD: %d\n", is_in_alt_mode);
+
+ if (is_in_alt_mode)
+ return true;
+
+ enter_timestamp = dm_get_timestamp(link->ctx);
+
+ for (tries_taken = 0; tries_taken < tries_allowed; tries_taken++) {
+ udelay(sleep_time_in_microseconds);
+ /* ask the link if alt mode is enabled, if so return ok */
+ if (link->link_enc->funcs->is_in_alt_mode(link->link_enc)) {
+ finish_timestamp = dm_get_timestamp(link->ctx);
+ time_taken_in_ns =
+ dm_get_elapse_time_in_ns(link->ctx,
+ finish_timestamp,
+ enter_timestamp);
+ DC_LOG_WARNING("Alt mode entered finished after %llu ms\n",
+ div_u64(time_taken_in_ns, 1000000));
+ return true;
+ }
+ }
+ finish_timestamp = dm_get_timestamp(link->ctx);
+ time_taken_in_ns = dm_get_elapse_time_in_ns(link->ctx, finish_timestamp,
+ enter_timestamp);
+ DC_LOG_WARNING("Alt mode has timed out after %llu ms\n",
+ div_u64(time_taken_in_ns, 1000000));
+ return false;
+}
+
+static void apply_dpia_mst_dsc_always_on_wa(struct dc_link *link)
+{
+ /* Apply work around for tunneled MST on certain USB4 docks. Always use DSC if dock
+ * reports DSC support.
+ */
+ if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA &&
+ link->type == dc_connection_mst_branch &&
+ link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 &&
+ link->dpcd_caps.branch_hw_revision == DP_BRANCH_HW_REV_20 &&
+ link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT &&
+ !link->dc->debug.dpia_debug.bits.disable_mst_dsc_work_around)
+ link->wa_flags.dpia_mst_dsc_always_on = true;
+}
+
+static void revert_dpia_mst_dsc_always_on_wa(struct dc_link *link)
+{
+ /* Disable work around which keeps DSC on for tunneled MST on certain USB4 docks. */
+ if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
+ link->wa_flags.dpia_mst_dsc_always_on = false;
+}
+
+static bool discover_dp_mst_topology(struct dc_link *link, enum dc_detect_reason reason)
+{
+ DC_LOGGER_INIT(link->ctx->logger);
+
+ LINK_INFO("link=%d, mst branch is now Connected\n",
+ link->link_index);
+
+ link->type = dc_connection_mst_branch;
+ apply_dpia_mst_dsc_always_on_wa(link);
+
+ dm_helpers_dp_update_branch_info(link->ctx, link);
+ if (dm_helpers_dp_mst_start_top_mgr(link->ctx,
+ link, (reason == DETECT_REASON_BOOT || reason == DETECT_REASON_RESUMEFROMS3S4))) {
+ link_disconnect_sink(link);
+ } else {
+ link->type = dc_connection_sst_branch;
+ }
+
+ return link->type == dc_connection_mst_branch;
+}
+
+bool link_reset_cur_dp_mst_topology(struct dc_link *link)
+{
+ DC_LOGGER_INIT(link->ctx->logger);
+
+ LINK_INFO("link=%d, mst branch is now Disconnected\n",
+ link->link_index);
+
+ revert_dpia_mst_dsc_always_on_wa(link);
+ return dm_helpers_dp_mst_stop_top_mgr(link->ctx, link);
+}
+
+static bool should_prepare_phy_clocks_for_link_verification(const struct dc *dc,
+ enum dc_detect_reason reason)
+{
+ int i;
+ bool can_apply_seamless_boot = false;
+
+ for (i = 0; i < dc->current_state->stream_count; i++) {
+ if (dc->current_state->streams[i]->apply_seamless_boot_optimization) {
+ can_apply_seamless_boot = true;
+ break;
+ }
+ }
+
+ return !can_apply_seamless_boot && reason != DETECT_REASON_BOOT;
+}
+
+static void prepare_phy_clocks_for_destructive_link_verification(const struct dc *dc)
+{
+ dc_z10_restore(dc);
+ clk_mgr_exit_optimized_pwr_state(dc, dc->clk_mgr);
+}
+
+static void restore_phy_clocks_for_destructive_link_verification(const struct dc *dc)
+{
+ clk_mgr_optimize_pwr_state(dc, dc->clk_mgr);
+}
+
+static void verify_link_capability_destructive(struct dc_link *link,
+ struct dc_sink *sink,
+ enum dc_detect_reason reason)
+{
+ bool should_prepare_phy_clocks =
+ should_prepare_phy_clocks_for_link_verification(link->dc, reason);
+
+ if (should_prepare_phy_clocks)
+ prepare_phy_clocks_for_destructive_link_verification(link->dc);
+
+ if (dc_is_dp_signal(link->local_sink->sink_signal)) {
+ struct dc_link_settings known_limit_link_setting =
+ dp_get_max_link_cap(link);
+ link_set_all_streams_dpms_off_for_link(link);
+ dp_verify_link_cap_with_retries(
+ link, &known_limit_link_setting,
+ LINK_TRAINING_MAX_VERIFY_RETRY);
+ } else {
+ ASSERT(0);
+ }
+
+ if (should_prepare_phy_clocks)
+ restore_phy_clocks_for_destructive_link_verification(link->dc);
+}
+
+static void verify_link_capability_non_destructive(struct dc_link *link)
+{
+ if (dc_is_dp_signal(link->local_sink->sink_signal)) {
+ if (dc_is_embedded_signal(link->local_sink->sink_signal) ||
+ link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
+ /* TODO - should we check link encoder's max link caps here?
+ * How do we know which link encoder to check from?
+ */
+ link->verified_link_cap = link->reported_link_cap;
+ else
+ link->verified_link_cap = dp_get_max_link_cap(link);
+ }
+}
+
+static bool should_verify_link_capability_destructively(struct dc_link *link,
+ enum dc_detect_reason reason)
+{
+ bool destrictive = false;
+ struct dc_link_settings max_link_cap;
+ bool is_link_enc_unavailable = link->link_enc &&
+ link->dc->res_pool->funcs->link_encs_assign &&
+ !link_enc_cfg_is_link_enc_avail(
+ link->ctx->dc,
+ link->link_enc->preferred_engine,
+ link);
+
+ if (dc_is_dp_signal(link->local_sink->sink_signal)) {
+ max_link_cap = dp_get_max_link_cap(link);
+ destrictive = true;
+
+ if (link->dc->debug.skip_detection_link_training ||
+ dc_is_embedded_signal(link->local_sink->sink_signal) ||
+ link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) {
+ destrictive = false;
+ } else if (link_dp_get_encoding_format(&max_link_cap) ==
+ DP_8b_10b_ENCODING) {
+ if (link->dpcd_caps.is_mst_capable ||
+ is_link_enc_unavailable) {
+ destrictive = false;
+ }
+ }
+ }
+
+ return destrictive;
+}
+
+static void verify_link_capability(struct dc_link *link, struct dc_sink *sink,
+ enum dc_detect_reason reason)
+{
+ if (should_verify_link_capability_destructively(link, reason))
+ verify_link_capability_destructive(link, sink, reason);
+ else
+ verify_link_capability_non_destructive(link);
+}
+
+/**
+ * detect_link_and_local_sink() - Detect if a sink is attached to a given link
+ *
+ * link->local_sink is created or destroyed as needed.
+ *
+ * This does not create remote sinks.
+ */
+static bool detect_link_and_local_sink(struct dc_link *link,
+ enum dc_detect_reason reason)
+{
+ struct dc_sink_init_data sink_init_data = { 0 };
+ struct display_sink_capability sink_caps = { 0 };
+ uint32_t i;
+ bool converter_disable_audio = false;
+ struct audio_support *aud_support = &link->dc->res_pool->audio_support;
+ bool same_edid = false;
+ enum dc_edid_status edid_status;
+ struct dc_context *dc_ctx = link->ctx;
+ struct dc *dc = dc_ctx->dc;
+ struct dc_sink *sink = NULL;
+ struct dc_sink *prev_sink = NULL;
+ struct dpcd_caps prev_dpcd_caps;
+ enum dc_connection_type new_connection_type = dc_connection_none;
+ const uint32_t post_oui_delay = 30; // 30ms
+
+ DC_LOGGER_INIT(link->ctx->logger);
+
+ if (dc_is_virtual_signal(link->connector_signal))
+ return false;
+
+ if (((link->connector_signal == SIGNAL_TYPE_LVDS ||
+ link->connector_signal == SIGNAL_TYPE_EDP) &&
+ (!link->dc->config.allow_edp_hotplug_detection)) &&
+ link->local_sink) {
+ // need to re-write OUI and brightness in resume case
+ if (link->connector_signal == SIGNAL_TYPE_EDP &&
+ (link->dpcd_sink_ext_caps.bits.oled == 1)) {
+ dpcd_set_source_specific_data(link);
+ msleep(post_oui_delay);
+ set_default_brightness_aux(link);
+ //TODO: use cached
+ }
+
+ return true;
+ }
+
+ if (!dc_link_detect_connection_type(link, &new_connection_type)) {
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ prev_sink = link->local_sink;
+ if (prev_sink) {
+ dc_sink_retain(prev_sink);
+ memcpy(&prev_dpcd_caps, &link->dpcd_caps, sizeof(struct dpcd_caps));
+ }
+
+ link_disconnect_sink(link);
+ if (new_connection_type != dc_connection_none) {
+ link->type = new_connection_type;
+ link->link_state_valid = false;
+
+ /* From Disconnected-to-Connected. */
+ switch (link->connector_signal) {
+ case SIGNAL_TYPE_HDMI_TYPE_A: {
+ sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
+ if (aud_support->hdmi_audio_native)
+ sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
+ else
+ sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
+ break;
+ }
+
+ case SIGNAL_TYPE_DVI_SINGLE_LINK: {
+ sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
+ sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
+ break;
+ }
+
+ case SIGNAL_TYPE_DVI_DUAL_LINK: {
+ sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
+ sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
+ break;
+ }
+
+ case SIGNAL_TYPE_LVDS: {
+ sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
+ sink_caps.signal = SIGNAL_TYPE_LVDS;
+ break;
+ }
+
+ case SIGNAL_TYPE_EDP: {
+ detect_edp_sink_caps(link);
+ read_current_link_settings_on_detect(link);
+
+ /* Disable power sequence on MIPI panel + converter
+ */
+ if (dc->config.enable_mipi_converter_optimization &&
+ dc_ctx->dce_version == DCN_VERSION_3_01 &&
+ link->dpcd_caps.sink_dev_id == DP_BRANCH_DEVICE_ID_0022B9 &&
+ memcmp(&link->dpcd_caps.branch_dev_name, DP_SINK_BRANCH_DEV_NAME_7580,
+ sizeof(link->dpcd_caps.branch_dev_name)) == 0) {
+ dc->config.edp_no_power_sequencing = true;
+
+ if (!link->dpcd_caps.set_power_state_capable_edp)
+ link->wa_flags.dp_keep_receiver_powered = true;
+ }
+
+ sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
+ sink_caps.signal = SIGNAL_TYPE_EDP;
+ break;
+ }
+
+ case SIGNAL_TYPE_DISPLAY_PORT: {
+
+ /* wa HPD high coming too early*/
+ if (link->ep_type == DISPLAY_ENDPOINT_PHY &&
+ link->link_enc->features.flags.bits.DP_IS_USB_C == 1) {
+
+ /* if alt mode times out, return false */
+ if (!wait_for_entering_dp_alt_mode(link))
+ return false;
+ }
+
+ if (!detect_dp(link, &sink_caps, reason)) {
+ if (prev_sink)
+ dc_sink_release(prev_sink);
+ return false;
+ }
+
+ /* Active SST downstream branch device unplug*/
+ if (link->type == dc_connection_sst_branch &&
+ link->dpcd_caps.sink_count.bits.SINK_COUNT == 0) {
+ if (prev_sink)
+ /* Downstream unplug */
+ dc_sink_release(prev_sink);
+ return true;
+ }
+
+ /* disable audio for non DP to HDMI active sst converter */
+ if (link->type == dc_connection_sst_branch &&
+ is_dp_active_dongle(link) &&
+ (link->dpcd_caps.dongle_type !=
+ DISPLAY_DONGLE_DP_HDMI_CONVERTER))
+ converter_disable_audio = true;
+ break;
+ }
+
+ default:
+ DC_ERROR("Invalid connector type! signal:%d\n",
+ link->connector_signal);
+ if (prev_sink)
+ dc_sink_release(prev_sink);
+ return false;
+ } /* switch() */
+
+ if (link->dpcd_caps.sink_count.bits.SINK_COUNT)
+ link->dpcd_sink_count =
+ link->dpcd_caps.sink_count.bits.SINK_COUNT;
+ else
+ link->dpcd_sink_count = 1;
+
+ set_ddc_transaction_type(link->ddc,
+ sink_caps.transaction_type);
+
+ link->aux_mode =
+ link_is_in_aux_transaction_mode(link->ddc);
+
+ sink_init_data.link = link;
+ sink_init_data.sink_signal = sink_caps.signal;
+
+ sink = dc_sink_create(&sink_init_data);
+ if (!sink) {
+ DC_ERROR("Failed to create sink!\n");
+ if (prev_sink)
+ dc_sink_release(prev_sink);
+ return false;
+ }
+
+ sink->link->dongle_max_pix_clk = sink_caps.max_hdmi_pixel_clock;
+ sink->converter_disable_audio = converter_disable_audio;
+
+ /* dc_sink_create returns a new reference */
+ link->local_sink = sink;
+
+ edid_status = dm_helpers_read_local_edid(link->ctx,
+ link, sink);
+
+ switch (edid_status) {
+ case EDID_BAD_CHECKSUM:
+ DC_LOG_ERROR("EDID checksum invalid.\n");
+ break;
+ case EDID_PARTIAL_VALID:
+ DC_LOG_ERROR("Partial EDID valid, abandon invalid blocks.\n");
+ break;
+ case EDID_NO_RESPONSE:
+ DC_LOG_ERROR("No EDID read.\n");
+ /*
+ * Abort detection for non-DP connectors if we have
+ * no EDID
+ *
+ * DP needs to report as connected if HDP is high
+ * even if we have no EDID in order to go to
+ * fail-safe mode
+ */
+ if (dc_is_hdmi_signal(link->connector_signal) ||
+ dc_is_dvi_signal(link->connector_signal)) {
+ if (prev_sink)
+ dc_sink_release(prev_sink);
+
+ return false;
+ }
+
+ if (link->type == dc_connection_sst_branch &&
+ link->dpcd_caps.dongle_type ==
+ DISPLAY_DONGLE_DP_VGA_CONVERTER &&
+ reason == DETECT_REASON_HPDRX) {
+ /* Abort detection for DP-VGA adapters when EDID
+ * can't be read and detection reason is VGA-side
+ * hotplug
+ */
+ if (prev_sink)
+ dc_sink_release(prev_sink);
+ link_disconnect_sink(link);
+
+ return true;
+ }
+
+ break;
+ default:
+ break;
+ }
+
+ // Check if edid is the same
+ if ((prev_sink) &&
+ (edid_status == EDID_THE_SAME || edid_status == EDID_OK))
+ same_edid = is_same_edid(&prev_sink->dc_edid,
+ &sink->dc_edid);
+
+ if (sink->edid_caps.panel_patch.skip_scdc_overwrite)
+ link->ctx->dc->debug.hdmi20_disable = true;
+
+ if (dc_is_hdmi_signal(link->connector_signal))
+ read_scdc_caps(link->ddc, link->local_sink);
+
+ if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT &&
+ sink_caps.transaction_type ==
+ DDC_TRANSACTION_TYPE_I2C_OVER_AUX) {
+ /*
+ * TODO debug why certain monitors don't like
+ * two link trainings
+ */
+#if defined(CONFIG_DRM_AMD_DC_HDCP)
+ query_hdcp_capability(sink->sink_signal, link);
+#endif
+ } else {
+ // If edid is the same, then discard new sink and revert back to original sink
+ if (same_edid) {
+ link_disconnect_remap(prev_sink, link);
+ sink = prev_sink;
+ prev_sink = NULL;
+ }
+#if defined(CONFIG_DRM_AMD_DC_HDCP)
+ query_hdcp_capability(sink->sink_signal, link);
+#endif
+ }
+
+ /* HDMI-DVI Dongle */
+ if (sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A &&
+ !sink->edid_caps.edid_hdmi)
+ sink->sink_signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
+
+ if (link->local_sink && dc_is_dp_signal(sink_caps.signal))
+ dp_trace_init(link);
+
+ /* Connectivity log: detection */
+ for (i = 0; i < sink->dc_edid.length / DC_EDID_BLOCK_SIZE; i++) {
+ CONN_DATA_DETECT(link,
+ &sink->dc_edid.raw_edid[i * DC_EDID_BLOCK_SIZE],
+ DC_EDID_BLOCK_SIZE,
+ "%s: [Block %d] ", sink->edid_caps.display_name, i);
+ }
+
+ DC_LOG_DETECTION_EDID_PARSER("%s: "
+ "manufacturer_id = %X, "
+ "product_id = %X, "
+ "serial_number = %X, "
+ "manufacture_week = %d, "
+ "manufacture_year = %d, "
+ "display_name = %s, "
+ "speaker_flag = %d, "
+ "audio_mode_count = %d\n",
+ __func__,
+ sink->edid_caps.manufacturer_id,
+ sink->edid_caps.product_id,
+ sink->edid_caps.serial_number,
+ sink->edid_caps.manufacture_week,
+ sink->edid_caps.manufacture_year,
+ sink->edid_caps.display_name,
+ sink->edid_caps.speaker_flags,
+ sink->edid_caps.audio_mode_count);
+
+ for (i = 0; i < sink->edid_caps.audio_mode_count; i++) {
+ DC_LOG_DETECTION_EDID_PARSER("%s: mode number = %d, "
+ "format_code = %d, "
+ "channel_count = %d, "
+ "sample_rate = %d, "
+ "sample_size = %d\n",
+ __func__,
+ i,
+ sink->edid_caps.audio_modes[i].format_code,
+ sink->edid_caps.audio_modes[i].channel_count,
+ sink->edid_caps.audio_modes[i].sample_rate,
+ sink->edid_caps.audio_modes[i].sample_size);
+ }
+
+ if (link->connector_signal == SIGNAL_TYPE_EDP) {
+ // Init dc_panel_config by HW config
+ if (dc_ctx->dc->res_pool->funcs->get_panel_config_defaults)
+ dc_ctx->dc->res_pool->funcs->get_panel_config_defaults(&link->panel_config);
+ // Pickup base DM settings
+ dm_helpers_init_panel_settings(dc_ctx, &link->panel_config, sink);
+ // Override dc_panel_config if system has specific settings
+ dm_helpers_override_panel_settings(dc_ctx, &link->panel_config);
+ }
+
+ } else {
+ /* From Connected-to-Disconnected. */
+ link->type = dc_connection_none;
+ sink_caps.signal = SIGNAL_TYPE_NONE;
+#if defined(CONFIG_DRM_AMD_DC_HDCP)
+ memset(&link->hdcp_caps, 0, sizeof(struct hdcp_caps));
+#endif
+ /* When we unplug a passive DP-HDMI dongle connection, dongle_max_pix_clk
+ * is not cleared. If we emulate a DP signal on this connection, it thinks
+ * the dongle is still there and limits the number of modes we can emulate.
+ * Clear dongle_max_pix_clk on disconnect to fix this
+ */
+ link->dongle_max_pix_clk = 0;
+
+ dc_link_clear_dprx_states(link);
+ dp_trace_reset(link);
+ }
+
+ LINK_INFO("link=%d, dc_sink_in=%p is now %s prev_sink=%p edid same=%d\n",
+ link->link_index, sink,
+ (sink_caps.signal ==
+ SIGNAL_TYPE_NONE ? "Disconnected" : "Connected"),
+ prev_sink, same_edid);
+
+ if (prev_sink)
+ dc_sink_release(prev_sink);
+
+ return true;
+}
+
+/**
+ * dc_link_detect_connection_type() - Determine if there is a sink connected
+ *
+ * @type: Returned connection type
+ * Does not detect downstream devices, such as MST sinks
+ * or display connected through active dongles
+ */
+bool link_detect_connection_type(struct dc_link *link, enum dc_connection_type *type)
+{
+ uint32_t is_hpd_high = 0;
+
+ if (link->connector_signal == SIGNAL_TYPE_LVDS) {
+ *type = dc_connection_single;
+ return true;
+ }
+
+ if (link->connector_signal == SIGNAL_TYPE_EDP) {
+ /*in case it is not on*/
+ if (!link->dc->config.edp_no_power_sequencing)
+ link->dc->hwss.edp_power_control(link, true);
+ link->dc->hwss.edp_wait_for_hpd_ready(link, true);
+ }
+
+ /* Link may not have physical HPD pin. */
+ if (link->ep_type != DISPLAY_ENDPOINT_PHY) {
+ if (link->is_hpd_pending || !dc_link_dpia_query_hpd_status(link))
+ *type = dc_connection_none;
+ else
+ *type = dc_connection_single;
+
+ return true;
+ }
+
+
+ if (!query_hpd_status(link, &is_hpd_high))
+ goto hpd_gpio_failure;
+
+ if (is_hpd_high) {
+ *type = dc_connection_single;
+ /* TODO: need to do the actual detection */
+ } else {
+ *type = dc_connection_none;
+ }
+
+ return true;
+
+hpd_gpio_failure:
+ return false;
+}
+
+bool link_detect(struct dc_link *link, enum dc_detect_reason reason)
+{
+ bool is_local_sink_detect_success;
+ bool is_delegated_to_mst_top_mgr = false;
+ enum dc_connection_type pre_link_type = link->type;
+
+ is_local_sink_detect_success = detect_link_and_local_sink(link, reason);
+
+ if (is_local_sink_detect_success && link->local_sink)
+ verify_link_capability(link, link->local_sink, reason);
+
+ if (is_local_sink_detect_success && link->local_sink &&
+ dc_is_dp_signal(link->local_sink->sink_signal) &&
+ link->dpcd_caps.is_mst_capable)
+ is_delegated_to_mst_top_mgr = discover_dp_mst_topology(link, reason);
+
+ if (is_local_sink_detect_success &&
+ pre_link_type == dc_connection_mst_branch &&
+ link->type != dc_connection_mst_branch)
+ is_delegated_to_mst_top_mgr = link_reset_cur_dp_mst_topology(link);
+
+ return is_local_sink_detect_success && !is_delegated_to_mst_top_mgr;
+}
+
+void link_clear_dprx_states(struct dc_link *link)
+{
+ memset(&link->dprx_states, 0, sizeof(link->dprx_states));
+}
+#if defined(CONFIG_DRM_AMD_DC_HDCP)
+
+bool link_is_hdcp14(struct dc_link *link, enum signal_type signal)
+{
+ bool ret = false;
+
+ switch (signal) {
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ case SIGNAL_TYPE_DISPLAY_PORT_MST:
+ ret = link->hdcp_caps.bcaps.bits.HDCP_CAPABLE;
+ break;
+ case SIGNAL_TYPE_DVI_SINGLE_LINK:
+ case SIGNAL_TYPE_DVI_DUAL_LINK:
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+ /* HDMI doesn't tell us its HDCP(1.4) capability, so assume to always be capable,
+ * we can poll for bksv but some displays have an issue with this. Since its so rare
+ * for a display to not be 1.4 capable, this assumtion is ok
+ */
+ ret = true;
+ break;
+ default:
+ break;
+ }
+ return ret;
+}
+
+bool link_is_hdcp22(struct dc_link *link, enum signal_type signal)
+{
+ bool ret = false;
+
+ switch (signal) {
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ case SIGNAL_TYPE_DISPLAY_PORT_MST:
+ ret = (link->hdcp_caps.bcaps.bits.HDCP_CAPABLE &&
+ link->hdcp_caps.rx_caps.fields.byte0.hdcp_capable &&
+ (link->hdcp_caps.rx_caps.fields.version == 0x2)) ? 1 : 0;
+ break;
+ case SIGNAL_TYPE_DVI_SINGLE_LINK:
+ case SIGNAL_TYPE_DVI_DUAL_LINK:
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+ ret = (link->hdcp_caps.rx_caps.fields.version == 0x4) ? 1:0;
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+#endif // CONFIG_DRM_AMD_DC_HDCP
+
+const struct dc_link_status *link_get_status(const struct dc_link *link)
+{
+ return &link->link_status;
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_detection.h b/drivers/gpu/drm/amd/display/dc/link/link_detection.h
new file mode 100644
index 000000000000..1831636516fb
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/link/link_detection.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_LINK_DETECTION_H__
+#define __DC_LINK_DETECTION_H__
+#include "link.h"
+
+#endif /* __DC_LINK_DETECTION_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
new file mode 100644
index 000000000000..257e1c3ba00a
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
@@ -0,0 +1,2528 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+/* FILE POLICY AND INTENDED USAGE:
+ * This file owns the programming sequence of stream's dpms state associated
+ * with the link and link's enable/disable sequences as result of the stream's
+ * dpms state change.
+ *
+ * TODO - The reason link owns stream's dpms programming sequence is
+ * because dpms programming sequence is highly dependent on underlying signal
+ * specific link protocols. This unfortunately causes link to own a portion of
+ * stream state programming sequence. This creates a gray area where the
+ * boundary between link and stream is not clearly defined.
+ */
+
+#include "link_dpms.h"
+#include "link_hwss.h"
+#include "accessories/link_fpga.h"
+#include "accessories/link_dp_trace.h"
+#include "protocols/link_dpcd.h"
+#include "protocols/link_ddc.h"
+#include "protocols/link_hpd.h"
+#include "protocols/link_dp_phy.h"
+#include "protocols/link_dp_capability.h"
+#include "protocols/link_dp_training.h"
+#include "protocols/link_edp_panel_control.h"
+
+#include "dm_helpers.h"
+#include "link_enc_cfg.h"
+#include "resource.h"
+#include "dsc.h"
+#include "dccg.h"
+#include "clk_mgr.h"
+#include "atomfirmware.h"
+#define DC_LOGGER_INIT(logger)
+
+#define LINK_INFO(...) \
+ DC_LOG_HW_HOTPLUG( \
+ __VA_ARGS__)
+
+#define RETIMER_REDRIVER_INFO(...) \
+ DC_LOG_RETIMER_REDRIVER( \
+ __VA_ARGS__)
+#include "dc/dcn30/dcn30_vpg.h"
+
+#define MAX_MTP_SLOT_COUNT 64
+#define LINK_TRAINING_ATTEMPTS 4
+#define PEAK_FACTOR_X1000 1006
+
+void link_blank_all_dp_displays(struct dc *dc)
+{
+ unsigned int i;
+ uint8_t dpcd_power_state = '\0';
+ enum dc_status status = DC_ERROR_UNEXPECTED;
+
+ for (i = 0; i < dc->link_count; i++) {
+ if ((dc->links[i]->connector_signal != SIGNAL_TYPE_DISPLAY_PORT) ||
+ (dc->links[i]->priv == NULL) || (dc->links[i]->local_sink == NULL))
+ continue;
+
+ /* DP 2.0 spec requires that we read LTTPR caps first */
+ dp_retrieve_lttpr_cap(dc->links[i]);
+ /* if any of the displays are lit up turn them off */
+ status = core_link_read_dpcd(dc->links[i], DP_SET_POWER,
+ &dpcd_power_state, sizeof(dpcd_power_state));
+
+ if (status == DC_OK && dpcd_power_state == DP_POWER_STATE_D0)
+ link_blank_dp_stream(dc->links[i], true);
+ }
+
+}
+
+void link_blank_all_edp_displays(struct dc *dc)
+{
+ unsigned int i;
+ uint8_t dpcd_power_state = '\0';
+ enum dc_status status = DC_ERROR_UNEXPECTED;
+
+ for (i = 0; i < dc->link_count; i++) {
+ if ((dc->links[i]->connector_signal != SIGNAL_TYPE_EDP) ||
+ (!dc->links[i]->edp_sink_present))
+ continue;
+
+ /* if any of the displays are lit up turn them off */
+ status = core_link_read_dpcd(dc->links[i], DP_SET_POWER,
+ &dpcd_power_state, sizeof(dpcd_power_state));
+
+ if (status == DC_OK && dpcd_power_state == DP_POWER_STATE_D0)
+ link_blank_dp_stream(dc->links[i], true);
+ }
+}
+
+void link_blank_dp_stream(struct dc_link *link, bool hw_init)
+{
+ unsigned int j;
+ struct dc *dc = link->ctx->dc;
+ enum signal_type signal = link->connector_signal;
+
+ if ((signal == SIGNAL_TYPE_EDP) ||
+ (signal == SIGNAL_TYPE_DISPLAY_PORT)) {
+ if (link->ep_type == DISPLAY_ENDPOINT_PHY &&
+ link->link_enc->funcs->get_dig_frontend &&
+ link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
+ unsigned int fe = link->link_enc->funcs->get_dig_frontend(link->link_enc);
+
+ if (fe != ENGINE_ID_UNKNOWN)
+ for (j = 0; j < dc->res_pool->stream_enc_count; j++) {
+ if (fe == dc->res_pool->stream_enc[j]->id) {
+ dc->res_pool->stream_enc[j]->funcs->dp_blank(link,
+ dc->res_pool->stream_enc[j]);
+ break;
+ }
+ }
+ }
+
+ if ((!link->wa_flags.dp_keep_receiver_powered) || hw_init)
+ dc_link_dp_receiver_power_ctrl(link, false);
+ }
+}
+
+void link_set_all_streams_dpms_off_for_link(struct dc_link *link)
+{
+ struct pipe_ctx *pipes[MAX_PIPES];
+ struct dc_state *state = link->dc->current_state;
+ uint8_t count;
+ int i;
+ struct dc_stream_update stream_update;
+ bool dpms_off = true;
+ struct link_resource link_res = {0};
+
+ memset(&stream_update, 0, sizeof(stream_update));
+ stream_update.dpms_off = &dpms_off;
+
+ link_get_master_pipes_with_dpms_on(link, state, &count, pipes);
+
+ for (i = 0; i < count; i++) {
+ stream_update.stream = pipes[i]->stream;
+ dc_commit_updates_for_stream(link->ctx->dc, NULL, 0,
+ pipes[i]->stream, &stream_update,
+ state);
+ }
+
+ /* link can be also enabled by vbios. In this case it is not recorded
+ * in pipe_ctx. Disable link phy here to make sure it is completely off
+ */
+ dp_disable_link_phy(link, &link_res, link->connector_signal);
+}
+
+void link_resume(struct dc_link *link)
+{
+ if (link->connector_signal != SIGNAL_TYPE_VIRTUAL)
+ program_hpd_filter(link);
+}
+
+/* This function returns true if the pipe is used to feed video signal directly
+ * to the link.
+ */
+static bool is_master_pipe_for_link(const struct dc_link *link,
+ const struct pipe_ctx *pipe)
+{
+ return (pipe->stream &&
+ pipe->stream->link &&
+ pipe->stream->link == link &&
+ pipe->top_pipe == NULL &&
+ pipe->prev_odm_pipe == NULL);
+}
+
+/*
+ * This function finds all master pipes feeding to a given link with dpms set to
+ * on in given dc state.
+ */
+void link_get_master_pipes_with_dpms_on(const struct dc_link *link,
+ struct dc_state *state,
+ uint8_t *count,
+ struct pipe_ctx *pipes[MAX_PIPES])
+{
+ int i;
+ struct pipe_ctx *pipe = NULL;
+
+ *count = 0;
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe = &state->res_ctx.pipe_ctx[i];
+
+ if (is_master_pipe_for_link(link, pipe) &&
+ pipe->stream->dpms_off == false) {
+ pipes[(*count)++] = pipe;
+ }
+ }
+}
+
+static bool get_ext_hdmi_settings(struct pipe_ctx *pipe_ctx,
+ enum engine_id eng_id,
+ struct ext_hdmi_settings *settings)
+{
+ bool result = false;
+ int i = 0;
+ struct integrated_info *integrated_info =
+ pipe_ctx->stream->ctx->dc_bios->integrated_info;
+
+ if (integrated_info == NULL)
+ return false;
+
+ /*
+ * Get retimer settings from sbios for passing SI eye test for DCE11
+ * The setting values are varied based on board revision and port id
+ * Therefore the setting values of each ports is passed by sbios.
+ */
+
+ // Check if current bios contains ext Hdmi settings
+ if (integrated_info->gpu_cap_info & 0x20) {
+ switch (eng_id) {
+ case ENGINE_ID_DIGA:
+ settings->slv_addr = integrated_info->dp0_ext_hdmi_slv_addr;
+ settings->reg_num = integrated_info->dp0_ext_hdmi_6g_reg_num;
+ settings->reg_num_6g = integrated_info->dp0_ext_hdmi_6g_reg_num;
+ memmove(settings->reg_settings,
+ integrated_info->dp0_ext_hdmi_reg_settings,
+ sizeof(integrated_info->dp0_ext_hdmi_reg_settings));
+ memmove(settings->reg_settings_6g,
+ integrated_info->dp0_ext_hdmi_6g_reg_settings,
+ sizeof(integrated_info->dp0_ext_hdmi_6g_reg_settings));
+ result = true;
+ break;
+ case ENGINE_ID_DIGB:
+ settings->slv_addr = integrated_info->dp1_ext_hdmi_slv_addr;
+ settings->reg_num = integrated_info->dp1_ext_hdmi_6g_reg_num;
+ settings->reg_num_6g = integrated_info->dp1_ext_hdmi_6g_reg_num;
+ memmove(settings->reg_settings,
+ integrated_info->dp1_ext_hdmi_reg_settings,
+ sizeof(integrated_info->dp1_ext_hdmi_reg_settings));
+ memmove(settings->reg_settings_6g,
+ integrated_info->dp1_ext_hdmi_6g_reg_settings,
+ sizeof(integrated_info->dp1_ext_hdmi_6g_reg_settings));
+ result = true;
+ break;
+ case ENGINE_ID_DIGC:
+ settings->slv_addr = integrated_info->dp2_ext_hdmi_slv_addr;
+ settings->reg_num = integrated_info->dp2_ext_hdmi_6g_reg_num;
+ settings->reg_num_6g = integrated_info->dp2_ext_hdmi_6g_reg_num;
+ memmove(settings->reg_settings,
+ integrated_info->dp2_ext_hdmi_reg_settings,
+ sizeof(integrated_info->dp2_ext_hdmi_reg_settings));
+ memmove(settings->reg_settings_6g,
+ integrated_info->dp2_ext_hdmi_6g_reg_settings,
+ sizeof(integrated_info->dp2_ext_hdmi_6g_reg_settings));
+ result = true;
+ break;
+ case ENGINE_ID_DIGD:
+ settings->slv_addr = integrated_info->dp3_ext_hdmi_slv_addr;
+ settings->reg_num = integrated_info->dp3_ext_hdmi_6g_reg_num;
+ settings->reg_num_6g = integrated_info->dp3_ext_hdmi_6g_reg_num;
+ memmove(settings->reg_settings,
+ integrated_info->dp3_ext_hdmi_reg_settings,
+ sizeof(integrated_info->dp3_ext_hdmi_reg_settings));
+ memmove(settings->reg_settings_6g,
+ integrated_info->dp3_ext_hdmi_6g_reg_settings,
+ sizeof(integrated_info->dp3_ext_hdmi_6g_reg_settings));
+ result = true;
+ break;
+ default:
+ break;
+ }
+
+ if (result == true) {
+ // Validate settings from bios integrated info table
+ if (settings->slv_addr == 0)
+ return false;
+ if (settings->reg_num > 9)
+ return false;
+ if (settings->reg_num_6g > 3)
+ return false;
+
+ for (i = 0; i < settings->reg_num; i++) {
+ if (settings->reg_settings[i].i2c_reg_index > 0x20)
+ return false;
+ }
+
+ for (i = 0; i < settings->reg_num_6g; i++) {
+ if (settings->reg_settings_6g[i].i2c_reg_index > 0x20)
+ return false;
+ }
+ }
+ }
+
+ return result;
+}
+
+static bool write_i2c(struct pipe_ctx *pipe_ctx,
+ uint8_t address, uint8_t *buffer, uint32_t length)
+{
+ struct i2c_command cmd = {0};
+ struct i2c_payload payload = {0};
+
+ memset(&payload, 0, sizeof(payload));
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.number_of_payloads = 1;
+ cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
+ cmd.speed = pipe_ctx->stream->ctx->dc->caps.i2c_speed_in_khz;
+
+ payload.address = address;
+ payload.data = buffer;
+ payload.length = length;
+ payload.write = true;
+ cmd.payloads = &payload;
+
+ if (dm_helpers_submit_i2c(pipe_ctx->stream->ctx,
+ pipe_ctx->stream->link, &cmd))
+ return true;
+
+ return false;
+}
+
+static void write_i2c_retimer_setting(
+ struct pipe_ctx *pipe_ctx,
+ bool is_vga_mode,
+ bool is_over_340mhz,
+ struct ext_hdmi_settings *settings)
+{
+ uint8_t slave_address = (settings->slv_addr >> 1);
+ uint8_t buffer[2];
+ const uint8_t apply_rx_tx_change = 0x4;
+ uint8_t offset = 0xA;
+ uint8_t value = 0;
+ int i = 0;
+ bool i2c_success = false;
+ DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
+
+ memset(&buffer, 0, sizeof(buffer));
+
+ /* Start Ext-Hdmi programming*/
+
+ for (i = 0; i < settings->reg_num; i++) {
+ /* Apply 3G settings */
+ if (settings->reg_settings[i].i2c_reg_index <= 0x20) {
+
+ buffer[0] = settings->reg_settings[i].i2c_reg_index;
+ buffer[1] = settings->reg_settings[i].i2c_reg_val;
+ i2c_success = write_i2c(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
+ RETIMER_REDRIVER_INFO("retimer write to slave_address = 0x%x,\
+ offset = 0x%x, reg_val= 0x%x, i2c_success = %d\n",
+ slave_address, buffer[0], buffer[1], i2c_success?1:0);
+
+ if (!i2c_success)
+ goto i2c_write_fail;
+
+ /* Based on DP159 specs, APPLY_RX_TX_CHANGE bit in 0x0A
+ * needs to be set to 1 on every 0xA-0xC write.
+ */
+ if (settings->reg_settings[i].i2c_reg_index == 0xA ||
+ settings->reg_settings[i].i2c_reg_index == 0xB ||
+ settings->reg_settings[i].i2c_reg_index == 0xC) {
+
+ /* Query current value from offset 0xA */
+ if (settings->reg_settings[i].i2c_reg_index == 0xA)
+ value = settings->reg_settings[i].i2c_reg_val;
+ else {
+ i2c_success =
+ link_query_ddc_data(
+ pipe_ctx->stream->link->ddc,
+ slave_address, &offset, 1, &value, 1);
+ if (!i2c_success)
+ goto i2c_write_fail;
+ }
+
+ buffer[0] = offset;
+ /* Set APPLY_RX_TX_CHANGE bit to 1 */
+ buffer[1] = value | apply_rx_tx_change;
+ i2c_success = write_i2c(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
+ RETIMER_REDRIVER_INFO("retimer write to slave_address = 0x%x,\
+ offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
+ slave_address, buffer[0], buffer[1], i2c_success?1:0);
+ if (!i2c_success)
+ goto i2c_write_fail;
+ }
+ }
+ }
+
+ /* Apply 3G settings */
+ if (is_over_340mhz) {
+ for (i = 0; i < settings->reg_num_6g; i++) {
+ /* Apply 3G settings */
+ if (settings->reg_settings[i].i2c_reg_index <= 0x20) {
+
+ buffer[0] = settings->reg_settings_6g[i].i2c_reg_index;
+ buffer[1] = settings->reg_settings_6g[i].i2c_reg_val;
+ i2c_success = write_i2c(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
+ RETIMER_REDRIVER_INFO("above 340Mhz: retimer write to slave_address = 0x%x,\
+ offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
+ slave_address, buffer[0], buffer[1], i2c_success?1:0);
+
+ if (!i2c_success)
+ goto i2c_write_fail;
+
+ /* Based on DP159 specs, APPLY_RX_TX_CHANGE bit in 0x0A
+ * needs to be set to 1 on every 0xA-0xC write.
+ */
+ if (settings->reg_settings_6g[i].i2c_reg_index == 0xA ||
+ settings->reg_settings_6g[i].i2c_reg_index == 0xB ||
+ settings->reg_settings_6g[i].i2c_reg_index == 0xC) {
+
+ /* Query current value from offset 0xA */
+ if (settings->reg_settings_6g[i].i2c_reg_index == 0xA)
+ value = settings->reg_settings_6g[i].i2c_reg_val;
+ else {
+ i2c_success =
+ link_query_ddc_data(
+ pipe_ctx->stream->link->ddc,
+ slave_address, &offset, 1, &value, 1);
+ if (!i2c_success)
+ goto i2c_write_fail;
+ }
+
+ buffer[0] = offset;
+ /* Set APPLY_RX_TX_CHANGE bit to 1 */
+ buffer[1] = value | apply_rx_tx_change;
+ i2c_success = write_i2c(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
+ RETIMER_REDRIVER_INFO("retimer write to slave_address = 0x%x,\
+ offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
+ slave_address, buffer[0], buffer[1], i2c_success?1:0);
+ if (!i2c_success)
+ goto i2c_write_fail;
+ }
+ }
+ }
+ }
+
+ if (is_vga_mode) {
+ /* Program additional settings if using 640x480 resolution */
+
+ /* Write offset 0xFF to 0x01 */
+ buffer[0] = 0xff;
+ buffer[1] = 0x01;
+ i2c_success = write_i2c(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
+ RETIMER_REDRIVER_INFO("retimer write to slave_address = 0x%x,\
+ offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
+ slave_address, buffer[0], buffer[1], i2c_success?1:0);
+ if (!i2c_success)
+ goto i2c_write_fail;
+
+ /* Write offset 0x00 to 0x23 */
+ buffer[0] = 0x00;
+ buffer[1] = 0x23;
+ i2c_success = write_i2c(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
+ RETIMER_REDRIVER_INFO("retimer write to slave_address = 0x%x,\
+ offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
+ slave_address, buffer[0], buffer[1], i2c_success?1:0);
+ if (!i2c_success)
+ goto i2c_write_fail;
+
+ /* Write offset 0xff to 0x00 */
+ buffer[0] = 0xff;
+ buffer[1] = 0x00;
+ i2c_success = write_i2c(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
+ RETIMER_REDRIVER_INFO("retimer write to slave_address = 0x%x,\
+ offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
+ slave_address, buffer[0], buffer[1], i2c_success?1:0);
+ if (!i2c_success)
+ goto i2c_write_fail;
+
+ }
+
+ return;
+
+i2c_write_fail:
+ DC_LOG_DEBUG("Set retimer failed");
+}
+
+static void write_i2c_default_retimer_setting(
+ struct pipe_ctx *pipe_ctx,
+ bool is_vga_mode,
+ bool is_over_340mhz)
+{
+ uint8_t slave_address = (0xBA >> 1);
+ uint8_t buffer[2];
+ bool i2c_success = false;
+ DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
+
+ memset(&buffer, 0, sizeof(buffer));
+
+ /* Program Slave Address for tuning single integrity */
+ /* Write offset 0x0A to 0x13 */
+ buffer[0] = 0x0A;
+ buffer[1] = 0x13;
+ i2c_success = write_i2c(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
+ RETIMER_REDRIVER_INFO("retimer writes default setting to slave_address = 0x%x,\
+ offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
+ slave_address, buffer[0], buffer[1], i2c_success?1:0);
+ if (!i2c_success)
+ goto i2c_write_fail;
+
+ /* Write offset 0x0A to 0x17 */
+ buffer[0] = 0x0A;
+ buffer[1] = 0x17;
+ i2c_success = write_i2c(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
+ RETIMER_REDRIVER_INFO("retimer write to slave_addr = 0x%x,\
+ offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
+ slave_address, buffer[0], buffer[1], i2c_success?1:0);
+ if (!i2c_success)
+ goto i2c_write_fail;
+
+ /* Write offset 0x0B to 0xDA or 0xD8 */
+ buffer[0] = 0x0B;
+ buffer[1] = is_over_340mhz ? 0xDA : 0xD8;
+ i2c_success = write_i2c(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
+ RETIMER_REDRIVER_INFO("retimer write to slave_addr = 0x%x,\
+ offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
+ slave_address, buffer[0], buffer[1], i2c_success?1:0);
+ if (!i2c_success)
+ goto i2c_write_fail;
+
+ /* Write offset 0x0A to 0x17 */
+ buffer[0] = 0x0A;
+ buffer[1] = 0x17;
+ i2c_success = write_i2c(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
+ RETIMER_REDRIVER_INFO("retimer write to slave_addr = 0x%x,\
+ offset = 0x%x, reg_val= 0x%x, i2c_success = %d\n",
+ slave_address, buffer[0], buffer[1], i2c_success?1:0);
+ if (!i2c_success)
+ goto i2c_write_fail;
+
+ /* Write offset 0x0C to 0x1D or 0x91 */
+ buffer[0] = 0x0C;
+ buffer[1] = is_over_340mhz ? 0x1D : 0x91;
+ i2c_success = write_i2c(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
+ RETIMER_REDRIVER_INFO("retimer write to slave_addr = 0x%x,\
+ offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
+ slave_address, buffer[0], buffer[1], i2c_success?1:0);
+ if (!i2c_success)
+ goto i2c_write_fail;
+
+ /* Write offset 0x0A to 0x17 */
+ buffer[0] = 0x0A;
+ buffer[1] = 0x17;
+ i2c_success = write_i2c(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
+ RETIMER_REDRIVER_INFO("retimer write to slave_addr = 0x%x,\
+ offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
+ slave_address, buffer[0], buffer[1], i2c_success?1:0);
+ if (!i2c_success)
+ goto i2c_write_fail;
+
+
+ if (is_vga_mode) {
+ /* Program additional settings if using 640x480 resolution */
+
+ /* Write offset 0xFF to 0x01 */
+ buffer[0] = 0xff;
+ buffer[1] = 0x01;
+ i2c_success = write_i2c(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
+ RETIMER_REDRIVER_INFO("retimer write to slave_addr = 0x%x,\
+ offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
+ slave_address, buffer[0], buffer[1], i2c_success?1:0);
+ if (!i2c_success)
+ goto i2c_write_fail;
+
+ /* Write offset 0x00 to 0x23 */
+ buffer[0] = 0x00;
+ buffer[1] = 0x23;
+ i2c_success = write_i2c(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
+ RETIMER_REDRIVER_INFO("retimer write to slave_addr = 0x%x,\
+ offset = 0x%x, reg_val= 0x%x, i2c_success = %d\n",
+ slave_address, buffer[0], buffer[1], i2c_success?1:0);
+ if (!i2c_success)
+ goto i2c_write_fail;
+
+ /* Write offset 0xff to 0x00 */
+ buffer[0] = 0xff;
+ buffer[1] = 0x00;
+ i2c_success = write_i2c(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
+ RETIMER_REDRIVER_INFO("retimer write default setting to slave_addr = 0x%x,\
+ offset = 0x%x, reg_val= 0x%x, i2c_success = %d end here\n",
+ slave_address, buffer[0], buffer[1], i2c_success?1:0);
+ if (!i2c_success)
+ goto i2c_write_fail;
+ }
+
+ return;
+
+i2c_write_fail:
+ DC_LOG_DEBUG("Set default retimer failed");
+}
+
+static void write_i2c_redriver_setting(
+ struct pipe_ctx *pipe_ctx,
+ bool is_over_340mhz)
+{
+ uint8_t slave_address = (0xF0 >> 1);
+ uint8_t buffer[16];
+ bool i2c_success = false;
+ DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
+
+ memset(&buffer, 0, sizeof(buffer));
+
+ // Program Slave Address for tuning single integrity
+ buffer[3] = 0x4E;
+ buffer[4] = 0x4E;
+ buffer[5] = 0x4E;
+ buffer[6] = is_over_340mhz ? 0x4E : 0x4A;
+
+ i2c_success = write_i2c(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
+ RETIMER_REDRIVER_INFO("redriver write 0 to all 16 reg offset expect following:\n\
+ \t slave_addr = 0x%x, offset[3] = 0x%x, offset[4] = 0x%x,\
+ offset[5] = 0x%x,offset[6] is_over_340mhz = 0x%x,\
+ i2c_success = %d\n",
+ slave_address, buffer[3], buffer[4], buffer[5], buffer[6], i2c_success?1:0);
+
+ if (!i2c_success)
+ DC_LOG_DEBUG("Set redriver failed");
+}
+#if defined(CONFIG_DRM_AMD_DC_HDCP)
+
+static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off)
+{
+ struct cp_psp *cp_psp = &pipe_ctx->stream->ctx->cp_psp;
+ struct link_encoder *link_enc = NULL;
+ struct cp_psp_stream_config config = {0};
+ enum dp_panel_mode panel_mode =
+ dp_get_panel_mode(pipe_ctx->stream->link);
+
+ if (cp_psp == NULL || cp_psp->funcs.update_stream_config == NULL)
+ return;
+
+ link_enc = link_enc_cfg_get_link_enc(pipe_ctx->stream->link);
+ ASSERT(link_enc);
+ if (link_enc == NULL)
+ return;
+
+ /* otg instance */
+ config.otg_inst = (uint8_t) pipe_ctx->stream_res.tg->inst;
+
+ /* dig front end */
+ config.dig_fe = (uint8_t) pipe_ctx->stream_res.stream_enc->stream_enc_inst;
+
+ /* stream encoder index */
+ config.stream_enc_idx = pipe_ctx->stream_res.stream_enc->id - ENGINE_ID_DIGA;
+ if (link_is_dp_128b_132b_signal(pipe_ctx))
+ config.stream_enc_idx =
+ pipe_ctx->stream_res.hpo_dp_stream_enc->id - ENGINE_ID_HPO_DP_0;
+
+ /* dig back end */
+ config.dig_be = pipe_ctx->stream->link->link_enc_hw_inst;
+
+ /* link encoder index */
+ config.link_enc_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A;
+ if (link_is_dp_128b_132b_signal(pipe_ctx))
+ config.link_enc_idx = pipe_ctx->link_res.hpo_dp_link_enc->inst;
+
+ /* dio output index is dpia index for DPIA endpoint & dcio index by default */
+ if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
+ config.dio_output_idx = pipe_ctx->stream->link->link_id.enum_id - ENUM_ID_1;
+ else
+ config.dio_output_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A;
+
+
+ /* phy index */
+ config.phy_idx = resource_transmitter_to_phy_idx(
+ pipe_ctx->stream->link->dc, link_enc->transmitter);
+ if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
+ /* USB4 DPIA doesn't use PHY in our soc, initialize it to 0 */
+ config.phy_idx = 0;
+
+ /* stream properties */
+ config.assr_enabled = (panel_mode == DP_PANEL_MODE_EDP) ? 1 : 0;
+ config.mst_enabled = (pipe_ctx->stream->signal ==
+ SIGNAL_TYPE_DISPLAY_PORT_MST) ? 1 : 0;
+ config.dp2_enabled = link_is_dp_128b_132b_signal(pipe_ctx) ? 1 : 0;
+ config.usb4_enabled = (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) ?
+ 1 : 0;
+ config.dpms_off = dpms_off;
+
+ /* dm stream context */
+ config.dm_stream_ctx = pipe_ctx->stream->dm_stream_context;
+
+ cp_psp->funcs.update_stream_config(cp_psp->handle, &config);
+}
+#endif
+
+static void set_avmute(struct pipe_ctx *pipe_ctx, bool enable)
+{
+ struct dc *dc = pipe_ctx->stream->ctx->dc;
+
+ if (!dc_is_hdmi_signal(pipe_ctx->stream->signal))
+ return;
+
+ dc->hwss.set_avmute(pipe_ctx, enable);
+}
+
+static void enable_mst_on_sink(struct dc_link *link, bool enable)
+{
+ unsigned char mstmCntl;
+
+ core_link_read_dpcd(link, DP_MSTM_CTRL, &mstmCntl, 1);
+ if (enable)
+ mstmCntl |= DP_MST_EN;
+ else
+ mstmCntl &= (~DP_MST_EN);
+
+ core_link_write_dpcd(link, DP_MSTM_CTRL, &mstmCntl, 1);
+}
+
+static void dsc_optc_config_log(struct display_stream_compressor *dsc,
+ struct dsc_optc_config *config)
+{
+ uint32_t precision = 1 << 28;
+ uint32_t bytes_per_pixel_int = config->bytes_per_pixel / precision;
+ uint32_t bytes_per_pixel_mod = config->bytes_per_pixel % precision;
+ uint64_t ll_bytes_per_pix_fraq = bytes_per_pixel_mod;
+ DC_LOGGER_INIT(dsc->ctx->logger);
+
+ /* 7 fractional digits decimal precision for bytes per pixel is enough because DSC
+ * bits per pixel precision is 1/16th of a pixel, which means bytes per pixel precision is
+ * 1/16/8 = 1/128 of a byte, or 0.0078125 decimal
+ */
+ ll_bytes_per_pix_fraq *= 10000000;
+ ll_bytes_per_pix_fraq /= precision;
+
+ DC_LOG_DSC("\tbytes_per_pixel 0x%08x (%d.%07d)",
+ config->bytes_per_pixel, bytes_per_pixel_int, (uint32_t)ll_bytes_per_pix_fraq);
+ DC_LOG_DSC("\tis_pixel_format_444 %d", config->is_pixel_format_444);
+ DC_LOG_DSC("\tslice_width %d", config->slice_width);
+}
+
+static bool dp_set_dsc_on_rx(struct pipe_ctx *pipe_ctx, bool enable)
+{
+ struct dc *dc = pipe_ctx->stream->ctx->dc;
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ bool result = false;
+
+ if (dc_is_virtual_signal(stream->signal) || IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
+ result = true;
+ else
+ result = dm_helpers_dp_write_dsc_enable(dc->ctx, stream, enable);
+ return result;
+}
+
+/* The stream with these settings can be sent (unblanked) only after DSC was enabled on RX first,
+ * i.e. after dp_enable_dsc_on_rx() had been called
+ */
+void link_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
+{
+ struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc;
+ struct dc *dc = pipe_ctx->stream->ctx->dc;
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ struct pipe_ctx *odm_pipe;
+ int opp_cnt = 1;
+ DC_LOGGER_INIT(dsc->ctx->logger);
+
+ for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
+ opp_cnt++;
+
+ if (enable) {
+ struct dsc_config dsc_cfg;
+ struct dsc_optc_config dsc_optc_cfg;
+ enum optc_dsc_mode optc_dsc_mode;
+
+ /* Enable DSC hw block */
+ dsc_cfg.pic_width = (stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right) / opp_cnt;
+ dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom;
+ dsc_cfg.pixel_encoding = stream->timing.pixel_encoding;
+ dsc_cfg.color_depth = stream->timing.display_color_depth;
+ dsc_cfg.is_odm = pipe_ctx->next_odm_pipe ? true : false;
+ dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg;
+ ASSERT(dsc_cfg.dc_dsc_cfg.num_slices_h % opp_cnt == 0);
+ dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt;
+
+ dsc->funcs->dsc_set_config(dsc, &dsc_cfg, &dsc_optc_cfg);
+ dsc->funcs->dsc_enable(dsc, pipe_ctx->stream_res.opp->inst);
+ for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
+ struct display_stream_compressor *odm_dsc = odm_pipe->stream_res.dsc;
+
+ odm_dsc->funcs->dsc_set_config(odm_dsc, &dsc_cfg, &dsc_optc_cfg);
+ odm_dsc->funcs->dsc_enable(odm_dsc, odm_pipe->stream_res.opp->inst);
+ }
+ dsc_cfg.dc_dsc_cfg.num_slices_h *= opp_cnt;
+ dsc_cfg.pic_width *= opp_cnt;
+
+ optc_dsc_mode = dsc_optc_cfg.is_pixel_format_444 ? OPTC_DSC_ENABLED_444 : OPTC_DSC_ENABLED_NATIVE_SUBSAMPLED;
+
+ /* Enable DSC in encoder */
+ if (dc_is_dp_signal(stream->signal) && !IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)
+ && !link_is_dp_128b_132b_signal(pipe_ctx)) {
+ DC_LOG_DSC("Setting stream encoder DSC config for engine %d:", (int)pipe_ctx->stream_res.stream_enc->id);
+ dsc_optc_config_log(dsc, &dsc_optc_cfg);
+ pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_config(pipe_ctx->stream_res.stream_enc,
+ optc_dsc_mode,
+ dsc_optc_cfg.bytes_per_pixel,
+ dsc_optc_cfg.slice_width);
+
+ /* PPS SDP is set elsewhere because it has to be done after DIG FE is connected to DIG BE */
+ }
+
+ /* Enable DSC in OPTC */
+ DC_LOG_DSC("Setting optc DSC config for tg instance %d:", pipe_ctx->stream_res.tg->inst);
+ dsc_optc_config_log(dsc, &dsc_optc_cfg);
+ pipe_ctx->stream_res.tg->funcs->set_dsc_config(pipe_ctx->stream_res.tg,
+ optc_dsc_mode,
+ dsc_optc_cfg.bytes_per_pixel,
+ dsc_optc_cfg.slice_width);
+ } else {
+ /* disable DSC in OPTC */
+ pipe_ctx->stream_res.tg->funcs->set_dsc_config(
+ pipe_ctx->stream_res.tg,
+ OPTC_DSC_DISABLED, 0, 0);
+
+ /* disable DSC in stream encoder */
+ if (dc_is_dp_signal(stream->signal)) {
+ if (link_is_dp_128b_132b_signal(pipe_ctx))
+ pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_set_dsc_pps_info_packet(
+ pipe_ctx->stream_res.hpo_dp_stream_enc,
+ false,
+ NULL,
+ true);
+ else if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
+ pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_config(
+ pipe_ctx->stream_res.stream_enc,
+ OPTC_DSC_DISABLED, 0, 0);
+ pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_pps_info_packet(
+ pipe_ctx->stream_res.stream_enc, false, NULL, true);
+ }
+ }
+
+ /* disable DSC block */
+ pipe_ctx->stream_res.dsc->funcs->dsc_disable(pipe_ctx->stream_res.dsc);
+ for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
+ odm_pipe->stream_res.dsc->funcs->dsc_disable(odm_pipe->stream_res.dsc);
+ }
+}
+
+/*
+ * For dynamic bpp change case, dsc is programmed with MASTER_UPDATE_LOCK enabled;
+ * hence PPS info packet update need to use frame update instead of immediate update.
+ * Added parameter immediate_update for this purpose.
+ * The decision to use frame update is hard-coded in function dp_update_dsc_config(),
+ * which is the only place where a "false" would be passed in for param immediate_update.
+ *
+ * immediate_update is only applicable when DSC is enabled.
+ */
+bool link_set_dsc_pps_packet(struct pipe_ctx *pipe_ctx, bool enable, bool immediate_update)
+{
+ struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc;
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ DC_LOGGER_INIT(dsc->ctx->logger);
+
+ if (!pipe_ctx->stream->timing.flags.DSC || !dsc)
+ return false;
+
+ if (enable) {
+ struct dsc_config dsc_cfg;
+ uint8_t dsc_packed_pps[128];
+
+ memset(&dsc_cfg, 0, sizeof(dsc_cfg));
+ memset(dsc_packed_pps, 0, 128);
+
+ /* Enable DSC hw block */
+ dsc_cfg.pic_width = stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right;
+ dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom;
+ dsc_cfg.pixel_encoding = stream->timing.pixel_encoding;
+ dsc_cfg.color_depth = stream->timing.display_color_depth;
+ dsc_cfg.is_odm = pipe_ctx->next_odm_pipe ? true : false;
+ dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg;
+
+ dsc->funcs->dsc_get_packed_pps(dsc, &dsc_cfg, &dsc_packed_pps[0]);
+ memcpy(&stream->dsc_packed_pps[0], &dsc_packed_pps[0], sizeof(stream->dsc_packed_pps));
+ if (dc_is_dp_signal(stream->signal)) {
+ DC_LOG_DSC("Setting stream encoder DSC PPS SDP for engine %d\n", (int)pipe_ctx->stream_res.stream_enc->id);
+ if (link_is_dp_128b_132b_signal(pipe_ctx))
+ pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_set_dsc_pps_info_packet(
+ pipe_ctx->stream_res.hpo_dp_stream_enc,
+ true,
+ &dsc_packed_pps[0],
+ immediate_update);
+ else
+ pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_pps_info_packet(
+ pipe_ctx->stream_res.stream_enc,
+ true,
+ &dsc_packed_pps[0],
+ immediate_update);
+ }
+ } else {
+ /* disable DSC PPS in stream encoder */
+ memset(&stream->dsc_packed_pps[0], 0, sizeof(stream->dsc_packed_pps));
+ if (dc_is_dp_signal(stream->signal)) {
+ if (link_is_dp_128b_132b_signal(pipe_ctx))
+ pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_set_dsc_pps_info_packet(
+ pipe_ctx->stream_res.hpo_dp_stream_enc,
+ false,
+ NULL,
+ true);
+ else
+ pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_pps_info_packet(
+ pipe_ctx->stream_res.stream_enc, false, NULL, true);
+ }
+ }
+
+ return true;
+}
+
+bool link_set_dsc_enable(struct pipe_ctx *pipe_ctx, bool enable)
+{
+ struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc;
+ bool result = false;
+
+ if (!pipe_ctx->stream->timing.flags.DSC)
+ goto out;
+ if (!dsc)
+ goto out;
+
+ if (enable) {
+ {
+ link_set_dsc_on_stream(pipe_ctx, true);
+ result = true;
+ }
+ } else {
+ dp_set_dsc_on_rx(pipe_ctx, false);
+ link_set_dsc_on_stream(pipe_ctx, false);
+ result = true;
+ }
+out:
+ return result;
+}
+
+bool link_update_dsc_config(struct pipe_ctx *pipe_ctx)
+{
+ struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc;
+
+ if (!pipe_ctx->stream->timing.flags.DSC)
+ return false;
+ if (!dsc)
+ return false;
+
+ link_set_dsc_on_stream(pipe_ctx, true);
+ link_set_dsc_pps_packet(pipe_ctx, true, false);
+ return true;
+}
+
+static void enable_stream_features(struct pipe_ctx *pipe_ctx)
+{
+ struct dc_stream_state *stream = pipe_ctx->stream;
+
+ if (pipe_ctx->stream->signal != SIGNAL_TYPE_DISPLAY_PORT_MST) {
+ struct dc_link *link = stream->link;
+ union down_spread_ctrl old_downspread;
+ union down_spread_ctrl new_downspread;
+
+ memset(&old_downspread, 0, sizeof(old_downspread));
+
+ core_link_read_dpcd(link, DP_DOWNSPREAD_CTRL,
+ &old_downspread.raw, sizeof(old_downspread));
+
+ new_downspread.raw = old_downspread.raw;
+
+ new_downspread.bits.IGNORE_MSA_TIMING_PARAM =
+ (stream->ignore_msa_timing_param) ? 1 : 0;
+
+ if (new_downspread.raw != old_downspread.raw) {
+ core_link_write_dpcd(link, DP_DOWNSPREAD_CTRL,
+ &new_downspread.raw, sizeof(new_downspread));
+ }
+
+ } else {
+ dm_helpers_mst_enable_stream_features(stream);
+ }
+}
+
+static void dc_log_vcp_x_y(const struct dc_link *link, struct fixed31_32 avg_time_slots_per_mtp)
+{
+ const uint32_t VCP_Y_PRECISION = 1000;
+ uint64_t vcp_x, vcp_y;
+ DC_LOGGER_INIT(link->ctx->logger);
+
+ // Add 0.5*(1/VCP_Y_PRECISION) to round up to decimal precision
+ avg_time_slots_per_mtp = dc_fixpt_add(
+ avg_time_slots_per_mtp,
+ dc_fixpt_from_fraction(
+ 1,
+ 2*VCP_Y_PRECISION));
+
+ vcp_x = dc_fixpt_floor(
+ avg_time_slots_per_mtp);
+ vcp_y = dc_fixpt_floor(
+ dc_fixpt_mul_int(
+ dc_fixpt_sub_int(
+ avg_time_slots_per_mtp,
+ dc_fixpt_floor(
+ avg_time_slots_per_mtp)),
+ VCP_Y_PRECISION));
+
+
+ if (link->type == dc_connection_mst_branch)
+ DC_LOG_DP2("MST Update Payload: set_throttled_vcp_size slot X.Y for MST stream "
+ "X: %llu "
+ "Y: %llu/%d",
+ vcp_x,
+ vcp_y,
+ VCP_Y_PRECISION);
+ else
+ DC_LOG_DP2("SST Update Payload: set_throttled_vcp_size slot X.Y for SST stream "
+ "X: %llu "
+ "Y: %llu/%d",
+ vcp_x,
+ vcp_y,
+ VCP_Y_PRECISION);
+}
+
+static struct fixed31_32 get_pbn_per_slot(struct dc_stream_state *stream)
+{
+ struct fixed31_32 mbytes_per_sec;
+ uint32_t link_rate_in_mbytes_per_sec = dc_link_bandwidth_kbps(stream->link,
+ &stream->link->cur_link_settings);
+ link_rate_in_mbytes_per_sec /= 8000; /* Kbits to MBytes */
+
+ mbytes_per_sec = dc_fixpt_from_int(link_rate_in_mbytes_per_sec);
+
+ return dc_fixpt_div_int(mbytes_per_sec, 54);
+}
+
+static struct fixed31_32 get_pbn_from_bw_in_kbps(uint64_t kbps)
+{
+ struct fixed31_32 peak_kbps;
+ uint32_t numerator = 0;
+ uint32_t denominator = 1;
+
+ /*
+ * margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006
+ * The unit of 54/64Mbytes/sec is an arbitrary unit chosen based on
+ * common multiplier to render an integer PBN for all link rate/lane
+ * counts combinations
+ * calculate
+ * peak_kbps *= (1006/1000)
+ * peak_kbps *= (64/54)
+ * peak_kbps *= 8 convert to bytes
+ */
+
+ numerator = 64 * PEAK_FACTOR_X1000;
+ denominator = 54 * 8 * 1000 * 1000;
+ kbps *= numerator;
+ peak_kbps = dc_fixpt_from_fraction(kbps, denominator);
+
+ return peak_kbps;
+}
+
+static struct fixed31_32 get_pbn_from_timing(struct pipe_ctx *pipe_ctx)
+{
+ uint64_t kbps;
+
+ kbps = dc_bandwidth_in_kbps_from_timing(&pipe_ctx->stream->timing);
+ return get_pbn_from_bw_in_kbps(kbps);
+}
+
+
+// TODO - DP2.0 Link: Fix get_lane_status to handle LTTPR offset (SST and MST)
+static void get_lane_status(
+ struct dc_link *link,
+ uint32_t lane_count,
+ union lane_status *status,
+ union lane_align_status_updated *status_updated)
+{
+ unsigned int lane;
+ uint8_t dpcd_buf[3] = {0};
+
+ if (status == NULL || status_updated == NULL) {
+ return;
+ }
+
+ core_link_read_dpcd(
+ link,
+ DP_LANE0_1_STATUS,
+ dpcd_buf,
+ sizeof(dpcd_buf));
+
+ for (lane = 0; lane < lane_count; lane++) {
+ status[lane].raw = dp_get_nibble_at_index(&dpcd_buf[0], lane);
+ }
+
+ status_updated->raw = dpcd_buf[2];
+}
+
+static bool poll_for_allocation_change_trigger(struct dc_link *link)
+{
+ /*
+ * wait for ACT handled
+ */
+ int i;
+ const int act_retries = 30;
+ enum act_return_status result = ACT_FAILED;
+ union payload_table_update_status update_status = {0};
+ union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX];
+ union lane_align_status_updated lane_status_updated;
+ DC_LOGGER_INIT(link->ctx->logger);
+
+ if (link->aux_access_disabled)
+ return true;
+ for (i = 0; i < act_retries; i++) {
+ get_lane_status(link, link->cur_link_settings.lane_count, dpcd_lane_status, &lane_status_updated);
+
+ if (!dp_is_cr_done(link->cur_link_settings.lane_count, dpcd_lane_status) ||
+ !dp_is_ch_eq_done(link->cur_link_settings.lane_count, dpcd_lane_status) ||
+ !dp_is_symbol_locked(link->cur_link_settings.lane_count, dpcd_lane_status) ||
+ !dp_is_interlane_aligned(lane_status_updated)) {
+ DC_LOG_ERROR("SST Update Payload: Link loss occurred while "
+ "polling for ACT handled.");
+ result = ACT_LINK_LOST;
+ break;
+ }
+ core_link_read_dpcd(
+ link,
+ DP_PAYLOAD_TABLE_UPDATE_STATUS,
+ &update_status.raw,
+ 1);
+
+ if (update_status.bits.ACT_HANDLED == 1) {
+ DC_LOG_DP2("SST Update Payload: ACT handled by downstream.");
+ result = ACT_SUCCESS;
+ break;
+ }
+
+ msleep(5);
+ }
+
+ if (result == ACT_FAILED) {
+ DC_LOG_ERROR("SST Update Payload: ACT still not handled after retries, "
+ "continue on. Something is wrong with the branch.");
+ }
+
+ return (result == ACT_SUCCESS);
+}
+
+static void update_mst_stream_alloc_table(
+ struct dc_link *link,
+ struct stream_encoder *stream_enc,
+ struct hpo_dp_stream_encoder *hpo_dp_stream_enc, // TODO: Rename stream_enc to dio_stream_enc?
+ const struct dc_dp_mst_stream_allocation_table *proposed_table)
+{
+ struct link_mst_stream_allocation work_table[MAX_CONTROLLER_NUM] = { 0 };
+ struct link_mst_stream_allocation *dc_alloc;
+
+ int i;
+ int j;
+
+ /* if DRM proposed_table has more than one new payload */
+ ASSERT(proposed_table->stream_count -
+ link->mst_stream_alloc_table.stream_count < 2);
+
+ /* copy proposed_table to link, add stream encoder */
+ for (i = 0; i < proposed_table->stream_count; i++) {
+
+ for (j = 0; j < link->mst_stream_alloc_table.stream_count; j++) {
+ dc_alloc =
+ &link->mst_stream_alloc_table.stream_allocations[j];
+
+ if (dc_alloc->vcp_id ==
+ proposed_table->stream_allocations[i].vcp_id) {
+
+ work_table[i] = *dc_alloc;
+ work_table[i].slot_count = proposed_table->stream_allocations[i].slot_count;
+ break; /* exit j loop */
+ }
+ }
+
+ /* new vcp_id */
+ if (j == link->mst_stream_alloc_table.stream_count) {
+ work_table[i].vcp_id =
+ proposed_table->stream_allocations[i].vcp_id;
+ work_table[i].slot_count =
+ proposed_table->stream_allocations[i].slot_count;
+ work_table[i].stream_enc = stream_enc;
+ work_table[i].hpo_dp_stream_enc = hpo_dp_stream_enc;
+ }
+ }
+
+ /* update link->mst_stream_alloc_table with work_table */
+ link->mst_stream_alloc_table.stream_count =
+ proposed_table->stream_count;
+ for (i = 0; i < MAX_CONTROLLER_NUM; i++)
+ link->mst_stream_alloc_table.stream_allocations[i] =
+ work_table[i];
+}
+
+static void remove_stream_from_alloc_table(
+ struct dc_link *link,
+ struct stream_encoder *dio_stream_enc,
+ struct hpo_dp_stream_encoder *hpo_dp_stream_enc)
+{
+ int i = 0;
+ struct link_mst_stream_allocation_table *table =
+ &link->mst_stream_alloc_table;
+
+ if (hpo_dp_stream_enc) {
+ for (; i < table->stream_count; i++)
+ if (hpo_dp_stream_enc == table->stream_allocations[i].hpo_dp_stream_enc)
+ break;
+ } else {
+ for (; i < table->stream_count; i++)
+ if (dio_stream_enc == table->stream_allocations[i].stream_enc)
+ break;
+ }
+
+ if (i < table->stream_count) {
+ i++;
+ for (; i < table->stream_count; i++)
+ table->stream_allocations[i-1] = table->stream_allocations[i];
+ memset(&table->stream_allocations[table->stream_count-1], 0,
+ sizeof(struct link_mst_stream_allocation));
+ table->stream_count--;
+ }
+}
+
+static enum dc_status deallocate_mst_payload_with_temp_drm_wa(
+ struct pipe_ctx *pipe_ctx)
+{
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ struct dc_link *link = stream->link;
+ struct dc_dp_mst_stream_allocation_table proposed_table = {0};
+ struct fixed31_32 avg_time_slots_per_mtp = dc_fixpt_from_int(0);
+ int i;
+ bool mst_mode = (link->type == dc_connection_mst_branch);
+ /* adjust for drm changes*/
+ const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
+ const struct dc_link_settings empty_link_settings = {0};
+ DC_LOGGER_INIT(link->ctx->logger);
+
+ if (link_hwss->ext.set_throttled_vcp_size)
+ link_hwss->ext.set_throttled_vcp_size(pipe_ctx, avg_time_slots_per_mtp);
+ if (link_hwss->ext.set_hblank_min_symbol_width)
+ link_hwss->ext.set_hblank_min_symbol_width(pipe_ctx,
+ &empty_link_settings,
+ avg_time_slots_per_mtp);
+
+ if (dm_helpers_dp_mst_write_payload_allocation_table(
+ stream->ctx,
+ stream,
+ &proposed_table,
+ false))
+ update_mst_stream_alloc_table(
+ link,
+ pipe_ctx->stream_res.stream_enc,
+ pipe_ctx->stream_res.hpo_dp_stream_enc,
+ &proposed_table);
+ else
+ DC_LOG_WARNING("Failed to update"
+ "MST allocation table for"
+ "pipe idx:%d\n",
+ pipe_ctx->pipe_idx);
+
+ DC_LOG_MST("%s"
+ "stream_count: %d: ",
+ __func__,
+ link->mst_stream_alloc_table.stream_count);
+
+ for (i = 0; i < MAX_CONTROLLER_NUM; i++) {
+ DC_LOG_MST("stream_enc[%d]: %p "
+ "stream[%d].hpo_dp_stream_enc: %p "
+ "stream[%d].vcp_id: %d "
+ "stream[%d].slot_count: %d\n",
+ i,
+ (void *) link->mst_stream_alloc_table.stream_allocations[i].stream_enc,
+ i,
+ (void *) link->mst_stream_alloc_table.stream_allocations[i].hpo_dp_stream_enc,
+ i,
+ link->mst_stream_alloc_table.stream_allocations[i].vcp_id,
+ i,
+ link->mst_stream_alloc_table.stream_allocations[i].slot_count);
+ }
+
+ if (link_hwss->ext.update_stream_allocation_table == NULL ||
+ link_dp_get_encoding_format(&link->cur_link_settings) == DP_UNKNOWN_ENCODING) {
+ DC_LOG_DEBUG("Unknown encoding format\n");
+ return DC_ERROR_UNEXPECTED;
+ }
+
+ link_hwss->ext.update_stream_allocation_table(link, &pipe_ctx->link_res,
+ &link->mst_stream_alloc_table);
+
+ if (mst_mode) {
+ dm_helpers_dp_mst_poll_for_allocation_change_trigger(
+ stream->ctx,
+ stream);
+ }
+
+ dm_helpers_dp_mst_send_payload_allocation(
+ stream->ctx,
+ stream,
+ false);
+
+ return DC_OK;
+}
+
+static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
+{
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ struct dc_link *link = stream->link;
+ struct dc_dp_mst_stream_allocation_table proposed_table = {0};
+ struct fixed31_32 avg_time_slots_per_mtp = dc_fixpt_from_int(0);
+ int i;
+ bool mst_mode = (link->type == dc_connection_mst_branch);
+ const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
+ const struct dc_link_settings empty_link_settings = {0};
+ DC_LOGGER_INIT(link->ctx->logger);
+
+ if (link->dc->debug.temp_mst_deallocation_sequence)
+ return deallocate_mst_payload_with_temp_drm_wa(pipe_ctx);
+
+ /* deallocate_mst_payload is called before disable link. When mode or
+ * disable/enable monitor, new stream is created which is not in link
+ * stream[] yet. For this, payload is not allocated yet, so de-alloc
+ * should not done. For new mode set, map_resources will get engine
+ * for new stream, so stream_enc->id should be validated until here.
+ */
+
+ /* slot X.Y */
+ if (link_hwss->ext.set_throttled_vcp_size)
+ link_hwss->ext.set_throttled_vcp_size(pipe_ctx, avg_time_slots_per_mtp);
+ if (link_hwss->ext.set_hblank_min_symbol_width)
+ link_hwss->ext.set_hblank_min_symbol_width(pipe_ctx,
+ &empty_link_settings,
+ avg_time_slots_per_mtp);
+
+ if (mst_mode) {
+ /* when link is in mst mode, reply on mst manager to remove
+ * payload
+ */
+ if (dm_helpers_dp_mst_write_payload_allocation_table(
+ stream->ctx,
+ stream,
+ &proposed_table,
+ false))
+ update_mst_stream_alloc_table(
+ link,
+ pipe_ctx->stream_res.stream_enc,
+ pipe_ctx->stream_res.hpo_dp_stream_enc,
+ &proposed_table);
+ else
+ DC_LOG_WARNING("Failed to update"
+ "MST allocation table for"
+ "pipe idx:%d\n",
+ pipe_ctx->pipe_idx);
+ } else {
+ /* when link is no longer in mst mode (mst hub unplugged),
+ * remove payload with default dc logic
+ */
+ remove_stream_from_alloc_table(link, pipe_ctx->stream_res.stream_enc,
+ pipe_ctx->stream_res.hpo_dp_stream_enc);
+ }
+
+ DC_LOG_MST("%s"
+ "stream_count: %d: ",
+ __func__,
+ link->mst_stream_alloc_table.stream_count);
+
+ for (i = 0; i < MAX_CONTROLLER_NUM; i++) {
+ DC_LOG_MST("stream_enc[%d]: %p "
+ "stream[%d].hpo_dp_stream_enc: %p "
+ "stream[%d].vcp_id: %d "
+ "stream[%d].slot_count: %d\n",
+ i,
+ (void *) link->mst_stream_alloc_table.stream_allocations[i].stream_enc,
+ i,
+ (void *) link->mst_stream_alloc_table.stream_allocations[i].hpo_dp_stream_enc,
+ i,
+ link->mst_stream_alloc_table.stream_allocations[i].vcp_id,
+ i,
+ link->mst_stream_alloc_table.stream_allocations[i].slot_count);
+ }
+
+ /* update mst stream allocation table hardware state */
+ if (link_hwss->ext.update_stream_allocation_table == NULL ||
+ link_dp_get_encoding_format(&link->cur_link_settings) == DP_UNKNOWN_ENCODING) {
+ DC_LOG_DEBUG("Unknown encoding format\n");
+ return DC_ERROR_UNEXPECTED;
+ }
+
+ link_hwss->ext.update_stream_allocation_table(link, &pipe_ctx->link_res,
+ &link->mst_stream_alloc_table);
+
+ if (mst_mode) {
+ dm_helpers_dp_mst_poll_for_allocation_change_trigger(
+ stream->ctx,
+ stream);
+
+ dm_helpers_dp_mst_send_payload_allocation(
+ stream->ctx,
+ stream,
+ false);
+ }
+
+ return DC_OK;
+}
+
+/* convert link_mst_stream_alloc_table to dm dp_mst_stream_alloc_table
+ * because stream_encoder is not exposed to dm
+ */
+static enum dc_status allocate_mst_payload(struct pipe_ctx *pipe_ctx)
+{
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ struct dc_link *link = stream->link;
+ struct dc_dp_mst_stream_allocation_table proposed_table = {0};
+ struct fixed31_32 avg_time_slots_per_mtp;
+ struct fixed31_32 pbn;
+ struct fixed31_32 pbn_per_slot;
+ int i;
+ enum act_return_status ret;
+ const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
+ DC_LOGGER_INIT(link->ctx->logger);
+
+ /* enable_link_dp_mst already check link->enabled_stream_count
+ * and stream is in link->stream[]. This is called during set mode,
+ * stream_enc is available.
+ */
+
+ /* get calculate VC payload for stream: stream_alloc */
+ if (dm_helpers_dp_mst_write_payload_allocation_table(
+ stream->ctx,
+ stream,
+ &proposed_table,
+ true))
+ update_mst_stream_alloc_table(
+ link,
+ pipe_ctx->stream_res.stream_enc,
+ pipe_ctx->stream_res.hpo_dp_stream_enc,
+ &proposed_table);
+ else
+ DC_LOG_WARNING("Failed to update"
+ "MST allocation table for"
+ "pipe idx:%d\n",
+ pipe_ctx->pipe_idx);
+
+ DC_LOG_MST("%s "
+ "stream_count: %d: \n ",
+ __func__,
+ link->mst_stream_alloc_table.stream_count);
+
+ for (i = 0; i < MAX_CONTROLLER_NUM; i++) {
+ DC_LOG_MST("stream_enc[%d]: %p "
+ "stream[%d].hpo_dp_stream_enc: %p "
+ "stream[%d].vcp_id: %d "
+ "stream[%d].slot_count: %d\n",
+ i,
+ (void *) link->mst_stream_alloc_table.stream_allocations[i].stream_enc,
+ i,
+ (void *) link->mst_stream_alloc_table.stream_allocations[i].hpo_dp_stream_enc,
+ i,
+ link->mst_stream_alloc_table.stream_allocations[i].vcp_id,
+ i,
+ link->mst_stream_alloc_table.stream_allocations[i].slot_count);
+ }
+
+ ASSERT(proposed_table.stream_count > 0);
+
+ /* program DP source TX for payload */
+ if (link_hwss->ext.update_stream_allocation_table == NULL ||
+ link_dp_get_encoding_format(&link->cur_link_settings) == DP_UNKNOWN_ENCODING) {
+ DC_LOG_ERROR("Failure: unknown encoding format\n");
+ return DC_ERROR_UNEXPECTED;
+ }
+
+ link_hwss->ext.update_stream_allocation_table(link,
+ &pipe_ctx->link_res,
+ &link->mst_stream_alloc_table);
+
+ /* send down message */
+ ret = dm_helpers_dp_mst_poll_for_allocation_change_trigger(
+ stream->ctx,
+ stream);
+
+ if (ret != ACT_LINK_LOST) {
+ dm_helpers_dp_mst_send_payload_allocation(
+ stream->ctx,
+ stream,
+ true);
+ }
+
+ /* slot X.Y for only current stream */
+ pbn_per_slot = get_pbn_per_slot(stream);
+ if (pbn_per_slot.value == 0) {
+ DC_LOG_ERROR("Failure: pbn_per_slot==0 not allowed. Cannot continue, returning DC_UNSUPPORTED_VALUE.\n");
+ return DC_UNSUPPORTED_VALUE;
+ }
+ pbn = get_pbn_from_timing(pipe_ctx);
+ avg_time_slots_per_mtp = dc_fixpt_div(pbn, pbn_per_slot);
+
+ dc_log_vcp_x_y(link, avg_time_slots_per_mtp);
+
+ if (link_hwss->ext.set_throttled_vcp_size)
+ link_hwss->ext.set_throttled_vcp_size(pipe_ctx, avg_time_slots_per_mtp);
+ if (link_hwss->ext.set_hblank_min_symbol_width)
+ link_hwss->ext.set_hblank_min_symbol_width(pipe_ctx,
+ &link->cur_link_settings,
+ avg_time_slots_per_mtp);
+
+ return DC_OK;
+}
+
+struct fixed31_32 link_calculate_sst_avg_time_slots_per_mtp(
+ const struct dc_stream_state *stream,
+ const struct dc_link *link)
+{
+ struct fixed31_32 link_bw_effective =
+ dc_fixpt_from_int(
+ dc_link_bandwidth_kbps(link, &link->cur_link_settings));
+ struct fixed31_32 timeslot_bw_effective =
+ dc_fixpt_div_int(link_bw_effective, MAX_MTP_SLOT_COUNT);
+ struct fixed31_32 timing_bw =
+ dc_fixpt_from_int(
+ dc_bandwidth_in_kbps_from_timing(&stream->timing));
+ struct fixed31_32 avg_time_slots_per_mtp =
+ dc_fixpt_div(timing_bw, timeslot_bw_effective);
+
+ return avg_time_slots_per_mtp;
+}
+
+
+static bool write_128b_132b_sst_payload_allocation_table(
+ const struct dc_stream_state *stream,
+ struct dc_link *link,
+ struct link_mst_stream_allocation_table *proposed_table,
+ bool allocate)
+{
+ const uint8_t vc_id = 1; /// VC ID always 1 for SST
+ const uint8_t start_time_slot = 0; /// Always start at time slot 0 for SST
+ bool result = false;
+ uint8_t req_slot_count = 0;
+ struct fixed31_32 avg_time_slots_per_mtp = { 0 };
+ union payload_table_update_status update_status = { 0 };
+ const uint32_t max_retries = 30;
+ uint32_t retries = 0;
+ DC_LOGGER_INIT(link->ctx->logger);
+
+ if (allocate) {
+ avg_time_slots_per_mtp = link_calculate_sst_avg_time_slots_per_mtp(stream, link);
+ req_slot_count = dc_fixpt_ceil(avg_time_slots_per_mtp);
+ /// Validation should filter out modes that exceed link BW
+ ASSERT(req_slot_count <= MAX_MTP_SLOT_COUNT);
+ if (req_slot_count > MAX_MTP_SLOT_COUNT)
+ return false;
+ } else {
+ /// Leave req_slot_count = 0 if allocate is false.
+ }
+
+ proposed_table->stream_count = 1; /// Always 1 stream for SST
+ proposed_table->stream_allocations[0].slot_count = req_slot_count;
+ proposed_table->stream_allocations[0].vcp_id = vc_id;
+
+ if (link->aux_access_disabled)
+ return true;
+
+ /// Write DPCD 2C0 = 1 to start updating
+ update_status.bits.VC_PAYLOAD_TABLE_UPDATED = 1;
+ core_link_write_dpcd(
+ link,
+ DP_PAYLOAD_TABLE_UPDATE_STATUS,
+ &update_status.raw,
+ 1);
+
+ /// Program the changes in DPCD 1C0 - 1C2
+ ASSERT(vc_id == 1);
+ core_link_write_dpcd(
+ link,
+ DP_PAYLOAD_ALLOCATE_SET,
+ &vc_id,
+ 1);
+
+ ASSERT(start_time_slot == 0);
+ core_link_write_dpcd(
+ link,
+ DP_PAYLOAD_ALLOCATE_START_TIME_SLOT,
+ &start_time_slot,
+ 1);
+
+ core_link_write_dpcd(
+ link,
+ DP_PAYLOAD_ALLOCATE_TIME_SLOT_COUNT,
+ &req_slot_count,
+ 1);
+
+ /// Poll till DPCD 2C0 read 1
+ /// Try for at least 150ms (30 retries, with 5ms delay after each attempt)
+
+ while (retries < max_retries) {
+ if (core_link_read_dpcd(
+ link,
+ DP_PAYLOAD_TABLE_UPDATE_STATUS,
+ &update_status.raw,
+ 1) == DC_OK) {
+ if (update_status.bits.VC_PAYLOAD_TABLE_UPDATED == 1) {
+ DC_LOG_DP2("SST Update Payload: downstream payload table updated.");
+ result = true;
+ break;
+ }
+ } else {
+ union dpcd_rev dpcdRev;
+
+ if (core_link_read_dpcd(
+ link,
+ DP_DPCD_REV,
+ &dpcdRev.raw,
+ 1) != DC_OK) {
+ DC_LOG_ERROR("SST Update Payload: Unable to read DPCD revision "
+ "of sink while polling payload table "
+ "updated status bit.");
+ break;
+ }
+ }
+ retries++;
+ msleep(5);
+ }
+
+ if (!result && retries == max_retries) {
+ DC_LOG_ERROR("SST Update Payload: Payload table not updated after retries, "
+ "continue on. Something is wrong with the branch.");
+ // TODO - DP2.0 Payload: Read and log the payload table from downstream branch
+ }
+
+ return result;
+}
+
+/*
+ * Payload allocation/deallocation for SST introduced in DP2.0
+ */
+static enum dc_status update_sst_payload(struct pipe_ctx *pipe_ctx,
+ bool allocate)
+{
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ struct dc_link *link = stream->link;
+ struct link_mst_stream_allocation_table proposed_table = {0};
+ struct fixed31_32 avg_time_slots_per_mtp;
+ const struct dc_link_settings empty_link_settings = {0};
+ const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
+ DC_LOGGER_INIT(link->ctx->logger);
+
+ /* slot X.Y for SST payload deallocate */
+ if (!allocate) {
+ avg_time_slots_per_mtp = dc_fixpt_from_int(0);
+
+ dc_log_vcp_x_y(link, avg_time_slots_per_mtp);
+
+ if (link_hwss->ext.set_throttled_vcp_size)
+ link_hwss->ext.set_throttled_vcp_size(pipe_ctx,
+ avg_time_slots_per_mtp);
+ if (link_hwss->ext.set_hblank_min_symbol_width)
+ link_hwss->ext.set_hblank_min_symbol_width(pipe_ctx,
+ &empty_link_settings,
+ avg_time_slots_per_mtp);
+ }
+
+ /* calculate VC payload and update branch with new payload allocation table*/
+ if (!write_128b_132b_sst_payload_allocation_table(
+ stream,
+ link,
+ &proposed_table,
+ allocate)) {
+ DC_LOG_ERROR("SST Update Payload: Failed to update "
+ "allocation table for "
+ "pipe idx: %d\n",
+ pipe_ctx->pipe_idx);
+ return DC_FAIL_DP_PAYLOAD_ALLOCATION;
+ }
+
+ proposed_table.stream_allocations[0].hpo_dp_stream_enc = pipe_ctx->stream_res.hpo_dp_stream_enc;
+
+ ASSERT(proposed_table.stream_count == 1);
+
+ //TODO - DP2.0 Logging: Instead of hpo_dp_stream_enc pointer, log instance id
+ DC_LOG_DP2("SST Update Payload: hpo_dp_stream_enc: %p "
+ "vcp_id: %d "
+ "slot_count: %d\n",
+ (void *) proposed_table.stream_allocations[0].hpo_dp_stream_enc,
+ proposed_table.stream_allocations[0].vcp_id,
+ proposed_table.stream_allocations[0].slot_count);
+
+ /* program DP source TX for payload */
+ link_hwss->ext.update_stream_allocation_table(link, &pipe_ctx->link_res,
+ &proposed_table);
+
+ /* poll for ACT handled */
+ if (!poll_for_allocation_change_trigger(link)) {
+ // Failures will result in blackscreen and errors logged
+ BREAK_TO_DEBUGGER();
+ }
+
+ /* slot X.Y for SST payload allocate */
+ if (allocate && link_dp_get_encoding_format(&link->cur_link_settings) ==
+ DP_128b_132b_ENCODING) {
+ avg_time_slots_per_mtp = link_calculate_sst_avg_time_slots_per_mtp(stream, link);
+
+ dc_log_vcp_x_y(link, avg_time_slots_per_mtp);
+
+ if (link_hwss->ext.set_throttled_vcp_size)
+ link_hwss->ext.set_throttled_vcp_size(pipe_ctx,
+ avg_time_slots_per_mtp);
+ if (link_hwss->ext.set_hblank_min_symbol_width)
+ link_hwss->ext.set_hblank_min_symbol_width(pipe_ctx,
+ &link->cur_link_settings,
+ avg_time_slots_per_mtp);
+ }
+
+ /* Always return DC_OK.
+ * If part of sequence fails, log failure(s) and show blackscreen
+ */
+ return DC_OK;
+}
+
+enum dc_status link_reduce_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t bw_in_kbps)
+{
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ struct dc_link *link = stream->link;
+ struct fixed31_32 avg_time_slots_per_mtp;
+ struct fixed31_32 pbn;
+ struct fixed31_32 pbn_per_slot;
+ struct dc_dp_mst_stream_allocation_table proposed_table = {0};
+ uint8_t i;
+ const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
+ DC_LOGGER_INIT(link->ctx->logger);
+
+ /* decrease throttled vcp size */
+ pbn_per_slot = get_pbn_per_slot(stream);
+ pbn = get_pbn_from_bw_in_kbps(bw_in_kbps);
+ avg_time_slots_per_mtp = dc_fixpt_div(pbn, pbn_per_slot);
+
+ if (link_hwss->ext.set_throttled_vcp_size)
+ link_hwss->ext.set_throttled_vcp_size(pipe_ctx, avg_time_slots_per_mtp);
+ if (link_hwss->ext.set_hblank_min_symbol_width)
+ link_hwss->ext.set_hblank_min_symbol_width(pipe_ctx,
+ &link->cur_link_settings,
+ avg_time_slots_per_mtp);
+
+ /* send ALLOCATE_PAYLOAD sideband message with updated pbn */
+ dm_helpers_dp_mst_send_payload_allocation(
+ stream->ctx,
+ stream,
+ true);
+
+ /* notify immediate branch device table update */
+ if (dm_helpers_dp_mst_write_payload_allocation_table(
+ stream->ctx,
+ stream,
+ &proposed_table,
+ true)) {
+ /* update mst stream allocation table software state */
+ update_mst_stream_alloc_table(
+ link,
+ pipe_ctx->stream_res.stream_enc,
+ pipe_ctx->stream_res.hpo_dp_stream_enc,
+ &proposed_table);
+ } else {
+ DC_LOG_WARNING("Failed to update"
+ "MST allocation table for"
+ "pipe idx:%d\n",
+ pipe_ctx->pipe_idx);
+ }
+
+ DC_LOG_MST("%s "
+ "stream_count: %d: \n ",
+ __func__,
+ link->mst_stream_alloc_table.stream_count);
+
+ for (i = 0; i < MAX_CONTROLLER_NUM; i++) {
+ DC_LOG_MST("stream_enc[%d]: %p "
+ "stream[%d].hpo_dp_stream_enc: %p "
+ "stream[%d].vcp_id: %d "
+ "stream[%d].slot_count: %d\n",
+ i,
+ (void *) link->mst_stream_alloc_table.stream_allocations[i].stream_enc,
+ i,
+ (void *) link->mst_stream_alloc_table.stream_allocations[i].hpo_dp_stream_enc,
+ i,
+ link->mst_stream_alloc_table.stream_allocations[i].vcp_id,
+ i,
+ link->mst_stream_alloc_table.stream_allocations[i].slot_count);
+ }
+
+ ASSERT(proposed_table.stream_count > 0);
+
+ /* update mst stream allocation table hardware state */
+ if (link_hwss->ext.update_stream_allocation_table == NULL ||
+ link_dp_get_encoding_format(&link->cur_link_settings) == DP_UNKNOWN_ENCODING) {
+ DC_LOG_ERROR("Failure: unknown encoding format\n");
+ return DC_ERROR_UNEXPECTED;
+ }
+
+ link_hwss->ext.update_stream_allocation_table(link, &pipe_ctx->link_res,
+ &link->mst_stream_alloc_table);
+
+ /* poll for immediate branch device ACT handled */
+ dm_helpers_dp_mst_poll_for_allocation_change_trigger(
+ stream->ctx,
+ stream);
+
+ return DC_OK;
+}
+
+enum dc_status link_increase_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t bw_in_kbps)
+{
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ struct dc_link *link = stream->link;
+ struct fixed31_32 avg_time_slots_per_mtp;
+ struct fixed31_32 pbn;
+ struct fixed31_32 pbn_per_slot;
+ struct dc_dp_mst_stream_allocation_table proposed_table = {0};
+ uint8_t i;
+ enum act_return_status ret;
+ const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
+ DC_LOGGER_INIT(link->ctx->logger);
+
+ /* notify immediate branch device table update */
+ if (dm_helpers_dp_mst_write_payload_allocation_table(
+ stream->ctx,
+ stream,
+ &proposed_table,
+ true)) {
+ /* update mst stream allocation table software state */
+ update_mst_stream_alloc_table(
+ link,
+ pipe_ctx->stream_res.stream_enc,
+ pipe_ctx->stream_res.hpo_dp_stream_enc,
+ &proposed_table);
+ }
+
+ DC_LOG_MST("%s "
+ "stream_count: %d: \n ",
+ __func__,
+ link->mst_stream_alloc_table.stream_count);
+
+ for (i = 0; i < MAX_CONTROLLER_NUM; i++) {
+ DC_LOG_MST("stream_enc[%d]: %p "
+ "stream[%d].hpo_dp_stream_enc: %p "
+ "stream[%d].vcp_id: %d "
+ "stream[%d].slot_count: %d\n",
+ i,
+ (void *) link->mst_stream_alloc_table.stream_allocations[i].stream_enc,
+ i,
+ (void *) link->mst_stream_alloc_table.stream_allocations[i].hpo_dp_stream_enc,
+ i,
+ link->mst_stream_alloc_table.stream_allocations[i].vcp_id,
+ i,
+ link->mst_stream_alloc_table.stream_allocations[i].slot_count);
+ }
+
+ ASSERT(proposed_table.stream_count > 0);
+
+ /* update mst stream allocation table hardware state */
+ if (link_hwss->ext.update_stream_allocation_table == NULL ||
+ link_dp_get_encoding_format(&link->cur_link_settings) == DP_UNKNOWN_ENCODING) {
+ DC_LOG_ERROR("Failure: unknown encoding format\n");
+ return DC_ERROR_UNEXPECTED;
+ }
+
+ link_hwss->ext.update_stream_allocation_table(link, &pipe_ctx->link_res,
+ &link->mst_stream_alloc_table);
+
+ /* poll for immediate branch device ACT handled */
+ ret = dm_helpers_dp_mst_poll_for_allocation_change_trigger(
+ stream->ctx,
+ stream);
+
+ if (ret != ACT_LINK_LOST) {
+ /* send ALLOCATE_PAYLOAD sideband message with updated pbn */
+ dm_helpers_dp_mst_send_payload_allocation(
+ stream->ctx,
+ stream,
+ true);
+ }
+
+ /* increase throttled vcp size */
+ pbn = get_pbn_from_bw_in_kbps(bw_in_kbps);
+ pbn_per_slot = get_pbn_per_slot(stream);
+ avg_time_slots_per_mtp = dc_fixpt_div(pbn, pbn_per_slot);
+
+ if (link_hwss->ext.set_throttled_vcp_size)
+ link_hwss->ext.set_throttled_vcp_size(pipe_ctx, avg_time_slots_per_mtp);
+ if (link_hwss->ext.set_hblank_min_symbol_width)
+ link_hwss->ext.set_hblank_min_symbol_width(pipe_ctx,
+ &link->cur_link_settings,
+ avg_time_slots_per_mtp);
+
+ return DC_OK;
+}
+
+static void disable_link_dp(struct dc_link *link,
+ const struct link_resource *link_res,
+ enum signal_type signal)
+{
+ struct dc_link_settings link_settings = link->cur_link_settings;
+
+ if (signal == SIGNAL_TYPE_DISPLAY_PORT_MST &&
+ link->mst_stream_alloc_table.stream_count > 0)
+ /* disable MST link only when last vc payload is deallocated */
+ return;
+
+ dp_disable_link_phy(link, link_res, signal);
+
+ if (signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
+ /* set the sink to SST mode after disabling the link */
+ enable_mst_on_sink(link, false);
+
+ if (link_dp_get_encoding_format(&link_settings) ==
+ DP_8b_10b_ENCODING) {
+ dp_set_fec_enable(link, false);
+ dp_set_fec_ready(link, link_res, false);
+ }
+}
+
+static void disable_link(struct dc_link *link,
+ const struct link_resource *link_res,
+ enum signal_type signal)
+{
+ if (dc_is_dp_signal(signal)) {
+ disable_link_dp(link, link_res, signal);
+ } else if (signal != SIGNAL_TYPE_VIRTUAL) {
+ link->dc->hwss.disable_link_output(link, link_res, signal);
+ }
+
+ if (signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+ /* MST disable link only when no stream use the link */
+ if (link->mst_stream_alloc_table.stream_count <= 0)
+ link->link_status.link_active = false;
+ } else {
+ link->link_status.link_active = false;
+ }
+}
+
+static void enable_link_hdmi(struct pipe_ctx *pipe_ctx)
+{
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ struct dc_link *link = stream->link;
+ enum dc_color_depth display_color_depth;
+ enum engine_id eng_id;
+ struct ext_hdmi_settings settings = {0};
+ bool is_over_340mhz = false;
+ bool is_vga_mode = (stream->timing.h_addressable == 640)
+ && (stream->timing.v_addressable == 480);
+ struct dc *dc = pipe_ctx->stream->ctx->dc;
+
+ if (stream->phy_pix_clk == 0)
+ stream->phy_pix_clk = stream->timing.pix_clk_100hz / 10;
+ if (stream->phy_pix_clk > 340000)
+ is_over_340mhz = true;
+
+ if (dc_is_hdmi_signal(pipe_ctx->stream->signal)) {
+ unsigned short masked_chip_caps = pipe_ctx->stream->link->chip_caps &
+ EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK;
+ if (masked_chip_caps == EXT_DISPLAY_PATH_CAPS__HDMI20_TISN65DP159RSBT) {
+ /* DP159, Retimer settings */
+ eng_id = pipe_ctx->stream_res.stream_enc->id;
+
+ if (get_ext_hdmi_settings(pipe_ctx, eng_id, &settings)) {
+ write_i2c_retimer_setting(pipe_ctx,
+ is_vga_mode, is_over_340mhz, &settings);
+ } else {
+ write_i2c_default_retimer_setting(pipe_ctx,
+ is_vga_mode, is_over_340mhz);
+ }
+ } else if (masked_chip_caps == EXT_DISPLAY_PATH_CAPS__HDMI20_PI3EQX1204) {
+ /* PI3EQX1204, Redriver settings */
+ write_i2c_redriver_setting(pipe_ctx, is_over_340mhz);
+ }
+ }
+
+ if (dc_is_hdmi_signal(pipe_ctx->stream->signal))
+ write_scdc_data(
+ stream->link->ddc,
+ stream->phy_pix_clk,
+ stream->timing.flags.LTE_340MCSC_SCRAMBLE);
+
+ memset(&stream->link->cur_link_settings, 0,
+ sizeof(struct dc_link_settings));
+
+ display_color_depth = stream->timing.display_color_depth;
+ if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR422)
+ display_color_depth = COLOR_DEPTH_888;
+
+ dc->hwss.enable_tmds_link_output(
+ link,
+ &pipe_ctx->link_res,
+ pipe_ctx->stream->signal,
+ pipe_ctx->clock_source->id,
+ display_color_depth,
+ stream->phy_pix_clk);
+
+ if (dc_is_hdmi_signal(pipe_ctx->stream->signal))
+ read_scdc_data(link->ddc);
+}
+
+static enum dc_status enable_link_dp(struct dc_state *state,
+ struct pipe_ctx *pipe_ctx)
+{
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ enum dc_status status;
+ bool skip_video_pattern;
+ struct dc_link *link = stream->link;
+ const struct dc_link_settings *link_settings =
+ &pipe_ctx->link_config.dp_link_settings;
+ bool fec_enable;
+ int i;
+ bool apply_seamless_boot_optimization = false;
+ uint32_t bl_oled_enable_delay = 50; // in ms
+ uint32_t post_oui_delay = 30; // 30ms
+ /* Reduce link bandwidth between failed link training attempts. */
+ bool do_fallback = false;
+
+ // check for seamless boot
+ for (i = 0; i < state->stream_count; i++) {
+ if (state->streams[i]->apply_seamless_boot_optimization) {
+ apply_seamless_boot_optimization = true;
+ break;
+ }
+ }
+
+ /* Train with fallback when enabling DPIA link. Conventional links are
+ * trained with fallback during sink detection.
+ */
+ if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
+ do_fallback = true;
+
+ /*
+ * Temporary w/a to get DP2.0 link rates to work with SST.
+ * TODO DP2.0 - Workaround: Remove w/a if and when the issue is resolved.
+ */
+ if (link_dp_get_encoding_format(link_settings) == DP_128b_132b_ENCODING &&
+ pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT &&
+ link->dc->debug.set_mst_en_for_sst) {
+ enable_mst_on_sink(link, true);
+ }
+ if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP) {
+ /*in case it is not on*/
+ if (!link->dc->config.edp_no_power_sequencing)
+ link->dc->hwss.edp_power_control(link, true);
+ link->dc->hwss.edp_wait_for_hpd_ready(link, true);
+ }
+
+ if (link_dp_get_encoding_format(link_settings) == DP_128b_132b_ENCODING) {
+ /* TODO - DP2.0 HW: calculate 32 symbol clock for HPO encoder */
+ } else {
+ pipe_ctx->stream_res.pix_clk_params.requested_sym_clk =
+ link_settings->link_rate * LINK_RATE_REF_FREQ_IN_KHZ;
+ if (state->clk_mgr && !apply_seamless_boot_optimization)
+ state->clk_mgr->funcs->update_clocks(state->clk_mgr,
+ state, false);
+ }
+
+ // during mode switch we do DP_SET_POWER off then on, and OUI is lost
+ dpcd_set_source_specific_data(link);
+ if (link->dpcd_sink_ext_caps.raw != 0) {
+ post_oui_delay += link->panel_config.pps.extra_post_OUI_ms;
+ msleep(post_oui_delay);
+ }
+
+ // similarly, mode switch can cause loss of cable ID
+ dpcd_write_cable_id_to_dprx(link);
+
+ skip_video_pattern = true;
+
+ if (link_settings->link_rate == LINK_RATE_LOW)
+ skip_video_pattern = false;
+
+ if (perform_link_training_with_retries(link_settings,
+ skip_video_pattern,
+ LINK_TRAINING_ATTEMPTS,
+ pipe_ctx,
+ pipe_ctx->stream->signal,
+ do_fallback)) {
+ status = DC_OK;
+ } else {
+ status = DC_FAIL_DP_LINK_TRAINING;
+ }
+
+ if (link->preferred_training_settings.fec_enable)
+ fec_enable = *link->preferred_training_settings.fec_enable;
+ else
+ fec_enable = true;
+
+ if (link_dp_get_encoding_format(link_settings) == DP_8b_10b_ENCODING)
+ dp_set_fec_enable(link, fec_enable);
+
+ // during mode set we do DP_SET_POWER off then on, aux writes are lost
+ if (link->dpcd_sink_ext_caps.bits.oled == 1 ||
+ link->dpcd_sink_ext_caps.bits.sdr_aux_backlight_control == 1 ||
+ link->dpcd_sink_ext_caps.bits.hdr_aux_backlight_control == 1) {
+ set_default_brightness_aux(link); // TODO: use cached if known
+ if (link->dpcd_sink_ext_caps.bits.oled == 1)
+ msleep(bl_oled_enable_delay);
+ link_backlight_enable_aux(link, true);
+ }
+
+ return status;
+}
+
+static enum dc_status enable_link_edp(
+ struct dc_state *state,
+ struct pipe_ctx *pipe_ctx)
+{
+ return enable_link_dp(state, pipe_ctx);
+}
+
+static void enable_link_lvds(struct pipe_ctx *pipe_ctx)
+{
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ struct dc_link *link = stream->link;
+ struct dc *dc = stream->ctx->dc;
+
+ if (stream->phy_pix_clk == 0)
+ stream->phy_pix_clk = stream->timing.pix_clk_100hz / 10;
+
+ memset(&stream->link->cur_link_settings, 0,
+ sizeof(struct dc_link_settings));
+ dc->hwss.enable_lvds_link_output(
+ link,
+ &pipe_ctx->link_res,
+ pipe_ctx->clock_source->id,
+ stream->phy_pix_clk);
+
+}
+
+static enum dc_status enable_link_dp_mst(
+ struct dc_state *state,
+ struct pipe_ctx *pipe_ctx)
+{
+ struct dc_link *link = pipe_ctx->stream->link;
+
+ /* sink signal type after MST branch is MST. Multiple MST sinks
+ * share one link. Link DP PHY is enable or training only once.
+ */
+ if (link->link_status.link_active)
+ return DC_OK;
+
+ /* clear payload table */
+ dm_helpers_dp_mst_clear_payload_allocation_table(link->ctx, link);
+
+ /* to make sure the pending down rep can be processed
+ * before enabling the link
+ */
+ dm_helpers_dp_mst_poll_pending_down_reply(link->ctx, link);
+
+ /* set the sink to MST mode before enabling the link */
+ enable_mst_on_sink(link, true);
+
+ return enable_link_dp(state, pipe_ctx);
+}
+
+static enum dc_status enable_link(
+ struct dc_state *state,
+ struct pipe_ctx *pipe_ctx)
+{
+ enum dc_status status = DC_ERROR_UNEXPECTED;
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ struct dc_link *link = stream->link;
+
+ /* There's some scenarios where driver is unloaded with display
+ * still enabled. When driver is reloaded, it may cause a display
+ * to not light up if there is a mismatch between old and new
+ * link settings. Need to call disable first before enabling at
+ * new link settings.
+ */
+ if (link->link_status.link_active) {
+ disable_link(link, &pipe_ctx->link_res, pipe_ctx->stream->signal);
+ }
+
+ switch (pipe_ctx->stream->signal) {
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ status = enable_link_dp(state, pipe_ctx);
+ break;
+ case SIGNAL_TYPE_EDP:
+ status = enable_link_edp(state, pipe_ctx);
+ break;
+ case SIGNAL_TYPE_DISPLAY_PORT_MST:
+ status = enable_link_dp_mst(state, pipe_ctx);
+ msleep(200);
+ break;
+ case SIGNAL_TYPE_DVI_SINGLE_LINK:
+ case SIGNAL_TYPE_DVI_DUAL_LINK:
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+ enable_link_hdmi(pipe_ctx);
+ status = DC_OK;
+ break;
+ case SIGNAL_TYPE_LVDS:
+ enable_link_lvds(pipe_ctx);
+ status = DC_OK;
+ break;
+ case SIGNAL_TYPE_VIRTUAL:
+ status = DC_OK;
+ break;
+ default:
+ break;
+ }
+
+ if (status == DC_OK) {
+ pipe_ctx->stream->link->link_status.link_active = true;
+ }
+
+ return status;
+}
+
+void link_set_dpms_off(struct pipe_ctx *pipe_ctx)
+{
+ struct dc *dc = pipe_ctx->stream->ctx->dc;
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ struct dc_link *link = stream->sink->link;
+ struct vpg *vpg = pipe_ctx->stream_res.stream_enc->vpg;
+
+ ASSERT(is_master_pipe_for_link(link, pipe_ctx));
+
+ if (link_is_dp_128b_132b_signal(pipe_ctx))
+ vpg = pipe_ctx->stream_res.hpo_dp_stream_enc->vpg;
+
+ DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
+
+ if (pipe_ctx->stream->sink) {
+ if (pipe_ctx->stream->sink->sink_signal != SIGNAL_TYPE_VIRTUAL &&
+ pipe_ctx->stream->sink->sink_signal != SIGNAL_TYPE_NONE) {
+ DC_LOG_DC("%s pipe_ctx dispname=%s signal=%x\n", __func__,
+ pipe_ctx->stream->sink->edid_caps.display_name,
+ pipe_ctx->stream->signal);
+ }
+ }
+
+ if (!IS_DIAG_DC(dc->ctx->dce_environment) &&
+ dc_is_virtual_signal(pipe_ctx->stream->signal))
+ return;
+
+ if (!pipe_ctx->stream->sink->edid_caps.panel_patch.skip_avmute) {
+ if (dc_is_hdmi_signal(pipe_ctx->stream->signal))
+ set_avmute(pipe_ctx, true);
+ }
+
+ dc->hwss.disable_audio_stream(pipe_ctx);
+
+#if defined(CONFIG_DRM_AMD_DC_HDCP)
+ update_psp_stream_config(pipe_ctx, true);
+#endif
+ dc->hwss.blank_stream(pipe_ctx);
+
+ if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
+ deallocate_mst_payload(pipe_ctx);
+ else if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT &&
+ link_is_dp_128b_132b_signal(pipe_ctx))
+ update_sst_payload(pipe_ctx, false);
+
+ if (dc_is_hdmi_signal(pipe_ctx->stream->signal)) {
+ struct ext_hdmi_settings settings = {0};
+ enum engine_id eng_id = pipe_ctx->stream_res.stream_enc->id;
+
+ unsigned short masked_chip_caps = link->chip_caps &
+ EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK;
+ //Need to inform that sink is going to use legacy HDMI mode.
+ write_scdc_data(
+ link->ddc,
+ 165000,//vbios only handles 165Mhz.
+ false);
+ if (masked_chip_caps == EXT_DISPLAY_PATH_CAPS__HDMI20_TISN65DP159RSBT) {
+ /* DP159, Retimer settings */
+ if (get_ext_hdmi_settings(pipe_ctx, eng_id, &settings))
+ write_i2c_retimer_setting(pipe_ctx,
+ false, false, &settings);
+ else
+ write_i2c_default_retimer_setting(pipe_ctx,
+ false, false);
+ } else if (masked_chip_caps == EXT_DISPLAY_PATH_CAPS__HDMI20_PI3EQX1204) {
+ /* PI3EQX1204, Redriver settings */
+ write_i2c_redriver_setting(pipe_ctx, false);
+ }
+ }
+
+ if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT &&
+ !link_is_dp_128b_132b_signal(pipe_ctx)) {
+
+ /* In DP1.x SST mode, our encoder will go to TPS1
+ * when link is on but stream is off.
+ * Disabling link before stream will avoid exposing TPS1 pattern
+ * during the disable sequence as it will confuse some receivers
+ * state machine.
+ * In DP2 or MST mode, our encoder will stay video active
+ */
+ disable_link(pipe_ctx->stream->link, &pipe_ctx->link_res, pipe_ctx->stream->signal);
+ dc->hwss.disable_stream(pipe_ctx);
+ } else {
+ dc->hwss.disable_stream(pipe_ctx);
+ disable_link(pipe_ctx->stream->link, &pipe_ctx->link_res, pipe_ctx->stream->signal);
+ }
+
+ if (pipe_ctx->stream->timing.flags.DSC) {
+ if (dc_is_dp_signal(pipe_ctx->stream->signal))
+ link_set_dsc_enable(pipe_ctx, false);
+ }
+ if (link_is_dp_128b_132b_signal(pipe_ctx)) {
+ if (pipe_ctx->stream_res.tg->funcs->set_out_mux)
+ pipe_ctx->stream_res.tg->funcs->set_out_mux(pipe_ctx->stream_res.tg, OUT_MUX_DIO);
+ }
+
+ if (vpg && vpg->funcs->vpg_powerdown)
+ vpg->funcs->vpg_powerdown(vpg);
+}
+
+void link_set_dpms_on(
+ struct dc_state *state,
+ struct pipe_ctx *pipe_ctx)
+{
+ struct dc *dc = pipe_ctx->stream->ctx->dc;
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ struct dc_link *link = stream->sink->link;
+ enum dc_status status;
+ struct link_encoder *link_enc;
+ enum otg_out_mux_dest otg_out_dest = OUT_MUX_DIO;
+ struct vpg *vpg = pipe_ctx->stream_res.stream_enc->vpg;
+ const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
+
+ ASSERT(is_master_pipe_for_link(link, pipe_ctx));
+
+ if (link_is_dp_128b_132b_signal(pipe_ctx))
+ vpg = pipe_ctx->stream_res.hpo_dp_stream_enc->vpg;
+
+ DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
+
+ if (pipe_ctx->stream->sink) {
+ if (pipe_ctx->stream->sink->sink_signal != SIGNAL_TYPE_VIRTUAL &&
+ pipe_ctx->stream->sink->sink_signal != SIGNAL_TYPE_NONE) {
+ DC_LOG_DC("%s pipe_ctx dispname=%s signal=%x\n", __func__,
+ pipe_ctx->stream->sink->edid_caps.display_name,
+ pipe_ctx->stream->signal);
+ }
+ }
+
+ if (!IS_DIAG_DC(dc->ctx->dce_environment) &&
+ dc_is_virtual_signal(pipe_ctx->stream->signal))
+ return;
+
+ link_enc = link_enc_cfg_get_link_enc(link);
+ ASSERT(link_enc);
+
+ if (!dc_is_virtual_signal(pipe_ctx->stream->signal)
+ && !link_is_dp_128b_132b_signal(pipe_ctx)) {
+ if (link_enc)
+ link_enc->funcs->setup(
+ link_enc,
+ pipe_ctx->stream->signal);
+ }
+
+ pipe_ctx->stream->link->link_state_valid = true;
+
+ if (pipe_ctx->stream_res.tg->funcs->set_out_mux) {
+ if (link_is_dp_128b_132b_signal(pipe_ctx))
+ otg_out_dest = OUT_MUX_HPO_DP;
+ else
+ otg_out_dest = OUT_MUX_DIO;
+ pipe_ctx->stream_res.tg->funcs->set_out_mux(pipe_ctx->stream_res.tg, otg_out_dest);
+ }
+
+ link_hwss->setup_stream_attribute(pipe_ctx);
+
+ if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
+ bool apply_edp_fast_boot_optimization =
+ pipe_ctx->stream->apply_edp_fast_boot_optimization;
+
+ pipe_ctx->stream->apply_edp_fast_boot_optimization = false;
+
+ // Enable VPG before building infoframe
+ if (vpg && vpg->funcs->vpg_poweron)
+ vpg->funcs->vpg_poweron(vpg);
+
+ resource_build_info_frame(pipe_ctx);
+ dc->hwss.update_info_frame(pipe_ctx);
+
+ if (dc_is_dp_signal(pipe_ctx->stream->signal))
+ link_dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME);
+
+ /* Do not touch link on seamless boot optimization. */
+ if (pipe_ctx->stream->apply_seamless_boot_optimization) {
+ pipe_ctx->stream->dpms_off = false;
+
+ /* Still enable stream features & audio on seamless boot for DP external displays */
+ if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT) {
+ enable_stream_features(pipe_ctx);
+ dc->hwss.enable_audio_stream(pipe_ctx);
+ }
+
+#if defined(CONFIG_DRM_AMD_DC_HDCP)
+ update_psp_stream_config(pipe_ctx, false);
+#endif
+ return;
+ }
+
+ /* eDP lit up by bios already, no need to enable again. */
+ if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP &&
+ apply_edp_fast_boot_optimization &&
+ !pipe_ctx->stream->timing.flags.DSC &&
+ !pipe_ctx->next_odm_pipe) {
+ pipe_ctx->stream->dpms_off = false;
+#if defined(CONFIG_DRM_AMD_DC_HDCP)
+ update_psp_stream_config(pipe_ctx, false);
+#endif
+ return;
+ }
+
+ if (pipe_ctx->stream->dpms_off)
+ return;
+
+ /* Have to setup DSC before DIG FE and BE are connected (which happens before the
+ * link training). This is to make sure the bandwidth sent to DIG BE won't be
+ * bigger than what the link and/or DIG BE can handle. VBID[6]/CompressedStream_flag
+ * will be automatically set at a later time when the video is enabled
+ * (DP_VID_STREAM_EN = 1).
+ */
+ if (pipe_ctx->stream->timing.flags.DSC) {
+ if (dc_is_dp_signal(pipe_ctx->stream->signal) ||
+ dc_is_virtual_signal(pipe_ctx->stream->signal))
+ link_set_dsc_enable(pipe_ctx, true);
+
+ }
+
+ status = enable_link(state, pipe_ctx);
+
+ if (status != DC_OK) {
+ DC_LOG_WARNING("enabling link %u failed: %d\n",
+ pipe_ctx->stream->link->link_index,
+ status);
+
+ /* Abort stream enable *unless* the failure was due to
+ * DP link training - some DP monitors will recover and
+ * show the stream anyway. But MST displays can't proceed
+ * without link training.
+ */
+ if (status != DC_FAIL_DP_LINK_TRAINING ||
+ pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+ if (false == stream->link->link_status.link_active)
+ disable_link(stream->link, &pipe_ctx->link_res,
+ pipe_ctx->stream->signal);
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+ }
+
+ /* turn off otg test pattern if enable */
+ if (pipe_ctx->stream_res.tg->funcs->set_test_pattern)
+ pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg,
+ CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
+ COLOR_DEPTH_UNDEFINED);
+
+ /* This second call is needed to reconfigure the DIG
+ * as a workaround for the incorrect value being applied
+ * from transmitter control.
+ */
+ if (!(dc_is_virtual_signal(pipe_ctx->stream->signal) ||
+ link_is_dp_128b_132b_signal(pipe_ctx)))
+ if (link_enc)
+ link_enc->funcs->setup(
+ link_enc,
+ pipe_ctx->stream->signal);
+
+ dc->hwss.enable_stream(pipe_ctx);
+
+ /* Set DPS PPS SDP (AKA "info frames") */
+ if (pipe_ctx->stream->timing.flags.DSC) {
+ if (dc_is_dp_signal(pipe_ctx->stream->signal) ||
+ dc_is_virtual_signal(pipe_ctx->stream->signal)) {
+ dp_set_dsc_on_rx(pipe_ctx, true);
+ link_set_dsc_pps_packet(pipe_ctx, true, true);
+ }
+ }
+
+ if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
+ allocate_mst_payload(pipe_ctx);
+ else if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT &&
+ link_is_dp_128b_132b_signal(pipe_ctx))
+ update_sst_payload(pipe_ctx, true);
+
+ dc->hwss.unblank_stream(pipe_ctx,
+ &pipe_ctx->stream->link->cur_link_settings);
+
+ if (stream->sink_patches.delay_ignore_msa > 0)
+ msleep(stream->sink_patches.delay_ignore_msa);
+
+ if (dc_is_dp_signal(pipe_ctx->stream->signal))
+ enable_stream_features(pipe_ctx);
+#if defined(CONFIG_DRM_AMD_DC_HDCP)
+ update_psp_stream_config(pipe_ctx, false);
+#endif
+
+ dc->hwss.enable_audio_stream(pipe_ctx);
+
+ } else { // if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
+ if (link_is_dp_128b_132b_signal(pipe_ctx))
+ dp_fpga_hpo_enable_link_and_stream(state, pipe_ctx);
+ if (dc_is_dp_signal(pipe_ctx->stream->signal) ||
+ dc_is_virtual_signal(pipe_ctx->stream->signal))
+ link_set_dsc_enable(pipe_ctx, true);
+ }
+
+ if (dc_is_hdmi_signal(pipe_ctx->stream->signal)) {
+ set_avmute(pipe_ctx, false);
+ }
+}
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.h b/drivers/gpu/drm/amd/display/dc/link/link_dpms.h
new file mode 100644
index 000000000000..33d312dabdb8
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_LINK_DPMS_H__
+#define __DC_LINK_DPMS_H__
+
+#include "link.h"
+bool link_set_dsc_pps_packet(struct pipe_ctx *pipe_ctx,
+ bool enable, bool immediate_update);
+struct fixed31_32 link_calculate_sst_avg_time_slots_per_mtp(
+ const struct dc_stream_state *stream,
+ const struct dc_link *link);
+void link_set_all_streams_dpms_off_for_link(struct dc_link *link);
+void link_get_master_pipes_with_dpms_on(const struct dc_link *link,
+ struct dc_state *state,
+ uint8_t *count,
+ struct pipe_ctx *pipes[MAX_PIPES]);
+#endif /* __DC_LINK_DPMS_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_factory.c b/drivers/gpu/drm/amd/display/dc/link/link_factory.c
new file mode 100644
index 000000000000..aeb26a4d539e
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/link/link_factory.c
@@ -0,0 +1,577 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+/* FILE POLICY AND INTENDED USAGE:
+ * This file owns the creation/destruction of link structure.
+ */
+#include "link_factory.h"
+#include "protocols/link_ddc.h"
+#include "protocols/link_edp_panel_control.h"
+#include "protocols/link_hpd.h"
+#include "gpio_service_interface.h"
+#include "atomfirmware.h"
+
+#define DC_LOGGER_INIT(logger)
+
+#define LINK_INFO(...) \
+ DC_LOG_HW_HOTPLUG( \
+ __VA_ARGS__)
+
+static enum transmitter translate_encoder_to_transmitter(struct graphics_object_id encoder)
+{
+ switch (encoder.id) {
+ case ENCODER_ID_INTERNAL_UNIPHY:
+ switch (encoder.enum_id) {
+ case ENUM_ID_1:
+ return TRANSMITTER_UNIPHY_A;
+ case ENUM_ID_2:
+ return TRANSMITTER_UNIPHY_B;
+ default:
+ return TRANSMITTER_UNKNOWN;
+ }
+ break;
+ case ENCODER_ID_INTERNAL_UNIPHY1:
+ switch (encoder.enum_id) {
+ case ENUM_ID_1:
+ return TRANSMITTER_UNIPHY_C;
+ case ENUM_ID_2:
+ return TRANSMITTER_UNIPHY_D;
+ default:
+ return TRANSMITTER_UNKNOWN;
+ }
+ break;
+ case ENCODER_ID_INTERNAL_UNIPHY2:
+ switch (encoder.enum_id) {
+ case ENUM_ID_1:
+ return TRANSMITTER_UNIPHY_E;
+ case ENUM_ID_2:
+ return TRANSMITTER_UNIPHY_F;
+ default:
+ return TRANSMITTER_UNKNOWN;
+ }
+ break;
+ case ENCODER_ID_INTERNAL_UNIPHY3:
+ switch (encoder.enum_id) {
+ case ENUM_ID_1:
+ return TRANSMITTER_UNIPHY_G;
+ default:
+ return TRANSMITTER_UNKNOWN;
+ }
+ break;
+ case ENCODER_ID_EXTERNAL_NUTMEG:
+ switch (encoder.enum_id) {
+ case ENUM_ID_1:
+ return TRANSMITTER_NUTMEG_CRT;
+ default:
+ return TRANSMITTER_UNKNOWN;
+ }
+ break;
+ case ENCODER_ID_EXTERNAL_TRAVIS:
+ switch (encoder.enum_id) {
+ case ENUM_ID_1:
+ return TRANSMITTER_TRAVIS_CRT;
+ case ENUM_ID_2:
+ return TRANSMITTER_TRAVIS_LCD;
+ default:
+ return TRANSMITTER_UNKNOWN;
+ }
+ break;
+ default:
+ return TRANSMITTER_UNKNOWN;
+ }
+}
+
+static void link_destruct(struct dc_link *link)
+{
+ int i;
+
+ if (link->hpd_gpio) {
+ dal_gpio_destroy_irq(&link->hpd_gpio);
+ link->hpd_gpio = NULL;
+ }
+
+ if (link->ddc)
+ link_destroy_ddc_service(&link->ddc);
+
+ if (link->panel_cntl)
+ link->panel_cntl->funcs->destroy(&link->panel_cntl);
+
+ if (link->link_enc) {
+ /* Update link encoder resource tracking variables. These are used for
+ * the dynamic assignment of link encoders to streams. Virtual links
+ * are not assigned encoder resources on creation.
+ */
+ if (link->link_id.id != CONNECTOR_ID_VIRTUAL) {
+ link->dc->res_pool->link_encoders[link->eng_id - ENGINE_ID_DIGA] = NULL;
+ link->dc->res_pool->dig_link_enc_count--;
+ }
+ link->link_enc->funcs->destroy(&link->link_enc);
+ }
+
+ if (link->local_sink)
+ dc_sink_release(link->local_sink);
+
+ for (i = 0; i < link->sink_count; ++i)
+ dc_sink_release(link->remote_sinks[i]);
+}
+
+static enum channel_id get_ddc_line(struct dc_link *link)
+{
+ struct ddc *ddc;
+ enum channel_id channel;
+
+ channel = CHANNEL_ID_UNKNOWN;
+
+ ddc = get_ddc_pin(link->ddc);
+
+ if (ddc) {
+ switch (dal_ddc_get_line(ddc)) {
+ case GPIO_DDC_LINE_DDC1:
+ channel = CHANNEL_ID_DDC1;
+ break;
+ case GPIO_DDC_LINE_DDC2:
+ channel = CHANNEL_ID_DDC2;
+ break;
+ case GPIO_DDC_LINE_DDC3:
+ channel = CHANNEL_ID_DDC3;
+ break;
+ case GPIO_DDC_LINE_DDC4:
+ channel = CHANNEL_ID_DDC4;
+ break;
+ case GPIO_DDC_LINE_DDC5:
+ channel = CHANNEL_ID_DDC5;
+ break;
+ case GPIO_DDC_LINE_DDC6:
+ channel = CHANNEL_ID_DDC6;
+ break;
+ case GPIO_DDC_LINE_DDC_VGA:
+ channel = CHANNEL_ID_DDC_VGA;
+ break;
+ case GPIO_DDC_LINE_I2C_PAD:
+ channel = CHANNEL_ID_I2C_PAD;
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ break;
+ }
+ }
+
+ return channel;
+}
+
+static bool dc_link_construct_phy(struct dc_link *link,
+ const struct link_init_data *init_params)
+{
+ uint8_t i;
+ struct ddc_service_init_data ddc_service_init_data = { 0 };
+ struct dc_context *dc_ctx = init_params->ctx;
+ struct encoder_init_data enc_init_data = { 0 };
+ struct panel_cntl_init_data panel_cntl_init_data = { 0 };
+ struct integrated_info info = { 0 };
+ struct dc_bios *bios = init_params->dc->ctx->dc_bios;
+ const struct dc_vbios_funcs *bp_funcs = bios->funcs;
+ struct bp_disp_connector_caps_info disp_connect_caps_info = { 0 };
+
+ DC_LOGGER_INIT(dc_ctx->logger);
+
+ link->irq_source_hpd = DC_IRQ_SOURCE_INVALID;
+ link->irq_source_hpd_rx = DC_IRQ_SOURCE_INVALID;
+ link->link_status.dpcd_caps = &link->dpcd_caps;
+
+ link->dc = init_params->dc;
+ link->ctx = dc_ctx;
+ link->link_index = init_params->link_index;
+
+ memset(&link->preferred_training_settings, 0,
+ sizeof(struct dc_link_training_overrides));
+ memset(&link->preferred_link_setting, 0,
+ sizeof(struct dc_link_settings));
+
+ link->link_id =
+ bios->funcs->get_connector_id(bios, init_params->connector_index);
+
+ link->ep_type = DISPLAY_ENDPOINT_PHY;
+
+ DC_LOG_DC("BIOS object table - link_id: %d", link->link_id.id);
+
+ if (bios->funcs->get_disp_connector_caps_info) {
+ bios->funcs->get_disp_connector_caps_info(bios, link->link_id, &disp_connect_caps_info);
+ link->is_internal_display = disp_connect_caps_info.INTERNAL_DISPLAY;
+ DC_LOG_DC("BIOS object table - is_internal_display: %d", link->is_internal_display);
+ }
+
+ if (link->link_id.type != OBJECT_TYPE_CONNECTOR) {
+ dm_output_to_console("%s: Invalid Connector ObjectID from Adapter Service for connector index:%d! type %d expected %d\n",
+ __func__, init_params->connector_index,
+ link->link_id.type, OBJECT_TYPE_CONNECTOR);
+ goto create_fail;
+ }
+
+ if (link->dc->res_pool->funcs->link_init)
+ link->dc->res_pool->funcs->link_init(link);
+
+ link->hpd_gpio = link_get_hpd_gpio(link->ctx->dc_bios, link->link_id,
+ link->ctx->gpio_service);
+
+ if (link->hpd_gpio) {
+ dal_gpio_open(link->hpd_gpio, GPIO_MODE_INTERRUPT);
+ dal_gpio_unlock_pin(link->hpd_gpio);
+ link->irq_source_hpd = dal_irq_get_source(link->hpd_gpio);
+
+ DC_LOG_DC("BIOS object table - hpd_gpio id: %d", link->hpd_gpio->id);
+ DC_LOG_DC("BIOS object table - hpd_gpio en: %d", link->hpd_gpio->en);
+ }
+
+ switch (link->link_id.id) {
+ case CONNECTOR_ID_HDMI_TYPE_A:
+ link->connector_signal = SIGNAL_TYPE_HDMI_TYPE_A;
+
+ break;
+ case CONNECTOR_ID_SINGLE_LINK_DVID:
+ case CONNECTOR_ID_SINGLE_LINK_DVII:
+ link->connector_signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
+ break;
+ case CONNECTOR_ID_DUAL_LINK_DVID:
+ case CONNECTOR_ID_DUAL_LINK_DVII:
+ link->connector_signal = SIGNAL_TYPE_DVI_DUAL_LINK;
+ break;
+ case CONNECTOR_ID_DISPLAY_PORT:
+ case CONNECTOR_ID_USBC:
+ link->connector_signal = SIGNAL_TYPE_DISPLAY_PORT;
+
+ if (link->hpd_gpio)
+ link->irq_source_hpd_rx =
+ dal_irq_get_rx_source(link->hpd_gpio);
+
+ break;
+ case CONNECTOR_ID_EDP:
+ link->connector_signal = SIGNAL_TYPE_EDP;
+
+ if (link->hpd_gpio) {
+ if (!link->dc->config.allow_edp_hotplug_detection)
+ link->irq_source_hpd = DC_IRQ_SOURCE_INVALID;
+
+ switch (link->dc->config.allow_edp_hotplug_detection) {
+ case 1: // only the 1st eDP handles hotplug
+ if (link->link_index == 0)
+ link->irq_source_hpd_rx =
+ dal_irq_get_rx_source(link->hpd_gpio);
+ else
+ link->irq_source_hpd = DC_IRQ_SOURCE_INVALID;
+ break;
+ case 2: // only the 2nd eDP handles hotplug
+ if (link->link_index == 1)
+ link->irq_source_hpd_rx =
+ dal_irq_get_rx_source(link->hpd_gpio);
+ else
+ link->irq_source_hpd = DC_IRQ_SOURCE_INVALID;
+ break;
+ default:
+ break;
+ }
+ }
+
+ break;
+ case CONNECTOR_ID_LVDS:
+ link->connector_signal = SIGNAL_TYPE_LVDS;
+ break;
+ default:
+ DC_LOG_WARNING("Unsupported Connector type:%d!\n",
+ link->link_id.id);
+ goto create_fail;
+ }
+
+ /* TODO: #DAL3 Implement id to str function.*/
+ LINK_INFO("Connector[%d] description:"
+ "signal %d\n",
+ init_params->connector_index,
+ link->connector_signal);
+
+ ddc_service_init_data.ctx = link->ctx;
+ ddc_service_init_data.id = link->link_id;
+ ddc_service_init_data.link = link;
+ link->ddc = link_create_ddc_service(&ddc_service_init_data);
+
+ if (!link->ddc) {
+ DC_ERROR("Failed to create ddc_service!\n");
+ goto ddc_create_fail;
+ }
+
+ if (!link->ddc->ddc_pin) {
+ DC_ERROR("Failed to get I2C info for connector!\n");
+ goto ddc_create_fail;
+ }
+
+ link->ddc_hw_inst =
+ dal_ddc_get_line(get_ddc_pin(link->ddc));
+
+
+ if (link->dc->res_pool->funcs->panel_cntl_create &&
+ (link->link_id.id == CONNECTOR_ID_EDP ||
+ link->link_id.id == CONNECTOR_ID_LVDS)) {
+ panel_cntl_init_data.ctx = dc_ctx;
+ panel_cntl_init_data.inst =
+ panel_cntl_init_data.ctx->dc_edp_id_count;
+ link->panel_cntl =
+ link->dc->res_pool->funcs->panel_cntl_create(
+ &panel_cntl_init_data);
+ panel_cntl_init_data.ctx->dc_edp_id_count++;
+
+ if (link->panel_cntl == NULL) {
+ DC_ERROR("Failed to create link panel_cntl!\n");
+ goto panel_cntl_create_fail;
+ }
+ }
+
+ enc_init_data.ctx = dc_ctx;
+ bp_funcs->get_src_obj(dc_ctx->dc_bios, link->link_id, 0,
+ &enc_init_data.encoder);
+ enc_init_data.connector = link->link_id;
+ enc_init_data.channel = get_ddc_line(link);
+ enc_init_data.hpd_source = get_hpd_line(link);
+
+ link->hpd_src = enc_init_data.hpd_source;
+
+ enc_init_data.transmitter =
+ translate_encoder_to_transmitter(enc_init_data.encoder);
+ link->link_enc =
+ link->dc->res_pool->funcs->link_enc_create(dc_ctx, &enc_init_data);
+
+ DC_LOG_DC("BIOS object table - DP_IS_USB_C: %d", link->link_enc->features.flags.bits.DP_IS_USB_C);
+ DC_LOG_DC("BIOS object table - IS_DP2_CAPABLE: %d", link->link_enc->features.flags.bits.IS_DP2_CAPABLE);
+
+ if (!link->link_enc) {
+ DC_ERROR("Failed to create link encoder!\n");
+ goto link_enc_create_fail;
+ }
+
+ /* Update link encoder tracking variables. These are used for the dynamic
+ * assignment of link encoders to streams.
+ */
+ link->eng_id = link->link_enc->preferred_engine;
+ link->dc->res_pool->link_encoders[link->eng_id - ENGINE_ID_DIGA] = link->link_enc;
+ link->dc->res_pool->dig_link_enc_count++;
+
+ link->link_enc_hw_inst = link->link_enc->transmitter;
+ for (i = 0; i < 4; i++) {
+ if (bp_funcs->get_device_tag(dc_ctx->dc_bios,
+ link->link_id, i,
+ &link->device_tag) != BP_RESULT_OK) {
+ DC_ERROR("Failed to find device tag!\n");
+ goto device_tag_fail;
+ }
+
+ /* Look for device tag that matches connector signal,
+ * CRT for rgb, LCD for other supported signal tyes
+ */
+ if (!bp_funcs->is_device_id_supported(dc_ctx->dc_bios,
+ link->device_tag.dev_id))
+ continue;
+ if (link->device_tag.dev_id.device_type == DEVICE_TYPE_CRT &&
+ link->connector_signal != SIGNAL_TYPE_RGB)
+ continue;
+ if (link->device_tag.dev_id.device_type == DEVICE_TYPE_LCD &&
+ link->connector_signal == SIGNAL_TYPE_RGB)
+ continue;
+
+ DC_LOG_DC("BIOS object table - device_tag.acpi_device: %d", link->device_tag.acpi_device);
+ DC_LOG_DC("BIOS object table - device_tag.dev_id.device_type: %d", link->device_tag.dev_id.device_type);
+ DC_LOG_DC("BIOS object table - device_tag.dev_id.enum_id: %d", link->device_tag.dev_id.enum_id);
+ break;
+ }
+
+ if (bios->integrated_info)
+ info = *bios->integrated_info;
+
+ /* Look for channel mapping corresponding to connector and device tag */
+ for (i = 0; i < MAX_NUMBER_OF_EXT_DISPLAY_PATH; i++) {
+ struct external_display_path *path =
+ &info.ext_disp_conn_info.path[i];
+
+ if (path->device_connector_id.enum_id == link->link_id.enum_id &&
+ path->device_connector_id.id == link->link_id.id &&
+ path->device_connector_id.type == link->link_id.type) {
+ if (link->device_tag.acpi_device != 0 &&
+ path->device_acpi_enum == link->device_tag.acpi_device) {
+ link->ddi_channel_mapping = path->channel_mapping;
+ link->chip_caps = path->caps;
+ DC_LOG_DC("BIOS object table - ddi_channel_mapping: 0x%04X", link->ddi_channel_mapping.raw);
+ DC_LOG_DC("BIOS object table - chip_caps: %d", link->chip_caps);
+ } else if (path->device_tag ==
+ link->device_tag.dev_id.raw_device_tag) {
+ link->ddi_channel_mapping = path->channel_mapping;
+ link->chip_caps = path->caps;
+ DC_LOG_DC("BIOS object table - ddi_channel_mapping: 0x%04X", link->ddi_channel_mapping.raw);
+ DC_LOG_DC("BIOS object table - chip_caps: %d", link->chip_caps);
+ }
+
+ if (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) {
+ link->bios_forced_drive_settings.VOLTAGE_SWING =
+ (info.ext_disp_conn_info.fixdpvoltageswing & 0x3);
+ link->bios_forced_drive_settings.PRE_EMPHASIS =
+ ((info.ext_disp_conn_info.fixdpvoltageswing >> 2) & 0x3);
+ }
+
+ break;
+ }
+ }
+
+ if (bios->funcs->get_atom_dc_golden_table)
+ bios->funcs->get_atom_dc_golden_table(bios);
+
+ /*
+ * TODO check if GPIO programmed correctly
+ *
+ * If GPIO isn't programmed correctly HPD might not rise or drain
+ * fast enough, leading to bounces.
+ */
+ program_hpd_filter(link);
+
+ link->psr_settings.psr_vtotal_control_support = false;
+ link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
+
+ DC_LOG_DC("BIOS object table - %s finished successfully.\n", __func__);
+ return true;
+device_tag_fail:
+ link->link_enc->funcs->destroy(&link->link_enc);
+link_enc_create_fail:
+ if (link->panel_cntl != NULL)
+ link->panel_cntl->funcs->destroy(&link->panel_cntl);
+panel_cntl_create_fail:
+ link_destroy_ddc_service(&link->ddc);
+ddc_create_fail:
+create_fail:
+
+ if (link->hpd_gpio) {
+ dal_gpio_destroy_irq(&link->hpd_gpio);
+ link->hpd_gpio = NULL;
+ }
+
+ DC_LOG_DC("BIOS object table - %s failed.\n", __func__);
+ return false;
+}
+
+static bool dc_link_construct_dpia(struct dc_link *link,
+ const struct link_init_data *init_params)
+{
+ struct ddc_service_init_data ddc_service_init_data = { 0 };
+ struct dc_context *dc_ctx = init_params->ctx;
+
+ DC_LOGGER_INIT(dc_ctx->logger);
+
+ /* Initialized irq source for hpd and hpd rx */
+ link->irq_source_hpd = DC_IRQ_SOURCE_INVALID;
+ link->irq_source_hpd_rx = DC_IRQ_SOURCE_INVALID;
+ link->link_status.dpcd_caps = &link->dpcd_caps;
+
+ link->dc = init_params->dc;
+ link->ctx = dc_ctx;
+ link->link_index = init_params->link_index;
+
+ memset(&link->preferred_training_settings, 0,
+ sizeof(struct dc_link_training_overrides));
+ memset(&link->preferred_link_setting, 0,
+ sizeof(struct dc_link_settings));
+
+ /* Dummy Init for linkid */
+ link->link_id.type = OBJECT_TYPE_CONNECTOR;
+ link->link_id.id = CONNECTOR_ID_DISPLAY_PORT;
+ link->link_id.enum_id = ENUM_ID_1 + init_params->connector_index;
+ link->is_internal_display = false;
+ link->connector_signal = SIGNAL_TYPE_DISPLAY_PORT;
+ LINK_INFO("Connector[%d] description:signal %d\n",
+ init_params->connector_index,
+ link->connector_signal);
+
+ link->ep_type = DISPLAY_ENDPOINT_USB4_DPIA;
+ link->is_dig_mapping_flexible = true;
+
+ /* TODO: Initialize link : funcs->link_init */
+
+ ddc_service_init_data.ctx = link->ctx;
+ ddc_service_init_data.id = link->link_id;
+ ddc_service_init_data.link = link;
+ /* Set indicator for dpia link so that ddc wont be created */
+ ddc_service_init_data.is_dpia_link = true;
+
+ link->ddc = link_create_ddc_service(&ddc_service_init_data);
+ if (!link->ddc) {
+ DC_ERROR("Failed to create ddc_service!\n");
+ goto ddc_create_fail;
+ }
+
+ /* Set dpia port index : 0 to number of dpia ports */
+ link->ddc_hw_inst = init_params->connector_index;
+
+ /* TODO: Create link encoder */
+
+ link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
+
+ /* Some docks seem to NAK I2C writes to segment pointer with mot=0. */
+ link->wa_flags.dp_mot_reset_segment = true;
+
+ return true;
+
+ddc_create_fail:
+ return false;
+}
+
+static bool link_construct(struct dc_link *link,
+ const struct link_init_data *init_params)
+{
+ /* Handle dpia case */
+ if (init_params->is_dpia_link == true)
+ return dc_link_construct_dpia(link, init_params);
+ else
+ return dc_link_construct_phy(link, init_params);
+}
+
+struct dc_link *link_create(const struct link_init_data *init_params)
+{
+ struct dc_link *link =
+ kzalloc(sizeof(*link), GFP_KERNEL);
+
+ if (NULL == link)
+ goto alloc_fail;
+
+ if (false == link_construct(link, init_params))
+ goto construct_fail;
+
+ return link;
+
+construct_fail:
+ kfree(link);
+
+alloc_fail:
+ return NULL;
+}
+
+void link_destroy(struct dc_link **link)
+{
+ link_destruct(*link);
+ kfree(*link);
+ *link = NULL;
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_factory.h b/drivers/gpu/drm/amd/display/dc/link/link_factory.h
new file mode 100644
index 000000000000..5b846147c4a6
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/link/link_factory.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#ifndef __LINK_FACTORY_H__
+#define __LINK_FACTORY_H__
+#include "link.h"
+
+#endif /* __LINK_FACTORY_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_resource.c b/drivers/gpu/drm/amd/display/dc/link/link_resource.c
new file mode 100644
index 000000000000..bd42bb273c0c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/link/link_resource.c
@@ -0,0 +1,114 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+/* FILE POLICY AND INTENDED USAGE:
+ * This file implements accessors to link resource.
+ */
+
+#include "link_resource.h"
+#include "protocols/link_dp_capability.h"
+
+void link_get_cur_link_res(const struct dc_link *link,
+ struct link_resource *link_res)
+{
+ int i;
+ struct pipe_ctx *pipe = NULL;
+
+ memset(link_res, 0, sizeof(*link_res));
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe = &link->dc->current_state->res_ctx.pipe_ctx[i];
+ if (pipe->stream && pipe->stream->link && pipe->top_pipe == NULL) {
+ if (pipe->stream->link == link) {
+ *link_res = pipe->link_res;
+ break;
+ }
+ }
+ }
+
+}
+
+void link_get_cur_res_map(const struct dc *dc, uint32_t *map)
+{
+ struct dc_link *link;
+ uint32_t i;
+ uint32_t hpo_dp_recycle_map = 0;
+
+ *map = 0;
+
+ if (dc->caps.dp_hpo) {
+ for (i = 0; i < dc->caps.max_links; i++) {
+ link = dc->links[i];
+ if (link->link_status.link_active &&
+ link_dp_get_encoding_format(&link->reported_link_cap) == DP_128b_132b_ENCODING &&
+ link_dp_get_encoding_format(&link->cur_link_settings) != DP_128b_132b_ENCODING)
+ /* hpo dp link encoder is considered as recycled, when RX reports 128b/132b encoding capability
+ * but current link doesn't use it.
+ */
+ hpo_dp_recycle_map |= (1 << i);
+ }
+ *map |= (hpo_dp_recycle_map << LINK_RES_HPO_DP_REC_MAP__SHIFT);
+ }
+}
+
+void link_restore_res_map(const struct dc *dc, uint32_t *map)
+{
+ struct dc_link *link;
+ uint32_t i;
+ unsigned int available_hpo_dp_count;
+ uint32_t hpo_dp_recycle_map = (*map & LINK_RES_HPO_DP_REC_MAP__MASK)
+ >> LINK_RES_HPO_DP_REC_MAP__SHIFT;
+
+ if (dc->caps.dp_hpo) {
+ available_hpo_dp_count = dc->res_pool->hpo_dp_link_enc_count;
+ /* remove excess 128b/132b encoding support for not recycled links */
+ for (i = 0; i < dc->caps.max_links; i++) {
+ if ((hpo_dp_recycle_map & (1 << i)) == 0) {
+ link = dc->links[i];
+ if (link->type != dc_connection_none &&
+ link_dp_get_encoding_format(&link->verified_link_cap) == DP_128b_132b_ENCODING) {
+ if (available_hpo_dp_count > 0)
+ available_hpo_dp_count--;
+ else
+ /* remove 128b/132b encoding capability by limiting verified link rate to HBR3 */
+ link->verified_link_cap.link_rate = LINK_RATE_HIGH3;
+ }
+ }
+ }
+ /* remove excess 128b/132b encoding support for recycled links */
+ for (i = 0; i < dc->caps.max_links; i++) {
+ if ((hpo_dp_recycle_map & (1 << i)) != 0) {
+ link = dc->links[i];
+ if (link->type != dc_connection_none &&
+ link_dp_get_encoding_format(&link->verified_link_cap) == DP_128b_132b_ENCODING) {
+ if (available_hpo_dp_count > 0)
+ available_hpo_dp_count--;
+ else
+ /* remove 128b/132b encoding capability by limiting verified link rate to HBR3 */
+ link->verified_link_cap.link_rate = LINK_RATE_HIGH3;
+ }
+ }
+ }
+ }
+}
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_resource.h b/drivers/gpu/drm/amd/display/dc/link/link_resource.h
new file mode 100644
index 000000000000..45554d30adf0
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/link/link_resource.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#ifndef __LINK_RESOURCE_H__
+#define __LINK_RESOURCE_H__
+#include "link.h"
+void link_get_cur_link_res(const struct dc_link *link,
+ struct link_resource *link_res);
+
+#endif /* __LINK_RESOURCE_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_validation.c b/drivers/gpu/drm/amd/display/dc/link/link_validation.c
new file mode 100644
index 000000000000..d4f6ee6ca948
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/link/link_validation.c
@@ -0,0 +1,398 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+/* FILE POLICY AND INTENDED USAGE:
+ * This file owns timing validation against various link limitations. (ex.
+ * link bandwidth, receiver capability or our hardware capability) It also
+ * provides helper functions exposing bandwidth formulas used in validation.
+ */
+#include "link_validation.h"
+#include "resource.h"
+
+#define DC_LOGGER_INIT(logger)
+
+static uint32_t get_tmds_output_pixel_clock_100hz(const struct dc_crtc_timing *timing)
+{
+
+ uint32_t pxl_clk = timing->pix_clk_100hz;
+
+ if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
+ pxl_clk /= 2;
+ else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422)
+ pxl_clk = pxl_clk * 2 / 3;
+
+ if (timing->display_color_depth == COLOR_DEPTH_101010)
+ pxl_clk = pxl_clk * 10 / 8;
+ else if (timing->display_color_depth == COLOR_DEPTH_121212)
+ pxl_clk = pxl_clk * 12 / 8;
+
+ return pxl_clk;
+}
+
+static bool dp_active_dongle_validate_timing(
+ const struct dc_crtc_timing *timing,
+ const struct dpcd_caps *dpcd_caps)
+{
+ const struct dc_dongle_caps *dongle_caps = &dpcd_caps->dongle_caps;
+
+ switch (dpcd_caps->dongle_type) {
+ case DISPLAY_DONGLE_DP_VGA_CONVERTER:
+ case DISPLAY_DONGLE_DP_DVI_CONVERTER:
+ case DISPLAY_DONGLE_DP_DVI_DONGLE:
+ if (timing->pixel_encoding == PIXEL_ENCODING_RGB)
+ return true;
+ else
+ return false;
+ default:
+ break;
+ }
+
+ if (dpcd_caps->dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER &&
+ dongle_caps->extendedCapValid == true) {
+ /* Check Pixel Encoding */
+ switch (timing->pixel_encoding) {
+ case PIXEL_ENCODING_RGB:
+ case PIXEL_ENCODING_YCBCR444:
+ break;
+ case PIXEL_ENCODING_YCBCR422:
+ if (!dongle_caps->is_dp_hdmi_ycbcr422_pass_through)
+ return false;
+ break;
+ case PIXEL_ENCODING_YCBCR420:
+ if (!dongle_caps->is_dp_hdmi_ycbcr420_pass_through)
+ return false;
+ break;
+ default:
+ /* Invalid Pixel Encoding*/
+ return false;
+ }
+
+ switch (timing->display_color_depth) {
+ case COLOR_DEPTH_666:
+ case COLOR_DEPTH_888:
+ /*888 and 666 should always be supported*/
+ break;
+ case COLOR_DEPTH_101010:
+ if (dongle_caps->dp_hdmi_max_bpc < 10)
+ return false;
+ break;
+ case COLOR_DEPTH_121212:
+ if (dongle_caps->dp_hdmi_max_bpc < 12)
+ return false;
+ break;
+ case COLOR_DEPTH_141414:
+ case COLOR_DEPTH_161616:
+ default:
+ /* These color depths are currently not supported */
+ return false;
+ }
+
+ /* Check 3D format */
+ switch (timing->timing_3d_format) {
+ case TIMING_3D_FORMAT_NONE:
+ case TIMING_3D_FORMAT_FRAME_ALTERNATE:
+ /*Only frame alternate 3D is supported on active dongle*/
+ break;
+ default:
+ /*other 3D formats are not supported due to bad infoframe translation */
+ return false;
+ }
+
+ if (dongle_caps->dp_hdmi_frl_max_link_bw_in_kbps > 0) { // DP to HDMI FRL converter
+ struct dc_crtc_timing outputTiming = *timing;
+
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (timing->flags.DSC && !timing->dsc_cfg.is_frl)
+ /* DP input has DSC, HDMI FRL output doesn't have DSC, remove DSC from output timing */
+ outputTiming.flags.DSC = 0;
+#endif
+ if (dc_bandwidth_in_kbps_from_timing(&outputTiming) > dongle_caps->dp_hdmi_frl_max_link_bw_in_kbps)
+ return false;
+ } else { // DP to HDMI TMDS converter
+ if (get_tmds_output_pixel_clock_100hz(timing) > (dongle_caps->dp_hdmi_max_pixel_clk_in_khz * 10))
+ return false;
+ }
+ }
+
+ if (dpcd_caps->channel_coding_cap.bits.DP_128b_132b_SUPPORTED == 0 &&
+ dpcd_caps->dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_PASSTHROUGH_SUPPORT == 0 &&
+ dongle_caps->dfp_cap_ext.supported) {
+
+ if (dongle_caps->dfp_cap_ext.max_pixel_rate_in_mps < (timing->pix_clk_100hz / 10000))
+ return false;
+
+ if (dongle_caps->dfp_cap_ext.max_video_h_active_width < timing->h_addressable)
+ return false;
+
+ if (dongle_caps->dfp_cap_ext.max_video_v_active_height < timing->v_addressable)
+ return false;
+
+ if (timing->pixel_encoding == PIXEL_ENCODING_RGB) {
+ if (!dongle_caps->dfp_cap_ext.encoding_format_caps.support_rgb)
+ return false;
+ if (timing->display_color_depth == COLOR_DEPTH_666 &&
+ !dongle_caps->dfp_cap_ext.rgb_color_depth_caps.support_6bpc)
+ return false;
+ else if (timing->display_color_depth == COLOR_DEPTH_888 &&
+ !dongle_caps->dfp_cap_ext.rgb_color_depth_caps.support_8bpc)
+ return false;
+ else if (timing->display_color_depth == COLOR_DEPTH_101010 &&
+ !dongle_caps->dfp_cap_ext.rgb_color_depth_caps.support_10bpc)
+ return false;
+ else if (timing->display_color_depth == COLOR_DEPTH_121212 &&
+ !dongle_caps->dfp_cap_ext.rgb_color_depth_caps.support_12bpc)
+ return false;
+ else if (timing->display_color_depth == COLOR_DEPTH_161616 &&
+ !dongle_caps->dfp_cap_ext.rgb_color_depth_caps.support_16bpc)
+ return false;
+ } else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR444) {
+ if (!dongle_caps->dfp_cap_ext.encoding_format_caps.support_rgb)
+ return false;
+ if (timing->display_color_depth == COLOR_DEPTH_888 &&
+ !dongle_caps->dfp_cap_ext.ycbcr444_color_depth_caps.support_8bpc)
+ return false;
+ else if (timing->display_color_depth == COLOR_DEPTH_101010 &&
+ !dongle_caps->dfp_cap_ext.ycbcr444_color_depth_caps.support_10bpc)
+ return false;
+ else if (timing->display_color_depth == COLOR_DEPTH_121212 &&
+ !dongle_caps->dfp_cap_ext.ycbcr444_color_depth_caps.support_12bpc)
+ return false;
+ else if (timing->display_color_depth == COLOR_DEPTH_161616 &&
+ !dongle_caps->dfp_cap_ext.ycbcr444_color_depth_caps.support_16bpc)
+ return false;
+ } else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422) {
+ if (!dongle_caps->dfp_cap_ext.encoding_format_caps.support_rgb)
+ return false;
+ if (timing->display_color_depth == COLOR_DEPTH_888 &&
+ !dongle_caps->dfp_cap_ext.ycbcr422_color_depth_caps.support_8bpc)
+ return false;
+ else if (timing->display_color_depth == COLOR_DEPTH_101010 &&
+ !dongle_caps->dfp_cap_ext.ycbcr422_color_depth_caps.support_10bpc)
+ return false;
+ else if (timing->display_color_depth == COLOR_DEPTH_121212 &&
+ !dongle_caps->dfp_cap_ext.ycbcr422_color_depth_caps.support_12bpc)
+ return false;
+ else if (timing->display_color_depth == COLOR_DEPTH_161616 &&
+ !dongle_caps->dfp_cap_ext.ycbcr422_color_depth_caps.support_16bpc)
+ return false;
+ } else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) {
+ if (!dongle_caps->dfp_cap_ext.encoding_format_caps.support_rgb)
+ return false;
+ if (timing->display_color_depth == COLOR_DEPTH_888 &&
+ !dongle_caps->dfp_cap_ext.ycbcr420_color_depth_caps.support_8bpc)
+ return false;
+ else if (timing->display_color_depth == COLOR_DEPTH_101010 &&
+ !dongle_caps->dfp_cap_ext.ycbcr420_color_depth_caps.support_10bpc)
+ return false;
+ else if (timing->display_color_depth == COLOR_DEPTH_121212 &&
+ !dongle_caps->dfp_cap_ext.ycbcr420_color_depth_caps.support_12bpc)
+ return false;
+ else if (timing->display_color_depth == COLOR_DEPTH_161616 &&
+ !dongle_caps->dfp_cap_ext.ycbcr420_color_depth_caps.support_16bpc)
+ return false;
+ }
+ }
+
+ return true;
+}
+
+uint32_t dp_link_bandwidth_kbps(
+ const struct dc_link *link,
+ const struct dc_link_settings *link_settings)
+{
+ uint32_t total_data_bw_efficiency_x10000 = 0;
+ uint32_t link_rate_per_lane_kbps = 0;
+
+ switch (link_dp_get_encoding_format(link_settings)) {
+ case DP_8b_10b_ENCODING:
+ /* For 8b/10b encoding:
+ * link rate is defined in the unit of LINK_RATE_REF_FREQ_IN_KHZ per DP byte per lane.
+ * data bandwidth efficiency is 80% with additional 3% overhead if FEC is supported.
+ */
+ link_rate_per_lane_kbps = link_settings->link_rate * LINK_RATE_REF_FREQ_IN_KHZ * BITS_PER_DP_BYTE;
+ total_data_bw_efficiency_x10000 = DATA_EFFICIENCY_8b_10b_x10000;
+ if (dc_link_should_enable_fec(link)) {
+ total_data_bw_efficiency_x10000 /= 100;
+ total_data_bw_efficiency_x10000 *= DATA_EFFICIENCY_8b_10b_FEC_EFFICIENCY_x100;
+ }
+ break;
+ case DP_128b_132b_ENCODING:
+ /* For 128b/132b encoding:
+ * link rate is defined in the unit of 10mbps per lane.
+ * total data bandwidth efficiency is always 96.71%.
+ */
+ link_rate_per_lane_kbps = link_settings->link_rate * 10000;
+ total_data_bw_efficiency_x10000 = DATA_EFFICIENCY_128b_132b_x10000;
+ break;
+ default:
+ break;
+ }
+
+ /* overall effective link bandwidth = link rate per lane * lane count * total data bandwidth efficiency */
+ return link_rate_per_lane_kbps * link_settings->lane_count / 10000 * total_data_bw_efficiency_x10000;
+}
+
+uint32_t link_timing_bandwidth_kbps(
+ const struct dc_crtc_timing *timing)
+{
+ uint32_t bits_per_channel = 0;
+ uint32_t kbps;
+
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (timing->flags.DSC)
+ return dc_dsc_stream_bandwidth_in_kbps(timing,
+ timing->dsc_cfg.bits_per_pixel,
+ timing->dsc_cfg.num_slices_h,
+ timing->dsc_cfg.is_dp);
+#endif /* CONFIG_DRM_AMD_DC_DCN */
+
+ switch (timing->display_color_depth) {
+ case COLOR_DEPTH_666:
+ bits_per_channel = 6;
+ break;
+ case COLOR_DEPTH_888:
+ bits_per_channel = 8;
+ break;
+ case COLOR_DEPTH_101010:
+ bits_per_channel = 10;
+ break;
+ case COLOR_DEPTH_121212:
+ bits_per_channel = 12;
+ break;
+ case COLOR_DEPTH_141414:
+ bits_per_channel = 14;
+ break;
+ case COLOR_DEPTH_161616:
+ bits_per_channel = 16;
+ break;
+ default:
+ ASSERT(bits_per_channel != 0);
+ bits_per_channel = 8;
+ break;
+ }
+
+ kbps = timing->pix_clk_100hz / 10;
+ kbps *= bits_per_channel;
+
+ if (timing->flags.Y_ONLY != 1) {
+ /*Only YOnly make reduce bandwidth by 1/3 compares to RGB*/
+ kbps *= 3;
+ if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
+ kbps /= 2;
+ else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422)
+ kbps = kbps * 2 / 3;
+ }
+
+ return kbps;
+}
+
+static bool dp_validate_mode_timing(
+ struct dc_link *link,
+ const struct dc_crtc_timing *timing)
+{
+ uint32_t req_bw;
+ uint32_t max_bw;
+
+ const struct dc_link_settings *link_setting;
+
+ /* According to spec, VSC SDP should be used if pixel format is YCbCr420 */
+ if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420 &&
+ !link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED &&
+ dal_graphics_object_id_get_connector_id(link->link_id) != CONNECTOR_ID_VIRTUAL)
+ return false;
+
+ /*always DP fail safe mode*/
+ if ((timing->pix_clk_100hz / 10) == (uint32_t) 25175 &&
+ timing->h_addressable == (uint32_t) 640 &&
+ timing->v_addressable == (uint32_t) 480)
+ return true;
+
+ link_setting = dc_link_get_link_cap(link);
+
+ /* TODO: DYNAMIC_VALIDATION needs to be implemented */
+ /*if (flags.DYNAMIC_VALIDATION == 1 &&
+ link->verified_link_cap.lane_count != LANE_COUNT_UNKNOWN)
+ link_setting = &link->verified_link_cap;
+ */
+
+ req_bw = dc_bandwidth_in_kbps_from_timing(timing);
+ max_bw = dc_link_bandwidth_kbps(link, link_setting);
+
+ if (req_bw <= max_bw) {
+ /* remember the biggest mode here, during
+ * initial link training (to get
+ * verified_link_cap), LS sends event about
+ * cannot train at reported cap to upper
+ * layer and upper layer will re-enumerate modes.
+ * this is not necessary if the lower
+ * verified_link_cap is enough to drive
+ * all the modes */
+
+ /* TODO: DYNAMIC_VALIDATION needs to be implemented */
+ /* if (flags.DYNAMIC_VALIDATION == 1)
+ dpsst->max_req_bw_for_verified_linkcap = dal_max(
+ dpsst->max_req_bw_for_verified_linkcap, req_bw); */
+ return true;
+ } else
+ return false;
+}
+
+enum dc_status link_validate_mode_timing(
+ const struct dc_stream_state *stream,
+ struct dc_link *link,
+ const struct dc_crtc_timing *timing)
+{
+ uint32_t max_pix_clk = stream->link->dongle_max_pix_clk * 10;
+ struct dpcd_caps *dpcd_caps = &link->dpcd_caps;
+
+ /* A hack to avoid failing any modes for EDID override feature on
+ * topology change such as lower quality cable for DP or different dongle
+ */
+ if (link->remote_sinks[0] && link->remote_sinks[0]->sink_signal == SIGNAL_TYPE_VIRTUAL)
+ return DC_OK;
+
+ /* Passive Dongle */
+ if (max_pix_clk != 0 && get_tmds_output_pixel_clock_100hz(timing) > max_pix_clk)
+ return DC_EXCEED_DONGLE_CAP;
+
+ /* Active Dongle*/
+ if (!dp_active_dongle_validate_timing(timing, dpcd_caps))
+ return DC_EXCEED_DONGLE_CAP;
+
+ switch (stream->signal) {
+ case SIGNAL_TYPE_EDP:
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ if (!dp_validate_mode_timing(
+ link,
+ timing))
+ return DC_NO_DP_LINK_BANDWIDTH;
+ break;
+
+ default:
+ break;
+ }
+
+ return DC_OK;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_validation.h b/drivers/gpu/drm/amd/display/dc/link/link_validation.h
new file mode 100644
index 000000000000..ab6a44f50032
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/link/link_validation.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#ifndef __LINK_VALIDATION_H__
+#define __LINK_VALIDATION_H__
+#include "link.h"
+#endif /* __LINK_VALIDATION_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c
index ce8d6a54ca54..5269125bc2a4 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c
@@ -23,20 +23,20 @@
*
*/
-#include "dm_services.h"
-#include "dm_helpers.h"
-#include "gpio_service_interface.h"
-#include "include/ddc_service_types.h"
-#include "include/grph_object_id.h"
-#include "include/dpcd_defs.h"
-#include "include/logger_interface.h"
-#include "include/vector.h"
-#include "core_types.h"
-#include "dc_link_ddc.h"
+/* FILE POLICY AND INTENDED USAGE:
+ *
+ * This file implements generic display communication protocols such as i2c, aux
+ * and scdc. The file should not contain any specific applications of these
+ * protocols such as display capability query, detection, or handshaking such as
+ * link training.
+ */
+#include "link_ddc.h"
+#include "vector.h"
#include "dce/dce_aux.h"
-#include "dmub/inc/dmub_cmd.h"
+#include "dal_asic_id.h"
#include "link_dpcd.h"
-#include "include/dal_asic_id.h"
+#include "dm_helpers.h"
+#include "atomfirmware.h"
#define DC_LOGGER_INIT(logger)
@@ -45,87 +45,6 @@ static const uint8_t DP_VGA_DONGLE_BRANCH_DEV_NAME[] = "DpVga";
static const uint8_t DP_DVI_CONVERTER_ID_4[] = "m2DVIa";
static const uint8_t DP_DVI_CONVERTER_ID_5[] = "3393N2";
-#define AUX_POWER_UP_WA_DELAY 500
-#define I2C_OVER_AUX_DEFER_WA_DELAY 70
-#define DPVGA_DONGLE_AUX_DEFER_WA_DELAY 40
-#define I2C_OVER_AUX_DEFER_WA_DELAY_1MS 1
-
-/* CV smart dongle slave address for retrieving supported HDTV modes*/
-#define CV_SMART_DONGLE_ADDRESS 0x20
-/* DVI-HDMI dongle slave address for retrieving dongle signature*/
-#define DVI_HDMI_DONGLE_ADDRESS 0x68
-struct dvi_hdmi_dongle_signature_data {
- int8_t vendor[3];/* "AMD" */
- uint8_t version[2];
- uint8_t size;
- int8_t id[11];/* "6140063500G"*/
-};
-/* DP-HDMI dongle slave address for retrieving dongle signature*/
-#define DP_HDMI_DONGLE_ADDRESS 0x40
-static const uint8_t dp_hdmi_dongle_signature_str[] = "DP-HDMI ADAPTOR";
-#define DP_HDMI_DONGLE_SIGNATURE_EOT 0x04
-
-struct dp_hdmi_dongle_signature_data {
- int8_t id[15];/* "DP-HDMI ADAPTOR"*/
- uint8_t eot;/* end of transmition '\x4' */
-};
-
-/* SCDC Address defines (HDMI 2.0)*/
-#define HDMI_SCDC_WRITE_UPDATE_0_ARRAY 3
-#define HDMI_SCDC_ADDRESS 0x54
-#define HDMI_SCDC_SINK_VERSION 0x01
-#define HDMI_SCDC_SOURCE_VERSION 0x02
-#define HDMI_SCDC_UPDATE_0 0x10
-#define HDMI_SCDC_TMDS_CONFIG 0x20
-#define HDMI_SCDC_SCRAMBLER_STATUS 0x21
-#define HDMI_SCDC_CONFIG_0 0x30
-#define HDMI_SCDC_STATUS_FLAGS 0x40
-#define HDMI_SCDC_ERR_DETECT 0x50
-#define HDMI_SCDC_TEST_CONFIG 0xC0
-#define HDMI_SCDC_DEVICE_ID 0xD3
-
-union hdmi_scdc_update_read_data {
- uint8_t byte[2];
- struct {
- uint8_t STATUS_UPDATE:1;
- uint8_t CED_UPDATE:1;
- uint8_t RR_TEST:1;
- uint8_t RESERVED:5;
- uint8_t RESERVED2:8;
- } fields;
-};
-
-union hdmi_scdc_status_flags_data {
- uint8_t byte;
- struct {
- uint8_t CLOCK_DETECTED:1;
- uint8_t CH0_LOCKED:1;
- uint8_t CH1_LOCKED:1;
- uint8_t CH2_LOCKED:1;
- uint8_t RESERVED:4;
- } fields;
-};
-
-union hdmi_scdc_ced_data {
- uint8_t byte[7];
- struct {
- uint8_t CH0_8LOW:8;
- uint8_t CH0_7HIGH:7;
- uint8_t CH0_VALID:1;
- uint8_t CH1_8LOW:8;
- uint8_t CH1_7HIGH:7;
- uint8_t CH1_VALID:1;
- uint8_t CH2_8LOW:8;
- uint8_t CH2_7HIGH:7;
- uint8_t CH2_VALID:1;
- uint8_t CHECKSUM:8;
- uint8_t RESERVED:8;
- uint8_t RESERVED2:8;
- uint8_t RESERVED3:8;
- uint8_t RESERVED4:4;
- } fields;
-};
-
struct i2c_payloads {
struct vector payloads;
};
@@ -158,7 +77,7 @@ static uint32_t dal_ddc_i2c_payloads_get_count(struct i2c_payloads *p)
#define DDC_MIN(a, b) (((a) < (b)) ? (a) : (b))
-void dal_ddc_i2c_payloads_add(
+static void i2c_payloads_add(
struct i2c_payloads *payloads,
uint32_t address,
uint32_t len,
@@ -226,7 +145,7 @@ static void ddc_service_construct(
ddc_service->wa.raw = 0;
}
-struct ddc_service *dal_ddc_service_create(
+struct ddc_service *link_create_ddc_service(
struct ddc_service_init_data *init_data)
{
struct ddc_service *ddc_service;
@@ -246,7 +165,7 @@ static void ddc_service_destruct(struct ddc_service *ddc)
dal_gpio_destroy_ddc(&ddc->ddc_pin);
}
-void dal_ddc_service_destroy(struct ddc_service **ddc)
+void link_destroy_ddc_service(struct ddc_service **ddc)
{
if (!ddc || !*ddc) {
BREAK_TO_DEBUGGER();
@@ -257,19 +176,14 @@ void dal_ddc_service_destroy(struct ddc_service **ddc)
*ddc = NULL;
}
-enum ddc_service_type dal_ddc_service_get_type(struct ddc_service *ddc)
-{
- return DDC_SERVICE_TYPE_CONNECTOR;
-}
-
-void dal_ddc_service_set_transaction_type(
+void set_ddc_transaction_type(
struct ddc_service *ddc,
enum ddc_transaction_type type)
{
ddc->transaction_type = type;
}
-bool dal_ddc_service_is_in_aux_transaction_mode(struct ddc_service *ddc)
+bool link_is_in_aux_transaction_mode(struct ddc_service *ddc)
{
switch (ddc->transaction_type) {
case DDC_TRANSACTION_TYPE_I2C_OVER_AUX:
@@ -282,7 +196,7 @@ bool dal_ddc_service_is_in_aux_transaction_mode(struct ddc_service *ddc)
return false;
}
-void ddc_service_set_dongle_type(struct ddc_service *ddc,
+void set_dongle_type(struct ddc_service *ddc,
enum display_dongle_type dongle_type)
{
ddc->dongle_type = dongle_type;
@@ -324,7 +238,7 @@ static uint32_t defer_delay_converter_wa(
#define DP_TRANSLATOR_DELAY 5
-uint32_t get_defer_delay(struct ddc_service *ddc)
+uint32_t link_get_aux_defer_delay(struct ddc_service *ddc)
{
uint32_t defer_delay = 0;
@@ -352,175 +266,45 @@ uint32_t get_defer_delay(struct ddc_service *ddc)
return defer_delay;
}
-static bool i2c_read(
- struct ddc_service *ddc,
- uint32_t address,
- uint8_t *buffer,
- uint32_t len)
-{
- uint8_t offs_data = 0;
- struct i2c_payload payloads[2] = {
- {
- .write = true,
- .address = address,
- .length = 1,
- .data = &offs_data },
- {
- .write = false,
- .address = address,
- .length = len,
- .data = buffer } };
-
- struct i2c_command command = {
- .payloads = payloads,
- .number_of_payloads = 2,
- .engine = DDC_I2C_COMMAND_ENGINE,
- .speed = ddc->ctx->dc->caps.i2c_speed_in_khz };
-
- return dm_helpers_submit_i2c(
- ddc->ctx,
- ddc->link,
- &command);
-}
-
-void dal_ddc_service_i2c_query_dp_dual_mode_adaptor(
- struct ddc_service *ddc,
- struct display_sink_capability *sink_cap)
+static bool submit_aux_command(struct ddc_service *ddc,
+ struct aux_payload *payload)
{
- uint8_t i;
- bool is_valid_hdmi_signature;
- enum display_dongle_type *dongle = &sink_cap->dongle_type;
- uint8_t type2_dongle_buf[DP_ADAPTOR_TYPE2_SIZE];
- bool is_type2_dongle = false;
- int retry_count = 2;
- struct dp_hdmi_dongle_signature_data *dongle_signature;
-
- /* Assume we have no valid DP passive dongle connected */
- *dongle = DISPLAY_DONGLE_NONE;
- sink_cap->max_hdmi_pixel_clock = DP_ADAPTOR_HDMI_SAFE_MAX_TMDS_CLK;
-
- /* Read DP-HDMI dongle I2c (no response interpreted as DP-DVI dongle)*/
- if (!i2c_read(
- ddc,
- DP_HDMI_DONGLE_ADDRESS,
- type2_dongle_buf,
- sizeof(type2_dongle_buf))) {
- /* Passive HDMI dongles can sometimes fail here without retrying*/
- while (retry_count > 0) {
- if (i2c_read(ddc,
- DP_HDMI_DONGLE_ADDRESS,
- type2_dongle_buf,
- sizeof(type2_dongle_buf)))
- break;
- retry_count--;
- }
- if (retry_count == 0) {
- *dongle = DISPLAY_DONGLE_DP_DVI_DONGLE;
- sink_cap->max_hdmi_pixel_clock = DP_ADAPTOR_DVI_MAX_TMDS_CLK;
-
- CONN_DATA_DETECT(ddc->link, type2_dongle_buf, sizeof(type2_dongle_buf),
- "DP-DVI passive dongle %dMhz: ",
- DP_ADAPTOR_DVI_MAX_TMDS_CLK / 1000);
- return;
- }
- }
-
- /* Check if Type 2 dongle.*/
- if (type2_dongle_buf[DP_ADAPTOR_TYPE2_REG_ID] == DP_ADAPTOR_TYPE2_ID)
- is_type2_dongle = true;
-
- dongle_signature =
- (struct dp_hdmi_dongle_signature_data *)type2_dongle_buf;
+ uint32_t retrieved = 0;
+ bool ret = false;
- is_valid_hdmi_signature = true;
+ if (!ddc)
+ return false;
- /* Check EOT */
- if (dongle_signature->eot != DP_HDMI_DONGLE_SIGNATURE_EOT) {
- is_valid_hdmi_signature = false;
- }
+ if (!payload)
+ return false;
- /* Check signature */
- for (i = 0; i < sizeof(dongle_signature->id); ++i) {
- /* If its not the right signature,
- * skip mismatch in subversion byte.*/
- if (dongle_signature->id[i] !=
- dp_hdmi_dongle_signature_str[i] && i != 3) {
+ do {
+ struct aux_payload current_payload;
+ bool is_end_of_payload = (retrieved + DEFAULT_AUX_MAX_DATA_SIZE) >=
+ payload->length;
+ uint32_t payload_length = is_end_of_payload ?
+ payload->length - retrieved : DEFAULT_AUX_MAX_DATA_SIZE;
- if (is_type2_dongle) {
- is_valid_hdmi_signature = false;
- break;
- }
+ current_payload.address = payload->address;
+ current_payload.data = &payload->data[retrieved];
+ current_payload.defer_delay = payload->defer_delay;
+ current_payload.i2c_over_aux = payload->i2c_over_aux;
+ current_payload.length = payload_length;
+ /* set mot (middle of transaction) to false if it is the last payload */
+ current_payload.mot = is_end_of_payload ? payload->mot:true;
+ current_payload.write_status_update = false;
+ current_payload.reply = payload->reply;
+ current_payload.write = payload->write;
- }
- }
+ ret = link_aux_transfer_with_retries_no_mutex(ddc, &current_payload);
- if (is_type2_dongle) {
- uint32_t max_tmds_clk =
- type2_dongle_buf[DP_ADAPTOR_TYPE2_REG_MAX_TMDS_CLK];
-
- max_tmds_clk = max_tmds_clk * 2 + max_tmds_clk / 2;
-
- if (0 == max_tmds_clk ||
- max_tmds_clk < DP_ADAPTOR_TYPE2_MIN_TMDS_CLK ||
- max_tmds_clk > DP_ADAPTOR_TYPE2_MAX_TMDS_CLK) {
- *dongle = DISPLAY_DONGLE_DP_DVI_DONGLE;
-
- CONN_DATA_DETECT(ddc->link, type2_dongle_buf,
- sizeof(type2_dongle_buf),
- "DP-DVI passive dongle %dMhz: ",
- DP_ADAPTOR_DVI_MAX_TMDS_CLK / 1000);
- } else {
- if (is_valid_hdmi_signature == true) {
- *dongle = DISPLAY_DONGLE_DP_HDMI_DONGLE;
-
- CONN_DATA_DETECT(ddc->link, type2_dongle_buf,
- sizeof(type2_dongle_buf),
- "Type 2 DP-HDMI passive dongle %dMhz: ",
- max_tmds_clk);
- } else {
- *dongle = DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE;
-
- CONN_DATA_DETECT(ddc->link, type2_dongle_buf,
- sizeof(type2_dongle_buf),
- "Type 2 DP-HDMI passive dongle (no signature) %dMhz: ",
- max_tmds_clk);
-
- }
-
- /* Multiply by 1000 to convert to kHz. */
- sink_cap->max_hdmi_pixel_clock =
- max_tmds_clk * 1000;
- }
- sink_cap->is_dongle_type_one = false;
-
- } else {
- if (is_valid_hdmi_signature == true) {
- *dongle = DISPLAY_DONGLE_DP_HDMI_DONGLE;
-
- CONN_DATA_DETECT(ddc->link, type2_dongle_buf,
- sizeof(type2_dongle_buf),
- "Type 1 DP-HDMI passive dongle %dMhz: ",
- sink_cap->max_hdmi_pixel_clock / 1000);
- } else {
- *dongle = DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE;
-
- CONN_DATA_DETECT(ddc->link, type2_dongle_buf,
- sizeof(type2_dongle_buf),
- "Type 1 DP-HDMI passive dongle (no signature) %dMhz: ",
- sink_cap->max_hdmi_pixel_clock / 1000);
- }
- sink_cap->is_dongle_type_one = true;
- }
+ retrieved += payload_length;
+ } while (retrieved < payload->length && ret == true);
- return;
+ return ret;
}
-enum {
- DP_SINK_CAP_SIZE =
- DP_EDP_CONFIGURATION_CAP - DP_DPCD_REV + 1
-};
-
-bool dal_ddc_service_query_ddc_data(
+bool link_query_ddc_data(
struct ddc_service *ddc,
uint32_t address,
uint8_t *write_buf,
@@ -530,7 +314,7 @@ bool dal_ddc_service_query_ddc_data(
{
bool success = true;
uint32_t payload_size =
- dal_ddc_service_is_in_aux_transaction_mode(ddc) ?
+ link_is_in_aux_transaction_mode(ddc) ?
DEFAULT_AUX_MAX_DATA_SIZE : EDID_SEGMENT_SIZE;
uint32_t write_payloads =
@@ -544,13 +328,13 @@ bool dal_ddc_service_query_ddc_data(
if (!payloads_num)
return false;
- if (dal_ddc_service_is_in_aux_transaction_mode(ddc)) {
+ if (link_is_in_aux_transaction_mode(ddc)) {
struct aux_payload payload;
payload.i2c_over_aux = true;
payload.address = address;
payload.reply = NULL;
- payload.defer_delay = get_defer_delay(ddc);
+ payload.defer_delay = link_get_aux_defer_delay(ddc);
payload.write_status_update = false;
if (write_size != 0) {
@@ -562,7 +346,7 @@ bool dal_ddc_service_query_ddc_data(
payload.length = write_size;
payload.data = write_buf;
- success = dal_ddc_submit_aux_command(ddc, &payload);
+ success = submit_aux_command(ddc, &payload);
}
if (read_size != 0 && success) {
@@ -574,7 +358,7 @@ bool dal_ddc_service_query_ddc_data(
payload.length = read_size;
payload.data = read_buf;
- success = dal_ddc_submit_aux_command(ddc, &payload);
+ success = submit_aux_command(ddc, &payload);
}
} else {
struct i2c_command command = {0};
@@ -588,10 +372,10 @@ bool dal_ddc_service_query_ddc_data(
command.engine = DDC_I2C_COMMAND_ENGINE;
command.speed = ddc->ctx->dc->caps.i2c_speed_in_khz;
- dal_ddc_i2c_payloads_add(
+ i2c_payloads_add(
&payloads, address, write_size, write_buf, true);
- dal_ddc_i2c_payloads_add(
+ i2c_payloads_add(
&payloads, address, read_size, read_buf, false);
command.number_of_payloads =
@@ -608,51 +392,6 @@ bool dal_ddc_service_query_ddc_data(
return success;
}
-bool dal_ddc_submit_aux_command(struct ddc_service *ddc,
- struct aux_payload *payload)
-{
- uint32_t retrieved = 0;
- bool ret = false;
-
- if (!ddc)
- return false;
-
- if (!payload)
- return false;
-
- do {
- struct aux_payload current_payload;
- bool is_end_of_payload = (retrieved + DEFAULT_AUX_MAX_DATA_SIZE) >=
- payload->length;
- uint32_t payload_length = is_end_of_payload ?
- payload->length - retrieved : DEFAULT_AUX_MAX_DATA_SIZE;
-
- current_payload.address = payload->address;
- current_payload.data = &payload->data[retrieved];
- current_payload.defer_delay = payload->defer_delay;
- current_payload.i2c_over_aux = payload->i2c_over_aux;
- current_payload.length = payload_length;
- /* set mot (middle of transaction) to false if it is the last payload */
- current_payload.mot = is_end_of_payload ? payload->mot:true;
- current_payload.write_status_update = false;
- current_payload.reply = payload->reply;
- current_payload.write = payload->write;
-
- ret = dc_link_aux_transfer_with_retries(ddc, &current_payload);
-
- retrieved += payload_length;
- } while (retrieved < payload->length && ret == true);
-
- return ret;
-}
-
-/* dc_link_aux_transfer_raw() - Attempt to transfer
- * the given aux payload. This function does not perform
- * retries or handle error states. The reply is returned
- * in the payload->reply and the result through
- * *operation_result. Returns the number of bytes transferred,
- * or -1 on a failure.
- */
int dc_link_aux_transfer_raw(struct ddc_service *ddc,
struct aux_payload *payload,
enum aux_return_code_type *operation_result)
@@ -665,22 +404,14 @@ int dc_link_aux_transfer_raw(struct ddc_service *ddc,
}
}
-/* dc_link_aux_transfer_with_retries() - Attempt to submit an
- * aux payload, retrying on timeouts, defers, and busy states
- * as outlined in the DP spec. Returns true if the request
- * was successful.
- *
- * Unless you want to implement your own retry semantics, this
- * is probably the one you want.
- */
-bool dc_link_aux_transfer_with_retries(struct ddc_service *ddc,
+bool link_aux_transfer_with_retries_no_mutex(struct ddc_service *ddc,
struct aux_payload *payload)
{
return dce_aux_transfer_with_retries(ddc, payload);
}
-bool dc_link_aux_try_to_configure_timeout(struct ddc_service *ddc,
+bool try_to_configure_aux_timeout(struct ddc_service *ddc,
uint32_t timeout)
{
bool result = false;
@@ -713,20 +444,12 @@ bool dc_link_aux_try_to_configure_timeout(struct ddc_service *ddc,
return result;
}
-/*test only function*/
-void dal_ddc_service_set_ddc_pin(
- struct ddc_service *ddc_service,
- struct ddc *ddc)
-{
- ddc_service->ddc_pin = ddc;
-}
-
-struct ddc *dal_ddc_service_get_ddc_pin(struct ddc_service *ddc_service)
+struct ddc *get_ddc_pin(struct ddc_service *ddc_service)
{
return ddc_service->ddc_pin;
}
-void dal_ddc_service_write_scdc_data(struct ddc_service *ddc_service,
+void write_scdc_data(struct ddc_service *ddc_service,
uint32_t pix_clk,
bool lte_340_scramble)
{
@@ -741,13 +464,13 @@ void dal_ddc_service_write_scdc_data(struct ddc_service *ddc_service,
ddc_service->link->local_sink->edid_caps.panel_patch.skip_scdc_overwrite)
return;
- dal_ddc_service_query_ddc_data(ddc_service, slave_address, &offset,
+ link_query_ddc_data(ddc_service, slave_address, &offset,
sizeof(offset), &sink_version, sizeof(sink_version));
if (sink_version == 1) {
/*Source Version = 1*/
write_buffer[0] = HDMI_SCDC_SOURCE_VERSION;
write_buffer[1] = 1;
- dal_ddc_service_query_ddc_data(ddc_service, slave_address,
+ link_query_ddc_data(ddc_service, slave_address,
write_buffer, sizeof(write_buffer), NULL, 0);
/*Read Request from SCDC caps*/
}
@@ -760,11 +483,11 @@ void dal_ddc_service_write_scdc_data(struct ddc_service *ddc_service,
} else {
write_buffer[1] = 0;
}
- dal_ddc_service_query_ddc_data(ddc_service, slave_address, write_buffer,
+ link_query_ddc_data(ddc_service, slave_address, write_buffer,
sizeof(write_buffer), NULL, 0);
}
-void dal_ddc_service_read_scdc_data(struct ddc_service *ddc_service)
+void read_scdc_data(struct ddc_service *ddc_service)
{
uint8_t slave_address = HDMI_SCDC_ADDRESS;
uint8_t offset = HDMI_SCDC_TMDS_CONFIG;
@@ -774,20 +497,19 @@ void dal_ddc_service_read_scdc_data(struct ddc_service *ddc_service)
ddc_service->link->local_sink->edid_caps.panel_patch.skip_scdc_overwrite)
return;
- dal_ddc_service_query_ddc_data(ddc_service, slave_address, &offset,
+ link_query_ddc_data(ddc_service, slave_address, &offset,
sizeof(offset), &tmds_config, sizeof(tmds_config));
if (tmds_config & 0x1) {
union hdmi_scdc_status_flags_data status_data = {0};
uint8_t scramble_status = 0;
offset = HDMI_SCDC_SCRAMBLER_STATUS;
- dal_ddc_service_query_ddc_data(ddc_service, slave_address,
+ link_query_ddc_data(ddc_service, slave_address,
&offset, sizeof(offset), &scramble_status,
sizeof(scramble_status));
offset = HDMI_SCDC_STATUS_FLAGS;
- dal_ddc_service_query_ddc_data(ddc_service, slave_address,
+ link_query_ddc_data(ddc_service, slave_address,
&offset, sizeof(offset), &status_data.byte,
sizeof(status_data.byte));
}
}
-
diff --git a/drivers/gpu/drm/amd/display/include/i2caux_interface.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.h
index 418fbf8c5c3a..86e9d2e886d6 100644
--- a/drivers/gpu/drm/amd/display/include/i2caux_interface.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.h
@@ -23,60 +23,38 @@
*
*/
-#ifndef __DAL_I2CAUX_INTERFACE_H__
-#define __DAL_I2CAUX_INTERFACE_H__
+#ifndef __DAL_DDC_SERVICE_H__
+#define __DAL_DDC_SERVICE_H__
-#include "dc_types.h"
-#include "gpio_service_interface.h"
+#include "link.h"
+#define AUX_POWER_UP_WA_DELAY 500
+#define I2C_OVER_AUX_DEFER_WA_DELAY 70
+#define DPVGA_DONGLE_AUX_DEFER_WA_DELAY 40
+#define I2C_OVER_AUX_DEFER_WA_DELAY_1MS 1
+#define LINK_AUX_DEFAULT_LTTPR_TIMEOUT_PERIOD 3200 /*us*/
-#define DEFAULT_AUX_MAX_DATA_SIZE 16
-#define AUX_MAX_DEFER_WRITE_RETRY 20
+#define EDID_SEGMENT_SIZE 256
-struct aux_payload {
- /* set following flag to read/write I2C data,
- * reset it to read/write DPCD data */
- bool i2c_over_aux;
- /* set following flag to write data,
- * reset it to read data */
- bool write;
- bool mot;
- bool write_status_update;
+void set_ddc_transaction_type(
+ struct ddc_service *ddc,
+ enum ddc_transaction_type type);
- uint32_t address;
- uint32_t length;
- uint8_t *data;
- /*
- * used to return the reply type of the transaction
- * ignored if NULL
- */
- uint8_t *reply;
- /* expressed in milliseconds
- * zero means "use default value"
- */
- uint32_t defer_delay;
+bool try_to_configure_aux_timeout(struct ddc_service *ddc,
+ uint32_t timeout);
-};
+void write_scdc_data(
+ struct ddc_service *ddc_service,
+ uint32_t pix_clk,
+ bool lte_340_scramble);
-struct aux_command {
- struct aux_payload *payloads;
- uint8_t number_of_payloads;
+void read_scdc_data(
+ struct ddc_service *ddc_service);
- /* expressed in milliseconds
- * zero means "use default value" */
- uint32_t defer_delay;
+void set_dongle_type(struct ddc_service *ddc,
+ enum display_dongle_type dongle_type);
- /* zero means "use default value" */
- uint32_t max_defer_write_retry;
+struct ddc *get_ddc_pin(struct ddc_service *ddc_service);
- enum i2c_mot_mode mot;
-};
+#endif /* __DAL_DDC_SERVICE_H__ */
-union aux_config {
- struct {
- uint32_t ALLOW_AUX_WHEN_HPD_LOW:1;
- } bits;
- uint32_t raw;
-};
-
-#endif
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
new file mode 100644
index 000000000000..4874d1bf1dcb
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
@@ -0,0 +1,2246 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+/* FILE POLICY AND INTENDED USAGE:
+ * This file implements dp specific link capability retrieval sequence. It is
+ * responsible for retrieving, parsing, overriding, deciding capability obtained
+ * from dp link. Link capability consists of encoders, DPRXs, cables, retimers,
+ * usb and all other possible backend capabilities. Other components should
+ * include this header file in order to access link capability. Accessing link
+ * capability by dereferencing dc_link outside dp_link_capability is not a
+ * recommended method as it makes the component dependent on the underlying data
+ * structure used to represent link capability instead of function interfaces.
+ */
+
+#include "link_dp_capability.h"
+#include "link_ddc.h"
+#include "link_dpcd.h"
+#include "link_dp_dpia.h"
+#include "link_dp_phy.h"
+#include "link_edp_panel_control.h"
+#include "link_dp_irq_handler.h"
+#include "link/accessories/link_dp_trace.h"
+#include "link_dp_training.h"
+#include "atomfirmware.h"
+#include "resource.h"
+#include "link_enc_cfg.h"
+#include "dc_dmub_srv.h"
+#include "gpio_service_interface.h"
+
+#define DC_LOGGER \
+ link->ctx->logger
+#define DC_TRACE_LEVEL_MESSAGE(...) /* do nothing */
+
+#ifndef MAX
+#define MAX(X, Y) ((X) > (Y) ? (X) : (Y))
+#endif
+#ifndef MIN
+#define MIN(X, Y) ((X) < (Y) ? (X) : (Y))
+#endif
+
+#define LINK_AUX_DEFAULT_TIMEOUT_PERIOD 552 /*us*/
+
+struct dp_lt_fallback_entry {
+ enum dc_lane_count lane_count;
+ enum dc_link_rate link_rate;
+};
+
+static const struct dp_lt_fallback_entry dp_lt_fallbacks[] = {
+ /* This link training fallback array is ordered by
+ * link bandwidth from highest to lowest.
+ * DP specs makes it a normative policy to always
+ * choose the next highest link bandwidth during
+ * link training fallback.
+ */
+ {LANE_COUNT_FOUR, LINK_RATE_UHBR20},
+ {LANE_COUNT_FOUR, LINK_RATE_UHBR13_5},
+ {LANE_COUNT_TWO, LINK_RATE_UHBR20},
+ {LANE_COUNT_FOUR, LINK_RATE_UHBR10},
+ {LANE_COUNT_TWO, LINK_RATE_UHBR13_5},
+ {LANE_COUNT_FOUR, LINK_RATE_HIGH3},
+ {LANE_COUNT_ONE, LINK_RATE_UHBR20},
+ {LANE_COUNT_TWO, LINK_RATE_UHBR10},
+ {LANE_COUNT_FOUR, LINK_RATE_HIGH2},
+ {LANE_COUNT_ONE, LINK_RATE_UHBR13_5},
+ {LANE_COUNT_TWO, LINK_RATE_HIGH3},
+ {LANE_COUNT_ONE, LINK_RATE_UHBR10},
+ {LANE_COUNT_TWO, LINK_RATE_HIGH2},
+ {LANE_COUNT_FOUR, LINK_RATE_HIGH},
+ {LANE_COUNT_ONE, LINK_RATE_HIGH3},
+ {LANE_COUNT_FOUR, LINK_RATE_LOW},
+ {LANE_COUNT_ONE, LINK_RATE_HIGH2},
+ {LANE_COUNT_TWO, LINK_RATE_HIGH},
+ {LANE_COUNT_TWO, LINK_RATE_LOW},
+ {LANE_COUNT_ONE, LINK_RATE_HIGH},
+ {LANE_COUNT_ONE, LINK_RATE_LOW},
+};
+
+static const struct dc_link_settings fail_safe_link_settings = {
+ .lane_count = LANE_COUNT_ONE,
+ .link_rate = LINK_RATE_LOW,
+ .link_spread = LINK_SPREAD_DISABLED,
+};
+
+bool is_dp_active_dongle(const struct dc_link *link)
+{
+ return (link->dpcd_caps.dongle_type >= DISPLAY_DONGLE_DP_VGA_CONVERTER) &&
+ (link->dpcd_caps.dongle_type <= DISPLAY_DONGLE_DP_HDMI_CONVERTER);
+}
+
+bool is_dp_branch_device(const struct dc_link *link)
+{
+ return link->dpcd_caps.is_branch_dev;
+}
+
+static int translate_dpcd_max_bpc(enum dpcd_downstream_port_max_bpc bpc)
+{
+ switch (bpc) {
+ case DOWN_STREAM_MAX_8BPC:
+ return 8;
+ case DOWN_STREAM_MAX_10BPC:
+ return 10;
+ case DOWN_STREAM_MAX_12BPC:
+ return 12;
+ case DOWN_STREAM_MAX_16BPC:
+ return 16;
+ default:
+ break;
+ }
+
+ return -1;
+}
+
+uint8_t dp_parse_lttpr_repeater_count(uint8_t lttpr_repeater_count)
+{
+ switch (lttpr_repeater_count) {
+ case 0x80: // 1 lttpr repeater
+ return 1;
+ case 0x40: // 2 lttpr repeaters
+ return 2;
+ case 0x20: // 3 lttpr repeaters
+ return 3;
+ case 0x10: // 4 lttpr repeaters
+ return 4;
+ case 0x08: // 5 lttpr repeaters
+ return 5;
+ case 0x04: // 6 lttpr repeaters
+ return 6;
+ case 0x02: // 7 lttpr repeaters
+ return 7;
+ case 0x01: // 8 lttpr repeaters
+ return 8;
+ default:
+ break;
+ }
+ return 0; // invalid value
+}
+
+uint32_t dc_link_bw_kbps_from_raw_frl_link_rate_data(uint8_t bw)
+{
+ switch (bw) {
+ case 0b001:
+ return 9000000;
+ case 0b010:
+ return 18000000;
+ case 0b011:
+ return 24000000;
+ case 0b100:
+ return 32000000;
+ case 0b101:
+ return 40000000;
+ case 0b110:
+ return 48000000;
+ }
+
+ return 0;
+}
+
+static enum dc_link_rate linkRateInKHzToLinkRateMultiplier(uint32_t link_rate_in_khz)
+{
+ enum dc_link_rate link_rate;
+ // LinkRate is normally stored as a multiplier of 0.27 Gbps per lane. Do the translation.
+ switch (link_rate_in_khz) {
+ case 1620000:
+ link_rate = LINK_RATE_LOW; // Rate_1 (RBR) - 1.62 Gbps/Lane
+ break;
+ case 2160000:
+ link_rate = LINK_RATE_RATE_2; // Rate_2 - 2.16 Gbps/Lane
+ break;
+ case 2430000:
+ link_rate = LINK_RATE_RATE_3; // Rate_3 - 2.43 Gbps/Lane
+ break;
+ case 2700000:
+ link_rate = LINK_RATE_HIGH; // Rate_4 (HBR) - 2.70 Gbps/Lane
+ break;
+ case 3240000:
+ link_rate = LINK_RATE_RBR2; // Rate_5 (RBR2)- 3.24 Gbps/Lane
+ break;
+ case 4320000:
+ link_rate = LINK_RATE_RATE_6; // Rate_6 - 4.32 Gbps/Lane
+ break;
+ case 5400000:
+ link_rate = LINK_RATE_HIGH2; // Rate_7 (HBR2)- 5.40 Gbps/Lane
+ break;
+ case 8100000:
+ link_rate = LINK_RATE_HIGH3; // Rate_8 (HBR3)- 8.10 Gbps/Lane
+ break;
+ default:
+ link_rate = LINK_RATE_UNKNOWN;
+ break;
+ }
+ return link_rate;
+}
+
+static union dp_cable_id intersect_cable_id(
+ union dp_cable_id *a, union dp_cable_id *b)
+{
+ union dp_cable_id out;
+
+ out.bits.UHBR10_20_CAPABILITY = MIN(a->bits.UHBR10_20_CAPABILITY,
+ b->bits.UHBR10_20_CAPABILITY);
+ out.bits.UHBR13_5_CAPABILITY = MIN(a->bits.UHBR13_5_CAPABILITY,
+ b->bits.UHBR13_5_CAPABILITY);
+ out.bits.CABLE_TYPE = MAX(a->bits.CABLE_TYPE, b->bits.CABLE_TYPE);
+
+ return out;
+}
+
+/*
+ * Return PCON's post FRL link training supported BW if its non-zero, otherwise return max_supported_frl_bw.
+ */
+static uint32_t intersect_frl_link_bw_support(
+ const uint32_t max_supported_frl_bw_in_kbps,
+ const union hdmi_encoded_link_bw hdmi_encoded_link_bw)
+{
+ uint32_t supported_bw_in_kbps = max_supported_frl_bw_in_kbps;
+
+ // HDMI_ENCODED_LINK_BW bits are only valid if HDMI Link Configuration bit is 1 (FRL mode)
+ if (hdmi_encoded_link_bw.bits.FRL_MODE) {
+ if (hdmi_encoded_link_bw.bits.BW_48Gbps)
+ supported_bw_in_kbps = 48000000;
+ else if (hdmi_encoded_link_bw.bits.BW_40Gbps)
+ supported_bw_in_kbps = 40000000;
+ else if (hdmi_encoded_link_bw.bits.BW_32Gbps)
+ supported_bw_in_kbps = 32000000;
+ else if (hdmi_encoded_link_bw.bits.BW_24Gbps)
+ supported_bw_in_kbps = 24000000;
+ else if (hdmi_encoded_link_bw.bits.BW_18Gbps)
+ supported_bw_in_kbps = 18000000;
+ else if (hdmi_encoded_link_bw.bits.BW_9Gbps)
+ supported_bw_in_kbps = 9000000;
+ }
+
+ return supported_bw_in_kbps;
+}
+
+static enum clock_source_id get_clock_source_id(struct dc_link *link)
+{
+ enum clock_source_id dp_cs_id = CLOCK_SOURCE_ID_UNDEFINED;
+ struct clock_source *dp_cs = link->dc->res_pool->dp_clock_source;
+
+ if (dp_cs != NULL) {
+ dp_cs_id = dp_cs->id;
+ } else {
+ /*
+ * dp clock source is not initialized for some reason.
+ * Should not happen, CLOCK_SOURCE_ID_EXTERNAL will be used
+ */
+ ASSERT(dp_cs);
+ }
+
+ return dp_cs_id;
+}
+
+static void dp_wa_power_up_0010FA(struct dc_link *link, uint8_t *dpcd_data,
+ int length)
+{
+ int retry = 0;
+
+ if (!link->dpcd_caps.dpcd_rev.raw) {
+ do {
+ dc_link_dp_receiver_power_ctrl(link, true);
+ core_link_read_dpcd(link, DP_DPCD_REV,
+ dpcd_data, length);
+ link->dpcd_caps.dpcd_rev.raw = dpcd_data[
+ DP_DPCD_REV -
+ DP_DPCD_REV];
+ } while (retry++ < 4 && !link->dpcd_caps.dpcd_rev.raw);
+ }
+
+ if (link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_VGA_CONVERTER) {
+ switch (link->dpcd_caps.branch_dev_id) {
+ /* 0010FA active dongles (DP-VGA, DP-DLDVI converters) power down
+ * all internal circuits including AUX communication preventing
+ * reading DPCD table and EDID (spec violation).
+ * Encoder will skip DP RX power down on disable_output to
+ * keep receiver powered all the time.*/
+ case DP_BRANCH_DEVICE_ID_0010FA:
+ case DP_BRANCH_DEVICE_ID_0080E1:
+ case DP_BRANCH_DEVICE_ID_00E04C:
+ link->wa_flags.dp_keep_receiver_powered = true;
+ break;
+
+ /* TODO: May need work around for other dongles. */
+ default:
+ link->wa_flags.dp_keep_receiver_powered = false;
+ break;
+ }
+ } else
+ link->wa_flags.dp_keep_receiver_powered = false;
+}
+
+bool dc_link_is_fec_supported(const struct dc_link *link)
+{
+ /* TODO - use asic cap instead of link_enc->features
+ * we no longer know which link enc to use for this link before commit
+ */
+ struct link_encoder *link_enc = NULL;
+
+ link_enc = link_enc_cfg_get_link_enc(link);
+ ASSERT(link_enc);
+
+ return (dc_is_dp_signal(link->connector_signal) && link_enc &&
+ link_enc->features.fec_supported &&
+ link->dpcd_caps.fec_cap.bits.FEC_CAPABLE &&
+ !IS_FPGA_MAXIMUS_DC(link->ctx->dce_environment));
+}
+
+bool dc_link_should_enable_fec(const struct dc_link *link)
+{
+ bool force_disable = false;
+
+ if (link->fec_state == dc_link_fec_enabled)
+ force_disable = false;
+ else if (link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT_MST &&
+ link->local_sink &&
+ link->local_sink->edid_caps.panel_patch.disable_fec)
+ force_disable = true;
+ else if (link->connector_signal == SIGNAL_TYPE_EDP
+ && (link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.
+ dsc_support.DSC_SUPPORT == false
+ || link->panel_config.dsc.disable_dsc_edp
+ || !link->dc->caps.edp_dsc_support))
+ force_disable = true;
+
+ return !force_disable && dc_link_is_fec_supported(link);
+}
+
+bool link_is_dp_128b_132b_signal(struct pipe_ctx *pipe_ctx)
+{
+ /* If this assert is hit then we have a link encoder dynamic management issue */
+ ASSERT(pipe_ctx->stream_res.hpo_dp_stream_enc ? pipe_ctx->link_res.hpo_dp_link_enc != NULL : true);
+ return (pipe_ctx->stream_res.hpo_dp_stream_enc &&
+ pipe_ctx->link_res.hpo_dp_link_enc &&
+ dc_is_dp_signal(pipe_ctx->stream->signal));
+}
+
+bool dp_is_lttpr_present(struct dc_link *link)
+{
+ return (dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) != 0 &&
+ link->dpcd_caps.lttpr_caps.max_lane_count > 0 &&
+ link->dpcd_caps.lttpr_caps.max_lane_count <= 4 &&
+ link->dpcd_caps.lttpr_caps.revision.raw >= 0x14);
+}
+
+/* in DP compliance test, DPR-120 may have
+ * a random value in its MAX_LINK_BW dpcd field.
+ * We map it to the maximum supported link rate that
+ * is smaller than MAX_LINK_BW in this case.
+ */
+static enum dc_link_rate get_link_rate_from_max_link_bw(
+ uint8_t max_link_bw)
+{
+ enum dc_link_rate link_rate;
+
+ if (max_link_bw >= LINK_RATE_HIGH3) {
+ link_rate = LINK_RATE_HIGH3;
+ } else if (max_link_bw < LINK_RATE_HIGH3
+ && max_link_bw >= LINK_RATE_HIGH2) {
+ link_rate = LINK_RATE_HIGH2;
+ } else if (max_link_bw < LINK_RATE_HIGH2
+ && max_link_bw >= LINK_RATE_HIGH) {
+ link_rate = LINK_RATE_HIGH;
+ } else if (max_link_bw < LINK_RATE_HIGH
+ && max_link_bw >= LINK_RATE_LOW) {
+ link_rate = LINK_RATE_LOW;
+ } else {
+ link_rate = LINK_RATE_UNKNOWN;
+ }
+
+ return link_rate;
+}
+
+static enum dc_link_rate get_lttpr_max_link_rate(struct dc_link *link)
+{
+ enum dc_link_rate lttpr_max_link_rate = link->dpcd_caps.lttpr_caps.max_link_rate;
+
+ if (link->dpcd_caps.lttpr_caps.supported_128b_132b_rates.bits.UHBR20)
+ lttpr_max_link_rate = LINK_RATE_UHBR20;
+ else if (link->dpcd_caps.lttpr_caps.supported_128b_132b_rates.bits.UHBR13_5)
+ lttpr_max_link_rate = LINK_RATE_UHBR13_5;
+ else if (link->dpcd_caps.lttpr_caps.supported_128b_132b_rates.bits.UHBR10)
+ lttpr_max_link_rate = LINK_RATE_UHBR10;
+
+ return lttpr_max_link_rate;
+}
+
+static enum dc_link_rate get_cable_max_link_rate(struct dc_link *link)
+{
+ enum dc_link_rate cable_max_link_rate = LINK_RATE_UNKNOWN;
+
+ if (link->dpcd_caps.cable_id.bits.UHBR10_20_CAPABILITY & DP_UHBR20)
+ cable_max_link_rate = LINK_RATE_UHBR20;
+ else if (link->dpcd_caps.cable_id.bits.UHBR13_5_CAPABILITY)
+ cable_max_link_rate = LINK_RATE_UHBR13_5;
+ else if (link->dpcd_caps.cable_id.bits.UHBR10_20_CAPABILITY & DP_UHBR10)
+ cable_max_link_rate = LINK_RATE_UHBR10;
+
+ return cable_max_link_rate;
+}
+
+static inline bool reached_minimum_lane_count(enum dc_lane_count lane_count)
+{
+ return lane_count <= LANE_COUNT_ONE;
+}
+
+static inline bool reached_minimum_link_rate(enum dc_link_rate link_rate)
+{
+ return link_rate <= LINK_RATE_LOW;
+}
+
+static enum dc_lane_count reduce_lane_count(enum dc_lane_count lane_count)
+{
+ switch (lane_count) {
+ case LANE_COUNT_FOUR:
+ return LANE_COUNT_TWO;
+ case LANE_COUNT_TWO:
+ return LANE_COUNT_ONE;
+ case LANE_COUNT_ONE:
+ return LANE_COUNT_UNKNOWN;
+ default:
+ return LANE_COUNT_UNKNOWN;
+ }
+}
+
+static enum dc_link_rate reduce_link_rate(enum dc_link_rate link_rate)
+{
+ switch (link_rate) {
+ case LINK_RATE_UHBR20:
+ return LINK_RATE_UHBR13_5;
+ case LINK_RATE_UHBR13_5:
+ return LINK_RATE_UHBR10;
+ case LINK_RATE_UHBR10:
+ return LINK_RATE_HIGH3;
+ case LINK_RATE_HIGH3:
+ return LINK_RATE_HIGH2;
+ case LINK_RATE_HIGH2:
+ return LINK_RATE_HIGH;
+ case LINK_RATE_HIGH:
+ return LINK_RATE_LOW;
+ case LINK_RATE_LOW:
+ return LINK_RATE_UNKNOWN;
+ default:
+ return LINK_RATE_UNKNOWN;
+ }
+}
+
+static enum dc_lane_count increase_lane_count(enum dc_lane_count lane_count)
+{
+ switch (lane_count) {
+ case LANE_COUNT_ONE:
+ return LANE_COUNT_TWO;
+ case LANE_COUNT_TWO:
+ return LANE_COUNT_FOUR;
+ default:
+ return LANE_COUNT_UNKNOWN;
+ }
+}
+
+static enum dc_link_rate increase_link_rate(struct dc_link *link,
+ enum dc_link_rate link_rate)
+{
+ switch (link_rate) {
+ case LINK_RATE_LOW:
+ return LINK_RATE_HIGH;
+ case LINK_RATE_HIGH:
+ return LINK_RATE_HIGH2;
+ case LINK_RATE_HIGH2:
+ return LINK_RATE_HIGH3;
+ case LINK_RATE_HIGH3:
+ return LINK_RATE_UHBR10;
+ case LINK_RATE_UHBR10:
+ /* upto DP2.x specs UHBR13.5 is the only link rate that could be
+ * not supported by DPRX when higher link rate is supported.
+ * so we treat it as a special case for code simplicity. When we
+ * have new specs with more link rates like this, we should
+ * consider a more generic solution to handle discrete link
+ * rate capabilities.
+ */
+ return link->dpcd_caps.dp_128b_132b_supported_link_rates.bits.UHBR13_5 ?
+ LINK_RATE_UHBR13_5 : LINK_RATE_UHBR20;
+ case LINK_RATE_UHBR13_5:
+ return LINK_RATE_UHBR20;
+ default:
+ return LINK_RATE_UNKNOWN;
+ }
+}
+
+static bool decide_fallback_link_setting_max_bw_policy(
+ struct dc_link *link,
+ const struct dc_link_settings *max,
+ struct dc_link_settings *cur,
+ enum link_training_result training_result)
+{
+ uint8_t cur_idx = 0, next_idx;
+ bool found = false;
+
+ if (training_result == LINK_TRAINING_ABORT)
+ return false;
+
+ while (cur_idx < ARRAY_SIZE(dp_lt_fallbacks))
+ /* find current index */
+ if (dp_lt_fallbacks[cur_idx].lane_count == cur->lane_count &&
+ dp_lt_fallbacks[cur_idx].link_rate == cur->link_rate)
+ break;
+ else
+ cur_idx++;
+
+ next_idx = cur_idx + 1;
+
+ while (next_idx < ARRAY_SIZE(dp_lt_fallbacks))
+ /* find next index */
+ if (dp_lt_fallbacks[next_idx].lane_count > max->lane_count ||
+ dp_lt_fallbacks[next_idx].link_rate > max->link_rate)
+ next_idx++;
+ else if (dp_lt_fallbacks[next_idx].link_rate == LINK_RATE_UHBR13_5 &&
+ link->dpcd_caps.dp_128b_132b_supported_link_rates.bits.UHBR13_5 == 0)
+ /* upto DP2.x specs UHBR13.5 is the only link rate that
+ * could be not supported by DPRX when higher link rate
+ * is supported. so we treat it as a special case for
+ * code simplicity. When we have new specs with more
+ * link rates like this, we should consider a more
+ * generic solution to handle discrete link rate
+ * capabilities.
+ */
+ next_idx++;
+ else
+ break;
+
+ if (next_idx < ARRAY_SIZE(dp_lt_fallbacks)) {
+ cur->lane_count = dp_lt_fallbacks[next_idx].lane_count;
+ cur->link_rate = dp_lt_fallbacks[next_idx].link_rate;
+ found = true;
+ }
+
+ return found;
+}
+
+/*
+ * function: set link rate and lane count fallback based
+ * on current link setting and last link training result
+ * return value:
+ * true - link setting could be set
+ * false - has reached minimum setting
+ * and no further fallback could be done
+ */
+bool decide_fallback_link_setting(
+ struct dc_link *link,
+ struct dc_link_settings *max,
+ struct dc_link_settings *cur,
+ enum link_training_result training_result)
+{
+ if (link_dp_get_encoding_format(max) == DP_128b_132b_ENCODING ||
+ link->dc->debug.force_dp2_lt_fallback_method)
+ return decide_fallback_link_setting_max_bw_policy(link, max,
+ cur, training_result);
+
+ switch (training_result) {
+ case LINK_TRAINING_CR_FAIL_LANE0:
+ case LINK_TRAINING_CR_FAIL_LANE1:
+ case LINK_TRAINING_CR_FAIL_LANE23:
+ case LINK_TRAINING_LQA_FAIL:
+ {
+ if (!reached_minimum_link_rate(cur->link_rate)) {
+ cur->link_rate = reduce_link_rate(cur->link_rate);
+ } else if (!reached_minimum_lane_count(cur->lane_count)) {
+ cur->link_rate = max->link_rate;
+ if (training_result == LINK_TRAINING_CR_FAIL_LANE0)
+ return false;
+ else if (training_result == LINK_TRAINING_CR_FAIL_LANE1)
+ cur->lane_count = LANE_COUNT_ONE;
+ else if (training_result == LINK_TRAINING_CR_FAIL_LANE23)
+ cur->lane_count = LANE_COUNT_TWO;
+ else
+ cur->lane_count = reduce_lane_count(cur->lane_count);
+ } else {
+ return false;
+ }
+ break;
+ }
+ case LINK_TRAINING_EQ_FAIL_EQ:
+ case LINK_TRAINING_EQ_FAIL_CR_PARTIAL:
+ {
+ if (!reached_minimum_lane_count(cur->lane_count)) {
+ cur->lane_count = reduce_lane_count(cur->lane_count);
+ } else if (!reached_minimum_link_rate(cur->link_rate)) {
+ cur->link_rate = reduce_link_rate(cur->link_rate);
+ /* Reduce max link rate to avoid potential infinite loop.
+ * Needed so that any subsequent CR_FAIL fallback can't
+ * re-set the link rate higher than the link rate from
+ * the latest EQ_FAIL fallback.
+ */
+ max->link_rate = cur->link_rate;
+ cur->lane_count = max->lane_count;
+ } else {
+ return false;
+ }
+ break;
+ }
+ case LINK_TRAINING_EQ_FAIL_CR:
+ {
+ if (!reached_minimum_link_rate(cur->link_rate)) {
+ cur->link_rate = reduce_link_rate(cur->link_rate);
+ /* Reduce max link rate to avoid potential infinite loop.
+ * Needed so that any subsequent CR_FAIL fallback can't
+ * re-set the link rate higher than the link rate from
+ * the latest EQ_FAIL fallback.
+ */
+ max->link_rate = cur->link_rate;
+ cur->lane_count = max->lane_count;
+ } else {
+ return false;
+ }
+ break;
+ }
+ default:
+ return false;
+ }
+ return true;
+}
+static bool decide_dp_link_settings(struct dc_link *link, struct dc_link_settings *link_setting, uint32_t req_bw)
+{
+ struct dc_link_settings initial_link_setting = {
+ LANE_COUNT_ONE, LINK_RATE_LOW, LINK_SPREAD_DISABLED, false, 0};
+ struct dc_link_settings current_link_setting =
+ initial_link_setting;
+ uint32_t link_bw;
+
+ if (req_bw > dc_link_bandwidth_kbps(link, &link->verified_link_cap))
+ return false;
+
+ /* search for the minimum link setting that:
+ * 1. is supported according to the link training result
+ * 2. could support the b/w requested by the timing
+ */
+ while (current_link_setting.link_rate <=
+ link->verified_link_cap.link_rate) {
+ link_bw = dc_link_bandwidth_kbps(
+ link,
+ &current_link_setting);
+ if (req_bw <= link_bw) {
+ *link_setting = current_link_setting;
+ return true;
+ }
+
+ if (current_link_setting.lane_count <
+ link->verified_link_cap.lane_count) {
+ current_link_setting.lane_count =
+ increase_lane_count(
+ current_link_setting.lane_count);
+ } else {
+ current_link_setting.link_rate =
+ increase_link_rate(link,
+ current_link_setting.link_rate);
+ current_link_setting.lane_count =
+ initial_link_setting.lane_count;
+ }
+ }
+
+ return false;
+}
+
+bool dc_link_decide_edp_link_settings(struct dc_link *link, struct dc_link_settings *link_setting, uint32_t req_bw)
+{
+ struct dc_link_settings initial_link_setting;
+ struct dc_link_settings current_link_setting;
+ uint32_t link_bw;
+
+ /*
+ * edp_supported_link_rates_count is only valid for eDP v1.4 or higher.
+ * Per VESA eDP spec, "The DPCD revision for eDP v1.4 is 13h"
+ */
+ if (link->dpcd_caps.dpcd_rev.raw < DPCD_REV_13 ||
+ link->dpcd_caps.edp_supported_link_rates_count == 0) {
+ *link_setting = link->verified_link_cap;
+ return true;
+ }
+
+ memset(&initial_link_setting, 0, sizeof(initial_link_setting));
+ initial_link_setting.lane_count = LANE_COUNT_ONE;
+ initial_link_setting.link_rate = link->dpcd_caps.edp_supported_link_rates[0];
+ initial_link_setting.link_spread = LINK_SPREAD_DISABLED;
+ initial_link_setting.use_link_rate_set = true;
+ initial_link_setting.link_rate_set = 0;
+ current_link_setting = initial_link_setting;
+
+ /* search for the minimum link setting that:
+ * 1. is supported according to the link training result
+ * 2. could support the b/w requested by the timing
+ */
+ while (current_link_setting.link_rate <=
+ link->verified_link_cap.link_rate) {
+ link_bw = dc_link_bandwidth_kbps(
+ link,
+ &current_link_setting);
+ if (req_bw <= link_bw) {
+ *link_setting = current_link_setting;
+ return true;
+ }
+
+ if (current_link_setting.lane_count <
+ link->verified_link_cap.lane_count) {
+ current_link_setting.lane_count =
+ increase_lane_count(
+ current_link_setting.lane_count);
+ } else {
+ if (current_link_setting.link_rate_set < link->dpcd_caps.edp_supported_link_rates_count) {
+ current_link_setting.link_rate_set++;
+ current_link_setting.link_rate =
+ link->dpcd_caps.edp_supported_link_rates[current_link_setting.link_rate_set];
+ current_link_setting.lane_count =
+ initial_link_setting.lane_count;
+ } else
+ break;
+ }
+ }
+ return false;
+}
+
+bool decide_edp_link_settings_with_dsc(struct dc_link *link,
+ struct dc_link_settings *link_setting,
+ uint32_t req_bw,
+ enum dc_link_rate max_link_rate)
+{
+ struct dc_link_settings initial_link_setting;
+ struct dc_link_settings current_link_setting;
+ uint32_t link_bw;
+
+ unsigned int policy = 0;
+
+ policy = link->panel_config.dsc.force_dsc_edp_policy;
+ if (max_link_rate == LINK_RATE_UNKNOWN)
+ max_link_rate = link->verified_link_cap.link_rate;
+ /*
+ * edp_supported_link_rates_count is only valid for eDP v1.4 or higher.
+ * Per VESA eDP spec, "The DPCD revision for eDP v1.4 is 13h"
+ */
+ if ((link->dpcd_caps.dpcd_rev.raw < DPCD_REV_13 ||
+ link->dpcd_caps.edp_supported_link_rates_count == 0)) {
+ /* for DSC enabled case, we search for minimum lane count */
+ memset(&initial_link_setting, 0, sizeof(initial_link_setting));
+ initial_link_setting.lane_count = LANE_COUNT_ONE;
+ initial_link_setting.link_rate = LINK_RATE_LOW;
+ initial_link_setting.link_spread = LINK_SPREAD_DISABLED;
+ initial_link_setting.use_link_rate_set = false;
+ initial_link_setting.link_rate_set = 0;
+ current_link_setting = initial_link_setting;
+ if (req_bw > dc_link_bandwidth_kbps(link, &link->verified_link_cap))
+ return false;
+
+ /* search for the minimum link setting that:
+ * 1. is supported according to the link training result
+ * 2. could support the b/w requested by the timing
+ */
+ while (current_link_setting.link_rate <=
+ max_link_rate) {
+ link_bw = dc_link_bandwidth_kbps(
+ link,
+ &current_link_setting);
+ if (req_bw <= link_bw) {
+ *link_setting = current_link_setting;
+ return true;
+ }
+ if (policy) {
+ /* minimize lane */
+ if (current_link_setting.link_rate < max_link_rate) {
+ current_link_setting.link_rate =
+ increase_link_rate(link,
+ current_link_setting.link_rate);
+ } else {
+ if (current_link_setting.lane_count <
+ link->verified_link_cap.lane_count) {
+ current_link_setting.lane_count =
+ increase_lane_count(
+ current_link_setting.lane_count);
+ current_link_setting.link_rate = initial_link_setting.link_rate;
+ } else
+ break;
+ }
+ } else {
+ /* minimize link rate */
+ if (current_link_setting.lane_count <
+ link->verified_link_cap.lane_count) {
+ current_link_setting.lane_count =
+ increase_lane_count(
+ current_link_setting.lane_count);
+ } else {
+ current_link_setting.link_rate =
+ increase_link_rate(link,
+ current_link_setting.link_rate);
+ current_link_setting.lane_count =
+ initial_link_setting.lane_count;
+ }
+ }
+ }
+ return false;
+ }
+
+ /* if optimize edp link is supported */
+ memset(&initial_link_setting, 0, sizeof(initial_link_setting));
+ initial_link_setting.lane_count = LANE_COUNT_ONE;
+ initial_link_setting.link_rate = link->dpcd_caps.edp_supported_link_rates[0];
+ initial_link_setting.link_spread = LINK_SPREAD_DISABLED;
+ initial_link_setting.use_link_rate_set = true;
+ initial_link_setting.link_rate_set = 0;
+ current_link_setting = initial_link_setting;
+
+ /* search for the minimum link setting that:
+ * 1. is supported according to the link training result
+ * 2. could support the b/w requested by the timing
+ */
+ while (current_link_setting.link_rate <=
+ max_link_rate) {
+ link_bw = dc_link_bandwidth_kbps(
+ link,
+ &current_link_setting);
+ if (req_bw <= link_bw) {
+ *link_setting = current_link_setting;
+ return true;
+ }
+ if (policy) {
+ /* minimize lane */
+ if (current_link_setting.link_rate_set <
+ link->dpcd_caps.edp_supported_link_rates_count
+ && current_link_setting.link_rate < max_link_rate) {
+ current_link_setting.link_rate_set++;
+ current_link_setting.link_rate =
+ link->dpcd_caps.edp_supported_link_rates[current_link_setting.link_rate_set];
+ } else {
+ if (current_link_setting.lane_count < link->verified_link_cap.lane_count) {
+ current_link_setting.lane_count =
+ increase_lane_count(
+ current_link_setting.lane_count);
+ current_link_setting.link_rate_set = initial_link_setting.link_rate_set;
+ current_link_setting.link_rate =
+ link->dpcd_caps.edp_supported_link_rates[current_link_setting.link_rate_set];
+ } else
+ break;
+ }
+ } else {
+ /* minimize link rate */
+ if (current_link_setting.lane_count <
+ link->verified_link_cap.lane_count) {
+ current_link_setting.lane_count =
+ increase_lane_count(
+ current_link_setting.lane_count);
+ } else {
+ if (current_link_setting.link_rate_set < link->dpcd_caps.edp_supported_link_rates_count) {
+ current_link_setting.link_rate_set++;
+ current_link_setting.link_rate =
+ link->dpcd_caps.edp_supported_link_rates[current_link_setting.link_rate_set];
+ current_link_setting.lane_count =
+ initial_link_setting.lane_count;
+ } else
+ break;
+ }
+ }
+ }
+ return false;
+}
+
+static bool decide_mst_link_settings(const struct dc_link *link, struct dc_link_settings *link_setting)
+{
+ *link_setting = link->verified_link_cap;
+ return true;
+}
+
+bool link_decide_link_settings(struct dc_stream_state *stream,
+ struct dc_link_settings *link_setting)
+{
+ struct dc_link *link = stream->link;
+ uint32_t req_bw = dc_bandwidth_in_kbps_from_timing(&stream->timing);
+
+ memset(link_setting, 0, sizeof(*link_setting));
+
+ /* if preferred is specified through AMDDP, use it, if it's enough
+ * to drive the mode
+ */
+ if (link->preferred_link_setting.lane_count !=
+ LANE_COUNT_UNKNOWN &&
+ link->preferred_link_setting.link_rate !=
+ LINK_RATE_UNKNOWN) {
+ *link_setting = link->preferred_link_setting;
+ return true;
+ }
+
+ /* MST doesn't perform link training for now
+ * TODO: add MST specific link training routine
+ */
+ if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+ decide_mst_link_settings(link, link_setting);
+ } else if (link->connector_signal == SIGNAL_TYPE_EDP) {
+ /* enable edp link optimization for DSC eDP case */
+ if (stream->timing.flags.DSC) {
+ enum dc_link_rate max_link_rate = LINK_RATE_UNKNOWN;
+
+ if (link->panel_config.dsc.force_dsc_edp_policy) {
+ /* calculate link max link rate cap*/
+ struct dc_link_settings tmp_link_setting;
+ struct dc_crtc_timing tmp_timing = stream->timing;
+ uint32_t orig_req_bw;
+
+ tmp_link_setting.link_rate = LINK_RATE_UNKNOWN;
+ tmp_timing.flags.DSC = 0;
+ orig_req_bw = dc_bandwidth_in_kbps_from_timing(&tmp_timing);
+ dc_link_decide_edp_link_settings(link, &tmp_link_setting, orig_req_bw);
+ max_link_rate = tmp_link_setting.link_rate;
+ }
+ decide_edp_link_settings_with_dsc(link, link_setting, req_bw, max_link_rate);
+ } else {
+ dc_link_decide_edp_link_settings(link, link_setting, req_bw);
+ }
+ } else {
+ decide_dp_link_settings(link, link_setting, req_bw);
+ }
+
+ return link_setting->lane_count != LANE_COUNT_UNKNOWN &&
+ link_setting->link_rate != LINK_RATE_UNKNOWN;
+}
+
+enum dp_link_encoding link_dp_get_encoding_format(const struct dc_link_settings *link_settings)
+{
+ if ((link_settings->link_rate >= LINK_RATE_LOW) &&
+ (link_settings->link_rate <= LINK_RATE_HIGH3))
+ return DP_8b_10b_ENCODING;
+ else if ((link_settings->link_rate >= LINK_RATE_UHBR10) &&
+ (link_settings->link_rate <= LINK_RATE_UHBR20))
+ return DP_128b_132b_ENCODING;
+ return DP_UNKNOWN_ENCODING;
+}
+
+enum dp_link_encoding dc_link_dp_mst_decide_link_encoding_format(const struct dc_link *link)
+{
+ struct dc_link_settings link_settings = {0};
+
+ if (!dc_is_dp_signal(link->connector_signal))
+ return DP_UNKNOWN_ENCODING;
+
+ if (link->preferred_link_setting.lane_count !=
+ LANE_COUNT_UNKNOWN &&
+ link->preferred_link_setting.link_rate !=
+ LINK_RATE_UNKNOWN) {
+ link_settings = link->preferred_link_setting;
+ } else {
+ decide_mst_link_settings(link, &link_settings);
+ }
+
+ return link_dp_get_encoding_format(&link_settings);
+}
+
+static void read_dp_device_vendor_id(struct dc_link *link)
+{
+ struct dp_device_vendor_id dp_id;
+
+ /* read IEEE branch device id */
+ core_link_read_dpcd(
+ link,
+ DP_BRANCH_OUI,
+ (uint8_t *)&dp_id,
+ sizeof(dp_id));
+
+ link->dpcd_caps.branch_dev_id =
+ (dp_id.ieee_oui[0] << 16) +
+ (dp_id.ieee_oui[1] << 8) +
+ dp_id.ieee_oui[2];
+
+ memmove(
+ link->dpcd_caps.branch_dev_name,
+ dp_id.ieee_device_id,
+ sizeof(dp_id.ieee_device_id));
+}
+
+static enum dc_status wake_up_aux_channel(struct dc_link *link)
+{
+ enum dc_status status = DC_ERROR_UNEXPECTED;
+ uint32_t aux_channel_retry_cnt = 0;
+ uint8_t dpcd_power_state = '\0';
+
+ while (status != DC_OK && aux_channel_retry_cnt < 10) {
+ status = core_link_read_dpcd(link, DP_SET_POWER,
+ &dpcd_power_state, sizeof(dpcd_power_state));
+
+ /* Delay 1 ms if AUX CH is in power down state. Based on spec
+ * section 2.3.1.2, if AUX CH may be powered down due to
+ * write to DPCD 600h = 2. Sink AUX CH is monitoring differential
+ * signal and may need up to 1 ms before being able to reply.
+ */
+ if (status != DC_OK || dpcd_power_state == DP_SET_POWER_D3) {
+ udelay(1000);
+ aux_channel_retry_cnt++;
+ }
+ }
+
+ if (status != DC_OK) {
+ dpcd_power_state = DP_SET_POWER_D0;
+ status = core_link_write_dpcd(
+ link,
+ DP_SET_POWER,
+ &dpcd_power_state,
+ sizeof(dpcd_power_state));
+
+ dpcd_power_state = DP_SET_POWER_D3;
+ status = core_link_write_dpcd(
+ link,
+ DP_SET_POWER,
+ &dpcd_power_state,
+ sizeof(dpcd_power_state));
+ return DC_ERROR_UNEXPECTED;
+ }
+
+ return DC_OK;
+}
+
+static void get_active_converter_info(
+ uint8_t data, struct dc_link *link)
+{
+ union dp_downstream_port_present ds_port = { .byte = data };
+ memset(&link->dpcd_caps.dongle_caps, 0, sizeof(link->dpcd_caps.dongle_caps));
+
+ /* decode converter info*/
+ if (!ds_port.fields.PORT_PRESENT) {
+ link->dpcd_caps.dongle_type = DISPLAY_DONGLE_NONE;
+ set_dongle_type(link->ddc,
+ link->dpcd_caps.dongle_type);
+ link->dpcd_caps.is_branch_dev = false;
+ return;
+ }
+
+ /* DPCD 0x5 bit 0 = 1, it indicate it's branch device */
+ link->dpcd_caps.is_branch_dev = ds_port.fields.PORT_PRESENT;
+
+ switch (ds_port.fields.PORT_TYPE) {
+ case DOWNSTREAM_VGA:
+ link->dpcd_caps.dongle_type = DISPLAY_DONGLE_DP_VGA_CONVERTER;
+ break;
+ case DOWNSTREAM_DVI_HDMI_DP_PLUS_PLUS:
+ /* At this point we don't know is it DVI or HDMI or DP++,
+ * assume DVI.*/
+ link->dpcd_caps.dongle_type = DISPLAY_DONGLE_DP_DVI_CONVERTER;
+ break;
+ default:
+ link->dpcd_caps.dongle_type = DISPLAY_DONGLE_NONE;
+ break;
+ }
+
+ if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_11) {
+ uint8_t det_caps[16]; /* CTS 4.2.2.7 expects source to read Detailed Capabilities Info : 00080h-0008F.*/
+ union dwnstream_port_caps_byte0 *port_caps =
+ (union dwnstream_port_caps_byte0 *)det_caps;
+ if (core_link_read_dpcd(link, DP_DOWNSTREAM_PORT_0,
+ det_caps, sizeof(det_caps)) == DC_OK) {
+
+ switch (port_caps->bits.DWN_STRM_PORTX_TYPE) {
+ /*Handle DP case as DONGLE_NONE*/
+ case DOWN_STREAM_DETAILED_DP:
+ link->dpcd_caps.dongle_type = DISPLAY_DONGLE_NONE;
+ break;
+ case DOWN_STREAM_DETAILED_VGA:
+ link->dpcd_caps.dongle_type =
+ DISPLAY_DONGLE_DP_VGA_CONVERTER;
+ break;
+ case DOWN_STREAM_DETAILED_DVI:
+ link->dpcd_caps.dongle_type =
+ DISPLAY_DONGLE_DP_DVI_CONVERTER;
+ break;
+ case DOWN_STREAM_DETAILED_HDMI:
+ case DOWN_STREAM_DETAILED_DP_PLUS_PLUS:
+ /*Handle DP++ active converter case, process DP++ case as HDMI case according DP1.4 spec*/
+ link->dpcd_caps.dongle_type =
+ DISPLAY_DONGLE_DP_HDMI_CONVERTER;
+
+ link->dpcd_caps.dongle_caps.dongle_type = link->dpcd_caps.dongle_type;
+ if (ds_port.fields.DETAILED_CAPS) {
+
+ union dwnstream_port_caps_byte3_hdmi
+ hdmi_caps = {.raw = det_caps[3] };
+ union dwnstream_port_caps_byte2
+ hdmi_color_caps = {.raw = det_caps[2] };
+ link->dpcd_caps.dongle_caps.dp_hdmi_max_pixel_clk_in_khz =
+ det_caps[1] * 2500;
+
+ link->dpcd_caps.dongle_caps.is_dp_hdmi_s3d_converter =
+ hdmi_caps.bits.FRAME_SEQ_TO_FRAME_PACK;
+ /*YCBCR capability only for HDMI case*/
+ if (port_caps->bits.DWN_STRM_PORTX_TYPE
+ == DOWN_STREAM_DETAILED_HDMI) {
+ link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr422_pass_through =
+ hdmi_caps.bits.YCrCr422_PASS_THROUGH;
+ link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr420_pass_through =
+ hdmi_caps.bits.YCrCr420_PASS_THROUGH;
+ link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr422_converter =
+ hdmi_caps.bits.YCrCr422_CONVERSION;
+ link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr420_converter =
+ hdmi_caps.bits.YCrCr420_CONVERSION;
+ }
+
+ link->dpcd_caps.dongle_caps.dp_hdmi_max_bpc =
+ translate_dpcd_max_bpc(
+ hdmi_color_caps.bits.MAX_BITS_PER_COLOR_COMPONENT);
+
+ if (link->dc->caps.dp_hdmi21_pcon_support) {
+ union hdmi_encoded_link_bw hdmi_encoded_link_bw;
+
+ link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps =
+ dc_link_bw_kbps_from_raw_frl_link_rate_data(
+ hdmi_color_caps.bits.MAX_ENCODED_LINK_BW_SUPPORT);
+
+ // Intersect reported max link bw support with the supported link rate post FRL link training
+ if (core_link_read_dpcd(link, DP_PCON_HDMI_POST_FRL_STATUS,
+ &hdmi_encoded_link_bw.raw, sizeof(hdmi_encoded_link_bw)) == DC_OK) {
+ link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps = intersect_frl_link_bw_support(
+ link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps,
+ hdmi_encoded_link_bw);
+ }
+
+ if (link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps > 0)
+ link->dpcd_caps.dongle_caps.extendedCapValid = true;
+ }
+
+ if (link->dpcd_caps.dongle_caps.dp_hdmi_max_pixel_clk_in_khz != 0)
+ link->dpcd_caps.dongle_caps.extendedCapValid = true;
+ }
+
+ break;
+ }
+ }
+ }
+
+ set_dongle_type(link->ddc, link->dpcd_caps.dongle_type);
+
+ {
+ struct dp_sink_hw_fw_revision dp_hw_fw_revision;
+
+ core_link_read_dpcd(
+ link,
+ DP_BRANCH_REVISION_START,
+ (uint8_t *)&dp_hw_fw_revision,
+ sizeof(dp_hw_fw_revision));
+
+ link->dpcd_caps.branch_hw_revision =
+ dp_hw_fw_revision.ieee_hw_rev;
+
+ memmove(
+ link->dpcd_caps.branch_fw_revision,
+ dp_hw_fw_revision.ieee_fw_rev,
+ sizeof(dp_hw_fw_revision.ieee_fw_rev));
+ }
+ if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_14 &&
+ link->dpcd_caps.dongle_type != DISPLAY_DONGLE_NONE) {
+ union dp_dfp_cap_ext dfp_cap_ext;
+ memset(&dfp_cap_ext, '\0', sizeof (dfp_cap_ext));
+ core_link_read_dpcd(
+ link,
+ DP_DFP_CAPABILITY_EXTENSION_SUPPORT,
+ dfp_cap_ext.raw,
+ sizeof(dfp_cap_ext.raw));
+ link->dpcd_caps.dongle_caps.dfp_cap_ext.supported = dfp_cap_ext.fields.supported;
+ link->dpcd_caps.dongle_caps.dfp_cap_ext.max_pixel_rate_in_mps =
+ dfp_cap_ext.fields.max_pixel_rate_in_mps[0] +
+ (dfp_cap_ext.fields.max_pixel_rate_in_mps[1] << 8);
+ link->dpcd_caps.dongle_caps.dfp_cap_ext.max_video_h_active_width =
+ dfp_cap_ext.fields.max_video_h_active_width[0] +
+ (dfp_cap_ext.fields.max_video_h_active_width[1] << 8);
+ link->dpcd_caps.dongle_caps.dfp_cap_ext.max_video_v_active_height =
+ dfp_cap_ext.fields.max_video_v_active_height[0] +
+ (dfp_cap_ext.fields.max_video_v_active_height[1] << 8);
+ link->dpcd_caps.dongle_caps.dfp_cap_ext.encoding_format_caps =
+ dfp_cap_ext.fields.encoding_format_caps;
+ link->dpcd_caps.dongle_caps.dfp_cap_ext.rgb_color_depth_caps =
+ dfp_cap_ext.fields.rgb_color_depth_caps;
+ link->dpcd_caps.dongle_caps.dfp_cap_ext.ycbcr444_color_depth_caps =
+ dfp_cap_ext.fields.ycbcr444_color_depth_caps;
+ link->dpcd_caps.dongle_caps.dfp_cap_ext.ycbcr422_color_depth_caps =
+ dfp_cap_ext.fields.ycbcr422_color_depth_caps;
+ link->dpcd_caps.dongle_caps.dfp_cap_ext.ycbcr420_color_depth_caps =
+ dfp_cap_ext.fields.ycbcr420_color_depth_caps;
+ DC_LOG_DP2("DFP capability extension is read at link %d", link->link_index);
+ DC_LOG_DP2("\tdfp_cap_ext.supported = %s", link->dpcd_caps.dongle_caps.dfp_cap_ext.supported ? "true" : "false");
+ DC_LOG_DP2("\tdfp_cap_ext.max_pixel_rate_in_mps = %d", link->dpcd_caps.dongle_caps.dfp_cap_ext.max_pixel_rate_in_mps);
+ DC_LOG_DP2("\tdfp_cap_ext.max_video_h_active_width = %d", link->dpcd_caps.dongle_caps.dfp_cap_ext.max_video_h_active_width);
+ DC_LOG_DP2("\tdfp_cap_ext.max_video_v_active_height = %d", link->dpcd_caps.dongle_caps.dfp_cap_ext.max_video_v_active_height);
+ }
+}
+
+static void apply_usbc_combo_phy_reset_wa(struct dc_link *link,
+ struct dc_link_settings *link_settings)
+{
+ /* Temporary Renoir-specific workaround PHY will sometimes be in bad
+ * state on hotplugging display from certain USB-C dongle, so add extra
+ * cycle of enabling and disabling the PHY before first link training.
+ */
+ struct link_resource link_res = {0};
+ enum clock_source_id dp_cs_id = get_clock_source_id(link);
+
+ dp_enable_link_phy(link, &link_res, link->connector_signal,
+ dp_cs_id, link_settings);
+ dp_disable_link_phy(link, &link_res, link->connector_signal);
+}
+
+static bool dp_overwrite_extended_receiver_cap(struct dc_link *link)
+{
+ uint8_t dpcd_data[16];
+ uint32_t read_dpcd_retry_cnt = 3;
+ enum dc_status status = DC_ERROR_UNEXPECTED;
+ union dp_downstream_port_present ds_port = { 0 };
+ union down_stream_port_count down_strm_port_count;
+ union edp_configuration_cap edp_config_cap;
+
+ int i;
+
+ for (i = 0; i < read_dpcd_retry_cnt; i++) {
+ status = core_link_read_dpcd(
+ link,
+ DP_DPCD_REV,
+ dpcd_data,
+ sizeof(dpcd_data));
+ if (status == DC_OK)
+ break;
+ }
+
+ link->dpcd_caps.dpcd_rev.raw =
+ dpcd_data[DP_DPCD_REV - DP_DPCD_REV];
+
+ if (dpcd_data[DP_MAX_LANE_COUNT - DP_DPCD_REV] == 0)
+ return false;
+
+ ds_port.byte = dpcd_data[DP_DOWNSTREAMPORT_PRESENT -
+ DP_DPCD_REV];
+
+ get_active_converter_info(ds_port.byte, link);
+
+ down_strm_port_count.raw = dpcd_data[DP_DOWN_STREAM_PORT_COUNT -
+ DP_DPCD_REV];
+
+ link->dpcd_caps.allow_invalid_MSA_timing_param =
+ down_strm_port_count.bits.IGNORE_MSA_TIMING_PARAM;
+
+ link->dpcd_caps.max_ln_count.raw = dpcd_data[
+ DP_MAX_LANE_COUNT - DP_DPCD_REV];
+
+ link->dpcd_caps.max_down_spread.raw = dpcd_data[
+ DP_MAX_DOWNSPREAD - DP_DPCD_REV];
+
+ link->reported_link_cap.lane_count =
+ link->dpcd_caps.max_ln_count.bits.MAX_LANE_COUNT;
+ link->reported_link_cap.link_rate = dpcd_data[
+ DP_MAX_LINK_RATE - DP_DPCD_REV];
+ link->reported_link_cap.link_spread =
+ link->dpcd_caps.max_down_spread.bits.MAX_DOWN_SPREAD ?
+ LINK_SPREAD_05_DOWNSPREAD_30KHZ : LINK_SPREAD_DISABLED;
+
+ edp_config_cap.raw = dpcd_data[
+ DP_EDP_CONFIGURATION_CAP - DP_DPCD_REV];
+ link->dpcd_caps.panel_mode_edp =
+ edp_config_cap.bits.ALT_SCRAMBLER_RESET;
+ link->dpcd_caps.dpcd_display_control_capable =
+ edp_config_cap.bits.DPCD_DISPLAY_CONTROL_CAPABLE;
+
+ return true;
+}
+
+void dc_link_overwrite_extended_receiver_cap(
+ struct dc_link *link)
+{
+ dp_overwrite_extended_receiver_cap(link);
+}
+
+void dpcd_set_source_specific_data(struct dc_link *link)
+{
+ if (!link->dc->vendor_signature.is_valid) {
+ enum dc_status result_write_min_hblank = DC_NOT_SUPPORTED;
+ struct dpcd_amd_signature amd_signature = {0};
+ struct dpcd_amd_device_id amd_device_id = {0};
+
+ amd_device_id.device_id_byte1 =
+ (uint8_t)(link->ctx->asic_id.chip_id);
+ amd_device_id.device_id_byte2 =
+ (uint8_t)(link->ctx->asic_id.chip_id >> 8);
+ amd_device_id.dce_version =
+ (uint8_t)(link->ctx->dce_version);
+ amd_device_id.dal_version_byte1 = 0x0; // needed? where to get?
+ amd_device_id.dal_version_byte2 = 0x0; // needed? where to get?
+
+ core_link_read_dpcd(link, DP_SOURCE_OUI,
+ (uint8_t *)(&amd_signature),
+ sizeof(amd_signature));
+
+ if (!((amd_signature.AMD_IEEE_TxSignature_byte1 == 0x0) &&
+ (amd_signature.AMD_IEEE_TxSignature_byte2 == 0x0) &&
+ (amd_signature.AMD_IEEE_TxSignature_byte3 == 0x1A))) {
+
+ amd_signature.AMD_IEEE_TxSignature_byte1 = 0x0;
+ amd_signature.AMD_IEEE_TxSignature_byte2 = 0x0;
+ amd_signature.AMD_IEEE_TxSignature_byte3 = 0x1A;
+
+ core_link_write_dpcd(link, DP_SOURCE_OUI,
+ (uint8_t *)(&amd_signature),
+ sizeof(amd_signature));
+ }
+
+ core_link_write_dpcd(link, DP_SOURCE_OUI+0x03,
+ (uint8_t *)(&amd_device_id),
+ sizeof(amd_device_id));
+
+ if (link->ctx->dce_version >= DCN_VERSION_2_0 &&
+ link->dc->caps.min_horizontal_blanking_period != 0) {
+
+ uint8_t hblank_size = (uint8_t)link->dc->caps.min_horizontal_blanking_period;
+
+ result_write_min_hblank = core_link_write_dpcd(link,
+ DP_SOURCE_MINIMUM_HBLANK_SUPPORTED, (uint8_t *)(&hblank_size),
+ sizeof(hblank_size));
+ }
+ DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION,
+ WPP_BIT_FLAG_DC_DETECTION_DP_CAPS,
+ "result=%u link_index=%u enum dce_version=%d DPCD=0x%04X min_hblank=%u branch_dev_id=0x%x branch_dev_name='%c%c%c%c%c%c'",
+ result_write_min_hblank,
+ link->link_index,
+ link->ctx->dce_version,
+ DP_SOURCE_MINIMUM_HBLANK_SUPPORTED,
+ link->dc->caps.min_horizontal_blanking_period,
+ link->dpcd_caps.branch_dev_id,
+ link->dpcd_caps.branch_dev_name[0],
+ link->dpcd_caps.branch_dev_name[1],
+ link->dpcd_caps.branch_dev_name[2],
+ link->dpcd_caps.branch_dev_name[3],
+ link->dpcd_caps.branch_dev_name[4],
+ link->dpcd_caps.branch_dev_name[5]);
+ } else {
+ core_link_write_dpcd(link, DP_SOURCE_OUI,
+ link->dc->vendor_signature.data.raw,
+ sizeof(link->dc->vendor_signature.data.raw));
+ }
+}
+
+void dpcd_write_cable_id_to_dprx(struct dc_link *link)
+{
+ if (!link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED ||
+ link->dpcd_caps.cable_id.raw == 0 ||
+ link->dprx_states.cable_id_written)
+ return;
+
+ core_link_write_dpcd(link, DP_CABLE_ATTRIBUTES_UPDATED_BY_DPTX,
+ &link->dpcd_caps.cable_id.raw,
+ sizeof(link->dpcd_caps.cable_id.raw));
+
+ link->dprx_states.cable_id_written = 1;
+}
+
+static bool get_usbc_cable_id(struct dc_link *link, union dp_cable_id *cable_id)
+{
+ union dmub_rb_cmd cmd;
+
+ if (!link->ctx->dmub_srv ||
+ link->ep_type != DISPLAY_ENDPOINT_PHY ||
+ link->link_enc->features.flags.bits.DP_IS_USB_C == 0)
+ return false;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.cable_id.header.type = DMUB_CMD_GET_USBC_CABLE_ID;
+ cmd.cable_id.header.payload_bytes = sizeof(cmd.cable_id.data);
+ cmd.cable_id.data.input.phy_inst = resource_transmitter_to_phy_idx(
+ link->dc, link->link_enc->transmitter);
+ if (dc_dmub_srv_cmd_with_reply_data(link->ctx->dmub_srv, &cmd) &&
+ cmd.cable_id.header.ret_status == 1) {
+ cable_id->raw = cmd.cable_id.data.output_raw;
+ DC_LOG_DC("usbc_cable_id = %d.\n", cable_id->raw);
+ }
+ return cmd.cable_id.header.ret_status == 1;
+}
+
+static void retrieve_cable_id(struct dc_link *link)
+{
+ union dp_cable_id usbc_cable_id;
+
+ link->dpcd_caps.cable_id.raw = 0;
+ core_link_read_dpcd(link, DP_CABLE_ATTRIBUTES_UPDATED_BY_DPRX,
+ &link->dpcd_caps.cable_id.raw, sizeof(uint8_t));
+
+ if (get_usbc_cable_id(link, &usbc_cable_id))
+ link->dpcd_caps.cable_id = intersect_cable_id(
+ &link->dpcd_caps.cable_id, &usbc_cable_id);
+}
+
+bool read_is_mst_supported(struct dc_link *link)
+{
+ bool mst = false;
+ enum dc_status st = DC_OK;
+ union dpcd_rev rev;
+ union mstm_cap cap;
+
+ if (link->preferred_training_settings.mst_enable &&
+ *link->preferred_training_settings.mst_enable == false) {
+ return false;
+ }
+
+ rev.raw = 0;
+ cap.raw = 0;
+
+ st = core_link_read_dpcd(link, DP_DPCD_REV, &rev.raw,
+ sizeof(rev));
+
+ if (st == DC_OK && rev.raw >= DPCD_REV_12) {
+
+ st = core_link_read_dpcd(link, DP_MSTM_CAP,
+ &cap.raw, sizeof(cap));
+ if (st == DC_OK && cap.bits.MST_CAP == 1)
+ mst = true;
+ }
+ return mst;
+
+}
+
+/* Read additional sink caps defined in source specific DPCD area
+ * This function currently only reads from SinkCapability address (DP_SOURCE_SINK_CAP)
+ * TODO: Add FS caps and read from DP_SOURCE_SINK_FS_CAP as well
+ */
+static bool dpcd_read_sink_ext_caps(struct dc_link *link)
+{
+ uint8_t dpcd_data;
+
+ if (!link)
+ return false;
+
+ if (core_link_read_dpcd(link, DP_SOURCE_SINK_CAP, &dpcd_data, 1) != DC_OK)
+ return false;
+
+ link->dpcd_sink_ext_caps.raw = dpcd_data;
+ return true;
+}
+
+enum dc_status dp_retrieve_lttpr_cap(struct dc_link *link)
+{
+ uint8_t lttpr_dpcd_data[8];
+ enum dc_status status;
+ bool is_lttpr_present;
+
+ /* Logic to determine LTTPR support*/
+ bool vbios_lttpr_interop = link->dc->caps.vbios_lttpr_aware;
+
+ if (!vbios_lttpr_interop || !link->dc->caps.extended_aux_timeout_support)
+ return DC_NOT_SUPPORTED;
+
+ /* By reading LTTPR capability, RX assumes that we will enable
+ * LTTPR extended aux timeout if LTTPR is present.
+ */
+ status = core_link_read_dpcd(
+ link,
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV,
+ lttpr_dpcd_data,
+ sizeof(lttpr_dpcd_data));
+
+ link->dpcd_caps.lttpr_caps.revision.raw =
+ lttpr_dpcd_data[DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV -
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+
+ link->dpcd_caps.lttpr_caps.max_link_rate =
+ lttpr_dpcd_data[DP_MAX_LINK_RATE_PHY_REPEATER -
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+
+ link->dpcd_caps.lttpr_caps.phy_repeater_cnt =
+ lttpr_dpcd_data[DP_PHY_REPEATER_CNT -
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+
+ link->dpcd_caps.lttpr_caps.max_lane_count =
+ lttpr_dpcd_data[DP_MAX_LANE_COUNT_PHY_REPEATER -
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+
+ link->dpcd_caps.lttpr_caps.mode =
+ lttpr_dpcd_data[DP_PHY_REPEATER_MODE -
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+
+ link->dpcd_caps.lttpr_caps.max_ext_timeout =
+ lttpr_dpcd_data[DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT -
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+ link->dpcd_caps.lttpr_caps.main_link_channel_coding.raw =
+ lttpr_dpcd_data[DP_MAIN_LINK_CHANNEL_CODING_PHY_REPEATER -
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+
+ link->dpcd_caps.lttpr_caps.supported_128b_132b_rates.raw =
+ lttpr_dpcd_data[DP_PHY_REPEATER_128B132B_RATES -
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+
+ /* If this chip cap is set, at least one retimer must exist in the chain
+ * Override count to 1 if we receive a known bad count (0 or an invalid value) */
+ if ((link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
+ (dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) == 0)) {
+ ASSERT(0);
+ link->dpcd_caps.lttpr_caps.phy_repeater_cnt = 0x80;
+ DC_LOG_DC("lttpr_caps forced phy_repeater_cnt = %d\n", link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
+ }
+
+ /* Attempt to train in LTTPR transparent mode if repeater count exceeds 8. */
+ is_lttpr_present = dp_is_lttpr_present(link);
+
+ if (is_lttpr_present)
+ CONN_DATA_DETECT(link, lttpr_dpcd_data, sizeof(lttpr_dpcd_data), "LTTPR Caps: ");
+
+ DC_LOG_DC("is_lttpr_present = %d\n", is_lttpr_present);
+ return status;
+}
+
+static bool retrieve_link_cap(struct dc_link *link)
+{
+ /* DP_ADAPTER_CAP - DP_DPCD_REV + 1 == 16 and also DP_DSC_BITS_PER_PIXEL_INC - DP_DSC_SUPPORT + 1 == 16,
+ * which means size 16 will be good for both of those DPCD register block reads
+ */
+ uint8_t dpcd_data[16];
+ /*Only need to read 1 byte starting from DP_DPRX_FEATURE_ENUMERATION_LIST.
+ */
+ uint8_t dpcd_dprx_data = '\0';
+
+ struct dp_device_vendor_id sink_id;
+ union down_stream_port_count down_strm_port_count;
+ union edp_configuration_cap edp_config_cap;
+ union dp_downstream_port_present ds_port = { 0 };
+ enum dc_status status = DC_ERROR_UNEXPECTED;
+ uint32_t read_dpcd_retry_cnt = 3;
+ int i;
+ struct dp_sink_hw_fw_revision dp_hw_fw_revision;
+ const uint32_t post_oui_delay = 30; // 30ms
+
+ memset(dpcd_data, '\0', sizeof(dpcd_data));
+ memset(&down_strm_port_count,
+ '\0', sizeof(union down_stream_port_count));
+ memset(&edp_config_cap, '\0',
+ sizeof(union edp_configuration_cap));
+
+ /* if extended timeout is supported in hardware,
+ * default to LTTPR timeout (3.2ms) first as a W/A for DP link layer
+ * CTS 4.2.1.1 regression introduced by CTS specs requirement update.
+ */
+ try_to_configure_aux_timeout(link->ddc,
+ LINK_AUX_DEFAULT_LTTPR_TIMEOUT_PERIOD);
+
+ status = dp_retrieve_lttpr_cap(link);
+
+ if (status != DC_OK) {
+ status = wake_up_aux_channel(link);
+ if (status == DC_OK)
+ dp_retrieve_lttpr_cap(link);
+ else
+ return false;
+ }
+
+ if (dp_is_lttpr_present(link))
+ configure_lttpr_mode_transparent(link);
+
+ /* Read DP tunneling information. */
+ status = dpcd_get_tunneling_device_data(link);
+
+ dpcd_set_source_specific_data(link);
+ /* Sink may need to configure internals based on vendor, so allow some
+ * time before proceeding with possibly vendor specific transactions
+ */
+ msleep(post_oui_delay);
+
+ for (i = 0; i < read_dpcd_retry_cnt; i++) {
+ status = core_link_read_dpcd(
+ link,
+ DP_DPCD_REV,
+ dpcd_data,
+ sizeof(dpcd_data));
+ if (status == DC_OK)
+ break;
+ }
+
+
+ if (status != DC_OK) {
+ dm_error("%s: Read receiver caps dpcd data failed.\n", __func__);
+ return false;
+ }
+
+ if (!dp_is_lttpr_present(link))
+ try_to_configure_aux_timeout(link->ddc, LINK_AUX_DEFAULT_TIMEOUT_PERIOD);
+
+
+ {
+ union training_aux_rd_interval aux_rd_interval;
+
+ aux_rd_interval.raw =
+ dpcd_data[DP_TRAINING_AUX_RD_INTERVAL];
+
+ link->dpcd_caps.ext_receiver_cap_field_present =
+ aux_rd_interval.bits.EXT_RECEIVER_CAP_FIELD_PRESENT == 1;
+
+ if (aux_rd_interval.bits.EXT_RECEIVER_CAP_FIELD_PRESENT == 1) {
+ uint8_t ext_cap_data[16];
+
+ memset(ext_cap_data, '\0', sizeof(ext_cap_data));
+ for (i = 0; i < read_dpcd_retry_cnt; i++) {
+ status = core_link_read_dpcd(
+ link,
+ DP_DP13_DPCD_REV,
+ ext_cap_data,
+ sizeof(ext_cap_data));
+ if (status == DC_OK) {
+ memcpy(dpcd_data, ext_cap_data, sizeof(dpcd_data));
+ break;
+ }
+ }
+ if (status != DC_OK)
+ dm_error("%s: Read extend caps data failed, use cap from dpcd 0.\n", __func__);
+ }
+ }
+
+ link->dpcd_caps.dpcd_rev.raw =
+ dpcd_data[DP_DPCD_REV - DP_DPCD_REV];
+
+ if (link->dpcd_caps.ext_receiver_cap_field_present) {
+ for (i = 0; i < read_dpcd_retry_cnt; i++) {
+ status = core_link_read_dpcd(
+ link,
+ DP_DPRX_FEATURE_ENUMERATION_LIST,
+ &dpcd_dprx_data,
+ sizeof(dpcd_dprx_data));
+ if (status == DC_OK)
+ break;
+ }
+
+ link->dpcd_caps.dprx_feature.raw = dpcd_dprx_data;
+
+ if (status != DC_OK)
+ dm_error("%s: Read DPRX caps data failed.\n", __func__);
+
+ /* AdaptiveSyncCapability */
+ dpcd_dprx_data = 0;
+ for (i = 0; i < read_dpcd_retry_cnt; i++) {
+ status = core_link_read_dpcd(
+ link, DP_DPRX_FEATURE_ENUMERATION_LIST_CONT_1,
+ &dpcd_dprx_data, sizeof(dpcd_dprx_data));
+ if (status == DC_OK)
+ break;
+ }
+
+ link->dpcd_caps.adaptive_sync_caps.dp_adap_sync_caps.raw = dpcd_dprx_data;
+
+ if (status != DC_OK)
+ dm_error("%s: Read DPRX caps data failed. Addr:%#x\n",
+ __func__, DP_DPRX_FEATURE_ENUMERATION_LIST_CONT_1);
+ }
+
+ else {
+ link->dpcd_caps.dprx_feature.raw = 0;
+ }
+
+
+ /* Error condition checking...
+ * It is impossible for Sink to report Max Lane Count = 0.
+ * It is possible for Sink to report Max Link Rate = 0, if it is
+ * an eDP device that is reporting specialized link rates in the
+ * SUPPORTED_LINK_RATE table.
+ */
+ if (dpcd_data[DP_MAX_LANE_COUNT - DP_DPCD_REV] == 0)
+ return false;
+
+ ds_port.byte = dpcd_data[DP_DOWNSTREAMPORT_PRESENT -
+ DP_DPCD_REV];
+
+ read_dp_device_vendor_id(link);
+
+ /* TODO - decouple raw mst capability from policy decision */
+ link->dpcd_caps.is_mst_capable = read_is_mst_supported(link);
+
+ get_active_converter_info(ds_port.byte, link);
+
+ dp_wa_power_up_0010FA(link, dpcd_data, sizeof(dpcd_data));
+
+ down_strm_port_count.raw = dpcd_data[DP_DOWN_STREAM_PORT_COUNT -
+ DP_DPCD_REV];
+
+ link->dpcd_caps.allow_invalid_MSA_timing_param =
+ down_strm_port_count.bits.IGNORE_MSA_TIMING_PARAM;
+
+ link->dpcd_caps.max_ln_count.raw = dpcd_data[
+ DP_MAX_LANE_COUNT - DP_DPCD_REV];
+
+ link->dpcd_caps.max_down_spread.raw = dpcd_data[
+ DP_MAX_DOWNSPREAD - DP_DPCD_REV];
+
+ link->reported_link_cap.lane_count =
+ link->dpcd_caps.max_ln_count.bits.MAX_LANE_COUNT;
+ link->reported_link_cap.link_rate = get_link_rate_from_max_link_bw(
+ dpcd_data[DP_MAX_LINK_RATE - DP_DPCD_REV]);
+ link->reported_link_cap.link_spread =
+ link->dpcd_caps.max_down_spread.bits.MAX_DOWN_SPREAD ?
+ LINK_SPREAD_05_DOWNSPREAD_30KHZ : LINK_SPREAD_DISABLED;
+
+ edp_config_cap.raw = dpcd_data[
+ DP_EDP_CONFIGURATION_CAP - DP_DPCD_REV];
+ link->dpcd_caps.panel_mode_edp =
+ edp_config_cap.bits.ALT_SCRAMBLER_RESET;
+ link->dpcd_caps.dpcd_display_control_capable =
+ edp_config_cap.bits.DPCD_DISPLAY_CONTROL_CAPABLE;
+ link->dpcd_caps.channel_coding_cap.raw =
+ dpcd_data[DP_MAIN_LINK_CHANNEL_CODING - DP_DPCD_REV];
+ link->test_pattern_enabled = false;
+ link->compliance_test_state.raw = 0;
+
+ /* read sink count */
+ core_link_read_dpcd(link,
+ DP_SINK_COUNT,
+ &link->dpcd_caps.sink_count.raw,
+ sizeof(link->dpcd_caps.sink_count.raw));
+
+ /* read sink ieee oui */
+ core_link_read_dpcd(link,
+ DP_SINK_OUI,
+ (uint8_t *)(&sink_id),
+ sizeof(sink_id));
+
+ link->dpcd_caps.sink_dev_id =
+ (sink_id.ieee_oui[0] << 16) +
+ (sink_id.ieee_oui[1] << 8) +
+ (sink_id.ieee_oui[2]);
+
+ memmove(
+ link->dpcd_caps.sink_dev_id_str,
+ sink_id.ieee_device_id,
+ sizeof(sink_id.ieee_device_id));
+
+ core_link_read_dpcd(
+ link,
+ DP_SINK_HW_REVISION_START,
+ (uint8_t *)&dp_hw_fw_revision,
+ sizeof(dp_hw_fw_revision));
+
+ link->dpcd_caps.sink_hw_revision =
+ dp_hw_fw_revision.ieee_hw_rev;
+
+ memmove(
+ link->dpcd_caps.sink_fw_revision,
+ dp_hw_fw_revision.ieee_fw_rev,
+ sizeof(dp_hw_fw_revision.ieee_fw_rev));
+
+ /* Quirk for Retina panels: wrong DP_MAX_LINK_RATE */
+ {
+ uint8_t str_mbp_2018[] = { 101, 68, 21, 103, 98, 97 };
+ uint8_t fwrev_mbp_2018[] = { 7, 4 };
+ uint8_t fwrev_mbp_2018_vega[] = { 8, 4 };
+
+ /* We also check for the firmware revision as 16,1 models have an
+ * identical device id and are incorrectly quirked otherwise.
+ */
+ if ((link->dpcd_caps.sink_dev_id == 0x0010fa) &&
+ !memcmp(link->dpcd_caps.sink_dev_id_str, str_mbp_2018,
+ sizeof(str_mbp_2018)) &&
+ (!memcmp(link->dpcd_caps.sink_fw_revision, fwrev_mbp_2018,
+ sizeof(fwrev_mbp_2018)) ||
+ !memcmp(link->dpcd_caps.sink_fw_revision, fwrev_mbp_2018_vega,
+ sizeof(fwrev_mbp_2018_vega)))) {
+ link->reported_link_cap.link_rate = LINK_RATE_RBR2;
+ }
+ }
+
+ memset(&link->dpcd_caps.dsc_caps, '\0',
+ sizeof(link->dpcd_caps.dsc_caps));
+ memset(&link->dpcd_caps.fec_cap, '\0', sizeof(link->dpcd_caps.fec_cap));
+ /* Read DSC and FEC sink capabilities if DP revision is 1.4 and up */
+ if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_14) {
+ status = core_link_read_dpcd(
+ link,
+ DP_FEC_CAPABILITY,
+ &link->dpcd_caps.fec_cap.raw,
+ sizeof(link->dpcd_caps.fec_cap.raw));
+ status = core_link_read_dpcd(
+ link,
+ DP_DSC_SUPPORT,
+ link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
+ sizeof(link->dpcd_caps.dsc_caps.dsc_basic_caps.raw));
+ if (link->dpcd_caps.dongle_type != DISPLAY_DONGLE_NONE) {
+ status = core_link_read_dpcd(
+ link,
+ DP_DSC_BRANCH_OVERALL_THROUGHPUT_0,
+ link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
+ sizeof(link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw));
+ DC_LOG_DSC("DSC branch decoder capability is read at link %d", link->link_index);
+ DC_LOG_DSC("\tBRANCH_OVERALL_THROUGHPUT_0 = 0x%02x",
+ link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.fields.BRANCH_OVERALL_THROUGHPUT_0);
+ DC_LOG_DSC("\tBRANCH_OVERALL_THROUGHPUT_1 = 0x%02x",
+ link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.fields.BRANCH_OVERALL_THROUGHPUT_1);
+ DC_LOG_DSC("\tBRANCH_MAX_LINE_WIDTH 0x%02x",
+ link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.fields.BRANCH_MAX_LINE_WIDTH);
+ }
+
+ /* Apply work around to disable FEC and DSC for USB4 tunneling in TBT3 compatibility mode
+ * only if required.
+ */
+ if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA &&
+ link->dc->debug.dpia_debug.bits.enable_force_tbt3_work_around &&
+ link->dpcd_caps.is_branch_dev &&
+ link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 &&
+ link->dpcd_caps.branch_hw_revision == DP_BRANCH_HW_REV_10 &&
+ (link->dpcd_caps.fec_cap.bits.FEC_CAPABLE ||
+ link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT)) {
+ /* A TBT3 device is expected to report no support for FEC or DSC to a USB4 DPIA.
+ * Clear FEC and DSC capabilities as a work around if that is not the case.
+ */
+ link->wa_flags.dpia_forced_tbt3_mode = true;
+ memset(&link->dpcd_caps.dsc_caps, '\0', sizeof(link->dpcd_caps.dsc_caps));
+ memset(&link->dpcd_caps.fec_cap, '\0', sizeof(link->dpcd_caps.fec_cap));
+ DC_LOG_DSC("Clear DSC SUPPORT for USB4 link(%d) in TBT3 compatibility mode", link->link_index);
+ } else
+ link->wa_flags.dpia_forced_tbt3_mode = false;
+ }
+
+ if (!dpcd_read_sink_ext_caps(link))
+ link->dpcd_sink_ext_caps.raw = 0;
+
+ if (link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED) {
+ DC_LOG_DP2("128b/132b encoding is supported at link %d", link->link_index);
+
+ core_link_read_dpcd(link,
+ DP_128B132B_SUPPORTED_LINK_RATES,
+ &link->dpcd_caps.dp_128b_132b_supported_link_rates.raw,
+ sizeof(link->dpcd_caps.dp_128b_132b_supported_link_rates.raw));
+ if (link->dpcd_caps.dp_128b_132b_supported_link_rates.bits.UHBR20)
+ link->reported_link_cap.link_rate = LINK_RATE_UHBR20;
+ else if (link->dpcd_caps.dp_128b_132b_supported_link_rates.bits.UHBR13_5)
+ link->reported_link_cap.link_rate = LINK_RATE_UHBR13_5;
+ else if (link->dpcd_caps.dp_128b_132b_supported_link_rates.bits.UHBR10)
+ link->reported_link_cap.link_rate = LINK_RATE_UHBR10;
+ else
+ dm_error("%s: Invalid RX 128b_132b_supported_link_rates\n", __func__);
+ DC_LOG_DP2("128b/132b supported link rates is read at link %d", link->link_index);
+ DC_LOG_DP2("\tmax 128b/132b link rate support is %d.%d GHz",
+ link->reported_link_cap.link_rate / 100,
+ link->reported_link_cap.link_rate % 100);
+
+ core_link_read_dpcd(link,
+ DP_SINK_VIDEO_FALLBACK_FORMATS,
+ &link->dpcd_caps.fallback_formats.raw,
+ sizeof(link->dpcd_caps.fallback_formats.raw));
+ DC_LOG_DP2("sink video fallback format is read at link %d", link->link_index);
+ if (link->dpcd_caps.fallback_formats.bits.dp_1920x1080_60Hz_24bpp_support)
+ DC_LOG_DP2("\t1920x1080@60Hz 24bpp fallback format supported");
+ if (link->dpcd_caps.fallback_formats.bits.dp_1280x720_60Hz_24bpp_support)
+ DC_LOG_DP2("\t1280x720@60Hz 24bpp fallback format supported");
+ if (link->dpcd_caps.fallback_formats.bits.dp_1024x768_60Hz_24bpp_support)
+ DC_LOG_DP2("\t1024x768@60Hz 24bpp fallback format supported");
+ if (link->dpcd_caps.fallback_formats.raw == 0) {
+ DC_LOG_DP2("\tno supported fallback formats, assume 1920x1080@60Hz 24bpp is supported");
+ link->dpcd_caps.fallback_formats.bits.dp_1920x1080_60Hz_24bpp_support = 1;
+ }
+
+ core_link_read_dpcd(link,
+ DP_FEC_CAPABILITY_1,
+ &link->dpcd_caps.fec_cap1.raw,
+ sizeof(link->dpcd_caps.fec_cap1.raw));
+ DC_LOG_DP2("FEC CAPABILITY 1 is read at link %d", link->link_index);
+ if (link->dpcd_caps.fec_cap1.bits.AGGREGATED_ERROR_COUNTERS_CAPABLE)
+ DC_LOG_DP2("\tFEC aggregated error counters are supported");
+ }
+
+ retrieve_cable_id(link);
+ dpcd_write_cable_id_to_dprx(link);
+
+ /* Connectivity log: detection */
+ CONN_DATA_DETECT(link, dpcd_data, sizeof(dpcd_data), "Rx Caps: ");
+
+ return true;
+}
+
+bool detect_dp_sink_caps(struct dc_link *link)
+{
+ return retrieve_link_cap(link);
+}
+
+void detect_edp_sink_caps(struct dc_link *link)
+{
+ uint8_t supported_link_rates[16];
+ uint32_t entry;
+ uint32_t link_rate_in_khz;
+ enum dc_link_rate link_rate = LINK_RATE_UNKNOWN;
+ uint8_t backlight_adj_cap;
+ uint8_t general_edp_cap;
+
+ retrieve_link_cap(link);
+ link->dpcd_caps.edp_supported_link_rates_count = 0;
+ memset(supported_link_rates, 0, sizeof(supported_link_rates));
+
+ /*
+ * edp_supported_link_rates_count is only valid for eDP v1.4 or higher.
+ * Per VESA eDP spec, "The DPCD revision for eDP v1.4 is 13h"
+ */
+ if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_13 &&
+ (link->panel_config.ilr.optimize_edp_link_rate ||
+ link->reported_link_cap.link_rate == LINK_RATE_UNKNOWN)) {
+ // Read DPCD 00010h - 0001Fh 16 bytes at one shot
+ core_link_read_dpcd(link, DP_SUPPORTED_LINK_RATES,
+ supported_link_rates, sizeof(supported_link_rates));
+
+ for (entry = 0; entry < 16; entry += 2) {
+ // DPCD register reports per-lane link rate = 16-bit link rate capability
+ // value X 200 kHz. Need multiplier to find link rate in kHz.
+ link_rate_in_khz = (supported_link_rates[entry+1] * 0x100 +
+ supported_link_rates[entry]) * 200;
+
+ if (link_rate_in_khz != 0) {
+ link_rate = linkRateInKHzToLinkRateMultiplier(link_rate_in_khz);
+ link->dpcd_caps.edp_supported_link_rates[link->dpcd_caps.edp_supported_link_rates_count] = link_rate;
+ link->dpcd_caps.edp_supported_link_rates_count++;
+
+ if (link->reported_link_cap.link_rate < link_rate)
+ link->reported_link_cap.link_rate = link_rate;
+ }
+ }
+ }
+ core_link_read_dpcd(link, DP_EDP_BACKLIGHT_ADJUSTMENT_CAP,
+ &backlight_adj_cap, sizeof(backlight_adj_cap));
+
+ link->dpcd_caps.dynamic_backlight_capable_edp =
+ (backlight_adj_cap & DP_EDP_DYNAMIC_BACKLIGHT_CAP) ? true:false;
+
+ core_link_read_dpcd(link, DP_EDP_GENERAL_CAP_1,
+ &general_edp_cap, sizeof(general_edp_cap));
+
+ link->dpcd_caps.set_power_state_capable_edp =
+ (general_edp_cap & DP_EDP_SET_POWER_CAP) ? true:false;
+
+ set_default_brightness_aux(link);
+
+ core_link_read_dpcd(link, DP_EDP_DPCD_REV,
+ &link->dpcd_caps.edp_rev,
+ sizeof(link->dpcd_caps.edp_rev));
+ /*
+ * PSR is only valid for eDP v1.3 or higher.
+ */
+ if (link->dpcd_caps.edp_rev >= DP_EDP_13) {
+ core_link_read_dpcd(link, DP_PSR_SUPPORT,
+ &link->dpcd_caps.psr_info.psr_version,
+ sizeof(link->dpcd_caps.psr_info.psr_version));
+ if (link->dpcd_caps.sink_dev_id == DP_BRANCH_DEVICE_ID_001CF8)
+ core_link_read_dpcd(link, DP_FORCE_PSRSU_CAPABILITY,
+ &link->dpcd_caps.psr_info.force_psrsu_cap,
+ sizeof(link->dpcd_caps.psr_info.force_psrsu_cap));
+ core_link_read_dpcd(link, DP_PSR_CAPS,
+ &link->dpcd_caps.psr_info.psr_dpcd_caps.raw,
+ sizeof(link->dpcd_caps.psr_info.psr_dpcd_caps.raw));
+ if (link->dpcd_caps.psr_info.psr_dpcd_caps.bits.Y_COORDINATE_REQUIRED) {
+ core_link_read_dpcd(link, DP_PSR2_SU_Y_GRANULARITY,
+ &link->dpcd_caps.psr_info.psr2_su_y_granularity_cap,
+ sizeof(link->dpcd_caps.psr_info.psr2_su_y_granularity_cap));
+ }
+ }
+
+ /*
+ * ALPM is only valid for eDP v1.4 or higher.
+ */
+ if (link->dpcd_caps.dpcd_rev.raw >= DP_EDP_14)
+ core_link_read_dpcd(link, DP_RECEIVER_ALPM_CAP,
+ &link->dpcd_caps.alpm_caps.raw,
+ sizeof(link->dpcd_caps.alpm_caps.raw));
+}
+
+bool dc_link_dp_get_max_link_enc_cap(const struct dc_link *link, struct dc_link_settings *max_link_enc_cap)
+{
+ struct link_encoder *link_enc = NULL;
+
+ if (!max_link_enc_cap) {
+ DC_LOG_ERROR("%s: Could not return max link encoder caps", __func__);
+ return false;
+ }
+
+ link_enc = link_enc_cfg_get_link_enc(link);
+ ASSERT(link_enc);
+
+ if (link_enc && link_enc->funcs->get_max_link_cap) {
+ link_enc->funcs->get_max_link_cap(link_enc, max_link_enc_cap);
+ return true;
+ }
+
+ DC_LOG_ERROR("%s: Max link encoder caps unknown", __func__);
+ max_link_enc_cap->lane_count = 1;
+ max_link_enc_cap->link_rate = 6;
+ return false;
+}
+
+const struct dc_link_settings *dc_link_get_link_cap(
+ const struct dc_link *link)
+{
+ if (link->preferred_link_setting.lane_count != LANE_COUNT_UNKNOWN &&
+ link->preferred_link_setting.link_rate != LINK_RATE_UNKNOWN)
+ return &link->preferred_link_setting;
+ return &link->verified_link_cap;
+}
+
+struct dc_link_settings dp_get_max_link_cap(struct dc_link *link)
+{
+ struct dc_link_settings max_link_cap = {0};
+ enum dc_link_rate lttpr_max_link_rate;
+ enum dc_link_rate cable_max_link_rate;
+ struct link_encoder *link_enc = NULL;
+
+
+ link_enc = link_enc_cfg_get_link_enc(link);
+ ASSERT(link_enc);
+
+ /* get max link encoder capability */
+ if (link_enc)
+ link_enc->funcs->get_max_link_cap(link_enc, &max_link_cap);
+
+ /* Lower link settings based on sink's link cap */
+ if (link->reported_link_cap.lane_count < max_link_cap.lane_count)
+ max_link_cap.lane_count =
+ link->reported_link_cap.lane_count;
+ if (link->reported_link_cap.link_rate < max_link_cap.link_rate)
+ max_link_cap.link_rate =
+ link->reported_link_cap.link_rate;
+ if (link->reported_link_cap.link_spread <
+ max_link_cap.link_spread)
+ max_link_cap.link_spread =
+ link->reported_link_cap.link_spread;
+
+ /* Lower link settings based on cable attributes
+ * Cable ID is a DP2 feature to identify max certified link rate that
+ * a cable can carry. The cable identification method requires both
+ * cable and display hardware support. Since the specs comes late, it is
+ * anticipated that the first round of DP2 cables and displays may not
+ * be fully compatible to reliably return cable ID data. Therefore the
+ * decision of our cable id policy is that if the cable can return non
+ * zero cable id data, we will take cable's link rate capability into
+ * account. However if we get zero data, the cable link rate capability
+ * is considered inconclusive. In this case, we will not take cable's
+ * capability into account to avoid of over limiting hardware capability
+ * from users. The max overall link rate capability is still determined
+ * after actual dp pre-training. Cable id is considered as an auxiliary
+ * method of determining max link bandwidth capability.
+ */
+ cable_max_link_rate = get_cable_max_link_rate(link);
+
+ if (!link->dc->debug.ignore_cable_id &&
+ cable_max_link_rate != LINK_RATE_UNKNOWN &&
+ cable_max_link_rate < max_link_cap.link_rate)
+ max_link_cap.link_rate = cable_max_link_rate;
+
+ /* account for lttpr repeaters cap
+ * notes: repeaters do not snoop in the DPRX Capabilities addresses (3.6.3).
+ */
+ if (dp_is_lttpr_present(link)) {
+ if (link->dpcd_caps.lttpr_caps.max_lane_count < max_link_cap.lane_count)
+ max_link_cap.lane_count = link->dpcd_caps.lttpr_caps.max_lane_count;
+ lttpr_max_link_rate = get_lttpr_max_link_rate(link);
+
+ if (lttpr_max_link_rate < max_link_cap.link_rate)
+ max_link_cap.link_rate = lttpr_max_link_rate;
+
+ DC_LOG_HW_LINK_TRAINING("%s\n Training with LTTPR, max_lane count %d max_link rate %d \n",
+ __func__,
+ max_link_cap.lane_count,
+ max_link_cap.link_rate);
+ }
+
+ if (link_dp_get_encoding_format(&max_link_cap) == DP_128b_132b_ENCODING &&
+ link->dc->debug.disable_uhbr)
+ max_link_cap.link_rate = LINK_RATE_HIGH3;
+
+ return max_link_cap;
+}
+
+static bool dp_verify_link_cap(
+ struct dc_link *link,
+ struct dc_link_settings *known_limit_link_setting,
+ int *fail_count)
+{
+ struct dc_link_settings cur_link_settings = {0};
+ struct dc_link_settings max_link_settings = *known_limit_link_setting;
+ bool success = false;
+ bool skip_video_pattern;
+ enum clock_source_id dp_cs_id = get_clock_source_id(link);
+ enum link_training_result status = LINK_TRAINING_SUCCESS;
+ union hpd_irq_data irq_data;
+ struct link_resource link_res;
+
+ memset(&irq_data, 0, sizeof(irq_data));
+ cur_link_settings = max_link_settings;
+
+ /* Grant extended timeout request */
+ if (dp_is_lttpr_present(link) && link->dpcd_caps.lttpr_caps.max_ext_timeout > 0) {
+ uint8_t grant = link->dpcd_caps.lttpr_caps.max_ext_timeout & 0x80;
+
+ core_link_write_dpcd(link, DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT, &grant, sizeof(grant));
+ }
+
+ do {
+ if (!get_temp_dp_link_res(link, &link_res, &cur_link_settings))
+ continue;
+
+ skip_video_pattern = cur_link_settings.link_rate != LINK_RATE_LOW;
+ dp_enable_link_phy(
+ link,
+ &link_res,
+ link->connector_signal,
+ dp_cs_id,
+ &cur_link_settings);
+
+ status = dp_perform_link_training(
+ link,
+ &link_res,
+ &cur_link_settings,
+ skip_video_pattern);
+
+ if (status == LINK_TRAINING_SUCCESS) {
+ success = true;
+ udelay(1000);
+ if (dc_link_dp_read_hpd_rx_irq_data(link, &irq_data) == DC_OK &&
+ dc_link_check_link_loss_status(
+ link,
+ &irq_data))
+ (*fail_count)++;
+
+ } else {
+ (*fail_count)++;
+ }
+ dp_trace_lt_total_count_increment(link, true);
+ dp_trace_lt_result_update(link, status, true);
+ dp_disable_link_phy(link, &link_res, link->connector_signal);
+ } while (!success && decide_fallback_link_setting(link,
+ &max_link_settings, &cur_link_settings, status));
+
+ link->verified_link_cap = success ?
+ cur_link_settings : fail_safe_link_settings;
+ return success;
+}
+
+bool dp_verify_link_cap_with_retries(
+ struct dc_link *link,
+ struct dc_link_settings *known_limit_link_setting,
+ int attempts)
+{
+ int i = 0;
+ bool success = false;
+ int fail_count = 0;
+
+ dp_trace_detect_lt_init(link);
+
+ if (link->link_enc && link->link_enc->features.flags.bits.DP_IS_USB_C &&
+ link->dc->debug.usbc_combo_phy_reset_wa)
+ apply_usbc_combo_phy_reset_wa(link, known_limit_link_setting);
+
+ dp_trace_set_lt_start_timestamp(link, false);
+ for (i = 0; i < attempts; i++) {
+ enum dc_connection_type type = dc_connection_none;
+
+ memset(&link->verified_link_cap, 0,
+ sizeof(struct dc_link_settings));
+ if (!dc_link_detect_connection_type(link, &type) || type == dc_connection_none) {
+ link->verified_link_cap = fail_safe_link_settings;
+ break;
+ } else if (dp_verify_link_cap(link, known_limit_link_setting,
+ &fail_count) && fail_count == 0) {
+ success = true;
+ break;
+ }
+ msleep(10);
+ }
+
+ dp_trace_lt_fail_count_update(link, fail_count, true);
+ dp_trace_set_lt_end_timestamp(link, true);
+
+ return success;
+}
+
+/**
+ * dc_link_is_dp_sink_present() - Check if there is a native DP
+ * or passive DP-HDMI dongle connected
+ */
+bool dc_link_is_dp_sink_present(struct dc_link *link)
+{
+ enum gpio_result gpio_result;
+ uint32_t clock_pin = 0;
+ uint8_t retry = 0;
+ struct ddc *ddc;
+
+ enum connector_id connector_id =
+ dal_graphics_object_id_get_connector_id(link->link_id);
+
+ bool present =
+ ((connector_id == CONNECTOR_ID_DISPLAY_PORT) ||
+ (connector_id == CONNECTOR_ID_EDP) ||
+ (connector_id == CONNECTOR_ID_USBC));
+
+ ddc = get_ddc_pin(link->ddc);
+
+ if (!ddc) {
+ BREAK_TO_DEBUGGER();
+ return present;
+ }
+
+ /* Open GPIO and set it to I2C mode */
+ /* Note: this GpioMode_Input will be converted
+ * to GpioConfigType_I2cAuxDualMode in GPIO component,
+ * which indicates we need additional delay
+ */
+
+ if (dal_ddc_open(ddc, GPIO_MODE_INPUT,
+ GPIO_DDC_CONFIG_TYPE_MODE_I2C) != GPIO_RESULT_OK) {
+ dal_ddc_close(ddc);
+
+ return present;
+ }
+
+ /*
+ * Read GPIO: DP sink is present if both clock and data pins are zero
+ *
+ * [W/A] plug-unplug DP cable, sometimes customer board has
+ * one short pulse on clk_pin(1V, < 1ms). DP will be config to HDMI/DVI
+ * then monitor can't br light up. Add retry 3 times
+ * But in real passive dongle, it need additional 3ms to detect
+ */
+ do {
+ gpio_result = dal_gpio_get_value(ddc->pin_clock, &clock_pin);
+ ASSERT(gpio_result == GPIO_RESULT_OK);
+ if (clock_pin)
+ udelay(1000);
+ else
+ break;
+ } while (retry++ < 3);
+
+ present = (gpio_result == GPIO_RESULT_OK) && !clock_pin;
+
+ dal_ddc_close(ddc);
+
+ return present;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.h
new file mode 100644
index 000000000000..f79e4a4a9db6
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_LINK_DP_CAPABILITY_H__
+#define __DC_LINK_DP_CAPABILITY_H__
+
+#include "link.h"
+
+bool detect_dp_sink_caps(struct dc_link *link);
+
+void detect_edp_sink_caps(struct dc_link *link);
+
+struct dc_link_settings dp_get_max_link_cap(struct dc_link *link);
+
+
+enum dc_status dp_retrieve_lttpr_cap(struct dc_link *link);
+
+/* Convert PHY repeater count read from DPCD uint8_t. */
+uint8_t dp_parse_lttpr_repeater_count(uint8_t lttpr_repeater_count);
+
+bool dp_is_lttpr_present(struct dc_link *link);
+
+bool is_dp_active_dongle(const struct dc_link *link);
+
+bool is_dp_branch_device(const struct dc_link *link);
+
+void dpcd_write_cable_id_to_dprx(struct dc_link *link);
+
+/* Initialize output parameter lt_settings. */
+void dp_decide_training_settings(
+ struct dc_link *link,
+ const struct dc_link_settings *link_setting,
+ struct link_training_settings *lt_settings);
+
+
+bool decide_edp_link_settings_with_dsc(struct dc_link *link,
+ struct dc_link_settings *link_setting,
+ uint32_t req_bw,
+ enum dc_link_rate max_link_rate);
+
+void dpcd_set_source_specific_data(struct dc_link *link);
+
+/*query dpcd for version and mst cap addresses*/
+bool read_is_mst_supported(struct dc_link *link);
+
+bool decide_fallback_link_setting(
+ struct dc_link *link,
+ struct dc_link_settings *max,
+ struct dc_link_settings *cur,
+ enum link_training_result training_result);
+
+bool dp_verify_link_cap_with_retries(
+ struct dc_link *link,
+ struct dc_link_settings *known_limit_link_setting,
+ int attempts);
+
+#endif /* __DC_LINK_DP_CAPABILITY_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c
new file mode 100644
index 000000000000..32f48a48e9dd
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c
@@ -0,0 +1,106 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dc.h"
+#include "inc/core_status.h"
+#include "dc_link.h"
+#include "dpcd_defs.h"
+
+#include "link_dp_dpia.h"
+#include "link_hwss.h"
+#include "dm_helpers.h"
+#include "dmub/inc/dmub_cmd.h"
+#include "link_dpcd.h"
+#include "link_dp_training.h"
+#include "dc_dmub_srv.h"
+
+#define DC_LOGGER \
+ link->ctx->logger
+
+/** @note Can remove once DP tunneling registers in upstream include/drm/drm_dp_helper.h */
+/* DPCD DP Tunneling over USB4 */
+#define DP_TUNNELING_CAPABILITIES_SUPPORT 0xe000d
+#define DP_IN_ADAPTER_INFO 0xe000e
+#define DP_USB4_DRIVER_ID 0xe000f
+#define DP_USB4_ROUTER_TOPOLOGY_ID 0xe001b
+
+enum dc_status dpcd_get_tunneling_device_data(struct dc_link *link)
+{
+ enum dc_status status = DC_OK;
+ uint8_t dpcd_dp_tun_data[3] = {0};
+ uint8_t dpcd_topology_data[DPCD_USB4_TOPOLOGY_ID_LEN] = {0};
+ uint8_t i = 0;
+
+ status = core_link_read_dpcd(
+ link,
+ DP_TUNNELING_CAPABILITIES_SUPPORT,
+ dpcd_dp_tun_data,
+ sizeof(dpcd_dp_tun_data));
+
+ status = core_link_read_dpcd(
+ link,
+ DP_USB4_ROUTER_TOPOLOGY_ID,
+ dpcd_topology_data,
+ sizeof(dpcd_topology_data));
+
+ link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.raw =
+ dpcd_dp_tun_data[DP_TUNNELING_CAPABILITIES_SUPPORT - DP_TUNNELING_CAPABILITIES_SUPPORT];
+ link->dpcd_caps.usb4_dp_tun_info.dpia_info.raw =
+ dpcd_dp_tun_data[DP_IN_ADAPTER_INFO - DP_TUNNELING_CAPABILITIES_SUPPORT];
+ link->dpcd_caps.usb4_dp_tun_info.usb4_driver_id =
+ dpcd_dp_tun_data[DP_USB4_DRIVER_ID - DP_TUNNELING_CAPABILITIES_SUPPORT];
+
+ for (i = 0; i < DPCD_USB4_TOPOLOGY_ID_LEN; i++)
+ link->dpcd_caps.usb4_dp_tun_info.usb4_topology_id[i] = dpcd_topology_data[i];
+
+ return status;
+}
+
+bool dc_link_dpia_query_hpd_status(struct dc_link *link)
+{
+ union dmub_rb_cmd cmd = {0};
+ struct dc_dmub_srv *dmub_srv = link->ctx->dmub_srv;
+ bool is_hpd_high = false;
+
+ /* prepare QUERY_HPD command */
+ cmd.query_hpd.header.type = DMUB_CMD__QUERY_HPD_STATE;
+ cmd.query_hpd.data.instance = link->link_id.enum_id - ENUM_ID_1;
+ cmd.query_hpd.data.ch_type = AUX_CHANNEL_DPIA;
+
+ /* Return HPD status reported by DMUB if query successfully executed. */
+ if (dc_dmub_srv_cmd_with_reply_data(dmub_srv, &cmd) && cmd.query_hpd.data.status == AUX_RET_SUCCESS)
+ is_hpd_high = cmd.query_hpd.data.result;
+
+ DC_LOG_DEBUG("%s: link(%d) dpia(%d) cmd_status(%d) result(%d)\n",
+ __func__,
+ link->link_index,
+ link->link_id.enum_id - ENUM_ID_1,
+ cmd.query_hpd.data.status,
+ cmd.query_hpd.data.result);
+
+ return is_hpd_high;
+}
+
diff --git a/drivers/gpu/drm/tdfx/tdfx_drv.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.h
index 84204ec1b046..98935cc10bb7 100644
--- a/drivers/gpu/drm/tdfx/tdfx_drv.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.h
@@ -1,9 +1,6 @@
-/* tdfx.h -- 3dfx DRM template customization -*- linux-c -*-
- * Created: Wed Feb 14 12:32:32 2001 by gareth@valinux.com
- */
+/* SPDX-License-Identifier: MIT */
/*
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
+ * Copyright 2021 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -12,36 +9,35 @@
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
- * Authors:
- * Gareth Hughes <gareth@valinux.com>
+ * Authors: AMD
+ *
*/
-#ifndef __TDFX_H__
-#define __TDFX_H__
+#ifndef __DC_LINK_DPIA_H__
+#define __DC_LINK_DPIA_H__
-/* General customization:
- */
+#include "link.h"
-#define DRIVER_AUTHOR "VA Linux Systems Inc."
+/* Read tunneling device capability from DPCD and update link capability
+ * accordingly.
+ */
+enum dc_status dpcd_get_tunneling_device_data(struct dc_link *link);
-#define DRIVER_NAME "tdfx"
-#define DRIVER_DESC "3dfx Banshee/Voodoo3+"
-#define DRIVER_DATE "20010216"
+/* Query hot plug status of USB4 DP tunnel.
+ * Returns true if HPD high.
+ */
+bool dc_link_dpia_query_hpd_status(struct dc_link *link);
-#define DRIVER_MAJOR 1
-#define DRIVER_MINOR 0
-#define DRIVER_PATCHLEVEL 0
-#endif
+#endif /* __DC_LINK_DPIA_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c
new file mode 100644
index 000000000000..f69e681b3b5b
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c
@@ -0,0 +1,441 @@
+
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+/*********************************************************************/
+// USB4 DPIA BANDWIDTH ALLOCATION LOGIC
+/*********************************************************************/
+#include "dc.h"
+#include "dc_link.h"
+#include "link_dp_dpia_bw.h"
+#include "drm_dp_helper_dc.h"
+#include "link_dpcd.h"
+
+#define Kbps_TO_Gbps (1000 * 1000)
+
+// ------------------------------------------------------------------
+// PRIVATE FUNCTIONS
+// ------------------------------------------------------------------
+/*
+ * Always Check the following:
+ * - Is it USB4 link?
+ * - Is HPD HIGH?
+ * - Is BW Allocation Support Mode enabled on DP-Tx?
+ */
+static bool get_bw_alloc_proceed_flag(struct dc_link *tmp)
+{
+ return (tmp && DISPLAY_ENDPOINT_USB4_DPIA == tmp->ep_type
+ && tmp->hpd_status
+ && tmp->dpia_bw_alloc_config.bw_alloc_enabled);
+}
+static void reset_bw_alloc_struct(struct dc_link *link)
+{
+ link->dpia_bw_alloc_config.bw_alloc_enabled = false;
+ link->dpia_bw_alloc_config.sink_verified_bw = 0;
+ link->dpia_bw_alloc_config.sink_max_bw = 0;
+ link->dpia_bw_alloc_config.estimated_bw = 0;
+ link->dpia_bw_alloc_config.bw_granularity = 0;
+ link->dpia_bw_alloc_config.response_ready = false;
+}
+static uint8_t get_bw_granularity(struct dc_link *link)
+{
+ uint8_t bw_granularity = 0;
+
+ core_link_read_dpcd(
+ link,
+ DP_BW_GRANULALITY,
+ &bw_granularity,
+ sizeof(uint8_t));
+
+ switch (bw_granularity & 0x3) {
+ case 0:
+ bw_granularity = 4;
+ break;
+ case 1:
+ default:
+ bw_granularity = 2;
+ break;
+ }
+
+ return bw_granularity;
+}
+static int get_estimated_bw(struct dc_link *link)
+{
+ uint8_t bw_estimated_bw = 0;
+
+ if (core_link_read_dpcd(
+ link,
+ ESTIMATED_BW,
+ &bw_estimated_bw,
+ sizeof(uint8_t)) != DC_OK)
+ dm_output_to_console("%s: AUX W/R ERROR @ 0x%x\n", __func__, ESTIMATED_BW);
+
+ return bw_estimated_bw * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity);
+}
+static bool allocate_usb4_bw(int *stream_allocated_bw, int bw_needed, struct dc_link *link)
+{
+ if (bw_needed > 0)
+ *stream_allocated_bw += bw_needed;
+
+ return true;
+}
+static bool deallocate_usb4_bw(int *stream_allocated_bw, int bw_to_dealloc, struct dc_link *link)
+{
+ bool ret = false;
+
+ if (*stream_allocated_bw > 0) {
+ *stream_allocated_bw -= bw_to_dealloc;
+ ret = true;
+ } else {
+ //Do nothing for now
+ ret = true;
+ }
+
+ // Unplug so reset values
+ if (!link->hpd_status)
+ reset_bw_alloc_struct(link);
+
+ return ret;
+}
+/*
+ * Read all New BW alloc configuration ex: estimated_bw, allocated_bw,
+ * granuality, Driver_ID, CM_Group, & populate the BW allocation structs
+ * for host router and dpia
+ */
+static void init_usb4_bw_struct(struct dc_link *link)
+{
+ // Init the known values
+ link->dpia_bw_alloc_config.bw_granularity = get_bw_granularity(link);
+ link->dpia_bw_alloc_config.estimated_bw = get_estimated_bw(link);
+}
+static uint8_t get_lowest_dpia_index(struct dc_link *link)
+{
+ const struct dc *dc_struct = link->dc;
+ uint8_t idx = 0xFF;
+
+ for (int i = 0; i < MAX_PIPES * 2; ++i) {
+
+ if (!dc_struct->links[i] ||
+ dc_struct->links[i]->ep_type != DISPLAY_ENDPOINT_USB4_DPIA)
+ continue;
+
+ if (idx > dc_struct->links[i]->link_index)
+ idx = dc_struct->links[i]->link_index;
+ }
+
+ return idx;
+}
+/*
+ * Get the Max Available BW or Max Estimated BW for each Host Router
+ *
+ * @link: pointer to the dc_link struct instance
+ * @type: ESTIMATD BW or MAX AVAILABLE BW
+ *
+ * return: response_ready flag from dc_link struct
+ */
+static int get_host_router_total_bw(struct dc_link *link, uint8_t type)
+{
+ const struct dc *dc_struct = link->dc;
+ uint8_t lowest_dpia_index = get_lowest_dpia_index(link);
+ uint8_t idx = (link->link_index - lowest_dpia_index) / 2, idx_temp = 0;
+ struct dc_link *link_temp;
+ int total_bw = 0;
+
+ for (int i = 0; i < MAX_PIPES * 2; ++i) {
+
+ if (!dc_struct->links[i] || dc_struct->links[i]->ep_type != DISPLAY_ENDPOINT_USB4_DPIA)
+ continue;
+
+ link_temp = dc_struct->links[i];
+ if (!link_temp || !link_temp->hpd_status)
+ continue;
+
+ idx_temp = (link_temp->link_index - lowest_dpia_index) / 2;
+
+ if (idx_temp == idx) {
+
+ if (type == HOST_ROUTER_BW_ESTIMATED)
+ total_bw += link_temp->dpia_bw_alloc_config.estimated_bw;
+ else if (type == HOST_ROUTER_BW_ALLOCATED)
+ total_bw += link_temp->dpia_bw_alloc_config.sink_allocated_bw;
+ }
+ }
+
+ return total_bw;
+}
+/*
+ * Cleanup function for when the dpia is unplugged to reset struct
+ * and perform any required clean up
+ *
+ * @link: pointer to the dc_link struct instance
+ *
+ * return: none
+ */
+static bool dpia_bw_alloc_unplug(struct dc_link *link)
+{
+ bool ret = false;
+
+ if (!link)
+ return true;
+
+ return deallocate_usb4_bw(&link->dpia_bw_alloc_config.sink_allocated_bw,
+ link->dpia_bw_alloc_config.sink_allocated_bw, link);
+}
+static void dc_link_set_usb4_req_bw_req(struct dc_link *link, int req_bw)
+{
+ uint8_t requested_bw;
+ uint32_t temp;
+
+ // 1. Add check for this corner case #1
+ if (req_bw > link->dpia_bw_alloc_config.estimated_bw)
+ req_bw = link->dpia_bw_alloc_config.estimated_bw;
+
+ temp = req_bw * link->dpia_bw_alloc_config.bw_granularity;
+ requested_bw = temp / Kbps_TO_Gbps;
+
+ // Always make sure to add more to account for floating points
+ if (temp % Kbps_TO_Gbps)
+ ++requested_bw;
+
+ // 2. Add check for this corner case #2
+ req_bw = requested_bw * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity);
+ if (req_bw == link->dpia_bw_alloc_config.sink_allocated_bw)
+ return;
+
+ if (core_link_write_dpcd(
+ link,
+ REQUESTED_BW,
+ &requested_bw,
+ sizeof(uint8_t)) != DC_OK)
+ dm_output_to_console("%s: AUX W/R ERROR @ 0x%x\n", __func__, REQUESTED_BW);
+ else
+ link->dpia_bw_alloc_config.response_ready = false; // Reset flag
+}
+/*
+ * Return the response_ready flag from dc_link struct
+ *
+ * @link: pointer to the dc_link struct instance
+ *
+ * return: response_ready flag from dc_link struct
+ */
+static bool get_cm_response_ready_flag(struct dc_link *link)
+{
+ return link->dpia_bw_alloc_config.response_ready;
+}
+// ------------------------------------------------------------------
+// PUBLIC FUNCTIONS
+// ------------------------------------------------------------------
+bool set_dptx_usb4_bw_alloc_support(struct dc_link *link)
+{
+ bool ret = false;
+ uint8_t response = 0,
+ bw_support_dpia = 0,
+ bw_support_cm = 0;
+
+ if (!(link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && link->hpd_status))
+ goto out;
+
+ if (core_link_read_dpcd(
+ link,
+ DP_TUNNELING_CAPABILITIES,
+ &response,
+ sizeof(uint8_t)) != DC_OK)
+ dm_output_to_console("%s: AUX W/R ERROR @ 0x%x\n", __func__, DP_TUNNELING_CAPABILITIES);
+
+ bw_support_dpia = (response >> 7) & 1;
+
+ if (core_link_read_dpcd(
+ link,
+ USB4_DRIVER_BW_CAPABILITY,
+ &response,
+ sizeof(uint8_t)) != DC_OK)
+ dm_output_to_console("%s: AUX W/R ERROR @ 0x%x\n", __func__, DP_TUNNELING_CAPABILITIES);
+
+ bw_support_cm = (response >> 7) & 1;
+
+ /* Send request acknowledgment to Turn ON DPTX support */
+ if (bw_support_cm && bw_support_dpia) {
+
+ response = 0x80;
+ if (core_link_write_dpcd(
+ link,
+ DPTX_BW_ALLOCATION_MODE_CONTROL,
+ &response,
+ sizeof(uint8_t)) != DC_OK)
+ dm_output_to_console("%s: AUX W/R ERROR @ 0x%x\n",
+ "**** FAILURE Enabling DPtx BW Allocation Mode Support ***\n",
+ __func__, DP_TUNNELING_CAPABILITIES);
+ else {
+
+ // SUCCESS Enabled DPtx BW Allocation Mode Support
+ link->dpia_bw_alloc_config.bw_alloc_enabled = true;
+ dm_output_to_console("**** SUCCESS Enabling DPtx BW Allocation Mode Support ***\n");
+
+ ret = true;
+ init_usb4_bw_struct(link);
+ }
+ }
+
+out:
+ return ret;
+}
+void dc_link_get_usb4_req_bw_resp(struct dc_link *link, uint8_t bw, uint8_t result)
+{
+ if (!get_bw_alloc_proceed_flag((link)))
+ return;
+
+ switch (result) {
+
+ case DPIA_BW_REQ_FAILED:
+
+ dm_output_to_console("%s: *** *** BW REQ FAILURE for DP-TX Request *** ***\n", __func__);
+
+ // Update the new Estimated BW value updated by CM
+ link->dpia_bw_alloc_config.estimated_bw =
+ bw * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity);
+
+ dc_link_set_usb4_req_bw_req(link, link->dpia_bw_alloc_config.estimated_bw);
+ link->dpia_bw_alloc_config.response_ready = false;
+
+ /*
+ * If FAIL then it is either:
+ * 1. Due to DP-Tx trying to allocate more than available i.e. it failed locally
+ * => get estimated and allocate that
+ * 2. Due to the fact that DP-Tx tried to allocated ESTIMATED BW and failed then
+ * CM will have to update 0xE0023 with new ESTIMATED BW value.
+ */
+ break;
+
+ case DPIA_BW_REQ_SUCCESS:
+
+ dm_output_to_console("%s: *** BW REQ SUCCESS for DP-TX Request ***\n", __func__);
+
+ // 1. SUCCESS 1st time before any Pruning is done
+ // 2. SUCCESS after prev. FAIL before any Pruning is done
+ // 3. SUCCESS after Pruning is done but before enabling link
+
+ int needed = bw * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity);
+
+ // 1.
+ if (!link->dpia_bw_alloc_config.sink_allocated_bw) {
+
+ allocate_usb4_bw(&link->dpia_bw_alloc_config.sink_allocated_bw, needed, link);
+ link->dpia_bw_alloc_config.sink_verified_bw =
+ link->dpia_bw_alloc_config.sink_allocated_bw;
+
+ // SUCCESS from first attempt
+ if (link->dpia_bw_alloc_config.sink_allocated_bw >
+ link->dpia_bw_alloc_config.sink_max_bw)
+ link->dpia_bw_alloc_config.sink_verified_bw =
+ link->dpia_bw_alloc_config.sink_max_bw;
+ }
+ // 3.
+ else if (link->dpia_bw_alloc_config.sink_allocated_bw) {
+
+ // Find out how much do we need to de-alloc
+ if (link->dpia_bw_alloc_config.sink_allocated_bw > needed)
+ deallocate_usb4_bw(&link->dpia_bw_alloc_config.sink_allocated_bw,
+ link->dpia_bw_alloc_config.sink_allocated_bw - needed, link);
+ else
+ allocate_usb4_bw(&link->dpia_bw_alloc_config.sink_allocated_bw,
+ needed - link->dpia_bw_alloc_config.sink_allocated_bw, link);
+ }
+
+ // 4. If this is the 2nd sink then any unused bw will be reallocated to master DPIA
+ // => check if estimated_bw changed
+
+ link->dpia_bw_alloc_config.response_ready = true;
+ break;
+
+ case DPIA_EST_BW_CHANGED:
+
+ dm_output_to_console("%s: *** ESTIMATED BW CHANGED for DP-TX Request ***\n", __func__);
+
+ int available = 0, estimated = bw * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity);
+ int host_router_total_estimated_bw = get_host_router_total_bw(link, HOST_ROUTER_BW_ESTIMATED);
+
+ // 1. If due to unplug of other sink
+ if (estimated == host_router_total_estimated_bw) {
+
+ // First update the estimated & max_bw fields
+ if (link->dpia_bw_alloc_config.estimated_bw < estimated) {
+ available = estimated - link->dpia_bw_alloc_config.estimated_bw;
+ link->dpia_bw_alloc_config.estimated_bw = estimated;
+ }
+ }
+ // 2. If due to realloc bw btw 2 dpia due to plug OR realloc unused Bw
+ else {
+
+ // We took from another unplugged/problematic sink to give to us
+ if (link->dpia_bw_alloc_config.estimated_bw < estimated)
+ available = estimated - link->dpia_bw_alloc_config.estimated_bw;
+
+ // We lost estimated bw usually due to plug event of other dpia
+ link->dpia_bw_alloc_config.estimated_bw = estimated;
+ }
+ break;
+
+ case DPIA_BW_ALLOC_CAPS_CHANGED:
+
+ dm_output_to_console("%s: *** BW ALLOC CAPABILITY CHANGED for DP-TX Request ***\n", __func__);
+ link->dpia_bw_alloc_config.bw_alloc_enabled = false;
+ break;
+ }
+}
+int dc_link_dp_dpia_handle_usb4_bandwidth_allocation_for_link(struct dc_link *link, int peak_bw)
+{
+ int ret = 0;
+ uint8_t timeout = 10;
+
+ if (!(link && DISPLAY_ENDPOINT_USB4_DPIA == link->ep_type
+ && link->dpia_bw_alloc_config.bw_alloc_enabled))
+ goto out;
+
+ //1. Hot Plug
+ if (link->hpd_status && peak_bw > 0) {
+
+ // If DP over USB4 then we need to check BW allocation
+ link->dpia_bw_alloc_config.sink_max_bw = peak_bw;
+ dc_link_set_usb4_req_bw_req(link, link->dpia_bw_alloc_config.sink_max_bw);
+
+ do {
+ if (!timeout > 0)
+ timeout--;
+ else
+ break;
+ udelay(10 * 1000);
+ } while (!get_cm_response_ready_flag(link));
+
+ if (!timeout)
+ ret = 0;// ERROR TIMEOUT waiting for response for allocating bw
+ else if (link->dpia_bw_alloc_config.sink_allocated_bw > 0)
+ ret = get_host_router_total_bw(link, HOST_ROUTER_BW_ALLOCATED);
+ }
+ //2. Cold Unplug
+ else if (!link->hpd_status)
+ dpia_bw_alloc_unplug(link);
+
+out:
+ return ret;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dp_dpia_bw.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h
index 669e995f825f..c2c3049adcd1 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_dp_dpia_bw.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h
@@ -26,13 +26,13 @@
#ifndef DC_INC_LINK_DP_DPIA_BW_H_
#define DC_INC_LINK_DP_DPIA_BW_H_
-// XXX: TODO: Re-add for Phase 2
-/* Number of Host Routers per motherboard is 2 and 2 DPIA per host router */
-#define MAX_HR_NUM 2
-
-struct dc_host_router_bw_alloc {
- int max_bw[MAX_HR_NUM]; // The Max BW that each Host Router has available to be shared btw DPIAs
- int total_estimated_bw[MAX_HR_NUM]; // The Total Verified and available BW that Host Router has
+/*
+ * Host Router BW type
+ */
+enum bw_type {
+ HOST_ROUTER_BW_ESTIMATED,
+ HOST_ROUTER_BW_ALLOCATED,
+ HOST_ROUTER_BW_INVALID,
};
/*
@@ -44,26 +44,4 @@ struct dc_host_router_bw_alloc {
*/
bool set_dptx_usb4_bw_alloc_support(struct dc_link *link);
-/*
- * Send a request from DP-Tx requesting to allocate BW remotely after
- * allocating it locally. This will get processed by CM and a CB function
- * will be called.
- *
- * @link: pointer to the dc_link struct instance
- * @req_bw: The requested bw in Kbyte to allocated
- *
- * return: none
- */
-void set_usb4_req_bw_req(struct dc_link *link, int req_bw);
-
-/*
- * CB function for when the status of the Req above is complete. We will
- * find out the result of allocating on CM and update structs accordingly
- *
- * @link: pointer to the dc_link struct instance
- *
- * return: none
- */
-void get_usb4_req_bw_resp(struct dc_link *link);
-
#endif /* DC_INC_LINK_DP_DPIA_BW_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c
new file mode 100644
index 000000000000..9d80427520cf
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c
@@ -0,0 +1,389 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+/* FILE POLICY AND INTENDED USAGE:
+ * This file implements DP HPD short pulse handling sequence according to DP
+ * specifications
+ *
+ */
+
+#include "link_dp_irq_handler.h"
+#include "link_dpcd.h"
+#include "link_dp_training.h"
+#include "link_dp_capability.h"
+#include "link/accessories/link_dp_trace.h"
+#include "link/link_dpms.h"
+#include "dm_helpers.h"
+
+#define DC_LOGGER_INIT(logger)
+
+bool dc_link_check_link_loss_status(
+ struct dc_link *link,
+ union hpd_irq_data *hpd_irq_dpcd_data)
+{
+ uint8_t irq_reg_rx_power_state = 0;
+ enum dc_status dpcd_result = DC_ERROR_UNEXPECTED;
+ union lane_status lane_status;
+ uint32_t lane;
+ bool sink_status_changed;
+ bool return_code;
+
+ sink_status_changed = false;
+ return_code = false;
+
+ if (link->cur_link_settings.lane_count == 0)
+ return return_code;
+
+ /*1. Check that Link Status changed, before re-training.*/
+
+ /*parse lane status*/
+ for (lane = 0; lane < link->cur_link_settings.lane_count; lane++) {
+ /* check status of lanes 0,1
+ * changed DpcdAddress_Lane01Status (0x202)
+ */
+ lane_status.raw = dp_get_nibble_at_index(
+ &hpd_irq_dpcd_data->bytes.lane01_status.raw,
+ lane);
+
+ if (!lane_status.bits.CHANNEL_EQ_DONE_0 ||
+ !lane_status.bits.CR_DONE_0 ||
+ !lane_status.bits.SYMBOL_LOCKED_0) {
+ /* if one of the channel equalization, clock
+ * recovery or symbol lock is dropped
+ * consider it as (link has been
+ * dropped) dp sink status has changed
+ */
+ sink_status_changed = true;
+ break;
+ }
+ }
+
+ /* Check interlane align.*/
+ if (sink_status_changed ||
+ !hpd_irq_dpcd_data->bytes.lane_status_updated.bits.INTERLANE_ALIGN_DONE) {
+
+ DC_LOG_HW_HPD_IRQ("%s: Link Status changed.\n", __func__);
+
+ return_code = true;
+
+ /*2. Check that we can handle interrupt: Not in FS DOS,
+ * Not in "Display Timeout" state, Link is trained.
+ */
+ dpcd_result = core_link_read_dpcd(link,
+ DP_SET_POWER,
+ &irq_reg_rx_power_state,
+ sizeof(irq_reg_rx_power_state));
+
+ if (dpcd_result != DC_OK) {
+ DC_LOG_HW_HPD_IRQ("%s: DPCD read failed to obtain power state.\n",
+ __func__);
+ } else {
+ if (irq_reg_rx_power_state != DP_SET_POWER_D0)
+ return_code = false;
+ }
+ }
+
+ return return_code;
+}
+
+static bool handle_hpd_irq_psr_sink(struct dc_link *link)
+{
+ union dpcd_psr_configuration psr_configuration;
+
+ if (!link->psr_settings.psr_feature_enabled)
+ return false;
+
+ dm_helpers_dp_read_dpcd(
+ link->ctx,
+ link,
+ 368,/*DpcdAddress_PSR_Enable_Cfg*/
+ &psr_configuration.raw,
+ sizeof(psr_configuration.raw));
+
+ if (psr_configuration.bits.ENABLE) {
+ unsigned char dpcdbuf[3] = {0};
+ union psr_error_status psr_error_status;
+ union psr_sink_psr_status psr_sink_psr_status;
+
+ dm_helpers_dp_read_dpcd(
+ link->ctx,
+ link,
+ 0x2006, /*DpcdAddress_PSR_Error_Status*/
+ (unsigned char *) dpcdbuf,
+ sizeof(dpcdbuf));
+
+ /*DPCD 2006h ERROR STATUS*/
+ psr_error_status.raw = dpcdbuf[0];
+ /*DPCD 2008h SINK PANEL SELF REFRESH STATUS*/
+ psr_sink_psr_status.raw = dpcdbuf[2];
+
+ if (psr_error_status.bits.LINK_CRC_ERROR ||
+ psr_error_status.bits.RFB_STORAGE_ERROR ||
+ psr_error_status.bits.VSC_SDP_ERROR) {
+ bool allow_active;
+
+ /* Acknowledge and clear error bits */
+ dm_helpers_dp_write_dpcd(
+ link->ctx,
+ link,
+ 8198,/*DpcdAddress_PSR_Error_Status*/
+ &psr_error_status.raw,
+ sizeof(psr_error_status.raw));
+
+ /* PSR error, disable and re-enable PSR */
+ if (link->psr_settings.psr_allow_active) {
+ allow_active = false;
+ dc_link_set_psr_allow_active(link, &allow_active, true, false, NULL);
+ allow_active = true;
+ dc_link_set_psr_allow_active(link, &allow_active, true, false, NULL);
+ }
+
+ return true;
+ } else if (psr_sink_psr_status.bits.SINK_SELF_REFRESH_STATUS ==
+ PSR_SINK_STATE_ACTIVE_DISPLAY_FROM_SINK_RFB){
+ /* No error is detect, PSR is active.
+ * We should return with IRQ_HPD handled without
+ * checking for loss of sync since PSR would have
+ * powered down main link.
+ */
+ return true;
+ }
+ }
+ return false;
+}
+
+void dc_link_dp_handle_link_loss(struct dc_link *link)
+{
+ struct pipe_ctx *pipes[MAX_PIPES];
+ struct dc_state *state = link->dc->current_state;
+ uint8_t count;
+ int i;
+
+ link_get_master_pipes_with_dpms_on(link, state, &count, pipes);
+
+ for (i = 0; i < count; i++)
+ link_set_dpms_off(pipes[i]);
+
+ for (i = count - 1; i >= 0; i--) {
+ // Always use max settings here for DP 1.4a LL Compliance CTS
+ if (link->is_automated) {
+ pipes[i]->link_config.dp_link_settings.lane_count =
+ link->verified_link_cap.lane_count;
+ pipes[i]->link_config.dp_link_settings.link_rate =
+ link->verified_link_cap.link_rate;
+ pipes[i]->link_config.dp_link_settings.link_spread =
+ link->verified_link_cap.link_spread;
+ }
+ link_set_dpms_on(link->dc->current_state, pipes[i]);
+ }
+}
+
+enum dc_status dc_link_dp_read_hpd_rx_irq_data(
+ struct dc_link *link,
+ union hpd_irq_data *irq_data)
+{
+ static enum dc_status retval;
+
+ /* The HW reads 16 bytes from 200h on HPD,
+ * but if we get an AUX_DEFER, the HW cannot retry
+ * and this causes the CTS tests 4.3.2.1 - 3.2.4 to
+ * fail, so we now explicitly read 6 bytes which is
+ * the req from the above mentioned test cases.
+ *
+ * For DP 1.4 we need to read those from 2002h range.
+ */
+ if (link->dpcd_caps.dpcd_rev.raw < DPCD_REV_14)
+ retval = core_link_read_dpcd(
+ link,
+ DP_SINK_COUNT,
+ irq_data->raw,
+ sizeof(union hpd_irq_data));
+ else {
+ /* Read 14 bytes in a single read and then copy only the required fields.
+ * This is more efficient than doing it in two separate AUX reads. */
+
+ uint8_t tmp[DP_SINK_STATUS_ESI - DP_SINK_COUNT_ESI + 1];
+
+ retval = core_link_read_dpcd(
+ link,
+ DP_SINK_COUNT_ESI,
+ tmp,
+ sizeof(tmp));
+
+ if (retval != DC_OK)
+ return retval;
+
+ irq_data->bytes.sink_cnt.raw = tmp[DP_SINK_COUNT_ESI - DP_SINK_COUNT_ESI];
+ irq_data->bytes.device_service_irq.raw = tmp[DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0 - DP_SINK_COUNT_ESI];
+ irq_data->bytes.lane01_status.raw = tmp[DP_LANE0_1_STATUS_ESI - DP_SINK_COUNT_ESI];
+ irq_data->bytes.lane23_status.raw = tmp[DP_LANE2_3_STATUS_ESI - DP_SINK_COUNT_ESI];
+ irq_data->bytes.lane_status_updated.raw = tmp[DP_LANE_ALIGN_STATUS_UPDATED_ESI - DP_SINK_COUNT_ESI];
+ irq_data->bytes.sink_status.raw = tmp[DP_SINK_STATUS_ESI - DP_SINK_COUNT_ESI];
+ }
+
+ return retval;
+}
+
+/*************************Short Pulse IRQ***************************/
+bool dc_link_dp_allow_hpd_rx_irq(const struct dc_link *link)
+{
+ /*
+ * Don't handle RX IRQ unless one of following is met:
+ * 1) The link is established (cur_link_settings != unknown)
+ * 2) We know we're dealing with a branch device, SST or MST
+ */
+
+ if ((link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
+ is_dp_branch_device(link))
+ return true;
+
+ return false;
+}
+
+bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd_irq_dpcd_data, bool *out_link_loss,
+ bool defer_handling, bool *has_left_work)
+{
+ union hpd_irq_data hpd_irq_dpcd_data = {0};
+ union device_service_irq device_service_clear = {0};
+ enum dc_status result;
+ bool status = false;
+
+ if (out_link_loss)
+ *out_link_loss = false;
+
+ if (has_left_work)
+ *has_left_work = false;
+ /* For use cases related to down stream connection status change,
+ * PSR and device auto test, refer to function handle_sst_hpd_irq
+ * in DAL2.1*/
+
+ DC_LOG_HW_HPD_IRQ("%s: Got short pulse HPD on link %d\n",
+ __func__, link->link_index);
+
+
+ /* All the "handle_hpd_irq_xxx()" methods
+ * should be called only after
+ * dal_dpsst_ls_read_hpd_irq_data
+ * Order of calls is important too
+ */
+ result = dc_link_dp_read_hpd_rx_irq_data(link, &hpd_irq_dpcd_data);
+ if (out_hpd_irq_dpcd_data)
+ *out_hpd_irq_dpcd_data = hpd_irq_dpcd_data;
+
+ if (result != DC_OK) {
+ DC_LOG_HW_HPD_IRQ("%s: DPCD read failed to obtain irq data\n",
+ __func__);
+ return false;
+ }
+
+ if (hpd_irq_dpcd_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
+ // Workaround for DP 1.4a LL Compliance CTS as USB4 has to share encoders unlike DP and USBC
+ link->is_automated = true;
+ device_service_clear.bits.AUTOMATED_TEST = 1;
+ core_link_write_dpcd(
+ link,
+ DP_DEVICE_SERVICE_IRQ_VECTOR,
+ &device_service_clear.raw,
+ sizeof(device_service_clear.raw));
+ device_service_clear.raw = 0;
+ if (defer_handling && has_left_work)
+ *has_left_work = true;
+ else
+ dc_link_dp_handle_automated_test(link);
+ return false;
+ }
+
+ if (!dc_link_dp_allow_hpd_rx_irq(link)) {
+ DC_LOG_HW_HPD_IRQ("%s: skipping HPD handling on %d\n",
+ __func__, link->link_index);
+ return false;
+ }
+
+ if (handle_hpd_irq_psr_sink(link))
+ /* PSR-related error was detected and handled */
+ return true;
+
+ /* If PSR-related error handled, Main link may be off,
+ * so do not handle as a normal sink status change interrupt.
+ */
+
+ if (hpd_irq_dpcd_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) {
+ if (defer_handling && has_left_work)
+ *has_left_work = true;
+ return true;
+ }
+
+ /* check if we have MST msg and return since we poll for it */
+ if (hpd_irq_dpcd_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
+ if (defer_handling && has_left_work)
+ *has_left_work = true;
+ return false;
+ }
+
+ /* For now we only handle 'Downstream port status' case.
+ * If we got sink count changed it means
+ * Downstream port status changed,
+ * then DM should call DC to do the detection.
+ * NOTE: Do not handle link loss on eDP since it is internal link*/
+ if ((link->connector_signal != SIGNAL_TYPE_EDP) &&
+ dc_link_check_link_loss_status(
+ link,
+ &hpd_irq_dpcd_data)) {
+ /* Connectivity log: link loss */
+ CONN_DATA_LINK_LOSS(link,
+ hpd_irq_dpcd_data.raw,
+ sizeof(hpd_irq_dpcd_data),
+ "Status: ");
+
+ if (defer_handling && has_left_work)
+ *has_left_work = true;
+ else
+ dc_link_dp_handle_link_loss(link);
+
+ status = false;
+ if (out_link_loss)
+ *out_link_loss = true;
+
+ dp_trace_link_loss_increment(link);
+ }
+
+ if (link->type == dc_connection_sst_branch &&
+ hpd_irq_dpcd_data.bytes.sink_cnt.bits.SINK_COUNT
+ != link->dpcd_sink_count)
+ status = true;
+
+ /* reasons for HPD RX:
+ * 1. Link Loss - ie Re-train the Link
+ * 2. MST sideband message
+ * 3. Automated Test - ie. Internal Commit
+ * 4. CP (copy protection) - (not interesting for DM???)
+ * 5. DRR
+ * 6. Downstream Port status changed
+ * -ie. Detect - this the only one
+ * which is interesting for DM because
+ * it must call dc_link_detect.
+ */
+ return status;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dp_dpia_bw.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.h
index 801a95b34e8c..39b2e51ea79d 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_dp_dpia_bw.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.h
@@ -1,4 +1,3 @@
-
/*
* Copyright 2022 Advanced Micro Devices, Inc.
*
@@ -23,6 +22,10 @@
* Authors: AMD
*
*/
-/*********************************************************************/
-// USB4 DPIA BANDWIDTH ALLOCATION LOGIC
-/*********************************************************************/
+
+#ifndef __DC_LINK_DP_IRQ_HANDLER_H__
+#define __DC_LINK_DP_IRQ_HANDLER_H__
+
+#include "link.h"
+
+#endif /* __DC_LINK_DP_IRQ_HANDLER_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c
new file mode 100644
index 000000000000..cd9fb8126bcf
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c
@@ -0,0 +1,208 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+/* FILE POLICY AND INTENDED USAGE:
+ * This file implements basic dp phy functionality such as enable/disable phy
+ * output and set lane/drive settings. This file is responsible for maintaining
+ * and update software state representing current phy status such as current
+ * link settings.
+ */
+
+#include "link_dp_phy.h"
+#include "link_dpcd.h"
+#include "link_dp_training.h"
+#include "link_dp_capability.h"
+#include "clk_mgr.h"
+#include "resource.h"
+#include "link_enc_cfg.h"
+#define DC_LOGGER \
+ link->ctx->logger
+
+void dc_link_dp_receiver_power_ctrl(struct dc_link *link, bool on)
+{
+ uint8_t state;
+
+ state = on ? DP_POWER_STATE_D0 : DP_POWER_STATE_D3;
+
+ if (link->sync_lt_in_progress)
+ return;
+
+ core_link_write_dpcd(link, DP_SET_POWER, &state,
+ sizeof(state));
+
+}
+
+void dp_enable_link_phy(
+ struct dc_link *link,
+ const struct link_resource *link_res,
+ enum signal_type signal,
+ enum clock_source_id clock_source,
+ const struct dc_link_settings *link_settings)
+{
+ link->cur_link_settings = *link_settings;
+ link->dc->hwss.enable_dp_link_output(link, link_res, signal,
+ clock_source, link_settings);
+ dc_link_dp_receiver_power_ctrl(link, true);
+}
+
+void dp_disable_link_phy(struct dc_link *link,
+ const struct link_resource *link_res,
+ enum signal_type signal)
+{
+ struct dc *dc = link->ctx->dc;
+
+ if (!link->wa_flags.dp_keep_receiver_powered)
+ dc_link_dp_receiver_power_ctrl(link, false);
+
+ dc->hwss.disable_link_output(link, link_res, signal);
+ /* Clear current link setting.*/
+ memset(&link->cur_link_settings, 0,
+ sizeof(link->cur_link_settings));
+
+ if (dc->clk_mgr->funcs->notify_link_rate_change)
+ dc->clk_mgr->funcs->notify_link_rate_change(dc->clk_mgr, link);
+}
+
+static inline bool is_immediate_downstream(struct dc_link *link, uint32_t offset)
+{
+ return (dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) ==
+ offset);
+}
+
+void dp_set_hw_lane_settings(
+ struct dc_link *link,
+ const struct link_resource *link_res,
+ const struct link_training_settings *link_settings,
+ uint32_t offset)
+{
+ const struct link_hwss *link_hwss = get_link_hwss(link, link_res);
+
+ if ((link_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) &&
+ !is_immediate_downstream(link, offset))
+ return;
+
+ if (link_hwss->ext.set_dp_lane_settings)
+ link_hwss->ext.set_dp_lane_settings(link, link_res,
+ &link_settings->link_settings,
+ link_settings->hw_lane_settings);
+
+ memmove(link->cur_lane_setting,
+ link_settings->hw_lane_settings,
+ sizeof(link->cur_lane_setting));
+}
+
+void dp_set_drive_settings(
+ struct dc_link *link,
+ const struct link_resource *link_res,
+ struct link_training_settings *lt_settings)
+{
+ /* program ASIC PHY settings*/
+ dp_set_hw_lane_settings(link, link_res, lt_settings, DPRX);
+
+ dp_hw_to_dpcd_lane_settings(lt_settings,
+ lt_settings->hw_lane_settings,
+ lt_settings->dpcd_lane_settings);
+
+ /* Notify DP sink the PHY settings from source */
+ dpcd_set_lane_settings(link, lt_settings, DPRX);
+}
+
+enum dc_status dp_set_fec_ready(struct dc_link *link, const struct link_resource *link_res, bool ready)
+{
+ /* FEC has to be "set ready" before the link training.
+ * The policy is to always train with FEC
+ * if the sink supports it and leave it enabled on link.
+ * If FEC is not supported, disable it.
+ */
+ struct link_encoder *link_enc = NULL;
+ enum dc_status status = DC_OK;
+ uint8_t fec_config = 0;
+
+ link_enc = link_enc_cfg_get_link_enc(link);
+ ASSERT(link_enc);
+
+ if (!dc_link_should_enable_fec(link))
+ return status;
+
+ if (link_enc->funcs->fec_set_ready &&
+ link->dpcd_caps.fec_cap.bits.FEC_CAPABLE) {
+ if (ready) {
+ fec_config = 1;
+ status = core_link_write_dpcd(link,
+ DP_FEC_CONFIGURATION,
+ &fec_config,
+ sizeof(fec_config));
+ if (status == DC_OK) {
+ link_enc->funcs->fec_set_ready(link_enc, true);
+ link->fec_state = dc_link_fec_ready;
+ } else {
+ link_enc->funcs->fec_set_ready(link_enc, false);
+ link->fec_state = dc_link_fec_not_ready;
+ dm_error("dpcd write failed to set fec_ready");
+ }
+ } else if (link->fec_state == dc_link_fec_ready) {
+ fec_config = 0;
+ status = core_link_write_dpcd(link,
+ DP_FEC_CONFIGURATION,
+ &fec_config,
+ sizeof(fec_config));
+ link_enc->funcs->fec_set_ready(link_enc, false);
+ link->fec_state = dc_link_fec_not_ready;
+ }
+ }
+
+ return status;
+}
+
+void dp_set_fec_enable(struct dc_link *link, bool enable)
+{
+ struct link_encoder *link_enc = NULL;
+
+ link_enc = link_enc_cfg_get_link_enc(link);
+ ASSERT(link_enc);
+
+ if (!dc_link_should_enable_fec(link))
+ return;
+
+ if (link_enc->funcs->fec_set_enable &&
+ link->dpcd_caps.fec_cap.bits.FEC_CAPABLE) {
+ if (link->fec_state == dc_link_fec_ready && enable) {
+ /* Accord to DP spec, FEC enable sequence can first
+ * be transmitted anytime after 1000 LL codes have
+ * been transmitted on the link after link training
+ * completion. Using 1 lane RBR should have the maximum
+ * time for transmitting 1000 LL codes which is 6.173 us.
+ * So use 7 microseconds delay instead.
+ */
+ udelay(7);
+ link_enc->funcs->fec_set_enable(link_enc, true);
+ link->fec_state = dc_link_fec_enabled;
+ } else if (link->fec_state == dc_link_fec_enabled && !enable) {
+ link_enc->funcs->fec_set_enable(link_enc, false);
+ link->fec_state = dc_link_fec_ready;
+ }
+ }
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.h
new file mode 100644
index 000000000000..dba1f29df319
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_LINK_DP_PHY_H__
+#define __DC_LINK_DP_PHY_H__
+
+#include "link.h"
+void dp_enable_link_phy(
+ struct dc_link *link,
+ const struct link_resource *link_res,
+ enum signal_type signal,
+ enum clock_source_id clock_source,
+ const struct dc_link_settings *link_settings);
+
+void dp_disable_link_phy(struct dc_link *link,
+ const struct link_resource *link_res,
+ enum signal_type signal);
+
+void dp_set_hw_lane_settings(
+ struct dc_link *link,
+ const struct link_resource *link_res,
+ const struct link_training_settings *link_settings,
+ uint32_t offset);
+
+void dp_set_drive_settings(
+ struct dc_link *link,
+ const struct link_resource *link_res,
+ struct link_training_settings *lt_settings);
+
+enum dc_status dp_set_fec_ready(struct dc_link *link,
+ const struct link_resource *link_res, bool ready);
+void dp_set_fec_enable(struct dc_link *link, bool enable);
+
+#endif /* __DC_LINK_DP_PHY_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
new file mode 100644
index 000000000000..b48d4d822991
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
@@ -0,0 +1,1701 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+/* FILE POLICY AND INTENDED USAGE:
+ * This file implements all generic dp link training helper functions and top
+ * level generic training sequence. All variations of dp link training sequence
+ * should be called inside the top level training functions in this file to
+ * ensure the integrity of our overall training procedure across different types
+ * of link encoding and back end hardware.
+ */
+#include "link_dp_training.h"
+#include "link_dp_training_8b_10b.h"
+#include "link_dp_training_128b_132b.h"
+#include "link_dp_training_auxless.h"
+#include "link_dp_training_dpia.h"
+#include "link_dp_training_fixed_vs_pe_retimer.h"
+#include "link_dpcd.h"
+#include "link/accessories/link_dp_trace.h"
+#include "link_dp_phy.h"
+#include "link_dp_capability.h"
+#include "link_edp_panel_control.h"
+#include "atomfirmware.h"
+#include "link_enc_cfg.h"
+#include "resource.h"
+#include "dm_helpers.h"
+
+#define DC_LOGGER \
+ link->ctx->logger
+
+#define POST_LT_ADJ_REQ_LIMIT 6
+#define POST_LT_ADJ_REQ_TIMEOUT 200
+#define LINK_TRAINING_RETRY_DELAY 50 /* ms */
+
+void dp_log_training_result(
+ struct dc_link *link,
+ const struct link_training_settings *lt_settings,
+ enum link_training_result status)
+{
+ char *link_rate = "Unknown";
+ char *lt_result = "Unknown";
+ char *lt_spread = "Disabled";
+
+ switch (lt_settings->link_settings.link_rate) {
+ case LINK_RATE_LOW:
+ link_rate = "RBR";
+ break;
+ case LINK_RATE_RATE_2:
+ link_rate = "R2";
+ break;
+ case LINK_RATE_RATE_3:
+ link_rate = "R3";
+ break;
+ case LINK_RATE_HIGH:
+ link_rate = "HBR";
+ break;
+ case LINK_RATE_RBR2:
+ link_rate = "RBR2";
+ break;
+ case LINK_RATE_RATE_6:
+ link_rate = "R6";
+ break;
+ case LINK_RATE_HIGH2:
+ link_rate = "HBR2";
+ break;
+ case LINK_RATE_HIGH3:
+ link_rate = "HBR3";
+ break;
+ case LINK_RATE_UHBR10:
+ link_rate = "UHBR10";
+ break;
+ case LINK_RATE_UHBR13_5:
+ link_rate = "UHBR13.5";
+ break;
+ case LINK_RATE_UHBR20:
+ link_rate = "UHBR20";
+ break;
+ default:
+ break;
+ }
+
+ switch (status) {
+ case LINK_TRAINING_SUCCESS:
+ lt_result = "pass";
+ break;
+ case LINK_TRAINING_CR_FAIL_LANE0:
+ lt_result = "CR failed lane0";
+ break;
+ case LINK_TRAINING_CR_FAIL_LANE1:
+ lt_result = "CR failed lane1";
+ break;
+ case LINK_TRAINING_CR_FAIL_LANE23:
+ lt_result = "CR failed lane23";
+ break;
+ case LINK_TRAINING_EQ_FAIL_CR:
+ lt_result = "CR failed in EQ";
+ break;
+ case LINK_TRAINING_EQ_FAIL_CR_PARTIAL:
+ lt_result = "CR failed in EQ partially";
+ break;
+ case LINK_TRAINING_EQ_FAIL_EQ:
+ lt_result = "EQ failed";
+ break;
+ case LINK_TRAINING_LQA_FAIL:
+ lt_result = "LQA failed";
+ break;
+ case LINK_TRAINING_LINK_LOSS:
+ lt_result = "Link loss";
+ break;
+ case DP_128b_132b_LT_FAILED:
+ lt_result = "LT_FAILED received";
+ break;
+ case DP_128b_132b_MAX_LOOP_COUNT_REACHED:
+ lt_result = "max loop count reached";
+ break;
+ case DP_128b_132b_CHANNEL_EQ_DONE_TIMEOUT:
+ lt_result = "channel EQ timeout";
+ break;
+ case DP_128b_132b_CDS_DONE_TIMEOUT:
+ lt_result = "CDS timeout";
+ break;
+ default:
+ break;
+ }
+
+ switch (lt_settings->link_settings.link_spread) {
+ case LINK_SPREAD_DISABLED:
+ lt_spread = "Disabled";
+ break;
+ case LINK_SPREAD_05_DOWNSPREAD_30KHZ:
+ lt_spread = "0.5% 30KHz";
+ break;
+ case LINK_SPREAD_05_DOWNSPREAD_33KHZ:
+ lt_spread = "0.5% 33KHz";
+ break;
+ default:
+ break;
+ }
+
+ /* Connectivity log: link training */
+
+ /* TODO - DP2.0 Log: add connectivity log for FFE PRESET */
+
+ CONN_MSG_LT(link, "%sx%d %s VS=%d, PE=%d, DS=%s",
+ link_rate,
+ lt_settings->link_settings.lane_count,
+ lt_result,
+ lt_settings->hw_lane_settings[0].VOLTAGE_SWING,
+ lt_settings->hw_lane_settings[0].PRE_EMPHASIS,
+ lt_spread);
+}
+
+uint8_t dp_initialize_scrambling_data_symbols(
+ struct dc_link *link,
+ enum dc_dp_training_pattern pattern)
+{
+ uint8_t disable_scrabled_data_symbols = 0;
+
+ switch (pattern) {
+ case DP_TRAINING_PATTERN_SEQUENCE_1:
+ case DP_TRAINING_PATTERN_SEQUENCE_2:
+ case DP_TRAINING_PATTERN_SEQUENCE_3:
+ disable_scrabled_data_symbols = 1;
+ break;
+ case DP_TRAINING_PATTERN_SEQUENCE_4:
+ case DP_128b_132b_TPS1:
+ case DP_128b_132b_TPS2:
+ disable_scrabled_data_symbols = 0;
+ break;
+ default:
+ ASSERT(0);
+ DC_LOG_HW_LINK_TRAINING("%s: Invalid HW Training pattern: %d\n",
+ __func__, pattern);
+ break;
+ }
+ return disable_scrabled_data_symbols;
+}
+
+enum dpcd_training_patterns
+ dp_training_pattern_to_dpcd_training_pattern(
+ struct dc_link *link,
+ enum dc_dp_training_pattern pattern)
+{
+ enum dpcd_training_patterns dpcd_tr_pattern =
+ DPCD_TRAINING_PATTERN_VIDEOIDLE;
+
+ switch (pattern) {
+ case DP_TRAINING_PATTERN_SEQUENCE_1:
+ dpcd_tr_pattern = DPCD_TRAINING_PATTERN_1;
+ break;
+ case DP_TRAINING_PATTERN_SEQUENCE_2:
+ dpcd_tr_pattern = DPCD_TRAINING_PATTERN_2;
+ break;
+ case DP_TRAINING_PATTERN_SEQUENCE_3:
+ dpcd_tr_pattern = DPCD_TRAINING_PATTERN_3;
+ break;
+ case DP_TRAINING_PATTERN_SEQUENCE_4:
+ dpcd_tr_pattern = DPCD_TRAINING_PATTERN_4;
+ break;
+ case DP_128b_132b_TPS1:
+ dpcd_tr_pattern = DPCD_128b_132b_TPS1;
+ break;
+ case DP_128b_132b_TPS2:
+ dpcd_tr_pattern = DPCD_128b_132b_TPS2;
+ break;
+ case DP_128b_132b_TPS2_CDS:
+ dpcd_tr_pattern = DPCD_128b_132b_TPS2_CDS;
+ break;
+ case DP_TRAINING_PATTERN_VIDEOIDLE:
+ dpcd_tr_pattern = DPCD_TRAINING_PATTERN_VIDEOIDLE;
+ break;
+ default:
+ ASSERT(0);
+ DC_LOG_HW_LINK_TRAINING("%s: Invalid HW Training pattern: %d\n",
+ __func__, pattern);
+ break;
+ }
+
+ return dpcd_tr_pattern;
+}
+
+uint8_t dp_get_nibble_at_index(const uint8_t *buf,
+ uint32_t index)
+{
+ uint8_t nibble;
+ nibble = buf[index / 2];
+
+ if (index % 2)
+ nibble >>= 4;
+ else
+ nibble &= 0x0F;
+
+ return nibble;
+}
+
+void dp_wait_for_training_aux_rd_interval(
+ struct dc_link *link,
+ uint32_t wait_in_micro_secs)
+{
+ if (wait_in_micro_secs > 1000)
+ msleep(wait_in_micro_secs/1000);
+ else
+ udelay(wait_in_micro_secs);
+
+ DC_LOG_HW_LINK_TRAINING("%s:\n wait = %d\n",
+ __func__,
+ wait_in_micro_secs);
+}
+
+/* maximum pre emphasis level allowed for each voltage swing level*/
+static const enum dc_pre_emphasis voltage_swing_to_pre_emphasis[] = {
+ PRE_EMPHASIS_LEVEL3,
+ PRE_EMPHASIS_LEVEL2,
+ PRE_EMPHASIS_LEVEL1,
+ PRE_EMPHASIS_DISABLED };
+
+static enum dc_pre_emphasis get_max_pre_emphasis_for_voltage_swing(
+ enum dc_voltage_swing voltage)
+{
+ enum dc_pre_emphasis pre_emphasis;
+ pre_emphasis = PRE_EMPHASIS_MAX_LEVEL;
+
+ if (voltage <= VOLTAGE_SWING_MAX_LEVEL)
+ pre_emphasis = voltage_swing_to_pre_emphasis[voltage];
+
+ return pre_emphasis;
+
+}
+
+static void maximize_lane_settings(const struct link_training_settings *lt_settings,
+ struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX])
+{
+ uint32_t lane;
+ struct dc_lane_settings max_requested;
+
+ max_requested.VOLTAGE_SWING = lane_settings[0].VOLTAGE_SWING;
+ max_requested.PRE_EMPHASIS = lane_settings[0].PRE_EMPHASIS;
+ max_requested.FFE_PRESET = lane_settings[0].FFE_PRESET;
+
+ /* Determine what the maximum of the requested settings are*/
+ for (lane = 1; lane < lt_settings->link_settings.lane_count; lane++) {
+ if (lane_settings[lane].VOLTAGE_SWING > max_requested.VOLTAGE_SWING)
+ max_requested.VOLTAGE_SWING = lane_settings[lane].VOLTAGE_SWING;
+
+ if (lane_settings[lane].PRE_EMPHASIS > max_requested.PRE_EMPHASIS)
+ max_requested.PRE_EMPHASIS = lane_settings[lane].PRE_EMPHASIS;
+ if (lane_settings[lane].FFE_PRESET.settings.level >
+ max_requested.FFE_PRESET.settings.level)
+ max_requested.FFE_PRESET.settings.level =
+ lane_settings[lane].FFE_PRESET.settings.level;
+ }
+
+ /* make sure the requested settings are
+ * not higher than maximum settings*/
+ if (max_requested.VOLTAGE_SWING > VOLTAGE_SWING_MAX_LEVEL)
+ max_requested.VOLTAGE_SWING = VOLTAGE_SWING_MAX_LEVEL;
+
+ if (max_requested.PRE_EMPHASIS > PRE_EMPHASIS_MAX_LEVEL)
+ max_requested.PRE_EMPHASIS = PRE_EMPHASIS_MAX_LEVEL;
+ if (max_requested.FFE_PRESET.settings.level > DP_FFE_PRESET_MAX_LEVEL)
+ max_requested.FFE_PRESET.settings.level = DP_FFE_PRESET_MAX_LEVEL;
+
+ /* make sure the pre-emphasis matches the voltage swing*/
+ if (max_requested.PRE_EMPHASIS >
+ get_max_pre_emphasis_for_voltage_swing(
+ max_requested.VOLTAGE_SWING))
+ max_requested.PRE_EMPHASIS =
+ get_max_pre_emphasis_for_voltage_swing(
+ max_requested.VOLTAGE_SWING);
+
+ for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) {
+ lane_settings[lane].VOLTAGE_SWING = max_requested.VOLTAGE_SWING;
+ lane_settings[lane].PRE_EMPHASIS = max_requested.PRE_EMPHASIS;
+ lane_settings[lane].FFE_PRESET = max_requested.FFE_PRESET;
+ }
+}
+
+void dp_hw_to_dpcd_lane_settings(
+ const struct link_training_settings *lt_settings,
+ const struct dc_lane_settings hw_lane_settings[LANE_COUNT_DP_MAX],
+ union dpcd_training_lane dpcd_lane_settings[LANE_COUNT_DP_MAX])
+{
+ uint8_t lane = 0;
+
+ for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) {
+ if (link_dp_get_encoding_format(&lt_settings->link_settings) ==
+ DP_8b_10b_ENCODING) {
+ dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET =
+ (uint8_t)(hw_lane_settings[lane].VOLTAGE_SWING);
+ dpcd_lane_settings[lane].bits.PRE_EMPHASIS_SET =
+ (uint8_t)(hw_lane_settings[lane].PRE_EMPHASIS);
+ dpcd_lane_settings[lane].bits.MAX_SWING_REACHED =
+ (hw_lane_settings[lane].VOLTAGE_SWING ==
+ VOLTAGE_SWING_MAX_LEVEL ? 1 : 0);
+ dpcd_lane_settings[lane].bits.MAX_PRE_EMPHASIS_REACHED =
+ (hw_lane_settings[lane].PRE_EMPHASIS ==
+ PRE_EMPHASIS_MAX_LEVEL ? 1 : 0);
+ } else if (link_dp_get_encoding_format(&lt_settings->link_settings) ==
+ DP_128b_132b_ENCODING) {
+ dpcd_lane_settings[lane].tx_ffe.PRESET_VALUE =
+ hw_lane_settings[lane].FFE_PRESET.settings.level;
+ }
+ }
+}
+
+uint8_t get_dpcd_link_rate(const struct dc_link_settings *link_settings)
+{
+ uint8_t link_rate = 0;
+ enum dp_link_encoding encoding = link_dp_get_encoding_format(link_settings);
+
+ if (encoding == DP_128b_132b_ENCODING)
+ switch (link_settings->link_rate) {
+ case LINK_RATE_UHBR10:
+ link_rate = 0x1;
+ break;
+ case LINK_RATE_UHBR20:
+ link_rate = 0x2;
+ break;
+ case LINK_RATE_UHBR13_5:
+ link_rate = 0x4;
+ break;
+ default:
+ link_rate = 0;
+ break;
+ }
+ else if (encoding == DP_8b_10b_ENCODING)
+ link_rate = (uint8_t) link_settings->link_rate;
+ else
+ link_rate = 0;
+
+ return link_rate;
+}
+
+/* Only used for channel equalization */
+uint32_t dp_translate_training_aux_read_interval(uint32_t dpcd_aux_read_interval)
+{
+ unsigned int aux_rd_interval_us = 400;
+
+ switch (dpcd_aux_read_interval) {
+ case 0x01:
+ aux_rd_interval_us = 4000;
+ break;
+ case 0x02:
+ aux_rd_interval_us = 8000;
+ break;
+ case 0x03:
+ aux_rd_interval_us = 12000;
+ break;
+ case 0x04:
+ aux_rd_interval_us = 16000;
+ break;
+ case 0x05:
+ aux_rd_interval_us = 32000;
+ break;
+ case 0x06:
+ aux_rd_interval_us = 64000;
+ break;
+ default:
+ break;
+ }
+
+ return aux_rd_interval_us;
+}
+
+enum link_training_result dp_get_cr_failure(enum dc_lane_count ln_count,
+ union lane_status *dpcd_lane_status)
+{
+ enum link_training_result result = LINK_TRAINING_SUCCESS;
+
+ if (ln_count >= LANE_COUNT_ONE && !dpcd_lane_status[0].bits.CR_DONE_0)
+ result = LINK_TRAINING_CR_FAIL_LANE0;
+ else if (ln_count >= LANE_COUNT_TWO && !dpcd_lane_status[1].bits.CR_DONE_0)
+ result = LINK_TRAINING_CR_FAIL_LANE1;
+ else if (ln_count >= LANE_COUNT_FOUR && !dpcd_lane_status[2].bits.CR_DONE_0)
+ result = LINK_TRAINING_CR_FAIL_LANE23;
+ else if (ln_count >= LANE_COUNT_FOUR && !dpcd_lane_status[3].bits.CR_DONE_0)
+ result = LINK_TRAINING_CR_FAIL_LANE23;
+ return result;
+}
+
+bool is_repeater(const struct link_training_settings *lt_settings, uint32_t offset)
+{
+ return (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) && (offset != 0);
+}
+
+bool dp_is_max_vs_reached(
+ const struct link_training_settings *lt_settings)
+{
+ uint32_t lane;
+ for (lane = 0; lane <
+ (uint32_t)(lt_settings->link_settings.lane_count);
+ lane++) {
+ if (lt_settings->dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET
+ == VOLTAGE_SWING_MAX_LEVEL)
+ return true;
+ }
+ return false;
+
+}
+
+bool dp_is_cr_done(enum dc_lane_count ln_count,
+ union lane_status *dpcd_lane_status)
+{
+ bool done = true;
+ uint32_t lane;
+ /*LANEx_CR_DONE bits All 1's?*/
+ for (lane = 0; lane < (uint32_t)(ln_count); lane++) {
+ if (!dpcd_lane_status[lane].bits.CR_DONE_0)
+ done = false;
+ }
+ return done;
+
+}
+
+bool dp_is_ch_eq_done(enum dc_lane_count ln_count,
+ union lane_status *dpcd_lane_status)
+{
+ bool done = true;
+ uint32_t lane;
+ for (lane = 0; lane < (uint32_t)(ln_count); lane++)
+ if (!dpcd_lane_status[lane].bits.CHANNEL_EQ_DONE_0)
+ done = false;
+ return done;
+}
+
+bool dp_is_symbol_locked(enum dc_lane_count ln_count,
+ union lane_status *dpcd_lane_status)
+{
+ bool locked = true;
+ uint32_t lane;
+ for (lane = 0; lane < (uint32_t)(ln_count); lane++)
+ if (!dpcd_lane_status[lane].bits.SYMBOL_LOCKED_0)
+ locked = false;
+ return locked;
+}
+
+bool dp_is_interlane_aligned(union lane_align_status_updated align_status)
+{
+ return align_status.bits.INTERLANE_ALIGN_DONE == 1;
+}
+
+enum link_training_result dp_check_link_loss_status(
+ struct dc_link *link,
+ const struct link_training_settings *link_training_setting)
+{
+ enum link_training_result status = LINK_TRAINING_SUCCESS;
+ union lane_status lane_status;
+ uint8_t dpcd_buf[6] = {0};
+ uint32_t lane;
+
+ core_link_read_dpcd(
+ link,
+ DP_SINK_COUNT,
+ (uint8_t *)(dpcd_buf),
+ sizeof(dpcd_buf));
+
+ /*parse lane status*/
+ for (lane = 0; lane < link->cur_link_settings.lane_count; lane++) {
+ /*
+ * check lanes status
+ */
+ lane_status.raw = dp_get_nibble_at_index(&dpcd_buf[2], lane);
+
+ if (!lane_status.bits.CHANNEL_EQ_DONE_0 ||
+ !lane_status.bits.CR_DONE_0 ||
+ !lane_status.bits.SYMBOL_LOCKED_0) {
+ /* if one of the channel equalization, clock
+ * recovery or symbol lock is dropped
+ * consider it as (link has been
+ * dropped) dp sink status has changed
+ */
+ status = LINK_TRAINING_LINK_LOSS;
+ break;
+ }
+ }
+
+ return status;
+}
+
+enum dc_status dp_get_lane_status_and_lane_adjust(
+ struct dc_link *link,
+ const struct link_training_settings *link_training_setting,
+ union lane_status ln_status[LANE_COUNT_DP_MAX],
+ union lane_align_status_updated *ln_align,
+ union lane_adjust ln_adjust[LANE_COUNT_DP_MAX],
+ uint32_t offset)
+{
+ unsigned int lane01_status_address = DP_LANE0_1_STATUS;
+ uint8_t lane_adjust_offset = 4;
+ unsigned int lane01_adjust_address;
+ uint8_t dpcd_buf[6] = {0};
+ uint32_t lane;
+ enum dc_status status;
+
+ if (is_repeater(link_training_setting, offset)) {
+ lane01_status_address =
+ DP_LANE0_1_STATUS_PHY_REPEATER1 +
+ ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
+ lane_adjust_offset = 3;
+ }
+
+ status = core_link_read_dpcd(
+ link,
+ lane01_status_address,
+ (uint8_t *)(dpcd_buf),
+ sizeof(dpcd_buf));
+
+ if (status != DC_OK) {
+ DC_LOG_HW_LINK_TRAINING("%s:\n Failed to read from address 0x%X,"
+ " keep current lane status and lane adjust unchanged",
+ __func__,
+ lane01_status_address);
+ return status;
+ }
+
+ for (lane = 0; lane <
+ (uint32_t)(link_training_setting->link_settings.lane_count);
+ lane++) {
+
+ ln_status[lane].raw =
+ dp_get_nibble_at_index(&dpcd_buf[0], lane);
+ ln_adjust[lane].raw =
+ dp_get_nibble_at_index(&dpcd_buf[lane_adjust_offset], lane);
+ }
+
+ ln_align->raw = dpcd_buf[2];
+
+ if (is_repeater(link_training_setting, offset)) {
+ DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n"
+ " 0x%X Lane01Status = %x\n 0x%X Lane23Status = %x\n ",
+ __func__,
+ offset,
+ lane01_status_address, dpcd_buf[0],
+ lane01_status_address + 1, dpcd_buf[1]);
+
+ lane01_adjust_address = DP_ADJUST_REQUEST_LANE0_1_PHY_REPEATER1 +
+ ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
+
+ DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n"
+ " 0x%X Lane01AdjustRequest = %x\n 0x%X Lane23AdjustRequest = %x\n",
+ __func__,
+ offset,
+ lane01_adjust_address,
+ dpcd_buf[lane_adjust_offset],
+ lane01_adjust_address + 1,
+ dpcd_buf[lane_adjust_offset + 1]);
+ } else {
+ DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X Lane01Status = %x\n 0x%X Lane23Status = %x\n ",
+ __func__,
+ lane01_status_address, dpcd_buf[0],
+ lane01_status_address + 1, dpcd_buf[1]);
+
+ lane01_adjust_address = DP_ADJUST_REQUEST_LANE0_1;
+
+ DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X Lane01AdjustRequest = %x\n 0x%X Lane23AdjustRequest = %x\n",
+ __func__,
+ lane01_adjust_address,
+ dpcd_buf[lane_adjust_offset],
+ lane01_adjust_address + 1,
+ dpcd_buf[lane_adjust_offset + 1]);
+ }
+
+ return status;
+}
+
+static void override_lane_settings(const struct link_training_settings *lt_settings,
+ struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX])
+{
+ uint32_t lane;
+
+ if (lt_settings->voltage_swing == NULL &&
+ lt_settings->pre_emphasis == NULL &&
+ lt_settings->ffe_preset == NULL &&
+ lt_settings->post_cursor2 == NULL)
+
+ return;
+
+ for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) {
+ if (lt_settings->voltage_swing)
+ lane_settings[lane].VOLTAGE_SWING = *lt_settings->voltage_swing;
+ if (lt_settings->pre_emphasis)
+ lane_settings[lane].PRE_EMPHASIS = *lt_settings->pre_emphasis;
+ if (lt_settings->post_cursor2)
+ lane_settings[lane].POST_CURSOR2 = *lt_settings->post_cursor2;
+ if (lt_settings->ffe_preset)
+ lane_settings[lane].FFE_PRESET = *lt_settings->ffe_preset;
+ }
+}
+
+void dp_get_lttpr_mode_override(struct dc_link *link, enum lttpr_mode *override)
+{
+ if (!dp_is_lttpr_present(link))
+ return;
+
+ if (link->dc->debug.lttpr_mode_override == LTTPR_MODE_TRANSPARENT) {
+ *override = LTTPR_MODE_TRANSPARENT;
+ } else if (link->dc->debug.lttpr_mode_override == LTTPR_MODE_NON_TRANSPARENT) {
+ *override = LTTPR_MODE_NON_TRANSPARENT;
+ } else if (link->dc->debug.lttpr_mode_override == LTTPR_MODE_NON_LTTPR) {
+ *override = LTTPR_MODE_NON_LTTPR;
+ }
+ DC_LOG_DC("lttpr_mode_override chose LTTPR_MODE = %d\n", (uint8_t)(*override));
+}
+
+void override_training_settings(
+ struct dc_link *link,
+ const struct dc_link_training_overrides *overrides,
+ struct link_training_settings *lt_settings)
+{
+ uint32_t lane;
+
+ /* Override link spread */
+ if (!link->dp_ss_off && overrides->downspread != NULL)
+ lt_settings->link_settings.link_spread = *overrides->downspread ?
+ LINK_SPREAD_05_DOWNSPREAD_30KHZ
+ : LINK_SPREAD_DISABLED;
+
+ /* Override lane settings */
+ if (overrides->voltage_swing != NULL)
+ lt_settings->voltage_swing = overrides->voltage_swing;
+ if (overrides->pre_emphasis != NULL)
+ lt_settings->pre_emphasis = overrides->pre_emphasis;
+ if (overrides->post_cursor2 != NULL)
+ lt_settings->post_cursor2 = overrides->post_cursor2;
+ if (overrides->ffe_preset != NULL)
+ lt_settings->ffe_preset = overrides->ffe_preset;
+ /* Override HW lane settings with BIOS forced values if present */
+ if ((link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
+ lt_settings->lttpr_mode == LTTPR_MODE_TRANSPARENT) {
+ lt_settings->voltage_swing = &link->bios_forced_drive_settings.VOLTAGE_SWING;
+ lt_settings->pre_emphasis = &link->bios_forced_drive_settings.PRE_EMPHASIS;
+ lt_settings->always_match_dpcd_with_hw_lane_settings = false;
+ }
+ for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) {
+ lt_settings->hw_lane_settings[lane].VOLTAGE_SWING =
+ lt_settings->voltage_swing != NULL ?
+ *lt_settings->voltage_swing :
+ VOLTAGE_SWING_LEVEL0;
+ lt_settings->hw_lane_settings[lane].PRE_EMPHASIS =
+ lt_settings->pre_emphasis != NULL ?
+ *lt_settings->pre_emphasis
+ : PRE_EMPHASIS_DISABLED;
+ lt_settings->hw_lane_settings[lane].POST_CURSOR2 =
+ lt_settings->post_cursor2 != NULL ?
+ *lt_settings->post_cursor2
+ : POST_CURSOR2_DISABLED;
+ }
+
+ if (lt_settings->always_match_dpcd_with_hw_lane_settings)
+ dp_hw_to_dpcd_lane_settings(lt_settings,
+ lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
+
+ /* Override training timings */
+ if (overrides->cr_pattern_time != NULL)
+ lt_settings->cr_pattern_time = *overrides->cr_pattern_time;
+ if (overrides->eq_pattern_time != NULL)
+ lt_settings->eq_pattern_time = *overrides->eq_pattern_time;
+ if (overrides->pattern_for_cr != NULL)
+ lt_settings->pattern_for_cr = *overrides->pattern_for_cr;
+ if (overrides->pattern_for_eq != NULL)
+ lt_settings->pattern_for_eq = *overrides->pattern_for_eq;
+ if (overrides->enhanced_framing != NULL)
+ lt_settings->enhanced_framing = *overrides->enhanced_framing;
+ if (link->preferred_training_settings.fec_enable != NULL)
+ lt_settings->should_set_fec_ready = *link->preferred_training_settings.fec_enable;
+
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ /* Check DP tunnel LTTPR mode debug option. */
+ if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && link->dc->debug.dpia_debug.bits.force_non_lttpr)
+ lt_settings->lttpr_mode = LTTPR_MODE_NON_LTTPR;
+
+#endif
+ dp_get_lttpr_mode_override(link, &lt_settings->lttpr_mode);
+
+}
+
+enum dc_dp_training_pattern decide_cr_training_pattern(
+ const struct dc_link_settings *link_settings)
+{
+ switch (link_dp_get_encoding_format(link_settings)) {
+ case DP_8b_10b_ENCODING:
+ default:
+ return DP_TRAINING_PATTERN_SEQUENCE_1;
+ case DP_128b_132b_ENCODING:
+ return DP_128b_132b_TPS1;
+ }
+}
+
+enum dc_dp_training_pattern decide_eq_training_pattern(struct dc_link *link,
+ const struct dc_link_settings *link_settings)
+{
+ struct link_encoder *link_enc;
+ struct encoder_feature_support *enc_caps;
+ struct dpcd_caps *rx_caps = &link->dpcd_caps;
+ enum dc_dp_training_pattern pattern = DP_TRAINING_PATTERN_SEQUENCE_2;
+
+ link_enc = link_enc_cfg_get_link_enc(link);
+ ASSERT(link_enc);
+ enc_caps = &link_enc->features;
+
+ switch (link_dp_get_encoding_format(link_settings)) {
+ case DP_8b_10b_ENCODING:
+ if (enc_caps->flags.bits.IS_TPS4_CAPABLE &&
+ rx_caps->max_down_spread.bits.TPS4_SUPPORTED)
+ pattern = DP_TRAINING_PATTERN_SEQUENCE_4;
+ else if (enc_caps->flags.bits.IS_TPS3_CAPABLE &&
+ rx_caps->max_ln_count.bits.TPS3_SUPPORTED)
+ pattern = DP_TRAINING_PATTERN_SEQUENCE_3;
+ else
+ pattern = DP_TRAINING_PATTERN_SEQUENCE_2;
+ break;
+ case DP_128b_132b_ENCODING:
+ pattern = DP_128b_132b_TPS2;
+ break;
+ default:
+ pattern = DP_TRAINING_PATTERN_SEQUENCE_2;
+ break;
+ }
+ return pattern;
+}
+
+enum lttpr_mode dc_link_decide_lttpr_mode(struct dc_link *link,
+ struct dc_link_settings *link_setting)
+{
+ enum dp_link_encoding encoding = link_dp_get_encoding_format(link_setting);
+
+ if (encoding == DP_8b_10b_ENCODING)
+ return dp_decide_8b_10b_lttpr_mode(link);
+ else if (encoding == DP_128b_132b_ENCODING)
+ return dp_decide_128b_132b_lttpr_mode(link);
+
+ ASSERT(0);
+ return LTTPR_MODE_NON_LTTPR;
+}
+
+void dp_decide_lane_settings(
+ const struct link_training_settings *lt_settings,
+ const union lane_adjust ln_adjust[LANE_COUNT_DP_MAX],
+ struct dc_lane_settings hw_lane_settings[LANE_COUNT_DP_MAX],
+ union dpcd_training_lane dpcd_lane_settings[LANE_COUNT_DP_MAX])
+{
+ uint32_t lane;
+
+ for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) {
+ if (link_dp_get_encoding_format(&lt_settings->link_settings) ==
+ DP_8b_10b_ENCODING) {
+ hw_lane_settings[lane].VOLTAGE_SWING =
+ (enum dc_voltage_swing)(ln_adjust[lane].bits.
+ VOLTAGE_SWING_LANE);
+ hw_lane_settings[lane].PRE_EMPHASIS =
+ (enum dc_pre_emphasis)(ln_adjust[lane].bits.
+ PRE_EMPHASIS_LANE);
+ } else if (link_dp_get_encoding_format(&lt_settings->link_settings) ==
+ DP_128b_132b_ENCODING) {
+ hw_lane_settings[lane].FFE_PRESET.raw =
+ ln_adjust[lane].tx_ffe.PRESET_VALUE;
+ }
+ }
+ dp_hw_to_dpcd_lane_settings(lt_settings, hw_lane_settings, dpcd_lane_settings);
+
+ if (lt_settings->disallow_per_lane_settings) {
+ /* we find the maximum of the requested settings across all lanes*/
+ /* and set this maximum for all lanes*/
+ maximize_lane_settings(lt_settings, hw_lane_settings);
+ override_lane_settings(lt_settings, hw_lane_settings);
+
+ if (lt_settings->always_match_dpcd_with_hw_lane_settings)
+ dp_hw_to_dpcd_lane_settings(lt_settings, hw_lane_settings, dpcd_lane_settings);
+ }
+
+}
+
+void dp_decide_training_settings(
+ struct dc_link *link,
+ const struct dc_link_settings *link_settings,
+ struct link_training_settings *lt_settings)
+{
+ if (link_dp_get_encoding_format(link_settings) == DP_8b_10b_ENCODING)
+ decide_8b_10b_training_settings(link, link_settings, lt_settings);
+ else if (link_dp_get_encoding_format(link_settings) == DP_128b_132b_ENCODING)
+ decide_128b_132b_training_settings(link, link_settings, lt_settings);
+}
+
+
+enum dc_status configure_lttpr_mode_transparent(struct dc_link *link)
+{
+ uint8_t repeater_mode = DP_PHY_REPEATER_MODE_TRANSPARENT;
+
+ DC_LOG_HW_LINK_TRAINING("%s\n Set LTTPR to Transparent Mode\n", __func__);
+ return core_link_write_dpcd(link,
+ DP_PHY_REPEATER_MODE,
+ (uint8_t *)&repeater_mode,
+ sizeof(repeater_mode));
+}
+
+static enum dc_status configure_lttpr_mode_non_transparent(
+ struct dc_link *link,
+ const struct link_training_settings *lt_settings)
+{
+ /* aux timeout is already set to extended */
+ /* RESET/SET lttpr mode to enable non transparent mode */
+ uint8_t repeater_cnt;
+ uint32_t aux_interval_address;
+ uint8_t repeater_id;
+ enum dc_status result = DC_ERROR_UNEXPECTED;
+ uint8_t repeater_mode = DP_PHY_REPEATER_MODE_TRANSPARENT;
+
+ enum dp_link_encoding encoding = link_dp_get_encoding_format(&lt_settings->link_settings);
+
+ if (encoding == DP_8b_10b_ENCODING) {
+ DC_LOG_HW_LINK_TRAINING("%s\n Set LTTPR to Transparent Mode\n", __func__);
+ result = core_link_write_dpcd(link,
+ DP_PHY_REPEATER_MODE,
+ (uint8_t *)&repeater_mode,
+ sizeof(repeater_mode));
+
+ }
+
+ if (result == DC_OK) {
+ link->dpcd_caps.lttpr_caps.mode = repeater_mode;
+ }
+
+ if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) {
+
+ DC_LOG_HW_LINK_TRAINING("%s\n Set LTTPR to Non Transparent Mode\n", __func__);
+
+ repeater_mode = DP_PHY_REPEATER_MODE_NON_TRANSPARENT;
+ result = core_link_write_dpcd(link,
+ DP_PHY_REPEATER_MODE,
+ (uint8_t *)&repeater_mode,
+ sizeof(repeater_mode));
+
+ if (result == DC_OK) {
+ link->dpcd_caps.lttpr_caps.mode = repeater_mode;
+ }
+
+ if (encoding == DP_8b_10b_ENCODING) {
+ repeater_cnt = dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
+
+ /* Driver does not need to train the first hop. Skip DPCD read and clear
+ * AUX_RD_INTERVAL for DPTX-to-DPIA hop.
+ */
+ if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
+ link->dpcd_caps.lttpr_caps.aux_rd_interval[--repeater_cnt] = 0;
+
+ for (repeater_id = repeater_cnt; repeater_id > 0; repeater_id--) {
+ aux_interval_address = DP_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER1 +
+ ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (repeater_id - 1));
+ core_link_read_dpcd(
+ link,
+ aux_interval_address,
+ (uint8_t *)&link->dpcd_caps.lttpr_caps.aux_rd_interval[repeater_id - 1],
+ sizeof(link->dpcd_caps.lttpr_caps.aux_rd_interval[repeater_id - 1]));
+ link->dpcd_caps.lttpr_caps.aux_rd_interval[repeater_id - 1] &= 0x7F;
+ }
+ }
+ }
+
+ return result;
+}
+
+enum dc_status dpcd_configure_lttpr_mode(struct dc_link *link, struct link_training_settings *lt_settings)
+{
+ enum dc_status status = DC_OK;
+
+ if (lt_settings->lttpr_mode == LTTPR_MODE_TRANSPARENT)
+ status = configure_lttpr_mode_transparent(link);
+
+ else if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT)
+ status = configure_lttpr_mode_non_transparent(link, lt_settings);
+
+ return status;
+}
+
+void repeater_training_done(struct dc_link *link, uint32_t offset)
+{
+ union dpcd_training_pattern dpcd_pattern = {0};
+
+ const uint32_t dpcd_base_lt_offset =
+ DP_TRAINING_PATTERN_SET_PHY_REPEATER1 +
+ ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
+ /* Set training not in progress*/
+ dpcd_pattern.v1_4.TRAINING_PATTERN_SET = DPCD_TRAINING_PATTERN_VIDEOIDLE;
+
+ core_link_write_dpcd(
+ link,
+ dpcd_base_lt_offset,
+ &dpcd_pattern.raw,
+ 1);
+
+ DC_LOG_HW_LINK_TRAINING("%s\n LTTPR Id: %d 0x%X pattern = %x\n",
+ __func__,
+ offset,
+ dpcd_base_lt_offset,
+ dpcd_pattern.v1_4.TRAINING_PATTERN_SET);
+}
+
+static void dpcd_exit_training_mode(struct dc_link *link, enum dp_link_encoding encoding)
+{
+ uint8_t sink_status = 0;
+ uint8_t i;
+
+ /* clear training pattern set */
+ dpcd_set_training_pattern(link, DP_TRAINING_PATTERN_VIDEOIDLE);
+
+ if (encoding == DP_128b_132b_ENCODING) {
+ /* poll for intra-hop disable */
+ for (i = 0; i < 10; i++) {
+ if ((core_link_read_dpcd(link, DP_SINK_STATUS, &sink_status, 1) == DC_OK) &&
+ (sink_status & DP_INTRA_HOP_AUX_REPLY_INDICATION) == 0)
+ break;
+ udelay(1000);
+ }
+ }
+}
+
+enum dc_status dpcd_configure_channel_coding(struct dc_link *link,
+ struct link_training_settings *lt_settings)
+{
+ enum dp_link_encoding encoding =
+ link_dp_get_encoding_format(
+ &lt_settings->link_settings);
+ enum dc_status status;
+
+ status = core_link_write_dpcd(
+ link,
+ DP_MAIN_LINK_CHANNEL_CODING_SET,
+ (uint8_t *) &encoding,
+ 1);
+ DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X MAIN_LINK_CHANNEL_CODING_SET = %x\n",
+ __func__,
+ DP_MAIN_LINK_CHANNEL_CODING_SET,
+ encoding);
+
+ return status;
+}
+
+void dpcd_set_training_pattern(
+ struct dc_link *link,
+ enum dc_dp_training_pattern training_pattern)
+{
+ union dpcd_training_pattern dpcd_pattern = {0};
+
+ dpcd_pattern.v1_4.TRAINING_PATTERN_SET =
+ dp_training_pattern_to_dpcd_training_pattern(
+ link, training_pattern);
+
+ core_link_write_dpcd(
+ link,
+ DP_TRAINING_PATTERN_SET,
+ &dpcd_pattern.raw,
+ 1);
+
+ DC_LOG_HW_LINK_TRAINING("%s\n %x pattern = %x\n",
+ __func__,
+ DP_TRAINING_PATTERN_SET,
+ dpcd_pattern.v1_4.TRAINING_PATTERN_SET);
+}
+
+enum dc_status dpcd_set_link_settings(
+ struct dc_link *link,
+ const struct link_training_settings *lt_settings)
+{
+ uint8_t rate;
+ enum dc_status status;
+
+ union down_spread_ctrl downspread = {0};
+ union lane_count_set lane_count_set = {0};
+
+ downspread.raw = (uint8_t)
+ (lt_settings->link_settings.link_spread);
+
+ lane_count_set.bits.LANE_COUNT_SET =
+ lt_settings->link_settings.lane_count;
+
+ lane_count_set.bits.ENHANCED_FRAMING = lt_settings->enhanced_framing;
+ lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED = 0;
+
+
+ if (link->ep_type == DISPLAY_ENDPOINT_PHY &&
+ lt_settings->pattern_for_eq < DP_TRAINING_PATTERN_SEQUENCE_4) {
+ lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED =
+ link->dpcd_caps.max_ln_count.bits.POST_LT_ADJ_REQ_SUPPORTED;
+ }
+
+ status = core_link_write_dpcd(link, DP_DOWNSPREAD_CTRL,
+ &downspread.raw, sizeof(downspread));
+
+ status = core_link_write_dpcd(link, DP_LANE_COUNT_SET,
+ &lane_count_set.raw, 1);
+
+ if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_13 &&
+ lt_settings->link_settings.use_link_rate_set == true) {
+ rate = 0;
+ /* WA for some MUX chips that will power down with eDP and lose supported
+ * link rate set for eDP 1.4. Source reads DPCD 0x010 again to ensure
+ * MUX chip gets link rate set back before link training.
+ */
+ if (link->connector_signal == SIGNAL_TYPE_EDP) {
+ uint8_t supported_link_rates[16];
+
+ core_link_read_dpcd(link, DP_SUPPORTED_LINK_RATES,
+ supported_link_rates, sizeof(supported_link_rates));
+ }
+ status = core_link_write_dpcd(link, DP_LINK_BW_SET, &rate, 1);
+ status = core_link_write_dpcd(link, DP_LINK_RATE_SET,
+ &lt_settings->link_settings.link_rate_set, 1);
+ } else {
+ rate = get_dpcd_link_rate(&lt_settings->link_settings);
+
+ status = core_link_write_dpcd(link, DP_LINK_BW_SET, &rate, 1);
+ }
+
+ if (rate) {
+ DC_LOG_HW_LINK_TRAINING("%s\n %x rate = %x\n %x lane = %x framing = %x\n %x spread = %x\n",
+ __func__,
+ DP_LINK_BW_SET,
+ lt_settings->link_settings.link_rate,
+ DP_LANE_COUNT_SET,
+ lt_settings->link_settings.lane_count,
+ lt_settings->enhanced_framing,
+ DP_DOWNSPREAD_CTRL,
+ lt_settings->link_settings.link_spread);
+ } else {
+ DC_LOG_HW_LINK_TRAINING("%s\n %x rate set = %x\n %x lane = %x framing = %x\n %x spread = %x\n",
+ __func__,
+ DP_LINK_RATE_SET,
+ lt_settings->link_settings.link_rate_set,
+ DP_LANE_COUNT_SET,
+ lt_settings->link_settings.lane_count,
+ lt_settings->enhanced_framing,
+ DP_DOWNSPREAD_CTRL,
+ lt_settings->link_settings.link_spread);
+ }
+
+ return status;
+}
+
+enum dc_status dpcd_set_lane_settings(
+ struct dc_link *link,
+ const struct link_training_settings *link_training_setting,
+ uint32_t offset)
+{
+ unsigned int lane0_set_address;
+ enum dc_status status;
+ lane0_set_address = DP_TRAINING_LANE0_SET;
+
+ if (is_repeater(link_training_setting, offset))
+ lane0_set_address = DP_TRAINING_LANE0_SET_PHY_REPEATER1 +
+ ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
+
+ status = core_link_write_dpcd(link,
+ lane0_set_address,
+ (uint8_t *)(link_training_setting->dpcd_lane_settings),
+ link_training_setting->link_settings.lane_count);
+
+ if (is_repeater(link_training_setting, offset)) {
+ DC_LOG_HW_LINK_TRAINING("%s\n LTTPR Repeater ID: %d\n"
+ " 0x%X VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n",
+ __func__,
+ offset,
+ lane0_set_address,
+ link_training_setting->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET,
+ link_training_setting->dpcd_lane_settings[0].bits.PRE_EMPHASIS_SET,
+ link_training_setting->dpcd_lane_settings[0].bits.MAX_SWING_REACHED,
+ link_training_setting->dpcd_lane_settings[0].bits.MAX_PRE_EMPHASIS_REACHED);
+
+ } else {
+ DC_LOG_HW_LINK_TRAINING("%s\n 0x%X VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n",
+ __func__,
+ lane0_set_address,
+ link_training_setting->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET,
+ link_training_setting->dpcd_lane_settings[0].bits.PRE_EMPHASIS_SET,
+ link_training_setting->dpcd_lane_settings[0].bits.MAX_SWING_REACHED,
+ link_training_setting->dpcd_lane_settings[0].bits.MAX_PRE_EMPHASIS_REACHED);
+ }
+
+ return status;
+}
+
+void dpcd_set_lt_pattern_and_lane_settings(
+ struct dc_link *link,
+ const struct link_training_settings *lt_settings,
+ enum dc_dp_training_pattern pattern,
+ uint32_t offset)
+{
+ uint32_t dpcd_base_lt_offset;
+ uint8_t dpcd_lt_buffer[5] = {0};
+ union dpcd_training_pattern dpcd_pattern = {0};
+ uint32_t size_in_bytes;
+ bool edp_workaround = false; /* TODO link_prop.INTERNAL */
+ dpcd_base_lt_offset = DP_TRAINING_PATTERN_SET;
+
+ if (is_repeater(lt_settings, offset))
+ dpcd_base_lt_offset = DP_TRAINING_PATTERN_SET_PHY_REPEATER1 +
+ ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
+
+ /*****************************************************************
+ * DpcdAddress_TrainingPatternSet
+ *****************************************************************/
+ dpcd_pattern.v1_4.TRAINING_PATTERN_SET =
+ dp_training_pattern_to_dpcd_training_pattern(link, pattern);
+
+ dpcd_pattern.v1_4.SCRAMBLING_DISABLE =
+ dp_initialize_scrambling_data_symbols(link, pattern);
+
+ dpcd_lt_buffer[DP_TRAINING_PATTERN_SET - DP_TRAINING_PATTERN_SET]
+ = dpcd_pattern.raw;
+
+ if (is_repeater(lt_settings, offset)) {
+ DC_LOG_HW_LINK_TRAINING("%s\n LTTPR Repeater ID: %d\n 0x%X pattern = %x\n",
+ __func__,
+ offset,
+ dpcd_base_lt_offset,
+ dpcd_pattern.v1_4.TRAINING_PATTERN_SET);
+ } else {
+ DC_LOG_HW_LINK_TRAINING("%s\n 0x%X pattern = %x\n",
+ __func__,
+ dpcd_base_lt_offset,
+ dpcd_pattern.v1_4.TRAINING_PATTERN_SET);
+ }
+
+ /* concatenate everything into one buffer*/
+ size_in_bytes = lt_settings->link_settings.lane_count *
+ sizeof(lt_settings->dpcd_lane_settings[0]);
+
+ // 0x00103 - 0x00102
+ memmove(
+ &dpcd_lt_buffer[DP_TRAINING_LANE0_SET - DP_TRAINING_PATTERN_SET],
+ lt_settings->dpcd_lane_settings,
+ size_in_bytes);
+
+ if (is_repeater(lt_settings, offset)) {
+ if (link_dp_get_encoding_format(&lt_settings->link_settings) ==
+ DP_128b_132b_ENCODING)
+ DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n"
+ " 0x%X TX_FFE_PRESET_VALUE = %x\n",
+ __func__,
+ offset,
+ dpcd_base_lt_offset,
+ lt_settings->dpcd_lane_settings[0].tx_ffe.PRESET_VALUE);
+ else if (link_dp_get_encoding_format(&lt_settings->link_settings) ==
+ DP_8b_10b_ENCODING)
+ DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n"
+ " 0x%X VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n",
+ __func__,
+ offset,
+ dpcd_base_lt_offset,
+ lt_settings->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET,
+ lt_settings->dpcd_lane_settings[0].bits.PRE_EMPHASIS_SET,
+ lt_settings->dpcd_lane_settings[0].bits.MAX_SWING_REACHED,
+ lt_settings->dpcd_lane_settings[0].bits.MAX_PRE_EMPHASIS_REACHED);
+ } else {
+ if (link_dp_get_encoding_format(&lt_settings->link_settings) ==
+ DP_128b_132b_ENCODING)
+ DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X TX_FFE_PRESET_VALUE = %x\n",
+ __func__,
+ dpcd_base_lt_offset,
+ lt_settings->dpcd_lane_settings[0].tx_ffe.PRESET_VALUE);
+ else if (link_dp_get_encoding_format(&lt_settings->link_settings) ==
+ DP_8b_10b_ENCODING)
+ DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n",
+ __func__,
+ dpcd_base_lt_offset,
+ lt_settings->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET,
+ lt_settings->dpcd_lane_settings[0].bits.PRE_EMPHASIS_SET,
+ lt_settings->dpcd_lane_settings[0].bits.MAX_SWING_REACHED,
+ lt_settings->dpcd_lane_settings[0].bits.MAX_PRE_EMPHASIS_REACHED);
+ }
+ if (edp_workaround) {
+ /* for eDP write in 2 parts because the 5-byte burst is
+ * causing issues on some eDP panels (EPR#366724)
+ */
+ core_link_write_dpcd(
+ link,
+ DP_TRAINING_PATTERN_SET,
+ &dpcd_pattern.raw,
+ sizeof(dpcd_pattern.raw));
+
+ core_link_write_dpcd(
+ link,
+ DP_TRAINING_LANE0_SET,
+ (uint8_t *)(lt_settings->dpcd_lane_settings),
+ size_in_bytes);
+
+ } else if (link_dp_get_encoding_format(&lt_settings->link_settings) ==
+ DP_128b_132b_ENCODING) {
+ core_link_write_dpcd(
+ link,
+ dpcd_base_lt_offset,
+ dpcd_lt_buffer,
+ sizeof(dpcd_lt_buffer));
+ } else
+ /* write it all in (1 + number-of-lanes)-byte burst*/
+ core_link_write_dpcd(
+ link,
+ dpcd_base_lt_offset,
+ dpcd_lt_buffer,
+ size_in_bytes + sizeof(dpcd_pattern.raw));
+}
+
+void start_clock_recovery_pattern_early(struct dc_link *link,
+ const struct link_resource *link_res,
+ struct link_training_settings *lt_settings,
+ uint32_t offset)
+{
+ DC_LOG_HW_LINK_TRAINING("%s\n GPU sends TPS1. Wait 400us.\n",
+ __func__);
+ dp_set_hw_training_pattern(link, link_res, lt_settings->pattern_for_cr, offset);
+ dp_set_hw_lane_settings(link, link_res, lt_settings, offset);
+ udelay(400);
+}
+
+void dp_set_hw_test_pattern(
+ struct dc_link *link,
+ const struct link_resource *link_res,
+ enum dp_test_pattern test_pattern,
+ uint8_t *custom_pattern,
+ uint32_t custom_pattern_size)
+{
+ const struct link_hwss *link_hwss = get_link_hwss(link, link_res);
+ struct encoder_set_dp_phy_pattern_param pattern_param = {0};
+
+ pattern_param.dp_phy_pattern = test_pattern;
+ pattern_param.custom_pattern = custom_pattern;
+ pattern_param.custom_pattern_size = custom_pattern_size;
+ pattern_param.dp_panel_mode = dp_get_panel_mode(link);
+
+ if (link_hwss->ext.set_dp_link_test_pattern)
+ link_hwss->ext.set_dp_link_test_pattern(link, link_res, &pattern_param);
+}
+
+bool dp_set_hw_training_pattern(
+ struct dc_link *link,
+ const struct link_resource *link_res,
+ enum dc_dp_training_pattern pattern,
+ uint32_t offset)
+{
+ enum dp_test_pattern test_pattern = DP_TEST_PATTERN_UNSUPPORTED;
+
+ switch (pattern) {
+ case DP_TRAINING_PATTERN_SEQUENCE_1:
+ test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN1;
+ break;
+ case DP_TRAINING_PATTERN_SEQUENCE_2:
+ test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN2;
+ break;
+ case DP_TRAINING_PATTERN_SEQUENCE_3:
+ test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN3;
+ break;
+ case DP_TRAINING_PATTERN_SEQUENCE_4:
+ test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN4;
+ break;
+ case DP_128b_132b_TPS1:
+ test_pattern = DP_TEST_PATTERN_128b_132b_TPS1_TRAINING_MODE;
+ break;
+ case DP_128b_132b_TPS2:
+ test_pattern = DP_TEST_PATTERN_128b_132b_TPS2_TRAINING_MODE;
+ break;
+ default:
+ break;
+ }
+
+ dp_set_hw_test_pattern(link, link_res, test_pattern, NULL, 0);
+
+ return true;
+}
+
+static bool perform_post_lt_adj_req_sequence(
+ struct dc_link *link,
+ const struct link_resource *link_res,
+ struct link_training_settings *lt_settings)
+{
+ enum dc_lane_count lane_count =
+ lt_settings->link_settings.lane_count;
+
+ uint32_t adj_req_count;
+ uint32_t adj_req_timer;
+ bool req_drv_setting_changed;
+ uint32_t lane;
+ union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0};
+ union lane_align_status_updated dpcd_lane_status_updated = {0};
+ union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0};
+
+ req_drv_setting_changed = false;
+ for (adj_req_count = 0; adj_req_count < POST_LT_ADJ_REQ_LIMIT;
+ adj_req_count++) {
+
+ req_drv_setting_changed = false;
+
+ for (adj_req_timer = 0;
+ adj_req_timer < POST_LT_ADJ_REQ_TIMEOUT;
+ adj_req_timer++) {
+
+ dp_get_lane_status_and_lane_adjust(
+ link,
+ lt_settings,
+ dpcd_lane_status,
+ &dpcd_lane_status_updated,
+ dpcd_lane_adjust,
+ DPRX);
+
+ if (dpcd_lane_status_updated.bits.
+ POST_LT_ADJ_REQ_IN_PROGRESS == 0)
+ return true;
+
+ if (!dp_is_cr_done(lane_count, dpcd_lane_status))
+ return false;
+
+ if (!dp_is_ch_eq_done(lane_count, dpcd_lane_status) ||
+ !dp_is_symbol_locked(lane_count, dpcd_lane_status) ||
+ !dp_is_interlane_aligned(dpcd_lane_status_updated))
+ return false;
+
+ for (lane = 0; lane < (uint32_t)(lane_count); lane++) {
+
+ if (lt_settings->
+ dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET !=
+ dpcd_lane_adjust[lane].bits.VOLTAGE_SWING_LANE ||
+ lt_settings->dpcd_lane_settings[lane].bits.PRE_EMPHASIS_SET !=
+ dpcd_lane_adjust[lane].bits.PRE_EMPHASIS_LANE) {
+
+ req_drv_setting_changed = true;
+ break;
+ }
+ }
+
+ if (req_drv_setting_changed) {
+ dp_decide_lane_settings(lt_settings, dpcd_lane_adjust,
+ lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
+
+ dp_set_drive_settings(link,
+ link_res,
+ lt_settings);
+ break;
+ }
+
+ msleep(1);
+ }
+
+ if (!req_drv_setting_changed) {
+ DC_LOG_WARNING("%s: Post Link Training Adjust Request Timed out\n",
+ __func__);
+
+ ASSERT(0);
+ return true;
+ }
+ }
+ DC_LOG_WARNING("%s: Post Link Training Adjust Request limit reached\n",
+ __func__);
+
+ ASSERT(0);
+ return true;
+
+}
+
+static enum link_training_result dp_transition_to_video_idle(
+ struct dc_link *link,
+ const struct link_resource *link_res,
+ struct link_training_settings *lt_settings,
+ enum link_training_result status)
+{
+ union lane_count_set lane_count_set = {0};
+
+ /* 4. mainlink output idle pattern*/
+ dp_set_hw_test_pattern(link, link_res, DP_TEST_PATTERN_VIDEO_MODE, NULL, 0);
+
+ /*
+ * 5. post training adjust if required
+ * If the upstream DPTX and downstream DPRX both support TPS4,
+ * TPS4 must be used instead of POST_LT_ADJ_REQ.
+ */
+ if (link->dpcd_caps.max_ln_count.bits.POST_LT_ADJ_REQ_SUPPORTED != 1 ||
+ lt_settings->pattern_for_eq >= DP_TRAINING_PATTERN_SEQUENCE_4) {
+ /* delay 5ms after Main Link output idle pattern and then check
+ * DPCD 0202h.
+ */
+ if (link->connector_signal != SIGNAL_TYPE_EDP && status == LINK_TRAINING_SUCCESS) {
+ msleep(5);
+ status = dp_check_link_loss_status(link, lt_settings);
+ }
+ return status;
+ }
+
+ if (status == LINK_TRAINING_SUCCESS &&
+ perform_post_lt_adj_req_sequence(link, link_res, lt_settings) == false)
+ status = LINK_TRAINING_LQA_FAIL;
+
+ lane_count_set.bits.LANE_COUNT_SET = lt_settings->link_settings.lane_count;
+ lane_count_set.bits.ENHANCED_FRAMING = lt_settings->enhanced_framing;
+ lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED = 0;
+
+ core_link_write_dpcd(
+ link,
+ DP_LANE_COUNT_SET,
+ &lane_count_set.raw,
+ sizeof(lane_count_set));
+
+ return status;
+}
+
+enum link_training_result dp_perform_link_training(
+ struct dc_link *link,
+ const struct link_resource *link_res,
+ const struct dc_link_settings *link_settings,
+ bool skip_video_pattern)
+{
+ enum link_training_result status = LINK_TRAINING_SUCCESS;
+ struct link_training_settings lt_settings = {0};
+ enum dp_link_encoding encoding =
+ link_dp_get_encoding_format(link_settings);
+
+ /* decide training settings */
+ dp_decide_training_settings(
+ link,
+ link_settings,
+ &lt_settings);
+
+ override_training_settings(
+ link,
+ &link->preferred_training_settings,
+ &lt_settings);
+
+ /* reset previous training states */
+ dpcd_exit_training_mode(link, encoding);
+
+ /* configure link prior to entering training mode */
+ dpcd_configure_lttpr_mode(link, &lt_settings);
+ dp_set_fec_ready(link, link_res, lt_settings.should_set_fec_ready);
+ dpcd_configure_channel_coding(link, &lt_settings);
+
+ /* enter training mode:
+ * Per DP specs starting from here, DPTX device shall not issue
+ * Non-LT AUX transactions inside training mode.
+ */
+ if ((link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) && encoding == DP_8b_10b_ENCODING)
+ status = dp_perform_fixed_vs_pe_training_sequence(link, link_res, &lt_settings);
+ else if (encoding == DP_8b_10b_ENCODING)
+ status = dp_perform_8b_10b_link_training(link, link_res, &lt_settings);
+ else if (encoding == DP_128b_132b_ENCODING)
+ status = dp_perform_128b_132b_link_training(link, link_res, &lt_settings);
+ else
+ ASSERT(0);
+
+ /* exit training mode */
+ dpcd_exit_training_mode(link, encoding);
+
+ /* switch to video idle */
+ if ((status == LINK_TRAINING_SUCCESS) || !skip_video_pattern)
+ status = dp_transition_to_video_idle(link,
+ link_res,
+ &lt_settings,
+ status);
+
+ /* dump debug data */
+ dp_log_training_result(link, &lt_settings, status);
+ if (status != LINK_TRAINING_SUCCESS)
+ link->ctx->dc->debug_data.ltFailCount++;
+ return status;
+}
+
+bool perform_link_training_with_retries(
+ const struct dc_link_settings *link_setting,
+ bool skip_video_pattern,
+ int attempts,
+ struct pipe_ctx *pipe_ctx,
+ enum signal_type signal,
+ bool do_fallback)
+{
+ int j;
+ uint8_t delay_between_attempts = LINK_TRAINING_RETRY_DELAY;
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ struct dc_link *link = stream->link;
+ enum dp_panel_mode panel_mode = dp_get_panel_mode(link);
+ enum link_training_result status = LINK_TRAINING_CR_FAIL_LANE0;
+ struct dc_link_settings cur_link_settings = *link_setting;
+ struct dc_link_settings max_link_settings = *link_setting;
+ const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
+ int fail_count = 0;
+ bool is_link_bw_low = false; /* link bandwidth < stream bandwidth */
+ bool is_link_bw_min = /* RBR x 1 */
+ (cur_link_settings.link_rate <= LINK_RATE_LOW) &&
+ (cur_link_settings.lane_count <= LANE_COUNT_ONE);
+
+ dp_trace_commit_lt_init(link);
+
+
+ if (link_dp_get_encoding_format(&cur_link_settings) == DP_8b_10b_ENCODING)
+ /* We need to do this before the link training to ensure the idle
+ * pattern in SST mode will be sent right after the link training
+ */
+ link_hwss->setup_stream_encoder(pipe_ctx);
+
+ dp_trace_set_lt_start_timestamp(link, false);
+ j = 0;
+ while (j < attempts && fail_count < (attempts * 10)) {
+
+ DC_LOG_HW_LINK_TRAINING("%s: Beginning link(%d) training attempt %u of %d @ rate(%d) x lane(%d)\n",
+ __func__, link->link_index, (unsigned int)j + 1, attempts, cur_link_settings.link_rate,
+ cur_link_settings.lane_count);
+
+ dp_enable_link_phy(
+ link,
+ &pipe_ctx->link_res,
+ signal,
+ pipe_ctx->clock_source->id,
+ &cur_link_settings);
+
+ if (stream->sink_patches.dppowerup_delay > 0) {
+ int delay_dp_power_up_in_ms = stream->sink_patches.dppowerup_delay;
+
+ msleep(delay_dp_power_up_in_ms);
+ }
+
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+ if (panel_mode == DP_PANEL_MODE_EDP) {
+ struct cp_psp *cp_psp = &stream->ctx->cp_psp;
+
+ if (cp_psp && cp_psp->funcs.enable_assr) {
+ /* ASSR is bound to fail with unsigned PSP
+ * verstage used during devlopment phase.
+ * Report and continue with eDP panel mode to
+ * perform eDP link training with right settings
+ */
+ bool result;
+ result = cp_psp->funcs.enable_assr(cp_psp->handle, link);
+ }
+ }
+#endif
+
+ dp_set_panel_mode(link, panel_mode);
+
+ if (link->aux_access_disabled) {
+ dc_link_dp_perform_link_training_skip_aux(link, &pipe_ctx->link_res, &cur_link_settings);
+ return true;
+ } else {
+ /** @todo Consolidate USB4 DP and DPx.x training. */
+ if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) {
+ status = dc_link_dpia_perform_link_training(
+ link,
+ &pipe_ctx->link_res,
+ &cur_link_settings,
+ skip_video_pattern);
+
+ /* Transmit idle pattern once training successful. */
+ if (status == LINK_TRAINING_SUCCESS && !is_link_bw_low) {
+ dp_set_hw_test_pattern(link, &pipe_ctx->link_res, DP_TEST_PATTERN_VIDEO_MODE, NULL, 0);
+ // Update verified link settings to current one
+ // Because DPIA LT might fallback to lower link setting.
+ if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+ link->verified_link_cap.link_rate = link->cur_link_settings.link_rate;
+ link->verified_link_cap.lane_count = link->cur_link_settings.lane_count;
+ dm_helpers_dp_mst_update_branch_bandwidth(link->ctx, link);
+ }
+ }
+ } else {
+ status = dp_perform_link_training(
+ link,
+ &pipe_ctx->link_res,
+ &cur_link_settings,
+ skip_video_pattern);
+ }
+
+ dp_trace_lt_total_count_increment(link, false);
+ dp_trace_lt_result_update(link, status, false);
+ dp_trace_set_lt_end_timestamp(link, false);
+ if (status == LINK_TRAINING_SUCCESS && !is_link_bw_low)
+ return true;
+ }
+
+ fail_count++;
+ dp_trace_lt_fail_count_update(link, fail_count, false);
+ if (link->ep_type == DISPLAY_ENDPOINT_PHY) {
+ /* latest link training still fail or link training is aborted
+ * skip delay and keep PHY on
+ */
+ if (j == (attempts - 1) || (status == LINK_TRAINING_ABORT))
+ break;
+ }
+
+ DC_LOG_WARNING("%s: Link(%d) training attempt %u of %d failed @ rate(%d) x lane(%d) : fail reason:(%d)\n",
+ __func__, link->link_index, (unsigned int)j + 1, attempts, cur_link_settings.link_rate,
+ cur_link_settings.lane_count, status);
+
+ dp_disable_link_phy(link, &pipe_ctx->link_res, signal);
+
+ /* Abort link training if failure due to sink being unplugged. */
+ if (status == LINK_TRAINING_ABORT) {
+ enum dc_connection_type type = dc_connection_none;
+
+ dc_link_detect_connection_type(link, &type);
+ if (type == dc_connection_none) {
+ DC_LOG_HW_LINK_TRAINING("%s: Aborting training because sink unplugged\n", __func__);
+ break;
+ }
+ }
+
+ /* Try to train again at original settings if:
+ * - not falling back between training attempts;
+ * - aborted previous attempt due to reasons other than sink unplug;
+ * - successfully trained but at a link rate lower than that required by stream;
+ * - reached minimum link bandwidth.
+ */
+ if (!do_fallback || (status == LINK_TRAINING_ABORT) ||
+ (status == LINK_TRAINING_SUCCESS && is_link_bw_low) ||
+ is_link_bw_min) {
+ j++;
+ cur_link_settings = *link_setting;
+ delay_between_attempts += LINK_TRAINING_RETRY_DELAY;
+ is_link_bw_low = false;
+ is_link_bw_min = (cur_link_settings.link_rate <= LINK_RATE_LOW) &&
+ (cur_link_settings.lane_count <= LANE_COUNT_ONE);
+
+ } else if (do_fallback) { /* Try training at lower link bandwidth if doing fallback. */
+ uint32_t req_bw;
+ uint32_t link_bw;
+
+ decide_fallback_link_setting(link, &max_link_settings,
+ &cur_link_settings, status);
+ /* Flag if reduced link bandwidth no longer meets stream requirements or fallen back to
+ * minimum link bandwidth.
+ */
+ req_bw = dc_bandwidth_in_kbps_from_timing(&stream->timing);
+ link_bw = dc_link_bandwidth_kbps(link, &cur_link_settings);
+ is_link_bw_low = (req_bw > link_bw);
+ is_link_bw_min = ((cur_link_settings.link_rate <= LINK_RATE_LOW) &&
+ (cur_link_settings.lane_count <= LANE_COUNT_ONE));
+
+ if (is_link_bw_low)
+ DC_LOG_WARNING(
+ "%s: Link(%d) bandwidth too low after fallback req_bw(%d) > link_bw(%d)\n",
+ __func__, link->link_index, req_bw, link_bw);
+ }
+
+ msleep(delay_between_attempts);
+ }
+
+ return false;
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.h
new file mode 100644
index 000000000000..a04948635369
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.h
@@ -0,0 +1,182 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+
+#ifndef __DC_LINK_DP_TRAINING_H__
+#define __DC_LINK_DP_TRAINING_H__
+#include "link.h"
+
+bool perform_link_training_with_retries(
+ const struct dc_link_settings *link_setting,
+ bool skip_video_pattern,
+ int attempts,
+ struct pipe_ctx *pipe_ctx,
+ enum signal_type signal,
+ bool do_fallback);
+
+enum link_training_result dp_perform_link_training(
+ struct dc_link *link,
+ const struct link_resource *link_res,
+ const struct dc_link_settings *link_settings,
+ bool skip_video_pattern);
+
+bool dp_set_hw_training_pattern(
+ struct dc_link *link,
+ const struct link_resource *link_res,
+ enum dc_dp_training_pattern pattern,
+ uint32_t offset);
+
+void dp_set_hw_test_pattern(
+ struct dc_link *link,
+ const struct link_resource *link_res,
+ enum dp_test_pattern test_pattern,
+ uint8_t *custom_pattern,
+ uint32_t custom_pattern_size);
+
+void dpcd_set_training_pattern(
+ struct dc_link *link,
+ enum dc_dp_training_pattern training_pattern);
+
+/* Write DPCD drive settings. */
+enum dc_status dpcd_set_lane_settings(
+ struct dc_link *link,
+ const struct link_training_settings *link_training_setting,
+ uint32_t offset);
+
+/* Write DPCD link configuration data. */
+enum dc_status dpcd_set_link_settings(
+ struct dc_link *link,
+ const struct link_training_settings *lt_settings);
+
+void dpcd_set_lt_pattern_and_lane_settings(
+ struct dc_link *link,
+ const struct link_training_settings *lt_settings,
+ enum dc_dp_training_pattern pattern,
+ uint32_t offset);
+
+/* Read training status and adjustment requests from DPCD. */
+enum dc_status dp_get_lane_status_and_lane_adjust(
+ struct dc_link *link,
+ const struct link_training_settings *link_training_setting,
+ union lane_status ln_status[LANE_COUNT_DP_MAX],
+ union lane_align_status_updated *ln_align,
+ union lane_adjust ln_adjust[LANE_COUNT_DP_MAX],
+ uint32_t offset);
+
+enum dc_status dpcd_configure_lttpr_mode(
+ struct dc_link *link,
+ struct link_training_settings *lt_settings);
+
+enum dc_status configure_lttpr_mode_transparent(struct dc_link *link);
+
+enum dc_status dpcd_configure_channel_coding(
+ struct dc_link *link,
+ struct link_training_settings *lt_settings);
+
+void repeater_training_done(struct dc_link *link, uint32_t offset);
+
+void start_clock_recovery_pattern_early(struct dc_link *link,
+ const struct link_resource *link_res,
+ struct link_training_settings *lt_settings,
+ uint32_t offset);
+
+void dp_decide_training_settings(
+ struct dc_link *link,
+ const struct dc_link_settings *link_settings,
+ struct link_training_settings *lt_settings);
+
+void dp_decide_lane_settings(
+ const struct link_training_settings *lt_settings,
+ const union lane_adjust ln_adjust[LANE_COUNT_DP_MAX],
+ struct dc_lane_settings hw_lane_settings[LANE_COUNT_DP_MAX],
+ union dpcd_training_lane dpcd_lane_settings[LANE_COUNT_DP_MAX]);
+
+enum dc_dp_training_pattern decide_cr_training_pattern(
+ const struct dc_link_settings *link_settings);
+
+enum dc_dp_training_pattern decide_eq_training_pattern(struct dc_link *link,
+ const struct dc_link_settings *link_settings);
+
+void dp_get_lttpr_mode_override(struct dc_link *link,
+ enum lttpr_mode *override);
+
+void override_training_settings(
+ struct dc_link *link,
+ const struct dc_link_training_overrides *overrides,
+ struct link_training_settings *lt_settings);
+
+/* Check DPCD training status registers to detect link loss. */
+enum link_training_result dp_check_link_loss_status(
+ struct dc_link *link,
+ const struct link_training_settings *link_training_setting);
+
+bool dp_is_cr_done(enum dc_lane_count ln_count,
+ union lane_status *dpcd_lane_status);
+
+bool dp_is_ch_eq_done(enum dc_lane_count ln_count,
+ union lane_status *dpcd_lane_status);
+bool dp_is_symbol_locked(enum dc_lane_count ln_count,
+ union lane_status *dpcd_lane_status);
+bool dp_is_interlane_aligned(union lane_align_status_updated align_status);
+
+bool is_repeater(const struct link_training_settings *lt_settings, uint32_t offset);
+
+bool dp_is_max_vs_reached(
+ const struct link_training_settings *lt_settings);
+
+uint8_t get_dpcd_link_rate(const struct dc_link_settings *link_settings);
+
+enum link_training_result dp_get_cr_failure(enum dc_lane_count ln_count,
+ union lane_status *dpcd_lane_status);
+
+void dp_hw_to_dpcd_lane_settings(
+ const struct link_training_settings *lt_settings,
+ const struct dc_lane_settings hw_lane_settings[LANE_COUNT_DP_MAX],
+ union dpcd_training_lane dpcd_lane_settings[LANE_COUNT_DP_MAX]);
+
+void dp_wait_for_training_aux_rd_interval(
+ struct dc_link *link,
+ uint32_t wait_in_micro_secs);
+
+enum dpcd_training_patterns
+ dp_training_pattern_to_dpcd_training_pattern(
+ struct dc_link *link,
+ enum dc_dp_training_pattern pattern);
+
+uint8_t dp_initialize_scrambling_data_symbols(
+ struct dc_link *link,
+ enum dc_dp_training_pattern pattern);
+
+void dp_log_training_result(
+ struct dc_link *link,
+ const struct link_training_settings *lt_settings,
+ enum link_training_result status);
+
+uint32_t dp_translate_training_aux_read_interval(
+ uint32_t dpcd_aux_read_interval);
+
+uint8_t dp_get_nibble_at_index(const uint8_t *buf,
+ uint32_t index);
+#endif /* __DC_LINK_DP_TRAINING_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_128b_132b.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_128b_132b.c
new file mode 100644
index 000000000000..23d380f09a21
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_128b_132b.c
@@ -0,0 +1,259 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+/* FILE POLICY AND INTENDED USAGE:
+ * This file implements dp 128b/132b link training software policies and
+ * sequences.
+ */
+#include "link_dp_training_128b_132b.h"
+#include "link_dp_training_8b_10b.h"
+#include "link_dpcd.h"
+#include "link_dp_phy.h"
+#include "link_dp_capability.h"
+
+#define DC_LOGGER \
+ link->ctx->logger
+
+static enum dc_status dpcd_128b_132b_set_lane_settings(
+ struct dc_link *link,
+ const struct link_training_settings *link_training_setting)
+{
+ enum dc_status status = core_link_write_dpcd(link,
+ DP_TRAINING_LANE0_SET,
+ (uint8_t *)(link_training_setting->dpcd_lane_settings),
+ sizeof(link_training_setting->dpcd_lane_settings));
+
+ DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X TX_FFE_PRESET_VALUE = %x\n",
+ __func__,
+ DP_TRAINING_LANE0_SET,
+ link_training_setting->dpcd_lane_settings[0].tx_ffe.PRESET_VALUE);
+ return status;
+}
+
+static void dpcd_128b_132b_get_aux_rd_interval(struct dc_link *link,
+ uint32_t *interval_in_us)
+{
+ union dp_128b_132b_training_aux_rd_interval dpcd_interval;
+ uint32_t interval_unit = 0;
+
+ dpcd_interval.raw = 0;
+ core_link_read_dpcd(link, DP_128B132B_TRAINING_AUX_RD_INTERVAL,
+ &dpcd_interval.raw, sizeof(dpcd_interval.raw));
+ interval_unit = dpcd_interval.bits.UNIT ? 1 : 2; /* 0b = 2 ms, 1b = 1 ms */
+ /* (128b/132b_TRAINING_AUX_RD_INTERVAL value + 1) *
+ * INTERVAL_UNIT. The maximum is 256 ms
+ */
+ *interval_in_us = (dpcd_interval.bits.VALUE + 1) * interval_unit * 1000;
+}
+
+static enum link_training_result dp_perform_128b_132b_channel_eq_done_sequence(
+ struct dc_link *link,
+ const struct link_resource *link_res,
+ struct link_training_settings *lt_settings)
+{
+ uint8_t loop_count;
+ uint32_t aux_rd_interval = 0;
+ uint32_t wait_time = 0;
+ union lane_align_status_updated dpcd_lane_status_updated = {0};
+ union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0};
+ union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0};
+ enum dc_status status = DC_OK;
+ enum link_training_result result = LINK_TRAINING_SUCCESS;
+
+ /* Transmit 128b/132b_TPS1 over Main-Link */
+ dp_set_hw_training_pattern(link, link_res, lt_settings->pattern_for_cr, DPRX);
+
+ /* Set TRAINING_PATTERN_SET to 01h */
+ dpcd_set_training_pattern(link, lt_settings->pattern_for_cr);
+
+ /* Adjust TX_FFE_PRESET_VALUE and Transmit 128b/132b_TPS2 over Main-Link */
+ dpcd_128b_132b_get_aux_rd_interval(link, &aux_rd_interval);
+ dp_get_lane_status_and_lane_adjust(link, lt_settings, dpcd_lane_status,
+ &dpcd_lane_status_updated, dpcd_lane_adjust, DPRX);
+ dp_decide_lane_settings(lt_settings, dpcd_lane_adjust,
+ lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
+ dp_set_hw_lane_settings(link, link_res, lt_settings, DPRX);
+ dp_set_hw_training_pattern(link, link_res, lt_settings->pattern_for_eq, DPRX);
+
+ /* Set loop counter to start from 1 */
+ loop_count = 1;
+
+ /* Set TRAINING_PATTERN_SET to 02h and TX_FFE_PRESET_VALUE in one AUX transaction */
+ dpcd_set_lt_pattern_and_lane_settings(link, lt_settings,
+ lt_settings->pattern_for_eq, DPRX);
+
+ /* poll for channel EQ done */
+ while (result == LINK_TRAINING_SUCCESS) {
+ dp_wait_for_training_aux_rd_interval(link, aux_rd_interval);
+ wait_time += aux_rd_interval;
+ status = dp_get_lane_status_and_lane_adjust(link, lt_settings, dpcd_lane_status,
+ &dpcd_lane_status_updated, dpcd_lane_adjust, DPRX);
+ dp_decide_lane_settings(lt_settings, dpcd_lane_adjust,
+ lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
+ dpcd_128b_132b_get_aux_rd_interval(link, &aux_rd_interval);
+ if (status != DC_OK) {
+ result = LINK_TRAINING_ABORT;
+ } else if (dp_is_ch_eq_done(lt_settings->link_settings.lane_count,
+ dpcd_lane_status)) {
+ /* pass */
+ break;
+ } else if (loop_count >= lt_settings->eq_loop_count_limit) {
+ result = DP_128b_132b_MAX_LOOP_COUNT_REACHED;
+ } else if (dpcd_lane_status_updated.bits.LT_FAILED_128b_132b) {
+ result = DP_128b_132b_LT_FAILED;
+ } else {
+ dp_set_hw_lane_settings(link, link_res, lt_settings, DPRX);
+ dpcd_128b_132b_set_lane_settings(link, lt_settings);
+ }
+ loop_count++;
+ }
+
+ /* poll for EQ interlane align done */
+ while (result == LINK_TRAINING_SUCCESS) {
+ if (status != DC_OK) {
+ result = LINK_TRAINING_ABORT;
+ } else if (dpcd_lane_status_updated.bits.EQ_INTERLANE_ALIGN_DONE_128b_132b) {
+ /* pass */
+ break;
+ } else if (wait_time >= lt_settings->eq_wait_time_limit) {
+ result = DP_128b_132b_CHANNEL_EQ_DONE_TIMEOUT;
+ } else if (dpcd_lane_status_updated.bits.LT_FAILED_128b_132b) {
+ result = DP_128b_132b_LT_FAILED;
+ } else {
+ dp_wait_for_training_aux_rd_interval(link,
+ lt_settings->eq_pattern_time);
+ wait_time += lt_settings->eq_pattern_time;
+ status = dp_get_lane_status_and_lane_adjust(link, lt_settings, dpcd_lane_status,
+ &dpcd_lane_status_updated, dpcd_lane_adjust, DPRX);
+ }
+ }
+
+ return result;
+}
+
+static enum link_training_result dp_perform_128b_132b_cds_done_sequence(
+ struct dc_link *link,
+ const struct link_resource *link_res,
+ struct link_training_settings *lt_settings)
+{
+ /* Assumption: assume hardware has transmitted eq pattern */
+ enum dc_status status = DC_OK;
+ enum link_training_result result = LINK_TRAINING_SUCCESS;
+ union lane_align_status_updated dpcd_lane_status_updated = {0};
+ union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0};
+ union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0};
+ uint32_t wait_time = 0;
+
+ /* initiate CDS done sequence */
+ dpcd_set_training_pattern(link, lt_settings->pattern_for_cds);
+
+ /* poll for CDS interlane align done and symbol lock */
+ while (result == LINK_TRAINING_SUCCESS) {
+ dp_wait_for_training_aux_rd_interval(link,
+ lt_settings->cds_pattern_time);
+ wait_time += lt_settings->cds_pattern_time;
+ status = dp_get_lane_status_and_lane_adjust(link, lt_settings, dpcd_lane_status,
+ &dpcd_lane_status_updated, dpcd_lane_adjust, DPRX);
+ if (status != DC_OK) {
+ result = LINK_TRAINING_ABORT;
+ } else if (dp_is_symbol_locked(lt_settings->link_settings.lane_count, dpcd_lane_status) &&
+ dpcd_lane_status_updated.bits.CDS_INTERLANE_ALIGN_DONE_128b_132b) {
+ /* pass */
+ break;
+ } else if (dpcd_lane_status_updated.bits.LT_FAILED_128b_132b) {
+ result = DP_128b_132b_LT_FAILED;
+ } else if (wait_time >= lt_settings->cds_wait_time_limit) {
+ result = DP_128b_132b_CDS_DONE_TIMEOUT;
+ }
+ }
+
+ return result;
+}
+
+enum link_training_result dp_perform_128b_132b_link_training(
+ struct dc_link *link,
+ const struct link_resource *link_res,
+ struct link_training_settings *lt_settings)
+{
+ enum link_training_result result = LINK_TRAINING_SUCCESS;
+
+ /* TODO - DP2.0 Link: remove legacy_dp2_lt logic */
+ if (link->dc->debug.legacy_dp2_lt) {
+ struct link_training_settings legacy_settings;
+
+ decide_8b_10b_training_settings(link,
+ &lt_settings->link_settings,
+ &legacy_settings);
+ return dp_perform_8b_10b_link_training(link, link_res, &legacy_settings);
+ }
+
+ dpcd_set_link_settings(link, lt_settings);
+
+ if (result == LINK_TRAINING_SUCCESS)
+ result = dp_perform_128b_132b_channel_eq_done_sequence(link, link_res, lt_settings);
+
+ if (result == LINK_TRAINING_SUCCESS)
+ result = dp_perform_128b_132b_cds_done_sequence(link, link_res, lt_settings);
+
+ return result;
+}
+
+void decide_128b_132b_training_settings(struct dc_link *link,
+ const struct dc_link_settings *link_settings,
+ struct link_training_settings *lt_settings)
+{
+ memset(lt_settings, 0, sizeof(*lt_settings));
+
+ lt_settings->link_settings = *link_settings;
+ /* TODO: should decide link spread when populating link_settings */
+ lt_settings->link_settings.link_spread = link->dp_ss_off ? LINK_SPREAD_DISABLED :
+ LINK_SPREAD_05_DOWNSPREAD_30KHZ;
+
+ lt_settings->pattern_for_cr = decide_cr_training_pattern(link_settings);
+ lt_settings->pattern_for_eq = decide_eq_training_pattern(link, link_settings);
+ lt_settings->eq_pattern_time = 2500;
+ lt_settings->eq_wait_time_limit = 400000;
+ lt_settings->eq_loop_count_limit = 20;
+ lt_settings->pattern_for_cds = DP_128b_132b_TPS2_CDS;
+ lt_settings->cds_pattern_time = 2500;
+ lt_settings->cds_wait_time_limit = (dp_parse_lttpr_repeater_count(
+ link->dpcd_caps.lttpr_caps.phy_repeater_cnt) + 1) * 20000;
+ lt_settings->disallow_per_lane_settings = true;
+ lt_settings->lttpr_mode = dp_decide_128b_132b_lttpr_mode(link);
+ dp_hw_to_dpcd_lane_settings(lt_settings,
+ lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
+}
+
+enum lttpr_mode dp_decide_128b_132b_lttpr_mode(struct dc_link *link)
+{
+ enum lttpr_mode mode = LTTPR_MODE_NON_LTTPR;
+
+ if (dp_is_lttpr_present(link))
+ mode = LTTPR_MODE_NON_TRANSPARENT;
+
+ DC_LOG_DC("128b_132b chose LTTPR_MODE %d.\n", mode);
+ return mode;
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_128b_132b.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_128b_132b.h
new file mode 100644
index 000000000000..2147f24efc8b
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_128b_132b.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+
+#ifndef __DC_LINK_DP_TRAINING_128B_132B_H__
+#define __DC_LINK_DP_TRAINING_128B_132B_H__
+#include "link_dp_training.h"
+
+enum link_training_result dp_perform_128b_132b_link_training(
+ struct dc_link *link,
+ const struct link_resource *link_res,
+ struct link_training_settings *lt_settings);
+
+void decide_128b_132b_training_settings(struct dc_link *link,
+ const struct dc_link_settings *link_settings,
+ struct link_training_settings *lt_settings);
+
+enum lttpr_mode dp_decide_128b_132b_lttpr_mode(struct dc_link *link);
+
+#endif /* __DC_LINK_DP_TRAINING_128B_132B_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c
new file mode 100644
index 000000000000..14b98e096d39
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c
@@ -0,0 +1,414 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+/* FILE POLICY AND INTENDED USAGE:
+ * This file implements dp 8b/10b link training software policies and
+ * sequences.
+ */
+#include "link_dp_training_8b_10b.h"
+#include "link_dpcd.h"
+#include "link_dp_phy.h"
+#include "link_dp_capability.h"
+
+#define DC_LOGGER \
+ link->ctx->logger
+
+static int32_t get_cr_training_aux_rd_interval(struct dc_link *link,
+ const struct dc_link_settings *link_settings)
+{
+ union training_aux_rd_interval training_rd_interval;
+ uint32_t wait_in_micro_secs = 100;
+
+ memset(&training_rd_interval, 0, sizeof(training_rd_interval));
+ if (link_dp_get_encoding_format(link_settings) == DP_8b_10b_ENCODING &&
+ link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_12) {
+ core_link_read_dpcd(
+ link,
+ DP_TRAINING_AUX_RD_INTERVAL,
+ (uint8_t *)&training_rd_interval,
+ sizeof(training_rd_interval));
+ if (training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL)
+ wait_in_micro_secs = training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL * 4000;
+ }
+ return wait_in_micro_secs;
+}
+
+static uint32_t get_eq_training_aux_rd_interval(
+ struct dc_link *link,
+ const struct dc_link_settings *link_settings)
+{
+ union training_aux_rd_interval training_rd_interval;
+
+ memset(&training_rd_interval, 0, sizeof(training_rd_interval));
+ if (link_dp_get_encoding_format(link_settings) == DP_128b_132b_ENCODING) {
+ core_link_read_dpcd(
+ link,
+ DP_128B132B_TRAINING_AUX_RD_INTERVAL,
+ (uint8_t *)&training_rd_interval,
+ sizeof(training_rd_interval));
+ } else if (link_dp_get_encoding_format(link_settings) == DP_8b_10b_ENCODING &&
+ link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_12) {
+ core_link_read_dpcd(
+ link,
+ DP_TRAINING_AUX_RD_INTERVAL,
+ (uint8_t *)&training_rd_interval,
+ sizeof(training_rd_interval));
+ }
+
+ switch (training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL) {
+ case 0: return 400;
+ case 1: return 4000;
+ case 2: return 8000;
+ case 3: return 12000;
+ case 4: return 16000;
+ case 5: return 32000;
+ case 6: return 64000;
+ default: return 400;
+ }
+}
+
+void decide_8b_10b_training_settings(
+ struct dc_link *link,
+ const struct dc_link_settings *link_setting,
+ struct link_training_settings *lt_settings)
+{
+ memset(lt_settings, '\0', sizeof(struct link_training_settings));
+
+ /* Initialize link settings */
+ lt_settings->link_settings.use_link_rate_set = link_setting->use_link_rate_set;
+ lt_settings->link_settings.link_rate_set = link_setting->link_rate_set;
+ lt_settings->link_settings.link_rate = link_setting->link_rate;
+ lt_settings->link_settings.lane_count = link_setting->lane_count;
+ /* TODO hard coded to SS for now
+ * lt_settings.link_settings.link_spread =
+ * dal_display_path_is_ss_supported(
+ * path_mode->display_path) ?
+ * LINK_SPREAD_05_DOWNSPREAD_30KHZ :
+ * LINK_SPREAD_DISABLED;
+ */
+ lt_settings->link_settings.link_spread = link->dp_ss_off ?
+ LINK_SPREAD_DISABLED : LINK_SPREAD_05_DOWNSPREAD_30KHZ;
+ lt_settings->cr_pattern_time = get_cr_training_aux_rd_interval(link, link_setting);
+ lt_settings->eq_pattern_time = get_eq_training_aux_rd_interval(link, link_setting);
+ lt_settings->pattern_for_cr = decide_cr_training_pattern(link_setting);
+ lt_settings->pattern_for_eq = decide_eq_training_pattern(link, link_setting);
+ lt_settings->enhanced_framing = 1;
+ lt_settings->should_set_fec_ready = true;
+ lt_settings->disallow_per_lane_settings = true;
+ lt_settings->always_match_dpcd_with_hw_lane_settings = true;
+ lt_settings->lttpr_mode = dp_decide_8b_10b_lttpr_mode(link);
+ dp_hw_to_dpcd_lane_settings(lt_settings, lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
+}
+
+enum lttpr_mode dp_decide_8b_10b_lttpr_mode(struct dc_link *link)
+{
+ bool is_lttpr_present = dp_is_lttpr_present(link);
+ bool vbios_lttpr_force_non_transparent = link->dc->caps.vbios_lttpr_enable;
+ bool vbios_lttpr_aware = link->dc->caps.vbios_lttpr_aware;
+
+ if (!is_lttpr_present)
+ return LTTPR_MODE_NON_LTTPR;
+
+ if (vbios_lttpr_aware) {
+ if (vbios_lttpr_force_non_transparent) {
+ DC_LOG_DC("chose LTTPR_MODE_NON_TRANSPARENT due to VBIOS DCE_INFO_CAPS_LTTPR_SUPPORT_ENABLE set to 1.\n");
+ return LTTPR_MODE_NON_TRANSPARENT;
+ } else {
+ DC_LOG_DC("chose LTTPR_MODE_NON_TRANSPARENT by default due to VBIOS not set DCE_INFO_CAPS_LTTPR_SUPPORT_ENABLE set to 1.\n");
+ return LTTPR_MODE_TRANSPARENT;
+ }
+ }
+
+ if (link->dc->config.allow_lttpr_non_transparent_mode.bits.DP1_4A &&
+ link->dc->caps.extended_aux_timeout_support) {
+ DC_LOG_DC("chose LTTPR_MODE_NON_TRANSPARENT by default and dc->config.allow_lttpr_non_transparent_mode.bits.DP1_4A set to 1.\n");
+ return LTTPR_MODE_NON_TRANSPARENT;
+ }
+
+ DC_LOG_DC("chose LTTPR_MODE_NON_LTTPR.\n");
+ return LTTPR_MODE_NON_LTTPR;
+}
+
+enum link_training_result perform_8b_10b_clock_recovery_sequence(
+ struct dc_link *link,
+ const struct link_resource *link_res,
+ struct link_training_settings *lt_settings,
+ uint32_t offset)
+{
+ uint32_t retries_cr;
+ uint32_t retry_count;
+ uint32_t wait_time_microsec;
+ enum dc_lane_count lane_count = lt_settings->link_settings.lane_count;
+ union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX];
+ union lane_align_status_updated dpcd_lane_status_updated;
+ union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0};
+
+ retries_cr = 0;
+ retry_count = 0;
+
+ memset(&dpcd_lane_status, '\0', sizeof(dpcd_lane_status));
+ memset(&dpcd_lane_status_updated, '\0',
+ sizeof(dpcd_lane_status_updated));
+
+ if (!link->ctx->dc->work_arounds.lt_early_cr_pattern)
+ dp_set_hw_training_pattern(link, link_res, lt_settings->pattern_for_cr, offset);
+
+ /* najeeb - The synaptics MST hub can put the LT in
+ * infinite loop by switching the VS
+ */
+ /* between level 0 and level 1 continuously, here
+ * we try for CR lock for LinkTrainingMaxCRRetry count*/
+ while ((retries_cr < LINK_TRAINING_MAX_RETRY_COUNT) &&
+ (retry_count < LINK_TRAINING_MAX_CR_RETRY)) {
+
+
+ /* 1. call HWSS to set lane settings*/
+ dp_set_hw_lane_settings(
+ link,
+ link_res,
+ lt_settings,
+ offset);
+
+ /* 2. update DPCD of the receiver*/
+ if (!retry_count)
+ /* EPR #361076 - write as a 5-byte burst,
+ * but only for the 1-st iteration.*/
+ dpcd_set_lt_pattern_and_lane_settings(
+ link,
+ lt_settings,
+ lt_settings->pattern_for_cr,
+ offset);
+ else
+ dpcd_set_lane_settings(
+ link,
+ lt_settings,
+ offset);
+
+ /* 3. wait receiver to lock-on*/
+ wait_time_microsec = lt_settings->cr_pattern_time;
+
+ dp_wait_for_training_aux_rd_interval(
+ link,
+ wait_time_microsec);
+
+ /* 4. Read lane status and requested drive
+ * settings as set by the sink
+ */
+ dp_get_lane_status_and_lane_adjust(
+ link,
+ lt_settings,
+ dpcd_lane_status,
+ &dpcd_lane_status_updated,
+ dpcd_lane_adjust,
+ offset);
+
+ /* 5. check CR done*/
+ if (dp_is_cr_done(lane_count, dpcd_lane_status))
+ return LINK_TRAINING_SUCCESS;
+
+ /* 6. max VS reached*/
+ if ((link_dp_get_encoding_format(&lt_settings->link_settings) ==
+ DP_8b_10b_ENCODING) &&
+ dp_is_max_vs_reached(lt_settings))
+ break;
+
+ /* 7. same lane settings*/
+ /* Note: settings are the same for all lanes,
+ * so comparing first lane is sufficient*/
+ if ((link_dp_get_encoding_format(&lt_settings->link_settings) == DP_8b_10b_ENCODING) &&
+ lt_settings->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET ==
+ dpcd_lane_adjust[0].bits.VOLTAGE_SWING_LANE)
+ retries_cr++;
+ else if ((link_dp_get_encoding_format(&lt_settings->link_settings) == DP_128b_132b_ENCODING) &&
+ lt_settings->dpcd_lane_settings[0].tx_ffe.PRESET_VALUE ==
+ dpcd_lane_adjust[0].tx_ffe.PRESET_VALUE)
+ retries_cr++;
+ else
+ retries_cr = 0;
+
+ /* 8. update VS/PE/PC2 in lt_settings*/
+ dp_decide_lane_settings(lt_settings, dpcd_lane_adjust,
+ lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
+ retry_count++;
+ }
+
+ if (retry_count >= LINK_TRAINING_MAX_CR_RETRY) {
+ ASSERT(0);
+ DC_LOG_ERROR("%s: Link Training Error, could not get CR after %d tries. Possibly voltage swing issue",
+ __func__,
+ LINK_TRAINING_MAX_CR_RETRY);
+
+ }
+
+ return dp_get_cr_failure(lane_count, dpcd_lane_status);
+}
+
+enum link_training_result perform_8b_10b_channel_equalization_sequence(
+ struct dc_link *link,
+ const struct link_resource *link_res,
+ struct link_training_settings *lt_settings,
+ uint32_t offset)
+{
+ enum dc_dp_training_pattern tr_pattern;
+ uint32_t retries_ch_eq;
+ uint32_t wait_time_microsec;
+ enum dc_lane_count lane_count = lt_settings->link_settings.lane_count;
+ union lane_align_status_updated dpcd_lane_status_updated = {0};
+ union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0};
+ union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0};
+
+ /* Note: also check that TPS4 is a supported feature*/
+ tr_pattern = lt_settings->pattern_for_eq;
+
+ if (is_repeater(lt_settings, offset) && link_dp_get_encoding_format(&lt_settings->link_settings) == DP_8b_10b_ENCODING)
+ tr_pattern = DP_TRAINING_PATTERN_SEQUENCE_4;
+
+ dp_set_hw_training_pattern(link, link_res, tr_pattern, offset);
+
+ for (retries_ch_eq = 0; retries_ch_eq <= LINK_TRAINING_MAX_RETRY_COUNT;
+ retries_ch_eq++) {
+
+ dp_set_hw_lane_settings(link, link_res, lt_settings, offset);
+
+ /* 2. update DPCD*/
+ if (!retries_ch_eq)
+ /* EPR #361076 - write as a 5-byte burst,
+ * but only for the 1-st iteration
+ */
+
+ dpcd_set_lt_pattern_and_lane_settings(
+ link,
+ lt_settings,
+ tr_pattern, offset);
+ else
+ dpcd_set_lane_settings(link, lt_settings, offset);
+
+ /* 3. wait for receiver to lock-on*/
+ wait_time_microsec = lt_settings->eq_pattern_time;
+
+ if (is_repeater(lt_settings, offset))
+ wait_time_microsec =
+ dp_translate_training_aux_read_interval(
+ link->dpcd_caps.lttpr_caps.aux_rd_interval[offset - 1]);
+
+ dp_wait_for_training_aux_rd_interval(
+ link,
+ wait_time_microsec);
+
+ /* 4. Read lane status and requested
+ * drive settings as set by the sink*/
+
+ dp_get_lane_status_and_lane_adjust(
+ link,
+ lt_settings,
+ dpcd_lane_status,
+ &dpcd_lane_status_updated,
+ dpcd_lane_adjust,
+ offset);
+
+ /* 5. check CR done*/
+ if (!dp_is_cr_done(lane_count, dpcd_lane_status))
+ return dpcd_lane_status[0].bits.CR_DONE_0 ?
+ LINK_TRAINING_EQ_FAIL_CR_PARTIAL :
+ LINK_TRAINING_EQ_FAIL_CR;
+
+ /* 6. check CHEQ done*/
+ if (dp_is_ch_eq_done(lane_count, dpcd_lane_status) &&
+ dp_is_symbol_locked(lane_count, dpcd_lane_status) &&
+ dp_is_interlane_aligned(dpcd_lane_status_updated))
+ return LINK_TRAINING_SUCCESS;
+
+ /* 7. update VS/PE/PC2 in lt_settings*/
+ dp_decide_lane_settings(lt_settings, dpcd_lane_adjust,
+ lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
+ }
+
+ return LINK_TRAINING_EQ_FAIL_EQ;
+
+}
+
+enum link_training_result dp_perform_8b_10b_link_training(
+ struct dc_link *link,
+ const struct link_resource *link_res,
+ struct link_training_settings *lt_settings)
+{
+ enum link_training_result status = LINK_TRAINING_SUCCESS;
+
+ uint8_t repeater_cnt;
+ uint8_t repeater_id;
+ uint8_t lane = 0;
+
+ if (link->ctx->dc->work_arounds.lt_early_cr_pattern)
+ start_clock_recovery_pattern_early(link, link_res, lt_settings, DPRX);
+
+ /* 1. set link rate, lane count and spread. */
+ dpcd_set_link_settings(link, lt_settings);
+
+ if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) {
+
+ /* 2. perform link training (set link training done
+ * to false is done as well)
+ */
+ repeater_cnt = dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
+
+ for (repeater_id = repeater_cnt; (repeater_id > 0 && status == LINK_TRAINING_SUCCESS);
+ repeater_id--) {
+ status = perform_8b_10b_clock_recovery_sequence(link, link_res, lt_settings, repeater_id);
+
+ if (status != LINK_TRAINING_SUCCESS) {
+ repeater_training_done(link, repeater_id);
+ break;
+ }
+
+ status = perform_8b_10b_channel_equalization_sequence(link,
+ link_res,
+ lt_settings,
+ repeater_id);
+
+ repeater_training_done(link, repeater_id);
+
+ if (status != LINK_TRAINING_SUCCESS)
+ break;
+
+ for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) {
+ lt_settings->dpcd_lane_settings[lane].raw = 0;
+ lt_settings->hw_lane_settings[lane].VOLTAGE_SWING = 0;
+ lt_settings->hw_lane_settings[lane].PRE_EMPHASIS = 0;
+ }
+ }
+ }
+
+ if (status == LINK_TRAINING_SUCCESS) {
+ status = perform_8b_10b_clock_recovery_sequence(link, link_res, lt_settings, DPRX);
+ if (status == LINK_TRAINING_SUCCESS) {
+ status = perform_8b_10b_channel_equalization_sequence(link,
+ link_res,
+ lt_settings,
+ DPRX);
+ }
+ }
+
+ return status;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.h
new file mode 100644
index 000000000000..d26de15ce954
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+
+#ifndef __DC_LINK_DP_TRAINING_8B_10B_H__
+#define __DC_LINK_DP_TRAINING_8B_10B_H__
+#include "link_dp_training.h"
+
+/* to avoid infinite loop where-in the receiver
+ * switches between different VS
+ */
+#define LINK_TRAINING_MAX_CR_RETRY 100
+#define LINK_TRAINING_MAX_RETRY_COUNT 5
+
+enum link_training_result dp_perform_8b_10b_link_training(
+ struct dc_link *link,
+ const struct link_resource *link_res,
+ struct link_training_settings *lt_settings);
+
+enum link_training_result perform_8b_10b_clock_recovery_sequence(
+ struct dc_link *link,
+ const struct link_resource *link_res,
+ struct link_training_settings *lt_settings,
+ uint32_t offset);
+
+enum link_training_result perform_8b_10b_channel_equalization_sequence(
+ struct dc_link *link,
+ const struct link_resource *link_res,
+ struct link_training_settings *lt_settings,
+ uint32_t offset);
+
+enum lttpr_mode dp_decide_8b_10b_lttpr_mode(struct dc_link *link);
+
+void decide_8b_10b_training_settings(
+ struct dc_link *link,
+ const struct dc_link_settings *link_setting,
+ struct link_training_settings *lt_settings);
+
+#endif /* __DC_LINK_DP_TRAINING_8B_10B_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_auxless.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_auxless.c
new file mode 100644
index 000000000000..e50ec5012559
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_auxless.c
@@ -0,0 +1,79 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+/* FILE POLICY AND INTENDED USAGE:
+ *
+ */
+#include "link_dp_training_auxless.h"
+#include "link_dp_phy.h"
+#define DC_LOGGER \
+ link->ctx->logger
+bool dc_link_dp_perform_link_training_skip_aux(
+ struct dc_link *link,
+ const struct link_resource *link_res,
+ const struct dc_link_settings *link_setting)
+{
+ struct link_training_settings lt_settings = {0};
+
+ dp_decide_training_settings(
+ link,
+ link_setting,
+ &lt_settings);
+ override_training_settings(
+ link,
+ &link->preferred_training_settings,
+ &lt_settings);
+
+ /* 1. Perform_clock_recovery_sequence. */
+
+ /* transmit training pattern for clock recovery */
+ dp_set_hw_training_pattern(link, link_res, lt_settings.pattern_for_cr, DPRX);
+
+ /* call HWSS to set lane settings*/
+ dp_set_hw_lane_settings(link, link_res, &lt_settings, DPRX);
+
+ /* wait receiver to lock-on*/
+ dp_wait_for_training_aux_rd_interval(link, lt_settings.cr_pattern_time);
+
+ /* 2. Perform_channel_equalization_sequence. */
+
+ /* transmit training pattern for channel equalization. */
+ dp_set_hw_training_pattern(link, link_res, lt_settings.pattern_for_eq, DPRX);
+
+ /* call HWSS to set lane settings*/
+ dp_set_hw_lane_settings(link, link_res, &lt_settings, DPRX);
+
+ /* wait receiver to lock-on. */
+ dp_wait_for_training_aux_rd_interval(link, lt_settings.eq_pattern_time);
+
+ /* 3. Perform_link_training_int. */
+
+ /* Mainlink output idle pattern. */
+ dp_set_hw_test_pattern(link, link_res, DP_TEST_PATTERN_VIDEO_MODE, NULL, 0);
+
+ dp_log_training_result(link, &lt_settings, LINK_TRAINING_SUCCESS);
+
+ return true;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_auxless.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_auxless.h
new file mode 100644
index 000000000000..413999cd03c4
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_auxless.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+
+#ifndef __DC_LINK_DP_TRAINING_AUXLESS_H__
+#define __DC_LINK_DP_TRAINING_AUXLESS_H__
+#include "link_dp_training.h"
+
+bool dc_link_dp_perform_link_training_skip_aux(
+ struct dc_link *link,
+ const struct link_resource *link_res,
+ const struct dc_link_settings *link_setting);
+#endif /* __DC_LINK_DP_TRAINING_AUXLESS_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c
index d130d58ac08e..e60da0532c53 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c
@@ -1,6 +1,5 @@
-// SPDX-License-Identifier: MIT
/*
- * Copyright 2021 Advanced Micro Devices, Inc.
+ * Copyright 2022 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -24,76 +23,72 @@
*
*/
+/* FILE POLICY AND INTENDED USAGE:
+ * This module implements functionality for training DPIA links.
+ */
+#include "link_dp_training_dpia.h"
#include "dc.h"
-#include "dc_link_dpia.h"
#include "inc/core_status.h"
#include "dc_link.h"
-#include "dc_link_dp.h"
#include "dpcd_defs.h"
+
+#include "link_dp_dpia.h"
#include "link_hwss.h"
#include "dm_helpers.h"
#include "dmub/inc/dmub_cmd.h"
-#include "inc/link_dpcd.h"
+#include "link_dpcd.h"
+#include "link_dp_phy.h"
+#include "link_dp_training_8b_10b.h"
+#include "link_dp_capability.h"
#include "dc_dmub_srv.h"
-
#define DC_LOGGER \
link->ctx->logger
-enum dc_status dpcd_get_tunneling_device_data(struct dc_link *link)
-{
- enum dc_status status = DC_OK;
- uint8_t dpcd_dp_tun_data[3] = {0};
- uint8_t dpcd_topology_data[DPCD_USB4_TOPOLOGY_ID_LEN] = {0};
- uint8_t i = 0;
-
- status = core_link_read_dpcd(link,
- DP_TUNNELING_CAPABILITIES_SUPPORT,
- dpcd_dp_tun_data,
- sizeof(dpcd_dp_tun_data));
-
- status = core_link_read_dpcd(link,
- DP_USB4_ROUTER_TOPOLOGY_ID,
- dpcd_topology_data,
- sizeof(dpcd_topology_data));
-
- link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.raw =
- dpcd_dp_tun_data[DP_TUNNELING_CAPABILITIES_SUPPORT -
- DP_TUNNELING_CAPABILITIES_SUPPORT];
- link->dpcd_caps.usb4_dp_tun_info.dpia_info.raw =
- dpcd_dp_tun_data[DP_IN_ADAPTER_INFO - DP_TUNNELING_CAPABILITIES_SUPPORT];
- link->dpcd_caps.usb4_dp_tun_info.usb4_driver_id =
- dpcd_dp_tun_data[DP_USB4_DRIVER_ID - DP_TUNNELING_CAPABILITIES_SUPPORT];
-
- for (i = 0; i < DPCD_USB4_TOPOLOGY_ID_LEN; i++)
- link->dpcd_caps.usb4_dp_tun_info.usb4_topology_id[i] = dpcd_topology_data[i];
-
- return status;
-}
-
-bool dc_link_dpia_query_hpd_status(struct dc_link *link)
-{
- union dmub_rb_cmd cmd = {0};
- struct dc_dmub_srv *dmub_srv = link->ctx->dmub_srv;
- bool is_hpd_high = false;
-
- /* prepare QUERY_HPD command */
- cmd.query_hpd.header.type = DMUB_CMD__QUERY_HPD_STATE;
- cmd.query_hpd.data.instance = link->link_id.enum_id - ENUM_ID_1;
- cmd.query_hpd.data.ch_type = AUX_CHANNEL_DPIA;
-
- /* Return HPD status reported by DMUB if query successfully executed. */
- if (dc_dmub_srv_cmd_with_reply_data(dmub_srv, &cmd) && cmd.query_hpd.data.status == AUX_RET_SUCCESS)
- is_hpd_high = cmd.query_hpd.data.result;
+/* The approximate time (us) it takes to transmit 9 USB4 DP clock sync packets. */
+#define DPIA_CLK_SYNC_DELAY 16000
+
+/* Extend interval between training status checks for manual testing. */
+#define DPIA_DEBUG_EXTENDED_AUX_RD_INTERVAL_US 60000000
+
+#define TRAINING_AUX_RD_INTERVAL 100 //us
+
+/* SET_CONFIG message types sent by driver. */
+enum dpia_set_config_type {
+ DPIA_SET_CFG_SET_LINK = 0x01,
+ DPIA_SET_CFG_SET_PHY_TEST_MODE = 0x05,
+ DPIA_SET_CFG_SET_TRAINING = 0x18,
+ DPIA_SET_CFG_SET_VSPE = 0x19
+};
+
+/* Training stages (TS) in SET_CONFIG(SET_TRAINING) message. */
+enum dpia_set_config_ts {
+ DPIA_TS_DPRX_DONE = 0x00, /* Done training DPRX. */
+ DPIA_TS_TPS1 = 0x01,
+ DPIA_TS_TPS2 = 0x02,
+ DPIA_TS_TPS3 = 0x03,
+ DPIA_TS_TPS4 = 0x07,
+ DPIA_TS_UFP_DONE = 0xff /* Done training DPTX-to-DPIA hop. */
+};
+
+/* SET_CONFIG message data associated with messages sent by driver. */
+union dpia_set_config_data {
+ struct {
+ uint8_t mode : 1;
+ uint8_t reserved : 7;
+ } set_link;
+ struct {
+ uint8_t stage;
+ } set_training;
+ struct {
+ uint8_t swing : 2;
+ uint8_t max_swing_reached : 1;
+ uint8_t pre_emph : 2;
+ uint8_t max_pre_emph_reached : 1;
+ uint8_t reserved : 2;
+ } set_vspe;
+ uint8_t raw;
+};
- DC_LOG_DEBUG("%s: link(%d) dpia(%d) cmd_status(%d) result(%d)\n",
- __func__,
- link->link_index,
- link->link_id.enum_id - ENUM_ID_1,
- cmd.query_hpd.data.status,
- cmd.query_hpd.data.result);
-
- return is_hpd_high;
-}
/* Configure link as prescribed in link_setting; set LTTPR mode; and
* Initialize link training settings.
@@ -113,11 +108,12 @@ static enum link_training_result dpia_configure_link(
bool fec_enable;
DC_LOG_HW_LINK_TRAINING("%s\n DPIA(%d) configuring\n - LTTPR mode(%d)\n",
- __func__,
- link->link_id.enum_id - ENUM_ID_1,
- lt_settings->lttpr_mode);
+ __func__,
+ link->link_id.enum_id - ENUM_ID_1,
+ lt_settings->lttpr_mode);
- dp_decide_training_settings(link,
+ dp_decide_training_settings(
+ link,
link_setting,
lt_settings);
@@ -137,7 +133,7 @@ static enum link_training_result dpia_configure_link(
if (status != DC_OK && link->is_hpd_pending)
return LINK_TRAINING_ABORT;
- if (link->preferred_training_settings.fec_enable)
+ if (link->preferred_training_settings.fec_enable != NULL)
fec_enable = *link->preferred_training_settings.fec_enable;
else
fec_enable = true;
@@ -148,7 +144,8 @@ static enum link_training_result dpia_configure_link(
return LINK_TRAINING_SUCCESS;
}
-static enum dc_status core_link_send_set_config(struct dc_link *link,
+static enum dc_status core_link_send_set_config(
+ struct dc_link *link,
uint8_t msg_type,
uint8_t msg_data)
{
@@ -160,8 +157,8 @@ static enum dc_status core_link_send_set_config(struct dc_link *link,
payload.msg_data = msg_data;
if (!link->ddc->ddc_pin && !link->aux_access_disabled &&
- (dm_helpers_dmub_set_config_sync(link->ctx, link,
- &payload, &set_config_result) == -1)) {
+ (dm_helpers_dmub_set_config_sync(link->ctx,
+ link, &payload, &set_config_result) == -1)) {
return DC_ERROR_UNEXPECTED;
}
@@ -170,7 +167,8 @@ static enum dc_status core_link_send_set_config(struct dc_link *link,
}
/* Build SET_CONFIG message data payload for specified message type. */
-static uint8_t dpia_build_set_config_data(enum dpia_set_config_type type,
+static uint8_t dpia_build_set_config_data(
+ enum dpia_set_config_type type,
struct dc_link *link,
struct link_training_settings *lt_settings)
{
@@ -189,11 +187,9 @@ static uint8_t dpia_build_set_config_data(enum dpia_set_config_type type,
data.set_vspe.swing = lt_settings->hw_lane_settings[0].VOLTAGE_SWING;
data.set_vspe.pre_emph = lt_settings->hw_lane_settings[0].PRE_EMPHASIS;
data.set_vspe.max_swing_reached =
- lt_settings->hw_lane_settings[0].VOLTAGE_SWING ==
- VOLTAGE_SWING_MAX_LEVEL ? 1 : 0;
+ lt_settings->hw_lane_settings[0].VOLTAGE_SWING == VOLTAGE_SWING_MAX_LEVEL ? 1 : 0;
data.set_vspe.max_pre_emph_reached =
- lt_settings->hw_lane_settings[0].PRE_EMPHASIS ==
- PRE_EMPHASIS_MAX_LEVEL ? 1 : 0;
+ lt_settings->hw_lane_settings[0].PRE_EMPHASIS == PRE_EMPHASIS_MAX_LEVEL ? 1 : 0;
break;
default:
ASSERT(false); /* Message type not supported by helper function. */
@@ -235,7 +231,8 @@ static enum dc_status convert_trng_ptn_to_trng_stg(enum dc_dp_training_pattern t
}
/* Write training pattern to DPCD. */
-static enum dc_status dpcd_set_lt_pattern(struct dc_link *link,
+static enum dc_status dpcd_set_lt_pattern(
+ struct dc_link *link,
enum dc_dp_training_pattern pattern,
uint32_t hop)
{
@@ -249,28 +246,29 @@ static enum dc_status dpcd_set_lt_pattern(struct dc_link *link,
/* DpcdAddress_TrainingPatternSet */
dpcd_pattern.v1_4.TRAINING_PATTERN_SET =
- dc_dp_training_pattern_to_dpcd_training_pattern(link, pattern);
+ dp_training_pattern_to_dpcd_training_pattern(link, pattern);
dpcd_pattern.v1_4.SCRAMBLING_DISABLE =
- dc_dp_initialize_scrambling_data_symbols(link, pattern);
+ dp_initialize_scrambling_data_symbols(link, pattern);
if (hop != DPRX) {
DC_LOG_HW_LINK_TRAINING("%s\n LTTPR Repeater ID: %d\n 0x%X pattern = %x\n",
- __func__,
- hop,
- dpcd_tps_offset,
- dpcd_pattern.v1_4.TRAINING_PATTERN_SET);
+ __func__,
+ hop,
+ dpcd_tps_offset,
+ dpcd_pattern.v1_4.TRAINING_PATTERN_SET);
} else {
DC_LOG_HW_LINK_TRAINING("%s\n 0x%X pattern = %x\n",
- __func__,
- dpcd_tps_offset,
- dpcd_pattern.v1_4.TRAINING_PATTERN_SET);
+ __func__,
+ dpcd_tps_offset,
+ dpcd_pattern.v1_4.TRAINING_PATTERN_SET);
}
- status = core_link_write_dpcd(link,
- dpcd_tps_offset,
- &dpcd_pattern.raw,
- sizeof(dpcd_pattern.raw));
+ status = core_link_write_dpcd(
+ link,
+ dpcd_tps_offset,
+ &dpcd_pattern.raw,
+ sizeof(dpcd_pattern.raw));
return status;
}
@@ -284,7 +282,7 @@ static enum dc_status dpcd_set_lt_pattern(struct dc_link *link,
*
* @param link DPIA link being trained.
* @param lt_settings link_setting and drive settings (voltage swing and pre-emphasis).
- * @param hop The Hop in display path. DPRX = 0.
+ * @param hop Hop in display path. DPRX = 0.
*/
static enum link_training_result dpia_training_cr_non_transparent(
struct dc_link *link,
@@ -297,8 +295,7 @@ static enum link_training_result dpia_training_cr_non_transparent(
enum dc_status status;
uint32_t retries_cr = 0; /* Number of consecutive attempts with same VS or PE. */
uint32_t retry_count = 0;
- /* From DP spec, CR read interval is always 100us. */
- uint32_t wait_time_microsec = TRAINING_AUX_RD_INTERVAL;
+ uint32_t wait_time_microsec = TRAINING_AUX_RD_INTERVAL; /* From DP spec, CR read interval is always 100us. */
enum dc_lane_count lane_count = lt_settings->link_settings.lane_count;
union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0};
union lane_align_status_updated dpcd_lane_status_updated = {0};
@@ -306,7 +303,7 @@ static enum link_training_result dpia_training_cr_non_transparent(
uint8_t set_cfg_data;
enum dpia_set_config_ts ts;
- repeater_cnt = dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
+ repeater_cnt = dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
/* Cap of LINK_TRAINING_MAX_CR_RETRY attempts at clock recovery.
* Fix inherited from perform_clock_recovery_sequence() -
@@ -316,17 +313,20 @@ static enum link_training_result dpia_training_cr_non_transparent(
* continuously.
*/
while ((retries_cr < LINK_TRAINING_MAX_RETRY_COUNT) &&
- (retry_count < LINK_TRAINING_MAX_CR_RETRY)) {
+ (retry_count < LINK_TRAINING_MAX_CR_RETRY)) {
+
/* DPTX-to-DPIA */
if (hop == repeater_cnt) {
/* Send SET_CONFIG(SET_LINK:LC,LR,LTTPR) to notify DPOA that
* non-transparent link training has started.
* This also enables the transmission of clk_sync packets.
*/
- set_cfg_data = dpia_build_set_config_data(DPIA_SET_CFG_SET_LINK,
+ set_cfg_data = dpia_build_set_config_data(
+ DPIA_SET_CFG_SET_LINK,
link,
lt_settings);
- status = core_link_send_set_config(link,
+ status = core_link_send_set_config(
+ link,
DPIA_SET_CFG_SET_LINK,
set_cfg_data);
/* CR for this hop is considered successful as long as
@@ -347,6 +347,14 @@ static enum link_training_result dpia_training_cr_non_transparent(
result = LINK_TRAINING_ABORT;
break;
}
+ status = core_link_send_set_config(
+ link,
+ DPIA_SET_CFG_SET_TRAINING,
+ ts);
+ if (status != DC_OK) {
+ result = LINK_TRAINING_ABORT;
+ break;
+ }
status = dpcd_set_lt_pattern(link, lt_settings->pattern_for_cr, hop);
if (status != DC_OK) {
result = LINK_TRAINING_ABORT;
@@ -358,10 +366,12 @@ static enum link_training_result dpia_training_cr_non_transparent(
* drive settings for hops immediately downstream.
*/
if (hop == repeater_cnt - 1) {
- set_cfg_data = dpia_build_set_config_data(DPIA_SET_CFG_SET_VSPE,
+ set_cfg_data = dpia_build_set_config_data(
+ DPIA_SET_CFG_SET_VSPE,
link,
lt_settings);
- status = core_link_send_set_config(link,
+ status = core_link_send_set_config(
+ link,
DPIA_SET_CFG_SET_VSPE,
set_cfg_data);
if (status != DC_OK) {
@@ -468,7 +478,8 @@ static enum link_training_result dpia_training_cr_transparent(
* continuously.
*/
while ((retries_cr < LINK_TRAINING_MAX_RETRY_COUNT) &&
- (retry_count < LINK_TRAINING_MAX_CR_RETRY)) {
+ (retry_count < LINK_TRAINING_MAX_CR_RETRY)) {
+
/* Write TPS1 (not VS or PE) to DPCD to start CR phase.
* DPIA sends SET_CONFIG(SET_LINK) to notify DPOA to
* start link training.
@@ -529,8 +540,7 @@ static enum link_training_result dpia_training_cr_transparent(
if (link->is_hpd_pending)
result = LINK_TRAINING_ABORT;
- DC_LOG_HW_LINK_TRAINING("%s\n DPIA(%d) clock recovery\n"
- " -hop(%d)\n - result(%d)\n - retries(%d)\n",
+ DC_LOG_HW_LINK_TRAINING("%s\n DPIA(%d) clock recovery\n -hop(%d)\n - result(%d)\n - retries(%d)\n",
__func__,
link->link_id.enum_id - ENUM_ID_1,
DPRX,
@@ -545,7 +555,7 @@ static enum link_training_result dpia_training_cr_transparent(
*
* @param link DPIA link being trained.
* @param lt_settings link_setting and drive settings (voltage swing and pre-emphasis).
- * @param hop The Hop in display path. DPRX = 0.
+ * @param hop Hop in display path. DPRX = 0.
*/
static enum link_training_result dpia_training_cr_phase(
struct dc_link *link,
@@ -564,7 +574,8 @@ static enum link_training_result dpia_training_cr_phase(
}
/* Return status read interval during equalization phase. */
-static uint32_t dpia_get_eq_aux_rd_interval(const struct dc_link *link,
+static uint32_t dpia_get_eq_aux_rd_interval(
+ const struct dc_link *link,
const struct link_training_settings *lt_settings,
uint32_t hop)
{
@@ -590,12 +601,11 @@ static uint32_t dpia_get_eq_aux_rd_interval(const struct dc_link *link,
* - TPSx is transmitted for any hops downstream of DPOA.
* - Drive (VS/PE) only transmitted for the hop immediately downstream of DPOA.
* - EQ for the first hop (DPTX-to-DPIA) is assumed to be successful.
- * - DPRX EQ only reported successful when both DPRX and DPIA requirements
- * (clk sync packets sent) fulfilled.
+ * - DPRX EQ only reported successful when both DPRX and DPIA requirements (clk sync packets sent) fulfilled.
*
* @param link DPIA link being trained.
* @param lt_settings link_setting and drive settings (voltage swing and pre-emphasis).
- * @param hop The Hop in display path. DPRX = 0.
+ * @param hop Hop in display path. DPRX = 0.
*/
static enum link_training_result dpia_training_eq_non_transparent(
struct dc_link *link,
@@ -624,9 +634,10 @@ static enum link_training_result dpia_training_eq_non_transparent(
else
tr_pattern = DP_TRAINING_PATTERN_SEQUENCE_4;
- repeater_cnt = dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
+ repeater_cnt = dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
for (retries_eq = 0; retries_eq < LINK_TRAINING_MAX_RETRY_COUNT; retries_eq++) {
+
/* DPTX-to-DPIA equalization always successful. */
if (hop == repeater_cnt) {
result = LINK_TRAINING_SUCCESS;
@@ -640,7 +651,8 @@ static enum link_training_result dpia_training_eq_non_transparent(
result = LINK_TRAINING_ABORT;
break;
}
- status = core_link_send_set_config(link,
+ status = core_link_send_set_config(
+ link,
DPIA_SET_CFG_SET_TRAINING,
ts);
if (status != DC_OK) {
@@ -658,12 +670,14 @@ static enum link_training_result dpia_training_eq_non_transparent(
* drive settings for hop immediately downstream.
*/
if (hop == repeater_cnt - 1) {
- set_cfg_data = dpia_build_set_config_data(DPIA_SET_CFG_SET_VSPE,
- link,
- lt_settings);
- status = core_link_send_set_config(link,
- DPIA_SET_CFG_SET_VSPE,
- set_cfg_data);
+ set_cfg_data = dpia_build_set_config_data(
+ DPIA_SET_CFG_SET_VSPE,
+ link,
+ lt_settings);
+ status = core_link_send_set_config(
+ link,
+ DPIA_SET_CFG_SET_VSPE,
+ set_cfg_data);
if (status != DC_OK) {
result = LINK_TRAINING_ABORT;
break;
@@ -679,7 +693,7 @@ static enum link_training_result dpia_training_eq_non_transparent(
* ensure clock sync packets have been sent.
*/
if (hop == DPRX && retries_eq == 1)
- wait_time_microsec = max(wait_time_microsec, (uint32_t)DPIA_CLK_SYNC_DELAY);
+ wait_time_microsec = max(wait_time_microsec, (uint32_t) DPIA_CLK_SYNC_DELAY);
else
wait_time_microsec = dpia_get_eq_aux_rd_interval(link, lt_settings, hop);
@@ -705,8 +719,8 @@ static enum link_training_result dpia_training_eq_non_transparent(
}
if (dp_is_ch_eq_done(lane_count, dpcd_lane_status) &&
- dp_is_symbol_locked(link->cur_link_settings.lane_count, dpcd_lane_status) &&
- dp_is_interlane_aligned(dpcd_lane_status_updated)) {
+ dp_is_symbol_locked(link->cur_link_settings.lane_count, dpcd_lane_status) &&
+ dp_is_interlane_aligned(dpcd_lane_status_updated)) {
result = LINK_TRAINING_SUCCESS;
break;
}
@@ -741,7 +755,7 @@ static enum link_training_result dpia_training_eq_non_transparent(
*
* @param link DPIA link being trained.
* @param lt_settings link_setting and drive settings (voltage swing and pre-emphasis).
- * @param hop The Hop in display path. DPRX = 0.
+ * @param hop Hop in display path. DPRX = 0.
*/
static enum link_training_result dpia_training_eq_transparent(
struct dc_link *link,
@@ -761,6 +775,7 @@ static enum link_training_result dpia_training_eq_transparent(
wait_time_microsec = dpia_get_eq_aux_rd_interval(link, lt_settings, DPRX);
for (retries_eq = 0; retries_eq < LINK_TRAINING_MAX_RETRY_COUNT; retries_eq++) {
+
if (retries_eq == 0) {
status = dpcd_set_lt_pattern(link, tr_pattern, DPRX);
if (status != DC_OK) {
@@ -810,8 +825,7 @@ static enum link_training_result dpia_training_eq_transparent(
if (link->is_hpd_pending)
result = LINK_TRAINING_ABORT;
- DC_LOG_HW_LINK_TRAINING("%s\n DPIA(%d) equalization\n"
- " - hop(%d)\n - result(%d)\n - retries(%d)\n",
+ DC_LOG_HW_LINK_TRAINING("%s\n DPIA(%d) equalization\n - hop(%d)\n - result(%d)\n - retries(%d)\n",
__func__,
link->link_id.enum_id - ENUM_ID_1,
DPRX,
@@ -826,7 +840,7 @@ static enum link_training_result dpia_training_eq_transparent(
*
* @param link DPIA link being trained.
* @param lt_settings link_setting and drive settings (voltage swing and pre-emphasis).
- * @param hop The Hop in display path. DPRX = 0.
+ * @param hop Hop in display path. DPRX = 0.
*/
static enum link_training_result dpia_training_eq_phase(
struct dc_link *link,
@@ -845,7 +859,9 @@ static enum link_training_result dpia_training_eq_phase(
}
/* End training of specified hop in display path. */
-static enum dc_status dpcd_clear_lt_pattern(struct dc_link *link, uint32_t hop)
+static enum dc_status dpcd_clear_lt_pattern(
+ struct dc_link *link,
+ uint32_t hop)
{
union dpcd_training_pattern dpcd_pattern = {0};
uint32_t dpcd_tps_offset = DP_TRAINING_PATTERN_SET;
@@ -855,7 +871,8 @@ static enum dc_status dpcd_clear_lt_pattern(struct dc_link *link, uint32_t hop)
dpcd_tps_offset = DP_TRAINING_PATTERN_SET_PHY_REPEATER1 +
((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (hop - 1));
- status = core_link_write_dpcd(link,
+ status = core_link_write_dpcd(
+ link,
dpcd_tps_offset,
&dpcd_pattern.raw,
sizeof(dpcd_pattern.raw));
@@ -873,9 +890,10 @@ static enum dc_status dpcd_clear_lt_pattern(struct dc_link *link, uint32_t hop)
* (DPTX-to-DPIA) and last hop (DPRX).
*
* @param link DPIA link being trained.
- * @param hop The Hop in display path. DPRX = 0.
+ * @param hop Hop in display path. DPRX = 0.
*/
-static enum link_training_result dpia_training_end(struct dc_link *link,
+static enum link_training_result dpia_training_end(
+ struct dc_link *link,
struct link_training_settings *lt_settings,
uint32_t hop)
{
@@ -884,13 +902,15 @@ static enum link_training_result dpia_training_end(struct dc_link *link,
enum dc_status status;
if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) {
- repeater_cnt = dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
+
+ repeater_cnt = dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
if (hop == repeater_cnt) { /* DPTX-to-DPIA */
/* Send SET_CONFIG(SET_TRAINING:0xff) to notify DPOA that
* DPTX-to-DPIA hop trained. No DPCD write needed for first hop.
*/
- status = core_link_send_set_config(link,
+ status = core_link_send_set_config(
+ link,
DPIA_SET_CFG_SET_TRAINING,
DPIA_TS_UFP_DONE);
if (status != DC_OK)
@@ -904,7 +924,8 @@ static enum link_training_result dpia_training_end(struct dc_link *link,
/* Notify DPOA that non-transparent link training of DPRX done. */
if (hop == DPRX && result != LINK_TRAINING_ABORT) {
- status = core_link_send_set_config(link,
+ status = core_link_send_set_config(
+ link,
DPIA_SET_CFG_SET_TRAINING,
DPIA_TS_DPRX_DONE);
if (status != DC_OK)
@@ -912,18 +933,20 @@ static enum link_training_result dpia_training_end(struct dc_link *link,
}
} else { /* non-LTTPR or transparent LTTPR. */
+
/* Write 0x0 to TRAINING_PATTERN_SET */
status = dpcd_clear_lt_pattern(link, hop);
if (status != DC_OK)
result = LINK_TRAINING_ABORT;
+
}
DC_LOG_HW_LINK_TRAINING("%s\n DPIA(%d) end\n - hop(%d)\n - result(%d)\n - LTTPR mode(%d)\n",
- __func__,
- link->link_id.enum_id - ENUM_ID_1,
- hop,
- result,
- lt_settings->lttpr_mode);
+ __func__,
+ link->link_id.enum_id - ENUM_ID_1,
+ hop,
+ result,
+ lt_settings->lttpr_mode);
return result;
}
@@ -933,20 +956,21 @@ static enum link_training_result dpia_training_end(struct dc_link *link,
* - Sending SET_CONFIG(SET_LINK) with lane count and link rate set to 0.
*
* @param link DPIA link being trained.
- * @param hop The Hop in display path. DPRX = 0.
+ * @param hop Hop in display path. DPRX = 0.
*/
-static void dpia_training_abort(struct dc_link *link,
- struct link_training_settings *lt_settings,
- uint32_t hop)
+static void dpia_training_abort(
+ struct dc_link *link,
+ struct link_training_settings *lt_settings,
+ uint32_t hop)
{
uint8_t data = 0;
uint32_t dpcd_tps_offset = DP_TRAINING_PATTERN_SET;
DC_LOG_HW_LINK_TRAINING("%s\n DPIA(%d) aborting\n - LTTPR mode(%d)\n - HPD(%d)\n",
- __func__,
- link->link_id.enum_id - ENUM_ID_1,
- lt_settings->lttpr_mode,
- link->is_hpd_pending);
+ __func__,
+ link->link_id.enum_id - ENUM_ID_1,
+ lt_settings->lttpr_mode,
+ link->is_hpd_pending);
/* Abandon clean-up if sink unplugged. */
if (link->is_hpd_pending)
@@ -975,7 +999,7 @@ enum link_training_result dc_link_dpia_perform_link_training(
struct dc_link_settings link_settings = *link_setting; // non-const copy to pass in
- lt_settings.lttpr_mode = dp_decide_lttpr_mode(link, &link_settings);
+ lt_settings.lttpr_mode = dc_link_decide_lttpr_mode(link, &link_settings);
/* Configure link as prescribed in link_setting and set LTTPR mode. */
result = dpia_configure_link(link, link_res, link_setting, &lt_settings);
@@ -983,7 +1007,7 @@ enum link_training_result dc_link_dpia_perform_link_training(
return result;
if (lt_settings.lttpr_mode == LTTPR_MODE_NON_TRANSPARENT)
- repeater_cnt = dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
+ repeater_cnt = dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
/* Train each hop in turn starting with the one closest to DPTX.
* In transparent or non-LTTPR mode, train only the final hop (DPRX).
@@ -1014,10 +1038,10 @@ enum link_training_result dc_link_dpia_perform_link_training(
msleep(5);
if (!link->is_automated)
result = dp_check_link_loss_status(link, &lt_settings);
- } else if (result == LINK_TRAINING_ABORT) {
+ } else if (result == LINK_TRAINING_ABORT)
dpia_training_abort(link, &lt_settings, repeater_id);
- } else {
+ else
dpia_training_end(link, &lt_settings, repeater_id);
- }
+
return result;
}
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.h
new file mode 100644
index 000000000000..0150f2916421
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+
+#ifndef __DC_LINK_DP_TRAINING_DPIA_H__
+#define __DC_LINK_DP_TRAINING_DPIA_H__
+#include "link_dp_training.h"
+
+/* Train DP tunneling link for USB4 DPIA display endpoint.
+ * DPIA equivalent of dc_link_dp_perfrorm_link_training.
+ * Aborts link training upon detection of sink unplug.
+ */
+enum link_training_result dc_link_dpia_perform_link_training(
+ struct dc_link *link,
+ const struct link_resource *link_res,
+ const struct dc_link_settings *link_setting,
+ bool skip_video_pattern);
+
+#endif /* __DC_LINK_DP_TRAINING_DPIA_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c
new file mode 100644
index 000000000000..a4071d2959a0
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c
@@ -0,0 +1,579 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+/* FILE POLICY AND INTENDED USAGE:
+ * This file implements 8b/10b link training specially modified to support an
+ * embedded retimer chip. This retimer chip is referred as fixed vs pe retimer.
+ * Unlike native dp connection this chip requires a modified link training
+ * protocol based on 8b/10b link training. Since this is a non standard sequence
+ * and we must support this hardware, we decided to isolate it in its own
+ * training sequence inside its own file.
+ */
+#include "link_dp_training_fixed_vs_pe_retimer.h"
+#include "link_dp_training_8b_10b.h"
+#include "link_dpcd.h"
+#include "link_dp_phy.h"
+#include "link_dp_capability.h"
+
+#define DC_LOGGER \
+ link->ctx->logger
+
+void dp_fixed_vs_pe_read_lane_adjust(
+ struct dc_link *link,
+ union dpcd_training_lane dpcd_lane_adjust[LANE_COUNT_DP_MAX])
+{
+ const uint8_t vendor_lttpr_write_data_vs[3] = {0x0, 0x53, 0x63};
+ const uint8_t vendor_lttpr_write_data_pe[3] = {0x0, 0x54, 0x63};
+ const uint8_t offset = dp_parse_lttpr_repeater_count(
+ link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
+ uint32_t vendor_lttpr_write_address = 0xF004F;
+ uint32_t vendor_lttpr_read_address = 0xF0053;
+ uint8_t dprx_vs = 0;
+ uint8_t dprx_pe = 0;
+ uint8_t lane;
+
+ if (offset != 0xFF) {
+ vendor_lttpr_write_address +=
+ ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
+ vendor_lttpr_read_address +=
+ ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
+ }
+
+ /* W/A to read lane settings requested by DPRX */
+ core_link_write_dpcd(
+ link,
+ vendor_lttpr_write_address,
+ &vendor_lttpr_write_data_vs[0],
+ sizeof(vendor_lttpr_write_data_vs));
+ core_link_read_dpcd(
+ link,
+ vendor_lttpr_read_address,
+ &dprx_vs,
+ 1);
+ core_link_write_dpcd(
+ link,
+ vendor_lttpr_write_address,
+ &vendor_lttpr_write_data_pe[0],
+ sizeof(vendor_lttpr_write_data_pe));
+ core_link_read_dpcd(
+ link,
+ vendor_lttpr_read_address,
+ &dprx_pe,
+ 1);
+
+ for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) {
+ dpcd_lane_adjust[lane].bits.VOLTAGE_SWING_SET = (dprx_vs >> (2 * lane)) & 0x3;
+ dpcd_lane_adjust[lane].bits.PRE_EMPHASIS_SET = (dprx_pe >> (2 * lane)) & 0x3;
+ }
+}
+
+
+void dp_fixed_vs_pe_set_retimer_lane_settings(
+ struct dc_link *link,
+ const union dpcd_training_lane dpcd_lane_adjust[LANE_COUNT_DP_MAX],
+ uint8_t lane_count)
+{
+ const uint8_t offset = dp_parse_lttpr_repeater_count(
+ link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
+ const uint8_t vendor_lttpr_write_data_reset[4] = {0x1, 0x50, 0x63, 0xFF};
+ uint32_t vendor_lttpr_write_address = 0xF004F;
+ uint8_t vendor_lttpr_write_data_vs[4] = {0x1, 0x51, 0x63, 0x0};
+ uint8_t vendor_lttpr_write_data_pe[4] = {0x1, 0x52, 0x63, 0x0};
+ uint8_t lane = 0;
+
+ if (offset != 0xFF) {
+ vendor_lttpr_write_address +=
+ ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
+ }
+
+ for (lane = 0; lane < lane_count; lane++) {
+ vendor_lttpr_write_data_vs[3] |=
+ dpcd_lane_adjust[lane].bits.VOLTAGE_SWING_SET << (2 * lane);
+ vendor_lttpr_write_data_pe[3] |=
+ dpcd_lane_adjust[lane].bits.PRE_EMPHASIS_SET << (2 * lane);
+ }
+
+ /* Force LTTPR to output desired VS and PE */
+ core_link_write_dpcd(
+ link,
+ vendor_lttpr_write_address,
+ &vendor_lttpr_write_data_reset[0],
+ sizeof(vendor_lttpr_write_data_reset));
+ core_link_write_dpcd(
+ link,
+ vendor_lttpr_write_address,
+ &vendor_lttpr_write_data_vs[0],
+ sizeof(vendor_lttpr_write_data_vs));
+ core_link_write_dpcd(
+ link,
+ vendor_lttpr_write_address,
+ &vendor_lttpr_write_data_pe[0],
+ sizeof(vendor_lttpr_write_data_pe));
+}
+
+static enum link_training_result perform_fixed_vs_pe_nontransparent_training_sequence(
+ struct dc_link *link,
+ const struct link_resource *link_res,
+ struct link_training_settings *lt_settings)
+{
+ enum link_training_result status = LINK_TRAINING_SUCCESS;
+ uint8_t lane = 0;
+ uint8_t toggle_rate = 0x6;
+ uint8_t target_rate = 0x6;
+ bool apply_toggle_rate_wa = false;
+ uint8_t repeater_cnt;
+ uint8_t repeater_id;
+
+ /* Fixed VS/PE specific: Force CR AUX RD Interval to at least 16ms */
+ if (lt_settings->cr_pattern_time < 16000)
+ lt_settings->cr_pattern_time = 16000;
+
+ /* Fixed VS/PE specific: Toggle link rate */
+ apply_toggle_rate_wa = (link->vendor_specific_lttpr_link_rate_wa == target_rate);
+ target_rate = get_dpcd_link_rate(&lt_settings->link_settings);
+ toggle_rate = (target_rate == 0x6) ? 0xA : 0x6;
+
+ if (apply_toggle_rate_wa)
+ lt_settings->link_settings.link_rate = toggle_rate;
+
+ if (link->ctx->dc->work_arounds.lt_early_cr_pattern)
+ start_clock_recovery_pattern_early(link, link_res, lt_settings, DPRX);
+
+ /* 1. set link rate, lane count and spread. */
+ dpcd_set_link_settings(link, lt_settings);
+
+ /* Fixed VS/PE specific: Toggle link rate back*/
+ if (apply_toggle_rate_wa) {
+ core_link_write_dpcd(
+ link,
+ DP_LINK_BW_SET,
+ &target_rate,
+ 1);
+ }
+
+ link->vendor_specific_lttpr_link_rate_wa = target_rate;
+
+ if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) {
+
+ /* 2. perform link training (set link training done
+ * to false is done as well)
+ */
+ repeater_cnt = dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
+
+ for (repeater_id = repeater_cnt; (repeater_id > 0 && status == LINK_TRAINING_SUCCESS);
+ repeater_id--) {
+ status = perform_8b_10b_clock_recovery_sequence(link, link_res, lt_settings, repeater_id);
+
+ if (status != LINK_TRAINING_SUCCESS) {
+ repeater_training_done(link, repeater_id);
+ break;
+ }
+
+ status = perform_8b_10b_channel_equalization_sequence(link,
+ link_res,
+ lt_settings,
+ repeater_id);
+
+ repeater_training_done(link, repeater_id);
+
+ if (status != LINK_TRAINING_SUCCESS)
+ break;
+
+ for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) {
+ lt_settings->dpcd_lane_settings[lane].raw = 0;
+ lt_settings->hw_lane_settings[lane].VOLTAGE_SWING = 0;
+ lt_settings->hw_lane_settings[lane].PRE_EMPHASIS = 0;
+ }
+ }
+ }
+
+ if (status == LINK_TRAINING_SUCCESS) {
+ status = perform_8b_10b_clock_recovery_sequence(link, link_res, lt_settings, DPRX);
+ if (status == LINK_TRAINING_SUCCESS) {
+ status = perform_8b_10b_channel_equalization_sequence(link,
+ link_res,
+ lt_settings,
+ DPRX);
+ }
+ }
+
+ return status;
+}
+
+
+enum link_training_result dp_perform_fixed_vs_pe_training_sequence(
+ struct dc_link *link,
+ const struct link_resource *link_res,
+ struct link_training_settings *lt_settings)
+{
+ const uint8_t vendor_lttpr_write_data_reset[4] = {0x1, 0x50, 0x63, 0xFF};
+ const uint8_t offset = dp_parse_lttpr_repeater_count(
+ link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
+ const uint8_t vendor_lttpr_write_data_intercept_en[4] = {0x1, 0x55, 0x63, 0x0};
+ const uint8_t vendor_lttpr_write_data_intercept_dis[4] = {0x1, 0x55, 0x63, 0x68};
+ uint32_t pre_disable_intercept_delay_ms = link->dc->debug.fixed_vs_aux_delay_config_wa;
+ uint8_t vendor_lttpr_write_data_vs[4] = {0x1, 0x51, 0x63, 0x0};
+ uint8_t vendor_lttpr_write_data_pe[4] = {0x1, 0x52, 0x63, 0x0};
+ uint32_t vendor_lttpr_write_address = 0xF004F;
+ enum link_training_result status = LINK_TRAINING_SUCCESS;
+ uint8_t lane = 0;
+ union down_spread_ctrl downspread = {0};
+ union lane_count_set lane_count_set = {0};
+ uint8_t toggle_rate;
+ uint8_t rate;
+
+ /* Only 8b/10b is supported */
+ ASSERT(link_dp_get_encoding_format(&lt_settings->link_settings) ==
+ DP_8b_10b_ENCODING);
+
+ if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) {
+ status = perform_fixed_vs_pe_nontransparent_training_sequence(link, link_res, lt_settings);
+ return status;
+ }
+
+ if (offset != 0xFF) {
+ vendor_lttpr_write_address +=
+ ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
+
+ /* Certain display and cable configuration require extra delay */
+ if (offset > 2)
+ pre_disable_intercept_delay_ms = link->dc->debug.fixed_vs_aux_delay_config_wa * 2;
+ }
+
+ /* Vendor specific: Reset lane settings */
+ core_link_write_dpcd(
+ link,
+ vendor_lttpr_write_address,
+ &vendor_lttpr_write_data_reset[0],
+ sizeof(vendor_lttpr_write_data_reset));
+ core_link_write_dpcd(
+ link,
+ vendor_lttpr_write_address,
+ &vendor_lttpr_write_data_vs[0],
+ sizeof(vendor_lttpr_write_data_vs));
+ core_link_write_dpcd(
+ link,
+ vendor_lttpr_write_address,
+ &vendor_lttpr_write_data_pe[0],
+ sizeof(vendor_lttpr_write_data_pe));
+
+ /* Vendor specific: Enable intercept */
+ core_link_write_dpcd(
+ link,
+ vendor_lttpr_write_address,
+ &vendor_lttpr_write_data_intercept_en[0],
+ sizeof(vendor_lttpr_write_data_intercept_en));
+
+ /* 1. set link rate, lane count and spread. */
+
+ downspread.raw = (uint8_t)(lt_settings->link_settings.link_spread);
+
+ lane_count_set.bits.LANE_COUNT_SET =
+ lt_settings->link_settings.lane_count;
+
+ lane_count_set.bits.ENHANCED_FRAMING = lt_settings->enhanced_framing;
+ lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED = 0;
+
+
+ if (lt_settings->pattern_for_eq < DP_TRAINING_PATTERN_SEQUENCE_4) {
+ lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED =
+ link->dpcd_caps.max_ln_count.bits.POST_LT_ADJ_REQ_SUPPORTED;
+ }
+
+ core_link_write_dpcd(link, DP_DOWNSPREAD_CTRL,
+ &downspread.raw, sizeof(downspread));
+
+ core_link_write_dpcd(link, DP_LANE_COUNT_SET,
+ &lane_count_set.raw, 1);
+
+ rate = get_dpcd_link_rate(&lt_settings->link_settings);
+
+ /* Vendor specific: Toggle link rate */
+ toggle_rate = (rate == 0x6) ? 0xA : 0x6;
+
+ if (link->vendor_specific_lttpr_link_rate_wa == rate) {
+ core_link_write_dpcd(
+ link,
+ DP_LINK_BW_SET,
+ &toggle_rate,
+ 1);
+ }
+
+ link->vendor_specific_lttpr_link_rate_wa = rate;
+
+ core_link_write_dpcd(link, DP_LINK_BW_SET, &rate, 1);
+
+ DC_LOG_HW_LINK_TRAINING("%s\n %x rate = %x\n %x lane = %x framing = %x\n %x spread = %x\n",
+ __func__,
+ DP_LINK_BW_SET,
+ lt_settings->link_settings.link_rate,
+ DP_LANE_COUNT_SET,
+ lt_settings->link_settings.lane_count,
+ lt_settings->enhanced_framing,
+ DP_DOWNSPREAD_CTRL,
+ lt_settings->link_settings.link_spread);
+
+ /* 2. Perform link training */
+
+ /* Perform Clock Recovery Sequence */
+ if (status == LINK_TRAINING_SUCCESS) {
+ const uint8_t max_vendor_dpcd_retries = 10;
+ uint32_t retries_cr;
+ uint32_t retry_count;
+ uint32_t wait_time_microsec;
+ enum dc_lane_count lane_count = lt_settings->link_settings.lane_count;
+ union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX];
+ union lane_align_status_updated dpcd_lane_status_updated;
+ union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0};
+ enum dc_status dpcd_status = DC_OK;
+ uint8_t i = 0;
+
+ retries_cr = 0;
+ retry_count = 0;
+
+ memset(&dpcd_lane_status, '\0', sizeof(dpcd_lane_status));
+ memset(&dpcd_lane_status_updated, '\0',
+ sizeof(dpcd_lane_status_updated));
+
+ while ((retries_cr < LINK_TRAINING_MAX_RETRY_COUNT) &&
+ (retry_count < LINK_TRAINING_MAX_CR_RETRY)) {
+
+
+ /* 1. call HWSS to set lane settings */
+ dp_set_hw_lane_settings(
+ link,
+ link_res,
+ lt_settings,
+ 0);
+
+ /* 2. update DPCD of the receiver */
+ if (!retry_count) {
+ /* EPR #361076 - write as a 5-byte burst,
+ * but only for the 1-st iteration.
+ */
+ dpcd_set_lt_pattern_and_lane_settings(
+ link,
+ lt_settings,
+ lt_settings->pattern_for_cr,
+ 0);
+ /* Vendor specific: Disable intercept */
+ for (i = 0; i < max_vendor_dpcd_retries; i++) {
+ msleep(pre_disable_intercept_delay_ms);
+ dpcd_status = core_link_write_dpcd(
+ link,
+ vendor_lttpr_write_address,
+ &vendor_lttpr_write_data_intercept_dis[0],
+ sizeof(vendor_lttpr_write_data_intercept_dis));
+
+ if (dpcd_status == DC_OK)
+ break;
+
+ core_link_write_dpcd(
+ link,
+ vendor_lttpr_write_address,
+ &vendor_lttpr_write_data_intercept_en[0],
+ sizeof(vendor_lttpr_write_data_intercept_en));
+ }
+ } else {
+ vendor_lttpr_write_data_vs[3] = 0;
+ vendor_lttpr_write_data_pe[3] = 0;
+
+ for (lane = 0; lane < lane_count; lane++) {
+ vendor_lttpr_write_data_vs[3] |=
+ lt_settings->dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET << (2 * lane);
+ vendor_lttpr_write_data_pe[3] |=
+ lt_settings->dpcd_lane_settings[lane].bits.PRE_EMPHASIS_SET << (2 * lane);
+ }
+
+ /* Vendor specific: Update VS and PE to DPRX requested value */
+ core_link_write_dpcd(
+ link,
+ vendor_lttpr_write_address,
+ &vendor_lttpr_write_data_vs[0],
+ sizeof(vendor_lttpr_write_data_vs));
+ core_link_write_dpcd(
+ link,
+ vendor_lttpr_write_address,
+ &vendor_lttpr_write_data_pe[0],
+ sizeof(vendor_lttpr_write_data_pe));
+
+ dpcd_set_lane_settings(
+ link,
+ lt_settings,
+ 0);
+ }
+
+ /* 3. wait receiver to lock-on*/
+ wait_time_microsec = lt_settings->cr_pattern_time;
+
+ dp_wait_for_training_aux_rd_interval(
+ link,
+ wait_time_microsec);
+
+ /* 4. Read lane status and requested drive
+ * settings as set by the sink
+ */
+ dp_get_lane_status_and_lane_adjust(
+ link,
+ lt_settings,
+ dpcd_lane_status,
+ &dpcd_lane_status_updated,
+ dpcd_lane_adjust,
+ 0);
+
+ /* 5. check CR done*/
+ if (dp_is_cr_done(lane_count, dpcd_lane_status)) {
+ status = LINK_TRAINING_SUCCESS;
+ break;
+ }
+
+ /* 6. max VS reached*/
+ if (dp_is_max_vs_reached(lt_settings))
+ break;
+
+ /* 7. same lane settings */
+ /* Note: settings are the same for all lanes,
+ * so comparing first lane is sufficient
+ */
+ if (lt_settings->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET ==
+ dpcd_lane_adjust[0].bits.VOLTAGE_SWING_LANE)
+ retries_cr++;
+ else
+ retries_cr = 0;
+
+ /* 8. update VS/PE/PC2 in lt_settings*/
+ dp_decide_lane_settings(lt_settings, dpcd_lane_adjust,
+ lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
+ retry_count++;
+ }
+
+ if (retry_count >= LINK_TRAINING_MAX_CR_RETRY) {
+ ASSERT(0);
+ DC_LOG_ERROR("%s: Link Training Error, could not get CR after %d tries. Possibly voltage swing issue",
+ __func__,
+ LINK_TRAINING_MAX_CR_RETRY);
+
+ }
+
+ status = dp_get_cr_failure(lane_count, dpcd_lane_status);
+ }
+
+ /* Perform Channel EQ Sequence */
+ if (status == LINK_TRAINING_SUCCESS) {
+ enum dc_dp_training_pattern tr_pattern;
+ uint32_t retries_ch_eq;
+ uint32_t wait_time_microsec;
+ enum dc_lane_count lane_count = lt_settings->link_settings.lane_count;
+ union lane_align_status_updated dpcd_lane_status_updated = {0};
+ union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0};
+ union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0};
+
+ /* Note: also check that TPS4 is a supported feature*/
+ tr_pattern = lt_settings->pattern_for_eq;
+
+ dp_set_hw_training_pattern(link, link_res, tr_pattern, 0);
+
+ status = LINK_TRAINING_EQ_FAIL_EQ;
+
+ for (retries_ch_eq = 0; retries_ch_eq <= LINK_TRAINING_MAX_RETRY_COUNT;
+ retries_ch_eq++) {
+
+ dp_set_hw_lane_settings(link, link_res, lt_settings, 0);
+
+ vendor_lttpr_write_data_vs[3] = 0;
+ vendor_lttpr_write_data_pe[3] = 0;
+
+ for (lane = 0; lane < lane_count; lane++) {
+ vendor_lttpr_write_data_vs[3] |=
+ lt_settings->dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET << (2 * lane);
+ vendor_lttpr_write_data_pe[3] |=
+ lt_settings->dpcd_lane_settings[lane].bits.PRE_EMPHASIS_SET << (2 * lane);
+ }
+
+ /* Vendor specific: Update VS and PE to DPRX requested value */
+ core_link_write_dpcd(
+ link,
+ vendor_lttpr_write_address,
+ &vendor_lttpr_write_data_vs[0],
+ sizeof(vendor_lttpr_write_data_vs));
+ core_link_write_dpcd(
+ link,
+ vendor_lttpr_write_address,
+ &vendor_lttpr_write_data_pe[0],
+ sizeof(vendor_lttpr_write_data_pe));
+
+ /* 2. update DPCD*/
+ if (!retries_ch_eq)
+ /* EPR #361076 - write as a 5-byte burst,
+ * but only for the 1-st iteration
+ */
+
+ dpcd_set_lt_pattern_and_lane_settings(
+ link,
+ lt_settings,
+ tr_pattern, 0);
+ else
+ dpcd_set_lane_settings(link, lt_settings, 0);
+
+ /* 3. wait for receiver to lock-on*/
+ wait_time_microsec = lt_settings->eq_pattern_time;
+
+ dp_wait_for_training_aux_rd_interval(
+ link,
+ wait_time_microsec);
+
+ /* 4. Read lane status and requested
+ * drive settings as set by the sink
+ */
+ dp_get_lane_status_and_lane_adjust(
+ link,
+ lt_settings,
+ dpcd_lane_status,
+ &dpcd_lane_status_updated,
+ dpcd_lane_adjust,
+ 0);
+
+ /* 5. check CR done*/
+ if (!dp_is_cr_done(lane_count, dpcd_lane_status)) {
+ status = LINK_TRAINING_EQ_FAIL_CR;
+ break;
+ }
+
+ /* 6. check CHEQ done*/
+ if (dp_is_ch_eq_done(lane_count, dpcd_lane_status) &&
+ dp_is_symbol_locked(lane_count, dpcd_lane_status) &&
+ dp_is_interlane_aligned(dpcd_lane_status_updated)) {
+ status = LINK_TRAINING_SUCCESS;
+ break;
+ }
+
+ /* 7. update VS/PE/PC2 in lt_settings*/
+ dp_decide_lane_settings(lt_settings, dpcd_lane_adjust,
+ lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
+ }
+ }
+
+ return status;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.h
new file mode 100644
index 000000000000..e61970e27661
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+
+#ifndef __DC_LINK_DP_FIXED_VS_PE_RETIMER_H__
+#define __DC_LINK_DP_FIXED_VS_PE_RETIMER_H__
+#include "link_dp_training.h"
+
+enum link_training_result dp_perform_fixed_vs_pe_training_sequence(
+ struct dc_link *link,
+ const struct link_resource *link_res,
+ struct link_training_settings *lt_settings);
+
+void dp_fixed_vs_pe_set_retimer_lane_settings(
+ struct dc_link *link,
+ const union dpcd_training_lane dpcd_lane_adjust[LANE_COUNT_DP_MAX],
+ uint8_t lane_count);
+
+void dp_fixed_vs_pe_read_lane_adjust(
+ struct dc_link *link,
+ union dpcd_training_lane dpcd_lane_adjust[LANE_COUNT_DP_MAX]);
+
+#endif /* __DC_LINK_DP_FIXED_VS_PE_RETIMER_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dpcd.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.c
index af110bf9470f..5c9a30211c10 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dpcd.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.c
@@ -23,11 +23,14 @@
*
*/
-#include <inc/core_status.h>
-#include <dc_link.h>
-#include <inc/link_hwss.h>
-#include <inc/link_dpcd.h>
-#include <dc_dp_types.h>
+/* FILE POLICY AND INTENDED USAGE:
+ *
+ * This file implements basic dpcd read/write functionality. It also does basic
+ * dpcd range check to ensure that every dpcd request is compliant with specs
+ * range requirements.
+ */
+
+#include "link_dpcd.h"
#include <drm/display/drm_dp_helper.h>
#include "dm_helpers.h"
diff --git a/drivers/gpu/drm/amd/display/dc/inc/link_dpcd.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.h
index d561f86d503c..08d787a1e451 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/link_dpcd.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.h
@@ -25,9 +25,8 @@
#ifndef __LINK_DPCD_H__
#define __LINK_DPCD_H__
-#include <inc/core_status.h>
-#include <dc_link.h>
-#include <dc_link_dp.h>
+#include "link.h"
+#include "dpcd_defs.h"
enum dc_status core_link_read_dpcd(
struct dc_link *link,
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
new file mode 100644
index 000000000000..97e02b5b21ae
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
@@ -0,0 +1,833 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+/* FILE POLICY AND INTENDED USAGE:
+ * This file implements retrieval and configuration of eDP panel features such
+ * as PSR and ABM and it also manages specs defined eDP panel power sequences.
+ */
+
+#include "link_edp_panel_control.h"
+#include "link_dpcd.h"
+#include "link_dp_capability.h"
+#include "dm_helpers.h"
+#include "dal_asic_id.h"
+#include "dce/dmub_psr.h"
+#include "abm.h"
+#define DC_LOGGER_INIT(logger)
+
+void dp_set_panel_mode(struct dc_link *link, enum dp_panel_mode panel_mode)
+{
+ union dpcd_edp_config edp_config_set;
+ bool panel_mode_edp = false;
+
+ memset(&edp_config_set, '\0', sizeof(union dpcd_edp_config));
+
+ if (panel_mode != DP_PANEL_MODE_DEFAULT) {
+
+ switch (panel_mode) {
+ case DP_PANEL_MODE_EDP:
+ case DP_PANEL_MODE_SPECIAL:
+ panel_mode_edp = true;
+ break;
+
+ default:
+ break;
+ }
+
+ /*set edp panel mode in receiver*/
+ core_link_read_dpcd(
+ link,
+ DP_EDP_CONFIGURATION_SET,
+ &edp_config_set.raw,
+ sizeof(edp_config_set.raw));
+
+ if (edp_config_set.bits.PANEL_MODE_EDP
+ != panel_mode_edp) {
+ enum dc_status result;
+
+ edp_config_set.bits.PANEL_MODE_EDP =
+ panel_mode_edp;
+ result = core_link_write_dpcd(
+ link,
+ DP_EDP_CONFIGURATION_SET,
+ &edp_config_set.raw,
+ sizeof(edp_config_set.raw));
+
+ ASSERT(result == DC_OK);
+ }
+ }
+ DC_LOG_DETECTION_DP_CAPS("Link: %d eDP panel mode supported: %d "
+ "eDP panel mode enabled: %d \n",
+ link->link_index,
+ link->dpcd_caps.panel_mode_edp,
+ panel_mode_edp);
+}
+
+enum dp_panel_mode dp_get_panel_mode(struct dc_link *link)
+{
+ /* We need to explicitly check that connector
+ * is not DP. Some Travis_VGA get reported
+ * by video bios as DP.
+ */
+ if (link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT) {
+
+ switch (link->dpcd_caps.branch_dev_id) {
+ case DP_BRANCH_DEVICE_ID_0022B9:
+ /* alternate scrambler reset is required for Travis
+ * for the case when external chip does not
+ * provide sink device id, alternate scrambler
+ * scheme will be overriden later by querying
+ * Encoder features
+ */
+ if (strncmp(
+ link->dpcd_caps.branch_dev_name,
+ DP_VGA_LVDS_CONVERTER_ID_2,
+ sizeof(
+ link->dpcd_caps.
+ branch_dev_name)) == 0) {
+ return DP_PANEL_MODE_SPECIAL;
+ }
+ break;
+ case DP_BRANCH_DEVICE_ID_00001A:
+ /* alternate scrambler reset is required for Travis
+ * for the case when external chip does not provide
+ * sink device id, alternate scrambler scheme will
+ * be overriden later by querying Encoder feature
+ */
+ if (strncmp(link->dpcd_caps.branch_dev_name,
+ DP_VGA_LVDS_CONVERTER_ID_3,
+ sizeof(
+ link->dpcd_caps.
+ branch_dev_name)) == 0) {
+ return DP_PANEL_MODE_SPECIAL;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (link->dpcd_caps.panel_mode_edp &&
+ (link->connector_signal == SIGNAL_TYPE_EDP ||
+ (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT &&
+ link->is_internal_display))) {
+ return DP_PANEL_MODE_EDP;
+ }
+
+ return DP_PANEL_MODE_DEFAULT;
+}
+
+bool dc_link_set_backlight_level_nits(struct dc_link *link,
+ bool isHDR,
+ uint32_t backlight_millinits,
+ uint32_t transition_time_in_ms)
+{
+ struct dpcd_source_backlight_set dpcd_backlight_set;
+ uint8_t backlight_control = isHDR ? 1 : 0;
+
+ if (!link || (link->connector_signal != SIGNAL_TYPE_EDP &&
+ link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT))
+ return false;
+
+ // OLEDs have no PWM, they can only use AUX
+ if (link->dpcd_sink_ext_caps.bits.oled == 1)
+ backlight_control = 1;
+
+ *(uint32_t *)&dpcd_backlight_set.backlight_level_millinits = backlight_millinits;
+ *(uint16_t *)&dpcd_backlight_set.backlight_transition_time_ms = (uint16_t)transition_time_in_ms;
+
+
+ if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_LEVEL,
+ (uint8_t *)(&dpcd_backlight_set),
+ sizeof(dpcd_backlight_set)) != DC_OK)
+ return false;
+
+ if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_CONTROL,
+ &backlight_control, 1) != DC_OK)
+ return false;
+
+ return true;
+}
+
+bool dc_link_get_backlight_level_nits(struct dc_link *link,
+ uint32_t *backlight_millinits_avg,
+ uint32_t *backlight_millinits_peak)
+{
+ union dpcd_source_backlight_get dpcd_backlight_get;
+
+ memset(&dpcd_backlight_get, 0, sizeof(union dpcd_source_backlight_get));
+
+ if (!link || (link->connector_signal != SIGNAL_TYPE_EDP &&
+ link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT))
+ return false;
+
+ if (!core_link_read_dpcd(link, DP_SOURCE_BACKLIGHT_CURRENT_PEAK,
+ dpcd_backlight_get.raw,
+ sizeof(union dpcd_source_backlight_get)))
+ return false;
+
+ *backlight_millinits_avg =
+ dpcd_backlight_get.bytes.backlight_millinits_avg;
+ *backlight_millinits_peak =
+ dpcd_backlight_get.bytes.backlight_millinits_peak;
+
+ /* On non-supported panels dpcd_read usually succeeds with 0 returned */
+ if (*backlight_millinits_avg == 0 ||
+ *backlight_millinits_avg > *backlight_millinits_peak)
+ return false;
+
+ return true;
+}
+
+bool link_backlight_enable_aux(struct dc_link *link, bool enable)
+{
+ uint8_t backlight_enable = enable ? 1 : 0;
+
+ if (!link || (link->connector_signal != SIGNAL_TYPE_EDP &&
+ link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT))
+ return false;
+
+ if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_ENABLE,
+ &backlight_enable, 1) != DC_OK)
+ return false;
+
+ return true;
+}
+
+// we read default from 0x320 because we expect BIOS wrote it there
+// regular get_backlight_nit reads from panel set at 0x326
+static bool read_default_bl_aux(struct dc_link *link, uint32_t *backlight_millinits)
+{
+ if (!link || (link->connector_signal != SIGNAL_TYPE_EDP &&
+ link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT))
+ return false;
+
+ if (!core_link_read_dpcd(link, DP_SOURCE_BACKLIGHT_LEVEL,
+ (uint8_t *) backlight_millinits,
+ sizeof(uint32_t)))
+ return false;
+
+ return true;
+}
+
+bool set_default_brightness_aux(struct dc_link *link)
+{
+ uint32_t default_backlight;
+
+ if (link && link->dpcd_sink_ext_caps.bits.oled == 1) {
+ if (!read_default_bl_aux(link, &default_backlight))
+ default_backlight = 150000;
+ // if < 5 nits or > 5000, it might be wrong readback
+ if (default_backlight < 5000 || default_backlight > 5000000)
+ default_backlight = 150000; //
+
+ return dc_link_set_backlight_level_nits(link, true,
+ default_backlight, 0);
+ }
+ return false;
+}
+
+bool link_is_edp_ilr_optimization_required(struct dc_link *link,
+ struct dc_crtc_timing *crtc_timing)
+{
+ struct dc_link_settings link_setting;
+ uint8_t link_bw_set;
+ uint8_t link_rate_set;
+ uint32_t req_bw;
+ union lane_count_set lane_count_set = {0};
+
+ ASSERT(link || crtc_timing); // invalid input
+
+ if (link->dpcd_caps.edp_supported_link_rates_count == 0 ||
+ !link->panel_config.ilr.optimize_edp_link_rate)
+ return false;
+
+
+ // Read DPCD 00100h to find if standard link rates are set
+ core_link_read_dpcd(link, DP_LINK_BW_SET,
+ &link_bw_set, sizeof(link_bw_set));
+
+ if (link_bw_set) {
+ DC_LOG_EVENT_LINK_TRAINING("eDP ILR: Optimization required, VBIOS used link_bw_set\n");
+ return true;
+ }
+
+ // Read DPCD 00115h to find the edp link rate set used
+ core_link_read_dpcd(link, DP_LINK_RATE_SET,
+ &link_rate_set, sizeof(link_rate_set));
+
+ // Read DPCD 00101h to find out the number of lanes currently set
+ core_link_read_dpcd(link, DP_LANE_COUNT_SET,
+ &lane_count_set.raw, sizeof(lane_count_set));
+
+ req_bw = dc_bandwidth_in_kbps_from_timing(crtc_timing);
+
+ if (!crtc_timing->flags.DSC)
+ dc_link_decide_edp_link_settings(link, &link_setting, req_bw);
+ else
+ decide_edp_link_settings_with_dsc(link, &link_setting, req_bw, LINK_RATE_UNKNOWN);
+
+ if (link->dpcd_caps.edp_supported_link_rates[link_rate_set] != link_setting.link_rate ||
+ lane_count_set.bits.LANE_COUNT_SET != link_setting.lane_count) {
+ DC_LOG_EVENT_LINK_TRAINING("eDP ILR: Optimization required, VBIOS link_rate_set not optimal\n");
+ return true;
+ }
+
+ DC_LOG_EVENT_LINK_TRAINING("eDP ILR: No optimization required, VBIOS set optimal link_rate_set\n");
+ return false;
+}
+
+void dc_link_edp_panel_backlight_power_on(struct dc_link *link, bool wait_for_hpd)
+{
+ if (link->connector_signal != SIGNAL_TYPE_EDP)
+ return;
+
+ link->dc->hwss.edp_power_control(link, true);
+ if (wait_for_hpd)
+ link->dc->hwss.edp_wait_for_hpd_ready(link, true);
+ if (link->dc->hwss.edp_backlight_control)
+ link->dc->hwss.edp_backlight_control(link, true);
+}
+
+bool dc_link_wait_for_t12(struct dc_link *link)
+{
+ if (link->connector_signal == SIGNAL_TYPE_EDP && link->dc->hwss.edp_wait_for_T12) {
+ link->dc->hwss.edp_wait_for_T12(link);
+
+ return true;
+ }
+
+ return false;
+}
+
+void link_edp_add_delay_for_T9(struct dc_link *link)
+{
+ if (link && link->panel_config.pps.extra_delay_backlight_off > 0)
+ udelay(link->panel_config.pps.extra_delay_backlight_off * 1000);
+}
+
+bool link_edp_receiver_ready_T9(struct dc_link *link)
+{
+ unsigned int tries = 0;
+ unsigned char sinkstatus = 0;
+ unsigned char edpRev = 0;
+ enum dc_status result = DC_OK;
+
+ result = core_link_read_dpcd(link, DP_EDP_DPCD_REV, &edpRev, sizeof(edpRev));
+
+ /* start from eDP version 1.2, SINK_STAUS indicate the sink is ready.*/
+ if (result == DC_OK && edpRev >= DP_EDP_12) {
+ do {
+ sinkstatus = 1;
+ result = core_link_read_dpcd(link, DP_SINK_STATUS, &sinkstatus, sizeof(sinkstatus));
+ if (sinkstatus == 0)
+ break;
+ if (result != DC_OK)
+ break;
+ udelay(100); //MAx T9
+ } while (++tries < 50);
+ }
+
+ return result;
+}
+
+bool link_edp_receiver_ready_T7(struct dc_link *link)
+{
+ unsigned char sinkstatus = 0;
+ unsigned char edpRev = 0;
+ enum dc_status result = DC_OK;
+
+ /* use absolute time stamp to constrain max T7*/
+ unsigned long long enter_timestamp = 0;
+ unsigned long long finish_timestamp = 0;
+ unsigned long long time_taken_in_ns = 0;
+
+ result = core_link_read_dpcd(link, DP_EDP_DPCD_REV, &edpRev, sizeof(edpRev));
+
+ if (result == DC_OK && edpRev >= DP_EDP_12) {
+ /* start from eDP version 1.2, SINK_STAUS indicate the sink is ready.*/
+ enter_timestamp = dm_get_timestamp(link->ctx);
+ do {
+ sinkstatus = 0;
+ result = core_link_read_dpcd(link, DP_SINK_STATUS, &sinkstatus, sizeof(sinkstatus));
+ if (sinkstatus == 1)
+ break;
+ if (result != DC_OK)
+ break;
+ udelay(25);
+ finish_timestamp = dm_get_timestamp(link->ctx);
+ time_taken_in_ns = dm_get_elapse_time_in_ns(link->ctx, finish_timestamp, enter_timestamp);
+ } while (time_taken_in_ns < 50 * 1000000); //MAx T7 is 50ms
+ }
+
+ if (link && link->panel_config.pps.extra_t7_ms > 0)
+ udelay(link->panel_config.pps.extra_t7_ms * 1000);
+
+ return result;
+}
+
+bool link_power_alpm_dpcd_enable(struct dc_link *link, bool enable)
+{
+ bool ret = false;
+ union dpcd_alpm_configuration alpm_config;
+
+ if (link->psr_settings.psr_version == DC_PSR_VERSION_SU_1) {
+ memset(&alpm_config, 0, sizeof(alpm_config));
+
+ alpm_config.bits.ENABLE = (enable ? true : false);
+ ret = dm_helpers_dp_write_dpcd(link->ctx, link,
+ DP_RECEIVER_ALPM_CONFIG, &alpm_config.raw,
+ sizeof(alpm_config.raw));
+ }
+ return ret;
+}
+
+static struct pipe_ctx *get_pipe_from_link(const struct dc_link *link)
+{
+ int i;
+ struct dc *dc = link->ctx->dc;
+ struct pipe_ctx *pipe_ctx = NULL;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (dc->current_state->res_ctx.pipe_ctx[i].stream) {
+ if (dc->current_state->res_ctx.pipe_ctx[i].stream->link == link) {
+ pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+ break;
+ }
+ }
+ }
+
+ return pipe_ctx;
+}
+
+bool dc_link_set_backlight_level(const struct dc_link *link,
+ uint32_t backlight_pwm_u16_16,
+ uint32_t frame_ramp)
+{
+ struct dc *dc = link->ctx->dc;
+
+ DC_LOGGER_INIT(link->ctx->logger);
+ DC_LOG_BACKLIGHT("New Backlight level: %d (0x%X)\n",
+ backlight_pwm_u16_16, backlight_pwm_u16_16);
+
+ if (dc_is_embedded_signal(link->connector_signal)) {
+ struct pipe_ctx *pipe_ctx = get_pipe_from_link(link);
+
+ if (pipe_ctx) {
+ /* Disable brightness ramping when the display is blanked
+ * as it can hang the DMCU
+ */
+ if (pipe_ctx->plane_state == NULL)
+ frame_ramp = 0;
+ } else {
+ return false;
+ }
+
+ dc->hwss.set_backlight_level(
+ pipe_ctx,
+ backlight_pwm_u16_16,
+ frame_ramp);
+ }
+ return true;
+}
+
+bool dc_link_set_psr_allow_active(struct dc_link *link, const bool *allow_active,
+ bool wait, bool force_static, const unsigned int *power_opts)
+{
+ struct dc *dc = link->ctx->dc;
+ struct dmcu *dmcu = dc->res_pool->dmcu;
+ struct dmub_psr *psr = dc->res_pool->psr;
+ unsigned int panel_inst;
+
+ if (psr == NULL && force_static)
+ return false;
+
+ if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst))
+ return false;
+
+ if ((allow_active != NULL) && (*allow_active == true) && (link->type == dc_connection_none)) {
+ // Don't enter PSR if panel is not connected
+ return false;
+ }
+
+ /* Set power optimization flag */
+ if (power_opts && link->psr_settings.psr_power_opt != *power_opts) {
+ link->psr_settings.psr_power_opt = *power_opts;
+
+ if (psr != NULL && link->psr_settings.psr_feature_enabled && psr->funcs->psr_set_power_opt)
+ psr->funcs->psr_set_power_opt(psr, link->psr_settings.psr_power_opt, panel_inst);
+ }
+
+ if (psr != NULL && link->psr_settings.psr_feature_enabled &&
+ force_static && psr->funcs->psr_force_static)
+ psr->funcs->psr_force_static(psr, panel_inst);
+
+ /* Enable or Disable PSR */
+ if (allow_active && link->psr_settings.psr_allow_active != *allow_active) {
+ link->psr_settings.psr_allow_active = *allow_active;
+
+ if (!link->psr_settings.psr_allow_active)
+ dc_z10_restore(dc);
+
+ if (psr != NULL && link->psr_settings.psr_feature_enabled) {
+ psr->funcs->psr_enable(psr, link->psr_settings.psr_allow_active, wait, panel_inst);
+ } else if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu)) &&
+ link->psr_settings.psr_feature_enabled)
+ dmcu->funcs->set_psr_enable(dmcu, link->psr_settings.psr_allow_active, wait);
+ else
+ return false;
+ }
+ return true;
+}
+
+bool dc_link_get_psr_state(const struct dc_link *link, enum dc_psr_state *state)
+{
+ struct dc *dc = link->ctx->dc;
+ struct dmcu *dmcu = dc->res_pool->dmcu;
+ struct dmub_psr *psr = dc->res_pool->psr;
+ unsigned int panel_inst;
+
+ if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst))
+ return false;
+
+ if (psr != NULL && link->psr_settings.psr_feature_enabled)
+ psr->funcs->psr_get_state(psr, state, panel_inst);
+ else if (dmcu != NULL && link->psr_settings.psr_feature_enabled)
+ dmcu->funcs->get_psr_state(dmcu, state);
+
+ return true;
+}
+
+static inline enum physical_phy_id
+transmitter_to_phy_id(struct dc_link *link)
+{
+ struct dc_context *dc_ctx = link->ctx;
+ enum transmitter transmitter_value = link->link_enc->transmitter;
+
+ switch (transmitter_value) {
+ case TRANSMITTER_UNIPHY_A:
+ return PHYLD_0;
+ case TRANSMITTER_UNIPHY_B:
+ return PHYLD_1;
+ case TRANSMITTER_UNIPHY_C:
+ return PHYLD_2;
+ case TRANSMITTER_UNIPHY_D:
+ return PHYLD_3;
+ case TRANSMITTER_UNIPHY_E:
+ return PHYLD_4;
+ case TRANSMITTER_UNIPHY_F:
+ return PHYLD_5;
+ case TRANSMITTER_NUTMEG_CRT:
+ return PHYLD_6;
+ case TRANSMITTER_TRAVIS_CRT:
+ return PHYLD_7;
+ case TRANSMITTER_TRAVIS_LCD:
+ return PHYLD_8;
+ case TRANSMITTER_UNIPHY_G:
+ return PHYLD_9;
+ case TRANSMITTER_COUNT:
+ return PHYLD_COUNT;
+ case TRANSMITTER_UNKNOWN:
+ return PHYLD_UNKNOWN;
+ default:
+ DC_ERROR("Unknown transmitter value %d\n", transmitter_value);
+ return PHYLD_UNKNOWN;
+ }
+}
+
+bool dc_link_setup_psr(struct dc_link *link,
+ const struct dc_stream_state *stream, struct psr_config *psr_config,
+ struct psr_context *psr_context)
+{
+ struct dc *dc;
+ struct dmcu *dmcu;
+ struct dmub_psr *psr;
+ int i;
+ unsigned int panel_inst;
+ /* updateSinkPsrDpcdConfig*/
+ union dpcd_psr_configuration psr_configuration;
+ union dpcd_sink_active_vtotal_control_mode vtotal_control = {0};
+
+ psr_context->controllerId = CONTROLLER_ID_UNDEFINED;
+
+ if (!link)
+ return false;
+
+ dc = link->ctx->dc;
+ dmcu = dc->res_pool->dmcu;
+ psr = dc->res_pool->psr;
+
+ if (!dmcu && !psr)
+ return false;
+
+ if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst))
+ return false;
+
+
+ memset(&psr_configuration, 0, sizeof(psr_configuration));
+
+ psr_configuration.bits.ENABLE = 1;
+ psr_configuration.bits.CRC_VERIFICATION = 1;
+ psr_configuration.bits.FRAME_CAPTURE_INDICATION =
+ psr_config->psr_frame_capture_indication_req;
+
+ /* Check for PSR v2*/
+ if (link->psr_settings.psr_version == DC_PSR_VERSION_SU_1) {
+ /* For PSR v2 selective update.
+ * Indicates whether sink should start capturing
+ * immediately following active scan line,
+ * or starting with the 2nd active scan line.
+ */
+ psr_configuration.bits.LINE_CAPTURE_INDICATION = 0;
+ /*For PSR v2, determines whether Sink should generate
+ * IRQ_HPD when CRC mismatch is detected.
+ */
+ psr_configuration.bits.IRQ_HPD_WITH_CRC_ERROR = 1;
+ /* For PSR v2, set the bit when the Source device will
+ * be enabling PSR2 operation.
+ */
+ psr_configuration.bits.ENABLE_PSR2 = 1;
+ /* For PSR v2, the Sink device must be able to receive
+ * SU region updates early in the frame time.
+ */
+ psr_configuration.bits.EARLY_TRANSPORT_ENABLE = 1;
+ }
+
+ dm_helpers_dp_write_dpcd(
+ link->ctx,
+ link,
+ 368,
+ &psr_configuration.raw,
+ sizeof(psr_configuration.raw));
+
+ if (link->psr_settings.psr_version == DC_PSR_VERSION_SU_1) {
+ link_power_alpm_dpcd_enable(link, true);
+ psr_context->su_granularity_required =
+ psr_config->su_granularity_required;
+ psr_context->su_y_granularity =
+ psr_config->su_y_granularity;
+ psr_context->line_time_in_us = psr_config->line_time_in_us;
+
+ /* linux must be able to expose AMD Source DPCD definition
+ * in order to support FreeSync PSR
+ */
+ if (link->psr_settings.psr_vtotal_control_support) {
+ psr_context->rate_control_caps = psr_config->rate_control_caps;
+ vtotal_control.bits.ENABLE = true;
+ core_link_write_dpcd(link, DP_SINK_PSR_ACTIVE_VTOTAL_CONTROL_MODE,
+ &vtotal_control.raw, sizeof(vtotal_control.raw));
+ }
+ }
+
+ psr_context->channel = link->ddc->ddc_pin->hw_info.ddc_channel;
+ psr_context->transmitterId = link->link_enc->transmitter;
+ psr_context->engineId = link->link_enc->preferred_engine;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (dc->current_state->res_ctx.pipe_ctx[i].stream
+ == stream) {
+ /* dmcu -1 for all controller id values,
+ * therefore +1 here
+ */
+ psr_context->controllerId =
+ dc->current_state->res_ctx.
+ pipe_ctx[i].stream_res.tg->inst + 1;
+ break;
+ }
+ }
+
+ /* Hardcoded for now. Can be Pcie or Uniphy (or Unknown)*/
+ psr_context->phyType = PHY_TYPE_UNIPHY;
+ /*PhyId is associated with the transmitter id*/
+ psr_context->smuPhyId = transmitter_to_phy_id(link);
+
+ psr_context->crtcTimingVerticalTotal = stream->timing.v_total;
+ psr_context->vsync_rate_hz = div64_u64(div64_u64((stream->
+ timing.pix_clk_100hz * 100),
+ stream->timing.v_total),
+ stream->timing.h_total);
+
+ psr_context->psrSupportedDisplayConfig = true;
+ psr_context->psrExitLinkTrainingRequired =
+ psr_config->psr_exit_link_training_required;
+ psr_context->sdpTransmitLineNumDeadline =
+ psr_config->psr_sdp_transmit_line_num_deadline;
+ psr_context->psrFrameCaptureIndicationReq =
+ psr_config->psr_frame_capture_indication_req;
+
+ psr_context->skipPsrWaitForPllLock = 0; /* only = 1 in KV */
+
+ psr_context->numberOfControllers =
+ link->dc->res_pool->timing_generator_count;
+
+ psr_context->rfb_update_auto_en = true;
+
+ /* 2 frames before enter PSR. */
+ psr_context->timehyst_frames = 2;
+ /* half a frame
+ * (units in 100 lines, i.e. a value of 1 represents 100 lines)
+ */
+ psr_context->hyst_lines = stream->timing.v_total / 2 / 100;
+ psr_context->aux_repeats = 10;
+
+ psr_context->psr_level.u32all = 0;
+
+ /*skip power down the single pipe since it blocks the cstate*/
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (link->ctx->asic_id.chip_family >= FAMILY_RV) {
+ switch (link->ctx->asic_id.chip_family) {
+ case FAMILY_YELLOW_CARP:
+ case AMDGPU_FAMILY_GC_10_3_6:
+ case AMDGPU_FAMILY_GC_11_0_1:
+ if (dc->debug.disable_z10 || dc->debug.psr_skip_crtc_disable)
+ psr_context->psr_level.bits.SKIP_CRTC_DISABLE = true;
+ break;
+ default:
+ psr_context->psr_level.bits.SKIP_CRTC_DISABLE = true;
+ break;
+ }
+ }
+#else
+ if (link->ctx->asic_id.chip_family >= FAMILY_RV)
+ psr_context->psr_level.bits.SKIP_CRTC_DISABLE = true;
+#endif
+
+ /* SMU will perform additional powerdown sequence.
+ * For unsupported ASICs, set psr_level flag to skip PSR
+ * static screen notification to SMU.
+ * (Always set for DAL2, did not check ASIC)
+ */
+ psr_context->allow_smu_optimizations = psr_config->allow_smu_optimizations;
+ psr_context->allow_multi_disp_optimizations = psr_config->allow_multi_disp_optimizations;
+
+ /* Complete PSR entry before aborting to prevent intermittent
+ * freezes on certain eDPs
+ */
+ psr_context->psr_level.bits.DISABLE_PSR_ENTRY_ABORT = 1;
+
+ /* Disable ALPM first for compatible non-ALPM panel now */
+ psr_context->psr_level.bits.DISABLE_ALPM = 0;
+ psr_context->psr_level.bits.ALPM_DEFAULT_PD_MODE = 1;
+
+ /* Controls additional delay after remote frame capture before
+ * continuing power down, default = 0
+ */
+ psr_context->frame_delay = 0;
+
+ psr_context->dsc_slice_height = psr_config->dsc_slice_height;
+
+ if (psr) {
+ link->psr_settings.psr_feature_enabled = psr->funcs->psr_copy_settings(psr,
+ link, psr_context, panel_inst);
+ link->psr_settings.psr_power_opt = 0;
+ link->psr_settings.psr_allow_active = 0;
+ } else {
+ link->psr_settings.psr_feature_enabled = dmcu->funcs->setup_psr(dmcu, link, psr_context);
+ }
+
+ /* psr_enabled == 0 indicates setup_psr did not succeed, but this
+ * should not happen since firmware should be running at this point
+ */
+ if (link->psr_settings.psr_feature_enabled == 0)
+ ASSERT(0);
+
+ return true;
+
+}
+
+void link_get_psr_residency(const struct dc_link *link, uint32_t *residency)
+{
+ struct dc *dc = link->ctx->dc;
+ struct dmub_psr *psr = dc->res_pool->psr;
+ unsigned int panel_inst;
+
+ if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst))
+ return;
+
+ // PSR residency measurements only supported on DMCUB
+ if (psr != NULL && link->psr_settings.psr_feature_enabled)
+ psr->funcs->psr_get_residency(psr, residency, panel_inst);
+ else
+ *residency = 0;
+}
+bool link_set_sink_vtotal_in_psr_active(const struct dc_link *link, uint16_t psr_vtotal_idle, uint16_t psr_vtotal_su)
+{
+ struct dc *dc = link->ctx->dc;
+ struct dmub_psr *psr = dc->res_pool->psr;
+
+ if (psr == NULL || !link->psr_settings.psr_feature_enabled || !link->psr_settings.psr_vtotal_control_support)
+ return false;
+
+ psr->funcs->psr_set_sink_vtotal_in_psr_active(psr, psr_vtotal_idle, psr_vtotal_su);
+
+ return true;
+}
+
+static struct abm *get_abm_from_stream_res(const struct dc_link *link)
+{
+ int i;
+ struct dc *dc = link->ctx->dc;
+ struct abm *abm = NULL;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ struct pipe_ctx pipe_ctx = dc->current_state->res_ctx.pipe_ctx[i];
+ struct dc_stream_state *stream = pipe_ctx.stream;
+
+ if (stream && stream->link == link) {
+ abm = pipe_ctx.stream_res.abm;
+ break;
+ }
+ }
+ return abm;
+}
+
+int dc_link_get_backlight_level(const struct dc_link *link)
+{
+ struct abm *abm = get_abm_from_stream_res(link);
+ struct panel_cntl *panel_cntl = link->panel_cntl;
+ struct dc *dc = link->ctx->dc;
+ struct dmcu *dmcu = dc->res_pool->dmcu;
+ bool fw_set_brightness = true;
+
+ if (dmcu)
+ fw_set_brightness = dmcu->funcs->is_dmcu_initialized(dmcu);
+
+ if (!fw_set_brightness && panel_cntl->funcs->get_current_backlight)
+ return panel_cntl->funcs->get_current_backlight(panel_cntl);
+ else if (abm != NULL && abm->funcs->get_current_backlight != NULL)
+ return (int) abm->funcs->get_current_backlight(abm);
+ else
+ return DC_ERROR_UNEXPECTED;
+}
+
+int dc_link_get_target_backlight_pwm(const struct dc_link *link)
+{
+ struct abm *abm = get_abm_from_stream_res(link);
+
+ if (abm == NULL || abm->funcs->get_target_backlight == NULL)
+ return DC_ERROR_UNEXPECTED;
+
+ return (int) abm->funcs->get_target_backlight(abm);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h
new file mode 100644
index 000000000000..7f91a564b089
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_LINK_EDP_PANEL_CONTROL_H__
+#define __DC_LINK_EDP_PANEL_CONTROL_H__
+#include "link.h"
+
+enum dp_panel_mode dp_get_panel_mode(struct dc_link *link);
+void dp_set_panel_mode(struct dc_link *link, enum dp_panel_mode panel_mode);
+bool set_default_brightness_aux(struct dc_link *link);
+#endif /* __DC_LINK_EDP_POWER_CONTROL_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.c
new file mode 100644
index 000000000000..5f39dfe06e9a
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.c
@@ -0,0 +1,240 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+/* FILE POLICY AND INTENDED USAGE:
+ *
+ * This file implements functions that manage basic HPD components such as gpio.
+ * It also provides wrapper functions to execute HPD related programming. This
+ * file only manages basic HPD functionality. It doesn't manage detection or
+ * feature or signal specific HPD behaviors.
+ */
+#include "link_hpd.h"
+#include "gpio_service_interface.h"
+
+bool dc_link_get_hpd_state(struct dc_link *dc_link)
+{
+ uint32_t state;
+
+ dal_gpio_lock_pin(dc_link->hpd_gpio);
+ dal_gpio_get_value(dc_link->hpd_gpio, &state);
+ dal_gpio_unlock_pin(dc_link->hpd_gpio);
+
+ return state;
+}
+
+void dc_link_enable_hpd(const struct dc_link *link)
+{
+ struct link_encoder *encoder = link->link_enc;
+
+ if (encoder != NULL && encoder->funcs->enable_hpd != NULL)
+ encoder->funcs->enable_hpd(encoder);
+}
+
+void dc_link_disable_hpd(const struct dc_link *link)
+{
+ struct link_encoder *encoder = link->link_enc;
+
+ if (encoder != NULL && encoder->funcs->enable_hpd != NULL)
+ encoder->funcs->disable_hpd(encoder);
+}
+
+void dc_link_enable_hpd_filter(struct dc_link *link, bool enable)
+{
+ struct gpio *hpd;
+
+ if (enable) {
+ link->is_hpd_filter_disabled = false;
+ program_hpd_filter(link);
+ } else {
+ link->is_hpd_filter_disabled = true;
+ /* Obtain HPD handle */
+ hpd = link_get_hpd_gpio(link->ctx->dc_bios, link->link_id, link->ctx->gpio_service);
+
+ if (!hpd)
+ return;
+
+ /* Setup HPD filtering */
+ if (dal_gpio_open(hpd, GPIO_MODE_INTERRUPT) == GPIO_RESULT_OK) {
+ struct gpio_hpd_config config;
+
+ config.delay_on_connect = 0;
+ config.delay_on_disconnect = 0;
+
+ dal_irq_setup_hpd_filter(hpd, &config);
+
+ dal_gpio_close(hpd);
+ } else {
+ ASSERT_CRITICAL(false);
+ }
+ /* Release HPD handle */
+ dal_gpio_destroy_irq(&hpd);
+ }
+}
+
+struct gpio *link_get_hpd_gpio(struct dc_bios *dcb,
+ struct graphics_object_id link_id,
+ struct gpio_service *gpio_service)
+{
+ enum bp_result bp_result;
+ struct graphics_object_hpd_info hpd_info;
+ struct gpio_pin_info pin_info;
+
+ if (dcb->funcs->get_hpd_info(dcb, link_id, &hpd_info) != BP_RESULT_OK)
+ return NULL;
+
+ bp_result = dcb->funcs->get_gpio_pin_info(dcb,
+ hpd_info.hpd_int_gpio_uid, &pin_info);
+
+ if (bp_result != BP_RESULT_OK) {
+ ASSERT(bp_result == BP_RESULT_NORECORD);
+ return NULL;
+ }
+
+ return dal_gpio_service_create_irq(gpio_service,
+ pin_info.offset,
+ pin_info.mask);
+}
+
+bool query_hpd_status(struct dc_link *link, uint32_t *is_hpd_high)
+{
+ struct gpio *hpd_pin = link_get_hpd_gpio(
+ link->ctx->dc_bios, link->link_id,
+ link->ctx->gpio_service);
+ if (!hpd_pin)
+ return false;
+
+ dal_gpio_open(hpd_pin, GPIO_MODE_INTERRUPT);
+ dal_gpio_get_value(hpd_pin, is_hpd_high);
+ dal_gpio_close(hpd_pin);
+ dal_gpio_destroy_irq(&hpd_pin);
+ return true;
+}
+
+enum hpd_source_id get_hpd_line(struct dc_link *link)
+{
+ struct gpio *hpd;
+ enum hpd_source_id hpd_id;
+
+ hpd_id = HPD_SOURCEID_UNKNOWN;
+
+ hpd = link_get_hpd_gpio(link->ctx->dc_bios, link->link_id,
+ link->ctx->gpio_service);
+
+ if (hpd) {
+ switch (dal_irq_get_source(hpd)) {
+ case DC_IRQ_SOURCE_HPD1:
+ hpd_id = HPD_SOURCEID1;
+ break;
+ case DC_IRQ_SOURCE_HPD2:
+ hpd_id = HPD_SOURCEID2;
+ break;
+ case DC_IRQ_SOURCE_HPD3:
+ hpd_id = HPD_SOURCEID3;
+ break;
+ case DC_IRQ_SOURCE_HPD4:
+ hpd_id = HPD_SOURCEID4;
+ break;
+ case DC_IRQ_SOURCE_HPD5:
+ hpd_id = HPD_SOURCEID5;
+ break;
+ case DC_IRQ_SOURCE_HPD6:
+ hpd_id = HPD_SOURCEID6;
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ break;
+ }
+
+ dal_gpio_destroy_irq(&hpd);
+ }
+
+ return hpd_id;
+}
+
+bool program_hpd_filter(const struct dc_link *link)
+{
+ bool result = false;
+ struct gpio *hpd;
+ int delay_on_connect_in_ms = 0;
+ int delay_on_disconnect_in_ms = 0;
+
+ if (link->is_hpd_filter_disabled)
+ return false;
+ /* Verify feature is supported */
+ switch (link->connector_signal) {
+ case SIGNAL_TYPE_DVI_SINGLE_LINK:
+ case SIGNAL_TYPE_DVI_DUAL_LINK:
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+ /* Program hpd filter */
+ delay_on_connect_in_ms = 500;
+ delay_on_disconnect_in_ms = 100;
+ break;
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ case SIGNAL_TYPE_DISPLAY_PORT_MST:
+ /* Program hpd filter to allow DP signal to settle */
+ /* 500: not able to detect MST <-> SST switch as HPD is low for
+ * only 100ms on DELL U2413
+ * 0: some passive dongle still show aux mode instead of i2c
+ * 20-50: not enough to hide bouncing HPD with passive dongle.
+ * also see intermittent i2c read issues.
+ */
+ delay_on_connect_in_ms = 80;
+ delay_on_disconnect_in_ms = 0;
+ break;
+ case SIGNAL_TYPE_LVDS:
+ case SIGNAL_TYPE_EDP:
+ default:
+ /* Don't program hpd filter */
+ return false;
+ }
+
+ /* Obtain HPD handle */
+ hpd = link_get_hpd_gpio(link->ctx->dc_bios, link->link_id,
+ link->ctx->gpio_service);
+
+ if (!hpd)
+ return result;
+
+ /* Setup HPD filtering */
+ if (dal_gpio_open(hpd, GPIO_MODE_INTERRUPT) == GPIO_RESULT_OK) {
+ struct gpio_hpd_config config;
+
+ config.delay_on_connect = delay_on_connect_in_ms;
+ config.delay_on_disconnect = delay_on_disconnect_in_ms;
+
+ dal_irq_setup_hpd_filter(hpd, &config);
+
+ dal_gpio_close(hpd);
+
+ result = true;
+ } else {
+ ASSERT_CRITICAL(false);
+ }
+
+ /* Release HPD handle */
+ dal_gpio_destroy_irq(&hpd);
+
+ return result;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.h
new file mode 100644
index 000000000000..3d122def0c88
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+
+#ifndef __DC_LINK_HPD_H__
+#define __DC_LINK_HPD_H__
+#include "link.h"
+
+enum hpd_source_id get_hpd_line(struct dc_link *link);
+/*
+ * Function: program_hpd_filter
+ *
+ * @brief
+ * Programs HPD filter on associated HPD line to default values.
+ *
+ * @return
+ * true on success, false otherwise
+ */
+bool program_hpd_filter(const struct dc_link *link);
+/* Query hot plug status of USB4 DP tunnel.
+ * Returns true if HPD high.
+ */
+bool dpia_query_hpd_status(struct dc_link *link);
+bool query_hpd_status(struct dc_link *link, uint32_t *is_hpd_high);
+#endif /* __DC_LINK_HPD_H__ */
diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
index eb5b7eb292ef..a391b939d709 100644
--- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
@@ -126,10 +126,22 @@ enum dmub_notification_type {
DMUB_NOTIFICATION_HPD,
DMUB_NOTIFICATION_HPD_IRQ,
DMUB_NOTIFICATION_SET_CONFIG_REPLY,
+ DMUB_NOTIFICATION_DPIA_NOTIFICATION,
DMUB_NOTIFICATION_MAX
};
/**
+ * DPIA NOTIFICATION Response Type
+ */
+enum dpia_notify_bw_alloc_status {
+
+ DPIA_BW_REQ_FAILED = 0,
+ DPIA_BW_REQ_SUCCESS,
+ DPIA_EST_BW_CHANGED,
+ DPIA_BW_ALLOC_CAPS_CHANGED
+};
+
+/**
* struct dmub_region - dmub hw memory region
* @base: base address for region, must be 256 byte aligned
* @top: top address for region
@@ -453,6 +465,7 @@ struct dmub_srv {
* @pending_notification: Indicates there are other pending notifications
* @aux_reply: aux reply
* @hpd_status: hpd status
+ * @bw_alloc_reply: BW Allocation reply from CM/DPIA
*/
struct dmub_notification {
enum dmub_notification_type type;
@@ -463,6 +476,10 @@ struct dmub_notification {
struct aux_reply_data aux_reply;
enum dp_hpd_status hpd_status;
enum set_config_status sc_status;
+ /**
+ * DPIA notification command.
+ */
+ struct dmub_rb_cmd_dpia_notification dpia_notification;
};
};
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
index 33907feefebb..007d6bdc3e39 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -162,6 +162,7 @@ extern "C" {
#define dmub_udelay(microseconds) udelay(microseconds)
#endif
+#pragma pack(push, 1)
/**
* union dmub_addr - DMUB physical/virtual 64-bit address.
*/
@@ -172,6 +173,7 @@ union dmub_addr {
} u; /*<< Low/high bit access */
uint64_t quad_part; /*<< 64 bit address */
};
+#pragma pack(pop)
/**
* Dirty rect definition.
@@ -457,6 +459,10 @@ enum dmub_cmd_vbios_type {
* Query DP alt status on a transmitter.
*/
DMUB_CMD__VBIOS_TRANSMITTER_QUERY_DP_ALT = 26,
+ /**
+ * Controls domain power gating
+ */
+ DMUB_CMD__VBIOS_DOMAIN_CONTROL = 28,
};
//==============================================================================
@@ -770,6 +776,10 @@ enum dmub_out_cmd_type {
* Command type used for SET_CONFIG Reply notification
*/
DMUB_OUT_CMD__SET_CONFIG_REPLY = 3,
+ /**
+ * Command type used for USB4 DPIA notification
+ */
+ DMUB_OUT_CMD__DPIA_NOTIFICATION = 5,
};
/* DMUB_CMD__DPIA command sub-types. */
@@ -779,6 +789,11 @@ enum dmub_cmd_dpia_type {
DMUB_CMD__DPIA_MST_ALLOC_SLOTS = 2,
};
+/* DMUB_OUT_CMD__DPIA_NOTIFICATION command types. */
+enum dmub_cmd_dpia_notification_type {
+ DPIA_NOTIFY__BW_ALLOCATION = 0,
+};
+
#pragma pack(push, 1)
/**
@@ -1205,6 +1220,23 @@ struct dmub_rb_cmd_dig1_transmitter_control {
};
/**
+ * struct dmub_rb_cmd_domain_control_data - Data for DOMAIN power control
+ */
+struct dmub_rb_cmd_domain_control_data {
+ uint8_t inst : 6; /**< DOMAIN instance to control */
+ uint8_t power_gate : 1; /**< 1=power gate, 0=power up */
+ uint8_t reserved[3]; /**< Reserved for future use */
+};
+
+/**
+ * struct dmub_rb_cmd_domain_control - Controls DOMAIN power gating
+ */
+struct dmub_rb_cmd_domain_control {
+ struct dmub_cmd_header header; /**< header */
+ struct dmub_rb_cmd_domain_control_data data; /**< payload */
+};
+
+/**
* DPIA tunnel command parameters.
*/
struct dmub_cmd_dig_dpia_control_data {
@@ -1558,6 +1590,79 @@ struct dmub_rb_cmd_dp_set_config_reply {
};
/**
+ * Definition of a DPIA notification header
+ */
+struct dpia_notification_header {
+ uint8_t instance; /**< DPIA Instance */
+ uint8_t reserved[3];
+ enum dmub_cmd_dpia_notification_type type; /**< DPIA notification type */
+};
+
+/**
+ * Definition of the common data struct of DPIA notification
+ */
+struct dpia_notification_common {
+ uint8_t cmd_buffer[DMUB_RB_CMD_SIZE - sizeof(struct dmub_cmd_header)
+ - sizeof(struct dpia_notification_header)];
+};
+
+/**
+ * Definition of a DPIA notification data
+ */
+struct dpia_bw_allocation_notify_data {
+ union {
+ struct {
+ uint16_t cm_bw_alloc_support: 1; /**< USB4 CM BW Allocation mode support */
+ uint16_t bw_request_failed: 1; /**< BW_Request_Failed */
+ uint16_t bw_request_succeeded: 1; /**< BW_Request_Succeeded */
+ uint16_t est_bw_changed: 1; /**< Estimated_BW changed */
+ uint16_t bw_alloc_cap_changed: 1; /**< BW_Allocation_Capabiity_Changed */
+ uint16_t reserved: 11; /**< Reserved */
+ } bits;
+
+ uint16_t flags;
+ };
+
+ uint8_t cm_id; /**< CM ID */
+ uint8_t group_id; /**< Group ID */
+ uint8_t granularity; /**< BW Allocation Granularity */
+ uint8_t estimated_bw; /**< Estimated_BW */
+ uint8_t allocated_bw; /**< Allocated_BW */
+ uint8_t reserved;
+};
+
+/**
+ * union dpia_notify_data_type - DPIA Notification in Outbox command
+ */
+union dpia_notification_data {
+ /**
+ * DPIA Notification for common data struct
+ */
+ struct dpia_notification_common common_data;
+
+ /**
+ * DPIA Notification for DP BW Allocation support
+ */
+ struct dpia_bw_allocation_notify_data dpia_bw_alloc;
+};
+
+/**
+ * Definition of a DPIA notification payload
+ */
+struct dpia_notification_payload {
+ struct dpia_notification_header header;
+ union dpia_notification_data data; /**< DPIA notification payload data */
+};
+
+/**
+ * Definition of a DMUB_OUT_CMD__DPIA_NOTIFICATION command.
+ */
+struct dmub_rb_cmd_dpia_notification {
+ struct dmub_cmd_header header; /**< DPIA notification header */
+ struct dpia_notification_payload payload; /**< DPIA notification payload */
+};
+
+/**
* Data passed from driver to FW in a DMUB_CMD__QUERY_HPD_STATE command.
*/
struct dmub_cmd_hpd_state_query_data {
@@ -1886,6 +1991,14 @@ struct dmub_cmd_psr_copy_settings_data {
* Explicit padding to 2 byte boundary.
*/
uint8_t pad3;
+ /**
+ * DSC Slice height.
+ */
+ uint16_t dsc_slice_height;
+ /**
+ * Explicit padding to 4 byte boundary.
+ */
+ uint16_t pad;
};
/**
@@ -3029,7 +3142,8 @@ struct dmub_rb_cmd_panel_cntl {
*/
struct dmub_cmd_lvtma_control_data {
uint8_t uc_pwr_action; /**< LVTMA_ACTION */
- uint8_t reserved_0[3]; /**< For future use */
+ uint8_t bypass_panel_control_wait;
+ uint8_t reserved_0[2]; /**< For future use */
uint8_t panel_inst; /**< LVTMA control instance */
uint8_t reserved_1[3]; /**< For future use */
};
@@ -3232,6 +3346,10 @@ union dmub_rb_cmd {
*/
struct dmub_rb_cmd_dig1_transmitter_control dig1_transmitter_control;
/**
+ * Definition of a DMUB_CMD__VBIOS_DOMAIN_CONTROL command.
+ */
+ struct dmub_rb_cmd_domain_control domain_control;
+ /**
* Definition of a DMUB_CMD__PSR_SET_VERSION command.
*/
struct dmub_rb_cmd_psr_set_version psr_set_version;
@@ -3422,6 +3540,10 @@ union dmub_rb_out_cmd {
* SET_CONFIG reply command.
*/
struct dmub_rb_cmd_dp_set_config_reply set_config_reply;
+ /**
+ * DPIA notification command.
+ */
+ struct dmub_rb_cmd_dpia_notification dpia_notification;
};
#pragma pack(pop)
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
index 4a122925c3ae..92c18bfb98b3 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
@@ -532,6 +532,9 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
if (dmub->hw_funcs.reset)
dmub->hw_funcs.reset(dmub);
+ /* reset the cache of the last wptr as well now that hw is reset */
+ dmub->inbox1_last_wptr = 0;
+
cw0.offset.quad_part = inst_fb->gpu_addr;
cw0.region.base = DMUB_CW0_BASE;
cw0.region.top = cw0.region.base + inst_fb->size - 1;
@@ -649,6 +652,15 @@ enum dmub_status dmub_srv_hw_reset(struct dmub_srv *dmub)
if (dmub->hw_funcs.reset)
dmub->hw_funcs.reset(dmub);
+ /* mailboxes have been reset in hw, so reset the sw state as well */
+ dmub->inbox1_last_wptr = 0;
+ dmub->inbox1_rb.wrpt = 0;
+ dmub->inbox1_rb.rptr = 0;
+ dmub->outbox0_rb.wrpt = 0;
+ dmub->outbox0_rb.rptr = 0;
+ dmub->outbox1_rb.wrpt = 0;
+ dmub->outbox1_rb.rptr = 0;
+
dmub->hw_init = false;
return DMUB_STATUS_OK;
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c
index 44502ec919a2..74189102eaec 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c
@@ -92,6 +92,27 @@ enum dmub_status dmub_srv_stat_get_notification(struct dmub_srv *dmub,
notify->link_index = cmd.set_config_reply.set_config_reply_control.instance;
notify->sc_status = cmd.set_config_reply.set_config_reply_control.status;
break;
+ case DMUB_OUT_CMD__DPIA_NOTIFICATION:
+ notify->type = DMUB_NOTIFICATION_DPIA_NOTIFICATION;
+ notify->link_index = cmd.dpia_notification.payload.header.instance;
+
+ if (cmd.dpia_notification.payload.header.type == DPIA_NOTIFY__BW_ALLOCATION) {
+
+ notify->dpia_notification.payload.data.dpia_bw_alloc.estimated_bw =
+ cmd.dpia_notification.payload.data.dpia_bw_alloc.estimated_bw;
+ notify->dpia_notification.payload.data.dpia_bw_alloc.allocated_bw =
+ cmd.dpia_notification.payload.data.dpia_bw_alloc.allocated_bw;
+
+ if (cmd.dpia_notification.payload.data.dpia_bw_alloc.bits.bw_request_failed)
+ notify->result = DPIA_BW_REQ_FAILED;
+ else if (cmd.dpia_notification.payload.data.dpia_bw_alloc.bits.bw_request_succeeded)
+ notify->result = DPIA_BW_REQ_SUCCESS;
+ else if (cmd.dpia_notification.payload.data.dpia_bw_alloc.bits.est_bw_changed)
+ notify->result = DPIA_EST_BW_CHANGED;
+ else if (cmd.dpia_notification.payload.data.dpia_bw_alloc.bits.bw_alloc_cap_changed)
+ notify->result = DPIA_BW_ALLOC_CAPS_CHANGED;
+ }
+ break;
default:
notify->type = DMUB_NOTIFICATION_NO_DATA;
break;
diff --git a/drivers/gpu/drm/amd/display/include/ddc_service_types.h b/drivers/gpu/drm/amd/display/include/ddc_service_types.h
index a7ba5bd8dc16..31a12ce79a8e 100644
--- a/drivers/gpu/drm/amd/display/include/ddc_service_types.h
+++ b/drivers/gpu/drm/amd/display/include/ddc_service_types.h
@@ -35,6 +35,7 @@
#define DP_BRANCH_DEVICE_ID_00E04C 0x00E04C
#define DP_BRANCH_DEVICE_ID_006037 0x006037
#define DP_BRANCH_DEVICE_ID_001CF8 0x001CF8
+#define DP_BRANCH_DEVICE_ID_0060AD 0x0060AD
#define DP_BRANCH_HW_REV_10 0x10
#define DP_BRANCH_HW_REV_20 0x20
@@ -133,6 +134,11 @@ static const uint8_t DP_SINK_DEVICE_STR_ID_2[] = {7, 1, 8, 7, 5};
static const u8 DP_SINK_BRANCH_DEV_NAME_7580[] = "7580\x80u";
+/*Travis*/
+static const uint8_t DP_VGA_LVDS_CONVERTER_ID_2[] = "sivarT";
+/*Nutmeg*/
+static const uint8_t DP_VGA_LVDS_CONVERTER_ID_3[] = "dnomlA";
+
/*MST Dock*/
static const uint8_t SYNAPTICS_DEVICE_ID[] = "SYNA";
diff --git a/drivers/gpu/drm/amd/display/include/dpcd_defs.h b/drivers/gpu/drm/amd/display/include/dpcd_defs.h
index b2df07f9e91c..c062a44db078 100644
--- a/drivers/gpu/drm/amd/display/include/dpcd_defs.h
+++ b/drivers/gpu/drm/amd/display/include/dpcd_defs.h
@@ -88,7 +88,10 @@ enum dpcd_phy_test_patterns {
PHY_TEST_PATTERN_PRBS23 = 0x30,
PHY_TEST_PATTERN_PRBS31 = 0x38,
PHY_TEST_PATTERN_264BIT_CUSTOM = 0x40,
- PHY_TEST_PATTERN_SQUARE_PULSE = 0x48,
+ PHY_TEST_PATTERN_SQUARE = 0x48,
+ PHY_TEST_PATTERN_SQUARE_PRESHOOT_DISABLED = 0x49,
+ PHY_TEST_PATTERN_SQUARE_DEEMPHASIS_DISABLED = 0x4A,
+ PHY_TEST_PATTERN_SQUARE_PRESHOOT_DEEMPHASIS_DISABLED = 0x4B,
};
enum dpcd_test_dyn_range {
diff --git a/drivers/gpu/drm/amd/display/include/link_service_types.h b/drivers/gpu/drm/amd/display/include/link_service_types.h
index d1e91d31d151..18b9173d5a96 100644
--- a/drivers/gpu/drm/amd/display/include/link_service_types.h
+++ b/drivers/gpu/drm/amd/display/include/link_service_types.h
@@ -165,7 +165,12 @@ enum dp_test_pattern {
DP_TEST_PATTERN_PRBS23,
DP_TEST_PATTERN_PRBS31,
DP_TEST_PATTERN_264BIT_CUSTOM,
- DP_TEST_PATTERN_SQUARE_PULSE,
+ DP_TEST_PATTERN_SQUARE_BEGIN,
+ DP_TEST_PATTERN_SQUARE = DP_TEST_PATTERN_SQUARE_BEGIN,
+ DP_TEST_PATTERN_SQUARE_PRESHOOT_DISABLED,
+ DP_TEST_PATTERN_SQUARE_DEEMPHASIS_DISABLED,
+ DP_TEST_PATTERN_SQUARE_PRESHOOT_DEEMPHASIS_DISABLED,
+ DP_TEST_PATTERN_SQUARE_END = DP_TEST_PATTERN_SQUARE_PRESHOOT_DEEMPHASIS_DISABLED,
/* Link Training Patterns */
DP_TEST_PATTERN_TRAINING_PATTERN1,
diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
index f6034213c700..67a062af3ab0 100644
--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
@@ -1715,8 +1715,8 @@ static bool map_regamma_hw_to_x_user(
const struct pwl_float_data_ex *rgb_regamma,
uint32_t hw_points_num,
struct dc_transfer_func_distributed_points *tf_pts,
- bool mapUserRamp,
- bool doClamping)
+ bool map_user_ramp,
+ bool do_clamping)
{
/* setup to spare calculated ideal regamma values */
@@ -1724,7 +1724,7 @@ static bool map_regamma_hw_to_x_user(
struct hw_x_point *coords = coords_x;
const struct pwl_float_data_ex *regamma = rgb_regamma;
- if (ramp && mapUserRamp) {
+ if (ramp && map_user_ramp) {
copy_rgb_regamma_to_coordinates_x(coords,
hw_points_num,
rgb_regamma);
@@ -1744,7 +1744,7 @@ static bool map_regamma_hw_to_x_user(
}
}
- if (doClamping) {
+ if (do_clamping) {
/* this should be named differently, all it does is clamp to 0-1 */
build_new_custom_resulted_curve(hw_points_num, tf_pts);
}
@@ -1875,7 +1875,7 @@ rgb_user_alloc_fail:
bool mod_color_calculate_degamma_params(struct dc_color_caps *dc_caps,
struct dc_transfer_func *input_tf,
- const struct dc_gamma *ramp, bool mapUserRamp)
+ const struct dc_gamma *ramp, bool map_user_ramp)
{
struct dc_transfer_func_distributed_points *tf_pts = &input_tf->tf_pts;
struct dividers dividers;
@@ -1883,7 +1883,7 @@ bool mod_color_calculate_degamma_params(struct dc_color_caps *dc_caps,
struct pwl_float_data_ex *curve = NULL;
struct gamma_pixel *axis_x = NULL;
struct pixel_gamma_point *coeff = NULL;
- enum dc_transfer_func_predefined tf = TRANSFER_FUNCTION_SRGB;
+ enum dc_transfer_func_predefined tf;
uint32_t i;
bool ret = false;
@@ -1891,12 +1891,12 @@ bool mod_color_calculate_degamma_params(struct dc_color_caps *dc_caps,
return false;
/* we can use hardcoded curve for plain SRGB TF
- * If linear, it's bypass if on user ramp
+ * If linear, it's bypass if no user ramp
*/
if (input_tf->type == TF_TYPE_PREDEFINED) {
if ((input_tf->tf == TRANSFER_FUNCTION_SRGB ||
input_tf->tf == TRANSFER_FUNCTION_LINEAR) &&
- !mapUserRamp)
+ !map_user_ramp)
return true;
if (dc_caps != NULL &&
@@ -1919,7 +1919,7 @@ bool mod_color_calculate_degamma_params(struct dc_color_caps *dc_caps,
input_tf->type = TF_TYPE_DISTRIBUTED_POINTS;
- if (mapUserRamp && ramp && ramp->type == GAMMA_RGB_256) {
+ if (map_user_ramp && ramp && ramp->type == GAMMA_RGB_256) {
rgb_user = kvcalloc(ramp->num_entries + _EXTRA_POINTS,
sizeof(*rgb_user),
GFP_KERNEL);
@@ -2007,7 +2007,7 @@ bool mod_color_calculate_degamma_params(struct dc_color_caps *dc_caps,
map_regamma_hw_to_x_user(ramp, coeff, rgb_user,
coordinates_x, axis_x, curve,
MAX_HW_POINTS, tf_pts,
- mapUserRamp && ramp && ramp->type == GAMMA_RGB_256,
+ map_user_ramp && ramp && ramp->type == GAMMA_RGB_256,
true);
}
@@ -2112,9 +2112,11 @@ static bool calculate_curve(enum dc_transfer_func_predefined trans,
}
bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf,
- const struct dc_gamma *ramp, bool mapUserRamp, bool canRomBeUsed,
- const struct hdr_tm_params *fs_params,
- struct calculate_buffer *cal_buffer)
+ const struct dc_gamma *ramp,
+ bool map_user_ramp,
+ bool can_rom_be_used,
+ const struct hdr_tm_params *fs_params,
+ struct calculate_buffer *cal_buffer)
{
struct dc_transfer_func_distributed_points *tf_pts = &output_tf->tf_pts;
struct dividers dividers;
@@ -2123,27 +2125,27 @@ bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf,
struct pwl_float_data_ex *rgb_regamma = NULL;
struct gamma_pixel *axis_x = NULL;
struct pixel_gamma_point *coeff = NULL;
- enum dc_transfer_func_predefined tf = TRANSFER_FUNCTION_SRGB;
- bool doClamping = true;
+ enum dc_transfer_func_predefined tf;
+ bool do_clamping = true;
bool ret = false;
if (output_tf->type == TF_TYPE_BYPASS)
return false;
/* we can use hardcoded curve for plain SRGB TF */
- if (output_tf->type == TF_TYPE_PREDEFINED && canRomBeUsed == true &&
+ if (output_tf->type == TF_TYPE_PREDEFINED && can_rom_be_used == true &&
output_tf->tf == TRANSFER_FUNCTION_SRGB) {
if (ramp == NULL)
return true;
if ((ramp->is_identity && ramp->type != GAMMA_CS_TFM_1D) ||
- (!mapUserRamp && ramp->type == GAMMA_RGB_256))
+ (!map_user_ramp && ramp->type == GAMMA_RGB_256))
return true;
}
output_tf->type = TF_TYPE_DISTRIBUTED_POINTS;
if (ramp && ramp->type != GAMMA_CS_TFM_1D &&
- (mapUserRamp || ramp->type != GAMMA_RGB_256)) {
+ (map_user_ramp || ramp->type != GAMMA_RGB_256)) {
rgb_user = kvcalloc(ramp->num_entries + _EXTRA_POINTS,
sizeof(*rgb_user),
GFP_KERNEL);
@@ -2164,7 +2166,7 @@ bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf,
ramp->num_entries,
dividers);
- if (ramp->type == GAMMA_RGB_256 && mapUserRamp)
+ if (ramp->type == GAMMA_RGB_256 && map_user_ramp)
scale_gamma(rgb_user, ramp, dividers);
else if (ramp->type == GAMMA_RGB_FLOAT_1024)
scale_gamma_dx(rgb_user, ramp, dividers);
@@ -2191,15 +2193,15 @@ bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf,
cal_buffer);
if (ret) {
- doClamping = !(output_tf->tf == TRANSFER_FUNCTION_GAMMA22 &&
- fs_params != NULL && fs_params->skip_tm == 0);
+ do_clamping = !(output_tf->tf == TRANSFER_FUNCTION_GAMMA22 &&
+ fs_params != NULL && fs_params->skip_tm == 0);
map_regamma_hw_to_x_user(ramp, coeff, rgb_user,
- coordinates_x, axis_x, rgb_regamma,
- MAX_HW_POINTS, tf_pts,
- (mapUserRamp || (ramp && ramp->type != GAMMA_RGB_256)) &&
- (ramp && ramp->type != GAMMA_CS_TFM_1D),
- doClamping);
+ coordinates_x, axis_x, rgb_regamma,
+ MAX_HW_POINTS, tf_pts,
+ (map_user_ramp || (ramp && ramp->type != GAMMA_RGB_256)) &&
+ (ramp && ramp->type != GAMMA_CS_TFM_1D),
+ do_clamping);
if (ramp && ramp->type == GAMMA_CS_TFM_1D)
apply_lut_1d(ramp, MAX_HW_POINTS, tf_pts);
@@ -2215,89 +2217,3 @@ axis_x_alloc_fail:
rgb_user_alloc_fail:
return ret;
}
-
-bool mod_color_calculate_degamma_curve(enum dc_transfer_func_predefined trans,
- struct dc_transfer_func_distributed_points *points)
-{
- uint32_t i;
- bool ret = false;
- struct pwl_float_data_ex *rgb_degamma = NULL;
-
- if (trans == TRANSFER_FUNCTION_UNITY ||
- trans == TRANSFER_FUNCTION_LINEAR) {
-
- for (i = 0; i <= MAX_HW_POINTS ; i++) {
- points->red[i] = coordinates_x[i].x;
- points->green[i] = coordinates_x[i].x;
- points->blue[i] = coordinates_x[i].x;
- }
- ret = true;
- } else if (trans == TRANSFER_FUNCTION_PQ) {
- rgb_degamma = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS,
- sizeof(*rgb_degamma),
- GFP_KERNEL);
- if (!rgb_degamma)
- goto rgb_degamma_alloc_fail;
-
-
- build_de_pq(rgb_degamma,
- MAX_HW_POINTS,
- coordinates_x);
- for (i = 0; i <= MAX_HW_POINTS ; i++) {
- points->red[i] = rgb_degamma[i].r;
- points->green[i] = rgb_degamma[i].g;
- points->blue[i] = rgb_degamma[i].b;
- }
- ret = true;
-
- kvfree(rgb_degamma);
- } else if (trans == TRANSFER_FUNCTION_SRGB ||
- trans == TRANSFER_FUNCTION_BT709 ||
- trans == TRANSFER_FUNCTION_GAMMA22 ||
- trans == TRANSFER_FUNCTION_GAMMA24 ||
- trans == TRANSFER_FUNCTION_GAMMA26) {
- rgb_degamma = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS,
- sizeof(*rgb_degamma),
- GFP_KERNEL);
- if (!rgb_degamma)
- goto rgb_degamma_alloc_fail;
-
- build_degamma(rgb_degamma,
- MAX_HW_POINTS,
- coordinates_x,
- trans);
- for (i = 0; i <= MAX_HW_POINTS ; i++) {
- points->red[i] = rgb_degamma[i].r;
- points->green[i] = rgb_degamma[i].g;
- points->blue[i] = rgb_degamma[i].b;
- }
- ret = true;
-
- kvfree(rgb_degamma);
- } else if (trans == TRANSFER_FUNCTION_HLG) {
- rgb_degamma = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS,
- sizeof(*rgb_degamma),
- GFP_KERNEL);
- if (!rgb_degamma)
- goto rgb_degamma_alloc_fail;
-
- build_hlg_degamma(rgb_degamma,
- MAX_HW_POINTS,
- coordinates_x,
- 80, 1000);
- for (i = 0; i <= MAX_HW_POINTS ; i++) {
- points->red[i] = rgb_degamma[i].r;
- points->green[i] = rgb_degamma[i].g;
- points->blue[i] = rgb_degamma[i].b;
- }
- ret = true;
- kvfree(rgb_degamma);
- }
- points->end_exponent = 0;
- points->x_point_at_y1_red = 1;
- points->x_point_at_y1_green = 1;
- points->x_point_at_y1_blue = 1;
-
-rgb_degamma_alloc_fail:
- return ret;
-}
diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.h b/drivers/gpu/drm/amd/display/modules/color/color_gamma.h
index 2893abf48208..ee5c466613de 100644
--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.h
+++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.h
@@ -115,9 +115,6 @@ bool mod_color_calculate_degamma_params(struct dc_color_caps *dc_caps,
struct dc_transfer_func *output_tf,
const struct dc_gamma *ramp, bool mapUserRamp);
-bool mod_color_calculate_degamma_curve(enum dc_transfer_func_predefined trans,
- struct dc_transfer_func_distributed_points *points);
-
bool calculate_user_regamma_coeff(struct dc_transfer_func *output_tf,
const struct regamma_lut *regamma,
struct calculate_buffer *cal_buffer,
diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
index c2e00f7b8381..2be45b314922 100644
--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
@@ -616,7 +616,8 @@ static void build_vrr_infopacket_data_v1(const struct mod_vrr_params *vrr,
}
static void build_vrr_infopacket_data_v3(const struct mod_vrr_params *vrr,
- struct dc_info_packet *infopacket)
+ struct dc_info_packet *infopacket,
+ bool freesync_on_desktop)
{
unsigned int min_refresh;
unsigned int max_refresh;
@@ -649,9 +650,15 @@ static void build_vrr_infopacket_data_v3(const struct mod_vrr_params *vrr,
infopacket->sb[6] |= 0x02;
/* PB6 = [Bit 2 = FreeSync Active] */
- if (vrr->state == VRR_STATE_ACTIVE_VARIABLE ||
+ if (freesync_on_desktop) {
+ if (vrr->state != VRR_STATE_DISABLED &&
+ vrr->state != VRR_STATE_UNSUPPORTED)
+ infopacket->sb[6] |= 0x04;
+ } else {
+ if (vrr->state == VRR_STATE_ACTIVE_VARIABLE ||
vrr->state == VRR_STATE_ACTIVE_FIXED)
- infopacket->sb[6] |= 0x04;
+ infopacket->sb[6] |= 0x04;
+ }
min_refresh = (vrr->min_refresh_in_uhz + 500000) / 1000000;
max_refresh = (vrr->max_refresh_in_uhz + 500000) / 1000000;
@@ -898,52 +905,20 @@ static void build_vrr_infopacket_v2(enum signal_type signal,
infopacket->valid = true;
}
-#ifndef TRIM_FSFT
-static void build_vrr_infopacket_fast_transport_data(
- bool ftActive,
- unsigned int ftOutputRate,
- struct dc_info_packet *infopacket)
-{
- /* PB9 : bit7 - fast transport Active*/
- unsigned char activeBit = (ftActive) ? 1 << 7 : 0;
-
- infopacket->sb[1] &= ~activeBit; //clear bit
- infopacket->sb[1] |= activeBit; //set bit
-
- /* PB13 : Target Output Pixel Rate [kHz] - bits 7:0 */
- infopacket->sb[13] = ftOutputRate & 0xFF;
-
- /* PB14 : Target Output Pixel Rate [kHz] - bits 15:8 */
- infopacket->sb[14] = (ftOutputRate >> 8) & 0xFF;
-
- /* PB15 : Target Output Pixel Rate [kHz] - bits 23:16 */
- infopacket->sb[15] = (ftOutputRate >> 16) & 0xFF;
-
-}
-#endif
static void build_vrr_infopacket_v3(enum signal_type signal,
const struct mod_vrr_params *vrr,
-#ifndef TRIM_FSFT
- bool ftActive, unsigned int ftOutputRate,
-#endif
enum color_transfer_func app_tf,
- struct dc_info_packet *infopacket)
+ struct dc_info_packet *infopacket,
+ bool freesync_on_desktop)
{
unsigned int payload_size = 0;
build_vrr_infopacket_header_v3(signal, infopacket, &payload_size);
- build_vrr_infopacket_data_v3(vrr, infopacket);
+ build_vrr_infopacket_data_v3(vrr, infopacket, freesync_on_desktop);
build_vrr_infopacket_fs2_data(app_tf, infopacket);
-#ifndef TRIM_FSFT
- build_vrr_infopacket_fast_transport_data(
- ftActive,
- ftOutputRate,
- infopacket);
-#endif
-
build_vrr_infopacket_checksum(&payload_size, infopacket);
infopacket->valid = true;
@@ -980,31 +955,26 @@ void mod_freesync_build_vrr_infopacket(struct mod_freesync *mod_freesync,
* Check if Freesync is supported. Return if false. If true,
* set the corresponding bit in the info packet
*/
+ bool freesync_on_desktop;
+ bool fams_enable;
+
+ fams_enable = stream->ctx->dc->current_state->bw_ctx.bw.dcn.clk.fw_based_mclk_switching;
+ freesync_on_desktop = stream->freesync_on_desktop && fams_enable;
+
if (!vrr->send_info_frame)
return;
switch (packet_type) {
case PACKET_TYPE_FS_V3:
-#ifndef TRIM_FSFT
- // always populate with pixel rate.
- build_vrr_infopacket_v3(
- stream->signal, vrr,
- stream->timing.flags.FAST_TRANSPORT,
- (stream->timing.flags.FAST_TRANSPORT) ?
- stream->timing.fast_transport_output_rate_100hz :
- stream->timing.pix_clk_100hz,
- app_tf, infopacket);
-#else
- build_vrr_infopacket_v3(stream->signal, vrr, app_tf, infopacket);
-#endif
+ build_vrr_infopacket_v3(stream->signal, vrr, app_tf, infopacket, freesync_on_desktop);
break;
case PACKET_TYPE_FS_V2:
- build_vrr_infopacket_v2(stream->signal, vrr, app_tf, infopacket, stream->freesync_on_desktop);
+ build_vrr_infopacket_v2(stream->signal, vrr, app_tf, infopacket, freesync_on_desktop);
break;
case PACKET_TYPE_VRR:
case PACKET_TYPE_FS_V1:
default:
- build_vrr_infopacket_v1(stream->signal, vrr, infopacket, stream->freesync_on_desktop);
+ build_vrr_infopacket_v1(stream->signal, vrr, infopacket, freesync_on_desktop);
}
if (true == pack_sdp_v1_3 &&
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h b/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h
index edf5845f6a1f..66dc9a19aebe 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h
@@ -41,4 +41,40 @@ void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
void mod_build_hf_vsif_infopacket(const struct dc_stream_state *stream,
struct dc_info_packet *info_packet);
+enum adaptive_sync_type {
+ ADAPTIVE_SYNC_TYPE_NONE = 0,
+ ADAPTIVE_SYNC_TYPE_DP = 1,
+ FREESYNC_TYPE_PCON_IN_WHITELIST = 2,
+ FREESYNC_TYPE_PCON_NOT_IN_WHITELIST = 3,
+ ADAPTIVE_SYNC_TYPE_EDP = 4,
+};
+
+enum adaptive_sync_sdp_version {
+ AS_SDP_VER_0 = 0x0,
+ AS_SDP_VER_1 = 0x1,
+ AS_SDP_VER_2 = 0x2,
+};
+
+#define AS_DP_SDP_LENGTH (9)
+
+struct frame_duration_op {
+ bool support;
+ unsigned char frame_duration_hex;
+};
+
+struct AS_Df_params {
+ bool supportMode;
+ struct frame_duration_op increase;
+ struct frame_duration_op decrease;
+};
+
+void mod_build_adaptive_sync_infopacket(const struct dc_stream_state *stream,
+ enum adaptive_sync_type asType, const struct AS_Df_params *param,
+ struct dc_info_packet *info_packet);
+
+void mod_build_adaptive_sync_infopacket_v2(const struct dc_stream_state *stream,
+ const struct AS_Df_params *param, struct dc_info_packet *info_packet);
+
+void mod_build_adaptive_sync_infopacket_v1(struct dc_info_packet *info_packet);
+
#endif
diff --git a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
index 69691058ab89..ec64f19e1786 100644
--- a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
+++ b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
@@ -519,3 +519,58 @@ void mod_build_hf_vsif_infopacket(const struct dc_stream_state *stream,
info_packet->valid = true;
}
+void mod_build_adaptive_sync_infopacket(const struct dc_stream_state *stream,
+ enum adaptive_sync_type asType,
+ const struct AS_Df_params *param,
+ struct dc_info_packet *info_packet)
+{
+ info_packet->valid = false;
+
+ memset(info_packet, 0, sizeof(struct dc_info_packet));
+
+ switch (asType) {
+ case ADAPTIVE_SYNC_TYPE_DP:
+ if (stream != NULL)
+ mod_build_adaptive_sync_infopacket_v2(stream, param, info_packet);
+ break;
+ case FREESYNC_TYPE_PCON_IN_WHITELIST:
+ mod_build_adaptive_sync_infopacket_v1(info_packet);
+ break;
+ case ADAPTIVE_SYNC_TYPE_NONE:
+ case FREESYNC_TYPE_PCON_NOT_IN_WHITELIST:
+ default:
+ break;
+ }
+}
+
+void mod_build_adaptive_sync_infopacket_v1(struct dc_info_packet *info_packet)
+{
+ info_packet->valid = true;
+ // HEADER {HB0, HB1, HB2, HB3} = {00, Type, Version, Length}
+ info_packet->hb0 = 0x00;
+ info_packet->hb1 = 0x22;
+ info_packet->hb2 = AS_SDP_VER_1;
+ info_packet->hb3 = 0x00;
+}
+
+void mod_build_adaptive_sync_infopacket_v2(const struct dc_stream_state *stream,
+ const struct AS_Df_params *param,
+ struct dc_info_packet *info_packet)
+{
+ info_packet->valid = true;
+ // HEADER {HB0, HB1, HB2, HB3} = {00, Type, Version, Length}
+ info_packet->hb0 = 0x00;
+ info_packet->hb1 = 0x22;
+ info_packet->hb2 = AS_SDP_VER_2;
+ info_packet->hb3 = AS_DP_SDP_LENGTH;
+
+ //Payload
+ info_packet->sb[0] = param->supportMode; //1: AVT; 0: FAVT
+ info_packet->sb[1] = (stream->timing.v_total & 0x00FF);
+ info_packet->sb[2] = (stream->timing.v_total & 0xFF00) >> 8;
+ //info_packet->sb[3] = 0x00; Target RR, not use fot AVT
+ info_packet->sb[4] = (param->increase.support << 6 | param->decrease.support << 7);
+ info_packet->sb[5] = param->increase.frame_duration_hex;
+ info_packet->sb[6] = param->decrease.frame_duration_hex;
+}
+
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
index 9b5d9b2c9a6a..e39b133d05af 100644
--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
@@ -916,3 +916,34 @@ bool mod_power_only_edp(const struct dc_state *context, const struct dc_stream_s
{
return context && context->stream_count == 1 && dc_is_embedded_signal(stream->signal);
}
+
+bool psr_su_set_dsc_slice_height(struct dc *dc, struct dc_link *link,
+ struct dc_stream_state *stream,
+ struct psr_config *config)
+{
+ uint16_t pic_height;
+ uint16_t slice_height;
+
+ config->dsc_slice_height = 0;
+ if ((link->connector_signal & SIGNAL_TYPE_EDP) &&
+ (!dc->caps.edp_dsc_support ||
+ link->panel_config.dsc.disable_dsc_edp ||
+ !link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT ||
+ !stream->timing.dsc_cfg.num_slices_v))
+ return true;
+
+ pic_height = stream->timing.v_addressable +
+ stream->timing.v_border_top + stream->timing.v_border_bottom;
+ slice_height = pic_height / stream->timing.dsc_cfg.num_slices_v;
+ config->dsc_slice_height = slice_height;
+
+ if (slice_height) {
+ if (config->su_y_granularity &&
+ (slice_height % config->su_y_granularity)) {
+ ASSERT(0);
+ return false;
+ }
+ }
+
+ return true;
+}
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
index 316452e9dbc9..1d3079e56799 100644
--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
@@ -59,4 +59,7 @@ void mod_power_calc_psr_configs(struct psr_config *psr_config,
const struct dc_stream_state *stream);
bool mod_power_only_edp(const struct dc_state *context,
const struct dc_stream_state *stream);
+bool psr_su_set_dsc_slice_height(struct dc *dc, struct dc_link *link,
+ struct dc_stream_state *stream,
+ struct psr_config *config);
#endif /* MODULES_POWER_POWER_HELPERS_H_ */
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index f175e65b853a..e4a22c68517d 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -240,6 +240,7 @@ enum DC_FEATURE_MASK {
DC_DISABLE_LTTPR_DP2_0 = (1 << 6), //0x40, disabled by default
DC_PSR_ALLOW_SMU_OPT = (1 << 7), //0x80, disabled by default
DC_PSR_ALLOW_MULTI_DISP_OPT = (1 << 8), //0x100, disabled by default
+ DC_ENABLE_SUBVP_DRR = (1 << 9), // 0x200, disabled by default
};
enum DC_DEBUG_MASK {
diff --git a/drivers/gpu/drm/amd/include/asic_reg/df/df_4_3_offset.h b/drivers/gpu/drm/amd/include/asic_reg/df/df_4_3_offset.h
new file mode 100644
index 000000000000..fbb18e44ec52
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/df/df_4_3_offset.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _df_4_3_OFFSET_HEADER
+#define _df_4_3_OFFSET_HEADER
+
+#define regDF_CS_UMC_AON0_HardwareAssertMaskLow 0x0e3e
+#define regDF_CS_UMC_AON0_HardwareAssertMaskLow_BASE_IDX 4
+#define regDF_NCS_PG0_HardwareAssertMaskHigh 0x0e3f
+#define regDF_NCS_PG0_HardwareAssertMaskHigh_BASE_IDX 4
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/df/df_4_3_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/df/df_4_3_sh_mask.h
new file mode 100644
index 000000000000..9c8f19ded4eb
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/df/df_4_3_sh_mask.h
@@ -0,0 +1,157 @@
+/*
+ * Copyright (C) 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _df_4_3_SH_MASK_HEADER
+#define _df_4_3_SH_MASK_HEADER
+
+//DF_CS_UMC_AON0_HardwareAssertMaskLow
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk0__SHIFT 0x0
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk1__SHIFT 0x1
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk2__SHIFT 0x2
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk3__SHIFT 0x3
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk4__SHIFT 0x4
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk5__SHIFT 0x5
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk6__SHIFT 0x6
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk7__SHIFT 0x7
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk8__SHIFT 0x8
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk9__SHIFT 0x9
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk10__SHIFT 0xa
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk11__SHIFT 0xb
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk12__SHIFT 0xc
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk13__SHIFT 0xd
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk14__SHIFT 0xe
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk15__SHIFT 0xf
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk16__SHIFT 0x10
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk17__SHIFT 0x11
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk18__SHIFT 0x12
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk19__SHIFT 0x13
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk20__SHIFT 0x14
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk21__SHIFT 0x15
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk22__SHIFT 0x16
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk23__SHIFT 0x17
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk24__SHIFT 0x18
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk25__SHIFT 0x19
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk26__SHIFT 0x1a
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk27__SHIFT 0x1b
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk28__SHIFT 0x1c
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk29__SHIFT 0x1d
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk30__SHIFT 0x1e
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk31__SHIFT 0x1f
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk0_MASK 0x00000001L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk1_MASK 0x00000002L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk2_MASK 0x00000004L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk3_MASK 0x00000008L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk4_MASK 0x00000010L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk5_MASK 0x00000020L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk6_MASK 0x00000040L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk7_MASK 0x00000080L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk8_MASK 0x00000100L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk9_MASK 0x00000200L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk10_MASK 0x00000400L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk11_MASK 0x00000800L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk12_MASK 0x00001000L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk13_MASK 0x00002000L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk14_MASK 0x00004000L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk15_MASK 0x00008000L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk16_MASK 0x00010000L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk17_MASK 0x00020000L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk18_MASK 0x00040000L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk19_MASK 0x00080000L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk20_MASK 0x00100000L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk21_MASK 0x00200000L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk22_MASK 0x00400000L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk23_MASK 0x00800000L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk24_MASK 0x01000000L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk25_MASK 0x02000000L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk26_MASK 0x04000000L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk27_MASK 0x08000000L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk28_MASK 0x10000000L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk29_MASK 0x20000000L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk30_MASK 0x40000000L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk31_MASK 0x80000000L
+
+//DF_NCS_PG0_HardwareAssertMaskHigh
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk0__SHIFT 0x0
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk1__SHIFT 0x1
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk2__SHIFT 0x2
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk3__SHIFT 0x3
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk4__SHIFT 0x4
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk5__SHIFT 0x5
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk6__SHIFT 0x6
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk7__SHIFT 0x7
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk8__SHIFT 0x8
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk9__SHIFT 0x9
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk10__SHIFT 0xa
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk11__SHIFT 0xb
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk12__SHIFT 0xc
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk13__SHIFT 0xd
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk14__SHIFT 0xe
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk15__SHIFT 0xf
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk16__SHIFT 0x10
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk17__SHIFT 0x11
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk18__SHIFT 0x12
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk19__SHIFT 0x13
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk20__SHIFT 0x14
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk21__SHIFT 0x15
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk22__SHIFT 0x16
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk23__SHIFT 0x17
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk24__SHIFT 0x18
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk25__SHIFT 0x19
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk26__SHIFT 0x1a
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk27__SHIFT 0x1b
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk28__SHIFT 0x1c
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk29__SHIFT 0x1d
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk30__SHIFT 0x1e
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk31__SHIFT 0x1f
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk0_MASK 0x00000001L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk1_MASK 0x00000002L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk2_MASK 0x00000004L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk3_MASK 0x00000008L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk4_MASK 0x00000010L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk5_MASK 0x00000020L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk6_MASK 0x00000040L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk7_MASK 0x00000080L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk8_MASK 0x00000100L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk9_MASK 0x00000200L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk10_MASK 0x00000400L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk11_MASK 0x00000800L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk12_MASK 0x00001000L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk13_MASK 0x00002000L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk14_MASK 0x00004000L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk15_MASK 0x00008000L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk16_MASK 0x00010000L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk17_MASK 0x00020000L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk18_MASK 0x00040000L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk19_MASK 0x00080000L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk20_MASK 0x00100000L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk21_MASK 0x00200000L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk22_MASK 0x00400000L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk23_MASK 0x00800000L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk24_MASK 0x01000000L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk25_MASK 0x02000000L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk26_MASK 0x04000000L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk27_MASK 0x08000000L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk28_MASK 0x10000000L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk29_MASK 0x20000000L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk30_MASK 0x40000000L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk31_MASK 0x80000000L
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_3_offset.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_3_offset.h
index 3b95a59b196c..56e00252bff8 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_3_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_3_offset.h
@@ -3593,6 +3593,14 @@
#define regGCL2TLB_PERFCOUNTER_RSLT_CNTL_BASE_IDX 1
+// addressBlock: gc_rlcsdec
+// base address: 0x3b980
+#define regRLC_RLCS_FED_STATUS_0 0x4eff
+#define regRLC_RLCS_FED_STATUS_0_BASE_IDX 1
+#define regRLC_RLCS_FED_STATUS_1 0x4f00
+#define regRLC_RLCS_FED_STATUS_1_BASE_IDX 1
+
+
// addressBlock: gc_gcvml2pspdec
// base address: 0x3f900
#define regGCUTCL2_TRANSLATION_BYPASS_BY_VMID 0x5e41
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_3_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_3_sh_mask.h
index ae3ef8a9e702..658e88a8e2ac 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_3_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_11_0_3_sh_mask.h
@@ -37642,6 +37642,56 @@
#define RLC_RLCG_DOORBELL_RANGE__LOWER_ADDR_MASK 0x00000FFCL
#define RLC_RLCG_DOORBELL_RANGE__UPPER_ADDR_RESERVED_MASK 0x00030000L
#define RLC_RLCG_DOORBELL_RANGE__UPPER_ADDR_MASK 0x0FFC0000L
+//RLC_RLCS_FED_STATUS_0
+#define RLC_RLCS_FED_STATUS_0__RLC_FED_ERR__SHIFT 0x0
+#define RLC_RLCS_FED_STATUS_0__UTCL2_FED_ERR__SHIFT 0x1
+#define RLC_RLCS_FED_STATUS_0__GE_FED_ERR__SHIFT 0x2
+#define RLC_RLCS_FED_STATUS_0__CPC_FED_ERR__SHIFT 0x3
+#define RLC_RLCS_FED_STATUS_0__CPF_FED_ERR__SHIFT 0x4
+#define RLC_RLCS_FED_STATUS_0__CPG_FED_ERR__SHIFT 0x5
+#define RLC_RLCS_FED_STATUS_0__SDMA0_FED_ERR__SHIFT 0x6
+#define RLC_RLCS_FED_STATUS_0__SDMA1_FED_ERR__SHIFT 0x7
+#define RLC_RLCS_FED_STATUS_0__RLC_FED_ERR_MASK 0x00000001L
+#define RLC_RLCS_FED_STATUS_0__UTCL2_FED_ERR_MASK 0x00000002L
+#define RLC_RLCS_FED_STATUS_0__GE_FED_ERR_MASK 0x00000004L
+#define RLC_RLCS_FED_STATUS_0__CPC_FED_ERR_MASK 0x00000008L
+#define RLC_RLCS_FED_STATUS_0__CPF_FED_ERR_MASK 0x00000010L
+#define RLC_RLCS_FED_STATUS_0__CPG_FED_ERR_MASK 0x00000020L
+#define RLC_RLCS_FED_STATUS_0__SDMA0_FED_ERR_MASK 0x00000040L
+#define RLC_RLCS_FED_STATUS_0__SDMA1_FED_ERR_MASK 0x00000080L
+//RLC_RLCS_FED_STATUS_1
+#define RLC_RLCS_FED_STATUS_1__GL2C0_FED_ERR__SHIFT 0x0
+#define RLC_RLCS_FED_STATUS_1__GL2C1_FED_ERR__SHIFT 0x1
+#define RLC_RLCS_FED_STATUS_1__GL2C2_FED_ERR__SHIFT 0x2
+#define RLC_RLCS_FED_STATUS_1__GL2C3_FED_ERR__SHIFT 0x3
+#define RLC_RLCS_FED_STATUS_1__GL2C4_FED_ERR__SHIFT 0x4
+#define RLC_RLCS_FED_STATUS_1__GL2C5_FED_ERR__SHIFT 0x5
+#define RLC_RLCS_FED_STATUS_1__GL2C6_FED_ERR__SHIFT 0x6
+#define RLC_RLCS_FED_STATUS_1__GL2C7_FED_ERR__SHIFT 0x7
+#define RLC_RLCS_FED_STATUS_1__GL2C8_FED_ERR__SHIFT 0x8
+#define RLC_RLCS_FED_STATUS_1__GL2C9_FED_ERR__SHIFT 0x9
+#define RLC_RLCS_FED_STATUS_1__GL2C10_FED_ERR__SHIFT 0xa
+#define RLC_RLCS_FED_STATUS_1__GL2C11_FED_ERR__SHIFT 0xb
+#define RLC_RLCS_FED_STATUS_1__GL2C12_FED_ERR__SHIFT 0xc
+#define RLC_RLCS_FED_STATUS_1__GL2C13_FED_ERR__SHIFT 0xd
+#define RLC_RLCS_FED_STATUS_1__GL2C14_FED_ERR__SHIFT 0xe
+#define RLC_RLCS_FED_STATUS_1__GL2C15_FED_ERR__SHIFT 0xf
+#define RLC_RLCS_FED_STATUS_1__GL2C0_FED_ERR_MASK 0x00000001L
+#define RLC_RLCS_FED_STATUS_1__GL2C1_FED_ERR_MASK 0x00000002L
+#define RLC_RLCS_FED_STATUS_1__GL2C2_FED_ERR_MASK 0x00000004L
+#define RLC_RLCS_FED_STATUS_1__GL2C3_FED_ERR_MASK 0x00000008L
+#define RLC_RLCS_FED_STATUS_1__GL2C4_FED_ERR_MASK 0x00000010L
+#define RLC_RLCS_FED_STATUS_1__GL2C5_FED_ERR_MASK 0x00000020L
+#define RLC_RLCS_FED_STATUS_1__GL2C6_FED_ERR_MASK 0x00000040L
+#define RLC_RLCS_FED_STATUS_1__GL2C7_FED_ERR_MASK 0x00000080L
+#define RLC_RLCS_FED_STATUS_1__GL2C8_FED_ERR_MASK 0x00000100L
+#define RLC_RLCS_FED_STATUS_1__GL2C9_FED_ERR_MASK 0x00000200L
+#define RLC_RLCS_FED_STATUS_1__GL2C10_FED_ERR_MASK 0x00000400L
+#define RLC_RLCS_FED_STATUS_1__GL2C11_FED_ERR_MASK 0x00000800L
+#define RLC_RLCS_FED_STATUS_1__GL2C12_FED_ERR_MASK 0x00001000L
+#define RLC_RLCS_FED_STATUS_1__GL2C13_FED_ERR_MASK 0x00002000L
+#define RLC_RLCS_FED_STATUS_1__GL2C14_FED_ERR_MASK 0x00004000L
+#define RLC_RLCS_FED_STATUS_1__GL2C15_FED_ERR_MASK 0x00008000L
//RLC_CGTT_MGCG_OVERRIDE
#define RLC_CGTT_MGCG_OVERRIDE__RLC_REPEATER_FGCG_OVERRIDE__SHIFT 0x0
#define RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE__SHIFT 0x1
diff --git a/drivers/gpu/drm/amd/include/asic_reg/xgmi/xgmi_6_1_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/xgmi/xgmi_6_1_0_sh_mask.h
new file mode 100644
index 000000000000..c6c0cf1376a6
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/xgmi/xgmi_6_1_0_sh_mask.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _xgmi_6_1_0_SH_MASK_HEADER
+#define _xgmi_6_1_0_SH_MASK_HEADER
+
+//PCS_XGMI3X16_PCS_ERROR_STATUS
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__DataLossErr__SHIFT 0x0
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__TrainingErr__SHIFT 0x1
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__FlowCtrlAckErr__SHIFT 0x2
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__RxFifoUnderflowErr__SHIFT 0x3
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__RxFifoOverflowErr__SHIFT 0x4
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__CRCErr__SHIFT 0x5
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__BERExceededErr__SHIFT 0x6
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__TxVcidDataErr__SHIFT 0x7
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__ReplayBufParityErr__SHIFT 0x8
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__DataParityErr__SHIFT 0x9
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__ReplayFifoOverflowErr__SHIFT 0xa
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__ReplayFifoUnderflowErr__SHIFT 0xb
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__ElasticFifoOverflowErr__SHIFT 0xc
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__DeskewErr__SHIFT 0xd
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__FlowCtrlCRCErr__SHIFT 0xe
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__DataStartupLimitErr__SHIFT 0xf
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__FCInitTimeoutErr__SHIFT 0x10
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__RecoveryTimeoutErr__SHIFT 0x11
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__ReadySerialTimeoutErr__SHIFT 0x12
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__ReadySerialAttemptErr__SHIFT 0x13
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__RecoveryAttemptErr__SHIFT 0x14
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__RecoveryRelockAttemptErr__SHIFT 0x15
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__ReplayAttemptErr__SHIFT 0x16
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__SyncHdrErr__SHIFT 0x17
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__TxReplayTimeoutErr__SHIFT 0x18
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__RxReplayTimeoutErr__SHIFT 0x19
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__LinkSubTxTimeoutErr__SHIFT 0x1a
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__LinkSubRxTimeoutErr__SHIFT 0x1b
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__RxCMDPktErr__SHIFT 0x1c
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__DataLossErr_MASK 0x00000001L
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__TrainingErr_MASK 0x00000002L
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__FlowCtrlAckErr_MASK 0x00000004L
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__RxFifoUnderflowErr_MASK 0x00000008L
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__RxFifoOverflowErr_MASK 0x00000010L
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__CRCErr_MASK 0x00000020L
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__BERExceededErr_MASK 0x00000040L
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__TxVcidDataErr_MASK 0x00000080L
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__ReplayBufParityErr_MASK 0x00000100L
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__DataParityErr_MASK 0x00000200L
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__ReplayFifoOverflowErr_MASK 0x00000400L
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__ReplayFifoUnderflowErr_MASK 0x00000800L
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__ElasticFifoOverflowErr_MASK 0x00001000L
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__DeskewErr_MASK 0x00002000L
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__FlowCtrlCRCErr_MASK 0x00004000L
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__DataStartupLimitErr_MASK 0x00008000L
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__FCInitTimeoutErr_MASK 0x00010000L
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__RecoveryTimeoutErr_MASK 0x00020000L
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__ReadySerialTimeoutErr_MASK 0x00040000L
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__ReadySerialAttemptErr_MASK 0x00080000L
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__RecoveryAttemptErr_MASK 0x00100000L
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__RecoveryRelockAttemptErr_MASK 0x00200000L
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__ReplayAttemptErr_MASK 0x00400000L
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__SyncHdrErr_MASK 0x00800000L
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__TxReplayTimeoutErr_MASK 0x01000000L
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__RxReplayTimeoutErr_MASK 0x02000000L
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__LinkSubTxTimeoutErr_MASK 0x04000000L
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__LinkSubRxTimeoutErr_MASK 0x08000000L
+#define PCS_XGMI3X16_PCS_ERROR_STATUS__RxCMDPktErr_MASK 0x10000000L
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/ivsrcid/gfx/irqsrcs_gfx_11_0_0.h b/drivers/gpu/drm/amd/include/ivsrcid/gfx/irqsrcs_gfx_11_0_0.h
index 9e8ed9f4bb15..3a4670bc4449 100644
--- a/drivers/gpu/drm/amd/include/ivsrcid/gfx/irqsrcs_gfx_11_0_0.h
+++ b/drivers/gpu/drm/amd/include/ivsrcid/gfx/irqsrcs_gfx_11_0_0.h
@@ -49,6 +49,8 @@
#define GFX_11_0_0__SRCID__SDMA_SEM_INCOMPLETE_TIMEOUT 65 // 0x41 GPF(Sem incomplete timeout)
#define GFX_11_0_0__SRCID__SDMA_SEM_WAIT_FAIL_TIMEOUT 66 // 0x42 Semaphore wait fail timeout
+#define GFX_11_0_0__SRCID__RLC_GC_FED_INTERRUPT 128 // 0x80 FED Interrupt (for data poisoning)
+
#define GFX_11_0_0__SRCID__CP_GENERIC_INT 177 // 0xB1 CP_GENERIC int
#define GFX_11_0_0__SRCID__CP_PM4_PKT_RSVD_BIT_ERROR 180 // 0xB4 PM4 Pkt Rsvd Bits Error
#define GFX_11_0_0__SRCID__CP_EOP_INTERRUPT 181 // 0xB5 End-of-Pipe Interrupt
diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
index d18162e9ed1d..75f18791cdb9 100644
--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
@@ -139,6 +139,8 @@ enum amd_pp_sensors {
AMDGPU_PP_SENSOR_MIN_FAN_RPM,
AMDGPU_PP_SENSOR_MAX_FAN_RPM,
AMDGPU_PP_SENSOR_VCN_POWER_STATE,
+ AMDGPU_PP_SENSOR_PEAK_PSTATE_SCLK,
+ AMDGPU_PP_SENSOR_PEAK_PSTATE_MCLK,
};
enum amd_pp_task {
@@ -395,6 +397,7 @@ struct amd_pm_funcs {
int (*get_ppfeature_status)(void *handle, char *buf);
int (*set_ppfeature_status)(void *handle, uint64_t ppfeature_masks);
int (*asic_reset_mode_2)(void *handle);
+ int (*asic_reset_enable_gfx_features)(void *handle);
int (*set_df_cstate)(void *handle, enum pp_df_cstate state);
int (*set_xgmi_pstate)(void *handle, uint32_t pstate);
ssize_t (*get_gpu_metrics)(void *handle, void **table);
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
index 1b300c569faf..6e79d3352d0b 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
@@ -227,6 +227,24 @@ int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev)
return ret;
}
+int amdgpu_dpm_enable_gfx_features(struct amdgpu_device *adev)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ void *pp_handle = adev->powerplay.pp_handle;
+ int ret = 0;
+
+ if (!pp_funcs || !pp_funcs->asic_reset_enable_gfx_features)
+ return -ENOENT;
+
+ mutex_lock(&adev->pm.mutex);
+
+ ret = pp_funcs->asic_reset_enable_gfx_features(pp_handle);
+
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
int amdgpu_dpm_baco_reset(struct amdgpu_device *adev)
{
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index 236657eece47..bf6d63673b5a 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -1991,6 +1991,8 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
case IP_VERSION(9, 4, 2):
case IP_VERSION(10, 3, 0):
case IP_VERSION(11, 0, 0):
+ case IP_VERSION(11, 0, 1):
+ case IP_VERSION(11, 0, 2):
*states = ATTR_STATE_SUPPORTED;
break;
default:
@@ -2007,14 +2009,16 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
gc_ver == IP_VERSION(10, 3, 0) ||
gc_ver == IP_VERSION(10, 1, 2) ||
gc_ver == IP_VERSION(11, 0, 0) ||
- gc_ver == IP_VERSION(11, 0, 2)))
+ gc_ver == IP_VERSION(11, 0, 2) ||
+ gc_ver == IP_VERSION(11, 0, 3)))
*states = ATTR_STATE_UNSUPPORTED;
} else if (DEVICE_ATTR_IS(pp_dpm_dclk)) {
if (!(gc_ver == IP_VERSION(10, 3, 1) ||
gc_ver == IP_VERSION(10, 3, 0) ||
gc_ver == IP_VERSION(10, 1, 2) ||
gc_ver == IP_VERSION(11, 0, 0) ||
- gc_ver == IP_VERSION(11, 0, 2)))
+ gc_ver == IP_VERSION(11, 0, 2) ||
+ gc_ver == IP_VERSION(11, 0, 3)))
*states = ATTR_STATE_UNSUPPORTED;
} else if (DEVICE_ATTR_IS(pp_power_profile_mode)) {
if (amdgpu_dpm_get_power_profile_mode(adev, NULL) == -EOPNOTSUPP)
@@ -3059,7 +3063,7 @@ static ssize_t amdgpu_hwmon_show_mclk_label(struct device *dev,
*
* hwmon interfaces for GPU power:
*
- * - power1_average: average power used by the GPU in microWatts
+ * - power1_average: average power used by the SoC in microWatts. On APUs this includes the CPU.
*
* - power1_cap_min: minimum cap supported in microWatts
*
diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
index cb5b9df78b4d..16addceca68f 100644
--- a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
+++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
@@ -386,6 +386,7 @@ int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev,
int amdgpu_dpm_baco_reset(struct amdgpu_device *adev);
int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev);
+int amdgpu_dpm_enable_gfx_features(struct amdgpu_device *adev);
bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
index 49c398ec0aaf..d6d9e3b1b2c0 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
@@ -7714,20 +7714,13 @@ static int si_dpm_init_microcode(struct amdgpu_device *adev)
}
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_smc.bin", chip_name);
- err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
- if (err)
- goto out;
- err = amdgpu_ucode_validate(adev->pm.fw);
-
-out:
+ err = amdgpu_ucode_request(adev, &adev->pm.fw, fw_name);
if (err) {
DRM_ERROR("si_smc: Failed to load firmware. err = %d\"%s\"\n",
err, fw_name);
- release_firmware(adev->pm.fw);
- adev->pm.fw = NULL;
+ amdgpu_ucode_release(&adev->pm.fw);
}
return err;
-
}
static int si_dpm_sw_init(void *handle)
diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
index 304190d5c9d2..11b7b4cffaae 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
@@ -111,8 +111,7 @@ static int pp_sw_fini(void *handle)
hwmgr_sw_fini(hwmgr);
- release_firmware(adev->pm.fw);
- adev->pm.fw = NULL;
+ amdgpu_ucode_release(&adev->pm.fw);
return 0;
}
@@ -769,10 +768,16 @@ static int pp_dpm_read_sensor(void *handle, int idx,
switch (idx) {
case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
- *((uint32_t *)value) = hwmgr->pstate_sclk;
+ *((uint32_t *)value) = hwmgr->pstate_sclk * 100;
return 0;
case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
- *((uint32_t *)value) = hwmgr->pstate_mclk;
+ *((uint32_t *)value) = hwmgr->pstate_mclk * 100;
+ return 0;
+ case AMDGPU_PP_SENSOR_PEAK_PSTATE_SCLK:
+ *((uint32_t *)value) = hwmgr->pstate_sclk_peak * 100;
+ return 0;
+ case AMDGPU_PP_SENSOR_PEAK_PSTATE_MCLK:
+ *((uint32_t *)value) = hwmgr->pstate_mclk_peak * 100;
return 0;
case AMDGPU_PP_SENSOR_MIN_FAN_RPM:
*((uint32_t *)value) = hwmgr->thermal_controller.fanInfo.ulMinRPM;
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
index ede71de2343d..86d6e88c7386 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
@@ -375,6 +375,17 @@ static int smu10_enable_gfx_off(struct pp_hwmgr *hwmgr)
return 0;
}
+static void smu10_populate_umdpstate_clocks(struct pp_hwmgr *hwmgr)
+{
+ hwmgr->pstate_sclk = SMU10_UMD_PSTATE_GFXCLK;
+ hwmgr->pstate_mclk = SMU10_UMD_PSTATE_FCLK;
+
+ smum_send_msg_to_smc(hwmgr,
+ PPSMC_MSG_GetMaxGfxclkFrequency,
+ &hwmgr->pstate_sclk_peak);
+ hwmgr->pstate_mclk_peak = SMU10_UMD_PSTATE_PEAK_FCLK;
+}
+
static int smu10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
struct amdgpu_device *adev = hwmgr->adev;
@@ -398,6 +409,8 @@ static int smu10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
return ret;
}
+ smu10_populate_umdpstate_clocks(hwmgr);
+
return 0;
}
@@ -574,9 +587,6 @@ static int smu10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;
- hwmgr->pstate_sclk = SMU10_UMD_PSTATE_GFXCLK * 100;
- hwmgr->pstate_mclk = SMU10_UMD_PSTATE_FCLK * 100;
-
/* enable the pp_od_clk_voltage sysfs file */
hwmgr->od_enabled = 1;
/* disabled fine grain tuning function by default */
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
index 7ef7e81525a3..e10cc5e7928e 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
@@ -22,7 +22,6 @@
*/
#include "pp_debug.h"
#include <linux/delay.h>
-#include <linux/fb.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
@@ -1501,6 +1500,67 @@ static int smu7_populate_edc_leakage_registers(struct pp_hwmgr *hwmgr)
return ret;
}
+static void smu7_populate_umdpstate_clocks(struct pp_hwmgr *hwmgr)
+{
+ struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+ struct smu7_dpm_table *golden_dpm_table = &data->golden_dpm_table;
+ int32_t tmp_sclk, count, percentage;
+
+ if (golden_dpm_table->mclk_table.count == 1) {
+ percentage = 70;
+ hwmgr->pstate_mclk = golden_dpm_table->mclk_table.dpm_levels[0].value;
+ } else {
+ percentage = 100 * golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count - 1].value /
+ golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 1].value;
+ hwmgr->pstate_mclk = golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 2].value;
+ }
+
+ tmp_sclk = hwmgr->pstate_mclk * percentage / 100;
+
+ if (hwmgr->pp_table_version == PP_TABLE_V0) {
+ struct phm_clock_voltage_dependency_table *vddc_dependency_on_sclk =
+ hwmgr->dyn_state.vddc_dependency_on_sclk;
+
+ for (count = vddc_dependency_on_sclk->count - 1; count >= 0; count--) {
+ if (tmp_sclk >= vddc_dependency_on_sclk->entries[count].clk) {
+ hwmgr->pstate_sclk = vddc_dependency_on_sclk->entries[count].clk;
+ break;
+ }
+ }
+ if (count < 0)
+ hwmgr->pstate_sclk = vddc_dependency_on_sclk->entries[0].clk;
+
+ hwmgr->pstate_sclk_peak =
+ vddc_dependency_on_sclk->entries[vddc_dependency_on_sclk->count - 1].clk;
+ } else if (hwmgr->pp_table_version == PP_TABLE_V1) {
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)(hwmgr->pptable);
+ struct phm_ppt_v1_clock_voltage_dependency_table *vdd_dep_on_sclk =
+ table_info->vdd_dep_on_sclk;
+
+ for (count = vdd_dep_on_sclk->count - 1; count >= 0; count--) {
+ if (tmp_sclk >= vdd_dep_on_sclk->entries[count].clk) {
+ hwmgr->pstate_sclk = vdd_dep_on_sclk->entries[count].clk;
+ break;
+ }
+ }
+ if (count < 0)
+ hwmgr->pstate_sclk = vdd_dep_on_sclk->entries[0].clk;
+
+ hwmgr->pstate_sclk_peak =
+ vdd_dep_on_sclk->entries[vdd_dep_on_sclk->count - 1].clk;
+ }
+
+ hwmgr->pstate_mclk_peak =
+ golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 1].value;
+
+ /* make sure the output is in Mhz */
+ hwmgr->pstate_sclk /= 100;
+ hwmgr->pstate_mclk /= 100;
+ hwmgr->pstate_sclk_peak /= 100;
+ hwmgr->pstate_mclk_peak /= 100;
+}
+
static int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
int tmp_result = 0;
@@ -1625,6 +1685,8 @@ static int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE((0 == tmp_result),
"pcie performance request failed!", result = tmp_result);
+ smu7_populate_umdpstate_clocks(hwmgr);
+
return 0;
}
@@ -3143,15 +3205,12 @@ static int smu7_get_profiling_clk(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_le
for (count = hwmgr->dyn_state.vddc_dependency_on_sclk->count-1;
count >= 0; count--) {
if (tmp_sclk >= hwmgr->dyn_state.vddc_dependency_on_sclk->entries[count].clk) {
- tmp_sclk = hwmgr->dyn_state.vddc_dependency_on_sclk->entries[count].clk;
*sclk_mask = count;
break;
}
}
- if (count < 0 || level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
+ if (count < 0 || level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK)
*sclk_mask = 0;
- tmp_sclk = hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].clk;
- }
if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
*sclk_mask = hwmgr->dyn_state.vddc_dependency_on_sclk->count-1;
@@ -3161,15 +3220,12 @@ static int smu7_get_profiling_clk(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_le
for (count = table_info->vdd_dep_on_sclk->count-1; count >= 0; count--) {
if (tmp_sclk >= table_info->vdd_dep_on_sclk->entries[count].clk) {
- tmp_sclk = table_info->vdd_dep_on_sclk->entries[count].clk;
*sclk_mask = count;
break;
}
}
- if (count < 0 || level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
+ if (count < 0 || level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK)
*sclk_mask = 0;
- tmp_sclk = table_info->vdd_dep_on_sclk->entries[0].clk;
- }
if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
*sclk_mask = table_info->vdd_dep_on_sclk->count - 1;
@@ -3181,8 +3237,6 @@ static int smu7_get_profiling_clk(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_le
*mclk_mask = golden_dpm_table->mclk_table.count - 1;
*pcie_mask = data->dpm_table.pcie_speed_table.count - 1;
- hwmgr->pstate_sclk = tmp_sclk;
- hwmgr->pstate_mclk = tmp_mclk;
return 0;
}
@@ -3195,9 +3249,6 @@ static int smu7_force_dpm_level(struct pp_hwmgr *hwmgr,
uint32_t mclk_mask = 0;
uint32_t pcie_mask = 0;
- if (hwmgr->pstate_sclk == 0)
- smu7_get_profiling_clk(hwmgr, level, &sclk_mask, &mclk_mask, &pcie_mask);
-
switch (level) {
case AMD_DPM_FORCED_LEVEL_HIGH:
ret = smu7_force_dpm_highest(hwmgr);
@@ -4153,7 +4204,7 @@ static int smu7_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
if ((0 == data->sclk_dpm_key_disabled) &&
(data->need_update_smu7_dpm_table &
- (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
+ (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
"Trying to freeze SCLK DPM when DPM is disabled",
);
@@ -4210,7 +4261,7 @@ static int smu7_populate_and_upload_sclk_mclk_dpm_levels(
}
if (data->need_update_smu7_dpm_table &
- (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK)) {
+ (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK)) {
result = smum_populate_all_graphic_levels(hwmgr);
PP_ASSERT_WITH_CODE((0 == result),
"Failed to populate SCLK during PopulateNewDPMClocksStates Function!",
@@ -4218,7 +4269,7 @@ static int smu7_populate_and_upload_sclk_mclk_dpm_levels(
}
if (data->need_update_smu7_dpm_table &
- (DPMTABLE_OD_UPDATE_MCLK + DPMTABLE_UPDATE_MCLK)) {
+ (DPMTABLE_OD_UPDATE_MCLK | DPMTABLE_UPDATE_MCLK)) {
/*populate MCLK dpm table to SMU7 */
result = smum_populate_all_memory_levels(hwmgr);
PP_ASSERT_WITH_CODE((0 == result),
@@ -4309,7 +4360,7 @@ static int smu7_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
if ((0 == data->sclk_dpm_key_disabled) &&
(data->need_update_smu7_dpm_table &
- (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
+ (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
"Trying to Unfreeze SCLK DPM when DPM is disabled",
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
index b50fd4a4a3d1..b015a601b385 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
@@ -1016,6 +1016,18 @@ static void smu8_reset_acp_boot_level(struct pp_hwmgr *hwmgr)
data->acp_boot_level = 0xff;
}
+static void smu8_populate_umdpstate_clocks(struct pp_hwmgr *hwmgr)
+{
+ struct phm_clock_voltage_dependency_table *table =
+ hwmgr->dyn_state.vddc_dependency_on_sclk;
+
+ hwmgr->pstate_sclk = table->entries[0].clk / 100;
+ hwmgr->pstate_mclk = 0;
+
+ hwmgr->pstate_sclk_peak = table->entries[table->count - 1].clk / 100;
+ hwmgr->pstate_mclk_peak = 0;
+}
+
static int smu8_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
smu8_program_voting_clients(hwmgr);
@@ -1024,6 +1036,8 @@ static int smu8_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
smu8_program_bootup_state(hwmgr);
smu8_reset_acp_boot_level(hwmgr);
+ smu8_populate_umdpstate_clocks(hwmgr);
+
return 0;
}
@@ -1167,8 +1181,6 @@ static int smu8_phm_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
data->sclk_dpm.soft_min_clk = table->entries[0].clk;
data->sclk_dpm.hard_min_clk = table->entries[0].clk;
- hwmgr->pstate_sclk = table->entries[0].clk;
- hwmgr->pstate_mclk = 0;
level = smu8_get_max_sclk_level(hwmgr) - 1;
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
index c8c9fb827bda..99cd2e63afdd 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
@@ -22,7 +22,6 @@
*/
#include <linux/delay.h>
-#include <linux/fb.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
@@ -3008,6 +3007,30 @@ static int vega10_enable_disable_PCC_limit_feature(struct pp_hwmgr *hwmgr, bool
return 0;
}
+static void vega10_populate_umdpstate_clocks(struct pp_hwmgr *hwmgr)
+{
+ struct phm_ppt_v2_information *table_info =
+ (struct phm_ppt_v2_information *)(hwmgr->pptable);
+
+ if (table_info->vdd_dep_on_sclk->count > VEGA10_UMD_PSTATE_GFXCLK_LEVEL &&
+ table_info->vdd_dep_on_mclk->count > VEGA10_UMD_PSTATE_MCLK_LEVEL) {
+ hwmgr->pstate_sclk = table_info->vdd_dep_on_sclk->entries[VEGA10_UMD_PSTATE_GFXCLK_LEVEL].clk;
+ hwmgr->pstate_mclk = table_info->vdd_dep_on_mclk->entries[VEGA10_UMD_PSTATE_MCLK_LEVEL].clk;
+ } else {
+ hwmgr->pstate_sclk = table_info->vdd_dep_on_sclk->entries[0].clk;
+ hwmgr->pstate_mclk = table_info->vdd_dep_on_mclk->entries[0].clk;
+ }
+
+ hwmgr->pstate_sclk_peak = table_info->vdd_dep_on_sclk->entries[table_info->vdd_dep_on_sclk->count - 1].clk;
+ hwmgr->pstate_mclk_peak = table_info->vdd_dep_on_mclk->entries[table_info->vdd_dep_on_mclk->count - 1].clk;
+
+ /* make sure the output is in Mhz */
+ hwmgr->pstate_sclk /= 100;
+ hwmgr->pstate_mclk /= 100;
+ hwmgr->pstate_sclk_peak /= 100;
+ hwmgr->pstate_mclk_peak /= 100;
+}
+
static int vega10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
struct vega10_hwmgr *data = hwmgr->backend;
@@ -3082,6 +3105,8 @@ static int vega10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
result = tmp_result);
}
+ vega10_populate_umdpstate_clocks(hwmgr);
+
return result;
}
@@ -4169,8 +4194,6 @@ static int vega10_get_profiling_clk_mask(struct pp_hwmgr *hwmgr, enum amd_dpm_fo
*sclk_mask = VEGA10_UMD_PSTATE_GFXCLK_LEVEL;
*soc_mask = VEGA10_UMD_PSTATE_SOCCLK_LEVEL;
*mclk_mask = VEGA10_UMD_PSTATE_MCLK_LEVEL;
- hwmgr->pstate_sclk = table_info->vdd_dep_on_sclk->entries[VEGA10_UMD_PSTATE_GFXCLK_LEVEL].clk;
- hwmgr->pstate_mclk = table_info->vdd_dep_on_mclk->entries[VEGA10_UMD_PSTATE_MCLK_LEVEL].clk;
}
if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
@@ -4281,9 +4304,6 @@ static int vega10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
uint32_t mclk_mask = 0;
uint32_t soc_mask = 0;
- if (hwmgr->pstate_sclk == 0)
- vega10_get_profiling_clk_mask(hwmgr, level, &sclk_mask, &mclk_mask, &soc_mask);
-
switch (level) {
case AMD_DPM_FORCED_LEVEL_HIGH:
ret = vega10_force_dpm_highest(hwmgr);
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_processpptables.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_processpptables.c
index 95b988823f50..bb90d8abf79b 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_processpptables.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_processpptables.c
@@ -23,7 +23,6 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
-#include <linux/fb.h>
#include "vega10_processpptables.h"
#include "ppatomfwctrl.h"
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
index a2f4d6773d45..e9db137cd1c6 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
@@ -22,7 +22,6 @@
*/
#include <linux/delay.h>
-#include <linux/fb.h>
#include <linux/module.h>
#include <linux/slab.h>
@@ -1026,6 +1025,25 @@ static int vega12_get_all_clock_ranges(struct pp_hwmgr *hwmgr)
return 0;
}
+static void vega12_populate_umdpstate_clocks(struct pp_hwmgr *hwmgr)
+{
+ struct vega12_hwmgr *data = (struct vega12_hwmgr *)(hwmgr->backend);
+ struct vega12_single_dpm_table *gfx_dpm_table = &(data->dpm_table.gfx_table);
+ struct vega12_single_dpm_table *mem_dpm_table = &(data->dpm_table.mem_table);
+
+ if (gfx_dpm_table->count > VEGA12_UMD_PSTATE_GFXCLK_LEVEL &&
+ mem_dpm_table->count > VEGA12_UMD_PSTATE_MCLK_LEVEL) {
+ hwmgr->pstate_sclk = gfx_dpm_table->dpm_levels[VEGA12_UMD_PSTATE_GFXCLK_LEVEL].value;
+ hwmgr->pstate_mclk = mem_dpm_table->dpm_levels[VEGA12_UMD_PSTATE_MCLK_LEVEL].value;
+ } else {
+ hwmgr->pstate_sclk = gfx_dpm_table->dpm_levels[0].value;
+ hwmgr->pstate_mclk = mem_dpm_table->dpm_levels[0].value;
+ }
+
+ hwmgr->pstate_sclk_peak = gfx_dpm_table->dpm_levels[gfx_dpm_table->count].value;
+ hwmgr->pstate_mclk_peak = mem_dpm_table->dpm_levels[mem_dpm_table->count].value;
+}
+
static int vega12_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
int tmp_result, result = 0;
@@ -1077,6 +1095,9 @@ static int vega12_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE(!result,
"Failed to setup default DPM tables!",
return result);
+
+ vega12_populate_umdpstate_clocks(hwmgr);
+
return result;
}
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_processpptables.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_processpptables.c
index bd54fbd393b9..89148f73b514 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_processpptables.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_processpptables.c
@@ -22,7 +22,6 @@
*/
#include <linux/module.h>
#include <linux/slab.h>
-#include <linux/fb.h>
#include "vega12/smu9_driver_if.h"
#include "vega12_processpptables.h"
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
index b30684c84e20..0d4d4811527c 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
@@ -22,7 +22,6 @@
*/
#include <linux/delay.h>
-#include <linux/fb.h>
#include <linux/module.h>
#include <linux/slab.h>
@@ -1555,26 +1554,23 @@ static int vega20_set_mclk_od(
return 0;
}
-static int vega20_populate_umdpstate_clocks(
- struct pp_hwmgr *hwmgr)
+static void vega20_populate_umdpstate_clocks(struct pp_hwmgr *hwmgr)
{
struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
struct vega20_single_dpm_table *gfx_table = &(data->dpm_table.gfx_table);
struct vega20_single_dpm_table *mem_table = &(data->dpm_table.mem_table);
- hwmgr->pstate_sclk = gfx_table->dpm_levels[0].value;
- hwmgr->pstate_mclk = mem_table->dpm_levels[0].value;
-
if (gfx_table->count > VEGA20_UMD_PSTATE_GFXCLK_LEVEL &&
mem_table->count > VEGA20_UMD_PSTATE_MCLK_LEVEL) {
hwmgr->pstate_sclk = gfx_table->dpm_levels[VEGA20_UMD_PSTATE_GFXCLK_LEVEL].value;
hwmgr->pstate_mclk = mem_table->dpm_levels[VEGA20_UMD_PSTATE_MCLK_LEVEL].value;
+ } else {
+ hwmgr->pstate_sclk = gfx_table->dpm_levels[0].value;
+ hwmgr->pstate_mclk = mem_table->dpm_levels[0].value;
}
- hwmgr->pstate_sclk = hwmgr->pstate_sclk * 100;
- hwmgr->pstate_mclk = hwmgr->pstate_mclk * 100;
-
- return 0;
+ hwmgr->pstate_sclk_peak = gfx_table->dpm_levels[gfx_table->count - 1].value;
+ hwmgr->pstate_mclk_peak = mem_table->dpm_levels[mem_table->count - 1].value;
}
static int vega20_get_max_sustainable_clock(struct pp_hwmgr *hwmgr,
@@ -1753,10 +1749,7 @@ static int vega20_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
"[EnableDPMTasks] Failed to initialize odn settings!",
return result);
- result = vega20_populate_umdpstate_clocks(hwmgr);
- PP_ASSERT_WITH_CODE(!result,
- "[EnableDPMTasks] Failed to populate umdpstate clocks!",
- return result);
+ vega20_populate_umdpstate_clocks(hwmgr);
result = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetPptLimit,
POWER_SOURCE_AC << 16, &hwmgr->default_power_limit);
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_processpptables.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_processpptables.c
index 1f9082539457..79c817752a33 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_processpptables.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_processpptables.c
@@ -22,7 +22,6 @@
*/
#include <linux/module.h>
#include <linux/slab.h>
-#include <linux/fb.h>
#include "smu11_driver_if.h"
#include "vega20_processpptables.h"
diff --git a/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h
index 27f8d0e0e6a8..5ce433e2c16a 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h
@@ -809,6 +809,8 @@ struct pp_hwmgr {
uint32_t workload_prority[Workload_Policy_Max];
uint32_t workload_setting[Workload_Policy_Max];
bool gfxoff_state_changed_by_workload;
+ uint32_t pstate_sclk_peak;
+ uint32_t pstate_mclk_peak;
};
int hwmgr_early_init(struct pp_hwmgr *hwmgr);
diff --git a/drivers/gpu/drm/amd/pm/powerplay/inc/smu11_driver_if.h b/drivers/gpu/drm/amd/pm/powerplay/inc/smu11_driver_if.h
index fdc6b7a57bc9..c2efc70ef288 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/inc/smu11_driver_if.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/smu11_driver_if.h
@@ -358,6 +358,7 @@ typedef struct {
QuadraticInt_t SsCurve;
} DpmDescriptor_t;
+#pragma pack(push, 1)
typedef struct {
uint32_t Version;
@@ -609,6 +610,7 @@ typedef struct {
uint32_t MmHubPadding[8];
} PPTable_t;
+#pragma pack(pop)
typedef struct {
diff --git a/drivers/gpu/drm/amd/pm/powerplay/inc/smu9_driver_if.h b/drivers/gpu/drm/amd/pm/powerplay/inc/smu9_driver_if.h
index 2818c98ff5ca..faae4b918d90 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/inc/smu9_driver_if.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/smu9_driver_if.h
@@ -122,6 +122,7 @@ typedef struct {
uint16_t Vid; /* min voltage in SVI2 VID */
} DisplayClockTable_t;
+#pragma pack(push, 1)
typedef struct {
/* PowerTune */
uint16_t SocketPowerLimit; /* Watts */
@@ -323,6 +324,7 @@ typedef struct {
uint32_t MmHubPadding[3]; /* SMU internal use */
} PPTable_t;
+#pragma pack(pop)
typedef struct {
uint16_t MinClock; // This is either DCEFCLK or SOCCLK (in MHz)
diff --git a/drivers/gpu/drm/amd/pm/powerplay/inc/vega12/smu9_driver_if.h b/drivers/gpu/drm/amd/pm/powerplay/inc/vega12/smu9_driver_if.h
index b6ffd08784e7..6456bea5d2d5 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/inc/vega12/smu9_driver_if.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/vega12/smu9_driver_if.h
@@ -245,6 +245,7 @@ typedef struct {
QuadraticInt_t SsCurve;
} DpmDescriptor_t;
+#pragma pack(push, 1)
typedef struct {
uint32_t Version;
@@ -508,6 +509,7 @@ typedef struct {
uint32_t MmHubPadding[7];
} PPTable_t;
+#pragma pack(pop)
typedef struct {
diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
index 5ca3c422f7d4..4bc8db1be738 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
@@ -22,7 +22,6 @@
*/
#include <linux/module.h>
#include <linux/slab.h>
-#include <linux/fb.h>
#include "linux/delay.h"
#include <linux/types.h>
#include <linux/pci.h>
@@ -2203,7 +2202,7 @@ static int ci_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
if (data->need_update_smu7_dpm_table &
- (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK))
+ (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK))
return ci_program_memory_timing_parameters(hwmgr);
return 0;
diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
index 03df35dee8ba..060fc140c574 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/iceland_smumgr.c
@@ -2165,7 +2165,7 @@ static int iceland_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
if (data->need_update_smu7_dpm_table &
- (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK))
+ (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK))
return iceland_program_memory_timing_parameters(hwmgr);
return 0;
diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu10_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu10_smumgr.c
index 88a5641465dc..7eeab84d421a 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu10_smumgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu10_smumgr.c
@@ -250,9 +250,8 @@ static int smu10_smu_init(struct pp_hwmgr *hwmgr)
/* allocate space for watermarks table */
r = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
- sizeof(Watermarks_t),
- PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
+ sizeof(Watermarks_t), PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT,
&priv->smu_tables.entry[SMU10_WMTABLE].handle,
&priv->smu_tables.entry[SMU10_WMTABLE].mc_addr,
&priv->smu_tables.entry[SMU10_WMTABLE].table);
@@ -266,9 +265,8 @@ static int smu10_smu_init(struct pp_hwmgr *hwmgr)
/* allocate space for watermarks table */
r = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
- sizeof(DpmClocks_t),
- PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
+ sizeof(DpmClocks_t), PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT,
&priv->smu_tables.entry[SMU10_CLOCKTABLE].handle,
&priv->smu_tables.entry[SMU10_CLOCKTABLE].mc_addr,
&priv->smu_tables.entry[SMU10_CLOCKTABLE].table);
diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
index 04b561f5d932..acbe41174d7e 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/tonga_smumgr.c
@@ -2554,7 +2554,7 @@ static int tonga_program_mem_timing_parameters(struct pp_hwmgr *hwmgr)
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
if (data->need_update_smu7_dpm_table &
- (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_OD_UPDATE_MCLK))
+ (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK))
return tonga_program_memory_timing_parameters(hwmgr);
return 0;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index ca3beb5d8f27..834d146c4991 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -623,6 +623,7 @@ static int smu_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct smu_context *smu;
+ int r;
smu = kzalloc(sizeof(struct smu_context), GFP_KERNEL);
if (!smu)
@@ -640,7 +641,10 @@ static int smu_early_init(void *handle)
adev->powerplay.pp_handle = smu;
adev->powerplay.pp_funcs = &swsmu_pm_funcs;
- return smu_set_funcs(adev);
+ r = smu_set_funcs(adev);
+ if (r)
+ return r;
+ return smu_init_microcode(smu);
}
static int smu_set_default_dpm_table(struct smu_context *smu)
@@ -900,9 +904,8 @@ static int smu_alloc_dummy_read_table(struct smu_context *smu)
struct amdgpu_device *adev = smu->adev;
int ret = 0;
- dummy_read_1_table->size = 0x40000;
- dummy_read_1_table->align = PAGE_SIZE;
- dummy_read_1_table->domain = AMDGPU_GEM_DOMAIN_VRAM;
+ if (!dummy_read_1_table->size)
+ return 0;
ret = amdgpu_bo_create_kernel(adev,
dummy_read_1_table->size,
@@ -1067,12 +1070,6 @@ static int smu_sw_init(void *handle)
smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
- ret = smu_init_microcode(smu);
- if (ret) {
- dev_err(adev->dev, "Failed to load smu firmware!\n");
- return ret;
- }
-
ret = smu_smc_table_sw_init(smu);
if (ret) {
dev_err(adev->dev, "Failed to sw init smc table!\n");
@@ -1500,6 +1497,20 @@ static int smu_disable_dpms(struct smu_context *smu)
}
/*
+ * For SMU 13.0.4/11, PMFW will handle the features disablement properly
+ * for gpu reset case. Driver involvement is unnecessary.
+ */
+ if (amdgpu_in_reset(adev)) {
+ switch (adev->ip_versions[MP1_HWIP][0]) {
+ case IP_VERSION(13, 0, 4):
+ case IP_VERSION(13, 0, 11):
+ return 0;
+ default:
+ break;
+ }
+ }
+
+ /*
* For gpu reset, runpm and hibernation through BACO,
* BACO feature has to be kept enabled.
*/
@@ -2473,6 +2484,14 @@ static int smu_read_sensor(void *handle,
*((uint32_t *)data) = pstate_table->uclk_pstate.standard * 100;
*size = 4;
break;
+ case AMDGPU_PP_SENSOR_PEAK_PSTATE_SCLK:
+ *((uint32_t *)data) = pstate_table->gfxclk_pstate.peak * 100;
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_PEAK_PSTATE_MCLK:
+ *((uint32_t *)data) = pstate_table->uclk_pstate.peak * 100;
+ *size = 4;
+ break;
case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
ret = smu_feature_get_enabled_mask(smu, (uint64_t *)data);
*size = 8;
@@ -2839,6 +2858,23 @@ static int smu_mode2_reset(void *handle)
return ret;
}
+static int smu_enable_gfx_features(void *handle)
+{
+ struct smu_context *smu = handle;
+ int ret = 0;
+
+ if (!smu->pm_enabled)
+ return -EOPNOTSUPP;
+
+ if (smu->ppt_funcs->enable_gfx_features)
+ ret = smu->ppt_funcs->enable_gfx_features(smu);
+
+ if (ret)
+ dev_err(smu->adev->dev, "enable gfx features failed!\n");
+
+ return ret;
+}
+
static int smu_get_max_sustainable_clocks_by_dc(void *handle,
struct pp_smu_nv_clock_table *max_clocks)
{
@@ -3023,6 +3059,7 @@ static const struct amd_pm_funcs swsmu_pm_funcs = {
.get_ppfeature_status = smu_sys_get_pp_feature_mask,
.set_ppfeature_status = smu_sys_set_pp_feature_mask,
.asic_reset_mode_2 = smu_mode2_reset,
+ .asic_reset_enable_gfx_features = smu_enable_gfx_features,
.set_df_cstate = smu_set_df_cstate,
.set_xgmi_pstate = smu_set_xgmi_pstate,
.get_gpu_metrics = smu_sys_get_gpu_metrics,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
index 3bc4128a22ac..2a03d85bf4e2 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
@@ -1201,6 +1201,8 @@ struct pptable_funcs {
* IPs reset varies by asic.
*/
int (*mode2_reset)(struct smu_context *smu);
+ /* for gfx feature enablement after mode2 reset */
+ int (*enable_gfx_features)(struct smu_context *smu);
/**
* @get_dpm_ultimate_freq: Get the hard frequency range of a clock
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_arcturus.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_arcturus.h
index 43d43d6addc0..d518dee18e1b 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_arcturus.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_arcturus.h
@@ -464,6 +464,7 @@ typedef struct {
uint16_t Padding16;
} DpmDescriptor_t;
+#pragma pack(push, 1)
typedef struct {
uint32_t Version;
@@ -733,6 +734,7 @@ typedef struct {
uint32_t MmHubPadding[8]; // SMU internal use
} PPTable_t;
+#pragma pack(pop)
typedef struct {
// Time constant parameters for clock averages in ms
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_navi10.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_navi10.h
index 04752ade1016..c5c1943fb6a1 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_navi10.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_navi10.h
@@ -515,6 +515,7 @@ typedef struct {
uint32_t BoardLevelEnergyAccumulator;
} OutOfBandMonitor_t;
+#pragma pack(push, 1)
typedef struct {
uint32_t Version;
@@ -814,6 +815,7 @@ typedef struct {
uint32_t MmHubPadding[8]; // SMU internal use
} PPTable_t;
+#pragma pack(pop)
typedef struct {
// Time constant parameters for clock averages in ms
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_sienna_cichlid.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_sienna_cichlid.h
index 351a4af429b3..aa6d29de4002 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_sienna_cichlid.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_sienna_cichlid.h
@@ -599,6 +599,7 @@ typedef struct {
uint16_t Fmax;
} UclkDpmChangeRange_t;
+#pragma pack(push, 1)
typedef struct {
// MAJOR SECTION: SKU PARAMETERS
@@ -957,6 +958,7 @@ typedef struct {
uint32_t MmHubPadding[8]; // SMU internal use
} PPTable_t;
+#pragma pack(pop)
typedef struct {
// MAJOR SECTION: SKU PARAMETERS
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_aldebaran.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_aldebaran.h
index 7a6075daa7b2..90200f31ff52 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_aldebaran.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_aldebaran.h
@@ -267,6 +267,7 @@ typedef struct {
QuadraticInt_t SsCurve; // Slow-slow curve (GHz->V)
} DpmDescriptor_t;
+#pragma pack(push, 1)
typedef struct {
uint32_t Version;
@@ -448,6 +449,7 @@ typedef struct {
uint32_t reserved[14];
} PPTable_t;
+#pragma pack(pop)
typedef struct {
// Time constant parameters for clock averages in ms
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h
index d6b964cf73bd..b686fb68a6e7 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h
@@ -123,7 +123,8 @@
(1 << FEATURE_DS_FCLK_BIT) | \
(1 << FEATURE_DS_LCLK_BIT) | \
(1 << FEATURE_DS_DCFCLK_BIT) | \
- (1 << FEATURE_DS_UCLK_BIT))
+ (1 << FEATURE_DS_UCLK_BIT) | \
+ (1ULL << FEATURE_DS_VCN_BIT))
//For use with feature control messages
typedef enum {
@@ -522,9 +523,9 @@ typedef enum {
TEMP_HOTSPOT_M,
TEMP_MEM,
TEMP_VR_GFX,
- TEMP_VR_SOC,
TEMP_VR_MEM0,
TEMP_VR_MEM1,
+ TEMP_VR_SOC,
TEMP_VR_U,
TEMP_LIQUID0,
TEMP_LIQUID1,
@@ -1346,10 +1347,12 @@ typedef struct {
uint32_t MmHubPadding[8];
} BoardTable_t;
+#pragma pack(push, 1)
typedef struct {
SkuTable_t SkuTable;
BoardTable_t BoardTable;
} PPTable_t;
+#pragma pack(pop)
typedef struct {
// Time constant parameters for clock averages in ms
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_7.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_7.h
index d6b13933a98f..4c46a0392451 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_7.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_7.h
@@ -113,20 +113,21 @@
#define NUM_FEATURES 64
#define ALLOWED_FEATURE_CTRL_DEFAULT 0xFFFFFFFFFFFFFFFFULL
-#define ALLOWED_FEATURE_CTRL_SCPM (1 << FEATURE_DPM_GFXCLK_BIT) | \
- (1 << FEATURE_DPM_GFX_POWER_OPTIMIZER_BIT) | \
- (1 << FEATURE_DPM_UCLK_BIT) | \
- (1 << FEATURE_DPM_FCLK_BIT) | \
- (1 << FEATURE_DPM_SOCCLK_BIT) | \
- (1 << FEATURE_DPM_MP0CLK_BIT) | \
- (1 << FEATURE_DPM_LINK_BIT) | \
- (1 << FEATURE_DPM_DCN_BIT) | \
- (1 << FEATURE_DS_GFXCLK_BIT) | \
- (1 << FEATURE_DS_SOCCLK_BIT) | \
- (1 << FEATURE_DS_FCLK_BIT) | \
- (1 << FEATURE_DS_LCLK_BIT) | \
- (1 << FEATURE_DS_DCFCLK_BIT) | \
- (1 << FEATURE_DS_UCLK_BIT)
+#define ALLOWED_FEATURE_CTRL_SCPM ((1 << FEATURE_DPM_GFXCLK_BIT) | \
+ (1 << FEATURE_DPM_GFX_POWER_OPTIMIZER_BIT) | \
+ (1 << FEATURE_DPM_UCLK_BIT) | \
+ (1 << FEATURE_DPM_FCLK_BIT) | \
+ (1 << FEATURE_DPM_SOCCLK_BIT) | \
+ (1 << FEATURE_DPM_MP0CLK_BIT) | \
+ (1 << FEATURE_DPM_LINK_BIT) | \
+ (1 << FEATURE_DPM_DCN_BIT) | \
+ (1 << FEATURE_DS_GFXCLK_BIT) | \
+ (1 << FEATURE_DS_SOCCLK_BIT) | \
+ (1 << FEATURE_DS_FCLK_BIT) | \
+ (1 << FEATURE_DS_LCLK_BIT) | \
+ (1 << FEATURE_DS_DCFCLK_BIT) | \
+ (1 << FEATURE_DS_UCLK_BIT) | \
+ (1ULL << FEATURE_DS_VCN_BIT))
//For use with feature control messages
typedef enum {
@@ -1379,10 +1380,12 @@ typedef struct {
uint32_t MmHubPadding[8];
} BoardTable_t;
+#pragma pack(push, 1)
typedef struct {
SkuTable_t SkuTable;
BoardTable_t BoardTable;
} PPTable_t;
+#pragma pack(pop)
typedef struct {
// Time constant parameters for clock averages in ms
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_0_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_0_ppsmc.h
index 8b8266890a10..10cff75b44d5 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_0_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_0_ppsmc.h
@@ -94,6 +94,7 @@
//Resets
#define PPSMC_MSG_PrepareMp1ForUnload 0x2E
#define PPSMC_MSG_Mode1Reset 0x2F
+#define PPSMC_MSG_Mode2Reset 0x4F
//Set SystemVirtual DramAddrHigh
#define PPSMC_MSG_SetSystemVirtualDramAddrHigh 0x30
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
index 4180c71d930f..96f6c2db955b 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
@@ -242,7 +242,8 @@
__SMU_DUMMY_MAP(LogGfxOffResidency), \
__SMU_DUMMY_MAP(SetNumBadMemoryPagesRetired), \
__SMU_DUMMY_MAP(SetBadMemoryPagesRetiredFlagsPerChannel), \
- __SMU_DUMMY_MAP(AllowGpo),
+ __SMU_DUMMY_MAP(AllowGpo), \
+ __SMU_DUMMY_MAP(Mode2Reset),
#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(type) SMU_MSG_##type
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
index e8c6febb8b64..1c0ae2cb757b 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
@@ -28,11 +28,11 @@
#define SMU13_DRIVER_IF_VERSION_INV 0xFFFFFFFF
#define SMU13_DRIVER_IF_VERSION_YELLOW_CARP 0x04
#define SMU13_DRIVER_IF_VERSION_ALDE 0x08
-#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0_0 0x34
+#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0_0 0x37
#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_4 0x07
#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_5 0x04
#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0_10 0x32
-#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_7 0x35
+#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_7 0x37
#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_10 0x1D
#define SMU13_MODE1_RESET_WAIT_TIME_IN_MS 500 //500ms
@@ -244,11 +244,6 @@ int smu_v13_0_set_single_dpm_table(struct smu_context *smu,
enum smu_clk_type clk_type,
struct smu_13_0_dpm_table *single_dpm_table);
-int smu_v13_0_get_dpm_level_range(struct smu_context *smu,
- enum smu_clk_type clk_type,
- uint32_t *min_value,
- uint32_t *max_value);
-
int smu_v13_0_get_current_pcie_link_width_level(struct smu_context *smu);
int smu_v13_0_get_current_pcie_link_width(struct smu_context *smu);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
index 0bcd4fe0ef17..95da6dd1cc65 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
@@ -494,6 +494,8 @@ static int navi10_tables_init(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
struct smu_table *tables = smu_table->tables;
+ struct smu_table *dummy_read_1_table =
+ &smu_table->dummy_read_1_table;
SMU_TABLE_INIT(tables, SMU_TABLE_PPTABLE, sizeof(PPTable_t),
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
@@ -513,6 +515,10 @@ static int navi10_tables_init(struct smu_context *smu)
SMU_TABLE_INIT(tables, SMU_TABLE_DRIVER_SMU_CONFIG, sizeof(DriverSmuConfig_t),
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+ dummy_read_1_table->size = 0x40000;
+ dummy_read_1_table->align = PAGE_SIZE;
+ dummy_read_1_table->domain = AMDGPU_GEM_DOMAIN_VRAM;
+
smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_NV1X_t),
GFP_KERNEL);
if (!smu_table->metrics_table)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
index ad66d57aa102..6492d69e2e60 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
@@ -93,7 +93,7 @@ static void smu_v11_0_poll_baco_exit(struct smu_context *smu)
int smu_v11_0_init_microcode(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
- const char *chip_name;
+ char ucode_prefix[30];
char fw_name[SMU_FW_NAME_LEN];
int err = 0;
const struct smc_firmware_header_v1_0 *hdr;
@@ -105,43 +105,11 @@ int smu_v11_0_init_microcode(struct smu_context *smu)
(adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7))))
return 0;
- switch (adev->ip_versions[MP1_HWIP][0]) {
- case IP_VERSION(11, 0, 0):
- chip_name = "navi10";
- break;
- case IP_VERSION(11, 0, 5):
- chip_name = "navi14";
- break;
- case IP_VERSION(11, 0, 9):
- chip_name = "navi12";
- break;
- case IP_VERSION(11, 0, 7):
- chip_name = "sienna_cichlid";
- break;
- case IP_VERSION(11, 0, 11):
- chip_name = "navy_flounder";
- break;
- case IP_VERSION(11, 0, 12):
- chip_name = "dimgrey_cavefish";
- break;
- case IP_VERSION(11, 0, 13):
- chip_name = "beige_goby";
- break;
- case IP_VERSION(11, 0, 2):
- chip_name = "arcturus";
- break;
- default:
- dev_err(adev->dev, "Unsupported IP version 0x%x\n",
- adev->ip_versions[MP1_HWIP][0]);
- return -EINVAL;
- }
+ amdgpu_ucode_ip_version_decode(adev, MP1_HWIP, ucode_prefix, sizeof(ucode_prefix));
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_smc.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", ucode_prefix);
- err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
- if (err)
- goto out;
- err = amdgpu_ucode_validate(adev->pm.fw);
+ err = amdgpu_ucode_request(adev, &adev->pm.fw, fw_name);
if (err)
goto out;
@@ -159,12 +127,8 @@ int smu_v11_0_init_microcode(struct smu_context *smu)
}
out:
- if (err) {
- DRM_ERROR("smu_v11_0: Failed to load firmware \"%s\"\n",
- fw_name);
- release_firmware(adev->pm.fw);
- adev->pm.fw = NULL;
- }
+ if (err)
+ amdgpu_ucode_release(&adev->pm.fw);
return err;
}
@@ -172,8 +136,7 @@ void smu_v11_0_fini_microcode(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
- release_firmware(adev->pm.fw);
- adev->pm.fw = NULL;
+ amdgpu_ucode_release(&adev->pm.fw);
adev->pm.fw_version = 0;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
index b4373b6568ae..78945e79dbee 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
@@ -88,7 +88,6 @@ static const int link_speed[] = {25, 50, 80, 160};
int smu_v13_0_init_microcode(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
- const char *chip_name;
char fw_name[30];
char ucode_prefix[30];
int err = 0;
@@ -100,21 +99,11 @@ int smu_v13_0_init_microcode(struct smu_context *smu)
if (amdgpu_sriov_vf(adev))
return 0;
- switch (adev->ip_versions[MP1_HWIP][0]) {
- case IP_VERSION(13, 0, 2):
- chip_name = "aldebaran_smc";
- break;
- default:
- amdgpu_ucode_ip_version_decode(adev, MP1_HWIP, ucode_prefix, sizeof(ucode_prefix));
- chip_name = ucode_prefix;
- }
+ amdgpu_ucode_ip_version_decode(adev, MP1_HWIP, ucode_prefix, sizeof(ucode_prefix));
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", chip_name);
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", ucode_prefix);
- err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
- if (err)
- goto out;
- err = amdgpu_ucode_validate(adev->pm.fw);
+ err = amdgpu_ucode_request(adev, &adev->pm.fw, fw_name);
if (err)
goto out;
@@ -132,12 +121,8 @@ int smu_v13_0_init_microcode(struct smu_context *smu)
}
out:
- if (err) {
- DRM_ERROR("smu_v13_0: Failed to load firmware \"%s\"\n",
- fw_name);
- release_firmware(adev->pm.fw);
- adev->pm.fw = NULL;
- }
+ if (err)
+ amdgpu_ucode_release(&adev->pm.fw);
return err;
}
@@ -145,8 +130,7 @@ void smu_v13_0_fini_microcode(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
- release_firmware(adev->pm.fw);
- adev->pm.fw = NULL;
+ amdgpu_ucode_release(&adev->pm.fw);
adev->pm.fw_version = 0;
}
@@ -2064,45 +2048,6 @@ int smu_v13_0_set_single_dpm_table(struct smu_context *smu,
return 0;
}
-int smu_v13_0_get_dpm_level_range(struct smu_context *smu,
- enum smu_clk_type clk_type,
- uint32_t *min_value,
- uint32_t *max_value)
-{
- uint32_t level_count = 0;
- int ret = 0;
-
- if (!min_value && !max_value)
- return -EINVAL;
-
- if (min_value) {
- /* by default, level 0 clock value as min value */
- ret = smu_v13_0_get_dpm_freq_by_index(smu,
- clk_type,
- 0,
- min_value);
- if (ret)
- return ret;
- }
-
- if (max_value) {
- ret = smu_v13_0_get_dpm_level_count(smu,
- clk_type,
- &level_count);
- if (ret)
- return ret;
-
- ret = smu_v13_0_get_dpm_freq_by_index(smu,
- clk_type,
- level_count - 1,
- max_value);
- if (ret)
- return ret;
- }
-
- return ret;
-}
-
int smu_v13_0_get_current_pcie_link_width_level(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
index cf96c3f2affe..7c906ab3ddd2 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
@@ -138,6 +138,7 @@ static struct cmn2asic_msg_mapping smu_v13_0_0_message_map[SMU_MSG_MAX_COUNT] =
MSG_MAP(GetPptLimit, PPSMC_MSG_GetPptLimit, 0),
MSG_MAP(NotifyPowerSource, PPSMC_MSG_NotifyPowerSource, 0),
MSG_MAP(Mode1Reset, PPSMC_MSG_Mode1Reset, 0),
+ MSG_MAP(Mode2Reset, PPSMC_MSG_Mode2Reset, 0),
MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareMp1ForUnload, 0),
MSG_MAP(DFCstateControl, PPSMC_MSG_SetExternalClientDfCstateAllow, 0),
MSG_MAP(ArmD3, PPSMC_MSG_ArmD3, 0),
@@ -242,6 +243,7 @@ static struct cmn2asic_mapping smu_v13_0_0_workload_map[PP_SMC_POWER_PROFILE_COU
WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VR, WORKLOAD_PPLIB_VR_BIT),
WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE, WORKLOAD_PPLIB_COMPUTE_BIT),
WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM, WORKLOAD_PPLIB_CUSTOM_BIT),
+ WORKLOAD_MAP(PP_SMC_POWER_PROFILE_WINDOW3D, WORKLOAD_PPLIB_WINDOW_3D_BIT),
};
static const uint8_t smu_v13_0_0_throttler_map[] = {
@@ -407,6 +409,9 @@ static int smu_v13_0_0_setup_pptable(struct smu_context *smu)
struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (amdgpu_sriov_vf(smu->adev))
+ return 0;
+
ret = smu_v13_0_0_get_pptable_from_pmfw(smu,
&smu_table->power_play_table,
&smu_table->power_play_table_size);
@@ -1257,6 +1262,9 @@ static int smu_v13_0_0_get_thermal_temperature_range(struct smu_context *smu,
table_context->power_play_table;
PPTable_t *pptable = smu->smu_table.driver_pptable;
+ if (amdgpu_sriov_vf(smu->adev))
+ return 0;
+
if (!range)
return -EINVAL;
@@ -1557,7 +1565,7 @@ static int smu_v13_0_0_get_power_profile_mode(struct smu_context *smu,
title[0], title[1], title[2], title[3], title[4], title[5],
title[6], title[7], title[8], title[9]);
- for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) {
+ for (i = 0; i < PP_SMC_POWER_PROFILE_COUNT; i++) {
/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
workload_type = smu_cmn_to_asic_specific_index(smu,
CMN2ASIC_MAPPING_WORKLOAD,
@@ -1619,7 +1627,7 @@ static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu,
smu->power_profile_mode = input[size];
- if (smu->power_profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) {
+ if (smu->power_profile_mode >= PP_SMC_POWER_PROFILE_COUNT) {
dev_err(smu->adev->dev, "Invalid power profile mode %d\n", smu->power_profile_mode);
return -EINVAL;
}
@@ -1904,15 +1912,51 @@ static int smu_v13_0_0_set_df_cstate(struct smu_context *smu,
NULL);
}
+static void smu_v13_0_0_set_mode1_reset_param(struct smu_context *smu,
+ uint32_t supported_version,
+ uint32_t *param)
+{
+ uint32_t smu_version;
+ struct amdgpu_device *adev = smu->adev;
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+
+ smu_cmn_get_smc_version(smu, NULL, &smu_version);
+
+ if ((smu_version >= supported_version) &&
+ ras && atomic_read(&ras->in_recovery))
+ /* Set RAS fatal error reset flag */
+ *param = 1 << 16;
+ else
+ *param = 0;
+}
+
static int smu_v13_0_0_mode1_reset(struct smu_context *smu)
{
int ret;
+ uint32_t param;
struct amdgpu_device *adev = smu->adev;
- if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 10))
- ret = smu_cmn_send_debug_smc_msg(smu, DEBUGSMC_MSG_Mode1Reset);
- else
+ switch (adev->ip_versions[MP1_HWIP][0]) {
+ case IP_VERSION(13, 0, 0):
+ /* SMU 13_0_0 PMFW supports RAS fatal error reset from 78.77 */
+ smu_v13_0_0_set_mode1_reset_param(smu, 0x004e4d00, &param);
+
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_Mode1Reset, param, NULL);
+ break;
+
+ case IP_VERSION(13, 0, 10):
+ /* SMU 13_0_10 PMFW supports RAS fatal error reset from 80.28 */
+ smu_v13_0_0_set_mode1_reset_param(smu, 0x00501c00, &param);
+
+ ret = smu_cmn_send_debug_smc_msg_with_param(smu,
+ DEBUGSMC_MSG_Mode1Reset, param);
+ break;
+
+ default:
ret = smu_cmn_send_smc_msg(smu, SMU_MSG_Mode1Reset, NULL);
+ break;
+ }
if (!ret)
msleep(SMU13_MODE1_RESET_WAIT_TIME_IN_MS);
@@ -1920,6 +1964,30 @@ static int smu_v13_0_0_mode1_reset(struct smu_context *smu)
return ret;
}
+static int smu_v13_0_0_mode2_reset(struct smu_context *smu)
+{
+ int ret;
+ struct amdgpu_device *adev = smu->adev;
+
+ if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 10))
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_Mode2Reset, NULL);
+ else
+ return -EOPNOTSUPP;
+
+ return ret;
+}
+
+static int smu_v13_0_0_enable_gfx_features(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+
+ if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 10))
+ return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_EnableAllSmuFeatures,
+ FEATURE_PWR_GFX, NULL);
+ else
+ return -EOPNOTSUPP;
+}
+
static void smu_v13_0_0_set_smu_mailbox_registers(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
@@ -2035,6 +2103,8 @@ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = {
.baco_exit = smu_v13_0_0_baco_exit,
.mode1_reset_is_support = smu_v13_0_0_is_mode1_reset_supported,
.mode1_reset = smu_v13_0_0_mode1_reset,
+ .mode2_reset = smu_v13_0_0_mode2_reset,
+ .enable_gfx_features = smu_v13_0_0_enable_gfx_features,
.set_mp1_state = smu_v13_0_0_set_mp1_state,
.set_df_cstate = smu_v13_0_0_set_df_cstate,
.send_hbm_bad_pages_num = smu_v13_0_0_smu_send_bad_mem_page_num,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
index e87db7e02e8a..9e1967d8049e 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
@@ -124,6 +124,7 @@ static struct cmn2asic_msg_mapping smu_v13_0_7_message_map[SMU_MSG_MAX_COUNT] =
MSG_MAP(DFCstateControl, PPSMC_MSG_SetExternalClientDfCstateAllow, 0),
MSG_MAP(ArmD3, PPSMC_MSG_ArmD3, 0),
MSG_MAP(AllowGpo, PPSMC_MSG_SetGpoAllow, 0),
+ MSG_MAP(GetPptLimit, PPSMC_MSG_GetPptLimit, 0),
};
static struct cmn2asic_mapping smu_v13_0_7_clk_map[SMU_CLK_COUNT] = {
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
index 768b6e7dbd77..d5abafc5a682 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
@@ -404,6 +404,12 @@ int smu_cmn_send_debug_smc_msg(struct smu_context *smu,
return __smu_cmn_send_debug_msg(smu, msg, 0);
}
+int smu_cmn_send_debug_smc_msg_with_param(struct smu_context *smu,
+ uint32_t msg, uint32_t param)
+{
+ return __smu_cmn_send_debug_msg(smu, msg, param);
+}
+
int smu_cmn_to_asic_specific_index(struct smu_context *smu,
enum smu_cmn2asic_mapping_type type,
uint32_t index)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
index f82cf76dd3a4..d7cd358a53bd 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
@@ -45,6 +45,9 @@ int smu_cmn_send_smc_msg(struct smu_context *smu,
int smu_cmn_send_debug_smc_msg(struct smu_context *smu,
uint32_t msg);
+int smu_cmn_send_debug_smc_msg_with_param(struct smu_context *smu,
+ uint32_t msg, uint32_t param);
+
int smu_cmn_wait_for_response(struct smu_context *smu);
int smu_cmn_to_asic_specific_index(struct smu_context *smu,
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
index 4cc07d6bb9d8..cea3fd5772b5 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
@@ -10,7 +10,6 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc_helper.h>
#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_drv.c b/drivers/gpu/drm/arm/display/komeda/komeda_drv.c
index 3f4e719eebd8..28f76e07dd95 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_drv.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_drv.c
@@ -6,6 +6,7 @@
*/
#include <linux/module.h>
#include <linux/kernel.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/component.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_kms.h b/drivers/gpu/drm/arm/display/komeda/komeda_kms.h
index 7339339ef6b8..3a872c292091 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_kms.h
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_kms.h
@@ -11,7 +11,6 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
-#include <drm/drm_crtc_helper.h>
#include <drm/drm_device.h>
#include <drm/drm_writeback.h>
#include <drm/drm_print.h>
diff --git a/drivers/gpu/drm/arm/hdlcd_drv.c b/drivers/gpu/drm/arm/hdlcd_drv.c
index 7043d1c9ed8f..e3507dd6f82a 100644
--- a/drivers/gpu/drm/arm/hdlcd_drv.c
+++ b/drivers/gpu/drm/arm/hdlcd_drv.c
@@ -195,8 +195,8 @@ static int hdlcd_setup_mode_config(struct drm_device *drm)
#ifdef CONFIG_DEBUG_FS
static int hdlcd_show_underrun_count(struct seq_file *m, void *arg)
{
- struct drm_info_node *node = (struct drm_info_node *)m->private;
- struct drm_device *drm = node->minor->dev;
+ struct drm_debugfs_entry *entry = m->private;
+ struct drm_device *drm = entry->dev;
struct hdlcd_drm_private *hdlcd = drm_to_hdlcd_priv(drm);
seq_printf(m, "underrun : %d\n", atomic_read(&hdlcd->buffer_underrun_count));
@@ -208,8 +208,8 @@ static int hdlcd_show_underrun_count(struct seq_file *m, void *arg)
static int hdlcd_show_pxlclock(struct seq_file *m, void *arg)
{
- struct drm_info_node *node = (struct drm_info_node *)m->private;
- struct drm_device *drm = node->minor->dev;
+ struct drm_debugfs_entry *entry = m->private;
+ struct drm_device *drm = entry->dev;
struct hdlcd_drm_private *hdlcd = drm_to_hdlcd_priv(drm);
unsigned long clkrate = clk_get_rate(hdlcd->clk);
unsigned long mode_clock = hdlcd->crtc.mode.crtc_clock * 1000;
@@ -219,17 +219,10 @@ static int hdlcd_show_pxlclock(struct seq_file *m, void *arg)
return 0;
}
-static struct drm_info_list hdlcd_debugfs_list[] = {
+static struct drm_debugfs_info hdlcd_debugfs_list[] = {
{ "interrupt_count", hdlcd_show_underrun_count, 0 },
{ "clocks", hdlcd_show_pxlclock, 0 },
};
-
-static void hdlcd_debugfs_init(struct drm_minor *minor)
-{
- drm_debugfs_create_files(hdlcd_debugfs_list,
- ARRAY_SIZE(hdlcd_debugfs_list),
- minor->debugfs_root, minor);
-}
#endif
DEFINE_DRM_GEM_DMA_FOPS(fops);
@@ -237,9 +230,6 @@ DEFINE_DRM_GEM_DMA_FOPS(fops);
static const struct drm_driver hdlcd_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
DRM_GEM_DMA_DRIVER_OPS,
-#ifdef CONFIG_DEBUG_FS
- .debugfs_init = hdlcd_debugfs_init,
-#endif
.fops = &fops,
.name = "hdlcd",
.desc = "ARM HDLCD Controller DRM",
@@ -303,6 +293,10 @@ static int hdlcd_drm_bind(struct device *dev)
drm_mode_config_reset(drm);
drm_kms_helper_poll_init(drm);
+#ifdef CONFIG_DEBUG_FS
+ drm_debugfs_add_files(drm, hdlcd_debugfs_list, ARRAY_SIZE(hdlcd_debugfs_list));
+#endif
+
ret = drm_dev_register(drm, 0);
if (ret)
goto err_register;
diff --git a/drivers/gpu/drm/armada/armada_fbdev.c b/drivers/gpu/drm/armada/armada_fbdev.c
index 584cee123bd8..0e44f53e9fa4 100644
--- a/drivers/gpu/drm/armada/armada_fbdev.c
+++ b/drivers/gpu/drm/armada/armada_fbdev.c
@@ -129,7 +129,7 @@ int armada_fbdev_init(struct drm_device *dev)
priv->fbdev = fbh;
- drm_fb_helper_prepare(dev, fbh, &armada_fb_helper_funcs);
+ drm_fb_helper_prepare(dev, fbh, 32, &armada_fb_helper_funcs);
ret = drm_fb_helper_init(dev, fbh);
if (ret) {
@@ -137,7 +137,7 @@ int armada_fbdev_init(struct drm_device *dev)
goto err_fb_helper;
}
- ret = drm_fb_helper_initial_config(fbh, 32);
+ ret = drm_fb_helper_initial_config(fbh);
if (ret) {
DRM_ERROR("failed to set initial config\n");
goto err_fb_setup;
@@ -147,6 +147,7 @@ int armada_fbdev_init(struct drm_device *dev)
err_fb_setup:
drm_fb_helper_fini(fbh);
err_fb_helper:
+ drm_fb_helper_unprepare(fbh);
priv->fbdev = NULL;
return ret;
}
@@ -164,6 +165,8 @@ void armada_fbdev_fini(struct drm_device *dev)
if (fbh->fb)
fbh->fb->funcs->destroy(fbh->fb);
+ drm_fb_helper_unprepare(fbh);
+
priv->fbdev = NULL;
}
}
diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx_crtc.c b/drivers/gpu/drm/aspeed/aspeed_gfx_crtc.c
index 55a3444a51d8..7877a57b8e26 100644
--- a/drivers/gpu/drm/aspeed/aspeed_gfx_crtc.c
+++ b/drivers/gpu/drm/aspeed/aspeed_gfx_crtc.c
@@ -5,7 +5,6 @@
#include <linux/reset.h>
#include <linux/regmap.h>
-#include <drm/drm_crtc_helper.h>
#include <drm/drm_device.h>
#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
index 718119e168a6..ecfb060d2557 100644
--- a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
+++ b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
@@ -14,7 +14,6 @@
#include <linux/reset.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc_helper.h>
#include <drm/drm_device.h>
#include <drm/drm_fbdev_generic.h>
#include <drm/drm_gem_dma_helper.h>
diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx_out.c b/drivers/gpu/drm/aspeed/aspeed_gfx_out.c
index 4f2187025a21..78775e0c853f 100644
--- a/drivers/gpu/drm/aspeed/aspeed_gfx_out.c
+++ b/drivers/gpu/drm/aspeed/aspeed_gfx_out.c
@@ -3,7 +3,6 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_connector.h>
-#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_probe_helper.h>
diff --git a/drivers/gpu/drm/ast/Kconfig b/drivers/gpu/drm/ast/Kconfig
index d367a90cd3de..563fa7a3b546 100644
--- a/drivers/gpu/drm/ast/Kconfig
+++ b/drivers/gpu/drm/ast/Kconfig
@@ -4,6 +4,8 @@ config DRM_AST
depends on DRM && PCI && MMU
select DRM_GEM_SHMEM_HELPER
select DRM_KMS_HELPER
+ select I2C
+ select I2C_ALGOBIT
help
Say yes for experimental AST GPU driver. Do not enable
this driver without having a working -modesetting,
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
index 420fc75c240e..d78852c7cf5b 100644
--- a/drivers/gpu/drm/ast/ast_drv.c
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -31,7 +31,6 @@
#include <drm/drm_aperture.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_generic.h>
#include <drm/drm_gem_shmem_helper.h>
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index bffa310a0431..f83ce77127cb 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -29,7 +29,6 @@
#include <linux/pci.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem.h>
#include <drm/drm_managed.h>
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index c7443317c747..984ec590a7e7 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -35,7 +35,6 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_format_helper.h>
@@ -636,7 +635,7 @@ static void ast_handle_damage(struct ast_plane *ast_plane, struct iosys_map *src
struct drm_framebuffer *fb,
const struct drm_rect *clip)
{
- struct iosys_map dst = IOSYS_MAP_INIT_VADDR(ast_plane->vaddr);
+ struct iosys_map dst = IOSYS_MAP_INIT_VADDR_IOMEM(ast_plane->vaddr);
iosys_map_incr(&dst, drm_fb_clip_offset(fb->pitches[0], fb->format, clip));
drm_fb_memcpy(&dst, fb->pitches, src, fb, clip);
@@ -714,7 +713,7 @@ static int ast_primary_plane_init(struct ast_private *ast)
struct ast_plane *ast_primary_plane = &ast->primary_plane;
struct drm_plane *primary_plane = &ast_primary_plane->base;
void __iomem *vaddr = ast->vram;
- u64 offset = ast->vram_base;
+ u64 offset = 0; /* with shmem, the primary plane is always at offset 0 */
unsigned long cursor_size = roundup(AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE, PAGE_SIZE);
unsigned long size = ast->vram_fb_available - cursor_size;
int ret;
@@ -972,7 +971,7 @@ static int ast_cursor_plane_init(struct ast_private *ast)
return -ENOMEM;
vaddr = ast->vram + ast->vram_fb_available - size;
- offset = ast->vram_base + ast->vram_fb_available - size;
+ offset = ast->vram_fb_available - size;
ret = ast_plane_init(dev, ast_cursor_plane, vaddr, offset, size,
0x01, &ast_cursor_plane_funcs,
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
index a2bb5b916235..4e806b06d35d 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
@@ -784,7 +784,6 @@ static int atmel_hlcdc_dc_drm_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
static int atmel_hlcdc_dc_drm_suspend(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
@@ -815,10 +814,10 @@ static int atmel_hlcdc_dc_drm_resume(struct device *dev)
return drm_atomic_helper_resume(drm_dev, dc->suspend.state);
}
-#endif
-static SIMPLE_DEV_PM_OPS(atmel_hlcdc_dc_drm_pm_ops,
- atmel_hlcdc_dc_drm_suspend, atmel_hlcdc_dc_drm_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(atmel_hlcdc_dc_drm_pm_ops,
+ atmel_hlcdc_dc_drm_suspend,
+ atmel_hlcdc_dc_drm_resume);
static const struct of_device_id atmel_hlcdc_dc_of_match[] = {
{ .compatible = "atmel,hlcdc-display-controller" },
@@ -830,7 +829,7 @@ static struct platform_driver atmel_hlcdc_dc_platform_driver = {
.remove = atmel_hlcdc_dc_drm_remove,
.driver = {
.name = "atmel-hlcdc-display-controller",
- .pm = &atmel_hlcdc_dc_drm_pm_ops,
+ .pm = pm_sleep_ptr(&atmel_hlcdc_dc_drm_pm_ops),
.of_match_table = atmel_hlcdc_dc_of_match,
},
};
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index 57946d80b02d..8b2226f72b24 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -15,17 +15,6 @@ config DRM_PANEL_BRIDGE
menu "Display Interface Bridges"
depends on DRM && DRM_BRIDGE
-config DRM_CDNS_DSI
- tristate "Cadence DPI/DSI bridge"
- select DRM_KMS_HELPER
- select DRM_MIPI_DSI
- select DRM_PANEL_BRIDGE
- select GENERIC_PHY_MIPI_DPHY
- depends on OF
- help
- Support Cadence DPI to DSI bridge. This is an internal
- bridge and is meant to be directly embedded in a SoC.
-
config DRM_CHIPONE_ICN6211
tristate "Chipone ICN6211 MIPI-DSI/RGB Converter bridge"
depends on OF
diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile
index 1884803c6860..52f6e8b4a821 100644
--- a/drivers/gpu/drm/bridge/Makefile
+++ b/drivers/gpu/drm/bridge/Makefile
@@ -1,5 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_DRM_CDNS_DSI) += cdns-dsi.o
obj-$(CONFIG_DRM_CHIPONE_ICN6211) += chipone-icn6211.o
obj-$(CONFIG_DRM_CHRONTEL_CH7033) += chrontel-ch7033.o
obj-$(CONFIG_DRM_CROS_EC_ANX7688) += cros-ec-anx7688.o
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
index e7a6e456ed0d..ddceafa7b637 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
@@ -1185,8 +1185,9 @@ static int adv7511_parse_dt(struct device_node *np,
return 0;
}
-static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
+static int adv7511_probe(struct i2c_client *i2c)
{
+ const struct i2c_device_id *id = i2c_client_get_device_id(i2c);
struct adv7511_link_config link_config;
struct adv7511 *adv7511;
struct device *dev = &i2c->dev;
@@ -1392,7 +1393,7 @@ static struct i2c_driver adv7511_driver = {
.of_match_table = adv7511_of_ids,
},
.id_table = adv7511_i2c_ids,
- .probe = adv7511_probe,
+ .probe_new = adv7511_probe,
.remove = adv7511_remove,
};
diff --git a/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c b/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
index 660a54857929..3577c532abb4 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
@@ -22,7 +22,6 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
@@ -692,8 +691,7 @@ static bool anx6345_get_chip_id(struct anx6345 *anx6345)
return false;
}
-static int anx6345_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int anx6345_i2c_probe(struct i2c_client *client)
{
struct anx6345 *anx6345;
struct device *dev;
@@ -817,7 +815,7 @@ static struct i2c_driver anx6345_driver = {
.name = "anx6345",
.of_match_table = of_match_ptr(anx6345_match_table),
},
- .probe = anx6345_i2c_probe,
+ .probe_new = anx6345_i2c_probe,
.remove = anx6345_i2c_remove,
.id_table = anx6345_id,
};
diff --git a/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c b/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
index 5997049fde5b..a3a38bbe2786 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
@@ -1214,8 +1214,7 @@ static const u16 anx78xx_chipid_list[] = {
0x7818,
};
-static int anx78xx_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int anx78xx_i2c_probe(struct i2c_client *client)
{
struct anx78xx *anx78xx;
struct anx78xx_platform_data *pdata;
@@ -1390,7 +1389,7 @@ static struct i2c_driver anx78xx_driver = {
.name = "anx7814",
.of_match_table = of_match_ptr(anx78xx_match_table),
},
- .probe = anx78xx_i2c_probe,
+ .probe_new = anx78xx_i2c_probe,
.remove = anx78xx_i2c_remove,
.id_table = anx78xx_id,
};
diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.c b/drivers/gpu/drm/bridge/analogix/anx7625.c
index b0ff1ecb80a5..6846199a2ee1 100644
--- a/drivers/gpu/drm/bridge/analogix/anx7625.c
+++ b/drivers/gpu/drm/bridge/analogix/anx7625.c
@@ -26,7 +26,6 @@
#include <drm/display/drm_hdcp_helper.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
-#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_of.h>
@@ -1403,7 +1402,6 @@ static void anx7625_stop_dp_work(struct anx7625_data *ctx)
{
ctx->hpd_status = 0;
ctx->hpd_high_cnt = 0;
- ctx->display_timing_valid = 0;
}
static void anx7625_start_dp_work(struct anx7625_data *ctx)
@@ -2562,8 +2560,7 @@ static void anx7625_runtime_disable(void *data)
pm_runtime_disable(data);
}
-static int anx7625_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int anx7625_i2c_probe(struct i2c_client *client)
{
struct anx7625_data *platform;
struct anx7625_platform_data *pdata;
@@ -2756,7 +2753,7 @@ static struct i2c_driver anx7625_driver = {
.of_match_table = anx_match_table,
.pm = &anx7625_pm_ops,
},
- .probe = anx7625_i2c_probe,
+ .probe_new = anx7625_i2c_probe,
.remove = anx7625_i2c_remove,
.id_table = anx7625_id,
diff --git a/drivers/gpu/drm/bridge/cadence/Kconfig b/drivers/gpu/drm/bridge/cadence/Kconfig
index 1d06182bea71..ec35215a2003 100644
--- a/drivers/gpu/drm/bridge/cadence/Kconfig
+++ b/drivers/gpu/drm/bridge/cadence/Kconfig
@@ -1,4 +1,25 @@
# SPDX-License-Identifier: GPL-2.0-only
+config DRM_CDNS_DSI
+ tristate "Cadence DPI/DSI bridge"
+ select DRM_KMS_HELPER
+ select DRM_MIPI_DSI
+ select DRM_PANEL_BRIDGE
+ select GENERIC_PHY_MIPI_DPHY
+ depends on OF
+ help
+ Support Cadence DPI to DSI bridge. This is an internal
+ bridge and is meant to be directly embedded in a SoC.
+
+if DRM_CDNS_DSI
+
+config DRM_CDNS_DSI_J721E
+ bool "J721E Cadence DSI wrapper support"
+ default y
+ help
+ Support J721E Cadence DSI wrapper. The wrapper manages
+ the routing of the DSS DPI signal to the Cadence DSI.
+endif
+
config DRM_CDNS_MHDP8546
tristate "Cadence DPI/DP bridge"
select DRM_DISPLAY_DP_HELPER
diff --git a/drivers/gpu/drm/bridge/cadence/Makefile b/drivers/gpu/drm/bridge/cadence/Makefile
index 4d2db8df1bc6..c95fd5b81d13 100644
--- a/drivers/gpu/drm/bridge/cadence/Makefile
+++ b/drivers/gpu/drm/bridge/cadence/Makefile
@@ -1,4 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_DRM_CDNS_DSI) += cdns-dsi.o
+cdns-dsi-y := cdns-dsi-core.o
+cdns-dsi-$(CONFIG_DRM_CDNS_DSI_J721E) += cdns-dsi-j721e.o
obj-$(CONFIG_DRM_CDNS_MHDP8546) += cdns-mhdp8546.o
cdns-mhdp8546-y := cdns-mhdp8546-core.o cdns-mhdp8546-hdcp.o
cdns-mhdp8546-$(CONFIG_DRM_CDNS_MHDP8546_J721E) += cdns-mhdp8546-j721e.o
diff --git a/drivers/gpu/drm/bridge/cdns-dsi.c b/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c
index 20bece84ff8c..5dbfc7226b31 100644
--- a/drivers/gpu/drm/bridge/cdns-dsi.c
+++ b/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c
@@ -6,10 +6,7 @@
*/
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_bridge.h>
#include <drm/drm_drv.h>
-#include <drm/drm_mipi_dsi.h>
-#include <drm/drm_panel.h>
#include <drm/drm_probe_helper.h>
#include <video/mipi_display.h>
@@ -18,14 +15,19 @@
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of_address.h>
+#include <linux/of_device.h>
#include <linux/of_graph.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
-#include <linux/phy/phy.h>
#include <linux/phy/phy-mipi-dphy.h>
+#include "cdns-dsi-core.h"
+#ifdef CONFIG_DRM_CDNS_DSI_J721E
+#include "cdns-dsi-j721e.h"
+#endif
+
#define IP_CONF 0x0
#define SP_HS_FIFO_DEPTH(x) (((x) & GENMASK(30, 26)) >> 26)
#define SP_LP_FIFO_DEPTH(x) (((x) & GENMASK(25, 21)) >> 21)
@@ -424,48 +426,6 @@
#define DSI_NULL_FRAME_OVERHEAD 6
#define DSI_EOT_PKT_SIZE 4
-struct cdns_dsi_output {
- struct mipi_dsi_device *dev;
- struct drm_panel *panel;
- struct drm_bridge *bridge;
- union phy_configure_opts phy_opts;
-};
-
-enum cdns_dsi_input_id {
- CDNS_SDI_INPUT,
- CDNS_DPI_INPUT,
- CDNS_DSC_INPUT,
-};
-
-struct cdns_dsi_cfg {
- unsigned int hfp;
- unsigned int hsa;
- unsigned int hbp;
- unsigned int hact;
- unsigned int htotal;
-};
-
-struct cdns_dsi_input {
- enum cdns_dsi_input_id id;
- struct drm_bridge bridge;
-};
-
-struct cdns_dsi {
- struct mipi_dsi_host base;
- void __iomem *regs;
- struct cdns_dsi_input input;
- struct cdns_dsi_output output;
- unsigned int direct_cmd_fifo_depth;
- unsigned int rx_fifo_depth;
- struct completion direct_cmd_comp;
- struct clk *dsi_p_clk;
- struct reset_control *dsi_p_rst;
- struct clk *dsi_sys_clk;
- bool link_initialized;
- bool phy_initialized;
- struct phy *dphy;
-};
-
static inline struct cdns_dsi *input_to_dsi(struct cdns_dsi_input *input)
{
return container_of(input, struct cdns_dsi, input);
@@ -709,6 +669,10 @@ static void cdns_dsi_bridge_disable(struct drm_bridge *bridge)
val = readl(dsi->regs + MCTL_MAIN_EN) & ~IF_EN(input->id);
writel(val, dsi->regs + MCTL_MAIN_EN);
+
+ if (dsi->platform_ops && dsi->platform_ops->disable)
+ dsi->platform_ops->disable(dsi);
+
pm_runtime_put(dsi->base.dev);
}
@@ -804,6 +768,9 @@ static void cdns_dsi_bridge_enable(struct drm_bridge *bridge)
if (WARN_ON(pm_runtime_get_sync(dsi->base.dev) < 0))
return;
+ if (dsi->platform_ops && dsi->platform_ops->enable)
+ dsi->platform_ops->enable(dsi);
+
mode = &bridge->encoder->crtc->state->adjusted_mode;
nlanes = output->dev->lanes;
@@ -1244,6 +1211,8 @@ static int cdns_dsi_drm_probe(struct platform_device *pdev)
goto err_disable_pclk;
}
+ dsi->platform_ops = of_device_get_match_data(&pdev->dev);
+
val = readl(dsi->regs + IP_CONF);
dsi->direct_cmd_fifo_depth = 1 << (DIRCMD_FIFO_DEPTH(val) + 2);
dsi->rx_fifo_depth = RX_FIFO_DEPTH(val);
@@ -1279,14 +1248,27 @@ static int cdns_dsi_drm_probe(struct platform_device *pdev)
dsi->base.dev = &pdev->dev;
dsi->base.ops = &cdns_dsi_ops;
+ if (dsi->platform_ops && dsi->platform_ops->init) {
+ ret = dsi->platform_ops->init(dsi);
+ if (ret != 0) {
+ dev_err(&pdev->dev, "platform initialization failed: %d\n",
+ ret);
+ goto err_disable_runtime_pm;
+ }
+ }
+
ret = mipi_dsi_host_register(&dsi->base);
if (ret)
- goto err_disable_runtime_pm;
+ goto err_deinit_platform;
clk_disable_unprepare(dsi->dsi_p_clk);
return 0;
+err_deinit_platform:
+ if (dsi->platform_ops && dsi->platform_ops->deinit)
+ dsi->platform_ops->deinit(dsi);
+
err_disable_runtime_pm:
pm_runtime_disable(&pdev->dev);
@@ -1301,6 +1283,10 @@ static int cdns_dsi_drm_remove(struct platform_device *pdev)
struct cdns_dsi *dsi = platform_get_drvdata(pdev);
mipi_dsi_host_unregister(&dsi->base);
+
+ if (dsi->platform_ops && dsi->platform_ops->deinit)
+ dsi->platform_ops->deinit(dsi);
+
pm_runtime_disable(&pdev->dev);
return 0;
@@ -1308,6 +1294,9 @@ static int cdns_dsi_drm_remove(struct platform_device *pdev)
static const struct of_device_id cdns_dsi_of_match[] = {
{ .compatible = "cdns,dsi" },
+#ifdef CONFIG_DRM_CDNS_DSI_J721E
+ { .compatible = "ti,j721e-dsi", .data = &dsi_ti_j721e_ops, },
+#endif
{ },
};
MODULE_DEVICE_TABLE(of, cdns_dsi_of_match);
diff --git a/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.h b/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.h
new file mode 100644
index 000000000000..ca7ea2da635c
--- /dev/null
+++ b/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.h
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright: 2017 Cadence Design Systems, Inc.
+ *
+ * Author: Boris Brezillon <boris.brezillon@bootlin.com>
+ */
+
+#ifndef __CDNS_DSI_H__
+#define __CDNS_DSI_H__
+
+#include <drm/drm_bridge.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_panel.h>
+
+#include <linux/bits.h>
+#include <linux/completion.h>
+#include <linux/phy/phy.h>
+
+struct clk;
+struct reset_control;
+
+struct cdns_dsi_output {
+ struct mipi_dsi_device *dev;
+ struct drm_panel *panel;
+ struct drm_bridge *bridge;
+ union phy_configure_opts phy_opts;
+};
+
+enum cdns_dsi_input_id {
+ CDNS_SDI_INPUT,
+ CDNS_DPI_INPUT,
+ CDNS_DSC_INPUT,
+};
+
+struct cdns_dsi_cfg {
+ unsigned int hfp;
+ unsigned int hsa;
+ unsigned int hbp;
+ unsigned int hact;
+ unsigned int htotal;
+};
+
+struct cdns_dsi_input {
+ enum cdns_dsi_input_id id;
+ struct drm_bridge bridge;
+};
+
+struct cdns_dsi;
+
+/**
+ * struct cdns_dsi_platform_ops - CDNS DSI Platform operations
+ * @init: Called in the CDNS DSI probe
+ * @deinit: Called in the CDNS DSI remove
+ * @enable: Called at the beginning of CDNS DSI bridge enable
+ * @disable: Called at the end of CDNS DSI bridge disable
+ */
+struct cdns_dsi_platform_ops {
+ int (*init)(struct cdns_dsi *dsi);
+ void (*deinit)(struct cdns_dsi *dsi);
+ void (*enable)(struct cdns_dsi *dsi);
+ void (*disable)(struct cdns_dsi *dsi);
+};
+
+struct cdns_dsi {
+ struct mipi_dsi_host base;
+ void __iomem *regs;
+#ifdef CONFIG_DRM_CDNS_DSI_J721E
+ void __iomem *j721e_regs;
+#endif
+ const struct cdns_dsi_platform_ops *platform_ops;
+ struct cdns_dsi_input input;
+ struct cdns_dsi_output output;
+ unsigned int direct_cmd_fifo_depth;
+ unsigned int rx_fifo_depth;
+ struct completion direct_cmd_comp;
+ struct clk *dsi_p_clk;
+ struct reset_control *dsi_p_rst;
+ struct clk *dsi_sys_clk;
+ bool link_initialized;
+ bool phy_initialized;
+ struct phy *dphy;
+};
+
+#endif /* !__CDNS_DSI_H__ */
diff --git a/drivers/gpu/drm/bridge/cadence/cdns-dsi-j721e.c b/drivers/gpu/drm/bridge/cadence/cdns-dsi-j721e.c
new file mode 100644
index 000000000000..b654d4b3cb5c
--- /dev/null
+++ b/drivers/gpu/drm/bridge/cadence/cdns-dsi-j721e.c
@@ -0,0 +1,51 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * TI j721e Cadence DSI wrapper
+ *
+ * Copyright (C) 2022 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Rahul T R <r-ravikumar@ti.com>
+ */
+
+#include <linux/io.h>
+#include <linux/platform_device.h>
+
+#include "cdns-dsi-j721e.h"
+
+#define DSI_WRAP_REVISION 0x0
+#define DSI_WRAP_DPI_CONTROL 0x4
+#define DSI_WRAP_DSC_CONTROL 0x8
+#define DSI_WRAP_DPI_SECURE 0xc
+#define DSI_WRAP_DSI_0_ASF_STATUS 0x10
+
+#define DSI_WRAP_DPI_0_EN BIT(0)
+#define DSI_WRAP_DSI2_MUX_SEL BIT(4)
+
+static int cdns_dsi_j721e_init(struct cdns_dsi *dsi)
+{
+ struct platform_device *pdev = to_platform_device(dsi->base.dev);
+
+ dsi->j721e_regs = devm_platform_ioremap_resource(pdev, 1);
+ return PTR_ERR_OR_ZERO(dsi->j721e_regs);
+}
+
+static void cdns_dsi_j721e_enable(struct cdns_dsi *dsi)
+{
+ /*
+ * Enable DPI0 as its input. DSS0 DPI2 is connected
+ * to DSI DPI0. This is the only supported configuration on
+ * J721E.
+ */
+ writel(DSI_WRAP_DPI_0_EN, dsi->j721e_regs + DSI_WRAP_DPI_CONTROL);
+}
+
+static void cdns_dsi_j721e_disable(struct cdns_dsi *dsi)
+{
+ /* Put everything to defaults */
+ writel(0, dsi->j721e_regs + DSI_WRAP_DPI_CONTROL);
+}
+
+const struct cdns_dsi_platform_ops dsi_ti_j721e_ops = {
+ .init = cdns_dsi_j721e_init,
+ .enable = cdns_dsi_j721e_enable,
+ .disable = cdns_dsi_j721e_disable,
+};
diff --git a/drivers/gpu/drm/bridge/cadence/cdns-dsi-j721e.h b/drivers/gpu/drm/bridge/cadence/cdns-dsi-j721e.h
new file mode 100644
index 000000000000..275e5e8e7583
--- /dev/null
+++ b/drivers/gpu/drm/bridge/cadence/cdns-dsi-j721e.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * TI j721e Cadence DSI wrapper
+ *
+ * Copyright (C) 2022 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Rahul T R <r-ravikumar@ti.com>
+ */
+
+#ifndef __CDNS_DSI_J721E_H__
+#define __CDNS_DSI_J721E_H__
+
+#include "cdns-dsi-core.h"
+
+extern const struct cdns_dsi_platform_ops dsi_ti_j721e_ops;
+
+#endif /* !__CDNS_DSI_J721E_H__ */
diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
index 31442a922502..f6822dfa3805 100644
--- a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
+++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
@@ -43,7 +43,6 @@
#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_connector.h>
-#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_print.h>
diff --git a/drivers/gpu/drm/bridge/chipone-icn6211.c b/drivers/gpu/drm/bridge/chipone-icn6211.c
index bf920c3503aa..0e37840cd7a8 100644
--- a/drivers/gpu/drm/bridge/chipone-icn6211.c
+++ b/drivers/gpu/drm/bridge/chipone-icn6211.c
@@ -740,8 +740,7 @@ static int chipone_dsi_probe(struct mipi_dsi_device *dsi)
return ret;
}
-static int chipone_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int chipone_i2c_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct chipone *icn;
@@ -796,7 +795,7 @@ static struct i2c_device_id chipone_i2c_id[] = {
MODULE_DEVICE_TABLE(i2c, chipone_i2c_id);
static struct i2c_driver chipone_i2c_driver = {
- .probe = chipone_i2c_probe,
+ .probe_new = chipone_i2c_probe,
.id_table = chipone_i2c_id,
.driver = {
.name = "chipone-icn6211-i2c",
diff --git a/drivers/gpu/drm/bridge/chrontel-ch7033.c b/drivers/gpu/drm/bridge/chrontel-ch7033.c
index b94f39a86846..339b759e4c81 100644
--- a/drivers/gpu/drm/bridge/chrontel-ch7033.c
+++ b/drivers/gpu/drm/bridge/chrontel-ch7033.c
@@ -528,8 +528,7 @@ static const struct regmap_config ch7033_regmap_config = {
.max_register = 0x7f,
};
-static int ch7033_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int ch7033_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct ch7033_priv *priv;
@@ -604,7 +603,7 @@ static const struct i2c_device_id ch7033_ids[] = {
MODULE_DEVICE_TABLE(i2c, ch7033_ids);
static struct i2c_driver ch7033_driver = {
- .probe = ch7033_probe,
+ .probe_new = ch7033_probe,
.remove = ch7033_remove,
.driver = {
.name = "ch7033",
diff --git a/drivers/gpu/drm/bridge/fsl-ldb.c b/drivers/gpu/drm/bridge/fsl-ldb.c
index f9e0f8d99268..6bac160b395b 100644
--- a/drivers/gpu/drm/bridge/fsl-ldb.c
+++ b/drivers/gpu/drm/bridge/fsl-ldb.c
@@ -18,7 +18,6 @@
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
-#define LDB_CTRL 0x5c
#define LDB_CTRL_CH0_ENABLE BIT(0)
#define LDB_CTRL_CH0_DI_SELECT BIT(1)
#define LDB_CTRL_CH1_ENABLE BIT(2)
@@ -35,9 +34,13 @@
#define LDB_CTRL_ASYNC_FIFO_ENABLE BIT(24)
#define LDB_CTRL_ASYNC_FIFO_THRESHOLD_MASK GENMASK(27, 25)
-#define LVDS_CTRL 0x128
#define LVDS_CTRL_CH0_EN BIT(0)
#define LVDS_CTRL_CH1_EN BIT(1)
+/*
+ * LVDS_CTRL_LVDS_EN bit is poorly named in i.MX93 reference manual.
+ * Clear it to enable LVDS and set it to disable LVDS.
+ */
+#define LVDS_CTRL_LVDS_EN BIT(1)
#define LVDS_CTRL_VBG_EN BIT(2)
#define LVDS_CTRL_HS_EN BIT(3)
#define LVDS_CTRL_PRE_EMPH_EN BIT(4)
@@ -52,6 +55,29 @@
#define LVDS_CTRL_VBG_ADJ(n) (((n) & 0x7) << 17)
#define LVDS_CTRL_VBG_ADJ_MASK GENMASK(19, 17)
+enum fsl_ldb_devtype {
+ IMX8MP_LDB,
+ IMX93_LDB,
+};
+
+struct fsl_ldb_devdata {
+ u32 ldb_ctrl;
+ u32 lvds_ctrl;
+ bool lvds_en_bit;
+};
+
+static const struct fsl_ldb_devdata fsl_ldb_devdata[] = {
+ [IMX8MP_LDB] = {
+ .ldb_ctrl = 0x5c,
+ .lvds_ctrl = 0x128,
+ },
+ [IMX93_LDB] = {
+ .ldb_ctrl = 0x20,
+ .lvds_ctrl = 0x24,
+ .lvds_en_bit = true,
+ },
+};
+
struct fsl_ldb {
struct device *dev;
struct drm_bridge bridge;
@@ -59,6 +85,7 @@ struct fsl_ldb {
struct clk *clk;
struct regmap *regmap;
bool lvds_dual_link;
+ const struct fsl_ldb_devdata *devdata;
};
static inline struct fsl_ldb *to_fsl_ldb(struct drm_bridge *bridge)
@@ -66,6 +93,14 @@ static inline struct fsl_ldb *to_fsl_ldb(struct drm_bridge *bridge)
return container_of(bridge, struct fsl_ldb, bridge);
}
+static unsigned long fsl_ldb_link_frequency(struct fsl_ldb *fsl_ldb, int clock)
+{
+ if (fsl_ldb->lvds_dual_link)
+ return clock * 3500;
+ else
+ return clock * 7000;
+}
+
static int fsl_ldb_attach(struct drm_bridge *bridge,
enum drm_bridge_attach_flags flags)
{
@@ -85,6 +120,8 @@ static void fsl_ldb_atomic_enable(struct drm_bridge *bridge,
const struct drm_display_mode *mode;
struct drm_connector *connector;
struct drm_crtc *crtc;
+ unsigned long configured_link_freq;
+ unsigned long requested_link_freq;
bool lvds_format_24bpp;
bool lvds_format_jeida;
u32 reg;
@@ -128,10 +165,15 @@ static void fsl_ldb_atomic_enable(struct drm_bridge *bridge,
crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
mode = &crtc_state->adjusted_mode;
- if (fsl_ldb->lvds_dual_link)
- clk_set_rate(fsl_ldb->clk, mode->clock * 3500);
- else
- clk_set_rate(fsl_ldb->clk, mode->clock * 7000);
+ requested_link_freq = fsl_ldb_link_frequency(fsl_ldb, mode->clock);
+ clk_set_rate(fsl_ldb->clk, requested_link_freq);
+
+ configured_link_freq = clk_get_rate(fsl_ldb->clk);
+ if (configured_link_freq != requested_link_freq)
+ dev_warn(fsl_ldb->dev, "Configured LDB clock (%lu Hz) does not match requested LVDS clock: %lu Hz",
+ configured_link_freq,
+ requested_link_freq);
+
clk_prepare_enable(fsl_ldb->clk);
/* Program LDB_CTRL */
@@ -158,12 +200,12 @@ static void fsl_ldb_atomic_enable(struct drm_bridge *bridge,
reg |= LDB_CTRL_DI1_VSYNC_POLARITY;
}
- regmap_write(fsl_ldb->regmap, LDB_CTRL, reg);
+ regmap_write(fsl_ldb->regmap, fsl_ldb->devdata->ldb_ctrl, reg);
/* Program LVDS_CTRL */
reg = LVDS_CTRL_CC_ADJ(2) | LVDS_CTRL_PRE_EMPH_EN |
LVDS_CTRL_PRE_EMPH_ADJ(3) | LVDS_CTRL_VBG_EN;
- regmap_write(fsl_ldb->regmap, LVDS_CTRL, reg);
+ regmap_write(fsl_ldb->regmap, fsl_ldb->devdata->lvds_ctrl, reg);
/* Wait for VBG to stabilize. */
usleep_range(15, 20);
@@ -172,7 +214,7 @@ static void fsl_ldb_atomic_enable(struct drm_bridge *bridge,
if (fsl_ldb->lvds_dual_link)
reg |= LVDS_CTRL_CH1_EN;
- regmap_write(fsl_ldb->regmap, LVDS_CTRL, reg);
+ regmap_write(fsl_ldb->regmap, fsl_ldb->devdata->lvds_ctrl, reg);
}
static void fsl_ldb_atomic_disable(struct drm_bridge *bridge,
@@ -180,9 +222,14 @@ static void fsl_ldb_atomic_disable(struct drm_bridge *bridge,
{
struct fsl_ldb *fsl_ldb = to_fsl_ldb(bridge);
- /* Stop both channels. */
- regmap_write(fsl_ldb->regmap, LVDS_CTRL, 0);
- regmap_write(fsl_ldb->regmap, LDB_CTRL, 0);
+ /* Stop channel(s). */
+ if (fsl_ldb->devdata->lvds_en_bit)
+ /* Set LVDS_CTRL_LVDS_EN bit to disable. */
+ regmap_write(fsl_ldb->regmap, fsl_ldb->devdata->lvds_ctrl,
+ LVDS_CTRL_LVDS_EN);
+ else
+ regmap_write(fsl_ldb->regmap, fsl_ldb->devdata->lvds_ctrl, 0);
+ regmap_write(fsl_ldb->regmap, fsl_ldb->devdata->ldb_ctrl, 0);
clk_disable_unprepare(fsl_ldb->clk);
}
@@ -248,6 +295,10 @@ static int fsl_ldb_probe(struct platform_device *pdev)
if (!fsl_ldb)
return -ENOMEM;
+ fsl_ldb->devdata = of_device_get_match_data(dev);
+ if (!fsl_ldb->devdata)
+ return -EINVAL;
+
fsl_ldb->dev = &pdev->dev;
fsl_ldb->bridge.funcs = &funcs;
fsl_ldb->bridge.of_node = dev->of_node;
@@ -306,7 +357,10 @@ static int fsl_ldb_remove(struct platform_device *pdev)
}
static const struct of_device_id fsl_ldb_match[] = {
- { .compatible = "fsl,imx8mp-ldb", },
+ { .compatible = "fsl,imx8mp-ldb",
+ .data = &fsl_ldb_devdata[IMX8MP_LDB], },
+ { .compatible = "fsl,imx93-ldb",
+ .data = &fsl_ldb_devdata[IMX93_LDB], },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, fsl_ldb_match);
diff --git a/drivers/gpu/drm/bridge/ite-it6505.c b/drivers/gpu/drm/bridge/ite-it6505.c
index 21a9b8422bda..bc451b2a77c2 100644
--- a/drivers/gpu/drm/bridge/ite-it6505.c
+++ b/drivers/gpu/drm/bridge/ite-it6505.c
@@ -26,7 +26,6 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
@@ -412,7 +411,6 @@ struct it6505 {
* Mutex protects extcon and interrupt functions from interfering
* each other.
*/
- struct mutex irq_lock;
struct mutex extcon_lock;
struct mutex mode_lock; /* used to bridge_detect */
struct mutex aux_lock; /* used to aux data transfers */
@@ -438,6 +436,8 @@ struct it6505 {
bool powered;
bool hpd_state;
u32 afe_setting;
+ u32 max_dpi_pixel_clock;
+ u32 max_lane_count;
enum hdcp_state hdcp_status;
struct delayed_work hdcp_work;
struct work_struct hdcp_wait_ksv_list;
@@ -457,6 +457,8 @@ struct it6505 {
/* it6505 driver hold option */
bool enable_drv_hold;
+
+ struct edid *cached_edid;
};
struct it6505_step_train_para {
@@ -1463,7 +1465,8 @@ static void it6505_parse_link_capabilities(struct it6505 *it6505)
it6505->lane_count = link->num_lanes;
DRM_DEV_DEBUG_DRIVER(dev, "Sink support %d lanes training",
it6505->lane_count);
- it6505->lane_count = min_t(int, it6505->lane_count, MAX_LANE_COUNT);
+ it6505->lane_count = min_t(int, it6505->lane_count,
+ it6505->max_lane_count);
it6505->branch_device = drm_dp_is_branch(it6505->dpcd);
DRM_DEV_DEBUG_DRIVER(dev, "Sink %sbranch device",
@@ -2244,6 +2247,12 @@ static void it6505_plugged_status_to_codec(struct it6505 *it6505)
status == connector_status_connected);
}
+static void it6505_remove_edid(struct it6505 *it6505)
+{
+ kfree(it6505->cached_edid);
+ it6505->cached_edid = NULL;
+}
+
static int it6505_process_hpd_irq(struct it6505 *it6505)
{
struct device *dev = &it6505->client->dev;
@@ -2270,6 +2279,7 @@ static int it6505_process_hpd_irq(struct it6505 *it6505)
it6505_reset_logic(it6505);
it6505_int_mask_enable(it6505);
it6505_init(it6505);
+ it6505_remove_edid(it6505);
return 0;
}
@@ -2353,6 +2363,7 @@ static void it6505_irq_hpd(struct it6505 *it6505)
it6505_video_reset(it6505);
} else {
memset(it6505->dpcd, 0, sizeof(it6505->dpcd));
+ it6505_remove_edid(it6505);
if (it6505->hdcp_desired)
it6505_stop_hdcp(it6505);
@@ -2494,10 +2505,8 @@ static irqreturn_t it6505_int_threaded_handler(int unused, void *data)
};
int int_status[3], i;
- mutex_lock(&it6505->irq_lock);
-
- if (it6505->enable_drv_hold || !it6505->powered)
- goto unlock;
+ if (it6505->enable_drv_hold || pm_runtime_get_if_in_use(dev) <= 0)
+ return IRQ_HANDLED;
int_status[0] = it6505_read(it6505, INT_STATUS_01);
int_status[1] = it6505_read(it6505, INT_STATUS_02);
@@ -2515,16 +2524,14 @@ static irqreturn_t it6505_int_threaded_handler(int unused, void *data)
if (it6505_test_bit(irq_vec[0].bit, (unsigned int *)int_status))
irq_vec[0].handler(it6505);
- if (!it6505->hpd_state)
- goto unlock;
-
- for (i = 1; i < ARRAY_SIZE(irq_vec); i++) {
- if (it6505_test_bit(irq_vec[i].bit, (unsigned int *)int_status))
- irq_vec[i].handler(it6505);
+ if (it6505->hpd_state) {
+ for (i = 1; i < ARRAY_SIZE(irq_vec); i++) {
+ if (it6505_test_bit(irq_vec[i].bit, (unsigned int *)int_status))
+ irq_vec[i].handler(it6505);
+ }
}
-unlock:
- mutex_unlock(&it6505->irq_lock);
+ pm_runtime_put_sync(dev);
return IRQ_HANDLED;
}
@@ -2902,7 +2909,7 @@ it6505_bridge_mode_valid(struct drm_bridge *bridge,
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
return MODE_NO_INTERLACE;
- if (mode->clock > DPI_PIXEL_CLK_MAX)
+ if (mode->clock > it6505->max_dpi_pixel_clock)
return MODE_CLOCK_HIGH;
it6505->video_info.clock = mode->clock;
@@ -3016,16 +3023,18 @@ static struct edid *it6505_bridge_get_edid(struct drm_bridge *bridge,
{
struct it6505 *it6505 = bridge_to_it6505(bridge);
struct device *dev = &it6505->client->dev;
- struct edid *edid;
- edid = drm_do_get_edid(connector, it6505_get_edid_block, it6505);
+ if (!it6505->cached_edid) {
+ it6505->cached_edid = drm_do_get_edid(connector, it6505_get_edid_block,
+ it6505);
- if (!edid) {
- DRM_DEV_DEBUG_DRIVER(dev, "failed to get edid!");
- return NULL;
+ if (!it6505->cached_edid) {
+ DRM_DEV_DEBUG_DRIVER(dev, "failed to get edid!");
+ return NULL;
+ }
}
- return edid;
+ return drm_edid_duplicate(it6505->cached_edid);
}
static const struct drm_bridge_funcs it6505_bridge_funcs = {
@@ -3089,10 +3098,32 @@ static int it6505_init_pdata(struct it6505 *it6505)
return 0;
}
+static int it6505_get_data_lanes_count(const struct device_node *endpoint,
+ const unsigned int min,
+ const unsigned int max)
+{
+ int ret;
+
+ ret = of_property_count_u32_elems(endpoint, "data-lanes");
+ if (ret < 0)
+ return ret;
+
+ if (ret < min || ret > max)
+ return -EINVAL;
+
+ return ret;
+}
+
static void it6505_parse_dt(struct it6505 *it6505)
{
struct device *dev = &it6505->client->dev;
+ struct device_node *np = dev->of_node, *ep = NULL;
+ int len;
+ u64 link_frequencies;
+ u32 data_lanes[4];
u32 *afe_setting = &it6505->afe_setting;
+ u32 *max_lane_count = &it6505->max_lane_count;
+ u32 *max_dpi_pixel_clock = &it6505->max_dpi_pixel_clock;
it6505->lane_swap_disabled =
device_property_read_bool(dev, "no-laneswap");
@@ -3108,7 +3139,56 @@ static void it6505_parse_dt(struct it6505 *it6505)
} else {
*afe_setting = 0;
}
- DRM_DEV_DEBUG_DRIVER(dev, "using afe_setting: %d", *afe_setting);
+
+ ep = of_graph_get_endpoint_by_regs(np, 1, 0);
+ of_node_put(ep);
+
+ if (ep) {
+ len = it6505_get_data_lanes_count(ep, 1, 4);
+
+ if (len > 0 && len != 3) {
+ of_property_read_u32_array(ep, "data-lanes",
+ data_lanes, len);
+ *max_lane_count = len;
+ } else {
+ *max_lane_count = MAX_LANE_COUNT;
+ dev_err(dev, "error data-lanes, use default");
+ }
+ } else {
+ *max_lane_count = MAX_LANE_COUNT;
+ dev_err(dev, "error endpoint, use default");
+ }
+
+ ep = of_graph_get_endpoint_by_regs(np, 0, 0);
+ of_node_put(ep);
+
+ if (ep) {
+ len = of_property_read_variable_u64_array(ep,
+ "link-frequencies",
+ &link_frequencies, 0,
+ 1);
+ if (len >= 0) {
+ do_div(link_frequencies, 1000);
+ if (link_frequencies > 297000) {
+ dev_err(dev,
+ "max pixel clock error, use default");
+ *max_dpi_pixel_clock = DPI_PIXEL_CLK_MAX;
+ } else {
+ *max_dpi_pixel_clock = link_frequencies;
+ }
+ } else {
+ dev_err(dev, "error link frequencies, use default");
+ *max_dpi_pixel_clock = DPI_PIXEL_CLK_MAX;
+ }
+ } else {
+ dev_err(dev, "error endpoint, use default");
+ *max_dpi_pixel_clock = DPI_PIXEL_CLK_MAX;
+ }
+
+ DRM_DEV_DEBUG_DRIVER(dev, "using afe_setting: %u, max_lane_count: %u",
+ it6505->afe_setting, it6505->max_lane_count);
+ DRM_DEV_DEBUG_DRIVER(dev, "using max_dpi_pixel_clock: %u kHz",
+ it6505->max_dpi_pixel_clock);
}
static ssize_t receive_timing_debugfs_show(struct file *file, char __user *buf,
@@ -3265,8 +3345,7 @@ static void it6505_shutdown(struct i2c_client *client)
it6505_lane_off(it6505);
}
-static int it6505_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int it6505_i2c_probe(struct i2c_client *client)
{
struct it6505 *it6505;
struct device *dev = &client->dev;
@@ -3277,7 +3356,6 @@ static int it6505_i2c_probe(struct i2c_client *client,
if (!it6505)
return -ENOMEM;
- mutex_init(&it6505->irq_lock);
mutex_init(&it6505->extcon_lock);
mutex_init(&it6505->mode_lock);
mutex_init(&it6505->aux_lock);
@@ -3367,6 +3445,7 @@ static void it6505_i2c_remove(struct i2c_client *client)
drm_dp_aux_unregister(&it6505->aux);
it6505_debugfs_remove(it6505);
it6505_poweroff(it6505);
+ it6505_remove_edid(it6505);
}
static const struct i2c_device_id it6505_id[] = {
@@ -3387,7 +3466,7 @@ static struct i2c_driver it6505_i2c_driver = {
.of_match_table = it6505_of_match,
.pm = &it6505_bridge_pm_ops,
},
- .probe = it6505_i2c_probe,
+ .probe_new = it6505_i2c_probe,
.remove = it6505_i2c_remove,
.shutdown = it6505_shutdown,
.id_table = it6505_id,
diff --git a/drivers/gpu/drm/bridge/ite-it66121.c b/drivers/gpu/drm/bridge/ite-it66121.c
index 4f6f1deba28c..a2d723d6a4be 100644
--- a/drivers/gpu/drm/bridge/ite-it66121.c
+++ b/drivers/gpu/drm/bridge/ite-it66121.c
@@ -22,7 +22,6 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
-#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_modes.h>
#include <drm/drm_print.h>
@@ -35,10 +34,6 @@
#define IT66121_DEVICE_ID0_REG 0x02
#define IT66121_DEVICE_ID1_REG 0x03
-#define IT66121_VENDOR_ID0 0x54
-#define IT66121_VENDOR_ID1 0x49
-#define IT66121_DEVICE_ID0 0x12
-#define IT66121_DEVICE_ID1 0x06
#define IT66121_REVISION_MASK GENMASK(7, 4)
#define IT66121_DEVICE_ID1_MASK GENMASK(3, 0)
@@ -72,6 +67,7 @@
#define IT66121_AFE_XP_ENO BIT(4)
#define IT66121_AFE_XP_RESETB BIT(3)
#define IT66121_AFE_XP_PWDI BIT(2)
+#define IT6610_AFE_XP_BYPASS BIT(0)
#define IT66121_AFE_IP_REG 0x64
#define IT66121_AFE_IP_GAINBIT BIT(7)
@@ -286,13 +282,18 @@
#define IT66121_AUD_SWL_16BIT 0x2
#define IT66121_AUD_SWL_NOT_INDICATED 0x0
-#define IT66121_VENDOR_ID0 0x54
-#define IT66121_VENDOR_ID1 0x49
-#define IT66121_DEVICE_ID0 0x12
-#define IT66121_DEVICE_ID1 0x06
-#define IT66121_DEVICE_MASK 0x0F
#define IT66121_AFE_CLK_HIGH 80000 /* Khz */
+enum chip_id {
+ ID_IT6610,
+ ID_IT66121,
+};
+
+struct it66121_chip_info {
+ enum chip_id id;
+ u16 vid, pid;
+};
+
struct it66121_ctx {
struct regmap *regmap;
struct drm_bridge bridge;
@@ -301,7 +302,6 @@ struct it66121_ctx {
struct device *dev;
struct gpio_desc *gpio_reset;
struct i2c_client *client;
- struct regulator_bulk_data supplies[3];
u32 bus_width;
struct mutex lock; /* Protects fields below and device registers */
struct hdmi_avi_infoframe hdmi_avi_infoframe;
@@ -312,6 +312,7 @@ struct it66121_ctx {
u8 swl;
bool auto_cts;
} audio;
+ const struct it66121_chip_info *info;
};
static const struct regmap_range_cfg it66121_regmap_banks[] = {
@@ -342,16 +343,6 @@ static void it66121_hw_reset(struct it66121_ctx *ctx)
gpiod_set_value(ctx->gpio_reset, 0);
}
-static inline int ite66121_power_on(struct it66121_ctx *ctx)
-{
- return regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
-}
-
-static inline int ite66121_power_off(struct it66121_ctx *ctx)
-{
- return regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
-}
-
static inline int it66121_preamble_ddc(struct it66121_ctx *ctx)
{
return regmap_write(ctx->regmap, IT66121_MASTER_SEL_REG, IT66121_MASTER_SEL_HOST);
@@ -406,16 +397,22 @@ static int it66121_configure_afe(struct it66121_ctx *ctx,
ret = regmap_write_bits(ctx->regmap, IT66121_AFE_IP_REG,
IT66121_AFE_IP_GAINBIT |
- IT66121_AFE_IP_ER0 |
- IT66121_AFE_IP_EC1,
+ IT66121_AFE_IP_ER0,
IT66121_AFE_IP_GAINBIT);
if (ret)
return ret;
- ret = regmap_write_bits(ctx->regmap, IT66121_AFE_XP_EC1_REG,
- IT66121_AFE_XP_EC1_LOWCLK, 0x80);
- if (ret)
- return ret;
+ if (ctx->info->id == ID_IT66121) {
+ ret = regmap_write_bits(ctx->regmap, IT66121_AFE_IP_REG,
+ IT66121_AFE_IP_EC1, 0);
+ if (ret)
+ return ret;
+
+ ret = regmap_write_bits(ctx->regmap, IT66121_AFE_XP_EC1_REG,
+ IT66121_AFE_XP_EC1_LOWCLK, 0x80);
+ if (ret)
+ return ret;
+ }
} else {
ret = regmap_write_bits(ctx->regmap, IT66121_AFE_XP_REG,
IT66121_AFE_XP_GAINBIT |
@@ -426,17 +423,24 @@ static int it66121_configure_afe(struct it66121_ctx *ctx,
ret = regmap_write_bits(ctx->regmap, IT66121_AFE_IP_REG,
IT66121_AFE_IP_GAINBIT |
- IT66121_AFE_IP_ER0 |
- IT66121_AFE_IP_EC1, IT66121_AFE_IP_ER0 |
- IT66121_AFE_IP_EC1);
+ IT66121_AFE_IP_ER0,
+ IT66121_AFE_IP_ER0);
if (ret)
return ret;
- ret = regmap_write_bits(ctx->regmap, IT66121_AFE_XP_EC1_REG,
- IT66121_AFE_XP_EC1_LOWCLK,
- IT66121_AFE_XP_EC1_LOWCLK);
- if (ret)
- return ret;
+ if (ctx->info->id == ID_IT66121) {
+ ret = regmap_write_bits(ctx->regmap, IT66121_AFE_IP_REG,
+ IT66121_AFE_IP_EC1,
+ IT66121_AFE_IP_EC1);
+ if (ret)
+ return ret;
+
+ ret = regmap_write_bits(ctx->regmap, IT66121_AFE_XP_EC1_REG,
+ IT66121_AFE_XP_EC1_LOWCLK,
+ IT66121_AFE_XP_EC1_LOWCLK);
+ if (ret)
+ return ret;
+ }
}
/* Clear reset flags */
@@ -445,38 +449,36 @@ static int it66121_configure_afe(struct it66121_ctx *ctx,
if (ret)
return ret;
+ if (ctx->info->id == ID_IT6610) {
+ ret = regmap_write_bits(ctx->regmap, IT66121_AFE_XP_REG,
+ IT6610_AFE_XP_BYPASS,
+ IT6610_AFE_XP_BYPASS);
+ if (ret)
+ return ret;
+ }
+
return it66121_fire_afe(ctx);
}
static inline int it66121_wait_ddc_ready(struct it66121_ctx *ctx)
{
int ret, val;
- u32 busy = IT66121_DDC_STATUS_NOACK | IT66121_DDC_STATUS_WAIT_BUS |
- IT66121_DDC_STATUS_ARBI_LOSE;
+ u32 error = IT66121_DDC_STATUS_NOACK | IT66121_DDC_STATUS_WAIT_BUS |
+ IT66121_DDC_STATUS_ARBI_LOSE;
+ u32 done = IT66121_DDC_STATUS_TX_DONE;
- ret = regmap_read_poll_timeout(ctx->regmap, IT66121_DDC_STATUS_REG, val, true,
- IT66121_EDID_SLEEP_US, IT66121_EDID_TIMEOUT_US);
+ ret = regmap_read_poll_timeout(ctx->regmap, IT66121_DDC_STATUS_REG, val,
+ val & (error | done), IT66121_EDID_SLEEP_US,
+ IT66121_EDID_TIMEOUT_US);
if (ret)
return ret;
- if (val & busy)
+ if (val & error)
return -EAGAIN;
return 0;
}
-static int it66121_clear_ddc_fifo(struct it66121_ctx *ctx)
-{
- int ret;
-
- ret = it66121_preamble_ddc(ctx);
- if (ret)
- return ret;
-
- return regmap_write(ctx->regmap, IT66121_DDC_COMMAND_REG,
- IT66121_DDC_COMMAND_FIFO_CLR);
-}
-
static int it66121_abort_ddc_ops(struct it66121_ctx *ctx)
{
int ret;
@@ -516,7 +518,6 @@ static int it66121_get_edid_block(void *context, u8 *buf,
unsigned int block, size_t len)
{
struct it66121_ctx *ctx = context;
- unsigned int val;
int remain = len;
int offset = 0;
int ret, cnt;
@@ -524,26 +525,9 @@ static int it66121_get_edid_block(void *context, u8 *buf,
offset = (block % 2) * len;
block = block / 2;
- ret = regmap_read(ctx->regmap, IT66121_INT_STATUS1_REG, &val);
- if (ret)
- return ret;
-
- if (val & IT66121_INT_STATUS1_DDC_BUSHANG) {
- ret = it66121_abort_ddc_ops(ctx);
- if (ret)
- return ret;
- }
-
- ret = it66121_clear_ddc_fifo(ctx);
- if (ret)
- return ret;
-
while (remain > 0) {
cnt = (remain > IT66121_EDID_FIFO_SIZE) ?
IT66121_EDID_FIFO_SIZE : remain;
- ret = it66121_preamble_ddc(ctx);
- if (ret)
- return ret;
ret = regmap_write(ctx->regmap, IT66121_DDC_COMMAND_REG,
IT66121_DDC_COMMAND_FIFO_CLR);
@@ -554,25 +538,6 @@ static int it66121_get_edid_block(void *context, u8 *buf,
if (ret)
return ret;
- ret = regmap_read(ctx->regmap, IT66121_INT_STATUS1_REG, &val);
- if (ret)
- return ret;
-
- if (val & IT66121_INT_STATUS1_DDC_BUSHANG) {
- ret = it66121_abort_ddc_ops(ctx);
- if (ret)
- return ret;
- }
-
- ret = it66121_preamble_ddc(ctx);
- if (ret)
- return ret;
-
- ret = regmap_write(ctx->regmap, IT66121_DDC_HEADER_REG,
- IT66121_DDC_HEADER_EDID);
- if (ret)
- return ret;
-
ret = regmap_write(ctx->regmap, IT66121_DDC_OFFSET_REG, offset);
if (ret)
return ret;
@@ -593,20 +558,18 @@ static int it66121_get_edid_block(void *context, u8 *buf,
offset += cnt;
remain -= cnt;
- /* Per programming manual, sleep here before emptying the FIFO */
- msleep(20);
-
ret = it66121_wait_ddc_ready(ctx);
+ if (ret) {
+ it66121_abort_ddc_ops(ctx);
+ return ret;
+ }
+
+ ret = regmap_noinc_read(ctx->regmap, IT66121_DDC_RD_FIFO_REG,
+ buf, cnt);
if (ret)
return ret;
- do {
- ret = regmap_read(ctx->regmap, IT66121_DDC_RD_FIFO_REG, &val);
- if (ret)
- return ret;
- *(buf++) = val;
- cnt--;
- } while (cnt > 0);
+ buf += cnt;
}
return 0;
@@ -635,10 +598,12 @@ static int it66121_bridge_attach(struct drm_bridge *bridge,
if (ret)
return ret;
- ret = regmap_write_bits(ctx->regmap, IT66121_CLK_BANK_REG,
- IT66121_CLK_BANK_PWROFF_RCLK, 0);
- if (ret)
- return ret;
+ if (ctx->info->id == ID_IT66121) {
+ ret = regmap_write_bits(ctx->regmap, IT66121_CLK_BANK_REG,
+ IT66121_CLK_BANK_PWROFF_RCLK, 0);
+ if (ret)
+ return ret;
+ }
ret = regmap_write_bits(ctx->regmap, IT66121_INT_REG,
IT66121_INT_TX_CLK_OFF, 0);
@@ -684,11 +649,7 @@ static int it66121_bridge_attach(struct drm_bridge *bridge,
/* Per programming manual, sleep here for bridge to settle */
msleep(50);
- /* Start interrupts */
- return regmap_write_bits(ctx->regmap, IT66121_INT_MASK1_REG,
- IT66121_INT_MASK1_DDC_NOACK |
- IT66121_INT_MASK1_DDC_FIFOERR |
- IT66121_INT_MASK1_DDC_BUSHANG, 0);
+ return 0;
}
static int it66121_set_mute(struct it66121_ctx *ctx, bool mute)
@@ -780,29 +741,32 @@ static void it66121_bridge_disable(struct drm_bridge *bridge,
ctx->connector = NULL;
}
+static int it66121_bridge_check(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct it66121_ctx *ctx = container_of(bridge, struct it66121_ctx, bridge);
+
+ if (ctx->info->id == ID_IT6610) {
+ /* The IT6610 only supports these settings */
+ bridge_state->input_bus_cfg.flags |= DRM_BUS_FLAG_DE_HIGH |
+ DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE;
+ bridge_state->input_bus_cfg.flags &=
+ ~DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE;
+ }
+
+ return 0;
+}
+
static
void it66121_bridge_mode_set(struct drm_bridge *bridge,
const struct drm_display_mode *mode,
const struct drm_display_mode *adjusted_mode)
{
- int ret, i;
u8 buf[HDMI_INFOFRAME_SIZE(AVI)];
struct it66121_ctx *ctx = container_of(bridge, struct it66121_ctx, bridge);
- const u16 aviinfo_reg[HDMI_AVI_INFOFRAME_SIZE] = {
- IT66121_AVIINFO_DB1_REG,
- IT66121_AVIINFO_DB2_REG,
- IT66121_AVIINFO_DB3_REG,
- IT66121_AVIINFO_DB4_REG,
- IT66121_AVIINFO_DB5_REG,
- IT66121_AVIINFO_DB6_REG,
- IT66121_AVIINFO_DB7_REG,
- IT66121_AVIINFO_DB8_REG,
- IT66121_AVIINFO_DB9_REG,
- IT66121_AVIINFO_DB10_REG,
- IT66121_AVIINFO_DB11_REG,
- IT66121_AVIINFO_DB12_REG,
- IT66121_AVIINFO_DB13_REG
- };
+ int ret;
mutex_lock(&ctx->lock);
@@ -822,10 +786,12 @@ void it66121_bridge_mode_set(struct drm_bridge *bridge,
}
/* Write new AVI infoframe packet */
- for (i = 0; i < HDMI_AVI_INFOFRAME_SIZE; i++) {
- if (regmap_write(ctx->regmap, aviinfo_reg[i], buf[i + HDMI_INFOFRAME_HEADER_SIZE]))
- goto unlock;
- }
+ ret = regmap_bulk_write(ctx->regmap, IT66121_AVIINFO_DB1_REG,
+ &buf[HDMI_INFOFRAME_HEADER_SIZE],
+ HDMI_AVI_INFOFRAME_SIZE);
+ if (ret)
+ goto unlock;
+
if (regmap_write(ctx->regmap, IT66121_AVIINFO_CSUM_REG, buf[3]))
goto unlock;
@@ -838,9 +804,12 @@ void it66121_bridge_mode_set(struct drm_bridge *bridge,
if (regmap_write(ctx->regmap, IT66121_HDMI_MODE_REG, IT66121_HDMI_MODE_HDMI))
goto unlock;
- if (regmap_write_bits(ctx->regmap, IT66121_CLK_BANK_REG,
- IT66121_CLK_BANK_PWROFF_TXCLK, IT66121_CLK_BANK_PWROFF_TXCLK))
+ if (ctx->info->id == ID_IT66121 &&
+ regmap_write_bits(ctx->regmap, IT66121_CLK_BANK_REG,
+ IT66121_CLK_BANK_PWROFF_TXCLK,
+ IT66121_CLK_BANK_PWROFF_TXCLK)) {
goto unlock;
+ }
if (it66121_configure_input(ctx))
goto unlock;
@@ -848,7 +817,11 @@ void it66121_bridge_mode_set(struct drm_bridge *bridge,
if (it66121_configure_afe(ctx, adjusted_mode))
goto unlock;
- regmap_write_bits(ctx->regmap, IT66121_CLK_BANK_REG, IT66121_CLK_BANK_PWROFF_TXCLK, 0);
+ if (ctx->info->id == ID_IT66121 &&
+ regmap_write_bits(ctx->regmap, IT66121_CLK_BANK_REG,
+ IT66121_CLK_BANK_PWROFF_TXCLK, 0)) {
+ goto unlock;
+ }
unlock:
mutex_unlock(&ctx->lock);
@@ -906,9 +879,25 @@ static struct edid *it66121_bridge_get_edid(struct drm_bridge *bridge,
{
struct it66121_ctx *ctx = container_of(bridge, struct it66121_ctx, bridge);
struct edid *edid;
+ int ret;
mutex_lock(&ctx->lock);
+ ret = it66121_preamble_ddc(ctx);
+ if (ret) {
+ edid = ERR_PTR(ret);
+ goto out_unlock;
+ }
+
+ ret = regmap_write(ctx->regmap, IT66121_DDC_HEADER_REG,
+ IT66121_DDC_HEADER_EDID);
+ if (ret) {
+ edid = ERR_PTR(ret);
+ goto out_unlock;
+ }
+
edid = drm_do_get_edid(connector, it66121_get_edid_block, ctx);
+
+out_unlock:
mutex_unlock(&ctx->lock);
return edid;
@@ -923,6 +912,7 @@ static const struct drm_bridge_funcs it66121_bridge_funcs = {
.atomic_get_input_bus_fmts = it66121_bridge_atomic_get_input_bus_fmts,
.atomic_enable = it66121_bridge_enable,
.atomic_disable = it66121_bridge_disable,
+ .atomic_check = it66121_bridge_check,
.mode_set = it66121_bridge_mode_set,
.mode_valid = it66121_bridge_mode_valid,
.detect = it66121_bridge_detect,
@@ -952,21 +942,14 @@ static irqreturn_t it66121_irq_threaded_handler(int irq, void *dev_id)
ret = regmap_read(ctx->regmap, IT66121_INT_STATUS1_REG, &val);
if (ret) {
dev_err(dev, "Cannot read STATUS1_REG %d\n", ret);
- } else {
- if (val & IT66121_INT_STATUS1_DDC_FIFOERR)
- it66121_clear_ddc_fifo(ctx);
- if (val & (IT66121_INT_STATUS1_DDC_BUSHANG |
- IT66121_INT_STATUS1_DDC_NOACK))
- it66121_abort_ddc_ops(ctx);
- if (val & IT66121_INT_STATUS1_HPD_STATUS) {
- regmap_write_bits(ctx->regmap, IT66121_INT_CLR1_REG,
- IT66121_INT_CLR1_HPD, IT66121_INT_CLR1_HPD);
+ } else if (val & IT66121_INT_STATUS1_HPD_STATUS) {
+ regmap_write_bits(ctx->regmap, IT66121_INT_CLR1_REG,
+ IT66121_INT_CLR1_HPD, IT66121_INT_CLR1_HPD);
- status = it66121_is_hpd_detect(ctx) ? connector_status_connected
- : connector_status_disconnected;
+ status = it66121_is_hpd_detect(ctx) ? connector_status_connected
+ : connector_status_disconnected;
- event = true;
- }
+ event = true;
}
regmap_write_bits(ctx->regmap, IT66121_SYS_STATUS_REG,
@@ -1512,9 +1495,13 @@ static int it66121_audio_codec_init(struct it66121_ctx *ctx, struct device *dev)
return PTR_ERR_OR_ZERO(ctx->audio.pdev);
}
-static int it66121_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static const char * const it66121_supplies[] = {
+ "vcn33", "vcn18", "vrf12"
+};
+
+static int it66121_probe(struct i2c_client *client)
{
+ const struct i2c_device_id *id = i2c_client_get_device_id(client);
u32 revision_id, vendor_ids[2] = { 0 }, device_ids[2] = { 0 };
struct device_node *ep;
int ret;
@@ -1536,6 +1523,7 @@ static int it66121_probe(struct i2c_client *client,
ctx->dev = dev;
ctx->client = client;
+ ctx->info = (const struct it66121_chip_info *) id->driver_data;
of_property_read_u32(ep, "bus-width", &ctx->bus_width);
of_node_put(ep);
@@ -1565,26 +1553,18 @@ static int it66121_probe(struct i2c_client *client,
i2c_set_clientdata(client, ctx);
mutex_init(&ctx->lock);
- ctx->supplies[0].supply = "vcn33";
- ctx->supplies[1].supply = "vcn18";
- ctx->supplies[2].supply = "vrf12";
- ret = devm_regulator_bulk_get(ctx->dev, 3, ctx->supplies);
+ ret = devm_regulator_bulk_get_enable(dev, ARRAY_SIZE(it66121_supplies),
+ it66121_supplies);
if (ret) {
- dev_err(ctx->dev, "regulator_bulk failed\n");
+ dev_err(dev, "Failed to enable power supplies\n");
return ret;
}
- ret = ite66121_power_on(ctx);
- if (ret)
- return ret;
-
it66121_hw_reset(ctx);
ctx->regmap = devm_regmap_init_i2c(client, &it66121_regmap_config);
- if (IS_ERR(ctx->regmap)) {
- ite66121_power_off(ctx);
+ if (IS_ERR(ctx->regmap))
return PTR_ERR(ctx->regmap);
- }
regmap_read(ctx->regmap, IT66121_VENDOR_ID0_REG, &vendor_ids[0]);
regmap_read(ctx->regmap, IT66121_VENDOR_ID1_REG, &vendor_ids[1]);
@@ -1595,9 +1575,8 @@ static int it66121_probe(struct i2c_client *client,
revision_id = FIELD_GET(IT66121_REVISION_MASK, device_ids[1]);
device_ids[1] &= IT66121_DEVICE_ID1_MASK;
- if (vendor_ids[0] != IT66121_VENDOR_ID0 || vendor_ids[1] != IT66121_VENDOR_ID1 ||
- device_ids[0] != IT66121_DEVICE_ID0 || device_ids[1] != IT66121_DEVICE_ID1) {
- ite66121_power_off(ctx);
+ if ((vendor_ids[1] << 8 | vendor_ids[0]) != ctx->info->vid ||
+ (device_ids[1] << 8 | device_ids[0]) != ctx->info->pid) {
return -ENODEV;
}
@@ -1610,7 +1589,6 @@ static int it66121_probe(struct i2c_client *client,
IRQF_ONESHOT, dev_name(dev), ctx);
if (ret < 0) {
dev_err(dev, "Failed to request irq %d:%d\n", client->irq, ret);
- ite66121_power_off(ctx);
return ret;
}
@@ -1627,19 +1605,32 @@ static void it66121_remove(struct i2c_client *client)
{
struct it66121_ctx *ctx = i2c_get_clientdata(client);
- ite66121_power_off(ctx);
drm_bridge_remove(&ctx->bridge);
mutex_destroy(&ctx->lock);
}
static const struct of_device_id it66121_dt_match[] = {
{ .compatible = "ite,it66121" },
+ { .compatible = "ite,it6610" },
{ }
};
MODULE_DEVICE_TABLE(of, it66121_dt_match);
+static const struct it66121_chip_info it66121_chip_info = {
+ .id = ID_IT66121,
+ .vid = 0x4954,
+ .pid = 0x0612,
+};
+
+static const struct it66121_chip_info it6610_chip_info = {
+ .id = ID_IT6610,
+ .vid = 0xca00,
+ .pid = 0x0611,
+};
+
static const struct i2c_device_id it66121_id[] = {
- { "it66121", 0 },
+ { "it66121", (kernel_ulong_t) &it66121_chip_info },
+ { "it6610", (kernel_ulong_t) &it6610_chip_info },
{ }
};
MODULE_DEVICE_TABLE(i2c, it66121_id);
@@ -1649,7 +1640,7 @@ static struct i2c_driver it66121_driver = {
.name = "it66121",
.of_match_table = it66121_dt_match,
},
- .probe = it66121_probe,
+ .probe_new = it66121_probe,
.remove = it66121_remove,
.id_table = it66121_id,
};
diff --git a/drivers/gpu/drm/bridge/lontium-lt8912b.c b/drivers/gpu/drm/bridge/lontium-lt8912b.c
index a98efef0ba0e..2019a8167d69 100644
--- a/drivers/gpu/drm/bridge/lontium-lt8912b.c
+++ b/drivers/gpu/drm/bridge/lontium-lt8912b.c
@@ -517,14 +517,27 @@ static int lt8912_attach_dsi(struct lt8912 *lt)
return 0;
}
+static void lt8912_bridge_hpd_cb(void *data, enum drm_connector_status status)
+{
+ struct lt8912 *lt = data;
+
+ if (lt->bridge.dev)
+ drm_helper_hpd_irq_event(lt->bridge.dev);
+}
+
static int lt8912_bridge_connector_init(struct drm_bridge *bridge)
{
int ret;
struct lt8912 *lt = bridge_to_lt8912(bridge);
struct drm_connector *connector = &lt->connector;
- connector->polled = DRM_CONNECTOR_POLL_CONNECT |
- DRM_CONNECTOR_POLL_DISCONNECT;
+ if (lt->hdmi_port->ops & DRM_BRIDGE_OP_HPD) {
+ drm_bridge_hpd_enable(lt->hdmi_port, lt8912_bridge_hpd_cb, lt);
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+ } else {
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT |
+ DRM_CONNECTOR_POLL_DISCONNECT;
+ }
ret = drm_connector_init(bridge->dev, connector,
&lt8912_connector_funcs,
@@ -578,6 +591,10 @@ static void lt8912_bridge_detach(struct drm_bridge *bridge)
if (lt->is_attached) {
lt8912_hard_power_off(lt);
+
+ if (lt->hdmi_port->ops & DRM_BRIDGE_OP_HPD)
+ drm_bridge_hpd_disable(lt->hdmi_port);
+
drm_connector_unregister(&lt->connector);
drm_connector_cleanup(&lt->connector);
}
@@ -685,8 +702,7 @@ static int lt8912_put_dt(struct lt8912 *lt)
return 0;
}
-static int lt8912_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int lt8912_probe(struct i2c_client *client)
{
static struct lt8912 *lt;
int ret = 0;
@@ -758,7 +774,7 @@ static struct i2c_driver lt8912_i2c_driver = {
.name = "lt8912",
.of_match_table = lt8912_dt_match,
},
- .probe = lt8912_probe,
+ .probe_new = lt8912_probe,
.remove = lt8912_remove,
.id_table = lt8912_id,
};
diff --git a/drivers/gpu/drm/bridge/lontium-lt9211.c b/drivers/gpu/drm/bridge/lontium-lt9211.c
index 933ca028d612..3e19fff6547a 100644
--- a/drivers/gpu/drm/bridge/lontium-lt9211.c
+++ b/drivers/gpu/drm/bridge/lontium-lt9211.c
@@ -720,8 +720,7 @@ static int lt9211_host_attach(struct lt9211 *ctx)
return 0;
}
-static int lt9211_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int lt9211_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct lt9211 *ctx;
@@ -786,7 +785,7 @@ static const struct of_device_id lt9211_match_table[] = {
MODULE_DEVICE_TABLE(of, lt9211_match_table);
static struct i2c_driver lt9211_driver = {
- .probe = lt9211_probe,
+ .probe_new = lt9211_probe,
.remove = lt9211_remove,
.id_table = lt9211_id,
.driver = {
diff --git a/drivers/gpu/drm/bridge/lontium-lt9611.c b/drivers/gpu/drm/bridge/lontium-lt9611.c
index 7c0a99173b39..a25d21a7d5c1 100644
--- a/drivers/gpu/drm/bridge/lontium-lt9611.c
+++ b/drivers/gpu/drm/bridge/lontium-lt9611.c
@@ -19,6 +19,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_of.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
@@ -33,7 +34,7 @@
struct lt9611 {
struct device *dev;
struct drm_bridge bridge;
- struct drm_connector connector;
+ struct drm_bridge *next_bridge;
struct regmap *regmap;
@@ -58,7 +59,6 @@ struct lt9611 {
enum drm_connector_status status;
u8 edid_buf[EDID_SEG_SIZE];
- u32 vic;
};
#define LT9611_PAGE_CONTROL 0xff
@@ -84,34 +84,11 @@ static const struct regmap_config lt9611_regmap_config = {
.num_ranges = ARRAY_SIZE(lt9611_ranges),
};
-struct lt9611_mode {
- u16 hdisplay;
- u16 vdisplay;
- u8 vrefresh;
- u8 lanes;
- u8 intfs;
-};
-
-static struct lt9611_mode lt9611_modes[] = {
- { 3840, 2160, 30, 4, 2 }, /* 3840x2160 24bit 30Hz 4Lane 2ports */
- { 1920, 1080, 60, 4, 1 }, /* 1080P 24bit 60Hz 4lane 1port */
- { 1920, 1080, 30, 3, 1 }, /* 1080P 24bit 30Hz 3lane 1port */
- { 1920, 1080, 24, 3, 1 },
- { 720, 480, 60, 4, 1 },
- { 720, 576, 50, 2, 1 },
- { 640, 480, 60, 2, 1 },
-};
-
static struct lt9611 *bridge_to_lt9611(struct drm_bridge *bridge)
{
return container_of(bridge, struct lt9611, bridge);
}
-static struct lt9611 *connector_to_lt9611(struct drm_connector *connector)
-{
- return container_of(connector, struct lt9611, connector);
-}
-
static int lt9611_mipi_input_analog(struct lt9611 *lt9611)
{
const struct reg_sequence reg_cfg[] = {
@@ -141,7 +118,7 @@ static int lt9611_mipi_input_digital(struct lt9611 *lt9611,
{ 0x8306, 0x0a },
};
- if (mode->hdisplay == 3840)
+ if (lt9611->dsi1_node)
reg_cfg[1].def = 0x03;
return regmap_multi_reg_write(lt9611->regmap, reg_cfg, ARRAY_SIZE(reg_cfg));
@@ -159,12 +136,12 @@ static void lt9611_mipi_video_setup(struct lt9611 *lt9611,
hactive = mode->hdisplay;
hsync_len = mode->hsync_end - mode->hsync_start;
hfront_porch = mode->hsync_start - mode->hdisplay;
- hsync_porch = hsync_len + mode->htotal - mode->hsync_end;
+ hsync_porch = mode->htotal - mode->hsync_start;
vactive = mode->vdisplay;
vsync_len = mode->vsync_end - mode->vsync_start;
vfront_porch = mode->vsync_start - mode->vdisplay;
- vsync_porch = vsync_len + mode->vtotal - mode->vsync_end;
+ vsync_porch = mode->vtotal - mode->vsync_start;
regmap_write(lt9611->regmap, 0x830d, (u8)(v_total / 256));
regmap_write(lt9611->regmap, 0x830e, (u8)(v_total % 256));
@@ -187,12 +164,14 @@ static void lt9611_mipi_video_setup(struct lt9611 *lt9611,
regmap_write(lt9611->regmap, 0x8319, (u8)(hfront_porch % 256));
- regmap_write(lt9611->regmap, 0x831a, (u8)(hsync_porch / 256));
+ regmap_write(lt9611->regmap, 0x831a, (u8)(hsync_porch / 256) |
+ ((hfront_porch / 256) << 4));
regmap_write(lt9611->regmap, 0x831b, (u8)(hsync_porch % 256));
}
-static void lt9611_pcr_setup(struct lt9611 *lt9611, const struct drm_display_mode *mode)
+static void lt9611_pcr_setup(struct lt9611 *lt9611, const struct drm_display_mode *mode, unsigned int postdiv)
{
+ unsigned int pcr_m = mode->clock * 5 * postdiv / 27000;
const struct reg_sequence reg_cfg[] = {
{ 0x830b, 0x01 },
{ 0x830c, 0x10 },
@@ -207,45 +186,40 @@ static void lt9611_pcr_setup(struct lt9611 *lt9611, const struct drm_display_mod
/* stage 2 */
{ 0x834a, 0x40 },
- { 0x831d, 0x10 },
/* MK limit */
{ 0x832d, 0x38 },
{ 0x8331, 0x08 },
};
- const struct reg_sequence reg_cfg2[] = {
- { 0x830b, 0x03 },
- { 0x830c, 0xd0 },
- { 0x8348, 0x03 },
- { 0x8349, 0xe0 },
- { 0x8324, 0x72 },
- { 0x8325, 0x00 },
- { 0x832a, 0x01 },
- { 0x834a, 0x10 },
- { 0x831d, 0x10 },
- { 0x8326, 0x37 },
- };
+ u8 pol = 0x10;
- regmap_multi_reg_write(lt9611->regmap, reg_cfg, ARRAY_SIZE(reg_cfg));
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ pol |= 0x2;
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ pol |= 0x1;
+ regmap_write(lt9611->regmap, 0x831d, pol);
- switch (mode->hdisplay) {
- case 640:
- regmap_write(lt9611->regmap, 0x8326, 0x14);
- break;
- case 1920:
- regmap_write(lt9611->regmap, 0x8326, 0x37);
- break;
- case 3840:
- regmap_multi_reg_write(lt9611->regmap, reg_cfg2, ARRAY_SIZE(reg_cfg2));
- break;
+ regmap_multi_reg_write(lt9611->regmap, reg_cfg, ARRAY_SIZE(reg_cfg));
+ if (lt9611->dsi1_node) {
+ unsigned int hact = mode->hdisplay;
+
+ hact >>= 2;
+ hact += 0x50;
+ hact = min(hact, 0x3e0U);
+ regmap_write(lt9611->regmap, 0x830b, hact / 256);
+ regmap_write(lt9611->regmap, 0x830c, hact % 256);
+ regmap_write(lt9611->regmap, 0x8348, hact / 256);
+ regmap_write(lt9611->regmap, 0x8349, hact % 256);
}
+ regmap_write(lt9611->regmap, 0x8326, pcr_m);
+
/* pcr rst */
regmap_write(lt9611->regmap, 0x8011, 0x5a);
regmap_write(lt9611->regmap, 0x8011, 0xfa);
}
-static int lt9611_pll_setup(struct lt9611 *lt9611, const struct drm_display_mode *mode)
+static int lt9611_pll_setup(struct lt9611 *lt9611, const struct drm_display_mode *mode, unsigned int *postdiv)
{
unsigned int pclk = mode->clock;
const struct reg_sequence reg_cfg[] = {
@@ -259,16 +233,21 @@ static int lt9611_pll_setup(struct lt9611 *lt9611, const struct drm_display_mode
{ 0x8126, 0x55 },
{ 0x8127, 0x66 },
{ 0x8128, 0x88 },
+ { 0x812a, 0x20 },
};
regmap_multi_reg_write(lt9611->regmap, reg_cfg, ARRAY_SIZE(reg_cfg));
- if (pclk > 150000)
+ if (pclk > 150000) {
regmap_write(lt9611->regmap, 0x812d, 0x88);
- else if (pclk > 70000)
+ *postdiv = 1;
+ } else if (pclk > 70000) {
regmap_write(lt9611->regmap, 0x812d, 0x99);
- else
+ *postdiv = 2;
+ } else {
regmap_write(lt9611->regmap, 0x812d, 0xaa);
+ *postdiv = 4;
+ }
/*
* first divide pclk by 2 first
@@ -353,13 +332,55 @@ end:
return temp;
}
-static void lt9611_hdmi_tx_digital(struct lt9611 *lt9611)
+static void lt9611_hdmi_set_infoframes(struct lt9611 *lt9611,
+ struct drm_connector *connector,
+ struct drm_display_mode *mode)
{
- regmap_write(lt9611->regmap, 0x8443, 0x46 - lt9611->vic);
- regmap_write(lt9611->regmap, 0x8447, lt9611->vic);
- regmap_write(lt9611->regmap, 0x843d, 0x0a); /* UD1 infoframe */
+ union hdmi_infoframe infoframe;
+ ssize_t len;
+ u8 iframes = 0x0a; /* UD1 infoframe */
+ u8 buf[32];
+ int ret;
+ int i;
- regmap_write(lt9611->regmap, 0x82d6, 0x8c);
+ ret = drm_hdmi_avi_infoframe_from_display_mode(&infoframe.avi,
+ connector,
+ mode);
+ if (ret < 0)
+ goto out;
+
+ len = hdmi_infoframe_pack(&infoframe, buf, sizeof(buf));
+ if (len < 0)
+ goto out;
+
+ for (i = 0; i < len; i++)
+ regmap_write(lt9611->regmap, 0x8440 + i, buf[i]);
+
+ ret = drm_hdmi_vendor_infoframe_from_display_mode(&infoframe.vendor.hdmi,
+ connector,
+ mode);
+ if (ret < 0)
+ goto out;
+
+ len = hdmi_infoframe_pack(&infoframe, buf, sizeof(buf));
+ if (len < 0)
+ goto out;
+
+ for (i = 0; i < len; i++)
+ regmap_write(lt9611->regmap, 0x8474 + i, buf[i]);
+
+ iframes |= 0x20;
+
+out:
+ regmap_write(lt9611->regmap, 0x843d, iframes); /* UD1 infoframe */
+}
+
+static void lt9611_hdmi_tx_digital(struct lt9611 *lt9611, bool is_hdmi)
+{
+ if (is_hdmi)
+ regmap_write(lt9611->regmap, 0x82d6, 0x8c);
+ else
+ regmap_write(lt9611->regmap, 0x82d6, 0x0c);
regmap_write(lt9611->regmap, 0x82d7, 0x04);
}
@@ -448,12 +469,11 @@ static void lt9611_sleep_setup(struct lt9611 *lt9611)
{ 0x8023, 0x01 },
{ 0x8157, 0x03 }, /* set addr pin as output */
{ 0x8149, 0x0b },
- { 0x8151, 0x30 }, /* disable IRQ */
+
{ 0x8102, 0x48 }, /* MIPI Rx power down */
{ 0x8123, 0x80 },
{ 0x8130, 0x00 },
- { 0x8100, 0x01 }, /* bandgap power down */
- { 0x8101, 0x00 }, /* system clk power down */
+ { 0x8011, 0x0a },
};
regmap_multi_reg_write(lt9611->regmap,
@@ -564,24 +584,9 @@ static int lt9611_regulator_enable(struct lt9611 *lt9611)
return 0;
}
-static struct lt9611_mode *lt9611_find_mode(const struct drm_display_mode *mode)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(lt9611_modes); i++) {
- if (lt9611_modes[i].hdisplay == mode->hdisplay &&
- lt9611_modes[i].vdisplay == mode->vdisplay &&
- lt9611_modes[i].vrefresh == drm_mode_vrefresh(mode)) {
- return &lt9611_modes[i];
- }
- }
-
- return NULL;
-}
-
-/* connector funcs */
-static enum drm_connector_status __lt9611_detect(struct lt9611 *lt9611)
+static enum drm_connector_status lt9611_bridge_detect(struct drm_bridge *bridge)
{
+ struct lt9611 *lt9611 = bridge_to_lt9611(bridge);
unsigned int reg_val = 0;
int connected = 0;
@@ -594,12 +599,6 @@ static enum drm_connector_status __lt9611_detect(struct lt9611 *lt9611)
return lt9611->status;
}
-static enum drm_connector_status
-lt9611_connector_detect(struct drm_connector *connector, bool force)
-{
- return __lt9611_detect(connector_to_lt9611(connector));
-}
-
static int lt9611_read_edid(struct lt9611 *lt9611)
{
unsigned int temp;
@@ -681,36 +680,37 @@ lt9611_get_edid_block(void *data, u8 *buf, unsigned int block, size_t len)
return 0;
}
-static int lt9611_connector_get_modes(struct drm_connector *connector)
-{
- struct lt9611 *lt9611 = connector_to_lt9611(connector);
- unsigned int count;
- struct edid *edid;
-
- lt9611_power_on(lt9611);
- edid = drm_do_get_edid(connector, lt9611_get_edid_block, lt9611);
- drm_connector_update_edid_property(connector, edid);
- count = drm_add_edid_modes(connector, edid);
- kfree(edid);
-
- return count;
-}
-
-static enum drm_mode_status
-lt9611_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- struct lt9611_mode *lt9611_mode = lt9611_find_mode(mode);
-
- return lt9611_mode ? MODE_OK : MODE_BAD;
-}
-
/* bridge funcs */
static void
lt9611_bridge_atomic_enable(struct drm_bridge *bridge,
struct drm_bridge_state *old_bridge_state)
{
struct lt9611 *lt9611 = bridge_to_lt9611(bridge);
+ struct drm_atomic_state *state = old_bridge_state->base.state;
+ struct drm_connector *connector;
+ struct drm_connector_state *conn_state;
+ struct drm_crtc_state *crtc_state;
+ struct drm_display_mode *mode;
+ unsigned int postdiv;
+
+ connector = drm_atomic_get_new_connector_for_encoder(state, bridge->encoder);
+ if (WARN_ON(!connector))
+ return;
+
+ conn_state = drm_atomic_get_new_connector_state(state, connector);
+ if (WARN_ON(!conn_state))
+ return;
+
+ crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc);
+ if (WARN_ON(!crtc_state))
+ return;
+
+ mode = &crtc_state->adjusted_mode;
+
+ lt9611_mipi_input_digital(lt9611, mode);
+ lt9611_pll_setup(lt9611, mode, &postdiv);
+ lt9611_mipi_video_setup(lt9611, mode);
+ lt9611_pcr_setup(lt9611, mode, postdiv);
if (lt9611_power_on(lt9611)) {
dev_err(lt9611->dev, "power on failed\n");
@@ -718,7 +718,8 @@ lt9611_bridge_atomic_enable(struct drm_bridge *bridge,
}
lt9611_mipi_input_analog(lt9611);
- lt9611_hdmi_tx_digital(lt9611);
+ lt9611_hdmi_set_infoframes(lt9611, connector, mode);
+ lt9611_hdmi_tx_digital(lt9611, connector->display_info.is_hdmi);
lt9611_hdmi_tx_phy(lt9611);
msleep(500);
@@ -749,25 +750,10 @@ lt9611_bridge_atomic_disable(struct drm_bridge *bridge,
}
}
-static struct
-drm_connector_helper_funcs lt9611_bridge_connector_helper_funcs = {
- .get_modes = lt9611_connector_get_modes,
- .mode_valid = lt9611_connector_mode_valid,
-};
-
-static const struct drm_connector_funcs lt9611_bridge_connector_funcs = {
- .fill_modes = drm_helper_probe_single_connector_modes,
- .detect = lt9611_connector_detect,
- .destroy = drm_connector_cleanup,
- .reset = drm_atomic_helper_connector_reset,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
static struct mipi_dsi_device *lt9611_attach_dsi(struct lt9611 *lt9611,
struct device_node *dsi_node)
{
- const struct mipi_dsi_device_info info = { "lt9611", 0, NULL };
+ const struct mipi_dsi_device_info info = { "lt9611", 0, lt9611->dev->of_node};
struct mipi_dsi_device *dsi;
struct mipi_dsi_host *host;
struct device *dev = lt9611->dev;
@@ -799,70 +785,54 @@ static struct mipi_dsi_device *lt9611_attach_dsi(struct lt9611 *lt9611,
return dsi;
}
-static int lt9611_connector_init(struct drm_bridge *bridge, struct lt9611 *lt9611)
-{
- int ret;
-
- ret = drm_connector_init(bridge->dev, &lt9611->connector,
- &lt9611_bridge_connector_funcs,
- DRM_MODE_CONNECTOR_HDMIA);
- if (ret) {
- DRM_ERROR("Failed to initialize connector with drm\n");
- return ret;
- }
-
- drm_connector_helper_add(&lt9611->connector,
- &lt9611_bridge_connector_helper_funcs);
-
- if (!bridge->encoder) {
- DRM_ERROR("Parent encoder object not found");
- return -ENODEV;
- }
-
- drm_connector_attach_encoder(&lt9611->connector, bridge->encoder);
-
- return 0;
-}
-
static int lt9611_bridge_attach(struct drm_bridge *bridge,
enum drm_bridge_attach_flags flags)
{
struct lt9611 *lt9611 = bridge_to_lt9611(bridge);
- int ret;
- if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) {
- ret = lt9611_connector_init(bridge, lt9611);
- if (ret < 0)
- return ret;
- }
-
- return 0;
+ return drm_bridge_attach(bridge->encoder, lt9611->next_bridge,
+ bridge, flags);
}
static enum drm_mode_status lt9611_bridge_mode_valid(struct drm_bridge *bridge,
const struct drm_display_info *info,
const struct drm_display_mode *mode)
{
- struct lt9611_mode *lt9611_mode = lt9611_find_mode(mode);
struct lt9611 *lt9611 = bridge_to_lt9611(bridge);
- if (!lt9611_mode)
- return MODE_BAD;
- else if (lt9611_mode->intfs > 1 && !lt9611->dsi1)
+ if (mode->hdisplay > 3840)
+ return MODE_BAD_HVALUE;
+
+ if (mode->vdisplay > 2160)
+ return MODE_BAD_VVALUE;
+
+ if (mode->hdisplay == 3840 &&
+ mode->vdisplay == 2160 &&
+ drm_mode_vrefresh(mode) > 30)
+ return MODE_CLOCK_HIGH;
+
+ if (mode->hdisplay > 2000 && !lt9611->dsi1_node)
return MODE_PANEL;
else
return MODE_OK;
}
-static void lt9611_bridge_pre_enable(struct drm_bridge *bridge)
+static void lt9611_bridge_atomic_pre_enable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_bridge_state)
{
struct lt9611 *lt9611 = bridge_to_lt9611(bridge);
+ static const struct reg_sequence reg_cfg[] = {
+ { 0x8102, 0x12 },
+ { 0x8123, 0x40 },
+ { 0x8130, 0xea },
+ { 0x8011, 0xfa },
+ };
if (!lt9611->sleep)
return;
- lt9611_reset(lt9611);
- regmap_write(lt9611->regmap, 0x80ee, 0x01);
+ regmap_multi_reg_write(lt9611->regmap,
+ reg_cfg, ARRAY_SIZE(reg_cfg));
lt9611->sleep = false;
}
@@ -876,33 +846,6 @@ lt9611_bridge_atomic_post_disable(struct drm_bridge *bridge,
lt9611_sleep_setup(lt9611);
}
-static void lt9611_bridge_mode_set(struct drm_bridge *bridge,
- const struct drm_display_mode *mode,
- const struct drm_display_mode *adj_mode)
-{
- struct lt9611 *lt9611 = bridge_to_lt9611(bridge);
- struct hdmi_avi_infoframe avi_frame;
- int ret;
-
- lt9611_bridge_pre_enable(bridge);
-
- lt9611_mipi_input_digital(lt9611, mode);
- lt9611_pll_setup(lt9611, mode);
- lt9611_mipi_video_setup(lt9611, mode);
- lt9611_pcr_setup(lt9611, mode);
-
- ret = drm_hdmi_avi_infoframe_from_display_mode(&avi_frame,
- &lt9611->connector,
- mode);
- if (!ret)
- lt9611->vic = avi_frame.video_code;
-}
-
-static enum drm_connector_status lt9611_bridge_detect(struct drm_bridge *bridge)
-{
- return __lt9611_detect(bridge_to_lt9611(bridge));
-}
-
static struct edid *lt9611_bridge_get_edid(struct drm_bridge *bridge,
struct drm_connector *connector)
{
@@ -948,11 +891,11 @@ lt9611_atomic_get_input_bus_fmts(struct drm_bridge *bridge,
static const struct drm_bridge_funcs lt9611_bridge_funcs = {
.attach = lt9611_bridge_attach,
.mode_valid = lt9611_bridge_mode_valid,
- .mode_set = lt9611_bridge_mode_set,
.detect = lt9611_bridge_detect,
.get_edid = lt9611_bridge_get_edid,
.hpd_enable = lt9611_bridge_hpd_enable,
+ .atomic_pre_enable = lt9611_bridge_atomic_pre_enable,
.atomic_enable = lt9611_bridge_atomic_enable,
.atomic_disable = lt9611_bridge_atomic_disable,
.atomic_post_disable = lt9611_bridge_atomic_post_disable,
@@ -975,7 +918,7 @@ static int lt9611_parse_dt(struct device *dev,
lt9611->ac_mode = of_property_read_bool(dev->of_node, "lt,ac-mode");
- return 0;
+ return drm_of_find_panel_or_bridge(dev->of_node, 2, -1, NULL, &lt9611->next_bridge);
}
static int lt9611_gpio_init(struct lt9611 *lt9611)
@@ -1108,8 +1051,7 @@ static void lt9611_audio_exit(struct lt9611 *lt9611)
}
}
-static int lt9611_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int lt9611_probe(struct i2c_client *client)
{
struct lt9611 *lt9611;
struct device *dev = &client->dev;
@@ -1248,7 +1190,7 @@ static struct i2c_driver lt9611_driver = {
.name = "lt9611",
.of_match_table = lt9611_match_table,
},
- .probe = lt9611_probe,
+ .probe_new = lt9611_probe,
.remove = lt9611_remove,
.id_table = lt9611_id,
};
diff --git a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
index fa1ee6264d92..583daacf3705 100644
--- a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
+++ b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
@@ -844,8 +844,7 @@ static const struct attribute_group *lt9611uxc_attr_groups[] = {
NULL,
};
-static int lt9611uxc_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int lt9611uxc_probe(struct i2c_client *client)
{
struct lt9611uxc *lt9611uxc;
struct device *dev = &client->dev;
@@ -1012,7 +1011,7 @@ static struct i2c_driver lt9611uxc_driver = {
.of_match_table = lt9611uxc_match_table,
.dev_groups = lt9611uxc_attr_groups,
},
- .probe = lt9611uxc_probe,
+ .probe_new = lt9611uxc_probe,
.remove = lt9611uxc_remove,
.id_table = lt9611uxc_id,
};
diff --git a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
index 97359f807bfc..4fc494d9084b 100644
--- a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
+++ b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
@@ -336,8 +336,7 @@ static int ge_b850v3_register(void)
"ge-b850v3-lvds-dp", ge_b850v3_lvds_ptr);
}
-static int stdp4028_ge_b850v3_fw_probe(struct i2c_client *stdp4028_i2c,
- const struct i2c_device_id *id)
+static int stdp4028_ge_b850v3_fw_probe(struct i2c_client *stdp4028_i2c)
{
struct device *dev = &stdp4028_i2c->dev;
int ret;
@@ -376,7 +375,7 @@ MODULE_DEVICE_TABLE(of, stdp4028_ge_b850v3_fw_match);
static struct i2c_driver stdp4028_ge_b850v3_fw_driver = {
.id_table = stdp4028_ge_b850v3_fw_i2c_table,
- .probe = stdp4028_ge_b850v3_fw_probe,
+ .probe_new = stdp4028_ge_b850v3_fw_probe,
.remove = stdp4028_ge_b850v3_fw_remove,
.driver = {
.name = "stdp4028-ge-b850v3-fw",
@@ -384,8 +383,7 @@ static struct i2c_driver stdp4028_ge_b850v3_fw_driver = {
},
};
-static int stdp2690_ge_b850v3_fw_probe(struct i2c_client *stdp2690_i2c,
- const struct i2c_device_id *id)
+static int stdp2690_ge_b850v3_fw_probe(struct i2c_client *stdp2690_i2c)
{
struct device *dev = &stdp2690_i2c->dev;
int ret;
@@ -424,7 +422,7 @@ MODULE_DEVICE_TABLE(of, stdp2690_ge_b850v3_fw_match);
static struct i2c_driver stdp2690_ge_b850v3_fw_driver = {
.id_table = stdp2690_ge_b850v3_fw_i2c_table,
- .probe = stdp2690_ge_b850v3_fw_probe,
+ .probe_new = stdp2690_ge_b850v3_fw_probe,
.remove = stdp2690_ge_b850v3_fw_remove,
.driver = {
.name = "stdp2690-ge-b850v3-fw",
@@ -440,7 +438,11 @@ static int __init stdpxxxx_ge_b850v3_init(void)
if (ret)
return ret;
- return i2c_add_driver(&stdp2690_ge_b850v3_fw_driver);
+ ret = i2c_add_driver(&stdp2690_ge_b850v3_fw_driver);
+ if (ret)
+ i2c_del_driver(&stdp4028_ge_b850v3_fw_driver);
+
+ return ret;
}
module_init(stdpxxxx_ge_b850v3_init);
diff --git a/drivers/gpu/drm/bridge/nxp-ptn3460.c b/drivers/gpu/drm/bridge/nxp-ptn3460.c
index 0851101a8c72..cd292a2f894c 100644
--- a/drivers/gpu/drm/bridge/nxp-ptn3460.c
+++ b/drivers/gpu/drm/bridge/nxp-ptn3460.c
@@ -257,8 +257,7 @@ static const struct drm_bridge_funcs ptn3460_bridge_funcs = {
.get_edid = ptn3460_get_edid,
};
-static int ptn3460_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int ptn3460_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct ptn3460_bridge *ptn_bridge;
@@ -336,7 +335,7 @@ MODULE_DEVICE_TABLE(of, ptn3460_match);
static struct i2c_driver ptn3460_driver = {
.id_table = ptn3460_i2c_table,
- .probe = ptn3460_probe,
+ .probe_new = ptn3460_probe,
.remove = ptn3460_remove,
.driver = {
.name = "nxp,ptn3460",
diff --git a/drivers/gpu/drm/bridge/panel.c b/drivers/gpu/drm/bridge/panel.c
index 216af76d0042..e8aae3cdc73d 100644
--- a/drivers/gpu/drm/bridge/panel.c
+++ b/drivers/gpu/drm/bridge/panel.c
@@ -357,13 +357,16 @@ struct drm_bridge *devm_drm_panel_bridge_add_typed(struct device *dev,
return ERR_PTR(-ENOMEM);
bridge = drm_panel_bridge_add_typed(panel, connector_type);
- if (!IS_ERR(bridge)) {
- *ptr = bridge;
- devres_add(dev, ptr);
- } else {
+ if (IS_ERR(bridge)) {
devres_free(ptr);
+ return bridge;
}
+ bridge->pre_enable_prev_first = panel->prepare_prev_first;
+
+ *ptr = bridge;
+ devres_add(dev, ptr);
+
return bridge;
}
EXPORT_SYMBOL(devm_drm_panel_bridge_add_typed);
@@ -402,6 +405,8 @@ struct drm_bridge *drmm_panel_bridge_add(struct drm_device *drm,
if (ret)
return ERR_PTR(ret);
+ bridge->pre_enable_prev_first = panel->prepare_prev_first;
+
return bridge;
}
EXPORT_SYMBOL(drmm_panel_bridge_add);
diff --git a/drivers/gpu/drm/bridge/parade-ps8622.c b/drivers/gpu/drm/bridge/parade-ps8622.c
index 309de802863d..530ee6a19e7e 100644
--- a/drivers/gpu/drm/bridge/parade-ps8622.c
+++ b/drivers/gpu/drm/bridge/parade-ps8622.c
@@ -442,9 +442,9 @@ static const struct of_device_id ps8622_devices[] = {
};
MODULE_DEVICE_TABLE(of, ps8622_devices);
-static int ps8622_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int ps8622_probe(struct i2c_client *client)
{
+ const struct i2c_device_id *id = i2c_client_get_device_id(client);
struct device *dev = &client->dev;
struct ps8622_bridge *ps8622;
struct drm_bridge *panel_bridge;
@@ -538,7 +538,7 @@ MODULE_DEVICE_TABLE(i2c, ps8622_i2c_table);
static struct i2c_driver ps8622_driver = {
.id_table = ps8622_i2c_table,
- .probe = ps8622_probe,
+ .probe_new = ps8622_probe,
.remove = ps8622_remove,
.driver = {
.name = "ps8622",
diff --git a/drivers/gpu/drm/bridge/parade-ps8640.c b/drivers/gpu/drm/bridge/parade-ps8640.c
index 6a614e54b383..4b361d7d5e44 100644
--- a/drivers/gpu/drm/bridge/parade-ps8640.c
+++ b/drivers/gpu/drm/bridge/parade-ps8640.c
@@ -15,6 +15,7 @@
#include <drm/display/drm_dp_aux_bus.h>
#include <drm/display/drm_dp_helper.h>
+#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_edid.h>
#include <drm/drm_mipi_dsi.h>
@@ -442,7 +443,8 @@ static const struct dev_pm_ops ps8640_pm_ops = {
pm_runtime_force_resume)
};
-static void ps8640_pre_enable(struct drm_bridge *bridge)
+static void ps8640_atomic_pre_enable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_bridge_state)
{
struct ps8640 *ps_bridge = bridge_to_ps8640(bridge);
struct regmap *map = ps_bridge->regmap[PAGE2_TOP_CNTL];
@@ -476,7 +478,8 @@ static void ps8640_pre_enable(struct drm_bridge *bridge)
ps_bridge->pre_enabled = true;
}
-static void ps8640_post_disable(struct drm_bridge *bridge)
+static void ps8640_atomic_post_disable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_bridge_state)
{
struct ps8640 *ps_bridge = bridge_to_ps8640(bridge);
@@ -554,7 +557,7 @@ static struct edid *ps8640_bridge_get_edid(struct drm_bridge *bridge,
* EDID, for this chip, we need to do a full poweron, otherwise it will
* fail.
*/
- drm_bridge_chain_pre_enable(bridge);
+ drm_atomic_bridge_chain_pre_enable(bridge, connector->state->state);
edid = drm_get_edid(connector,
ps_bridge->page[PAGE0_DP_CNTL]->adapter);
@@ -564,7 +567,7 @@ static struct edid *ps8640_bridge_get_edid(struct drm_bridge *bridge,
* before, return the chip to its original power state.
*/
if (poweroff)
- drm_bridge_chain_post_disable(bridge);
+ drm_atomic_bridge_chain_post_disable(bridge, connector->state->state);
return edid;
}
@@ -579,8 +582,11 @@ static const struct drm_bridge_funcs ps8640_bridge_funcs = {
.attach = ps8640_bridge_attach,
.detach = ps8640_bridge_detach,
.get_edid = ps8640_bridge_get_edid,
- .post_disable = ps8640_post_disable,
- .pre_enable = ps8640_pre_enable,
+ .atomic_post_disable = ps8640_atomic_post_disable,
+ .atomic_pre_enable = ps8640_atomic_pre_enable,
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
};
static int ps8640_bridge_get_dsi_resources(struct device *dev, struct ps8640 *ps_bridge)
@@ -734,13 +740,13 @@ static int ps8640_probe(struct i2c_client *client)
pm_runtime_enable(dev);
/*
* Powering on ps8640 takes ~300ms. To avoid wasting time on power
- * cycling ps8640 too often, set autosuspend_delay to 1000ms to ensure
+ * cycling ps8640 too often, set autosuspend_delay to 2000ms to ensure
* the bridge wouldn't suspend in between each _aux_transfer_msg() call
* during EDID read (~20ms in my experiment) and in between the last
* _aux_transfer_msg() call during EDID read and the _pre_enable() call
* (~100ms in my experiment).
*/
- pm_runtime_set_autosuspend_delay(dev, 1000);
+ pm_runtime_set_autosuspend_delay(dev, 2000);
pm_runtime_use_autosuspend(dev);
pm_suspend_ignore_children(dev, true);
ret = devm_add_action_or_reset(dev, ps8640_runtime_disable, dev);
diff --git a/drivers/gpu/drm/bridge/sii902x.c b/drivers/gpu/drm/bridge/sii902x.c
index 878fb7d3732b..ef66461e7f7c 100644
--- a/drivers/gpu/drm/bridge/sii902x.c
+++ b/drivers/gpu/drm/bridge/sii902x.c
@@ -171,7 +171,6 @@ struct sii902x {
struct drm_connector connector;
struct gpio_desc *reset_gpio;
struct i2c_mux_core *i2cmux;
- struct regulator_bulk_data supplies[2];
bool sink_is_hdmi;
/*
* Mutex protects audio and video functions from interfering
@@ -240,12 +239,12 @@ static void sii902x_reset(struct sii902x *sii902x)
if (!sii902x->reset_gpio)
return;
- gpiod_set_value(sii902x->reset_gpio, 1);
+ gpiod_set_value_cansleep(sii902x->reset_gpio, 1);
/* The datasheet says treset-min = 100us. Make it 150us to be sure. */
usleep_range(150, 200);
- gpiod_set_value(sii902x->reset_gpio, 0);
+ gpiod_set_value_cansleep(sii902x->reset_gpio, 0);
}
static enum drm_connector_status sii902x_detect(struct sii902x *sii902x)
@@ -1066,12 +1065,12 @@ static int sii902x_init(struct sii902x *sii902x)
return i2c_mux_add_adapter(sii902x->i2cmux, 0, 0, 0);
}
-static int sii902x_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int sii902x_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct device_node *endpoint;
struct sii902x *sii902x;
+ static const char * const supplies[] = {"iovcc", "cvcc12"};
int ret;
ret = i2c_check_functionality(client->adapter,
@@ -1117,32 +1116,17 @@ static int sii902x_probe(struct i2c_client *client,
sii902x->next_bridge = of_drm_find_bridge(remote);
of_node_put(remote);
if (!sii902x->next_bridge)
- return -EPROBE_DEFER;
+ return dev_err_probe(dev, -EPROBE_DEFER,
+ "Failed to find remote bridge\n");
}
mutex_init(&sii902x->mutex);
- sii902x->supplies[0].supply = "iovcc";
- sii902x->supplies[1].supply = "cvcc12";
- ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(sii902x->supplies),
- sii902x->supplies);
+ ret = devm_regulator_bulk_get_enable(dev, ARRAY_SIZE(supplies), supplies);
if (ret < 0)
- return ret;
-
- ret = regulator_bulk_enable(ARRAY_SIZE(sii902x->supplies),
- sii902x->supplies);
- if (ret < 0) {
- dev_err_probe(dev, ret, "Failed to enable supplies");
- return ret;
- }
+ return dev_err_probe(dev, ret, "Failed to enable supplies");
- ret = sii902x_init(sii902x);
- if (ret < 0) {
- regulator_bulk_disable(ARRAY_SIZE(sii902x->supplies),
- sii902x->supplies);
- }
-
- return ret;
+ return sii902x_init(sii902x);
}
static void sii902x_remove(struct i2c_client *client)
@@ -1152,8 +1136,6 @@ static void sii902x_remove(struct i2c_client *client)
i2c_mux_del_adapters(sii902x->i2cmux);
drm_bridge_remove(&sii902x->bridge);
- regulator_bulk_disable(ARRAY_SIZE(sii902x->supplies),
- sii902x->supplies);
}
static const struct of_device_id sii902x_dt_ids[] = {
@@ -1169,7 +1151,7 @@ static const struct i2c_device_id sii902x_i2c_ids[] = {
MODULE_DEVICE_TABLE(i2c, sii902x_i2c_ids);
static struct i2c_driver sii902x_driver = {
- .probe = sii902x_probe,
+ .probe_new = sii902x_probe,
.remove = sii902x_remove,
.driver = {
.name = "sii902x",
diff --git a/drivers/gpu/drm/bridge/sii9234.c b/drivers/gpu/drm/bridge/sii9234.c
index 5b3061d4b5c3..099b510ff285 100644
--- a/drivers/gpu/drm/bridge/sii9234.c
+++ b/drivers/gpu/drm/bridge/sii9234.c
@@ -886,8 +886,7 @@ static const struct drm_bridge_funcs sii9234_bridge_funcs = {
.mode_valid = sii9234_mode_valid,
};
-static int sii9234_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int sii9234_probe(struct i2c_client *client)
{
struct i2c_adapter *adapter = client->adapter;
struct sii9234 *ctx;
@@ -961,7 +960,7 @@ static struct i2c_driver sii9234_driver = {
.name = "sii9234",
.of_match_table = sii9234_dt_match,
},
- .probe = sii9234_probe,
+ .probe_new = sii9234_probe,
.remove = sii9234_remove,
.id_table = sii9234_id,
};
diff --git a/drivers/gpu/drm/bridge/sil-sii8620.c b/drivers/gpu/drm/bridge/sil-sii8620.c
index 511982a1cedb..b96d03cd878d 100644
--- a/drivers/gpu/drm/bridge/sil-sii8620.c
+++ b/drivers/gpu/drm/bridge/sil-sii8620.c
@@ -2284,8 +2284,7 @@ static const struct drm_bridge_funcs sii8620_bridge_funcs = {
.mode_valid = sii8620_mode_valid,
};
-static int sii8620_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int sii8620_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct sii8620 *ctx;
@@ -2379,7 +2378,7 @@ static struct i2c_driver sii8620_driver = {
.name = "sii8620",
.of_match_table = of_match_ptr(sii8620_dt_match),
},
- .probe = sii8620_probe,
+ .probe_new = sii8620_probe,
.remove = sii8620_remove,
.id_table = sii8620_id,
};
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c
index a2f0860b20bb..d751820c6da6 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c
@@ -193,6 +193,7 @@ static int snd_dw_hdmi_probe(struct platform_device *pdev)
struct hdmi_codec_pdata pdata;
struct platform_device *platform;
+ memset(&pdata, 0, sizeof(pdata));
pdata.ops = &dw_hdmi_i2s_ops;
pdata.i2s = 1;
pdata.max_i2s_channels = 8;
diff --git a/drivers/gpu/drm/bridge/tc358764.c b/drivers/gpu/drm/bridge/tc358764.c
index 53259c12d777..f85654f1b104 100644
--- a/drivers/gpu/drm/bridge/tc358764.c
+++ b/drivers/gpu/drm/bridge/tc358764.c
@@ -369,6 +369,7 @@ static int tc358764_probe(struct mipi_dsi_device *dsi)
ctx->bridge.funcs = &tc358764_bridge_funcs;
ctx->bridge.of_node = dev->of_node;
+ ctx->bridge.pre_enable_prev_first = true;
drm_bridge_add(&ctx->bridge);
diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
index 2a58eb271f70..6d16ec45ea61 100644
--- a/drivers/gpu/drm/bridge/tc358767.c
+++ b/drivers/gpu/drm/bridge/tc358767.c
@@ -1264,10 +1264,10 @@ static int tc_dsi_rx_enable(struct tc_data *tc)
u32 value;
int ret;
- regmap_write(tc->regmap, PPI_D0S_CLRSIPOCOUNT, 5);
- regmap_write(tc->regmap, PPI_D1S_CLRSIPOCOUNT, 5);
- regmap_write(tc->regmap, PPI_D2S_CLRSIPOCOUNT, 5);
- regmap_write(tc->regmap, PPI_D3S_CLRSIPOCOUNT, 5);
+ regmap_write(tc->regmap, PPI_D0S_CLRSIPOCOUNT, 25);
+ regmap_write(tc->regmap, PPI_D1S_CLRSIPOCOUNT, 25);
+ regmap_write(tc->regmap, PPI_D2S_CLRSIPOCOUNT, 25);
+ regmap_write(tc->regmap, PPI_D3S_CLRSIPOCOUNT, 25);
regmap_write(tc->regmap, PPI_D0S_ATMR, 0);
regmap_write(tc->regmap, PPI_D1S_ATMR, 0);
regmap_write(tc->regmap, PPI_TX_RX_TA, TTA_GET | TTA_SURE);
@@ -2029,7 +2029,7 @@ static void tc_clk_disable(void *data)
clk_disable_unprepare(refclk);
}
-static int tc_probe(struct i2c_client *client, const struct i2c_device_id *id)
+static int tc_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct tc_data *tc;
@@ -2209,7 +2209,7 @@ static struct i2c_driver tc358767_driver = {
.of_match_table = tc358767_of_ids,
},
.id_table = tc358767_i2c_ids,
- .probe = tc_probe,
+ .probe_new = tc_probe,
.remove = tc_remove,
};
module_i2c_driver(tc358767_driver);
diff --git a/drivers/gpu/drm/bridge/tc358768.c b/drivers/gpu/drm/bridge/tc358768.c
index 4c4b77ce8aba..7c0cbe84611b 100644
--- a/drivers/gpu/drm/bridge/tc358768.c
+++ b/drivers/gpu/drm/bridge/tc358768.c
@@ -15,7 +15,6 @@
#include <linux/slab.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_of.h>
@@ -1018,8 +1017,7 @@ static int tc358768_get_regulators(struct tc358768_priv *priv)
return ret;
}
-static int tc358768_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int tc358768_i2c_probe(struct i2c_client *client)
{
struct tc358768_priv *priv;
struct device *dev = &client->dev;
@@ -1085,7 +1083,7 @@ static struct i2c_driver tc358768_driver = {
.of_match_table = tc358768_of_ids,
},
.id_table = tc358768_i2c_ids,
- .probe = tc358768_i2c_probe,
+ .probe_new = tc358768_i2c_probe,
.remove = tc358768_i2c_remove,
};
module_i2c_driver(tc358768_driver);
diff --git a/drivers/gpu/drm/bridge/tc358775.c b/drivers/gpu/drm/bridge/tc358775.c
index 3ceb0e9f9bdc..19316994ddd1 100644
--- a/drivers/gpu/drm/bridge/tc358775.c
+++ b/drivers/gpu/drm/bridge/tc358775.c
@@ -23,7 +23,6 @@
#include <drm/display/drm_dp_helper.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
-#include <drm/drm_crtc_helper.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
@@ -637,7 +636,7 @@ static int tc_attach_host(struct tc_data *tc)
return 0;
}
-static int tc_probe(struct i2c_client *client, const struct i2c_device_id *id)
+static int tc_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct tc_data *tc;
@@ -729,7 +728,7 @@ static struct i2c_driver tc358775_driver = {
.of_match_table = tc358775_of_ids,
},
.id_table = tc358775_i2c_ids,
- .probe = tc_probe,
+ .probe_new = tc_probe,
.remove = tc_remove,
};
module_i2c_driver(tc358775_driver);
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi83.c b/drivers/gpu/drm/bridge/ti-sn65dsi83.c
index 7ba9467fff12..91ecfbe45bf9 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi83.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi83.c
@@ -346,7 +346,7 @@ static void sn65dsi83_atomic_enable(struct drm_bridge *bridge,
/* Deassert reset */
gpiod_set_value_cansleep(ctx->enable_gpio, 1);
- usleep_range(1000, 1100);
+ usleep_range(10000, 11000);
/* Get the LVDS format from the bridge state. */
bridge_state = drm_atomic_get_new_bridge_state(state, bridge);
@@ -653,9 +653,9 @@ static int sn65dsi83_host_attach(struct sn65dsi83 *ctx)
return 0;
}
-static int sn65dsi83_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int sn65dsi83_probe(struct i2c_client *client)
{
+ const struct i2c_device_id *id = i2c_client_get_device_id(client);
struct device *dev = &client->dev;
enum sn65dsi83_model model;
struct sn65dsi83 *ctx;
@@ -730,7 +730,7 @@ static const struct of_device_id sn65dsi83_match_table[] = {
MODULE_DEVICE_TABLE(of, sn65dsi83_match_table);
static struct i2c_driver sn65dsi83_driver = {
- .probe = sn65dsi83_probe,
+ .probe_new = sn65dsi83_probe,
.remove = sn65dsi83_remove,
.id_table = sn65dsi83_id,
.driver = {
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
index 05f8756d1aaf..1e26fa63845a 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
@@ -1852,8 +1852,7 @@ static int ti_sn65dsi86_parse_regulators(struct ti_sn65dsi86 *pdata)
pdata->supplies);
}
-static int ti_sn65dsi86_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int ti_sn65dsi86_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct ti_sn65dsi86 *pdata;
@@ -1952,7 +1951,7 @@ static struct i2c_driver ti_sn65dsi86_driver = {
.of_match_table = ti_sn65dsi86_match_table,
.pm = &ti_sn65dsi86_pm_ops,
},
- .probe = ti_sn65dsi86_probe,
+ .probe_new = ti_sn65dsi86_probe,
.id_table = ti_sn65dsi86_id,
};
diff --git a/drivers/gpu/drm/bridge/ti-tfp410.c b/drivers/gpu/drm/bridge/ti-tfp410.c
index b9635abbad16..6db69df0e18b 100644
--- a/drivers/gpu/drm/bridge/ti-tfp410.c
+++ b/drivers/gpu/drm/bridge/ti-tfp410.c
@@ -379,8 +379,7 @@ static struct platform_driver tfp410_platform_driver = {
#if IS_ENABLED(CONFIG_I2C)
/* There is currently no i2c functionality. */
-static int tfp410_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int tfp410_i2c_probe(struct i2c_client *client)
{
int reg;
@@ -411,7 +410,7 @@ static struct i2c_driver tfp410_i2c_driver = {
.of_match_table = of_match_ptr(tfp410_match),
},
.id_table = tfp410_i2c_ids,
- .probe = tfp410_i2c_probe,
+ .probe_new = tfp410_i2c_probe,
.remove = tfp410_i2c_remove,
};
#endif /* IS_ENABLED(CONFIG_I2C) */
diff --git a/drivers/gpu/drm/display/drm_dp_aux_bus.c b/drivers/gpu/drm/display/drm_dp_aux_bus.c
index f5741b45ca07..8a165be1a821 100644
--- a/drivers/gpu/drm/display/drm_dp_aux_bus.c
+++ b/drivers/gpu/drm/display/drm_dp_aux_bus.c
@@ -161,9 +161,14 @@ static void dp_aux_ep_dev_release(struct device *dev)
kfree(aux_ep_with_data);
}
+static int dp_aux_ep_dev_modalias(const struct device *dev, struct kobj_uevent_env *env)
+{
+ return of_device_uevent_modalias(dev, env);
+}
+
static struct device_type dp_aux_device_type_type = {
.groups = dp_aux_ep_dev_groups,
- .uevent = of_device_uevent_modalias,
+ .uevent = dp_aux_ep_dev_modalias,
.release = dp_aux_ep_dev_release,
};
diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology.c b/drivers/gpu/drm/display/drm_dp_mst_topology.c
index 4ca37261584a..38dab76ae69e 100644
--- a/drivers/gpu/drm/display/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c
@@ -3309,8 +3309,13 @@ int drm_dp_add_payload_part1(struct drm_dp_mst_topology_mgr *mgr,
int ret;
port = drm_dp_mst_topology_get_port_validated(mgr, payload->port);
- if (!port)
+ if (!port) {
+ drm_dbg_kms(mgr->dev,
+ "VCPI %d for port %p not in topology, not creating a payload\n",
+ payload->vcpi, payload->port);
+ payload->vc_start_slot = -1;
return 0;
+ }
if (mgr->payload_count == 0)
mgr->next_start_slot = mst_state->start_slot;
@@ -3337,7 +3342,8 @@ EXPORT_SYMBOL(drm_dp_add_payload_part1);
* drm_dp_remove_payload() - Remove an MST payload
* @mgr: Manager to use.
* @mst_state: The MST atomic state
- * @payload: The payload to write
+ * @old_payload: The payload with its old state
+ * @new_payload: The payload to write
*
* Removes a payload from an MST topology if it was successfully assigned a start slot. Also updates
* the starting time slots of all other payloads which would have been shifted towards the start of
@@ -3345,36 +3351,37 @@ EXPORT_SYMBOL(drm_dp_add_payload_part1);
*/
void drm_dp_remove_payload(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_topology_state *mst_state,
- struct drm_dp_mst_atomic_payload *payload)
+ const struct drm_dp_mst_atomic_payload *old_payload,
+ struct drm_dp_mst_atomic_payload *new_payload)
{
struct drm_dp_mst_atomic_payload *pos;
bool send_remove = false;
/* We failed to make the payload, so nothing to do */
- if (payload->vc_start_slot == -1)
+ if (new_payload->vc_start_slot == -1)
return;
mutex_lock(&mgr->lock);
- send_remove = drm_dp_mst_port_downstream_of_branch(payload->port, mgr->mst_primary);
+ send_remove = drm_dp_mst_port_downstream_of_branch(new_payload->port, mgr->mst_primary);
mutex_unlock(&mgr->lock);
if (send_remove)
- drm_dp_destroy_payload_step1(mgr, mst_state, payload);
+ drm_dp_destroy_payload_step1(mgr, mst_state, new_payload);
else
drm_dbg_kms(mgr->dev, "Payload for VCPI %d not in topology, not sending remove\n",
- payload->vcpi);
+ new_payload->vcpi);
list_for_each_entry(pos, &mst_state->payloads, next) {
- if (pos != payload && pos->vc_start_slot > payload->vc_start_slot)
- pos->vc_start_slot -= payload->time_slots;
+ if (pos != new_payload && pos->vc_start_slot > new_payload->vc_start_slot)
+ pos->vc_start_slot -= old_payload->time_slots;
}
- payload->vc_start_slot = -1;
+ new_payload->vc_start_slot = -1;
mgr->payload_count--;
- mgr->next_start_slot -= payload->time_slots;
+ mgr->next_start_slot -= old_payload->time_slots;
- if (payload->delete)
- drm_dp_mst_put_port_malloc(payload->port);
+ if (new_payload->delete)
+ drm_dp_mst_put_port_malloc(new_payload->port);
}
EXPORT_SYMBOL(drm_dp_remove_payload);
@@ -3644,6 +3651,9 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0);
ret = 0;
mgr->payload_id_table_cleared = false;
+
+ memset(&mgr->down_rep_recv, 0, sizeof(mgr->down_rep_recv));
+ memset(&mgr->up_req_recv, 0, sizeof(mgr->up_req_recv));
}
out_unlock:
@@ -3856,7 +3866,7 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
struct drm_dp_sideband_msg_rx *msg = &mgr->down_rep_recv;
if (!drm_dp_get_one_sb_msg(mgr, false, &mstb))
- goto out;
+ goto out_clear_reply;
/* Multi-packet message transmission, don't clear the reply */
if (!msg->have_eomt)
@@ -5355,27 +5365,52 @@ struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_a
EXPORT_SYMBOL(drm_atomic_get_mst_topology_state);
/**
+ * drm_atomic_get_old_mst_topology_state: get old MST topology state in atomic state, if any
+ * @state: global atomic state
+ * @mgr: MST topology manager, also the private object in this case
+ *
+ * This function wraps drm_atomic_get_old_private_obj_state() passing in the MST atomic
+ * state vtable so that the private object state returned is that of a MST
+ * topology object.
+ *
+ * Returns:
+ *
+ * The old MST topology state, or NULL if there's no topology state for this MST mgr
+ * in the global atomic state
+ */
+struct drm_dp_mst_topology_state *
+drm_atomic_get_old_mst_topology_state(struct drm_atomic_state *state,
+ struct drm_dp_mst_topology_mgr *mgr)
+{
+ struct drm_private_state *old_priv_state =
+ drm_atomic_get_old_private_obj_state(state, &mgr->base);
+
+ return old_priv_state ? to_dp_mst_topology_state(old_priv_state) : NULL;
+}
+EXPORT_SYMBOL(drm_atomic_get_old_mst_topology_state);
+
+/**
* drm_atomic_get_new_mst_topology_state: get new MST topology state in atomic state, if any
* @state: global atomic state
* @mgr: MST topology manager, also the private object in this case
*
- * This function wraps drm_atomic_get_priv_obj_state() passing in the MST atomic
+ * This function wraps drm_atomic_get_new_private_obj_state() passing in the MST atomic
* state vtable so that the private object state returned is that of a MST
* topology object.
*
* Returns:
*
- * The MST topology state, or NULL if there's no topology state for this MST mgr
+ * The new MST topology state, or NULL if there's no topology state for this MST mgr
* in the global atomic state
*/
struct drm_dp_mst_topology_state *
drm_atomic_get_new_mst_topology_state(struct drm_atomic_state *state,
struct drm_dp_mst_topology_mgr *mgr)
{
- struct drm_private_state *priv_state =
+ struct drm_private_state *new_priv_state =
drm_atomic_get_new_private_obj_state(state, &mgr->base);
- return priv_state ? to_dp_mst_topology_state(priv_state) : NULL;
+ return new_priv_state ? to_dp_mst_topology_state(new_priv_state) : NULL;
}
EXPORT_SYMBOL(drm_atomic_get_new_mst_topology_state);
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index f197f59f6d99..5457c02ca1ab 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -880,7 +880,7 @@ EXPORT_SYMBOL(drm_atomic_get_private_obj_state);
* or NULL if the private_obj is not part of the global atomic state.
*/
struct drm_private_state *
-drm_atomic_get_old_private_obj_state(struct drm_atomic_state *state,
+drm_atomic_get_old_private_obj_state(const struct drm_atomic_state *state,
struct drm_private_obj *obj)
{
int i;
@@ -902,7 +902,7 @@ EXPORT_SYMBOL(drm_atomic_get_old_private_obj_state);
* or NULL if the private_obj is not part of the global atomic state.
*/
struct drm_private_state *
-drm_atomic_get_new_private_obj_state(struct drm_atomic_state *state,
+drm_atomic_get_new_private_obj_state(const struct drm_atomic_state *state,
struct drm_private_obj *obj)
{
int i;
@@ -934,7 +934,7 @@ EXPORT_SYMBOL(drm_atomic_get_new_private_obj_state);
* not connected.
*/
struct drm_connector *
-drm_atomic_get_old_connector_for_encoder(struct drm_atomic_state *state,
+drm_atomic_get_old_connector_for_encoder(const struct drm_atomic_state *state,
struct drm_encoder *encoder)
{
struct drm_connector_state *conn_state;
@@ -968,7 +968,7 @@ EXPORT_SYMBOL(drm_atomic_get_old_connector_for_encoder);
* not connected.
*/
struct drm_connector *
-drm_atomic_get_new_connector_for_encoder(struct drm_atomic_state *state,
+drm_atomic_get_new_connector_for_encoder(const struct drm_atomic_state *state,
struct drm_encoder *encoder)
{
struct drm_connector_state *conn_state;
@@ -1117,7 +1117,7 @@ EXPORT_SYMBOL(drm_atomic_get_bridge_state);
* the bridge is not part of the global atomic state.
*/
struct drm_bridge_state *
-drm_atomic_get_old_bridge_state(struct drm_atomic_state *state,
+drm_atomic_get_old_bridge_state(const struct drm_atomic_state *state,
struct drm_bridge *bridge)
{
struct drm_private_state *obj_state;
@@ -1139,7 +1139,7 @@ EXPORT_SYMBOL(drm_atomic_get_old_bridge_state);
* the bridge is not part of the global atomic state.
*/
struct drm_bridge_state *
-drm_atomic_get_new_bridge_state(struct drm_atomic_state *state,
+drm_atomic_get_new_bridge_state(const struct drm_atomic_state *state,
struct drm_bridge *bridge)
{
struct drm_private_state *obj_state;
@@ -1756,8 +1756,8 @@ EXPORT_SYMBOL(drm_state_dump);
#ifdef CONFIG_DEBUG_FS
static int drm_state_info(struct seq_file *m, void *data)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
+ struct drm_debugfs_entry *entry = m->private;
+ struct drm_device *dev = entry->dev;
struct drm_printer p = drm_seq_file_printer(m);
__drm_state_dump(dev, &p, true);
@@ -1766,14 +1766,13 @@ static int drm_state_info(struct seq_file *m, void *data)
}
/* any use in debugfs files to dump individual planes/crtc/etc? */
-static const struct drm_info_list drm_atomic_debugfs_list[] = {
+static const struct drm_debugfs_info drm_atomic_debugfs_list[] = {
{"state", drm_state_info, 0},
};
void drm_atomic_debugfs_init(struct drm_minor *minor)
{
- drm_debugfs_create_files(drm_atomic_debugfs_list,
- ARRAY_SIZE(drm_atomic_debugfs_list),
- minor->debugfs_root, minor);
+ drm_debugfs_add_files(minor->dev, drm_atomic_debugfs_list,
+ ARRAY_SIZE(drm_atomic_debugfs_list));
}
#endif
diff --git a/drivers/gpu/drm/drm_atomic_state_helper.c b/drivers/gpu/drm/drm_atomic_state_helper.c
index dfb57217253b..784e63d70a42 100644
--- a/drivers/gpu/drm/drm_atomic_state_helper.c
+++ b/drivers/gpu/drm/drm_atomic_state_helper.c
@@ -482,6 +482,130 @@ void drm_atomic_helper_connector_tv_margins_reset(struct drm_connector *connecto
EXPORT_SYMBOL(drm_atomic_helper_connector_tv_margins_reset);
/**
+ * drm_atomic_helper_connector_tv_reset - Resets Analog TV connector properties
+ * @connector: DRM connector
+ *
+ * Resets the analog TV properties attached to a connector
+ */
+void drm_atomic_helper_connector_tv_reset(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_cmdline_mode *cmdline = &connector->cmdline_mode;
+ struct drm_connector_state *state = connector->state;
+ struct drm_property *prop;
+ uint64_t val;
+
+ prop = dev->mode_config.tv_mode_property;
+ if (prop)
+ if (!drm_object_property_get_default_value(&connector->base,
+ prop, &val))
+ state->tv.mode = val;
+
+ if (cmdline->tv_mode_specified)
+ state->tv.mode = cmdline->tv_mode;
+
+ prop = dev->mode_config.tv_select_subconnector_property;
+ if (prop)
+ if (!drm_object_property_get_default_value(&connector->base,
+ prop, &val))
+ state->tv.select_subconnector = val;
+
+ prop = dev->mode_config.tv_subconnector_property;
+ if (prop)
+ if (!drm_object_property_get_default_value(&connector->base,
+ prop, &val))
+ state->tv.subconnector = val;
+
+ prop = dev->mode_config.tv_brightness_property;
+ if (prop)
+ if (!drm_object_property_get_default_value(&connector->base,
+ prop, &val))
+ state->tv.brightness = val;
+
+ prop = dev->mode_config.tv_contrast_property;
+ if (prop)
+ if (!drm_object_property_get_default_value(&connector->base,
+ prop, &val))
+ state->tv.contrast = val;
+
+ prop = dev->mode_config.tv_flicker_reduction_property;
+ if (prop)
+ if (!drm_object_property_get_default_value(&connector->base,
+ prop, &val))
+ state->tv.flicker_reduction = val;
+
+ prop = dev->mode_config.tv_overscan_property;
+ if (prop)
+ if (!drm_object_property_get_default_value(&connector->base,
+ prop, &val))
+ state->tv.overscan = val;
+
+ prop = dev->mode_config.tv_saturation_property;
+ if (prop)
+ if (!drm_object_property_get_default_value(&connector->base,
+ prop, &val))
+ state->tv.saturation = val;
+
+ prop = dev->mode_config.tv_hue_property;
+ if (prop)
+ if (!drm_object_property_get_default_value(&connector->base,
+ prop, &val))
+ state->tv.hue = val;
+
+ drm_atomic_helper_connector_tv_margins_reset(connector);
+}
+EXPORT_SYMBOL(drm_atomic_helper_connector_tv_reset);
+
+/**
+ * drm_atomic_helper_connector_tv_check - Validate an analog TV connector state
+ * @connector: DRM Connector
+ * @state: the DRM State object
+ *
+ * Checks the state object to see if the requested state is valid for an
+ * analog TV connector.
+ *
+ * Return:
+ * %0 for success, a negative error code on error.
+ */
+int drm_atomic_helper_connector_tv_check(struct drm_connector *connector,
+ struct drm_atomic_state *state)
+{
+ struct drm_connector_state *old_conn_state =
+ drm_atomic_get_old_connector_state(state, connector);
+ struct drm_connector_state *new_conn_state =
+ drm_atomic_get_new_connector_state(state, connector);
+ struct drm_crtc_state *crtc_state;
+ struct drm_crtc *crtc;
+
+ crtc = new_conn_state->crtc;
+ if (!crtc)
+ return 0;
+
+ crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+ if (!crtc_state)
+ return -EINVAL;
+
+ if (old_conn_state->tv.mode != new_conn_state->tv.mode)
+ crtc_state->mode_changed = true;
+
+ if (old_conn_state->tv.margins.left != new_conn_state->tv.margins.left ||
+ old_conn_state->tv.margins.right != new_conn_state->tv.margins.right ||
+ old_conn_state->tv.margins.top != new_conn_state->tv.margins.top ||
+ old_conn_state->tv.margins.bottom != new_conn_state->tv.margins.bottom ||
+ old_conn_state->tv.mode != new_conn_state->tv.mode ||
+ old_conn_state->tv.brightness != new_conn_state->tv.brightness ||
+ old_conn_state->tv.contrast != new_conn_state->tv.contrast ||
+ old_conn_state->tv.flicker_reduction != new_conn_state->tv.flicker_reduction ||
+ old_conn_state->tv.overscan != new_conn_state->tv.overscan ||
+ old_conn_state->tv.saturation != new_conn_state->tv.saturation ||
+ old_conn_state->tv.hue != new_conn_state->tv.hue)
+ crtc_state->connectors_changed = true;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_atomic_helper_connector_tv_check);
+
+/**
* __drm_atomic_helper_connector_duplicate_state - copy atomic connector state
* @connector: connector object
* @state: atomic connector state
diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c
index c06d0639d552..d867e7f9f2cd 100644
--- a/drivers/gpu/drm/drm_atomic_uapi.c
+++ b/drivers/gpu/drm/drm_atomic_uapi.c
@@ -698,6 +698,8 @@ static int drm_atomic_connector_set_property(struct drm_connector *connector,
state->tv.margins.top = val;
} else if (property == config->tv_bottom_margin_property) {
state->tv.margins.bottom = val;
+ } else if (property == config->legacy_tv_mode_property) {
+ state->tv.legacy_mode = val;
} else if (property == config->tv_mode_property) {
state->tv.mode = val;
} else if (property == config->tv_brightness_property) {
@@ -808,6 +810,8 @@ drm_atomic_connector_get_property(struct drm_connector *connector,
*val = state->tv.margins.top;
} else if (property == config->tv_bottom_margin_property) {
*val = state->tv.margins.bottom;
+ } else if (property == config->legacy_tv_mode_property) {
+ *val = state->tv.legacy_mode;
} else if (property == config->tv_mode_property) {
*val = state->tv.mode;
} else if (property == config->tv_brightness_property) {
diff --git a/drivers/gpu/drm/drm_blend.c b/drivers/gpu/drm/drm_blend.c
index b4c8cab7158c..6e74de833466 100644
--- a/drivers/gpu/drm/drm_blend.c
+++ b/drivers/gpu/drm/drm_blend.c
@@ -450,8 +450,8 @@ static int drm_atomic_helper_crtc_normalize_zpos(struct drm_crtc *crtc,
int i, n = 0;
int ret = 0;
- DRM_DEBUG_ATOMIC("[CRTC:%d:%s] calculating normalized zpos values\n",
- crtc->base.id, crtc->name);
+ drm_dbg_atomic(dev, "[CRTC:%d:%s] calculating normalized zpos values\n",
+ crtc->base.id, crtc->name);
states = kmalloc_array(total_planes, sizeof(*states), GFP_KERNEL);
if (!states)
@@ -469,9 +469,8 @@ static int drm_atomic_helper_crtc_normalize_zpos(struct drm_crtc *crtc,
goto done;
}
states[n++] = plane_state;
- DRM_DEBUG_ATOMIC("[PLANE:%d:%s] processing zpos value %d\n",
- plane->base.id, plane->name,
- plane_state->zpos);
+ drm_dbg_atomic(dev, "[PLANE:%d:%s] processing zpos value %d\n",
+ plane->base.id, plane->name, plane_state->zpos);
}
sort(states, n, sizeof(*states), drm_atomic_state_zpos_cmp, NULL);
@@ -480,8 +479,8 @@ static int drm_atomic_helper_crtc_normalize_zpos(struct drm_crtc *crtc,
plane = states[i]->plane;
states[i]->normalized_zpos = i;
- DRM_DEBUG_ATOMIC("[PLANE:%d:%s] normalized zpos value %d\n",
- plane->base.id, plane->name, i);
+ drm_dbg_atomic(dev, "[PLANE:%d:%s] normalized zpos value %d\n",
+ plane->base.id, plane->name, i);
}
crtc_state->zpos_changed = true;
diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c
index 1545c50fd1c8..c3d69af02e79 100644
--- a/drivers/gpu/drm/drm_bridge.c
+++ b/drivers/gpu/drm/drm_bridge.c
@@ -153,6 +153,45 @@
* situation when probing.
*/
+/**
+ * DOC: dsi bridge operations
+ *
+ * DSI host interfaces are expected to be implemented as bridges rather than
+ * encoders, however there are a few aspects of their operation that need to
+ * be defined in order to provide a consistent interface.
+ *
+ * A DSI host should keep the PHY powered down until the pre_enable operation is
+ * called. All lanes are in an undefined idle state up to this point, and it
+ * must not be assumed that it is LP-11.
+ * pre_enable should initialise the PHY, set the data lanes to LP-11, and the
+ * clock lane to either LP-11 or HS depending on the mode_flag
+ * %MIPI_DSI_CLOCK_NON_CONTINUOUS.
+ *
+ * Ordinarily the downstream bridge DSI peripheral pre_enable will have been
+ * called before the DSI host. If the DSI peripheral requires LP-11 and/or
+ * the clock lane to be in HS mode prior to pre_enable, then it can set the
+ * &pre_enable_prev_first flag to request the pre_enable (and
+ * post_disable) order to be altered to enable the DSI host first.
+ *
+ * Either the CRTC being enabled, or the DSI host enable operation should switch
+ * the host to actively transmitting video on the data lanes.
+ *
+ * The reverse also applies. The DSI host disable operation or stopping the CRTC
+ * should stop transmitting video, and the data lanes should return to the LP-11
+ * state. The DSI host &post_disable operation should disable the PHY.
+ * If the &pre_enable_prev_first flag is set, then the DSI peripheral's
+ * bridge &post_disable will be called before the DSI host's post_disable.
+ *
+ * Whilst it is valid to call &host_transfer prior to pre_enable or after
+ * post_disable, the exact state of the lanes is undefined at this point. The
+ * DSI host should initialise the interface, transmit the data, and then disable
+ * the interface again.
+ *
+ * Ultra Low Power State (ULPS) is not explicitly supported by DRM. If
+ * implemented, it therefore needs to be handled entirely within the DSI Host
+ * driver.
+ */
+
static DEFINE_MUTEX(bridge_lock);
static LIST_HEAD(bridge_list);
@@ -510,61 +549,6 @@ drm_bridge_chain_mode_valid(struct drm_bridge *bridge,
EXPORT_SYMBOL(drm_bridge_chain_mode_valid);
/**
- * drm_bridge_chain_disable - disables all bridges in the encoder chain
- * @bridge: bridge control structure
- *
- * Calls &drm_bridge_funcs.disable op for all the bridges in the encoder
- * chain, starting from the last bridge to the first. These are called before
- * calling the encoder's prepare op.
- *
- * Note: the bridge passed should be the one closest to the encoder
- */
-void drm_bridge_chain_disable(struct drm_bridge *bridge)
-{
- struct drm_encoder *encoder;
- struct drm_bridge *iter;
-
- if (!bridge)
- return;
-
- encoder = bridge->encoder;
- list_for_each_entry_reverse(iter, &encoder->bridge_chain, chain_node) {
- if (iter->funcs->disable)
- iter->funcs->disable(iter);
-
- if (iter == bridge)
- break;
- }
-}
-EXPORT_SYMBOL(drm_bridge_chain_disable);
-
-/**
- * drm_bridge_chain_post_disable - cleans up after disabling all bridges in the
- * encoder chain
- * @bridge: bridge control structure
- *
- * Calls &drm_bridge_funcs.post_disable op for all the bridges in the
- * encoder chain, starting from the first bridge to the last. These are called
- * after completing the encoder's prepare op.
- *
- * Note: the bridge passed should be the one closest to the encoder
- */
-void drm_bridge_chain_post_disable(struct drm_bridge *bridge)
-{
- struct drm_encoder *encoder;
-
- if (!bridge)
- return;
-
- encoder = bridge->encoder;
- list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) {
- if (bridge->funcs->post_disable)
- bridge->funcs->post_disable(bridge);
- }
-}
-EXPORT_SYMBOL(drm_bridge_chain_post_disable);
-
-/**
* drm_bridge_chain_mode_set - set proposed mode for all bridges in the
* encoder chain
* @bridge: bridge control structure
@@ -594,61 +578,6 @@ void drm_bridge_chain_mode_set(struct drm_bridge *bridge,
EXPORT_SYMBOL(drm_bridge_chain_mode_set);
/**
- * drm_bridge_chain_pre_enable - prepares for enabling all bridges in the
- * encoder chain
- * @bridge: bridge control structure
- *
- * Calls &drm_bridge_funcs.pre_enable op for all the bridges in the encoder
- * chain, starting from the last bridge to the first. These are called
- * before calling the encoder's commit op.
- *
- * Note: the bridge passed should be the one closest to the encoder
- */
-void drm_bridge_chain_pre_enable(struct drm_bridge *bridge)
-{
- struct drm_encoder *encoder;
- struct drm_bridge *iter;
-
- if (!bridge)
- return;
-
- encoder = bridge->encoder;
- list_for_each_entry_reverse(iter, &encoder->bridge_chain, chain_node) {
- if (iter->funcs->pre_enable)
- iter->funcs->pre_enable(iter);
-
- if (iter == bridge)
- break;
- }
-}
-EXPORT_SYMBOL(drm_bridge_chain_pre_enable);
-
-/**
- * drm_bridge_chain_enable - enables all bridges in the encoder chain
- * @bridge: bridge control structure
- *
- * Calls &drm_bridge_funcs.enable op for all the bridges in the encoder
- * chain, starting from the first bridge to the last. These are called
- * after completing the encoder's commit op.
- *
- * Note that the bridge passed should be the one closest to the encoder
- */
-void drm_bridge_chain_enable(struct drm_bridge *bridge)
-{
- struct drm_encoder *encoder;
-
- if (!bridge)
- return;
-
- encoder = bridge->encoder;
- list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) {
- if (bridge->funcs->enable)
- bridge->funcs->enable(bridge);
- }
-}
-EXPORT_SYMBOL(drm_bridge_chain_enable);
-
-/**
* drm_atomic_bridge_chain_disable - disables all bridges in the encoder chain
* @bridge: bridge control structure
* @old_state: old atomic state
@@ -691,6 +620,25 @@ void drm_atomic_bridge_chain_disable(struct drm_bridge *bridge,
}
EXPORT_SYMBOL(drm_atomic_bridge_chain_disable);
+static void drm_atomic_bridge_call_post_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *old_state)
+{
+ if (old_state && bridge->funcs->atomic_post_disable) {
+ struct drm_bridge_state *old_bridge_state;
+
+ old_bridge_state =
+ drm_atomic_get_old_bridge_state(old_state,
+ bridge);
+ if (WARN_ON(!old_bridge_state))
+ return;
+
+ bridge->funcs->atomic_post_disable(bridge,
+ old_bridge_state);
+ } else if (bridge->funcs->post_disable) {
+ bridge->funcs->post_disable(bridge);
+ }
+}
+
/**
* drm_atomic_bridge_chain_post_disable - cleans up after disabling all bridges
* in the encoder chain
@@ -702,36 +650,86 @@ EXPORT_SYMBOL(drm_atomic_bridge_chain_disable);
* starting from the first bridge to the last. These are called after completing
* &drm_encoder_helper_funcs.atomic_disable
*
+ * If a bridge sets @pre_enable_prev_first, then the @post_disable for that
+ * bridge will be called before the previous one to reverse the @pre_enable
+ * calling direction.
+ *
* Note: the bridge passed should be the one closest to the encoder
*/
void drm_atomic_bridge_chain_post_disable(struct drm_bridge *bridge,
struct drm_atomic_state *old_state)
{
struct drm_encoder *encoder;
+ struct drm_bridge *next, *limit;
if (!bridge)
return;
encoder = bridge->encoder;
+
list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) {
- if (bridge->funcs->atomic_post_disable) {
- struct drm_bridge_state *old_bridge_state;
+ limit = NULL;
+
+ if (!list_is_last(&bridge->chain_node, &encoder->bridge_chain)) {
+ next = list_next_entry(bridge, chain_node);
+
+ if (next->pre_enable_prev_first) {
+ /* next bridge had requested that prev
+ * was enabled first, so disabled last
+ */
+ limit = next;
+
+ /* Find the next bridge that has NOT requested
+ * prev to be enabled first / disabled last
+ */
+ list_for_each_entry_from(next, &encoder->bridge_chain,
+ chain_node) {
+ if (next->pre_enable_prev_first) {
+ next = list_prev_entry(next, chain_node);
+ limit = next;
+ break;
+ }
+ }
+
+ /* Call these bridges in reverse order */
+ list_for_each_entry_from_reverse(next, &encoder->bridge_chain,
+ chain_node) {
+ if (next == bridge)
+ break;
+
+ drm_atomic_bridge_call_post_disable(next,
+ old_state);
+ }
+ }
+ }
- old_bridge_state =
- drm_atomic_get_old_bridge_state(old_state,
- bridge);
- if (WARN_ON(!old_bridge_state))
- return;
+ drm_atomic_bridge_call_post_disable(bridge, old_state);
- bridge->funcs->atomic_post_disable(bridge,
- old_bridge_state);
- } else if (bridge->funcs->post_disable) {
- bridge->funcs->post_disable(bridge);
- }
+ if (limit)
+ /* Jump all bridges that we have already post_disabled */
+ bridge = limit;
}
}
EXPORT_SYMBOL(drm_atomic_bridge_chain_post_disable);
+static void drm_atomic_bridge_call_pre_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *old_state)
+{
+ if (old_state && bridge->funcs->atomic_pre_enable) {
+ struct drm_bridge_state *old_bridge_state;
+
+ old_bridge_state =
+ drm_atomic_get_old_bridge_state(old_state,
+ bridge);
+ if (WARN_ON(!old_bridge_state))
+ return;
+
+ bridge->funcs->atomic_pre_enable(bridge, old_bridge_state);
+ } else if (bridge->funcs->pre_enable) {
+ bridge->funcs->pre_enable(bridge);
+ }
+}
+
/**
* drm_atomic_bridge_chain_pre_enable - prepares for enabling all bridges in
* the encoder chain
@@ -743,32 +741,60 @@ EXPORT_SYMBOL(drm_atomic_bridge_chain_post_disable);
* starting from the last bridge to the first. These are called before calling
* &drm_encoder_helper_funcs.atomic_enable
*
+ * If a bridge sets @pre_enable_prev_first, then the pre_enable for the
+ * prev bridge will be called before pre_enable of this bridge.
+ *
* Note: the bridge passed should be the one closest to the encoder
*/
void drm_atomic_bridge_chain_pre_enable(struct drm_bridge *bridge,
struct drm_atomic_state *old_state)
{
struct drm_encoder *encoder;
- struct drm_bridge *iter;
+ struct drm_bridge *iter, *next, *limit;
if (!bridge)
return;
encoder = bridge->encoder;
+
list_for_each_entry_reverse(iter, &encoder->bridge_chain, chain_node) {
- if (iter->funcs->atomic_pre_enable) {
- struct drm_bridge_state *old_bridge_state;
+ if (iter->pre_enable_prev_first) {
+ next = iter;
+ limit = bridge;
+ list_for_each_entry_from_reverse(next,
+ &encoder->bridge_chain,
+ chain_node) {
+ if (next == bridge)
+ break;
+
+ if (!next->pre_enable_prev_first) {
+ /* Found first bridge that does NOT
+ * request prev to be enabled first
+ */
+ limit = list_prev_entry(next, chain_node);
+ break;
+ }
+ }
+
+ list_for_each_entry_from(next, &encoder->bridge_chain, chain_node) {
+ /* Call requested prev bridge pre_enable
+ * in order.
+ */
+ if (next == iter)
+ /* At the first bridge to request prev
+ * bridges called first.
+ */
+ break;
+
+ drm_atomic_bridge_call_pre_enable(next, old_state);
+ }
+ }
- old_bridge_state =
- drm_atomic_get_old_bridge_state(old_state,
- iter);
- if (WARN_ON(!old_bridge_state))
- return;
+ drm_atomic_bridge_call_pre_enable(iter, old_state);
- iter->funcs->atomic_pre_enable(iter, old_bridge_state);
- } else if (iter->funcs->pre_enable) {
- iter->funcs->pre_enable(iter);
- }
+ if (iter->pre_enable_prev_first)
+ /* Jump all bridges that we have already pre_enabled */
+ iter = limit;
if (iter == bridge)
break;
diff --git a/drivers/gpu/drm/drm_bridge_connector.c b/drivers/gpu/drm/drm_bridge_connector.c
index 1c7d936523df..19ae4a177ac3 100644
--- a/drivers/gpu/drm/drm_bridge_connector.c
+++ b/drivers/gpu/drm/drm_bridge_connector.c
@@ -128,14 +128,7 @@ static void drm_bridge_connector_hpd_cb(void *cb_data,
drm_kms_helper_hotplug_event(dev);
}
-/**
- * drm_bridge_connector_enable_hpd - Enable hot-plug detection for the connector
- * @connector: The DRM bridge connector
- *
- * This function enables hot-plug detection for the given bridge connector.
- * This is typically used by display drivers in their resume handler.
- */
-void drm_bridge_connector_enable_hpd(struct drm_connector *connector)
+static void drm_bridge_connector_enable_hpd(struct drm_connector *connector)
{
struct drm_bridge_connector *bridge_connector =
to_drm_bridge_connector(connector);
@@ -145,17 +138,8 @@ void drm_bridge_connector_enable_hpd(struct drm_connector *connector)
drm_bridge_hpd_enable(hpd, drm_bridge_connector_hpd_cb,
bridge_connector);
}
-EXPORT_SYMBOL_GPL(drm_bridge_connector_enable_hpd);
-/**
- * drm_bridge_connector_disable_hpd - Disable hot-plug detection for the
- * connector
- * @connector: The DRM bridge connector
- *
- * This function disables hot-plug detection for the given bridge connector.
- * This is typically used by display drivers in their suspend handler.
- */
-void drm_bridge_connector_disable_hpd(struct drm_connector *connector)
+static void drm_bridge_connector_disable_hpd(struct drm_connector *connector)
{
struct drm_bridge_connector *bridge_connector =
to_drm_bridge_connector(connector);
@@ -164,7 +148,6 @@ void drm_bridge_connector_disable_hpd(struct drm_connector *connector)
if (hpd)
drm_bridge_hpd_disable(hpd);
}
-EXPORT_SYMBOL_GPL(drm_bridge_connector_disable_hpd);
/* -----------------------------------------------------------------------------
* Bridge Connector Functions
@@ -305,6 +288,8 @@ static int drm_bridge_connector_get_modes(struct drm_connector *connector)
static const struct drm_connector_helper_funcs drm_bridge_connector_helper_funcs = {
.get_modes = drm_bridge_connector_get_modes,
/* No need for .mode_valid(), the bridges are checked by the core. */
+ .enable_hpd = drm_bridge_connector_enable_hpd,
+ .disable_hpd = drm_bridge_connector_disable_hpd,
};
/* -----------------------------------------------------------------------------
@@ -387,10 +372,8 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
connector_type, ddc);
drm_connector_helper_add(connector, &drm_bridge_connector_helper_funcs);
- if (bridge_connector->bridge_hpd) {
+ if (bridge_connector->bridge_hpd)
connector->polled = DRM_CONNECTOR_POLL_HPD;
- drm_bridge_connector_enable_hpd(connector);
- }
else if (bridge_connector->bridge_detect)
connector->polled = DRM_CONNECTOR_POLL_CONNECT
| DRM_CONNECTOR_POLL_DISCONNECT;
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index fcca21e8efac..86700560fea2 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -423,8 +423,7 @@ int drm_legacy_addmap_ioctl(struct drm_device *dev, void *data,
if (!(capable(CAP_SYS_ADMIN) || map->type == _DRM_AGP || map->type == _DRM_SHM))
return -EPERM;
- if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
- !drm_core_check_feature(dev, DRIVER_LEGACY))
+ if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return -EOPNOTSUPP;
err = drm_addmap_core(dev, map->offset, map->size, map->type,
@@ -469,8 +468,7 @@ int drm_legacy_getmap_ioctl(struct drm_device *dev, void *data,
int idx;
int i;
- if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
- !drm_core_check_feature(dev, DRIVER_LEGACY))
+ if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return -EOPNOTSUPP;
idx = map->offset;
@@ -570,8 +568,7 @@ EXPORT_SYMBOL(drm_legacy_rmmap_locked);
void drm_legacy_rmmap(struct drm_device *dev, struct drm_local_map *map)
{
- if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
- !drm_core_check_feature(dev, DRIVER_LEGACY))
+ if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return;
mutex_lock(&dev->struct_mutex);
@@ -628,8 +625,7 @@ int drm_legacy_rmmap_ioctl(struct drm_device *dev, void *data,
struct drm_map_list *r_list;
int ret;
- if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
- !drm_core_check_feature(dev, DRIVER_LEGACY))
+ if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return -EOPNOTSUPP;
mutex_lock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/drm_client.c b/drivers/gpu/drm/drm_client.c
index fd67efe37c63..f6292ba0e6fc 100644
--- a/drivers/gpu/drm/drm_client.c
+++ b/drivers/gpu/drm/drm_client.c
@@ -198,13 +198,23 @@ void drm_client_dev_hotplug(struct drm_device *dev)
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return;
+ if (!dev->mode_config.num_connector) {
+ drm_dbg_kms(dev, "No connectors found, will not send hotplug events!\n");
+ return;
+ }
+
mutex_lock(&dev->clientlist_mutex);
list_for_each_entry(client, &dev->clientlist, list) {
if (!client->funcs || !client->funcs->hotplug)
continue;
+ if (client->hotplug_failed)
+ continue;
+
ret = client->funcs->hotplug(client);
drm_dbg_kms(dev, "%s: ret=%d\n", client->name, ret);
+ if (ret)
+ client->hotplug_failed = true;
}
mutex_unlock(&dev->clientlist_mutex);
}
@@ -233,21 +243,17 @@ void drm_client_dev_restore(struct drm_device *dev)
static void drm_client_buffer_delete(struct drm_client_buffer *buffer)
{
- struct drm_device *dev = buffer->client->dev;
-
if (buffer->gem) {
drm_gem_vunmap_unlocked(buffer->gem, &buffer->map);
drm_gem_object_put(buffer->gem);
}
- if (buffer->handle)
- drm_mode_destroy_dumb(dev, buffer->handle, buffer->client->file);
-
kfree(buffer);
}
static struct drm_client_buffer *
-drm_client_buffer_create(struct drm_client_dev *client, u32 width, u32 height, u32 format)
+drm_client_buffer_create(struct drm_client_dev *client, u32 width, u32 height,
+ u32 format, u32 *handle)
{
const struct drm_format_info *info = drm_format_info(format);
struct drm_mode_create_dumb dumb_args = { };
@@ -269,16 +275,15 @@ drm_client_buffer_create(struct drm_client_dev *client, u32 width, u32 height, u
if (ret)
goto err_delete;
- buffer->handle = dumb_args.handle;
- buffer->pitch = dumb_args.pitch;
-
obj = drm_gem_object_lookup(client->file, dumb_args.handle);
if (!obj) {
ret = -ENOENT;
goto err_delete;
}
+ buffer->pitch = dumb_args.pitch;
buffer->gem = obj;
+ *handle = dumb_args.handle;
return buffer;
@@ -365,7 +370,8 @@ static void drm_client_buffer_rmfb(struct drm_client_buffer *buffer)
}
static int drm_client_buffer_addfb(struct drm_client_buffer *buffer,
- u32 width, u32 height, u32 format)
+ u32 width, u32 height, u32 format,
+ u32 handle)
{
struct drm_client_dev *client = buffer->client;
struct drm_mode_fb_cmd fb_req = { };
@@ -377,7 +383,7 @@ static int drm_client_buffer_addfb(struct drm_client_buffer *buffer,
fb_req.depth = info->depth;
fb_req.width = width;
fb_req.height = height;
- fb_req.handle = buffer->handle;
+ fb_req.handle = handle;
fb_req.pitch = buffer->pitch;
ret = drm_mode_addfb(client->dev, &fb_req, client->file);
@@ -414,13 +420,24 @@ struct drm_client_buffer *
drm_client_framebuffer_create(struct drm_client_dev *client, u32 width, u32 height, u32 format)
{
struct drm_client_buffer *buffer;
+ u32 handle;
int ret;
- buffer = drm_client_buffer_create(client, width, height, format);
+ buffer = drm_client_buffer_create(client, width, height, format,
+ &handle);
if (IS_ERR(buffer))
return buffer;
- ret = drm_client_buffer_addfb(buffer, width, height, format);
+ ret = drm_client_buffer_addfb(buffer, width, height, format, handle);
+
+ /*
+ * The handle is only needed for creating the framebuffer, destroy it
+ * again to solve a circular dependency should anybody export the GEM
+ * object as DMA-buf. The framebuffer and our buffer structure are still
+ * holding references to the GEM object to prevent its destruction.
+ */
+ drm_mode_destroy_dumb(client->dev, handle, client->file);
+
if (ret) {
drm_client_buffer_delete(buffer);
return ERR_PTR(ret);
@@ -480,8 +497,8 @@ EXPORT_SYMBOL(drm_client_framebuffer_flush);
#ifdef CONFIG_DEBUG_FS
static int drm_client_debugfs_internal_clients(struct seq_file *m, void *data)
{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
+ struct drm_debugfs_entry *entry = m->private;
+ struct drm_device *dev = entry->dev;
struct drm_printer p = drm_seq_file_printer(m);
struct drm_client_dev *client;
@@ -493,14 +510,13 @@ static int drm_client_debugfs_internal_clients(struct seq_file *m, void *data)
return 0;
}
-static const struct drm_info_list drm_client_debugfs_list[] = {
+static const struct drm_debugfs_info drm_client_debugfs_list[] = {
{ "internal_clients", drm_client_debugfs_internal_clients, 0 },
};
void drm_client_debugfs_init(struct drm_minor *minor)
{
- drm_debugfs_create_files(drm_client_debugfs_list,
- ARRAY_SIZE(drm_client_debugfs_list),
- minor->debugfs_root, minor);
+ drm_debugfs_add_files(minor->dev, drm_client_debugfs_list,
+ ARRAY_SIZE(drm_client_debugfs_list));
}
#endif
diff --git a/drivers/gpu/drm/drm_client_modeset.c b/drivers/gpu/drm/drm_client_modeset.c
index d553e793e673..1b12a3c201a3 100644
--- a/drivers/gpu/drm/drm_client_modeset.c
+++ b/drivers/gpu/drm/drm_client_modeset.c
@@ -188,10 +188,6 @@ static struct drm_display_mode *drm_connector_pick_cmdline_mode(struct drm_conne
prefer_non_interlace = !cmdline_mode->interlace;
again:
list_for_each_entry(mode, &connector->modes, head) {
- /* Check (optional) mode name first */
- if (!strcmp(mode->name, cmdline_mode->name))
- return mode;
-
/* check width/height */
if (mode->hdisplay != cmdline_mode->xres ||
mode->vdisplay != cmdline_mode->yres)
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index 547356e00341..9d0250c28e9b 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -565,6 +565,7 @@ void drm_connector_cleanup(struct drm_connector *connector)
ida_free(&dev->mode_config.connector_ida, connector->index);
kfree(connector->display_info.bus_formats);
+ kfree(connector->display_info.vics);
drm_mode_object_unregister(dev, &connector->base);
kfree(connector->name);
connector->name = NULL;
@@ -984,6 +985,41 @@ static const struct drm_prop_enum_list drm_dvi_i_subconnector_enum_list[] = {
DRM_ENUM_NAME_FN(drm_get_dvi_i_subconnector_name,
drm_dvi_i_subconnector_enum_list)
+static const struct drm_prop_enum_list drm_tv_mode_enum_list[] = {
+ { DRM_MODE_TV_MODE_NTSC, "NTSC" },
+ { DRM_MODE_TV_MODE_NTSC_443, "NTSC-443" },
+ { DRM_MODE_TV_MODE_NTSC_J, "NTSC-J" },
+ { DRM_MODE_TV_MODE_PAL, "PAL" },
+ { DRM_MODE_TV_MODE_PAL_M, "PAL-M" },
+ { DRM_MODE_TV_MODE_PAL_N, "PAL-N" },
+ { DRM_MODE_TV_MODE_SECAM, "SECAM" },
+};
+DRM_ENUM_NAME_FN(drm_get_tv_mode_name, drm_tv_mode_enum_list)
+
+/**
+ * drm_get_tv_mode_from_name - Translates a TV mode name into its enum value
+ * @name: TV Mode name we want to convert
+ * @len: Length of @name
+ *
+ * Translates @name into an enum drm_connector_tv_mode.
+ *
+ * Returns: the enum value on success, a negative errno otherwise.
+ */
+int drm_get_tv_mode_from_name(const char *name, size_t len)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(drm_tv_mode_enum_list); i++) {
+ const struct drm_prop_enum_list *item = &drm_tv_mode_enum_list[i];
+
+ if (strlen(item->name) == len && !strncmp(item->name, name, len))
+ return item->type;
+ }
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL(drm_get_tv_mode_from_name);
+
static const struct drm_prop_enum_list drm_tv_select_enum_list[] = {
{ DRM_MODE_SUBCONNECTOR_Automatic, "Automatic" }, /* DVI-I and TV-out */
{ DRM_MODE_SUBCONNECTOR_Composite, "Composite" }, /* TV-out */
@@ -1552,6 +1588,71 @@ EXPORT_SYMBOL(drm_connector_attach_dp_subconnector_property);
* infoframe values is done through drm_hdmi_avi_infoframe_content_type().
*/
+/*
+ * TODO: Document the properties:
+ * - left margin
+ * - right margin
+ * - top margin
+ * - bottom margin
+ * - brightness
+ * - contrast
+ * - flicker reduction
+ * - hue
+ * - mode
+ * - overscan
+ * - saturation
+ * - select subconnector
+ * - subconnector
+ */
+/**
+ * DOC: Analog TV Connector Properties
+ *
+ * TV Mode:
+ * Indicates the TV Mode used on an analog TV connector. The value
+ * of this property can be one of the following:
+ *
+ * NTSC:
+ * TV Mode is CCIR System M (aka 525-lines) together with
+ * the NTSC Color Encoding.
+ *
+ * NTSC-443:
+ *
+ * TV Mode is CCIR System M (aka 525-lines) together with
+ * the NTSC Color Encoding, but with a color subcarrier
+ * frequency of 4.43MHz
+ *
+ * NTSC-J:
+ *
+ * TV Mode is CCIR System M (aka 525-lines) together with
+ * the NTSC Color Encoding, but with a black level equal to
+ * the blanking level.
+ *
+ * PAL:
+ *
+ * TV Mode is CCIR System B (aka 625-lines) together with
+ * the PAL Color Encoding.
+ *
+ * PAL-M:
+ *
+ * TV Mode is CCIR System M (aka 525-lines) together with
+ * the PAL Color Encoding.
+ *
+ * PAL-N:
+ *
+ * TV Mode is CCIR System N together with the PAL Color
+ * Encoding, a color subcarrier frequency of 3.58MHz, the
+ * SECAM color space, and narrower channels than other PAL
+ * variants.
+ *
+ * SECAM:
+ *
+ * TV Mode is CCIR System B (aka 625-lines) together with
+ * the SECAM Color Encoding.
+ *
+ * Drivers can set up this property by calling
+ * drm_mode_create_tv_properties().
+ */
+
/**
* drm_connector_attach_content_type_property - attach content-type property
* @connector: connector to attach content type property on.
@@ -1604,7 +1705,7 @@ EXPORT_SYMBOL(drm_connector_attach_tv_margin_properties);
* Called by a driver's HDMI connector initialization routine, this function
* creates the TV margin properties for a given device. No need to call this
* function for an SDTV connector, it's already called from
- * drm_mode_create_tv_properties().
+ * drm_mode_create_tv_properties_legacy().
*
* Returns:
* 0 on success or a negative error code on failure.
@@ -1639,7 +1740,7 @@ int drm_mode_create_tv_margin_properties(struct drm_device *dev)
EXPORT_SYMBOL(drm_mode_create_tv_margin_properties);
/**
- * drm_mode_create_tv_properties - create TV specific connector properties
+ * drm_mode_create_tv_properties_legacy - create TV specific connector properties
* @dev: DRM device
* @num_modes: number of different TV formats (modes) supported
* @modes: array of pointers to strings containing name of each format
@@ -1649,12 +1750,16 @@ EXPORT_SYMBOL(drm_mode_create_tv_margin_properties);
* responsible for allocating a list of format names and passing them to
* this routine.
*
+ * NOTE: This functions registers the deprecated "mode" connector
+ * property to select the analog TV mode (ie, NTSC, PAL, etc.). New
+ * drivers must use drm_mode_create_tv_properties() instead.
+ *
* Returns:
* 0 on success or a negative error code on failure.
*/
-int drm_mode_create_tv_properties(struct drm_device *dev,
- unsigned int num_modes,
- const char * const modes[])
+int drm_mode_create_tv_properties_legacy(struct drm_device *dev,
+ unsigned int num_modes,
+ const char * const modes[])
{
struct drm_property *tv_selector;
struct drm_property *tv_subconnector;
@@ -1690,15 +1795,17 @@ int drm_mode_create_tv_properties(struct drm_device *dev,
if (drm_mode_create_tv_margin_properties(dev))
goto nomem;
- dev->mode_config.tv_mode_property =
- drm_property_create(dev, DRM_MODE_PROP_ENUM,
- "mode", num_modes);
- if (!dev->mode_config.tv_mode_property)
- goto nomem;
+ if (num_modes) {
+ dev->mode_config.legacy_tv_mode_property =
+ drm_property_create(dev, DRM_MODE_PROP_ENUM,
+ "mode", num_modes);
+ if (!dev->mode_config.legacy_tv_mode_property)
+ goto nomem;
- for (i = 0; i < num_modes; i++)
- drm_property_add_enum(dev->mode_config.tv_mode_property,
- i, modes[i]);
+ for (i = 0; i < num_modes; i++)
+ drm_property_add_enum(dev->mode_config.legacy_tv_mode_property,
+ i, modes[i]);
+ }
dev->mode_config.tv_brightness_property =
drm_property_create_range(dev, 0, "brightness", 0, 100);
@@ -1734,6 +1841,47 @@ int drm_mode_create_tv_properties(struct drm_device *dev,
nomem:
return -ENOMEM;
}
+EXPORT_SYMBOL(drm_mode_create_tv_properties_legacy);
+
+/**
+ * drm_mode_create_tv_properties - create TV specific connector properties
+ * @dev: DRM device
+ * @supported_tv_modes: Bitmask of TV modes supported (See DRM_MODE_TV_MODE_*)
+ *
+ * Called by a driver's TV initialization routine, this function creates
+ * the TV specific connector properties for a given device.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
+ */
+int drm_mode_create_tv_properties(struct drm_device *dev,
+ unsigned int supported_tv_modes)
+{
+ struct drm_prop_enum_list tv_mode_list[DRM_MODE_TV_MODE_MAX];
+ struct drm_property *tv_mode;
+ unsigned int i, len = 0;
+
+ if (dev->mode_config.tv_mode_property)
+ return 0;
+
+ for (i = 0; i < DRM_MODE_TV_MODE_MAX; i++) {
+ if (!(supported_tv_modes & BIT(i)))
+ continue;
+
+ tv_mode_list[len].type = i;
+ tv_mode_list[len].name = drm_get_tv_mode_name(i);
+ len++;
+ }
+
+ tv_mode = drm_property_create_enum(dev, 0, "TV mode",
+ tv_mode_list, len);
+ if (!tv_mode)
+ return -ENOMEM;
+
+ dev->mode_config.tv_mode_property = tv_mode;
+
+ return drm_mode_create_tv_properties_legacy(dev, 0, NULL);
+}
EXPORT_SYMBOL(drm_mode_create_tv_properties);
/**
diff --git a/drivers/gpu/drm/drm_context.c b/drivers/gpu/drm/drm_context.c
index c6e6a3e7219a..a0fc779e5e1e 100644
--- a/drivers/gpu/drm/drm_context.c
+++ b/drivers/gpu/drm/drm_context.c
@@ -59,8 +59,7 @@ struct drm_ctx_list {
*/
void drm_legacy_ctxbitmap_free(struct drm_device * dev, int ctx_handle)
{
- if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
- !drm_core_check_feature(dev, DRIVER_LEGACY))
+ if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return;
mutex_lock(&dev->struct_mutex);
@@ -97,8 +96,7 @@ static int drm_legacy_ctxbitmap_next(struct drm_device * dev)
*/
void drm_legacy_ctxbitmap_init(struct drm_device * dev)
{
- if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
- !drm_core_check_feature(dev, DRIVER_LEGACY))
+ if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return;
idr_init(&dev->ctx_idr);
@@ -114,8 +112,7 @@ void drm_legacy_ctxbitmap_init(struct drm_device * dev)
*/
void drm_legacy_ctxbitmap_cleanup(struct drm_device * dev)
{
- if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
- !drm_core_check_feature(dev, DRIVER_LEGACY))
+ if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return;
mutex_lock(&dev->struct_mutex);
@@ -136,8 +133,7 @@ void drm_legacy_ctxbitmap_flush(struct drm_device *dev, struct drm_file *file)
{
struct drm_ctx_list *pos, *tmp;
- if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
- !drm_core_check_feature(dev, DRIVER_LEGACY))
+ if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return;
mutex_lock(&dev->ctxlist_mutex);
@@ -182,8 +178,7 @@ int drm_legacy_getsareactx(struct drm_device *dev, void *data,
struct drm_local_map *map;
struct drm_map_list *_entry;
- if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
- !drm_core_check_feature(dev, DRIVER_LEGACY))
+ if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return -EOPNOTSUPP;
mutex_lock(&dev->struct_mutex);
@@ -230,8 +225,7 @@ int drm_legacy_setsareactx(struct drm_device *dev, void *data,
struct drm_local_map *map = NULL;
struct drm_map_list *r_list = NULL;
- if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
- !drm_core_check_feature(dev, DRIVER_LEGACY))
+ if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return -EOPNOTSUPP;
mutex_lock(&dev->struct_mutex);
@@ -335,8 +329,7 @@ int drm_legacy_resctx(struct drm_device *dev, void *data,
struct drm_ctx ctx;
int i;
- if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
- !drm_core_check_feature(dev, DRIVER_LEGACY))
+ if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return -EOPNOTSUPP;
if (res->count >= DRM_RESERVED_CONTEXTS) {
@@ -370,8 +363,7 @@ int drm_legacy_addctx(struct drm_device *dev, void *data,
struct drm_ctx *ctx = data;
int tmp_handle;
- if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
- !drm_core_check_feature(dev, DRIVER_LEGACY))
+ if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return -EOPNOTSUPP;
tmp_handle = drm_legacy_ctxbitmap_next(dev);
@@ -419,8 +411,7 @@ int drm_legacy_getctx(struct drm_device *dev, void *data,
{
struct drm_ctx *ctx = data;
- if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
- !drm_core_check_feature(dev, DRIVER_LEGACY))
+ if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return -EOPNOTSUPP;
/* This is 0, because we don't handle any context flags */
@@ -445,8 +436,7 @@ int drm_legacy_switchctx(struct drm_device *dev, void *data,
{
struct drm_ctx *ctx = data;
- if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
- !drm_core_check_feature(dev, DRIVER_LEGACY))
+ if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return -EOPNOTSUPP;
DRM_DEBUG("%d\n", ctx->handle);
@@ -469,8 +459,7 @@ int drm_legacy_newctx(struct drm_device *dev, void *data,
{
struct drm_ctx *ctx = data;
- if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
- !drm_core_check_feature(dev, DRIVER_LEGACY))
+ if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return -EOPNOTSUPP;
DRM_DEBUG("%d\n", ctx->handle);
@@ -495,8 +484,7 @@ int drm_legacy_rmctx(struct drm_device *dev, void *data,
{
struct drm_ctx *ctx = data;
- if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
- !drm_core_check_feature(dev, DRIVER_LEGACY))
+ if (!drm_core_check_feature(dev, DRIVER_LEGACY))
return -EOPNOTSUPP;
DRM_DEBUG("%d\n", ctx->handle);
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index ee445f4605ba..4f643a490dc3 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -38,6 +38,7 @@
#include <drm/drm_edid.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
+#include <drm/drm_managed.h>
#include "drm_crtc_internal.h"
#include "drm_internal.h"
@@ -50,9 +51,8 @@
static int drm_name_info(struct seq_file *m, void *data)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_minor *minor = node->minor;
- struct drm_device *dev = minor->dev;
+ struct drm_debugfs_entry *entry = m->private;
+ struct drm_device *dev = entry->dev;
struct drm_master *master;
mutex_lock(&dev->master_mutex);
@@ -72,8 +72,8 @@ static int drm_name_info(struct seq_file *m, void *data)
static int drm_clients_info(struct seq_file *m, void *data)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
+ struct drm_debugfs_entry *entry = m->private;
+ struct drm_device *dev = entry->dev;
struct drm_file *priv;
kuid_t uid;
@@ -124,8 +124,8 @@ static int drm_gem_one_name_info(int id, void *ptr, void *data)
static int drm_gem_name_info(struct seq_file *m, void *data)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
+ struct drm_debugfs_entry *entry = m->private;
+ struct drm_device *dev = entry->dev;
seq_printf(m, " name size handles refcount\n");
@@ -136,7 +136,7 @@ static int drm_gem_name_info(struct seq_file *m, void *data)
return 0;
}
-static const struct drm_info_list drm_debugfs_list[] = {
+static const struct drm_debugfs_info drm_debugfs_list[] = {
{"name", drm_name_info, 0},
{"clients", drm_clients_info, 0},
{"gem_names", drm_gem_name_info, DRIVER_GEM},
@@ -151,6 +151,21 @@ static int drm_debugfs_open(struct inode *inode, struct file *file)
return single_open(file, node->info_ent->show, node);
}
+static int drm_debugfs_entry_open(struct inode *inode, struct file *file)
+{
+ struct drm_debugfs_entry *entry = inode->i_private;
+ struct drm_debugfs_info *node = &entry->file;
+
+ return single_open(file, node->show, entry);
+}
+
+static const struct file_operations drm_debugfs_entry_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_debugfs_entry_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
static const struct file_operations drm_debugfs_fops = {
.owner = THIS_MODULE,
@@ -192,7 +207,7 @@ void drm_debugfs_create_files(const struct drm_info_list *files, int count,
tmp->minor = minor;
tmp->dent = debugfs_create_file(files[i].name,
- S_IFREG | S_IRUGO, root, tmp,
+ 0444, root, tmp,
&drm_debugfs_fops);
tmp->info_ent = &files[i];
@@ -207,6 +222,7 @@ int drm_debugfs_init(struct drm_minor *minor, int minor_id,
struct dentry *root)
{
struct drm_device *dev = minor->dev;
+ struct drm_debugfs_entry *entry, *tmp;
char name[64];
INIT_LIST_HEAD(&minor->debugfs_list);
@@ -214,8 +230,7 @@ int drm_debugfs_init(struct drm_minor *minor, int minor_id,
sprintf(name, "%d", minor_id);
minor->debugfs_root = debugfs_create_dir(name, root);
- drm_debugfs_create_files(drm_debugfs_list, DRM_DEBUGFS_ENTRIES,
- minor->debugfs_root, minor);
+ drm_debugfs_add_files(minor->dev, drm_debugfs_list, DRM_DEBUGFS_ENTRIES);
if (drm_drv_uses_atomic_modeset(dev)) {
drm_atomic_debugfs_init(minor);
@@ -230,9 +245,29 @@ int drm_debugfs_init(struct drm_minor *minor, int minor_id,
if (dev->driver->debugfs_init)
dev->driver->debugfs_init(minor);
+ list_for_each_entry_safe(entry, tmp, &dev->debugfs_list, list) {
+ debugfs_create_file(entry->file.name, 0444,
+ minor->debugfs_root, entry, &drm_debugfs_entry_fops);
+ list_del(&entry->list);
+ }
+
return 0;
}
+void drm_debugfs_late_register(struct drm_device *dev)
+{
+ struct drm_minor *minor = dev->primary;
+ struct drm_debugfs_entry *entry, *tmp;
+
+ if (!minor)
+ return;
+
+ list_for_each_entry_safe(entry, tmp, &dev->debugfs_list, list) {
+ debugfs_create_file(entry->file.name, 0444,
+ minor->debugfs_root, entry, &drm_debugfs_entry_fops);
+ list_del(&entry->list);
+ }
+}
int drm_debugfs_remove_files(const struct drm_info_list *files, int count,
struct drm_minor *minor)
@@ -281,6 +316,53 @@ void drm_debugfs_cleanup(struct drm_minor *minor)
minor->debugfs_root = NULL;
}
+/**
+ * drm_debugfs_add_file - Add a given file to the DRM device debugfs file list
+ * @dev: drm device for the ioctl
+ * @name: debugfs file name
+ * @show: show callback
+ * @data: driver-private data, should not be device-specific
+ *
+ * Add a given file entry to the DRM device debugfs file list to be created on
+ * drm_debugfs_init.
+ */
+void drm_debugfs_add_file(struct drm_device *dev, const char *name,
+ int (*show)(struct seq_file*, void*), void *data)
+{
+ struct drm_debugfs_entry *entry = drmm_kzalloc(dev, sizeof(*entry), GFP_KERNEL);
+
+ if (!entry)
+ return;
+
+ entry->file.name = name;
+ entry->file.show = show;
+ entry->file.data = data;
+ entry->dev = dev;
+
+ mutex_lock(&dev->debugfs_mutex);
+ list_add(&entry->list, &dev->debugfs_list);
+ mutex_unlock(&dev->debugfs_mutex);
+}
+EXPORT_SYMBOL(drm_debugfs_add_file);
+
+/**
+ * drm_debugfs_add_files - Add an array of files to the DRM device debugfs file list
+ * @dev: drm device for the ioctl
+ * @files: The array of files to create
+ * @count: The number of files given
+ *
+ * Add a given set of debugfs files represented by an array of
+ * &struct drm_debugfs_info in the DRM device debugfs file list.
+ */
+void drm_debugfs_add_files(struct drm_device *dev, const struct drm_debugfs_info *files, int count)
+{
+ int i;
+
+ for (i = 0; i < count; i++)
+ drm_debugfs_add_file(dev, files[i].name, files[i].show, files[i].data);
+}
+EXPORT_SYMBOL(drm_debugfs_add_files);
+
static int connector_show(struct seq_file *m, void *data)
{
struct drm_connector *connector = m->private;
@@ -426,15 +508,15 @@ void drm_debugfs_connector_add(struct drm_connector *connector)
connector->debugfs_entry = root;
/* force */
- debugfs_create_file("force", S_IRUGO | S_IWUSR, root, connector,
+ debugfs_create_file("force", 0644, root, connector,
&drm_connector_fops);
/* edid */
- debugfs_create_file("edid_override", S_IRUGO | S_IWUSR, root, connector,
+ debugfs_create_file("edid_override", 0644, root, connector,
&drm_edid_fops);
/* vrr range */
- debugfs_create_file("vrr_range", S_IRUGO, root, connector,
+ debugfs_create_file("vrr_range", 0444, root, connector,
&vrr_range_fops);
/* max bpc */
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 73b845a75d52..c6eb8972451a 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -598,6 +598,7 @@ static void drm_dev_init_release(struct drm_device *dev, void *res)
mutex_destroy(&dev->clientlist_mutex);
mutex_destroy(&dev->filelist_mutex);
mutex_destroy(&dev->struct_mutex);
+ mutex_destroy(&dev->debugfs_mutex);
drm_legacy_destroy_members(dev);
}
@@ -638,12 +639,14 @@ static int drm_dev_init(struct drm_device *dev,
INIT_LIST_HEAD(&dev->filelist_internal);
INIT_LIST_HEAD(&dev->clientlist);
INIT_LIST_HEAD(&dev->vblank_event_list);
+ INIT_LIST_HEAD(&dev->debugfs_list);
spin_lock_init(&dev->event_lock);
mutex_init(&dev->struct_mutex);
mutex_init(&dev->filelist_mutex);
mutex_init(&dev->clientlist_mutex);
mutex_init(&dev->master_mutex);
+ mutex_init(&dev->debugfs_mutex);
ret = drmm_add_action_or_reset(dev, drm_dev_init_release, NULL);
if (ret)
@@ -929,8 +932,8 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags)
dev->registered = true;
- if (dev->driver->load) {
- ret = dev->driver->load(dev, flags);
+ if (driver->load) {
+ ret = driver->load(dev, flags);
if (ret)
goto err_minors;
}
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 3841aba17abd..3d0a4da661bc 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -96,7 +96,6 @@ struct detailed_mode_closure {
struct drm_connector *connector;
const struct drm_edid *drm_edid;
bool preferred;
- u32 quirks;
int modes;
};
@@ -2887,9 +2886,9 @@ static u32 edid_get_quirks(const struct drm_edid *drm_edid)
* Walk the mode list for connector, clearing the preferred status on existing
* modes and setting it anew for the right mode ala quirks.
*/
-static void edid_fixup_preferred(struct drm_connector *connector,
- u32 quirks)
+static void edid_fixup_preferred(struct drm_connector *connector)
{
+ const struct drm_display_info *info = &connector->display_info;
struct drm_display_mode *t, *cur_mode, *preferred_mode;
int target_refresh = 0;
int cur_vrefresh, preferred_vrefresh;
@@ -2897,9 +2896,9 @@ static void edid_fixup_preferred(struct drm_connector *connector,
if (list_empty(&connector->probed_modes))
return;
- if (quirks & EDID_QUIRK_PREFER_LARGE_60)
+ if (info->quirks & EDID_QUIRK_PREFER_LARGE_60)
target_refresh = 60;
- if (quirks & EDID_QUIRK_PREFER_LARGE_75)
+ if (info->quirks & EDID_QUIRK_PREFER_LARGE_75)
target_refresh = 75;
preferred_mode = list_first_entry(&connector->probed_modes,
@@ -3401,9 +3400,9 @@ drm_mode_do_interlace_quirk(struct drm_display_mode *mode,
*/
static struct drm_display_mode *drm_mode_detailed(struct drm_connector *connector,
const struct drm_edid *drm_edid,
- const struct detailed_timing *timing,
- u32 quirks)
+ const struct detailed_timing *timing)
{
+ const struct drm_display_info *info = &connector->display_info;
struct drm_device *dev = connector->dev;
struct drm_display_mode *mode;
const struct detailed_pixel_timing *pt = &timing->data.pixel_data;
@@ -3437,7 +3436,7 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_connector *connecto
return NULL;
}
- if (quirks & EDID_QUIRK_FORCE_REDUCED_BLANKING) {
+ if (info->quirks & EDID_QUIRK_FORCE_REDUCED_BLANKING) {
mode = drm_cvt_mode(dev, hactive, vactive, 60, true, false, false);
if (!mode)
return NULL;
@@ -3449,7 +3448,7 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_connector *connecto
if (!mode)
return NULL;
- if (quirks & EDID_QUIRK_135_CLOCK_TOO_HIGH)
+ if (info->quirks & EDID_QUIRK_135_CLOCK_TOO_HIGH)
mode->clock = 1088 * 10;
else
mode->clock = le16_to_cpu(timing->pixel_clock) * 10;
@@ -3472,7 +3471,7 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_connector *connecto
drm_mode_do_interlace_quirk(mode, pt);
- if (quirks & EDID_QUIRK_DETAILED_SYNC_PP) {
+ if (info->quirks & EDID_QUIRK_DETAILED_SYNC_PP) {
mode->flags |= DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC;
} else {
mode->flags |= (pt->misc & DRM_EDID_PT_HSYNC_POSITIVE) ?
@@ -3485,12 +3484,12 @@ set_size:
mode->width_mm = pt->width_mm_lo | (pt->width_height_mm_hi & 0xf0) << 4;
mode->height_mm = pt->height_mm_lo | (pt->width_height_mm_hi & 0xf) << 8;
- if (quirks & EDID_QUIRK_DETAILED_IN_CM) {
+ if (info->quirks & EDID_QUIRK_DETAILED_IN_CM) {
mode->width_mm *= 10;
mode->height_mm *= 10;
}
- if (quirks & EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE) {
+ if (info->quirks & EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE) {
mode->width_mm = drm_edid->edid->width_cm * 10;
mode->height_mm = drm_edid->edid->height_cm * 10;
}
@@ -4003,8 +4002,7 @@ do_detailed_mode(const struct detailed_timing *timing, void *c)
return;
newmode = drm_mode_detailed(closure->connector,
- closure->drm_edid, timing,
- closure->quirks);
+ closure->drm_edid, timing);
if (!newmode)
return;
@@ -4027,15 +4025,13 @@ do_detailed_mode(const struct detailed_timing *timing, void *c)
* add_detailed_modes - Add modes from detailed timings
* @connector: attached connector
* @drm_edid: EDID block to scan
- * @quirks: quirks to apply
*/
static int add_detailed_modes(struct drm_connector *connector,
- const struct drm_edid *drm_edid, u32 quirks)
+ const struct drm_edid *drm_edid)
{
struct detailed_mode_closure closure = {
.connector = connector,
.drm_edid = drm_edid,
- .quirks = quirks,
};
if (drm_edid->edid->revision >= 4)
@@ -4468,28 +4464,20 @@ static u8 svd_to_vic(u8 svd)
return svd;
}
+/*
+ * Return a display mode for the 0-based vic_index'th VIC across all CTA VDBs in
+ * the EDID, or NULL on errors.
+ */
static struct drm_display_mode *
-drm_display_mode_from_vic_index(struct drm_connector *connector,
- const u8 *video_db, u8 video_len,
- u8 video_index)
+drm_display_mode_from_vic_index(struct drm_connector *connector, int vic_index)
{
+ const struct drm_display_info *info = &connector->display_info;
struct drm_device *dev = connector->dev;
- struct drm_display_mode *newmode;
- u8 vic;
-
- if (video_db == NULL || video_index >= video_len)
- return NULL;
- /* CEA modes are numbered 1..127 */
- vic = svd_to_vic(video_db[video_index]);
- if (!drm_valid_cea_vic(vic))
- return NULL;
-
- newmode = drm_mode_duplicate(dev, cea_mode_for_vic(vic));
- if (!newmode)
+ if (!info->vics || vic_index >= info->vics_len || !info->vics[vic_index])
return NULL;
- return newmode;
+ return drm_display_mode_from_cea_vic(dev, info->vics[vic_index]);
}
/*
@@ -4505,10 +4493,8 @@ drm_display_mode_from_vic_index(struct drm_connector *connector,
static int do_y420vdb_modes(struct drm_connector *connector,
const u8 *svds, u8 svds_len)
{
- int modes = 0, i;
struct drm_device *dev = connector->dev;
- struct drm_display_info *info = &connector->display_info;
- struct drm_hdmi_info *hdmi = &info->hdmi;
+ int modes = 0, i;
for (i = 0; i < svds_len; i++) {
u8 vic = svd_to_vic(svds[i]);
@@ -4520,35 +4506,13 @@ static int do_y420vdb_modes(struct drm_connector *connector,
newmode = drm_mode_duplicate(dev, cea_mode_for_vic(vic));
if (!newmode)
break;
- bitmap_set(hdmi->y420_vdb_modes, vic, 1);
drm_mode_probed_add(connector, newmode);
modes++;
}
- if (modes > 0)
- info->color_formats |= DRM_COLOR_FORMAT_YCBCR420;
return modes;
}
-/*
- * drm_add_cmdb_modes - Add a YCBCR 420 mode into bitmap
- * @connector: connector corresponding to the HDMI sink
- * @vic: CEA vic for the video mode to be added in the map
- *
- * Makes an entry for a videomode in the YCBCR 420 bitmap
- */
-static void
-drm_add_cmdb_modes(struct drm_connector *connector, u8 svd)
-{
- u8 vic = svd_to_vic(svd);
- struct drm_hdmi_info *hdmi = &connector->display_info.hdmi;
-
- if (!drm_valid_cea_vic(vic))
- return;
-
- bitmap_set(hdmi->y420_cmdb_modes, vic, 1);
-}
-
/**
* drm_display_mode_from_cea_vic() - return a mode for CEA VIC
* @dev: DRM device
@@ -4577,29 +4541,20 @@ drm_display_mode_from_cea_vic(struct drm_device *dev,
}
EXPORT_SYMBOL(drm_display_mode_from_cea_vic);
-static int
-do_cea_modes(struct drm_connector *connector, const u8 *db, u8 len)
+/* Add modes based on VICs parsed in parse_cta_vdb() */
+static int add_cta_vdb_modes(struct drm_connector *connector)
{
+ const struct drm_display_info *info = &connector->display_info;
int i, modes = 0;
- struct drm_hdmi_info *hdmi = &connector->display_info.hdmi;
- for (i = 0; i < len; i++) {
+ if (!info->vics)
+ return 0;
+
+ for (i = 0; i < info->vics_len; i++) {
struct drm_display_mode *mode;
- mode = drm_display_mode_from_vic_index(connector, db, len, i);
+ mode = drm_display_mode_from_vic_index(connector, i);
if (mode) {
- /*
- * YCBCR420 capability block contains a bitmap which
- * gives the index of CEA modes from CEA VDB, which
- * can support YCBCR 420 sampling output also (apart
- * from RGB/YCBCR444 etc).
- * For example, if the bit 0 in bitmap is set,
- * first mode in VDB can support YCBCR420 output too.
- * Add YCBCR420 modes only if sink is HDMI 2.0 capable.
- */
- if (i < 64 && hdmi->y420_cmdb_map & (1ULL << i))
- drm_add_cmdb_modes(connector, db[i]);
-
drm_mode_probed_add(connector, mode);
modes++;
}
@@ -4693,15 +4648,13 @@ static int add_hdmi_mode(struct drm_connector *connector, u8 vic)
}
static int add_3d_struct_modes(struct drm_connector *connector, u16 structure,
- const u8 *video_db, u8 video_len, u8 video_index)
+ int vic_index)
{
struct drm_display_mode *newmode;
int modes = 0;
if (structure & (1 << 0)) {
- newmode = drm_display_mode_from_vic_index(connector, video_db,
- video_len,
- video_index);
+ newmode = drm_display_mode_from_vic_index(connector, vic_index);
if (newmode) {
newmode->flags |= DRM_MODE_FLAG_3D_FRAME_PACKING;
drm_mode_probed_add(connector, newmode);
@@ -4709,9 +4662,7 @@ static int add_3d_struct_modes(struct drm_connector *connector, u16 structure,
}
}
if (structure & (1 << 6)) {
- newmode = drm_display_mode_from_vic_index(connector, video_db,
- video_len,
- video_index);
+ newmode = drm_display_mode_from_vic_index(connector, vic_index);
if (newmode) {
newmode->flags |= DRM_MODE_FLAG_3D_TOP_AND_BOTTOM;
drm_mode_probed_add(connector, newmode);
@@ -4719,9 +4670,7 @@ static int add_3d_struct_modes(struct drm_connector *connector, u16 structure,
}
}
if (structure & (1 << 8)) {
- newmode = drm_display_mode_from_vic_index(connector, video_db,
- video_len,
- video_index);
+ newmode = drm_display_mode_from_vic_index(connector, vic_index);
if (newmode) {
newmode->flags |= DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF;
drm_mode_probed_add(connector, newmode);
@@ -4732,6 +4681,26 @@ static int add_3d_struct_modes(struct drm_connector *connector, u16 structure,
return modes;
}
+static bool hdmi_vsdb_latency_present(const u8 *db)
+{
+ return db[8] & BIT(7);
+}
+
+static bool hdmi_vsdb_i_latency_present(const u8 *db)
+{
+ return hdmi_vsdb_latency_present(db) && db[8] & BIT(6);
+}
+
+static int hdmi_vsdb_latency_length(const u8 *db)
+{
+ if (hdmi_vsdb_i_latency_present(db))
+ return 4;
+ else if (hdmi_vsdb_latency_present(db))
+ return 2;
+ else
+ return 0;
+}
+
/*
* do_hdmi_vsdb_modes - Parse the HDMI Vendor Specific data block
* @connector: connector corresponding to the HDMI sink
@@ -4742,10 +4711,8 @@ static int add_3d_struct_modes(struct drm_connector *connector, u16 structure,
* also adds the stereo 3d modes when applicable.
*/
static int
-do_hdmi_vsdb_modes(struct drm_connector *connector, const u8 *db, u8 len,
- const u8 *video_db, u8 video_len)
+do_hdmi_vsdb_modes(struct drm_connector *connector, const u8 *db, u8 len)
{
- struct drm_display_info *info = &connector->display_info;
int modes = 0, offset = 0, i, multi_present = 0, multi_len;
u8 vic_len, hdmi_3d_len = 0;
u16 mask;
@@ -4758,13 +4725,7 @@ do_hdmi_vsdb_modes(struct drm_connector *connector, const u8 *db, u8 len,
if (!(db[8] & (1 << 5)))
goto out;
- /* Latency_Fields_Present */
- if (db[8] & (1 << 7))
- offset += 2;
-
- /* I_Latency_Fields_Present */
- if (db[8] & (1 << 6))
- offset += 2;
+ offset += hdmi_vsdb_latency_length(db);
/* the declared length is not long enough for the 2 first bytes
* of additional video format capabilities */
@@ -4818,9 +4779,7 @@ do_hdmi_vsdb_modes(struct drm_connector *connector, const u8 *db, u8 len,
for (i = 0; i < 16; i++) {
if (mask & (1 << i))
modes += add_3d_struct_modes(connector,
- structure_all,
- video_db,
- video_len, i);
+ structure_all, i);
}
}
@@ -4857,8 +4816,6 @@ do_hdmi_vsdb_modes(struct drm_connector *connector, const u8 *db, u8 len,
if (newflag != 0) {
newmode = drm_display_mode_from_vic_index(connector,
- video_db,
- video_len,
vic_index);
if (newmode) {
@@ -4873,8 +4830,6 @@ do_hdmi_vsdb_modes(struct drm_connector *connector, const u8 *db, u8 len,
}
out:
- if (modes > 0)
- info->has_hdmi_infoframe = true;
return modes;
}
@@ -5204,20 +5159,26 @@ static int edid_hfeeodb_extension_block_count(const struct edid *edid)
return cta[4 + 2];
}
-static void drm_parse_y420cmdb_bitmap(struct drm_connector *connector,
- const u8 *db)
+/*
+ * CTA-861 YCbCr 4:2:0 Capability Map Data Block (CTA Y420CMDB)
+ *
+ * Y420CMDB contains a bitmap which gives the index of CTA modes from CTA VDB,
+ * which can support YCBCR 420 sampling output also (apart from RGB/YCBCR444
+ * etc). For example, if the bit 0 in bitmap is set, first mode in VDB can
+ * support YCBCR420 output too.
+ */
+static void parse_cta_y420cmdb(struct drm_connector *connector,
+ const struct cea_db *db, u64 *y420cmdb_map)
{
struct drm_display_info *info = &connector->display_info;
- struct drm_hdmi_info *hdmi = &info->hdmi;
- u8 map_len = cea_db_payload_len(db) - 1;
- u8 count;
+ int i, map_len = cea_db_payload_len(db) - 1;
+ const u8 *data = cea_db_data(db) + 1;
u64 map = 0;
if (map_len == 0) {
/* All CEA modes support ycbcr420 sampling also.*/
- hdmi->y420_cmdb_map = U64_MAX;
- info->color_formats |= DRM_COLOR_FORMAT_YCBCR420;
- return;
+ map = U64_MAX;
+ goto out;
}
/*
@@ -5235,13 +5196,14 @@ static void drm_parse_y420cmdb_bitmap(struct drm_connector *connector,
if (WARN_ON_ONCE(map_len > 8))
map_len = 8;
- for (count = 0; count < map_len; count++)
- map |= (u64)db[2 + count] << (8 * count);
+ for (i = 0; i < map_len; i++)
+ map |= (u64)data[i] << (8 * i);
+out:
if (map)
info->color_formats |= DRM_COLOR_FORMAT_YCBCR420;
- hdmi->y420_cmdb_map = map;
+ *y420cmdb_map = map;
}
static int add_cea_modes(struct drm_connector *connector,
@@ -5249,21 +5211,16 @@ static int add_cea_modes(struct drm_connector *connector,
{
const struct cea_db *db;
struct cea_db_iter iter;
- int modes = 0;
+ int modes;
+
+ /* CTA VDB block VICs parsed earlier */
+ modes = add_cta_vdb_modes(connector);
cea_db_iter_edid_begin(drm_edid, &iter);
cea_db_iter_for_each(db, &iter) {
- const u8 *hdmi = NULL, *video = NULL;
- u8 hdmi_len = 0, video_len = 0;
-
- if (cea_db_tag(db) == CTA_DB_VIDEO) {
- video = cea_db_data(db);
- video_len = cea_db_payload_len(db);
- modes += do_cea_modes(connector, video, video_len);
- } else if (cea_db_is_hdmi_vsdb(db)) {
- /* FIXME: Switch to use cea_db_data() */
- hdmi = (const u8 *)db;
- hdmi_len = cea_db_payload_len(db);
+ if (cea_db_is_hdmi_vsdb(db)) {
+ modes += do_hdmi_vsdb_modes(connector, (const u8 *)db,
+ cea_db_payload_len(db));
} else if (cea_db_is_y420vdb(db)) {
const u8 *vdb420 = cea_db_data(db) + 1;
@@ -5271,15 +5228,6 @@ static int add_cea_modes(struct drm_connector *connector,
modes += do_y420vdb_modes(connector, vdb420,
cea_db_payload_len(db) - 1);
}
-
- /*
- * We parse the HDMI VSDB after having added the cea modes as we
- * will be patching their flags when the sink supports stereo
- * 3D.
- */
- if (hdmi)
- modes += do_hdmi_vsdb_modes(connector, hdmi, hdmi_len,
- video, video_len);
}
cea_db_iter_end(&iter);
@@ -5416,6 +5364,7 @@ drm_parse_hdr_metadata_block(struct drm_connector *connector, const u8 *db)
}
}
+/* HDMI Vendor-Specific Data Block (HDMI VSDB, H14b-VSDB) */
static void
drm_parse_hdmi_vsdb_audio(struct drm_connector *connector, const u8 *db)
{
@@ -5423,18 +5372,18 @@ drm_parse_hdmi_vsdb_audio(struct drm_connector *connector, const u8 *db)
if (len >= 6 && (db[6] & (1 << 7)))
connector->eld[DRM_ELD_SAD_COUNT_CONN_TYPE] |= DRM_ELD_SUPPORTS_AI;
- if (len >= 8) {
- connector->latency_present[0] = db[8] >> 7;
- connector->latency_present[1] = (db[8] >> 6) & 1;
- }
- if (len >= 9)
+
+ if (len >= 10 && hdmi_vsdb_latency_present(db)) {
+ connector->latency_present[0] = true;
connector->video_latency[0] = db[9];
- if (len >= 10)
connector->audio_latency[0] = db[10];
- if (len >= 11)
+ }
+
+ if (len >= 12 && hdmi_vsdb_i_latency_present(db)) {
+ connector->latency_present[1] = true;
connector->video_latency[1] = db[11];
- if (len >= 12)
connector->audio_latency[1] = db[12];
+ }
drm_dbg_kms(connector->dev,
"[CONNECTOR:%d:%s] HDMI: latency present %d %d, video latency %d %d, audio latency %d %d\n",
@@ -5533,8 +5482,6 @@ static void drm_edid_to_eld(struct drm_connector *connector,
int total_sad_count = 0;
int mnl;
- clear_eld(connector);
-
if (!drm_edid)
return;
@@ -5864,6 +5811,92 @@ drm_default_rgb_quant_range(const struct drm_display_mode *mode)
}
EXPORT_SYMBOL(drm_default_rgb_quant_range);
+/* CTA-861 Video Data Block (CTA VDB) */
+static void parse_cta_vdb(struct drm_connector *connector, const struct cea_db *db)
+{
+ struct drm_display_info *info = &connector->display_info;
+ int i, vic_index, len = cea_db_payload_len(db);
+ const u8 *svds = cea_db_data(db);
+ u8 *vics;
+
+ if (!len)
+ return;
+
+ /* Gracefully handle multiple VDBs, however unlikely that is */
+ vics = krealloc(info->vics, info->vics_len + len, GFP_KERNEL);
+ if (!vics)
+ return;
+
+ vic_index = info->vics_len;
+ info->vics_len += len;
+ info->vics = vics;
+
+ for (i = 0; i < len; i++) {
+ u8 vic = svd_to_vic(svds[i]);
+
+ if (!drm_valid_cea_vic(vic))
+ vic = 0;
+
+ info->vics[vic_index++] = vic;
+ }
+}
+
+/*
+ * Update y420_cmdb_modes based on previously parsed CTA VDB and Y420CMDB.
+ *
+ * Translate the y420cmdb_map based on VIC indexes to y420_cmdb_modes indexed
+ * using the VICs themselves.
+ */
+static void update_cta_y420cmdb(struct drm_connector *connector, u64 y420cmdb_map)
+{
+ struct drm_display_info *info = &connector->display_info;
+ struct drm_hdmi_info *hdmi = &info->hdmi;
+ int i, len = min_t(int, info->vics_len, BITS_PER_TYPE(y420cmdb_map));
+
+ for (i = 0; i < len; i++) {
+ u8 vic = info->vics[i];
+
+ if (vic && y420cmdb_map & BIT_ULL(i))
+ bitmap_set(hdmi->y420_cmdb_modes, vic, 1);
+ }
+}
+
+static bool cta_vdb_has_vic(const struct drm_connector *connector, u8 vic)
+{
+ const struct drm_display_info *info = &connector->display_info;
+ int i;
+
+ if (!vic || !info->vics)
+ return false;
+
+ for (i = 0; i < info->vics_len; i++) {
+ if (info->vics[i] == vic)
+ return true;
+ }
+
+ return false;
+}
+
+/* CTA-861-H YCbCr 4:2:0 Video Data Block (CTA Y420VDB) */
+static void parse_cta_y420vdb(struct drm_connector *connector,
+ const struct cea_db *db)
+{
+ struct drm_display_info *info = &connector->display_info;
+ struct drm_hdmi_info *hdmi = &info->hdmi;
+ const u8 *svds = cea_db_data(db) + 1;
+ int i;
+
+ for (i = 0; i < cea_db_payload_len(db) - 1; i++) {
+ u8 vic = svd_to_vic(svds[i]);
+
+ if (!drm_valid_cea_vic(vic))
+ continue;
+
+ bitmap_set(hdmi->y420_vdb_modes, vic, 1);
+ info->color_formats |= DRM_COLOR_FORMAT_YCBCR420;
+ }
+}
+
static void drm_parse_vcdb(struct drm_connector *connector, const u8 *db)
{
struct drm_display_info *info = &connector->display_info;
@@ -5995,14 +6028,14 @@ static void drm_parse_dsc_info(struct drm_hdmi_dsc_cap *hdmi_dsc,
static void drm_parse_hdmi_forum_scds(struct drm_connector *connector,
const u8 *hf_scds)
{
- struct drm_display_info *display = &connector->display_info;
- struct drm_hdmi_info *hdmi = &display->hdmi;
+ struct drm_display_info *info = &connector->display_info;
+ struct drm_hdmi_info *hdmi = &info->hdmi;
struct drm_hdmi_dsc_cap *hdmi_dsc = &hdmi->dsc_cap;
int max_tmds_clock = 0;
u8 max_frl_rate = 0;
bool dsc_support = false;
- display->has_hdmi_infoframe = true;
+ info->has_hdmi_infoframe = true;
if (hf_scds[6] & 0x80) {
hdmi->scdc.supported = true;
@@ -6026,7 +6059,7 @@ static void drm_parse_hdmi_forum_scds(struct drm_connector *connector,
max_tmds_clock = hf_scds[5] * 5000;
if (max_tmds_clock > 340000) {
- display->max_tmds_clock = max_tmds_clock;
+ info->max_tmds_clock = max_tmds_clock;
}
if (scdc->supported) {
@@ -6117,6 +6150,7 @@ static void drm_parse_hdmi_deep_color_info(struct drm_connector *connector,
}
}
+/* HDMI Vendor-Specific Data Block (HDMI VSDB, H14b-VSDB) */
static void
drm_parse_hdmi_vsdb_video(struct drm_connector *connector, const u8 *db)
{
@@ -6130,6 +6164,15 @@ drm_parse_hdmi_vsdb_video(struct drm_connector *connector, const u8 *db)
if (len >= 7)
info->max_tmds_clock = db[7] * 5000;
+ /*
+ * Try to infer whether the sink supports HDMI infoframes.
+ *
+ * HDMI infoframe support was first added in HDMI 1.4. Assume the sink
+ * supports infoframes if HDMI_Video_present is set.
+ */
+ if (len >= 8 && db[8] & BIT(5))
+ info->has_hdmi_infoframe = true;
+
drm_dbg_kms(connector->dev, "[CONNECTOR:%d:%s] HDMI: DVI dual %d, max TMDS clock %d kHz\n",
connector->base.id, connector->name,
info->dvi_dual, info->max_tmds_clock);
@@ -6165,6 +6208,7 @@ static void drm_parse_cea_ext(struct drm_connector *connector,
const struct cea_db *db;
struct cea_db_iter iter;
const u8 *edid_ext;
+ u64 y420cmdb_map = 0;
drm_edid_iter_begin(drm_edid, &edid_iter);
drm_edid_iter_for_each(edid_ext, &edid_iter) {
@@ -6202,13 +6246,20 @@ static void drm_parse_cea_ext(struct drm_connector *connector,
else if (cea_db_is_microsoft_vsdb(db))
drm_parse_microsoft_vsdb(connector, data);
else if (cea_db_is_y420cmdb(db))
- drm_parse_y420cmdb_bitmap(connector, data);
+ parse_cta_y420cmdb(connector, db, &y420cmdb_map);
+ else if (cea_db_is_y420vdb(db))
+ parse_cta_y420vdb(connector, db);
else if (cea_db_is_vcdb(db))
drm_parse_vcdb(connector, data);
else if (cea_db_is_hdmi_hdr_metadata_block(db))
drm_parse_hdr_metadata_block(connector, data);
+ else if (cea_db_tag(db) == CTA_DB_VIDEO)
+ parse_cta_vdb(connector, db);
}
cea_db_iter_end(&iter);
+
+ if (y420cmdb_map)
+ update_cta_y420cmdb(connector, y420cmdb_map);
}
static
@@ -6374,17 +6425,29 @@ static void drm_reset_display_info(struct drm_connector *connector)
info->mso_stream_count = 0;
info->mso_pixel_overlap = 0;
info->max_dsc_bpp = 0;
+
+ kfree(info->vics);
+ info->vics = NULL;
+ info->vics_len = 0;
+
+ info->quirks = 0;
}
-static u32 update_display_info(struct drm_connector *connector,
- const struct drm_edid *drm_edid)
+static void update_display_info(struct drm_connector *connector,
+ const struct drm_edid *drm_edid)
{
struct drm_display_info *info = &connector->display_info;
- const struct edid *edid = drm_edid->edid;
-
- u32 quirks = edid_get_quirks(drm_edid);
+ const struct edid *edid;
drm_reset_display_info(connector);
+ clear_eld(connector);
+
+ if (!drm_edid)
+ return;
+
+ edid = drm_edid->edid;
+
+ info->quirks = edid_get_quirks(drm_edid);
info->width_mm = edid->width_cm * 10;
info->height_mm = edid->height_cm * 10;
@@ -6456,17 +6519,30 @@ static u32 update_display_info(struct drm_connector *connector,
drm_update_mso(connector, drm_edid);
out:
- if (quirks & EDID_QUIRK_NON_DESKTOP) {
+ if (info->quirks & EDID_QUIRK_NON_DESKTOP) {
drm_dbg_kms(connector->dev, "[CONNECTOR:%d:%s] Non-desktop display%s\n",
connector->base.id, connector->name,
info->non_desktop ? " (redundant quirk)" : "");
info->non_desktop = true;
}
- if (quirks & EDID_QUIRK_CAP_DSC_15BPP)
+ if (info->quirks & EDID_QUIRK_CAP_DSC_15BPP)
info->max_dsc_bpp = 15;
- return quirks;
+ if (info->quirks & EDID_QUIRK_FORCE_6BPC)
+ info->bpc = 6;
+
+ if (info->quirks & EDID_QUIRK_FORCE_8BPC)
+ info->bpc = 8;
+
+ if (info->quirks & EDID_QUIRK_FORCE_10BPC)
+ info->bpc = 10;
+
+ if (info->quirks & EDID_QUIRK_FORCE_12BPC)
+ info->bpc = 12;
+
+ /* Depends on info->cea_rev set by drm_parse_cea_ext() above */
+ drm_edid_to_eld(connector, drm_edid);
}
static struct drm_display_mode *drm_mode_displayid_detailed(struct drm_device *dev,
@@ -6561,27 +6637,14 @@ static int add_displayid_detailed_modes(struct drm_connector *connector,
return num_modes;
}
-static int _drm_edid_connector_update(struct drm_connector *connector,
- const struct drm_edid *drm_edid)
+static int _drm_edid_connector_add_modes(struct drm_connector *connector,
+ const struct drm_edid *drm_edid)
{
+ const struct drm_display_info *info = &connector->display_info;
int num_modes = 0;
- u32 quirks;
- if (!drm_edid) {
- drm_reset_display_info(connector);
- clear_eld(connector);
+ if (!drm_edid)
return 0;
- }
-
- /*
- * CEA-861-F adds ycbcr capability map block, for HDMI 2.0 sinks.
- * To avoid multiple parsing of same block, lets parse that map
- * from sink info, before parsing CEA modes.
- */
- quirks = update_display_info(connector, drm_edid);
-
- /* Depends on info->cea_rev set by update_display_info() above */
- drm_edid_to_eld(connector, drm_edid);
/*
* EDID spec says modes should be preferred in this order:
@@ -6597,7 +6660,7 @@ static int _drm_edid_connector_update(struct drm_connector *connector,
*
* XXX order for additional mode types in extension blocks?
*/
- num_modes += add_detailed_modes(connector, drm_edid, quirks);
+ num_modes += add_detailed_modes(connector, drm_edid);
num_modes += add_cvt_modes(connector, drm_edid);
num_modes += add_standard_modes(connector, drm_edid);
num_modes += add_established_modes(connector, drm_edid);
@@ -6607,20 +6670,8 @@ static int _drm_edid_connector_update(struct drm_connector *connector,
if (drm_edid->edid->features & DRM_EDID_FEATURE_CONTINUOUS_FREQ)
num_modes += add_inferred_modes(connector, drm_edid);
- if (quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75))
- edid_fixup_preferred(connector, quirks);
-
- if (quirks & EDID_QUIRK_FORCE_6BPC)
- connector->display_info.bpc = 6;
-
- if (quirks & EDID_QUIRK_FORCE_8BPC)
- connector->display_info.bpc = 8;
-
- if (quirks & EDID_QUIRK_FORCE_10BPC)
- connector->display_info.bpc = 10;
-
- if (quirks & EDID_QUIRK_FORCE_12BPC)
- connector->display_info.bpc = 12;
+ if (info->quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75))
+ edid_fixup_preferred(connector);
return num_modes;
}
@@ -6684,49 +6735,54 @@ out:
* @connector: Connector
* @drm_edid: EDID
*
- * Update the connector mode list, display info, ELD, HDR metadata, relevant
- * properties, etc. from the passed in EDID.
+ * Update the connector display info, ELD, HDR metadata, relevant properties,
+ * etc. from the passed in EDID.
*
* If EDID is NULL, reset the information.
*
- * Return: The number of modes added or 0 if we couldn't find any.
+ * Must be called before calling drm_edid_connector_add_modes().
+ *
+ * Return: 0 on success, negative error on errors.
*/
int drm_edid_connector_update(struct drm_connector *connector,
const struct drm_edid *drm_edid)
{
- int count;
-
- count = _drm_edid_connector_update(connector, drm_edid);
+ update_display_info(connector, drm_edid);
_drm_update_tile_info(connector, drm_edid);
- /* Note: Ignore errors for now. */
- _drm_edid_connector_property_update(connector, drm_edid);
-
- return count;
+ return _drm_edid_connector_property_update(connector, drm_edid);
}
EXPORT_SYMBOL(drm_edid_connector_update);
-static int _drm_connector_update_edid_property(struct drm_connector *connector,
- const struct drm_edid *drm_edid)
+/**
+ * drm_edid_connector_add_modes - Update probed modes from the EDID property
+ * @connector: Connector
+ *
+ * Add the modes from the previously updated EDID property to the connector
+ * probed modes list.
+ *
+ * drm_edid_connector_update() must have been called before this to update the
+ * EDID property.
+ *
+ * Return: The number of modes added, or 0 if we couldn't find any.
+ */
+int drm_edid_connector_add_modes(struct drm_connector *connector)
{
- /*
- * Set the display info, using edid if available, otherwise resetting
- * the values to defaults. This duplicates the work done in
- * drm_add_edid_modes, but that function is not consistently called
- * before this one in all drivers and the computation is cheap enough
- * that it seems better to duplicate it rather than attempt to ensure
- * some arbitrary ordering of calls.
- */
- if (drm_edid)
- update_display_info(connector, drm_edid);
- else
- drm_reset_display_info(connector);
+ const struct drm_edid *drm_edid = NULL;
+ int count;
- _drm_update_tile_info(connector, drm_edid);
+ if (connector->edid_blob_ptr)
+ drm_edid = drm_edid_alloc(connector->edid_blob_ptr->data,
+ connector->edid_blob_ptr->length);
- return _drm_edid_connector_property_update(connector, drm_edid);
+ count = _drm_edid_connector_add_modes(connector, drm_edid);
+
+ drm_edid_free(drm_edid);
+
+ return count;
}
+EXPORT_SYMBOL(drm_edid_connector_add_modes);
/**
* drm_connector_update_edid_property - update the edid property of a connector
@@ -6749,8 +6805,7 @@ int drm_connector_update_edid_property(struct drm_connector *connector,
{
struct drm_edid drm_edid;
- return _drm_connector_update_edid_property(connector,
- drm_edid_legacy_init(&drm_edid, edid));
+ return drm_edid_connector_update(connector, drm_edid_legacy_init(&drm_edid, edid));
}
EXPORT_SYMBOL(drm_connector_update_edid_property);
@@ -6763,13 +6818,14 @@ EXPORT_SYMBOL(drm_connector_update_edid_property);
* &drm_display_info structure and ELD in @connector with any information which
* can be derived from the edid.
*
- * This function is deprecated. Use drm_edid_connector_update() instead.
+ * This function is deprecated. Use drm_edid_connector_add_modes() instead.
*
* Return: The number of modes added or 0 if we couldn't find any.
*/
int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
{
- struct drm_edid drm_edid;
+ struct drm_edid _drm_edid;
+ const struct drm_edid *drm_edid;
if (edid && !drm_edid_is_valid(edid)) {
drm_warn(connector->dev, "[CONNECTOR:%d:%s] EDID invalid.\n",
@@ -6777,8 +6833,11 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
edid = NULL;
}
- return _drm_edid_connector_update(connector,
- drm_edid_legacy_init(&drm_edid, edid));
+ drm_edid = drm_edid_legacy_init(&_drm_edid, edid);
+
+ update_display_info(connector, drm_edid);
+
+ return _drm_edid_connector_add_modes(connector, drm_edid);
}
EXPORT_SYMBOL(drm_add_edid_modes);
@@ -6885,8 +6944,6 @@ static u8 drm_mode_hdmi_vic(const struct drm_connector *connector,
static u8 drm_mode_cea_vic(const struct drm_connector *connector,
const struct drm_display_mode *mode)
{
- u8 vic;
-
/*
* HDMI spec says if a mode is found in HDMI 1.4b 4K modes
* we should send its VIC in vendor infoframes, else send the
@@ -6896,14 +6953,23 @@ static u8 drm_mode_cea_vic(const struct drm_connector *connector,
if (drm_mode_hdmi_vic(connector, mode))
return 0;
- vic = drm_match_cea_mode(mode);
+ return drm_match_cea_mode(mode);
+}
- /*
- * HDMI 1.4 VIC range: 1 <= VIC <= 64 (CEA-861-D) but
- * HDMI 2.0 VIC range: 1 <= VIC <= 107 (CEA-861-F). So we
- * have to make sure we dont break HDMI 1.4 sinks.
- */
- if (!is_hdmi2_sink(connector) && vic > 64)
+/*
+ * Avoid sending VICs defined in HDMI 2.0 in AVI infoframes to sinks that
+ * conform to HDMI 1.4.
+ *
+ * HDMI 1.4 (CTA-861-D) VIC range: [1..64]
+ * HDMI 2.0 (CTA-861-F) VIC range: [1..107]
+ *
+ * If the sink lists the VIC in CTA VDB, assume it's fine, regardless of HDMI
+ * version.
+ */
+static u8 vic_for_avi_infoframe(const struct drm_connector *connector, u8 vic)
+{
+ if (!is_hdmi2_sink(connector) && vic > 64 &&
+ !cta_vdb_has_vic(connector, vic))
return 0;
return vic;
@@ -6978,7 +7044,7 @@ drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
picture_aspect = HDMI_PICTURE_ASPECT_NONE;
}
- frame->video_code = vic;
+ frame->video_code = vic_for_avi_infoframe(connector, vic);
frame->picture_aspect = picture_aspect;
frame->active_aspect = HDMI_ACTIVE_ASPECT_PICTURE;
frame->scan_mode = HDMI_SCAN_MODE_UNDERSCAN;
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 0d0c26ebab90..a39998047f8a 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -416,14 +416,30 @@ static void drm_fb_helper_damage_work(struct work_struct *work)
* drm_fb_helper_prepare - setup a drm_fb_helper structure
* @dev: DRM device
* @helper: driver-allocated fbdev helper structure to set up
+ * @preferred_bpp: Preferred bits per pixel for the device.
* @funcs: pointer to structure of functions associate with this helper
*
* Sets up the bare minimum to make the framebuffer helper usable. This is
* useful to implement race-free initialization of the polling helpers.
*/
void drm_fb_helper_prepare(struct drm_device *dev, struct drm_fb_helper *helper,
+ unsigned int preferred_bpp,
const struct drm_fb_helper_funcs *funcs)
{
+ /*
+ * Pick a preferred bpp of 32 if no value has been given. This
+ * will select XRGB8888 for the framebuffer formats. All drivers
+ * have to support XRGB8888 for backwards compatibility with legacy
+ * userspace, so it's the safe choice here.
+ *
+ * TODO: Replace struct drm_mode_config.preferred_depth and this
+ * bpp value with a preferred format that is given as struct
+ * drm_format_info. Then derive all other values from the
+ * format.
+ */
+ if (!preferred_bpp)
+ preferred_bpp = 32;
+
INIT_LIST_HEAD(&helper->kernel_fb_list);
spin_lock_init(&helper->damage_lock);
INIT_WORK(&helper->resume_work, drm_fb_helper_resume_worker);
@@ -432,10 +448,23 @@ void drm_fb_helper_prepare(struct drm_device *dev, struct drm_fb_helper *helper,
mutex_init(&helper->lock);
helper->funcs = funcs;
helper->dev = dev;
+ helper->preferred_bpp = preferred_bpp;
}
EXPORT_SYMBOL(drm_fb_helper_prepare);
/**
+ * drm_fb_helper_unprepare - clean up a drm_fb_helper structure
+ * @fb_helper: driver-allocated fbdev helper structure to set up
+ *
+ * Cleans up the framebuffer helper. Inverse of drm_fb_helper_prepare().
+ */
+void drm_fb_helper_unprepare(struct drm_fb_helper *fb_helper)
+{
+ mutex_destroy(&fb_helper->lock);
+}
+EXPORT_SYMBOL(drm_fb_helper_unprepare);
+
+/**
* drm_fb_helper_init - initialize a &struct drm_fb_helper
* @dev: drm device
* @fb_helper: driver-allocated fbdev helper structure to initialize
@@ -475,8 +504,8 @@ EXPORT_SYMBOL(drm_fb_helper_init);
* drm_fb_helper_alloc_info - allocate fb_info and some of its members
* @fb_helper: driver-allocated fbdev helper
*
- * A helper to alloc fb_info and the members cmap and apertures. Called
- * by the driver within the fb_probe fb_helper callback function. Drivers do not
+ * A helper to alloc fb_info and the member cmap. Called by the driver
+ * within the fb_probe fb_helper callback function. Drivers do not
* need to release the allocated fb_info structure themselves, this is
* automatically done when calling drm_fb_helper_fini().
*
@@ -498,27 +527,11 @@ struct fb_info *drm_fb_helper_alloc_info(struct drm_fb_helper *fb_helper)
if (ret)
goto err_release;
- /*
- * TODO: We really should be smarter here and alloc an aperture
- * for each IORESOURCE_MEM resource helper->dev->dev has and also
- * init the ranges of the appertures based on the resources.
- * Note some drivers currently count on there being only 1 empty
- * aperture and fill this themselves, these will need to be dealt
- * with somehow when fixing this.
- */
- info->apertures = alloc_apertures(1);
- if (!info->apertures) {
- ret = -ENOMEM;
- goto err_free_cmap;
- }
-
fb_helper->info = info;
info->skip_vt_switch = true;
return info;
-err_free_cmap:
- fb_dealloc_cmap(&info->cmap);
err_release:
framebuffer_release(info);
return ERR_PTR(ret);
@@ -577,8 +590,6 @@ void drm_fb_helper_fini(struct drm_fb_helper *fb_helper)
}
mutex_unlock(&kernel_fb_helper_lock);
- mutex_destroy(&fb_helper->lock);
-
if (!fb_helper->client.funcs)
drm_client_release(&fb_helper->client);
}
@@ -1728,117 +1739,132 @@ unlock:
}
EXPORT_SYMBOL(drm_fb_helper_pan_display);
-/*
- * Allocates the backing storage and sets up the fbdev info structure through
- * the ->fb_probe callback.
- */
-static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
- int preferred_bpp)
+static uint32_t drm_fb_helper_find_format(struct drm_fb_helper *fb_helper, const uint32_t *formats,
+ size_t format_count, uint32_t bpp, uint32_t depth)
{
- struct drm_client_dev *client = &fb_helper->client;
struct drm_device *dev = fb_helper->dev;
- struct drm_mode_config *config = &dev->mode_config;
- int ret = 0;
- int crtc_count = 0;
- struct drm_connector_list_iter conn_iter;
- struct drm_fb_helper_surface_size sizes;
- struct drm_connector *connector;
- struct drm_mode_set *mode_set;
- int best_depth = 0;
-
- memset(&sizes, 0, sizeof(struct drm_fb_helper_surface_size));
- sizes.surface_depth = 24;
- sizes.surface_bpp = 32;
- sizes.fb_width = (u32)-1;
- sizes.fb_height = (u32)-1;
+ uint32_t format;
+ size_t i;
/*
- * If driver picks 8 or 16 by default use that for both depth/bpp
- * to begin with
+ * Do not consider YUV or other complicated formats
+ * for framebuffers. This means only legacy formats
+ * are supported (fmt->depth is a legacy field), but
+ * the framebuffer emulation can only deal with such
+ * formats, specifically RGB/BGA formats.
*/
- if (preferred_bpp != sizes.surface_bpp)
- sizes.surface_depth = sizes.surface_bpp = preferred_bpp;
+ format = drm_mode_legacy_fb_format(bpp, depth);
+ if (!format)
+ goto err;
- drm_connector_list_iter_begin(fb_helper->dev, &conn_iter);
- drm_client_for_each_connector_iter(connector, &conn_iter) {
- struct drm_cmdline_mode *cmdline_mode;
+ for (i = 0; i < format_count; ++i) {
+ if (formats[i] == format)
+ return format;
+ }
- cmdline_mode = &connector->cmdline_mode;
+err:
+ /* We found nothing. */
+ drm_warn(dev, "bpp/depth value of %u/%u not supported\n", bpp, depth);
- if (cmdline_mode->bpp_specified) {
- switch (cmdline_mode->bpp) {
- case 8:
- sizes.surface_depth = sizes.surface_bpp = 8;
- break;
- case 15:
- sizes.surface_depth = 15;
- sizes.surface_bpp = 16;
- break;
- case 16:
- sizes.surface_depth = sizes.surface_bpp = 16;
- break;
- case 24:
- sizes.surface_depth = sizes.surface_bpp = 24;
- break;
- case 32:
- sizes.surface_depth = 24;
- sizes.surface_bpp = 32;
- break;
- }
- break;
- }
+ return DRM_FORMAT_INVALID;
+}
+
+static uint32_t drm_fb_helper_find_color_mode_format(struct drm_fb_helper *fb_helper,
+ const uint32_t *formats, size_t format_count,
+ unsigned int color_mode)
+{
+ struct drm_device *dev = fb_helper->dev;
+ uint32_t bpp, depth;
+
+ switch (color_mode) {
+ case 1:
+ case 2:
+ case 4:
+ case 8:
+ case 16:
+ case 24:
+ bpp = depth = color_mode;
+ break;
+ case 15:
+ bpp = 16;
+ depth = 15;
+ break;
+ case 32:
+ bpp = 32;
+ depth = 24;
+ break;
+ default:
+ drm_info(dev, "unsupported color mode of %d\n", color_mode);
+ return DRM_FORMAT_INVALID;
}
- drm_connector_list_iter_end(&conn_iter);
- /*
- * If we run into a situation where, for example, the primary plane
- * supports RGBA5551 (16 bpp, depth 15) but not RGB565 (16 bpp, depth
- * 16) we need to scale down the depth of the sizes we request.
- */
- mutex_lock(&client->modeset_mutex);
+ return drm_fb_helper_find_format(fb_helper, formats, format_count, bpp, depth);
+}
+
+static int __drm_fb_helper_find_sizes(struct drm_fb_helper *fb_helper,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct drm_client_dev *client = &fb_helper->client;
+ struct drm_device *dev = fb_helper->dev;
+ int crtc_count = 0;
+ struct drm_connector_list_iter conn_iter;
+ struct drm_connector *connector;
+ struct drm_mode_set *mode_set;
+ uint32_t surface_format = DRM_FORMAT_INVALID;
+ const struct drm_format_info *info;
+
+ memset(sizes, 0, sizeof(*sizes));
+ sizes->fb_width = (u32)-1;
+ sizes->fb_height = (u32)-1;
+
drm_client_for_each_modeset(mode_set, client) {
struct drm_crtc *crtc = mode_set->crtc;
struct drm_plane *plane = crtc->primary;
- int j;
drm_dbg_kms(dev, "test CRTC %u primary plane\n", drm_crtc_index(crtc));
- for (j = 0; j < plane->format_count; j++) {
- const struct drm_format_info *fmt;
-
- fmt = drm_format_info(plane->format_types[j]);
-
- /*
- * Do not consider YUV or other complicated formats
- * for framebuffers. This means only legacy formats
- * are supported (fmt->depth is a legacy field) but
- * the framebuffer emulation can only deal with such
- * formats, specifically RGB/BGA formats.
- */
- if (fmt->depth == 0)
- continue;
-
- /* We found a perfect fit, great */
- if (fmt->depth == sizes.surface_depth) {
- best_depth = fmt->depth;
- break;
- }
+ drm_connector_list_iter_begin(fb_helper->dev, &conn_iter);
+ drm_client_for_each_connector_iter(connector, &conn_iter) {
+ struct drm_cmdline_mode *cmdline_mode = &connector->cmdline_mode;
- /* Skip depths above what we're looking for */
- if (fmt->depth > sizes.surface_depth)
+ if (!cmdline_mode->bpp_specified)
continue;
- /* Best depth found so far */
- if (fmt->depth > best_depth)
- best_depth = fmt->depth;
+ surface_format = drm_fb_helper_find_color_mode_format(fb_helper,
+ plane->format_types,
+ plane->format_count,
+ cmdline_mode->bpp);
+ if (surface_format != DRM_FORMAT_INVALID)
+ break; /* found supported format */
}
+ drm_connector_list_iter_end(&conn_iter);
+
+ if (surface_format != DRM_FORMAT_INVALID)
+ break; /* found supported format */
+
+ /* try preferred color mode */
+ surface_format = drm_fb_helper_find_color_mode_format(fb_helper,
+ plane->format_types,
+ plane->format_count,
+ fb_helper->preferred_bpp);
+ if (surface_format != DRM_FORMAT_INVALID)
+ break; /* found supported format */
}
- if (sizes.surface_depth != best_depth && best_depth) {
- drm_info(dev, "requested bpp %d, scaled depth down to %d",
- sizes.surface_bpp, best_depth);
- sizes.surface_depth = best_depth;
+
+ if (surface_format == DRM_FORMAT_INVALID) {
+ /*
+ * If none of the given color modes works, fall back
+ * to XRGB8888. Drivers are expected to provide this
+ * format for compatibility with legacy applications.
+ */
+ drm_warn(dev, "No compatible format found\n");
+ surface_format = drm_driver_legacy_fb_format(dev, 32, 24);
}
+ info = drm_format_info(surface_format);
+ sizes->surface_bpp = drm_format_info_bpp(info, 0);
+ sizes->surface_depth = info->depth;
+
/* first up get a count of crtcs now in use and new min/maxes width/heights */
crtc_count = 0;
drm_client_for_each_modeset(mode_set, client) {
@@ -1860,8 +1886,10 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
x = mode_set->x;
y = mode_set->y;
- sizes.surface_width = max_t(u32, desired_mode->hdisplay + x, sizes.surface_width);
- sizes.surface_height = max_t(u32, desired_mode->vdisplay + y, sizes.surface_height);
+ sizes->surface_width =
+ max_t(u32, desired_mode->hdisplay + x, sizes->surface_width);
+ sizes->surface_height =
+ max_t(u32, desired_mode->vdisplay + y, sizes->surface_height);
for (j = 0; j < mode_set->num_connectors; j++) {
struct drm_connector *connector = mode_set->connectors[j];
@@ -1877,28 +1905,63 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
}
if (lasth)
- sizes.fb_width = min_t(u32, desired_mode->hdisplay + x, sizes.fb_width);
+ sizes->fb_width = min_t(u32, desired_mode->hdisplay + x, sizes->fb_width);
if (lastv)
- sizes.fb_height = min_t(u32, desired_mode->vdisplay + y, sizes.fb_height);
+ sizes->fb_height = min_t(u32, desired_mode->vdisplay + y, sizes->fb_height);
}
- mutex_unlock(&client->modeset_mutex);
- if (crtc_count == 0 || sizes.fb_width == -1 || sizes.fb_height == -1) {
+ if (crtc_count == 0 || sizes->fb_width == -1 || sizes->fb_height == -1) {
drm_info(dev, "Cannot find any crtc or sizes\n");
-
- /* First time: disable all crtc's.. */
- if (!fb_helper->deferred_setup)
- drm_client_modeset_commit(client);
return -EAGAIN;
}
+ return 0;
+}
+
+static int drm_fb_helper_find_sizes(struct drm_fb_helper *fb_helper,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct drm_client_dev *client = &fb_helper->client;
+ struct drm_device *dev = fb_helper->dev;
+ struct drm_mode_config *config = &dev->mode_config;
+ int ret;
+
+ mutex_lock(&client->modeset_mutex);
+ ret = __drm_fb_helper_find_sizes(fb_helper, sizes);
+ mutex_unlock(&client->modeset_mutex);
+
+ if (ret)
+ return ret;
+
/* Handle our overallocation */
- sizes.surface_height *= drm_fbdev_overalloc;
- sizes.surface_height /= 100;
- if (sizes.surface_height > config->max_height) {
+ sizes->surface_height *= drm_fbdev_overalloc;
+ sizes->surface_height /= 100;
+ if (sizes->surface_height > config->max_height) {
drm_dbg_kms(dev, "Fbdev over-allocation too large; clamping height to %d\n",
config->max_height);
- sizes.surface_height = config->max_height;
+ sizes->surface_height = config->max_height;
+ }
+
+ return 0;
+}
+
+/*
+ * Allocates the backing storage and sets up the fbdev info structure through
+ * the ->fb_probe callback.
+ */
+static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper)
+{
+ struct drm_client_dev *client = &fb_helper->client;
+ struct drm_device *dev = fb_helper->dev;
+ struct drm_fb_helper_surface_size sizes;
+ int ret;
+
+ ret = drm_fb_helper_find_sizes(fb_helper, &sizes);
+ if (ret) {
+ /* First time: disable all crtc's.. */
+ if (!fb_helper->deferred_setup)
+ drm_client_modeset_commit(client);
+ return ret;
}
#if IS_ENABLED(CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM)
@@ -2076,8 +2139,7 @@ static void drm_setup_crtcs_fb(struct drm_fb_helper *fb_helper)
/* Note: Drops fb_helper->lock before returning. */
static int
-__drm_fb_helper_initial_config_and_unlock(struct drm_fb_helper *fb_helper,
- int bpp_sel)
+__drm_fb_helper_initial_config_and_unlock(struct drm_fb_helper *fb_helper)
{
struct drm_device *dev = fb_helper->dev;
struct fb_info *info;
@@ -2088,10 +2150,9 @@ __drm_fb_helper_initial_config_and_unlock(struct drm_fb_helper *fb_helper,
height = dev->mode_config.max_height;
drm_client_modeset_probe(&fb_helper->client, width, height);
- ret = drm_fb_helper_single_fb_probe(fb_helper, bpp_sel);
+ ret = drm_fb_helper_single_fb_probe(fb_helper);
if (ret < 0) {
if (ret == -EAGAIN) {
- fb_helper->preferred_bpp = bpp_sel;
fb_helper->deferred_setup = true;
ret = 0;
}
@@ -2137,7 +2198,6 @@ __drm_fb_helper_initial_config_and_unlock(struct drm_fb_helper *fb_helper,
/**
* drm_fb_helper_initial_config - setup a sane initial connector configuration
* @fb_helper: fb_helper device struct
- * @bpp_sel: bpp value to use for the framebuffer configuration
*
* Scans the CRTCs and connectors and tries to put together an initial setup.
* At the moment, this is a cloned configuration across all heads with
@@ -2175,7 +2235,7 @@ __drm_fb_helper_initial_config_and_unlock(struct drm_fb_helper *fb_helper,
* RETURNS:
* Zero if everything went ok, nonzero otherwise.
*/
-int drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel)
+int drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper)
{
int ret;
@@ -2183,7 +2243,7 @@ int drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel)
return 0;
mutex_lock(&fb_helper->lock);
- ret = __drm_fb_helper_initial_config_and_unlock(fb_helper, bpp_sel);
+ ret = __drm_fb_helper_initial_config_and_unlock(fb_helper);
return ret;
}
@@ -2219,8 +2279,7 @@ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
mutex_lock(&fb_helper->lock);
if (fb_helper->deferred_setup) {
- err = __drm_fb_helper_initial_config_and_unlock(fb_helper,
- fb_helper->preferred_bpp);
+ err = __drm_fb_helper_initial_config_and_unlock(fb_helper);
return err;
}
diff --git a/drivers/gpu/drm/drm_fbdev_generic.c b/drivers/gpu/drm/drm_fbdev_generic.c
index 593aa3283792..4d6325e91565 100644
--- a/drivers/gpu/drm/drm_fbdev_generic.c
+++ b/drivers/gpu/drm/drm_fbdev_generic.c
@@ -43,20 +43,18 @@ static int drm_fbdev_fb_release(struct fb_info *info, int user)
return 0;
}
-static void drm_fbdev_cleanup(struct drm_fb_helper *fb_helper)
+static void drm_fbdev_fb_destroy(struct fb_info *info)
{
- struct fb_info *fbi = fb_helper->info;
+ struct drm_fb_helper *fb_helper = info->par;
void *shadow = NULL;
if (!fb_helper->dev)
return;
- if (fbi) {
- if (fbi->fbdefio)
- fb_deferred_io_cleanup(fbi);
- if (drm_fbdev_use_shadow_fb(fb_helper))
- shadow = fbi->screen_buffer;
- }
+ if (info->fbdefio)
+ fb_deferred_io_cleanup(info);
+ if (drm_fbdev_use_shadow_fb(fb_helper))
+ shadow = info->screen_buffer;
drm_fb_helper_fini(fb_helper);
@@ -66,22 +64,10 @@ static void drm_fbdev_cleanup(struct drm_fb_helper *fb_helper)
drm_client_buffer_vunmap(fb_helper->buffer);
drm_client_framebuffer_delete(fb_helper->buffer);
-}
-
-static void drm_fbdev_release(struct drm_fb_helper *fb_helper)
-{
- drm_fbdev_cleanup(fb_helper);
drm_client_release(&fb_helper->client);
- kfree(fb_helper);
-}
-/*
- * fb_ops.fb_destroy is called by the last put_fb_info() call at the end of
- * unregister_framebuffer() or fb_release().
- */
-static void drm_fbdev_fb_destroy(struct fb_info *info)
-{
- drm_fbdev_release(info->par);
+ drm_fb_helper_unprepare(fb_helper);
+ kfree(fb_helper);
}
static int drm_fbdev_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
@@ -181,7 +167,7 @@ static int drm_fbdev_fb_probe(struct drm_fb_helper *fb_helper,
struct drm_device *dev = fb_helper->dev;
struct drm_client_buffer *buffer;
struct drm_framebuffer *fb;
- struct fb_info *fbi;
+ struct fb_info *info;
u32 format;
struct iosys_map map;
int ret;
@@ -200,29 +186,29 @@ static int drm_fbdev_fb_probe(struct drm_fb_helper *fb_helper,
fb_helper->fb = buffer->fb;
fb = buffer->fb;
- fbi = drm_fb_helper_alloc_info(fb_helper);
- if (IS_ERR(fbi))
- return PTR_ERR(fbi);
+ info = drm_fb_helper_alloc_info(fb_helper);
+ if (IS_ERR(info))
+ return PTR_ERR(info);
- fbi->fbops = &drm_fbdev_fb_ops;
- fbi->screen_size = sizes->surface_height * fb->pitches[0];
- fbi->fix.smem_len = fbi->screen_size;
- fbi->flags = FBINFO_DEFAULT;
+ info->fbops = &drm_fbdev_fb_ops;
+ info->screen_size = sizes->surface_height * fb->pitches[0];
+ info->fix.smem_len = info->screen_size;
+ info->flags = FBINFO_DEFAULT;
- drm_fb_helper_fill_info(fbi, fb_helper, sizes);
+ drm_fb_helper_fill_info(info, fb_helper, sizes);
if (drm_fbdev_use_shadow_fb(fb_helper)) {
- fbi->screen_buffer = vzalloc(fbi->screen_size);
- if (!fbi->screen_buffer)
+ info->screen_buffer = vzalloc(info->screen_size);
+ if (!info->screen_buffer)
return -ENOMEM;
- fbi->flags |= FBINFO_VIRTFB | FBINFO_READS_FAST;
+ info->flags |= FBINFO_VIRTFB | FBINFO_READS_FAST;
/* Set a default deferred I/O handler */
fb_helper->fbdefio.delay = HZ / 20;
fb_helper->fbdefio.deferred_io = drm_fb_helper_deferred_io;
- fbi->fbdefio = &fb_helper->fbdefio;
- ret = fb_deferred_io_init(fbi);
+ info->fbdefio = &fb_helper->fbdefio;
+ ret = fb_deferred_io_init(info);
if (ret)
return ret;
} else {
@@ -231,10 +217,10 @@ static int drm_fbdev_fb_probe(struct drm_fb_helper *fb_helper,
if (ret)
return ret;
if (map.is_iomem) {
- fbi->screen_base = map.vaddr_iomem;
+ info->screen_base = map.vaddr_iomem;
} else {
- fbi->screen_buffer = map.vaddr;
- fbi->flags |= FBINFO_VIRTFB;
+ info->screen_buffer = map.vaddr;
+ info->flags |= FBINFO_VIRTFB;
}
/*
@@ -243,10 +229,10 @@ static int drm_fbdev_fb_probe(struct drm_fb_helper *fb_helper,
* case.
*/
#if IS_ENABLED(CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM)
- if (fb_helper->hint_leak_smem_start && fbi->fix.smem_start == 0 &&
+ if (fb_helper->hint_leak_smem_start && info->fix.smem_start == 0 &&
!drm_WARN_ON_ONCE(dev, map.is_iomem))
- fbi->fix.smem_start =
- page_to_phys(virt_to_page(fbi->screen_buffer));
+ info->fix.smem_start =
+ page_to_phys(virt_to_page(info->screen_buffer));
#endif
}
@@ -363,11 +349,13 @@ static void drm_fbdev_client_unregister(struct drm_client_dev *client)
{
struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
- if (fb_helper->info)
- /* drm_fbdev_fb_destroy() takes care of cleanup */
+ if (fb_helper->info) {
drm_fb_helper_unregister_info(fb_helper);
- else
- drm_fbdev_release(fb_helper);
+ } else {
+ drm_client_release(&fb_helper->client);
+ drm_fb_helper_unprepare(fb_helper);
+ kfree(fb_helper);
+ }
}
static int drm_fbdev_client_restore(struct drm_client_dev *client)
@@ -383,41 +371,26 @@ static int drm_fbdev_client_hotplug(struct drm_client_dev *client)
struct drm_device *dev = client->dev;
int ret;
- /* Setup is not retried if it has failed */
- if (!fb_helper->dev && fb_helper->funcs)
- return 0;
-
if (dev->fb_helper)
return drm_fb_helper_hotplug_event(dev->fb_helper);
- if (!dev->mode_config.num_connector) {
- drm_dbg_kms(dev, "No connectors found, will not create framebuffer!\n");
- return 0;
- }
-
- drm_fb_helper_prepare(dev, fb_helper, &drm_fb_helper_generic_funcs);
-
ret = drm_fb_helper_init(dev, fb_helper);
if (ret)
- goto err;
+ goto err_drm_err;
if (!drm_drv_uses_atomic_modeset(dev))
drm_helper_disable_unused_functions(dev);
- ret = drm_fb_helper_initial_config(fb_helper, fb_helper->preferred_bpp);
+ ret = drm_fb_helper_initial_config(fb_helper);
if (ret)
- goto err_cleanup;
+ goto err_drm_fb_helper_fini;
return 0;
-err_cleanup:
- drm_fbdev_cleanup(fb_helper);
-err:
- fb_helper->dev = NULL;
- fb_helper->info = NULL;
-
+err_drm_fb_helper_fini:
+ drm_fb_helper_fini(fb_helper);
+err_drm_err:
drm_err(dev, "fbdev: Failed to setup generic emulation (ret=%d)\n", ret);
-
return ret;
}
@@ -432,7 +405,6 @@ static const struct drm_client_funcs drm_fbdev_client_funcs = {
* drm_fbdev_generic_setup() - Setup generic fbdev emulation
* @dev: DRM device
* @preferred_bpp: Preferred bits per pixel for the device.
- * @dev->mode_config.preferred_depth is used if this is zero.
*
* This function sets up generic fbdev emulation for drivers that supports
* dumb buffers with a virtual address and that can be mmap'ed.
@@ -467,29 +439,25 @@ void drm_fbdev_generic_setup(struct drm_device *dev,
fb_helper = kzalloc(sizeof(*fb_helper), GFP_KERNEL);
if (!fb_helper)
return;
+ drm_fb_helper_prepare(dev, fb_helper, preferred_bpp, &drm_fb_helper_generic_funcs);
ret = drm_client_init(dev, &fb_helper->client, "fbdev", &drm_fbdev_client_funcs);
if (ret) {
- kfree(fb_helper);
drm_err(dev, "Failed to register client: %d\n", ret);
- return;
+ goto err_drm_client_init;
}
- /*
- * FIXME: This mixes up depth with bpp, which results in a glorious
- * mess, resulting in some drivers picking wrong fbdev defaults and
- * others wrong preferred_depth defaults.
- */
- if (!preferred_bpp)
- preferred_bpp = dev->mode_config.preferred_depth;
- if (!preferred_bpp)
- preferred_bpp = 32;
- fb_helper->preferred_bpp = preferred_bpp;
-
ret = drm_fbdev_client_hotplug(&fb_helper->client);
if (ret)
drm_dbg_kms(dev, "client hotplug ret=%d\n", ret);
drm_client_register(&fb_helper->client);
+
+ return;
+
+err_drm_client_init:
+ drm_fb_helper_unprepare(fb_helper);
+ kfree(fb_helper);
+ return;
}
EXPORT_SYMBOL(drm_fbdev_generic_setup);
diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
index 64b4a3a87fbb..a51ff8cee049 100644
--- a/drivers/gpu/drm/drm_file.c
+++ b/drivers/gpu/drm/drm_file.c
@@ -245,10 +245,10 @@ void drm_file_free(struct drm_file *file)
dev = file->minor->dev;
- DRM_DEBUG("comm=\"%s\", pid=%d, dev=0x%lx, open_count=%d\n",
- current->comm, task_pid_nr(current),
- (long)old_encode_dev(file->minor->kdev->devt),
- atomic_read(&dev->open_count));
+ drm_dbg_core(dev, "comm=\"%s\", pid=%d, dev=0x%lx, open_count=%d\n",
+ current->comm, task_pid_nr(current),
+ (long)old_encode_dev(file->minor->kdev->devt),
+ atomic_read(&dev->open_count));
#ifdef CONFIG_DRM_LEGACY
if (drm_core_check_feature(dev, DRIVER_LEGACY) &&
@@ -340,8 +340,8 @@ int drm_open_helper(struct file *filp, struct drm_minor *minor)
dev->switch_power_state != DRM_SWITCH_POWER_DYNAMIC_OFF)
return -EINVAL;
- DRM_DEBUG("comm=\"%s\", pid=%d, minor=%d\n", current->comm,
- task_pid_nr(current), minor->index);
+ drm_dbg_core(dev, "comm=\"%s\", pid=%d, minor=%d\n",
+ current->comm, task_pid_nr(current), minor->index);
priv = drm_file_alloc(minor);
if (IS_ERR(priv))
@@ -450,11 +450,11 @@ EXPORT_SYMBOL(drm_open);
void drm_lastclose(struct drm_device * dev)
{
- DRM_DEBUG("\n");
+ drm_dbg_core(dev, "\n");
if (dev->driver->lastclose)
dev->driver->lastclose(dev);
- DRM_DEBUG("driver lastclose completed\n");
+ drm_dbg_core(dev, "driver lastclose completed\n");
if (drm_core_check_feature(dev, DRIVER_LEGACY))
drm_legacy_dev_reinit(dev);
@@ -485,7 +485,7 @@ int drm_release(struct inode *inode, struct file *filp)
if (drm_dev_needs_global_mutex(dev))
mutex_lock(&drm_global_mutex);
- DRM_DEBUG("open_count = %d\n", atomic_read(&dev->open_count));
+ drm_dbg_core(dev, "open_count = %d\n", atomic_read(&dev->open_count));
drm_close_helper(filp);
diff --git a/drivers/gpu/drm/drm_format_helper.c b/drivers/gpu/drm/drm_format_helper.c
index 74ff33c2ddaa..f93a4efcee90 100644
--- a/drivers/gpu/drm/drm_format_helper.c
+++ b/drivers/gpu/drm/drm_format_helper.c
@@ -322,7 +322,7 @@ EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb332);
static void drm_fb_xrgb8888_to_rgb565_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
- u16 *dbuf16 = dbuf;
+ __le16 *dbuf16 = dbuf;
const __le32 *sbuf32 = sbuf;
unsigned int x;
u16 val16;
@@ -333,14 +333,15 @@ static void drm_fb_xrgb8888_to_rgb565_line(void *dbuf, const void *sbuf, unsigne
val16 = ((pix & 0x00F80000) >> 8) |
((pix & 0x0000FC00) >> 5) |
((pix & 0x000000F8) >> 3);
- dbuf16[x] = val16;
+ dbuf16[x] = cpu_to_le16(val16);
}
}
+/* TODO: implement this helper as conversion to RGB565|BIG_ENDIAN */
static void drm_fb_xrgb8888_to_rgb565_swab_line(void *dbuf, const void *sbuf,
unsigned int pixels)
{
- u16 *dbuf16 = dbuf;
+ __le16 *dbuf16 = dbuf;
const __le32 *sbuf32 = sbuf;
unsigned int x;
u16 val16;
@@ -351,7 +352,7 @@ static void drm_fb_xrgb8888_to_rgb565_swab_line(void *dbuf, const void *sbuf,
val16 = ((pix & 0x00F80000) >> 8) |
((pix & 0x0000FC00) >> 5) |
((pix & 0x000000F8) >> 3);
- dbuf16[x] = swab16(val16);
+ dbuf16[x] = cpu_to_le16(swab16(val16));
}
}
@@ -395,6 +396,161 @@ void drm_fb_xrgb8888_to_rgb565(struct iosys_map *dst, const unsigned int *dst_pi
}
EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb565);
+static void drm_fb_xrgb8888_to_xrgb1555_line(void *dbuf, const void *sbuf, unsigned int pixels)
+{
+ __le16 *dbuf16 = dbuf;
+ const __le32 *sbuf32 = sbuf;
+ unsigned int x;
+ u16 val16;
+ u32 pix;
+
+ for (x = 0; x < pixels; x++) {
+ pix = le32_to_cpu(sbuf32[x]);
+ val16 = ((pix & 0x00f80000) >> 9) |
+ ((pix & 0x0000f800) >> 6) |
+ ((pix & 0x000000f8) >> 3);
+ dbuf16[x] = cpu_to_le16(val16);
+ }
+}
+
+/**
+ * drm_fb_xrgb8888_to_xrgb1555 - Convert XRGB8888 to XRGB1555 clip buffer
+ * @dst: Array of XRGB1555 destination buffers
+ * @dst_pitch: Array of numbers of bytes between the start of two consecutive scanlines
+ * within @dst; can be NULL if scanlines are stored next to each other.
+ * @src: Array of XRGB8888 source buffer
+ * @fb: DRM framebuffer
+ * @clip: Clip rectangle area to copy
+ *
+ * This function copies parts of a framebuffer to display memory and converts
+ * the color format during the process. The parameters @dst, @dst_pitch and
+ * @src refer to arrays. Each array must have at least as many entries as
+ * there are planes in @fb's format. Each entry stores the value for the
+ * format's respective color plane at the same index.
+ *
+ * This function does not apply clipping on @dst (i.e. the destination is at the
+ * top-left corner).
+ *
+ * Drivers can use this function for XRGB1555 devices that don't support
+ * XRGB8888 natively.
+ */
+void drm_fb_xrgb8888_to_xrgb1555(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip)
+{
+ static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
+ 2,
+ };
+
+ drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false,
+ drm_fb_xrgb8888_to_xrgb1555_line);
+}
+EXPORT_SYMBOL(drm_fb_xrgb8888_to_xrgb1555);
+
+static void drm_fb_xrgb8888_to_argb1555_line(void *dbuf, const void *sbuf, unsigned int pixels)
+{
+ __le16 *dbuf16 = dbuf;
+ const __le32 *sbuf32 = sbuf;
+ unsigned int x;
+ u16 val16;
+ u32 pix;
+
+ for (x = 0; x < pixels; x++) {
+ pix = le32_to_cpu(sbuf32[x]);
+ val16 = BIT(15) | /* set alpha bit */
+ ((pix & 0x00f80000) >> 9) |
+ ((pix & 0x0000f800) >> 6) |
+ ((pix & 0x000000f8) >> 3);
+ dbuf16[x] = cpu_to_le16(val16);
+ }
+}
+
+/**
+ * drm_fb_xrgb8888_to_argb1555 - Convert XRGB8888 to ARGB1555 clip buffer
+ * @dst: Array of ARGB1555 destination buffers
+ * @dst_pitch: Array of numbers of bytes between the start of two consecutive scanlines
+ * within @dst; can be NULL if scanlines are stored next to each other.
+ * @src: Array of XRGB8888 source buffer
+ * @fb: DRM framebuffer
+ * @clip: Clip rectangle area to copy
+ *
+ * This function copies parts of a framebuffer to display memory and converts
+ * the color format during the process. The parameters @dst, @dst_pitch and
+ * @src refer to arrays. Each array must have at least as many entries as
+ * there are planes in @fb's format. Each entry stores the value for the
+ * format's respective color plane at the same index.
+ *
+ * This function does not apply clipping on @dst (i.e. the destination is at the
+ * top-left corner).
+ *
+ * Drivers can use this function for ARGB1555 devices that don't support
+ * XRGB8888 natively. It sets an opaque alpha channel as part of the conversion.
+ */
+void drm_fb_xrgb8888_to_argb1555(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip)
+{
+ static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
+ 2,
+ };
+
+ drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false,
+ drm_fb_xrgb8888_to_argb1555_line);
+}
+EXPORT_SYMBOL(drm_fb_xrgb8888_to_argb1555);
+
+static void drm_fb_xrgb8888_to_rgba5551_line(void *dbuf, const void *sbuf, unsigned int pixels)
+{
+ __le16 *dbuf16 = dbuf;
+ const __le32 *sbuf32 = sbuf;
+ unsigned int x;
+ u16 val16;
+ u32 pix;
+
+ for (x = 0; x < pixels; x++) {
+ pix = le32_to_cpu(sbuf32[x]);
+ val16 = ((pix & 0x00f80000) >> 8) |
+ ((pix & 0x0000f800) >> 5) |
+ ((pix & 0x000000f8) >> 2) |
+ BIT(0); /* set alpha bit */
+ dbuf16[x] = cpu_to_le16(val16);
+ }
+}
+
+/**
+ * drm_fb_xrgb8888_to_rgba5551 - Convert XRGB8888 to RGBA5551 clip buffer
+ * @dst: Array of RGBA5551 destination buffers
+ * @dst_pitch: Array of numbers of bytes between the start of two consecutive scanlines
+ * within @dst; can be NULL if scanlines are stored next to each other.
+ * @src: Array of XRGB8888 source buffer
+ * @fb: DRM framebuffer
+ * @clip: Clip rectangle area to copy
+ *
+ * This function copies parts of a framebuffer to display memory and converts
+ * the color format during the process. The parameters @dst, @dst_pitch and
+ * @src refer to arrays. Each array must have at least as many entries as
+ * there are planes in @fb's format. Each entry stores the value for the
+ * format's respective color plane at the same index.
+ *
+ * This function does not apply clipping on @dst (i.e. the destination is at the
+ * top-left corner).
+ *
+ * Drivers can use this function for RGBA5551 devices that don't support
+ * XRGB8888 natively. It sets an opaque alpha channel as part of the conversion.
+ */
+void drm_fb_xrgb8888_to_rgba5551(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip)
+{
+ static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
+ 2,
+ };
+
+ drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false,
+ drm_fb_xrgb8888_to_rgba5551_line);
+}
+EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgba5551);
+
static void drm_fb_xrgb8888_to_rgb888_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
u8 *dbuf8 = dbuf;
@@ -404,6 +560,7 @@ static void drm_fb_xrgb8888_to_rgb888_line(void *dbuf, const void *sbuf, unsigne
for (x = 0; x < pixels; x++) {
pix = le32_to_cpu(sbuf32[x]);
+ /* write blue-green-red to output in little endianness */
*dbuf8++ = (pix & 0x000000FF) >> 0;
*dbuf8++ = (pix & 0x0000FF00) >> 8;
*dbuf8++ = (pix & 0x00FF0000) >> 16;
@@ -444,63 +601,112 @@ void drm_fb_xrgb8888_to_rgb888(struct iosys_map *dst, const unsigned int *dst_pi
}
EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb888);
-static void drm_fb_rgb565_to_xrgb8888_line(void *dbuf, const void *sbuf, unsigned int pixels)
+static void drm_fb_xrgb8888_to_argb8888_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
__le32 *dbuf32 = dbuf;
- const __le16 *sbuf16 = sbuf;
+ const __le32 *sbuf32 = sbuf;
unsigned int x;
+ u32 pix;
for (x = 0; x < pixels; x++) {
- u16 val16 = le16_to_cpu(sbuf16[x]);
- u32 val32 = ((val16 & 0xf800) << 8) |
- ((val16 & 0x07e0) << 5) |
- ((val16 & 0x001f) << 3);
- val32 = 0xff000000 | val32 |
- ((val32 >> 3) & 0x00070007) |
- ((val32 >> 2) & 0x00000300);
- dbuf32[x] = cpu_to_le32(val32);
+ pix = le32_to_cpu(sbuf32[x]);
+ pix |= GENMASK(31, 24); /* fill alpha bits */
+ dbuf32[x] = cpu_to_le32(pix);
}
}
-static void drm_fb_rgb565_to_xrgb8888(struct iosys_map *dst, const unsigned int *dst_pitch,
- const struct iosys_map *src,
- const struct drm_framebuffer *fb,
- const struct drm_rect *clip)
+/**
+ * drm_fb_xrgb8888_to_argb8888 - Convert XRGB8888 to ARGB8888 clip buffer
+ * @dst: Array of ARGB8888 destination buffers
+ * @dst_pitch: Array of numbers of bytes between the start of two consecutive scanlines
+ * within @dst; can be NULL if scanlines are stored next to each other.
+ * @src: Array of XRGB8888 source buffer
+ * @fb: DRM framebuffer
+ * @clip: Clip rectangle area to copy
+ *
+ * This function copies parts of a framebuffer to display memory and converts the
+ * color format during the process. The parameters @dst, @dst_pitch and @src refer
+ * to arrays. Each array must have at least as many entries as there are planes in
+ * @fb's format. Each entry stores the value for the format's respective color plane
+ * at the same index.
+ *
+ * This function does not apply clipping on @dst (i.e. the destination is at the
+ * top-left corner).
+ *
+ * Drivers can use this function for ARGB8888 devices that don't support XRGB8888
+ * natively. It sets an opaque alpha channel as part of the conversion.
+ */
+void drm_fb_xrgb8888_to_argb8888(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip)
{
static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
4,
};
drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false,
- drm_fb_rgb565_to_xrgb8888_line);
+ drm_fb_xrgb8888_to_argb8888_line);
}
+EXPORT_SYMBOL(drm_fb_xrgb8888_to_argb8888);
-static void drm_fb_rgb888_to_xrgb8888_line(void *dbuf, const void *sbuf, unsigned int pixels)
+static void drm_fb_xrgb8888_to_abgr8888_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
__le32 *dbuf32 = dbuf;
- const u8 *sbuf8 = sbuf;
+ const __le32 *sbuf32 = sbuf;
unsigned int x;
+ u32 pix;
for (x = 0; x < pixels; x++) {
- u8 r = *sbuf8++;
- u8 g = *sbuf8++;
- u8 b = *sbuf8++;
- u32 pix = 0xff000000 | (r << 16) | (g << 8) | b;
- dbuf32[x] = cpu_to_le32(pix);
+ pix = le32_to_cpu(sbuf32[x]);
+ pix = ((pix & 0x00ff0000) >> 16) << 0 |
+ ((pix & 0x0000ff00) >> 8) << 8 |
+ ((pix & 0x000000ff) >> 0) << 16 |
+ GENMASK(31, 24); /* fill alpha bits */
+ *dbuf32++ = cpu_to_le32(pix);
}
}
-static void drm_fb_rgb888_to_xrgb8888(struct iosys_map *dst, const unsigned int *dst_pitch,
- const struct iosys_map *src,
- const struct drm_framebuffer *fb,
- const struct drm_rect *clip)
+static void drm_fb_xrgb8888_to_abgr8888(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src,
+ const struct drm_framebuffer *fb,
+ const struct drm_rect *clip)
{
static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
4,
};
drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false,
- drm_fb_rgb888_to_xrgb8888_line);
+ drm_fb_xrgb8888_to_abgr8888_line);
+}
+
+static void drm_fb_xrgb8888_to_xbgr8888_line(void *dbuf, const void *sbuf, unsigned int pixels)
+{
+ __le32 *dbuf32 = dbuf;
+ const __le32 *sbuf32 = sbuf;
+ unsigned int x;
+ u32 pix;
+
+ for (x = 0; x < pixels; x++) {
+ pix = le32_to_cpu(sbuf32[x]);
+ pix = ((pix & 0x00ff0000) >> 16) << 0 |
+ ((pix & 0x0000ff00) >> 8) << 8 |
+ ((pix & 0x000000ff) >> 0) << 16 |
+ ((pix & 0xff000000) >> 24) << 24;
+ *dbuf32++ = cpu_to_le32(pix);
+ }
+}
+
+static void drm_fb_xrgb8888_to_xbgr8888(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src,
+ const struct drm_framebuffer *fb,
+ const struct drm_rect *clip)
+{
+ static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
+ 4,
+ };
+
+ drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false,
+ drm_fb_xrgb8888_to_xbgr8888_line);
}
static void drm_fb_xrgb8888_to_xrgb2101010_line(void *dbuf, const void *sbuf, unsigned int pixels)
@@ -555,6 +761,59 @@ void drm_fb_xrgb8888_to_xrgb2101010(struct iosys_map *dst, const unsigned int *d
}
EXPORT_SYMBOL(drm_fb_xrgb8888_to_xrgb2101010);
+static void drm_fb_xrgb8888_to_argb2101010_line(void *dbuf, const void *sbuf, unsigned int pixels)
+{
+ __le32 *dbuf32 = dbuf;
+ const __le32 *sbuf32 = sbuf;
+ unsigned int x;
+ u32 val32;
+ u32 pix;
+
+ for (x = 0; x < pixels; x++) {
+ pix = le32_to_cpu(sbuf32[x]);
+ val32 = ((pix & 0x000000ff) << 2) |
+ ((pix & 0x0000ff00) << 4) |
+ ((pix & 0x00ff0000) << 6);
+ pix = GENMASK(31, 30) | /* set alpha bits */
+ val32 | ((val32 >> 8) & 0x00300c03);
+ *dbuf32++ = cpu_to_le32(pix);
+ }
+}
+
+/**
+ * drm_fb_xrgb8888_to_argb2101010 - Convert XRGB8888 to ARGB2101010 clip buffer
+ * @dst: Array of ARGB2101010 destination buffers
+ * @dst_pitch: Array of numbers of bytes between the start of two consecutive scanlines
+ * within @dst; can be NULL if scanlines are stored next to each other.
+ * @src: Array of XRGB8888 source buffers
+ * @fb: DRM framebuffer
+ * @clip: Clip rectangle area to copy
+ *
+ * This function copies parts of a framebuffer to display memory and converts
+ * the color format during the process. The parameters @dst, @dst_pitch and
+ * @src refer to arrays. Each array must have at least as many entries as
+ * there are planes in @fb's format. Each entry stores the value for the
+ * format's respective color plane at the same index.
+ *
+ * This function does not apply clipping on @dst (i.e. the destination is at the
+ * top-left corner).
+ *
+ * Drivers can use this function for ARGB2101010 devices that don't support XRGB8888
+ * natively.
+ */
+void drm_fb_xrgb8888_to_argb2101010(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip)
+{
+ static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
+ 4,
+ };
+
+ drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false,
+ drm_fb_xrgb8888_to_argb2101010_line);
+}
+EXPORT_SYMBOL(drm_fb_xrgb8888_to_argb2101010);
+
static void drm_fb_xrgb8888_to_gray8_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
u8 *dbuf8 = dbuf;
@@ -641,50 +900,47 @@ int drm_fb_blit(struct iosys_map *dst, const unsigned int *dst_pitch, uint32_t d
{
uint32_t fb_format = fb->format->format;
- /* treat alpha channel like filler bits */
- if (fb_format == DRM_FORMAT_ARGB8888)
- fb_format = DRM_FORMAT_XRGB8888;
- if (dst_format == DRM_FORMAT_ARGB8888)
- dst_format = DRM_FORMAT_XRGB8888;
- if (fb_format == DRM_FORMAT_ARGB2101010)
- fb_format = DRM_FORMAT_XRGB2101010;
- if (dst_format == DRM_FORMAT_ARGB2101010)
- dst_format = DRM_FORMAT_XRGB2101010;
-
- if (dst_format == fb_format) {
+ if (fb_format == dst_format) {
drm_fb_memcpy(dst, dst_pitch, src, fb, clip);
return 0;
-
- } else if (dst_format == DRM_FORMAT_RGB565) {
- if (fb_format == DRM_FORMAT_XRGB8888) {
+ } else if (fb_format == (dst_format | DRM_FORMAT_BIG_ENDIAN)) {
+ drm_fb_swab(dst, dst_pitch, src, fb, clip, false);
+ return 0;
+ } else if (fb_format == (dst_format & ~DRM_FORMAT_BIG_ENDIAN)) {
+ drm_fb_swab(dst, dst_pitch, src, fb, clip, false);
+ return 0;
+ } else if (fb_format == DRM_FORMAT_XRGB8888) {
+ if (dst_format == DRM_FORMAT_RGB565) {
drm_fb_xrgb8888_to_rgb565(dst, dst_pitch, src, fb, clip, false);
return 0;
- }
- } else if (dst_format == (DRM_FORMAT_RGB565 | DRM_FORMAT_BIG_ENDIAN)) {
- if (fb_format == DRM_FORMAT_RGB565) {
- drm_fb_swab(dst, dst_pitch, src, fb, clip, false);
+ } else if (dst_format == DRM_FORMAT_XRGB1555) {
+ drm_fb_xrgb8888_to_xrgb1555(dst, dst_pitch, src, fb, clip);
return 0;
- }
- } else if (dst_format == DRM_FORMAT_RGB888) {
- if (fb_format == DRM_FORMAT_XRGB8888) {
+ } else if (dst_format == DRM_FORMAT_ARGB1555) {
+ drm_fb_xrgb8888_to_argb1555(dst, dst_pitch, src, fb, clip);
+ return 0;
+ } else if (dst_format == DRM_FORMAT_RGBA5551) {
+ drm_fb_xrgb8888_to_rgba5551(dst, dst_pitch, src, fb, clip);
+ return 0;
+ } else if (dst_format == DRM_FORMAT_RGB888) {
drm_fb_xrgb8888_to_rgb888(dst, dst_pitch, src, fb, clip);
return 0;
- }
- } else if (dst_format == DRM_FORMAT_XRGB8888) {
- if (fb_format == DRM_FORMAT_RGB888) {
- drm_fb_rgb888_to_xrgb8888(dst, dst_pitch, src, fb, clip);
+ } else if (dst_format == DRM_FORMAT_ARGB8888) {
+ drm_fb_xrgb8888_to_argb8888(dst, dst_pitch, src, fb, clip);
return 0;
- } else if (fb_format == DRM_FORMAT_RGB565) {
- drm_fb_rgb565_to_xrgb8888(dst, dst_pitch, src, fb, clip);
+ } else if (dst_format == DRM_FORMAT_XBGR8888) {
+ drm_fb_xrgb8888_to_xbgr8888(dst, dst_pitch, src, fb, clip);
return 0;
- }
- } else if (dst_format == DRM_FORMAT_XRGB2101010) {
- if (fb_format == DRM_FORMAT_XRGB8888) {
+ } else if (dst_format == DRM_FORMAT_ABGR8888) {
+ drm_fb_xrgb8888_to_abgr8888(dst, dst_pitch, src, fb, clip);
+ return 0;
+ } else if (dst_format == DRM_FORMAT_XRGB2101010) {
drm_fb_xrgb8888_to_xrgb2101010(dst, dst_pitch, src, fb, clip);
return 0;
- }
- } else if (dst_format == DRM_FORMAT_BGRX8888) {
- if (fb_format == DRM_FORMAT_XRGB8888) {
+ } else if (dst_format == DRM_FORMAT_ARGB2101010) {
+ drm_fb_xrgb8888_to_argb2101010(dst, dst_pitch, src, fb, clip);
+ return 0;
+ } else if (dst_format == DRM_FORMAT_BGRX8888) {
drm_fb_swab(dst, dst_pitch, src, fb, clip, false);
return 0;
}
@@ -805,6 +1061,39 @@ void drm_fb_xrgb8888_to_mono(struct iosys_map *dst, const unsigned int *dst_pitc
}
EXPORT_SYMBOL(drm_fb_xrgb8888_to_mono);
+static uint32_t drm_fb_nonalpha_fourcc(uint32_t fourcc)
+{
+ /* only handle formats with depth != 0 and alpha channel */
+ switch (fourcc) {
+ case DRM_FORMAT_ARGB1555:
+ return DRM_FORMAT_XRGB1555;
+ case DRM_FORMAT_ABGR1555:
+ return DRM_FORMAT_XBGR1555;
+ case DRM_FORMAT_RGBA5551:
+ return DRM_FORMAT_RGBX5551;
+ case DRM_FORMAT_BGRA5551:
+ return DRM_FORMAT_BGRX5551;
+ case DRM_FORMAT_ARGB8888:
+ return DRM_FORMAT_XRGB8888;
+ case DRM_FORMAT_ABGR8888:
+ return DRM_FORMAT_XBGR8888;
+ case DRM_FORMAT_RGBA8888:
+ return DRM_FORMAT_RGBX8888;
+ case DRM_FORMAT_BGRA8888:
+ return DRM_FORMAT_BGRX8888;
+ case DRM_FORMAT_ARGB2101010:
+ return DRM_FORMAT_XRGB2101010;
+ case DRM_FORMAT_ABGR2101010:
+ return DRM_FORMAT_XBGR2101010;
+ case DRM_FORMAT_RGBA1010102:
+ return DRM_FORMAT_RGBX1010102;
+ case DRM_FORMAT_BGRA1010102:
+ return DRM_FORMAT_BGRX1010102;
+ }
+
+ return fourcc;
+}
+
static bool is_listed_fourcc(const uint32_t *fourccs, size_t nfourccs, uint32_t fourcc)
{
const uint32_t *fourccs_end = fourccs + nfourccs;
@@ -817,73 +1106,48 @@ static bool is_listed_fourcc(const uint32_t *fourccs, size_t nfourccs, uint32_t
return false;
}
-static const uint32_t conv_from_xrgb8888[] = {
- DRM_FORMAT_XRGB8888,
- DRM_FORMAT_ARGB8888,
- DRM_FORMAT_XRGB2101010,
- DRM_FORMAT_ARGB2101010,
- DRM_FORMAT_RGB565,
- DRM_FORMAT_RGB888,
-};
-
-static const uint32_t conv_from_rgb565_888[] = {
- DRM_FORMAT_XRGB8888,
- DRM_FORMAT_ARGB8888,
-};
-
-static bool is_conversion_supported(uint32_t from, uint32_t to)
-{
- switch (from) {
- case DRM_FORMAT_XRGB8888:
- case DRM_FORMAT_ARGB8888:
- return is_listed_fourcc(conv_from_xrgb8888, ARRAY_SIZE(conv_from_xrgb8888), to);
- case DRM_FORMAT_RGB565:
- case DRM_FORMAT_RGB888:
- return is_listed_fourcc(conv_from_rgb565_888, ARRAY_SIZE(conv_from_rgb565_888), to);
- case DRM_FORMAT_XRGB2101010:
- return to == DRM_FORMAT_ARGB2101010;
- case DRM_FORMAT_ARGB2101010:
- return to == DRM_FORMAT_XRGB2101010;
- default:
- return false;
- }
-}
-
/**
* drm_fb_build_fourcc_list - Filters a list of supported color formats against
* the device's native formats
* @dev: DRM device
* @native_fourccs: 4CC codes of natively supported color formats
* @native_nfourccs: The number of entries in @native_fourccs
- * @driver_fourccs: 4CC codes of all driver-supported color formats
- * @driver_nfourccs: The number of entries in @driver_fourccs
* @fourccs_out: Returns 4CC codes of supported color formats
* @nfourccs_out: The number of available entries in @fourccs_out
*
* This function create a list of supported color format from natively
- * supported formats and the emulated formats.
+ * supported formats and additional emulated formats.
* At a minimum, most userspace programs expect at least support for
* XRGB8888 on the primary plane. Devices that have to emulate the
* format, and possibly others, can use drm_fb_build_fourcc_list() to
* create a list of supported color formats. The returned list can
* be handed over to drm_universal_plane_init() et al. Native formats
- * will go before emulated formats. Other heuristics might be applied
+ * will go before emulated formats. Native formats with alpha channel
+ * will be replaced by such without, as primary planes usually don't
+ * support alpha. Other heuristics might be applied
* to optimize the order. Formats near the beginning of the list are
- * usually preferred over formats near the end of the list. Formats
- * without conversion helpers will be skipped. New drivers should only
- * pass in XRGB8888 and avoid exposing additional emulated formats.
+ * usually preferred over formats near the end of the list.
*
* Returns:
* The number of color-formats 4CC codes returned in @fourccs_out.
*/
size_t drm_fb_build_fourcc_list(struct drm_device *dev,
const u32 *native_fourccs, size_t native_nfourccs,
- const u32 *driver_fourccs, size_t driver_nfourccs,
u32 *fourccs_out, size_t nfourccs_out)
{
+ /*
+ * XRGB8888 is the default fallback format for most of userspace
+ * and it's currently the only format that should be emulated for
+ * the primary plane. Only if there's ever another default fallback,
+ * it should be added here.
+ */
+ static const uint32_t extra_fourccs[] = {
+ DRM_FORMAT_XRGB8888,
+ };
+ static const size_t extra_nfourccs = ARRAY_SIZE(extra_fourccs);
+
u32 *fourccs = fourccs_out;
const u32 *fourccs_end = fourccs_out + nfourccs_out;
- uint32_t native_format = 0;
size_t i;
/*
@@ -891,7 +1155,12 @@ size_t drm_fb_build_fourcc_list(struct drm_device *dev,
*/
for (i = 0; i < native_nfourccs; ++i) {
- u32 fourcc = native_fourccs[i];
+ /*
+ * Several DTs, boot loaders and firmware report native
+ * alpha formats that are non-alpha formats instead. So
+ * replace alpha formats by non-alpha formats.
+ */
+ u32 fourcc = drm_fb_nonalpha_fourcc(native_fourccs[i]);
if (is_listed_fourcc(fourccs_out, fourccs - fourccs_out, fourcc)) {
continue; /* skip duplicate entries */
@@ -902,14 +1171,6 @@ size_t drm_fb_build_fourcc_list(struct drm_device *dev,
drm_dbg_kms(dev, "adding native format %p4cc\n", &fourcc);
- /*
- * There should only be one native format with the current API.
- * This API needs to be refactored to correctly support arbitrary
- * sets of native formats, since it needs to report which native
- * format to use for each emulated format.
- */
- if (!native_format)
- native_format = fourcc;
*fourccs = fourcc;
++fourccs;
}
@@ -918,17 +1179,14 @@ size_t drm_fb_build_fourcc_list(struct drm_device *dev,
* The extra formats, emulated by the driver, go second.
*/
- for (i = 0; (i < driver_nfourccs) && (fourccs < fourccs_end); ++i) {
- u32 fourcc = driver_fourccs[i];
+ for (i = 0; (i < extra_nfourccs) && (fourccs < fourccs_end); ++i) {
+ u32 fourcc = extra_fourccs[i];
if (is_listed_fourcc(fourccs_out, fourccs - fourccs_out, fourcc)) {
continue; /* skip duplicate and native entries */
} else if (fourccs == fourccs_end) {
drm_warn(dev, "Ignoring emulated format %p4cc\n", &fourcc);
continue; /* end of available output buffer */
- } else if (!is_conversion_supported(fourcc, native_format)) {
- drm_dbg_kms(dev, "Unsupported emulated format %p4cc\n", &fourcc);
- continue; /* format is not supported for conversion */
}
drm_dbg_kms(dev, "adding emulated format %p4cc\n", &fourcc);
diff --git a/drivers/gpu/drm/drm_fourcc.c b/drivers/gpu/drm/drm_fourcc.c
index 6242dfbe9240..0f17dfa8702b 100644
--- a/drivers/gpu/drm/drm_fourcc.c
+++ b/drivers/gpu/drm/drm_fourcc.c
@@ -190,6 +190,10 @@ const struct drm_format_info *__drm_format_info(u32 format)
{ .format = DRM_FORMAT_BGRA5551, .depth = 15, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
{ .format = DRM_FORMAT_RGB565, .depth = 16, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_BGR565, .depth = 16, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+#ifdef __BIG_ENDIAN
+ { .format = DRM_FORMAT_XRGB1555 | DRM_FORMAT_BIG_ENDIAN, .depth = 15, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_RGB565 | DRM_FORMAT_BIG_ENDIAN, .depth = 16, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+#endif
{ .format = DRM_FORMAT_RGB888, .depth = 24, .num_planes = 1, .cpp = { 3, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_BGR888, .depth = 24, .num_planes = 1, .cpp = { 3, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c
index 2dd97473ca10..aff3746dedfb 100644
--- a/drivers/gpu/drm/drm_framebuffer.c
+++ b/drivers/gpu/drm/drm_framebuffer.c
@@ -1203,8 +1203,8 @@ void drm_framebuffer_print_info(struct drm_printer *p, unsigned int indent,
#ifdef CONFIG_DEBUG_FS
static int drm_framebuffer_info(struct seq_file *m, void *data)
{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
+ struct drm_debugfs_entry *entry = m->private;
+ struct drm_device *dev = entry->dev;
struct drm_printer p = drm_seq_file_printer(m);
struct drm_framebuffer *fb;
@@ -1218,14 +1218,13 @@ static int drm_framebuffer_info(struct seq_file *m, void *data)
return 0;
}
-static const struct drm_info_list drm_framebuffer_debugfs_list[] = {
+static const struct drm_debugfs_info drm_framebuffer_debugfs_list[] = {
{ "framebuffer", drm_framebuffer_info, 0 },
};
void drm_framebuffer_debugfs_init(struct drm_minor *minor)
{
- drm_debugfs_create_files(drm_framebuffer_debugfs_list,
- ARRAY_SIZE(drm_framebuffer_debugfs_list),
- minor->debugfs_root, minor);
+ drm_debugfs_add_files(minor->dev, drm_framebuffer_debugfs_list,
+ ARRAY_SIZE(drm_framebuffer_debugfs_list));
}
#endif
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index b8db675e7fb5..7a3cb08dc942 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -170,6 +170,20 @@ void drm_gem_private_object_init(struct drm_device *dev,
EXPORT_SYMBOL(drm_gem_private_object_init);
/**
+ * drm_gem_private_object_fini - Finalize a failed drm_gem_object
+ * @obj: drm_gem_object
+ *
+ * Uninitialize an already allocated GEM object when it initialized failed
+ */
+void drm_gem_private_object_fini(struct drm_gem_object *obj)
+{
+ WARN_ON(obj->dma_buf);
+
+ dma_resv_fini(&obj->_resv);
+}
+EXPORT_SYMBOL(drm_gem_private_object_fini);
+
+/**
* drm_gem_object_handle_free - release resources bound to userspace handles
* @obj: GEM object to clean up.
*
@@ -930,12 +944,11 @@ drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
void
drm_gem_object_release(struct drm_gem_object *obj)
{
- WARN_ON(obj->dma_buf);
-
if (obj->filp)
fput(obj->filp);
- dma_resv_fini(&obj->_resv);
+ drm_gem_private_object_fini(obj);
+
drm_gem_free_mmap_offset(obj);
drm_gem_lru_remove(obj);
}
@@ -1047,7 +1060,7 @@ int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
goto err_drm_gem_object_put;
}
- vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
+ vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
}
diff --git a/drivers/gpu/drm/drm_gem_atomic_helper.c b/drivers/gpu/drm/drm_gem_atomic_helper.c
index e42800718f51..5d4b9cd077f7 100644
--- a/drivers/gpu/drm/drm_gem_atomic_helper.c
+++ b/drivers/gpu/drm/drm_gem_atomic_helper.c
@@ -26,11 +26,8 @@
* call drm_gem_plane_helper_prepare_fb() from their implementation of
* struct &drm_plane_helper.prepare_fb . It sets the plane's fence from
* the framebuffer so that the DRM core can synchronize access automatically.
- *
* drm_gem_plane_helper_prepare_fb() can also be used directly as
- * implementation of prepare_fb. For drivers based on
- * struct drm_simple_display_pipe, drm_gem_simple_display_pipe_prepare_fb()
- * provides equivalent functionality.
+ * implementation of prepare_fb.
*
* .. code-block:: c
*
@@ -41,11 +38,6 @@
* . prepare_fb = drm_gem_plane_helper_prepare_fb,
* };
*
- * struct drm_simple_display_pipe_funcs driver_pipe_funcs = {
- * ...,
- * . prepare_fb = drm_gem_simple_display_pipe_prepare_fb,
- * };
- *
* A driver using a shadow buffer copies the content of the shadow buffers
* into the HW's framebuffer memory during an atomic update. This requires
* a mapping of the shadow buffer into kernel address space. The mappings
@@ -205,27 +197,6 @@ error:
}
EXPORT_SYMBOL_GPL(drm_gem_plane_helper_prepare_fb);
-/**
- * drm_gem_simple_display_pipe_prepare_fb - prepare_fb helper for &drm_simple_display_pipe
- * @pipe: Simple display pipe
- * @plane_state: Plane state
- *
- * This function uses drm_gem_plane_helper_prepare_fb() to extract the fences
- * from &drm_gem_object.resv and attaches them to the plane state for the atomic
- * helper to wait on. This is necessary to correctly implement implicit
- * synchronization for any buffers shared as a struct &dma_buf. Drivers can use
- * this as their &drm_simple_display_pipe_funcs.prepare_fb callback.
- *
- * See drm_gem_plane_helper_prepare_fb() for a discussion of implicit and
- * explicit fencing in atomic modeset updates.
- */
-int drm_gem_simple_display_pipe_prepare_fb(struct drm_simple_display_pipe *pipe,
- struct drm_plane_state *plane_state)
-{
- return drm_gem_plane_helper_prepare_fb(&pipe->plane, plane_state);
-}
-EXPORT_SYMBOL(drm_gem_simple_display_pipe_prepare_fb);
-
/*
* Shadow-buffered Planes
*/
diff --git a/drivers/gpu/drm/drm_gem_dma_helper.c b/drivers/gpu/drm/drm_gem_dma_helper.c
index 1e658c448366..870b90b78bc4 100644
--- a/drivers/gpu/drm/drm_gem_dma_helper.c
+++ b/drivers/gpu/drm/drm_gem_dma_helper.c
@@ -477,8 +477,8 @@ drm_gem_dma_prime_import_sg_table(struct drm_device *dev,
dma_obj->dma_addr = sg_dma_address(sgt->sgl);
dma_obj->sgt = sgt;
- DRM_DEBUG_PRIME("dma_addr = %pad, size = %zu\n", &dma_obj->dma_addr,
- attach->dmabuf->size);
+ drm_dbg_prime(dev, "dma_addr = %pad, size = %zu\n", &dma_obj->dma_addr,
+ attach->dmabuf->size);
return &dma_obj->base;
}
@@ -530,8 +530,7 @@ int drm_gem_dma_mmap(struct drm_gem_dma_object *dma_obj, struct vm_area_struct *
* the whole buffer.
*/
vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);
- vma->vm_flags &= ~VM_PFNMAP;
- vma->vm_flags |= VM_DONTEXPAND;
+ vm_flags_mod(vma, VM_DONTEXPAND, VM_PFNMAP);
if (dma_obj->map_noncoherent) {
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index b602cd72a120..7e5c6a8d0212 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -79,8 +79,10 @@ __drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private)
} else {
ret = drm_gem_object_init(dev, obj, size);
}
- if (ret)
+ if (ret) {
+ drm_gem_private_object_fini(obj);
goto err_free;
+ }
ret = drm_gem_create_mmap_offset(obj);
if (ret)
@@ -413,7 +415,7 @@ void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem,
}
EXPORT_SYMBOL(drm_gem_shmem_vunmap);
-static struct drm_gem_shmem_object *
+static int
drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
struct drm_device *dev, size_t size,
uint32_t *handle)
@@ -423,7 +425,7 @@ drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
shmem = drm_gem_shmem_create(dev, size);
if (IS_ERR(shmem))
- return shmem;
+ return PTR_ERR(shmem);
/*
* Allocate an id of idr table where the obj is registered
@@ -432,10 +434,8 @@ drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
ret = drm_gem_handle_create(file_priv, &shmem->base, handle);
/* drop reference from allocate - handle holds it now. */
drm_gem_object_put(&shmem->base);
- if (ret)
- return ERR_PTR(ret);
- return shmem;
+ return ret;
}
/* Update madvise status, returns true if not purged, else
@@ -518,7 +518,6 @@ int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args)
{
u32 min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
- struct drm_gem_shmem_object *shmem;
if (!args->pitch || !args->size) {
args->pitch = min_pitch;
@@ -531,9 +530,7 @@ int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
args->size = PAGE_ALIGN(args->pitch * args->height);
}
- shmem = drm_gem_shmem_create_with_handle(file, dev, args->size, &args->handle);
-
- return PTR_ERR_OR_ZERO(shmem);
+ return drm_gem_shmem_create_with_handle(file, dev, args->size, &args->handle);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_dumb_create);
@@ -633,7 +630,7 @@ int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct
if (ret)
return ret;
- vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
+ vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
if (shmem->map_wc)
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
@@ -681,23 +678,7 @@ struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_shmem_object *shmem)
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_sg_table);
-/**
- * drm_gem_shmem_get_pages_sgt - Pin pages, dma map them, and return a
- * scatter/gather table for a shmem GEM object.
- * @shmem: shmem GEM object
- *
- * This function returns a scatter/gather table suitable for driver usage. If
- * the sg table doesn't exist, the pages are pinned, dma-mapped, and a sg
- * table created.
- *
- * This is the main function for drivers to get at backing storage, and it hides
- * and difference between dma-buf imported and natively allocated objects.
- * drm_gem_shmem_get_sg_table() should not be directly called by drivers.
- *
- * Returns:
- * A pointer to the scatter/gather table of pinned pages or errno on failure.
- */
-struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_shmem_object *shmem)
+static struct sg_table *drm_gem_shmem_get_pages_sgt_locked(struct drm_gem_shmem_object *shmem)
{
struct drm_gem_object *obj = &shmem->base;
int ret;
@@ -708,7 +689,7 @@ struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_shmem_object *shmem)
WARN_ON(obj->import_attach);
- ret = drm_gem_shmem_get_pages(shmem);
+ ret = drm_gem_shmem_get_pages_locked(shmem);
if (ret)
return ERR_PTR(ret);
@@ -730,10 +711,40 @@ err_free_sgt:
sg_free_table(sgt);
kfree(sgt);
err_put_pages:
- drm_gem_shmem_put_pages(shmem);
+ drm_gem_shmem_put_pages_locked(shmem);
return ERR_PTR(ret);
}
-EXPORT_SYMBOL_GPL(drm_gem_shmem_get_pages_sgt);
+
+/**
+ * drm_gem_shmem_get_pages_sgt - Pin pages, dma map them, and return a
+ * scatter/gather table for a shmem GEM object.
+ * @shmem: shmem GEM object
+ *
+ * This function returns a scatter/gather table suitable for driver usage. If
+ * the sg table doesn't exist, the pages are pinned, dma-mapped, and a sg
+ * table created.
+ *
+ * This is the main function for drivers to get at backing storage, and it hides
+ * and difference between dma-buf imported and natively allocated objects.
+ * drm_gem_shmem_get_sg_table() should not be directly called by drivers.
+ *
+ * Returns:
+ * A pointer to the scatter/gather table of pinned pages or errno on failure.
+ */
+struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_shmem_object *shmem)
+{
+ int ret;
+ struct sg_table *sgt;
+
+ ret = mutex_lock_interruptible(&shmem->pages_lock);
+ if (ret)
+ return ERR_PTR(ret);
+ sgt = drm_gem_shmem_get_pages_sgt_locked(shmem);
+ mutex_unlock(&shmem->pages_lock);
+
+ return sgt;
+}
+EXPORT_SYMBOL(drm_gem_shmem_get_pages_sgt);
/**
* drm_gem_shmem_prime_import_sg_table - Produce a shmem GEM object from
@@ -764,7 +775,7 @@ drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
shmem->sgt = sgt;
- DRM_DEBUG_PRIME("size = %zu\n", size);
+ drm_dbg_prime(dev, "size = %zu\n", size);
return &shmem->base;
}
diff --git a/drivers/gpu/drm/drm_gem_ttm_helper.c b/drivers/gpu/drm/drm_gem_ttm_helper.c
index d5962a34c01d..3734aa2d1c5b 100644
--- a/drivers/gpu/drm/drm_gem_ttm_helper.c
+++ b/drivers/gpu/drm/drm_gem_ttm_helper.c
@@ -3,6 +3,8 @@
#include <linux/module.h>
#include <drm/drm_gem_ttm_helper.h>
+#include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_tt.h>
/**
* DOC: overview
diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c
index b6c7e3803bb3..d40b3edb52d0 100644
--- a/drivers/gpu/drm/drm_gem_vram_helper.c
+++ b/drivers/gpu/drm/drm_gem_vram_helper.c
@@ -19,6 +19,7 @@
#include <drm/drm_simple_kms_helper.h>
#include <drm/ttm/ttm_range_manager.h>
+#include <drm/ttm/ttm_tt.h>
static const struct drm_gem_object_funcs drm_gem_vram_object_funcs;
@@ -956,8 +957,8 @@ static struct ttm_device_funcs bo_driver = {
static int drm_vram_mm_debugfs(struct seq_file *m, void *data)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_vram_mm *vmm = node->minor->dev->vram_mm;
+ struct drm_debugfs_entry *entry = m->private;
+ struct drm_vram_mm *vmm = entry->dev->vram_mm;
struct ttm_resource_manager *man = ttm_manager_type(&vmm->bdev, TTM_PL_VRAM);
struct drm_printer p = drm_seq_file_printer(m);
@@ -965,7 +966,7 @@ static int drm_vram_mm_debugfs(struct seq_file *m, void *data)
return 0;
}
-static const struct drm_info_list drm_vram_mm_debugfs_list[] = {
+static const struct drm_debugfs_info drm_vram_mm_debugfs_list[] = {
{ "vram-mm", drm_vram_mm_debugfs, 0, NULL },
};
@@ -977,9 +978,8 @@ static const struct drm_info_list drm_vram_mm_debugfs_list[] = {
*/
void drm_vram_mm_debugfs_init(struct drm_minor *minor)
{
- drm_debugfs_create_files(drm_vram_mm_debugfs_list,
- ARRAY_SIZE(drm_vram_mm_debugfs_list),
- minor->debugfs_root, minor);
+ drm_debugfs_add_files(minor->dev, drm_vram_mm_debugfs_list,
+ ARRAY_SIZE(drm_vram_mm_debugfs_list));
}
EXPORT_SYMBOL(drm_vram_mm_debugfs_init);
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index 5ea5e260118c..ed2103ee272c 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -186,6 +186,7 @@ int drm_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
int drm_debugfs_init(struct drm_minor *minor, int minor_id,
struct dentry *root);
void drm_debugfs_cleanup(struct drm_minor *minor);
+void drm_debugfs_late_register(struct drm_device *dev);
void drm_debugfs_connector_add(struct drm_connector *connector);
void drm_debugfs_connector_remove(struct drm_connector *connector);
void drm_debugfs_crtc_add(struct drm_crtc *crtc);
@@ -202,6 +203,10 @@ static inline void drm_debugfs_cleanup(struct drm_minor *minor)
{
}
+static inline void drm_debugfs_late_register(struct drm_device *dev)
+{
+}
+
static inline void drm_debugfs_connector_add(struct drm_connector *connector)
{
}
diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
index 5d82891c3222..49a743f62b4a 100644
--- a/drivers/gpu/drm/drm_ioc32.c
+++ b/drivers/gpu/drm/drm_ioc32.c
@@ -972,6 +972,7 @@ long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
unsigned int nr = DRM_IOCTL_NR(cmd);
struct drm_file *file_priv = filp->private_data;
+ struct drm_device *dev = file_priv->minor->dev;
drm_ioctl_compat_t *fn;
int ret;
@@ -986,14 +987,14 @@ long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
if (!fn)
return drm_ioctl(filp, cmd, arg);
- DRM_DEBUG("comm=\"%s\", pid=%d, dev=0x%lx, auth=%d, %s\n",
- current->comm, task_pid_nr(current),
- (long)old_encode_dev(file_priv->minor->kdev->devt),
- file_priv->authenticated,
- drm_compat_ioctls[nr].name);
+ drm_dbg_core(dev, "comm=\"%s\", pid=%d, dev=0x%lx, auth=%d, %s\n",
+ current->comm, task_pid_nr(current),
+ (long)old_encode_dev(file_priv->minor->kdev->devt),
+ file_priv->authenticated,
+ drm_compat_ioctls[nr].name);
ret = (*fn)(filp, cmd, arg);
if (ret)
- DRM_DEBUG("ret = %d\n", ret);
+ drm_dbg_core(dev, "ret = %d\n", ret);
return ret;
}
EXPORT_SYMBOL(drm_compat_ioctl);
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index ca2a6e6101dc..7c9d66ee917d 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -440,7 +440,7 @@ done:
int drm_noop(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
- DRM_DEBUG("\n");
+ drm_dbg_core(dev, "\n");
return 0;
}
EXPORT_SYMBOL(drm_noop);
@@ -856,16 +856,16 @@ long drm_ioctl(struct file *filp,
out_size = 0;
ksize = max(max(in_size, out_size), drv_size);
- DRM_DEBUG("comm=\"%s\" pid=%d, dev=0x%lx, auth=%d, %s\n",
- current->comm, task_pid_nr(current),
- (long)old_encode_dev(file_priv->minor->kdev->devt),
- file_priv->authenticated, ioctl->name);
+ drm_dbg_core(dev, "comm=\"%s\" pid=%d, dev=0x%lx, auth=%d, %s\n",
+ current->comm, task_pid_nr(current),
+ (long)old_encode_dev(file_priv->minor->kdev->devt),
+ file_priv->authenticated, ioctl->name);
/* Do not trust userspace, use our own definition */
func = ioctl->func;
if (unlikely(!func)) {
- DRM_DEBUG("no function\n");
+ drm_dbg_core(dev, "no function\n");
retcode = -EINVAL;
goto err_i1;
}
@@ -894,16 +894,17 @@ long drm_ioctl(struct file *filp,
err_i1:
if (!ioctl)
- DRM_DEBUG("invalid ioctl: comm=\"%s\", pid=%d, dev=0x%lx, auth=%d, cmd=0x%02x, nr=0x%02x\n",
- current->comm, task_pid_nr(current),
- (long)old_encode_dev(file_priv->minor->kdev->devt),
- file_priv->authenticated, cmd, nr);
+ drm_dbg_core(dev,
+ "invalid ioctl: comm=\"%s\", pid=%d, dev=0x%lx, auth=%d, cmd=0x%02x, nr=0x%02x\n",
+ current->comm, task_pid_nr(current),
+ (long)old_encode_dev(file_priv->minor->kdev->devt),
+ file_priv->authenticated, cmd, nr);
if (kdata != stack_kdata)
kfree(kdata);
if (retcode)
- DRM_DEBUG("comm=\"%s\", pid=%d, ret=%d\n", current->comm,
- task_pid_nr(current), retcode);
+ drm_dbg_core(dev, "comm=\"%s\", pid=%d, ret=%d\n",
+ current->comm, task_pid_nr(current), retcode);
return retcode;
}
EXPORT_SYMBOL(drm_ioctl);
diff --git a/drivers/gpu/drm/drm_lease.c b/drivers/gpu/drm/drm_lease.c
index d72c2fac0ff1..150fe1555068 100644
--- a/drivers/gpu/drm/drm_lease.c
+++ b/drivers/gpu/drm/drm_lease.c
@@ -6,7 +6,7 @@
#include <linux/uaccess.h>
#include <drm/drm_auth.h>
-#include <drm/drm_crtc_helper.h>
+#include <drm/drm_crtc.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_lease.h>
@@ -213,11 +213,11 @@ static struct drm_master *drm_lease_create(struct drm_master *lessor, struct idr
int id;
void *entry;
- DRM_DEBUG_LEASE("lessor %d\n", lessor->lessee_id);
+ drm_dbg_lease(dev, "lessor %d\n", lessor->lessee_id);
lessee = drm_master_create(lessor->dev);
if (!lessee) {
- DRM_DEBUG_LEASE("drm_master_create failed\n");
+ drm_dbg_lease(dev, "drm_master_create failed\n");
return ERR_PTR(-ENOMEM);
}
@@ -231,7 +231,7 @@ static struct drm_master *drm_lease_create(struct drm_master *lessor, struct idr
error = -EBUSY;
if (error != 0) {
- DRM_DEBUG_LEASE("object %d failed %d\n", object, error);
+ drm_dbg_lease(dev, "object %d failed %d\n", object, error);
goto out_lessee;
}
}
@@ -249,7 +249,8 @@ static struct drm_master *drm_lease_create(struct drm_master *lessor, struct idr
/* Move the leases over */
lessee->leases = *leases;
- DRM_DEBUG_LEASE("new lessee %d %p, lessor %d %p\n", lessee->lessee_id, lessee, lessor->lessee_id, lessor);
+ drm_dbg_lease(dev, "new lessee %d %p, lessor %d %p\n",
+ lessee->lessee_id, lessee, lessor->lessee_id, lessor);
mutex_unlock(&dev->mode_config.idr_mutex);
return lessee;
@@ -268,7 +269,7 @@ void drm_lease_destroy(struct drm_master *master)
mutex_lock(&dev->mode_config.idr_mutex);
- DRM_DEBUG_LEASE("drm_lease_destroy %d\n", master->lessee_id);
+ drm_dbg_lease(dev, "drm_lease_destroy %d\n", master->lessee_id);
/* This master is referenced by all lessees, hence it cannot be destroyed
* until all of them have been
@@ -277,7 +278,8 @@ void drm_lease_destroy(struct drm_master *master)
/* Remove this master from the lessee idr in the owner */
if (master->lessee_id != 0) {
- DRM_DEBUG_LEASE("remove master %d from device list of lessees\n", master->lessee_id);
+ drm_dbg_lease(dev, "remove master %d from device list of lessees\n",
+ master->lessee_id);
idr_remove(&(drm_lease_owner(master)->lessee_idr), master->lessee_id);
}
@@ -292,7 +294,7 @@ void drm_lease_destroy(struct drm_master *master)
drm_master_put(&master->lessor);
}
- DRM_DEBUG_LEASE("drm_lease_destroy done %d\n", master->lessee_id);
+ drm_dbg_lease(dev, "drm_lease_destroy done %d\n", master->lessee_id);
}
static void _drm_lease_revoke(struct drm_master *top)
@@ -308,7 +310,8 @@ static void _drm_lease_revoke(struct drm_master *top)
* the tree is fully connected, we can do this without recursing
*/
for (;;) {
- DRM_DEBUG_LEASE("revoke leases for %p %d\n", master, master->lessee_id);
+ drm_dbg_lease(master->dev, "revoke leases for %p %d\n",
+ master, master->lessee_id);
/* Evacuate the lease */
idr_for_each_entry(&master->leases, entry, object)
@@ -408,7 +411,7 @@ static int fill_object_idr(struct drm_device *dev,
ret = validate_lease(dev, object_count, objects, universal_planes);
if (ret) {
- DRM_DEBUG_LEASE("lease validation failed\n");
+ drm_dbg_lease(dev, "lease validation failed\n");
goto out_free_objects;
}
@@ -418,7 +421,7 @@ static int fill_object_idr(struct drm_device *dev,
struct drm_mode_object *obj = objects[o];
u32 object_id = objects[o]->id;
- DRM_DEBUG_LEASE("Adding object %d to lease\n", object_id);
+ drm_dbg_lease(dev, "Adding object %d to lease\n", object_id);
/*
* We're using an IDR to hold the set of leased
@@ -430,8 +433,8 @@ static int fill_object_idr(struct drm_device *dev,
*/
ret = idr_alloc(leases, &drm_lease_idr_object , object_id, object_id + 1, GFP_KERNEL);
if (ret < 0) {
- DRM_DEBUG_LEASE("Object %d cannot be inserted into leases (%d)\n",
- object_id, ret);
+ drm_dbg_lease(dev, "Object %d cannot be inserted into leases (%d)\n",
+ object_id, ret);
goto out_free_objects;
}
if (obj->type == DRM_MODE_OBJECT_CRTC && !universal_planes) {
@@ -439,15 +442,15 @@ static int fill_object_idr(struct drm_device *dev,
ret = idr_alloc(leases, &drm_lease_idr_object, crtc->primary->base.id, crtc->primary->base.id + 1, GFP_KERNEL);
if (ret < 0) {
- DRM_DEBUG_LEASE("Object primary plane %d cannot be inserted into leases (%d)\n",
- object_id, ret);
+ drm_dbg_lease(dev, "Object primary plane %d cannot be inserted into leases (%d)\n",
+ object_id, ret);
goto out_free_objects;
}
if (crtc->cursor) {
ret = idr_alloc(leases, &drm_lease_idr_object, crtc->cursor->base.id, crtc->cursor->base.id + 1, GFP_KERNEL);
if (ret < 0) {
- DRM_DEBUG_LEASE("Object cursor plane %d cannot be inserted into leases (%d)\n",
- object_id, ret);
+ drm_dbg_lease(dev, "Object cursor plane %d cannot be inserted into leases (%d)\n",
+ object_id, ret);
goto out_free_objects;
}
}
@@ -490,14 +493,14 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
return -EOPNOTSUPP;
if (cl->flags && (cl->flags & ~(O_CLOEXEC | O_NONBLOCK))) {
- DRM_DEBUG_LEASE("invalid flags\n");
+ drm_dbg_lease(dev, "invalid flags\n");
return -EINVAL;
}
lessor = drm_file_get_master(lessor_priv);
/* Do not allow sub-leases */
if (lessor->lessor) {
- DRM_DEBUG_LEASE("recursive leasing not allowed\n");
+ drm_dbg_lease(dev, "recursive leasing not allowed\n");
ret = -EINVAL;
goto out_lessor;
}
@@ -520,7 +523,7 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
object_count, object_ids);
kfree(object_ids);
if (ret) {
- DRM_DEBUG_LEASE("lease object lookup failed: %i\n", ret);
+ drm_dbg_lease(dev, "lease object lookup failed: %i\n", ret);
idr_destroy(&leases);
goto out_lessor;
}
@@ -534,7 +537,7 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
goto out_lessor;
}
- DRM_DEBUG_LEASE("Creating lease\n");
+ drm_dbg_lease(dev, "Creating lease\n");
/* lessee will take the ownership of leases */
lessee = drm_lease_create(lessor, &leases);
@@ -545,7 +548,7 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
}
/* Clone the lessor file to create a new file for us */
- DRM_DEBUG_LEASE("Allocating lease file\n");
+ drm_dbg_lease(dev, "Allocating lease file\n");
lessee_file = file_clone_open(lessor_file);
if (IS_ERR(lessee_file)) {
ret = PTR_ERR(lessee_file);
@@ -560,7 +563,7 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
lessee_priv->authenticated = 1;
/* Pass fd back to userspace */
- DRM_DEBUG_LEASE("Returning fd %d id %d\n", fd, lessee->lessee_id);
+ drm_dbg_lease(dev, "Returning fd %d id %d\n", fd, lessee->lessee_id);
cl->fd = fd;
cl->lessee_id = lessee->lessee_id;
@@ -568,7 +571,7 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
fd_install(fd, lessee_file);
drm_master_put(&lessor);
- DRM_DEBUG_LEASE("drm_mode_create_lease_ioctl succeeded\n");
+ drm_dbg_lease(dev, "drm_mode_create_lease_ioctl succeeded\n");
return 0;
out_lessee:
@@ -579,7 +582,7 @@ out_leases:
out_lessor:
drm_master_put(&lessor);
- DRM_DEBUG_LEASE("drm_mode_create_lease_ioctl failed: %d\n", ret);
+ drm_dbg_lease(dev, "drm_mode_create_lease_ioctl failed: %d\n", ret);
return ret;
}
@@ -601,7 +604,7 @@ int drm_mode_list_lessees_ioctl(struct drm_device *dev,
return -EOPNOTSUPP;
lessor = drm_file_get_master(lessor_priv);
- DRM_DEBUG_LEASE("List lessees for %d\n", lessor->lessee_id);
+ drm_dbg_lease(dev, "List lessees for %d\n", lessor->lessee_id);
mutex_lock(&dev->mode_config.idr_mutex);
@@ -610,7 +613,8 @@ int drm_mode_list_lessees_ioctl(struct drm_device *dev,
/* Only list un-revoked leases */
if (!idr_is_empty(&lessee->leases)) {
if (count_lessees > count) {
- DRM_DEBUG_LEASE("Add lessee %d\n", lessee->lessee_id);
+ drm_dbg_lease(dev, "Add lessee %d\n",
+ lessee->lessee_id);
ret = put_user(lessee->lessee_id, lessee_ids + count);
if (ret)
break;
@@ -619,7 +623,7 @@ int drm_mode_list_lessees_ioctl(struct drm_device *dev,
}
}
- DRM_DEBUG_LEASE("Lessor leases to %d\n", count);
+ drm_dbg_lease(dev, "Lessor leases to %d\n", count);
if (ret == 0)
arg->count_lessees = count;
@@ -651,7 +655,7 @@ int drm_mode_get_lease_ioctl(struct drm_device *dev,
return -EOPNOTSUPP;
lessee = drm_file_get_master(lessee_priv);
- DRM_DEBUG_LEASE("get lease for %d\n", lessee->lessee_id);
+ drm_dbg_lease(dev, "get lease for %d\n", lessee->lessee_id);
mutex_lock(&dev->mode_config.idr_mutex);
@@ -665,7 +669,7 @@ int drm_mode_get_lease_ioctl(struct drm_device *dev,
count = 0;
idr_for_each_entry(object_idr, entry, object) {
if (count_objects > count) {
- DRM_DEBUG_LEASE("adding object %d\n", object);
+ drm_dbg_lease(dev, "adding object %d\n", object);
ret = put_user(object, object_ids + count);
if (ret)
break;
@@ -696,7 +700,7 @@ int drm_mode_revoke_lease_ioctl(struct drm_device *dev,
struct drm_master *lessee;
int ret = 0;
- DRM_DEBUG_LEASE("revoke lease for %d\n", arg->lessee_id);
+ drm_dbg_lease(dev, "revoke lease for %d\n", arg->lessee_id);
/* Can't lease without MODESET */
if (!drm_core_check_feature(dev, DRIVER_MODESET))
diff --git a/drivers/gpu/drm/drm_mipi_dbi.c b/drivers/gpu/drm/drm_mipi_dbi.c
index a6ac56580876..c871d9f096b8 100644
--- a/drivers/gpu/drm/drm_mipi_dbi.c
+++ b/drivers/gpu/drm/drm_mipi_dbi.c
@@ -21,6 +21,7 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem.h>
+#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_mipi_dbi.h>
#include <drm/drm_modes.h>
@@ -192,6 +193,7 @@ EXPORT_SYMBOL(mipi_dbi_command_stackbuf);
/**
* mipi_dbi_buf_copy - Copy a framebuffer, transforming it if necessary
* @dst: The destination buffer
+ * @src: The source buffer
* @fb: The source framebuffer
* @clip: Clipping rectangle of the area to be copied
* @swap: When true, swap MSB/LSB of 16-bit values
@@ -199,12 +201,10 @@ EXPORT_SYMBOL(mipi_dbi_command_stackbuf);
* Returns:
* Zero on success, negative error code on failure.
*/
-int mipi_dbi_buf_copy(void *dst, struct drm_framebuffer *fb,
+int mipi_dbi_buf_copy(void *dst, struct iosys_map *src, struct drm_framebuffer *fb,
struct drm_rect *clip, bool swap)
{
struct drm_gem_object *gem = drm_gem_fb_get_obj(fb, 0);
- struct iosys_map map[DRM_FORMAT_MAX_PLANES];
- struct iosys_map data[DRM_FORMAT_MAX_PLANES];
struct iosys_map dst_map = IOSYS_MAP_INIT_VADDR(dst);
int ret;
@@ -212,19 +212,15 @@ int mipi_dbi_buf_copy(void *dst, struct drm_framebuffer *fb,
if (ret)
return ret;
- ret = drm_gem_fb_vmap(fb, map, data);
- if (ret)
- goto out_drm_gem_fb_end_cpu_access;
-
switch (fb->format->format) {
case DRM_FORMAT_RGB565:
if (swap)
- drm_fb_swab(&dst_map, NULL, data, fb, clip, !gem->import_attach);
+ drm_fb_swab(&dst_map, NULL, src, fb, clip, !gem->import_attach);
else
- drm_fb_memcpy(&dst_map, NULL, data, fb, clip);
+ drm_fb_memcpy(&dst_map, NULL, src, fb, clip);
break;
case DRM_FORMAT_XRGB8888:
- drm_fb_xrgb8888_to_rgb565(&dst_map, NULL, data, fb, clip, swap);
+ drm_fb_xrgb8888_to_rgb565(&dst_map, NULL, src, fb, clip, swap);
break;
default:
drm_err_once(fb->dev, "Format is not supported: %p4cc\n",
@@ -232,8 +228,6 @@ int mipi_dbi_buf_copy(void *dst, struct drm_framebuffer *fb,
ret = -EINVAL;
}
- drm_gem_fb_vunmap(fb, map);
-out_drm_gem_fb_end_cpu_access:
drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
return ret;
@@ -257,29 +251,18 @@ static void mipi_dbi_set_window_address(struct mipi_dbi_dev *dbidev,
ys & 0xff, (ye >> 8) & 0xff, ye & 0xff);
}
-static void mipi_dbi_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect)
+static void mipi_dbi_fb_dirty(struct iosys_map *src, struct drm_framebuffer *fb,
+ struct drm_rect *rect)
{
- struct iosys_map map[DRM_FORMAT_MAX_PLANES];
- struct iosys_map data[DRM_FORMAT_MAX_PLANES];
struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(fb->dev);
unsigned int height = rect->y2 - rect->y1;
unsigned int width = rect->x2 - rect->x1;
struct mipi_dbi *dbi = &dbidev->dbi;
bool swap = dbi->swap_bytes;
- int idx, ret = 0;
+ int ret = 0;
bool full;
void *tr;
- if (WARN_ON(!fb))
- return;
-
- if (!drm_dev_enter(fb->dev, &idx))
- return;
-
- ret = drm_gem_fb_vmap(fb, map, data);
- if (ret)
- goto err_drm_dev_exit;
-
full = width == fb->width && height == fb->height;
DRM_DEBUG_KMS("Flushing [FB:%d] " DRM_RECT_FMT "\n", fb->base.id, DRM_RECT_ARG(rect));
@@ -287,11 +270,11 @@ static void mipi_dbi_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect)
if (!dbi->dc || !full || swap ||
fb->format->format == DRM_FORMAT_XRGB8888) {
tr = dbidev->tx_buf;
- ret = mipi_dbi_buf_copy(dbidev->tx_buf, fb, rect, swap);
+ ret = mipi_dbi_buf_copy(tr, src, fb, rect, swap);
if (ret)
goto err_msg;
} else {
- tr = data[0].vaddr; /* TODO: Use mapping abstraction properly */
+ tr = src->vaddr; /* TODO: Use mapping abstraction properly */
}
mipi_dbi_set_window_address(dbidev, rect->x1, rect->x2 - 1, rect->y1,
@@ -302,11 +285,6 @@ static void mipi_dbi_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect)
err_msg:
if (ret)
drm_err_once(fb->dev, "Failed to update display %d\n", ret);
-
- drm_gem_fb_vunmap(fb, map);
-
-err_drm_dev_exit:
- drm_dev_exit(idx);
}
/**
@@ -339,13 +317,24 @@ void mipi_dbi_pipe_update(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *old_state)
{
struct drm_plane_state *state = pipe->plane.state;
+ struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(state);
+ struct drm_framebuffer *fb = state->fb;
struct drm_rect rect;
+ int idx;
if (!pipe->crtc.state->active)
return;
+ if (WARN_ON(!fb))
+ return;
+
+ if (!drm_dev_enter(fb->dev, &idx))
+ return;
+
if (drm_atomic_helper_damage_merged(old_state, state, &rect))
- mipi_dbi_fb_dirty(state->fb, &rect);
+ mipi_dbi_fb_dirty(&shadow_plane_state->data[0], fb, &rect);
+
+ drm_dev_exit(idx);
}
EXPORT_SYMBOL(mipi_dbi_pipe_update);
@@ -366,6 +355,7 @@ void mipi_dbi_enable_flush(struct mipi_dbi_dev *dbidev,
struct drm_crtc_state *crtc_state,
struct drm_plane_state *plane_state)
{
+ struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
struct drm_framebuffer *fb = plane_state->fb;
struct drm_rect rect = {
.x1 = 0,
@@ -378,7 +368,7 @@ void mipi_dbi_enable_flush(struct mipi_dbi_dev *dbidev,
if (!drm_dev_enter(&dbidev->drm, &idx))
return;
- mipi_dbi_fb_dirty(fb, &rect);
+ mipi_dbi_fb_dirty(&shadow_plane_state->data[0], fb, &rect);
backlight_enable(dbidev->backlight);
drm_dev_exit(idx);
@@ -427,9 +417,95 @@ void mipi_dbi_pipe_disable(struct drm_simple_display_pipe *pipe)
if (dbidev->regulator)
regulator_disable(dbidev->regulator);
+ if (dbidev->io_regulator)
+ regulator_disable(dbidev->io_regulator);
}
EXPORT_SYMBOL(mipi_dbi_pipe_disable);
+/**
+ * mipi_dbi_pipe_begin_fb_access - MIPI DBI pipe begin-access helper
+ * @pipe: Display pipe
+ * @plane_state: Plane state
+ *
+ * This function implements struct &drm_simple_display_funcs.begin_fb_access.
+ *
+ * See drm_gem_begin_shadow_fb_access() for details and mipi_dbi_pipe_cleanup_fb()
+ * for cleanup.
+ *
+ * Returns:
+ * 0 on success, or a negative errno code otherwise.
+ */
+int mipi_dbi_pipe_begin_fb_access(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *plane_state)
+{
+ return drm_gem_begin_shadow_fb_access(&pipe->plane, plane_state);
+}
+EXPORT_SYMBOL(mipi_dbi_pipe_begin_fb_access);
+
+/**
+ * mipi_dbi_pipe_end_fb_access - MIPI DBI pipe end-access helper
+ * @pipe: Display pipe
+ * @plane_state: Plane state
+ *
+ * This function implements struct &drm_simple_display_funcs.end_fb_access.
+ *
+ * See mipi_dbi_pipe_begin_fb_access().
+ */
+void mipi_dbi_pipe_end_fb_access(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *plane_state)
+{
+ drm_gem_end_shadow_fb_access(&pipe->plane, plane_state);
+}
+EXPORT_SYMBOL(mipi_dbi_pipe_end_fb_access);
+
+/**
+ * mipi_dbi_pipe_reset_plane - MIPI DBI plane-reset helper
+ * @pipe: Display pipe
+ *
+ * This function implements struct &drm_simple_display_funcs.reset_plane
+ * for MIPI DBI planes.
+ */
+void mipi_dbi_pipe_reset_plane(struct drm_simple_display_pipe *pipe)
+{
+ drm_gem_reset_shadow_plane(&pipe->plane);
+}
+EXPORT_SYMBOL(mipi_dbi_pipe_reset_plane);
+
+/**
+ * mipi_dbi_pipe_duplicate_plane_state - duplicates MIPI DBI plane state
+ * @pipe: Display pipe
+ *
+ * This function implements struct &drm_simple_display_funcs.duplicate_plane_state
+ * for MIPI DBI planes.
+ *
+ * See drm_gem_duplicate_shadow_plane_state() for additional details.
+ *
+ * Returns:
+ * A pointer to a new plane state on success, or NULL otherwise.
+ */
+struct drm_plane_state *mipi_dbi_pipe_duplicate_plane_state(struct drm_simple_display_pipe *pipe)
+{
+ return drm_gem_duplicate_shadow_plane_state(&pipe->plane);
+}
+EXPORT_SYMBOL(mipi_dbi_pipe_duplicate_plane_state);
+
+/**
+ * mipi_dbi_pipe_destroy_plane_state - cleans up MIPI DBI plane state
+ * @pipe: Display pipe
+ * @plane_state: Plane state
+ *
+ * This function implements struct drm_simple_display_funcs.destroy_plane_state
+ * for MIPI DBI planes.
+ *
+ * See drm_gem_destroy_shadow_plane_state() for additional details.
+ */
+void mipi_dbi_pipe_destroy_plane_state(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *plane_state)
+{
+ drm_gem_destroy_shadow_plane_state(&pipe->plane, plane_state);
+}
+EXPORT_SYMBOL(mipi_dbi_pipe_destroy_plane_state);
+
static int mipi_dbi_connector_get_modes(struct drm_connector *connector)
{
struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(connector->dev);
@@ -652,6 +728,16 @@ static int mipi_dbi_poweron_reset_conditional(struct mipi_dbi_dev *dbidev, bool
}
}
+ if (dbidev->io_regulator) {
+ ret = regulator_enable(dbidev->io_regulator);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "Failed to enable I/O regulator (%d)\n", ret);
+ if (dbidev->regulator)
+ regulator_disable(dbidev->regulator);
+ return ret;
+ }
+ }
+
if (cond && mipi_dbi_display_is_on(dbi))
return 1;
@@ -661,6 +747,8 @@ static int mipi_dbi_poweron_reset_conditional(struct mipi_dbi_dev *dbidev, bool
DRM_DEV_ERROR(dev, "Failed to send reset command (%d)\n", ret);
if (dbidev->regulator)
regulator_disable(dbidev->regulator);
+ if (dbidev->io_regulator)
+ regulator_disable(dbidev->io_regulator);
return ret;
}
diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
index 497ef4b6a90a..b41aaf2bb9f1 100644
--- a/drivers/gpu/drm/drm_mipi_dsi.c
+++ b/drivers/gpu/drm/drm_mipi_dsi.c
@@ -62,9 +62,9 @@ static int mipi_dsi_device_match(struct device *dev, struct device_driver *drv)
return 0;
}
-static int mipi_dsi_uevent(struct device *dev, struct kobj_uevent_env *env)
+static int mipi_dsi_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
- struct mipi_dsi_device *dsi = to_mipi_dsi_device(dev);
+ const struct mipi_dsi_device *dsi = to_mipi_dsi_device(dev);
int err;
err = of_device_uevent_modalias(dev, env);
@@ -1224,6 +1224,58 @@ int mipi_dsi_dcs_get_display_brightness(struct mipi_dsi_device *dsi,
}
EXPORT_SYMBOL(mipi_dsi_dcs_get_display_brightness);
+/**
+ * mipi_dsi_dcs_set_display_brightness_large() - sets the 16-bit brightness value
+ * of the display
+ * @dsi: DSI peripheral device
+ * @brightness: brightness value
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int mipi_dsi_dcs_set_display_brightness_large(struct mipi_dsi_device *dsi,
+ u16 brightness)
+{
+ u8 payload[2] = { brightness >> 8, brightness & 0xff };
+ ssize_t err;
+
+ err = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_DISPLAY_BRIGHTNESS,
+ payload, sizeof(payload));
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+EXPORT_SYMBOL(mipi_dsi_dcs_set_display_brightness_large);
+
+/**
+ * mipi_dsi_dcs_get_display_brightness_large() - gets the current 16-bit
+ * brightness value of the display
+ * @dsi: DSI peripheral device
+ * @brightness: brightness value
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int mipi_dsi_dcs_get_display_brightness_large(struct mipi_dsi_device *dsi,
+ u16 *brightness)
+{
+ u8 brightness_be[2];
+ ssize_t err;
+
+ err = mipi_dsi_dcs_read(dsi, MIPI_DCS_GET_DISPLAY_BRIGHTNESS,
+ brightness_be, sizeof(brightness_be));
+ if (err <= 0) {
+ if (err == 0)
+ err = -ENODATA;
+
+ return err;
+ }
+
+ *brightness = (brightness_be[0] << 8) | brightness_be[1];
+
+ return 0;
+}
+EXPORT_SYMBOL(mipi_dsi_dcs_get_display_brightness_large);
+
static int mipi_dsi_drv_probe(struct device *dev)
{
struct mipi_dsi_driver *drv = to_mipi_dsi_driver(dev->driver);
diff --git a/drivers/gpu/drm/drm_mode_config.c b/drivers/gpu/drm/drm_mode_config.c
index 688c8afe0bf1..87eb591fe9b5 100644
--- a/drivers/gpu/drm/drm_mode_config.c
+++ b/drivers/gpu/drm/drm_mode_config.c
@@ -54,6 +54,8 @@ int drm_modeset_register_all(struct drm_device *dev)
if (ret)
goto err_connector;
+ drm_debugfs_late_register(dev);
+
return 0;
err_connector:
@@ -399,6 +401,8 @@ static void drm_mode_config_init_release(struct drm_device *dev, void *ptr)
*/
int drmm_mode_config_init(struct drm_device *dev)
{
+ int ret;
+
mutex_init(&dev->mode_config.mutex);
drm_modeset_lock_init(&dev->mode_config.connection_mutex);
mutex_init(&dev->mode_config.idr_mutex);
@@ -420,7 +424,11 @@ int drmm_mode_config_init(struct drm_device *dev)
init_llist_head(&dev->mode_config.connector_free_list);
INIT_WORK(&dev->mode_config.connector_free_work, drm_connector_free_work_fn);
- drm_mode_create_standard_properties(dev);
+ ret = drm_mode_create_standard_properties(dev);
+ if (ret) {
+ drm_mode_config_cleanup(dev);
+ return ret;
+ }
/* Just to be sure */
dev->mode_config.num_fb = 0;
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 3c8034a8c27b..40d482a01178 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -31,10 +31,11 @@
*/
#include <linux/ctype.h>
+#include <linux/export.h>
+#include <linux/fb.h> /* for KHZ2PICOS() */
#include <linux/list.h>
#include <linux/list_sort.h>
-#include <linux/export.h>
-#include <linux/fb.h>
+#include <linux/of.h>
#include <video/of_display_timing.h>
#include <video/of_videomode.h>
@@ -116,6 +117,482 @@ void drm_mode_probed_add(struct drm_connector *connector,
}
EXPORT_SYMBOL(drm_mode_probed_add);
+enum drm_mode_analog {
+ DRM_MODE_ANALOG_NTSC, /* 525 lines, 60Hz */
+ DRM_MODE_ANALOG_PAL, /* 625 lines, 50Hz */
+};
+
+/*
+ * The timings come from:
+ * - https://web.archive.org/web/20220406232708/http://www.kolumbus.fi/pami1/video/pal_ntsc.html
+ * - https://web.archive.org/web/20220406124914/http://martin.hinner.info/vga/pal.html
+ * - https://web.archive.org/web/20220609202433/http://www.batsocks.co.uk/readme/video_timing.htm
+ */
+#define NTSC_LINE_DURATION_NS 63556U
+#define NTSC_LINES_NUMBER 525
+
+#define NTSC_HBLK_DURATION_TYP_NS 10900U
+#define NTSC_HBLK_DURATION_MIN_NS (NTSC_HBLK_DURATION_TYP_NS - 200)
+#define NTSC_HBLK_DURATION_MAX_NS (NTSC_HBLK_DURATION_TYP_NS + 200)
+
+#define NTSC_HACT_DURATION_TYP_NS (NTSC_LINE_DURATION_NS - NTSC_HBLK_DURATION_TYP_NS)
+#define NTSC_HACT_DURATION_MIN_NS (NTSC_LINE_DURATION_NS - NTSC_HBLK_DURATION_MAX_NS)
+#define NTSC_HACT_DURATION_MAX_NS (NTSC_LINE_DURATION_NS - NTSC_HBLK_DURATION_MIN_NS)
+
+#define NTSC_HFP_DURATION_TYP_NS 1500
+#define NTSC_HFP_DURATION_MIN_NS 1270
+#define NTSC_HFP_DURATION_MAX_NS 2220
+
+#define NTSC_HSLEN_DURATION_TYP_NS 4700
+#define NTSC_HSLEN_DURATION_MIN_NS (NTSC_HSLEN_DURATION_TYP_NS - 100)
+#define NTSC_HSLEN_DURATION_MAX_NS (NTSC_HSLEN_DURATION_TYP_NS + 100)
+
+#define NTSC_HBP_DURATION_TYP_NS 4700
+
+/*
+ * I couldn't find the actual tolerance for the back porch, so let's
+ * just reuse the sync length ones.
+ */
+#define NTSC_HBP_DURATION_MIN_NS (NTSC_HBP_DURATION_TYP_NS - 100)
+#define NTSC_HBP_DURATION_MAX_NS (NTSC_HBP_DURATION_TYP_NS + 100)
+
+#define PAL_LINE_DURATION_NS 64000U
+#define PAL_LINES_NUMBER 625
+
+#define PAL_HACT_DURATION_TYP_NS 51950U
+#define PAL_HACT_DURATION_MIN_NS (PAL_HACT_DURATION_TYP_NS - 100)
+#define PAL_HACT_DURATION_MAX_NS (PAL_HACT_DURATION_TYP_NS + 400)
+
+#define PAL_HBLK_DURATION_TYP_NS (PAL_LINE_DURATION_NS - PAL_HACT_DURATION_TYP_NS)
+#define PAL_HBLK_DURATION_MIN_NS (PAL_LINE_DURATION_NS - PAL_HACT_DURATION_MAX_NS)
+#define PAL_HBLK_DURATION_MAX_NS (PAL_LINE_DURATION_NS - PAL_HACT_DURATION_MIN_NS)
+
+#define PAL_HFP_DURATION_TYP_NS 1650
+#define PAL_HFP_DURATION_MIN_NS (PAL_HFP_DURATION_TYP_NS - 100)
+#define PAL_HFP_DURATION_MAX_NS (PAL_HFP_DURATION_TYP_NS + 400)
+
+#define PAL_HSLEN_DURATION_TYP_NS 4700
+#define PAL_HSLEN_DURATION_MIN_NS (PAL_HSLEN_DURATION_TYP_NS - 200)
+#define PAL_HSLEN_DURATION_MAX_NS (PAL_HSLEN_DURATION_TYP_NS + 200)
+
+#define PAL_HBP_DURATION_TYP_NS 5700
+#define PAL_HBP_DURATION_MIN_NS (PAL_HBP_DURATION_TYP_NS - 200)
+#define PAL_HBP_DURATION_MAX_NS (PAL_HBP_DURATION_TYP_NS + 200)
+
+struct analog_param_field {
+ unsigned int even, odd;
+};
+
+#define PARAM_FIELD(_odd, _even) \
+ { .even = _even, .odd = _odd }
+
+struct analog_param_range {
+ unsigned int min, typ, max;
+};
+
+#define PARAM_RANGE(_min, _typ, _max) \
+ { .min = _min, .typ = _typ, .max = _max }
+
+struct analog_parameters {
+ unsigned int num_lines;
+ unsigned int line_duration_ns;
+
+ struct analog_param_range hact_ns;
+ struct analog_param_range hfp_ns;
+ struct analog_param_range hslen_ns;
+ struct analog_param_range hbp_ns;
+ struct analog_param_range hblk_ns;
+
+ unsigned int bt601_hfp;
+
+ struct analog_param_field vfp_lines;
+ struct analog_param_field vslen_lines;
+ struct analog_param_field vbp_lines;
+};
+
+#define TV_MODE_PARAMETER(_mode, _lines, _line_dur, _hact, _hfp, \
+ _hslen, _hbp, _hblk, _bt601_hfp, _vfp, \
+ _vslen, _vbp) \
+ [_mode] = { \
+ .num_lines = _lines, \
+ .line_duration_ns = _line_dur, \
+ .hact_ns = _hact, \
+ .hfp_ns = _hfp, \
+ .hslen_ns = _hslen, \
+ .hbp_ns = _hbp, \
+ .hblk_ns = _hblk, \
+ .bt601_hfp = _bt601_hfp, \
+ .vfp_lines = _vfp, \
+ .vslen_lines = _vslen, \
+ .vbp_lines = _vbp, \
+ }
+
+static const struct analog_parameters tv_modes_parameters[] = {
+ TV_MODE_PARAMETER(DRM_MODE_ANALOG_NTSC,
+ NTSC_LINES_NUMBER,
+ NTSC_LINE_DURATION_NS,
+ PARAM_RANGE(NTSC_HACT_DURATION_MIN_NS,
+ NTSC_HACT_DURATION_TYP_NS,
+ NTSC_HACT_DURATION_MAX_NS),
+ PARAM_RANGE(NTSC_HFP_DURATION_MIN_NS,
+ NTSC_HFP_DURATION_TYP_NS,
+ NTSC_HFP_DURATION_MAX_NS),
+ PARAM_RANGE(NTSC_HSLEN_DURATION_MIN_NS,
+ NTSC_HSLEN_DURATION_TYP_NS,
+ NTSC_HSLEN_DURATION_MAX_NS),
+ PARAM_RANGE(NTSC_HBP_DURATION_MIN_NS,
+ NTSC_HBP_DURATION_TYP_NS,
+ NTSC_HBP_DURATION_MAX_NS),
+ PARAM_RANGE(NTSC_HBLK_DURATION_MIN_NS,
+ NTSC_HBLK_DURATION_TYP_NS,
+ NTSC_HBLK_DURATION_MAX_NS),
+ 16,
+ PARAM_FIELD(3, 3),
+ PARAM_FIELD(3, 3),
+ PARAM_FIELD(16, 17)),
+ TV_MODE_PARAMETER(DRM_MODE_ANALOG_PAL,
+ PAL_LINES_NUMBER,
+ PAL_LINE_DURATION_NS,
+ PARAM_RANGE(PAL_HACT_DURATION_MIN_NS,
+ PAL_HACT_DURATION_TYP_NS,
+ PAL_HACT_DURATION_MAX_NS),
+ PARAM_RANGE(PAL_HFP_DURATION_MIN_NS,
+ PAL_HFP_DURATION_TYP_NS,
+ PAL_HFP_DURATION_MAX_NS),
+ PARAM_RANGE(PAL_HSLEN_DURATION_MIN_NS,
+ PAL_HSLEN_DURATION_TYP_NS,
+ PAL_HSLEN_DURATION_MAX_NS),
+ PARAM_RANGE(PAL_HBP_DURATION_MIN_NS,
+ PAL_HBP_DURATION_TYP_NS,
+ PAL_HBP_DURATION_MAX_NS),
+ PARAM_RANGE(PAL_HBLK_DURATION_MIN_NS,
+ PAL_HBLK_DURATION_TYP_NS,
+ PAL_HBLK_DURATION_MAX_NS),
+ 12,
+
+ /*
+ * The front porch is actually 6 short sync
+ * pulses for the even field, and 5 for the
+ * odd field. Each sync takes half a life so
+ * the odd field front porch is shorter by
+ * half a line.
+ *
+ * In progressive, we're supposed to use 6
+ * pulses, so we're fine there
+ */
+ PARAM_FIELD(3, 2),
+
+ /*
+ * The vsync length is 5 long sync pulses,
+ * each field taking half a line. We're
+ * shorter for both fields by half a line.
+ *
+ * In progressive, we're supposed to use 5
+ * pulses, so we're off by half
+ * a line.
+ *
+ * In interlace, we're now off by half a line
+ * for the even field and one line for the odd
+ * field.
+ */
+ PARAM_FIELD(3, 3),
+
+ /*
+ * The back porch starts with post-equalizing
+ * pulses, consisting in 5 short sync pulses
+ * for the even field, 4 for the odd field. In
+ * progressive, it's 5 short syncs.
+ *
+ * In progressive, we thus have 2.5 lines,
+ * plus the 0.5 line we were missing
+ * previously, so we should use 3 lines.
+ *
+ * In interlace, the even field is in the
+ * exact same case than progressive. For the
+ * odd field, we should be using 2 lines but
+ * we're one line short, so we'll make up for
+ * it here by using 3.
+ *
+ * The entire blanking area is supposed to
+ * take 25 lines, so we also need to account
+ * for the rest of the blanking area that
+ * can't be in either the front porch or sync
+ * period.
+ */
+ PARAM_FIELD(19, 20)),
+};
+
+static int fill_analog_mode(struct drm_device *dev,
+ struct drm_display_mode *mode,
+ const struct analog_parameters *params,
+ unsigned long pixel_clock_hz,
+ unsigned int hactive,
+ unsigned int vactive,
+ bool interlace)
+{
+ unsigned long pixel_duration_ns = NSEC_PER_SEC / pixel_clock_hz;
+ unsigned int htotal, vtotal;
+ unsigned int max_hact, hact_duration_ns;
+ unsigned int hblk, hblk_duration_ns;
+ unsigned int hfp, hfp_duration_ns;
+ unsigned int hslen, hslen_duration_ns;
+ unsigned int hbp, hbp_duration_ns;
+ unsigned int porches, porches_duration_ns;
+ unsigned int vfp, vfp_min;
+ unsigned int vbp, vbp_min;
+ unsigned int vslen;
+ bool bt601 = false;
+ int porches_rem;
+ u64 result;
+
+ drm_dbg_kms(dev,
+ "Generating a %ux%u%c, %u-line mode with a %lu kHz clock\n",
+ hactive, vactive,
+ interlace ? 'i' : 'p',
+ params->num_lines,
+ pixel_clock_hz / 1000);
+
+ max_hact = params->hact_ns.max / pixel_duration_ns;
+ if (pixel_clock_hz == 13500000 && hactive > max_hact && hactive <= 720) {
+ drm_dbg_kms(dev, "Trying to generate a BT.601 mode. Disabling checks.\n");
+ bt601 = true;
+ }
+
+ /*
+ * Our pixel duration is going to be round down by the division,
+ * so rounding up is probably going to introduce even more
+ * deviation.
+ */
+ result = (u64)params->line_duration_ns * pixel_clock_hz;
+ do_div(result, NSEC_PER_SEC);
+ htotal = result;
+
+ drm_dbg_kms(dev, "Total Horizontal Number of Pixels: %u\n", htotal);
+
+ hact_duration_ns = hactive * pixel_duration_ns;
+ if (!bt601 &&
+ (hact_duration_ns < params->hact_ns.min ||
+ hact_duration_ns > params->hact_ns.max)) {
+ DRM_ERROR("Invalid horizontal active area duration: %uns (min: %u, max %u)\n",
+ hact_duration_ns, params->hact_ns.min, params->hact_ns.max);
+ return -EINVAL;
+ }
+
+ hblk = htotal - hactive;
+ drm_dbg_kms(dev, "Horizontal Blanking Period: %u\n", hblk);
+
+ hblk_duration_ns = hblk * pixel_duration_ns;
+ if (!bt601 &&
+ (hblk_duration_ns < params->hblk_ns.min ||
+ hblk_duration_ns > params->hblk_ns.max)) {
+ DRM_ERROR("Invalid horizontal blanking duration: %uns (min: %u, max %u)\n",
+ hblk_duration_ns, params->hblk_ns.min, params->hblk_ns.max);
+ return -EINVAL;
+ }
+
+ hslen = DIV_ROUND_UP(params->hslen_ns.typ, pixel_duration_ns);
+ drm_dbg_kms(dev, "Horizontal Sync Period: %u\n", hslen);
+
+ hslen_duration_ns = hslen * pixel_duration_ns;
+ if (!bt601 &&
+ (hslen_duration_ns < params->hslen_ns.min ||
+ hslen_duration_ns > params->hslen_ns.max)) {
+ DRM_ERROR("Invalid horizontal sync duration: %uns (min: %u, max %u)\n",
+ hslen_duration_ns, params->hslen_ns.min, params->hslen_ns.max);
+ return -EINVAL;
+ }
+
+ porches = hblk - hslen;
+ drm_dbg_kms(dev, "Remaining horizontal pixels for both porches: %u\n", porches);
+
+ porches_duration_ns = porches * pixel_duration_ns;
+ if (!bt601 &&
+ (porches_duration_ns > (params->hfp_ns.max + params->hbp_ns.max) ||
+ porches_duration_ns < (params->hfp_ns.min + params->hbp_ns.min))) {
+ DRM_ERROR("Invalid horizontal porches duration: %uns\n", porches_duration_ns);
+ return -EINVAL;
+ }
+
+ if (bt601) {
+ hfp = params->bt601_hfp;
+ } else {
+ unsigned int hfp_min = DIV_ROUND_UP(params->hfp_ns.min,
+ pixel_duration_ns);
+ unsigned int hbp_min = DIV_ROUND_UP(params->hbp_ns.min,
+ pixel_duration_ns);
+ int porches_rem = porches - hfp_min - hbp_min;
+
+ hfp = hfp_min + DIV_ROUND_UP(porches_rem, 2);
+ }
+
+ drm_dbg_kms(dev, "Horizontal Front Porch: %u\n", hfp);
+
+ hfp_duration_ns = hfp * pixel_duration_ns;
+ if (!bt601 &&
+ (hfp_duration_ns < params->hfp_ns.min ||
+ hfp_duration_ns > params->hfp_ns.max)) {
+ DRM_ERROR("Invalid horizontal front porch duration: %uns (min: %u, max %u)\n",
+ hfp_duration_ns, params->hfp_ns.min, params->hfp_ns.max);
+ return -EINVAL;
+ }
+
+ hbp = porches - hfp;
+ drm_dbg_kms(dev, "Horizontal Back Porch: %u\n", hbp);
+
+ hbp_duration_ns = hbp * pixel_duration_ns;
+ if (!bt601 &&
+ (hbp_duration_ns < params->hbp_ns.min ||
+ hbp_duration_ns > params->hbp_ns.max)) {
+ DRM_ERROR("Invalid horizontal back porch duration: %uns (min: %u, max %u)\n",
+ hbp_duration_ns, params->hbp_ns.min, params->hbp_ns.max);
+ return -EINVAL;
+ }
+
+ if (htotal != (hactive + hfp + hslen + hbp))
+ return -EINVAL;
+
+ mode->clock = pixel_clock_hz / 1000;
+ mode->hdisplay = hactive;
+ mode->hsync_start = mode->hdisplay + hfp;
+ mode->hsync_end = mode->hsync_start + hslen;
+ mode->htotal = mode->hsync_end + hbp;
+
+ if (interlace) {
+ vfp_min = params->vfp_lines.even + params->vfp_lines.odd;
+ vbp_min = params->vbp_lines.even + params->vbp_lines.odd;
+ vslen = params->vslen_lines.even + params->vslen_lines.odd;
+ } else {
+ /*
+ * By convention, NTSC (aka 525/60) systems start with
+ * the even field, but PAL (aka 625/50) systems start
+ * with the odd one.
+ *
+ * PAL systems also have asymmetric timings between the
+ * even and odd field, while NTSC is symmetric.
+ *
+ * Moreover, if we want to create a progressive mode for
+ * PAL, we need to use the odd field timings.
+ *
+ * Since odd == even for NTSC, we can just use the odd
+ * one all the time to simplify the code a bit.
+ */
+ vfp_min = params->vfp_lines.odd;
+ vbp_min = params->vbp_lines.odd;
+ vslen = params->vslen_lines.odd;
+ }
+
+ drm_dbg_kms(dev, "Vertical Sync Period: %u\n", vslen);
+
+ porches = params->num_lines - vactive - vslen;
+ drm_dbg_kms(dev, "Remaining vertical pixels for both porches: %u\n", porches);
+
+ porches_rem = porches - vfp_min - vbp_min;
+ vfp = vfp_min + (porches_rem / 2);
+ drm_dbg_kms(dev, "Vertical Front Porch: %u\n", vfp);
+
+ vbp = porches - vfp;
+ drm_dbg_kms(dev, "Vertical Back Porch: %u\n", vbp);
+
+ vtotal = vactive + vfp + vslen + vbp;
+ if (params->num_lines != vtotal) {
+ DRM_ERROR("Invalid vertical total: %upx (expected %upx)\n",
+ vtotal, params->num_lines);
+ return -EINVAL;
+ }
+
+ mode->vdisplay = vactive;
+ mode->vsync_start = mode->vdisplay + vfp;
+ mode->vsync_end = mode->vsync_start + vslen;
+ mode->vtotal = mode->vsync_end + vbp;
+
+ if (mode->vtotal != params->num_lines)
+ return -EINVAL;
+
+ mode->type = DRM_MODE_TYPE_DRIVER;
+ mode->flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC;
+ if (interlace)
+ mode->flags |= DRM_MODE_FLAG_INTERLACE;
+
+ drm_mode_set_name(mode);
+
+ drm_dbg_kms(dev, "Generated mode " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
+
+ return 0;
+}
+
+/**
+ * drm_analog_tv_mode - create a display mode for an analog TV
+ * @dev: drm device
+ * @tv_mode: TV Mode standard to create a mode for. See DRM_MODE_TV_MODE_*.
+ * @pixel_clock_hz: Pixel Clock Frequency, in Hertz
+ * @hdisplay: hdisplay size
+ * @vdisplay: vdisplay size
+ * @interlace: whether to compute an interlaced mode
+ *
+ * This function creates a struct drm_display_mode instance suited for
+ * an analog TV output, for one of the usual analog TV mode.
+ *
+ * Note that @hdisplay is larger than the usual constraints for the PAL
+ * and NTSC timings, and we'll choose to ignore most timings constraints
+ * to reach those resolutions.
+ *
+ * Returns:
+ *
+ * A pointer to the mode, allocated with drm_mode_create(). Returns NULL
+ * on error.
+ */
+struct drm_display_mode *drm_analog_tv_mode(struct drm_device *dev,
+ enum drm_connector_tv_mode tv_mode,
+ unsigned long pixel_clock_hz,
+ unsigned int hdisplay,
+ unsigned int vdisplay,
+ bool interlace)
+{
+ struct drm_display_mode *mode;
+ enum drm_mode_analog analog;
+ int ret;
+
+ switch (tv_mode) {
+ case DRM_MODE_TV_MODE_NTSC:
+ fallthrough;
+ case DRM_MODE_TV_MODE_NTSC_443:
+ fallthrough;
+ case DRM_MODE_TV_MODE_NTSC_J:
+ fallthrough;
+ case DRM_MODE_TV_MODE_PAL_M:
+ analog = DRM_MODE_ANALOG_NTSC;
+ break;
+
+ case DRM_MODE_TV_MODE_PAL:
+ fallthrough;
+ case DRM_MODE_TV_MODE_PAL_N:
+ fallthrough;
+ case DRM_MODE_TV_MODE_SECAM:
+ analog = DRM_MODE_ANALOG_PAL;
+ break;
+
+ default:
+ return NULL;
+ }
+
+ mode = drm_mode_create(dev);
+ if (!mode)
+ return NULL;
+
+ ret = fill_analog_mode(dev, mode,
+ &tv_modes_parameters[analog],
+ pixel_clock_hz, hdisplay, vdisplay, interlace);
+ if (ret)
+ goto err_free_mode;
+
+ return mode;
+
+err_free_mode:
+ drm_mode_destroy(dev, mode);
+ return NULL;
+}
+EXPORT_SYMBOL(drm_analog_tv_mode);
+
/**
* drm_cvt_mode -create a modeline based on the CVT algorithm
* @dev: drm device
@@ -1659,6 +2136,30 @@ static int drm_mode_parse_panel_orientation(const char *delim,
return 0;
}
+static int drm_mode_parse_tv_mode(const char *delim,
+ struct drm_cmdline_mode *mode)
+{
+ const char *value;
+ int ret;
+
+ if (*delim != '=')
+ return -EINVAL;
+
+ value = delim + 1;
+ delim = strchr(value, ',');
+ if (!delim)
+ delim = value + strlen(value);
+
+ ret = drm_get_tv_mode_from_name(value, delim - value);
+ if (ret < 0)
+ return ret;
+
+ mode->tv_mode_specified = true;
+ mode->tv_mode = ret;
+
+ return 0;
+}
+
static int drm_mode_parse_cmdline_options(const char *str,
bool freestanding,
const struct drm_connector *connector,
@@ -1728,6 +2229,9 @@ static int drm_mode_parse_cmdline_options(const char *str,
} else if (!strncmp(option, "panel_orientation", delim - option)) {
if (drm_mode_parse_panel_orientation(delim, mode))
return -EINVAL;
+ } else if (!strncmp(option, "tv_mode", delim - option)) {
+ if (drm_mode_parse_tv_mode(delim, mode))
+ return -EINVAL;
} else {
return -EINVAL;
}
@@ -1756,20 +2260,24 @@ struct drm_named_mode {
unsigned int xres;
unsigned int yres;
unsigned int flags;
+ unsigned int tv_mode;
};
-#define NAMED_MODE(_name, _pclk, _x, _y, _flags) \
+#define NAMED_MODE(_name, _pclk, _x, _y, _flags, _mode) \
{ \
.name = _name, \
.pixel_clock_khz = _pclk, \
.xres = _x, \
.yres = _y, \
.flags = _flags, \
+ .tv_mode = _mode, \
}
static const struct drm_named_mode drm_named_modes[] = {
- NAMED_MODE("NTSC", 13500, 720, 480, DRM_MODE_FLAG_INTERLACE),
- NAMED_MODE("PAL", 13500, 720, 576, DRM_MODE_FLAG_INTERLACE),
+ NAMED_MODE("NTSC", 13500, 720, 480, DRM_MODE_FLAG_INTERLACE, DRM_MODE_TV_MODE_NTSC),
+ NAMED_MODE("NTSC-J", 13500, 720, 480, DRM_MODE_FLAG_INTERLACE, DRM_MODE_TV_MODE_NTSC_J),
+ NAMED_MODE("PAL", 13500, 720, 576, DRM_MODE_FLAG_INTERLACE, DRM_MODE_TV_MODE_PAL),
+ NAMED_MODE("PAL-M", 13500, 720, 480, DRM_MODE_FLAG_INTERLACE, DRM_MODE_TV_MODE_PAL_M),
};
static int drm_mode_parse_cmdline_named_mode(const char *name,
@@ -1809,11 +2317,13 @@ static int drm_mode_parse_cmdline_named_mode(const char *name,
if (ret != name_end)
continue;
- strcpy(cmdline_mode->name, mode->name);
+ strscpy(cmdline_mode->name, mode->name, sizeof(cmdline_mode->name));
cmdline_mode->pixel_clock = mode->pixel_clock_khz;
cmdline_mode->xres = mode->xres;
cmdline_mode->yres = mode->yres;
cmdline_mode->interlace = !!(mode->flags & DRM_MODE_FLAG_INTERLACE);
+ cmdline_mode->tv_mode = mode->tv_mode;
+ cmdline_mode->tv_mode_specified = true;
cmdline_mode->specified = true;
return 1;
@@ -1992,6 +2502,31 @@ bool drm_mode_parse_command_line_for_connector(const char *mode_option,
}
EXPORT_SYMBOL(drm_mode_parse_command_line_for_connector);
+static struct drm_display_mode *drm_named_mode(struct drm_device *dev,
+ struct drm_cmdline_mode *cmd)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(drm_named_modes); i++) {
+ const struct drm_named_mode *named_mode = &drm_named_modes[i];
+
+ if (strcmp(cmd->name, named_mode->name))
+ continue;
+
+ if (!cmd->tv_mode_specified)
+ continue;
+
+ return drm_analog_tv_mode(dev,
+ named_mode->tv_mode,
+ named_mode->pixel_clock_khz * 1000,
+ named_mode->xres,
+ named_mode->yres,
+ named_mode->flags & DRM_MODE_FLAG_INTERLACE);
+ }
+
+ return NULL;
+}
+
/**
* drm_mode_create_from_cmdline_mode - convert a command line modeline into a DRM display mode
* @dev: DRM device to create the new mode for
@@ -2009,7 +2544,9 @@ drm_mode_create_from_cmdline_mode(struct drm_device *dev,
if (cmd->xres == 0 || cmd->yres == 0)
return NULL;
- if (cmd->cvt)
+ if (strlen(cmd->name))
+ mode = drm_named_mode(dev, cmd);
+ else if (cmd->cvt)
mode = drm_cvt_mode(dev,
cmd->xres, cmd->yres,
cmd->refresh_specified ? cmd->refresh : 60,
diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
index 3659f0465a72..5522d610c5cf 100644
--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
+++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
@@ -30,12 +30,6 @@ struct drm_dmi_panel_orientation_data {
int orientation;
};
-static const struct drm_dmi_panel_orientation_data asus_t100ha = {
- .width = 800,
- .height = 1280,
- .orientation = DRM_MODE_PANEL_ORIENTATION_LEFT_UP,
-};
-
static const struct drm_dmi_panel_orientation_data gpd_micropc = {
.width = 720,
.height = 1280,
@@ -97,6 +91,12 @@ static const struct drm_dmi_panel_orientation_data lcd720x1280_rightside_up = {
.orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
};
+static const struct drm_dmi_panel_orientation_data lcd800x1280_leftside_up = {
+ .width = 800,
+ .height = 1280,
+ .orientation = DRM_MODE_PANEL_ORIENTATION_LEFT_UP,
+};
+
static const struct drm_dmi_panel_orientation_data lcd800x1280_rightside_up = {
.width = 800,
.height = 1280,
@@ -127,6 +127,12 @@ static const struct drm_dmi_panel_orientation_data lcd1600x2560_leftside_up = {
.orientation = DRM_MODE_PANEL_ORIENTATION_LEFT_UP,
};
+static const struct drm_dmi_panel_orientation_data lcd1600x2560_rightside_up = {
+ .width = 1600,
+ .height = 2560,
+ .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
+};
+
static const struct dmi_system_id orientation_data[] = {
{ /* Acer One 10 (S1003) */
.matches = {
@@ -151,7 +157,7 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T100HAN"),
},
- .driver_data = (void *)&asus_t100ha,
+ .driver_data = (void *)&lcd800x1280_leftside_up,
}, { /* Asus T101HA */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
@@ -196,6 +202,12 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Hi10 pro tablet"),
},
.driver_data = (void *)&lcd1200x1920_rightside_up,
+ }, { /* Dynabook K50 */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dynabook Inc."),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "dynabook K50/FR"),
+ },
+ .driver_data = (void *)&lcd800x1280_leftside_up,
}, { /* GPD MicroPC (generic strings, also match on bios date) */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Default string"),
@@ -310,6 +322,12 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad D330-10IGL"),
},
.driver_data = (void *)&lcd800x1280_rightside_up,
+ }, { /* Lenovo IdeaPad Duet 3 10IGL5 */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "IdeaPad Duet 3 10IGL5"),
+ },
+ .driver_data = (void *)&lcd1200x1920_rightside_up,
}, { /* Lenovo Yoga Book X90F / X91F / X91L */
.matches = {
/* Non exact match to match all versions */
@@ -331,6 +349,13 @@ static const struct dmi_system_id orientation_data[] = {
DMI_MATCH(DMI_BIOS_VERSION, "BLADE_21"),
},
.driver_data = (void *)&lcd1200x1920_rightside_up,
+ }, { /* Lenovo Yoga Tab 3 X90F */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Blade3-10A-001"),
+ },
+ .driver_data = (void *)&lcd1600x2560_rightside_up,
}, { /* Nanote UMPC-01 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "RWC CO.,LTD"),
diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
index 33357629a7f5..24e7998d1731 100644
--- a/drivers/gpu/drm/drm_plane.c
+++ b/drivers/gpu/drm/drm_plane.c
@@ -46,6 +46,11 @@
* properties that specify how the pixels are positioned and blended, like
* rotation or Z-position. All these properties are stored in &drm_plane_state.
*
+ * Unless explicitly specified (via CRTC property or otherwise), the active area
+ * of a CRTC will be black by default. This means portions of the active area
+ * which are not covered by a plane will be black, and alpha blending of any
+ * planes with the CRTC background will blend with black at the lowest zpos.
+ *
* To create a plane, a KMS drivers allocates and zeroes an instances of
* &struct drm_plane (possibly as part of a larger structure) and registers it
* with a call to drm_universal_plane_init().
diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c
index ba6a9136a065..c91e454eba09 100644
--- a/drivers/gpu/drm/drm_plane_helper.c
+++ b/drivers/gpu/drm/drm_plane_helper.c
@@ -28,7 +28,6 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_uapi.h>
-#include <drm/drm_crtc_helper.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_encoder.h>
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index bcd9611dabfd..8127be134c39 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -222,6 +222,45 @@ drm_connector_mode_valid(struct drm_connector *connector,
return ret;
}
+static void drm_kms_helper_disable_hpd(struct drm_device *dev)
+{
+ struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
+
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
+ const struct drm_connector_helper_funcs *funcs =
+ connector->helper_private;
+
+ if (funcs && funcs->disable_hpd)
+ funcs->disable_hpd(connector);
+ }
+ drm_connector_list_iter_end(&conn_iter);
+}
+
+static bool drm_kms_helper_enable_hpd(struct drm_device *dev)
+{
+ bool poll = false;
+ struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
+
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
+ const struct drm_connector_helper_funcs *funcs =
+ connector->helper_private;
+
+ if (funcs && funcs->enable_hpd)
+ funcs->enable_hpd(connector);
+
+ if (connector->polled & (DRM_CONNECTOR_POLL_CONNECT |
+ DRM_CONNECTOR_POLL_DISCONNECT))
+ poll = true;
+ }
+ drm_connector_list_iter_end(&conn_iter);
+
+ return poll;
+}
+
#define DRM_OUTPUT_POLL_PERIOD (10*HZ)
/**
* drm_kms_helper_poll_enable - re-enable output polling.
@@ -241,20 +280,13 @@ drm_connector_mode_valid(struct drm_connector *connector,
void drm_kms_helper_poll_enable(struct drm_device *dev)
{
bool poll = false;
- struct drm_connector *connector;
- struct drm_connector_list_iter conn_iter;
unsigned long delay = DRM_OUTPUT_POLL_PERIOD;
- if (!dev->mode_config.poll_enabled || !drm_kms_helper_poll)
+ if (!dev->mode_config.poll_enabled || !drm_kms_helper_poll ||
+ dev->mode_config.poll_running)
return;
- drm_connector_list_iter_begin(dev, &conn_iter);
- drm_for_each_connector_iter(connector, &conn_iter) {
- if (connector->polled & (DRM_CONNECTOR_POLL_CONNECT |
- DRM_CONNECTOR_POLL_DISCONNECT))
- poll = true;
- }
- drm_connector_list_iter_end(&conn_iter);
+ poll = drm_kms_helper_enable_hpd(dev);
if (dev->mode_config.delayed_event) {
/*
@@ -273,6 +305,8 @@ void drm_kms_helper_poll_enable(struct drm_device *dev)
if (poll)
schedule_delayed_work(&dev->mode_config.output_poll_work, delay);
+
+ dev->mode_config.poll_running = true;
}
EXPORT_SYMBOL(drm_kms_helper_poll_enable);
@@ -561,10 +595,7 @@ retry:
}
/* Re-enable polling in case the global poll config changed. */
- if (drm_kms_helper_poll != dev->mode_config.poll_running)
- drm_kms_helper_poll_enable(dev);
-
- dev->mode_config.poll_running = drm_kms_helper_poll;
+ drm_kms_helper_poll_enable(dev);
if (connector->status == connector_status_disconnected) {
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] disconnected\n",
@@ -704,8 +735,11 @@ static void output_poll_execute(struct work_struct *work)
changed = dev->mode_config.delayed_event;
dev->mode_config.delayed_event = false;
- if (!drm_kms_helper_poll)
+ if (!drm_kms_helper_poll && dev->mode_config.poll_running) {
+ drm_kms_helper_disable_hpd(dev);
+ dev->mode_config.poll_running = false;
goto out;
+ }
if (!mutex_trylock(&dev->mode_config.mutex)) {
repoll = true;
@@ -818,9 +852,12 @@ EXPORT_SYMBOL(drm_kms_helper_is_poll_worker);
*/
void drm_kms_helper_poll_disable(struct drm_device *dev)
{
- if (!dev->mode_config.poll_enabled)
- return;
+ if (dev->mode_config.poll_running)
+ drm_kms_helper_disable_hpd(dev);
+
cancel_delayed_work_sync(&dev->mode_config.output_poll_work);
+
+ dev->mode_config.poll_running = false;
}
EXPORT_SYMBOL(drm_kms_helper_poll_disable);
@@ -861,8 +898,9 @@ void drm_kms_helper_poll_fini(struct drm_device *dev)
if (!dev->mode_config.poll_enabled)
return;
+ drm_kms_helper_poll_disable(dev);
+
dev->mode_config.poll_enabled = false;
- cancel_delayed_work_sync(&dev->mode_config.output_poll_work);
}
EXPORT_SYMBOL(drm_kms_helper_poll_fini);
@@ -1139,10 +1177,94 @@ int drm_connector_helper_get_modes(struct drm_connector *connector)
* EDID. Otherwise, if the EDID is NULL, clear the connector
* information.
*/
- count = drm_edid_connector_update(connector, drm_edid);
+ drm_edid_connector_update(connector, drm_edid);
+
+ count = drm_edid_connector_add_modes(connector);
drm_edid_free(drm_edid);
return count;
}
EXPORT_SYMBOL(drm_connector_helper_get_modes);
+
+/**
+ * drm_connector_helper_tv_get_modes - Fills the modes availables to a TV connector
+ * @connector: The connector
+ *
+ * Fills the available modes for a TV connector based on the supported
+ * TV modes, and the default mode expressed by the kernel command line.
+ *
+ * This can be used as the default TV connector helper .get_modes() hook
+ * if the driver does not need any special processing.
+ *
+ * Returns:
+ * The number of modes added to the connector.
+ */
+int drm_connector_helper_tv_get_modes(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_property *tv_mode_property =
+ dev->mode_config.tv_mode_property;
+ struct drm_cmdline_mode *cmdline = &connector->cmdline_mode;
+ unsigned int ntsc_modes = BIT(DRM_MODE_TV_MODE_NTSC) |
+ BIT(DRM_MODE_TV_MODE_NTSC_443) |
+ BIT(DRM_MODE_TV_MODE_NTSC_J) |
+ BIT(DRM_MODE_TV_MODE_PAL_M);
+ unsigned int pal_modes = BIT(DRM_MODE_TV_MODE_PAL) |
+ BIT(DRM_MODE_TV_MODE_PAL_N) |
+ BIT(DRM_MODE_TV_MODE_SECAM);
+ unsigned int tv_modes[2] = { UINT_MAX, UINT_MAX };
+ unsigned int i, supported_tv_modes = 0;
+
+ if (!tv_mode_property)
+ return 0;
+
+ for (i = 0; i < tv_mode_property->num_values; i++)
+ supported_tv_modes |= BIT(tv_mode_property->values[i]);
+
+ if ((supported_tv_modes & ntsc_modes) &&
+ (supported_tv_modes & pal_modes)) {
+ uint64_t default_mode;
+
+ if (drm_object_property_get_default_value(&connector->base,
+ tv_mode_property,
+ &default_mode))
+ return 0;
+
+ if (cmdline->tv_mode_specified)
+ default_mode = cmdline->tv_mode;
+
+ if (BIT(default_mode) & ntsc_modes) {
+ tv_modes[0] = DRM_MODE_TV_MODE_NTSC;
+ tv_modes[1] = DRM_MODE_TV_MODE_PAL;
+ } else {
+ tv_modes[0] = DRM_MODE_TV_MODE_PAL;
+ tv_modes[1] = DRM_MODE_TV_MODE_NTSC;
+ }
+ } else if (supported_tv_modes & ntsc_modes) {
+ tv_modes[0] = DRM_MODE_TV_MODE_NTSC;
+ } else if (supported_tv_modes & pal_modes) {
+ tv_modes[0] = DRM_MODE_TV_MODE_PAL;
+ } else {
+ return 0;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(tv_modes); i++) {
+ struct drm_display_mode *mode;
+
+ if (tv_modes[i] == DRM_MODE_TV_MODE_NTSC)
+ mode = drm_mode_analog_ntsc_480i(dev);
+ else if (tv_modes[i] == DRM_MODE_TV_MODE_PAL)
+ mode = drm_mode_analog_pal_576i(dev);
+ else
+ break;
+ if (!mode)
+ return i;
+ if (!i)
+ mode->type |= DRM_MODE_TYPE_PREFERRED;
+ drm_mode_probed_add(connector, mode);
+ }
+
+ return i;
+}
+EXPORT_SYMBOL(drm_connector_helper_tv_get_modes);
diff --git a/drivers/gpu/drm/drm_simple_kms_helper.c b/drivers/gpu/drm/drm_simple_kms_helper.c
index 3ef420ec4534..270523ae36d4 100644
--- a/drivers/gpu/drm/drm_simple_kms_helper.c
+++ b/drivers/gpu/drm/drm_simple_kms_helper.c
@@ -267,7 +267,7 @@ static int drm_simple_kms_plane_prepare_fb(struct drm_plane *plane,
WARN_ON_ONCE(pipe->funcs && pipe->funcs->cleanup_fb);
- return drm_gem_simple_display_pipe_prepare_fb(pipe, state);
+ return drm_gem_plane_helper_prepare_fb(plane, state);
}
return pipe->funcs->prepare_fb(pipe, state);
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index f024dc93939e..87c9fe55dec7 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -476,7 +476,7 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
if (!capable(CAP_SYS_ADMIN) &&
(dma->flags & _DRM_DMA_USE_PCI_RO)) {
- vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
+ vm_flags_clear(vma, VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
@@ -492,7 +492,7 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
vma->vm_ops = &drm_vm_dma_ops;
- vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+ vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
drm_vm_open_locked(dev, vma);
return 0;
@@ -560,7 +560,7 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
return -EINVAL;
if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
- vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
+ vm_flags_clear(vma, VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
@@ -628,7 +628,7 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
default:
return -EINVAL; /* This should never happen. */
}
- vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+ vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
drm_vm_open_locked(dev, vma);
return 0;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
index 1d2b4fb4bcf8..44ca803237a5 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -22,6 +22,7 @@
#include "etnaviv_gem.h"
#include "etnaviv_mmu.h"
#include "etnaviv_perfmon.h"
+#include "common.xml.h"
/*
* DRM operations:
@@ -56,6 +57,11 @@ static int etnaviv_open(struct drm_device *dev, struct drm_file *file)
if (!ctx)
return -ENOMEM;
+ ret = xa_alloc_cyclic(&priv->active_contexts, &ctx->id, ctx,
+ xa_limit_32b, &priv->next_context_id, GFP_KERNEL);
+ if (ret < 0)
+ goto out_free;
+
ctx->mmu = etnaviv_iommu_context_init(priv->mmu_global,
priv->cmdbuf_suballoc);
if (!ctx->mmu) {
@@ -99,6 +105,8 @@ static void etnaviv_postclose(struct drm_device *dev, struct drm_file *file)
etnaviv_iommu_context_put(ctx->mmu);
+ xa_erase(&priv->active_contexts, ctx->id);
+
kfree(ctx);
}
@@ -468,7 +476,47 @@ static const struct drm_ioctl_desc etnaviv_ioctls[] = {
ETNA_IOCTL(PM_QUERY_SIG, pm_query_sig, DRM_RENDER_ALLOW),
};
-DEFINE_DRM_GEM_FOPS(fops);
+static void etnaviv_fop_show_fdinfo(struct seq_file *m, struct file *f)
+{
+ struct drm_file *file = f->private_data;
+ struct drm_device *dev = file->minor->dev;
+ struct etnaviv_drm_private *priv = dev->dev_private;
+ struct etnaviv_file_private *ctx = file->driver_priv;
+
+ /*
+ * For a description of the text output format used here, see
+ * Documentation/gpu/drm-usage-stats.rst.
+ */
+ seq_printf(m, "drm-driver:\t%s\n", dev->driver->name);
+ seq_printf(m, "drm-client-id:\t%u\n", ctx->id);
+
+ for (int i = 0; i < ETNA_MAX_PIPES; i++) {
+ struct etnaviv_gpu *gpu = priv->gpu[i];
+ char engine[10] = "UNK";
+ int cur = 0;
+
+ if (!gpu)
+ continue;
+
+ if (gpu->identity.features & chipFeatures_PIPE_2D)
+ cur = snprintf(engine, sizeof(engine), "2D");
+ if (gpu->identity.features & chipFeatures_PIPE_3D)
+ cur = snprintf(engine + cur, sizeof(engine) - cur,
+ "%s3D", cur ? "/" : "");
+ if (gpu->identity.nn_core_count > 0)
+ cur = snprintf(engine + cur, sizeof(engine) - cur,
+ "%sNN", cur ? "/" : "");
+
+ seq_printf(m, "drm-engine-%s:\t%llu ns\n", engine,
+ ctx->sched_entity[i].elapsed_ns);
+ }
+}
+
+static const struct file_operations fops = {
+ .owner = THIS_MODULE,
+ DRM_GEM_FOPS,
+ .show_fdinfo = etnaviv_fop_show_fdinfo,
+};
static const struct drm_driver etnaviv_drm_driver = {
.driver_features = DRIVER_GEM | DRIVER_RENDER,
@@ -514,6 +562,8 @@ static int etnaviv_bind(struct device *dev)
dma_set_max_seg_size(dev, SZ_2G);
+ xa_init_flags(&priv->active_contexts, XA_FLAGS_ALLOC);
+
mutex_init(&priv->gem_lock);
INIT_LIST_HEAD(&priv->gem_list);
priv->num_gpus = 0;
@@ -563,6 +613,8 @@ static void etnaviv_unbind(struct device *dev)
etnaviv_cmdbuf_suballoc_destroy(priv->cmdbuf_suballoc);
+ xa_destroy(&priv->active_contexts);
+
drm->dev_private = NULL;
kfree(priv);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.h b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
index 2bb4c25565dc..b3eb1662e90c 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
@@ -12,6 +12,7 @@
#include <linux/sizes.h>
#include <linux/time64.h>
#include <linux/types.h>
+#include <linux/xarray.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem.h>
@@ -28,6 +29,7 @@ struct etnaviv_iommu_global;
#define ETNAVIV_SOFTPIN_START_ADDRESS SZ_4M /* must be >= SUBALLOC_SIZE */
struct etnaviv_file_private {
+ int id;
struct etnaviv_iommu_context *mmu;
struct drm_sched_entity sched_entity[ETNA_MAX_PIPES];
};
@@ -40,6 +42,9 @@ struct etnaviv_drm_private {
struct etnaviv_cmdbuf_suballoc *cmdbuf_suballoc;
struct etnaviv_iommu_global *mmu_global;
+ struct xarray active_contexts;
+ u32 next_context_id;
+
/* list of GEM objects: */
struct mutex gem_lock;
struct list_head gem_list;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index c5ae5492e1af..b5f73502e3dd 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -130,7 +130,7 @@ static int etnaviv_gem_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
{
pgprot_t vm_page_prot;
- vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
+ vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
vm_page_prot = vm_get_page_prot(vma->vm_flags);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index 1491159d0d20..45403ea38906 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -393,10 +393,11 @@ static void submit_cleanup(struct kref *kref)
wake_up_all(&submit->gpu->fence_event);
if (submit->out_fence) {
- /* first remove from IDR, so fence can not be found anymore */
- mutex_lock(&submit->gpu->fence_lock);
- idr_remove(&submit->gpu->fence_idr, submit->out_fence_id);
- mutex_unlock(&submit->gpu->fence_lock);
+ /*
+ * Remove from user fence array before dropping the reference,
+ * so fence can not be found in lookup anymore.
+ */
+ xa_erase(&submit->gpu->user_fences, submit->out_fence_id);
dma_fence_put(submit->out_fence);
}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index 51320eeebfcf..de8c9894967c 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -773,6 +773,10 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
goto fail;
}
+ if (gpu->identity.nn_core_count > 0)
+ dev_warn(gpu->dev, "etnaviv has been instantiated on a NPU, "
+ "for which the UAPI is still experimental\n");
+
/* Exclude VG cores with FE2.0 */
if (gpu->identity.features & chipFeatures_PIPE_VG &&
gpu->identity.features & chipFeatures_FE20) {
@@ -957,6 +961,8 @@ int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m)
gpu->identity.vertex_cache_size);
seq_printf(m, "\t shader_core_count: %d\n",
gpu->identity.shader_core_count);
+ seq_printf(m, "\t nn_core_count: %d\n",
+ gpu->identity.nn_core_count);
seq_printf(m, "\t pixel_pipes: %d\n",
gpu->identity.pixel_pipes);
seq_printf(m, "\t vertex_output_buffer_size: %d\n",
@@ -1240,7 +1246,7 @@ int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu,
* pretends we didn't find a fence in that case.
*/
rcu_read_lock();
- fence = idr_find(&gpu->fence_idr, id);
+ fence = xa_load(&gpu->user_fences, id);
if (fence)
fence = dma_fence_get_rcu(fence);
rcu_read_unlock();
@@ -1450,6 +1456,15 @@ static void sync_point_worker(struct work_struct *work)
static void dump_mmu_fault(struct etnaviv_gpu *gpu)
{
+ static const char *fault_reasons[] = {
+ "slave not present",
+ "page not present",
+ "write violation",
+ "out of bounds",
+ "read security violation",
+ "write security violation",
+ };
+
u32 status_reg, status;
int i;
@@ -1462,18 +1477,25 @@ static void dump_mmu_fault(struct etnaviv_gpu *gpu)
dev_err_ratelimited(gpu->dev, "MMU fault status 0x%08x\n", status);
for (i = 0; i < 4; i++) {
+ const char *reason = "unknown";
u32 address_reg;
+ u32 mmu_status;
- if (!(status & (VIVS_MMUv2_STATUS_EXCEPTION0__MASK << (i * 4))))
+ mmu_status = (status >> (i * 4)) & VIVS_MMUv2_STATUS_EXCEPTION0__MASK;
+ if (!mmu_status)
continue;
+ if ((mmu_status - 1) < ARRAY_SIZE(fault_reasons))
+ reason = fault_reasons[mmu_status - 1];
+
if (gpu->sec_mode == ETNA_SEC_NONE)
address_reg = VIVS_MMUv2_EXCEPTION_ADDR(i);
else
address_reg = VIVS_MMUv2_SEC_EXCEPTION_ADDR;
- dev_err_ratelimited(gpu->dev, "MMU %d fault addr 0x%08x\n", i,
- gpu_read(gpu, address_reg));
+ dev_err_ratelimited(gpu->dev,
+ "MMU %d fault (%s) addr 0x%08x\n",
+ i, reason, gpu_read(gpu, address_reg));
}
}
@@ -1629,7 +1651,6 @@ static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu)
return etnaviv_gpu_clk_disable(gpu);
}
-#ifdef CONFIG_PM
static int etnaviv_gpu_hw_resume(struct etnaviv_gpu *gpu)
{
int ret;
@@ -1645,7 +1666,6 @@ static int etnaviv_gpu_hw_resume(struct etnaviv_gpu *gpu)
return 0;
}
-#endif
static int
etnaviv_gpu_cooling_get_max_state(struct thermal_cooling_device *cdev,
@@ -1713,18 +1733,17 @@ static int etnaviv_gpu_bind(struct device *dev, struct device *master,
if (ret)
goto out_workqueue;
-#ifdef CONFIG_PM
- ret = pm_runtime_get_sync(gpu->dev);
-#else
- ret = etnaviv_gpu_clk_enable(gpu);
-#endif
+ if (IS_ENABLED(CONFIG_PM))
+ ret = pm_runtime_get_sync(gpu->dev);
+ else
+ ret = etnaviv_gpu_clk_enable(gpu);
if (ret < 0)
goto out_sched;
gpu->drm = drm;
gpu->fence_context = dma_fence_context_alloc(1);
- idr_init(&gpu->fence_idr);
+ xa_init_flags(&gpu->user_fences, XA_FLAGS_ALLOC);
spin_lock_init(&gpu->fence_spinlock);
INIT_WORK(&gpu->sync_point_work, sync_point_worker);
@@ -1761,12 +1780,12 @@ static void etnaviv_gpu_unbind(struct device *dev, struct device *master,
etnaviv_sched_fini(gpu);
-#ifdef CONFIG_PM
- pm_runtime_get_sync(gpu->dev);
- pm_runtime_put_sync_suspend(gpu->dev);
-#else
- etnaviv_gpu_hw_suspend(gpu);
-#endif
+ if (IS_ENABLED(CONFIG_PM)) {
+ pm_runtime_get_sync(gpu->dev);
+ pm_runtime_put_sync_suspend(gpu->dev);
+ } else {
+ etnaviv_gpu_hw_suspend(gpu);
+ }
if (gpu->mmu_context)
etnaviv_iommu_context_put(gpu->mmu_context);
@@ -1778,7 +1797,7 @@ static void etnaviv_gpu_unbind(struct device *dev, struct device *master,
}
gpu->drm = NULL;
- idr_destroy(&gpu->fence_idr);
+ xa_destroy(&gpu->user_fences);
if (IS_ENABLED(CONFIG_DRM_ETNAVIV_THERMAL))
thermal_cooling_device_unregister(gpu->cooling);
@@ -1810,7 +1829,7 @@ static int etnaviv_gpu_platform_probe(struct platform_device *pdev)
gpu->dev = &pdev->dev;
mutex_init(&gpu->lock);
- mutex_init(&gpu->fence_lock);
+ mutex_init(&gpu->sched_lock);
/* Map registers: */
gpu->mmio = devm_platform_ioremap_resource(pdev, 0);
@@ -1880,7 +1899,6 @@ static int etnaviv_gpu_platform_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
static int etnaviv_gpu_rpm_suspend(struct device *dev)
{
struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
@@ -1923,18 +1941,16 @@ static int etnaviv_gpu_rpm_resume(struct device *dev)
return 0;
}
-#endif
static const struct dev_pm_ops etnaviv_gpu_pm_ops = {
- SET_RUNTIME_PM_OPS(etnaviv_gpu_rpm_suspend, etnaviv_gpu_rpm_resume,
- NULL)
+ RUNTIME_PM_OPS(etnaviv_gpu_rpm_suspend, etnaviv_gpu_rpm_resume, NULL)
};
struct platform_driver etnaviv_gpu_driver = {
.driver = {
.name = "etnaviv-gpu",
.owner = THIS_MODULE,
- .pm = &etnaviv_gpu_pm_ops,
+ .pm = pm_ptr(&etnaviv_gpu_pm_ops),
.of_match_table = etnaviv_gpu_match,
},
.probe = etnaviv_gpu_platform_probe,
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
index f1204b070fb8..98c6f9c320fc 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
@@ -51,6 +51,9 @@ struct etnaviv_chip_identity {
/* Number of shader cores. */
u32 shader_core_count;
+ /* Number of Neural Network cores. */
+ u32 nn_core_count;
+
/* Size of the vertex cache. */
u32 vertex_cache_size;
@@ -100,6 +103,7 @@ struct etnaviv_gpu {
struct etnaviv_chip_identity identity;
enum etnaviv_sec_mode sec_mode;
struct workqueue_struct *wq;
+ struct mutex sched_lock;
struct drm_gpu_scheduler sched;
bool initialized;
bool fe_running;
@@ -117,8 +121,8 @@ struct etnaviv_gpu {
u32 idle_mask;
/* Fencing support */
- struct mutex fence_lock;
- struct idr fence_idr;
+ struct xarray user_fences;
+ u32 next_user_fence;
u32 next_fence;
u32 completed_fence;
wait_queue_head_t fence_event;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c b/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c
index 57f334e24189..2e63afa6c798 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c
@@ -16,6 +16,7 @@ static const struct etnaviv_chip_identity etnaviv_chip_identities[] = {
.register_max = 64,
.thread_count = 128,
.shader_core_count = 1,
+ .nn_core_count = 0,
.vertex_cache_size = 8,
.vertex_output_buffer_size = 1024,
.pixel_pipes = 1,
@@ -47,6 +48,7 @@ static const struct etnaviv_chip_identity etnaviv_chip_identities[] = {
.register_max = 64,
.thread_count = 512,
.shader_core_count = 2,
+ .nn_core_count = 0,
.vertex_cache_size = 16,
.vertex_output_buffer_size = 1024,
.pixel_pipes = 1,
@@ -78,6 +80,7 @@ static const struct etnaviv_chip_identity etnaviv_chip_identities[] = {
.register_max = 64,
.thread_count = 512,
.shader_core_count = 2,
+ .nn_core_count = 0,
.vertex_cache_size = 16,
.vertex_output_buffer_size = 1024,
.pixel_pipes = 1,
@@ -140,6 +143,7 @@ static const struct etnaviv_chip_identity etnaviv_chip_identities[] = {
.register_max = 64,
.thread_count = 1024,
.shader_core_count = 4,
+ .nn_core_count = 0,
.vertex_cache_size = 16,
.vertex_output_buffer_size = 1024,
.pixel_pipes = 2,
@@ -161,6 +165,38 @@ static const struct etnaviv_chip_identity etnaviv_chip_identities[] = {
.minor_features10 = 0x90044250,
.minor_features11 = 0x00000024,
},
+ {
+ .model = 0x8000,
+ .revision = 0x7120,
+ .product_id = 0x45080009,
+ .customer_id = 0x88,
+ .eco_id = 0,
+ .stream_count = 8,
+ .register_max = 64,
+ .thread_count = 256,
+ .shader_core_count = 1,
+ .nn_core_count = 8,
+ .vertex_cache_size = 16,
+ .vertex_output_buffer_size = 1024,
+ .pixel_pipes = 1,
+ .instruction_count = 512,
+ .num_constants = 320,
+ .buffer_size = 0,
+ .varyings_count = 16,
+ .features = 0xe0287cac,
+ .minor_features0 = 0xc1799eff,
+ .minor_features1 = 0xfefbfadb,
+ .minor_features2 = 0xeb9d6fbf,
+ .minor_features3 = 0xedfffced,
+ .minor_features4 = 0xd30dafc7,
+ .minor_features5 = 0x7b5ac333,
+ .minor_features6 = 0xfc8ee200,
+ .minor_features7 = 0x03fffa6f,
+ .minor_features8 = 0x00fe0ef0,
+ .minor_features9 = 0x0088003c,
+ .minor_features10 = 0x108048c0,
+ .minor_features11 = 0x00000010,
+ },
};
bool etnaviv_fill_identity_from_hwdb(struct etnaviv_gpu *gpu)
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
index d29f467eee13..1ae87dfd19c4 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
@@ -97,24 +97,24 @@ static const struct drm_sched_backend_ops etnaviv_sched_ops = {
int etnaviv_sched_push_job(struct etnaviv_gem_submit *submit)
{
- int ret = 0;
+ struct etnaviv_gpu *gpu = submit->gpu;
+ int ret;
/*
- * Hold the fence lock across the whole operation to avoid jobs being
+ * Hold the sched lock across the whole operation to avoid jobs being
* pushed out of order with regard to their sched fence seqnos as
* allocated in drm_sched_job_arm.
*/
- mutex_lock(&submit->gpu->fence_lock);
+ mutex_lock(&gpu->sched_lock);
drm_sched_job_arm(&submit->sched_job);
submit->out_fence = dma_fence_get(&submit->sched_job.s_fence->finished);
- submit->out_fence_id = idr_alloc_cyclic(&submit->gpu->fence_idr,
- submit->out_fence, 0,
- INT_MAX, GFP_KERNEL);
- if (submit->out_fence_id < 0) {
+ ret = xa_alloc_cyclic(&gpu->user_fences, &submit->out_fence_id,
+ submit->out_fence, xa_limit_32b,
+ &gpu->next_user_fence, GFP_KERNEL);
+ if (ret < 0) {
drm_sched_job_cleanup(&submit->sched_job);
- ret = -ENOMEM;
goto out_unlock;
}
@@ -124,7 +124,7 @@ int etnaviv_sched_push_job(struct etnaviv_gem_submit *submit)
drm_sched_entity_push_job(&submit->sched_job);
out_unlock:
- mutex_unlock(&submit->gpu->fence_lock);
+ mutex_unlock(&gpu->sched_lock);
return ret;
}
diff --git a/drivers/gpu/drm/etnaviv/state_hi.xml.h b/drivers/gpu/drm/etnaviv/state_hi.xml.h
index deaaa99fa654..94d5f33b1fd6 100644
--- a/drivers/gpu/drm/etnaviv/state_hi.xml.h
+++ b/drivers/gpu/drm/etnaviv/state_hi.xml.h
@@ -8,17 +8,17 @@ http://0x04.net/cgit/index.cgi/rules-ng-ng
git clone git://0x04.net/rules-ng-ng
The rules-ng-ng source files this header was generated from are:
-- state.xml ( 26666 bytes, from 2019-12-20 21:20:35)
-- common.xml ( 35468 bytes, from 2018-02-10 13:09:26)
-- common_3d.xml ( 15058 bytes, from 2019-12-28 20:02:03)
-- state_hi.xml ( 30552 bytes, from 2019-12-28 20:02:48)
-- copyright.xml ( 1597 bytes, from 2018-02-10 13:09:26)
-- state_2d.xml ( 51552 bytes, from 2018-02-10 13:09:26)
-- state_3d.xml ( 83098 bytes, from 2019-12-28 20:02:03)
-- state_blt.xml ( 14252 bytes, from 2019-10-20 19:59:15)
-- state_vg.xml ( 5975 bytes, from 2018-02-10 13:09:26)
-
-Copyright (C) 2012-2019 by the following authors:
+- state.xml ( 27198 bytes, from 2022-04-22 10:35:24)
+- common.xml ( 35468 bytes, from 2020-10-28 12:56:03)
+- common_3d.xml ( 15058 bytes, from 2020-10-28 12:56:03)
+- state_hi.xml ( 34804 bytes, from 2022-12-02 09:06:28)
+- copyright.xml ( 1597 bytes, from 2020-10-28 12:56:03)
+- state_2d.xml ( 51552 bytes, from 2020-10-28 12:56:03)
+- state_3d.xml ( 84445 bytes, from 2022-11-15 15:59:38)
+- state_blt.xml ( 14424 bytes, from 2022-11-07 11:18:41)
+- state_vg.xml ( 5975 bytes, from 2020-10-28 12:56:03)
+
+Copyright (C) 2012-2022 by the following authors:
- Wladimir J. van der Laan <laanwj@gmail.com>
- Christian Gmeiner <christian.gmeiner@gmail.com>
- Lucas Stach <l.stach@pengutronix.de>
@@ -321,16 +321,16 @@ DEALINGS IN THE SOFTWARE.
#define VIVS_MMUv2_CONFIGURATION_ADDRESS(x) (((x) << VIVS_MMUv2_CONFIGURATION_ADDRESS__SHIFT) & VIVS_MMUv2_CONFIGURATION_ADDRESS__MASK)
#define VIVS_MMUv2_STATUS 0x00000188
-#define VIVS_MMUv2_STATUS_EXCEPTION0__MASK 0x00000003
+#define VIVS_MMUv2_STATUS_EXCEPTION0__MASK 0x0000000f
#define VIVS_MMUv2_STATUS_EXCEPTION0__SHIFT 0
#define VIVS_MMUv2_STATUS_EXCEPTION0(x) (((x) << VIVS_MMUv2_STATUS_EXCEPTION0__SHIFT) & VIVS_MMUv2_STATUS_EXCEPTION0__MASK)
-#define VIVS_MMUv2_STATUS_EXCEPTION1__MASK 0x00000030
+#define VIVS_MMUv2_STATUS_EXCEPTION1__MASK 0x000000f0
#define VIVS_MMUv2_STATUS_EXCEPTION1__SHIFT 4
#define VIVS_MMUv2_STATUS_EXCEPTION1(x) (((x) << VIVS_MMUv2_STATUS_EXCEPTION1__SHIFT) & VIVS_MMUv2_STATUS_EXCEPTION1__MASK)
-#define VIVS_MMUv2_STATUS_EXCEPTION2__MASK 0x00000300
+#define VIVS_MMUv2_STATUS_EXCEPTION2__MASK 0x00000f00
#define VIVS_MMUv2_STATUS_EXCEPTION2__SHIFT 8
#define VIVS_MMUv2_STATUS_EXCEPTION2(x) (((x) << VIVS_MMUv2_STATUS_EXCEPTION2__SHIFT) & VIVS_MMUv2_STATUS_EXCEPTION2__MASK)
-#define VIVS_MMUv2_STATUS_EXCEPTION3__MASK 0x00003000
+#define VIVS_MMUv2_STATUS_EXCEPTION3__MASK 0x0000f000
#define VIVS_MMUv2_STATUS_EXCEPTION3__SHIFT 12
#define VIVS_MMUv2_STATUS_EXCEPTION3(x) (((x) << VIVS_MMUv2_STATUS_EXCEPTION3__SHIFT) & VIVS_MMUv2_STATUS_EXCEPTION3__MASK)
@@ -465,7 +465,13 @@ DEALINGS IN THE SOFTWARE.
#define VIVS_MC_PROFILE_CONFIG0 0x00000470
#define VIVS_MC_PROFILE_CONFIG0_FE__MASK 0x000000ff
#define VIVS_MC_PROFILE_CONFIG0_FE__SHIFT 0
+#define VIVS_MC_PROFILE_CONFIG0_FE_DRAW_COUNT 0x0000000a
+#define VIVS_MC_PROFILE_CONFIG0_FE_OUT_VERTEX_COUNT 0x0000000b
+#define VIVS_MC_PROFILE_CONFIG0_FE_CACHE_MISS_COUNT 0x0000000c
#define VIVS_MC_PROFILE_CONFIG0_FE_RESET 0x0000000f
+#define VIVS_MC_PROFILE_CONFIG0_FE_CACHE_LK_COUNT 0x00000010
+#define VIVS_MC_PROFILE_CONFIG0_FE_STALL_COUNT 0x00000011
+#define VIVS_MC_PROFILE_CONFIG0_FE_PROCESS_COUNT 0x00000012
#define VIVS_MC_PROFILE_CONFIG0_DE__MASK 0x0000ff00
#define VIVS_MC_PROFILE_CONFIG0_DE__SHIFT 8
#define VIVS_MC_PROFILE_CONFIG0_DE_RESET 0x00000f00
@@ -499,11 +505,14 @@ DEALINGS IN THE SOFTWARE.
#define VIVS_MC_PROFILE_CONFIG1_PA_DEPTH_CLIPPED_COUNTER 0x00000006
#define VIVS_MC_PROFILE_CONFIG1_PA_TRIVIAL_REJECTED_COUNTER 0x00000007
#define VIVS_MC_PROFILE_CONFIG1_PA_CULLED_COUNTER 0x00000008
+#define VIVS_MC_PROFILE_CONFIG1_PA_DROPED_PRIM_COUNTER 0x00000009
+#define VIVS_MC_PROFILE_CONFIG1_PA_FRUSTUM_CLIPPED_PRIM_COUNTER 0x0000000a
#define VIVS_MC_PROFILE_CONFIG1_PA_RESET 0x0000000f
#define VIVS_MC_PROFILE_CONFIG1_SE__MASK 0x0000ff00
#define VIVS_MC_PROFILE_CONFIG1_SE__SHIFT 8
#define VIVS_MC_PROFILE_CONFIG1_SE_CULLED_TRIANGLE_COUNT 0x00000000
#define VIVS_MC_PROFILE_CONFIG1_SE_CULLED_LINES_COUNT 0x00000100
+#define VIVS_MC_PROFILE_CONFIG1_SE_TRIVIAL_REJECTED_LINE_COUNT 0x00000400
#define VIVS_MC_PROFILE_CONFIG1_SE_RESET 0x00000f00
#define VIVS_MC_PROFILE_CONFIG1_RA__MASK 0x00ff0000
#define VIVS_MC_PROFILE_CONFIG1_RA__SHIFT 16
@@ -515,6 +524,8 @@ DEALINGS IN THE SOFTWARE.
#define VIVS_MC_PROFILE_CONFIG1_RA_PREFETCH_CACHE_MISS_COUNTER 0x000a0000
#define VIVS_MC_PROFILE_CONFIG1_RA_CULLED_QUAD_COUNT 0x000b0000
#define VIVS_MC_PROFILE_CONFIG1_RA_RESET 0x000f0000
+#define VIVS_MC_PROFILE_CONFIG1_RA_PIPE_HZ_CACHE_MISS_COUNTER 0x00110000
+#define VIVS_MC_PROFILE_CONFIG1_RA_PREFETCH_HZ_CACHE_MISS_COUNTER 0x00120000
#define VIVS_MC_PROFILE_CONFIG1_TX__MASK 0xff000000
#define VIVS_MC_PROFILE_CONFIG1_TX__SHIFT 24
#define VIVS_MC_PROFILE_CONFIG1_TX_TOTAL_BILINEAR_REQUESTS 0x00000000
@@ -535,13 +546,48 @@ DEALINGS IN THE SOFTWARE.
#define VIVS_MC_PROFILE_CONFIG2_MC_TOTAL_READ_REQ_8B_FROM_PIPELINE 0x00000001
#define VIVS_MC_PROFILE_CONFIG2_MC_TOTAL_READ_REQ_8B_FROM_IP 0x00000002
#define VIVS_MC_PROFILE_CONFIG2_MC_TOTAL_WRITE_REQ_8B_FROM_PIPELINE 0x00000003
-#define VIVS_MC_PROFILE_CONFIG2_MC_RESET 0x0000000f
+#define VIVS_MC_PROFILE_CONFIG2_MC_TOTAL_READ_REQ_SENTOUT_FROM_COLORPIPE 0x00000004
+#define VIVS_MC_PROFILE_CONFIG2_MC_TOTAL_WRITE_REQ_FROM_COLORPIPE 0x00000005
+#define VIVS_MC_PROFILE_CONFIG2_MC_TOTAL_READ_REQ_8B_FROM_DEPTHPIPE 0x00000007
+#define VIVS_MC_PROFILE_CONFIG2_MC_TOTAL_READ_REQ_8B_SENTOUT_FROM_DEPTHPIPE 0x00000008
+#define VIVS_MC_PROFILE_CONFIG2_MC_TOTAL_WRITE_REQ_8B_FROM_DEPTHPIPE 0x00000009
+#define VIVS_MC_PROFILE_CONFIG2_MC_TOTAL_READ_REQ_SENTOUT_FROM_DEPTHPIPE 0x0000000a
+#define VIVS_MC_PROFILE_CONFIG2_MC_TOTAL_WRITE_REQ_FROM_DEPTHPIPE 0x0000000b
+#define VIVS_MC_PROFILE_CONFIG2_MC_TOTAL_READ_REQ_8B_FROM_OTHERS 0x0000000c
+#define VIVS_MC_PROFILE_CONFIG2_MC_TOTAL_WRITE_REQ_8B_FROM_OTHERS 0x0000000d
+#define VIVS_MC_PROFILE_CONFIG2_MC_TOTAL_READ_REQ_FROM_OTHERS 0x0000000e
+#define VIVS_MC_PROFILE_CONFIG2_MC_TOTAL_WRITE_REQ_FROM_OTHERS 0x0000000f
+#define VIVS_MC_PROFILE_CONFIG2_MC_FE_READ_BANDWIDTH 0x00000015
+#define VIVS_MC_PROFILE_CONFIG2_MC_MMU_READ_BANDWIDTH 0x00000016
+#define VIVS_MC_PROFILE_CONFIG2_MC_BLT_READ_BANDWIDTH 0x00000017
+#define VIVS_MC_PROFILE_CONFIG2_MC_SH0_READ_BANDWIDTH 0x00000018
+#define VIVS_MC_PROFILE_CONFIG2_MC_SH1_READ_BANDWIDTH 0x00000019
+#define VIVS_MC_PROFILE_CONFIG2_MC_PE_WRITE_BANDWIDTH 0x0000001a
+#define VIVS_MC_PROFILE_CONFIG2_MC_BLT_WRITE_BANDWIDTH 0x0000001b
+#define VIVS_MC_PROFILE_CONFIG2_MC_SH0_WRITE_BANDWIDTH 0x0000001c
+#define VIVS_MC_PROFILE_CONFIG2_MC_SH1_WRITE_BANDWIDTH 0x0000001d
#define VIVS_MC_PROFILE_CONFIG2_HI__MASK 0x0000ff00
#define VIVS_MC_PROFILE_CONFIG2_HI__SHIFT 8
#define VIVS_MC_PROFILE_CONFIG2_HI_AXI_CYCLES_READ_REQUEST_STALLED 0x00000000
#define VIVS_MC_PROFILE_CONFIG2_HI_AXI_CYCLES_WRITE_REQUEST_STALLED 0x00000100
#define VIVS_MC_PROFILE_CONFIG2_HI_AXI_CYCLES_WRITE_DATA_STALLED 0x00000200
#define VIVS_MC_PROFILE_CONFIG2_HI_RESET 0x00000f00
+#define VIVS_MC_PROFILE_CONFIG2_L2__MASK 0x00ff0000
+#define VIVS_MC_PROFILE_CONFIG2_L2__SHIFT 16
+#define VIVS_MC_PROFILE_CONFIG2_L2_TOTAL_AXI0_READ_REQUEST_COUNT 0x00000000
+#define VIVS_MC_PROFILE_CONFIG2_L2_TOTAL_AXI0_WRITE_REQUEST_COUNT 0x00040000
+#define VIVS_MC_PROFILE_CONFIG2_L2_TOTAL_AXI1_WRITE_REQUEST_COUNT 0x00050000
+#define VIVS_MC_PROFILE_CONFIG2_L2_TOTAL_READ_TRANSACTIONS_REQUEST_BY_AXI0 0x00080000
+#define VIVS_MC_PROFILE_CONFIG2_L2_TOTAL_READ_TRANSACTIONS_REQUEST_BY_AXI1 0x00090000
+#define VIVS_MC_PROFILE_CONFIG2_L2_TOTAL_WRITE_TRANSACTIONS_REQUEST_BY_AXI0 0x000c0000
+#define VIVS_MC_PROFILE_CONFIG2_L2_TOTAL_WRITE_TRANSACTIONS_REQUEST_BY_AXI1 0x000d0000
+#define VIVS_MC_PROFILE_CONFIG2_L2_RESET 0x000f0000
+#define VIVS_MC_PROFILE_CONFIG2_L2_AXI0_MINMAX_LATENCY 0x00100000
+#define VIVS_MC_PROFILE_CONFIG2_L2_AXI0_TOTAL_LATENCY 0x00110000
+#define VIVS_MC_PROFILE_CONFIG2_L2_AXI0_TOTAL_REQUEST_COUNT 0x00120000
+#define VIVS_MC_PROFILE_CONFIG2_L2_AXI1_MINMAX_LATENCY 0x00130000
+#define VIVS_MC_PROFILE_CONFIG2_L2_AXI1_TOTAL_LATENCY 0x00140000
+#define VIVS_MC_PROFILE_CONFIG2_L2_AXI1_TOTAL_REQUEST_COUNT 0x00150000
#define VIVS_MC_PROFILE_CONFIG2_BLT__MASK 0xff000000
#define VIVS_MC_PROFILE_CONFIG2_BLT__SHIFT 24
#define VIVS_MC_PROFILE_CONFIG2_BLT_UNK0 0x00000000
@@ -566,5 +612,13 @@ DEALINGS IN THE SOFTWARE.
#define VIVS_MC_PROFILE_L2_READ 0x00000564
+#define VIVS_MC_MC_LATENCY_RESET 0x00000568
+
+#define VIVS_MC_MC_AXI_MAX_MIN_LATENCY 0x0000056c
+
+#define VIVS_MC_MC_AXI_TOTAL_LATENCY 0x00000570
+
+#define VIVS_MC_MC_AXI_SAMPLE_COUNT 0x00000574
+
#endif /* STATE_HI_XML */
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
index 8155d7e650f1..2867b39fa35e 100644
--- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
@@ -710,7 +710,6 @@ static irqreturn_t decon_irq_handler(int irq, void *dev_id)
return IRQ_HANDLED;
}
-#ifdef CONFIG_PM
static int exynos5433_decon_suspend(struct device *dev)
{
struct decon_context *ctx = dev_get_drvdata(dev);
@@ -741,14 +740,10 @@ err:
return ret;
}
-#endif
-static const struct dev_pm_ops exynos5433_decon_pm_ops = {
- SET_RUNTIME_PM_OPS(exynos5433_decon_suspend, exynos5433_decon_resume,
- NULL)
- SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
- pm_runtime_force_resume)
-};
+static DEFINE_RUNTIME_DEV_PM_OPS(exynos5433_decon_pm_ops,
+ exynos5433_decon_suspend,
+ exynos5433_decon_resume, NULL);
static const struct of_device_id exynos5433_decon_driver_dt_match[] = {
{
@@ -881,7 +876,7 @@ struct platform_driver exynos5433_decon_driver = {
.remove = exynos5433_decon_remove,
.driver = {
.name = "exynos5433-decon",
- .pm = &exynos5433_decon_pm_ops,
+ .pm = pm_ptr(&exynos5433_decon_pm_ops),
.of_match_table = exynos5433_decon_driver_dt_match,
},
};
diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
index 7080cf7952ec..3126f735dedc 100644
--- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
@@ -779,7 +779,6 @@ static int decon_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
static int exynos7_decon_suspend(struct device *dev)
{
struct decon_context *ctx = dev_get_drvdata(dev);
@@ -836,21 +835,16 @@ err_aclk_enable:
err_pclk_enable:
return ret;
}
-#endif
-static const struct dev_pm_ops exynos7_decon_pm_ops = {
- SET_RUNTIME_PM_OPS(exynos7_decon_suspend, exynos7_decon_resume,
- NULL)
- SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
- pm_runtime_force_resume)
-};
+static DEFINE_RUNTIME_DEV_PM_OPS(exynos7_decon_pm_ops, exynos7_decon_suspend,
+ exynos7_decon_resume, NULL);
struct platform_driver decon_driver = {
.probe = decon_probe,
.remove = decon_remove,
.driver = {
.name = "exynos-decon",
- .pm = &exynos7_decon_pm_ops,
+ .pm = pm_ptr(&exynos7_decon_pm_ops),
.of_match_table = decon_driver_dt_match,
},
};
diff --git a/drivers/gpu/drm/exynos/exynos_dp.c b/drivers/gpu/drm/exynos/exynos_dp.c
index 4e3d3d5f6866..3404ec1367fb 100644
--- a/drivers/gpu/drm/exynos/exynos_dp.c
+++ b/drivers/gpu/drm/exynos/exynos_dp.c
@@ -260,7 +260,6 @@ static int exynos_dp_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
static int exynos_dp_suspend(struct device *dev)
{
struct exynos_dp_device *dp = dev_get_drvdata(dev);
@@ -274,13 +273,9 @@ static int exynos_dp_resume(struct device *dev)
return analogix_dp_resume(dp->adp);
}
-#endif
-static const struct dev_pm_ops exynos_dp_pm_ops = {
- SET_RUNTIME_PM_OPS(exynos_dp_suspend, exynos_dp_resume, NULL)
- SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
- pm_runtime_force_resume)
-};
+static DEFINE_RUNTIME_DEV_PM_OPS(exynos_dp_pm_ops, exynos_dp_suspend,
+ exynos_dp_resume, NULL);
static const struct of_device_id exynos_dp_match[] = {
{ .compatible = "samsung,exynos5-dp" },
@@ -294,7 +289,7 @@ struct platform_driver dp_driver = {
.driver = {
.name = "exynos-dp",
.owner = THIS_MODULE,
- .pm = &exynos_dp_pm_ops,
+ .pm = pm_ptr(&exynos_dp_pm_ops),
.of_match_table = exynos_dp_match,
},
};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index ec673223d6b7..06d6513ddaae 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -75,10 +75,27 @@
#define DSIM_MAIN_PIX_FORMAT_RGB565 (0x4 << 12)
#define DSIM_SUB_VC (((x) & 0x3) << 16)
#define DSIM_MAIN_VC (((x) & 0x3) << 18)
-#define DSIM_HSA_MODE (1 << 20)
-#define DSIM_HBP_MODE (1 << 21)
-#define DSIM_HFP_MODE (1 << 22)
-#define DSIM_HSE_MODE (1 << 23)
+#define DSIM_HSA_DISABLE_MODE (1 << 20)
+#define DSIM_HBP_DISABLE_MODE (1 << 21)
+#define DSIM_HFP_DISABLE_MODE (1 << 22)
+/*
+ * The i.MX 8M Mini Applications Processor Reference Manual,
+ * Rev. 3, 11/2020 Page 4091
+ * The i.MX 8M Nano Applications Processor Reference Manual,
+ * Rev. 2, 07/2022 Page 3058
+ * The i.MX 8M Plus Applications Processor Reference Manual,
+ * Rev. 1, 06/2021 Page 5436
+ * named this bit as 'HseDisableMode' but the bit definition
+ * is quite opposite like
+ * 0 = Disables transfer
+ * 1 = Enables transfer
+ * which clearly states that HSE is not a disable bit.
+ *
+ * This bit is named as per the manual even though it is not
+ * a disable bit however the driver logic for handling HSE
+ * is based on the MIPI_DSI_MODE_VIDEO_HSE flag itself.
+ */
+#define DSIM_HSE_DISABLE_MODE (1 << 23)
#define DSIM_AUTO_MODE (1 << 24)
#define DSIM_VIDEO_MODE (1 << 25)
#define DSIM_BURST_MODE (1 << 26)
@@ -804,16 +821,16 @@ static int exynos_dsi_init_link(struct exynos_dsi *dsi)
if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_AUTO_VERT)
reg |= DSIM_AUTO_MODE;
if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_HSE)
- reg |= DSIM_HSE_MODE;
- if (!(dsi->mode_flags & MIPI_DSI_MODE_VIDEO_NO_HFP))
- reg |= DSIM_HFP_MODE;
- if (!(dsi->mode_flags & MIPI_DSI_MODE_VIDEO_NO_HBP))
- reg |= DSIM_HBP_MODE;
- if (!(dsi->mode_flags & MIPI_DSI_MODE_VIDEO_NO_HSA))
- reg |= DSIM_HSA_MODE;
+ reg |= DSIM_HSE_DISABLE_MODE;
+ if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_NO_HFP)
+ reg |= DSIM_HFP_DISABLE_MODE;
+ if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_NO_HBP)
+ reg |= DSIM_HBP_DISABLE_MODE;
+ if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_NO_HSA)
+ reg |= DSIM_HSA_DISABLE_MODE;
}
- if (!(dsi->mode_flags & MIPI_DSI_MODE_NO_EOT_PACKET))
+ if (dsi->mode_flags & MIPI_DSI_MODE_NO_EOT_PACKET)
reg |= DSIM_EOT_DISABLE;
switch (dsi->format) {
@@ -1428,7 +1445,8 @@ static int exynos_dsi_attach(struct drm_bridge *bridge,
{
struct exynos_dsi *dsi = bridge_to_dsi(bridge);
- return drm_bridge_attach(bridge->encoder, dsi->out_bridge, NULL, flags);
+ return drm_bridge_attach(bridge->encoder, dsi->out_bridge, bridge,
+ flags);
}
static const struct drm_bridge_funcs exynos_dsi_bridge_funcs = {
@@ -1474,7 +1492,10 @@ static int exynos_dsi_host_attach(struct mipi_dsi_host *host,
drm_bridge_add(&dsi->bridge);
- drm_bridge_attach(encoder, &dsi->bridge, NULL, 0);
+ drm_bridge_attach(encoder, &dsi->bridge,
+ list_first_entry_or_null(&encoder->bridge_chain,
+ struct drm_bridge,
+ chain_node), 0);
/*
* This is a temporary solution and should be made by more generic way.
@@ -1709,6 +1730,7 @@ static int exynos_dsi_probe(struct platform_device *pdev)
dsi->bridge.funcs = &exynos_dsi_bridge_funcs;
dsi->bridge.of_node = dev->of_node;
dsi->bridge.type = DRM_MODE_CONNECTOR_DSI;
+ dsi->bridge.pre_enable_prev_first = true;
ret = component_add(dev, &exynos_dsi_component_ops);
if (ret)
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index 55c92372fca0..4929ffe5a09a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -163,7 +163,7 @@ int exynos_drm_fbdev_init(struct drm_device *dev)
private->fb_helper = helper = &fbdev->drm_fb_helper;
- drm_fb_helper_prepare(dev, helper, &exynos_drm_fb_helper_funcs);
+ drm_fb_helper_prepare(dev, helper, PREFERRED_BPP, &exynos_drm_fb_helper_funcs);
ret = drm_fb_helper_init(dev, helper);
if (ret < 0) {
@@ -172,7 +172,7 @@ int exynos_drm_fbdev_init(struct drm_device *dev)
goto err_init;
}
- ret = drm_fb_helper_initial_config(helper, PREFERRED_BPP);
+ ret = drm_fb_helper_initial_config(helper);
if (ret < 0) {
DRM_DEV_ERROR(dev->dev,
"failed to set up hw configuration.\n");
@@ -183,8 +183,8 @@ int exynos_drm_fbdev_init(struct drm_device *dev)
err_setup:
drm_fb_helper_fini(helper);
-
err_init:
+ drm_fb_helper_unprepare(helper);
private->fb_helper = NULL;
kfree(fbdev);
@@ -219,6 +219,7 @@ void exynos_drm_fbdev_fini(struct drm_device *dev)
fbdev = to_exynos_fbdev(private->fb_helper);
exynos_drm_fbdev_destroy(dev, private->fb_helper);
+ drm_fb_helper_unprepare(private->fb_helper);
kfree(fbdev);
private->fb_helper = NULL;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index 0ee32e4b1e43..8de2714599fc 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -1381,7 +1381,6 @@ static int fimc_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
static int fimc_runtime_suspend(struct device *dev)
{
struct fimc_context *ctx = get_fimc_context(dev);
@@ -1398,13 +1397,9 @@ static int fimc_runtime_resume(struct device *dev)
DRM_DEV_DEBUG_KMS(dev, "id[%d]\n", ctx->id);
return clk_prepare_enable(ctx->clocks[FIMC_CLK_GATE]);
}
-#endif
-static const struct dev_pm_ops fimc_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
- pm_runtime_force_resume)
- SET_RUNTIME_PM_OPS(fimc_runtime_suspend, fimc_runtime_resume, NULL)
-};
+static DEFINE_RUNTIME_DEV_PM_OPS(fimc_pm_ops, fimc_runtime_suspend,
+ fimc_runtime_resume, NULL);
static const struct of_device_id fimc_of_match[] = {
{ .compatible = "samsung,exynos4210-fimc" },
@@ -1420,6 +1415,6 @@ struct platform_driver fimc_driver = {
.of_match_table = fimc_of_match,
.name = "exynos-drm-fimc",
.owner = THIS_MODULE,
- .pm = &fimc_pm_ops,
+ .pm = pm_ptr(&fimc_pm_ops),
},
};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index ae6636e6658e..7f4a0be03dd1 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -1287,7 +1287,6 @@ static int fimd_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
static int exynos_fimd_suspend(struct device *dev)
{
struct fimd_context *ctx = dev_get_drvdata(dev);
@@ -1321,13 +1320,9 @@ static int exynos_fimd_resume(struct device *dev)
return 0;
}
-#endif
-static const struct dev_pm_ops exynos_fimd_pm_ops = {
- SET_RUNTIME_PM_OPS(exynos_fimd_suspend, exynos_fimd_resume, NULL)
- SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
- pm_runtime_force_resume)
-};
+static DEFINE_RUNTIME_DEV_PM_OPS(exynos_fimd_pm_ops, exynos_fimd_suspend,
+ exynos_fimd_resume, NULL);
struct platform_driver fimd_driver = {
.probe = fimd_probe,
@@ -1335,7 +1330,7 @@ struct platform_driver fimd_driver = {
.driver = {
.name = "exynos4-fb",
.owner = THIS_MODULE,
- .pm = &exynos_fimd_pm_ops,
+ .pm = pm_ptr(&exynos_fimd_pm_ops),
.of_match_table = fimd_driver_dt_match,
},
};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index e19c2ceb3759..ec784e58da5c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -1549,7 +1549,6 @@ static int g2d_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
static int g2d_suspend(struct device *dev)
{
struct g2d_data *g2d = dev_get_drvdata(dev);
@@ -1574,9 +1573,7 @@ static int g2d_resume(struct device *dev)
return 0;
}
-#endif
-#ifdef CONFIG_PM
static int g2d_runtime_suspend(struct device *dev)
{
struct g2d_data *g2d = dev_get_drvdata(dev);
@@ -1597,11 +1594,10 @@ static int g2d_runtime_resume(struct device *dev)
return ret;
}
-#endif
static const struct dev_pm_ops g2d_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(g2d_suspend, g2d_resume)
- SET_RUNTIME_PM_OPS(g2d_runtime_suspend, g2d_runtime_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(g2d_suspend, g2d_resume)
+ RUNTIME_PM_OPS(g2d_runtime_suspend, g2d_runtime_resume, NULL)
};
static const struct of_device_id exynos_g2d_match[] = {
@@ -1617,7 +1613,7 @@ struct platform_driver g2d_driver = {
.driver = {
.name = "exynos-drm-g2d",
.owner = THIS_MODULE,
- .pm = &g2d_pm_ops,
+ .pm = pm_ptr(&g2d_pm_ops),
.of_match_table = exynos_g2d_match,
},
};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 3e493f48e0d4..638ca96830e9 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -274,7 +274,7 @@ static int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem *exynos_gem,
unsigned long vm_size;
int ret;
- vma->vm_flags &= ~VM_PFNMAP;
+ vm_flags_clear(vma, VM_PFNMAP);
vma->vm_pgoff = 0;
vm_size = vma->vm_end - vma->vm_start;
@@ -368,7 +368,7 @@ static int exynos_drm_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct
if (obj->import_attach)
return dma_buf_mmap(obj->dma_buf, vma, 0);
- vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+ vm_flags_set(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP);
DRM_DEV_DEBUG_KMS(to_dma_dev(obj->dev), "flags = 0x%x\n",
exynos_gem->flags);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_mic.c b/drivers/gpu/drm/exynos/exynos_drm_mic.c
index 09ce28ee08d9..17bab5b1663f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_mic.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_mic.c
@@ -340,7 +340,6 @@ static const struct component_ops exynos_mic_component_ops = {
.unbind = exynos_mic_unbind,
};
-#ifdef CONFIG_PM
static int exynos_mic_suspend(struct device *dev)
{
struct exynos_mic *mic = dev_get_drvdata(dev);
@@ -369,13 +368,9 @@ static int exynos_mic_resume(struct device *dev)
}
return 0;
}
-#endif
-static const struct dev_pm_ops exynos_mic_pm_ops = {
- SET_RUNTIME_PM_OPS(exynos_mic_suspend, exynos_mic_resume, NULL)
- SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
- pm_runtime_force_resume)
-};
+static DEFINE_RUNTIME_DEV_PM_OPS(exynos_mic_pm_ops, exynos_mic_suspend,
+ exynos_mic_resume, NULL);
static int exynos_mic_probe(struct platform_device *pdev)
{
@@ -470,7 +465,7 @@ struct platform_driver mic_driver = {
.remove = exynos_mic_remove,
.driver = {
.name = "exynos-mic",
- .pm = &exynos_mic_pm_ops,
+ .pm = pm_ptr(&exynos_mic_pm_ops),
.owner = THIS_MODULE,
.of_match_table = exynos_mic_of_match,
},
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
index dec7df35baa9..8706f377c349 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
@@ -340,7 +340,6 @@ static int rotator_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
static int rotator_runtime_suspend(struct device *dev)
{
struct rot_context *rot = dev_get_drvdata(dev);
@@ -355,7 +354,6 @@ static int rotator_runtime_resume(struct device *dev)
return clk_prepare_enable(rot->clock);
}
-#endif
static const struct drm_exynos_ipp_limit rotator_s5pv210_rbg888_limits[] = {
{ IPP_SIZE_LIMIT(BUFFER, .h = { 8, SZ_16K }, .v = { 8, SZ_16K }) },
@@ -450,12 +448,8 @@ static const struct of_device_id exynos_rotator_match[] = {
};
MODULE_DEVICE_TABLE(of, exynos_rotator_match);
-static const struct dev_pm_ops rotator_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
- pm_runtime_force_resume)
- SET_RUNTIME_PM_OPS(rotator_runtime_suspend, rotator_runtime_resume,
- NULL)
-};
+static DEFINE_RUNTIME_DEV_PM_OPS(rotator_pm_ops, rotator_runtime_suspend,
+ rotator_runtime_resume, NULL);
struct platform_driver rotator_driver = {
.probe = rotator_probe,
@@ -463,7 +457,7 @@ struct platform_driver rotator_driver = {
.driver = {
.name = "exynos-rotator",
.owner = THIS_MODULE,
- .pm = &rotator_pm_ops,
+ .pm = pm_ptr(&rotator_pm_ops),
.of_match_table = exynos_rotator_match,
},
};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_scaler.c b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
index 3c049fb658a3..20608e9780ce 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_scaler.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
@@ -550,8 +550,6 @@ static int scaler_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
-
static int clk_disable_unprepare_wrapper(struct clk *clk)
{
clk_disable_unprepare(clk);
@@ -584,13 +582,9 @@ static int scaler_runtime_resume(struct device *dev)
return scaler_clk_ctrl(scaler, true);
}
-#endif
-static const struct dev_pm_ops scaler_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
- pm_runtime_force_resume)
- SET_RUNTIME_PM_OPS(scaler_runtime_suspend, scaler_runtime_resume, NULL)
-};
+static DEFINE_RUNTIME_DEV_PM_OPS(scaler_pm_ops, scaler_runtime_suspend,
+ scaler_runtime_resume, NULL);
static const struct drm_exynos_ipp_limit scaler_5420_two_pixel_hv_limits[] = {
{ IPP_SIZE_LIMIT(BUFFER, .h = { 16, SZ_8K }, .v = { 16, SZ_8K }) },
@@ -731,7 +725,7 @@ struct platform_driver scaler_driver = {
.driver = {
.name = "exynos-scaler",
.owner = THIS_MODULE,
- .pm = &scaler_pm_ops,
+ .pm = pm_ptr(&scaler_pm_ops),
.of_match_table = exynos_scaler_match,
},
};
diff --git a/drivers/gpu/drm/gma500/Kconfig b/drivers/gpu/drm/gma500/Kconfig
index 807b989e3c77..2efc0eb41c64 100644
--- a/drivers/gpu/drm/gma500/Kconfig
+++ b/drivers/gpu/drm/gma500/Kconfig
@@ -3,6 +3,8 @@ config DRM_GMA500
tristate "Intel GMA500/600/3600/3650 KMS Framebuffer"
depends on DRM && PCI && X86 && MMU
select DRM_KMS_HELPER
+ select I2C
+ select I2C_ALGOBIT
# GMA500 depends on ACPI_VIDEO when ACPI is enabled, just like i915
select ACPI_VIDEO if ACPI
select BACKLIGHT_CLASS_DEVICE if ACPI
diff --git a/drivers/gpu/drm/gma500/backlight.c b/drivers/gpu/drm/gma500/backlight.c
index 577a4987b193..8711a7a5b8da 100644
--- a/drivers/gpu/drm/gma500/backlight.c
+++ b/drivers/gpu/drm/gma500/backlight.c
@@ -7,6 +7,8 @@
* Authors: Eric Knopp
*/
+#include <linux/backlight.h>
+
#include <acpi/video.h>
#include "psb_drv.h"
diff --git a/drivers/gpu/drm/gma500/cdv_device.c b/drivers/gpu/drm/gma500/cdv_device.c
index 3065596257e9..3e83299113e3 100644
--- a/drivers/gpu/drm/gma500/cdv_device.c
+++ b/drivers/gpu/drm/gma500/cdv_device.c
@@ -8,6 +8,7 @@
#include <linux/delay.h>
#include <drm/drm.h>
+#include <drm/drm_crtc_helper.h>
#include "cdv_device.h"
#include "gma_device.h"
diff --git a/drivers/gpu/drm/gma500/cdv_intel_crt.c b/drivers/gpu/drm/gma500/cdv_intel_crt.c
index 7ff1e5141150..5a0acd914f76 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_crt.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_crt.c
@@ -28,6 +28,8 @@
#include <linux/i2c.h>
#include <linux/pm_runtime.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_simple_kms_helper.h>
#include "cdv_device.h"
diff --git a/drivers/gpu/drm/gma500/cdv_intel_display.c b/drivers/gpu/drm/gma500/cdv_intel_display.c
index 0c3ddcdc29dc..bbd0abdd8382 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_display.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_display.c
@@ -10,6 +10,7 @@
#include <linux/i2c.h>
#include <drm/drm_crtc.h>
+#include <drm/drm_modeset_helper_vtables.h>
#include "cdv_device.h"
#include "framebuffer.h"
diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c
index 53b967282d6a..8992a95076f2 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_dp.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c
@@ -33,6 +33,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
+#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_simple_kms_helper.h>
#include "gma_display.h"
diff --git a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
index 29ef45f14169..2d95e0471291 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
@@ -28,7 +28,9 @@
#include <drm/drm.h>
#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
+#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_simple_kms_helper.h>
#include "cdv_device.h"
diff --git a/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
index be6efcaaa3b3..f08a6803dc18 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
@@ -12,6 +12,8 @@
#include <linux/i2c.h>
#include <linux/pm_runtime.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_simple_kms_helper.h>
#include "cdv_device.h"
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index 8d5a37b8f110..50611eb7f134 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -19,10 +19,12 @@
#include <drm/drm.h>
#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_modeset_helper.h>
#include "framebuffer.h"
#include "gem.h"
@@ -139,7 +141,7 @@ static int psbfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
*/
vma->vm_ops = &psbfb_vm_ops;
vma->vm_private_data = (void *)fb;
- vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP;
+ vm_flags_set(vma, VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP);
return 0;
}
@@ -297,11 +299,6 @@ static int psbfb_create(struct drm_fb_helper *fb_helper,
info->screen_base = dev_priv->vram_addr + backing->offset;
info->screen_size = size;
- if (dev_priv->gtt.stolen_size) {
- info->apertures->ranges[0].base = dev_priv->fb_base;
- info->apertures->ranges[0].size = dev_priv->gtt.stolen_size;
- }
-
drm_fb_helper_fill_info(info, fb_helper, sizes);
info->fix.mmio_start = pci_resource_start(pdev, 0);
@@ -412,7 +409,7 @@ int psb_fbdev_init(struct drm_device *dev)
dev_priv->fb_helper = fb_helper;
- drm_fb_helper_prepare(dev, fb_helper, &psb_fb_helper_funcs);
+ drm_fb_helper_prepare(dev, fb_helper, 32, &psb_fb_helper_funcs);
ret = drm_fb_helper_init(dev, fb_helper);
if (ret)
@@ -421,7 +418,7 @@ int psb_fbdev_init(struct drm_device *dev)
/* disable all the possible outputs/crtcs before entering KMS mode */
drm_helper_disable_unused_functions(dev);
- ret = drm_fb_helper_initial_config(fb_helper, 32);
+ ret = drm_fb_helper_initial_config(fb_helper);
if (ret)
goto fini;
@@ -430,6 +427,7 @@ int psb_fbdev_init(struct drm_device *dev)
fini:
drm_fb_helper_fini(fb_helper);
free:
+ drm_fb_helper_unprepare(fb_helper);
kfree(fb_helper);
return ret;
}
@@ -442,6 +440,7 @@ static void psb_fbdev_fini(struct drm_device *dev)
return;
psb_fbdev_destroy(dev, dev_priv->fb_helper);
+ drm_fb_helper_unprepare(dev_priv->fb_helper);
kfree(dev_priv->fb_helper);
dev_priv->fb_helper = NULL;
}
diff --git a/drivers/gpu/drm/gma500/gma_display.c b/drivers/gpu/drm/gma500/gma_display.c
index fe7b8436f87a..f65e90d890f4 100644
--- a/drivers/gpu/drm/gma500/gma_display.c
+++ b/drivers/gpu/drm/gma500/gma_display.c
@@ -11,8 +11,10 @@
#include <linux/highmem.h>
#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
+#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_vblank.h>
#include "framebuffer.h"
diff --git a/drivers/gpu/drm/gma500/oaktrail_crtc.c b/drivers/gpu/drm/gma500/oaktrail_crtc.c
index 64761f46b434..de8ccfe9890f 100644
--- a/drivers/gpu/drm/gma500/oaktrail_crtc.c
+++ b/drivers/gpu/drm/gma500/oaktrail_crtc.c
@@ -9,6 +9,7 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
+#include <drm/drm_modeset_helper_vtables.h>
#include "framebuffer.h"
#include "gem.h"
diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi.c b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
index 95b7cb099e63..ed8626c73541 100644
--- a/drivers/gpu/drm/gma500/oaktrail_hdmi.c
+++ b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
@@ -27,7 +27,9 @@
#include <linux/delay.h>
#include <drm/drm.h>
+#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
+#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_simple_kms_helper.h>
#include "psb_drv.h"
diff --git a/drivers/gpu/drm/gma500/oaktrail_lvds.c b/drivers/gpu/drm/gma500/oaktrail_lvds.c
index 75b4eb1c8884..d974d0c60d2a 100644
--- a/drivers/gpu/drm/gma500/oaktrail_lvds.c
+++ b/drivers/gpu/drm/gma500/oaktrail_lvds.c
@@ -14,6 +14,7 @@
#include <asm/intel-mid.h>
#include <drm/drm_edid.h>
+#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_simple_kms_helper.h>
#include "intel_bios.h"
diff --git a/drivers/gpu/drm/gma500/psb_device.c b/drivers/gpu/drm/gma500/psb_device.c
index 3c294c38bdb4..dcfcd7b89d4a 100644
--- a/drivers/gpu/drm/gma500/psb_device.c
+++ b/drivers/gpu/drm/gma500/psb_device.c
@@ -6,6 +6,7 @@
**************************************************************************/
#include <drm/drm.h>
+#include <drm/drm_crtc_helper.h>
#include "gma_device.h"
#include "intel_bios.h"
diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c
index 531c1781a8fb..ff46e88c4768 100644
--- a/drivers/gpu/drm/gma500/psb_intel_display.c
+++ b/drivers/gpu/drm/gma500/psb_intel_display.c
@@ -9,6 +9,9 @@
#include <linux/delay.h>
#include <linux/i2c.h>
+#include <drm/drm_modeset_helper.h>
+#include <drm/drm_modeset_helper_vtables.h>
+
#include "framebuffer.h"
#include "gem.h"
#include "gma_display.h"
diff --git a/drivers/gpu/drm/gma500/psb_intel_drv.h b/drivers/gpu/drm/gma500/psb_intel_drv.h
index 8a1111fe714b..0bb85494e3da 100644
--- a/drivers/gpu/drm/gma500/psb_intel_drv.h
+++ b/drivers/gpu/drm/gma500/psb_intel_drv.h
@@ -9,7 +9,6 @@
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
#include <drm/drm_encoder.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c
index 7ee6c8ce103b..8486de230ec9 100644
--- a/drivers/gpu/drm/gma500/psb_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c
@@ -11,6 +11,8 @@
#include <linux/i2c.h>
#include <linux/pm_runtime.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_simple_kms_helper.h>
#include "intel_bios.h"
diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
index bdced46dd333..d6fd5d726216 100644
--- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c
+++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
@@ -33,7 +33,9 @@
#include <linux/slab.h>
#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
+#include <drm/drm_modeset_helper_vtables.h>
#include "psb_drv.h"
#include "psb_intel_drv.h"
diff --git a/drivers/gpu/drm/gud/gud_connector.c b/drivers/gpu/drm/gud/gud_connector.c
index fa636206f232..034e78360d4f 100644
--- a/drivers/gpu/drm/gud/gud_connector.c
+++ b/drivers/gpu/drm/gud/gud_connector.c
@@ -303,7 +303,7 @@ static int gud_connector_atomic_check(struct drm_connector *connector,
old_state->tv.margins.right != new_state->tv.margins.right ||
old_state->tv.margins.top != new_state->tv.margins.top ||
old_state->tv.margins.bottom != new_state->tv.margins.bottom ||
- old_state->tv.mode != new_state->tv.mode ||
+ old_state->tv.legacy_mode != new_state->tv.legacy_mode ||
old_state->tv.brightness != new_state->tv.brightness ||
old_state->tv.contrast != new_state->tv.contrast ||
old_state->tv.flicker_reduction != new_state->tv.flicker_reduction ||
@@ -400,7 +400,7 @@ static int gud_connector_add_tv_mode(struct gud_device *gdrm, struct drm_connect
for (i = 0; i < num_modes; i++)
modes[i] = &buf[i * GUD_CONNECTOR_TV_MODE_NAME_LEN];
- ret = drm_mode_create_tv_properties(connector->dev, num_modes, modes);
+ ret = drm_mode_create_tv_properties_legacy(connector->dev, num_modes, modes);
free:
kfree(buf);
if (ret < 0)
@@ -424,7 +424,7 @@ gud_connector_property_lookup(struct drm_connector *connector, u16 prop)
case GUD_PROPERTY_TV_BOTTOM_MARGIN:
return config->tv_bottom_margin_property;
case GUD_PROPERTY_TV_MODE:
- return config->tv_mode_property;
+ return config->legacy_tv_mode_property;
case GUD_PROPERTY_TV_BRIGHTNESS:
return config->tv_brightness_property;
case GUD_PROPERTY_TV_CONTRAST:
@@ -454,7 +454,7 @@ static unsigned int *gud_connector_tv_state_val(u16 prop, struct drm_tv_connecto
case GUD_PROPERTY_TV_BOTTOM_MARGIN:
return &state->margins.bottom;
case GUD_PROPERTY_TV_MODE:
- return &state->mode;
+ return &state->legacy_mode;
case GUD_PROPERTY_TV_BRIGHTNESS:
return &state->brightness;
case GUD_PROPERTY_TV_CONTRAST:
@@ -539,7 +539,7 @@ static int gud_connector_add_properties(struct gud_device *gdrm, struct gud_conn
fallthrough;
case GUD_PROPERTY_TV_HUE:
/* This is a no-op if already added. */
- ret = drm_mode_create_tv_properties(drm, 0, NULL);
+ ret = drm_mode_create_tv_properties_legacy(drm, 0, NULL);
if (ret)
goto out;
break;
diff --git a/drivers/gpu/drm/gud/gud_drv.c b/drivers/gpu/drm/gud/gud_drv.c
index d57dab104358..9d7bf8ee45f1 100644
--- a/drivers/gpu/drm/gud/gud_drv.c
+++ b/drivers/gpu/drm/gud/gud_drv.c
@@ -325,8 +325,8 @@ static struct drm_gem_object *gud_gem_prime_import(struct drm_device *drm, struc
static int gud_stats_debugfs(struct seq_file *m, void *data)
{
- struct drm_info_node *node = m->private;
- struct gud_device *gdrm = to_gud_device(node->minor->dev);
+ struct drm_debugfs_entry *entry = m->private;
+ struct gud_device *gdrm = to_gud_device(entry->dev);
char buf[10];
string_get_size(gdrm->bulk_len, 1, STRING_UNITS_2, buf, sizeof(buf));
@@ -352,19 +352,10 @@ static int gud_stats_debugfs(struct seq_file *m, void *data)
return 0;
}
-static const struct drm_info_list gud_debugfs_list[] = {
- { "stats", gud_stats_debugfs, 0, NULL },
-};
-
-static void gud_debugfs_init(struct drm_minor *minor)
-{
- drm_debugfs_create_files(gud_debugfs_list, ARRAY_SIZE(gud_debugfs_list),
- minor->debugfs_root, minor);
-}
-
static const struct drm_simple_display_pipe_funcs gud_pipe_funcs = {
.check = gud_pipe_check,
.update = gud_pipe_update,
+ DRM_GEM_SIMPLE_DISPLAY_PIPE_SHADOW_PLANE_FUNCS
};
static const struct drm_mode_config_funcs gud_mode_config_funcs = {
@@ -385,7 +376,6 @@ static const struct drm_driver gud_drm_driver = {
.fops = &gud_fops,
DRM_GEM_SHMEM_DRIVER_OPS,
.gem_prime_import = gud_gem_prime_import,
- .debugfs_init = gud_debugfs_init,
.name = "gud",
.desc = "Generic USB Display",
@@ -622,6 +612,8 @@ static int gud_probe(struct usb_interface *intf, const struct usb_device_id *id)
if (!gdrm->dmadev)
dev_warn(dev, "buffer sharing not supported");
+ drm_debugfs_add_file(drm, "stats", gud_stats_debugfs, NULL);
+
ret = drm_dev_register(drm, 0);
if (ret) {
put_device(gdrm->dmadev);
diff --git a/drivers/gpu/drm/gud/gud_internal.h b/drivers/gpu/drm/gud/gud_internal.h
index e351a1f1420d..0d148a6f27aa 100644
--- a/drivers/gpu/drm/gud/gud_internal.h
+++ b/drivers/gpu/drm/gud/gud_internal.h
@@ -43,6 +43,7 @@ struct gud_device {
struct drm_framebuffer *fb;
struct drm_rect damage;
bool prev_flush_failed;
+ void *shadow_buf;
};
static inline struct gud_device *to_gud_device(struct drm_device *drm)
diff --git a/drivers/gpu/drm/gud/gud_pipe.c b/drivers/gpu/drm/gud/gud_pipe.c
index 7c6dc2bcd14a..dc16a92625d4 100644
--- a/drivers/gpu/drm/gud/gud_pipe.c
+++ b/drivers/gpu/drm/gud/gud_pipe.c
@@ -5,6 +5,7 @@
#include <linux/lz4.h>
#include <linux/usb.h>
+#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <drm/drm_atomic.h>
@@ -15,6 +16,7 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem.h>
+#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_print.h>
#include <drm/drm_rect.h>
@@ -24,17 +26,13 @@
#include "gud_internal.h"
/*
- * Some userspace rendering loops runs all displays in the same loop.
+ * Some userspace rendering loops run all displays in the same loop.
* This means that a fast display will have to wait for a slow one.
- * For this reason gud does flushing asynchronous by default.
- * The down side is that in e.g. a single display setup userspace thinks
- * the display is insanely fast since the driver reports back immediately
- * that the flush/pageflip is done. This wastes CPU and power.
- * Such users might want to set this module parameter to false.
+ * Such users might want to enable this module parameter.
*/
-static bool gud_async_flush = true;
+static bool gud_async_flush;
module_param_named(async_flush, gud_async_flush, bool, 0644);
-MODULE_PARM_DESC(async_flush, "Enable asynchronous flushing [default=true]");
+MODULE_PARM_DESC(async_flush, "Enable asynchronous flushing [default=0]");
/*
* FIXME: The driver is probably broken on Big Endian machines.
@@ -152,32 +150,21 @@ static size_t gud_xrgb8888_to_color(u8 *dst, const struct drm_format_info *forma
}
static int gud_prep_flush(struct gud_device *gdrm, struct drm_framebuffer *fb,
+ const struct iosys_map *src, bool cached_reads,
const struct drm_format_info *format, struct drm_rect *rect,
struct gud_set_buffer_req *req)
{
- struct dma_buf_attachment *import_attach = fb->obj[0]->import_attach;
u8 compression = gdrm->compression;
- struct iosys_map map[DRM_FORMAT_MAX_PLANES];
- struct iosys_map map_data[DRM_FORMAT_MAX_PLANES];
struct iosys_map dst;
void *vaddr, *buf;
size_t pitch, len;
- int ret = 0;
pitch = drm_format_info_min_pitch(format, 0, drm_rect_width(rect));
len = pitch * drm_rect_height(rect);
if (len > gdrm->bulk_len)
return -E2BIG;
- ret = drm_gem_fb_vmap(fb, map, map_data);
- if (ret)
- return ret;
-
- vaddr = map_data[0].vaddr;
-
- ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE);
- if (ret)
- goto vunmap;
+ vaddr = src[0].vaddr;
retry:
if (compression)
buf = gdrm->compress_buf;
@@ -192,29 +179,27 @@ retry:
if (format != fb->format) {
if (format->format == GUD_DRM_FORMAT_R1) {
len = gud_xrgb8888_to_r124(buf, format, vaddr, fb, rect);
- if (!len) {
- ret = -ENOMEM;
- goto end_cpu_access;
- }
+ if (!len)
+ return -ENOMEM;
} else if (format->format == DRM_FORMAT_R8) {
- drm_fb_xrgb8888_to_gray8(&dst, NULL, map_data, fb, rect);
+ drm_fb_xrgb8888_to_gray8(&dst, NULL, src, fb, rect);
} else if (format->format == DRM_FORMAT_RGB332) {
- drm_fb_xrgb8888_to_rgb332(&dst, NULL, map_data, fb, rect);
+ drm_fb_xrgb8888_to_rgb332(&dst, NULL, src, fb, rect);
} else if (format->format == DRM_FORMAT_RGB565) {
- drm_fb_xrgb8888_to_rgb565(&dst, NULL, map_data, fb, rect,
+ drm_fb_xrgb8888_to_rgb565(&dst, NULL, src, fb, rect,
gud_is_big_endian());
} else if (format->format == DRM_FORMAT_RGB888) {
- drm_fb_xrgb8888_to_rgb888(&dst, NULL, map_data, fb, rect);
+ drm_fb_xrgb8888_to_rgb888(&dst, NULL, src, fb, rect);
} else {
len = gud_xrgb8888_to_color(buf, format, vaddr, fb, rect);
}
} else if (gud_is_big_endian() && format->cpp[0] > 1) {
- drm_fb_swab(&dst, NULL, map_data, fb, rect, !import_attach);
- } else if (compression && !import_attach && pitch == fb->pitches[0]) {
+ drm_fb_swab(&dst, NULL, src, fb, rect, cached_reads);
+ } else if (compression && cached_reads && pitch == fb->pitches[0]) {
/* can compress directly from the framebuffer */
buf = vaddr + rect->y1 * pitch;
} else {
- drm_fb_memcpy(&dst, NULL, map_data, fb, rect);
+ drm_fb_memcpy(&dst, NULL, src, fb, rect);
}
memset(req, 0, sizeof(*req));
@@ -237,12 +222,7 @@ retry:
req->compressed_length = cpu_to_le32(complen);
}
-end_cpu_access:
- drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
-vunmap:
- drm_gem_fb_vunmap(fb, map);
-
- return ret;
+ return 0;
}
struct gud_usb_bulk_context {
@@ -285,6 +265,7 @@ static int gud_usb_bulk(struct gud_device *gdrm, size_t len)
}
static int gud_flush_rect(struct gud_device *gdrm, struct drm_framebuffer *fb,
+ const struct iosys_map *src, bool cached_reads,
const struct drm_format_info *format, struct drm_rect *rect)
{
struct gud_set_buffer_req req;
@@ -293,7 +274,7 @@ static int gud_flush_rect(struct gud_device *gdrm, struct drm_framebuffer *fb,
drm_dbg(&gdrm->drm, "Flushing [FB:%d] " DRM_RECT_FMT "\n", fb->base.id, DRM_RECT_ARG(rect));
- ret = gud_prep_flush(gdrm, fb, format, rect, &req);
+ ret = gud_prep_flush(gdrm, fb, src, cached_reads, format, rect, &req);
if (ret)
return ret;
@@ -333,46 +314,51 @@ void gud_clear_damage(struct gud_device *gdrm)
gdrm->damage.y2 = 0;
}
-static void gud_add_damage(struct gud_device *gdrm, struct drm_rect *damage)
+static void gud_flush_damage(struct gud_device *gdrm, struct drm_framebuffer *fb,
+ const struct iosys_map *src, bool cached_reads,
+ struct drm_rect *damage)
{
- gdrm->damage.x1 = min(gdrm->damage.x1, damage->x1);
- gdrm->damage.y1 = min(gdrm->damage.y1, damage->y1);
- gdrm->damage.x2 = max(gdrm->damage.x2, damage->x2);
- gdrm->damage.y2 = max(gdrm->damage.y2, damage->y2);
-}
+ const struct drm_format_info *format;
+ unsigned int i, lines;
+ size_t pitch;
+ int ret;
-static void gud_retry_failed_flush(struct gud_device *gdrm, struct drm_framebuffer *fb,
- struct drm_rect *damage)
-{
- /*
- * pipe_update waits for the worker when the display mode is going to change.
- * This ensures that the width and height is still the same making it safe to
- * add back the damage.
- */
+ format = fb->format;
+ if (format->format == DRM_FORMAT_XRGB8888 && gdrm->xrgb8888_emulation_format)
+ format = gdrm->xrgb8888_emulation_format;
- mutex_lock(&gdrm->damage_lock);
- if (!gdrm->fb) {
- drm_framebuffer_get(fb);
- gdrm->fb = fb;
- }
- gud_add_damage(gdrm, damage);
- mutex_unlock(&gdrm->damage_lock);
+ /* Split update if it's too big */
+ pitch = drm_format_info_min_pitch(format, 0, drm_rect_width(damage));
+ lines = drm_rect_height(damage);
+
+ if (gdrm->bulk_len < lines * pitch)
+ lines = gdrm->bulk_len / pitch;
+
+ for (i = 0; i < DIV_ROUND_UP(drm_rect_height(damage), lines); i++) {
+ struct drm_rect rect = *damage;
- /* Retry only once to avoid a possible storm in case of continues errors. */
- if (!gdrm->prev_flush_failed)
- queue_work(system_long_wq, &gdrm->work);
- gdrm->prev_flush_failed = true;
+ rect.y1 += i * lines;
+ rect.y2 = min_t(u32, rect.y1 + lines, damage->y2);
+
+ ret = gud_flush_rect(gdrm, fb, src, cached_reads, format, &rect);
+ if (ret) {
+ if (ret != -ENODEV && ret != -ECONNRESET &&
+ ret != -ESHUTDOWN && ret != -EPROTO)
+ dev_err_ratelimited(fb->dev->dev,
+ "Failed to flush framebuffer: error=%d\n", ret);
+ gdrm->prev_flush_failed = true;
+ break;
+ }
+ }
}
void gud_flush_work(struct work_struct *work)
{
struct gud_device *gdrm = container_of(work, struct gud_device, work);
- const struct drm_format_info *format;
+ struct iosys_map shadow_map;
struct drm_framebuffer *fb;
struct drm_rect damage;
- unsigned int i, lines;
- int idx, ret = 0;
- size_t pitch;
+ int idx;
if (!drm_dev_enter(&gdrm->drm, &idx))
return;
@@ -380,6 +366,7 @@ void gud_flush_work(struct work_struct *work)
mutex_lock(&gdrm->damage_lock);
fb = gdrm->fb;
gdrm->fb = NULL;
+ iosys_map_set_vaddr(&shadow_map, gdrm->shadow_buf);
damage = gdrm->damage;
gud_clear_damage(gdrm);
mutex_unlock(&gdrm->damage_lock);
@@ -387,59 +374,43 @@ void gud_flush_work(struct work_struct *work)
if (!fb)
goto out;
- format = fb->format;
- if (format->format == DRM_FORMAT_XRGB8888 && gdrm->xrgb8888_emulation_format)
- format = gdrm->xrgb8888_emulation_format;
-
- /* Split update if it's too big */
- pitch = drm_format_info_min_pitch(format, 0, drm_rect_width(&damage));
- lines = drm_rect_height(&damage);
-
- if (gdrm->bulk_len < lines * pitch)
- lines = gdrm->bulk_len / pitch;
-
- for (i = 0; i < DIV_ROUND_UP(drm_rect_height(&damage), lines); i++) {
- struct drm_rect rect = damage;
-
- rect.y1 += i * lines;
- rect.y2 = min_t(u32, rect.y1 + lines, damage.y2);
-
- ret = gud_flush_rect(gdrm, fb, format, &rect);
- if (ret) {
- if (ret != -ENODEV && ret != -ECONNRESET &&
- ret != -ESHUTDOWN && ret != -EPROTO) {
- bool prev_flush_failed = gdrm->prev_flush_failed;
-
- gud_retry_failed_flush(gdrm, fb, &damage);
- if (!prev_flush_failed)
- dev_err_ratelimited(fb->dev->dev,
- "Failed to flush framebuffer: error=%d\n", ret);
- }
- break;
- }
-
- gdrm->prev_flush_failed = false;
- }
+ gud_flush_damage(gdrm, fb, &shadow_map, true, &damage);
drm_framebuffer_put(fb);
out:
drm_dev_exit(idx);
}
-static void gud_fb_queue_damage(struct gud_device *gdrm, struct drm_framebuffer *fb,
- struct drm_rect *damage)
+static int gud_fb_queue_damage(struct gud_device *gdrm, struct drm_framebuffer *fb,
+ const struct iosys_map *src, struct drm_rect *damage)
{
struct drm_framebuffer *old_fb = NULL;
+ struct iosys_map shadow_map;
mutex_lock(&gdrm->damage_lock);
+ if (!gdrm->shadow_buf) {
+ gdrm->shadow_buf = vzalloc(fb->pitches[0] * fb->height);
+ if (!gdrm->shadow_buf) {
+ mutex_unlock(&gdrm->damage_lock);
+ return -ENOMEM;
+ }
+ }
+
+ iosys_map_set_vaddr(&shadow_map, gdrm->shadow_buf);
+ iosys_map_incr(&shadow_map, drm_fb_clip_offset(fb->pitches[0], fb->format, damage));
+ drm_fb_memcpy(&shadow_map, fb->pitches, src, fb, damage);
+
if (fb != gdrm->fb) {
old_fb = gdrm->fb;
drm_framebuffer_get(fb);
gdrm->fb = fb;
}
- gud_add_damage(gdrm, damage);
+ gdrm->damage.x1 = min(gdrm->damage.x1, damage->x1);
+ gdrm->damage.y1 = min(gdrm->damage.y1, damage->y1);
+ gdrm->damage.x2 = max(gdrm->damage.x2, damage->x2);
+ gdrm->damage.y2 = max(gdrm->damage.y2, damage->y2);
mutex_unlock(&gdrm->damage_lock);
@@ -447,6 +418,26 @@ static void gud_fb_queue_damage(struct gud_device *gdrm, struct drm_framebuffer
if (old_fb)
drm_framebuffer_put(old_fb);
+
+ return 0;
+}
+
+static void gud_fb_handle_damage(struct gud_device *gdrm, struct drm_framebuffer *fb,
+ const struct iosys_map *src, struct drm_rect *damage)
+{
+ int ret;
+
+ if (gdrm->flags & GUD_DISPLAY_FLAG_FULL_UPDATE)
+ drm_rect_init(damage, 0, 0, fb->width, fb->height);
+
+ if (gud_async_flush) {
+ ret = gud_fb_queue_damage(gdrm, fb, src, damage);
+ if (ret != -ENOMEM)
+ return;
+ }
+
+ /* Imported buffers are assumed to be WriteCombined with uncached reads */
+ gud_flush_damage(gdrm, fb, src, !fb->obj[0]->import_attach, damage);
}
int gud_pipe_check(struct drm_simple_display_pipe *pipe,
@@ -571,10 +562,11 @@ void gud_pipe_update(struct drm_simple_display_pipe *pipe,
struct drm_device *drm = pipe->crtc.dev;
struct gud_device *gdrm = to_gud_device(drm);
struct drm_plane_state *state = pipe->plane.state;
+ struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(state);
struct drm_framebuffer *fb = state->fb;
struct drm_crtc *crtc = &pipe->crtc;
struct drm_rect damage;
- int idx;
+ int ret, idx;
if (crtc->state->mode_changed || !crtc->state->enable) {
cancel_work_sync(&gdrm->work);
@@ -584,6 +576,8 @@ void gud_pipe_update(struct drm_simple_display_pipe *pipe,
gdrm->fb = NULL;
}
gud_clear_damage(gdrm);
+ vfree(gdrm->shadow_buf);
+ gdrm->shadow_buf = NULL;
mutex_unlock(&gdrm->damage_lock);
}
@@ -599,14 +593,19 @@ void gud_pipe_update(struct drm_simple_display_pipe *pipe,
if (crtc->state->active_changed)
gud_usb_set_u8(gdrm, GUD_REQ_SET_DISPLAY_ENABLE, crtc->state->active);
- if (drm_atomic_helper_damage_merged(old_state, state, &damage)) {
- if (gdrm->flags & GUD_DISPLAY_FLAG_FULL_UPDATE)
- drm_rect_init(&damage, 0, 0, fb->width, fb->height);
- gud_fb_queue_damage(gdrm, fb, &damage);
- if (!gud_async_flush)
- flush_work(&gdrm->work);
- }
+ if (!fb)
+ goto ctrl_disable;
+
+ ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE);
+ if (ret)
+ goto ctrl_disable;
+
+ if (drm_atomic_helper_damage_merged(old_state, state, &damage))
+ gud_fb_handle_damage(gdrm, fb, &shadow_plane_state->data[0], &damage);
+
+ drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
+ctrl_disable:
if (!crtc->state->enable)
gud_usb_set_u8(gdrm, GUD_REQ_SET_CONTROLLER_ENABLE, 0);
diff --git a/drivers/gpu/drm/hisilicon/hibmc/Kconfig b/drivers/gpu/drm/hisilicon/hibmc/Kconfig
index 4e41c144a290..126504318a4f 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/Kconfig
+++ b/drivers/gpu/drm/hisilicon/hibmc/Kconfig
@@ -7,6 +7,8 @@ config DRM_HISI_HIBMC
select DRM_VRAM_HELPER
select DRM_TTM
select DRM_TTM_HELPER
+ select I2C
+ select I2C_ALGOBIT
help
Choose this option if you have a Hisilicon Hibmc soc chipset.
If M is selected the module will be called hibmc-drm.
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
index 22053c613644..0c4aa4d9b0a7 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
@@ -106,7 +106,7 @@ static int hibmc_kms_init(struct hibmc_drm_private *priv)
dev->mode_config.max_width = 1920;
dev->mode_config.max_height = 1200;
- dev->mode_config.preferred_depth = 32;
+ dev->mode_config.preferred_depth = 24;
dev->mode_config.prefer_shadow = 1;
dev->mode_config.funcs = (void *)&hibmc_mode_funcs;
@@ -340,7 +340,7 @@ static int hibmc_pci_probe(struct pci_dev *pdev,
goto err_unload;
}
- drm_fbdev_generic_setup(dev, dev->mode_config.preferred_depth);
+ drm_fbdev_generic_setup(dev, 32);
return 0;
diff --git a/drivers/gpu/drm/hyperv/hyperv_drm_drv.c b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c
index 427c20ba3404..f830d62a5ce6 100644
--- a/drivers/gpu/drm/hyperv/hyperv_drm_drv.c
+++ b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c
@@ -165,7 +165,7 @@ err_hv_set_drv_data:
return ret;
}
-static int hyperv_vmbus_remove(struct hv_device *hdev)
+static void hyperv_vmbus_remove(struct hv_device *hdev)
{
struct drm_device *dev = hv_get_drvdata(hdev);
struct hyperv_drm_device *hv = to_hv(dev);
@@ -176,8 +176,6 @@ static int hyperv_vmbus_remove(struct hv_device *hdev)
hv_set_drvdata(hdev, NULL);
vmbus_free_mmio(hv->mem->start, hv->fb_size);
-
- return 0;
}
static int hyperv_vmbus_suspend(struct hv_device *hdev)
diff --git a/drivers/gpu/drm/i2c/ch7006_drv.c b/drivers/gpu/drm/i2c/ch7006_drv.c
index 578b738859b9..521bdf656cca 100644
--- a/drivers/gpu/drm/i2c/ch7006_drv.c
+++ b/drivers/gpu/drm/i2c/ch7006_drv.c
@@ -26,6 +26,8 @@
#include <linux/module.h>
+#include <drm/drm_crtc_helper.h>
+
#include "ch7006_priv.h"
/* DRM encoder functions */
@@ -250,7 +252,7 @@ static int ch7006_encoder_create_resources(struct drm_encoder *encoder,
struct drm_device *dev = encoder->dev;
struct drm_mode_config *conf = &dev->mode_config;
- drm_mode_create_tv_properties(dev, NUM_TV_NORMS, ch7006_tv_norm_names);
+ drm_mode_create_tv_properties_legacy(dev, NUM_TV_NORMS, ch7006_tv_norm_names);
priv->scale_property = drm_property_create_range(dev, 0, "scale", 0, 2);
if (!priv->scale_property)
@@ -264,8 +266,8 @@ static int ch7006_encoder_create_resources(struct drm_encoder *encoder,
priv->hmargin);
drm_object_attach_property(&connector->base, conf->tv_bottom_margin_property,
priv->vmargin);
- drm_object_attach_property(&connector->base, conf->tv_mode_property,
- priv->norm);
+ drm_object_attach_property(&connector->base, conf->legacy_tv_mode_property,
+ priv->norm);
drm_object_attach_property(&connector->base, conf->tv_brightness_property,
priv->brightness);
drm_object_attach_property(&connector->base, conf->tv_contrast_property,
@@ -315,7 +317,7 @@ static int ch7006_encoder_set_property(struct drm_encoder *encoder,
ch7006_load_reg(client, state, CH7006_POV);
ch7006_load_reg(client, state, CH7006_VPOS);
- } else if (property == conf->tv_mode_property) {
+ } else if (property == conf->legacy_tv_mode_property) {
if (connector->dpms != DRM_MODE_DPMS_OFF)
return -EINVAL;
@@ -386,7 +388,7 @@ static const struct drm_encoder_slave_funcs ch7006_encoder_funcs = {
/* I2C driver functions */
-static int ch7006_probe(struct i2c_client *client, const struct i2c_device_id *id)
+static int ch7006_probe(struct i2c_client *client)
{
uint8_t addr = CH7006_VERSION_ID;
uint8_t val;
@@ -495,7 +497,7 @@ static const struct dev_pm_ops ch7006_pm_ops = {
static struct drm_i2c_encoder_driver ch7006_driver = {
.i2c_driver = {
- .probe = ch7006_probe,
+ .probe_new = ch7006_probe,
.remove = ch7006_remove,
.driver = {
diff --git a/drivers/gpu/drm/i2c/ch7006_priv.h b/drivers/gpu/drm/i2c/ch7006_priv.h
index 986b04599906..052bdc48a339 100644
--- a/drivers/gpu/drm/i2c/ch7006_priv.h
+++ b/drivers/gpu/drm/i2c/ch7006_priv.h
@@ -27,7 +27,6 @@
#ifndef __DRM_I2C_CH7006_PRIV_H__
#define __DRM_I2C_CH7006_PRIV_H__
-#include <drm/drm_crtc_helper.h>
#include <drm/drm_encoder_slave.h>
#include <drm/drm_probe_helper.h>
#include <drm/i2c/ch7006.h>
diff --git a/drivers/gpu/drm/i2c/sil164_drv.c b/drivers/gpu/drm/i2c/sil164_drv.c
index 1bc0b5de4499..f57f9a807542 100644
--- a/drivers/gpu/drm/i2c/sil164_drv.c
+++ b/drivers/gpu/drm/i2c/sil164_drv.c
@@ -350,7 +350,7 @@ static const struct drm_encoder_slave_funcs sil164_encoder_funcs = {
/* I2C driver functions */
static int
-sil164_probe(struct i2c_client *client, const struct i2c_device_id *id)
+sil164_probe(struct i2c_client *client)
{
int vendor = sil164_read(client, SIL164_VENDOR_HI) << 8 |
sil164_read(client, SIL164_VENDOR_LO);
@@ -420,7 +420,7 @@ MODULE_DEVICE_TABLE(i2c, sil164_ids);
static struct drm_i2c_encoder_driver sil164_driver = {
.i2c_driver = {
- .probe = sil164_probe,
+ .probe_new = sil164_probe,
.driver = {
.name = "sil164",
},
diff --git a/drivers/gpu/drm/i2c/tda9950.c b/drivers/gpu/drm/i2c/tda9950.c
index 9ed54e7ccff2..b8c143e573e0 100644
--- a/drivers/gpu/drm/i2c/tda9950.c
+++ b/drivers/gpu/drm/i2c/tda9950.c
@@ -375,8 +375,7 @@ static void tda9950_cec_del(void *data)
cec_delete_adapter(priv->adap);
}
-static int tda9950_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int tda9950_probe(struct i2c_client *client)
{
struct tda9950_glue *glue = client->dev.platform_data;
struct device *dev = &client->dev;
@@ -493,7 +492,7 @@ static struct i2c_device_id tda9950_ids[] = {
MODULE_DEVICE_TABLE(i2c, tda9950_ids);
static struct i2c_driver tda9950_driver = {
- .probe = tda9950_probe,
+ .probe_new = tda9950_probe,
.remove = tda9950_remove,
.driver = {
.name = "tda9950",
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index a14d2896aebb..db5c9343a3d2 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -2059,7 +2059,7 @@ static const struct component_ops tda998x_ops = {
};
static int
-tda998x_probe(struct i2c_client *client, const struct i2c_device_id *id)
+tda998x_probe(struct i2c_client *client)
{
int ret;
@@ -2099,7 +2099,7 @@ static const struct i2c_device_id tda998x_ids[] = {
MODULE_DEVICE_TABLE(i2c, tda998x_ids);
static struct i2c_driver tda998x_driver = {
- .probe = tda998x_probe,
+ .probe_new = tda998x_probe,
.remove = tda998x_remove,
.driver = {
.name = "tda998x",
diff --git a/drivers/gpu/drm/i810/Makefile b/drivers/gpu/drm/i810/Makefile
deleted file mode 100644
index c181f8528c5c..000000000000
--- a/drivers/gpu/drm/i810/Makefile
+++ /dev/null
@@ -1,8 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# Makefile for the drm device driver. This driver provides support for the
-# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
-
-i810-y := i810_drv.o i810_dma.o
-
-obj-$(CONFIG_DRM_I810) += i810.o
diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
deleted file mode 100644
index 9fb4dd63342f..000000000000
--- a/drivers/gpu/drm/i810/i810_dma.c
+++ /dev/null
@@ -1,1266 +0,0 @@
-/* i810_dma.c -- DMA support for the i810 -*- linux-c -*-
- * Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors: Rickard E. (Rik) Faith <faith@valinux.com>
- * Jeff Hartmann <jhartmann@valinux.com>
- * Keith Whitwell <keith@tungstengraphics.com>
- *
- */
-
-#include <linux/delay.h>
-#include <linux/mman.h>
-#include <linux/pci.h>
-
-#include <drm/drm_device.h>
-#include <drm/drm_drv.h>
-#include <drm/drm_file.h>
-#include <drm/drm_ioctl.h>
-#include <drm/drm_print.h>
-#include <drm/i810_drm.h>
-
-#include "i810_drv.h"
-
-#define I810_BUF_FREE 2
-#define I810_BUF_CLIENT 1
-#define I810_BUF_HARDWARE 0
-
-#define I810_BUF_UNMAPPED 0
-#define I810_BUF_MAPPED 1
-
-static struct drm_buf *i810_freelist_get(struct drm_device * dev)
-{
- struct drm_device_dma *dma = dev->dma;
- int i;
- int used;
-
- /* Linear search might not be the best solution */
-
- for (i = 0; i < dma->buf_count; i++) {
- struct drm_buf *buf = dma->buflist[i];
- drm_i810_buf_priv_t *buf_priv = buf->dev_private;
- /* In use is already a pointer */
- used = cmpxchg(buf_priv->in_use, I810_BUF_FREE,
- I810_BUF_CLIENT);
- if (used == I810_BUF_FREE)
- return buf;
- }
- return NULL;
-}
-
-/* This should only be called if the buffer is not sent to the hardware
- * yet, the hardware updates in use for us once its on the ring buffer.
- */
-
-static int i810_freelist_put(struct drm_device *dev, struct drm_buf *buf)
-{
- drm_i810_buf_priv_t *buf_priv = buf->dev_private;
- int used;
-
- /* In use is already a pointer */
- used = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT, I810_BUF_FREE);
- if (used != I810_BUF_CLIENT) {
- DRM_ERROR("Freeing buffer thats not in use : %d\n", buf->idx);
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
-{
- struct drm_file *priv = filp->private_data;
- struct drm_device *dev;
- drm_i810_private_t *dev_priv;
- struct drm_buf *buf;
- drm_i810_buf_priv_t *buf_priv;
-
- dev = priv->minor->dev;
- dev_priv = dev->dev_private;
- buf = dev_priv->mmap_buffer;
- buf_priv = buf->dev_private;
-
- vma->vm_flags |= VM_DONTCOPY;
-
- buf_priv->currently_mapped = I810_BUF_MAPPED;
-
- if (io_remap_pfn_range(vma, vma->vm_start,
- vma->vm_pgoff,
- vma->vm_end - vma->vm_start, vma->vm_page_prot))
- return -EAGAIN;
- return 0;
-}
-
-static const struct file_operations i810_buffer_fops = {
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = drm_ioctl,
- .mmap = i810_mmap_buffers,
- .compat_ioctl = drm_compat_ioctl,
- .llseek = noop_llseek,
-};
-
-static int i810_map_buffer(struct drm_buf *buf, struct drm_file *file_priv)
-{
- struct drm_device *dev = file_priv->minor->dev;
- drm_i810_buf_priv_t *buf_priv = buf->dev_private;
- drm_i810_private_t *dev_priv = dev->dev_private;
- const struct file_operations *old_fops;
- int retcode = 0;
-
- if (buf_priv->currently_mapped == I810_BUF_MAPPED)
- return -EINVAL;
-
- /* This is all entirely broken */
- old_fops = file_priv->filp->f_op;
- file_priv->filp->f_op = &i810_buffer_fops;
- dev_priv->mmap_buffer = buf;
- buf_priv->virtual = (void *)vm_mmap(file_priv->filp, 0, buf->total,
- PROT_READ | PROT_WRITE,
- MAP_SHARED, buf->bus_address);
- dev_priv->mmap_buffer = NULL;
- file_priv->filp->f_op = old_fops;
- if (IS_ERR(buf_priv->virtual)) {
- /* Real error */
- DRM_ERROR("mmap error\n");
- retcode = PTR_ERR(buf_priv->virtual);
- buf_priv->virtual = NULL;
- }
-
- return retcode;
-}
-
-static int i810_unmap_buffer(struct drm_buf *buf)
-{
- drm_i810_buf_priv_t *buf_priv = buf->dev_private;
- int retcode = 0;
-
- if (buf_priv->currently_mapped != I810_BUF_MAPPED)
- return -EINVAL;
-
- retcode = vm_munmap((unsigned long)buf_priv->virtual,
- (size_t) buf->total);
-
- buf_priv->currently_mapped = I810_BUF_UNMAPPED;
- buf_priv->virtual = NULL;
-
- return retcode;
-}
-
-static int i810_dma_get_buffer(struct drm_device *dev, drm_i810_dma_t *d,
- struct drm_file *file_priv)
-{
- struct drm_buf *buf;
- drm_i810_buf_priv_t *buf_priv;
- int retcode = 0;
-
- buf = i810_freelist_get(dev);
- if (!buf) {
- retcode = -ENOMEM;
- DRM_DEBUG("retcode=%d\n", retcode);
- return retcode;
- }
-
- retcode = i810_map_buffer(buf, file_priv);
- if (retcode) {
- i810_freelist_put(dev, buf);
- DRM_ERROR("mapbuf failed, retcode %d\n", retcode);
- return retcode;
- }
- buf->file_priv = file_priv;
- buf_priv = buf->dev_private;
- d->granted = 1;
- d->request_idx = buf->idx;
- d->request_size = buf->total;
- d->virtual = buf_priv->virtual;
-
- return retcode;
-}
-
-static int i810_dma_cleanup(struct drm_device *dev)
-{
- struct drm_device_dma *dma = dev->dma;
-
- /* Make sure interrupts are disabled here because the uninstall ioctl
- * may not have been called from userspace and after dev_private
- * is freed, it's too late.
- */
- if (drm_core_check_feature(dev, DRIVER_HAVE_IRQ) && dev->irq_enabled)
- drm_legacy_irq_uninstall(dev);
-
- if (dev->dev_private) {
- int i;
- drm_i810_private_t *dev_priv =
- (drm_i810_private_t *) dev->dev_private;
-
- if (dev_priv->ring.virtual_start)
- drm_legacy_ioremapfree(&dev_priv->ring.map, dev);
- if (dev_priv->hw_status_page) {
- dma_free_coherent(dev->dev, PAGE_SIZE,
- dev_priv->hw_status_page,
- dev_priv->dma_status_page);
- }
- kfree(dev->dev_private);
- dev->dev_private = NULL;
-
- for (i = 0; i < dma->buf_count; i++) {
- struct drm_buf *buf = dma->buflist[i];
- drm_i810_buf_priv_t *buf_priv = buf->dev_private;
-
- if (buf_priv->kernel_virtual && buf->total)
- drm_legacy_ioremapfree(&buf_priv->map, dev);
- }
- }
- return 0;
-}
-
-static int i810_wait_ring(struct drm_device *dev, int n)
-{
- drm_i810_private_t *dev_priv = dev->dev_private;
- drm_i810_ring_buffer_t *ring = &(dev_priv->ring);
- int iters = 0;
- unsigned long end;
- unsigned int last_head = I810_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
-
- end = jiffies + (HZ * 3);
- while (ring->space < n) {
- ring->head = I810_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
- ring->space = ring->head - (ring->tail + 8);
- if (ring->space < 0)
- ring->space += ring->Size;
-
- if (ring->head != last_head) {
- end = jiffies + (HZ * 3);
- last_head = ring->head;
- }
-
- iters++;
- if (time_before(end, jiffies)) {
- DRM_ERROR("space: %d wanted %d\n", ring->space, n);
- DRM_ERROR("lockup\n");
- goto out_wait_ring;
- }
- udelay(1);
- }
-
-out_wait_ring:
- return iters;
-}
-
-static void i810_kernel_lost_context(struct drm_device *dev)
-{
- drm_i810_private_t *dev_priv = dev->dev_private;
- drm_i810_ring_buffer_t *ring = &(dev_priv->ring);
-
- ring->head = I810_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
- ring->tail = I810_READ(LP_RING + RING_TAIL);
- ring->space = ring->head - (ring->tail + 8);
- if (ring->space < 0)
- ring->space += ring->Size;
-}
-
-static int i810_freelist_init(struct drm_device *dev, drm_i810_private_t *dev_priv)
-{
- struct drm_device_dma *dma = dev->dma;
- int my_idx = 24;
- u32 *hw_status = (u32 *) (dev_priv->hw_status_page + my_idx);
- int i;
-
- if (dma->buf_count > 1019) {
- /* Not enough space in the status page for the freelist */
- return -EINVAL;
- }
-
- for (i = 0; i < dma->buf_count; i++) {
- struct drm_buf *buf = dma->buflist[i];
- drm_i810_buf_priv_t *buf_priv = buf->dev_private;
-
- buf_priv->in_use = hw_status++;
- buf_priv->my_use_idx = my_idx;
- my_idx += 4;
-
- *buf_priv->in_use = I810_BUF_FREE;
-
- buf_priv->map.offset = buf->bus_address;
- buf_priv->map.size = buf->total;
- buf_priv->map.type = _DRM_AGP;
- buf_priv->map.flags = 0;
- buf_priv->map.mtrr = 0;
-
- drm_legacy_ioremap(&buf_priv->map, dev);
- buf_priv->kernel_virtual = buf_priv->map.handle;
-
- }
- return 0;
-}
-
-static int i810_dma_initialize(struct drm_device *dev,
- drm_i810_private_t *dev_priv,
- drm_i810_init_t *init)
-{
- struct drm_map_list *r_list;
- memset(dev_priv, 0, sizeof(drm_i810_private_t));
-
- list_for_each_entry(r_list, &dev->maplist, head) {
- if (r_list->map &&
- r_list->map->type == _DRM_SHM &&
- r_list->map->flags & _DRM_CONTAINS_LOCK) {
- dev_priv->sarea_map = r_list->map;
- break;
- }
- }
- if (!dev_priv->sarea_map) {
- dev->dev_private = (void *)dev_priv;
- i810_dma_cleanup(dev);
- DRM_ERROR("can not find sarea!\n");
- return -EINVAL;
- }
- dev_priv->mmio_map = drm_legacy_findmap(dev, init->mmio_offset);
- if (!dev_priv->mmio_map) {
- dev->dev_private = (void *)dev_priv;
- i810_dma_cleanup(dev);
- DRM_ERROR("can not find mmio map!\n");
- return -EINVAL;
- }
- dev->agp_buffer_token = init->buffers_offset;
- dev->agp_buffer_map = drm_legacy_findmap(dev, init->buffers_offset);
- if (!dev->agp_buffer_map) {
- dev->dev_private = (void *)dev_priv;
- i810_dma_cleanup(dev);
- DRM_ERROR("can not find dma buffer map!\n");
- return -EINVAL;
- }
-
- dev_priv->sarea_priv = (drm_i810_sarea_t *)
- ((u8 *) dev_priv->sarea_map->handle + init->sarea_priv_offset);
-
- dev_priv->ring.Start = init->ring_start;
- dev_priv->ring.End = init->ring_end;
- dev_priv->ring.Size = init->ring_size;
-
- dev_priv->ring.map.offset = dev->agp->base + init->ring_start;
- dev_priv->ring.map.size = init->ring_size;
- dev_priv->ring.map.type = _DRM_AGP;
- dev_priv->ring.map.flags = 0;
- dev_priv->ring.map.mtrr = 0;
-
- drm_legacy_ioremap(&dev_priv->ring.map, dev);
-
- if (dev_priv->ring.map.handle == NULL) {
- dev->dev_private = (void *)dev_priv;
- i810_dma_cleanup(dev);
- DRM_ERROR("can not ioremap virtual address for"
- " ring buffer\n");
- return -ENOMEM;
- }
-
- dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
-
- dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;
-
- dev_priv->w = init->w;
- dev_priv->h = init->h;
- dev_priv->pitch = init->pitch;
- dev_priv->back_offset = init->back_offset;
- dev_priv->depth_offset = init->depth_offset;
- dev_priv->front_offset = init->front_offset;
-
- dev_priv->overlay_offset = init->overlay_offset;
- dev_priv->overlay_physical = init->overlay_physical;
-
- dev_priv->front_di1 = init->front_offset | init->pitch_bits;
- dev_priv->back_di1 = init->back_offset | init->pitch_bits;
- dev_priv->zi1 = init->depth_offset | init->pitch_bits;
-
- /* Program Hardware Status Page */
- dev_priv->hw_status_page =
- dma_alloc_coherent(dev->dev, PAGE_SIZE,
- &dev_priv->dma_status_page, GFP_KERNEL);
- if (!dev_priv->hw_status_page) {
- dev->dev_private = (void *)dev_priv;
- i810_dma_cleanup(dev);
- DRM_ERROR("Can not allocate hardware status page\n");
- return -ENOMEM;
- }
- DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);
-
- I810_WRITE(0x02080, dev_priv->dma_status_page);
- DRM_DEBUG("Enabled hardware status page\n");
-
- /* Now we need to init our freelist */
- if (i810_freelist_init(dev, dev_priv) != 0) {
- dev->dev_private = (void *)dev_priv;
- i810_dma_cleanup(dev);
- DRM_ERROR("Not enough space in the status page for"
- " the freelist\n");
- return -ENOMEM;
- }
- dev->dev_private = (void *)dev_priv;
-
- return 0;
-}
-
-static int i810_dma_init(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- drm_i810_private_t *dev_priv;
- drm_i810_init_t *init = data;
- int retcode = 0;
-
- switch (init->func) {
- case I810_INIT_DMA_1_4:
- DRM_INFO("Using v1.4 init.\n");
- dev_priv = kmalloc(sizeof(drm_i810_private_t), GFP_KERNEL);
- if (dev_priv == NULL)
- return -ENOMEM;
- retcode = i810_dma_initialize(dev, dev_priv, init);
- break;
-
- case I810_CLEANUP_DMA:
- DRM_INFO("DMA Cleanup\n");
- retcode = i810_dma_cleanup(dev);
- break;
- default:
- return -EINVAL;
- }
-
- return retcode;
-}
-
-/* Most efficient way to verify state for the i810 is as it is
- * emitted. Non-conformant state is silently dropped.
- *
- * Use 'volatile' & local var tmp to force the emitted values to be
- * identical to the verified ones.
- */
-static void i810EmitContextVerified(struct drm_device *dev,
- volatile unsigned int *code)
-{
- drm_i810_private_t *dev_priv = dev->dev_private;
- int i, j = 0;
- unsigned int tmp;
- RING_LOCALS;
-
- BEGIN_LP_RING(I810_CTX_SETUP_SIZE);
-
- OUT_RING(GFX_OP_COLOR_FACTOR);
- OUT_RING(code[I810_CTXREG_CF1]);
-
- OUT_RING(GFX_OP_STIPPLE);
- OUT_RING(code[I810_CTXREG_ST1]);
-
- for (i = 4; i < I810_CTX_SETUP_SIZE; i++) {
- tmp = code[i];
-
- if ((tmp & (7 << 29)) == (3 << 29) &&
- (tmp & (0x1f << 24)) < (0x1d << 24)) {
- OUT_RING(tmp);
- j++;
- } else
- printk("constext state dropped!!!\n");
- }
-
- if (j & 1)
- OUT_RING(0);
-
- ADVANCE_LP_RING();
-}
-
-static void i810EmitTexVerified(struct drm_device *dev, volatile unsigned int *code)
-{
- drm_i810_private_t *dev_priv = dev->dev_private;
- int i, j = 0;
- unsigned int tmp;
- RING_LOCALS;
-
- BEGIN_LP_RING(I810_TEX_SETUP_SIZE);
-
- OUT_RING(GFX_OP_MAP_INFO);
- OUT_RING(code[I810_TEXREG_MI1]);
- OUT_RING(code[I810_TEXREG_MI2]);
- OUT_RING(code[I810_TEXREG_MI3]);
-
- for (i = 4; i < I810_TEX_SETUP_SIZE; i++) {
- tmp = code[i];
-
- if ((tmp & (7 << 29)) == (3 << 29) &&
- (tmp & (0x1f << 24)) < (0x1d << 24)) {
- OUT_RING(tmp);
- j++;
- } else
- printk("texture state dropped!!!\n");
- }
-
- if (j & 1)
- OUT_RING(0);
-
- ADVANCE_LP_RING();
-}
-
-/* Need to do some additional checking when setting the dest buffer.
- */
-static void i810EmitDestVerified(struct drm_device *dev,
- volatile unsigned int *code)
-{
- drm_i810_private_t *dev_priv = dev->dev_private;
- unsigned int tmp;
- RING_LOCALS;
-
- BEGIN_LP_RING(I810_DEST_SETUP_SIZE + 2);
-
- tmp = code[I810_DESTREG_DI1];
- if (tmp == dev_priv->front_di1 || tmp == dev_priv->back_di1) {
- OUT_RING(CMD_OP_DESTBUFFER_INFO);
- OUT_RING(tmp);
- } else
- DRM_DEBUG("bad di1 %x (allow %x or %x)\n",
- tmp, dev_priv->front_di1, dev_priv->back_di1);
-
- /* invarient:
- */
- OUT_RING(CMD_OP_Z_BUFFER_INFO);
- OUT_RING(dev_priv->zi1);
-
- OUT_RING(GFX_OP_DESTBUFFER_VARS);
- OUT_RING(code[I810_DESTREG_DV1]);
-
- OUT_RING(GFX_OP_DRAWRECT_INFO);
- OUT_RING(code[I810_DESTREG_DR1]);
- OUT_RING(code[I810_DESTREG_DR2]);
- OUT_RING(code[I810_DESTREG_DR3]);
- OUT_RING(code[I810_DESTREG_DR4]);
- OUT_RING(0);
-
- ADVANCE_LP_RING();
-}
-
-static void i810EmitState(struct drm_device *dev)
-{
- drm_i810_private_t *dev_priv = dev->dev_private;
- drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
- unsigned int dirty = sarea_priv->dirty;
-
- DRM_DEBUG("%x\n", dirty);
-
- if (dirty & I810_UPLOAD_BUFFERS) {
- i810EmitDestVerified(dev, sarea_priv->BufferState);
- sarea_priv->dirty &= ~I810_UPLOAD_BUFFERS;
- }
-
- if (dirty & I810_UPLOAD_CTX) {
- i810EmitContextVerified(dev, sarea_priv->ContextState);
- sarea_priv->dirty &= ~I810_UPLOAD_CTX;
- }
-
- if (dirty & I810_UPLOAD_TEX0) {
- i810EmitTexVerified(dev, sarea_priv->TexState[0]);
- sarea_priv->dirty &= ~I810_UPLOAD_TEX0;
- }
-
- if (dirty & I810_UPLOAD_TEX1) {
- i810EmitTexVerified(dev, sarea_priv->TexState[1]);
- sarea_priv->dirty &= ~I810_UPLOAD_TEX1;
- }
-}
-
-/* need to verify
- */
-static void i810_dma_dispatch_clear(struct drm_device *dev, int flags,
- unsigned int clear_color,
- unsigned int clear_zval)
-{
- drm_i810_private_t *dev_priv = dev->dev_private;
- drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
- int nbox = sarea_priv->nbox;
- struct drm_clip_rect *pbox = sarea_priv->boxes;
- int pitch = dev_priv->pitch;
- int cpp = 2;
- int i;
- RING_LOCALS;
-
- if (dev_priv->current_page == 1) {
- unsigned int tmp = flags;
-
- flags &= ~(I810_FRONT | I810_BACK);
- if (tmp & I810_FRONT)
- flags |= I810_BACK;
- if (tmp & I810_BACK)
- flags |= I810_FRONT;
- }
-
- i810_kernel_lost_context(dev);
-
- if (nbox > I810_NR_SAREA_CLIPRECTS)
- nbox = I810_NR_SAREA_CLIPRECTS;
-
- for (i = 0; i < nbox; i++, pbox++) {
- unsigned int x = pbox->x1;
- unsigned int y = pbox->y1;
- unsigned int width = (pbox->x2 - x) * cpp;
- unsigned int height = pbox->y2 - y;
- unsigned int start = y * pitch + x * cpp;
-
- if (pbox->x1 > pbox->x2 ||
- pbox->y1 > pbox->y2 ||
- pbox->x2 > dev_priv->w || pbox->y2 > dev_priv->h)
- continue;
-
- if (flags & I810_FRONT) {
- BEGIN_LP_RING(6);
- OUT_RING(BR00_BITBLT_CLIENT | BR00_OP_COLOR_BLT | 0x3);
- OUT_RING(BR13_SOLID_PATTERN | (0xF0 << 16) | pitch);
- OUT_RING((height << 16) | width);
- OUT_RING(start);
- OUT_RING(clear_color);
- OUT_RING(0);
- ADVANCE_LP_RING();
- }
-
- if (flags & I810_BACK) {
- BEGIN_LP_RING(6);
- OUT_RING(BR00_BITBLT_CLIENT | BR00_OP_COLOR_BLT | 0x3);
- OUT_RING(BR13_SOLID_PATTERN | (0xF0 << 16) | pitch);
- OUT_RING((height << 16) | width);
- OUT_RING(dev_priv->back_offset + start);
- OUT_RING(clear_color);
- OUT_RING(0);
- ADVANCE_LP_RING();
- }
-
- if (flags & I810_DEPTH) {
- BEGIN_LP_RING(6);
- OUT_RING(BR00_BITBLT_CLIENT | BR00_OP_COLOR_BLT | 0x3);
- OUT_RING(BR13_SOLID_PATTERN | (0xF0 << 16) | pitch);
- OUT_RING((height << 16) | width);
- OUT_RING(dev_priv->depth_offset + start);
- OUT_RING(clear_zval);
- OUT_RING(0);
- ADVANCE_LP_RING();
- }
- }
-}
-
-static void i810_dma_dispatch_swap(struct drm_device *dev)
-{
- drm_i810_private_t *dev_priv = dev->dev_private;
- drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
- int nbox = sarea_priv->nbox;
- struct drm_clip_rect *pbox = sarea_priv->boxes;
- int pitch = dev_priv->pitch;
- int cpp = 2;
- int i;
- RING_LOCALS;
-
- DRM_DEBUG("swapbuffers\n");
-
- i810_kernel_lost_context(dev);
-
- if (nbox > I810_NR_SAREA_CLIPRECTS)
- nbox = I810_NR_SAREA_CLIPRECTS;
-
- for (i = 0; i < nbox; i++, pbox++) {
- unsigned int w = pbox->x2 - pbox->x1;
- unsigned int h = pbox->y2 - pbox->y1;
- unsigned int dst = pbox->x1 * cpp + pbox->y1 * pitch;
- unsigned int start = dst;
-
- if (pbox->x1 > pbox->x2 ||
- pbox->y1 > pbox->y2 ||
- pbox->x2 > dev_priv->w || pbox->y2 > dev_priv->h)
- continue;
-
- BEGIN_LP_RING(6);
- OUT_RING(BR00_BITBLT_CLIENT | BR00_OP_SRC_COPY_BLT | 0x4);
- OUT_RING(pitch | (0xCC << 16));
- OUT_RING((h << 16) | (w * cpp));
- if (dev_priv->current_page == 0)
- OUT_RING(dev_priv->front_offset + start);
- else
- OUT_RING(dev_priv->back_offset + start);
- OUT_RING(pitch);
- if (dev_priv->current_page == 0)
- OUT_RING(dev_priv->back_offset + start);
- else
- OUT_RING(dev_priv->front_offset + start);
- ADVANCE_LP_RING();
- }
-}
-
-static void i810_dma_dispatch_vertex(struct drm_device *dev,
- struct drm_buf *buf, int discard, int used)
-{
- drm_i810_private_t *dev_priv = dev->dev_private;
- drm_i810_buf_priv_t *buf_priv = buf->dev_private;
- drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
- struct drm_clip_rect *box = sarea_priv->boxes;
- int nbox = sarea_priv->nbox;
- unsigned long address = (unsigned long)buf->bus_address;
- unsigned long start = address - dev->agp->base;
- int i = 0;
- RING_LOCALS;
-
- i810_kernel_lost_context(dev);
-
- if (nbox > I810_NR_SAREA_CLIPRECTS)
- nbox = I810_NR_SAREA_CLIPRECTS;
-
- if (used < 0 || used > 4 * 1024)
- used = 0;
-
- if (sarea_priv->dirty)
- i810EmitState(dev);
-
- if (buf_priv->currently_mapped == I810_BUF_MAPPED) {
- unsigned int prim = (sarea_priv->vertex_prim & PR_MASK);
-
- *(u32 *) buf_priv->kernel_virtual =
- ((GFX_OP_PRIMITIVE | prim | ((used / 4) - 2)));
-
- if (used & 4) {
- *(u32 *) ((char *) buf_priv->kernel_virtual + used) = 0;
- used += 4;
- }
-
- i810_unmap_buffer(buf);
- }
-
- if (used) {
- do {
- if (i < nbox) {
- BEGIN_LP_RING(4);
- OUT_RING(GFX_OP_SCISSOR | SC_UPDATE_SCISSOR |
- SC_ENABLE);
- OUT_RING(GFX_OP_SCISSOR_INFO);
- OUT_RING(box[i].x1 | (box[i].y1 << 16));
- OUT_RING((box[i].x2 -
- 1) | ((box[i].y2 - 1) << 16));
- ADVANCE_LP_RING();
- }
-
- BEGIN_LP_RING(4);
- OUT_RING(CMD_OP_BATCH_BUFFER);
- OUT_RING(start | BB1_PROTECTED);
- OUT_RING(start + used - 4);
- OUT_RING(0);
- ADVANCE_LP_RING();
-
- } while (++i < nbox);
- }
-
- if (discard) {
- dev_priv->counter++;
-
- (void)cmpxchg(buf_priv->in_use, I810_BUF_CLIENT,
- I810_BUF_HARDWARE);
-
- BEGIN_LP_RING(8);
- OUT_RING(CMD_STORE_DWORD_IDX);
- OUT_RING(20);
- OUT_RING(dev_priv->counter);
- OUT_RING(CMD_STORE_DWORD_IDX);
- OUT_RING(buf_priv->my_use_idx);
- OUT_RING(I810_BUF_FREE);
- OUT_RING(CMD_REPORT_HEAD);
- OUT_RING(0);
- ADVANCE_LP_RING();
- }
-}
-
-static void i810_dma_dispatch_flip(struct drm_device *dev)
-{
- drm_i810_private_t *dev_priv = dev->dev_private;
- int pitch = dev_priv->pitch;
- RING_LOCALS;
-
- DRM_DEBUG("page=%d pfCurrentPage=%d\n",
- dev_priv->current_page,
- dev_priv->sarea_priv->pf_current_page);
-
- i810_kernel_lost_context(dev);
-
- BEGIN_LP_RING(2);
- OUT_RING(INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE);
- OUT_RING(0);
- ADVANCE_LP_RING();
-
- BEGIN_LP_RING(I810_DEST_SETUP_SIZE + 2);
- /* On i815 at least ASYNC is buggy */
- /* pitch<<5 is from 11.2.8 p158,
- its the pitch / 8 then left shifted 8,
- so (pitch >> 3) << 8 */
- OUT_RING(CMD_OP_FRONTBUFFER_INFO | (pitch << 5) /*| ASYNC_FLIP */ );
- if (dev_priv->current_page == 0) {
- OUT_RING(dev_priv->back_offset);
- dev_priv->current_page = 1;
- } else {
- OUT_RING(dev_priv->front_offset);
- dev_priv->current_page = 0;
- }
- OUT_RING(0);
- ADVANCE_LP_RING();
-
- BEGIN_LP_RING(2);
- OUT_RING(CMD_OP_WAIT_FOR_EVENT | WAIT_FOR_PLANE_A_FLIP);
- OUT_RING(0);
- ADVANCE_LP_RING();
-
- /* Increment the frame counter. The client-side 3D driver must
- * throttle the framerate by waiting for this value before
- * performing the swapbuffer ioctl.
- */
- dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
-
-}
-
-static void i810_dma_quiescent(struct drm_device *dev)
-{
- drm_i810_private_t *dev_priv = dev->dev_private;
- RING_LOCALS;
-
- i810_kernel_lost_context(dev);
-
- BEGIN_LP_RING(4);
- OUT_RING(INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE);
- OUT_RING(CMD_REPORT_HEAD);
- OUT_RING(0);
- OUT_RING(0);
- ADVANCE_LP_RING();
-
- i810_wait_ring(dev, dev_priv->ring.Size - 8);
-}
-
-static void i810_flush_queue(struct drm_device *dev)
-{
- drm_i810_private_t *dev_priv = dev->dev_private;
- struct drm_device_dma *dma = dev->dma;
- int i;
- RING_LOCALS;
-
- i810_kernel_lost_context(dev);
-
- BEGIN_LP_RING(2);
- OUT_RING(CMD_REPORT_HEAD);
- OUT_RING(0);
- ADVANCE_LP_RING();
-
- i810_wait_ring(dev, dev_priv->ring.Size - 8);
-
- for (i = 0; i < dma->buf_count; i++) {
- struct drm_buf *buf = dma->buflist[i];
- drm_i810_buf_priv_t *buf_priv = buf->dev_private;
-
- int used = cmpxchg(buf_priv->in_use, I810_BUF_HARDWARE,
- I810_BUF_FREE);
-
- if (used == I810_BUF_HARDWARE)
- DRM_DEBUG("reclaimed from HARDWARE\n");
- if (used == I810_BUF_CLIENT)
- DRM_DEBUG("still on client\n");
- }
-
- return;
-}
-
-/* Must be called with the lock held */
-void i810_driver_reclaim_buffers(struct drm_device *dev,
- struct drm_file *file_priv)
-{
- struct drm_device_dma *dma = dev->dma;
- int i;
-
- if (!dma)
- return;
- if (!dev->dev_private)
- return;
- if (!dma->buflist)
- return;
-
- i810_flush_queue(dev);
-
- for (i = 0; i < dma->buf_count; i++) {
- struct drm_buf *buf = dma->buflist[i];
- drm_i810_buf_priv_t *buf_priv = buf->dev_private;
-
- if (buf->file_priv == file_priv && buf_priv) {
- int used = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT,
- I810_BUF_FREE);
-
- if (used == I810_BUF_CLIENT)
- DRM_DEBUG("reclaimed from client\n");
- if (buf_priv->currently_mapped == I810_BUF_MAPPED)
- buf_priv->currently_mapped = I810_BUF_UNMAPPED;
- }
- }
-}
-
-static int i810_flush_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- i810_flush_queue(dev);
- return 0;
-}
-
-static int i810_dma_vertex(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_device_dma *dma = dev->dma;
- drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
- u32 *hw_status = dev_priv->hw_status_page;
- drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
- dev_priv->sarea_priv;
- drm_i810_vertex_t *vertex = data;
-
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- DRM_DEBUG("idx %d used %d discard %d\n",
- vertex->idx, vertex->used, vertex->discard);
-
- if (vertex->idx < 0 || vertex->idx >= dma->buf_count)
- return -EINVAL;
-
- i810_dma_dispatch_vertex(dev,
- dma->buflist[vertex->idx],
- vertex->discard, vertex->used);
-
- sarea_priv->last_enqueue = dev_priv->counter - 1;
- sarea_priv->last_dispatch = (int)hw_status[5];
-
- return 0;
-}
-
-static int i810_clear_bufs(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- drm_i810_clear_t *clear = data;
-
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- /* GH: Someone's doing nasty things... */
- if (!dev->dev_private)
- return -EINVAL;
-
- i810_dma_dispatch_clear(dev, clear->flags,
- clear->clear_color, clear->clear_depth);
- return 0;
-}
-
-static int i810_swap_bufs(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- DRM_DEBUG("\n");
-
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- i810_dma_dispatch_swap(dev);
- return 0;
-}
-
-static int i810_getage(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
- u32 *hw_status = dev_priv->hw_status_page;
- drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
- dev_priv->sarea_priv;
-
- sarea_priv->last_dispatch = (int)hw_status[5];
- return 0;
-}
-
-static int i810_getbuf(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- int retcode = 0;
- drm_i810_dma_t *d = data;
- drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
- u32 *hw_status = dev_priv->hw_status_page;
- drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
- dev_priv->sarea_priv;
-
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- d->granted = 0;
-
- retcode = i810_dma_get_buffer(dev, d, file_priv);
-
- DRM_DEBUG("i810_dma: %d returning %d, granted = %d\n",
- task_pid_nr(current), retcode, d->granted);
-
- sarea_priv->last_dispatch = (int)hw_status[5];
-
- return retcode;
-}
-
-static int i810_copybuf(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- /* Never copy - 2.4.x doesn't need it */
- return 0;
-}
-
-static int i810_docopy(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- /* Never copy - 2.4.x doesn't need it */
- return 0;
-}
-
-static void i810_dma_dispatch_mc(struct drm_device *dev, struct drm_buf *buf, int used,
- unsigned int last_render)
-{
- drm_i810_private_t *dev_priv = dev->dev_private;
- drm_i810_buf_priv_t *buf_priv = buf->dev_private;
- drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
- unsigned long address = (unsigned long)buf->bus_address;
- unsigned long start = address - dev->agp->base;
- int u;
- RING_LOCALS;
-
- i810_kernel_lost_context(dev);
-
- u = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT, I810_BUF_HARDWARE);
- if (u != I810_BUF_CLIENT)
- DRM_DEBUG("MC found buffer that isn't mine!\n");
-
- if (used < 0 || used > 4 * 1024)
- used = 0;
-
- sarea_priv->dirty = 0x7f;
-
- DRM_DEBUG("addr 0x%lx, used 0x%x\n", address, used);
-
- dev_priv->counter++;
- DRM_DEBUG("dispatch counter : %ld\n", dev_priv->counter);
- DRM_DEBUG("start : %lx\n", start);
- DRM_DEBUG("used : %d\n", used);
- DRM_DEBUG("start + used - 4 : %ld\n", start + used - 4);
-
- if (buf_priv->currently_mapped == I810_BUF_MAPPED) {
- if (used & 4) {
- *(u32 *) ((char *) buf_priv->virtual + used) = 0;
- used += 4;
- }
-
- i810_unmap_buffer(buf);
- }
- BEGIN_LP_RING(4);
- OUT_RING(CMD_OP_BATCH_BUFFER);
- OUT_RING(start | BB1_PROTECTED);
- OUT_RING(start + used - 4);
- OUT_RING(0);
- ADVANCE_LP_RING();
-
- BEGIN_LP_RING(8);
- OUT_RING(CMD_STORE_DWORD_IDX);
- OUT_RING(buf_priv->my_use_idx);
- OUT_RING(I810_BUF_FREE);
- OUT_RING(0);
-
- OUT_RING(CMD_STORE_DWORD_IDX);
- OUT_RING(16);
- OUT_RING(last_render);
- OUT_RING(0);
- ADVANCE_LP_RING();
-}
-
-static int i810_dma_mc(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_device_dma *dma = dev->dma;
- drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
- u32 *hw_status = dev_priv->hw_status_page;
- drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
- dev_priv->sarea_priv;
- drm_i810_mc_t *mc = data;
-
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- if (mc->idx >= dma->buf_count || mc->idx < 0)
- return -EINVAL;
-
- i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
- mc->last_render);
-
- sarea_priv->last_enqueue = dev_priv->counter - 1;
- sarea_priv->last_dispatch = (int)hw_status[5];
-
- return 0;
-}
-
-static int i810_rstatus(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
-
- return (int)(((u32 *) (dev_priv->hw_status_page))[4]);
-}
-
-static int i810_ov0_info(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
- drm_i810_overlay_t *ov = data;
-
- ov->offset = dev_priv->overlay_offset;
- ov->physical = dev_priv->overlay_physical;
-
- return 0;
-}
-
-static int i810_fstatus(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
-
- LOCK_TEST_WITH_RETURN(dev, file_priv);
- return I810_READ(0x30008);
-}
-
-static int i810_ov0_flip(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
-
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- /* Tell the overlay to update */
- I810_WRITE(0x30000, dev_priv->overlay_physical | 0x80000000);
-
- return 0;
-}
-
-/* Not sure why this isn't set all the time:
- */
-static void i810_do_init_pageflip(struct drm_device *dev)
-{
- drm_i810_private_t *dev_priv = dev->dev_private;
-
- DRM_DEBUG("\n");
- dev_priv->page_flipping = 1;
- dev_priv->current_page = 0;
- dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
-}
-
-static int i810_do_cleanup_pageflip(struct drm_device *dev)
-{
- drm_i810_private_t *dev_priv = dev->dev_private;
-
- DRM_DEBUG("\n");
- if (dev_priv->current_page != 0)
- i810_dma_dispatch_flip(dev);
-
- dev_priv->page_flipping = 0;
- return 0;
-}
-
-static int i810_flip_bufs(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- drm_i810_private_t *dev_priv = dev->dev_private;
-
- DRM_DEBUG("\n");
-
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- if (!dev_priv->page_flipping)
- i810_do_init_pageflip(dev);
-
- i810_dma_dispatch_flip(dev);
- return 0;
-}
-
-int i810_driver_load(struct drm_device *dev, unsigned long flags)
-{
- struct pci_dev *pdev = to_pci_dev(dev->dev);
-
- dev->agp = drm_legacy_agp_init(dev);
- if (dev->agp) {
- dev->agp->agp_mtrr = arch_phys_wc_add(
- dev->agp->agp_info.aper_base,
- dev->agp->agp_info.aper_size *
- 1024 * 1024);
- }
-
- /* Our userspace depends upon the agp mapping support. */
- if (!dev->agp)
- return -EINVAL;
-
- pci_set_master(pdev);
-
- return 0;
-}
-
-void i810_driver_lastclose(struct drm_device *dev)
-{
- i810_dma_cleanup(dev);
-}
-
-void i810_driver_preclose(struct drm_device *dev, struct drm_file *file_priv)
-{
- if (dev->dev_private) {
- drm_i810_private_t *dev_priv = dev->dev_private;
- if (dev_priv->page_flipping)
- i810_do_cleanup_pageflip(dev);
- }
-
- if (file_priv->master && file_priv->master->lock.hw_lock) {
- drm_legacy_idlelock_take(&file_priv->master->lock);
- i810_driver_reclaim_buffers(dev, file_priv);
- drm_legacy_idlelock_release(&file_priv->master->lock);
- } else {
- /* master disappeared, clean up stuff anyway and hope nothing
- * goes wrong */
- i810_driver_reclaim_buffers(dev, file_priv);
- }
-
-}
-
-int i810_driver_dma_quiescent(struct drm_device *dev)
-{
- i810_dma_quiescent(dev);
- return 0;
-}
-
-const struct drm_ioctl_desc i810_ioctls[] = {
- DRM_IOCTL_DEF_DRV(I810_INIT, i810_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I810_VERTEX, i810_dma_vertex, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I810_CLEAR, i810_clear_bufs, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I810_FLUSH, i810_flush_ioctl, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I810_GETAGE, i810_getage, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I810_GETBUF, i810_getbuf, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I810_SWAP, i810_swap_bufs, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I810_COPY, i810_copybuf, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I810_DOCOPY, i810_docopy, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I810_OV0INFO, i810_ov0_info, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I810_FSTATUS, i810_fstatus, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I810_OV0FLIP, i810_ov0_flip, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I810_MC, i810_dma_mc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I810_RSTATUS, i810_rstatus, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I810_FLIP, i810_flip_bufs, DRM_AUTH|DRM_UNLOCKED),
-};
-
-int i810_max_ioctl = ARRAY_SIZE(i810_ioctls);
diff --git a/drivers/gpu/drm/i810/i810_drv.c b/drivers/gpu/drm/i810/i810_drv.c
deleted file mode 100644
index 0e53a066d4db..000000000000
--- a/drivers/gpu/drm/i810/i810_drv.c
+++ /dev/null
@@ -1,101 +0,0 @@
-/* i810_drv.c -- I810 driver -*- linux-c -*-
- * Created: Mon Dec 13 01:56:22 1999 by jhartmann@precisioninsight.com
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Rickard E. (Rik) Faith <faith@valinux.com>
- * Jeff Hartmann <jhartmann@valinux.com>
- * Gareth Hughes <gareth@valinux.com>
- */
-
-#include "i810_drv.h"
-
-#include <linux/module.h>
-#include <linux/pci.h>
-
-#include <drm/drm_drv.h>
-#include <drm/drm_file.h>
-#include <drm/drm_pciids.h>
-#include <drm/i810_drm.h>
-
-
-static struct pci_device_id pciidlist[] = {
- i810_PCI_IDS
-};
-
-static const struct file_operations i810_driver_fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = drm_ioctl,
- .mmap = drm_legacy_mmap,
- .poll = drm_poll,
- .compat_ioctl = drm_compat_ioctl,
- .llseek = noop_llseek,
-};
-
-static struct drm_driver driver = {
- .driver_features = DRIVER_USE_AGP | DRIVER_HAVE_DMA | DRIVER_LEGACY,
- .dev_priv_size = sizeof(drm_i810_buf_priv_t),
- .load = i810_driver_load,
- .lastclose = i810_driver_lastclose,
- .preclose = i810_driver_preclose,
- .dma_quiescent = i810_driver_dma_quiescent,
- .ioctls = i810_ioctls,
- .fops = &i810_driver_fops,
- .name = DRIVER_NAME,
- .desc = DRIVER_DESC,
- .date = DRIVER_DATE,
- .major = DRIVER_MAJOR,
- .minor = DRIVER_MINOR,
- .patchlevel = DRIVER_PATCHLEVEL,
-};
-
-static struct pci_driver i810_pci_driver = {
- .name = DRIVER_NAME,
- .id_table = pciidlist,
-};
-
-static int __init i810_init(void)
-{
- if (num_possible_cpus() > 1) {
- pr_err("drm/i810 does not support SMP\n");
- return -EINVAL;
- }
- driver.num_ioctls = i810_max_ioctl;
- return drm_legacy_pci_init(&driver, &i810_pci_driver);
-}
-
-static void __exit i810_exit(void)
-{
- drm_legacy_pci_exit(&driver, &i810_pci_driver);
-}
-
-module_init(i810_init);
-module_exit(i810_exit);
-
-MODULE_AUTHOR(DRIVER_AUTHOR);
-MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
deleted file mode 100644
index 9df3981ffc66..000000000000
--- a/drivers/gpu/drm/i810/i810_drv.h
+++ /dev/null
@@ -1,246 +0,0 @@
-/* i810_drv.h -- Private header for the Matrox g200/g400 driver -*- linux-c -*-
- * Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors: Rickard E. (Rik) Faith <faith@valinux.com>
- * Jeff Hartmann <jhartmann@valinux.com>
- *
- */
-
-#ifndef _I810_DRV_H_
-#define _I810_DRV_H_
-
-#include <drm/drm_ioctl.h>
-#include <drm/drm_legacy.h>
-#include <drm/i810_drm.h>
-
-/* General customization:
- */
-
-#define DRIVER_AUTHOR "VA Linux Systems Inc."
-
-#define DRIVER_NAME "i810"
-#define DRIVER_DESC "Intel i810"
-#define DRIVER_DATE "20030605"
-
-/* Interface history
- *
- * 1.1 - XFree86 4.1
- * 1.2 - XvMC interfaces
- * - XFree86 4.2
- * 1.2.1 - Disable copying code (leave stub ioctls for backwards compatibility)
- * - Remove requirement for interrupt (leave stubs again)
- * 1.3 - Add page flipping.
- * 1.4 - fix DRM interface
- */
-#define DRIVER_MAJOR 1
-#define DRIVER_MINOR 4
-#define DRIVER_PATCHLEVEL 0
-
-typedef struct drm_i810_buf_priv {
- u32 *in_use;
- int my_use_idx;
- int currently_mapped;
- void *virtual;
- void *kernel_virtual;
- drm_local_map_t map;
-} drm_i810_buf_priv_t;
-
-typedef struct _drm_i810_ring_buffer {
- int tail_mask;
- unsigned long Start;
- unsigned long End;
- unsigned long Size;
- u8 *virtual_start;
- int head;
- int tail;
- int space;
- drm_local_map_t map;
-} drm_i810_ring_buffer_t;
-
-typedef struct drm_i810_private {
- struct drm_local_map *sarea_map;
- struct drm_local_map *mmio_map;
-
- drm_i810_sarea_t *sarea_priv;
- drm_i810_ring_buffer_t ring;
-
- void *hw_status_page;
- unsigned long counter;
-
- dma_addr_t dma_status_page;
-
- struct drm_buf *mmap_buffer;
-
- u32 front_di1, back_di1, zi1;
-
- int back_offset;
- int depth_offset;
- int overlay_offset;
- int overlay_physical;
- int w, h;
- int pitch;
- int back_pitch;
- int depth_pitch;
-
- int do_boxes;
- int dma_used;
-
- int current_page;
- int page_flipping;
-
- wait_queue_head_t irq_queue;
- atomic_t irq_received;
- atomic_t irq_emitted;
-
- int front_offset;
-} drm_i810_private_t;
-
- /* i810_dma.c */
-extern int i810_driver_dma_quiescent(struct drm_device *dev);
-void i810_driver_reclaim_buffers(struct drm_device *dev,
- struct drm_file *file_priv);
-extern int i810_driver_load(struct drm_device *, unsigned long flags);
-extern void i810_driver_lastclose(struct drm_device *dev);
-extern void i810_driver_preclose(struct drm_device *dev,
- struct drm_file *file_priv);
-
-extern long i810_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
-extern const struct drm_ioctl_desc i810_ioctls[];
-extern int i810_max_ioctl;
-
-#define I810_BASE(reg) ((unsigned long) \
- dev_priv->mmio_map->handle)
-#define I810_ADDR(reg) (I810_BASE(reg) + reg)
-#define I810_DEREF(reg) (*(__volatile__ int *)I810_ADDR(reg))
-#define I810_READ(reg) I810_DEREF(reg)
-#define I810_WRITE(reg, val) do { I810_DEREF(reg) = val; } while (0)
-#define I810_DEREF16(reg) (*(__volatile__ u16 *)I810_ADDR(reg))
-#define I810_READ16(reg) I810_DEREF16(reg)
-#define I810_WRITE16(reg, val) do { I810_DEREF16(reg) = val; } while (0)
-
-#define I810_VERBOSE 0
-#define RING_LOCALS unsigned int outring, ringmask; \
- volatile char *virt;
-
-#define BEGIN_LP_RING(n) do { \
- if (I810_VERBOSE) \
- DRM_DEBUG("BEGIN_LP_RING(%d)\n", n); \
- if (dev_priv->ring.space < n*4) \
- i810_wait_ring(dev, n*4); \
- dev_priv->ring.space -= n*4; \
- outring = dev_priv->ring.tail; \
- ringmask = dev_priv->ring.tail_mask; \
- virt = dev_priv->ring.virtual_start; \
-} while (0)
-
-#define ADVANCE_LP_RING() do { \
- if (I810_VERBOSE) \
- DRM_DEBUG("ADVANCE_LP_RING\n"); \
- dev_priv->ring.tail = outring; \
- I810_WRITE(LP_RING + RING_TAIL, outring); \
-} while (0)
-
-#define OUT_RING(n) do { \
- if (I810_VERBOSE) \
- DRM_DEBUG(" OUT_RING %x\n", (int)(n)); \
- *(volatile unsigned int *)(virt + outring) = n; \
- outring += 4; \
- outring &= ringmask; \
-} while (0)
-
-#define GFX_OP_USER_INTERRUPT ((0<<29)|(2<<23))
-#define GFX_OP_BREAKPOINT_INTERRUPT ((0<<29)|(1<<23))
-#define CMD_REPORT_HEAD (7<<23)
-#define CMD_STORE_DWORD_IDX ((0x21<<23) | 0x1)
-#define CMD_OP_BATCH_BUFFER ((0x0<<29)|(0x30<<23)|0x1)
-
-#define INST_PARSER_CLIENT 0x00000000
-#define INST_OP_FLUSH 0x02000000
-#define INST_FLUSH_MAP_CACHE 0x00000001
-
-#define BB1_START_ADDR_MASK (~0x7)
-#define BB1_PROTECTED (1<<0)
-#define BB1_UNPROTECTED (0<<0)
-#define BB2_END_ADDR_MASK (~0x7)
-
-#define I810REG_HWSTAM 0x02098
-#define I810REG_INT_IDENTITY_R 0x020a4
-#define I810REG_INT_MASK_R 0x020a8
-#define I810REG_INT_ENABLE_R 0x020a0
-
-#define LP_RING 0x2030
-#define HP_RING 0x2040
-#define RING_TAIL 0x00
-#define TAIL_ADDR 0x000FFFF8
-#define RING_HEAD 0x04
-#define HEAD_WRAP_COUNT 0xFFE00000
-#define HEAD_WRAP_ONE 0x00200000
-#define HEAD_ADDR 0x001FFFFC
-#define RING_START 0x08
-#define START_ADDR 0x00FFFFF8
-#define RING_LEN 0x0C
-#define RING_NR_PAGES 0x000FF000
-#define RING_REPORT_MASK 0x00000006
-#define RING_REPORT_64K 0x00000002
-#define RING_REPORT_128K 0x00000004
-#define RING_NO_REPORT 0x00000000
-#define RING_VALID_MASK 0x00000001
-#define RING_VALID 0x00000001
-#define RING_INVALID 0x00000000
-
-#define GFX_OP_SCISSOR ((0x3<<29)|(0x1c<<24)|(0x10<<19))
-#define SC_UPDATE_SCISSOR (0x1<<1)
-#define SC_ENABLE_MASK (0x1<<0)
-#define SC_ENABLE (0x1<<0)
-
-#define GFX_OP_SCISSOR_INFO ((0x3<<29)|(0x1d<<24)|(0x81<<16)|(0x1))
-#define SCI_YMIN_MASK (0xffff<<16)
-#define SCI_XMIN_MASK (0xffff<<0)
-#define SCI_YMAX_MASK (0xffff<<16)
-#define SCI_XMAX_MASK (0xffff<<0)
-
-#define GFX_OP_COLOR_FACTOR ((0x3<<29)|(0x1d<<24)|(0x1<<16)|0x0)
-#define GFX_OP_STIPPLE ((0x3<<29)|(0x1d<<24)|(0x83<<16))
-#define GFX_OP_MAP_INFO ((0x3<<29)|(0x1d<<24)|0x2)
-#define GFX_OP_DESTBUFFER_VARS ((0x3<<29)|(0x1d<<24)|(0x85<<16)|0x0)
-#define GFX_OP_DRAWRECT_INFO ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3))
-#define GFX_OP_PRIMITIVE ((0x3<<29)|(0x1f<<24))
-
-#define CMD_OP_Z_BUFFER_INFO ((0x0<<29)|(0x16<<23))
-#define CMD_OP_DESTBUFFER_INFO ((0x0<<29)|(0x15<<23))
-#define CMD_OP_FRONTBUFFER_INFO ((0x0<<29)|(0x14<<23))
-#define CMD_OP_WAIT_FOR_EVENT ((0x0<<29)|(0x03<<23))
-
-#define BR00_BITBLT_CLIENT 0x40000000
-#define BR00_OP_COLOR_BLT 0x10000000
-#define BR00_OP_SRC_COPY_BLT 0x10C00000
-#define BR13_SOLID_PATTERN 0x80000000
-
-#define WAIT_FOR_PLANE_A_SCANLINES (1<<1)
-#define WAIT_FOR_PLANE_A_FLIP (1<<2)
-#define WAIT_FOR_VBLANK (1<<3)
-
-#endif
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index 3efce05d7b57..9c0990c0ec87 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -18,6 +18,8 @@ config DRM_I915
select DRM_PANEL
select DRM_MIPI_DSI
select RELAY
+ select I2C
+ select I2C_ALGOBIT
select IRQ_WORK
# i915 depends on ACPI_VIDEO when ACPI is enabled
# but for select to work, need to select ACPI_VIDEO's dependencies, ick
@@ -54,24 +56,33 @@ config DRM_I915
If "M" is selected, the module will be called i915.
config DRM_I915_FORCE_PROBE
- string "Force probe driver for selected new Intel hardware"
+ string "Force probe i915 for selected Intel hardware IDs"
depends on DRM_I915
help
This is the default value for the i915.force_probe module
parameter. Using the module parameter overrides this option.
- Force probe the driver for new Intel graphics devices that are
+ Force probe the i915 for Intel graphics devices that are
recognized but not properly supported by this kernel version. It is
recommended to upgrade to a kernel version with proper support as soon
as it is available.
+ It can also be used to block the probe of recognized and fully
+ supported devices.
+
Use "" to disable force probe. If in doubt, use this.
- Use "<pci-id>[,<pci-id>,...]" to force probe the driver for listed
+ Use "<pci-id>[,<pci-id>,...]" to force probe the i915 for listed
devices. For example, "4500" or "4500,4571".
Use "*" to force probe the driver for all known devices.
+ Use "!" right before the ID to block the probe of the device. For
+ example, "4500,!4571" forces the probe of 4500 and blocks the probe of
+ 4571.
+
+ Use "!*" to block the probe of the driver for all known devices.
+
config DRM_I915_CAPTURE_ERROR
bool "Enable capturing GPU state following a hang"
depends on DRM_I915
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 01974b82d205..918470a04591 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -48,9 +48,7 @@ i915-y += i915_driver.o \
i915_sysfs.o \
i915_utils.o \
intel_device_info.o \
- intel_dram.o \
intel_memory_region.o \
- intel_pch.o \
intel_pcode.o \
intel_pm.o \
intel_region_ttm.o \
@@ -62,6 +60,12 @@ i915-y += i915_driver.o \
vlv_sideband.o \
vlv_suspend.o
+# core peripheral code
+i915-y += \
+ soc/intel_dram.o \
+ soc/intel_gmch.o \
+ soc/intel_pch.o
+
# core library code
i915-y += \
i915_memcpy.o \
@@ -188,9 +192,9 @@ i915-y += \
i915_vma_resource.o
# general-purpose microcontroller (GuC) support
-i915-y += gt/uc/intel_uc.o \
- gt/uc/intel_uc_debugfs.o \
- gt/uc/intel_uc_fw.o \
+i915-y += \
+ gt/uc/intel_gsc_fw.o \
+ gt/uc/intel_gsc_uc.o \
gt/uc/intel_guc.o \
gt/uc/intel_guc_ads.o \
gt/uc/intel_guc_capture.o \
@@ -205,7 +209,10 @@ i915-y += gt/uc/intel_uc.o \
gt/uc/intel_guc_submission.o \
gt/uc/intel_huc.o \
gt/uc/intel_huc_debugfs.o \
- gt/uc/intel_huc_fw.o
+ gt/uc/intel_huc_fw.o \
+ gt/uc/intel_uc.o \
+ gt/uc/intel_uc_debugfs.o \
+ gt/uc/intel_uc_fw.o
# graphics system controller (GSC) support
i915-y += gt/intel_gsc.o
@@ -260,6 +267,7 @@ i915-y += \
display/intel_quirks.o \
display/intel_sprite.o \
display/intel_tc.o \
+ display/intel_vblank.o \
display/intel_vga.o \
display/i9xx_plane.o \
display/skl_scaler.o \
diff --git a/drivers/gpu/drm/i915/display/dvo_ch7xxx.c b/drivers/gpu/drm/i915/display/dvo_ch7xxx.c
index 54f58ba44b9f..6d948520e9a6 100644
--- a/drivers/gpu/drm/i915/display/dvo_ch7xxx.c
+++ b/drivers/gpu/drm/i915/display/dvo_ch7xxx.c
@@ -50,15 +50,26 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#define CH7xxx_INPUT_CLOCK 0x1d
#define CH7xxx_GPIO 0x1e
#define CH7xxx_GPIO_HPIR (1<<3)
-#define CH7xxx_IDF 0x1f
+#define CH7xxx_IDF 0x1f
+#define CH7xxx_IDF_IBS (1<<7)
+#define CH7xxx_IDF_DES (1<<6)
#define CH7xxx_IDF_HSP (1<<3)
#define CH7xxx_IDF_VSP (1<<4)
#define CH7xxx_CONNECTION_DETECT 0x20
#define CH7xxx_CDET_DVI (1<<5)
-#define CH7301_DAC_CNTL 0x21
+#define CH7xxx_DAC_CNTL 0x21
+#define CH7xxx_SYNCO_MASK (3 << 3)
+#define CH7xxx_SYNCO_VGA_HSYNC (1 << 3)
+
+#define CH7xxx_CLOCK_OUTPUT 0x22
+#define CH7xxx_BCOEN (1 << 4)
+#define CH7xxx_BCOP (1 << 3)
+#define CH7xxx_BCO_MASK (7 << 0)
+#define CH7xxx_BCO_VGA_VSYNC (6 << 0)
+
#define CH7301_HOTPLUG 0x23
#define CH7xxx_TCTL 0x31
#define CH7xxx_TVCO 0x32
@@ -301,6 +312,8 @@ static void ch7xxx_mode_set(struct intel_dvo_device *dvo,
ch7xxx_readb(dvo, CH7xxx_IDF, &idf);
+ idf |= CH7xxx_IDF_IBS;
+
idf &= ~(CH7xxx_IDF_HSP | CH7xxx_IDF_VSP);
if (mode->flags & DRM_MODE_FLAG_PHSYNC)
idf |= CH7xxx_IDF_HSP;
@@ -309,6 +322,11 @@ static void ch7xxx_mode_set(struct intel_dvo_device *dvo,
idf |= CH7xxx_IDF_VSP;
ch7xxx_writeb(dvo, CH7xxx_IDF, idf);
+
+ ch7xxx_writeb(dvo, CH7xxx_DAC_CNTL,
+ CH7xxx_SYNCO_VGA_HSYNC);
+ ch7xxx_writeb(dvo, CH7xxx_CLOCK_OUTPUT,
+ CH7xxx_BCOEN | CH7xxx_BCO_VGA_VSYNC);
}
/* set the CH7xxx power state */
diff --git a/drivers/gpu/drm/i915/display/dvo_sil164.c b/drivers/gpu/drm/i915/display/dvo_sil164.c
index 0dfa0a0209ff..4acc8ce29c0b 100644
--- a/drivers/gpu/drm/i915/display/dvo_sil164.c
+++ b/drivers/gpu/drm/i915/display/dvo_sil164.c
@@ -58,6 +58,10 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#define SIL164_9_MDI (1<<0)
#define SIL164_REGC 0x0c
+#define SIL164_C_SCNT (1<<7)
+#define SIL164_C_PLLF_MASK (0xf<<1)
+#define SIL164_C_PLLF_REC (4<<1)
+#define SIL164_C_PFEN (1<<0)
struct sil164_priv {
//I2CDevRec d;
@@ -205,7 +209,13 @@ static void sil164_mode_set(struct intel_dvo_device *dvo,
sil164_writeb(sil, 0x0c, 0x89);
sil164_writeb(sil, 0x08, 0x31);*/
/* don't do much */
- return;
+
+ sil164_writeb(dvo, SIL164_REG8,
+ SIL164_8_VEN | SIL164_8_HEN);
+ sil164_writeb(dvo, SIL164_REG9,
+ SIL164_9_TSEL);
+ sil164_writeb(dvo, SIL164_REGC,
+ SIL164_C_PLLF_REC | SIL164_C_PFEN);
}
/* set the SIL164 power state */
@@ -224,7 +234,6 @@ static void sil164_dpms(struct intel_dvo_device *dvo, bool enable)
ch &= ~SIL164_8_PD;
sil164_writeb(dvo, SIL164_REG8, ch);
- return;
}
static bool sil164_get_hw_state(struct intel_dvo_device *dvo)
diff --git a/drivers/gpu/drm/i915/display/g4x_dp.c b/drivers/gpu/drm/i915/display/g4x_dp.c
index 24ef36ec2d3d..fa754038d669 100644
--- a/drivers/gpu/drm/i915/display/g4x_dp.c
+++ b/drivers/gpu/drm/i915/display/g4x_dp.c
@@ -398,6 +398,8 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
if (intel_dp_is_edp(intel_dp))
intel_edp_fixup_vbt_bpp(encoder, pipe_config->pipe_bpp);
+
+ intel_audio_codec_get_config(encoder, pipe_config);
}
static void
diff --git a/drivers/gpu/drm/i915/display/g4x_hdmi.c b/drivers/gpu/drm/i915/display/g4x_hdmi.c
index c3580d96765c..64c3b3990702 100644
--- a/drivers/gpu/drm/i915/display/g4x_hdmi.c
+++ b/drivers/gpu/drm/i915/display/g4x_hdmi.c
@@ -155,6 +155,8 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
intel_read_infoframe(encoder, pipe_config,
HDMI_INFOFRAME_TYPE_VENDOR,
&pipe_config->infoframes.hdmi);
+
+ intel_audio_codec_get_config(encoder, pipe_config);
}
static void g4x_hdmi_enable_port(struct intel_encoder *encoder,
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
index d16b30a2dded..468a792e6a40 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi.c
+++ b/drivers/gpu/drm/i915/display/icl_dsi.c
@@ -2043,7 +2043,8 @@ void icl_dsi_init(struct drm_i915_private *dev_priv)
/* attach connector to encoder */
intel_connector_attach_encoder(intel_connector, encoder);
- intel_bios_init_panel(dev_priv, &intel_connector->panel, NULL, NULL);
+ encoder->devdata = intel_bios_encoder_data_lookup(dev_priv, port);
+ intel_bios_init_panel_late(dev_priv, &intel_connector->panel, encoder->devdata, NULL);
mutex_lock(&dev_priv->drm.mode_config.mutex);
intel_panel_add_vbt_lfp_fixed_mode(intel_connector);
@@ -2054,7 +2055,7 @@ void icl_dsi_init(struct drm_i915_private *dev_priv)
goto err;
}
- intel_panel_init(intel_connector);
+ intel_panel_init(intel_connector, NULL);
intel_backlight_setup(intel_connector, INVALID_PIPE);
diff --git a/drivers/gpu/drm/i915/display/intel_atomic.c b/drivers/gpu/drm/i915/display/intel_atomic.c
index 6621aa245caf..a9a3f3715279 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic.c
@@ -41,6 +41,7 @@
#include "intel_global_state.h"
#include "intel_hdcp.h"
#include "intel_psr.h"
+#include "intel_fb.h"
#include "skl_universal_plane.h"
/**
@@ -310,11 +311,11 @@ intel_crtc_destroy_state(struct drm_crtc *crtc,
kfree(crtc_state);
}
-static void intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_state,
- int num_scalers_need, struct intel_crtc *intel_crtc,
- const char *name, int idx,
- struct intel_plane_state *plane_state,
- int *scaler_id)
+static int intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_state,
+ int num_scalers_need, struct intel_crtc *intel_crtc,
+ const char *name, int idx,
+ struct intel_plane_state *plane_state,
+ int *scaler_id)
{
struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
int j;
@@ -334,7 +335,7 @@ static void intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_sta
if (drm_WARN(&dev_priv->drm, *scaler_id < 0,
"Cannot find scaler for %s:%d\n", name, idx))
- return;
+ return -EINVAL;
/* set scaler mode */
if (plane_state && plane_state->hw.fb &&
@@ -375,9 +376,71 @@ static void intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_sta
mode = SKL_PS_SCALER_MODE_DYN;
}
+ /*
+ * FIXME: we should also check the scaler factors for pfit, so
+ * this shouldn't be tied directly to planes.
+ */
+ if (plane_state && plane_state->hw.fb) {
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ const struct drm_rect *src = &plane_state->uapi.src;
+ const struct drm_rect *dst = &plane_state->uapi.dst;
+ int hscale, vscale, max_vscale, max_hscale;
+
+ /*
+ * FIXME: When two scalers are needed, but only one of
+ * them needs to downscale, we should make sure that
+ * the one that needs downscaling support is assigned
+ * as the first scaler, so we don't reject downscaling
+ * unnecessarily.
+ */
+
+ if (DISPLAY_VER(dev_priv) >= 14) {
+ /*
+ * On versions 14 and up, only the first
+ * scaler supports a vertical scaling factor
+ * of more than 1.0, while a horizontal
+ * scaling factor of 3.0 is supported.
+ */
+ max_hscale = 0x30000 - 1;
+ if (*scaler_id == 0)
+ max_vscale = 0x30000 - 1;
+ else
+ max_vscale = 0x10000;
+
+ } else if (DISPLAY_VER(dev_priv) >= 10 ||
+ !intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier)) {
+ max_hscale = 0x30000 - 1;
+ max_vscale = 0x30000 - 1;
+ } else {
+ max_hscale = 0x20000 - 1;
+ max_vscale = 0x20000 - 1;
+ }
+
+ /*
+ * FIXME: We should change the if-else block above to
+ * support HQ vs dynamic scaler properly.
+ */
+
+ /* Check if required scaling is within limits */
+ hscale = drm_rect_calc_hscale(src, dst, 1, max_hscale);
+ vscale = drm_rect_calc_vscale(src, dst, 1, max_vscale);
+
+ if (hscale < 0 || vscale < 0) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Scaler %d doesn't support required plane scaling\n",
+ *scaler_id);
+ drm_rect_debug_print("src: ", src, true);
+ drm_rect_debug_print("dst: ", dst, false);
+
+ return -EINVAL;
+ }
+ }
+
drm_dbg_kms(&dev_priv->drm, "Attached scaler id %u.%u to %s:%d\n",
intel_crtc->pipe, *scaler_id, name, idx);
scaler_state->scalers[*scaler_id].mode = mode;
+
+ return 0;
}
/**
@@ -437,7 +500,7 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
for (i = 0; i < sizeof(scaler_state->scaler_users) * 8; i++) {
int *scaler_id;
const char *name;
- int idx;
+ int idx, ret;
/* skip if scaler not required */
if (!(scaler_state->scaler_users & (1 << i)))
@@ -494,9 +557,11 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
scaler_id = &plane_state->scaler_id;
}
- intel_atomic_setup_scaler(scaler_state, num_scalers_need,
- intel_crtc, name, idx,
- plane_state, scaler_id);
+ ret = intel_atomic_setup_scaler(scaler_state, num_scalers_need,
+ intel_crtc, name, idx,
+ plane_state, scaler_id);
+ if (ret < 0)
+ return ret;
}
return 0;
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
index 10e1fc9d0698..1409bcfb6fd3 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
@@ -36,6 +36,7 @@
#include "gt/intel_rps.h"
+#include "i915_config.h"
#include "intel_atomic_plane.h"
#include "intel_cdclk.h"
#include "intel_display_trace.h"
diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c
index 98c3322b4549..a9335c856644 100644
--- a/drivers/gpu/drm/i915/display/intel_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_audio.c
@@ -71,6 +71,8 @@ struct intel_audio_funcs {
void (*audio_codec_disable)(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state);
+ void (*audio_codec_get_config)(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state);
};
/* DP N/M table */
@@ -314,6 +316,27 @@ static int g4x_eld_buffer_size(struct drm_i915_private *i915)
return REG_FIELD_GET(G4X_ELD_BUFFER_SIZE_MASK, tmp);
}
+static void g4x_audio_codec_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ u32 *eld = (u32 *)crtc_state->eld;
+ int eld_buffer_size, len, i;
+ u32 tmp;
+
+ tmp = intel_de_read(i915, G4X_AUD_CNTL_ST);
+ if ((tmp & G4X_ELD_VALID) == 0)
+ return;
+
+ intel_de_rmw(i915, G4X_AUD_CNTL_ST, G4X_ELD_ADDRESS_MASK, 0);
+
+ eld_buffer_size = g4x_eld_buffer_size(i915);
+ len = min_t(int, sizeof(crtc_state->eld) / 4, eld_buffer_size);
+
+ for (i = 0; i < len; i++)
+ eld[i] = intel_de_read(i915, G4X_HDMIW_HDMIEDID);
+}
+
static void g4x_audio_codec_disable(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
@@ -335,8 +358,7 @@ static void g4x_audio_codec_enable(struct intel_encoder *encoder,
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_connector *connector = conn_state->connector;
- const u32 *eld = (const u32 *)connector->eld;
+ const u32 *eld = (const u32 *)crtc_state->eld;
int eld_buffer_size, len, i;
intel_crtc_wait_for_next_vblank(crtc);
@@ -345,7 +367,7 @@ static void g4x_audio_codec_enable(struct intel_encoder *encoder,
G4X_ELD_VALID | G4X_ELD_ADDRESS_MASK, 0);
eld_buffer_size = g4x_eld_buffer_size(i915);
- len = min(drm_eld_size(connector->eld) / 4, eld_buffer_size);
+ len = min(drm_eld_size(crtc_state->eld) / 4, eld_buffer_size);
for (i = 0; i < len; i++)
intel_de_write(i915, G4X_HDMIW_HDMIEDID, eld[i]);
@@ -459,17 +481,6 @@ hsw_audio_config_update(struct intel_encoder *encoder,
hsw_hdmi_audio_config_update(encoder, crtc_state);
}
-/* ELD buffer size in dwords */
-static int hsw_eld_buffer_size(struct drm_i915_private *i915,
- enum transcoder cpu_transcoder)
-{
- u32 tmp;
-
- tmp = intel_de_read(i915, HSW_AUD_DIP_ELD_CTRL(cpu_transcoder));
-
- return REG_FIELD_GET(IBX_ELD_BUFFER_SIZE_MASK, tmp);
-}
-
static void hsw_audio_codec_disable(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
@@ -618,10 +629,7 @@ static void hsw_audio_codec_enable(struct intel_encoder *encoder,
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_connector *connector = conn_state->connector;
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- const u32 *eld = (const u32 *)connector->eld;
- int eld_buffer_size, len, i;
mutex_lock(&i915->display.audio.mutex);
@@ -639,25 +647,10 @@ static void hsw_audio_codec_enable(struct intel_encoder *encoder,
intel_de_rmw(i915, HSW_AUD_PIN_ELD_CP_VLD,
AUDIO_ELD_VALID(cpu_transcoder), 0);
- /* Reset ELD address */
- intel_de_rmw(i915, HSW_AUD_DIP_ELD_CTRL(cpu_transcoder),
- IBX_ELD_ADDRESS_MASK, 0);
-
- eld_buffer_size = hsw_eld_buffer_size(i915, cpu_transcoder);
- len = min(drm_eld_size(connector->eld) / 4, eld_buffer_size);
-
- for (i = 0; i < len; i++)
- intel_de_write(i915, HSW_AUD_EDID_DATA(cpu_transcoder), eld[i]);
- for (; i < eld_buffer_size; i++)
- intel_de_write(i915, HSW_AUD_EDID_DATA(cpu_transcoder), 0);
-
- drm_WARN_ON(&i915->drm,
- (intel_de_read(i915, HSW_AUD_DIP_ELD_CTRL(cpu_transcoder)) &
- IBX_ELD_ADDRESS_MASK) != 0);
-
- /* ELD valid */
- intel_de_rmw(i915, HSW_AUD_PIN_ELD_CP_VLD,
- 0, AUDIO_ELD_VALID(cpu_transcoder));
+ /*
+ * The audio componenent is used to convey the ELD
+ * instead using of the hardware ELD buffer.
+ */
/* Enable timestamps */
hsw_audio_config_update(encoder, crtc_state);
@@ -665,47 +658,33 @@ static void hsw_audio_codec_enable(struct intel_encoder *encoder,
mutex_unlock(&i915->display.audio.mutex);
}
-struct ilk_audio_regs {
+struct ibx_audio_regs {
i915_reg_t hdmiw_hdmiedid, aud_config, aud_cntl_st, aud_cntrl_st2;
};
-static void ilk_audio_regs_init(struct drm_i915_private *i915,
+static void ibx_audio_regs_init(struct drm_i915_private *i915,
enum pipe pipe,
- struct ilk_audio_regs *regs)
+ struct ibx_audio_regs *regs)
{
- if (HAS_PCH_IBX(i915)) {
- regs->hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID(pipe);
- regs->aud_config = IBX_AUD_CFG(pipe);
- regs->aud_cntl_st = IBX_AUD_CNTL_ST(pipe);
- regs->aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
- } else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
+ if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
regs->hdmiw_hdmiedid = VLV_HDMIW_HDMIEDID(pipe);
regs->aud_config = VLV_AUD_CFG(pipe);
regs->aud_cntl_st = VLV_AUD_CNTL_ST(pipe);
regs->aud_cntrl_st2 = VLV_AUD_CNTL_ST2;
- } else {
+ } else if (HAS_PCH_CPT(i915)) {
regs->hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID(pipe);
regs->aud_config = CPT_AUD_CFG(pipe);
regs->aud_cntl_st = CPT_AUD_CNTL_ST(pipe);
regs->aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
+ } else if (HAS_PCH_IBX(i915)) {
+ regs->hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID(pipe);
+ regs->aud_config = IBX_AUD_CFG(pipe);
+ regs->aud_cntl_st = IBX_AUD_CNTL_ST(pipe);
+ regs->aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
}
}
-/* ELD buffer size in dwords */
-static int ilk_eld_buffer_size(struct drm_i915_private *i915,
- enum pipe pipe)
-{
- struct ilk_audio_regs regs;
- u32 tmp;
-
- ilk_audio_regs_init(i915, pipe, &regs);
-
- tmp = intel_de_read(i915, regs.aud_cntl_st);
-
- return REG_FIELD_GET(IBX_ELD_BUFFER_SIZE_MASK, tmp);
-}
-
-static void ilk_audio_codec_disable(struct intel_encoder *encoder,
+static void ibx_audio_codec_disable(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
@@ -713,12 +692,12 @@ static void ilk_audio_codec_disable(struct intel_encoder *encoder,
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
enum port port = encoder->port;
enum pipe pipe = crtc->pipe;
- struct ilk_audio_regs regs;
+ struct ibx_audio_regs regs;
if (drm_WARN_ON(&i915->drm, port == PORT_A))
return;
- ilk_audio_regs_init(i915, pipe, &regs);
+ ibx_audio_regs_init(i915, pipe, &regs);
mutex_lock(&i915->display.audio.mutex);
@@ -741,25 +720,22 @@ static void ilk_audio_codec_disable(struct intel_encoder *encoder,
intel_crtc_wait_for_next_vblank(crtc);
}
-static void ilk_audio_codec_enable(struct intel_encoder *encoder,
+static void ibx_audio_codec_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_connector *connector = conn_state->connector;
- const u32 *eld = (const u32 *)connector->eld;
enum port port = encoder->port;
enum pipe pipe = crtc->pipe;
- int eld_buffer_size, len, i;
- struct ilk_audio_regs regs;
+ struct ibx_audio_regs regs;
if (drm_WARN_ON(&i915->drm, port == PORT_A))
return;
intel_crtc_wait_for_next_vblank(crtc);
- ilk_audio_regs_init(i915, pipe, &regs);
+ ibx_audio_regs_init(i915, pipe, &regs);
mutex_lock(&i915->display.audio.mutex);
@@ -767,24 +743,10 @@ static void ilk_audio_codec_enable(struct intel_encoder *encoder,
intel_de_rmw(i915, regs.aud_cntrl_st2,
IBX_ELD_VALID(port), 0);
- /* Reset ELD address */
- intel_de_rmw(i915, regs.aud_cntl_st,
- IBX_ELD_ADDRESS_MASK, 0);
-
- eld_buffer_size = ilk_eld_buffer_size(i915, pipe);
- len = min(drm_eld_size(connector->eld) / 4, eld_buffer_size);
-
- for (i = 0; i < len; i++)
- intel_de_write(i915, regs.hdmiw_hdmiedid, eld[i]);
- for (; i < eld_buffer_size; i++)
- intel_de_write(i915, regs.hdmiw_hdmiedid, 0);
-
- drm_WARN_ON(&i915->drm,
- (intel_de_read(i915, regs.aud_cntl_st) & IBX_ELD_ADDRESS_MASK) != 0);
-
- /* ELD valid */
- intel_de_rmw(i915, regs.aud_cntrl_st2,
- 0, IBX_ELD_VALID(port));
+ /*
+ * The audio componenent is used to convey the ELD
+ * instead using of the hardware ELD buffer.
+ */
/* Enable timestamps */
intel_de_rmw(i915, regs.aud_config,
@@ -798,6 +760,41 @@ static void ilk_audio_codec_enable(struct intel_encoder *encoder,
mutex_unlock(&i915->display.audio.mutex);
}
+void intel_audio_sdp_split_update(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum transcoder trans = crtc_state->cpu_transcoder;
+
+ if (HAS_DP20(i915))
+ intel_de_rmw(i915, AUD_DP_2DOT0_CTRL(trans), AUD_ENABLE_SDP_SPLIT,
+ crtc_state->sdp_split_enable ? AUD_ENABLE_SDP_SPLIT : 0);
+}
+
+bool intel_audio_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct drm_connector *connector = conn_state->connector;
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->hw.adjusted_mode;
+
+ if (!connector->eld[0]) {
+ drm_dbg_kms(&i915->drm,
+ "Bogus ELD on [CONNECTOR:%d:%s]\n",
+ connector->base.id, connector->name);
+ return false;
+ }
+
+ BUILD_BUG_ON(sizeof(crtc_state->eld) != sizeof(connector->eld));
+ memcpy(crtc_state->eld, connector->eld, sizeof(crtc_state->eld));
+
+ crtc_state->eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2;
+
+ return true;
+}
+
/**
* intel_audio_codec_enable - Enable the audio codec for HD audio
* @encoder: encoder on which to enable audio
@@ -814,27 +811,19 @@ void intel_audio_codec_enable(struct intel_encoder *encoder,
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct i915_audio_component *acomp = i915->display.audio.component;
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_connector *connector = conn_state->connector;
- const struct drm_display_mode *adjusted_mode =
- &crtc_state->hw.adjusted_mode;
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct intel_audio_state *audio_state;
enum port port = encoder->port;
enum pipe pipe = crtc->pipe;
if (!crtc_state->has_audio)
return;
- drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s][ENCODER:%d:%s] Enable audio codec on pipe %c, %u bytes ELD\n",
- connector->base.id, connector->name,
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s][ENCODER:%d:%s] Enable audio codec on [CRTC:%d:%s], %u bytes ELD\n",
+ connector->base.base.id, connector->base.name,
encoder->base.base.id, encoder->base.name,
- pipe_name(pipe), drm_eld_size(connector->eld));
-
- /* FIXME precompute the ELD in .compute_config() */
- if (!connector->eld[0])
- drm_dbg_kms(&i915->drm,
- "Bogus ELD on [CONNECTOR:%d:%s]\n",
- connector->base.id, connector->name);
-
- connector->eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2;
+ crtc->base.base.id, crtc->base.name,
+ drm_eld_size(crtc_state->eld));
if (i915->display.funcs.audio)
i915->display.funcs.audio->audio_codec_enable(encoder,
@@ -842,10 +831,13 @@ void intel_audio_codec_enable(struct intel_encoder *encoder,
conn_state);
mutex_lock(&i915->display.audio.mutex);
- encoder->audio_connector = connector;
- /* referred in audio callbacks */
- i915->display.audio.encoder_map[pipe] = encoder;
+ audio_state = &i915->display.audio.state[pipe];
+
+ audio_state->encoder = encoder;
+ BUILD_BUG_ON(sizeof(audio_state->eld) != sizeof(crtc_state->eld));
+ memcpy(audio_state->eld, crtc_state->eld, sizeof(audio_state->eld));
+
mutex_unlock(&i915->display.audio.mutex);
if (acomp && acomp->base.audio_ops &&
@@ -857,7 +849,7 @@ void intel_audio_codec_enable(struct intel_encoder *encoder,
(int)port, (int)pipe);
}
- intel_lpe_audio_notify(i915, pipe, port, connector->eld,
+ intel_lpe_audio_notify(i915, pipe, port, crtc_state->eld,
crtc_state->port_clock,
intel_crtc_has_dp_encoder(crtc_state));
}
@@ -878,16 +870,18 @@ void intel_audio_codec_disable(struct intel_encoder *encoder,
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct i915_audio_component *acomp = i915->display.audio.component;
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
- struct drm_connector *connector = old_conn_state->connector;
+ struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
+ struct intel_audio_state *audio_state;
enum port port = encoder->port;
enum pipe pipe = crtc->pipe;
if (!old_crtc_state->has_audio)
return;
- drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s][ENCODER:%d:%s] Disable audio codec on pipe %c\n",
- connector->base.id, connector->name,
- encoder->base.base.id, encoder->base.name, pipe_name(pipe));
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s][ENCODER:%d:%s] Disable audio codec on [CRTC:%d:%s]\n",
+ connector->base.base.id, connector->base.name,
+ encoder->base.base.id, encoder->base.name,
+ crtc->base.base.id, crtc->base.name);
if (i915->display.funcs.audio)
i915->display.funcs.audio->audio_codec_disable(encoder,
@@ -895,8 +889,12 @@ void intel_audio_codec_disable(struct intel_encoder *encoder,
old_conn_state);
mutex_lock(&i915->display.audio.mutex);
- encoder->audio_connector = NULL;
- i915->display.audio.encoder_map[pipe] = NULL;
+
+ audio_state = &i915->display.audio.state[pipe];
+
+ audio_state->encoder = NULL;
+ memset(audio_state->eld, 0, sizeof(audio_state->eld));
+
mutex_unlock(&i915->display.audio.mutex);
if (acomp && acomp->base.audio_ops &&
@@ -911,19 +909,52 @@ void intel_audio_codec_disable(struct intel_encoder *encoder,
intel_lpe_audio_notify(i915, pipe, port, NULL, 0, false);
}
+static void intel_acomp_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct intel_audio_state *audio_state;
+ enum pipe pipe = crtc->pipe;
+
+ mutex_lock(&i915->display.audio.mutex);
+
+ audio_state = &i915->display.audio.state[pipe];
+
+ if (audio_state->encoder)
+ memcpy(crtc_state->eld, audio_state->eld, sizeof(audio_state->eld));
+
+ mutex_unlock(&i915->display.audio.mutex);
+}
+
+void intel_audio_codec_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+
+ if (!crtc_state->has_audio)
+ return;
+
+ if (i915->display.funcs.audio)
+ i915->display.funcs.audio->audio_codec_get_config(encoder, crtc_state);
+}
+
static const struct intel_audio_funcs g4x_audio_funcs = {
.audio_codec_enable = g4x_audio_codec_enable,
.audio_codec_disable = g4x_audio_codec_disable,
+ .audio_codec_get_config = g4x_audio_codec_get_config,
};
-static const struct intel_audio_funcs ilk_audio_funcs = {
- .audio_codec_enable = ilk_audio_codec_enable,
- .audio_codec_disable = ilk_audio_codec_disable,
+static const struct intel_audio_funcs ibx_audio_funcs = {
+ .audio_codec_enable = ibx_audio_codec_enable,
+ .audio_codec_disable = ibx_audio_codec_disable,
+ .audio_codec_get_config = intel_acomp_get_config,
};
static const struct intel_audio_funcs hsw_audio_funcs = {
.audio_codec_enable = hsw_audio_codec_enable,
.audio_codec_disable = hsw_audio_codec_disable,
+ .audio_codec_get_config = intel_acomp_get_config,
};
/**
@@ -934,12 +965,11 @@ void intel_audio_hooks_init(struct drm_i915_private *i915)
{
if (IS_G4X(i915))
i915->display.funcs.audio = &g4x_audio_funcs;
- else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
- i915->display.funcs.audio = &ilk_audio_funcs;
+ else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915) ||
+ HAS_PCH_CPT(i915) || HAS_PCH_IBX(i915))
+ i915->display.funcs.audio = &ibx_audio_funcs;
else if (IS_HASWELL(i915) || DISPLAY_VER(i915) >= 8)
i915->display.funcs.audio = &hsw_audio_funcs;
- else if (HAS_PCH_SPLIT(i915))
- i915->display.funcs.audio = &ilk_audio_funcs;
}
struct aud_ts_cdclk_m_n {
@@ -1117,35 +1147,32 @@ static int i915_audio_component_get_cdclk_freq(struct device *kdev)
}
/*
- * get the intel_encoder according to the parameter port and pipe
- * intel_encoder is saved by the index of pipe
- * MST & (pipe >= 0): return the audio.encoder_map[pipe],
+ * get the intel audio state according to the parameter port and pipe
+ * MST & (pipe >= 0): return the audio.state[pipe].encoder],
* when port is matched
* MST & (pipe < 0): this is invalid
* Non-MST & (pipe >= 0): only pipe = 0 (the first device entry)
* will get the right intel_encoder with port matched
* Non-MST & (pipe < 0): get the right intel_encoder with port matched
*/
-static struct intel_encoder *get_saved_enc(struct drm_i915_private *i915,
- int port, int pipe)
+static struct intel_audio_state *find_audio_state(struct drm_i915_private *i915,
+ int port, int pipe)
{
/* MST */
if (pipe >= 0) {
+ struct intel_audio_state *audio_state;
struct intel_encoder *encoder;
if (drm_WARN_ON(&i915->drm,
- pipe >= ARRAY_SIZE(i915->display.audio.encoder_map)))
+ pipe >= ARRAY_SIZE(i915->display.audio.state)))
return NULL;
- encoder = i915->display.audio.encoder_map[pipe];
- /*
- * when bootup, audio driver may not know it is
- * MST or not. So it will poll all the port & pipe
- * combinations
- */
+ audio_state = &i915->display.audio.state[pipe];
+ encoder = audio_state->encoder;
+
if (encoder && encoder->port == port &&
encoder->type == INTEL_OUTPUT_DP_MST)
- return encoder;
+ return audio_state;
}
/* Non-MST */
@@ -1153,13 +1180,15 @@ static struct intel_encoder *get_saved_enc(struct drm_i915_private *i915,
return NULL;
for_each_pipe(i915, pipe) {
+ struct intel_audio_state *audio_state;
struct intel_encoder *encoder;
- encoder = i915->display.audio.encoder_map[pipe];
+ audio_state = &i915->display.audio.state[pipe];
+ encoder = audio_state->encoder;
if (encoder && encoder->port == port &&
encoder->type != INTEL_OUTPUT_DP_MST)
- return encoder;
+ return audio_state;
}
return NULL;
@@ -1170,6 +1199,7 @@ static int i915_audio_component_sync_audio_rate(struct device *kdev, int port,
{
struct drm_i915_private *i915 = kdev_to_i915(kdev);
struct i915_audio_component *acomp = i915->display.audio.component;
+ const struct intel_audio_state *audio_state;
struct intel_encoder *encoder;
struct intel_crtc *crtc;
unsigned long cookie;
@@ -1181,20 +1211,22 @@ static int i915_audio_component_sync_audio_rate(struct device *kdev, int port,
cookie = i915_audio_component_get_power(kdev);
mutex_lock(&i915->display.audio.mutex);
- /* 1. get the pipe */
- encoder = get_saved_enc(i915, port, pipe);
- if (!encoder || !encoder->base.crtc) {
- drm_dbg_kms(&i915->drm, "Not valid for port %c\n",
- port_name(port));
+ audio_state = find_audio_state(i915, port, pipe);
+ if (!audio_state) {
+ drm_dbg_kms(&i915->drm, "Not valid for port %c\n", port_name(port));
err = -ENODEV;
goto unlock;
}
+ encoder = audio_state->encoder;
+
+ /* FIXME stop using the legacy crtc pointer */
crtc = to_intel_crtc(encoder->base.crtc);
/* port must be valid now, otherwise the pipe will be invalid */
acomp->aud_sample_rate[port] = rate;
+ /* FIXME get rid of the crtc->config stuff */
hsw_audio_config_update(encoder, crtc->config);
unlock:
@@ -1208,24 +1240,22 @@ static int i915_audio_component_get_eld(struct device *kdev, int port,
unsigned char *buf, int max_bytes)
{
struct drm_i915_private *i915 = kdev_to_i915(kdev);
- struct intel_encoder *intel_encoder;
- const u8 *eld;
- int ret = -EINVAL;
+ const struct intel_audio_state *audio_state;
+ int ret = 0;
mutex_lock(&i915->display.audio.mutex);
- intel_encoder = get_saved_enc(i915, port, pipe);
- if (!intel_encoder) {
- drm_dbg_kms(&i915->drm, "Not valid for port %c\n",
- port_name(port));
+ audio_state = find_audio_state(i915, port, pipe);
+ if (!audio_state) {
+ drm_dbg_kms(&i915->drm, "Not valid for port %c\n", port_name(port));
mutex_unlock(&i915->display.audio.mutex);
- return ret;
+ return -EINVAL;
}
- ret = 0;
- *enabled = intel_encoder->audio_connector != NULL;
+ *enabled = audio_state->encoder != NULL;
if (*enabled) {
- eld = intel_encoder->audio_connector->eld;
+ const u8 *eld = audio_state->eld;
+
ret = drm_eld_size(eld);
memcpy(buf, eld, min(max_bytes, ret));
}
diff --git a/drivers/gpu/drm/i915/display/intel_audio.h b/drivers/gpu/drm/i915/display/intel_audio.h
index 63b22131dc45..07d034a981e9 100644
--- a/drivers/gpu/drm/i915/display/intel_audio.h
+++ b/drivers/gpu/drm/i915/display/intel_audio.h
@@ -6,21 +6,30 @@
#ifndef __INTEL_AUDIO_H__
#define __INTEL_AUDIO_H__
+#include <linux/types.h>
+
struct drm_connector_state;
struct drm_i915_private;
struct intel_crtc_state;
struct intel_encoder;
void intel_audio_hooks_init(struct drm_i915_private *dev_priv);
+bool intel_audio_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state);
void intel_audio_codec_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
void intel_audio_codec_disable(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state);
+void intel_audio_codec_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state);
void intel_audio_cdclk_change_pre(struct drm_i915_private *dev_priv);
void intel_audio_cdclk_change_post(struct drm_i915_private *dev_priv);
void intel_audio_init(struct drm_i915_private *dev_priv);
void intel_audio_deinit(struct drm_i915_private *dev_priv);
+void intel_audio_sdp_split_update(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
#endif /* __INTEL_AUDIO_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_backlight.c b/drivers/gpu/drm/i915/display/intel_backlight.c
index 71af88a70461..a4e4b7f79e4d 100644
--- a/drivers/gpu/drm/i915/display/intel_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_backlight.c
@@ -83,16 +83,16 @@ static u32 scale_hw_to_user(struct intel_connector *connector,
u32 intel_backlight_invert_pwm_level(struct intel_connector *connector, u32 val)
{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
- drm_WARN_ON(&dev_priv->drm, panel->backlight.pwm_level_max == 0);
+ drm_WARN_ON(&i915->drm, panel->backlight.pwm_level_max == 0);
- if (dev_priv->params.invert_brightness < 0)
+ if (i915->params.invert_brightness < 0)
return val;
- if (dev_priv->params.invert_brightness > 0 ||
- intel_has_quirk(dev_priv, QUIRK_INVERT_BRIGHTNESS)) {
+ if (i915->params.invert_brightness > 0 ||
+ intel_has_quirk(i915, QUIRK_INVERT_BRIGHTNESS)) {
return panel->backlight.pwm_level_max - val + panel->backlight.pwm_level_min;
}
@@ -111,10 +111,10 @@ void intel_backlight_set_pwm_level(const struct drm_connector_state *conn_state,
u32 intel_backlight_level_to_pwm(struct intel_connector *connector, u32 val)
{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
- drm_WARN_ON_ONCE(&dev_priv->drm,
+ drm_WARN_ON_ONCE(&i915->drm,
panel->backlight.max == 0 || panel->backlight.pwm_level_max == 0);
val = scale(val, panel->backlight.min, panel->backlight.max,
@@ -125,14 +125,14 @@ u32 intel_backlight_level_to_pwm(struct intel_connector *connector, u32 val)
u32 intel_backlight_level_from_pwm(struct intel_connector *connector, u32 val)
{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
- drm_WARN_ON_ONCE(&dev_priv->drm,
+ drm_WARN_ON_ONCE(&i915->drm,
panel->backlight.max == 0 || panel->backlight.pwm_level_max == 0);
- if (dev_priv->params.invert_brightness > 0 ||
- (dev_priv->params.invert_brightness == 0 && intel_has_quirk(dev_priv, QUIRK_INVERT_BRIGHTNESS)))
+ if (i915->params.invert_brightness > 0 ||
+ (i915->params.invert_brightness == 0 && intel_has_quirk(i915, QUIRK_INVERT_BRIGHTNESS)))
val = panel->backlight.pwm_level_max - (val - panel->backlight.pwm_level_min);
return scale(val, panel->backlight.pwm_level_min, panel->backlight.pwm_level_max,
@@ -141,32 +141,32 @@ u32 intel_backlight_level_from_pwm(struct intel_connector *connector, u32 val)
static u32 lpt_get_backlight(struct intel_connector *connector, enum pipe unused)
{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
- return intel_de_read(dev_priv, BLC_PWM_PCH_CTL2) & BACKLIGHT_DUTY_CYCLE_MASK;
+ return intel_de_read(i915, BLC_PWM_PCH_CTL2) & BACKLIGHT_DUTY_CYCLE_MASK;
}
static u32 pch_get_backlight(struct intel_connector *connector, enum pipe unused)
{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
- return intel_de_read(dev_priv, BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
+ return intel_de_read(i915, BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
}
static u32 i9xx_get_backlight(struct intel_connector *connector, enum pipe unused)
{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
u32 val;
- val = intel_de_read(dev_priv, BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
- if (DISPLAY_VER(dev_priv) < 4)
+ val = intel_de_read(i915, BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
+ if (DISPLAY_VER(i915) < 4)
val >>= 1;
if (panel->backlight.combination_mode) {
u8 lbpc;
- pci_read_config_byte(to_pci_dev(dev_priv->drm.dev), LBPC, &lbpc);
+ pci_read_config_byte(to_pci_dev(i915->drm.dev), LBPC, &lbpc);
val *= lbpc;
}
@@ -175,21 +175,20 @@ static u32 i9xx_get_backlight(struct intel_connector *connector, enum pipe unuse
static u32 vlv_get_backlight(struct intel_connector *connector, enum pipe pipe)
{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
- if (drm_WARN_ON(&dev_priv->drm, pipe != PIPE_A && pipe != PIPE_B))
+ if (drm_WARN_ON(&i915->drm, pipe != PIPE_A && pipe != PIPE_B))
return 0;
- return intel_de_read(dev_priv, VLV_BLC_PWM_CTL(pipe)) & BACKLIGHT_DUTY_CYCLE_MASK;
+ return intel_de_read(i915, VLV_BLC_PWM_CTL(pipe)) & BACKLIGHT_DUTY_CYCLE_MASK;
}
static u32 bxt_get_backlight(struct intel_connector *connector, enum pipe unused)
{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
- return intel_de_read(dev_priv,
- BXT_BLC_PWM_DUTY(panel->backlight.controller));
+ return intel_de_read(i915, BXT_BLC_PWM_DUTY(panel->backlight.controller));
}
static u32 ext_pwm_get_backlight(struct intel_connector *connector, enum pipe unused)
@@ -204,69 +203,69 @@ static u32 ext_pwm_get_backlight(struct intel_connector *connector, enum pipe un
static void lpt_set_backlight(const struct drm_connector_state *conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ u32 val;
- u32 val = intel_de_read(dev_priv, BLC_PWM_PCH_CTL2) & ~BACKLIGHT_DUTY_CYCLE_MASK;
- intel_de_write(dev_priv, BLC_PWM_PCH_CTL2, val | level);
+ val = intel_de_read(i915, BLC_PWM_PCH_CTL2) & ~BACKLIGHT_DUTY_CYCLE_MASK;
+ intel_de_write(i915, BLC_PWM_PCH_CTL2, val | level);
}
static void pch_set_backlight(const struct drm_connector_state *conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
u32 tmp;
- tmp = intel_de_read(dev_priv, BLC_PWM_CPU_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK;
- intel_de_write(dev_priv, BLC_PWM_CPU_CTL, tmp | level);
+ tmp = intel_de_read(i915, BLC_PWM_CPU_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK;
+ intel_de_write(i915, BLC_PWM_CPU_CTL, tmp | level);
}
static void i9xx_set_backlight(const struct drm_connector_state *conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
u32 tmp, mask;
- drm_WARN_ON(&dev_priv->drm, panel->backlight.pwm_level_max == 0);
+ drm_WARN_ON(&i915->drm, panel->backlight.pwm_level_max == 0);
if (panel->backlight.combination_mode) {
u8 lbpc;
lbpc = level * 0xfe / panel->backlight.pwm_level_max + 1;
level /= lbpc;
- pci_write_config_byte(to_pci_dev(dev_priv->drm.dev), LBPC, lbpc);
+ pci_write_config_byte(to_pci_dev(i915->drm.dev), LBPC, lbpc);
}
- if (DISPLAY_VER(dev_priv) == 4) {
+ if (DISPLAY_VER(i915) == 4) {
mask = BACKLIGHT_DUTY_CYCLE_MASK;
} else {
level <<= 1;
mask = BACKLIGHT_DUTY_CYCLE_MASK_PNV;
}
- tmp = intel_de_read(dev_priv, BLC_PWM_CTL) & ~mask;
- intel_de_write(dev_priv, BLC_PWM_CTL, tmp | level);
+ tmp = intel_de_read(i915, BLC_PWM_CTL) & ~mask;
+ intel_de_write(i915, BLC_PWM_CTL, tmp | level);
}
static void vlv_set_backlight(const struct drm_connector_state *conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
enum pipe pipe = to_intel_crtc(conn_state->crtc)->pipe;
u32 tmp;
- tmp = intel_de_read(dev_priv, VLV_BLC_PWM_CTL(pipe)) & ~BACKLIGHT_DUTY_CYCLE_MASK;
- intel_de_write(dev_priv, VLV_BLC_PWM_CTL(pipe), tmp | level);
+ tmp = intel_de_read(i915, VLV_BLC_PWM_CTL(pipe)) & ~BACKLIGHT_DUTY_CYCLE_MASK;
+ intel_de_write(i915, VLV_BLC_PWM_CTL(pipe), tmp | level);
}
static void bxt_set_backlight(const struct drm_connector_state *conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
- intel_de_write(dev_priv,
- BXT_BLC_PWM_DUTY(panel->backlight.controller), level);
+ intel_de_write(i915, BXT_BLC_PWM_DUTY(panel->backlight.controller), level);
}
static void ext_pwm_set_backlight(const struct drm_connector_state *conn_state, u32 level)
@@ -296,7 +295,7 @@ void intel_backlight_set_acpi(const struct drm_connector_state *conn_state,
u32 user_level, u32 user_max)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
u32 hw_level;
@@ -309,9 +308,9 @@ void intel_backlight_set_acpi(const struct drm_connector_state *conn_state,
if (!panel->backlight.present || !conn_state->crtc)
return;
- mutex_lock(&dev_priv->display.backlight.lock);
+ mutex_lock(&i915->display.backlight.lock);
- drm_WARN_ON(&dev_priv->drm, panel->backlight.max == 0);
+ drm_WARN_ON(&i915->drm, panel->backlight.max == 0);
hw_level = clamp_user_to_hw(connector, user_level, user_max);
panel->backlight.level = hw_level;
@@ -325,13 +324,13 @@ void intel_backlight_set_acpi(const struct drm_connector_state *conn_state,
if (panel->backlight.enabled)
intel_panel_actually_set_backlight(conn_state, hw_level);
- mutex_unlock(&dev_priv->display.backlight.lock);
+ mutex_unlock(&i915->display.backlight.lock);
}
static void lpt_disable_backlight(const struct drm_connector_state *old_conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
u32 tmp;
intel_backlight_set_pwm_level(old_conn_state, level);
@@ -344,31 +343,29 @@ static void lpt_disable_backlight(const struct drm_connector_state *old_conn_sta
* This needs rework if we need to add support for CPU PWM on PCH split
* platforms.
*/
- tmp = intel_de_read(dev_priv, BLC_PWM_CPU_CTL2);
+ tmp = intel_de_read(i915, BLC_PWM_CPU_CTL2);
if (tmp & BLM_PWM_ENABLE) {
- drm_dbg_kms(&dev_priv->drm,
- "cpu backlight was enabled, disabling\n");
- intel_de_write(dev_priv, BLC_PWM_CPU_CTL2,
- tmp & ~BLM_PWM_ENABLE);
+ drm_dbg_kms(&i915->drm, "cpu backlight was enabled, disabling\n");
+ intel_de_write(i915, BLC_PWM_CPU_CTL2, tmp & ~BLM_PWM_ENABLE);
}
- tmp = intel_de_read(dev_priv, BLC_PWM_PCH_CTL1);
- intel_de_write(dev_priv, BLC_PWM_PCH_CTL1, tmp & ~BLM_PCH_PWM_ENABLE);
+ tmp = intel_de_read(i915, BLC_PWM_PCH_CTL1);
+ intel_de_write(i915, BLC_PWM_PCH_CTL1, tmp & ~BLM_PCH_PWM_ENABLE);
}
static void pch_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val)
{
struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
u32 tmp;
intel_backlight_set_pwm_level(old_conn_state, val);
- tmp = intel_de_read(dev_priv, BLC_PWM_CPU_CTL2);
- intel_de_write(dev_priv, BLC_PWM_CPU_CTL2, tmp & ~BLM_PWM_ENABLE);
+ tmp = intel_de_read(i915, BLC_PWM_CPU_CTL2);
+ intel_de_write(i915, BLC_PWM_CPU_CTL2, tmp & ~BLM_PWM_ENABLE);
- tmp = intel_de_read(dev_priv, BLC_PWM_PCH_CTL1);
- intel_de_write(dev_priv, BLC_PWM_PCH_CTL1, tmp & ~BLM_PCH_PWM_ENABLE);
+ tmp = intel_de_read(i915, BLC_PWM_PCH_CTL1);
+ intel_de_write(i915, BLC_PWM_PCH_CTL1, tmp & ~BLM_PCH_PWM_ENABLE);
}
static void i9xx_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val)
@@ -378,62 +375,59 @@ static void i9xx_disable_backlight(const struct drm_connector_state *old_conn_st
static void i965_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val)
{
- struct drm_i915_private *dev_priv = to_i915(old_conn_state->connector->dev);
+ struct drm_i915_private *i915 = to_i915(old_conn_state->connector->dev);
u32 tmp;
intel_backlight_set_pwm_level(old_conn_state, val);
- tmp = intel_de_read(dev_priv, BLC_PWM_CTL2);
- intel_de_write(dev_priv, BLC_PWM_CTL2, tmp & ~BLM_PWM_ENABLE);
+ tmp = intel_de_read(i915, BLC_PWM_CTL2);
+ intel_de_write(i915, BLC_PWM_CTL2, tmp & ~BLM_PWM_ENABLE);
}
static void vlv_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val)
{
struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
enum pipe pipe = to_intel_crtc(old_conn_state->crtc)->pipe;
u32 tmp;
intel_backlight_set_pwm_level(old_conn_state, val);
- tmp = intel_de_read(dev_priv, VLV_BLC_PWM_CTL2(pipe));
- intel_de_write(dev_priv, VLV_BLC_PWM_CTL2(pipe),
- tmp & ~BLM_PWM_ENABLE);
+ tmp = intel_de_read(i915, VLV_BLC_PWM_CTL2(pipe));
+ intel_de_write(i915, VLV_BLC_PWM_CTL2(pipe), tmp & ~BLM_PWM_ENABLE);
}
static void bxt_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val)
{
struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
u32 tmp;
intel_backlight_set_pwm_level(old_conn_state, val);
- tmp = intel_de_read(dev_priv,
- BXT_BLC_PWM_CTL(panel->backlight.controller));
- intel_de_write(dev_priv, BXT_BLC_PWM_CTL(panel->backlight.controller),
+ tmp = intel_de_read(i915, BXT_BLC_PWM_CTL(panel->backlight.controller));
+ intel_de_write(i915, BXT_BLC_PWM_CTL(panel->backlight.controller),
tmp & ~BXT_BLC_PWM_ENABLE);
if (panel->backlight.controller == 1) {
- val = intel_de_read(dev_priv, UTIL_PIN_CTL);
+ val = intel_de_read(i915, UTIL_PIN_CTL);
val &= ~UTIL_PIN_ENABLE;
- intel_de_write(dev_priv, UTIL_PIN_CTL, val);
+ intel_de_write(i915, UTIL_PIN_CTL, val);
}
}
static void cnp_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val)
{
struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
u32 tmp;
intel_backlight_set_pwm_level(old_conn_state, val);
- tmp = intel_de_read(dev_priv,
- BXT_BLC_PWM_CTL(panel->backlight.controller));
- intel_de_write(dev_priv, BXT_BLC_PWM_CTL(panel->backlight.controller),
+ tmp = intel_de_read(i915, BXT_BLC_PWM_CTL(panel->backlight.controller));
+ intel_de_write(i915, BXT_BLC_PWM_CTL(panel->backlight.controller),
tmp & ~BXT_BLC_PWM_ENABLE);
}
@@ -451,7 +445,7 @@ static void ext_pwm_disable_backlight(const struct drm_connector_state *old_conn
void intel_backlight_disable(const struct drm_connector_state *old_conn_state)
{
struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
if (!panel->backlight.present)
@@ -463,68 +457,66 @@ void intel_backlight_disable(const struct drm_connector_state *old_conn_state)
* backlight. This will leave the backlight on unnecessarily when
* another client is not activated.
*/
- if (dev_priv->drm.switch_power_state == DRM_SWITCH_POWER_CHANGING) {
- drm_dbg_kms(&dev_priv->drm,
- "Skipping backlight disable on vga switch\n");
+ if (i915->drm.switch_power_state == DRM_SWITCH_POWER_CHANGING) {
+ drm_dbg_kms(&i915->drm, "Skipping backlight disable on vga switch\n");
return;
}
- mutex_lock(&dev_priv->display.backlight.lock);
+ mutex_lock(&i915->display.backlight.lock);
if (panel->backlight.device)
panel->backlight.device->props.power = FB_BLANK_POWERDOWN;
panel->backlight.enabled = false;
panel->backlight.funcs->disable(old_conn_state, 0);
- mutex_unlock(&dev_priv->display.backlight.lock);
+ mutex_unlock(&i915->display.backlight.lock);
}
static void lpt_enable_backlight(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
u32 pch_ctl1, pch_ctl2, schicken;
- pch_ctl1 = intel_de_read(dev_priv, BLC_PWM_PCH_CTL1);
+ pch_ctl1 = intel_de_read(i915, BLC_PWM_PCH_CTL1);
if (pch_ctl1 & BLM_PCH_PWM_ENABLE) {
- drm_dbg_kms(&dev_priv->drm, "pch backlight already enabled\n");
+ drm_dbg_kms(&i915->drm, "pch backlight already enabled\n");
pch_ctl1 &= ~BLM_PCH_PWM_ENABLE;
- intel_de_write(dev_priv, BLC_PWM_PCH_CTL1, pch_ctl1);
+ intel_de_write(i915, BLC_PWM_PCH_CTL1, pch_ctl1);
}
- if (HAS_PCH_LPT(dev_priv)) {
- schicken = intel_de_read(dev_priv, SOUTH_CHICKEN2);
+ if (HAS_PCH_LPT(i915)) {
+ schicken = intel_de_read(i915, SOUTH_CHICKEN2);
if (panel->backlight.alternate_pwm_increment)
schicken |= LPT_PWM_GRANULARITY;
else
schicken &= ~LPT_PWM_GRANULARITY;
- intel_de_write(dev_priv, SOUTH_CHICKEN2, schicken);
+ intel_de_write(i915, SOUTH_CHICKEN2, schicken);
} else {
- schicken = intel_de_read(dev_priv, SOUTH_CHICKEN1);
+ schicken = intel_de_read(i915, SOUTH_CHICKEN1);
if (panel->backlight.alternate_pwm_increment)
schicken |= SPT_PWM_GRANULARITY;
else
schicken &= ~SPT_PWM_GRANULARITY;
- intel_de_write(dev_priv, SOUTH_CHICKEN1, schicken);
+ intel_de_write(i915, SOUTH_CHICKEN1, schicken);
}
pch_ctl2 = panel->backlight.pwm_level_max << 16;
- intel_de_write(dev_priv, BLC_PWM_PCH_CTL2, pch_ctl2);
+ intel_de_write(i915, BLC_PWM_PCH_CTL2, pch_ctl2);
pch_ctl1 = 0;
if (panel->backlight.active_low_pwm)
pch_ctl1 |= BLM_PCH_POLARITY;
/* After LPT, override is the default. */
- if (HAS_PCH_LPT(dev_priv))
+ if (HAS_PCH_LPT(i915))
pch_ctl1 |= BLM_PCH_OVERRIDE_ENABLE;
- intel_de_write(dev_priv, BLC_PWM_PCH_CTL1, pch_ctl1);
- intel_de_posting_read(dev_priv, BLC_PWM_PCH_CTL1);
- intel_de_write(dev_priv, BLC_PWM_PCH_CTL1,
- pch_ctl1 | BLM_PCH_PWM_ENABLE);
+ intel_de_write(i915, BLC_PWM_PCH_CTL1, pch_ctl1);
+ intel_de_posting_read(i915, BLC_PWM_PCH_CTL1);
+ intel_de_write(i915, BLC_PWM_PCH_CTL1, pch_ctl1 | BLM_PCH_PWM_ENABLE);
/* This won't stick until the above enable. */
intel_backlight_set_pwm_level(conn_state, level);
@@ -534,61 +526,60 @@ static void pch_enable_backlight(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
u32 cpu_ctl2, pch_ctl1, pch_ctl2;
- cpu_ctl2 = intel_de_read(dev_priv, BLC_PWM_CPU_CTL2);
+ cpu_ctl2 = intel_de_read(i915, BLC_PWM_CPU_CTL2);
if (cpu_ctl2 & BLM_PWM_ENABLE) {
- drm_dbg_kms(&dev_priv->drm, "cpu backlight already enabled\n");
+ drm_dbg_kms(&i915->drm, "cpu backlight already enabled\n");
cpu_ctl2 &= ~BLM_PWM_ENABLE;
- intel_de_write(dev_priv, BLC_PWM_CPU_CTL2, cpu_ctl2);
+ intel_de_write(i915, BLC_PWM_CPU_CTL2, cpu_ctl2);
}
- pch_ctl1 = intel_de_read(dev_priv, BLC_PWM_PCH_CTL1);
+ pch_ctl1 = intel_de_read(i915, BLC_PWM_PCH_CTL1);
if (pch_ctl1 & BLM_PCH_PWM_ENABLE) {
- drm_dbg_kms(&dev_priv->drm, "pch backlight already enabled\n");
+ drm_dbg_kms(&i915->drm, "pch backlight already enabled\n");
pch_ctl1 &= ~BLM_PCH_PWM_ENABLE;
- intel_de_write(dev_priv, BLC_PWM_PCH_CTL1, pch_ctl1);
+ intel_de_write(i915, BLC_PWM_PCH_CTL1, pch_ctl1);
}
if (cpu_transcoder == TRANSCODER_EDP)
cpu_ctl2 = BLM_TRANSCODER_EDP;
else
cpu_ctl2 = BLM_PIPE(cpu_transcoder);
- intel_de_write(dev_priv, BLC_PWM_CPU_CTL2, cpu_ctl2);
- intel_de_posting_read(dev_priv, BLC_PWM_CPU_CTL2);
- intel_de_write(dev_priv, BLC_PWM_CPU_CTL2, cpu_ctl2 | BLM_PWM_ENABLE);
+ intel_de_write(i915, BLC_PWM_CPU_CTL2, cpu_ctl2);
+ intel_de_posting_read(i915, BLC_PWM_CPU_CTL2);
+ intel_de_write(i915, BLC_PWM_CPU_CTL2, cpu_ctl2 | BLM_PWM_ENABLE);
/* This won't stick until the above enable. */
intel_backlight_set_pwm_level(conn_state, level);
pch_ctl2 = panel->backlight.pwm_level_max << 16;
- intel_de_write(dev_priv, BLC_PWM_PCH_CTL2, pch_ctl2);
+ intel_de_write(i915, BLC_PWM_PCH_CTL2, pch_ctl2);
pch_ctl1 = 0;
if (panel->backlight.active_low_pwm)
pch_ctl1 |= BLM_PCH_POLARITY;
- intel_de_write(dev_priv, BLC_PWM_PCH_CTL1, pch_ctl1);
- intel_de_posting_read(dev_priv, BLC_PWM_PCH_CTL1);
- intel_de_write(dev_priv, BLC_PWM_PCH_CTL1,
- pch_ctl1 | BLM_PCH_PWM_ENABLE);
+ intel_de_write(i915, BLC_PWM_PCH_CTL1, pch_ctl1);
+ intel_de_posting_read(i915, BLC_PWM_PCH_CTL1);
+ intel_de_write(i915, BLC_PWM_PCH_CTL1, pch_ctl1 | BLM_PCH_PWM_ENABLE);
}
static void i9xx_enable_backlight(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
u32 ctl, freq;
- ctl = intel_de_read(dev_priv, BLC_PWM_CTL);
+ ctl = intel_de_read(i915, BLC_PWM_CTL);
if (ctl & BACKLIGHT_DUTY_CYCLE_MASK_PNV) {
- drm_dbg_kms(&dev_priv->drm, "backlight already enabled\n");
- intel_de_write(dev_priv, BLC_PWM_CTL, 0);
+ drm_dbg_kms(&i915->drm, "backlight already enabled\n");
+ intel_de_write(i915, BLC_PWM_CTL, 0);
}
freq = panel->backlight.pwm_level_max;
@@ -598,11 +589,11 @@ static void i9xx_enable_backlight(const struct intel_crtc_state *crtc_state,
ctl = freq << 17;
if (panel->backlight.combination_mode)
ctl |= BLM_LEGACY_MODE;
- if (IS_PINEVIEW(dev_priv) && panel->backlight.active_low_pwm)
+ if (IS_PINEVIEW(i915) && panel->backlight.active_low_pwm)
ctl |= BLM_POLARITY_PNV;
- intel_de_write(dev_priv, BLC_PWM_CTL, ctl);
- intel_de_posting_read(dev_priv, BLC_PWM_CTL);
+ intel_de_write(i915, BLC_PWM_CTL, ctl);
+ intel_de_posting_read(i915, BLC_PWM_CTL);
/* XXX: combine this into above write? */
intel_backlight_set_pwm_level(conn_state, level);
@@ -612,24 +603,24 @@ static void i9xx_enable_backlight(const struct intel_crtc_state *crtc_state,
* 855gm only, but checking for gen2 is safe, as 855gm is the only gen2
* that has backlight.
*/
- if (DISPLAY_VER(dev_priv) == 2)
- intel_de_write(dev_priv, BLC_HIST_CTL, BLM_HISTOGRAM_ENABLE);
+ if (DISPLAY_VER(i915) == 2)
+ intel_de_write(i915, BLC_HIST_CTL, BLM_HISTOGRAM_ENABLE);
}
static void i965_enable_backlight(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
enum pipe pipe = to_intel_crtc(conn_state->crtc)->pipe;
u32 ctl, ctl2, freq;
- ctl2 = intel_de_read(dev_priv, BLC_PWM_CTL2);
+ ctl2 = intel_de_read(i915, BLC_PWM_CTL2);
if (ctl2 & BLM_PWM_ENABLE) {
- drm_dbg_kms(&dev_priv->drm, "backlight already enabled\n");
+ drm_dbg_kms(&i915->drm, "backlight already enabled\n");
ctl2 &= ~BLM_PWM_ENABLE;
- intel_de_write(dev_priv, BLC_PWM_CTL2, ctl2);
+ intel_de_write(i915, BLC_PWM_CTL2, ctl2);
}
freq = panel->backlight.pwm_level_max;
@@ -637,16 +628,16 @@ static void i965_enable_backlight(const struct intel_crtc_state *crtc_state,
freq /= 0xff;
ctl = freq << 16;
- intel_de_write(dev_priv, BLC_PWM_CTL, ctl);
+ intel_de_write(i915, BLC_PWM_CTL, ctl);
ctl2 = BLM_PIPE(pipe);
if (panel->backlight.combination_mode)
ctl2 |= BLM_COMBINATION_MODE;
if (panel->backlight.active_low_pwm)
ctl2 |= BLM_POLARITY_I965;
- intel_de_write(dev_priv, BLC_PWM_CTL2, ctl2);
- intel_de_posting_read(dev_priv, BLC_PWM_CTL2);
- intel_de_write(dev_priv, BLC_PWM_CTL2, ctl2 | BLM_PWM_ENABLE);
+ intel_de_write(i915, BLC_PWM_CTL2, ctl2);
+ intel_de_posting_read(i915, BLC_PWM_CTL2);
+ intel_de_write(i915, BLC_PWM_CTL2, ctl2 | BLM_PWM_ENABLE);
intel_backlight_set_pwm_level(conn_state, level);
}
@@ -655,20 +646,20 @@ static void vlv_enable_backlight(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
u32 ctl, ctl2;
- ctl2 = intel_de_read(dev_priv, VLV_BLC_PWM_CTL2(pipe));
+ ctl2 = intel_de_read(i915, VLV_BLC_PWM_CTL2(pipe));
if (ctl2 & BLM_PWM_ENABLE) {
- drm_dbg_kms(&dev_priv->drm, "backlight already enabled\n");
+ drm_dbg_kms(&i915->drm, "backlight already enabled\n");
ctl2 &= ~BLM_PWM_ENABLE;
- intel_de_write(dev_priv, VLV_BLC_PWM_CTL2(pipe), ctl2);
+ intel_de_write(i915, VLV_BLC_PWM_CTL2(pipe), ctl2);
}
ctl = panel->backlight.pwm_level_max << 16;
- intel_de_write(dev_priv, VLV_BLC_PWM_CTL(pipe), ctl);
+ intel_de_write(i915, VLV_BLC_PWM_CTL(pipe), ctl);
/* XXX: combine this into above write? */
intel_backlight_set_pwm_level(conn_state, level);
@@ -676,50 +667,45 @@ static void vlv_enable_backlight(const struct intel_crtc_state *crtc_state,
ctl2 = 0;
if (panel->backlight.active_low_pwm)
ctl2 |= BLM_POLARITY_I965;
- intel_de_write(dev_priv, VLV_BLC_PWM_CTL2(pipe), ctl2);
- intel_de_posting_read(dev_priv, VLV_BLC_PWM_CTL2(pipe));
- intel_de_write(dev_priv, VLV_BLC_PWM_CTL2(pipe),
- ctl2 | BLM_PWM_ENABLE);
+ intel_de_write(i915, VLV_BLC_PWM_CTL2(pipe), ctl2);
+ intel_de_posting_read(i915, VLV_BLC_PWM_CTL2(pipe));
+ intel_de_write(i915, VLV_BLC_PWM_CTL2(pipe), ctl2 | BLM_PWM_ENABLE);
}
static void bxt_enable_backlight(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
u32 pwm_ctl, val;
/* Controller 1 uses the utility pin. */
if (panel->backlight.controller == 1) {
- val = intel_de_read(dev_priv, UTIL_PIN_CTL);
+ val = intel_de_read(i915, UTIL_PIN_CTL);
if (val & UTIL_PIN_ENABLE) {
- drm_dbg_kms(&dev_priv->drm,
- "util pin already enabled\n");
+ drm_dbg_kms(&i915->drm, "util pin already enabled\n");
val &= ~UTIL_PIN_ENABLE;
- intel_de_write(dev_priv, UTIL_PIN_CTL, val);
+ intel_de_write(i915, UTIL_PIN_CTL, val);
}
val = 0;
if (panel->backlight.util_pin_active_low)
val |= UTIL_PIN_POLARITY;
- intel_de_write(dev_priv, UTIL_PIN_CTL,
+ intel_de_write(i915, UTIL_PIN_CTL,
val | UTIL_PIN_PIPE(pipe) | UTIL_PIN_MODE_PWM | UTIL_PIN_ENABLE);
}
- pwm_ctl = intel_de_read(dev_priv,
- BXT_BLC_PWM_CTL(panel->backlight.controller));
+ pwm_ctl = intel_de_read(i915, BXT_BLC_PWM_CTL(panel->backlight.controller));
if (pwm_ctl & BXT_BLC_PWM_ENABLE) {
- drm_dbg_kms(&dev_priv->drm, "backlight already enabled\n");
+ drm_dbg_kms(&i915->drm, "backlight already enabled\n");
pwm_ctl &= ~BXT_BLC_PWM_ENABLE;
- intel_de_write(dev_priv,
- BXT_BLC_PWM_CTL(panel->backlight.controller),
+ intel_de_write(i915, BXT_BLC_PWM_CTL(panel->backlight.controller),
pwm_ctl);
}
- intel_de_write(dev_priv,
- BXT_BLC_PWM_FREQ(panel->backlight.controller),
+ intel_de_write(i915, BXT_BLC_PWM_FREQ(panel->backlight.controller),
panel->backlight.pwm_level_max);
intel_backlight_set_pwm_level(conn_state, level);
@@ -728,11 +714,9 @@ static void bxt_enable_backlight(const struct intel_crtc_state *crtc_state,
if (panel->backlight.active_low_pwm)
pwm_ctl |= BXT_BLC_PWM_POLARITY;
- intel_de_write(dev_priv, BXT_BLC_PWM_CTL(panel->backlight.controller),
- pwm_ctl);
- intel_de_posting_read(dev_priv,
- BXT_BLC_PWM_CTL(panel->backlight.controller));
- intel_de_write(dev_priv, BXT_BLC_PWM_CTL(panel->backlight.controller),
+ intel_de_write(i915, BXT_BLC_PWM_CTL(panel->backlight.controller), pwm_ctl);
+ intel_de_posting_read(i915, BXT_BLC_PWM_CTL(panel->backlight.controller));
+ intel_de_write(i915, BXT_BLC_PWM_CTL(panel->backlight.controller),
pwm_ctl | BXT_BLC_PWM_ENABLE);
}
@@ -740,22 +724,19 @@ static void cnp_enable_backlight(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
u32 pwm_ctl;
- pwm_ctl = intel_de_read(dev_priv,
- BXT_BLC_PWM_CTL(panel->backlight.controller));
+ pwm_ctl = intel_de_read(i915, BXT_BLC_PWM_CTL(panel->backlight.controller));
if (pwm_ctl & BXT_BLC_PWM_ENABLE) {
- drm_dbg_kms(&dev_priv->drm, "backlight already enabled\n");
+ drm_dbg_kms(&i915->drm, "backlight already enabled\n");
pwm_ctl &= ~BXT_BLC_PWM_ENABLE;
- intel_de_write(dev_priv,
- BXT_BLC_PWM_CTL(panel->backlight.controller),
+ intel_de_write(i915, BXT_BLC_PWM_CTL(panel->backlight.controller),
pwm_ctl);
}
- intel_de_write(dev_priv,
- BXT_BLC_PWM_FREQ(panel->backlight.controller),
+ intel_de_write(i915, BXT_BLC_PWM_FREQ(panel->backlight.controller),
panel->backlight.pwm_level_max);
intel_backlight_set_pwm_level(conn_state, level);
@@ -764,11 +745,9 @@ static void cnp_enable_backlight(const struct intel_crtc_state *crtc_state,
if (panel->backlight.active_low_pwm)
pwm_ctl |= BXT_BLC_PWM_POLARITY;
- intel_de_write(dev_priv, BXT_BLC_PWM_CTL(panel->backlight.controller),
- pwm_ctl);
- intel_de_posting_read(dev_priv,
- BXT_BLC_PWM_CTL(panel->backlight.controller));
- intel_de_write(dev_priv, BXT_BLC_PWM_CTL(panel->backlight.controller),
+ intel_de_write(i915, BXT_BLC_PWM_CTL(panel->backlight.controller), pwm_ctl);
+ intel_de_posting_read(i915, BXT_BLC_PWM_CTL(panel->backlight.controller));
+ intel_de_write(i915, BXT_BLC_PWM_CTL(panel->backlight.controller),
pwm_ctl | BXT_BLC_PWM_ENABLE);
}
@@ -810,37 +789,37 @@ void intel_backlight_enable(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
if (!panel->backlight.present)
return;
- drm_dbg_kms(&dev_priv->drm, "pipe %c\n", pipe_name(pipe));
+ drm_dbg_kms(&i915->drm, "pipe %c\n", pipe_name(pipe));
- mutex_lock(&dev_priv->display.backlight.lock);
+ mutex_lock(&i915->display.backlight.lock);
__intel_backlight_enable(crtc_state, conn_state);
- mutex_unlock(&dev_priv->display.backlight.lock);
+ mutex_unlock(&i915->display.backlight.lock);
}
#if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE)
static u32 intel_panel_get_backlight(struct intel_connector *connector)
{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
u32 val = 0;
- mutex_lock(&dev_priv->display.backlight.lock);
+ mutex_lock(&i915->display.backlight.lock);
if (panel->backlight.enabled)
val = panel->backlight.funcs->get(connector, intel_connector_get_pipe(connector));
- mutex_unlock(&dev_priv->display.backlight.lock);
+ mutex_unlock(&i915->display.backlight.lock);
- drm_dbg_kms(&dev_priv->drm, "get backlight PWM = %d\n", val);
+ drm_dbg_kms(&i915->drm, "get backlight PWM = %d\n", val);
return val;
}
@@ -859,16 +838,16 @@ static void intel_panel_set_backlight(const struct drm_connector_state *conn_sta
u32 user_level, u32 user_max)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
u32 hw_level;
if (!panel->backlight.present)
return;
- mutex_lock(&dev_priv->display.backlight.lock);
+ mutex_lock(&i915->display.backlight.lock);
- drm_WARN_ON(&dev_priv->drm, panel->backlight.max == 0);
+ drm_WARN_ON(&i915->drm, panel->backlight.max == 0);
hw_level = scale_user_to_hw(connector, user_level, user_max);
panel->backlight.level = hw_level;
@@ -876,18 +855,19 @@ static void intel_panel_set_backlight(const struct drm_connector_state *conn_sta
if (panel->backlight.enabled)
intel_panel_actually_set_backlight(conn_state, hw_level);
- mutex_unlock(&dev_priv->display.backlight.lock);
+ mutex_unlock(&i915->display.backlight.lock);
}
static int intel_backlight_device_update_status(struct backlight_device *bd)
{
struct intel_connector *connector = bl_get_data(bd);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
- struct drm_device *dev = connector->base.dev;
- drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
- DRM_DEBUG_KMS("updating intel_backlight, brightness=%d/%d\n",
- bd->props.brightness, bd->props.max_brightness);
+ drm_modeset_lock(&i915->drm.mode_config.connection_mutex, NULL);
+
+ drm_dbg_kms(&i915->drm, "updating intel_backlight, brightness=%d/%d\n",
+ bd->props.brightness, bd->props.max_brightness);
intel_panel_set_backlight(connector->base.state, bd->props.brightness,
bd->props.max_brightness);
@@ -907,28 +887,28 @@ static int intel_backlight_device_update_status(struct backlight_device *bd)
bd->props.power = FB_BLANK_POWERDOWN;
}
- drm_modeset_unlock(&dev->mode_config.connection_mutex);
+ drm_modeset_unlock(&i915->drm.mode_config.connection_mutex);
+
return 0;
}
static int intel_backlight_device_get_brightness(struct backlight_device *bd)
{
struct intel_connector *connector = bl_get_data(bd);
- struct drm_device *dev = connector->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
intel_wakeref_t wakeref;
int ret = 0;
- with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
u32 hw_level;
- drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+ drm_modeset_lock(&i915->drm.mode_config.connection_mutex, NULL);
hw_level = intel_panel_get_backlight(connector);
ret = scale_hw_to_user(connector,
hw_level, bd->props.max_brightness);
- drm_modeset_unlock(&dev->mode_config.connection_mutex);
+ drm_modeset_unlock(&i915->drm.mode_config.connection_mutex);
}
return ret;
@@ -1038,9 +1018,9 @@ void intel_backlight_device_unregister(struct intel_connector *connector)
*/
static u32 cnp_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
- return DIV_ROUND_CLOSEST(KHz(RUNTIME_INFO(dev_priv)->rawclk_freq),
+ return DIV_ROUND_CLOSEST(KHz(RUNTIME_INFO(i915)->rawclk_freq),
pwm_freq_hz);
}
@@ -1077,7 +1057,7 @@ static u32 spt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
*/
static u32 lpt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
u32 mul, clock;
@@ -1086,7 +1066,7 @@ static u32 lpt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
else
mul = 128;
- if (HAS_PCH_LPT_H(dev_priv))
+ if (HAS_PCH_LPT_H(i915))
clock = MHz(135); /* LPT:H */
else
clock = MHz(24); /* LPT:LP */
@@ -1100,9 +1080,9 @@ static u32 lpt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
*/
static u32 pch_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
- return DIV_ROUND_CLOSEST(KHz(RUNTIME_INFO(dev_priv)->rawclk_freq),
+ return DIV_ROUND_CLOSEST(KHz(RUNTIME_INFO(i915)->rawclk_freq),
pwm_freq_hz * 128);
}
@@ -1116,13 +1096,13 @@ static u32 pch_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
*/
static u32 i9xx_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
int clock;
- if (IS_PINEVIEW(dev_priv))
- clock = KHz(RUNTIME_INFO(dev_priv)->rawclk_freq);
+ if (IS_PINEVIEW(i915))
+ clock = KHz(RUNTIME_INFO(i915)->rawclk_freq);
else
- clock = KHz(dev_priv->display.cdclk.hw.cdclk);
+ clock = KHz(i915->display.cdclk.hw.cdclk);
return DIV_ROUND_CLOSEST(clock, pwm_freq_hz * 32);
}
@@ -1134,13 +1114,13 @@ static u32 i9xx_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
*/
static u32 i965_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
int clock;
- if (IS_G4X(dev_priv))
- clock = KHz(RUNTIME_INFO(dev_priv)->rawclk_freq);
+ if (IS_G4X(i915))
+ clock = KHz(RUNTIME_INFO(i915)->rawclk_freq);
else
- clock = KHz(dev_priv->display.cdclk.hw.cdclk);
+ clock = KHz(i915->display.cdclk.hw.cdclk);
return DIV_ROUND_CLOSEST(clock, pwm_freq_hz * 128);
}
@@ -1152,17 +1132,17 @@ static u32 i965_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
*/
static u32 vlv_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
int mul, clock;
- if ((intel_de_read(dev_priv, CBR1_VLV) & CBR_PWM_CLOCK_MUX_SELECT) == 0) {
- if (IS_CHERRYVIEW(dev_priv))
+ if ((intel_de_read(i915, CBR1_VLV) & CBR_PWM_CLOCK_MUX_SELECT) == 0) {
+ if (IS_CHERRYVIEW(i915))
clock = KHz(19200);
else
clock = MHz(25);
mul = 16;
} else {
- clock = KHz(RUNTIME_INFO(dev_priv)->rawclk_freq);
+ clock = KHz(RUNTIME_INFO(i915)->rawclk_freq);
mul = 128;
}
@@ -1171,16 +1151,16 @@ static u32 vlv_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
static u16 get_vbt_pwm_freq(struct intel_connector *connector)
{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
u16 pwm_freq_hz = connector->panel.vbt.backlight.pwm_freq_hz;
if (pwm_freq_hz) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(&i915->drm,
"VBT defined backlight frequency %u Hz\n",
pwm_freq_hz);
} else {
pwm_freq_hz = 200;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(&i915->drm,
"default backlight frequency %u Hz\n",
pwm_freq_hz);
}
@@ -1190,20 +1170,20 @@ static u16 get_vbt_pwm_freq(struct intel_connector *connector)
static u32 get_backlight_max_vbt(struct intel_connector *connector)
{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
u16 pwm_freq_hz = get_vbt_pwm_freq(connector);
u32 pwm;
if (!panel->backlight.pwm_funcs->hz_to_pwm) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(&i915->drm,
"backlight frequency conversion not supported\n");
return 0;
}
pwm = panel->backlight.pwm_funcs->hz_to_pwm(connector, pwm_freq_hz);
if (!pwm) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(&i915->drm,
"backlight frequency conversion failed\n");
return 0;
}
@@ -1216,11 +1196,11 @@ static u32 get_backlight_max_vbt(struct intel_connector *connector)
*/
static u32 get_backlight_min_vbt(struct intel_connector *connector)
{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
int min;
- drm_WARN_ON(&dev_priv->drm, panel->backlight.pwm_level_max == 0);
+ drm_WARN_ON(&i915->drm, panel->backlight.pwm_level_max == 0);
/*
* XXX: If the vbt value is 255, it makes min equal to max, which leads
@@ -1231,7 +1211,7 @@ static u32 get_backlight_min_vbt(struct intel_connector *connector)
*/
min = clamp_t(int, connector->panel.vbt.backlight.min_brightness, 0, 64);
if (min != connector->panel.vbt.backlight.min_brightness) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(&i915->drm,
"clamping VBT min backlight %d/255 to %d/255\n",
connector->panel.vbt.backlight.min_brightness, min);
}
@@ -1242,24 +1222,24 @@ static u32 get_backlight_min_vbt(struct intel_connector *connector)
static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unused)
{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
u32 cpu_ctl2, pch_ctl1, pch_ctl2, val;
bool alt, cpu_mode;
- if (HAS_PCH_LPT(dev_priv))
- alt = intel_de_read(dev_priv, SOUTH_CHICKEN2) & LPT_PWM_GRANULARITY;
+ if (HAS_PCH_LPT(i915))
+ alt = intel_de_read(i915, SOUTH_CHICKEN2) & LPT_PWM_GRANULARITY;
else
- alt = intel_de_read(dev_priv, SOUTH_CHICKEN1) & SPT_PWM_GRANULARITY;
+ alt = intel_de_read(i915, SOUTH_CHICKEN1) & SPT_PWM_GRANULARITY;
panel->backlight.alternate_pwm_increment = alt;
- pch_ctl1 = intel_de_read(dev_priv, BLC_PWM_PCH_CTL1);
+ pch_ctl1 = intel_de_read(i915, BLC_PWM_PCH_CTL1);
panel->backlight.active_low_pwm = pch_ctl1 & BLM_PCH_POLARITY;
- pch_ctl2 = intel_de_read(dev_priv, BLC_PWM_PCH_CTL2);
+ pch_ctl2 = intel_de_read(i915, BLC_PWM_PCH_CTL2);
panel->backlight.pwm_level_max = pch_ctl2 >> 16;
- cpu_ctl2 = intel_de_read(dev_priv, BLC_PWM_CPU_CTL2);
+ cpu_ctl2 = intel_de_read(i915, BLC_PWM_CPU_CTL2);
if (!panel->backlight.pwm_level_max)
panel->backlight.pwm_level_max = get_backlight_max_vbt(connector);
@@ -1271,22 +1251,22 @@ static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unus
panel->backlight.pwm_enabled = pch_ctl1 & BLM_PCH_PWM_ENABLE;
- cpu_mode = panel->backlight.pwm_enabled && HAS_PCH_LPT(dev_priv) &&
+ cpu_mode = panel->backlight.pwm_enabled && HAS_PCH_LPT(i915) &&
!(pch_ctl1 & BLM_PCH_OVERRIDE_ENABLE) &&
(cpu_ctl2 & BLM_PWM_ENABLE);
if (cpu_mode) {
val = pch_get_backlight(connector, unused);
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(&i915->drm,
"CPU backlight register was enabled, switching to PCH override\n");
/* Write converted CPU PWM value to PCH override register */
lpt_set_backlight(connector->base.state, val);
- intel_de_write(dev_priv, BLC_PWM_PCH_CTL1,
+ intel_de_write(i915, BLC_PWM_PCH_CTL1,
pch_ctl1 | BLM_PCH_OVERRIDE_ENABLE);
- intel_de_write(dev_priv, BLC_PWM_CPU_CTL2,
+ intel_de_write(i915, BLC_PWM_CPU_CTL2,
cpu_ctl2 & ~BLM_PWM_ENABLE);
}
@@ -1295,14 +1275,14 @@ static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unus
static int pch_setup_backlight(struct intel_connector *connector, enum pipe unused)
{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
u32 cpu_ctl2, pch_ctl1, pch_ctl2;
- pch_ctl1 = intel_de_read(dev_priv, BLC_PWM_PCH_CTL1);
+ pch_ctl1 = intel_de_read(i915, BLC_PWM_PCH_CTL1);
panel->backlight.active_low_pwm = pch_ctl1 & BLM_PCH_POLARITY;
- pch_ctl2 = intel_de_read(dev_priv, BLC_PWM_PCH_CTL2);
+ pch_ctl2 = intel_de_read(i915, BLC_PWM_PCH_CTL2);
panel->backlight.pwm_level_max = pch_ctl2 >> 16;
if (!panel->backlight.pwm_level_max)
@@ -1313,7 +1293,7 @@ static int pch_setup_backlight(struct intel_connector *connector, enum pipe unus
panel->backlight.pwm_level_min = get_backlight_min_vbt(connector);
- cpu_ctl2 = intel_de_read(dev_priv, BLC_PWM_CPU_CTL2);
+ cpu_ctl2 = intel_de_read(i915, BLC_PWM_CPU_CTL2);
panel->backlight.pwm_enabled = (cpu_ctl2 & BLM_PWM_ENABLE) &&
(pch_ctl1 & BLM_PCH_PWM_ENABLE);
@@ -1322,16 +1302,16 @@ static int pch_setup_backlight(struct intel_connector *connector, enum pipe unus
static int i9xx_setup_backlight(struct intel_connector *connector, enum pipe unused)
{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
u32 ctl, val;
- ctl = intel_de_read(dev_priv, BLC_PWM_CTL);
+ ctl = intel_de_read(i915, BLC_PWM_CTL);
- if (DISPLAY_VER(dev_priv) == 2 || IS_I915GM(dev_priv) || IS_I945GM(dev_priv))
+ if (DISPLAY_VER(i915) == 2 || IS_I915GM(i915) || IS_I945GM(i915))
panel->backlight.combination_mode = ctl & BLM_LEGACY_MODE;
- if (IS_PINEVIEW(dev_priv))
+ if (IS_PINEVIEW(i915))
panel->backlight.active_low_pwm = ctl & BLM_POLARITY_PNV;
panel->backlight.pwm_level_max = ctl >> 17;
@@ -1360,15 +1340,15 @@ static int i9xx_setup_backlight(struct intel_connector *connector, enum pipe unu
static int i965_setup_backlight(struct intel_connector *connector, enum pipe unused)
{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
u32 ctl, ctl2;
- ctl2 = intel_de_read(dev_priv, BLC_PWM_CTL2);
+ ctl2 = intel_de_read(i915, BLC_PWM_CTL2);
panel->backlight.combination_mode = ctl2 & BLM_COMBINATION_MODE;
panel->backlight.active_low_pwm = ctl2 & BLM_POLARITY_I965;
- ctl = intel_de_read(dev_priv, BLC_PWM_CTL);
+ ctl = intel_de_read(i915, BLC_PWM_CTL);
panel->backlight.pwm_level_max = ctl >> 16;
if (!panel->backlight.pwm_level_max)
@@ -1389,17 +1369,17 @@ static int i965_setup_backlight(struct intel_connector *connector, enum pipe unu
static int vlv_setup_backlight(struct intel_connector *connector, enum pipe pipe)
{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
u32 ctl, ctl2;
- if (drm_WARN_ON(&dev_priv->drm, pipe != PIPE_A && pipe != PIPE_B))
+ if (drm_WARN_ON(&i915->drm, pipe != PIPE_A && pipe != PIPE_B))
return -ENODEV;
- ctl2 = intel_de_read(dev_priv, VLV_BLC_PWM_CTL2(pipe));
+ ctl2 = intel_de_read(i915, VLV_BLC_PWM_CTL2(pipe));
panel->backlight.active_low_pwm = ctl2 & BLM_POLARITY_I965;
- ctl = intel_de_read(dev_priv, VLV_BLC_PWM_CTL(pipe));
+ ctl = intel_de_read(i915, VLV_BLC_PWM_CTL(pipe));
panel->backlight.pwm_level_max = ctl >> 16;
if (!panel->backlight.pwm_level_max)
@@ -1418,25 +1398,25 @@ static int vlv_setup_backlight(struct intel_connector *connector, enum pipe pipe
static int
bxt_setup_backlight(struct intel_connector *connector, enum pipe unused)
{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
u32 pwm_ctl, val;
panel->backlight.controller = connector->panel.vbt.backlight.controller;
- pwm_ctl = intel_de_read(dev_priv,
+ pwm_ctl = intel_de_read(i915,
BXT_BLC_PWM_CTL(panel->backlight.controller));
/* Controller 1 uses the utility pin. */
if (panel->backlight.controller == 1) {
- val = intel_de_read(dev_priv, UTIL_PIN_CTL);
+ val = intel_de_read(i915, UTIL_PIN_CTL);
panel->backlight.util_pin_active_low =
val & UTIL_PIN_POLARITY;
}
panel->backlight.active_low_pwm = pwm_ctl & BXT_BLC_PWM_POLARITY;
panel->backlight.pwm_level_max =
- intel_de_read(dev_priv, BXT_BLC_PWM_FREQ(panel->backlight.controller));
+ intel_de_read(i915, BXT_BLC_PWM_FREQ(panel->backlight.controller));
if (!panel->backlight.pwm_level_max)
panel->backlight.pwm_level_max = get_backlight_max_vbt(connector);
@@ -1451,26 +1431,54 @@ bxt_setup_backlight(struct intel_connector *connector, enum pipe unused)
return 0;
}
+static int cnp_num_backlight_controllers(struct drm_i915_private *i915)
+{
+ if (INTEL_PCH_TYPE(i915) >= PCH_DG1)
+ return 1;
+
+ if (INTEL_PCH_TYPE(i915) >= PCH_ICP)
+ return 2;
+
+ return 1;
+}
+
+static bool cnp_backlight_controller_is_valid(struct drm_i915_private *i915, int controller)
+{
+ if (controller < 0 || controller >= cnp_num_backlight_controllers(i915))
+ return false;
+
+ if (controller == 1 &&
+ INTEL_PCH_TYPE(i915) >= PCH_ICP &&
+ INTEL_PCH_TYPE(i915) < PCH_MTP)
+ return intel_de_read(i915, SOUTH_CHICKEN1) & ICP_SECOND_PPS_IO_SELECT;
+
+ return true;
+}
+
static int
cnp_setup_backlight(struct intel_connector *connector, enum pipe unused)
{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
u32 pwm_ctl;
/*
* CNP has the BXT implementation of backlight, but with only one
- * controller. TODO: ICP has multiple controllers but we only use
- * controller 0 for now.
+ * controller. ICP+ can have two controllers, depending on pin muxing.
*/
- panel->backlight.controller = 0;
+ panel->backlight.controller = connector->panel.vbt.backlight.controller;
+ if (!cnp_backlight_controller_is_valid(i915, panel->backlight.controller)) {
+ drm_dbg_kms(&i915->drm, "Invalid backlight controller %d, assuming 0\n",
+ panel->backlight.controller);
+ panel->backlight.controller = 0;
+ }
- pwm_ctl = intel_de_read(dev_priv,
+ pwm_ctl = intel_de_read(i915,
BXT_BLC_PWM_CTL(panel->backlight.controller));
panel->backlight.active_low_pwm = pwm_ctl & BXT_BLC_PWM_POLARITY;
panel->backlight.pwm_level_max =
- intel_de_read(dev_priv, BXT_BLC_PWM_FREQ(panel->backlight.controller));
+ intel_de_read(i915, BXT_BLC_PWM_FREQ(panel->backlight.controller));
if (!panel->backlight.pwm_level_max)
panel->backlight.pwm_level_max = get_backlight_max_vbt(connector);
@@ -1488,23 +1496,22 @@ cnp_setup_backlight(struct intel_connector *connector, enum pipe unused)
static int ext_pwm_setup_backlight(struct intel_connector *connector,
enum pipe pipe)
{
- struct drm_device *dev = connector->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
const char *desc;
u32 level;
/* Get the right PWM chip for DSI backlight according to VBT */
if (connector->panel.vbt.dsi.config->pwm_blc == PPS_BLC_PMIC) {
- panel->backlight.pwm = pwm_get(dev->dev, "pwm_pmic_backlight");
+ panel->backlight.pwm = pwm_get(i915->drm.dev, "pwm_pmic_backlight");
desc = "PMIC";
} else {
- panel->backlight.pwm = pwm_get(dev->dev, "pwm_soc_backlight");
+ panel->backlight.pwm = pwm_get(i915->drm.dev, "pwm_soc_backlight");
desc = "SoC";
}
if (IS_ERR(panel->backlight.pwm)) {
- drm_err(&dev_priv->drm, "Failed to get the %s PWM chip\n",
+ drm_err(&i915->drm, "Failed to get the %s PWM chip\n",
desc);
panel->backlight.pwm = NULL;
return -ENODEV;
@@ -1522,7 +1529,7 @@ static int ext_pwm_setup_backlight(struct intel_connector *connector,
level = intel_backlight_invert_pwm_level(connector, level);
panel->backlight.pwm_enabled = true;
- drm_dbg_kms(&dev_priv->drm, "PWM already enabled at freq %ld, VBT freq %d, level %d\n",
+ drm_dbg_kms(&i915->drm, "PWM already enabled at freq %ld, VBT freq %d, level %d\n",
NSEC_PER_SEC / (unsigned long)panel->backlight.pwm_state.period,
get_vbt_pwm_freq(connector), level);
} else {
@@ -1531,7 +1538,7 @@ static int ext_pwm_setup_backlight(struct intel_connector *connector,
NSEC_PER_SEC / get_vbt_pwm_freq(connector);
}
- drm_info(&dev_priv->drm, "Using %s PWM for LCD backlight control\n",
+ drm_info(&i915->drm, "Using %s PWM for LCD backlight control\n",
desc);
return 0;
}
@@ -1594,47 +1601,47 @@ void intel_backlight_update(struct intel_atomic_state *state,
const struct drm_connector_state *conn_state)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
if (!panel->backlight.present)
return;
- mutex_lock(&dev_priv->display.backlight.lock);
+ mutex_lock(&i915->display.backlight.lock);
if (!panel->backlight.enabled)
__intel_backlight_enable(crtc_state, conn_state);
- mutex_unlock(&dev_priv->display.backlight.lock);
+ mutex_unlock(&i915->display.backlight.lock);
}
int intel_backlight_setup(struct intel_connector *connector, enum pipe pipe)
{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
int ret;
if (!connector->panel.vbt.backlight.present) {
- if (intel_has_quirk(dev_priv, QUIRK_BACKLIGHT_PRESENT)) {
- drm_dbg_kms(&dev_priv->drm,
+ if (intel_has_quirk(i915, QUIRK_BACKLIGHT_PRESENT)) {
+ drm_dbg_kms(&i915->drm,
"no backlight present per VBT, but present per quirk\n");
} else {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(&i915->drm,
"no backlight present per VBT\n");
return 0;
}
}
/* ensure intel_panel has been initialized first */
- if (drm_WARN_ON(&dev_priv->drm, !panel->backlight.funcs))
+ if (drm_WARN_ON(&i915->drm, !panel->backlight.funcs))
return -ENODEV;
/* set level and max in panel struct */
- mutex_lock(&dev_priv->display.backlight.lock);
+ mutex_lock(&i915->display.backlight.lock);
ret = panel->backlight.funcs->setup(connector, pipe);
- mutex_unlock(&dev_priv->display.backlight.lock);
+ mutex_unlock(&i915->display.backlight.lock);
if (ret) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(&i915->drm,
"failed to setup backlight for connector %s\n",
connector->base.name);
return ret;
@@ -1642,7 +1649,7 @@ int intel_backlight_setup(struct intel_connector *connector, enum pipe pipe)
panel->backlight.present = true;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(&i915->drm,
"Connector %s backlight initialized, %s, brightness %u/%u\n",
connector->base.name,
str_enabled_disabled(panel->backlight.enabled),
@@ -1753,30 +1760,30 @@ void intel_backlight_init_funcs(struct intel_panel *panel)
{
struct intel_connector *connector =
container_of(panel, struct intel_connector, panel);
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
if (connector->base.connector_type == DRM_MODE_CONNECTOR_DSI &&
intel_dsi_dcs_init_backlight_funcs(connector) == 0)
return;
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
+ if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) {
panel->backlight.pwm_funcs = &bxt_pwm_funcs;
- } else if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP) {
+ } else if (INTEL_PCH_TYPE(i915) >= PCH_CNP) {
panel->backlight.pwm_funcs = &cnp_pwm_funcs;
- } else if (INTEL_PCH_TYPE(dev_priv) >= PCH_LPT) {
- if (HAS_PCH_LPT(dev_priv))
+ } else if (INTEL_PCH_TYPE(i915) >= PCH_LPT) {
+ if (HAS_PCH_LPT(i915))
panel->backlight.pwm_funcs = &lpt_pwm_funcs;
else
panel->backlight.pwm_funcs = &spt_pwm_funcs;
- } else if (HAS_PCH_SPLIT(dev_priv)) {
+ } else if (HAS_PCH_SPLIT(i915)) {
panel->backlight.pwm_funcs = &pch_pwm_funcs;
- } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ } else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
if (connector->base.connector_type == DRM_MODE_CONNECTOR_DSI) {
panel->backlight.pwm_funcs = &ext_pwm_funcs;
} else {
panel->backlight.pwm_funcs = &vlv_pwm_funcs;
}
- } else if (DISPLAY_VER(dev_priv) == 4) {
+ } else if (DISPLAY_VER(i915) == 4) {
panel->backlight.pwm_funcs = &i965_pwm_funcs;
} else {
panel->backlight.pwm_funcs = &i9xx_pwm_funcs;
@@ -1786,7 +1793,7 @@ void intel_backlight_init_funcs(struct intel_panel *panel)
if (intel_dp_aux_init_backlight_funcs(connector) == 0)
return;
- if (!intel_has_quirk(dev_priv, QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK))
+ if (!intel_has_quirk(i915, QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK))
connector->panel.backlight.power = intel_pps_backlight_power;
}
diff --git a/drivers/gpu/drm/i915/display/intel_backlight_regs.h b/drivers/gpu/drm/i915/display/intel_backlight_regs.h
index 344eb8096bd2..d0cdfd631d75 100644
--- a/drivers/gpu/drm/i915/display/intel_backlight_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_backlight_regs.h
@@ -8,23 +8,20 @@
#include "intel_display_reg_defs.h"
-#define _VLV_BLC_PWM_CTL2_A (DISPLAY_MMIO_BASE(dev_priv) + 0x61250)
-#define _VLV_BLC_PWM_CTL2_B (DISPLAY_MMIO_BASE(dev_priv) + 0x61350)
-#define VLV_BLC_PWM_CTL2(pipe) _MMIO_PIPE(pipe, _VLV_BLC_PWM_CTL2_A, \
- _VLV_BLC_PWM_CTL2_B)
+#define _VLV_BLC_PWM_CTL2_A (VLV_DISPLAY_BASE + 0x61250)
+#define _VLV_BLC_PWM_CTL2_B (VLV_DISPLAY_BASE + 0x61350)
+#define VLV_BLC_PWM_CTL2(pipe) _MMIO_PIPE(pipe, _VLV_BLC_PWM_CTL2_A, _VLV_BLC_PWM_CTL2_B)
-#define _VLV_BLC_PWM_CTL_A (DISPLAY_MMIO_BASE(dev_priv) + 0x61254)
-#define _VLV_BLC_PWM_CTL_B (DISPLAY_MMIO_BASE(dev_priv) + 0x61354)
-#define VLV_BLC_PWM_CTL(pipe) _MMIO_PIPE(pipe, _VLV_BLC_PWM_CTL_A, \
- _VLV_BLC_PWM_CTL_B)
+#define _VLV_BLC_PWM_CTL_A (VLV_DISPLAY_BASE + 0x61254)
+#define _VLV_BLC_PWM_CTL_B (VLV_DISPLAY_BASE + 0x61354)
+#define VLV_BLC_PWM_CTL(pipe) _MMIO_PIPE(pipe, _VLV_BLC_PWM_CTL_A, _VLV_BLC_PWM_CTL_B)
-#define _VLV_BLC_HIST_CTL_A (DISPLAY_MMIO_BASE(dev_priv) + 0x61260)
-#define _VLV_BLC_HIST_CTL_B (DISPLAY_MMIO_BASE(dev_priv) + 0x61360)
-#define VLV_BLC_HIST_CTL(pipe) _MMIO_PIPE(pipe, _VLV_BLC_HIST_CTL_A, \
- _VLV_BLC_HIST_CTL_B)
+#define _VLV_BLC_HIST_CTL_A (VLV_DISPLAY_BASE + 0x61260)
+#define _VLV_BLC_HIST_CTL_B (VLV_DISPLAY_BASE + 0x61360)
+#define VLV_BLC_HIST_CTL(pipe) _MMIO_PIPE(pipe, _VLV_BLC_HIST_CTL_A, _VLV_BLC_HIST_CTL_B)
/* Backlight control */
-#define BLC_PWM_CTL2 _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61250) /* 965+ only */
+#define BLC_PWM_CTL2 _MMIO(0x61250) /* 965+ only */
#define BLM_PWM_ENABLE (1 << 31)
#define BLM_COMBINATION_MODE (1 << 30) /* gen4 only */
#define BLM_PIPE_SELECT (1 << 29)
@@ -47,7 +44,7 @@
#define BLM_PHASE_IN_COUNT_MASK (0xff << 8)
#define BLM_PHASE_IN_INCR_SHIFT (0)
#define BLM_PHASE_IN_INCR_MASK (0xff << 0)
-#define BLC_PWM_CTL _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61254)
+#define BLC_PWM_CTL _MMIO(0x61254)
/*
* This is the most significant 15 bits of the number of backlight cycles in a
* complete cycle of the modulated backlight control.
@@ -69,7 +66,7 @@
#define BACKLIGHT_DUTY_CYCLE_MASK_PNV (0xfffe)
#define BLM_POLARITY_PNV (1 << 0) /* pnv only */
-#define BLC_HIST_CTL _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61260)
+#define BLC_HIST_CTL _MMIO(0x61260)
#define BLM_HISTOGRAM_ENABLE (1 << 31)
/* New registers for PCH-split platforms. Safe where new bits show up, the
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
index 572a4e3769f3..04b846440de6 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.c
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -25,16 +25,15 @@
*
*/
-#include <drm/drm_edid.h>
#include <drm/display/drm_dp_helper.h>
#include <drm/display/drm_dsc_helper.h>
-
-#include "display/intel_display.h"
-#include "display/intel_display_types.h"
-#include "display/intel_gmbus.h"
+#include <drm/drm_edid.h>
#include "i915_drv.h"
#include "i915_reg.h"
+#include "intel_display.h"
+#include "intel_display_types.h"
+#include "intel_gmbus.h"
#define _INTEL_BIOS_PRIVATE
#include "intel_vbt_defs.h"
@@ -620,14 +619,14 @@ static void dump_pnp_id(struct drm_i915_private *i915,
static int opregion_get_panel_type(struct drm_i915_private *i915,
const struct intel_bios_encoder_data *devdata,
- const struct edid *edid)
+ const struct drm_edid *drm_edid, bool use_fallback)
{
return intel_opregion_get_panel_type(i915);
}
static int vbt_get_panel_type(struct drm_i915_private *i915,
const struct intel_bios_encoder_data *devdata,
- const struct edid *edid)
+ const struct drm_edid *drm_edid, bool use_fallback)
{
const struct bdb_lvds_options *lvds_options;
@@ -652,12 +651,13 @@ static int vbt_get_panel_type(struct drm_i915_private *i915,
static int pnpid_get_panel_type(struct drm_i915_private *i915,
const struct intel_bios_encoder_data *devdata,
- const struct edid *edid)
+ const struct drm_edid *drm_edid, bool use_fallback)
{
const struct bdb_lvds_lfp_data *data;
const struct bdb_lvds_lfp_data_ptrs *ptrs;
const struct lvds_pnp_id *edid_id;
struct lvds_pnp_id edid_id_nodate;
+ const struct edid *edid = drm_edid_raw(drm_edid); /* FIXME */
int i, best = -1;
if (!edid)
@@ -701,9 +701,9 @@ static int pnpid_get_panel_type(struct drm_i915_private *i915,
static int fallback_get_panel_type(struct drm_i915_private *i915,
const struct intel_bios_encoder_data *devdata,
- const struct edid *edid)
+ const struct drm_edid *drm_edid, bool use_fallback)
{
- return 0;
+ return use_fallback ? 0 : -1;
}
enum panel_type {
@@ -715,13 +715,13 @@ enum panel_type {
static int get_panel_type(struct drm_i915_private *i915,
const struct intel_bios_encoder_data *devdata,
- const struct edid *edid)
+ const struct drm_edid *drm_edid, bool use_fallback)
{
struct {
const char *name;
int (*get_panel_type)(struct drm_i915_private *i915,
const struct intel_bios_encoder_data *devdata,
- const struct edid *edid);
+ const struct drm_edid *drm_edid, bool use_fallback);
int panel_type;
} panel_types[] = {
[PANEL_TYPE_OPREGION] = {
@@ -744,7 +744,8 @@ static int get_panel_type(struct drm_i915_private *i915,
int i;
for (i = 0; i < ARRAY_SIZE(panel_types); i++) {
- panel_types[i].panel_type = panel_types[i].get_panel_type(i915, devdata, edid);
+ panel_types[i].panel_type = panel_types[i].get_panel_type(i915, devdata,
+ drm_edid, use_fallback);
drm_WARN_ON(&i915->drm, panel_types[i].panel_type > 0xf &&
panel_types[i].panel_type != 0xff);
@@ -1032,6 +1033,7 @@ parse_lfp_backlight(struct drm_i915_private *i915,
}
panel->vbt.backlight.type = INTEL_BACKLIGHT_DISPLAY_DDI;
+ panel->vbt.backlight.controller = 0;
if (i915->display.vbt.version >= 191) {
size_t exp_size;
@@ -2466,6 +2468,22 @@ static enum port dvo_port_to_port(struct drm_i915_private *i915,
dvo_port);
}
+static enum port
+dsi_dvo_port_to_port(struct drm_i915_private *i915, u8 dvo_port)
+{
+ switch (dvo_port) {
+ case DVO_PORT_MIPIA:
+ return PORT_A;
+ case DVO_PORT_MIPIC:
+ if (DISPLAY_VER(i915) >= 11)
+ return PORT_B;
+ else
+ return PORT_C;
+ default:
+ return PORT_NONE;
+ }
+}
+
static int parse_bdb_230_dp_max_link_rate(const int vbt_max_link_rate)
{
switch (vbt_max_link_rate) {
@@ -2576,6 +2594,12 @@ intel_bios_encoder_supports_edp(const struct intel_bios_encoder_data *devdata)
devdata->child.device_type & DEVICE_TYPE_INTERNAL_CONNECTOR;
}
+static bool
+intel_bios_encoder_supports_dsi(const struct intel_bios_encoder_data *devdata)
+{
+ return devdata->child.device_type & DEVICE_TYPE_MIPI_OUTPUT;
+}
+
static int _intel_bios_hdmi_level_shift(const struct intel_bios_encoder_data *devdata)
{
if (!devdata || devdata->i915->display.vbt.version < 158)
@@ -2626,7 +2650,7 @@ static void print_ddi_port(const struct intel_bios_encoder_data *devdata,
{
struct drm_i915_private *i915 = devdata->i915;
const struct child_device_config *child = &devdata->child;
- bool is_dvi, is_hdmi, is_dp, is_edp, is_crt, supports_typec_usb, supports_tbt;
+ bool is_dvi, is_hdmi, is_dp, is_edp, is_dsi, is_crt, supports_typec_usb, supports_tbt;
int dp_boost_level, dp_max_link_rate, hdmi_boost_level, hdmi_level_shift, max_tmds_clock;
is_dvi = intel_bios_encoder_supports_dvi(devdata);
@@ -2634,13 +2658,14 @@ static void print_ddi_port(const struct intel_bios_encoder_data *devdata,
is_crt = intel_bios_encoder_supports_crt(devdata);
is_hdmi = intel_bios_encoder_supports_hdmi(devdata);
is_edp = intel_bios_encoder_supports_edp(devdata);
+ is_dsi = intel_bios_encoder_supports_dsi(devdata);
supports_typec_usb = intel_bios_encoder_supports_typec_usb(devdata);
supports_tbt = intel_bios_encoder_supports_tbt(devdata);
drm_dbg_kms(&i915->drm,
- "Port %c VBT info: CRT:%d DVI:%d HDMI:%d DP:%d eDP:%d LSPCON:%d USB-Type-C:%d TBT:%d DSC:%d\n",
- port_name(port), is_crt, is_dvi, is_hdmi, is_dp, is_edp,
+ "Port %c VBT info: CRT:%d DVI:%d HDMI:%d DP:%d eDP:%d DSI:%d LSPCON:%d USB-Type-C:%d TBT:%d DSC:%d\n",
+ port_name(port), is_crt, is_dvi, is_hdmi, is_dp, is_edp, is_dsi,
HAS_LSPCON(i915) && child->lspcon,
supports_typec_usb, supports_tbt,
devdata->dsc != NULL);
@@ -2693,6 +2718,8 @@ static void parse_ddi_port(struct intel_bios_encoder_data *devdata)
enum port port;
port = dvo_port_to_port(i915, child->dvo_port);
+ if (port == PORT_NONE && DISPLAY_VER(i915) >= 11)
+ port = dsi_dvo_port_to_port(i915, child->dvo_port);
if (port == PORT_NONE)
return;
@@ -3183,14 +3210,26 @@ out:
kfree(oprom_vbt);
}
-void intel_bios_init_panel(struct drm_i915_private *i915,
- struct intel_panel *panel,
- const struct intel_bios_encoder_data *devdata,
- const struct edid *edid)
+static void intel_bios_init_panel(struct drm_i915_private *i915,
+ struct intel_panel *panel,
+ const struct intel_bios_encoder_data *devdata,
+ const struct drm_edid *drm_edid,
+ bool use_fallback)
{
- init_vbt_panel_defaults(panel);
+ /* already have it? */
+ if (panel->vbt.panel_type >= 0) {
+ drm_WARN_ON(&i915->drm, !use_fallback);
+ return;
+ }
- panel->vbt.panel_type = get_panel_type(i915, devdata, edid);
+ panel->vbt.panel_type = get_panel_type(i915, devdata,
+ drm_edid, use_fallback);
+ if (panel->vbt.panel_type < 0) {
+ drm_WARN_ON(&i915->drm, use_fallback);
+ return;
+ }
+
+ init_vbt_panel_defaults(panel);
parse_panel_options(i915, panel);
parse_generic_dtd(i915, panel);
@@ -3205,6 +3244,21 @@ void intel_bios_init_panel(struct drm_i915_private *i915,
parse_mipi_sequence(i915, panel);
}
+void intel_bios_init_panel_early(struct drm_i915_private *i915,
+ struct intel_panel *panel,
+ const struct intel_bios_encoder_data *devdata)
+{
+ intel_bios_init_panel(i915, panel, devdata, NULL, false);
+}
+
+void intel_bios_init_panel_late(struct drm_i915_private *i915,
+ struct intel_panel *panel,
+ const struct intel_bios_encoder_data *devdata,
+ const struct drm_edid *drm_edid)
+{
+ intel_bios_init_panel(i915, panel, devdata, drm_edid, true);
+}
+
/**
* intel_bios_driver_remove - Free any resources allocated by intel_bios_init()
* @i915: i915 device instance
@@ -3414,19 +3468,16 @@ bool intel_bios_is_dsi_present(struct drm_i915_private *i915,
dvo_port = child->dvo_port;
- if (dvo_port == DVO_PORT_MIPIA ||
- (dvo_port == DVO_PORT_MIPIB && DISPLAY_VER(i915) >= 11) ||
- (dvo_port == DVO_PORT_MIPIC && DISPLAY_VER(i915) < 11)) {
- if (port)
- *port = dvo_port - DVO_PORT_MIPIA;
- return true;
- } else if (dvo_port == DVO_PORT_MIPIB ||
- dvo_port == DVO_PORT_MIPIC ||
- dvo_port == DVO_PORT_MIPID) {
+ if (dsi_dvo_port_to_port(i915, dvo_port) == PORT_NONE) {
drm_dbg_kms(&i915->drm,
"VBT has unsupported DSI port %c\n",
port_name(dvo_port - DVO_PORT_MIPIA));
+ continue;
}
+
+ if (port)
+ *port = dsi_dvo_port_to_port(i915, dvo_port);
+ return true;
}
return false;
@@ -3511,7 +3562,7 @@ bool intel_bios_get_dsc_params(struct intel_encoder *encoder,
if (!(child->device_type & DEVICE_TYPE_MIPI_OUTPUT))
continue;
- if (child->dvo_port - DVO_PORT_MIPIA == encoder->port) {
+ if (dsi_dvo_port_to_port(i915, child->dvo_port) == encoder->port) {
if (!devdata->dsc)
return false;
diff --git a/drivers/gpu/drm/i915/display/intel_bios.h b/drivers/gpu/drm/i915/display/intel_bios.h
index e375405a7828..d221f784aa88 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.h
+++ b/drivers/gpu/drm/i915/display/intel_bios.h
@@ -32,8 +32,8 @@
#include <linux/types.h>
+struct drm_edid;
struct drm_i915_private;
-struct edid;
struct intel_bios_encoder_data;
struct intel_crtc_state;
struct intel_encoder;
@@ -232,10 +232,13 @@ struct mipi_pps_data {
} __packed;
void intel_bios_init(struct drm_i915_private *dev_priv);
-void intel_bios_init_panel(struct drm_i915_private *dev_priv,
- struct intel_panel *panel,
- const struct intel_bios_encoder_data *devdata,
- const struct edid *edid);
+void intel_bios_init_panel_early(struct drm_i915_private *dev_priv,
+ struct intel_panel *panel,
+ const struct intel_bios_encoder_data *devdata);
+void intel_bios_init_panel_late(struct drm_i915_private *dev_priv,
+ struct intel_panel *panel,
+ const struct intel_bios_encoder_data *devdata,
+ const struct drm_edid *drm_edid);
void intel_bios_fini_panel(struct intel_panel *panel);
void intel_bios_driver_remove(struct drm_i915_private *dev_priv);
bool intel_bios_is_valid_vbt(const void *buf, size_t size);
diff --git a/drivers/gpu/drm/i915/display/intel_bw.h b/drivers/gpu/drm/i915/display/intel_bw.h
index cb7ee3a24a58..f20292143745 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.h
+++ b/drivers/gpu/drm/i915/display/intel_bw.h
@@ -8,7 +8,7 @@
#include <drm/drm_atomic.h>
-#include "intel_display.h"
+#include "intel_display_limits.h"
#include "intel_display_power.h"
#include "intel_global_state.h"
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
index b74e36d76013..7e16b655c833 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
@@ -1319,7 +1319,7 @@ static const struct intel_cdclk_vals adlp_cdclk_table[] = {
{ .refclk = 24000, .cdclk = 192000, .divider = 2, .ratio = 16 },
{ .refclk = 24000, .cdclk = 312000, .divider = 2, .ratio = 26 },
{ .refclk = 24000, .cdclk = 552000, .divider = 2, .ratio = 46 },
- { .refclk = 24400, .cdclk = 648000, .divider = 2, .ratio = 54 },
+ { .refclk = 24000, .cdclk = 648000, .divider = 2, .ratio = 54 },
{ .refclk = 38400, .cdclk = 179200, .divider = 3, .ratio = 14 },
{ .refclk = 38400, .cdclk = 192000, .divider = 2, .ratio = 10 },
@@ -1346,6 +1346,16 @@ static const struct intel_cdclk_vals dg2_cdclk_table[] = {
{}
};
+static const struct intel_cdclk_vals mtl_cdclk_table[] = {
+ { .refclk = 38400, .cdclk = 172800, .divider = 2, .ratio = 16, .waveform = 0xad5a },
+ { .refclk = 38400, .cdclk = 192000, .divider = 2, .ratio = 16, .waveform = 0xb6b6 },
+ { .refclk = 38400, .cdclk = 307200, .divider = 2, .ratio = 16, .waveform = 0x0000 },
+ { .refclk = 38400, .cdclk = 480000, .divider = 2, .ratio = 25, .waveform = 0x0000 },
+ { .refclk = 38400, .cdclk = 556800, .divider = 2, .ratio = 29, .waveform = 0x0000 },
+ { .refclk = 38400, .cdclk = 652800, .divider = 2, .ratio = 34, .waveform = 0x0000 },
+ {}
+};
+
static int bxt_calc_cdclk(struct drm_i915_private *dev_priv, int min_cdclk)
{
const struct intel_cdclk_vals *table = dev_priv->display.cdclk.table;
@@ -1717,39 +1727,92 @@ static void dg2_cdclk_squash_program(struct drm_i915_private *i915,
intel_de_write(i915, CDCLK_SQUASH_CTL, squash_ctl);
}
-static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
- const struct intel_cdclk_config *cdclk_config,
- enum pipe pipe)
+static bool cdclk_pll_is_unknown(unsigned int vco)
+{
+ /*
+ * Ensure driver does not take the crawl path for the
+ * case when the vco is set to ~0 in the
+ * sanitize path.
+ */
+ return vco == ~0;
+}
+
+static int cdclk_squash_divider(u16 waveform)
+{
+ return hweight16(waveform ?: 0xffff);
+}
+
+static bool cdclk_compute_crawl_and_squash_midpoint(struct drm_i915_private *i915,
+ const struct intel_cdclk_config *old_cdclk_config,
+ const struct intel_cdclk_config *new_cdclk_config,
+ struct intel_cdclk_config *mid_cdclk_config)
+{
+ u16 old_waveform, new_waveform, mid_waveform;
+ int size = 16;
+ int div = 2;
+
+ /* Return if PLL is in an unknown state, force a complete disable and re-enable. */
+ if (cdclk_pll_is_unknown(old_cdclk_config->vco))
+ return false;
+
+ /* Return if both Squash and Crawl are not present */
+ if (!HAS_CDCLK_CRAWL(i915) || !HAS_CDCLK_SQUASH(i915))
+ return false;
+
+ old_waveform = cdclk_squash_waveform(i915, old_cdclk_config->cdclk);
+ new_waveform = cdclk_squash_waveform(i915, new_cdclk_config->cdclk);
+
+ /* Return if Squash only or Crawl only is the desired action */
+ if (old_cdclk_config->vco == 0 || new_cdclk_config->vco == 0 ||
+ old_cdclk_config->vco == new_cdclk_config->vco ||
+ old_waveform == new_waveform)
+ return false;
+
+ *mid_cdclk_config = *new_cdclk_config;
+
+ /*
+ * Populate the mid_cdclk_config accordingly.
+ * - If moving to a higher cdclk, the desired action is squashing.
+ * The mid cdclk config should have the new (squash) waveform.
+ * - If moving to a lower cdclk, the desired action is crawling.
+ * The mid cdclk config should have the new vco.
+ */
+
+ if (cdclk_squash_divider(new_waveform) > cdclk_squash_divider(old_waveform)) {
+ mid_cdclk_config->vco = old_cdclk_config->vco;
+ mid_waveform = new_waveform;
+ } else {
+ mid_cdclk_config->vco = new_cdclk_config->vco;
+ mid_waveform = old_waveform;
+ }
+
+ mid_cdclk_config->cdclk = DIV_ROUND_CLOSEST(cdclk_squash_divider(mid_waveform) *
+ mid_cdclk_config->vco, size * div);
+
+ /* make sure the mid clock came out sane */
+
+ drm_WARN_ON(&i915->drm, mid_cdclk_config->cdclk <
+ min(old_cdclk_config->cdclk, new_cdclk_config->cdclk));
+ drm_WARN_ON(&i915->drm, mid_cdclk_config->cdclk >
+ i915->display.cdclk.max_cdclk_freq);
+ drm_WARN_ON(&i915->drm, cdclk_squash_waveform(i915, mid_cdclk_config->cdclk) !=
+ mid_waveform);
+
+ return true;
+}
+
+static void _bxt_set_cdclk(struct drm_i915_private *dev_priv,
+ const struct intel_cdclk_config *cdclk_config,
+ enum pipe pipe)
{
int cdclk = cdclk_config->cdclk;
int vco = cdclk_config->vco;
u32 val;
u16 waveform;
int clock;
- int ret;
-
- /* Inform power controller of upcoming frequency change. */
- if (DISPLAY_VER(dev_priv) >= 11)
- ret = skl_pcode_request(&dev_priv->uncore, SKL_PCODE_CDCLK_CONTROL,
- SKL_CDCLK_PREPARE_FOR_CHANGE,
- SKL_CDCLK_READY_FOR_CHANGE,
- SKL_CDCLK_READY_FOR_CHANGE, 3);
- else
- /*
- * BSpec requires us to wait up to 150usec, but that leads to
- * timeouts; the 2ms used here is based on experiment.
- */
- ret = snb_pcode_write_timeout(&dev_priv->uncore,
- HSW_PCODE_DE_WRITE_FREQ_REQ,
- 0x80000000, 150, 2);
- if (ret) {
- drm_err(&dev_priv->drm,
- "Failed to inform PCU about cdclk change (err %d, freq %d)\n",
- ret, cdclk);
- return;
- }
- if (HAS_CDCLK_CRAWL(dev_priv) && dev_priv->display.cdclk.hw.vco > 0 && vco > 0) {
+ if (HAS_CDCLK_CRAWL(dev_priv) && dev_priv->display.cdclk.hw.vco > 0 && vco > 0 &&
+ !cdclk_pll_is_unknown(dev_priv->display.cdclk.hw.vco)) {
if (dev_priv->display.cdclk.hw.vco != vco)
adlp_cdclk_pll_crawl(dev_priv, vco);
} else if (DISPLAY_VER(dev_priv) >= 11)
@@ -1782,11 +1845,62 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
if (pipe != INVALID_PIPE)
intel_crtc_wait_for_next_vblank(intel_crtc_for_pipe(dev_priv, pipe));
+}
+
+static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
+ const struct intel_cdclk_config *cdclk_config,
+ enum pipe pipe)
+{
+ struct intel_cdclk_config mid_cdclk_config;
+ int cdclk = cdclk_config->cdclk;
+ int ret = 0;
+
+ /*
+ * Inform power controller of upcoming frequency change.
+ * Display versions 14 and beyond do not follow the PUnit
+ * mailbox communication, skip
+ * this step.
+ */
+ if (DISPLAY_VER(dev_priv) >= 14)
+ /* NOOP */;
+ else if (DISPLAY_VER(dev_priv) >= 11)
+ ret = skl_pcode_request(&dev_priv->uncore, SKL_PCODE_CDCLK_CONTROL,
+ SKL_CDCLK_PREPARE_FOR_CHANGE,
+ SKL_CDCLK_READY_FOR_CHANGE,
+ SKL_CDCLK_READY_FOR_CHANGE, 3);
+ else
+ /*
+ * BSpec requires us to wait up to 150usec, but that leads to
+ * timeouts; the 2ms used here is based on experiment.
+ */
+ ret = snb_pcode_write_timeout(&dev_priv->uncore,
+ HSW_PCODE_DE_WRITE_FREQ_REQ,
+ 0x80000000, 150, 2);
+
+ if (ret) {
+ drm_err(&dev_priv->drm,
+ "Failed to inform PCU about cdclk change (err %d, freq %d)\n",
+ ret, cdclk);
+ return;
+ }
+
+ if (cdclk_compute_crawl_and_squash_midpoint(dev_priv, &dev_priv->display.cdclk.hw,
+ cdclk_config, &mid_cdclk_config)) {
+ _bxt_set_cdclk(dev_priv, &mid_cdclk_config, pipe);
+ _bxt_set_cdclk(dev_priv, cdclk_config, pipe);
+ } else {
+ _bxt_set_cdclk(dev_priv, cdclk_config, pipe);
+ }
- if (DISPLAY_VER(dev_priv) >= 11) {
+ if (DISPLAY_VER(dev_priv) >= 14)
+ /*
+ * NOOP - No Pcode communication needed for
+ * Display versions 14 and beyond
+ */;
+ else if (DISPLAY_VER(dev_priv) >= 11)
ret = snb_pcode_write(&dev_priv->uncore, SKL_PCODE_CDCLK_CONTROL,
cdclk_config->voltage_level);
- } else {
+ else
/*
* The timeout isn't specified, the 2ms used here is based on
* experiment.
@@ -1797,7 +1911,6 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
HSW_PCODE_DE_WRITE_FREQ_REQ,
cdclk_config->voltage_level,
150, 2);
- }
if (ret) {
drm_err(&dev_priv->drm,
@@ -1954,6 +2067,28 @@ void intel_cdclk_uninit_hw(struct drm_i915_private *i915)
skl_cdclk_uninit_hw(i915);
}
+static bool intel_cdclk_can_crawl_and_squash(struct drm_i915_private *i915,
+ const struct intel_cdclk_config *a,
+ const struct intel_cdclk_config *b)
+{
+ u16 old_waveform;
+ u16 new_waveform;
+
+ drm_WARN_ON(&i915->drm, cdclk_pll_is_unknown(a->vco));
+
+ if (a->vco == 0 || b->vco == 0)
+ return false;
+
+ if (!HAS_CDCLK_CRAWL(i915) || !HAS_CDCLK_SQUASH(i915))
+ return false;
+
+ old_waveform = cdclk_squash_waveform(i915, a->cdclk);
+ new_waveform = cdclk_squash_waveform(i915, b->cdclk);
+
+ return a->vco != b->vco &&
+ old_waveform != new_waveform;
+}
+
static bool intel_cdclk_can_crawl(struct drm_i915_private *dev_priv,
const struct intel_cdclk_config *a,
const struct intel_cdclk_config *b)
@@ -2760,9 +2895,14 @@ int intel_modeset_calc_cdclk(struct intel_atomic_state *state)
pipe = INVALID_PIPE;
}
- if (intel_cdclk_can_squash(dev_priv,
- &old_cdclk_state->actual,
- &new_cdclk_state->actual)) {
+ if (intel_cdclk_can_crawl_and_squash(dev_priv,
+ &old_cdclk_state->actual,
+ &new_cdclk_state->actual)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Can change cdclk via crawling and squashing\n");
+ } else if (intel_cdclk_can_squash(dev_priv,
+ &old_cdclk_state->actual,
+ &new_cdclk_state->actual)) {
drm_dbg_kms(&dev_priv->drm,
"Can change cdclk via squashing\n");
} else if (intel_cdclk_can_crawl(dev_priv,
@@ -3060,6 +3200,13 @@ u32 intel_read_rawclk(struct drm_i915_private *dev_priv)
return freq;
}
+static const struct intel_cdclk_funcs mtl_cdclk_funcs = {
+ .get_cdclk = bxt_get_cdclk,
+ .set_cdclk = bxt_set_cdclk,
+ .modeset_calc_cdclk = bxt_modeset_calc_cdclk,
+ .calc_voltage_level = tgl_calc_voltage_level,
+};
+
static const struct intel_cdclk_funcs tgl_cdclk_funcs = {
.get_cdclk = bxt_get_cdclk,
.set_cdclk = bxt_set_cdclk,
@@ -3195,7 +3342,10 @@ static const struct intel_cdclk_funcs i830_cdclk_funcs = {
*/
void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv)
{
- if (IS_DG2(dev_priv)) {
+ if (IS_METEORLAKE(dev_priv)) {
+ dev_priv->display.funcs.cdclk = &mtl_cdclk_funcs;
+ dev_priv->display.cdclk.table = mtl_cdclk_table;
+ } else if (IS_DG2(dev_priv)) {
dev_priv->display.funcs.cdclk = &tgl_cdclk_funcs;
dev_priv->display.cdclk.table = dg2_cdclk_table;
} else if (IS_ALDERLAKE_P(dev_priv)) {
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.h b/drivers/gpu/drm/i915/display/intel_cdclk.h
index c674879a84a5..51e2f6a11ce4 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.h
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.h
@@ -8,7 +8,7 @@
#include <linux/types.h>
-#include "intel_display.h"
+#include "intel_display_limits.h"
#include "intel_global_state.h"
struct drm_i915_private;
diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c
index 250e83f1f5ac..8d97c299e657 100644
--- a/drivers/gpu/drm/i915/display/intel_color.c
+++ b/drivers/gpu/drm/i915/display/intel_color.c
@@ -53,7 +53,18 @@ struct intel_color_funcs {
* involved with the same commit.
*/
void (*load_luts)(const struct intel_crtc_state *crtc_state);
+ /*
+ * Read out the LUTs from the hardware into the software state.
+ * Used by eg. the hardware state checker.
+ */
void (*read_luts)(struct intel_crtc_state *crtc_state);
+ /*
+ * Compare the LUTs
+ */
+ bool (*lut_equal)(const struct intel_crtc_state *crtc_state,
+ const struct drm_property_blob *blob1,
+ const struct drm_property_blob *blob2,
+ bool is_pre_csc_lut);
};
#define CTM_COEFF_SIGN (1ULL << 63)
@@ -143,15 +154,7 @@ static const u16 ilk_csc_postoff_rgb_to_ycbcr[3] = {
static bool lut_is_legacy(const struct drm_property_blob *lut)
{
- return drm_color_lut_size(lut) == LEGACY_LUT_LENGTH;
-}
-
-static bool crtc_state_is_legacy_gamma(const struct intel_crtc_state *crtc_state)
-{
- return !crtc_state->hw.degamma_lut &&
- !crtc_state->hw.ctm &&
- crtc_state->hw.gamma_lut &&
- lut_is_legacy(crtc_state->hw.gamma_lut);
+ return lut && drm_color_lut_size(lut) == LEGACY_LUT_LENGTH;
}
/*
@@ -246,17 +249,44 @@ static void icl_update_output_csc(struct intel_crtc *crtc,
intel_de_write_fw(i915, PIPE_CSC_OUTPUT_POSTOFF_LO(pipe), postoff[2]);
}
-static bool ilk_csc_limited_range(const struct intel_crtc_state *crtc_state)
+static bool ilk_limited_range(const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
- /*
- * FIXME if there's a gamma LUT after the CSC, we should
- * do the range compression using the gamma LUT instead.
- */
- return crtc_state->limited_color_range &&
- (IS_HASWELL(i915) || IS_BROADWELL(i915) ||
- IS_DISPLAY_VER(i915, 9, 10));
+ /* icl+ have dedicated output CSC */
+ if (DISPLAY_VER(i915) >= 11)
+ return false;
+
+ /* pre-hsw have PIPECONF_COLOR_RANGE_SELECT */
+ if (DISPLAY_VER(i915) < 7 || IS_IVYBRIDGE(i915))
+ return false;
+
+ return crtc_state->limited_color_range;
+}
+
+static bool ilk_lut_limited_range(const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+
+ if (!ilk_limited_range(crtc_state))
+ return false;
+
+ if (crtc_state->c8_planes)
+ return false;
+
+ if (DISPLAY_VER(i915) == 10)
+ return crtc_state->hw.gamma_lut;
+ else
+ return crtc_state->hw.gamma_lut &&
+ (crtc_state->hw.degamma_lut || crtc_state->hw.ctm);
+}
+
+static bool ilk_csc_limited_range(const struct intel_crtc_state *crtc_state)
+{
+ if (!ilk_limited_range(crtc_state))
+ return false;
+
+ return !ilk_lut_limited_range(crtc_state);
}
static void ilk_csc_convert_ctm(const struct intel_crtc_state *crtc_state,
@@ -437,6 +467,79 @@ static void i9xx_lut_8_pack(struct drm_color_lut *entry, u32 val)
entry->blue = intel_color_lut_pack(REG_FIELD_GET(PALETTE_BLUE_MASK, val), 8);
}
+/* i8xx/i9xx+ 10bit slope format "even DW" (low 8 bits) */
+static u32 _i9xx_lut_10_ldw(u16 a)
+{
+ return drm_color_lut_extract(a, 10) & 0xff;
+}
+
+static u32 i9xx_lut_10_ldw(const struct drm_color_lut *color)
+{
+ return REG_FIELD_PREP(PALETTE_RED_MASK, _i9xx_lut_10_ldw(color[0].red)) |
+ REG_FIELD_PREP(PALETTE_GREEN_MASK, _i9xx_lut_10_ldw(color[0].green)) |
+ REG_FIELD_PREP(PALETTE_BLUE_MASK, _i9xx_lut_10_ldw(color[0].blue));
+}
+
+/* i8xx/i9xx+ 10bit slope format "odd DW" (high 2 bits + slope) */
+static u32 _i9xx_lut_10_udw(u16 a, u16 b)
+{
+ unsigned int mantissa, exponent;
+
+ a = drm_color_lut_extract(a, 10);
+ b = drm_color_lut_extract(b, 10);
+
+ /* b = a + 8 * m * 2 ^ -e */
+ mantissa = clamp(b - a, 0, 0x7f);
+ exponent = 3;
+ while (mantissa > 0xf) {
+ mantissa >>= 1;
+ exponent--;
+ }
+
+ return (exponent << 6) |
+ (mantissa << 2) |
+ (a >> 8);
+}
+
+static u32 i9xx_lut_10_udw(const struct drm_color_lut *color)
+{
+ return REG_FIELD_PREP(PALETTE_RED_MASK, _i9xx_lut_10_udw(color[0].red, color[1].red)) |
+ REG_FIELD_PREP(PALETTE_GREEN_MASK, _i9xx_lut_10_udw(color[0].green, color[1].green)) |
+ REG_FIELD_PREP(PALETTE_BLUE_MASK, _i9xx_lut_10_udw(color[0].blue, color[1].blue));
+}
+
+static void i9xx_lut_10_pack(struct drm_color_lut *color,
+ u32 ldw, u32 udw)
+{
+ u16 red = REG_FIELD_GET(PALETTE_10BIT_RED_LDW_MASK, ldw) |
+ REG_FIELD_GET(PALETTE_10BIT_RED_UDW_MASK, udw) << 8;
+ u16 green = REG_FIELD_GET(PALETTE_10BIT_GREEN_LDW_MASK, ldw) |
+ REG_FIELD_GET(PALETTE_10BIT_GREEN_UDW_MASK, udw) << 8;
+ u16 blue = REG_FIELD_GET(PALETTE_10BIT_BLUE_LDW_MASK, ldw) |
+ REG_FIELD_GET(PALETTE_10BIT_BLUE_UDW_MASK, udw) << 8;
+
+ color->red = intel_color_lut_pack(red, 10);
+ color->green = intel_color_lut_pack(green, 10);
+ color->blue = intel_color_lut_pack(blue, 10);
+}
+
+static void i9xx_lut_10_pack_slope(struct drm_color_lut *color,
+ u32 ldw, u32 udw)
+{
+ int r_exp = REG_FIELD_GET(PALETTE_10BIT_RED_EXP_MASK, udw);
+ int r_mant = REG_FIELD_GET(PALETTE_10BIT_RED_MANT_MASK, udw);
+ int g_exp = REG_FIELD_GET(PALETTE_10BIT_GREEN_EXP_MASK, udw);
+ int g_mant = REG_FIELD_GET(PALETTE_10BIT_GREEN_MANT_MASK, udw);
+ int b_exp = REG_FIELD_GET(PALETTE_10BIT_BLUE_EXP_MASK, udw);
+ int b_mant = REG_FIELD_GET(PALETTE_10BIT_BLUE_MANT_MASK, udw);
+
+ i9xx_lut_10_pack(color, ldw, udw);
+
+ color->red += r_mant << (3 - r_exp);
+ color->green += g_mant << (3 - g_exp);
+ color->blue += b_mant << (3 - b_exp);
+}
+
/* i965+ "10.6" bit interpolated format "even DW" (low 8 bits) */
static u32 i965_lut_10p6_ldw(const struct drm_color_lut *color)
{
@@ -600,9 +703,18 @@ create_linear_lut(struct drm_i915_private *i915, int lut_size)
return blob;
}
+static u16 lut_limited_range(unsigned int value)
+{
+ unsigned int min = 16 << 8;
+ unsigned int max = 235 << 8;
+
+ return value * (max - min) / 0xffff + min;
+}
+
static struct drm_property_blob *
create_resized_lut(struct drm_i915_private *i915,
- const struct drm_property_blob *blob_in, int lut_out_size)
+ const struct drm_property_blob *blob_in, int lut_out_size,
+ bool limited_color_range)
{
int i, lut_in_size = drm_color_lut_size(blob_in);
struct drm_property_blob *blob_out;
@@ -618,8 +730,18 @@ create_resized_lut(struct drm_i915_private *i915,
lut_in = blob_in->data;
lut_out = blob_out->data;
- for (i = 0; i < lut_out_size; i++)
- lut_out[i] = lut_in[i * (lut_in_size - 1) / (lut_out_size - 1)];
+ for (i = 0; i < lut_out_size; i++) {
+ const struct drm_color_lut *entry =
+ &lut_in[i * (lut_in_size - 1) / (lut_out_size - 1)];
+
+ if (limited_color_range) {
+ lut_out[i].red = lut_limited_range(entry->red);
+ lut_out[i].green = lut_limited_range(entry->green);
+ lut_out[i].blue = lut_limited_range(entry->blue);
+ } else {
+ lut_out[i] = *entry;
+ }
+ }
return blob_out;
}
@@ -642,12 +764,38 @@ static void i9xx_load_lut_8(struct intel_crtc *crtc,
i9xx_lut_8(&lut[i]));
}
+static void i9xx_load_lut_10(struct intel_crtc *crtc,
+ const struct drm_property_blob *blob)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct drm_color_lut *lut = blob->data;
+ int i, lut_size = drm_color_lut_size(blob);
+ enum pipe pipe = crtc->pipe;
+
+ for (i = 0; i < lut_size - 1; i++) {
+ intel_de_write_fw(dev_priv, PALETTE(pipe, 2 * i + 0),
+ i9xx_lut_10_ldw(&lut[i]));
+ intel_de_write_fw(dev_priv, PALETTE(pipe, 2 * i + 1),
+ i9xx_lut_10_udw(&lut[i]));
+ }
+}
+
static void i9xx_load_luts(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
const struct drm_property_blob *post_csc_lut = crtc_state->post_csc_lut;
- i9xx_load_lut_8(crtc, post_csc_lut);
+ switch (crtc_state->gamma_mode) {
+ case GAMMA_MODE_MODE_8BIT:
+ i9xx_load_lut_8(crtc, post_csc_lut);
+ break;
+ case GAMMA_MODE_MODE_10BIT:
+ i9xx_load_lut_10(crtc, post_csc_lut);
+ break;
+ default:
+ MISSING_CASE(crtc_state->gamma_mode);
+ break;
+ }
}
static void i965_load_lut_10p6(struct intel_crtc *crtc,
@@ -675,16 +823,34 @@ static void i965_load_luts(const struct intel_crtc_state *crtc_state)
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
const struct drm_property_blob *post_csc_lut = crtc_state->post_csc_lut;
- if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT)
+ switch (crtc_state->gamma_mode) {
+ case GAMMA_MODE_MODE_8BIT:
i9xx_load_lut_8(crtc, post_csc_lut);
- else
+ break;
+ case GAMMA_MODE_MODE_10BIT:
i965_load_lut_10p6(crtc, post_csc_lut);
+ break;
+ default:
+ MISSING_CASE(crtc_state->gamma_mode);
+ break;
+ }
}
-static void ilk_load_lut_8(struct intel_crtc *crtc,
+static void ilk_lut_write(const struct intel_crtc_state *crtc_state,
+ i915_reg_t reg, u32 val)
+{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+
+ if (crtc_state->dsb)
+ intel_dsb_reg_write(crtc_state->dsb, reg, val);
+ else
+ intel_de_write_fw(i915, reg, val);
+}
+
+static void ilk_load_lut_8(const struct intel_crtc_state *crtc_state,
const struct drm_property_blob *blob)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
const struct drm_color_lut *lut;
enum pipe pipe = crtc->pipe;
int i;
@@ -695,36 +861,35 @@ static void ilk_load_lut_8(struct intel_crtc *crtc,
lut = blob->data;
for (i = 0; i < 256; i++)
- intel_de_write_fw(i915, LGC_PALETTE(pipe, i),
- i9xx_lut_8(&lut[i]));
+ ilk_lut_write(crtc_state, LGC_PALETTE(pipe, i),
+ i9xx_lut_8(&lut[i]));
}
-static void ilk_load_lut_10(struct intel_crtc *crtc,
+static void ilk_load_lut_10(const struct intel_crtc_state *crtc_state,
const struct drm_property_blob *blob)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
const struct drm_color_lut *lut = blob->data;
int i, lut_size = drm_color_lut_size(blob);
enum pipe pipe = crtc->pipe;
for (i = 0; i < lut_size; i++)
- intel_de_write_fw(i915, PREC_PALETTE(pipe, i),
- ilk_lut_10(&lut[i]));
+ ilk_lut_write(crtc_state, PREC_PALETTE(pipe, i),
+ ilk_lut_10(&lut[i]));
}
static void ilk_load_luts(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
const struct drm_property_blob *post_csc_lut = crtc_state->post_csc_lut;
const struct drm_property_blob *pre_csc_lut = crtc_state->pre_csc_lut;
const struct drm_property_blob *blob = post_csc_lut ?: pre_csc_lut;
switch (crtc_state->gamma_mode) {
case GAMMA_MODE_MODE_8BIT:
- ilk_load_lut_8(crtc, blob);
+ ilk_load_lut_8(crtc_state, blob);
break;
case GAMMA_MODE_MODE_10BIT:
- ilk_load_lut_10(crtc, blob);
+ ilk_load_lut_10(crtc_state, blob);
break;
default:
MISSING_CASE(crtc_state->gamma_mode);
@@ -745,50 +910,56 @@ static int ivb_lut_10_size(u32 prec_index)
* "Restriction : Index auto increment mode is not
* supported and must not be enabled."
*/
-static void ivb_load_lut_10(struct intel_crtc *crtc,
+static void ivb_load_lut_10(const struct intel_crtc_state *crtc_state,
const struct drm_property_blob *blob,
u32 prec_index)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ const struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
const struct drm_color_lut *lut = blob->data;
int i, lut_size = drm_color_lut_size(blob);
enum pipe pipe = crtc->pipe;
for (i = 0; i < lut_size; i++) {
- intel_de_write_fw(i915, PREC_PAL_INDEX(pipe), prec_index++);
- intel_de_write_fw(i915, PREC_PAL_DATA(pipe),
- ilk_lut_10(&lut[i]));
+ ilk_lut_write(crtc_state, PREC_PAL_INDEX(pipe),
+ prec_index + i);
+ ilk_lut_write(crtc_state, PREC_PAL_DATA(pipe),
+ ilk_lut_10(&lut[i]));
}
/*
* Reset the index, otherwise it prevents the legacy palette to be
* written properly.
*/
- intel_de_write_fw(i915, PREC_PAL_INDEX(pipe), 0);
+ ilk_lut_write(crtc_state, PREC_PAL_INDEX(pipe),
+ PAL_PREC_INDEX_VALUE(0));
}
/* On BDW+ the index auto increment mode actually works */
-static void bdw_load_lut_10(struct intel_crtc *crtc,
+static void bdw_load_lut_10(const struct intel_crtc_state *crtc_state,
const struct drm_property_blob *blob,
u32 prec_index)
{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
const struct drm_color_lut *lut = blob->data;
int i, lut_size = drm_color_lut_size(blob);
enum pipe pipe = crtc->pipe;
- intel_de_write_fw(i915, PREC_PAL_INDEX(pipe),
- prec_index | PAL_PREC_AUTO_INCREMENT);
+ ilk_lut_write(crtc_state, PREC_PAL_INDEX(pipe),
+ prec_index);
+ ilk_lut_write(crtc_state, PREC_PAL_INDEX(pipe),
+ PAL_PREC_AUTO_INCREMENT |
+ prec_index);
for (i = 0; i < lut_size; i++)
- intel_de_write_fw(i915, PREC_PAL_DATA(pipe),
- ilk_lut_10(&lut[i]));
+ ilk_lut_write(crtc_state, PREC_PAL_DATA(pipe),
+ ilk_lut_10(&lut[i]));
/*
* Reset the index, otherwise it prevents the legacy palette to be
* written properly.
*/
- intel_de_write_fw(i915, PREC_PAL_INDEX(pipe), 0);
+ ilk_lut_write(crtc_state, PREC_PAL_INDEX(pipe),
+ PAL_PREC_INDEX_VALUE(0));
}
static void ivb_load_lut_ext_max(const struct intel_crtc_state *crtc_state)
@@ -797,9 +968,9 @@ static void ivb_load_lut_ext_max(const struct intel_crtc_state *crtc_state)
enum pipe pipe = crtc->pipe;
/* Program the max register to clamp values > 1.0. */
- intel_dsb_reg_write(crtc_state, PREC_PAL_EXT_GC_MAX(pipe, 0), 1 << 16);
- intel_dsb_reg_write(crtc_state, PREC_PAL_EXT_GC_MAX(pipe, 1), 1 << 16);
- intel_dsb_reg_write(crtc_state, PREC_PAL_EXT_GC_MAX(pipe, 2), 1 << 16);
+ ilk_lut_write(crtc_state, PREC_PAL_EXT_GC_MAX(pipe, 0), 1 << 16);
+ ilk_lut_write(crtc_state, PREC_PAL_EXT_GC_MAX(pipe, 1), 1 << 16);
+ ilk_lut_write(crtc_state, PREC_PAL_EXT_GC_MAX(pipe, 2), 1 << 16);
}
static void glk_load_lut_ext2_max(const struct intel_crtc_state *crtc_state)
@@ -808,31 +979,30 @@ static void glk_load_lut_ext2_max(const struct intel_crtc_state *crtc_state)
enum pipe pipe = crtc->pipe;
/* Program the max register to clamp values > 1.0. */
- intel_dsb_reg_write(crtc_state, PREC_PAL_EXT2_GC_MAX(pipe, 0), 1 << 16);
- intel_dsb_reg_write(crtc_state, PREC_PAL_EXT2_GC_MAX(pipe, 1), 1 << 16);
- intel_dsb_reg_write(crtc_state, PREC_PAL_EXT2_GC_MAX(pipe, 2), 1 << 16);
+ ilk_lut_write(crtc_state, PREC_PAL_EXT2_GC_MAX(pipe, 0), 1 << 16);
+ ilk_lut_write(crtc_state, PREC_PAL_EXT2_GC_MAX(pipe, 1), 1 << 16);
+ ilk_lut_write(crtc_state, PREC_PAL_EXT2_GC_MAX(pipe, 2), 1 << 16);
}
static void ivb_load_luts(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
const struct drm_property_blob *post_csc_lut = crtc_state->post_csc_lut;
const struct drm_property_blob *pre_csc_lut = crtc_state->pre_csc_lut;
const struct drm_property_blob *blob = post_csc_lut ?: pre_csc_lut;
switch (crtc_state->gamma_mode) {
case GAMMA_MODE_MODE_8BIT:
- ilk_load_lut_8(crtc, blob);
+ ilk_load_lut_8(crtc_state, blob);
break;
case GAMMA_MODE_MODE_SPLIT:
- ivb_load_lut_10(crtc, pre_csc_lut, PAL_PREC_SPLIT_MODE |
+ ivb_load_lut_10(crtc_state, pre_csc_lut, PAL_PREC_SPLIT_MODE |
PAL_PREC_INDEX_VALUE(0));
ivb_load_lut_ext_max(crtc_state);
- ivb_load_lut_10(crtc, post_csc_lut, PAL_PREC_SPLIT_MODE |
+ ivb_load_lut_10(crtc_state, post_csc_lut, PAL_PREC_SPLIT_MODE |
PAL_PREC_INDEX_VALUE(512));
break;
case GAMMA_MODE_MODE_10BIT:
- ivb_load_lut_10(crtc, blob,
+ ivb_load_lut_10(crtc_state, blob,
PAL_PREC_INDEX_VALUE(0));
ivb_load_lut_ext_max(crtc_state);
break;
@@ -844,25 +1014,23 @@ static void ivb_load_luts(const struct intel_crtc_state *crtc_state)
static void bdw_load_luts(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
const struct drm_property_blob *post_csc_lut = crtc_state->post_csc_lut;
const struct drm_property_blob *pre_csc_lut = crtc_state->pre_csc_lut;
const struct drm_property_blob *blob = post_csc_lut ?: pre_csc_lut;
switch (crtc_state->gamma_mode) {
case GAMMA_MODE_MODE_8BIT:
- ilk_load_lut_8(crtc, blob);
+ ilk_load_lut_8(crtc_state, blob);
break;
case GAMMA_MODE_MODE_SPLIT:
- bdw_load_lut_10(crtc, pre_csc_lut, PAL_PREC_SPLIT_MODE |
+ bdw_load_lut_10(crtc_state, pre_csc_lut, PAL_PREC_SPLIT_MODE |
PAL_PREC_INDEX_VALUE(0));
ivb_load_lut_ext_max(crtc_state);
- bdw_load_lut_10(crtc, post_csc_lut, PAL_PREC_SPLIT_MODE |
+ bdw_load_lut_10(crtc_state, post_csc_lut, PAL_PREC_SPLIT_MODE |
PAL_PREC_INDEX_VALUE(512));
break;
case GAMMA_MODE_MODE_10BIT:
-
- bdw_load_lut_10(crtc, blob,
+ bdw_load_lut_10(crtc_state, blob,
PAL_PREC_INDEX_VALUE(0));
ivb_load_lut_ext_max(crtc_state);
break;
@@ -894,9 +1062,11 @@ static void glk_load_degamma_lut(const struct intel_crtc_state *crtc_state,
* ignore the index bits, so we need to reset it to index 0
* separately.
*/
- intel_de_write_fw(i915, PRE_CSC_GAMC_INDEX(pipe), 0);
- intel_de_write_fw(i915, PRE_CSC_GAMC_INDEX(pipe),
- PRE_CSC_GAMC_AUTO_INCREMENT);
+ ilk_lut_write(crtc_state, PRE_CSC_GAMC_INDEX(pipe),
+ PRE_CSC_GAMC_INDEX_VALUE(0));
+ ilk_lut_write(crtc_state, PRE_CSC_GAMC_INDEX(pipe),
+ PRE_CSC_GAMC_AUTO_INCREMENT |
+ PRE_CSC_GAMC_INDEX_VALUE(0));
for (i = 0; i < lut_size; i++) {
/*
@@ -912,32 +1082,31 @@ static void glk_load_degamma_lut(const struct intel_crtc_state *crtc_state,
* ToDo: Extend to max 7.0. Enable 32 bit input value
* as compared to just 16 to achieve this.
*/
- intel_de_write_fw(i915, PRE_CSC_GAMC_DATA(pipe),
- lut[i].green);
+ ilk_lut_write(crtc_state, PRE_CSC_GAMC_DATA(pipe),
+ lut[i].green);
}
/* Clamp values > 1.0. */
while (i++ < glk_degamma_lut_size(i915))
- intel_de_write_fw(i915, PRE_CSC_GAMC_DATA(pipe), 1 << 16);
+ ilk_lut_write(crtc_state, PRE_CSC_GAMC_DATA(pipe), 1 << 16);
- intel_de_write_fw(i915, PRE_CSC_GAMC_INDEX(pipe), 0);
+ ilk_lut_write(crtc_state, PRE_CSC_GAMC_INDEX(pipe), 0);
}
static void glk_load_luts(const struct intel_crtc_state *crtc_state)
{
const struct drm_property_blob *pre_csc_lut = crtc_state->pre_csc_lut;
const struct drm_property_blob *post_csc_lut = crtc_state->post_csc_lut;
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
if (pre_csc_lut)
glk_load_degamma_lut(crtc_state, pre_csc_lut);
switch (crtc_state->gamma_mode) {
case GAMMA_MODE_MODE_8BIT:
- ilk_load_lut_8(crtc, post_csc_lut);
+ ilk_load_lut_8(crtc_state, post_csc_lut);
break;
case GAMMA_MODE_MODE_10BIT:
- bdw_load_lut_10(crtc, post_csc_lut, PAL_PREC_INDEX_VALUE(0));
+ bdw_load_lut_10(crtc_state, post_csc_lut, PAL_PREC_INDEX_VALUE(0));
ivb_load_lut_ext_max(crtc_state);
glk_load_lut_ext2_max(crtc_state);
break;
@@ -955,9 +1124,9 @@ ivb_load_lut_max(const struct intel_crtc_state *crtc_state,
enum pipe pipe = crtc->pipe;
/* FIXME LUT entries are 16 bit only, so we can prog 0xFFFF max */
- intel_dsb_reg_write(crtc_state, PREC_PAL_GC_MAX(pipe, 0), color->red);
- intel_dsb_reg_write(crtc_state, PREC_PAL_GC_MAX(pipe, 1), color->green);
- intel_dsb_reg_write(crtc_state, PREC_PAL_GC_MAX(pipe, 2), color->blue);
+ ilk_lut_write(crtc_state, PREC_PAL_GC_MAX(pipe, 0), color->red);
+ ilk_lut_write(crtc_state, PREC_PAL_GC_MAX(pipe, 1), color->green);
+ ilk_lut_write(crtc_state, PREC_PAL_GC_MAX(pipe, 2), color->blue);
}
static void
@@ -976,17 +1145,23 @@ icl_program_gamma_superfine_segment(const struct intel_crtc_state *crtc_state)
* 9 entries, corresponding to values 0, 1/(8 * 128 * 256),
* 2/(8 * 128 * 256) ... 8/(8 * 128 * 256).
*/
- intel_dsb_reg_write(crtc_state, PREC_PAL_MULTI_SEG_INDEX(pipe),
- PAL_PREC_AUTO_INCREMENT);
+ ilk_lut_write(crtc_state, PREC_PAL_MULTI_SEG_INDEX(pipe),
+ PAL_PREC_MULTI_SEG_INDEX_VALUE(0));
+ ilk_lut_write(crtc_state, PREC_PAL_MULTI_SEG_INDEX(pipe),
+ PAL_PREC_AUTO_INCREMENT |
+ PAL_PREC_MULTI_SEG_INDEX_VALUE(0));
for (i = 0; i < 9; i++) {
const struct drm_color_lut *entry = &lut[i];
- intel_dsb_indexed_reg_write(crtc_state, PREC_PAL_MULTI_SEG_DATA(pipe),
- ilk_lut_12p4_ldw(entry));
- intel_dsb_indexed_reg_write(crtc_state, PREC_PAL_MULTI_SEG_DATA(pipe),
- ilk_lut_12p4_udw(entry));
+ ilk_lut_write(crtc_state, PREC_PAL_MULTI_SEG_DATA(pipe),
+ ilk_lut_12p4_ldw(entry));
+ ilk_lut_write(crtc_state, PREC_PAL_MULTI_SEG_DATA(pipe),
+ ilk_lut_12p4_udw(entry));
}
+
+ ilk_lut_write(crtc_state, PREC_PAL_MULTI_SEG_INDEX(pipe),
+ PAL_PREC_MULTI_SEG_INDEX_VALUE(0));
}
static void
@@ -1009,14 +1184,19 @@ icl_program_gamma_multi_segment(const struct intel_crtc_state *crtc_state)
* PAL_PREC_INDEX[0] and PAL_PREC_INDEX[1] map to seg2[1],
* seg2[0] being unused by the hardware.
*/
- intel_dsb_reg_write(crtc_state, PREC_PAL_INDEX(pipe),
- PAL_PREC_AUTO_INCREMENT);
+ ilk_lut_write(crtc_state, PREC_PAL_INDEX(pipe),
+ PAL_PREC_INDEX_VALUE(0));
+ ilk_lut_write(crtc_state, PREC_PAL_INDEX(pipe),
+ PAL_PREC_AUTO_INCREMENT |
+ PAL_PREC_INDEX_VALUE(0));
+
for (i = 1; i < 257; i++) {
entry = &lut[i * 8];
- intel_dsb_indexed_reg_write(crtc_state, PREC_PAL_DATA(pipe),
- ilk_lut_12p4_ldw(entry));
- intel_dsb_indexed_reg_write(crtc_state, PREC_PAL_DATA(pipe),
- ilk_lut_12p4_udw(entry));
+
+ ilk_lut_write(crtc_state, PREC_PAL_DATA(pipe),
+ ilk_lut_12p4_ldw(entry));
+ ilk_lut_write(crtc_state, PREC_PAL_DATA(pipe),
+ ilk_lut_12p4_udw(entry));
}
/*
@@ -1033,12 +1213,16 @@ icl_program_gamma_multi_segment(const struct intel_crtc_state *crtc_state)
*/
for (i = 0; i < 256; i++) {
entry = &lut[i * 8 * 128];
- intel_dsb_indexed_reg_write(crtc_state, PREC_PAL_DATA(pipe),
- ilk_lut_12p4_ldw(entry));
- intel_dsb_indexed_reg_write(crtc_state, PREC_PAL_DATA(pipe),
- ilk_lut_12p4_udw(entry));
+
+ ilk_lut_write(crtc_state, PREC_PAL_DATA(pipe),
+ ilk_lut_12p4_ldw(entry));
+ ilk_lut_write(crtc_state, PREC_PAL_DATA(pipe),
+ ilk_lut_12p4_udw(entry));
}
+ ilk_lut_write(crtc_state, PREC_PAL_INDEX(pipe),
+ PAL_PREC_INDEX_VALUE(0));
+
/* The last entry in the LUT is to be programmed in GCMAX */
entry = &lut[256 * 8 * 128];
ivb_load_lut_max(crtc_state, entry);
@@ -1048,23 +1232,22 @@ static void icl_load_luts(const struct intel_crtc_state *crtc_state)
{
const struct drm_property_blob *pre_csc_lut = crtc_state->pre_csc_lut;
const struct drm_property_blob *post_csc_lut = crtc_state->post_csc_lut;
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
if (pre_csc_lut)
glk_load_degamma_lut(crtc_state, pre_csc_lut);
switch (crtc_state->gamma_mode & GAMMA_MODE_MODE_MASK) {
case GAMMA_MODE_MODE_8BIT:
- ilk_load_lut_8(crtc, post_csc_lut);
+ ilk_load_lut_8(crtc_state, post_csc_lut);
break;
- case GAMMA_MODE_MODE_12BIT_MULTI_SEGMENTED:
+ case GAMMA_MODE_MODE_12BIT_MULTI_SEG:
icl_program_gamma_superfine_segment(crtc_state);
icl_program_gamma_multi_segment(crtc_state);
ivb_load_lut_ext_max(crtc_state);
glk_load_lut_ext2_max(crtc_state);
break;
case GAMMA_MODE_MODE_10BIT:
- bdw_load_lut_10(crtc, post_csc_lut, PAL_PREC_INDEX_VALUE(0));
+ bdw_load_lut_10(crtc_state, post_csc_lut, PAL_PREC_INDEX_VALUE(0));
ivb_load_lut_ext_max(crtc_state);
glk_load_lut_ext2_max(crtc_state);
break;
@@ -1073,7 +1256,8 @@ static void icl_load_luts(const struct intel_crtc_state *crtc_state)
break;
}
- intel_dsb_commit(crtc_state);
+ if (crtc_state->dsb)
+ intel_dsb_commit(crtc_state->dsb);
}
static u32 chv_cgm_degamma_ldw(const struct drm_color_lut *color)
@@ -1087,6 +1271,13 @@ static u32 chv_cgm_degamma_udw(const struct drm_color_lut *color)
return REG_FIELD_PREP(CGM_PIPE_DEGAMMA_RED_UDW_MASK, drm_color_lut_extract(color->red, 14));
}
+static void chv_cgm_degamma_pack(struct drm_color_lut *entry, u32 ldw, u32 udw)
+{
+ entry->green = intel_color_lut_pack(REG_FIELD_GET(CGM_PIPE_DEGAMMA_GREEN_LDW_MASK, ldw), 14);
+ entry->blue = intel_color_lut_pack(REG_FIELD_GET(CGM_PIPE_DEGAMMA_BLUE_LDW_MASK, ldw), 14);
+ entry->red = intel_color_lut_pack(REG_FIELD_GET(CGM_PIPE_DEGAMMA_RED_UDW_MASK, udw), 14);
+}
+
static void chv_load_cgm_degamma(struct intel_crtc *crtc,
const struct drm_property_blob *blob)
{
@@ -1182,6 +1373,25 @@ void intel_color_commit_arm(const struct intel_crtc_state *crtc_state)
i915->display.funcs.color->color_commit_arm(crtc_state);
}
+void intel_color_prepare_commit(struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
+ /* FIXME DSB has issues loading LUTs, disable it for now */
+ return;
+
+ crtc_state->dsb = intel_dsb_prepare(crtc, 1024);
+}
+
+void intel_color_cleanup_commit(struct intel_crtc_state *crtc_state)
+{
+ if (!crtc_state->dsb)
+ return;
+
+ intel_dsb_cleanup(crtc_state->dsb);
+ crtc_state->dsb = NULL;
+}
+
static bool intel_can_preload_luts(const struct intel_crtc_state *new_crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
@@ -1224,8 +1434,25 @@ void intel_color_get_config(struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
- if (i915->display.funcs.color->read_luts)
- i915->display.funcs.color->read_luts(crtc_state);
+ i915->display.funcs.color->read_luts(crtc_state);
+}
+
+bool intel_color_lut_equal(const struct intel_crtc_state *crtc_state,
+ const struct drm_property_blob *blob1,
+ const struct drm_property_blob *blob2,
+ bool is_pre_csc_lut)
+{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+
+ /*
+ * FIXME c8_planes readout missing thus
+ * .read_luts() doesn't read out post_csc_lut.
+ */
+ if (!is_pre_csc_lut && crtc_state->c8_planes)
+ return true;
+
+ return i915->display.funcs.color->lut_equal(crtc_state, blob1, blob2,
+ is_pre_csc_lut);
}
static bool need_plane_update(struct intel_plane *plane,
@@ -1282,6 +1509,42 @@ intel_color_add_affected_planes(struct intel_crtc_state *new_crtc_state)
return 0;
}
+static u32 intel_gamma_lut_tests(const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut;
+
+ if (lut_is_legacy(gamma_lut))
+ return 0;
+
+ return INTEL_INFO(i915)->display.color.gamma_lut_tests;
+}
+
+static u32 intel_degamma_lut_tests(const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+
+ return INTEL_INFO(i915)->display.color.degamma_lut_tests;
+}
+
+static int intel_gamma_lut_size(const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut;
+
+ if (lut_is_legacy(gamma_lut))
+ return LEGACY_LUT_LENGTH;
+
+ return INTEL_INFO(i915)->display.color.gamma_lut_size;
+}
+
+static u32 intel_degamma_lut_size(const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+
+ return INTEL_INFO(i915)->display.color.degamma_lut_size;
+}
+
static int check_lut_size(const struct drm_property_blob *lut, int expected)
{
int len;
@@ -1299,29 +1562,23 @@ static int check_lut_size(const struct drm_property_blob *lut, int expected)
return 0;
}
-static int check_luts(const struct intel_crtc_state *crtc_state)
+static int _check_luts(const struct intel_crtc_state *crtc_state,
+ u32 degamma_tests, u32 gamma_tests)
{
struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut;
const struct drm_property_blob *degamma_lut = crtc_state->hw.degamma_lut;
int gamma_length, degamma_length;
- u32 gamma_tests, degamma_tests;
-
- /* Always allow legacy gamma LUT with no further checking. */
- if (crtc_state_is_legacy_gamma(crtc_state))
- return 0;
/* C8 relies on its palette being stored in the legacy LUT */
- if (crtc_state->c8_planes) {
+ if (crtc_state->c8_planes && !lut_is_legacy(crtc_state->hw.gamma_lut)) {
drm_dbg_kms(&i915->drm,
"C8 pixelformat requires the legacy LUT\n");
return -EINVAL;
}
- degamma_length = INTEL_INFO(i915)->display.color.degamma_lut_size;
- gamma_length = INTEL_INFO(i915)->display.color.gamma_lut_size;
- degamma_tests = INTEL_INFO(i915)->display.color.degamma_lut_tests;
- gamma_tests = INTEL_INFO(i915)->display.color.gamma_lut_tests;
+ degamma_length = intel_degamma_lut_size(crtc_state);
+ gamma_length = intel_gamma_lut_size(crtc_state);
if (check_lut_size(degamma_lut, degamma_length) ||
check_lut_size(gamma_lut, gamma_length))
@@ -1334,13 +1591,44 @@ static int check_luts(const struct intel_crtc_state *crtc_state)
return 0;
}
+static int check_luts(const struct intel_crtc_state *crtc_state)
+{
+ return _check_luts(crtc_state,
+ intel_degamma_lut_tests(crtc_state),
+ intel_gamma_lut_tests(crtc_state));
+}
+
static u32 i9xx_gamma_mode(struct intel_crtc_state *crtc_state)
{
if (!crtc_state->gamma_enable ||
- crtc_state_is_legacy_gamma(crtc_state))
+ lut_is_legacy(crtc_state->hw.gamma_lut))
return GAMMA_MODE_MODE_8BIT;
else
- return GAMMA_MODE_MODE_10BIT; /* i965+ only */
+ return GAMMA_MODE_MODE_10BIT;
+}
+
+static int i9xx_lut_10_diff(u16 a, u16 b)
+{
+ return drm_color_lut_extract(a, 10) -
+ drm_color_lut_extract(b, 10);
+}
+
+static int i9xx_check_lut_10(struct drm_i915_private *dev_priv,
+ const struct drm_property_blob *blob)
+{
+ const struct drm_color_lut *lut = blob->data;
+ int lut_size = drm_color_lut_size(blob);
+ const struct drm_color_lut *a = &lut[lut_size - 2];
+ const struct drm_color_lut *b = &lut[lut_size - 1];
+
+ if (i9xx_lut_10_diff(b->red, a->red) > 0x7f ||
+ i9xx_lut_10_diff(b->green, a->green) > 0x7f ||
+ i9xx_lut_10_diff(b->blue, a->blue) > 0x7f) {
+ drm_dbg_kms(&dev_priv->drm, "Last gamma LUT entry exceeds max slope\n");
+ return -EINVAL;
+ }
+
+ return 0;
}
void intel_color_assert_luts(const struct intel_crtc_state *crtc_state)
@@ -1355,15 +1643,19 @@ void intel_color_assert_luts(const struct intel_crtc_state *crtc_state)
crtc_state->post_csc_lut != crtc_state->hw.gamma_lut);
} else if (DISPLAY_VER(i915) == 10) {
drm_WARN_ON(&i915->drm,
+ crtc_state->post_csc_lut == crtc_state->hw.gamma_lut &&
crtc_state->pre_csc_lut != crtc_state->hw.degamma_lut &&
crtc_state->pre_csc_lut != i915->display.color.glk_linear_degamma_lut);
drm_WARN_ON(&i915->drm,
+ !ilk_lut_limited_range(crtc_state) &&
+ crtc_state->post_csc_lut != NULL &&
crtc_state->post_csc_lut != crtc_state->hw.gamma_lut);
} else if (crtc_state->gamma_mode != GAMMA_MODE_MODE_SPLIT) {
drm_WARN_ON(&i915->drm,
crtc_state->pre_csc_lut != crtc_state->hw.degamma_lut &&
crtc_state->pre_csc_lut != crtc_state->hw.gamma_lut);
drm_WARN_ON(&i915->drm,
+ !ilk_lut_limited_range(crtc_state) &&
crtc_state->post_csc_lut != crtc_state->hw.degamma_lut &&
crtc_state->post_csc_lut != crtc_state->hw.gamma_lut);
}
@@ -1379,6 +1671,7 @@ static void intel_assign_luts(struct intel_crtc_state *crtc_state)
static int i9xx_color_check(struct intel_crtc_state *crtc_state)
{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
int ret;
ret = check_luts(crtc_state);
@@ -1391,6 +1684,13 @@ static int i9xx_color_check(struct intel_crtc_state *crtc_state)
crtc_state->gamma_mode = i9xx_gamma_mode(crtc_state);
+ if (DISPLAY_VER(i915) < 4 &&
+ crtc_state->gamma_mode == GAMMA_MODE_MODE_10BIT) {
+ ret = i9xx_check_lut_10(i915, crtc_state->hw.gamma_lut);
+ if (ret)
+ return ret;
+ }
+
ret = intel_color_add_affected_planes(crtc_state);
if (ret)
return ret;
@@ -1406,14 +1706,12 @@ static u32 chv_cgm_mode(const struct intel_crtc_state *crtc_state)
{
u32 cgm_mode = 0;
- if (crtc_state_is_legacy_gamma(crtc_state))
- return 0;
-
if (crtc_state->hw.degamma_lut)
cgm_mode |= CGM_PIPE_MODE_DEGAMMA;
if (crtc_state->hw.ctm)
cgm_mode |= CGM_PIPE_MODE_CSC;
- if (crtc_state->hw.gamma_lut)
+ if (crtc_state->hw.gamma_lut &&
+ !lut_is_legacy(crtc_state->hw.gamma_lut))
cgm_mode |= CGM_PIPE_MODE_GAMMA;
return cgm_mode;
@@ -1440,7 +1738,7 @@ static int chv_color_check(struct intel_crtc_state *crtc_state)
* Otherwise we bypass it and use the CGM gamma instead.
*/
crtc_state->gamma_enable =
- crtc_state_is_legacy_gamma(crtc_state) &&
+ lut_is_legacy(crtc_state->hw.gamma_lut) &&
!crtc_state->c8_planes;
crtc_state->gamma_mode = GAMMA_MODE_MODE_8BIT;
@@ -1475,7 +1773,7 @@ static bool ilk_csc_enable(const struct intel_crtc_state *crtc_state)
static u32 ilk_gamma_mode(const struct intel_crtc_state *crtc_state)
{
if (!crtc_state->gamma_enable ||
- crtc_state_is_legacy_gamma(crtc_state))
+ lut_is_legacy(crtc_state->hw.gamma_lut))
return GAMMA_MODE_MODE_8BIT;
else
return GAMMA_MODE_MODE_10BIT;
@@ -1499,8 +1797,28 @@ static u32 ilk_csc_mode(const struct intel_crtc_state *crtc_state)
CSC_POSITION_BEFORE_GAMMA;
}
-static void ilk_assign_luts(struct intel_crtc_state *crtc_state)
+static int ilk_assign_luts(struct intel_crtc_state *crtc_state)
{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+
+ if (ilk_lut_limited_range(crtc_state)) {
+ struct drm_property_blob *gamma_lut;
+
+ gamma_lut = create_resized_lut(i915, crtc_state->hw.gamma_lut,
+ drm_color_lut_size(crtc_state->hw.gamma_lut),
+ true);
+ if (IS_ERR(gamma_lut))
+ return PTR_ERR(gamma_lut);
+
+ drm_property_replace_blob(&crtc_state->post_csc_lut, gamma_lut);
+
+ drm_property_blob_put(gamma_lut);
+
+ drm_property_replace_blob(&crtc_state->pre_csc_lut, crtc_state->hw.degamma_lut);
+
+ return 0;
+ }
+
if (crtc_state->hw.degamma_lut ||
crtc_state->csc_mode & CSC_POSITION_BEFORE_GAMMA) {
drm_property_replace_blob(&crtc_state->pre_csc_lut,
@@ -1513,6 +1831,8 @@ static void ilk_assign_luts(struct intel_crtc_state *crtc_state)
drm_property_replace_blob(&crtc_state->post_csc_lut,
NULL);
}
+
+ return 0;
}
static int ilk_color_check(struct intel_crtc_state *crtc_state)
@@ -1549,7 +1869,9 @@ static int ilk_color_check(struct intel_crtc_state *crtc_state)
if (ret)
return ret;
- ilk_assign_luts(crtc_state);
+ ret = ilk_assign_luts(crtc_state);
+ if (ret)
+ return ret;
crtc_state->preload_luts = intel_can_preload_luts(crtc_state);
@@ -1585,19 +1907,19 @@ static int ivb_assign_luts(struct intel_crtc_state *crtc_state)
struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
struct drm_property_blob *degamma_lut, *gamma_lut;
- if (crtc_state->gamma_mode != GAMMA_MODE_MODE_SPLIT) {
- ilk_assign_luts(crtc_state);
- return 0;
- }
+ if (crtc_state->gamma_mode != GAMMA_MODE_MODE_SPLIT)
+ return ilk_assign_luts(crtc_state);
drm_WARN_ON(&i915->drm, drm_color_lut_size(crtc_state->hw.degamma_lut) != 1024);
drm_WARN_ON(&i915->drm, drm_color_lut_size(crtc_state->hw.gamma_lut) != 1024);
- degamma_lut = create_resized_lut(i915, crtc_state->hw.degamma_lut, 512);
+ degamma_lut = create_resized_lut(i915, crtc_state->hw.degamma_lut, 512,
+ false);
if (IS_ERR(degamma_lut))
return PTR_ERR(degamma_lut);
- gamma_lut = create_resized_lut(i915, crtc_state->hw.gamma_lut, 512);
+ gamma_lut = create_resized_lut(i915, crtc_state->hw.gamma_lut, 512,
+ ilk_lut_limited_range(crtc_state));
if (IS_ERR(gamma_lut)) {
drm_property_blob_put(degamma_lut);
return PTR_ERR(gamma_lut);
@@ -1621,6 +1943,12 @@ static int ivb_color_check(struct intel_crtc_state *crtc_state)
if (ret)
return ret;
+ if (crtc_state->c8_planes && crtc_state->hw.degamma_lut) {
+ drm_dbg_kms(&i915->drm,
+ "C8 pixelformat and degamma together are not possible\n");
+ return -EINVAL;
+ }
+
if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB &&
crtc_state->hw.ctm) {
drm_dbg_kms(&i915->drm,
@@ -1659,17 +1987,57 @@ static int ivb_color_check(struct intel_crtc_state *crtc_state)
static u32 glk_gamma_mode(const struct intel_crtc_state *crtc_state)
{
if (!crtc_state->gamma_enable ||
- crtc_state_is_legacy_gamma(crtc_state))
+ lut_is_legacy(crtc_state->hw.gamma_lut))
return GAMMA_MODE_MODE_8BIT;
else
return GAMMA_MODE_MODE_10BIT;
}
-static void glk_assign_luts(struct intel_crtc_state *crtc_state)
+static bool glk_use_pre_csc_lut_for_gamma(const struct intel_crtc_state *crtc_state)
+{
+ return crtc_state->hw.gamma_lut &&
+ !crtc_state->c8_planes &&
+ crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB;
+}
+
+static int glk_assign_luts(struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
- intel_assign_luts(crtc_state);
+ if (glk_use_pre_csc_lut_for_gamma(crtc_state)) {
+ struct drm_property_blob *gamma_lut;
+
+ gamma_lut = create_resized_lut(i915, crtc_state->hw.gamma_lut,
+ INTEL_INFO(i915)->display.color.degamma_lut_size,
+ false);
+ if (IS_ERR(gamma_lut))
+ return PTR_ERR(gamma_lut);
+
+ drm_property_replace_blob(&crtc_state->pre_csc_lut, gamma_lut);
+ drm_property_replace_blob(&crtc_state->post_csc_lut, NULL);
+
+ drm_property_blob_put(gamma_lut);
+
+ return 0;
+ }
+
+ if (ilk_lut_limited_range(crtc_state)) {
+ struct drm_property_blob *gamma_lut;
+
+ gamma_lut = create_resized_lut(i915, crtc_state->hw.gamma_lut,
+ drm_color_lut_size(crtc_state->hw.gamma_lut),
+ true);
+ if (IS_ERR(gamma_lut))
+ return PTR_ERR(gamma_lut);
+
+ drm_property_replace_blob(&crtc_state->post_csc_lut, gamma_lut);
+
+ drm_property_blob_put(gamma_lut);
+ } else {
+ drm_property_replace_blob(&crtc_state->post_csc_lut, crtc_state->hw.gamma_lut);
+ }
+
+ drm_property_replace_blob(&crtc_state->pre_csc_lut, crtc_state->hw.degamma_lut);
/*
* On GLK+ both pipe CSC and degamma LUT are controlled
@@ -1680,6 +2048,19 @@ static void glk_assign_luts(struct intel_crtc_state *crtc_state)
if (crtc_state->csc_enable && !crtc_state->pre_csc_lut)
drm_property_replace_blob(&crtc_state->pre_csc_lut,
i915->display.color.glk_linear_degamma_lut);
+
+ return 0;
+}
+
+static int glk_check_luts(const struct intel_crtc_state *crtc_state)
+{
+ u32 degamma_tests = intel_degamma_lut_tests(crtc_state);
+ u32 gamma_tests = intel_gamma_lut_tests(crtc_state);
+
+ if (glk_use_pre_csc_lut_for_gamma(crtc_state))
+ gamma_tests |= degamma_tests;
+
+ return _check_luts(crtc_state, degamma_tests, gamma_tests);
}
static int glk_color_check(struct intel_crtc_state *crtc_state)
@@ -1687,7 +2068,7 @@ static int glk_color_check(struct intel_crtc_state *crtc_state)
struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
int ret;
- ret = check_luts(crtc_state);
+ ret = glk_check_luts(crtc_state);
if (ret)
return ret;
@@ -1706,14 +2087,16 @@ static int glk_color_check(struct intel_crtc_state *crtc_state)
}
crtc_state->gamma_enable =
+ !glk_use_pre_csc_lut_for_gamma(crtc_state) &&
crtc_state->hw.gamma_lut &&
!crtc_state->c8_planes;
/* On GLK+ degamma LUT is controlled by csc_enable */
crtc_state->csc_enable =
+ glk_use_pre_csc_lut_for_gamma(crtc_state) ||
crtc_state->hw.degamma_lut ||
crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB ||
- crtc_state->hw.ctm || crtc_state->limited_color_range;
+ crtc_state->hw.ctm || ilk_csc_limited_range(crtc_state);
crtc_state->gamma_mode = glk_gamma_mode(crtc_state);
@@ -1723,7 +2106,9 @@ static int glk_color_check(struct intel_crtc_state *crtc_state)
if (ret)
return ret;
- glk_assign_luts(crtc_state);
+ ret = glk_assign_luts(crtc_state);
+ if (ret)
+ return ret;
crtc_state->preload_luts = intel_can_preload_luts(crtc_state);
@@ -1744,7 +2129,7 @@ static u32 icl_gamma_mode(const struct intel_crtc_state *crtc_state)
gamma_mode |= POST_CSC_GAMMA_ENABLE;
if (!crtc_state->hw.gamma_lut ||
- crtc_state_is_legacy_gamma(crtc_state))
+ lut_is_legacy(crtc_state->hw.gamma_lut))
gamma_mode |= GAMMA_MODE_MODE_8BIT;
/*
* Enable 10bit gamma for D13
@@ -1754,7 +2139,7 @@ static u32 icl_gamma_mode(const struct intel_crtc_state *crtc_state)
else if (DISPLAY_VER(i915) >= 13)
gamma_mode |= GAMMA_MODE_MODE_10BIT;
else
- gamma_mode |= GAMMA_MODE_MODE_12BIT_MULTI_SEGMENTED;
+ gamma_mode |= GAMMA_MODE_MODE_12BIT_MULTI_SEG;
return gamma_mode;
}
@@ -1792,68 +2177,153 @@ static int icl_color_check(struct intel_crtc_state *crtc_state)
return 0;
}
-static int i9xx_gamma_precision(const struct intel_crtc_state *crtc_state)
+static int i9xx_post_csc_lut_precision(const struct intel_crtc_state *crtc_state)
{
- if (!crtc_state->gamma_enable)
+ if (!crtc_state->gamma_enable && !crtc_state->c8_planes)
return 0;
switch (crtc_state->gamma_mode) {
case GAMMA_MODE_MODE_8BIT:
return 8;
case GAMMA_MODE_MODE_10BIT:
- return 16;
+ return 10;
default:
MISSING_CASE(crtc_state->gamma_mode);
return 0;
}
}
-static int ilk_gamma_precision(const struct intel_crtc_state *crtc_state)
+static int i9xx_pre_csc_lut_precision(const struct intel_crtc_state *crtc_state)
{
- if (!crtc_state->gamma_enable)
- return 0;
+ return 0;
+}
- if ((crtc_state->csc_mode & CSC_POSITION_BEFORE_GAMMA) == 0)
+static int i965_post_csc_lut_precision(const struct intel_crtc_state *crtc_state)
+{
+ if (!crtc_state->gamma_enable && !crtc_state->c8_planes)
return 0;
switch (crtc_state->gamma_mode) {
case GAMMA_MODE_MODE_8BIT:
return 8;
case GAMMA_MODE_MODE_10BIT:
- return 10;
+ return 16;
default:
MISSING_CASE(crtc_state->gamma_mode);
return 0;
}
}
-static int chv_gamma_precision(const struct intel_crtc_state *crtc_state)
+static int ilk_gamma_mode_precision(u32 gamma_mode)
{
- if (crtc_state->cgm_mode & CGM_PIPE_MODE_GAMMA)
+ switch (gamma_mode) {
+ case GAMMA_MODE_MODE_8BIT:
+ return 8;
+ case GAMMA_MODE_MODE_10BIT:
return 10;
- else
- return i9xx_gamma_precision(crtc_state);
+ default:
+ MISSING_CASE(gamma_mode);
+ return 0;
+ }
+}
+
+static bool ilk_has_post_csc_lut(const struct intel_crtc_state *crtc_state)
+{
+ if (crtc_state->c8_planes)
+ return true;
+
+ return crtc_state->gamma_enable &&
+ (crtc_state->csc_mode & CSC_POSITION_BEFORE_GAMMA) != 0;
+}
+
+static bool ilk_has_pre_csc_lut(const struct intel_crtc_state *crtc_state)
+{
+ return crtc_state->gamma_enable &&
+ (crtc_state->csc_mode & CSC_POSITION_BEFORE_GAMMA) == 0;
}
-static int glk_gamma_precision(const struct intel_crtc_state *crtc_state)
+static int ilk_post_csc_lut_precision(const struct intel_crtc_state *crtc_state)
{
- if (!crtc_state->gamma_enable)
+ if (!ilk_has_post_csc_lut(crtc_state))
return 0;
- switch (crtc_state->gamma_mode) {
- case GAMMA_MODE_MODE_8BIT:
- return 8;
- case GAMMA_MODE_MODE_10BIT:
+ return ilk_gamma_mode_precision(crtc_state->gamma_mode);
+}
+
+static int ilk_pre_csc_lut_precision(const struct intel_crtc_state *crtc_state)
+{
+ if (!ilk_has_pre_csc_lut(crtc_state))
+ return 0;
+
+ return ilk_gamma_mode_precision(crtc_state->gamma_mode);
+}
+
+static int ivb_post_csc_lut_precision(const struct intel_crtc_state *crtc_state)
+{
+ if (crtc_state->gamma_enable &&
+ crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
return 10;
- default:
- MISSING_CASE(crtc_state->gamma_mode);
+
+ return ilk_post_csc_lut_precision(crtc_state);
+}
+
+static int ivb_pre_csc_lut_precision(const struct intel_crtc_state *crtc_state)
+{
+ if (crtc_state->gamma_enable &&
+ crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
+ return 10;
+
+ return ilk_pre_csc_lut_precision(crtc_state);
+}
+
+static int chv_post_csc_lut_precision(const struct intel_crtc_state *crtc_state)
+{
+ if (crtc_state->cgm_mode & CGM_PIPE_MODE_GAMMA)
+ return 10;
+
+ return i965_post_csc_lut_precision(crtc_state);
+}
+
+static int chv_pre_csc_lut_precision(const struct intel_crtc_state *crtc_state)
+{
+ if (crtc_state->cgm_mode & CGM_PIPE_MODE_DEGAMMA)
+ return 14;
+
+ return 0;
+}
+
+static int glk_post_csc_lut_precision(const struct intel_crtc_state *crtc_state)
+{
+ if (!crtc_state->gamma_enable && !crtc_state->c8_planes)
return 0;
- }
+
+ return ilk_gamma_mode_precision(crtc_state->gamma_mode);
}
-static int icl_gamma_precision(const struct intel_crtc_state *crtc_state)
+static int glk_pre_csc_lut_precision(const struct intel_crtc_state *crtc_state)
{
- if ((crtc_state->gamma_mode & POST_CSC_GAMMA_ENABLE) == 0)
+ if (!crtc_state->csc_enable)
+ return 0;
+
+ return 16;
+}
+
+static bool icl_has_post_csc_lut(const struct intel_crtc_state *crtc_state)
+{
+ if (crtc_state->c8_planes)
+ return true;
+
+ return crtc_state->gamma_mode & POST_CSC_GAMMA_ENABLE;
+}
+
+static bool icl_has_pre_csc_lut(const struct intel_crtc_state *crtc_state)
+{
+ return crtc_state->gamma_mode & PRE_CSC_GAMMA_ENABLE;
+}
+
+static int icl_post_csc_lut_precision(const struct intel_crtc_state *crtc_state)
+{
+ if (!icl_has_post_csc_lut(crtc_state))
return 0;
switch (crtc_state->gamma_mode & GAMMA_MODE_MODE_MASK) {
@@ -1861,7 +2331,7 @@ static int icl_gamma_precision(const struct intel_crtc_state *crtc_state)
return 8;
case GAMMA_MODE_MODE_10BIT:
return 10;
- case GAMMA_MODE_MODE_12BIT_MULTI_SEGMENTED:
+ case GAMMA_MODE_MODE_12BIT_MULTI_SEG:
return 16;
default:
MISSING_CASE(crtc_state->gamma_mode);
@@ -1869,26 +2339,12 @@ static int icl_gamma_precision(const struct intel_crtc_state *crtc_state)
}
}
-int intel_color_get_gamma_bit_precision(const struct intel_crtc_state *crtc_state)
+static int icl_pre_csc_lut_precision(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
-
- if (HAS_GMCH(i915)) {
- if (IS_CHERRYVIEW(i915))
- return chv_gamma_precision(crtc_state);
- else
- return i9xx_gamma_precision(crtc_state);
- } else {
- if (DISPLAY_VER(i915) >= 11)
- return icl_gamma_precision(crtc_state);
- else if (DISPLAY_VER(i915) == 10)
- return glk_gamma_precision(crtc_state);
- else if (IS_IRONLAKE(i915))
- return ilk_gamma_precision(crtc_state);
- }
+ if (!icl_has_pre_csc_lut(crtc_state))
+ return 0;
- return 0;
+ return 16;
}
static bool err_check(struct drm_color_lut *lut1,
@@ -1899,9 +2355,9 @@ static bool err_check(struct drm_color_lut *lut1,
((abs((long)lut2->green - lut1->green)) <= err);
}
-static bool intel_color_lut_entries_equal(struct drm_color_lut *lut1,
- struct drm_color_lut *lut2,
- int lut_size, u32 err)
+static bool intel_lut_entries_equal(struct drm_color_lut *lut1,
+ struct drm_color_lut *lut2,
+ int lut_size, u32 err)
{
int i;
@@ -1913,9 +2369,9 @@ static bool intel_color_lut_entries_equal(struct drm_color_lut *lut1,
return true;
}
-bool intel_color_lut_equal(struct drm_property_blob *blob1,
- struct drm_property_blob *blob2,
- u32 gamma_mode, u32 bit_precision)
+static bool intel_lut_equal(const struct drm_property_blob *blob1,
+ const struct drm_property_blob *blob2,
+ int check_size, int precision)
{
struct drm_color_lut *lut1, *lut2;
int lut_size1, lut_size2;
@@ -1924,40 +2380,134 @@ bool intel_color_lut_equal(struct drm_property_blob *blob1,
if (!blob1 != !blob2)
return false;
+ if (!blob1 != !precision)
+ return false;
+
if (!blob1)
return true;
lut_size1 = drm_color_lut_size(blob1);
lut_size2 = drm_color_lut_size(blob2);
- /* check sw and hw lut size */
if (lut_size1 != lut_size2)
return false;
+ if (check_size > lut_size1)
+ return false;
+
lut1 = blob1->data;
lut2 = blob2->data;
- err = 0xffff >> bit_precision;
+ err = 0xffff >> precision;
- /* check sw and hw lut entry to be equal */
- switch (gamma_mode & GAMMA_MODE_MODE_MASK) {
- case GAMMA_MODE_MODE_8BIT:
- case GAMMA_MODE_MODE_10BIT:
- if (!intel_color_lut_entries_equal(lut1, lut2,
- lut_size2, err))
- return false;
- break;
- case GAMMA_MODE_MODE_12BIT_MULTI_SEGMENTED:
- if (!intel_color_lut_entries_equal(lut1, lut2,
- 9, err))
- return false;
- break;
- default:
- MISSING_CASE(gamma_mode);
- return false;
- }
+ if (!check_size)
+ check_size = lut_size1;
- return true;
+ return intel_lut_entries_equal(lut1, lut2, check_size, err);
+}
+
+static bool i9xx_lut_equal(const struct intel_crtc_state *crtc_state,
+ const struct drm_property_blob *blob1,
+ const struct drm_property_blob *blob2,
+ bool is_pre_csc_lut)
+{
+ int check_size = 0;
+
+ if (is_pre_csc_lut)
+ return intel_lut_equal(blob1, blob2, 0,
+ i9xx_pre_csc_lut_precision(crtc_state));
+
+ /* 10bit mode last entry is implicit, just skip it */
+ if (crtc_state->gamma_mode == GAMMA_MODE_MODE_10BIT)
+ check_size = 128;
+
+ return intel_lut_equal(blob1, blob2, check_size,
+ i9xx_post_csc_lut_precision(crtc_state));
+}
+
+static bool i965_lut_equal(const struct intel_crtc_state *crtc_state,
+ const struct drm_property_blob *blob1,
+ const struct drm_property_blob *blob2,
+ bool is_pre_csc_lut)
+{
+ if (is_pre_csc_lut)
+ return intel_lut_equal(blob1, blob2, 0,
+ i9xx_pre_csc_lut_precision(crtc_state));
+ else
+ return intel_lut_equal(blob1, blob2, 0,
+ i965_post_csc_lut_precision(crtc_state));
+}
+
+static bool chv_lut_equal(const struct intel_crtc_state *crtc_state,
+ const struct drm_property_blob *blob1,
+ const struct drm_property_blob *blob2,
+ bool is_pre_csc_lut)
+{
+ if (is_pre_csc_lut)
+ return intel_lut_equal(blob1, blob2, 0,
+ chv_pre_csc_lut_precision(crtc_state));
+ else
+ return intel_lut_equal(blob1, blob2, 0,
+ chv_post_csc_lut_precision(crtc_state));
+}
+
+static bool ilk_lut_equal(const struct intel_crtc_state *crtc_state,
+ const struct drm_property_blob *blob1,
+ const struct drm_property_blob *blob2,
+ bool is_pre_csc_lut)
+{
+ if (is_pre_csc_lut)
+ return intel_lut_equal(blob1, blob2, 0,
+ ilk_pre_csc_lut_precision(crtc_state));
+ else
+ return intel_lut_equal(blob1, blob2, 0,
+ ilk_post_csc_lut_precision(crtc_state));
+}
+
+static bool ivb_lut_equal(const struct intel_crtc_state *crtc_state,
+ const struct drm_property_blob *blob1,
+ const struct drm_property_blob *blob2,
+ bool is_pre_csc_lut)
+{
+ if (is_pre_csc_lut)
+ return intel_lut_equal(blob1, blob2, 0,
+ ivb_pre_csc_lut_precision(crtc_state));
+ else
+ return intel_lut_equal(blob1, blob2, 0,
+ ivb_post_csc_lut_precision(crtc_state));
+}
+
+static bool glk_lut_equal(const struct intel_crtc_state *crtc_state,
+ const struct drm_property_blob *blob1,
+ const struct drm_property_blob *blob2,
+ bool is_pre_csc_lut)
+{
+ if (is_pre_csc_lut)
+ return intel_lut_equal(blob1, blob2, 0,
+ glk_pre_csc_lut_precision(crtc_state));
+ else
+ return intel_lut_equal(blob1, blob2, 0,
+ glk_post_csc_lut_precision(crtc_state));
+}
+
+static bool icl_lut_equal(const struct intel_crtc_state *crtc_state,
+ const struct drm_property_blob *blob1,
+ const struct drm_property_blob *blob2,
+ bool is_pre_csc_lut)
+{
+ int check_size = 0;
+
+ if (is_pre_csc_lut)
+ return intel_lut_equal(blob1, blob2, 0,
+ icl_pre_csc_lut_precision(crtc_state));
+
+ /* hw readout broken except for the super fine segment :( */
+ if ((crtc_state->gamma_mode & GAMMA_MODE_MODE_MASK) ==
+ GAMMA_MODE_MODE_12BIT_MULTI_SEG)
+ check_size = 9;
+
+ return intel_lut_equal(blob1, blob2, check_size,
+ icl_post_csc_lut_precision(crtc_state));
}
static struct drm_property_blob *i9xx_read_lut_8(struct intel_crtc *crtc)
@@ -1985,14 +2535,53 @@ static struct drm_property_blob *i9xx_read_lut_8(struct intel_crtc *crtc)
return blob;
}
+static struct drm_property_blob *i9xx_read_lut_10(struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ u32 lut_size = INTEL_INFO(dev_priv)->display.color.gamma_lut_size;
+ enum pipe pipe = crtc->pipe;
+ struct drm_property_blob *blob;
+ struct drm_color_lut *lut;
+ u32 ldw, udw;
+ int i;
+
+ blob = drm_property_create_blob(&dev_priv->drm,
+ lut_size * sizeof(lut[0]), NULL);
+ if (IS_ERR(blob))
+ return NULL;
+
+ lut = blob->data;
+
+ for (i = 0; i < lut_size - 1; i++) {
+ ldw = intel_de_read_fw(dev_priv, PALETTE(pipe, 2 * i + 0));
+ udw = intel_de_read_fw(dev_priv, PALETTE(pipe, 2 * i + 1));
+
+ i9xx_lut_10_pack(&lut[i], ldw, udw);
+ }
+
+ i9xx_lut_10_pack_slope(&lut[i], ldw, udw);
+
+ return blob;
+}
+
static void i9xx_read_luts(struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- if (!crtc_state->gamma_enable)
+ if (!crtc_state->gamma_enable && !crtc_state->c8_planes)
return;
- crtc_state->post_csc_lut = i9xx_read_lut_8(crtc);
+ switch (crtc_state->gamma_mode) {
+ case GAMMA_MODE_MODE_8BIT:
+ crtc_state->post_csc_lut = i9xx_read_lut_8(crtc);
+ break;
+ case GAMMA_MODE_MODE_10BIT:
+ crtc_state->post_csc_lut = i9xx_read_lut_10(crtc);
+ break;
+ default:
+ MISSING_CASE(crtc_state->gamma_mode);
+ break;
+ }
}
static struct drm_property_blob *i965_read_lut_10p6(struct intel_crtc *crtc)
@@ -2029,13 +2618,46 @@ static void i965_read_luts(struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- if (!crtc_state->gamma_enable)
+ if (!crtc_state->gamma_enable && !crtc_state->c8_planes)
return;
- if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT)
+ switch (crtc_state->gamma_mode) {
+ case GAMMA_MODE_MODE_8BIT:
crtc_state->post_csc_lut = i9xx_read_lut_8(crtc);
- else
+ break;
+ case GAMMA_MODE_MODE_10BIT:
crtc_state->post_csc_lut = i965_read_lut_10p6(crtc);
+ break;
+ default:
+ MISSING_CASE(crtc_state->gamma_mode);
+ break;
+ }
+}
+
+static struct drm_property_blob *chv_read_cgm_degamma(struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ int i, lut_size = INTEL_INFO(dev_priv)->display.color.degamma_lut_size;
+ enum pipe pipe = crtc->pipe;
+ struct drm_property_blob *blob;
+ struct drm_color_lut *lut;
+
+ blob = drm_property_create_blob(&dev_priv->drm,
+ sizeof(lut[0]) * lut_size,
+ NULL);
+ if (IS_ERR(blob))
+ return NULL;
+
+ lut = blob->data;
+
+ for (i = 0; i < lut_size; i++) {
+ u32 ldw = intel_de_read_fw(dev_priv, CGM_PIPE_DEGAMMA(pipe, i, 0));
+ u32 udw = intel_de_read_fw(dev_priv, CGM_PIPE_DEGAMMA(pipe, i, 1));
+
+ chv_cgm_degamma_pack(&lut[i], ldw, udw);
+ }
+
+ return blob;
}
static struct drm_property_blob *chv_read_cgm_gamma(struct intel_crtc *crtc)
@@ -2068,6 +2690,9 @@ static void chv_read_luts(struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ if (crtc_state->cgm_mode & CGM_PIPE_MODE_DEGAMMA)
+ crtc_state->pre_csc_lut = chv_read_cgm_degamma(crtc);
+
if (crtc_state->cgm_mode & CGM_PIPE_MODE_GAMMA)
crtc_state->post_csc_lut = chv_read_cgm_gamma(crtc);
else
@@ -2127,19 +2752,88 @@ static struct drm_property_blob *ilk_read_lut_10(struct intel_crtc *crtc)
static void ilk_read_luts(struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_property_blob **blob =
+ ilk_has_post_csc_lut(crtc_state) ?
+ &crtc_state->post_csc_lut : &crtc_state->pre_csc_lut;
- if (!crtc_state->gamma_enable)
+ if (!crtc_state->gamma_enable && !crtc_state->c8_planes)
return;
- if ((crtc_state->csc_mode & CSC_POSITION_BEFORE_GAMMA) == 0)
+ switch (crtc_state->gamma_mode) {
+ case GAMMA_MODE_MODE_8BIT:
+ *blob = ilk_read_lut_8(crtc);
+ break;
+ case GAMMA_MODE_MODE_10BIT:
+ *blob = ilk_read_lut_10(crtc);
+ break;
+ default:
+ MISSING_CASE(crtc_state->gamma_mode);
+ break;
+ }
+}
+
+/*
+ * IVB/HSW Bspec / PAL_PREC_INDEX:
+ * "Restriction : Index auto increment mode is not
+ * supported and must not be enabled."
+ */
+static struct drm_property_blob *ivb_read_lut_10(struct intel_crtc *crtc,
+ u32 prec_index)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ int i, lut_size = ivb_lut_10_size(prec_index);
+ enum pipe pipe = crtc->pipe;
+ struct drm_property_blob *blob;
+ struct drm_color_lut *lut;
+
+ blob = drm_property_create_blob(&dev_priv->drm,
+ sizeof(lut[0]) * lut_size,
+ NULL);
+ if (IS_ERR(blob))
+ return NULL;
+
+ lut = blob->data;
+
+ for (i = 0; i < lut_size; i++) {
+ u32 val;
+
+ intel_de_write_fw(dev_priv, PREC_PAL_INDEX(pipe),
+ prec_index + i);
+ val = intel_de_read_fw(dev_priv, PREC_PAL_DATA(pipe));
+
+ ilk_lut_10_pack(&lut[i], val);
+ }
+
+ intel_de_write_fw(dev_priv, PREC_PAL_INDEX(pipe),
+ PAL_PREC_INDEX_VALUE(0));
+
+ return blob;
+}
+
+static void ivb_read_luts(struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_property_blob **blob =
+ ilk_has_post_csc_lut(crtc_state) ?
+ &crtc_state->post_csc_lut : &crtc_state->pre_csc_lut;
+
+ if (!crtc_state->gamma_enable && !crtc_state->c8_planes)
return;
switch (crtc_state->gamma_mode) {
case GAMMA_MODE_MODE_8BIT:
- crtc_state->post_csc_lut = ilk_read_lut_8(crtc);
+ *blob = ilk_read_lut_8(crtc);
+ break;
+ case GAMMA_MODE_MODE_SPLIT:
+ crtc_state->pre_csc_lut =
+ ivb_read_lut_10(crtc, PAL_PREC_SPLIT_MODE |
+ PAL_PREC_INDEX_VALUE(0));
+ crtc_state->post_csc_lut =
+ ivb_read_lut_10(crtc, PAL_PREC_SPLIT_MODE |
+ PAL_PREC_INDEX_VALUE(512));
break;
case GAMMA_MODE_MODE_10BIT:
- crtc_state->post_csc_lut = ilk_read_lut_10(crtc);
+ *blob = ivb_read_lut_10(crtc, PAL_PREC_INDEX_VALUE(0));
break;
default:
MISSING_CASE(crtc_state->gamma_mode);
@@ -2152,14 +2846,11 @@ static struct drm_property_blob *bdw_read_lut_10(struct intel_crtc *crtc,
u32 prec_index)
{
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- int i, hw_lut_size = ivb_lut_10_size(prec_index);
- int lut_size = INTEL_INFO(i915)->display.color.gamma_lut_size;
+ int i, lut_size = ivb_lut_10_size(prec_index);
enum pipe pipe = crtc->pipe;
struct drm_property_blob *blob;
struct drm_color_lut *lut;
- drm_WARN_ON(&i915->drm, lut_size != hw_lut_size);
-
blob = drm_property_create_blob(&i915->drm,
sizeof(lut[0]) * lut_size,
NULL);
@@ -2169,7 +2860,10 @@ static struct drm_property_blob *bdw_read_lut_10(struct intel_crtc *crtc,
lut = blob->data;
intel_de_write_fw(i915, PREC_PAL_INDEX(pipe),
- prec_index | PAL_PREC_AUTO_INCREMENT);
+ prec_index);
+ intel_de_write_fw(i915, PREC_PAL_INDEX(pipe),
+ PAL_PREC_AUTO_INCREMENT |
+ prec_index);
for (i = 0; i < lut_size; i++) {
u32 val = intel_de_read_fw(i915, PREC_PAL_DATA(pipe));
@@ -2177,7 +2871,80 @@ static struct drm_property_blob *bdw_read_lut_10(struct intel_crtc *crtc,
ilk_lut_10_pack(&lut[i], val);
}
- intel_de_write_fw(i915, PREC_PAL_INDEX(pipe), 0);
+ intel_de_write_fw(i915, PREC_PAL_INDEX(pipe),
+ PAL_PREC_INDEX_VALUE(0));
+
+ return blob;
+}
+
+static void bdw_read_luts(struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_property_blob **blob =
+ ilk_has_post_csc_lut(crtc_state) ?
+ &crtc_state->post_csc_lut : &crtc_state->pre_csc_lut;
+
+ if (!crtc_state->gamma_enable && !crtc_state->c8_planes)
+ return;
+
+ switch (crtc_state->gamma_mode) {
+ case GAMMA_MODE_MODE_8BIT:
+ *blob = ilk_read_lut_8(crtc);
+ break;
+ case GAMMA_MODE_MODE_SPLIT:
+ crtc_state->pre_csc_lut =
+ bdw_read_lut_10(crtc, PAL_PREC_SPLIT_MODE |
+ PAL_PREC_INDEX_VALUE(0));
+ crtc_state->post_csc_lut =
+ bdw_read_lut_10(crtc, PAL_PREC_SPLIT_MODE |
+ PAL_PREC_INDEX_VALUE(512));
+ break;
+ case GAMMA_MODE_MODE_10BIT:
+ *blob = bdw_read_lut_10(crtc, PAL_PREC_INDEX_VALUE(0));
+ break;
+ default:
+ MISSING_CASE(crtc_state->gamma_mode);
+ break;
+ }
+}
+
+static struct drm_property_blob *glk_read_degamma_lut(struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ int i, lut_size = INTEL_INFO(dev_priv)->display.color.degamma_lut_size;
+ enum pipe pipe = crtc->pipe;
+ struct drm_property_blob *blob;
+ struct drm_color_lut *lut;
+
+ blob = drm_property_create_blob(&dev_priv->drm,
+ sizeof(lut[0]) * lut_size,
+ NULL);
+ if (IS_ERR(blob))
+ return NULL;
+
+ lut = blob->data;
+
+ /*
+ * When setting the auto-increment bit, the hardware seems to
+ * ignore the index bits, so we need to reset it to index 0
+ * separately.
+ */
+ intel_de_write_fw(dev_priv, PRE_CSC_GAMC_INDEX(pipe),
+ PRE_CSC_GAMC_INDEX_VALUE(0));
+ intel_de_write_fw(dev_priv, PRE_CSC_GAMC_INDEX(pipe),
+ PRE_CSC_GAMC_AUTO_INCREMENT |
+ PRE_CSC_GAMC_INDEX_VALUE(0));
+
+ for (i = 0; i < lut_size; i++) {
+ u32 val = intel_de_read_fw(dev_priv, PRE_CSC_GAMC_DATA(pipe));
+
+ lut[i].red = val;
+ lut[i].green = val;
+ lut[i].blue = val;
+ }
+
+ intel_de_write_fw(dev_priv, PRE_CSC_GAMC_INDEX(pipe),
+ PRE_CSC_GAMC_INDEX_VALUE(0));
return blob;
}
@@ -2186,7 +2953,10 @@ static void glk_read_luts(struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- if (!crtc_state->gamma_enable)
+ if (crtc_state->csc_enable)
+ crtc_state->pre_csc_lut = glk_read_degamma_lut(crtc);
+
+ if (!crtc_state->gamma_enable && !crtc_state->c8_planes)
return;
switch (crtc_state->gamma_mode) {
@@ -2220,7 +2990,10 @@ icl_read_lut_multi_segment(struct intel_crtc *crtc)
lut = blob->data;
intel_de_write_fw(i915, PREC_PAL_MULTI_SEG_INDEX(pipe),
- PAL_PREC_AUTO_INCREMENT);
+ PAL_PREC_MULTI_SEG_INDEX_VALUE(0));
+ intel_de_write_fw(i915, PREC_PAL_MULTI_SEG_INDEX(pipe),
+ PAL_PREC_MULTI_SEG_AUTO_INCREMENT |
+ PAL_PREC_MULTI_SEG_INDEX_VALUE(0));
for (i = 0; i < 9; i++) {
u32 ldw = intel_de_read_fw(i915, PREC_PAL_MULTI_SEG_DATA(pipe));
@@ -2229,7 +3002,8 @@ icl_read_lut_multi_segment(struct intel_crtc *crtc)
ilk_lut_12p4_pack(&lut[i], ldw, udw);
}
- intel_de_write_fw(i915, PREC_PAL_MULTI_SEG_INDEX(pipe), 0);
+ intel_de_write_fw(i915, PREC_PAL_MULTI_SEG_INDEX(pipe),
+ PAL_PREC_MULTI_SEG_INDEX_VALUE(0));
/*
* FIXME readouts from PAL_PREC_DATA register aren't giving
@@ -2244,7 +3018,10 @@ static void icl_read_luts(struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- if ((crtc_state->gamma_mode & POST_CSC_GAMMA_ENABLE) == 0)
+ if (icl_has_pre_csc_lut(crtc_state))
+ crtc_state->pre_csc_lut = glk_read_degamma_lut(crtc);
+
+ if (!icl_has_post_csc_lut(crtc_state))
return;
switch (crtc_state->gamma_mode & GAMMA_MODE_MODE_MASK) {
@@ -2254,7 +3031,7 @@ static void icl_read_luts(struct intel_crtc_state *crtc_state)
case GAMMA_MODE_MODE_10BIT:
crtc_state->post_csc_lut = bdw_read_lut_10(crtc, PAL_PREC_INDEX_VALUE(0));
break;
- case GAMMA_MODE_MODE_12BIT_MULTI_SEGMENTED:
+ case GAMMA_MODE_MODE_12BIT_MULTI_SEG:
crtc_state->post_csc_lut = icl_read_lut_multi_segment(crtc);
break;
default:
@@ -2268,6 +3045,7 @@ static const struct intel_color_funcs chv_color_funcs = {
.color_commit_arm = i9xx_color_commit_arm,
.load_luts = chv_load_luts,
.read_luts = chv_read_luts,
+ .lut_equal = chv_lut_equal,
};
static const struct intel_color_funcs i965_color_funcs = {
@@ -2275,6 +3053,7 @@ static const struct intel_color_funcs i965_color_funcs = {
.color_commit_arm = i9xx_color_commit_arm,
.load_luts = i965_load_luts,
.read_luts = i965_read_luts,
+ .lut_equal = i965_lut_equal,
};
static const struct intel_color_funcs i9xx_color_funcs = {
@@ -2282,6 +3061,7 @@ static const struct intel_color_funcs i9xx_color_funcs = {
.color_commit_arm = i9xx_color_commit_arm,
.load_luts = i9xx_load_luts,
.read_luts = i9xx_read_luts,
+ .lut_equal = i9xx_lut_equal,
};
static const struct intel_color_funcs icl_color_funcs = {
@@ -2290,6 +3070,7 @@ static const struct intel_color_funcs icl_color_funcs = {
.color_commit_arm = skl_color_commit_arm,
.load_luts = icl_load_luts,
.read_luts = icl_read_luts,
+ .lut_equal = icl_lut_equal,
};
static const struct intel_color_funcs glk_color_funcs = {
@@ -2298,6 +3079,7 @@ static const struct intel_color_funcs glk_color_funcs = {
.color_commit_arm = skl_color_commit_arm,
.load_luts = glk_load_luts,
.read_luts = glk_read_luts,
+ .lut_equal = glk_lut_equal,
};
static const struct intel_color_funcs skl_color_funcs = {
@@ -2305,7 +3087,8 @@ static const struct intel_color_funcs skl_color_funcs = {
.color_commit_noarm = ilk_color_commit_noarm,
.color_commit_arm = skl_color_commit_arm,
.load_luts = bdw_load_luts,
- .read_luts = NULL,
+ .read_luts = bdw_read_luts,
+ .lut_equal = ivb_lut_equal,
};
static const struct intel_color_funcs bdw_color_funcs = {
@@ -2313,7 +3096,8 @@ static const struct intel_color_funcs bdw_color_funcs = {
.color_commit_noarm = ilk_color_commit_noarm,
.color_commit_arm = hsw_color_commit_arm,
.load_luts = bdw_load_luts,
- .read_luts = NULL,
+ .read_luts = bdw_read_luts,
+ .lut_equal = ivb_lut_equal,
};
static const struct intel_color_funcs hsw_color_funcs = {
@@ -2321,7 +3105,8 @@ static const struct intel_color_funcs hsw_color_funcs = {
.color_commit_noarm = ilk_color_commit_noarm,
.color_commit_arm = hsw_color_commit_arm,
.load_luts = ivb_load_luts,
- .read_luts = NULL,
+ .read_luts = ivb_read_luts,
+ .lut_equal = ivb_lut_equal,
};
static const struct intel_color_funcs ivb_color_funcs = {
@@ -2329,7 +3114,8 @@ static const struct intel_color_funcs ivb_color_funcs = {
.color_commit_noarm = ilk_color_commit_noarm,
.color_commit_arm = ilk_color_commit_arm,
.load_luts = ivb_load_luts,
- .read_luts = NULL,
+ .read_luts = ivb_read_luts,
+ .lut_equal = ivb_lut_equal,
};
static const struct intel_color_funcs ilk_color_funcs = {
@@ -2338,19 +3124,34 @@ static const struct intel_color_funcs ilk_color_funcs = {
.color_commit_arm = ilk_color_commit_arm,
.load_luts = ilk_load_luts,
.read_luts = ilk_read_luts,
+ .lut_equal = ilk_lut_equal,
};
void intel_color_crtc_init(struct intel_crtc *crtc)
{
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- bool has_ctm = INTEL_INFO(i915)->display.color.degamma_lut_size != 0;
+ int degamma_lut_size, gamma_lut_size;
+ bool has_ctm;
drm_mode_crtc_set_gamma_size(&crtc->base, 256);
- drm_crtc_enable_color_mgmt(&crtc->base,
- INTEL_INFO(i915)->display.color.degamma_lut_size,
- has_ctm,
- INTEL_INFO(i915)->display.color.gamma_lut_size);
+ gamma_lut_size = INTEL_INFO(i915)->display.color.gamma_lut_size;
+ degamma_lut_size = INTEL_INFO(i915)->display.color.degamma_lut_size;
+ has_ctm = degamma_lut_size != 0;
+
+ /*
+ * "DPALETTE_A: NOTE: The 8-bit (non-10-bit) mode is the
+ * only mode supported by Alviso and Grantsdale."
+ *
+ * Actually looks like this affects all of gen3.
+ * Confirmed on alv,cst,pnv. Mobile gen2 parts (alm,mgm)
+ * are confirmed not to suffer from this restriction.
+ */
+ if (DISPLAY_VER(i915) == 3 && crtc->pipe == PIPE_A)
+ gamma_lut_size = 256;
+
+ drm_crtc_enable_color_mgmt(&crtc->base, degamma_lut_size,
+ has_ctm, gamma_lut_size);
}
int intel_color_init(struct drm_i915_private *i915)
diff --git a/drivers/gpu/drm/i915/display/intel_color.h b/drivers/gpu/drm/i915/display/intel_color.h
index 2a5ada67774d..d620b5b1e2a6 100644
--- a/drivers/gpu/drm/i915/display/intel_color.h
+++ b/drivers/gpu/drm/i915/display/intel_color.h
@@ -17,14 +17,16 @@ void intel_color_init_hooks(struct drm_i915_private *i915);
int intel_color_init(struct drm_i915_private *i915);
void intel_color_crtc_init(struct intel_crtc *crtc);
int intel_color_check(struct intel_crtc_state *crtc_state);
+void intel_color_prepare_commit(struct intel_crtc_state *crtc_state);
+void intel_color_cleanup_commit(struct intel_crtc_state *crtc_state);
void intel_color_commit_noarm(const struct intel_crtc_state *crtc_state);
void intel_color_commit_arm(const struct intel_crtc_state *crtc_state);
void intel_color_load_luts(const struct intel_crtc_state *crtc_state);
void intel_color_get_config(struct intel_crtc_state *crtc_state);
-int intel_color_get_gamma_bit_precision(const struct intel_crtc_state *crtc_state);
-bool intel_color_lut_equal(struct drm_property_blob *blob1,
- struct drm_property_blob *blob2,
- u32 gamma_mode, u32 bit_precision);
+bool intel_color_lut_equal(const struct intel_crtc_state *crtc_state,
+ const struct drm_property_blob *blob1,
+ const struct drm_property_blob *blob2,
+ bool is_pre_csc_lut);
void intel_color_assert_luts(const struct intel_crtc_state *crtc_state);
#endif /* __INTEL_COLOR_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_connector.c b/drivers/gpu/drm/i915/display/intel_connector.c
index 6205ddd3ded0..257afac34839 100644
--- a/drivers/gpu/drm/i915/display/intel_connector.c
+++ b/drivers/gpu/drm/i915/display/intel_connector.c
@@ -54,7 +54,7 @@ int intel_connector_init(struct intel_connector *connector)
__drm_atomic_helper_connector_reset(&connector->base,
&conn_state->base);
- INIT_LIST_HEAD(&connector->panel.fixed_modes);
+ intel_panel_init_alloc(connector);
return 0;
}
@@ -95,13 +95,10 @@ void intel_connector_destroy(struct drm_connector *connector)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
- kfree(intel_connector->detect_edid);
+ drm_edid_free(intel_connector->detect_edid);
intel_hdcp_cleanup(intel_connector);
- if (!IS_ERR_OR_NULL(intel_connector->edid))
- kfree(intel_connector->edid);
-
intel_panel_fini(intel_connector);
drm_connector_cleanup(connector);
diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c
index 797ad9489f7e..7267ffc7f539 100644
--- a/drivers/gpu/drm/i915/display/intel_crt.c
+++ b/drivers/gpu/drm/i915/display/intel_crt.c
@@ -682,30 +682,20 @@ intel_crt_load_detect(struct intel_crt *crt, u32 pipe)
{
struct drm_device *dev = crt->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_uncore *uncore = &dev_priv->uncore;
u32 save_bclrpat;
u32 save_vtotal;
u32 vtotal, vactive;
u32 vsample;
u32 vblank, vblank_start, vblank_end;
u32 dsl;
- i915_reg_t bclrpat_reg, vtotal_reg,
- vblank_reg, vsync_reg, pipeconf_reg, pipe_dsl_reg;
u8 st00;
enum drm_connector_status status;
drm_dbg_kms(&dev_priv->drm, "starting load-detect on CRT\n");
- bclrpat_reg = BCLRPAT(pipe);
- vtotal_reg = VTOTAL(pipe);
- vblank_reg = VBLANK(pipe);
- vsync_reg = VSYNC(pipe);
- pipeconf_reg = PIPECONF(pipe);
- pipe_dsl_reg = PIPEDSL(pipe);
-
- save_bclrpat = intel_uncore_read(uncore, bclrpat_reg);
- save_vtotal = intel_uncore_read(uncore, vtotal_reg);
- vblank = intel_uncore_read(uncore, vblank_reg);
+ save_bclrpat = intel_de_read(dev_priv, BCLRPAT(pipe));
+ save_vtotal = intel_de_read(dev_priv, VTOTAL(pipe));
+ vblank = intel_de_read(dev_priv, VBLANK(pipe));
vtotal = ((save_vtotal >> 16) & 0xfff) + 1;
vactive = (save_vtotal & 0x7ff) + 1;
@@ -714,23 +704,23 @@ intel_crt_load_detect(struct intel_crt *crt, u32 pipe)
vblank_end = ((vblank >> 16) & 0xfff) + 1;
/* Set the border color to purple. */
- intel_uncore_write(uncore, bclrpat_reg, 0x500050);
+ intel_de_write(dev_priv, BCLRPAT(pipe), 0x500050);
if (DISPLAY_VER(dev_priv) != 2) {
- u32 pipeconf = intel_uncore_read(uncore, pipeconf_reg);
- intel_uncore_write(uncore,
- pipeconf_reg,
- pipeconf | PIPECONF_FORCE_BORDER);
- intel_uncore_posting_read(uncore, pipeconf_reg);
+ u32 pipeconf = intel_de_read(dev_priv, PIPECONF(pipe));
+
+ intel_de_write(dev_priv, PIPECONF(pipe),
+ pipeconf | PIPECONF_FORCE_BORDER);
+ intel_de_posting_read(dev_priv, PIPECONF(pipe));
/* Wait for next Vblank to substitue
* border color for Color info */
intel_crtc_wait_for_next_vblank(intel_crtc_for_pipe(dev_priv, pipe));
- st00 = intel_uncore_read8(uncore, _VGA_MSR_WRITE);
+ st00 = intel_de_read8(dev_priv, _VGA_MSR_WRITE);
status = ((st00 & (1 << 4)) != 0) ?
connector_status_connected :
connector_status_disconnected;
- intel_uncore_write(uncore, pipeconf_reg, pipeconf);
+ intel_de_write(dev_priv, PIPECONF(pipe), pipeconf);
} else {
bool restore_vblank = false;
int count, detect;
@@ -740,14 +730,12 @@ intel_crt_load_detect(struct intel_crt *crt, u32 pipe)
* Yes, this will flicker
*/
if (vblank_start <= vactive && vblank_end >= vtotal) {
- u32 vsync = intel_de_read(dev_priv, vsync_reg);
+ u32 vsync = intel_de_read(dev_priv, VSYNC(pipe));
u32 vsync_start = (vsync & 0xffff) + 1;
vblank_start = vsync_start;
- intel_uncore_write(uncore,
- vblank_reg,
- (vblank_start - 1) |
- ((vblank_end - 1) << 16));
+ intel_de_write(dev_priv, VBLANK(pipe),
+ (vblank_start - 1) | ((vblank_end - 1) << 16));
restore_vblank = true;
}
/* sample in the vertical border, selecting the larger one */
@@ -759,10 +747,9 @@ intel_crt_load_detect(struct intel_crt *crt, u32 pipe)
/*
* Wait for the border to be displayed
*/
- while (intel_uncore_read(uncore, pipe_dsl_reg) >= vactive)
+ while (intel_de_read(dev_priv, PIPEDSL(pipe)) >= vactive)
;
- while ((dsl = intel_uncore_read(uncore, pipe_dsl_reg)) <=
- vsample)
+ while ((dsl = intel_de_read(dev_priv, PIPEDSL(pipe))) <= vsample)
;
/*
* Watch ST00 for an entire scanline
@@ -772,14 +759,14 @@ intel_crt_load_detect(struct intel_crt *crt, u32 pipe)
do {
count++;
/* Read the ST00 VGA status register */
- st00 = intel_uncore_read8(uncore, _VGA_MSR_WRITE);
+ st00 = intel_de_read8(dev_priv, _VGA_MSR_WRITE);
if (st00 & (1 << 4))
detect++;
- } while ((intel_uncore_read(uncore, pipe_dsl_reg) == dsl));
+ } while ((intel_de_read(dev_priv, PIPEDSL(pipe)) == dsl));
/* restore vblank if necessary */
if (restore_vblank)
- intel_uncore_write(uncore, vblank_reg, vblank);
+ intel_de_write(dev_priv, VBLANK(pipe), vblank);
/*
* If more than 3/4 of the scanline detected a monitor,
* then it is assumed to be present. This works even on i830,
@@ -792,7 +779,7 @@ intel_crt_load_detect(struct intel_crt *crt, u32 pipe)
}
/* Restore previous settings */
- intel_uncore_write(uncore, bclrpat_reg, save_bclrpat);
+ intel_de_write(dev_priv, BCLRPAT(pipe), save_bclrpat);
return status;
}
diff --git a/drivers/gpu/drm/i915/display/intel_crtc.c b/drivers/gpu/drm/i915/display/intel_crtc.c
index 037fc140b585..82be0fbe9934 100644
--- a/drivers/gpu/drm/i915/display/intel_crtc.c
+++ b/drivers/gpu/drm/i915/display/intel_crtc.c
@@ -28,6 +28,7 @@
#include "intel_pipe_crc.h"
#include "intel_psr.h"
#include "intel_sprite.h"
+#include "intel_vblank.h"
#include "intel_vrr.h"
#include "skl_universal_plane.h"
diff --git a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
index e3273fe8ddac..2422d6ef5777 100644
--- a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
+++ b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
@@ -3,6 +3,8 @@
* Copyright © 2022 Intel Corporation
*/
+#include <drm/drm_edid.h>
+
#include "i915_drv.h"
#include "intel_crtc_state_dump.h"
#include "intel_display_types.h"
@@ -56,6 +58,17 @@ intel_dump_dp_vsc_sdp(struct drm_i915_private *i915,
drm_dp_vsc_sdp_log(KERN_DEBUG, i915->drm.dev, vsc);
}
+static void
+intel_dump_buffer(struct drm_i915_private *i915,
+ const char *prefix, const u8 *buf, size_t len)
+{
+ if (!drm_debug_enabled(DRM_UT_KMS))
+ return;
+
+ print_hex_dump(KERN_DEBUG, prefix, DUMP_PREFIX_NONE,
+ 16, 0, buf, len, false);
+}
+
#define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x
static const char * const output_type_str[] = {
@@ -236,6 +249,10 @@ void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config,
intel_hdmi_infoframe_enable(DP_SDP_VSC))
intel_dump_dp_vsc_sdp(i915, &pipe_config->infoframes.vsc);
+ if (pipe_config->has_audio)
+ intel_dump_buffer(i915, "ELD: ", pipe_config->eld,
+ drm_eld_size(pipe_config->eld));
+
drm_dbg_kms(&i915->drm, "vrr: %s, vmin: %d, vmax: %d, pipeline full: %d, guardband: %d flipline: %d, vmin vblank: %d, vmax vblank: %d\n",
str_yes_no(pipe_config->vrr.enable),
pipe_config->vrr.vmin, pipe_config->vrr.vmax,
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index 0f1ec2a98cc8..254559abedfb 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -185,6 +185,8 @@ void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
static void intel_wait_ddi_buf_active(struct drm_i915_private *dev_priv,
enum port port)
{
+ enum phy phy = intel_port_to_phy(dev_priv, port);
+ int timeout_us;
int ret;
/* Wait > 518 usecs for DDI_BUF_CTL to be non idle */
@@ -193,8 +195,19 @@ static void intel_wait_ddi_buf_active(struct drm_i915_private *dev_priv,
return;
}
+ if (IS_DG2(dev_priv)) {
+ timeout_us = 1200;
+ } else if (DISPLAY_VER(dev_priv) >= 12) {
+ if (intel_phy_is_tc(dev_priv, phy))
+ timeout_us = 3000;
+ else
+ timeout_us = 1000;
+ } else {
+ timeout_us = 500;
+ }
+
ret = _wait_for(!(intel_de_read(dev_priv, DDI_BUF_CTL(port)) &
- DDI_BUF_IS_IDLE), IS_DG2(dev_priv) ? 1200 : 500, 10, 10);
+ DDI_BUF_IS_IDLE), timeout_us, 10, 10);
if (ret)
drm_err(&dev_priv->drm, "Timeout waiting for DDI BUF %c to get active\n",
@@ -2726,10 +2739,10 @@ static void intel_ddi_post_disable(struct intel_atomic_state *state,
if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST)) {
intel_crtc_vblank_off(old_crtc_state);
- intel_disable_transcoder(old_crtc_state);
-
intel_vrr_disable(old_crtc_state);
+ intel_disable_transcoder(old_crtc_state);
+
intel_ddi_disable_transcoder_func(old_crtc_state);
intel_dsc_disable(old_crtc_state);
@@ -2933,6 +2946,8 @@ static void intel_enable_ddi_hdmi(struct intel_atomic_state *state,
}
intel_de_write(dev_priv, DDI_BUF_CTL(port), buf_ctl);
+ intel_wait_ddi_buf_active(dev_priv, port);
+
intel_audio_codec_enable(encoder, crtc_state, conn_state);
}
@@ -2946,10 +2961,13 @@ static void intel_enable_ddi(struct intel_atomic_state *state,
if (!intel_crtc_is_bigjoiner_slave(crtc_state))
intel_ddi_enable_transcoder_func(encoder, crtc_state);
- intel_vrr_enable(encoder, crtc_state);
+ /* Enable/Disable DP2.0 SDP split config before transcoder */
+ intel_audio_sdp_split_update(encoder, crtc_state);
intel_enable_transcoder(crtc_state);
+ intel_vrr_enable(encoder, crtc_state);
+
intel_crtc_vblank_on(crtc_state);
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
@@ -3478,6 +3496,8 @@ static void intel_ddi_get_config(struct intel_encoder *encoder,
intel_read_dp_sdp(encoder, pipe_config, DP_SDP_VSC);
intel_psr_get_config(encoder, pipe_config);
+
+ intel_audio_codec_get_config(encoder, pipe_config);
}
void intel_ddi_get_clock(struct intel_encoder *encoder,
@@ -4305,7 +4325,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
}
if (intel_phy_is_snps(dev_priv, phy) &&
- dev_priv->snps_phy_failed_calibration & BIT(phy)) {
+ dev_priv->display.snps.phy_failed_calibration & BIT(phy)) {
drm_dbg_kms(&dev_priv->drm,
"SNPS PHY %c failed to calibrate, proceeding anyway\n",
phy_name(phy));
diff --git a/drivers/gpu/drm/i915/display/intel_de.h b/drivers/gpu/drm/i915/display/intel_de.h
index 9c104f65e4c8..42552d8c151e 100644
--- a/drivers/gpu/drm/i915/display/intel_de.h
+++ b/drivers/gpu/drm/i915/display/intel_de.h
@@ -16,6 +16,19 @@ intel_de_read(struct drm_i915_private *i915, i915_reg_t reg)
return intel_uncore_read(&i915->uncore, reg);
}
+static inline u8
+intel_de_read8(struct drm_i915_private *i915, i915_reg_t reg)
+{
+ return intel_uncore_read8(&i915->uncore, reg);
+}
+
+static inline u64
+intel_de_read64_2x32(struct drm_i915_private *i915,
+ i915_reg_t lower_reg, i915_reg_t upper_reg)
+{
+ return intel_uncore_read64_2x32(&i915->uncore, lower_reg, upper_reg);
+}
+
static inline void
intel_de_posting_read(struct drm_i915_private *i915, i915_reg_t reg)
{
@@ -28,10 +41,10 @@ intel_de_write(struct drm_i915_private *i915, i915_reg_t reg, u32 val)
intel_uncore_write(&i915->uncore, reg, val);
}
-static inline void
+static inline u32
intel_de_rmw(struct drm_i915_private *i915, i915_reg_t reg, u32 clear, u32 set)
{
- intel_uncore_rmw(&i915->uncore, reg, clear, set);
+ return intel_uncore_rmw(&i915->uncore, reg, clear, set);
}
static inline int
@@ -42,6 +55,23 @@ intel_de_wait_for_register(struct drm_i915_private *i915, i915_reg_t reg,
}
static inline int
+intel_de_wait_for_register_fw(struct drm_i915_private *i915, i915_reg_t reg,
+ u32 mask, u32 value, unsigned int timeout)
+{
+ return intel_wait_for_register_fw(&i915->uncore, reg, mask, value, timeout);
+}
+
+static inline int
+__intel_de_wait_for_register(struct drm_i915_private *i915, i915_reg_t reg,
+ u32 mask, u32 value,
+ unsigned int fast_timeout_us,
+ unsigned int slow_timeout_ms, u32 *out_value)
+{
+ return __intel_wait_for_register(&i915->uncore, reg, mask, value,
+ fast_timeout_us, slow_timeout_ms, out_value);
+}
+
+static inline int
intel_de_wait_for_set(struct drm_i915_private *i915, i915_reg_t reg,
u32 mask, unsigned int timeout)
{
@@ -81,4 +111,16 @@ intel_de_write_fw(struct drm_i915_private *i915, i915_reg_t reg, u32 val)
intel_uncore_write_fw(&i915->uncore, reg, val);
}
+static inline u32
+intel_de_read_notrace(struct drm_i915_private *i915, i915_reg_t reg)
+{
+ return intel_uncore_read_notrace(&i915->uncore, reg);
+}
+
+static inline void
+intel_de_write_notrace(struct drm_i915_private *i915, i915_reg_t reg, u32 val)
+{
+ intel_uncore_write_notrace(&i915->uncore, reg, val);
+}
+
#endif /* __INTEL_DE_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 6c2686ecb62a..d3994e2a7d63 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -24,15 +24,15 @@
* Eric Anholt <eric@anholt.net>
*/
-#include <acpi/video.h>
+#include <linux/dma-resv.h>
#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/dma-resv.h>
#include <linux/slab.h>
#include <linux/string_helpers.h>
#include <linux/vga_switcheroo.h>
+#include <acpi/video.h>
#include <drm/display/drm_dp_helper.h>
#include <drm/drm_atomic.h>
@@ -45,65 +45,57 @@
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>
-#include "display/intel_audio.h"
-#include "display/intel_crt.h"
-#include "display/intel_ddi.h"
-#include "display/intel_display_debugfs.h"
-#include "display/intel_display_power.h"
-#include "display/intel_dp.h"
-#include "display/intel_dp_mst.h"
-#include "display/intel_dpll.h"
-#include "display/intel_dpll_mgr.h"
-#include "display/intel_drrs.h"
-#include "display/intel_dsi.h"
-#include "display/intel_dvo.h"
-#include "display/intel_fb.h"
-#include "display/intel_gmbus.h"
-#include "display/intel_hdmi.h"
-#include "display/intel_lvds.h"
-#include "display/intel_sdvo.h"
-#include "display/intel_snps_phy.h"
-#include "display/intel_tv.h"
-#include "display/intel_vdsc.h"
-#include "display/intel_vrr.h"
-
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_object.h"
-#include "gt/gen8_ppgtt.h"
-
#include "g4x_dp.h"
#include "g4x_hdmi.h"
#include "hsw_ips.h"
#include "i915_drv.h"
#include "i915_reg.h"
#include "i915_utils.h"
+#include "i9xx_plane.h"
#include "icl_dsi.h"
#include "intel_acpi.h"
#include "intel_atomic.h"
#include "intel_atomic_plane.h"
+#include "intel_audio.h"
#include "intel_bw.h"
#include "intel_cdclk.h"
#include "intel_color.h"
+#include "intel_crt.h"
#include "intel_crtc.h"
#include "intel_crtc_state_dump.h"
+#include "intel_ddi.h"
#include "intel_de.h"
+#include "intel_display_debugfs.h"
+#include "intel_display_power.h"
#include "intel_display_types.h"
#include "intel_dmc.h"
+#include "intel_dp.h"
#include "intel_dp_link_training.h"
+#include "intel_dp_mst.h"
#include "intel_dpio_phy.h"
+#include "intel_dpll.h"
+#include "intel_dpll_mgr.h"
#include "intel_dpt.h"
-#include "intel_dsb.h"
+#include "intel_drrs.h"
+#include "intel_dsi.h"
+#include "intel_dvo.h"
+#include "intel_fb.h"
#include "intel_fbc.h"
#include "intel_fbdev.h"
#include "intel_fdi.h"
#include "intel_fifo_underrun.h"
#include "intel_frontbuffer.h"
+#include "intel_gmbus.h"
#include "intel_hdcp.h"
+#include "intel_hdmi.h"
#include "intel_hotplug.h"
#include "intel_hti.h"
-#include "intel_modeset_verify.h"
+#include "intel_lvds.h"
#include "intel_modeset_setup.h"
+#include "intel_modeset_verify.h"
#include "intel_overlay.h"
#include "intel_panel.h"
#include "intel_pch_display.h"
@@ -115,10 +107,15 @@
#include "intel_pps.h"
#include "intel_psr.h"
#include "intel_quirks.h"
+#include "intel_sdvo.h"
+#include "intel_snps_phy.h"
#include "intel_sprite.h"
#include "intel_tc.h"
+#include "intel_tv.h"
+#include "intel_vblank.h"
+#include "intel_vdsc.h"
#include "intel_vga.h"
-#include "i9xx_plane.h"
+#include "intel_vrr.h"
#include "skl_scaler.h"
#include "skl_universal_plane.h"
#include "skl_watermark.h"
@@ -388,41 +385,6 @@ struct intel_crtc *intel_master_crtc(const struct intel_crtc_state *crtc_state)
return to_intel_crtc(crtc_state->uapi.crtc);
}
-static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
- enum pipe pipe)
-{
- i915_reg_t reg = PIPEDSL(pipe);
- u32 line1, line2;
-
- line1 = intel_de_read(dev_priv, reg) & PIPEDSL_LINE_MASK;
- msleep(5);
- line2 = intel_de_read(dev_priv, reg) & PIPEDSL_LINE_MASK;
-
- return line1 != line2;
-}
-
-static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum pipe pipe = crtc->pipe;
-
- /* Wait for the display line to settle/start moving */
- if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
- drm_err(&dev_priv->drm,
- "pipe %c scanline %s wait timed out\n",
- pipe_name(pipe), str_on_off(state));
-}
-
-static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc)
-{
- wait_for_pipe_scanline_moving(crtc, false);
-}
-
-static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
-{
- wait_for_pipe_scanline_moving(crtc, true);
-}
-
static void
intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
{
@@ -1098,22 +1060,6 @@ intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
return encoder;
}
-static void cpt_verify_modeset(struct drm_i915_private *dev_priv,
- enum pipe pipe)
-{
- i915_reg_t dslreg = PIPEDSL(pipe);
- u32 temp;
-
- temp = intel_de_read(dev_priv, dslreg);
- udelay(500);
- if (wait_for(intel_de_read(dev_priv, dslreg) != temp, 5)) {
- if (wait_for(intel_de_read(dev_priv, dslreg) != temp, 5))
- drm_err(&dev_priv->drm,
- "mode set failed: pipe %c stuck\n",
- pipe_name(pipe));
- }
-}
-
static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
@@ -1246,7 +1192,6 @@ static void intel_post_plane_update(struct intel_atomic_state *state,
if (new_crtc_state->update_wm_post && new_crtc_state->hw.active)
intel_update_watermarks(dev_priv);
- hsw_ips_post_update(state, crtc);
intel_fbc_post_update(state, crtc);
if (needs_async_flip_vtd_wa(old_crtc_state) &&
@@ -1809,7 +1754,7 @@ static void ilk_crtc_enable(struct intel_atomic_state *state,
intel_encoders_enable(state, crtc);
if (HAS_PCH_CPT(dev_priv))
- cpt_verify_modeset(dev_priv, pipe);
+ intel_wait_for_pipe_scanline_moving(crtc);
/*
* Must wait for vblank to avoid spurious PCH FIFO underruns.
@@ -1922,6 +1867,8 @@ static void hsw_crtc_enable(struct intel_atomic_state *state,
if (drm_WARN_ON(&dev_priv->drm, crtc->active))
return;
+ intel_dmc_enable_pipe(dev_priv, crtc->pipe);
+
if (!new_crtc_state->bigjoiner_pipes) {
intel_encoders_pre_pll_enable(state, crtc);
@@ -2057,6 +2004,7 @@ static void hsw_crtc_disable(struct intel_atomic_state *state,
{
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
/*
* FIXME collapse everything to one hook.
@@ -2066,6 +2014,8 @@ static void hsw_crtc_disable(struct intel_atomic_state *state,
intel_encoders_disable(state, crtc);
intel_encoders_post_disable(state, crtc);
}
+
+ intel_dmc_disable_pipe(i915, crtc->pipe);
}
static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
@@ -3296,7 +3246,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
if (DISPLAY_VER(dev_priv) >= 4) {
/* No way to read it out on pipes B and C */
if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
- tmp = dev_priv->chv_dpll_md[crtc->pipe];
+ tmp = dev_priv->display.state.chv_dpll_md[crtc->pipe];
else
tmp = intel_de_read(dev_priv, DPLL_MD(crtc->pipe));
pipe_config->pixel_multiplier =
@@ -5433,6 +5383,12 @@ intel_compare_dp_vsc_sdp(const struct drm_dp_vsc_sdp *a,
return memcmp(a, b, sizeof(*a)) == 0;
}
+static bool
+intel_compare_buffer(const u8 *a, const u8 *b, size_t len)
+{
+ return memcmp(a, b, len) == 0;
+}
+
static void
pipe_config_infoframe_mismatch(struct drm_i915_private *dev_priv,
bool fastset, const char *name,
@@ -5483,6 +5439,30 @@ pipe_config_dp_vsc_sdp_mismatch(struct drm_i915_private *dev_priv,
}
}
+static void
+pipe_config_buffer_mismatch(struct drm_i915_private *dev_priv,
+ bool fastset, const char *name,
+ const u8 *a, const u8 *b, size_t len)
+{
+ if (fastset) {
+ if (!drm_debug_enabled(DRM_UT_KMS))
+ return;
+
+ drm_dbg_kms(&dev_priv->drm,
+ "fastset mismatch in %s buffer\n", name);
+ print_hex_dump(KERN_DEBUG, "expected: ", DUMP_PREFIX_NONE,
+ 16, 0, a, len, false);
+ print_hex_dump(KERN_DEBUG, "found: ", DUMP_PREFIX_NONE,
+ 16, 0, b, len, false);
+ } else {
+ drm_err(&dev_priv->drm, "mismatch in %s buffer\n", name);
+ print_hex_dump(KERN_ERR, "expected: ", DUMP_PREFIX_NONE,
+ 16, 0, a, len, false);
+ print_hex_dump(KERN_ERR, "found: ", DUMP_PREFIX_NONE,
+ 16, 0, b, len, false);
+ }
+}
+
static void __printf(4, 5)
pipe_config_mismatch(bool fastset, const struct intel_crtc *crtc,
const char *name, const char *format, ...)
@@ -5531,7 +5511,6 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
struct drm_i915_private *dev_priv = to_i915(current_config->uapi.crtc->dev);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
bool ret = true;
- u32 bp_gamma = 0;
bool fixup_inherited = fastset &&
current_config->inherited && !pipe_config->inherited;
@@ -5682,21 +5661,26 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
} \
} while (0)
-#define PIPE_CONF_CHECK_COLOR_LUT(name1, name2, bit_precision) do { \
- if (current_config->name1 != pipe_config->name1) { \
- pipe_config_mismatch(fastset, crtc, __stringify(name1), \
- "(expected %i, found %i, won't compare lut values)", \
- current_config->name1, \
- pipe_config->name1); \
- ret = false;\
- } else { \
- if (!intel_color_lut_equal(current_config->name2, \
- pipe_config->name2, pipe_config->name1, \
- bit_precision)) { \
- pipe_config_mismatch(fastset, crtc, __stringify(name2), \
- "hw_state doesn't match sw_state"); \
- ret = false; \
- } \
+#define PIPE_CONF_CHECK_BUFFER(name, len) do { \
+ BUILD_BUG_ON(sizeof(current_config->name) != (len)); \
+ BUILD_BUG_ON(sizeof(pipe_config->name) != (len)); \
+ if (!intel_compare_buffer(current_config->name, pipe_config->name, (len))) { \
+ pipe_config_buffer_mismatch(dev_priv, fastset, __stringify(name), \
+ current_config->name, \
+ pipe_config->name, \
+ (len)); \
+ ret = false; \
+ } \
+} while (0)
+
+#define PIPE_CONF_CHECK_COLOR_LUT(lut, is_pre_csc_lut) do { \
+ if (current_config->gamma_mode == pipe_config->gamma_mode && \
+ !intel_color_lut_equal(current_config, \
+ current_config->lut, pipe_config->lut, \
+ is_pre_csc_lut)) { \
+ pipe_config_mismatch(fastset, crtc, __stringify(lut), \
+ "hw_state doesn't match sw_state"); \
+ ret = false; \
} \
} while (0)
@@ -5760,6 +5744,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_BOOL(fec_enable);
PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);
+ PIPE_CONF_CHECK_BUFFER(eld, MAX_ELD_BYTES);
PIPE_CONF_CHECK_X(gmch_pfit.control);
/* pfit ratios are autocomputed by the hw on gen4+ */
@@ -5793,9 +5778,8 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_I(linetime);
PIPE_CONF_CHECK_I(ips_linetime);
- bp_gamma = intel_color_get_gamma_bit_precision(pipe_config);
- if (bp_gamma)
- PIPE_CONF_CHECK_COLOR_LUT(gamma_mode, post_csc_lut, bp_gamma);
+ PIPE_CONF_CHECK_COLOR_LUT(pre_csc_lut, true);
+ PIPE_CONF_CHECK_COLOR_LUT(post_csc_lut, false);
if (current_config->active_planes) {
PIPE_CONF_CHECK_BOOL(has_psr);
@@ -5950,6 +5934,10 @@ int intel_modeset_all_pipes(struct intel_atomic_state *state,
if (ret)
return ret;
+ ret = intel_dp_mst_add_topology_state_for_crtc(state, crtc);
+ if (ret)
+ return ret;
+
ret = intel_atomic_add_affected_planes(state, crtc);
if (ret)
return ret;
@@ -6941,7 +6929,7 @@ static int intel_atomic_prepare_commit(struct intel_atomic_state *state)
for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
if (intel_crtc_needs_color_update(crtc_state))
- intel_dsb_prepare(crtc_state);
+ intel_color_prepare_commit(crtc_state);
}
return 0;
@@ -7392,24 +7380,18 @@ static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_stat
&wait_reset);
}
-static void intel_cleanup_dsbs(struct intel_atomic_state *state)
-{
- struct intel_crtc_state *old_crtc_state, *new_crtc_state;
- struct intel_crtc *crtc;
- int i;
-
- for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
- new_crtc_state, i)
- intel_dsb_cleanup(old_crtc_state);
-}
-
static void intel_atomic_cleanup_work(struct work_struct *work)
{
struct intel_atomic_state *state =
container_of(work, struct intel_atomic_state, base.commit_work);
struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_crtc_state *old_crtc_state;
+ struct intel_crtc *crtc;
+ int i;
+
+ for_each_old_intel_crtc_in_state(state, crtc, old_crtc_state, i)
+ intel_color_cleanup_commit(old_crtc_state);
- intel_cleanup_dsbs(state);
drm_atomic_helper_cleanup_planes(&i915->drm, &state->base);
drm_atomic_helper_commit_cleanup_done(&state->base);
drm_atomic_state_put(&state->base);
@@ -7587,6 +7569,9 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);
+ /* Must be done after gamma readout due to HSW split gamma vs. IPS w/a */
+ hsw_ips_post_update(state, crtc);
+
/*
* Activate DRRS after state readout to avoid
* dp_m_n vs. dp_m2_n2 confusion on BDW+.
@@ -7597,6 +7582,8 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
* DSB cleanup is done in cleanup_work aligning with framebuffer
* cleanup. So copy and reset the dsb structure to sync with
* commit_done and later do dsb cleanup in cleanup_work.
+ *
+ * FIXME get rid of this funny new->old swapping
*/
old_crtc_state->dsb = fetch_and_zero(&new_crtc_state->dsb);
}
@@ -7747,7 +7734,7 @@ static int intel_atomic_commit(struct drm_device *dev,
i915_sw_fence_commit(&state->commit_ready);
for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
- intel_dsb_cleanup(new_crtc_state);
+ intel_color_cleanup_commit(new_crtc_state);
drm_atomic_helper_cleanup_planes(dev, &state->base);
intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h
index 714030136b7f..cb6f520cc575 100644
--- a/drivers/gpu/drm/i915/display/intel_display.h
+++ b/drivers/gpu/drm/i915/display/intel_display.h
@@ -28,6 +28,7 @@
#include <drm/drm_util.h>
#include "i915_reg_defs.h"
+#include "intel_display_limits.h"
enum drm_scaling_filter;
struct dpll;
@@ -62,51 +63,9 @@ struct intel_remapped_info;
struct intel_rotation_info;
struct pci_dev;
-/*
- * Keep the pipe enum values fixed: the code assumes that PIPE_A=0, the
- * rest have consecutive values and match the enum values of transcoders
- * with a 1:1 transcoder -> pipe mapping.
- */
-enum pipe {
- INVALID_PIPE = -1,
-
- PIPE_A = 0,
- PIPE_B,
- PIPE_C,
- PIPE_D,
- _PIPE_EDP,
-
- I915_MAX_PIPES = _PIPE_EDP
-};
#define pipe_name(p) ((p) + 'A')
-enum transcoder {
- INVALID_TRANSCODER = -1,
- /*
- * The following transcoders have a 1:1 transcoder -> pipe mapping,
- * keep their values fixed: the code assumes that TRANSCODER_A=0, the
- * rest have consecutive values and match the enum values of the pipes
- * they map to.
- */
- TRANSCODER_A = PIPE_A,
- TRANSCODER_B = PIPE_B,
- TRANSCODER_C = PIPE_C,
- TRANSCODER_D = PIPE_D,
-
- /*
- * The following transcoders can map to any pipe, their enum value
- * doesn't need to stay fixed.
- */
- TRANSCODER_EDP,
- TRANSCODER_DSI_0,
- TRANSCODER_DSI_1,
- TRANSCODER_DSI_A = TRANSCODER_DSI_0, /* legacy DSI */
- TRANSCODER_DSI_C = TRANSCODER_DSI_1, /* legacy DSI */
-
- I915_MAX_TRANSCODERS
-};
-
static inline const char *transcoder_name(enum transcoder transcoder)
{
switch (transcoder) {
@@ -147,29 +106,6 @@ enum i9xx_plane_id {
#define plane_name(p) ((p) + 'A')
#define sprite_name(p, s) ((p) * RUNTIME_INFO(dev_priv)->num_sprites[(p)] + (s) + 'A')
-/*
- * Per-pipe plane identifier.
- * I915_MAX_PLANES in the enum below is the maximum (across all platforms)
- * number of planes per CRTC. Not all platforms really have this many planes,
- * which means some arrays of size I915_MAX_PLANES may have unused entries
- * between the topmost sprite plane and the cursor plane.
- *
- * This is expected to be passed to various register macros
- * (eg. PLANE_CTL(), PS_PLANE_SEL(), etc.) so adjust with care.
- */
-enum plane_id {
- PLANE_PRIMARY,
- PLANE_SPRITE0,
- PLANE_SPRITE1,
- PLANE_SPRITE2,
- PLANE_SPRITE3,
- PLANE_SPRITE4,
- PLANE_SPRITE5,
- PLANE_CURSOR,
-
- I915_MAX_PLANES,
-};
-
#define for_each_plane_id_on_crtc(__crtc, __p) \
for ((__p) = PLANE_PRIMARY; (__p) < I915_MAX_PLANES; (__p)++) \
for_each_if((__crtc)->plane_ids_mask & BIT(__p))
@@ -182,34 +118,6 @@ enum plane_id {
for_each_dbuf_slice((__dev_priv), (__slice)) \
for_each_if((__mask) & BIT(__slice))
-enum port {
- PORT_NONE = -1,
-
- PORT_A = 0,
- PORT_B,
- PORT_C,
- PORT_D,
- PORT_E,
- PORT_F,
- PORT_G,
- PORT_H,
- PORT_I,
-
- /* tgl+ */
- PORT_TC1 = PORT_D,
- PORT_TC2,
- PORT_TC3,
- PORT_TC4,
- PORT_TC5,
- PORT_TC6,
-
- /* XE_LPD repositions D/E offsets and bitfields */
- PORT_D_XELPD = PORT_TC5,
- PORT_E_XELPD,
-
- I915_MAX_PORTS
-};
-
#define port_name(p) ((p) + 'A')
/*
@@ -312,27 +220,6 @@ enum phy_fia {
FIA3,
};
-enum hpd_pin {
- HPD_NONE = 0,
- HPD_TV = HPD_NONE, /* TV is known to be unreliable */
- HPD_CRT,
- HPD_SDVO_B,
- HPD_SDVO_C,
- HPD_PORT_A,
- HPD_PORT_B,
- HPD_PORT_C,
- HPD_PORT_D,
- HPD_PORT_E,
- HPD_PORT_TC1,
- HPD_PORT_TC2,
- HPD_PORT_TC3,
- HPD_PORT_TC4,
- HPD_PORT_TC5,
- HPD_PORT_TC6,
-
- HPD_NUM_PINS
-};
-
#define for_each_hpd_pin(__pin) \
for ((__pin) = (HPD_NONE + 1); (__pin) < HPD_NUM_PINS; (__pin)++)
@@ -440,6 +327,14 @@ enum hpd_pin {
(__i)++) \
for_each_if(plane)
+#define for_each_old_intel_crtc_in_state(__state, crtc, old_crtc_state, __i) \
+ for ((__i) = 0; \
+ (__i) < (__state)->base.dev->mode_config.num_crtc && \
+ ((crtc) = to_intel_crtc((__state)->base.crtcs[__i].ptr), \
+ (old_crtc_state) = to_intel_crtc_state((__state)->base.crtcs[__i].old_state), 1); \
+ (__i)++) \
+ for_each_if(crtc)
+
#define for_each_new_intel_plane_in_state(__state, plane, new_plane_state, __i) \
for ((__i) = 0; \
(__i) < (__state)->base.dev->mode_config.num_total_plane && \
diff --git a/drivers/gpu/drm/i915/display/intel_display_core.h b/drivers/gpu/drm/i915/display/intel_display_core.h
index 57ddce3ba02b..fb8670aa2932 100644
--- a/drivers/gpu/drm/i915/display/intel_display_core.h
+++ b/drivers/gpu/drm/i915/display/intel_display_core.h
@@ -17,7 +17,7 @@
#include <drm/drm_modeset_lock.h>
#include "intel_cdclk.h"
-#include "intel_display.h"
+#include "intel_display_limits.h"
#include "intel_display_power.h"
#include "intel_dmc.h"
#include "intel_dpll_mgr.h"
@@ -87,6 +87,11 @@ struct intel_wm_funcs {
int (*compute_global_watermarks)(struct intel_atomic_state *state);
};
+struct intel_audio_state {
+ struct intel_encoder *encoder;
+ u8 eld[MAX_ELD_BYTES];
+};
+
struct intel_audio {
/* hda/i915 audio component */
struct i915_audio_component *component;
@@ -96,8 +101,8 @@ struct intel_audio {
int power_refcount;
u32 freq_cntrl;
- /* Used to save the pipe-to-encoder mapping for audio */
- struct intel_encoder *encoder_map[I915_MAX_PIPES];
+ /* current audio state for the audio component hooks */
+ struct intel_audio_state state[I915_MAX_PIPES];
/* necessary resource sharing with HDMI LPE audio driver. */
struct {
@@ -122,6 +127,11 @@ struct intel_dpll {
int nssc;
int ssc;
} ref_clks;
+
+ /*
+ * Bitmask of PLLs using the PCH SSC, indexed using enum intel_dpll_id.
+ */
+ u8 pch_ssc_use;
};
struct intel_frontbuffer_tracking {
@@ -429,6 +439,24 @@ struct intel_display {
} sagv;
struct {
+ /*
+ * DG2: Mask of PHYs that were not calibrated by the firmware
+ * and should not be used.
+ */
+ u8 phy_failed_calibration;
+ } snps;
+
+ struct {
+ /*
+ * Shadows for CHV DPLL_MD regs to keep the state
+ * checker somewhat working in the presence hardware
+ * crappiness (can't read out DPLL_MD for pipes B & C).
+ */
+ u32 chv_dpll_md[I915_MAX_PIPES];
+ u32 bxt_phy_grc;
+ } state;
+
+ struct {
/* ordered wq for modesets */
struct workqueue_struct *modeset;
diff --git a/drivers/gpu/drm/i915/display/intel_display_limits.h b/drivers/gpu/drm/i915/display/intel_display_limits.h
new file mode 100644
index 000000000000..5126d0b5ae5d
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_display_limits.h
@@ -0,0 +1,124 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef __INTEL_DISPLAY_LIMITS_H__
+#define __INTEL_DISPLAY_LIMITS_H__
+
+/*
+ * Keep the pipe enum values fixed: the code assumes that PIPE_A=0, the
+ * rest have consecutive values and match the enum values of transcoders
+ * with a 1:1 transcoder -> pipe mapping.
+ */
+enum pipe {
+ INVALID_PIPE = -1,
+
+ PIPE_A = 0,
+ PIPE_B,
+ PIPE_C,
+ PIPE_D,
+ _PIPE_EDP,
+
+ I915_MAX_PIPES = _PIPE_EDP
+};
+
+enum transcoder {
+ INVALID_TRANSCODER = -1,
+ /*
+ * The following transcoders have a 1:1 transcoder -> pipe mapping,
+ * keep their values fixed: the code assumes that TRANSCODER_A=0, the
+ * rest have consecutive values and match the enum values of the pipes
+ * they map to.
+ */
+ TRANSCODER_A = PIPE_A,
+ TRANSCODER_B = PIPE_B,
+ TRANSCODER_C = PIPE_C,
+ TRANSCODER_D = PIPE_D,
+
+ /*
+ * The following transcoders can map to any pipe, their enum value
+ * doesn't need to stay fixed.
+ */
+ TRANSCODER_EDP,
+ TRANSCODER_DSI_0,
+ TRANSCODER_DSI_1,
+ TRANSCODER_DSI_A = TRANSCODER_DSI_0, /* legacy DSI */
+ TRANSCODER_DSI_C = TRANSCODER_DSI_1, /* legacy DSI */
+
+ I915_MAX_TRANSCODERS
+};
+
+/*
+ * Per-pipe plane identifier.
+ * I915_MAX_PLANES in the enum below is the maximum (across all platforms)
+ * number of planes per CRTC. Not all platforms really have this many planes,
+ * which means some arrays of size I915_MAX_PLANES may have unused entries
+ * between the topmost sprite plane and the cursor plane.
+ *
+ * This is expected to be passed to various register macros
+ * (eg. PLANE_CTL(), PS_PLANE_SEL(), etc.) so adjust with care.
+ */
+enum plane_id {
+ PLANE_PRIMARY,
+ PLANE_SPRITE0,
+ PLANE_SPRITE1,
+ PLANE_SPRITE2,
+ PLANE_SPRITE3,
+ PLANE_SPRITE4,
+ PLANE_SPRITE5,
+ PLANE_CURSOR,
+
+ I915_MAX_PLANES,
+};
+
+enum port {
+ PORT_NONE = -1,
+
+ PORT_A = 0,
+ PORT_B,
+ PORT_C,
+ PORT_D,
+ PORT_E,
+ PORT_F,
+ PORT_G,
+ PORT_H,
+ PORT_I,
+
+ /* tgl+ */
+ PORT_TC1 = PORT_D,
+ PORT_TC2,
+ PORT_TC3,
+ PORT_TC4,
+ PORT_TC5,
+ PORT_TC6,
+
+ /* XE_LPD repositions D/E offsets and bitfields */
+ PORT_D_XELPD = PORT_TC5,
+ PORT_E_XELPD,
+
+ I915_MAX_PORTS
+};
+
+enum hpd_pin {
+ HPD_NONE = 0,
+ HPD_TV = HPD_NONE, /* TV is known to be unreliable */
+ HPD_CRT,
+ HPD_SDVO_B,
+ HPD_SDVO_C,
+ HPD_PORT_A,
+ HPD_PORT_B,
+ HPD_PORT_C,
+ HPD_PORT_D,
+ HPD_PORT_E,
+ HPD_PORT_TC1,
+ HPD_PORT_TC2,
+ HPD_PORT_TC3,
+ HPD_PORT_TC4,
+ HPD_PORT_TC5,
+ HPD_PORT_TC6,
+
+ HPD_NUM_PINS
+};
+
+#endif /* __INTEL_DISPLAY_LIMITS_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index 3adba64937de..1a23ecd4623a 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -1673,7 +1673,7 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv,
if (DISPLAY_VER(dev_priv) >= 12) {
val = DCPR_CLEAR_MEMSTAT_DIS | DCPR_SEND_RESP_IMM |
DCPR_MASK_LPMODE | DCPR_MASK_MAXLATENCY_MEMUP_CLR;
- intel_uncore_rmw(&dev_priv->uncore, GEN11_CHICKEN_DCPR_2, 0, val);
+ intel_de_rmw(dev_priv, GEN11_CHICKEN_DCPR_2, 0, val);
}
/* Wa_14011503030:xelpd */
diff --git a/drivers/gpu/drm/i915/display/intel_display_power_map.c b/drivers/gpu/drm/i915/display/intel_display_power_map.c
index f5d66ca85b19..6645eb1911d8 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power_map.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power_map.c
@@ -10,6 +10,7 @@
#include "intel_display_power_map.h"
#include "intel_display_power_well.h"
+#include "intel_display_types.h"
#define __LIST_INLINE_ELEMS(__elem_type, ...) \
((__elem_type[]) { __VA_ARGS__ })
diff --git a/drivers/gpu/drm/i915/display/intel_display_trace.h b/drivers/gpu/drm/i915/display/intel_display_trace.h
index 725aba3fa531..651ea8564e1b 100644
--- a/drivers/gpu/drm/i915/display/intel_display_trace.h
+++ b/drivers/gpu/drm/i915/display/intel_display_trace.h
@@ -17,6 +17,7 @@
#include "i915_irq.h"
#include "intel_crtc.h"
#include "intel_display_types.h"
+#include "intel_vblank.h"
#define __dev_name_i915(i915) dev_name((i915)->drm.dev)
#define __dev_name_kms(obj) dev_name((obj)->base.dev->dev)
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index f07395065a69..54c517ca9632 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -50,6 +50,7 @@
#include "i915_vma_types.h"
#include "intel_bios.h"
#include "intel_display.h"
+#include "intel_display_limits.h"
#include "intel_display_power.h"
#include "intel_dpll_mgr.h"
#include "intel_pm_types.h"
@@ -262,8 +263,6 @@ struct intel_encoder {
enum hpd_pin hpd_pin;
enum intel_display_power_domain power_domain;
- /* for communication with audio component; protected by av_mutex */
- const struct drm_connector *audio_connector;
/* VBT information for this encoder (may be NULL for older platforms) */
const struct intel_bios_encoder_data *devdata;
@@ -291,7 +290,7 @@ struct intel_vbt_panel_data {
struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
/* Feature bits */
- unsigned int panel_type:4;
+ int panel_type;
unsigned int lvds_dither:1;
unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
@@ -330,7 +329,7 @@ struct intel_vbt_panel_data {
bool present;
bool active_low_pwm;
u8 min_brightness; /* min_brightness/255 of max */
- u8 controller; /* brightness controller number */
+ s8 controller; /* brightness controller number */
enum intel_backlight_type type;
} backlight;
@@ -351,6 +350,9 @@ struct intel_vbt_panel_data {
};
struct intel_panel {
+ /* Fixed EDID for eDP and LVDS. May hold ERR_PTR for invalid EDID. */
+ const struct drm_edid *fixed_edid;
+
struct list_head fixed_modes;
/* backlight */
@@ -591,9 +593,8 @@ struct intel_connector {
/* Panel info for eDP and LVDS */
struct intel_panel panel;
- /* Cached EDID for eDP and LVDS. May hold ERR_PTR for invalid EDID. */
- struct edid *edid;
- struct edid *detect_edid;
+ /* Cached EDID for detect. */
+ const struct drm_edid *detect_edid;
/* Number of times hotplug detection was tried after an HPD interrupt */
int hotplug_retries;
@@ -1261,6 +1262,8 @@ struct intel_crtc_state {
struct drm_dp_vsc_sdp vsc;
} infoframes;
+ u8 eld[MAX_ELD_BYTES];
+
/* HDMI scrambling status */
bool hdmi_scrambling;
@@ -1295,6 +1298,8 @@ struct intel_crtc_state {
/* Forward Error correction State */
bool fec_enable;
+ bool sdp_split_enable;
+
/* Pointer to master transcoder in case of tiled displays */
enum transcoder master_transcoder;
@@ -1568,11 +1573,19 @@ struct intel_pps {
ktime_t panel_power_off_time;
intel_wakeref_t vdd_wakeref;
- /*
- * Pipe whose power sequencer is currently locked into
- * this port. Only relevant on VLV/CHV.
- */
- enum pipe pps_pipe;
+ union {
+ /*
+ * Pipe whose power sequencer is currently locked into
+ * this port. Only relevant on VLV/CHV.
+ */
+ enum pipe pps_pipe;
+
+ /*
+ * Power sequencer index. Only relevant on BXT+.
+ */
+ int pps_idx;
+ };
+
/*
* Pipe currently driving the port. Used for preventing
* the use of the PPS for any pipe currentrly driving
@@ -1581,7 +1594,7 @@ struct intel_pps {
enum pipe active_pipe;
/*
* Set if the sequencer may be reset due to a power transition,
- * requiring a reinitialization. Only relevant on BXT.
+ * requiring a reinitialization. Only relevant on BXT+.
*/
bool pps_reset;
struct edp_power_seq pps_delays;
diff --git a/drivers/gpu/drm/i915/display/intel_dmc.c b/drivers/gpu/drm/i915/display/intel_dmc.c
index eff3add70611..257aa2b7cf20 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc.c
+++ b/drivers/gpu/drm/i915/display/intel_dmc.c
@@ -42,62 +42,61 @@
#define DMC_VERSION_MAJOR(version) ((version) >> 16)
#define DMC_VERSION_MINOR(version) ((version) & 0xffff)
-#define DMC_PATH(platform, major, minor) \
- "i915/" \
- __stringify(platform) "_dmc_ver" \
- __stringify(major) "_" \
+#define DMC_PATH(platform) \
+ "i915/" __stringify(platform) "_dmc.bin"
+
+/*
+ * New DMC additions should not use this. This is used solely to remain
+ * compatible with systems that have not yet updated DMC blobs to use
+ * unversioned file names.
+ */
+#define DMC_LEGACY_PATH(platform, major, minor) \
+ "i915/" \
+ __stringify(platform) "_dmc_ver" \
+ __stringify(major) "_" \
__stringify(minor) ".bin"
#define DISPLAY_VER13_DMC_MAX_FW_SIZE 0x20000
#define DISPLAY_VER12_DMC_MAX_FW_SIZE ICL_DMC_MAX_FW_SIZE
-#define DG2_DMC_PATH DMC_PATH(dg2, 2, 08)
-#define DG2_DMC_VERSION_REQUIRED DMC_VERSION(2, 8)
+#define DG2_DMC_PATH DMC_LEGACY_PATH(dg2, 2, 08)
MODULE_FIRMWARE(DG2_DMC_PATH);
-#define ADLP_DMC_PATH DMC_PATH(adlp, 2, 16)
-#define ADLP_DMC_VERSION_REQUIRED DMC_VERSION(2, 16)
+#define ADLP_DMC_PATH DMC_PATH(adlp)
+#define ADLP_DMC_FALLBACK_PATH DMC_LEGACY_PATH(adlp, 2, 16)
MODULE_FIRMWARE(ADLP_DMC_PATH);
+MODULE_FIRMWARE(ADLP_DMC_FALLBACK_PATH);
-#define ADLS_DMC_PATH DMC_PATH(adls, 2, 01)
-#define ADLS_DMC_VERSION_REQUIRED DMC_VERSION(2, 1)
+#define ADLS_DMC_PATH DMC_LEGACY_PATH(adls, 2, 01)
MODULE_FIRMWARE(ADLS_DMC_PATH);
-#define DG1_DMC_PATH DMC_PATH(dg1, 2, 02)
-#define DG1_DMC_VERSION_REQUIRED DMC_VERSION(2, 2)
+#define DG1_DMC_PATH DMC_LEGACY_PATH(dg1, 2, 02)
MODULE_FIRMWARE(DG1_DMC_PATH);
-#define RKL_DMC_PATH DMC_PATH(rkl, 2, 03)
-#define RKL_DMC_VERSION_REQUIRED DMC_VERSION(2, 3)
+#define RKL_DMC_PATH DMC_LEGACY_PATH(rkl, 2, 03)
MODULE_FIRMWARE(RKL_DMC_PATH);
-#define TGL_DMC_PATH DMC_PATH(tgl, 2, 12)
-#define TGL_DMC_VERSION_REQUIRED DMC_VERSION(2, 12)
+#define TGL_DMC_PATH DMC_LEGACY_PATH(tgl, 2, 12)
MODULE_FIRMWARE(TGL_DMC_PATH);
-#define ICL_DMC_PATH DMC_PATH(icl, 1, 09)
-#define ICL_DMC_VERSION_REQUIRED DMC_VERSION(1, 9)
+#define ICL_DMC_PATH DMC_LEGACY_PATH(icl, 1, 09)
#define ICL_DMC_MAX_FW_SIZE 0x6000
MODULE_FIRMWARE(ICL_DMC_PATH);
-#define GLK_DMC_PATH DMC_PATH(glk, 1, 04)
-#define GLK_DMC_VERSION_REQUIRED DMC_VERSION(1, 4)
+#define GLK_DMC_PATH DMC_LEGACY_PATH(glk, 1, 04)
#define GLK_DMC_MAX_FW_SIZE 0x4000
MODULE_FIRMWARE(GLK_DMC_PATH);
-#define KBL_DMC_PATH DMC_PATH(kbl, 1, 04)
-#define KBL_DMC_VERSION_REQUIRED DMC_VERSION(1, 4)
+#define KBL_DMC_PATH DMC_LEGACY_PATH(kbl, 1, 04)
#define KBL_DMC_MAX_FW_SIZE BXT_DMC_MAX_FW_SIZE
MODULE_FIRMWARE(KBL_DMC_PATH);
-#define SKL_DMC_PATH DMC_PATH(skl, 1, 27)
-#define SKL_DMC_VERSION_REQUIRED DMC_VERSION(1, 27)
+#define SKL_DMC_PATH DMC_LEGACY_PATH(skl, 1, 27)
#define SKL_DMC_MAX_FW_SIZE BXT_DMC_MAX_FW_SIZE
MODULE_FIRMWARE(SKL_DMC_PATH);
-#define BXT_DMC_PATH DMC_PATH(bxt, 1, 07)
-#define BXT_DMC_VERSION_REQUIRED DMC_VERSION(1, 7)
+#define BXT_DMC_PATH DMC_LEGACY_PATH(bxt, 1, 07)
#define BXT_DMC_MAX_FW_SIZE 0x3000
MODULE_FIRMWARE(BXT_DMC_PATH);
@@ -108,6 +107,8 @@ MODULE_FIRMWARE(BXT_DMC_PATH);
#define DMC_V3_MAX_MMIO_COUNT 20
#define DMC_V1_MMIO_START_RANGE 0x80000
+#define PIPE_TO_DMC_ID(pipe) (DMC_FW_PIPEA + ((pipe) - PIPE_A))
+
struct intel_css_header {
/* 0x09 for DMC */
u32 module_type;
@@ -387,11 +388,11 @@ static void pipedmc_clock_gating_wa(struct drm_i915_private *i915, bool enable)
{
enum pipe pipe;
- if (DISPLAY_VER(i915) != 13)
+ if (DISPLAY_VER(i915) < 13)
return;
/*
- * Wa_16015201720:adl-p,dg2
+ * Wa_16015201720:adl-p,dg2, mtl
* The WA requires clock gating to be disabled all the time
* for pipe A and B.
* For pipe C and D clock gating needs to be disabled only
@@ -407,6 +408,28 @@ static void pipedmc_clock_gating_wa(struct drm_i915_private *i915, bool enable)
PIPEDMC_GATING_DIS, 0);
}
+void intel_dmc_enable_pipe(struct drm_i915_private *i915, enum pipe pipe)
+{
+ if (!has_dmc_id_fw(i915, PIPE_TO_DMC_ID(pipe)))
+ return;
+
+ if (DISPLAY_VER(i915) >= 14)
+ intel_de_rmw(i915, MTL_PIPEDMC_CONTROL, 0, PIPEDMC_ENABLE_MTL(pipe));
+ else
+ intel_de_rmw(i915, PIPEDMC_CONTROL(pipe), 0, PIPEDMC_ENABLE);
+}
+
+void intel_dmc_disable_pipe(struct drm_i915_private *i915, enum pipe pipe)
+{
+ if (!has_dmc_id_fw(i915, PIPE_TO_DMC_ID(pipe)))
+ return;
+
+ if (DISPLAY_VER(i915) >= 14)
+ intel_de_rmw(i915, MTL_PIPEDMC_CONTROL, PIPEDMC_ENABLE_MTL(pipe), 0);
+ else
+ intel_de_rmw(i915, PIPEDMC_CONTROL(pipe), PIPEDMC_ENABLE, 0);
+}
+
/**
* intel_dmc_load_program() - write the firmware from memory to register.
* @dev_priv: i915 drm device.
@@ -433,9 +456,9 @@ void intel_dmc_load_program(struct drm_i915_private *dev_priv)
for (id = 0; id < DMC_FW_MAX; id++) {
for (i = 0; i < dmc->dmc_info[id].dmc_fw_size; i++) {
- intel_uncore_write_fw(&dev_priv->uncore,
- DMC_PROGRAM(dmc->dmc_info[id].start_mmioaddr, i),
- dmc->dmc_info[id].payload[i]);
+ intel_de_write_fw(dev_priv,
+ DMC_PROGRAM(dmc->dmc_info[id].start_mmioaddr, i),
+ dmc->dmc_info[id].payload[i]);
}
}
@@ -765,17 +788,6 @@ static u32 parse_dmc_fw_css(struct intel_dmc *dmc,
return 0;
}
- if (dmc->required_version &&
- css_header->version != dmc->required_version) {
- drm_info(&i915->drm, "Refusing to load DMC firmware v%u.%u,"
- " please use v%u.%u\n",
- DMC_VERSION_MAJOR(css_header->version),
- DMC_VERSION_MINOR(css_header->version),
- DMC_VERSION_MAJOR(dmc->required_version),
- DMC_VERSION_MINOR(dmc->required_version));
- return 0;
- }
-
dmc->version = css_header->version;
return sizeof(struct intel_css_header);
@@ -843,16 +855,40 @@ static void intel_dmc_runtime_pm_put(struct drm_i915_private *dev_priv)
intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
}
+static const char *dmc_fallback_path(struct drm_i915_private *i915)
+{
+ if (IS_ALDERLAKE_P(i915))
+ return ADLP_DMC_FALLBACK_PATH;
+
+ return NULL;
+}
+
static void dmc_load_work_fn(struct work_struct *work)
{
struct drm_i915_private *dev_priv;
struct intel_dmc *dmc;
const struct firmware *fw = NULL;
+ const char *fallback_path;
+ int err;
dev_priv = container_of(work, typeof(*dev_priv), display.dmc.work);
dmc = &dev_priv->display.dmc;
- request_firmware(&fw, dev_priv->display.dmc.fw_path, dev_priv->drm.dev);
+ err = request_firmware(&fw, dev_priv->display.dmc.fw_path, dev_priv->drm.dev);
+
+ if (err == -ENOENT && !dev_priv->params.dmc_firmware_path) {
+ fallback_path = dmc_fallback_path(dev_priv);
+ if (fallback_path) {
+ drm_dbg_kms(&dev_priv->drm,
+ "%s not found, falling back to %s\n",
+ dmc->fw_path,
+ fallback_path);
+ err = request_firmware(&fw, fallback_path, dev_priv->drm.dev);
+ if (err == 0)
+ dev_priv->display.dmc.fw_path = fallback_path;
+ }
+ }
+
parse_dmc_fw(dev_priv, fw);
if (intel_dmc_has_payload(dev_priv)) {
@@ -903,49 +939,38 @@ void intel_dmc_ucode_init(struct drm_i915_private *dev_priv)
if (IS_DG2(dev_priv)) {
dmc->fw_path = DG2_DMC_PATH;
- dmc->required_version = DG2_DMC_VERSION_REQUIRED;
dmc->max_fw_size = DISPLAY_VER13_DMC_MAX_FW_SIZE;
} else if (IS_ALDERLAKE_P(dev_priv)) {
dmc->fw_path = ADLP_DMC_PATH;
- dmc->required_version = ADLP_DMC_VERSION_REQUIRED;
dmc->max_fw_size = DISPLAY_VER13_DMC_MAX_FW_SIZE;
} else if (IS_ALDERLAKE_S(dev_priv)) {
dmc->fw_path = ADLS_DMC_PATH;
- dmc->required_version = ADLS_DMC_VERSION_REQUIRED;
dmc->max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE;
} else if (IS_DG1(dev_priv)) {
dmc->fw_path = DG1_DMC_PATH;
- dmc->required_version = DG1_DMC_VERSION_REQUIRED;
dmc->max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE;
} else if (IS_ROCKETLAKE(dev_priv)) {
dmc->fw_path = RKL_DMC_PATH;
- dmc->required_version = RKL_DMC_VERSION_REQUIRED;
dmc->max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE;
} else if (IS_TIGERLAKE(dev_priv)) {
dmc->fw_path = TGL_DMC_PATH;
- dmc->required_version = TGL_DMC_VERSION_REQUIRED;
dmc->max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE;
} else if (DISPLAY_VER(dev_priv) == 11) {
dmc->fw_path = ICL_DMC_PATH;
- dmc->required_version = ICL_DMC_VERSION_REQUIRED;
dmc->max_fw_size = ICL_DMC_MAX_FW_SIZE;
} else if (IS_GEMINILAKE(dev_priv)) {
dmc->fw_path = GLK_DMC_PATH;
- dmc->required_version = GLK_DMC_VERSION_REQUIRED;
dmc->max_fw_size = GLK_DMC_MAX_FW_SIZE;
} else if (IS_KABYLAKE(dev_priv) ||
IS_COFFEELAKE(dev_priv) ||
IS_COMETLAKE(dev_priv)) {
dmc->fw_path = KBL_DMC_PATH;
- dmc->required_version = KBL_DMC_VERSION_REQUIRED;
dmc->max_fw_size = KBL_DMC_MAX_FW_SIZE;
} else if (IS_SKYLAKE(dev_priv)) {
dmc->fw_path = SKL_DMC_PATH;
- dmc->required_version = SKL_DMC_VERSION_REQUIRED;
dmc->max_fw_size = SKL_DMC_MAX_FW_SIZE;
} else if (IS_BROXTON(dev_priv)) {
dmc->fw_path = BXT_DMC_PATH;
- dmc->required_version = BXT_DMC_VERSION_REQUIRED;
dmc->max_fw_size = BXT_DMC_MAX_FW_SIZE;
}
@@ -958,8 +983,6 @@ void intel_dmc_ucode_init(struct drm_i915_private *dev_priv)
}
dmc->fw_path = dev_priv->params.dmc_firmware_path;
- /* Bypass version check for firmware override. */
- dmc->required_version = 0;
}
if (!dmc->fw_path) {
diff --git a/drivers/gpu/drm/i915/display/intel_dmc.h b/drivers/gpu/drm/i915/display/intel_dmc.h
index 67e03315ef99..fd1725de4289 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc.h
+++ b/drivers/gpu/drm/i915/display/intel_dmc.h
@@ -13,6 +13,8 @@
struct drm_i915_error_state_buf;
struct drm_i915_private;
+enum pipe;
+
enum {
DMC_FW_MAIN = 0,
DMC_FW_PIPEA,
@@ -25,7 +27,6 @@ enum {
struct intel_dmc {
struct work_struct work;
const char *fw_path;
- u32 required_version;
u32 max_fw_size; /* bytes */
u32 version;
struct dmc_fw_info {
@@ -48,6 +49,8 @@ struct intel_dmc {
void intel_dmc_ucode_init(struct drm_i915_private *i915);
void intel_dmc_load_program(struct drm_i915_private *i915);
void intel_dmc_disable_program(struct drm_i915_private *i915);
+void intel_dmc_enable_pipe(struct drm_i915_private *i915, enum pipe pipe);
+void intel_dmc_disable_pipe(struct drm_i915_private *i915, enum pipe pipe);
void intel_dmc_ucode_fini(struct drm_i915_private *i915);
void intel_dmc_ucode_suspend(struct drm_i915_private *i915);
void intel_dmc_ucode_resume(struct drm_i915_private *i915);
diff --git a/drivers/gpu/drm/i915/display/intel_dmc_regs.h b/drivers/gpu/drm/i915/display/intel_dmc_regs.h
index 5e5e41644ddf..cf10094acae3 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_dmc_regs.h
@@ -11,6 +11,16 @@
#define DMC_PROGRAM(addr, i) _MMIO((addr) + (i) * 4)
#define DMC_SSP_BASE_ADDR_GEN9 0x00002FC0
+#define _PIPEDMC_CONTROL_A 0x45250
+#define _PIPEDMC_CONTROL_B 0x45254
+#define PIPEDMC_CONTROL(pipe) _MMIO_PIPE(pipe, \
+ _PIPEDMC_CONTROL_A, \
+ _PIPEDMC_CONTROL_B)
+#define PIPEDMC_ENABLE REG_BIT(0)
+
+#define MTL_PIPEDMC_CONTROL _MMIO(0x45250)
+#define PIPEDMC_ENABLE_MTL(pipe) REG_BIT(((pipe) - PIPE_A) * 4)
+
#define _ADLP_PIPEDMC_REG_MMIO_BASE_A 0x5f000
#define _TGL_PIPEDMC_REG_MMIO_BASE_A 0x92000
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index 75070eb07d4b..62cbab7402e9 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -117,7 +117,6 @@ bool intel_dp_is_edp(struct intel_dp *intel_dp)
}
static void intel_dp_unset_edid(struct intel_dp *intel_dp);
-static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc);
/* Is link rate UHBR and thus 128b/132b? */
bool intel_dp_is_uhbr(const struct intel_crtc_state *crtc_state)
@@ -673,23 +672,59 @@ small_joiner_ram_size_bits(struct drm_i915_private *i915)
return 6144 * 8;
}
-static u16 intel_dp_dsc_get_output_bpp(struct drm_i915_private *i915,
- u32 link_clock, u32 lane_count,
- u32 mode_clock, u32 mode_hdisplay,
- bool bigjoiner,
- u32 pipe_bpp)
+u32 intel_dp_dsc_nearest_valid_bpp(struct drm_i915_private *i915, u32 bpp, u32 pipe_bpp)
{
- u32 bits_per_pixel, max_bpp_small_joiner_ram;
+ u32 bits_per_pixel = bpp;
int i;
+ /* Error out if the max bpp is less than smallest allowed valid bpp */
+ if (bits_per_pixel < valid_dsc_bpp[0]) {
+ drm_dbg_kms(&i915->drm, "Unsupported BPP %u, min %u\n",
+ bits_per_pixel, valid_dsc_bpp[0]);
+ return 0;
+ }
+
+ /* From XE_LPD onwards we support from bpc upto uncompressed bpp-1 BPPs */
+ if (DISPLAY_VER(i915) >= 13) {
+ bits_per_pixel = min(bits_per_pixel, pipe_bpp - 1);
+ } else {
+ /* Find the nearest match in the array of known BPPs from VESA */
+ for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp) - 1; i++) {
+ if (bits_per_pixel < valid_dsc_bpp[i + 1])
+ break;
+ }
+ drm_dbg_kms(&i915->drm, "Set dsc bpp from %d to VESA %d\n",
+ bits_per_pixel, valid_dsc_bpp[i]);
+
+ bits_per_pixel = valid_dsc_bpp[i];
+ }
+
+ return bits_per_pixel;
+}
+
+u16 intel_dp_dsc_get_output_bpp(struct drm_i915_private *i915,
+ u32 link_clock, u32 lane_count,
+ u32 mode_clock, u32 mode_hdisplay,
+ bool bigjoiner,
+ u32 pipe_bpp,
+ u32 timeslots)
+{
+ u32 bits_per_pixel, max_bpp_small_joiner_ram;
+
/*
* Available Link Bandwidth(Kbits/sec) = (NumberOfLanes)*
- * (LinkSymbolClock)* 8 * (TimeSlotsPerMTP)
- * for SST -> TimeSlotsPerMTP is 1,
- * for MST -> TimeSlotsPerMTP has to be calculated
+ * (LinkSymbolClock)* 8 * (TimeSlots / 64)
+ * for SST -> TimeSlots is 64(i.e all TimeSlots that are available)
+ * for MST -> TimeSlots has to be calculated, based on mode requirements
*/
- bits_per_pixel = (link_clock * lane_count * 8) /
- intel_dp_mode_to_fec_clock(mode_clock);
+ bits_per_pixel = DIV_ROUND_UP((link_clock * lane_count) * timeslots,
+ intel_dp_mode_to_fec_clock(mode_clock) * 8);
+
+ drm_dbg_kms(&i915->drm, "Max link bpp is %u for %u timeslots "
+ "total bw %u pixel clock %u\n",
+ bits_per_pixel, timeslots,
+ (link_clock * lane_count * 8),
+ intel_dp_mode_to_fec_clock(mode_clock));
/* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. width */
max_bpp_small_joiner_ram = small_joiner_ram_size_bits(i915) /
@@ -712,24 +747,7 @@ static u16 intel_dp_dsc_get_output_bpp(struct drm_i915_private *i915,
bits_per_pixel = min(bits_per_pixel, max_bpp_bigjoiner);
}
- /* Error out if the max bpp is less than smallest allowed valid bpp */
- if (bits_per_pixel < valid_dsc_bpp[0]) {
- drm_dbg_kms(&i915->drm, "Unsupported BPP %u, min %u\n",
- bits_per_pixel, valid_dsc_bpp[0]);
- return 0;
- }
-
- /* From XE_LPD onwards we support from bpc upto uncompressed bpp-1 BPPs */
- if (DISPLAY_VER(i915) >= 13) {
- bits_per_pixel = min(bits_per_pixel, pipe_bpp - 1);
- } else {
- /* Find the nearest match in the array of known BPPs from VESA */
- for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp) - 1; i++) {
- if (bits_per_pixel < valid_dsc_bpp[i + 1])
- break;
- }
- bits_per_pixel = valid_dsc_bpp[i];
- }
+ bits_per_pixel = intel_dp_dsc_nearest_valid_bpp(i915, bits_per_pixel, pipe_bpp);
/*
* Compressed BPP in U6.4 format so multiply by 16, for Gen 11,
@@ -738,9 +756,9 @@ static u16 intel_dp_dsc_get_output_bpp(struct drm_i915_private *i915,
return bits_per_pixel << 4;
}
-static u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
- int mode_clock, int mode_hdisplay,
- bool bigjoiner)
+u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
+ int mode_clock, int mode_hdisplay,
+ bool bigjoiner)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
u8 min_slice_count, i;
@@ -947,8 +965,8 @@ intel_dp_mode_valid_downstream(struct intel_connector *connector,
return MODE_OK;
}
-static bool intel_dp_need_bigjoiner(struct intel_dp *intel_dp,
- int hdisplay, int clock)
+bool intel_dp_need_bigjoiner(struct intel_dp *intel_dp,
+ int hdisplay, int clock)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
@@ -974,9 +992,6 @@ intel_dp_mode_valid(struct drm_connector *_connector,
enum drm_mode_status status;
bool dsc = false, bigjoiner = false;
- if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
- return MODE_NO_DBLESCAN;
-
if (mode->flags & DRM_MODE_FLAG_DBLCLK)
return MODE_H_ILLEGAL;
@@ -1013,7 +1028,7 @@ intel_dp_mode_valid(struct drm_connector *_connector,
* Output bpp is stored in 6.4 format so right shift by 4 to get the
* integer value since we support only integer values of bpp.
*/
- if (DISPLAY_VER(dev_priv) >= 10 &&
+ if (HAS_DSC(dev_priv) &&
drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)) {
/*
* TBD pass the connector BPC,
@@ -1035,7 +1050,7 @@ intel_dp_mode_valid(struct drm_connector *_connector,
target_clock,
mode->hdisplay,
bigjoiner,
- pipe_bpp) >> 4;
+ pipe_bpp, 64) >> 4;
dsc_slice_count =
intel_dp_dsc_get_slice_count(intel_dp,
target_clock,
@@ -1364,7 +1379,7 @@ intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
return -EINVAL;
}
-static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 max_req_bpc)
+int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 max_req_bpc)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
int i, num_bpc;
@@ -1465,10 +1480,12 @@ static int intel_dp_dsc_compute_params(struct intel_encoder *encoder,
return drm_dsc_compute_rc_parameters(vdsc_cfg);
}
-static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
- struct intel_crtc_state *pipe_config,
- struct drm_connector_state *conn_state,
- struct link_config_limits *limits)
+int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state,
+ struct link_config_limits *limits,
+ int timeslots,
+ bool compute_pipe_bpp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
@@ -1483,7 +1500,10 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
if (!intel_dp_supports_dsc(intel_dp, pipe_config))
return -EINVAL;
- pipe_bpp = intel_dp_dsc_compute_bpp(intel_dp, conn_state->max_requested_bpc);
+ if (compute_pipe_bpp)
+ pipe_bpp = intel_dp_dsc_compute_bpp(intel_dp, conn_state->max_requested_bpc);
+ else
+ pipe_bpp = pipe_config->pipe_bpp;
if (intel_dp->force_dsc_bpc) {
pipe_bpp = intel_dp->force_dsc_bpc * 3;
@@ -1514,33 +1534,52 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
true);
} else {
- u16 dsc_max_output_bpp;
+ u16 dsc_max_output_bpp = 0;
u8 dsc_dp_slice_count;
- dsc_max_output_bpp =
- intel_dp_dsc_get_output_bpp(dev_priv,
- pipe_config->port_clock,
- pipe_config->lane_count,
- adjusted_mode->crtc_clock,
- adjusted_mode->crtc_hdisplay,
- pipe_config->bigjoiner_pipes,
- pipe_bpp);
+ if (compute_pipe_bpp) {
+ dsc_max_output_bpp =
+ intel_dp_dsc_get_output_bpp(dev_priv,
+ pipe_config->port_clock,
+ pipe_config->lane_count,
+ adjusted_mode->crtc_clock,
+ adjusted_mode->crtc_hdisplay,
+ pipe_config->bigjoiner_pipes,
+ pipe_bpp,
+ timeslots);
+ if (!dsc_max_output_bpp) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Compressed BPP not supported\n");
+ return -EINVAL;
+ }
+ }
dsc_dp_slice_count =
intel_dp_dsc_get_slice_count(intel_dp,
adjusted_mode->crtc_clock,
adjusted_mode->crtc_hdisplay,
pipe_config->bigjoiner_pipes);
- if (!dsc_max_output_bpp || !dsc_dp_slice_count) {
+ if (!dsc_dp_slice_count) {
drm_dbg_kms(&dev_priv->drm,
- "Compressed BPP/Slice Count not supported\n");
+ "Compressed Slice Count not supported\n");
return -EINVAL;
}
- pipe_config->dsc.compressed_bpp = min_t(u16,
- dsc_max_output_bpp >> 4,
- pipe_config->pipe_bpp);
+
+ /*
+ * compute pipe bpp is set to false for DP MST DSC case
+ * and compressed_bpp is calculated same time once
+ * vpci timeslots are allocated, because overall bpp
+ * calculation procedure is bit different for MST case.
+ */
+ if (compute_pipe_bpp) {
+ pipe_config->dsc.compressed_bpp = min_t(u16,
+ dsc_max_output_bpp >> 4,
+ pipe_config->pipe_bpp);
+ }
pipe_config->dsc.slice_count = dsc_dp_slice_count;
+ drm_dbg_kms(&dev_priv->drm, "DSC: compressed bpp %d slice count %d\n",
+ pipe_config->dsc.compressed_bpp,
+ pipe_config->dsc.slice_count);
}
-
/*
* VDSC engine operates at 1 Pixel per clock, so if peak pixel rate
* is greater than the maximum Cdclock and if slice count is even
@@ -1548,13 +1587,13 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
*/
if (adjusted_mode->crtc_clock > dev_priv->display.cdclk.max_cdclk_freq ||
pipe_config->bigjoiner_pipes) {
- if (pipe_config->dsc.slice_count < 2) {
+ if (pipe_config->dsc.slice_count > 1) {
+ pipe_config->dsc.dsc_split = true;
+ } else {
drm_dbg_kms(&dev_priv->drm,
"Cannot split stream to use 2 VDSC instances\n");
return -EINVAL;
}
-
- pipe_config->dsc.dsc_split = true;
}
ret = intel_dp_dsc_compute_params(&dig_port->base, pipe_config);
@@ -1643,7 +1682,7 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
str_yes_no(ret), str_yes_no(joiner_needs_dsc),
str_yes_no(intel_dp->force_dsc_en));
ret = intel_dp_dsc_compute_config(intel_dp, pipe_config,
- conn_state, &limits);
+ conn_state, &limits, 64, true);
if (ret < 0)
return ret;
}
@@ -2009,6 +2048,23 @@ intel_dp_compute_output_format(struct intel_encoder *encoder,
return ret;
}
+static void
+intel_dp_audio_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct drm_connector *connector = conn_state->connector;
+
+ pipe_config->sdp_split_enable =
+ intel_dp_has_audio(encoder, pipe_config, conn_state) &&
+ intel_dp_is_uhbr(pipe_config);
+
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] SDP split enable: %s\n",
+ connector->base.id, connector->name,
+ str_yes_no(pipe_config->sdp_split_enable));
+}
+
int
intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
@@ -2024,7 +2080,9 @@ intel_dp_compute_config(struct intel_encoder *encoder,
if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && encoder->port != PORT_A)
pipe_config->has_pch_encoder = true;
- pipe_config->has_audio = intel_dp_has_audio(encoder, pipe_config, conn_state);
+ pipe_config->has_audio =
+ intel_dp_has_audio(encoder, pipe_config, conn_state) &&
+ intel_audio_compute_config(encoder, pipe_config, conn_state);
fixed_mode = intel_panel_fixed_mode(connector, adjusted_mode);
if (intel_dp_is_edp(intel_dp) && fixed_mode) {
@@ -2036,7 +2094,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
return -EINVAL;
- if (HAS_GMCH(dev_priv) &&
+ if (!connector->base.interlace_allowed &&
adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
return -EINVAL;
@@ -2092,6 +2150,8 @@ intel_dp_compute_config(struct intel_encoder *encoder,
adjusted_mode->crtc_clock /= n;
}
+ intel_dp_audio_compute_config(encoder, pipe_config, conn_state);
+
intel_link_compute_m_n(output_bpp,
pipe_config->lane_count,
adjusted_mode->crtc_clock,
@@ -2907,7 +2967,7 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
intel_dp_set_max_sink_lane_count(intel_dp);
/* Read the eDP DSC DPCD registers */
- if (DISPLAY_VER(dev_priv) >= 10)
+ if (HAS_DSC(dev_priv))
intel_dp_get_dsc_sink_cap(intel_dp);
/*
@@ -3590,12 +3650,11 @@ static u8 intel_dp_autotest_edid(struct intel_dp *intel_dp)
intel_dp->aux.i2c_defer_count);
intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_FAILSAFE;
} else {
- struct edid *block = intel_connector->detect_edid;
+ /* FIXME: Get rid of drm_edid_raw() */
+ const struct edid *block = drm_edid_raw(intel_connector->detect_edid);
- /* We have to write the checksum
- * of the last block read
- */
- block += intel_connector->detect_edid->extensions;
+ /* We have to write the checksum of the last block read */
+ block += block->extensions;
if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_EDID_CHECKSUM,
block->checksum) <= 0)
@@ -4417,29 +4476,34 @@ bool intel_digital_port_connected(struct intel_encoder *encoder)
return is_connected;
}
-static struct edid *
+static const struct drm_edid *
intel_dp_get_edid(struct intel_dp *intel_dp)
{
- struct intel_connector *intel_connector = intel_dp->attached_connector;
+ struct intel_connector *connector = intel_dp->attached_connector;
+ const struct drm_edid *fixed_edid = connector->panel.fixed_edid;
- /* use cached edid if we have one */
- if (intel_connector->edid) {
+ /* Use panel fixed edid if we have one */
+ if (fixed_edid) {
/* invalid edid */
- if (IS_ERR(intel_connector->edid))
+ if (IS_ERR(fixed_edid))
return NULL;
- return drm_edid_duplicate(intel_connector->edid);
- } else
- return drm_get_edid(&intel_connector->base,
- &intel_dp->aux.ddc);
+ return drm_edid_dup(fixed_edid);
+ }
+
+ return drm_edid_read_ddc(&connector->base, &intel_dp->aux.ddc);
}
static void
intel_dp_update_dfp(struct intel_dp *intel_dp,
- const struct edid *edid)
+ const struct drm_edid *drm_edid)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
struct intel_connector *connector = intel_dp->attached_connector;
+ const struct edid *edid;
+
+ /* FIXME: Get rid of drm_edid_raw() */
+ edid = drm_edid_raw(drm_edid);
intel_dp->dfp.max_bpc =
drm_dp_downstream_max_bpc(intel_dp->dpcd,
@@ -4539,21 +4603,27 @@ intel_dp_set_edid(struct intel_dp *intel_dp)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
struct intel_connector *connector = intel_dp->attached_connector;
- struct edid *edid;
+ const struct drm_edid *drm_edid;
+ const struct edid *edid;
bool vrr_capable;
intel_dp_unset_edid(intel_dp);
- edid = intel_dp_get_edid(intel_dp);
- connector->detect_edid = edid;
+ drm_edid = intel_dp_get_edid(intel_dp);
+ connector->detect_edid = drm_edid;
+
+ /* Below we depend on display info having been updated */
+ drm_edid_connector_update(&connector->base, drm_edid);
vrr_capable = intel_vrr_is_capable(connector);
drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] VRR capable: %s\n",
connector->base.base.id, connector->base.name, str_yes_no(vrr_capable));
drm_connector_set_vrr_capable_property(&connector->base, vrr_capable);
- intel_dp_update_dfp(intel_dp, edid);
+ intel_dp_update_dfp(intel_dp, drm_edid);
intel_dp_update_420(intel_dp);
+ /* FIXME: Get rid of drm_edid_raw() */
+ edid = drm_edid_raw(drm_edid);
if (edid && edid->input & DRM_EDID_INPUT_DIGITAL) {
intel_dp->has_hdmi_sink = drm_detect_hdmi_monitor(edid);
intel_dp->has_audio = drm_detect_monitor_audio(edid);
@@ -4568,7 +4638,7 @@ intel_dp_unset_edid(struct intel_dp *intel_dp)
struct intel_connector *connector = intel_dp->attached_connector;
drm_dp_cec_unset_edid(&intel_dp->aux);
- kfree(connector->detect_edid);
+ drm_edid_free(connector->detect_edid);
connector->detect_edid = NULL;
intel_dp->has_hdmi_sink = false;
@@ -4633,7 +4703,7 @@ intel_dp_detect(struct drm_connector *connector,
}
/* Read DP Sink DSC Cap DPCD regs for DP v1.4 */
- if (DISPLAY_VER(dev_priv) >= 11)
+ if (HAS_DSC(dev_priv))
intel_dp_get_dsc_sink_cap(intel_dp);
intel_dp_configure_mst(intel_dp);
@@ -4732,12 +4802,10 @@ intel_dp_force(struct drm_connector *connector)
static int intel_dp_get_modes(struct drm_connector *connector)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
- struct edid *edid;
- int num_modes = 0;
+ int num_modes;
- edid = intel_connector->detect_edid;
- if (edid)
- num_modes = intel_connector_update_modes(connector, edid);
+ /* drm_edid_connector_update() done in ->detect() or ->force() */
+ num_modes = drm_edid_connector_add_modes(connector);
/* Also add fixed mode, which may or may not be present in EDID */
if (intel_dp_is_edp(intel_attached_dp(intel_connector)))
@@ -4746,7 +4814,7 @@ static int intel_dp_get_modes(struct drm_connector *connector)
if (num_modes)
return num_modes;
- if (!edid) {
+ if (!intel_connector->detect_edid) {
struct intel_dp *intel_dp = intel_attached_dp(intel_connector);
struct drm_display_mode *mode;
@@ -5182,7 +5250,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
struct drm_display_mode *fixed_mode;
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
bool has_dpcd;
- struct edid *edid;
+ const struct drm_edid *drm_edid;
if (!intel_dp_is_edp(intel_dp))
return true;
@@ -5202,7 +5270,20 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
return false;
}
- intel_pps_init(intel_dp);
+ intel_bios_init_panel_early(dev_priv, &intel_connector->panel,
+ encoder->devdata);
+
+ if (!intel_pps_init(intel_dp)) {
+ drm_info(&dev_priv->drm,
+ "[ENCODER:%d:%s] unusable PPS, disabling eDP\n",
+ encoder->base.base.id, encoder->base.name);
+ /*
+ * The BIOS may have still enabled VDD on the PPS even
+ * though it's unusable. Make sure we turn it back off
+ * and to release the power domain references/etc.
+ */
+ goto out_vdd_off;
+ }
/* Cache DPCD and EDID for edp. */
has_dpcd = intel_edp_init_dpcd(intel_dp);
@@ -5216,29 +5297,28 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
}
mutex_lock(&dev_priv->drm.mode_config.mutex);
- edid = drm_get_edid(connector, &intel_dp->aux.ddc);
- if (!edid) {
+ drm_edid = drm_edid_read_ddc(connector, &intel_dp->aux.ddc);
+ if (!drm_edid) {
/* Fallback to EDID from ACPI OpRegion, if any */
- edid = intel_opregion_get_edid(intel_connector);
- if (edid)
+ drm_edid = intel_opregion_get_edid(intel_connector);
+ if (drm_edid)
drm_dbg_kms(&dev_priv->drm,
"[CONNECTOR:%d:%s] Using OpRegion EDID\n",
connector->base.id, connector->name);
}
- if (edid) {
- if (drm_add_edid_modes(connector, edid)) {
- drm_connector_update_edid_property(connector, edid);
- } else {
- kfree(edid);
- edid = ERR_PTR(-EINVAL);
+ if (drm_edid) {
+ if (drm_edid_connector_update(connector, drm_edid) ||
+ !drm_edid_connector_add_modes(connector)) {
+ drm_edid_connector_update(connector, NULL);
+ drm_edid_free(drm_edid);
+ drm_edid = ERR_PTR(-EINVAL);
}
} else {
- edid = ERR_PTR(-ENOENT);
+ drm_edid = ERR_PTR(-ENOENT);
}
- intel_connector->edid = edid;
- intel_bios_init_panel(dev_priv, &intel_connector->panel,
- encoder->devdata, IS_ERR(edid) ? NULL : edid);
+ intel_bios_init_panel_late(dev_priv, &intel_connector->panel, encoder->devdata,
+ IS_ERR(drm_edid) ? NULL : drm_edid);
intel_panel_add_edid_fixed_modes(intel_connector, true);
@@ -5262,7 +5342,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
goto out_vdd_off;
}
- intel_panel_init(intel_connector);
+ intel_panel_init(intel_connector, drm_edid);
intel_edp_backlight_setup(intel_dp, intel_connector);
@@ -5364,7 +5444,7 @@ intel_dp_init_connector(struct intel_digital_port *dig_port,
drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
- if (!HAS_GMCH(dev_priv))
+ if (!HAS_GMCH(dev_priv) && DISPLAY_VER(dev_priv) < 12)
connector->interlace_allowed = true;
intel_connector->polled = DRM_CONNECTOR_POLL_HPD;
diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h
index a54902c713a3..ef39e4f7a329 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.h
+++ b/drivers/gpu/drm/i915/display/intel_dp.h
@@ -56,6 +56,12 @@ void intel_dp_encoder_flush_work(struct drm_encoder *encoder);
int intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state);
+int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state,
+ struct link_config_limits *limits,
+ int timeslots,
+ bool recompute_pipe_bpp);
bool intel_dp_is_edp(struct intel_dp *intel_dp);
bool intel_dp_is_uhbr(const struct intel_crtc_state *crtc_state);
bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port);
@@ -96,6 +102,18 @@ void intel_read_dp_sdp(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
unsigned int type);
bool intel_digital_port_connected(struct intel_encoder *encoder);
+int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc);
+u16 intel_dp_dsc_get_output_bpp(struct drm_i915_private *i915,
+ u32 link_clock, u32 lane_count,
+ u32 mode_clock, u32 mode_hdisplay,
+ bool bigjoiner,
+ u32 pipe_bpp,
+ u32 timeslots);
+u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
+ int mode_clock, int mode_hdisplay,
+ bool bigjoiner);
+bool intel_dp_need_bigjoiner(struct intel_dp *intel_dp,
+ int hdisplay, int clock);
static inline unsigned int intel_dp_unused_lane_mask(int lane_count)
{
@@ -103,6 +121,7 @@ static inline unsigned int intel_dp_unused_lane_mask(int lane_count)
}
u32 intel_dp_mode_to_fec_clock(u32 mode_clock);
+u32 intel_dp_dsc_nearest_valid_bpp(struct drm_i915_private *i915, u32 bpp, u32 pipe_bpp);
void intel_ddi_update_pipe(struct intel_atomic_state *state,
struct intel_encoder *encoder,
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux.c b/drivers/gpu/drm/i915/display/intel_dp_aux.c
index 664bebdecea7..5a176bfb10a2 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux.c
@@ -6,6 +6,7 @@
#include "i915_drv.h"
#include "i915_reg.h"
#include "i915_trace.h"
+#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dp_aux.h"
#include "intel_pps.h"
@@ -40,20 +41,16 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp)
i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
const unsigned int timeout_ms = 10;
u32 status;
- bool done;
-
-#define C (((status = intel_uncore_read_notrace(&i915->uncore, ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
- done = wait_event_timeout(i915->display.gmbus.wait_queue, C,
- msecs_to_jiffies_timeout(timeout_ms));
+ int ret;
- /* just trace the final value */
- trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);
+ ret = __intel_de_wait_for_register(i915, ch_ctl,
+ DP_AUX_CH_CTL_SEND_BUSY, 0,
+ 2, timeout_ms, &status);
- if (!done)
+ if (ret == -ETIMEDOUT)
drm_err(&i915->drm,
"%s: did not complete or timeout within %ums (status 0x%08x)\n",
intel_dp->aux.name, timeout_ms, status);
-#undef C
return status;
}
@@ -191,7 +188,6 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *i915 =
to_i915(dig_port->base.base.dev);
- struct intel_uncore *uncore = &i915->uncore;
enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
bool is_tc_port = intel_phy_is_tc(i915, phy);
i915_reg_t ch_ctl, ch_data[5];
@@ -235,7 +231,7 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
/* Try to wait for any previous AUX channel activity */
for (try = 0; try < 3; try++) {
- status = intel_uncore_read_notrace(uncore, ch_ctl);
+ status = intel_de_read_notrace(i915, ch_ctl);
if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
break;
msleep(1);
@@ -244,7 +240,7 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);
if (try == 3) {
- const u32 status = intel_uncore_read(uncore, ch_ctl);
+ const u32 status = intel_de_read(i915, ch_ctl);
if (status != intel_dp->aux_busy_last_status) {
drm_WARN(&i915->drm, 1,
@@ -274,23 +270,20 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
for (try = 0; try < 5; try++) {
/* Load the send data into the aux channel data registers */
for (i = 0; i < send_bytes; i += 4)
- intel_uncore_write(uncore,
- ch_data[i >> 2],
- intel_dp_aux_pack(send + i,
- send_bytes - i));
+ intel_de_write(i915, ch_data[i >> 2],
+ intel_dp_aux_pack(send + i,
+ send_bytes - i));
/* Send the command and wait for it to complete */
- intel_uncore_write(uncore, ch_ctl, send_ctl);
+ intel_de_write(i915, ch_ctl, send_ctl);
status = intel_dp_aux_wait_done(intel_dp);
/* Clear done status and any errors */
- intel_uncore_write(uncore,
- ch_ctl,
- status |
- DP_AUX_CH_CTL_DONE |
- DP_AUX_CH_CTL_TIME_OUT_ERROR |
- DP_AUX_CH_CTL_RECEIVE_ERROR);
+ intel_de_write(i915, ch_ctl,
+ status | DP_AUX_CH_CTL_DONE |
+ DP_AUX_CH_CTL_TIME_OUT_ERROR |
+ DP_AUX_CH_CTL_RECEIVE_ERROR);
/*
* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
@@ -361,7 +354,7 @@ done:
recv_bytes = recv_size;
for (i = 0; i < recv_bytes; i += 4)
- intel_dp_aux_unpack(intel_uncore_read(uncore, ch_data[i >> 2]),
+ intel_dp_aux_unpack(intel_de_read(i915, ch_data[i >> 2]),
recv + i, recv_bytes - i);
ret = recv_bytes;
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index 4077a979a924..054a009e800d 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -45,10 +45,14 @@
#include "intel_hotplug.h"
#include "skl_scaler.h"
-static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
- struct intel_crtc_state *crtc_state,
- struct drm_connector_state *conn_state,
- struct link_config_limits *limits)
+static int intel_dp_mst_find_vcpi_slots_for_bpp(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ int max_bpp,
+ int min_bpp,
+ struct link_config_limits *limits,
+ struct drm_connector_state *conn_state,
+ int step,
+ bool dsc)
{
struct drm_atomic_state *state = crtc_state->uapi.state;
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
@@ -60,6 +64,7 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
int bpp, slots = -EINVAL;
+ int ret = 0;
mst_state = drm_atomic_get_mst_topology_state(state, &intel_dp->mst_mgr);
if (IS_ERR(mst_state))
@@ -71,30 +76,68 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
// TODO: Handle pbn_div changes by adding a new MST helper
if (!mst_state->pbn_div) {
mst_state->pbn_div = drm_dp_get_vc_payload_bw(&intel_dp->mst_mgr,
- limits->max_rate,
- limits->max_lane_count);
+ crtc_state->port_clock,
+ crtc_state->lane_count);
}
- for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
- crtc_state->pipe_bpp = bpp;
-
+ for (bpp = max_bpp; bpp >= min_bpp; bpp -= step) {
crtc_state->pbn = drm_dp_calc_pbn_mode(adjusted_mode->crtc_clock,
- crtc_state->pipe_bpp,
- false);
+ dsc ? bpp << 4 : bpp,
+ dsc);
+
+ drm_dbg_kms(&i915->drm, "Trying bpp %d\n", bpp);
+
slots = drm_dp_atomic_find_time_slots(state, &intel_dp->mst_mgr,
- connector->port, crtc_state->pbn);
+ connector->port,
+ crtc_state->pbn);
if (slots == -EDEADLK)
return slots;
- if (slots >= 0)
- break;
+
+ if (slots >= 0) {
+ ret = drm_dp_mst_atomic_check(state);
+ /*
+ * If we got slots >= 0 and we can fit those based on check
+ * then we can exit the loop. Otherwise keep trying.
+ */
+ if (!ret)
+ break;
+ }
}
+ /* Despite slots are non-zero, we still failed the atomic check */
+ if (ret && slots >= 0)
+ slots = ret;
+
if (slots < 0) {
drm_dbg_kms(&i915->drm, "failed finding vcpi slots:%d\n",
slots);
- return slots;
+ } else {
+ if (!dsc)
+ crtc_state->pipe_bpp = bpp;
+ else
+ crtc_state->dsc.compressed_bpp = bpp;
+ drm_dbg_kms(&i915->drm, "Got %d slots for pipe bpp %d dsc %d\n", slots, bpp, dsc);
}
+ return slots;
+}
+
+static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ struct link_config_limits *limits)
+{
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->hw.adjusted_mode;
+ int slots = -EINVAL;
+
+ slots = intel_dp_mst_find_vcpi_slots_for_bpp(encoder, crtc_state, limits->max_bpp,
+ limits->min_bpp, limits,
+ conn_state, 2 * 3, false);
+
+ if (slots < 0)
+ return slots;
+
intel_link_compute_m_n(crtc_state->pipe_bpp,
crtc_state->lane_count,
adjusted_mode->crtc_clock,
@@ -106,6 +149,99 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
return 0;
}
+static int intel_dp_dsc_mst_compute_link_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ struct link_config_limits *limits)
+{
+ struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
+ struct intel_dp *intel_dp = &intel_mst->primary->dp;
+ struct intel_connector *connector =
+ to_intel_connector(conn_state->connector);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->hw.adjusted_mode;
+ int slots = -EINVAL;
+ int i, num_bpc;
+ u8 dsc_bpc[3] = {0};
+ int min_bpp, max_bpp, sink_min_bpp, sink_max_bpp;
+ u8 dsc_max_bpc;
+ bool need_timeslot_recalc = false;
+ u32 last_compressed_bpp;
+
+ /* Max DSC Input BPC for ICL is 10 and for TGL+ is 12 */
+ if (DISPLAY_VER(i915) >= 12)
+ dsc_max_bpc = min_t(u8, 12, conn_state->max_requested_bpc);
+ else
+ dsc_max_bpc = min_t(u8, 10, conn_state->max_requested_bpc);
+
+ max_bpp = min_t(u8, dsc_max_bpc * 3, limits->max_bpp);
+ min_bpp = limits->min_bpp;
+
+ num_bpc = drm_dp_dsc_sink_supported_input_bpcs(intel_dp->dsc_dpcd,
+ dsc_bpc);
+
+ drm_dbg_kms(&i915->drm, "DSC Source supported min bpp %d max bpp %d\n",
+ min_bpp, max_bpp);
+
+ sink_max_bpp = dsc_bpc[0] * 3;
+ sink_min_bpp = sink_max_bpp;
+
+ for (i = 1; i < num_bpc; i++) {
+ if (sink_min_bpp > dsc_bpc[i] * 3)
+ sink_min_bpp = dsc_bpc[i] * 3;
+ if (sink_max_bpp < dsc_bpc[i] * 3)
+ sink_max_bpp = dsc_bpc[i] * 3;
+ }
+
+ drm_dbg_kms(&i915->drm, "DSC Sink supported min bpp %d max bpp %d\n",
+ sink_min_bpp, sink_max_bpp);
+
+ if (min_bpp < sink_min_bpp)
+ min_bpp = sink_min_bpp;
+
+ if (max_bpp > sink_max_bpp)
+ max_bpp = sink_max_bpp;
+
+ slots = intel_dp_mst_find_vcpi_slots_for_bpp(encoder, crtc_state, max_bpp,
+ min_bpp, limits,
+ conn_state, 2 * 3, true);
+
+ if (slots < 0)
+ return slots;
+
+ last_compressed_bpp = crtc_state->dsc.compressed_bpp;
+
+ crtc_state->dsc.compressed_bpp = intel_dp_dsc_nearest_valid_bpp(i915,
+ last_compressed_bpp,
+ crtc_state->pipe_bpp);
+
+ if (crtc_state->dsc.compressed_bpp != last_compressed_bpp)
+ need_timeslot_recalc = true;
+
+ /*
+ * Apparently some MST hubs dislike if vcpi slots are not matching precisely
+ * the actual compressed bpp we use.
+ */
+ if (need_timeslot_recalc) {
+ slots = intel_dp_mst_find_vcpi_slots_for_bpp(encoder, crtc_state,
+ crtc_state->dsc.compressed_bpp,
+ crtc_state->dsc.compressed_bpp,
+ limits, conn_state, 2 * 3, true);
+ if (slots < 0)
+ return slots;
+ }
+
+ intel_link_compute_m_n(crtc_state->pipe_bpp,
+ crtc_state->lane_count,
+ adjusted_mode->crtc_clock,
+ crtc_state->port_clock,
+ &crtc_state->dp_m_n,
+ crtc_state->fec_enable);
+ crtc_state->dp_m_n.tu = slots;
+
+ return 0;
+}
static int intel_dp_mst_update_slots(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
@@ -182,6 +318,29 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
ret = intel_dp_mst_compute_link_config(encoder, pipe_config,
conn_state, &limits);
+
+ if (ret == -EDEADLK)
+ return ret;
+
+ /* enable compression if the mode doesn't fit available BW */
+ drm_dbg_kms(&dev_priv->drm, "Force DSC en = %d\n", intel_dp->force_dsc_en);
+ if (ret || intel_dp->force_dsc_en) {
+ /*
+ * Try to get at least some timeslots and then see, if
+ * we can fit there with DSC.
+ */
+ drm_dbg_kms(&dev_priv->drm, "Trying to find VCPI slots in DSC mode\n");
+
+ ret = intel_dp_dsc_mst_compute_link_config(encoder, pipe_config,
+ conn_state, &limits);
+ if (ret < 0)
+ return ret;
+
+ ret = intel_dp_dsc_compute_config(intel_dp, pipe_config,
+ conn_state, &limits,
+ pipe_config->dp_m_n.tu, false);
+ }
+
if (ret)
return ret;
@@ -365,8 +524,14 @@ static void intel_mst_disable_dp(struct intel_atomic_state *state,
struct intel_dp *intel_dp = &dig_port->dp;
struct intel_connector *connector =
to_intel_connector(old_conn_state->connector);
- struct drm_dp_mst_topology_state *mst_state =
- drm_atomic_get_mst_topology_state(&state->base, &intel_dp->mst_mgr);
+ struct drm_dp_mst_topology_state *old_mst_state =
+ drm_atomic_get_old_mst_topology_state(&state->base, &intel_dp->mst_mgr);
+ struct drm_dp_mst_topology_state *new_mst_state =
+ drm_atomic_get_new_mst_topology_state(&state->base, &intel_dp->mst_mgr);
+ const struct drm_dp_mst_atomic_payload *old_payload =
+ drm_atomic_get_mst_payload_state(old_mst_state, connector->port);
+ struct drm_dp_mst_atomic_payload *new_payload =
+ drm_atomic_get_mst_payload_state(new_mst_state, connector->port);
struct drm_i915_private *i915 = to_i915(connector->base.dev);
drm_dbg_kms(&i915->drm, "active links %d\n",
@@ -374,8 +539,8 @@ static void intel_mst_disable_dp(struct intel_atomic_state *state,
intel_hdcp_disable(intel_mst->connector);
- drm_dp_remove_payload(&intel_dp->mst_mgr, mst_state,
- drm_atomic_get_mst_payload_state(mst_state, connector->port));
+ drm_dp_remove_payload(&intel_dp->mst_mgr, new_mst_state,
+ old_payload, new_payload);
intel_audio_codec_disable(encoder, old_crtc_state, old_conn_state);
}
@@ -692,6 +857,10 @@ intel_dp_mst_mode_valid_ctx(struct drm_connector *connector,
int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
int max_rate, mode_rate, max_lanes, max_link_clock;
int ret;
+ bool dsc = false, bigjoiner = false;
+ u16 dsc_max_output_bpp = 0;
+ u8 dsc_slice_count = 0;
+ int target_clock = mode->clock;
if (drm_connector_is_unregistered(connector)) {
*status = MODE_ERROR;
@@ -729,6 +898,48 @@ intel_dp_mst_mode_valid_ctx(struct drm_connector *connector,
return 0;
}
+ if (intel_dp_need_bigjoiner(intel_dp, mode->hdisplay, target_clock)) {
+ bigjoiner = true;
+ max_dotclk *= 2;
+ }
+
+ if (DISPLAY_VER(dev_priv) >= 10 &&
+ drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)) {
+ /*
+ * TBD pass the connector BPC,
+ * for now U8_MAX so that max BPC on that platform would be picked
+ */
+ int pipe_bpp = intel_dp_dsc_compute_bpp(intel_dp, U8_MAX);
+
+ if (drm_dp_sink_supports_fec(intel_dp->fec_capable)) {
+ dsc_max_output_bpp =
+ intel_dp_dsc_get_output_bpp(dev_priv,
+ max_link_clock,
+ max_lanes,
+ target_clock,
+ mode->hdisplay,
+ bigjoiner,
+ pipe_bpp, 64) >> 4;
+ dsc_slice_count =
+ intel_dp_dsc_get_slice_count(intel_dp,
+ target_clock,
+ mode->hdisplay,
+ bigjoiner);
+ }
+
+ dsc = dsc_max_output_bpp && dsc_slice_count;
+ }
+
+ /*
+ * Big joiner configuration needs DSC for TGL which is not true for
+ * XE_LPD where uncompressed joiner is supported.
+ */
+ if (DISPLAY_VER(dev_priv) < 13 && bigjoiner && !dsc)
+ return MODE_CLOCK_HIGH;
+
+ if (mode_rate > max_rate && !dsc)
+ return MODE_CLOCK_HIGH;
+
*status = intel_mode_valid_max_plane_size(dev_priv, mode, false);
return 0;
}
@@ -1018,3 +1229,64 @@ bool intel_dp_mst_is_slave_trans(const struct intel_crtc_state *crtc_state)
return crtc_state->mst_master_transcoder != INVALID_TRANSCODER &&
crtc_state->mst_master_transcoder != crtc_state->cpu_transcoder;
}
+
+/**
+ * intel_dp_mst_add_topology_state_for_connector - add MST topology state for a connector
+ * @state: atomic state
+ * @connector: connector to add the state for
+ * @crtc: the CRTC @connector is attached to
+ *
+ * Add the MST topology state for @connector to @state.
+ *
+ * Returns 0 on success, negative error code on failure.
+ */
+static int
+intel_dp_mst_add_topology_state_for_connector(struct intel_atomic_state *state,
+ struct intel_connector *connector,
+ struct intel_crtc *crtc)
+{
+ struct drm_dp_mst_topology_state *mst_state;
+
+ if (!connector->mst_port)
+ return 0;
+
+ mst_state = drm_atomic_get_mst_topology_state(&state->base,
+ &connector->mst_port->mst_mgr);
+ if (IS_ERR(mst_state))
+ return PTR_ERR(mst_state);
+
+ mst_state->pending_crtc_mask |= drm_crtc_mask(&crtc->base);
+
+ return 0;
+}
+
+/**
+ * intel_dp_mst_add_topology_state_for_crtc - add MST topology state for a CRTC
+ * @state: atomic state
+ * @crtc: CRTC to add the state for
+ *
+ * Add the MST topology state for @crtc to @state.
+ *
+ * Returns 0 on success, negative error code on failure.
+ */
+int intel_dp_mst_add_topology_state_for_crtc(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_connector *_connector;
+ struct drm_connector_state *conn_state;
+ int i;
+
+ for_each_new_connector_in_state(&state->base, _connector, conn_state, i) {
+ struct intel_connector *connector = to_intel_connector(_connector);
+ int ret;
+
+ if (conn_state->crtc != &crtc->base)
+ continue;
+
+ ret = intel_dp_mst_add_topology_state_for_connector(state, connector, crtc);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.h b/drivers/gpu/drm/i915/display/intel_dp_mst.h
index f7301de6cdfb..f1815bb72267 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.h
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.h
@@ -8,6 +8,8 @@
#include <linux/types.h>
+struct intel_atomic_state;
+struct intel_crtc;
struct intel_crtc_state;
struct intel_digital_port;
struct intel_dp;
@@ -18,5 +20,7 @@ int intel_dp_mst_encoder_active_links(struct intel_digital_port *dig_port);
bool intel_dp_mst_is_master_trans(const struct intel_crtc_state *crtc_state);
bool intel_dp_mst_is_slave_trans(const struct intel_crtc_state *crtc_state);
bool intel_dp_mst_source_support(struct intel_dp *intel_dp);
+int intel_dp_mst_add_topology_state_for_crtc(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
#endif /* __INTEL_DP_MST_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dpio_phy.c b/drivers/gpu/drm/i915/display/intel_dpio_phy.c
index 7eb7440b3180..565c06de2432 100644
--- a/drivers/gpu/drm/i915/display/intel_dpio_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_dpio_phy.c
@@ -376,7 +376,7 @@ static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv,
if (bxt_ddi_phy_is_enabled(dev_priv, phy)) {
/* Still read out the GRC value for state verification */
if (phy_info->rcomp_phy != -1)
- dev_priv->bxt_phy_grc = bxt_get_grc(dev_priv, phy);
+ dev_priv->display.state.bxt_phy_grc = bxt_get_grc(dev_priv, phy);
if (bxt_ddi_phy_verify_state(dev_priv, phy)) {
drm_dbg(&dev_priv->drm, "DDI PHY %d already enabled, "
@@ -442,8 +442,9 @@ static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv,
* the corresponding calibrated value from PHY1, and disable
* the automatic calibration on PHY0.
*/
- val = dev_priv->bxt_phy_grc = bxt_get_grc(dev_priv,
- phy_info->rcomp_phy);
+ val = bxt_get_grc(dev_priv, phy_info->rcomp_phy);
+ dev_priv->display.state.bxt_phy_grc = val;
+
grc_code = val << GRC_CODE_FAST_SHIFT |
val << GRC_CODE_SLOW_SHIFT |
val;
@@ -568,7 +569,7 @@ bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
"BXT_PORT_CL2CM_DW6(%d)", phy);
if (phy_info->rcomp_phy != -1) {
- u32 grc_code = dev_priv->bxt_phy_grc;
+ u32 grc_code = dev_priv->display.state.bxt_phy_grc;
grc_code = grc_code << GRC_CODE_FAST_SHIFT |
grc_code << GRC_CODE_SLOW_SHIFT |
diff --git a/drivers/gpu/drm/i915/display/intel_dpll.c b/drivers/gpu/drm/i915/display/intel_dpll.c
index c236aafe9be0..4e9c18be7e1f 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll.c
@@ -1910,7 +1910,7 @@ void chv_enable_pll(const struct intel_crtc_state *crtc_state)
intel_de_write(dev_priv, DPLL_MD(PIPE_B),
crtc_state->dpll_hw_state.dpll_md);
intel_de_write(dev_priv, CBR4_VLV, 0);
- dev_priv->chv_dpll_md[pipe] = crtc_state->dpll_hw_state.dpll_md;
+ dev_priv->display.state.chv_dpll_md[pipe] = crtc_state->dpll_hw_state.dpll_md;
/*
* DPLLB VGA mode also seems to cause problems.
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
index 1974eb580ed1..380368eff31a 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
@@ -618,7 +618,7 @@ static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv,
* Try to set up the PCH reference clock once all DPLLs
* that depend on it have been shut down.
*/
- if (dev_priv->pch_ssc_use & BIT(id))
+ if (dev_priv->display.dpll.pch_ssc_use & BIT(id))
intel_init_pch_refclk(dev_priv);
}
@@ -636,7 +636,7 @@ static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv,
* Try to set up the PCH reference clock once all DPLLs
* that depend on it have been shut down.
*/
- if (dev_priv->pch_ssc_use & BIT(id))
+ if (dev_priv->display.dpll.pch_ssc_use & BIT(id))
intel_init_pch_refclk(dev_priv);
}
diff --git a/drivers/gpu/drm/i915/display/intel_drrs.c b/drivers/gpu/drm/i915/display/intel_drrs.c
index 5b9e44443814..29c6421cd666 100644
--- a/drivers/gpu/drm/i915/display/intel_drrs.c
+++ b/drivers/gpu/drm/i915/display/intel_drrs.c
@@ -374,16 +374,16 @@ out:
return ret;
}
-DEFINE_SIMPLE_ATTRIBUTE(intel_drrs_debugfs_ctl_fops,
- NULL, intel_drrs_debugfs_ctl_set, "%llu\n");
+DEFINE_DEBUGFS_ATTRIBUTE(intel_drrs_debugfs_ctl_fops,
+ NULL, intel_drrs_debugfs_ctl_set, "%llu\n");
void intel_drrs_crtc_debugfs_add(struct intel_crtc *crtc)
{
debugfs_create_file("i915_drrs_status", 0444, crtc->base.debugfs_entry,
crtc, &intel_drrs_debugfs_status_fops);
- debugfs_create_file("i915_drrs_ctl", 0644, crtc->base.debugfs_entry,
- crtc, &intel_drrs_debugfs_ctl_fops);
+ debugfs_create_file_unsafe("i915_drrs_ctl", 0644, crtc->base.debugfs_entry,
+ crtc, &intel_drrs_debugfs_ctl_fops);
}
static int intel_drrs_debugfs_type_show(struct seq_file *m, void *unused)
diff --git a/drivers/gpu/drm/i915/display/intel_dsb.c b/drivers/gpu/drm/i915/display/intel_dsb.c
index 1e1c6107d51b..96bc117fd6a0 100644
--- a/drivers/gpu/drm/i915/display/intel_dsb.c
+++ b/drivers/gpu/drm/i915/display/intel_dsb.c
@@ -24,25 +24,30 @@ enum dsb_id {
struct intel_dsb {
enum dsb_id id;
+
u32 *cmd_buf;
struct i915_vma *vma;
+ struct intel_crtc *crtc;
+
+ /*
+ * maximum number of dwords the buffer will hold.
+ */
+ unsigned int size;
/*
- * free_pos will point the first free entry position
- * and help in calculating tail of command buffer.
+ * free_pos will point the first free dword and
+ * help in calculating tail of command buffer.
*/
- int free_pos;
+ unsigned int free_pos;
/*
- * ins_start_offset will help to store start address of the dsb
+ * ins_start_offset will help to store start dword of the dsb
* instuction and help in identifying the batch of auto-increment
* register.
*/
- u32 ins_start_offset;
+ unsigned int ins_start_offset;
};
-#define DSB_BUF_SIZE (2 * PAGE_SIZE)
-
/**
* DOC: DSB
*
@@ -62,86 +67,86 @@ struct intel_dsb {
/* DSB opcodes. */
#define DSB_OPCODE_SHIFT 24
+#define DSB_OPCODE_NOOP 0x0
#define DSB_OPCODE_MMIO_WRITE 0x1
+#define DSB_OPCODE_WAIT_USEC 0x2
+#define DSB_OPCODE_WAIT_LINES 0x3
+#define DSB_OPCODE_WAIT_VBLANKS 0x4
+#define DSB_OPCODE_WAIT_DSL_IN 0x5
+#define DSB_OPCODE_WAIT_DSL_OUT 0x6
+#define DSB_OPCODE_INTERRUPT 0x7
#define DSB_OPCODE_INDEXED_WRITE 0x9
+#define DSB_OPCODE_POLL 0xA
#define DSB_BYTE_EN 0xF
#define DSB_BYTE_EN_SHIFT 20
#define DSB_REG_VALUE_MASK 0xfffff
+static bool assert_dsb_has_room(struct intel_dsb *dsb)
+{
+ struct intel_crtc *crtc = dsb->crtc;
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+
+ /* each instruction is 2 dwords */
+ return !drm_WARN(&i915->drm, dsb->free_pos > dsb->size - 2,
+ "DSB buffer overflow\n");
+}
+
static bool is_dsb_busy(struct drm_i915_private *i915, enum pipe pipe,
enum dsb_id id)
{
- return DSB_STATUS & intel_de_read(i915, DSB_CTRL(pipe, id));
+ return intel_de_read(i915, DSB_CTRL(pipe, id)) & DSB_STATUS_BUSY;
}
-static bool intel_dsb_enable_engine(struct drm_i915_private *i915,
- enum pipe pipe, enum dsb_id id)
+static void intel_dsb_emit(struct intel_dsb *dsb, u32 ldw, u32 udw)
{
- u32 dsb_ctrl;
+ u32 *buf = dsb->cmd_buf;
- dsb_ctrl = intel_de_read(i915, DSB_CTRL(pipe, id));
- if (DSB_STATUS & dsb_ctrl) {
- drm_dbg_kms(&i915->drm, "DSB engine is busy.\n");
- return false;
- }
+ if (!assert_dsb_has_room(dsb))
+ return;
- dsb_ctrl |= DSB_ENABLE;
- intel_de_write(i915, DSB_CTRL(pipe, id), dsb_ctrl);
+ /* Every instruction should be 8 byte aligned. */
+ dsb->free_pos = ALIGN(dsb->free_pos, 2);
- intel_de_posting_read(i915, DSB_CTRL(pipe, id));
- return true;
+ dsb->ins_start_offset = dsb->free_pos;
+
+ buf[dsb->free_pos++] = ldw;
+ buf[dsb->free_pos++] = udw;
}
-static bool intel_dsb_disable_engine(struct drm_i915_private *i915,
- enum pipe pipe, enum dsb_id id)
+static bool intel_dsb_prev_ins_is_write(struct intel_dsb *dsb,
+ u32 opcode, i915_reg_t reg)
{
- u32 dsb_ctrl;
+ const u32 *buf = dsb->cmd_buf;
+ u32 prev_opcode, prev_reg;
- dsb_ctrl = intel_de_read(i915, DSB_CTRL(pipe, id));
- if (DSB_STATUS & dsb_ctrl) {
- drm_dbg_kms(&i915->drm, "DSB engine is busy.\n");
- return false;
- }
+ prev_opcode = buf[dsb->ins_start_offset + 1] >> DSB_OPCODE_SHIFT;
+ prev_reg = buf[dsb->ins_start_offset + 1] & DSB_REG_VALUE_MASK;
- dsb_ctrl &= ~DSB_ENABLE;
- intel_de_write(i915, DSB_CTRL(pipe, id), dsb_ctrl);
+ return prev_opcode == opcode && prev_reg == i915_mmio_reg_offset(reg);
+}
+
+static bool intel_dsb_prev_ins_is_mmio_write(struct intel_dsb *dsb, i915_reg_t reg)
+{
+ return intel_dsb_prev_ins_is_write(dsb, DSB_OPCODE_MMIO_WRITE, reg);
+}
- intel_de_posting_read(i915, DSB_CTRL(pipe, id));
- return true;
+static bool intel_dsb_prev_ins_is_indexed_write(struct intel_dsb *dsb, i915_reg_t reg)
+{
+ return intel_dsb_prev_ins_is_write(dsb, DSB_OPCODE_INDEXED_WRITE, reg);
}
/**
- * intel_dsb_indexed_reg_write() -Write to the DSB context for auto
- * increment register.
- * @crtc_state: intel_crtc_state structure
+ * intel_dsb_reg_write() - Emit register wriite to the DSB context
+ * @dsb: DSB context
* @reg: register address.
* @val: value.
*
* This function is used for writing register-value pair in command
- * buffer of DSB for auto-increment register. During command buffer overflow,
- * a warning is thrown and rest all erroneous condition register programming
- * is done through mmio write.
+ * buffer of DSB.
*/
-
-void intel_dsb_indexed_reg_write(const struct intel_crtc_state *crtc_state,
- i915_reg_t reg, u32 val)
+void intel_dsb_reg_write(struct intel_dsb *dsb,
+ i915_reg_t reg, u32 val)
{
- struct intel_dsb *dsb = crtc_state->dsb;
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- u32 *buf;
- u32 reg_val;
-
- if (!dsb) {
- intel_de_write_fw(dev_priv, reg, val);
- return;
- }
- buf = dsb->cmd_buf;
- if (drm_WARN_ON(&dev_priv->drm, dsb->free_pos >= DSB_BUF_SIZE)) {
- drm_dbg_kms(&dev_priv->drm, "DSB buffer overflow\n");
- return;
- }
-
/*
* For example the buffer will look like below for 3 dwords for auto
* increment register:
@@ -158,207 +163,182 @@ void intel_dsb_indexed_reg_write(const struct intel_crtc_state *crtc_state,
* we are writing odd no of dwords, Zeros will be added in the end for
* padding.
*/
- reg_val = buf[dsb->ins_start_offset + 1] & DSB_REG_VALUE_MASK;
- if (reg_val != i915_mmio_reg_offset(reg)) {
- /* Every instruction should be 8 byte aligned. */
- dsb->free_pos = ALIGN(dsb->free_pos, 2);
+ if (!intel_dsb_prev_ins_is_mmio_write(dsb, reg) &&
+ !intel_dsb_prev_ins_is_indexed_write(dsb, reg)) {
+ intel_dsb_emit(dsb, val,
+ (DSB_OPCODE_MMIO_WRITE << DSB_OPCODE_SHIFT) |
+ (DSB_BYTE_EN << DSB_BYTE_EN_SHIFT) |
+ i915_mmio_reg_offset(reg));
+ } else {
+ u32 *buf = dsb->cmd_buf;
- dsb->ins_start_offset = dsb->free_pos;
+ if (!assert_dsb_has_room(dsb))
+ return;
- /* Update the size. */
- buf[dsb->free_pos++] = 1;
+ /* convert to indexed write? */
+ if (intel_dsb_prev_ins_is_mmio_write(dsb, reg)) {
+ u32 prev_val = buf[dsb->ins_start_offset + 0];
- /* Update the opcode and reg. */
- buf[dsb->free_pos++] = (DSB_OPCODE_INDEXED_WRITE <<
- DSB_OPCODE_SHIFT) |
- i915_mmio_reg_offset(reg);
+ buf[dsb->ins_start_offset + 0] = 1; /* count */
+ buf[dsb->ins_start_offset + 1] =
+ (DSB_OPCODE_INDEXED_WRITE << DSB_OPCODE_SHIFT) |
+ i915_mmio_reg_offset(reg);
+ buf[dsb->ins_start_offset + 2] = prev_val;
- /* Update the value. */
- buf[dsb->free_pos++] = val;
- } else {
- /* Update the new value. */
- buf[dsb->free_pos++] = val;
+ dsb->free_pos++;
+ }
- /* Update the size. */
+ buf[dsb->free_pos++] = val;
+ /* Update the count */
buf[dsb->ins_start_offset]++;
- }
- /* if number of data words is odd, then the last dword should be 0.*/
- if (dsb->free_pos & 0x1)
- buf[dsb->free_pos] = 0;
+ /* if number of data words is odd, then the last dword should be 0.*/
+ if (dsb->free_pos & 0x1)
+ buf[dsb->free_pos] = 0;
+ }
}
-/**
- * intel_dsb_reg_write() -Write to the DSB context for normal
- * register.
- * @crtc_state: intel_crtc_state structure
- * @reg: register address.
- * @val: value.
- *
- * This function is used for writing register-value pair in command
- * buffer of DSB. During command buffer overflow, a warning is thrown
- * and rest all erroneous condition register programming is done
- * through mmio write.
- */
-void intel_dsb_reg_write(const struct intel_crtc_state *crtc_state,
- i915_reg_t reg, u32 val)
+static u32 intel_dsb_align_tail(struct intel_dsb *dsb)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct intel_dsb *dsb;
- u32 *buf;
+ u32 aligned_tail, tail;
- dsb = crtc_state->dsb;
- if (!dsb) {
- intel_de_write_fw(dev_priv, reg, val);
- return;
- }
+ tail = dsb->free_pos * 4;
+ aligned_tail = ALIGN(tail, CACHELINE_BYTES);
- buf = dsb->cmd_buf;
- if (drm_WARN_ON(&dev_priv->drm, dsb->free_pos >= DSB_BUF_SIZE)) {
- drm_dbg_kms(&dev_priv->drm, "DSB buffer overflow\n");
- return;
- }
+ if (aligned_tail > tail)
+ memset(&dsb->cmd_buf[dsb->free_pos], 0,
+ aligned_tail - tail);
- dsb->ins_start_offset = dsb->free_pos;
- buf[dsb->free_pos++] = val;
- buf[dsb->free_pos++] = (DSB_OPCODE_MMIO_WRITE << DSB_OPCODE_SHIFT) |
- (DSB_BYTE_EN << DSB_BYTE_EN_SHIFT) |
- i915_mmio_reg_offset(reg);
+ dsb->free_pos = aligned_tail / 4;
+
+ return aligned_tail;
}
/**
* intel_dsb_commit() - Trigger workload execution of DSB.
- * @crtc_state: intel_crtc_state structure
+ * @dsb: DSB context
*
* This function is used to do actual write to hardware using DSB.
- * On errors, fall back to MMIO. Also this function help to reset the context.
*/
-void intel_dsb_commit(const struct intel_crtc_state *crtc_state)
+void intel_dsb_commit(struct intel_dsb *dsb)
{
- struct intel_dsb *dsb = crtc_state->dsb;
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_crtc *crtc = dsb->crtc;
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
u32 tail;
- if (!(dsb && dsb->free_pos))
+ tail = intel_dsb_align_tail(dsb);
+ if (tail == 0)
return;
- if (!intel_dsb_enable_engine(dev_priv, pipe, dsb->id))
- goto reset;
-
if (is_dsb_busy(dev_priv, pipe, dsb->id)) {
- drm_err(&dev_priv->drm,
- "HEAD_PTR write failed - dsb engine is busy.\n");
+ drm_err(&dev_priv->drm, "DSB engine is busy.\n");
goto reset;
}
+
+ intel_de_write(dev_priv, DSB_CTRL(pipe, dsb->id),
+ DSB_ENABLE);
intel_de_write(dev_priv, DSB_HEAD(pipe, dsb->id),
i915_ggtt_offset(dsb->vma));
+ intel_de_write(dev_priv, DSB_TAIL(pipe, dsb->id),
+ i915_ggtt_offset(dsb->vma) + tail);
- tail = ALIGN(dsb->free_pos * 4, CACHELINE_BYTES);
- if (tail > dsb->free_pos * 4)
- memset(&dsb->cmd_buf[dsb->free_pos], 0,
- (tail - dsb->free_pos * 4));
-
- if (is_dsb_busy(dev_priv, pipe, dsb->id)) {
- drm_err(&dev_priv->drm,
- "TAIL_PTR write failed - dsb engine is busy.\n");
- goto reset;
- }
drm_dbg_kms(&dev_priv->drm,
"DSB execution started - head 0x%x, tail 0x%x\n",
- i915_ggtt_offset(dsb->vma), tail);
- intel_de_write(dev_priv, DSB_TAIL(pipe, dsb->id),
- i915_ggtt_offset(dsb->vma) + tail);
- if (wait_for(!is_dsb_busy(dev_priv, pipe, dsb->id), 1)) {
+ i915_ggtt_offset(dsb->vma),
+ i915_ggtt_offset(dsb->vma) + tail);
+
+ if (wait_for(!is_dsb_busy(dev_priv, pipe, dsb->id), 1))
drm_err(&dev_priv->drm,
"Timed out waiting for DSB workload completion.\n");
- goto reset;
- }
reset:
dsb->free_pos = 0;
dsb->ins_start_offset = 0;
- intel_dsb_disable_engine(dev_priv, pipe, dsb->id);
+ intel_de_write(dev_priv, DSB_CTRL(pipe, dsb->id), 0);
}
/**
* intel_dsb_prepare() - Allocate, pin and map the DSB command buffer.
- * @crtc_state: intel_crtc_state structure to prepare associated dsb instance.
+ * @crtc: the CRTC
+ * @max_cmds: number of commands we need to fit into command buffer
*
* This function prepare the command buffer which is used to store dsb
* instructions with data.
+ *
+ * Returns:
+ * DSB context, NULL on failure
*/
-void intel_dsb_prepare(struct intel_crtc_state *crtc_state)
+struct intel_dsb *intel_dsb_prepare(struct intel_crtc *crtc,
+ unsigned int max_cmds)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- struct intel_dsb *dsb;
struct drm_i915_gem_object *obj;
+ intel_wakeref_t wakeref;
+ struct intel_dsb *dsb;
struct i915_vma *vma;
+ unsigned int size;
u32 *buf;
- intel_wakeref_t wakeref;
if (!HAS_DSB(i915))
- return;
+ return NULL;
- dsb = kmalloc(sizeof(*dsb), GFP_KERNEL);
- if (!dsb) {
- drm_err(&i915->drm, "DSB object creation failed\n");
- return;
- }
+ dsb = kzalloc(sizeof(*dsb), GFP_KERNEL);
+ if (!dsb)
+ goto out;
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
- obj = i915_gem_object_create_internal(i915, DSB_BUF_SIZE);
- if (IS_ERR(obj)) {
- kfree(dsb);
- goto out;
- }
+ /* ~1 qword per instruction, full cachelines */
+ size = ALIGN(max_cmds * 8, CACHELINE_BYTES);
+
+ obj = i915_gem_object_create_internal(i915, PAGE_ALIGN(size));
+ if (IS_ERR(obj))
+ goto out_put_rpm;
vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
if (IS_ERR(vma)) {
i915_gem_object_put(obj);
- kfree(dsb);
- goto out;
+ goto out_put_rpm;
}
buf = i915_gem_object_pin_map_unlocked(vma->obj, I915_MAP_WC);
if (IS_ERR(buf)) {
i915_vma_unpin_and_release(&vma, I915_VMA_RELEASE_MAP);
- kfree(dsb);
- goto out;
+ goto out_put_rpm;
}
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+
dsb->id = DSB1;
dsb->vma = vma;
+ dsb->crtc = crtc;
dsb->cmd_buf = buf;
+ dsb->size = size / 4; /* in dwords */
dsb->free_pos = 0;
dsb->ins_start_offset = 0;
- crtc_state->dsb = dsb;
-out:
- if (!crtc_state->dsb)
- drm_info(&i915->drm,
- "DSB queue setup failed, will fallback to MMIO for display HW programming\n");
+ return dsb;
+
+out_put_rpm:
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+ kfree(dsb);
+out:
+ drm_info_once(&i915->drm,
+ "DSB queue setup failed, will fallback to MMIO for display HW programming\n");
+
+ return NULL;
}
/**
* intel_dsb_cleanup() - To cleanup DSB context.
- * @crtc_state: intel_crtc_state structure to cleanup associated dsb instance.
+ * @dsb: DSB context
*
* This function cleanup the DSB context by unpinning and releasing
* the VMA object associated with it.
*/
-void intel_dsb_cleanup(struct intel_crtc_state *crtc_state)
+void intel_dsb_cleanup(struct intel_dsb *dsb)
{
- if (!crtc_state->dsb)
- return;
-
- i915_vma_unpin_and_release(&crtc_state->dsb->vma, I915_VMA_RELEASE_MAP);
- kfree(crtc_state->dsb);
- crtc_state->dsb = NULL;
+ i915_vma_unpin_and_release(&dsb->vma, I915_VMA_RELEASE_MAP);
+ kfree(dsb);
}
diff --git a/drivers/gpu/drm/i915/display/intel_dsb.h b/drivers/gpu/drm/i915/display/intel_dsb.h
index 74dd2b3343bb..05c221b6d0a4 100644
--- a/drivers/gpu/drm/i915/display/intel_dsb.h
+++ b/drivers/gpu/drm/i915/display/intel_dsb.h
@@ -10,14 +10,14 @@
#include "i915_reg_defs.h"
-struct intel_crtc_state;
+struct intel_crtc;
+struct intel_dsb;
-void intel_dsb_prepare(struct intel_crtc_state *crtc_state);
-void intel_dsb_cleanup(struct intel_crtc_state *crtc_state);
-void intel_dsb_reg_write(const struct intel_crtc_state *crtc_state,
+struct intel_dsb *intel_dsb_prepare(struct intel_crtc *crtc,
+ unsigned int max_cmds);
+void intel_dsb_cleanup(struct intel_dsb *dsb);
+void intel_dsb_reg_write(struct intel_dsb *dsb,
i915_reg_t reg, u32 val);
-void intel_dsb_indexed_reg_write(const struct intel_crtc_state *crtc_state,
- i915_reg_t reg, u32 val);
-void intel_dsb_commit(const struct intel_crtc_state *crtc_state);
+void intel_dsb_commit(struct intel_dsb *dsb);
#endif
diff --git a/drivers/gpu/drm/i915/display/intel_dvo.c b/drivers/gpu/drm/i915/display/intel_dvo.c
index c86f9890754d..0be8105cb18a 100644
--- a/drivers/gpu/drm/i915/display/intel_dvo.c
+++ b/drivers/gpu/drm/i915/display/intel_dvo.c
@@ -38,6 +38,7 @@
#include "intel_display_types.h"
#include "intel_dvo.h"
#include "intel_dvo_dev.h"
+#include "intel_dvo_regs.h"
#include "intel_gmbus.h"
#include "intel_panel.h"
@@ -56,48 +57,42 @@ static const struct intel_dvo_device intel_dvo_devices[] = {
{
.type = INTEL_DVO_CHIP_TMDS,
.name = "sil164",
- .dvo_reg = DVOC,
- .dvo_srcdim_reg = DVOC_SRCDIM,
+ .port = PORT_C,
.slave_addr = SIL164_ADDR,
.dev_ops = &sil164_ops,
},
{
.type = INTEL_DVO_CHIP_TMDS,
.name = "ch7xxx",
- .dvo_reg = DVOC,
- .dvo_srcdim_reg = DVOC_SRCDIM,
+ .port = PORT_C,
.slave_addr = CH7xxx_ADDR,
.dev_ops = &ch7xxx_ops,
},
{
.type = INTEL_DVO_CHIP_TMDS,
.name = "ch7xxx",
- .dvo_reg = DVOC,
- .dvo_srcdim_reg = DVOC_SRCDIM,
+ .port = PORT_C,
.slave_addr = 0x75, /* For some ch7010 */
.dev_ops = &ch7xxx_ops,
},
{
.type = INTEL_DVO_CHIP_LVDS,
.name = "ivch",
- .dvo_reg = DVOA,
- .dvo_srcdim_reg = DVOA_SRCDIM,
+ .port = PORT_A,
.slave_addr = 0x02, /* Might also be 0x44, 0x84, 0xc4 */
.dev_ops = &ivch_ops,
},
{
.type = INTEL_DVO_CHIP_TMDS,
.name = "tfp410",
- .dvo_reg = DVOC,
- .dvo_srcdim_reg = DVOC_SRCDIM,
+ .port = PORT_C,
.slave_addr = TFP410_ADDR,
.dev_ops = &tfp410_ops,
},
{
.type = INTEL_DVO_CHIP_LVDS,
.name = "ch7017",
- .dvo_reg = DVOC,
- .dvo_srcdim_reg = DVOC_SRCDIM,
+ .port = PORT_C,
.slave_addr = 0x75,
.gpio = GMBUS_PIN_DPB,
.dev_ops = &ch7017_ops,
@@ -105,8 +100,7 @@ static const struct intel_dvo_device intel_dvo_devices[] = {
{
.type = INTEL_DVO_CHIP_LVDS_NO_FIXED,
.name = "ns2501",
- .dvo_reg = DVOB,
- .dvo_srcdim_reg = DVOB_SRCDIM,
+ .port = PORT_B,
.slave_addr = NS2501_ADDR,
.dev_ops = &ns2501_ops,
},
@@ -118,8 +112,6 @@ struct intel_dvo {
struct intel_dvo_device dev;
struct intel_connector *attached_connector;
-
- bool panel_wants_dither;
};
static struct intel_dvo *enc_to_dvo(struct intel_encoder *encoder)
@@ -134,12 +126,13 @@ static struct intel_dvo *intel_attached_dvo(struct intel_connector *connector)
static bool intel_dvo_connector_get_hw_state(struct intel_connector *connector)
{
- struct drm_device *dev = connector->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
+ enum port port = encoder->port;
u32 tmp;
- tmp = intel_de_read(dev_priv, intel_dvo->dev.dvo_reg);
+ tmp = intel_de_read(i915, DVO(port));
if (!(tmp & DVO_ENABLE))
return false;
@@ -150,13 +143,13 @@ static bool intel_dvo_connector_get_hw_state(struct intel_connector *connector)
static bool intel_dvo_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
u32 tmp;
- tmp = intel_de_read(dev_priv, intel_dvo->dev.dvo_reg);
+ tmp = intel_de_read(i915, DVO(port));
- *pipe = (tmp & DVO_PIPE_SEL_MASK) >> DVO_PIPE_SEL_SHIFT;
+ *pipe = REG_FIELD_GET(DVO_PIPE_SEL_MASK, tmp);
return tmp & DVO_ENABLE;
}
@@ -164,13 +157,13 @@ static bool intel_dvo_get_hw_state(struct intel_encoder *encoder,
static void intel_dvo_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
u32 tmp, flags = 0;
pipe_config->output_types |= BIT(INTEL_OUTPUT_DVO);
- tmp = intel_de_read(dev_priv, intel_dvo->dev.dvo_reg);
+ tmp = intel_de_read(i915, DVO(port));
if (tmp & DVO_HSYNC_ACTIVE_HIGH)
flags |= DRM_MODE_FLAG_PHSYNC;
else
@@ -190,14 +183,14 @@ static void intel_disable_dvo(struct intel_atomic_state *state,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
- i915_reg_t dvo_reg = intel_dvo->dev.dvo_reg;
- u32 temp = intel_de_read(dev_priv, dvo_reg);
+ enum port port = encoder->port;
intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, false);
- intel_de_write(dev_priv, dvo_reg, temp & ~DVO_ENABLE);
- intel_de_read(dev_priv, dvo_reg);
+
+ intel_de_rmw(i915, DVO(port), DVO_ENABLE, 0);
+ intel_de_posting_read(i915, DVO(port));
}
static void intel_enable_dvo(struct intel_atomic_state *state,
@@ -205,30 +198,29 @@ static void intel_enable_dvo(struct intel_atomic_state *state,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
- i915_reg_t dvo_reg = intel_dvo->dev.dvo_reg;
- u32 temp = intel_de_read(dev_priv, dvo_reg);
+ enum port port = encoder->port;
intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev,
&pipe_config->hw.mode,
&pipe_config->hw.adjusted_mode);
- intel_de_write(dev_priv, dvo_reg, temp | DVO_ENABLE);
- intel_de_read(dev_priv, dvo_reg);
+ intel_de_rmw(i915, DVO(port), 0, DVO_ENABLE);
+ intel_de_posting_read(i915, DVO(port));
intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true);
}
static enum drm_mode_status
-intel_dvo_mode_valid(struct drm_connector *connector,
+intel_dvo_mode_valid(struct drm_connector *_connector,
struct drm_display_mode *mode)
{
- struct intel_connector *intel_connector = to_intel_connector(connector);
- struct intel_dvo *intel_dvo = intel_attached_dvo(intel_connector);
+ struct intel_connector *connector = to_intel_connector(_connector);
+ struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
const struct drm_display_mode *fixed_mode =
- intel_panel_fixed_mode(intel_connector, mode);
- int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
+ intel_panel_fixed_mode(connector, mode);
+ int max_dotclk = to_i915(connector->base.dev)->max_dotclk_freq;
int target_clock = mode->clock;
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
@@ -239,7 +231,7 @@ intel_dvo_mode_valid(struct drm_connector *connector,
if (fixed_mode) {
enum drm_mode_status status;
- status = intel_panel_mode_valid(intel_connector, mode);
+ status = intel_panel_mode_valid(connector, mode);
if (status != MODE_OK)
return status;
@@ -289,18 +281,17 @@ static void intel_dvo_pre_enable(struct intel_atomic_state *state,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
- struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
+ enum port port = encoder->port;
enum pipe pipe = crtc->pipe;
u32 dvo_val;
- i915_reg_t dvo_reg = intel_dvo->dev.dvo_reg;
- i915_reg_t dvo_srcdim_reg = intel_dvo->dev.dvo_srcdim_reg;
- /* Save the data order, since I don't know what it should be set to. */
- dvo_val = intel_de_read(dev_priv, dvo_reg) &
- (DVO_PRESERVE_MASK | DVO_DATA_ORDER_GBRG);
+ /* Save the active data order, since I don't know what it should be set to. */
+ dvo_val = intel_de_read(i915, DVO(port)) &
+ (DVO_DEDICATED_INT_ENABLE |
+ DVO_PRESERVE_MASK | DVO_ACT_DATA_ORDER_MASK);
dvo_val |= DVO_DATA_ORDER_FP | DVO_BORDER_ENABLE |
DVO_BLANK_ACTIVE_HIGH;
@@ -311,19 +302,21 @@ static void intel_dvo_pre_enable(struct intel_atomic_state *state,
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
dvo_val |= DVO_VSYNC_ACTIVE_HIGH;
- intel_de_write(dev_priv, dvo_srcdim_reg,
- (adjusted_mode->crtc_hdisplay << DVO_SRCDIM_HORIZONTAL_SHIFT) | (adjusted_mode->crtc_vdisplay << DVO_SRCDIM_VERTICAL_SHIFT));
- intel_de_write(dev_priv, dvo_reg, dvo_val);
+ intel_de_write(i915, DVO_SRCDIM(port),
+ DVO_SRCDIM_HORIZONTAL(adjusted_mode->crtc_hdisplay) |
+ DVO_SRCDIM_VERTICAL(adjusted_mode->crtc_vdisplay));
+ intel_de_write(i915, DVO(port), dvo_val);
}
static enum drm_connector_status
-intel_dvo_detect(struct drm_connector *connector, bool force)
+intel_dvo_detect(struct drm_connector *_connector, bool force)
{
- struct drm_i915_private *i915 = to_i915(connector->dev);
- struct intel_dvo *intel_dvo = intel_attached_dvo(to_intel_connector(connector));
+ struct intel_connector *connector = to_intel_connector(_connector);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
- connector->base.id, connector->name);
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s]\n",
+ connector->base.base.id, connector->base.name);
if (!INTEL_DISPLAY_ENABLED(i915))
return connector_status_disconnected;
@@ -331,9 +324,10 @@ intel_dvo_detect(struct drm_connector *connector, bool force)
return intel_dvo->dev.dev_ops->detect(&intel_dvo->dev);
}
-static int intel_dvo_get_modes(struct drm_connector *connector)
+static int intel_dvo_get_modes(struct drm_connector *_connector)
{
- struct drm_i915_private *dev_priv = to_i915(connector->dev);
+ struct intel_connector *connector = to_intel_connector(_connector);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
int num_modes;
/*
@@ -342,12 +336,12 @@ static int intel_dvo_get_modes(struct drm_connector *connector)
* (TV-out, for example), but for now with just TMDS and LVDS,
* that's not the case.
*/
- num_modes = intel_ddc_get_modes(connector,
- intel_gmbus_get_adapter(dev_priv, GMBUS_PIN_DPC));
+ num_modes = intel_ddc_get_modes(&connector->base,
+ intel_gmbus_get_adapter(i915, GMBUS_PIN_DPC));
if (num_modes)
return num_modes;
- return intel_panel_get_modes(to_intel_connector(connector));
+ return intel_panel_get_modes(connector);
}
static const struct drm_connector_funcs intel_dvo_connector_funcs = {
@@ -379,165 +373,187 @@ static const struct drm_encoder_funcs intel_dvo_enc_funcs = {
.destroy = intel_dvo_enc_destroy,
};
-static enum port intel_dvo_port(i915_reg_t dvo_reg)
+static int intel_dvo_encoder_type(const struct intel_dvo_device *dvo)
+{
+ switch (dvo->type) {
+ case INTEL_DVO_CHIP_TMDS:
+ return DRM_MODE_ENCODER_TMDS;
+ case INTEL_DVO_CHIP_LVDS_NO_FIXED:
+ case INTEL_DVO_CHIP_LVDS:
+ return DRM_MODE_ENCODER_LVDS;
+ default:
+ MISSING_CASE(dvo->type);
+ return DRM_MODE_ENCODER_NONE;
+ }
+}
+
+static int intel_dvo_connector_type(const struct intel_dvo_device *dvo)
+{
+ switch (dvo->type) {
+ case INTEL_DVO_CHIP_TMDS:
+ return DRM_MODE_CONNECTOR_DVII;
+ case INTEL_DVO_CHIP_LVDS_NO_FIXED:
+ case INTEL_DVO_CHIP_LVDS:
+ return DRM_MODE_CONNECTOR_LVDS;
+ default:
+ MISSING_CASE(dvo->type);
+ return DRM_MODE_CONNECTOR_Unknown;
+ }
+}
+
+static bool intel_dvo_init_dev(struct drm_i915_private *dev_priv,
+ struct intel_dvo *intel_dvo,
+ const struct intel_dvo_device *dvo)
{
- if (i915_mmio_reg_equal(dvo_reg, DVOA))
- return PORT_A;
- else if (i915_mmio_reg_equal(dvo_reg, DVOB))
- return PORT_B;
+ struct i2c_adapter *i2c;
+ u32 dpll[I915_MAX_PIPES];
+ enum pipe pipe;
+ int gpio;
+ bool ret;
+
+ /*
+ * Allow the I2C driver info to specify the GPIO to be used in
+ * special cases, but otherwise default to what's defined
+ * in the spec.
+ */
+ if (intel_gmbus_is_valid_pin(dev_priv, dvo->gpio))
+ gpio = dvo->gpio;
+ else if (dvo->type == INTEL_DVO_CHIP_LVDS)
+ gpio = GMBUS_PIN_SSC;
else
- return PORT_C;
+ gpio = GMBUS_PIN_DPB;
+
+ /*
+ * Set up the I2C bus necessary for the chip we're probing.
+ * It appears that everything is on GPIOE except for panels
+ * on i830 laptops, which are on GPIOB (DVOA).
+ */
+ i2c = intel_gmbus_get_adapter(dev_priv, gpio);
+
+ intel_dvo->dev = *dvo;
+
+ /*
+ * GMBUS NAK handling seems to be unstable, hence let the
+ * transmitter detection run in bit banging mode for now.
+ */
+ intel_gmbus_force_bit(i2c, true);
+
+ /*
+ * ns2501 requires the DVO 2x clock before it will
+ * respond to i2c accesses, so make sure we have
+ * the clock enabled before we attempt to initialize
+ * the device.
+ */
+ for_each_pipe(dev_priv, pipe) {
+ dpll[pipe] = intel_de_read(dev_priv, DPLL(pipe));
+ intel_de_write(dev_priv, DPLL(pipe),
+ dpll[pipe] | DPLL_DVO_2X_MODE);
+ }
+
+ ret = dvo->dev_ops->init(&intel_dvo->dev, i2c);
+
+ /* restore the DVO 2x clock state to original */
+ for_each_pipe(dev_priv, pipe) {
+ intel_de_write(dev_priv, DPLL(pipe), dpll[pipe]);
+ }
+
+ intel_gmbus_force_bit(i2c, false);
+
+ return ret;
}
-void intel_dvo_init(struct drm_i915_private *dev_priv)
+static bool intel_dvo_probe(struct drm_i915_private *i915,
+ struct intel_dvo *intel_dvo)
{
- struct intel_encoder *intel_encoder;
- struct intel_dvo *intel_dvo;
- struct intel_connector *intel_connector;
int i;
- int encoder_type = DRM_MODE_ENCODER_NONE;
+
+ /* Now, try to find a controller */
+ for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) {
+ if (intel_dvo_init_dev(i915, intel_dvo,
+ &intel_dvo_devices[i]))
+ return true;
+ }
+
+ return false;
+}
+
+void intel_dvo_init(struct drm_i915_private *i915)
+{
+ struct intel_connector *connector;
+ struct intel_encoder *encoder;
+ struct intel_dvo *intel_dvo;
intel_dvo = kzalloc(sizeof(*intel_dvo), GFP_KERNEL);
if (!intel_dvo)
return;
- intel_connector = intel_connector_alloc();
- if (!intel_connector) {
+ connector = intel_connector_alloc();
+ if (!connector) {
kfree(intel_dvo);
return;
}
- intel_dvo->attached_connector = intel_connector;
+ intel_dvo->attached_connector = connector;
- intel_encoder = &intel_dvo->base;
+ encoder = &intel_dvo->base;
- intel_encoder->disable = intel_disable_dvo;
- intel_encoder->enable = intel_enable_dvo;
- intel_encoder->get_hw_state = intel_dvo_get_hw_state;
- intel_encoder->get_config = intel_dvo_get_config;
- intel_encoder->compute_config = intel_dvo_compute_config;
- intel_encoder->pre_enable = intel_dvo_pre_enable;
- intel_connector->get_hw_state = intel_dvo_connector_get_hw_state;
+ encoder->disable = intel_disable_dvo;
+ encoder->enable = intel_enable_dvo;
+ encoder->get_hw_state = intel_dvo_get_hw_state;
+ encoder->get_config = intel_dvo_get_config;
+ encoder->compute_config = intel_dvo_compute_config;
+ encoder->pre_enable = intel_dvo_pre_enable;
+ connector->get_hw_state = intel_dvo_connector_get_hw_state;
- /* Now, try to find a controller */
- for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) {
- struct drm_connector *connector = &intel_connector->base;
- const struct intel_dvo_device *dvo = &intel_dvo_devices[i];
- struct i2c_adapter *i2c;
- int gpio;
- bool dvoinit;
- enum pipe pipe;
- u32 dpll[I915_MAX_PIPES];
- enum port port;
+ if (!intel_dvo_probe(i915, intel_dvo)) {
+ kfree(intel_dvo);
+ intel_connector_free(connector);
+ return;
+ }
- /*
- * Allow the I2C driver info to specify the GPIO to be used in
- * special cases, but otherwise default to what's defined
- * in the spec.
- */
- if (intel_gmbus_is_valid_pin(dev_priv, dvo->gpio))
- gpio = dvo->gpio;
- else if (dvo->type == INTEL_DVO_CHIP_LVDS)
- gpio = GMBUS_PIN_SSC;
- else
- gpio = GMBUS_PIN_DPB;
+ encoder->type = INTEL_OUTPUT_DVO;
+ encoder->power_domain = POWER_DOMAIN_PORT_OTHER;
+ encoder->port = intel_dvo->dev.port;
+ encoder->pipe_mask = ~0;
- /*
- * Set up the I2C bus necessary for the chip we're probing.
- * It appears that everything is on GPIOE except for panels
- * on i830 laptops, which are on GPIOB (DVOA).
- */
- i2c = intel_gmbus_get_adapter(dev_priv, gpio);
+ if (intel_dvo->dev.type != INTEL_DVO_CHIP_LVDS)
+ encoder->cloneable = BIT(INTEL_OUTPUT_ANALOG) |
+ BIT(INTEL_OUTPUT_DVO);
- intel_dvo->dev = *dvo;
+ drm_encoder_init(&i915->drm, &encoder->base,
+ &intel_dvo_enc_funcs,
+ intel_dvo_encoder_type(&intel_dvo->dev),
+ "DVO %c", port_name(encoder->port));
- /*
- * GMBUS NAK handling seems to be unstable, hence let the
- * transmitter detection run in bit banging mode for now.
- */
- intel_gmbus_force_bit(i2c, true);
+ drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] detected %s\n",
+ encoder->base.base.id, encoder->base.name,
+ intel_dvo->dev.name);
+
+ if (intel_dvo->dev.type == INTEL_DVO_CHIP_TMDS)
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT |
+ DRM_CONNECTOR_POLL_DISCONNECT;
+
+ drm_connector_init(&i915->drm, &connector->base,
+ &intel_dvo_connector_funcs,
+ intel_dvo_connector_type(&intel_dvo->dev));
+
+ drm_connector_helper_add(&connector->base,
+ &intel_dvo_connector_helper_funcs);
+ connector->base.display_info.subpixel_order = SubPixelHorizontalRGB;
+ intel_connector_attach_encoder(connector, encoder);
+
+ if (intel_dvo->dev.type == INTEL_DVO_CHIP_LVDS) {
/*
- * ns2501 requires the DVO 2x clock before it will
- * respond to i2c accesses, so make sure we have
- * have the clock enabled before we attempt to
- * initialize the device.
+ * For our LVDS chipsets, we should hopefully be able
+ * to dig the fixed panel mode out of the BIOS data.
+ * However, it's in a different format from the BIOS
+ * data on chipsets with integrated LVDS (stored in AIM
+ * headers, likely), so for now, just get the current
+ * mode being output through DVO.
*/
- for_each_pipe(dev_priv, pipe) {
- dpll[pipe] = intel_de_read(dev_priv, DPLL(pipe));
- intel_de_write(dev_priv, DPLL(pipe),
- dpll[pipe] | DPLL_DVO_2X_MODE);
- }
-
- dvoinit = dvo->dev_ops->init(&intel_dvo->dev, i2c);
-
- /* restore the DVO 2x clock state to original */
- for_each_pipe(dev_priv, pipe) {
- intel_de_write(dev_priv, DPLL(pipe), dpll[pipe]);
- }
-
- intel_gmbus_force_bit(i2c, false);
-
- if (!dvoinit)
- continue;
-
- port = intel_dvo_port(dvo->dvo_reg);
- drm_encoder_init(&dev_priv->drm, &intel_encoder->base,
- &intel_dvo_enc_funcs, encoder_type,
- "DVO %c", port_name(port));
-
- intel_encoder->type = INTEL_OUTPUT_DVO;
- intel_encoder->power_domain = POWER_DOMAIN_PORT_OTHER;
- intel_encoder->port = port;
- intel_encoder->pipe_mask = ~0;
-
- if (dvo->type != INTEL_DVO_CHIP_LVDS)
- intel_encoder->cloneable = BIT(INTEL_OUTPUT_ANALOG) |
- BIT(INTEL_OUTPUT_DVO);
-
- switch (dvo->type) {
- case INTEL_DVO_CHIP_TMDS:
- intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT |
- DRM_CONNECTOR_POLL_DISCONNECT;
- drm_connector_init(&dev_priv->drm, connector,
- &intel_dvo_connector_funcs,
- DRM_MODE_CONNECTOR_DVII);
- encoder_type = DRM_MODE_ENCODER_TMDS;
- break;
- case INTEL_DVO_CHIP_LVDS_NO_FIXED:
- case INTEL_DVO_CHIP_LVDS:
- drm_connector_init(&dev_priv->drm, connector,
- &intel_dvo_connector_funcs,
- DRM_MODE_CONNECTOR_LVDS);
- encoder_type = DRM_MODE_ENCODER_LVDS;
- break;
- }
-
- drm_connector_helper_add(connector,
- &intel_dvo_connector_helper_funcs);
- connector->display_info.subpixel_order = SubPixelHorizontalRGB;
-
- intel_connector_attach_encoder(intel_connector, intel_encoder);
- if (dvo->type == INTEL_DVO_CHIP_LVDS) {
- /*
- * For our LVDS chipsets, we should hopefully be able
- * to dig the fixed panel mode out of the BIOS data.
- * However, it's in a different format from the BIOS
- * data on chipsets with integrated LVDS (stored in AIM
- * headers, likely), so for now, just get the current
- * mode being output through DVO.
- */
- intel_panel_add_encoder_fixed_mode(intel_connector,
- intel_encoder);
-
- intel_panel_init(intel_connector);
-
- intel_dvo->panel_wants_dither = true;
- }
+ intel_panel_add_encoder_fixed_mode(connector, encoder);
- return;
+ intel_panel_init(connector, NULL);
}
-
- kfree(intel_dvo);
- kfree(intel_connector);
}
diff --git a/drivers/gpu/drm/i915/display/intel_dvo_dev.h b/drivers/gpu/drm/i915/display/intel_dvo_dev.h
index ecff7b190856..f7e98e1c6470 100644
--- a/drivers/gpu/drm/i915/display/intel_dvo_dev.h
+++ b/drivers/gpu/drm/i915/display/intel_dvo_dev.h
@@ -25,6 +25,8 @@
#include "i915_reg_defs.h"
+#include "intel_display_limits.h"
+
enum drm_connector_status;
struct drm_display_mode;
struct i2c_adapter;
@@ -32,9 +34,8 @@ struct i2c_adapter;
struct intel_dvo_device {
const char *name;
int type;
- /* DVOA/B/C output register */
- i915_reg_t dvo_reg;
- i915_reg_t dvo_srcdim_reg;
+ /* DVOA/B/C */
+ enum port port;
/* GPIO register used for i2c bus to control this device */
u32 gpio;
int slave_addr;
diff --git a/drivers/gpu/drm/i915/display/intel_dvo_regs.h b/drivers/gpu/drm/i915/display/intel_dvo_regs.h
new file mode 100644
index 000000000000..6f9058462850
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dvo_regs.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef __INTEL_DVO_REGS_H__
+#define __INTEL_DVO_REGS_H__
+
+#include "intel_display_reg_defs.h"
+
+#define _DVOA 0x61120
+#define _DVOB 0x61140
+#define _DVOC 0x61160
+#define DVO(port) _MMIO_PORT((port), _DVOA, _DVOB)
+#define DVO_ENABLE REG_BIT(31)
+#define DVO_PIPE_SEL_MASK REG_BIT(30)
+#define DVO_PIPE_SEL(pipe) REG_FIELD_PREP(DVO_PIPE_SEL_MASK, (pipe))
+#define DVO_PIPE_STALL_MASK REG_GENMASK(29, 28)
+#define DVO_PIPE_STALL_UNUSED REG_FIELD_PREP(DVO_PIPE_STALL_MASK, 0)
+#define DVO_PIPE_STALL REG_FIELD_PREP(DVO_PIPE_STALL_MASK, 1)
+#define DVO_PIPE_STALL_TV REG_FIELD_PREP(DVO_PIPE_STALL_MASK, 2)
+#define DVO_INTERRUPT_SELECT REG_BIT(27)
+#define DVO_DEDICATED_INT_ENABLE REG_BIT(26)
+#define DVO_PRESERVE_MASK REG_GENMASK(25, 24)
+#define DVO_USE_VGA_SYNC REG_BIT(15)
+#define DVO_DATA_ORDER_MASK REG_BIT(14)
+#define DVO_DATA_ORDER_I740 REG_FIELD_PREP(DVO_DATA_ORDER_MASK, 0)
+#define DVO_DATA_ORDER_FP REG_FIELD_PREP(DVO_DATA_ORDER_MASK, 1)
+#define DVO_VSYNC_DISABLE REG_BIT(11)
+#define DVO_HSYNC_DISABLE REG_BIT(10)
+#define DVO_VSYNC_TRISTATE REG_BIT(9)
+#define DVO_HSYNC_TRISTATE REG_BIT(8)
+#define DVO_BORDER_ENABLE REG_BIT(7)
+#define DVO_ACT_DATA_ORDER_MASK REG_BIT(6)
+#define DVO_ACT_DATA_ORDER_RGGB REG_FIELD_PREP(DVO_ACT_DATA_ORDER_MASK, 0)
+#define DVO_ACT_DATA_ORDER_GBRG REG_FIELD_PREP(DVO_ACT_DATA_ORDER_MASK, 1)
+#define DVO_ACT_DATA_ORDER_GBRG_ERRATA REG_FIELD_PREP(DVO_ACT_DATA_ORDER_MASK, 0)
+#define DVO_ACT_DATA_ORDER_RGGB_ERRATA REG_FIELD_PREP(DVO_ACT_DATA_ORDER_MASK, 1)
+#define DVO_VSYNC_ACTIVE_HIGH REG_BIT(4)
+#define DVO_HSYNC_ACTIVE_HIGH REG_BIT(3)
+#define DVO_BLANK_ACTIVE_HIGH REG_BIT(2)
+#define DVO_OUTPUT_CSTATE_PIXELS REG_BIT(1) /* SDG only */
+#define DVO_OUTPUT_SOURCE_SIZE_PIXELS REG_BIT(0) /* SDG only */
+
+#define _DVOA_SRCDIM 0x61124
+#define _DVOB_SRCDIM 0x61144
+#define _DVOC_SRCDIM 0x61164
+#define DVO_SRCDIM(port) _MMIO_PORT((port), _DVOA_SRCDIM, _DVOB_SRCDIM)
+#define DVO_SRCDIM_HORIZONTAL_MASK REG_GENMASK(22, 12)
+#define DVO_SRCDIM_HORIZONTAL(x) REG_FIELD_PREP(DVO_SRCDIM_HORIZONTAL_MASK, (x))
+#define DVO_SRCDIM_VERTICAL_MASK REG_GENMASK(10, 0)
+#define DVO_SRCDIM_VERTICAL(x) REG_FIELD_PREP(DVO_SRCDIM_VERTICAL_MASK, (x))
+
+#endif /* __INTEL_DVO_REGS_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_fb.c b/drivers/gpu/drm/i915/display/intel_fb.c
index 63137ae5ab21..93d0e46e5481 100644
--- a/drivers/gpu/drm/i915/display/intel_fb.c
+++ b/drivers/gpu/drm/i915/display/intel_fb.c
@@ -174,7 +174,7 @@ static const struct intel_modifier_desc intel_modifiers[] = {
.plane_caps = INTEL_PLANE_CAP_TILING_4 | INTEL_PLANE_CAP_CCS_RC,
}, {
.modifier = I915_FORMAT_MOD_4_TILED,
- .display_ver = { 13, 13 },
+ .display_ver = { 13, -1 },
.plane_caps = INTEL_PLANE_CAP_TILING_4,
}, {
.modifier = I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS,
diff --git a/drivers/gpu/drm/i915/display/intel_fb_pin.c b/drivers/gpu/drm/i915/display/intel_fb_pin.c
index 6900acbb1381..1aca7552a85d 100644
--- a/drivers/gpu/drm/i915/display/intel_fb_pin.c
+++ b/drivers/gpu/drm/i915/display/intel_fb_pin.c
@@ -91,7 +91,7 @@ intel_pin_fb_obj_dpt(struct drm_framebuffer *fb,
goto err;
}
- vma->display_alignment = max_t(u64, vma->display_alignment, alignment);
+ vma->display_alignment = max(vma->display_alignment, alignment);
i915_gem_object_flush_if_display(obj);
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c
index b5ee5ea0d010..b507ff944864 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.c
+++ b/drivers/gpu/drm/i915/display/intel_fbc.c
@@ -323,25 +323,23 @@ static void i8xx_fbc_nuke(struct intel_fbc *fbc)
enum i9xx_plane_id i9xx_plane = fbc_state->plane->i9xx_plane;
struct drm_i915_private *dev_priv = fbc->i915;
- spin_lock_irq(&dev_priv->uncore.lock);
intel_de_write_fw(dev_priv, DSPADDR(i9xx_plane),
intel_de_read_fw(dev_priv, DSPADDR(i9xx_plane)));
- spin_unlock_irq(&dev_priv->uncore.lock);
}
static void i8xx_fbc_program_cfb(struct intel_fbc *fbc)
{
struct drm_i915_private *i915 = fbc->i915;
- GEM_BUG_ON(range_overflows_end_t(u64, i915->dsm.start,
+ GEM_BUG_ON(range_overflows_end_t(u64, i915->dsm.stolen.start,
fbc->compressed_fb.start, U32_MAX));
- GEM_BUG_ON(range_overflows_end_t(u64, i915->dsm.start,
+ GEM_BUG_ON(range_overflows_end_t(u64, i915->dsm.stolen.start,
fbc->compressed_llb.start, U32_MAX));
intel_de_write(i915, FBC_CFB_BASE,
- i915->dsm.start + fbc->compressed_fb.start);
+ i915->dsm.stolen.start + fbc->compressed_fb.start);
intel_de_write(i915, FBC_LL_BASE,
- i915->dsm.start + fbc->compressed_llb.start);
+ i915->dsm.stolen.start + fbc->compressed_llb.start);
}
static const struct intel_fbc_funcs i8xx_fbc_funcs = {
@@ -359,10 +357,8 @@ static void i965_fbc_nuke(struct intel_fbc *fbc)
enum i9xx_plane_id i9xx_plane = fbc_state->plane->i9xx_plane;
struct drm_i915_private *dev_priv = fbc->i915;
- spin_lock_irq(&dev_priv->uncore.lock);
intel_de_write_fw(dev_priv, DSPSURF(i9xx_plane),
intel_de_read_fw(dev_priv, DSPSURF(i9xx_plane)));
- spin_unlock_irq(&dev_priv->uncore.lock);
}
static const struct intel_fbc_funcs i965_fbc_funcs = {
@@ -716,7 +712,7 @@ static u64 intel_fbc_stolen_end(struct drm_i915_private *i915)
* underruns, even if that range is not reserved by the BIOS. */
if (IS_BROADWELL(i915) ||
(DISPLAY_VER(i915) == 9 && !IS_BROXTON(i915)))
- end = resource_size(&i915->dsm) - 8 * 1024 * 1024;
+ end = resource_size(&i915->dsm.stolen) - 8 * 1024 * 1024;
else
end = U64_MAX;
@@ -815,7 +811,7 @@ static void intel_fbc_program_cfb(struct intel_fbc *fbc)
static void intel_fbc_program_workarounds(struct intel_fbc *fbc)
{
- /* Wa_22014263786:icl,jsl,tgl,dg1,rkl,adls,adlp */
+ /* Wa_22014263786:icl,jsl,tgl,dg1,rkl,adls,adlp,mtl */
if (DISPLAY_VER(fbc->i915) >= 11 && !IS_DG2(fbc->i915))
intel_de_rmw(fbc->i915, ILK_DPFC_CHICKEN(fbc->id), 0,
DPFC_CHICKEN_FORCE_SLB_INVALIDATION);
@@ -1095,7 +1091,9 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state,
}
/* Wa_14016291713 */
- if (IS_DISPLAY_VER(i915, 12, 13) && crtc_state->has_psr) {
+ if ((IS_DISPLAY_VER(i915, 12, 13) ||
+ IS_MTL_DISPLAY_STEP(i915, STEP_A0, STEP_C0)) &&
+ crtc_state->has_psr) {
plane_state->no_fbc_reason = "PSR1 enabled (Wa_14016291713)";
return 0;
}
@@ -1809,10 +1807,10 @@ static int intel_fbc_debugfs_false_color_set(void *data, u64 val)
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(intel_fbc_debugfs_false_color_fops,
- intel_fbc_debugfs_false_color_get,
- intel_fbc_debugfs_false_color_set,
- "%llu\n");
+DEFINE_DEBUGFS_ATTRIBUTE(intel_fbc_debugfs_false_color_fops,
+ intel_fbc_debugfs_false_color_get,
+ intel_fbc_debugfs_false_color_set,
+ "%llu\n");
static void intel_fbc_debugfs_add(struct intel_fbc *fbc,
struct dentry *parent)
@@ -1821,8 +1819,8 @@ static void intel_fbc_debugfs_add(struct intel_fbc *fbc,
fbc, &intel_fbc_debugfs_status_fops);
if (fbc->funcs->set_false_color)
- debugfs_create_file("i915_fbc_false_color", 0644, parent,
- fbc, &intel_fbc_debugfs_false_color_fops);
+ debugfs_create_file_unsafe("i915_fbc_false_color", 0644, parent,
+ fbc, &intel_fbc_debugfs_false_color_fops);
}
void intel_fbc_crtc_debugfs_add(struct intel_crtc *crtc)
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c
index 5575d7abdc09..f76b06293eb9 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/display/intel_fbdev.c
@@ -170,7 +170,7 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
* important and we should probably use that space with FBC or other
* features.
*/
- if (size * 2 < dev_priv->stolen_usable_size)
+ if (size * 2 < dev_priv->dsm.usable_size)
obj = i915_gem_object_create_stolen(dev_priv, size);
if (IS_ERR(obj))
obj = i915_gem_object_create_shmem(dev_priv, size);
@@ -267,26 +267,19 @@ static int intelfb_create(struct drm_fb_helper *helper,
info->fbops = &intelfb_ops;
- /* setup aperture base/size for vesafb takeover */
obj = intel_fb_obj(&intel_fb->base);
if (i915_gem_object_is_lmem(obj)) {
struct intel_memory_region *mem = obj->mm.region;
- info->apertures->ranges[0].base = mem->io_start;
- info->apertures->ranges[0].size = mem->io_size;
-
/* Use fbdev's framebuffer from lmem for discrete */
info->fix.smem_start =
(unsigned long)(mem->io_start +
i915_gem_object_get_dma_address(obj, 0));
info->fix.smem_len = obj->base.size;
} else {
- info->apertures->ranges[0].base = ggtt->gmadr.start;
- info->apertures->ranges[0].size = ggtt->mappable_end;
-
/* Our framebuffer is the entirety of fbdev's system memory */
info->fix.smem_start =
- (unsigned long)(ggtt->gmadr.start + vma->node.start);
+ (unsigned long)(ggtt->gmadr.start + i915_ggtt_offset(vma));
info->fix.smem_len = vma->size;
}
@@ -328,8 +321,20 @@ out_unlock:
return ret;
}
+static int intelfb_dirty(struct drm_fb_helper *helper, struct drm_clip_rect *clip)
+{
+ if (!(clip->x1 < clip->x2 && clip->y1 < clip->y2))
+ return 0;
+
+ if (helper->fb->funcs->dirty)
+ return helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, clip, 1);
+
+ return 0;
+}
+
static const struct drm_fb_helper_funcs intel_fb_helper_funcs = {
.fb_probe = intelfb_create,
+ .fb_dirty = intelfb_dirty,
};
static void intel_fbdev_destroy(struct intel_fbdev *ifbdev)
@@ -347,6 +352,7 @@ static void intel_fbdev_destroy(struct intel_fbdev *ifbdev)
if (ifbdev->fb)
drm_framebuffer_remove(&ifbdev->fb->base);
+ drm_fb_helper_unprepare(&ifbdev->helper);
kfree(ifbdev);
}
@@ -527,10 +533,12 @@ int intel_fbdev_init(struct drm_device *dev)
return -ENOMEM;
mutex_init(&ifbdev->hpd_lock);
- drm_fb_helper_prepare(dev, &ifbdev->helper, &intel_fb_helper_funcs);
+ drm_fb_helper_prepare(dev, &ifbdev->helper, 32, &intel_fb_helper_funcs);
- if (!intel_fbdev_init_bios(dev, ifbdev))
- ifbdev->preferred_bpp = 32;
+ if (intel_fbdev_init_bios(dev, ifbdev))
+ ifbdev->helper.preferred_bpp = ifbdev->preferred_bpp;
+ else
+ ifbdev->preferred_bpp = ifbdev->helper.preferred_bpp;
ret = drm_fb_helper_init(dev, &ifbdev->helper);
if (ret) {
@@ -549,8 +557,7 @@ static void intel_fbdev_initial_config(void *data, async_cookie_t cookie)
struct intel_fbdev *ifbdev = data;
/* Due to peculiar init order wrt to hpd handling this is separate. */
- if (drm_fb_helper_initial_config(&ifbdev->helper,
- ifbdev->preferred_bpp))
+ if (drm_fb_helper_initial_config(&ifbdev->helper))
intel_fbdev_unregister(to_i915(ifbdev->helper.dev));
}
@@ -626,7 +633,13 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous
struct intel_fbdev *ifbdev = dev_priv->display.fbdev.fbdev;
struct fb_info *info;
- if (!ifbdev || !ifbdev->vma)
+ if (!ifbdev)
+ return;
+
+ if (drm_WARN_ON(&dev_priv->drm, !HAS_DISPLAY(dev_priv)))
+ return;
+
+ if (!ifbdev->vma)
goto set_suspend;
info = ifbdev->helper.info;
diff --git a/drivers/gpu/drm/i915/display/intel_gmbus.c b/drivers/gpu/drm/i915/display/intel_gmbus.c
index a5840a28a69d..0bc4f6b48e80 100644
--- a/drivers/gpu/drm/i915/display/intel_gmbus.c
+++ b/drivers/gpu/drm/i915/display/intel_gmbus.c
@@ -255,14 +255,12 @@ static void bxt_gmbus_clock_gating(struct drm_i915_private *i915,
static u32 get_reserved(struct intel_gmbus *bus)
{
struct drm_i915_private *i915 = bus->i915;
- struct intel_uncore *uncore = &i915->uncore;
u32 reserved = 0;
/* On most chips, these bits must be preserved in software. */
if (!IS_I830(i915) && !IS_I845G(i915))
- reserved = intel_uncore_read_notrace(uncore, bus->gpio_reg) &
- (GPIO_DATA_PULLUP_DISABLE |
- GPIO_CLOCK_PULLUP_DISABLE);
+ reserved = intel_de_read_notrace(i915, bus->gpio_reg) &
+ (GPIO_DATA_PULLUP_DISABLE | GPIO_CLOCK_PULLUP_DISABLE);
return reserved;
}
@@ -270,37 +268,31 @@ static u32 get_reserved(struct intel_gmbus *bus)
static int get_clock(void *data)
{
struct intel_gmbus *bus = data;
- struct intel_uncore *uncore = &bus->i915->uncore;
+ struct drm_i915_private *i915 = bus->i915;
u32 reserved = get_reserved(bus);
- intel_uncore_write_notrace(uncore,
- bus->gpio_reg,
- reserved | GPIO_CLOCK_DIR_MASK);
- intel_uncore_write_notrace(uncore, bus->gpio_reg, reserved);
+ intel_de_write_notrace(i915, bus->gpio_reg, reserved | GPIO_CLOCK_DIR_MASK);
+ intel_de_write_notrace(i915, bus->gpio_reg, reserved);
- return (intel_uncore_read_notrace(uncore, bus->gpio_reg) &
- GPIO_CLOCK_VAL_IN) != 0;
+ return (intel_de_read_notrace(i915, bus->gpio_reg) & GPIO_CLOCK_VAL_IN) != 0;
}
static int get_data(void *data)
{
struct intel_gmbus *bus = data;
- struct intel_uncore *uncore = &bus->i915->uncore;
+ struct drm_i915_private *i915 = bus->i915;
u32 reserved = get_reserved(bus);
- intel_uncore_write_notrace(uncore,
- bus->gpio_reg,
- reserved | GPIO_DATA_DIR_MASK);
- intel_uncore_write_notrace(uncore, bus->gpio_reg, reserved);
+ intel_de_write_notrace(i915, bus->gpio_reg, reserved | GPIO_DATA_DIR_MASK);
+ intel_de_write_notrace(i915, bus->gpio_reg, reserved);
- return (intel_uncore_read_notrace(uncore, bus->gpio_reg) &
- GPIO_DATA_VAL_IN) != 0;
+ return (intel_de_read_notrace(i915, bus->gpio_reg) & GPIO_DATA_VAL_IN) != 0;
}
static void set_clock(void *data, int state_high)
{
struct intel_gmbus *bus = data;
- struct intel_uncore *uncore = &bus->i915->uncore;
+ struct drm_i915_private *i915 = bus->i915;
u32 reserved = get_reserved(bus);
u32 clock_bits;
@@ -310,16 +302,14 @@ static void set_clock(void *data, int state_high)
clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK |
GPIO_CLOCK_VAL_MASK;
- intel_uncore_write_notrace(uncore,
- bus->gpio_reg,
- reserved | clock_bits);
- intel_uncore_posting_read(uncore, bus->gpio_reg);
+ intel_de_write_notrace(i915, bus->gpio_reg, reserved | clock_bits);
+ intel_de_posting_read(i915, bus->gpio_reg);
}
static void set_data(void *data, int state_high)
{
struct intel_gmbus *bus = data;
- struct intel_uncore *uncore = &bus->i915->uncore;
+ struct drm_i915_private *i915 = bus->i915;
u32 reserved = get_reserved(bus);
u32 data_bits;
@@ -329,8 +319,8 @@ static void set_data(void *data, int state_high)
data_bits = GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK |
GPIO_DATA_VAL_MASK;
- intel_uncore_write_notrace(uncore, bus->gpio_reg, reserved | data_bits);
- intel_uncore_posting_read(uncore, bus->gpio_reg);
+ intel_de_write_notrace(i915, bus->gpio_reg, reserved | data_bits);
+ intel_de_posting_read(i915, bus->gpio_reg);
}
static int
@@ -439,9 +429,7 @@ gmbus_wait_idle(struct drm_i915_private *i915)
add_wait_queue(&i915->display.gmbus.wait_queue, &wait);
intel_de_write_fw(i915, GMBUS4(i915), irq_enable);
- ret = intel_wait_for_register_fw(&i915->uncore,
- GMBUS2(i915), GMBUS_ACTIVE, 0,
- 10);
+ ret = intel_de_wait_for_register_fw(i915, GMBUS2(i915), GMBUS_ACTIVE, 0, 10);
intel_de_write_fw(i915, GMBUS4(i915), 0);
remove_wait_queue(&i915->display.gmbus.wait_queue, &wait);
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
index bac85d88054f..c0ce6d3dc505 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
@@ -44,6 +44,7 @@
#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_atomic.h"
+#include "intel_audio.h"
#include "intel_connector.h"
#include "intel_ddi.h"
#include "intel_de.h"
@@ -537,8 +538,7 @@ void hsw_write_infoframe(struct intel_encoder *encoder,
0);
/* Wa_14013475917 */
- if (DISPLAY_VER(dev_priv) == 13 && crtc_state->has_psr &&
- type == DP_SDP_VSC)
+ if (IS_DISPLAY_VER(dev_priv, 13, 14) && crtc_state->has_psr && type == DP_SDP_VSC)
return;
val |= hsw_infoframe_enable(type);
@@ -767,6 +767,7 @@ intel_hdmi_compute_spd_infoframe(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct hdmi_spd_infoframe *frame = &crtc_state->infoframes.spd.spd;
int ret;
@@ -776,7 +777,11 @@ intel_hdmi_compute_spd_infoframe(struct intel_encoder *encoder,
crtc_state->infoframes.enable |=
intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD);
- ret = hdmi_spd_infoframe_init(frame, "Intel", "Integrated gfx");
+ if (IS_DGFX(i915))
+ ret = hdmi_spd_infoframe_init(frame, "Intel", "Discrete gfx");
+ else
+ ret = hdmi_spd_infoframe_init(frame, "Intel", "Integrated gfx");
+
if (drm_WARN_ON(encoder->base.dev, ret))
return false;
@@ -1988,9 +1993,6 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
bool has_hdmi_sink = intel_has_hdmi_sink(hdmi, connector->state);
bool ycbcr_420_only;
- if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
- return MODE_NO_DBLESCAN;
-
if ((mode->flags & DRM_MODE_FLAG_3D_MASK) == DRM_MODE_FLAG_3D_FRAME_PACKING)
clock *= 2;
@@ -2252,6 +2254,10 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
return -EINVAL;
+ if (!connector->interlace_allowed &&
+ adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
+ return -EINVAL;
+
pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
pipe_config->has_hdmi_sink =
intel_has_hdmi_sink(intel_hdmi, conn_state) &&
@@ -2264,7 +2270,8 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder,
pipe_config->pixel_multiplier = 2;
pipe_config->has_audio =
- intel_hdmi_has_audio(encoder, pipe_config, conn_state);
+ intel_hdmi_has_audio(encoder, pipe_config, conn_state) &&
+ intel_audio_compute_config(encoder, pipe_config, conn_state);
/*
* Try to respect downstream TMDS clock limits first, if
@@ -2353,7 +2360,7 @@ intel_hdmi_unset_edid(struct drm_connector *connector)
intel_hdmi->dp_dual_mode.type = DRM_DP_DUAL_MODE_NONE;
intel_hdmi->dp_dual_mode.max_tmds_clock = 0;
- kfree(to_intel_connector(connector)->detect_edid);
+ drm_edid_free(to_intel_connector(connector)->detect_edid);
to_intel_connector(connector)->detect_edid = NULL;
}
@@ -2414,7 +2421,8 @@ intel_hdmi_set_edid(struct drm_connector *connector)
struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct intel_hdmi *intel_hdmi = intel_attached_hdmi(to_intel_connector(connector));
intel_wakeref_t wakeref;
- struct edid *edid;
+ const struct drm_edid *drm_edid;
+ const struct edid *edid;
bool connected = false;
struct i2c_adapter *i2c;
@@ -2422,17 +2430,23 @@ intel_hdmi_set_edid(struct drm_connector *connector)
i2c = intel_gmbus_get_adapter(dev_priv, intel_hdmi->ddc_bus);
- edid = drm_get_edid(connector, i2c);
+ drm_edid = drm_edid_read_ddc(connector, i2c);
- if (!edid && !intel_gmbus_is_forced_bit(i2c)) {
+ if (!drm_edid && !intel_gmbus_is_forced_bit(i2c)) {
drm_dbg_kms(&dev_priv->drm,
"HDMI GMBUS EDID read failed, retry using GPIO bit-banging\n");
intel_gmbus_force_bit(i2c, true);
- edid = drm_get_edid(connector, i2c);
+ drm_edid = drm_edid_read_ddc(connector, i2c);
intel_gmbus_force_bit(i2c, false);
}
- to_intel_connector(connector)->detect_edid = edid;
+ /* Below we depend on display info having been updated */
+ drm_edid_connector_update(connector, drm_edid);
+
+ to_intel_connector(connector)->detect_edid = drm_edid;
+
+ /* FIXME: Get rid of drm_edid_raw() */
+ edid = drm_edid_raw(drm_edid);
if (edid && edid->input & DRM_EDID_INPUT_DIGITAL) {
intel_hdmi->has_audio = drm_detect_monitor_audio(edid);
intel_hdmi->has_hdmi_sink = drm_detect_hdmi_monitor(edid);
@@ -2508,13 +2522,8 @@ intel_hdmi_force(struct drm_connector *connector)
static int intel_hdmi_get_modes(struct drm_connector *connector)
{
- struct edid *edid;
-
- edid = to_intel_connector(connector)->detect_edid;
- if (edid == NULL)
- return 0;
-
- return intel_connector_update_modes(connector, edid);
+ /* drm_edid_connector_update() done in ->detect() or ->force() */
+ return drm_edid_connector_add_modes(connector);
}
static struct i2c_adapter *
@@ -2953,7 +2962,9 @@ void intel_hdmi_init_connector(struct intel_digital_port *dig_port,
ddc);
drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs);
- connector->interlace_allowed = true;
+ if (DISPLAY_VER(dev_priv) < 12)
+ connector->interlace_allowed = true;
+
connector->stereo_allowed = true;
if (DISPLAY_VER(dev_priv) >= 10)
diff --git a/drivers/gpu/drm/i915/display/intel_hti.c b/drivers/gpu/drm/i915/display/intel_hti.c
index 12a1f4ce1a77..c518efebdf77 100644
--- a/drivers/gpu/drm/i915/display/intel_hti.c
+++ b/drivers/gpu/drm/i915/display/intel_hti.c
@@ -21,6 +21,9 @@ void intel_hti_init(struct drm_i915_private *i915)
bool intel_hti_uses_phy(struct drm_i915_private *i915, enum phy phy)
{
+ if (drm_WARN_ON(&i915->drm, phy == PHY_NONE))
+ return false;
+
return i915->display.hti.state & HDPORT_ENABLED &&
i915->display.hti.state & HDPORT_DDI_USED(phy);
}
diff --git a/drivers/gpu/drm/i915/display/intel_lvds.c b/drivers/gpu/drm/i915/display/intel_lvds.c
index 7bf1bdfd03ec..a1557d84ce0a 100644
--- a/drivers/gpu/drm/i915/display/intel_lvds.c
+++ b/drivers/gpu/drm/i915/display/intel_lvds.c
@@ -477,10 +477,14 @@ static int intel_lvds_compute_config(struct intel_encoder *intel_encoder,
static int intel_lvds_get_modes(struct drm_connector *connector)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
+ const struct drm_edid *fixed_edid = intel_connector->panel.fixed_edid;
- /* use cached edid if we have one */
- if (!IS_ERR_OR_NULL(intel_connector->edid))
- return drm_add_edid_modes(connector, intel_connector->edid);
+ /* Use panel fixed edid if we have one */
+ if (!IS_ERR_OR_NULL(fixed_edid)) {
+ drm_edid_connector_update(connector, fixed_edid);
+
+ return drm_edid_connector_add_modes(connector);
+ }
return intel_panel_get_modes(intel_connector);
}
@@ -834,7 +838,7 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
struct intel_connector *intel_connector;
struct drm_connector *connector;
struct drm_encoder *encoder;
- struct edid *edid;
+ const struct drm_edid *drm_edid;
i915_reg_t lvds_reg;
u32 lvds;
u8 pin;
@@ -945,27 +949,34 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
* preferred mode is the right one.
*/
mutex_lock(&dev_priv->drm.mode_config.mutex);
- if (vga_switcheroo_handler_flags() & VGA_SWITCHEROO_CAN_SWITCH_DDC)
+ if (vga_switcheroo_handler_flags() & VGA_SWITCHEROO_CAN_SWITCH_DDC) {
+ const struct edid *edid;
+
+ /* FIXME: Make drm_get_edid_switcheroo() return drm_edid */
edid = drm_get_edid_switcheroo(connector,
- intel_gmbus_get_adapter(dev_priv, pin));
- else
- edid = drm_get_edid(connector,
- intel_gmbus_get_adapter(dev_priv, pin));
- if (edid) {
- if (drm_add_edid_modes(connector, edid)) {
- drm_connector_update_edid_property(connector,
- edid);
- } else {
+ intel_gmbus_get_adapter(dev_priv, pin));
+ if (edid) {
+ drm_edid = drm_edid_alloc(edid, (edid->extensions + 1) * EDID_LENGTH);
kfree(edid);
- edid = ERR_PTR(-EINVAL);
+ } else {
+ drm_edid = NULL;
}
} else {
- edid = ERR_PTR(-ENOENT);
+ drm_edid = drm_edid_read_ddc(connector,
+ intel_gmbus_get_adapter(dev_priv, pin));
}
- intel_connector->edid = edid;
-
- intel_bios_init_panel(dev_priv, &intel_connector->panel, NULL,
- IS_ERR(edid) ? NULL : edid);
+ if (drm_edid) {
+ if (drm_edid_connector_update(connector, drm_edid) ||
+ !drm_edid_connector_add_modes(connector)) {
+ drm_edid_connector_update(connector, NULL);
+ drm_edid_free(drm_edid);
+ drm_edid = ERR_PTR(-EINVAL);
+ }
+ } else {
+ drm_edid = ERR_PTR(-ENOENT);
+ }
+ intel_bios_init_panel_late(dev_priv, &intel_connector->panel, NULL,
+ IS_ERR(drm_edid) ? NULL : drm_edid);
/* Try EDID first */
intel_panel_add_edid_fixed_modes(intel_connector, true);
@@ -988,7 +999,7 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
if (!intel_panel_preferred_fixed_mode(intel_connector))
goto failed;
- intel_panel_init(intel_connector);
+ intel_panel_init(intel_connector, drm_edid);
intel_backlight_setup(intel_connector, INVALID_PIPE);
diff --git a/drivers/gpu/drm/i915/display/intel_modeset_setup.c b/drivers/gpu/drm/i915/display/intel_modeset_setup.c
index 96395bfbd41d..52cdbd4fc2fa 100644
--- a/drivers/gpu/drm/i915/display/intel_modeset_setup.c
+++ b/drivers/gpu/drm/i915/display/intel_modeset_setup.c
@@ -698,8 +698,10 @@ void intel_modeset_setup_hw_state(struct drm_i915_private *i915,
drm_crtc_vblank_reset(&crtc->base);
- if (crtc_state->hw.active)
+ if (crtc_state->hw.active) {
+ intel_dmc_enable_pipe(i915, crtc->pipe);
intel_crtc_vblank_on(crtc_state);
+ }
}
intel_fbc_sanitize(i915);
diff --git a/drivers/gpu/drm/i915/display/intel_opregion.c b/drivers/gpu/drm/i915/display/intel_opregion.c
index e0184745632c..b8dce0576512 100644
--- a/drivers/gpu/drm/i915/display/intel_opregion.c
+++ b/drivers/gpu/drm/i915/display/intel_opregion.c
@@ -1101,41 +1101,34 @@ intel_opregion_get_panel_type(struct drm_i915_private *dev_priv)
* The EDID in the OpRegion, or NULL if there is none or it's invalid.
*
*/
-struct edid *intel_opregion_get_edid(struct intel_connector *intel_connector)
+const struct drm_edid *intel_opregion_get_edid(struct intel_connector *intel_connector)
{
struct drm_connector *connector = &intel_connector->base;
struct drm_i915_private *i915 = to_i915(connector->dev);
struct intel_opregion *opregion = &i915->display.opregion;
- const void *in_edid;
- const struct edid *edid;
- struct edid *new_edid;
+ const struct drm_edid *drm_edid;
+ const void *edid;
int len;
if (!opregion->asle_ext)
return NULL;
- in_edid = opregion->asle_ext->bddc;
+ edid = opregion->asle_ext->bddc;
/* Validity corresponds to number of 128-byte blocks */
len = (opregion->asle_ext->phed & ASLE_PHED_EDID_VALID_MASK) * 128;
- if (!len || !memchr_inv(in_edid, 0, len))
+ if (!len || !memchr_inv(edid, 0, len))
return NULL;
- edid = in_edid;
+ drm_edid = drm_edid_alloc(edid, len);
- if (len < EDID_LENGTH * (1 + edid->extensions)) {
- drm_dbg_kms(&i915->drm, "Invalid EDID in ACPI OpRegion (Mailbox #5): too short\n");
- return NULL;
- }
- new_edid = drm_edid_duplicate(edid);
- if (!new_edid)
- return NULL;
- if (!drm_edid_is_valid(new_edid)) {
- kfree(new_edid);
+ if (!drm_edid_valid(drm_edid)) {
drm_dbg_kms(&i915->drm, "Invalid EDID in ACPI OpRegion (Mailbox #5)\n");
- return NULL;
+ drm_edid_free(drm_edid);
+ drm_edid = NULL;
}
- return new_edid;
+
+ return drm_edid;
}
bool intel_opregion_headless_sku(struct drm_i915_private *i915)
diff --git a/drivers/gpu/drm/i915/display/intel_opregion.h b/drivers/gpu/drm/i915/display/intel_opregion.h
index 2f261f985400..d02e6696a050 100644
--- a/drivers/gpu/drm/i915/display/intel_opregion.h
+++ b/drivers/gpu/drm/i915/display/intel_opregion.h
@@ -74,7 +74,7 @@ int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
int intel_opregion_notify_adapter(struct drm_i915_private *dev_priv,
pci_power_t state);
int intel_opregion_get_panel_type(struct drm_i915_private *dev_priv);
-struct edid *intel_opregion_get_edid(struct intel_connector *connector);
+const struct drm_edid *intel_opregion_get_edid(struct intel_connector *connector);
bool intel_opregion_headless_sku(struct drm_i915_private *i915);
@@ -123,7 +123,7 @@ static inline int intel_opregion_get_panel_type(struct drm_i915_private *dev)
return -ENODEV;
}
-static inline struct edid *
+static inline const struct drm_edid *
intel_opregion_get_edid(struct intel_connector *connector)
{
return NULL;
diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c
index 1640726bfbf6..42aa04bac261 100644
--- a/drivers/gpu/drm/i915/display/intel_panel.c
+++ b/drivers/gpu/drm/i915/display/intel_panel.c
@@ -31,6 +31,8 @@
#include <linux/kernel.h>
#include <linux/pwm.h>
+#include <drm/drm_edid.h>
+
#include "i915_reg.h"
#include "intel_backlight.h"
#include "intel_connector.h"
@@ -661,10 +663,22 @@ intel_panel_mode_valid(struct intel_connector *connector,
return MODE_OK;
}
-int intel_panel_init(struct intel_connector *connector)
+void intel_panel_init_alloc(struct intel_connector *connector)
+{
+ struct intel_panel *panel = &connector->panel;
+
+ connector->panel.vbt.panel_type = -1;
+ connector->panel.vbt.backlight.controller = -1;
+ INIT_LIST_HEAD(&panel->fixed_modes);
+}
+
+int intel_panel_init(struct intel_connector *connector,
+ const struct drm_edid *fixed_edid)
{
struct intel_panel *panel = &connector->panel;
+ panel->fixed_edid = fixed_edid;
+
intel_backlight_init_funcs(panel);
if (!has_drrs_modes(connector))
@@ -683,6 +697,9 @@ void intel_panel_fini(struct intel_connector *connector)
struct intel_panel *panel = &connector->panel;
struct drm_display_mode *fixed_mode, *next;
+ if (!IS_ERR_OR_NULL(panel->fixed_edid))
+ drm_edid_free(panel->fixed_edid);
+
intel_backlight_destroy(panel);
intel_bios_fini_panel(panel);
diff --git a/drivers/gpu/drm/i915/display/intel_panel.h b/drivers/gpu/drm/i915/display/intel_panel.h
index 5c5b5b7f95b6..15a8c897b33f 100644
--- a/drivers/gpu/drm/i915/display/intel_panel.h
+++ b/drivers/gpu/drm/i915/display/intel_panel.h
@@ -13,12 +13,15 @@ enum drrs_type;
struct drm_connector;
struct drm_connector_state;
struct drm_display_mode;
+struct drm_edid;
struct drm_i915_private;
struct intel_connector;
struct intel_crtc_state;
struct intel_encoder;
-int intel_panel_init(struct intel_connector *connector);
+void intel_panel_init_alloc(struct intel_connector *connector);
+int intel_panel_init(struct intel_connector *connector,
+ const struct drm_edid *fixed_edid);
void intel_panel_fini(struct intel_connector *connector);
enum drm_connector_status
intel_panel_detect(struct drm_connector *connector, bool force);
diff --git a/drivers/gpu/drm/i915/display/intel_pch_refclk.c b/drivers/gpu/drm/i915/display/intel_pch_refclk.c
index 08a94365b7d1..3657b2940702 100644
--- a/drivers/gpu/drm/i915/display/intel_pch_refclk.c
+++ b/drivers/gpu/drm/i915/display/intel_pch_refclk.c
@@ -467,24 +467,24 @@ static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
* clock hierarchy. That would also allow us to do
* clock bending finally.
*/
- dev_priv->pch_ssc_use = 0;
+ dev_priv->display.dpll.pch_ssc_use = 0;
if (spll_uses_pch_ssc(dev_priv)) {
drm_dbg_kms(&dev_priv->drm, "SPLL using PCH SSC\n");
- dev_priv->pch_ssc_use |= BIT(DPLL_ID_SPLL);
+ dev_priv->display.dpll.pch_ssc_use |= BIT(DPLL_ID_SPLL);
}
if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL1)) {
drm_dbg_kms(&dev_priv->drm, "WRPLL1 using PCH SSC\n");
- dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL1);
+ dev_priv->display.dpll.pch_ssc_use |= BIT(DPLL_ID_WRPLL1);
}
if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL2)) {
drm_dbg_kms(&dev_priv->drm, "WRPLL2 using PCH SSC\n");
- dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL2);
+ dev_priv->display.dpll.pch_ssc_use |= BIT(DPLL_ID_WRPLL2);
}
- if (dev_priv->pch_ssc_use)
+ if (dev_priv->display.dpll.pch_ssc_use)
return;
if (has_fdi) {
diff --git a/drivers/gpu/drm/i915/display/intel_pipe_crc.c b/drivers/gpu/drm/i915/display/intel_pipe_crc.c
index e9774670e3f6..8d3ea8d7b737 100644
--- a/drivers/gpu/drm/i915/display/intel_pipe_crc.c
+++ b/drivers/gpu/drm/i915/display/intel_pipe_crc.c
@@ -72,14 +72,13 @@ static int i8xx_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
return 0;
}
-static int i9xx_pipe_crc_auto_source(struct drm_i915_private *dev_priv,
- enum pipe pipe,
- enum intel_pipe_crc_source *source)
+static void i9xx_pipe_crc_auto_source(struct drm_i915_private *dev_priv,
+ enum pipe pipe,
+ enum intel_pipe_crc_source *source)
{
struct intel_encoder *encoder;
struct intel_crtc *crtc;
struct intel_digital_port *dig_port;
- int ret = 0;
*source = INTEL_PIPE_CRC_SOURCE_PIPE;
@@ -121,8 +120,6 @@ static int i9xx_pipe_crc_auto_source(struct drm_i915_private *dev_priv,
}
}
drm_modeset_unlock_all(&dev_priv->drm);
-
- return ret;
}
static int vlv_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
@@ -132,11 +129,8 @@ static int vlv_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
{
bool need_stable_symbols = false;
- if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
- int ret = i9xx_pipe_crc_auto_source(dev_priv, pipe, source);
- if (ret)
- return ret;
- }
+ if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
+ i9xx_pipe_crc_auto_source(dev_priv, pipe, source);
switch (*source) {
case INTEL_PIPE_CRC_SOURCE_PIPE:
@@ -200,11 +194,8 @@ static int i9xx_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
enum intel_pipe_crc_source *source,
u32 *val)
{
- if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
- int ret = i9xx_pipe_crc_auto_source(dev_priv, pipe, source);
- if (ret)
- return ret;
- }
+ if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
+ i9xx_pipe_crc_auto_source(dev_priv, pipe, source);
switch (*source) {
case INTEL_PIPE_CRC_SOURCE_PIPE:
diff --git a/drivers/gpu/drm/i915/display/intel_plane_initial.c b/drivers/gpu/drm/i915/display/intel_plane_initial.c
index 76be796df255..bb6ea7de5c61 100644
--- a/drivers/gpu/drm/i915/display/intel_plane_initial.c
+++ b/drivers/gpu/drm/i915/display/intel_plane_initial.c
@@ -107,7 +107,7 @@ initial_plane_vma(struct drm_i915_private *i915,
*/
if (IS_ENABLED(CONFIG_FRAMEBUFFER_CONSOLE) &&
mem == i915->mm.stolen_region &&
- size * 2 > i915->stolen_usable_size)
+ size * 2 > i915->dsm.usable_size)
return NULL;
obj = i915_gem_object_create_region_at(mem, phys_base, size, 0);
diff --git a/drivers/gpu/drm/i915/display/intel_pps.c b/drivers/gpu/drm/i915/display/intel_pps.c
index 9bbf41a076f7..7b21438edd9b 100644
--- a/drivers/gpu/drm/i915/display/intel_pps.c
+++ b/drivers/gpu/drm/i915/display/intel_pps.c
@@ -22,6 +22,40 @@ static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
static void pps_init_delays(struct intel_dp *intel_dp);
static void pps_init_registers(struct intel_dp *intel_dp, bool force_disable_vdd);
+static const char *pps_name(struct drm_i915_private *i915,
+ struct intel_pps *pps)
+{
+ if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
+ switch (pps->pps_pipe) {
+ case INVALID_PIPE:
+ /*
+ * FIXME would be nice if we can guarantee
+ * to always have a valid PPS when calling this.
+ */
+ return "PPS <none>";
+ case PIPE_A:
+ return "PPS A";
+ case PIPE_B:
+ return "PPS B";
+ default:
+ MISSING_CASE(pps->pps_pipe);
+ break;
+ }
+ } else {
+ switch (pps->pps_idx) {
+ case 0:
+ return "PPS 0";
+ case 1:
+ return "PPS 1";
+ default:
+ MISSING_CASE(pps->pps_idx);
+ break;
+ }
+ }
+
+ return "PPS <invalid>";
+}
+
intel_wakeref_t intel_pps_lock(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
@@ -60,15 +94,15 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
if (drm_WARN(&dev_priv->drm,
intel_de_read(dev_priv, intel_dp->output_reg) & DP_PORT_EN,
- "skipping pipe %c power sequencer kick due to [ENCODER:%d:%s] being active\n",
- pipe_name(pipe), dig_port->base.base.base.id,
- dig_port->base.base.name))
+ "skipping %s kick due to [ENCODER:%d:%s] being active\n",
+ pps_name(dev_priv, &intel_dp->pps),
+ dig_port->base.base.base.id, dig_port->base.base.name))
return;
drm_dbg_kms(&dev_priv->drm,
- "kicking pipe %c power sequencer for [ENCODER:%d:%s]\n",
- pipe_name(pipe), dig_port->base.base.base.id,
- dig_port->base.base.name);
+ "kicking %s for [ENCODER:%d:%s]\n",
+ pps_name(dev_priv, &intel_dp->pps),
+ dig_port->base.base.base.id, dig_port->base.base.name);
/* Preserve the BIOS-computed detected bit. This is
* supposed to be read-only.
@@ -95,7 +129,7 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
if (vlv_force_pll_on(dev_priv, pipe, vlv_get_dpll(dev_priv))) {
drm_err(&dev_priv->drm,
- "Failed to force on pll for pipe %c!\n",
+ "Failed to force on PLL for pipe %c!\n",
pipe_name(pipe));
return;
}
@@ -190,10 +224,9 @@ vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
intel_dp->pps.pps_pipe = pipe;
drm_dbg_kms(&dev_priv->drm,
- "picked pipe %c power sequencer for [ENCODER:%d:%s]\n",
- pipe_name(intel_dp->pps.pps_pipe),
- dig_port->base.base.base.id,
- dig_port->base.base.name);
+ "picked %s for [ENCODER:%d:%s]\n",
+ pps_name(dev_priv, &intel_dp->pps),
+ dig_port->base.base.base.id, dig_port->base.base.name);
/* init power sequencer on this pipe and port */
pps_init_delays(intel_dp);
@@ -212,8 +245,7 @@ static int
bxt_power_sequencer_idx(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_connector *connector = intel_dp->attached_connector;
- int backlight_controller = connector->panel.vbt.backlight.controller;
+ int pps_idx = intel_dp->pps.pps_idx;
lockdep_assert_held(&dev_priv->display.pps.mutex);
@@ -221,7 +253,7 @@ bxt_power_sequencer_idx(struct intel_dp *intel_dp)
drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp));
if (!intel_dp->pps.pps_reset)
- return backlight_controller;
+ return pps_idx;
intel_dp->pps.pps_reset = false;
@@ -231,34 +263,29 @@ bxt_power_sequencer_idx(struct intel_dp *intel_dp)
*/
pps_init_registers(intel_dp, false);
- return backlight_controller;
+ return pps_idx;
}
-typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
- enum pipe pipe);
+typedef bool (*pps_check)(struct drm_i915_private *dev_priv, int pps_idx);
-static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
- enum pipe pipe)
+static bool pps_has_pp_on(struct drm_i915_private *dev_priv, int pps_idx)
{
- return intel_de_read(dev_priv, PP_STATUS(pipe)) & PP_ON;
+ return intel_de_read(dev_priv, PP_STATUS(pps_idx)) & PP_ON;
}
-static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
- enum pipe pipe)
+static bool pps_has_vdd_on(struct drm_i915_private *dev_priv, int pps_idx)
{
- return intel_de_read(dev_priv, PP_CONTROL(pipe)) & EDP_FORCE_VDD;
+ return intel_de_read(dev_priv, PP_CONTROL(pps_idx)) & EDP_FORCE_VDD;
}
-static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
- enum pipe pipe)
+static bool pps_any(struct drm_i915_private *dev_priv, int pps_idx)
{
return true;
}
static enum pipe
vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
- enum port port,
- vlv_pipe_check pipe_check)
+ enum port port, pps_check check)
{
enum pipe pipe;
@@ -269,7 +296,7 @@ vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
if (port_sel != PANEL_PORT_SELECT_VLV(port))
continue;
- if (!pipe_check(dev_priv, pipe))
+ if (!check(dev_priv, pipe))
continue;
return pipe;
@@ -290,30 +317,117 @@ vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
/* try to find a pipe with this port selected */
/* first pick one where the panel is on */
intel_dp->pps.pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
- vlv_pipe_has_pp_on);
+ pps_has_pp_on);
/* didn't find one? pick one where vdd is on */
if (intel_dp->pps.pps_pipe == INVALID_PIPE)
intel_dp->pps.pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
- vlv_pipe_has_vdd_on);
+ pps_has_vdd_on);
/* didn't find one? pick one with just the correct port */
if (intel_dp->pps.pps_pipe == INVALID_PIPE)
intel_dp->pps.pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
- vlv_pipe_any);
+ pps_any);
/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
if (intel_dp->pps.pps_pipe == INVALID_PIPE) {
drm_dbg_kms(&dev_priv->drm,
- "no initial power sequencer for [ENCODER:%d:%s]\n",
- dig_port->base.base.base.id,
- dig_port->base.base.name);
+ "[ENCODER:%d:%s] no initial power sequencer\n",
+ dig_port->base.base.base.id, dig_port->base.base.name);
return;
}
drm_dbg_kms(&dev_priv->drm,
- "initial power sequencer for [ENCODER:%d:%s]: pipe %c\n",
- dig_port->base.base.base.id,
- dig_port->base.base.name,
- pipe_name(intel_dp->pps.pps_pipe));
+ "[ENCODER:%d:%s] initial power sequencer: %s\n",
+ dig_port->base.base.base.id, dig_port->base.base.name,
+ pps_name(dev_priv, &intel_dp->pps));
+}
+
+static int intel_num_pps(struct drm_i915_private *i915)
+{
+ if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
+ return 2;
+
+ if (IS_GEMINILAKE(i915) || IS_BROXTON(i915))
+ return 2;
+
+ if (INTEL_PCH_TYPE(i915) >= PCH_DG1)
+ return 1;
+
+ if (INTEL_PCH_TYPE(i915) >= PCH_ICP)
+ return 2;
+
+ return 1;
+}
+
+static bool intel_pps_is_valid(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
+ if (intel_dp->pps.pps_idx == 1 &&
+ INTEL_PCH_TYPE(i915) >= PCH_ICP &&
+ INTEL_PCH_TYPE(i915) < PCH_MTP)
+ return intel_de_read(i915, SOUTH_CHICKEN1) & ICP_SECOND_PPS_IO_SELECT;
+
+ return true;
+}
+
+static int
+bxt_initial_pps_idx(struct drm_i915_private *i915, pps_check check)
+{
+ int pps_idx, pps_num = intel_num_pps(i915);
+
+ for (pps_idx = 0; pps_idx < pps_num; pps_idx++) {
+ if (check(i915, pps_idx))
+ return pps_idx;
+ }
+
+ return -1;
+}
+
+static bool
+pps_initial_setup(struct intel_dp *intel_dp)
+{
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ struct intel_connector *connector = intel_dp->attached_connector;
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+
+ lockdep_assert_held(&i915->display.pps.mutex);
+
+ if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
+ vlv_initial_power_sequencer_setup(intel_dp);
+ return true;
+ }
+
+ /* first ask the VBT */
+ if (intel_num_pps(i915) > 1)
+ intel_dp->pps.pps_idx = connector->panel.vbt.backlight.controller;
+ else
+ intel_dp->pps.pps_idx = 0;
+
+ if (drm_WARN_ON(&i915->drm, intel_dp->pps.pps_idx >= intel_num_pps(i915)))
+ intel_dp->pps.pps_idx = -1;
+
+ /* VBT wasn't parsed yet? pick one where the panel is on */
+ if (intel_dp->pps.pps_idx < 0)
+ intel_dp->pps.pps_idx = bxt_initial_pps_idx(i915, pps_has_pp_on);
+ /* didn't find one? pick one where vdd is on */
+ if (intel_dp->pps.pps_idx < 0)
+ intel_dp->pps.pps_idx = bxt_initial_pps_idx(i915, pps_has_vdd_on);
+ /* didn't find one? pick any */
+ if (intel_dp->pps.pps_idx < 0) {
+ intel_dp->pps.pps_idx = bxt_initial_pps_idx(i915, pps_any);
+
+ drm_dbg_kms(&i915->drm,
+ "[ENCODER:%d:%s] no initial power sequencer, assuming %s\n",
+ encoder->base.base.id, encoder->base.name,
+ pps_name(i915, &intel_dp->pps));
+ } else {
+ drm_dbg_kms(&i915->drm,
+ "[ENCODER:%d:%s] initial power sequencer: %s\n",
+ encoder->base.base.id, encoder->base.name,
+ pps_name(i915, &intel_dp->pps));
+ }
+
+ return intel_pps_is_valid(intel_dp);
}
void intel_pps_reset_all(struct drm_i915_private *dev_priv)
@@ -364,14 +478,16 @@ static void intel_pps_get_registers(struct intel_dp *intel_dp,
struct pps_registers *regs)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- int pps_idx = 0;
+ int pps_idx;
memset(regs, 0, sizeof(*regs));
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
- pps_idx = bxt_power_sequencer_idx(intel_dp);
- else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
pps_idx = vlv_power_sequencer_pipe(intel_dp);
+ else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
+ pps_idx = bxt_power_sequencer_idx(intel_dp);
+ else
+ pps_idx = intel_dp->pps.pps_idx;
regs->pp_ctrl = PP_CONTROL(pps_idx);
regs->pp_stat = PP_STATUS(pps_idx);
@@ -435,21 +551,27 @@ static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
void intel_pps_check_power_unlocked(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
if (!intel_dp_is_edp(intel_dp))
return;
if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
drm_WARN(&dev_priv->drm, 1,
- "eDP powered off while attempting aux channel communication.\n");
- drm_dbg_kms(&dev_priv->drm, "Status 0x%08x Control 0x%08x\n",
+ "[ENCODER:%d:%s] %s powered off while attempting AUX CH communication.\n",
+ dig_port->base.base.base.id, dig_port->base.base.name,
+ pps_name(dev_priv, &intel_dp->pps));
+ drm_dbg_kms(&dev_priv->drm,
+ "[ENCODER:%d:%s] %s PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
+ dig_port->base.base.base.id, dig_port->base.base.name,
+ pps_name(dev_priv, &intel_dp->pps),
intel_de_read(dev_priv, _pp_stat_reg(intel_dp)),
intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)));
}
}
#define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
-#define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
+#define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
#define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0)
#define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0)
@@ -460,10 +582,10 @@ void intel_pps_check_power_unlocked(struct intel_dp *intel_dp)
static void intel_pps_verify_state(struct intel_dp *intel_dp);
static void wait_panel_status(struct intel_dp *intel_dp,
- u32 mask,
- u32 value)
+ u32 mask, u32 value)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
i915_reg_t pp_stat_reg, pp_ctrl_reg;
lockdep_assert_held(&dev_priv->display.pps.mutex);
@@ -474,7 +596,9 @@ static void wait_panel_status(struct intel_dp *intel_dp,
pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
drm_dbg_kms(&dev_priv->drm,
- "mask %08x value %08x status %08x control %08x\n",
+ "[ENCODER:%d:%s] %s mask: 0x%08x value: 0x%08x PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
+ dig_port->base.base.base.id, dig_port->base.base.name,
+ pps_name(dev_priv, &intel_dp->pps),
mask, value,
intel_de_read(dev_priv, pp_stat_reg),
intel_de_read(dev_priv, pp_ctrl_reg));
@@ -482,7 +606,9 @@ static void wait_panel_status(struct intel_dp *intel_dp,
if (intel_de_wait_for_register(dev_priv, pp_stat_reg,
mask, value, 5000))
drm_err(&dev_priv->drm,
- "Panel status timeout: status %08x control %08x\n",
+ "[ENCODER:%d:%s] %s panel status timeout: PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
+ dig_port->base.base.base.id, dig_port->base.base.name,
+ pps_name(dev_priv, &intel_dp->pps),
intel_de_read(dev_priv, pp_stat_reg),
intel_de_read(dev_priv, pp_ctrl_reg));
@@ -492,26 +618,35 @@ static void wait_panel_status(struct intel_dp *intel_dp,
static void wait_panel_on(struct intel_dp *intel_dp)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- drm_dbg_kms(&i915->drm, "Wait for panel power on\n");
+ drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] %s wait for panel power on\n",
+ dig_port->base.base.base.id, dig_port->base.base.name,
+ pps_name(i915, &intel_dp->pps));
wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
}
static void wait_panel_off(struct intel_dp *intel_dp)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- drm_dbg_kms(&i915->drm, "Wait for panel power off time\n");
+ drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] %s wait for panel power off time\n",
+ dig_port->base.base.base.id, dig_port->base.base.name,
+ pps_name(i915, &intel_dp->pps));
wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}
static void wait_panel_power_cycle(struct intel_dp *intel_dp)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
ktime_t panel_power_on_time;
s64 panel_power_off_duration;
- drm_dbg_kms(&i915->drm, "Wait for panel power cycle\n");
+ drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] %s wait for panel power cycle\n",
+ dig_port->base.base.base.id, dig_port->base.base.name,
+ pps_name(i915, &intel_dp->pps));
/* take the difference of current time and panel power off time
* and then make panel wait for t11_t12 if needed. */
@@ -598,9 +733,12 @@ bool intel_pps_vdd_on_unlocked(struct intel_dp *intel_dp)
intel_dp->pps.vdd_wakeref = intel_display_power_get(dev_priv,
intel_aux_power_domain(dig_port));
- drm_dbg_kms(&dev_priv->drm, "Turning [ENCODER:%d:%s] VDD on\n",
- dig_port->base.base.base.id,
- dig_port->base.base.name);
+ pp_stat_reg = _pp_stat_reg(intel_dp);
+ pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
+
+ drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] %s turning VDD on\n",
+ dig_port->base.base.base.id, dig_port->base.base.name,
+ pps_name(dev_priv, &intel_dp->pps));
if (!edp_have_panel_power(intel_dp))
wait_panel_power_cycle(intel_dp);
@@ -608,12 +746,11 @@ bool intel_pps_vdd_on_unlocked(struct intel_dp *intel_dp)
pp = ilk_get_pp_control(intel_dp);
pp |= EDP_FORCE_VDD;
- pp_stat_reg = _pp_stat_reg(intel_dp);
- pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
-
intel_de_write(dev_priv, pp_ctrl_reg, pp);
intel_de_posting_read(dev_priv, pp_ctrl_reg);
- drm_dbg_kms(&dev_priv->drm, "PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
+ drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] %s PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
+ dig_port->base.base.base.id, dig_port->base.base.name,
+ pps_name(dev_priv, &intel_dp->pps),
intel_de_read(dev_priv, pp_stat_reg),
intel_de_read(dev_priv, pp_ctrl_reg));
/*
@@ -621,9 +758,9 @@ bool intel_pps_vdd_on_unlocked(struct intel_dp *intel_dp)
*/
if (!edp_have_panel_power(intel_dp)) {
drm_dbg_kms(&dev_priv->drm,
- "[ENCODER:%d:%s] panel power wasn't enabled\n",
- dig_port->base.base.base.id,
- dig_port->base.base.name);
+ "[ENCODER:%d:%s] %s panel power wasn't enabled\n",
+ dig_port->base.base.base.id, dig_port->base.base.name,
+ pps_name(dev_priv, &intel_dp->pps));
msleep(intel_dp->pps.panel_power_up_delay);
}
@@ -638,6 +775,7 @@ bool intel_pps_vdd_on_unlocked(struct intel_dp *intel_dp)
*/
void intel_pps_vdd_on(struct intel_dp *intel_dp)
{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
intel_wakeref_t wakeref;
bool vdd;
@@ -647,9 +785,10 @@ void intel_pps_vdd_on(struct intel_dp *intel_dp)
vdd = false;
with_intel_pps_lock(intel_dp, wakeref)
vdd = intel_pps_vdd_on_unlocked(intel_dp);
- I915_STATE_WARN(!vdd, "[ENCODER:%d:%s] VDD already requested on\n",
+ I915_STATE_WARN(!vdd, "[ENCODER:%d:%s] %s VDD already requested on\n",
dp_to_dig_port(intel_dp)->base.base.base.id,
- dp_to_dig_port(intel_dp)->base.base.name);
+ dp_to_dig_port(intel_dp)->base.base.name,
+ pps_name(i915, &intel_dp->pps));
}
static void intel_pps_vdd_off_sync_unlocked(struct intel_dp *intel_dp)
@@ -667,9 +806,9 @@ static void intel_pps_vdd_off_sync_unlocked(struct intel_dp *intel_dp)
if (!edp_have_panel_vdd(intel_dp))
return;
- drm_dbg_kms(&dev_priv->drm, "Turning [ENCODER:%d:%s] VDD off\n",
- dig_port->base.base.base.id,
- dig_port->base.base.name);
+ drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] %s turning VDD off\n",
+ dig_port->base.base.base.id, dig_port->base.base.name,
+ pps_name(dev_priv, &intel_dp->pps));
pp = ilk_get_pp_control(intel_dp);
pp &= ~EDP_FORCE_VDD;
@@ -681,7 +820,9 @@ static void intel_pps_vdd_off_sync_unlocked(struct intel_dp *intel_dp)
intel_de_posting_read(dev_priv, pp_ctrl_reg);
/* Make sure sequencer is idle before allowing subsequent activity */
- drm_dbg_kms(&dev_priv->drm, "PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
+ drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] %s PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
+ dig_port->base.base.base.id, dig_port->base.base.name,
+ pps_name(dev_priv, &intel_dp->pps),
intel_de_read(dev_priv, pp_stat_reg),
intel_de_read(dev_priv, pp_ctrl_reg));
@@ -756,9 +897,10 @@ void intel_pps_vdd_off_unlocked(struct intel_dp *intel_dp, bool sync)
if (!intel_dp_is_edp(intel_dp))
return;
- I915_STATE_WARN(!intel_dp->pps.want_panel_vdd, "[ENCODER:%d:%s] VDD not forced on",
+ I915_STATE_WARN(!intel_dp->pps.want_panel_vdd, "[ENCODER:%d:%s] %s VDD not forced on",
dp_to_dig_port(intel_dp)->base.base.base.id,
- dp_to_dig_port(intel_dp)->base.base.name);
+ dp_to_dig_port(intel_dp)->base.base.name,
+ pps_name(dev_priv, &intel_dp->pps));
intel_dp->pps.want_panel_vdd = false;
@@ -779,14 +921,16 @@ void intel_pps_on_unlocked(struct intel_dp *intel_dp)
if (!intel_dp_is_edp(intel_dp))
return;
- drm_dbg_kms(&dev_priv->drm, "Turn [ENCODER:%d:%s] panel power on\n",
+ drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] %s turn panel power on\n",
dp_to_dig_port(intel_dp)->base.base.base.id,
- dp_to_dig_port(intel_dp)->base.base.name);
+ dp_to_dig_port(intel_dp)->base.base.name,
+ pps_name(dev_priv, &intel_dp->pps));
if (drm_WARN(&dev_priv->drm, edp_have_panel_power(intel_dp),
- "[ENCODER:%d:%s] panel power already on\n",
+ "[ENCODER:%d:%s] %s panel power already on\n",
dp_to_dig_port(intel_dp)->base.base.base.id,
- dp_to_dig_port(intel_dp)->base.base.name))
+ dp_to_dig_port(intel_dp)->base.base.name,
+ pps_name(dev_priv, &intel_dp->pps)))
return;
wait_panel_power_cycle(intel_dp);
@@ -840,12 +984,14 @@ void intel_pps_off_unlocked(struct intel_dp *intel_dp)
if (!intel_dp_is_edp(intel_dp))
return;
- drm_dbg_kms(&dev_priv->drm, "Turn [ENCODER:%d:%s] panel power off\n",
- dig_port->base.base.base.id, dig_port->base.base.name);
+ drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] %s turn panel power off\n",
+ dig_port->base.base.base.id, dig_port->base.base.name,
+ pps_name(dev_priv, &intel_dp->pps));
drm_WARN(&dev_priv->drm, !intel_dp->pps.want_panel_vdd,
- "Need [ENCODER:%d:%s] VDD to turn off panel\n",
- dig_port->base.base.base.id, dig_port->base.base.name);
+ "[ENCODER:%d:%s] %s need VDD to turn off panel\n",
+ dig_port->base.base.base.id, dig_port->base.base.name,
+ pps_name(dev_priv, &intel_dp->pps));
pp = ilk_get_pp_control(intel_dp);
/* We need to switch off panel power _and_ force vdd, for otherwise some
@@ -980,9 +1126,9 @@ static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
* from a port.
*/
drm_dbg_kms(&dev_priv->drm,
- "detaching pipe %c power sequencer from [ENCODER:%d:%s]\n",
- pipe_name(pipe), dig_port->base.base.base.id,
- dig_port->base.base.name);
+ "detaching %s from [ENCODER:%d:%s]\n",
+ pps_name(dev_priv, &intel_dp->pps),
+ dig_port->base.base.base.id, dig_port->base.base.name);
intel_de_write(dev_priv, pp_on_reg, 0);
intel_de_posting_read(dev_priv, pp_on_reg);
@@ -1000,7 +1146,7 @@ static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
drm_WARN(&dev_priv->drm, intel_dp->pps.active_pipe == pipe,
- "stealing pipe %c power sequencer from active [ENCODER:%d:%s]\n",
+ "stealing PPS %c from active [ENCODER:%d:%s]\n",
pipe_name(pipe), encoder->base.base.id,
encoder->base.name);
@@ -1008,7 +1154,7 @@ static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
continue;
drm_dbg_kms(&dev_priv->drm,
- "stealing pipe %c power sequencer from [ENCODER:%d:%s]\n",
+ "stealing PPS %c from [ENCODER:%d:%s]\n",
pipe_name(pipe), encoder->base.base.id,
encoder->base.name);
@@ -1053,9 +1199,9 @@ void vlv_pps_init(struct intel_encoder *encoder,
intel_dp->pps.pps_pipe = crtc->pipe;
drm_dbg_kms(&dev_priv->drm,
- "initializing pipe %c power sequencer for [ENCODER:%d:%s]\n",
- pipe_name(intel_dp->pps.pps_pipe), encoder->base.base.id,
- encoder->base.name);
+ "initializing %s for [ENCODER:%d:%s]\n",
+ pps_name(dev_priv, &intel_dp->pps),
+ encoder->base.base.id, encoder->base.name);
/* init power sequencer on this pipe and port */
pps_init_delays(intel_dp);
@@ -1079,7 +1225,9 @@ static void pps_vdd_init(struct intel_dp *intel_dp)
* indefinitely.
*/
drm_dbg_kms(&dev_priv->drm,
- "VDD left on by BIOS, adjusting state tracking\n");
+ "[ENCODER:%d:%s] %s VDD left on by BIOS, adjusting state tracking\n",
+ dig_port->base.base.base.id, dig_port->base.base.name,
+ pps_name(dev_priv, &intel_dp->pps));
drm_WARN_ON(&dev_priv->drm, intel_dp->pps.vdd_wakeref);
intel_dp->pps.vdd_wakeref = intel_display_power_get(dev_priv,
intel_aux_power_domain(dig_port));
@@ -1432,10 +1580,10 @@ void intel_pps_encoder_reset(struct intel_dp *intel_dp)
}
}
-void intel_pps_init(struct intel_dp *intel_dp)
+bool intel_pps_init(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
intel_wakeref_t wakeref;
+ bool ret;
intel_dp->pps.initializing = true;
INIT_DELAYED_WORK(&intel_dp->pps.panel_vdd_work, edp_panel_vdd_work);
@@ -1443,13 +1591,36 @@ void intel_pps_init(struct intel_dp *intel_dp)
pps_init_timestamps(intel_dp);
with_intel_pps_lock(intel_dp, wakeref) {
- if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
- vlv_initial_power_sequencer_setup(intel_dp);
+ ret = pps_initial_setup(intel_dp);
pps_init_delays(intel_dp);
pps_init_registers(intel_dp, false);
pps_vdd_init(intel_dp);
}
+
+ return ret;
+}
+
+static void pps_init_late(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ struct intel_connector *connector = intel_dp->attached_connector;
+
+ if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
+ return;
+
+ if (intel_num_pps(i915) < 2)
+ return;
+
+ drm_WARN(&i915->drm, connector->panel.vbt.backlight.controller >= 0 &&
+ intel_dp->pps.pps_idx != connector->panel.vbt.backlight.controller,
+ "[ENCODER:%d:%s] power sequencer mismatch: %d (initial) vs. %d (VBT)\n",
+ encoder->base.base.id, encoder->base.name,
+ intel_dp->pps.pps_idx, connector->panel.vbt.backlight.controller);
+
+ if (connector->panel.vbt.backlight.controller >= 0)
+ intel_dp->pps.pps_idx = connector->panel.vbt.backlight.controller;
}
void intel_pps_init_late(struct intel_dp *intel_dp)
@@ -1458,6 +1629,8 @@ void intel_pps_init_late(struct intel_dp *intel_dp)
with_intel_pps_lock(intel_dp, wakeref) {
/* Reinit delays after per-panel info has been parsed from VBT */
+ pps_init_late(intel_dp);
+
memset(&intel_dp->pps.pps_delays, 0, sizeof(intel_dp->pps.pps_delays));
pps_init_delays(intel_dp);
pps_init_registers(intel_dp, false);
@@ -1480,10 +1653,7 @@ void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv)
* This w/a is needed at least on CPT/PPT, but to be sure apply it
* everywhere where registers can be write protected.
*/
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- pps_num = 2;
- else
- pps_num = 1;
+ pps_num = intel_num_pps(dev_priv);
for (pps_idx = 0; pps_idx < pps_num; pps_idx++) {
u32 val = intel_de_read(dev_priv, PP_CONTROL(pps_idx));
diff --git a/drivers/gpu/drm/i915/display/intel_pps.h b/drivers/gpu/drm/i915/display/intel_pps.h
index a3a56f903f26..a2c2467e3c22 100644
--- a/drivers/gpu/drm/i915/display/intel_pps.h
+++ b/drivers/gpu/drm/i915/display/intel_pps.h
@@ -40,7 +40,7 @@ void intel_pps_vdd_off_sync(struct intel_dp *intel_dp);
bool intel_pps_have_panel_power_or_vdd(struct intel_dp *intel_dp);
void intel_pps_wait_power_cycle(struct intel_dp *intel_dp);
-void intel_pps_init(struct intel_dp *intel_dp);
+bool intel_pps_init(struct intel_dp *intel_dp);
void intel_pps_init_late(struct intel_dp *intel_dp);
void intel_pps_encoder_reset(struct intel_dp *intel_dp);
void intel_pps_reset_all(struct drm_i915_private *i915);
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index 5b678916e6db..7a72e15e6836 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -24,14 +24,13 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
-#include "display/intel_dp.h"
-
#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_atomic.h"
#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display_types.h"
+#include "intel_dp.h"
#include "intel_dp_aux.h"
#include "intel_hdmi.h"
#include "intel_psr.h"
@@ -797,7 +796,7 @@ static bool psr2_granularity_check(struct intel_dp *intel_dp,
return intel_dp->psr.su_y_granularity == 4;
/*
- * adl_p and display 14+ platforms has 1 line granularity.
+ * adl_p and mtl platforms have 1 line granularity.
* For other platforms with SW tracking we can adjust the y coordinates
* to match sink requirement if multiple of 4.
*/
@@ -1112,6 +1111,8 @@ static u32 wa_16013835468_bit_get(struct intel_dp *intel_dp)
return LATENCY_REPORTING_REMOVED_PIPE_B;
case PIPE_C:
return LATENCY_REPORTING_REMOVED_PIPE_C;
+ case PIPE_D:
+ return LATENCY_REPORTING_REMOVED_PIPE_D;
default:
MISSING_CASE(intel_dp->psr.pipe);
return 0;
@@ -1163,6 +1164,23 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
intel_dp->psr.psr2_sel_fetch_enabled ?
IGNORE_PSR2_HW_TRACKING : 0);
+ /*
+ * Wa_16013835468
+ * Wa_14015648006
+ */
+ if (IS_MTL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0) ||
+ IS_DISPLAY_VER(dev_priv, 12, 13)) {
+ u16 vtotal, vblank;
+
+ vtotal = crtc_state->uapi.adjusted_mode.crtc_vtotal -
+ crtc_state->uapi.adjusted_mode.crtc_vdisplay;
+ vblank = crtc_state->uapi.adjusted_mode.crtc_vblank_end -
+ crtc_state->uapi.adjusted_mode.crtc_vblank_start;
+ if (vblank > vtotal)
+ intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1, 0,
+ wa_16013835468_bit_get(intel_dp));
+ }
+
if (intel_dp->psr.psr2_enabled) {
if (DISPLAY_VER(dev_priv) == 9)
intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder), 0,
@@ -1170,11 +1188,14 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
PSR2_ADD_VERTICAL_LINE_COUNT);
/*
- * Wa_16014451276:adlp
+ * Wa_16014451276:adlp,mtl[a0,b0]
* All supported adlp panels have 1-based X granularity, this may
* cause issues if non-supported panels are used.
*/
- if (IS_ALDERLAKE_P(dev_priv))
+ if (IS_MTL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
+ intel_de_rmw(dev_priv, MTL_CHICKEN_TRANS(cpu_transcoder), 0,
+ ADLP_1_BASED_X_GRANULARITY);
+ else if (IS_ALDERLAKE_P(dev_priv))
intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder), 0,
ADLP_1_BASED_X_GRANULARITY);
@@ -1185,24 +1206,14 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
TRANS_SET_CONTEXT_LATENCY_MASK,
TRANS_SET_CONTEXT_LATENCY_VALUE(1));
- /* Wa_16012604467:adlp */
- if (IS_ALDERLAKE_P(dev_priv))
+ /* Wa_16012604467:adlp,mtl[a0,b0] */
+ if (IS_MTL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
+ intel_de_rmw(dev_priv,
+ MTL_CLKGATE_DIS_TRANS(cpu_transcoder), 0,
+ MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS);
+ else if (IS_ALDERLAKE_P(dev_priv))
intel_de_rmw(dev_priv, CLKGATE_DIS_MISC, 0,
CLKGATE_DIS_MISC_DMASC_GATING_DIS);
-
- /* Wa_16013835468:tgl[b0+], dg1 */
- if (IS_TGL_DISPLAY_STEP(dev_priv, STEP_B0, STEP_FOREVER) ||
- IS_DG1(dev_priv)) {
- u16 vtotal, vblank;
-
- vtotal = crtc_state->uapi.adjusted_mode.crtc_vtotal -
- crtc_state->uapi.adjusted_mode.crtc_vdisplay;
- vblank = crtc_state->uapi.adjusted_mode.crtc_vblank_end -
- crtc_state->uapi.adjusted_mode.crtc_vblank_start;
- if (vblank > vtotal)
- intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1, 0,
- wa_16013835468_bit_get(intel_dp));
- }
}
}
@@ -1355,6 +1366,15 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
intel_de_rmw(dev_priv, CHICKEN_PAR1_1,
DIS_RAM_BYPASS_PSR2_MAN_TRACK, 0);
+ /*
+ * Wa_16013835468
+ * Wa_14015648006
+ */
+ if (IS_MTL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0) ||
+ IS_DISPLAY_VER(dev_priv, 12, 13))
+ intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
+ wa_16013835468_bit_get(intel_dp), 0);
+
if (intel_dp->psr.psr2_enabled) {
/* Wa_16011168373:adl-p */
if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
@@ -1362,16 +1382,14 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
TRANS_SET_CONTEXT_LATENCY(intel_dp->psr.transcoder),
TRANS_SET_CONTEXT_LATENCY_MASK, 0);
- /* Wa_16012604467:adlp */
- if (IS_ALDERLAKE_P(dev_priv))
+ /* Wa_16012604467:adlp,mtl[a0,b0] */
+ if (IS_MTL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
+ intel_de_rmw(dev_priv,
+ MTL_CLKGATE_DIS_TRANS(intel_dp->psr.transcoder),
+ MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS, 0);
+ else if (IS_ALDERLAKE_P(dev_priv))
intel_de_rmw(dev_priv, CLKGATE_DIS_MISC,
CLKGATE_DIS_MISC_DMASC_GATING_DIS, 0);
-
- /* Wa_16013835468:tgl[b0+], dg1 */
- if (IS_TGL_DISPLAY_STEP(dev_priv, STEP_B0, STEP_FOREVER) ||
- IS_DG1(dev_priv))
- intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
- wa_16013835468_bit_get(intel_dp), 0);
}
intel_snps_phy_update_psr_power_state(dev_priv, phy, false);
@@ -1510,7 +1528,8 @@ static void psr_force_hw_tracking_exit(struct intel_dp *intel_dp)
PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder),
man_trk_ctl_enable_bit_get(dev_priv) |
man_trk_ctl_partial_frame_bit_get(dev_priv) |
- man_trk_ctl_single_full_frame_bit_get(dev_priv));
+ man_trk_ctl_single_full_frame_bit_get(dev_priv) |
+ man_trk_ctl_continuos_full_frame(dev_priv));
/*
* Display WA #0884: skl+
@@ -1624,11 +1643,8 @@ static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
val |= man_trk_ctl_partial_frame_bit_get(dev_priv);
if (full_update) {
- /*
- * Not applying Wa_14014971508:adlp as we do not support the
- * feature that requires this workaround.
- */
val |= man_trk_ctl_single_full_frame_bit_get(dev_priv);
+ val |= man_trk_ctl_continuos_full_frame(dev_priv);
goto exit;
}
@@ -1826,6 +1842,12 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
if (full_update)
goto skip_sel_fetch_set_loop;
+ /* Wa_14014971492 */
+ if ((IS_MTL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0) ||
+ IS_ALDERLAKE_P(dev_priv) || IS_TIGERLAKE(dev_priv)) &&
+ crtc_state->splitter.enable)
+ pipe_clip.y1 = 0;
+
ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
if (ret)
return ret;
@@ -2307,12 +2329,15 @@ static void _psr_flush_handle(struct intel_dp *intel_dp)
/* can we turn CFF off? */
if (intel_dp->psr.busy_frontbuffer_bits == 0) {
u32 val = man_trk_ctl_enable_bit_get(dev_priv) |
- man_trk_ctl_partial_frame_bit_get(dev_priv) |
- man_trk_ctl_single_full_frame_bit_get(dev_priv);
+ man_trk_ctl_partial_frame_bit_get(dev_priv) |
+ man_trk_ctl_single_full_frame_bit_get(dev_priv) |
+ man_trk_ctl_continuos_full_frame(dev_priv);
/*
- * turn continuous full frame off and do a single
- * full frame
+ * Set psr2_sel_fetch_cff_enabled as false to allow selective
+ * updates. Still keep cff bit enabled as we don't have proper
+ * SU configuration in case update is sent for any reason after
+ * sff bit gets cleared by the HW on next vblank.
*/
intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder),
val);
diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c
index 329b9d9af667..e12ba458636c 100644
--- a/drivers/gpu/drm/i915/display/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/display/intel_sdvo.c
@@ -39,6 +39,7 @@
#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_atomic.h"
+#include "intel_audio.h"
#include "intel_connector.h"
#include "intel_crtc.h"
#include "intel_de.h"
@@ -1068,7 +1069,8 @@ static ssize_t intel_sdvo_read_infoframe(struct intel_sdvo *intel_sdvo,
&tx_rate, 1))
return -ENXIO;
- if (tx_rate == SDVO_HBUF_TX_DISABLED)
+ /* TX_DISABLED doesn't mean disabled for ELD */
+ if (if_index != SDVO_HBUF_INDEX_ELD && tx_rate == SDVO_HBUF_TX_DISABLED)
return 0;
if (!intel_sdvo_get_hbuf_size(intel_sdvo, &hbuf_size))
@@ -1185,6 +1187,28 @@ static void intel_sdvo_get_avi_infoframe(struct intel_sdvo *intel_sdvo,
frame->any.type, HDMI_INFOFRAME_TYPE_AVI);
}
+static void intel_sdvo_get_eld(struct intel_sdvo *intel_sdvo,
+ struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(intel_sdvo->base.base.dev);
+ ssize_t len;
+ u8 val;
+
+ if (!crtc_state->has_audio)
+ return;
+
+ if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_AUDIO_STAT, &val, 1))
+ return;
+
+ if ((val & SDVO_AUDIO_ELD_VALID) == 0)
+ return;
+
+ len = intel_sdvo_read_infoframe(intel_sdvo, SDVO_HBUF_INDEX_ELD,
+ crtc_state->eld, sizeof(crtc_state->eld));
+ if (len < 0)
+ drm_dbg_kms(&i915->drm, "failed to read ELD\n");
+}
+
static bool intel_sdvo_set_tv_format(struct intel_sdvo *intel_sdvo,
const struct drm_connector_state *conn_state)
{
@@ -1378,7 +1402,9 @@ static int intel_sdvo_compute_config(struct intel_encoder *encoder,
pipe_config->has_hdmi_sink = intel_has_hdmi_sink(intel_sdvo, conn_state);
- pipe_config->has_audio = intel_sdvo_has_audio(encoder, pipe_config, conn_state);
+ pipe_config->has_audio =
+ intel_sdvo_has_audio(encoder, pipe_config, conn_state) &&
+ intel_audio_compute_config(encoder, pipe_config, conn_state);
pipe_config->limited_color_range =
intel_sdvo_limited_color_range(encoder, pipe_config,
@@ -1729,9 +1755,7 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
if (intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_AUDIO_STAT,
&val, 1)) {
- u8 mask = SDVO_AUDIO_ELD_VALID | SDVO_AUDIO_PRESENCE_DETECT;
-
- if ((val & mask) == mask)
+ if (val & SDVO_AUDIO_PRESENCE_DETECT)
pipe_config->has_audio = true;
}
@@ -1742,6 +1766,8 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
}
intel_sdvo_get_avi_infoframe(intel_sdvo, pipe_config);
+
+ intel_sdvo_get_eld(intel_sdvo, pipe_config);
}
static void intel_sdvo_disable_audio(struct intel_sdvo *intel_sdvo)
@@ -1753,12 +1779,7 @@ static void intel_sdvo_enable_audio(struct intel_sdvo *intel_sdvo,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- const struct drm_display_mode *adjusted_mode =
- &crtc_state->hw.adjusted_mode;
- struct drm_connector *connector = conn_state->connector;
- u8 *eld = connector->eld;
-
- eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2;
+ const u8 *eld = crtc_state->eld;
intel_sdvo_set_audio_state(intel_sdvo, 0);
@@ -2886,7 +2907,7 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, u16 type)
if (!intel_sdvo_create_enhance_property(intel_sdvo, intel_sdvo_connector))
goto err;
- intel_bios_init_panel(i915, &intel_connector->panel, NULL, NULL);
+ intel_bios_init_panel_late(i915, &intel_connector->panel, NULL, NULL);
/*
* Fetch modes from VBT. For SDVO prefer the VBT mode since some
@@ -2903,7 +2924,7 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, u16 type)
mutex_unlock(&i915->drm.mode_config.mutex);
}
- intel_panel_init(intel_connector);
+ intel_panel_init(intel_connector, NULL);
if (!intel_panel_preferred_fixed_mode(intel_connector))
goto err;
diff --git a/drivers/gpu/drm/i915/display/intel_snps_phy.c b/drivers/gpu/drm/i915/display/intel_snps_phy.c
index c799e891f8b5..c65c771f5c46 100644
--- a/drivers/gpu/drm/i915/display/intel_snps_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_snps_phy.c
@@ -40,22 +40,22 @@ void intel_snps_phy_wait_for_calibration(struct drm_i915_private *i915)
*/
if (intel_de_wait_for_clear(i915, DG2_PHY_MISC(phy),
DG2_PHY_DP_TX_ACK_MASK, 25))
- i915->snps_phy_failed_calibration |= BIT(phy);
+ i915->display.snps.phy_failed_calibration |= BIT(phy);
}
}
-void intel_snps_phy_update_psr_power_state(struct drm_i915_private *dev_priv,
+void intel_snps_phy_update_psr_power_state(struct drm_i915_private *i915,
enum phy phy, bool enable)
{
u32 val;
- if (!intel_phy_is_snps(dev_priv, phy))
+ if (!intel_phy_is_snps(i915, phy))
return;
val = REG_FIELD_PREP(SNPS_PHY_TX_REQ_LN_DIS_PWR_STATE_PSR,
enable ? 2 : 3);
- intel_uncore_rmw(&dev_priv->uncore, SNPS_PHY_TX_REQ(phy),
- SNPS_PHY_TX_REQ_LN_DIS_PWR_STATE_PSR, val);
+ intel_de_rmw(i915, SNPS_PHY_TX_REQ(phy),
+ SNPS_PHY_TX_REQ_LN_DIS_PWR_STATE_PSR, val);
}
void intel_snps_phy_set_signal_levels(struct intel_encoder *encoder,
@@ -1785,7 +1785,7 @@ void intel_mpllb_enable(struct intel_encoder *encoder,
*/
/* 5. Software sets DPLL_ENABLE [PLL Enable] to "1". */
- intel_uncore_rmw(&dev_priv->uncore, enable_reg, 0, PLL_ENABLE);
+ intel_de_rmw(dev_priv, enable_reg, 0, PLL_ENABLE);
/*
* 9. Software sets SNPS_PHY_MPLLB_DIV dp_mpllb_force_en to "1". This
@@ -1830,14 +1830,13 @@ void intel_mpllb_disable(struct intel_encoder *encoder)
*/
/* 2. Software programs DPLL_ENABLE [PLL Enable] to "0" */
- intel_uncore_rmw(&i915->uncore, enable_reg, PLL_ENABLE, 0);
+ intel_de_rmw(i915, enable_reg, PLL_ENABLE, 0);
/*
* 4. Software programs SNPS_PHY_MPLLB_DIV dp_mpllb_force_en to "0".
* This will allow the PLL to stop running.
*/
- intel_uncore_rmw(&i915->uncore, SNPS_PHY_MPLLB_DIV(phy),
- SNPS_PHY_MPLLB_FORCE_EN, 0);
+ intel_de_rmw(i915, SNPS_PHY_MPLLB_DIV(phy), SNPS_PHY_MPLLB_FORCE_EN, 0);
/*
* 5. Software polls DPLL_ENABLE [PLL Lock] for PHY acknowledgment
diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c
index 70624b4b2d38..f45328712bff 100644
--- a/drivers/gpu/drm/i915/display/intel_tc.c
+++ b/drivers/gpu/drm/i915/display/intel_tc.c
@@ -5,6 +5,7 @@
#include "i915_drv.h"
#include "i915_reg.h"
+#include "intel_de.h"
#include "intel_display.h"
#include "intel_display_power_map.h"
#include "intel_display_types.h"
@@ -120,11 +121,9 @@ assert_tc_cold_blocked(struct intel_digital_port *dig_port)
u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- struct intel_uncore *uncore = &i915->uncore;
u32 lane_mask;
- lane_mask = intel_uncore_read(uncore,
- PORT_TX_DFLEXDPSP(dig_port->tc_phy_fia));
+ lane_mask = intel_de_read(i915, PORT_TX_DFLEXDPSP(dig_port->tc_phy_fia));
drm_WARN_ON(&i915->drm, lane_mask == 0xffffffff);
assert_tc_cold_blocked(dig_port);
@@ -136,11 +135,9 @@ u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port)
u32 intel_tc_port_get_pin_assignment_mask(struct intel_digital_port *dig_port)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- struct intel_uncore *uncore = &i915->uncore;
u32 pin_mask;
- pin_mask = intel_uncore_read(uncore,
- PORT_TX_DFLEXPA1(dig_port->tc_phy_fia));
+ pin_mask = intel_de_read(i915, PORT_TX_DFLEXPA1(dig_port->tc_phy_fia));
drm_WARN_ON(&i915->drm, pin_mask == 0xffffffff);
assert_tc_cold_blocked(dig_port);
@@ -186,7 +183,6 @@ void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port,
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
bool lane_reversal = dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
- struct intel_uncore *uncore = &i915->uncore;
u32 val;
drm_WARN_ON(&i915->drm,
@@ -194,8 +190,7 @@ void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port,
assert_tc_cold_blocked(dig_port);
- val = intel_uncore_read(uncore,
- PORT_TX_DFLEXDPMLE1(dig_port->tc_phy_fia));
+ val = intel_de_read(i915, PORT_TX_DFLEXDPMLE1(dig_port->tc_phy_fia));
val &= ~DFLEXDPMLE1_DPMLETC_MASK(dig_port->tc_phy_fia_idx);
switch (required_lanes) {
@@ -216,8 +211,7 @@ void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port,
MISSING_CASE(required_lanes);
}
- intel_uncore_write(uncore,
- PORT_TX_DFLEXDPMLE1(dig_port->tc_phy_fia), val);
+ intel_de_write(i915, PORT_TX_DFLEXDPMLE1(dig_port->tc_phy_fia), val);
}
static void tc_port_fixup_legacy_flag(struct intel_digital_port *dig_port,
@@ -246,13 +240,11 @@ static void tc_port_fixup_legacy_flag(struct intel_digital_port *dig_port,
static u32 icl_tc_port_live_status_mask(struct intel_digital_port *dig_port)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- struct intel_uncore *uncore = &i915->uncore;
u32 isr_bit = i915->display.hotplug.pch_hpd[dig_port->base.hpd_pin];
u32 mask = 0;
u32 val;
- val = intel_uncore_read(uncore,
- PORT_TX_DFLEXDPSP(dig_port->tc_phy_fia));
+ val = intel_de_read(i915, PORT_TX_DFLEXDPSP(dig_port->tc_phy_fia));
if (val == 0xffffffff) {
drm_dbg_kms(&i915->drm,
@@ -266,7 +258,7 @@ static u32 icl_tc_port_live_status_mask(struct intel_digital_port *dig_port)
if (val & TC_LIVE_STATE_TC(dig_port->tc_phy_fia_idx))
mask |= BIT(TC_PORT_DP_ALT);
- if (intel_uncore_read(uncore, SDEISR) & isr_bit)
+ if (intel_de_read(i915, SDEISR) & isr_bit)
mask |= BIT(TC_PORT_LEGACY);
/* The sink can be connected only in a single mode. */
@@ -281,7 +273,6 @@ static u32 adl_tc_port_live_status_mask(struct intel_digital_port *dig_port)
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port);
u32 isr_bit = i915->display.hotplug.pch_hpd[dig_port->base.hpd_pin];
- struct intel_uncore *uncore = &i915->uncore;
u32 val, mask = 0;
/*
@@ -289,13 +280,13 @@ static u32 adl_tc_port_live_status_mask(struct intel_digital_port *dig_port)
* registers in IOM. Note that this doesn't apply to PHY and FIA
* registers.
*/
- val = intel_uncore_read(uncore, TCSS_DDI_STATUS(tc_port));
+ val = intel_de_read(i915, TCSS_DDI_STATUS(tc_port));
if (val & TCSS_DDI_STATUS_HPD_LIVE_STATUS_ALT)
mask |= BIT(TC_PORT_DP_ALT);
if (val & TCSS_DDI_STATUS_HPD_LIVE_STATUS_TBT)
mask |= BIT(TC_PORT_TBT_ALT);
- if (intel_uncore_read(uncore, SDEISR) & isr_bit)
+ if (intel_de_read(i915, SDEISR) & isr_bit)
mask |= BIT(TC_PORT_LEGACY);
/* The sink can be connected only in a single mode. */
@@ -326,11 +317,9 @@ static u32 tc_port_live_status_mask(struct intel_digital_port *dig_port)
static bool icl_tc_phy_status_complete(struct intel_digital_port *dig_port)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- struct intel_uncore *uncore = &i915->uncore;
u32 val;
- val = intel_uncore_read(uncore,
- PORT_TX_DFLEXDPPMS(dig_port->tc_phy_fia));
+ val = intel_de_read(i915, PORT_TX_DFLEXDPPMS(dig_port->tc_phy_fia));
if (val == 0xffffffff) {
drm_dbg_kms(&i915->drm,
"Port %s: PHY in TCCOLD, assuming not complete\n",
@@ -352,10 +341,9 @@ static bool adl_tc_phy_status_complete(struct intel_digital_port *dig_port)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port);
- struct intel_uncore *uncore = &i915->uncore;
u32 val;
- val = intel_uncore_read(uncore, TCSS_DDI_STATUS(tc_port));
+ val = intel_de_read(i915, TCSS_DDI_STATUS(tc_port));
if (val == 0xffffffff) {
drm_dbg_kms(&i915->drm,
"Port %s: PHY in TCCOLD, assuming not complete\n",
@@ -380,11 +368,9 @@ static bool icl_tc_phy_take_ownership(struct intel_digital_port *dig_port,
bool take)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- struct intel_uncore *uncore = &i915->uncore;
u32 val;
- val = intel_uncore_read(uncore,
- PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia));
+ val = intel_de_read(i915, PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia));
if (val == 0xffffffff) {
drm_dbg_kms(&i915->drm,
"Port %s: PHY in TCCOLD, can't %s ownership\n",
@@ -397,8 +383,7 @@ static bool icl_tc_phy_take_ownership(struct intel_digital_port *dig_port,
if (take)
val |= DP_PHY_MODE_STATUS_NOT_SAFE(dig_port->tc_phy_fia_idx);
- intel_uncore_write(uncore,
- PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia), val);
+ intel_de_write(i915, PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia), val);
return true;
}
@@ -407,11 +392,10 @@ static bool adl_tc_phy_take_ownership(struct intel_digital_port *dig_port,
bool take)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- struct intel_uncore *uncore = &i915->uncore;
enum port port = dig_port->base.port;
- intel_uncore_rmw(uncore, DDI_BUF_CTL(port), DDI_BUF_CTL_TC_PHY_OWNERSHIP,
- take ? DDI_BUF_CTL_TC_PHY_OWNERSHIP : 0);
+ intel_de_rmw(i915, DDI_BUF_CTL(port), DDI_BUF_CTL_TC_PHY_OWNERSHIP,
+ take ? DDI_BUF_CTL_TC_PHY_OWNERSHIP : 0);
return true;
}
@@ -429,11 +413,9 @@ static bool tc_phy_take_ownership(struct intel_digital_port *dig_port, bool take
static bool icl_tc_phy_is_owned(struct intel_digital_port *dig_port)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- struct intel_uncore *uncore = &i915->uncore;
u32 val;
- val = intel_uncore_read(uncore,
- PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia));
+ val = intel_de_read(i915, PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia));
if (val == 0xffffffff) {
drm_dbg_kms(&i915->drm,
"Port %s: PHY in TCCOLD, assume safe mode\n",
@@ -447,11 +429,10 @@ static bool icl_tc_phy_is_owned(struct intel_digital_port *dig_port)
static bool adl_tc_phy_is_owned(struct intel_digital_port *dig_port)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- struct intel_uncore *uncore = &i915->uncore;
enum port port = dig_port->base.port;
u32 val;
- val = intel_uncore_read(uncore, DDI_BUF_CTL(port));
+ val = intel_de_read(i915, DDI_BUF_CTL(port));
return val & DDI_BUF_CTL_TC_PHY_OWNERSHIP;
}
@@ -907,7 +888,7 @@ tc_has_modular_fia(struct drm_i915_private *i915, struct intel_digital_port *dig
mutex_lock(&dig_port->tc_lock);
wakeref = tc_cold_block(dig_port, &domain);
- val = intel_uncore_read(&i915->uncore, PORT_TX_DFLEXDPSP(FIA1));
+ val = intel_de_read(i915, PORT_TX_DFLEXDPSP(FIA1));
tc_cold_unblock(dig_port, domain, wakeref);
mutex_unlock(&dig_port->tc_lock);
diff --git a/drivers/gpu/drm/i915/display/intel_tv.c b/drivers/gpu/drm/i915/display/intel_tv.c
index 4d2101ca1692..b986bf075889 100644
--- a/drivers/gpu/drm/i915/display/intel_tv.c
+++ b/drivers/gpu/drm/i915/display/intel_tv.c
@@ -1905,10 +1905,10 @@ static void intel_tv_add_properties(struct drm_connector *connector)
tv_format_names[i] = tv_modes[i].name;
}
- drm_mode_create_tv_properties(&i915->drm, i, tv_format_names);
+ drm_mode_create_tv_properties_legacy(&i915->drm, i, tv_format_names);
drm_object_attach_property(&connector->base,
- i915->drm.mode_config.tv_mode_property,
+ i915->drm.mode_config.legacy_tv_mode_property,
conn_state->tv.mode);
drm_object_attach_property(&connector->base,
i915->drm.mode_config.tv_left_margin_property,
diff --git a/drivers/gpu/drm/i915/display/intel_vblank.c b/drivers/gpu/drm/i915/display/intel_vblank.c
new file mode 100644
index 000000000000..4c83e2320bca
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_vblank.c
@@ -0,0 +1,441 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2022-2023 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "i915_reg.h"
+#include "intel_de.h"
+#include "intel_display_types.h"
+#include "intel_vblank.h"
+
+/*
+ * This timing diagram depicts the video signal in and
+ * around the vertical blanking period.
+ *
+ * Assumptions about the fictitious mode used in this example:
+ * vblank_start >= 3
+ * vsync_start = vblank_start + 1
+ * vsync_end = vblank_start + 2
+ * vtotal = vblank_start + 3
+ *
+ * start of vblank:
+ * latch double buffered registers
+ * increment frame counter (ctg+)
+ * generate start of vblank interrupt (gen4+)
+ * |
+ * | frame start:
+ * | generate frame start interrupt (aka. vblank interrupt) (gmch)
+ * | may be shifted forward 1-3 extra lines via PIPECONF
+ * | |
+ * | | start of vsync:
+ * | | generate vsync interrupt
+ * | | |
+ * ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx
+ * . \hs/ . \hs/ \hs/ \hs/ . \hs/
+ * ----va---> <-----------------vb--------------------> <--------va-------------
+ * | | <----vs-----> |
+ * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
+ * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
+ * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
+ * | | |
+ * last visible pixel first visible pixel
+ * | increment frame counter (gen3/4)
+ * pixel counter = vblank_start * htotal pixel counter = 0 (gen3/4)
+ *
+ * x = horizontal active
+ * _ = horizontal blanking
+ * hs = horizontal sync
+ * va = vertical active
+ * vb = vertical blanking
+ * vs = vertical sync
+ * vbs = vblank_start (number)
+ *
+ * Summary:
+ * - most events happen at the start of horizontal sync
+ * - frame start happens at the start of horizontal blank, 1-4 lines
+ * (depending on PIPECONF settings) after the start of vblank
+ * - gen3/4 pixel and frame counter are synchronized with the start
+ * of horizontal active on the first line of vertical active
+ */
+
+/*
+ * Called from drm generic code, passed a 'crtc', which we use as a pipe index.
+ */
+u32 i915_get_vblank_counter(struct drm_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[drm_crtc_index(crtc)];
+ const struct drm_display_mode *mode = &vblank->hwmode;
+ enum pipe pipe = to_intel_crtc(crtc)->pipe;
+ u32 pixel, vbl_start, hsync_start, htotal;
+ u64 frame;
+
+ /*
+ * On i965gm TV output the frame counter only works up to
+ * the point when we enable the TV encoder. After that the
+ * frame counter ceases to work and reads zero. We need a
+ * vblank wait before enabling the TV encoder and so we
+ * have to enable vblank interrupts while the frame counter
+ * is still in a working state. However the core vblank code
+ * does not like us returning non-zero frame counter values
+ * when we've told it that we don't have a working frame
+ * counter. Thus we must stop non-zero values leaking out.
+ */
+ if (!vblank->max_vblank_count)
+ return 0;
+
+ htotal = mode->crtc_htotal;
+ hsync_start = mode->crtc_hsync_start;
+ vbl_start = mode->crtc_vblank_start;
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ vbl_start = DIV_ROUND_UP(vbl_start, 2);
+
+ /* Convert to pixel count */
+ vbl_start *= htotal;
+
+ /* Start of vblank event occurs at start of hsync */
+ vbl_start -= htotal - hsync_start;
+
+ /*
+ * High & low register fields aren't synchronized, so make sure
+ * we get a low value that's stable across two reads of the high
+ * register.
+ */
+ frame = intel_de_read64_2x32(dev_priv, PIPEFRAMEPIXEL(pipe), PIPEFRAME(pipe));
+
+ pixel = frame & PIPE_PIXEL_MASK;
+ frame = (frame >> PIPE_FRAME_LOW_SHIFT) & 0xffffff;
+
+ /*
+ * The frame counter increments at beginning of active.
+ * Cook up a vblank counter by also checking the pixel
+ * counter against vblank start.
+ */
+ return (frame + (pixel >= vbl_start)) & 0xffffff;
+}
+
+u32 g4x_get_vblank_counter(struct drm_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[drm_crtc_index(crtc)];
+ enum pipe pipe = to_intel_crtc(crtc)->pipe;
+
+ if (!vblank->max_vblank_count)
+ return 0;
+
+ return intel_de_read(dev_priv, PIPE_FRMCOUNT_G4X(pipe));
+}
+
+static u32 intel_crtc_scanlines_since_frame_timestamp(struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct drm_vblank_crtc *vblank =
+ &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
+ const struct drm_display_mode *mode = &vblank->hwmode;
+ u32 htotal = mode->crtc_htotal;
+ u32 clock = mode->crtc_clock;
+ u32 scan_prev_time, scan_curr_time, scan_post_time;
+
+ /*
+ * To avoid the race condition where we might cross into the
+ * next vblank just between the PIPE_FRMTMSTMP and TIMESTAMP_CTR
+ * reads. We make sure we read PIPE_FRMTMSTMP and TIMESTAMP_CTR
+ * during the same frame.
+ */
+ do {
+ /*
+ * This field provides read back of the display
+ * pipe frame time stamp. The time stamp value
+ * is sampled at every start of vertical blank.
+ */
+ scan_prev_time = intel_de_read_fw(dev_priv,
+ PIPE_FRMTMSTMP(crtc->pipe));
+
+ /*
+ * The TIMESTAMP_CTR register has the current
+ * time stamp value.
+ */
+ scan_curr_time = intel_de_read_fw(dev_priv, IVB_TIMESTAMP_CTR);
+
+ scan_post_time = intel_de_read_fw(dev_priv,
+ PIPE_FRMTMSTMP(crtc->pipe));
+ } while (scan_post_time != scan_prev_time);
+
+ return div_u64(mul_u32_u32(scan_curr_time - scan_prev_time,
+ clock), 1000 * htotal);
+}
+
+/*
+ * On certain encoders on certain platforms, pipe
+ * scanline register will not work to get the scanline,
+ * since the timings are driven from the PORT or issues
+ * with scanline register updates.
+ * This function will use Framestamp and current
+ * timestamp registers to calculate the scanline.
+ */
+static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc)
+{
+ struct drm_vblank_crtc *vblank =
+ &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
+ const struct drm_display_mode *mode = &vblank->hwmode;
+ u32 vblank_start = mode->crtc_vblank_start;
+ u32 vtotal = mode->crtc_vtotal;
+ u32 scanline;
+
+ scanline = intel_crtc_scanlines_since_frame_timestamp(crtc);
+ scanline = min(scanline, vtotal - 1);
+ scanline = (scanline + vblank_start) % vtotal;
+
+ return scanline;
+}
+
+/*
+ * intel_de_read_fw(), only for fast reads of display block, no need for
+ * forcewake etc.
+ */
+static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ const struct drm_display_mode *mode;
+ struct drm_vblank_crtc *vblank;
+ enum pipe pipe = crtc->pipe;
+ int position, vtotal;
+
+ if (!crtc->active)
+ return 0;
+
+ vblank = &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
+ mode = &vblank->hwmode;
+
+ if (crtc->mode_flags & I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP)
+ return __intel_get_crtc_scanline_from_timestamp(crtc);
+
+ vtotal = mode->crtc_vtotal;
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ vtotal /= 2;
+
+ position = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & PIPEDSL_LINE_MASK;
+
+ /*
+ * On HSW, the DSL reg (0x70000) appears to return 0 if we
+ * read it just before the start of vblank. So try it again
+ * so we don't accidentally end up spanning a vblank frame
+ * increment, causing the pipe_update_end() code to squak at us.
+ *
+ * The nature of this problem means we can't simply check the ISR
+ * bit and return the vblank start value; nor can we use the scanline
+ * debug register in the transcoder as it appears to have the same
+ * problem. We may need to extend this to include other platforms,
+ * but so far testing only shows the problem on HSW.
+ */
+ if (HAS_DDI(dev_priv) && !position) {
+ int i, temp;
+
+ for (i = 0; i < 100; i++) {
+ udelay(1);
+ temp = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & PIPEDSL_LINE_MASK;
+ if (temp != position) {
+ position = temp;
+ break;
+ }
+ }
+ }
+
+ /*
+ * See update_scanline_offset() for the details on the
+ * scanline_offset adjustment.
+ */
+ return (position + crtc->scanline_offset) % vtotal;
+}
+
+static bool i915_get_crtc_scanoutpos(struct drm_crtc *_crtc,
+ bool in_vblank_irq,
+ int *vpos, int *hpos,
+ ktime_t *stime, ktime_t *etime,
+ const struct drm_display_mode *mode)
+{
+ struct drm_device *dev = _crtc->dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_crtc *crtc = to_intel_crtc(_crtc);
+ enum pipe pipe = crtc->pipe;
+ int position;
+ int vbl_start, vbl_end, hsync_start, htotal, vtotal;
+ unsigned long irqflags;
+ bool use_scanline_counter = DISPLAY_VER(dev_priv) >= 5 ||
+ IS_G4X(dev_priv) || DISPLAY_VER(dev_priv) == 2 ||
+ crtc->mode_flags & I915_MODE_FLAG_USE_SCANLINE_COUNTER;
+
+ if (drm_WARN_ON(&dev_priv->drm, !mode->crtc_clock)) {
+ drm_dbg(&dev_priv->drm,
+ "trying to get scanoutpos for disabled pipe %c\n",
+ pipe_name(pipe));
+ return false;
+ }
+
+ htotal = mode->crtc_htotal;
+ hsync_start = mode->crtc_hsync_start;
+ vtotal = mode->crtc_vtotal;
+ vbl_start = mode->crtc_vblank_start;
+ vbl_end = mode->crtc_vblank_end;
+
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
+ vbl_start = DIV_ROUND_UP(vbl_start, 2);
+ vbl_end /= 2;
+ vtotal /= 2;
+ }
+
+ /*
+ * Lock uncore.lock, as we will do multiple timing critical raw
+ * register reads, potentially with preemption disabled, so the
+ * following code must not block on uncore.lock.
+ */
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+ /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
+
+ /* Get optional system timestamp before query. */
+ if (stime)
+ *stime = ktime_get();
+
+ if (crtc->mode_flags & I915_MODE_FLAG_VRR) {
+ int scanlines = intel_crtc_scanlines_since_frame_timestamp(crtc);
+
+ position = __intel_get_crtc_scanline(crtc);
+
+ /*
+ * Already exiting vblank? If so, shift our position
+ * so it looks like we're already apporaching the full
+ * vblank end. This should make the generated timestamp
+ * more or less match when the active portion will start.
+ */
+ if (position >= vbl_start && scanlines < position)
+ position = min(crtc->vmax_vblank_start + scanlines, vtotal - 1);
+ } else if (use_scanline_counter) {
+ /* No obvious pixelcount register. Only query vertical
+ * scanout position from Display scan line register.
+ */
+ position = __intel_get_crtc_scanline(crtc);
+ } else {
+ /*
+ * Have access to pixelcount since start of frame.
+ * We can split this into vertical and horizontal
+ * scanout position.
+ */
+ position = (intel_de_read_fw(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
+
+ /* convert to pixel counts */
+ vbl_start *= htotal;
+ vbl_end *= htotal;
+ vtotal *= htotal;
+
+ /*
+ * In interlaced modes, the pixel counter counts all pixels,
+ * so one field will have htotal more pixels. In order to avoid
+ * the reported position from jumping backwards when the pixel
+ * counter is beyond the length of the shorter field, just
+ * clamp the position the length of the shorter field. This
+ * matches how the scanline counter based position works since
+ * the scanline counter doesn't count the two half lines.
+ */
+ if (position >= vtotal)
+ position = vtotal - 1;
+
+ /*
+ * Start of vblank interrupt is triggered at start of hsync,
+ * just prior to the first active line of vblank. However we
+ * consider lines to start at the leading edge of horizontal
+ * active. So, should we get here before we've crossed into
+ * the horizontal active of the first line in vblank, we would
+ * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
+ * always add htotal-hsync_start to the current pixel position.
+ */
+ position = (position + htotal - hsync_start) % vtotal;
+ }
+
+ /* Get optional system timestamp after query. */
+ if (etime)
+ *etime = ktime_get();
+
+ /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
+
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+
+ /*
+ * While in vblank, position will be negative
+ * counting up towards 0 at vbl_end. And outside
+ * vblank, position will be positive counting
+ * up since vbl_end.
+ */
+ if (position >= vbl_start)
+ position -= vbl_end;
+ else
+ position += vtotal - vbl_end;
+
+ if (use_scanline_counter) {
+ *vpos = position;
+ *hpos = 0;
+ } else {
+ *vpos = position / htotal;
+ *hpos = position - (*vpos * htotal);
+ }
+
+ return true;
+}
+
+bool intel_crtc_get_vblank_timestamp(struct drm_crtc *crtc, int *max_error,
+ ktime_t *vblank_time, bool in_vblank_irq)
+{
+ return drm_crtc_vblank_helper_get_vblank_timestamp_internal(
+ crtc, max_error, vblank_time, in_vblank_irq,
+ i915_get_crtc_scanoutpos);
+}
+
+int intel_get_crtc_scanline(struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ unsigned long irqflags;
+ int position;
+
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+ position = __intel_get_crtc_scanline(crtc);
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+
+ return position;
+}
+
+static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
+ enum pipe pipe)
+{
+ i915_reg_t reg = PIPEDSL(pipe);
+ u32 line1, line2;
+
+ line1 = intel_de_read(dev_priv, reg) & PIPEDSL_LINE_MASK;
+ msleep(5);
+ line2 = intel_de_read(dev_priv, reg) & PIPEDSL_LINE_MASK;
+
+ return line1 != line2;
+}
+
+static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+
+ /* Wait for the display line to settle/start moving */
+ if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
+ drm_err(&dev_priv->drm,
+ "pipe %c scanline %s wait timed out\n",
+ pipe_name(pipe), str_on_off(state));
+}
+
+void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc)
+{
+ wait_for_pipe_scanline_moving(crtc, false);
+}
+
+void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
+{
+ wait_for_pipe_scanline_moving(crtc, true);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_vblank.h b/drivers/gpu/drm/i915/display/intel_vblank.h
new file mode 100644
index 000000000000..c9fea2c2a990
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_vblank.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022-2023 Intel Corporation
+ */
+
+#ifndef __INTEL_VBLANK_H__
+#define __INTEL_VBLANK_H__
+
+#include <linux/ktime.h>
+#include <linux/types.h>
+
+struct drm_crtc;
+struct intel_crtc;
+
+u32 i915_get_vblank_counter(struct drm_crtc *crtc);
+u32 g4x_get_vblank_counter(struct drm_crtc *crtc);
+bool intel_crtc_get_vblank_timestamp(struct drm_crtc *crtc, int *max_error,
+ ktime_t *vblank_time, bool in_vblank_irq);
+int intel_get_crtc_scanline(struct intel_crtc *crtc);
+void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc);
+void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc);
+
+#endif /* __INTEL_VBLANK_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c
index 9d3b77b41b5c..207b2a648d32 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc.c
+++ b/drivers/gpu/drm/i915/display/intel_vdsc.c
@@ -345,16 +345,13 @@ bool intel_dsc_source_support(const struct intel_crtc_state *crtc_state)
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- if (!RUNTIME_INFO(i915)->has_dsc)
+ if (!HAS_DSC(i915))
return false;
- if (DISPLAY_VER(i915) >= 12)
- return true;
-
- if (DISPLAY_VER(i915) >= 11 && cpu_transcoder != TRANSCODER_A)
- return true;
+ if (DISPLAY_VER(i915) == 11 && cpu_transcoder == TRANSCODER_A)
+ return false;
- return false;
+ return true;
}
static bool is_pipe_dsc(struct intel_crtc *crtc, enum transcoder cpu_transcoder)
diff --git a/drivers/gpu/drm/i915/display/intel_vga.c b/drivers/gpu/drm/i915/display/intel_vga.c
index a69bfcac9a94..286a0bdd28c6 100644
--- a/drivers/gpu/drm/i915/display/intel_vga.c
+++ b/drivers/gpu/drm/i915/display/intel_vga.c
@@ -6,9 +6,10 @@
#include <linux/pci.h>
#include <linux/vgaarb.h>
-#include <drm/i915_drm.h>
#include <video/vga.h>
+#include "soc/intel_gmch.h"
+
#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_de.h"
@@ -98,39 +99,12 @@ void intel_vga_reset_io_mem(struct drm_i915_private *i915)
vga_put(pdev, VGA_RSRC_LEGACY_IO);
}
-static int
-intel_vga_set_state(struct drm_i915_private *i915, bool enable_decode)
-{
- unsigned int reg = DISPLAY_VER(i915) >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
- u16 gmch_ctrl;
-
- if (pci_read_config_word(i915->bridge_dev, reg, &gmch_ctrl)) {
- drm_err(&i915->drm, "failed to read control word\n");
- return -EIO;
- }
-
- if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !enable_decode)
- return 0;
-
- if (enable_decode)
- gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
- else
- gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
-
- if (pci_write_config_word(i915->bridge_dev, reg, gmch_ctrl)) {
- drm_err(&i915->drm, "failed to write control word\n");
- return -EIO;
- }
-
- return 0;
-}
-
static unsigned int
intel_vga_set_decode(struct pci_dev *pdev, bool enable_decode)
{
struct drm_i915_private *i915 = pdev_to_i915(pdev);
- intel_vga_set_state(i915, enable_decode);
+ intel_gmch_vga_set_state(i915, enable_decode);
if (enable_decode)
return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
diff --git a/drivers/gpu/drm/i915/display/intel_vrr.c b/drivers/gpu/drm/i915/display/intel_vrr.c
index 7b1357e82b69..5ff6aed9575e 100644
--- a/drivers/gpu/drm/i915/display/intel_vrr.c
+++ b/drivers/gpu/drm/i915/display/intel_vrr.c
@@ -78,10 +78,10 @@ static int intel_vrr_vblank_exit_length(const struct intel_crtc_state *crtc_stat
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- /* The hw imposes the extra scanline before frame start */
if (DISPLAY_VER(i915) >= 13)
- return crtc_state->vrr.guardband + crtc_state->framestart_delay + 1;
+ return crtc_state->vrr.guardband;
else
+ /* The hw imposes the extra scanline before frame start */
return crtc_state->vrr.pipeline_full + crtc_state->framestart_delay + 1;
}
@@ -151,50 +151,46 @@ intel_vrr_compute_config(struct intel_crtc_state *crtc_state,
* number of scan lines. Assuming 0 for no DSB.
*/
crtc_state->vrr.guardband =
- crtc_state->vrr.vmin - adjusted_mode->crtc_vdisplay;
+ crtc_state->vrr.vmin + 1 - adjusted_mode->crtc_vdisplay;
} else {
- /*
- * FIXME: s/4/framestart_delay/ to get consistent
- * earliest/latest points for register latching regardless
- * of the framestart_delay used?
- *
- * FIXME: this really needs the extra scanline to provide consistent
- * behaviour for all framestart_delay values. Otherwise with
- * framestart_delay==4 we will end up extending the min vblank by
- * one extra line.
- */
crtc_state->vrr.pipeline_full =
- min(255, crtc_state->vrr.vmin - adjusted_mode->crtc_vdisplay - 4 - 1);
+ min(255, crtc_state->vrr.vmin - adjusted_mode->crtc_vdisplay -
+ crtc_state->framestart_delay - 1);
}
crtc_state->mode_flags |= I915_MODE_FLAG_VRR;
}
+static u32 trans_vrr_ctl(const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+
+ if (DISPLAY_VER(i915) >= 13)
+ return VRR_CTL_IGN_MAX_SHIFT | VRR_CTL_FLIP_LINE_EN |
+ XELPD_VRR_CTL_VRR_GUARDBAND(crtc_state->vrr.guardband);
+ else
+ return VRR_CTL_IGN_MAX_SHIFT | VRR_CTL_FLIP_LINE_EN |
+ VRR_CTL_PIPELINE_FULL(crtc_state->vrr.pipeline_full) |
+ VRR_CTL_PIPELINE_FULL_OVERRIDE;
+}
+
void intel_vrr_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- u32 trans_vrr_ctl;
if (!crtc_state->vrr.enable)
return;
- if (DISPLAY_VER(dev_priv) >= 13)
- trans_vrr_ctl = VRR_CTL_VRR_ENABLE |
- VRR_CTL_IGN_MAX_SHIFT | VRR_CTL_FLIP_LINE_EN |
- XELPD_VRR_CTL_VRR_GUARDBAND(crtc_state->vrr.guardband);
- else
- trans_vrr_ctl = VRR_CTL_VRR_ENABLE |
- VRR_CTL_IGN_MAX_SHIFT | VRR_CTL_FLIP_LINE_EN |
- VRR_CTL_PIPELINE_FULL(crtc_state->vrr.pipeline_full) |
- VRR_CTL_PIPELINE_FULL_OVERRIDE;
-
intel_de_write(dev_priv, TRANS_VRR_VMIN(cpu_transcoder), crtc_state->vrr.vmin - 1);
intel_de_write(dev_priv, TRANS_VRR_VMAX(cpu_transcoder), crtc_state->vrr.vmax - 1);
- intel_de_write(dev_priv, TRANS_VRR_CTL(cpu_transcoder), trans_vrr_ctl);
+ intel_de_write(dev_priv, TRANS_VRR_CTL(cpu_transcoder), trans_vrr_ctl(crtc_state));
intel_de_write(dev_priv, TRANS_VRR_FLIPLINE(cpu_transcoder), crtc_state->vrr.flipline - 1);
intel_de_write(dev_priv, TRANS_PUSH(cpu_transcoder), TRANS_PUSH_EN);
+
+ intel_de_write(dev_priv, TRANS_VRR_CTL(cpu_transcoder),
+ VRR_CTL_VRR_ENABLE | trans_vrr_ctl(crtc_state));
}
void intel_vrr_send_push(const struct intel_crtc_state *crtc_state)
@@ -231,8 +227,13 @@ void intel_vrr_disable(const struct intel_crtc_state *old_crtc_state)
if (!old_crtc_state->vrr.enable)
return;
- intel_de_write(dev_priv, TRANS_VRR_CTL(cpu_transcoder), 0);
+ intel_de_write(dev_priv, TRANS_VRR_CTL(cpu_transcoder),
+ trans_vrr_ctl(old_crtc_state));
+ intel_de_wait_for_clear(dev_priv, TRANS_VRR_STATUS(cpu_transcoder),
+ VRR_STATUS_VRR_EN_LIVE, 1000);
+
intel_de_write(dev_priv, TRANS_PUSH(cpu_transcoder), 0);
+ intel_de_write(dev_priv, TRANS_VRR_CTL(cpu_transcoder), 0);
}
void intel_vrr_get_config(struct intel_crtc *crtc,
diff --git a/drivers/gpu/drm/i915/display/skl_scaler.c b/drivers/gpu/drm/i915/display/skl_scaler.c
index d7390067b7d4..473d53610b92 100644
--- a/drivers/gpu/drm/i915/display/skl_scaler.c
+++ b/drivers/gpu/drm/i915/display/skl_scaler.c
@@ -87,6 +87,14 @@ static u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_cosited)
#define ICL_MAX_SRC_H 4096
#define ICL_MAX_DST_W 5120
#define ICL_MAX_DST_H 4096
+#define TGL_MAX_SRC_W 5120
+#define TGL_MAX_SRC_H 8192
+#define TGL_MAX_DST_W 8192
+#define TGL_MAX_DST_H 8192
+#define MTL_MAX_SRC_W 4096
+#define MTL_MAX_SRC_H 8192
+#define MTL_MAX_DST_W 8192
+#define MTL_MAX_DST_H 8192
#define SKL_MIN_YUV_420_SRC_W 16
#define SKL_MIN_YUV_420_SRC_H 16
@@ -103,6 +111,8 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
+ int min_src_w, min_src_h, min_dst_w, min_dst_h;
+ int max_src_w, max_src_h, max_dst_w, max_dst_h;
/*
* Src coordinates are already rotated by 270 degrees for
@@ -157,15 +167,38 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
return -EINVAL;
}
+ min_src_w = SKL_MIN_SRC_W;
+ min_src_h = SKL_MIN_SRC_H;
+ min_dst_w = SKL_MIN_DST_W;
+ min_dst_h = SKL_MIN_DST_H;
+
+ if (DISPLAY_VER(dev_priv) < 11) {
+ max_src_w = SKL_MAX_SRC_W;
+ max_src_h = SKL_MAX_SRC_H;
+ max_dst_w = SKL_MAX_DST_W;
+ max_dst_h = SKL_MAX_DST_H;
+ } else if (DISPLAY_VER(dev_priv) < 12) {
+ max_src_w = ICL_MAX_SRC_W;
+ max_src_h = ICL_MAX_SRC_H;
+ max_dst_w = ICL_MAX_DST_W;
+ max_dst_h = ICL_MAX_DST_H;
+ } else if (DISPLAY_VER(dev_priv) < 14) {
+ max_src_w = TGL_MAX_SRC_W;
+ max_src_h = TGL_MAX_SRC_H;
+ max_dst_w = TGL_MAX_DST_W;
+ max_dst_h = TGL_MAX_DST_H;
+ } else {
+ max_src_w = MTL_MAX_SRC_W;
+ max_src_h = MTL_MAX_SRC_H;
+ max_dst_w = MTL_MAX_DST_W;
+ max_dst_h = MTL_MAX_DST_H;
+ }
+
/* range checks */
- if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
- dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||
- (DISPLAY_VER(dev_priv) >= 11 &&
- (src_w > ICL_MAX_SRC_W || src_h > ICL_MAX_SRC_H ||
- dst_w > ICL_MAX_DST_W || dst_h > ICL_MAX_DST_H)) ||
- (DISPLAY_VER(dev_priv) < 11 &&
- (src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
- dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H))) {
+ if (src_w < min_src_w || src_h < min_src_h ||
+ dst_w < min_dst_w || dst_h < min_dst_h ||
+ src_w > max_src_w || src_h > max_src_h ||
+ dst_w > max_dst_w || dst_h > max_dst_h) {
drm_dbg_kms(&dev_priv->drm,
"scaler_user index %u.%u: src %ux%u dst %ux%u "
"size is out of scaler range\n",
diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.c b/drivers/gpu/drm/i915/display/skl_universal_plane.c
index 7d07fa3123ec..9b172a1e90de 100644
--- a/drivers/gpu/drm/i915/display/skl_universal_plane.c
+++ b/drivers/gpu/drm/i915/display/skl_universal_plane.c
@@ -1848,7 +1848,7 @@ static bool bo_has_valid_encryption(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
- return intel_pxp_key_check(&to_gt(i915)->pxp, obj, false) == 0;
+ return intel_pxp_key_check(i915->pxp, obj, false) == 0;
}
static bool pxp_is_borked(struct drm_i915_gem_object *obj)
diff --git a/drivers/gpu/drm/i915/display/skl_watermark.c b/drivers/gpu/drm/i915/display/skl_watermark.c
index e0766d1be966..d1670cc3eff2 100644
--- a/drivers/gpu/drm/i915/display/skl_watermark.c
+++ b/drivers/gpu/drm/i915/display/skl_watermark.c
@@ -45,8 +45,7 @@ u8 intel_enabled_dbuf_slices_mask(struct drm_i915_private *i915)
enum dbuf_slice slice;
for_each_dbuf_slice(i915, slice) {
- if (intel_uncore_read(&i915->uncore,
- DBUF_CTL_S(slice)) & DBUF_POWER_STATE)
+ if (intel_de_read(i915, DBUF_CTL_S(slice)) & DBUF_POWER_STATE)
enabled_slices |= BIT(slice);
}
@@ -75,7 +74,7 @@ intel_sagv_block_time(struct drm_i915_private *i915)
if (DISPLAY_VER(i915) >= 14) {
u32 val;
- val = intel_uncore_read(&i915->uncore, MTL_LATENCY_SAGV);
+ val = intel_de_read(i915, MTL_LATENCY_SAGV);
return REG_FIELD_GET(MTL_LATENCY_QCLK_SAGV, val);
} else if (DISPLAY_VER(i915) >= 12) {
@@ -756,18 +755,18 @@ skl_ddb_get_hw_plane_state(struct drm_i915_private *i915,
/* Cursor doesn't support NV12/planar, so no extra calculation needed */
if (plane_id == PLANE_CURSOR) {
- val = intel_uncore_read(&i915->uncore, CUR_BUF_CFG(pipe));
+ val = intel_de_read(i915, CUR_BUF_CFG(pipe));
skl_ddb_entry_init_from_hw(ddb, val);
return;
}
- val = intel_uncore_read(&i915->uncore, PLANE_BUF_CFG(pipe, plane_id));
+ val = intel_de_read(i915, PLANE_BUF_CFG(pipe, plane_id));
skl_ddb_entry_init_from_hw(ddb, val);
if (DISPLAY_VER(i915) >= 11)
return;
- val = intel_uncore_read(&i915->uncore, PLANE_NV12_BUF_CFG(pipe, plane_id));
+ val = intel_de_read(i915, PLANE_NV12_BUF_CFG(pipe, plane_id));
skl_ddb_entry_init_from_hw(ddb_y, val);
}
@@ -1587,7 +1586,8 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
skl_check_wm_level(&wm->wm[level], ddb);
if (icl_need_wm1_wa(i915, plane_id) &&
- level == 1 && wm->wm[0].enable) {
+ level == 1 && !wm->wm[level].enable &&
+ wm->wm[0].enable) {
wm->wm[level].blocks = wm->wm[0].blocks;
wm->wm[level].lines = wm->wm[0].lines;
wm->wm[level].ignore_lines = wm->wm[0].ignore_lines;
@@ -2821,36 +2821,32 @@ static void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
for (level = 0; level <= max_level; level++) {
if (plane_id != PLANE_CURSOR)
- val = intel_uncore_read(&i915->uncore, PLANE_WM(pipe, plane_id, level));
+ val = intel_de_read(i915, PLANE_WM(pipe, plane_id, level));
else
- val = intel_uncore_read(&i915->uncore, CUR_WM(pipe, level));
+ val = intel_de_read(i915, CUR_WM(pipe, level));
skl_wm_level_from_reg_val(val, &wm->wm[level]);
}
if (plane_id != PLANE_CURSOR)
- val = intel_uncore_read(&i915->uncore, PLANE_WM_TRANS(pipe, plane_id));
+ val = intel_de_read(i915, PLANE_WM_TRANS(pipe, plane_id));
else
- val = intel_uncore_read(&i915->uncore, CUR_WM_TRANS(pipe));
+ val = intel_de_read(i915, CUR_WM_TRANS(pipe));
skl_wm_level_from_reg_val(val, &wm->trans_wm);
if (HAS_HW_SAGV_WM(i915)) {
if (plane_id != PLANE_CURSOR)
- val = intel_uncore_read(&i915->uncore,
- PLANE_WM_SAGV(pipe, plane_id));
+ val = intel_de_read(i915, PLANE_WM_SAGV(pipe, plane_id));
else
- val = intel_uncore_read(&i915->uncore,
- CUR_WM_SAGV(pipe));
+ val = intel_de_read(i915, CUR_WM_SAGV(pipe));
skl_wm_level_from_reg_val(val, &wm->sagv.wm0);
if (plane_id != PLANE_CURSOR)
- val = intel_uncore_read(&i915->uncore,
- PLANE_WM_SAGV_TRANS(pipe, plane_id));
+ val = intel_de_read(i915, PLANE_WM_SAGV_TRANS(pipe, plane_id));
else
- val = intel_uncore_read(&i915->uncore,
- CUR_WM_SAGV_TRANS(pipe));
+ val = intel_de_read(i915, CUR_WM_SAGV_TRANS(pipe));
skl_wm_level_from_reg_val(val, &wm->sagv.trans_wm);
} else if (DISPLAY_VER(i915) >= 12) {
@@ -3126,8 +3122,8 @@ void skl_watermark_ipc_update(struct drm_i915_private *i915)
if (!HAS_IPC(i915))
return;
- intel_uncore_rmw(&i915->uncore, DISP_ARB_CTL2, DISP_IPC_ENABLE,
- skl_watermark_ipc_enabled(i915) ? DISP_IPC_ENABLE : 0);
+ intel_de_rmw(i915, DISP_ARB_CTL2, DISP_IPC_ENABLE,
+ skl_watermark_ipc_enabled(i915) ? DISP_IPC_ENABLE : 0);
}
static bool skl_watermark_ipc_can_enable(struct drm_i915_private *i915)
@@ -3201,19 +3197,18 @@ adjust_wm_latency(struct drm_i915_private *i915,
static void mtl_read_wm_latency(struct drm_i915_private *i915, u16 wm[])
{
- struct intel_uncore *uncore = &i915->uncore;
int max_level = ilk_wm_max_level(i915);
u32 val;
- val = intel_uncore_read(uncore, MTL_LATENCY_LP0_LP1);
+ val = intel_de_read(i915, MTL_LATENCY_LP0_LP1);
wm[0] = REG_FIELD_GET(MTL_LATENCY_LEVEL_EVEN_MASK, val);
wm[1] = REG_FIELD_GET(MTL_LATENCY_LEVEL_ODD_MASK, val);
- val = intel_uncore_read(uncore, MTL_LATENCY_LP2_LP3);
+ val = intel_de_read(i915, MTL_LATENCY_LP2_LP3);
wm[2] = REG_FIELD_GET(MTL_LATENCY_LEVEL_EVEN_MASK, val);
wm[3] = REG_FIELD_GET(MTL_LATENCY_LEVEL_ODD_MASK, val);
- val = intel_uncore_read(uncore, MTL_LATENCY_LP4_LP5);
+ val = intel_de_read(i915, MTL_LATENCY_LP4_LP5);
wm[4] = REG_FIELD_GET(MTL_LATENCY_LEVEL_EVEN_MASK, val);
wm[5] = REG_FIELD_GET(MTL_LATENCY_LEVEL_ODD_MASK, val);
diff --git a/drivers/gpu/drm/i915/display/skl_watermark.h b/drivers/gpu/drm/i915/display/skl_watermark.h
index 7a5a4e67cd73..37954c472070 100644
--- a/drivers/gpu/drm/i915/display/skl_watermark.h
+++ b/drivers/gpu/drm/i915/display/skl_watermark.h
@@ -8,7 +8,7 @@
#include <linux/types.h>
-#include "intel_display.h"
+#include "intel_display_limits.h"
#include "intel_global_state.h"
#include "intel_pm_types.h"
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c
index 84481030883a..2289f6b1b4eb 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi.c
@@ -1916,7 +1916,7 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
intel_dsi->panel_power_off_time = ktime_get_boottime();
- intel_bios_init_panel(dev_priv, &intel_connector->panel, NULL, NULL);
+ intel_bios_init_panel_late(dev_priv, &intel_connector->panel, NULL, NULL);
if (intel_connector->panel.vbt.dsi.config->dual_link)
intel_dsi->ports = BIT(PORT_A) | BIT(PORT_C);
@@ -1983,7 +1983,7 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
goto err_cleanup_connector;
}
- intel_panel_init(intel_connector);
+ intel_panel_init(intel_connector, NULL);
intel_backlight_setup(intel_connector, INVALID_PIPE);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
index b3b398fe689c..385ffc575b48 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
@@ -8,6 +8,7 @@
#include "display/intel_frontbuffer.h"
+#include "i915_config.h"
#include "i915_drv.h"
#include "i915_gem_clflush.h"
#include "i915_sw_fence_work.h"
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index 6250de9b9196..6d639ca24dfb 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -257,7 +257,7 @@ static int proto_context_set_protected(struct drm_i915_private *i915,
if (!protected) {
pc->uses_protected_content = false;
- } else if (!intel_pxp_is_enabled(&to_gt(i915)->pxp)) {
+ } else if (!intel_pxp_is_enabled(i915->pxp)) {
ret = -ENODEV;
} else if ((pc->user_flags & BIT(UCONTEXT_RECOVERABLE)) ||
!(pc->user_flags & BIT(UCONTEXT_BANNABLE))) {
@@ -271,8 +271,8 @@ static int proto_context_set_protected(struct drm_i915_private *i915,
*/
pc->pxp_wakeref = intel_runtime_pm_get(&i915->runtime_pm);
- if (!intel_pxp_is_active(&to_gt(i915)->pxp))
- ret = intel_pxp_start(&to_gt(i915)->pxp);
+ if (!intel_pxp_is_active(i915->pxp))
+ ret = intel_pxp_start(i915->pxp);
}
return ret;
@@ -1096,16 +1096,15 @@ static struct i915_gem_engines *alloc_engines(unsigned int count)
static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx,
struct intel_sseu rcs_sseu)
{
- const struct intel_gt *gt = to_gt(ctx->i915);
+ const unsigned int max = I915_NUM_ENGINES;
struct intel_engine_cs *engine;
struct i915_gem_engines *e, *err;
- enum intel_engine_id id;
- e = alloc_engines(I915_NUM_ENGINES);
+ e = alloc_engines(max);
if (!e)
return ERR_PTR(-ENOMEM);
- for_each_engine(engine, gt, id) {
+ for_each_uabi_engine(engine, ctx->i915) {
struct intel_context *ce;
struct intel_sseu sseu = {};
int ret;
@@ -1113,7 +1112,7 @@ static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx,
if (engine->legacy_idx == INVALID_ENGINE)
continue;
- GEM_BUG_ON(engine->legacy_idx >= I915_NUM_ENGINES);
+ GEM_BUG_ON(engine->legacy_idx >= max);
GEM_BUG_ON(e->engines[engine->legacy_idx]);
ce = intel_context_create(engine);
@@ -1861,11 +1860,19 @@ static int get_ppgtt(struct drm_i915_file_private *file_priv,
vm = ctx->vm;
GEM_BUG_ON(!vm);
+ /*
+ * Get a reference for the allocated handle. Once the handle is
+ * visible in the vm_xa table, userspace could try to close it
+ * from under our feet, so we need to hold the extra reference
+ * first.
+ */
+ i915_vm_get(vm);
+
err = xa_alloc(&file_priv->vm_xa, &id, vm, xa_limit_32b, GFP_KERNEL);
- if (err)
+ if (err) {
+ i915_vm_put(vm);
return err;
-
- i915_vm_get(vm);
+ }
GEM_BUG_ON(id == 0); /* reserved for invalid/unassigned ppgtt */
args->value = id;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_create.c b/drivers/gpu/drm/i915/gem/i915_gem_create.c
index 33673fe7ee0a..e76c9703680e 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_create.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_create.c
@@ -5,6 +5,7 @@
#include <drm/drm_fourcc.h>
+#include "display/intel_display.h"
#include "gem/i915_gem_ioctls.h"
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_region.h"
@@ -384,7 +385,7 @@ static int ext_set_protected(struct i915_user_extension __user *base, void *data
if (ext.flags)
return -EINVAL;
- if (!intel_pxp_is_enabled(&to_gt(ext_data->i915)->pxp))
+ if (!intel_pxp_is_enabled(ext_data->i915->pxp))
return -ENODEV;
ext_data->flags |= I915_BO_PROTECTED;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
index d44a152ce680..497de40b8e68 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_domain.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
@@ -4,6 +4,7 @@
* Copyright © 2014-2016 Intel Corporation
*/
+#include "display/intel_display.h"
#include "display/intel_frontbuffer.h"
#include "gt/intel_gt.h"
@@ -17,6 +18,8 @@
#include "i915_gem_object.h"
#include "i915_vma.h"
+#define VTD_GUARD (168u * I915_GTT_PAGE_SIZE) /* 168 or tile-row PTE padding */
+
static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
@@ -424,6 +427,17 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
if (ret)
return ERR_PTR(ret);
+ /* VT-d may overfetch before/after the vma, so pad with scratch */
+ if (intel_scanout_needs_vtd_wa(i915)) {
+ unsigned int guard = VTD_GUARD;
+
+ if (i915_gem_object_is_tiled(obj))
+ guard = max(guard,
+ i915_gem_object_get_tile_row_size(obj));
+
+ flags |= PIN_OFFSET_GUARD | guard;
+ }
+
/*
* As the user may map the buffer once pinned in the display plane
* (e.g. libkms for the bootup splash), we have to ensure that we
@@ -444,7 +458,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
if (IS_ERR(vma))
return vma;
- vma->display_alignment = max_t(u64, vma->display_alignment, alignment);
+ vma->display_alignment = max(vma->display_alignment, alignment);
i915_vma_mark_scanout(vma);
i915_gem_object_flush_if_display_locked(obj);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index f266b68cf012..9dce2957b4e5 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -379,22 +379,25 @@ eb_vma_misplaced(const struct drm_i915_gem_exec_object2 *entry,
const struct i915_vma *vma,
unsigned int flags)
{
- if (vma->node.size < entry->pad_to_size)
+ const u64 start = i915_vma_offset(vma);
+ const u64 size = i915_vma_size(vma);
+
+ if (size < entry->pad_to_size)
return true;
- if (entry->alignment && !IS_ALIGNED(vma->node.start, entry->alignment))
+ if (entry->alignment && !IS_ALIGNED(start, entry->alignment))
return true;
if (flags & EXEC_OBJECT_PINNED &&
- vma->node.start != entry->offset)
+ start != entry->offset)
return true;
if (flags & __EXEC_OBJECT_NEEDS_BIAS &&
- vma->node.start < BATCH_OFFSET_BIAS)
+ start < BATCH_OFFSET_BIAS)
return true;
if (!(flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) &&
- (vma->node.start + vma->node.size + 4095) >> 32)
+ (start + size + 4095) >> 32)
return true;
if (flags & __EXEC_OBJECT_NEEDS_MAP &&
@@ -440,7 +443,7 @@ eb_pin_vma(struct i915_execbuffer *eb,
int err;
if (vma->node.size)
- pin_flags = vma->node.start;
+ pin_flags = __i915_vma_offset(vma);
else
pin_flags = entry->offset & PIN_OFFSET_MASK;
@@ -663,8 +666,8 @@ static int eb_reserve_vma(struct i915_execbuffer *eb,
if (err)
return err;
- if (entry->offset != vma->node.start) {
- entry->offset = vma->node.start | UPDATE;
+ if (entry->offset != i915_vma_offset(vma)) {
+ entry->offset = i915_vma_offset(vma) | UPDATE;
eb->args->flags |= __EXEC_HAS_RELOC;
}
@@ -906,7 +909,7 @@ static struct i915_vma *eb_lookup_vma(struct i915_execbuffer *eb, u32 handle)
*/
if (i915_gem_context_uses_protected_content(eb->gem_context) &&
i915_gem_object_is_protected(obj)) {
- err = intel_pxp_key_check(&vm->gt->pxp, obj, true);
+ err = intel_pxp_key_check(eb->i915->pxp, obj, true);
if (err) {
i915_gem_object_put(obj);
return ERR_PTR(err);
@@ -1021,8 +1024,8 @@ static int eb_validate_vmas(struct i915_execbuffer *eb)
return err;
if (!err) {
- if (entry->offset != vma->node.start) {
- entry->offset = vma->node.start | UPDATE;
+ if (entry->offset != i915_vma_offset(vma)) {
+ entry->offset = i915_vma_offset(vma) | UPDATE;
eb->args->flags |= __EXEC_HAS_RELOC;
}
} else {
@@ -1103,7 +1106,7 @@ static inline u64
relocation_target(const struct drm_i915_gem_relocation_entry *reloc,
const struct i915_vma *target)
{
- return gen8_canonical_addr((int)reloc->delta + target->node.start);
+ return gen8_canonical_addr((int)reloc->delta + i915_vma_offset(target));
}
static void reloc_cache_init(struct reloc_cache *cache,
@@ -1312,7 +1315,7 @@ static void *reloc_iomap(struct i915_vma *batch,
if (err) /* no inactive aperture space, use cpu reloc */
return NULL;
} else {
- cache->node.start = vma->node.start;
+ cache->node.start = i915_ggtt_offset(vma);
cache->node.mm = (void *)vma;
}
}
@@ -1475,7 +1478,7 @@ eb_relocate_entry(struct i915_execbuffer *eb,
* more work needs to be done.
*/
if (!DBG_FORCE_RELOC &&
- gen8_canonical_addr(target->vma->node.start) == reloc->presumed_offset)
+ gen8_canonical_addr(i915_vma_offset(target->vma)) == reloc->presumed_offset)
return 0;
/* Check that the relocation address is valid... */
@@ -2405,7 +2408,7 @@ static int eb_request_submit(struct i915_execbuffer *eb,
}
err = rq->context->engine->emit_bb_start(rq,
- batch->node.start +
+ i915_vma_offset(batch) +
eb->batch_start_offset,
batch_len,
eb->batch_flags);
@@ -2416,7 +2419,7 @@ static int eb_request_submit(struct i915_execbuffer *eb,
GEM_BUG_ON(intel_context_is_parallel(rq->context));
GEM_BUG_ON(eb->batch_start_offset);
err = rq->context->engine->emit_bb_start(rq,
- eb->trampoline->node.start +
+ i915_vma_offset(eb->trampoline) +
batch_len, 0, 0);
if (err)
return err;
@@ -3483,6 +3486,13 @@ err_request:
eb.composite_fence :
&eb.requests[0]->fence);
+ if (unlikely(eb.gem_context->syncobj)) {
+ drm_syncobj_replace_fence(eb.gem_context->syncobj,
+ eb.composite_fence ?
+ eb.composite_fence :
+ &eb.requests[0]->fence);
+ }
+
if (out_fence) {
if (err == 0) {
fd_install(out_fence_fd, out_fence->file);
@@ -3494,13 +3504,6 @@ err_request:
}
}
- if (unlikely(eb.gem_context->syncobj)) {
- drm_syncobj_replace_fence(eb.gem_context->syncobj,
- eb.composite_fence ?
- eb.composite_fence :
- &eb.requests[0]->fence);
- }
-
if (!out_fence && eb.composite_fence)
dma_fence_put(eb.composite_fence);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_internal.c b/drivers/gpu/drm/i915/gem/i915_gem_internal.c
index f66bcefc09ec..6bc26b4b06b8 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_internal.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_internal.c
@@ -35,11 +35,15 @@ static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct sg_table *st;
struct scatterlist *sg;
- unsigned int npages;
+ unsigned int npages; /* restricted by sg_alloc_table */
int max_order = MAX_ORDER;
unsigned int max_segment;
gfp_t gfp;
+ if (overflows_type(obj->base.size >> PAGE_SHIFT, npages))
+ return -E2BIG;
+
+ npages = obj->base.size >> PAGE_SHIFT;
max_segment = i915_sg_segment_size(i915->drm.dev) >> PAGE_SHIFT;
max_order = min(max_order, get_order(max_segment));
@@ -55,7 +59,6 @@ create_st:
if (!st)
return -ENOMEM;
- npages = obj->base.size / PAGE_SIZE;
if (sg_alloc_table(st, npages, GFP_KERNEL)) {
kfree(st);
return -ENOMEM;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index c7c252d4d366..d3c1dee16af2 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -395,7 +395,7 @@ retry:
/* Finally, remap it using the new GTT offset */
ret = remap_io_mapping(area,
area->vm_start + (vma->gtt_view.partial.offset << PAGE_SHIFT),
- (ggtt->gmadr.start + vma->node.start) >> PAGE_SHIFT,
+ (ggtt->gmadr.start + i915_ggtt_offset(vma)) >> PAGE_SHIFT,
min_t(u64, vma->size, area->vm_end - area->vm_start),
&ggtt->iomap);
if (ret)
@@ -979,7 +979,7 @@ int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
i915_gem_object_put(obj);
return -EINVAL;
}
- vma->vm_flags &= ~VM_MAYWRITE;
+ vm_flags_clear(vma, VM_MAYWRITE);
}
anon = mmap_singleton(to_i915(dev));
@@ -988,7 +988,7 @@ int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
return PTR_ERR(anon);
}
- vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | VM_IO;
+ vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | VM_IO);
/*
* We keep the ref on mmo->obj, not vm_file, but we require
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index 1a0886b8aaa1..e6d4efde4fc5 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -427,10 +427,11 @@ void __i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
static void
i915_gem_object_read_from_page_kmap(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size)
{
+ pgoff_t idx = offset >> PAGE_SHIFT;
void *src_map;
void *src_ptr;
- src_map = kmap_atomic(i915_gem_object_get_page(obj, offset >> PAGE_SHIFT));
+ src_map = kmap_atomic(i915_gem_object_get_page(obj, idx));
src_ptr = src_map + offset_in_page(offset);
if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
@@ -443,9 +444,10 @@ i915_gem_object_read_from_page_kmap(struct drm_i915_gem_object *obj, u64 offset,
static void
i915_gem_object_read_from_page_iomap(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size)
{
+ pgoff_t idx = offset >> PAGE_SHIFT;
+ dma_addr_t dma = i915_gem_object_get_dma_address(obj, idx);
void __iomem *src_map;
void __iomem *src_ptr;
- dma_addr_t dma = i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT);
src_map = io_mapping_map_wc(&obj->mm.region->iomap,
dma - obj->mm.region->region.start,
@@ -484,6 +486,7 @@ static bool object_has_mappable_iomem(struct drm_i915_gem_object *obj)
*/
int i915_gem_object_read_from_page(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size)
{
+ GEM_BUG_ON(overflows_type(offset >> PAGE_SHIFT, pgoff_t));
GEM_BUG_ON(offset >= obj->base.size);
GEM_BUG_ON(offset_in_page(offset) > PAGE_SIZE - size);
GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index 3db53769864c..f9a8acbba715 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -20,26 +20,10 @@
enum intel_region_id;
-/*
- * XXX: There is a prevalence of the assumption that we fit the
- * object's page count inside a 32bit _signed_ variable. Let's document
- * this and catch if we ever need to fix it. In the meantime, if you do
- * spot such a local variable, please consider fixing!
- *
- * Aside from our own locals (for which we have no excuse!):
- * - sg_table embeds unsigned int for num_pages
- * - get_user_pages*() mixed ints with longs
- */
-#define GEM_CHECK_SIZE_OVERFLOW(sz) \
- GEM_WARN_ON((sz) >> PAGE_SHIFT > INT_MAX)
-
static inline bool i915_gem_object_size_2big(u64 size)
{
struct drm_i915_gem_object *obj;
- if (GEM_CHECK_SIZE_OVERFLOW(size))
- return true;
-
if (overflows_type(size, obj->base.size))
return true;
@@ -363,44 +347,289 @@ i915_gem_object_get_tile_row_size(const struct drm_i915_gem_object *obj)
int i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
unsigned int tiling, unsigned int stride);
+/**
+ * __i915_gem_object_page_iter_get_sg - helper to find the target scatterlist
+ * pointer and the target page position using pgoff_t n input argument and
+ * i915_gem_object_page_iter
+ * @obj: i915 GEM buffer object
+ * @iter: i915 GEM buffer object page iterator
+ * @n: page offset
+ * @offset: searched physical offset,
+ * it will be used for returning physical page offset value
+ *
+ * Context: Takes and releases the mutex lock of the i915_gem_object_page_iter.
+ * Takes and releases the RCU lock to search the radix_tree of
+ * i915_gem_object_page_iter.
+ *
+ * Returns:
+ * The target scatterlist pointer and the target page position.
+ *
+ * Recommended to use wrapper macro: i915_gem_object_page_iter_get_sg()
+ */
struct scatterlist *
-__i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
- struct i915_gem_object_page_iter *iter,
- unsigned int n,
- unsigned int *offset, bool dma);
+__i915_gem_object_page_iter_get_sg(struct drm_i915_gem_object *obj,
+ struct i915_gem_object_page_iter *iter,
+ pgoff_t n,
+ unsigned int *offset);
+/**
+ * i915_gem_object_page_iter_get_sg - wrapper macro for
+ * __i915_gem_object_page_iter_get_sg()
+ * @obj: i915 GEM buffer object
+ * @it: i915 GEM buffer object page iterator
+ * @n: page offset
+ * @offset: searched physical offset,
+ * it will be used for returning physical page offset value
+ *
+ * Context: Takes and releases the mutex lock of the i915_gem_object_page_iter.
+ * Takes and releases the RCU lock to search the radix_tree of
+ * i915_gem_object_page_iter.
+ *
+ * Returns:
+ * The target scatterlist pointer and the target page position.
+ *
+ * In order to avoid the truncation of the input parameter, it checks the page
+ * offset n's type from the input parameter before calling
+ * __i915_gem_object_page_iter_get_sg().
+ */
+#define i915_gem_object_page_iter_get_sg(obj, it, n, offset) ({ \
+ static_assert(castable_to_type(n, pgoff_t)); \
+ __i915_gem_object_page_iter_get_sg(obj, it, n, offset); \
+})
+
+/**
+ * __i915_gem_object_get_sg - helper to find the target scatterlist
+ * pointer and the target page position using pgoff_t n input argument and
+ * drm_i915_gem_object. It uses an internal shmem scatterlist lookup function.
+ * @obj: i915 GEM buffer object
+ * @n: page offset
+ * @offset: searched physical offset,
+ * it will be used for returning physical page offset value
+ *
+ * It uses drm_i915_gem_object's internal shmem scatterlist lookup function as
+ * i915_gem_object_page_iter and calls __i915_gem_object_page_iter_get_sg().
+ *
+ * Returns:
+ * The target scatterlist pointer and the target page position.
+ *
+ * Recommended to use wrapper macro: i915_gem_object_get_sg()
+ * See also __i915_gem_object_page_iter_get_sg()
+ */
static inline struct scatterlist *
-i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
- unsigned int n,
- unsigned int *offset)
+__i915_gem_object_get_sg(struct drm_i915_gem_object *obj, pgoff_t n,
+ unsigned int *offset)
{
- return __i915_gem_object_get_sg(obj, &obj->mm.get_page, n, offset, false);
+ return __i915_gem_object_page_iter_get_sg(obj, &obj->mm.get_page, n, offset);
}
+/**
+ * i915_gem_object_get_sg - wrapper macro for __i915_gem_object_get_sg()
+ * @obj: i915 GEM buffer object
+ * @n: page offset
+ * @offset: searched physical offset,
+ * it will be used for returning physical page offset value
+ *
+ * Returns:
+ * The target scatterlist pointer and the target page position.
+ *
+ * In order to avoid the truncation of the input parameter, it checks the page
+ * offset n's type from the input parameter before calling
+ * __i915_gem_object_get_sg().
+ * See also __i915_gem_object_page_iter_get_sg()
+ */
+#define i915_gem_object_get_sg(obj, n, offset) ({ \
+ static_assert(castable_to_type(n, pgoff_t)); \
+ __i915_gem_object_get_sg(obj, n, offset); \
+})
+
+/**
+ * __i915_gem_object_get_sg_dma - helper to find the target scatterlist
+ * pointer and the target page position using pgoff_t n input argument and
+ * drm_i915_gem_object. It uses an internal DMA mapped scatterlist lookup function
+ * @obj: i915 GEM buffer object
+ * @n: page offset
+ * @offset: searched physical offset,
+ * it will be used for returning physical page offset value
+ *
+ * It uses drm_i915_gem_object's internal DMA mapped scatterlist lookup function
+ * as i915_gem_object_page_iter and calls __i915_gem_object_page_iter_get_sg().
+ *
+ * Returns:
+ * The target scatterlist pointer and the target page position.
+ *
+ * Recommended to use wrapper macro: i915_gem_object_get_sg_dma()
+ * See also __i915_gem_object_page_iter_get_sg()
+ */
static inline struct scatterlist *
-i915_gem_object_get_sg_dma(struct drm_i915_gem_object *obj,
- unsigned int n,
- unsigned int *offset)
+__i915_gem_object_get_sg_dma(struct drm_i915_gem_object *obj, pgoff_t n,
+ unsigned int *offset)
{
- return __i915_gem_object_get_sg(obj, &obj->mm.get_dma_page, n, offset, true);
+ return __i915_gem_object_page_iter_get_sg(obj, &obj->mm.get_dma_page, n, offset);
}
+/**
+ * i915_gem_object_get_sg_dma - wrapper macro for __i915_gem_object_get_sg_dma()
+ * @obj: i915 GEM buffer object
+ * @n: page offset
+ * @offset: searched physical offset,
+ * it will be used for returning physical page offset value
+ *
+ * Returns:
+ * The target scatterlist pointer and the target page position.
+ *
+ * In order to avoid the truncation of the input parameter, it checks the page
+ * offset n's type from the input parameter before calling
+ * __i915_gem_object_get_sg_dma().
+ * See also __i915_gem_object_page_iter_get_sg()
+ */
+#define i915_gem_object_get_sg_dma(obj, n, offset) ({ \
+ static_assert(castable_to_type(n, pgoff_t)); \
+ __i915_gem_object_get_sg_dma(obj, n, offset); \
+})
+
+/**
+ * __i915_gem_object_get_page - helper to find the target page with a page offset
+ * @obj: i915 GEM buffer object
+ * @n: page offset
+ *
+ * It uses drm_i915_gem_object's internal shmem scatterlist lookup function as
+ * i915_gem_object_page_iter and calls __i915_gem_object_page_iter_get_sg()
+ * internally.
+ *
+ * Returns:
+ * The target page pointer.
+ *
+ * Recommended to use wrapper macro: i915_gem_object_get_page()
+ * See also __i915_gem_object_page_iter_get_sg()
+ */
struct page *
-i915_gem_object_get_page(struct drm_i915_gem_object *obj,
- unsigned int n);
+__i915_gem_object_get_page(struct drm_i915_gem_object *obj, pgoff_t n);
+/**
+ * i915_gem_object_get_page - wrapper macro for __i915_gem_object_get_page
+ * @obj: i915 GEM buffer object
+ * @n: page offset
+ *
+ * Returns:
+ * The target page pointer.
+ *
+ * In order to avoid the truncation of the input parameter, it checks the page
+ * offset n's type from the input parameter before calling
+ * __i915_gem_object_get_page().
+ * See also __i915_gem_object_page_iter_get_sg()
+ */
+#define i915_gem_object_get_page(obj, n) ({ \
+ static_assert(castable_to_type(n, pgoff_t)); \
+ __i915_gem_object_get_page(obj, n); \
+})
+
+/**
+ * __i915_gem_object_get_dirty_page - helper to find the target page with a page
+ * offset
+ * @obj: i915 GEM buffer object
+ * @n: page offset
+ *
+ * It works like i915_gem_object_get_page(), but it marks the returned page dirty.
+ *
+ * Returns:
+ * The target page pointer.
+ *
+ * Recommended to use wrapper macro: i915_gem_object_get_dirty_page()
+ * See also __i915_gem_object_page_iter_get_sg() and __i915_gem_object_get_page()
+ */
struct page *
-i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
- unsigned int n);
+__i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, pgoff_t n);
+
+/**
+ * i915_gem_object_get_dirty_page - wrapper macro for __i915_gem_object_get_dirty_page
+ * @obj: i915 GEM buffer object
+ * @n: page offset
+ *
+ * Returns:
+ * The target page pointer.
+ *
+ * In order to avoid the truncation of the input parameter, it checks the page
+ * offset n's type from the input parameter before calling
+ * __i915_gem_object_get_dirty_page().
+ * See also __i915_gem_object_page_iter_get_sg() and __i915_gem_object_get_page()
+ */
+#define i915_gem_object_get_dirty_page(obj, n) ({ \
+ static_assert(castable_to_type(n, pgoff_t)); \
+ __i915_gem_object_get_dirty_page(obj, n); \
+})
+/**
+ * __i915_gem_object_get_dma_address_len - helper to get bus addresses of
+ * targeted DMA mapped scatterlist from i915 GEM buffer object and it's length
+ * @obj: i915 GEM buffer object
+ * @n: page offset
+ * @len: DMA mapped scatterlist's DMA bus addresses length to return
+ *
+ * Returns:
+ * Bus addresses of targeted DMA mapped scatterlist
+ *
+ * Recommended to use wrapper macro: i915_gem_object_get_dma_address_len()
+ * See also __i915_gem_object_page_iter_get_sg() and __i915_gem_object_get_sg_dma()
+ */
dma_addr_t
-i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
- unsigned long n,
- unsigned int *len);
+__i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj, pgoff_t n,
+ unsigned int *len);
+
+/**
+ * i915_gem_object_get_dma_address_len - wrapper macro for
+ * __i915_gem_object_get_dma_address_len
+ * @obj: i915 GEM buffer object
+ * @n: page offset
+ * @len: DMA mapped scatterlist's DMA bus addresses length to return
+ *
+ * Returns:
+ * Bus addresses of targeted DMA mapped scatterlist
+ *
+ * In order to avoid the truncation of the input parameter, it checks the page
+ * offset n's type from the input parameter before calling
+ * __i915_gem_object_get_dma_address_len().
+ * See also __i915_gem_object_page_iter_get_sg() and
+ * __i915_gem_object_get_dma_address_len()
+ */
+#define i915_gem_object_get_dma_address_len(obj, n, len) ({ \
+ static_assert(castable_to_type(n, pgoff_t)); \
+ __i915_gem_object_get_dma_address_len(obj, n, len); \
+})
+/**
+ * __i915_gem_object_get_dma_address - helper to get bus addresses of
+ * targeted DMA mapped scatterlist from i915 GEM buffer object
+ * @obj: i915 GEM buffer object
+ * @n: page offset
+ *
+ * Returns:
+ * Bus addresses of targeted DMA mapped scatterlis
+ *
+ * Recommended to use wrapper macro: i915_gem_object_get_dma_address()
+ * See also __i915_gem_object_page_iter_get_sg() and __i915_gem_object_get_sg_dma()
+ */
dma_addr_t
-i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
- unsigned long n);
+__i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj, pgoff_t n);
+
+/**
+ * i915_gem_object_get_dma_address - wrapper macro for
+ * __i915_gem_object_get_dma_address
+ * @obj: i915 GEM buffer object
+ * @n: page offset
+ *
+ * Returns:
+ * Bus addresses of targeted DMA mapped scatterlist
+ *
+ * In order to avoid the truncation of the input parameter, it checks the page
+ * offset n's type from the input parameter before calling
+ * __i915_gem_object_get_dma_address().
+ * See also __i915_gem_object_page_iter_get_sg() and
+ * __i915_gem_object_get_dma_address()
+ */
+#define i915_gem_object_get_dma_address(obj, n) ({ \
+ static_assert(castable_to_type(n, pgoff_t)); \
+ __i915_gem_object_get_dma_address(obj, n); \
+})
void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
struct sg_table *pages);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index ab4c2f90a564..19c9bdd8f905 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -10,7 +10,7 @@
#include <linux/mmu_notifier.h>
#include <drm/drm_gem.h>
-#include <drm/ttm/ttm_bo_api.h>
+#include <drm/ttm/ttm_bo.h>
#include <uapi/drm/i915_drm.h>
#include "i915_active.h"
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index 05a27723ebb8..ecd86130b74f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -521,14 +521,16 @@ void __i915_gem_object_release_map(struct drm_i915_gem_object *obj)
}
struct scatterlist *
-__i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
- struct i915_gem_object_page_iter *iter,
- unsigned int n,
- unsigned int *offset,
- bool dma)
+__i915_gem_object_page_iter_get_sg(struct drm_i915_gem_object *obj,
+ struct i915_gem_object_page_iter *iter,
+ pgoff_t n,
+ unsigned int *offset)
+
{
- struct scatterlist *sg;
+ const bool dma = iter == &obj->mm.get_dma_page ||
+ iter == &obj->ttm.get_io_page;
unsigned int idx, count;
+ struct scatterlist *sg;
might_sleep();
GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT);
@@ -636,7 +638,7 @@ lookup:
}
struct page *
-i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n)
+__i915_gem_object_get_page(struct drm_i915_gem_object *obj, pgoff_t n)
{
struct scatterlist *sg;
unsigned int offset;
@@ -649,8 +651,7 @@ i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n)
/* Like i915_gem_object_get_page(), but mark the returned page dirty */
struct page *
-i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
- unsigned int n)
+__i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, pgoff_t n)
{
struct page *page;
@@ -662,9 +663,8 @@ i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
}
dma_addr_t
-i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
- unsigned long n,
- unsigned int *len)
+__i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
+ pgoff_t n, unsigned int *len)
{
struct scatterlist *sg;
unsigned int offset;
@@ -678,8 +678,7 @@ i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
}
dma_addr_t
-i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
- unsigned long n)
+__i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj, pgoff_t n)
{
return i915_gem_object_get_dma_address_len(obj, n, NULL);
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
index 68453572275b..76efe98eaa14 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_phys.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
@@ -28,6 +28,10 @@ static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
void *dst;
int i;
+ /* Contiguous chunk, with a single scatterlist element */
+ if (overflows_type(obj->base.size, sg->length))
+ return -E2BIG;
+
if (GEM_WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
return -EINVAL;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
index 9c759df700ca..37d1efcd3ca6 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -60,7 +60,7 @@ int shmem_sg_alloc_table(struct drm_i915_private *i915, struct sg_table *st,
struct address_space *mapping,
unsigned int max_segment)
{
- const unsigned long page_count = size / PAGE_SIZE;
+ unsigned int page_count; /* restricted by sg_alloc_table */
unsigned long i;
struct scatterlist *sg;
struct page *page;
@@ -68,6 +68,10 @@ int shmem_sg_alloc_table(struct drm_i915_private *i915, struct sg_table *st,
gfp_t noreclaim;
int ret;
+ if (overflows_type(size / PAGE_SIZE, page_count))
+ return -E2BIG;
+
+ page_count = size / PAGE_SIZE;
/*
* If there's no chance of allocating enough pages for the whole
* object, bail early.
@@ -193,7 +197,6 @@ static int shmem_get_pages(struct drm_i915_gem_object *obj)
struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct intel_memory_region *mem = obj->mm.region;
struct address_space *mapping = obj->base.filp->f_mapping;
- const unsigned long page_count = obj->base.size / PAGE_SIZE;
unsigned int max_segment = i915_sg_segment_size(i915->drm.dev);
struct sg_table *st;
struct sgt_iter sgt_iter;
@@ -235,8 +238,8 @@ rebuild_st:
goto rebuild_st;
} else {
dev_warn(i915->drm.dev,
- "Failed to DMA remap %lu pages\n",
- page_count);
+ "Failed to DMA remap %zu pages\n",
+ obj->base.size >> PAGE_SHIFT);
goto err_pages;
}
}
@@ -538,6 +541,20 @@ static int __create_shmem(struct drm_i915_private *i915,
drm_gem_private_object_init(&i915->drm, obj, size);
+ /* XXX: The __shmem_file_setup() function returns -EINVAL if size is
+ * greater than MAX_LFS_FILESIZE.
+ * To handle the same error as other code that returns -E2BIG when
+ * the size is too large, we add a code that returns -E2BIG when the
+ * size is larger than the size that can be handled.
+ * If BITS_PER_LONG is 32, size > MAX_LFS_FILESIZE is always false,
+ * so we only needs to check when BITS_PER_LONG is 64.
+ * If BITS_PER_LONG is 32, E2BIG checks are processed when
+ * i915_gem_object_size_2big() is called before init_object() callback
+ * is called.
+ */
+ if (BITS_PER_LONG == 64 && size > MAX_LFS_FILESIZE)
+ return -E2BIG;
+
if (i915->mm.gemfs)
filp = shmem_file_setup_with_mnt(i915->mm.gemfs, "i915", size,
flags);
@@ -579,7 +596,7 @@ static int shmem_object_init(struct intel_memory_region *mem,
mapping_set_gfp_mask(mapping, mask);
GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM));
- i915_gem_object_init(obj, &i915_gem_shmem_ops, &lock_class, 0);
+ i915_gem_object_init(obj, &i915_gem_shmem_ops, &lock_class, flags);
obj->mem_flags |= I915_BO_FLAG_STRUCT_PAGE;
obj->write_domain = I915_GEM_DOMAIN_CPU;
obj->read_domains = I915_GEM_DOMAIN_CPU;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
index 8dc5c8874d8a..b1672e054b21 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
@@ -400,7 +400,7 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
mutex_lock(&to_gt(i915)->ggtt->vm.mutex);
list_for_each_entry_safe(vma, next,
&to_gt(i915)->ggtt->vm.bound_list, vm_link) {
- unsigned long count = vma->node.size >> PAGE_SHIFT;
+ unsigned long count = i915_vma_size(vma) >> PAGE_SHIFT;
struct drm_i915_gem_object *obj = vma->obj;
if (!vma->iomap || i915_vma_is_active(vma))
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
index bc9521078807..90a967374b1a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
@@ -110,9 +110,7 @@ static int adjust_stolen(struct drm_i915_private *i915,
else
ggtt_start &= PGTBL_ADDRESS_LO_MASK;
- ggtt_res =
- (struct resource) DEFINE_RES_MEM(ggtt_start,
- ggtt_total_entries(ggtt) * 4);
+ ggtt_res = DEFINE_RES_MEM(ggtt_start, ggtt_total_entries(ggtt) * 4);
if (ggtt_res.start >= stolen[0].start && ggtt_res.start < stolen[0].end)
stolen[0].end = ggtt_res.start;
@@ -211,7 +209,7 @@ static void g4x_get_stolen_reserved(struct drm_i915_private *i915,
IS_GM45(i915) ?
CTG_STOLEN_RESERVED :
ELK_STOLEN_RESERVED);
- resource_size_t stolen_top = i915->dsm.end + 1;
+ resource_size_t stolen_top = i915->dsm.stolen.end + 1;
drm_dbg(&i915->drm, "%s_STOLEN_RESERVED = %08x\n",
IS_GM45(i915) ? "CTG" : "ELK", reg_val);
@@ -276,7 +274,7 @@ static void vlv_get_stolen_reserved(struct drm_i915_private *i915,
resource_size_t *size)
{
u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);
- resource_size_t stolen_top = i915->dsm.end + 1;
+ resource_size_t stolen_top = i915->dsm.stolen.end + 1;
drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);
@@ -365,7 +363,7 @@ static void bdw_get_stolen_reserved(struct drm_i915_private *i915,
resource_size_t *size)
{
u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);
- resource_size_t stolen_top = i915->dsm.end + 1;
+ resource_size_t stolen_top = i915->dsm.stolen.end + 1;
drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);
@@ -414,7 +412,7 @@ static void icl_get_stolen_reserved(struct drm_i915_private *i915,
}
/*
- * Initialize i915->dsm_reserved to contain the reserved space within the Data
+ * Initialize i915->dsm.reserved to contain the reserved space within the Data
* Stolen Memory. This is a range on the top of DSM that is reserved, not to
* be used by driver, so must be excluded from the region passed to the
* allocator later. In the spec this is also called as WOPCM.
@@ -430,7 +428,7 @@ static int init_reserved_stolen(struct drm_i915_private *i915)
resource_size_t reserved_size;
int ret = 0;
- stolen_top = i915->dsm.end + 1;
+ stolen_top = i915->dsm.stolen.end + 1;
reserved_base = stolen_top;
reserved_size = 0;
@@ -471,13 +469,12 @@ static int init_reserved_stolen(struct drm_i915_private *i915)
goto bail_out;
}
- i915->dsm_reserved =
- (struct resource)DEFINE_RES_MEM(reserved_base, reserved_size);
+ i915->dsm.reserved = DEFINE_RES_MEM(reserved_base, reserved_size);
- if (!resource_contains(&i915->dsm, &i915->dsm_reserved)) {
+ if (!resource_contains(&i915->dsm.stolen, &i915->dsm.reserved)) {
drm_err(&i915->drm,
"Stolen reserved area %pR outside stolen memory %pR\n",
- &i915->dsm_reserved, &i915->dsm);
+ &i915->dsm.reserved, &i915->dsm.stolen);
ret = -EINVAL;
goto bail_out;
}
@@ -485,8 +482,7 @@ static int init_reserved_stolen(struct drm_i915_private *i915)
return 0;
bail_out:
- i915->dsm_reserved =
- (struct resource)DEFINE_RES_MEM(reserved_base, 0);
+ i915->dsm.reserved = DEFINE_RES_MEM(reserved_base, 0);
return ret;
}
@@ -517,27 +513,27 @@ static int i915_gem_init_stolen(struct intel_memory_region *mem)
if (request_smem_stolen(i915, &mem->region))
return -ENOSPC;
- i915->dsm = mem->region;
+ i915->dsm.stolen = mem->region;
if (init_reserved_stolen(i915))
return -ENOSPC;
/* Exclude the reserved region from driver use */
- mem->region.end = i915->dsm_reserved.start - 1;
+ mem->region.end = i915->dsm.reserved.start - 1;
mem->io_size = min(mem->io_size, resource_size(&mem->region));
- i915->stolen_usable_size = resource_size(&mem->region);
+ i915->dsm.usable_size = resource_size(&mem->region);
drm_dbg(&i915->drm,
"Memory reserved for graphics device: %lluK, usable: %lluK\n",
- (u64)resource_size(&i915->dsm) >> 10,
- (u64)i915->stolen_usable_size >> 10);
+ (u64)resource_size(&i915->dsm.stolen) >> 10,
+ (u64)i915->dsm.usable_size >> 10);
- if (i915->stolen_usable_size == 0)
+ if (i915->dsm.usable_size == 0)
return -ENOSPC;
/* Basic memrange allocator for stolen space. */
- drm_mm_init(&i915->mm.stolen, 0, i915->stolen_usable_size);
+ drm_mm_init(&i915->mm.stolen, 0, i915->dsm.usable_size);
return 0;
}
@@ -587,7 +583,7 @@ i915_pages_create_for_stolen(struct drm_device *dev,
struct sg_table *st;
struct scatterlist *sg;
- GEM_BUG_ON(range_overflows(offset, size, resource_size(&i915->dsm)));
+ GEM_BUG_ON(range_overflows(offset, size, resource_size(&i915->dsm.stolen)));
/* We hide that we have no struct page backing our stolen object
* by wrapping the contiguous physical allocation with a fake
@@ -607,7 +603,7 @@ i915_pages_create_for_stolen(struct drm_device *dev,
sg->offset = 0;
sg->length = size;
- sg_dma_address(sg) = (dma_addr_t)i915->dsm.start + offset;
+ sg_dma_address(sg) = (dma_addr_t)i915->dsm.stolen.start + offset;
sg_dma_len(sg) = size;
return st;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
index fd42b89b7162..a049ca0b7980 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
@@ -168,11 +168,11 @@ static bool i915_vma_fence_prepare(struct i915_vma *vma,
return true;
size = i915_gem_fence_size(i915, vma->size, tiling_mode, stride);
- if (vma->node.size < size)
+ if (i915_vma_size(vma) < size)
return false;
alignment = i915_gem_fence_alignment(i915, vma->size, tiling_mode, stride);
- if (!IS_ALIGNED(vma->node.start, alignment))
+ if (!IS_ALIGNED(i915_ggtt_offset(vma), alignment))
return false;
return true;
@@ -305,10 +305,6 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
spin_unlock(&obj->vma.lock);
obj->tiling_and_stride = tiling | stride;
- i915_gem_object_unlock(obj);
-
- /* Force the fence to be reacquired for GTT access */
- i915_gem_object_release_mmap_gtt(obj);
/* Try to preallocate memory required to save swizzling on put-pages */
if (i915_gem_object_needs_bit17_swizzle(obj)) {
@@ -321,6 +317,11 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
obj->bit_17 = NULL;
}
+ i915_gem_object_unlock(obj);
+
+ /* Force the fence to be reacquired for GTT access */
+ i915_gem_object_release_mmap_gtt(obj);
+
return 0;
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
index 1e50fb0d6bfc..7420276827a5 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
@@ -5,8 +5,8 @@
#include <linux/shmem_fs.h>
-#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_tt.h>
#include <drm/drm_buddy.h>
#include "i915_drv.h"
@@ -140,13 +140,16 @@ i915_ttm_place_from_region(const struct intel_memory_region *mr,
if (flags & I915_BO_ALLOC_CONTIGUOUS)
place->flags |= TTM_PL_FLAG_CONTIGUOUS;
if (offset != I915_BO_INVALID_OFFSET) {
+ WARN_ON(overflows_type(offset >> PAGE_SHIFT, place->fpfn));
place->fpfn = offset >> PAGE_SHIFT;
+ WARN_ON(overflows_type(place->fpfn + (size >> PAGE_SHIFT), place->lpfn));
place->lpfn = place->fpfn + (size >> PAGE_SHIFT);
} else if (mr->io_size && mr->io_size < mr->total) {
if (flags & I915_BO_ALLOC_GPU_ONLY) {
place->flags |= TTM_PL_FLAG_TOPDOWN;
} else {
place->fpfn = 0;
+ WARN_ON(overflows_type(mr->io_size >> PAGE_SHIFT, place->lpfn));
place->lpfn = mr->io_size >> PAGE_SHIFT;
}
}
@@ -271,8 +274,6 @@ static struct ttm_tt *i915_ttm_tt_create(struct ttm_buffer_object *bo,
{
struct drm_i915_private *i915 = container_of(bo->bdev, typeof(*i915),
bdev);
- struct ttm_resource_manager *man =
- ttm_manager_type(bo->bdev, bo->resource->mem_type);
struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
unsigned long ccs_pages = 0;
enum ttm_caching caching;
@@ -286,8 +287,8 @@ static struct ttm_tt *i915_ttm_tt_create(struct ttm_buffer_object *bo,
if (!i915_tt)
return NULL;
- if (obj->flags & I915_BO_ALLOC_CPU_CLEAR &&
- man->use_tt)
+ if (obj->flags & I915_BO_ALLOC_CPU_CLEAR && (!bo->resource ||
+ ttm_manager_type(bo->bdev, bo->resource->mem_type)->use_tt))
page_flags |= TTM_TT_FLAG_ZERO_ALLOC;
caching = i915_ttm_select_tt_caching(obj);
@@ -599,13 +600,16 @@ i915_ttm_resource_get_st(struct drm_i915_gem_object *obj,
static int i915_ttm_truncate(struct drm_i915_gem_object *obj)
{
struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
- int err;
+ long err;
WARN_ON_ONCE(obj->mm.madv == I915_MADV_WILLNEED);
- err = ttm_bo_wait(bo, true, false);
- if (err)
+ err = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP,
+ true, 15 * HZ);
+ if (err < 0)
return err;
+ if (err == 0)
+ return -EBUSY;
err = i915_ttm_move_notify(bo);
if (err)
@@ -689,7 +693,7 @@ static unsigned long i915_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
GEM_WARN_ON(bo->ttm);
base = obj->mm.region->iomap.base - obj->mm.region->region.start;
- sg = __i915_gem_object_get_sg(obj, &obj->ttm.get_io_page, page_offset, &ofs, true);
+ sg = i915_gem_object_page_iter_get_sg(obj, &obj->ttm.get_io_page, page_offset, &ofs);
return ((base + sg_dma_address(sg)) >> PAGE_SHIFT) + ofs;
}
@@ -832,6 +836,10 @@ static int i915_ttm_get_pages(struct drm_i915_gem_object *obj)
struct ttm_place requested, busy[I915_TTM_MAX_PLACEMENTS];
struct ttm_placement placement;
+ /* restricted by sg_alloc_table */
+ if (overflows_type(obj->base.size >> PAGE_SHIFT, unsigned int))
+ return -E2BIG;
+
GEM_BUG_ON(obj->mm.n_placements > I915_TTM_MAX_PLACEMENTS);
/* Move to the requested placement. */
@@ -1048,7 +1056,26 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
return VM_FAULT_SIGBUS;
}
- if (!i915_ttm_resource_mappable(bo->resource)) {
+ /*
+ * This must be swapped out with shmem ttm_tt (pipeline-gutting).
+ * Calling ttm_bo_validate() here with TTM_PL_SYSTEM should only go as
+ * far as far doing a ttm_bo_move_null(), which should skip all the
+ * other junk.
+ */
+ if (!bo->resource) {
+ struct ttm_operation_ctx ctx = {
+ .interruptible = true,
+ .no_wait_gpu = true, /* should be idle already */
+ };
+
+ GEM_BUG_ON(!bo->ttm || !(bo->ttm->page_flags & TTM_TT_FLAG_SWAPPED));
+
+ ret = ttm_bo_validate(bo, i915_ttm_sys_placement(), &ctx);
+ if (ret) {
+ dma_resv_unlock(bo->base.resv);
+ return VM_FAULT_SIGBUS;
+ }
+ } else if (!i915_ttm_resource_mappable(bo->resource)) {
int err = -ENODEV;
int i;
@@ -1302,6 +1329,17 @@ int __i915_gem_ttm_object_init(struct intel_memory_region *mem,
ret = ttm_bo_init_reserved(&i915->bdev, i915_gem_to_ttm(obj), bo_type,
&i915_sys_placement, page_size >> PAGE_SHIFT,
&ctx, NULL, NULL, i915_ttm_bo_destroy);
+
+ /*
+ * XXX: The ttm_bo_init_reserved() functions returns -ENOSPC if the size
+ * is too big to add vma. The direct function that returns -ENOSPC is
+ * drm_mm_insert_node_in_range(). To handle the same error as other code
+ * that returns -E2BIG when the size is too large, it converts -ENOSPC to
+ * -E2BIG.
+ */
+ if (size >> PAGE_SHIFT > INT_MAX && ret == -ENOSPC)
+ ret = -E2BIG;
+
if (ret)
return i915_ttm_err_to_gem(ret);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
index f59f812dc6d2..76dd9e5e1a8b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
@@ -3,7 +3,7 @@
* Copyright © 2021 Intel Corporation
*/
-#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_tt.h>
#include "i915_deps.h"
#include "i915_drv.h"
@@ -103,7 +103,27 @@ void i915_ttm_adjust_gem_after_move(struct drm_i915_gem_object *obj)
{
struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
unsigned int cache_level;
+ unsigned int mem_flags;
unsigned int i;
+ int mem_type;
+
+ /*
+ * We might have been purged (or swapped out) if the resource is NULL,
+ * in which case the SYSTEM placement is the closest match to describe
+ * the current domain. If the object is ever used in this state then we
+ * will require moving it again.
+ */
+ if (!bo->resource) {
+ mem_flags = I915_BO_FLAG_STRUCT_PAGE;
+ mem_type = I915_PL_SYSTEM;
+ cache_level = I915_CACHE_NONE;
+ } else {
+ mem_flags = i915_ttm_cpu_maps_iomem(bo->resource) ? I915_BO_FLAG_IOMEM :
+ I915_BO_FLAG_STRUCT_PAGE;
+ mem_type = bo->resource->mem_type;
+ cache_level = i915_ttm_cache_level(to_i915(bo->base.dev), bo->resource,
+ bo->ttm);
+ }
/*
* If object was moved to an allowable region, update the object
@@ -111,11 +131,11 @@ void i915_ttm_adjust_gem_after_move(struct drm_i915_gem_object *obj)
* in an allowable region, it's evicted and we don't update the
* object region.
*/
- if (intel_region_to_ttm_type(obj->mm.region) != bo->resource->mem_type) {
+ if (intel_region_to_ttm_type(obj->mm.region) != mem_type) {
for (i = 0; i < obj->mm.n_placements; ++i) {
struct intel_memory_region *mr = obj->mm.placements[i];
- if (intel_region_to_ttm_type(mr) == bo->resource->mem_type &&
+ if (intel_region_to_ttm_type(mr) == mem_type &&
mr != obj->mm.region) {
i915_gem_object_release_memory_region(obj);
i915_gem_object_init_memory_region(obj, mr);
@@ -125,12 +145,8 @@ void i915_ttm_adjust_gem_after_move(struct drm_i915_gem_object *obj)
}
obj->mem_flags &= ~(I915_BO_FLAG_STRUCT_PAGE | I915_BO_FLAG_IOMEM);
+ obj->mem_flags |= mem_flags;
- obj->mem_flags |= i915_ttm_cpu_maps_iomem(bo->resource) ? I915_BO_FLAG_IOMEM :
- I915_BO_FLAG_STRUCT_PAGE;
-
- cache_level = i915_ttm_cache_level(to_i915(bo->base.dev), bo->resource,
- bo->ttm);
i915_gem_object_set_cache_coherency(obj, cache_level);
}
@@ -565,6 +581,32 @@ int i915_ttm_move(struct ttm_buffer_object *bo, bool evict,
return 0;
}
+ if (!bo->resource) {
+ if (dst_mem->mem_type != TTM_PL_SYSTEM) {
+ hop->mem_type = TTM_PL_SYSTEM;
+ hop->flags = TTM_PL_FLAG_TEMPORARY;
+ return -EMULTIHOP;
+ }
+
+ /*
+ * This is only reached when first creating the object, or if
+ * the object was purged or swapped out (pipeline-gutting). For
+ * the former we can safely skip all of the below since we are
+ * only using a dummy SYSTEM placement here. And with the latter
+ * we will always re-enter here with bo->resource set correctly
+ * (as per the above), since this is part of a multi-hop
+ * sequence, where at the end we can do the move for real.
+ *
+ * The special case here is when the dst_mem is TTM_PL_SYSTEM,
+ * which doens't require any kind of move, so it should be safe
+ * to skip all the below and call ttm_bo_move_null() here, where
+ * the caller in __i915_ttm_get_pages() will take care of the
+ * rest, since we should have a valid ttm_tt.
+ */
+ ttm_bo_move_null(bo, dst_mem);
+ return 0;
+ }
+
ret = i915_ttm_move_notify(bo);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index 9348b1804d53..1d3ebdf4069b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -128,12 +128,16 @@ static void i915_gem_object_userptr_drop_ref(struct drm_i915_gem_object *obj)
static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
{
- const unsigned long num_pages = obj->base.size >> PAGE_SHIFT;
unsigned int max_segment = i915_sg_segment_size(obj->base.dev->dev);
struct sg_table *st;
struct page **pvec;
+ unsigned int num_pages; /* limited by sg_alloc_table_from_pages_segment */
int ret;
+ if (overflows_type(obj->base.size >> PAGE_SHIFT, num_pages))
+ return -E2BIG;
+
+ num_pages = obj->base.size >> PAGE_SHIFT;
st = kmalloc(sizeof(*st), GFP_KERNEL);
if (!st)
return -ENOMEM;
diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c b/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c
index cbd9b624a788..bac957755068 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c
@@ -29,11 +29,15 @@ static int huge_get_pages(struct drm_i915_gem_object *obj)
{
#define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_RETRY_MAYFAIL)
const unsigned long nreal = obj->scratch / PAGE_SIZE;
- const unsigned long npages = obj->base.size / PAGE_SIZE;
+ unsigned int npages; /* restricted by sg_alloc_table */
struct scatterlist *sg, *src, *end;
struct sg_table *pages;
unsigned long n;
+ if (overflows_type(obj->base.size / PAGE_SIZE, npages))
+ return -E2BIG;
+
+ npages = obj->base.size / PAGE_SIZE;
pages = kmalloc(sizeof(*pages), GFP);
if (!pages)
return -ENOMEM;
diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
index 977dead10ab5..defece0bcb81 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
@@ -84,6 +84,10 @@ static int get_huge_pages(struct drm_i915_gem_object *obj)
unsigned int sg_page_sizes;
u64 rem;
+ /* restricted by sg_alloc_table */
+ if (overflows_type(obj->base.size >> PAGE_SHIFT, unsigned int))
+ return -E2BIG;
+
st = kmalloc(sizeof(*st), GFP);
if (!st)
return -ENOMEM;
@@ -212,6 +216,10 @@ static int fake_get_huge_pages(struct drm_i915_gem_object *obj)
struct scatterlist *sg;
u64 rem;
+ /* restricted by sg_alloc_table */
+ if (overflows_type(obj->base.size >> PAGE_SHIFT, unsigned int))
+ return -E2BIG;
+
st = kmalloc(sizeof(*st), GFP);
if (!st)
return -ENOMEM;
@@ -400,7 +408,7 @@ static int igt_check_page_sizes(struct i915_vma *vma)
* Maintaining alignment is required to utilise huge pages in the ppGGT.
*/
if (i915_gem_object_is_lmem(obj) &&
- IS_ALIGNED(vma->node.start, SZ_2M) &&
+ IS_ALIGNED(i915_vma_offset(vma), SZ_2M) &&
vma->page_sizes.sg & SZ_2M &&
vma->resource->page_sizes_gtt < SZ_2M) {
pr_err("gtt pages mismatch for LMEM, expected 2M GTT pages, sg(%u), gtt(%u)\n",
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
index 692a16914ca0..3bb1f7f0110e 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
@@ -194,12 +194,12 @@ static int prepare_blit(const struct tiled_blits *t,
*cs++ = src_4t | dst_4t | BLT_DEPTH_32 | dst_pitch;
*cs++ = 0;
*cs++ = t->height << 16 | t->width;
- *cs++ = lower_32_bits(dst->vma->node.start);
- *cs++ = upper_32_bits(dst->vma->node.start);
+ *cs++ = lower_32_bits(i915_vma_offset(dst->vma));
+ *cs++ = upper_32_bits(i915_vma_offset(dst->vma));
*cs++ = 0;
*cs++ = src_pitch;
- *cs++ = lower_32_bits(src->vma->node.start);
- *cs++ = upper_32_bits(src->vma->node.start);
+ *cs++ = lower_32_bits(i915_vma_offset(src->vma));
+ *cs++ = upper_32_bits(i915_vma_offset(src->vma));
} else {
if (ver >= 6) {
*cs++ = MI_LOAD_REGISTER_IMM(1);
@@ -240,14 +240,14 @@ static int prepare_blit(const struct tiled_blits *t,
*cs++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | dst_pitch;
*cs++ = 0;
*cs++ = t->height << 16 | t->width;
- *cs++ = lower_32_bits(dst->vma->node.start);
+ *cs++ = lower_32_bits(i915_vma_offset(dst->vma));
if (use_64b_reloc)
- *cs++ = upper_32_bits(dst->vma->node.start);
+ *cs++ = upper_32_bits(i915_vma_offset(dst->vma));
*cs++ = 0;
*cs++ = src_pitch;
- *cs++ = lower_32_bits(src->vma->node.start);
+ *cs++ = lower_32_bits(i915_vma_offset(src->vma));
if (use_64b_reloc)
- *cs++ = upper_32_bits(src->vma->node.start);
+ *cs++ = upper_32_bits(i915_vma_offset(src->vma));
}
*cs++ = MI_BATCH_BUFFER_END;
@@ -462,7 +462,7 @@ static int pin_buffer(struct i915_vma *vma, u64 addr)
{
int err;
- if (drm_mm_node_allocated(&vma->node) && vma->node.start != addr) {
+ if (drm_mm_node_allocated(&vma->node) && i915_vma_offset(vma) != addr) {
err = i915_vma_unbind_unlocked(vma);
if (err)
return err;
@@ -472,6 +472,7 @@ static int pin_buffer(struct i915_vma *vma, u64 addr)
if (err)
return err;
+ GEM_BUG_ON(i915_vma_offset(vma) != addr);
return 0;
}
@@ -518,8 +519,8 @@ tiled_blit(struct tiled_blits *t,
err = igt_vma_move_to_active_unlocked(dst->vma, rq, 0);
if (!err)
err = rq->engine->emit_bb_start(rq,
- t->batch->node.start,
- t->batch->node.size,
+ i915_vma_offset(t->batch),
+ i915_vma_size(t->batch),
0);
i915_request_get(rq);
i915_request_add(rq);
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
index c228fe4aba50..3bef1beec7cb 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
@@ -222,7 +222,7 @@ static int gpu_set(struct context *ctx, unsigned long offset, u32 v)
}
if (GRAPHICS_VER(ctx->engine->i915) >= 8) {
- *cs++ = MI_STORE_DWORD_IMM_GEN4 | 1 << 22;
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
*cs++ = lower_32_bits(i915_ggtt_offset(vma) + offset);
*cs++ = upper_32_bits(i915_ggtt_offset(vma) + offset);
*cs++ = v;
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
index a0ff51d71d07..a81fa6a20f5a 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
@@ -469,7 +469,8 @@ static int gpu_fill(struct intel_context *ce,
static int cpu_fill(struct drm_i915_gem_object *obj, u32 value)
{
const bool has_llc = HAS_LLC(to_i915(obj->base.dev));
- unsigned int n, m, need_flush;
+ unsigned int need_flush;
+ unsigned long n, m;
int err;
i915_gem_object_lock(obj, NULL);
@@ -499,7 +500,8 @@ out:
static noinline int cpu_check(struct drm_i915_gem_object *obj,
unsigned int idx, unsigned int max)
{
- unsigned int n, m, needs_flush;
+ unsigned int needs_flush;
+ unsigned long n;
int err;
i915_gem_object_lock(obj, NULL);
@@ -508,7 +510,7 @@ static noinline int cpu_check(struct drm_i915_gem_object *obj,
goto out_unlock;
for (n = 0; n < real_page_count(obj); n++) {
- u32 *map;
+ u32 *map, m;
map = kmap_atomic(i915_gem_object_get_page(obj, n));
if (needs_flush & CLFLUSH_BEFORE)
@@ -516,7 +518,7 @@ static noinline int cpu_check(struct drm_i915_gem_object *obj,
for (m = 0; m < max; m++) {
if (map[m] != m) {
- pr_err("%pS: Invalid value at object %d page %d/%ld, offset %d/%d: found %x expected %x\n",
+ pr_err("%pS: Invalid value at object %d page %ld/%ld, offset %d/%d: found %x expected %x\n",
__builtin_return_address(0), idx,
n, real_page_count(obj), m, max,
map[m], m);
@@ -527,7 +529,7 @@ static noinline int cpu_check(struct drm_i915_gem_object *obj,
for (; m < DW_PER_PAGE; m++) {
if (map[m] != STACK_MAGIC) {
- pr_err("%pS: Invalid value at object %d page %d, offset %d: found %x expected %x (uninitialised)\n",
+ pr_err("%pS: Invalid value at object %d page %ld, offset %d: found %x expected %x (uninitialised)\n",
__builtin_return_address(0), idx, n, m,
map[m], STACK_MAGIC);
err = -EINVAL;
@@ -914,8 +916,8 @@ static int rpcs_query_batch(struct drm_i915_gem_object *rpcs,
*cmd++ = MI_STORE_REGISTER_MEM_GEN8;
*cmd++ = i915_mmio_reg_offset(GEN8_R_PWR_CLK_STATE(engine->mmio_base));
- *cmd++ = lower_32_bits(vma->node.start);
- *cmd++ = upper_32_bits(vma->node.start);
+ *cmd++ = lower_32_bits(i915_vma_offset(vma));
+ *cmd++ = upper_32_bits(i915_vma_offset(vma));
*cmd = MI_BATCH_BUFFER_END;
__i915_gem_object_flush_map(rpcs, 0, 64);
@@ -999,7 +1001,8 @@ retry:
}
err = rq->engine->emit_bb_start(rq,
- batch->node.start, batch->node.size,
+ i915_vma_offset(batch),
+ i915_vma_size(batch),
0);
if (err)
goto skip_request;
@@ -1548,9 +1551,7 @@ static int write_to_scratch(struct i915_gem_context *ctx,
goto err_unpin;
}
- i915_vma_lock(vma);
- err = i915_vma_move_to_active(vma, rq, 0);
- i915_vma_unlock(vma);
+ err = igt_vma_move_to_active_unlocked(vma, rq, 0);
if (err)
goto skip_request;
@@ -1560,7 +1561,8 @@ static int write_to_scratch(struct i915_gem_context *ctx,
goto skip_request;
}
- err = engine->emit_bb_start(rq, vma->node.start, vma->node.size, 0);
+ err = engine->emit_bb_start(rq, i915_vma_offset(vma),
+ i915_vma_size(vma), 0);
if (err)
goto skip_request;
@@ -1665,7 +1667,7 @@ static int read_from_scratch(struct i915_gem_context *ctx,
*cmd++ = offset;
*cmd++ = MI_STORE_REGISTER_MEM | MI_USE_GGTT;
*cmd++ = reg;
- *cmd++ = vma->node.start + result;
+ *cmd++ = i915_vma_offset(vma) + result;
*cmd = MI_BATCH_BUFFER_END;
i915_gem_object_flush_map(obj);
@@ -1682,9 +1684,7 @@ static int read_from_scratch(struct i915_gem_context *ctx,
goto err_unpin;
}
- i915_vma_lock(vma);
- err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
- i915_vma_unlock(vma);
+ err = igt_vma_move_to_active_unlocked(vma, rq, EXEC_OBJECT_WRITE);
if (err)
goto skip_request;
@@ -1694,7 +1694,8 @@ static int read_from_scratch(struct i915_gem_context *ctx,
goto skip_request;
}
- err = engine->emit_bb_start(rq, vma->node.start, vma->node.size, flags);
+ err = engine->emit_bb_start(rq, i915_vma_offset(vma),
+ i915_vma_size(vma), flags);
if (err)
goto skip_request;
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
index 3f658d5717d8..56279908ed30 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
@@ -97,11 +97,11 @@ static int check_partial_mapping(struct drm_i915_gem_object *obj,
struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct i915_gtt_view view;
struct i915_vma *vma;
+ unsigned long offset;
unsigned long page;
u32 __iomem *io;
struct page *p;
unsigned int n;
- u64 offset;
u32 *cpu;
int err;
@@ -158,7 +158,7 @@ static int check_partial_mapping(struct drm_i915_gem_object *obj,
cpu = kmap(p) + offset_in_page(offset);
drm_clflush_virt_range(cpu, sizeof(*cpu));
if (*cpu != (u32)page) {
- pr_err("Partial view for %lu [%u] (offset=%llu, size=%u [%llu, row size %u], fence=%d, tiling=%d, stride=%d) misalignment, expected write to page (%llu + %u [0x%llx]) of 0x%x, found 0x%x\n",
+ pr_err("Partial view for %lu [%u] (offset=%llu, size=%u [%llu, row size %u], fence=%d, tiling=%d, stride=%d) misalignment, expected write to page (%lu + %u [0x%lx]) of 0x%x, found 0x%x\n",
page, n,
view.partial.offset,
view.partial.size,
@@ -214,10 +214,10 @@ static int check_partial_mappings(struct drm_i915_gem_object *obj,
for_each_prime_number_from(page, 1, npages) {
struct i915_gtt_view view =
compute_partial_view(obj, page, MIN_CHUNK_PAGES);
+ unsigned long offset;
u32 __iomem *io;
struct page *p;
unsigned int n;
- u64 offset;
u32 *cpu;
GEM_BUG_ON(view.partial.size > nreal);
@@ -254,7 +254,7 @@ static int check_partial_mappings(struct drm_i915_gem_object *obj,
cpu = kmap(p) + offset_in_page(offset);
drm_clflush_virt_range(cpu, sizeof(*cpu));
if (*cpu != (u32)page) {
- pr_err("Partial view for %lu [%u] (offset=%llu, size=%u [%llu, row size %u], fence=%d, tiling=%d, stride=%d) misalignment, expected write to page (%llu + %u [0x%llx]) of 0x%x, found 0x%x\n",
+ pr_err("Partial view for %lu [%u] (offset=%llu, size=%u [%llu, row size %u], fence=%d, tiling=%d, stride=%d) misalignment, expected write to page (%lu + %u [0x%lx]) of 0x%x, found 0x%x\n",
page, n,
view.partial.offset,
view.partial.size,
@@ -1609,7 +1609,7 @@ retry:
err = i915_vma_move_to_active(vma, rq, 0);
- err = engine->emit_bb_start(rq, vma->node.start, 0, 0);
+ err = engine->emit_bb_start(rq, i915_vma_offset(vma), 0, 0);
i915_request_get(rq);
i915_request_add(rq);
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c
index bdf5bb40ccf1..19e374f68ff7 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object.c
@@ -33,10 +33,10 @@ out:
static int igt_gem_huge(void *arg)
{
- const unsigned int nreal = 509; /* just to be awkward */
+ const unsigned long nreal = 509; /* just to be awkward */
struct drm_i915_private *i915 = arg;
struct drm_i915_gem_object *obj;
- unsigned int n;
+ unsigned long n;
int err;
/* Basic sanitycheck of our huge fake object allocation */
@@ -49,7 +49,7 @@ static int igt_gem_huge(void *arg)
err = i915_gem_object_pin_pages_unlocked(obj);
if (err) {
- pr_err("Failed to allocate %u pages (%lu total), err=%d\n",
+ pr_err("Failed to allocate %lu pages (%lu total), err=%d\n",
nreal, obj->base.size / PAGE_SIZE, err);
goto out;
}
@@ -57,7 +57,7 @@ static int igt_gem_huge(void *arg)
for (n = 0; n < obj->base.size / PAGE_SIZE; n++) {
if (i915_gem_object_get_page(obj, n) !=
i915_gem_object_get_page(obj, n % nreal)) {
- pr_err("Page lookup mismatch at index %u [%u]\n",
+ pr_err("Page lookup mismatch at index %lu [%lu]\n",
n, n % nreal);
err = -EINVAL;
goto out_unpin;
diff --git a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
index 374b10ac430e..20a232a140b0 100644
--- a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
+++ b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
@@ -62,8 +62,8 @@ igt_emit_store_dw(struct i915_vma *vma,
goto err;
}
- GEM_BUG_ON(offset + (count - 1) * PAGE_SIZE > vma->node.size);
- offset += vma->node.start;
+ GEM_BUG_ON(offset + (count - 1) * PAGE_SIZE > i915_vma_size(vma));
+ offset += i915_vma_offset(vma);
for (n = 0; n < count; n++) {
if (ver >= 8) {
@@ -130,15 +130,11 @@ int igt_gpu_fill_dw(struct intel_context *ce,
goto err_batch;
}
- i915_vma_lock(batch);
- err = i915_vma_move_to_active(batch, rq, 0);
- i915_vma_unlock(batch);
+ err = igt_vma_move_to_active_unlocked(batch, rq, 0);
if (err)
goto skip_request;
- i915_vma_lock(vma);
- err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
- i915_vma_unlock(vma);
+ err = igt_vma_move_to_active_unlocked(vma, rq, EXEC_OBJECT_WRITE);
if (err)
goto skip_request;
@@ -147,7 +143,8 @@ int igt_gpu_fill_dw(struct intel_context *ce,
flags |= I915_DISPATCH_SECURE;
err = rq->engine->emit_bb_start(rq,
- batch->node.start, batch->node.size,
+ i915_vma_offset(batch),
+ i915_vma_size(batch),
flags);
skip_request:
diff --git a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.h b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.h
index 1379fbc14431..71a3ca8a8865 100644
--- a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.h
+++ b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.h
@@ -38,7 +38,7 @@ igt_vma_move_to_active_unlocked(struct i915_vma *vma, struct i915_request *rq,
int err;
i915_vma_lock(vma);
- err = _i915_vma_move_to_active(vma, rq, &rq->fence, flags);
+ err = i915_vma_move_to_active(vma, rq, flags);
i915_vma_unlock(vma);
return err;
}
diff --git a/drivers/gpu/drm/i915/gt/gen7_renderclear.c b/drivers/gpu/drm/i915/gt/gen7_renderclear.c
index 317efb145787..d38b914d1206 100644
--- a/drivers/gpu/drm/i915/gt/gen7_renderclear.c
+++ b/drivers/gpu/drm/i915/gt/gen7_renderclear.c
@@ -106,7 +106,7 @@ static u32 batch_offset(const struct batch_chunk *bc, u32 *cs)
static u32 batch_addr(const struct batch_chunk *bc)
{
- return bc->vma->node.start;
+ return i915_vma_offset(bc->vma);
}
static void batch_add(struct batch_chunk *bc, const u32 d)
diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
index e94365b08f1e..2aa63ec521b8 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.c
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -528,7 +528,7 @@ retry:
return rq;
}
-struct i915_request *intel_context_find_active_request(struct intel_context *ce)
+struct i915_request *intel_context_get_active_request(struct intel_context *ce)
{
struct intel_context *parent = intel_context_to_parent(ce);
struct i915_request *rq, *active = NULL;
@@ -552,6 +552,8 @@ struct i915_request *intel_context_find_active_request(struct intel_context *ce)
active = rq;
}
+ if (active)
+ active = i915_request_get_rcu(active);
spin_unlock_irqrestore(&parent->guc_state.lock, flags);
return active;
diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h
index fb62b7b8cbcd..0a8d553da3f4 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.h
+++ b/drivers/gpu/drm/i915/gt/intel_context.h
@@ -268,8 +268,7 @@ int intel_context_prepare_remote_request(struct intel_context *ce,
struct i915_request *intel_context_create_request(struct intel_context *ce);
-struct i915_request *
-intel_context_find_active_request(struct intel_context *ce);
+struct i915_request *intel_context_get_active_request(struct intel_context *ce);
static inline bool intel_context_is_barrier(const struct intel_context *ce)
{
diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h
index cbc8b857d5f7..b58c30ac8ef0 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine.h
@@ -172,6 +172,8 @@ intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value)
#define I915_GEM_HWS_MIGRATE (0x42 * sizeof(u32))
#define I915_GEM_HWS_PXP 0x60
#define I915_GEM_HWS_PXP_ADDR (I915_GEM_HWS_PXP * sizeof(u32))
+#define I915_GEM_HWS_GSC 0x62
+#define I915_GEM_HWS_GSC_ADDR (I915_GEM_HWS_GSC * sizeof(u32))
#define I915_GEM_HWS_SCRATCH 0x80
#define I915_HWS_CSB_BUF0_INDEX 0x10
@@ -248,8 +250,8 @@ void intel_engine_dump_active_requests(struct list_head *requests,
ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine,
ktime_t *now);
-struct i915_request *
-intel_engine_execlist_find_hung_request(struct intel_engine_cs *engine);
+void intel_engine_get_hung_entity(struct intel_engine_cs *engine,
+ struct intel_context **ce, struct i915_request **rq);
u32 intel_engine_context_size(struct intel_gt *gt, u8 class);
struct intel_context *
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index c33e0d72d670..d4e29da74612 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -894,6 +894,24 @@ static intel_engine_mask_t init_engine_mask(struct intel_gt *gt)
engine_mask_apply_compute_fuses(gt);
engine_mask_apply_copy_fuses(gt);
+ /*
+ * The only use of the GSC CS is to load and communicate with the GSC
+ * FW, so we have no use for it if we don't have the FW.
+ *
+ * IMPORTANT: in cases where we don't have the GSC FW, we have a
+ * catch-22 situation that breaks media C6 due to 2 requirements:
+ * 1) once turned on, the GSC power well will not go to sleep unless the
+ * GSC FW is loaded.
+ * 2) to enable idling (which is required for media C6) we need to
+ * initialize the IDLE_MSG register for the GSC CS and do at least 1
+ * submission, which will wake up the GSC power well.
+ */
+ if (__HAS_ENGINE(info->engine_mask, GSC0) && !intel_uc_wants_gsc_uc(&gt->uc)) {
+ drm_notice(&gt->i915->drm,
+ "No GSC FW selected, disabling GSC CS and media C6\n");
+ info->engine_mask &= ~BIT(GSC0);
+ }
+
return info->engine_mask;
}
@@ -1476,10 +1494,12 @@ static int __intel_engine_stop_cs(struct intel_engine_cs *engine,
intel_uncore_write_fw(uncore, mode, _MASKED_BIT_ENABLE(STOP_RING));
/*
- * Wa_22011802037 : gen11, gen12, Prior to doing a reset, ensure CS is
+ * Wa_22011802037: Prior to doing a reset, ensure CS is
* stopped, set ring stop bit and prefetch disable bit to halt CS
*/
- if (IS_GRAPHICS_VER(engine->i915, 11, 12))
+ if (IS_MTL_GRAPHICS_STEP(engine->i915, M, STEP_A0, STEP_B0) ||
+ (GRAPHICS_VER(engine->i915) >= 11 &&
+ GRAPHICS_VER_FULL(engine->i915) < IP_VER(12, 70)))
intel_uncore_write_fw(uncore, RING_MODE_GEN7(engine->mmio_base),
_MASKED_BIT_ENABLE(GEN12_GFX_PREFETCH_DISABLE));
@@ -1564,11 +1584,8 @@ static u32 __cs_pending_mi_force_wakes(struct intel_engine_cs *engine)
};
u32 val;
- if (!_reg[engine->id].reg) {
- drm_err(&engine->i915->drm,
- "MSG IDLE undefined for engine id %u\n", engine->id);
+ if (!_reg[engine->id].reg)
return 0;
- }
val = intel_uncore_read(engine->uncore, _reg[engine->id]);
@@ -2094,17 +2111,6 @@ static void print_request_ring(struct drm_printer *m, struct i915_request *rq)
}
}
-static unsigned long list_count(struct list_head *list)
-{
- struct list_head *pos;
- unsigned long count = 0;
-
- list_for_each(pos, list)
- count++;
-
- return count;
-}
-
static unsigned long read_ul(void *p, size_t x)
{
return *(unsigned long *)(p + x);
@@ -2196,11 +2202,11 @@ void intel_engine_dump_active_requests(struct list_head *requests,
}
}
-static void engine_dump_active_requests(struct intel_engine_cs *engine, struct drm_printer *m)
+static void engine_dump_active_requests(struct intel_engine_cs *engine,
+ struct drm_printer *m)
{
+ struct intel_context *hung_ce = NULL;
struct i915_request *hung_rq = NULL;
- struct intel_context *ce;
- bool guc;
/*
* No need for an engine->irq_seqno_barrier() before the seqno reads.
@@ -2209,27 +2215,22 @@ static void engine_dump_active_requests(struct intel_engine_cs *engine, struct d
* But the intention here is just to report an instantaneous snapshot
* so that's fine.
*/
- lockdep_assert_held(&engine->sched_engine->lock);
+ intel_engine_get_hung_entity(engine, &hung_ce, &hung_rq);
drm_printf(m, "\tRequests:\n");
- guc = intel_uc_uses_guc_submission(&engine->gt->uc);
- if (guc) {
- ce = intel_engine_get_hung_context(engine);
- if (ce)
- hung_rq = intel_context_find_active_request(ce);
- } else {
- hung_rq = intel_engine_execlist_find_hung_request(engine);
- }
-
if (hung_rq)
engine_dump_request(hung_rq, m, "\t\thung");
+ else if (hung_ce)
+ drm_printf(m, "\t\tGot hung ce but no hung rq!\n");
- if (guc)
+ if (intel_uc_uses_guc_submission(&engine->gt->uc))
intel_guc_dump_active_requests(engine, hung_rq, m);
else
- intel_engine_dump_active_requests(&engine->sched_engine->requests,
- hung_rq, m);
+ intel_execlists_dump_active_requests(engine, hung_rq, m);
+
+ if (hung_rq)
+ i915_request_put(hung_rq);
}
void intel_engine_dump(struct intel_engine_cs *engine,
@@ -2239,7 +2240,6 @@ void intel_engine_dump(struct intel_engine_cs *engine,
struct i915_gpu_error * const error = &engine->i915->gpu_error;
struct i915_request *rq;
intel_wakeref_t wakeref;
- unsigned long flags;
ktime_t dummy;
if (header) {
@@ -2276,13 +2276,8 @@ void intel_engine_dump(struct intel_engine_cs *engine,
i915_reset_count(error));
print_properties(engine, m);
- spin_lock_irqsave(&engine->sched_engine->lock, flags);
engine_dump_active_requests(engine, m);
- drm_printf(m, "\tOn hold?: %lu\n",
- list_count(&engine->sched_engine->hold));
- spin_unlock_irqrestore(&engine->sched_engine->lock, flags);
-
drm_printf(m, "\tMMIO base: 0x%08x\n", engine->mmio_base);
wakeref = intel_runtime_pm_get_if_in_use(engine->uncore->rpm);
if (wakeref) {
@@ -2328,8 +2323,7 @@ intel_engine_create_virtual(struct intel_engine_cs **siblings,
return siblings[0]->cops->create_virtual(siblings, count, flags);
}
-struct i915_request *
-intel_engine_execlist_find_hung_request(struct intel_engine_cs *engine)
+static struct i915_request *engine_execlist_find_hung_request(struct intel_engine_cs *engine)
{
struct i915_request *request, *active = NULL;
@@ -2381,6 +2375,33 @@ intel_engine_execlist_find_hung_request(struct intel_engine_cs *engine)
return active;
}
+void intel_engine_get_hung_entity(struct intel_engine_cs *engine,
+ struct intel_context **ce, struct i915_request **rq)
+{
+ unsigned long flags;
+
+ *ce = intel_engine_get_hung_context(engine);
+ if (*ce) {
+ intel_engine_clear_hung_context(engine);
+
+ *rq = intel_context_get_active_request(*ce);
+ return;
+ }
+
+ /*
+ * Getting here with GuC enabled means it is a forced error capture
+ * with no actual hang. So, no need to attempt the execlist search.
+ */
+ if (intel_uc_uses_guc_submission(&engine->gt->uc))
+ return;
+
+ spin_lock_irqsave(&engine->sched_engine->lock, flags);
+ *rq = engine_execlist_find_hung_request(engine);
+ if (*rq)
+ *rq = i915_request_get_rcu(*rq);
+ spin_unlock_irqrestore(&engine->sched_engine->lock, flags);
+}
+
void xehp_enable_ccs_engines(struct intel_engine_cs *engine)
{
/*
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
index b0a4a2dbe3ee..e971b153fda9 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
@@ -15,6 +15,22 @@
#include "intel_rc6.h"
#include "intel_ring.h"
#include "shmem_utils.h"
+#include "intel_gt_regs.h"
+
+static void intel_gsc_idle_msg_enable(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *i915 = engine->i915;
+
+ if (IS_METEORLAKE(i915) && engine->id == GSC0) {
+ intel_uncore_write(engine->gt->uncore,
+ RC_PSMI_CTRL_GSCCS,
+ _MASKED_BIT_DISABLE(IDLE_MSG_DISABLE));
+ /* hysteresis 0xA=5us as recommended in spec*/
+ intel_uncore_write(engine->gt->uncore,
+ PWRCTX_MAXCNT_GSCCS,
+ 0xA);
+ }
+}
static void dbg_poison_ce(struct intel_context *ce)
{
@@ -275,6 +291,8 @@ void intel_engine_init__pm(struct intel_engine_cs *engine)
intel_wakeref_init(&engine->wakeref, rpm, &wf_ops);
intel_engine_init_heartbeat(engine);
+
+ intel_gsc_idle_msg_enable(engine);
}
/**
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_regs.h b/drivers/gpu/drm/i915/gt/intel_engine_regs.h
index ee3efd06ee54..6b9d9f837669 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_regs.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_regs.h
@@ -81,6 +81,7 @@
#define RING_EIR(base) _MMIO((base) + 0xb0)
#define RING_EMR(base) _MMIO((base) + 0xb4)
#define RING_ESR(base) _MMIO((base) + 0xb8)
+#define GEN12_STATE_ACK_DEBUG(base) _MMIO((base) + 0xbc)
#define RING_INSTPM(base) _MMIO((base) + 0xc0)
#define RING_CMD_CCTL(base) _MMIO((base) + 0xc4)
#define ACTHD(base) _MMIO((base) + 0xc8)
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
index 2daffa7c7dfd..1bbe6708d0a7 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -2989,10 +2989,12 @@ static void execlists_reset_prepare(struct intel_engine_cs *engine)
intel_engine_stop_cs(engine);
/*
- * Wa_22011802037:gen11/gen12: In addition to stopping the cs, we need
+ * Wa_22011802037: In addition to stopping the cs, we need
* to wait for any pending mi force wakeups
*/
- if (IS_GRAPHICS_VER(engine->i915, 11, 12))
+ if (IS_MTL_GRAPHICS_STEP(engine->i915, M, STEP_A0, STEP_B0) ||
+ (GRAPHICS_VER(engine->i915) >= 11 &&
+ GRAPHICS_VER_FULL(engine->i915) < IP_VER(12, 70)))
intel_engine_wait_for_pending_mi_fw(engine);
engine->execlists.reset_ccid = active_ccid(engine);
@@ -4148,6 +4150,22 @@ void intel_execlists_show_requests(struct intel_engine_cs *engine,
spin_unlock_irqrestore(&sched_engine->lock, flags);
}
+void intel_execlists_dump_active_requests(struct intel_engine_cs *engine,
+ struct i915_request *hung_rq,
+ struct drm_printer *m)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&engine->sched_engine->lock, flags);
+
+ intel_engine_dump_active_requests(&engine->sched_engine->requests, hung_rq, m);
+
+ drm_printf(m, "\tOn hold?: %zu\n",
+ list_count_nodes(&engine->sched_engine->hold));
+
+ spin_unlock_irqrestore(&engine->sched_engine->lock, flags);
+}
+
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_execlists.c"
#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.h b/drivers/gpu/drm/i915/gt/intel_execlists_submission.h
index a1aa92c983a5..d2c7d45ea062 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.h
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.h
@@ -32,6 +32,10 @@ void intel_execlists_show_requests(struct intel_engine_cs *engine,
int indent),
unsigned int max);
+void intel_execlists_dump_active_requests(struct intel_engine_cs *engine,
+ struct i915_request *hung_rq,
+ struct drm_printer *m);
+
bool
intel_engine_in_execlists_submission_mode(const struct intel_engine_cs *engine);
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c
index 8145851ad23d..842e69c7b21e 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
@@ -8,9 +8,11 @@
#include <linux/types.h>
#include <linux/stop_machine.h>
+#include <drm/drm_managed.h>
#include <drm/i915_drm.h>
#include <drm/intel-gtt.h>
+#include "display/intel_display.h"
#include "gem/i915_gem_lmem.h"
#include "intel_ggtt_gmch.h"
@@ -26,13 +28,6 @@
#include "intel_gtt.h"
#include "gen8_ppgtt.h"
-static inline bool suspend_retains_ptes(struct i915_address_space *vm)
-{
- return GRAPHICS_VER(vm->i915) >= 8 &&
- !HAS_LMEM(vm->i915) &&
- vm->is_ggtt;
-}
-
static void i915_ggtt_color_adjust(const struct drm_mm_node *node,
unsigned long color,
u64 *start,
@@ -104,23 +99,6 @@ int i915_ggtt_init_hw(struct drm_i915_private *i915)
return 0;
}
-/*
- * Return the value of the last GGTT pte cast to an u64, if
- * the system is supposed to retain ptes across resume. 0 otherwise.
- */
-static u64 read_last_pte(struct i915_address_space *vm)
-{
- struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
- gen8_pte_t __iomem *ptep;
-
- if (!suspend_retains_ptes(vm))
- return 0;
-
- GEM_BUG_ON(GRAPHICS_VER(vm->i915) < 8);
- ptep = (typeof(ptep))ggtt->gsm + (ggtt_total_entries(ggtt) - 1);
- return readq(ptep);
-}
-
/**
* i915_ggtt_suspend_vm - Suspend the memory mappings for a GGTT or DPT VM
* @vm: The VM to suspend the mappings for
@@ -184,10 +162,7 @@ retry:
i915_gem_object_unlock(obj);
}
- if (!suspend_retains_ptes(vm))
- vm->clear_range(vm, 0, vm->total);
- else
- i915_vm_to_ggtt(vm)->probed_pte = read_last_pte(vm);
+ vm->clear_range(vm, 0, vm->total);
vm->skip_pte_rewrite = save_skip_rewrite;
@@ -196,10 +171,13 @@ retry:
void i915_ggtt_suspend(struct i915_ggtt *ggtt)
{
+ struct intel_gt *gt;
+
i915_ggtt_suspend_vm(&ggtt->vm);
ggtt->invalidate(ggtt);
- intel_gt_check_and_clear_faults(ggtt->vm.gt);
+ list_for_each_entry(gt, &ggtt->gt_list, ggtt_link)
+ intel_gt_check_and_clear_faults(gt);
}
void gen6_ggtt_invalidate(struct i915_ggtt *ggtt)
@@ -225,16 +203,21 @@ static void gen8_ggtt_invalidate(struct i915_ggtt *ggtt)
static void guc_ggtt_invalidate(struct i915_ggtt *ggtt)
{
- struct intel_uncore *uncore = ggtt->vm.gt->uncore;
struct drm_i915_private *i915 = ggtt->vm.i915;
gen8_ggtt_invalidate(ggtt);
- if (GRAPHICS_VER(i915) >= 12)
- intel_uncore_write_fw(uncore, GEN12_GUC_TLB_INV_CR,
- GEN12_GUC_TLB_INV_CR_INVALIDATE);
- else
- intel_uncore_write_fw(uncore, GEN8_GTCR, GEN8_GTCR_INVALIDATE);
+ if (GRAPHICS_VER(i915) >= 12) {
+ struct intel_gt *gt;
+
+ list_for_each_entry(gt, &ggtt->gt_list, ggtt_link)
+ intel_uncore_write_fw(gt->uncore,
+ GEN12_GUC_TLB_INV_CR,
+ GEN12_GUC_TLB_INV_CR_INVALIDATE);
+ } else {
+ intel_uncore_write_fw(ggtt->vm.gt->uncore,
+ GEN8_GTCR, GEN8_GTCR_INVALIDATE);
+ }
}
u64 gen8_ggtt_pte_encode(dma_addr_t addr,
@@ -287,8 +270,11 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
*/
gte = (gen8_pte_t __iomem *)ggtt->gsm;
- gte += vma_res->start / I915_GTT_PAGE_SIZE;
- end = gte + vma_res->node_size / I915_GTT_PAGE_SIZE;
+ gte += (vma_res->start - vma_res->guard) / I915_GTT_PAGE_SIZE;
+ end = gte + vma_res->guard / I915_GTT_PAGE_SIZE;
+ while (gte < end)
+ gen8_set_pte(gte++, vm->scratch[0]->encode);
+ end += (vma_res->node_size + vma_res->guard) / I915_GTT_PAGE_SIZE;
for_each_sgt_daddr(addr, iter, vma_res->bi.pages)
gen8_set_pte(gte++, pte_encode | addr);
@@ -338,9 +324,12 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
dma_addr_t addr;
gte = (gen6_pte_t __iomem *)ggtt->gsm;
- gte += vma_res->start / I915_GTT_PAGE_SIZE;
- end = gte + vma_res->node_size / I915_GTT_PAGE_SIZE;
+ gte += (vma_res->start - vma_res->guard) / I915_GTT_PAGE_SIZE;
+ end = gte + vma_res->guard / I915_GTT_PAGE_SIZE;
+ while (gte < end)
+ iowrite32(vm->scratch[0]->encode, gte++);
+ end += (vma_res->node_size + vma_res->guard) / I915_GTT_PAGE_SIZE;
for_each_sgt_daddr(addr, iter, vma_res->bi.pages)
iowrite32(vm->pte_encode(addr, level, flags), gte++);
GEM_BUG_ON(gte > end);
@@ -361,27 +350,6 @@ static void nop_clear_range(struct i915_address_space *vm,
{
}
-static void gen8_ggtt_clear_range(struct i915_address_space *vm,
- u64 start, u64 length)
-{
- struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
- unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
- unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
- const gen8_pte_t scratch_pte = vm->scratch[0]->encode;
- gen8_pte_t __iomem *gtt_base =
- (gen8_pte_t __iomem *)ggtt->gsm + first_entry;
- const int max_entries = ggtt_total_entries(ggtt) - first_entry;
- int i;
-
- if (WARN(num_entries > max_entries,
- "First entry = %d; Num entries = %d (max=%d)\n",
- first_entry, num_entries, max_entries))
- num_entries = max_entries;
-
- for (i = 0; i < num_entries; i++)
- gen8_set_pte(&gtt_base[i], scratch_pte);
-}
-
static void bxt_vtd_ggtt_wa(struct i915_address_space *vm)
{
/*
@@ -551,8 +519,6 @@ static int init_ggtt(struct i915_ggtt *ggtt)
struct drm_mm_node *entry;
int ret;
- ggtt->pte_lost = true;
-
/*
* GuC requires all resources that we're sharing with it to be placed in
* non-WOPCM memory. If GuC is not present or not in use we still need a
@@ -920,8 +886,8 @@ static void gen6_gmch_remove(struct i915_address_space *vm)
static struct resource pci_resource(struct pci_dev *pdev, int bar)
{
- return (struct resource)DEFINE_RES_MEM(pci_resource_start(pdev, bar),
- pci_resource_len(pdev, bar));
+ return DEFINE_RES_MEM(pci_resource_start(pdev, bar),
+ pci_resource_len(pdev, bar));
}
static int gen8_gmch_probe(struct i915_ggtt *ggtt)
@@ -953,8 +919,6 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
ggtt->vm.cleanup = gen6_gmch_remove;
ggtt->vm.insert_page = gen8_ggtt_insert_page;
ggtt->vm.clear_range = nop_clear_range;
- if (intel_scanout_needs_vtd_wa(i915))
- ggtt->vm.clear_range = gen8_ggtt_clear_range;
ggtt->vm.insert_entries = gen8_ggtt_insert_entries;
@@ -979,15 +943,16 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
}
- ggtt->invalidate = gen8_ggtt_invalidate;
+ if (intel_uc_wants_guc(&ggtt->vm.gt->uc))
+ ggtt->invalidate = guc_ggtt_invalidate;
+ else
+ ggtt->invalidate = gen8_ggtt_invalidate;
ggtt->vm.vma_ops.bind_vma = intel_ggtt_bind_vma;
ggtt->vm.vma_ops.unbind_vma = intel_ggtt_unbind_vma;
ggtt->vm.pte_encode = gen8_ggtt_pte_encode;
- setup_private_pat(ggtt->vm.gt);
-
return ggtt_probe_common(ggtt, size);
}
@@ -1115,7 +1080,7 @@ static int gen6_gmch_probe(struct i915_ggtt *ggtt)
ggtt->vm.alloc_scratch_dma = alloc_pt_dma;
ggtt->vm.clear_range = nop_clear_range;
- if (!HAS_FULL_PPGTT(i915) || intel_scanout_needs_vtd_wa(i915))
+ if (!HAS_FULL_PPGTT(i915))
ggtt->vm.clear_range = gen6_ggtt_clear_range;
ggtt->vm.insert_page = gen6_ggtt_insert_page;
ggtt->vm.insert_entries = gen6_ggtt_insert_entries;
@@ -1196,7 +1161,14 @@ static int ggtt_probe_hw(struct i915_ggtt *ggtt, struct intel_gt *gt)
*/
int i915_ggtt_probe_hw(struct drm_i915_private *i915)
{
- int ret;
+ struct intel_gt *gt;
+ int ret, i;
+
+ for_each_gt(gt, i915, i) {
+ ret = intel_gt_assign_ggtt(gt);
+ if (ret)
+ return ret;
+ }
ret = ggtt_probe_hw(to_gt(i915)->ggtt, to_gt(i915));
if (ret)
@@ -1208,35 +1180,25 @@ int i915_ggtt_probe_hw(struct drm_i915_private *i915)
return 0;
}
-int i915_ggtt_enable_hw(struct drm_i915_private *i915)
+struct i915_ggtt *i915_ggtt_create(struct drm_i915_private *i915)
{
- if (GRAPHICS_VER(i915) < 6)
- return intel_ggtt_gmch_enable_hw(i915);
+ struct i915_ggtt *ggtt;
- return 0;
-}
+ ggtt = drmm_kzalloc(&i915->drm, sizeof(*ggtt), GFP_KERNEL);
+ if (!ggtt)
+ return ERR_PTR(-ENOMEM);
-void i915_ggtt_enable_guc(struct i915_ggtt *ggtt)
-{
- GEM_BUG_ON(ggtt->invalidate != gen8_ggtt_invalidate);
+ INIT_LIST_HEAD(&ggtt->gt_list);
- ggtt->invalidate = guc_ggtt_invalidate;
-
- ggtt->invalidate(ggtt);
+ return ggtt;
}
-void i915_ggtt_disable_guc(struct i915_ggtt *ggtt)
+int i915_ggtt_enable_hw(struct drm_i915_private *i915)
{
- /* XXX Temporary pardon for error unload */
- if (ggtt->invalidate == gen8_ggtt_invalidate)
- return;
-
- /* We should only be called after i915_ggtt_enable_guc() */
- GEM_BUG_ON(ggtt->invalidate != guc_ggtt_invalidate);
-
- ggtt->invalidate = gen8_ggtt_invalidate;
+ if (GRAPHICS_VER(i915) < 6)
+ return intel_ggtt_gmch_enable_hw(i915);
- ggtt->invalidate(ggtt);
+ return 0;
}
/**
@@ -1253,20 +1215,11 @@ bool i915_ggtt_resume_vm(struct i915_address_space *vm)
{
struct i915_vma *vma;
bool write_domain_objs = false;
- bool retained_ptes;
drm_WARN_ON(&vm->i915->drm, !vm->is_ggtt && !vm->is_dpt);
- /*
- * First fill our portion of the GTT with scratch pages if
- * they were not retained across suspend.
- */
- retained_ptes = suspend_retains_ptes(vm) &&
- !i915_vm_to_ggtt(vm)->pte_lost &&
- !GEM_WARN_ON(i915_vm_to_ggtt(vm)->probed_pte != read_last_pte(vm));
-
- if (!retained_ptes)
- vm->clear_range(vm, 0, vm->total);
+ /* First fill our portion of the GTT with scratch pages */
+ vm->clear_range(vm, 0, vm->total);
/* clflush objects bound into the GGTT and rebind them. */
list_for_each_entry(vma, &vm->bound_list, vm_link) {
@@ -1275,16 +1228,16 @@ bool i915_ggtt_resume_vm(struct i915_address_space *vm)
atomic_read(&vma->flags) & I915_VMA_BIND_MASK;
GEM_BUG_ON(!was_bound);
- if (!retained_ptes) {
- /*
- * Clear the bound flags of the vma resource to allow
- * ptes to be repopulated.
- */
- vma->resource->bound_flags = 0;
- vma->ops->bind_vma(vm, NULL, vma->resource,
- obj ? obj->cache_level : 0,
- was_bound);
- }
+
+ /*
+ * Clear the bound flags of the vma resource to allow
+ * ptes to be repopulated.
+ */
+ vma->resource->bound_flags = 0;
+ vma->ops->bind_vma(vm, NULL, vma->resource,
+ obj ? obj->cache_level : 0,
+ was_bound);
+
if (obj) { /* only used during resume => exclusive access */
write_domain_objs |= fetch_and_zero(&obj->write_domain);
obj->read_domains |= I915_GEM_DOMAIN_GTT;
@@ -1296,9 +1249,11 @@ bool i915_ggtt_resume_vm(struct i915_address_space *vm)
void i915_ggtt_resume(struct i915_ggtt *ggtt)
{
+ struct intel_gt *gt;
bool flush;
- intel_gt_check_and_clear_faults(ggtt->vm.gt);
+ list_for_each_entry(gt, &ggtt->gt_list, ggtt_link)
+ intel_gt_check_and_clear_faults(gt);
flush = i915_ggtt_resume_vm(&ggtt->vm);
@@ -1307,13 +1262,5 @@ void i915_ggtt_resume(struct i915_ggtt *ggtt)
if (flush)
wbinvd_on_all_cpus();
- if (GRAPHICS_VER(ggtt->vm.i915) >= 8)
- setup_private_pat(ggtt->vm.gt);
-
intel_ggtt_restore_fences(ggtt);
}
-
-void i915_ggtt_mark_pte_lost(struct drm_i915_private *i915, bool val)
-{
- to_gt(i915)->ggtt->pte_lost = val;
-}
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
index 995082d45cb2..37d0b0fe791d 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
@@ -5,6 +5,7 @@
#include <linux/highmem.h>
+#include "display/intel_display.h"
#include "i915_drv.h"
#include "i915_reg.h"
#include "i915_scatterlist.h"
@@ -220,7 +221,8 @@ static int fence_update(struct i915_fence_reg *fence,
return ret;
}
- fence->start = vma->node.start;
+ GEM_BUG_ON(vma->fence_size > i915_vma_size(vma));
+ fence->start = i915_ggtt_offset(vma);
fence->size = vma->fence_size;
fence->stride = i915_gem_object_get_stride(vma->obj);
fence->tiling = i915_gem_object_get_tiling(vma->obj);
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt_gmch.c b/drivers/gpu/drm/i915/gt/intel_ggtt_gmch.c
index 4e2163a1aa46..77c793812eb4 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt_gmch.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt_gmch.c
@@ -6,7 +6,6 @@
#include "intel_ggtt_gmch.h"
#include <drm/intel-gtt.h>
-#include <drm/i915_drm.h>
#include <linux/agp_backend.h>
@@ -81,7 +80,7 @@ int intel_ggtt_gmch_probe(struct i915_ggtt *ggtt)
phys_addr_t gmadr_base;
int ret;
- ret = intel_gmch_probe(i915->bridge_dev, to_pci_dev(i915->drm.dev), NULL);
+ ret = intel_gmch_probe(i915->gmch.pdev, to_pci_dev(i915->drm.dev), NULL);
if (!ret) {
drm_err(&i915->drm, "failed to set up gmch\n");
return -EIO;
@@ -89,8 +88,7 @@ int intel_ggtt_gmch_probe(struct i915_ggtt *ggtt)
intel_gmch_gtt_get(&ggtt->vm.total, &gmadr_base, &ggtt->mappable_end);
- ggtt->gmadr =
- (struct resource)DEFINE_RES_MEM(gmadr_base, ggtt->mappable_end);
+ ggtt->gmadr = DEFINE_RES_MEM(gmadr_base, ggtt->mappable_end);
ggtt->vm.alloc_pt_dma = alloc_pt_dma;
ggtt->vm.alloc_scratch_dma = alloc_pt_dma;
diff --git a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
index f50ea92910d9..2af1ae3831df 100644
--- a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
+++ b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
@@ -21,6 +21,7 @@
#define INSTR_CLIENT_SHIFT 29
#define INSTR_MI_CLIENT 0x0
#define INSTR_BC_CLIENT 0x2
+#define INSTR_GSC_CLIENT 0x2 /* MTL+ */
#define INSTR_RC_CLIENT 0x3
#define INSTR_SUBCLIENT_SHIFT 27
#define INSTR_SUBCLIENT_MASK 0x18000000
@@ -432,6 +433,12 @@
#define COLOR_BLT ((0x2<<29)|(0x40<<22))
#define SRC_COPY_BLT ((0x2<<29)|(0x43<<22))
+#define GSC_INSTR(opcode, data, flags) \
+ (__INSTR(INSTR_GSC_CLIENT) | (opcode) << 22 | (data) << 9 | (flags))
+
+#define GSC_FW_LOAD GSC_INSTR(1, 0, 2)
+#define HECI1_FW_LIMIT_VALID (1 << 31)
+
/*
* Used to convert any address to canonical form.
* Starting from gen8, some commands (e.g. STATE_BASE_ADDRESS,
diff --git a/drivers/gpu/drm/i915/gt/intel_gsc.c b/drivers/gpu/drm/i915/gt/intel_gsc.c
index 976fdf27e790..bcc3605158db 100644
--- a/drivers/gpu/drm/i915/gt/intel_gsc.c
+++ b/drivers/gpu/drm/i915/gt/intel_gsc.c
@@ -174,6 +174,14 @@ static void gsc_init_one(struct drm_i915_private *i915, struct intel_gsc *gsc,
intf->irq = -1;
intf->id = intf_id;
+ /*
+ * On the multi-tile setups the GSC is functional on the first tile only
+ */
+ if (gsc_to_gt(gsc)->info.id != 0) {
+ drm_dbg(&i915->drm, "Not initializing gsc for remote tiles\n");
+ return;
+ }
+
if (intf_id == 0 && !HAS_HECI_PXP(i915))
return;
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
index 9c18b5f2e789..f0dbfc434e07 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt.c
@@ -8,7 +8,6 @@
#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_lmem.h"
-#include "pxp/intel_pxp.h"
#include "i915_drv.h"
#include "i915_perf_oa_regs.h"
@@ -23,6 +22,7 @@
#include "intel_gt_debugfs.h"
#include "intel_gt_mcr.h"
#include "intel_gt_pm.h"
+#include "intel_gt_print.h"
#include "intel_gt_regs.h"
#include "intel_gt_requests.h"
#include "intel_migrate.h"
@@ -90,9 +90,8 @@ static int intel_gt_probe_lmem(struct intel_gt *gt)
if (err == -ENODEV)
return 0;
- drm_err(&i915->drm,
- "Failed to setup region(%d) type=%d\n",
- err, INTEL_MEMORY_LOCAL);
+ gt_err(gt, "Failed to setup region(%d) type=%d\n",
+ err, INTEL_MEMORY_LOCAL);
return err;
}
@@ -110,9 +109,18 @@ static int intel_gt_probe_lmem(struct intel_gt *gt)
int intel_gt_assign_ggtt(struct intel_gt *gt)
{
- gt->ggtt = drmm_kzalloc(&gt->i915->drm, sizeof(*gt->ggtt), GFP_KERNEL);
+ /* Media GT shares primary GT's GGTT */
+ if (gt->type == GT_MEDIA) {
+ gt->ggtt = to_gt(gt->i915)->ggtt;
+ } else {
+ gt->ggtt = i915_ggtt_create(gt->i915);
+ if (IS_ERR(gt->ggtt))
+ return PTR_ERR(gt->ggtt);
+ }
+
+ list_add_tail(&gt->ggtt_link, &gt->ggtt->gt_list);
- return gt->ggtt ? 0 : -ENOMEM;
+ return 0;
}
int intel_gt_init_mmio(struct intel_gt *gt)
@@ -192,14 +200,14 @@ int intel_gt_init_hw(struct intel_gt *gt)
ret = i915_ppgtt_init_hw(gt);
if (ret) {
- drm_err(&i915->drm, "Enabling PPGTT failed (%d)\n", ret);
+ gt_err(gt, "Enabling PPGTT failed (%d)\n", ret);
goto out;
}
/* We can't enable contexts until all firmware is loaded */
ret = intel_uc_init_hw(&gt->uc);
if (ret) {
- i915_probe_error(i915, "Enabling uc failed (%d)\n", ret);
+ gt_probe_error(gt, "Enabling uc failed (%d)\n", ret);
goto out;
}
@@ -210,21 +218,6 @@ out:
return ret;
}
-static void rmw_set(struct intel_uncore *uncore, i915_reg_t reg, u32 set)
-{
- intel_uncore_rmw(uncore, reg, 0, set);
-}
-
-static void rmw_clear(struct intel_uncore *uncore, i915_reg_t reg, u32 clr)
-{
- intel_uncore_rmw(uncore, reg, clr, 0);
-}
-
-static void clear_register(struct intel_uncore *uncore, i915_reg_t reg)
-{
- intel_uncore_rmw(uncore, reg, 0, 0);
-}
-
static void gen6_clear_engine_error_register(struct intel_engine_cs *engine)
{
GEN6_RING_FAULT_REG_RMW(engine, RING_FAULT_VALID, 0);
@@ -250,22 +243,22 @@ intel_gt_clear_error_registers(struct intel_gt *gt,
u32 eir;
if (GRAPHICS_VER(i915) != 2)
- clear_register(uncore, PGTBL_ER);
+ intel_uncore_write(uncore, PGTBL_ER, 0);
if (GRAPHICS_VER(i915) < 4)
- clear_register(uncore, IPEIR(RENDER_RING_BASE));
+ intel_uncore_write(uncore, IPEIR(RENDER_RING_BASE), 0);
else
- clear_register(uncore, IPEIR_I965);
+ intel_uncore_write(uncore, IPEIR_I965, 0);
- clear_register(uncore, EIR);
+ intel_uncore_write(uncore, EIR, 0);
eir = intel_uncore_read(uncore, EIR);
if (eir) {
/*
* some errors might have become stuck,
* mask them.
*/
- drm_dbg(&gt->i915->drm, "EIR stuck: 0x%08x, masking\n", eir);
- rmw_set(uncore, EMR, eir);
+ gt_dbg(gt, "EIR stuck: 0x%08x, masking\n", eir);
+ intel_uncore_rmw(uncore, EMR, 0, eir);
intel_uncore_write(uncore, GEN2_IIR,
I915_MASTER_ERROR_INTERRUPT);
}
@@ -275,10 +268,10 @@ intel_gt_clear_error_registers(struct intel_gt *gt,
RING_FAULT_VALID, 0);
intel_gt_mcr_read_any(gt, XEHP_RING_FAULT_REG);
} else if (GRAPHICS_VER(i915) >= 12) {
- rmw_clear(uncore, GEN12_RING_FAULT_REG, RING_FAULT_VALID);
+ intel_uncore_rmw(uncore, GEN12_RING_FAULT_REG, RING_FAULT_VALID, 0);
intel_uncore_posting_read(uncore, GEN12_RING_FAULT_REG);
} else if (GRAPHICS_VER(i915) >= 8) {
- rmw_clear(uncore, GEN8_RING_FAULT_REG, RING_FAULT_VALID);
+ intel_uncore_rmw(uncore, GEN8_RING_FAULT_REG, RING_FAULT_VALID, 0);
intel_uncore_posting_read(uncore, GEN8_RING_FAULT_REG);
} else if (GRAPHICS_VER(i915) >= 6) {
struct intel_engine_cs *engine;
@@ -298,16 +291,16 @@ static void gen6_check_faults(struct intel_gt *gt)
for_each_engine(engine, gt, id) {
fault = GEN6_RING_FAULT_REG_READ(engine);
if (fault & RING_FAULT_VALID) {
- drm_dbg(&engine->i915->drm, "Unexpected fault\n"
- "\tAddr: 0x%08lx\n"
- "\tAddress space: %s\n"
- "\tSource ID: %d\n"
- "\tType: %d\n",
- fault & PAGE_MASK,
- fault & RING_FAULT_GTTSEL_MASK ?
- "GGTT" : "PPGTT",
- RING_FAULT_SRCID(fault),
- RING_FAULT_FAULT_TYPE(fault));
+ gt_dbg(gt, "Unexpected fault\n"
+ "\tAddr: 0x%08lx\n"
+ "\tAddress space: %s\n"
+ "\tSource ID: %d\n"
+ "\tType: %d\n",
+ fault & PAGE_MASK,
+ fault & RING_FAULT_GTTSEL_MASK ?
+ "GGTT" : "PPGTT",
+ RING_FAULT_SRCID(fault),
+ RING_FAULT_FAULT_TYPE(fault));
}
}
}
@@ -334,17 +327,17 @@ static void xehp_check_faults(struct intel_gt *gt)
fault_addr = ((u64)(fault_data1 & FAULT_VA_HIGH_BITS) << 44) |
((u64)fault_data0 << 12);
- drm_dbg(&gt->i915->drm, "Unexpected fault\n"
- "\tAddr: 0x%08x_%08x\n"
- "\tAddress space: %s\n"
- "\tEngine ID: %d\n"
- "\tSource ID: %d\n"
- "\tType: %d\n",
- upper_32_bits(fault_addr), lower_32_bits(fault_addr),
- fault_data1 & FAULT_GTT_SEL ? "GGTT" : "PPGTT",
- GEN8_RING_FAULT_ENGINE_ID(fault),
- RING_FAULT_SRCID(fault),
- RING_FAULT_FAULT_TYPE(fault));
+ gt_dbg(gt, "Unexpected fault\n"
+ "\tAddr: 0x%08x_%08x\n"
+ "\tAddress space: %s\n"
+ "\tEngine ID: %d\n"
+ "\tSource ID: %d\n"
+ "\tType: %d\n",
+ upper_32_bits(fault_addr), lower_32_bits(fault_addr),
+ fault_data1 & FAULT_GTT_SEL ? "GGTT" : "PPGTT",
+ GEN8_RING_FAULT_ENGINE_ID(fault),
+ RING_FAULT_SRCID(fault),
+ RING_FAULT_FAULT_TYPE(fault));
}
}
@@ -375,17 +368,17 @@ static void gen8_check_faults(struct intel_gt *gt)
fault_addr = ((u64)(fault_data1 & FAULT_VA_HIGH_BITS) << 44) |
((u64)fault_data0 << 12);
- drm_dbg(&uncore->i915->drm, "Unexpected fault\n"
- "\tAddr: 0x%08x_%08x\n"
- "\tAddress space: %s\n"
- "\tEngine ID: %d\n"
- "\tSource ID: %d\n"
- "\tType: %d\n",
- upper_32_bits(fault_addr), lower_32_bits(fault_addr),
- fault_data1 & FAULT_GTT_SEL ? "GGTT" : "PPGTT",
- GEN8_RING_FAULT_ENGINE_ID(fault),
- RING_FAULT_SRCID(fault),
- RING_FAULT_FAULT_TYPE(fault));
+ gt_dbg(gt, "Unexpected fault\n"
+ "\tAddr: 0x%08x_%08x\n"
+ "\tAddress space: %s\n"
+ "\tEngine ID: %d\n"
+ "\tSource ID: %d\n"
+ "\tType: %d\n",
+ upper_32_bits(fault_addr), lower_32_bits(fault_addr),
+ fault_data1 & FAULT_GTT_SEL ? "GGTT" : "PPGTT",
+ GEN8_RING_FAULT_ENGINE_ID(fault),
+ RING_FAULT_SRCID(fault),
+ RING_FAULT_FAULT_TYPE(fault));
}
}
@@ -479,7 +472,7 @@ static int intel_gt_init_scratch(struct intel_gt *gt, unsigned int size)
if (IS_ERR(obj))
obj = i915_gem_object_create_internal(i915, size);
if (IS_ERR(obj)) {
- drm_err(&i915->drm, "Failed to allocate scratch page\n");
+ gt_err(gt, "Failed to allocate scratch page\n");
return PTR_ERR(obj);
}
@@ -734,8 +727,7 @@ int intel_gt_init(struct intel_gt *gt)
err = intel_gt_init_hwconfig(gt);
if (err)
- drm_err(&gt->i915->drm, "Failed to retrieve hwconfig table: %pe\n",
- ERR_PTR(err));
+ gt_err(gt, "Failed to retrieve hwconfig table: %pe\n", ERR_PTR(err));
err = __engines_record_defaults(gt);
if (err)
@@ -753,8 +745,6 @@ int intel_gt_init(struct intel_gt *gt)
intel_migrate_init(&gt->migrate, gt);
- intel_pxp_init(&gt->pxp);
-
goto out_fw;
err_gt:
__intel_gt_disable(gt);
@@ -794,8 +784,6 @@ void intel_gt_driver_unregister(struct intel_gt *gt)
intel_rps_driver_unregister(&gt->rps);
intel_gsc_fini(&gt->gsc);
- intel_pxp_fini(&gt->pxp);
-
/*
* Upon unregistering the device to prevent any new users, cancel
* all in-flight requests so that we can quickly unbind the active
@@ -896,7 +884,7 @@ int intel_gt_probe_all(struct drm_i915_private *i915)
gt->name = "Primary GT";
gt->info.engine_mask = RUNTIME_INFO(i915)->platform_engine_mask;
- drm_dbg(&i915->drm, "Setting up %s\n", gt->name);
+ gt_dbg(gt, "Setting up %s\n", gt->name);
ret = intel_gt_tile_setup(gt, phys_addr);
if (ret)
return ret;
@@ -921,7 +909,7 @@ int intel_gt_probe_all(struct drm_i915_private *i915)
gt->info.engine_mask = gtdef->engine_mask;
gt->info.id = i;
- drm_dbg(&i915->drm, "Setting up %s\n", gt->name);
+ gt_dbg(gt, "Setting up %s\n", gt->name);
if (GEM_WARN_ON(range_overflows_t(resource_size_t,
gtdef->mapping_base,
SZ_16M,
@@ -1009,8 +997,7 @@ get_reg_and_bit(const struct intel_engine_cs *engine, const bool gen8,
const unsigned int class = engine->class;
struct reg_and_bit rb = { };
- if (drm_WARN_ON_ONCE(&engine->i915->drm,
- class >= num || !regs[class].reg))
+ if (gt_WARN_ON_ONCE(engine->gt, class >= num || !regs[class].reg))
return rb;
rb.reg = regs[class];
@@ -1079,11 +1066,25 @@ static void mmio_invalidate_full(struct intel_gt *gt)
enum intel_engine_id id;
const i915_reg_t *regs;
unsigned int num = 0;
+ unsigned long flags;
- if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) {
+ /*
+ * New platforms should not be added with catch-all-newer (>=)
+ * condition so that any later platform added triggers the below warning
+ * and in turn mandates a human cross-check of whether the invalidation
+ * flows have compatible semantics.
+ *
+ * For instance with the 11.00 -> 12.00 transition three out of five
+ * respective engine registers were moved to masked type. Then after the
+ * 12.00 -> 12.50 transition multi cast handling is required too.
+ */
+
+ if (GRAPHICS_VER_FULL(i915) == IP_VER(12, 50) ||
+ GRAPHICS_VER_FULL(i915) == IP_VER(12, 55)) {
regs = NULL;
num = ARRAY_SIZE(xehp_regs);
- } else if (GRAPHICS_VER(i915) == 12) {
+ } else if (GRAPHICS_VER_FULL(i915) == IP_VER(12, 0) ||
+ GRAPHICS_VER_FULL(i915) == IP_VER(12, 10)) {
regs = gen12_regs;
num = ARRAY_SIZE(gen12_regs);
} else if (GRAPHICS_VER(i915) >= 8 && GRAPHICS_VER(i915) <= 11) {
@@ -1093,13 +1094,13 @@ static void mmio_invalidate_full(struct intel_gt *gt)
return;
}
- if (drm_WARN_ONCE(&i915->drm, !num,
- "Platform does not implement TLB invalidation!"))
+ if (gt_WARN_ONCE(gt, !num, "Platform does not implement TLB invalidation!"))
return;
intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
- spin_lock_irq(&uncore->lock); /* serialise invalidate with GT reset */
+ intel_gt_mcr_lock(gt, &flags);
+ spin_lock(&uncore->lock); /* serialise invalidate with GT reset */
awake = 0;
for_each_engine(engine, gt, id) {
@@ -1144,7 +1145,8 @@ static void mmio_invalidate_full(struct intel_gt *gt)
IS_ALDERLAKE_P(i915)))
intel_uncore_write_fw(uncore, GEN12_OA_TLB_INV_CR, 1);
- spin_unlock_irq(&uncore->lock);
+ spin_unlock(&uncore->lock);
+ intel_gt_mcr_unlock(gt, flags);
for_each_engine_masked(engine, gt, awake, tmp) {
struct reg_and_bit rb;
@@ -1157,9 +1159,8 @@ static void mmio_invalidate_full(struct intel_gt *gt)
}
if (wait_for_invalidate(gt, rb))
- drm_err_ratelimited(&gt->i915->drm,
- "%s TLB invalidation did not complete in %ums!\n",
- engine->name, TLB_INVAL_TIMEOUT_MS);
+ gt_err_ratelimited(gt, "%s TLB invalidation did not complete in %ums!\n",
+ engine->name, TLB_INVAL_TIMEOUT_MS);
}
/*
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.h b/drivers/gpu/drm/i915/gt/intel_gt.h
index e0365d556248..d2f4fbde5f9f 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt.h
@@ -39,6 +39,11 @@ static inline struct intel_gt *huc_to_gt(struct intel_huc *huc)
return container_of(huc, struct intel_gt, uc.huc);
}
+static inline struct intel_gt *gsc_uc_to_gt(struct intel_gsc_uc *gsc_uc)
+{
+ return container_of(gsc_uc, struct intel_gt, uc.gsc);
+}
+
static inline struct intel_gt *gsc_to_gt(struct intel_gsc *gsc)
{
return container_of(gsc, struct intel_gt, gsc);
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c b/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c
index 2a6a4ca7fdad..7c9be4fd1c8c 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c
@@ -7,6 +7,7 @@
#include "i915_reg.h"
#include "intel_gt.h"
#include "intel_gt_clock_utils.h"
+#include "intel_gt_print.h"
#include "intel_gt_regs.h"
static u32 read_reference_ts_freq(struct intel_uncore *uncore)
@@ -193,10 +194,9 @@ void intel_gt_init_clock_frequency(struct intel_gt *gt)
void intel_gt_check_clock_frequency(const struct intel_gt *gt)
{
if (gt->clock_frequency != read_clock_frequency(gt->uncore)) {
- dev_err(gt->i915->drm.dev,
- "GT clock frequency changed, was %uHz, now %uHz!\n",
- gt->clock_frequency,
- read_clock_frequency(gt->uncore));
+ gt_err(gt, "GT clock frequency changed, was %uHz, now %uHz!\n",
+ gt->clock_frequency,
+ read_clock_frequency(gt->uncore));
}
}
#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_debugfs.c b/drivers/gpu/drm/i915/gt/intel_gt_debugfs.c
index dd53641f3637..5fc2df01aa0d 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_debugfs.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_debugfs.c
@@ -12,7 +12,6 @@
#include "intel_gt_mcr.h"
#include "intel_gt_pm_debugfs.h"
#include "intel_sseu_debugfs.h"
-#include "pxp/intel_pxp_debugfs.h"
#include "uc/intel_uc_debugfs.h"
int intel_gt_debugfs_reset_show(struct intel_gt *gt, u64 *val)
@@ -99,7 +98,6 @@ void intel_gt_debugfs_register(struct intel_gt *gt)
intel_sseu_debugfs_register(gt, root);
intel_uc_debugfs_register(&gt->uc, root);
- intel_pxp_debugfs_register(&gt->pxp, root);
}
void intel_gt_debugfs_register_files(struct dentry *root,
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_irq.c b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
index 6f6b9e04d916..1b25a6039152 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_irq.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
@@ -10,6 +10,7 @@
#include "intel_breadcrumbs.h"
#include "intel_gt.h"
#include "intel_gt_irq.h"
+#include "intel_gt_print.h"
#include "intel_gt_regs.h"
#include "intel_uncore.h"
#include "intel_rps.h"
@@ -47,9 +48,8 @@ gen11_gt_engine_identity(struct intel_gt *gt,
!time_after32(local_clock() >> 10, timeout_ts));
if (unlikely(!(ident & GEN11_INTR_DATA_VALID))) {
- drm_err(&gt->i915->drm,
- "INTR_IDENTITY_REG%u:%u 0x%08x not valid!\n",
- bank, bit, ident);
+ gt_err(gt, "INTR_IDENTITY_REG%u:%u 0x%08x not valid!\n",
+ bank, bit, ident);
return 0;
}
@@ -76,7 +76,7 @@ gen11_other_irq_handler(struct intel_gt *gt, const u8 instance,
return gen11_rps_irq_handler(&media_gt->rps, iir);
if (instance == OTHER_KCR_INSTANCE)
- return intel_pxp_irq_handler(&gt->pxp, iir);
+ return intel_pxp_irq_handler(gt->i915->pxp, iir);
if (instance == OTHER_GSC_INSTANCE)
return intel_gsc_irq_handler(gt, iir);
@@ -378,8 +378,7 @@ void gen6_gt_irq_handler(struct intel_gt *gt, u32 gt_iir)
if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
GT_BSD_CS_ERROR_INTERRUPT |
GT_CS_MASTER_ERROR_INTERRUPT))
- drm_dbg(&gt->i915->drm, "Command parser error, gt_iir 0x%08x\n",
- gt_iir);
+ gt_dbg(gt, "Command parser error, gt_iir 0x%08x\n", gt_iir);
if (gt_iir & GT_PARITY_ERROR(gt->i915))
gen7_parity_error_irq_handler(gt, gt_iir);
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_mcr.c b/drivers/gpu/drm/i915/gt/intel_gt_mcr.c
index ea86c1ab5dc5..169393a7ad88 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_mcr.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_mcr.c
@@ -6,6 +6,7 @@
#include "i915_drv.h"
#include "intel_gt_mcr.h"
+#include "intel_gt_print.h"
#include "intel_gt_regs.h"
/**
@@ -143,6 +144,8 @@ void intel_gt_mcr_init(struct intel_gt *gt)
unsigned long fuse;
int i;
+ spin_lock_init(&gt->mcr_lock);
+
/*
* An mslice is unavailable only if both the meml3 for the slice is
* disabled *and* all of the DSS in the slice (quadrant) are disabled.
@@ -156,14 +159,21 @@ void intel_gt_mcr_init(struct intel_gt *gt)
GEN12_MEML3_EN_MASK);
if (!gt->info.mslice_mask) /* should be impossible! */
- drm_warn(&i915->drm, "mslice mask all zero!\n");
+ gt_warn(gt, "mslice mask all zero!\n");
}
if (MEDIA_VER(i915) >= 13 && gt->type == GT_MEDIA) {
gt->steering_table[OADDRM] = xelpmp_oaddrm_steering_table;
} else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 70)) {
- fuse = REG_FIELD_GET(GT_L3_EXC_MASK,
- intel_uncore_read(gt->uncore, XEHP_FUSE4));
+ /* Wa_14016747170 */
+ if (IS_MTL_GRAPHICS_STEP(i915, M, STEP_A0, STEP_B0) ||
+ IS_MTL_GRAPHICS_STEP(i915, P, STEP_A0, STEP_B0))
+ fuse = REG_FIELD_GET(MTL_GT_L3_EXC_MASK,
+ intel_uncore_read(gt->uncore,
+ MTL_GT_ACTIVITY_FACTOR));
+ else
+ fuse = REG_FIELD_GET(GT_L3_EXC_MASK,
+ intel_uncore_read(gt->uncore, XEHP_FUSE4));
/*
* Despite the register field being named "exclude mask" the
@@ -196,7 +206,7 @@ void intel_gt_mcr_init(struct intel_gt *gt)
~intel_uncore_read(gt->uncore, GEN10_MIRROR_FUSE3) &
GEN10_L3BANK_MASK;
if (!gt->info.l3bank_mask) /* should be impossible! */
- drm_warn(&i915->drm, "L3 bank mask is all zero!\n");
+ gt_warn(gt, "L3 bank mask is all zero!\n");
} else if (GRAPHICS_VER(i915) >= 11) {
/*
* We expect all modern platforms to have at least some
@@ -221,24 +231,26 @@ static i915_reg_t mcr_reg_cast(const i915_mcr_reg_t mcr)
/*
* rw_with_mcr_steering_fw - Access a register with specific MCR steering
- * @uncore: pointer to struct intel_uncore
+ * @gt: GT to read register from
* @reg: register being accessed
* @rw_flag: FW_REG_READ for read access or FW_REG_WRITE for write access
* @group: group number (documented as "sliceid" on older platforms)
* @instance: instance number (documented as "subsliceid" on older platforms)
* @value: register value to be written (ignored for read)
*
+ * Context: The caller must hold the MCR lock
* Return: 0 for write access. register value for read access.
*
* Caller needs to make sure the relevant forcewake wells are up.
*/
-static u32 rw_with_mcr_steering_fw(struct intel_uncore *uncore,
+static u32 rw_with_mcr_steering_fw(struct intel_gt *gt,
i915_mcr_reg_t reg, u8 rw_flag,
int group, int instance, u32 value)
{
+ struct intel_uncore *uncore = gt->uncore;
u32 mcr_mask, mcr_ss, mcr, old_mcr, val = 0;
- lockdep_assert_held(&uncore->lock);
+ lockdep_assert_held(&gt->mcr_lock);
if (GRAPHICS_VER_FULL(uncore->i915) >= IP_VER(12, 70)) {
/*
@@ -308,12 +320,14 @@ static u32 rw_with_mcr_steering_fw(struct intel_uncore *uncore,
return val;
}
-static u32 rw_with_mcr_steering(struct intel_uncore *uncore,
+static u32 rw_with_mcr_steering(struct intel_gt *gt,
i915_mcr_reg_t reg, u8 rw_flag,
int group, int instance,
u32 value)
{
+ struct intel_uncore *uncore = gt->uncore;
enum forcewake_domains fw_domains;
+ unsigned long flags;
u32 val;
fw_domains = intel_uncore_forcewake_for_reg(uncore, mcr_reg_cast(reg),
@@ -322,24 +336,96 @@ static u32 rw_with_mcr_steering(struct intel_uncore *uncore,
GEN8_MCR_SELECTOR,
FW_REG_READ | FW_REG_WRITE);
- spin_lock_irq(&uncore->lock);
+ intel_gt_mcr_lock(gt, &flags);
+ spin_lock(&uncore->lock);
intel_uncore_forcewake_get__locked(uncore, fw_domains);
- val = rw_with_mcr_steering_fw(uncore, reg, rw_flag, group, instance, value);
+ val = rw_with_mcr_steering_fw(gt, reg, rw_flag, group, instance, value);
intel_uncore_forcewake_put__locked(uncore, fw_domains);
- spin_unlock_irq(&uncore->lock);
+ spin_unlock(&uncore->lock);
+ intel_gt_mcr_unlock(gt, flags);
return val;
}
/**
+ * intel_gt_mcr_lock - Acquire MCR steering lock
+ * @gt: GT structure
+ * @flags: storage to save IRQ flags to
+ *
+ * Performs locking to protect the steering for the duration of an MCR
+ * operation. On MTL and beyond, a hardware lock will also be taken to
+ * serialize access not only for the driver, but also for external hardware and
+ * firmware agents.
+ *
+ * Context: Takes gt->mcr_lock. uncore->lock should *not* be held when this
+ * function is called, although it may be acquired after this
+ * function call.
+ */
+void intel_gt_mcr_lock(struct intel_gt *gt, unsigned long *flags)
+{
+ unsigned long __flags;
+ int err = 0;
+
+ lockdep_assert_not_held(&gt->uncore->lock);
+
+ /*
+ * Starting with MTL, we need to coordinate not only with other
+ * driver threads, but also with hardware/firmware agents. A dedicated
+ * locking register is used.
+ */
+ if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 70))
+ err = wait_for(intel_uncore_read_fw(gt->uncore,
+ MTL_STEER_SEMAPHORE) == 0x1, 100);
+
+ /*
+ * Even on platforms with a hardware lock, we'll continue to grab
+ * a software spinlock too for lockdep purposes. If the hardware lock
+ * was already acquired, there should never be contention on the
+ * software lock.
+ */
+ spin_lock_irqsave(&gt->mcr_lock, __flags);
+
+ *flags = __flags;
+
+ /*
+ * In theory we should never fail to acquire the HW semaphore; this
+ * would indicate some hardware/firmware is misbehaving and not
+ * releasing it properly.
+ */
+ if (err == -ETIMEDOUT) {
+ gt_err_ratelimited(gt, "hardware MCR steering semaphore timed out");
+ add_taint_for_CI(gt->i915, TAINT_WARN); /* CI is now unreliable */
+ }
+}
+
+/**
+ * intel_gt_mcr_unlock - Release MCR steering lock
+ * @gt: GT structure
+ * @flags: IRQ flags to restore
+ *
+ * Releases the lock acquired by intel_gt_mcr_lock().
+ *
+ * Context: Releases gt->mcr_lock
+ */
+void intel_gt_mcr_unlock(struct intel_gt *gt, unsigned long flags)
+{
+ spin_unlock_irqrestore(&gt->mcr_lock, flags);
+
+ if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 70))
+ intel_uncore_write_fw(gt->uncore, MTL_STEER_SEMAPHORE, 0x1);
+}
+
+/**
* intel_gt_mcr_read - read a specific instance of an MCR register
* @gt: GT structure
* @reg: the MCR register to read
* @group: the MCR group
* @instance: the MCR instance
*
+ * Context: Takes and releases gt->mcr_lock
+ *
* Returns the value read from an MCR register after steering toward a specific
* group/instance.
*/
@@ -347,7 +433,7 @@ u32 intel_gt_mcr_read(struct intel_gt *gt,
i915_mcr_reg_t reg,
int group, int instance)
{
- return rw_with_mcr_steering(gt->uncore, reg, FW_REG_READ, group, instance, 0);
+ return rw_with_mcr_steering(gt, reg, FW_REG_READ, group, instance, 0);
}
/**
@@ -360,11 +446,13 @@ u32 intel_gt_mcr_read(struct intel_gt *gt,
*
* Write an MCR register in unicast mode after steering toward a specific
* group/instance.
+ *
+ * Context: Calls a function that takes and releases gt->mcr_lock
*/
void intel_gt_mcr_unicast_write(struct intel_gt *gt, i915_mcr_reg_t reg, u32 value,
int group, int instance)
{
- rw_with_mcr_steering(gt->uncore, reg, FW_REG_WRITE, group, instance, value);
+ rw_with_mcr_steering(gt, reg, FW_REG_WRITE, group, instance, value);
}
/**
@@ -374,10 +462,16 @@ void intel_gt_mcr_unicast_write(struct intel_gt *gt, i915_mcr_reg_t reg, u32 val
* @value: value to write
*
* Write an MCR register in multicast mode to update all instances.
+ *
+ * Context: Takes and releases gt->mcr_lock
*/
void intel_gt_mcr_multicast_write(struct intel_gt *gt,
i915_mcr_reg_t reg, u32 value)
{
+ unsigned long flags;
+
+ intel_gt_mcr_lock(gt, &flags);
+
/*
* Ensure we have multicast behavior, just in case some non-i915 agent
* left the hardware in unicast mode.
@@ -386,6 +480,8 @@ void intel_gt_mcr_multicast_write(struct intel_gt *gt,
intel_uncore_write_fw(gt->uncore, MTL_MCR_SELECTOR, GEN11_MCR_MULTICAST);
intel_uncore_write(gt->uncore, mcr_reg_cast(reg), value);
+
+ intel_gt_mcr_unlock(gt, flags);
}
/**
@@ -398,9 +494,13 @@ void intel_gt_mcr_multicast_write(struct intel_gt *gt,
* function assumes the caller is already holding any necessary forcewake
* domains; use intel_gt_mcr_multicast_write() in cases where forcewake should
* be obtained automatically.
+ *
+ * Context: The caller must hold gt->mcr_lock.
*/
void intel_gt_mcr_multicast_write_fw(struct intel_gt *gt, i915_mcr_reg_t reg, u32 value)
{
+ lockdep_assert_held(&gt->mcr_lock);
+
/*
* Ensure we have multicast behavior, just in case some non-i915 agent
* left the hardware in unicast mode.
@@ -427,6 +527,8 @@ void intel_gt_mcr_multicast_write_fw(struct intel_gt *gt, i915_mcr_reg_t reg, u3
* domains; use intel_gt_mcr_multicast_rmw() in cases where forcewake should
* be obtained automatically.
*
+ * Context: Calls functions that take and release gt->mcr_lock
+ *
* Returns the old (unmodified) value read.
*/
u32 intel_gt_mcr_multicast_rmw(struct intel_gt *gt, i915_mcr_reg_t reg,
@@ -578,6 +680,8 @@ void intel_gt_mcr_get_nonterminated_steering(struct intel_gt *gt,
* domains; use intel_gt_mcr_read_any() in cases where forcewake should be
* obtained automatically.
*
+ * Context: The caller must hold gt->mcr_lock.
+ *
* Returns the value from a non-terminated instance of @reg.
*/
u32 intel_gt_mcr_read_any_fw(struct intel_gt *gt, i915_mcr_reg_t reg)
@@ -585,10 +689,12 @@ u32 intel_gt_mcr_read_any_fw(struct intel_gt *gt, i915_mcr_reg_t reg)
int type;
u8 group, instance;
+ lockdep_assert_held(&gt->mcr_lock);
+
for (type = 0; type < NUM_STEERING_TYPES; type++) {
if (reg_needs_read_steering(gt, reg, type)) {
get_nonterminated_steering(gt, type, &group, &instance);
- return rw_with_mcr_steering_fw(gt->uncore, reg,
+ return rw_with_mcr_steering_fw(gt, reg,
FW_REG_READ,
group, instance, 0);
}
@@ -605,6 +711,8 @@ u32 intel_gt_mcr_read_any_fw(struct intel_gt *gt, i915_mcr_reg_t reg)
* Reads a GT MCR register. The read will be steered to a non-terminated
* instance (i.e., one that isn't fused off or powered down by power gating).
*
+ * Context: Calls a function that takes and releases gt->mcr_lock.
+ *
* Returns the value from a non-terminated instance of @reg.
*/
u32 intel_gt_mcr_read_any(struct intel_gt *gt, i915_mcr_reg_t reg)
@@ -615,7 +723,7 @@ u32 intel_gt_mcr_read_any(struct intel_gt *gt, i915_mcr_reg_t reg)
for (type = 0; type < NUM_STEERING_TYPES; type++) {
if (reg_needs_read_steering(gt, reg, type)) {
get_nonterminated_steering(gt, type, &group, &instance);
- return rw_with_mcr_steering(gt->uncore, reg,
+ return rw_with_mcr_steering(gt, reg,
FW_REG_READ,
group, instance, 0);
}
@@ -728,6 +836,7 @@ void intel_gt_mcr_get_ss_steering(struct intel_gt *gt, unsigned int dss,
* Note that this routine assumes the caller holds forcewake asserted, it is
* not suitable for very long waits.
*
+ * Context: Calls a function that takes and releases gt->mcr_lock
* Return: 0 if the register matches the desired condition, or -ETIMEDOUT.
*/
int intel_gt_mcr_wait_for_reg(struct intel_gt *gt,
@@ -739,7 +848,7 @@ int intel_gt_mcr_wait_for_reg(struct intel_gt *gt,
{
int ret;
- lockdep_assert_not_held(&gt->uncore->lock);
+ lockdep_assert_not_held(&gt->mcr_lock);
#define done ((intel_gt_mcr_read_any(gt, reg) & mask) == value)
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_mcr.h b/drivers/gpu/drm/i915/gt/intel_gt_mcr.h
index ae93b20e1c17..41684495b7da 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_mcr.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_mcr.h
@@ -9,6 +9,8 @@
#include "intel_gt_types.h"
void intel_gt_mcr_init(struct intel_gt *gt);
+void intel_gt_mcr_lock(struct intel_gt *gt, unsigned long *flags);
+void intel_gt_mcr_unlock(struct intel_gt *gt, unsigned long flags);
u32 intel_gt_mcr_read(struct intel_gt *gt,
i915_mcr_reg_t reg,
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
index 16db85fab0b1..cef3d6f5c34e 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
@@ -14,6 +14,7 @@
#include "intel_gt.h"
#include "intel_gt_clock_utils.h"
#include "intel_gt_pm.h"
+#include "intel_gt_print.h"
#include "intel_gt_requests.h"
#include "intel_llc.h"
#include "intel_pm.h"
@@ -275,8 +276,7 @@ int intel_gt_resume(struct intel_gt *gt)
/* Only when the HW is re-initialised, can we replay the requests */
err = intel_gt_init_hw(gt);
if (err) {
- i915_probe_error(gt->i915,
- "Failed to initialize GPU, declaring it wedged!\n");
+ gt_probe_error(gt, "Failed to initialize GPU, declaring it wedged!\n");
goto err_wedged;
}
@@ -293,9 +293,8 @@ int intel_gt_resume(struct intel_gt *gt)
intel_engine_pm_put(engine);
if (err) {
- drm_err(&gt->i915->drm,
- "Failed to restart %s (%d)\n",
- engine->name, err);
+ gt_err(gt, "Failed to restart %s (%d)\n",
+ engine->name, err);
goto err_wedged;
}
}
@@ -304,8 +303,6 @@ int intel_gt_resume(struct intel_gt *gt)
intel_uc_resume(&gt->uc);
- intel_pxp_resume(&gt->pxp);
-
user_forcewake(gt, false);
out_fw:
@@ -339,8 +336,6 @@ void intel_gt_suspend_prepare(struct intel_gt *gt)
{
user_forcewake(gt, true);
wait_for_suspend(gt);
-
- intel_pxp_suspend_prepare(&gt->pxp);
}
static suspend_state_t pm_suspend_target(void)
@@ -365,7 +360,6 @@ void intel_gt_suspend_late(struct intel_gt *gt)
GEM_BUG_ON(gt->awake);
intel_uc_suspend(&gt->uc);
- intel_pxp_suspend(&gt->pxp);
/*
* On disabling the device, we want to turn off HW access to memory
@@ -393,7 +387,6 @@ void intel_gt_suspend_late(struct intel_gt *gt)
void intel_gt_runtime_suspend(struct intel_gt *gt)
{
- intel_pxp_runtime_suspend(&gt->pxp);
intel_uc_runtime_suspend(&gt->uc);
GT_TRACE(gt, "\n");
@@ -411,8 +404,6 @@ int intel_gt_runtime_resume(struct intel_gt *gt)
if (ret)
return ret;
- intel_pxp_runtime_resume(&gt->pxp);
-
return 0;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_print.h b/drivers/gpu/drm/i915/gt/intel_gt_print.h
new file mode 100644
index 000000000000..5d9da355ce24
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt_print.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef __INTEL_GT_PRINT__
+#define __INTEL_GT_PRINT__
+
+#include <drm/drm_print.h>
+#include "intel_gt_types.h"
+#include "i915_utils.h"
+
+#define gt_err(_gt, _fmt, ...) \
+ drm_err(&(_gt)->i915->drm, "GT%u: " _fmt, (_gt)->info.id, ##__VA_ARGS__)
+
+#define gt_warn(_gt, _fmt, ...) \
+ drm_warn(&(_gt)->i915->drm, "GT%u: " _fmt, (_gt)->info.id, ##__VA_ARGS__)
+
+#define gt_notice(_gt, _fmt, ...) \
+ drm_notice(&(_gt)->i915->drm, "GT%u: " _fmt, (_gt)->info.id, ##__VA_ARGS__)
+
+#define gt_info(_gt, _fmt, ...) \
+ drm_info(&(_gt)->i915->drm, "GT%u: " _fmt, (_gt)->info.id, ##__VA_ARGS__)
+
+#define gt_dbg(_gt, _fmt, ...) \
+ drm_dbg(&(_gt)->i915->drm, "GT%u: " _fmt, (_gt)->info.id, ##__VA_ARGS__)
+
+#define gt_err_ratelimited(_gt, _fmt, ...) \
+ drm_err_ratelimited(&(_gt)->i915->drm, "GT%u: " _fmt, (_gt)->info.id, ##__VA_ARGS__)
+
+#define gt_probe_error(_gt, _fmt, ...) \
+ do { \
+ if (i915_error_injected()) \
+ gt_dbg(_gt, _fmt, ##__VA_ARGS__); \
+ else \
+ gt_err(_gt, _fmt, ##__VA_ARGS__); \
+ } while (0)
+
+#define gt_WARN(_gt, _condition, _fmt, ...) \
+ drm_WARN(&(_gt)->i915->drm, _condition, "GT%u: " _fmt, (_gt)->info.id, ##__VA_ARGS__)
+
+#define gt_WARN_ONCE(_gt, _condition, _fmt, ...) \
+ drm_WARN_ONCE(&(_gt)->i915->drm, _condition, "GT%u: " _fmt, (_gt)->info.id, ##__VA_ARGS__)
+
+#define gt_WARN_ON(_gt, _condition) \
+ gt_WARN(_gt, _condition, "%s", "gt_WARN_ON(" __stringify(_condition) ")")
+
+#define gt_WARN_ON_ONCE(_gt, _condition) \
+ gt_WARN_ONCE(_gt, _condition, "%s", "gt_WARN_ONCE(" __stringify(_condition) ")")
+
+#endif /* __INTEL_GT_PRINT_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_regs.h b/drivers/gpu/drm/i915/gt/intel_gt_regs.h
index a5454af2a9cf..be0f6e305c88 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_regs.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_regs.h
@@ -67,6 +67,7 @@
#define GMD_ID_MEDIA _MMIO(MTL_MEDIA_GSI_BASE + 0xd8c)
#define MCFG_MCR_SELECTOR _MMIO(0xfd0)
+#define MTL_STEER_SEMAPHORE _MMIO(0xfd0)
#define MTL_MCR_SELECTOR _MMIO(0xfd4)
#define SF_MCR_SELECTOR _MMIO(0xfd8)
#define GEN8_MCR_SELECTOR _MMIO(0xfdc)
@@ -406,6 +407,8 @@
#define GEN9_WM_CHICKEN3 _MMIO(0x5588)
#define GEN9_FACTOR_IN_CLR_VAL_HIZ (1 << 9)
+#define XEHP_CULLBIT1 MCR_REG(0x6100)
+
#define CHICKEN_RASTER_1 MCR_REG(0x6204)
#define DIS_SF_ROUND_NEAREST_EVEN REG_BIT(8)
@@ -413,6 +416,7 @@
#define TBIMR_FAST_CLIP REG_BIT(5)
#define VFLSKPD MCR_REG(0x62a8)
+#define VF_PREFETCH_TLB_DIS REG_BIT(5)
#define DIS_OVER_FETCH_CACHE REG_BIT(1)
#define DIS_MULT_MISS_RD_SQUASH REG_BIT(0)
@@ -455,10 +459,12 @@
#define HZ_DEPTH_TEST_LE_GE_OPT_DISABLE REG_BIT(13)
#define BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE REG_BIT(3)
+#define XEHP_CULLBIT2 MCR_REG(0x7030)
+
#define GEN8_L3CNTLREG _MMIO(0x7034)
#define GEN8_ERRDETBCTRL (1 << 9)
-#define PSS_MODE2 _MMIO(0x703c)
+#define XEHP_PSS_MODE2 MCR_REG(0x703c)
#define SCOREBOARD_STALL_FLUSH_CONTROL REG_BIT(5)
#define GEN7_SC_INSTDONE _MMIO(0x7100)
@@ -680,10 +686,7 @@
#define GEN6_RSTCTL _MMIO(0x9420)
#define GEN7_MISCCPCTL _MMIO(0x9424)
-#define GEN7_DOP_CLOCK_GATE_ENABLE (1 << 0)
-
-#define GEN8_MISCCPCTL MCR_REG(0x9424)
-#define GEN8_DOP_CLOCK_GATE_ENABLE REG_BIT(0)
+#define GEN7_DOP_CLOCK_GATE_ENABLE REG_BIT(0)
#define GEN12_DOP_CLOCK_GATE_RENDER_ENABLE REG_BIT(1)
#define GEN8_DOP_CLOCK_GATE_CFCLK_ENABLE (1 << 2)
#define GEN8_DOP_CLOCK_GATE_GUC_ENABLE (1 << 4)
@@ -921,6 +924,10 @@
#define MSG_IDLE_FW_MASK REG_GENMASK(13, 9)
#define MSG_IDLE_FW_SHIFT 9
+#define RC_PSMI_CTRL_GSCCS _MMIO(0x11a050)
+#define IDLE_MSG_DISABLE REG_BIT(0)
+#define PWRCTX_MAXCNT_GSCCS _MMIO(0x11a054)
+
#define FORCEWAKE_MEDIA_GEN9 _MMIO(0xa270)
#define FORCEWAKE_RENDER_GEN9 _MMIO(0xa278)
@@ -953,10 +960,11 @@
#define GEN7_DISABLE_SAMPLER_PREFETCH (1 << 30)
#define GEN8_GARBCNTL _MMIO(0xb004)
-#define GEN9_GAPS_TSV_CREDIT_DISABLE (1 << 7)
-#define GEN11_ARBITRATION_PRIO_ORDER_MASK (0x3f << 22)
-#define GEN11_HASH_CTRL_EXCL_MASK (0x7f << 0)
-#define GEN11_HASH_CTRL_EXCL_BIT0 (1 << 0)
+#define GEN11_ARBITRATION_PRIO_ORDER_MASK REG_GENMASK(27, 22)
+#define GEN12_BUS_HASH_CTL_BIT_EXC REG_BIT(7)
+#define GEN9_GAPS_TSV_CREDIT_DISABLE REG_BIT(7)
+#define GEN11_HASH_CTRL_EXCL_MASK REG_GENMASK(6, 0)
+#define GEN11_HASH_CTRL_EXCL_BIT0 REG_FIELD_PREP(GEN11_HASH_CTRL_EXCL_MASK, 0x1)
#define GEN9_SCRATCH_LNCF1 _MMIO(0xb008)
#define GEN9_LNCF_NONIA_COHERENT_ATOMICS_ENABLE REG_BIT(0)
@@ -968,7 +976,8 @@
#define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C47FF8C
#define GEN7_L3AGDIS (1 << 19)
-#define XEHPC_LNCFMISCCFGREG0 _MMIO(0xb01c)
+#define XEHPC_LNCFMISCCFGREG0 MCR_REG(0xb01c)
+#define XEHPC_HOSTCACHEEN REG_BIT(1)
#define XEHPC_OVRLSCCC REG_BIT(0)
#define GEN7_L3CNTLREG2 _MMIO(0xb020)
@@ -1030,7 +1039,7 @@
#define XEHP_L3SCQREG7 MCR_REG(0xb188)
#define BLEND_FILL_CACHING_OPT_DIS REG_BIT(3)
-#define XEHPC_L3SCRUB _MMIO(0xb18c)
+#define XEHPC_L3SCRUB MCR_REG(0xb18c)
#define SCRUB_CL_DWNGRADE_SHARED REG_BIT(12)
#define SCRUB_RATE_PER_BANK_MASK REG_GENMASK(2, 0)
#define SCRUB_RATE_4B_PER_CLK REG_FIELD_PREP(SCRUB_RATE_PER_BANK_MASK, 0x6)
@@ -1088,16 +1097,19 @@
#define XEHP_MERT_MOD_CTRL MCR_REG(0xcf28)
#define RENDER_MOD_CTRL MCR_REG(0xcf2c)
#define COMP_MOD_CTRL MCR_REG(0xcf30)
-#define VDBX_MOD_CTRL MCR_REG(0xcf34)
-#define VEBX_MOD_CTRL MCR_REG(0xcf38)
+#define XELPMP_GSC_MOD_CTRL _MMIO(0xcf30) /* media GT only */
+#define XEHP_VDBX_MOD_CTRL MCR_REG(0xcf34)
+#define XELPMP_VDBX_MOD_CTRL _MMIO(0xcf34)
+#define XEHP_VEBX_MOD_CTRL MCR_REG(0xcf38)
+#define XELPMP_VEBX_MOD_CTRL _MMIO(0xcf38)
#define FORCE_MISS_FTLB REG_BIT(3)
-#define GEN12_GAMSTLB_CTRL _MMIO(0xcf4c)
+#define XEHP_GAMSTLB_CTRL MCR_REG(0xcf4c)
#define CONTROL_BLOCK_CLKGATE_DIS REG_BIT(12)
#define EGRESS_BLOCK_CLKGATE_DIS REG_BIT(11)
#define TAG_BLOCK_CLKGATE_DIS REG_BIT(7)
-#define GEN12_GAMCNTRL_CTRL _MMIO(0xcf54)
+#define XEHP_GAMCNTRL_CTRL MCR_REG(0xcf54)
#define INVALIDATION_BROADCAST_MODE_DIS REG_BIT(12)
#define GLOBAL_INVALIDATION_MODE REG_BIT(2)
@@ -1528,6 +1540,9 @@
#define MTL_MEDIA_MC6 _MMIO(0x138048)
+#define MTL_GT_ACTIVITY_FACTOR _MMIO(0x138010)
+#define MTL_GT_L3_EXC_MASK REG_GENMASK(5, 3)
+
#define GEN6_GT_THREAD_STATUS_REG _MMIO(0x13805c)
#define GEN6_GT_THREAD_STATUS_CORE_MASK 0x7
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_sysfs.c b/drivers/gpu/drm/i915/gt/intel_gt_sysfs.c
index 9486dd3bed99..6629e4c72b6b 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_sysfs.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_sysfs.c
@@ -12,6 +12,7 @@
#include "i915_drv.h"
#include "i915_sysfs.h"
#include "intel_gt.h"
+#include "intel_gt_print.h"
#include "intel_gt_sysfs.h"
#include "intel_gt_sysfs_pm.h"
#include "intel_gt_types.h"
@@ -105,8 +106,7 @@ void intel_gt_sysfs_register(struct intel_gt *gt)
exit_fail:
kobject_put(&gt->sysfs_gt);
- drm_warn(&gt->i915->drm,
- "failed to initialize gt%d sysfs root\n", gt->info.id);
+ gt_warn(gt, "failed to initialize sysfs root\n");
}
void intel_gt_sysfs_unregister(struct intel_gt *gt)
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c
index cf71305ad586..28f27091cd3b 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c
@@ -11,6 +11,7 @@
#include "i915_reg.h"
#include "i915_sysfs.h"
#include "intel_gt.h"
+#include "intel_gt_print.h"
#include "intel_gt_regs.h"
#include "intel_gt_sysfs.h"
#include "intel_gt_sysfs_pm.h"
@@ -164,7 +165,6 @@ sysfs_gt_attribute_r_func(struct kobject *kobj, struct attribute *attr,
NULL); \
INTEL_GT_ATTR_RO(_name)
-#ifdef CONFIG_PM
static u32 get_residency(struct intel_gt *gt, enum intel_rc6_res_type id)
{
intel_wakeref_t wakeref;
@@ -300,14 +300,12 @@ static void intel_sysfs_rc6_init(struct intel_gt *gt, struct kobject *kobj)
{
int ret;
- if (!HAS_RC6(gt->i915))
+ if (!IS_ENABLED(CONFIG_PM) || !HAS_RC6(gt->i915))
return;
ret = __intel_gt_sysfs_create_group(kobj, rc6_attr_group);
if (ret)
- drm_warn(&gt->i915->drm,
- "failed to create gt%u RC6 sysfs files (%pe)\n",
- gt->info.id, ERR_PTR(ret));
+ gt_warn(gt, "failed to create RC6 sysfs files (%pe)\n", ERR_PTR(ret));
/*
* cannot use the is_visible() attribute because
@@ -316,24 +314,15 @@ static void intel_sysfs_rc6_init(struct intel_gt *gt, struct kobject *kobj)
if (HAS_RC6p(gt->i915)) {
ret = __intel_gt_sysfs_create_group(kobj, rc6p_attr_group);
if (ret)
- drm_warn(&gt->i915->drm,
- "failed to create gt%u RC6p sysfs files (%pe)\n",
- gt->info.id, ERR_PTR(ret));
+ gt_warn(gt, "failed to create RC6p sysfs files (%pe)\n", ERR_PTR(ret));
}
if (IS_VALLEYVIEW(gt->i915) || IS_CHERRYVIEW(gt->i915)) {
ret = __intel_gt_sysfs_create_group(kobj, media_rc6_attr_group);
if (ret)
- drm_warn(&gt->i915->drm,
- "failed to create media %u RC6 sysfs files (%pe)\n",
- gt->info.id, ERR_PTR(ret));
+ gt_warn(gt, "failed to create media RC6 sysfs files (%pe)\n", ERR_PTR(ret));
}
}
-#else
-static void intel_sysfs_rc6_init(struct intel_gt *gt, struct kobject *kobj)
-{
-}
-#endif /* CONFIG_PM */
static u32 __act_freq_mhz_show(struct intel_gt *gt)
{
@@ -745,9 +734,7 @@ void intel_gt_sysfs_pm_init(struct intel_gt *gt, struct kobject *kobj)
ret = intel_sysfs_rps_init(gt, kobj);
if (ret)
- drm_warn(&gt->i915->drm,
- "failed to create gt%u RPS sysfs files (%pe)",
- gt->info.id, ERR_PTR(ret));
+ gt_warn(gt, "failed to create RPS sysfs files (%pe)", ERR_PTR(ret));
/* end of the legacy interfaces */
if (!is_object_gt(kobj))
@@ -755,29 +742,22 @@ void intel_gt_sysfs_pm_init(struct intel_gt *gt, struct kobject *kobj)
ret = sysfs_create_file(kobj, &attr_punit_req_freq_mhz.attr);
if (ret)
- drm_warn(&gt->i915->drm,
- "failed to create gt%u punit_req_freq_mhz sysfs (%pe)",
- gt->info.id, ERR_PTR(ret));
+ gt_warn(gt, "failed to create punit_req_freq_mhz sysfs (%pe)", ERR_PTR(ret));
if (i915_mmio_reg_valid(intel_gt_perf_limit_reasons_reg(gt))) {
ret = sysfs_create_files(kobj, throttle_reason_attrs);
if (ret)
- drm_warn(&gt->i915->drm,
- "failed to create gt%u throttle sysfs files (%pe)",
- gt->info.id, ERR_PTR(ret));
+ gt_warn(gt, "failed to create throttle sysfs files (%pe)", ERR_PTR(ret));
}
if (HAS_MEDIA_RATIO_MODE(gt->i915) && intel_uc_uses_guc_slpc(&gt->uc)) {
ret = sysfs_create_files(kobj, media_perf_power_attrs);
if (ret)
- drm_warn(&gt->i915->drm,
- "failed to create gt%u media_perf_power_attrs sysfs (%pe)\n",
- gt->info.id, ERR_PTR(ret));
+ gt_warn(gt, "failed to create media_perf_power_attrs sysfs (%pe)\n",
+ ERR_PTR(ret));
}
ret = sysfs_create_files(gt->sysfs_defaults, rps_defaults_attrs);
if (ret)
- drm_warn(&gt->i915->drm,
- "failed to add gt%u rps defaults (%pe)\n",
- gt->info.id, ERR_PTR(ret));
+ gt_warn(gt, "failed to add rps defaults (%pe)\n", ERR_PTR(ret));
}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h
index c1d9cd255e06..f08c2556aa25 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h
@@ -30,7 +30,6 @@
#include "intel_rps_types.h"
#include "intel_migrate_types.h"
#include "intel_wakeref.h"
-#include "pxp/intel_pxp_types.h"
#include "intel_wopcm.h"
struct drm_i915_private;
@@ -233,6 +232,14 @@ struct intel_gt {
u8 instanceid;
} default_steering;
+ /**
+ * @mcr_lock: Protects the MCR steering register
+ *
+ * Protects the MCR steering register (e.g., GEN8_MCR_SELECTOR).
+ * Should be taken before uncore->lock in cases where both are desired.
+ */
+ spinlock_t mcr_lock;
+
/*
* Base of per-tile GTTMMADR where we can derive the MMIO and the GGTT.
*/
@@ -267,8 +274,6 @@ struct intel_gt {
u8 wb_index; /* Only used on HAS_L3_CCS_READ() platforms */
} mocs;
- struct intel_pxp pxp;
-
/* gt/gtN sysfs */
struct kobject sysfs_gt;
@@ -277,6 +282,9 @@ struct intel_gt {
struct kobject *sysfs_defaults;
struct i915_perf_gt perf;
+
+ /** link: &ggtt.gt_list */
+ struct list_head ggtt_link;
};
struct intel_gt_definition {
@@ -296,12 +304,6 @@ enum intel_gt_scratch_field {
/* 8 bytes */
INTEL_GT_SCRATCH_FIELD_COHERENTL3_WA = 256,
-
- /* 6 * 8 bytes */
- INTEL_GT_SCRATCH_FIELD_PERF_CS_GPR = 2048,
-
- /* 4 bytes */
- INTEL_GT_SCRATCH_FIELD_PERF_PREDICATE_RESULT_1 = 2096,
};
#endif /* __INTEL_GT_TYPES_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.c b/drivers/gpu/drm/i915/gt/intel_gtt.c
index 2ba3983984b9..4f436ba7a3c8 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.c
@@ -17,6 +17,7 @@
#include "i915_utils.h"
#include "intel_gt.h"
#include "intel_gt_mcr.h"
+#include "intel_gt_print.h"
#include "intel_gt_regs.h"
#include "intel_gtt.h"
@@ -461,9 +462,9 @@ void gtt_write_workarounds(struct intel_gt *gt)
intel_uncore_write(uncore,
HSW_GTT_CACHE_EN,
can_use_gtt_cache ? GTT_CACHE_EN_ALL : 0);
- drm_WARN_ON_ONCE(&i915->drm, can_use_gtt_cache &&
- intel_uncore_read(uncore,
- HSW_GTT_CACHE_EN) == 0);
+ gt_WARN_ON_ONCE(gt, can_use_gtt_cache &&
+ intel_uncore_read(uncore,
+ HSW_GTT_CACHE_EN) == 0);
}
}
@@ -482,14 +483,25 @@ static void tgl_setup_private_ppat(struct intel_uncore *uncore)
static void xehp_setup_private_ppat(struct intel_gt *gt)
{
- intel_gt_mcr_multicast_write(gt, XEHP_PAT_INDEX(0), GEN8_PPAT_WB);
- intel_gt_mcr_multicast_write(gt, XEHP_PAT_INDEX(1), GEN8_PPAT_WC);
- intel_gt_mcr_multicast_write(gt, XEHP_PAT_INDEX(2), GEN8_PPAT_WT);
- intel_gt_mcr_multicast_write(gt, XEHP_PAT_INDEX(3), GEN8_PPAT_UC);
- intel_gt_mcr_multicast_write(gt, XEHP_PAT_INDEX(4), GEN8_PPAT_WB);
- intel_gt_mcr_multicast_write(gt, XEHP_PAT_INDEX(5), GEN8_PPAT_WB);
- intel_gt_mcr_multicast_write(gt, XEHP_PAT_INDEX(6), GEN8_PPAT_WB);
- intel_gt_mcr_multicast_write(gt, XEHP_PAT_INDEX(7), GEN8_PPAT_WB);
+ enum forcewake_domains fw;
+ unsigned long flags;
+
+ fw = intel_uncore_forcewake_for_reg(gt->uncore, _MMIO(XEHP_PAT_INDEX(0).reg),
+ FW_REG_WRITE);
+ intel_uncore_forcewake_get(gt->uncore, fw);
+
+ intel_gt_mcr_lock(gt, &flags);
+ intel_gt_mcr_multicast_write_fw(gt, XEHP_PAT_INDEX(0), GEN8_PPAT_WB);
+ intel_gt_mcr_multicast_write_fw(gt, XEHP_PAT_INDEX(1), GEN8_PPAT_WC);
+ intel_gt_mcr_multicast_write_fw(gt, XEHP_PAT_INDEX(2), GEN8_PPAT_WT);
+ intel_gt_mcr_multicast_write_fw(gt, XEHP_PAT_INDEX(3), GEN8_PPAT_UC);
+ intel_gt_mcr_multicast_write_fw(gt, XEHP_PAT_INDEX(4), GEN8_PPAT_WB);
+ intel_gt_mcr_multicast_write_fw(gt, XEHP_PAT_INDEX(5), GEN8_PPAT_WB);
+ intel_gt_mcr_multicast_write_fw(gt, XEHP_PAT_INDEX(6), GEN8_PPAT_WB);
+ intel_gt_mcr_multicast_write_fw(gt, XEHP_PAT_INDEX(7), GEN8_PPAT_WB);
+ intel_gt_mcr_unlock(gt, flags);
+
+ intel_uncore_forcewake_put(gt->uncore, fw);
}
static void icl_setup_private_ppat(struct intel_uncore *uncore)
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.h b/drivers/gpu/drm/i915/gt/intel_gtt.h
index 4d75ba4bb41d..5a775310d3fc 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.h
@@ -355,19 +355,6 @@ struct i915_ggtt {
bool do_idle_maps;
- /**
- * @pte_lost: Are ptes lost on resume?
- *
- * Whether the system was recently restored from hibernate and
- * thus may have lost pte content.
- */
- bool pte_lost;
-
- /**
- * @probed_pte: Probed pte value on suspend. Re-checked on resume.
- */
- u64 probed_pte;
-
int mtrr;
/** Bit 6 swizzling required for X tiling */
@@ -390,6 +377,9 @@ struct i915_ggtt {
struct mutex error_mutex;
struct drm_mm_node error_capture;
struct drm_mm_node uc_fw;
+
+ /** List of GTs mapping this GGTT */
+ struct list_head gt_list;
};
struct i915_ppgtt {
@@ -579,11 +569,10 @@ void intel_ggtt_unbind_vma(struct i915_address_space *vm,
int i915_ggtt_probe_hw(struct drm_i915_private *i915);
int i915_ggtt_init_hw(struct drm_i915_private *i915);
int i915_ggtt_enable_hw(struct drm_i915_private *i915);
-void i915_ggtt_enable_guc(struct i915_ggtt *ggtt);
-void i915_ggtt_disable_guc(struct i915_ggtt *ggtt);
int i915_init_ggtt(struct drm_i915_private *i915);
void i915_ggtt_driver_release(struct drm_i915_private *i915);
void i915_ggtt_driver_late_release(struct drm_i915_private *i915);
+struct i915_ggtt *i915_ggtt_create(struct drm_i915_private *i915);
static inline bool i915_ggtt_has_aperture(const struct i915_ggtt *ggtt)
{
@@ -600,17 +589,6 @@ bool i915_ggtt_resume_vm(struct i915_address_space *vm);
void i915_ggtt_suspend(struct i915_ggtt *gtt);
void i915_ggtt_resume(struct i915_ggtt *ggtt);
-/**
- * i915_ggtt_mark_pte_lost - Mark ggtt ptes as lost or clear such a marking
- * @i915 The device private.
- * @val whether the ptes should be marked as lost.
- *
- * In some cases pte content is retained across suspend, but typically lost
- * across hibernate. Typically they should be marked as lost on
- * hibernation restore and such marking cleared on suspend.
- */
-void i915_ggtt_mark_pte_lost(struct drm_i915_private *i915, bool val);
-
void
fill_page_dma(struct drm_i915_gem_object *p, const u64 val, unsigned int count);
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index bbeeb6dde7ae..81a96c52a92b 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -1316,16 +1316,16 @@ static u32 *
dg2_emit_rcs_hang_wabb(const struct intel_context *ce, u32 *cs)
{
*cs++ = MI_LOAD_REGISTER_IMM(1);
- *cs++ = i915_mmio_reg_offset(GEN12_STATE_ACK_DEBUG);
+ *cs++ = i915_mmio_reg_offset(GEN12_STATE_ACK_DEBUG(ce->engine->mmio_base));
*cs++ = 0x21;
*cs++ = MI_LOAD_REGISTER_REG;
*cs++ = i915_mmio_reg_offset(RING_NOPID(ce->engine->mmio_base));
- *cs++ = i915_mmio_reg_offset(GEN12_CULLBIT1);
+ *cs++ = i915_mmio_reg_offset(XEHP_CULLBIT1);
*cs++ = MI_LOAD_REGISTER_REG;
*cs++ = i915_mmio_reg_offset(RING_NOPID(ce->engine->mmio_base));
- *cs++ = i915_mmio_reg_offset(GEN12_CULLBIT2);
+ *cs++ = i915_mmio_reg_offset(XEHP_CULLBIT2);
return cs;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_migrate.c b/drivers/gpu/drm/i915/gt/intel_migrate.c
index 5fb74e71f27b..3f638f198796 100644
--- a/drivers/gpu/drm/i915/gt/intel_migrate.c
+++ b/drivers/gpu/drm/i915/gt/intel_migrate.c
@@ -352,6 +352,8 @@ static int max_pte_pkt_size(struct i915_request *rq, int pkt)
return pkt;
}
+#define I915_EMIT_PTE_NUM_DWORDS 6
+
static int emit_pte(struct i915_request *rq,
struct sgt_dma *it,
enum i915_cache_level cache_level,
@@ -393,7 +395,7 @@ static int emit_pte(struct i915_request *rq,
offset += (u64)rq->engine->instance << 32;
- cs = intel_ring_begin(rq, 6);
+ cs = intel_ring_begin(rq, I915_EMIT_PTE_NUM_DWORDS);
if (IS_ERR(cs))
return PTR_ERR(cs);
@@ -416,7 +418,7 @@ static int emit_pte(struct i915_request *rq,
intel_ring_advance(rq, cs);
intel_ring_update_space(ring);
- cs = intel_ring_begin(rq, 6);
+ cs = intel_ring_begin(rq, I915_EMIT_PTE_NUM_DWORDS);
if (IS_ERR(cs))
return PTR_ERR(cs);
diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c
index 49fdd509527a..69b489e8dfed 100644
--- a/drivers/gpu/drm/i915/gt/intel_mocs.c
+++ b/drivers/gpu/drm/i915/gt/intel_mocs.c
@@ -613,14 +613,17 @@ static u32 l3cc_combine(u16 low, u16 high)
static void init_l3cc_table(struct intel_gt *gt,
const struct drm_i915_mocs_table *table)
{
+ unsigned long flags;
unsigned int i;
u32 l3cc;
+ intel_gt_mcr_lock(gt, &flags);
for_each_l3cc(l3cc, table, i)
if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 50))
intel_gt_mcr_multicast_write_fw(gt, XEHP_LNCFCMOCS(i), l3cc);
else
intel_uncore_write_fw(gt->uncore, GEN9_LNCFCMOCS(i), l3cc);
+ intel_gt_mcr_unlock(gt, flags);
}
void intel_mocs_init_engine(struct intel_engine_cs *engine)
diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.c b/drivers/gpu/drm/i915/gt/intel_rc6.c
index 2ee4051e4d96..5c91622dfca4 100644
--- a/drivers/gpu/drm/i915/gt/intel_rc6.c
+++ b/drivers/gpu/drm/i915/gt/intel_rc6.c
@@ -301,7 +301,7 @@ static int chv_rc6_init(struct intel_rc6 *rc6)
pcbr = intel_uncore_read(uncore, VLV_PCBR);
if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) {
drm_dbg(&i915->drm, "BIOS didn't set up PCBR, fixing up\n");
- paddr = i915->dsm.end + 1 - pctx_size;
+ paddr = i915->dsm.stolen.end + 1 - pctx_size;
GEM_BUG_ON(paddr > U32_MAX);
pctx_paddr = (paddr & ~4095);
@@ -325,7 +325,7 @@ static int vlv_rc6_init(struct intel_rc6 *rc6)
/* BIOS set it up already, grab the pre-alloc'd space */
resource_size_t pcbr_offset;
- pcbr_offset = (pcbr & ~4095) - i915->dsm.start;
+ pcbr_offset = (pcbr & ~4095) - i915->dsm.stolen.start;
pctx = i915_gem_object_create_region_at(i915->mm.stolen_region,
pcbr_offset,
pctx_size,
@@ -354,10 +354,10 @@ static int vlv_rc6_init(struct intel_rc6 *rc6)
}
GEM_BUG_ON(range_overflows_end_t(u64,
- i915->dsm.start,
+ i915->dsm.stolen.start,
pctx->stolen->start,
U32_MAX));
- pctx_paddr = i915->dsm.start + pctx->stolen->start;
+ pctx_paddr = i915->dsm.stolen.start + pctx->stolen->start;
intel_uncore_write(uncore, VLV_PCBR, pctx_paddr);
out:
@@ -448,8 +448,8 @@ static bool bxt_check_bios_rc6_setup(struct intel_rc6 *rc6)
*/
rc6_ctx_base =
intel_uncore_read(uncore, RC6_CTX_BASE) & RC6_CTX_BASE_MASK;
- if (!(rc6_ctx_base >= i915->dsm_reserved.start &&
- rc6_ctx_base + PAGE_SIZE < i915->dsm_reserved.end)) {
+ if (!(rc6_ctx_base >= i915->dsm.reserved.start &&
+ rc6_ctx_base + PAGE_SIZE < i915->dsm.reserved.end)) {
drm_dbg(&i915->drm, "RC6 Base address not as expected.\n");
enable_rc6 = false;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_renderstate.c b/drivers/gpu/drm/i915/gt/intel_renderstate.c
index 9c1ae070ee7b..4b56ec3743cf 100644
--- a/drivers/gpu/drm/i915/gt/intel_renderstate.c
+++ b/drivers/gpu/drm/i915/gt/intel_renderstate.c
@@ -63,7 +63,7 @@ static int render_state_setup(struct intel_renderstate *so,
u32 s = rodata->batch[i];
if (i * 4 == rodata->reloc[reloc_index]) {
- u64 r = s + so->vma->node.start;
+ u64 r = s + i915_vma_offset(so->vma);
s = lower_32_bits(r);
if (HAS_64BIT_RELOC(i915)) {
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index 78dc5e493c62..0bb9094fdacd 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -35,16 +35,6 @@
/* XXX How to handle concurrent GGTT updates using tiling registers? */
#define RESET_UNDER_STOP_MACHINE 0
-static void rmw_set_fw(struct intel_uncore *uncore, i915_reg_t reg, u32 set)
-{
- intel_uncore_rmw_fw(uncore, reg, 0, set);
-}
-
-static void rmw_clear_fw(struct intel_uncore *uncore, i915_reg_t reg, u32 clr)
-{
- intel_uncore_rmw_fw(uncore, reg, clr, 0);
-}
-
static void client_mark_guilty(struct i915_gem_context *ctx, bool banned)
{
struct drm_i915_file_private *file_priv = ctx->file_priv;
@@ -212,7 +202,7 @@ static int g4x_do_reset(struct intel_gt *gt,
int ret;
/* WaVcpClkGateDisableForMediaReset:ctg,elk */
- rmw_set_fw(uncore, VDECCLK_GATE_D, VCP_UNIT_CLOCK_GATE_DISABLE);
+ intel_uncore_rmw_fw(uncore, VDECCLK_GATE_D, 0, VCP_UNIT_CLOCK_GATE_DISABLE);
intel_uncore_posting_read_fw(uncore, VDECCLK_GATE_D);
pci_write_config_byte(pdev, I915_GDRST,
@@ -234,7 +224,7 @@ static int g4x_do_reset(struct intel_gt *gt,
out:
pci_write_config_byte(pdev, I915_GDRST, 0);
- rmw_clear_fw(uncore, VDECCLK_GATE_D, VCP_UNIT_CLOCK_GATE_DISABLE);
+ intel_uncore_rmw_fw(uncore, VDECCLK_GATE_D, VCP_UNIT_CLOCK_GATE_DISABLE, 0);
intel_uncore_posting_read_fw(uncore, VDECCLK_GATE_D);
return ret;
@@ -470,7 +460,7 @@ static int gen11_lock_sfc(struct intel_engine_cs *engine,
* to reset it as well (we will unlock it once the reset sequence is
* completed).
*/
- rmw_set_fw(uncore, sfc_lock.lock_reg, sfc_lock.lock_bit);
+ intel_uncore_rmw_fw(uncore, sfc_lock.lock_reg, 0, sfc_lock.lock_bit);
ret = __intel_wait_for_register_fw(uncore,
sfc_lock.ack_reg,
@@ -520,7 +510,7 @@ static void gen11_unlock_sfc(struct intel_engine_cs *engine)
get_sfc_forced_lock_data(engine, &sfc_lock);
- rmw_clear_fw(uncore, sfc_lock.lock_reg, sfc_lock.lock_bit);
+ intel_uncore_rmw_fw(uncore, sfc_lock.lock_reg, sfc_lock.lock_bit, 0);
}
static int __gen11_reset_engines(struct intel_gt *gt,
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
index 356c787e11d3..827adb0cfaea 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
@@ -897,7 +897,7 @@ static int clear_residuals(struct i915_request *rq)
}
ret = engine->emit_bb_start(rq,
- engine->wa_ctx.vma->node.start, 0,
+ i915_vma_offset(engine->wa_ctx.vma), 0,
0);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c
index 9ad3bc7201cb..f5d7b5126433 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.c
+++ b/drivers/gpu/drm/i915/gt/intel_rps.c
@@ -7,6 +7,7 @@
#include <drm/i915_drm.h>
+#include "display/intel_display.h"
#include "i915_drv.h"
#include "i915_irq.h"
#include "intel_breadcrumbs.h"
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.h b/drivers/gpu/drm/i915/gt/intel_rps.h
index 9e1cad9ba0e9..c622962c6bef 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.h
+++ b/drivers/gpu/drm/i915/gt/intel_rps.h
@@ -12,6 +12,9 @@
struct i915_request;
struct drm_printer;
+#define GT_FREQUENCY_MULTIPLIER 50
+#define GEN9_FREQ_SCALER 3
+
void intel_rps_init_early(struct intel_rps *rps);
void intel_rps_init(struct intel_rps *rps);
void intel_rps_sanitize(struct intel_rps *rps);
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index 949c19339015..485c5cc5d0f9 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -30,6 +30,9 @@
* creation to have a "primed golden context", i.e. a context image that
* already contains the changes needed to all the registers.
*
+ * Context workarounds should be implemented in the \*_ctx_workarounds_init()
+ * variants respective to the targeted platforms.
+ *
* - Engine workarounds: the list of these WAs is applied whenever the specific
* engine is reset. It's also possible that a set of engine classes share a
* common power domain and they are reset together. This happens on some
@@ -42,15 +45,28 @@
* saves/restores their values before/after the reset takes place. See
* ``drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c`` for reference.
*
+ * Workarounds for registers specific to RCS and CCS should be implemented in
+ * rcs_engine_wa_init() and ccs_engine_wa_init(), respectively; those for
+ * registers belonging to BCS, VCS or VECS should be implemented in
+ * xcs_engine_wa_init(). Workarounds for registers not belonging to a specific
+ * engine's MMIO range but that are part of of the common RCS/CCS reset domain
+ * should be implemented in general_render_compute_wa_init().
+ *
* - GT workarounds: the list of these WAs is applied whenever these registers
* revert to their default values: on GPU reset, suspend/resume [1]_, etc.
*
+ * GT workarounds should be implemented in the \*_gt_workarounds_init()
+ * variants respective to the targeted platforms.
+ *
* - Register whitelist: some workarounds need to be implemented in userspace,
* but need to touch privileged registers. The whitelist in the kernel
* instructs the hardware to allow the access to happen. From the kernel side,
* this is just a special case of a MMIO workaround (as we write the list of
* these to/be-whitelisted registers to some special HW registers).
*
+ * Register whitelisting should be done in the \*_whitelist_build() variants
+ * respective to the targeted platforms.
+ *
* - Workaround batchbuffers: buffers that get executed automatically by the
* hardware on every HW context restore. These buffers are created and
* programmed in the default context so the hardware always go through those
@@ -225,6 +241,12 @@ wa_write(struct i915_wa_list *wal, i915_reg_t reg, u32 set)
}
static void
+wa_mcr_write(struct i915_wa_list *wal, i915_mcr_reg_t reg, u32 set)
+{
+ wa_mcr_write_clr_set(wal, reg, ~0, set);
+}
+
+static void
wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 set)
{
wa_write_clr_set(wal, reg, set, set);
@@ -777,7 +799,7 @@ static void dg2_ctx_workarounds_init(struct intel_engine_cs *engine,
/* Wa_18018764978:dg2 */
if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_C0, STEP_FOREVER) ||
IS_DG2_G11(engine->i915) || IS_DG2_G12(engine->i915))
- wa_masked_en(wal, PSS_MODE2, SCOREBOARD_STALL_FLUSH_CONTROL);
+ wa_mcr_masked_en(wal, XEHP_PSS_MODE2, SCOREBOARD_STALL_FLUSH_CONTROL);
/* Wa_15010599737:dg2 */
wa_mcr_masked_en(wal, CHICKEN_RASTER_1, DIS_SF_ROUND_NEAREST_EVEN);
@@ -786,6 +808,32 @@ static void dg2_ctx_workarounds_init(struct intel_engine_cs *engine,
wa_masked_en(wal, CACHE_MODE_1, MSAA_OPTIMIZATION_REDUC_DISABLE);
}
+static void mtl_ctx_workarounds_init(struct intel_engine_cs *engine,
+ struct i915_wa_list *wal)
+{
+ struct drm_i915_private *i915 = engine->i915;
+
+ if (IS_MTL_GRAPHICS_STEP(i915, M, STEP_A0, STEP_B0) ||
+ IS_MTL_GRAPHICS_STEP(i915, P, STEP_A0, STEP_B0)) {
+ /* Wa_14014947963 */
+ wa_masked_field_set(wal, VF_PREEMPTION,
+ PREEMPTION_VERTEX_COUNT, 0x4000);
+
+ /* Wa_16013271637 */
+ wa_mcr_masked_en(wal, XEHP_SLICE_COMMON_ECO_CHICKEN1,
+ MSC_MSAA_REODER_BUF_BYPASS_DISABLE);
+
+ /* Wa_18019627453 */
+ wa_mcr_masked_en(wal, VFLSKPD, VF_PREFETCH_TLB_DIS);
+
+ /* Wa_18018764978 */
+ wa_mcr_masked_en(wal, XEHP_PSS_MODE2, SCOREBOARD_STALL_FLUSH_CONTROL);
+ }
+
+ /* Wa_18019271663 */
+ wa_masked_en(wal, CACHE_MODE_1, MSAA_OPTIMIZATION_REDUC_DISABLE);
+}
+
static void fakewa_disable_nestedbb_mode(struct intel_engine_cs *engine,
struct i915_wa_list *wal)
{
@@ -872,7 +920,9 @@ __intel_engine_init_ctx_wa(struct intel_engine_cs *engine,
if (engine->class != RENDER_CLASS)
goto done;
- if (IS_PONTEVECCHIO(i915))
+ if (IS_METEORLAKE(i915))
+ mtl_ctx_workarounds_init(engine, wal);
+ else if (IS_PONTEVECCHIO(i915))
; /* noop; none at this time */
else if (IS_DG2(i915))
dg2_ctx_workarounds_init(engine, wal);
@@ -1355,6 +1405,13 @@ icl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
GAMT_CHKN_BIT_REG,
GAMT_CHKN_DISABLE_L3_COH_PIPE);
+ /*
+ * Wa_1408615072:icl,ehl (vsunit)
+ * Wa_1407596294:icl,ehl (hsunit)
+ */
+ wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE,
+ VSUNIT_CLKGATE_DIS | HSUNIT_CLKGATE_DIS);
+
/* Wa_1407352427:icl,ehl */
wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2,
PSDUNIT_CLKGATE_DIS);
@@ -1515,6 +1572,13 @@ xehpsdv_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
/* Wa_14011060649:xehpsdv */
wa_14011060649(gt, wal);
+
+ /* Wa_14012362059:xehpsdv */
+ wa_mcr_write_or(wal, XEHP_MERT_MOD_CTRL, FORCE_MISS_FTLB);
+
+ /* Wa_14014368820:xehpsdv */
+ wa_mcr_write_or(wal, XEHP_GAMCNTRL_CTRL,
+ INVALIDATION_BROADCAST_MODE_DIS | GLOBAL_INVALIDATION_MODE);
}
static void
@@ -1555,6 +1619,12 @@ dg2_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
DSS_ROUTER_CLKGATE_DIS);
}
+ if (IS_DG2_GRAPHICS_STEP(gt->i915, G10, STEP_A0, STEP_B0) ||
+ IS_DG2_GRAPHICS_STEP(gt->i915, G11, STEP_A0, STEP_B0)) {
+ /* Wa_14012362059:dg2 */
+ wa_mcr_write_or(wal, XEHP_MERT_MOD_CTRL, FORCE_MISS_FTLB);
+ }
+
if (IS_DG2_GRAPHICS_STEP(gt->i915, G10, STEP_A0, STEP_B0)) {
/* Wa_14010948348:dg2_g10 */
wa_write_or(wal, UNSLCGCTL9430, MSQDUNIT_CLKGATE_DIS);
@@ -1600,6 +1670,12 @@ dg2_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
/* Wa_14011028019:dg2_g10 */
wa_mcr_write_or(wal, SSMCGCTL9530, RTFUNIT_CLKGATE_DIS);
+
+ /* Wa_14010680813:dg2_g10 */
+ wa_mcr_write_or(wal, XEHP_GAMSTLB_CTRL,
+ CONTROL_BLOCK_CLKGATE_DIS |
+ EGRESS_BLOCK_CLKGATE_DIS |
+ TAG_BLOCK_CLKGATE_DIS);
}
/* Wa_14014830051:dg2 */
@@ -1613,7 +1689,17 @@ dg2_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
wa_mcr_write_or(wal, XEHP_SQCM, EN_32B_ACCESS);
/* Wa_14015795083 */
- wa_mcr_write_clr(wal, GEN8_MISCCPCTL, GEN12_DOP_CLOCK_GATE_RENDER_ENABLE);
+ wa_write_clr(wal, GEN7_MISCCPCTL, GEN12_DOP_CLOCK_GATE_RENDER_ENABLE);
+
+ /* Wa_18018781329 */
+ wa_mcr_write_or(wal, RENDER_MOD_CTRL, FORCE_MISS_FTLB);
+ wa_mcr_write_or(wal, COMP_MOD_CTRL, FORCE_MISS_FTLB);
+ wa_mcr_write_or(wal, XEHP_VDBX_MOD_CTRL, FORCE_MISS_FTLB);
+ wa_mcr_write_or(wal, XEHP_VEBX_MOD_CTRL, FORCE_MISS_FTLB);
+
+ /* Wa_1509235366:dg2 */
+ wa_mcr_write_or(wal, XEHP_GAMCNTRL_CTRL,
+ INVALIDATION_BROADCAST_MODE_DIS | GLOBAL_INVALIDATION_MODE);
}
static void
@@ -1622,13 +1708,27 @@ pvc_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
pvc_init_mcr(gt, wal);
/* Wa_14015795083 */
- wa_mcr_write_clr(wal, GEN8_MISCCPCTL, GEN12_DOP_CLOCK_GATE_RENDER_ENABLE);
+ wa_write_clr(wal, GEN7_MISCCPCTL, GEN12_DOP_CLOCK_GATE_RENDER_ENABLE);
+
+ /* Wa_18018781329 */
+ wa_mcr_write_or(wal, RENDER_MOD_CTRL, FORCE_MISS_FTLB);
+ wa_mcr_write_or(wal, COMP_MOD_CTRL, FORCE_MISS_FTLB);
+ wa_mcr_write_or(wal, XEHP_VDBX_MOD_CTRL, FORCE_MISS_FTLB);
+ wa_mcr_write_or(wal, XEHP_VEBX_MOD_CTRL, FORCE_MISS_FTLB);
}
static void
xelpg_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
- /* FIXME: Actual workarounds will be added in future patch(es) */
+ if (IS_MTL_GRAPHICS_STEP(gt->i915, M, STEP_A0, STEP_B0) ||
+ IS_MTL_GRAPHICS_STEP(gt->i915, P, STEP_A0, STEP_B0)) {
+ /* Wa_14014830051 */
+ wa_mcr_write_clr(wal, SARB_CHICKEN1, COMP_CKN_IN);
+
+ /* Wa_18018781329 */
+ wa_mcr_write_or(wal, RENDER_MOD_CTRL, FORCE_MISS_FTLB);
+ wa_mcr_write_or(wal, COMP_MOD_CTRL, FORCE_MISS_FTLB);
+ }
/*
* Unlike older platforms, we no longer setup implicit steering here;
@@ -1640,7 +1740,17 @@ xelpg_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
static void
xelpmp_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
- /* FIXME: Actual workarounds will be added in future patch(es) */
+ if (IS_MTL_MEDIA_STEP(gt->i915, STEP_A0, STEP_B0)) {
+ /*
+ * Wa_18018781329
+ *
+ * Note that although these registers are MCR on the primary
+ * GT, the media GT's versions are regular singleton registers.
+ */
+ wa_write_or(wal, XELPMP_GSC_MOD_CTRL, FORCE_MISS_FTLB);
+ wa_write_or(wal, XELPMP_VDBX_MOD_CTRL, FORCE_MISS_FTLB);
+ wa_write_or(wal, XELPMP_VEBX_MOD_CTRL, FORCE_MISS_FTLB);
+ }
debug_dump_steering(gt);
}
@@ -1760,7 +1870,8 @@ static void wa_list_apply(const struct i915_wa_list *wal)
fw = wal_get_fw_for_rmw(uncore, wal);
- spin_lock_irqsave(&uncore->lock, flags);
+ intel_gt_mcr_lock(gt, &flags);
+ spin_lock(&uncore->lock);
intel_uncore_forcewake_get__locked(uncore, fw);
for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
@@ -1789,7 +1900,8 @@ static void wa_list_apply(const struct i915_wa_list *wal)
}
intel_uncore_forcewake_put__locked(uncore, fw);
- spin_unlock_irqrestore(&uncore->lock, flags);
+ spin_unlock(&uncore->lock);
+ intel_gt_mcr_unlock(gt, flags);
}
void intel_gt_apply_workarounds(struct intel_gt *gt)
@@ -1810,7 +1922,8 @@ static bool wa_list_verify(struct intel_gt *gt,
fw = wal_get_fw_for_rmw(uncore, wal);
- spin_lock_irqsave(&uncore->lock, flags);
+ intel_gt_mcr_lock(gt, &flags);
+ spin_lock(&uncore->lock);
intel_uncore_forcewake_get__locked(uncore, fw);
for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
@@ -1820,7 +1933,8 @@ static bool wa_list_verify(struct intel_gt *gt,
wal->name, from);
intel_uncore_forcewake_put__locked(uncore, fw);
- spin_unlock_irqrestore(&uncore->lock, flags);
+ spin_unlock(&uncore->lock);
+ intel_gt_mcr_unlock(gt, flags);
return ok;
}
@@ -2164,7 +2278,9 @@ void intel_engine_init_whitelist(struct intel_engine_cs *engine)
wa_init_start(w, engine->gt, "whitelist", engine->name);
- if (IS_PONTEVECCHIO(i915))
+ if (IS_METEORLAKE(i915))
+ ; /* noop; none at this time */
+ else if (IS_PONTEVECCHIO(i915))
pvc_whitelist_build(engine);
else if (IS_DG2(i915))
dg2_whitelist_build(engine);
@@ -2274,24 +2390,35 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
{
struct drm_i915_private *i915 = engine->i915;
- if (IS_DG2(i915)) {
- /* Wa_1509235366:dg2 */
- wa_write_or(wal, GEN12_GAMCNTRL_CTRL, INVALIDATION_BROADCAST_MODE_DIS |
- GLOBAL_INVALIDATION_MODE);
+ if (IS_MTL_GRAPHICS_STEP(i915, M, STEP_A0, STEP_B0) ||
+ IS_MTL_GRAPHICS_STEP(i915, P, STEP_A0, STEP_B0)) {
+ /* Wa_22014600077 */
+ wa_mcr_masked_en(wal, GEN10_CACHE_MODE_SS,
+ ENABLE_EU_COUNT_FOR_TDL_FLUSH);
}
- if (IS_DG2_GRAPHICS_STEP(i915, G11, STEP_A0, STEP_B0)) {
- /* Wa_14013392000:dg2_g11 */
- wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN2, GEN12_ENABLE_LARGE_GRF_MODE);
- }
-
- if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_B0, STEP_FOREVER) ||
+ if (IS_MTL_GRAPHICS_STEP(i915, M, STEP_A0, STEP_B0) ||
+ IS_MTL_GRAPHICS_STEP(i915, P, STEP_A0, STEP_B0) ||
+ IS_DG2_GRAPHICS_STEP(i915, G10, STEP_B0, STEP_FOREVER) ||
IS_DG2_G11(i915) || IS_DG2_G12(i915)) {
- /* Wa_1509727124:dg2 */
+ /* Wa_1509727124 */
wa_mcr_masked_en(wal, GEN10_SAMPLER_MODE,
SC_DISABLE_POWER_OPTIMIZATION_EBB);
}
+ if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_B0, STEP_FOREVER) ||
+ IS_DG2_G11(i915) || IS_DG2_G12(i915) ||
+ IS_MTL_GRAPHICS_STEP(i915, M, STEP_A0, STEP_B0)) {
+ /* Wa_22012856258 */
+ wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN2,
+ GEN12_DISABLE_READ_SUPPRESSION);
+ }
+
+ if (IS_DG2_GRAPHICS_STEP(i915, G11, STEP_A0, STEP_B0)) {
+ /* Wa_14013392000:dg2_g11 */
+ wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN2, GEN12_ENABLE_LARGE_GRF_MODE);
+ }
+
if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_A0, STEP_B0) ||
IS_DG2_GRAPHICS_STEP(i915, G11, STEP_A0, STEP_B0)) {
/* Wa_14012419201:dg2 */
@@ -2299,21 +2426,6 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
GEN12_DISABLE_HDR_PAST_PAYLOAD_HOLD_FIX);
}
- if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_B0, STEP_C0) ||
- IS_DG2_G11(i915)) {
- /*
- * Wa_22012826095:dg2
- * Wa_22013059131:dg2
- */
- wa_mcr_write_clr_set(wal, LSC_CHICKEN_BIT_0_UDW,
- MAXREQS_PER_BANK,
- REG_FIELD_PREP(MAXREQS_PER_BANK, 2));
-
- /* Wa_22013059131:dg2 */
- wa_mcr_write_or(wal, LSC_CHICKEN_BIT_0,
- FORCE_1_SUB_MESSAGE_PER_FRAGMENT);
- }
-
/* Wa_1308578152:dg2_g10 when first gslice is fused off */
if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_B0, STEP_C0) &&
needs_wa_1308578152(engine)) {
@@ -2323,14 +2435,6 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_B0, STEP_FOREVER) ||
IS_DG2_G11(i915) || IS_DG2_G12(i915)) {
- /* Wa_22013037850:dg2 */
- wa_mcr_write_or(wal, LSC_CHICKEN_BIT_0_UDW,
- DISABLE_128B_EVICTION_COMMAND_UDW);
-
- /* Wa_22012856258:dg2 */
- wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN2,
- GEN12_DISABLE_READ_SUPPRESSION);
-
/*
* Wa_22010960976:dg2
* Wa_14013347512:dg2
@@ -2346,16 +2450,6 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
*/
wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN,
MDQ_ARBITRATION_MODE | UGM_BACKUP_MODE);
-
- /*
- * Wa_14010918519:dg2_g10
- *
- * LSC_CHICKEN_BIT_0 always reads back as 0 is this stepping,
- * so ignoring verification.
- */
- wa_mcr_add(wal, LSC_CHICKEN_BIT_0_UDW, 0,
- FORCE_SLM_FENCE_SCOPE_TO_TILE | FORCE_UGM_FENCE_SCOPE_TO_TILE,
- 0, false);
}
if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_A0, STEP_B0)) {
@@ -2379,18 +2473,6 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
wa_mcr_masked_en(wal, GEN9_HALF_SLICE_CHICKEN7,
DG2_DISABLE_ROUND_ENABLE_ALLOW_FOR_SSLA);
- if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_A0, STEP_B0)) {
- /* Wa_14010680813:dg2_g10 */
- wa_write_or(wal, GEN12_GAMSTLB_CTRL, CONTROL_BLOCK_CLKGATE_DIS |
- EGRESS_BLOCK_CLKGATE_DIS | TAG_BLOCK_CLKGATE_DIS);
- }
-
- if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_A0, STEP_B0) ||
- IS_DG2_GRAPHICS_STEP(engine->i915, G11, STEP_A0, STEP_B0)) {
- /* Wa_14012362059:dg2 */
- wa_mcr_write_or(wal, XEHP_MERT_MOD_CTRL, FORCE_MISS_FTLB);
- }
-
if (IS_DG2_GRAPHICS_STEP(i915, G11, STEP_B0, STEP_FOREVER) ||
IS_DG2_G10(i915)) {
/* Wa_22014600077:dg2 */
@@ -2540,13 +2622,6 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
GEN11_ENABLE_32_PLANE_MODE);
/*
- * Wa_1408615072:icl,ehl (vsunit)
- * Wa_1407596294:icl,ehl (hsunit)
- */
- wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE,
- VSUNIT_CLKGATE_DIS | HSUNIT_CLKGATE_DIS);
-
- /*
* Wa_1408767742:icl[a2..forever],ehl[all]
* Wa_1605460711:icl[a0..c0]
*/
@@ -2901,27 +2976,14 @@ add_render_compute_tuning_settings(struct drm_i915_private *i915,
struct i915_wa_list *wal)
{
if (IS_PONTEVECCHIO(i915)) {
- wa_write(wal, XEHPC_L3SCRUB,
- SCRUB_CL_DWNGRADE_SHARED | SCRUB_RATE_4B_PER_CLK);
+ wa_mcr_write(wal, XEHPC_L3SCRUB,
+ SCRUB_CL_DWNGRADE_SHARED | SCRUB_RATE_4B_PER_CLK);
+ wa_mcr_masked_en(wal, XEHPC_LNCFMISCCFGREG0, XEHPC_HOSTCACHEEN);
}
if (IS_DG2(i915)) {
wa_mcr_write_or(wal, XEHP_L3SCQREG7, BLEND_FILL_CACHING_OPT_DIS);
wa_mcr_write_clr_set(wal, RT_CTRL, STACKID_CTRL, STACKID_CTRL_512);
-
- /*
- * This is also listed as Wa_22012654132 for certain DG2
- * steppings, but the tuning setting programming is a superset
- * since it applies to all DG2 variants and steppings.
- *
- * Note that register 0xE420 is write-only and cannot be read
- * back for verification on DG2 (due to Wa_14012342262), so
- * we need to explicitly skip the readback.
- */
- wa_mcr_add(wal, GEN10_CACHE_MODE_SS, 0,
- _MASKED_BIT_ENABLE(ENABLE_PREFETCH_INTO_IC),
- 0 /* write-only, so skip validation */,
- true);
}
/*
@@ -2932,6 +2994,9 @@ add_render_compute_tuning_settings(struct drm_i915_private *i915,
if (INTEL_INFO(i915)->tuning_thread_rr_after_dep)
wa_mcr_masked_field_set(wal, GEN9_ROW_CHICKEN4, THREAD_EX_ARB_MODE,
THREAD_EX_ARB_MODE_RR_AFTER_DEP);
+
+ if (GRAPHICS_VER(i915) == 12 && GRAPHICS_VER_FULL(i915) < IP_VER(12, 50))
+ wa_write_clr(wal, GEN8_GARBCNTL, GEN12_BUS_HASH_CTL_BIT_EXC);
}
/*
@@ -2950,9 +3015,60 @@ general_render_compute_wa_init(struct intel_engine_cs *engine, struct i915_wa_li
add_render_compute_tuning_settings(i915, wal);
+ if (IS_MTL_GRAPHICS_STEP(i915, M, STEP_A0, STEP_B0) ||
+ IS_MTL_GRAPHICS_STEP(i915, P, STEP_A0, STEP_B0) ||
+ IS_DG2_GRAPHICS_STEP(i915, G10, STEP_B0, STEP_FOREVER) ||
+ IS_DG2_G11(i915) || IS_DG2_G12(i915)) {
+ /* Wa_22013037850 */
+ wa_mcr_write_or(wal, LSC_CHICKEN_BIT_0_UDW,
+ DISABLE_128B_EVICTION_COMMAND_UDW);
+ }
+
+ if (IS_MTL_GRAPHICS_STEP(i915, M, STEP_A0, STEP_B0) ||
+ IS_MTL_GRAPHICS_STEP(i915, P, STEP_A0, STEP_B0) ||
+ IS_PONTEVECCHIO(i915) ||
+ IS_DG2(i915)) {
+ /* Wa_22014226127 */
+ wa_mcr_write_or(wal, LSC_CHICKEN_BIT_0, DISABLE_D8_D16_COASLESCE);
+ }
+
+ if (IS_MTL_GRAPHICS_STEP(i915, M, STEP_A0, STEP_B0) ||
+ IS_MTL_GRAPHICS_STEP(i915, P, STEP_A0, STEP_B0) ||
+ IS_DG2(i915)) {
+ /* Wa_18017747507 */
+ wa_masked_en(wal, VFG_PREEMPTION_CHICKEN, POLYGON_TRIFAN_LINELOOP_DISABLE);
+ }
+
+ if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_B0, STEP_C0) ||
+ IS_DG2_G11(i915)) {
+ /*
+ * Wa_22012826095:dg2
+ * Wa_22013059131:dg2
+ */
+ wa_mcr_write_clr_set(wal, LSC_CHICKEN_BIT_0_UDW,
+ MAXREQS_PER_BANK,
+ REG_FIELD_PREP(MAXREQS_PER_BANK, 2));
+
+ /* Wa_22013059131:dg2 */
+ wa_mcr_write_or(wal, LSC_CHICKEN_BIT_0,
+ FORCE_1_SUB_MESSAGE_PER_FRAGMENT);
+ }
+
+ if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_A0, STEP_B0)) {
+ /*
+ * Wa_14010918519:dg2_g10
+ *
+ * LSC_CHICKEN_BIT_0 always reads back as 0 is this stepping,
+ * so ignoring verification.
+ */
+ wa_mcr_add(wal, LSC_CHICKEN_BIT_0_UDW, 0,
+ FORCE_SLM_FENCE_SCOPE_TO_TILE | FORCE_UGM_FENCE_SCOPE_TO_TILE,
+ 0, false);
+ }
+
if (IS_PONTEVECCHIO(i915)) {
/* Wa_16016694945 */
- wa_masked_en(wal, XEHPC_LNCFMISCCFGREG0, XEHPC_OVRLSCCC);
+ wa_mcr_masked_en(wal, XEHPC_LNCFMISCCFGREG0, XEHPC_OVRLSCCC);
}
if (IS_XEHPSDV(i915)) {
@@ -2978,30 +3094,14 @@ general_render_compute_wa_init(struct intel_engine_cs *engine, struct i915_wa_li
wa_mcr_masked_dis(wal, MLTICTXCTL, TDONRENDER);
wa_mcr_write_or(wal, L3SQCREG1_CCS0, FLUSHALLNONCOH);
}
-
- /* Wa_14012362059:xehpsdv */
- wa_mcr_write_or(wal, XEHP_MERT_MOD_CTRL, FORCE_MISS_FTLB);
-
- /* Wa_14014368820:xehpsdv */
- wa_write_or(wal, GEN12_GAMCNTRL_CTRL, INVALIDATION_BROADCAST_MODE_DIS |
- GLOBAL_INVALIDATION_MODE);
}
if (IS_DG2(i915) || IS_PONTEVECCHIO(i915)) {
/* Wa_14015227452:dg2,pvc */
wa_mcr_masked_en(wal, GEN9_ROW_CHICKEN4, XEHP_DIS_BBL_SYSPIPE);
- /* Wa_22014226127:dg2,pvc */
- wa_mcr_write_or(wal, LSC_CHICKEN_BIT_0, DISABLE_D8_D16_COASLESCE);
-
/* Wa_16015675438:dg2,pvc */
wa_masked_en(wal, FF_SLICE_CS_CHICKEN2, GEN12_PERF_FIX_BALANCING_CFE_DISABLE);
-
- /* Wa_18018781329:dg2,pvc */
- wa_mcr_write_or(wal, RENDER_MOD_CTRL, FORCE_MISS_FTLB);
- wa_mcr_write_or(wal, COMP_MOD_CTRL, FORCE_MISS_FTLB);
- wa_mcr_write_or(wal, VDBX_MOD_CTRL, FORCE_MISS_FTLB);
- wa_mcr_write_or(wal, VEBX_MOD_CTRL, FORCE_MISS_FTLB);
}
if (IS_DG2(i915)) {
@@ -3010,10 +3110,20 @@ general_render_compute_wa_init(struct intel_engine_cs *engine, struct i915_wa_li
* Wa_22015475538:dg2
*/
wa_mcr_write_or(wal, LSC_CHICKEN_BIT_0_UDW, DIS_CHAIN_2XSIMD8);
-
- /* Wa_18017747507:dg2 */
- wa_masked_en(wal, VFG_PREEMPTION_CHICKEN, POLYGON_TRIFAN_LINELOOP_DISABLE);
}
+
+ if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_A0, STEP_C0) || IS_DG2_G11(i915))
+ /*
+ * Wa_22012654132
+ *
+ * Note that register 0xE420 is write-only and cannot be read
+ * back for verification on DG2 (due to Wa_14012342262), so
+ * we need to explicitly skip the readback.
+ */
+ wa_mcr_add(wal, GEN10_CACHE_MODE_SS, 0,
+ _MASKED_BIT_ENABLE(ENABLE_PREFETCH_INTO_IC),
+ 0 /* write-only, so skip validation */,
+ true);
}
static void
diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_cs.c b/drivers/gpu/drm/i915/gt/selftest_engine_cs.c
index 881b64f3e7b9..542ce6d2de19 100644
--- a/drivers/gpu/drm/i915/gt/selftest_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/selftest_engine_cs.c
@@ -178,7 +178,7 @@ static int perf_mi_bb_start(void *arg)
goto out;
err = rq->engine->emit_bb_start(rq,
- batch->node.start, 8,
+ i915_vma_offset(batch), 8,
0);
if (err)
goto out;
@@ -321,7 +321,7 @@ static int perf_mi_noop(void *arg)
goto out;
err = rq->engine->emit_bb_start(rq,
- base->node.start, 8,
+ i915_vma_offset(base), 8,
0);
if (err)
goto out;
@@ -331,8 +331,8 @@ static int perf_mi_noop(void *arg)
goto out;
err = rq->engine->emit_bb_start(rq,
- nop->node.start,
- nop->node.size,
+ i915_vma_offset(nop),
+ i915_vma_size(nop),
0);
if (err)
goto out;
diff --git a/drivers/gpu/drm/i915/gt/selftest_execlists.c b/drivers/gpu/drm/i915/gt/selftest_execlists.c
index ab2e9a6a2452..736b89a8ecf5 100644
--- a/drivers/gpu/drm/i915/gt/selftest_execlists.c
+++ b/drivers/gpu/drm/i915/gt/selftest_execlists.c
@@ -2737,11 +2737,11 @@ static int create_gang(struct intel_engine_cs *engine,
MI_SEMAPHORE_POLL |
MI_SEMAPHORE_SAD_EQ_SDD;
*cs++ = 0;
- *cs++ = lower_32_bits(vma->node.start);
- *cs++ = upper_32_bits(vma->node.start);
+ *cs++ = lower_32_bits(i915_vma_offset(vma));
+ *cs++ = upper_32_bits(i915_vma_offset(vma));
if (*prev) {
- u64 offset = (*prev)->batch->node.start;
+ u64 offset = i915_vma_offset((*prev)->batch);
/* Terminate the spinner in the next lower priority batch. */
*cs++ = MI_STORE_DWORD_IMM_GEN4;
@@ -2763,13 +2763,11 @@ static int create_gang(struct intel_engine_cs *engine,
rq->batch = i915_vma_get(vma);
i915_request_get(rq);
- i915_vma_lock(vma);
- err = i915_vma_move_to_active(vma, rq, 0);
+ err = igt_vma_move_to_active_unlocked(vma, rq, 0);
if (!err)
err = rq->engine->emit_bb_start(rq,
- vma->node.start,
+ i915_vma_offset(vma),
PAGE_SIZE, 0);
- i915_vma_unlock(vma);
i915_request_add(rq);
if (err)
goto err_rq;
@@ -3095,7 +3093,7 @@ create_gpr_user(struct intel_engine_cs *engine,
*cs++ = MI_MATH_ADD;
*cs++ = MI_MATH_STORE(MI_MATH_REG(i), MI_MATH_REG_ACCU);
- addr = result->node.start + offset + i * sizeof(*cs);
+ addr = i915_vma_offset(result) + offset + i * sizeof(*cs);
*cs++ = MI_STORE_REGISTER_MEM_GEN8;
*cs++ = CS_GPR(engine, 2 * i);
*cs++ = lower_32_bits(addr);
@@ -3105,8 +3103,8 @@ create_gpr_user(struct intel_engine_cs *engine,
MI_SEMAPHORE_POLL |
MI_SEMAPHORE_SAD_GTE_SDD;
*cs++ = i;
- *cs++ = lower_32_bits(result->node.start);
- *cs++ = upper_32_bits(result->node.start);
+ *cs++ = lower_32_bits(i915_vma_offset(result));
+ *cs++ = upper_32_bits(i915_vma_offset(result));
}
*cs++ = MI_BATCH_BUFFER_END;
@@ -3177,16 +3175,14 @@ create_gpr_client(struct intel_engine_cs *engine,
goto out_batch;
}
- i915_vma_lock(vma);
- err = i915_vma_move_to_active(vma, rq, 0);
- i915_vma_unlock(vma);
+ err = igt_vma_move_to_active_unlocked(vma, rq, 0);
i915_vma_lock(batch);
if (!err)
err = i915_vma_move_to_active(batch, rq, 0);
if (!err)
err = rq->engine->emit_bb_start(rq,
- batch->node.start,
+ i915_vma_offset(batch),
PAGE_SIZE, 0);
i915_vma_unlock(batch);
i915_vma_unpin(batch);
@@ -3514,13 +3510,11 @@ static int smoke_submit(struct preempt_smoke *smoke,
}
if (vma) {
- i915_vma_lock(vma);
- err = i915_vma_move_to_active(vma, rq, 0);
+ err = igt_vma_move_to_active_unlocked(vma, rq, 0);
if (!err)
err = rq->engine->emit_bb_start(rq,
- vma->node.start,
+ i915_vma_offset(vma),
PAGE_SIZE, 0);
- i915_vma_unlock(vma);
}
i915_request_add(rq);
diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
index bc05ef48c194..8b0d84f2aad2 100644
--- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
+++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
@@ -96,7 +96,8 @@ err_ctx:
static u64 hws_address(const struct i915_vma *hws,
const struct i915_request *rq)
{
- return hws->node.start + offset_in_page(sizeof(u32)*rq->fence.context);
+ return i915_vma_offset(hws) +
+ offset_in_page(sizeof(u32) * rq->fence.context);
}
static struct i915_request *
@@ -180,8 +181,8 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
*batch++ = MI_NOOP;
*batch++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
- *batch++ = lower_32_bits(vma->node.start);
- *batch++ = upper_32_bits(vma->node.start);
+ *batch++ = lower_32_bits(i915_vma_offset(vma));
+ *batch++ = upper_32_bits(i915_vma_offset(vma));
} else if (GRAPHICS_VER(gt->i915) >= 6) {
*batch++ = MI_STORE_DWORD_IMM_GEN4;
*batch++ = 0;
@@ -194,7 +195,7 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
*batch++ = MI_NOOP;
*batch++ = MI_BATCH_BUFFER_START | 1 << 8;
- *batch++ = lower_32_bits(vma->node.start);
+ *batch++ = lower_32_bits(i915_vma_offset(vma));
} else if (GRAPHICS_VER(gt->i915) >= 4) {
*batch++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
*batch++ = 0;
@@ -207,7 +208,7 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
*batch++ = MI_NOOP;
*batch++ = MI_BATCH_BUFFER_START | 2 << 6;
- *batch++ = lower_32_bits(vma->node.start);
+ *batch++ = lower_32_bits(i915_vma_offset(vma));
} else {
*batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
*batch++ = lower_32_bits(hws_address(hws, rq));
@@ -219,7 +220,7 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
*batch++ = MI_NOOP;
*batch++ = MI_BATCH_BUFFER_START | 2 << 6;
- *batch++ = lower_32_bits(vma->node.start);
+ *batch++ = lower_32_bits(i915_vma_offset(vma));
}
*batch++ = MI_BATCH_BUFFER_END; /* not reached */
intel_gt_chipset_flush(engine->gt);
@@ -234,7 +235,7 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
if (GRAPHICS_VER(gt->i915) <= 5)
flags |= I915_DISPATCH_SECURE;
- err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, flags);
+ err = rq->engine->emit_bb_start(rq, i915_vma_offset(vma), PAGE_SIZE, flags);
cancel_rq:
if (err) {
diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c
index 7c56ffd2c659..a78a3d2c2e16 100644
--- a/drivers/gpu/drm/i915/gt/selftest_lrc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
@@ -599,9 +599,7 @@ __gpr_read(struct intel_context *ce, struct i915_vma *scratch, u32 *slot)
*cs++ = 0;
}
- i915_vma_lock(scratch);
- err = i915_vma_move_to_active(scratch, rq, EXEC_OBJECT_WRITE);
- i915_vma_unlock(scratch);
+ err = igt_vma_move_to_active_unlocked(scratch, rq, EXEC_OBJECT_WRITE);
i915_request_get(rq);
i915_request_add(rq);
@@ -1030,8 +1028,8 @@ store_context(struct intel_context *ce, struct i915_vma *scratch)
while (len--) {
*cs++ = MI_STORE_REGISTER_MEM_GEN8;
*cs++ = hw[dw];
- *cs++ = lower_32_bits(scratch->node.start + x);
- *cs++ = upper_32_bits(scratch->node.start + x);
+ *cs++ = lower_32_bits(i915_vma_offset(scratch) + x);
+ *cs++ = upper_32_bits(i915_vma_offset(scratch) + x);
dw += 2;
x += 4;
@@ -1098,8 +1096,8 @@ record_registers(struct intel_context *ce,
*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
*cs++ = MI_BATCH_BUFFER_START_GEN8 | BIT(8);
- *cs++ = lower_32_bits(b_before->node.start);
- *cs++ = upper_32_bits(b_before->node.start);
+ *cs++ = lower_32_bits(i915_vma_offset(b_before));
+ *cs++ = upper_32_bits(i915_vma_offset(b_before));
*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
*cs++ = MI_SEMAPHORE_WAIT |
@@ -1114,8 +1112,8 @@ record_registers(struct intel_context *ce,
*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
*cs++ = MI_BATCH_BUFFER_START_GEN8 | BIT(8);
- *cs++ = lower_32_bits(b_after->node.start);
- *cs++ = upper_32_bits(b_after->node.start);
+ *cs++ = lower_32_bits(i915_vma_offset(b_after));
+ *cs++ = upper_32_bits(i915_vma_offset(b_after));
intel_ring_advance(rq, cs);
@@ -1236,8 +1234,8 @@ static int poison_registers(struct intel_context *ce, u32 poison, u32 *sema)
*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
*cs++ = MI_BATCH_BUFFER_START_GEN8 | BIT(8);
- *cs++ = lower_32_bits(batch->node.start);
- *cs++ = upper_32_bits(batch->node.start);
+ *cs++ = lower_32_bits(i915_vma_offset(batch));
+ *cs++ = upper_32_bits(i915_vma_offset(batch));
*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
*cs++ = i915_ggtt_offset(ce->engine->status_page.vma) +
diff --git a/drivers/gpu/drm/i915/gt/selftest_migrate.c b/drivers/gpu/drm/i915/gt/selftest_migrate.c
index 0dc5309c90a4..e677f2da093d 100644
--- a/drivers/gpu/drm/i915/gt/selftest_migrate.c
+++ b/drivers/gpu/drm/i915/gt/selftest_migrate.c
@@ -8,6 +8,7 @@
#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_lmem.h"
+#include "selftests/igt_spinner.h"
#include "selftests/i915_random.h"
static const unsigned int sizes[] = {
@@ -486,7 +487,8 @@ global_clear(struct intel_migrate *migrate, u32 sz, struct rnd_state *prng)
static int live_migrate_copy(void *arg)
{
- struct intel_migrate *migrate = arg;
+ struct intel_gt *gt = arg;
+ struct intel_migrate *migrate = &gt->migrate;
struct drm_i915_private *i915 = migrate->context->engine->i915;
I915_RND_STATE(prng);
int i;
@@ -507,7 +509,8 @@ static int live_migrate_copy(void *arg)
static int live_migrate_clear(void *arg)
{
- struct intel_migrate *migrate = arg;
+ struct intel_gt *gt = arg;
+ struct intel_migrate *migrate = &gt->migrate;
struct drm_i915_private *i915 = migrate->context->engine->i915;
I915_RND_STATE(prng);
int i;
@@ -527,6 +530,149 @@ static int live_migrate_clear(void *arg)
return 0;
}
+struct spinner_timer {
+ struct timer_list timer;
+ struct igt_spinner spin;
+};
+
+static void spinner_kill(struct timer_list *timer)
+{
+ struct spinner_timer *st = from_timer(st, timer, timer);
+
+ igt_spinner_end(&st->spin);
+ pr_info("%s\n", __func__);
+}
+
+static int live_emit_pte_full_ring(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_migrate *migrate = &gt->migrate;
+ struct drm_i915_private *i915 = migrate->context->engine->i915;
+ struct drm_i915_gem_object *obj;
+ struct intel_context *ce;
+ struct i915_request *rq, *prev;
+ struct spinner_timer st;
+ struct sgt_dma it;
+ int len, sz, err;
+ u32 *cs;
+
+ /*
+ * Simple regression test to check that we don't trample the
+ * rq->reserved_space when returning from emit_pte(), if the ring is
+ * nearly full.
+ */
+
+ if (igt_spinner_init(&st.spin, to_gt(i915)))
+ return -ENOMEM;
+
+ obj = i915_gem_object_create_internal(i915, 2 * PAGE_SIZE);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto out_spinner;
+ }
+
+ err = i915_gem_object_pin_pages_unlocked(obj);
+ if (err)
+ goto out_obj;
+
+ ce = intel_migrate_create_context(migrate);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ goto out_obj;
+ }
+
+ ce->ring_size = SZ_4K; /* Not too big */
+
+ err = intel_context_pin(ce);
+ if (err)
+ goto out_put;
+
+ rq = igt_spinner_create_request(&st.spin, ce, MI_ARB_CHECK);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out_unpin;
+ }
+
+ i915_request_add(rq);
+ if (!igt_wait_for_spinner(&st.spin, rq)) {
+ err = -EIO;
+ goto out_unpin;
+ }
+
+ /*
+ * Fill the rest of the ring leaving I915_EMIT_PTE_NUM_DWORDS +
+ * ring->reserved_space at the end. To actually emit the PTEs we require
+ * slightly more than I915_EMIT_PTE_NUM_DWORDS, since our object size is
+ * greater than PAGE_SIZE. The correct behaviour is to wait for more
+ * ring space in emit_pte(), otherwise we trample on the reserved_space
+ * resulting in crashes when later submitting the rq.
+ */
+
+ prev = NULL;
+ do {
+ if (prev)
+ i915_request_add(rq);
+
+ rq = i915_request_create(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out_unpin;
+ }
+
+ sz = (rq->ring->space - rq->reserved_space) / sizeof(u32) -
+ I915_EMIT_PTE_NUM_DWORDS;
+ sz = min_t(u32, sz, (SZ_1K - rq->reserved_space) / sizeof(u32) -
+ I915_EMIT_PTE_NUM_DWORDS);
+ cs = intel_ring_begin(rq, sz);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto out_rq;
+ }
+
+ memset32(cs, MI_NOOP, sz);
+ cs += sz;
+ intel_ring_advance(rq, cs);
+
+ pr_info("%s emit=%u sz=%d\n", __func__, rq->ring->emit, sz);
+
+ prev = rq;
+ } while (rq->ring->space > (rq->reserved_space +
+ I915_EMIT_PTE_NUM_DWORDS * sizeof(u32)));
+
+ timer_setup_on_stack(&st.timer, spinner_kill, 0);
+ mod_timer(&st.timer, jiffies + 2 * HZ);
+
+ /*
+ * This should wait for the spinner to be killed, otherwise we should go
+ * down in flames when doing i915_request_add().
+ */
+ pr_info("%s emite_pte ring space=%u\n", __func__, rq->ring->space);
+ it = sg_sgt(obj->mm.pages->sgl);
+ len = emit_pte(rq, &it, obj->cache_level, false, 0, CHUNK_SZ);
+ if (!len) {
+ err = -EINVAL;
+ goto out_rq;
+ }
+ if (len < 0) {
+ err = len;
+ goto out_rq;
+ }
+
+out_rq:
+ i915_request_add(rq); /* GEM_BUG_ON(rq->reserved_space > ring->space)? */
+ del_timer_sync(&st.timer);
+ destroy_timer_on_stack(&st.timer);
+out_unpin:
+ intel_context_unpin(ce);
+out_put:
+ intel_context_put(ce);
+out_obj:
+ i915_gem_object_put(obj);
+out_spinner:
+ igt_spinner_fini(&st.spin);
+ return err;
+}
+
struct threaded_migrate {
struct intel_migrate *migrate;
struct task_struct *tsk;
@@ -593,7 +739,10 @@ static int __thread_migrate_copy(void *arg)
static int thread_migrate_copy(void *arg)
{
- return threaded_migrate(arg, __thread_migrate_copy, 0);
+ struct intel_gt *gt = arg;
+ struct intel_migrate *migrate = &gt->migrate;
+
+ return threaded_migrate(migrate, __thread_migrate_copy, 0);
}
static int __thread_global_copy(void *arg)
@@ -605,7 +754,10 @@ static int __thread_global_copy(void *arg)
static int thread_global_copy(void *arg)
{
- return threaded_migrate(arg, __thread_global_copy, 0);
+ struct intel_gt *gt = arg;
+ struct intel_migrate *migrate = &gt->migrate;
+
+ return threaded_migrate(migrate, __thread_global_copy, 0);
}
static int __thread_migrate_clear(void *arg)
@@ -624,12 +776,18 @@ static int __thread_global_clear(void *arg)
static int thread_migrate_clear(void *arg)
{
- return threaded_migrate(arg, __thread_migrate_clear, 0);
+ struct intel_gt *gt = arg;
+ struct intel_migrate *migrate = &gt->migrate;
+
+ return threaded_migrate(migrate, __thread_migrate_clear, 0);
}
static int thread_global_clear(void *arg)
{
- return threaded_migrate(arg, __thread_global_clear, 0);
+ struct intel_gt *gt = arg;
+ struct intel_migrate *migrate = &gt->migrate;
+
+ return threaded_migrate(migrate, __thread_global_clear, 0);
}
int intel_migrate_live_selftests(struct drm_i915_private *i915)
@@ -637,6 +795,7 @@ int intel_migrate_live_selftests(struct drm_i915_private *i915)
static const struct i915_subtest tests[] = {
SUBTEST(live_migrate_copy),
SUBTEST(live_migrate_clear),
+ SUBTEST(live_emit_pte_full_ring),
SUBTEST(thread_migrate_copy),
SUBTEST(thread_migrate_clear),
SUBTEST(thread_global_copy),
@@ -647,7 +806,7 @@ int intel_migrate_live_selftests(struct drm_i915_private *i915)
if (!gt->migrate.context)
return 0;
- return i915_subtests(tests, &gt->migrate);
+ return intel_gt_live_subtests(tests, gt);
}
static struct drm_i915_gem_object *
diff --git a/drivers/gpu/drm/i915/gt/selftest_mocs.c b/drivers/gpu/drm/i915/gt/selftest_mocs.c
index f27cc28608d4..ca009a6a13bd 100644
--- a/drivers/gpu/drm/i915/gt/selftest_mocs.c
+++ b/drivers/gpu/drm/i915/gt/selftest_mocs.c
@@ -228,9 +228,7 @@ static int check_mocs_engine(struct live_mocs *arg,
if (IS_ERR(rq))
return PTR_ERR(rq);
- i915_vma_lock(vma);
- err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
- i915_vma_unlock(vma);
+ err = igt_vma_move_to_active_unlocked(vma, rq, EXEC_OBJECT_WRITE);
/* Read the mocs tables back using SRM */
offset = i915_ggtt_offset(vma);
diff --git a/drivers/gpu/drm/i915/gt/selftest_reset.c b/drivers/gpu/drm/i915/gt/selftest_reset.c
index 37c38bdd5f47..a9e0a91bc0e0 100644
--- a/drivers/gpu/drm/i915/gt/selftest_reset.c
+++ b/drivers/gpu/drm/i915/gt/selftest_reset.c
@@ -20,7 +20,7 @@ __igt_reset_stolen(struct intel_gt *gt,
const char *msg)
{
struct i915_ggtt *ggtt = gt->ggtt;
- const struct resource *dsm = &gt->i915->dsm;
+ const struct resource *dsm = &gt->i915->dsm.stolen;
resource_size_t num_pages, page;
struct intel_engine_cs *engine;
intel_wakeref_t wakeref;
diff --git a/drivers/gpu/drm/i915/gt/selftest_ring_submission.c b/drivers/gpu/drm/i915/gt/selftest_ring_submission.c
index 70f9ac1ec2c7..87ceb0f374b6 100644
--- a/drivers/gpu/drm/i915/gt/selftest_ring_submission.c
+++ b/drivers/gpu/drm/i915/gt/selftest_ring_submission.c
@@ -50,7 +50,7 @@ static struct i915_vma *create_wally(struct intel_engine_cs *engine)
} else {
*cs++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
}
- *cs++ = vma->node.start + 4000;
+ *cs++ = i915_vma_offset(vma) + 4000;
*cs++ = STACK_MAGIC;
*cs++ = MI_BATCH_BUFFER_END;
diff --git a/drivers/gpu/drm/i915/gt/selftest_rps.c b/drivers/gpu/drm/i915/gt/selftest_rps.c
index 39f1b7564170..6755bbc4ebda 100644
--- a/drivers/gpu/drm/i915/gt/selftest_rps.c
+++ b/drivers/gpu/drm/i915/gt/selftest_rps.c
@@ -122,14 +122,14 @@ create_spin_counter(struct intel_engine_cs *engine,
if (srm) {
*cs++ = MI_STORE_REGISTER_MEM_GEN8;
*cs++ = i915_mmio_reg_offset(CS_GPR(COUNT));
- *cs++ = lower_32_bits(vma->node.start + end * sizeof(*cs));
- *cs++ = upper_32_bits(vma->node.start + end * sizeof(*cs));
+ *cs++ = lower_32_bits(i915_vma_offset(vma) + end * sizeof(*cs));
+ *cs++ = upper_32_bits(i915_vma_offset(vma) + end * sizeof(*cs));
}
}
*cs++ = MI_BATCH_BUFFER_START_GEN8;
- *cs++ = lower_32_bits(vma->node.start + loop * sizeof(*cs));
- *cs++ = upper_32_bits(vma->node.start + loop * sizeof(*cs));
+ *cs++ = lower_32_bits(i915_vma_offset(vma) + loop * sizeof(*cs));
+ *cs++ = upper_32_bits(i915_vma_offset(vma) + loop * sizeof(*cs));
GEM_BUG_ON(cs - base > end);
i915_gem_object_flush_map(obj);
@@ -655,7 +655,7 @@ int live_rps_frequency_cs(void *arg)
err = i915_vma_move_to_active(vma, rq, 0);
if (!err)
err = rq->engine->emit_bb_start(rq,
- vma->node.start,
+ i915_vma_offset(vma),
PAGE_SIZE, 0);
i915_request_add(rq);
if (err)
@@ -794,7 +794,7 @@ int live_rps_frequency_srm(void *arg)
err = i915_vma_move_to_active(vma, rq, 0);
if (!err)
err = rq->engine->emit_bb_start(rq,
- vma->node.start,
+ i915_vma_offset(vma),
PAGE_SIZE, 0);
i915_request_add(rq);
if (err)
diff --git a/drivers/gpu/drm/i915/gt/selftest_workarounds.c b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
index 96e3861706d6..14a8b25b6204 100644
--- a/drivers/gpu/drm/i915/gt/selftest_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
@@ -138,9 +138,7 @@ read_nonprivs(struct intel_context *ce)
goto err_pin;
}
- i915_vma_lock(vma);
- err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
- i915_vma_unlock(vma);
+ err = igt_vma_move_to_active_unlocked(vma, rq, EXEC_OBJECT_WRITE);
if (err)
goto err_req;
@@ -521,7 +519,7 @@ static int check_dirty_whitelist(struct intel_context *ce)
for (i = 0; i < engine->whitelist.count; i++) {
u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);
struct i915_gem_ww_ctx ww;
- u64 addr = scratch->node.start;
+ u64 addr = i915_vma_offset(scratch);
struct i915_request *rq;
u32 srm, lrm, rsvd;
u32 expect;
@@ -640,7 +638,7 @@ retry:
goto err_request;
err = engine->emit_bb_start(rq,
- batch->node.start, PAGE_SIZE,
+ i915_vma_offset(batch), PAGE_SIZE,
0);
if (err)
goto err_request;
@@ -853,9 +851,7 @@ static int read_whitelisted_registers(struct intel_context *ce,
if (IS_ERR(rq))
return PTR_ERR(rq);
- i915_vma_lock(results);
- err = i915_vma_move_to_active(results, rq, EXEC_OBJECT_WRITE);
- i915_vma_unlock(results);
+ err = igt_vma_move_to_active_unlocked(results, rq, EXEC_OBJECT_WRITE);
if (err)
goto err_req;
@@ -870,7 +866,7 @@ static int read_whitelisted_registers(struct intel_context *ce,
}
for (i = 0; i < engine->whitelist.count; i++) {
- u64 offset = results->node.start + sizeof(u32) * i;
+ u64 offset = i915_vma_offset(results) + sizeof(u32) * i;
u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);
/* Clear non priv flags */
@@ -935,14 +931,12 @@ static int scrub_whitelisted_registers(struct intel_context *ce)
goto err_request;
}
- i915_vma_lock(batch);
- err = i915_vma_move_to_active(batch, rq, 0);
- i915_vma_unlock(batch);
+ err = igt_vma_move_to_active_unlocked(batch, rq, 0);
if (err)
goto err_request;
/* Perform the writes from an unprivileged "user" batch */
- err = engine->emit_bb_start(rq, batch->node.start, 0, 0);
+ err = engine->emit_bb_start(rq, i915_vma_offset(batch), 0, 0);
err_request:
err = request_add_sync(rq, err);
diff --git a/drivers/gpu/drm/i915/gt/shmem_utils.c b/drivers/gpu/drm/i915/gt/shmem_utils.c
index 402f085f3a02..449c9ed44382 100644
--- a/drivers/gpu/drm/i915/gt/shmem_utils.c
+++ b/drivers/gpu/drm/i915/gt/shmem_utils.c
@@ -8,6 +8,7 @@
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
+#include "i915_drv.h"
#include "gem/i915_gem_object.h"
#include "gem/i915_gem_lmem.h"
#include "shmem_utils.h"
@@ -32,6 +33,8 @@ struct file *shmem_create_from_data(const char *name, void *data, size_t len)
struct file *shmem_create_from_object(struct drm_i915_gem_object *obj)
{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ enum i915_map_type map_type;
struct file *file;
void *ptr;
@@ -41,8 +44,8 @@ struct file *shmem_create_from_object(struct drm_i915_gem_object *obj)
return file;
}
- ptr = i915_gem_object_pin_map_unlocked(obj, i915_gem_object_is_lmem(obj) ?
- I915_MAP_WC : I915_MAP_WB);
+ map_type = i915_coherent_map_type(i915, obj, true);
+ ptr = i915_gem_object_pin_map_unlocked(obj, map_type);
if (IS_ERR(ptr))
return ERR_CAST(ptr);
diff --git a/drivers/gpu/drm/i915/gt/uc/guc_capture_fwif.h b/drivers/gpu/drm/i915/gt/uc/guc_capture_fwif.h
index 3624abfd22d1..9d589c28f40f 100644
--- a/drivers/gpu/drm/i915/gt/uc/guc_capture_fwif.h
+++ b/drivers/gpu/drm/i915/gt/uc/guc_capture_fwif.h
@@ -73,7 +73,7 @@ struct guc_debug_capture_list_header {
struct guc_debug_capture_list {
struct guc_debug_capture_list_header header;
- struct guc_mmio_reg regs[0];
+ struct guc_mmio_reg regs[];
} __packed;
/**
@@ -125,7 +125,7 @@ struct guc_state_capture_header_t {
struct guc_state_capture_t {
struct guc_state_capture_header_t header;
- struct guc_mmio_reg mmio_entries[0];
+ struct guc_mmio_reg mmio_entries[];
} __packed;
enum guc_capture_group_types {
@@ -145,7 +145,7 @@ struct guc_state_capture_group_header_t {
/* this is the top level structure where an error-capture dump starts */
struct guc_state_capture_group_t {
struct guc_state_capture_group_header_t grp_header;
- struct guc_state_capture_t capture_entries[0];
+ struct guc_state_capture_t capture_entries[];
} __packed;
/**
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c
new file mode 100644
index 000000000000..e73d4440c5e8
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.c
@@ -0,0 +1,210 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#include "gt/intel_engine_pm.h"
+#include "gt/intel_gpu_commands.h"
+#include "gt/intel_gt.h"
+#include "gt/intel_ring.h"
+#include "intel_gsc_fw.h"
+
+#define GSC_FW_STATUS_REG _MMIO(0x116C40)
+#define GSC_FW_CURRENT_STATE REG_GENMASK(3, 0)
+#define GSC_FW_CURRENT_STATE_RESET 0
+#define GSC_FW_INIT_COMPLETE_BIT REG_BIT(9)
+
+static bool gsc_is_in_reset(struct intel_uncore *uncore)
+{
+ u32 fw_status = intel_uncore_read(uncore, GSC_FW_STATUS_REG);
+
+ return REG_FIELD_GET(GSC_FW_CURRENT_STATE, fw_status) ==
+ GSC_FW_CURRENT_STATE_RESET;
+}
+
+bool intel_gsc_uc_fw_init_done(struct intel_gsc_uc *gsc)
+{
+ struct intel_uncore *uncore = gsc_uc_to_gt(gsc)->uncore;
+ u32 fw_status = intel_uncore_read(uncore, GSC_FW_STATUS_REG);
+
+ return fw_status & GSC_FW_INIT_COMPLETE_BIT;
+}
+
+static int emit_gsc_fw_load(struct i915_request *rq, struct intel_gsc_uc *gsc)
+{
+ u32 offset = i915_ggtt_offset(gsc->local);
+ u32 *cs;
+
+ cs = intel_ring_begin(rq, 4);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ *cs++ = GSC_FW_LOAD;
+ *cs++ = lower_32_bits(offset);
+ *cs++ = upper_32_bits(offset);
+ *cs++ = (gsc->local->size / SZ_4K) | HECI1_FW_LIMIT_VALID;
+
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+static int gsc_fw_load(struct intel_gsc_uc *gsc)
+{
+ struct intel_context *ce = gsc->ce;
+ struct i915_request *rq;
+ int err;
+
+ if (!ce)
+ return -ENODEV;
+
+ rq = i915_request_create(ce);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ if (ce->engine->emit_init_breadcrumb) {
+ err = ce->engine->emit_init_breadcrumb(rq);
+ if (err)
+ goto out_rq;
+ }
+
+ err = emit_gsc_fw_load(rq, gsc);
+ if (err)
+ goto out_rq;
+
+ err = ce->engine->emit_flush(rq, 0);
+
+out_rq:
+ i915_request_get(rq);
+
+ if (unlikely(err))
+ i915_request_set_error_once(rq, err);
+
+ i915_request_add(rq);
+
+ if (!err && i915_request_wait(rq, 0, msecs_to_jiffies(500)) < 0)
+ err = -ETIME;
+
+ i915_request_put(rq);
+
+ if (err)
+ drm_err(&gsc_uc_to_gt(gsc)->i915->drm,
+ "Request submission for GSC load failed (%d)\n",
+ err);
+
+ return err;
+}
+
+static int gsc_fw_load_prepare(struct intel_gsc_uc *gsc)
+{
+ struct intel_gt *gt = gsc_uc_to_gt(gsc);
+ struct drm_i915_private *i915 = gt->i915;
+ struct drm_i915_gem_object *obj;
+ void *src, *dst;
+
+ if (!gsc->local)
+ return -ENODEV;
+
+ obj = gsc->local->obj;
+
+ if (obj->base.size < gsc->fw.size)
+ return -ENOSPC;
+
+ dst = i915_gem_object_pin_map_unlocked(obj,
+ i915_coherent_map_type(i915, obj, true));
+ if (IS_ERR(dst))
+ return PTR_ERR(dst);
+
+ src = i915_gem_object_pin_map_unlocked(gsc->fw.obj,
+ i915_coherent_map_type(i915, gsc->fw.obj, true));
+ if (IS_ERR(src)) {
+ i915_gem_object_unpin_map(obj);
+ return PTR_ERR(src);
+ }
+
+ memset(dst, 0, obj->base.size);
+ memcpy(dst, src, gsc->fw.size);
+
+ i915_gem_object_unpin_map(gsc->fw.obj);
+ i915_gem_object_unpin_map(obj);
+
+ return 0;
+}
+
+static int gsc_fw_wait(struct intel_gt *gt)
+{
+ return intel_wait_for_register(gt->uncore,
+ GSC_FW_STATUS_REG,
+ GSC_FW_INIT_COMPLETE_BIT,
+ GSC_FW_INIT_COMPLETE_BIT,
+ 500);
+}
+
+int intel_gsc_uc_fw_upload(struct intel_gsc_uc *gsc)
+{
+ struct intel_gt *gt = gsc_uc_to_gt(gsc);
+ struct intel_uc_fw *gsc_fw = &gsc->fw;
+ int err;
+
+ /* check current fw status */
+ if (intel_gsc_uc_fw_init_done(gsc)) {
+ if (GEM_WARN_ON(!intel_uc_fw_is_loaded(gsc_fw)))
+ intel_uc_fw_change_status(gsc_fw, INTEL_UC_FIRMWARE_TRANSFERRED);
+ return -EEXIST;
+ }
+
+ if (!intel_uc_fw_is_loadable(gsc_fw))
+ return -ENOEXEC;
+
+ /* FW blob is ok, so clean the status */
+ intel_uc_fw_sanitize(&gsc->fw);
+
+ if (!gsc_is_in_reset(gt->uncore))
+ return -EIO;
+
+ err = gsc_fw_load_prepare(gsc);
+ if (err)
+ goto fail;
+
+ /*
+ * GSC is only killed by an FLR, so we need to trigger one on unload to
+ * make sure we stop it. This is because we assign a chunk of memory to
+ * the GSC as part of the FW load , so we need to make sure it stops
+ * using it when we release it to the system on driver unload. Note that
+ * this is not a problem of the unload per-se, because the GSC will not
+ * touch that memory unless there are requests for it coming from the
+ * driver; therefore, no accesses will happen while i915 is not loaded,
+ * but if we re-load the driver then the GSC might wake up and try to
+ * access that old memory location again.
+ * Given that an FLR is a very disruptive action (see the FLR function
+ * for details), we want to do it as the last action before releasing
+ * the access to the MMIO bar, which means we need to do it as part of
+ * the primary uncore cleanup.
+ * An alternative approach to the FLR would be to use a memory location
+ * that survives driver unload, like e.g. stolen memory, and keep the
+ * GSC loaded across reloads. However, this requires us to make sure we
+ * preserve that memory location on unload and then determine and
+ * reserve its offset on each subsequent load, which is not trivial, so
+ * it is easier to just kill everything and start fresh.
+ */
+ intel_uncore_set_flr_on_fini(&gt->i915->uncore);
+
+ err = gsc_fw_load(gsc);
+ if (err)
+ goto fail;
+
+ err = gsc_fw_wait(gt);
+ if (err)
+ goto fail;
+
+ /* FW is not fully operational until we enable SW proxy */
+ intel_uc_fw_change_status(gsc_fw, INTEL_UC_FIRMWARE_TRANSFERRED);
+
+ drm_info(&gt->i915->drm, "Loaded GSC firmware %s\n",
+ gsc_fw->file_selected.path);
+
+ return 0;
+
+fail:
+ return intel_uc_fw_mark_load_failed(gsc_fw, err);
+}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.h b/drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.h
new file mode 100644
index 000000000000..4b5dbb44afb4
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_gsc_fw.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef _INTEL_GSC_FW_H_
+#define _INTEL_GSC_FW_H_
+
+#include <linux/types.h>
+
+struct intel_gsc_uc;
+
+int intel_gsc_uc_fw_upload(struct intel_gsc_uc *gsc);
+bool intel_gsc_uc_fw_init_done(struct intel_gsc_uc *gsc);
+#endif
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.c
new file mode 100644
index 000000000000..fd21dbd2663b
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.c
@@ -0,0 +1,137 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#include <linux/types.h>
+
+#include "gt/intel_gt.h"
+#include "intel_gsc_uc.h"
+#include "intel_gsc_fw.h"
+#include "i915_drv.h"
+
+static void gsc_work(struct work_struct *work)
+{
+ struct intel_gsc_uc *gsc = container_of(work, typeof(*gsc), work);
+ struct intel_gt *gt = gsc_uc_to_gt(gsc);
+ intel_wakeref_t wakeref;
+
+ with_intel_runtime_pm(gt->uncore->rpm, wakeref)
+ intel_gsc_uc_fw_upload(gsc);
+}
+
+static bool gsc_engine_supported(struct intel_gt *gt)
+{
+ intel_engine_mask_t mask;
+
+ /*
+ * We reach here from i915_driver_early_probe for the primary GT before
+ * its engine mask is set, so we use the device info engine mask for it.
+ * For other GTs we expect the GT-specific mask to be set before we
+ * call this function.
+ */
+ GEM_BUG_ON(!gt_is_root(gt) && !gt->info.engine_mask);
+
+ if (gt_is_root(gt))
+ mask = RUNTIME_INFO(gt->i915)->platform_engine_mask;
+ else
+ mask = gt->info.engine_mask;
+
+ return __HAS_ENGINE(mask, GSC0);
+}
+
+void intel_gsc_uc_init_early(struct intel_gsc_uc *gsc)
+{
+ intel_uc_fw_init_early(&gsc->fw, INTEL_UC_FW_TYPE_GSC);
+ INIT_WORK(&gsc->work, gsc_work);
+
+ /* we can arrive here from i915_driver_early_probe for primary
+ * GT with it being not fully setup hence check device info's
+ * engine mask
+ */
+ if (!gsc_engine_supported(gsc_uc_to_gt(gsc))) {
+ intel_uc_fw_change_status(&gsc->fw, INTEL_UC_FIRMWARE_NOT_SUPPORTED);
+ return;
+ }
+}
+
+int intel_gsc_uc_init(struct intel_gsc_uc *gsc)
+{
+ static struct lock_class_key gsc_lock;
+ struct intel_gt *gt = gsc_uc_to_gt(gsc);
+ struct drm_i915_private *i915 = gt->i915;
+ struct intel_engine_cs *engine = gt->engine[GSC0];
+ struct intel_context *ce;
+ struct i915_vma *vma;
+ int err;
+
+ err = intel_uc_fw_init(&gsc->fw);
+ if (err)
+ goto out;
+
+ vma = intel_guc_allocate_vma(&gt->uc.guc, SZ_8M);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto out_fw;
+ }
+
+ gsc->local = vma;
+
+ ce = intel_engine_create_pinned_context(engine, engine->gt->vm, SZ_4K,
+ I915_GEM_HWS_GSC_ADDR,
+ &gsc_lock, "gsc_context");
+ if (IS_ERR(ce)) {
+ drm_err(&gt->i915->drm,
+ "failed to create GSC CS ctx for FW communication\n");
+ err = PTR_ERR(ce);
+ goto out_vma;
+ }
+
+ gsc->ce = ce;
+
+ intel_uc_fw_change_status(&gsc->fw, INTEL_UC_FIRMWARE_LOADABLE);
+
+ return 0;
+
+out_vma:
+ i915_vma_unpin_and_release(&gsc->local, 0);
+out_fw:
+ intel_uc_fw_fini(&gsc->fw);
+out:
+ i915_probe_error(i915, "failed with %d\n", err);
+ return err;
+}
+
+void intel_gsc_uc_fini(struct intel_gsc_uc *gsc)
+{
+ if (!intel_uc_fw_is_loadable(&gsc->fw))
+ return;
+
+ flush_work(&gsc->work);
+
+ if (gsc->ce)
+ intel_engine_destroy_pinned_context(fetch_and_zero(&gsc->ce));
+
+ i915_vma_unpin_and_release(&gsc->local, 0);
+
+ intel_uc_fw_fini(&gsc->fw);
+}
+
+void intel_gsc_uc_suspend(struct intel_gsc_uc *gsc)
+{
+ if (!intel_uc_fw_is_loadable(&gsc->fw))
+ return;
+
+ flush_work(&gsc->work);
+}
+
+void intel_gsc_uc_load_start(struct intel_gsc_uc *gsc)
+{
+ if (!intel_uc_fw_is_loadable(&gsc->fw))
+ return;
+
+ if (intel_gsc_uc_fw_init_done(gsc))
+ return;
+
+ queue_work(system_unbound_wq, &gsc->work);
+}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.h b/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.h
new file mode 100644
index 000000000000..03fd0a8e8db1
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef _INTEL_GSC_UC_H_
+#define _INTEL_GSC_UC_H_
+
+#include "intel_uc_fw.h"
+
+struct i915_vma;
+struct intel_context;
+
+struct intel_gsc_uc {
+ /* Generic uC firmware management */
+ struct intel_uc_fw fw;
+
+ /* GSC-specific additions */
+ struct i915_vma *local; /* private memory for GSC usage */
+ struct intel_context *ce; /* for submission to GSC FW via GSC engine */
+
+ struct work_struct work; /* for delayed load */
+};
+
+void intel_gsc_uc_init_early(struct intel_gsc_uc *gsc);
+int intel_gsc_uc_init(struct intel_gsc_uc *gsc);
+void intel_gsc_uc_fini(struct intel_gsc_uc *gsc);
+void intel_gsc_uc_suspend(struct intel_gsc_uc *gsc);
+void intel_gsc_uc_load_start(struct intel_gsc_uc *gsc);
+
+static inline bool intel_gsc_uc_is_supported(struct intel_gsc_uc *gsc)
+{
+ return intel_uc_fw_is_supported(&gsc->fw);
+}
+
+static inline bool intel_gsc_uc_is_wanted(struct intel_gsc_uc *gsc)
+{
+ return intel_uc_fw_is_enabled(&gsc->fw);
+}
+
+static inline bool intel_gsc_uc_is_used(struct intel_gsc_uc *gsc)
+{
+ GEM_BUG_ON(__intel_uc_fw_status(&gsc->fw) == INTEL_UC_FIRMWARE_SELECTED);
+ return intel_uc_fw_is_available(&gsc->fw);
+}
+
+#endif
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
index 52aede324788..d76508fa3af7 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
@@ -11,6 +11,7 @@
#include "intel_guc.h"
#include "intel_guc_ads.h"
#include "intel_guc_capture.h"
+#include "intel_guc_print.h"
#include "intel_guc_slpc.h"
#include "intel_guc_submission.h"
#include "i915_drv.h"
@@ -94,8 +95,8 @@ static void gen9_enable_guc_interrupts(struct intel_guc *guc)
assert_rpm_wakelock_held(&gt->i915->runtime_pm);
spin_lock_irq(gt->irq_lock);
- WARN_ON_ONCE(intel_uncore_read(gt->uncore, GEN8_GT_IIR(2)) &
- gt->pm_guc_events);
+ guc_WARN_ON_ONCE(guc, intel_uncore_read(gt->uncore, GEN8_GT_IIR(2)) &
+ gt->pm_guc_events);
gen6_gt_pm_enable_irq(gt, gt->pm_guc_events);
spin_unlock_irq(gt->irq_lock);
@@ -274,8 +275,9 @@ static u32 guc_ctl_wa_flags(struct intel_guc *guc)
if (IS_DG2_GRAPHICS_STEP(gt->i915, G10, STEP_A0, STEP_B0))
flags |= GUC_WA_GAM_CREDITS;
- /* Wa_14014475959:dg2 */
- if (IS_DG2(gt->i915))
+ /* Wa_14014475959 */
+ if (IS_MTL_GRAPHICS_STEP(gt->i915, M, STEP_A0, STEP_B0) ||
+ IS_DG2(gt->i915))
flags |= GUC_WA_HOLD_CCS_SWITCHOUT;
/*
@@ -289,7 +291,9 @@ static u32 guc_ctl_wa_flags(struct intel_guc *guc)
flags |= GUC_WA_DUAL_QUEUE;
/* Wa_22011802037: graphics version 11/12 */
- if (IS_GRAPHICS_VER(gt->i915, 11, 12))
+ if (IS_MTL_GRAPHICS_STEP(gt->i915, M, STEP_A0, STEP_B0) ||
+ (GRAPHICS_VER(gt->i915) >= 11 &&
+ GRAPHICS_VER_FULL(gt->i915) < IP_VER(12, 70)))
flags |= GUC_WA_PRE_PARSER;
/* Wa_16011777198:dg2 */
@@ -339,7 +343,7 @@ static void guc_init_params(struct intel_guc *guc)
params[GUC_CTL_DEVID] = guc_ctl_devid(guc);
for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
- DRM_DEBUG_DRIVER("param[%2d] = %#x\n", i, params[i]);
+ guc_dbg(guc, "param[%2d] = %#x\n", i, params[i]);
}
/*
@@ -386,7 +390,6 @@ void intel_guc_dump_time_info(struct intel_guc *guc, struct drm_printer *p)
int intel_guc_init(struct intel_guc *guc)
{
- struct intel_gt *gt = guc_to_gt(guc);
int ret;
ret = intel_uc_fw_init(&guc->fw);
@@ -430,9 +433,6 @@ int intel_guc_init(struct intel_guc *guc)
/* now that everything is perma-pinned, initialize the parameters */
guc_init_params(guc);
- /* We need to notify the guc whenever we change the GGTT */
- i915_ggtt_enable_guc(gt->ggtt);
-
intel_uc_fw_change_status(&guc->fw, INTEL_UC_FIRMWARE_LOADABLE);
return 0;
@@ -451,19 +451,15 @@ err_fw:
intel_uc_fw_fini(&guc->fw);
out:
intel_uc_fw_change_status(&guc->fw, INTEL_UC_FIRMWARE_INIT_FAIL);
- i915_probe_error(gt->i915, "failed with %d\n", ret);
+ guc_probe_error(guc, "failed with %pe\n", ERR_PTR(ret));
return ret;
}
void intel_guc_fini(struct intel_guc *guc)
{
- struct intel_gt *gt = guc_to_gt(guc);
-
if (!intel_uc_fw_is_loadable(&guc->fw))
return;
- i915_ggtt_disable_guc(gt->ggtt);
-
if (intel_guc_slpc_is_used(guc))
intel_guc_slpc_fini(&guc->slpc);
@@ -484,7 +480,6 @@ void intel_guc_fini(struct intel_guc *guc)
int intel_guc_send_mmio(struct intel_guc *guc, const u32 *request, u32 len,
u32 *response_buf, u32 response_buf_size)
{
- struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
struct intel_uncore *uncore = guc_to_gt(guc)->uncore;
u32 header;
int i;
@@ -519,7 +514,7 @@ retry:
10, 10, &header);
if (unlikely(ret)) {
timeout:
- drm_err(&i915->drm, "mmio request %#x: no reply %x\n",
+ guc_err(guc, "mmio request %#x: no reply %x\n",
request[0], header);
goto out;
}
@@ -541,7 +536,7 @@ timeout:
if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) == GUC_HXG_TYPE_NO_RESPONSE_RETRY) {
u32 reason = FIELD_GET(GUC_HXG_RETRY_MSG_0_REASON, header);
- drm_dbg(&i915->drm, "mmio request %#x: retrying, reason %u\n",
+ guc_dbg(guc, "mmio request %#x: retrying, reason %u\n",
request[0], reason);
goto retry;
}
@@ -550,7 +545,7 @@ timeout:
u32 hint = FIELD_GET(GUC_HXG_FAILURE_MSG_0_HINT, header);
u32 error = FIELD_GET(GUC_HXG_FAILURE_MSG_0_ERROR, header);
- drm_err(&i915->drm, "mmio request %#x: failure %x/%u\n",
+ guc_err(guc, "mmio request %#x: failure %x/%u\n",
request[0], error, hint);
ret = -ENXIO;
goto out;
@@ -558,7 +553,7 @@ timeout:
if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) != GUC_HXG_TYPE_RESPONSE_SUCCESS) {
proto:
- drm_err(&i915->drm, "mmio request %#x: unexpected reply %#x\n",
+ guc_err(guc, "mmio request %#x: unexpected reply %#x\n",
request[0], header);
ret = -EPROTO;
goto out;
@@ -601,9 +596,9 @@ int intel_guc_to_host_process_recv_msg(struct intel_guc *guc,
msg = payload[0] & guc->msg_enabled_mask;
if (msg & INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED)
- drm_err(&guc_to_gt(guc)->i915->drm, "Received early GuC crash dump notification!\n");
+ guc_err(guc, "Received early crash dump notification!\n");
if (msg & INTEL_GUC_RECV_MSG_EXCEPTION)
- drm_err(&guc_to_gt(guc)->i915->drm, "Received early GuC exception notification!\n");
+ guc_err(guc, "Received early exception notification!\n");
return 0;
}
@@ -657,7 +652,8 @@ int intel_guc_suspend(struct intel_guc *guc)
*/
ret = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0);
if (ret)
- DRM_ERROR("GuC suspend: RESET_CLIENT action failed with error %d!\n", ret);
+ guc_err(guc, "suspend: RESET_CLIENT action failed with %pe\n",
+ ERR_PTR(ret));
}
/* Signal that the GuC isn't running. */
@@ -832,12 +828,11 @@ static int __guc_action_self_cfg(struct intel_guc *guc, u16 key, u16 len, u64 va
static int __guc_self_cfg(struct intel_guc *guc, u16 key, u16 len, u64 value)
{
- struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
int err = __guc_action_self_cfg(guc, key, len, value);
if (unlikely(err))
- i915_probe_error(i915, "Unsuccessful self-config (%pe) key %#hx value %#llx\n",
- ERR_PTR(err), key, value);
+ guc_probe_error(guc, "Unsuccessful self-config (%pe) key %#hx value %#llx\n",
+ ERR_PTR(err), key, value);
return err;
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
index 1bb3f9829286..bb4dfe707a7d 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
@@ -158,6 +158,9 @@ struct intel_guc {
bool submission_selected;
/** @submission_initialized: tracks whether GuC submission has been initialised */
bool submission_initialized;
+ /** @submission_version: Submission API version of the currently loaded firmware */
+ struct intel_uc_fw_ver submission_version;
+
/**
* @rc_supported: tracks whether we support GuC rc on the current platform
*/
@@ -268,6 +271,14 @@ struct intel_guc {
#endif
};
+/*
+ * GuC version number components are only 8-bit, so converting to a 32bit 8.8.8
+ * integer works.
+ */
+#define MAKE_GUC_VER(maj, min, pat) (((maj) << 16) | ((min) << 8) | (pat))
+#define MAKE_GUC_VER_STRUCT(ver) MAKE_GUC_VER((ver).major, (ver).minor, (ver).patch)
+#define GUC_SUBMIT_VER(guc) MAKE_GUC_VER_STRUCT((guc)->submission_version)
+
static inline struct intel_guc *log_to_guc(struct intel_guc_log *log)
{
return container_of(log, struct intel_guc, log);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
index a7f737c4792e..69ce06faf8cd 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
@@ -15,6 +15,7 @@
#include "intel_guc_ads.h"
#include "intel_guc_capture.h"
#include "intel_guc_fwif.h"
+#include "intel_guc_print.h"
#include "intel_uc.h"
#include "i915_drv.h"
@@ -427,7 +428,7 @@ static long guc_mmio_reg_state_create(struct intel_guc *guc)
guc->ads_regset = temp_set.storage;
- drm_dbg(&guc_to_gt(guc)->i915->drm, "Used %zu KB for temporary ADS regset\n",
+ guc_dbg(guc, "Used %zu KB for temporary ADS regset\n",
(temp_set.storage_max * sizeof(struct guc_mmio_reg)) >> 10);
return total * sizeof(struct guc_mmio_reg);
@@ -621,7 +622,7 @@ static void guc_init_golden_context(struct intel_guc *guc)
engine = find_engine_state(gt, engine_class);
if (!engine) {
- drm_err(&gt->i915->drm, "No engine state recorded for class %d!\n",
+ guc_err(guc, "No engine state recorded for class %d!\n",
engine_class);
ads_blob_write(guc, ads.eng_state_size[guc_class], 0);
ads_blob_write(guc, ads.golden_context_lrca[guc_class], 0);
@@ -646,7 +647,6 @@ static int
guc_capture_prep_lists(struct intel_guc *guc)
{
struct intel_gt *gt = guc_to_gt(guc);
- struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
u32 ads_ggtt, capture_offset, null_ggtt, total_size = 0;
struct guc_gt_system_info local_info;
struct iosys_map info_map;
@@ -751,7 +751,7 @@ engine_instance_list:
}
if (guc->ads_capture_size && guc->ads_capture_size != PAGE_ALIGN(total_size))
- drm_warn(&i915->drm, "GuC->ADS->Capture alloc size changed from %d to %d\n",
+ guc_warn(guc, "ADS capture alloc size changed from %d to %d\n",
guc->ads_capture_size, PAGE_ALIGN(total_size));
return PAGE_ALIGN(total_size);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
index 1c1b85073b4b..fc3b994626a4 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
@@ -1506,7 +1506,7 @@ int intel_guc_capture_print_engine_node(struct drm_i915_error_state_buf *ebuf,
if (!ebuf || !ee)
return -EINVAL;
- cap = ee->capture;
+ cap = ee->guc_capture;
if (!cap || !ee->engine)
return -ENODEV;
@@ -1576,8 +1576,8 @@ void intel_guc_capture_free_node(struct intel_engine_coredump *ee)
if (!ee || !ee->guc_capture_node)
return;
- guc_capture_add_node_to_cachelist(ee->capture, ee->guc_capture_node);
- ee->capture = NULL;
+ guc_capture_add_node_to_cachelist(ee->guc_capture, ee->guc_capture_node);
+ ee->guc_capture = NULL;
ee->guc_capture_node = NULL;
}
@@ -1611,7 +1611,7 @@ void intel_guc_capture_get_matching_node(struct intel_gt *gt,
(ce->lrc.lrca & CTX_GTT_ADDRESS_MASK)) {
list_del(&n->link);
ee->guc_capture_node = n;
- ee->capture = guc->capture;
+ ee->guc_capture = guc->capture;
return;
}
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
index 2b22065e87bf..1803a633ed64 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
@@ -11,38 +11,23 @@
#include "i915_drv.h"
#include "intel_guc_ct.h"
-#include "gt/intel_gt.h"
+#include "intel_guc_print.h"
static inline struct intel_guc *ct_to_guc(struct intel_guc_ct *ct)
{
return container_of(ct, struct intel_guc, ct);
}
-static inline struct intel_gt *ct_to_gt(struct intel_guc_ct *ct)
-{
- return guc_to_gt(ct_to_guc(ct));
-}
-
-static inline struct drm_i915_private *ct_to_i915(struct intel_guc_ct *ct)
-{
- return ct_to_gt(ct)->i915;
-}
-
-static inline struct drm_device *ct_to_drm(struct intel_guc_ct *ct)
-{
- return &ct_to_i915(ct)->drm;
-}
-
#define CT_ERROR(_ct, _fmt, ...) \
- drm_err(ct_to_drm(_ct), "CT: " _fmt, ##__VA_ARGS__)
+ guc_err(ct_to_guc(_ct), "CT: " _fmt, ##__VA_ARGS__)
#ifdef CONFIG_DRM_I915_DEBUG_GUC
#define CT_DEBUG(_ct, _fmt, ...) \
- drm_dbg(ct_to_drm(_ct), "CT: " _fmt, ##__VA_ARGS__)
+ guc_dbg(ct_to_guc(_ct), "CT: " _fmt, ##__VA_ARGS__)
#else
#define CT_DEBUG(...) do { } while (0)
#endif
#define CT_PROBE_ERROR(_ct, _fmt, ...) \
- i915_probe_error(ct_to_i915(ct), "CT: " _fmt, ##__VA_ARGS__)
+ guc_probe_error(ct_to_guc(ct), "CT: " _fmt, ##__VA_ARGS__)
/**
* DOC: CTB Blob
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
index 5b86b2e286e0..69133420c78b 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
@@ -13,6 +13,7 @@
#include "gt/intel_gt_mcr.h"
#include "gt/intel_gt_regs.h"
#include "intel_guc_fw.h"
+#include "intel_guc_print.h"
#include "i915_drv.h"
static void guc_prepare_xfer(struct intel_gt *gt)
@@ -38,9 +39,8 @@ static void guc_prepare_xfer(struct intel_gt *gt)
if (GRAPHICS_VER(uncore->i915) == 9) {
/* DOP Clock Gating Enable for GuC clocks */
- intel_gt_mcr_multicast_write(gt, GEN8_MISCCPCTL,
- GEN8_DOP_CLOCK_GATE_GUC_ENABLE |
- intel_gt_mcr_read_any(gt, GEN8_MISCCPCTL));
+ intel_uncore_rmw(uncore, GEN7_MISCCPCTL, 0,
+ GEN8_DOP_CLOCK_GATE_GUC_ENABLE);
/* allows for 5us (in 10ns units) before GT can go to RC6 */
intel_uncore_write(uncore, GUC_ARAT_C6DIS, 0x1FF);
@@ -103,8 +103,10 @@ static inline bool guc_ready(struct intel_uncore *uncore, u32 *status)
return uk_val == INTEL_GUC_LOAD_STATUS_READY;
}
-static int guc_wait_ucode(struct intel_uncore *uncore)
+static int guc_wait_ucode(struct intel_guc *guc)
{
+ struct intel_gt *gt = guc_to_gt(guc);
+ struct intel_uncore *uncore = gt->uncore;
u32 status;
int ret;
@@ -127,10 +129,8 @@ static int guc_wait_ucode(struct intel_uncore *uncore)
*/
ret = wait_for(guc_ready(uncore, &status), 200);
if (ret) {
- struct drm_device *drm = &uncore->i915->drm;
-
- drm_info(drm, "GuC load failed: status = 0x%08X\n", status);
- drm_info(drm, "GuC load failed: status: Reset = %d, "
+ guc_info(guc, "load failed: status = 0x%08X\n", status);
+ guc_info(guc, "load failed: status: Reset = %d, "
"BootROM = 0x%02X, UKernel = 0x%02X, "
"MIA = 0x%02X, Auth = 0x%02X\n",
REG_FIELD_GET(GS_MIA_IN_RESET, status),
@@ -140,12 +140,12 @@ static int guc_wait_ucode(struct intel_uncore *uncore)
REG_FIELD_GET(GS_AUTH_STATUS_MASK, status));
if ((status & GS_BOOTROM_MASK) == GS_BOOTROM_RSA_FAILED) {
- drm_info(drm, "GuC firmware signature verification failed\n");
+ guc_info(guc, "firmware signature verification failed\n");
ret = -ENOEXEC;
}
if (REG_FIELD_GET(GS_UKERNEL_MASK, status) == INTEL_GUC_LOAD_STATUS_EXCEPTION) {
- drm_info(drm, "GuC firmware exception. EIP: %#x\n",
+ guc_info(guc, "firmware exception. EIP: %#x\n",
intel_uncore_read(uncore, SOFT_SCRATCH(13)));
ret = -ENXIO;
}
@@ -194,7 +194,7 @@ int intel_guc_fw_upload(struct intel_guc *guc)
if (ret)
goto out;
- ret = guc_wait_ucode(uncore);
+ ret = guc_wait_ucode(guc);
if (ret)
goto out;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
index 68331c538b0a..c3792ddeec80 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
@@ -12,6 +12,7 @@
#include "i915_memcpy.h"
#include "intel_guc_capture.h"
#include "intel_guc_log.h"
+#include "intel_guc_print.h"
#if defined(CONFIG_DRM_I915_DEBUG_GUC)
#define GUC_LOG_DEFAULT_CRASH_BUFFER_SIZE SZ_2M
@@ -39,7 +40,6 @@ struct guc_log_section {
static void _guc_log_init_sizes(struct intel_guc_log *log)
{
struct intel_guc *guc = log_to_guc(log);
- struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
static const struct guc_log_section sections[GUC_LOG_SECTIONS_LIMIT] = {
{
GUC_LOG_CRASH_MASK >> GUC_LOG_CRASH_SHIFT,
@@ -82,12 +82,12 @@ static void _guc_log_init_sizes(struct intel_guc_log *log)
}
if (!IS_ALIGNED(log->sizes[i].bytes, log->sizes[i].units))
- drm_err(&i915->drm, "Mis-aligned GuC log %s size: 0x%X vs 0x%X!",
+ guc_err(guc, "Mis-aligned log %s size: 0x%X vs 0x%X!\n",
sections[i].name, log->sizes[i].bytes, log->sizes[i].units);
log->sizes[i].count = log->sizes[i].bytes / log->sizes[i].units;
if (!log->sizes[i].count) {
- drm_err(&i915->drm, "Zero GuC log %s size!", sections[i].name);
+ guc_err(guc, "Zero log %s size!\n", sections[i].name);
} else {
/* Size is +1 unit */
log->sizes[i].count--;
@@ -95,14 +95,14 @@ static void _guc_log_init_sizes(struct intel_guc_log *log)
/* Clip to field size */
if (log->sizes[i].count > sections[i].max) {
- drm_err(&i915->drm, "GuC log %s size too large: %d vs %d!",
+ guc_err(guc, "log %s size too large: %d vs %d!\n",
sections[i].name, log->sizes[i].count + 1, sections[i].max + 1);
log->sizes[i].count = sections[i].max;
}
}
if (log->sizes[GUC_LOG_SECTIONS_CRASH].units != log->sizes[GUC_LOG_SECTIONS_DEBUG].units) {
- drm_err(&i915->drm, "Unit mis-match for GuC log crash and debug sections: %d vs %d!",
+ guc_err(guc, "Unit mismatch for crash and debug sections: %d vs %d!\n",
log->sizes[GUC_LOG_SECTIONS_CRASH].units,
log->sizes[GUC_LOG_SECTIONS_DEBUG].units);
log->sizes[GUC_LOG_SECTIONS_CRASH].units = log->sizes[GUC_LOG_SECTIONS_DEBUG].units;
@@ -374,6 +374,7 @@ size_t intel_guc_get_log_buffer_offset(struct intel_guc_log *log,
static void _guc_log_copy_debuglogs_for_relay(struct intel_guc_log *log)
{
+ struct intel_guc *guc = log_to_guc(log);
unsigned int buffer_size, read_offset, write_offset, bytes_to_copy, full_cnt;
struct guc_log_buffer_state *log_buf_state, *log_buf_snapshot_state;
struct guc_log_buffer_state log_buf_state_local;
@@ -383,7 +384,7 @@ static void _guc_log_copy_debuglogs_for_relay(struct intel_guc_log *log)
mutex_lock(&log->relay.lock);
- if (WARN_ON(!intel_guc_log_relay_created(log)))
+ if (guc_WARN_ON(guc, !intel_guc_log_relay_created(log)))
goto out_unlock;
/* Get the pointer to shared GuC log buffer */
@@ -398,7 +399,7 @@ static void _guc_log_copy_debuglogs_for_relay(struct intel_guc_log *log)
* Used rate limited to avoid deluge of messages, logs might be
* getting consumed by User at a slow rate.
*/
- DRM_ERROR_RATELIMITED("no sub-buffer to copy general logs\n");
+ guc_err_ratelimited(guc, "no sub-buffer to copy general logs\n");
log->relay.full_count++;
goto out_unlock;
@@ -451,7 +452,7 @@ static void _guc_log_copy_debuglogs_for_relay(struct intel_guc_log *log)
write_offset = buffer_size;
} else if (unlikely((read_offset > buffer_size) ||
(write_offset > buffer_size))) {
- DRM_ERROR("invalid log buffer state\n");
+ guc_err(guc, "invalid log buffer state\n");
/* copy whole buffer as offsets are unreliable */
read_offset = 0;
write_offset = buffer_size;
@@ -547,7 +548,7 @@ static int guc_log_relay_create(struct intel_guc_log *log)
subbuf_size, n_subbufs,
&relay_callbacks, dev_priv);
if (!guc_log_relay_chan) {
- DRM_ERROR("Couldn't create relay chan for GuC logging\n");
+ guc_err(guc, "Couldn't create relay channel for logging\n");
ret = -ENOMEM;
return ret;
@@ -596,9 +597,8 @@ static u32 __get_default_log_level(struct intel_guc_log *log)
}
if (i915->params.guc_log_level > GUC_LOG_LEVEL_MAX) {
- DRM_WARN("Incompatible option detected: %s=%d, %s!\n",
- "guc_log_level", i915->params.guc_log_level,
- "verbosity too high");
+ guc_warn(guc, "Log verbosity param out of range: %d > %d!\n",
+ i915->params.guc_log_level, GUC_LOG_LEVEL_MAX);
return (IS_ENABLED(CONFIG_DRM_I915_DEBUG) ||
IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) ?
GUC_LOG_LEVEL_MAX : GUC_LOG_LEVEL_DISABLED;
@@ -641,15 +641,15 @@ int intel_guc_log_create(struct intel_guc_log *log)
log->buf_addr = vaddr;
log->level = __get_default_log_level(log);
- DRM_DEBUG_DRIVER("guc_log_level=%d (%s, verbose:%s, verbosity:%d)\n",
- log->level, str_enabled_disabled(log->level),
- str_yes_no(GUC_LOG_LEVEL_IS_VERBOSE(log->level)),
- GUC_LOG_LEVEL_TO_VERBOSITY(log->level));
+ guc_dbg(guc, "guc_log_level=%d (%s, verbose:%s, verbosity:%d)\n",
+ log->level, str_enabled_disabled(log->level),
+ str_yes_no(GUC_LOG_LEVEL_IS_VERBOSE(log->level)),
+ GUC_LOG_LEVEL_TO_VERBOSITY(log->level));
return 0;
err:
- DRM_ERROR("Failed to allocate or map GuC log buffer. %d\n", ret);
+ guc_err(guc, "Failed to allocate or map log buffer %pe\n", ERR_PTR(ret));
return ret;
}
@@ -687,7 +687,7 @@ int intel_guc_log_set_level(struct intel_guc_log *log, u32 level)
GUC_LOG_LEVEL_IS_ENABLED(level),
GUC_LOG_LEVEL_TO_VERBOSITY(level));
if (ret) {
- DRM_DEBUG_DRIVER("guc_log_control action failed %d\n", ret);
+ guc_dbg(guc, "guc_log_control action failed %pe\n", ERR_PTR(ret));
goto out_unlock;
}
@@ -905,7 +905,7 @@ int intel_guc_log_dump(struct intel_guc_log *log, struct drm_printer *p,
map = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
if (IS_ERR(map)) {
- DRM_DEBUG("Failed to pin object\n");
+ guc_dbg(guc, "Failed to pin log object: %pe\n", map);
drm_puts(p, "(log data unaccessible)\n");
free_page((unsigned long)page);
return PTR_ERR(map);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_print.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_print.h
new file mode 100644
index 000000000000..e75989d4ba06
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_print.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef __INTEL_GUC_PRINT__
+#define __INTEL_GUC_PRINT__
+
+#include "gt/intel_gt.h"
+#include "gt/intel_gt_print.h"
+
+#define guc_printk(_guc, _level, _fmt, ...) \
+ gt_##_level(guc_to_gt(_guc), "GUC: " _fmt, ##__VA_ARGS__)
+
+#define guc_err(_guc, _fmt, ...) \
+ guc_printk((_guc), err, _fmt, ##__VA_ARGS__)
+
+#define guc_warn(_guc, _fmt, ...) \
+ guc_printk((_guc), warn, _fmt, ##__VA_ARGS__)
+
+#define guc_notice(_guc, _fmt, ...) \
+ guc_printk((_guc), notice, _fmt, ##__VA_ARGS__)
+
+#define guc_info(_guc, _fmt, ...) \
+ guc_printk((_guc), info, _fmt, ##__VA_ARGS__)
+
+#define guc_dbg(_guc, _fmt, ...) \
+ guc_printk((_guc), dbg, _fmt, ##__VA_ARGS__)
+
+#define guc_err_ratelimited(_guc, _fmt, ...) \
+ guc_printk((_guc), err_ratelimited, _fmt, ##__VA_ARGS__)
+
+#define guc_probe_error(_guc, _fmt, ...) \
+ guc_printk((_guc), probe_error, _fmt, ##__VA_ARGS__)
+
+#define guc_WARN(_guc, _cond, _fmt, ...) \
+ gt_WARN(guc_to_gt(_guc), _cond, "GUC: " _fmt, ##__VA_ARGS__)
+
+#define guc_WARN_ONCE(_guc, _cond, _fmt, ...) \
+ gt_WARN_ONCE(guc_to_gt(_guc), _cond, "GUC: " _fmt, ##__VA_ARGS__)
+
+#define guc_WARN_ON(_guc, _cond) \
+ gt_WARN(guc_to_gt(_guc), _cond, "%s(%s)", "guc_WARN_ON", __stringify(_cond))
+
+#define guc_WARN_ON_ONCE(_guc, _cond) \
+ gt_WARN_ONCE(guc_to_gt(_guc), _cond, "%s(%s)", "guc_WARN_ON_ONCE", __stringify(_cond))
+
+#endif /* __INTEL_GUC_PRINT__ */
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 0a42f1807f52..53f3ed3244d5 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -27,6 +27,7 @@
#include "intel_guc_ads.h"
#include "intel_guc_capture.h"
+#include "intel_guc_print.h"
#include "intel_guc_submission.h"
#include "i915_drv.h"
@@ -1443,8 +1444,7 @@ static void guc_init_engine_stats(struct intel_guc *guc)
int ret = guc_action_enable_usage_stats(guc);
if (ret)
- drm_err(&gt->i915->drm,
- "Failed to enable usage stats: %d!\n", ret);
+ guc_err(guc, "Failed to enable usage stats: %pe\n", ERR_PTR(ret));
}
}
@@ -1621,7 +1621,7 @@ static void guc_engine_reset_prepare(struct intel_engine_cs *engine)
intel_engine_stop_cs(engine);
/*
- * Wa_22011802037:gen11/gen12: In addition to stopping the cs, we need
+ * Wa_22011802037: In addition to stopping the cs, we need
* to wait for any pending mi force wakeups
*/
intel_engine_wait_for_pending_mi_fw(engine);
@@ -1702,7 +1702,7 @@ static void __guc_reset_context(struct intel_context *ce, intel_engine_mask_t st
goto next_context;
guilty = false;
- rq = intel_context_find_active_request(ce);
+ rq = intel_context_get_active_request(ce);
if (!rq) {
head = ce->ring->tail;
goto out_replay;
@@ -1715,6 +1715,7 @@ static void __guc_reset_context(struct intel_context *ce, intel_engine_mask_t st
head = intel_ring_wrap(ce->ring, rq->head);
__i915_request_reset(rq, guilty);
+ i915_request_put(rq);
out_replay:
guc_reset_state(ce, head, guilty);
next_context:
@@ -1890,7 +1891,7 @@ int intel_guc_submission_init(struct intel_guc *guc)
if (guc->submission_initialized)
return 0;
- if (GET_UC_VER(guc) < MAKE_UC_VER(70, 0, 0)) {
+ if (GUC_SUBMIT_VER(guc) < MAKE_GUC_VER(1, 0, 0)) {
ret = guc_lrc_desc_pool_create_v69(guc);
if (ret)
return ret;
@@ -2330,7 +2331,7 @@ static int register_context(struct intel_context *ce, bool loop)
GEM_BUG_ON(intel_context_is_child(ce));
trace_intel_context_register(ce);
- if (GET_UC_VER(guc) >= MAKE_UC_VER(70, 0, 0))
+ if (GUC_SUBMIT_VER(guc) >= MAKE_GUC_VER(1, 0, 0))
ret = register_context_v70(guc, ce, loop);
else
ret = register_context_v69(guc, ce, loop);
@@ -2342,7 +2343,7 @@ static int register_context(struct intel_context *ce, bool loop)
set_context_registered(ce);
spin_unlock_irqrestore(&ce->guc_state.lock, flags);
- if (GET_UC_VER(guc) >= MAKE_UC_VER(70, 0, 0))
+ if (GUC_SUBMIT_VER(guc) >= MAKE_GUC_VER(1, 0, 0))
guc_context_policy_init_v70(ce, loop);
}
@@ -2534,6 +2535,7 @@ static void prepare_context_registration_info_v69(struct intel_context *ce)
i915_gem_object_is_lmem(ce->ring->vma->obj));
desc = __get_lrc_desc_v69(guc, ctx_id);
+ GEM_BUG_ON(!desc);
desc->engine_class = engine_class_to_guc_class(engine->class);
desc->engine_submit_mask = engine->logical_mask;
desc->hw_context_desc = ce->lrc.lrca;
@@ -2956,7 +2958,7 @@ static void __guc_context_set_preemption_timeout(struct intel_guc *guc,
u16 guc_id,
u32 preemption_timeout)
{
- if (GET_UC_VER(guc) >= MAKE_UC_VER(70, 0, 0)) {
+ if (GUC_SUBMIT_VER(guc) >= MAKE_GUC_VER(1, 0, 0)) {
struct context_policy policy;
__guc_context_policy_start_klv(&policy, guc_id);
@@ -3283,7 +3285,7 @@ static int guc_context_alloc(struct intel_context *ce)
static void __guc_context_set_prio(struct intel_guc *guc,
struct intel_context *ce)
{
- if (GET_UC_VER(guc) >= MAKE_UC_VER(70, 0, 0)) {
+ if (GUC_SUBMIT_VER(guc) >= MAKE_GUC_VER(1, 0, 0)) {
struct context_policy policy;
__guc_context_policy_start_klv(&policy, ce->guc_id.id);
@@ -3584,8 +3586,7 @@ static int guc_request_alloc(struct i915_request *rq)
intel_context_sched_disable_unpin(ce);
else if (intel_context_is_closed(ce))
if (wait_for(context_close_done(ce), 1500))
- drm_warn(&guc_to_gt(guc)->i915->drm,
- "timed out waiting on context sched close before realloc\n");
+ guc_warn(guc, "timed out waiting on context sched close before realloc\n");
/*
* Call pin_guc_id here rather than in the pinning step as with
* dma_resv, contexts can be repeatedly pinned / unpinned trashing the
@@ -4202,8 +4203,10 @@ static void guc_default_vfuncs(struct intel_engine_cs *engine)
engine->flags |= I915_ENGINE_HAS_TIMESLICES;
/* Wa_14014475959:dg2 */
- if (IS_DG2(engine->i915) && engine->class == COMPUTE_CLASS)
- engine->flags |= I915_ENGINE_USES_WA_HOLD_CCS_SWITCHOUT;
+ if (engine->class == COMPUTE_CLASS)
+ if (IS_MTL_GRAPHICS_STEP(engine->i915, M, STEP_A0, STEP_B0) ||
+ IS_DG2(engine->i915))
+ engine->flags |= I915_ENGINE_USES_WA_HOLD_CCS_SWITCHOUT;
/*
* TODO: GuC supports timeslicing and semaphores as well, but they're
@@ -4346,11 +4349,14 @@ static int __guc_action_set_scheduling_policies(struct intel_guc *guc,
ret = intel_guc_send(guc, (u32 *)&policy->h2g,
__guc_scheduling_policy_action_size(policy));
- if (ret < 0)
+ if (ret < 0) {
+ guc_probe_error(guc, "Failed to configure global scheduling policies: %pe!\n",
+ ERR_PTR(ret));
return ret;
+ }
if (ret != policy->count) {
- drm_warn(&guc_to_gt(guc)->i915->drm, "GuC global scheduler policy processed %d of %d KLVs!",
+ guc_warn(guc, "global scheduler policy processed %d of %d KLVs!",
ret, policy->count);
if (ret > policy->count)
return -EPROTO;
@@ -4364,9 +4370,9 @@ static int guc_init_global_schedule_policy(struct intel_guc *guc)
struct scheduling_policy policy;
struct intel_gt *gt = guc_to_gt(guc);
intel_wakeref_t wakeref;
- int ret = 0;
+ int ret;
- if (GET_UC_VER(guc) < MAKE_UC_VER(70, 3, 0))
+ if (GUC_SUBMIT_VER(guc) < MAKE_GUC_VER(1, 1, 0))
return 0;
__guc_scheduling_policy_start_klv(&policy);
@@ -4382,10 +4388,6 @@ static int guc_init_global_schedule_policy(struct intel_guc *guc)
yield, ARRAY_SIZE(yield));
ret = __guc_action_set_scheduling_policies(guc, &policy);
- if (ret)
- i915_probe_error(gt->i915,
- "Failed to configure global scheduling policies: %pe!\n",
- ERR_PTR(ret));
}
return ret;
@@ -4484,21 +4486,18 @@ g2h_context_lookup(struct intel_guc *guc, u32 ctx_id)
struct intel_context *ce;
if (unlikely(ctx_id >= GUC_MAX_CONTEXT_ID)) {
- drm_err(&guc_to_gt(guc)->i915->drm,
- "Invalid ctx_id %u\n", ctx_id);
+ guc_err(guc, "Invalid ctx_id %u\n", ctx_id);
return NULL;
}
ce = __get_context(guc, ctx_id);
if (unlikely(!ce)) {
- drm_err(&guc_to_gt(guc)->i915->drm,
- "Context is NULL, ctx_id %u\n", ctx_id);
+ guc_err(guc, "Context is NULL, ctx_id %u\n", ctx_id);
return NULL;
}
if (unlikely(intel_context_is_child(ce))) {
- drm_err(&guc_to_gt(guc)->i915->drm,
- "Context is child, ctx_id %u\n", ctx_id);
+ guc_err(guc, "Context is child, ctx_id %u\n", ctx_id);
return NULL;
}
@@ -4513,7 +4512,7 @@ int intel_guc_deregister_done_process_msg(struct intel_guc *guc,
u32 ctx_id;
if (unlikely(len < 1)) {
- drm_err(&guc_to_gt(guc)->i915->drm, "Invalid length %u\n", len);
+ guc_err(guc, "Invalid length %u\n", len);
return -EPROTO;
}
ctx_id = msg[0];
@@ -4565,7 +4564,7 @@ int intel_guc_sched_done_process_msg(struct intel_guc *guc,
u32 ctx_id;
if (unlikely(len < 2)) {
- drm_err(&guc_to_gt(guc)->i915->drm, "Invalid length %u\n", len);
+ guc_err(guc, "Invalid length %u\n", len);
return -EPROTO;
}
ctx_id = msg[0];
@@ -4577,8 +4576,7 @@ int intel_guc_sched_done_process_msg(struct intel_guc *guc,
if (unlikely(context_destroyed(ce) ||
(!context_pending_enable(ce) &&
!context_pending_disable(ce)))) {
- drm_err(&guc_to_gt(guc)->i915->drm,
- "Bad context sched_state 0x%x, ctx_id %u\n",
+ guc_err(guc, "Bad context sched_state 0x%x, ctx_id %u\n",
ce->guc_state.sched_state, ctx_id);
return -EPROTO;
}
@@ -4662,12 +4660,15 @@ static void guc_handle_context_reset(struct intel_guc *guc,
{
trace_intel_context_reset(ce);
+ drm_dbg(&guc_to_gt(guc)->i915->drm, "Got GuC reset of 0x%04X, exiting = %d, banned = %d\n",
+ ce->guc_id.id, test_bit(CONTEXT_EXITING, &ce->flags),
+ test_bit(CONTEXT_BANNED, &ce->flags));
+
if (likely(intel_context_is_schedulable(ce))) {
capture_error_state(guc, ce);
guc_context_replay(ce);
} else {
- drm_info(&guc_to_gt(guc)->i915->drm,
- "Ignoring context reset notification of exiting context 0x%04X on %s",
+ guc_info(guc, "Ignoring context reset notification of exiting context 0x%04X on %s",
ce->guc_id.id, ce->engine->name);
}
}
@@ -4680,7 +4681,7 @@ int intel_guc_context_reset_process_msg(struct intel_guc *guc,
int ctx_id;
if (unlikely(len != 1)) {
- drm_err(&guc_to_gt(guc)->i915->drm, "Invalid length %u", len);
+ guc_err(guc, "Invalid length %u", len);
return -EPROTO;
}
@@ -4713,13 +4714,13 @@ int intel_guc_error_capture_process_msg(struct intel_guc *guc,
u32 status;
if (unlikely(len != 1)) {
- drm_dbg(&guc_to_gt(guc)->i915->drm, "Invalid length %u", len);
+ guc_dbg(guc, "Invalid length %u", len);
return -EPROTO;
}
status = msg[0] & INTEL_GUC_STATE_CAPTURE_EVENT_STATUS_MASK;
if (status == INTEL_GUC_STATE_CAPTURE_EVENT_STATUS_NOSPACE)
- drm_warn(&guc_to_gt(guc)->i915->drm, "G2H-Error capture no space");
+ guc_warn(guc, "No space for error capture");
intel_guc_capture_process(guc);
@@ -4751,24 +4752,36 @@ static void reset_fail_worker_func(struct work_struct *w)
guc->submission_state.reset_fail_mask = 0;
spin_unlock_irqrestore(&guc->submission_state.lock, flags);
- if (likely(reset_fail_mask))
+ if (likely(reset_fail_mask)) {
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ /*
+ * GuC is toast at this point - it dead loops after sending the failed
+ * reset notification. So need to manually determine the guilty context.
+ * Note that it should be reliable to do this here because the GuC is
+ * toast and will not be scheduling behind the KMD's back.
+ */
+ for_each_engine_masked(engine, gt, reset_fail_mask, id)
+ intel_guc_find_hung_context(engine);
+
intel_gt_handle_error(gt, reset_fail_mask,
I915_ERROR_CAPTURE,
- "GuC failed to reset engine mask=0x%x\n",
+ "GuC failed to reset engine mask=0x%x",
reset_fail_mask);
+ }
}
int intel_guc_engine_failure_process_msg(struct intel_guc *guc,
const u32 *msg, u32 len)
{
struct intel_engine_cs *engine;
- struct intel_gt *gt = guc_to_gt(guc);
u8 guc_class, instance;
u32 reason;
unsigned long flags;
if (unlikely(len != 3)) {
- drm_err(&gt->i915->drm, "Invalid length %u", len);
+ guc_err(guc, "Invalid length %u", len);
return -EPROTO;
}
@@ -4778,8 +4791,7 @@ int intel_guc_engine_failure_process_msg(struct intel_guc *guc,
engine = intel_guc_lookup_engine(guc, guc_class, instance);
if (unlikely(!engine)) {
- drm_err(&gt->i915->drm,
- "Invalid engine %d:%d", guc_class, instance);
+ guc_err(guc, "Invalid engine %d:%d", guc_class, instance);
return -EPROTO;
}
@@ -4787,7 +4799,7 @@ int intel_guc_engine_failure_process_msg(struct intel_guc *guc,
* This is an unexpected failure of a hardware feature. So, log a real
* error message not just the informational that comes with the reset.
*/
- drm_err(&gt->i915->drm, "GuC engine reset request failed on %d:%d (%s) because 0x%08X",
+ guc_err(guc, "Engine reset failed on %d:%d (%s) because 0x%08X",
guc_class, instance, engine->name, reason);
spin_lock_irqsave(&guc->submission_state.lock, flags);
@@ -4817,6 +4829,8 @@ void intel_guc_find_hung_context(struct intel_engine_cs *engine)
xa_lock_irqsave(&guc->context_lookup, flags);
xa_for_each(&guc->context_lookup, index, ce) {
+ bool found;
+
if (!kref_get_unless_zero(&ce->ref))
continue;
@@ -4833,10 +4847,18 @@ void intel_guc_find_hung_context(struct intel_engine_cs *engine)
goto next;
}
+ found = false;
+ spin_lock(&ce->guc_state.lock);
list_for_each_entry(rq, &ce->guc_state.requests, sched.link) {
if (i915_test_request_state(rq) != I915_REQUEST_ACTIVE)
continue;
+ found = true;
+ break;
+ }
+ spin_unlock(&ce->guc_state.lock);
+
+ if (found) {
intel_engine_set_hung_context(engine, ce);
/* Can only cope with one hang at a time... */
@@ -4844,6 +4866,7 @@ void intel_guc_find_hung_context(struct intel_engine_cs *engine)
xa_lock(&guc->context_lookup);
goto done;
}
+
next:
intel_context_put(ce);
xa_lock(&guc->context_lookup);
@@ -4905,6 +4928,9 @@ void intel_guc_submission_print_info(struct intel_guc *guc,
if (!sched_engine)
return;
+ drm_printf(p, "GuC Submission API Version: %d.%d.%d\n",
+ guc->submission_version.major, guc->submission_version.minor,
+ guc->submission_version.patch);
drm_printf(p, "GuC Number Outstanding Submission G2H: %u\n",
atomic_read(&guc->outstanding_submission_g2h));
drm_printf(p, "GuC tasklet count: %u\n",
@@ -5336,8 +5362,8 @@ guc_create_virtual(struct intel_engine_cs **siblings, unsigned int count,
GEM_BUG_ON(!is_power_of_2(sibling->mask));
if (sibling->mask & ve->base.mask) {
- DRM_DEBUG("duplicate %s entry in load balancer\n",
- sibling->name);
+ guc_dbg(guc, "duplicate %s entry in load balancer\n",
+ sibling->name);
err = -EINVAL;
goto err_put;
}
@@ -5346,8 +5372,8 @@ guc_create_virtual(struct intel_engine_cs **siblings, unsigned int count,
ve->base.logical_mask |= sibling->logical_mask;
if (n != 0 && ve->base.class != sibling->class) {
- DRM_DEBUG("invalid mixing of engine class, sibling %d, already %d\n",
- sibling->class, ve->base.class);
+ guc_dbg(guc, "invalid mixing of engine class, sibling %d, already %d\n",
+ sibling->class, ve->base.class);
err = -EINVAL;
goto err_put;
} else if (n == 0) {
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c
index 4f246416db17..534b0aa43316 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c
@@ -32,7 +32,7 @@ int intel_huc_fw_load_and_auth_via_gsc(struct intel_huc *huc)
GEM_WARN_ON(intel_uc_fw_is_loaded(&huc->fw));
- ret = intel_pxp_huc_load_and_auth(&huc_to_gt(huc)->pxp);
+ ret = intel_pxp_huc_load_and_auth(huc_to_gt(huc)->i915->pxp);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
index 2a508b137e90..de7f987cf611 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
@@ -6,9 +6,13 @@
#include <linux/string_helpers.h>
#include "gt/intel_gt.h"
+#include "gt/intel_gt_print.h"
#include "gt/intel_reset.h"
+#include "intel_gsc_fw.h"
+#include "intel_gsc_uc.h"
#include "intel_guc.h"
#include "intel_guc_ads.h"
+#include "intel_guc_print.h"
#include "intel_guc_submission.h"
#include "gt/intel_rps.h"
#include "intel_uc.h"
@@ -65,14 +69,14 @@ static int __intel_uc_reset_hw(struct intel_uc *uc)
ret = intel_reset_guc(gt);
if (ret) {
- DRM_ERROR("Failed to reset GuC, ret = %d\n", ret);
+ gt_err(gt, "Failed to reset GuC, ret = %d\n", ret);
return ret;
}
guc_status = intel_uncore_read(gt->uncore, GUC_STATUS);
- WARN(!(guc_status & GS_MIA_IN_RESET),
- "GuC status: 0x%x, MIA core expected to be in reset\n",
- guc_status);
+ gt_WARN(gt, !(guc_status & GS_MIA_IN_RESET),
+ "GuC status: 0x%x, MIA core expected to be in reset\n",
+ guc_status);
return ret;
}
@@ -126,6 +130,7 @@ void intel_uc_init_early(struct intel_uc *uc)
intel_guc_init_early(&uc->guc);
intel_huc_init_early(&uc->huc);
+ intel_gsc_uc_init_early(&uc->gsc);
__confirm_options(uc);
@@ -249,15 +254,13 @@ static int guc_enable_communication(struct intel_guc *guc)
intel_guc_ct_event_handler(&guc->ct);
spin_unlock_irq(gt->irq_lock);
- drm_dbg(&i915->drm, "GuC communication enabled\n");
+ guc_dbg(guc, "communication enabled\n");
return 0;
}
static void guc_disable_communication(struct intel_guc *guc)
{
- struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
-
/*
* Events generated during or after CT disable are logged by guc in
* via mmio. Make sure the register is clear before disabling CT since
@@ -277,11 +280,12 @@ static void guc_disable_communication(struct intel_guc *guc)
*/
guc_get_mmio_msg(guc);
- drm_dbg(&i915->drm, "GuC communication disabled\n");
+ guc_dbg(guc, "communication disabled\n");
}
static void __uc_fetch_firmwares(struct intel_uc *uc)
{
+ struct intel_gt *gt = uc_to_gt(uc);
int err;
GEM_BUG_ON(!intel_uc_wants_guc(uc));
@@ -290,21 +294,30 @@ static void __uc_fetch_firmwares(struct intel_uc *uc)
if (err) {
/* Make sure we transition out of transient "SELECTED" state */
if (intel_uc_wants_huc(uc)) {
- drm_dbg(&uc_to_gt(uc)->i915->drm,
- "Failed to fetch GuC: %d disabling HuC\n", err);
+ gt_dbg(gt, "Failed to fetch GuC fw (%pe) disabling HuC\n", ERR_PTR(err));
intel_uc_fw_change_status(&uc->huc.fw,
INTEL_UC_FIRMWARE_ERROR);
}
+ if (intel_uc_wants_gsc_uc(uc)) {
+ gt_dbg(gt, "Failed to fetch GuC fw (%pe) disabling GSC\n", ERR_PTR(err));
+ intel_uc_fw_change_status(&uc->gsc.fw,
+ INTEL_UC_FIRMWARE_ERROR);
+ }
+
return;
}
if (intel_uc_wants_huc(uc))
intel_uc_fw_fetch(&uc->huc.fw);
+
+ if (intel_uc_wants_gsc_uc(uc))
+ intel_uc_fw_fetch(&uc->gsc.fw);
}
static void __uc_cleanup_firmwares(struct intel_uc *uc)
{
+ intel_uc_fw_cleanup_fetch(&uc->gsc.fw);
intel_uc_fw_cleanup_fetch(&uc->huc.fw);
intel_uc_fw_cleanup_fetch(&uc->guc.fw);
}
@@ -330,11 +343,15 @@ static int __uc_init(struct intel_uc *uc)
if (intel_uc_uses_huc(uc))
intel_huc_init(huc);
+ if (intel_uc_uses_gsc_uc(uc))
+ intel_gsc_uc_init(&uc->gsc);
+
return 0;
}
static void __uc_fini(struct intel_uc *uc)
{
+ intel_gsc_uc_fini(&uc->gsc);
intel_huc_fini(&uc->huc);
intel_guc_fini(&uc->guc);
}
@@ -364,7 +381,7 @@ static int uc_init_wopcm(struct intel_uc *uc)
int err;
if (unlikely(!base || !size)) {
- i915_probe_error(gt->i915, "Unsuccessful WOPCM partitioning\n");
+ gt_probe_error(gt, "Unsuccessful WOPCM partitioning\n");
return -E2BIG;
}
@@ -395,13 +412,13 @@ static int uc_init_wopcm(struct intel_uc *uc)
return 0;
err_out:
- i915_probe_error(gt->i915, "Failed to init uC WOPCM registers!\n");
- i915_probe_error(gt->i915, "%s(%#x)=%#x\n", "DMA_GUC_WOPCM_OFFSET",
- i915_mmio_reg_offset(DMA_GUC_WOPCM_OFFSET),
- intel_uncore_read(uncore, DMA_GUC_WOPCM_OFFSET));
- i915_probe_error(gt->i915, "%s(%#x)=%#x\n", "GUC_WOPCM_SIZE",
- i915_mmio_reg_offset(GUC_WOPCM_SIZE),
- intel_uncore_read(uncore, GUC_WOPCM_SIZE));
+ gt_probe_error(gt, "Failed to init uC WOPCM registers!\n");
+ gt_probe_error(gt, "%s(%#x)=%#x\n", "DMA_GUC_WOPCM_OFFSET",
+ i915_mmio_reg_offset(DMA_GUC_WOPCM_OFFSET),
+ intel_uncore_read(uncore, DMA_GUC_WOPCM_OFFSET));
+ gt_probe_error(gt, "%s(%#x)=%#x\n", "GUC_WOPCM_SIZE",
+ i915_mmio_reg_offset(GUC_WOPCM_SIZE),
+ intel_uncore_read(uncore, GUC_WOPCM_SIZE));
return err;
}
@@ -431,20 +448,19 @@ static int __uc_check_hw(struct intel_uc *uc)
return 0;
}
-static void print_fw_ver(struct intel_uc *uc, struct intel_uc_fw *fw)
+static void print_fw_ver(struct intel_gt *gt, struct intel_uc_fw *fw)
{
- struct drm_i915_private *i915 = uc_to_gt(uc)->i915;
-
- drm_info(&i915->drm, "%s firmware %s version %u.%u.%u\n",
- intel_uc_fw_type_repr(fw->type), fw->file_selected.path,
- fw->file_selected.major_ver,
- fw->file_selected.minor_ver,
- fw->file_selected.patch_ver);
+ gt_info(gt, "%s firmware %s version %u.%u.%u\n",
+ intel_uc_fw_type_repr(fw->type), fw->file_selected.path,
+ fw->file_selected.ver.major,
+ fw->file_selected.ver.minor,
+ fw->file_selected.ver.patch);
}
static int __uc_init_hw(struct intel_uc *uc)
{
- struct drm_i915_private *i915 = uc_to_gt(uc)->i915;
+ struct intel_gt *gt = uc_to_gt(uc);
+ struct drm_i915_private *i915 = gt->i915;
struct intel_guc *guc = &uc->guc;
struct intel_huc *huc = &uc->huc;
int ret, attempts;
@@ -452,10 +468,10 @@ static int __uc_init_hw(struct intel_uc *uc)
GEM_BUG_ON(!intel_uc_supports_guc(uc));
GEM_BUG_ON(!intel_uc_wants_guc(uc));
- print_fw_ver(uc, &guc->fw);
+ print_fw_ver(gt, &guc->fw);
if (intel_uc_uses_huc(uc))
- print_fw_ver(uc, &huc->fw);
+ print_fw_ver(gt, &huc->fw);
if (!intel_uc_fw_is_loadable(&guc->fw)) {
ret = __uc_check_hw(uc) ||
@@ -496,8 +512,8 @@ static int __uc_init_hw(struct intel_uc *uc)
if (ret == 0)
break;
- DRM_DEBUG_DRIVER("GuC fw load failed: %d; will reset and "
- "retry %d more time(s)\n", ret, attempts);
+ gt_dbg(gt, "GuC fw load failed (%pe) will reset and retry %d more time(s)\n",
+ ERR_PTR(ret), attempts);
}
/* Did we succeded or run out of retries? */
@@ -531,10 +547,12 @@ static int __uc_init_hw(struct intel_uc *uc)
intel_rps_lower_unslice(&uc_to_gt(uc)->rps);
}
- drm_info(&i915->drm, "GuC submission %s\n",
- str_enabled_disabled(intel_uc_uses_guc_submission(uc)));
- drm_info(&i915->drm, "GuC SLPC %s\n",
- str_enabled_disabled(intel_uc_uses_guc_slpc(uc)));
+ intel_gsc_uc_load_start(&uc->gsc);
+
+ gt_info(gt, "GuC submission %s\n",
+ str_enabled_disabled(intel_uc_uses_guc_submission(uc)));
+ gt_info(gt, "GuC SLPC %s\n",
+ str_enabled_disabled(intel_uc_uses_guc_slpc(uc)));
return 0;
@@ -552,12 +570,12 @@ err_out:
__uc_sanitize(uc);
if (!ret) {
- drm_notice(&i915->drm, "GuC is uninitialized\n");
+ gt_notice(gt, "GuC is uninitialized\n");
/* We want to run without GuC submission */
return 0;
}
- i915_probe_error(i915, "GuC initialization failed %d\n", ret);
+ gt_probe_error(gt, "GuC initialization failed %pe\n", ERR_PTR(ret));
/* We want to keep KMS alive */
return -EIO;
@@ -659,6 +677,9 @@ void intel_uc_suspend(struct intel_uc *uc)
intel_wakeref_t wakeref;
int err;
+ /* flush the GSC worker */
+ intel_gsc_uc_suspend(&uc->gsc);
+
if (!intel_guc_is_ready(guc)) {
guc->interrupts.enabled = false;
return;
@@ -667,7 +688,7 @@ void intel_uc_suspend(struct intel_uc *uc)
with_intel_runtime_pm(&uc_to_gt(uc)->i915->runtime_pm, wakeref) {
err = intel_guc_suspend(guc);
if (err)
- DRM_DEBUG_DRIVER("Failed to suspend GuC, err=%d", err);
+ guc_dbg(guc, "Failed to suspend, %pe", ERR_PTR(err));
}
}
@@ -695,7 +716,7 @@ static int __uc_resume(struct intel_uc *uc, bool enable_communication)
err = intel_guc_resume(guc);
if (err) {
- DRM_DEBUG_DRIVER("Failed to resume GuC, err=%d", err);
+ guc_dbg(guc, "Failed to resume, %pe", ERR_PTR(err));
return err;
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.h b/drivers/gpu/drm/i915/gt/uc/intel_uc.h
index a8f38c2c60e2..5d0f1bcc381e 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.h
@@ -6,6 +6,7 @@
#ifndef _INTEL_UC_H_
#define _INTEL_UC_H_
+#include "intel_gsc_uc.h"
#include "intel_guc.h"
#include "intel_guc_rc.h"
#include "intel_guc_submission.h"
@@ -27,6 +28,7 @@ struct intel_uc_ops {
struct intel_uc {
struct intel_uc_ops const *ops;
+ struct intel_gsc_uc gsc;
struct intel_guc guc;
struct intel_huc huc;
@@ -87,6 +89,7 @@ uc_state_checkers(huc, huc);
uc_state_checkers(guc, guc_submission);
uc_state_checkers(guc, guc_slpc);
uc_state_checkers(guc, guc_rc);
+uc_state_checkers(gsc, gsc_uc);
#undef uc_state_checkers
#undef __uc_state_checker
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
index 2bcdd192f814..65672ff82605 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
@@ -19,11 +19,18 @@
static inline struct intel_gt *
____uc_fw_to_gt(struct intel_uc_fw *uc_fw, enum intel_uc_fw_type type)
{
- if (type == INTEL_UC_FW_TYPE_GUC)
+ GEM_BUG_ON(type >= INTEL_UC_FW_NUM_TYPES);
+
+ switch (type) {
+ case INTEL_UC_FW_TYPE_GUC:
return container_of(uc_fw, struct intel_gt, uc.guc.fw);
+ case INTEL_UC_FW_TYPE_HUC:
+ return container_of(uc_fw, struct intel_gt, uc.huc.fw);
+ case INTEL_UC_FW_TYPE_GSC:
+ return container_of(uc_fw, struct intel_gt, uc.gsc.fw);
+ }
- GEM_BUG_ON(type != INTEL_UC_FW_TYPE_HUC);
- return container_of(uc_fw, struct intel_gt, uc.huc.fw);
+ return NULL;
}
static inline struct intel_gt *__uc_fw_to_gt(struct intel_uc_fw *uc_fw)
@@ -118,35 +125,35 @@ void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
*/
#define __MAKE_UC_FW_PATH_BLANK(prefix_, name_) \
"i915/" \
- __stringify(prefix_) name_ ".bin"
+ __stringify(prefix_) "_" name_ ".bin"
#define __MAKE_UC_FW_PATH_MAJOR(prefix_, name_, major_) \
"i915/" \
- __stringify(prefix_) name_ \
+ __stringify(prefix_) "_" name_ "_" \
__stringify(major_) ".bin"
#define __MAKE_UC_FW_PATH_MMP(prefix_, name_, major_, minor_, patch_) \
"i915/" \
- __stringify(prefix_) name_ \
+ __stringify(prefix_) "_" name_ "_" \
__stringify(major_) "." \
__stringify(minor_) "." \
__stringify(patch_) ".bin"
/* Minor for internal driver use, not part of file name */
#define MAKE_GUC_FW_PATH_MAJOR(prefix_, major_, minor_) \
- __MAKE_UC_FW_PATH_MAJOR(prefix_, "_guc_", major_)
+ __MAKE_UC_FW_PATH_MAJOR(prefix_, "guc", major_)
#define MAKE_GUC_FW_PATH_MMP(prefix_, major_, minor_, patch_) \
- __MAKE_UC_FW_PATH_MMP(prefix_, "_guc_", major_, minor_, patch_)
+ __MAKE_UC_FW_PATH_MMP(prefix_, "guc", major_, minor_, patch_)
#define MAKE_HUC_FW_PATH_BLANK(prefix_) \
- __MAKE_UC_FW_PATH_BLANK(prefix_, "_huc")
+ __MAKE_UC_FW_PATH_BLANK(prefix_, "huc")
#define MAKE_HUC_FW_PATH_GSC(prefix_) \
- __MAKE_UC_FW_PATH_BLANK(prefix_, "_huc_gsc")
+ __MAKE_UC_FW_PATH_BLANK(prefix_, "huc_gsc")
#define MAKE_HUC_FW_PATH_MMP(prefix_, major_, minor_, patch_) \
- __MAKE_UC_FW_PATH_MMP(prefix_, "_huc_", major_, minor_, patch_)
+ __MAKE_UC_FW_PATH_MMP(prefix_, "huc", major_, minor_, patch_)
/*
* All blobs need to be declared via MODULE_FIRMWARE().
@@ -238,7 +245,7 @@ __uc_fw_auto_select(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw)
[INTEL_UC_FW_TYPE_GUC] = { blobs_guc, ARRAY_SIZE(blobs_guc) },
[INTEL_UC_FW_TYPE_HUC] = { blobs_huc, ARRAY_SIZE(blobs_huc) },
};
- static bool verified;
+ static bool verified[INTEL_UC_FW_NUM_TYPES];
const struct uc_fw_platform_requirement *fw_blobs;
enum intel_platform p = INTEL_INFO(i915)->platform;
u32 fw_count;
@@ -247,6 +254,14 @@ __uc_fw_auto_select(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw)
bool found;
/*
+ * GSC FW support is still not fully in place, so we're not defining
+ * the FW blob yet because we don't want the driver to attempt to load
+ * it until we're ready for it.
+ */
+ if (uc_fw->type == INTEL_UC_FW_TYPE_GSC)
+ return;
+
+ /*
* The only difference between the ADL GuC FWs is the HWConfig support.
* ADL-N does not support HWConfig, so we should use the same binary as
* ADL-S, otherwise the GuC might attempt to fetch a config table that
@@ -278,8 +293,8 @@ __uc_fw_auto_select(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw)
uc_fw->file_selected.path = blob->path;
uc_fw->file_wanted.path = blob->path;
- uc_fw->file_wanted.major_ver = blob->major;
- uc_fw->file_wanted.minor_ver = blob->minor;
+ uc_fw->file_wanted.ver.major = blob->major;
+ uc_fw->file_wanted.ver.minor = blob->minor;
uc_fw->loaded_via_gsc = blob->loaded_via_gsc;
found = true;
break;
@@ -291,8 +306,8 @@ __uc_fw_auto_select(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw)
}
/* make sure the list is ordered as expected */
- if (IS_ENABLED(CONFIG_DRM_I915_SELFTEST) && !verified) {
- verified = true;
+ if (IS_ENABLED(CONFIG_DRM_I915_SELFTEST) && !verified[uc_fw->type]) {
+ verified[uc_fw->type] = true;
for (i = 1; i < fw_count; i++) {
/* Next platform is good: */
@@ -343,7 +358,8 @@ __uc_fw_auto_select(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw)
continue;
bad:
- drm_err(&i915->drm, "Invalid FW blob order: %s r%u %s%d.%d.%d comes before %s r%u %s%d.%d.%d\n",
+ drm_err(&i915->drm, "Invalid %s blob order: %s r%u %s%d.%d.%d comes before %s r%u %s%d.%d.%d\n",
+ intel_uc_fw_type_repr(uc_fw->type),
intel_platform_name(fw_blobs[i - 1].p), fw_blobs[i - 1].rev,
fw_blobs[i - 1].blob.legacy ? "L" : "v",
fw_blobs[i - 1].blob.major,
@@ -374,6 +390,11 @@ static const char *__override_huc_firmware_path(struct drm_i915_private *i915)
return "";
}
+static const char *__override_gsc_firmware_path(struct drm_i915_private *i915)
+{
+ return i915->params.gsc_firmware_path;
+}
+
static void __uc_fw_user_override(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw)
{
const char *path = NULL;
@@ -385,6 +406,9 @@ static void __uc_fw_user_override(struct drm_i915_private *i915, struct intel_uc
case INTEL_UC_FW_TYPE_HUC:
path = __override_huc_firmware_path(i915);
break;
+ case INTEL_UC_FW_TYPE_GSC:
+ path = __override_gsc_firmware_path(i915);
+ break;
}
if (unlikely(path)) {
@@ -438,28 +462,28 @@ static void __force_fw_fetch_failures(struct intel_uc_fw *uc_fw, int e)
uc_fw->user_overridden = user;
} else if (i915_inject_probe_error(i915, e)) {
/* require next major version */
- uc_fw->file_wanted.major_ver += 1;
- uc_fw->file_wanted.minor_ver = 0;
+ uc_fw->file_wanted.ver.major += 1;
+ uc_fw->file_wanted.ver.minor = 0;
uc_fw->user_overridden = user;
} else if (i915_inject_probe_error(i915, e)) {
/* require next minor version */
- uc_fw->file_wanted.minor_ver += 1;
+ uc_fw->file_wanted.ver.minor += 1;
uc_fw->user_overridden = user;
- } else if (uc_fw->file_wanted.major_ver &&
+ } else if (uc_fw->file_wanted.ver.major &&
i915_inject_probe_error(i915, e)) {
/* require prev major version */
- uc_fw->file_wanted.major_ver -= 1;
- uc_fw->file_wanted.minor_ver = 0;
+ uc_fw->file_wanted.ver.major -= 1;
+ uc_fw->file_wanted.ver.minor = 0;
uc_fw->user_overridden = user;
- } else if (uc_fw->file_wanted.minor_ver &&
+ } else if (uc_fw->file_wanted.ver.minor &&
i915_inject_probe_error(i915, e)) {
/* require prev minor version - hey, this should work! */
- uc_fw->file_wanted.minor_ver -= 1;
+ uc_fw->file_wanted.ver.minor -= 1;
uc_fw->user_overridden = user;
} else if (user && i915_inject_probe_error(i915, e)) {
/* officially unsupported platform */
- uc_fw->file_wanted.major_ver = 0;
- uc_fw->file_wanted.minor_ver = 0;
+ uc_fw->file_wanted.ver.major = 0;
+ uc_fw->file_wanted.ver.minor = 0;
uc_fw->user_overridden = true;
}
}
@@ -471,13 +495,69 @@ static int check_gsc_manifest(const struct firmware *fw,
u32 version_hi = dw[HUC_GSC_VERSION_HI_DW];
u32 version_lo = dw[HUC_GSC_VERSION_LO_DW];
- uc_fw->file_selected.major_ver = FIELD_GET(HUC_GSC_MAJOR_VER_HI_MASK, version_hi);
- uc_fw->file_selected.minor_ver = FIELD_GET(HUC_GSC_MINOR_VER_HI_MASK, version_hi);
- uc_fw->file_selected.patch_ver = FIELD_GET(HUC_GSC_PATCH_VER_LO_MASK, version_lo);
+ uc_fw->file_selected.ver.major = FIELD_GET(HUC_GSC_MAJOR_VER_HI_MASK, version_hi);
+ uc_fw->file_selected.ver.minor = FIELD_GET(HUC_GSC_MINOR_VER_HI_MASK, version_hi);
+ uc_fw->file_selected.ver.patch = FIELD_GET(HUC_GSC_PATCH_VER_LO_MASK, version_lo);
return 0;
}
+static void uc_unpack_css_version(struct intel_uc_fw_ver *ver, u32 css_value)
+{
+ /* Get version numbers from the CSS header */
+ ver->major = FIELD_GET(CSS_SW_VERSION_UC_MAJOR, css_value);
+ ver->minor = FIELD_GET(CSS_SW_VERSION_UC_MINOR, css_value);
+ ver->patch = FIELD_GET(CSS_SW_VERSION_UC_PATCH, css_value);
+}
+
+static void guc_read_css_info(struct intel_uc_fw *uc_fw, struct uc_css_header *css)
+{
+ struct intel_guc *guc = container_of(uc_fw, struct intel_guc, fw);
+
+ /*
+ * The GuC firmware includes an extra version number to specify the
+ * submission API level. This allows submission code to work with
+ * multiple GuC versions without having to know the absolute firmware
+ * version number (there are likely to be multiple firmware releases
+ * which all support the same submission API level).
+ *
+ * Note that the spec for the CSS header defines this version number
+ * as 'vf_version' as it was originally intended for virtualisation.
+ * However, it is applicable to native submission as well.
+ *
+ * Unfortunately, due to an oversight, this version number was only
+ * exposed in the CSS header from v70.6.0.
+ */
+ if (uc_fw->file_selected.ver.major >= 70) {
+ if (uc_fw->file_selected.ver.minor >= 6) {
+ /* v70.6.0 adds CSS header support */
+ uc_unpack_css_version(&guc->submission_version, css->vf_version);
+ } else if (uc_fw->file_selected.ver.minor >= 3) {
+ /* v70.3.0 introduced v1.1.0 */
+ guc->submission_version.major = 1;
+ guc->submission_version.minor = 1;
+ guc->submission_version.patch = 0;
+ } else {
+ /* v70.0.0 introduced v1.0.0 */
+ guc->submission_version.major = 1;
+ guc->submission_version.minor = 0;
+ guc->submission_version.patch = 0;
+ }
+ } else if (uc_fw->file_selected.ver.major >= 69) {
+ /* v69.0.0 introduced v0.10.0 */
+ guc->submission_version.major = 0;
+ guc->submission_version.minor = 10;
+ guc->submission_version.patch = 0;
+ } else {
+ /* Prior versions were v0.1.0 */
+ guc->submission_version.major = 0;
+ guc->submission_version.minor = 1;
+ guc->submission_version.patch = 0;
+ }
+
+ uc_fw->private_data_size = css->private_data_size;
+}
+
static int check_ccs_header(struct intel_gt *gt,
const struct firmware *fw,
struct intel_uc_fw *uc_fw)
@@ -531,16 +611,66 @@ static int check_ccs_header(struct intel_gt *gt,
return -E2BIG;
}
- /* Get version numbers from the CSS header */
- uc_fw->file_selected.major_ver = FIELD_GET(CSS_SW_VERSION_UC_MAJOR,
- css->sw_version);
- uc_fw->file_selected.minor_ver = FIELD_GET(CSS_SW_VERSION_UC_MINOR,
- css->sw_version);
- uc_fw->file_selected.patch_ver = FIELD_GET(CSS_SW_VERSION_UC_PATCH,
- css->sw_version);
+ uc_unpack_css_version(&uc_fw->file_selected.ver, css->sw_version);
if (uc_fw->type == INTEL_UC_FW_TYPE_GUC)
- uc_fw->private_data_size = css->private_data_size;
+ guc_read_css_info(uc_fw, css);
+
+ return 0;
+}
+
+static bool is_ver_8bit(struct intel_uc_fw_ver *ver)
+{
+ return ver->major < 0xFF && ver->minor < 0xFF && ver->patch < 0xFF;
+}
+
+static bool guc_check_version_range(struct intel_uc_fw *uc_fw)
+{
+ struct intel_guc *guc = container_of(uc_fw, struct intel_guc, fw);
+
+ /*
+ * GuC version number components are defined as being 8-bits.
+ * The submission code relies on this to optimise version comparison
+ * tests. So enforce the restriction here.
+ */
+
+ if (!is_ver_8bit(&uc_fw->file_selected.ver)) {
+ drm_warn(&__uc_fw_to_gt(uc_fw)->i915->drm, "%s firmware: invalid file version: 0x%02X:%02X:%02X\n",
+ intel_uc_fw_type_repr(uc_fw->type),
+ uc_fw->file_selected.ver.major,
+ uc_fw->file_selected.ver.minor,
+ uc_fw->file_selected.ver.patch);
+ return false;
+ }
+
+ if (!is_ver_8bit(&guc->submission_version)) {
+ drm_warn(&__uc_fw_to_gt(uc_fw)->i915->drm, "%s firmware: invalid submit version: 0x%02X:%02X:%02X\n",
+ intel_uc_fw_type_repr(uc_fw->type),
+ guc->submission_version.major,
+ guc->submission_version.minor,
+ guc->submission_version.patch);
+ return false;
+ }
+
+ return true;
+}
+
+static int check_fw_header(struct intel_gt *gt,
+ const struct firmware *fw,
+ struct intel_uc_fw *uc_fw)
+{
+ int err = 0;
+
+ /* GSC FW version is queried after the FW is loaded */
+ if (uc_fw->type == INTEL_UC_FW_TYPE_GSC)
+ return 0;
+
+ if (uc_fw->loaded_via_gsc)
+ err = check_gsc_manifest(fw, uc_fw);
+ else
+ err = check_ccs_header(gt, fw, uc_fw);
+ if (err)
+ return err;
return 0;
}
@@ -628,31 +758,31 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
if (err)
goto fail;
- if (uc_fw->loaded_via_gsc)
- err = check_gsc_manifest(fw, uc_fw);
- else
- err = check_ccs_header(gt, fw, uc_fw);
+ err = check_fw_header(gt, fw, uc_fw);
if (err)
goto fail;
- if (uc_fw->file_wanted.major_ver) {
+ if (uc_fw->type == INTEL_UC_FW_TYPE_GUC && !guc_check_version_range(uc_fw))
+ goto fail;
+
+ if (uc_fw->file_wanted.ver.major && uc_fw->file_selected.ver.major) {
/* Check the file's major version was as it claimed */
- if (uc_fw->file_selected.major_ver != uc_fw->file_wanted.major_ver) {
+ if (uc_fw->file_selected.ver.major != uc_fw->file_wanted.ver.major) {
drm_notice(&i915->drm, "%s firmware %s: unexpected version: %u.%u != %u.%u\n",
intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
- uc_fw->file_selected.major_ver, uc_fw->file_selected.minor_ver,
- uc_fw->file_wanted.major_ver, uc_fw->file_wanted.minor_ver);
+ uc_fw->file_selected.ver.major, uc_fw->file_selected.ver.minor,
+ uc_fw->file_wanted.ver.major, uc_fw->file_wanted.ver.minor);
if (!intel_uc_fw_is_overridden(uc_fw)) {
err = -ENOEXEC;
goto fail;
}
} else {
- if (uc_fw->file_selected.minor_ver < uc_fw->file_wanted.minor_ver)
+ if (uc_fw->file_selected.ver.minor < uc_fw->file_wanted.ver.minor)
old_ver = true;
}
}
- if (old_ver) {
+ if (old_ver && uc_fw->file_selected.ver.major) {
/* Preserve the version that was really wanted */
memcpy(&uc_fw->file_wanted, &file_ideal, sizeof(uc_fw->file_wanted));
@@ -660,9 +790,9 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
"%s firmware %s (%d.%d) is recommended, but only %s (%d.%d) was found\n",
intel_uc_fw_type_repr(uc_fw->type),
uc_fw->file_wanted.path,
- uc_fw->file_wanted.major_ver, uc_fw->file_wanted.minor_ver,
+ uc_fw->file_wanted.ver.major, uc_fw->file_wanted.ver.minor,
uc_fw->file_selected.path,
- uc_fw->file_selected.major_ver, uc_fw->file_selected.minor_ver);
+ uc_fw->file_selected.ver.major, uc_fw->file_selected.ver.minor);
drm_info(&i915->drm,
"Consider updating your linux-firmware pkg or downloading from %s\n",
INTEL_UC_FIRMWARE_URL);
@@ -814,6 +944,20 @@ static int uc_fw_xfer(struct intel_uc_fw *uc_fw, u32 dst_offset, u32 dma_flags)
return ret;
}
+int intel_uc_fw_mark_load_failed(struct intel_uc_fw *uc_fw, int err)
+{
+ struct intel_gt *gt = __uc_fw_to_gt(uc_fw);
+
+ GEM_BUG_ON(!intel_uc_fw_is_loadable(uc_fw));
+
+ i915_probe_error(gt->i915, "Failed to load %s firmware %s (%d)\n",
+ intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
+ err);
+ intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_LOAD_FAIL);
+
+ return err;
+}
+
/**
* intel_uc_fw_upload - load uC firmware using custom loader
* @uc_fw: uC firmware
@@ -850,11 +994,7 @@ int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, u32 dst_offset, u32 dma_flags)
return 0;
fail:
- i915_probe_error(gt->i915, "Failed to load %s firmware %s (%d)\n",
- intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
- err);
- intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_LOAD_FAIL);
- return err;
+ return intel_uc_fw_mark_load_failed(uc_fw, err);
}
static inline bool uc_fw_need_rsa_in_memory(struct intel_uc_fw *uc_fw)
@@ -1068,7 +1208,7 @@ size_t intel_uc_fw_copy_rsa(struct intel_uc_fw *uc_fw, void *dst, u32 max_len)
*/
void intel_uc_fw_dump(const struct intel_uc_fw *uc_fw, struct drm_printer *p)
{
- u32 ver_sel, ver_want;
+ bool got_wanted;
drm_printf(p, "%s firmware: %s\n",
intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path);
@@ -1077,25 +1217,32 @@ void intel_uc_fw_dump(const struct intel_uc_fw *uc_fw, struct drm_printer *p)
intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_wanted.path);
drm_printf(p, "\tstatus: %s\n",
intel_uc_fw_status_repr(uc_fw->status));
- ver_sel = MAKE_UC_VER(uc_fw->file_selected.major_ver,
- uc_fw->file_selected.minor_ver,
- uc_fw->file_selected.patch_ver);
- ver_want = MAKE_UC_VER(uc_fw->file_wanted.major_ver,
- uc_fw->file_wanted.minor_ver,
- uc_fw->file_wanted.patch_ver);
- if (ver_sel < ver_want)
+
+ if (uc_fw->file_selected.ver.major < uc_fw->file_wanted.ver.major)
+ got_wanted = false;
+ else if ((uc_fw->file_selected.ver.major == uc_fw->file_wanted.ver.major) &&
+ (uc_fw->file_selected.ver.minor < uc_fw->file_wanted.ver.minor))
+ got_wanted = false;
+ else if ((uc_fw->file_selected.ver.major == uc_fw->file_wanted.ver.major) &&
+ (uc_fw->file_selected.ver.minor == uc_fw->file_wanted.ver.minor) &&
+ (uc_fw->file_selected.ver.patch < uc_fw->file_wanted.ver.patch))
+ got_wanted = false;
+ else
+ got_wanted = true;
+
+ if (!got_wanted)
drm_printf(p, "\tversion: wanted %u.%u.%u, found %u.%u.%u\n",
- uc_fw->file_wanted.major_ver,
- uc_fw->file_wanted.minor_ver,
- uc_fw->file_wanted.patch_ver,
- uc_fw->file_selected.major_ver,
- uc_fw->file_selected.minor_ver,
- uc_fw->file_selected.patch_ver);
+ uc_fw->file_wanted.ver.major,
+ uc_fw->file_wanted.ver.minor,
+ uc_fw->file_wanted.ver.patch,
+ uc_fw->file_selected.ver.major,
+ uc_fw->file_selected.ver.minor,
+ uc_fw->file_selected.ver.patch);
else
drm_printf(p, "\tversion: found %u.%u.%u\n",
- uc_fw->file_selected.major_ver,
- uc_fw->file_selected.minor_ver,
- uc_fw->file_selected.patch_ver);
+ uc_fw->file_selected.ver.major,
+ uc_fw->file_selected.ver.minor,
+ uc_fw->file_selected.ver.patch);
drm_printf(p, "\tuCode: %u bytes\n", uc_fw->ucode_size);
drm_printf(p, "\tRSA: %u bytes\n", uc_fw->rsa_size);
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
index bc898ba5355d..6ba00e6b3975 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
@@ -61,9 +61,16 @@ enum intel_uc_fw_status {
enum intel_uc_fw_type {
INTEL_UC_FW_TYPE_GUC = 0,
- INTEL_UC_FW_TYPE_HUC
+ INTEL_UC_FW_TYPE_HUC,
+ INTEL_UC_FW_TYPE_GSC,
+};
+#define INTEL_UC_FW_NUM_TYPES 3
+
+struct intel_uc_fw_ver {
+ u32 major;
+ u32 minor;
+ u32 patch;
};
-#define INTEL_UC_FW_NUM_TYPES 2
/*
* The firmware build process will generate a version header file with major and
@@ -72,9 +79,7 @@ enum intel_uc_fw_type {
*/
struct intel_uc_fw_file {
const char *path;
- u16 major_ver;
- u16 minor_ver;
- u16 patch_ver;
+ struct intel_uc_fw_ver ver;
};
/*
@@ -110,11 +115,6 @@ struct intel_uc_fw {
bool loaded_via_gsc;
};
-#define MAKE_UC_VER(maj, min, pat) ((pat) | ((min) << 8) | ((maj) << 16))
-#define GET_UC_VER(uc) (MAKE_UC_VER((uc)->fw.file_selected.major_ver, \
- (uc)->fw.file_selected.minor_ver, \
- (uc)->fw.file_selected.patch_ver))
-
/*
* When we load the uC binaries, we pin them in a reserved section at the top of
* the GGTT, which is ~18 MBs. On multi-GT systems where the GTs share the GGTT,
@@ -205,6 +205,8 @@ static inline const char *intel_uc_fw_type_repr(enum intel_uc_fw_type type)
return "GuC";
case INTEL_UC_FW_TYPE_HUC:
return "HuC";
+ case INTEL_UC_FW_TYPE_GSC:
+ return "GSC";
}
return "uC";
}
@@ -287,6 +289,7 @@ int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, u32 offset, u32 dma_flags);
int intel_uc_fw_init(struct intel_uc_fw *uc_fw);
void intel_uc_fw_fini(struct intel_uc_fw *uc_fw);
size_t intel_uc_fw_copy_rsa(struct intel_uc_fw *uc_fw, void *dst, u32 max_len);
+int intel_uc_fw_mark_load_failed(struct intel_uc_fw *uc_fw, int err);
void intel_uc_fw_dump(const struct intel_uc_fw *uc_fw, struct drm_printer *p);
#endif
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw_abi.h b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw_abi.h
index 7a411178bdbf..646fa8aa6cf1 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw_abi.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw_abi.h
@@ -74,7 +74,8 @@ struct uc_css_header {
#define CSS_SW_VERSION_UC_MAJOR (0xFF << 16)
#define CSS_SW_VERSION_UC_MINOR (0xFF << 8)
#define CSS_SW_VERSION_UC_PATCH (0xFF << 0)
- u32 reserved0[13];
+ u32 vf_version;
+ u32 reserved0[12];
union {
u32 private_data_size; /* only applies to GuC */
u32 reserved1;
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 0ebf5fbf0e39..3c4ae1da0d41 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -49,6 +49,7 @@
#include "i915_pvinfo.h"
#include "trace.h"
+#include "display/intel_display.h"
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_pm.h"
#include "gt/intel_context.h"
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index c033249e73f4..4d898b14de93 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -36,6 +36,7 @@
#include "i915_reg.h"
#include "gvt.h"
+#include "display/intel_display.h"
#include "display/intel_dpio_phy.h"
static int get_edp_pipe(struct intel_vgpu *vgpu)
diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c
index ffe41e9be04f..6834f9fe40cf 100644
--- a/drivers/gpu/drm/i915/gvt/dmabuf.c
+++ b/drivers/gpu/drm/i915/gvt/dmabuf.c
@@ -42,8 +42,7 @@
#define GEN8_DECODE_PTE(pte) (pte & GENMASK_ULL(63, 12))
-static int vgpu_gem_get_pages(
- struct drm_i915_gem_object *obj)
+static int vgpu_gem_get_pages(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
struct intel_vgpu *vgpu;
@@ -52,8 +51,12 @@ static int vgpu_gem_get_pages(
int i, j, ret;
gen8_pte_t __iomem *gtt_entries;
struct intel_vgpu_fb_info *fb_info;
- u32 page_num;
+ unsigned int page_num; /* limited by sg_alloc_table */
+ if (overflows_type(obj->base.size >> PAGE_SHIFT, page_num))
+ return -E2BIG;
+
+ page_num = obj->base.size >> PAGE_SHIFT;
fb_info = (struct intel_vgpu_fb_info *)obj->gvt_info;
if (drm_WARN_ON(&dev_priv->drm, !fb_info))
return -ENODEV;
@@ -66,7 +69,6 @@ static int vgpu_gem_get_pages(
if (unlikely(!st))
return -ENOMEM;
- page_num = obj->base.size >> PAGE_SHIFT;
ret = sg_alloc_table(st, page_num, GFP_KERNEL);
if (ret) {
kfree(st);
diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.h b/drivers/gpu/drm/i915/gvt/fb_decoder.h
index 0daa3931aef7..4eff44194439 100644
--- a/drivers/gpu/drm/i915/gvt/fb_decoder.h
+++ b/drivers/gpu/drm/i915/gvt/fb_decoder.h
@@ -38,7 +38,7 @@
#include <linux/types.h>
-#include "display/intel_display.h"
+#include "display/intel_display_limits.h"
struct intel_vgpu;
diff --git a/drivers/gpu/drm/i915/gvt/firmware.c b/drivers/gpu/drm/i915/gvt/firmware.c
index a683c22d5b64..dce93738e98a 100644
--- a/drivers/gpu/drm/i915/gvt/firmware.c
+++ b/drivers/gpu/drm/i915/gvt/firmware.c
@@ -45,7 +45,7 @@ struct gvt_firmware_header {
u64 cfg_space_offset; /* offset in the file */
u64 mmio_size;
u64 mmio_offset; /* offset in the file */
- unsigned char data[1];
+ unsigned char data[];
};
#define dev_to_drm_minor(d) dev_get_drvdata((d))
@@ -77,7 +77,7 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt)
unsigned long size, crc32_start;
int ret;
- size = sizeof(*h) + info->mmio_size + info->cfg_space_size;
+ size = offsetof(struct gvt_firmware_header, data) + info->mmio_size + info->cfg_space_size;
firmware = vzalloc(size);
if (!firmware)
return -ENOMEM;
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index f93e6122f247..ddf49c2dbb91 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -1471,8 +1471,8 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
/* Defer failure until attempted use */
jump_whitelist = alloc_whitelist(batch_length);
- shadow_addr = gen8_canonical_addr(shadow->node.start);
- batch_addr = gen8_canonical_addr(batch->node.start + batch_offset);
+ shadow_addr = gen8_canonical_addr(i915_vma_offset(shadow));
+ batch_addr = gen8_canonical_addr(i915_vma_offset(batch) + batch_offset);
/*
* We use the batch length as size because the shadow object is as
diff --git a/drivers/gpu/drm/i915/i915_config.c b/drivers/gpu/drm/i915/i915_config.c
index afb828dab53b..24e5bb8a670e 100644
--- a/drivers/gpu/drm/i915/i915_config.c
+++ b/drivers/gpu/drm/i915/i915_config.c
@@ -3,7 +3,10 @@
* Copyright © 2020 Intel Corporation
*/
-#include "i915_drv.h"
+#include <linux/kernel.h>
+
+#include "i915_config.h"
+#include "i915_utils.h"
unsigned long
i915_fence_context_timeout(const struct drm_i915_private *i915, u64 context)
diff --git a/drivers/gpu/drm/i915/i915_config.h b/drivers/gpu/drm/i915/i915_config.h
new file mode 100644
index 000000000000..10e18b036489
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_config.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef __I915_CONFIG_H__
+#define __I915_CONFIG_H__
+
+#include <linux/types.h>
+#include <linux/limits.h>
+
+struct drm_i915_private;
+
+unsigned long i915_fence_context_timeout(const struct drm_i915_private *i915,
+ u64 context);
+
+static inline unsigned long
+i915_fence_timeout(const struct drm_i915_private *i915)
+{
+ return i915_fence_context_timeout(i915, U64_MAX);
+}
+
+#endif /* __I915_CONFIG_H__ */
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 6c7ac73b69a5..45773ce1deac 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -183,7 +183,7 @@ i915_debugfs_describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
seq_printf(m, " (%s offset: %08llx, size: %08llx, pages: %s",
stringify_vma_type(vma),
- vma->node.start, vma->node.size,
+ i915_vma_offset(vma), i915_vma_size(vma),
stringify_page_sizes(vma->resource->page_sizes_gtt,
NULL, 0));
if (i915_vma_is_ggtt(vma) || i915_vma_is_dpt(vma)) {
@@ -648,13 +648,14 @@ i915_drop_caches_get(void *data, u64 *val)
return 0;
}
+
static int
gt_drop_caches(struct intel_gt *gt, u64 val)
{
int ret;
if (val & DROP_RESET_ACTIVE &&
- wait_for(intel_engines_are_idle(gt), I915_IDLE_ENGINES_TIMEOUT))
+ wait_for(intel_engines_are_idle(gt), 200))
intel_gt_set_wedged(gt);
if (val & DROP_RETIRE)
@@ -762,7 +763,6 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_sseu_status", i915_sseu_status, 0},
{"i915_rps_boost_info", i915_rps_boost_info, 0},
};
-#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
static const struct i915_debugfs_files {
const char *name;
@@ -795,6 +795,6 @@ void i915_debugfs_register(struct drm_i915_private *dev_priv)
}
drm_debugfs_create_files(i915_debugfs_list,
- I915_DEBUGFS_ENTRIES,
+ ARRAY_SIZE(i915_debugfs_list),
minor->debugfs_root, minor);
}
diff --git a/drivers/gpu/drm/i915/i915_debugfs_params.c b/drivers/gpu/drm/i915/i915_debugfs_params.c
index 783c8676eee2..614bde321589 100644
--- a/drivers/gpu/drm/i915/i915_debugfs_params.c
+++ b/drivers/gpu/drm/i915/i915_debugfs_params.c
@@ -230,27 +230,16 @@ i915_debugfs_create_charp(const char *name, umode_t mode,
&i915_param_charp_fops);
}
-static __always_inline void
-_i915_param_create_file(struct dentry *parent, const char *name,
- const char *type, int mode, void *value)
-{
- if (!mode)
- return;
-
- if (!__builtin_strcmp(type, "bool"))
- debugfs_create_bool(name, mode, parent, value);
- else if (!__builtin_strcmp(type, "int"))
- i915_debugfs_create_int(name, mode, parent, value);
- else if (!__builtin_strcmp(type, "unsigned int"))
- i915_debugfs_create_uint(name, mode, parent, value);
- else if (!__builtin_strcmp(type, "unsigned long"))
- debugfs_create_ulong(name, mode, parent, value);
- else if (!__builtin_strcmp(type, "char *"))
- i915_debugfs_create_charp(name, mode, parent, value);
- else
- WARN(1, "no debugfs fops defined for param type %s (i915.%s)\n",
- type, name);
-}
+#define _i915_param_create_file(parent, name, mode, valp) \
+ do { \
+ if (mode) \
+ _Generic(valp, \
+ bool *: debugfs_create_bool, \
+ int *: i915_debugfs_create_int, \
+ unsigned int *: i915_debugfs_create_uint, \
+ unsigned long *: debugfs_create_ulong, \
+ char **: i915_debugfs_create_charp)(name, mode, parent, valp); \
+ } while(0)
/* add a subdirectory with files for each i915 param */
struct dentry *i915_debugfs_params(struct drm_i915_private *i915)
@@ -269,7 +258,7 @@ struct dentry *i915_debugfs_params(struct drm_i915_private *i915)
* just let the generic create file fail silently with -EEXIST.
*/
-#define REGISTER(T, x, unused, mode, ...) _i915_param_create_file(dir, #x, #T, mode, &params->x);
+#define REGISTER(T, x, unused, mode, ...) _i915_param_create_file(dir, #x, mode, &params->x);
I915_PARAMS_FOR_EACH(REGISTER);
#undef REGISTER
diff --git a/drivers/gpu/drm/i915/i915_deps.c b/drivers/gpu/drm/i915/i915_deps.c
index 297b8e4e42ee..91c61864285a 100644
--- a/drivers/gpu/drm/i915/i915_deps.c
+++ b/drivers/gpu/drm/i915/i915_deps.c
@@ -6,7 +6,7 @@
#include <linux/dma-fence.h>
#include <linux/slab.h>
-#include <drm/ttm/ttm_bo_api.h>
+#include <drm/ttm/ttm_bo.h>
#include "i915_deps.h"
diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c
index 61c38fc734cf..cf1c0970ecb4 100644
--- a/drivers/gpu/drm/i915/i915_driver.c
+++ b/drivers/gpu/drm/i915/i915_driver.c
@@ -34,7 +34,6 @@
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
-#include <linux/pnp.h>
#include <linux/slab.h>
#include <linux/string_helpers.h>
#include <linux/vga_switcheroo.h>
@@ -73,8 +72,13 @@
#include "gt/intel_gt_pm.h"
#include "gt/intel_rc6.h"
+#include "pxp/intel_pxp.h"
+#include "pxp/intel_pxp_debugfs.h"
#include "pxp/intel_pxp_pm.h"
+#include "soc/intel_dram.h"
+#include "soc/intel_gmch.h"
+
#include "i915_file_private.h"
#include "i915_debugfs.h"
#include "i915_driver.h"
@@ -93,7 +97,6 @@
#include "i915_sysfs.h"
#include "i915_utils.h"
#include "i915_vgpu.h"
-#include "intel_dram.h"
#include "intel_gvt.h"
#include "intel_memory_region.h"
#include "intel_pci_config.h"
@@ -102,146 +105,8 @@
#include "intel_region_ttm.h"
#include "vlv_suspend.h"
-/* Intel Rapid Start Technology ACPI device name */
-static const char irst_name[] = "INT3392";
-
static const struct drm_driver i915_drm_driver;
-static void i915_release_bridge_dev(struct drm_device *dev,
- void *bridge)
-{
- pci_dev_put(bridge);
-}
-
-static int i915_get_bridge_dev(struct drm_i915_private *dev_priv)
-{
- int domain = pci_domain_nr(to_pci_dev(dev_priv->drm.dev)->bus);
-
- dev_priv->bridge_dev =
- pci_get_domain_bus_and_slot(domain, 0, PCI_DEVFN(0, 0));
- if (!dev_priv->bridge_dev) {
- drm_err(&dev_priv->drm, "bridge device not found\n");
- return -EIO;
- }
-
- return drmm_add_action_or_reset(&dev_priv->drm, i915_release_bridge_dev,
- dev_priv->bridge_dev);
-}
-
-/* Allocate space for the MCH regs if needed, return nonzero on error */
-static int
-intel_alloc_mchbar_resource(struct drm_i915_private *dev_priv)
-{
- int reg = GRAPHICS_VER(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
- u32 temp_lo, temp_hi = 0;
- u64 mchbar_addr;
- int ret;
-
- if (GRAPHICS_VER(dev_priv) >= 4)
- pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
- pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
- mchbar_addr = ((u64)temp_hi << 32) | temp_lo;
-
- /* If ACPI doesn't have it, assume we need to allocate it ourselves */
-#ifdef CONFIG_PNP
- if (mchbar_addr &&
- pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
- return 0;
-#endif
-
- /* Get some space for it */
- dev_priv->mch_res.name = "i915 MCHBAR";
- dev_priv->mch_res.flags = IORESOURCE_MEM;
- ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus,
- &dev_priv->mch_res,
- MCHBAR_SIZE, MCHBAR_SIZE,
- PCIBIOS_MIN_MEM,
- 0, pcibios_align_resource,
- dev_priv->bridge_dev);
- if (ret) {
- drm_dbg(&dev_priv->drm, "failed bus alloc: %d\n", ret);
- dev_priv->mch_res.start = 0;
- return ret;
- }
-
- if (GRAPHICS_VER(dev_priv) >= 4)
- pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
- upper_32_bits(dev_priv->mch_res.start));
-
- pci_write_config_dword(dev_priv->bridge_dev, reg,
- lower_32_bits(dev_priv->mch_res.start));
- return 0;
-}
-
-/* Setup MCHBAR if possible, return true if we should disable it again */
-static void
-intel_setup_mchbar(struct drm_i915_private *dev_priv)
-{
- int mchbar_reg = GRAPHICS_VER(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
- u32 temp;
- bool enabled;
-
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- return;
-
- dev_priv->mchbar_need_disable = false;
-
- if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
- pci_read_config_dword(dev_priv->bridge_dev, DEVEN, &temp);
- enabled = !!(temp & DEVEN_MCHBAR_EN);
- } else {
- pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
- enabled = temp & 1;
- }
-
- /* If it's already enabled, don't have to do anything */
- if (enabled)
- return;
-
- if (intel_alloc_mchbar_resource(dev_priv))
- return;
-
- dev_priv->mchbar_need_disable = true;
-
- /* Space is allocated or reserved, so enable it. */
- if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
- pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
- temp | DEVEN_MCHBAR_EN);
- } else {
- pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
- pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
- }
-}
-
-static void
-intel_teardown_mchbar(struct drm_i915_private *dev_priv)
-{
- int mchbar_reg = GRAPHICS_VER(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
-
- if (dev_priv->mchbar_need_disable) {
- if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
- u32 deven_val;
-
- pci_read_config_dword(dev_priv->bridge_dev, DEVEN,
- &deven_val);
- deven_val &= ~DEVEN_MCHBAR_EN;
- pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
- deven_val);
- } else {
- u32 mchbar_val;
-
- pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg,
- &mchbar_val);
- mchbar_val &= ~1;
- pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg,
- mchbar_val);
- }
- }
-
- if (dev_priv->mch_res.start)
- release_resource(&dev_priv->mch_res);
-}
-
static int i915_workqueues_init(struct drm_i915_private *dev_priv)
{
/*
@@ -447,7 +312,7 @@ static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv)
if (i915_inject_probe_failure(dev_priv))
return -ENODEV;
- ret = i915_get_bridge_dev(dev_priv);
+ ret = intel_gmch_bridge_setup(dev_priv);
if (ret < 0)
return ret;
@@ -464,7 +329,7 @@ static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv)
}
/* Try to make sure MCHBAR is enabled before poking at it */
- intel_setup_mchbar(dev_priv);
+ intel_gmch_bar_setup(dev_priv);
intel_device_info_runtime_init(dev_priv);
for_each_gt(gt, dev_priv, i) {
@@ -479,7 +344,7 @@ static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv)
return 0;
err_uncore:
- intel_teardown_mchbar(dev_priv);
+ intel_gmch_bar_teardown(dev_priv);
return ret;
}
@@ -490,7 +355,7 @@ err_uncore:
*/
static void i915_driver_mmio_release(struct drm_i915_private *dev_priv)
{
- intel_teardown_mchbar(dev_priv);
+ intel_gmch_bar_teardown(dev_priv);
}
/**
@@ -612,10 +477,6 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
i915_perf_init(dev_priv);
- ret = intel_gt_assign_ggtt(to_gt(dev_priv));
- if (ret)
- goto err_perf;
-
ret = i915_ggtt_probe_hw(dev_priv);
if (ret)
goto err_perf;
@@ -763,6 +624,8 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
for_each_gt(gt, dev_priv, i)
intel_gt_driver_register(gt);
+ intel_pxp_debugfs_register(dev_priv->pxp);
+
i915_hwmon_register(dev_priv);
intel_display_driver_register(dev_priv);
@@ -794,6 +657,8 @@ static void i915_driver_unregister(struct drm_i915_private *dev_priv)
intel_display_driver_unregister(dev_priv);
+ intel_pxp_fini(dev_priv);
+
for_each_gt(gt, dev_priv, i)
intel_gt_driver_unregister(gt);
@@ -937,6 +802,8 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
goto out_cleanup_modeset2;
+ intel_pxp_init(i915);
+
ret = intel_modeset_init(i915);
if (ret)
goto out_cleanup_gem;
@@ -1169,6 +1036,8 @@ static int i915_drm_prepare(struct drm_device *dev)
{
struct drm_i915_private *i915 = to_i915(dev);
+ intel_pxp_suspend_prepare(i915->pxp);
+
/*
* NB intel_display_suspend() may issue new requests after we've
* ostensibly marked the GPU as ready-to-sleep here. We need to
@@ -1249,6 +1118,8 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
disable_rpm_wakeref_asserts(rpm);
+ intel_pxp_suspend(dev_priv->pxp);
+
i915_gem_suspend_late(dev_priv);
for_each_gt(gt, dev_priv, i)
@@ -1313,7 +1184,8 @@ int i915_driver_suspend_switcheroo(struct drm_i915_private *i915,
static int i915_drm_resume(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- int ret;
+ struct intel_gt *gt;
+ int ret, i;
disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
@@ -1328,6 +1200,11 @@ static int i915_drm_resume(struct drm_device *dev)
drm_err(&dev_priv->drm, "failed to re-enable GGTT\n");
i915_ggtt_resume(to_gt(dev_priv)->ggtt);
+
+ for_each_gt(gt, dev_priv, i)
+ if (GRAPHICS_VER(gt->i915) >= 8)
+ setup_private_pat(gt);
+
/* Must be called after GGTT is resumed. */
intel_dpt_resume(dev_priv);
@@ -1355,6 +1232,8 @@ static int i915_drm_resume(struct drm_device *dev)
i915_gem_resume(dev_priv);
+ intel_pxp_resume(dev_priv->pxp);
+
intel_modeset_init_hw(dev_priv);
intel_init_clock_gating(dev_priv);
intel_hpd_init(dev_priv);
@@ -1491,8 +1370,6 @@ static int i915_pm_suspend(struct device *kdev)
return -ENODEV;
}
- i915_ggtt_mark_pte_lost(i915, false);
-
if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
@@ -1545,14 +1422,6 @@ static int i915_pm_resume(struct device *kdev)
if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
- /*
- * If IRST is enabled, or if we can't detect whether it's enabled,
- * then we must assume we lost the GGTT page table entries, since
- * they are not retained if IRST decided to enter S4.
- */
- if (!IS_ENABLED(CONFIG_ACPI) || acpi_dev_present(irst_name, NULL, -1))
- i915_ggtt_mark_pte_lost(i915, true);
-
return i915_drm_resume(&i915->drm);
}
@@ -1612,9 +1481,6 @@ static int i915_pm_restore_early(struct device *kdev)
static int i915_pm_restore(struct device *kdev)
{
- struct drm_i915_private *i915 = kdev_to_i915(kdev);
-
- i915_ggtt_mark_pte_lost(i915, true);
return i915_pm_resume(kdev);
}
@@ -1638,6 +1504,8 @@ static int intel_runtime_suspend(struct device *kdev)
*/
i915_gem_runtime_suspend(dev_priv);
+ intel_pxp_runtime_suspend(dev_priv->pxp);
+
for_each_gt(gt, dev_priv, i)
intel_gt_runtime_suspend(gt);
@@ -1742,6 +1610,8 @@ static int intel_runtime_resume(struct device *kdev)
for_each_gt(gt, dev_priv, i)
intel_gt_runtime_resume(gt);
+ intel_pxp_runtime_resume(dev_priv->pxp);
+
/*
* On VLV/CHV display interrupts are part of the display
* power well, so hpd is reinitialized from there. For
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index a380db36d52c..4295306487c7 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -36,7 +36,7 @@
#include <drm/ttm/ttm_device.h>
-#include "display/intel_display.h"
+#include "display/intel_display_limits.h"
#include "display/intel_display_core.h"
#include "gem/i915_gem_context_types.h"
@@ -49,6 +49,8 @@
#include "gt/intel_workarounds.h"
#include "gt/uc/intel_uc.h"
+#include "soc/intel_pch.h"
+
#include "i915_drm_client.h"
#include "i915_gem.h"
#include "i915_gpu_error.h"
@@ -58,31 +60,46 @@
#include "i915_utils.h"
#include "intel_device_info.h"
#include "intel_memory_region.h"
-#include "intel_pch.h"
#include "intel_runtime_pm.h"
#include "intel_step.h"
#include "intel_uncore.h"
struct drm_i915_clock_gating_funcs;
-struct drm_i915_gem_object;
-struct drm_i915_private;
-struct intel_connector;
-struct intel_dp;
-struct intel_encoder;
-struct intel_limit;
-struct intel_overlay_error_state;
struct vlv_s0ix_state;
+struct intel_pxp;
-#define I915_GEM_GPU_DOMAINS \
- (I915_GEM_DOMAIN_RENDER | \
- I915_GEM_DOMAIN_SAMPLER | \
- I915_GEM_DOMAIN_COMMAND | \
- I915_GEM_DOMAIN_INSTRUCTION | \
- I915_GEM_DOMAIN_VERTEX)
+#define GEM_QUIRK_PIN_SWIZZLED_PAGES BIT(0)
-#define I915_COLOR_UNEVICTABLE (-1) /* a non-vma sharing the address space */
+/* Data Stolen Memory (DSM) aka "i915 stolen memory" */
+struct i915_dsm {
+ /*
+ * The start and end of DSM which we can optionally use to create GEM
+ * objects backed by stolen memory.
+ *
+ * Note that usable_size tells us exactly how much of this we are
+ * actually allowed to use, given that some portion of it is in fact
+ * reserved for use by hardware functions.
+ */
+ struct resource stolen;
-#define GEM_QUIRK_PIN_SWIZZLED_PAGES BIT(0)
+ /*
+ * Reserved portion of DSM.
+ */
+ struct resource reserved;
+
+ /*
+ * Total size minus reserved ranges.
+ *
+ * DSM is segmented in hardware with different portions offlimits to
+ * certain functions.
+ *
+ * The drm_mm is initialised to the total accessible range, as found
+ * from the PCI config. On Broadwell+, this is further restricted to
+ * avoid the first page! The upper end of DSM is reserved for hardware
+ * functions and similarly removed from the accessible range.
+ */
+ resource_size_t usable_size;
+};
struct i915_suspend_saved_registers {
u32 saveDSPARB;
@@ -161,19 +178,6 @@ struct i915_gem_mm {
u32 shrink_count;
};
-#define I915_IDLE_ENGINES_TIMEOUT (200) /* in ms */
-
-unsigned long i915_fence_context_timeout(const struct drm_i915_private *i915,
- u64 context);
-
-static inline unsigned long
-i915_fence_timeout(const struct drm_i915_private *i915)
-{
- return i915_fence_context_timeout(i915, U64_MAX);
-}
-
-#define HAS_HW_SAGV_WM(i915) (DISPLAY_VER(i915) >= 13 && !IS_DGFX(i915))
-
struct i915_virtual_gpu {
struct mutex lock; /* serialises sending of g2v_notify command pkts */
bool active;
@@ -203,29 +207,7 @@ struct drm_i915_private {
struct intel_runtime_info __runtime; /* Use RUNTIME_INFO() to access. */
struct intel_driver_caps caps;
- /**
- * Data Stolen Memory - aka "i915 stolen memory" gives us the start and
- * end of stolen which we can optionally use to create GEM objects
- * backed by stolen memory. Note that stolen_usable_size tells us
- * exactly how much of this we are actually allowed to use, given that
- * some portion of it is in fact reserved for use by hardware functions.
- */
- struct resource dsm;
- /**
- * Reseved portion of Data Stolen Memory
- */
- struct resource dsm_reserved;
-
- /*
- * Stolen memory is segmented in hardware with different portions
- * offlimits to certain functions.
- *
- * The drm_mm is initialised to the total accessible range, as found
- * from the PCI config. On Broadwell+, this is further restricted to
- * avoid the first page! The upper end of stolen memory is reserved for
- * hardware functions and similarly removed from the accessible range.
- */
- resource_size_t stolen_usable_size; /* Total size minus reserved ranges */
+ struct i915_dsm dsm;
struct intel_uncore uncore;
struct intel_uncore_mmio_debug mmio_debug;
@@ -234,13 +216,15 @@ struct drm_i915_private {
struct intel_gvt *gvt;
- struct pci_dev *bridge_dev;
+ struct {
+ struct pci_dev *pdev;
+ struct resource mch_res;
+ bool mchbar_need_disable;
+ } gmch;
struct rb_root uabi_engines;
unsigned int engine_uabi_class_count[I915_LAST_UABI_ENGINE_CLASS + 1];
- struct resource mch_res;
-
/* protects the irq masks */
spinlock_t irq_lock;
@@ -286,8 +270,6 @@ struct drm_i915_private {
struct i915_gem_mm mm;
- bool mchbar_need_disable;
-
struct intel_l3_parity l3_parity;
/*
@@ -298,14 +280,6 @@ struct drm_i915_private {
struct i915_gpu_error gpu_error;
- /*
- * Shadows for CHV DPLL_MD regs to keep the state
- * checker somewhat working in the presence hardware
- * crappiness (can't read out DPLL_MD for pipes B & C).
- */
- u32 chv_dpll_md[I915_MAX_PIPES];
- u32 bxt_phy_grc;
-
u32 suspend_count;
struct i915_suspend_saved_registers regfile;
struct vlv_s0ix_state *vlv_s0ix_state;
@@ -364,19 +338,13 @@ struct drm_i915_private {
struct file *mmap_singleton;
} gem;
- u8 pch_ssc_use;
+ struct intel_pxp *pxp;
/* For i915gm/i945gm vblank irq workaround */
u8 vblank_enabled;
bool irq_enabled;
- /*
- * DG2: Mask of PHYs that were not calibrated by the firmware
- * and should not be used.
- */
- u8 snps_phy_failed_calibration;
-
struct i915_pmu pmu;
struct i915_drm_clients clients;
@@ -465,8 +433,6 @@ static inline struct intel_gt *to_gt(struct drm_i915_private *i915)
#define INTEL_REVID(dev_priv) (to_pci_dev((dev_priv)->drm.dev)->revision)
-#define HAS_DSB(dev_priv) (INTEL_INFO(dev_priv)->display.has_dsb)
-
#define INTEL_DISPLAY_STEP(__i915) (RUNTIME_INFO(__i915)->step.display_step)
#define INTEL_GRAPHICS_STEP(__i915) (RUNTIME_INFO(__i915)->step.graphics_step)
#define INTEL_MEDIA_STEP(__i915) (RUNTIME_INFO(__i915)->step.media_step)
@@ -726,6 +692,14 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
(IS_SUBPLATFORM(__i915, INTEL_METEORLAKE, INTEL_SUBPLATFORM_##variant) && \
IS_GRAPHICS_STEP(__i915, since, until))
+#define IS_MTL_DISPLAY_STEP(__i915, since, until) \
+ (IS_METEORLAKE(__i915) && \
+ IS_DISPLAY_STEP(__i915, since, until))
+
+#define IS_MTL_MEDIA_STEP(__i915, since, until) \
+ (IS_METEORLAKE(__i915) && \
+ IS_MEDIA_STEP(__i915, since, until))
+
/*
* DG2 hardware steppings are a bit unusual. The hardware design was forked to
* create three variants (G10, G11, and G12) which each have distinct
@@ -874,6 +848,9 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define HAS_RPS(dev_priv) (INTEL_INFO(dev_priv)->has_rps)
#define HAS_DMC(dev_priv) (RUNTIME_INFO(dev_priv)->has_dmc)
+#define HAS_DSB(dev_priv) (INTEL_INFO(dev_priv)->display.has_dsb)
+#define HAS_DSC(__i915) (RUNTIME_INFO(__i915)->has_dsc)
+#define HAS_HW_SAGV_WM(i915) (DISPLAY_VER(i915) >= 13 && !IS_DGFX(i915))
#define HAS_HECI_PXP(dev_priv) \
(INTEL_INFO(dev_priv)->has_heci_pxp)
@@ -918,10 +895,6 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define HAS_GLOBAL_MOCS_REGISTERS(dev_priv) (INTEL_INFO(dev_priv)->has_global_mocs)
-#define HAS_PXP(dev_priv) ((IS_ENABLED(CONFIG_DRM_I915_PXP) && \
- INTEL_INFO(dev_priv)->has_pxp) && \
- VDBOX_MASK(to_gt(dev_priv)))
-
#define HAS_GMCH(dev_priv) (INTEL_INFO(dev_priv)->display.has_gmch)
#define HAS_GMD_ID(i915) (INTEL_INFO(i915)->has_gmd_id)
@@ -935,9 +908,6 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define NUM_L3_SLICES(dev_priv) (IS_HSW_GT3(dev_priv) ? \
2 : HAS_L3_DPF(dev_priv))
-#define GT_FREQUENCY_MULTIPLIER 50
-#define GEN9_FREQ_SCALER 3
-
#define INTEL_NUM_PIPES(dev_priv) (hweight8(RUNTIME_INFO(dev_priv)->pipe_mask))
#define HAS_DISPLAY(dev_priv) (RUNTIME_INFO(dev_priv)->pipe_mask != 0)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 8468ca9885fd..35950fa91406 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -229,8 +229,9 @@ i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
struct drm_i915_gem_pread *args)
{
unsigned int needs_clflush;
- unsigned int idx, offset;
char __user *user_data;
+ unsigned long offset;
+ pgoff_t idx;
u64 remain;
int ret;
@@ -383,13 +384,17 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
+ unsigned long remain, offset;
intel_wakeref_t wakeref;
struct drm_mm_node node;
void __user *user_data;
struct i915_vma *vma;
- u64 remain, offset;
int ret = 0;
+ if (overflows_type(args->size, remain) ||
+ overflows_type(args->offset, offset))
+ return -EINVAL;
+
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
vma = i915_gem_gtt_prepare(obj, &node, false);
@@ -540,13 +545,17 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
struct intel_runtime_pm *rpm = &i915->runtime_pm;
+ unsigned long remain, offset;
intel_wakeref_t wakeref;
struct drm_mm_node node;
struct i915_vma *vma;
- u64 remain, offset;
void __user *user_data;
int ret = 0;
+ if (overflows_type(args->size, remain) ||
+ overflows_type(args->offset, offset))
+ return -EINVAL;
+
if (i915_gem_object_has_struct_page(obj)) {
/*
* Avoid waking the device up if we can fallback, as
@@ -654,8 +663,9 @@ i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
{
unsigned int partial_cacheline_write;
unsigned int needs_clflush;
- unsigned int offset, idx;
void __user *user_data;
+ unsigned long offset;
+ pgoff_t idx;
u64 remain;
int ret;
@@ -1099,7 +1109,7 @@ void i915_gem_drain_freed_objects(struct drm_i915_private *i915)
{
while (atomic_read(&i915->mm.free_count)) {
flush_work(&i915->mm.free_work);
- flush_delayed_work(&i915->bdev.wq);
+ drain_workqueue(i915->bdev.wq);
rcu_barrier();
}
}
@@ -1143,6 +1153,8 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
for_each_gt(gt, dev_priv, i) {
intel_uc_fetch_firmwares(&gt->uc);
intel_wopcm_init(&gt->wopcm);
+ if (GRAPHICS_VER(dev_priv) >= 8)
+ setup_private_pat(gt);
}
ret = i915_init_ggtt(dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_gem.h b/drivers/gpu/drm/i915/i915_gem.h
index a5cdf6662d01..82e9d289398c 100644
--- a/drivers/gpu/drm/i915/i915_gem.h
+++ b/drivers/gpu/drm/i915/i915_gem.h
@@ -39,6 +39,13 @@ struct i915_gem_ww_ctx;
struct i915_gtt_view;
struct i915_vma;
+#define I915_GEM_GPU_DOMAINS \
+ (I915_GEM_DOMAIN_RENDER | \
+ I915_GEM_DOMAIN_SAMPLER | \
+ I915_GEM_DOMAIN_COMMAND | \
+ I915_GEM_DOMAIN_INSTRUCTION | \
+ I915_GEM_DOMAIN_VERTEX)
+
void i915_gem_init_early(struct drm_i915_private *i915);
void i915_gem_cleanup_early(struct drm_i915_private *i915);
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index a4b4d9b7d26c..c02ebd6900ae 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -43,16 +43,25 @@ static bool dying_vma(struct i915_vma *vma)
return !kref_read(&vma->obj->base.refcount);
}
-static int ggtt_flush(struct intel_gt *gt)
+static int ggtt_flush(struct i915_address_space *vm)
{
- /*
- * Not everything in the GGTT is tracked via vma (otherwise we
- * could evict as required with minimal stalling) so we are forced
- * to idle the GPU and explicitly retire outstanding requests in
- * the hopes that we can then remove contexts and the like only
- * bound by their active reference.
- */
- return intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+ struct intel_gt *gt;
+ int ret = 0;
+
+ list_for_each_entry(gt, &ggtt->gt_list, ggtt_link) {
+ /*
+ * Not everything in the GGTT is tracked via vma (otherwise we
+ * could evict as required with minimal stalling) so we are forced
+ * to idle the GPU and explicitly retire outstanding requests in
+ * the hopes that we can then remove contexts and the like only
+ * bound by their active reference.
+ */
+ ret = intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
+ if (ret)
+ return ret;
+ }
+ return ret;
}
static bool grab_vma(struct i915_vma *vma, struct i915_gem_ww_ctx *ww)
@@ -149,6 +158,7 @@ i915_gem_evict_something(struct i915_address_space *vm,
struct drm_mm_node *node;
enum drm_mm_insert_mode mode;
struct i915_vma *active;
+ struct intel_gt *gt;
int ret;
lockdep_assert_held(&vm->mutex);
@@ -174,7 +184,14 @@ i915_gem_evict_something(struct i915_address_space *vm,
min_size, alignment, color,
start, end, mode);
- intel_gt_retire_requests(vm->gt);
+ if (i915_is_ggtt(vm)) {
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+
+ list_for_each_entry(gt, &ggtt->gt_list, ggtt_link)
+ intel_gt_retire_requests(gt);
+ } else {
+ intel_gt_retire_requests(vm->gt);
+ }
search_again:
active = NULL;
@@ -246,7 +263,7 @@ search_again:
if (I915_SELFTEST_ONLY(igt_evict_ctl.fail_if_busy))
return -EBUSY;
- ret = ggtt_flush(vm->gt);
+ ret = ggtt_flush(vm);
if (ret)
return ret;
@@ -332,7 +349,15 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
* a stray pin (preventing eviction) that can only be resolved by
* retiring.
*/
- intel_gt_retire_requests(vm->gt);
+ if (i915_is_ggtt(vm)) {
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+ struct intel_gt *gt;
+
+ list_for_each_entry(gt, &ggtt->gt_list, ggtt_link)
+ intel_gt_retire_requests(gt);
+ } else {
+ intel_gt_retire_requests(vm->gt);
+ }
if (i915_vm_has_cache_coloring(vm)) {
/* Expand search to cover neighbouring guard pages (or lack!) */
@@ -444,7 +469,7 @@ int i915_gem_evict_vm(struct i915_address_space *vm, struct i915_gem_ww_ctx *ww,
* switch otherwise is ineffective.
*/
if (i915_is_ggtt(vm)) {
- ret = ggtt_flush(vm->gt);
+ ret = ggtt_flush(vm);
if (ret)
return ret;
}
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 8c2f57eb5dda..3d77679bf211 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -18,6 +18,8 @@ struct drm_i915_gem_object;
struct i915_address_space;
struct i915_gem_ww_ctx;
+#define I915_COLOR_UNEVICTABLE (-1) /* a non-vma sharing the address space */
+
int __must_check i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
struct sg_table *pages);
void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
@@ -44,7 +46,8 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
#define PIN_HIGH BIT_ULL(5)
#define PIN_OFFSET_BIAS BIT_ULL(6)
#define PIN_OFFSET_FIXED BIT_ULL(7)
-#define PIN_VALIDATE BIT_ULL(8) /* validate placement only, no need to call unpin() */
+#define PIN_OFFSET_GUARD BIT_ULL(8)
+#define PIN_VALIDATE BIT_ULL(9) /* validate placement only, no need to call unpin() */
#define PIN_GLOBAL BIT_ULL(10) /* I915_VMA_GLOBAL_BIND */
#define PIN_USER BIT_ULL(11) /* I915_VMA_LOCAL_BIND */
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 9d5d5a397b64..904f21e1380c 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -1370,14 +1370,14 @@ static void engine_record_execlists(struct intel_engine_coredump *ee)
}
static bool record_context(struct i915_gem_context_coredump *e,
- const struct i915_request *rq)
+ struct intel_context *ce)
{
struct i915_gem_context *ctx;
struct task_struct *task;
bool simulated;
rcu_read_lock();
- ctx = rcu_dereference(rq->context->gem_context);
+ ctx = rcu_dereference(ce->gem_context);
if (ctx && !kref_get_unless_zero(&ctx->ref))
ctx = NULL;
rcu_read_unlock();
@@ -1396,8 +1396,8 @@ static bool record_context(struct i915_gem_context_coredump *e,
e->guilty = atomic_read(&ctx->guilty_count);
e->active = atomic_read(&ctx->active_count);
- e->total_runtime = intel_context_get_total_runtime_ns(rq->context);
- e->avg_runtime = intel_context_get_avg_runtime_ns(rq->context);
+ e->total_runtime = intel_context_get_total_runtime_ns(ce);
+ e->avg_runtime = intel_context_get_avg_runtime_ns(ce);
simulated = i915_gem_context_no_error_capture(ctx);
@@ -1532,15 +1532,37 @@ intel_engine_coredump_alloc(struct intel_engine_cs *engine, gfp_t gfp, u32 dump_
return ee;
}
+static struct intel_engine_capture_vma *
+engine_coredump_add_context(struct intel_engine_coredump *ee,
+ struct intel_context *ce,
+ gfp_t gfp)
+{
+ struct intel_engine_capture_vma *vma = NULL;
+
+ ee->simulated |= record_context(&ee->context, ce);
+ if (ee->simulated)
+ return NULL;
+
+ /*
+ * We need to copy these to an anonymous buffer
+ * as the simplest method to avoid being overwritten
+ * by userspace.
+ */
+ vma = capture_vma(vma, ce->ring->vma, "ring", gfp);
+ vma = capture_vma(vma, ce->state, "HW context", gfp);
+
+ return vma;
+}
+
struct intel_engine_capture_vma *
intel_engine_coredump_add_request(struct intel_engine_coredump *ee,
struct i915_request *rq,
gfp_t gfp)
{
- struct intel_engine_capture_vma *vma = NULL;
+ struct intel_engine_capture_vma *vma;
- ee->simulated |= record_context(&ee->context, rq);
- if (ee->simulated)
+ vma = engine_coredump_add_context(ee, rq->context, gfp);
+ if (!vma)
return NULL;
/*
@@ -1550,8 +1572,6 @@ intel_engine_coredump_add_request(struct intel_engine_coredump *ee,
*/
vma = capture_vma_snapshot(vma, rq->batch_res, gfp, "batch");
vma = capture_user(vma, rq, gfp);
- vma = capture_vma(vma, rq->ring->vma, "ring", gfp);
- vma = capture_vma(vma, rq->context->state, "HW context", gfp);
ee->rq_head = rq->head;
ee->rq_post = rq->postfix;
@@ -1596,54 +1616,36 @@ capture_engine(struct intel_engine_cs *engine,
{
struct intel_engine_capture_vma *capture = NULL;
struct intel_engine_coredump *ee;
- struct intel_context *ce;
+ struct intel_context *ce = NULL;
struct i915_request *rq = NULL;
- unsigned long flags;
ee = intel_engine_coredump_alloc(engine, ALLOW_FAIL, dump_flags);
if (!ee)
return NULL;
- ce = intel_engine_get_hung_context(engine);
- if (ce) {
- intel_engine_clear_hung_context(engine);
- rq = intel_context_find_active_request(ce);
- if (!rq || !i915_request_started(rq))
- goto no_request_capture;
- } else {
- /*
- * Getting here with GuC enabled means it is a forced error capture
- * with no actual hang. So, no need to attempt the execlist search.
- */
- if (!intel_uc_uses_guc_submission(&engine->gt->uc)) {
- spin_lock_irqsave(&engine->sched_engine->lock, flags);
- rq = intel_engine_execlist_find_hung_request(engine);
- spin_unlock_irqrestore(&engine->sched_engine->lock,
- flags);
- }
- }
- if (rq)
- rq = i915_request_get_rcu(rq);
+ intel_engine_get_hung_entity(engine, &ce, &rq);
+ if (rq && !i915_request_started(rq))
+ drm_info(&engine->gt->i915->drm, "Got hung context on %s with active request %lld:%lld [0x%04X] not yet started\n",
+ engine->name, rq->fence.context, rq->fence.seqno, ce->guc_id.id);
- if (!rq)
- goto no_request_capture;
-
- capture = intel_engine_coredump_add_request(ee, rq, ATOMIC_MAYFAIL);
- if (!capture) {
+ if (rq) {
+ capture = intel_engine_coredump_add_request(ee, rq, ATOMIC_MAYFAIL);
i915_request_put(rq);
- goto no_request_capture;
+ } else if (ce) {
+ capture = engine_coredump_add_context(ee, ce, ATOMIC_MAYFAIL);
}
- if (dump_flags & CORE_DUMP_FLAG_IS_GUC_CAPTURE)
- intel_guc_capture_get_matching_node(engine->gt, ee, ce);
- intel_engine_coredump_add_vma(ee, capture, compress);
- i915_request_put(rq);
+ if (capture) {
+ intel_engine_coredump_add_vma(ee, capture, compress);
- return ee;
+ if (dump_flags & CORE_DUMP_FLAG_IS_GUC_CAPTURE)
+ intel_guc_capture_get_matching_node(engine->gt, ee, ce);
+ } else {
+ kfree(ee);
+ ee = NULL;
+ }
-no_request_capture:
- kfree(ee);
- return NULL;
+ return ee;
}
static void
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
index efc75cc2ffdb..56027ffbce51 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.h
+++ b/drivers/gpu/drm/i915/i915_gpu_error.h
@@ -94,7 +94,7 @@ struct intel_engine_coredump {
struct intel_instdone instdone;
/* GuC matched capture-lists info */
- struct intel_guc_state_capture *capture;
+ struct intel_guc_state_capture *guc_capture;
struct __guc_capture_parsed_output *guc_capture_node;
struct i915_gem_context_coredump {
diff --git a/drivers/gpu/drm/i915/i915_hwmon.c b/drivers/gpu/drm/i915/i915_hwmon.c
index c588a17f97e9..1225bc432f0d 100644
--- a/drivers/gpu/drm/i915/i915_hwmon.c
+++ b/drivers/gpu/drm/i915/i915_hwmon.c
@@ -293,6 +293,10 @@ static const struct hwmon_channel_info *hwm_gt_info[] = {
/* I1 is exposed as power_crit or as curr_crit depending on bit 31 */
static int hwm_pcode_read_i1(struct drm_i915_private *i915, u32 *uval)
{
+ /* Avoid ILLEGAL_SUBCOMMAND "mailbox access failed" warning in snb_pcode_read */
+ if (IS_DG1(i915) || IS_DG2(i915))
+ return -ENXIO;
+
return snb_pcode_read_p(&i915->uncore, PCODE_POWER_SETUP,
POWER_SETUP_SUBCOMMAND_READ_I1, 0, uval);
}
@@ -355,6 +359,38 @@ hwm_power_is_visible(const struct hwm_drvdata *ddat, u32 attr, int chan)
}
}
+/*
+ * HW allows arbitrary PL1 limits to be set but silently clamps these values to
+ * "typical but not guaranteed" min/max values in rg.pkg_power_sku. Follow the
+ * same pattern for sysfs, allow arbitrary PL1 limits to be set but display
+ * clamped values when read. Write/read I1 also follows the same pattern.
+ */
+static int
+hwm_power_max_read(struct hwm_drvdata *ddat, long *val)
+{
+ struct i915_hwmon *hwmon = ddat->hwmon;
+ intel_wakeref_t wakeref;
+ u64 r, min, max;
+
+ *val = hwm_field_read_and_scale(ddat,
+ hwmon->rg.pkg_rapl_limit,
+ PKG_PWR_LIM_1,
+ hwmon->scl_shift_power,
+ SF_POWER);
+
+ with_intel_runtime_pm(ddat->uncore->rpm, wakeref)
+ r = intel_uncore_read64(ddat->uncore, hwmon->rg.pkg_power_sku);
+ min = REG_FIELD_GET(PKG_MIN_PWR, r);
+ min = mul_u64_u32_shr(min, SF_POWER, hwmon->scl_shift_power);
+ max = REG_FIELD_GET(PKG_MAX_PWR, r);
+ max = mul_u64_u32_shr(max, SF_POWER, hwmon->scl_shift_power);
+
+ if (min && max)
+ *val = clamp_t(u64, *val, min, max);
+
+ return 0;
+}
+
static int
hwm_power_read(struct hwm_drvdata *ddat, u32 attr, int chan, long *val)
{
@@ -364,12 +400,7 @@ hwm_power_read(struct hwm_drvdata *ddat, u32 attr, int chan, long *val)
switch (attr) {
case hwmon_power_max:
- *val = hwm_field_read_and_scale(ddat,
- hwmon->rg.pkg_rapl_limit,
- PKG_PWR_LIM_1,
- hwmon->scl_shift_power,
- SF_POWER);
- return 0;
+ return hwm_power_max_read(ddat, val);
case hwmon_power_rated_max:
*val = hwm_field_read_and_scale(ddat,
hwmon->rg.pkg_power_sku,
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 91c533986041..240d5e198904 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -614,414 +614,6 @@ static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
spin_unlock_irq(&dev_priv->irq_lock);
}
-/*
- * This timing diagram depicts the video signal in and
- * around the vertical blanking period.
- *
- * Assumptions about the fictitious mode used in this example:
- * vblank_start >= 3
- * vsync_start = vblank_start + 1
- * vsync_end = vblank_start + 2
- * vtotal = vblank_start + 3
- *
- * start of vblank:
- * latch double buffered registers
- * increment frame counter (ctg+)
- * generate start of vblank interrupt (gen4+)
- * |
- * | frame start:
- * | generate frame start interrupt (aka. vblank interrupt) (gmch)
- * | may be shifted forward 1-3 extra lines via PIPECONF
- * | |
- * | | start of vsync:
- * | | generate vsync interrupt
- * | | |
- * ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx
- * . \hs/ . \hs/ \hs/ \hs/ . \hs/
- * ----va---> <-----------------vb--------------------> <--------va-------------
- * | | <----vs-----> |
- * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
- * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
- * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
- * | | |
- * last visible pixel first visible pixel
- * | increment frame counter (gen3/4)
- * pixel counter = vblank_start * htotal pixel counter = 0 (gen3/4)
- *
- * x = horizontal active
- * _ = horizontal blanking
- * hs = horizontal sync
- * va = vertical active
- * vb = vertical blanking
- * vs = vertical sync
- * vbs = vblank_start (number)
- *
- * Summary:
- * - most events happen at the start of horizontal sync
- * - frame start happens at the start of horizontal blank, 1-4 lines
- * (depending on PIPECONF settings) after the start of vblank
- * - gen3/4 pixel and frame counter are synchronized with the start
- * of horizontal active on the first line of vertical active
- */
-
-/* Called from drm generic code, passed a 'crtc', which
- * we use as a pipe index
- */
-u32 i915_get_vblank_counter(struct drm_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
- struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[drm_crtc_index(crtc)];
- const struct drm_display_mode *mode = &vblank->hwmode;
- enum pipe pipe = to_intel_crtc(crtc)->pipe;
- i915_reg_t high_frame, low_frame;
- u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
- unsigned long irqflags;
-
- /*
- * On i965gm TV output the frame counter only works up to
- * the point when we enable the TV encoder. After that the
- * frame counter ceases to work and reads zero. We need a
- * vblank wait before enabling the TV encoder and so we
- * have to enable vblank interrupts while the frame counter
- * is still in a working state. However the core vblank code
- * does not like us returning non-zero frame counter values
- * when we've told it that we don't have a working frame
- * counter. Thus we must stop non-zero values leaking out.
- */
- if (!vblank->max_vblank_count)
- return 0;
-
- htotal = mode->crtc_htotal;
- hsync_start = mode->crtc_hsync_start;
- vbl_start = mode->crtc_vblank_start;
- if (mode->flags & DRM_MODE_FLAG_INTERLACE)
- vbl_start = DIV_ROUND_UP(vbl_start, 2);
-
- /* Convert to pixel count */
- vbl_start *= htotal;
-
- /* Start of vblank event occurs at start of hsync */
- vbl_start -= htotal - hsync_start;
-
- high_frame = PIPEFRAME(pipe);
- low_frame = PIPEFRAMEPIXEL(pipe);
-
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
-
- /*
- * High & low register fields aren't synchronized, so make sure
- * we get a low value that's stable across two reads of the high
- * register.
- */
- do {
- high1 = intel_de_read_fw(dev_priv, high_frame) & PIPE_FRAME_HIGH_MASK;
- low = intel_de_read_fw(dev_priv, low_frame);
- high2 = intel_de_read_fw(dev_priv, high_frame) & PIPE_FRAME_HIGH_MASK;
- } while (high1 != high2);
-
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
-
- high1 >>= PIPE_FRAME_HIGH_SHIFT;
- pixel = low & PIPE_PIXEL_MASK;
- low >>= PIPE_FRAME_LOW_SHIFT;
-
- /*
- * The frame counter increments at beginning of active.
- * Cook up a vblank counter by also checking the pixel
- * counter against vblank start.
- */
- return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
-}
-
-u32 g4x_get_vblank_counter(struct drm_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
- struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[drm_crtc_index(crtc)];
- enum pipe pipe = to_intel_crtc(crtc)->pipe;
-
- if (!vblank->max_vblank_count)
- return 0;
-
- return intel_uncore_read(&dev_priv->uncore, PIPE_FRMCOUNT_G4X(pipe));
-}
-
-static u32 intel_crtc_scanlines_since_frame_timestamp(struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct drm_vblank_crtc *vblank =
- &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
- const struct drm_display_mode *mode = &vblank->hwmode;
- u32 htotal = mode->crtc_htotal;
- u32 clock = mode->crtc_clock;
- u32 scan_prev_time, scan_curr_time, scan_post_time;
-
- /*
- * To avoid the race condition where we might cross into the
- * next vblank just between the PIPE_FRMTMSTMP and TIMESTAMP_CTR
- * reads. We make sure we read PIPE_FRMTMSTMP and TIMESTAMP_CTR
- * during the same frame.
- */
- do {
- /*
- * This field provides read back of the display
- * pipe frame time stamp. The time stamp value
- * is sampled at every start of vertical blank.
- */
- scan_prev_time = intel_de_read_fw(dev_priv,
- PIPE_FRMTMSTMP(crtc->pipe));
-
- /*
- * The TIMESTAMP_CTR register has the current
- * time stamp value.
- */
- scan_curr_time = intel_de_read_fw(dev_priv, IVB_TIMESTAMP_CTR);
-
- scan_post_time = intel_de_read_fw(dev_priv,
- PIPE_FRMTMSTMP(crtc->pipe));
- } while (scan_post_time != scan_prev_time);
-
- return div_u64(mul_u32_u32(scan_curr_time - scan_prev_time,
- clock), 1000 * htotal);
-}
-
-/*
- * On certain encoders on certain platforms, pipe
- * scanline register will not work to get the scanline,
- * since the timings are driven from the PORT or issues
- * with scanline register updates.
- * This function will use Framestamp and current
- * timestamp registers to calculate the scanline.
- */
-static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc)
-{
- struct drm_vblank_crtc *vblank =
- &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
- const struct drm_display_mode *mode = &vblank->hwmode;
- u32 vblank_start = mode->crtc_vblank_start;
- u32 vtotal = mode->crtc_vtotal;
- u32 scanline;
-
- scanline = intel_crtc_scanlines_since_frame_timestamp(crtc);
- scanline = min(scanline, vtotal - 1);
- scanline = (scanline + vblank_start) % vtotal;
-
- return scanline;
-}
-
-/*
- * intel_de_read_fw(), only for fast reads of display block, no need for
- * forcewake etc.
- */
-static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
-{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- const struct drm_display_mode *mode;
- struct drm_vblank_crtc *vblank;
- enum pipe pipe = crtc->pipe;
- int position, vtotal;
-
- if (!crtc->active)
- return 0;
-
- vblank = &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
- mode = &vblank->hwmode;
-
- if (crtc->mode_flags & I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP)
- return __intel_get_crtc_scanline_from_timestamp(crtc);
-
- vtotal = mode->crtc_vtotal;
- if (mode->flags & DRM_MODE_FLAG_INTERLACE)
- vtotal /= 2;
-
- position = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & PIPEDSL_LINE_MASK;
-
- /*
- * On HSW, the DSL reg (0x70000) appears to return 0 if we
- * read it just before the start of vblank. So try it again
- * so we don't accidentally end up spanning a vblank frame
- * increment, causing the pipe_update_end() code to squak at us.
- *
- * The nature of this problem means we can't simply check the ISR
- * bit and return the vblank start value; nor can we use the scanline
- * debug register in the transcoder as it appears to have the same
- * problem. We may need to extend this to include other platforms,
- * but so far testing only shows the problem on HSW.
- */
- if (HAS_DDI(dev_priv) && !position) {
- int i, temp;
-
- for (i = 0; i < 100; i++) {
- udelay(1);
- temp = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & PIPEDSL_LINE_MASK;
- if (temp != position) {
- position = temp;
- break;
- }
- }
- }
-
- /*
- * See update_scanline_offset() for the details on the
- * scanline_offset adjustment.
- */
- return (position + crtc->scanline_offset) % vtotal;
-}
-
-static bool i915_get_crtc_scanoutpos(struct drm_crtc *_crtc,
- bool in_vblank_irq,
- int *vpos, int *hpos,
- ktime_t *stime, ktime_t *etime,
- const struct drm_display_mode *mode)
-{
- struct drm_device *dev = _crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *crtc = to_intel_crtc(_crtc);
- enum pipe pipe = crtc->pipe;
- int position;
- int vbl_start, vbl_end, hsync_start, htotal, vtotal;
- unsigned long irqflags;
- bool use_scanline_counter = DISPLAY_VER(dev_priv) >= 5 ||
- IS_G4X(dev_priv) || DISPLAY_VER(dev_priv) == 2 ||
- crtc->mode_flags & I915_MODE_FLAG_USE_SCANLINE_COUNTER;
-
- if (drm_WARN_ON(&dev_priv->drm, !mode->crtc_clock)) {
- drm_dbg(&dev_priv->drm,
- "trying to get scanoutpos for disabled "
- "pipe %c\n", pipe_name(pipe));
- return false;
- }
-
- htotal = mode->crtc_htotal;
- hsync_start = mode->crtc_hsync_start;
- vtotal = mode->crtc_vtotal;
- vbl_start = mode->crtc_vblank_start;
- vbl_end = mode->crtc_vblank_end;
-
- if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
- vbl_start = DIV_ROUND_UP(vbl_start, 2);
- vbl_end /= 2;
- vtotal /= 2;
- }
-
- /*
- * Lock uncore.lock, as we will do multiple timing critical raw
- * register reads, potentially with preemption disabled, so the
- * following code must not block on uncore.lock.
- */
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
-
- /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
-
- /* Get optional system timestamp before query. */
- if (stime)
- *stime = ktime_get();
-
- if (crtc->mode_flags & I915_MODE_FLAG_VRR) {
- int scanlines = intel_crtc_scanlines_since_frame_timestamp(crtc);
-
- position = __intel_get_crtc_scanline(crtc);
-
- /*
- * Already exiting vblank? If so, shift our position
- * so it looks like we're already apporaching the full
- * vblank end. This should make the generated timestamp
- * more or less match when the active portion will start.
- */
- if (position >= vbl_start && scanlines < position)
- position = min(crtc->vmax_vblank_start + scanlines, vtotal - 1);
- } else if (use_scanline_counter) {
- /* No obvious pixelcount register. Only query vertical
- * scanout position from Display scan line register.
- */
- position = __intel_get_crtc_scanline(crtc);
- } else {
- /* Have access to pixelcount since start of frame.
- * We can split this into vertical and horizontal
- * scanout position.
- */
- position = (intel_de_read_fw(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
-
- /* convert to pixel counts */
- vbl_start *= htotal;
- vbl_end *= htotal;
- vtotal *= htotal;
-
- /*
- * In interlaced modes, the pixel counter counts all pixels,
- * so one field will have htotal more pixels. In order to avoid
- * the reported position from jumping backwards when the pixel
- * counter is beyond the length of the shorter field, just
- * clamp the position the length of the shorter field. This
- * matches how the scanline counter based position works since
- * the scanline counter doesn't count the two half lines.
- */
- if (position >= vtotal)
- position = vtotal - 1;
-
- /*
- * Start of vblank interrupt is triggered at start of hsync,
- * just prior to the first active line of vblank. However we
- * consider lines to start at the leading edge of horizontal
- * active. So, should we get here before we've crossed into
- * the horizontal active of the first line in vblank, we would
- * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
- * always add htotal-hsync_start to the current pixel position.
- */
- position = (position + htotal - hsync_start) % vtotal;
- }
-
- /* Get optional system timestamp after query. */
- if (etime)
- *etime = ktime_get();
-
- /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
-
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
-
- /*
- * While in vblank, position will be negative
- * counting up towards 0 at vbl_end. And outside
- * vblank, position will be positive counting
- * up since vbl_end.
- */
- if (position >= vbl_start)
- position -= vbl_end;
- else
- position += vtotal - vbl_end;
-
- if (use_scanline_counter) {
- *vpos = position;
- *hpos = 0;
- } else {
- *vpos = position / htotal;
- *hpos = position - (*vpos * htotal);
- }
-
- return true;
-}
-
-bool intel_crtc_get_vblank_timestamp(struct drm_crtc *crtc, int *max_error,
- ktime_t *vblank_time, bool in_vblank_irq)
-{
- return drm_crtc_vblank_helper_get_vblank_timestamp_internal(
- crtc, max_error, vblank_time, in_vblank_irq,
- i915_get_crtc_scanoutpos);
-}
-
-int intel_get_crtc_scanline(struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- unsigned long irqflags;
- int position;
-
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
- position = __intel_get_crtc_scanline(crtc);
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
-
- return position;
-}
-
/**
* ivb_parity_work - Workqueue called when a parity error interrupt
* occurred.
@@ -2451,8 +2043,8 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
ret = IRQ_HANDLED;
gen8_de_misc_irq_handler(dev_priv, iir);
} else {
- drm_err(&dev_priv->drm,
- "The master control interrupt lied (DE MISC)!\n");
+ drm_err_ratelimited(&dev_priv->drm,
+ "The master control interrupt lied (DE MISC)!\n");
}
}
@@ -2463,8 +2055,8 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
ret = IRQ_HANDLED;
gen11_hpd_irq_handler(dev_priv, iir);
} else {
- drm_err(&dev_priv->drm,
- "The master control interrupt lied, (DE HPD)!\n");
+ drm_err_ratelimited(&dev_priv->drm,
+ "The master control interrupt lied, (DE HPD)!\n");
}
}
@@ -2513,12 +2105,12 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
}
if (!found)
- drm_err(&dev_priv->drm,
- "Unexpected DE Port interrupt\n");
+ drm_err_ratelimited(&dev_priv->drm,
+ "Unexpected DE Port interrupt\n");
}
else
- drm_err(&dev_priv->drm,
- "The master control interrupt lied (DE PORT)!\n");
+ drm_err_ratelimited(&dev_priv->drm,
+ "The master control interrupt lied (DE PORT)!\n");
}
for_each_pipe(dev_priv, pipe) {
@@ -2529,8 +2121,8 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PIPE_IIR(pipe));
if (!iir) {
- drm_err(&dev_priv->drm,
- "The master control interrupt lied (DE PIPE)!\n");
+ drm_err_ratelimited(&dev_priv->drm,
+ "The master control interrupt lied (DE PIPE)!\n");
continue;
}
@@ -2551,10 +2143,10 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
fault_errors = iir & gen8_de_pipe_fault_mask(dev_priv);
if (fault_errors)
- drm_err(&dev_priv->drm,
- "Fault errors on pipe %c: 0x%08x\n",
- pipe_name(pipe),
- fault_errors);
+ drm_err_ratelimited(&dev_priv->drm,
+ "Fault errors on pipe %c: 0x%08x\n",
+ pipe_name(pipe),
+ fault_errors);
}
if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) &&
diff --git a/drivers/gpu/drm/i915/i915_irq.h b/drivers/gpu/drm/i915/i915_irq.h
index 9b004fc3444e..03ee4c8b1ed3 100644
--- a/drivers/gpu/drm/i915/i915_irq.h
+++ b/drivers/gpu/drm/i915/i915_irq.h
@@ -66,18 +66,12 @@ bool intel_irqs_enabled(struct drm_i915_private *dev_priv);
void intel_synchronize_irq(struct drm_i915_private *i915);
void intel_synchronize_hardirq(struct drm_i915_private *i915);
-int intel_get_crtc_scanline(struct intel_crtc *crtc);
void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
u8 pipe_mask);
void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
u8 pipe_mask);
u32 gen8_de_pipe_underrun_mask(struct drm_i915_private *dev_priv);
-bool intel_crtc_get_vblank_timestamp(struct drm_crtc *crtc, int *max_error,
- ktime_t *vblank_time, bool in_vblank_irq);
-
-u32 i915_get_vblank_counter(struct drm_crtc *crtc);
-u32 g4x_get_vblank_counter(struct drm_crtc *crtc);
int i8xx_enable_vblank(struct drm_crtc *crtc);
int i915gm_enable_vblank(struct drm_crtc *crtc);
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index d1e4d528cb17..ade744cccfea 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -122,7 +122,7 @@ i915_param_named_unsafe(enable_psr2_sel_fetch, bool, 0400,
"Default: 0");
i915_param_named_unsafe(force_probe, charp, 0400,
- "Force probe the driver for specified devices. "
+ "Force probe options for specified supported devices. "
"See CONFIG_DRM_I915_FORCE_PROBE for details.");
i915_param_named_unsafe(disable_power_well, int, 0400,
@@ -192,6 +192,9 @@ i915_param_named_unsafe(huc_firmware_path, charp, 0400,
i915_param_named_unsafe(dmc_firmware_path, charp, 0400,
"DMC firmware path to use instead of the default one");
+i915_param_named_unsafe(gsc_firmware_path, charp, 0400,
+ "GSC firmware path to use instead of the default one");
+
i915_param_named_unsafe(enable_dp_mst, bool, 0400,
"Enable multi-stream transport (MST) for new DisplayPort sinks. (default: true)");
@@ -219,27 +222,44 @@ i915_param_named_unsafe(lmem_size, uint, 0400,
i915_param_named_unsafe(lmem_bar_size, uint, 0400,
"Set the lmem bar size(in MiB).");
-static __always_inline void _print_param(struct drm_printer *p,
- const char *name,
- const char *type,
- const void *x)
+static void _param_print_bool(struct drm_printer *p, const char *name,
+ bool val)
+{
+ drm_printf(p, "i915.%s=%s\n", name, str_yes_no(val));
+}
+
+static void _param_print_int(struct drm_printer *p, const char *name,
+ int val)
+{
+ drm_printf(p, "i915.%s=%d\n", name, val);
+}
+
+static void _param_print_uint(struct drm_printer *p, const char *name,
+ unsigned int val)
+{
+ drm_printf(p, "i915.%s=%u\n", name, val);
+}
+
+static void _param_print_ulong(struct drm_printer *p, const char *name,
+ unsigned long val)
{
- if (!__builtin_strcmp(type, "bool"))
- drm_printf(p, "i915.%s=%s\n", name,
- str_yes_no(*(const bool *)x));
- else if (!__builtin_strcmp(type, "int"))
- drm_printf(p, "i915.%s=%d\n", name, *(const int *)x);
- else if (!__builtin_strcmp(type, "unsigned int"))
- drm_printf(p, "i915.%s=%u\n", name, *(const unsigned int *)x);
- else if (!__builtin_strcmp(type, "unsigned long"))
- drm_printf(p, "i915.%s=%lu\n", name, *(const unsigned long *)x);
- else if (!__builtin_strcmp(type, "char *"))
- drm_printf(p, "i915.%s=%s\n", name, *(const char **)x);
- else
- WARN_ONCE(1, "no printer defined for param type %s (i915.%s)\n",
- type, name);
+ drm_printf(p, "i915.%s=%lu\n", name, val);
}
+static void _param_print_charp(struct drm_printer *p, const char *name,
+ const char *val)
+{
+ drm_printf(p, "i915.%s=%s\n", name, val);
+}
+
+#define _param_print(p, name, val) \
+ _Generic(val, \
+ bool: _param_print_bool, \
+ int: _param_print_int, \
+ unsigned int: _param_print_uint, \
+ unsigned long: _param_print_ulong, \
+ char *: _param_print_charp)(p, name, val)
+
/**
* i915_params_dump - dump i915 modparams
* @params: i915 modparams
@@ -249,37 +269,48 @@ static __always_inline void _print_param(struct drm_printer *p,
*/
void i915_params_dump(const struct i915_params *params, struct drm_printer *p)
{
-#define PRINT(T, x, ...) _print_param(p, #x, #T, &params->x);
+#define PRINT(T, x, ...) _param_print(p, #x, params->x);
I915_PARAMS_FOR_EACH(PRINT);
#undef PRINT
}
-static __always_inline void dup_param(const char *type, void *x)
+static void _param_dup_charp(char **valp)
{
- if (!__builtin_strcmp(type, "char *"))
- *(void **)x = kstrdup(*(void **)x, GFP_ATOMIC);
+ *valp = kstrdup(*valp, GFP_ATOMIC);
}
+static void _param_nop(void *valp)
+{
+}
+
+#define _param_dup(valp) \
+ _Generic(valp, \
+ char **: _param_dup_charp, \
+ default: _param_nop)(valp)
+
void i915_params_copy(struct i915_params *dest, const struct i915_params *src)
{
*dest = *src;
-#define DUP(T, x, ...) dup_param(#T, &dest->x);
+#define DUP(T, x, ...) _param_dup(&dest->x);
I915_PARAMS_FOR_EACH(DUP);
#undef DUP
}
-static __always_inline void free_param(const char *type, void *x)
+static void _param_free_charp(char **valp)
{
- if (!__builtin_strcmp(type, "char *")) {
- kfree(*(void **)x);
- *(void **)x = NULL;
- }
+ kfree(*valp);
+ *valp = NULL;
}
+#define _param_free(valp) \
+ _Generic(valp, \
+ char **: _param_free_charp, \
+ default: _param_nop)(valp)
+
/* free the allocated members, *not* the passed in params itself */
void i915_params_free(struct i915_params *params)
{
-#define FREE(T, x, ...) free_param(#T, &params->x);
+#define FREE(T, x, ...) _param_free(&params->x);
I915_PARAMS_FOR_EACH(FREE);
#undef FREE
}
diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h
index 2733cb6cfe09..3f51f90145b6 100644
--- a/drivers/gpu/drm/i915/i915_params.h
+++ b/drivers/gpu/drm/i915/i915_params.h
@@ -64,6 +64,7 @@ struct drm_printer;
param(char *, guc_firmware_path, NULL, 0400) \
param(char *, huc_firmware_path, NULL, 0400) \
param(char *, dmc_firmware_path, NULL, 0400) \
+ param(char *, gsc_firmware_path, NULL, 0400) \
param(bool, memtest, false, 0400) \
param(int, mmio_debug, -IS_ENABLED(CONFIG_DRM_I915_DEBUG_MMIO), 0600) \
param(int, edp_vswing, 0, 0400) \
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 4fada7ebe8d8..a8d942b16223 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -26,6 +26,7 @@
#include <drm/drm_drv.h>
#include <drm/i915_pciids.h>
+#include "display/intel_display.h"
#include "gt/intel_gt_regs.h"
#include "gt/intel_sa_media.h"
@@ -132,9 +133,9 @@
[PIPE_D] = TGL_CURSOR_D_OFFSET, \
}
-#define I9XX_COLORS \
+#define I845_COLORS \
.display.color = { .gamma_lut_size = 256 }
-#define I965_COLORS \
+#define I9XX_COLORS \
.display.color = { .gamma_lut_size = 129, \
.gamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING, \
}
@@ -210,7 +211,7 @@
.dma_mask_size = 32, \
I845_PIPE_OFFSETS, \
I845_CURSOR_OFFSETS, \
- I9XX_COLORS, \
+ I845_COLORS, \
GEN_DEFAULT_PAGE_SIZES, \
GEN_DEFAULT_REGIONS
@@ -341,7 +342,7 @@ static const struct intel_device_info pnv_m_info = {
.dma_mask_size = 36, \
I9XX_PIPE_OFFSETS, \
I9XX_CURSOR_OFFSETS, \
- I965_COLORS, \
+ I9XX_COLORS, \
GEN_DEFAULT_PAGE_SIZES, \
GEN_DEFAULT_REGIONS
@@ -548,7 +549,7 @@ static const struct intel_device_info vlv_info = {
.display.mmio_offset = VLV_DISPLAY_BASE,
I9XX_PIPE_OFFSETS,
I9XX_CURSOR_OFFSETS,
- I965_COLORS,
+ I9XX_COLORS,
GEN_DEFAULT_PAGE_SIZES,
GEN_DEFAULT_REGIONS,
};
@@ -890,7 +891,7 @@ static const struct intel_device_info jsl_info = {
TGL_CURSOR_OFFSETS, \
.has_global_mocs = 1, \
.has_pxp = 1, \
- .display.has_dsb = 0 /* FIXME: LUT load is broken with DSB */
+ .display.has_dsb = 1
static const struct intel_device_info tgl_info = {
GEN12_FEATURES,
@@ -949,7 +950,7 @@ static const struct intel_device_info adl_s_info = {
#define XE_LPD_FEATURES \
.display.abox_mask = GENMASK(1, 0), \
.display.color = { \
- .degamma_lut_size = 128, .gamma_lut_size = 1024, \
+ .degamma_lut_size = 129, .gamma_lut_size = 1024, \
.degamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING | \
DRM_COLOR_LUT_EQUAL_CHANNELS, \
}, \
@@ -1018,6 +1019,7 @@ static const struct intel_device_info adl_p_info = {
.has_3d_pipeline = 1, \
.has_64bit_reloc = 1, \
.has_flat_ccs = 1, \
+ .has_4tile = 1, \
.has_global_mocs = 1, \
.has_gt_uc = 1, \
.has_llc = 1, \
@@ -1062,7 +1064,6 @@ static const struct intel_device_info xehpsdv_info = {
.__runtime.graphics.ip.rel = 55, \
.__runtime.media.ip.rel = 55, \
PLATFORM(INTEL_DG2), \
- .has_4tile = 1, \
.has_64k_pages = 1, \
.has_guc_deprivilege = 1, \
.has_heci_pxp = 1, \
@@ -1118,6 +1119,7 @@ static const struct intel_device_info pvc_info = {
XE_LPD_FEATURES, \
.__runtime.display.ip.ver = 14, \
.display.has_cdclk_crawl = 1, \
+ .display.has_cdclk_squash = 1, \
.__runtime.fbc_mask = BIT(INTEL_FBC_A) | BIT(INTEL_FBC_B)
static const struct intel_gt_definition xelpmp_extra_gt[] = {
@@ -1125,7 +1127,7 @@ static const struct intel_gt_definition xelpmp_extra_gt[] = {
.type = GT_MEDIA,
.name = "Standalone Media GT",
.gsi_offset = MTL_MEDIA_GSI_BASE,
- .engine_mask = BIT(VECS0) | BIT(VCS0) | BIT(VCS2),
+ .engine_mask = BIT(VECS0) | BIT(VCS0) | BIT(VCS2) | BIT(GSC0),
},
{}
};
@@ -1253,7 +1255,7 @@ static void i915_pci_remove(struct pci_dev *pdev)
}
/* is device_id present in comma separated list of ids */
-static bool force_probe(u16 device_id, const char *devices)
+static bool device_id_in_list(u16 device_id, const char *devices, bool negative)
{
char *s, *p, *tok;
bool ret;
@@ -1262,7 +1264,9 @@ static bool force_probe(u16 device_id, const char *devices)
return false;
/* match everything */
- if (strcmp(devices, "*") == 0)
+ if (negative && strcmp(devices, "!*") == 0)
+ return true;
+ if (!negative && strcmp(devices, "*") == 0)
return true;
s = kstrdup(devices, GFP_KERNEL);
@@ -1272,6 +1276,12 @@ static bool force_probe(u16 device_id, const char *devices)
for (p = s, ret = false; (tok = strsep(&p, ",")) != NULL; ) {
u16 val;
+ if (negative && tok[0] == '!')
+ tok++;
+ else if ((negative && tok[0] != '!') ||
+ (!negative && tok[0] == '!'))
+ continue;
+
if (kstrtou16(tok, 16, &val) == 0 && val == device_id) {
ret = true;
break;
@@ -1283,6 +1293,16 @@ static bool force_probe(u16 device_id, const char *devices)
return ret;
}
+static bool id_forced(u16 device_id)
+{
+ return device_id_in_list(device_id, i915_modparams.force_probe, false);
+}
+
+static bool id_blocked(u16 device_id)
+{
+ return device_id_in_list(device_id, i915_modparams.force_probe, true);
+}
+
bool i915_pci_resource_valid(struct pci_dev *pdev, int bar)
{
if (!pci_resource_flags(pdev, bar))
@@ -1308,10 +1328,9 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
(struct intel_device_info *) ent->driver_data;
int err;
- if (intel_info->require_force_probe &&
- !force_probe(pdev->device, i915_modparams.force_probe)) {
+ if (intel_info->require_force_probe && !id_forced(pdev->device)) {
dev_info(&pdev->dev,
- "Your graphics device %04x is not properly supported by the driver in this\n"
+ "Your graphics device %04x is not properly supported by i915 in this\n"
"kernel version. To force driver probe anyway, use i915.force_probe=%04x\n"
"module parameter or CONFIG_DRM_I915_FORCE_PROBE=%04x configuration option,\n"
"or (recommended) check for kernel updates.\n",
@@ -1319,6 +1338,12 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return -ENODEV;
}
+ if (id_blocked(pdev->device)) {
+ dev_info(&pdev->dev, "I915 probe blocked for Device ID %04x.\n",
+ pdev->device);
+ return -ENODEV;
+ }
+
/* Only bind to function 0 of the device. Early generations
* used function 1 as a placeholder for multi-head. This causes
* us confusion instead, especially on the systems where both
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 125b6ca25a75..824a34ec0b83 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -1846,8 +1846,7 @@ static u32 *save_restore_register(struct i915_perf_stream *stream, u32 *cs,
for (d = 0; d < dword_count; d++) {
*cs++ = cmd;
*cs++ = i915_mmio_reg_offset(reg) + 4 * d;
- *cs++ = intel_gt_scratch_offset(stream->engine->gt,
- offset) + 4 * d;
+ *cs++ = i915_ggtt_offset(stream->noa_wait) + offset + 4 * d;
*cs++ = 0;
}
@@ -1880,7 +1879,13 @@ static int alloc_noa_wait(struct i915_perf_stream *stream)
MI_PREDICATE_RESULT_2_ENGINE(base) :
MI_PREDICATE_RESULT_1(RENDER_RING_BASE);
- bo = i915_gem_object_create_internal(i915, 4096);
+ /*
+ * gt->scratch was being used to save/restore the GPR registers, but on
+ * MTL the scratch uses stolen lmem. An MI_SRM to this memory region
+ * causes an engine hang. Instead allocate an additional page here to
+ * save/restore GPR registers
+ */
+ bo = i915_gem_object_create_internal(i915, 8192);
if (IS_ERR(bo)) {
drm_err(&i915->drm,
"Failed to allocate NOA wait batchbuffer\n");
@@ -1914,14 +1919,19 @@ retry:
goto err_unpin;
}
+ stream->noa_wait = vma;
+
+#define GPR_SAVE_OFFSET 4096
+#define PREDICATE_SAVE_OFFSET 4160
+
/* Save registers. */
for (i = 0; i < N_CS_GPR; i++)
cs = save_restore_register(
stream, cs, true /* save */, CS_GPR(i),
- INTEL_GT_SCRATCH_FIELD_PERF_CS_GPR + 8 * i, 2);
+ GPR_SAVE_OFFSET + 8 * i, 2);
cs = save_restore_register(
stream, cs, true /* save */, mi_predicate_result,
- INTEL_GT_SCRATCH_FIELD_PERF_PREDICATE_RESULT_1, 1);
+ PREDICATE_SAVE_OFFSET, 1);
/* First timestamp snapshot location. */
ts0 = cs;
@@ -2037,10 +2047,10 @@ retry:
for (i = 0; i < N_CS_GPR; i++)
cs = save_restore_register(
stream, cs, false /* restore */, CS_GPR(i),
- INTEL_GT_SCRATCH_FIELD_PERF_CS_GPR + 8 * i, 2);
+ GPR_SAVE_OFFSET + 8 * i, 2);
cs = save_restore_register(
stream, cs, false /* restore */, mi_predicate_result,
- INTEL_GT_SCRATCH_FIELD_PERF_PREDICATE_RESULT_1, 1);
+ PREDICATE_SAVE_OFFSET, 1);
/* And return to the ring. */
*cs++ = MI_BATCH_BUFFER_END;
@@ -2050,7 +2060,6 @@ retry:
i915_gem_object_flush_map(bo);
__i915_gem_object_release_map(bo);
- stream->noa_wait = vma;
goto out_ww;
err_unpin:
@@ -2263,7 +2272,7 @@ retry:
goto err_add_request;
err = rq->engine->emit_bb_start(rq,
- vma->node.start, 0,
+ i915_vma_offset(vma), 0,
I915_DISPATCH_SECURE);
if (err)
goto err_add_request;
@@ -3131,8 +3140,11 @@ get_sseu_config(struct intel_sseu *out_sseu,
*/
u32 i915_perf_oa_timestamp_frequency(struct drm_i915_private *i915)
{
- /* Wa_18013179988:dg2 */
- if (IS_DG2(i915)) {
+ /*
+ * Wa_18013179988:dg2
+ * Wa_14015846243:mtl
+ */
+ if (IS_DG2(i915) || IS_METEORLAKE(i915)) {
intel_wakeref_t wakeref;
u32 reg, shift;
@@ -4310,6 +4322,17 @@ static const struct i915_range gen12_oa_mux_regs[] = {
{}
};
+/*
+ * Ref: 14010536224:
+ * 0x20cc is repurposed on MTL, so use a separate array for MTL.
+ */
+static const struct i915_range mtl_oa_mux_regs[] = {
+ { .start = 0x0d00, .end = 0x0d04 }, /* RPM_CONFIG[0-1] */
+ { .start = 0x0d0c, .end = 0x0d2c }, /* NOA_CONFIG[0-8] */
+ { .start = 0x9840, .end = 0x9840 }, /* GDT_CHICKEN_BITS */
+ { .start = 0x9884, .end = 0x9888 }, /* NOA_WRITE */
+};
+
static bool gen7_is_valid_b_counter_addr(struct i915_perf *perf, u32 addr)
{
return reg_in_range_table(addr, gen7_oa_b_counters);
@@ -4353,7 +4376,10 @@ static bool xehp_is_valid_b_counter_addr(struct i915_perf *perf, u32 addr)
static bool gen12_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
{
- return reg_in_range_table(addr, gen12_oa_mux_regs);
+ if (IS_METEORLAKE(perf->i915))
+ return reg_in_range_table(addr, mtl_oa_mux_regs);
+ else
+ return reg_in_range_table(addr, gen12_oa_mux_regs);
}
static u32 mask_reg_value(u32 reg, u32 val)
@@ -4750,6 +4776,7 @@ static void oa_init_supported_formats(struct i915_perf *perf)
break;
case INTEL_DG2:
+ case INTEL_METEORLAKE:
oa_format_add(perf, I915_OAR_FORMAT_A32u40_A4u32_B8_C8);
oa_format_add(perf, I915_OA_FORMAT_A24u40_A14u32_B8_C8);
break;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 916176872544..3b2642397b82 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -118,6 +118,9 @@
#define GU_CNTL _MMIO(0x101010)
#define LMEM_INIT REG_BIT(7)
+#define DRIVERFLR REG_BIT(31)
+#define GU_DEBUG _MMIO(0x101018)
+#define DRIVERFLR_STATUS REG_BIT(31)
#define GEN6_STOLEN_RESERVED _MMIO(0x1082C0)
#define GEN6_STOLEN_RESERVED_ADDR_MASK (0xFFF << 20)
@@ -1713,6 +1716,20 @@
#define PALETTE_RED_MASK REG_GENMASK(23, 16)
#define PALETTE_GREEN_MASK REG_GENMASK(15, 8)
#define PALETTE_BLUE_MASK REG_GENMASK(7, 0)
+/* pre-i965 10bit interpolated mode ldw */
+#define PALETTE_10BIT_RED_LDW_MASK REG_GENMASK(23, 16)
+#define PALETTE_10BIT_GREEN_LDW_MASK REG_GENMASK(15, 8)
+#define PALETTE_10BIT_BLUE_LDW_MASK REG_GENMASK(7, 0)
+/* pre-i965 10bit interpolated mode udw */
+#define PALETTE_10BIT_RED_EXP_MASK REG_GENMASK(23, 22)
+#define PALETTE_10BIT_RED_MANT_MASK REG_GENMASK(21, 18)
+#define PALETTE_10BIT_RED_UDW_MASK REG_GENMASK(17, 16)
+#define PALETTE_10BIT_GREEN_EXP_MASK REG_GENMASK(15, 14)
+#define PALETTE_10BIT_GREEN_MANT_MASK REG_GENMASK(13, 10)
+#define PALETTE_10BIT_GREEN_UDW_MASK REG_GENMASK(9, 8)
+#define PALETTE_10BIT_BLUE_EXP_MASK REG_GENMASK(7, 6)
+#define PALETTE_10BIT_BLUE_MANT_MASK REG_GENMASK(5, 2)
+#define PALETTE_10BIT_BLUE_UDW_MASK REG_GENMASK(1, 0)
#define PALETTE(pipe, i) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + \
_PICK((pipe), _PALETTE_A, \
_PALETTE_B, _CHV_PALETTE_C) + \
@@ -2575,46 +2592,6 @@
#define SDVO_PIPE_SEL_MASK_CHV (3 << 24)
#define SDVO_PIPE_SEL_CHV(pipe) ((pipe) << 24)
-
-/* DVO port control */
-#define _DVOA 0x61120
-#define DVOA _MMIO(_DVOA)
-#define _DVOB 0x61140
-#define DVOB _MMIO(_DVOB)
-#define _DVOC 0x61160
-#define DVOC _MMIO(_DVOC)
-#define DVO_ENABLE (1 << 31)
-#define DVO_PIPE_SEL_SHIFT 30
-#define DVO_PIPE_SEL_MASK (1 << 30)
-#define DVO_PIPE_SEL(pipe) ((pipe) << 30)
-#define DVO_PIPE_STALL_UNUSED (0 << 28)
-#define DVO_PIPE_STALL (1 << 28)
-#define DVO_PIPE_STALL_TV (2 << 28)
-#define DVO_PIPE_STALL_MASK (3 << 28)
-#define DVO_USE_VGA_SYNC (1 << 15)
-#define DVO_DATA_ORDER_I740 (0 << 14)
-#define DVO_DATA_ORDER_FP (1 << 14)
-#define DVO_VSYNC_DISABLE (1 << 11)
-#define DVO_HSYNC_DISABLE (1 << 10)
-#define DVO_VSYNC_TRISTATE (1 << 9)
-#define DVO_HSYNC_TRISTATE (1 << 8)
-#define DVO_BORDER_ENABLE (1 << 7)
-#define DVO_DATA_ORDER_GBRG (1 << 6)
-#define DVO_DATA_ORDER_RGGB (0 << 6)
-#define DVO_DATA_ORDER_GBRG_ERRATA (0 << 6)
-#define DVO_DATA_ORDER_RGGB_ERRATA (1 << 6)
-#define DVO_VSYNC_ACTIVE_HIGH (1 << 4)
-#define DVO_HSYNC_ACTIVE_HIGH (1 << 3)
-#define DVO_BLANK_ACTIVE_HIGH (1 << 2)
-#define DVO_OUTPUT_CSTATE_PIXELS (1 << 1) /* SDG only */
-#define DVO_OUTPUT_SOURCE_SIZE_PIXELS (1 << 0) /* SDG only */
-#define DVO_PRESERVE_MASK (0x7 << 24)
-#define DVOA_SRCDIM _MMIO(0x61124)
-#define DVOB_SRCDIM _MMIO(0x61144)
-#define DVOC_SRCDIM _MMIO(0x61164)
-#define DVO_SRCDIM_HORIZONTAL_SHIFT 12
-#define DVO_SRCDIM_VERTICAL_SHIFT 0
-
/* LVDS port control */
#define LVDS _MMIO(0x61180)
/*
@@ -3646,7 +3623,7 @@
#define _PIPEAGCMAX 0x70010
#define _PIPEBGCMAX 0x71010
-#define PIPEGCMAX(pipe, i) _MMIO_PIPE2(pipe, _PIPEAGCMAX + (i) * 4)
+#define PIPEGCMAX(pipe, i) _MMIO_PIPE2(pipe, _PIPEAGCMAX + (i) * 4) /* u1.16 */
#define _PIPE_ARB_CTL_A 0x70028 /* icl+ */
#define PIPE_ARB_CTL(pipe) _MMIO_PIPE2(pipe, _PIPE_ARB_CTL_A)
@@ -5330,19 +5307,20 @@
#define _PREC_PIPEAGCMAX 0x4d000
#define _PREC_PIPEBGCMAX 0x4d010
-#define PREC_PIPEGCMAX(pipe, i) _MMIO(_PIPE(pipe, _PIPEAGCMAX, _PIPEBGCMAX) + (i) * 4)
+#define PREC_PIPEGCMAX(pipe, i) _MMIO(_PIPE(pipe, _PIPEAGCMAX, _PIPEBGCMAX) + (i) * 4) /* u1.16 */
#define _GAMMA_MODE_A 0x4a480
#define _GAMMA_MODE_B 0x4ac80
#define GAMMA_MODE(pipe) _MMIO_PIPE(pipe, _GAMMA_MODE_A, _GAMMA_MODE_B)
-#define PRE_CSC_GAMMA_ENABLE (1 << 31)
-#define POST_CSC_GAMMA_ENABLE (1 << 30)
-#define GAMMA_MODE_MODE_MASK (3 << 0)
-#define GAMMA_MODE_MODE_8BIT (0 << 0)
-#define GAMMA_MODE_MODE_10BIT (1 << 0)
-#define GAMMA_MODE_MODE_12BIT (2 << 0)
-#define GAMMA_MODE_MODE_SPLIT (3 << 0) /* ivb-bdw */
-#define GAMMA_MODE_MODE_12BIT_MULTI_SEGMENTED (3 << 0) /* icl + */
+#define PRE_CSC_GAMMA_ENABLE REG_BIT(31) /* icl+ */
+#define POST_CSC_GAMMA_ENABLE REG_BIT(30) /* icl+ */
+#define PALETTE_ANTICOL_DISABLE REG_BIT(15) /* skl+ */
+#define GAMMA_MODE_MODE_MASK REG_GENMASK(1, 0)
+#define GAMMA_MODE_MODE_8BIT REG_FIELD_PREP(GAMMA_MODE_MODE_MASK, 0)
+#define GAMMA_MODE_MODE_10BIT REG_FIELD_PREP(GAMMA_MODE_MODE_MASK, 1)
+#define GAMMA_MODE_MODE_12BIT REG_FIELD_PREP(GAMMA_MODE_MODE_MASK, 2)
+#define GAMMA_MODE_MODE_SPLIT REG_FIELD_PREP(GAMMA_MODE_MODE_MASK, 3) /* ivb-bdw */
+#define GAMMA_MODE_MODE_12BIT_MULTI_SEG REG_FIELD_PREP(GAMMA_MODE_MODE_MASK, 3) /* icl-tgl */
/* Display Internal Timeout Register */
#define RM_TIMEOUT _MMIO(0x42060)
@@ -5759,6 +5737,7 @@
#define RESET_PCH_HANDSHAKE_ENABLE REG_BIT(4)
#define GEN8_CHICKEN_DCPR_1 _MMIO(0x46430)
+#define LATENCY_REPORTING_REMOVED_PIPE_D REG_BIT(31)
#define SKL_SELECT_ALTERNATE_DC_EXIT REG_BIT(30)
#define LATENCY_REPORTING_REMOVED_PIPE_C REG_BIT(25)
#define LATENCY_REPORTING_REMOVED_PIPE_B REG_BIT(24)
@@ -6269,6 +6248,7 @@
#define CHASSIS_CLK_REQ_DURATION_MASK (0xf << 8)
#define CHASSIS_CLK_REQ_DURATION(x) ((x) << 8)
#define SBCLK_RUN_REFCLK_DIS (1 << 7)
+#define ICP_SECOND_PPS_IO_SELECT REG_BIT(2)
#define SPT_PWM_GRANULARITY (1 << 0)
#define SOUTH_CHICKEN2 _MMIO(0xc2004)
#define FDI_MPHY_IOSFSB_RESET_STATUS (1 << 13)
@@ -7557,11 +7537,10 @@ enum skl_power_gate {
#define _PAL_PREC_INDEX_A 0x4A400
#define _PAL_PREC_INDEX_B 0x4AC00
#define _PAL_PREC_INDEX_C 0x4B400
-#define PAL_PREC_10_12_BIT (0 << 31)
-#define PAL_PREC_SPLIT_MODE (1 << 31)
-#define PAL_PREC_AUTO_INCREMENT (1 << 15)
-#define PAL_PREC_INDEX_VALUE_MASK (0x3ff << 0)
-#define PAL_PREC_INDEX_VALUE(x) ((x) << 0)
+#define PAL_PREC_SPLIT_MODE REG_BIT(31)
+#define PAL_PREC_AUTO_INCREMENT REG_BIT(15)
+#define PAL_PREC_INDEX_VALUE_MASK REG_GENMASK(9, 0)
+#define PAL_PREC_INDEX_VALUE(x) REG_FIELD_PREP(PAL_PREC_INDEX_VALUE_MASK, (x))
#define _PAL_PREC_DATA_A 0x4A404
#define _PAL_PREC_DATA_B 0x4AC04
#define _PAL_PREC_DATA_C 0x4B404
@@ -7578,14 +7557,16 @@ enum skl_power_gate {
#define PREC_PAL_INDEX(pipe) _MMIO_PIPE(pipe, _PAL_PREC_INDEX_A, _PAL_PREC_INDEX_B)
#define PREC_PAL_DATA(pipe) _MMIO_PIPE(pipe, _PAL_PREC_DATA_A, _PAL_PREC_DATA_B)
-#define PREC_PAL_GC_MAX(pipe, i) _MMIO(_PIPE(pipe, _PAL_PREC_GC_MAX_A, _PAL_PREC_GC_MAX_B) + (i) * 4)
-#define PREC_PAL_EXT_GC_MAX(pipe, i) _MMIO(_PIPE(pipe, _PAL_PREC_EXT_GC_MAX_A, _PAL_PREC_EXT_GC_MAX_B) + (i) * 4)
-#define PREC_PAL_EXT2_GC_MAX(pipe, i) _MMIO(_PIPE(pipe, _PAL_PREC_EXT2_GC_MAX_A, _PAL_PREC_EXT2_GC_MAX_B) + (i) * 4)
+#define PREC_PAL_GC_MAX(pipe, i) _MMIO(_PIPE(pipe, _PAL_PREC_GC_MAX_A, _PAL_PREC_GC_MAX_B) + (i) * 4) /* u1.16 */
+#define PREC_PAL_EXT_GC_MAX(pipe, i) _MMIO(_PIPE(pipe, _PAL_PREC_EXT_GC_MAX_A, _PAL_PREC_EXT_GC_MAX_B) + (i) * 4) /* u3.16 */
+#define PREC_PAL_EXT2_GC_MAX(pipe, i) _MMIO(_PIPE(pipe, _PAL_PREC_EXT2_GC_MAX_A, _PAL_PREC_EXT2_GC_MAX_B) + (i) * 4) /* glk+, u3.16 */
#define _PRE_CSC_GAMC_INDEX_A 0x4A484
#define _PRE_CSC_GAMC_INDEX_B 0x4AC84
#define _PRE_CSC_GAMC_INDEX_C 0x4B484
-#define PRE_CSC_GAMC_AUTO_INCREMENT (1 << 10)
+#define PRE_CSC_GAMC_AUTO_INCREMENT REG_BIT(10)
+#define PRE_CSC_GAMC_INDEX_VALUE_MASK REG_GENMASK(7, 0)
+#define PRE_CSC_GAMC_INDEX_VALUE(x) REG_FIELD_PREP(PRE_CSC_GAMC_INDEX_VALUE_MASK, (x))
#define _PRE_CSC_GAMC_DATA_A 0x4A488
#define _PRE_CSC_GAMC_DATA_B 0x4AC88
#define _PRE_CSC_GAMC_DATA_C 0x4B488
@@ -7596,8 +7577,9 @@ enum skl_power_gate {
/* ICL Multi segmented gamma */
#define _PAL_PREC_MULTI_SEG_INDEX_A 0x4A408
#define _PAL_PREC_MULTI_SEG_INDEX_B 0x4AC08
-#define PAL_PREC_MULTI_SEGMENT_AUTO_INCREMENT REG_BIT(15)
-#define PAL_PREC_MULTI_SEGMENT_INDEX_VALUE_MASK REG_GENMASK(4, 0)
+#define PAL_PREC_MULTI_SEG_AUTO_INCREMENT REG_BIT(15)
+#define PAL_PREC_MULTI_SEG_INDEX_VALUE_MASK REG_GENMASK(4, 0)
+#define PAL_PREC_MULTI_SEG_INDEX_VALUE(x) REG_FIELD_PREP(PAL_PREC_MULTI_SEG_INDEX_VALUE_MASK, (x))
#define _PAL_PREC_MULTI_SEG_DATA_A 0x4A40C
#define _PAL_PREC_MULTI_SEG_DATA_B 0x4AC0C
@@ -8124,7 +8106,7 @@ enum skl_power_gate {
#define DSB_TAIL(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x4)
#define DSB_CTRL(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x8)
#define DSB_ENABLE (1 << 31)
-#define DSB_STATUS (1 << 0)
+#define DSB_STATUS_BUSY (1 << 0)
#define CLKREQ_POLICY _MMIO(0x101038)
#define CLKREQ_POLICY_MEM_UP_OVRD REG_BIT(1)
@@ -8132,10 +8114,6 @@ enum skl_power_gate {
#define CLKGATE_DIS_MISC _MMIO(0x46534)
#define CLKGATE_DIS_MISC_DMASC_GATING_DIS REG_BIT(21)
-#define GEN12_CULLBIT1 _MMIO(0x6100)
-#define GEN12_CULLBIT2 _MMIO(0x7030)
-#define GEN12_STATE_ACK_DEBUG _MMIO(0x20BC)
-
#define _MTL_CLKGATE_DIS_TRANS_A 0x604E8
#define _MTL_CLKGATE_DIS_TRANS_B 0x614E8
#define MTL_CLKGATE_DIS_TRANS(trans) _MMIO_TRANS2(trans, _MTL_CLKGATE_DIS_TRANS_A)
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index f949a9495758..7503dcb9043b 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -43,6 +43,7 @@
#include "gt/intel_rps.h"
#include "i915_active.h"
+#include "i915_config.h"
#include "i915_deps.h"
#include "i915_driver.h"
#include "i915_drv.h"
diff --git a/drivers/gpu/drm/i915/i915_scatterlist.c b/drivers/gpu/drm/i915/i915_scatterlist.c
index 114e5e39aa72..7c7a86921b1c 100644
--- a/drivers/gpu/drm/i915/i915_scatterlist.c
+++ b/drivers/gpu/drm/i915/i915_scatterlist.c
@@ -96,6 +96,13 @@ struct i915_refct_sgt *i915_rsgt_from_mm_node(const struct drm_mm_node *node,
i915_refct_sgt_init(rsgt, node->size << PAGE_SHIFT);
st = &rsgt->table;
+ /* restricted by sg_alloc_table */
+ if (WARN_ON(overflows_type(DIV_ROUND_UP_ULL(node->size, segment_pages),
+ unsigned int))) {
+ i915_refct_sgt_put(rsgt);
+ return ERR_PTR(-E2BIG);
+ }
+
if (sg_alloc_table(st, DIV_ROUND_UP_ULL(node->size, segment_pages),
GFP_KERNEL)) {
i915_refct_sgt_put(rsgt);
@@ -177,6 +184,12 @@ struct i915_refct_sgt *i915_rsgt_from_buddy_resource(struct ttm_resource *res,
i915_refct_sgt_init(rsgt, size);
st = &rsgt->table;
+ /* restricted by sg_alloc_table */
+ if (WARN_ON(overflows_type(PFN_UP(res->size), unsigned int))) {
+ i915_refct_sgt_put(rsgt);
+ return ERR_PTR(-E2BIG);
+ }
+
if (sg_alloc_table(st, PFN_UP(res->size), GFP_KERNEL)) {
i915_refct_sgt_put(rsgt);
return ERR_PTR(-ENOMEM);
diff --git a/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c b/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c
index 7e611476c7a4..a72698a2dbc8 100644
--- a/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c
+++ b/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c
@@ -5,8 +5,8 @@
#include <linux/slab.h>
-#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_bo.h>
#include <drm/drm_buddy.h>
diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
index 67a66d4d5c70..2c430c0c3bad 100644
--- a/drivers/gpu/drm/i915/i915_utils.h
+++ b/drivers/gpu/drm/i915/i915_utils.h
@@ -145,8 +145,6 @@ bool i915_error_injected(void);
#define page_pack_bits(ptr, bits) ptr_pack_bits(ptr, bits, PAGE_SHIFT)
#define page_unpack_bits(ptr, bits) ptr_unpack_bits(ptr, bits, PAGE_SHIFT)
-#define struct_member(T, member) (((T *)0)->member)
-
#define fetch_and_zero(ptr) ({ \
typeof(*ptr) __T = *(ptr); \
*(ptr) = (typeof(*ptr))0; \
@@ -166,7 +164,7 @@ static __always_inline ptrdiff_t ptrdiff(const void *a, const void *b)
*/
#define container_of_user(ptr, type, member) ({ \
void __user *__mptr = (void __user *)(ptr); \
- BUILD_BUG_ON_MSG(!__same_type(*(ptr), struct_member(type, member)) && \
+ BUILD_BUG_ON_MSG(!__same_type(*(ptr), typeof_member(type, member)) && \
!__same_type(*(ptr), void), \
"pointer type mismatch in container_of()"); \
((type __user *)(__mptr - offsetof(type, member))); })
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 135390d975b6..f51fd9fd4c89 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -26,6 +26,7 @@
#include <linux/dma-fence-array.h>
#include <drm/drm_gem.h>
+#include "display/intel_display.h"
#include "display/intel_frontbuffer.h"
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_tiling.h"
@@ -418,8 +419,8 @@ i915_vma_resource_init_from_vma(struct i915_vma_resource *vma_res,
i915_vma_resource_init(vma_res, vma->vm, vma->pages, &vma->page_sizes,
obj->mm.rsgt, i915_gem_object_is_readonly(obj),
i915_gem_object_is_lmem(obj), obj->mm.region,
- vma->ops, vma->private, vma->node.start,
- vma->node.size, vma->size);
+ vma->ops, vma->private, __i915_vma_offset(vma),
+ __i915_vma_size(vma), vma->size, vma->guard);
}
/**
@@ -447,7 +448,7 @@ int i915_vma_bind(struct i915_vma *vma,
lockdep_assert_held(&vma->vm->mutex);
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
- GEM_BUG_ON(vma->size > vma->node.size);
+ GEM_BUG_ON(vma->size > i915_vma_size(vma));
if (GEM_DEBUG_WARN_ON(range_overflows(vma->node.start,
vma->node.size,
@@ -569,8 +570,8 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
vma->obj->base.size);
} else if (i915_vma_is_map_and_fenceable(vma)) {
ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap,
- vma->node.start,
- vma->node.size);
+ i915_vma_offset(vma),
+ i915_vma_size(vma));
} else {
ptr = (void __iomem *)
i915_gem_object_pin_map(vma->obj, I915_MAP_WC);
@@ -659,22 +660,26 @@ bool i915_vma_misplaced(const struct i915_vma *vma,
if (test_bit(I915_VMA_ERROR_BIT, __i915_vma_flags(vma)))
return true;
- if (vma->node.size < size)
+ if (i915_vma_size(vma) < size)
return true;
GEM_BUG_ON(alignment && !is_power_of_2(alignment));
- if (alignment && !IS_ALIGNED(vma->node.start, alignment))
+ if (alignment && !IS_ALIGNED(i915_vma_offset(vma), alignment))
return true;
if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
return true;
if (flags & PIN_OFFSET_BIAS &&
- vma->node.start < (flags & PIN_OFFSET_MASK))
+ i915_vma_offset(vma) < (flags & PIN_OFFSET_MASK))
return true;
if (flags & PIN_OFFSET_FIXED &&
- vma->node.start != (flags & PIN_OFFSET_MASK))
+ i915_vma_offset(vma) != (flags & PIN_OFFSET_MASK))
+ return true;
+
+ if (flags & PIN_OFFSET_GUARD &&
+ vma->guard < (flags & PIN_OFFSET_MASK))
return true;
return false;
@@ -687,10 +692,11 @@ void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
GEM_BUG_ON(!i915_vma_is_ggtt(vma));
GEM_BUG_ON(!vma->fence_size);
- fenceable = (vma->node.size >= vma->fence_size &&
- IS_ALIGNED(vma->node.start, vma->fence_alignment));
+ fenceable = (i915_vma_size(vma) >= vma->fence_size &&
+ IS_ALIGNED(i915_vma_offset(vma), vma->fence_alignment));
- mappable = vma->node.start + vma->fence_size <= i915_vm_to_ggtt(vma->vm)->mappable_end;
+ mappable = i915_ggtt_offset(vma) + vma->fence_size <=
+ i915_vm_to_ggtt(vma->vm)->mappable_end;
if (mappable && fenceable)
set_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
@@ -748,15 +754,16 @@ static int
i915_vma_insert(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
u64 size, u64 alignment, u64 flags)
{
- unsigned long color;
+ unsigned long color, guard;
u64 start, end;
int ret;
GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
+ GEM_BUG_ON(hweight64(flags & (PIN_OFFSET_GUARD | PIN_OFFSET_FIXED | PIN_OFFSET_BIAS)) > 1);
size = max(size, vma->size);
- alignment = max(alignment, vma->display_alignment);
+ alignment = max_t(typeof(alignment), alignment, vma->display_alignment);
if (flags & PIN_MAPPABLE) {
size = max_t(typeof(size), size, vma->fence_size);
alignment = max_t(typeof(alignment),
@@ -767,6 +774,18 @@ i915_vma_insert(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
GEM_BUG_ON(!IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
GEM_BUG_ON(!is_power_of_2(alignment));
+ guard = vma->guard; /* retain guard across rebinds */
+ if (flags & PIN_OFFSET_GUARD) {
+ GEM_BUG_ON(overflows_type(flags & PIN_OFFSET_MASK, u32));
+ guard = max_t(u32, guard, flags & PIN_OFFSET_MASK);
+ }
+ /*
+ * As we align the node upon insertion, but the hardware gets
+ * node.start + guard, the easiest way to make that work is
+ * to make the guard a multiple of the alignment size.
+ */
+ guard = ALIGN(guard, alignment);
+
start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
@@ -779,11 +798,12 @@ i915_vma_insert(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
alignment = max(alignment, i915_vm_obj_min_alignment(vma->vm, vma->obj));
- /* If binding the object/GGTT view requires more space than the entire
+ /*
+ * If binding the object/GGTT view requires more space than the entire
* aperture has, reject it early before evicting everything in a vain
* attempt to find space.
*/
- if (size > end) {
+ if (size > end - 2 * guard) {
drm_dbg(&to_i915(vma->obj->base.dev)->drm,
"Attempting to bind an object larger than the aperture: request=%llu > %s aperture=%llu\n",
size, flags & PIN_MAPPABLE ? "mappable" : "total", end);
@@ -800,13 +820,23 @@ i915_vma_insert(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
if (!IS_ALIGNED(offset, alignment) ||
range_overflows(offset, size, end))
return -EINVAL;
+ /*
+ * The caller knows not of the guard added by others and
+ * requests for the offset of the start of its buffer
+ * to be fixed, which may not be the same as the position
+ * of the vma->node due to the guard pages.
+ */
+ if (offset < guard || offset + size > end - guard)
+ return -ENOSPC;
ret = i915_gem_gtt_reserve(vma->vm, ww, &vma->node,
- size, offset, color,
- flags);
+ size + 2 * guard,
+ offset - guard,
+ color, flags);
if (ret)
return ret;
} else {
+ size += 2 * guard;
/*
* We only support huge gtt pages through the 48b PPGTT,
* however we also don't want to force any alignment for
@@ -854,6 +884,7 @@ i915_vma_insert(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, color));
list_move_tail(&vma->vm_link, &vma->vm->bound_list);
+ vma->guard = guard;
return 0;
}
@@ -906,7 +937,7 @@ rotate_pages(struct drm_i915_gem_object *obj, unsigned int offset,
struct sg_table *st, struct scatterlist *sg)
{
unsigned int column, row;
- unsigned int src_idx;
+ pgoff_t src_idx;
for (column = 0; column < width; column++) {
unsigned int left;
@@ -1012,7 +1043,7 @@ add_padding_pages(unsigned int count,
static struct scatterlist *
remap_tiled_color_plane_pages(struct drm_i915_gem_object *obj,
- unsigned int offset, unsigned int alignment_pad,
+ unsigned long offset, unsigned int alignment_pad,
unsigned int width, unsigned int height,
unsigned int src_stride, unsigned int dst_stride,
struct sg_table *st, struct scatterlist *sg,
@@ -1071,7 +1102,7 @@ remap_tiled_color_plane_pages(struct drm_i915_gem_object *obj,
static struct scatterlist *
remap_contiguous_pages(struct drm_i915_gem_object *obj,
- unsigned int obj_offset,
+ pgoff_t obj_offset,
unsigned int count,
struct sg_table *st, struct scatterlist *sg)
{
@@ -1104,7 +1135,7 @@ remap_contiguous_pages(struct drm_i915_gem_object *obj,
static struct scatterlist *
remap_linear_color_plane_pages(struct drm_i915_gem_object *obj,
- unsigned int obj_offset, unsigned int alignment_pad,
+ pgoff_t obj_offset, unsigned int alignment_pad,
unsigned int size,
struct sg_table *st, struct scatterlist *sg,
unsigned int *gtt_offset)
@@ -1544,6 +1575,8 @@ static int __i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
u32 align, unsigned int flags)
{
struct i915_address_space *vm = vma->vm;
+ struct intel_gt *gt;
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
int err;
do {
@@ -1559,7 +1592,8 @@ static int __i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
}
/* Unlike i915_vma_pin, we don't take no for an answer! */
- flush_idle_contexts(vm->gt);
+ list_for_each_entry(gt, &ggtt->gt_list, ggtt_link)
+ flush_idle_contexts(gt);
if (mutex_lock_interruptible(&vm->mutex) == 0) {
/*
* We pass NULL ww here, as we don't want to unbind
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index 0757977a489b..ed5c9d682a1b 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -125,13 +125,59 @@ static inline bool i915_vma_is_closed(const struct i915_vma *vma)
return !list_empty(&vma->closed_link);
}
+/* Internal use only. */
+static inline u64 __i915_vma_size(const struct i915_vma *vma)
+{
+ return vma->node.size - 2 * vma->guard;
+}
+
+/**
+ * i915_vma_offset - Obtain the va range size of the vma
+ * @vma: The vma
+ *
+ * GPU virtual address space may be allocated with padding. This
+ * function returns the effective virtual address range size
+ * with padding subtracted.
+ *
+ * Return: The effective virtual address range size.
+ */
+static inline u64 i915_vma_size(const struct i915_vma *vma)
+{
+ GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
+ return __i915_vma_size(vma);
+}
+
+/* Internal use only. */
+static inline u64 __i915_vma_offset(const struct i915_vma *vma)
+{
+ /* The actual start of the vma->pages is after the guard pages. */
+ return vma->node.start + vma->guard;
+}
+
+/**
+ * i915_vma_offset - Obtain the va offset of the vma
+ * @vma: The vma
+ *
+ * GPU virtual address space may be allocated with padding. This
+ * function returns the effective virtual address offset the gpu
+ * should use to access the bound data.
+ *
+ * Return: The effective virtual address offset.
+ */
+static inline u64 i915_vma_offset(const struct i915_vma *vma)
+{
+ GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
+ return __i915_vma_offset(vma);
+}
+
static inline u32 i915_ggtt_offset(const struct i915_vma *vma)
{
GEM_BUG_ON(!i915_vma_is_ggtt(vma));
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
- GEM_BUG_ON(upper_32_bits(vma->node.start));
- GEM_BUG_ON(upper_32_bits(vma->node.start + vma->node.size - 1));
- return lower_32_bits(vma->node.start);
+ GEM_BUG_ON(upper_32_bits(i915_vma_offset(vma)));
+ GEM_BUG_ON(upper_32_bits(i915_vma_offset(vma) +
+ i915_vma_size(vma) - 1));
+ return lower_32_bits(i915_vma_offset(vma));
}
static inline u32 i915_ggtt_pin_bias(struct i915_vma *vma)
diff --git a/drivers/gpu/drm/i915/i915_vma_resource.c b/drivers/gpu/drm/i915/i915_vma_resource.c
index de1342dbfa12..6ba7a7feceba 100644
--- a/drivers/gpu/drm/i915/i915_vma_resource.c
+++ b/drivers/gpu/drm/i915/i915_vma_resource.c
@@ -34,8 +34,8 @@ static struct kmem_cache *slab_vma_resources;
* and removal of fences increases as O(ln(pending_unbinds)) instead of
* O(1) for a single fence without interval tree.
*/
-#define VMA_RES_START(_node) ((_node)->start)
-#define VMA_RES_LAST(_node) ((_node)->start + (_node)->node_size - 1)
+#define VMA_RES_START(_node) ((_node)->start - (_node)->guard)
+#define VMA_RES_LAST(_node) ((_node)->start + (_node)->node_size + (_node)->guard - 1)
INTERVAL_TREE_DEFINE(struct i915_vma_resource, rb,
u64, __subtree_last,
VMA_RES_START, VMA_RES_LAST, static, vma_res_itree);
diff --git a/drivers/gpu/drm/i915/i915_vma_resource.h b/drivers/gpu/drm/i915/i915_vma_resource.h
index 06923d1816e7..c1864e3d0b43 100644
--- a/drivers/gpu/drm/i915/i915_vma_resource.h
+++ b/drivers/gpu/drm/i915/i915_vma_resource.h
@@ -52,9 +52,12 @@ struct i915_page_sizes {
* @mr: The memory region of the object pointed to by the vma.
* @ops: Pointer to the backend i915_vma_ops.
* @private: Bind backend private info.
- * @start: Offset into the address space of bind range start.
- * @node_size: Size of the allocated range manager node.
+ * @start: Offset into the address space of bind range start. Note that
+ * this is after any padding that might have been allocated.
+ * @node_size: Size of the allocated range manager node with padding
+ * subtracted.
* @vma_size: Bind size.
+ * @guard: The size of guard area preceding and trailing the bind.
* @page_sizes_gtt: Resulting page sizes from the bind operation.
* @bound_flags: Flags indicating binding status.
* @allocated: Backend private data. TODO: Should move into @private.
@@ -113,6 +116,7 @@ struct i915_vma_resource {
u64 start;
u64 node_size;
u64 vma_size;
+ u32 guard;
u32 page_sizes_gtt;
u32 bound_flags;
@@ -174,9 +178,10 @@ static inline void i915_vma_resource_put(struct i915_vma_resource *vma_res)
* @mr: The memory region of the object the vma points to.
* @ops: The backend ops.
* @private: Bind backend private info.
- * @start: Offset into the address space of bind range start.
- * @node_size: Size of the allocated range manager node.
+ * @start: Offset into the address space of bind range start after padding.
+ * @node_size: Size of the allocated range manager node minus padding.
* @size: Bind size.
+ * @guard: The size of the guard area preceding and trailing the bind.
*
* Initializes a vma resource allocated using i915_vma_resource_alloc().
* The reason for having separate allocate and initialize function is that
@@ -195,7 +200,8 @@ static inline void i915_vma_resource_init(struct i915_vma_resource *vma_res,
void *private,
u64 start,
u64 node_size,
- u64 size)
+ u64 size,
+ u32 guard)
{
__i915_vma_resource_init(vma_res);
vma_res->vm = vm;
@@ -213,6 +219,7 @@ static inline void i915_vma_resource_init(struct i915_vma_resource *vma_res,
vma_res->start = start;
vma_res->node_size = node_size;
vma_res->vma_size = size;
+ vma_res->guard = guard;
}
static inline void i915_vma_resource_fini(struct i915_vma_resource *vma_res)
diff --git a/drivers/gpu/drm/i915/i915_vma_types.h b/drivers/gpu/drm/i915/i915_vma_types.h
index ec0f6c9f57d0..77fda2244d16 100644
--- a/drivers/gpu/drm/i915/i915_vma_types.h
+++ b/drivers/gpu/drm/i915/i915_vma_types.h
@@ -197,14 +197,15 @@ struct i915_vma {
struct i915_fence_reg *fence;
u64 size;
- u64 display_alignment;
struct i915_page_sizes page_sizes;
/* mmap-offset associated with fencing for this vma */
struct i915_mmap_offset *mmo;
+ u32 guard; /* padding allocated around vma->pages within the node */
u32 fence_size;
u32 fence_alignment;
+ u32 display_alignment;
/**
* Count of the number of times this vma has been opened by different
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index 849baf6c3b3c..98769e5f2c3d 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -29,6 +29,7 @@
#include "display/intel_cdclk.h"
#include "display/intel_de.h"
+#include "display/intel_display.h"
#include "gt/intel_gt_regs.h"
#include "i915_drv.h"
#include "i915_reg.h"
@@ -343,6 +344,12 @@ static void intel_ipver_early_init(struct drm_i915_private *i915)
ip_ver_read(i915, i915_mmio_reg_offset(GMD_ID_GRAPHICS),
&runtime->graphics.ip);
+ /* Wa_22012778468 */
+ if (runtime->graphics.ip.ver == 0x0 &&
+ INTEL_INFO(i915)->platform == INTEL_METEORLAKE) {
+ RUNTIME_INFO(i915)->graphics.ip.ver = 12;
+ RUNTIME_INFO(i915)->graphics.ip.rel = 70;
+ }
ip_ver_read(i915, i915_mmio_reg_offset(GMD_ID_DISPLAY),
&runtime->display.ip);
ip_ver_read(i915, i915_mmio_reg_offset(GMD_ID_MEDIA),
diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h
index d588e5fd2eea..80bda653d61b 100644
--- a/drivers/gpu/drm/i915/intel_device_info.h
+++ b/drivers/gpu/drm/i915/intel_device_info.h
@@ -29,7 +29,7 @@
#include "intel_step.h"
-#include "display/intel_display.h"
+#include "display/intel_display_limits.h"
#include "gt/intel_engine_types.h"
#include "gt/intel_context_types.h"
diff --git a/drivers/gpu/drm/i915/intel_gvt_mmio_table.c b/drivers/gpu/drm/i915/intel_gvt_mmio_table.c
index ce6b3c3b636a..1f4805aa2b08 100644
--- a/drivers/gpu/drm/i915/intel_gvt_mmio_table.c
+++ b/drivers/gpu/drm/i915/intel_gvt_mmio_table.c
@@ -5,6 +5,7 @@
#include "display/intel_audio_regs.h"
#include "display/intel_backlight_regs.h"
+#include "display/intel_display_types.h"
#include "display/intel_dmc_regs.h"
#include "display/intel_dpio_phy.h"
#include "display/vlv_dsi_pll_regs.h"
diff --git a/drivers/gpu/drm/i915/intel_mchbar_regs.h b/drivers/gpu/drm/i915/intel_mchbar_regs.h
index f93e9af43ac3..73900c098d59 100644
--- a/drivers/gpu/drm/i915/intel_mchbar_regs.h
+++ b/drivers/gpu/drm/i915/intel_mchbar_regs.h
@@ -194,6 +194,8 @@
*/
#define PCU_PACKAGE_POWER_SKU _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5930)
#define PKG_PKG_TDP GENMASK_ULL(14, 0)
+#define PKG_MIN_PWR GENMASK_ULL(30, 16)
+#define PKG_MAX_PWR GENMASK_ULL(46, 32)
#define PKG_MAX_WIN GENMASK_ULL(54, 48)
#define PKG_MAX_WIN_X GENMASK_ULL(54, 53)
#define PKG_MAX_WIN_Y GENMASK_ULL(52, 48)
diff --git a/drivers/gpu/drm/i915/intel_memory_region.c b/drivers/gpu/drm/i915/intel_memory_region.c
index b9a164efd6ae..3d1fdea9811d 100644
--- a/drivers/gpu/drm/i915/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/intel_memory_region.c
@@ -235,7 +235,7 @@ intel_memory_region_create(struct drm_i915_private *i915,
return ERR_PTR(-ENOMEM);
mem->i915 = i915;
- mem->region = (struct resource)DEFINE_RES_MEM(start, size);
+ mem->region = DEFINE_RES_MEM(start, size);
mem->io_start = io_start;
mem->io_size = io_size;
mem->min_page_size = min_page_size;
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 73c88b1c9545..59714b1080d4 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -26,6 +26,7 @@
*/
#include "display/intel_de.h"
+#include "display/intel_display.h"
#include "display/intel_display_trace.h"
#include "display/skl_watermark.h"
@@ -4299,8 +4300,8 @@ static void gen8_set_l3sqc_credits(struct drm_i915_private *dev_priv,
u32 val;
/* WaTempDisableDOPClkGating:bdw */
- misccpctl = intel_gt_mcr_multicast_rmw(to_gt(dev_priv), GEN8_MISCCPCTL,
- GEN8_DOP_CLOCK_GATE_ENABLE, 0);
+ misccpctl = intel_uncore_rmw(&dev_priv->uncore, GEN7_MISCCPCTL,
+ GEN7_DOP_CLOCK_GATE_ENABLE, 0);
val = intel_gt_mcr_read_any(to_gt(dev_priv), GEN8_L3SQCREG1);
val &= ~L3_PRIO_CREDITS_MASK;
@@ -4314,7 +4315,7 @@ static void gen8_set_l3sqc_credits(struct drm_i915_private *dev_priv,
*/
intel_gt_mcr_read_any(to_gt(dev_priv), GEN8_L3SQCREG1);
udelay(1);
- intel_gt_mcr_multicast_write(to_gt(dev_priv), GEN8_MISCCPCTL, misccpctl);
+ intel_uncore_write(&dev_priv->uncore, GEN7_MISCCPCTL, misccpctl);
}
static void icl_init_clock_gating(struct drm_i915_private *dev_priv)
@@ -4465,8 +4466,8 @@ static void skl_init_clock_gating(struct drm_i915_private *dev_priv)
gen9_init_clock_gating(dev_priv);
/* WaDisableDopClockGating:skl */
- intel_gt_mcr_multicast_rmw(to_gt(dev_priv), GEN8_MISCCPCTL,
- GEN8_DOP_CLOCK_GATE_ENABLE, 0);
+ intel_uncore_rmw(&dev_priv->uncore, GEN7_MISCCPCTL,
+ GEN7_DOP_CLOCK_GATE_ENABLE, 0);
/* WAC6entrylatency:skl */
intel_uncore_rmw(&dev_priv->uncore, FBC_LLC_READ_CTRL, 0, FBC_LLC_FULLY_OPEN);
diff --git a/drivers/gpu/drm/i915/intel_pm_types.h b/drivers/gpu/drm/i915/intel_pm_types.h
index 211632f58751..93152537b420 100644
--- a/drivers/gpu/drm/i915/intel_pm_types.h
+++ b/drivers/gpu/drm/i915/intel_pm_types.h
@@ -8,7 +8,7 @@
#include <linux/types.h>
-#include "display/intel_display.h"
+#include "display/intel_display_limits.h"
enum intel_ddb_partitioning {
INTEL_DDB_PART_1_2,
diff --git a/drivers/gpu/drm/i915/intel_region_ttm.c b/drivers/gpu/drm/i915/intel_region_ttm.c
index cf89d0c2a2d9..b7fbd5abb42a 100644
--- a/drivers/gpu/drm/i915/intel_region_ttm.c
+++ b/drivers/gpu/drm/i915/intel_region_ttm.c
@@ -2,7 +2,6 @@
/*
* Copyright © 2021 Intel Corporation
*/
-#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_device.h>
#include <drm/ttm/ttm_range_manager.h>
@@ -132,7 +131,7 @@ int intel_region_ttm_fini(struct intel_memory_region *mem)
break;
msleep(20);
- flush_delayed_work(&mem->i915->bdev.wq);
+ drain_workqueue(mem->i915->bdev.wq);
}
/* If we leaked objects, Don't free the region causing use after free */
@@ -209,13 +208,25 @@ intel_region_ttm_resource_alloc(struct intel_memory_region *mem,
if (flags & I915_BO_ALLOC_CONTIGUOUS)
place.flags |= TTM_PL_FLAG_CONTIGUOUS;
if (offset != I915_BO_INVALID_OFFSET) {
+ if (WARN_ON(overflows_type(offset >> PAGE_SHIFT, place.fpfn))) {
+ ret = -E2BIG;
+ goto out;
+ }
place.fpfn = offset >> PAGE_SHIFT;
+ if (WARN_ON(overflows_type(place.fpfn + (size >> PAGE_SHIFT), place.lpfn))) {
+ ret = -E2BIG;
+ goto out;
+ }
place.lpfn = place.fpfn + (size >> PAGE_SHIFT);
} else if (mem->io_size && mem->io_size < mem->total) {
if (flags & I915_BO_ALLOC_GPU_ONLY) {
place.flags |= TTM_PL_FLAG_TOPDOWN;
} else {
place.fpfn = 0;
+ if (WARN_ON(overflows_type(mem->io_size >> PAGE_SHIFT, place.lpfn))) {
+ ret = -E2BIG;
+ goto out;
+ }
place.lpfn = mem->io_size >> PAGE_SHIFT;
}
}
@@ -224,6 +235,8 @@ intel_region_ttm_resource_alloc(struct intel_memory_region *mem,
mock_bo.bdev = &mem->i915->bdev;
ret = man->func->alloc(man, &mock_bo, &place, &res);
+
+out:
if (ret == -ENOSPC)
ret = -ENXIO;
if (!ret)
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.h b/drivers/gpu/drm/i915/intel_runtime_pm.h
index 98b8b28baaa1..e592e8d6499a 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.h
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.h
@@ -96,7 +96,7 @@ struct intel_runtime_pm {
};
#define BITS_PER_WAKEREF \
- BITS_PER_TYPE(struct_member(struct intel_runtime_pm, wakeref_count))
+ BITS_PER_TYPE(typeof_member(struct intel_runtime_pm, wakeref_count))
#define INTEL_RPM_WAKELOCK_SHIFT (BITS_PER_WAKEREF / 2)
#define INTEL_RPM_WAKELOCK_BIAS (1 << INTEL_RPM_WAKELOCK_SHIFT)
#define INTEL_RPM_RAW_WAKEREF_MASK (INTEL_RPM_WAKELOCK_BIAS - 1)
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 614013745fca..8dee9e62a73e 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -2701,6 +2701,62 @@ void intel_uncore_prune_engine_fw_domains(struct intel_uncore *uncore,
if (fw_domains & BIT(domain_id))
fw_domain_fini(uncore, domain_id);
}
+
+ if ((fw_domains & BIT(FW_DOMAIN_ID_GSC)) && !HAS_ENGINE(gt, GSC0))
+ fw_domain_fini(uncore, FW_DOMAIN_ID_GSC);
+}
+
+/*
+ * The driver-initiated FLR is the highest level of reset that we can trigger
+ * from within the driver. It is different from the PCI FLR in that it doesn't
+ * fully reset the SGUnit and doesn't modify the PCI config space and therefore
+ * it doesn't require a re-enumeration of the PCI BARs. However, the
+ * driver-initiated FLR does still cause a reset of both GT and display and a
+ * memory wipe of local and stolen memory, so recovery would require a full HW
+ * re-init and saving/restoring (or re-populating) the wiped memory. Since we
+ * perform the FLR as the very last action before releasing access to the HW
+ * during the driver release flow, we don't attempt recovery at all, because
+ * if/when a new instance of i915 is bound to the device it will do a full
+ * re-init anyway.
+ */
+static void driver_initiated_flr(struct intel_uncore *uncore)
+{
+ struct drm_i915_private *i915 = uncore->i915;
+ const unsigned int flr_timeout_ms = 3000; /* specs recommend a 3s wait */
+ int ret;
+
+ drm_dbg(&i915->drm, "Triggering Driver-FLR\n");
+
+ /*
+ * Make sure any pending FLR requests have cleared by waiting for the
+ * FLR trigger bit to go to zero. Also clear GU_DEBUG's DRIVERFLR_STATUS
+ * to make sure it's not still set from a prior attempt (it's a write to
+ * clear bit).
+ * Note that we should never be in a situation where a previous attempt
+ * is still pending (unless the HW is totally dead), but better to be
+ * safe in case something unexpected happens
+ */
+ ret = intel_wait_for_register_fw(uncore, GU_CNTL, DRIVERFLR, 0, flr_timeout_ms);
+ if (ret) {
+ drm_err(&i915->drm,
+ "Failed to wait for Driver-FLR bit to clear! %d\n",
+ ret);
+ return;
+ }
+ intel_uncore_write_fw(uncore, GU_DEBUG, DRIVERFLR_STATUS);
+
+ /* Trigger the actual Driver-FLR */
+ intel_uncore_rmw_fw(uncore, GU_CNTL, 0, DRIVERFLR);
+
+ ret = intel_wait_for_register_fw(uncore, GU_DEBUG,
+ DRIVERFLR_STATUS, DRIVERFLR_STATUS,
+ flr_timeout_ms);
+ if (ret) {
+ drm_err(&i915->drm, "wait for Driver-FLR completion failed! %d\n", ret);
+ return;
+ }
+
+ intel_uncore_write_fw(uncore, GU_DEBUG, DRIVERFLR_STATUS);
}
/* Called via drm-managed action */
@@ -2716,6 +2772,9 @@ void intel_uncore_fini_mmio(struct drm_device *dev, void *data)
intel_uncore_fw_domains_fini(uncore);
iosf_mbi_punit_release();
}
+
+ if (intel_uncore_needs_flr_on_fini(uncore))
+ driver_initiated_flr(uncore);
}
/**
diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h
index e9e38490815d..9ea1f4864a3a 100644
--- a/drivers/gpu/drm/i915/intel_uncore.h
+++ b/drivers/gpu/drm/i915/intel_uncore.h
@@ -153,6 +153,7 @@ struct intel_uncore {
#define UNCORE_HAS_FPGA_DBG_UNCLAIMED BIT(1)
#define UNCORE_HAS_DBG_UNCLAIMED BIT(2)
#define UNCORE_HAS_FIFO BIT(3)
+#define UNCORE_NEEDS_FLR_ON_FINI BIT(4)
const struct intel_forcewake_range *fw_domains_table;
unsigned int fw_domains_table_entries;
@@ -223,6 +224,18 @@ intel_uncore_has_fifo(const struct intel_uncore *uncore)
return uncore->flags & UNCORE_HAS_FIFO;
}
+static inline bool
+intel_uncore_needs_flr_on_fini(const struct intel_uncore *uncore)
+{
+ return uncore->flags & UNCORE_NEEDS_FLR_ON_FINI;
+}
+
+static inline bool
+intel_uncore_set_flr_on_fini(struct intel_uncore *uncore)
+{
+ return uncore->flags |= UNCORE_NEEDS_FLR_ON_FINI;
+}
+
void intel_uncore_mmio_debug_init_early(struct drm_i915_private *i915);
void intel_uncore_init_early(struct intel_uncore *uncore,
struct intel_gt *gt);
diff --git a/drivers/gpu/drm/i915/intel_wakeref.h b/drivers/gpu/drm/i915/intel_wakeref.h
index 4f4c2e15e736..71b8a63f6f10 100644
--- a/drivers/gpu/drm/i915/intel_wakeref.h
+++ b/drivers/gpu/drm/i915/intel_wakeref.h
@@ -68,11 +68,12 @@ void __intel_wakeref_put_last(struct intel_wakeref *wf, unsigned long flags);
* @wf: the wakeref
*
* Acquire a hold on the wakeref. The first user to do so, will acquire
- * the runtime pm wakeref and then call the @fn underneath the wakeref
- * mutex.
+ * the runtime pm wakeref and then call the intel_wakeref_ops->get()
+ * underneath the wakeref mutex.
*
- * Note that @fn is allowed to fail, in which case the runtime-pm wakeref
- * will be released and the acquisition unwound, and an error reported.
+ * Note that intel_wakeref_ops->get() is allowed to fail, in which case
+ * the runtime-pm wakeref will be released and the acquisition unwound,
+ * and an error reported.
*
* Returns: 0 if the wakeref was acquired successfully, or a negative error
* code otherwise.
@@ -130,19 +131,17 @@ intel_wakeref_might_get(struct intel_wakeref *wf)
}
/**
- * intel_wakeref_put_flags: Release the wakeref
+ * __intel_wakeref_put: Release the wakeref
* @wf: the wakeref
* @flags: control flags
*
* Release our hold on the wakeref. When there are no more users,
- * the runtime pm wakeref will be released after the @fn callback is called
- * underneath the wakeref mutex.
+ * the runtime pm wakeref will be released after the intel_wakeref_ops->put()
+ * callback is called underneath the wakeref mutex.
*
- * Note that @fn is allowed to fail, in which case the runtime-pm wakeref
- * is retained and an error reported.
+ * Note that intel_wakeref_ops->put() is allowed to fail, in which case the
+ * runtime-pm wakeref is retained.
*
- * Returns: 0 if the wakeref was released successfully, or a negative error
- * code otherwise.
*/
static inline void
__intel_wakeref_put(struct intel_wakeref *wf, unsigned long flags)
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp.c b/drivers/gpu/drm/i915/pxp/intel_pxp.c
index 5efe61f67546..cfc9af8b3d21 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp.c
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp.c
@@ -3,13 +3,19 @@
* Copyright(c) 2020 Intel Corporation.
*/
#include <linux/workqueue.h>
+
+#include "gem/i915_gem_context.h"
+
+#include "gt/intel_context.h"
+#include "gt/intel_gt.h"
+
+#include "i915_drv.h"
+
#include "intel_pxp.h"
#include "intel_pxp_irq.h"
#include "intel_pxp_session.h"
#include "intel_pxp_tee.h"
-#include "gem/i915_gem_context.h"
-#include "gt/intel_context.h"
-#include "i915_drv.h"
+#include "intel_pxp_types.h"
/**
* DOC: PXP
@@ -39,19 +45,19 @@
* performed via the mei_pxp component module.
*/
-struct intel_gt *pxp_to_gt(const struct intel_pxp *pxp)
+bool intel_pxp_is_supported(const struct intel_pxp *pxp)
{
- return container_of(pxp, struct intel_gt, pxp);
+ return IS_ENABLED(CONFIG_DRM_I915_PXP) && pxp;
}
bool intel_pxp_is_enabled(const struct intel_pxp *pxp)
{
- return pxp->ce;
+ return IS_ENABLED(CONFIG_DRM_I915_PXP) && pxp && pxp->ce;
}
bool intel_pxp_is_active(const struct intel_pxp *pxp)
{
- return pxp->arb_is_valid;
+ return IS_ENABLED(CONFIG_DRM_I915_PXP) && pxp && pxp->arb_is_valid;
}
/* KCR register definitions */
@@ -74,7 +80,7 @@ static void kcr_pxp_disable(struct intel_gt *gt)
static int create_vcs_context(struct intel_pxp *pxp)
{
static struct lock_class_key pxp_lock;
- struct intel_gt *gt = pxp_to_gt(pxp);
+ struct intel_gt *gt = pxp->ctrl_gt;
struct intel_engine_cs *engine;
struct intel_context *ce;
int i;
@@ -109,7 +115,7 @@ static void destroy_vcs_context(struct intel_pxp *pxp)
static void pxp_init_full(struct intel_pxp *pxp)
{
- struct intel_gt *gt = pxp_to_gt(pxp);
+ struct intel_gt *gt = pxp->ctrl_gt;
int ret;
/*
@@ -138,31 +144,97 @@ out_context:
destroy_vcs_context(pxp);
}
-void intel_pxp_init(struct intel_pxp *pxp)
+static struct intel_gt *find_gt_for_required_teelink(struct drm_i915_private *i915)
{
- struct intel_gt *gt = pxp_to_gt(pxp);
+ /*
+ * NOTE: Only certain platforms require PXP-tee-backend dependencies
+ * for HuC authentication. For now, its limited to DG2.
+ */
+ if (IS_ENABLED(CONFIG_INTEL_MEI_PXP) && IS_ENABLED(CONFIG_INTEL_MEI_GSC) &&
+ intel_huc_is_loaded_by_gsc(&i915->gt0.uc.huc) && intel_uc_uses_huc(&i915->gt0.uc))
+ return &i915->gt0;
- /* we rely on the mei PXP module */
- if (!IS_ENABLED(CONFIG_INTEL_MEI_PXP))
- return;
+ return NULL;
+}
+
+static struct intel_gt *find_gt_for_required_protected_content(struct drm_i915_private *i915)
+{
+ if (!IS_ENABLED(CONFIG_DRM_I915_PXP) || !INTEL_INFO(i915)->has_pxp)
+ return NULL;
/*
- * If HuC is loaded by GSC but PXP is disabled, we can skip the init of
- * the full PXP session/object management and just init the tee channel.
+ * For MTL onwards, PXP-controller-GT needs to have a valid GSC engine
+ * on the media GT. NOTE: if we have a media-tile with a GSC-engine,
+ * the VDBOX is already present so skip that check
*/
- if (HAS_PXP(gt->i915))
- pxp_init_full(pxp);
- else if (intel_huc_is_loaded_by_gsc(&gt->uc.huc) && intel_uc_uses_huc(&gt->uc))
- intel_pxp_tee_component_init(pxp);
+ if (i915->media_gt && HAS_ENGINE(i915->media_gt, GSC0))
+ return i915->media_gt;
+
+ /*
+ * Else we rely on mei-pxp module but only on legacy platforms
+ * prior to having separate media GTs and has a valid VDBOX.
+ */
+ if (IS_ENABLED(CONFIG_INTEL_MEI_PXP) && !i915->media_gt && VDBOX_MASK(&i915->gt0))
+ return &i915->gt0;
+
+ return NULL;
}
-void intel_pxp_fini(struct intel_pxp *pxp)
+int intel_pxp_init(struct drm_i915_private *i915)
{
- pxp->arb_is_valid = false;
+ struct intel_gt *gt;
+ bool is_full_feature = false;
- intel_pxp_tee_component_fini(pxp);
+ /*
+ * NOTE: Get the ctrl_gt before checking intel_pxp_is_supported since
+ * we still need it if PXP's backend tee transport is needed.
+ */
+ gt = find_gt_for_required_protected_content(i915);
+ if (gt)
+ is_full_feature = true;
+ else
+ gt = find_gt_for_required_teelink(i915);
- destroy_vcs_context(pxp);
+ if (!gt)
+ return -ENODEV;
+
+ /*
+ * At this point, we will either enable full featured PXP capabilities
+ * including session and object management, or we will init the backend tee
+ * channel for internal users such as HuC loading by GSC
+ */
+ i915->pxp = kzalloc(sizeof(*i915->pxp), GFP_KERNEL);
+ if (!i915->pxp)
+ return -ENOMEM;
+
+ i915->pxp->ctrl_gt = gt;
+
+ /*
+ * If full PXP feature is not available but HuC is loaded by GSC on pre-MTL
+ * such as DG2, we can skip the init of the full PXP session/object management
+ * and just init the tee channel.
+ */
+ if (is_full_feature)
+ pxp_init_full(i915->pxp);
+ else
+ intel_pxp_tee_component_init(i915->pxp);
+
+ return 0;
+}
+
+void intel_pxp_fini(struct drm_i915_private *i915)
+{
+ if (!i915->pxp)
+ return;
+
+ i915->pxp->arb_is_valid = false;
+
+ intel_pxp_tee_component_fini(i915->pxp);
+
+ destroy_vcs_context(i915->pxp);
+
+ kfree(i915->pxp);
+ i915->pxp = NULL;
}
void intel_pxp_mark_termination_in_progress(struct intel_pxp *pxp)
@@ -173,7 +245,7 @@ void intel_pxp_mark_termination_in_progress(struct intel_pxp *pxp)
static void pxp_queue_termination(struct intel_pxp *pxp)
{
- struct intel_gt *gt = pxp_to_gt(pxp);
+ struct intel_gt *gt = pxp->ctrl_gt;
/*
* We want to get the same effect as if we received a termination
@@ -238,13 +310,13 @@ unlock:
void intel_pxp_init_hw(struct intel_pxp *pxp)
{
- kcr_pxp_enable(pxp_to_gt(pxp));
+ kcr_pxp_enable(pxp->ctrl_gt);
intel_pxp_irq_enable(pxp);
}
void intel_pxp_fini_hw(struct intel_pxp *pxp)
{
- kcr_pxp_disable(pxp_to_gt(pxp));
+ kcr_pxp_disable(pxp->ctrl_gt);
intel_pxp_irq_disable(pxp);
}
@@ -278,7 +350,7 @@ int intel_pxp_key_check(struct intel_pxp *pxp,
void intel_pxp_invalidate(struct intel_pxp *pxp)
{
- struct drm_i915_private *i915 = pxp_to_gt(pxp)->i915;
+ struct drm_i915_private *i915 = pxp->ctrl_gt->i915;
struct i915_gem_context *ctx, *cn;
/* ban all contexts marked as protected */
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp.h b/drivers/gpu/drm/i915/pxp/intel_pxp.h
index 2da309088c6d..04440fada711 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp.h
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp.h
@@ -9,15 +9,16 @@
#include <linux/errno.h>
#include <linux/types.h>
-struct intel_pxp;
struct drm_i915_gem_object;
+struct drm_i915_private;
+struct intel_pxp;
-struct intel_gt *pxp_to_gt(const struct intel_pxp *pxp);
+bool intel_pxp_is_supported(const struct intel_pxp *pxp);
bool intel_pxp_is_enabled(const struct intel_pxp *pxp);
bool intel_pxp_is_active(const struct intel_pxp *pxp);
-void intel_pxp_init(struct intel_pxp *pxp);
-void intel_pxp_fini(struct intel_pxp *pxp);
+int intel_pxp_init(struct drm_i915_private *i915);
+void intel_pxp_fini(struct drm_i915_private *i915);
void intel_pxp_init_hw(struct intel_pxp *pxp);
void intel_pxp_fini_hw(struct intel_pxp *pxp);
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_cmd.c b/drivers/gpu/drm/i915/pxp/intel_pxp_cmd.c
index f41e45763d0d..0eee51c4a772 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_cmd.c
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_cmd.c
@@ -3,9 +3,6 @@
* Copyright(c) 2020, Intel Corporation. All rights reserved.
*/
-#include "intel_pxp.h"
-#include "intel_pxp_cmd.h"
-#include "intel_pxp_session.h"
#include "gt/intel_context.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_gpu_commands.h"
@@ -13,6 +10,11 @@
#include "i915_trace.h"
+#include "intel_pxp.h"
+#include "intel_pxp_cmd.h"
+#include "intel_pxp_session.h"
+#include "intel_pxp_types.h"
+
/* stall until prior PXP and MFX/HCP/HUC objects are cmopleted */
#define MFX_WAIT_PXP (MFX_WAIT | \
MFX_WAIT_DW0_PXP_SYNC_CONTROL_FLAG | \
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_cmd_interface_cmn.h b/drivers/gpu/drm/i915/pxp/intel_pxp_cmd_interface_cmn.h
index c2f23394f9b8..aaa8187a0afb 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_cmd_interface_cmn.h
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_cmd_interface_cmn.h
@@ -17,6 +17,7 @@
*/
enum pxp_status {
PXP_STATUS_SUCCESS = 0x0,
+ PXP_STATUS_ERROR_API_VERSION = 0x1002,
PXP_STATUS_OP_NOT_PERMITTED = 0x4013
};
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.c b/drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.c
index 4359e8be4101..4b8e70caa3ad 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.c
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.c
@@ -9,18 +9,20 @@
#include <drm/drm_print.h>
#include "gt/intel_gt_debugfs.h"
+
#include "i915_drv.h"
+
#include "intel_pxp.h"
#include "intel_pxp_debugfs.h"
#include "intel_pxp_irq.h"
+#include "intel_pxp_types.h"
static int pxp_info_show(struct seq_file *m, void *data)
{
struct intel_pxp *pxp = m->private;
struct drm_printer p = drm_seq_file_printer(m);
- bool enabled = intel_pxp_is_enabled(pxp);
- if (!enabled) {
+ if (!intel_pxp_is_enabled(pxp)) {
drm_printf(&p, "pxp disabled\n");
return 0;
}
@@ -30,7 +32,8 @@ static int pxp_info_show(struct seq_file *m, void *data)
return 0;
}
-DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(pxp_info);
+
+DEFINE_SHOW_ATTRIBUTE(pxp_info);
static int pxp_terminate_get(void *data, u64 *val)
{
@@ -41,7 +44,7 @@ static int pxp_terminate_get(void *data, u64 *val)
static int pxp_terminate_set(void *data, u64 val)
{
struct intel_pxp *pxp = data;
- struct intel_gt *gt = pxp_to_gt(pxp);
+ struct intel_gt *gt = pxp->ctrl_gt;
if (!intel_pxp_is_active(pxp))
return -ENODEV;
@@ -59,23 +62,26 @@ static int pxp_terminate_set(void *data, u64 val)
}
DEFINE_SIMPLE_ATTRIBUTE(pxp_terminate_fops, pxp_terminate_get, pxp_terminate_set, "%llx\n");
-void intel_pxp_debugfs_register(struct intel_pxp *pxp, struct dentry *gt_root)
+
+void intel_pxp_debugfs_register(struct intel_pxp *pxp)
{
- static const struct intel_gt_debugfs_file files[] = {
- { "info", &pxp_info_fops, NULL },
- { "terminate_state", &pxp_terminate_fops, NULL },
- };
- struct dentry *root;
+ struct drm_minor *minor;
+ struct dentry *pxproot;
- if (!gt_root)
+ if (!intel_pxp_is_supported(pxp))
return;
- if (!HAS_PXP((pxp_to_gt(pxp)->i915)))
+ minor = pxp->ctrl_gt->i915->drm.primary;
+ if (!minor->debugfs_root)
return;
- root = debugfs_create_dir("pxp", gt_root);
- if (IS_ERR(root))
+ pxproot = debugfs_create_dir("pxp", minor->debugfs_root);
+ if (IS_ERR(pxproot))
return;
- intel_gt_debugfs_register_files(root, files, ARRAY_SIZE(files), pxp);
+ debugfs_create_file("info", 0444, pxproot,
+ pxp, &pxp_info_fops);
+
+ debugfs_create_file("terminate_state", 0644, pxproot,
+ pxp, &pxp_terminate_fops);
}
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.h b/drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.h
index 7e0c3d2f5d7e..299382b59e66 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.h
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.h
@@ -10,10 +10,10 @@ struct intel_pxp;
struct dentry;
#ifdef CONFIG_DRM_I915_PXP
-void intel_pxp_debugfs_register(struct intel_pxp *pxp, struct dentry *root);
+void intel_pxp_debugfs_register(struct intel_pxp *pxp);
#else
static inline void
-intel_pxp_debugfs_register(struct intel_pxp *pxp, struct dentry *root)
+intel_pxp_debugfs_register(struct intel_pxp *pxp)
{
}
#endif
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_huc.c b/drivers/gpu/drm/i915/pxp/intel_pxp_huc.c
index 2e1165522950..64609d1b1c0f 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_huc.c
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_huc.c
@@ -3,8 +3,6 @@
* Copyright(c) 2021-2022, Intel Corporation. All rights reserved.
*/
-#include <drm/i915_drm.h>
-
#include "i915_drv.h"
#include "gem/i915_gem_region.h"
@@ -18,8 +16,8 @@
int intel_pxp_huc_load_and_auth(struct intel_pxp *pxp)
{
- struct intel_gt *gt = pxp_to_gt(pxp);
- struct intel_huc *huc = &gt->uc.huc;
+ struct intel_gt *gt;
+ struct intel_huc *huc;
struct pxp43_start_huc_auth_in huc_in = {0};
struct pxp43_start_huc_auth_out huc_out = {0};
dma_addr_t huc_phys_addr;
@@ -27,9 +25,12 @@ int intel_pxp_huc_load_and_auth(struct intel_pxp *pxp)
u8 fence_id = 0;
int err;
- if (!pxp->pxp_component)
+ if (!pxp || !pxp->pxp_component)
return -ENODEV;
+ gt = pxp->ctrl_gt;
+ huc = &gt->uc.huc;
+
huc_phys_addr = i915_gem_object_get_dma_address(huc->fw.obj, 0);
/* write the PXP message into the lmem (the sg list) */
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_irq.c b/drivers/gpu/drm/i915/pxp/intel_pxp_irq.c
index c28be430718a..91e9622c07d0 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_irq.c
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_irq.c
@@ -3,14 +3,18 @@
* Copyright(c) 2020 Intel Corporation.
*/
#include <linux/workqueue.h>
-#include "intel_pxp.h"
-#include "intel_pxp_irq.h"
-#include "intel_pxp_session.h"
+
#include "gt/intel_gt_irq.h"
#include "gt/intel_gt_regs.h"
#include "gt/intel_gt_types.h"
+
#include "i915_irq.h"
#include "i915_reg.h"
+
+#include "intel_pxp.h"
+#include "intel_pxp_irq.h"
+#include "intel_pxp_session.h"
+#include "intel_pxp_types.h"
#include "intel_runtime_pm.h"
/**
@@ -20,11 +24,13 @@
*/
void intel_pxp_irq_handler(struct intel_pxp *pxp, u16 iir)
{
- struct intel_gt *gt = pxp_to_gt(pxp);
+ struct intel_gt *gt;
if (GEM_WARN_ON(!intel_pxp_is_enabled(pxp)))
return;
+ gt = pxp->ctrl_gt;
+
lockdep_assert_held(gt->irq_lock);
if (unlikely(!iir))
@@ -62,7 +68,7 @@ static inline void pxp_irq_reset(struct intel_gt *gt)
void intel_pxp_irq_enable(struct intel_pxp *pxp)
{
- struct intel_gt *gt = pxp_to_gt(pxp);
+ struct intel_gt *gt = pxp->ctrl_gt;
spin_lock_irq(gt->irq_lock);
@@ -77,7 +83,7 @@ void intel_pxp_irq_enable(struct intel_pxp *pxp)
void intel_pxp_irq_disable(struct intel_pxp *pxp)
{
- struct intel_gt *gt = pxp_to_gt(pxp);
+ struct intel_gt *gt = pxp->ctrl_gt;
/*
* We always need to submit a global termination when we re-enable the
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_pm.c b/drivers/gpu/drm/i915/pxp/intel_pxp_pm.c
index 6a7d4e2ee138..892d39cc61c1 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_pm.c
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_pm.c
@@ -3,11 +3,13 @@
* Copyright(c) 2020 Intel Corporation.
*/
+#include "i915_drv.h"
+
#include "intel_pxp.h"
#include "intel_pxp_irq.h"
#include "intel_pxp_pm.h"
#include "intel_pxp_session.h"
-#include "i915_drv.h"
+#include "intel_pxp_types.h"
void intel_pxp_suspend_prepare(struct intel_pxp *pxp)
{
@@ -26,7 +28,7 @@ void intel_pxp_suspend(struct intel_pxp *pxp)
if (!intel_pxp_is_enabled(pxp))
return;
- with_intel_runtime_pm(&pxp_to_gt(pxp)->i915->runtime_pm, wakeref) {
+ with_intel_runtime_pm(&pxp->ctrl_gt->i915->runtime_pm, wakeref) {
intel_pxp_fini_hw(pxp);
pxp->hw_state_invalidated = false;
}
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_session.c b/drivers/gpu/drm/i915/pxp/intel_pxp_session.c
index 85572360c71a..ae413580b81a 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_session.c
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_session.c
@@ -20,7 +20,7 @@
static bool intel_pxp_session_is_in_play(struct intel_pxp *pxp, u32 id)
{
- struct intel_uncore *uncore = pxp_to_gt(pxp)->uncore;
+ struct intel_uncore *uncore = pxp->ctrl_gt->uncore;
intel_wakeref_t wakeref;
u32 sip = 0;
@@ -33,7 +33,7 @@ static bool intel_pxp_session_is_in_play(struct intel_pxp *pxp, u32 id)
static int pxp_wait_for_session_state(struct intel_pxp *pxp, u32 id, bool in_play)
{
- struct intel_uncore *uncore = pxp_to_gt(pxp)->uncore;
+ struct intel_uncore *uncore = pxp->ctrl_gt->uncore;
intel_wakeref_t wakeref;
u32 mask = BIT(id);
int ret;
@@ -56,7 +56,7 @@ static int pxp_wait_for_session_state(struct intel_pxp *pxp, u32 id, bool in_pla
static int pxp_create_arb_session(struct intel_pxp *pxp)
{
- struct intel_gt *gt = pxp_to_gt(pxp);
+ struct intel_gt *gt = pxp->ctrl_gt;
int ret;
pxp->arb_is_valid = false;
@@ -90,7 +90,7 @@ static int pxp_create_arb_session(struct intel_pxp *pxp)
static int pxp_terminate_arb_session_and_global(struct intel_pxp *pxp)
{
int ret;
- struct intel_gt *gt = pxp_to_gt(pxp);
+ struct intel_gt *gt = pxp->ctrl_gt;
/* must mark termination in progress calling this function */
GEM_WARN_ON(pxp->arb_is_valid);
@@ -141,7 +141,7 @@ static void pxp_terminate_complete(struct intel_pxp *pxp)
static void pxp_session_work(struct work_struct *work)
{
struct intel_pxp *pxp = container_of(work, typeof(*pxp), session_work);
- struct intel_gt *gt = pxp_to_gt(pxp);
+ struct intel_gt *gt = pxp->ctrl_gt;
intel_wakeref_t wakeref;
u32 events = 0;
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c b/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c
index b0c9170b1395..73aa8015f828 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c
@@ -11,25 +11,20 @@
#include "gem/i915_gem_lmem.h"
#include "i915_drv.h"
+
#include "intel_pxp.h"
-#include "intel_pxp_session.h"
-#include "intel_pxp_tee.h"
#include "intel_pxp_cmd_interface_42.h"
#include "intel_pxp_huc.h"
-
-static inline struct intel_pxp *i915_dev_to_pxp(struct device *i915_kdev)
-{
- struct drm_i915_private *i915 = kdev_to_i915(i915_kdev);
-
- return &to_gt(i915)->pxp;
-}
+#include "intel_pxp_session.h"
+#include "intel_pxp_tee.h"
+#include "intel_pxp_types.h"
static int intel_pxp_tee_io_message(struct intel_pxp *pxp,
void *msg_in, u32 msg_in_size,
void *msg_out, u32 msg_out_max_size,
u32 *msg_out_rcv_size)
{
- struct drm_i915_private *i915 = pxp_to_gt(pxp)->i915;
+ struct drm_i915_private *i915 = pxp->ctrl_gt->i915;
struct i915_pxp_component *pxp_component = pxp->pxp_component;
int ret = 0;
@@ -79,7 +74,7 @@ int intel_pxp_tee_stream_message(struct intel_pxp *pxp,
{
/* TODO: for bigger objects we need to use a sg of 4k pages */
const size_t max_msg_size = PAGE_SIZE;
- struct drm_i915_private *i915 = pxp_to_gt(pxp)->i915;
+ struct drm_i915_private *i915 = pxp->ctrl_gt->i915;
struct i915_pxp_component *pxp_component = pxp->pxp_component;
unsigned int offset = 0;
struct scatterlist *sg;
@@ -127,8 +122,8 @@ static int i915_pxp_tee_component_bind(struct device *i915_kdev,
struct device *tee_kdev, void *data)
{
struct drm_i915_private *i915 = kdev_to_i915(i915_kdev);
- struct intel_pxp *pxp = i915_dev_to_pxp(i915_kdev);
- struct intel_uc *uc = &pxp_to_gt(pxp)->uc;
+ struct intel_pxp *pxp = i915->pxp;
+ struct intel_uc *uc = &pxp->ctrl_gt->uc;
intel_wakeref_t wakeref;
int ret = 0;
@@ -164,7 +159,7 @@ static void i915_pxp_tee_component_unbind(struct device *i915_kdev,
struct device *tee_kdev, void *data)
{
struct drm_i915_private *i915 = kdev_to_i915(i915_kdev);
- struct intel_pxp *pxp = i915_dev_to_pxp(i915_kdev);
+ struct intel_pxp *pxp = i915->pxp;
intel_wakeref_t wakeref;
if (intel_pxp_is_enabled(pxp))
@@ -183,7 +178,7 @@ static const struct component_ops i915_pxp_tee_component_ops = {
static int alloc_streaming_command(struct intel_pxp *pxp)
{
- struct drm_i915_private *i915 = pxp_to_gt(pxp)->i915;
+ struct drm_i915_private *i915 = pxp->ctrl_gt->i915;
struct drm_i915_gem_object *obj = NULL;
void *cmd;
int err;
@@ -244,7 +239,7 @@ static void free_streaming_command(struct intel_pxp *pxp)
int intel_pxp_tee_component_init(struct intel_pxp *pxp)
{
int ret;
- struct intel_gt *gt = pxp_to_gt(pxp);
+ struct intel_gt *gt = pxp->ctrl_gt;
struct drm_i915_private *i915 = gt->i915;
mutex_init(&pxp->tee_mutex);
@@ -271,7 +266,7 @@ out_free:
void intel_pxp_tee_component_fini(struct intel_pxp *pxp)
{
- struct drm_i915_private *i915 = pxp_to_gt(pxp)->i915;
+ struct drm_i915_private *i915 = pxp->ctrl_gt->i915;
if (!pxp->pxp_component_added)
return;
@@ -285,7 +280,7 @@ void intel_pxp_tee_component_fini(struct intel_pxp *pxp)
int intel_pxp_tee_cmd_create_arb_session(struct intel_pxp *pxp,
int arb_session_id)
{
- struct drm_i915_private *i915 = pxp_to_gt(pxp)->i915;
+ struct drm_i915_private *i915 = pxp->ctrl_gt->i915;
struct pxp42_create_arb_in msg_in = {0};
struct pxp42_create_arb_out msg_out = {0};
int ret;
@@ -303,6 +298,10 @@ int intel_pxp_tee_cmd_create_arb_session(struct intel_pxp *pxp,
if (ret)
drm_err(&i915->drm, "Failed to send tee msg ret=[%d]\n", ret);
+ else if (msg_out.header.status == PXP_STATUS_ERROR_API_VERSION)
+ drm_dbg(&i915->drm, "PXP firmware version unsupported, requested: "
+ "CMD-ID-[0x%08x] on API-Ver-[0x%08x]\n",
+ msg_in.header.command_id, msg_in.header.api_version);
else if (msg_out.header.status != 0x0)
drm_warn(&i915->drm, "PXP firmware failed arb session init request ret=[0x%08x]\n",
msg_out.header.status);
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_types.h b/drivers/gpu/drm/i915/pxp/intel_pxp_types.h
index f74b1e11a505..7dc5f08d1583 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_types.h
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_types.h
@@ -12,13 +12,21 @@
#include <linux/workqueue.h>
struct intel_context;
+struct intel_gt;
struct i915_pxp_component;
+struct drm_i915_private;
/**
* struct intel_pxp - pxp state
*/
struct intel_pxp {
/**
+ * @ctrl_gt: poiner to the tile that owns the controls for PXP subsystem assets that
+ * the VDBOX, the KCR engine (and GSC CS depending on the platform)
+ */
+ struct intel_gt *ctrl_gt;
+
+ /**
* @pxp_component: i915_pxp_component struct of the bound mei_pxp
* module. Only set and cleared inside component bind/unbind functions,
* which are protected by &tee_mutex.
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem.c b/drivers/gpu/drm/i915/selftests/i915_gem.c
index e5dd82e7e480..d91d0ade8abd 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem.c
@@ -44,7 +44,7 @@ static void trash_stolen(struct drm_i915_private *i915)
{
struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
const u64 slot = ggtt->error_capture.start;
- const resource_size_t size = resource_size(&i915->dsm);
+ const resource_size_t size = resource_size(&i915->dsm.stolen);
unsigned long page;
u32 prng = 0x12345678;
@@ -53,7 +53,7 @@ static void trash_stolen(struct drm_i915_private *i915)
return;
for (page = 0; page < size; page += PAGE_SIZE) {
- const dma_addr_t dma = i915->dsm.start + page;
+ const dma_addr_t dma = i915->dsm.stolen.start + page;
u32 __iomem *s;
int x;
@@ -127,6 +127,8 @@ static void igt_pm_resume(struct drm_i915_private *i915)
*/
with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
i915_ggtt_resume(to_gt(i915)->ggtt);
+ if (GRAPHICS_VER(i915) >= 8)
+ setup_private_pat(to_gt(i915));
i915_gem_resume(i915);
}
}
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index eae7d947d7de..01e75160a84a 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -68,6 +68,10 @@ static int fake_get_pages(struct drm_i915_gem_object *obj)
return -ENOMEM;
rem = round_up(obj->base.size, BIT(31)) >> 31;
+ /* restricted by sg_alloc_table */
+ if (overflows_type(rem, unsigned int))
+ return -E2BIG;
+
if (sg_alloc_table(pages, rem, GFP)) {
kfree(pages);
return -ENOMEM;
diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
index 0daa8669181d..6fe22b096bdd 100644
--- a/drivers/gpu/drm/i915/selftests/i915_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_request.c
@@ -1017,8 +1017,8 @@ empty_request(struct intel_engine_cs *engine,
return request;
err = engine->emit_bb_start(request,
- batch->node.start,
- batch->node.size,
+ i915_vma_offset(batch),
+ i915_vma_size(batch),
I915_DISPATCH_SECURE);
if (err)
goto out_request;
@@ -1138,14 +1138,14 @@ static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
if (ver >= 8) {
*cmd++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
- *cmd++ = lower_32_bits(vma->node.start);
- *cmd++ = upper_32_bits(vma->node.start);
+ *cmd++ = lower_32_bits(i915_vma_offset(vma));
+ *cmd++ = upper_32_bits(i915_vma_offset(vma));
} else if (ver >= 6) {
*cmd++ = MI_BATCH_BUFFER_START | 1 << 8;
- *cmd++ = lower_32_bits(vma->node.start);
+ *cmd++ = lower_32_bits(i915_vma_offset(vma));
} else {
*cmd++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
- *cmd++ = lower_32_bits(vma->node.start);
+ *cmd++ = lower_32_bits(i915_vma_offset(vma));
}
*cmd++ = MI_BATCH_BUFFER_END; /* terminate early in case of error */
@@ -1227,8 +1227,8 @@ static int live_all_engines(void *arg)
GEM_BUG_ON(err);
err = engine->emit_bb_start(request[idx],
- batch->node.start,
- batch->node.size,
+ i915_vma_offset(batch),
+ i915_vma_size(batch),
0);
GEM_BUG_ON(err);
request[idx]->batch = batch;
@@ -1354,8 +1354,8 @@ static int live_sequential_engines(void *arg)
GEM_BUG_ON(err);
err = engine->emit_bb_start(request[idx],
- batch->node.start,
- batch->node.size,
+ i915_vma_offset(batch),
+ i915_vma_size(batch),
0);
GEM_BUG_ON(err);
request[idx]->batch = batch;
diff --git a/drivers/gpu/drm/i915/selftests/igt_flush_test.c b/drivers/gpu/drm/i915/selftests/igt_flush_test.c
index b484e12df417..29110abb4fe0 100644
--- a/drivers/gpu/drm/i915/selftests/igt_flush_test.c
+++ b/drivers/gpu/drm/i915/selftests/igt_flush_test.c
@@ -14,21 +14,27 @@
int igt_flush_test(struct drm_i915_private *i915)
{
- struct intel_gt *gt = to_gt(i915);
- int ret = intel_gt_is_wedged(gt) ? -EIO : 0;
+ struct intel_gt *gt;
+ unsigned int i;
+ int ret = 0;
- cond_resched();
+ for_each_gt(gt, i915, i) {
+ if (intel_gt_is_wedged(gt))
+ ret = -EIO;
- if (intel_gt_wait_for_idle(gt, HZ * 3) == -ETIME) {
- pr_err("%pS timed out, cancelling all further testing.\n",
- __builtin_return_address(0));
+ cond_resched();
- GEM_TRACE("%pS timed out.\n",
- __builtin_return_address(0));
- GEM_TRACE_DUMP();
+ if (intel_gt_wait_for_idle(gt, HZ * 3) == -ETIME) {
+ pr_err("%pS timed out, cancelling all further testing.\n",
+ __builtin_return_address(0));
- intel_gt_set_wedged(gt);
- ret = -EIO;
+ GEM_TRACE("%pS timed out.\n",
+ __builtin_return_address(0));
+ GEM_TRACE_DUMP();
+
+ intel_gt_set_wedged(gt);
+ ret = -EIO;
+ }
}
return ret;
diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.c b/drivers/gpu/drm/i915/selftests/igt_spinner.c
index 16978ac59797..618d9386d554 100644
--- a/drivers/gpu/drm/i915/selftests/igt_spinner.c
+++ b/drivers/gpu/drm/i915/selftests/igt_spinner.c
@@ -116,7 +116,7 @@ static unsigned int seqno_offset(u64 fence)
static u64 hws_address(const struct i915_vma *hws,
const struct i915_request *rq)
{
- return hws->node.start + seqno_offset(rq->fence.context);
+ return i915_vma_offset(hws) + seqno_offset(rq->fence.context);
}
struct i915_request *
@@ -187,8 +187,8 @@ igt_spinner_create_request(struct igt_spinner *spin,
*batch++ = MI_BATCH_BUFFER_START;
else
*batch++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
- *batch++ = lower_32_bits(vma->node.start);
- *batch++ = upper_32_bits(vma->node.start);
+ *batch++ = lower_32_bits(i915_vma_offset(vma));
+ *batch++ = upper_32_bits(i915_vma_offset(vma));
*batch++ = MI_BATCH_BUFFER_END; /* not reached */
@@ -203,7 +203,7 @@ igt_spinner_create_request(struct igt_spinner *spin,
flags = 0;
if (GRAPHICS_VER(rq->engine->i915) <= 5)
flags |= I915_DISPATCH_SECURE;
- err = engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, flags);
+ err = engine->emit_bb_start(rq, i915_vma_offset(vma), PAGE_SIZE, flags);
cancel_rq:
if (err) {
diff --git a/drivers/gpu/drm/i915/selftests/mock_gtt.c b/drivers/gpu/drm/i915/selftests/mock_gtt.c
index 568840e7ca66..ece97e4faacb 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gtt.c
@@ -112,7 +112,7 @@ void mock_init_ggtt(struct intel_gt *gt)
ggtt->vm.i915 = gt->i915;
ggtt->vm.is_ggtt = true;
- ggtt->gmadr = (struct resource) DEFINE_RES_MEM(0, 2048 * PAGE_SIZE);
+ ggtt->gmadr = DEFINE_RES_MEM(0, 2048 * PAGE_SIZE);
ggtt->mappable_end = resource_size(&ggtt->gmadr);
ggtt->vm.total = 4096 * PAGE_SIZE;
diff --git a/drivers/gpu/drm/i915/selftests/scatterlist.c b/drivers/gpu/drm/i915/selftests/scatterlist.c
index d599186d5b71..805c4bfb85fe 100644
--- a/drivers/gpu/drm/i915/selftests/scatterlist.c
+++ b/drivers/gpu/drm/i915/selftests/scatterlist.c
@@ -220,6 +220,10 @@ static int alloc_table(struct pfn_table *pt,
struct scatterlist *sg;
unsigned long n, pfn;
+ /* restricted by sg_alloc_table */
+ if (overflows_type(max, unsigned int))
+ return -E2BIG;
+
if (sg_alloc_table(&pt->st, max,
GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN))
return alloc_error;
diff --git a/drivers/gpu/drm/i915/intel_dram.c b/drivers/gpu/drm/i915/soc/intel_dram.c
index bba8cb6e8ae4..bba8cb6e8ae4 100644
--- a/drivers/gpu/drm/i915/intel_dram.c
+++ b/drivers/gpu/drm/i915/soc/intel_dram.c
diff --git a/drivers/gpu/drm/i915/intel_dram.h b/drivers/gpu/drm/i915/soc/intel_dram.h
index 4ba13c13162c..4ba13c13162c 100644
--- a/drivers/gpu/drm/i915/intel_dram.h
+++ b/drivers/gpu/drm/i915/soc/intel_dram.h
diff --git a/drivers/gpu/drm/i915/soc/intel_gmch.c b/drivers/gpu/drm/i915/soc/intel_gmch.c
new file mode 100644
index 000000000000..6d0204942f7a
--- /dev/null
+++ b/drivers/gpu/drm/i915/soc/intel_gmch.c
@@ -0,0 +1,171 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include <linux/pci.h>
+#include <linux/pnp.h>
+
+#include <drm/drm_managed.h>
+#include <drm/i915_drm.h>
+
+#include "i915_drv.h"
+#include "intel_gmch.h"
+#include "intel_pci_config.h"
+
+static void intel_gmch_bridge_release(struct drm_device *dev, void *bridge)
+{
+ pci_dev_put(bridge);
+}
+
+int intel_gmch_bridge_setup(struct drm_i915_private *i915)
+{
+ int domain = pci_domain_nr(to_pci_dev(i915->drm.dev)->bus);
+
+ i915->gmch.pdev = pci_get_domain_bus_and_slot(domain, 0, PCI_DEVFN(0, 0));
+ if (!i915->gmch.pdev) {
+ drm_err(&i915->drm, "bridge device not found\n");
+ return -EIO;
+ }
+
+ return drmm_add_action_or_reset(&i915->drm, intel_gmch_bridge_release,
+ i915->gmch.pdev);
+}
+
+/* Allocate space for the MCH regs if needed, return nonzero on error */
+static int
+intel_alloc_mchbar_resource(struct drm_i915_private *i915)
+{
+ int reg = GRAPHICS_VER(i915) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
+ u32 temp_lo, temp_hi = 0;
+ u64 mchbar_addr;
+ int ret;
+
+ if (GRAPHICS_VER(i915) >= 4)
+ pci_read_config_dword(i915->gmch.pdev, reg + 4, &temp_hi);
+ pci_read_config_dword(i915->gmch.pdev, reg, &temp_lo);
+ mchbar_addr = ((u64)temp_hi << 32) | temp_lo;
+
+ /* If ACPI doesn't have it, assume we need to allocate it ourselves */
+#ifdef CONFIG_PNP
+ if (mchbar_addr &&
+ pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
+ return 0;
+#endif
+
+ /* Get some space for it */
+ i915->gmch.mch_res.name = "i915 MCHBAR";
+ i915->gmch.mch_res.flags = IORESOURCE_MEM;
+ ret = pci_bus_alloc_resource(i915->gmch.pdev->bus,
+ &i915->gmch.mch_res,
+ MCHBAR_SIZE, MCHBAR_SIZE,
+ PCIBIOS_MIN_MEM,
+ 0, pcibios_align_resource,
+ i915->gmch.pdev);
+ if (ret) {
+ drm_dbg(&i915->drm, "failed bus alloc: %d\n", ret);
+ i915->gmch.mch_res.start = 0;
+ return ret;
+ }
+
+ if (GRAPHICS_VER(i915) >= 4)
+ pci_write_config_dword(i915->gmch.pdev, reg + 4,
+ upper_32_bits(i915->gmch.mch_res.start));
+
+ pci_write_config_dword(i915->gmch.pdev, reg,
+ lower_32_bits(i915->gmch.mch_res.start));
+ return 0;
+}
+
+/* Setup MCHBAR if possible, return true if we should disable it again */
+void intel_gmch_bar_setup(struct drm_i915_private *i915)
+{
+ int mchbar_reg = GRAPHICS_VER(i915) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
+ u32 temp;
+ bool enabled;
+
+ if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
+ return;
+
+ i915->gmch.mchbar_need_disable = false;
+
+ if (IS_I915G(i915) || IS_I915GM(i915)) {
+ pci_read_config_dword(i915->gmch.pdev, DEVEN, &temp);
+ enabled = !!(temp & DEVEN_MCHBAR_EN);
+ } else {
+ pci_read_config_dword(i915->gmch.pdev, mchbar_reg, &temp);
+ enabled = temp & 1;
+ }
+
+ /* If it's already enabled, don't have to do anything */
+ if (enabled)
+ return;
+
+ if (intel_alloc_mchbar_resource(i915))
+ return;
+
+ i915->gmch.mchbar_need_disable = true;
+
+ /* Space is allocated or reserved, so enable it. */
+ if (IS_I915G(i915) || IS_I915GM(i915)) {
+ pci_write_config_dword(i915->gmch.pdev, DEVEN,
+ temp | DEVEN_MCHBAR_EN);
+ } else {
+ pci_read_config_dword(i915->gmch.pdev, mchbar_reg, &temp);
+ pci_write_config_dword(i915->gmch.pdev, mchbar_reg, temp | 1);
+ }
+}
+
+void intel_gmch_bar_teardown(struct drm_i915_private *i915)
+{
+ int mchbar_reg = GRAPHICS_VER(i915) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
+
+ if (i915->gmch.mchbar_need_disable) {
+ if (IS_I915G(i915) || IS_I915GM(i915)) {
+ u32 deven_val;
+
+ pci_read_config_dword(i915->gmch.pdev, DEVEN,
+ &deven_val);
+ deven_val &= ~DEVEN_MCHBAR_EN;
+ pci_write_config_dword(i915->gmch.pdev, DEVEN,
+ deven_val);
+ } else {
+ u32 mchbar_val;
+
+ pci_read_config_dword(i915->gmch.pdev, mchbar_reg,
+ &mchbar_val);
+ mchbar_val &= ~1;
+ pci_write_config_dword(i915->gmch.pdev, mchbar_reg,
+ mchbar_val);
+ }
+ }
+
+ if (i915->gmch.mch_res.start)
+ release_resource(&i915->gmch.mch_res);
+}
+
+int intel_gmch_vga_set_state(struct drm_i915_private *i915, bool enable_decode)
+{
+ unsigned int reg = DISPLAY_VER(i915) >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
+ u16 gmch_ctrl;
+
+ if (pci_read_config_word(i915->gmch.pdev, reg, &gmch_ctrl)) {
+ drm_err(&i915->drm, "failed to read control word\n");
+ return -EIO;
+ }
+
+ if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !enable_decode)
+ return 0;
+
+ if (enable_decode)
+ gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
+ else
+ gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
+
+ if (pci_write_config_word(i915->gmch.pdev, reg, gmch_ctrl)) {
+ drm_err(&i915->drm, "failed to write control word\n");
+ return -EIO;
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/i915/soc/intel_gmch.h b/drivers/gpu/drm/i915/soc/intel_gmch.h
new file mode 100644
index 000000000000..d0133eedc720
--- /dev/null
+++ b/drivers/gpu/drm/i915/soc/intel_gmch.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef __INTEL_GMCH_H__
+#define __INTEL_GMCH_H__
+
+#include <linux/types.h>
+
+struct drm_i915_private;
+
+int intel_gmch_bridge_setup(struct drm_i915_private *i915);
+void intel_gmch_bar_setup(struct drm_i915_private *i915);
+void intel_gmch_bar_teardown(struct drm_i915_private *i915);
+int intel_gmch_vga_set_state(struct drm_i915_private *i915, bool enable_decode);
+
+#endif /* __INTEL_GMCH_H__ */
diff --git a/drivers/gpu/drm/i915/intel_pch.c b/drivers/gpu/drm/i915/soc/intel_pch.c
index ba9843cb1b13..ba9843cb1b13 100644
--- a/drivers/gpu/drm/i915/intel_pch.c
+++ b/drivers/gpu/drm/i915/soc/intel_pch.c
diff --git a/drivers/gpu/drm/i915/intel_pch.h b/drivers/gpu/drm/i915/soc/intel_pch.h
index 32aff5a70d04..32aff5a70d04 100644
--- a/drivers/gpu/drm/i915/intel_pch.h
+++ b/drivers/gpu/drm/i915/soc/intel_pch.h
diff --git a/drivers/gpu/drm/i915/vlv_sideband.c b/drivers/gpu/drm/i915/vlv_sideband.c
index 6eea6e1a99c0..b98dec3ad817 100644
--- a/drivers/gpu/drm/i915/vlv_sideband.c
+++ b/drivers/gpu/drm/i915/vlv_sideband.c
@@ -9,6 +9,7 @@
#include "vlv_sideband.h"
#include "display/intel_dpio_phy.h"
+#include "display/intel_display_types.h"
/*
* IOSF sideband, see VLV2_SidebandMsg_HAS.docx and
diff --git a/drivers/gpu/drm/imx/Kconfig b/drivers/gpu/drm/imx/Kconfig
index fd5b2471fdf0..e5749927fd6c 100644
--- a/drivers/gpu/drm/imx/Kconfig
+++ b/drivers/gpu/drm/imx/Kconfig
@@ -1,43 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
-config DRM_IMX
- tristate "DRM Support for Freescale i.MX"
- select DRM_KMS_HELPER
- select VIDEOMODE_HELPERS
- select DRM_GEM_DMA_HELPER
- depends on DRM && (ARCH_MXC || ARCH_MULTIPLATFORM || COMPILE_TEST)
- depends on IMX_IPUV3_CORE
- help
- enable i.MX graphics support
-
-config DRM_IMX_PARALLEL_DISPLAY
- tristate "Support for parallel displays"
- select DRM_PANEL
- depends on DRM_IMX
- select VIDEOMODE_HELPERS
-
-config DRM_IMX_TVE
- tristate "Support for TV and VGA displays"
- depends on DRM_IMX
- depends on COMMON_CLK
- select REGMAP_MMIO
- help
- Choose this to enable the internal Television Encoder (TVe)
- found on i.MX53 processors.
-
-config DRM_IMX_LDB
- tristate "Support for LVDS displays"
- depends on DRM_IMX && MFD_SYSCON
- depends on COMMON_CLK
- select DRM_PANEL
- help
- Choose this to enable the internal LVDS Display Bridge (LDB)
- found on i.MX53 and i.MX6 processors.
-
-config DRM_IMX_HDMI
- tristate "Freescale i.MX DRM HDMI"
- select DRM_DW_HDMI
- depends on DRM_IMX && OF
- help
- Choose this if you want to use HDMI on i.MX6.
source "drivers/gpu/drm/imx/dcss/Kconfig"
+source "drivers/gpu/drm/imx/ipuv3/Kconfig"
diff --git a/drivers/gpu/drm/imx/Makefile b/drivers/gpu/drm/imx/Makefile
index b644deffe948..909622864716 100644
--- a/drivers/gpu/drm/imx/Makefile
+++ b/drivers/gpu/drm/imx/Makefile
@@ -1,12 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
-imxdrm-objs := imx-drm-core.o ipuv3-crtc.o ipuv3-plane.o
-
-obj-$(CONFIG_DRM_IMX) += imxdrm.o
-
-obj-$(CONFIG_DRM_IMX_PARALLEL_DISPLAY) += parallel-display.o
-obj-$(CONFIG_DRM_IMX_TVE) += imx-tve.o
-obj-$(CONFIG_DRM_IMX_LDB) += imx-ldb.o
-
-obj-$(CONFIG_DRM_IMX_HDMI) += dw_hdmi-imx.o
obj-$(CONFIG_DRM_IMX_DCSS) += dcss/
+obj-$(CONFIG_DRM_IMX) += ipuv3/
diff --git a/drivers/gpu/drm/imx/dcss/dcss-dev.c b/drivers/gpu/drm/imx/dcss/dcss-dev.c
index 3f5750cc2673..5d1779ab65c0 100644
--- a/drivers/gpu/drm/imx/dcss/dcss-dev.c
+++ b/drivers/gpu/drm/imx/dcss/dcss-dev.c
@@ -249,16 +249,12 @@ void dcss_dev_destroy(struct dcss_dev *dcss)
kfree(dcss);
}
-#ifdef CONFIG_PM_SLEEP
-int dcss_dev_suspend(struct device *dev)
+static int dcss_dev_suspend(struct device *dev)
{
struct dcss_dev *dcss = dcss_drv_dev_to_dcss(dev);
struct drm_device *ddev = dcss_drv_dev_to_drm(dev);
- struct dcss_kms_dev *kms = container_of(ddev, struct dcss_kms_dev, base);
int ret;
- drm_bridge_connector_disable_hpd(kms->connector);
-
drm_mode_config_helper_suspend(ddev);
if (pm_runtime_suspended(dev))
@@ -273,11 +269,10 @@ int dcss_dev_suspend(struct device *dev)
return 0;
}
-int dcss_dev_resume(struct device *dev)
+static int dcss_dev_resume(struct device *dev)
{
struct dcss_dev *dcss = dcss_drv_dev_to_dcss(dev);
struct drm_device *ddev = dcss_drv_dev_to_drm(dev);
- struct dcss_kms_dev *kms = container_of(ddev, struct dcss_kms_dev, base);
if (pm_runtime_suspended(dev)) {
drm_mode_config_helper_resume(ddev);
@@ -292,14 +287,10 @@ int dcss_dev_resume(struct device *dev)
drm_mode_config_helper_resume(ddev);
- drm_bridge_connector_enable_hpd(kms->connector);
-
return 0;
}
-#endif /* CONFIG_PM_SLEEP */
-#ifdef CONFIG_PM
-int dcss_dev_runtime_suspend(struct device *dev)
+static int dcss_dev_runtime_suspend(struct device *dev)
{
struct dcss_dev *dcss = dcss_drv_dev_to_dcss(dev);
int ret;
@@ -313,7 +304,7 @@ int dcss_dev_runtime_suspend(struct device *dev)
return 0;
}
-int dcss_dev_runtime_resume(struct device *dev)
+static int dcss_dev_runtime_resume(struct device *dev)
{
struct dcss_dev *dcss = dcss_drv_dev_to_dcss(dev);
@@ -325,4 +316,8 @@ int dcss_dev_runtime_resume(struct device *dev)
return 0;
}
-#endif /* CONFIG_PM */
+
+EXPORT_GPL_DEV_PM_OPS(dcss_dev_pm_ops) = {
+ RUNTIME_PM_OPS(dcss_dev_runtime_suspend, dcss_dev_runtime_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(dcss_dev_suspend, dcss_dev_resume)
+};
diff --git a/drivers/gpu/drm/imx/dcss/dcss-dev.h b/drivers/gpu/drm/imx/dcss/dcss-dev.h
index 1e582270c6ea..f27b87c09599 100644
--- a/drivers/gpu/drm/imx/dcss/dcss-dev.h
+++ b/drivers/gpu/drm/imx/dcss/dcss-dev.h
@@ -9,6 +9,7 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_plane.h>
#include <linux/io.h>
+#include <linux/pm.h>
#include <video/videomode.h>
#define SET 0x04
@@ -95,13 +96,11 @@ struct dcss_dev *dcss_drv_dev_to_dcss(struct device *dev);
struct drm_device *dcss_drv_dev_to_drm(struct device *dev);
struct dcss_dev *dcss_dev_create(struct device *dev, bool hdmi_output);
void dcss_dev_destroy(struct dcss_dev *dcss);
-int dcss_dev_runtime_suspend(struct device *dev);
-int dcss_dev_runtime_resume(struct device *dev);
-int dcss_dev_suspend(struct device *dev);
-int dcss_dev_resume(struct device *dev);
void dcss_enable_dtg_and_ss(struct dcss_dev *dcss);
void dcss_disable_dtg_and_ss(struct dcss_dev *dcss);
+extern const struct dev_pm_ops dcss_dev_pm_ops;
+
/* BLKCTL */
int dcss_blkctl_init(struct dcss_dev *dcss, unsigned long blkctl_base);
void dcss_blkctl_cfg(struct dcss_blkctl *blkctl);
diff --git a/drivers/gpu/drm/imx/dcss/dcss-drv.c b/drivers/gpu/drm/imx/dcss/dcss-drv.c
index 1c70f70247f6..4f2291610139 100644
--- a/drivers/gpu/drm/imx/dcss/dcss-drv.c
+++ b/drivers/gpu/drm/imx/dcss/dcss-drv.c
@@ -74,8 +74,6 @@ static int dcss_drv_platform_probe(struct platform_device *pdev)
dcss_shutoff:
dcss_dev_destroy(mdrv->dcss);
- dev_set_drvdata(dev, NULL);
-
err:
kfree(mdrv);
return err;
@@ -85,14 +83,9 @@ static int dcss_drv_platform_remove(struct platform_device *pdev)
{
struct dcss_drv *mdrv = dev_get_drvdata(&pdev->dev);
- if (!mdrv)
- return 0;
-
dcss_kms_detach(mdrv->kms);
dcss_dev_destroy(mdrv->dcss);
- dev_set_drvdata(&pdev->dev, NULL);
-
kfree(mdrv);
return 0;
@@ -117,19 +110,13 @@ static const struct of_device_id dcss_of_match[] = {
MODULE_DEVICE_TABLE(of, dcss_of_match);
-static const struct dev_pm_ops dcss_dev_pm = {
- SET_SYSTEM_SLEEP_PM_OPS(dcss_dev_suspend, dcss_dev_resume)
- SET_RUNTIME_PM_OPS(dcss_dev_runtime_suspend,
- dcss_dev_runtime_resume, NULL)
-};
-
static struct platform_driver dcss_platform_driver = {
.probe = dcss_drv_platform_probe,
.remove = dcss_drv_platform_remove,
.driver = {
.name = "imx-dcss",
.of_match_table = dcss_of_match,
- .pm = &dcss_dev_pm,
+ .pm = pm_ptr(&dcss_dev_pm_ops),
},
};
diff --git a/drivers/gpu/drm/imx/dcss/dcss-kms.c b/drivers/gpu/drm/imx/dcss/dcss-kms.c
index 18df3888b7f9..dab5e664920d 100644
--- a/drivers/gpu/drm/imx/dcss/dcss-kms.c
+++ b/drivers/gpu/drm/imx/dcss/dcss-kms.c
@@ -150,7 +150,6 @@ struct dcss_kms_dev *dcss_kms_attach(struct dcss_dev *dcss)
return kms;
cleanup_crtc:
- drm_bridge_connector_disable_hpd(kms->connector);
drm_kms_helper_poll_fini(drm);
dcss_crtc_deinit(crtc, drm);
@@ -166,7 +165,6 @@ void dcss_kms_detach(struct dcss_kms_dev *kms)
struct drm_device *drm = &kms->base;
drm_dev_unregister(drm);
- drm_bridge_connector_disable_hpd(kms->connector);
drm_kms_helper_poll_fini(drm);
drm_atomic_helper_shutdown(drm);
drm_crtc_vblank_off(&kms->crtc.base);
diff --git a/drivers/gpu/drm/imx/ipuv3/Kconfig b/drivers/gpu/drm/imx/ipuv3/Kconfig
new file mode 100644
index 000000000000..bb278a369575
--- /dev/null
+++ b/drivers/gpu/drm/imx/ipuv3/Kconfig
@@ -0,0 +1,41 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config DRM_IMX
+ tristate "DRM Support for Freescale i.MX"
+ select DRM_KMS_HELPER
+ select VIDEOMODE_HELPERS
+ select DRM_GEM_DMA_HELPER
+ depends on DRM && (ARCH_MXC || ARCH_MULTIPLATFORM || COMPILE_TEST)
+ depends on IMX_IPUV3_CORE
+ help
+ enable i.MX graphics support
+
+config DRM_IMX_PARALLEL_DISPLAY
+ tristate "Support for parallel displays"
+ select DRM_PANEL
+ depends on DRM_IMX
+ select VIDEOMODE_HELPERS
+
+config DRM_IMX_TVE
+ tristate "Support for TV and VGA displays"
+ depends on DRM_IMX
+ depends on COMMON_CLK
+ select REGMAP_MMIO
+ help
+ Choose this to enable the internal Television Encoder (TVe)
+ found on i.MX53 processors.
+
+config DRM_IMX_LDB
+ tristate "Support for LVDS displays"
+ depends on DRM_IMX && MFD_SYSCON
+ depends on COMMON_CLK
+ select DRM_PANEL
+ help
+ Choose this to enable the internal LVDS Display Bridge (LDB)
+ found on i.MX53 and i.MX6 processors.
+
+config DRM_IMX_HDMI
+ tristate "Freescale i.MX DRM HDMI"
+ select DRM_DW_HDMI
+ depends on DRM_IMX && OF
+ help
+ Choose this if you want to use HDMI on i.MX6.
diff --git a/drivers/gpu/drm/imx/ipuv3/Makefile b/drivers/gpu/drm/imx/ipuv3/Makefile
new file mode 100644
index 000000000000..21cdcc2faabc
--- /dev/null
+++ b/drivers/gpu/drm/imx/ipuv3/Makefile
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0
+
+imxdrm-objs := imx-drm-core.o ipuv3-crtc.o ipuv3-plane.o
+
+obj-$(CONFIG_DRM_IMX) += imxdrm.o
+
+obj-$(CONFIG_DRM_IMX_PARALLEL_DISPLAY) += parallel-display.o
+obj-$(CONFIG_DRM_IMX_TVE) += imx-tve.o
+obj-$(CONFIG_DRM_IMX_LDB) += imx-ldb.o
+
+obj-$(CONFIG_DRM_IMX_HDMI) += dw_hdmi-imx.o
diff --git a/drivers/gpu/drm/imx/dw_hdmi-imx.c b/drivers/gpu/drm/imx/ipuv3/dw_hdmi-imx.c
index a2277a0d6d06..a2277a0d6d06 100644
--- a/drivers/gpu/drm/imx/dw_hdmi-imx.c
+++ b/drivers/gpu/drm/imx/ipuv3/dw_hdmi-imx.c
diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/ipuv3/imx-drm-core.c
index e060fa6cbcb9..e060fa6cbcb9 100644
--- a/drivers/gpu/drm/imx/imx-drm-core.c
+++ b/drivers/gpu/drm/imx/ipuv3/imx-drm-core.c
diff --git a/drivers/gpu/drm/imx/imx-drm.h b/drivers/gpu/drm/imx/ipuv3/imx-drm.h
index e721bebda2bd..e721bebda2bd 100644
--- a/drivers/gpu/drm/imx/imx-drm.h
+++ b/drivers/gpu/drm/imx/ipuv3/imx-drm.h
diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/ipuv3/imx-ldb.c
index c45fc8f4744d..c45fc8f4744d 100644
--- a/drivers/gpu/drm/imx/imx-ldb.c
+++ b/drivers/gpu/drm/imx/ipuv3/imx-ldb.c
diff --git a/drivers/gpu/drm/imx/imx-tve.c b/drivers/gpu/drm/imx/ipuv3/imx-tve.c
index d6832f506322..d6832f506322 100644
--- a/drivers/gpu/drm/imx/imx-tve.c
+++ b/drivers/gpu/drm/imx/ipuv3/imx-tve.c
diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3/ipuv3-crtc.c
index 5f26090b0c98..5f26090b0c98 100644
--- a/drivers/gpu/drm/imx/ipuv3-crtc.c
+++ b/drivers/gpu/drm/imx/ipuv3/ipuv3-crtc.c
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3/ipuv3-plane.c
index 80142d9a4a55..80142d9a4a55 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.c
+++ b/drivers/gpu/drm/imx/ipuv3/ipuv3-plane.c
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.h b/drivers/gpu/drm/imx/ipuv3/ipuv3-plane.h
index 6d544e6ce63f..6d544e6ce63f 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.h
+++ b/drivers/gpu/drm/imx/ipuv3/ipuv3-plane.h
diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/ipuv3/parallel-display.c
index 0fa0b590830b..0fa0b590830b 100644
--- a/drivers/gpu/drm/imx/parallel-display.c
+++ b/drivers/gpu/drm/imx/ipuv3/parallel-display.c
diff --git a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
index 3d5af44bf92d..5ec75e9ba499 100644
--- a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
+++ b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
@@ -26,7 +26,6 @@
#include <drm/drm_bridge_connector.h>
#include <drm/drm_color_mgmt.h>
#include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_encoder.h>
diff --git a/drivers/gpu/drm/kmb/kmb_crtc.c b/drivers/gpu/drm/kmb/kmb_crtc.c
index 06613ffeaaf8..647872f65bff 100644
--- a/drivers/gpu/drm/kmb/kmb_crtc.c
+++ b/drivers/gpu/drm/kmb/kmb_crtc.c
@@ -8,7 +8,6 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
#include <drm/drm_modeset_helper_vtables.h>
diff --git a/drivers/gpu/drm/kmb/kmb_plane.c b/drivers/gpu/drm/kmb/kmb_plane.c
index d172a302f902..9e0562aa2bcb 100644
--- a/drivers/gpu/drm/kmb/kmb_plane.c
+++ b/drivers/gpu/drm/kmb/kmb_plane.c
@@ -7,7 +7,6 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
#include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
diff --git a/drivers/gpu/drm/logicvc/logicvc_drm.c b/drivers/gpu/drm/logicvc/logicvc_drm.c
index 9de24d9f0c96..2fb23697740a 100644
--- a/drivers/gpu/drm/logicvc/logicvc_drm.c
+++ b/drivers/gpu/drm/logicvc/logicvc_drm.c
@@ -301,6 +301,7 @@ static int logicvc_drm_probe(struct platform_device *pdev)
struct regmap *regmap = NULL;
struct resource res;
void __iomem *base;
+ unsigned int preferred_bpp;
int irq;
int ret;
@@ -438,7 +439,17 @@ static int logicvc_drm_probe(struct platform_device *pdev)
goto error_mode;
}
- drm_fbdev_generic_setup(drm_dev, drm_dev->mode_config.preferred_depth);
+ switch (drm_dev->mode_config.preferred_depth) {
+ case 16:
+ preferred_bpp = 16;
+ break;
+ case 24:
+ case 32:
+ default:
+ preferred_bpp = 32;
+ break;
+ }
+ drm_fbdev_generic_setup(drm_dev, preferred_bpp);
return 0;
diff --git a/drivers/gpu/drm/logicvc/logicvc_interface.c b/drivers/gpu/drm/logicvc/logicvc_interface.c
index 815cebb4c4ca..689049d395c0 100644
--- a/drivers/gpu/drm/logicvc/logicvc_interface.c
+++ b/drivers/gpu/drm/logicvc/logicvc_interface.c
@@ -9,7 +9,6 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_connector.h>
-#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_encoder.h>
#include <drm/drm_gem_dma_helper.h>
diff --git a/drivers/gpu/drm/logicvc/logicvc_mode.c b/drivers/gpu/drm/logicvc/logicvc_mode.c
index 9971950ebd4e..3cf04b70bd27 100644
--- a/drivers/gpu/drm/logicvc/logicvc_mode.c
+++ b/drivers/gpu/drm/logicvc/logicvc_mode.c
@@ -8,7 +8,6 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
diff --git a/drivers/gpu/drm/mediatek/Kconfig b/drivers/gpu/drm/mediatek/Kconfig
index 369e495d0c3e..b451dee64d34 100644
--- a/drivers/gpu/drm/mediatek/Kconfig
+++ b/drivers/gpu/drm/mediatek/Kconfig
@@ -7,7 +7,6 @@ config DRM_MEDIATEK
depends on HAVE_ARM_SMCCC
depends on OF
depends on MTK_MMSYS
- select DRM_GEM_DMA_HELPER
select DRM_KMS_HELPER
select DRM_MIPI_DSI
select DRM_PANEL
diff --git a/drivers/gpu/drm/mediatek/mtk_cec.c b/drivers/gpu/drm/mediatek/mtk_cec.c
index cdfa648910b2..b640bc0559e7 100644
--- a/drivers/gpu/drm/mediatek/mtk_cec.c
+++ b/drivers/gpu/drm/mediatek/mtk_cec.c
@@ -12,6 +12,8 @@
#include <linux/platform_device.h>
#include "mtk_cec.h"
+#include "mtk_hdmi.h"
+#include "mtk_drm_drv.h"
#define TR_CONFIG 0x00
#define CLEAR_CEC_IRQ BIT(15)
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_aal.c b/drivers/gpu/drm/mediatek/mtk_disp_aal.c
index 0f9d7efb61d7..434e8a9ce8ab 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_aal.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_aal.c
@@ -14,6 +14,7 @@
#include "mtk_disp_drv.h"
#include "mtk_drm_crtc.h"
#include "mtk_drm_ddp_comp.h"
+#include "mtk_drm_drv.h"
#define DISP_AAL_EN 0x0000
#define AAL_EN BIT(0)
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ccorr.c b/drivers/gpu/drm/mediatek/mtk_disp_ccorr.c
index 3a53ebc4e172..1773379b2439 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_ccorr.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_ccorr.c
@@ -14,6 +14,7 @@
#include "mtk_disp_drv.h"
#include "mtk_drm_crtc.h"
#include "mtk_drm_ddp_comp.h"
+#include "mtk_drm_drv.h"
#define DISP_CCORR_EN 0x0000
#define CCORR_EN BIT(0)
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_color.c b/drivers/gpu/drm/mediatek/mtk_disp_color.c
index 473f5bb5cbad..cac9206079e7 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_color.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_color.c
@@ -14,6 +14,7 @@
#include "mtk_disp_drv.h"
#include "mtk_drm_crtc.h"
#include "mtk_drm_ddp_comp.h"
+#include "mtk_drm_drv.h"
#define DISP_COLOR_CFG_MAIN 0x0400
#define DISP_COLOR_START_MT2701 0x0f00
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_gamma.c b/drivers/gpu/drm/mediatek/mtk_disp_gamma.c
index bbd558a036ec..c844942603f7 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_gamma.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_gamma.c
@@ -14,6 +14,7 @@
#include "mtk_disp_drv.h"
#include "mtk_drm_crtc.h"
#include "mtk_drm_ddp_comp.h"
+#include "mtk_drm_drv.h"
#define DISP_GAMMA_EN 0x0000
#define GAMMA_EN BIT(0)
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
index 84daeaffab6a..9d8c986700ee 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
@@ -19,6 +19,7 @@
#include "mtk_disp_drv.h"
#include "mtk_drm_crtc.h"
#include "mtk_drm_ddp_comp.h"
+#include "mtk_drm_drv.h"
#define DISP_REG_OVL_INTEN 0x0004
#define OVL_FME_CPL_INT BIT(1)
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
index 0ec2e4049e07..a5a0c3bac35d 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
@@ -17,6 +17,7 @@
#include "mtk_disp_drv.h"
#include "mtk_drm_crtc.h"
#include "mtk_drm_ddp_comp.h"
+#include "mtk_drm_drv.h"
#define DISP_REG_RDMA_INT_ENABLE 0x0000
#define DISP_REG_RDMA_INT_STATUS 0x0004
diff --git a/drivers/gpu/drm/mediatek/mtk_dp.c b/drivers/gpu/drm/mediatek/mtk_dp.c
index 9d085c05c49c..1f94fcc144d3 100644
--- a/drivers/gpu/drm/mediatek/mtk_dp.c
+++ b/drivers/gpu/drm/mediatek/mtk_dp.c
@@ -1693,7 +1693,7 @@ static int mtk_dp_training(struct mtk_dp *mtk_dp)
break;
default:
return -EINVAL;
- };
+ }
continue;
}
@@ -1981,7 +1981,7 @@ static struct edid *mtk_dp_get_edid(struct drm_bridge *bridge,
struct cea_sad *sads;
if (!enabled) {
- drm_bridge_chain_pre_enable(bridge);
+ drm_atomic_bridge_chain_pre_enable(bridge, connector->state->state);
/* power on aux */
mtk_dp_update_bits(mtk_dp, MTK_DP_TOP_PWR_STATE,
@@ -2019,7 +2019,7 @@ static struct edid *mtk_dp_get_edid(struct drm_bridge *bridge,
DP_PWR_STATE_BANDGAP_TPLL,
DP_PWR_STATE_MASK);
- drm_bridge_chain_post_disable(bridge);
+ drm_atomic_bridge_chain_post_disable(bridge, connector->state->state);
}
return new_edid;
diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c
index 4317595a15d1..948a53f1f4b3 100644
--- a/drivers/gpu/drm/mediatek/mtk_dpi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dpi.c
@@ -14,6 +14,7 @@
#include <linux/of_graph.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
+#include <linux/soc/mediatek/mtk-mmsys.h>
#include <linux/types.h>
#include <video/videomode.h>
@@ -29,6 +30,7 @@
#include "mtk_disp_drv.h"
#include "mtk_dpi_regs.h"
#include "mtk_drm_ddp_comp.h"
+#include "mtk_drm_drv.h"
enum mtk_dpi_out_bit_num {
MTK_DPI_OUT_BIT_NUM_8BITS,
@@ -66,6 +68,7 @@ struct mtk_dpi {
struct drm_connector *connector;
void __iomem *regs;
struct device *dev;
+ struct device *mmsys_dev;
struct clk *engine_clk;
struct clk *pixel_clk;
struct clk *tvd_clk;
@@ -134,6 +137,7 @@ struct mtk_dpi_yc_limit {
* @yuv422_en_bit: Enable bit of yuv422.
* @csc_enable_bit: Enable bit of CSC.
* @pixels_per_iter: Quantity of transferred pixels per iteration.
+ * @edge_cfg_in_mmsys: If the edge configuration for DPI's output needs to be set in MMSYS.
*/
struct mtk_dpi_conf {
unsigned int (*cal_factor)(int clock);
@@ -152,6 +156,7 @@ struct mtk_dpi_conf {
u32 yuv422_en_bit;
u32 csc_enable_bit;
u32 pixels_per_iter;
+ bool edge_cfg_in_mmsys;
};
static void mtk_dpi_mask(struct mtk_dpi *dpi, u32 offset, u32 val, u32 mask)
@@ -448,8 +453,12 @@ static void mtk_dpi_dual_edge(struct mtk_dpi *dpi)
mtk_dpi_mask(dpi, DPI_OUTPUT_SETTING,
dpi->output_fmt == MEDIA_BUS_FMT_RGB888_2X12_LE ?
EDGE_SEL : 0, EDGE_SEL);
+ if (dpi->conf->edge_cfg_in_mmsys)
+ mtk_mmsys_ddp_dpi_fmt_config(dpi->mmsys_dev, MTK_DPI_RGB888_DDR_CON);
} else {
mtk_dpi_mask(dpi, DPI_DDR_SETTING, DDR_EN | DDR_4PHASE, 0);
+ if (dpi->conf->edge_cfg_in_mmsys)
+ mtk_mmsys_ddp_dpi_fmt_config(dpi->mmsys_dev, MTK_DPI_RGB888_SDR_CON);
}
}
@@ -777,8 +786,10 @@ static int mtk_dpi_bind(struct device *dev, struct device *master, void *data)
{
struct mtk_dpi *dpi = dev_get_drvdata(dev);
struct drm_device *drm_dev = data;
+ struct mtk_drm_private *priv = drm_dev->dev_private;
int ret;
+ dpi->mmsys_dev = priv->mmsys_dev;
ret = drm_simple_encoder_init(drm_dev, &dpi->encoder,
DRM_MODE_ENCODER_TMDS);
if (ret) {
@@ -929,6 +940,24 @@ static const struct mtk_dpi_conf mt8183_conf = {
.csc_enable_bit = CSC_ENABLE,
};
+static const struct mtk_dpi_conf mt8186_conf = {
+ .cal_factor = mt8183_calculate_factor,
+ .reg_h_fre_con = 0xe0,
+ .max_clock_khz = 150000,
+ .output_fmts = mt8183_output_fmts,
+ .num_output_fmts = ARRAY_SIZE(mt8183_output_fmts),
+ .edge_cfg_in_mmsys = true,
+ .pixels_per_iter = 1,
+ .is_ck_de_pol = true,
+ .swap_input_support = true,
+ .support_direct_pin = true,
+ .dimension_mask = HPW_MASK,
+ .hvsize_mask = HSIZE_MASK,
+ .channel_swap_shift = CH_SWAP,
+ .yuv422_en_bit = YUV422_EN,
+ .csc_enable_bit = CSC_ENABLE,
+};
+
static const struct mtk_dpi_conf mt8188_dpintf_conf = {
.cal_factor = mt8195_dpintf_calculate_factor,
.max_clock_khz = 600000,
@@ -1093,6 +1122,9 @@ static const struct of_device_id mtk_dpi_of_ids[] = {
{ .compatible = "mediatek,mt8183-dpi",
.data = &mt8183_conf,
},
+ { .compatible = "mediatek,mt8186-dpi",
+ .data = &mt8186_conf,
+ },
{ .compatible = "mediatek,mt8188-dp-intf",
.data = &mt8188_dpintf_conf,
},
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
index 112615817dcb..5071f1263216 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
@@ -945,6 +945,8 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
mtk_crtc->planes = devm_kcalloc(dev, num_comp_planes,
sizeof(struct drm_plane), GFP_KERNEL);
+ if (!mtk_crtc->planes)
+ return -ENOMEM;
for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
ret = mtk_drm_crtc_init_comp_planes(drm_dev, mtk_crtc, i,
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index cd5b18ef7951..a13b36ac03a1 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -20,8 +20,8 @@
#include <drm/drm_fbdev_generic.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem.h>
-#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_ioctl.h>
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
@@ -520,6 +520,7 @@ static int mtk_drm_bind(struct device *dev)
err_deinit:
mtk_drm_kms_deinit(drm);
err_free:
+ private->drm = NULL;
drm_dev_put(drm);
return ret;
}
@@ -637,6 +638,8 @@ static const struct of_device_id mtk_ddp_comp_dt_ids[] = {
.data = (void *)MTK_DPI },
{ .compatible = "mediatek,mt8183-dpi",
.data = (void *)MTK_DPI },
+ { .compatible = "mediatek,mt8186-dpi",
+ .data = (void *)MTK_DPI },
{ .compatible = "mediatek,mt8188-dp-intf",
.data = (void *)MTK_DP_INTF },
{ .compatible = "mediatek,mt8192-dpi",
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_gem.c b/drivers/gpu/drm/mediatek/mtk_drm_gem.c
index 47e96b0289f9..a25b28d3ee90 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_gem.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_gem.c
@@ -16,13 +16,18 @@
static int mtk_drm_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
+static const struct vm_operations_struct vm_ops = {
+ .open = drm_gem_vm_open,
+ .close = drm_gem_vm_close,
+};
+
static const struct drm_gem_object_funcs mtk_drm_gem_object_funcs = {
.free = mtk_drm_gem_free_object,
.get_sg_table = mtk_gem_prime_get_sg_table,
.vmap = mtk_drm_gem_prime_vmap,
.vunmap = mtk_drm_gem_prime_vunmap,
.mmap = mtk_drm_gem_object_mmap,
- .vm_ops = &drm_gem_dma_vm_ops,
+ .vm_ops = &vm_ops,
};
static struct mtk_drm_gem_obj *mtk_drm_gem_init(struct drm_device *dev,
@@ -158,14 +163,12 @@ static int mtk_drm_gem_object_mmap(struct drm_gem_object *obj,
* dma_alloc_attrs() allocated a struct page table for mtk_gem, so clear
* VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap().
*/
- vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+ vm_flags_set(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP);
vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
ret = dma_mmap_attrs(priv->dma_dev, vma, mtk_gem->cookie,
mtk_gem->dma_addr, obj->size, mtk_gem->dma_attrs);
- if (ret)
- drm_gem_vm_close(vma);
return ret;
}
@@ -262,6 +265,6 @@ void mtk_drm_gem_prime_vunmap(struct drm_gem_object *obj,
return;
vunmap(vaddr);
- mtk_gem->kvaddr = 0;
+ mtk_gem->kvaddr = NULL;
kfree(mtk_gem->pages);
}
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index 3b7d13028fb6..7d5250351193 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -28,6 +28,7 @@
#include "mtk_disp_drv.h"
#include "mtk_drm_ddp_comp.h"
+#include "mtk_drm_drv.h"
#define DSI_START 0x00
@@ -721,7 +722,7 @@ static void mtk_dsi_lane_ready(struct mtk_dsi *dsi)
mtk_dsi_clk_ulp_mode_leave(dsi);
mtk_dsi_lane0_ulp_mode_leave(dsi);
mtk_dsi_clk_hs_mode(dsi, 0);
- msleep(20);
+ usleep_range(1000, 3000);
/* The reaction time after pulling up the mipi signal for dsi_rx */
}
}
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c b/drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c
index 6207eac88550..2fc9214ffa82 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c
@@ -19,6 +19,9 @@
#include <linux/of_irq.h>
#include <linux/of_platform.h>
+#include "mtk_drm_drv.h"
+#include "mtk_hdmi.h"
+
#define SIF1_CLOK (288)
#define DDC_DDCMCTL0 (0x0)
#define DDCM_ODRAIN BIT(31)
diff --git a/drivers/gpu/drm/meson/meson_dw_hdmi.c b/drivers/gpu/drm/meson/meson_dw_hdmi.c
index 5cd2b2ebbbd3..534621a13a34 100644
--- a/drivers/gpu/drm/meson/meson_dw_hdmi.c
+++ b/drivers/gpu/drm/meson/meson_dw_hdmi.c
@@ -140,7 +140,6 @@ struct meson_dw_hdmi {
struct reset_control *hdmitx_apb;
struct reset_control *hdmitx_ctrl;
struct reset_control *hdmitx_phy;
- struct regulator *hdmi_supply;
u32 irq_stat;
struct dw_hdmi *hdmi;
struct drm_bridge *bridge;
@@ -665,11 +664,6 @@ static void meson_dw_hdmi_init(struct meson_dw_hdmi *meson_dw_hdmi)
}
-static void meson_disable_regulator(void *data)
-{
- regulator_disable(data);
-}
-
static void meson_disable_clk(void *data)
{
clk_disable_unprepare(data);
@@ -723,20 +717,9 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master,
meson_dw_hdmi->data = match;
dw_plat_data = &meson_dw_hdmi->dw_plat_data;
- meson_dw_hdmi->hdmi_supply = devm_regulator_get_optional(dev, "hdmi");
- if (IS_ERR(meson_dw_hdmi->hdmi_supply)) {
- if (PTR_ERR(meson_dw_hdmi->hdmi_supply) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
- meson_dw_hdmi->hdmi_supply = NULL;
- } else {
- ret = regulator_enable(meson_dw_hdmi->hdmi_supply);
- if (ret)
- return ret;
- ret = devm_add_action_or_reset(dev, meson_disable_regulator,
- meson_dw_hdmi->hdmi_supply);
- if (ret)
- return ret;
- }
+ ret = devm_regulator_get_enable_optional(dev, "hdmi");
+ if (ret < 0)
+ return ret;
meson_dw_hdmi->hdmitx_apb = devm_reset_control_get_exclusive(dev,
"hdmitx_apb");
diff --git a/drivers/gpu/drm/mga/Makefile b/drivers/gpu/drm/mga/Makefile
deleted file mode 100644
index db07c7fcc996..000000000000
--- a/drivers/gpu/drm/mga/Makefile
+++ /dev/null
@@ -1,11 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# Makefile for the drm device driver. This driver provides support for the
-# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
-
-mga-y := mga_drv.o mga_dma.o mga_state.o mga_warp.o mga_irq.o
-
-mga-$(CONFIG_COMPAT) += mga_ioc32.o
-
-obj-$(CONFIG_DRM_MGA) += mga.o
-
diff --git a/drivers/gpu/drm/mga/mga_dma.c b/drivers/gpu/drm/mga/mga_dma.c
deleted file mode 100644
index 331c2f0da57a..000000000000
--- a/drivers/gpu/drm/mga/mga_dma.c
+++ /dev/null
@@ -1,1168 +0,0 @@
-/* mga_dma.c -- DMA support for mga g200/g400 -*- linux-c -*-
- * Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-/*
- * \file mga_dma.c
- * DMA support for MGA G200 / G400.
- *
- * \author Rickard E. (Rik) Faith <faith@valinux.com>
- * \author Jeff Hartmann <jhartmann@valinux.com>
- * \author Keith Whitwell <keith@tungstengraphics.com>
- * \author Gareth Hughes <gareth@valinux.com>
- */
-
-#include <linux/delay.h>
-
-#include "mga_drv.h"
-
-#define MGA_DEFAULT_USEC_TIMEOUT 10000
-#define MGA_FREELIST_DEBUG 0
-
-#define MINIMAL_CLEANUP 0
-#define FULL_CLEANUP 1
-static int mga_do_cleanup_dma(struct drm_device *dev, int full_cleanup);
-
-/* ================================================================
- * Engine control
- */
-
-int mga_do_wait_for_idle(drm_mga_private_t *dev_priv)
-{
- u32 status = 0;
- int i;
- DRM_DEBUG("\n");
-
- for (i = 0; i < dev_priv->usec_timeout; i++) {
- status = MGA_READ(MGA_STATUS) & MGA_ENGINE_IDLE_MASK;
- if (status == MGA_ENDPRDMASTS) {
- MGA_WRITE8(MGA_CRTC_INDEX, 0);
- return 0;
- }
- udelay(1);
- }
-
-#if MGA_DMA_DEBUG
- DRM_ERROR("failed!\n");
- DRM_INFO(" status=0x%08x\n", status);
-#endif
- return -EBUSY;
-}
-
-static int mga_do_dma_reset(drm_mga_private_t *dev_priv)
-{
- drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
- drm_mga_primary_buffer_t *primary = &dev_priv->prim;
-
- DRM_DEBUG("\n");
-
- /* The primary DMA stream should look like new right about now.
- */
- primary->tail = 0;
- primary->space = primary->size;
- primary->last_flush = 0;
-
- sarea_priv->last_wrap = 0;
-
- /* FIXME: Reset counters, buffer ages etc...
- */
-
- /* FIXME: What else do we need to reinitialize? WARP stuff?
- */
-
- return 0;
-}
-
-/* ================================================================
- * Primary DMA stream
- */
-
-void mga_do_dma_flush(drm_mga_private_t *dev_priv)
-{
- drm_mga_primary_buffer_t *primary = &dev_priv->prim;
- u32 head, tail;
- u32 status = 0;
- int i;
- DMA_LOCALS;
- DRM_DEBUG("\n");
-
- /* We need to wait so that we can do an safe flush */
- for (i = 0; i < dev_priv->usec_timeout; i++) {
- status = MGA_READ(MGA_STATUS) & MGA_ENGINE_IDLE_MASK;
- if (status == MGA_ENDPRDMASTS)
- break;
- udelay(1);
- }
-
- if (primary->tail == primary->last_flush) {
- DRM_DEBUG(" bailing out...\n");
- return;
- }
-
- tail = primary->tail + dev_priv->primary->offset;
-
- /* We need to pad the stream between flushes, as the card
- * actually (partially?) reads the first of these commands.
- * See page 4-16 in the G400 manual, middle of the page or so.
- */
- BEGIN_DMA(1);
-
- DMA_BLOCK(MGA_DMAPAD, 0x00000000,
- MGA_DMAPAD, 0x00000000,
- MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000);
-
- ADVANCE_DMA();
-
- primary->last_flush = primary->tail;
-
- head = MGA_READ(MGA_PRIMADDRESS);
-
- if (head <= tail)
- primary->space = primary->size - primary->tail;
- else
- primary->space = head - tail;
-
- DRM_DEBUG(" head = 0x%06lx\n", (unsigned long)(head - dev_priv->primary->offset));
- DRM_DEBUG(" tail = 0x%06lx\n", (unsigned long)(tail - dev_priv->primary->offset));
- DRM_DEBUG(" space = 0x%06x\n", primary->space);
-
- mga_flush_write_combine();
- MGA_WRITE(MGA_PRIMEND, tail | dev_priv->dma_access);
-
- DRM_DEBUG("done.\n");
-}
-
-void mga_do_dma_wrap_start(drm_mga_private_t *dev_priv)
-{
- drm_mga_primary_buffer_t *primary = &dev_priv->prim;
- u32 head, tail;
- DMA_LOCALS;
- DRM_DEBUG("\n");
-
- BEGIN_DMA_WRAP();
-
- DMA_BLOCK(MGA_DMAPAD, 0x00000000,
- MGA_DMAPAD, 0x00000000,
- MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000);
-
- ADVANCE_DMA();
-
- tail = primary->tail + dev_priv->primary->offset;
-
- primary->tail = 0;
- primary->last_flush = 0;
- primary->last_wrap++;
-
- head = MGA_READ(MGA_PRIMADDRESS);
-
- if (head == dev_priv->primary->offset)
- primary->space = primary->size;
- else
- primary->space = head - dev_priv->primary->offset;
-
- DRM_DEBUG(" head = 0x%06lx\n", (unsigned long)(head - dev_priv->primary->offset));
- DRM_DEBUG(" tail = 0x%06x\n", primary->tail);
- DRM_DEBUG(" wrap = %d\n", primary->last_wrap);
- DRM_DEBUG(" space = 0x%06x\n", primary->space);
-
- mga_flush_write_combine();
- MGA_WRITE(MGA_PRIMEND, tail | dev_priv->dma_access);
-
- set_bit(0, &primary->wrapped);
- DRM_DEBUG("done.\n");
-}
-
-void mga_do_dma_wrap_end(drm_mga_private_t *dev_priv)
-{
- drm_mga_primary_buffer_t *primary = &dev_priv->prim;
- drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
- u32 head = dev_priv->primary->offset;
- DRM_DEBUG("\n");
-
- sarea_priv->last_wrap++;
- DRM_DEBUG(" wrap = %d\n", sarea_priv->last_wrap);
-
- mga_flush_write_combine();
- MGA_WRITE(MGA_PRIMADDRESS, head | MGA_DMA_GENERAL);
-
- clear_bit(0, &primary->wrapped);
- DRM_DEBUG("done.\n");
-}
-
-/* ================================================================
- * Freelist management
- */
-
-#define MGA_BUFFER_USED (~0)
-#define MGA_BUFFER_FREE 0
-
-#if MGA_FREELIST_DEBUG
-static void mga_freelist_print(struct drm_device *dev)
-{
- drm_mga_private_t *dev_priv = dev->dev_private;
- drm_mga_freelist_t *entry;
-
- DRM_INFO("\n");
- DRM_INFO("current dispatch: last=0x%x done=0x%x\n",
- dev_priv->sarea_priv->last_dispatch,
- (unsigned int)(MGA_READ(MGA_PRIMADDRESS) -
- dev_priv->primary->offset));
- DRM_INFO("current freelist:\n");
-
- for (entry = dev_priv->head->next; entry; entry = entry->next) {
- DRM_INFO(" %p idx=%2d age=0x%x 0x%06lx\n",
- entry, entry->buf->idx, entry->age.head,
- (unsigned long)(entry->age.head - dev_priv->primary->offset));
- }
- DRM_INFO("\n");
-}
-#endif
-
-static int mga_freelist_init(struct drm_device *dev, drm_mga_private_t *dev_priv)
-{
- struct drm_device_dma *dma = dev->dma;
- struct drm_buf *buf;
- drm_mga_buf_priv_t *buf_priv;
- drm_mga_freelist_t *entry;
- int i;
- DRM_DEBUG("count=%d\n", dma->buf_count);
-
- dev_priv->head = kzalloc(sizeof(drm_mga_freelist_t), GFP_KERNEL);
- if (dev_priv->head == NULL)
- return -ENOMEM;
-
- SET_AGE(&dev_priv->head->age, MGA_BUFFER_USED, 0);
-
- for (i = 0; i < dma->buf_count; i++) {
- buf = dma->buflist[i];
- buf_priv = buf->dev_private;
-
- entry = kzalloc(sizeof(drm_mga_freelist_t), GFP_KERNEL);
- if (entry == NULL)
- return -ENOMEM;
-
- entry->next = dev_priv->head->next;
- entry->prev = dev_priv->head;
- SET_AGE(&entry->age, MGA_BUFFER_FREE, 0);
- entry->buf = buf;
-
- if (dev_priv->head->next != NULL)
- dev_priv->head->next->prev = entry;
- if (entry->next == NULL)
- dev_priv->tail = entry;
-
- buf_priv->list_entry = entry;
- buf_priv->discard = 0;
- buf_priv->dispatched = 0;
-
- dev_priv->head->next = entry;
- }
-
- return 0;
-}
-
-static void mga_freelist_cleanup(struct drm_device *dev)
-{
- drm_mga_private_t *dev_priv = dev->dev_private;
- drm_mga_freelist_t *entry;
- drm_mga_freelist_t *next;
- DRM_DEBUG("\n");
-
- entry = dev_priv->head;
- while (entry) {
- next = entry->next;
- kfree(entry);
- entry = next;
- }
-
- dev_priv->head = dev_priv->tail = NULL;
-}
-
-#if 0
-/* FIXME: Still needed?
- */
-static void mga_freelist_reset(struct drm_device *dev)
-{
- struct drm_device_dma *dma = dev->dma;
- struct drm_buf *buf;
- drm_mga_buf_priv_t *buf_priv;
- int i;
-
- for (i = 0; i < dma->buf_count; i++) {
- buf = dma->buflist[i];
- buf_priv = buf->dev_private;
- SET_AGE(&buf_priv->list_entry->age, MGA_BUFFER_FREE, 0);
- }
-}
-#endif
-
-static struct drm_buf *mga_freelist_get(struct drm_device * dev)
-{
- drm_mga_private_t *dev_priv = dev->dev_private;
- drm_mga_freelist_t *next;
- drm_mga_freelist_t *prev;
- drm_mga_freelist_t *tail = dev_priv->tail;
- u32 head, wrap;
- DRM_DEBUG("\n");
-
- head = MGA_READ(MGA_PRIMADDRESS);
- wrap = dev_priv->sarea_priv->last_wrap;
-
- DRM_DEBUG(" tail=0x%06lx %d\n",
- tail->age.head ?
- (unsigned long)(tail->age.head - dev_priv->primary->offset) : 0,
- tail->age.wrap);
- DRM_DEBUG(" head=0x%06lx %d\n",
- (unsigned long)(head - dev_priv->primary->offset), wrap);
-
- if (TEST_AGE(&tail->age, head, wrap)) {
- prev = dev_priv->tail->prev;
- next = dev_priv->tail;
- prev->next = NULL;
- next->prev = next->next = NULL;
- dev_priv->tail = prev;
- SET_AGE(&next->age, MGA_BUFFER_USED, 0);
- return next->buf;
- }
-
- DRM_DEBUG("returning NULL!\n");
- return NULL;
-}
-
-int mga_freelist_put(struct drm_device *dev, struct drm_buf *buf)
-{
- drm_mga_private_t *dev_priv = dev->dev_private;
- drm_mga_buf_priv_t *buf_priv = buf->dev_private;
- drm_mga_freelist_t *head, *entry, *prev;
-
- DRM_DEBUG("age=0x%06lx wrap=%d\n",
- (unsigned long)(buf_priv->list_entry->age.head -
- dev_priv->primary->offset),
- buf_priv->list_entry->age.wrap);
-
- entry = buf_priv->list_entry;
- head = dev_priv->head;
-
- if (buf_priv->list_entry->age.head == MGA_BUFFER_USED) {
- SET_AGE(&entry->age, MGA_BUFFER_FREE, 0);
- prev = dev_priv->tail;
- prev->next = entry;
- entry->prev = prev;
- entry->next = NULL;
- } else {
- prev = head->next;
- head->next = entry;
- prev->prev = entry;
- entry->prev = head;
- entry->next = prev;
- }
-
- return 0;
-}
-
-/* ================================================================
- * DMA initialization, cleanup
- */
-
-int mga_driver_load(struct drm_device *dev, unsigned long flags)
-{
- struct pci_dev *pdev = to_pci_dev(dev->dev);
- drm_mga_private_t *dev_priv;
- int ret;
-
- /* There are PCI versions of the G450. These cards have the
- * same PCI ID as the AGP G450, but have an additional PCI-to-PCI
- * bridge chip. We detect these cards, which are not currently
- * supported by this driver, by looking at the device ID of the
- * bus the "card" is on. If vendor is 0x3388 (Hint Corp) and the
- * device is 0x0021 (HB6 Universal PCI-PCI bridge), we reject the
- * device.
- */
- if ((pdev->device == 0x0525) && pdev->bus->self
- && (pdev->bus->self->vendor == 0x3388)
- && (pdev->bus->self->device == 0x0021)
- && dev->agp) {
- /* FIXME: This should be quirked in the pci core, but oh well
- * the hw probably stopped existing. */
- arch_phys_wc_del(dev->agp->agp_mtrr);
- kfree(dev->agp);
- dev->agp = NULL;
- }
- dev_priv = kzalloc(sizeof(drm_mga_private_t), GFP_KERNEL);
- if (!dev_priv)
- return -ENOMEM;
-
- dev->dev_private = (void *)dev_priv;
-
- dev_priv->usec_timeout = MGA_DEFAULT_USEC_TIMEOUT;
- dev_priv->chipset = flags;
-
- pci_set_master(pdev);
-
- dev_priv->mmio_base = pci_resource_start(pdev, 1);
- dev_priv->mmio_size = pci_resource_len(pdev, 1);
-
- ret = drm_vblank_init(dev, 1);
-
- if (ret) {
- (void) mga_driver_unload(dev);
- return ret;
- }
-
- return 0;
-}
-
-#if IS_ENABLED(CONFIG_AGP)
-/*
- * Bootstrap the driver for AGP DMA.
- *
- * \todo
- * Investigate whether there is any benefit to storing the WARP microcode in
- * AGP memory. If not, the microcode may as well always be put in PCI
- * memory.
- *
- * \todo
- * This routine needs to set dma_bs->agp_mode to the mode actually configured
- * in the hardware. Looking just at the Linux AGP driver code, I don't see
- * an easy way to determine this.
- *
- * \sa mga_do_dma_bootstrap, mga_do_pci_dma_bootstrap
- */
-static int mga_do_agp_dma_bootstrap(struct drm_device *dev,
- drm_mga_dma_bootstrap_t *dma_bs)
-{
- drm_mga_private_t *const dev_priv =
- (drm_mga_private_t *) dev->dev_private;
- unsigned int warp_size = MGA_WARP_UCODE_SIZE;
- int err;
- unsigned offset;
- const unsigned secondary_size = dma_bs->secondary_bin_count
- * dma_bs->secondary_bin_size;
- const unsigned agp_size = (dma_bs->agp_size << 20);
- struct drm_buf_desc req;
- struct drm_agp_mode mode;
- struct drm_agp_info info;
- struct drm_agp_buffer agp_req;
- struct drm_agp_binding bind_req;
-
- /* Acquire AGP. */
- err = drm_legacy_agp_acquire(dev);
- if (err) {
- DRM_ERROR("Unable to acquire AGP: %d\n", err);
- return err;
- }
-
- err = drm_legacy_agp_info(dev, &info);
- if (err) {
- DRM_ERROR("Unable to get AGP info: %d\n", err);
- return err;
- }
-
- mode.mode = (info.mode & ~0x07) | dma_bs->agp_mode;
- err = drm_legacy_agp_enable(dev, mode);
- if (err) {
- DRM_ERROR("Unable to enable AGP (mode = 0x%lx)\n", mode.mode);
- return err;
- }
-
- /* In addition to the usual AGP mode configuration, the G200 AGP cards
- * need to have the AGP mode "manually" set.
- */
-
- if (dev_priv->chipset == MGA_CARD_TYPE_G200) {
- if (mode.mode & 0x02)
- MGA_WRITE(MGA_AGP_PLL, MGA_AGP2XPLL_ENABLE);
- else
- MGA_WRITE(MGA_AGP_PLL, MGA_AGP2XPLL_DISABLE);
- }
-
- /* Allocate and bind AGP memory. */
- agp_req.size = agp_size;
- agp_req.type = 0;
- err = drm_legacy_agp_alloc(dev, &agp_req);
- if (err) {
- dev_priv->agp_size = 0;
- DRM_ERROR("Unable to allocate %uMB AGP memory\n",
- dma_bs->agp_size);
- return err;
- }
-
- dev_priv->agp_size = agp_size;
- dev_priv->agp_handle = agp_req.handle;
-
- bind_req.handle = agp_req.handle;
- bind_req.offset = 0;
- err = drm_legacy_agp_bind(dev, &bind_req);
- if (err) {
- DRM_ERROR("Unable to bind AGP memory: %d\n", err);
- return err;
- }
-
- /* Make drm_legacy_addbufs happy by not trying to create a mapping for
- * less than a page.
- */
- if (warp_size < PAGE_SIZE)
- warp_size = PAGE_SIZE;
-
- offset = 0;
- err = drm_legacy_addmap(dev, offset, warp_size,
- _DRM_AGP, _DRM_READ_ONLY, &dev_priv->warp);
- if (err) {
- DRM_ERROR("Unable to map WARP microcode: %d\n", err);
- return err;
- }
-
- offset += warp_size;
- err = drm_legacy_addmap(dev, offset, dma_bs->primary_size,
- _DRM_AGP, _DRM_READ_ONLY, &dev_priv->primary);
- if (err) {
- DRM_ERROR("Unable to map primary DMA region: %d\n", err);
- return err;
- }
-
- offset += dma_bs->primary_size;
- err = drm_legacy_addmap(dev, offset, secondary_size,
- _DRM_AGP, 0, &dev->agp_buffer_map);
- if (err) {
- DRM_ERROR("Unable to map secondary DMA region: %d\n", err);
- return err;
- }
-
- (void)memset(&req, 0, sizeof(req));
- req.count = dma_bs->secondary_bin_count;
- req.size = dma_bs->secondary_bin_size;
- req.flags = _DRM_AGP_BUFFER;
- req.agp_start = offset;
-
- err = drm_legacy_addbufs_agp(dev, &req);
- if (err) {
- DRM_ERROR("Unable to add secondary DMA buffers: %d\n", err);
- return err;
- }
-
- {
- struct drm_map_list *_entry;
- unsigned long agp_token = 0;
-
- list_for_each_entry(_entry, &dev->maplist, head) {
- if (_entry->map == dev->agp_buffer_map)
- agp_token = _entry->user_token;
- }
- if (!agp_token)
- return -EFAULT;
-
- dev->agp_buffer_token = agp_token;
- }
-
- offset += secondary_size;
- err = drm_legacy_addmap(dev, offset, agp_size - offset,
- _DRM_AGP, 0, &dev_priv->agp_textures);
- if (err) {
- DRM_ERROR("Unable to map AGP texture region %d\n", err);
- return err;
- }
-
- drm_legacy_ioremap(dev_priv->warp, dev);
- drm_legacy_ioremap(dev_priv->primary, dev);
- drm_legacy_ioremap(dev->agp_buffer_map, dev);
-
- if (!dev_priv->warp->handle ||
- !dev_priv->primary->handle || !dev->agp_buffer_map->handle) {
- DRM_ERROR("failed to ioremap agp regions! (%p, %p, %p)\n",
- dev_priv->warp->handle, dev_priv->primary->handle,
- dev->agp_buffer_map->handle);
- return -ENOMEM;
- }
-
- dev_priv->dma_access = MGA_PAGPXFER;
- dev_priv->wagp_enable = MGA_WAGP_ENABLE;
-
- DRM_INFO("Initialized card for AGP DMA.\n");
- return 0;
-}
-#else
-static int mga_do_agp_dma_bootstrap(struct drm_device *dev,
- drm_mga_dma_bootstrap_t *dma_bs)
-{
- return -EINVAL;
-}
-#endif
-
-/*
- * Bootstrap the driver for PCI DMA.
- *
- * \todo
- * The algorithm for decreasing the size of the primary DMA buffer could be
- * better. The size should be rounded up to the nearest page size, then
- * decrease the request size by a single page each pass through the loop.
- *
- * \todo
- * Determine whether the maximum address passed to drm_pci_alloc is correct.
- * The same goes for drm_legacy_addbufs_pci.
- *
- * \sa mga_do_dma_bootstrap, mga_do_agp_dma_bootstrap
- */
-static int mga_do_pci_dma_bootstrap(struct drm_device *dev,
- drm_mga_dma_bootstrap_t *dma_bs)
-{
- drm_mga_private_t *const dev_priv =
- (drm_mga_private_t *) dev->dev_private;
- unsigned int warp_size = MGA_WARP_UCODE_SIZE;
- unsigned int primary_size;
- unsigned int bin_count;
- int err;
- struct drm_buf_desc req;
-
- if (dev->dma == NULL) {
- DRM_ERROR("dev->dma is NULL\n");
- return -EFAULT;
- }
-
- /* Make drm_legacy_addbufs happy by not trying to create a mapping for
- * less than a page.
- */
- if (warp_size < PAGE_SIZE)
- warp_size = PAGE_SIZE;
-
- /* The proper alignment is 0x100 for this mapping */
- err = drm_legacy_addmap(dev, 0, warp_size, _DRM_CONSISTENT,
- _DRM_READ_ONLY, &dev_priv->warp);
- if (err != 0) {
- DRM_ERROR("Unable to create mapping for WARP microcode: %d\n",
- err);
- return err;
- }
-
- /* Other than the bottom two bits being used to encode other
- * information, there don't appear to be any restrictions on the
- * alignment of the primary or secondary DMA buffers.
- */
-
- for (primary_size = dma_bs->primary_size; primary_size != 0;
- primary_size >>= 1) {
- /* The proper alignment for this mapping is 0x04 */
- err = drm_legacy_addmap(dev, 0, primary_size, _DRM_CONSISTENT,
- _DRM_READ_ONLY, &dev_priv->primary);
- if (!err)
- break;
- }
-
- if (err != 0) {
- DRM_ERROR("Unable to allocate primary DMA region: %d\n", err);
- return -ENOMEM;
- }
-
- if (dev_priv->primary->size != dma_bs->primary_size) {
- DRM_INFO("Primary DMA buffer size reduced from %u to %u.\n",
- dma_bs->primary_size,
- (unsigned)dev_priv->primary->size);
- dma_bs->primary_size = dev_priv->primary->size;
- }
-
- for (bin_count = dma_bs->secondary_bin_count; bin_count > 0;
- bin_count--) {
- (void)memset(&req, 0, sizeof(req));
- req.count = bin_count;
- req.size = dma_bs->secondary_bin_size;
-
- err = drm_legacy_addbufs_pci(dev, &req);
- if (!err)
- break;
- }
-
- if (bin_count == 0) {
- DRM_ERROR("Unable to add secondary DMA buffers: %d\n", err);
- return err;
- }
-
- if (bin_count != dma_bs->secondary_bin_count) {
- DRM_INFO("Secondary PCI DMA buffer bin count reduced from %u "
- "to %u.\n", dma_bs->secondary_bin_count, bin_count);
-
- dma_bs->secondary_bin_count = bin_count;
- }
-
- dev_priv->dma_access = 0;
- dev_priv->wagp_enable = 0;
-
- dma_bs->agp_mode = 0;
-
- DRM_INFO("Initialized card for PCI DMA.\n");
- return 0;
-}
-
-static int mga_do_dma_bootstrap(struct drm_device *dev,
- drm_mga_dma_bootstrap_t *dma_bs)
-{
- const int is_agp = (dma_bs->agp_mode != 0) && dev->agp;
- int err;
- drm_mga_private_t *const dev_priv =
- (drm_mga_private_t *) dev->dev_private;
-
- dev_priv->used_new_dma_init = 1;
-
- /* The first steps are the same for both PCI and AGP based DMA. Map
- * the cards MMIO registers and map a status page.
- */
- err = drm_legacy_addmap(dev, dev_priv->mmio_base, dev_priv->mmio_size,
- _DRM_REGISTERS, _DRM_READ_ONLY,
- &dev_priv->mmio);
- if (err) {
- DRM_ERROR("Unable to map MMIO region: %d\n", err);
- return err;
- }
-
- err = drm_legacy_addmap(dev, 0, SAREA_MAX, _DRM_SHM,
- _DRM_READ_ONLY | _DRM_LOCKED | _DRM_KERNEL,
- &dev_priv->status);
- if (err) {
- DRM_ERROR("Unable to map status region: %d\n", err);
- return err;
- }
-
- /* The DMA initialization procedure is slightly different for PCI and
- * AGP cards. AGP cards just allocate a large block of AGP memory and
- * carve off portions of it for internal uses. The remaining memory
- * is returned to user-mode to be used for AGP textures.
- */
- if (is_agp)
- err = mga_do_agp_dma_bootstrap(dev, dma_bs);
-
- /* If we attempted to initialize the card for AGP DMA but failed,
- * clean-up any mess that may have been created.
- */
-
- if (err)
- mga_do_cleanup_dma(dev, MINIMAL_CLEANUP);
-
- /* Not only do we want to try and initialized PCI cards for PCI DMA,
- * but we also try to initialized AGP cards that could not be
- * initialized for AGP DMA. This covers the case where we have an AGP
- * card in a system with an unsupported AGP chipset. In that case the
- * card will be detected as AGP, but we won't be able to allocate any
- * AGP memory, etc.
- */
-
- if (!is_agp || err)
- err = mga_do_pci_dma_bootstrap(dev, dma_bs);
-
- return err;
-}
-
-int mga_dma_bootstrap(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- drm_mga_dma_bootstrap_t *bootstrap = data;
- int err;
- static const int modes[] = { 0, 1, 2, 2, 4, 4, 4, 4 };
- const drm_mga_private_t *const dev_priv =
- (drm_mga_private_t *) dev->dev_private;
-
- err = mga_do_dma_bootstrap(dev, bootstrap);
- if (err) {
- mga_do_cleanup_dma(dev, FULL_CLEANUP);
- return err;
- }
-
- if (dev_priv->agp_textures != NULL) {
- bootstrap->texture_handle = dev_priv->agp_textures->offset;
- bootstrap->texture_size = dev_priv->agp_textures->size;
- } else {
- bootstrap->texture_handle = 0;
- bootstrap->texture_size = 0;
- }
-
- bootstrap->agp_mode = modes[bootstrap->agp_mode & 0x07];
-
- return err;
-}
-
-static int mga_do_init_dma(struct drm_device *dev, drm_mga_init_t *init)
-{
- drm_mga_private_t *dev_priv;
- int ret;
- DRM_DEBUG("\n");
-
- dev_priv = dev->dev_private;
-
- if (init->sgram)
- dev_priv->clear_cmd = MGA_DWGCTL_CLEAR | MGA_ATYPE_BLK;
- else
- dev_priv->clear_cmd = MGA_DWGCTL_CLEAR | MGA_ATYPE_RSTR;
- dev_priv->maccess = init->maccess;
-
- dev_priv->fb_cpp = init->fb_cpp;
- dev_priv->front_offset = init->front_offset;
- dev_priv->front_pitch = init->front_pitch;
- dev_priv->back_offset = init->back_offset;
- dev_priv->back_pitch = init->back_pitch;
-
- dev_priv->depth_cpp = init->depth_cpp;
- dev_priv->depth_offset = init->depth_offset;
- dev_priv->depth_pitch = init->depth_pitch;
-
- /* FIXME: Need to support AGP textures...
- */
- dev_priv->texture_offset = init->texture_offset[0];
- dev_priv->texture_size = init->texture_size[0];
-
- dev_priv->sarea = drm_legacy_getsarea(dev);
- if (!dev_priv->sarea) {
- DRM_ERROR("failed to find sarea!\n");
- return -EINVAL;
- }
-
- if (!dev_priv->used_new_dma_init) {
-
- dev_priv->dma_access = MGA_PAGPXFER;
- dev_priv->wagp_enable = MGA_WAGP_ENABLE;
-
- dev_priv->status = drm_legacy_findmap(dev, init->status_offset);
- if (!dev_priv->status) {
- DRM_ERROR("failed to find status page!\n");
- return -EINVAL;
- }
- dev_priv->mmio = drm_legacy_findmap(dev, init->mmio_offset);
- if (!dev_priv->mmio) {
- DRM_ERROR("failed to find mmio region!\n");
- return -EINVAL;
- }
- dev_priv->warp = drm_legacy_findmap(dev, init->warp_offset);
- if (!dev_priv->warp) {
- DRM_ERROR("failed to find warp microcode region!\n");
- return -EINVAL;
- }
- dev_priv->primary = drm_legacy_findmap(dev, init->primary_offset);
- if (!dev_priv->primary) {
- DRM_ERROR("failed to find primary dma region!\n");
- return -EINVAL;
- }
- dev->agp_buffer_token = init->buffers_offset;
- dev->agp_buffer_map =
- drm_legacy_findmap(dev, init->buffers_offset);
- if (!dev->agp_buffer_map) {
- DRM_ERROR("failed to find dma buffer region!\n");
- return -EINVAL;
- }
-
- drm_legacy_ioremap(dev_priv->warp, dev);
- drm_legacy_ioremap(dev_priv->primary, dev);
- drm_legacy_ioremap(dev->agp_buffer_map, dev);
- }
-
- dev_priv->sarea_priv =
- (drm_mga_sarea_t *) ((u8 *) dev_priv->sarea->handle +
- init->sarea_priv_offset);
-
- if (!dev_priv->warp->handle ||
- !dev_priv->primary->handle ||
- ((dev_priv->dma_access != 0) &&
- ((dev->agp_buffer_map == NULL) ||
- (dev->agp_buffer_map->handle == NULL)))) {
- DRM_ERROR("failed to ioremap agp regions!\n");
- return -ENOMEM;
- }
-
- ret = mga_warp_install_microcode(dev_priv);
- if (ret < 0) {
- DRM_ERROR("failed to install WARP ucode!: %d\n", ret);
- return ret;
- }
-
- ret = mga_warp_init(dev_priv);
- if (ret < 0) {
- DRM_ERROR("failed to init WARP engine!: %d\n", ret);
- return ret;
- }
-
- dev_priv->prim.status = (u32 *) dev_priv->status->handle;
-
- mga_do_wait_for_idle(dev_priv);
-
- /* Init the primary DMA registers.
- */
- MGA_WRITE(MGA_PRIMADDRESS, dev_priv->primary->offset | MGA_DMA_GENERAL);
-#if 0
- MGA_WRITE(MGA_PRIMPTR, virt_to_bus((void *)dev_priv->prim.status) | MGA_PRIMPTREN0 | /* Soft trap, SECEND, SETUPEND */
- MGA_PRIMPTREN1); /* DWGSYNC */
-#endif
-
- dev_priv->prim.start = (u8 *) dev_priv->primary->handle;
- dev_priv->prim.end = ((u8 *) dev_priv->primary->handle
- + dev_priv->primary->size);
- dev_priv->prim.size = dev_priv->primary->size;
-
- dev_priv->prim.tail = 0;
- dev_priv->prim.space = dev_priv->prim.size;
- dev_priv->prim.wrapped = 0;
-
- dev_priv->prim.last_flush = 0;
- dev_priv->prim.last_wrap = 0;
-
- dev_priv->prim.high_mark = 256 * DMA_BLOCK_SIZE;
-
- dev_priv->prim.status[0] = dev_priv->primary->offset;
- dev_priv->prim.status[1] = 0;
-
- dev_priv->sarea_priv->last_wrap = 0;
- dev_priv->sarea_priv->last_frame.head = 0;
- dev_priv->sarea_priv->last_frame.wrap = 0;
-
- if (mga_freelist_init(dev, dev_priv) < 0) {
- DRM_ERROR("could not initialize freelist\n");
- return -ENOMEM;
- }
-
- return 0;
-}
-
-static int mga_do_cleanup_dma(struct drm_device *dev, int full_cleanup)
-{
- int err = 0;
- DRM_DEBUG("\n");
-
- /* Make sure interrupts are disabled here because the uninstall ioctl
- * may not have been called from userspace and after dev_private
- * is freed, it's too late.
- */
- if (dev->irq_enabled)
- drm_legacy_irq_uninstall(dev);
-
- if (dev->dev_private) {
- drm_mga_private_t *dev_priv = dev->dev_private;
-
- if ((dev_priv->warp != NULL)
- && (dev_priv->warp->type != _DRM_CONSISTENT))
- drm_legacy_ioremapfree(dev_priv->warp, dev);
-
- if ((dev_priv->primary != NULL)
- && (dev_priv->primary->type != _DRM_CONSISTENT))
- drm_legacy_ioremapfree(dev_priv->primary, dev);
-
- if (dev->agp_buffer_map != NULL)
- drm_legacy_ioremapfree(dev->agp_buffer_map, dev);
-
- if (dev_priv->used_new_dma_init) {
-#if IS_ENABLED(CONFIG_AGP)
- if (dev_priv->agp_handle != 0) {
- struct drm_agp_binding unbind_req;
- struct drm_agp_buffer free_req;
-
- unbind_req.handle = dev_priv->agp_handle;
- drm_legacy_agp_unbind(dev, &unbind_req);
-
- free_req.handle = dev_priv->agp_handle;
- drm_legacy_agp_free(dev, &free_req);
-
- dev_priv->agp_textures = NULL;
- dev_priv->agp_size = 0;
- dev_priv->agp_handle = 0;
- }
-
- if ((dev->agp != NULL) && dev->agp->acquired)
- err = drm_legacy_agp_release(dev);
-#endif
- }
-
- dev_priv->warp = NULL;
- dev_priv->primary = NULL;
- dev_priv->sarea = NULL;
- dev_priv->sarea_priv = NULL;
- dev->agp_buffer_map = NULL;
-
- if (full_cleanup) {
- dev_priv->mmio = NULL;
- dev_priv->status = NULL;
- dev_priv->used_new_dma_init = 0;
- }
-
- memset(&dev_priv->prim, 0, sizeof(dev_priv->prim));
- dev_priv->warp_pipe = 0;
- memset(dev_priv->warp_pipe_phys, 0,
- sizeof(dev_priv->warp_pipe_phys));
-
- if (dev_priv->head != NULL)
- mga_freelist_cleanup(dev);
- }
-
- return err;
-}
-
-int mga_dma_init(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- drm_mga_init_t *init = data;
- int err;
-
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- switch (init->func) {
- case MGA_INIT_DMA:
- err = mga_do_init_dma(dev, init);
- if (err)
- (void)mga_do_cleanup_dma(dev, FULL_CLEANUP);
- return err;
- case MGA_CLEANUP_DMA:
- return mga_do_cleanup_dma(dev, FULL_CLEANUP);
- }
-
- return -EINVAL;
-}
-
-/* ================================================================
- * Primary DMA stream management
- */
-
-int mga_dma_flush(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
- struct drm_lock *lock = data;
-
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- DRM_DEBUG("%s%s%s\n",
- (lock->flags & _DRM_LOCK_FLUSH) ? "flush, " : "",
- (lock->flags & _DRM_LOCK_FLUSH_ALL) ? "flush all, " : "",
- (lock->flags & _DRM_LOCK_QUIESCENT) ? "idle, " : "");
-
- WRAP_WAIT_WITH_RETURN(dev_priv);
-
- if (lock->flags & (_DRM_LOCK_FLUSH | _DRM_LOCK_FLUSH_ALL))
- mga_do_dma_flush(dev_priv);
-
- if (lock->flags & _DRM_LOCK_QUIESCENT) {
-#if MGA_DMA_DEBUG
- int ret = mga_do_wait_for_idle(dev_priv);
- if (ret < 0)
- DRM_INFO("-EBUSY\n");
- return ret;
-#else
- return mga_do_wait_for_idle(dev_priv);
-#endif
- } else {
- return 0;
- }
-}
-
-int mga_dma_reset(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
-
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- return mga_do_dma_reset(dev_priv);
-}
-
-/* ================================================================
- * DMA buffer management
- */
-
-static int mga_dma_get_buffers(struct drm_device *dev,
- struct drm_file *file_priv, struct drm_dma *d)
-{
- struct drm_buf *buf;
- int i;
-
- for (i = d->granted_count; i < d->request_count; i++) {
- buf = mga_freelist_get(dev);
- if (!buf)
- return -EAGAIN;
-
- buf->file_priv = file_priv;
-
- if (copy_to_user(&d->request_indices[i],
- &buf->idx, sizeof(buf->idx)))
- return -EFAULT;
- if (copy_to_user(&d->request_sizes[i],
- &buf->total, sizeof(buf->total)))
- return -EFAULT;
-
- d->granted_count++;
- }
- return 0;
-}
-
-int mga_dma_buffers(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_device_dma *dma = dev->dma;
- drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
- struct drm_dma *d = data;
- int ret = 0;
-
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- /* Please don't send us buffers.
- */
- if (d->send_count != 0) {
- DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n",
- task_pid_nr(current), d->send_count);
- return -EINVAL;
- }
-
- /* We'll send you buffers.
- */
- if (d->request_count < 0 || d->request_count > dma->buf_count) {
- DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
- task_pid_nr(current), d->request_count,
- dma->buf_count);
- return -EINVAL;
- }
-
- WRAP_TEST_WITH_RETURN(dev_priv);
-
- d->granted_count = 0;
-
- if (d->request_count)
- ret = mga_dma_get_buffers(dev, file_priv, d);
-
- return ret;
-}
-
-/*
- * Called just before the module is unloaded.
- */
-void mga_driver_unload(struct drm_device *dev)
-{
- kfree(dev->dev_private);
- dev->dev_private = NULL;
-}
-
-/*
- * Called when the last opener of the device is closed.
- */
-void mga_driver_lastclose(struct drm_device *dev)
-{
- mga_do_cleanup_dma(dev, FULL_CLEANUP);
-}
-
-int mga_driver_dma_quiescent(struct drm_device *dev)
-{
- drm_mga_private_t *dev_priv = dev->dev_private;
- return mga_do_wait_for_idle(dev_priv);
-}
diff --git a/drivers/gpu/drm/mga/mga_drv.c b/drivers/gpu/drm/mga/mga_drv.c
deleted file mode 100644
index 71128e6f6ae9..000000000000
--- a/drivers/gpu/drm/mga/mga_drv.c
+++ /dev/null
@@ -1,104 +0,0 @@
-/* mga_drv.c -- Matrox G200/G400 driver -*- linux-c -*-
- * Created: Mon Dec 13 01:56:22 1999 by jhartmann@precisioninsight.com
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Rickard E. (Rik) Faith <faith@valinux.com>
- * Gareth Hughes <gareth@valinux.com>
- */
-
-#include <linux/module.h>
-
-#include <drm/drm_drv.h>
-#include <drm/drm_pciids.h>
-
-#include "mga_drv.h"
-
-static struct pci_device_id pciidlist[] = {
- mga_PCI_IDS
-};
-
-static const struct file_operations mga_driver_fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = drm_ioctl,
- .mmap = drm_legacy_mmap,
- .poll = drm_poll,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = mga_compat_ioctl,
-#endif
- .llseek = noop_llseek,
-};
-
-static struct drm_driver driver = {
- .driver_features =
- DRIVER_USE_AGP | DRIVER_PCI_DMA | DRIVER_LEGACY |
- DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ,
- .dev_priv_size = sizeof(drm_mga_buf_priv_t),
- .load = mga_driver_load,
- .unload = mga_driver_unload,
- .lastclose = mga_driver_lastclose,
- .dma_quiescent = mga_driver_dma_quiescent,
- .get_vblank_counter = mga_get_vblank_counter,
- .enable_vblank = mga_enable_vblank,
- .disable_vblank = mga_disable_vblank,
- .irq_preinstall = mga_driver_irq_preinstall,
- .irq_postinstall = mga_driver_irq_postinstall,
- .irq_uninstall = mga_driver_irq_uninstall,
- .irq_handler = mga_driver_irq_handler,
- .ioctls = mga_ioctls,
- .dma_ioctl = mga_dma_buffers,
- .fops = &mga_driver_fops,
- .name = DRIVER_NAME,
- .desc = DRIVER_DESC,
- .date = DRIVER_DATE,
- .major = DRIVER_MAJOR,
- .minor = DRIVER_MINOR,
- .patchlevel = DRIVER_PATCHLEVEL,
-};
-
-static struct pci_driver mga_pci_driver = {
- .name = DRIVER_NAME,
- .id_table = pciidlist,
-};
-
-static int __init mga_init(void)
-{
- driver.num_ioctls = mga_max_ioctl;
- return drm_legacy_pci_init(&driver, &mga_pci_driver);
-}
-
-static void __exit mga_exit(void)
-{
- drm_legacy_pci_exit(&driver, &mga_pci_driver);
-}
-
-module_init(mga_init);
-module_exit(mga_exit);
-
-MODULE_AUTHOR(DRIVER_AUTHOR);
-MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
deleted file mode 100644
index f61401c70b90..000000000000
--- a/drivers/gpu/drm/mga/mga_drv.h
+++ /dev/null
@@ -1,685 +0,0 @@
-/* mga_drv.h -- Private header for the Matrox G200/G400 driver -*- linux-c -*-
- * Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Gareth Hughes <gareth@valinux.com>
- */
-
-#ifndef __MGA_DRV_H__
-#define __MGA_DRV_H__
-
-#include <linux/irqreturn.h>
-#include <linux/pci.h>
-#include <linux/slab.h>
-
-#include <drm/drm_device.h>
-#include <drm/drm_file.h>
-#include <drm/drm_ioctl.h>
-#include <drm/drm_legacy.h>
-#include <drm/drm_print.h>
-#include <drm/drm_sarea.h>
-#include <drm/drm_vblank.h>
-#include <drm/mga_drm.h>
-
-/* General customization:
- */
-
-#define DRIVER_AUTHOR "Gareth Hughes, VA Linux Systems Inc."
-
-#define DRIVER_NAME "mga"
-#define DRIVER_DESC "Matrox G200/G400"
-#define DRIVER_DATE "20051102"
-
-#define DRIVER_MAJOR 3
-#define DRIVER_MINOR 2
-#define DRIVER_PATCHLEVEL 1
-
-typedef struct drm_mga_primary_buffer {
- u8 *start;
- u8 *end;
- int size;
-
- u32 tail;
- int space;
- volatile long wrapped;
-
- volatile u32 *status;
-
- u32 last_flush;
- u32 last_wrap;
-
- u32 high_mark;
-} drm_mga_primary_buffer_t;
-
-typedef struct drm_mga_freelist {
- struct drm_mga_freelist *next;
- struct drm_mga_freelist *prev;
- drm_mga_age_t age;
- struct drm_buf *buf;
-} drm_mga_freelist_t;
-
-typedef struct {
- drm_mga_freelist_t *list_entry;
- int discard;
- int dispatched;
-} drm_mga_buf_priv_t;
-
-typedef struct drm_mga_private {
- drm_mga_primary_buffer_t prim;
- drm_mga_sarea_t *sarea_priv;
-
- drm_mga_freelist_t *head;
- drm_mga_freelist_t *tail;
-
- unsigned int warp_pipe;
- unsigned long warp_pipe_phys[MGA_MAX_WARP_PIPES];
-
- int chipset;
- int usec_timeout;
-
- /**
- * If set, the new DMA initialization sequence was used. This is
- * primarilly used to select how the driver should uninitialized its
- * internal DMA structures.
- */
- int used_new_dma_init;
-
- /**
- * If AGP memory is used for DMA buffers, this will be the value
- * \c MGA_PAGPXFER. Otherwise, it will be zero (for a PCI transfer).
- */
- u32 dma_access;
-
- /**
- * If AGP memory is used for DMA buffers, this will be the value
- * \c MGA_WAGP_ENABLE. Otherwise, it will be zero (for a PCI
- * transfer).
- */
- u32 wagp_enable;
-
- /**
- * \name MMIO region parameters.
- *
- * \sa drm_mga_private_t::mmio
- */
- /*@{ */
- resource_size_t mmio_base; /**< Bus address of base of MMIO. */
- resource_size_t mmio_size; /**< Size of the MMIO region. */
- /*@} */
-
- u32 clear_cmd;
- u32 maccess;
-
- atomic_t vbl_received; /**< Number of vblanks received. */
- wait_queue_head_t fence_queue;
- atomic_t last_fence_retired;
- u32 next_fence_to_post;
-
- unsigned int fb_cpp;
- unsigned int front_offset;
- unsigned int front_pitch;
- unsigned int back_offset;
- unsigned int back_pitch;
-
- unsigned int depth_cpp;
- unsigned int depth_offset;
- unsigned int depth_pitch;
-
- unsigned int texture_offset;
- unsigned int texture_size;
-
- drm_local_map_t *sarea;
- drm_local_map_t *mmio;
- drm_local_map_t *status;
- drm_local_map_t *warp;
- drm_local_map_t *primary;
- drm_local_map_t *agp_textures;
-
- unsigned long agp_handle;
- unsigned int agp_size;
-} drm_mga_private_t;
-
-extern const struct drm_ioctl_desc mga_ioctls[];
-extern int mga_max_ioctl;
-
- /* mga_dma.c */
-extern int mga_dma_bootstrap(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int mga_dma_init(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int mga_getparam(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int mga_dma_flush(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int mga_dma_reset(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int mga_dma_buffers(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int mga_driver_load(struct drm_device *dev, unsigned long flags);
-extern void mga_driver_unload(struct drm_device *dev);
-extern void mga_driver_lastclose(struct drm_device *dev);
-extern int mga_driver_dma_quiescent(struct drm_device *dev);
-
-extern int mga_do_wait_for_idle(drm_mga_private_t *dev_priv);
-
-extern void mga_do_dma_flush(drm_mga_private_t *dev_priv);
-extern void mga_do_dma_wrap_start(drm_mga_private_t *dev_priv);
-extern void mga_do_dma_wrap_end(drm_mga_private_t *dev_priv);
-
-extern int mga_freelist_put(struct drm_device *dev, struct drm_buf *buf);
-
- /* mga_warp.c */
-extern int mga_warp_install_microcode(drm_mga_private_t *dev_priv);
-extern int mga_warp_init(drm_mga_private_t *dev_priv);
-
- /* mga_irq.c */
-extern int mga_enable_vblank(struct drm_device *dev, unsigned int pipe);
-extern void mga_disable_vblank(struct drm_device *dev, unsigned int pipe);
-extern u32 mga_get_vblank_counter(struct drm_device *dev, unsigned int pipe);
-extern void mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence);
-extern int mga_driver_vblank_wait(struct drm_device *dev, unsigned int *sequence);
-extern irqreturn_t mga_driver_irq_handler(int irq, void *arg);
-extern void mga_driver_irq_preinstall(struct drm_device *dev);
-extern int mga_driver_irq_postinstall(struct drm_device *dev);
-extern void mga_driver_irq_uninstall(struct drm_device *dev);
-extern long mga_compat_ioctl(struct file *filp, unsigned int cmd,
- unsigned long arg);
-
-#define mga_flush_write_combine() wmb()
-
-#define MGA_READ8(reg) \
- readb(((void __iomem *)dev_priv->mmio->handle) + (reg))
-#define MGA_READ(reg) \
- readl(((void __iomem *)dev_priv->mmio->handle) + (reg))
-#define MGA_WRITE8(reg, val) \
- writeb(val, ((void __iomem *)dev_priv->mmio->handle) + (reg))
-#define MGA_WRITE(reg, val) \
- writel(val, ((void __iomem *)dev_priv->mmio->handle) + (reg))
-
-#define DWGREG0 0x1c00
-#define DWGREG0_END 0x1dff
-#define DWGREG1 0x2c00
-#define DWGREG1_END 0x2dff
-
-#define ISREG0(r) (r >= DWGREG0 && r <= DWGREG0_END)
-#define DMAREG0(r) (u8)((r - DWGREG0) >> 2)
-#define DMAREG1(r) (u8)(((r - DWGREG1) >> 2) | 0x80)
-#define DMAREG(r) (ISREG0(r) ? DMAREG0(r) : DMAREG1(r))
-
-/* ================================================================
- * Helper macross...
- */
-
-#define MGA_EMIT_STATE(dev_priv, dirty) \
-do { \
- if ((dirty) & ~MGA_UPLOAD_CLIPRECTS) { \
- if (dev_priv->chipset >= MGA_CARD_TYPE_G400) \
- mga_g400_emit_state(dev_priv); \
- else \
- mga_g200_emit_state(dev_priv); \
- } \
-} while (0)
-
-#define WRAP_TEST_WITH_RETURN(dev_priv) \
-do { \
- if (test_bit(0, &dev_priv->prim.wrapped)) { \
- if (mga_is_idle(dev_priv)) { \
- mga_do_dma_wrap_end(dev_priv); \
- } else if (dev_priv->prim.space < \
- dev_priv->prim.high_mark) { \
- if (MGA_DMA_DEBUG) \
- DRM_INFO("wrap...\n"); \
- return -EBUSY; \
- } \
- } \
-} while (0)
-
-#define WRAP_WAIT_WITH_RETURN(dev_priv) \
-do { \
- if (test_bit(0, &dev_priv->prim.wrapped)) { \
- if (mga_do_wait_for_idle(dev_priv) < 0) { \
- if (MGA_DMA_DEBUG) \
- DRM_INFO("wrap...\n"); \
- return -EBUSY; \
- } \
- mga_do_dma_wrap_end(dev_priv); \
- } \
-} while (0)
-
-/* ================================================================
- * Primary DMA command stream
- */
-
-#define MGA_VERBOSE 0
-
-#define DMA_LOCALS unsigned int write; volatile u8 *prim;
-
-#define DMA_BLOCK_SIZE (5 * sizeof(u32))
-
-#define BEGIN_DMA(n) \
-do { \
- if (MGA_VERBOSE) { \
- DRM_INFO("BEGIN_DMA(%d)\n", (n)); \
- DRM_INFO(" space=0x%x req=0x%zx\n", \
- dev_priv->prim.space, (n) * DMA_BLOCK_SIZE); \
- } \
- prim = dev_priv->prim.start; \
- write = dev_priv->prim.tail; \
-} while (0)
-
-#define BEGIN_DMA_WRAP() \
-do { \
- if (MGA_VERBOSE) { \
- DRM_INFO("BEGIN_DMA()\n"); \
- DRM_INFO(" space=0x%x\n", dev_priv->prim.space); \
- } \
- prim = dev_priv->prim.start; \
- write = dev_priv->prim.tail; \
-} while (0)
-
-#define ADVANCE_DMA() \
-do { \
- dev_priv->prim.tail = write; \
- if (MGA_VERBOSE) \
- DRM_INFO("ADVANCE_DMA() tail=0x%05x sp=0x%x\n", \
- write, dev_priv->prim.space); \
-} while (0)
-
-#define FLUSH_DMA() \
-do { \
- if (0) { \
- DRM_INFO("\n"); \
- DRM_INFO(" tail=0x%06x head=0x%06lx\n", \
- dev_priv->prim.tail, \
- (unsigned long)(MGA_READ(MGA_PRIMADDRESS) - \
- dev_priv->primary->offset)); \
- } \
- if (!test_bit(0, &dev_priv->prim.wrapped)) { \
- if (dev_priv->prim.space < dev_priv->prim.high_mark) \
- mga_do_dma_wrap_start(dev_priv); \
- else \
- mga_do_dma_flush(dev_priv); \
- } \
-} while (0)
-
-/* Never use this, always use DMA_BLOCK(...) for primary DMA output.
- */
-#define DMA_WRITE(offset, val) \
-do { \
- if (MGA_VERBOSE) \
- DRM_INFO(" DMA_WRITE( 0x%08x ) at 0x%04zx\n", \
- (u32)(val), write + (offset) * sizeof(u32)); \
- *(volatile u32 *)(prim + write + (offset) * sizeof(u32)) = val; \
-} while (0)
-
-#define DMA_BLOCK(reg0, val0, reg1, val1, reg2, val2, reg3, val3) \
-do { \
- DMA_WRITE(0, ((DMAREG(reg0) << 0) | \
- (DMAREG(reg1) << 8) | \
- (DMAREG(reg2) << 16) | \
- (DMAREG(reg3) << 24))); \
- DMA_WRITE(1, val0); \
- DMA_WRITE(2, val1); \
- DMA_WRITE(3, val2); \
- DMA_WRITE(4, val3); \
- write += DMA_BLOCK_SIZE; \
-} while (0)
-
-/* Buffer aging via primary DMA stream head pointer.
- */
-
-#define SET_AGE(age, h, w) \
-do { \
- (age)->head = h; \
- (age)->wrap = w; \
-} while (0)
-
-#define TEST_AGE(age, h, w) ((age)->wrap < w || \
- ((age)->wrap == w && \
- (age)->head < h))
-
-#define AGE_BUFFER(buf_priv) \
-do { \
- drm_mga_freelist_t *entry = (buf_priv)->list_entry; \
- if ((buf_priv)->dispatched) { \
- entry->age.head = (dev_priv->prim.tail + \
- dev_priv->primary->offset); \
- entry->age.wrap = dev_priv->sarea_priv->last_wrap; \
- } else { \
- entry->age.head = 0; \
- entry->age.wrap = 0; \
- } \
-} while (0)
-
-#define MGA_ENGINE_IDLE_MASK (MGA_SOFTRAPEN | \
- MGA_DWGENGSTS | \
- MGA_ENDPRDMASTS)
-#define MGA_DMA_IDLE_MASK (MGA_SOFTRAPEN | \
- MGA_ENDPRDMASTS)
-
-#define MGA_DMA_DEBUG 0
-
-/* A reduced set of the mga registers.
- */
-#define MGA_CRTC_INDEX 0x1fd4
-#define MGA_CRTC_DATA 0x1fd5
-
-/* CRTC11 */
-#define MGA_VINTCLR (1 << 4)
-#define MGA_VINTEN (1 << 5)
-
-#define MGA_ALPHACTRL 0x2c7c
-#define MGA_AR0 0x1c60
-#define MGA_AR1 0x1c64
-#define MGA_AR2 0x1c68
-#define MGA_AR3 0x1c6c
-#define MGA_AR4 0x1c70
-#define MGA_AR5 0x1c74
-#define MGA_AR6 0x1c78
-
-#define MGA_CXBNDRY 0x1c80
-#define MGA_CXLEFT 0x1ca0
-#define MGA_CXRIGHT 0x1ca4
-
-#define MGA_DMAPAD 0x1c54
-#define MGA_DSTORG 0x2cb8
-#define MGA_DWGCTL 0x1c00
-# define MGA_OPCOD_MASK (15 << 0)
-# define MGA_OPCOD_TRAP (4 << 0)
-# define MGA_OPCOD_TEXTURE_TRAP (6 << 0)
-# define MGA_OPCOD_BITBLT (8 << 0)
-# define MGA_OPCOD_ILOAD (9 << 0)
-# define MGA_ATYPE_MASK (7 << 4)
-# define MGA_ATYPE_RPL (0 << 4)
-# define MGA_ATYPE_RSTR (1 << 4)
-# define MGA_ATYPE_ZI (3 << 4)
-# define MGA_ATYPE_BLK (4 << 4)
-# define MGA_ATYPE_I (7 << 4)
-# define MGA_LINEAR (1 << 7)
-# define MGA_ZMODE_MASK (7 << 8)
-# define MGA_ZMODE_NOZCMP (0 << 8)
-# define MGA_ZMODE_ZE (2 << 8)
-# define MGA_ZMODE_ZNE (3 << 8)
-# define MGA_ZMODE_ZLT (4 << 8)
-# define MGA_ZMODE_ZLTE (5 << 8)
-# define MGA_ZMODE_ZGT (6 << 8)
-# define MGA_ZMODE_ZGTE (7 << 8)
-# define MGA_SOLID (1 << 11)
-# define MGA_ARZERO (1 << 12)
-# define MGA_SGNZERO (1 << 13)
-# define MGA_SHIFTZERO (1 << 14)
-# define MGA_BOP_MASK (15 << 16)
-# define MGA_BOP_ZERO (0 << 16)
-# define MGA_BOP_DST (10 << 16)
-# define MGA_BOP_SRC (12 << 16)
-# define MGA_BOP_ONE (15 << 16)
-# define MGA_TRANS_SHIFT 20
-# define MGA_TRANS_MASK (15 << 20)
-# define MGA_BLTMOD_MASK (15 << 25)
-# define MGA_BLTMOD_BMONOLEF (0 << 25)
-# define MGA_BLTMOD_BMONOWF (4 << 25)
-# define MGA_BLTMOD_PLAN (1 << 25)
-# define MGA_BLTMOD_BFCOL (2 << 25)
-# define MGA_BLTMOD_BU32BGR (3 << 25)
-# define MGA_BLTMOD_BU32RGB (7 << 25)
-# define MGA_BLTMOD_BU24BGR (11 << 25)
-# define MGA_BLTMOD_BU24RGB (15 << 25)
-# define MGA_PATTERN (1 << 29)
-# define MGA_TRANSC (1 << 30)
-# define MGA_CLIPDIS (1 << 31)
-#define MGA_DWGSYNC 0x2c4c
-
-#define MGA_FCOL 0x1c24
-#define MGA_FIFOSTATUS 0x1e10
-#define MGA_FOGCOL 0x1cf4
-#define MGA_FXBNDRY 0x1c84
-#define MGA_FXLEFT 0x1ca8
-#define MGA_FXRIGHT 0x1cac
-
-#define MGA_ICLEAR 0x1e18
-# define MGA_SOFTRAPICLR (1 << 0)
-# define MGA_VLINEICLR (1 << 5)
-#define MGA_IEN 0x1e1c
-# define MGA_SOFTRAPIEN (1 << 0)
-# define MGA_VLINEIEN (1 << 5)
-
-#define MGA_LEN 0x1c5c
-
-#define MGA_MACCESS 0x1c04
-
-#define MGA_PITCH 0x1c8c
-#define MGA_PLNWT 0x1c1c
-#define MGA_PRIMADDRESS 0x1e58
-# define MGA_DMA_GENERAL (0 << 0)
-# define MGA_DMA_BLIT (1 << 0)
-# define MGA_DMA_VECTOR (2 << 0)
-# define MGA_DMA_VERTEX (3 << 0)
-#define MGA_PRIMEND 0x1e5c
-# define MGA_PRIMNOSTART (1 << 0)
-# define MGA_PAGPXFER (1 << 1)
-#define MGA_PRIMPTR 0x1e50
-# define MGA_PRIMPTREN0 (1 << 0)
-# define MGA_PRIMPTREN1 (1 << 1)
-
-#define MGA_RST 0x1e40
-# define MGA_SOFTRESET (1 << 0)
-# define MGA_SOFTEXTRST (1 << 1)
-
-#define MGA_SECADDRESS 0x2c40
-#define MGA_SECEND 0x2c44
-#define MGA_SETUPADDRESS 0x2cd0
-#define MGA_SETUPEND 0x2cd4
-#define MGA_SGN 0x1c58
-#define MGA_SOFTRAP 0x2c48
-#define MGA_SRCORG 0x2cb4
-# define MGA_SRMMAP_MASK (1 << 0)
-# define MGA_SRCMAP_FB (0 << 0)
-# define MGA_SRCMAP_SYSMEM (1 << 0)
-# define MGA_SRCACC_MASK (1 << 1)
-# define MGA_SRCACC_PCI (0 << 1)
-# define MGA_SRCACC_AGP (1 << 1)
-#define MGA_STATUS 0x1e14
-# define MGA_SOFTRAPEN (1 << 0)
-# define MGA_VSYNCPEN (1 << 4)
-# define MGA_VLINEPEN (1 << 5)
-# define MGA_DWGENGSTS (1 << 16)
-# define MGA_ENDPRDMASTS (1 << 17)
-#define MGA_STENCIL 0x2cc8
-#define MGA_STENCILCTL 0x2ccc
-
-#define MGA_TDUALSTAGE0 0x2cf8
-#define MGA_TDUALSTAGE1 0x2cfc
-#define MGA_TEXBORDERCOL 0x2c5c
-#define MGA_TEXCTL 0x2c30
-#define MGA_TEXCTL2 0x2c3c
-# define MGA_DUALTEX (1 << 7)
-# define MGA_G400_TC2_MAGIC (1 << 15)
-# define MGA_MAP1_ENABLE (1 << 31)
-#define MGA_TEXFILTER 0x2c58
-#define MGA_TEXHEIGHT 0x2c2c
-#define MGA_TEXORG 0x2c24
-# define MGA_TEXORGMAP_MASK (1 << 0)
-# define MGA_TEXORGMAP_FB (0 << 0)
-# define MGA_TEXORGMAP_SYSMEM (1 << 0)
-# define MGA_TEXORGACC_MASK (1 << 1)
-# define MGA_TEXORGACC_PCI (0 << 1)
-# define MGA_TEXORGACC_AGP (1 << 1)
-#define MGA_TEXORG1 0x2ca4
-#define MGA_TEXORG2 0x2ca8
-#define MGA_TEXORG3 0x2cac
-#define MGA_TEXORG4 0x2cb0
-#define MGA_TEXTRANS 0x2c34
-#define MGA_TEXTRANSHIGH 0x2c38
-#define MGA_TEXWIDTH 0x2c28
-
-#define MGA_WACCEPTSEQ 0x1dd4
-#define MGA_WCODEADDR 0x1e6c
-#define MGA_WFLAG 0x1dc4
-#define MGA_WFLAG1 0x1de0
-#define MGA_WFLAGNB 0x1e64
-#define MGA_WFLAGNB1 0x1e08
-#define MGA_WGETMSB 0x1dc8
-#define MGA_WIADDR 0x1dc0
-#define MGA_WIADDR2 0x1dd8
-# define MGA_WMODE_SUSPEND (0 << 0)
-# define MGA_WMODE_RESUME (1 << 0)
-# define MGA_WMODE_JUMP (2 << 0)
-# define MGA_WMODE_START (3 << 0)
-# define MGA_WAGP_ENABLE (1 << 2)
-#define MGA_WMISC 0x1e70
-# define MGA_WUCODECACHE_ENABLE (1 << 0)
-# define MGA_WMASTER_ENABLE (1 << 1)
-# define MGA_WCACHEFLUSH_ENABLE (1 << 3)
-#define MGA_WVRTXSZ 0x1dcc
-
-#define MGA_YBOT 0x1c9c
-#define MGA_YDST 0x1c90
-#define MGA_YDSTLEN 0x1c88
-#define MGA_YDSTORG 0x1c94
-#define MGA_YTOP 0x1c98
-
-#define MGA_ZORG 0x1c0c
-
-/* This finishes the current batch of commands
- */
-#define MGA_EXEC 0x0100
-
-/* AGP PLL encoding (for G200 only).
- */
-#define MGA_AGP_PLL 0x1e4c
-# define MGA_AGP2XPLL_DISABLE (0 << 0)
-# define MGA_AGP2XPLL_ENABLE (1 << 0)
-
-/* Warp registers
- */
-#define MGA_WR0 0x2d00
-#define MGA_WR1 0x2d04
-#define MGA_WR2 0x2d08
-#define MGA_WR3 0x2d0c
-#define MGA_WR4 0x2d10
-#define MGA_WR5 0x2d14
-#define MGA_WR6 0x2d18
-#define MGA_WR7 0x2d1c
-#define MGA_WR8 0x2d20
-#define MGA_WR9 0x2d24
-#define MGA_WR10 0x2d28
-#define MGA_WR11 0x2d2c
-#define MGA_WR12 0x2d30
-#define MGA_WR13 0x2d34
-#define MGA_WR14 0x2d38
-#define MGA_WR15 0x2d3c
-#define MGA_WR16 0x2d40
-#define MGA_WR17 0x2d44
-#define MGA_WR18 0x2d48
-#define MGA_WR19 0x2d4c
-#define MGA_WR20 0x2d50
-#define MGA_WR21 0x2d54
-#define MGA_WR22 0x2d58
-#define MGA_WR23 0x2d5c
-#define MGA_WR24 0x2d60
-#define MGA_WR25 0x2d64
-#define MGA_WR26 0x2d68
-#define MGA_WR27 0x2d6c
-#define MGA_WR28 0x2d70
-#define MGA_WR29 0x2d74
-#define MGA_WR30 0x2d78
-#define MGA_WR31 0x2d7c
-#define MGA_WR32 0x2d80
-#define MGA_WR33 0x2d84
-#define MGA_WR34 0x2d88
-#define MGA_WR35 0x2d8c
-#define MGA_WR36 0x2d90
-#define MGA_WR37 0x2d94
-#define MGA_WR38 0x2d98
-#define MGA_WR39 0x2d9c
-#define MGA_WR40 0x2da0
-#define MGA_WR41 0x2da4
-#define MGA_WR42 0x2da8
-#define MGA_WR43 0x2dac
-#define MGA_WR44 0x2db0
-#define MGA_WR45 0x2db4
-#define MGA_WR46 0x2db8
-#define MGA_WR47 0x2dbc
-#define MGA_WR48 0x2dc0
-#define MGA_WR49 0x2dc4
-#define MGA_WR50 0x2dc8
-#define MGA_WR51 0x2dcc
-#define MGA_WR52 0x2dd0
-#define MGA_WR53 0x2dd4
-#define MGA_WR54 0x2dd8
-#define MGA_WR55 0x2ddc
-#define MGA_WR56 0x2de0
-#define MGA_WR57 0x2de4
-#define MGA_WR58 0x2de8
-#define MGA_WR59 0x2dec
-#define MGA_WR60 0x2df0
-#define MGA_WR61 0x2df4
-#define MGA_WR62 0x2df8
-#define MGA_WR63 0x2dfc
-# define MGA_G400_WR_MAGIC (1 << 6)
-# define MGA_G400_WR56_MAGIC 0x46480000 /* 12800.0f */
-
-#define MGA_ILOAD_ALIGN 64
-#define MGA_ILOAD_MASK (MGA_ILOAD_ALIGN - 1)
-
-#define MGA_DWGCTL_FLUSH (MGA_OPCOD_TEXTURE_TRAP | \
- MGA_ATYPE_I | \
- MGA_ZMODE_NOZCMP | \
- MGA_ARZERO | \
- MGA_SGNZERO | \
- MGA_BOP_SRC | \
- (15 << MGA_TRANS_SHIFT))
-
-#define MGA_DWGCTL_CLEAR (MGA_OPCOD_TRAP | \
- MGA_ZMODE_NOZCMP | \
- MGA_SOLID | \
- MGA_ARZERO | \
- MGA_SGNZERO | \
- MGA_SHIFTZERO | \
- MGA_BOP_SRC | \
- (0 << MGA_TRANS_SHIFT) | \
- MGA_BLTMOD_BMONOLEF | \
- MGA_TRANSC | \
- MGA_CLIPDIS)
-
-#define MGA_DWGCTL_COPY (MGA_OPCOD_BITBLT | \
- MGA_ATYPE_RPL | \
- MGA_SGNZERO | \
- MGA_SHIFTZERO | \
- MGA_BOP_SRC | \
- (0 << MGA_TRANS_SHIFT) | \
- MGA_BLTMOD_BFCOL | \
- MGA_CLIPDIS)
-
-/* Simple idle test.
- */
-static __inline__ int mga_is_idle(drm_mga_private_t *dev_priv)
-{
- u32 status = MGA_READ(MGA_STATUS) & MGA_ENGINE_IDLE_MASK;
- return (status == MGA_ENDPRDMASTS);
-}
-
-#endif
diff --git a/drivers/gpu/drm/mga/mga_ioc32.c b/drivers/gpu/drm/mga/mga_ioc32.c
deleted file mode 100644
index 894472921c30..000000000000
--- a/drivers/gpu/drm/mga/mga_ioc32.c
+++ /dev/null
@@ -1,197 +0,0 @@
-/*
- * \file mga_ioc32.c
- *
- * 32-bit ioctl compatibility routines for the MGA DRM.
- *
- * \author Dave Airlie <airlied@linux.ie> with code from patches by Egbert Eich
- *
- *
- * Copyright (C) Paul Mackerras 2005
- * Copyright (C) Egbert Eich 2003,2004
- * Copyright (C) Dave Airlie 2005
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-
-#include <linux/compat.h>
-
-#include "mga_drv.h"
-
-typedef struct drm32_mga_init {
- int func;
- u32 sarea_priv_offset;
- struct_group(always32bit,
- int chipset;
- int sgram;
- unsigned int maccess;
- unsigned int fb_cpp;
- unsigned int front_offset, front_pitch;
- unsigned int back_offset, back_pitch;
- unsigned int depth_cpp;
- unsigned int depth_offset, depth_pitch;
- unsigned int texture_offset[MGA_NR_TEX_HEAPS];
- unsigned int texture_size[MGA_NR_TEX_HEAPS];
- );
- u32 fb_offset;
- u32 mmio_offset;
- u32 status_offset;
- u32 warp_offset;
- u32 primary_offset;
- u32 buffers_offset;
-} drm_mga_init32_t;
-
-static int compat_mga_init(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_mga_init32_t init32;
- drm_mga_init_t init;
-
- if (copy_from_user(&init32, (void __user *)arg, sizeof(init32)))
- return -EFAULT;
-
- init.func = init32.func;
- init.sarea_priv_offset = init32.sarea_priv_offset;
- memcpy(&init.always32bit, &init32.always32bit,
- sizeof(init32.always32bit));
- init.fb_offset = init32.fb_offset;
- init.mmio_offset = init32.mmio_offset;
- init.status_offset = init32.status_offset;
- init.warp_offset = init32.warp_offset;
- init.primary_offset = init32.primary_offset;
- init.buffers_offset = init32.buffers_offset;
-
- return drm_ioctl_kernel(file, mga_dma_init, &init,
- DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY);
-}
-
-typedef struct drm_mga_getparam32 {
- int param;
- u32 value;
-} drm_mga_getparam32_t;
-
-static int compat_mga_getparam(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_mga_getparam32_t getparam32;
- drm_mga_getparam_t getparam;
-
- if (copy_from_user(&getparam32, (void __user *)arg, sizeof(getparam32)))
- return -EFAULT;
-
- getparam.param = getparam32.param;
- getparam.value = compat_ptr(getparam32.value);
- return drm_ioctl_kernel(file, mga_getparam, &getparam, DRM_AUTH);
-}
-
-typedef struct drm_mga_drm_bootstrap32 {
- u32 texture_handle;
- u32 texture_size;
- u32 primary_size;
- u32 secondary_bin_count;
- u32 secondary_bin_size;
- u32 agp_mode;
- u8 agp_size;
-} drm_mga_dma_bootstrap32_t;
-
-static int compat_mga_dma_bootstrap(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_mga_dma_bootstrap32_t dma_bootstrap32;
- drm_mga_dma_bootstrap_t dma_bootstrap;
- int err;
-
- if (copy_from_user(&dma_bootstrap32, (void __user *)arg,
- sizeof(dma_bootstrap32)))
- return -EFAULT;
-
- dma_bootstrap.texture_handle = dma_bootstrap32.texture_handle;
- dma_bootstrap.texture_size = dma_bootstrap32.texture_size;
- dma_bootstrap.primary_size = dma_bootstrap32.primary_size;
- dma_bootstrap.secondary_bin_count = dma_bootstrap32.secondary_bin_count;
- dma_bootstrap.secondary_bin_size = dma_bootstrap32.secondary_bin_size;
- dma_bootstrap.agp_mode = dma_bootstrap32.agp_mode;
- dma_bootstrap.agp_size = dma_bootstrap32.agp_size;
-
- err = drm_ioctl_kernel(file, mga_dma_bootstrap, &dma_bootstrap,
- DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY);
- if (err)
- return err;
-
- dma_bootstrap32.texture_handle = dma_bootstrap.texture_handle;
- dma_bootstrap32.texture_size = dma_bootstrap.texture_size;
- dma_bootstrap32.primary_size = dma_bootstrap.primary_size;
- dma_bootstrap32.secondary_bin_count = dma_bootstrap.secondary_bin_count;
- dma_bootstrap32.secondary_bin_size = dma_bootstrap.secondary_bin_size;
- dma_bootstrap32.agp_mode = dma_bootstrap.agp_mode;
- dma_bootstrap32.agp_size = dma_bootstrap.agp_size;
- if (copy_to_user((void __user *)arg, &dma_bootstrap32,
- sizeof(dma_bootstrap32)))
- return -EFAULT;
-
- return 0;
-}
-
-static struct {
- drm_ioctl_compat_t *fn;
- char *name;
-} mga_compat_ioctls[] = {
-#define DRM_IOCTL32_DEF(n, f)[DRM_##n] = {.fn = f, .name = #n}
- DRM_IOCTL32_DEF(MGA_INIT, compat_mga_init),
- DRM_IOCTL32_DEF(MGA_GETPARAM, compat_mga_getparam),
- DRM_IOCTL32_DEF(MGA_DMA_BOOTSTRAP, compat_mga_dma_bootstrap),
-};
-
-/**
- * mga_compat_ioctl - Called whenever a 32-bit process running under
- * a 64-bit kernel performs an ioctl on /dev/dri/card<n>.
- *
- * @filp: file pointer.
- * @cmd: command.
- * @arg: user argument.
- * return: zero on success or negative number on failure.
- */
-long mga_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
-{
- unsigned int nr = DRM_IOCTL_NR(cmd);
- struct drm_file *file_priv = filp->private_data;
- drm_ioctl_compat_t *fn = NULL;
- int ret;
-
- if (nr < DRM_COMMAND_BASE)
- return drm_compat_ioctl(filp, cmd, arg);
-
- if (nr >= DRM_COMMAND_BASE + ARRAY_SIZE(mga_compat_ioctls))
- return drm_ioctl(filp, cmd, arg);
-
- fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE].fn;
- if (!fn)
- return drm_ioctl(filp, cmd, arg);
-
- DRM_DEBUG("pid=%d, dev=0x%lx, auth=%d, %s\n",
- task_pid_nr(current),
- (long)old_encode_dev(file_priv->minor->kdev->devt),
- file_priv->authenticated,
- mga_compat_ioctls[nr - DRM_COMMAND_BASE].name);
- ret = (*fn) (filp, cmd, arg);
- if (ret)
- DRM_DEBUG("ret = %d\n", ret);
- return ret;
-}
diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
deleted file mode 100644
index a7e6ffc80a78..000000000000
--- a/drivers/gpu/drm/mga/mga_irq.c
+++ /dev/null
@@ -1,169 +0,0 @@
-/* mga_irq.c -- IRQ handling for radeon -*- linux-c -*-
- */
-/*
- * Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
- *
- * The Weather Channel (TM) funded Tungsten Graphics to develop the
- * initial release of the Radeon 8500 driver under the XFree86 license.
- * This notice must be preserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Keith Whitwell <keith@tungstengraphics.com>
- * Eric Anholt <anholt@FreeBSD.org>
- */
-
-#include "mga_drv.h"
-
-u32 mga_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
-{
- const drm_mga_private_t *const dev_priv =
- (drm_mga_private_t *) dev->dev_private;
-
- if (pipe != 0)
- return 0;
-
- return atomic_read(&dev_priv->vbl_received);
-}
-
-
-irqreturn_t mga_driver_irq_handler(int irq, void *arg)
-{
- struct drm_device *dev = (struct drm_device *) arg;
- drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
- int status;
- int handled = 0;
-
- status = MGA_READ(MGA_STATUS);
-
- /* VBLANK interrupt */
- if (status & MGA_VLINEPEN) {
- MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
- atomic_inc(&dev_priv->vbl_received);
- drm_handle_vblank(dev, 0);
- handled = 1;
- }
-
- /* SOFTRAP interrupt */
- if (status & MGA_SOFTRAPEN) {
- const u32 prim_start = MGA_READ(MGA_PRIMADDRESS);
- const u32 prim_end = MGA_READ(MGA_PRIMEND);
-
-
- MGA_WRITE(MGA_ICLEAR, MGA_SOFTRAPICLR);
-
- /* In addition to clearing the interrupt-pending bit, we
- * have to write to MGA_PRIMEND to re-start the DMA operation.
- */
- if ((prim_start & ~0x03) != (prim_end & ~0x03))
- MGA_WRITE(MGA_PRIMEND, prim_end);
-
- atomic_inc(&dev_priv->last_fence_retired);
- wake_up(&dev_priv->fence_queue);
- handled = 1;
- }
-
- if (handled)
- return IRQ_HANDLED;
- return IRQ_NONE;
-}
-
-int mga_enable_vblank(struct drm_device *dev, unsigned int pipe)
-{
- drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
-
- if (pipe != 0) {
- DRM_ERROR("tried to enable vblank on non-existent crtc %u\n",
- pipe);
- return 0;
- }
-
- MGA_WRITE(MGA_IEN, MGA_VLINEIEN | MGA_SOFTRAPEN);
- return 0;
-}
-
-
-void mga_disable_vblank(struct drm_device *dev, unsigned int pipe)
-{
- if (pipe != 0) {
- DRM_ERROR("tried to disable vblank on non-existent crtc %u\n",
- pipe);
- }
-
- /* Do *NOT* disable the vertical refresh interrupt. MGA doesn't have
- * a nice hardware counter that tracks the number of refreshes when
- * the interrupt is disabled, and the kernel doesn't know the refresh
- * rate to calculate an estimate.
- */
- /* MGA_WRITE(MGA_IEN, MGA_VLINEIEN | MGA_SOFTRAPEN); */
-}
-
-void mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
-{
- drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
- unsigned int cur_fence;
-
- /* Assume that the user has missed the current sequence number
- * by about a day rather than she wants to wait for years
- * using fences.
- */
- wait_event_timeout(dev_priv->fence_queue,
- (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
- - *sequence) <= (1 << 23)),
- msecs_to_jiffies(3000));
-
- *sequence = cur_fence;
-}
-
-void mga_driver_irq_preinstall(struct drm_device *dev)
-{
- drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
-
- /* Disable *all* interrupts */
- MGA_WRITE(MGA_IEN, 0);
- /* Clear bits if they're already high */
- MGA_WRITE(MGA_ICLEAR, ~0);
-}
-
-int mga_driver_irq_postinstall(struct drm_device *dev)
-{
- drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
-
- init_waitqueue_head(&dev_priv->fence_queue);
-
- /* Turn on soft trap interrupt. Vertical blank interrupts are enabled
- * in mga_enable_vblank.
- */
- MGA_WRITE(MGA_IEN, MGA_SOFTRAPEN);
- return 0;
-}
-
-void mga_driver_irq_uninstall(struct drm_device *dev)
-{
- drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
- if (!dev_priv)
- return;
-
- /* Disable *all* interrupts */
- MGA_WRITE(MGA_IEN, 0);
-
- dev->irq_enabled = false;
-}
diff --git a/drivers/gpu/drm/mga/mga_state.c b/drivers/gpu/drm/mga/mga_state.c
deleted file mode 100644
index 5b7247b58451..000000000000
--- a/drivers/gpu/drm/mga/mga_state.c
+++ /dev/null
@@ -1,1099 +0,0 @@
-/* mga_state.c -- State support for MGA G200/G400 -*- linux-c -*-
- * Created: Thu Jan 27 02:53:43 2000 by jhartmann@precisioninsight.com
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Jeff Hartmann <jhartmann@valinux.com>
- * Keith Whitwell <keith@tungstengraphics.com>
- *
- * Rewritten by:
- * Gareth Hughes <gareth@valinux.com>
- */
-
-#include "mga_drv.h"
-
-/* ================================================================
- * DMA hardware state programming functions
- */
-
-static void mga_emit_clip_rect(drm_mga_private_t *dev_priv,
- struct drm_clip_rect *box)
-{
- drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
- drm_mga_context_regs_t *ctx = &sarea_priv->context_state;
- unsigned int pitch = dev_priv->front_pitch;
- DMA_LOCALS;
-
- BEGIN_DMA(2);
-
- /* Force reset of DWGCTL on G400 (eliminates clip disable bit).
- */
- if (dev_priv->chipset >= MGA_CARD_TYPE_G400) {
- DMA_BLOCK(MGA_DWGCTL, ctx->dwgctl,
- MGA_LEN + MGA_EXEC, 0x80000000,
- MGA_DWGCTL, ctx->dwgctl,
- MGA_LEN + MGA_EXEC, 0x80000000);
- }
- DMA_BLOCK(MGA_DMAPAD, 0x00000000,
- MGA_CXBNDRY, ((box->x2 - 1) << 16) | box->x1,
- MGA_YTOP, box->y1 * pitch, MGA_YBOT, (box->y2 - 1) * pitch);
-
- ADVANCE_DMA();
-}
-
-static __inline__ void mga_g200_emit_context(drm_mga_private_t *dev_priv)
-{
- drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
- drm_mga_context_regs_t *ctx = &sarea_priv->context_state;
- DMA_LOCALS;
-
- BEGIN_DMA(3);
-
- DMA_BLOCK(MGA_DSTORG, ctx->dstorg,
- MGA_MACCESS, ctx->maccess,
- MGA_PLNWT, ctx->plnwt, MGA_DWGCTL, ctx->dwgctl);
-
- DMA_BLOCK(MGA_ALPHACTRL, ctx->alphactrl,
- MGA_FOGCOL, ctx->fogcolor,
- MGA_WFLAG, ctx->wflag, MGA_ZORG, dev_priv->depth_offset);
-
- DMA_BLOCK(MGA_FCOL, ctx->fcol,
- MGA_DMAPAD, 0x00000000,
- MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000);
-
- ADVANCE_DMA();
-}
-
-static __inline__ void mga_g400_emit_context(drm_mga_private_t *dev_priv)
-{
- drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
- drm_mga_context_regs_t *ctx = &sarea_priv->context_state;
- DMA_LOCALS;
-
- BEGIN_DMA(4);
-
- DMA_BLOCK(MGA_DSTORG, ctx->dstorg,
- MGA_MACCESS, ctx->maccess,
- MGA_PLNWT, ctx->plnwt, MGA_DWGCTL, ctx->dwgctl);
-
- DMA_BLOCK(MGA_ALPHACTRL, ctx->alphactrl,
- MGA_FOGCOL, ctx->fogcolor,
- MGA_WFLAG, ctx->wflag, MGA_ZORG, dev_priv->depth_offset);
-
- DMA_BLOCK(MGA_WFLAG1, ctx->wflag,
- MGA_TDUALSTAGE0, ctx->tdualstage0,
- MGA_TDUALSTAGE1, ctx->tdualstage1, MGA_FCOL, ctx->fcol);
-
- DMA_BLOCK(MGA_STENCIL, ctx->stencil,
- MGA_STENCILCTL, ctx->stencilctl,
- MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000);
-
- ADVANCE_DMA();
-}
-
-static __inline__ void mga_g200_emit_tex0(drm_mga_private_t *dev_priv)
-{
- drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
- drm_mga_texture_regs_t *tex = &sarea_priv->tex_state[0];
- DMA_LOCALS;
-
- BEGIN_DMA(4);
-
- DMA_BLOCK(MGA_TEXCTL2, tex->texctl2,
- MGA_TEXCTL, tex->texctl,
- MGA_TEXFILTER, tex->texfilter,
- MGA_TEXBORDERCOL, tex->texbordercol);
-
- DMA_BLOCK(MGA_TEXORG, tex->texorg,
- MGA_TEXORG1, tex->texorg1,
- MGA_TEXORG2, tex->texorg2, MGA_TEXORG3, tex->texorg3);
-
- DMA_BLOCK(MGA_TEXORG4, tex->texorg4,
- MGA_TEXWIDTH, tex->texwidth,
- MGA_TEXHEIGHT, tex->texheight, MGA_WR24, tex->texwidth);
-
- DMA_BLOCK(MGA_WR34, tex->texheight,
- MGA_TEXTRANS, 0x0000ffff,
- MGA_TEXTRANSHIGH, 0x0000ffff, MGA_DMAPAD, 0x00000000);
-
- ADVANCE_DMA();
-}
-
-static __inline__ void mga_g400_emit_tex0(drm_mga_private_t *dev_priv)
-{
- drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
- drm_mga_texture_regs_t *tex = &sarea_priv->tex_state[0];
- DMA_LOCALS;
-
-/* printk("mga_g400_emit_tex0 %x %x %x\n", tex->texorg, */
-/* tex->texctl, tex->texctl2); */
-
- BEGIN_DMA(6);
-
- DMA_BLOCK(MGA_TEXCTL2, tex->texctl2 | MGA_G400_TC2_MAGIC,
- MGA_TEXCTL, tex->texctl,
- MGA_TEXFILTER, tex->texfilter,
- MGA_TEXBORDERCOL, tex->texbordercol);
-
- DMA_BLOCK(MGA_TEXORG, tex->texorg,
- MGA_TEXORG1, tex->texorg1,
- MGA_TEXORG2, tex->texorg2, MGA_TEXORG3, tex->texorg3);
-
- DMA_BLOCK(MGA_TEXORG4, tex->texorg4,
- MGA_TEXWIDTH, tex->texwidth,
- MGA_TEXHEIGHT, tex->texheight, MGA_WR49, 0x00000000);
-
- DMA_BLOCK(MGA_WR57, 0x00000000,
- MGA_WR53, 0x00000000,
- MGA_WR61, 0x00000000, MGA_WR52, MGA_G400_WR_MAGIC);
-
- DMA_BLOCK(MGA_WR60, MGA_G400_WR_MAGIC,
- MGA_WR54, tex->texwidth | MGA_G400_WR_MAGIC,
- MGA_WR62, tex->texheight | MGA_G400_WR_MAGIC,
- MGA_DMAPAD, 0x00000000);
-
- DMA_BLOCK(MGA_DMAPAD, 0x00000000,
- MGA_DMAPAD, 0x00000000,
- MGA_TEXTRANS, 0x0000ffff, MGA_TEXTRANSHIGH, 0x0000ffff);
-
- ADVANCE_DMA();
-}
-
-static __inline__ void mga_g400_emit_tex1(drm_mga_private_t *dev_priv)
-{
- drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
- drm_mga_texture_regs_t *tex = &sarea_priv->tex_state[1];
- DMA_LOCALS;
-
-/* printk("mga_g400_emit_tex1 %x %x %x\n", tex->texorg, */
-/* tex->texctl, tex->texctl2); */
-
- BEGIN_DMA(5);
-
- DMA_BLOCK(MGA_TEXCTL2, (tex->texctl2 |
- MGA_MAP1_ENABLE |
- MGA_G400_TC2_MAGIC),
- MGA_TEXCTL, tex->texctl,
- MGA_TEXFILTER, tex->texfilter,
- MGA_TEXBORDERCOL, tex->texbordercol);
-
- DMA_BLOCK(MGA_TEXORG, tex->texorg,
- MGA_TEXORG1, tex->texorg1,
- MGA_TEXORG2, tex->texorg2, MGA_TEXORG3, tex->texorg3);
-
- DMA_BLOCK(MGA_TEXORG4, tex->texorg4,
- MGA_TEXWIDTH, tex->texwidth,
- MGA_TEXHEIGHT, tex->texheight, MGA_WR49, 0x00000000);
-
- DMA_BLOCK(MGA_WR57, 0x00000000,
- MGA_WR53, 0x00000000,
- MGA_WR61, 0x00000000,
- MGA_WR52, tex->texwidth | MGA_G400_WR_MAGIC);
-
- DMA_BLOCK(MGA_WR60, tex->texheight | MGA_G400_WR_MAGIC,
- MGA_TEXTRANS, 0x0000ffff,
- MGA_TEXTRANSHIGH, 0x0000ffff,
- MGA_TEXCTL2, tex->texctl2 | MGA_G400_TC2_MAGIC);
-
- ADVANCE_DMA();
-}
-
-static __inline__ void mga_g200_emit_pipe(drm_mga_private_t *dev_priv)
-{
- drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
- unsigned int pipe = sarea_priv->warp_pipe;
- DMA_LOCALS;
-
- BEGIN_DMA(3);
-
- DMA_BLOCK(MGA_WIADDR, MGA_WMODE_SUSPEND,
- MGA_WVRTXSZ, 0x00000007,
- MGA_WFLAG, 0x00000000, MGA_WR24, 0x00000000);
-
- DMA_BLOCK(MGA_WR25, 0x00000100,
- MGA_WR34, 0x00000000,
- MGA_WR42, 0x0000ffff, MGA_WR60, 0x0000ffff);
-
- /* Padding required due to hardware bug.
- */
- DMA_BLOCK(MGA_DMAPAD, 0xffffffff,
- MGA_DMAPAD, 0xffffffff,
- MGA_DMAPAD, 0xffffffff,
- MGA_WIADDR, (dev_priv->warp_pipe_phys[pipe] |
- MGA_WMODE_START | dev_priv->wagp_enable));
-
- ADVANCE_DMA();
-}
-
-static __inline__ void mga_g400_emit_pipe(drm_mga_private_t *dev_priv)
-{
- drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
- unsigned int pipe = sarea_priv->warp_pipe;
- DMA_LOCALS;
-
-/* printk("mga_g400_emit_pipe %x\n", pipe); */
-
- BEGIN_DMA(10);
-
- DMA_BLOCK(MGA_WIADDR2, MGA_WMODE_SUSPEND,
- MGA_DMAPAD, 0x00000000,
- MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000);
-
- if (pipe & MGA_T2) {
- DMA_BLOCK(MGA_WVRTXSZ, 0x00001e09,
- MGA_DMAPAD, 0x00000000,
- MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000);
-
- DMA_BLOCK(MGA_WACCEPTSEQ, 0x00000000,
- MGA_WACCEPTSEQ, 0x00000000,
- MGA_WACCEPTSEQ, 0x00000000,
- MGA_WACCEPTSEQ, 0x1e000000);
- } else {
- if (dev_priv->warp_pipe & MGA_T2) {
- /* Flush the WARP pipe */
- DMA_BLOCK(MGA_YDST, 0x00000000,
- MGA_FXLEFT, 0x00000000,
- MGA_FXRIGHT, 0x00000001,
- MGA_DWGCTL, MGA_DWGCTL_FLUSH);
-
- DMA_BLOCK(MGA_LEN + MGA_EXEC, 0x00000001,
- MGA_DWGSYNC, 0x00007000,
- MGA_TEXCTL2, MGA_G400_TC2_MAGIC,
- MGA_LEN + MGA_EXEC, 0x00000000);
-
- DMA_BLOCK(MGA_TEXCTL2, (MGA_DUALTEX |
- MGA_G400_TC2_MAGIC),
- MGA_LEN + MGA_EXEC, 0x00000000,
- MGA_TEXCTL2, MGA_G400_TC2_MAGIC,
- MGA_DMAPAD, 0x00000000);
- }
-
- DMA_BLOCK(MGA_WVRTXSZ, 0x00001807,
- MGA_DMAPAD, 0x00000000,
- MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000);
-
- DMA_BLOCK(MGA_WACCEPTSEQ, 0x00000000,
- MGA_WACCEPTSEQ, 0x00000000,
- MGA_WACCEPTSEQ, 0x00000000,
- MGA_WACCEPTSEQ, 0x18000000);
- }
-
- DMA_BLOCK(MGA_WFLAG, 0x00000000,
- MGA_WFLAG1, 0x00000000,
- MGA_WR56, MGA_G400_WR56_MAGIC, MGA_DMAPAD, 0x00000000);
-
- DMA_BLOCK(MGA_WR49, 0x00000000, /* tex0 */
- MGA_WR57, 0x00000000, /* tex0 */
- MGA_WR53, 0x00000000, /* tex1 */
- MGA_WR61, 0x00000000); /* tex1 */
-
- DMA_BLOCK(MGA_WR54, MGA_G400_WR_MAGIC, /* tex0 width */
- MGA_WR62, MGA_G400_WR_MAGIC, /* tex0 height */
- MGA_WR52, MGA_G400_WR_MAGIC, /* tex1 width */
- MGA_WR60, MGA_G400_WR_MAGIC); /* tex1 height */
-
- /* Padding required due to hardware bug */
- DMA_BLOCK(MGA_DMAPAD, 0xffffffff,
- MGA_DMAPAD, 0xffffffff,
- MGA_DMAPAD, 0xffffffff,
- MGA_WIADDR2, (dev_priv->warp_pipe_phys[pipe] |
- MGA_WMODE_START | dev_priv->wagp_enable));
-
- ADVANCE_DMA();
-}
-
-static void mga_g200_emit_state(drm_mga_private_t *dev_priv)
-{
- drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
- unsigned int dirty = sarea_priv->dirty;
-
- if (sarea_priv->warp_pipe != dev_priv->warp_pipe) {
- mga_g200_emit_pipe(dev_priv);
- dev_priv->warp_pipe = sarea_priv->warp_pipe;
- }
-
- if (dirty & MGA_UPLOAD_CONTEXT) {
- mga_g200_emit_context(dev_priv);
- sarea_priv->dirty &= ~MGA_UPLOAD_CONTEXT;
- }
-
- if (dirty & MGA_UPLOAD_TEX0) {
- mga_g200_emit_tex0(dev_priv);
- sarea_priv->dirty &= ~MGA_UPLOAD_TEX0;
- }
-}
-
-static void mga_g400_emit_state(drm_mga_private_t *dev_priv)
-{
- drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
- unsigned int dirty = sarea_priv->dirty;
- int multitex = sarea_priv->warp_pipe & MGA_T2;
-
- if (sarea_priv->warp_pipe != dev_priv->warp_pipe) {
- mga_g400_emit_pipe(dev_priv);
- dev_priv->warp_pipe = sarea_priv->warp_pipe;
- }
-
- if (dirty & MGA_UPLOAD_CONTEXT) {
- mga_g400_emit_context(dev_priv);
- sarea_priv->dirty &= ~MGA_UPLOAD_CONTEXT;
- }
-
- if (dirty & MGA_UPLOAD_TEX0) {
- mga_g400_emit_tex0(dev_priv);
- sarea_priv->dirty &= ~MGA_UPLOAD_TEX0;
- }
-
- if ((dirty & MGA_UPLOAD_TEX1) && multitex) {
- mga_g400_emit_tex1(dev_priv);
- sarea_priv->dirty &= ~MGA_UPLOAD_TEX1;
- }
-}
-
-/* ================================================================
- * SAREA state verification
- */
-
-/* Disallow all write destinations except the front and backbuffer.
- */
-static int mga_verify_context(drm_mga_private_t *dev_priv)
-{
- drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
- drm_mga_context_regs_t *ctx = &sarea_priv->context_state;
-
- if (ctx->dstorg != dev_priv->front_offset &&
- ctx->dstorg != dev_priv->back_offset) {
- DRM_ERROR("*** bad DSTORG: %x (front %x, back %x)\n\n",
- ctx->dstorg, dev_priv->front_offset,
- dev_priv->back_offset);
- ctx->dstorg = 0;
- return -EINVAL;
- }
-
- return 0;
-}
-
-/* Disallow texture reads from PCI space.
- */
-static int mga_verify_tex(drm_mga_private_t *dev_priv, int unit)
-{
- drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
- drm_mga_texture_regs_t *tex = &sarea_priv->tex_state[unit];
- unsigned int org;
-
- org = tex->texorg & (MGA_TEXORGMAP_MASK | MGA_TEXORGACC_MASK);
-
- if (org == (MGA_TEXORGMAP_SYSMEM | MGA_TEXORGACC_PCI)) {
- DRM_ERROR("*** bad TEXORG: 0x%x, unit %d\n", tex->texorg, unit);
- tex->texorg = 0;
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int mga_verify_state(drm_mga_private_t *dev_priv)
-{
- drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
- unsigned int dirty = sarea_priv->dirty;
- int ret = 0;
-
- if (sarea_priv->nbox > MGA_NR_SAREA_CLIPRECTS)
- sarea_priv->nbox = MGA_NR_SAREA_CLIPRECTS;
-
- if (dirty & MGA_UPLOAD_CONTEXT)
- ret |= mga_verify_context(dev_priv);
-
- if (dirty & MGA_UPLOAD_TEX0)
- ret |= mga_verify_tex(dev_priv, 0);
-
- if (dev_priv->chipset >= MGA_CARD_TYPE_G400) {
- if (dirty & MGA_UPLOAD_TEX1)
- ret |= mga_verify_tex(dev_priv, 1);
-
- if (dirty & MGA_UPLOAD_PIPE)
- ret |= (sarea_priv->warp_pipe > MGA_MAX_G400_PIPES);
- } else {
- if (dirty & MGA_UPLOAD_PIPE)
- ret |= (sarea_priv->warp_pipe > MGA_MAX_G200_PIPES);
- }
-
- return (ret == 0);
-}
-
-static int mga_verify_iload(drm_mga_private_t *dev_priv,
- unsigned int dstorg, unsigned int length)
-{
- if (dstorg < dev_priv->texture_offset ||
- dstorg + length > (dev_priv->texture_offset +
- dev_priv->texture_size)) {
- DRM_ERROR("*** bad iload DSTORG: 0x%x\n", dstorg);
- return -EINVAL;
- }
-
- if (length & MGA_ILOAD_MASK) {
- DRM_ERROR("*** bad iload length: 0x%x\n",
- length & MGA_ILOAD_MASK);
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int mga_verify_blit(drm_mga_private_t *dev_priv,
- unsigned int srcorg, unsigned int dstorg)
-{
- if ((srcorg & 0x3) == (MGA_SRCACC_PCI | MGA_SRCMAP_SYSMEM) ||
- (dstorg & 0x3) == (MGA_SRCACC_PCI | MGA_SRCMAP_SYSMEM)) {
- DRM_ERROR("*** bad blit: src=0x%x dst=0x%x\n", srcorg, dstorg);
- return -EINVAL;
- }
- return 0;
-}
-
-/* ================================================================
- *
- */
-
-static void mga_dma_dispatch_clear(struct drm_device *dev, drm_mga_clear_t *clear)
-{
- drm_mga_private_t *dev_priv = dev->dev_private;
- drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
- drm_mga_context_regs_t *ctx = &sarea_priv->context_state;
- struct drm_clip_rect *pbox = sarea_priv->boxes;
- int nbox = sarea_priv->nbox;
- int i;
- DMA_LOCALS;
- DRM_DEBUG("\n");
-
- BEGIN_DMA(1);
-
- DMA_BLOCK(MGA_DMAPAD, 0x00000000,
- MGA_DMAPAD, 0x00000000,
- MGA_DWGSYNC, 0x00007100, MGA_DWGSYNC, 0x00007000);
-
- ADVANCE_DMA();
-
- for (i = 0; i < nbox; i++) {
- struct drm_clip_rect *box = &pbox[i];
- u32 height = box->y2 - box->y1;
-
- DRM_DEBUG(" from=%d,%d to=%d,%d\n",
- box->x1, box->y1, box->x2, box->y2);
-
- if (clear->flags & MGA_FRONT) {
- BEGIN_DMA(2);
-
- DMA_BLOCK(MGA_DMAPAD, 0x00000000,
- MGA_PLNWT, clear->color_mask,
- MGA_YDSTLEN, (box->y1 << 16) | height,
- MGA_FXBNDRY, (box->x2 << 16) | box->x1);
-
- DMA_BLOCK(MGA_DMAPAD, 0x00000000,
- MGA_FCOL, clear->clear_color,
- MGA_DSTORG, dev_priv->front_offset,
- MGA_DWGCTL + MGA_EXEC, dev_priv->clear_cmd);
-
- ADVANCE_DMA();
- }
-
- if (clear->flags & MGA_BACK) {
- BEGIN_DMA(2);
-
- DMA_BLOCK(MGA_DMAPAD, 0x00000000,
- MGA_PLNWT, clear->color_mask,
- MGA_YDSTLEN, (box->y1 << 16) | height,
- MGA_FXBNDRY, (box->x2 << 16) | box->x1);
-
- DMA_BLOCK(MGA_DMAPAD, 0x00000000,
- MGA_FCOL, clear->clear_color,
- MGA_DSTORG, dev_priv->back_offset,
- MGA_DWGCTL + MGA_EXEC, dev_priv->clear_cmd);
-
- ADVANCE_DMA();
- }
-
- if (clear->flags & MGA_DEPTH) {
- BEGIN_DMA(2);
-
- DMA_BLOCK(MGA_DMAPAD, 0x00000000,
- MGA_PLNWT, clear->depth_mask,
- MGA_YDSTLEN, (box->y1 << 16) | height,
- MGA_FXBNDRY, (box->x2 << 16) | box->x1);
-
- DMA_BLOCK(MGA_DMAPAD, 0x00000000,
- MGA_FCOL, clear->clear_depth,
- MGA_DSTORG, dev_priv->depth_offset,
- MGA_DWGCTL + MGA_EXEC, dev_priv->clear_cmd);
-
- ADVANCE_DMA();
- }
-
- }
-
- BEGIN_DMA(1);
-
- /* Force reset of DWGCTL */
- DMA_BLOCK(MGA_DMAPAD, 0x00000000,
- MGA_DMAPAD, 0x00000000,
- MGA_PLNWT, ctx->plnwt, MGA_DWGCTL, ctx->dwgctl);
-
- ADVANCE_DMA();
-
- FLUSH_DMA();
-}
-
-static void mga_dma_dispatch_swap(struct drm_device *dev)
-{
- drm_mga_private_t *dev_priv = dev->dev_private;
- drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
- drm_mga_context_regs_t *ctx = &sarea_priv->context_state;
- struct drm_clip_rect *pbox = sarea_priv->boxes;
- int nbox = sarea_priv->nbox;
- int i;
- DMA_LOCALS;
- DRM_DEBUG("\n");
-
- sarea_priv->last_frame.head = dev_priv->prim.tail;
- sarea_priv->last_frame.wrap = dev_priv->prim.last_wrap;
-
- BEGIN_DMA(4 + nbox);
-
- DMA_BLOCK(MGA_DMAPAD, 0x00000000,
- MGA_DMAPAD, 0x00000000,
- MGA_DWGSYNC, 0x00007100, MGA_DWGSYNC, 0x00007000);
-
- DMA_BLOCK(MGA_DSTORG, dev_priv->front_offset,
- MGA_MACCESS, dev_priv->maccess,
- MGA_SRCORG, dev_priv->back_offset,
- MGA_AR5, dev_priv->front_pitch);
-
- DMA_BLOCK(MGA_DMAPAD, 0x00000000,
- MGA_DMAPAD, 0x00000000,
- MGA_PLNWT, 0xffffffff, MGA_DWGCTL, MGA_DWGCTL_COPY);
-
- for (i = 0; i < nbox; i++) {
- struct drm_clip_rect *box = &pbox[i];
- u32 height = box->y2 - box->y1;
- u32 start = box->y1 * dev_priv->front_pitch;
-
- DRM_DEBUG(" from=%d,%d to=%d,%d\n",
- box->x1, box->y1, box->x2, box->y2);
-
- DMA_BLOCK(MGA_AR0, start + box->x2 - 1,
- MGA_AR3, start + box->x1,
- MGA_FXBNDRY, ((box->x2 - 1) << 16) | box->x1,
- MGA_YDSTLEN + MGA_EXEC, (box->y1 << 16) | height);
- }
-
- DMA_BLOCK(MGA_DMAPAD, 0x00000000,
- MGA_PLNWT, ctx->plnwt,
- MGA_SRCORG, dev_priv->front_offset, MGA_DWGCTL, ctx->dwgctl);
-
- ADVANCE_DMA();
-
- FLUSH_DMA();
-
- DRM_DEBUG("... done.\n");
-}
-
-static void mga_dma_dispatch_vertex(struct drm_device *dev, struct drm_buf *buf)
-{
- drm_mga_private_t *dev_priv = dev->dev_private;
- drm_mga_buf_priv_t *buf_priv = buf->dev_private;
- drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
- u32 address = (u32) buf->bus_address;
- u32 length = (u32) buf->used;
- int i = 0;
- DMA_LOCALS;
- DRM_DEBUG("buf=%d used=%d\n", buf->idx, buf->used);
-
- if (buf->used) {
- buf_priv->dispatched = 1;
-
- MGA_EMIT_STATE(dev_priv, sarea_priv->dirty);
-
- do {
- if (i < sarea_priv->nbox) {
- mga_emit_clip_rect(dev_priv,
- &sarea_priv->boxes[i]);
- }
-
- BEGIN_DMA(1);
-
- DMA_BLOCK(MGA_DMAPAD, 0x00000000,
- MGA_DMAPAD, 0x00000000,
- MGA_SECADDRESS, (address |
- MGA_DMA_VERTEX),
- MGA_SECEND, ((address + length) |
- dev_priv->dma_access));
-
- ADVANCE_DMA();
- } while (++i < sarea_priv->nbox);
- }
-
- if (buf_priv->discard) {
- AGE_BUFFER(buf_priv);
- buf->pending = 0;
- buf->used = 0;
- buf_priv->dispatched = 0;
-
- mga_freelist_put(dev, buf);
- }
-
- FLUSH_DMA();
-}
-
-static void mga_dma_dispatch_indices(struct drm_device *dev, struct drm_buf *buf,
- unsigned int start, unsigned int end)
-{
- drm_mga_private_t *dev_priv = dev->dev_private;
- drm_mga_buf_priv_t *buf_priv = buf->dev_private;
- drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
- u32 address = (u32) buf->bus_address;
- int i = 0;
- DMA_LOCALS;
- DRM_DEBUG("buf=%d start=%d end=%d\n", buf->idx, start, end);
-
- if (start != end) {
- buf_priv->dispatched = 1;
-
- MGA_EMIT_STATE(dev_priv, sarea_priv->dirty);
-
- do {
- if (i < sarea_priv->nbox) {
- mga_emit_clip_rect(dev_priv,
- &sarea_priv->boxes[i]);
- }
-
- BEGIN_DMA(1);
-
- DMA_BLOCK(MGA_DMAPAD, 0x00000000,
- MGA_DMAPAD, 0x00000000,
- MGA_SETUPADDRESS, address + start,
- MGA_SETUPEND, ((address + end) |
- dev_priv->dma_access));
-
- ADVANCE_DMA();
- } while (++i < sarea_priv->nbox);
- }
-
- if (buf_priv->discard) {
- AGE_BUFFER(buf_priv);
- buf->pending = 0;
- buf->used = 0;
- buf_priv->dispatched = 0;
-
- mga_freelist_put(dev, buf);
- }
-
- FLUSH_DMA();
-}
-
-/* This copies a 64 byte aligned agp region to the frambuffer with a
- * standard blit, the ioctl needs to do checking.
- */
-static void mga_dma_dispatch_iload(struct drm_device *dev, struct drm_buf *buf,
- unsigned int dstorg, unsigned int length)
-{
- drm_mga_private_t *dev_priv = dev->dev_private;
- drm_mga_buf_priv_t *buf_priv = buf->dev_private;
- drm_mga_context_regs_t *ctx = &dev_priv->sarea_priv->context_state;
- u32 srcorg =
- buf->bus_address | dev_priv->dma_access | MGA_SRCMAP_SYSMEM;
- u32 y2;
- DMA_LOCALS;
- DRM_DEBUG("buf=%d used=%d\n", buf->idx, buf->used);
-
- y2 = length / 64;
-
- BEGIN_DMA(5);
-
- DMA_BLOCK(MGA_DMAPAD, 0x00000000,
- MGA_DMAPAD, 0x00000000,
- MGA_DWGSYNC, 0x00007100, MGA_DWGSYNC, 0x00007000);
-
- DMA_BLOCK(MGA_DSTORG, dstorg,
- MGA_MACCESS, 0x00000000, MGA_SRCORG, srcorg, MGA_AR5, 64);
-
- DMA_BLOCK(MGA_PITCH, 64,
- MGA_PLNWT, 0xffffffff,
- MGA_DMAPAD, 0x00000000, MGA_DWGCTL, MGA_DWGCTL_COPY);
-
- DMA_BLOCK(MGA_AR0, 63,
- MGA_AR3, 0,
- MGA_FXBNDRY, (63 << 16) | 0, MGA_YDSTLEN + MGA_EXEC, y2);
-
- DMA_BLOCK(MGA_PLNWT, ctx->plnwt,
- MGA_SRCORG, dev_priv->front_offset,
- MGA_PITCH, dev_priv->front_pitch, MGA_DWGSYNC, 0x00007000);
-
- ADVANCE_DMA();
-
- AGE_BUFFER(buf_priv);
-
- buf->pending = 0;
- buf->used = 0;
- buf_priv->dispatched = 0;
-
- mga_freelist_put(dev, buf);
-
- FLUSH_DMA();
-}
-
-static void mga_dma_dispatch_blit(struct drm_device *dev, drm_mga_blit_t *blit)
-{
- drm_mga_private_t *dev_priv = dev->dev_private;
- drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
- drm_mga_context_regs_t *ctx = &sarea_priv->context_state;
- struct drm_clip_rect *pbox = sarea_priv->boxes;
- int nbox = sarea_priv->nbox;
- u32 scandir = 0, i;
- DMA_LOCALS;
- DRM_DEBUG("\n");
-
- BEGIN_DMA(4 + nbox);
-
- DMA_BLOCK(MGA_DMAPAD, 0x00000000,
- MGA_DMAPAD, 0x00000000,
- MGA_DWGSYNC, 0x00007100, MGA_DWGSYNC, 0x00007000);
-
- DMA_BLOCK(MGA_DWGCTL, MGA_DWGCTL_COPY,
- MGA_PLNWT, blit->planemask,
- MGA_SRCORG, blit->srcorg, MGA_DSTORG, blit->dstorg);
-
- DMA_BLOCK(MGA_SGN, scandir,
- MGA_MACCESS, dev_priv->maccess,
- MGA_AR5, blit->ydir * blit->src_pitch,
- MGA_PITCH, blit->dst_pitch);
-
- for (i = 0; i < nbox; i++) {
- int srcx = pbox[i].x1 + blit->delta_sx;
- int srcy = pbox[i].y1 + blit->delta_sy;
- int dstx = pbox[i].x1 + blit->delta_dx;
- int dsty = pbox[i].y1 + blit->delta_dy;
- int h = pbox[i].y2 - pbox[i].y1;
- int w = pbox[i].x2 - pbox[i].x1 - 1;
- int start;
-
- if (blit->ydir == -1)
- srcy = blit->height - srcy - 1;
-
- start = srcy * blit->src_pitch + srcx;
-
- DMA_BLOCK(MGA_AR0, start + w,
- MGA_AR3, start,
- MGA_FXBNDRY, ((dstx + w) << 16) | (dstx & 0xffff),
- MGA_YDSTLEN + MGA_EXEC, (dsty << 16) | h);
- }
-
- /* Do something to flush AGP?
- */
-
- /* Force reset of DWGCTL */
- DMA_BLOCK(MGA_DMAPAD, 0x00000000,
- MGA_PLNWT, ctx->plnwt,
- MGA_PITCH, dev_priv->front_pitch, MGA_DWGCTL, ctx->dwgctl);
-
- ADVANCE_DMA();
-}
-
-/* ================================================================
- *
- */
-
-static int mga_dma_clear(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- drm_mga_private_t *dev_priv = dev->dev_private;
- drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
- drm_mga_clear_t *clear = data;
-
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- if (sarea_priv->nbox > MGA_NR_SAREA_CLIPRECTS)
- sarea_priv->nbox = MGA_NR_SAREA_CLIPRECTS;
-
- WRAP_TEST_WITH_RETURN(dev_priv);
-
- mga_dma_dispatch_clear(dev, clear);
-
- /* Make sure we restore the 3D state next time.
- */
- dev_priv->sarea_priv->dirty |= MGA_UPLOAD_CONTEXT;
-
- return 0;
-}
-
-static int mga_dma_swap(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- drm_mga_private_t *dev_priv = dev->dev_private;
- drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
-
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- if (sarea_priv->nbox > MGA_NR_SAREA_CLIPRECTS)
- sarea_priv->nbox = MGA_NR_SAREA_CLIPRECTS;
-
- WRAP_TEST_WITH_RETURN(dev_priv);
-
- mga_dma_dispatch_swap(dev);
-
- /* Make sure we restore the 3D state next time.
- */
- dev_priv->sarea_priv->dirty |= MGA_UPLOAD_CONTEXT;
-
- return 0;
-}
-
-static int mga_dma_vertex(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- drm_mga_private_t *dev_priv = dev->dev_private;
- struct drm_device_dma *dma = dev->dma;
- struct drm_buf *buf;
- drm_mga_buf_priv_t *buf_priv;
- drm_mga_vertex_t *vertex = data;
-
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- if (vertex->idx < 0 || vertex->idx > dma->buf_count)
- return -EINVAL;
- buf = dma->buflist[vertex->idx];
- buf_priv = buf->dev_private;
-
- buf->used = vertex->used;
- buf_priv->discard = vertex->discard;
-
- if (!mga_verify_state(dev_priv)) {
- if (vertex->discard) {
- if (buf_priv->dispatched == 1)
- AGE_BUFFER(buf_priv);
- buf_priv->dispatched = 0;
- mga_freelist_put(dev, buf);
- }
- return -EINVAL;
- }
-
- WRAP_TEST_WITH_RETURN(dev_priv);
-
- mga_dma_dispatch_vertex(dev, buf);
-
- return 0;
-}
-
-static int mga_dma_indices(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- drm_mga_private_t *dev_priv = dev->dev_private;
- struct drm_device_dma *dma = dev->dma;
- struct drm_buf *buf;
- drm_mga_buf_priv_t *buf_priv;
- drm_mga_indices_t *indices = data;
-
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- if (indices->idx < 0 || indices->idx > dma->buf_count)
- return -EINVAL;
-
- buf = dma->buflist[indices->idx];
- buf_priv = buf->dev_private;
-
- buf_priv->discard = indices->discard;
-
- if (!mga_verify_state(dev_priv)) {
- if (indices->discard) {
- if (buf_priv->dispatched == 1)
- AGE_BUFFER(buf_priv);
- buf_priv->dispatched = 0;
- mga_freelist_put(dev, buf);
- }
- return -EINVAL;
- }
-
- WRAP_TEST_WITH_RETURN(dev_priv);
-
- mga_dma_dispatch_indices(dev, buf, indices->start, indices->end);
-
- return 0;
-}
-
-static int mga_dma_iload(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- struct drm_device_dma *dma = dev->dma;
- drm_mga_private_t *dev_priv = dev->dev_private;
- struct drm_buf *buf;
- drm_mga_iload_t *iload = data;
- DRM_DEBUG("\n");
-
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
-#if 0
- if (mga_do_wait_for_idle(dev_priv) < 0) {
- if (MGA_DMA_DEBUG)
- DRM_INFO("-EBUSY\n");
- return -EBUSY;
- }
-#endif
- if (iload->idx < 0 || iload->idx > dma->buf_count)
- return -EINVAL;
-
- buf = dma->buflist[iload->idx];
-
- if (mga_verify_iload(dev_priv, iload->dstorg, iload->length)) {
- mga_freelist_put(dev, buf);
- return -EINVAL;
- }
-
- WRAP_TEST_WITH_RETURN(dev_priv);
-
- mga_dma_dispatch_iload(dev, buf, iload->dstorg, iload->length);
-
- /* Make sure we restore the 3D state next time.
- */
- dev_priv->sarea_priv->dirty |= MGA_UPLOAD_CONTEXT;
-
- return 0;
-}
-
-static int mga_dma_blit(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- drm_mga_private_t *dev_priv = dev->dev_private;
- drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
- drm_mga_blit_t *blit = data;
- DRM_DEBUG("\n");
-
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- if (sarea_priv->nbox > MGA_NR_SAREA_CLIPRECTS)
- sarea_priv->nbox = MGA_NR_SAREA_CLIPRECTS;
-
- if (mga_verify_blit(dev_priv, blit->srcorg, blit->dstorg))
- return -EINVAL;
-
- WRAP_TEST_WITH_RETURN(dev_priv);
-
- mga_dma_dispatch_blit(dev, blit);
-
- /* Make sure we restore the 3D state next time.
- */
- dev_priv->sarea_priv->dirty |= MGA_UPLOAD_CONTEXT;
-
- return 0;
-}
-
-int mga_getparam(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- drm_mga_private_t *dev_priv = dev->dev_private;
- drm_mga_getparam_t *param = data;
- struct pci_dev *pdev = to_pci_dev(dev->dev);
- int value;
-
- if (!dev_priv) {
- DRM_ERROR("called with no initialization\n");
- return -EINVAL;
- }
-
- DRM_DEBUG("pid=%d\n", task_pid_nr(current));
-
- switch (param->param) {
- case MGA_PARAM_IRQ_NR:
- value = pdev->irq;
- break;
- case MGA_PARAM_CARD_TYPE:
- value = dev_priv->chipset;
- break;
- default:
- return -EINVAL;
- }
-
- if (copy_to_user(param->value, &value, sizeof(int))) {
- DRM_ERROR("copy_to_user\n");
- return -EFAULT;
- }
-
- return 0;
-}
-
-static int mga_set_fence(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- drm_mga_private_t *dev_priv = dev->dev_private;
- u32 *fence = data;
- DMA_LOCALS;
-
- if (!dev_priv) {
- DRM_ERROR("called with no initialization\n");
- return -EINVAL;
- }
-
- DRM_DEBUG("pid=%d\n", task_pid_nr(current));
-
- /* I would normal do this assignment in the declaration of fence,
- * but dev_priv may be NULL.
- */
-
- *fence = dev_priv->next_fence_to_post;
- dev_priv->next_fence_to_post++;
-
- BEGIN_DMA(1);
- DMA_BLOCK(MGA_DMAPAD, 0x00000000,
- MGA_DMAPAD, 0x00000000,
- MGA_DMAPAD, 0x00000000, MGA_SOFTRAP, 0x00000000);
- ADVANCE_DMA();
-
- return 0;
-}
-
-static int mga_wait_fence(struct drm_device *dev, void *data, struct drm_file *
-file_priv)
-{
- drm_mga_private_t *dev_priv = dev->dev_private;
- u32 *fence = data;
-
- if (!dev_priv) {
- DRM_ERROR("called with no initialization\n");
- return -EINVAL;
- }
-
- DRM_DEBUG("pid=%d\n", task_pid_nr(current));
-
- mga_driver_fence_wait(dev, fence);
- return 0;
-}
-
-const struct drm_ioctl_desc mga_ioctls[] = {
- DRM_IOCTL_DEF_DRV(MGA_INIT, mga_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF_DRV(MGA_FLUSH, mga_dma_flush, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(MGA_RESET, mga_dma_reset, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(MGA_SWAP, mga_dma_swap, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(MGA_CLEAR, mga_dma_clear, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(MGA_VERTEX, mga_dma_vertex, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(MGA_INDICES, mga_dma_indices, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(MGA_ILOAD, mga_dma_iload, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(MGA_BLIT, mga_dma_blit, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(MGA_GETPARAM, mga_getparam, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(MGA_SET_FENCE, mga_set_fence, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(MGA_WAIT_FENCE, mga_wait_fence, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(MGA_DMA_BOOTSTRAP, mga_dma_bootstrap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-};
-
-int mga_max_ioctl = ARRAY_SIZE(mga_ioctls);
diff --git a/drivers/gpu/drm/mga/mga_warp.c b/drivers/gpu/drm/mga/mga_warp.c
deleted file mode 100644
index b5ef1d2c8b1c..000000000000
--- a/drivers/gpu/drm/mga/mga_warp.c
+++ /dev/null
@@ -1,167 +0,0 @@
-/* mga_warp.c -- Matrox G200/G400 WARP engine management -*- linux-c -*-
- * Created: Thu Jan 11 21:29:32 2001 by gareth@valinux.com
- *
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Gareth Hughes <gareth@valinux.com>
- */
-
-#include <linux/firmware.h>
-#include <linux/ihex.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-
-#include "mga_drv.h"
-
-#define FIRMWARE_G200 "matrox/g200_warp.fw"
-#define FIRMWARE_G400 "matrox/g400_warp.fw"
-
-MODULE_FIRMWARE(FIRMWARE_G200);
-MODULE_FIRMWARE(FIRMWARE_G400);
-
-#define MGA_WARP_CODE_ALIGN 256 /* in bytes */
-
-#define WARP_UCODE_SIZE(size) ALIGN(size, MGA_WARP_CODE_ALIGN)
-
-int mga_warp_install_microcode(drm_mga_private_t *dev_priv)
-{
- unsigned char *vcbase = dev_priv->warp->handle;
- unsigned long pcbase = dev_priv->warp->offset;
- const char *firmware_name;
- struct platform_device *pdev;
- const struct firmware *fw = NULL;
- const struct ihex_binrec *rec;
- unsigned int size;
- int n_pipes, where;
- int rc = 0;
-
- switch (dev_priv->chipset) {
- case MGA_CARD_TYPE_G400:
- case MGA_CARD_TYPE_G550:
- firmware_name = FIRMWARE_G400;
- n_pipes = MGA_MAX_G400_PIPES;
- break;
- case MGA_CARD_TYPE_G200:
- firmware_name = FIRMWARE_G200;
- n_pipes = MGA_MAX_G200_PIPES;
- break;
- default:
- return -EINVAL;
- }
-
- pdev = platform_device_register_simple("mga_warp", 0, NULL, 0);
- if (IS_ERR(pdev)) {
- DRM_ERROR("mga: Failed to register microcode\n");
- return PTR_ERR(pdev);
- }
- rc = request_ihex_firmware(&fw, firmware_name, &pdev->dev);
- platform_device_unregister(pdev);
- if (rc) {
- DRM_ERROR("mga: Failed to load microcode \"%s\"\n",
- firmware_name);
- return rc;
- }
-
- size = 0;
- where = 0;
- for (rec = (const struct ihex_binrec *)fw->data;
- rec;
- rec = ihex_next_binrec(rec)) {
- size += WARP_UCODE_SIZE(be16_to_cpu(rec->len));
- where++;
- }
-
- if (where != n_pipes) {
- DRM_ERROR("mga: Invalid microcode \"%s\"\n", firmware_name);
- rc = -EINVAL;
- goto out;
- }
- size = PAGE_ALIGN(size);
- DRM_DEBUG("MGA ucode size = %d bytes\n", size);
- if (size > dev_priv->warp->size) {
- DRM_ERROR("microcode too large! (%u > %lu)\n",
- size, dev_priv->warp->size);
- rc = -ENOMEM;
- goto out;
- }
-
- memset(dev_priv->warp_pipe_phys, 0, sizeof(dev_priv->warp_pipe_phys));
-
- where = 0;
- for (rec = (const struct ihex_binrec *)fw->data;
- rec;
- rec = ihex_next_binrec(rec)) {
- unsigned int src_size, dst_size;
-
- DRM_DEBUG(" pcbase = 0x%08lx vcbase = %p\n", pcbase, vcbase);
- dev_priv->warp_pipe_phys[where] = pcbase;
- src_size = be16_to_cpu(rec->len);
- dst_size = WARP_UCODE_SIZE(src_size);
- memcpy(vcbase, rec->data, src_size);
- pcbase += dst_size;
- vcbase += dst_size;
- where++;
- }
-
-out:
- release_firmware(fw);
- return rc;
-}
-
-#define WMISC_EXPECTED (MGA_WUCODECACHE_ENABLE | MGA_WMASTER_ENABLE)
-
-int mga_warp_init(drm_mga_private_t *dev_priv)
-{
- u32 wmisc;
-
- /* FIXME: Get rid of these damned magic numbers...
- */
- switch (dev_priv->chipset) {
- case MGA_CARD_TYPE_G400:
- case MGA_CARD_TYPE_G550:
- MGA_WRITE(MGA_WIADDR2, MGA_WMODE_SUSPEND);
- MGA_WRITE(MGA_WGETMSB, 0x00000E00);
- MGA_WRITE(MGA_WVRTXSZ, 0x00001807);
- MGA_WRITE(MGA_WACCEPTSEQ, 0x18000000);
- break;
- case MGA_CARD_TYPE_G200:
- MGA_WRITE(MGA_WIADDR, MGA_WMODE_SUSPEND);
- MGA_WRITE(MGA_WGETMSB, 0x1606);
- MGA_WRITE(MGA_WVRTXSZ, 7);
- break;
- default:
- return -EINVAL;
- }
-
- MGA_WRITE(MGA_WMISC, (MGA_WUCODECACHE_ENABLE |
- MGA_WMASTER_ENABLE | MGA_WCACHEFLUSH_ENABLE));
- wmisc = MGA_READ(MGA_WMISC);
- if (wmisc != WMISC_EXPECTED) {
- DRM_ERROR("WARP engine config failed! 0x%x != 0x%x\n",
- wmisc, WMISC_EXPECTED);
- return -EINVAL;
- }
-
- return 0;
-}
diff --git a/drivers/gpu/drm/mgag200/Kconfig b/drivers/gpu/drm/mgag200/Kconfig
index eec59658a938..b28c5e4828f4 100644
--- a/drivers/gpu/drm/mgag200/Kconfig
+++ b/drivers/gpu/drm/mgag200/Kconfig
@@ -4,6 +4,8 @@ config DRM_MGAG200
depends on DRM && PCI && MMU
select DRM_GEM_SHMEM_HELPER
select DRM_KMS_HELPER
+ select I2C
+ select I2C_ALGOBIT
help
This is a KMS driver for Matrox G200 chips. It supports the original
MGA G200 desktop chips and the server variants. It requires 0.3.0
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index 3c9dfdb0b328..871870ddf7ec 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -23,6 +23,7 @@ config DRM_MSM
select SHMEM
select TMPFS
select QCOM_SCM
+ select DEVFREQ_GOV_SIMPLE_ONDEMAND
select WANT_DEV_COREDUMP
select SND_SOC_HDMI_CODEC if SND_SOC
select SYNC_FILE
@@ -140,12 +141,12 @@ config DRM_MSM_DSI_10NM_PHY
Choose this option if DSI PHY on SDM845 is used on the platform.
config DRM_MSM_DSI_7NM_PHY
- bool "Enable DSI 7nm PHY driver in MSM DRM"
+ bool "Enable DSI 7nm/5nm/4nm PHY driver in MSM DRM"
depends on DRM_MSM_DSI
default y
help
- Choose this option if DSI PHY on SM8150/SM8250/SC7280 is used on
- the platform.
+ Choose this option if DSI PHY on SM8150/SM8250/SM8350/SM8450/SM8550/SC7280
+ is used on the platform.
config DRM_MSM_HDMI
bool "Enable HDMI support in MSM DRM driver"
diff --git a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
index 6c9a747eb4ad..c67089a7ebc1 100644
--- a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
@@ -53,6 +53,8 @@ static void a2xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
static bool a2xx_me_init(struct msm_gpu *gpu)
{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a2xx_gpu *a2xx_gpu = to_a2xx_gpu(adreno_gpu);
struct msm_ringbuffer *ring = gpu->rb[0];
OUT_PKT3(ring, CP_ME_INIT, 18);
@@ -84,15 +86,20 @@ static bool a2xx_me_init(struct msm_gpu *gpu)
/* NQ and External Memory Swap */
OUT_RING(ring, 0x00000000);
/* protected mode error checking (0x1f2 is REG_AXXX_CP_INT_CNTL) */
- OUT_RING(ring, 0x200001f2);
+ if (a2xx_gpu->protection_disabled)
+ OUT_RING(ring, 0x00000000);
+ else
+ OUT_RING(ring, 0x200001f2);
/* Disable header dumping and Header dump address */
OUT_RING(ring, 0x00000000);
/* Header dump size */
OUT_RING(ring, 0x00000000);
- /* enable protected mode */
- OUT_PKT3(ring, CP_SET_PROTECTED_MODE, 1);
- OUT_RING(ring, 1);
+ if (!a2xx_gpu->protection_disabled) {
+ /* enable protected mode */
+ OUT_PKT3(ring, CP_SET_PROTECTED_MODE, 1);
+ OUT_RING(ring, 1);
+ }
adreno_flush(gpu, ring, REG_AXXX_CP_RB_WPTR);
return a2xx_idle(gpu);
@@ -101,6 +108,7 @@ static bool a2xx_me_init(struct msm_gpu *gpu)
static int a2xx_hw_init(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a2xx_gpu *a2xx_gpu = to_a2xx_gpu(adreno_gpu);
dma_addr_t pt_base, tran_error;
uint32_t *ptr, len;
int i, ret;
@@ -221,6 +229,17 @@ static int a2xx_hw_init(struct msm_gpu *gpu)
len = adreno_gpu->fw[ADRENO_FW_PM4]->size / 4;
DBG("loading PM4 ucode version: %x", ptr[1]);
+ /*
+ * New firmware files seem to have GPU and firmware version in this
+ * word (0x20xxxx for A200, 0x220xxx for A220, 0x225xxx for A225).
+ * Older firmware files, which lack protection support, have 0 instead.
+ */
+ if (ptr[1] == 0) {
+ dev_warn(gpu->dev->dev,
+ "Legacy firmware detected, disabling protection support\n");
+ a2xx_gpu->protection_disabled = true;
+ }
+
gpu_write(gpu, REG_AXXX_CP_DEBUG,
AXXX_CP_DEBUG_MIU_128BIT_WRITE_ENABLE);
gpu_write(gpu, REG_AXXX_CP_ME_RAM_WADDR, 0);
diff --git a/drivers/gpu/drm/msm/adreno/a2xx_gpu.h b/drivers/gpu/drm/msm/adreno/a2xx_gpu.h
index 02fba2cb8932..161a075f94af 100644
--- a/drivers/gpu/drm/msm/adreno/a2xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a2xx_gpu.h
@@ -15,6 +15,7 @@
struct a2xx_gpu {
struct adreno_gpu base;
bool pm_enabled;
+ bool protection_disabled;
};
#define to_a2xx_gpu(x) container_of(x, struct a2xx_gpu, base)
diff --git a/drivers/gpu/drm/msm/adreno/a6xx.xml.h b/drivers/gpu/drm/msm/adreno/a6xx.xml.h
index beea4a7fc1df..a92788019376 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx.xml.h
@@ -241,6 +241,9 @@ enum a6xx_shader_id {
A6XX_HLSQ_FRONTEND_META = 97,
A6XX_HLSQ_INDIRECT_META = 98,
A6XX_HLSQ_BACKEND_META = 99,
+ A6XX_SP_LB_6_DATA = 112,
+ A6XX_SP_LB_7_DATA = 113,
+ A6XX_HLSQ_INST_RAM_1 = 115,
};
enum a6xx_debugbus_id {
@@ -274,19 +277,32 @@ enum a6xx_debugbus_id {
A6XX_DBGBUS_HLSQ_SPTP = 31,
A6XX_DBGBUS_RB_0 = 32,
A6XX_DBGBUS_RB_1 = 33,
+ A6XX_DBGBUS_RB_2 = 34,
A6XX_DBGBUS_UCHE_WRAPPER = 36,
A6XX_DBGBUS_CCU_0 = 40,
A6XX_DBGBUS_CCU_1 = 41,
+ A6XX_DBGBUS_CCU_2 = 42,
A6XX_DBGBUS_VFD_0 = 56,
A6XX_DBGBUS_VFD_1 = 57,
A6XX_DBGBUS_VFD_2 = 58,
A6XX_DBGBUS_VFD_3 = 59,
+ A6XX_DBGBUS_VFD_4 = 60,
+ A6XX_DBGBUS_VFD_5 = 61,
A6XX_DBGBUS_SP_0 = 64,
A6XX_DBGBUS_SP_1 = 65,
+ A6XX_DBGBUS_SP_2 = 66,
A6XX_DBGBUS_TPL1_0 = 72,
A6XX_DBGBUS_TPL1_1 = 73,
A6XX_DBGBUS_TPL1_2 = 74,
A6XX_DBGBUS_TPL1_3 = 75,
+ A6XX_DBGBUS_TPL1_4 = 76,
+ A6XX_DBGBUS_TPL1_5 = 77,
+ A6XX_DBGBUS_SPTP_0 = 88,
+ A6XX_DBGBUS_SPTP_1 = 89,
+ A6XX_DBGBUS_SPTP_2 = 90,
+ A6XX_DBGBUS_SPTP_3 = 91,
+ A6XX_DBGBUS_SPTP_4 = 92,
+ A6XX_DBGBUS_SPTP_5 = 93,
};
enum a6xx_cp_perfcounter_select {
@@ -1071,6 +1087,8 @@ enum a6xx_tex_type {
#define REG_A6XX_CP_MISC_CNTL 0x00000840
+#define REG_A6XX_CP_CHICKEN_DBG 0x00000841
+
#define REG_A6XX_CP_APRIV_CNTL 0x00000844
#define REG_A6XX_CP_ROQ_THRESHOLDS_1 0x000008c1
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index 3be0f2928b57..aae60cbd9164 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -2028,7 +2028,7 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
* to cause power supply issues:
*/
if (adreno_is_a618(adreno_gpu) || adreno_is_7c3(adreno_gpu))
- gpu->clamp_to_idle = true;
+ priv->gpu_clamp_to_idle = true;
/* Check if there is a GMU phandle and set it up */
node = of_parse_phandle(pdev->dev.of_node, "qcom,gmu", 0);
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
index a023d5f962dc..b7e217d00a22 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
@@ -385,6 +385,9 @@ static void a6xx_get_debugbus(struct msm_gpu *gpu,
nr_debugbus_blocks = ARRAY_SIZE(a6xx_debugbus_blocks) +
(a6xx_has_gbif(to_adreno_gpu(gpu)) ? 1 : 0);
+ if (adreno_is_a650_family(to_adreno_gpu(gpu)))
+ nr_debugbus_blocks += ARRAY_SIZE(a650_debugbus_blocks);
+
a6xx_state->debugbus = state_kcalloc(a6xx_state, nr_debugbus_blocks,
sizeof(*a6xx_state->debugbus));
@@ -411,6 +414,15 @@ static void a6xx_get_debugbus(struct msm_gpu *gpu,
a6xx_state->nr_debugbus += 1;
}
+
+
+ if (adreno_is_a650_family(to_adreno_gpu(gpu))) {
+ for (i = 0; i < ARRAY_SIZE(a650_debugbus_blocks); i++)
+ a6xx_get_debugbus_block(gpu,
+ a6xx_state,
+ &a650_debugbus_blocks[i],
+ &a6xx_state->debugbus[i]);
+ }
}
/* Dump the VBIF debugbus on applicable targets */
@@ -524,10 +536,21 @@ static void a6xx_get_cluster(struct msm_gpu *gpu,
struct a6xx_gpu_state_obj *obj,
struct a6xx_crashdumper *dumper)
{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
u64 *in = dumper->ptr;
u64 out = dumper->iova + A6XX_CD_DATA_OFFSET;
size_t datasize;
int i, regcount = 0;
+ u32 id = cluster->id;
+
+ /* Skip registers that are not present on older generation */
+ if (!adreno_is_a660_family(adreno_gpu) &&
+ cluster->registers == a660_fe_cluster)
+ return;
+
+ if (adreno_is_a650_family(adreno_gpu) &&
+ cluster->registers == a6xx_ps_cluster)
+ id = CLUSTER_VPC_PS;
/* Some clusters need a selector register to be programmed too */
if (cluster->sel_reg)
@@ -537,7 +560,7 @@ static void a6xx_get_cluster(struct msm_gpu *gpu,
int j;
in += CRASHDUMP_WRITE(in, REG_A6XX_CP_APERTURE_CNTL_CD,
- (cluster->id << 8) | (i << 4) | i);
+ (id << 8) | (i << 4) | i);
for (j = 0; j < cluster->count; j += 2) {
int count = RANGE(cluster->registers, j);
@@ -687,6 +710,11 @@ static void a6xx_get_crashdumper_registers(struct msm_gpu *gpu,
u64 out = dumper->iova + A6XX_CD_DATA_OFFSET;
int i, regcount = 0;
+ /* Skip unsupported registers on older generations */
+ if (!adreno_is_a660_family(to_adreno_gpu(gpu)) &&
+ (regs->registers == a660_registers))
+ return;
+
/* Some blocks might need to program a selector register first */
if (regs->val0)
in += CRASHDUMP_WRITE(in, regs->val0, regs->val1);
@@ -721,6 +749,11 @@ static void a6xx_get_ahb_gpu_registers(struct msm_gpu *gpu,
{
int i, regcount = 0, index = 0;
+ /* Skip unsupported registers on older generations */
+ if (!adreno_is_a660_family(to_adreno_gpu(gpu)) &&
+ (regs->registers == a660_registers))
+ return;
+
for (i = 0; i < regs->count; i += 2)
regcount += RANGE(regs->registers, i);
@@ -909,15 +942,24 @@ static void a6xx_get_registers(struct msm_gpu *gpu,
dumper);
}
+static u32 a6xx_get_cp_roq_size(struct msm_gpu *gpu)
+{
+ /* The value at [16:31] is in 4dword units. Convert it to dwords */
+ return gpu_read(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_2) >> 14;
+}
+
/* Read a block of data from an indexed register pair */
static void a6xx_get_indexed_regs(struct msm_gpu *gpu,
struct a6xx_gpu_state *a6xx_state,
- const struct a6xx_indexed_registers *indexed,
+ struct a6xx_indexed_registers *indexed,
struct a6xx_gpu_state_obj *obj)
{
int i;
obj->handle = (const void *) indexed;
+ if (indexed->count_fn)
+ indexed->count = indexed->count_fn(gpu);
+
obj->data = state_kcalloc(a6xx_state, indexed->count, sizeof(u32));
if (!obj->data)
return;
@@ -946,6 +988,21 @@ static void a6xx_get_indexed_registers(struct msm_gpu *gpu,
a6xx_get_indexed_regs(gpu, a6xx_state, &a6xx_indexed_reglist[i],
&a6xx_state->indexed_regs[i]);
+ if (adreno_is_a650_family(to_adreno_gpu(gpu))) {
+ u32 val;
+
+ val = gpu_read(gpu, REG_A6XX_CP_CHICKEN_DBG);
+ gpu_write(gpu, REG_A6XX_CP_CHICKEN_DBG, val | 4);
+
+ /* Get the contents of the CP mempool */
+ a6xx_get_indexed_regs(gpu, a6xx_state, &a6xx_cp_mempool_indexed,
+ &a6xx_state->indexed_regs[i]);
+
+ gpu_write(gpu, REG_A6XX_CP_CHICKEN_DBG, val);
+ a6xx_state->nr_indexed_regs = count;
+ return;
+ }
+
/* Set the CP mempool size to 0 to stabilize it while dumping */
mempool_size = gpu_read(gpu, REG_A6XX_CP_MEM_POOL_SIZE);
gpu_write(gpu, REG_A6XX_CP_MEM_POOL_SIZE, 0);
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
index 2fb58b7098e4..790f55e24533 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h
@@ -36,16 +36,21 @@ static const u32 a6xx_fe_cluster[] = {
0xa00e, 0xa0ef, 0xa0f8, 0xa0f8,
};
+static const u32 a660_fe_cluster[] = {
+ 0x9807, 0x9807,
+};
+
static const u32 a6xx_pc_vs_cluster[] = {
0x9100, 0x9108, 0x9300, 0x9306, 0x9980, 0x9981, 0x9b00, 0x9b07,
};
-#define CLUSTER_FE 0
-#define CLUSTER_SP_VS 1
-#define CLUSTER_PC_VS 2
-#define CLUSTER_GRAS 3
-#define CLUSTER_SP_PS 4
-#define CLUSTER_PS 5
+#define CLUSTER_FE 0
+#define CLUSTER_SP_VS 1
+#define CLUSTER_PC_VS 2
+#define CLUSTER_GRAS 3
+#define CLUSTER_SP_PS 4
+#define CLUSTER_PS 5
+#define CLUSTER_VPC_PS 6
#define CLUSTER(_id, _reg, _sel_reg, _sel_val) \
{ .id = _id, .name = #_id,\
@@ -67,6 +72,7 @@ static const struct a6xx_cluster {
CLUSTER(CLUSTER_PS, a6xx_ps_cluster, 0, 0),
CLUSTER(CLUSTER_FE, a6xx_fe_cluster, 0, 0),
CLUSTER(CLUSTER_PC_VS, a6xx_pc_vs_cluster, 0, 0),
+ CLUSTER(CLUSTER_FE, a660_fe_cluster, 0, 0),
};
static const u32 a6xx_sp_vs_hlsq_cluster[] = {
@@ -105,7 +111,7 @@ static const u32 a6xx_sp_ps_hlsq_2d_cluster[] = {
static const u32 a6xx_sp_ps_sp_cluster[] = {
0xa980, 0xa9a8, 0xa9b0, 0xa9bc, 0xa9d0, 0xa9d3, 0xa9e0, 0xa9f3,
- 0xaa00, 0xaa00, 0xaa30, 0xaa31,
+ 0xaa00, 0xaa00, 0xaa30, 0xaa31, 0xaaf2, 0xaaf2,
};
static const u32 a6xx_sp_ps_sp_2d_cluster[] = {
@@ -229,6 +235,9 @@ static const struct a6xx_shader_block {
SHADER(A6XX_HLSQ_DATAPATH_META, 0x40),
SHADER(A6XX_HLSQ_FRONTEND_META, 0x40),
SHADER(A6XX_HLSQ_INDIRECT_META, 0x40),
+ SHADER(A6XX_SP_LB_6_DATA, 0x200),
+ SHADER(A6XX_SP_LB_7_DATA, 0x200),
+ SHADER(A6XX_HLSQ_INST_RAM_1, 0x200),
};
static const u32 a6xx_rb_rac_registers[] = {
@@ -251,7 +260,7 @@ static const u32 a6xx_registers[] = {
0x0540, 0x0555,
/* CP */
0x0800, 0x0808, 0x0810, 0x0813, 0x0820, 0x0821, 0x0823, 0x0824,
- 0x0826, 0x0827, 0x0830, 0x0833, 0x0840, 0x0843, 0x084f, 0x086f,
+ 0x0826, 0x0827, 0x0830, 0x0833, 0x0840, 0x0845, 0x084f, 0x086f,
0x0880, 0x088a, 0x08a0, 0x08ab, 0x08c0, 0x08c4, 0x08d0, 0x08dd,
0x08f0, 0x08f3, 0x0900, 0x0903, 0x0908, 0x0911, 0x0928, 0x093e,
0x0942, 0x094d, 0x0980, 0x0984, 0x098d, 0x0996, 0x0998, 0x099e,
@@ -274,6 +283,13 @@ static const u32 a6xx_registers[] = {
/* VFD */
0xa600, 0xa601, 0xa603, 0xa603, 0xa60a, 0xa60a, 0xa610, 0xa617,
0xa630, 0xa630,
+ /* HLSQ */
+ 0xd002, 0xd003,
+};
+
+static const u32 a660_registers[] = {
+ /* UCHE */
+ 0x0e3c, 0x0e3c,
};
#define REGS(_array, _sel_reg, _sel_val) \
@@ -282,6 +298,7 @@ static const u32 a6xx_registers[] = {
static const struct a6xx_registers a6xx_reglist[] = {
REGS(a6xx_registers, 0, 0),
+ REGS(a660_registers, 0, 0),
REGS(a6xx_rb_rac_registers, REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD, 0),
REGS(a6xx_rb_rbp_registers, REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD, 9),
};
@@ -366,25 +383,28 @@ static const struct a6xx_registers a6xx_gmu_reglist[] = {
REGS(a6xx_gmu_gx_registers, 0, 0),
};
-static const struct a6xx_indexed_registers {
+static u32 a6xx_get_cp_roq_size(struct msm_gpu *gpu);
+
+static struct a6xx_indexed_registers {
const char *name;
u32 addr;
u32 data;
u32 count;
+ u32 (*count_fn)(struct msm_gpu *gpu);
} a6xx_indexed_reglist[] = {
{ "CP_SQE_STAT", REG_A6XX_CP_SQE_STAT_ADDR,
- REG_A6XX_CP_SQE_STAT_DATA, 0x33 },
+ REG_A6XX_CP_SQE_STAT_DATA, 0x33, NULL },
{ "CP_DRAW_STATE", REG_A6XX_CP_DRAW_STATE_ADDR,
- REG_A6XX_CP_DRAW_STATE_DATA, 0x100 },
+ REG_A6XX_CP_DRAW_STATE_DATA, 0x100, NULL },
{ "CP_UCODE_DBG_DATA", REG_A6XX_CP_SQE_UCODE_DBG_ADDR,
- REG_A6XX_CP_SQE_UCODE_DBG_DATA, 0x6000 },
+ REG_A6XX_CP_SQE_UCODE_DBG_DATA, 0x8000, NULL },
{ "CP_ROQ", REG_A6XX_CP_ROQ_DBG_ADDR,
- REG_A6XX_CP_ROQ_DBG_DATA, 0x400 },
+ REG_A6XX_CP_ROQ_DBG_DATA, 0, a6xx_get_cp_roq_size},
};
-static const struct a6xx_indexed_registers a6xx_cp_mempool_indexed = {
+static struct a6xx_indexed_registers a6xx_cp_mempool_indexed = {
"CP_MEMPOOL", REG_A6XX_CP_MEM_POOL_DBG_ADDR,
- REG_A6XX_CP_MEM_POOL_DBG_DATA, 0x2060,
+ REG_A6XX_CP_MEM_POOL_DBG_DATA, 0x2060, NULL,
};
#define DEBUGBUS(_id, _count) { .id = _id, .name = #_id, .count = _count }
@@ -443,4 +463,20 @@ static const struct a6xx_debugbus_block a6xx_cx_debugbus_blocks[] = {
DEBUGBUS(A6XX_DBGBUS_CX, 0x100),
};
+static const struct a6xx_debugbus_block a650_debugbus_blocks[] = {
+ DEBUGBUS(A6XX_DBGBUS_RB_2, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_CCU_2, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_VFD_4, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_VFD_5, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_SP_2, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_TPL1_4, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_TPL1_5, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_SPTP_0, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_SPTP_1, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_SPTP_2, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_SPTP_3, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_SPTP_4, 0x100),
+ DEBUGBUS(A6XX_DBGBUS_SPTP_5, 0x100),
+};
+
#endif
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 3605f095b2de..817599766329 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -1083,13 +1083,13 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
void adreno_gpu_cleanup(struct adreno_gpu *adreno_gpu)
{
struct msm_gpu *gpu = &adreno_gpu->base;
- struct msm_drm_private *priv = gpu->dev->dev_private;
+ struct msm_drm_private *priv = gpu->dev ? gpu->dev->dev_private : NULL;
unsigned int i;
for (i = 0; i < ARRAY_SIZE(adreno_gpu->info->fw); i++)
release_firmware(adreno_gpu->fw[i]);
- if (pm_runtime_enabled(&priv->gpu_pdev->dev))
+ if (priv && pm_runtime_enabled(&priv->gpu_pdev->dev))
pm_runtime_disable(&priv->gpu_pdev->dev);
msm_gpu_cleanup(&adreno_gpu->base);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
index 13ce321283ff..f29a339a3705 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
@@ -748,7 +748,7 @@ static void _dpu_crtc_setup_cp_blocks(struct drm_crtc *crtc)
int i;
- if (!state->color_mgmt_changed)
+ if (!state->color_mgmt_changed && !drm_atomic_crtc_needs_modeset(state))
return;
for (i = 0; i < cstate->num_mixers; i++) {
@@ -968,7 +968,10 @@ static void dpu_crtc_reset(struct drm_crtc *crtc)
if (crtc->state)
dpu_crtc_destroy_state(crtc, crtc->state);
- __drm_atomic_helper_crtc_reset(crtc, &cstate->base);
+ if (cstate)
+ __drm_atomic_helper_crtc_reset(crtc, &cstate->base);
+ else
+ __drm_atomic_helper_crtc_reset(crtc, NULL);
}
/**
@@ -1150,6 +1153,8 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
bool needs_dirtyfb = dpu_crtc_needs_dirtyfb(crtc_state);
pstates = kzalloc(sizeof(*pstates) * DPU_STAGE_MAX * 4, GFP_KERNEL);
+ if (!pstates)
+ return -ENOMEM;
if (!crtc_state->enable || !crtc_state->active) {
DRM_DEBUG_ATOMIC("crtc%d -> enable %d, active %d, skip atomic_check\n",
@@ -1517,16 +1522,12 @@ DEFINE_SHOW_ATTRIBUTE(dpu_crtc_debugfs_state);
static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
{
struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
- struct dentry *debugfs_root;
-
- debugfs_root = debugfs_create_dir(dpu_crtc->name,
- crtc->dev->primary->debugfs_root);
debugfs_create_file("status", 0400,
- debugfs_root,
+ crtc->debugfs_entry,
dpu_crtc, &_dpu_debugfs_status_fops);
debugfs_create_file("state", 0600,
- debugfs_root,
+ crtc->debugfs_entry,
&dpu_crtc->base,
&dpu_crtc_debugfs_state_fops);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index 9c6817b5a194..758261e8ac73 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -162,6 +162,7 @@ enum dpu_enc_rc_states {
* @vsync_event_work: worker to handle vsync event for autorefresh
* @topology: topology of the display
* @idle_timeout: idle timeout duration in milliseconds
+ * @wide_bus_en: wide bus is enabled on this interface
* @dsc: drm_dsc_config pointer, for DSC-enabled encoders
*/
struct dpu_encoder_virt {
@@ -340,9 +341,7 @@ void dpu_encoder_helper_report_irq_timeout(struct dpu_encoder_phys *phys_enc,
phys_enc->intf_idx - INTF_0, phys_enc->wb_idx - WB_0,
phys_enc->hw_pp->idx - PINGPONG_0, intr_idx);
- if (phys_enc->parent_ops->handle_frame_done)
- phys_enc->parent_ops->handle_frame_done(
- phys_enc->parent, phys_enc,
+ dpu_encoder_frame_done_callback(phys_enc->parent, phys_enc,
DPU_ENCODER_FRAME_EVENT_ERROR);
}
@@ -579,19 +578,18 @@ static struct msm_display_topology dpu_encoder_get_topology(
topology.num_dspp = topology.num_lm;
}
- topology.num_enc = 0;
topology.num_intf = intf_count;
if (dpu_enc->dsc) {
- /* In case of Display Stream Compression (DSC), we would use
- * 2 encoders, 2 layer mixers and 1 interface
+ /*
+ * In case of Display Stream Compression (DSC), we would use
+ * 2 DSC encoders, 2 layer mixers and 1 interface
* this is power optimal and can drive up to (including) 4k
* screens
*/
- topology.num_enc = 2;
topology.num_dsc = 2;
- topology.num_intf = 1;
topology.num_lm = 2;
+ topology.num_intf = 1;
}
return topology;
@@ -1284,7 +1282,7 @@ static enum dpu_wb dpu_encoder_get_wb(const struct dpu_mdss_cfg *catalog,
return WB_MAX;
}
-static void dpu_encoder_vblank_callback(struct drm_encoder *drm_enc,
+void dpu_encoder_vblank_callback(struct drm_encoder *drm_enc,
struct dpu_encoder_phys *phy_enc)
{
struct dpu_encoder_virt *dpu_enc = NULL;
@@ -1306,7 +1304,7 @@ static void dpu_encoder_vblank_callback(struct drm_encoder *drm_enc,
DPU_ATRACE_END("encoder_vblank_callback");
}
-static void dpu_encoder_underrun_callback(struct drm_encoder *drm_enc,
+void dpu_encoder_underrun_callback(struct drm_encoder *drm_enc,
struct dpu_encoder_phys *phy_enc)
{
if (!phy_enc)
@@ -1382,7 +1380,7 @@ void dpu_encoder_register_frame_event_callback(struct drm_encoder *drm_enc,
spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
}
-static void dpu_encoder_frame_done_callback(
+void dpu_encoder_frame_done_callback(
struct drm_encoder *drm_enc,
struct dpu_encoder_phys *ready_phys, u32 event)
{
@@ -1830,6 +1828,9 @@ static void dpu_encoder_dsc_pipe_cfg(struct dpu_hw_dsc *hw_dsc,
if (hw_pp->ops.setup_dsc)
hw_pp->ops.setup_dsc(hw_pp);
+ if (hw_dsc->ops.dsc_bind_pingpong_blk)
+ hw_dsc->ops.dsc_bind_pingpong_blk(hw_dsc, true, hw_pp->idx);
+
if (hw_pp->ops.enable_dsc)
hw_pp->ops.enable_dsc(hw_pp);
}
@@ -2233,12 +2234,6 @@ static int dpu_encoder_virt_add_phys_encs(
return 0;
}
-static const struct dpu_encoder_virt_ops dpu_encoder_parent_ops = {
- .handle_vblank_virt = dpu_encoder_vblank_callback,
- .handle_underrun_virt = dpu_encoder_underrun_callback,
- .handle_frame_done = dpu_encoder_frame_done_callback,
-};
-
static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc,
struct dpu_kms *dpu_kms,
struct msm_display_info *disp_info)
@@ -2258,7 +2253,6 @@ static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc,
memset(&phys_params, 0, sizeof(phys_params));
phys_params.dpu_kms = dpu_kms;
phys_params.parent = &dpu_enc->base;
- phys_params.parent_ops = &dpu_encoder_parent_ops;
phys_params.enc_spinlock = &dpu_enc->enc_spinlock;
switch (disp_info->intf_type) {
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
index f2af07d87f56..1d434b22180d 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
@@ -61,25 +61,6 @@ enum dpu_enc_enable_state {
struct dpu_encoder_phys;
/**
- * struct dpu_encoder_virt_ops - Interface the containing virtual encoder
- * provides for the physical encoders to use to callback.
- * @handle_vblank_virt: Notify virtual encoder of vblank IRQ reception
- * Note: This is called from IRQ handler context.
- * @handle_underrun_virt: Notify virtual encoder of underrun IRQ reception
- * Note: This is called from IRQ handler context.
- * @handle_frame_done: Notify virtual encoder that this phys encoder
- * completes last request frame.
- */
-struct dpu_encoder_virt_ops {
- void (*handle_vblank_virt)(struct drm_encoder *,
- struct dpu_encoder_phys *phys);
- void (*handle_underrun_virt)(struct drm_encoder *,
- struct dpu_encoder_phys *phys);
- void (*handle_frame_done)(struct drm_encoder *,
- struct dpu_encoder_phys *phys, u32 event);
-};
-
-/**
* struct dpu_encoder_phys_ops - Interface the physical encoders provide to
* the containing virtual encoder.
* @late_register: DRM Call. Add Userspace interfaces, debugfs.
@@ -199,7 +180,6 @@ enum dpu_intr_idx {
struct dpu_encoder_phys {
struct drm_encoder *parent;
struct dpu_encoder_phys_ops ops;
- const struct dpu_encoder_virt_ops *parent_ops;
struct dpu_hw_mdp *hw_mdptop;
struct dpu_hw_ctl *hw_ctl;
struct dpu_hw_pingpong *hw_pp;
@@ -283,7 +263,6 @@ struct dpu_encoder_phys_cmd {
struct dpu_enc_phys_init_params {
struct dpu_kms *dpu_kms;
struct drm_encoder *parent;
- const struct dpu_encoder_virt_ops *parent_ops;
enum dpu_enc_split_role split_role;
enum dpu_intf intf_idx;
enum dpu_wb wb_idx;
@@ -400,4 +379,30 @@ int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc,
*/
void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc);
+/**
+ * dpu_encoder_vblank_callback - Notify virtual encoder of vblank IRQ reception
+ * @drm_enc: Pointer to drm encoder structure
+ * @phys_enc: Pointer to physical encoder
+ * Note: This is called from IRQ handler context.
+ */
+void dpu_encoder_vblank_callback(struct drm_encoder *drm_enc,
+ struct dpu_encoder_phys *phy_enc);
+
+/** dpu_encoder_underrun_callback - Notify virtual encoder of underrun IRQ reception
+ * @drm_enc: Pointer to drm encoder structure
+ * @phys_enc: Pointer to physical encoder
+ * Note: This is called from IRQ handler context.
+ */
+void dpu_encoder_underrun_callback(struct drm_encoder *drm_enc,
+ struct dpu_encoder_phys *phy_enc);
+
+/** dpu_encoder_frame_done_callback -- Notify virtual encoder that this phys encoder completes last request frame
+ * @drm_enc: Pointer to drm encoder structure
+ * @phys_enc: Pointer to physical encoder
+ * @event: Event to process
+ */
+void dpu_encoder_frame_done_callback(
+ struct drm_encoder *drm_enc,
+ struct dpu_encoder_phys *ready_phys, u32 event);
+
#endif /* __dpu_encoder_phys_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
index ae28b2b93e69..c8f4a62a9536 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
@@ -61,6 +61,7 @@ static void _dpu_encoder_phys_cmd_update_intf_cfg(
intf_cfg.intf_mode_sel = DPU_CTL_MODE_SEL_CMD;
intf_cfg.stream_sel = cmd_enc->stream_sel;
intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);
+ intf_cfg.dsc = dpu_encoder_helper_get_dsc(phys_enc);
ctl->ops.setup_intf_cfg(ctl, &intf_cfg);
/* setup which pp blk will connect to this intf */
@@ -83,9 +84,7 @@ static void dpu_encoder_phys_cmd_pp_tx_done_irq(void *arg, int irq_idx)
DPU_ATRACE_BEGIN("pp_done_irq");
/* notify all synchronous clients first, then asynchronous clients */
- if (phys_enc->parent_ops->handle_frame_done)
- phys_enc->parent_ops->handle_frame_done(phys_enc->parent,
- phys_enc, event);
+ dpu_encoder_frame_done_callback(phys_enc->parent, phys_enc, event);
spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
new_cnt = atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);
@@ -111,9 +110,7 @@ static void dpu_encoder_phys_cmd_pp_rd_ptr_irq(void *arg, int irq_idx)
DPU_ATRACE_BEGIN("rd_ptr_irq");
cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);
- if (phys_enc->parent_ops->handle_vblank_virt)
- phys_enc->parent_ops->handle_vblank_virt(phys_enc->parent,
- phys_enc);
+ dpu_encoder_vblank_callback(phys_enc->parent, phys_enc);
atomic_add_unless(&cmd_enc->pending_vblank_cnt, -1, 0);
wake_up_all(&cmd_enc->pending_vblank_wq);
@@ -137,9 +134,7 @@ static void dpu_encoder_phys_cmd_underrun_irq(void *arg, int irq_idx)
{
struct dpu_encoder_phys *phys_enc = arg;
- if (phys_enc->parent_ops->handle_underrun_virt)
- phys_enc->parent_ops->handle_underrun_virt(phys_enc->parent,
- phys_enc);
+ dpu_encoder_underrun_callback(phys_enc->parent, phys_enc);
}
static void dpu_encoder_phys_cmd_atomic_mode_set(
@@ -202,9 +197,7 @@ static int _dpu_encoder_phys_cmd_handle_ppdone_timeout(
/* request a ctl reset before the next kickoff */
phys_enc->enable_state = DPU_ENC_ERR_NEEDS_HW_RESET;
- if (phys_enc->parent_ops->handle_frame_done)
- phys_enc->parent_ops->handle_frame_done(
- drm_enc, phys_enc, frame_event);
+ dpu_encoder_frame_done_callback(phys_enc->parent, phys_enc, frame_event);
return -ETIMEDOUT;
}
@@ -780,7 +773,6 @@ struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(
dpu_encoder_phys_cmd_init_ops(&phys_enc->ops);
phys_enc->parent = p->parent;
- phys_enc->parent_ops = p->parent_ops;
phys_enc->dpu_kms = p->dpu_kms;
phys_enc->split_role = p->split_role;
phys_enc->intf_mode = INTF_MODE_CMD;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
index 0f71e8fe7be7..48c48106b16a 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
@@ -274,6 +274,7 @@ static void dpu_encoder_phys_vid_setup_timing_engine(
intf_cfg.intf_mode_sel = DPU_CTL_MODE_SEL_VID;
intf_cfg.stream_sel = 0; /* Don't care value for video mode */
intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);
+ intf_cfg.dsc = dpu_encoder_helper_get_dsc(phys_enc);
if (phys_enc->hw_pp->merge_3d)
intf_cfg.merge_3d = phys_enc->hw_pp->merge_3d->idx;
@@ -308,9 +309,7 @@ static void dpu_encoder_phys_vid_vblank_irq(void *arg, int irq_idx)
DPU_ATRACE_BEGIN("vblank_irq");
- if (phys_enc->parent_ops->handle_vblank_virt)
- phys_enc->parent_ops->handle_vblank_virt(phys_enc->parent,
- phys_enc);
+ dpu_encoder_vblank_callback(phys_enc->parent, phys_enc);
atomic_read(&phys_enc->pending_kickoff_cnt);
@@ -330,7 +329,7 @@ static void dpu_encoder_phys_vid_vblank_irq(void *arg, int irq_idx)
/* Signal any waiting atomic commit thread */
wake_up_all(&phys_enc->pending_kickoff_wq);
- phys_enc->parent_ops->handle_frame_done(phys_enc->parent, phys_enc,
+ dpu_encoder_frame_done_callback(phys_enc->parent, phys_enc,
DPU_ENCODER_FRAME_EVENT_DONE);
DPU_ATRACE_END("vblank_irq");
@@ -340,9 +339,7 @@ static void dpu_encoder_phys_vid_underrun_irq(void *arg, int irq_idx)
{
struct dpu_encoder_phys *phys_enc = arg;
- if (phys_enc->parent_ops->handle_underrun_virt)
- phys_enc->parent_ops->handle_underrun_virt(phys_enc->parent,
- phys_enc);
+ dpu_encoder_underrun_callback(phys_enc->parent, phys_enc);
}
static bool dpu_encoder_phys_vid_needs_single_flush(
@@ -700,7 +697,6 @@ struct dpu_encoder_phys *dpu_encoder_phys_vid_init(
dpu_encoder_phys_vid_init_ops(&phys_enc->ops);
phys_enc->parent = p->parent;
- phys_enc->parent_ops = p->parent_ops;
phys_enc->dpu_kms = p->dpu_kms;
phys_enc->split_role = p->split_role;
phys_enc->intf_mode = INTF_MODE_VIDEO;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
index 62f6ff6abf41..bac4aa807b4b 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
@@ -26,6 +26,7 @@
/**
* dpu_encoder_phys_wb_is_master - report wb always as master encoder
+ * @phys_enc: Pointer to physical encoder
*/
static bool dpu_encoder_phys_wb_is_master(struct dpu_encoder_phys *phys_enc)
{
@@ -364,13 +365,9 @@ static void _dpu_encoder_phys_wb_frame_done_helper(void *arg)
DPU_DEBUG("[wb:%d]\n", hw_wb->idx - WB_0);
- if (phys_enc->parent_ops->handle_frame_done)
- phys_enc->parent_ops->handle_frame_done(phys_enc->parent,
- phys_enc, event);
+ dpu_encoder_frame_done_callback(phys_enc->parent, phys_enc, event);
- if (phys_enc->parent_ops->handle_vblank_virt)
- phys_enc->parent_ops->handle_vblank_virt(phys_enc->parent,
- phys_enc);
+ dpu_encoder_vblank_callback(phys_enc->parent, phys_enc);
spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);
@@ -440,9 +437,7 @@ static void _dpu_encoder_phys_wb_handle_wbdone_timeout(
if (wb_enc->wb_conn)
drm_writeback_signal_completion(wb_enc->wb_conn, 0);
- if (phys_enc->parent_ops->handle_frame_done)
- phys_enc->parent_ops->handle_frame_done(
- phys_enc->parent, phys_enc, frame_event);
+ dpu_encoder_frame_done_callback(phys_enc->parent, phys_enc, frame_event);
}
/**
@@ -722,7 +717,6 @@ struct dpu_encoder_phys *dpu_encoder_phys_wb_init(
dpu_encoder_phys_wb_init_ops(&phys_enc->ops);
phys_enc->parent = p->parent;
- phys_enc->parent_ops = p->parent_ops;
phys_enc->dpu_kms = p->dpu_kms;
phys_enc->split_role = p->split_role;
phys_enc->intf_mode = INTF_MODE_WB_LINE;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
index 2196e205efa5..cf053e8f081e 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
@@ -56,7 +56,7 @@
#define MIXER_SDM845_MASK \
(BIT(DPU_MIXER_SOURCESPLIT) | BIT(DPU_DIM_LAYER) | BIT(DPU_MIXER_COMBINED_ALPHA))
-#define MIXER_SC7180_MASK \
+#define MIXER_QCM2290_MASK \
(BIT(DPU_DIM_LAYER) | BIT(DPU_MIXER_COMBINED_ALPHA))
#define PINGPONG_SDM845_MASK BIT(DPU_PINGPONG_DITHER)
@@ -67,6 +67,9 @@
#define CTL_SC7280_MASK \
(BIT(DPU_CTL_ACTIVE_CFG) | BIT(DPU_CTL_FETCH_ACTIVE) | BIT(DPU_CTL_VM_CFG))
+#define CTL_SM8550_MASK \
+ (CTL_SC7280_MASK | BIT(DPU_CTL_HAS_LAYER_EXT4))
+
#define MERGE_3D_SM8150_MASK (0)
#define DSPP_MSM8998_MASK BIT(DPU_DSPP_PCC) | BIT(DPU_DSPP_GC)
@@ -86,7 +89,6 @@
BIT(MDP_INTF1_INTR) | \
BIT(MDP_INTF2_INTR) | \
BIT(MDP_INTF3_INTR) | \
- BIT(MDP_INTF4_INTR) | \
BIT(MDP_AD4_0_INTR) | \
BIT(MDP_AD4_1_INTR))
@@ -112,6 +114,14 @@
BIT(MDP_INTF3_INTR) | \
BIT(MDP_INTF4_INTR))
+#define IRQ_SM8350_MASK (BIT(MDP_SSPP_TOP0_INTR) | \
+ BIT(MDP_SSPP_TOP0_INTR2) | \
+ BIT(MDP_SSPP_TOP0_HIST_INTR) | \
+ BIT(MDP_INTF0_7xxx_INTR) | \
+ BIT(MDP_INTF1_7xxx_INTR) | \
+ BIT(MDP_INTF2_7xxx_INTR) | \
+ BIT(MDP_INTF3_7xxx_INTR))
+
#define IRQ_SC8180X_MASK (BIT(MDP_SSPP_TOP0_INTR) | \
BIT(MDP_SSPP_TOP0_INTR2) | \
BIT(MDP_SSPP_TOP0_HIST_INTR) | \
@@ -124,6 +134,27 @@
BIT(MDP_AD4_0_INTR) | \
BIT(MDP_AD4_1_INTR))
+#define IRQ_SC8280XP_MASK (BIT(MDP_SSPP_TOP0_INTR) | \
+ BIT(MDP_SSPP_TOP0_INTR2) | \
+ BIT(MDP_SSPP_TOP0_HIST_INTR) | \
+ BIT(MDP_INTF0_7xxx_INTR) | \
+ BIT(MDP_INTF1_7xxx_INTR) | \
+ BIT(MDP_INTF2_7xxx_INTR) | \
+ BIT(MDP_INTF3_7xxx_INTR) | \
+ BIT(MDP_INTF4_7xxx_INTR) | \
+ BIT(MDP_INTF5_7xxx_INTR) | \
+ BIT(MDP_INTF6_7xxx_INTR) | \
+ BIT(MDP_INTF7_7xxx_INTR) | \
+ BIT(MDP_INTF8_7xxx_INTR))
+
+#define IRQ_SM8450_MASK (BIT(MDP_SSPP_TOP0_INTR) | \
+ BIT(MDP_SSPP_TOP0_INTR2) | \
+ BIT(MDP_SSPP_TOP0_HIST_INTR) | \
+ BIT(MDP_INTF0_7xxx_INTR) | \
+ BIT(MDP_INTF1_7xxx_INTR) | \
+ BIT(MDP_INTF2_7xxx_INTR) | \
+ BIT(MDP_INTF3_7xxx_INTR))
+
#define WB_SM8250_MASK (BIT(DPU_WB_LINE_MODE) | \
BIT(DPU_WB_UBWC) | \
BIT(DPU_WB_YUV_CONFIG) | \
@@ -326,7 +357,7 @@ static const struct dpu_caps sm6115_dpu_caps = {
.max_mixer_blendstages = 0x4,
.qseed_type = DPU_SSPP_SCALER_QSEED3LITE,
.smart_dma_rev = DPU_SSPP_SMART_DMA_V2, /* TODO: v2.5 */
- .ubwc_version = DPU_HW_UBWC_VER_20,
+ .ubwc_version = DPU_HW_UBWC_VER_10,
.has_dim_layer = true,
.has_idle_pc = true,
.max_linewidth = 2160,
@@ -365,6 +396,20 @@ static const struct dpu_caps sc8180x_dpu_caps = {
.max_vdeci_exp = MAX_VERT_DECIMATION,
};
+static const struct dpu_caps sc8280xp_dpu_caps = {
+ .max_mixer_width = 2560,
+ .max_mixer_blendstages = 11,
+ .qseed_type = DPU_SSPP_SCALER_QSEED3LITE,
+ .smart_dma_rev = DPU_SSPP_SMART_DMA_V2, /* TODO: v2.5 */
+ .ubwc_version = DPU_HW_UBWC_VER_40,
+ .has_src_split = true,
+ .has_dim_layer = true,
+ .has_idle_pc = true,
+ .has_3d_merge = true,
+ .max_linewidth = 5120,
+ .pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE,
+};
+
static const struct dpu_caps sm8250_dpu_caps = {
.max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
.max_mixer_blendstages = 0xb,
@@ -379,6 +424,48 @@ static const struct dpu_caps sm8250_dpu_caps = {
.pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE,
};
+static const struct dpu_caps sm8350_dpu_caps = {
+ .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+ .max_mixer_blendstages = 0xb,
+ .qseed_type = DPU_SSPP_SCALER_QSEED3LITE,
+ .smart_dma_rev = DPU_SSPP_SMART_DMA_V2, /* TODO: v2.5 */
+ .ubwc_version = DPU_HW_UBWC_VER_40,
+ .has_src_split = true,
+ .has_dim_layer = true,
+ .has_idle_pc = true,
+ .has_3d_merge = true,
+ .max_linewidth = 4096,
+ .pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE,
+};
+
+static const struct dpu_caps sm8450_dpu_caps = {
+ .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+ .max_mixer_blendstages = 0xb,
+ .qseed_type = DPU_SSPP_SCALER_QSEED4,
+ .smart_dma_rev = DPU_SSPP_SMART_DMA_V2, /* TODO: v2.5 */
+ .ubwc_version = DPU_HW_UBWC_VER_40,
+ .has_src_split = true,
+ .has_dim_layer = true,
+ .has_idle_pc = true,
+ .has_3d_merge = true,
+ .max_linewidth = 5120,
+ .pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE,
+};
+
+static const struct dpu_caps sm8550_dpu_caps = {
+ .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+ .max_mixer_blendstages = 0xb,
+ .qseed_type = DPU_SSPP_SCALER_QSEED3LITE,
+ .smart_dma_rev = DPU_SSPP_SMART_DMA_V2, /* TODO: v2.5 */
+ .ubwc_version = DPU_HW_UBWC_VER_40,
+ .has_src_split = true,
+ .has_dim_layer = true,
+ .has_idle_pc = true,
+ .has_3d_merge = true,
+ .max_linewidth = 5120,
+ .pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE,
+};
+
static const struct dpu_caps sc7280_dpu_caps = {
.max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
.max_mixer_blendstages = 0x7,
@@ -459,6 +546,8 @@ static const struct dpu_mdp_cfg sc7180_mdp[] = {
.reg_off = 0x2B4, .bit_off = 8},
.clk_ctrls[DPU_CLK_CTRL_CURSOR1] = {
.reg_off = 0x2C4, .bit_off = 8},
+ .clk_ctrls[DPU_CLK_CTRL_WB2] = {
+ .reg_off = 0x3B8, .bit_off = 24},
},
};
@@ -466,7 +555,7 @@ static const struct dpu_mdp_cfg sc8180x_mdp[] = {
{
.name = "top_0", .id = MDP_TOP,
.base = 0x0, .len = 0x45C,
- .features = 0,
+ .features = BIT(DPU_MDP_AUDIO_SELECT),
.highest_bank_bit = 0x3,
.clk_ctrls[DPU_CLK_CTRL_VIG0] = {
.reg_off = 0x2AC, .bit_off = 0},
@@ -493,6 +582,7 @@ static const struct dpu_mdp_cfg sm6115_mdp[] = {
.base = 0x0, .len = 0x494,
.features = 0,
.highest_bank_bit = 0x1,
+ .ubwc_swizzle = 0x7,
.clk_ctrls[DPU_CLK_CTRL_VIG0] = {
.reg_off = 0x2ac, .bit_off = 0},
.clk_ctrls[DPU_CLK_CTRL_DMA0] = {
@@ -506,6 +596,7 @@ static const struct dpu_mdp_cfg sm8250_mdp[] = {
.base = 0x0, .len = 0x494,
.features = 0,
.highest_bank_bit = 0x3, /* TODO: 2 for LP_DDR4 */
+ .ubwc_swizzle = 0x6,
.clk_ctrls[DPU_CLK_CTRL_VIG0] = {
.reg_off = 0x2AC, .bit_off = 0},
.clk_ctrls[DPU_CLK_CTRL_VIG1] = {
@@ -529,11 +620,67 @@ static const struct dpu_mdp_cfg sm8250_mdp[] = {
},
};
+static const struct dpu_mdp_cfg sm8350_mdp[] = {
+ {
+ .name = "top_0", .id = MDP_TOP,
+ .base = 0x0, .len = 0x494,
+ .features = 0,
+ .highest_bank_bit = 0x3, /* TODO: 2 for LP_DDR4 */
+ .clk_ctrls[DPU_CLK_CTRL_VIG0] = {
+ .reg_off = 0x2ac, .bit_off = 0},
+ .clk_ctrls[DPU_CLK_CTRL_VIG1] = {
+ .reg_off = 0x2b4, .bit_off = 0},
+ .clk_ctrls[DPU_CLK_CTRL_VIG2] = {
+ .reg_off = 0x2bc, .bit_off = 0},
+ .clk_ctrls[DPU_CLK_CTRL_VIG3] = {
+ .reg_off = 0x2c4, .bit_off = 0},
+ .clk_ctrls[DPU_CLK_CTRL_DMA0] = {
+ .reg_off = 0x2ac, .bit_off = 8},
+ .clk_ctrls[DPU_CLK_CTRL_DMA1] = {
+ .reg_off = 0x2b4, .bit_off = 8},
+ .clk_ctrls[DPU_CLK_CTRL_CURSOR0] = {
+ .reg_off = 0x2bc, .bit_off = 8},
+ .clk_ctrls[DPU_CLK_CTRL_CURSOR1] = {
+ .reg_off = 0x2c4, .bit_off = 8},
+ .clk_ctrls[DPU_CLK_CTRL_REG_DMA] = {
+ .reg_off = 0x2bc, .bit_off = 20},
+ },
+};
+
+static const struct dpu_mdp_cfg sm8450_mdp[] = {
+ {
+ .name = "top_0", .id = MDP_TOP,
+ .base = 0x0, .len = 0x494,
+ .features = BIT(DPU_MDP_PERIPH_0_REMOVED),
+ .highest_bank_bit = 0x3, /* TODO: 2 for LP_DDR4 */
+ .ubwc_swizzle = 0x6,
+ .clk_ctrls[DPU_CLK_CTRL_VIG0] = {
+ .reg_off = 0x2AC, .bit_off = 0},
+ .clk_ctrls[DPU_CLK_CTRL_VIG1] = {
+ .reg_off = 0x2B4, .bit_off = 0},
+ .clk_ctrls[DPU_CLK_CTRL_VIG2] = {
+ .reg_off = 0x2BC, .bit_off = 0},
+ .clk_ctrls[DPU_CLK_CTRL_VIG3] = {
+ .reg_off = 0x2C4, .bit_off = 0},
+ .clk_ctrls[DPU_CLK_CTRL_DMA0] = {
+ .reg_off = 0x2AC, .bit_off = 8},
+ .clk_ctrls[DPU_CLK_CTRL_DMA1] = {
+ .reg_off = 0x2B4, .bit_off = 8},
+ .clk_ctrls[DPU_CLK_CTRL_CURSOR0] = {
+ .reg_off = 0x2BC, .bit_off = 8},
+ .clk_ctrls[DPU_CLK_CTRL_CURSOR1] = {
+ .reg_off = 0x2C4, .bit_off = 8},
+ .clk_ctrls[DPU_CLK_CTRL_REG_DMA] = {
+ .reg_off = 0x2BC, .bit_off = 20},
+ },
+};
+
static const struct dpu_mdp_cfg sc7280_mdp[] = {
{
.name = "top_0", .id = MDP_TOP,
.base = 0x0, .len = 0x2014,
.highest_bank_bit = 0x1,
+ .ubwc_swizzle = 0x6,
.clk_ctrls[DPU_CLK_CTRL_VIG0] = {
.reg_off = 0x2AC, .bit_off = 0},
.clk_ctrls[DPU_CLK_CTRL_DMA0] = {
@@ -545,6 +692,57 @@ static const struct dpu_mdp_cfg sc7280_mdp[] = {
},
};
+static const struct dpu_mdp_cfg sc8280xp_mdp[] = {
+ {
+ .name = "top_0", .id = MDP_TOP,
+ .base = 0x0, .len = 0x494,
+ .features = 0,
+ .highest_bank_bit = 2,
+ .ubwc_swizzle = 6,
+ .clk_ctrls[DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0},
+ .clk_ctrls[DPU_CLK_CTRL_VIG1] = { .reg_off = 0x2b4, .bit_off = 0},
+ .clk_ctrls[DPU_CLK_CTRL_VIG2] = { .reg_off = 0x2bc, .bit_off = 0},
+ .clk_ctrls[DPU_CLK_CTRL_VIG3] = { .reg_off = 0x2c4, .bit_off = 0},
+ .clk_ctrls[DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8},
+ .clk_ctrls[DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2b4, .bit_off = 8},
+ .clk_ctrls[DPU_CLK_CTRL_CURSOR0] = { .reg_off = 0x2bc, .bit_off = 8},
+ .clk_ctrls[DPU_CLK_CTRL_CURSOR1] = { .reg_off = 0x2c4, .bit_off = 8},
+ .clk_ctrls[DPU_CLK_CTRL_REG_DMA] = { .reg_off = 0x2bc, .bit_off = 20},
+ },
+};
+
+static const struct dpu_mdp_cfg sm8550_mdp[] = {
+ {
+ .name = "top_0", .id = MDP_TOP,
+ .base = 0, .len = 0x494,
+ .features = BIT(DPU_MDP_PERIPH_0_REMOVED),
+ .highest_bank_bit = 0x3, /* TODO: 2 for LP_DDR4 */
+ .ubwc_swizzle = 0x6,
+ .clk_ctrls[DPU_CLK_CTRL_VIG0] = {
+ .reg_off = 0x4330, .bit_off = 0},
+ .clk_ctrls[DPU_CLK_CTRL_VIG1] = {
+ .reg_off = 0x6330, .bit_off = 0},
+ .clk_ctrls[DPU_CLK_CTRL_VIG2] = {
+ .reg_off = 0x8330, .bit_off = 0},
+ .clk_ctrls[DPU_CLK_CTRL_VIG3] = {
+ .reg_off = 0xa330, .bit_off = 0},
+ .clk_ctrls[DPU_CLK_CTRL_DMA0] = {
+ .reg_off = 0x24330, .bit_off = 0},
+ .clk_ctrls[DPU_CLK_CTRL_DMA1] = {
+ .reg_off = 0x26330, .bit_off = 0},
+ .clk_ctrls[DPU_CLK_CTRL_DMA2] = {
+ .reg_off = 0x28330, .bit_off = 0},
+ .clk_ctrls[DPU_CLK_CTRL_DMA3] = {
+ .reg_off = 0x2a330, .bit_off = 0},
+ .clk_ctrls[DPU_CLK_CTRL_CURSOR0] = {
+ .reg_off = 0x2c330, .bit_off = 0},
+ .clk_ctrls[DPU_CLK_CTRL_CURSOR1] = {
+ .reg_off = 0x2e330, .bit_off = 0},
+ .clk_ctrls[DPU_CLK_CTRL_REG_DMA] = {
+ .reg_off = 0x2bc, .bit_off = 20},
+ },
+};
+
static const struct dpu_mdp_cfg qcm2290_mdp[] = {
{
.name = "top_0", .id = MDP_TOP,
@@ -648,6 +846,45 @@ static const struct dpu_ctl_cfg sc7180_ctl[] = {
},
};
+static const struct dpu_ctl_cfg sc8280xp_ctl[] = {
+ {
+ .name = "ctl_0", .id = CTL_0,
+ .base = 0x15000, .len = 0x204,
+ .features = CTL_SC7280_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
+ },
+ {
+ .name = "ctl_1", .id = CTL_1,
+ .base = 0x16000, .len = 0x204,
+ .features = CTL_SC7280_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
+ },
+ {
+ .name = "ctl_2", .id = CTL_2,
+ .base = 0x17000, .len = 0x204,
+ .features = CTL_SC7280_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
+ },
+ {
+ .name = "ctl_3", .id = CTL_3,
+ .base = 0x18000, .len = 0x204,
+ .features = CTL_SC7280_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
+ },
+ {
+ .name = "ctl_4", .id = CTL_4,
+ .base = 0x19000, .len = 0x204,
+ .features = CTL_SC7280_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13),
+ },
+ {
+ .name = "ctl_5", .id = CTL_5,
+ .base = 0x1a000, .len = 0x204,
+ .features = CTL_SC7280_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 23),
+ },
+};
+
static const struct dpu_ctl_cfg sm8150_ctl[] = {
{
.name = "ctl_0", .id = CTL_0,
@@ -687,6 +924,123 @@ static const struct dpu_ctl_cfg sm8150_ctl[] = {
},
};
+static const struct dpu_ctl_cfg sm8350_ctl[] = {
+ {
+ .name = "ctl_0", .id = CTL_0,
+ .base = 0x15000, .len = 0x1e8,
+ .features = BIT(DPU_CTL_SPLIT_DISPLAY) | CTL_SC7280_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
+ },
+ {
+ .name = "ctl_1", .id = CTL_1,
+ .base = 0x16000, .len = 0x1e8,
+ .features = BIT(DPU_CTL_SPLIT_DISPLAY) | CTL_SC7280_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
+ },
+ {
+ .name = "ctl_2", .id = CTL_2,
+ .base = 0x17000, .len = 0x1e8,
+ .features = CTL_SC7280_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
+ },
+ {
+ .name = "ctl_3", .id = CTL_3,
+ .base = 0x18000, .len = 0x1e8,
+ .features = CTL_SC7280_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
+ },
+ {
+ .name = "ctl_4", .id = CTL_4,
+ .base = 0x19000, .len = 0x1e8,
+ .features = CTL_SC7280_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13),
+ },
+ {
+ .name = "ctl_5", .id = CTL_5,
+ .base = 0x1a000, .len = 0x1e8,
+ .features = CTL_SC7280_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 23),
+ },
+};
+
+static const struct dpu_ctl_cfg sm8450_ctl[] = {
+ {
+ .name = "ctl_0", .id = CTL_0,
+ .base = 0x15000, .len = 0x204,
+ .features = BIT(DPU_CTL_ACTIVE_CFG) | BIT(DPU_CTL_SPLIT_DISPLAY) | BIT(DPU_CTL_FETCH_ACTIVE),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
+ },
+ {
+ .name = "ctl_1", .id = CTL_1,
+ .base = 0x16000, .len = 0x204,
+ .features = BIT(DPU_CTL_SPLIT_DISPLAY) | CTL_SC7280_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
+ },
+ {
+ .name = "ctl_2", .id = CTL_2,
+ .base = 0x17000, .len = 0x204,
+ .features = CTL_SC7280_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
+ },
+ {
+ .name = "ctl_3", .id = CTL_3,
+ .base = 0x18000, .len = 0x204,
+ .features = CTL_SC7280_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
+ },
+ {
+ .name = "ctl_4", .id = CTL_4,
+ .base = 0x19000, .len = 0x204,
+ .features = CTL_SC7280_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13),
+ },
+ {
+ .name = "ctl_5", .id = CTL_5,
+ .base = 0x1a000, .len = 0x204,
+ .features = CTL_SC7280_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 23),
+ },
+};
+
+static const struct dpu_ctl_cfg sm8550_ctl[] = {
+ {
+ .name = "ctl_0", .id = CTL_0,
+ .base = 0x15000, .len = 0x290,
+ .features = CTL_SM8550_MASK | BIT(DPU_CTL_SPLIT_DISPLAY),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 9),
+ },
+ {
+ .name = "ctl_1", .id = CTL_1,
+ .base = 0x16000, .len = 0x290,
+ .features = CTL_SM8550_MASK | BIT(DPU_CTL_SPLIT_DISPLAY),
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 10),
+ },
+ {
+ .name = "ctl_2", .id = CTL_2,
+ .base = 0x17000, .len = 0x290,
+ .features = CTL_SM8550_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 11),
+ },
+ {
+ .name = "ctl_3", .id = CTL_3,
+ .base = 0x18000, .len = 0x290,
+ .features = CTL_SM8550_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 12),
+ },
+ {
+ .name = "ctl_4", .id = CTL_4,
+ .base = 0x19000, .len = 0x290,
+ .features = CTL_SM8550_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 13),
+ },
+ {
+ .name = "ctl_5", .id = CTL_5,
+ .base = 0x1a000, .len = 0x290,
+ .features = CTL_SM8550_MASK,
+ .intr_start = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 23),
+ },
+};
+
static const struct dpu_ctl_cfg sc7280_ctl[] = {
{
.name = "ctl_0", .id = CTL_0,
@@ -915,6 +1269,68 @@ static const struct dpu_sspp_cfg sm8250_sspp[] = {
sdm845_dma_sblk_3, 13, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR1),
};
+static const struct dpu_sspp_sub_blks sm8450_vig_sblk_0 =
+ _VIG_SBLK("0", 5, DPU_SSPP_SCALER_QSEED3LITE);
+static const struct dpu_sspp_sub_blks sm8450_vig_sblk_1 =
+ _VIG_SBLK("1", 6, DPU_SSPP_SCALER_QSEED3LITE);
+static const struct dpu_sspp_sub_blks sm8450_vig_sblk_2 =
+ _VIG_SBLK("2", 7, DPU_SSPP_SCALER_QSEED3LITE);
+static const struct dpu_sspp_sub_blks sm8450_vig_sblk_3 =
+ _VIG_SBLK("3", 8, DPU_SSPP_SCALER_QSEED3LITE);
+
+static const struct dpu_sspp_cfg sm8450_sspp[] = {
+ SSPP_BLK("sspp_0", SSPP_VIG0, 0x4000, VIG_SC7180_MASK,
+ sm8450_vig_sblk_0, 0, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG0),
+ SSPP_BLK("sspp_1", SSPP_VIG1, 0x6000, VIG_SC7180_MASK,
+ sm8450_vig_sblk_1, 4, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG1),
+ SSPP_BLK("sspp_2", SSPP_VIG2, 0x8000, VIG_SC7180_MASK,
+ sm8450_vig_sblk_2, 8, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG2),
+ SSPP_BLK("sspp_3", SSPP_VIG3, 0xa000, VIG_SC7180_MASK,
+ sm8450_vig_sblk_3, 12, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG3),
+ SSPP_BLK("sspp_8", SSPP_DMA0, 0x24000, DMA_SDM845_MASK,
+ sdm845_dma_sblk_0, 1, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA0),
+ SSPP_BLK("sspp_9", SSPP_DMA1, 0x26000, DMA_SDM845_MASK,
+ sdm845_dma_sblk_1, 5, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA1),
+ SSPP_BLK("sspp_10", SSPP_DMA2, 0x28000, DMA_CURSOR_SDM845_MASK,
+ sdm845_dma_sblk_2, 9, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR0),
+ SSPP_BLK("sspp_11", SSPP_DMA3, 0x2a000, DMA_CURSOR_SDM845_MASK,
+ sdm845_dma_sblk_3, 13, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR1),
+};
+
+static const struct dpu_sspp_sub_blks sm8550_vig_sblk_0 =
+ _VIG_SBLK("0", 7, DPU_SSPP_SCALER_QSEED3LITE);
+static const struct dpu_sspp_sub_blks sm8550_vig_sblk_1 =
+ _VIG_SBLK("1", 8, DPU_SSPP_SCALER_QSEED3LITE);
+static const struct dpu_sspp_sub_blks sm8550_vig_sblk_2 =
+ _VIG_SBLK("2", 9, DPU_SSPP_SCALER_QSEED3LITE);
+static const struct dpu_sspp_sub_blks sm8550_vig_sblk_3 =
+ _VIG_SBLK("3", 10, DPU_SSPP_SCALER_QSEED3LITE);
+static const struct dpu_sspp_sub_blks sm8550_dma_sblk_4 = _DMA_SBLK("12", 5);
+static const struct dpu_sspp_sub_blks sd8550_dma_sblk_5 = _DMA_SBLK("13", 6);
+
+static const struct dpu_sspp_cfg sm8550_sspp[] = {
+ SSPP_BLK("sspp_0", SSPP_VIG0, 0x4000, VIG_SC7180_MASK,
+ sm8550_vig_sblk_0, 0, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG0),
+ SSPP_BLK("sspp_1", SSPP_VIG1, 0x6000, VIG_SC7180_MASK,
+ sm8550_vig_sblk_1, 4, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG1),
+ SSPP_BLK("sspp_2", SSPP_VIG2, 0x8000, VIG_SC7180_MASK,
+ sm8550_vig_sblk_2, 8, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG2),
+ SSPP_BLK("sspp_3", SSPP_VIG3, 0xa000, VIG_SC7180_MASK,
+ sm8550_vig_sblk_3, 12, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG3),
+ SSPP_BLK("sspp_8", SSPP_DMA0, 0x24000, DMA_SDM845_MASK,
+ sdm845_dma_sblk_0, 1, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA0),
+ SSPP_BLK("sspp_9", SSPP_DMA1, 0x26000, DMA_SDM845_MASK,
+ sdm845_dma_sblk_1, 5, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA1),
+ SSPP_BLK("sspp_10", SSPP_DMA2, 0x28000, DMA_SDM845_MASK,
+ sdm845_dma_sblk_2, 9, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA2),
+ SSPP_BLK("sspp_11", SSPP_DMA3, 0x2a000, DMA_SDM845_MASK,
+ sdm845_dma_sblk_3, 13, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA3),
+ SSPP_BLK("sspp_12", SSPP_DMA4, 0x2c000, DMA_CURSOR_SDM845_MASK,
+ sm8550_dma_sblk_4, 14, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR0),
+ SSPP_BLK("sspp_13", SSPP_DMA5, 0x2e000, DMA_CURSOR_SDM845_MASK,
+ sd8550_dma_sblk_5, 15, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR1),
+};
+
static const struct dpu_sspp_cfg sc7280_sspp[] = {
SSPP_BLK("sspp_0", SSPP_VIG0, 0x4000, VIG_SC7280_MASK,
sc7280_vig_sblk_0, 0, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG0),
@@ -926,6 +1342,33 @@ static const struct dpu_sspp_cfg sc7280_sspp[] = {
sdm845_dma_sblk_2, 9, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR1),
};
+static const struct dpu_sspp_sub_blks sc8280xp_vig_sblk_0 =
+ _VIG_SBLK("0", 5, DPU_SSPP_SCALER_QSEED3LITE);
+static const struct dpu_sspp_sub_blks sc8280xp_vig_sblk_1 =
+ _VIG_SBLK("1", 6, DPU_SSPP_SCALER_QSEED3LITE);
+static const struct dpu_sspp_sub_blks sc8280xp_vig_sblk_2 =
+ _VIG_SBLK("2", 7, DPU_SSPP_SCALER_QSEED3LITE);
+static const struct dpu_sspp_sub_blks sc8280xp_vig_sblk_3 =
+ _VIG_SBLK("3", 8, DPU_SSPP_SCALER_QSEED3LITE);
+
+static const struct dpu_sspp_cfg sc8280xp_sspp[] = {
+ SSPP_BLK("sspp_0", SSPP_VIG0, 0x4000, VIG_SM8250_MASK,
+ sc8280xp_vig_sblk_0, 0, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG0),
+ SSPP_BLK("sspp_1", SSPP_VIG1, 0x6000, VIG_SM8250_MASK,
+ sc8280xp_vig_sblk_1, 4, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG1),
+ SSPP_BLK("sspp_2", SSPP_VIG2, 0x8000, VIG_SM8250_MASK,
+ sc8280xp_vig_sblk_2, 8, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG2),
+ SSPP_BLK("sspp_3", SSPP_VIG3, 0xa000, VIG_SM8250_MASK,
+ sc8280xp_vig_sblk_3, 12, SSPP_TYPE_VIG, DPU_CLK_CTRL_VIG3),
+ SSPP_BLK("sspp_8", SSPP_DMA0, 0x24000, DMA_SDM845_MASK,
+ sdm845_dma_sblk_0, 1, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA0),
+ SSPP_BLK("sspp_9", SSPP_DMA1, 0x26000, DMA_SDM845_MASK,
+ sdm845_dma_sblk_1, 5, SSPP_TYPE_DMA, DPU_CLK_CTRL_DMA1),
+ SSPP_BLK("sspp_10", SSPP_DMA2, 0x28000, DMA_CURSOR_SDM845_MASK,
+ sdm845_dma_sblk_2, 9, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR0),
+ SSPP_BLK("sspp_11", SSPP_DMA3, 0x2a000, DMA_CURSOR_SDM845_MASK,
+ sdm845_dma_sblk_3, 13, SSPP_TYPE_DMA, DPU_CLK_CTRL_CURSOR1),
+};
#define _VIG_SBLK_NOSCALE(num, sdma_pri) \
{ \
@@ -1028,12 +1471,23 @@ static const struct dpu_lm_sub_blks sc7180_lm_sblk = {
};
static const struct dpu_lm_cfg sc7180_lm[] = {
- LM_BLK("lm_0", LM_0, 0x44000, MIXER_SC7180_MASK,
+ LM_BLK("lm_0", LM_0, 0x44000, MIXER_SDM845_MASK,
&sc7180_lm_sblk, PINGPONG_0, LM_1, DSPP_0),
- LM_BLK("lm_1", LM_1, 0x45000, MIXER_SC7180_MASK,
+ LM_BLK("lm_1", LM_1, 0x45000, MIXER_SDM845_MASK,
&sc7180_lm_sblk, PINGPONG_1, LM_0, 0),
};
+/* SC8280XP */
+
+static const struct dpu_lm_cfg sc8280xp_lm[] = {
+ LM_BLK("lm_0", LM_0, 0x44000, MIXER_SDM845_MASK, &sdm845_lm_sblk, PINGPONG_0, LM_1, DSPP_0),
+ LM_BLK("lm_1", LM_1, 0x45000, MIXER_SDM845_MASK, &sdm845_lm_sblk, PINGPONG_1, LM_0, DSPP_1),
+ LM_BLK("lm_2", LM_2, 0x46000, MIXER_SDM845_MASK, &sdm845_lm_sblk, PINGPONG_2, LM_3, DSPP_2),
+ LM_BLK("lm_3", LM_3, 0x47000, MIXER_SDM845_MASK, &sdm845_lm_sblk, PINGPONG_3, LM_2, DSPP_3),
+ LM_BLK("lm_4", LM_4, 0x48000, MIXER_SDM845_MASK, &sdm845_lm_sblk, PINGPONG_4, LM_5, 0),
+ LM_BLK("lm_5", LM_5, 0x49000, MIXER_SDM845_MASK, &sdm845_lm_sblk, PINGPONG_5, LM_4, 0),
+};
+
/* SM8150 */
static const struct dpu_lm_cfg sm8150_lm[] = {
@@ -1052,11 +1506,11 @@ static const struct dpu_lm_cfg sm8150_lm[] = {
};
static const struct dpu_lm_cfg sc7280_lm[] = {
- LM_BLK("lm_0", LM_0, 0x44000, MIXER_SC7180_MASK,
+ LM_BLK("lm_0", LM_0, 0x44000, MIXER_SDM845_MASK,
&sc7180_lm_sblk, PINGPONG_0, 0, DSPP_0),
- LM_BLK("lm_2", LM_2, 0x46000, MIXER_SC7180_MASK,
+ LM_BLK("lm_2", LM_2, 0x46000, MIXER_SDM845_MASK,
&sc7180_lm_sblk, PINGPONG_2, LM_3, 0),
- LM_BLK("lm_3", LM_3, 0x47000, MIXER_SC7180_MASK,
+ LM_BLK("lm_3", LM_3, 0x47000, MIXER_SDM845_MASK,
&sc7180_lm_sblk, PINGPONG_3, LM_2, 0),
};
@@ -1071,7 +1525,7 @@ static const struct dpu_lm_sub_blks qcm2290_lm_sblk = {
};
static const struct dpu_lm_cfg qcm2290_lm[] = {
- LM_BLK("lm_0", LM_0, 0x44000, MIXER_SC7180_MASK,
+ LM_BLK("lm_0", LM_0, 0x44000, MIXER_QCM2290_MASK,
&qcm2290_lm_sblk, PINGPONG_0, 0, DSPP_0),
};
@@ -1151,6 +1605,16 @@ static const struct dpu_pingpong_sub_blks sc7280_pp_sblk = {
.len = 0x20, .version = 0x20000},
};
+#define PP_BLK_DIPHER(_name, _id, _base, _merge_3d, _sblk, _done, _rdptr) \
+ {\
+ .name = _name, .id = _id, \
+ .base = _base, .len = 0, \
+ .features = BIT(DPU_PINGPONG_DITHER), \
+ .merge_3d = _merge_3d, \
+ .sblk = &_sblk, \
+ .intr_done = _done, \
+ .intr_rdptr = _rdptr, \
+ }
#define PP_BLK_TE(_name, _id, _base, _merge_3d, _sblk, _done, _rdptr) \
{\
.name = _name, .id = _id, \
@@ -1192,6 +1656,21 @@ static struct dpu_pingpong_cfg sc7180_pp[] = {
PP_BLK_TE("pingpong_1", PINGPONG_1, 0x70800, 0, sdm845_pp_sblk_te, -1, -1),
};
+static struct dpu_pingpong_cfg sc8280xp_pp[] = {
+ PP_BLK_TE("pingpong_0", PINGPONG_0, 0x69000, MERGE_3D_0, sdm845_pp_sblk_te,
+ DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8), -1),
+ PP_BLK_TE("pingpong_1", PINGPONG_1, 0x6a000, MERGE_3D_0, sdm845_pp_sblk_te,
+ DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9), -1),
+ PP_BLK_TE("pingpong_2", PINGPONG_2, 0x6b000, MERGE_3D_1, sdm845_pp_sblk_te,
+ DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10), -1),
+ PP_BLK_TE("pingpong_3", PINGPONG_3, 0x6c000, MERGE_3D_1, sdm845_pp_sblk_te,
+ DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11), -1),
+ PP_BLK_TE("pingpong_4", PINGPONG_4, 0x6d000, MERGE_3D_2, sdm845_pp_sblk_te,
+ DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30), -1),
+ PP_BLK_TE("pingpong_5", PINGPONG_5, 0x6e000, MERGE_3D_2, sdm845_pp_sblk_te,
+ DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31), -1),
+};
+
static const struct dpu_pingpong_cfg sm8150_pp[] = {
PP_BLK_TE("pingpong_0", PINGPONG_0, 0x70000, MERGE_3D_0, sdm845_pp_sblk_te,
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
@@ -1213,6 +1692,27 @@ static const struct dpu_pingpong_cfg sm8150_pp[] = {
-1),
};
+static const struct dpu_pingpong_cfg sm8350_pp[] = {
+ PP_BLK_TE("pingpong_0", PINGPONG_0, 0x69000, MERGE_3D_0, sdm845_pp_sblk_te,
+ DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
+ DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12)),
+ PP_BLK_TE("pingpong_1", PINGPONG_1, 0x6a000, MERGE_3D_0, sdm845_pp_sblk_te,
+ DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
+ DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13)),
+ PP_BLK("pingpong_2", PINGPONG_2, 0x6b000, MERGE_3D_1, sdm845_pp_sblk,
+ DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
+ DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 14)),
+ PP_BLK("pingpong_3", PINGPONG_3, 0x6c000, MERGE_3D_1, sdm845_pp_sblk,
+ DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
+ DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 15)),
+ PP_BLK("pingpong_4", PINGPONG_4, 0x6d000, MERGE_3D_2, sdm845_pp_sblk,
+ DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30),
+ -1),
+ PP_BLK("pingpong_5", PINGPONG_5, 0x6e000, MERGE_3D_2, sdm845_pp_sblk,
+ DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
+ -1),
+};
+
static const struct dpu_pingpong_cfg sc7280_pp[] = {
PP_BLK("pingpong_0", PINGPONG_0, 0x59000, 0, sc7280_pp_sblk, -1, -1),
PP_BLK("pingpong_1", PINGPONG_1, 0x6a000, 0, sc7280_pp_sblk, -1, -1),
@@ -1226,6 +1726,61 @@ static struct dpu_pingpong_cfg qcm2290_pp[] = {
DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12)),
};
+/* FIXME: interrupts */
+static const struct dpu_pingpong_cfg sm8450_pp[] = {
+ PP_BLK_TE("pingpong_0", PINGPONG_0, 0x69000, MERGE_3D_0, sdm845_pp_sblk_te,
+ DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
+ DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12)),
+ PP_BLK_TE("pingpong_1", PINGPONG_1, 0x6a000, MERGE_3D_0, sdm845_pp_sblk_te,
+ DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
+ DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13)),
+ PP_BLK("pingpong_2", PINGPONG_2, 0x6b000, MERGE_3D_1, sdm845_pp_sblk,
+ DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
+ DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 14)),
+ PP_BLK("pingpong_3", PINGPONG_3, 0x6c000, MERGE_3D_1, sdm845_pp_sblk,
+ DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
+ DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 15)),
+ PP_BLK("pingpong_4", PINGPONG_4, 0x6d000, MERGE_3D_2, sdm845_pp_sblk,
+ DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30),
+ -1),
+ PP_BLK("pingpong_5", PINGPONG_5, 0x6e000, MERGE_3D_2, sdm845_pp_sblk,
+ DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
+ -1),
+ PP_BLK("pingpong_6", PINGPONG_6, 0x65800, MERGE_3D_3, sdm845_pp_sblk,
+ -1,
+ -1),
+ PP_BLK("pingpong_7", PINGPONG_7, 0x65c00, MERGE_3D_3, sdm845_pp_sblk,
+ -1,
+ -1),
+};
+
+static const struct dpu_pingpong_cfg sm8550_pp[] = {
+ PP_BLK_DIPHER("pingpong_0", PINGPONG_0, 0x69000, MERGE_3D_0, sc7280_pp_sblk,
+ DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
+ -1),
+ PP_BLK_DIPHER("pingpong_1", PINGPONG_1, 0x6a000, MERGE_3D_0, sc7280_pp_sblk,
+ DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
+ -1),
+ PP_BLK_DIPHER("pingpong_2", PINGPONG_2, 0x6b000, MERGE_3D_1, sc7280_pp_sblk,
+ DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
+ -1),
+ PP_BLK_DIPHER("pingpong_3", PINGPONG_3, 0x6c000, MERGE_3D_1, sc7280_pp_sblk,
+ DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
+ -1),
+ PP_BLK_DIPHER("pingpong_4", PINGPONG_4, 0x6d000, MERGE_3D_2, sc7280_pp_sblk,
+ DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30),
+ -1),
+ PP_BLK_DIPHER("pingpong_5", PINGPONG_5, 0x6e000, MERGE_3D_2, sc7280_pp_sblk,
+ DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
+ -1),
+ PP_BLK_DIPHER("pingpong_6", PINGPONG_6, 0x66000, MERGE_3D_3, sc7280_pp_sblk,
+ -1,
+ -1),
+ PP_BLK_DIPHER("pingpong_7", PINGPONG_7, 0x66400, MERGE_3D_3, sc7280_pp_sblk,
+ -1,
+ -1),
+};
+
/*************************************************************
* MERGE_3D sub blocks config
*************************************************************/
@@ -1243,21 +1798,48 @@ static const struct dpu_merge_3d_cfg sm8150_merge_3d[] = {
MERGE_3D_BLK("merge_3d_2", MERGE_3D_2, 0x83200),
};
+static const struct dpu_merge_3d_cfg sm8350_merge_3d[] = {
+ MERGE_3D_BLK("merge_3d_0", MERGE_3D_0, 0x4e000),
+ MERGE_3D_BLK("merge_3d_1", MERGE_3D_1, 0x4f000),
+ MERGE_3D_BLK("merge_3d_2", MERGE_3D_2, 0x50000),
+};
+
+static const struct dpu_merge_3d_cfg sm8450_merge_3d[] = {
+ MERGE_3D_BLK("merge_3d_0", MERGE_3D_0, 0x4e000),
+ MERGE_3D_BLK("merge_3d_1", MERGE_3D_1, 0x4f000),
+ MERGE_3D_BLK("merge_3d_2", MERGE_3D_2, 0x50000),
+ MERGE_3D_BLK("merge_3d_3", MERGE_3D_3, 0x65f00),
+};
+
+static const struct dpu_merge_3d_cfg sm8550_merge_3d[] = {
+ MERGE_3D_BLK("merge_3d_0", MERGE_3D_0, 0x4e000),
+ MERGE_3D_BLK("merge_3d_1", MERGE_3D_1, 0x4f000),
+ MERGE_3D_BLK("merge_3d_2", MERGE_3D_2, 0x50000),
+ MERGE_3D_BLK("merge_3d_3", MERGE_3D_3, 0x66700),
+};
+
/*************************************************************
* DSC sub blocks config
*************************************************************/
-#define DSC_BLK(_name, _id, _base) \
+#define DSC_BLK(_name, _id, _base, _features) \
{\
.name = _name, .id = _id, \
.base = _base, .len = 0x140, \
- .features = 0, \
+ .features = _features, \
}
static struct dpu_dsc_cfg sdm845_dsc[] = {
- DSC_BLK("dsc_0", DSC_0, 0x80000),
- DSC_BLK("dsc_1", DSC_1, 0x80400),
- DSC_BLK("dsc_2", DSC_2, 0x80800),
- DSC_BLK("dsc_3", DSC_3, 0x80c00),
+ DSC_BLK("dsc_0", DSC_0, 0x80000, 0),
+ DSC_BLK("dsc_1", DSC_1, 0x80400, 0),
+ DSC_BLK("dsc_2", DSC_2, 0x80800, 0),
+ DSC_BLK("dsc_3", DSC_3, 0x80c00, 0),
+};
+
+static struct dpu_dsc_cfg sm8150_dsc[] = {
+ DSC_BLK("dsc_0", DSC_0, 0x80000, BIT(DPU_DSC_OUTPUT_CTRL)),
+ DSC_BLK("dsc_1", DSC_1, 0x80400, BIT(DPU_DSC_OUTPUT_CTRL)),
+ DSC_BLK("dsc_2", DSC_2, 0x80800, BIT(DPU_DSC_OUTPUT_CTRL)),
+ DSC_BLK("dsc_3", DSC_3, 0x80c00, BIT(DPU_DSC_OUTPUT_CTRL)),
};
/*************************************************************
@@ -1307,6 +1889,13 @@ static const struct dpu_intf_cfg sc7280_intf[] = {
INTF_BLK("intf_5", INTF_5, 0x39000, INTF_DP, MSM_DP_CONTROLLER_1, 24, INTF_SC7280_MASK, MDP_SSPP_TOP0_INTR, 22, 23),
};
+static const struct dpu_intf_cfg sm8350_intf[] = {
+ INTF_BLK("intf_0", INTF_0, 0x34000, INTF_DP, MSM_DP_CONTROLLER_0, 24, INTF_SC7280_MASK, MDP_SSPP_TOP0_INTR, 24, 25),
+ INTF_BLK("intf_1", INTF_1, 0x35000, INTF_DSI, 0, 24, INTF_SC7280_MASK, MDP_SSPP_TOP0_INTR, 26, 27),
+ INTF_BLK("intf_2", INTF_2, 0x36000, INTF_DSI, 1, 24, INTF_SC7280_MASK, MDP_SSPP_TOP0_INTR, 28, 29),
+ INTF_BLK("intf_3", INTF_3, 0x37000, INTF_DP, MSM_DP_CONTROLLER_1, 24, INTF_SC7280_MASK, MDP_SSPP_TOP0_INTR, 30, 31),
+};
+
static const struct dpu_intf_cfg sc8180x_intf[] = {
INTF_BLK("intf_0", INTF_0, 0x6A000, INTF_DP, MSM_DP_CONTROLLER_0, 24, INTF_SC7180_MASK, MDP_SSPP_TOP0_INTR, 24, 25),
INTF_BLK("intf_1", INTF_1, 0x6A800, INTF_DSI, 0, 24, INTF_SC7180_MASK, MDP_SSPP_TOP0_INTR, 26, 27),
@@ -1317,11 +1906,39 @@ static const struct dpu_intf_cfg sc8180x_intf[] = {
INTF_BLK("intf_5", INTF_5, 0x6C800, INTF_DP, MSM_DP_CONTROLLER_2, 24, INTF_SC7180_MASK, MDP_SSPP_TOP0_INTR, 22, 23),
};
+/* TODO: INTF 3, 8 and 7 are used for MST, marked as INTF_NONE for now */
+static const struct dpu_intf_cfg sc8280xp_intf[] = {
+ INTF_BLK("intf_0", INTF_0, 0x34000, INTF_DP, MSM_DP_CONTROLLER_0, 24, INTF_SC7280_MASK, MDP_SSPP_TOP0_INTR, 24, 25),
+ INTF_BLK("intf_1", INTF_1, 0x35000, INTF_DSI, 0, 24, INTF_SC7280_MASK, MDP_SSPP_TOP0_INTR, 26, 27),
+ INTF_BLK("intf_2", INTF_2, 0x36000, INTF_DSI, 1, 24, INTF_SC7280_MASK, MDP_SSPP_TOP0_INTR, 28, 29),
+ INTF_BLK("intf_3", INTF_3, 0x37000, INTF_NONE, MSM_DP_CONTROLLER_0, 24, INTF_SC7280_MASK, MDP_SSPP_TOP0_INTR, 30, 31),
+ INTF_BLK("intf_4", INTF_4, 0x38000, INTF_DP, MSM_DP_CONTROLLER_1, 24, INTF_SC7280_MASK, MDP_SSPP_TOP0_INTR, 20, 21),
+ INTF_BLK("intf_5", INTF_5, 0x39000, INTF_DP, MSM_DP_CONTROLLER_3, 24, INTF_SC7280_MASK, MDP_SSPP_TOP0_INTR, 22, 23),
+ INTF_BLK("intf_6", INTF_6, 0x3a000, INTF_DP, MSM_DP_CONTROLLER_2, 24, INTF_SC7280_MASK, MDP_SSPP_TOP0_INTR, 16, 17),
+ INTF_BLK("intf_7", INTF_7, 0x3b000, INTF_NONE, MSM_DP_CONTROLLER_2, 24, INTF_SC7280_MASK, MDP_SSPP_TOP0_INTR, 18, 19),
+ INTF_BLK("intf_8", INTF_8, 0x3c000, INTF_NONE, MSM_DP_CONTROLLER_1, 24, INTF_SC7280_MASK, MDP_SSPP_TOP0_INTR, 12, 13),
+};
+
static const struct dpu_intf_cfg qcm2290_intf[] = {
INTF_BLK("intf_0", INTF_0, 0x00000, INTF_NONE, 0, 0, 0, 0, 0, 0),
INTF_BLK("intf_1", INTF_1, 0x6A800, INTF_DSI, 0, 24, INTF_SC7180_MASK, MDP_SSPP_TOP0_INTR, 26, 27),
};
+static const struct dpu_intf_cfg sm8450_intf[] = {
+ INTF_BLK("intf_0", INTF_0, 0x34000, INTF_DP, MSM_DP_CONTROLLER_0, 24, INTF_SC7280_MASK, MDP_SSPP_TOP0_INTR, 24, 25),
+ INTF_BLK("intf_1", INTF_1, 0x35000, INTF_DSI, 0, 24, INTF_SC7280_MASK, MDP_SSPP_TOP0_INTR, 26, 27),
+ INTF_BLK("intf_2", INTF_2, 0x36000, INTF_DSI, 1, 24, INTF_SC7280_MASK, MDP_SSPP_TOP0_INTR, 28, 29),
+ INTF_BLK("intf_3", INTF_3, 0x37000, INTF_DP, MSM_DP_CONTROLLER_1, 24, INTF_SC7280_MASK, MDP_SSPP_TOP0_INTR, 30, 31),
+};
+
+static const struct dpu_intf_cfg sm8550_intf[] = {
+ INTF_BLK("intf_0", INTF_0, 0x34000, INTF_DP, MSM_DP_CONTROLLER_0, 24, INTF_SC7280_MASK, MDP_SSPP_TOP0_INTR, 24, 25),
+ /* TODO TE sub-blocks for intf1 & intf2 */
+ INTF_BLK("intf_1", INTF_1, 0x35000, INTF_DSI, 0, 24, INTF_SC7280_MASK, MDP_SSPP_TOP0_INTR, 26, 27),
+ INTF_BLK("intf_2", INTF_2, 0x36000, INTF_DSI, 1, 24, INTF_SC7280_MASK, MDP_SSPP_TOP0_INTR, 28, 29),
+ INTF_BLK("intf_3", INTF_3, 0x37000, INTF_DP, MSM_DP_CONTROLLER_1, 24, INTF_SC7280_MASK, MDP_SSPP_TOP0_INTR, 30, 31),
+};
+
/*************************************************************
* Writeback blocks config
*************************************************************/
@@ -1419,6 +2036,14 @@ static const struct dpu_vbif_cfg sdm845_vbif[] = {
},
};
+static const struct dpu_reg_dma_cfg sc8280xp_regdma = {
+ .base = 0x0,
+ .version = 0x00020000,
+ .trigger_sel_off = 0x119c,
+ .xin_id = 7,
+ .clk_ctrl = DPU_CLK_CTRL_REG_DMA,
+};
+
static const struct dpu_reg_dma_cfg sdm845_regdma = {
.base = 0x0, .version = 0x1, .trigger_sel_off = 0x119c
};
@@ -1435,6 +2060,22 @@ static const struct dpu_reg_dma_cfg sm8250_regdma = {
.clk_ctrl = DPU_CLK_CTRL_REG_DMA,
};
+static const struct dpu_reg_dma_cfg sm8350_regdma = {
+ .base = 0x400,
+ .version = 0x00020000,
+ .trigger_sel_off = 0x119c,
+ .xin_id = 7,
+ .clk_ctrl = DPU_CLK_CTRL_REG_DMA,
+};
+
+static const struct dpu_reg_dma_cfg sm8450_regdma = {
+ .base = 0x0,
+ .version = 0x00020000,
+ .trigger_sel_off = 0x119c,
+ .xin_id = 7,
+ .clk_ctrl = DPU_CLK_CTRL_REG_DMA,
+};
+
/*************************************************************
* PERF data config
*************************************************************/
@@ -1691,6 +2332,33 @@ static const struct dpu_perf_cfg sc8180x_perf_data = {
.min_dram_ib = 800000,
.danger_lut_tbl = {0xf, 0xffff, 0x0},
.qos_lut_tbl = {
+ {.nentry = ARRAY_SIZE(sc7180_qos_linear),
+ .entries = sc7180_qos_linear
+ },
+ {.nentry = ARRAY_SIZE(sc7180_qos_macrotile),
+ .entries = sc7180_qos_macrotile
+ },
+ {.nentry = ARRAY_SIZE(sc7180_qos_nrt),
+ .entries = sc7180_qos_nrt
+ },
+ /* TODO: macrotile-qseed is different from macrotile */
+ },
+ .cdp_cfg = {
+ {.rd_enable = 1, .wr_enable = 1},
+ {.rd_enable = 1, .wr_enable = 0}
+ },
+ .clk_inefficiency_factor = 105,
+ .bw_inefficiency_factor = 120,
+};
+
+static const struct dpu_perf_cfg sc8280xp_perf_data = {
+ .max_bw_low = 13600000,
+ .max_bw_high = 18200000,
+ .min_core_ib = 2500000,
+ .min_llcc_ib = 0,
+ .min_dram_ib = 800000,
+ .danger_lut_tbl = {0xf, 0xffff, 0x0},
+ .qos_lut_tbl = {
{.nentry = ARRAY_SIZE(sc8180x_qos_linear),
.entries = sc8180x_qos_linear
},
@@ -1739,6 +2407,36 @@ static const struct dpu_perf_cfg sm8250_perf_data = {
.bw_inefficiency_factor = 120,
};
+static const struct dpu_perf_cfg sm8450_perf_data = {
+ .max_bw_low = 13600000,
+ .max_bw_high = 18200000,
+ .min_core_ib = 2500000,
+ .min_llcc_ib = 0,
+ .min_dram_ib = 800000,
+ .min_prefill_lines = 35,
+ /* FIXME: lut tables */
+ .danger_lut_tbl = {0x3ffff, 0x3ffff, 0x0},
+ .safe_lut_tbl = {0xfe00, 0xfe00, 0xffff},
+ .qos_lut_tbl = {
+ {.nentry = ARRAY_SIZE(sc7180_qos_linear),
+ .entries = sc7180_qos_linear
+ },
+ {.nentry = ARRAY_SIZE(sc7180_qos_macrotile),
+ .entries = sc7180_qos_macrotile
+ },
+ {.nentry = ARRAY_SIZE(sc7180_qos_nrt),
+ .entries = sc7180_qos_nrt
+ },
+ /* TODO: macrotile-qseed is different from macrotile */
+ },
+ .cdp_cfg = {
+ {.rd_enable = 1, .wr_enable = 1},
+ {.rd_enable = 1, .wr_enable = 0}
+ },
+ .clk_inefficiency_factor = 105,
+ .bw_inefficiency_factor = 120,
+};
+
static const struct dpu_perf_cfg sc7280_perf_data = {
.max_bw_low = 4700000,
.max_bw_high = 8800000,
@@ -1767,6 +2465,36 @@ static const struct dpu_perf_cfg sc7280_perf_data = {
.bw_inefficiency_factor = 120,
};
+static const struct dpu_perf_cfg sm8350_perf_data = {
+ .max_bw_low = 11800000,
+ .max_bw_high = 15500000,
+ .min_core_ib = 2500000,
+ .min_llcc_ib = 0,
+ .min_dram_ib = 800000,
+ .min_prefill_lines = 40,
+ /* FIXME: lut tables */
+ .danger_lut_tbl = {0x3ffff, 0x3ffff, 0x0},
+ .safe_lut_tbl = {0xfe00, 0xfe00, 0xffff},
+ .qos_lut_tbl = {
+ {.nentry = ARRAY_SIZE(sc7180_qos_linear),
+ .entries = sc7180_qos_linear
+ },
+ {.nentry = ARRAY_SIZE(sc7180_qos_macrotile),
+ .entries = sc7180_qos_macrotile
+ },
+ {.nentry = ARRAY_SIZE(sc7180_qos_nrt),
+ .entries = sc7180_qos_nrt
+ },
+ /* TODO: macrotile-qseed is different from macrotile */
+ },
+ .cdp_cfg = {
+ {.rd_enable = 1, .wr_enable = 1},
+ {.rd_enable = 1, .wr_enable = 0}
+ },
+ .clk_inefficiency_factor = 105,
+ .bw_inefficiency_factor = 120,
+};
+
static const struct dpu_perf_cfg qcm2290_perf_data = {
.max_bw_low = 2700000,
.max_bw_high = 2700000,
@@ -1899,6 +2627,8 @@ static const struct dpu_mdss_cfg sm8150_dpu_cfg = {
.mixer = sm8150_lm,
.dspp_count = ARRAY_SIZE(sm8150_dspp),
.dspp = sm8150_dspp,
+ .dsc_count = ARRAY_SIZE(sm8150_dsc),
+ .dsc = sm8150_dsc,
.pingpong_count = ARRAY_SIZE(sm8150_pp),
.pingpong = sm8150_pp,
.merge_3d_count = ARRAY_SIZE(sm8150_merge_3d),
@@ -1937,6 +2667,32 @@ static const struct dpu_mdss_cfg sc8180x_dpu_cfg = {
.mdss_irqs = IRQ_SC8180X_MASK,
};
+static const struct dpu_mdss_cfg sc8280xp_dpu_cfg = {
+ .caps = &sc8280xp_dpu_caps,
+ .mdp_count = ARRAY_SIZE(sc8280xp_mdp),
+ .mdp = sc8280xp_mdp,
+ .ctl_count = ARRAY_SIZE(sc8280xp_ctl),
+ .ctl = sc8280xp_ctl,
+ .sspp_count = ARRAY_SIZE(sc8280xp_sspp),
+ .sspp = sc8280xp_sspp,
+ .mixer_count = ARRAY_SIZE(sc8280xp_lm),
+ .mixer = sc8280xp_lm,
+ .dspp_count = ARRAY_SIZE(sm8150_dspp),
+ .dspp = sm8150_dspp,
+ .pingpong_count = ARRAY_SIZE(sc8280xp_pp),
+ .pingpong = sc8280xp_pp,
+ .merge_3d_count = ARRAY_SIZE(sm8350_merge_3d),
+ .merge_3d = sm8350_merge_3d,
+ .intf_count = ARRAY_SIZE(sc8280xp_intf),
+ .intf = sc8280xp_intf,
+ .vbif_count = ARRAY_SIZE(sdm845_vbif),
+ .vbif = sdm845_vbif,
+ .reg_dma_count = 1,
+ .dma_cfg = &sc8280xp_regdma,
+ .perf = &sc8280xp_perf_data,
+ .mdss_irqs = IRQ_SC8280XP_MASK,
+};
+
static const struct dpu_mdss_cfg sm8250_dpu_cfg = {
.caps = &sm8250_dpu_caps,
.mdp_count = ARRAY_SIZE(sm8250_mdp),
@@ -1949,6 +2705,8 @@ static const struct dpu_mdss_cfg sm8250_dpu_cfg = {
.mixer = sm8150_lm,
.dspp_count = ARRAY_SIZE(sm8150_dspp),
.dspp = sm8150_dspp,
+ .dsc_count = ARRAY_SIZE(sm8150_dsc),
+ .dsc = sm8150_dsc,
.pingpong_count = ARRAY_SIZE(sm8150_pp),
.pingpong = sm8150_pp,
.merge_3d_count = ARRAY_SIZE(sm8150_merge_3d),
@@ -1965,6 +2723,84 @@ static const struct dpu_mdss_cfg sm8250_dpu_cfg = {
.mdss_irqs = IRQ_SM8250_MASK,
};
+static const struct dpu_mdss_cfg sm8350_dpu_cfg = {
+ .caps = &sm8350_dpu_caps,
+ .mdp_count = ARRAY_SIZE(sm8350_mdp),
+ .mdp = sm8350_mdp,
+ .ctl_count = ARRAY_SIZE(sm8350_ctl),
+ .ctl = sm8350_ctl,
+ .sspp_count = ARRAY_SIZE(sm8250_sspp),
+ .sspp = sm8250_sspp,
+ .mixer_count = ARRAY_SIZE(sm8150_lm),
+ .mixer = sm8150_lm,
+ .dspp_count = ARRAY_SIZE(sm8150_dspp),
+ .dspp = sm8150_dspp,
+ .pingpong_count = ARRAY_SIZE(sm8350_pp),
+ .pingpong = sm8350_pp,
+ .merge_3d_count = ARRAY_SIZE(sm8350_merge_3d),
+ .merge_3d = sm8350_merge_3d,
+ .intf_count = ARRAY_SIZE(sm8350_intf),
+ .intf = sm8350_intf,
+ .vbif_count = ARRAY_SIZE(sdm845_vbif),
+ .vbif = sdm845_vbif,
+ .reg_dma_count = 1,
+ .dma_cfg = &sm8350_regdma,
+ .perf = &sm8350_perf_data,
+ .mdss_irqs = IRQ_SM8350_MASK,
+};
+
+static const struct dpu_mdss_cfg sm8450_dpu_cfg = {
+ .caps = &sm8450_dpu_caps,
+ .mdp_count = ARRAY_SIZE(sm8450_mdp),
+ .mdp = sm8450_mdp,
+ .ctl_count = ARRAY_SIZE(sm8450_ctl),
+ .ctl = sm8450_ctl,
+ .sspp_count = ARRAY_SIZE(sm8450_sspp),
+ .sspp = sm8450_sspp,
+ .mixer_count = ARRAY_SIZE(sm8150_lm),
+ .mixer = sm8150_lm,
+ .dspp_count = ARRAY_SIZE(sm8150_dspp),
+ .dspp = sm8150_dspp,
+ .pingpong_count = ARRAY_SIZE(sm8450_pp),
+ .pingpong = sm8450_pp,
+ .merge_3d_count = ARRAY_SIZE(sm8450_merge_3d),
+ .merge_3d = sm8450_merge_3d,
+ .intf_count = ARRAY_SIZE(sm8450_intf),
+ .intf = sm8450_intf,
+ .vbif_count = ARRAY_SIZE(sdm845_vbif),
+ .vbif = sdm845_vbif,
+ .reg_dma_count = 1,
+ .dma_cfg = &sm8450_regdma,
+ .perf = &sm8450_perf_data,
+ .mdss_irqs = IRQ_SM8450_MASK,
+};
+
+static const struct dpu_mdss_cfg sm8550_dpu_cfg = {
+ .caps = &sm8550_dpu_caps,
+ .mdp_count = ARRAY_SIZE(sm8550_mdp),
+ .mdp = sm8550_mdp,
+ .ctl_count = ARRAY_SIZE(sm8550_ctl),
+ .ctl = sm8550_ctl,
+ .sspp_count = ARRAY_SIZE(sm8550_sspp),
+ .sspp = sm8550_sspp,
+ .mixer_count = ARRAY_SIZE(sm8150_lm),
+ .mixer = sm8150_lm,
+ .dspp_count = ARRAY_SIZE(sm8150_dspp),
+ .dspp = sm8150_dspp,
+ .pingpong_count = ARRAY_SIZE(sm8550_pp),
+ .pingpong = sm8550_pp,
+ .merge_3d_count = ARRAY_SIZE(sm8550_merge_3d),
+ .merge_3d = sm8550_merge_3d,
+ .intf_count = ARRAY_SIZE(sm8550_intf),
+ .intf = sm8550_intf,
+ .vbif_count = ARRAY_SIZE(sdm845_vbif),
+ .vbif = sdm845_vbif,
+ .reg_dma_count = 1,
+ .dma_cfg = &sm8450_regdma,
+ .perf = &sm8450_perf_data,
+ .mdss_irqs = IRQ_SM8450_MASK,
+};
+
static const struct dpu_mdss_cfg sc7280_dpu_cfg = {
.caps = &sc7280_dpu_caps,
.mdp_count = ARRAY_SIZE(sc7280_mdp),
@@ -2023,7 +2859,11 @@ static const struct dpu_mdss_hw_cfg_handler cfg_handler[] = {
{ .hw_rev = DPU_HW_VER_620, .dpu_cfg = &sc7180_dpu_cfg},
{ .hw_rev = DPU_HW_VER_630, .dpu_cfg = &sm6115_dpu_cfg},
{ .hw_rev = DPU_HW_VER_650, .dpu_cfg = &qcm2290_dpu_cfg},
+ { .hw_rev = DPU_HW_VER_700, .dpu_cfg = &sm8350_dpu_cfg},
{ .hw_rev = DPU_HW_VER_720, .dpu_cfg = &sc7280_dpu_cfg},
+ { .hw_rev = DPU_HW_VER_800, .dpu_cfg = &sc8280xp_dpu_cfg},
+ { .hw_rev = DPU_HW_VER_810, .dpu_cfg = &sm8450_dpu_cfg},
+ { .hw_rev = DPU_HW_VER_900, .dpu_cfg = &sm8550_dpu_cfg},
};
const struct dpu_mdss_cfg *dpu_hw_catalog_init(u32 hw_rev)
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
index 3b645d5aa9aa..ddab9caebb18 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
@@ -46,7 +46,11 @@
#define DPU_HW_VER_620 DPU_HW_VER(6, 2, 0) /* sc7180 v1.0 */
#define DPU_HW_VER_630 DPU_HW_VER(6, 3, 0) /* sm6115|sm4250 */
#define DPU_HW_VER_650 DPU_HW_VER(6, 5, 0) /* qcm2290|sm4125 */
+#define DPU_HW_VER_700 DPU_HW_VER(7, 0, 0) /* sm8350 */
#define DPU_HW_VER_720 DPU_HW_VER(7, 2, 0) /* sc7280 */
+#define DPU_HW_VER_800 DPU_HW_VER(8, 0, 0) /* sc8280xp */
+#define DPU_HW_VER_810 DPU_HW_VER(8, 1, 0) /* sm8450 */
+#define DPU_HW_VER_900 DPU_HW_VER(9, 0, 0) /* sm8550 */
#define IS_MSM8996_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_170)
#define IS_MSM8998_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_300)
@@ -83,6 +87,8 @@ enum {
* @DPU_MDP_UBWC_1_0, This chipsets supports Universal Bandwidth
* compression initial revision
* @DPU_MDP_UBWC_1_5, Universal Bandwidth compression version 1.5
+ * @DPU_MDP_PERIPH_0_REMOVED Indicates that access to periph top0 block results
+ * in a failure
* @DPU_MDP_MAX Maximum value
*/
@@ -93,6 +99,7 @@ enum {
DPU_MDP_UBWC_1_0,
DPU_MDP_UBWC_1_5,
DPU_MDP_AUDIO_SELECT,
+ DPU_MDP_PERIPH_0_REMOVED,
DPU_MDP_MAX
};
@@ -192,6 +199,7 @@ enum {
* @DPU_CTL_SPLIT_DISPLAY: CTL supports video mode split display
* @DPU_CTL_FETCH_ACTIVE: Active CTL for fetch HW (SSPPs)
* @DPU_CTL_VM_CFG: CTL config to support multiple VMs
+ * @DPU_CTL_HAS_LAYER_EXT4: CTL has the CTL_LAYER_EXT4 register
* @DPU_CTL_MAX
*/
enum {
@@ -199,6 +207,7 @@ enum {
DPU_CTL_ACTIVE_CFG,
DPU_CTL_FETCH_ACTIVE,
DPU_CTL_VM_CFG,
+ DPU_CTL_HAS_LAYER_EXT4,
DPU_CTL_MAX
};
@@ -267,6 +276,15 @@ enum {
};
/**
+ * DSC features
+ * @DPU_DSC_OUTPUT_CTRL Configure which PINGPONG block gets
+ * the pixel output from this DSC.
+ */
+enum {
+ DPU_DSC_OUTPUT_CTRL = 0x1,
+};
+
+/**
* MACRO DPU_HW_BLK_INFO - information of HW blocks inside DPU
* @name: string name for debug purposes
* @id: enum identifying this block
@@ -519,7 +537,6 @@ struct dpu_clk_ctrl_reg {
* @base: register base offset to mdss
* @features bit mask identifying sub-blocks/features
* @highest_bank_bit: UBWC parameter
- * @ubwc_static: ubwc static configuration
* @ubwc_swizzle: ubwc default swizzle setting
* @clk_ctrls clock control register definition
*/
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
index a35ecb6676c8..b88a2f3724e6 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
@@ -17,6 +17,8 @@
(0x70 + (((lm) - LM_0) * 0x004))
#define CTL_LAYER_EXT3(lm) \
(0xA0 + (((lm) - LM_0) * 0x004))
+#define CTL_LAYER_EXT4(lm) \
+ (0xB8 + (((lm) - LM_0) * 0x004))
#define CTL_TOP 0x014
#define CTL_FLUSH 0x018
#define CTL_START 0x01C
@@ -377,12 +379,37 @@ static void dpu_hw_ctl_clear_all_blendstages(struct dpu_hw_ctl *ctx)
DPU_REG_WRITE(c, CTL_FETCH_PIPE_ACTIVE, 0);
}
+struct ctl_blend_config {
+ int idx, shift, ext_shift;
+};
+
+static const struct ctl_blend_config ctl_blend_config[][2] = {
+ [SSPP_NONE] = { { -1 }, { -1 } },
+ [SSPP_MAX] = { { -1 }, { -1 } },
+ [SSPP_VIG0] = { { 0, 0, 0 }, { 3, 0 } },
+ [SSPP_VIG1] = { { 0, 3, 2 }, { 3, 4 } },
+ [SSPP_VIG2] = { { 0, 6, 4 }, { 3, 8 } },
+ [SSPP_VIG3] = { { 0, 26, 6 }, { 3, 12 } },
+ [SSPP_RGB0] = { { 0, 9, 8 }, { -1 } },
+ [SSPP_RGB1] = { { 0, 12, 10 }, { -1 } },
+ [SSPP_RGB2] = { { 0, 15, 12 }, { -1 } },
+ [SSPP_RGB3] = { { 0, 29, 14 }, { -1 } },
+ [SSPP_DMA0] = { { 0, 18, 16 }, { 2, 8 } },
+ [SSPP_DMA1] = { { 0, 21, 18 }, { 2, 12 } },
+ [SSPP_DMA2] = { { 2, 0 }, { 2, 16 } },
+ [SSPP_DMA3] = { { 2, 4 }, { 2, 20 } },
+ [SSPP_DMA4] = { { 4, 0 }, { 4, 8 } },
+ [SSPP_DMA5] = { { 4, 4 }, { 4, 12 } },
+ [SSPP_CURSOR0] = { { 1, 20 }, { -1 } },
+ [SSPP_CURSOR1] = { { 1, 26 }, { -1 } },
+};
+
static void dpu_hw_ctl_setup_blendstage(struct dpu_hw_ctl *ctx,
enum dpu_lm lm, struct dpu_hw_stage_cfg *stage_cfg)
{
struct dpu_hw_blk_reg_map *c = &ctx->hw;
- u32 mixercfg = 0, mixercfg_ext = 0, mix, ext;
- u32 mixercfg_ext2 = 0, mixercfg_ext3 = 0;
+ u32 mix, ext, mix_ext;
+ u32 mixercfg[5] = { 0 };
int i, j;
int stages;
int pipes_per_stage;
@@ -397,7 +424,7 @@ static void dpu_hw_ctl_setup_blendstage(struct dpu_hw_ctl *ctx,
else
pipes_per_stage = 1;
- mixercfg = CTL_MIXER_BORDER_OUT; /* always set BORDER_OUT */
+ mixercfg[0] = CTL_MIXER_BORDER_OUT; /* always set BORDER_OUT */
if (!stage_cfg)
goto exit;
@@ -406,109 +433,35 @@ static void dpu_hw_ctl_setup_blendstage(struct dpu_hw_ctl *ctx,
/* overflow to ext register if 'i + 1 > 7' */
mix = (i + 1) & 0x7;
ext = i >= 7;
+ mix_ext = (i + 1) & 0xf;
for (j = 0 ; j < pipes_per_stage; j++) {
enum dpu_sspp_multirect_index rect_index =
stage_cfg->multirect_index[i][j];
-
- switch (stage_cfg->stage[i][j]) {
- case SSPP_VIG0:
- if (rect_index == DPU_SSPP_RECT_1) {
- mixercfg_ext3 |= ((i + 1) & 0xF) << 0;
- } else {
- mixercfg |= mix << 0;
- mixercfg_ext |= ext << 0;
- }
- break;
- case SSPP_VIG1:
- if (rect_index == DPU_SSPP_RECT_1) {
- mixercfg_ext3 |= ((i + 1) & 0xF) << 4;
- } else {
- mixercfg |= mix << 3;
- mixercfg_ext |= ext << 2;
- }
- break;
- case SSPP_VIG2:
- if (rect_index == DPU_SSPP_RECT_1) {
- mixercfg_ext3 |= ((i + 1) & 0xF) << 8;
- } else {
- mixercfg |= mix << 6;
- mixercfg_ext |= ext << 4;
- }
- break;
- case SSPP_VIG3:
- if (rect_index == DPU_SSPP_RECT_1) {
- mixercfg_ext3 |= ((i + 1) & 0xF) << 12;
- } else {
- mixercfg |= mix << 26;
- mixercfg_ext |= ext << 6;
- }
- break;
- case SSPP_RGB0:
- mixercfg |= mix << 9;
- mixercfg_ext |= ext << 8;
- break;
- case SSPP_RGB1:
- mixercfg |= mix << 12;
- mixercfg_ext |= ext << 10;
- break;
- case SSPP_RGB2:
- mixercfg |= mix << 15;
- mixercfg_ext |= ext << 12;
- break;
- case SSPP_RGB3:
- mixercfg |= mix << 29;
- mixercfg_ext |= ext << 14;
- break;
- case SSPP_DMA0:
- if (rect_index == DPU_SSPP_RECT_1) {
- mixercfg_ext2 |= ((i + 1) & 0xF) << 8;
- } else {
- mixercfg |= mix << 18;
- mixercfg_ext |= ext << 16;
- }
- break;
- case SSPP_DMA1:
- if (rect_index == DPU_SSPP_RECT_1) {
- mixercfg_ext2 |= ((i + 1) & 0xF) << 12;
- } else {
- mixercfg |= mix << 21;
- mixercfg_ext |= ext << 18;
- }
- break;
- case SSPP_DMA2:
- if (rect_index == DPU_SSPP_RECT_1) {
- mixercfg_ext2 |= ((i + 1) & 0xF) << 16;
- } else {
- mix |= (i + 1) & 0xF;
- mixercfg_ext2 |= mix << 0;
- }
- break;
- case SSPP_DMA3:
- if (rect_index == DPU_SSPP_RECT_1) {
- mixercfg_ext2 |= ((i + 1) & 0xF) << 20;
- } else {
- mix |= (i + 1) & 0xF;
- mixercfg_ext2 |= mix << 4;
- }
- break;
- case SSPP_CURSOR0:
- mixercfg_ext |= ((i + 1) & 0xF) << 20;
- break;
- case SSPP_CURSOR1:
- mixercfg_ext |= ((i + 1) & 0xF) << 26;
- break;
- default:
- break;
+ enum dpu_sspp pipe = stage_cfg->stage[i][j];
+ const struct ctl_blend_config *cfg =
+ &ctl_blend_config[pipe][rect_index == DPU_SSPP_RECT_1];
+
+ /*
+ * CTL_LAYER has 3-bit field (and extra bits in EXT register),
+ * all EXT registers has 4-bit fields.
+ */
+ if (cfg->idx == 0) {
+ mixercfg[0] |= mix << cfg->shift;
+ mixercfg[1] |= ext << cfg->ext_shift;
+ } else {
+ mixercfg[cfg->idx] |= mix_ext << cfg->shift;
}
}
}
exit:
- DPU_REG_WRITE(c, CTL_LAYER(lm), mixercfg);
- DPU_REG_WRITE(c, CTL_LAYER_EXT(lm), mixercfg_ext);
- DPU_REG_WRITE(c, CTL_LAYER_EXT2(lm), mixercfg_ext2);
- DPU_REG_WRITE(c, CTL_LAYER_EXT3(lm), mixercfg_ext3);
+ DPU_REG_WRITE(c, CTL_LAYER(lm), mixercfg[0]);
+ DPU_REG_WRITE(c, CTL_LAYER_EXT(lm), mixercfg[1]);
+ DPU_REG_WRITE(c, CTL_LAYER_EXT2(lm), mixercfg[2]);
+ DPU_REG_WRITE(c, CTL_LAYER_EXT3(lm), mixercfg[3]);
+ if ((test_bit(DPU_CTL_HAS_LAYER_EXT4, &ctx->caps->features)))
+ DPU_REG_WRITE(c, CTL_LAYER_EXT4(lm), mixercfg[4]);
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c
index 3662df698dae..619926da1441 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c
@@ -29,6 +29,8 @@
#define DSC_RANGE_MAX_QP 0x0B0
#define DSC_RANGE_BPG_OFFSET 0x0EC
+#define DSC_CTL(m) (0x1800 - 0x3FC * (m - DSC_0))
+
static void dpu_hw_dsc_disable(struct dpu_hw_dsc *dsc)
{
struct dpu_hw_blk_reg_map *c = &dsc->hw;
@@ -150,6 +152,29 @@ static void dpu_hw_dsc_config_thresh(struct dpu_hw_dsc *hw_dsc,
}
}
+static void dpu_hw_dsc_bind_pingpong_blk(
+ struct dpu_hw_dsc *hw_dsc,
+ bool enable,
+ const enum dpu_pingpong pp)
+{
+ struct dpu_hw_blk_reg_map *c = &hw_dsc->hw;
+ int mux_cfg = 0xF;
+ u32 dsc_ctl_offset;
+
+ dsc_ctl_offset = DSC_CTL(hw_dsc->idx);
+
+ if (enable)
+ mux_cfg = (pp - PINGPONG_0) & 0x7;
+
+ DRM_DEBUG_KMS("%s dsc:%d %s pp:%d\n",
+ enable ? "Binding" : "Unbinding",
+ hw_dsc->idx - DSC_0,
+ enable ? "to" : "from",
+ pp - PINGPONG_0);
+
+ DPU_REG_WRITE(c, dsc_ctl_offset, mux_cfg);
+}
+
static struct dpu_dsc_cfg *_dsc_offset(enum dpu_dsc dsc,
const struct dpu_mdss_cfg *m,
void __iomem *addr,
@@ -174,6 +199,8 @@ static void _setup_dsc_ops(struct dpu_hw_dsc_ops *ops,
ops->dsc_disable = dpu_hw_dsc_disable;
ops->dsc_config = dpu_hw_dsc_config;
ops->dsc_config_thresh = dpu_hw_dsc_config_thresh;
+ if (cap & BIT(DPU_DSC_OUTPUT_CTRL))
+ ops->dsc_bind_pingpong_blk = dpu_hw_dsc_bind_pingpong_blk;
};
struct dpu_hw_dsc *dpu_hw_dsc_init(enum dpu_dsc idx, void __iomem *addr,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h
index c0b77fe1a696..ae9b5db53d7f 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h
@@ -42,6 +42,10 @@ struct dpu_hw_dsc_ops {
*/
void (*dsc_config_thresh)(struct dpu_hw_dsc *hw_dsc,
struct drm_dsc_config *dsc);
+
+ void (*dsc_bind_pingpong_blk)(struct dpu_hw_dsc *hw_dsc,
+ bool enable,
+ enum dpu_pingpong pp);
};
struct dpu_hw_dsc {
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
index cf1b6d84c18a..53326f25e40e 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
@@ -35,6 +35,9 @@
#define MDP_INTF_3_OFF_REV_7xxx 0x37000
#define MDP_INTF_4_OFF_REV_7xxx 0x38000
#define MDP_INTF_5_OFF_REV_7xxx 0x39000
+#define MDP_INTF_6_OFF_REV_7xxx 0x3a000
+#define MDP_INTF_7_OFF_REV_7xxx 0x3b000
+#define MDP_INTF_8_OFF_REV_7xxx 0x3c000
/**
* struct dpu_intr_reg - array of DPU register sets
@@ -139,6 +142,21 @@ static const struct dpu_intr_reg dpu_intr_set[] = {
MDP_INTF_5_OFF_REV_7xxx+INTF_INTR_EN,
MDP_INTF_5_OFF_REV_7xxx+INTF_INTR_STATUS
},
+ [MDP_INTF6_7xxx_INTR] = {
+ MDP_INTF_6_OFF_REV_7xxx+INTF_INTR_CLEAR,
+ MDP_INTF_6_OFF_REV_7xxx+INTF_INTR_EN,
+ MDP_INTF_6_OFF_REV_7xxx+INTF_INTR_STATUS
+ },
+ [MDP_INTF7_7xxx_INTR] = {
+ MDP_INTF_7_OFF_REV_7xxx+INTF_INTR_CLEAR,
+ MDP_INTF_7_OFF_REV_7xxx+INTF_INTR_EN,
+ MDP_INTF_7_OFF_REV_7xxx+INTF_INTR_STATUS
+ },
+ [MDP_INTF8_7xxx_INTR] = {
+ MDP_INTF_8_OFF_REV_7xxx+INTF_INTR_CLEAR,
+ MDP_INTF_8_OFF_REV_7xxx+INTF_INTR_EN,
+ MDP_INTF_8_OFF_REV_7xxx+INTF_INTR_STATUS
+ },
};
#define DPU_IRQ_REG(irq_idx) (irq_idx / 32)
@@ -252,9 +270,9 @@ static int dpu_hw_intr_enable_irq_locked(struct dpu_hw_intr *intr, int irq_idx)
cache_irq_mask = intr->cache_irq_mask[reg_idx];
if (cache_irq_mask & DPU_IRQ_MASK(irq_idx)) {
- dbgstr = "DPU IRQ already set:";
+ dbgstr = "already ";
} else {
- dbgstr = "DPU IRQ enabled:";
+ dbgstr = "";
cache_irq_mask |= DPU_IRQ_MASK(irq_idx);
/* Cleaning any pending interrupt */
@@ -268,7 +286,7 @@ static int dpu_hw_intr_enable_irq_locked(struct dpu_hw_intr *intr, int irq_idx)
intr->cache_irq_mask[reg_idx] = cache_irq_mask;
}
- pr_debug("%s MASK:0x%.8lx, CACHE-MASK:0x%.8x\n", dbgstr,
+ pr_debug("DPU IRQ %d %senabled: MASK:0x%.8lx, CACHE-MASK:0x%.8x\n", irq_idx, dbgstr,
DPU_IRQ_MASK(irq_idx), cache_irq_mask);
return 0;
@@ -301,9 +319,9 @@ static int dpu_hw_intr_disable_irq_locked(struct dpu_hw_intr *intr, int irq_idx)
cache_irq_mask = intr->cache_irq_mask[reg_idx];
if ((cache_irq_mask & DPU_IRQ_MASK(irq_idx)) == 0) {
- dbgstr = "DPU IRQ is already cleared:";
+ dbgstr = "already ";
} else {
- dbgstr = "DPU IRQ mask disable:";
+ dbgstr = "";
cache_irq_mask &= ~DPU_IRQ_MASK(irq_idx);
/* Disable interrupts based on the new mask */
@@ -317,7 +335,7 @@ static int dpu_hw_intr_disable_irq_locked(struct dpu_hw_intr *intr, int irq_idx)
intr->cache_irq_mask[reg_idx] = cache_irq_mask;
}
- pr_debug("%s MASK:0x%.8lx, CACHE-MASK:0x%.8x\n", dbgstr,
+ pr_debug("DPU IRQ %d %sdisabled: MASK:0x%.8lx, CACHE-MASK:0x%.8x\n", irq_idx, dbgstr,
DPU_IRQ_MASK(irq_idx), cache_irq_mask);
return 0;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h
index 46443955443c..425465011c80 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h
@@ -31,6 +31,9 @@ enum dpu_hw_intr_reg {
MDP_INTF3_7xxx_INTR,
MDP_INTF4_7xxx_INTR,
MDP_INTF5_7xxx_INTR,
+ MDP_INTF6_7xxx_INTR,
+ MDP_INTF7_7xxx_INTR,
+ MDP_INTF8_7xxx_INTR,
MDP_INTR_MAX,
};
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
index d3b0ed0a9c6c..2d9192a6ce00 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
@@ -120,6 +120,8 @@ enum dpu_sspp {
SSPP_DMA1,
SSPP_DMA2,
SSPP_DMA3,
+ SSPP_DMA4,
+ SSPP_DMA5,
SSPP_CURSOR0,
SSPP_CURSOR1,
SSPP_MAX
@@ -195,6 +197,8 @@ enum dpu_pingpong {
PINGPONG_3,
PINGPONG_4,
PINGPONG_5,
+ PINGPONG_6,
+ PINGPONG_7,
PINGPONG_S0,
PINGPONG_MAX
};
@@ -203,6 +207,7 @@ enum dpu_merge_3d {
MERGE_3D_0 = 1,
MERGE_3D_1,
MERGE_3D_2,
+ MERGE_3D_3,
MERGE_3D_MAX
};
@@ -214,6 +219,8 @@ enum dpu_intf {
INTF_4,
INTF_5,
INTF_6,
+ INTF_7,
+ INTF_8,
INTF_MAX
};
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
index 691c471b08c2..4246ab0b3bee 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
@@ -310,7 +310,11 @@ static void dpu_hw_sspp_setup_format(struct dpu_hw_pipe *ctx,
ctx->mdp->highest_bank_bit << 18);
switch (ctx->catalog->caps->ubwc_version) {
case DPU_HW_UBWC_VER_10:
- /* TODO: UBWC v1 case */
+ fast_clear = fmt->alpha_enable ? BIT(31) : 0;
+ DPU_REG_WRITE(c, SSPP_UBWC_STATIC_CTRL,
+ fast_clear | (ctx->mdp->ubwc_swizzle & 0x1) |
+ BIT(8) |
+ (ctx->mdp->highest_bank_bit << 4));
break;
case DPU_HW_UBWC_VER_20:
fast_clear = fmt->alpha_enable ? BIT(31) : 0;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c
index c3110a25a30d..2bb02e17ee52 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c
@@ -7,40 +7,17 @@
#include "dpu_hw_top.h"
#include "dpu_kms.h"
-#define SSPP_SPARE 0x28
-
#define FLD_SPLIT_DISPLAY_CMD BIT(1)
#define FLD_SMART_PANEL_FREE_RUN BIT(2)
#define FLD_INTF_1_SW_TRG_MUX BIT(4)
#define FLD_INTF_2_SW_TRG_MUX BIT(8)
#define FLD_TE_LINE_INTER_WATERLEVEL_MASK 0xFFFF
-#define DANGER_STATUS 0x360
-#define SAFE_STATUS 0x364
-
-#define TE_LINE_INTERVAL 0x3F4
-
#define TRAFFIC_SHAPER_EN BIT(31)
#define TRAFFIC_SHAPER_RD_CLIENT(num) (0x030 + (num * 4))
#define TRAFFIC_SHAPER_WR_CLIENT(num) (0x060 + (num * 4))
#define TRAFFIC_SHAPER_FIXPOINT_FACTOR 4
-#define MDP_WD_TIMER_0_CTL 0x380
-#define MDP_WD_TIMER_0_CTL2 0x384
-#define MDP_WD_TIMER_0_LOAD_VALUE 0x388
-#define MDP_WD_TIMER_1_CTL 0x390
-#define MDP_WD_TIMER_1_CTL2 0x394
-#define MDP_WD_TIMER_1_LOAD_VALUE 0x398
-#define MDP_WD_TIMER_2_CTL 0x420
-#define MDP_WD_TIMER_2_CTL2 0x424
-#define MDP_WD_TIMER_2_LOAD_VALUE 0x428
-#define MDP_WD_TIMER_3_CTL 0x430
-#define MDP_WD_TIMER_3_CTL2 0x434
-#define MDP_WD_TIMER_3_LOAD_VALUE 0x438
-#define MDP_WD_TIMER_4_CTL 0x440
-#define MDP_WD_TIMER_4_CTL2 0x444
-#define MDP_WD_TIMER_4_LOAD_VALUE 0x448
-
#define MDP_TICK_COUNT 16
#define XO_CLK_RATE 19200
#define MS_TICKS_IN_SEC 1000
@@ -48,8 +25,6 @@
#define CALCULATE_WD_LOAD_VALUE(fps) \
((uint32_t)((MS_TICKS_IN_SEC * XO_CLK_RATE)/(MDP_TICK_COUNT * fps)))
-#define DCE_SEL 0x450
-
static void dpu_hw_setup_split_pipe(struct dpu_hw_mdp *mdp,
struct split_pipe_cfg *cfg)
{
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hwio.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hwio.h
index c8156ed4b7fb..feb9a729844a 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hwio.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hwio.h
@@ -16,6 +16,7 @@
#define INTR_CLEAR 0x018
#define INTR2_EN 0x008
#define INTR2_STATUS 0x00c
+#define SSPP_SPARE 0x028
#define INTR2_CLEAR 0x02c
#define HIST_INTR_EN 0x01c
#define HIST_INTR_STATUS 0x020
@@ -28,7 +29,15 @@
#define DSPP_IGC_COLOR0_RAM_LUTN 0x300
#define DSPP_IGC_COLOR1_RAM_LUTN 0x304
#define DSPP_IGC_COLOR2_RAM_LUTN 0x308
+#define DANGER_STATUS 0x360
+#define SAFE_STATUS 0x364
#define HW_EVENTS_CTL 0x37C
+#define MDP_WD_TIMER_0_CTL 0x380
+#define MDP_WD_TIMER_0_CTL2 0x384
+#define MDP_WD_TIMER_0_LOAD_VALUE 0x388
+#define MDP_WD_TIMER_1_CTL 0x390
+#define MDP_WD_TIMER_1_CTL2 0x394
+#define MDP_WD_TIMER_1_LOAD_VALUE 0x398
#define CLK_CTRL3 0x3A8
#define CLK_STATUS3 0x3AC
#define CLK_CTRL4 0x3B0
@@ -43,6 +52,18 @@
#define HDMI_DP_CORE_SELECT 0x408
#define MDP_OUT_CTL_0 0x410
#define MDP_VSYNC_SEL 0x414
+#define MDP_WD_TIMER_2_CTL 0x420
+#define MDP_WD_TIMER_2_CTL2 0x424
+#define MDP_WD_TIMER_2_LOAD_VALUE 0x428
+#define MDP_WD_TIMER_3_CTL 0x430
+#define MDP_WD_TIMER_3_CTL2 0x434
+#define MDP_WD_TIMER_3_LOAD_VALUE 0x438
+#define MDP_WD_TIMER_4_CTL 0x440
+#define MDP_WD_TIMER_4_CTL2 0x444
+#define MDP_WD_TIMER_4_LOAD_VALUE 0x448
#define DCE_SEL 0x450
+#define MDP_PERIPH_TOP0 MDP_WD_TIMER_0_CTL
+#define MDP_PERIPH_TOP0_END CLK_CTRL3
+
#endif /*_DPU_HWIO_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
index b71199511a52..a683bd9b5a04 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
@@ -927,8 +927,20 @@ static void dpu_kms_mdp_snapshot(struct msm_disp_state *disp_state, struct msm_k
msm_disp_snapshot_add_block(disp_state, cat->wb[i].len,
dpu_kms->mmio + cat->wb[i].base, "wb_%d", i);
- msm_disp_snapshot_add_block(disp_state, cat->mdp[0].len,
- dpu_kms->mmio + cat->mdp[0].base, "top");
+ if (cat->mdp[0].features & BIT(DPU_MDP_PERIPH_0_REMOVED)) {
+ msm_disp_snapshot_add_block(disp_state, MDP_PERIPH_TOP0,
+ dpu_kms->mmio + cat->mdp[0].base, "top");
+ msm_disp_snapshot_add_block(disp_state, cat->mdp[0].len - MDP_PERIPH_TOP0_END,
+ dpu_kms->mmio + cat->mdp[0].base + MDP_PERIPH_TOP0_END, "top_2");
+ } else {
+ msm_disp_snapshot_add_block(disp_state, cat->mdp[0].len,
+ dpu_kms->mmio + cat->mdp[0].base, "top");
+ }
+
+ /* dump DSC sub-blocks HW regs info */
+ for (i = 0; i < cat->dsc_count; i++)
+ msm_disp_snapshot_add_block(disp_state, cat->dsc[i].len,
+ dpu_kms->mmio + cat->dsc[i].base, "dsc_%d", i);
pm_runtime_put_sync(&dpu_kms->pdev->dev);
}
@@ -1292,9 +1304,13 @@ static const struct of_device_id dpu_dt_match[] = {
{ .compatible = "qcom,sc7180-dpu", },
{ .compatible = "qcom,sc7280-dpu", },
{ .compatible = "qcom,sc8180x-dpu", },
+ { .compatible = "qcom,sc8280xp-dpu", },
{ .compatible = "qcom,sm6115-dpu", },
{ .compatible = "qcom,sm8150-dpu", },
{ .compatible = "qcom,sm8250-dpu", },
+ { .compatible = "qcom,sm8350-dpu", },
+ { .compatible = "qcom,sm8450-dpu", },
+ { .compatible = "qcom,sm8550-dpu", },
{}
};
MODULE_DEVICE_TABLE(of, dpu_dt_match);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
index 86719020afe2..bfd5be89e8b8 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
@@ -1126,7 +1126,7 @@ static void dpu_plane_sspp_atomic_update(struct drm_plane *plane)
struct dpu_plane_state *pstate = to_dpu_plane_state(state);
struct drm_crtc *crtc = state->crtc;
struct drm_framebuffer *fb = state->fb;
- bool is_rt_pipe, update_qos_remap;
+ bool is_rt_pipe;
const struct dpu_format *fmt =
to_dpu_format(msm_framebuffer_format(fb));
struct dpu_hw_pipe_cfg pipe_cfg;
@@ -1138,6 +1138,9 @@ static void dpu_plane_sspp_atomic_update(struct drm_plane *plane)
pstate->pending = true;
is_rt_pipe = (dpu_crtc_get_client_type(crtc) != NRT_CLIENT);
+ pstate->needs_qos_remap |= (is_rt_pipe != pdpu->is_rt_pipe);
+ pdpu->is_rt_pipe = is_rt_pipe;
+
_dpu_plane_set_qos_ctrl(plane, false, DPU_PLANE_QOS_PANIC_CTRL);
DPU_DEBUG_PLANE(pdpu, "FB[%u] " DRM_RECT_FP_FMT "->crtc%u " DRM_RECT_FMT
@@ -1219,14 +1222,8 @@ static void dpu_plane_sspp_atomic_update(struct drm_plane *plane)
_dpu_plane_set_ot_limit(plane, crtc, &pipe_cfg);
}
- update_qos_remap = (is_rt_pipe != pdpu->is_rt_pipe) ||
- pstate->needs_qos_remap;
-
- if (update_qos_remap) {
- if (is_rt_pipe != pdpu->is_rt_pipe)
- pdpu->is_rt_pipe = is_rt_pipe;
- else if (pstate->needs_qos_remap)
- pstate->needs_qos_remap = false;
+ if (pstate->needs_qos_remap) {
+ pstate->needs_qos_remap = false;
_dpu_plane_set_qos_remap(plane);
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
index 73b3442e7467..396429e63756 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
@@ -496,6 +496,11 @@ static int _dpu_rm_reserve_dsc(struct dpu_rm *rm,
/* check if DSC required are allocated or not */
for (i = 0; i < num_dsc; i++) {
+ if (!rm->dsc_blks[i]) {
+ DPU_ERROR("DSC %d does not exist\n", i);
+ return -EIO;
+ }
+
if (global_state->dsc_to_enc_id[i]) {
DPU_ERROR("DSC %d is already allocated\n", i);
return -EIO;
@@ -543,8 +548,8 @@ static int _dpu_rm_populate_requirements(
{
reqs->topology = req_topology;
- DRM_DEBUG_KMS("num_lm: %d num_enc: %d num_intf: %d\n",
- reqs->topology.num_lm, reqs->topology.num_enc,
+ DRM_DEBUG_KMS("num_lm: %d num_dsc: %d num_intf: %d\n",
+ reqs->topology.num_lm, reqs->topology.num_dsc,
reqs->topology.num_intf);
return 0;
@@ -660,6 +665,11 @@ int dpu_rm_get_assigned_resources(struct dpu_rm *rm,
blks_size, enc_id);
break;
}
+ if (!hw_blks[i]) {
+ DPU_ERROR("Allocated resource %d unavailable to assign to enc %d\n",
+ type, enc_id);
+ break;
+ }
blks[num_blks++] = hw_blks[i];
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c
index 088ec990a2f2..2a5a68366582 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c
@@ -70,6 +70,8 @@ int dpu_writeback_init(struct drm_device *dev, struct drm_encoder *enc,
int rc = 0;
dpu_wb_conn = devm_kzalloc(dev->dev, sizeof(*dpu_wb_conn), GFP_KERNEL);
+ if (!dpu_wb_conn)
+ return -ENOMEM;
drm_connector_helper_add(&dpu_wb_conn->base.base, &dpu_wb_conn_helper_funcs);
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_irq.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_irq.c
index 4d49f3ba6a96..ddcdd5e87853 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_irq.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_irq.c
@@ -69,8 +69,7 @@ irqreturn_t mdp4_irq(struct msm_kms *kms)
struct mdp_kms *mdp_kms = to_mdp_kms(kms);
struct mdp4_kms *mdp4_kms = to_mdp4_kms(mdp_kms);
struct drm_device *dev = mdp4_kms->dev;
- struct msm_drm_private *priv = dev->dev_private;
- unsigned int id;
+ struct drm_crtc *crtc;
uint32_t status, enable;
enable = mdp4_read(mdp4_kms, REG_MDP4_INTR_ENABLE);
@@ -81,9 +80,9 @@ irqreturn_t mdp4_irq(struct msm_kms *kms)
mdp_dispatch_irqs(mdp_kms, status);
- for (id = 0; id < priv->num_crtcs; id++)
- if (status & mdp4_crtc_vblank(priv->crtcs[id]))
- drm_handle_vblank(dev, id);
+ drm_for_each_crtc(crtc, dev)
+ if (status & mdp4_crtc_vblank(crtc))
+ drm_crtc_handle_vblank(crtc);
return IRQ_HANDLED;
}
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
index e86421c69bd1..86036dd4e1e8 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
@@ -1139,7 +1139,10 @@ static void mdp5_crtc_reset(struct drm_crtc *crtc)
if (crtc->state)
mdp5_crtc_destroy_state(crtc, crtc->state);
- __drm_atomic_helper_crtc_reset(crtc, &mdp5_cstate->base);
+ if (mdp5_cstate)
+ __drm_atomic_helper_crtc_reset(crtc, &mdp5_cstate->base);
+ else
+ __drm_atomic_helper_crtc_reset(crtc, NULL);
}
static const struct drm_crtc_funcs mdp5_crtc_no_lm_cursor_funcs = {
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_irq.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_irq.c
index 9b4c8d92ff32..43443a435d59 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_irq.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_irq.c
@@ -82,8 +82,7 @@ irqreturn_t mdp5_irq(struct msm_kms *kms)
struct mdp_kms *mdp_kms = to_mdp_kms(kms);
struct mdp5_kms *mdp5_kms = to_mdp5_kms(mdp_kms);
struct drm_device *dev = mdp5_kms->dev;
- struct msm_drm_private *priv = dev->dev_private;
- unsigned int id;
+ struct drm_crtc *crtc;
uint32_t status, enable;
enable = mdp5_read(mdp5_kms, REG_MDP5_INTR_EN);
@@ -94,9 +93,9 @@ irqreturn_t mdp5_irq(struct msm_kms *kms)
mdp_dispatch_irqs(mdp_kms, status);
- for (id = 0; id < priv->num_crtcs; id++)
- if (status & mdp5_crtc_vblank(priv->crtcs[id]))
- drm_handle_vblank(dev, id);
+ drm_for_each_crtc(crtc, dev)
+ if (status & mdp5_crtc_vblank(crtc))
+ drm_crtc_handle_vblank(crtc);
return IRQ_HANDLED;
}
diff --git a/drivers/gpu/drm/msm/disp/msm_disp_snapshot.c b/drivers/gpu/drm/msm/disp/msm_disp_snapshot.c
index e75b97127c0d..b73031cd48e4 100644
--- a/drivers/gpu/drm/msm/disp/msm_disp_snapshot.c
+++ b/drivers/gpu/drm/msm/disp/msm_disp_snapshot.c
@@ -129,6 +129,9 @@ void msm_disp_snapshot_destroy(struct drm_device *drm_dev)
}
priv = drm_dev->dev_private;
+ if (!priv->kms)
+ return;
+
kms = priv->kms;
if (kms->dump_worker)
diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
index 7ff60e5ff325..bde1a7ce442f 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.c
+++ b/drivers/gpu/drm/msm/dp/dp_display.c
@@ -122,61 +122,64 @@ struct dp_display_private {
struct msm_dp_desc {
phys_addr_t io_start;
+ unsigned int id;
unsigned int connector_type;
bool wide_bus_en;
};
-struct msm_dp_config {
- const struct msm_dp_desc *descs;
- size_t num_descs;
-};
-
static const struct msm_dp_desc sc7180_dp_descs[] = {
- [MSM_DP_CONTROLLER_0] = { .io_start = 0x0ae90000, .connector_type = DRM_MODE_CONNECTOR_DisplayPort },
-};
-
-static const struct msm_dp_config sc7180_dp_cfg = {
- .descs = sc7180_dp_descs,
- .num_descs = ARRAY_SIZE(sc7180_dp_descs),
+ { .io_start = 0x0ae90000, .id = MSM_DP_CONTROLLER_0, .connector_type = DRM_MODE_CONNECTOR_DisplayPort },
+ {}
};
static const struct msm_dp_desc sc7280_dp_descs[] = {
- [MSM_DP_CONTROLLER_0] = { .io_start = 0x0ae90000, .connector_type = DRM_MODE_CONNECTOR_DisplayPort, .wide_bus_en = true },
- [MSM_DP_CONTROLLER_1] = { .io_start = 0x0aea0000, .connector_type = DRM_MODE_CONNECTOR_eDP, .wide_bus_en = true },
-};
-
-static const struct msm_dp_config sc7280_dp_cfg = {
- .descs = sc7280_dp_descs,
- .num_descs = ARRAY_SIZE(sc7280_dp_descs),
+ { .io_start = 0x0ae90000, .id = MSM_DP_CONTROLLER_0, .connector_type = DRM_MODE_CONNECTOR_DisplayPort, .wide_bus_en = true },
+ { .io_start = 0x0aea0000, .id = MSM_DP_CONTROLLER_1, .connector_type = DRM_MODE_CONNECTOR_eDP, .wide_bus_en = true },
+ {}
};
static const struct msm_dp_desc sc8180x_dp_descs[] = {
- [MSM_DP_CONTROLLER_0] = { .io_start = 0x0ae90000, .connector_type = DRM_MODE_CONNECTOR_DisplayPort },
- [MSM_DP_CONTROLLER_1] = { .io_start = 0x0ae98000, .connector_type = DRM_MODE_CONNECTOR_DisplayPort },
- [MSM_DP_CONTROLLER_2] = { .io_start = 0x0ae9a000, .connector_type = DRM_MODE_CONNECTOR_eDP },
+ { .io_start = 0x0ae90000, .id = MSM_DP_CONTROLLER_0, .connector_type = DRM_MODE_CONNECTOR_DisplayPort },
+ { .io_start = 0x0ae98000, .id = MSM_DP_CONTROLLER_1, .connector_type = DRM_MODE_CONNECTOR_DisplayPort },
+ { .io_start = 0x0ae9a000, .id = MSM_DP_CONTROLLER_2, .connector_type = DRM_MODE_CONNECTOR_eDP },
+ {}
};
-static const struct msm_dp_config sc8180x_dp_cfg = {
- .descs = sc8180x_dp_descs,
- .num_descs = ARRAY_SIZE(sc8180x_dp_descs),
+static const struct msm_dp_desc sc8280xp_dp_descs[] = {
+ { .io_start = 0x0ae90000, .id = MSM_DP_CONTROLLER_0, .connector_type = DRM_MODE_CONNECTOR_DisplayPort, .wide_bus_en = true },
+ { .io_start = 0x0ae98000, .id = MSM_DP_CONTROLLER_1, .connector_type = DRM_MODE_CONNECTOR_DisplayPort, .wide_bus_en = true },
+ { .io_start = 0x0ae9a000, .id = MSM_DP_CONTROLLER_2, .connector_type = DRM_MODE_CONNECTOR_DisplayPort, .wide_bus_en = true },
+ { .io_start = 0x0aea0000, .id = MSM_DP_CONTROLLER_3, .connector_type = DRM_MODE_CONNECTOR_DisplayPort, .wide_bus_en = true },
+ { .io_start = 0x22090000, .id = MSM_DP_CONTROLLER_0, .connector_type = DRM_MODE_CONNECTOR_DisplayPort, .wide_bus_en = true },
+ { .io_start = 0x22098000, .id = MSM_DP_CONTROLLER_1, .connector_type = DRM_MODE_CONNECTOR_DisplayPort, .wide_bus_en = true },
+ { .io_start = 0x2209a000, .id = MSM_DP_CONTROLLER_2, .connector_type = DRM_MODE_CONNECTOR_DisplayPort, .wide_bus_en = true },
+ { .io_start = 0x220a0000, .id = MSM_DP_CONTROLLER_3, .connector_type = DRM_MODE_CONNECTOR_DisplayPort, .wide_bus_en = true },
+ {}
};
-static const struct msm_dp_desc sm8350_dp_descs[] = {
- [MSM_DP_CONTROLLER_0] = { .io_start = 0x0ae90000, .connector_type = DRM_MODE_CONNECTOR_DisplayPort },
+static const struct msm_dp_desc sc8280xp_edp_descs[] = {
+ { .io_start = 0x0ae9a000, .id = MSM_DP_CONTROLLER_2, .connector_type = DRM_MODE_CONNECTOR_eDP, .wide_bus_en = true },
+ { .io_start = 0x0aea0000, .id = MSM_DP_CONTROLLER_3, .connector_type = DRM_MODE_CONNECTOR_eDP, .wide_bus_en = true },
+ { .io_start = 0x2209a000, .id = MSM_DP_CONTROLLER_2, .connector_type = DRM_MODE_CONNECTOR_eDP, .wide_bus_en = true },
+ { .io_start = 0x220a0000, .id = MSM_DP_CONTROLLER_3, .connector_type = DRM_MODE_CONNECTOR_eDP, .wide_bus_en = true },
+ {}
};
-static const struct msm_dp_config sm8350_dp_cfg = {
- .descs = sm8350_dp_descs,
- .num_descs = ARRAY_SIZE(sm8350_dp_descs),
+static const struct msm_dp_desc sm8350_dp_descs[] = {
+ { .io_start = 0x0ae90000, .id = MSM_DP_CONTROLLER_0, .connector_type = DRM_MODE_CONNECTOR_DisplayPort },
+ {}
};
static const struct of_device_id dp_dt_match[] = {
- { .compatible = "qcom,sc7180-dp", .data = &sc7180_dp_cfg },
- { .compatible = "qcom,sc7280-dp", .data = &sc7280_dp_cfg },
- { .compatible = "qcom,sc7280-edp", .data = &sc7280_dp_cfg },
- { .compatible = "qcom,sc8180x-dp", .data = &sc8180x_dp_cfg },
- { .compatible = "qcom,sc8180x-edp", .data = &sc8180x_dp_cfg },
- { .compatible = "qcom,sm8350-dp", .data = &sm8350_dp_cfg },
+ { .compatible = "qcom,sc7180-dp", .data = &sc7180_dp_descs },
+ { .compatible = "qcom,sc7280-dp", .data = &sc7280_dp_descs },
+ { .compatible = "qcom,sc7280-edp", .data = &sc7280_dp_descs },
+ { .compatible = "qcom,sc8180x-dp", .data = &sc8180x_dp_descs },
+ { .compatible = "qcom,sc8180x-edp", .data = &sc8180x_dp_descs },
+ { .compatible = "qcom,sc8280xp-dp", .data = &sc8280xp_dp_descs },
+ { .compatible = "qcom,sc8280xp-edp", .data = &sc8280xp_edp_descs },
+ { .compatible = "qcom,sdm845-dp", .data = &sc7180_dp_descs },
+ { .compatible = "qcom,sm8350-dp", .data = &sm8350_dp_descs },
{}
};
@@ -390,6 +393,10 @@ static int dp_display_process_hpd_high(struct dp_display_private *dp)
struct edid *edid;
dp->panel->max_dp_lanes = dp->parser->max_dp_lanes;
+ dp->panel->max_dp_link_rate = dp->parser->max_dp_link_rate;
+
+ drm_dbg_dp(dp->drm_dev, "max_lanes=%d max_link_rate=%d\n",
+ dp->panel->max_dp_lanes, dp->panel->max_dp_link_rate);
rc = dp_panel_read_sink_caps(dp->panel, dp->dp_display.connector);
if (rc)
@@ -607,8 +614,10 @@ static int dp_hpd_plug_handle(struct dp_display_private *dp, u32 data)
}
/* enable HDP irq_hpd/replug interrupt */
- dp_catalog_hpd_config_intr(dp->catalog,
- DP_DP_IRQ_HPD_INT_MASK | DP_DP_HPD_REPLUG_INT_MASK, true);
+ if (dp->dp_display.internal_hpd)
+ dp_catalog_hpd_config_intr(dp->catalog,
+ DP_DP_IRQ_HPD_INT_MASK | DP_DP_HPD_REPLUG_INT_MASK,
+ true);
drm_dbg_dp(dp->drm_dev, "After, type=%d hpd_state=%d\n",
dp->dp_display.connector_type, state);
@@ -648,8 +657,10 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data)
dp->dp_display.connector_type, state);
/* disable irq_hpd/replug interrupts */
- dp_catalog_hpd_config_intr(dp->catalog,
- DP_DP_IRQ_HPD_INT_MASK | DP_DP_HPD_REPLUG_INT_MASK, false);
+ if (dp->dp_display.internal_hpd)
+ dp_catalog_hpd_config_intr(dp->catalog,
+ DP_DP_IRQ_HPD_INT_MASK | DP_DP_HPD_REPLUG_INT_MASK,
+ false);
/* unplugged, no more irq_hpd handle */
dp_del_event(dp, EV_IRQ_HPD_INT);
@@ -675,7 +686,8 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data)
}
/* disable HPD plug interrupts */
- dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_PLUG_INT_MASK, false);
+ if (dp->dp_display.internal_hpd)
+ dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_PLUG_INT_MASK, false);
/*
* We don't need separate work for disconnect as
@@ -693,7 +705,7 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data)
dp_display_handle_plugged_change(&dp->dp_display, false);
/* enable HDP plug interrupt to prepare for next plugin */
- if (!dp->dp_display.is_edp)
+ if (dp->dp_display.internal_hpd)
dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_PLUG_INT_MASK, true);
drm_dbg_dp(dp->drm_dev, "After, type=%d hpd_state=%d\n",
@@ -1078,8 +1090,8 @@ static void dp_display_config_hpd(struct dp_display_private *dp)
dp_display_host_init(dp);
dp_catalog_ctrl_hpd_config(dp->catalog);
- /* Enable plug and unplug interrupts only for external DisplayPort */
- if (!dp->dp_display.is_edp)
+ /* Enable plug and unplug interrupts only if requested */
+ if (dp->dp_display.internal_hpd)
dp_catalog_hpd_config_intr(dp->catalog,
DP_DP_HPD_PLUG_INT_MASK |
DP_DP_HPD_UNPLUG_INT_MASK,
@@ -1262,10 +1274,9 @@ int dp_display_request_irq(struct msm_dp *dp_display)
return 0;
}
-static const struct msm_dp_desc *dp_display_get_desc(struct platform_device *pdev,
- unsigned int *id)
+static const struct msm_dp_desc *dp_display_get_desc(struct platform_device *pdev)
{
- const struct msm_dp_config *cfg = of_device_get_match_data(&pdev->dev);
+ const struct msm_dp_desc *descs = of_device_get_match_data(&pdev->dev);
struct resource *res;
int i;
@@ -1273,11 +1284,9 @@ static const struct msm_dp_desc *dp_display_get_desc(struct platform_device *pde
if (!res)
return NULL;
- for (i = 0; i < cfg->num_descs; i++) {
- if (cfg->descs[i].io_start == res->start) {
- *id = i;
- return &cfg->descs[i];
- }
+ for (i = 0; i < descs[i].io_start; i++) {
+ if (descs[i].io_start == res->start)
+ return &descs[i];
}
dev_err(&pdev->dev, "unknown displayport instance\n");
@@ -1299,12 +1308,13 @@ static int dp_display_probe(struct platform_device *pdev)
if (!dp)
return -ENOMEM;
- desc = dp_display_get_desc(pdev, &dp->id);
+ desc = dp_display_get_desc(pdev);
if (!desc)
return -EINVAL;
dp->pdev = pdev;
dp->name = "drm_dp";
+ dp->id = desc->id;
dp->dp_display.connector_type = desc->connector_type;
dp->wide_bus_en = desc->wide_bus_en;
dp->dp_display.is_edp =
@@ -1373,8 +1383,7 @@ static int dp_pm_resume(struct device *dev)
dp_catalog_ctrl_hpd_config(dp->catalog);
-
- if (!dp->dp_display.is_edp)
+ if (dp->dp_display.internal_hpd)
dp_catalog_hpd_config_intr(dp->catalog,
DP_DP_HPD_PLUG_INT_MASK |
DP_DP_HPD_UNPLUG_INT_MASK,
@@ -1497,7 +1506,7 @@ void msm_dp_irq_postinstall(struct msm_dp *dp_display)
dp = container_of(dp_display, struct dp_display_private, dp_display);
if (!dp_display->is_edp)
- dp_add_event(dp, EV_HPD_INIT_SETUP, 0, 100);
+ dp_add_event(dp, EV_HPD_INIT_SETUP, 0, 0);
}
bool msm_dp_wide_bus_available(const struct msm_dp *dp_display)
@@ -1771,3 +1780,41 @@ void dp_bridge_mode_set(struct drm_bridge *drm_bridge,
dp_display->dp_mode.h_active_low =
!!(dp_display->dp_mode.drm_mode.flags & DRM_MODE_FLAG_NHSYNC);
}
+
+void dp_bridge_hpd_enable(struct drm_bridge *bridge)
+{
+ struct msm_dp_bridge *dp_bridge = to_dp_bridge(bridge);
+ struct msm_dp *dp_display = dp_bridge->dp_display;
+
+ dp_display->internal_hpd = true;
+}
+
+void dp_bridge_hpd_disable(struct drm_bridge *bridge)
+{
+ struct msm_dp_bridge *dp_bridge = to_dp_bridge(bridge);
+ struct msm_dp *dp_display = dp_bridge->dp_display;
+
+ dp_display->internal_hpd = false;
+}
+
+void dp_bridge_hpd_notify(struct drm_bridge *bridge,
+ enum drm_connector_status status)
+{
+ struct msm_dp_bridge *dp_bridge = to_dp_bridge(bridge);
+ struct msm_dp *dp_display = dp_bridge->dp_display;
+ struct dp_display_private *dp = container_of(dp_display, struct dp_display_private, dp_display);
+
+ /* Without next_bridge interrupts are handled by the DP core directly */
+ if (dp_display->internal_hpd)
+ return;
+
+ if (!dp->core_initialized) {
+ drm_dbg_dp(dp->drm_dev, "not initialized\n");
+ return;
+ }
+
+ if (!dp_display->is_connected && status == connector_status_connected)
+ dp_add_event(dp, EV_HPD_PLUG_INT, 0, 0);
+ else if (dp_display->is_connected && status == connector_status_disconnected)
+ dp_add_event(dp, EV_HPD_UNPLUG_INT, 0, 0);
+}
diff --git a/drivers/gpu/drm/msm/dp/dp_display.h b/drivers/gpu/drm/msm/dp/dp_display.h
index dcedf021f7fe..371337d0fae2 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.h
+++ b/drivers/gpu/drm/msm/dp/dp_display.h
@@ -21,6 +21,7 @@ struct msm_dp {
bool power_on;
unsigned int connector_type;
bool is_edp;
+ bool internal_hpd;
hdmi_codec_plugged_cb plugged_cb;
diff --git a/drivers/gpu/drm/msm/dp/dp_drm.c b/drivers/gpu/drm/msm/dp/dp_drm.c
index 6db82f9b03af..275370f21115 100644
--- a/drivers/gpu/drm/msm/dp/dp_drm.c
+++ b/drivers/gpu/drm/msm/dp/dp_drm.c
@@ -102,6 +102,9 @@ static const struct drm_bridge_funcs dp_bridge_ops = {
.get_modes = dp_bridge_get_modes,
.detect = dp_bridge_detect,
.atomic_check = dp_bridge_atomic_check,
+ .hpd_enable = dp_bridge_hpd_enable,
+ .hpd_disable = dp_bridge_hpd_disable,
+ .hpd_notify = dp_bridge_hpd_notify,
};
struct drm_bridge *dp_bridge_init(struct msm_dp *dp_display, struct drm_device *dev,
diff --git a/drivers/gpu/drm/msm/dp/dp_drm.h b/drivers/gpu/drm/msm/dp/dp_drm.h
index 82035dbb0578..250f7c66201f 100644
--- a/drivers/gpu/drm/msm/dp/dp_drm.h
+++ b/drivers/gpu/drm/msm/dp/dp_drm.h
@@ -32,5 +32,9 @@ enum drm_mode_status dp_bridge_mode_valid(struct drm_bridge *bridge,
void dp_bridge_mode_set(struct drm_bridge *drm_bridge,
const struct drm_display_mode *mode,
const struct drm_display_mode *adjusted_mode);
+void dp_bridge_hpd_enable(struct drm_bridge *bridge);
+void dp_bridge_hpd_disable(struct drm_bridge *bridge);
+void dp_bridge_hpd_notify(struct drm_bridge *bridge,
+ enum drm_connector_status status);
#endif /* _DP_DRM_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.c b/drivers/gpu/drm/msm/dp/dp_panel.c
index 5149cebc93f6..1800d8963f8a 100644
--- a/drivers/gpu/drm/msm/dp/dp_panel.c
+++ b/drivers/gpu/drm/msm/dp/dp_panel.c
@@ -75,12 +75,13 @@ static int dp_panel_read_dpcd(struct dp_panel *dp_panel)
link_info->rate = drm_dp_bw_code_to_link_rate(dpcd[DP_MAX_LINK_RATE]);
link_info->num_lanes = dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK;
+ /* Limit data lanes from data-lanes of endpoint property of dtsi */
if (link_info->num_lanes > dp_panel->max_dp_lanes)
link_info->num_lanes = dp_panel->max_dp_lanes;
- /* Limit support upto HBR2 until HBR3 support is added */
- if (link_info->rate >= (drm_dp_bw_code_to_link_rate(DP_LINK_BW_5_4)))
- link_info->rate = drm_dp_bw_code_to_link_rate(DP_LINK_BW_5_4);
+ /* Limit link rate from link-frequencies of endpoint property of dtsi */
+ if (link_info->rate > dp_panel->max_dp_link_rate)
+ link_info->rate = dp_panel->max_dp_link_rate;
drm_dbg_dp(panel->drm_dev, "version: %d.%d\n", major, minor);
drm_dbg_dp(panel->drm_dev, "link_rate=%d\n", link_info->rate);
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.h b/drivers/gpu/drm/msm/dp/dp_panel.h
index d861197ac1c8..f04d0210b5cd 100644
--- a/drivers/gpu/drm/msm/dp/dp_panel.h
+++ b/drivers/gpu/drm/msm/dp/dp_panel.h
@@ -50,6 +50,7 @@ struct dp_panel {
u32 vic;
u32 max_dp_lanes;
+ u32 max_dp_link_rate;
u32 max_bw_code;
};
diff --git a/drivers/gpu/drm/msm/dp/dp_parser.c b/drivers/gpu/drm/msm/dp/dp_parser.c
index dcbe893d66d7..7032dcc8842b 100644
--- a/drivers/gpu/drm/msm/dp/dp_parser.c
+++ b/drivers/gpu/drm/msm/dp/dp_parser.c
@@ -91,19 +91,53 @@ static int dp_parser_ctrl_res(struct dp_parser *parser)
return 0;
}
+static u32 dp_parser_link_frequencies(struct device_node *of_node)
+{
+ struct device_node *endpoint;
+ u64 frequency = 0;
+ int cnt;
+
+ endpoint = of_graph_get_endpoint_by_regs(of_node, 1, 0); /* port@1 */
+ if (!endpoint)
+ return 0;
+
+ cnt = of_property_count_u64_elems(endpoint, "link-frequencies");
+
+ if (cnt > 0)
+ of_property_read_u64_index(endpoint, "link-frequencies",
+ cnt - 1, &frequency);
+ of_node_put(endpoint);
+
+ do_div(frequency,
+ 10 * /* from symbol rate to link rate */
+ 1000); /* kbytes */
+
+ return frequency;
+}
+
static int dp_parser_misc(struct dp_parser *parser)
{
struct device_node *of_node = parser->pdev->dev.of_node;
- int len;
-
- len = drm_of_get_data_lanes_count(of_node, 1, DP_MAX_NUM_DP_LANES);
- if (len < 0) {
- DRM_WARN("Invalid property \"data-lanes\", default max DP lanes = %d\n",
- DP_MAX_NUM_DP_LANES);
- len = DP_MAX_NUM_DP_LANES;
+ int cnt;
+
+ /*
+ * data-lanes is the property of dp_out endpoint
+ */
+ cnt = drm_of_get_data_lanes_count_ep(of_node, 1, 0, 1, DP_MAX_NUM_DP_LANES);
+ if (cnt < 0) {
+ /* legacy code, data-lanes is the property of mdss_dp node */
+ cnt = drm_of_get_data_lanes_count(of_node, 1, DP_MAX_NUM_DP_LANES);
}
- parser->max_dp_lanes = len;
+ if (cnt > 0)
+ parser->max_dp_lanes = cnt;
+ else
+ parser->max_dp_lanes = DP_MAX_NUM_DP_LANES; /* 4 lanes */
+
+ parser->max_dp_link_rate = dp_parser_link_frequencies(of_node);
+ if (!parser->max_dp_link_rate)
+ parser->max_dp_link_rate = DP_LINK_RATE_HBR2;
+
return 0;
}
diff --git a/drivers/gpu/drm/msm/dp/dp_parser.h b/drivers/gpu/drm/msm/dp/dp_parser.h
index d30ab773db46..1f068626d445 100644
--- a/drivers/gpu/drm/msm/dp/dp_parser.h
+++ b/drivers/gpu/drm/msm/dp/dp_parser.h
@@ -15,6 +15,7 @@
#define DP_LABEL "MDSS DP DISPLAY"
#define DP_MAX_PIXEL_CLK_KHZ 675000
#define DP_MAX_NUM_DP_LANES 4
+#define DP_LINK_RATE_HBR2 540000 /* kbytes */
enum dp_pm_type {
DP_CORE_PM,
@@ -119,6 +120,7 @@ struct dp_parser {
struct dp_io io;
struct dp_display_data disp_data;
u32 max_dp_lanes;
+ u32 max_dp_link_rate;
struct drm_bridge *next_bridge;
int (*parse)(struct dp_parser *parser);
diff --git a/drivers/gpu/drm/msm/dsi/dsi.h b/drivers/gpu/drm/msm/dsi/dsi.h
index 2a96b4fe7839..bd3763a5d723 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.h
@@ -118,6 +118,8 @@ int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host);
int dsi_link_clk_enable_v2(struct msm_dsi_host *msm_host);
void dsi_link_clk_disable_6g(struct msm_dsi_host *msm_host);
void dsi_link_clk_disable_v2(struct msm_dsi_host *msm_host);
+unsigned long dsi_byte_clk_get_rate(struct mipi_dsi_host *host, bool is_bonded_dsi,
+ const struct drm_display_mode *mode);
int dsi_tx_buf_alloc_6g(struct msm_dsi_host *msm_host, int size);
int dsi_tx_buf_alloc_v2(struct msm_dsi_host *msm_host, int size);
void *dsi_tx_buf_get_6g(struct msm_dsi_host *msm_host);
@@ -139,6 +141,7 @@ struct msm_dsi_phy_shared_timings {
u32 clk_post;
u32 clk_pre;
bool clk_pre_inc_by_2;
+ bool byte_intf_clk_div_2;
};
struct msm_dsi_phy_clk_request {
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.c b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
index 7e97c239ed48..6d21f0b33411 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_cfg.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
@@ -181,6 +181,20 @@ static const struct msm_dsi_config sdm845_dsi_cfg = {
.num_dsi = 2,
};
+static const struct regulator_bulk_data sm8550_dsi_regulators[] = {
+ { .supply = "vdda", .init_load_uA = 16800 }, /* 1.2 V */
+};
+
+static const struct msm_dsi_config sm8550_dsi_cfg = {
+ .io_offset = DSI_6G_REG_SHIFT,
+ .regulator_data = sm8550_dsi_regulators,
+ .num_regulators = ARRAY_SIZE(sm8550_dsi_regulators),
+ .bus_clk_names = dsi_sdm845_bus_clk_names,
+ .num_bus_clks = ARRAY_SIZE(dsi_sdm845_bus_clk_names),
+ .io_start = { 0xae94000, 0xae96000 },
+ .num_dsi = 2,
+};
+
static const struct regulator_bulk_data sc7180_dsi_regulators[] = {
{ .supply = "vdda", .init_load_uA = 21800 }, /* 1.2 V */
};
@@ -209,8 +223,8 @@ static const struct msm_dsi_config sc7280_dsi_cfg = {
.num_regulators = ARRAY_SIZE(sc7280_dsi_regulators),
.bus_clk_names = dsi_sc7280_bus_clk_names,
.num_bus_clks = ARRAY_SIZE(dsi_sc7280_bus_clk_names),
- .io_start = { 0xae94000 },
- .num_dsi = 1,
+ .io_start = { 0xae94000, 0xae96000 },
+ .num_dsi = 2,
};
static const char * const dsi_qcm2290_bus_clk_names[] = {
@@ -300,6 +314,10 @@ static const struct msm_dsi_cfg_handler dsi_cfg_handlers[] = {
&sc7180_dsi_cfg, &msm_dsi_6g_v2_host_ops},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_5_0,
&sc7280_dsi_cfg, &msm_dsi_6g_v2_host_ops},
+ {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_6_0,
+ &sdm845_dsi_cfg, &msm_dsi_6g_v2_host_ops},
+ {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_7_0,
+ &sm8550_dsi_cfg, &msm_dsi_6g_v2_host_ops},
};
const struct msm_dsi_cfg_handler *msm_dsi_cfg_get(u32 major, u32 minor)
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.h b/drivers/gpu/drm/msm/dsi/dsi_cfg.h
index 8f04e685a74e..44be4a88aa83 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_cfg.h
+++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.h
@@ -25,6 +25,8 @@
#define MSM_DSI_6G_VER_MINOR_V2_4_0 0x20040000
#define MSM_DSI_6G_VER_MINOR_V2_4_1 0x20040001
#define MSM_DSI_6G_VER_MINOR_V2_5_0 0x20050000
+#define MSM_DSI_6G_VER_MINOR_V2_6_0 0x20060000
+#define MSM_DSI_6G_VER_MINOR_V2_7_0 0x20070000
#define MSM_DSI_V2_VER_MINOR_8064 0x0
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index 89aadd3b3202..18fa30e1e858 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -122,6 +122,7 @@ struct msm_dsi_host {
struct clk *byte_intf_clk;
unsigned long byte_clk_rate;
+ unsigned long byte_intf_clk_rate;
unsigned long pixel_clk_rate;
unsigned long esc_clk_rate;
@@ -398,7 +399,6 @@ int msm_dsi_runtime_resume(struct device *dev)
int dsi_link_clk_set_rate_6g(struct msm_dsi_host *msm_host)
{
- unsigned long byte_intf_rate;
int ret;
DBG("Set clk rates: pclk=%d, byteclk=%lu",
@@ -418,13 +418,7 @@ int dsi_link_clk_set_rate_6g(struct msm_dsi_host *msm_host)
}
if (msm_host->byte_intf_clk) {
- /* For CPHY, byte_intf_clk is same as byte_clk */
- if (msm_host->cphy_mode)
- byte_intf_rate = msm_host->byte_clk_rate;
- else
- byte_intf_rate = msm_host->byte_clk_rate / 2;
-
- ret = clk_set_rate(msm_host->byte_intf_clk, byte_intf_rate);
+ ret = clk_set_rate(msm_host->byte_intf_clk, msm_host->byte_intf_clk_rate);
if (ret) {
pr_err("%s: Failed to set rate byte intf clk, %d\n",
__func__, ret);
@@ -570,9 +564,8 @@ void dsi_link_clk_disable_v2(struct msm_dsi_host *msm_host)
clk_disable_unprepare(msm_host->byte_clk);
}
-static unsigned long dsi_get_pclk_rate(struct msm_dsi_host *msm_host, bool is_bonded_dsi)
+static unsigned long dsi_get_pclk_rate(const struct drm_display_mode *mode, bool is_bonded_dsi)
{
- struct drm_display_mode *mode = msm_host->mode;
unsigned long pclk_rate;
pclk_rate = mode->clock * 1000;
@@ -589,11 +582,13 @@ static unsigned long dsi_get_pclk_rate(struct msm_dsi_host *msm_host, bool is_bo
return pclk_rate;
}
-static void dsi_calc_pclk(struct msm_dsi_host *msm_host, bool is_bonded_dsi)
+unsigned long dsi_byte_clk_get_rate(struct mipi_dsi_host *host, bool is_bonded_dsi,
+ const struct drm_display_mode *mode)
{
+ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
u8 lanes = msm_host->lanes;
u32 bpp = dsi_get_bpp(msm_host->format);
- unsigned long pclk_rate = dsi_get_pclk_rate(msm_host, is_bonded_dsi);
+ unsigned long pclk_rate = dsi_get_pclk_rate(mode, is_bonded_dsi);
u64 pclk_bpp = (u64)pclk_rate * bpp;
if (lanes == 0) {
@@ -607,8 +602,14 @@ static void dsi_calc_pclk(struct msm_dsi_host *msm_host, bool is_bonded_dsi)
else
do_div(pclk_bpp, (8 * lanes));
- msm_host->pixel_clk_rate = pclk_rate;
- msm_host->byte_clk_rate = pclk_bpp;
+ return pclk_bpp;
+}
+
+static void dsi_calc_pclk(struct msm_dsi_host *msm_host, bool is_bonded_dsi)
+{
+ msm_host->pixel_clk_rate = dsi_get_pclk_rate(msm_host->mode, is_bonded_dsi);
+ msm_host->byte_clk_rate = dsi_byte_clk_get_rate(&msm_host->base, is_bonded_dsi,
+ msm_host->mode);
DBG("pclk=%lu, bclk=%lu", msm_host->pixel_clk_rate,
msm_host->byte_clk_rate);
@@ -636,7 +637,7 @@ int dsi_calc_clk_rate_v2(struct msm_dsi_host *msm_host, bool is_bonded_dsi)
dsi_calc_pclk(msm_host, is_bonded_dsi);
- pclk_bpp = (u64)dsi_get_pclk_rate(msm_host, is_bonded_dsi) * bpp;
+ pclk_bpp = (u64)dsi_get_pclk_rate(msm_host->mode, is_bonded_dsi) * bpp;
do_div(pclk_bpp, 8);
msm_host->src_clk_rate = pclk_bpp;
@@ -853,11 +854,12 @@ static void dsi_update_dsc_timing(struct msm_dsi_host *msm_host, bool is_cmd_mod
*/
slice_per_intf = DIV_ROUND_UP(hdisplay, dsc->slice_width);
- /* If slice_per_pkt is greater than slice_per_intf
+ /*
+ * If slice_count is greater than slice_per_intf
* then default to 1. This can happen during partial
* update.
*/
- if (slice_per_intf > dsc->slice_count)
+ if (dsc->slice_count > slice_per_intf)
dsc->slice_count = 1;
total_bytes_per_intf = dsc->slice_chunk_size * slice_per_intf;
@@ -987,7 +989,7 @@ static void dsi_timing_setup(struct msm_dsi_host *msm_host, bool is_bonded_dsi)
if (!msm_host->dsc)
wc = hdisplay * dsi_get_bpp(msm_host->format) / 8 + 1;
else
- wc = mode->hdisplay / 2 + 1;
+ wc = msm_host->dsc->slice_chunk_size * msm_host->dsc->slice_count + 1;
dsi_write(msm_host, REG_DSI_CMD_MDP_STREAM0_CTRL,
DSI_CMD_MDP_STREAM0_CTRL_WORD_COUNT(wc) |
@@ -1883,8 +1885,7 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi)
msm_host = devm_kzalloc(&pdev->dev, sizeof(*msm_host), GFP_KERNEL);
if (!msm_host) {
- ret = -ENOMEM;
- goto fail;
+ return -ENOMEM;
}
msm_host->pdev = pdev;
@@ -1893,31 +1894,28 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi)
ret = dsi_host_parse_dt(msm_host);
if (ret) {
pr_err("%s: failed to parse dt\n", __func__);
- goto fail;
+ return ret;
}
msm_host->ctrl_base = msm_ioremap_size(pdev, "dsi_ctrl", &msm_host->ctrl_size);
if (IS_ERR(msm_host->ctrl_base)) {
pr_err("%s: unable to map Dsi ctrl base\n", __func__);
- ret = PTR_ERR(msm_host->ctrl_base);
- goto fail;
+ return PTR_ERR(msm_host->ctrl_base);
}
pm_runtime_enable(&pdev->dev);
msm_host->cfg_hnd = dsi_get_config(msm_host);
if (!msm_host->cfg_hnd) {
- ret = -EINVAL;
pr_err("%s: get config failed\n", __func__);
- goto fail;
+ return -EINVAL;
}
cfg = msm_host->cfg_hnd->cfg;
msm_host->id = dsi_host_get_id(msm_host);
if (msm_host->id < 0) {
- ret = msm_host->id;
pr_err("%s: unable to identify DSI host index\n", __func__);
- goto fail;
+ return msm_host->id;
}
/* fixup base address by io offset */
@@ -1927,19 +1925,18 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi)
cfg->regulator_data,
&msm_host->supplies);
if (ret)
- goto fail;
+ return ret;
ret = dsi_clk_init(msm_host);
if (ret) {
pr_err("%s: unable to initialize dsi clks\n", __func__);
- goto fail;
+ return ret;
}
msm_host->rx_buf = devm_kzalloc(&pdev->dev, SZ_4K, GFP_KERNEL);
if (!msm_host->rx_buf) {
- ret = -ENOMEM;
pr_err("%s: alloc rx temp buf failed\n", __func__);
- goto fail;
+ return -ENOMEM;
}
ret = devm_pm_opp_set_clkname(&pdev->dev, "byte");
@@ -1977,15 +1974,15 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi)
/* setup workqueue */
msm_host->workqueue = alloc_ordered_workqueue("dsi_drm_work", 0);
+ if (!msm_host->workqueue)
+ return -ENOMEM;
+
INIT_WORK(&msm_host->err_work, dsi_err_worker);
msm_dsi->id = msm_host->id;
DBG("Dsi Host %d initialized", msm_host->id);
return 0;
-
-fail:
- return ret;
}
void msm_dsi_host_destroy(struct mipi_dsi_host *host)
@@ -2391,6 +2388,10 @@ int msm_dsi_host_power_on(struct mipi_dsi_host *host,
goto unlock_ret;
}
+ msm_host->byte_intf_clk_rate = msm_host->byte_clk_rate;
+ if (phy_shared_timings->byte_intf_clk_div_2)
+ msm_host->byte_intf_clk_rate /= 2;
+
msm_dsi_sfpb_config(msm_host, true);
ret = regulator_bulk_enable(msm_host->cfg_hnd->cfg->num_regulators,
diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c
index 3a1417397283..1bbac72dad35 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_manager.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c
@@ -450,6 +450,26 @@ static enum drm_mode_status dsi_mgr_bridge_mode_valid(struct drm_bridge *bridge,
int id = dsi_mgr_bridge_get_id(bridge);
struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
struct mipi_dsi_host *host = msm_dsi->host;
+ struct platform_device *pdev = msm_dsi->pdev;
+ struct dev_pm_opp *opp;
+ unsigned long byte_clk_rate;
+
+ byte_clk_rate = dsi_byte_clk_get_rate(host, IS_BONDED_DSI(), mode);
+
+ opp = dev_pm_opp_find_freq_ceil(&pdev->dev, &byte_clk_rate);
+ if (!IS_ERR(opp)) {
+ dev_pm_opp_put(opp);
+ } else if (PTR_ERR(opp) == -ERANGE) {
+ /*
+ * An empty table is created by devm_pm_opp_set_clkname() even
+ * if there is none. Thus find_freq_ceil will still return
+ * -ERANGE in such case.
+ */
+ if (dev_pm_opp_get_opp_count(&pdev->dev) != 0)
+ return MODE_CLOCK_RANGE;
+ } else {
+ return MODE_ERROR;
+ }
return msm_dsi_host_check_dsc(host, mode);
}
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
index ee6051367679..bb09cbe8ff86 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
@@ -350,6 +350,8 @@ int msm_dsi_dphy_timing_calc_v3(struct msm_dsi_dphy_timing *timing,
timing->shared_timings.clk_pre_inc_by_2 = 0;
}
+ timing->shared_timings.byte_intf_clk_div_2 = true;
+
timing->ta_go = 3;
timing->ta_sure = 0;
timing->ta_get = 4;
@@ -454,6 +456,8 @@ int msm_dsi_dphy_timing_calc_v4(struct msm_dsi_dphy_timing *timing,
tmax = 255;
timing->shared_timings.clk_pre = DIV_ROUND_UP((tmax - tmin) * 125, 10000) + tmin;
+ timing->shared_timings.byte_intf_clk_div_2 = true;
+
DBG("%d, %d, %d, %d, %d, %d, %d, %d, %d, %d",
timing->shared_timings.clk_pre, timing->shared_timings.clk_post,
timing->clk_zero, timing->clk_trail, timing->clk_prepare, timing->hs_exit,
@@ -569,6 +573,14 @@ static const struct of_device_id dsi_phy_dt_match[] = {
.data = &dsi_phy_7nm_8150_cfgs },
{ .compatible = "qcom,sc7280-dsi-phy-7nm",
.data = &dsi_phy_7nm_7280_cfgs },
+ { .compatible = "qcom,sm6375-dsi-phy-7nm",
+ .data = &dsi_phy_7nm_6375_cfgs },
+ { .compatible = "qcom,sm8350-dsi-phy-5nm",
+ .data = &dsi_phy_5nm_8350_cfgs },
+ { .compatible = "qcom,sm8450-dsi-phy-5nm",
+ .data = &dsi_phy_5nm_8450_cfgs },
+ { .compatible = "qcom,sm8550-dsi-phy-4nm",
+ .data = &dsi_phy_4nm_8550_cfgs },
#endif
{}
};
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
index 1096afedd616..7137a17ae523 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
@@ -55,8 +55,12 @@ extern const struct msm_dsi_phy_cfg dsi_phy_14nm_8953_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_10nm_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_10nm_8998_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_7nm_cfgs;
+extern const struct msm_dsi_phy_cfg dsi_phy_7nm_6375_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_7nm_8150_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_7nm_7280_cfgs;
+extern const struct msm_dsi_phy_cfg dsi_phy_5nm_8350_cfgs;
+extern const struct msm_dsi_phy_cfg dsi_phy_5nm_8450_cfgs;
+extern const struct msm_dsi_phy_cfg dsi_phy_4nm_8550_cfgs;
struct msm_dsi_dphy_timing {
u32 clk_zero;
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
index 9e7fa7d88ead..3b1ed02f644d 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
@@ -39,8 +39,16 @@
#define VCO_REF_CLK_RATE 19200000
#define FRAC_BITS 18
+/* Hardware is pre V4.1 */
+#define DSI_PHY_7NM_QUIRK_PRE_V4_1 BIT(0)
/* Hardware is V4.1 */
-#define DSI_PHY_7NM_QUIRK_V4_1 BIT(0)
+#define DSI_PHY_7NM_QUIRK_V4_1 BIT(1)
+/* Hardware is V4.2 */
+#define DSI_PHY_7NM_QUIRK_V4_2 BIT(2)
+/* Hardware is V4.3 */
+#define DSI_PHY_7NM_QUIRK_V4_3 BIT(3)
+/* Hardware is V5.2 */
+#define DSI_PHY_7NM_QUIRK_V5_2 BIT(4)
struct dsi_pll_config {
bool enable_ssc;
@@ -116,16 +124,27 @@ static void dsi_pll_calc_dec_frac(struct dsi_pll_7nm *pll, struct dsi_pll_config
dec_multiple = div_u64(pll_freq * multiplier, divider);
dec = div_u64_rem(dec_multiple, multiplier, &frac);
- if (!(pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V4_1))
+ if (pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_PRE_V4_1)
config->pll_clock_inverters = 0x28;
- else if (pll_freq <= 1000000000ULL)
- config->pll_clock_inverters = 0xa0;
- else if (pll_freq <= 2500000000ULL)
- config->pll_clock_inverters = 0x20;
- else if (pll_freq <= 3020000000ULL)
- config->pll_clock_inverters = 0x00;
- else
- config->pll_clock_inverters = 0x40;
+ else if ((pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V5_2)) {
+ if (pll_freq <= 1300000000ULL)
+ config->pll_clock_inverters = 0xa0;
+ else if (pll_freq <= 2500000000ULL)
+ config->pll_clock_inverters = 0x20;
+ else if (pll_freq <= 4000000000ULL)
+ config->pll_clock_inverters = 0x00;
+ else
+ config->pll_clock_inverters = 0x40;
+ } else {
+ if (pll_freq <= 1000000000ULL)
+ config->pll_clock_inverters = 0xa0;
+ else if (pll_freq <= 2500000000ULL)
+ config->pll_clock_inverters = 0x20;
+ else if (pll_freq <= 3020000000ULL)
+ config->pll_clock_inverters = 0x00;
+ else
+ config->pll_clock_inverters = 0x40;
+ }
config->decimal_div_start = dec;
config->frac_div_start = frac;
@@ -197,16 +216,32 @@ static void dsi_pll_config_hzindep_reg(struct dsi_pll_7nm *pll)
void __iomem *base = pll->phy->pll_base;
u8 analog_controls_five_1 = 0x01, vco_config_1 = 0x00;
- if (pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V4_1) {
+ if (!(pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_PRE_V4_1))
if (pll->vco_current_rate >= 3100000000ULL)
analog_controls_five_1 = 0x03;
+ if (pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V4_1) {
if (pll->vco_current_rate < 1520000000ULL)
vco_config_1 = 0x08;
else if (pll->vco_current_rate < 2990000000ULL)
vco_config_1 = 0x01;
}
+ if ((pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V4_2) ||
+ (pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V4_3)) {
+ if (pll->vco_current_rate < 1520000000ULL)
+ vco_config_1 = 0x08;
+ else if (pll->vco_current_rate >= 2990000000ULL)
+ vco_config_1 = 0x01;
+ }
+
+ if ((pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V5_2)) {
+ if (pll->vco_current_rate < 1557000000ULL)
+ vco_config_1 = 0x08;
+ else
+ vco_config_1 = 0x01;
+ }
+
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_FIVE_1,
analog_controls_five_1);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_VCO_CONFIG_1, vco_config_1);
@@ -231,9 +266,9 @@ static void dsi_pll_config_hzindep_reg(struct dsi_pll_7nm *pll)
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_PFILT, 0x2f);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_IFILT, 0x2a);
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_IFILT,
- pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V4_1 ? 0x3f : 0x22);
+ !(pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_PRE_V4_1) ? 0x3f : 0x22);
- if (pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V4_1) {
+ if (!(pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_PRE_V4_1)) {
dsi_phy_write(base + REG_DSI_7nm_PHY_PLL_PERF_OPTIMIZE, 0x22);
if (pll->slave)
dsi_phy_write(pll->slave->phy->pll_base + REG_DSI_7nm_PHY_PLL_PERF_OPTIMIZE, 0x22);
@@ -788,7 +823,7 @@ static void dsi_phy_hw_v4_0_lane_settings(struct msm_dsi_phy *phy)
const u8 *tx_dctrl = tx_dctrl_0;
void __iomem *lane_base = phy->lane_base;
- if (phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V4_1)
+ if (!(phy->cfg->quirks & DSI_PHY_7NM_QUIRK_PRE_V4_1))
tx_dctrl = tx_dctrl_1;
/* Strength ctrl settings */
@@ -844,6 +879,13 @@ static int dsi_7nm_phy_enable(struct msm_dsi_phy *phy,
if (dsi_phy_hw_v4_0_is_pll_on(phy))
pr_warn("PLL turned on before configuring PHY\n");
+ /* Request for REFGEN READY */
+ if ((phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V4_3) ||
+ (phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V5_2)) {
+ dsi_phy_write(phy->base + REG_DSI_7nm_PHY_CMN_GLBL_DIGTOP_SPARE10, 0x1);
+ udelay(500);
+ }
+
/* wait for REFGEN READY */
ret = readl_poll_timeout_atomic(base + REG_DSI_7nm_PHY_CMN_PHY_STATUS,
status, (status & BIT(0)),
@@ -858,23 +900,64 @@ static int dsi_7nm_phy_enable(struct msm_dsi_phy *phy,
/* Alter PHY configurations if data rate less than 1.5GHZ*/
less_than_1500_mhz = (clk_req->bitclk_rate <= 1500000000);
- if (phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V4_1) {
+ glbl_str_swi_cal_sel_ctrl = 0x00;
+ if (phy->cphy_mode) {
+ vreg_ctrl_0 = 0x51;
+ vreg_ctrl_1 = 0x55;
+ glbl_hstx_str_ctrl_0 = 0x00;
+ glbl_pemph_ctrl_0 = 0x11;
+ lane_ctrl0 = 0x17;
+ } else {
vreg_ctrl_0 = less_than_1500_mhz ? 0x53 : 0x52;
+ vreg_ctrl_1 = 0x5c;
+ glbl_hstx_str_ctrl_0 = 0x88;
+ glbl_pemph_ctrl_0 = 0x00;
+ lane_ctrl0 = 0x1f;
+ }
+
+ if ((phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V5_2)) {
if (phy->cphy_mode) {
+ vreg_ctrl_0 = 0x45;
+ vreg_ctrl_1 = 0x45;
+ glbl_rescode_top_ctrl = 0x00;
+ glbl_rescode_bot_ctrl = 0x00;
+ } else {
+ vreg_ctrl_0 = 0x44;
+ vreg_ctrl_1 = 0x19;
+ glbl_rescode_top_ctrl = less_than_1500_mhz ? 0x3c : 0x03;
+ glbl_rescode_bot_ctrl = less_than_1500_mhz ? 0x38 : 0x3c;
+ }
+ } else if ((phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V4_3)) {
+ if (phy->cphy_mode) {
+ glbl_rescode_top_ctrl = less_than_1500_mhz ? 0x3d : 0x01;
+ glbl_rescode_bot_ctrl = less_than_1500_mhz ? 0x38 : 0x3b;
+ } else {
+ glbl_rescode_top_ctrl = less_than_1500_mhz ? 0x3d : 0x01;
+ glbl_rescode_bot_ctrl = less_than_1500_mhz ? 0x38 : 0x39;
+ }
+ } else if (phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V4_2) {
+ if (phy->cphy_mode) {
+ glbl_rescode_top_ctrl = less_than_1500_mhz ? 0x3d : 0x01;
+ glbl_rescode_bot_ctrl = less_than_1500_mhz ? 0x38 : 0x3b;
+ } else {
+ glbl_rescode_top_ctrl = less_than_1500_mhz ? 0x3c : 0x00;
+ glbl_rescode_bot_ctrl = less_than_1500_mhz ? 0x38 : 0x39;
+ }
+ } else if (phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V4_1) {
+ if (phy->cphy_mode) {
+ glbl_hstx_str_ctrl_0 = 0x88;
glbl_rescode_top_ctrl = 0x00;
glbl_rescode_bot_ctrl = 0x3c;
} else {
glbl_rescode_top_ctrl = less_than_1500_mhz ? 0x3d : 0x00;
glbl_rescode_bot_ctrl = less_than_1500_mhz ? 0x39 : 0x3c;
}
- glbl_str_swi_cal_sel_ctrl = 0x00;
- glbl_hstx_str_ctrl_0 = 0x88;
} else {
- vreg_ctrl_0 = less_than_1500_mhz ? 0x5B : 0x59;
if (phy->cphy_mode) {
glbl_str_swi_cal_sel_ctrl = 0x03;
glbl_hstx_str_ctrl_0 = 0x66;
} else {
+ vreg_ctrl_0 = less_than_1500_mhz ? 0x5B : 0x59;
glbl_str_swi_cal_sel_ctrl = less_than_1500_mhz ? 0x03 : 0x00;
glbl_hstx_str_ctrl_0 = less_than_1500_mhz ? 0x66 : 0x88;
}
@@ -882,17 +965,6 @@ static int dsi_7nm_phy_enable(struct msm_dsi_phy *phy,
glbl_rescode_bot_ctrl = 0x3c;
}
- if (phy->cphy_mode) {
- vreg_ctrl_0 = 0x51;
- vreg_ctrl_1 = 0x55;
- glbl_pemph_ctrl_0 = 0x11;
- lane_ctrl0 = 0x17;
- } else {
- vreg_ctrl_1 = 0x5c;
- glbl_pemph_ctrl_0 = 0x00;
- lane_ctrl0 = 0x1f;
- }
-
/* de-assert digital and pll power down */
data = BIT(6) | BIT(5);
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_CTRL_0, data);
@@ -904,9 +976,8 @@ static int dsi_7nm_phy_enable(struct msm_dsi_phy *phy,
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_RBUF_CTRL, 0x00);
/* program CMN_CTRL_4 for minor_ver 2 chipsets*/
- data = dsi_phy_read(base + REG_DSI_7nm_PHY_CMN_REVISION_ID0);
- data = data & (0xf0);
- if (data == 0x20)
+ if ((phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V5_2) ||
+ (dsi_phy_read(base + REG_DSI_7nm_PHY_CMN_REVISION_ID0) & (0xf0)) == 0x20)
dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_CTRL_4, 0x04);
/* Configure PHY lane swap (TODO: we need to calculate this) */
@@ -1017,6 +1088,16 @@ static void dsi_7nm_phy_disable(struct msm_dsi_phy *phy)
pr_warn("Turning OFF PHY while PLL is on\n");
dsi_phy_hw_v4_0_config_lpcdrx(phy, false);
+
+ /* Turn off REFGEN Vote */
+ if ((phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V4_3) ||
+ (phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V5_2)) {
+ dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_GLBL_DIGTOP_SPARE10, 0x0);
+ wmb();
+ /* Delay to ensure HW removes vote before PHY shut down */
+ udelay(2);
+ }
+
data = dsi_phy_read(base + REG_DSI_7nm_PHY_CMN_CTRL_0);
/* disable all lanes */
@@ -1040,6 +1121,14 @@ static const struct regulator_bulk_data dsi_phy_7nm_37750uA_regulators[] = {
{ .supply = "vdds", .init_load_uA = 37550 },
};
+static const struct regulator_bulk_data dsi_phy_7nm_97800uA_regulators[] = {
+ { .supply = "vdds", .init_load_uA = 97800 },
+};
+
+static const struct regulator_bulk_data dsi_phy_7nm_98400uA_regulators[] = {
+ { .supply = "vdds", .init_load_uA = 98400 },
+};
+
const struct msm_dsi_phy_cfg dsi_phy_7nm_cfgs = {
.has_phy_lane = true,
.regulator_data = dsi_phy_7nm_36mA_regulators,
@@ -1063,6 +1152,26 @@ const struct msm_dsi_phy_cfg dsi_phy_7nm_cfgs = {
.quirks = DSI_PHY_7NM_QUIRK_V4_1,
};
+const struct msm_dsi_phy_cfg dsi_phy_7nm_6375_cfgs = {
+ .has_phy_lane = true,
+ .ops = {
+ .enable = dsi_7nm_phy_enable,
+ .disable = dsi_7nm_phy_disable,
+ .pll_init = dsi_pll_7nm_init,
+ .save_pll_state = dsi_7nm_pll_save_state,
+ .restore_pll_state = dsi_7nm_pll_restore_state,
+ },
+ .min_pll_rate = 600000000UL,
+#ifdef CONFIG_64BIT
+ .max_pll_rate = 5000000000ULL,
+#else
+ .max_pll_rate = ULONG_MAX,
+#endif
+ .io_start = { 0x5e94400 },
+ .num_dsi_phy = 1,
+ .quirks = DSI_PHY_7NM_QUIRK_V4_1,
+};
+
const struct msm_dsi_phy_cfg dsi_phy_7nm_8150_cfgs = {
.has_phy_lane = true,
.regulator_data = dsi_phy_7nm_36mA_regulators,
@@ -1079,6 +1188,7 @@ const struct msm_dsi_phy_cfg dsi_phy_7nm_8150_cfgs = {
.max_pll_rate = 3500000000UL,
.io_start = { 0xae94400, 0xae96400 },
.num_dsi_phy = 2,
+ .quirks = DSI_PHY_7NM_QUIRK_PRE_V4_1,
};
const struct msm_dsi_phy_cfg dsi_phy_7nm_7280_cfgs = {
@@ -1102,3 +1212,72 @@ const struct msm_dsi_phy_cfg dsi_phy_7nm_7280_cfgs = {
.num_dsi_phy = 1,
.quirks = DSI_PHY_7NM_QUIRK_V4_1,
};
+
+const struct msm_dsi_phy_cfg dsi_phy_5nm_8350_cfgs = {
+ .has_phy_lane = true,
+ .regulator_data = dsi_phy_7nm_37750uA_regulators,
+ .num_regulators = ARRAY_SIZE(dsi_phy_7nm_37750uA_regulators),
+ .ops = {
+ .enable = dsi_7nm_phy_enable,
+ .disable = dsi_7nm_phy_disable,
+ .pll_init = dsi_pll_7nm_init,
+ .save_pll_state = dsi_7nm_pll_save_state,
+ .restore_pll_state = dsi_7nm_pll_restore_state,
+ .set_continuous_clock = dsi_7nm_set_continuous_clock,
+ },
+ .min_pll_rate = 600000000UL,
+#ifdef CONFIG_64BIT
+ .max_pll_rate = 5000000000UL,
+#else
+ .max_pll_rate = ULONG_MAX,
+#endif
+ .io_start = { 0xae94400, 0xae96400 },
+ .num_dsi_phy = 2,
+ .quirks = DSI_PHY_7NM_QUIRK_V4_2,
+};
+
+const struct msm_dsi_phy_cfg dsi_phy_5nm_8450_cfgs = {
+ .has_phy_lane = true,
+ .regulator_data = dsi_phy_7nm_97800uA_regulators,
+ .num_regulators = ARRAY_SIZE(dsi_phy_7nm_97800uA_regulators),
+ .ops = {
+ .enable = dsi_7nm_phy_enable,
+ .disable = dsi_7nm_phy_disable,
+ .pll_init = dsi_pll_7nm_init,
+ .save_pll_state = dsi_7nm_pll_save_state,
+ .restore_pll_state = dsi_7nm_pll_restore_state,
+ .set_continuous_clock = dsi_7nm_set_continuous_clock,
+ },
+ .min_pll_rate = 600000000UL,
+#ifdef CONFIG_64BIT
+ .max_pll_rate = 5000000000UL,
+#else
+ .max_pll_rate = ULONG_MAX,
+#endif
+ .io_start = { 0xae94400, 0xae96400 },
+ .num_dsi_phy = 2,
+ .quirks = DSI_PHY_7NM_QUIRK_V4_3,
+};
+
+const struct msm_dsi_phy_cfg dsi_phy_4nm_8550_cfgs = {
+ .has_phy_lane = true,
+ .regulator_data = dsi_phy_7nm_98400uA_regulators,
+ .num_regulators = ARRAY_SIZE(dsi_phy_7nm_98400uA_regulators),
+ .ops = {
+ .enable = dsi_7nm_phy_enable,
+ .disable = dsi_7nm_phy_disable,
+ .pll_init = dsi_pll_7nm_init,
+ .save_pll_state = dsi_7nm_pll_save_state,
+ .restore_pll_state = dsi_7nm_pll_restore_state,
+ .set_continuous_clock = dsi_7nm_set_continuous_clock,
+ },
+ .min_pll_rate = 600000000UL,
+#ifdef CONFIG_64BIT
+ .max_pll_rate = 5000000000UL,
+#else
+ .max_pll_rate = ULONG_MAX,
+#endif
+ .io_start = { 0xae95000, 0xae97000 },
+ .num_dsi_phy = 2,
+ .quirks = DSI_PHY_7NM_QUIRK_V5_2,
+};
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index 97372bb241d8..3132105a2a43 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -120,6 +120,10 @@ static int msm_hdmi_init(struct hdmi *hdmi)
int ret;
hdmi->workq = alloc_ordered_workqueue("msm_hdmi", 0);
+ if (!hdmi->workq) {
+ ret = -ENOMEM;
+ goto fail;
+ }
hdmi->i2c = msm_hdmi_i2c_init(hdmi);
if (IS_ERR(hdmi->i2c)) {
@@ -203,8 +207,6 @@ int msm_hdmi_modeset_init(struct hdmi *hdmi,
goto fail;
}
- drm_bridge_connector_enable_hpd(hdmi->connector);
-
ret = msm_hdmi_hpd_enable(hdmi->bridge);
if (ret < 0) {
DRM_DEV_ERROR(&hdmi->pdev->dev, "failed to enable HPD: %d\n", ret);
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c b/drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c
index be4b0b67e797..cb35a297afbd 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_pll_8960.c
@@ -406,14 +406,14 @@ static const struct clk_ops hdmi_pll_ops = {
.set_rate = hdmi_pll_set_rate,
};
-static const char * const hdmi_pll_parents[] = {
- "pxo",
+static const struct clk_parent_data hdmi_pll_parents[] = {
+ { .fw_name = "pxo", .name = "pxo_board" },
};
static struct clk_init_data pll_init = {
.name = "hdmi_pll",
.ops = &hdmi_pll_ops,
- .parent_names = hdmi_pll_parents,
+ .parent_data = hdmi_pll_parents,
.num_parents = ARRAY_SIZE(hdmi_pll_parents),
.flags = CLK_IGNORE_UNUSED,
};
@@ -422,8 +422,7 @@ int msm_hdmi_pll_8960_init(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct hdmi_pll_8960 *pll;
- struct clk *clk;
- int i;
+ int i, ret;
/* sanity check: */
for (i = 0; i < (ARRAY_SIZE(freqtbl) - 1); i++)
@@ -443,10 +442,16 @@ int msm_hdmi_pll_8960_init(struct platform_device *pdev)
pll->pdev = pdev;
pll->clk_hw.init = &pll_init;
- clk = devm_clk_register(dev, &pll->clk_hw);
- if (IS_ERR(clk)) {
+ ret = devm_clk_hw_register(dev, &pll->clk_hw);
+ if (ret < 0) {
DRM_DEV_ERROR(dev, "failed to register pll clock\n");
- return -EINVAL;
+ return ret;
+ }
+
+ ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, &pll->clk_hw);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "%s: failed to register clk provider: %d\n", __func__, ret);
+ return ret;
}
return 0;
diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c
index 95f4374ae21c..d6ecff0ab618 100644
--- a/drivers/gpu/drm/msm/msm_debugfs.c
+++ b/drivers/gpu/drm/msm/msm_debugfs.c
@@ -305,6 +305,7 @@ void msm_debugfs_init(struct drm_minor *minor)
{
struct drm_device *dev = minor->dev;
struct msm_drm_private *priv = dev->dev_private;
+ struct dentry *gpu_devfreq;
drm_debugfs_create_files(msm_debugfs_list,
ARRAY_SIZE(msm_debugfs_list),
@@ -325,6 +326,17 @@ void msm_debugfs_init(struct drm_minor *minor)
debugfs_create_file("shrink", S_IRWXU, minor->debugfs_root,
dev, &shrink_fops);
+ gpu_devfreq = debugfs_create_dir("devfreq", minor->debugfs_root);
+
+ debugfs_create_bool("idle_clamp",0600, gpu_devfreq,
+ &priv->gpu_clamp_to_idle);
+
+ debugfs_create_u32("upthreshold",0600, gpu_devfreq,
+ &priv->gpu_devfreq_config.upthreshold);
+
+ debugfs_create_u32("downdifferential",0600, gpu_devfreq,
+ &priv->gpu_devfreq_config.downdifferential);
+
if (priv->kms && priv->kms->funcs->debugfs_init)
priv->kms->funcs->debugfs_init(priv->kms, minor);
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 45e81eb148a8..aca48c868c14 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -45,9 +45,10 @@
* - 1.7.0 - Add MSM_PARAM_SUSPENDS to access suspend count
* - 1.8.0 - Add MSM_BO_CACHED_COHERENT for supported GPUs (a6xx)
* - 1.9.0 - Add MSM_SUBMIT_FENCE_SN_IN
+ * - 1.10.0 - Add MSM_SUBMIT_BO_NO_IMPLICIT
*/
#define MSM_VERSION_MAJOR 1
-#define MSM_VERSION_MINOR 9
+#define MSM_VERSION_MINOR 10
#define MSM_VERSION_PATCHLEVEL 0
static const struct drm_mode_config_funcs mode_config_funcs = {
@@ -149,6 +150,9 @@ static void msm_irq_uninstall(struct drm_device *dev)
struct msm_drm_private *priv = dev->dev_private;
struct msm_kms *kms = priv->kms;
+ if (!priv->kms)
+ return;
+
kms->funcs->irq_uninstall(kms);
if (kms->irq_requested)
free_irq(kms->irq, dev);
@@ -266,8 +270,6 @@ static int msm_drm_uninit(struct device *dev)
component_unbind_all(dev, ddev);
ddev->dev_private = NULL;
- drm_dev_put(ddev);
-
destroy_workqueue(priv->wq);
return 0;
@@ -418,6 +420,8 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
priv->dev = ddev;
priv->wq = alloc_ordered_workqueue("msm", 0);
+ if (!priv->wq)
+ return -ENOMEM;
INIT_LIST_HEAD(&priv->objects);
mutex_init(&priv->obj_lock);
@@ -440,12 +444,12 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
ret = msm_init_vram(ddev);
if (ret)
- return ret;
+ goto err_drm_dev_put;
/* Bind all our sub-components: */
ret = component_bind_all(dev, ddev);
if (ret)
- return ret;
+ goto err_drm_dev_put;
dma_set_max_seg_size(dev, UINT_MAX);
@@ -491,7 +495,7 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
if (IS_ERR(priv->event_thread[i].worker)) {
ret = PTR_ERR(priv->event_thread[i].worker);
DRM_DEV_ERROR(dev, "failed to create crtc_event kthread\n");
- ret = PTR_ERR(priv->event_thread[i].worker);
+ priv->event_thread[i].worker = NULL;
goto err_msm_uninit;
}
@@ -540,6 +544,8 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
err_msm_uninit:
msm_drm_uninit(dev);
+err_drm_dev_put:
+ drm_dev_put(ddev);
return ret;
}
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index d4e0ef608950..9f0c184b02a0 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -11,6 +11,7 @@
#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/cpufreq.h>
+#include <linux/devfreq.h>
#include <linux/module.h>
#include <linux/component.h>
#include <linux/platform_device.h>
@@ -61,6 +62,7 @@ enum msm_dp_controller {
MSM_DP_CONTROLLER_0,
MSM_DP_CONTROLLER_1,
MSM_DP_CONTROLLER_2,
+ MSM_DP_CONTROLLER_3,
MSM_DP_CONTROLLER_COUNT,
};
@@ -82,14 +84,12 @@ enum msm_event_wait {
/**
* struct msm_display_topology - defines a display topology pipeline
* @num_lm: number of layer mixers used
- * @num_enc: number of compression encoder blocks used
* @num_intf: number of interfaces the panel is mounted on
* @num_dspp: number of dspp blocks used
* @num_dsc: number of Display Stream Compression (DSC) blocks used
*/
struct msm_display_topology {
u32 num_lm;
- u32 num_enc;
u32 num_intf;
u32 num_dspp;
u32 num_dsc;
@@ -233,6 +233,14 @@ struct msm_drm_private {
*/
unsigned int hangcheck_period;
+ /** gpu_devfreq_config: Devfreq tuning config for the GPU. */
+ struct devfreq_simple_ondemand_data gpu_devfreq_config;
+
+ /**
+ * gpu_clamp_to_idle: Enable clamping to idle freq when inactive
+ */
+ bool gpu_clamp_to_idle;
+
/**
* disable_err_irq:
*
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index 31e1e30cb52a..c1356aff87da 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -142,11 +142,11 @@ struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev)
fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL);
if (!fbdev)
- goto fail;
+ return NULL;
helper = &fbdev->base;
- drm_fb_helper_prepare(dev, helper, &msm_fb_helper_funcs);
+ drm_fb_helper_prepare(dev, helper, 32, &msm_fb_helper_funcs);
ret = drm_fb_helper_init(dev, helper);
if (ret) {
@@ -159,7 +159,7 @@ struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev)
if (ret)
goto fini;
- ret = drm_fb_helper_initial_config(helper, 32);
+ ret = drm_fb_helper_initial_config(helper);
if (ret)
goto fini;
@@ -170,6 +170,7 @@ struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev)
fini:
drm_fb_helper_fini(helper);
fail:
+ drm_fb_helper_unprepare(helper);
kfree(fbdev);
return NULL;
}
@@ -196,6 +197,7 @@ void msm_fbdev_free(struct drm_device *dev)
drm_framebuffer_remove(fbdev->fb);
}
+ drm_fb_helper_unprepare(helper);
kfree(fbdev);
priv->fbdev = NULL;
diff --git a/drivers/gpu/drm/msm/msm_fence.c b/drivers/gpu/drm/msm/msm_fence.c
index a47e5837c528..56641408ea74 100644
--- a/drivers/gpu/drm/msm/msm_fence.c
+++ b/drivers/gpu/drm/msm/msm_fence.c
@@ -22,7 +22,7 @@ msm_fence_context_alloc(struct drm_device *dev, volatile uint32_t *fenceptr,
return ERR_PTR(-ENOMEM);
fctx->dev = dev;
- strncpy(fctx->name, name, sizeof(fctx->name));
+ strscpy(fctx->name, name, sizeof(fctx->name));
fctx->context = dma_fence_context_alloc(1);
fctx->index = index++;
fctx->fenceptr = fenceptr;
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 1dee0d18abbb..c2fb98a94bc3 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -1012,7 +1012,7 @@ static int msm_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
- vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
+ vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
vma->vm_page_prot = msm_gem_pgprot(msm_obj, vm_get_page_prot(vma->vm_flags));
return 0;
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 73a2ca122c57..be4bf77103cd 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -209,6 +209,10 @@ static int submit_lookup_cmds(struct msm_gem_submit *submit,
goto out;
}
submit->cmd[i].relocs = kmalloc(sz, GFP_KERNEL);
+ if (!submit->cmd[i].relocs) {
+ ret = -ENOMEM;
+ goto out;
+ }
ret = copy_from_user(submit->cmd[i].relocs, userptr, sz);
if (ret) {
ret = -EFAULT;
@@ -334,9 +338,20 @@ static int submit_fence_sync(struct msm_gem_submit *submit, bool no_implicit)
if (ret)
return ret;
+ /* If userspace has determined that explicit fencing is
+ * used, it can disable implicit sync on the entire
+ * submit:
+ */
if (no_implicit)
continue;
+ /* Otherwise userspace can ask for implicit sync to be
+ * disabled on specific buffers. This is useful for internal
+ * usermode driver managed buffers, suballocation, etc.
+ */
+ if (submit->bos[i].flags & MSM_SUBMIT_BO_NO_IMPLICIT)
+ continue;
+
ret = drm_sched_job_add_implicit_dependencies(&submit->base,
obj,
write);
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index 732295e25683..fc1c0d8611a8 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -109,11 +109,15 @@ struct msm_gpu_devfreq {
struct mutex lock;
/**
- * idle_constraint:
+ * idle_freq:
*
- * A PM QoS constraint to limit max freq while the GPU is idle.
+ * Shadow frequency used while the GPU is idle. From the PoV of
+ * the devfreq governor, we are continuing to sample busyness and
+ * adjust frequency while the GPU is idle, but we use this shadow
+ * value as the GPU is actually clamped to minimum frequency while
+ * it is inactive.
*/
- struct dev_pm_qos_request idle_freq;
+ unsigned long idle_freq;
/**
* boost_constraint:
@@ -135,8 +139,6 @@ struct msm_gpu_devfreq {
/** idle_time: Time of last transition to idle: */
ktime_t idle_time;
- struct devfreq_dev_status average_status;
-
/**
* idle_work:
*
@@ -275,9 +277,6 @@ struct msm_gpu {
struct msm_gpu_state *crashstate;
- /* Enable clamping to idle freq when inactive: */
- bool clamp_to_idle;
-
/* True if the hardware supports expanded apriv (a650 and newer) */
bool hw_apriv;
diff --git a/drivers/gpu/drm/msm/msm_gpu_devfreq.c b/drivers/gpu/drm/msm/msm_gpu_devfreq.c
index 85c443a37e4e..e27dbf12b5e8 100644
--- a/drivers/gpu/drm/msm/msm_gpu_devfreq.c
+++ b/drivers/gpu/drm/msm/msm_gpu_devfreq.c
@@ -33,6 +33,16 @@ static int msm_devfreq_target(struct device *dev, unsigned long *freq,
trace_msm_gpu_freq_change(dev_pm_opp_get_freq(opp));
+ /*
+ * If the GPU is idle, devfreq is not aware, so just stash
+ * the new target freq (to use when we return to active)
+ */
+ if (df->idle_freq) {
+ df->idle_freq = *freq;
+ dev_pm_opp_put(opp);
+ return 0;
+ }
+
if (gpu->funcs->gpu_set_freq) {
mutex_lock(&df->lock);
gpu->funcs->gpu_set_freq(gpu, opp, df->suspended);
@@ -48,15 +58,26 @@ static int msm_devfreq_target(struct device *dev, unsigned long *freq,
static unsigned long get_freq(struct msm_gpu *gpu)
{
+ struct msm_gpu_devfreq *df = &gpu->devfreq;
+
+ /*
+ * If the GPU is idle, use the shadow/saved freq to avoid
+ * confusing devfreq (which is unaware that we are switching
+ * to lowest freq until the device is active again)
+ */
+ if (df->idle_freq)
+ return df->idle_freq;
+
if (gpu->funcs->gpu_get_freq)
return gpu->funcs->gpu_get_freq(gpu);
return clk_get_rate(gpu->core_clk);
}
-static void get_raw_dev_status(struct msm_gpu *gpu,
+static int msm_devfreq_get_dev_status(struct device *dev,
struct devfreq_dev_status *status)
{
+ struct msm_gpu *gpu = dev_to_gpu(dev);
struct msm_gpu_devfreq *df = &gpu->devfreq;
u64 busy_cycles, busy_time;
unsigned long sample_rate;
@@ -72,7 +93,7 @@ static void get_raw_dev_status(struct msm_gpu *gpu,
if (df->suspended) {
mutex_unlock(&df->lock);
status->busy_time = 0;
- return;
+ return 0;
}
busy_cycles = gpu->funcs->gpu_busy(gpu, &sample_rate);
@@ -87,71 +108,6 @@ static void get_raw_dev_status(struct msm_gpu *gpu,
busy_time = ~0LU;
status->busy_time = busy_time;
-}
-
-static void update_average_dev_status(struct msm_gpu *gpu,
- const struct devfreq_dev_status *raw)
-{
- struct msm_gpu_devfreq *df = &gpu->devfreq;
- const u32 polling_ms = df->devfreq->profile->polling_ms;
- const u32 max_history_ms = polling_ms * 11 / 10;
- struct devfreq_dev_status *avg = &df->average_status;
- u64 avg_freq;
-
- /* simple_ondemand governor interacts poorly with gpu->clamp_to_idle.
- * When we enforce the constraint on idle, it calls get_dev_status
- * which would normally reset the stats. When we remove the
- * constraint on active, it calls get_dev_status again where busy_time
- * would be 0.
- *
- * To remedy this, we always return the average load over the past
- * polling_ms.
- */
-
- /* raw is longer than polling_ms or avg has no history */
- if (div_u64(raw->total_time, USEC_PER_MSEC) >= polling_ms ||
- !avg->total_time) {
- *avg = *raw;
- return;
- }
-
- /* Truncate the oldest history first.
- *
- * Because we keep the history with a single devfreq_dev_status,
- * rather than a list of devfreq_dev_status, we have to assume freq
- * and load are the same over avg->total_time. We can scale down
- * avg->busy_time and avg->total_time by the same factor to drop
- * history.
- */
- if (div_u64(avg->total_time + raw->total_time, USEC_PER_MSEC) >=
- max_history_ms) {
- const u32 new_total_time = polling_ms * USEC_PER_MSEC -
- raw->total_time;
- avg->busy_time = div_u64(
- mul_u32_u32(avg->busy_time, new_total_time),
- avg->total_time);
- avg->total_time = new_total_time;
- }
-
- /* compute the average freq over avg->total_time + raw->total_time */
- avg_freq = mul_u32_u32(avg->current_frequency, avg->total_time);
- avg_freq += mul_u32_u32(raw->current_frequency, raw->total_time);
- do_div(avg_freq, avg->total_time + raw->total_time);
-
- avg->current_frequency = avg_freq;
- avg->busy_time += raw->busy_time;
- avg->total_time += raw->total_time;
-}
-
-static int msm_devfreq_get_dev_status(struct device *dev,
- struct devfreq_dev_status *status)
-{
- struct msm_gpu *gpu = dev_to_gpu(dev);
- struct devfreq_dev_status raw;
-
- get_raw_dev_status(gpu, &raw);
- update_average_dev_status(gpu, &raw);
- *status = gpu->devfreq.average_status;
return 0;
}
@@ -183,16 +139,23 @@ static bool has_devfreq(struct msm_gpu *gpu)
void msm_devfreq_init(struct msm_gpu *gpu)
{
struct msm_gpu_devfreq *df = &gpu->devfreq;
+ struct msm_drm_private *priv = gpu->dev->dev_private;
/* We need target support to do devfreq */
if (!gpu->funcs->gpu_busy)
return;
+ /*
+ * Setup default values for simple_ondemand governor tuning. We
+ * want to throttle up at 50% load for the double-buffer case,
+ * where due to stalling waiting for vblank we could get stuck
+ * at (for ex) 30fps at 50% utilization.
+ */
+ priv->gpu_devfreq_config.upthreshold = 50;
+ priv->gpu_devfreq_config.downdifferential = 10;
+
mutex_init(&df->lock);
- dev_pm_qos_add_request(&gpu->pdev->dev, &df->idle_freq,
- DEV_PM_QOS_MAX_FREQUENCY,
- PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE);
dev_pm_qos_add_request(&gpu->pdev->dev, &df->boost_freq,
DEV_PM_QOS_MIN_FREQUENCY, 0);
@@ -209,11 +172,10 @@ void msm_devfreq_init(struct msm_gpu *gpu)
df->devfreq = devm_devfreq_add_device(&gpu->pdev->dev,
&msm_devfreq_profile, DEVFREQ_GOV_SIMPLE_ONDEMAND,
- NULL);
+ &priv->gpu_devfreq_config);
if (IS_ERR(df->devfreq)) {
DRM_DEV_ERROR(&gpu->pdev->dev, "Couldn't initialize GPU devfreq\n");
- dev_pm_qos_remove_request(&df->idle_freq);
dev_pm_qos_remove_request(&df->boost_freq);
df->devfreq = NULL;
return;
@@ -255,7 +217,6 @@ void msm_devfreq_cleanup(struct msm_gpu *gpu)
devfreq_cooling_unregister(gpu->cooling);
dev_pm_qos_remove_request(&df->boost_freq);
- dev_pm_qos_remove_request(&df->idle_freq);
}
void msm_devfreq_resume(struct msm_gpu *gpu)
@@ -328,6 +289,7 @@ void msm_devfreq_active(struct msm_gpu *gpu)
{
struct msm_gpu_devfreq *df = &gpu->devfreq;
unsigned int idle_time;
+ unsigned long target_freq;
if (!has_devfreq(gpu))
return;
@@ -337,8 +299,28 @@ void msm_devfreq_active(struct msm_gpu *gpu)
*/
cancel_idle_work(df);
+ /*
+ * Hold devfreq lock to synchronize with get_dev_status()/
+ * target() callbacks
+ */
+ mutex_lock(&df->devfreq->lock);
+
+ target_freq = df->idle_freq;
+
idle_time = ktime_to_ms(ktime_sub(ktime_get(), df->idle_time));
+ df->idle_freq = 0;
+
+ /*
+ * We could have become active again before the idle work had a
+ * chance to run, in which case the df->idle_freq would have
+ * still been zero. In this case, no need to change freq.
+ */
+ if (target_freq)
+ msm_devfreq_target(&gpu->pdev->dev, &target_freq, 0);
+
+ mutex_unlock(&df->devfreq->lock);
+
/*
* If we've been idle for a significant fraction of a polling
* interval, then we won't meet the threshold of busyness for
@@ -347,9 +329,6 @@ void msm_devfreq_active(struct msm_gpu *gpu)
if (idle_time > msm_devfreq_profile.polling_ms) {
msm_devfreq_boost(gpu, 2);
}
-
- dev_pm_qos_update_request(&df->idle_freq,
- PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE);
}
@@ -358,11 +337,24 @@ static void msm_devfreq_idle_work(struct kthread_work *work)
struct msm_gpu_devfreq *df = container_of(work,
struct msm_gpu_devfreq, idle_work.work);
struct msm_gpu *gpu = container_of(df, struct msm_gpu, devfreq);
+ struct msm_drm_private *priv = gpu->dev->dev_private;
+ unsigned long idle_freq, target_freq = 0;
+
+ /*
+ * Hold devfreq lock to synchronize with get_dev_status()/
+ * target() callbacks
+ */
+ mutex_lock(&df->devfreq->lock);
+
+ idle_freq = get_freq(gpu);
+
+ if (priv->gpu_clamp_to_idle)
+ msm_devfreq_target(&gpu->pdev->dev, &target_freq, 0);
df->idle_time = ktime_get();
+ df->idle_freq = idle_freq;
- if (gpu->clamp_to_idle)
- dev_pm_qos_update_request(&df->idle_freq, 0);
+ mutex_unlock(&df->devfreq->lock);
}
void msm_devfreq_idle(struct msm_gpu *gpu)
diff --git a/drivers/gpu/drm/msm/msm_mdss.c b/drivers/gpu/drm/msm/msm_mdss.c
index 2527afef9c19..02646e4bb4cd 100644
--- a/drivers/gpu/drm/msm/msm_mdss.c
+++ b/drivers/gpu/drm/msm/msm_mdss.c
@@ -286,9 +286,21 @@ static int msm_mdss_enable(struct msm_mdss *msm_mdss)
/* UBWC_2_0 */
msm_mdss_setup_ubwc_dec_20(msm_mdss, 0x11f);
break;
+ case DPU_HW_VER_700:
+ /* TODO: highest_bank_bit = 2 for LP_DDR4 */
+ msm_mdss_setup_ubwc_dec_40(msm_mdss, UBWC_4_0, 6, 1, 3, 1);
+ break;
case DPU_HW_VER_720:
msm_mdss_setup_ubwc_dec_40(msm_mdss, UBWC_3_0, 6, 1, 1, 1);
break;
+ case DPU_HW_VER_800:
+ msm_mdss_setup_ubwc_dec_40(msm_mdss, UBWC_4_0, 6, 1, 2, 1);
+ break;
+ case DPU_HW_VER_810:
+ case DPU_HW_VER_900:
+ /* TODO: highest_bank_bit = 2 for LP_DDR4 */
+ msm_mdss_setup_ubwc_dec_40(msm_mdss, UBWC_4_0, 6, 1, 3, 1);
+ break;
}
return ret;
@@ -515,9 +527,13 @@ static const struct of_device_id mdss_dt_match[] = {
{ .compatible = "qcom,sc7180-mdss" },
{ .compatible = "qcom,sc7280-mdss" },
{ .compatible = "qcom,sc8180x-mdss" },
+ { .compatible = "qcom,sc8280xp-mdss" },
{ .compatible = "qcom,sm6115-mdss" },
{ .compatible = "qcom,sm8150-mdss" },
{ .compatible = "qcom,sm8250-mdss" },
+ { .compatible = "qcom,sm8350-mdss" },
+ { .compatible = "qcom,sm8450-mdss" },
+ { .compatible = "qcom,sm8550-mdss" },
{}
};
MODULE_DEVICE_TABLE(of, mdss_dt_match);
diff --git a/drivers/gpu/drm/mxsfb/Kconfig b/drivers/gpu/drm/mxsfb/Kconfig
index 116f8168bda4..518b53345354 100644
--- a/drivers/gpu/drm/mxsfb/Kconfig
+++ b/drivers/gpu/drm/mxsfb/Kconfig
@@ -8,6 +8,7 @@ config DRM_MXSFB
tristate "i.MX (e)LCDIF LCD controller"
depends on DRM && OF
depends on COMMON_CLK
+ depends on ARCH_MXS || ARCH_MXC || COMPILE_TEST
select DRM_MXS
select DRM_KMS_HELPER
select DRM_GEM_DMA_HELPER
@@ -24,6 +25,7 @@ config DRM_IMX_LCDIF
tristate "i.MX LCDIFv3 LCD controller"
depends on DRM && OF
depends on COMMON_CLK
+ depends on ARCH_MXC || COMPILE_TEST
select DRM_MXS
select DRM_KMS_HELPER
select DRM_GEM_DMA_HELPER
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.c b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
index 810edea0a31e..b3ab86ad1b36 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_drv.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
@@ -78,14 +78,12 @@ static const struct mxsfb_devdata mxsfb_devdata[] = {
void mxsfb_enable_axi_clk(struct mxsfb_drm_private *mxsfb)
{
- if (mxsfb->clk_axi)
- clk_prepare_enable(mxsfb->clk_axi);
+ clk_prepare_enable(mxsfb->clk_axi);
}
void mxsfb_disable_axi_clk(struct mxsfb_drm_private *mxsfb)
{
- if (mxsfb->clk_axi)
- clk_disable_unprepare(mxsfb->clk_axi);
+ clk_disable_unprepare(mxsfb->clk_axi);
}
static struct drm_framebuffer *
@@ -235,9 +233,9 @@ static int mxsfb_load(struct drm_device *drm,
if (IS_ERR(mxsfb->clk))
return PTR_ERR(mxsfb->clk);
- mxsfb->clk_axi = devm_clk_get(drm->dev, "axi");
+ mxsfb->clk_axi = devm_clk_get_optional(drm->dev, "axi");
if (IS_ERR(mxsfb->clk_axi))
- mxsfb->clk_axi = NULL;
+ return PTR_ERR(mxsfb->clk_axi);
mxsfb->clk_disp_axi = devm_clk_get(drm->dev, "disp_axi");
if (IS_ERR(mxsfb->clk_disp_axi))
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
index 03d12caf9e26..a70bd65e1400 100644
--- a/drivers/gpu/drm/nouveau/Kconfig
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -10,6 +10,8 @@ config DRM_NOUVEAU
select DRM_KMS_HELPER
select DRM_TTM
select DRM_TTM_HELPER
+ select I2C
+ select I2C_ALGOBIT
select BACKLIGHT_CLASS_DEVICE if DRM_NOUVEAU_BACKLIGHT
select X86_PLATFORM_DEVICES if ACPI && X86
select ACPI_WMI if ACPI && X86
@@ -24,18 +26,6 @@ config DRM_NOUVEAU
help
Choose this option for open-source NVIDIA support.
-config NOUVEAU_LEGACY_CTX_SUPPORT
- bool "Nouveau legacy context support"
- depends on DRM_NOUVEAU
- select DRM_LEGACY
- default y
- help
- There was a version of the nouveau DDX that relied on legacy
- ctx ioctls not erroring out. But that was back in time a long
- ways, so offer a way to disable it now. For uapi compat with
- old nouveau ddx this should be on by default, but modern distros
- should consider turning it off.
-
config NOUVEAU_PLATFORM_DRIVER
bool "Nouveau (NVIDIA) SoC GPUs"
depends on DRM_NOUVEAU && ARCH_TEGRA
diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
index 0e0f117bc70b..a6f2e681bde9 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
@@ -23,8 +23,8 @@
* DEALINGS IN THE SOFTWARE.
*/
#include <drm/drm_crtc_helper.h>
-#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_vblank.h>
diff --git a/drivers/gpu/drm/nouveau/dispnv04/dac.c b/drivers/gpu/drm/nouveau/dispnv04/dac.c
index 22d10f328559..d6b8e0cce2ac 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/dac.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/dac.c
@@ -24,7 +24,7 @@
* DEALINGS IN THE SOFTWARE.
*/
-#include <drm/drm_crtc_helper.h>
+#include <drm/drm_modeset_helper_vtables.h>
#include "nouveau_drv.h"
#include "nouveau_encoder.h"
diff --git a/drivers/gpu/drm/nouveau/dispnv04/dfp.c b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
index ce3d8c6ef000..d5b129dc623b 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/dfp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/dfp.c
@@ -24,8 +24,8 @@
* DEALINGS IN THE SOFTWARE.
*/
-#include <drm/drm_crtc_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_modeset_helper_vtables.h>
#include "nouveau_drv.h"
#include "nouveau_reg.h"
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvmodesnv17.c b/drivers/gpu/drm/nouveau/dispnv04/tvmodesnv17.c
index 2f6d2b6711ab..a3fedd226854 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvmodesnv17.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvmodesnv17.c
@@ -24,7 +24,6 @@
*
*/
-#include <drm/drm_crtc_helper.h>
#include "nouveau_drv.h"
#include "nouveau_encoder.h"
#include "nouveau_crtc.h"
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
index 3ba7b59580d5..de3ea731d6e6 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv04.c
@@ -30,7 +30,7 @@
#include "nouveau_connector.h"
#include "nouveau_crtc.h"
#include "hw.h"
-#include <drm/drm_crtc_helper.h>
+#include <drm/drm_modeset_helper_vtables.h>
#include <drm/i2c/ch7006.h>
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
index be28e7bd7490..670c9739e5e1 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
@@ -25,6 +25,7 @@
*/
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_probe_helper.h>
#include "nouveau_drv.h"
#include "nouveau_reg.h"
@@ -653,7 +654,7 @@ static int nv17_tv_create_resources(struct drm_encoder *encoder,
tv_enc->tv_norm = i;
}
- drm_mode_create_tv_properties(dev, num_tv_norms, nv17_tv_norm_names);
+ drm_mode_create_tv_properties_legacy(dev, num_tv_norms, nv17_tv_norm_names);
drm_object_attach_property(&connector->base,
conf->tv_select_subconnector_property,
@@ -662,7 +663,7 @@ static int nv17_tv_create_resources(struct drm_encoder *encoder,
conf->tv_subconnector_property,
tv_enc->subconnector);
drm_object_attach_property(&connector->base,
- conf->tv_mode_property,
+ conf->legacy_tv_mode_property,
tv_enc->tv_norm);
drm_object_attach_property(&connector->base,
conf->tv_flicker_reduction_property,
@@ -722,7 +723,7 @@ static int nv17_tv_set_property(struct drm_encoder *encoder,
if (encoder->crtc)
nv17_tv_update_rescaler(encoder);
- } else if (property == conf->tv_mode_property) {
+ } else if (property == conf->legacy_tv_mode_property) {
if (connector->dpms != DRM_MODE_DPMS_OFF)
return -EINVAL;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index edcb2529b402..ed9d374147b8 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -885,7 +885,7 @@ nv50_msto_prepare(struct drm_atomic_state *state,
// TODO: Figure out if we want to do a better job of handling VCPI allocation failures here?
if (msto->disabled) {
- drm_dp_remove_payload(mgr, mst_state, payload);
+ drm_dp_remove_payload(mgr, mst_state, payload, payload);
nvif_outp_dp_mst_vcpi(&mstm->outp->outp, msto->head->base.index, 0, 0, 0, 0);
} else {
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.c b/drivers/gpu/drm/nouveau/dispnv50/head.c
index f006e56e1e08..5f490fbf1877 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head.c
@@ -32,7 +32,6 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc_helper.h>
#include <drm/drm_vblank.h>
#include "nouveau_connector.h"
diff --git a/drivers/gpu/drm/nouveau/include/nvfw/hs.h b/drivers/gpu/drm/nouveau/include/nvfw/hs.h
index 8c4cd08a7b5f..8b58b668fc0c 100644
--- a/drivers/gpu/drm/nouveau/include/nvfw/hs.h
+++ b/drivers/gpu/drm/nouveau/include/nvfw/hs.h
@@ -52,7 +52,7 @@ struct nvfw_hs_load_header_v2 {
struct {
u32 offset;
u32 size;
- } app[0];
+ } app[];
};
const struct nvfw_hs_load_header_v2 *nvfw_hs_load_header_v2(struct nvkm_subdev *, const void *);
diff --git a/drivers/gpu/drm/nouveau/include/nvif/outp.h b/drivers/gpu/drm/nouveau/include/nvif/outp.h
index 45daadec3c0c..fa76a7b5e4b3 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/outp.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/outp.h
@@ -3,6 +3,7 @@
#define __NVIF_OUTP_H__
#include <nvif/object.h>
#include <nvif/if0012.h>
+#include <drm/display/drm_dp.h>
struct nvif_disp;
struct nvif_outp {
@@ -21,7 +22,7 @@ int nvif_outp_acquire_rgb_crt(struct nvif_outp *);
int nvif_outp_acquire_tmds(struct nvif_outp *, int head,
bool hdmi, u8 max_ac_packet, u8 rekey, u8 scdc, bool hda);
int nvif_outp_acquire_lvds(struct nvif_outp *, bool dual, bool bpc8);
-int nvif_outp_acquire_dp(struct nvif_outp *, u8 dpcd[16],
+int nvif_outp_acquire_dp(struct nvif_outp *outp, u8 dpcd[DP_RECEIVER_CAP_SIZE],
int link_nr, int link_bw, bool hda, bool mst);
void nvif_outp_release(struct nvif_outp *);
int nvif_outp_infoframe(struct nvif_outp *, u8 type, struct nvif_outp_infoframe_v0 *, u32 size);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
index 40768373cdd9..c5a4f49ee206 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
@@ -97,6 +97,7 @@ int gp100_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct n
int gp102_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
int gp10b_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
int gv100_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+int tu102_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
int ga100_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
int ga102_fb_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index a11871e3119c..288eebc70a67 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -28,6 +28,7 @@
*/
#include <linux/dma-mapping.h>
+#include <drm/ttm/ttm_tt.h>
#include "nouveau_drv.h"
#include "nouveau_chan.h"
@@ -921,6 +922,7 @@ static void nouveau_bo_move_ntfy(struct ttm_buffer_object *bo,
struct nouveau_mem *mem = new_reg ? nouveau_mem(new_reg) : NULL;
struct nouveau_bo *nvbo = nouveau_bo(bo);
struct nouveau_vma *vma;
+ long ret;
/* ttm can now (stupidly) pass the driver bos it didn't create... */
if (bo->destroy != nouveau_bo_del_ttm)
@@ -935,7 +937,10 @@ static void nouveau_bo_move_ntfy(struct ttm_buffer_object *bo,
}
} else {
list_for_each_entry(vma, &nvbo->vma_list, head) {
- WARN_ON(ttm_bo_wait(bo, false, false));
+ ret = dma_resv_wait_timeout(bo->base.resv,
+ DMA_RESV_USAGE_BOOKKEEP,
+ false, 15 * HZ);
+ WARN_ON(ret <= 0);
nouveau_vma_unmap(vma);
}
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h
index c2d3f9c48eba..774dd93ca76b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.h
@@ -1,8 +1,9 @@
/* SPDX-License-Identifier: MIT */
#ifndef __NOUVEAU_BO_H__
#define __NOUVEAU_BO_H__
-#include <drm/ttm/ttm_bo_driver.h>
#include <drm/drm_gem.h>
+#include <drm/ttm/ttm_bo.h>
+#include <drm/ttm/ttm_placement.h>
struct nouveau_channel;
struct nouveau_cli;
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 80f154b6adab..cc7c5b4a05fd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -31,9 +31,7 @@
#include <linux/dynamic_debug.h>
#include <drm/drm_aperture.h>
-#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fb_helper.h>
#include <drm/drm_fbdev_generic.h>
#include <drm/drm_gem_ttm_helper.h>
#include <drm/drm_ioctl.h>
@@ -1221,13 +1219,9 @@ nouveau_driver_fops = {
static struct drm_driver
driver_stub = {
- .driver_features =
- DRIVER_GEM | DRIVER_MODESET | DRIVER_RENDER
-#if defined(CONFIG_NOUVEAU_LEGACY_CTX_SUPPORT)
- | DRIVER_KMS_LEGACY_CONTEXT
-#endif
- ,
-
+ .driver_features = DRIVER_GEM |
+ DRIVER_MODESET |
+ DRIVER_RENDER,
.open = nouveau_drm_open,
.postclose = nouveau_drm_postclose,
.lastclose = nouveau_vga_lastclose,
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index d6dd07bfa64a..b5de312a523f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -51,8 +51,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
-#include <drm/ttm/ttm_bo_api.h>
-#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_audio_component.h>
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index ac5793c96957..f77e44958037 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -645,7 +645,7 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
struct drm_nouveau_gem_pushbuf_reloc *reloc,
struct drm_nouveau_gem_pushbuf_bo *bo)
{
- int ret = 0;
+ long ret = 0;
unsigned i;
for (i = 0; i < req->nr_relocs; i++) {
@@ -703,9 +703,14 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
data |= r->vor;
}
- ret = ttm_bo_wait(&nvbo->bo, false, false);
+ ret = dma_resv_wait_timeout(nvbo->bo.base.resv,
+ DMA_RESV_USAGE_BOOKKEEP,
+ false, 15 * HZ);
+ if (ret == 0)
+ ret = -EBUSY;
if (ret) {
- NV_PRINTK(err, cli, "reloc wait_idle failed: %d\n", ret);
+ NV_PRINTK(err, cli, "reloc wait_idle failed: %ld\n",
+ ret);
break;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index 1fde3a5d7c32..25f31d5169e5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -19,11 +19,12 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
+#include <drm/ttm/ttm_tt.h>
+
#include "nouveau_mem.h"
#include "nouveau_drv.h"
#include "nouveau_bo.h"
-#include <drm/ttm/ttm_bo_driver.h>
#include <nvif/class.h>
#include <nvif/if000a.h>
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.h b/drivers/gpu/drm/nouveau/nouveau_mem.h
index 1ee6cdb9ad9b..76c86d8bb01e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.h
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.h
@@ -1,6 +1,6 @@
#ifndef __NOUVEAU_MEM_H__
#define __NOUVEAU_MEM_H__
-#include <drm/ttm/ttm_bo_api.h>
+#include <drm/ttm/ttm_bo.h>
struct ttm_tt;
#include <nvif/mem.h>
diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c
index 9608121e49b7..f42c2b1b0363 100644
--- a/drivers/gpu/drm/nouveau/nouveau_prime.c
+++ b/drivers/gpu/drm/nouveau/nouveau_prime.c
@@ -23,6 +23,7 @@
*/
#include <linux/dma-buf.h>
+#include <drm/ttm/ttm_tt.h>
#include "nouveau_drv.h"
#include "nouveau_gem.h"
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index 85c03c83259b..b14895f75b3c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: MIT
#include <linux/pagemap.h>
#include <linux/slab.h>
+#include <drm/ttm/ttm_tt.h>
#include "nouveau_drv.h"
#include "nouveau_mem.h"
diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
index 789393b94291..f8bf0ec26844 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
@@ -2,7 +2,6 @@
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
-#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include "nouveau_drv.h"
diff --git a/drivers/gpu/drm/nouveau/nvif/outp.c b/drivers/gpu/drm/nouveau/nvif/outp.c
index 7da39f1eae9f..c24bc5eae3ec 100644
--- a/drivers/gpu/drm/nouveau/nvif/outp.c
+++ b/drivers/gpu/drm/nouveau/nvif/outp.c
@@ -127,7 +127,7 @@ nvif_outp_acquire(struct nvif_outp *outp, u8 proto, struct nvif_outp_acquire_v0
}
int
-nvif_outp_acquire_dp(struct nvif_outp *outp, u8 dpcd[16],
+nvif_outp_acquire_dp(struct nvif_outp *outp, u8 dpcd[DP_RECEIVER_CAP_SIZE],
int link_nr, int link_bw, bool hda, bool mst)
{
struct nvif_outp_acquire_v0 args;
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/firmware.c b/drivers/gpu/drm/nouveau/nvkm/core/firmware.c
index fcf2a002f6cb..91fb494d4009 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/firmware.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/firmware.c
@@ -151,6 +151,9 @@ nvkm_firmware_mem_page(struct nvkm_memory *memory)
static enum nvkm_memory_target
nvkm_firmware_mem_target(struct nvkm_memory *memory)
{
+ if (nvkm_firmware_mem(memory)->device->func->tegra)
+ return NVKM_MEM_TARGET_NCOH;
+
return NVKM_MEM_TARGET_HOST;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index 364fea320cb3..1c81e5b34d29 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -2405,7 +2405,7 @@ nv162_chipset = {
.bus = { 0x00000001, gf100_bus_new },
.devinit = { 0x00000001, tu102_devinit_new },
.fault = { 0x00000001, tu102_fault_new },
- .fb = { 0x00000001, gv100_fb_new },
+ .fb = { 0x00000001, tu102_fb_new },
.fuse = { 0x00000001, gm107_fuse_new },
.gpio = { 0x00000001, gk104_gpio_new },
.gsp = { 0x00000001, gv100_gsp_new },
@@ -2440,7 +2440,7 @@ nv164_chipset = {
.bus = { 0x00000001, gf100_bus_new },
.devinit = { 0x00000001, tu102_devinit_new },
.fault = { 0x00000001, tu102_fault_new },
- .fb = { 0x00000001, gv100_fb_new },
+ .fb = { 0x00000001, tu102_fb_new },
.fuse = { 0x00000001, gm107_fuse_new },
.gpio = { 0x00000001, gk104_gpio_new },
.gsp = { 0x00000001, gv100_gsp_new },
@@ -2475,7 +2475,7 @@ nv166_chipset = {
.bus = { 0x00000001, gf100_bus_new },
.devinit = { 0x00000001, tu102_devinit_new },
.fault = { 0x00000001, tu102_fault_new },
- .fb = { 0x00000001, gv100_fb_new },
+ .fb = { 0x00000001, tu102_fb_new },
.fuse = { 0x00000001, gm107_fuse_new },
.gpio = { 0x00000001, gk104_gpio_new },
.gsp = { 0x00000001, gv100_gsp_new },
@@ -2510,7 +2510,7 @@ nv167_chipset = {
.bus = { 0x00000001, gf100_bus_new },
.devinit = { 0x00000001, tu102_devinit_new },
.fault = { 0x00000001, tu102_fault_new },
- .fb = { 0x00000001, gv100_fb_new },
+ .fb = { 0x00000001, tu102_fb_new },
.fuse = { 0x00000001, gm107_fuse_new },
.gpio = { 0x00000001, gk104_gpio_new },
.gsp = { 0x00000001, gv100_gsp_new },
@@ -2545,7 +2545,7 @@ nv168_chipset = {
.bus = { 0x00000001, gf100_bus_new },
.devinit = { 0x00000001, tu102_devinit_new },
.fault = { 0x00000001, tu102_fault_new },
- .fb = { 0x00000001, gv100_fb_new },
+ .fb = { 0x00000001, tu102_fb_new },
.fuse = { 0x00000001, gm107_fuse_new },
.gpio = { 0x00000001, gk104_gpio_new },
.gsp = { 0x00000001, gv100_gsp_new },
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/gm200.c b/drivers/gpu/drm/nouveau/nvkm/falcon/gm200.c
index 393ade9f7e6c..b7da3ab44c27 100644
--- a/drivers/gpu/drm/nouveau/nvkm/falcon/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/gm200.c
@@ -48,6 +48,16 @@ gm200_flcn_pio_dmem_rd(struct nvkm_falcon *falcon, u8 port, const u8 *img, int l
img += 4;
len -= 4;
}
+
+ /* Sigh. Tegra PMU FW's init message... */
+ if (len) {
+ u32 data = nvkm_falcon_rd32(falcon, 0x1c4 + (port * 8));
+
+ while (len--) {
+ *(u8 *)img++ = data & 0xff;
+ data >>= 8;
+ }
+ }
}
static void
@@ -64,6 +74,8 @@ gm200_flcn_pio_dmem_wr(struct nvkm_falcon *falcon, u8 port, const u8 *img, int l
img += 4;
len -= 4;
}
+
+ WARN_ON(len);
}
static void
@@ -74,7 +86,7 @@ gm200_flcn_pio_dmem_wr_init(struct nvkm_falcon *falcon, u8 port, bool sec, u32 d
const struct nvkm_falcon_func_pio
gm200_flcn_dmem_pio = {
- .min = 4,
+ .min = 1,
.max = 0x100,
.wr_init = gm200_flcn_pio_dmem_wr_init,
.wr = gm200_flcn_pio_dmem_wr,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/base.c
index dd4981708fe4..3d9319c319c6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/base.c
@@ -51,7 +51,8 @@ u64
nvkm_devinit_disable(struct nvkm_devinit *init)
{
if (init && init->func->disable)
- return init->func->disable(init);
+ init->func->disable(init);
+
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/g84.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/g84.c
index c224702b7bed..00df7811dd10 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/g84.c
@@ -26,13 +26,12 @@
#include <subdev/bios.h>
#include <subdev/bios/init.h>
-static u64
+static void
g84_devinit_disable(struct nvkm_devinit *init)
{
struct nvkm_device *device = init->subdev.device;
u32 r001540 = nvkm_rd32(device, 0x001540);
u32 r00154c = nvkm_rd32(device, 0x00154c);
- u64 disable = 0ULL;
if (!(r001540 & 0x40000000)) {
nvkm_subdev_disable(device, NVKM_ENGINE_MPEG, 0);
@@ -47,8 +46,6 @@ g84_devinit_disable(struct nvkm_devinit *init)
nvkm_subdev_disable(device, NVKM_ENGINE_BSP, 0);
if (!(r00154c & 0x00000040))
nvkm_subdev_disable(device, NVKM_ENGINE_CIPHER, 0);
-
- return disable;
}
static const struct nvkm_devinit_func
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/g98.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/g98.c
index 8977483a9f42..54bee499b982 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/g98.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/g98.c
@@ -26,7 +26,7 @@
#include <subdev/bios.h>
#include <subdev/bios/init.h>
-static u64
+static void
g98_devinit_disable(struct nvkm_devinit *init)
{
struct nvkm_device *device = init->subdev.device;
@@ -45,8 +45,6 @@ g98_devinit_disable(struct nvkm_devinit *init)
nvkm_subdev_disable(device, NVKM_ENGINE_MSVLD, 0);
if (!(r00154c & 0x00000040))
nvkm_subdev_disable(device, NVKM_ENGINE_SEC, 0);
-
- return 0ULL;
}
static const struct nvkm_devinit_func
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gf100.c
index 5b7cb1fe7897..5368e705e7fd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gf100.c
@@ -63,7 +63,7 @@ gf100_devinit_pll_set(struct nvkm_devinit *init, u32 type, u32 freq)
return ret;
}
-static u64
+static void
gf100_devinit_disable(struct nvkm_devinit *init)
{
struct nvkm_device *device = init->subdev.device;
@@ -85,8 +85,6 @@ gf100_devinit_disable(struct nvkm_devinit *init)
nvkm_subdev_disable(device, NVKM_ENGINE_CE, 0);
if (r022500 & 0x00000200)
nvkm_subdev_disable(device, NVKM_ENGINE_CE, 1);
-
- return 0ULL;
}
void
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c
index 8955af2704c7..7bcbc4895ec2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c
@@ -26,7 +26,7 @@
#include <subdev/bios.h>
#include <subdev/bios/init.h>
-u64
+void
gm107_devinit_disable(struct nvkm_devinit *init)
{
struct nvkm_device *device = init->subdev.device;
@@ -39,8 +39,6 @@ gm107_devinit_disable(struct nvkm_devinit *init)
nvkm_subdev_disable(device, NVKM_ENGINE_CE, 2);
if (r021c04 & 0x00000001)
nvkm_subdev_disable(device, NVKM_ENGINE_DISP, 0);
-
- return 0ULL;
}
static const struct nvkm_devinit_func
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gt215.c
index 3d0ab86c3115..dbca92318baf 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gt215.c
@@ -62,7 +62,7 @@ gt215_devinit_pll_set(struct nvkm_devinit *init, u32 type, u32 freq)
return ret;
}
-static u64
+static void
gt215_devinit_disable(struct nvkm_devinit *init)
{
struct nvkm_device *device = init->subdev.device;
@@ -80,8 +80,6 @@ gt215_devinit_disable(struct nvkm_devinit *init)
nvkm_subdev_disable(device, NVKM_ENGINE_MSVLD, 0);
if (!(r00154c & 0x00000200))
nvkm_subdev_disable(device, NVKM_ENGINE_CE, 0);
-
- return 0ULL;
}
static u32
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/mcp89.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/mcp89.c
index a9cdf2411187..a24bd2e7d7ce 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/mcp89.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/mcp89.c
@@ -26,7 +26,7 @@
#include <subdev/bios.h>
#include <subdev/bios/init.h>
-static u64
+static void
mcp89_devinit_disable(struct nvkm_devinit *init)
{
struct nvkm_device *device = init->subdev.device;
@@ -46,8 +46,6 @@ mcp89_devinit_disable(struct nvkm_devinit *init)
nvkm_subdev_disable(device, NVKM_ENGINE_VIC, 0);
if (!(r00154c & 0x00000200))
nvkm_subdev_disable(device, NVKM_ENGINE_CE, 0);
-
- return 0;
}
static const struct nvkm_devinit_func
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.c
index 380995d398b1..07ed8fd778b2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.c
@@ -77,17 +77,14 @@ nv50_devinit_pll_set(struct nvkm_devinit *init, u32 type, u32 freq)
return 0;
}
-static u64
+static void
nv50_devinit_disable(struct nvkm_devinit *init)
{
struct nvkm_device *device = init->subdev.device;
u32 r001540 = nvkm_rd32(device, 0x001540);
- u64 disable = 0ULL;
if (!(r001540 & 0x40000000))
nvkm_subdev_disable(device, NVKM_ENGINE_MPEG, 0);
-
- return disable;
}
void
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h
index 987a7f478b84..8de409c084c1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h
@@ -23,7 +23,7 @@ int gf100_devinit_ctor(struct nvkm_object *, struct nvkm_object *,
int gf100_devinit_pll_set(struct nvkm_devinit *, u32, u32);
void gf100_devinit_preinit(struct nvkm_devinit *);
-u64 gm107_devinit_disable(struct nvkm_devinit *);
+void gm107_devinit_disable(struct nvkm_devinit *);
int gm200_devinit_post(struct nvkm_devinit *, bool);
void gm200_devinit_preos(struct nv50_devinit *, bool);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/priv.h
index dd8b038a8cee..a648482d06e9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/priv.h
@@ -12,7 +12,7 @@ struct nvkm_devinit_func {
u32 (*mmio)(struct nvkm_devinit *, u32);
void (*meminit)(struct nvkm_devinit *);
int (*pll_set)(struct nvkm_devinit *, u32 type, u32 freq);
- u64 (*disable)(struct nvkm_devinit *);
+ void (*disable)(struct nvkm_devinit *);
};
void nvkm_devinit_ctor(const struct nvkm_devinit_func *, struct nvkm_device *,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu102.c
index 634f64f88fc8..81a1ad2c88a7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu102.c
@@ -65,10 +65,33 @@ tu102_devinit_pll_set(struct nvkm_devinit *init, u32 type, u32 freq)
return ret;
}
+static int
+tu102_devinit_wait(struct nvkm_device *device)
+{
+ unsigned timeout = 50 + 2000;
+
+ do {
+ if (nvkm_rd32(device, 0x118128) & 0x00000001) {
+ if ((nvkm_rd32(device, 0x118234) & 0x000000ff) == 0xff)
+ return 0;
+ }
+
+ usleep_range(1000, 2000);
+ } while (timeout--);
+
+ return -ETIMEDOUT;
+}
+
int
tu102_devinit_post(struct nvkm_devinit *base, bool post)
{
struct nv50_devinit *init = nv50_devinit(base);
+ int ret;
+
+ ret = tu102_devinit_wait(init->base.subdev.device);
+ if (ret)
+ return ret;
+
gm200_devinit_preos(init, post);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
index 5d0bab8ecb43..6ba5120a2ebe 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
@@ -32,6 +32,7 @@ nvkm-y += nvkm/subdev/fb/gp100.o
nvkm-y += nvkm/subdev/fb/gp102.o
nvkm-y += nvkm/subdev/fb/gp10b.o
nvkm-y += nvkm/subdev/fb/gv100.o
+nvkm-y += nvkm/subdev/fb/tu102.o
nvkm-y += nvkm/subdev/fb/ga100.o
nvkm-y += nvkm/subdev/fb/ga102.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c
index 8b7c8ea5e8a5..5a21b0ae4595 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c
@@ -40,12 +40,6 @@ ga102_fb_vpr_scrub(struct nvkm_fb *fb)
return ret;
}
-static bool
-ga102_fb_vpr_scrub_required(struct nvkm_fb *fb)
-{
- return (nvkm_rd32(fb->subdev.device, 0x1fa80c) & 0x00000010) != 0;
-}
-
static const struct nvkm_fb_func
ga102_fb = {
.dtor = gf100_fb_dtor,
@@ -56,7 +50,7 @@ ga102_fb = {
.sysmem.flush_page_init = gf100_fb_sysmem_flush_page_init,
.ram_new = ga102_ram_new,
.default_bigpage = 16,
- .vpr.scrub_required = ga102_fb_vpr_scrub_required,
+ .vpr.scrub_required = tu102_fb_vpr_scrub_required,
.vpr.scrub = ga102_fb_vpr_scrub,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c
index 1f0126437c1a..0e3c0a8f5d71 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c
@@ -49,8 +49,3 @@ gv100_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, s
}
MODULE_FIRMWARE("nvidia/gv100/nvdec/scrubber.bin");
-MODULE_FIRMWARE("nvidia/tu102/nvdec/scrubber.bin");
-MODULE_FIRMWARE("nvidia/tu104/nvdec/scrubber.bin");
-MODULE_FIRMWARE("nvidia/tu106/nvdec/scrubber.bin");
-MODULE_FIRMWARE("nvidia/tu116/nvdec/scrubber.bin");
-MODULE_FIRMWARE("nvidia/tu117/nvdec/scrubber.bin");
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
index ac03eac0f261..f517751f94ac 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
@@ -89,4 +89,6 @@ bool gp102_fb_vpr_scrub_required(struct nvkm_fb *);
int gp102_fb_vpr_scrub(struct nvkm_fb *);
int gv100_fb_init_page(struct nvkm_fb *);
+
+bool tu102_fb_vpr_scrub_required(struct nvkm_fb *);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/tu102.c
new file mode 100644
index 000000000000..be82af0364ee
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/tu102.c
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2018 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "gf100.h"
+#include "ram.h"
+
+bool
+tu102_fb_vpr_scrub_required(struct nvkm_fb *fb)
+{
+ return (nvkm_rd32(fb->subdev.device, 0x1fa80c) & 0x00000010) != 0;
+}
+
+static const struct nvkm_fb_func
+tu102_fb = {
+ .dtor = gf100_fb_dtor,
+ .oneinit = gf100_fb_oneinit,
+ .init = gm200_fb_init,
+ .init_page = gv100_fb_init_page,
+ .init_unkn = gp100_fb_init_unkn,
+ .sysmem.flush_page_init = gf100_fb_sysmem_flush_page_init,
+ .vpr.scrub_required = tu102_fb_vpr_scrub_required,
+ .vpr.scrub = gp102_fb_vpr_scrub,
+ .ram_new = gp100_ram_new,
+ .default_bigpage = 16,
+};
+
+int
+tu102_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
+{
+ return gp102_fb_new_(&tu102_fb, device, type, inst, pfb);
+}
+
+MODULE_FIRMWARE("nvidia/tu102/nvdec/scrubber.bin");
+MODULE_FIRMWARE("nvidia/tu104/nvdec/scrubber.bin");
+MODULE_FIRMWARE("nvidia/tu106/nvdec/scrubber.bin");
+MODULE_FIRMWARE("nvidia/tu116/nvdec/scrubber.bin");
+MODULE_FIRMWARE("nvidia/tu117/nvdec/scrubber.bin");
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
index 648ecf5a8fbc..a4ac94a2ab57 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
@@ -475,7 +475,8 @@ gk20a_instobj_ctor_iommu(struct gk20a_instmem *imem, u32 npages, u32 align,
u32 offset = (r->offset + i) << imem->iommu_pgshift;
ret = iommu_map(imem->domain, offset, node->dma_addrs[i],
- PAGE_SIZE, IOMMU_READ | IOMMU_WRITE);
+ PAGE_SIZE, IOMMU_READ | IOMMU_WRITE,
+ GFP_KERNEL);
if (ret < 0) {
nvkm_error(subdev, "IOMMU mapping failure: %d\n", ret);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c
index a72403777329..2ed04da3621d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c
@@ -225,7 +225,7 @@ gm20b_pmu_init(struct nvkm_pmu *pmu)
pmu->initmsg_received = false;
- nvkm_falcon_load_dmem(falcon, &args, addr_args, sizeof(args), 0);
+ nvkm_falcon_pio_wr(falcon, (u8 *)&args, 0, 0, DMEM, addr_args, sizeof(args), 0, false);
nvkm_falcon_start(falcon);
return 0;
}
diff --git a/drivers/gpu/drm/omapdrm/dss/dispc.c b/drivers/gpu/drm/omapdrm/dss/dispc.c
index 0ee344ebcd1c..aacad5045e95 100644
--- a/drivers/gpu/drm/omapdrm/dss/dispc.c
+++ b/drivers/gpu/drm/omapdrm/dss/dispc.c
@@ -855,11 +855,6 @@ struct csc_coef_yuv2rgb {
bool full_range;
};
-struct csc_coef_rgb2yuv {
- int yr, yg, yb, cbr, cbg, cbb, crr, crg, crb;
- bool full_range;
-};
-
static void dispc_ovl_write_color_conv_coef(struct dispc_device *dispc,
enum omap_plane_id plane,
const struct csc_coef_yuv2rgb *ct)
diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c
index a6845856cbce..4c1084eb0175 100644
--- a/drivers/gpu/drm/omapdrm/dss/dsi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dsi.c
@@ -1039,22 +1039,26 @@ static int dsi_dump_dsi_irqs(struct seq_file *s, void *p)
{
struct dsi_data *dsi = s->private;
unsigned long flags;
- struct dsi_irq_stats stats;
+ struct dsi_irq_stats *stats;
+
+ stats = kmalloc(sizeof(*stats), GFP_KERNEL);
+ if (!stats)
+ return -ENOMEM;
spin_lock_irqsave(&dsi->irq_stats_lock, flags);
- stats = dsi->irq_stats;
+ *stats = dsi->irq_stats;
memset(&dsi->irq_stats, 0, sizeof(dsi->irq_stats));
dsi->irq_stats.last_reset = jiffies;
spin_unlock_irqrestore(&dsi->irq_stats_lock, flags);
seq_printf(s, "period %u ms\n",
- jiffies_to_msecs(jiffies - stats.last_reset));
+ jiffies_to_msecs(jiffies - stats->last_reset));
- seq_printf(s, "irqs %d\n", stats.irq_count);
+ seq_printf(s, "irqs %d\n", stats->irq_count);
#define PIS(x) \
- seq_printf(s, "%-20s %10d\n", #x, stats.dsi_irqs[ffs(DSI_IRQ_##x)-1]);
+ seq_printf(s, "%-20s %10d\n", #x, stats->dsi_irqs[ffs(DSI_IRQ_##x)-1]);
seq_printf(s, "-- DSI%d interrupts --\n", dsi->module_id + 1);
PIS(VC0);
@@ -1078,10 +1082,10 @@ static int dsi_dump_dsi_irqs(struct seq_file *s, void *p)
#define PIS(x) \
seq_printf(s, "%-20s %10d %10d %10d %10d\n", #x, \
- stats.vc_irqs[0][ffs(DSI_VC_IRQ_##x)-1], \
- stats.vc_irqs[1][ffs(DSI_VC_IRQ_##x)-1], \
- stats.vc_irqs[2][ffs(DSI_VC_IRQ_##x)-1], \
- stats.vc_irqs[3][ffs(DSI_VC_IRQ_##x)-1]);
+ stats->vc_irqs[0][ffs(DSI_VC_IRQ_##x)-1], \
+ stats->vc_irqs[1][ffs(DSI_VC_IRQ_##x)-1], \
+ stats->vc_irqs[2][ffs(DSI_VC_IRQ_##x)-1], \
+ stats->vc_irqs[3][ffs(DSI_VC_IRQ_##x)-1]);
seq_printf(s, "-- VC interrupts --\n");
PIS(CS);
@@ -1097,7 +1101,7 @@ static int dsi_dump_dsi_irqs(struct seq_file *s, void *p)
#define PIS(x) \
seq_printf(s, "%-20s %10d\n", #x, \
- stats.cio_irqs[ffs(DSI_CIO_IRQ_##x)-1]);
+ stats->cio_irqs[ffs(DSI_CIO_IRQ_##x)-1]);
seq_printf(s, "-- CIO interrupts --\n");
PIS(ERRSYNCESC1);
@@ -1122,6 +1126,8 @@ static int dsi_dump_dsi_irqs(struct seq_file *s, void *p)
PIS(ULPSACTIVENOT_ALL1);
#undef PIS
+ kfree(stats);
+
return 0;
}
#endif
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index eaf67b9e5f12..699ed814e021 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -546,44 +546,6 @@ static void omap_modeset_fini(struct drm_device *ddev)
}
/*
- * Enable the HPD in external components if supported
- */
-static void omap_modeset_enable_external_hpd(struct drm_device *ddev)
-{
- struct omap_drm_private *priv = ddev->dev_private;
- unsigned int i;
-
- for (i = 0; i < priv->num_pipes; i++) {
- struct drm_connector *connector = priv->pipes[i].connector;
-
- if (!connector)
- continue;
-
- if (priv->pipes[i].output->bridge)
- drm_bridge_connector_enable_hpd(connector);
- }
-}
-
-/*
- * Disable the HPD in external components if supported
- */
-static void omap_modeset_disable_external_hpd(struct drm_device *ddev)
-{
- struct omap_drm_private *priv = ddev->dev_private;
- unsigned int i;
-
- for (i = 0; i < priv->num_pipes; i++) {
- struct drm_connector *connector = priv->pipes[i].connector;
-
- if (!connector)
- continue;
-
- if (priv->pipes[i].output->bridge)
- drm_bridge_connector_disable_hpd(connector);
- }
-}
-
-/*
* drm ioctl funcs
*/
@@ -782,7 +744,6 @@ static int omapdrm_init(struct omap_drm_private *priv, struct device *dev)
omap_fbdev_init(ddev);
drm_kms_helper_poll_init(ddev);
- omap_modeset_enable_external_hpd(ddev);
/*
* Register the DRM device with the core and the connectors with
@@ -795,7 +756,6 @@ static int omapdrm_init(struct omap_drm_private *priv, struct device *dev)
return 0;
err_cleanup_helpers:
- omap_modeset_disable_external_hpd(ddev);
drm_kms_helper_poll_fini(ddev);
omap_fbdev_fini(ddev);
@@ -822,7 +782,6 @@ static void omapdrm_cleanup(struct omap_drm_private *priv)
drm_dev_unregister(ddev);
- omap_modeset_disable_external_hpd(ddev);
drm_kms_helper_poll_fini(ddev);
omap_fbdev_fini(ddev);
diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c
index 98d8758048fc..84429728347f 100644
--- a/drivers/gpu/drm/omapdrm/omap_fbdev.c
+++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c
@@ -239,13 +239,13 @@ void omap_fbdev_init(struct drm_device *dev)
helper = &fbdev->base;
- drm_fb_helper_prepare(dev, helper, &omap_fb_helper_funcs);
+ drm_fb_helper_prepare(dev, helper, 32, &omap_fb_helper_funcs);
ret = drm_fb_helper_init(dev, helper);
if (ret)
goto fail;
- ret = drm_fb_helper_initial_config(helper, 32);
+ ret = drm_fb_helper_initial_config(helper);
if (ret)
goto fini;
@@ -256,6 +256,7 @@ void omap_fbdev_init(struct drm_device *dev)
fini:
drm_fb_helper_fini(helper);
fail:
+ drm_fb_helper_unprepare(helper);
kfree(fbdev);
dev_warn(dev->dev, "omap_fbdev_init failed\n");
@@ -286,6 +287,7 @@ void omap_fbdev_fini(struct drm_device *dev)
if (fbdev->fb)
drm_framebuffer_remove(fbdev->fb);
+ drm_fb_helper_unprepare(helper);
kfree(fbdev);
priv->fbdev = NULL;
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index cf571796fd26..6b58a5bb7b44 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -543,8 +543,7 @@ int omap_gem_mmap_obj(struct drm_gem_object *obj,
{
struct omap_gem_object *omap_obj = to_omap_bo(obj);
- vma->vm_flags &= ~VM_PFNMAP;
- vma->vm_flags |= VM_MIXEDMAP;
+ vm_flags_mod(vma, VM_MIXEDMAP, VM_PFNMAP);
if (omap_obj->flags & OMAP_BO_WC) {
vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
@@ -605,7 +604,7 @@ int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
}
/**
- * omap_gem_dumb_map - buffer mapping for dumb interface
+ * omap_gem_dumb_map_offset - create an offset for a dumb buffer
* @file: our drm client file
* @dev: drm device
* @handle: GEM handle to the object (from dumb_create)
diff --git a/drivers/gpu/drm/omapdrm/omap_irq.c b/drivers/gpu/drm/omapdrm/omap_irq.c
index 4aca14dab927..a6f0bbc879d2 100644
--- a/drivers/gpu/drm/omapdrm/omap_irq.c
+++ b/drivers/gpu/drm/omapdrm/omap_irq.c
@@ -99,7 +99,7 @@ int omap_irq_enable_framedone(struct drm_crtc *crtc, bool enable)
}
/**
- * enable_vblank - enable vblank interrupt events
+ * omap_irq_enable_vblank - enable vblank interrupt events
* @crtc: DRM CRTC
*
* Enable vblank interrupts for @crtc. If the device doesn't have
@@ -129,7 +129,7 @@ int omap_irq_enable_vblank(struct drm_crtc *crtc)
}
/**
- * disable_vblank - disable vblank interrupt events
+ * omap_irq_disable_vblank - disable vblank interrupt events
* @crtc: DRM CRTC
*
* Disable vblank interrupts for @crtc. If the device doesn't have
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index 737edcdf9eef..8eeee71c0000 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -37,6 +37,14 @@ config DRM_PANEL_ASUS_Z00T_TM5P5_NT35596
NT35596 1080x1920 video mode panel as found in some Asus
Zenfone 2 Laser Z00T devices.
+config DRM_PANEL_AUO_A030JTN01
+ tristate "AUO A030JTN01"
+ depends on SPI
+ select REGMAP_SPI
+ help
+ Say Y here to enable support for the AUO A030JTN01 320x480 3.0" panel
+ as found in the YLM RS-97 handheld gaming console.
+
config DRM_PANEL_BOE_BF060Y8M_AJ0
tristate "Boe BF060Y8M-AJ0 panel"
depends on OF
@@ -154,6 +162,18 @@ config DRM_PANEL_FEIYANG_FY07024DI26A30D
Say Y if you want to enable support for panels based on the
Feiyang FY07024DI26A30-D MIPI-DSI interface.
+config DRM_PANEL_HIMAX_HX8394
+ tristate "HIMAX HX8394 MIPI-DSI LCD panels"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y if you want to enable support for panels based on the
+ Himax HX8394 controller, such as the HannStar HSD060BHW4
+ 720x1440 TFT LCD panel that uses a MIPI-DSI interface.
+
+ If M is selected the module will be called panel-himax-hx8394.
+
config DRM_PANEL_ILITEK_IL9322
tristate "Ilitek ILI9322 320x240 QVGA panels"
depends on OF && SPI
@@ -400,6 +420,15 @@ config DRM_PANEL_OLIMEX_LCD_OLINUXINO
Say Y here if you want to enable support for Olimex Ltd.
LCD-OLinuXino panel.
+config DRM_PANEL_ORISETECH_OTA5601A
+ tristate "Orise Technology ota5601a RGB/SPI panel"
+ depends on SPI
+ depends on BACKLIGHT_CLASS_DEVICE
+ select REGMAP_SPI
+ help
+ Say Y here if you want to enable support for the panels built
+ around the Orise Technology OTA9601A display controller.
+
config DRM_PANEL_ORISETECH_OTM8009A
tristate "Orise Technology otm8009a 480x800 dsi 2dl panel"
depends on OF
@@ -717,6 +746,15 @@ config DRM_PANEL_VISIONOX_RM69299
Say Y here if you want to enable support for Visionox
RM69299 DSI Video Mode panel.
+config DRM_PANEL_VISIONOX_VTDR6130
+ tristate "Visionox VTDR6130"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y here if you want to enable support for Visionox
+ VTDR6130 1080x2400 AMOLED DSI panel.
+
config DRM_PANEL_WIDECHIPS_WS2401
tristate "Widechips WS2401 DPI panel driver"
depends on SPI && GPIOLIB
diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile
index f8f9d9f6a307..c05aa9e23907 100644
--- a/drivers/gpu/drm/panel/Makefile
+++ b/drivers/gpu/drm/panel/Makefile
@@ -2,6 +2,7 @@
obj-$(CONFIG_DRM_PANEL_ABT_Y030XX067A) += panel-abt-y030xx067a.o
obj-$(CONFIG_DRM_PANEL_ARM_VERSATILE) += panel-arm-versatile.o
obj-$(CONFIG_DRM_PANEL_ASUS_Z00T_TM5P5_NT35596) += panel-asus-z00t-tm5p5-n35596.o
+obj-$(CONFIG_DRM_PANEL_AUO_A030JTN01) += panel-auo-a030jtn01.o
obj-$(CONFIG_DRM_PANEL_BOE_BF060Y8M_AJ0) += panel-boe-bf060y8m-aj0.o
obj-$(CONFIG_DRM_PANEL_BOE_HIMAX8279D) += panel-boe-himax8279d.o
obj-$(CONFIG_DRM_PANEL_BOE_TV101WUM_NL6) += panel-boe-tv101wum-nl6.o
@@ -13,6 +14,7 @@ obj-$(CONFIG_DRM_PANEL_EBBG_FT8719) += panel-ebbg-ft8719.o
obj-$(CONFIG_DRM_PANEL_ELIDA_KD35T133) += panel-elida-kd35t133.o
obj-$(CONFIG_DRM_PANEL_FEIXIN_K101_IM2BA02) += panel-feixin-k101-im2ba02.o
obj-$(CONFIG_DRM_PANEL_FEIYANG_FY07024DI26A30D) += panel-feiyang-fy07024di26a30d.o
+obj-$(CONFIG_DRM_PANEL_HIMAX_HX8394) += panel-himax-hx8394.o
obj-$(CONFIG_DRM_PANEL_ILITEK_IL9322) += panel-ilitek-ili9322.o
obj-$(CONFIG_DRM_PANEL_ILITEK_ILI9341) += panel-ilitek-ili9341.o
obj-$(CONFIG_DRM_PANEL_ILITEK_ILI9881C) += panel-ilitek-ili9881c.o
@@ -37,6 +39,7 @@ obj-$(CONFIG_DRM_PANEL_NOVATEK_NT36672A) += panel-novatek-nt36672a.o
obj-$(CONFIG_DRM_PANEL_NOVATEK_NT39016) += panel-novatek-nt39016.o
obj-$(CONFIG_DRM_PANEL_MANTIX_MLAF057WE51) += panel-mantix-mlaf057we51.o
obj-$(CONFIG_DRM_PANEL_OLIMEX_LCD_OLINUXINO) += panel-olimex-lcd-olinuxino.o
+obj-$(CONFIG_DRM_PANEL_ORISETECH_OTA5601A) += panel-orisetech-ota5601a.o
obj-$(CONFIG_DRM_PANEL_ORISETECH_OTM8009A) += panel-orisetech-otm8009a.o
obj-$(CONFIG_DRM_PANEL_OSD_OSD101T2587_53TS) += panel-osd-osd101t2587-53ts.o
obj-$(CONFIG_DRM_PANEL_PANASONIC_VVX10F034N00) += panel-panasonic-vvx10f034n00.o
@@ -73,5 +76,6 @@ obj-$(CONFIG_DRM_PANEL_TPO_TD043MTEA1) += panel-tpo-td043mtea1.o
obj-$(CONFIG_DRM_PANEL_TPO_TPG110) += panel-tpo-tpg110.o
obj-$(CONFIG_DRM_PANEL_TRULY_NT35597_WQXGA) += panel-truly-nt35597.o
obj-$(CONFIG_DRM_PANEL_VISIONOX_RM69299) += panel-visionox-rm69299.o
+obj-$(CONFIG_DRM_PANEL_VISIONOX_VTDR6130) += panel-visionox-vtdr6130.o
obj-$(CONFIG_DRM_PANEL_WIDECHIPS_WS2401) += panel-widechips-ws2401.o
obj-$(CONFIG_DRM_PANEL_XINPENG_XPP055C272) += panel-xinpeng-xpp055c272.o
diff --git a/drivers/gpu/drm/panel/panel-asus-z00t-tm5p5-n35596.c b/drivers/gpu/drm/panel/panel-asus-z00t-tm5p5-n35596.c
index b3235781e6ba..075a7af81eff 100644
--- a/drivers/gpu/drm/panel/panel-asus-z00t-tm5p5-n35596.c
+++ b/drivers/gpu/drm/panel/panel-asus-z00t-tm5p5-n35596.c
@@ -24,22 +24,6 @@ static inline struct tm5p5_nt35596 *to_tm5p5_nt35596(struct drm_panel *panel)
return container_of(panel, struct tm5p5_nt35596, panel);
}
-#define dsi_generic_write_seq(dsi, seq...) do { \
- static const u8 d[] = { seq }; \
- int ret; \
- ret = mipi_dsi_generic_write(dsi, d, ARRAY_SIZE(d)); \
- if (ret < 0) \
- return ret; \
- } while (0)
-
-#define dsi_dcs_write_seq(dsi, seq...) do { \
- static const u8 d[] = { seq }; \
- int ret; \
- ret = mipi_dsi_dcs_write_buffer(dsi, d, ARRAY_SIZE(d)); \
- if (ret < 0) \
- return ret; \
- } while (0)
-
static void tm5p5_nt35596_reset(struct tm5p5_nt35596 *ctx)
{
gpiod_set_value_cansleep(ctx->reset_gpio, 1);
@@ -54,46 +38,46 @@ static int tm5p5_nt35596_on(struct tm5p5_nt35596 *ctx)
{
struct mipi_dsi_device *dsi = ctx->dsi;
- dsi_generic_write_seq(dsi, 0xff, 0x05);
- dsi_generic_write_seq(dsi, 0xfb, 0x01);
- dsi_generic_write_seq(dsi, 0xc5, 0x31);
- dsi_generic_write_seq(dsi, 0xff, 0x04);
- dsi_generic_write_seq(dsi, 0x01, 0x84);
- dsi_generic_write_seq(dsi, 0x05, 0x25);
- dsi_generic_write_seq(dsi, 0x06, 0x01);
- dsi_generic_write_seq(dsi, 0x07, 0x20);
- dsi_generic_write_seq(dsi, 0x08, 0x06);
- dsi_generic_write_seq(dsi, 0x09, 0x08);
- dsi_generic_write_seq(dsi, 0x0a, 0x10);
- dsi_generic_write_seq(dsi, 0x0b, 0x10);
- dsi_generic_write_seq(dsi, 0x0c, 0x10);
- dsi_generic_write_seq(dsi, 0x0d, 0x14);
- dsi_generic_write_seq(dsi, 0x0e, 0x14);
- dsi_generic_write_seq(dsi, 0x0f, 0x14);
- dsi_generic_write_seq(dsi, 0x10, 0x14);
- dsi_generic_write_seq(dsi, 0x11, 0x14);
- dsi_generic_write_seq(dsi, 0x12, 0x14);
- dsi_generic_write_seq(dsi, 0x17, 0xf3);
- dsi_generic_write_seq(dsi, 0x18, 0xc0);
- dsi_generic_write_seq(dsi, 0x19, 0xc0);
- dsi_generic_write_seq(dsi, 0x1a, 0xc0);
- dsi_generic_write_seq(dsi, 0x1b, 0xb3);
- dsi_generic_write_seq(dsi, 0x1c, 0xb3);
- dsi_generic_write_seq(dsi, 0x1d, 0xb3);
- dsi_generic_write_seq(dsi, 0x1e, 0xb3);
- dsi_generic_write_seq(dsi, 0x1f, 0xb3);
- dsi_generic_write_seq(dsi, 0x20, 0xb3);
- dsi_generic_write_seq(dsi, 0xfb, 0x01);
- dsi_generic_write_seq(dsi, 0xff, 0x00);
- dsi_generic_write_seq(dsi, 0xfb, 0x01);
- dsi_generic_write_seq(dsi, 0x35, 0x01);
- dsi_generic_write_seq(dsi, 0xd3, 0x06);
- dsi_generic_write_seq(dsi, 0xd4, 0x04);
- dsi_generic_write_seq(dsi, 0x5e, 0x0d);
- dsi_generic_write_seq(dsi, 0x11, 0x00);
+ mipi_dsi_generic_write_seq(dsi, 0xff, 0x05);
+ mipi_dsi_generic_write_seq(dsi, 0xfb, 0x01);
+ mipi_dsi_generic_write_seq(dsi, 0xc5, 0x31);
+ mipi_dsi_generic_write_seq(dsi, 0xff, 0x04);
+ mipi_dsi_generic_write_seq(dsi, 0x01, 0x84);
+ mipi_dsi_generic_write_seq(dsi, 0x05, 0x25);
+ mipi_dsi_generic_write_seq(dsi, 0x06, 0x01);
+ mipi_dsi_generic_write_seq(dsi, 0x07, 0x20);
+ mipi_dsi_generic_write_seq(dsi, 0x08, 0x06);
+ mipi_dsi_generic_write_seq(dsi, 0x09, 0x08);
+ mipi_dsi_generic_write_seq(dsi, 0x0a, 0x10);
+ mipi_dsi_generic_write_seq(dsi, 0x0b, 0x10);
+ mipi_dsi_generic_write_seq(dsi, 0x0c, 0x10);
+ mipi_dsi_generic_write_seq(dsi, 0x0d, 0x14);
+ mipi_dsi_generic_write_seq(dsi, 0x0e, 0x14);
+ mipi_dsi_generic_write_seq(dsi, 0x0f, 0x14);
+ mipi_dsi_generic_write_seq(dsi, 0x10, 0x14);
+ mipi_dsi_generic_write_seq(dsi, 0x11, 0x14);
+ mipi_dsi_generic_write_seq(dsi, 0x12, 0x14);
+ mipi_dsi_generic_write_seq(dsi, 0x17, 0xf3);
+ mipi_dsi_generic_write_seq(dsi, 0x18, 0xc0);
+ mipi_dsi_generic_write_seq(dsi, 0x19, 0xc0);
+ mipi_dsi_generic_write_seq(dsi, 0x1a, 0xc0);
+ mipi_dsi_generic_write_seq(dsi, 0x1b, 0xb3);
+ mipi_dsi_generic_write_seq(dsi, 0x1c, 0xb3);
+ mipi_dsi_generic_write_seq(dsi, 0x1d, 0xb3);
+ mipi_dsi_generic_write_seq(dsi, 0x1e, 0xb3);
+ mipi_dsi_generic_write_seq(dsi, 0x1f, 0xb3);
+ mipi_dsi_generic_write_seq(dsi, 0x20, 0xb3);
+ mipi_dsi_generic_write_seq(dsi, 0xfb, 0x01);
+ mipi_dsi_generic_write_seq(dsi, 0xff, 0x00);
+ mipi_dsi_generic_write_seq(dsi, 0xfb, 0x01);
+ mipi_dsi_generic_write_seq(dsi, 0x35, 0x01);
+ mipi_dsi_generic_write_seq(dsi, 0xd3, 0x06);
+ mipi_dsi_generic_write_seq(dsi, 0xd4, 0x04);
+ mipi_dsi_generic_write_seq(dsi, 0x5e, 0x0d);
+ mipi_dsi_generic_write_seq(dsi, 0x11, 0x00);
msleep(100);
- dsi_generic_write_seq(dsi, 0x29, 0x00);
- dsi_generic_write_seq(dsi, 0x53, 0x24);
+ mipi_dsi_generic_write_seq(dsi, 0x29, 0x00);
+ mipi_dsi_generic_write_seq(dsi, 0x53, 0x24);
return 0;
}
@@ -117,7 +101,7 @@ static int tm5p5_nt35596_off(struct tm5p5_nt35596 *ctx)
return ret;
}
- dsi_dcs_write_seq(dsi, 0x4f, 0x01);
+ mipi_dsi_dcs_write_seq(dsi, 0x4f, 0x01);
return 0;
}
diff --git a/drivers/gpu/drm/panel/panel-auo-a030jtn01.c b/drivers/gpu/drm/panel/panel-auo-a030jtn01.c
new file mode 100644
index 000000000000..3c976a98de6a
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-auo-a030jtn01.c
@@ -0,0 +1,308 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * AU Optronics A030JTN01.0 TFT LCD panel driver
+ *
+ * Copyright (C) 2023, Paul Cercueil <paul@crapouillou.net>
+ * Copyright (C) 2023, Christophe Branchereau <cbranchereau@gmail.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/gpio/consumer.h>
+#include <linux/media-bus-format.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/spi/spi.h>
+
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+
+#define REG05 0x05
+#define REG06 0x06
+#define REG07 0x07
+
+#define REG05_STDBY BIT(0)
+#define REG06_VBLK GENMASK(4, 0)
+#define REG07_HBLK GENMASK(7, 0)
+
+
+struct a030jtn01_info {
+ const struct drm_display_mode *display_modes;
+ unsigned int num_modes;
+ u16 width_mm, height_mm;
+ u32 bus_format, bus_flags;
+};
+
+struct a030jtn01 {
+ struct drm_panel panel;
+ struct spi_device *spi;
+ struct regmap *map;
+
+ const struct a030jtn01_info *panel_info;
+
+ struct regulator *supply;
+ struct gpio_desc *reset_gpio;
+};
+
+static inline struct a030jtn01 *to_a030jtn01(struct drm_panel *panel)
+{
+ return container_of(panel, struct a030jtn01, panel);
+}
+
+static int a030jtn01_prepare(struct drm_panel *panel)
+{
+ struct a030jtn01 *priv = to_a030jtn01(panel);
+ struct device *dev = &priv->spi->dev;
+ unsigned int dummy;
+ int err;
+
+ err = regulator_enable(priv->supply);
+ if (err) {
+ dev_err(dev, "Failed to enable power supply: %d\n", err);
+ return err;
+ }
+
+ usleep_range(1000, 8000);
+
+ /* Reset the chip */
+ gpiod_set_value_cansleep(priv->reset_gpio, 1);
+ usleep_range(100, 8000);
+ gpiod_set_value_cansleep(priv->reset_gpio, 0);
+ usleep_range(2000, 8000);
+
+ /*
+ * No idea why, but a register read (doesn't matter which) is needed to
+ * properly initialize the chip after a reset; otherwise, the colors
+ * will be wrong. It doesn't seem to be timing-related as a msleep(200)
+ * doesn't fix it.
+ */
+ err = regmap_read(priv->map, REG05, &dummy);
+ if (err)
+ goto err_disable_regulator;
+
+ /* Use (24 + 6) == 0x1e as the vertical back porch */
+ err = regmap_write(priv->map, REG06, FIELD_PREP(REG06_VBLK, 0x1e));
+ if (err)
+ goto err_disable_regulator;
+
+ /* Use (42 + 30) * 3 == 0xd8 as the horizontal back porch */
+ err = regmap_write(priv->map, REG07, FIELD_PREP(REG07_HBLK, 0xd8));
+ if (err)
+ goto err_disable_regulator;
+
+ return 0;
+
+err_disable_regulator:
+ gpiod_set_value_cansleep(priv->reset_gpio, 1);
+ regulator_disable(priv->supply);
+ return err;
+}
+
+static int a030jtn01_unprepare(struct drm_panel *panel)
+{
+ struct a030jtn01 *priv = to_a030jtn01(panel);
+
+ gpiod_set_value_cansleep(priv->reset_gpio, 1);
+ regulator_disable(priv->supply);
+
+ return 0;
+}
+
+static int a030jtn01_enable(struct drm_panel *panel)
+{
+ struct a030jtn01 *priv = to_a030jtn01(panel);
+ int ret;
+
+ ret = regmap_set_bits(priv->map, REG05, REG05_STDBY);
+ if (ret)
+ return ret;
+
+ /* Wait for the picture to be stable */
+ if (panel->backlight)
+ msleep(100);
+
+ return 0;
+}
+
+static int a030jtn01_disable(struct drm_panel *panel)
+{
+ struct a030jtn01 *priv = to_a030jtn01(panel);
+
+ return regmap_clear_bits(priv->map, REG05, REG05_STDBY);
+}
+
+static int a030jtn01_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ struct a030jtn01 *priv = to_a030jtn01(panel);
+ const struct a030jtn01_info *panel_info = priv->panel_info;
+ struct drm_display_mode *mode;
+ unsigned int i;
+
+ for (i = 0; i < panel_info->num_modes; i++) {
+ mode = drm_mode_duplicate(connector->dev,
+ &panel_info->display_modes[i]);
+ if (!mode)
+ return -ENOMEM;
+
+ drm_mode_set_name(mode);
+
+ mode->type = DRM_MODE_TYPE_DRIVER;
+ if (panel_info->num_modes == 1)
+ mode->type |= DRM_MODE_TYPE_PREFERRED;
+
+ drm_mode_probed_add(connector, mode);
+ }
+
+ connector->display_info.bpc = 8;
+ connector->display_info.width_mm = panel_info->width_mm;
+ connector->display_info.height_mm = panel_info->height_mm;
+
+ drm_display_info_set_bus_formats(&connector->display_info,
+ &panel_info->bus_format, 1);
+ connector->display_info.bus_flags = panel_info->bus_flags;
+
+ return panel_info->num_modes;
+}
+
+static const struct drm_panel_funcs a030jtn01_funcs = {
+ .prepare = a030jtn01_prepare,
+ .unprepare = a030jtn01_unprepare,
+ .enable = a030jtn01_enable,
+ .disable = a030jtn01_disable,
+ .get_modes = a030jtn01_get_modes,
+};
+
+static bool a030jtn01_has_reg(struct device *dev, unsigned int reg)
+{
+ static const u32 a030jtn01_regs_mask = 0x001823f1fb;
+
+ return a030jtn01_regs_mask & BIT(reg);
+};
+
+static const struct regmap_config a030jtn01_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .read_flag_mask = 0x40,
+ .max_register = 0x1c,
+ .readable_reg = a030jtn01_has_reg,
+ .writeable_reg = a030jtn01_has_reg,
+};
+
+static int a030jtn01_probe(struct spi_device *spi)
+{
+ struct device *dev = &spi->dev;
+ struct a030jtn01 *priv;
+ int err;
+
+ spi->mode |= SPI_MODE_3 | SPI_3WIRE;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->spi = spi;
+ spi_set_drvdata(spi, priv);
+
+ priv->map = devm_regmap_init_spi(spi, &a030jtn01_regmap_config);
+ if (IS_ERR(priv->map))
+ return dev_err_probe(dev, PTR_ERR(priv->map), "Unable to init regmap");
+
+ priv->panel_info = spi_get_device_match_data(spi);
+ if (!priv->panel_info)
+ return -EINVAL;
+
+ priv->supply = devm_regulator_get(dev, "power");
+ if (IS_ERR(priv->supply))
+ return dev_err_probe(dev, PTR_ERR(priv->supply), "Failed to get power supply");
+
+ priv->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(priv->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(priv->reset_gpio), "Failed to get reset GPIO");
+
+ drm_panel_init(&priv->panel, dev, &a030jtn01_funcs,
+ DRM_MODE_CONNECTOR_DPI);
+
+ err = drm_panel_of_backlight(&priv->panel);
+ if (err)
+ return err;
+
+ drm_panel_add(&priv->panel);
+
+ return 0;
+}
+
+static void a030jtn01_remove(struct spi_device *spi)
+{
+ struct a030jtn01 *priv = spi_get_drvdata(spi);
+
+ drm_panel_remove(&priv->panel);
+ drm_panel_disable(&priv->panel);
+ drm_panel_unprepare(&priv->panel);
+}
+
+static const struct drm_display_mode a030jtn01_modes[] = {
+ { /* 60 Hz */
+ .clock = 14400,
+ .hdisplay = 320,
+ .hsync_start = 320 + 8,
+ .hsync_end = 320 + 8 + 42,
+ .htotal = 320 + 8 + 42 + 30,
+ .vdisplay = 480,
+ .vsync_start = 480 + 90,
+ .vsync_end = 480 + 90 + 24,
+ .vtotal = 480 + 90 + 24 + 6,
+ .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+ },
+ { /* 50 Hz */
+ .clock = 12000,
+ .hdisplay = 320,
+ .hsync_start = 320 + 8,
+ .hsync_end = 320 + 8 + 42,
+ .htotal = 320 + 8 + 42 + 30,
+ .vdisplay = 480,
+ .vsync_start = 480 + 90,
+ .vsync_end = 480 + 90 + 24,
+ .vtotal = 480 + 90 + 24 + 6,
+ .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+ },
+};
+
+static const struct a030jtn01_info a030jtn01_info = {
+ .display_modes = a030jtn01_modes,
+ .num_modes = ARRAY_SIZE(a030jtn01_modes),
+ .width_mm = 70,
+ .height_mm = 51,
+ .bus_format = MEDIA_BUS_FMT_RGB888_3X8_DELTA,
+ .bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE,
+};
+
+static const struct spi_device_id a030jtn01_id[] = {
+ { "a030jtn01", (kernel_ulong_t) &a030jtn01_info },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(spi, a030jtn01_id);
+
+static const struct of_device_id a030jtn01_of_match[] = {
+ { .compatible = "auo,a030jtn01" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, a030jtn01_of_match);
+
+static struct spi_driver a030jtn01_driver = {
+ .driver = {
+ .name = "auo-a030jtn01",
+ .of_match_table = a030jtn01_of_match,
+ },
+ .id_table = a030jtn01_id,
+ .probe = a030jtn01_probe,
+ .remove = a030jtn01_remove,
+};
+module_spi_driver(a030jtn01_driver);
+
+MODULE_AUTHOR("Paul Cercueil <paul@crapouillou.net>");
+MODULE_AUTHOR("Christophe Branchereau <cbranchereau@gmail.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/panel/panel-boe-bf060y8m-aj0.c b/drivers/gpu/drm/panel/panel-boe-bf060y8m-aj0.c
index ad58840eda41..90098b753e3b 100644
--- a/drivers/gpu/drm/panel/panel-boe-bf060y8m-aj0.c
+++ b/drivers/gpu/drm/panel/panel-boe-bf060y8m-aj0.c
@@ -43,14 +43,6 @@ struct boe_bf060y8m_aj0 *to_boe_bf060y8m_aj0(struct drm_panel *panel)
return container_of(panel, struct boe_bf060y8m_aj0, panel);
}
-#define dsi_dcs_write_seq(dsi, seq...) do { \
- static const u8 d[] = { seq }; \
- int ret; \
- ret = mipi_dsi_dcs_write_buffer(dsi, d, ARRAY_SIZE(d)); \
- if (ret < 0) \
- return ret; \
- } while (0)
-
static void boe_bf060y8m_aj0_reset(struct boe_bf060y8m_aj0 *boe)
{
gpiod_set_value_cansleep(boe->reset_gpio, 0);
@@ -67,12 +59,12 @@ static int boe_bf060y8m_aj0_on(struct boe_bf060y8m_aj0 *boe)
struct device *dev = &dsi->dev;
int ret;
- dsi_dcs_write_seq(dsi, 0xb0, 0xa5, 0x00);
- dsi_dcs_write_seq(dsi, 0xb2, 0x00, 0x4c);
- dsi_dcs_write_seq(dsi, MIPI_DCS_SET_3D_CONTROL, 0x10);
- dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_POWER_SAVE, DCS_ALLOW_HBM_RANGE);
- dsi_dcs_write_seq(dsi, 0xf8,
- 0x00, 0x08, 0x10, 0x00, 0x22, 0x00, 0x00, 0x2d);
+ mipi_dsi_dcs_write_seq(dsi, 0xb0, 0xa5, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0xb2, 0x00, 0x4c);
+ mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_3D_CONTROL, 0x10);
+ mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_POWER_SAVE, DCS_ALLOW_HBM_RANGE);
+ mipi_dsi_dcs_write_seq(dsi, 0xf8,
+ 0x00, 0x08, 0x10, 0x00, 0x22, 0x00, 0x00, 0x2d);
ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
if (ret < 0) {
@@ -81,17 +73,17 @@ static int boe_bf060y8m_aj0_on(struct boe_bf060y8m_aj0 *boe)
}
msleep(30);
- dsi_dcs_write_seq(dsi, 0xb0, 0xa5, 0x00);
- dsi_dcs_write_seq(dsi, 0xc0,
- 0x08, 0x48, 0x65, 0x33, 0x33, 0x33,
- 0x2a, 0x31, 0x39, 0x20, 0x09);
- dsi_dcs_write_seq(dsi, 0xc1, 0x00, 0x00, 0x00, 0x1f, 0x1f,
- 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f,
- 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f);
- dsi_dcs_write_seq(dsi, 0xe2, 0x20, 0x04, 0x10, 0x12, 0x92,
- 0x4f, 0x8f, 0x44, 0x84, 0x83, 0x83, 0x83,
- 0x5c, 0x5c, 0x5c);
- dsi_dcs_write_seq(dsi, 0xde, 0x01, 0x2c, 0x00, 0x77, 0x3e);
+ mipi_dsi_dcs_write_seq(dsi, 0xb0, 0xa5, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0xc0,
+ 0x08, 0x48, 0x65, 0x33, 0x33, 0x33,
+ 0x2a, 0x31, 0x39, 0x20, 0x09);
+ mipi_dsi_dcs_write_seq(dsi, 0xc1, 0x00, 0x00, 0x00, 0x1f, 0x1f,
+ 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f,
+ 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f);
+ mipi_dsi_dcs_write_seq(dsi, 0xe2, 0x20, 0x04, 0x10, 0x12, 0x92,
+ 0x4f, 0x8f, 0x44, 0x84, 0x83, 0x83, 0x83,
+ 0x5c, 0x5c, 0x5c);
+ mipi_dsi_dcs_write_seq(dsi, 0xde, 0x01, 0x2c, 0x00, 0x77, 0x3e);
msleep(30);
diff --git a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
index 857a2f0420d7..c924f1124ebc 100644
--- a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
+++ b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
@@ -1193,14 +1193,11 @@ static int boe_panel_enter_sleep_mode(struct boe_panel *boe)
return 0;
}
-static int boe_panel_unprepare(struct drm_panel *panel)
+static int boe_panel_disable(struct drm_panel *panel)
{
struct boe_panel *boe = to_boe_panel(panel);
int ret;
- if (!boe->prepared)
- return 0;
-
ret = boe_panel_enter_sleep_mode(boe);
if (ret < 0) {
dev_err(panel->dev, "failed to set panel off: %d\n", ret);
@@ -1209,6 +1206,16 @@ static int boe_panel_unprepare(struct drm_panel *panel)
msleep(150);
+ return 0;
+}
+
+static int boe_panel_unprepare(struct drm_panel *panel)
+{
+ struct boe_panel *boe = to_boe_panel(panel);
+
+ if (!boe->prepared)
+ return 0;
+
if (boe->desc->discharge_on_disable) {
regulator_disable(boe->avee);
regulator_disable(boe->avdd);
@@ -1528,6 +1535,7 @@ static enum drm_panel_orientation boe_panel_get_orientation(struct drm_panel *pa
}
static const struct drm_panel_funcs boe_panel_funcs = {
+ .disable = boe_panel_disable,
.unprepare = boe_panel_unprepare,
.prepare = boe_panel_prepare,
.enable = boe_panel_enable,
diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c
index 5cb8dc2ebe18..01bfe0783304 100644
--- a/drivers/gpu/drm/panel/panel-edp.c
+++ b/drivers/gpu/drm/panel/panel-edp.c
@@ -351,7 +351,7 @@ static void panel_edp_wait(ktime_t start_ktime, unsigned int min_ms)
return;
min_ktime = ktime_add(start_ktime, ms_to_ktime(min_ms));
- now_ktime = ktime_get();
+ now_ktime = ktime_get_boottime();
if (ktime_before(now_ktime, min_ktime))
msleep(ktime_to_ms(ktime_sub(min_ktime, now_ktime)) + 1);
@@ -378,7 +378,7 @@ static int panel_edp_suspend(struct device *dev)
gpiod_set_value_cansleep(p->enable_gpio, 0);
regulator_disable(p->supply);
- p->unprepared_time = ktime_get();
+ p->unprepared_time = ktime_get_boottime();
return 0;
}
@@ -464,14 +464,14 @@ static int panel_edp_prepare_once(struct panel_edp *p)
}
}
- p->prepared_time = ktime_get();
+ p->prepared_time = ktime_get_boottime();
return 0;
error:
gpiod_set_value_cansleep(p->enable_gpio, 0);
regulator_disable(p->supply);
- p->unprepared_time = ktime_get();
+ p->unprepared_time = ktime_get_boottime();
return err;
}
@@ -1891,7 +1891,8 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('C', 'M', 'N', 0x1247, &delay_200_500_e80_d50, "N120ACA-EA1"),
EDP_PANEL_ENTRY('I', 'V', 'O', 0x057d, &delay_200_500_e200, "R140NWF5 RH"),
- EDP_PANEL_ENTRY('I', 'V', 'O', 0x854b, &delay_200_500_p2e100, "M133NW4J-R3"),
+ EDP_PANEL_ENTRY('I', 'V', 'O', 0x854a, &delay_200_500_p2e100, "M133NW4J"),
+ EDP_PANEL_ENTRY('I', 'V', 'O', 0x854b, &delay_200_500_p2e100, "R133NW4K-R0"),
EDP_PANEL_ENTRY('K', 'D', 'B', 0x0624, &kingdisplay_kd116n21_30nv_a010.delay, "116N21-30NV-A010"),
EDP_PANEL_ENTRY('K', 'D', 'B', 0x1120, &delay_200_500_e80_d50, "116N29-30NK-C007"),
diff --git a/drivers/gpu/drm/panel/panel-elida-kd35t133.c b/drivers/gpu/drm/panel/panel-elida-kd35t133.c
index eee714cf3f49..e7be15b68102 100644
--- a/drivers/gpu/drm/panel/panel-elida-kd35t133.c
+++ b/drivers/gpu/drm/panel/panel-elida-kd35t133.c
@@ -51,14 +51,6 @@ static inline struct kd35t133 *panel_to_kd35t133(struct drm_panel *panel)
return container_of(panel, struct kd35t133, panel);
}
-#define dsi_dcs_write_seq(dsi, cmd, seq...) do { \
- static const u8 b[] = { cmd, seq }; \
- int ret; \
- ret = mipi_dsi_dcs_write_buffer(dsi, b, ARRAY_SIZE(b)); \
- if (ret < 0) \
- return ret; \
- } while (0)
-
static int kd35t133_init_sequence(struct kd35t133 *ctx)
{
struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
@@ -68,25 +60,25 @@ static int kd35t133_init_sequence(struct kd35t133 *ctx)
* Init sequence was supplied by the panel vendor with minimal
* documentation.
*/
- dsi_dcs_write_seq(dsi, KD35T133_CMD_POSITIVEGAMMA,
- 0x00, 0x13, 0x18, 0x04, 0x0f, 0x06, 0x3a, 0x56,
- 0x4d, 0x03, 0x0a, 0x06, 0x30, 0x3e, 0x0f);
- dsi_dcs_write_seq(dsi, KD35T133_CMD_NEGATIVEGAMMA,
- 0x00, 0x13, 0x18, 0x01, 0x11, 0x06, 0x38, 0x34,
- 0x4d, 0x06, 0x0d, 0x0b, 0x31, 0x37, 0x0f);
- dsi_dcs_write_seq(dsi, KD35T133_CMD_POWERCONTROL1, 0x18, 0x17);
- dsi_dcs_write_seq(dsi, KD35T133_CMD_POWERCONTROL2, 0x41);
- dsi_dcs_write_seq(dsi, KD35T133_CMD_VCOMCONTROL, 0x00, 0x1a, 0x80);
- dsi_dcs_write_seq(dsi, MIPI_DCS_SET_ADDRESS_MODE, 0x48);
- dsi_dcs_write_seq(dsi, MIPI_DCS_SET_PIXEL_FORMAT, 0x55);
- dsi_dcs_write_seq(dsi, KD35T133_CMD_INTERFACEMODECTRL, 0x00);
- dsi_dcs_write_seq(dsi, KD35T133_CMD_FRAMERATECTRL, 0xa0);
- dsi_dcs_write_seq(dsi, KD35T133_CMD_DISPLAYINVERSIONCTRL, 0x02);
- dsi_dcs_write_seq(dsi, KD35T133_CMD_DISPLAYFUNCTIONCTRL,
- 0x20, 0x02);
- dsi_dcs_write_seq(dsi, KD35T133_CMD_SETIMAGEFUNCTION, 0x00);
- dsi_dcs_write_seq(dsi, KD35T133_CMD_ADJUSTCONTROL3,
- 0xa9, 0x51, 0x2c, 0x82);
+ mipi_dsi_dcs_write_seq(dsi, KD35T133_CMD_POSITIVEGAMMA,
+ 0x00, 0x13, 0x18, 0x04, 0x0f, 0x06, 0x3a, 0x56,
+ 0x4d, 0x03, 0x0a, 0x06, 0x30, 0x3e, 0x0f);
+ mipi_dsi_dcs_write_seq(dsi, KD35T133_CMD_NEGATIVEGAMMA,
+ 0x00, 0x13, 0x18, 0x01, 0x11, 0x06, 0x38, 0x34,
+ 0x4d, 0x06, 0x0d, 0x0b, 0x31, 0x37, 0x0f);
+ mipi_dsi_dcs_write_seq(dsi, KD35T133_CMD_POWERCONTROL1, 0x18, 0x17);
+ mipi_dsi_dcs_write_seq(dsi, KD35T133_CMD_POWERCONTROL2, 0x41);
+ mipi_dsi_dcs_write_seq(dsi, KD35T133_CMD_VCOMCONTROL, 0x00, 0x1a, 0x80);
+ mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_ADDRESS_MODE, 0x48);
+ mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_PIXEL_FORMAT, 0x55);
+ mipi_dsi_dcs_write_seq(dsi, KD35T133_CMD_INTERFACEMODECTRL, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, KD35T133_CMD_FRAMERATECTRL, 0xa0);
+ mipi_dsi_dcs_write_seq(dsi, KD35T133_CMD_DISPLAYINVERSIONCTRL, 0x02);
+ mipi_dsi_dcs_write_seq(dsi, KD35T133_CMD_DISPLAYFUNCTIONCTRL,
+ 0x20, 0x02);
+ mipi_dsi_dcs_write_seq(dsi, KD35T133_CMD_SETIMAGEFUNCTION, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, KD35T133_CMD_ADJUSTCONTROL3,
+ 0xa9, 0x51, 0x2c, 0x82);
mipi_dsi_dcs_write(dsi, MIPI_DCS_ENTER_INVERT_MODE, NULL, 0);
dev_dbg(dev, "Panel init sequence done\n");
diff --git a/drivers/gpu/drm/panel/panel-himax-hx8394.c b/drivers/gpu/drm/panel/panel-himax-hx8394.c
new file mode 100644
index 000000000000..d4fb5d1b295b
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-himax-hx8394.c
@@ -0,0 +1,451 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for panels based on Himax HX8394 controller, such as:
+ *
+ * - HannStar HSD060BHW4 5.99" MIPI-DSI panel
+ *
+ * Copyright (C) 2021 Kamil Trzciński
+ *
+ * Based on drivers/gpu/drm/panel/panel-sitronix-st7703.c
+ * Copyright (C) Purism SPC 2019
+ */
+
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/media-bus-format.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/regulator/consumer.h>
+
+#include <video/mipi_display.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+
+#define DRV_NAME "panel-himax-hx8394"
+
+/* Manufacturer specific commands sent via DSI, listed in HX8394-F datasheet */
+#define HX8394_CMD_SETSEQUENCE 0xb0
+#define HX8394_CMD_SETPOWER 0xb1
+#define HX8394_CMD_SETDISP 0xb2
+#define HX8394_CMD_SETCYC 0xb4
+#define HX8394_CMD_SETVCOM 0xb6
+#define HX8394_CMD_SETTE 0xb7
+#define HX8394_CMD_SETSENSOR 0xb8
+#define HX8394_CMD_SETEXTC 0xb9
+#define HX8394_CMD_SETMIPI 0xba
+#define HX8394_CMD_SETOTP 0xbb
+#define HX8394_CMD_SETREGBANK 0xbd
+#define HX8394_CMD_UNKNOWN1 0xc0
+#define HX8394_CMD_SETDGCLUT 0xc1
+#define HX8394_CMD_SETID 0xc3
+#define HX8394_CMD_SETDDB 0xc4
+#define HX8394_CMD_UNKNOWN2 0xc6
+#define HX8394_CMD_SETCABC 0xc9
+#define HX8394_CMD_SETCABCGAIN 0xca
+#define HX8394_CMD_SETPANEL 0xcc
+#define HX8394_CMD_SETOFFSET 0xd2
+#define HX8394_CMD_SETGIP0 0xd3
+#define HX8394_CMD_UNKNOWN3 0xd4
+#define HX8394_CMD_SETGIP1 0xd5
+#define HX8394_CMD_SETGIP2 0xd6
+#define HX8394_CMD_SETGPO 0xd6
+#define HX8394_CMD_SETSCALING 0xdd
+#define HX8394_CMD_SETIDLE 0xdf
+#define HX8394_CMD_SETGAMMA 0xe0
+#define HX8394_CMD_SETCHEMODE_DYN 0xe4
+#define HX8394_CMD_SETCHE 0xe5
+#define HX8394_CMD_SETCESEL 0xe6
+#define HX8394_CMD_SET_SP_CMD 0xe9
+#define HX8394_CMD_SETREADINDEX 0xfe
+#define HX8394_CMD_GETSPIREAD 0xff
+
+struct hx8394 {
+ struct device *dev;
+ struct drm_panel panel;
+ struct gpio_desc *reset_gpio;
+ struct regulator *vcc;
+ struct regulator *iovcc;
+ bool prepared;
+
+ const struct hx8394_panel_desc *desc;
+};
+
+struct hx8394_panel_desc {
+ const struct drm_display_mode *mode;
+ unsigned int lanes;
+ unsigned long mode_flags;
+ enum mipi_dsi_pixel_format format;
+ int (*init_sequence)(struct hx8394 *ctx);
+};
+
+static inline struct hx8394 *panel_to_hx8394(struct drm_panel *panel)
+{
+ return container_of(panel, struct hx8394, panel);
+}
+
+static int hsd060bhw4_init_sequence(struct hx8394 *ctx)
+{
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+
+ /* 5.19.8 SETEXTC: Set extension command (B9h) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETEXTC,
+ 0xff, 0x83, 0x94);
+
+ /* 5.19.2 SETPOWER: Set power (B1h) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETPOWER,
+ 0x48, 0x11, 0x71, 0x09, 0x32, 0x24, 0x71, 0x31, 0x55, 0x30);
+
+ /* 5.19.9 SETMIPI: Set MIPI control (BAh) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETMIPI,
+ 0x63, 0x03, 0x68, 0x6b, 0xb2, 0xc0);
+
+ /* 5.19.3 SETDISP: Set display related register (B2h) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETDISP,
+ 0x00, 0x80, 0x78, 0x0c, 0x07);
+
+ /* 5.19.4 SETCYC: Set display waveform cycles (B4h) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETCYC,
+ 0x12, 0x63, 0x12, 0x63, 0x12, 0x63, 0x01, 0x0c, 0x7c, 0x55,
+ 0x00, 0x3f, 0x12, 0x6b, 0x12, 0x6b, 0x12, 0x6b, 0x01, 0x0c,
+ 0x7c);
+
+ /* 5.19.19 SETGIP0: Set GIP Option0 (D3h) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGIP0,
+ 0x00, 0x00, 0x00, 0x00, 0x3c, 0x1c, 0x00, 0x00, 0x32, 0x10,
+ 0x09, 0x00, 0x09, 0x32, 0x15, 0xad, 0x05, 0xad, 0x32, 0x00,
+ 0x00, 0x00, 0x00, 0x37, 0x03, 0x0b, 0x0b, 0x37, 0x00, 0x00,
+ 0x00, 0x0c, 0x40);
+
+ /* 5.19.20 Set GIP Option1 (D5h) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGIP1,
+ 0x19, 0x19, 0x18, 0x18, 0x1b, 0x1b, 0x1a, 0x1a, 0x00, 0x01,
+ 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x20, 0x21, 0x18, 0x18,
+ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+ 0x24, 0x25, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18);
+
+ /* 5.19.21 Set GIP Option2 (D6h) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGIP2,
+ 0x18, 0x18, 0x19, 0x19, 0x1b, 0x1b, 0x1a, 0x1a, 0x07, 0x06,
+ 0x05, 0x04, 0x03, 0x02, 0x01, 0x00, 0x25, 0x24, 0x18, 0x18,
+ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+ 0x21, 0x20, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
+ 0x18, 0x18, 0x18, 0x18, 0x18, 0x18);
+
+ /* 5.19.25 SETGAMMA: Set gamma curve related setting (E0h) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETGAMMA,
+ 0x00, 0x04, 0x0c, 0x12, 0x14, 0x18, 0x1a, 0x18, 0x31, 0x3f,
+ 0x4d, 0x4c, 0x54, 0x65, 0x6b, 0x70, 0x7f, 0x82, 0x7e, 0x8a,
+ 0x99, 0x4a, 0x48, 0x49, 0x4b, 0x4a, 0x4c, 0x4b, 0x7f, 0x00,
+ 0x04, 0x0c, 0x11, 0x13, 0x17, 0x1a, 0x18, 0x31,
+ 0x3f, 0x4d, 0x4c, 0x54, 0x65, 0x6b, 0x70, 0x7f,
+ 0x82, 0x7e, 0x8a, 0x99, 0x4a, 0x48, 0x49, 0x4b,
+ 0x4a, 0x4c, 0x4b, 0x7f);
+
+ /* 5.19.17 SETPANEL (CCh) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETPANEL,
+ 0x0b);
+
+ /* Unknown command, not listed in the HX8394-F datasheet */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN1,
+ 0x1f, 0x31);
+
+ /* 5.19.5 SETVCOM: Set VCOM voltage (B6h) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETVCOM,
+ 0x7d, 0x7d);
+
+ /* Unknown command, not listed in the HX8394-F datasheet */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN3,
+ 0x02);
+
+ /* 5.19.11 Set register bank (BDh) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETREGBANK,
+ 0x01);
+
+ /* 5.19.2 SETPOWER: Set power (B1h) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETPOWER,
+ 0x00);
+
+ /* 5.19.11 Set register bank (BDh) */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_SETREGBANK,
+ 0x00);
+
+ /* Unknown command, not listed in the HX8394-F datasheet */
+ mipi_dsi_dcs_write_seq(dsi, HX8394_CMD_UNKNOWN3,
+ 0xed);
+
+ return 0;
+}
+
+static const struct drm_display_mode hsd060bhw4_mode = {
+ .hdisplay = 720,
+ .hsync_start = 720 + 40,
+ .hsync_end = 720 + 40 + 46,
+ .htotal = 720 + 40 + 46 + 40,
+ .vdisplay = 1440,
+ .vsync_start = 1440 + 9,
+ .vsync_end = 1440 + 9 + 7,
+ .vtotal = 1440 + 9 + 7 + 7,
+ .clock = 74250,
+ .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+ .width_mm = 68,
+ .height_mm = 136,
+};
+
+static const struct hx8394_panel_desc hsd060bhw4_desc = {
+ .mode = &hsd060bhw4_mode,
+ .lanes = 4,
+ .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST,
+ .format = MIPI_DSI_FMT_RGB888,
+ .init_sequence = hsd060bhw4_init_sequence,
+};
+
+static int hx8394_enable(struct drm_panel *panel)
+{
+ struct hx8394 *ctx = panel_to_hx8394(panel);
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+ int ret;
+
+ ret = ctx->desc->init_sequence(ctx);
+ if (ret) {
+ dev_err(ctx->dev, "Panel init sequence failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
+ if (ret) {
+ dev_err(ctx->dev, "Failed to exit sleep mode: %d\n", ret);
+ return ret;
+ }
+
+ /* Panel is operational 120 msec after reset */
+ msleep(120);
+
+ ret = mipi_dsi_dcs_set_display_on(dsi);
+ if (ret) {
+ dev_err(ctx->dev, "Failed to turn on the display: %d\n", ret);
+ goto sleep_in;
+ }
+
+ return 0;
+
+sleep_in:
+ /* This will probably fail, but let's try orderly power off anyway. */
+ ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
+ if (!ret)
+ msleep(50);
+
+ return ret;
+}
+
+static int hx8394_disable(struct drm_panel *panel)
+{
+ struct hx8394 *ctx = panel_to_hx8394(panel);
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+ int ret;
+
+ ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
+ if (ret) {
+ dev_err(ctx->dev, "Failed to enter sleep mode: %d\n", ret);
+ return ret;
+ }
+
+ msleep(50); /* about 3 frames */
+
+ return 0;
+}
+
+static int hx8394_unprepare(struct drm_panel *panel)
+{
+ struct hx8394 *ctx = panel_to_hx8394(panel);
+
+ if (!ctx->prepared)
+ return 0;
+
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+
+ regulator_disable(ctx->iovcc);
+ regulator_disable(ctx->vcc);
+
+ ctx->prepared = false;
+
+ return 0;
+}
+
+static int hx8394_prepare(struct drm_panel *panel)
+{
+ struct hx8394 *ctx = panel_to_hx8394(panel);
+ int ret;
+
+ if (ctx->prepared)
+ return 0;
+
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+
+ ret = regulator_enable(ctx->vcc);
+ if (ret) {
+ dev_err(ctx->dev, "Failed to enable vcc supply: %d\n", ret);
+ return ret;
+ }
+
+ ret = regulator_enable(ctx->iovcc);
+ if (ret) {
+ dev_err(ctx->dev, "Failed to enable iovcc supply: %d\n", ret);
+ goto disable_vcc;
+ }
+
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+
+ msleep(180);
+
+ ctx->prepared = true;
+
+ return 0;
+
+disable_vcc:
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ regulator_disable(ctx->vcc);
+ return ret;
+}
+
+static int hx8394_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ struct hx8394 *ctx = panel_to_hx8394(panel);
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(connector->dev, ctx->desc->mode);
+ if (!mode) {
+ dev_err(ctx->dev, "Failed to add mode %ux%u@%u\n",
+ ctx->desc->mode->hdisplay, ctx->desc->mode->vdisplay,
+ drm_mode_vrefresh(ctx->desc->mode));
+ return -ENOMEM;
+ }
+
+ drm_mode_set_name(mode);
+
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ connector->display_info.width_mm = mode->width_mm;
+ connector->display_info.height_mm = mode->height_mm;
+ drm_mode_probed_add(connector, mode);
+
+ return 1;
+}
+
+static const struct drm_panel_funcs hx8394_drm_funcs = {
+ .disable = hx8394_disable,
+ .unprepare = hx8394_unprepare,
+ .prepare = hx8394_prepare,
+ .enable = hx8394_enable,
+ .get_modes = hx8394_get_modes,
+};
+
+static int hx8394_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct hx8394 *ctx;
+ int ret;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(ctx->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio),
+ "Failed to get reset gpio\n");
+
+ mipi_dsi_set_drvdata(dsi, ctx);
+
+ ctx->dev = dev;
+ ctx->desc = of_device_get_match_data(dev);
+
+ dsi->mode_flags = ctx->desc->mode_flags;
+ dsi->format = ctx->desc->format;
+ dsi->lanes = ctx->desc->lanes;
+
+ ctx->vcc = devm_regulator_get(dev, "vcc");
+ if (IS_ERR(ctx->vcc))
+ return dev_err_probe(dev, PTR_ERR(ctx->vcc),
+ "Failed to request vcc regulator\n");
+
+ ctx->iovcc = devm_regulator_get(dev, "iovcc");
+ if (IS_ERR(ctx->iovcc))
+ return dev_err_probe(dev, PTR_ERR(ctx->iovcc),
+ "Failed to request iovcc regulator\n");
+
+ drm_panel_init(&ctx->panel, dev, &hx8394_drm_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+
+ ret = drm_panel_of_backlight(&ctx->panel);
+ if (ret)
+ return ret;
+
+ drm_panel_add(&ctx->panel);
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0) {
+ dev_err_probe(dev, ret, "mipi_dsi_attach failed\n");
+ drm_panel_remove(&ctx->panel);
+ return ret;
+ }
+
+ dev_dbg(dev, "%ux%u@%u %ubpp dsi %udl - ready\n",
+ ctx->desc->mode->hdisplay, ctx->desc->mode->vdisplay,
+ drm_mode_vrefresh(ctx->desc->mode),
+ mipi_dsi_pixel_format_to_bpp(dsi->format), dsi->lanes);
+
+ return 0;
+}
+
+static void hx8394_shutdown(struct mipi_dsi_device *dsi)
+{
+ struct hx8394 *ctx = mipi_dsi_get_drvdata(dsi);
+ int ret;
+
+ ret = drm_panel_disable(&ctx->panel);
+ if (ret < 0)
+ dev_err(&dsi->dev, "Failed to disable panel: %d\n", ret);
+
+ ret = drm_panel_unprepare(&ctx->panel);
+ if (ret < 0)
+ dev_err(&dsi->dev, "Failed to unprepare panel: %d\n", ret);
+}
+
+static void hx8394_remove(struct mipi_dsi_device *dsi)
+{
+ struct hx8394 *ctx = mipi_dsi_get_drvdata(dsi);
+ int ret;
+
+ hx8394_shutdown(dsi);
+
+ ret = mipi_dsi_detach(dsi);
+ if (ret < 0)
+ dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret);
+
+ drm_panel_remove(&ctx->panel);
+}
+
+static const struct of_device_id hx8394_of_match[] = {
+ { .compatible = "hannstar,hsd060bhw4", .data = &hsd060bhw4_desc },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, hx8394_of_match);
+
+static struct mipi_dsi_driver hx8394_driver = {
+ .probe = hx8394_probe,
+ .remove = hx8394_remove,
+ .shutdown = hx8394_shutdown,
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = hx8394_of_match,
+ },
+};
+module_mipi_dsi_driver(hx8394_driver);
+
+MODULE_AUTHOR("Kamil Trzciński <ayufan@ayufan.eu>");
+MODULE_DESCRIPTION("DRM driver for Himax HX8394 based MIPI DSI panels");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9341.c b/drivers/gpu/drm/panel/panel-ilitek-ili9341.c
index 384a724f2822..3fdf884b3257 100644
--- a/drivers/gpu/drm/panel/panel-ilitek-ili9341.c
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9341.c
@@ -577,11 +577,7 @@ out_exit:
}
static const struct drm_simple_display_pipe_funcs ili9341_dbi_funcs = {
- .mode_valid = mipi_dbi_pipe_mode_valid,
- .enable = ili9341_dbi_enable,
- .disable = mipi_dbi_pipe_disable,
- .update = mipi_dbi_pipe_update,
- .prepare_fb = drm_gem_simple_display_pipe_prepare_fb,
+ DRM_MIPI_DBI_SIMPLE_DISPLAY_PIPE_FUNCS(ili9341_dbi_enable),
};
static const struct drm_display_mode ili9341_dbi_mode = {
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
index cbb68caa36f2..1ec696adf9de 100644
--- a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
@@ -7,7 +7,6 @@
#include <linux/device.h>
#include <linux/err.h>
#include <linux/errno.h>
-#include <linux/fb.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_device.h>
diff --git a/drivers/gpu/drm/panel/panel-jdi-fhd-r63452.c b/drivers/gpu/drm/panel/panel-jdi-fhd-r63452.c
index d8765b2294fb..8912757a6f42 100644
--- a/drivers/gpu/drm/panel/panel-jdi-fhd-r63452.c
+++ b/drivers/gpu/drm/panel/panel-jdi-fhd-r63452.c
@@ -29,22 +29,6 @@ static inline struct jdi_fhd_r63452 *to_jdi_fhd_r63452(struct drm_panel *panel)
return container_of(panel, struct jdi_fhd_r63452, panel);
}
-#define dsi_generic_write_seq(dsi, seq...) do { \
- static const u8 d[] = { seq }; \
- int ret; \
- ret = mipi_dsi_generic_write(dsi, d, ARRAY_SIZE(d)); \
- if (ret < 0) \
- return ret; \
- } while (0)
-
-#define dsi_dcs_write_seq(dsi, seq...) do { \
- static const u8 d[] = { seq }; \
- int ret; \
- ret = mipi_dsi_dcs_write_buffer(dsi, d, ARRAY_SIZE(d)); \
- if (ret < 0) \
- return ret; \
- } while (0)
-
static void jdi_fhd_r63452_reset(struct jdi_fhd_r63452 *ctx)
{
gpiod_set_value_cansleep(ctx->reset_gpio, 0);
@@ -63,12 +47,12 @@ static int jdi_fhd_r63452_on(struct jdi_fhd_r63452 *ctx)
dsi->mode_flags |= MIPI_DSI_MODE_LPM;
- dsi_generic_write_seq(dsi, 0xb0, 0x00);
- dsi_generic_write_seq(dsi, 0xd6, 0x01);
- dsi_generic_write_seq(dsi, 0xec,
- 0x64, 0xdc, 0xec, 0x3b, 0x52, 0x00, 0x0b, 0x0b,
- 0x13, 0x15, 0x68, 0x0b, 0xb5);
- dsi_generic_write_seq(dsi, 0xb0, 0x03);
+ mipi_dsi_generic_write_seq(dsi, 0xb0, 0x00);
+ mipi_dsi_generic_write_seq(dsi, 0xd6, 0x01);
+ mipi_dsi_generic_write_seq(dsi, 0xec,
+ 0x64, 0xdc, 0xec, 0x3b, 0x52, 0x00, 0x0b, 0x0b,
+ 0x13, 0x15, 0x68, 0x0b, 0xb5);
+ mipi_dsi_generic_write_seq(dsi, 0xb0, 0x03);
ret = mipi_dsi_dcs_set_tear_on(dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
if (ret < 0) {
@@ -76,7 +60,7 @@ static int jdi_fhd_r63452_on(struct jdi_fhd_r63452 *ctx)
return ret;
}
- dsi_dcs_write_seq(dsi, MIPI_DCS_SET_ADDRESS_MODE, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_ADDRESS_MODE, 0x00);
ret = mipi_dsi_dcs_set_pixel_format(dsi, 0x77);
if (ret < 0) {
@@ -108,10 +92,10 @@ static int jdi_fhd_r63452_on(struct jdi_fhd_r63452 *ctx)
return ret;
}
- dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x24);
- dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_POWER_SAVE, 0x00);
- dsi_dcs_write_seq(dsi, MIPI_DCS_SET_CABC_MIN_BRIGHTNESS, 0x00);
- dsi_dcs_write_seq(dsi, 0x84, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x24);
+ mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_POWER_SAVE, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_CABC_MIN_BRIGHTNESS, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0x84, 0x00);
ret = mipi_dsi_dcs_set_display_on(dsi);
if (ret < 0) {
@@ -127,10 +111,10 @@ static int jdi_fhd_r63452_on(struct jdi_fhd_r63452 *ctx)
}
msleep(80);
- dsi_generic_write_seq(dsi, 0xb0, 0x04);
- dsi_dcs_write_seq(dsi, 0x84, 0x00);
- dsi_generic_write_seq(dsi, 0xc8, 0x11);
- dsi_generic_write_seq(dsi, 0xb0, 0x03);
+ mipi_dsi_generic_write_seq(dsi, 0xb0, 0x04);
+ mipi_dsi_dcs_write_seq(dsi, 0x84, 0x00);
+ mipi_dsi_generic_write_seq(dsi, 0xc8, 0x11);
+ mipi_dsi_generic_write_seq(dsi, 0xb0, 0x03);
return 0;
}
@@ -143,12 +127,12 @@ static int jdi_fhd_r63452_off(struct jdi_fhd_r63452 *ctx)
dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
- dsi_generic_write_seq(dsi, 0xb0, 0x00);
- dsi_generic_write_seq(dsi, 0xd6, 0x01);
- dsi_generic_write_seq(dsi, 0xec,
- 0x64, 0xdc, 0xec, 0x3b, 0x52, 0x00, 0x0b, 0x0b,
- 0x13, 0x15, 0x68, 0x0b, 0x95);
- dsi_generic_write_seq(dsi, 0xb0, 0x03);
+ mipi_dsi_generic_write_seq(dsi, 0xb0, 0x00);
+ mipi_dsi_generic_write_seq(dsi, 0xd6, 0x01);
+ mipi_dsi_generic_write_seq(dsi, 0xec,
+ 0x64, 0xdc, 0xec, 0x3b, 0x52, 0x00, 0x0b, 0x0b,
+ 0x13, 0x15, 0x68, 0x0b, 0x95);
+ mipi_dsi_generic_write_seq(dsi, 0xb0, 0x03);
ret = mipi_dsi_dcs_set_display_off(dsi);
if (ret < 0) {
diff --git a/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c b/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c
index 5619f186d28c..d2efd887484b 100644
--- a/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c
+++ b/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c
@@ -244,14 +244,6 @@ struct ltk050h3146w *panel_to_ltk050h3146w(struct drm_panel *panel)
return container_of(panel, struct ltk050h3146w, panel);
}
-#define dsi_dcs_write_seq(dsi, cmd, seq...) do { \
- static const u8 b[] = { cmd, seq }; \
- int ret; \
- ret = mipi_dsi_dcs_write_buffer(dsi, b, ARRAY_SIZE(b)); \
- if (ret < 0) \
- return ret; \
- } while (0)
-
static int ltk050h3146w_init_sequence(struct ltk050h3146w *ctx)
{
struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
@@ -261,55 +253,55 @@ static int ltk050h3146w_init_sequence(struct ltk050h3146w *ctx)
* Init sequence was supplied by the panel vendor without much
* documentation.
*/
- dsi_dcs_write_seq(dsi, 0xdf, 0x93, 0x65, 0xf8);
- dsi_dcs_write_seq(dsi, 0xb0, 0x01, 0x03, 0x02, 0x00, 0x64, 0x06,
- 0x01);
- dsi_dcs_write_seq(dsi, 0xb2, 0x00, 0xb5);
- dsi_dcs_write_seq(dsi, 0xb3, 0x00, 0xb5);
- dsi_dcs_write_seq(dsi, 0xb7, 0x00, 0xbf, 0x00, 0x00, 0xbf, 0x00);
-
- dsi_dcs_write_seq(dsi, 0xb9, 0x00, 0xc4, 0x23, 0x07);
- dsi_dcs_write_seq(dsi, 0xbb, 0x02, 0x01, 0x24, 0x00, 0x28, 0x0f,
- 0x28, 0x04, 0xcc, 0xcc, 0xcc);
- dsi_dcs_write_seq(dsi, 0xbc, 0x0f, 0x04);
- dsi_dcs_write_seq(dsi, 0xbe, 0x1e, 0xf2);
- dsi_dcs_write_seq(dsi, 0xc0, 0x26, 0x03);
- dsi_dcs_write_seq(dsi, 0xc1, 0x00, 0x12);
- dsi_dcs_write_seq(dsi, 0xc3, 0x04, 0x02, 0x02, 0x76, 0x01, 0x80,
- 0x80);
- dsi_dcs_write_seq(dsi, 0xc4, 0x24, 0x80, 0xb4, 0x81, 0x12, 0x0f,
- 0x16, 0x00, 0x00);
- dsi_dcs_write_seq(dsi, 0xc8, 0x7f, 0x72, 0x67, 0x5d, 0x5d, 0x50,
- 0x56, 0x41, 0x59, 0x57, 0x55, 0x70, 0x5b, 0x5f,
- 0x4f, 0x47, 0x38, 0x23, 0x08, 0x7f, 0x72, 0x67,
- 0x5d, 0x5d, 0x50, 0x56, 0x41, 0x59, 0x57, 0x55,
- 0x70, 0x5b, 0x5f, 0x4f, 0x47, 0x38, 0x23, 0x08);
- dsi_dcs_write_seq(dsi, 0xd0, 0x1e, 0x1f, 0x57, 0x58, 0x48, 0x4a,
- 0x44, 0x46, 0x40, 0x1f, 0x42, 0x1f, 0x1f, 0x1f,
- 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f);
- dsi_dcs_write_seq(dsi, 0xd1, 0x1e, 0x1f, 0x57, 0x58, 0x49, 0x4b,
- 0x45, 0x47, 0x41, 0x1f, 0x43, 0x1f, 0x1f, 0x1f,
- 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f);
- dsi_dcs_write_seq(dsi, 0xd2, 0x1f, 0x1e, 0x17, 0x18, 0x07, 0x05,
- 0x0b, 0x09, 0x03, 0x1f, 0x01, 0x1f, 0x1f, 0x1f,
- 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f);
- dsi_dcs_write_seq(dsi, 0xd3, 0x1f, 0x1e, 0x17, 0x18, 0x06, 0x04,
- 0x0a, 0x08, 0x02, 0x1f, 0x00, 0x1f, 0x1f, 0x1f,
- 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f);
- dsi_dcs_write_seq(dsi, 0xd4, 0x00, 0x00, 0x00, 0x0c, 0x06, 0x20,
- 0x01, 0x02, 0x00, 0x60, 0x15, 0xb0, 0x30, 0x03,
- 0x04, 0x00, 0x60, 0x72, 0x0a, 0x00, 0x60, 0x08);
- dsi_dcs_write_seq(dsi, 0xd5, 0x00, 0x06, 0x06, 0x00, 0x30, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0xbc, 0x50, 0x00, 0x05,
- 0x21, 0x00, 0x60);
- dsi_dcs_write_seq(dsi, 0xdd, 0x2c, 0xa3, 0x00);
- dsi_dcs_write_seq(dsi, 0xde, 0x02);
- dsi_dcs_write_seq(dsi, 0xb2, 0x32, 0x1c);
- dsi_dcs_write_seq(dsi, 0xb7, 0x3b, 0x70, 0x00, 0x04);
- dsi_dcs_write_seq(dsi, 0xc1, 0x11);
- dsi_dcs_write_seq(dsi, 0xbb, 0x21, 0x22, 0x23, 0x24, 0x36, 0x37);
- dsi_dcs_write_seq(dsi, 0xc2, 0x20, 0x38, 0x1e, 0x84);
- dsi_dcs_write_seq(dsi, 0xde, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0xdf, 0x93, 0x65, 0xf8);
+ mipi_dsi_dcs_write_seq(dsi, 0xb0, 0x01, 0x03, 0x02, 0x00, 0x64, 0x06,
+ 0x01);
+ mipi_dsi_dcs_write_seq(dsi, 0xb2, 0x00, 0xb5);
+ mipi_dsi_dcs_write_seq(dsi, 0xb3, 0x00, 0xb5);
+ mipi_dsi_dcs_write_seq(dsi, 0xb7, 0x00, 0xbf, 0x00, 0x00, 0xbf, 0x00);
+
+ mipi_dsi_dcs_write_seq(dsi, 0xb9, 0x00, 0xc4, 0x23, 0x07);
+ mipi_dsi_dcs_write_seq(dsi, 0xbb, 0x02, 0x01, 0x24, 0x00, 0x28, 0x0f,
+ 0x28, 0x04, 0xcc, 0xcc, 0xcc);
+ mipi_dsi_dcs_write_seq(dsi, 0xbc, 0x0f, 0x04);
+ mipi_dsi_dcs_write_seq(dsi, 0xbe, 0x1e, 0xf2);
+ mipi_dsi_dcs_write_seq(dsi, 0xc0, 0x26, 0x03);
+ mipi_dsi_dcs_write_seq(dsi, 0xc1, 0x00, 0x12);
+ mipi_dsi_dcs_write_seq(dsi, 0xc3, 0x04, 0x02, 0x02, 0x76, 0x01, 0x80,
+ 0x80);
+ mipi_dsi_dcs_write_seq(dsi, 0xc4, 0x24, 0x80, 0xb4, 0x81, 0x12, 0x0f,
+ 0x16, 0x00, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0xc8, 0x7f, 0x72, 0x67, 0x5d, 0x5d, 0x50,
+ 0x56, 0x41, 0x59, 0x57, 0x55, 0x70, 0x5b, 0x5f,
+ 0x4f, 0x47, 0x38, 0x23, 0x08, 0x7f, 0x72, 0x67,
+ 0x5d, 0x5d, 0x50, 0x56, 0x41, 0x59, 0x57, 0x55,
+ 0x70, 0x5b, 0x5f, 0x4f, 0x47, 0x38, 0x23, 0x08);
+ mipi_dsi_dcs_write_seq(dsi, 0xd0, 0x1e, 0x1f, 0x57, 0x58, 0x48, 0x4a,
+ 0x44, 0x46, 0x40, 0x1f, 0x42, 0x1f, 0x1f, 0x1f,
+ 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f);
+ mipi_dsi_dcs_write_seq(dsi, 0xd1, 0x1e, 0x1f, 0x57, 0x58, 0x49, 0x4b,
+ 0x45, 0x47, 0x41, 0x1f, 0x43, 0x1f, 0x1f, 0x1f,
+ 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f);
+ mipi_dsi_dcs_write_seq(dsi, 0xd2, 0x1f, 0x1e, 0x17, 0x18, 0x07, 0x05,
+ 0x0b, 0x09, 0x03, 0x1f, 0x01, 0x1f, 0x1f, 0x1f,
+ 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f);
+ mipi_dsi_dcs_write_seq(dsi, 0xd3, 0x1f, 0x1e, 0x17, 0x18, 0x06, 0x04,
+ 0x0a, 0x08, 0x02, 0x1f, 0x00, 0x1f, 0x1f, 0x1f,
+ 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f);
+ mipi_dsi_dcs_write_seq(dsi, 0xd4, 0x00, 0x00, 0x00, 0x0c, 0x06, 0x20,
+ 0x01, 0x02, 0x00, 0x60, 0x15, 0xb0, 0x30, 0x03,
+ 0x04, 0x00, 0x60, 0x72, 0x0a, 0x00, 0x60, 0x08);
+ mipi_dsi_dcs_write_seq(dsi, 0xd5, 0x00, 0x06, 0x06, 0x00, 0x30, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0xbc, 0x50, 0x00, 0x05,
+ 0x21, 0x00, 0x60);
+ mipi_dsi_dcs_write_seq(dsi, 0xdd, 0x2c, 0xa3, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0xde, 0x02);
+ mipi_dsi_dcs_write_seq(dsi, 0xb2, 0x32, 0x1c);
+ mipi_dsi_dcs_write_seq(dsi, 0xb7, 0x3b, 0x70, 0x00, 0x04);
+ mipi_dsi_dcs_write_seq(dsi, 0xc1, 0x11);
+ mipi_dsi_dcs_write_seq(dsi, 0xbb, 0x21, 0x22, 0x23, 0x24, 0x36, 0x37);
+ mipi_dsi_dcs_write_seq(dsi, 0xc2, 0x20, 0x38, 0x1e, 0x84);
+ mipi_dsi_dcs_write_seq(dsi, 0xde, 0x00);
ret = mipi_dsi_dcs_set_tear_on(dsi, 1);
if (ret < 0) {
diff --git a/drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c b/drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c
index 772e3b6acece..9243b2ad828d 100644
--- a/drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c
+++ b/drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c
@@ -45,14 +45,6 @@ static inline struct mantix *panel_to_mantix(struct drm_panel *panel)
return container_of(panel, struct mantix, panel);
}
-#define dsi_generic_write_seq(dsi, seq...) do { \
- static const u8 d[] = { seq }; \
- int ret; \
- ret = mipi_dsi_generic_write(dsi, d, ARRAY_SIZE(d)); \
- if (ret < 0) \
- return ret; \
- } while (0)
-
static int mantix_init_sequence(struct mantix *ctx)
{
struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
@@ -61,18 +53,18 @@ static int mantix_init_sequence(struct mantix *ctx)
/*
* Init sequence was supplied by the panel vendor.
*/
- dsi_generic_write_seq(dsi, MANTIX_CMD_OTP_STOP_RELOAD_MIPI, 0x5A);
+ mipi_dsi_generic_write_seq(dsi, MANTIX_CMD_OTP_STOP_RELOAD_MIPI, 0x5A);
- dsi_generic_write_seq(dsi, MANTIX_CMD_INT_CANCEL, 0x03);
- dsi_generic_write_seq(dsi, MANTIX_CMD_OTP_STOP_RELOAD_MIPI, 0x5A, 0x03);
- dsi_generic_write_seq(dsi, 0x80, 0xA9, 0x00);
+ mipi_dsi_generic_write_seq(dsi, MANTIX_CMD_INT_CANCEL, 0x03);
+ mipi_dsi_generic_write_seq(dsi, MANTIX_CMD_OTP_STOP_RELOAD_MIPI, 0x5A, 0x03);
+ mipi_dsi_generic_write_seq(dsi, 0x80, 0xA9, 0x00);
- dsi_generic_write_seq(dsi, MANTIX_CMD_OTP_STOP_RELOAD_MIPI, 0x5A, 0x09);
- dsi_generic_write_seq(dsi, 0x80, 0x64, 0x00, 0x64, 0x00, 0x00);
+ mipi_dsi_generic_write_seq(dsi, MANTIX_CMD_OTP_STOP_RELOAD_MIPI, 0x5A, 0x09);
+ mipi_dsi_generic_write_seq(dsi, 0x80, 0x64, 0x00, 0x64, 0x00, 0x00);
msleep(20);
- dsi_generic_write_seq(dsi, MANTIX_CMD_SPI_FINISH, 0xA5);
- dsi_generic_write_seq(dsi, MANTIX_CMD_OTP_STOP_RELOAD_MIPI, 0x00, 0x2F);
+ mipi_dsi_generic_write_seq(dsi, MANTIX_CMD_SPI_FINISH, 0xA5);
+ mipi_dsi_generic_write_seq(dsi, MANTIX_CMD_OTP_STOP_RELOAD_MIPI, 0x00, 0x2F);
msleep(20);
dev_dbg(dev, "Panel init sequence done\n");
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt35950.c b/drivers/gpu/drm/panel/panel-novatek-nt35950.c
index 3a844917da07..abf752b36a52 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt35950.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt35950.c
@@ -89,14 +89,6 @@ static inline struct nt35950 *to_nt35950(struct drm_panel *panel)
return container_of(panel, struct nt35950, panel);
}
-#define dsi_dcs_write_seq(dsi, seq...) do { \
- static const u8 d[] = { seq }; \
- int ret; \
- ret = mipi_dsi_dcs_write_buffer(dsi, d, ARRAY_SIZE(d)); \
- if (ret < 0) \
- return ret; \
- } while (0)
-
static void nt35950_reset(struct nt35950 *nt)
{
gpiod_set_value_cansleep(nt->reset_gpio, 1);
@@ -338,7 +330,7 @@ static int nt35950_on(struct nt35950 *nt)
return ret;
/* Unknown command */
- dsi_dcs_write_seq(dsi, 0xd4, 0x88, 0x88);
+ mipi_dsi_dcs_write_seq(dsi, 0xd4, 0x88, 0x88);
/* CMD2 Page 7 */
ret = nt35950_set_cmd2_page(nt, 7);
@@ -346,10 +338,10 @@ static int nt35950_on(struct nt35950 *nt)
return ret;
/* Enable SubPixel Rendering */
- dsi_dcs_write_seq(dsi, MCS_PARAM_SPR_EN, 0x01);
+ mipi_dsi_dcs_write_seq(dsi, MCS_PARAM_SPR_EN, 0x01);
/* SPR Mode: YYG Rainbow-RGB */
- dsi_dcs_write_seq(dsi, MCS_PARAM_SPR_MODE, MCS_SPR_MODE_YYG_RAINBOW_RGB);
+ mipi_dsi_dcs_write_seq(dsi, MCS_PARAM_SPR_MODE, MCS_SPR_MODE_YYG_RAINBOW_RGB);
/* CMD3 */
ret = nt35950_inject_black_image(nt);
diff --git a/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c b/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c
index 36a46cb7fe1c..aba556c98300 100644
--- a/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c
+++ b/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c
@@ -202,8 +202,7 @@ static const struct drm_panel_funcs lcd_olinuxino_funcs = {
.get_modes = lcd_olinuxino_get_modes,
};
-static int lcd_olinuxino_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int lcd_olinuxino_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct lcd_olinuxino *lcd;
@@ -309,7 +308,7 @@ static struct i2c_driver lcd_olinuxino_driver = {
.name = "lcd_olinuxino",
.of_match_table = lcd_olinuxino_of_ids,
},
- .probe = lcd_olinuxino_probe,
+ .probe_new = lcd_olinuxino_probe,
.remove = lcd_olinuxino_remove,
};
diff --git a/drivers/gpu/drm/panel/panel-orisetech-ota5601a.c b/drivers/gpu/drm/panel/panel-orisetech-ota5601a.c
new file mode 100644
index 000000000000..e46be5014d42
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-orisetech-ota5601a.c
@@ -0,0 +1,364 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Orisetech OTA5601A TFT LCD panel driver
+ *
+ * Copyright (C) 2021, Christophe Branchereau <cbranchereau@gmail.com>
+ */
+
+#include <linux/bits.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/gpio/consumer.h>
+#include <linux/media-bus-format.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/spi/spi.h>
+
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+
+#define OTA5601A_CTL 0x01
+#define OTA5601A_CTL_OFF 0x00
+#define OTA5601A_CTL_ON BIT(0)
+
+struct ota5601a_panel_info {
+ const struct drm_display_mode *display_modes;
+ unsigned int num_modes;
+ u16 width_mm, height_mm;
+ u32 bus_format, bus_flags;
+};
+
+struct ota5601a {
+ struct drm_panel drm_panel;
+ struct regmap *map;
+ struct regulator *supply;
+ const struct ota5601a_panel_info *panel_info;
+
+ struct gpio_desc *reset_gpio;
+};
+
+static inline struct ota5601a *to_ota5601a(struct drm_panel *panel)
+{
+ return container_of(panel, struct ota5601a, drm_panel);
+}
+
+static const struct reg_sequence ota5601a_panel_regs[] = {
+ { 0xfd, 0x00 }, /* Page Shift */
+ { 0x02, 0x00 }, /* Reset */
+
+ { 0x18, 0x00 }, /* Interface Sel: RGB 24 Bits */
+ { 0x34, 0x20 }, /* Undocumented */
+
+ { 0x0c, 0x01 }, /* Contrast set by CMD1 == within page 0x00 */
+ { 0x0d, 0x48 }, /* R Brightness */
+ { 0x0e, 0x48 }, /* G Brightness */
+ { 0x0f, 0x48 }, /* B Brightness */
+ { 0x07, 0x40 }, /* R Contrast */
+ { 0x08, 0x33 }, /* G Contrast */
+ { 0x09, 0x3a }, /* B Contrast */
+
+ { 0x16, 0x01 }, /* NTSC Sel */
+ { 0x19, 0x8d }, /* VBLK */
+ { 0x1a, 0x28 }, /* HBLK */
+ { 0x1c, 0x00 }, /* Scan Shift Dir. */
+
+ { 0xfd, 0xc5 }, /* Page Shift */
+ { 0x82, 0x0c }, /* PWR_CTRL Pump */
+ { 0xa2, 0xb4 }, /* PWR_CTRL VGH/VGL */
+
+ { 0xfd, 0xc4 }, /* Page Shift - What follows is listed as "RGB 24bit Timing Set" */
+ { 0x82, 0x45 },
+
+ { 0xfd, 0xc1 },
+ { 0x91, 0x02 },
+
+ { 0xfd, 0xc0 },
+ { 0xa1, 0x01 },
+ { 0xa2, 0x1f },
+ { 0xa3, 0x0b },
+ { 0xa4, 0x38 },
+ { 0xa5, 0x00 },
+ { 0xa6, 0x0a },
+ { 0xa7, 0x38 },
+ { 0xa8, 0x00 },
+ { 0xa9, 0x0a },
+ { 0xaa, 0x37 },
+
+ { 0xfd, 0xce },
+ { 0x81, 0x18 },
+ { 0x82, 0x43 },
+ { 0x83, 0x43 },
+ { 0x91, 0x06 },
+ { 0x93, 0x38 },
+ { 0x94, 0x02 },
+ { 0x95, 0x06 },
+ { 0x97, 0x38 },
+ { 0x98, 0x02 },
+ { 0x99, 0x06 },
+ { 0x9b, 0x38 },
+ { 0x9c, 0x02 },
+
+ { 0xfd, 0x00 }, /* Page Shift */
+};
+
+static const struct regmap_config ota5601a_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
+static int ota5601a_prepare(struct drm_panel *drm_panel)
+{
+ struct ota5601a *panel = to_ota5601a(drm_panel);
+ int err;
+
+ err = regulator_enable(panel->supply);
+ if (err) {
+ dev_err(drm_panel->dev, "Failed to enable power supply: %d\n", err);
+ return err;
+ }
+
+ /* Reset to be held low for 10us min according to the doc, 10ms before sending commands */
+ gpiod_set_value_cansleep(panel->reset_gpio, 1);
+ usleep_range(10, 30);
+ gpiod_set_value_cansleep(panel->reset_gpio, 0);
+ usleep_range(10000, 20000);
+
+ /* Init all registers. */
+ err = regmap_multi_reg_write(panel->map, ota5601a_panel_regs,
+ ARRAY_SIZE(ota5601a_panel_regs));
+ if (err) {
+ dev_err(drm_panel->dev, "Failed to init registers: %d\n", err);
+ goto err_disable_regulator;
+ }
+
+ msleep(120);
+
+ return 0;
+
+err_disable_regulator:
+ regulator_disable(panel->supply);
+ return err;
+}
+
+static int ota5601a_unprepare(struct drm_panel *drm_panel)
+{
+ struct ota5601a *panel = to_ota5601a(drm_panel);
+
+ gpiod_set_value_cansleep(panel->reset_gpio, 1);
+
+ regulator_disable(panel->supply);
+
+ return 0;
+}
+
+static int ota5601a_enable(struct drm_panel *drm_panel)
+{
+ struct ota5601a *panel = to_ota5601a(drm_panel);
+ int err;
+
+ err = regmap_write(panel->map, OTA5601A_CTL, OTA5601A_CTL_ON);
+
+ if (err) {
+ dev_err(drm_panel->dev, "Unable to enable panel: %d\n", err);
+ return err;
+ }
+
+ if (drm_panel->backlight) {
+ /* Wait for the picture to be ready before enabling backlight */
+ msleep(120);
+ }
+
+ return 0;
+}
+
+static int ota5601a_disable(struct drm_panel *drm_panel)
+{
+ struct ota5601a *panel = to_ota5601a(drm_panel);
+ int err;
+
+ err = regmap_write(panel->map, OTA5601A_CTL, OTA5601A_CTL_OFF);
+
+ if (err) {
+ dev_err(drm_panel->dev, "Unable to disable panel: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static int ota5601a_get_modes(struct drm_panel *drm_panel,
+ struct drm_connector *connector)
+{
+ struct ota5601a *panel = to_ota5601a(drm_panel);
+ const struct ota5601a_panel_info *panel_info = panel->panel_info;
+ struct drm_display_mode *mode;
+ unsigned int i;
+
+ for (i = 0; i < panel_info->num_modes; i++) {
+ mode = drm_mode_duplicate(connector->dev,
+ &panel_info->display_modes[i]);
+ if (!mode)
+ return -ENOMEM;
+
+ drm_mode_set_name(mode);
+
+ mode->type = DRM_MODE_TYPE_DRIVER;
+ if (panel_info->num_modes == 1)
+ mode->type |= DRM_MODE_TYPE_PREFERRED;
+
+ drm_mode_probed_add(connector, mode);
+ }
+
+ connector->display_info.bpc = 8;
+ connector->display_info.width_mm = panel_info->width_mm;
+ connector->display_info.height_mm = panel_info->height_mm;
+
+ drm_display_info_set_bus_formats(&connector->display_info,
+ &panel_info->bus_format, 1);
+ connector->display_info.bus_flags = panel_info->bus_flags;
+
+ return panel_info->num_modes;
+}
+
+static const struct drm_panel_funcs ota5601a_funcs = {
+ .prepare = ota5601a_prepare,
+ .unprepare = ota5601a_unprepare,
+ .enable = ota5601a_enable,
+ .disable = ota5601a_disable,
+ .get_modes = ota5601a_get_modes,
+};
+
+static int ota5601a_probe(struct spi_device *spi)
+{
+ const struct spi_device_id *id = spi_get_device_id(spi);
+ struct device *dev = &spi->dev;
+ struct ota5601a *panel;
+ int err;
+
+ panel = devm_kzalloc(dev, sizeof(*panel), GFP_KERNEL);
+ if (!panel)
+ return -ENOMEM;
+
+ spi_set_drvdata(spi, panel);
+
+ panel->panel_info = (const struct ota5601a_panel_info *)id->driver_data;
+ if (!panel->panel_info)
+ return -EINVAL;
+
+ panel->supply = devm_regulator_get(dev, "power");
+ if (IS_ERR(panel->supply)) {
+ dev_err(dev, "Failed to get power supply\n");
+ return PTR_ERR(panel->supply);
+ }
+
+ panel->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(panel->reset_gpio)) {
+ dev_err(dev, "Failed to get reset GPIO\n");
+ return PTR_ERR(panel->reset_gpio);
+ }
+
+ spi->bits_per_word = 8;
+ spi->mode = SPI_MODE_3 | SPI_3WIRE;
+ err = spi_setup(spi);
+ if (err) {
+ dev_err(dev, "Failed to setup SPI\n");
+ return err;
+ }
+
+ panel->map = devm_regmap_init_spi(spi, &ota5601a_regmap_config);
+ if (IS_ERR(panel->map)) {
+ dev_err(dev, "Failed to init regmap\n");
+ return PTR_ERR(panel->map);
+ }
+
+ drm_panel_init(&panel->drm_panel, dev, &ota5601a_funcs,
+ DRM_MODE_CONNECTOR_DPI);
+
+ err = drm_panel_of_backlight(&panel->drm_panel);
+ if (err) {
+ if (err != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get backlight handle\n");
+ return err;
+ }
+
+ drm_panel_add(&panel->drm_panel);
+
+ return 0;
+}
+
+static void ota5601a_remove(struct spi_device *spi)
+{
+ struct ota5601a *panel = spi_get_drvdata(spi);
+
+ drm_panel_remove(&panel->drm_panel);
+
+ ota5601a_disable(&panel->drm_panel);
+ ota5601a_unprepare(&panel->drm_panel);
+}
+
+static const struct drm_display_mode gpt3_display_modes[] = {
+ { /* 60 Hz */
+ .clock = 27000,
+ .hdisplay = 640,
+ .hsync_start = 640 + 220,
+ .hsync_end = 640 + 220 + 20,
+ .htotal = 640 + 220 + 20 + 20,
+ .vdisplay = 480,
+ .vsync_start = 480 + 7,
+ .vsync_end = 480 + 7 + 6,
+ .vtotal = 480 + 7 + 6 + 7,
+ .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+ },
+
+ { /* 50 Hz */
+ .clock = 24000,
+ .hdisplay = 640,
+ .hsync_start = 640 + 280,
+ .hsync_end = 640 + 280 + 20,
+ .htotal = 640 + 280 + 20 + 20,
+ .vdisplay = 480,
+ .vsync_start = 480 + 7,
+ .vsync_end = 480 + 7 + 6,
+ .vtotal = 480 + 7 + 6 + 7,
+ .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+ },
+};
+
+static const struct ota5601a_panel_info gpt3_info = {
+ .display_modes = gpt3_display_modes,
+ .num_modes = ARRAY_SIZE(gpt3_display_modes),
+ .width_mm = 71,
+ .height_mm = 51,
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+ .bus_flags = DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE,
+};
+
+static const struct spi_device_id gpt3_id[] = {
+ { "gpt3", (kernel_ulong_t)&gpt3_info },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(spi, gpt3_id);
+
+static const struct of_device_id ota5601a_of_match[] = {
+ { .compatible = "focaltech,gpt3" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ota5601a_of_match);
+
+static struct spi_driver ota5601a_driver = {
+ .driver = {
+ .name = "ota5601a",
+ .of_match_table = ota5601a_of_match,
+ },
+ .id_table = gpt3_id,
+ .probe = ota5601a_probe,
+ .remove = ota5601a_remove,
+};
+
+module_spi_driver(ota5601a_driver);
+
+MODULE_AUTHOR("Christophe Branchereau <cbranchereau@gmail.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
index 79f852465a84..11d6ca276c1e 100644
--- a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
+++ b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
@@ -43,7 +43,6 @@
#include <linux/delay.h>
#include <linux/err.h>
-#include <linux/fb.h>
#include <linux/i2c.h>
#include <linux/media-bus-format.h>
#include <linux/module.h>
@@ -362,8 +361,7 @@ static const struct drm_panel_funcs rpi_touchscreen_funcs = {
.get_modes = rpi_touchscreen_get_modes,
};
-static int rpi_touchscreen_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+static int rpi_touchscreen_probe(struct i2c_client *i2c)
{
struct device *dev = &i2c->dev;
struct rpi_touchscreen *ts;
@@ -491,7 +489,7 @@ static struct i2c_driver rpi_touchscreen_driver = {
.name = "rpi_touchscreen",
.of_match_table = rpi_touchscreen_of_ids,
},
- .probe = rpi_touchscreen_probe,
+ .probe_new = rpi_touchscreen_probe,
.remove = rpi_touchscreen_remove,
};
diff --git a/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c b/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c
index a8a98c91b13c..2ef5ea5eaeeb 100644
--- a/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c
+++ b/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c
@@ -11,10 +11,10 @@
#include <linux/device.h>
#include <linux/err.h>
#include <linux/errno.h>
-#include <linux/fb.h>
#include <linux/kernel.h>
#include <linux/media-bus-format.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/gpio/consumer.h>
#include <linux/regulator/consumer.h>
diff --git a/drivers/gpu/drm/panel/panel-samsung-atna33xc20.c b/drivers/gpu/drm/panel/panel-samsung-atna33xc20.c
index 5a8b978c6415..5703f4712d96 100644
--- a/drivers/gpu/drm/panel/panel-samsung-atna33xc20.c
+++ b/drivers/gpu/drm/panel/panel-samsung-atna33xc20.c
@@ -53,7 +53,7 @@ static void atana33xc20_wait(ktime_t start_ktime, unsigned int min_ms)
ktime_t now_ktime, min_ktime;
min_ktime = ktime_add(start_ktime, ms_to_ktime(min_ms));
- now_ktime = ktime_get();
+ now_ktime = ktime_get_boottime();
if (ktime_before(now_ktime, min_ktime))
msleep(ktime_to_ms(ktime_sub(min_ktime, now_ktime)) + 1);
@@ -75,7 +75,7 @@ static int atana33xc20_suspend(struct device *dev)
ret = regulator_disable(p->supply);
if (ret)
return ret;
- p->powered_off_time = ktime_get();
+ p->powered_off_time = ktime_get_boottime();
p->el3_was_on = false;
return 0;
@@ -93,7 +93,7 @@ static int atana33xc20_resume(struct device *dev)
ret = regulator_enable(p->supply);
if (ret)
return ret;
- p->powered_on_time = ktime_get();
+ p->powered_on_time = ktime_get_boottime();
if (p->no_hpd) {
msleep(HPD_MAX_MS);
@@ -142,7 +142,7 @@ static int atana33xc20_disable(struct drm_panel *panel)
return 0;
gpiod_set_value_cansleep(p->el_on3_gpio, 0);
- p->el_on3_off_time = ktime_get();
+ p->el_on3_off_time = ktime_get_boottime();
p->enabled = false;
/*
@@ -310,7 +310,7 @@ static int atana33xc20_probe(struct dp_aux_ep_device *aux_ep)
ret = devm_add_action_or_reset(dev, atana33xc20_runtime_disable, dev);
if (ret)
return ret;
- pm_runtime_set_autosuspend_delay(dev, 1000);
+ pm_runtime_set_autosuspend_delay(dev, 2000);
pm_runtime_use_autosuspend(dev);
ret = devm_add_action_or_reset(dev, atana33xc20_dont_use_autosuspend, dev);
if (ret)
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c b/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c
index 5c621b15e84c..39eef3dce7c9 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c
@@ -692,7 +692,9 @@ static int s6e3ha2_probe(struct mipi_dsi_device *dsi)
dsi->lanes = 4;
dsi->format = MIPI_DSI_FMT_RGB888;
- dsi->mode_flags = MIPI_DSI_CLOCK_NON_CONTINUOUS;
+ dsi->mode_flags = MIPI_DSI_CLOCK_NON_CONTINUOUS |
+ MIPI_DSI_MODE_VIDEO_NO_HFP | MIPI_DSI_MODE_VIDEO_NO_HBP |
+ MIPI_DSI_MODE_VIDEO_NO_HSA | MIPI_DSI_MODE_NO_EOT_PACKET;
ctx->supplies[0].supply = "vdd3";
ctx->supplies[1].supply = "vci";
@@ -731,6 +733,7 @@ static int s6e3ha2_probe(struct mipi_dsi_device *dsi)
drm_panel_init(&ctx->panel, dev, &s6e3ha2_drm_funcs,
DRM_MODE_CONNECTOR_DSI);
+ ctx->panel.prepare_prev_first = true;
drm_panel_add(&ctx->panel);
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c b/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c
index e06fd35de814..46d6f4a87bf7 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c
@@ -446,7 +446,8 @@ static int s6e63j0x03_probe(struct mipi_dsi_device *dsi)
dsi->lanes = 1;
dsi->format = MIPI_DSI_FMT_RGB888;
- dsi->mode_flags = MIPI_DSI_MODE_NO_EOT_PACKET;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO_NO_HFP |
+ MIPI_DSI_MODE_VIDEO_NO_HBP | MIPI_DSI_MODE_VIDEO_NO_HSA;
ctx->supplies[0].supply = "vdd3";
ctx->supplies[1].supply = "vci";
@@ -462,6 +463,7 @@ static int s6e63j0x03_probe(struct mipi_dsi_device *dsi)
drm_panel_init(&ctx->panel, dev, &s6e63j0x03_funcs,
DRM_MODE_CONNECTOR_DSI);
+ ctx->panel.prepare_prev_first = true;
ctx->bl_dev = backlight_device_register("s6e63j0x03", dev, ctx,
&s6e63j0x03_bl_ops, NULL);
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c b/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c
index 97ff7a18545c..7431cae7427e 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c
@@ -28,14 +28,6 @@ s6e88a0_ams452ef01 *to_s6e88a0_ams452ef01(struct drm_panel *panel)
return container_of(panel, struct s6e88a0_ams452ef01, panel);
}
-#define dsi_dcs_write_seq(dsi, seq...) do { \
- static const u8 d[] = { seq }; \
- int ret; \
- ret = mipi_dsi_dcs_write_buffer(dsi, d, ARRAY_SIZE(d)); \
- if (ret < 0) \
- return ret; \
- } while (0)
-
static void s6e88a0_ams452ef01_reset(struct s6e88a0_ams452ef01 *ctx)
{
gpiod_set_value_cansleep(ctx->reset_gpio, 1);
@@ -54,8 +46,8 @@ static int s6e88a0_ams452ef01_on(struct s6e88a0_ams452ef01 *ctx)
dsi->mode_flags |= MIPI_DSI_MODE_LPM;
- dsi_dcs_write_seq(dsi, 0xf0, 0x5a, 0x5a); // enable LEVEL2 commands
- dsi_dcs_write_seq(dsi, 0xcc, 0x4c); // set Pixel Clock Divider polarity
+ mipi_dsi_dcs_write_seq(dsi, 0xf0, 0x5a, 0x5a); // enable LEVEL2 commands
+ mipi_dsi_dcs_write_seq(dsi, 0xcc, 0x4c); // set Pixel Clock Divider polarity
ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
if (ret < 0) {
@@ -65,23 +57,23 @@ static int s6e88a0_ams452ef01_on(struct s6e88a0_ams452ef01 *ctx)
msleep(120);
// set default brightness/gama
- dsi_dcs_write_seq(dsi, 0xca,
- 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, // V255 RR,GG,BB
- 0x80, 0x80, 0x80, // V203 R,G,B
- 0x80, 0x80, 0x80, // V151 R,G,B
- 0x80, 0x80, 0x80, // V87 R,G,B
- 0x80, 0x80, 0x80, // V51 R,G,B
- 0x80, 0x80, 0x80, // V35 R,G,B
- 0x80, 0x80, 0x80, // V23 R,G,B
- 0x80, 0x80, 0x80, // V11 R,G,B
- 0x6b, 0x68, 0x71, // V3 R,G,B
- 0x00, 0x00, 0x00); // V1 R,G,B
+ mipi_dsi_dcs_write_seq(dsi, 0xca,
+ 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, // V255 RR,GG,BB
+ 0x80, 0x80, 0x80, // V203 R,G,B
+ 0x80, 0x80, 0x80, // V151 R,G,B
+ 0x80, 0x80, 0x80, // V87 R,G,B
+ 0x80, 0x80, 0x80, // V51 R,G,B
+ 0x80, 0x80, 0x80, // V35 R,G,B
+ 0x80, 0x80, 0x80, // V23 R,G,B
+ 0x80, 0x80, 0x80, // V11 R,G,B
+ 0x6b, 0x68, 0x71, // V3 R,G,B
+ 0x00, 0x00, 0x00); // V1 R,G,B
// set default Amoled Off Ratio
- dsi_dcs_write_seq(dsi, 0xb2, 0x40, 0x0a, 0x17, 0x00, 0x0a);
- dsi_dcs_write_seq(dsi, 0xb6, 0x2c, 0x0b); // set default elvss voltage
- dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_POWER_SAVE, 0x00);
- dsi_dcs_write_seq(dsi, 0xf7, 0x03); // gamma/aor update
- dsi_dcs_write_seq(dsi, 0xf0, 0xa5, 0xa5); // disable LEVEL2 commands
+ mipi_dsi_dcs_write_seq(dsi, 0xb2, 0x40, 0x0a, 0x17, 0x00, 0x0a);
+ mipi_dsi_dcs_write_seq(dsi, 0xb6, 0x2c, 0x0b); // set default elvss voltage
+ mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_POWER_SAVE, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0xf7, 0x03); // gamma/aor update
+ mipi_dsi_dcs_write_seq(dsi, 0xf0, 0xa5, 0xa5); // disable LEVEL2 commands
ret = mipi_dsi_dcs_set_display_on(dsi);
if (ret < 0) {
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c b/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c
index 54213beafaf5..c51d07ec1529 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c
@@ -990,8 +990,6 @@ static int s6e8aa0_probe(struct mipi_dsi_device *dsi)
dsi->lanes = 4;
dsi->format = MIPI_DSI_FMT_RGB888;
dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST
- | MIPI_DSI_MODE_VIDEO_NO_HFP | MIPI_DSI_MODE_VIDEO_NO_HBP
- | MIPI_DSI_MODE_VIDEO_NO_HSA | MIPI_DSI_MODE_NO_EOT_PACKET
| MIPI_DSI_MODE_VSYNC_FLUSH | MIPI_DSI_MODE_VIDEO_AUTO_VERT;
ret = s6e8aa0_parse_dt(ctx);
@@ -1018,6 +1016,7 @@ static int s6e8aa0_probe(struct mipi_dsi_device *dsi)
drm_panel_init(&ctx->panel, dev, &s6e8aa0_drm_funcs,
DRM_MODE_CONNECTOR_DSI);
+ ctx->panel.prepare_prev_first = true;
drm_panel_add(&ctx->panel);
diff --git a/drivers/gpu/drm/panel/panel-samsung-sofef00.c b/drivers/gpu/drm/panel/panel-samsung-sofef00.c
index 1a0d24595faa..1ebb79e3103c 100644
--- a/drivers/gpu/drm/panel/panel-samsung-sofef00.c
+++ b/drivers/gpu/drm/panel/panel-samsung-sofef00.c
@@ -10,7 +10,6 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/regulator/consumer.h>
-#include <linux/swab.h>
#include <linux/backlight.h>
#include <video/mipi_display.h>
@@ -34,14 +33,6 @@ struct sofef00_panel *to_sofef00_panel(struct drm_panel *panel)
return container_of(panel, struct sofef00_panel, panel);
}
-#define dsi_dcs_write_seq(dsi, seq...) do { \
- static const u8 d[] = { seq }; \
- int ret; \
- ret = mipi_dsi_dcs_write_buffer(dsi, d, ARRAY_SIZE(d)); \
- if (ret < 0) \
- return ret; \
- } while (0)
-
static void sofef00_panel_reset(struct sofef00_panel *ctx)
{
gpiod_set_value_cansleep(ctx->reset_gpio, 0);
@@ -67,7 +58,7 @@ static int sofef00_panel_on(struct sofef00_panel *ctx)
}
usleep_range(10000, 11000);
- dsi_dcs_write_seq(dsi, 0xf0, 0x5a, 0x5a);
+ mipi_dsi_dcs_write_seq(dsi, 0xf0, 0x5a, 0x5a);
ret = mipi_dsi_dcs_set_tear_on(dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
if (ret < 0) {
@@ -75,13 +66,13 @@ static int sofef00_panel_on(struct sofef00_panel *ctx)
return ret;
}
- dsi_dcs_write_seq(dsi, 0xf0, 0xa5, 0xa5);
- dsi_dcs_write_seq(dsi, 0xf0, 0x5a, 0x5a);
- dsi_dcs_write_seq(dsi, 0xb0, 0x07);
- dsi_dcs_write_seq(dsi, 0xb6, 0x12);
- dsi_dcs_write_seq(dsi, 0xf0, 0xa5, 0xa5);
- dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x20);
- dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_POWER_SAVE, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0xf0, 0xa5, 0xa5);
+ mipi_dsi_dcs_write_seq(dsi, 0xf0, 0x5a, 0x5a);
+ mipi_dsi_dcs_write_seq(dsi, 0xb0, 0x07);
+ mipi_dsi_dcs_write_seq(dsi, 0xb6, 0x12);
+ mipi_dsi_dcs_write_seq(dsi, 0xf0, 0xa5, 0xa5);
+ mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x20);
+ mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_POWER_SAVE, 0x00);
ret = mipi_dsi_dcs_set_display_on(dsi);
if (ret < 0) {
@@ -221,13 +212,9 @@ static int sofef00_panel_bl_update_status(struct backlight_device *bl)
{
struct mipi_dsi_device *dsi = bl_get_data(bl);
int err;
- u16 brightness;
-
- brightness = (u16)backlight_get_brightness(bl);
- // This panel needs the high and low bytes swapped for the brightness value
- brightness = __swab16(brightness);
+ u16 brightness = (u16)backlight_get_brightness(bl);
- err = mipi_dsi_dcs_set_display_brightness(dsi, brightness);
+ err = mipi_dsi_dcs_set_display_brightness_large(dsi, brightness);
if (err < 0)
return err;
diff --git a/drivers/gpu/drm/panel/panel-sharp-ls060t1sx01.c b/drivers/gpu/drm/panel/panel-sharp-ls060t1sx01.c
index 8a4e0c1fe73f..68f52eaaf4fa 100644
--- a/drivers/gpu/drm/panel/panel-sharp-ls060t1sx01.c
+++ b/drivers/gpu/drm/panel/panel-sharp-ls060t1sx01.c
@@ -32,12 +32,6 @@ static inline struct sharp_ls060 *to_sharp_ls060(struct drm_panel *panel)
return container_of(panel, struct sharp_ls060, panel);
}
-#define dsi_dcs_write_seq(dsi, seq...) ({ \
- static const u8 d[] = { seq }; \
- \
- mipi_dsi_dcs_write_buffer(dsi, d, ARRAY_SIZE(d)); \
- })
-
static void sharp_ls060_reset(struct sharp_ls060 *ctx)
{
gpiod_set_value_cansleep(ctx->reset_gpio, 0);
@@ -56,17 +50,8 @@ static int sharp_ls060_on(struct sharp_ls060 *ctx)
dsi->mode_flags |= MIPI_DSI_MODE_LPM;
- ret = dsi_dcs_write_seq(dsi, 0xbb, 0x13);
- if (ret < 0) {
- dev_err(dev, "Failed to send command: %d\n", ret);
- return ret;
- }
-
- ret = dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_MEMORY_START);
- if (ret < 0) {
- dev_err(dev, "Failed to send command: %d\n", ret);
- return ret;
- }
+ mipi_dsi_dcs_write_seq(dsi, 0xbb, 0x13);
+ mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_MEMORY_START);
ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
if (ret < 0) {
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 8a3b685c2fcc..065f378bba9d 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -280,7 +280,7 @@ static void panel_simple_wait(ktime_t start_ktime, unsigned int min_ms)
return;
min_ktime = ktime_add(start_ktime, ms_to_ktime(min_ms));
- now_ktime = ktime_get();
+ now_ktime = ktime_get_boottime();
if (ktime_before(now_ktime, min_ktime))
msleep(ktime_to_ms(ktime_sub(min_ktime, now_ktime)) + 1);
@@ -307,7 +307,7 @@ static int panel_simple_suspend(struct device *dev)
gpiod_set_value_cansleep(p->enable_gpio, 0);
regulator_disable(p->supply);
- p->unprepared_time = ktime_get();
+ p->unprepared_time = ktime_get_boottime();
kfree(p->edid);
p->edid = NULL;
@@ -351,7 +351,7 @@ static int panel_simple_resume(struct device *dev)
if (p->desc->delay.prepare)
msleep(p->desc->delay.prepare);
- p->prepared_time = ktime_get();
+ p->prepared_time = ktime_get_boottime();
return 0;
}
diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7703.c b/drivers/gpu/drm/panel/panel-sitronix-st7703.c
index 86a472b01360..6747ca237ced 100644
--- a/drivers/gpu/drm/panel/panel-sitronix-st7703.c
+++ b/drivers/gpu/drm/panel/panel-sitronix-st7703.c
@@ -73,14 +73,6 @@ static inline struct st7703 *panel_to_st7703(struct drm_panel *panel)
return container_of(panel, struct st7703, panel);
}
-#define dsi_generic_write_seq(dsi, seq...) do { \
- static const u8 d[] = { seq }; \
- int ret; \
- ret = mipi_dsi_generic_write(dsi, d, ARRAY_SIZE(d)); \
- if (ret < 0) \
- return ret; \
- } while (0)
-
static int jh057n_init_sequence(struct st7703 *ctx)
{
struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
@@ -90,50 +82,50 @@ static int jh057n_init_sequence(struct st7703 *ctx)
* resemble the ST7703 but the number of parameters often don't match
* so it's likely a clone.
*/
- dsi_generic_write_seq(dsi, ST7703_CMD_SETEXTC,
- 0xF1, 0x12, 0x83);
- dsi_generic_write_seq(dsi, ST7703_CMD_SETRGBIF,
- 0x10, 0x10, 0x05, 0x05, 0x03, 0xFF, 0x00, 0x00,
- 0x00, 0x00);
- dsi_generic_write_seq(dsi, ST7703_CMD_SETSCR,
- 0x73, 0x73, 0x50, 0x50, 0x00, 0x00, 0x08, 0x70,
- 0x00);
- dsi_generic_write_seq(dsi, ST7703_CMD_SETVDC, 0x4E);
- dsi_generic_write_seq(dsi, ST7703_CMD_SETPANEL, 0x0B);
- dsi_generic_write_seq(dsi, ST7703_CMD_SETCYC, 0x80);
- dsi_generic_write_seq(dsi, ST7703_CMD_SETDISP, 0xF0, 0x12, 0x30);
- dsi_generic_write_seq(dsi, ST7703_CMD_SETEQ,
- 0x07, 0x07, 0x0B, 0x0B, 0x03, 0x0B, 0x00, 0x00,
- 0x00, 0x00, 0xFF, 0x00, 0xC0, 0x10);
- dsi_generic_write_seq(dsi, ST7703_CMD_SETBGP, 0x08, 0x08);
+ mipi_dsi_generic_write_seq(dsi, ST7703_CMD_SETEXTC,
+ 0xF1, 0x12, 0x83);
+ mipi_dsi_generic_write_seq(dsi, ST7703_CMD_SETRGBIF,
+ 0x10, 0x10, 0x05, 0x05, 0x03, 0xFF, 0x00, 0x00,
+ 0x00, 0x00);
+ mipi_dsi_generic_write_seq(dsi, ST7703_CMD_SETSCR,
+ 0x73, 0x73, 0x50, 0x50, 0x00, 0x00, 0x08, 0x70,
+ 0x00);
+ mipi_dsi_generic_write_seq(dsi, ST7703_CMD_SETVDC, 0x4E);
+ mipi_dsi_generic_write_seq(dsi, ST7703_CMD_SETPANEL, 0x0B);
+ mipi_dsi_generic_write_seq(dsi, ST7703_CMD_SETCYC, 0x80);
+ mipi_dsi_generic_write_seq(dsi, ST7703_CMD_SETDISP, 0xF0, 0x12, 0x30);
+ mipi_dsi_generic_write_seq(dsi, ST7703_CMD_SETEQ,
+ 0x07, 0x07, 0x0B, 0x0B, 0x03, 0x0B, 0x00, 0x00,
+ 0x00, 0x00, 0xFF, 0x00, 0xC0, 0x10);
+ mipi_dsi_generic_write_seq(dsi, ST7703_CMD_SETBGP, 0x08, 0x08);
msleep(20);
- dsi_generic_write_seq(dsi, ST7703_CMD_SETVCOM, 0x3F, 0x3F);
- dsi_generic_write_seq(dsi, ST7703_CMD_UNKNOWN_BF, 0x02, 0x11, 0x00);
- dsi_generic_write_seq(dsi, ST7703_CMD_SETGIP1,
- 0x82, 0x10, 0x06, 0x05, 0x9E, 0x0A, 0xA5, 0x12,
- 0x31, 0x23, 0x37, 0x83, 0x04, 0xBC, 0x27, 0x38,
- 0x0C, 0x00, 0x03, 0x00, 0x00, 0x00, 0x0C, 0x00,
- 0x03, 0x00, 0x00, 0x00, 0x75, 0x75, 0x31, 0x88,
- 0x88, 0x88, 0x88, 0x88, 0x88, 0x13, 0x88, 0x64,
- 0x64, 0x20, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88,
- 0x02, 0x88, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
- dsi_generic_write_seq(dsi, ST7703_CMD_SETGIP2,
- 0x02, 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x02, 0x46, 0x02, 0x88,
- 0x88, 0x88, 0x88, 0x88, 0x88, 0x64, 0x88, 0x13,
- 0x57, 0x13, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88,
- 0x75, 0x88, 0x23, 0x14, 0x00, 0x00, 0x02, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x0A,
- 0xA5, 0x00, 0x00, 0x00, 0x00);
- dsi_generic_write_seq(dsi, ST7703_CMD_SETGAMMA,
- 0x00, 0x09, 0x0E, 0x29, 0x2D, 0x3C, 0x41, 0x37,
- 0x07, 0x0B, 0x0D, 0x10, 0x11, 0x0F, 0x10, 0x11,
- 0x18, 0x00, 0x09, 0x0E, 0x29, 0x2D, 0x3C, 0x41,
- 0x37, 0x07, 0x0B, 0x0D, 0x10, 0x11, 0x0F, 0x10,
- 0x11, 0x18);
+ mipi_dsi_generic_write_seq(dsi, ST7703_CMD_SETVCOM, 0x3F, 0x3F);
+ mipi_dsi_generic_write_seq(dsi, ST7703_CMD_UNKNOWN_BF, 0x02, 0x11, 0x00);
+ mipi_dsi_generic_write_seq(dsi, ST7703_CMD_SETGIP1,
+ 0x82, 0x10, 0x06, 0x05, 0x9E, 0x0A, 0xA5, 0x12,
+ 0x31, 0x23, 0x37, 0x83, 0x04, 0xBC, 0x27, 0x38,
+ 0x0C, 0x00, 0x03, 0x00, 0x00, 0x00, 0x0C, 0x00,
+ 0x03, 0x00, 0x00, 0x00, 0x75, 0x75, 0x31, 0x88,
+ 0x88, 0x88, 0x88, 0x88, 0x88, 0x13, 0x88, 0x64,
+ 0x64, 0x20, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88,
+ 0x02, 0x88, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
+ mipi_dsi_generic_write_seq(dsi, ST7703_CMD_SETGIP2,
+ 0x02, 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x02, 0x46, 0x02, 0x88,
+ 0x88, 0x88, 0x88, 0x88, 0x88, 0x64, 0x88, 0x13,
+ 0x57, 0x13, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88,
+ 0x75, 0x88, 0x23, 0x14, 0x00, 0x00, 0x02, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x0A,
+ 0xA5, 0x00, 0x00, 0x00, 0x00);
+ mipi_dsi_generic_write_seq(dsi, ST7703_CMD_SETGAMMA,
+ 0x00, 0x09, 0x0E, 0x29, 0x2D, 0x3C, 0x41, 0x37,
+ 0x07, 0x0B, 0x0D, 0x10, 0x11, 0x0F, 0x10, 0x11,
+ 0x18, 0x00, 0x09, 0x0E, 0x29, 0x2D, 0x3C, 0x41,
+ 0x37, 0x07, 0x0B, 0x0D, 0x10, 0x11, 0x0F, 0x10,
+ 0x11, 0x18);
return 0;
}
@@ -162,15 +154,6 @@ static const struct st7703_panel_desc jh057n00900_panel_desc = {
.init_sequence = jh057n_init_sequence,
};
-#define dsi_dcs_write_seq(dsi, cmd, seq...) do { \
- static const u8 d[] = { seq }; \
- int ret; \
- ret = mipi_dsi_dcs_write(dsi, cmd, d, ARRAY_SIZE(d)); \
- if (ret < 0) \
- return ret; \
- } while (0)
-
-
static int xbd599_init_sequence(struct st7703 *ctx)
{
struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
@@ -180,154 +163,154 @@ static int xbd599_init_sequence(struct st7703 *ctx)
*/
/* Magic sequence to unlock user commands below. */
- dsi_dcs_write_seq(dsi, ST7703_CMD_SETEXTC, 0xF1, 0x12, 0x83);
-
- dsi_dcs_write_seq(dsi, ST7703_CMD_SETMIPI,
- 0x33, /* VC_main = 0, Lane_Number = 3 (4 lanes) */
- 0x81, /* DSI_LDO_SEL = 1.7V, RTERM = 90 Ohm */
- 0x05, /* IHSRX = x6 (Low High Speed driving ability) */
- 0xF9, /* TX_CLK_SEL = fDSICLK/16 */
- 0x0E, /* HFP_OSC (min. HFP number in DSI mode) */
- 0x0E, /* HBP_OSC (min. HBP number in DSI mode) */
- /* The rest is undocumented in ST7703 datasheet */
- 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x44, 0x25, 0x00, 0x91, 0x0a, 0x00, 0x00, 0x02,
- 0x4F, 0x11, 0x00, 0x00, 0x37);
-
- dsi_dcs_write_seq(dsi, ST7703_CMD_SETPOWER_EXT,
- 0x25, /* PCCS = 2, ECP_DC_DIV = 1/4 HSYNC */
- 0x22, /* DT = 15ms XDK_ECP = x2 */
- 0x20, /* PFM_DC_DIV = /1 */
- 0x03 /* ECP_SYNC_EN = 1, VGX_SYNC_EN = 1 */);
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETEXTC, 0xF1, 0x12, 0x83);
+
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETMIPI,
+ 0x33, /* VC_main = 0, Lane_Number = 3 (4 lanes) */
+ 0x81, /* DSI_LDO_SEL = 1.7V, RTERM = 90 Ohm */
+ 0x05, /* IHSRX = x6 (Low High Speed driving ability) */
+ 0xF9, /* TX_CLK_SEL = fDSICLK/16 */
+ 0x0E, /* HFP_OSC (min. HFP number in DSI mode) */
+ 0x0E, /* HBP_OSC (min. HBP number in DSI mode) */
+ /* The rest is undocumented in ST7703 datasheet */
+ 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x44, 0x25, 0x00, 0x91, 0x0a, 0x00, 0x00, 0x02,
+ 0x4F, 0x11, 0x00, 0x00, 0x37);
+
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETPOWER_EXT,
+ 0x25, /* PCCS = 2, ECP_DC_DIV = 1/4 HSYNC */
+ 0x22, /* DT = 15ms XDK_ECP = x2 */
+ 0x20, /* PFM_DC_DIV = /1 */
+ 0x03 /* ECP_SYNC_EN = 1, VGX_SYNC_EN = 1 */);
/* RGB I/F porch timing */
- dsi_dcs_write_seq(dsi, ST7703_CMD_SETRGBIF,
- 0x10, /* VBP_RGB_GEN */
- 0x10, /* VFP_RGB_GEN */
- 0x05, /* DE_BP_RGB_GEN */
- 0x05, /* DE_FP_RGB_GEN */
- /* The rest is undocumented in ST7703 datasheet */
- 0x03, 0xFF,
- 0x00, 0x00,
- 0x00, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETRGBIF,
+ 0x10, /* VBP_RGB_GEN */
+ 0x10, /* VFP_RGB_GEN */
+ 0x05, /* DE_BP_RGB_GEN */
+ 0x05, /* DE_FP_RGB_GEN */
+ /* The rest is undocumented in ST7703 datasheet */
+ 0x03, 0xFF,
+ 0x00, 0x00,
+ 0x00, 0x00);
/* Source driving settings. */
- dsi_dcs_write_seq(dsi, ST7703_CMD_SETSCR,
- 0x73, /* N_POPON */
- 0x73, /* N_NOPON */
- 0x50, /* I_POPON */
- 0x50, /* I_NOPON */
- 0x00, /* SCR[31,24] */
- 0xC0, /* SCR[23,16] */
- 0x08, /* SCR[15,8] */
- 0x70, /* SCR[7,0] */
- 0x00 /* Undocumented */);
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETSCR,
+ 0x73, /* N_POPON */
+ 0x73, /* N_NOPON */
+ 0x50, /* I_POPON */
+ 0x50, /* I_NOPON */
+ 0x00, /* SCR[31,24] */
+ 0xC0, /* SCR[23,16] */
+ 0x08, /* SCR[15,8] */
+ 0x70, /* SCR[7,0] */
+ 0x00 /* Undocumented */);
/* NVDDD_SEL = -1.8V, VDDD_SEL = out of range (possibly 1.9V?) */
- dsi_dcs_write_seq(dsi, ST7703_CMD_SETVDC, 0x4E);
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETVDC, 0x4E);
/*
* SS_PANEL = 1 (reverse scan), GS_PANEL = 0 (normal scan)
* REV_PANEL = 1 (normally black panel), BGR_PANEL = 1 (BGR)
*/
- dsi_dcs_write_seq(dsi, ST7703_CMD_SETPANEL, 0x0B);
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETPANEL, 0x0B);
/* Zig-Zag Type C column inversion. */
- dsi_dcs_write_seq(dsi, ST7703_CMD_SETCYC, 0x80);
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETCYC, 0x80);
/* Set display resolution. */
- dsi_dcs_write_seq(dsi, ST7703_CMD_SETDISP,
- 0xF0, /* NL = 240 */
- 0x12, /* RES_V_LSB = 0, BLK_CON = VSSD,
- * RESO_SEL = 720RGB
- */
- 0xF0 /* WHITE_GND_EN = 1 (GND),
- * WHITE_FRAME_SEL = 7 frames,
- * ISC = 0 frames
- */);
-
- dsi_dcs_write_seq(dsi, ST7703_CMD_SETEQ,
- 0x00, /* PNOEQ */
- 0x00, /* NNOEQ */
- 0x0B, /* PEQGND */
- 0x0B, /* NEQGND */
- 0x10, /* PEQVCI */
- 0x10, /* NEQVCI */
- 0x00, /* PEQVCI1 */
- 0x00, /* NEQVCI1 */
- 0x00, /* reserved */
- 0x00, /* reserved */
- 0xFF, /* reserved */
- 0x00, /* reserved */
- 0xC0, /* ESD_DET_DATA_WHITE = 1, ESD_WHITE_EN = 1 */
- 0x10 /* SLPIN_OPTION = 1 (no need vsync after sleep-in)
- * VEDIO_NO_CHECK_EN = 0
- * ESD_WHITE_GND_EN = 0
- * ESD_DET_TIME_SEL = 0 frames
- */);
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETDISP,
+ 0xF0, /* NL = 240 */
+ 0x12, /* RES_V_LSB = 0, BLK_CON = VSSD,
+ * RESO_SEL = 720RGB
+ */
+ 0xF0 /* WHITE_GND_EN = 1 (GND),
+ * WHITE_FRAME_SEL = 7 frames,
+ * ISC = 0 frames
+ */);
+
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETEQ,
+ 0x00, /* PNOEQ */
+ 0x00, /* NNOEQ */
+ 0x0B, /* PEQGND */
+ 0x0B, /* NEQGND */
+ 0x10, /* PEQVCI */
+ 0x10, /* NEQVCI */
+ 0x00, /* PEQVCI1 */
+ 0x00, /* NEQVCI1 */
+ 0x00, /* reserved */
+ 0x00, /* reserved */
+ 0xFF, /* reserved */
+ 0x00, /* reserved */
+ 0xC0, /* ESD_DET_DATA_WHITE = 1, ESD_WHITE_EN = 1 */
+ 0x10 /* SLPIN_OPTION = 1 (no need vsync after sleep-in)
+ * VEDIO_NO_CHECK_EN = 0
+ * ESD_WHITE_GND_EN = 0
+ * ESD_DET_TIME_SEL = 0 frames
+ */);
/* Undocumented command. */
- dsi_dcs_write_seq(dsi, ST7703_CMD_UNKNOWN_C6, 0x01, 0x00, 0xFF, 0xFF, 0x00);
-
- dsi_dcs_write_seq(dsi, ST7703_CMD_SETPOWER,
- 0x74, /* VBTHS, VBTLS: VGH = 17V, VBL = -11V */
- 0x00, /* FBOFF_VGH = 0, FBOFF_VGL = 0 */
- 0x32, /* VRP */
- 0x32, /* VRN */
- 0x77, /* reserved */
- 0xF1, /* APS = 1 (small),
- * VGL_DET_EN = 1, VGH_DET_EN = 1,
- * VGL_TURBO = 1, VGH_TURBO = 1
- */
- 0xFF, /* VGH1_L_DIV, VGL1_L_DIV (1.5MHz) */
- 0xFF, /* VGH1_R_DIV, VGL1_R_DIV (1.5MHz) */
- 0xCC, /* VGH2_L_DIV, VGL2_L_DIV (2.6MHz) */
- 0xCC, /* VGH2_R_DIV, VGL2_R_DIV (2.6MHz) */
- 0x77, /* VGH3_L_DIV, VGL3_L_DIV (4.5MHz) */
- 0x77 /* VGH3_R_DIV, VGL3_R_DIV (4.5MHz) */);
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_UNKNOWN_C6, 0x01, 0x00, 0xFF, 0xFF, 0x00);
+
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETPOWER,
+ 0x74, /* VBTHS, VBTLS: VGH = 17V, VBL = -11V */
+ 0x00, /* FBOFF_VGH = 0, FBOFF_VGL = 0 */
+ 0x32, /* VRP */
+ 0x32, /* VRN */
+ 0x77, /* reserved */
+ 0xF1, /* APS = 1 (small),
+ * VGL_DET_EN = 1, VGH_DET_EN = 1,
+ * VGL_TURBO = 1, VGH_TURBO = 1
+ */
+ 0xFF, /* VGH1_L_DIV, VGL1_L_DIV (1.5MHz) */
+ 0xFF, /* VGH1_R_DIV, VGL1_R_DIV (1.5MHz) */
+ 0xCC, /* VGH2_L_DIV, VGL2_L_DIV (2.6MHz) */
+ 0xCC, /* VGH2_R_DIV, VGL2_R_DIV (2.6MHz) */
+ 0x77, /* VGH3_L_DIV, VGL3_L_DIV (4.5MHz) */
+ 0x77 /* VGH3_R_DIV, VGL3_R_DIV (4.5MHz) */);
/* Reference voltage. */
- dsi_dcs_write_seq(dsi, ST7703_CMD_SETBGP,
- 0x07, /* VREF_SEL = 4.2V */
- 0x07 /* NVREF_SEL = 4.2V */);
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETBGP,
+ 0x07, /* VREF_SEL = 4.2V */
+ 0x07 /* NVREF_SEL = 4.2V */);
msleep(20);
- dsi_dcs_write_seq(dsi, ST7703_CMD_SETVCOM,
- 0x2C, /* VCOMDC_F = -0.67V */
- 0x2C /* VCOMDC_B = -0.67V */);
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETVCOM,
+ 0x2C, /* VCOMDC_F = -0.67V */
+ 0x2C /* VCOMDC_B = -0.67V */);
/* Undocumented command. */
- dsi_dcs_write_seq(dsi, ST7703_CMD_UNKNOWN_BF, 0x02, 0x11, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_UNKNOWN_BF, 0x02, 0x11, 0x00);
/* This command is to set forward GIP timing. */
- dsi_dcs_write_seq(dsi, ST7703_CMD_SETGIP1,
- 0x82, 0x10, 0x06, 0x05, 0xA2, 0x0A, 0xA5, 0x12,
- 0x31, 0x23, 0x37, 0x83, 0x04, 0xBC, 0x27, 0x38,
- 0x0C, 0x00, 0x03, 0x00, 0x00, 0x00, 0x0C, 0x00,
- 0x03, 0x00, 0x00, 0x00, 0x75, 0x75, 0x31, 0x88,
- 0x88, 0x88, 0x88, 0x88, 0x88, 0x13, 0x88, 0x64,
- 0x64, 0x20, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88,
- 0x02, 0x88, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETGIP1,
+ 0x82, 0x10, 0x06, 0x05, 0xA2, 0x0A, 0xA5, 0x12,
+ 0x31, 0x23, 0x37, 0x83, 0x04, 0xBC, 0x27, 0x38,
+ 0x0C, 0x00, 0x03, 0x00, 0x00, 0x00, 0x0C, 0x00,
+ 0x03, 0x00, 0x00, 0x00, 0x75, 0x75, 0x31, 0x88,
+ 0x88, 0x88, 0x88, 0x88, 0x88, 0x13, 0x88, 0x64,
+ 0x64, 0x20, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88,
+ 0x02, 0x88, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
/* This command is to set backward GIP timing. */
- dsi_dcs_write_seq(dsi, ST7703_CMD_SETGIP2,
- 0x02, 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x02, 0x46, 0x02, 0x88,
- 0x88, 0x88, 0x88, 0x88, 0x88, 0x64, 0x88, 0x13,
- 0x57, 0x13, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88,
- 0x75, 0x88, 0x23, 0x14, 0x00, 0x00, 0x02, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x0A,
- 0xA5, 0x00, 0x00, 0x00, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETGIP2,
+ 0x02, 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x02, 0x46, 0x02, 0x88,
+ 0x88, 0x88, 0x88, 0x88, 0x88, 0x64, 0x88, 0x13,
+ 0x57, 0x13, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88,
+ 0x75, 0x88, 0x23, 0x14, 0x00, 0x00, 0x02, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x0A,
+ 0xA5, 0x00, 0x00, 0x00, 0x00);
/* Adjust the gamma characteristics of the panel. */
- dsi_dcs_write_seq(dsi, ST7703_CMD_SETGAMMA,
- 0x00, 0x09, 0x0D, 0x23, 0x27, 0x3C, 0x41, 0x35,
- 0x07, 0x0D, 0x0E, 0x12, 0x13, 0x10, 0x12, 0x12,
- 0x18, 0x00, 0x09, 0x0D, 0x23, 0x27, 0x3C, 0x41,
- 0x35, 0x07, 0x0D, 0x0E, 0x12, 0x13, 0x10, 0x12,
- 0x12, 0x18);
+ mipi_dsi_dcs_write_seq(dsi, ST7703_CMD_SETGAMMA,
+ 0x00, 0x09, 0x0D, 0x23, 0x27, 0x3C, 0x41, 0x35,
+ 0x07, 0x0D, 0x0E, 0x12, 0x13, 0x10, 0x12, 0x12,
+ 0x18, 0x00, 0x09, 0x0D, 0x23, 0x27, 0x3C, 0x41,
+ 0x35, 0x07, 0x0D, 0x0E, 0x12, 0x13, 0x10, 0x12,
+ 0x12, 0x18);
return 0;
}
@@ -499,7 +482,7 @@ static int allpixelson_set(void *data, u64 val)
struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
dev_dbg(ctx->dev, "Setting all pixels on\n");
- dsi_generic_write_seq(dsi, ST7703_CMD_ALL_PIXEL_ON);
+ mipi_dsi_generic_write_seq(dsi, ST7703_CMD_ALL_PIXEL_ON);
msleep(val * 1000);
/* Reset the panel to get video back */
drm_panel_disable(&ctx->panel);
diff --git a/drivers/gpu/drm/panel/panel-sony-tulip-truly-nt35521.c b/drivers/gpu/drm/panel/panel-sony-tulip-truly-nt35521.c
index fa9be3c299c0..ee5d20ecc577 100644
--- a/drivers/gpu/drm/panel/panel-sony-tulip-truly-nt35521.c
+++ b/drivers/gpu/drm/panel/panel-sony-tulip-truly-nt35521.c
@@ -33,14 +33,6 @@ struct truly_nt35521 *to_truly_nt35521(struct drm_panel *panel)
return container_of(panel, struct truly_nt35521, panel);
}
-#define dsi_generic_write_seq(dsi, seq...) do { \
- static const u8 d[] = { seq }; \
- int ret; \
- ret = mipi_dsi_generic_write(dsi, d, ARRAY_SIZE(d)); \
- if (ret < 0) \
- return ret; \
- } while (0)
-
static void truly_nt35521_reset(struct truly_nt35521 *ctx)
{
gpiod_set_value_cansleep(ctx->reset_gpio, 1);
@@ -59,200 +51,200 @@ static int truly_nt35521_on(struct truly_nt35521 *ctx)
dsi->mode_flags |= MIPI_DSI_MODE_LPM;
- dsi_generic_write_seq(dsi, 0xf0, 0x55, 0xaa, 0x52, 0x08, 0x00);
- dsi_generic_write_seq(dsi, 0xff, 0xaa, 0x55, 0xa5, 0x80);
- dsi_generic_write_seq(dsi, 0x6f, 0x11, 0x00);
- dsi_generic_write_seq(dsi, 0xf7, 0x20, 0x00);
- dsi_generic_write_seq(dsi, 0x6f, 0x01);
- dsi_generic_write_seq(dsi, 0xb1, 0x21);
- dsi_generic_write_seq(dsi, 0xbd, 0x01, 0xa0, 0x10, 0x08, 0x01);
- dsi_generic_write_seq(dsi, 0xb8, 0x01, 0x02, 0x0c, 0x02);
- dsi_generic_write_seq(dsi, 0xbb, 0x11, 0x11);
- dsi_generic_write_seq(dsi, 0xbc, 0x00, 0x00);
- dsi_generic_write_seq(dsi, 0xb6, 0x02);
- dsi_generic_write_seq(dsi, 0xf0, 0x55, 0xaa, 0x52, 0x08, 0x01);
- dsi_generic_write_seq(dsi, 0xb0, 0x09, 0x09);
- dsi_generic_write_seq(dsi, 0xb1, 0x09, 0x09);
- dsi_generic_write_seq(dsi, 0xbc, 0x8c, 0x00);
- dsi_generic_write_seq(dsi, 0xbd, 0x8c, 0x00);
- dsi_generic_write_seq(dsi, 0xca, 0x00);
- dsi_generic_write_seq(dsi, 0xc0, 0x04);
- dsi_generic_write_seq(dsi, 0xbe, 0xb5);
- dsi_generic_write_seq(dsi, 0xb3, 0x35, 0x35);
- dsi_generic_write_seq(dsi, 0xb4, 0x25, 0x25);
- dsi_generic_write_seq(dsi, 0xb9, 0x43, 0x43);
- dsi_generic_write_seq(dsi, 0xba, 0x24, 0x24);
- dsi_generic_write_seq(dsi, 0xf0, 0x55, 0xaa, 0x52, 0x08, 0x02);
- dsi_generic_write_seq(dsi, 0xee, 0x03);
- dsi_generic_write_seq(dsi, 0xb0,
- 0x00, 0xb2, 0x00, 0xb3, 0x00, 0xb6, 0x00, 0xc3,
- 0x00, 0xce, 0x00, 0xe1, 0x00, 0xf3, 0x01, 0x11);
- dsi_generic_write_seq(dsi, 0xb1,
- 0x01, 0x2e, 0x01, 0x5c, 0x01, 0x82, 0x01, 0xc3,
- 0x01, 0xfe, 0x02, 0x00, 0x02, 0x37, 0x02, 0x77);
- dsi_generic_write_seq(dsi, 0xb2,
- 0x02, 0xa1, 0x02, 0xd7, 0x02, 0xfe, 0x03, 0x2c,
- 0x03, 0x4b, 0x03, 0x63, 0x03, 0x8f, 0x03, 0x90);
- dsi_generic_write_seq(dsi, 0xb3, 0x03, 0x96, 0x03, 0x98);
- dsi_generic_write_seq(dsi, 0xb4,
- 0x00, 0x81, 0x00, 0x8b, 0x00, 0x9c, 0x00, 0xa9,
- 0x00, 0xb5, 0x00, 0xcb, 0x00, 0xdf, 0x01, 0x02);
- dsi_generic_write_seq(dsi, 0xb5,
- 0x01, 0x1f, 0x01, 0x51, 0x01, 0x7a, 0x01, 0xbf,
- 0x01, 0xfa, 0x01, 0xfc, 0x02, 0x34, 0x02, 0x76);
- dsi_generic_write_seq(dsi, 0xb6,
- 0x02, 0x9f, 0x02, 0xd7, 0x02, 0xfc, 0x03, 0x2c,
- 0x03, 0x4a, 0x03, 0x63, 0x03, 0x8f, 0x03, 0xa2);
- dsi_generic_write_seq(dsi, 0xb7, 0x03, 0xb8, 0x03, 0xba);
- dsi_generic_write_seq(dsi, 0xb8,
- 0x00, 0x01, 0x00, 0x02, 0x00, 0x0e, 0x00, 0x2a,
- 0x00, 0x41, 0x00, 0x67, 0x00, 0x87, 0x00, 0xb9);
- dsi_generic_write_seq(dsi, 0xb9,
- 0x00, 0xe2, 0x01, 0x22, 0x01, 0x54, 0x01, 0xa3,
- 0x01, 0xe6, 0x01, 0xe7, 0x02, 0x24, 0x02, 0x67);
- dsi_generic_write_seq(dsi, 0xba,
- 0x02, 0x93, 0x02, 0xcd, 0x02, 0xf6, 0x03, 0x31,
- 0x03, 0x6c, 0x03, 0xe9, 0x03, 0xef, 0x03, 0xf4);
- dsi_generic_write_seq(dsi, 0xbb, 0x03, 0xf6, 0x03, 0xf7);
- dsi_generic_write_seq(dsi, 0xf0, 0x55, 0xaa, 0x52, 0x08, 0x03);
- dsi_generic_write_seq(dsi, 0xb0, 0x22, 0x00);
- dsi_generic_write_seq(dsi, 0xb1, 0x22, 0x00);
- dsi_generic_write_seq(dsi, 0xb2, 0x05, 0x00, 0x60, 0x00, 0x00);
- dsi_generic_write_seq(dsi, 0xb3, 0x05, 0x00, 0x60, 0x00, 0x00);
- dsi_generic_write_seq(dsi, 0xb4, 0x05, 0x00, 0x60, 0x00, 0x00);
- dsi_generic_write_seq(dsi, 0xb5, 0x05, 0x00, 0x60, 0x00, 0x00);
- dsi_generic_write_seq(dsi, 0xba, 0x53, 0x00, 0x60, 0x00, 0x00);
- dsi_generic_write_seq(dsi, 0xbb, 0x53, 0x00, 0x60, 0x00, 0x00);
- dsi_generic_write_seq(dsi, 0xbc, 0x53, 0x00, 0x60, 0x00, 0x00);
- dsi_generic_write_seq(dsi, 0xbd, 0x53, 0x00, 0x60, 0x00, 0x00);
- dsi_generic_write_seq(dsi, 0xc0, 0x00, 0x34, 0x00, 0x00);
- dsi_generic_write_seq(dsi, 0xc1, 0x00, 0x00, 0x34, 0x00);
- dsi_generic_write_seq(dsi, 0xc2, 0x00, 0x00, 0x34, 0x00);
- dsi_generic_write_seq(dsi, 0xc3, 0x00, 0x00, 0x34, 0x00);
- dsi_generic_write_seq(dsi, 0xc4, 0x60);
- dsi_generic_write_seq(dsi, 0xc5, 0xc0);
- dsi_generic_write_seq(dsi, 0xc6, 0x00);
- dsi_generic_write_seq(dsi, 0xc7, 0x00);
- dsi_generic_write_seq(dsi, 0xf0, 0x55, 0xaa, 0x52, 0x08, 0x05);
- dsi_generic_write_seq(dsi, 0xb0, 0x17, 0x06);
- dsi_generic_write_seq(dsi, 0xb1, 0x17, 0x06);
- dsi_generic_write_seq(dsi, 0xb2, 0x17, 0x06);
- dsi_generic_write_seq(dsi, 0xb3, 0x17, 0x06);
- dsi_generic_write_seq(dsi, 0xb4, 0x17, 0x06);
- dsi_generic_write_seq(dsi, 0xb5, 0x17, 0x06);
- dsi_generic_write_seq(dsi, 0xb6, 0x17, 0x06);
- dsi_generic_write_seq(dsi, 0xb7, 0x17, 0x06);
- dsi_generic_write_seq(dsi, 0xb8, 0x00);
- dsi_generic_write_seq(dsi, 0xb9, 0x00, 0x03);
- dsi_generic_write_seq(dsi, 0xba, 0x00, 0x00);
- dsi_generic_write_seq(dsi, 0xbb, 0x02, 0x03);
- dsi_generic_write_seq(dsi, 0xbc, 0x02, 0x03);
- dsi_generic_write_seq(dsi, 0xbd, 0x03, 0x03, 0x00, 0x03, 0x03);
- dsi_generic_write_seq(dsi, 0xc0, 0x0b);
- dsi_generic_write_seq(dsi, 0xc1, 0x09);
- dsi_generic_write_seq(dsi, 0xc2, 0xa6);
- dsi_generic_write_seq(dsi, 0xc3, 0x05);
- dsi_generic_write_seq(dsi, 0xc4, 0x00);
- dsi_generic_write_seq(dsi, 0xc5, 0x02);
- dsi_generic_write_seq(dsi, 0xc6, 0x22);
- dsi_generic_write_seq(dsi, 0xc7, 0x03);
- dsi_generic_write_seq(dsi, 0xc8, 0x07, 0x20);
- dsi_generic_write_seq(dsi, 0xc9, 0x03, 0x20);
- dsi_generic_write_seq(dsi, 0xca, 0x01, 0x60);
- dsi_generic_write_seq(dsi, 0xcb, 0x01, 0x60);
- dsi_generic_write_seq(dsi, 0xcc, 0x00, 0x00, 0x02);
- dsi_generic_write_seq(dsi, 0xcd, 0x00, 0x00, 0x02);
- dsi_generic_write_seq(dsi, 0xce, 0x00, 0x00, 0x02);
- dsi_generic_write_seq(dsi, 0xcf, 0x00, 0x00, 0x02);
- dsi_generic_write_seq(dsi, 0xd1, 0x00, 0x05, 0x01, 0x07, 0x10);
- dsi_generic_write_seq(dsi, 0xd2, 0x10, 0x05, 0x05, 0x03, 0x10);
- dsi_generic_write_seq(dsi, 0xd3, 0x20, 0x00, 0x43, 0x07, 0x10);
- dsi_generic_write_seq(dsi, 0xd4, 0x30, 0x00, 0x43, 0x07, 0x10);
- dsi_generic_write_seq(dsi, 0xd0,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
- dsi_generic_write_seq(dsi, 0xd5,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00);
- dsi_generic_write_seq(dsi, 0xd6,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00);
- dsi_generic_write_seq(dsi, 0xd7,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00);
- dsi_generic_write_seq(dsi, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00);
- dsi_generic_write_seq(dsi, 0xe5, 0x06);
- dsi_generic_write_seq(dsi, 0xe6, 0x06);
- dsi_generic_write_seq(dsi, 0xe7, 0x00);
- dsi_generic_write_seq(dsi, 0xe8, 0x06);
- dsi_generic_write_seq(dsi, 0xe9, 0x06);
- dsi_generic_write_seq(dsi, 0xea, 0x06);
- dsi_generic_write_seq(dsi, 0xeb, 0x00);
- dsi_generic_write_seq(dsi, 0xec, 0x00);
- dsi_generic_write_seq(dsi, 0xed, 0x30);
- dsi_generic_write_seq(dsi, 0xf0, 0x55, 0xaa, 0x52, 0x08, 0x06);
- dsi_generic_write_seq(dsi, 0xb0, 0x31, 0x31);
- dsi_generic_write_seq(dsi, 0xb1, 0x31, 0x31);
- dsi_generic_write_seq(dsi, 0xb2, 0x2d, 0x2e);
- dsi_generic_write_seq(dsi, 0xb3, 0x31, 0x34);
- dsi_generic_write_seq(dsi, 0xb4, 0x29, 0x2a);
- dsi_generic_write_seq(dsi, 0xb5, 0x12, 0x10);
- dsi_generic_write_seq(dsi, 0xb6, 0x18, 0x16);
- dsi_generic_write_seq(dsi, 0xb7, 0x00, 0x02);
- dsi_generic_write_seq(dsi, 0xb8, 0x08, 0x31);
- dsi_generic_write_seq(dsi, 0xb9, 0x31, 0x31);
- dsi_generic_write_seq(dsi, 0xba, 0x31, 0x31);
- dsi_generic_write_seq(dsi, 0xbb, 0x31, 0x08);
- dsi_generic_write_seq(dsi, 0xbc, 0x03, 0x01);
- dsi_generic_write_seq(dsi, 0xbd, 0x17, 0x19);
- dsi_generic_write_seq(dsi, 0xbe, 0x11, 0x13);
- dsi_generic_write_seq(dsi, 0xbf, 0x2a, 0x29);
- dsi_generic_write_seq(dsi, 0xc0, 0x34, 0x31);
- dsi_generic_write_seq(dsi, 0xc1, 0x2e, 0x2d);
- dsi_generic_write_seq(dsi, 0xc2, 0x31, 0x31);
- dsi_generic_write_seq(dsi, 0xc3, 0x31, 0x31);
- dsi_generic_write_seq(dsi, 0xc4, 0x31, 0x31);
- dsi_generic_write_seq(dsi, 0xc5, 0x31, 0x31);
- dsi_generic_write_seq(dsi, 0xc6, 0x2e, 0x2d);
- dsi_generic_write_seq(dsi, 0xc7, 0x31, 0x34);
- dsi_generic_write_seq(dsi, 0xc8, 0x29, 0x2a);
- dsi_generic_write_seq(dsi, 0xc9, 0x17, 0x19);
- dsi_generic_write_seq(dsi, 0xca, 0x11, 0x13);
- dsi_generic_write_seq(dsi, 0xcb, 0x03, 0x01);
- dsi_generic_write_seq(dsi, 0xcc, 0x08, 0x31);
- dsi_generic_write_seq(dsi, 0xcd, 0x31, 0x31);
- dsi_generic_write_seq(dsi, 0xce, 0x31, 0x31);
- dsi_generic_write_seq(dsi, 0xcf, 0x31, 0x08);
- dsi_generic_write_seq(dsi, 0xd0, 0x00, 0x02);
- dsi_generic_write_seq(dsi, 0xd1, 0x12, 0x10);
- dsi_generic_write_seq(dsi, 0xd2, 0x18, 0x16);
- dsi_generic_write_seq(dsi, 0xd3, 0x2a, 0x29);
- dsi_generic_write_seq(dsi, 0xd4, 0x34, 0x31);
- dsi_generic_write_seq(dsi, 0xd5, 0x2d, 0x2e);
- dsi_generic_write_seq(dsi, 0xd6, 0x31, 0x31);
- dsi_generic_write_seq(dsi, 0xd7, 0x31, 0x31);
- dsi_generic_write_seq(dsi, 0xe5, 0x31, 0x31);
- dsi_generic_write_seq(dsi, 0xe6, 0x31, 0x31);
- dsi_generic_write_seq(dsi, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00);
- dsi_generic_write_seq(dsi, 0xd9, 0x00, 0x00, 0x00, 0x00, 0x00);
- dsi_generic_write_seq(dsi, 0xe7, 0x00);
- dsi_generic_write_seq(dsi, 0x6f, 0x02);
- dsi_generic_write_seq(dsi, 0xf7, 0x47);
- dsi_generic_write_seq(dsi, 0x6f, 0x0a);
- dsi_generic_write_seq(dsi, 0xf7, 0x02);
- dsi_generic_write_seq(dsi, 0x6f, 0x17);
- dsi_generic_write_seq(dsi, 0xf4, 0x60);
- dsi_generic_write_seq(dsi, 0x6f, 0x01);
- dsi_generic_write_seq(dsi, 0xf9, 0x46);
- dsi_generic_write_seq(dsi, 0x6f, 0x11);
- dsi_generic_write_seq(dsi, 0xf3, 0x01);
- dsi_generic_write_seq(dsi, 0x35, 0x00);
- dsi_generic_write_seq(dsi, 0xf0, 0x55, 0xaa, 0x52, 0x08, 0x00);
- dsi_generic_write_seq(dsi, 0xd9, 0x02, 0x03, 0x00);
- dsi_generic_write_seq(dsi, 0xf0, 0x55, 0xaa, 0x52, 0x00, 0x00);
- dsi_generic_write_seq(dsi, 0xf0, 0x55, 0xaa, 0x52, 0x08, 0x00);
- dsi_generic_write_seq(dsi, 0xb1, 0x6c, 0x21);
- dsi_generic_write_seq(dsi, 0xf0, 0x55, 0xaa, 0x52, 0x00, 0x00);
- dsi_generic_write_seq(dsi, 0x35, 0x00);
+ mipi_dsi_generic_write_seq(dsi, 0xf0, 0x55, 0xaa, 0x52, 0x08, 0x00);
+ mipi_dsi_generic_write_seq(dsi, 0xff, 0xaa, 0x55, 0xa5, 0x80);
+ mipi_dsi_generic_write_seq(dsi, 0x6f, 0x11, 0x00);
+ mipi_dsi_generic_write_seq(dsi, 0xf7, 0x20, 0x00);
+ mipi_dsi_generic_write_seq(dsi, 0x6f, 0x01);
+ mipi_dsi_generic_write_seq(dsi, 0xb1, 0x21);
+ mipi_dsi_generic_write_seq(dsi, 0xbd, 0x01, 0xa0, 0x10, 0x08, 0x01);
+ mipi_dsi_generic_write_seq(dsi, 0xb8, 0x01, 0x02, 0x0c, 0x02);
+ mipi_dsi_generic_write_seq(dsi, 0xbb, 0x11, 0x11);
+ mipi_dsi_generic_write_seq(dsi, 0xbc, 0x00, 0x00);
+ mipi_dsi_generic_write_seq(dsi, 0xb6, 0x02);
+ mipi_dsi_generic_write_seq(dsi, 0xf0, 0x55, 0xaa, 0x52, 0x08, 0x01);
+ mipi_dsi_generic_write_seq(dsi, 0xb0, 0x09, 0x09);
+ mipi_dsi_generic_write_seq(dsi, 0xb1, 0x09, 0x09);
+ mipi_dsi_generic_write_seq(dsi, 0xbc, 0x8c, 0x00);
+ mipi_dsi_generic_write_seq(dsi, 0xbd, 0x8c, 0x00);
+ mipi_dsi_generic_write_seq(dsi, 0xca, 0x00);
+ mipi_dsi_generic_write_seq(dsi, 0xc0, 0x04);
+ mipi_dsi_generic_write_seq(dsi, 0xbe, 0xb5);
+ mipi_dsi_generic_write_seq(dsi, 0xb3, 0x35, 0x35);
+ mipi_dsi_generic_write_seq(dsi, 0xb4, 0x25, 0x25);
+ mipi_dsi_generic_write_seq(dsi, 0xb9, 0x43, 0x43);
+ mipi_dsi_generic_write_seq(dsi, 0xba, 0x24, 0x24);
+ mipi_dsi_generic_write_seq(dsi, 0xf0, 0x55, 0xaa, 0x52, 0x08, 0x02);
+ mipi_dsi_generic_write_seq(dsi, 0xee, 0x03);
+ mipi_dsi_generic_write_seq(dsi, 0xb0,
+ 0x00, 0xb2, 0x00, 0xb3, 0x00, 0xb6, 0x00, 0xc3,
+ 0x00, 0xce, 0x00, 0xe1, 0x00, 0xf3, 0x01, 0x11);
+ mipi_dsi_generic_write_seq(dsi, 0xb1,
+ 0x01, 0x2e, 0x01, 0x5c, 0x01, 0x82, 0x01, 0xc3,
+ 0x01, 0xfe, 0x02, 0x00, 0x02, 0x37, 0x02, 0x77);
+ mipi_dsi_generic_write_seq(dsi, 0xb2,
+ 0x02, 0xa1, 0x02, 0xd7, 0x02, 0xfe, 0x03, 0x2c,
+ 0x03, 0x4b, 0x03, 0x63, 0x03, 0x8f, 0x03, 0x90);
+ mipi_dsi_generic_write_seq(dsi, 0xb3, 0x03, 0x96, 0x03, 0x98);
+ mipi_dsi_generic_write_seq(dsi, 0xb4,
+ 0x00, 0x81, 0x00, 0x8b, 0x00, 0x9c, 0x00, 0xa9,
+ 0x00, 0xb5, 0x00, 0xcb, 0x00, 0xdf, 0x01, 0x02);
+ mipi_dsi_generic_write_seq(dsi, 0xb5,
+ 0x01, 0x1f, 0x01, 0x51, 0x01, 0x7a, 0x01, 0xbf,
+ 0x01, 0xfa, 0x01, 0xfc, 0x02, 0x34, 0x02, 0x76);
+ mipi_dsi_generic_write_seq(dsi, 0xb6,
+ 0x02, 0x9f, 0x02, 0xd7, 0x02, 0xfc, 0x03, 0x2c,
+ 0x03, 0x4a, 0x03, 0x63, 0x03, 0x8f, 0x03, 0xa2);
+ mipi_dsi_generic_write_seq(dsi, 0xb7, 0x03, 0xb8, 0x03, 0xba);
+ mipi_dsi_generic_write_seq(dsi, 0xb8,
+ 0x00, 0x01, 0x00, 0x02, 0x00, 0x0e, 0x00, 0x2a,
+ 0x00, 0x41, 0x00, 0x67, 0x00, 0x87, 0x00, 0xb9);
+ mipi_dsi_generic_write_seq(dsi, 0xb9,
+ 0x00, 0xe2, 0x01, 0x22, 0x01, 0x54, 0x01, 0xa3,
+ 0x01, 0xe6, 0x01, 0xe7, 0x02, 0x24, 0x02, 0x67);
+ mipi_dsi_generic_write_seq(dsi, 0xba,
+ 0x02, 0x93, 0x02, 0xcd, 0x02, 0xf6, 0x03, 0x31,
+ 0x03, 0x6c, 0x03, 0xe9, 0x03, 0xef, 0x03, 0xf4);
+ mipi_dsi_generic_write_seq(dsi, 0xbb, 0x03, 0xf6, 0x03, 0xf7);
+ mipi_dsi_generic_write_seq(dsi, 0xf0, 0x55, 0xaa, 0x52, 0x08, 0x03);
+ mipi_dsi_generic_write_seq(dsi, 0xb0, 0x22, 0x00);
+ mipi_dsi_generic_write_seq(dsi, 0xb1, 0x22, 0x00);
+ mipi_dsi_generic_write_seq(dsi, 0xb2, 0x05, 0x00, 0x60, 0x00, 0x00);
+ mipi_dsi_generic_write_seq(dsi, 0xb3, 0x05, 0x00, 0x60, 0x00, 0x00);
+ mipi_dsi_generic_write_seq(dsi, 0xb4, 0x05, 0x00, 0x60, 0x00, 0x00);
+ mipi_dsi_generic_write_seq(dsi, 0xb5, 0x05, 0x00, 0x60, 0x00, 0x00);
+ mipi_dsi_generic_write_seq(dsi, 0xba, 0x53, 0x00, 0x60, 0x00, 0x00);
+ mipi_dsi_generic_write_seq(dsi, 0xbb, 0x53, 0x00, 0x60, 0x00, 0x00);
+ mipi_dsi_generic_write_seq(dsi, 0xbc, 0x53, 0x00, 0x60, 0x00, 0x00);
+ mipi_dsi_generic_write_seq(dsi, 0xbd, 0x53, 0x00, 0x60, 0x00, 0x00);
+ mipi_dsi_generic_write_seq(dsi, 0xc0, 0x00, 0x34, 0x00, 0x00);
+ mipi_dsi_generic_write_seq(dsi, 0xc1, 0x00, 0x00, 0x34, 0x00);
+ mipi_dsi_generic_write_seq(dsi, 0xc2, 0x00, 0x00, 0x34, 0x00);
+ mipi_dsi_generic_write_seq(dsi, 0xc3, 0x00, 0x00, 0x34, 0x00);
+ mipi_dsi_generic_write_seq(dsi, 0xc4, 0x60);
+ mipi_dsi_generic_write_seq(dsi, 0xc5, 0xc0);
+ mipi_dsi_generic_write_seq(dsi, 0xc6, 0x00);
+ mipi_dsi_generic_write_seq(dsi, 0xc7, 0x00);
+ mipi_dsi_generic_write_seq(dsi, 0xf0, 0x55, 0xaa, 0x52, 0x08, 0x05);
+ mipi_dsi_generic_write_seq(dsi, 0xb0, 0x17, 0x06);
+ mipi_dsi_generic_write_seq(dsi, 0xb1, 0x17, 0x06);
+ mipi_dsi_generic_write_seq(dsi, 0xb2, 0x17, 0x06);
+ mipi_dsi_generic_write_seq(dsi, 0xb3, 0x17, 0x06);
+ mipi_dsi_generic_write_seq(dsi, 0xb4, 0x17, 0x06);
+ mipi_dsi_generic_write_seq(dsi, 0xb5, 0x17, 0x06);
+ mipi_dsi_generic_write_seq(dsi, 0xb6, 0x17, 0x06);
+ mipi_dsi_generic_write_seq(dsi, 0xb7, 0x17, 0x06);
+ mipi_dsi_generic_write_seq(dsi, 0xb8, 0x00);
+ mipi_dsi_generic_write_seq(dsi, 0xb9, 0x00, 0x03);
+ mipi_dsi_generic_write_seq(dsi, 0xba, 0x00, 0x00);
+ mipi_dsi_generic_write_seq(dsi, 0xbb, 0x02, 0x03);
+ mipi_dsi_generic_write_seq(dsi, 0xbc, 0x02, 0x03);
+ mipi_dsi_generic_write_seq(dsi, 0xbd, 0x03, 0x03, 0x00, 0x03, 0x03);
+ mipi_dsi_generic_write_seq(dsi, 0xc0, 0x0b);
+ mipi_dsi_generic_write_seq(dsi, 0xc1, 0x09);
+ mipi_dsi_generic_write_seq(dsi, 0xc2, 0xa6);
+ mipi_dsi_generic_write_seq(dsi, 0xc3, 0x05);
+ mipi_dsi_generic_write_seq(dsi, 0xc4, 0x00);
+ mipi_dsi_generic_write_seq(dsi, 0xc5, 0x02);
+ mipi_dsi_generic_write_seq(dsi, 0xc6, 0x22);
+ mipi_dsi_generic_write_seq(dsi, 0xc7, 0x03);
+ mipi_dsi_generic_write_seq(dsi, 0xc8, 0x07, 0x20);
+ mipi_dsi_generic_write_seq(dsi, 0xc9, 0x03, 0x20);
+ mipi_dsi_generic_write_seq(dsi, 0xca, 0x01, 0x60);
+ mipi_dsi_generic_write_seq(dsi, 0xcb, 0x01, 0x60);
+ mipi_dsi_generic_write_seq(dsi, 0xcc, 0x00, 0x00, 0x02);
+ mipi_dsi_generic_write_seq(dsi, 0xcd, 0x00, 0x00, 0x02);
+ mipi_dsi_generic_write_seq(dsi, 0xce, 0x00, 0x00, 0x02);
+ mipi_dsi_generic_write_seq(dsi, 0xcf, 0x00, 0x00, 0x02);
+ mipi_dsi_generic_write_seq(dsi, 0xd1, 0x00, 0x05, 0x01, 0x07, 0x10);
+ mipi_dsi_generic_write_seq(dsi, 0xd2, 0x10, 0x05, 0x05, 0x03, 0x10);
+ mipi_dsi_generic_write_seq(dsi, 0xd3, 0x20, 0x00, 0x43, 0x07, 0x10);
+ mipi_dsi_generic_write_seq(dsi, 0xd4, 0x30, 0x00, 0x43, 0x07, 0x10);
+ mipi_dsi_generic_write_seq(dsi, 0xd0,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
+ mipi_dsi_generic_write_seq(dsi, 0xd5,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00);
+ mipi_dsi_generic_write_seq(dsi, 0xd6,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00);
+ mipi_dsi_generic_write_seq(dsi, 0xd7,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00);
+ mipi_dsi_generic_write_seq(dsi, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00);
+ mipi_dsi_generic_write_seq(dsi, 0xe5, 0x06);
+ mipi_dsi_generic_write_seq(dsi, 0xe6, 0x06);
+ mipi_dsi_generic_write_seq(dsi, 0xe7, 0x00);
+ mipi_dsi_generic_write_seq(dsi, 0xe8, 0x06);
+ mipi_dsi_generic_write_seq(dsi, 0xe9, 0x06);
+ mipi_dsi_generic_write_seq(dsi, 0xea, 0x06);
+ mipi_dsi_generic_write_seq(dsi, 0xeb, 0x00);
+ mipi_dsi_generic_write_seq(dsi, 0xec, 0x00);
+ mipi_dsi_generic_write_seq(dsi, 0xed, 0x30);
+ mipi_dsi_generic_write_seq(dsi, 0xf0, 0x55, 0xaa, 0x52, 0x08, 0x06);
+ mipi_dsi_generic_write_seq(dsi, 0xb0, 0x31, 0x31);
+ mipi_dsi_generic_write_seq(dsi, 0xb1, 0x31, 0x31);
+ mipi_dsi_generic_write_seq(dsi, 0xb2, 0x2d, 0x2e);
+ mipi_dsi_generic_write_seq(dsi, 0xb3, 0x31, 0x34);
+ mipi_dsi_generic_write_seq(dsi, 0xb4, 0x29, 0x2a);
+ mipi_dsi_generic_write_seq(dsi, 0xb5, 0x12, 0x10);
+ mipi_dsi_generic_write_seq(dsi, 0xb6, 0x18, 0x16);
+ mipi_dsi_generic_write_seq(dsi, 0xb7, 0x00, 0x02);
+ mipi_dsi_generic_write_seq(dsi, 0xb8, 0x08, 0x31);
+ mipi_dsi_generic_write_seq(dsi, 0xb9, 0x31, 0x31);
+ mipi_dsi_generic_write_seq(dsi, 0xba, 0x31, 0x31);
+ mipi_dsi_generic_write_seq(dsi, 0xbb, 0x31, 0x08);
+ mipi_dsi_generic_write_seq(dsi, 0xbc, 0x03, 0x01);
+ mipi_dsi_generic_write_seq(dsi, 0xbd, 0x17, 0x19);
+ mipi_dsi_generic_write_seq(dsi, 0xbe, 0x11, 0x13);
+ mipi_dsi_generic_write_seq(dsi, 0xbf, 0x2a, 0x29);
+ mipi_dsi_generic_write_seq(dsi, 0xc0, 0x34, 0x31);
+ mipi_dsi_generic_write_seq(dsi, 0xc1, 0x2e, 0x2d);
+ mipi_dsi_generic_write_seq(dsi, 0xc2, 0x31, 0x31);
+ mipi_dsi_generic_write_seq(dsi, 0xc3, 0x31, 0x31);
+ mipi_dsi_generic_write_seq(dsi, 0xc4, 0x31, 0x31);
+ mipi_dsi_generic_write_seq(dsi, 0xc5, 0x31, 0x31);
+ mipi_dsi_generic_write_seq(dsi, 0xc6, 0x2e, 0x2d);
+ mipi_dsi_generic_write_seq(dsi, 0xc7, 0x31, 0x34);
+ mipi_dsi_generic_write_seq(dsi, 0xc8, 0x29, 0x2a);
+ mipi_dsi_generic_write_seq(dsi, 0xc9, 0x17, 0x19);
+ mipi_dsi_generic_write_seq(dsi, 0xca, 0x11, 0x13);
+ mipi_dsi_generic_write_seq(dsi, 0xcb, 0x03, 0x01);
+ mipi_dsi_generic_write_seq(dsi, 0xcc, 0x08, 0x31);
+ mipi_dsi_generic_write_seq(dsi, 0xcd, 0x31, 0x31);
+ mipi_dsi_generic_write_seq(dsi, 0xce, 0x31, 0x31);
+ mipi_dsi_generic_write_seq(dsi, 0xcf, 0x31, 0x08);
+ mipi_dsi_generic_write_seq(dsi, 0xd0, 0x00, 0x02);
+ mipi_dsi_generic_write_seq(dsi, 0xd1, 0x12, 0x10);
+ mipi_dsi_generic_write_seq(dsi, 0xd2, 0x18, 0x16);
+ mipi_dsi_generic_write_seq(dsi, 0xd3, 0x2a, 0x29);
+ mipi_dsi_generic_write_seq(dsi, 0xd4, 0x34, 0x31);
+ mipi_dsi_generic_write_seq(dsi, 0xd5, 0x2d, 0x2e);
+ mipi_dsi_generic_write_seq(dsi, 0xd6, 0x31, 0x31);
+ mipi_dsi_generic_write_seq(dsi, 0xd7, 0x31, 0x31);
+ mipi_dsi_generic_write_seq(dsi, 0xe5, 0x31, 0x31);
+ mipi_dsi_generic_write_seq(dsi, 0xe6, 0x31, 0x31);
+ mipi_dsi_generic_write_seq(dsi, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00);
+ mipi_dsi_generic_write_seq(dsi, 0xd9, 0x00, 0x00, 0x00, 0x00, 0x00);
+ mipi_dsi_generic_write_seq(dsi, 0xe7, 0x00);
+ mipi_dsi_generic_write_seq(dsi, 0x6f, 0x02);
+ mipi_dsi_generic_write_seq(dsi, 0xf7, 0x47);
+ mipi_dsi_generic_write_seq(dsi, 0x6f, 0x0a);
+ mipi_dsi_generic_write_seq(dsi, 0xf7, 0x02);
+ mipi_dsi_generic_write_seq(dsi, 0x6f, 0x17);
+ mipi_dsi_generic_write_seq(dsi, 0xf4, 0x60);
+ mipi_dsi_generic_write_seq(dsi, 0x6f, 0x01);
+ mipi_dsi_generic_write_seq(dsi, 0xf9, 0x46);
+ mipi_dsi_generic_write_seq(dsi, 0x6f, 0x11);
+ mipi_dsi_generic_write_seq(dsi, 0xf3, 0x01);
+ mipi_dsi_generic_write_seq(dsi, 0x35, 0x00);
+ mipi_dsi_generic_write_seq(dsi, 0xf0, 0x55, 0xaa, 0x52, 0x08, 0x00);
+ mipi_dsi_generic_write_seq(dsi, 0xd9, 0x02, 0x03, 0x00);
+ mipi_dsi_generic_write_seq(dsi, 0xf0, 0x55, 0xaa, 0x52, 0x00, 0x00);
+ mipi_dsi_generic_write_seq(dsi, 0xf0, 0x55, 0xaa, 0x52, 0x08, 0x00);
+ mipi_dsi_generic_write_seq(dsi, 0xb1, 0x6c, 0x21);
+ mipi_dsi_generic_write_seq(dsi, 0xf0, 0x55, 0xaa, 0x52, 0x00, 0x00);
+ mipi_dsi_generic_write_seq(dsi, 0x35, 0x00);
ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
if (ret < 0) {
@@ -268,7 +260,7 @@ static int truly_nt35521_on(struct truly_nt35521 *ctx)
}
usleep_range(1000, 2000);
- dsi_generic_write_seq(dsi, 0x53, 0x24);
+ mipi_dsi_generic_write_seq(dsi, 0x53, 0x24);
return 0;
}
diff --git a/drivers/gpu/drm/panel/panel-visionox-vtdr6130.c b/drivers/gpu/drm/panel/panel-visionox-vtdr6130.c
new file mode 100644
index 000000000000..bb0dfd86ea67
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-visionox-vtdr6130.c
@@ -0,0 +1,350 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (c) 2023, Linaro Limited
+
+#include <linux/backlight.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/regulator/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+
+#include <drm/display/drm_dsc.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+
+#include <video/mipi_display.h>
+
+struct visionox_vtdr6130 {
+ struct drm_panel panel;
+ struct mipi_dsi_device *dsi;
+ struct gpio_desc *reset_gpio;
+ struct regulator_bulk_data supplies[3];
+ bool prepared;
+};
+
+static inline struct visionox_vtdr6130 *to_visionox_vtdr6130(struct drm_panel *panel)
+{
+ return container_of(panel, struct visionox_vtdr6130, panel);
+}
+
+static void visionox_vtdr6130_reset(struct visionox_vtdr6130 *ctx)
+{
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ usleep_range(10000, 11000);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ usleep_range(10000, 11000);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ usleep_range(10000, 11000);
+}
+
+static int visionox_vtdr6130_on(struct visionox_vtdr6130 *ctx)
+{
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ struct device *dev = &dsi->dev;
+ int ret;
+
+ dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+
+ ret = mipi_dsi_dcs_set_tear_on(dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
+ if (ret)
+ return ret;
+
+ mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x20);
+ mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_DISPLAY_BRIGHTNESS, 0x00, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0x59, 0x09);
+ mipi_dsi_dcs_write_seq(dsi, 0x6c, 0x01);
+ mipi_dsi_dcs_write_seq(dsi, 0x6d, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0x6f, 0x01);
+ mipi_dsi_dcs_write_seq(dsi, 0x70,
+ 0x12, 0x00, 0x00, 0xab, 0x30, 0x80, 0x09, 0x60, 0x04,
+ 0x38, 0x00, 0x28, 0x02, 0x1c, 0x02, 0x1c, 0x02, 0x00,
+ 0x02, 0x0e, 0x00, 0x20, 0x03, 0xdd, 0x00, 0x07, 0x00,
+ 0x0c, 0x02, 0x77, 0x02, 0x8b, 0x18, 0x00, 0x10, 0xf0,
+ 0x07, 0x10, 0x20, 0x00, 0x06, 0x0f, 0x0f, 0x33, 0x0e,
+ 0x1c, 0x2a, 0x38, 0x46, 0x54, 0x62, 0x69, 0x70, 0x77,
+ 0x79, 0x7b, 0x7d, 0x7e, 0x02, 0x02, 0x22, 0x00, 0x2a,
+ 0x40, 0x2a, 0xbe, 0x3a, 0xfc, 0x3a, 0xfa, 0x3a, 0xf8,
+ 0x3b, 0x38, 0x3b, 0x78, 0x3b, 0xb6, 0x4b, 0xb6, 0x4b,
+ 0xf4, 0x4b, 0xf4, 0x6c, 0x34, 0x84, 0x74, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0xf0, 0xaa, 0x10);
+ mipi_dsi_dcs_write_seq(dsi, 0xb1,
+ 0x01, 0x38, 0x00, 0x14, 0x00, 0x1c, 0x00, 0x01, 0x66,
+ 0x00, 0x14, 0x00, 0x14, 0x00, 0x01, 0x66, 0x00, 0x14,
+ 0x05, 0xcc, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0xf0, 0xaa, 0x13);
+ mipi_dsi_dcs_write_seq(dsi, 0xce,
+ 0x09, 0x11, 0x09, 0x11, 0x08, 0xc1, 0x07, 0xfa, 0x05,
+ 0xa4, 0x00, 0x3c, 0x00, 0x34, 0x00, 0x24, 0x00, 0x0c,
+ 0x00, 0x0c, 0x04, 0x00, 0x35);
+ mipi_dsi_dcs_write_seq(dsi, 0xf0, 0xaa, 0x14);
+ mipi_dsi_dcs_write_seq(dsi, 0xb2, 0x03, 0x33);
+ mipi_dsi_dcs_write_seq(dsi, 0xb4,
+ 0x00, 0x33, 0x00, 0x00, 0x00, 0x3e, 0x00, 0x00, 0x00,
+ 0x3e, 0x00, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0xb5,
+ 0x00, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09, 0x06, 0x01);
+ mipi_dsi_dcs_write_seq(dsi, 0xb9, 0x00, 0x00, 0x08, 0x09, 0x09, 0x09);
+ mipi_dsi_dcs_write_seq(dsi, 0xbc,
+ 0x10, 0x00, 0x00, 0x06, 0x11, 0x09, 0x3b, 0x09, 0x47,
+ 0x09, 0x47, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0xbe,
+ 0x10, 0x10, 0x00, 0x08, 0x22, 0x09, 0x19, 0x09, 0x25,
+ 0x09, 0x25, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0xff, 0x5a, 0x80);
+ mipi_dsi_dcs_write_seq(dsi, 0x65, 0x14);
+ mipi_dsi_dcs_write_seq(dsi, 0xfa, 0x08, 0x08, 0x08);
+ mipi_dsi_dcs_write_seq(dsi, 0xff, 0x5a, 0x81);
+ mipi_dsi_dcs_write_seq(dsi, 0x65, 0x05);
+ mipi_dsi_dcs_write_seq(dsi, 0xf3, 0x0f);
+ mipi_dsi_dcs_write_seq(dsi, 0xf0, 0xaa, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0xff, 0x5a, 0x82);
+ mipi_dsi_dcs_write_seq(dsi, 0xf9, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0xff, 0x51, 0x83);
+ mipi_dsi_dcs_write_seq(dsi, 0x65, 0x04);
+ mipi_dsi_dcs_write_seq(dsi, 0xf8, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0xff, 0x5a, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, 0x65, 0x01);
+ mipi_dsi_dcs_write_seq(dsi, 0xf4, 0x9a);
+ mipi_dsi_dcs_write_seq(dsi, 0xff, 0x5a, 0x00);
+
+ ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to exit sleep mode: %d\n", ret);
+ return ret;
+ }
+ msleep(120);
+
+ ret = mipi_dsi_dcs_set_display_on(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to set display on: %d\n", ret);
+ return ret;
+ }
+ msleep(20);
+
+ return 0;
+}
+
+static int visionox_vtdr6130_off(struct visionox_vtdr6130 *ctx)
+{
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ struct device *dev = &dsi->dev;
+ int ret;
+
+ dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+ ret = mipi_dsi_dcs_set_display_off(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to set display off: %d\n", ret);
+ return ret;
+ }
+ msleep(20);
+
+ ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to enter sleep mode: %d\n", ret);
+ return ret;
+ }
+ msleep(120);
+
+ return 0;
+}
+
+static int visionox_vtdr6130_prepare(struct drm_panel *panel)
+{
+ struct visionox_vtdr6130 *ctx = to_visionox_vtdr6130(panel);
+ struct device *dev = &ctx->dsi->dev;
+ int ret;
+
+ if (ctx->prepared)
+ return 0;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies),
+ ctx->supplies);
+ if (ret < 0)
+ return ret;
+
+ visionox_vtdr6130_reset(ctx);
+
+ ret = visionox_vtdr6130_on(ctx);
+ if (ret < 0) {
+ dev_err(dev, "Failed to initialize panel: %d\n", ret);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+ return ret;
+ }
+
+ ctx->prepared = true;
+ return 0;
+}
+
+static int visionox_vtdr6130_unprepare(struct drm_panel *panel)
+{
+ struct visionox_vtdr6130 *ctx = to_visionox_vtdr6130(panel);
+ struct device *dev = &ctx->dsi->dev;
+ int ret;
+
+ if (!ctx->prepared)
+ return 0;
+
+ ret = visionox_vtdr6130_off(ctx);
+ if (ret < 0)
+ dev_err(dev, "Failed to un-initialize panel: %d\n", ret);
+
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+
+ regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+
+ ctx->prepared = false;
+ return 0;
+}
+
+static const struct drm_display_mode visionox_vtdr6130_mode = {
+ .clock = (1080 + 20 + 2 + 20) * (2400 + 20 + 2 + 18) * 144 / 1000,
+ .hdisplay = 1080,
+ .hsync_start = 1080 + 20,
+ .hsync_end = 1080 + 20 + 2,
+ .htotal = 1080 + 20 + 2 + 20,
+ .vdisplay = 2400,
+ .vsync_start = 2400 + 20,
+ .vsync_end = 2400 + 20 + 2,
+ .vtotal = 2400 + 20 + 2 + 18,
+ .width_mm = 71,
+ .height_mm = 157,
+};
+
+static int visionox_vtdr6130_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(connector->dev, &visionox_vtdr6130_mode);
+ if (!mode)
+ return -ENOMEM;
+
+ drm_mode_set_name(mode);
+
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ connector->display_info.width_mm = mode->width_mm;
+ connector->display_info.height_mm = mode->height_mm;
+ drm_mode_probed_add(connector, mode);
+
+ return 1;
+}
+
+static const struct drm_panel_funcs visionox_vtdr6130_panel_funcs = {
+ .prepare = visionox_vtdr6130_prepare,
+ .unprepare = visionox_vtdr6130_unprepare,
+ .get_modes = visionox_vtdr6130_get_modes,
+};
+
+static int visionox_vtdr6130_bl_update_status(struct backlight_device *bl)
+{
+ struct mipi_dsi_device *dsi = bl_get_data(bl);
+ u16 brightness = backlight_get_brightness(bl);
+
+ return mipi_dsi_dcs_set_display_brightness_large(dsi, brightness);
+}
+
+static const struct backlight_ops visionox_vtdr6130_bl_ops = {
+ .update_status = visionox_vtdr6130_bl_update_status,
+};
+
+static struct backlight_device *
+visionox_vtdr6130_create_backlight(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ const struct backlight_properties props = {
+ .type = BACKLIGHT_RAW,
+ .brightness = 4095,
+ .max_brightness = 4095,
+ };
+
+ return devm_backlight_device_register(dev, dev_name(dev), dev, dsi,
+ &visionox_vtdr6130_bl_ops, &props);
+}
+
+static int visionox_vtdr6130_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct visionox_vtdr6130 *ctx;
+ int ret;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->supplies[0].supply = "vddio";
+ ctx->supplies[1].supply = "vci";
+ ctx->supplies[2].supply = "vdd";
+
+ ret = devm_regulator_bulk_get(&dsi->dev, ARRAY_SIZE(ctx->supplies),
+ ctx->supplies);
+ if (ret < 0)
+ return ret;
+
+ ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(ctx->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio),
+ "Failed to get reset-gpios\n");
+
+ ctx->dsi = dsi;
+ mipi_dsi_set_drvdata(dsi, ctx);
+
+ dsi->lanes = 4;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_NO_EOT_PACKET |
+ MIPI_DSI_CLOCK_NON_CONTINUOUS;
+
+ drm_panel_init(&ctx->panel, dev, &visionox_vtdr6130_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+
+ ctx->panel.backlight = visionox_vtdr6130_create_backlight(dsi);
+ if (IS_ERR(ctx->panel.backlight))
+ return dev_err_probe(dev, PTR_ERR(ctx->panel.backlight),
+ "Failed to create backlight\n");
+
+ drm_panel_add(&ctx->panel);
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to attach to DSI host: %d\n", ret);
+ drm_panel_remove(&ctx->panel);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void visionox_vtdr6130_remove(struct mipi_dsi_device *dsi)
+{
+ struct visionox_vtdr6130 *ctx = mipi_dsi_get_drvdata(dsi);
+ int ret;
+
+ ret = mipi_dsi_detach(dsi);
+ if (ret < 0)
+ dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret);
+
+ drm_panel_remove(&ctx->panel);
+}
+
+static const struct of_device_id visionox_vtdr6130_of_match[] = {
+ { .compatible = "visionox,vtdr6130" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, visionox_vtdr6130_of_match);
+
+static struct mipi_dsi_driver visionox_vtdr6130_driver = {
+ .probe = visionox_vtdr6130_probe,
+ .remove = visionox_vtdr6130_remove,
+ .driver = {
+ .name = "panel-visionox-vtdr6130",
+ .of_match_table = visionox_vtdr6130_of_match,
+ },
+};
+module_mipi_dsi_driver(visionox_vtdr6130_driver);
+
+MODULE_AUTHOR("Neil Armstrong <neil.armstrong@linaro.org>");
+MODULE_DESCRIPTION("Panel driver for the Visionox VTDR6130 AMOLED DSI panel");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/panel/panel-xinpeng-xpp055c272.c b/drivers/gpu/drm/panel/panel-xinpeng-xpp055c272.c
index 2c54733ee241..8670386498a4 100644
--- a/drivers/gpu/drm/panel/panel-xinpeng-xpp055c272.c
+++ b/drivers/gpu/drm/panel/panel-xinpeng-xpp055c272.c
@@ -60,14 +60,6 @@ static inline struct xpp055c272 *panel_to_xpp055c272(struct drm_panel *panel)
return container_of(panel, struct xpp055c272, panel);
}
-#define dsi_generic_write_seq(dsi, cmd, seq...) do { \
- static const u8 b[] = { cmd, seq }; \
- int ret; \
- ret = mipi_dsi_dcs_write_buffer(dsi, b, ARRAY_SIZE(b)); \
- if (ret < 0) \
- return ret; \
- } while (0)
-
static int xpp055c272_init_sequence(struct xpp055c272 *ctx)
{
struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
@@ -77,60 +69,60 @@ static int xpp055c272_init_sequence(struct xpp055c272 *ctx)
* Init sequence was supplied by the panel vendor without much
* documentation.
*/
- dsi_generic_write_seq(dsi, XPP055C272_CMD_SETEXTC, 0xf1, 0x12, 0x83);
- dsi_generic_write_seq(dsi, XPP055C272_CMD_SETMIPI,
- 0x33, 0x81, 0x05, 0xf9, 0x0e, 0x0e, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x44, 0x25,
- 0x00, 0x91, 0x0a, 0x00, 0x00, 0x02, 0x4f, 0x01,
- 0x00, 0x00, 0x37);
- dsi_generic_write_seq(dsi, XPP055C272_CMD_SETPOWER_EXT, 0x25);
- dsi_generic_write_seq(dsi, XPP055C272_CMD_SETPCR, 0x02, 0x11, 0x00);
- dsi_generic_write_seq(dsi, XPP055C272_CMD_SETRGBIF,
- 0x0c, 0x10, 0x0a, 0x50, 0x03, 0xff, 0x00, 0x00,
- 0x00, 0x00);
- dsi_generic_write_seq(dsi, XPP055C272_CMD_SETSCR,
- 0x73, 0x73, 0x50, 0x50, 0x00, 0x00, 0x08, 0x70,
- 0x00);
- dsi_generic_write_seq(dsi, XPP055C272_CMD_SETVDC, 0x46);
- dsi_generic_write_seq(dsi, XPP055C272_CMD_SETPANEL, 0x0b);
- dsi_generic_write_seq(dsi, XPP055C272_CMD_SETCYC, 0x80);
- dsi_generic_write_seq(dsi, XPP055C272_CMD_SETDISP, 0xc8, 0x12, 0x30);
- dsi_generic_write_seq(dsi, XPP055C272_CMD_SETEQ,
- 0x07, 0x07, 0x0B, 0x0B, 0x03, 0x0B, 0x00, 0x00,
- 0x00, 0x00, 0xFF, 0x00, 0xC0, 0x10);
- dsi_generic_write_seq(dsi, XPP055C272_CMD_SETPOWER,
- 0x53, 0x00, 0x1e, 0x1e, 0x77, 0xe1, 0xcc, 0xdd,
- 0x67, 0x77, 0x33, 0x33);
- dsi_generic_write_seq(dsi, XPP055C272_CMD_SETECO, 0x00, 0x00, 0xff,
- 0xff, 0x01, 0xff);
- dsi_generic_write_seq(dsi, XPP055C272_CMD_SETBGP, 0x09, 0x09);
+ mipi_dsi_dcs_write_seq(dsi, XPP055C272_CMD_SETEXTC, 0xf1, 0x12, 0x83);
+ mipi_dsi_dcs_write_seq(dsi, XPP055C272_CMD_SETMIPI,
+ 0x33, 0x81, 0x05, 0xf9, 0x0e, 0x0e, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x44, 0x25,
+ 0x00, 0x91, 0x0a, 0x00, 0x00, 0x02, 0x4f, 0x01,
+ 0x00, 0x00, 0x37);
+ mipi_dsi_dcs_write_seq(dsi, XPP055C272_CMD_SETPOWER_EXT, 0x25);
+ mipi_dsi_dcs_write_seq(dsi, XPP055C272_CMD_SETPCR, 0x02, 0x11, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, XPP055C272_CMD_SETRGBIF,
+ 0x0c, 0x10, 0x0a, 0x50, 0x03, 0xff, 0x00, 0x00,
+ 0x00, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, XPP055C272_CMD_SETSCR,
+ 0x73, 0x73, 0x50, 0x50, 0x00, 0x00, 0x08, 0x70,
+ 0x00);
+ mipi_dsi_dcs_write_seq(dsi, XPP055C272_CMD_SETVDC, 0x46);
+ mipi_dsi_dcs_write_seq(dsi, XPP055C272_CMD_SETPANEL, 0x0b);
+ mipi_dsi_dcs_write_seq(dsi, XPP055C272_CMD_SETCYC, 0x80);
+ mipi_dsi_dcs_write_seq(dsi, XPP055C272_CMD_SETDISP, 0xc8, 0x12, 0x30);
+ mipi_dsi_dcs_write_seq(dsi, XPP055C272_CMD_SETEQ,
+ 0x07, 0x07, 0x0B, 0x0B, 0x03, 0x0B, 0x00, 0x00,
+ 0x00, 0x00, 0xFF, 0x00, 0xC0, 0x10);
+ mipi_dsi_dcs_write_seq(dsi, XPP055C272_CMD_SETPOWER,
+ 0x53, 0x00, 0x1e, 0x1e, 0x77, 0xe1, 0xcc, 0xdd,
+ 0x67, 0x77, 0x33, 0x33);
+ mipi_dsi_dcs_write_seq(dsi, XPP055C272_CMD_SETECO, 0x00, 0x00, 0xff,
+ 0xff, 0x01, 0xff);
+ mipi_dsi_dcs_write_seq(dsi, XPP055C272_CMD_SETBGP, 0x09, 0x09);
msleep(20);
- dsi_generic_write_seq(dsi, XPP055C272_CMD_SETVCOM, 0x87, 0x95);
- dsi_generic_write_seq(dsi, XPP055C272_CMD_SETGIP1,
- 0xc2, 0x10, 0x05, 0x05, 0x10, 0x05, 0xa0, 0x12,
- 0x31, 0x23, 0x3f, 0x81, 0x0a, 0xa0, 0x37, 0x18,
- 0x00, 0x80, 0x01, 0x00, 0x00, 0x00, 0x00, 0x80,
- 0x01, 0x00, 0x00, 0x00, 0x48, 0xf8, 0x86, 0x42,
- 0x08, 0x88, 0x88, 0x80, 0x88, 0x88, 0x88, 0x58,
- 0xf8, 0x87, 0x53, 0x18, 0x88, 0x88, 0x81, 0x88,
- 0x88, 0x88, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
- dsi_generic_write_seq(dsi, XPP055C272_CMD_SETGIP2,
- 0x00, 0x1a, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x1f, 0x88, 0x81, 0x35,
- 0x78, 0x88, 0x88, 0x85, 0x88, 0x88, 0x88, 0x0f,
- 0x88, 0x80, 0x24, 0x68, 0x88, 0x88, 0x84, 0x88,
- 0x88, 0x88, 0x23, 0x10, 0x00, 0x00, 0x1c, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x05,
- 0xa0, 0x00, 0x00, 0x00, 0x00);
- dsi_generic_write_seq(dsi, XPP055C272_CMD_SETGAMMA,
- 0x00, 0x06, 0x08, 0x2a, 0x31, 0x3f, 0x38, 0x36,
- 0x07, 0x0c, 0x0d, 0x11, 0x13, 0x12, 0x13, 0x11,
- 0x18, 0x00, 0x06, 0x08, 0x2a, 0x31, 0x3f, 0x38,
- 0x36, 0x07, 0x0c, 0x0d, 0x11, 0x13, 0x12, 0x13,
- 0x11, 0x18);
+ mipi_dsi_dcs_write_seq(dsi, XPP055C272_CMD_SETVCOM, 0x87, 0x95);
+ mipi_dsi_dcs_write_seq(dsi, XPP055C272_CMD_SETGIP1,
+ 0xc2, 0x10, 0x05, 0x05, 0x10, 0x05, 0xa0, 0x12,
+ 0x31, 0x23, 0x3f, 0x81, 0x0a, 0xa0, 0x37, 0x18,
+ 0x00, 0x80, 0x01, 0x00, 0x00, 0x00, 0x00, 0x80,
+ 0x01, 0x00, 0x00, 0x00, 0x48, 0xf8, 0x86, 0x42,
+ 0x08, 0x88, 0x88, 0x80, 0x88, 0x88, 0x88, 0x58,
+ 0xf8, 0x87, 0x53, 0x18, 0x88, 0x88, 0x81, 0x88,
+ 0x88, 0x88, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, XPP055C272_CMD_SETGIP2,
+ 0x00, 0x1a, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x1f, 0x88, 0x81, 0x35,
+ 0x78, 0x88, 0x88, 0x85, 0x88, 0x88, 0x88, 0x0f,
+ 0x88, 0x80, 0x24, 0x68, 0x88, 0x88, 0x84, 0x88,
+ 0x88, 0x88, 0x23, 0x10, 0x00, 0x00, 0x1c, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x05,
+ 0xa0, 0x00, 0x00, 0x00, 0x00);
+ mipi_dsi_dcs_write_seq(dsi, XPP055C272_CMD_SETGAMMA,
+ 0x00, 0x06, 0x08, 0x2a, 0x31, 0x3f, 0x38, 0x36,
+ 0x07, 0x0c, 0x0d, 0x11, 0x13, 0x12, 0x13, 0x11,
+ 0x18, 0x00, 0x06, 0x08, 0x2a, 0x31, 0x3f, 0x38,
+ 0x36, 0x07, 0x0c, 0x0d, 0x11, 0x13, 0x12, 0x13,
+ 0x11, 0x18);
msleep(60);
diff --git a/drivers/gpu/drm/panfrost/panfrost_device.c b/drivers/gpu/drm/panfrost/panfrost_device.c
index ee612303f076..fa1a086a862b 100644
--- a/drivers/gpu/drm/panfrost/panfrost_device.c
+++ b/drivers/gpu/drm/panfrost/panfrost_device.c
@@ -6,6 +6,7 @@
#include <linux/reset.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include "panfrost_device.h"
@@ -400,8 +401,7 @@ void panfrost_device_reset(struct panfrost_device *pfdev)
panfrost_job_enable_interrupts(pfdev);
}
-#ifdef CONFIG_PM
-int panfrost_device_resume(struct device *dev)
+static int panfrost_device_resume(struct device *dev)
{
struct panfrost_device *pfdev = dev_get_drvdata(dev);
@@ -411,7 +411,7 @@ int panfrost_device_resume(struct device *dev)
return 0;
}
-int panfrost_device_suspend(struct device *dev)
+static int panfrost_device_suspend(struct device *dev)
{
struct panfrost_device *pfdev = dev_get_drvdata(dev);
@@ -423,4 +423,6 @@ int panfrost_device_suspend(struct device *dev)
return 0;
}
-#endif
+
+EXPORT_GPL_RUNTIME_DEV_PM_OPS(panfrost_pm_ops, panfrost_device_suspend,
+ panfrost_device_resume, NULL);
diff --git a/drivers/gpu/drm/panfrost/panfrost_device.h b/drivers/gpu/drm/panfrost/panfrost_device.h
index 8b25278f34c8..d9ba68cffb77 100644
--- a/drivers/gpu/drm/panfrost/panfrost_device.h
+++ b/drivers/gpu/drm/panfrost/panfrost_device.h
@@ -7,6 +7,7 @@
#include <linux/atomic.h>
#include <linux/io-pgtable.h>
+#include <linux/pm.h>
#include <linux/regulator/consumer.h>
#include <linux/spinlock.h>
#include <drm/drm_device.h>
@@ -172,8 +173,7 @@ int panfrost_device_init(struct panfrost_device *pfdev);
void panfrost_device_fini(struct panfrost_device *pfdev);
void panfrost_device_reset(struct panfrost_device *pfdev);
-int panfrost_device_resume(struct device *dev);
-int panfrost_device_suspend(struct device *dev);
+extern const struct dev_pm_ops panfrost_pm_ops;
enum drm_panfrost_exception_type {
DRM_PANFROST_EXCEPTION_OK = 0x00,
diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
index 919e6cc04982..abb0dadd8f63 100644
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
@@ -685,17 +685,12 @@ static const struct of_device_id dt_match[] = {
};
MODULE_DEVICE_TABLE(of, dt_match);
-static const struct dev_pm_ops panfrost_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
- SET_RUNTIME_PM_OPS(panfrost_device_suspend, panfrost_device_resume, NULL)
-};
-
static struct platform_driver panfrost_driver = {
.probe = panfrost_probe,
.remove = panfrost_remove,
.driver = {
.name = "panfrost",
- .pm = &panfrost_pm_ops,
+ .pm = pm_ptr(&panfrost_pm_ops),
.of_match_table = dt_match,
},
};
diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
index 63aa96a69752..281edab518cd 100644
--- a/drivers/gpu/drm/qxl/qxl_cmd.c
+++ b/drivers/gpu/drm/qxl/qxl_cmd.c
@@ -579,7 +579,7 @@ void qxl_surface_evict(struct qxl_device *qdev, struct qxl_bo *surf, bool do_upd
static int qxl_reap_surf(struct qxl_device *qdev, struct qxl_bo *surf, bool stall)
{
- int ret;
+ long ret;
ret = qxl_bo_reserve(surf);
if (ret)
@@ -588,7 +588,19 @@ static int qxl_reap_surf(struct qxl_device *qdev, struct qxl_bo *surf, bool stal
if (stall)
mutex_unlock(&qdev->surf_evict_mutex);
- ret = ttm_bo_wait(&surf->tbo, true, !stall);
+ if (stall) {
+ ret = dma_resv_wait_timeout(surf->tbo.base.resv,
+ DMA_RESV_USAGE_BOOKKEEP, true,
+ 15 * HZ);
+ if (ret > 0)
+ ret = 0;
+ else if (ret == 0)
+ ret = -EBUSY;
+ } else {
+ ret = dma_resv_test_signaled(surf->tbo.base.resv,
+ DMA_RESV_USAGE_BOOKKEEP);
+ ret = ret ? -EBUSY : 0;
+ }
if (stall)
mutex_lock(&qdev->surf_evict_mutex);
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index 76f060810f63..ea993d7162e8 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -42,8 +42,7 @@
#include <drm/drm_ioctl.h>
#include <drm/drm_gem.h>
#include <drm/qxl_drm.h>
-#include <drm/ttm/ttm_bo_api.h>
-#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_placement.h>
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index ee95001e6b5e..a92a5b0d4c25 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -29,10 +29,10 @@
#include <drm/drm_file.h>
#include <drm/drm_debugfs.h>
#include <drm/qxl_drm.h>
-#include <drm/ttm/ttm_bo_api.h>
-#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_range_manager.h>
+#include <drm/ttm/ttm_tt.h>
#include "qxl_drv.h"
#include "qxl_object.h"
diff --git a/drivers/gpu/drm/r128/Makefile b/drivers/gpu/drm/r128/Makefile
deleted file mode 100644
index c07a069533ef..000000000000
--- a/drivers/gpu/drm/r128/Makefile
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# Makefile for the drm device driver. This driver provides support for the
-# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
-
-r128-y := r128_drv.o r128_cce.o r128_state.o r128_irq.o ati_pcigart.o
-
-r128-$(CONFIG_COMPAT) += r128_ioc32.o
-
-obj-$(CONFIG_DRM_R128) += r128.o
diff --git a/drivers/gpu/drm/r128/ati_pcigart.c b/drivers/gpu/drm/r128/ati_pcigart.c
deleted file mode 100644
index dde0501aea68..000000000000
--- a/drivers/gpu/drm/r128/ati_pcigart.c
+++ /dev/null
@@ -1,228 +0,0 @@
-/*
- * \file ati_pcigart.c
- * ATI PCI GART support
- *
- * \author Gareth Hughes <gareth@valinux.com>
- */
-
-/*
- * Created: Wed Dec 13 21:52:19 2000 by gareth@valinux.com
- *
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#include <linux/export.h>
-#include <linux/pci.h>
-
-#include <drm/drm_device.h>
-#include <drm/drm_legacy.h>
-#include <drm/drm_print.h>
-
-#include "ati_pcigart.h"
-
-# define ATI_PCIGART_PAGE_SIZE 4096 /**< PCI GART page size */
-
-static int drm_ati_alloc_pcigart_table(struct drm_device *dev,
- struct drm_ati_pcigart_info *gart_info)
-{
- drm_dma_handle_t *dmah = kmalloc(sizeof(drm_dma_handle_t), GFP_KERNEL);
-
- if (!dmah)
- return -ENOMEM;
-
- dmah->size = gart_info->table_size;
- dmah->vaddr = dma_alloc_coherent(dev->dev,
- dmah->size,
- &dmah->busaddr,
- GFP_KERNEL);
-
- if (!dmah->vaddr) {
- kfree(dmah);
- return -ENOMEM;
- }
-
- gart_info->table_handle = dmah;
- return 0;
-}
-
-static void drm_ati_free_pcigart_table(struct drm_device *dev,
- struct drm_ati_pcigart_info *gart_info)
-{
- drm_dma_handle_t *dmah = gart_info->table_handle;
-
- dma_free_coherent(dev->dev, dmah->size, dmah->vaddr, dmah->busaddr);
- kfree(dmah);
-
- gart_info->table_handle = NULL;
-}
-
-int drm_ati_pcigart_cleanup(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info)
-{
- struct drm_sg_mem *entry = dev->sg;
- struct pci_dev *pdev = to_pci_dev(dev->dev);
- unsigned long pages;
- int i;
- int max_pages;
-
- /* we need to support large memory configurations */
- if (!entry) {
- DRM_ERROR("no scatter/gather memory!\n");
- return 0;
- }
-
- if (gart_info->bus_addr) {
-
- max_pages = (gart_info->table_size / sizeof(u32));
- pages = (entry->pages <= max_pages)
- ? entry->pages : max_pages;
-
- for (i = 0; i < pages; i++) {
- if (!entry->busaddr[i])
- break;
- dma_unmap_page(&pdev->dev, entry->busaddr[i],
- PAGE_SIZE, DMA_BIDIRECTIONAL);
- }
-
- if (gart_info->gart_table_location == DRM_ATI_GART_MAIN)
- gart_info->bus_addr = 0;
- }
-
- if (gart_info->gart_table_location == DRM_ATI_GART_MAIN &&
- gart_info->table_handle) {
- drm_ati_free_pcigart_table(dev, gart_info);
- }
-
- return 1;
-}
-
-int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info)
-{
- struct drm_local_map *map = &gart_info->mapping;
- struct drm_sg_mem *entry = dev->sg;
- struct pci_dev *pdev = to_pci_dev(dev->dev);
- void *address = NULL;
- unsigned long pages;
- u32 *pci_gart = NULL, page_base, gart_idx;
- dma_addr_t bus_address = 0;
- int i, j, ret = -ENOMEM;
- int max_ati_pages, max_real_pages;
-
- if (!entry) {
- DRM_ERROR("no scatter/gather memory!\n");
- goto done;
- }
-
- if (gart_info->gart_table_location == DRM_ATI_GART_MAIN) {
- DRM_DEBUG("PCI: no table in VRAM: using normal RAM\n");
-
- if (dma_set_mask(&pdev->dev, gart_info->table_mask)) {
- DRM_ERROR("fail to set dma mask to 0x%Lx\n",
- (unsigned long long)gart_info->table_mask);
- ret = -EFAULT;
- goto done;
- }
-
- ret = drm_ati_alloc_pcigart_table(dev, gart_info);
- if (ret) {
- DRM_ERROR("cannot allocate PCI GART page!\n");
- goto done;
- }
-
- pci_gart = gart_info->table_handle->vaddr;
- address = gart_info->table_handle->vaddr;
- bus_address = gart_info->table_handle->busaddr;
- } else {
- address = gart_info->addr;
- bus_address = gart_info->bus_addr;
- DRM_DEBUG("PCI: Gart Table: VRAM %08LX mapped at %08lX\n",
- (unsigned long long)bus_address,
- (unsigned long)address);
- }
-
-
- max_ati_pages = (gart_info->table_size / sizeof(u32));
- max_real_pages = max_ati_pages / (PAGE_SIZE / ATI_PCIGART_PAGE_SIZE);
- pages = (entry->pages <= max_real_pages)
- ? entry->pages : max_real_pages;
-
- if (gart_info->gart_table_location == DRM_ATI_GART_MAIN) {
- memset(pci_gart, 0, max_ati_pages * sizeof(u32));
- } else {
- memset_io((void __iomem *)map->handle, 0, max_ati_pages * sizeof(u32));
- }
-
- gart_idx = 0;
- for (i = 0; i < pages; i++) {
- /* we need to support large memory configurations */
- entry->busaddr[i] = dma_map_page(&pdev->dev, entry->pagelist[i],
- 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
- if (dma_mapping_error(&pdev->dev, entry->busaddr[i])) {
- DRM_ERROR("unable to map PCIGART pages!\n");
- drm_ati_pcigart_cleanup(dev, gart_info);
- address = NULL;
- bus_address = 0;
- ret = -ENOMEM;
- goto done;
- }
- page_base = (u32) entry->busaddr[i];
-
- for (j = 0; j < (PAGE_SIZE / ATI_PCIGART_PAGE_SIZE); j++) {
- u32 offset;
- u32 val;
-
- switch(gart_info->gart_reg_if) {
- case DRM_ATI_GART_IGP:
- val = page_base | 0xc;
- break;
- case DRM_ATI_GART_PCIE:
- val = (page_base >> 8) | 0xc;
- break;
- default:
- case DRM_ATI_GART_PCI:
- val = page_base;
- break;
- }
- if (gart_info->gart_table_location ==
- DRM_ATI_GART_MAIN) {
- pci_gart[gart_idx] = cpu_to_le32(val);
- } else {
- offset = gart_idx * sizeof(u32);
- writel(val, (void __iomem *)map->handle + offset);
- }
- gart_idx++;
- page_base += ATI_PCIGART_PAGE_SIZE;
- }
- }
- ret = 0;
-
-#ifdef CONFIG_X86
- wbinvd();
-#else
- mb();
-#endif
-
- done:
- gart_info->addr = address;
- gart_info->bus_addr = bus_address;
- return ret;
-}
diff --git a/drivers/gpu/drm/r128/ati_pcigart.h b/drivers/gpu/drm/r128/ati_pcigart.h
deleted file mode 100644
index a728a1364e66..000000000000
--- a/drivers/gpu/drm/r128/ati_pcigart.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef DRM_ATI_PCIGART_H
-#define DRM_ATI_PCIGART_H
-
-#include <drm/drm_legacy.h>
-
-/* location of GART table */
-#define DRM_ATI_GART_MAIN 1
-#define DRM_ATI_GART_FB 2
-
-#define DRM_ATI_GART_PCI 1
-#define DRM_ATI_GART_PCIE 2
-#define DRM_ATI_GART_IGP 3
-
-struct drm_ati_pcigart_info {
- int gart_table_location;
- int gart_reg_if;
- void *addr;
- dma_addr_t bus_addr;
- dma_addr_t table_mask;
- struct drm_dma_handle *table_handle;
- struct drm_local_map mapping;
- int table_size;
-};
-
-extern int drm_ati_pcigart_init(struct drm_device *dev,
- struct drm_ati_pcigart_info * gart_info);
-extern int drm_ati_pcigart_cleanup(struct drm_device *dev,
- struct drm_ati_pcigart_info * gart_info);
-
-#endif
diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
deleted file mode 100644
index c04d84a69dd2..000000000000
--- a/drivers/gpu/drm/r128/r128_cce.c
+++ /dev/null
@@ -1,944 +0,0 @@
-/* r128_cce.c -- ATI Rage 128 driver -*- linux-c -*-
- * Created: Wed Apr 5 19:24:19 2000 by kevin@precisioninsight.com
- */
-/*
- * Copyright 2000 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Gareth Hughes <gareth@valinux.com>
- */
-
-#include <linux/delay.h>
-#include <linux/dma-mapping.h>
-#include <linux/firmware.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include <linux/uaccess.h>
-
-#include <drm/drm_device.h>
-#include <drm/drm_file.h>
-#include <drm/drm_legacy.h>
-#include <drm/drm_print.h>
-#include <drm/r128_drm.h>
-
-#include "r128_drv.h"
-
-#define R128_FIFO_DEBUG 0
-
-#define FIRMWARE_NAME "r128/r128_cce.bin"
-
-MODULE_FIRMWARE(FIRMWARE_NAME);
-
-static int R128_READ_PLL(struct drm_device *dev, int addr)
-{
- drm_r128_private_t *dev_priv = dev->dev_private;
-
- R128_WRITE8(R128_CLOCK_CNTL_INDEX, addr & 0x1f);
- return R128_READ(R128_CLOCK_CNTL_DATA);
-}
-
-#if R128_FIFO_DEBUG
-static void r128_status(drm_r128_private_t *dev_priv)
-{
- printk("GUI_STAT = 0x%08x\n",
- (unsigned int)R128_READ(R128_GUI_STAT));
- printk("PM4_STAT = 0x%08x\n",
- (unsigned int)R128_READ(R128_PM4_STAT));
- printk("PM4_BUFFER_DL_WPTR = 0x%08x\n",
- (unsigned int)R128_READ(R128_PM4_BUFFER_DL_WPTR));
- printk("PM4_BUFFER_DL_RPTR = 0x%08x\n",
- (unsigned int)R128_READ(R128_PM4_BUFFER_DL_RPTR));
- printk("PM4_MICRO_CNTL = 0x%08x\n",
- (unsigned int)R128_READ(R128_PM4_MICRO_CNTL));
- printk("PM4_BUFFER_CNTL = 0x%08x\n",
- (unsigned int)R128_READ(R128_PM4_BUFFER_CNTL));
-}
-#endif
-
-/* ================================================================
- * Engine, FIFO control
- */
-
-static int r128_do_pixcache_flush(drm_r128_private_t *dev_priv)
-{
- u32 tmp;
- int i;
-
- tmp = R128_READ(R128_PC_NGUI_CTLSTAT) | R128_PC_FLUSH_ALL;
- R128_WRITE(R128_PC_NGUI_CTLSTAT, tmp);
-
- for (i = 0; i < dev_priv->usec_timeout; i++) {
- if (!(R128_READ(R128_PC_NGUI_CTLSTAT) & R128_PC_BUSY))
- return 0;
- udelay(1);
- }
-
-#if R128_FIFO_DEBUG
- DRM_ERROR("failed!\n");
-#endif
- return -EBUSY;
-}
-
-static int r128_do_wait_for_fifo(drm_r128_private_t *dev_priv, int entries)
-{
- int i;
-
- for (i = 0; i < dev_priv->usec_timeout; i++) {
- int slots = R128_READ(R128_GUI_STAT) & R128_GUI_FIFOCNT_MASK;
- if (slots >= entries)
- return 0;
- udelay(1);
- }
-
-#if R128_FIFO_DEBUG
- DRM_ERROR("failed!\n");
-#endif
- return -EBUSY;
-}
-
-static int r128_do_wait_for_idle(drm_r128_private_t *dev_priv)
-{
- int i, ret;
-
- ret = r128_do_wait_for_fifo(dev_priv, 64);
- if (ret)
- return ret;
-
- for (i = 0; i < dev_priv->usec_timeout; i++) {
- if (!(R128_READ(R128_GUI_STAT) & R128_GUI_ACTIVE)) {
- r128_do_pixcache_flush(dev_priv);
- return 0;
- }
- udelay(1);
- }
-
-#if R128_FIFO_DEBUG
- DRM_ERROR("failed!\n");
-#endif
- return -EBUSY;
-}
-
-/* ================================================================
- * CCE control, initialization
- */
-
-/* Load the microcode for the CCE */
-static int r128_cce_load_microcode(drm_r128_private_t *dev_priv)
-{
- struct platform_device *pdev;
- const struct firmware *fw;
- const __be32 *fw_data;
- int rc, i;
-
- DRM_DEBUG("\n");
-
- pdev = platform_device_register_simple("r128_cce", 0, NULL, 0);
- if (IS_ERR(pdev)) {
- pr_err("r128_cce: Failed to register firmware\n");
- return PTR_ERR(pdev);
- }
- rc = request_firmware(&fw, FIRMWARE_NAME, &pdev->dev);
- platform_device_unregister(pdev);
- if (rc) {
- pr_err("r128_cce: Failed to load firmware \"%s\"\n",
- FIRMWARE_NAME);
- return rc;
- }
-
- if (fw->size != 256 * 8) {
- pr_err("r128_cce: Bogus length %zu in firmware \"%s\"\n",
- fw->size, FIRMWARE_NAME);
- rc = -EINVAL;
- goto out_release;
- }
-
- r128_do_wait_for_idle(dev_priv);
-
- fw_data = (const __be32 *)fw->data;
- R128_WRITE(R128_PM4_MICROCODE_ADDR, 0);
- for (i = 0; i < 256; i++) {
- R128_WRITE(R128_PM4_MICROCODE_DATAH,
- be32_to_cpup(&fw_data[i * 2]));
- R128_WRITE(R128_PM4_MICROCODE_DATAL,
- be32_to_cpup(&fw_data[i * 2 + 1]));
- }
-
-out_release:
- release_firmware(fw);
- return rc;
-}
-
-/* Flush any pending commands to the CCE. This should only be used just
- * prior to a wait for idle, as it informs the engine that the command
- * stream is ending.
- */
-static void r128_do_cce_flush(drm_r128_private_t *dev_priv)
-{
- u32 tmp;
-
- tmp = R128_READ(R128_PM4_BUFFER_DL_WPTR) | R128_PM4_BUFFER_DL_DONE;
- R128_WRITE(R128_PM4_BUFFER_DL_WPTR, tmp);
-}
-
-/* Wait for the CCE to go idle.
- */
-int r128_do_cce_idle(drm_r128_private_t *dev_priv)
-{
- int i;
-
- for (i = 0; i < dev_priv->usec_timeout; i++) {
- if (GET_RING_HEAD(dev_priv) == dev_priv->ring.tail) {
- int pm4stat = R128_READ(R128_PM4_STAT);
- if (((pm4stat & R128_PM4_FIFOCNT_MASK) >=
- dev_priv->cce_fifo_size) &&
- !(pm4stat & (R128_PM4_BUSY |
- R128_PM4_GUI_ACTIVE))) {
- return r128_do_pixcache_flush(dev_priv);
- }
- }
- udelay(1);
- }
-
-#if R128_FIFO_DEBUG
- DRM_ERROR("failed!\n");
- r128_status(dev_priv);
-#endif
- return -EBUSY;
-}
-
-/* Start the Concurrent Command Engine.
- */
-static void r128_do_cce_start(drm_r128_private_t *dev_priv)
-{
- r128_do_wait_for_idle(dev_priv);
-
- R128_WRITE(R128_PM4_BUFFER_CNTL,
- dev_priv->cce_mode | dev_priv->ring.size_l2qw
- | R128_PM4_BUFFER_CNTL_NOUPDATE);
- R128_READ(R128_PM4_BUFFER_ADDR); /* as per the sample code */
- R128_WRITE(R128_PM4_MICRO_CNTL, R128_PM4_MICRO_FREERUN);
-
- dev_priv->cce_running = 1;
-}
-
-/* Reset the Concurrent Command Engine. This will not flush any pending
- * commands, so you must wait for the CCE command stream to complete
- * before calling this routine.
- */
-static void r128_do_cce_reset(drm_r128_private_t *dev_priv)
-{
- R128_WRITE(R128_PM4_BUFFER_DL_WPTR, 0);
- R128_WRITE(R128_PM4_BUFFER_DL_RPTR, 0);
- dev_priv->ring.tail = 0;
-}
-
-/* Stop the Concurrent Command Engine. This will not flush any pending
- * commands, so you must flush the command stream and wait for the CCE
- * to go idle before calling this routine.
- */
-static void r128_do_cce_stop(drm_r128_private_t *dev_priv)
-{
- R128_WRITE(R128_PM4_MICRO_CNTL, 0);
- R128_WRITE(R128_PM4_BUFFER_CNTL,
- R128_PM4_NONPM4 | R128_PM4_BUFFER_CNTL_NOUPDATE);
-
- dev_priv->cce_running = 0;
-}
-
-/* Reset the engine. This will stop the CCE if it is running.
- */
-static int r128_do_engine_reset(struct drm_device *dev)
-{
- drm_r128_private_t *dev_priv = dev->dev_private;
- u32 clock_cntl_index, mclk_cntl, gen_reset_cntl;
-
- r128_do_pixcache_flush(dev_priv);
-
- clock_cntl_index = R128_READ(R128_CLOCK_CNTL_INDEX);
- mclk_cntl = R128_READ_PLL(dev, R128_MCLK_CNTL);
-
- R128_WRITE_PLL(R128_MCLK_CNTL,
- mclk_cntl | R128_FORCE_GCP | R128_FORCE_PIPE3D_CP);
-
- gen_reset_cntl = R128_READ(R128_GEN_RESET_CNTL);
-
- /* Taken from the sample code - do not change */
- R128_WRITE(R128_GEN_RESET_CNTL, gen_reset_cntl | R128_SOFT_RESET_GUI);
- R128_READ(R128_GEN_RESET_CNTL);
- R128_WRITE(R128_GEN_RESET_CNTL, gen_reset_cntl & ~R128_SOFT_RESET_GUI);
- R128_READ(R128_GEN_RESET_CNTL);
-
- R128_WRITE_PLL(R128_MCLK_CNTL, mclk_cntl);
- R128_WRITE(R128_CLOCK_CNTL_INDEX, clock_cntl_index);
- R128_WRITE(R128_GEN_RESET_CNTL, gen_reset_cntl);
-
- /* Reset the CCE ring */
- r128_do_cce_reset(dev_priv);
-
- /* The CCE is no longer running after an engine reset */
- dev_priv->cce_running = 0;
-
- /* Reset any pending vertex, indirect buffers */
- r128_freelist_reset(dev);
-
- return 0;
-}
-
-static void r128_cce_init_ring_buffer(struct drm_device *dev,
- drm_r128_private_t *dev_priv)
-{
- u32 ring_start;
- u32 tmp;
-
- DRM_DEBUG("\n");
-
- /* The manual (p. 2) says this address is in "VM space". This
- * means it's an offset from the start of AGP space.
- */
-#if IS_ENABLED(CONFIG_AGP)
- if (!dev_priv->is_pci)
- ring_start = dev_priv->cce_ring->offset - dev->agp->base;
- else
-#endif
- ring_start = dev_priv->cce_ring->offset -
- (unsigned long)dev->sg->virtual;
-
- R128_WRITE(R128_PM4_BUFFER_OFFSET, ring_start | R128_AGP_OFFSET);
-
- R128_WRITE(R128_PM4_BUFFER_DL_WPTR, 0);
- R128_WRITE(R128_PM4_BUFFER_DL_RPTR, 0);
-
- /* Set watermark control */
- R128_WRITE(R128_PM4_BUFFER_WM_CNTL,
- ((R128_WATERMARK_L / 4) << R128_WMA_SHIFT)
- | ((R128_WATERMARK_M / 4) << R128_WMB_SHIFT)
- | ((R128_WATERMARK_N / 4) << R128_WMC_SHIFT)
- | ((R128_WATERMARK_K / 64) << R128_WB_WM_SHIFT));
-
- /* Force read. Why? Because it's in the examples... */
- R128_READ(R128_PM4_BUFFER_ADDR);
-
- /* Turn on bus mastering */
- tmp = R128_READ(R128_BUS_CNTL) & ~R128_BUS_MASTER_DIS;
- R128_WRITE(R128_BUS_CNTL, tmp);
-}
-
-static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
-{
- drm_r128_private_t *dev_priv;
- int rc;
-
- DRM_DEBUG("\n");
-
- if (dev->dev_private) {
- DRM_DEBUG("called when already initialized\n");
- return -EINVAL;
- }
-
- dev_priv = kzalloc(sizeof(drm_r128_private_t), GFP_KERNEL);
- if (dev_priv == NULL)
- return -ENOMEM;
-
- dev_priv->is_pci = init->is_pci;
-
- if (dev_priv->is_pci && !dev->sg) {
- DRM_ERROR("PCI GART memory not allocated!\n");
- dev->dev_private = (void *)dev_priv;
- r128_do_cleanup_cce(dev);
- return -EINVAL;
- }
-
- dev_priv->usec_timeout = init->usec_timeout;
- if (dev_priv->usec_timeout < 1 ||
- dev_priv->usec_timeout > R128_MAX_USEC_TIMEOUT) {
- DRM_DEBUG("TIMEOUT problem!\n");
- dev->dev_private = (void *)dev_priv;
- r128_do_cleanup_cce(dev);
- return -EINVAL;
- }
-
- dev_priv->cce_mode = init->cce_mode;
-
- /* GH: Simple idle check.
- */
- atomic_set(&dev_priv->idle_count, 0);
-
- /* We don't support anything other than bus-mastering ring mode,
- * but the ring can be in either AGP or PCI space for the ring
- * read pointer.
- */
- if ((init->cce_mode != R128_PM4_192BM) &&
- (init->cce_mode != R128_PM4_128BM_64INDBM) &&
- (init->cce_mode != R128_PM4_64BM_128INDBM) &&
- (init->cce_mode != R128_PM4_64BM_64VCBM_64INDBM)) {
- DRM_DEBUG("Bad cce_mode!\n");
- dev->dev_private = (void *)dev_priv;
- r128_do_cleanup_cce(dev);
- return -EINVAL;
- }
-
- switch (init->cce_mode) {
- case R128_PM4_NONPM4:
- dev_priv->cce_fifo_size = 0;
- break;
- case R128_PM4_192PIO:
- case R128_PM4_192BM:
- dev_priv->cce_fifo_size = 192;
- break;
- case R128_PM4_128PIO_64INDBM:
- case R128_PM4_128BM_64INDBM:
- dev_priv->cce_fifo_size = 128;
- break;
- case R128_PM4_64PIO_128INDBM:
- case R128_PM4_64BM_128INDBM:
- case R128_PM4_64PIO_64VCBM_64INDBM:
- case R128_PM4_64BM_64VCBM_64INDBM:
- case R128_PM4_64PIO_64VCPIO_64INDPIO:
- dev_priv->cce_fifo_size = 64;
- break;
- }
-
- switch (init->fb_bpp) {
- case 16:
- dev_priv->color_fmt = R128_DATATYPE_RGB565;
- break;
- case 32:
- default:
- dev_priv->color_fmt = R128_DATATYPE_ARGB8888;
- break;
- }
- dev_priv->front_offset = init->front_offset;
- dev_priv->front_pitch = init->front_pitch;
- dev_priv->back_offset = init->back_offset;
- dev_priv->back_pitch = init->back_pitch;
-
- switch (init->depth_bpp) {
- case 16:
- dev_priv->depth_fmt = R128_DATATYPE_RGB565;
- break;
- case 24:
- case 32:
- default:
- dev_priv->depth_fmt = R128_DATATYPE_ARGB8888;
- break;
- }
- dev_priv->depth_offset = init->depth_offset;
- dev_priv->depth_pitch = init->depth_pitch;
- dev_priv->span_offset = init->span_offset;
-
- dev_priv->front_pitch_offset_c = (((dev_priv->front_pitch / 8) << 21) |
- (dev_priv->front_offset >> 5));
- dev_priv->back_pitch_offset_c = (((dev_priv->back_pitch / 8) << 21) |
- (dev_priv->back_offset >> 5));
- dev_priv->depth_pitch_offset_c = (((dev_priv->depth_pitch / 8) << 21) |
- (dev_priv->depth_offset >> 5) |
- R128_DST_TILE);
- dev_priv->span_pitch_offset_c = (((dev_priv->depth_pitch / 8) << 21) |
- (dev_priv->span_offset >> 5));
-
- dev_priv->sarea = drm_legacy_getsarea(dev);
- if (!dev_priv->sarea) {
- DRM_ERROR("could not find sarea!\n");
- dev->dev_private = (void *)dev_priv;
- r128_do_cleanup_cce(dev);
- return -EINVAL;
- }
-
- dev_priv->mmio = drm_legacy_findmap(dev, init->mmio_offset);
- if (!dev_priv->mmio) {
- DRM_ERROR("could not find mmio region!\n");
- dev->dev_private = (void *)dev_priv;
- r128_do_cleanup_cce(dev);
- return -EINVAL;
- }
- dev_priv->cce_ring = drm_legacy_findmap(dev, init->ring_offset);
- if (!dev_priv->cce_ring) {
- DRM_ERROR("could not find cce ring region!\n");
- dev->dev_private = (void *)dev_priv;
- r128_do_cleanup_cce(dev);
- return -EINVAL;
- }
- dev_priv->ring_rptr = drm_legacy_findmap(dev, init->ring_rptr_offset);
- if (!dev_priv->ring_rptr) {
- DRM_ERROR("could not find ring read pointer!\n");
- dev->dev_private = (void *)dev_priv;
- r128_do_cleanup_cce(dev);
- return -EINVAL;
- }
- dev->agp_buffer_token = init->buffers_offset;
- dev->agp_buffer_map = drm_legacy_findmap(dev, init->buffers_offset);
- if (!dev->agp_buffer_map) {
- DRM_ERROR("could not find dma buffer region!\n");
- dev->dev_private = (void *)dev_priv;
- r128_do_cleanup_cce(dev);
- return -EINVAL;
- }
-
- if (!dev_priv->is_pci) {
- dev_priv->agp_textures =
- drm_legacy_findmap(dev, init->agp_textures_offset);
- if (!dev_priv->agp_textures) {
- DRM_ERROR("could not find agp texture region!\n");
- dev->dev_private = (void *)dev_priv;
- r128_do_cleanup_cce(dev);
- return -EINVAL;
- }
- }
-
- dev_priv->sarea_priv =
- (drm_r128_sarea_t *) ((u8 *) dev_priv->sarea->handle +
- init->sarea_priv_offset);
-
-#if IS_ENABLED(CONFIG_AGP)
- if (!dev_priv->is_pci) {
- drm_legacy_ioremap_wc(dev_priv->cce_ring, dev);
- drm_legacy_ioremap_wc(dev_priv->ring_rptr, dev);
- drm_legacy_ioremap_wc(dev->agp_buffer_map, dev);
- if (!dev_priv->cce_ring->handle ||
- !dev_priv->ring_rptr->handle ||
- !dev->agp_buffer_map->handle) {
- DRM_ERROR("Could not ioremap agp regions!\n");
- dev->dev_private = (void *)dev_priv;
- r128_do_cleanup_cce(dev);
- return -ENOMEM;
- }
- } else
-#endif
- {
- dev_priv->cce_ring->handle =
- (void *)(unsigned long)dev_priv->cce_ring->offset;
- dev_priv->ring_rptr->handle =
- (void *)(unsigned long)dev_priv->ring_rptr->offset;
- dev->agp_buffer_map->handle =
- (void *)(unsigned long)dev->agp_buffer_map->offset;
- }
-
-#if IS_ENABLED(CONFIG_AGP)
- if (!dev_priv->is_pci)
- dev_priv->cce_buffers_offset = dev->agp->base;
- else
-#endif
- dev_priv->cce_buffers_offset = (unsigned long)dev->sg->virtual;
-
- dev_priv->ring.start = (u32 *) dev_priv->cce_ring->handle;
- dev_priv->ring.end = ((u32 *) dev_priv->cce_ring->handle
- + init->ring_size / sizeof(u32));
- dev_priv->ring.size = init->ring_size;
- dev_priv->ring.size_l2qw = order_base_2(init->ring_size / 8);
-
- dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof(u32)) - 1;
-
- dev_priv->ring.high_mark = 128;
-
- dev_priv->sarea_priv->last_frame = 0;
- R128_WRITE(R128_LAST_FRAME_REG, dev_priv->sarea_priv->last_frame);
-
- dev_priv->sarea_priv->last_dispatch = 0;
- R128_WRITE(R128_LAST_DISPATCH_REG, dev_priv->sarea_priv->last_dispatch);
-
-#if IS_ENABLED(CONFIG_AGP)
- if (dev_priv->is_pci) {
-#endif
- dev_priv->gart_info.table_mask = DMA_BIT_MASK(32);
- dev_priv->gart_info.gart_table_location = DRM_ATI_GART_MAIN;
- dev_priv->gart_info.table_size = R128_PCIGART_TABLE_SIZE;
- dev_priv->gart_info.addr = NULL;
- dev_priv->gart_info.bus_addr = 0;
- dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_PCI;
- rc = drm_ati_pcigart_init(dev, &dev_priv->gart_info);
- if (rc) {
- DRM_ERROR("failed to init PCI GART!\n");
- dev->dev_private = (void *)dev_priv;
- r128_do_cleanup_cce(dev);
- return rc;
- }
- R128_WRITE(R128_PCI_GART_PAGE, dev_priv->gart_info.bus_addr);
-#if IS_ENABLED(CONFIG_AGP)
- }
-#endif
-
- r128_cce_init_ring_buffer(dev, dev_priv);
- rc = r128_cce_load_microcode(dev_priv);
-
- dev->dev_private = (void *)dev_priv;
-
- r128_do_engine_reset(dev);
-
- if (rc) {
- DRM_ERROR("Failed to load firmware!\n");
- r128_do_cleanup_cce(dev);
- }
-
- return rc;
-}
-
-int r128_do_cleanup_cce(struct drm_device *dev)
-{
-
- /* Make sure interrupts are disabled here because the uninstall ioctl
- * may not have been called from userspace and after dev_private
- * is freed, it's too late.
- */
- if (dev->irq_enabled)
- drm_legacy_irq_uninstall(dev);
-
- if (dev->dev_private) {
- drm_r128_private_t *dev_priv = dev->dev_private;
-
-#if IS_ENABLED(CONFIG_AGP)
- if (!dev_priv->is_pci) {
- if (dev_priv->cce_ring != NULL)
- drm_legacy_ioremapfree(dev_priv->cce_ring, dev);
- if (dev_priv->ring_rptr != NULL)
- drm_legacy_ioremapfree(dev_priv->ring_rptr, dev);
- if (dev->agp_buffer_map != NULL) {
- drm_legacy_ioremapfree(dev->agp_buffer_map, dev);
- dev->agp_buffer_map = NULL;
- }
- } else
-#endif
- {
- if (dev_priv->gart_info.bus_addr)
- if (!drm_ati_pcigart_cleanup(dev,
- &dev_priv->gart_info))
- DRM_ERROR
- ("failed to cleanup PCI GART!\n");
- }
-
- kfree(dev->dev_private);
- dev->dev_private = NULL;
- }
-
- return 0;
-}
-
-int r128_cce_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- drm_r128_init_t *init = data;
-
- DRM_DEBUG("\n");
-
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- switch (init->func) {
- case R128_INIT_CCE:
- return r128_do_init_cce(dev, init);
- case R128_CLEANUP_CCE:
- return r128_do_cleanup_cce(dev);
- }
-
- return -EINVAL;
-}
-
-int r128_cce_start(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- drm_r128_private_t *dev_priv = dev->dev_private;
- DRM_DEBUG("\n");
-
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- DEV_INIT_TEST_WITH_RETURN(dev_priv);
-
- if (dev_priv->cce_running || dev_priv->cce_mode == R128_PM4_NONPM4) {
- DRM_DEBUG("while CCE running\n");
- return 0;
- }
-
- r128_do_cce_start(dev_priv);
-
- return 0;
-}
-
-/* Stop the CCE. The engine must have been idled before calling this
- * routine.
- */
-int r128_cce_stop(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- drm_r128_private_t *dev_priv = dev->dev_private;
- drm_r128_cce_stop_t *stop = data;
- int ret;
- DRM_DEBUG("\n");
-
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- DEV_INIT_TEST_WITH_RETURN(dev_priv);
-
- /* Flush any pending CCE commands. This ensures any outstanding
- * commands are exectuted by the engine before we turn it off.
- */
- if (stop->flush)
- r128_do_cce_flush(dev_priv);
-
- /* If we fail to make the engine go idle, we return an error
- * code so that the DRM ioctl wrapper can try again.
- */
- if (stop->idle) {
- ret = r128_do_cce_idle(dev_priv);
- if (ret)
- return ret;
- }
-
- /* Finally, we can turn off the CCE. If the engine isn't idle,
- * we will get some dropped triangles as they won't be fully
- * rendered before the CCE is shut down.
- */
- r128_do_cce_stop(dev_priv);
-
- /* Reset the engine */
- r128_do_engine_reset(dev);
-
- return 0;
-}
-
-/* Just reset the CCE ring. Called as part of an X Server engine reset.
- */
-int r128_cce_reset(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- drm_r128_private_t *dev_priv = dev->dev_private;
- DRM_DEBUG("\n");
-
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- DEV_INIT_TEST_WITH_RETURN(dev_priv);
-
- r128_do_cce_reset(dev_priv);
-
- /* The CCE is no longer running after an engine reset */
- dev_priv->cce_running = 0;
-
- return 0;
-}
-
-int r128_cce_idle(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- drm_r128_private_t *dev_priv = dev->dev_private;
- DRM_DEBUG("\n");
-
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- DEV_INIT_TEST_WITH_RETURN(dev_priv);
-
- if (dev_priv->cce_running)
- r128_do_cce_flush(dev_priv);
-
- return r128_do_cce_idle(dev_priv);
-}
-
-int r128_engine_reset(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- DRM_DEBUG("\n");
-
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- DEV_INIT_TEST_WITH_RETURN(dev->dev_private);
-
- return r128_do_engine_reset(dev);
-}
-
-int r128_fullscreen(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- return -EINVAL;
-}
-
-/* ================================================================
- * Freelist management
- */
-#define R128_BUFFER_USED 0xffffffff
-#define R128_BUFFER_FREE 0
-
-#if 0
-static int r128_freelist_init(struct drm_device *dev)
-{
- struct drm_device_dma *dma = dev->dma;
- drm_r128_private_t *dev_priv = dev->dev_private;
- struct drm_buf *buf;
- drm_r128_buf_priv_t *buf_priv;
- drm_r128_freelist_t *entry;
- int i;
-
- dev_priv->head = kzalloc(sizeof(drm_r128_freelist_t), GFP_KERNEL);
- if (dev_priv->head == NULL)
- return -ENOMEM;
-
- dev_priv->head->age = R128_BUFFER_USED;
-
- for (i = 0; i < dma->buf_count; i++) {
- buf = dma->buflist[i];
- buf_priv = buf->dev_private;
-
- entry = kmalloc(sizeof(drm_r128_freelist_t), GFP_KERNEL);
- if (!entry)
- return -ENOMEM;
-
- entry->age = R128_BUFFER_FREE;
- entry->buf = buf;
- entry->prev = dev_priv->head;
- entry->next = dev_priv->head->next;
- if (!entry->next)
- dev_priv->tail = entry;
-
- buf_priv->discard = 0;
- buf_priv->dispatched = 0;
- buf_priv->list_entry = entry;
-
- dev_priv->head->next = entry;
-
- if (dev_priv->head->next)
- dev_priv->head->next->prev = entry;
- }
-
- return 0;
-
-}
-#endif
-
-static struct drm_buf *r128_freelist_get(struct drm_device * dev)
-{
- struct drm_device_dma *dma = dev->dma;
- drm_r128_private_t *dev_priv = dev->dev_private;
- drm_r128_buf_priv_t *buf_priv;
- struct drm_buf *buf;
- int i, t;
-
- /* FIXME: Optimize -- use freelist code */
-
- for (i = 0; i < dma->buf_count; i++) {
- buf = dma->buflist[i];
- buf_priv = buf->dev_private;
- if (!buf->file_priv)
- return buf;
- }
-
- for (t = 0; t < dev_priv->usec_timeout; t++) {
- u32 done_age = R128_READ(R128_LAST_DISPATCH_REG);
-
- for (i = 0; i < dma->buf_count; i++) {
- buf = dma->buflist[i];
- buf_priv = buf->dev_private;
- if (buf->pending && buf_priv->age <= done_age) {
- /* The buffer has been processed, so it
- * can now be used.
- */
- buf->pending = 0;
- return buf;
- }
- }
- udelay(1);
- }
-
- DRM_DEBUG("returning NULL!\n");
- return NULL;
-}
-
-void r128_freelist_reset(struct drm_device *dev)
-{
- struct drm_device_dma *dma = dev->dma;
- int i;
-
- for (i = 0; i < dma->buf_count; i++) {
- struct drm_buf *buf = dma->buflist[i];
- drm_r128_buf_priv_t *buf_priv = buf->dev_private;
- buf_priv->age = 0;
- }
-}
-
-/* ================================================================
- * CCE command submission
- */
-
-int r128_wait_ring(drm_r128_private_t *dev_priv, int n)
-{
- drm_r128_ring_buffer_t *ring = &dev_priv->ring;
- int i;
-
- for (i = 0; i < dev_priv->usec_timeout; i++) {
- r128_update_ring_snapshot(dev_priv);
- if (ring->space >= n)
- return 0;
- udelay(1);
- }
-
- /* FIXME: This is being ignored... */
- DRM_ERROR("failed!\n");
- return -EBUSY;
-}
-
-static int r128_cce_get_buffers(struct drm_device *dev,
- struct drm_file *file_priv,
- struct drm_dma *d)
-{
- int i;
- struct drm_buf *buf;
-
- for (i = d->granted_count; i < d->request_count; i++) {
- buf = r128_freelist_get(dev);
- if (!buf)
- return -EAGAIN;
-
- buf->file_priv = file_priv;
-
- if (copy_to_user(&d->request_indices[i], &buf->idx,
- sizeof(buf->idx)))
- return -EFAULT;
- if (copy_to_user(&d->request_sizes[i], &buf->total,
- sizeof(buf->total)))
- return -EFAULT;
-
- d->granted_count++;
- }
- return 0;
-}
-
-int r128_cce_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- struct drm_device_dma *dma = dev->dma;
- int ret = 0;
- struct drm_dma *d = data;
-
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- /* Please don't send us buffers.
- */
- if (d->send_count != 0) {
- DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n",
- task_pid_nr(current), d->send_count);
- return -EINVAL;
- }
-
- /* We'll send you buffers.
- */
- if (d->request_count < 0 || d->request_count > dma->buf_count) {
- DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
- task_pid_nr(current), d->request_count, dma->buf_count);
- return -EINVAL;
- }
-
- d->granted_count = 0;
-
- if (d->request_count)
- ret = r128_cce_get_buffers(dev, file_priv, d);
-
- return ret;
-}
diff --git a/drivers/gpu/drm/r128/r128_drv.c b/drivers/gpu/drm/r128/r128_drv.c
deleted file mode 100644
index e35a3a1449bd..000000000000
--- a/drivers/gpu/drm/r128/r128_drv.c
+++ /dev/null
@@ -1,116 +0,0 @@
-/* r128_drv.c -- ATI Rage 128 driver -*- linux-c -*-
- * Created: Mon Dec 13 09:47:27 1999 by faith@precisioninsight.com
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Rickard E. (Rik) Faith <faith@valinux.com>
- * Gareth Hughes <gareth@valinux.com>
- */
-
-#include <linux/module.h>
-#include <linux/pci.h>
-
-#include <drm/drm_drv.h>
-#include <drm/drm_file.h>
-#include <drm/drm_pciids.h>
-#include <drm/drm_vblank.h>
-#include <drm/r128_drm.h>
-
-#include "r128_drv.h"
-
-static struct pci_device_id pciidlist[] = {
- r128_PCI_IDS
-};
-
-static const struct file_operations r128_driver_fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = drm_ioctl,
- .mmap = drm_legacy_mmap,
- .poll = drm_poll,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = r128_compat_ioctl,
-#endif
- .llseek = noop_llseek,
-};
-
-static struct drm_driver driver = {
- .driver_features =
- DRIVER_USE_AGP | DRIVER_PCI_DMA | DRIVER_SG | DRIVER_LEGACY |
- DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ,
- .dev_priv_size = sizeof(drm_r128_buf_priv_t),
- .load = r128_driver_load,
- .preclose = r128_driver_preclose,
- .lastclose = r128_driver_lastclose,
- .get_vblank_counter = r128_get_vblank_counter,
- .enable_vblank = r128_enable_vblank,
- .disable_vblank = r128_disable_vblank,
- .irq_preinstall = r128_driver_irq_preinstall,
- .irq_postinstall = r128_driver_irq_postinstall,
- .irq_uninstall = r128_driver_irq_uninstall,
- .irq_handler = r128_driver_irq_handler,
- .ioctls = r128_ioctls,
- .dma_ioctl = r128_cce_buffers,
- .fops = &r128_driver_fops,
- .name = DRIVER_NAME,
- .desc = DRIVER_DESC,
- .date = DRIVER_DATE,
- .major = DRIVER_MAJOR,
- .minor = DRIVER_MINOR,
- .patchlevel = DRIVER_PATCHLEVEL,
-};
-
-int r128_driver_load(struct drm_device *dev, unsigned long flags)
-{
- struct pci_dev *pdev = to_pci_dev(dev->dev);
-
- pci_set_master(pdev);
- return drm_vblank_init(dev, 1);
-}
-
-static struct pci_driver r128_pci_driver = {
- .name = DRIVER_NAME,
- .id_table = pciidlist,
-};
-
-static int __init r128_init(void)
-{
- driver.num_ioctls = r128_max_ioctl;
-
- return drm_legacy_pci_init(&driver, &r128_pci_driver);
-}
-
-static void __exit r128_exit(void)
-{
- drm_legacy_pci_exit(&driver, &r128_pci_driver);
-}
-
-module_init(r128_init);
-module_exit(r128_exit);
-
-MODULE_AUTHOR(DRIVER_AUTHOR);
-MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
deleted file mode 100644
index 970e192b0d51..000000000000
--- a/drivers/gpu/drm/r128/r128_drv.h
+++ /dev/null
@@ -1,544 +0,0 @@
-/* r128_drv.h -- Private header for r128 driver -*- linux-c -*-
- * Created: Mon Dec 13 09:51:11 1999 by faith@precisioninsight.com
- */
-/*
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Rickard E. (Rik) Faith <faith@valinux.com>
- * Kevin E. Martin <martin@valinux.com>
- * Gareth Hughes <gareth@valinux.com>
- * Michel Dänzer <daenzerm@student.ethz.ch>
- */
-
-#ifndef __R128_DRV_H__
-#define __R128_DRV_H__
-
-#include <linux/delay.h>
-#include <linux/io.h>
-#include <linux/irqreturn.h>
-
-#include <drm/drm_ioctl.h>
-#include <drm/drm_legacy.h>
-#include <drm/r128_drm.h>
-
-#include "ati_pcigart.h"
-
-/* General customization:
- */
-#define DRIVER_AUTHOR "Gareth Hughes, VA Linux Systems Inc."
-
-#define DRIVER_NAME "r128"
-#define DRIVER_DESC "ATI Rage 128"
-#define DRIVER_DATE "20030725"
-
-/* Interface history:
- *
- * ?? - ??
- * 2.4 - Add support for ycbcr textures (no new ioctls)
- * 2.5 - Add FLIP ioctl, disable FULLSCREEN.
- */
-#define DRIVER_MAJOR 2
-#define DRIVER_MINOR 5
-#define DRIVER_PATCHLEVEL 0
-
-#define GET_RING_HEAD(dev_priv) R128_READ(R128_PM4_BUFFER_DL_RPTR)
-
-typedef struct drm_r128_freelist {
- unsigned int age;
- struct drm_buf *buf;
- struct drm_r128_freelist *next;
- struct drm_r128_freelist *prev;
-} drm_r128_freelist_t;
-
-typedef struct drm_r128_ring_buffer {
- u32 *start;
- u32 *end;
- int size;
- int size_l2qw;
-
- u32 tail;
- u32 tail_mask;
- int space;
-
- int high_mark;
-} drm_r128_ring_buffer_t;
-
-typedef struct drm_r128_private {
- drm_r128_ring_buffer_t ring;
- drm_r128_sarea_t *sarea_priv;
-
- int cce_mode;
- int cce_fifo_size;
- int cce_running;
-
- drm_r128_freelist_t *head;
- drm_r128_freelist_t *tail;
-
- int usec_timeout;
- int is_pci;
- unsigned long cce_buffers_offset;
-
- atomic_t idle_count;
-
- int page_flipping;
- int current_page;
- u32 crtc_offset;
- u32 crtc_offset_cntl;
-
- atomic_t vbl_received;
-
- u32 color_fmt;
- unsigned int front_offset;
- unsigned int front_pitch;
- unsigned int back_offset;
- unsigned int back_pitch;
-
- u32 depth_fmt;
- unsigned int depth_offset;
- unsigned int depth_pitch;
- unsigned int span_offset;
-
- u32 front_pitch_offset_c;
- u32 back_pitch_offset_c;
- u32 depth_pitch_offset_c;
- u32 span_pitch_offset_c;
-
- drm_local_map_t *sarea;
- drm_local_map_t *mmio;
- drm_local_map_t *cce_ring;
- drm_local_map_t *ring_rptr;
- drm_local_map_t *agp_textures;
- struct drm_ati_pcigart_info gart_info;
-} drm_r128_private_t;
-
-typedef struct drm_r128_buf_priv {
- u32 age;
- int prim;
- int discard;
- int dispatched;
- drm_r128_freelist_t *list_entry;
-} drm_r128_buf_priv_t;
-
-extern const struct drm_ioctl_desc r128_ioctls[];
-extern int r128_max_ioctl;
-
- /* r128_cce.c */
-extern int r128_cce_init(struct drm_device *dev, void *data, struct drm_file *file_priv);
-extern int r128_cce_start(struct drm_device *dev, void *data, struct drm_file *file_priv);
-extern int r128_cce_stop(struct drm_device *dev, void *data, struct drm_file *file_priv);
-extern int r128_cce_reset(struct drm_device *dev, void *data, struct drm_file *file_priv);
-extern int r128_cce_idle(struct drm_device *dev, void *data, struct drm_file *file_priv);
-extern int r128_engine_reset(struct drm_device *dev, void *data, struct drm_file *file_priv);
-extern int r128_fullscreen(struct drm_device *dev, void *data, struct drm_file *file_priv);
-extern int r128_cce_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv);
-
-extern int r128_cce_stipple(struct drm_device *dev, void *data, struct drm_file *file_priv);
-extern int r128_cce_depth(struct drm_device *dev, void *data, struct drm_file *file_priv);
-extern int r128_getparam(struct drm_device *dev, void *data, struct drm_file *file_priv);
-
-extern void r128_freelist_reset(struct drm_device *dev);
-
-extern int r128_wait_ring(drm_r128_private_t *dev_priv, int n);
-
-extern int r128_do_cce_idle(drm_r128_private_t *dev_priv);
-extern int r128_do_cleanup_cce(struct drm_device *dev);
-
-extern int r128_enable_vblank(struct drm_device *dev, unsigned int pipe);
-extern void r128_disable_vblank(struct drm_device *dev, unsigned int pipe);
-extern u32 r128_get_vblank_counter(struct drm_device *dev, unsigned int pipe);
-extern irqreturn_t r128_driver_irq_handler(int irq, void *arg);
-extern void r128_driver_irq_preinstall(struct drm_device *dev);
-extern int r128_driver_irq_postinstall(struct drm_device *dev);
-extern void r128_driver_irq_uninstall(struct drm_device *dev);
-extern void r128_driver_lastclose(struct drm_device *dev);
-extern int r128_driver_load(struct drm_device *dev, unsigned long flags);
-extern void r128_driver_preclose(struct drm_device *dev,
- struct drm_file *file_priv);
-
-extern long r128_compat_ioctl(struct file *filp, unsigned int cmd,
- unsigned long arg);
-
-/* Register definitions, register access macros and drmAddMap constants
- * for Rage 128 kernel driver.
- */
-
-#define R128_AUX_SC_CNTL 0x1660
-# define R128_AUX1_SC_EN (1 << 0)
-# define R128_AUX1_SC_MODE_OR (0 << 1)
-# define R128_AUX1_SC_MODE_NAND (1 << 1)
-# define R128_AUX2_SC_EN (1 << 2)
-# define R128_AUX2_SC_MODE_OR (0 << 3)
-# define R128_AUX2_SC_MODE_NAND (1 << 3)
-# define R128_AUX3_SC_EN (1 << 4)
-# define R128_AUX3_SC_MODE_OR (0 << 5)
-# define R128_AUX3_SC_MODE_NAND (1 << 5)
-#define R128_AUX1_SC_LEFT 0x1664
-#define R128_AUX1_SC_RIGHT 0x1668
-#define R128_AUX1_SC_TOP 0x166c
-#define R128_AUX1_SC_BOTTOM 0x1670
-#define R128_AUX2_SC_LEFT 0x1674
-#define R128_AUX2_SC_RIGHT 0x1678
-#define R128_AUX2_SC_TOP 0x167c
-#define R128_AUX2_SC_BOTTOM 0x1680
-#define R128_AUX3_SC_LEFT 0x1684
-#define R128_AUX3_SC_RIGHT 0x1688
-#define R128_AUX3_SC_TOP 0x168c
-#define R128_AUX3_SC_BOTTOM 0x1690
-
-#define R128_BRUSH_DATA0 0x1480
-#define R128_BUS_CNTL 0x0030
-# define R128_BUS_MASTER_DIS (1 << 6)
-
-#define R128_CLOCK_CNTL_INDEX 0x0008
-#define R128_CLOCK_CNTL_DATA 0x000c
-# define R128_PLL_WR_EN (1 << 7)
-#define R128_CONSTANT_COLOR_C 0x1d34
-#define R128_CRTC_OFFSET 0x0224
-#define R128_CRTC_OFFSET_CNTL 0x0228
-# define R128_CRTC_OFFSET_FLIP_CNTL (1 << 16)
-
-#define R128_DP_GUI_MASTER_CNTL 0x146c
-# define R128_GMC_SRC_PITCH_OFFSET_CNTL (1 << 0)
-# define R128_GMC_DST_PITCH_OFFSET_CNTL (1 << 1)
-# define R128_GMC_BRUSH_SOLID_COLOR (13 << 4)
-# define R128_GMC_BRUSH_NONE (15 << 4)
-# define R128_GMC_DST_16BPP (4 << 8)
-# define R128_GMC_DST_24BPP (5 << 8)
-# define R128_GMC_DST_32BPP (6 << 8)
-# define R128_GMC_DST_DATATYPE_SHIFT 8
-# define R128_GMC_SRC_DATATYPE_COLOR (3 << 12)
-# define R128_DP_SRC_SOURCE_MEMORY (2 << 24)
-# define R128_DP_SRC_SOURCE_HOST_DATA (3 << 24)
-# define R128_GMC_CLR_CMP_CNTL_DIS (1 << 28)
-# define R128_GMC_AUX_CLIP_DIS (1 << 29)
-# define R128_GMC_WR_MSK_DIS (1 << 30)
-# define R128_ROP3_S 0x00cc0000
-# define R128_ROP3_P 0x00f00000
-#define R128_DP_WRITE_MASK 0x16cc
-#define R128_DST_PITCH_OFFSET_C 0x1c80
-# define R128_DST_TILE (1 << 31)
-
-#define R128_GEN_INT_CNTL 0x0040
-# define R128_CRTC_VBLANK_INT_EN (1 << 0)
-#define R128_GEN_INT_STATUS 0x0044
-# define R128_CRTC_VBLANK_INT (1 << 0)
-# define R128_CRTC_VBLANK_INT_AK (1 << 0)
-#define R128_GEN_RESET_CNTL 0x00f0
-# define R128_SOFT_RESET_GUI (1 << 0)
-
-#define R128_GUI_SCRATCH_REG0 0x15e0
-#define R128_GUI_SCRATCH_REG1 0x15e4
-#define R128_GUI_SCRATCH_REG2 0x15e8
-#define R128_GUI_SCRATCH_REG3 0x15ec
-#define R128_GUI_SCRATCH_REG4 0x15f0
-#define R128_GUI_SCRATCH_REG5 0x15f4
-
-#define R128_GUI_STAT 0x1740
-# define R128_GUI_FIFOCNT_MASK 0x0fff
-# define R128_GUI_ACTIVE (1 << 31)
-
-#define R128_MCLK_CNTL 0x000f
-# define R128_FORCE_GCP (1 << 16)
-# define R128_FORCE_PIPE3D_CP (1 << 17)
-# define R128_FORCE_RCP (1 << 18)
-
-#define R128_PC_GUI_CTLSTAT 0x1748
-#define R128_PC_NGUI_CTLSTAT 0x0184
-# define R128_PC_FLUSH_GUI (3 << 0)
-# define R128_PC_RI_GUI (1 << 2)
-# define R128_PC_FLUSH_ALL 0x00ff
-# define R128_PC_BUSY (1 << 31)
-
-#define R128_PCI_GART_PAGE 0x017c
-#define R128_PRIM_TEX_CNTL_C 0x1cb0
-
-#define R128_SCALE_3D_CNTL 0x1a00
-#define R128_SEC_TEX_CNTL_C 0x1d00
-#define R128_SEC_TEXTURE_BORDER_COLOR_C 0x1d3c
-#define R128_SETUP_CNTL 0x1bc4
-#define R128_STEN_REF_MASK_C 0x1d40
-
-#define R128_TEX_CNTL_C 0x1c9c
-# define R128_TEX_CACHE_FLUSH (1 << 23)
-
-#define R128_WAIT_UNTIL 0x1720
-# define R128_EVENT_CRTC_OFFSET (1 << 0)
-#define R128_WINDOW_XY_OFFSET 0x1bcc
-
-/* CCE registers
- */
-#define R128_PM4_BUFFER_OFFSET 0x0700
-#define R128_PM4_BUFFER_CNTL 0x0704
-# define R128_PM4_MASK (15 << 28)
-# define R128_PM4_NONPM4 (0 << 28)
-# define R128_PM4_192PIO (1 << 28)
-# define R128_PM4_192BM (2 << 28)
-# define R128_PM4_128PIO_64INDBM (3 << 28)
-# define R128_PM4_128BM_64INDBM (4 << 28)
-# define R128_PM4_64PIO_128INDBM (5 << 28)
-# define R128_PM4_64BM_128INDBM (6 << 28)
-# define R128_PM4_64PIO_64VCBM_64INDBM (7 << 28)
-# define R128_PM4_64BM_64VCBM_64INDBM (8U << 28)
-# define R128_PM4_64PIO_64VCPIO_64INDPIO (15U << 28)
-# define R128_PM4_BUFFER_CNTL_NOUPDATE (1 << 27)
-
-#define R128_PM4_BUFFER_WM_CNTL 0x0708
-# define R128_WMA_SHIFT 0
-# define R128_WMB_SHIFT 8
-# define R128_WMC_SHIFT 16
-# define R128_WB_WM_SHIFT 24
-
-#define R128_PM4_BUFFER_DL_RPTR_ADDR 0x070c
-#define R128_PM4_BUFFER_DL_RPTR 0x0710
-#define R128_PM4_BUFFER_DL_WPTR 0x0714
-# define R128_PM4_BUFFER_DL_DONE (1 << 31)
-
-#define R128_PM4_VC_FPU_SETUP 0x071c
-
-#define R128_PM4_IW_INDOFF 0x0738
-#define R128_PM4_IW_INDSIZE 0x073c
-
-#define R128_PM4_STAT 0x07b8
-# define R128_PM4_FIFOCNT_MASK 0x0fff
-# define R128_PM4_BUSY (1 << 16)
-# define R128_PM4_GUI_ACTIVE (1 << 31)
-
-#define R128_PM4_MICROCODE_ADDR 0x07d4
-#define R128_PM4_MICROCODE_RADDR 0x07d8
-#define R128_PM4_MICROCODE_DATAH 0x07dc
-#define R128_PM4_MICROCODE_DATAL 0x07e0
-
-#define R128_PM4_BUFFER_ADDR 0x07f0
-#define R128_PM4_MICRO_CNTL 0x07fc
-# define R128_PM4_MICRO_FREERUN (1 << 30)
-
-#define R128_PM4_FIFO_DATA_EVEN 0x1000
-#define R128_PM4_FIFO_DATA_ODD 0x1004
-
-/* CCE command packets
- */
-#define R128_CCE_PACKET0 0x00000000
-#define R128_CCE_PACKET1 0x40000000
-#define R128_CCE_PACKET2 0x80000000
-#define R128_CCE_PACKET3 0xC0000000
-# define R128_CNTL_HOSTDATA_BLT 0x00009400
-# define R128_CNTL_PAINT_MULTI 0x00009A00
-# define R128_CNTL_BITBLT_MULTI 0x00009B00
-# define R128_3D_RNDR_GEN_INDX_PRIM 0x00002300
-
-#define R128_CCE_PACKET_MASK 0xC0000000
-#define R128_CCE_PACKET_COUNT_MASK 0x3fff0000
-#define R128_CCE_PACKET0_REG_MASK 0x000007ff
-#define R128_CCE_PACKET1_REG0_MASK 0x000007ff
-#define R128_CCE_PACKET1_REG1_MASK 0x003ff800
-
-#define R128_CCE_VC_CNTL_PRIM_TYPE_NONE 0x00000000
-#define R128_CCE_VC_CNTL_PRIM_TYPE_POINT 0x00000001
-#define R128_CCE_VC_CNTL_PRIM_TYPE_LINE 0x00000002
-#define R128_CCE_VC_CNTL_PRIM_TYPE_POLY_LINE 0x00000003
-#define R128_CCE_VC_CNTL_PRIM_TYPE_TRI_LIST 0x00000004
-#define R128_CCE_VC_CNTL_PRIM_TYPE_TRI_FAN 0x00000005
-#define R128_CCE_VC_CNTL_PRIM_TYPE_TRI_STRIP 0x00000006
-#define R128_CCE_VC_CNTL_PRIM_TYPE_TRI_TYPE2 0x00000007
-#define R128_CCE_VC_CNTL_PRIM_WALK_IND 0x00000010
-#define R128_CCE_VC_CNTL_PRIM_WALK_LIST 0x00000020
-#define R128_CCE_VC_CNTL_PRIM_WALK_RING 0x00000030
-#define R128_CCE_VC_CNTL_NUM_SHIFT 16
-
-#define R128_DATATYPE_VQ 0
-#define R128_DATATYPE_CI4 1
-#define R128_DATATYPE_CI8 2
-#define R128_DATATYPE_ARGB1555 3
-#define R128_DATATYPE_RGB565 4
-#define R128_DATATYPE_RGB888 5
-#define R128_DATATYPE_ARGB8888 6
-#define R128_DATATYPE_RGB332 7
-#define R128_DATATYPE_Y8 8
-#define R128_DATATYPE_RGB8 9
-#define R128_DATATYPE_CI16 10
-#define R128_DATATYPE_YVYU422 11
-#define R128_DATATYPE_VYUY422 12
-#define R128_DATATYPE_AYUV444 14
-#define R128_DATATYPE_ARGB4444 15
-
-/* Constants */
-#define R128_AGP_OFFSET 0x02000000
-
-#define R128_WATERMARK_L 16
-#define R128_WATERMARK_M 8
-#define R128_WATERMARK_N 8
-#define R128_WATERMARK_K 128
-
-#define R128_MAX_USEC_TIMEOUT 100000 /* 100 ms */
-
-#define R128_LAST_FRAME_REG R128_GUI_SCRATCH_REG0
-#define R128_LAST_DISPATCH_REG R128_GUI_SCRATCH_REG1
-#define R128_MAX_VB_AGE 0x7fffffff
-#define R128_MAX_VB_VERTS (0xffff)
-
-#define R128_RING_HIGH_MARK 128
-
-#define R128_PERFORMANCE_BOXES 0
-
-#define R128_PCIGART_TABLE_SIZE 32768
-
-#define R128_READ(reg) readl(((void __iomem *)dev_priv->mmio->handle) + (reg))
-#define R128_WRITE(reg, val) writel(val, ((void __iomem *)dev_priv->mmio->handle) + (reg))
-#define R128_READ8(reg) readb(((void __iomem *)dev_priv->mmio->handle) + (reg))
-#define R128_WRITE8(reg, val) writeb(val, ((void __iomem *)dev_priv->mmio->handle) + (reg))
-
-#define R128_WRITE_PLL(addr, val) \
-do { \
- R128_WRITE8(R128_CLOCK_CNTL_INDEX, \
- ((addr) & 0x1f) | R128_PLL_WR_EN); \
- R128_WRITE(R128_CLOCK_CNTL_DATA, (val)); \
-} while (0)
-
-#define CCE_PACKET0(reg, n) (R128_CCE_PACKET0 | \
- ((n) << 16) | ((reg) >> 2))
-#define CCE_PACKET1(reg0, reg1) (R128_CCE_PACKET1 | \
- (((reg1) >> 2) << 11) | ((reg0) >> 2))
-#define CCE_PACKET2() (R128_CCE_PACKET2)
-#define CCE_PACKET3(pkt, n) (R128_CCE_PACKET3 | \
- (pkt) | ((n) << 16))
-
-static __inline__ void r128_update_ring_snapshot(drm_r128_private_t *dev_priv)
-{
- drm_r128_ring_buffer_t *ring = &dev_priv->ring;
- ring->space = (GET_RING_HEAD(dev_priv) - ring->tail) * sizeof(u32);
- if (ring->space <= 0)
- ring->space += ring->size;
-}
-
-/* ================================================================
- * Misc helper macros
- */
-
-#define DEV_INIT_TEST_WITH_RETURN(_dev_priv) \
-do { \
- if (!_dev_priv) { \
- DRM_ERROR("called with no initialization\n"); \
- return -EINVAL; \
- } \
-} while (0)
-
-#define RING_SPACE_TEST_WITH_RETURN(dev_priv) \
-do { \
- drm_r128_ring_buffer_t *ring = &dev_priv->ring; int i; \
- if (ring->space < ring->high_mark) { \
- for (i = 0 ; i < dev_priv->usec_timeout ; i++) { \
- r128_update_ring_snapshot(dev_priv); \
- if (ring->space >= ring->high_mark) \
- goto __ring_space_done; \
- udelay(1); \
- } \
- DRM_ERROR("ring space check failed!\n"); \
- return -EBUSY; \
- } \
- __ring_space_done: \
- ; \
-} while (0)
-
-#define VB_AGE_TEST_WITH_RETURN(dev_priv) \
-do { \
- drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv; \
- if (sarea_priv->last_dispatch >= R128_MAX_VB_AGE) { \
- int __ret = r128_do_cce_idle(dev_priv); \
- if (__ret) \
- return __ret; \
- sarea_priv->last_dispatch = 0; \
- r128_freelist_reset(dev); \
- } \
-} while (0)
-
-#define R128_WAIT_UNTIL_PAGE_FLIPPED() do { \
- OUT_RING(CCE_PACKET0(R128_WAIT_UNTIL, 0)); \
- OUT_RING(R128_EVENT_CRTC_OFFSET); \
-} while (0)
-
-/* ================================================================
- * Ring control
- */
-
-#define R128_VERBOSE 0
-
-#define RING_LOCALS \
- int write, _nr; unsigned int tail_mask; volatile u32 *ring;
-
-#define BEGIN_RING(n) do { \
- if (R128_VERBOSE) \
- DRM_INFO("BEGIN_RING(%d)\n", (n)); \
- if (dev_priv->ring.space <= (n) * sizeof(u32)) { \
- COMMIT_RING(); \
- r128_wait_ring(dev_priv, (n) * sizeof(u32)); \
- } \
- _nr = n; dev_priv->ring.space -= (n) * sizeof(u32); \
- ring = dev_priv->ring.start; \
- write = dev_priv->ring.tail; \
- tail_mask = dev_priv->ring.tail_mask; \
-} while (0)
-
-/* You can set this to zero if you want. If the card locks up, you'll
- * need to keep this set. It works around a bug in early revs of the
- * Rage 128 chipset, where the CCE would read 32 dwords past the end of
- * the ring buffer before wrapping around.
- */
-#define R128_BROKEN_CCE 1
-
-#define ADVANCE_RING() do { \
- if (R128_VERBOSE) \
- DRM_INFO("ADVANCE_RING() wr=0x%06x tail=0x%06x\n", \
- write, dev_priv->ring.tail); \
- if (R128_BROKEN_CCE && write < 32) \
- memcpy(dev_priv->ring.end, \
- dev_priv->ring.start, \
- write * sizeof(u32)); \
- if (((dev_priv->ring.tail + _nr) & tail_mask) != write) \
- DRM_ERROR( \
- "ADVANCE_RING(): mismatch: nr: %x write: %x line: %d\n", \
- ((dev_priv->ring.tail + _nr) & tail_mask), \
- write, __LINE__); \
- else \
- dev_priv->ring.tail = write; \
-} while (0)
-
-#define COMMIT_RING() do { \
- if (R128_VERBOSE) \
- DRM_INFO("COMMIT_RING() tail=0x%06x\n", \
- dev_priv->ring.tail); \
- mb(); \
- R128_WRITE(R128_PM4_BUFFER_DL_WPTR, dev_priv->ring.tail); \
- R128_READ(R128_PM4_BUFFER_DL_WPTR); \
-} while (0)
-
-#define OUT_RING(x) do { \
- if (R128_VERBOSE) \
- DRM_INFO(" OUT_RING( 0x%08x ) at 0x%x\n", \
- (unsigned int)(x), write); \
- ring[write++] = cpu_to_le32(x); \
- write &= tail_mask; \
-} while (0)
-
-#endif /* __R128_DRV_H__ */
diff --git a/drivers/gpu/drm/r128/r128_ioc32.c b/drivers/gpu/drm/r128/r128_ioc32.c
deleted file mode 100644
index cdeb1db87222..000000000000
--- a/drivers/gpu/drm/r128/r128_ioc32.c
+++ /dev/null
@@ -1,199 +0,0 @@
-/*
- * \file r128_ioc32.c
- *
- * 32-bit ioctl compatibility routines for the R128 DRM.
- *
- * \author Dave Airlie <airlied@linux.ie> with code from patches by Egbert Eich
- *
- * Copyright (C) Paul Mackerras 2005
- * Copyright (C) Egbert Eich 2003,2004
- * Copyright (C) Dave Airlie 2005
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-
-#include <linux/compat.h>
-
-#include <drm/r128_drm.h>
-
-#include "r128_drv.h"
-
-typedef struct drm_r128_init32 {
- int func;
- unsigned int sarea_priv_offset;
- int is_pci;
- int cce_mode;
- int cce_secure;
- int ring_size;
- int usec_timeout;
-
- unsigned int fb_bpp;
- unsigned int front_offset, front_pitch;
- unsigned int back_offset, back_pitch;
- unsigned int depth_bpp;
- unsigned int depth_offset, depth_pitch;
- unsigned int span_offset;
-
- unsigned int fb_offset;
- unsigned int mmio_offset;
- unsigned int ring_offset;
- unsigned int ring_rptr_offset;
- unsigned int buffers_offset;
- unsigned int agp_textures_offset;
-} drm_r128_init32_t;
-
-static int compat_r128_init(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_r128_init32_t init32;
- drm_r128_init_t init;
-
- if (copy_from_user(&init32, (void __user *)arg, sizeof(init32)))
- return -EFAULT;
-
- init.func = init32.func;
- init.sarea_priv_offset = init32.sarea_priv_offset;
- init.is_pci = init32.is_pci;
- init.cce_mode = init32.cce_mode;
- init.cce_secure = init32.cce_secure;
- init.ring_size = init32.ring_size;
- init.usec_timeout = init32.usec_timeout;
- init.fb_bpp = init32.fb_bpp;
- init.front_offset = init32.front_offset;
- init.front_pitch = init32.front_pitch;
- init.back_offset = init32.back_offset;
- init.back_pitch = init32.back_pitch;
- init.depth_bpp = init32.depth_bpp;
- init.depth_offset = init32.depth_offset;
- init.depth_pitch = init32.depth_pitch;
- init.span_offset = init32.span_offset;
- init.fb_offset = init32.fb_offset;
- init.mmio_offset = init32.mmio_offset;
- init.ring_offset = init32.ring_offset;
- init.ring_rptr_offset = init32.ring_rptr_offset;
- init.buffers_offset = init32.buffers_offset;
- init.agp_textures_offset = init32.agp_textures_offset;
-
- return drm_ioctl_kernel(file, r128_cce_init, &init,
- DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY);
-}
-
-typedef struct drm_r128_depth32 {
- int func;
- int n;
- u32 x;
- u32 y;
- u32 buffer;
- u32 mask;
-} drm_r128_depth32_t;
-
-static int compat_r128_depth(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_r128_depth32_t depth32;
- drm_r128_depth_t depth;
-
- if (copy_from_user(&depth32, (void __user *)arg, sizeof(depth32)))
- return -EFAULT;
-
- depth.func = depth32.func;
- depth.n = depth32.n;
- depth.x = compat_ptr(depth32.x);
- depth.y = compat_ptr(depth32.y);
- depth.buffer = compat_ptr(depth32.buffer);
- depth.mask = compat_ptr(depth32.mask);
-
- return drm_ioctl_kernel(file, r128_cce_depth, &depth, DRM_AUTH);
-}
-
-typedef struct drm_r128_stipple32 {
- u32 mask;
-} drm_r128_stipple32_t;
-
-static int compat_r128_stipple(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_r128_stipple32_t stipple32;
- drm_r128_stipple_t stipple;
-
- if (copy_from_user(&stipple32, (void __user *)arg, sizeof(stipple32)))
- return -EFAULT;
-
- stipple.mask = compat_ptr(stipple32.mask);
-
- return drm_ioctl_kernel(file, r128_cce_stipple, &stipple, DRM_AUTH);
-}
-
-typedef struct drm_r128_getparam32 {
- int param;
- u32 value;
-} drm_r128_getparam32_t;
-
-static int compat_r128_getparam(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- drm_r128_getparam32_t getparam32;
- drm_r128_getparam_t getparam;
-
- if (copy_from_user(&getparam32, (void __user *)arg, sizeof(getparam32)))
- return -EFAULT;
-
- getparam.param = getparam32.param;
- getparam.value = compat_ptr(getparam32.value);
-
- return drm_ioctl_kernel(file, r128_getparam, &getparam, DRM_AUTH);
-}
-
-drm_ioctl_compat_t *r128_compat_ioctls[] = {
- [DRM_R128_INIT] = compat_r128_init,
- [DRM_R128_DEPTH] = compat_r128_depth,
- [DRM_R128_STIPPLE] = compat_r128_stipple,
- [DRM_R128_GETPARAM] = compat_r128_getparam,
-};
-
-/**
- * r128_compat_ioctl - Called whenever a 32-bit process running under
- * a 64-bit kernel performs an ioctl on /dev/dri/card<n>.
- *
- * @filp: file pointer.
- * @cmd: command.
- * @arg: user argument.
- * return: zero on success or negative number on failure.
- */
-long r128_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
-{
- unsigned int nr = DRM_IOCTL_NR(cmd);
- drm_ioctl_compat_t *fn = NULL;
- int ret;
-
- if (nr < DRM_COMMAND_BASE)
- return drm_compat_ioctl(filp, cmd, arg);
-
- if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(r128_compat_ioctls))
- fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
-
- if (fn != NULL)
- ret = (*fn) (filp, cmd, arg);
- else
- ret = drm_ioctl(filp, cmd, arg);
-
- return ret;
-}
diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
deleted file mode 100644
index d84e9c96e20a..000000000000
--- a/drivers/gpu/drm/r128/r128_irq.c
+++ /dev/null
@@ -1,118 +0,0 @@
-/* r128_irq.c -- IRQ handling for radeon -*- linux-c -*- */
-/*
- * Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
- *
- * The Weather Channel (TM) funded Tungsten Graphics to develop the
- * initial release of the Radeon 8500 driver under the XFree86 license.
- * This notice must be preserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Keith Whitwell <keith@tungstengraphics.com>
- * Eric Anholt <anholt@FreeBSD.org>
- */
-
-#include <drm/drm_device.h>
-#include <drm/drm_print.h>
-#include <drm/drm_vblank.h>
-#include <drm/r128_drm.h>
-
-#include "r128_drv.h"
-
-u32 r128_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
-{
- const drm_r128_private_t *dev_priv = dev->dev_private;
-
- if (pipe != 0)
- return 0;
-
- return atomic_read(&dev_priv->vbl_received);
-}
-
-irqreturn_t r128_driver_irq_handler(int irq, void *arg)
-{
- struct drm_device *dev = (struct drm_device *) arg;
- drm_r128_private_t *dev_priv = (drm_r128_private_t *) dev->dev_private;
- int status;
-
- status = R128_READ(R128_GEN_INT_STATUS);
-
- /* VBLANK interrupt */
- if (status & R128_CRTC_VBLANK_INT) {
- R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
- atomic_inc(&dev_priv->vbl_received);
- drm_handle_vblank(dev, 0);
- return IRQ_HANDLED;
- }
- return IRQ_NONE;
-}
-
-int r128_enable_vblank(struct drm_device *dev, unsigned int pipe)
-{
- drm_r128_private_t *dev_priv = dev->dev_private;
-
- if (pipe != 0) {
- DRM_ERROR("%s: bad crtc %u\n", __func__, pipe);
- return -EINVAL;
- }
-
- R128_WRITE(R128_GEN_INT_CNTL, R128_CRTC_VBLANK_INT_EN);
- return 0;
-}
-
-void r128_disable_vblank(struct drm_device *dev, unsigned int pipe)
-{
- if (pipe != 0)
- DRM_ERROR("%s: bad crtc %u\n", __func__, pipe);
-
- /*
- * FIXME: implement proper interrupt disable by using the vblank
- * counter register (if available)
- *
- * R128_WRITE(R128_GEN_INT_CNTL,
- * R128_READ(R128_GEN_INT_CNTL) & ~R128_CRTC_VBLANK_INT_EN);
- */
-}
-
-void r128_driver_irq_preinstall(struct drm_device *dev)
-{
- drm_r128_private_t *dev_priv = (drm_r128_private_t *) dev->dev_private;
-
- /* Disable *all* interrupts */
- R128_WRITE(R128_GEN_INT_CNTL, 0);
- /* Clear vblank bit if it's already high */
- R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
-}
-
-int r128_driver_irq_postinstall(struct drm_device *dev)
-{
- return 0;
-}
-
-void r128_driver_irq_uninstall(struct drm_device *dev)
-{
- drm_r128_private_t *dev_priv = (drm_r128_private_t *) dev->dev_private;
- if (!dev_priv)
- return;
-
- /* Disable *all* interrupts */
- R128_WRITE(R128_GEN_INT_CNTL, 0);
-}
diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
deleted file mode 100644
index ac13fc2a0214..000000000000
--- a/drivers/gpu/drm/r128/r128_state.c
+++ /dev/null
@@ -1,1641 +0,0 @@
-/* r128_state.c -- State support for r128 -*- linux-c -*-
- * Created: Thu Jan 27 02:53:43 2000 by gareth@valinux.com
- */
-/*
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Gareth Hughes <gareth@valinux.com>
- */
-
-#include <linux/pci.h>
-#include <linux/slab.h>
-#include <linux/uaccess.h>
-
-#include <drm/drm_device.h>
-#include <drm/drm_file.h>
-#include <drm/drm_print.h>
-#include <drm/r128_drm.h>
-
-#include "r128_drv.h"
-
-/* ================================================================
- * CCE hardware state programming functions
- */
-
-static void r128_emit_clip_rects(drm_r128_private_t *dev_priv,
- struct drm_clip_rect *boxes, int count)
-{
- u32 aux_sc_cntl = 0x00000000;
- RING_LOCALS;
- DRM_DEBUG("\n");
-
- BEGIN_RING((count < 3 ? count : 3) * 5 + 2);
-
- if (count >= 1) {
- OUT_RING(CCE_PACKET0(R128_AUX1_SC_LEFT, 3));
- OUT_RING(boxes[0].x1);
- OUT_RING(boxes[0].x2 - 1);
- OUT_RING(boxes[0].y1);
- OUT_RING(boxes[0].y2 - 1);
-
- aux_sc_cntl |= (R128_AUX1_SC_EN | R128_AUX1_SC_MODE_OR);
- }
- if (count >= 2) {
- OUT_RING(CCE_PACKET0(R128_AUX2_SC_LEFT, 3));
- OUT_RING(boxes[1].x1);
- OUT_RING(boxes[1].x2 - 1);
- OUT_RING(boxes[1].y1);
- OUT_RING(boxes[1].y2 - 1);
-
- aux_sc_cntl |= (R128_AUX2_SC_EN | R128_AUX2_SC_MODE_OR);
- }
- if (count >= 3) {
- OUT_RING(CCE_PACKET0(R128_AUX3_SC_LEFT, 3));
- OUT_RING(boxes[2].x1);
- OUT_RING(boxes[2].x2 - 1);
- OUT_RING(boxes[2].y1);
- OUT_RING(boxes[2].y2 - 1);
-
- aux_sc_cntl |= (R128_AUX3_SC_EN | R128_AUX3_SC_MODE_OR);
- }
-
- OUT_RING(CCE_PACKET0(R128_AUX_SC_CNTL, 0));
- OUT_RING(aux_sc_cntl);
-
- ADVANCE_RING();
-}
-
-static __inline__ void r128_emit_core(drm_r128_private_t *dev_priv)
-{
- drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
- drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
- RING_LOCALS;
- DRM_DEBUG("\n");
-
- BEGIN_RING(2);
-
- OUT_RING(CCE_PACKET0(R128_SCALE_3D_CNTL, 0));
- OUT_RING(ctx->scale_3d_cntl);
-
- ADVANCE_RING();
-}
-
-static __inline__ void r128_emit_context(drm_r128_private_t *dev_priv)
-{
- drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
- drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
- RING_LOCALS;
- DRM_DEBUG("\n");
-
- BEGIN_RING(13);
-
- OUT_RING(CCE_PACKET0(R128_DST_PITCH_OFFSET_C, 11));
- OUT_RING(ctx->dst_pitch_offset_c);
- OUT_RING(ctx->dp_gui_master_cntl_c);
- OUT_RING(ctx->sc_top_left_c);
- OUT_RING(ctx->sc_bottom_right_c);
- OUT_RING(ctx->z_offset_c);
- OUT_RING(ctx->z_pitch_c);
- OUT_RING(ctx->z_sten_cntl_c);
- OUT_RING(ctx->tex_cntl_c);
- OUT_RING(ctx->misc_3d_state_cntl_reg);
- OUT_RING(ctx->texture_clr_cmp_clr_c);
- OUT_RING(ctx->texture_clr_cmp_msk_c);
- OUT_RING(ctx->fog_color_c);
-
- ADVANCE_RING();
-}
-
-static __inline__ void r128_emit_setup(drm_r128_private_t *dev_priv)
-{
- drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
- drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
- RING_LOCALS;
- DRM_DEBUG("\n");
-
- BEGIN_RING(3);
-
- OUT_RING(CCE_PACKET1(R128_SETUP_CNTL, R128_PM4_VC_FPU_SETUP));
- OUT_RING(ctx->setup_cntl);
- OUT_RING(ctx->pm4_vc_fpu_setup);
-
- ADVANCE_RING();
-}
-
-static __inline__ void r128_emit_masks(drm_r128_private_t *dev_priv)
-{
- drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
- drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
- RING_LOCALS;
- DRM_DEBUG("\n");
-
- BEGIN_RING(5);
-
- OUT_RING(CCE_PACKET0(R128_DP_WRITE_MASK, 0));
- OUT_RING(ctx->dp_write_mask);
-
- OUT_RING(CCE_PACKET0(R128_STEN_REF_MASK_C, 1));
- OUT_RING(ctx->sten_ref_mask_c);
- OUT_RING(ctx->plane_3d_mask_c);
-
- ADVANCE_RING();
-}
-
-static __inline__ void r128_emit_window(drm_r128_private_t *dev_priv)
-{
- drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
- drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
- RING_LOCALS;
- DRM_DEBUG("\n");
-
- BEGIN_RING(2);
-
- OUT_RING(CCE_PACKET0(R128_WINDOW_XY_OFFSET, 0));
- OUT_RING(ctx->window_xy_offset);
-
- ADVANCE_RING();
-}
-
-static __inline__ void r128_emit_tex0(drm_r128_private_t *dev_priv)
-{
- drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
- drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
- drm_r128_texture_regs_t *tex = &sarea_priv->tex_state[0];
- int i;
- RING_LOCALS;
- DRM_DEBUG("\n");
-
- BEGIN_RING(7 + R128_MAX_TEXTURE_LEVELS);
-
- OUT_RING(CCE_PACKET0(R128_PRIM_TEX_CNTL_C,
- 2 + R128_MAX_TEXTURE_LEVELS));
- OUT_RING(tex->tex_cntl);
- OUT_RING(tex->tex_combine_cntl);
- OUT_RING(ctx->tex_size_pitch_c);
- for (i = 0; i < R128_MAX_TEXTURE_LEVELS; i++)
- OUT_RING(tex->tex_offset[i]);
-
- OUT_RING(CCE_PACKET0(R128_CONSTANT_COLOR_C, 1));
- OUT_RING(ctx->constant_color_c);
- OUT_RING(tex->tex_border_color);
-
- ADVANCE_RING();
-}
-
-static __inline__ void r128_emit_tex1(drm_r128_private_t *dev_priv)
-{
- drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
- drm_r128_texture_regs_t *tex = &sarea_priv->tex_state[1];
- int i;
- RING_LOCALS;
- DRM_DEBUG("\n");
-
- BEGIN_RING(5 + R128_MAX_TEXTURE_LEVELS);
-
- OUT_RING(CCE_PACKET0(R128_SEC_TEX_CNTL_C, 1 + R128_MAX_TEXTURE_LEVELS));
- OUT_RING(tex->tex_cntl);
- OUT_RING(tex->tex_combine_cntl);
- for (i = 0; i < R128_MAX_TEXTURE_LEVELS; i++)
- OUT_RING(tex->tex_offset[i]);
-
- OUT_RING(CCE_PACKET0(R128_SEC_TEXTURE_BORDER_COLOR_C, 0));
- OUT_RING(tex->tex_border_color);
-
- ADVANCE_RING();
-}
-
-static void r128_emit_state(drm_r128_private_t *dev_priv)
-{
- drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
- unsigned int dirty = sarea_priv->dirty;
-
- DRM_DEBUG("dirty=0x%08x\n", dirty);
-
- if (dirty & R128_UPLOAD_CORE) {
- r128_emit_core(dev_priv);
- sarea_priv->dirty &= ~R128_UPLOAD_CORE;
- }
-
- if (dirty & R128_UPLOAD_CONTEXT) {
- r128_emit_context(dev_priv);
- sarea_priv->dirty &= ~R128_UPLOAD_CONTEXT;
- }
-
- if (dirty & R128_UPLOAD_SETUP) {
- r128_emit_setup(dev_priv);
- sarea_priv->dirty &= ~R128_UPLOAD_SETUP;
- }
-
- if (dirty & R128_UPLOAD_MASKS) {
- r128_emit_masks(dev_priv);
- sarea_priv->dirty &= ~R128_UPLOAD_MASKS;
- }
-
- if (dirty & R128_UPLOAD_WINDOW) {
- r128_emit_window(dev_priv);
- sarea_priv->dirty &= ~R128_UPLOAD_WINDOW;
- }
-
- if (dirty & R128_UPLOAD_TEX0) {
- r128_emit_tex0(dev_priv);
- sarea_priv->dirty &= ~R128_UPLOAD_TEX0;
- }
-
- if (dirty & R128_UPLOAD_TEX1) {
- r128_emit_tex1(dev_priv);
- sarea_priv->dirty &= ~R128_UPLOAD_TEX1;
- }
-
- /* Turn off the texture cache flushing */
- sarea_priv->context_state.tex_cntl_c &= ~R128_TEX_CACHE_FLUSH;
-
- sarea_priv->dirty &= ~R128_REQUIRE_QUIESCENCE;
-}
-
-#if R128_PERFORMANCE_BOXES
-/* ================================================================
- * Performance monitoring functions
- */
-
-static void r128_clear_box(drm_r128_private_t *dev_priv,
- int x, int y, int w, int h, int r, int g, int b)
-{
- u32 pitch, offset;
- u32 fb_bpp, color;
- RING_LOCALS;
-
- switch (dev_priv->fb_bpp) {
- case 16:
- fb_bpp = R128_GMC_DST_16BPP;
- color = (((r & 0xf8) << 8) |
- ((g & 0xfc) << 3) | ((b & 0xf8) >> 3));
- break;
- case 24:
- fb_bpp = R128_GMC_DST_24BPP;
- color = ((r << 16) | (g << 8) | b);
- break;
- case 32:
- fb_bpp = R128_GMC_DST_32BPP;
- color = (((0xff) << 24) | (r << 16) | (g << 8) | b);
- break;
- default:
- return;
- }
-
- offset = dev_priv->back_offset;
- pitch = dev_priv->back_pitch >> 3;
-
- BEGIN_RING(6);
-
- OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
- OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
- R128_GMC_BRUSH_SOLID_COLOR |
- fb_bpp |
- R128_GMC_SRC_DATATYPE_COLOR |
- R128_ROP3_P |
- R128_GMC_CLR_CMP_CNTL_DIS | R128_GMC_AUX_CLIP_DIS);
-
- OUT_RING((pitch << 21) | (offset >> 5));
- OUT_RING(color);
-
- OUT_RING((x << 16) | y);
- OUT_RING((w << 16) | h);
-
- ADVANCE_RING();
-}
-
-static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
-{
- if (atomic_read(&dev_priv->idle_count) == 0)
- r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
- else
- atomic_set(&dev_priv->idle_count, 0);
-}
-
-#endif
-
-/* ================================================================
- * CCE command dispatch functions
- */
-
-static void r128_print_dirty(const char *msg, unsigned int flags)
-{
- DRM_INFO("%s: (0x%x) %s%s%s%s%s%s%s%s%s\n",
- msg,
- flags,
- (flags & R128_UPLOAD_CORE) ? "core, " : "",
- (flags & R128_UPLOAD_CONTEXT) ? "context, " : "",
- (flags & R128_UPLOAD_SETUP) ? "setup, " : "",
- (flags & R128_UPLOAD_TEX0) ? "tex0, " : "",
- (flags & R128_UPLOAD_TEX1) ? "tex1, " : "",
- (flags & R128_UPLOAD_MASKS) ? "masks, " : "",
- (flags & R128_UPLOAD_WINDOW) ? "window, " : "",
- (flags & R128_UPLOAD_CLIPRECTS) ? "cliprects, " : "",
- (flags & R128_REQUIRE_QUIESCENCE) ? "quiescence, " : "");
-}
-
-static void r128_cce_dispatch_clear(struct drm_device *dev,
- drm_r128_clear_t *clear)
-{
- drm_r128_private_t *dev_priv = dev->dev_private;
- drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
- int nbox = sarea_priv->nbox;
- struct drm_clip_rect *pbox = sarea_priv->boxes;
- unsigned int flags = clear->flags;
- int i;
- RING_LOCALS;
- DRM_DEBUG("\n");
-
- if (dev_priv->page_flipping && dev_priv->current_page == 1) {
- unsigned int tmp = flags;
-
- flags &= ~(R128_FRONT | R128_BACK);
- if (tmp & R128_FRONT)
- flags |= R128_BACK;
- if (tmp & R128_BACK)
- flags |= R128_FRONT;
- }
-
- for (i = 0; i < nbox; i++) {
- int x = pbox[i].x1;
- int y = pbox[i].y1;
- int w = pbox[i].x2 - x;
- int h = pbox[i].y2 - y;
-
- DRM_DEBUG("dispatch clear %d,%d-%d,%d flags 0x%x\n",
- pbox[i].x1, pbox[i].y1, pbox[i].x2,
- pbox[i].y2, flags);
-
- if (flags & (R128_FRONT | R128_BACK)) {
- BEGIN_RING(2);
-
- OUT_RING(CCE_PACKET0(R128_DP_WRITE_MASK, 0));
- OUT_RING(clear->color_mask);
-
- ADVANCE_RING();
- }
-
- if (flags & R128_FRONT) {
- BEGIN_RING(6);
-
- OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
- OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
- R128_GMC_BRUSH_SOLID_COLOR |
- (dev_priv->color_fmt << 8) |
- R128_GMC_SRC_DATATYPE_COLOR |
- R128_ROP3_P |
- R128_GMC_CLR_CMP_CNTL_DIS |
- R128_GMC_AUX_CLIP_DIS);
-
- OUT_RING(dev_priv->front_pitch_offset_c);
- OUT_RING(clear->clear_color);
-
- OUT_RING((x << 16) | y);
- OUT_RING((w << 16) | h);
-
- ADVANCE_RING();
- }
-
- if (flags & R128_BACK) {
- BEGIN_RING(6);
-
- OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
- OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
- R128_GMC_BRUSH_SOLID_COLOR |
- (dev_priv->color_fmt << 8) |
- R128_GMC_SRC_DATATYPE_COLOR |
- R128_ROP3_P |
- R128_GMC_CLR_CMP_CNTL_DIS |
- R128_GMC_AUX_CLIP_DIS);
-
- OUT_RING(dev_priv->back_pitch_offset_c);
- OUT_RING(clear->clear_color);
-
- OUT_RING((x << 16) | y);
- OUT_RING((w << 16) | h);
-
- ADVANCE_RING();
- }
-
- if (flags & R128_DEPTH) {
- BEGIN_RING(6);
-
- OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
- OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
- R128_GMC_BRUSH_SOLID_COLOR |
- (dev_priv->depth_fmt << 8) |
- R128_GMC_SRC_DATATYPE_COLOR |
- R128_ROP3_P |
- R128_GMC_CLR_CMP_CNTL_DIS |
- R128_GMC_AUX_CLIP_DIS | R128_GMC_WR_MSK_DIS);
-
- OUT_RING(dev_priv->depth_pitch_offset_c);
- OUT_RING(clear->clear_depth);
-
- OUT_RING((x << 16) | y);
- OUT_RING((w << 16) | h);
-
- ADVANCE_RING();
- }
- }
-}
-
-static void r128_cce_dispatch_swap(struct drm_device *dev)
-{
- drm_r128_private_t *dev_priv = dev->dev_private;
- drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
- int nbox = sarea_priv->nbox;
- struct drm_clip_rect *pbox = sarea_priv->boxes;
- int i;
- RING_LOCALS;
- DRM_DEBUG("\n");
-
-#if R128_PERFORMANCE_BOXES
- /* Do some trivial performance monitoring...
- */
- r128_cce_performance_boxes(dev_priv);
-#endif
-
- for (i = 0; i < nbox; i++) {
- int x = pbox[i].x1;
- int y = pbox[i].y1;
- int w = pbox[i].x2 - x;
- int h = pbox[i].y2 - y;
-
- BEGIN_RING(7);
-
- OUT_RING(CCE_PACKET3(R128_CNTL_BITBLT_MULTI, 5));
- OUT_RING(R128_GMC_SRC_PITCH_OFFSET_CNTL |
- R128_GMC_DST_PITCH_OFFSET_CNTL |
- R128_GMC_BRUSH_NONE |
- (dev_priv->color_fmt << 8) |
- R128_GMC_SRC_DATATYPE_COLOR |
- R128_ROP3_S |
- R128_DP_SRC_SOURCE_MEMORY |
- R128_GMC_CLR_CMP_CNTL_DIS |
- R128_GMC_AUX_CLIP_DIS | R128_GMC_WR_MSK_DIS);
-
- /* Make this work even if front & back are flipped:
- */
- if (dev_priv->current_page == 0) {
- OUT_RING(dev_priv->back_pitch_offset_c);
- OUT_RING(dev_priv->front_pitch_offset_c);
- } else {
- OUT_RING(dev_priv->front_pitch_offset_c);
- OUT_RING(dev_priv->back_pitch_offset_c);
- }
-
- OUT_RING((x << 16) | y);
- OUT_RING((x << 16) | y);
- OUT_RING((w << 16) | h);
-
- ADVANCE_RING();
- }
-
- /* Increment the frame counter. The client-side 3D driver must
- * throttle the framerate by waiting for this value before
- * performing the swapbuffer ioctl.
- */
- dev_priv->sarea_priv->last_frame++;
-
- BEGIN_RING(2);
-
- OUT_RING(CCE_PACKET0(R128_LAST_FRAME_REG, 0));
- OUT_RING(dev_priv->sarea_priv->last_frame);
-
- ADVANCE_RING();
-}
-
-static void r128_cce_dispatch_flip(struct drm_device *dev)
-{
- drm_r128_private_t *dev_priv = dev->dev_private;
- RING_LOCALS;
- DRM_DEBUG("page=%d pfCurrentPage=%d\n",
- dev_priv->current_page, dev_priv->sarea_priv->pfCurrentPage);
-
-#if R128_PERFORMANCE_BOXES
- /* Do some trivial performance monitoring...
- */
- r128_cce_performance_boxes(dev_priv);
-#endif
-
- BEGIN_RING(4);
-
- R128_WAIT_UNTIL_PAGE_FLIPPED();
- OUT_RING(CCE_PACKET0(R128_CRTC_OFFSET, 0));
-
- if (dev_priv->current_page == 0)
- OUT_RING(dev_priv->back_offset);
- else
- OUT_RING(dev_priv->front_offset);
-
- ADVANCE_RING();
-
- /* Increment the frame counter. The client-side 3D driver must
- * throttle the framerate by waiting for this value before
- * performing the swapbuffer ioctl.
- */
- dev_priv->sarea_priv->last_frame++;
- dev_priv->sarea_priv->pfCurrentPage = dev_priv->current_page =
- 1 - dev_priv->current_page;
-
- BEGIN_RING(2);
-
- OUT_RING(CCE_PACKET0(R128_LAST_FRAME_REG, 0));
- OUT_RING(dev_priv->sarea_priv->last_frame);
-
- ADVANCE_RING();
-}
-
-static void r128_cce_dispatch_vertex(struct drm_device *dev, struct drm_buf *buf)
-{
- drm_r128_private_t *dev_priv = dev->dev_private;
- drm_r128_buf_priv_t *buf_priv = buf->dev_private;
- drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
- int format = sarea_priv->vc_format;
- int offset = buf->bus_address;
- int size = buf->used;
- int prim = buf_priv->prim;
- int i = 0;
- RING_LOCALS;
- DRM_DEBUG("buf=%d nbox=%d\n", buf->idx, sarea_priv->nbox);
-
- if (0)
- r128_print_dirty("dispatch_vertex", sarea_priv->dirty);
-
- if (buf->used) {
- buf_priv->dispatched = 1;
-
- if (sarea_priv->dirty & ~R128_UPLOAD_CLIPRECTS)
- r128_emit_state(dev_priv);
-
- do {
- /* Emit the next set of up to three cliprects */
- if (i < sarea_priv->nbox) {
- r128_emit_clip_rects(dev_priv,
- &sarea_priv->boxes[i],
- sarea_priv->nbox - i);
- }
-
- /* Emit the vertex buffer rendering commands */
- BEGIN_RING(5);
-
- OUT_RING(CCE_PACKET3(R128_3D_RNDR_GEN_INDX_PRIM, 3));
- OUT_RING(offset);
- OUT_RING(size);
- OUT_RING(format);
- OUT_RING(prim | R128_CCE_VC_CNTL_PRIM_WALK_LIST |
- (size << R128_CCE_VC_CNTL_NUM_SHIFT));
-
- ADVANCE_RING();
-
- i += 3;
- } while (i < sarea_priv->nbox);
- }
-
- if (buf_priv->discard) {
- buf_priv->age = dev_priv->sarea_priv->last_dispatch;
-
- /* Emit the vertex buffer age */
- BEGIN_RING(2);
-
- OUT_RING(CCE_PACKET0(R128_LAST_DISPATCH_REG, 0));
- OUT_RING(buf_priv->age);
-
- ADVANCE_RING();
-
- buf->pending = 1;
- buf->used = 0;
- /* FIXME: Check dispatched field */
- buf_priv->dispatched = 0;
- }
-
- dev_priv->sarea_priv->last_dispatch++;
-
- sarea_priv->dirty &= ~R128_UPLOAD_CLIPRECTS;
- sarea_priv->nbox = 0;
-}
-
-static void r128_cce_dispatch_indirect(struct drm_device *dev,
- struct drm_buf *buf, int start, int end)
-{
- drm_r128_private_t *dev_priv = dev->dev_private;
- drm_r128_buf_priv_t *buf_priv = buf->dev_private;
- RING_LOCALS;
- DRM_DEBUG("indirect: buf=%d s=0x%x e=0x%x\n", buf->idx, start, end);
-
- if (start != end) {
- int offset = buf->bus_address + start;
- int dwords = (end - start + 3) / sizeof(u32);
-
- /* Indirect buffer data must be an even number of
- * dwords, so if we've been given an odd number we must
- * pad the data with a Type-2 CCE packet.
- */
- if (dwords & 1) {
- u32 *data = (u32 *)
- ((char *)dev->agp_buffer_map->handle
- + buf->offset + start);
- data[dwords++] = cpu_to_le32(R128_CCE_PACKET2);
- }
-
- buf_priv->dispatched = 1;
-
- /* Fire off the indirect buffer */
- BEGIN_RING(3);
-
- OUT_RING(CCE_PACKET0(R128_PM4_IW_INDOFF, 1));
- OUT_RING(offset);
- OUT_RING(dwords);
-
- ADVANCE_RING();
- }
-
- if (buf_priv->discard) {
- buf_priv->age = dev_priv->sarea_priv->last_dispatch;
-
- /* Emit the indirect buffer age */
- BEGIN_RING(2);
-
- OUT_RING(CCE_PACKET0(R128_LAST_DISPATCH_REG, 0));
- OUT_RING(buf_priv->age);
-
- ADVANCE_RING();
-
- buf->pending = 1;
- buf->used = 0;
- /* FIXME: Check dispatched field */
- buf_priv->dispatched = 0;
- }
-
- dev_priv->sarea_priv->last_dispatch++;
-}
-
-static void r128_cce_dispatch_indices(struct drm_device *dev,
- struct drm_buf *buf,
- int start, int end, int count)
-{
- drm_r128_private_t *dev_priv = dev->dev_private;
- drm_r128_buf_priv_t *buf_priv = buf->dev_private;
- drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
- int format = sarea_priv->vc_format;
- int offset = dev->agp_buffer_map->offset - dev_priv->cce_buffers_offset;
- int prim = buf_priv->prim;
- u32 *data;
- int dwords;
- int i = 0;
- RING_LOCALS;
- DRM_DEBUG("indices: s=%d e=%d c=%d\n", start, end, count);
-
- if (0)
- r128_print_dirty("dispatch_indices", sarea_priv->dirty);
-
- if (start != end) {
- buf_priv->dispatched = 1;
-
- if (sarea_priv->dirty & ~R128_UPLOAD_CLIPRECTS)
- r128_emit_state(dev_priv);
-
- dwords = (end - start + 3) / sizeof(u32);
-
- data = (u32 *) ((char *)dev->agp_buffer_map->handle
- + buf->offset + start);
-
- data[0] = cpu_to_le32(CCE_PACKET3(R128_3D_RNDR_GEN_INDX_PRIM,
- dwords - 2));
-
- data[1] = cpu_to_le32(offset);
- data[2] = cpu_to_le32(R128_MAX_VB_VERTS);
- data[3] = cpu_to_le32(format);
- data[4] = cpu_to_le32((prim | R128_CCE_VC_CNTL_PRIM_WALK_IND |
- (count << 16)));
-
- if (count & 0x1) {
-#ifdef __LITTLE_ENDIAN
- data[dwords - 1] &= 0x0000ffff;
-#else
- data[dwords - 1] &= 0xffff0000;
-#endif
- }
-
- do {
- /* Emit the next set of up to three cliprects */
- if (i < sarea_priv->nbox) {
- r128_emit_clip_rects(dev_priv,
- &sarea_priv->boxes[i],
- sarea_priv->nbox - i);
- }
-
- r128_cce_dispatch_indirect(dev, buf, start, end);
-
- i += 3;
- } while (i < sarea_priv->nbox);
- }
-
- if (buf_priv->discard) {
- buf_priv->age = dev_priv->sarea_priv->last_dispatch;
-
- /* Emit the vertex buffer age */
- BEGIN_RING(2);
-
- OUT_RING(CCE_PACKET0(R128_LAST_DISPATCH_REG, 0));
- OUT_RING(buf_priv->age);
-
- ADVANCE_RING();
-
- buf->pending = 1;
- /* FIXME: Check dispatched field */
- buf_priv->dispatched = 0;
- }
-
- dev_priv->sarea_priv->last_dispatch++;
-
- sarea_priv->dirty &= ~R128_UPLOAD_CLIPRECTS;
- sarea_priv->nbox = 0;
-}
-
-static int r128_cce_dispatch_blit(struct drm_device *dev,
- struct drm_file *file_priv,
- drm_r128_blit_t *blit)
-{
- drm_r128_private_t *dev_priv = dev->dev_private;
- struct drm_device_dma *dma = dev->dma;
- struct drm_buf *buf;
- drm_r128_buf_priv_t *buf_priv;
- u32 *data;
- int dword_shift, dwords;
- RING_LOCALS;
- DRM_DEBUG("\n");
-
- /* The compiler won't optimize away a division by a variable,
- * even if the only legal values are powers of two. Thus, we'll
- * use a shift instead.
- */
- switch (blit->format) {
- case R128_DATATYPE_ARGB8888:
- dword_shift = 0;
- break;
- case R128_DATATYPE_ARGB1555:
- case R128_DATATYPE_RGB565:
- case R128_DATATYPE_ARGB4444:
- case R128_DATATYPE_YVYU422:
- case R128_DATATYPE_VYUY422:
- dword_shift = 1;
- break;
- case R128_DATATYPE_CI8:
- case R128_DATATYPE_RGB8:
- dword_shift = 2;
- break;
- default:
- DRM_ERROR("invalid blit format %d\n", blit->format);
- return -EINVAL;
- }
-
- /* Flush the pixel cache, and mark the contents as Read Invalid.
- * This ensures no pixel data gets mixed up with the texture
- * data from the host data blit, otherwise part of the texture
- * image may be corrupted.
- */
- BEGIN_RING(2);
-
- OUT_RING(CCE_PACKET0(R128_PC_GUI_CTLSTAT, 0));
- OUT_RING(R128_PC_RI_GUI | R128_PC_FLUSH_GUI);
-
- ADVANCE_RING();
-
- /* Dispatch the indirect buffer.
- */
- buf = dma->buflist[blit->idx];
- buf_priv = buf->dev_private;
-
- if (buf->file_priv != file_priv) {
- DRM_ERROR("process %d using buffer owned by %p\n",
- task_pid_nr(current), buf->file_priv);
- return -EINVAL;
- }
- if (buf->pending) {
- DRM_ERROR("sending pending buffer %d\n", blit->idx);
- return -EINVAL;
- }
-
- buf_priv->discard = 1;
-
- dwords = (blit->width * blit->height) >> dword_shift;
-
- data = (u32 *) ((char *)dev->agp_buffer_map->handle + buf->offset);
-
- data[0] = cpu_to_le32(CCE_PACKET3(R128_CNTL_HOSTDATA_BLT, dwords + 6));
- data[1] = cpu_to_le32((R128_GMC_DST_PITCH_OFFSET_CNTL |
- R128_GMC_BRUSH_NONE |
- (blit->format << 8) |
- R128_GMC_SRC_DATATYPE_COLOR |
- R128_ROP3_S |
- R128_DP_SRC_SOURCE_HOST_DATA |
- R128_GMC_CLR_CMP_CNTL_DIS |
- R128_GMC_AUX_CLIP_DIS | R128_GMC_WR_MSK_DIS));
-
- data[2] = cpu_to_le32((blit->pitch << 21) | (blit->offset >> 5));
- data[3] = cpu_to_le32(0xffffffff);
- data[4] = cpu_to_le32(0xffffffff);
- data[5] = cpu_to_le32((blit->y << 16) | blit->x);
- data[6] = cpu_to_le32((blit->height << 16) | blit->width);
- data[7] = cpu_to_le32(dwords);
-
- buf->used = (dwords + 8) * sizeof(u32);
-
- r128_cce_dispatch_indirect(dev, buf, 0, buf->used);
-
- /* Flush the pixel cache after the blit completes. This ensures
- * the texture data is written out to memory before rendering
- * continues.
- */
- BEGIN_RING(2);
-
- OUT_RING(CCE_PACKET0(R128_PC_GUI_CTLSTAT, 0));
- OUT_RING(R128_PC_FLUSH_GUI);
-
- ADVANCE_RING();
-
- return 0;
-}
-
-/* ================================================================
- * Tiled depth buffer management
- *
- * FIXME: These should all set the destination write mask for when we
- * have hardware stencil support.
- */
-
-static int r128_cce_dispatch_write_span(struct drm_device *dev,
- drm_r128_depth_t *depth)
-{
- drm_r128_private_t *dev_priv = dev->dev_private;
- int count, x, y;
- u32 *buffer;
- u8 *mask;
- int i, buffer_size, mask_size;
- RING_LOCALS;
- DRM_DEBUG("\n");
-
- count = depth->n;
- if (count > 4096 || count <= 0)
- return -EMSGSIZE;
-
- if (copy_from_user(&x, depth->x, sizeof(x)))
- return -EFAULT;
- if (copy_from_user(&y, depth->y, sizeof(y)))
- return -EFAULT;
-
- buffer_size = depth->n * sizeof(u32);
- buffer = memdup_user(depth->buffer, buffer_size);
- if (IS_ERR(buffer))
- return PTR_ERR(buffer);
-
- mask_size = depth->n;
- if (depth->mask) {
- mask = memdup_user(depth->mask, mask_size);
- if (IS_ERR(mask)) {
- kfree(buffer);
- return PTR_ERR(mask);
- }
-
- for (i = 0; i < count; i++, x++) {
- if (mask[i]) {
- BEGIN_RING(6);
-
- OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
- OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
- R128_GMC_BRUSH_SOLID_COLOR |
- (dev_priv->depth_fmt << 8) |
- R128_GMC_SRC_DATATYPE_COLOR |
- R128_ROP3_P |
- R128_GMC_CLR_CMP_CNTL_DIS |
- R128_GMC_WR_MSK_DIS);
-
- OUT_RING(dev_priv->depth_pitch_offset_c);
- OUT_RING(buffer[i]);
-
- OUT_RING((x << 16) | y);
- OUT_RING((1 << 16) | 1);
-
- ADVANCE_RING();
- }
- }
-
- kfree(mask);
- } else {
- for (i = 0; i < count; i++, x++) {
- BEGIN_RING(6);
-
- OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
- OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
- R128_GMC_BRUSH_SOLID_COLOR |
- (dev_priv->depth_fmt << 8) |
- R128_GMC_SRC_DATATYPE_COLOR |
- R128_ROP3_P |
- R128_GMC_CLR_CMP_CNTL_DIS |
- R128_GMC_WR_MSK_DIS);
-
- OUT_RING(dev_priv->depth_pitch_offset_c);
- OUT_RING(buffer[i]);
-
- OUT_RING((x << 16) | y);
- OUT_RING((1 << 16) | 1);
-
- ADVANCE_RING();
- }
- }
-
- kfree(buffer);
-
- return 0;
-}
-
-static int r128_cce_dispatch_write_pixels(struct drm_device *dev,
- drm_r128_depth_t *depth)
-{
- drm_r128_private_t *dev_priv = dev->dev_private;
- int count, *x, *y;
- u32 *buffer;
- u8 *mask;
- int i, xbuf_size, ybuf_size, buffer_size, mask_size;
- RING_LOCALS;
- DRM_DEBUG("\n");
-
- count = depth->n;
- if (count > 4096 || count <= 0)
- return -EMSGSIZE;
-
- xbuf_size = count * sizeof(*x);
- ybuf_size = count * sizeof(*y);
- x = memdup_user(depth->x, xbuf_size);
- if (IS_ERR(x))
- return PTR_ERR(x);
- y = memdup_user(depth->y, ybuf_size);
- if (IS_ERR(y)) {
- kfree(x);
- return PTR_ERR(y);
- }
- buffer_size = depth->n * sizeof(u32);
- buffer = memdup_user(depth->buffer, buffer_size);
- if (IS_ERR(buffer)) {
- kfree(x);
- kfree(y);
- return PTR_ERR(buffer);
- }
-
- if (depth->mask) {
- mask_size = depth->n;
- mask = memdup_user(depth->mask, mask_size);
- if (IS_ERR(mask)) {
- kfree(x);
- kfree(y);
- kfree(buffer);
- return PTR_ERR(mask);
- }
-
- for (i = 0; i < count; i++) {
- if (mask[i]) {
- BEGIN_RING(6);
-
- OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
- OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
- R128_GMC_BRUSH_SOLID_COLOR |
- (dev_priv->depth_fmt << 8) |
- R128_GMC_SRC_DATATYPE_COLOR |
- R128_ROP3_P |
- R128_GMC_CLR_CMP_CNTL_DIS |
- R128_GMC_WR_MSK_DIS);
-
- OUT_RING(dev_priv->depth_pitch_offset_c);
- OUT_RING(buffer[i]);
-
- OUT_RING((x[i] << 16) | y[i]);
- OUT_RING((1 << 16) | 1);
-
- ADVANCE_RING();
- }
- }
-
- kfree(mask);
- } else {
- for (i = 0; i < count; i++) {
- BEGIN_RING(6);
-
- OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
- OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
- R128_GMC_BRUSH_SOLID_COLOR |
- (dev_priv->depth_fmt << 8) |
- R128_GMC_SRC_DATATYPE_COLOR |
- R128_ROP3_P |
- R128_GMC_CLR_CMP_CNTL_DIS |
- R128_GMC_WR_MSK_DIS);
-
- OUT_RING(dev_priv->depth_pitch_offset_c);
- OUT_RING(buffer[i]);
-
- OUT_RING((x[i] << 16) | y[i]);
- OUT_RING((1 << 16) | 1);
-
- ADVANCE_RING();
- }
- }
-
- kfree(x);
- kfree(y);
- kfree(buffer);
-
- return 0;
-}
-
-static int r128_cce_dispatch_read_span(struct drm_device *dev,
- drm_r128_depth_t *depth)
-{
- drm_r128_private_t *dev_priv = dev->dev_private;
- int count, x, y;
- RING_LOCALS;
- DRM_DEBUG("\n");
-
- count = depth->n;
- if (count > 4096 || count <= 0)
- return -EMSGSIZE;
-
- if (copy_from_user(&x, depth->x, sizeof(x)))
- return -EFAULT;
- if (copy_from_user(&y, depth->y, sizeof(y)))
- return -EFAULT;
-
- BEGIN_RING(7);
-
- OUT_RING(CCE_PACKET3(R128_CNTL_BITBLT_MULTI, 5));
- OUT_RING(R128_GMC_SRC_PITCH_OFFSET_CNTL |
- R128_GMC_DST_PITCH_OFFSET_CNTL |
- R128_GMC_BRUSH_NONE |
- (dev_priv->depth_fmt << 8) |
- R128_GMC_SRC_DATATYPE_COLOR |
- R128_ROP3_S |
- R128_DP_SRC_SOURCE_MEMORY |
- R128_GMC_CLR_CMP_CNTL_DIS | R128_GMC_WR_MSK_DIS);
-
- OUT_RING(dev_priv->depth_pitch_offset_c);
- OUT_RING(dev_priv->span_pitch_offset_c);
-
- OUT_RING((x << 16) | y);
- OUT_RING((0 << 16) | 0);
- OUT_RING((count << 16) | 1);
-
- ADVANCE_RING();
-
- return 0;
-}
-
-static int r128_cce_dispatch_read_pixels(struct drm_device *dev,
- drm_r128_depth_t *depth)
-{
- drm_r128_private_t *dev_priv = dev->dev_private;
- int count, *x, *y;
- int i, xbuf_size, ybuf_size;
- RING_LOCALS;
- DRM_DEBUG("\n");
-
- count = depth->n;
- if (count > 4096 || count <= 0)
- return -EMSGSIZE;
-
- if (count > dev_priv->depth_pitch)
- count = dev_priv->depth_pitch;
-
- xbuf_size = count * sizeof(*x);
- ybuf_size = count * sizeof(*y);
- x = kmalloc(xbuf_size, GFP_KERNEL);
- if (x == NULL)
- return -ENOMEM;
- y = kmalloc(ybuf_size, GFP_KERNEL);
- if (y == NULL) {
- kfree(x);
- return -ENOMEM;
- }
- if (copy_from_user(x, depth->x, xbuf_size)) {
- kfree(x);
- kfree(y);
- return -EFAULT;
- }
- if (copy_from_user(y, depth->y, ybuf_size)) {
- kfree(x);
- kfree(y);
- return -EFAULT;
- }
-
- for (i = 0; i < count; i++) {
- BEGIN_RING(7);
-
- OUT_RING(CCE_PACKET3(R128_CNTL_BITBLT_MULTI, 5));
- OUT_RING(R128_GMC_SRC_PITCH_OFFSET_CNTL |
- R128_GMC_DST_PITCH_OFFSET_CNTL |
- R128_GMC_BRUSH_NONE |
- (dev_priv->depth_fmt << 8) |
- R128_GMC_SRC_DATATYPE_COLOR |
- R128_ROP3_S |
- R128_DP_SRC_SOURCE_MEMORY |
- R128_GMC_CLR_CMP_CNTL_DIS | R128_GMC_WR_MSK_DIS);
-
- OUT_RING(dev_priv->depth_pitch_offset_c);
- OUT_RING(dev_priv->span_pitch_offset_c);
-
- OUT_RING((x[i] << 16) | y[i]);
- OUT_RING((i << 16) | 0);
- OUT_RING((1 << 16) | 1);
-
- ADVANCE_RING();
- }
-
- kfree(x);
- kfree(y);
-
- return 0;
-}
-
-/* ================================================================
- * Polygon stipple
- */
-
-static void r128_cce_dispatch_stipple(struct drm_device *dev, u32 *stipple)
-{
- drm_r128_private_t *dev_priv = dev->dev_private;
- int i;
- RING_LOCALS;
- DRM_DEBUG("\n");
-
- BEGIN_RING(33);
-
- OUT_RING(CCE_PACKET0(R128_BRUSH_DATA0, 31));
- for (i = 0; i < 32; i++)
- OUT_RING(stipple[i]);
-
- ADVANCE_RING();
-}
-
-/* ================================================================
- * IOCTL functions
- */
-
-static int r128_cce_clear(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- drm_r128_private_t *dev_priv = dev->dev_private;
- drm_r128_sarea_t *sarea_priv;
- drm_r128_clear_t *clear = data;
- DRM_DEBUG("\n");
-
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- DEV_INIT_TEST_WITH_RETURN(dev_priv);
-
- RING_SPACE_TEST_WITH_RETURN(dev_priv);
-
- sarea_priv = dev_priv->sarea_priv;
-
- if (sarea_priv->nbox > R128_NR_SAREA_CLIPRECTS)
- sarea_priv->nbox = R128_NR_SAREA_CLIPRECTS;
-
- r128_cce_dispatch_clear(dev, clear);
- COMMIT_RING();
-
- /* Make sure we restore the 3D state next time.
- */
- dev_priv->sarea_priv->dirty |= R128_UPLOAD_CONTEXT | R128_UPLOAD_MASKS;
-
- return 0;
-}
-
-static int r128_do_init_pageflip(struct drm_device *dev)
-{
- drm_r128_private_t *dev_priv = dev->dev_private;
- DRM_DEBUG("\n");
-
- dev_priv->crtc_offset = R128_READ(R128_CRTC_OFFSET);
- dev_priv->crtc_offset_cntl = R128_READ(R128_CRTC_OFFSET_CNTL);
-
- R128_WRITE(R128_CRTC_OFFSET, dev_priv->front_offset);
- R128_WRITE(R128_CRTC_OFFSET_CNTL,
- dev_priv->crtc_offset_cntl | R128_CRTC_OFFSET_FLIP_CNTL);
-
- dev_priv->page_flipping = 1;
- dev_priv->current_page = 0;
- dev_priv->sarea_priv->pfCurrentPage = dev_priv->current_page;
-
- return 0;
-}
-
-static int r128_do_cleanup_pageflip(struct drm_device *dev)
-{
- drm_r128_private_t *dev_priv = dev->dev_private;
- DRM_DEBUG("\n");
-
- R128_WRITE(R128_CRTC_OFFSET, dev_priv->crtc_offset);
- R128_WRITE(R128_CRTC_OFFSET_CNTL, dev_priv->crtc_offset_cntl);
-
- if (dev_priv->current_page != 0) {
- r128_cce_dispatch_flip(dev);
- COMMIT_RING();
- }
-
- dev_priv->page_flipping = 0;
- return 0;
-}
-
-/* Swapping and flipping are different operations, need different ioctls.
- * They can & should be intermixed to support multiple 3d windows.
- */
-
-static int r128_cce_flip(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- drm_r128_private_t *dev_priv = dev->dev_private;
- DRM_DEBUG("\n");
-
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- DEV_INIT_TEST_WITH_RETURN(dev_priv);
-
- RING_SPACE_TEST_WITH_RETURN(dev_priv);
-
- if (!dev_priv->page_flipping)
- r128_do_init_pageflip(dev);
-
- r128_cce_dispatch_flip(dev);
-
- COMMIT_RING();
- return 0;
-}
-
-static int r128_cce_swap(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- drm_r128_private_t *dev_priv = dev->dev_private;
- drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
- DRM_DEBUG("\n");
-
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- DEV_INIT_TEST_WITH_RETURN(dev_priv);
-
- RING_SPACE_TEST_WITH_RETURN(dev_priv);
-
- if (sarea_priv->nbox > R128_NR_SAREA_CLIPRECTS)
- sarea_priv->nbox = R128_NR_SAREA_CLIPRECTS;
-
- r128_cce_dispatch_swap(dev);
- dev_priv->sarea_priv->dirty |= (R128_UPLOAD_CONTEXT |
- R128_UPLOAD_MASKS);
-
- COMMIT_RING();
- return 0;
-}
-
-static int r128_cce_vertex(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- drm_r128_private_t *dev_priv = dev->dev_private;
- struct drm_device_dma *dma = dev->dma;
- struct drm_buf *buf;
- drm_r128_buf_priv_t *buf_priv;
- drm_r128_vertex_t *vertex = data;
-
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- DEV_INIT_TEST_WITH_RETURN(dev_priv);
-
- DRM_DEBUG("pid=%d index=%d count=%d discard=%d\n",
- task_pid_nr(current), vertex->idx, vertex->count, vertex->discard);
-
- if (vertex->idx < 0 || vertex->idx >= dma->buf_count) {
- DRM_ERROR("buffer index %d (of %d max)\n",
- vertex->idx, dma->buf_count - 1);
- return -EINVAL;
- }
- if (vertex->prim < 0 ||
- vertex->prim > R128_CCE_VC_CNTL_PRIM_TYPE_TRI_TYPE2) {
- DRM_ERROR("buffer prim %d\n", vertex->prim);
- return -EINVAL;
- }
-
- RING_SPACE_TEST_WITH_RETURN(dev_priv);
- VB_AGE_TEST_WITH_RETURN(dev_priv);
-
- buf = dma->buflist[vertex->idx];
- buf_priv = buf->dev_private;
-
- if (buf->file_priv != file_priv) {
- DRM_ERROR("process %d using buffer owned by %p\n",
- task_pid_nr(current), buf->file_priv);
- return -EINVAL;
- }
- if (buf->pending) {
- DRM_ERROR("sending pending buffer %d\n", vertex->idx);
- return -EINVAL;
- }
-
- buf->used = vertex->count;
- buf_priv->prim = vertex->prim;
- buf_priv->discard = vertex->discard;
-
- r128_cce_dispatch_vertex(dev, buf);
-
- COMMIT_RING();
- return 0;
-}
-
-static int r128_cce_indices(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- drm_r128_private_t *dev_priv = dev->dev_private;
- struct drm_device_dma *dma = dev->dma;
- struct drm_buf *buf;
- drm_r128_buf_priv_t *buf_priv;
- drm_r128_indices_t *elts = data;
- int count;
-
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- DEV_INIT_TEST_WITH_RETURN(dev_priv);
-
- DRM_DEBUG("pid=%d buf=%d s=%d e=%d d=%d\n", task_pid_nr(current),
- elts->idx, elts->start, elts->end, elts->discard);
-
- if (elts->idx < 0 || elts->idx >= dma->buf_count) {
- DRM_ERROR("buffer index %d (of %d max)\n",
- elts->idx, dma->buf_count - 1);
- return -EINVAL;
- }
- if (elts->prim < 0 ||
- elts->prim > R128_CCE_VC_CNTL_PRIM_TYPE_TRI_TYPE2) {
- DRM_ERROR("buffer prim %d\n", elts->prim);
- return -EINVAL;
- }
-
- RING_SPACE_TEST_WITH_RETURN(dev_priv);
- VB_AGE_TEST_WITH_RETURN(dev_priv);
-
- buf = dma->buflist[elts->idx];
- buf_priv = buf->dev_private;
-
- if (buf->file_priv != file_priv) {
- DRM_ERROR("process %d using buffer owned by %p\n",
- task_pid_nr(current), buf->file_priv);
- return -EINVAL;
- }
- if (buf->pending) {
- DRM_ERROR("sending pending buffer %d\n", elts->idx);
- return -EINVAL;
- }
-
- count = (elts->end - elts->start) / sizeof(u16);
- elts->start -= R128_INDEX_PRIM_OFFSET;
-
- if (elts->start & 0x7) {
- DRM_ERROR("misaligned buffer 0x%x\n", elts->start);
- return -EINVAL;
- }
- if (elts->start < buf->used) {
- DRM_ERROR("no header 0x%x - 0x%x\n", elts->start, buf->used);
- return -EINVAL;
- }
-
- buf->used = elts->end;
- buf_priv->prim = elts->prim;
- buf_priv->discard = elts->discard;
-
- r128_cce_dispatch_indices(dev, buf, elts->start, elts->end, count);
-
- COMMIT_RING();
- return 0;
-}
-
-static int r128_cce_blit(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- struct drm_device_dma *dma = dev->dma;
- drm_r128_private_t *dev_priv = dev->dev_private;
- drm_r128_blit_t *blit = data;
- int ret;
-
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- DEV_INIT_TEST_WITH_RETURN(dev_priv);
-
- DRM_DEBUG("pid=%d index=%d\n", task_pid_nr(current), blit->idx);
-
- if (blit->idx < 0 || blit->idx >= dma->buf_count) {
- DRM_ERROR("buffer index %d (of %d max)\n",
- blit->idx, dma->buf_count - 1);
- return -EINVAL;
- }
-
- RING_SPACE_TEST_WITH_RETURN(dev_priv);
- VB_AGE_TEST_WITH_RETURN(dev_priv);
-
- ret = r128_cce_dispatch_blit(dev, file_priv, blit);
-
- COMMIT_RING();
- return ret;
-}
-
-int r128_cce_depth(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- drm_r128_private_t *dev_priv = dev->dev_private;
- drm_r128_depth_t *depth = data;
- int ret;
-
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- DEV_INIT_TEST_WITH_RETURN(dev_priv);
-
- RING_SPACE_TEST_WITH_RETURN(dev_priv);
-
- ret = -EINVAL;
- switch (depth->func) {
- case R128_WRITE_SPAN:
- ret = r128_cce_dispatch_write_span(dev, depth);
- break;
- case R128_WRITE_PIXELS:
- ret = r128_cce_dispatch_write_pixels(dev, depth);
- break;
- case R128_READ_SPAN:
- ret = r128_cce_dispatch_read_span(dev, depth);
- break;
- case R128_READ_PIXELS:
- ret = r128_cce_dispatch_read_pixels(dev, depth);
- break;
- }
-
- COMMIT_RING();
- return ret;
-}
-
-int r128_cce_stipple(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- drm_r128_private_t *dev_priv = dev->dev_private;
- drm_r128_stipple_t *stipple = data;
- u32 mask[32];
-
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- DEV_INIT_TEST_WITH_RETURN(dev_priv);
-
- if (copy_from_user(&mask, stipple->mask, 32 * sizeof(u32)))
- return -EFAULT;
-
- RING_SPACE_TEST_WITH_RETURN(dev_priv);
-
- r128_cce_dispatch_stipple(dev, mask);
-
- COMMIT_RING();
- return 0;
-}
-
-static int r128_cce_indirect(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- drm_r128_private_t *dev_priv = dev->dev_private;
- struct drm_device_dma *dma = dev->dma;
- struct drm_buf *buf;
- drm_r128_buf_priv_t *buf_priv;
- drm_r128_indirect_t *indirect = data;
-#if 0
- RING_LOCALS;
-#endif
-
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- DEV_INIT_TEST_WITH_RETURN(dev_priv);
-
- DRM_DEBUG("idx=%d s=%d e=%d d=%d\n",
- indirect->idx, indirect->start, indirect->end,
- indirect->discard);
-
- if (indirect->idx < 0 || indirect->idx >= dma->buf_count) {
- DRM_ERROR("buffer index %d (of %d max)\n",
- indirect->idx, dma->buf_count - 1);
- return -EINVAL;
- }
-
- buf = dma->buflist[indirect->idx];
- buf_priv = buf->dev_private;
-
- if (buf->file_priv != file_priv) {
- DRM_ERROR("process %d using buffer owned by %p\n",
- task_pid_nr(current), buf->file_priv);
- return -EINVAL;
- }
- if (buf->pending) {
- DRM_ERROR("sending pending buffer %d\n", indirect->idx);
- return -EINVAL;
- }
-
- if (indirect->start < buf->used) {
- DRM_ERROR("reusing indirect: start=0x%x actual=0x%x\n",
- indirect->start, buf->used);
- return -EINVAL;
- }
-
- RING_SPACE_TEST_WITH_RETURN(dev_priv);
- VB_AGE_TEST_WITH_RETURN(dev_priv);
-
- buf->used = indirect->end;
- buf_priv->discard = indirect->discard;
-
-#if 0
- /* Wait for the 3D stream to idle before the indirect buffer
- * containing 2D acceleration commands is processed.
- */
- BEGIN_RING(2);
- RADEON_WAIT_UNTIL_3D_IDLE();
- ADVANCE_RING();
-#endif
-
- /* Dispatch the indirect buffer full of commands from the
- * X server. This is insecure and is thus only available to
- * privileged clients.
- */
- r128_cce_dispatch_indirect(dev, buf, indirect->start, indirect->end);
-
- COMMIT_RING();
- return 0;
-}
-
-int r128_getparam(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- drm_r128_private_t *dev_priv = dev->dev_private;
- drm_r128_getparam_t *param = data;
- struct pci_dev *pdev = to_pci_dev(dev->dev);
- int value;
-
- DEV_INIT_TEST_WITH_RETURN(dev_priv);
-
- DRM_DEBUG("pid=%d\n", task_pid_nr(current));
-
- switch (param->param) {
- case R128_PARAM_IRQ_NR:
- value = pdev->irq;
- break;
- default:
- return -EINVAL;
- }
-
- if (copy_to_user(param->value, &value, sizeof(int))) {
- DRM_ERROR("copy_to_user\n");
- return -EFAULT;
- }
-
- return 0;
-}
-
-void r128_driver_preclose(struct drm_device *dev, struct drm_file *file_priv)
-{
- if (dev->dev_private) {
- drm_r128_private_t *dev_priv = dev->dev_private;
- if (dev_priv->page_flipping)
- r128_do_cleanup_pageflip(dev);
- }
-}
-void r128_driver_lastclose(struct drm_device *dev)
-{
- r128_do_cleanup_cce(dev);
-}
-
-const struct drm_ioctl_desc r128_ioctls[] = {
- DRM_IOCTL_DEF_DRV(R128_INIT, r128_cce_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF_DRV(R128_CCE_START, r128_cce_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF_DRV(R128_CCE_STOP, r128_cce_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF_DRV(R128_CCE_RESET, r128_cce_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF_DRV(R128_CCE_IDLE, r128_cce_idle, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(R128_RESET, r128_engine_reset, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(R128_FULLSCREEN, r128_fullscreen, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(R128_SWAP, r128_cce_swap, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(R128_FLIP, r128_cce_flip, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(R128_CLEAR, r128_cce_clear, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(R128_VERTEX, r128_cce_vertex, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(R128_INDICES, r128_cce_indices, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(R128_BLIT, r128_cce_blit, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(R128_DEPTH, r128_cce_depth, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(R128_STIPPLE, r128_cce_stipple, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(R128_INDIRECT, r128_cce_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF_DRV(R128_GETPARAM, r128_getparam, DRM_AUTH),
-};
-
-int r128_max_ioctl = ARRAY_SIZE(r128_ioctls);
diff --git a/drivers/gpu/drm/radeon/Kconfig b/drivers/gpu/drm/radeon/Kconfig
index 97a277f9a25e..62a596d3a891 100644
--- a/drivers/gpu/drm/radeon/Kconfig
+++ b/drivers/gpu/drm/radeon/Kconfig
@@ -15,6 +15,8 @@ config DRM_RADEON
select HWMON
select BACKLIGHT_CLASS_DEVICE
select INTERVAL_TREE
+ select I2C
+ select I2C_ALGOBIT
# radeon depends on ACPI_VIDEO when ACPI is enabled, for select to work
# ACPI_VIDEO's dependencies must also be selected.
select INPUT if ACPI
diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h
index 235e59b547a1..8a6621f1e82c 100644
--- a/drivers/gpu/drm/radeon/atombios.h
+++ b/drivers/gpu/drm/radeon/atombios.h
@@ -4020,7 +4020,7 @@ typedef struct _ATOM_DISPLAY_OBJECT_PATH
USHORT usSize; //the size of ATOM_DISPLAY_OBJECT_PATH
USHORT usConnObjectId; //Connector Object ID
USHORT usGPUObjectId; //GPU ID
- USHORT usGraphicObjIds[1]; //1st Encoder Obj source from GPU to last Graphic Obj destinate to connector.
+ USHORT usGraphicObjIds[]; //1st Encoder Obj source from GPU to last Graphic Obj destinate to connector.
}ATOM_DISPLAY_OBJECT_PATH;
typedef struct _ATOM_DISPLAY_EXTERNAL_OBJECT_PATH
@@ -4037,7 +4037,7 @@ typedef struct _ATOM_DISPLAY_OBJECT_PATH_TABLE
UCHAR ucNumOfDispPath;
UCHAR ucVersion;
UCHAR ucPadding[2];
- ATOM_DISPLAY_OBJECT_PATH asDispPath[1];
+ ATOM_DISPLAY_OBJECT_PATH asDispPath[];
}ATOM_DISPLAY_OBJECT_PATH_TABLE;
@@ -4053,7 +4053,7 @@ typedef struct _ATOM_OBJECT_TABLE //Above 4 object table
{
UCHAR ucNumberOfObjects;
UCHAR ucPadding[3];
- ATOM_OBJECT asObjects[1];
+ ATOM_OBJECT asObjects[];
}ATOM_OBJECT_TABLE;
typedef struct _ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT //usSrcDstTableOffset pointing to this structure
@@ -4615,7 +4615,7 @@ typedef struct _ATOM_GPIO_VOLTAGE_OBJECT_V3
UCHAR ucPhaseDelay; // phase delay in unit of micro second
UCHAR ucReserved;
ULONG ulGpioMaskVal; // GPIO Mask value
- VOLTAGE_LUT_ENTRY_V2 asVolGpioLut[1];
+ VOLTAGE_LUT_ENTRY_V2 asVolGpioLut[];
}ATOM_GPIO_VOLTAGE_OBJECT_V3;
typedef struct _ATOM_LEAKAGE_VOLTAGE_OBJECT_V3
@@ -7964,7 +7964,7 @@ typedef struct {
typedef struct {
VFCT_IMAGE_HEADER VbiosHeader;
- UCHAR VbiosContent[1];
+ UCHAR VbiosContent[];
}GOP_VBIOS_CONTENT;
typedef struct {
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index d28d3acb3ba1..ade13173921b 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -24,11 +24,10 @@
* Alex Deucher
*/
-#include <drm/drm_crtc_helper.h>
-#include <drm/drm_fb_helper.h>
#include <drm/drm_fixed.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
+#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_vblank.h>
#include <drm/radeon_drm.h>
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index c841c273222e..1471c3a96602 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -30,6 +30,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_file.h>
+#include <drm/drm_modeset_helper_vtables.h>
#include <drm/radeon_drm.h>
#include <acpi/video.h>
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 621ff174dff3..7b0cfeaddcec 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -31,7 +31,6 @@
#include <linux/slab.h>
#include <drm/drm.h>
-#include <drm/drm_crtc_helper.h>
#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/radeon_drm.h>
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 2e7161acd443..57e20780a458 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -73,8 +73,7 @@
#include <linux/mmu_notifier.h>
#endif
-#include <drm/ttm/ttm_bo_api.h>
-#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_execbuf_util.h>
diff --git a/drivers/gpu/drm/radeon/radeon_acpi.c b/drivers/gpu/drm/radeon/radeon_acpi.c
index b603c0b77075..5771d1fcb073 100644
--- a/drivers/gpu/drm/radeon/radeon_acpi.c
+++ b/drivers/gpu/drm/radeon/radeon_acpi.c
@@ -22,6 +22,7 @@
*/
#include <linux/acpi.h>
+#include <linux/backlight.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/power_supply.h>
@@ -30,7 +31,6 @@
#include <acpi/acpi_bus.h>
#include <acpi/video.h>
-#include <drm/drm_crtc_helper.h>
#include <drm/drm_probe_helper.h>
#include "atom.h"
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index bfacf8fe5cc1..802b5af19261 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -30,7 +30,6 @@
#include <linux/pci.h>
#include <linux/vgaarb.h>
-#include <drm/drm_crtc_helper.h>
#include <drm/radeon_drm.h>
#include "atom.h"
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index f7431d224604..07193cd0c417 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -27,7 +27,7 @@
#include <drm/display/drm_dp_mst_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_crtc_helper.h>
-#include <drm/drm_fb_helper.h>
+#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_probe_helper.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 6344454a7721..afbb3a80c0c6 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1023,6 +1023,7 @@ void radeon_atombios_fini(struct radeon_device *rdev)
{
if (rdev->mode_info.atom_context) {
kfree(rdev->mode_info.atom_context->scratch);
+ kfree(rdev->mode_info.atom_context->iio);
}
kfree(rdev->mode_info.atom_context);
rdev->mode_info.atom_context = NULL;
@@ -1772,7 +1773,6 @@ int radeon_gpu_reset(struct radeon_device *rdev)
bool saved = false;
int i, r;
- int resched;
down_write(&rdev->exclusive_lock);
@@ -1784,8 +1784,6 @@ int radeon_gpu_reset(struct radeon_device *rdev)
atomic_inc(&rdev->gpu_reset_counter);
radeon_save_bios_scratch_regs(rdev);
- /* block TTM */
- resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
radeon_suspend(rdev);
radeon_hpd_fini(rdev);
@@ -1844,8 +1842,6 @@ int radeon_gpu_reset(struct radeon_device *rdev)
/* reset hpd state */
radeon_hpd_init(rdev);
- ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
-
rdev->in_reset = true;
rdev->needs_reset = false;
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 9bed1a6cb163..f34a7f63261d 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -38,6 +38,7 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_modeset_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
#include <drm/radeon_drm.h>
diff --git a/drivers/gpu/drm/radeon/radeon_dp_auxch.c b/drivers/gpu/drm/radeon/radeon_dp_auxch.c
index 69379b95146e..1e5b6baf76a1 100644
--- a/drivers/gpu/drm/radeon/radeon_dp_auxch.c
+++ b/drivers/gpu/drm/radeon/radeon_dp_auxch.c
@@ -158,7 +158,7 @@ radeon_dp_aux_transfer_native(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg
} while (retry_count++ < 1000);
if (retry_count >= 1000) {
- DRM_ERROR("auxch hw never signalled completion, error %08x\n", tmp);
+ dev_err(rdev->dev, "auxch hw never signalled completion, error %08x\n", tmp);
ret = -EIO;
goto done;
}
@@ -168,8 +168,7 @@ radeon_dp_aux_transfer_native(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg
goto done;
}
if (tmp & AUX_RX_ERROR_FLAGS) {
- DRM_DEBUG_KMS_RATELIMITED("dp_aux_ch flags not zero: %08x\n",
- tmp);
+ drm_dbg_kms_ratelimited(dev, "dp_aux_ch flags not zero: %08x\n", tmp);
ret = -EIO;
goto done;
}
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 6cbe1ab81aba..716ab85a376b 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -38,9 +38,7 @@
#include <linux/pci.h>
#include <drm/drm_aperture.h>
-#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fb_helper.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_ioctl.h>
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index fbc0a2182318..b3518a8f95a0 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -26,7 +26,6 @@
#include <linux/pci.h>
-#include <drm/drm_crtc_helper.h>
#include <drm/drm_device.h>
#include <drm/radeon_drm.h>
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index c1710ed1cab8..c4807f0c43bc 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -277,10 +277,6 @@ static int radeonfb_create(struct drm_fb_helper *helper,
drm_fb_helper_fill_info(info, &rfbdev->helper, sizes);
- /* setup aperture base/size for vesafb takeover */
- info->apertures->ranges[0].base = rdev->mc.aper_base;
- info->apertures->ranges[0].size = rdev->mc.aper_size;
-
/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
if (info->screen_base == NULL) {
@@ -352,7 +348,7 @@ int radeon_fbdev_init(struct radeon_device *rdev)
rfbdev->rdev = rdev;
rdev->mode_info.rfbdev = rfbdev;
- drm_fb_helper_prepare(rdev->ddev, &rfbdev->helper,
+ drm_fb_helper_prepare(rdev->ddev, &rfbdev->helper, bpp_sel,
&radeon_fb_helper_funcs);
ret = drm_fb_helper_init(rdev->ddev, &rfbdev->helper);
@@ -362,7 +358,7 @@ int radeon_fbdev_init(struct radeon_device *rdev)
/* disable all the possible outputs/crtcs before entering KMS mode */
drm_helper_disable_unused_functions(rdev->ddev);
- ret = drm_fb_helper_initial_config(&rfbdev->helper, bpp_sel);
+ ret = drm_fb_helper_initial_config(&rfbdev->helper);
if (ret)
goto fini;
@@ -371,6 +367,7 @@ int radeon_fbdev_init(struct radeon_device *rdev)
fini:
drm_fb_helper_fini(&rfbdev->helper);
free:
+ drm_fb_helper_unprepare(&rfbdev->helper);
kfree(rfbdev);
return ret;
}
@@ -381,6 +378,7 @@ void radeon_fbdev_fini(struct radeon_device *rdev)
return;
radeon_fbdev_destroy(rdev->ddev, rdev->mode_info.rfbdev);
+ drm_fb_helper_unprepare(&rdev->mode_info.rfbdev->helper);
kfree(rdev->mode_info.rfbdev);
rdev->mode_info.rfbdev = NULL;
}
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index da2173435edd..3377fbc71f65 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -29,7 +29,6 @@
#include <linux/pci.h>
#include <linux/pm_runtime.h>
-#include <drm/drm_crtc_helper.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_probe_helper.h>
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index 6072ed5f2dd3..825b351ff53c 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -24,11 +24,10 @@
* Alex Deucher
*/
-#include <drm/drm_crtc_helper.h>
-#include <drm/drm_fb_helper.h>
#include <drm/drm_fixed.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
+#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_vblank.h>
#include <drm/radeon_drm.h>
@@ -322,7 +321,7 @@ static void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
*/
if (rdev->flags & RADEON_SINGLE_CRTC)
crtc_ext_cntl = RADEON_CRTC_CRT_ON;
-
+
switch (mode) {
case DRM_MODE_DPMS_ON:
radeon_crtc->enabled = true;
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index 0cd32c65456c..601d35d34eab 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -27,9 +27,9 @@
#include <linux/backlight.h>
#include <linux/pci.h>
-#include <drm/drm_crtc_helper.h>
#include <drm/drm_device.h>
#include <drm/drm_file.h>
+#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_util.h>
#include <drm/radeon_drm.h>
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_tv.c b/drivers/gpu/drm/radeon/radeon_legacy_tv.c
index d9df7f311e76..12e180b119ac 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_tv.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_tv.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: MIT
-#include <drm/drm_crtc_helper.h>
#include <drm/drm_device.h>
#include "radeon.h"
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 9f5be416454f..3a59d016e8cd 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -35,7 +35,7 @@
#include <drm/drm_edid.h>
#include <drm/drm_encoder.h>
#include <drm/drm_fixed.h>
-#include <drm/drm_crtc_helper.h>
+#include <drm/drm_modeset_helper_vtables.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 04c693ca419a..cbc554928bcc 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -1853,11 +1853,10 @@ static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish
static void radeon_dynpm_idle_work_handler(struct work_struct *work)
{
struct radeon_device *rdev;
- int resched;
+
rdev = container_of(work, struct radeon_device,
pm.dynpm_idle_work.work);
- resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
mutex_lock(&rdev->pm.mutex);
if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) {
int not_processed = 0;
@@ -1908,7 +1907,6 @@ static void radeon_dynpm_idle_work_handler(struct work_struct *work)
msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
}
mutex_unlock(&rdev->pm.mutex);
- ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
}
/*
diff --git a/drivers/gpu/drm/radeon/radeon_prime.c b/drivers/gpu/drm/radeon/radeon_prime.c
index 42a87948e28c..b3cfc99f4d7e 100644
--- a/drivers/gpu/drm/radeon/radeon_prime.c
+++ b/drivers/gpu/drm/radeon/radeon_prime.c
@@ -29,6 +29,8 @@
#include <drm/drm_prime.h>
#include <drm/radeon_drm.h>
+#include <drm/ttm/ttm_tt.h>
+
#include "radeon.h"
#include "radeon_prime.h"
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 30402b5ce4c5..1e8e287e113c 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -42,10 +42,10 @@
#include <drm/drm_file.h>
#include <drm/drm_prime.h>
#include <drm/radeon_drm.h>
-#include <drm/ttm/ttm_bo_api.h>
-#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_range_manager.h>
+#include <drm/ttm/ttm_tt.h>
#include "radeon_reg.h"
#include "radeon.h"
diff --git a/drivers/gpu/drm/rcar-du/Kconfig b/drivers/gpu/drm/rcar-du/Kconfig
index b2bddbeca878..53c356aed5d5 100644
--- a/drivers/gpu/drm/rcar-du/Kconfig
+++ b/drivers/gpu/drm/rcar-du/Kconfig
@@ -25,6 +25,7 @@ config DRM_RCAR_CMM
config DRM_RCAR_DW_HDMI
tristate "R-Car Gen3 and RZ/G2 DU HDMI Encoder Support"
depends on DRM && OF
+ depends on DRM_RCAR_DU || COMPILE_TEST
select DRM_DW_HDMI
help
Enable support for R-Car Gen3 or RZ/G2 internal HDMI encoder.
@@ -32,6 +33,7 @@ config DRM_RCAR_DW_HDMI
config DRM_RCAR_USE_LVDS
bool "R-Car DU LVDS Encoder Support"
depends on DRM_BRIDGE && OF
+ depends on DRM_RCAR_DU || COMPILE_TEST
default DRM_RCAR_DU
help
Enable support for the R-Car Display Unit embedded LVDS encoders.
@@ -39,12 +41,15 @@ config DRM_RCAR_USE_LVDS
config DRM_RCAR_LVDS
def_tristate DRM_RCAR_DU
depends on DRM_RCAR_USE_LVDS
+ depends on PM
select DRM_KMS_HELPER
select DRM_PANEL
+ select RESET_CONTROLLER
config DRM_RCAR_USE_MIPI_DSI
bool "R-Car DU MIPI DSI Encoder Support"
depends on DRM_BRIDGE && OF
+ depends on DRM_RCAR_DU || COMPILE_TEST
default DRM_RCAR_DU
help
Enable support for the R-Car Display Unit embedded MIPI DSI encoders.
@@ -53,6 +58,7 @@ config DRM_RCAR_MIPI_DSI
def_tristate DRM_RCAR_DU
depends on DRM_RCAR_USE_MIPI_DSI
select DRM_MIPI_DSI
+ select RESET_CONTROLLER
config DRM_RZG2L_MIPI_DSI
tristate "RZ/G2L MIPI DSI Encoder Support"
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
index 3619e1ddeb62..008e172ed43b 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
@@ -10,7 +10,6 @@
#include <linux/clk.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
-#include <linux/sys_soc.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
@@ -204,11 +203,6 @@ static void rcar_du_escr_divider(struct clk *clk, unsigned long target,
}
}
-static const struct soc_device_attribute rcar_du_r8a7795_es1[] = {
- { .soc_id = "r8a7795", .revision = "ES1.*" },
- { /* sentinel */ }
-};
-
static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc)
{
const struct drm_display_mode *mode = &rcrtc->crtc.state->adjusted_mode;
@@ -238,7 +232,7 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc)
* no post-divider when a display PLL is present (as shown by
* the workaround breaking HDMI output on M3-W during testing).
*/
- if (soc_device_match(rcar_du_r8a7795_es1)) {
+ if (rcdu->info->quirks & RCAR_DU_QUIRK_H3_ES1_PCLK_STABILITY) {
target *= 2;
div = 1;
}
@@ -251,13 +245,30 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc)
| DPLLCR_N(dpll.n) | DPLLCR_M(dpll.m)
| DPLLCR_STBY;
- if (rcrtc->index == 1)
+ if (rcrtc->index == 1) {
dpllcr |= DPLLCR_PLCS1
| DPLLCR_INCS_DOTCLKIN1;
- else
- dpllcr |= DPLLCR_PLCS0
+ } else {
+ dpllcr |= DPLLCR_PLCS0_PLL
| DPLLCR_INCS_DOTCLKIN0;
+ /*
+ * On ES2.x we have a single mux controlled via bit 21,
+ * which selects between DCLKIN source (bit 21 = 0) and
+ * a PLL source (bit 21 = 1), where the PLL is always
+ * PLL1.
+ *
+ * On ES1.x we have an additional mux, controlled
+ * via bit 20, for choosing between PLL0 (bit 20 = 0)
+ * and PLL1 (bit 20 = 1). We always want to use PLL1,
+ * so on ES1.x, in addition to setting bit 21, we need
+ * to set the bit 20.
+ */
+
+ if (rcdu->info->quirks & RCAR_DU_QUIRK_H3_ES1_PLL)
+ dpllcr |= DPLLCR_PLCS0_H3ES1X_PLL1;
+ }
+
rcar_du_group_write(rcrtc->group, DPLLCR, dpllcr);
escr = ESCR_DCLKSEL_DCLKIN | div;
@@ -287,10 +298,12 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc)
escr = params.escr;
}
- dev_dbg(rcrtc->dev->dev, "%s: ESCR 0x%08x\n", __func__, escr);
+ if (rcdu->info->gen < 4) {
+ dev_dbg(rcrtc->dev->dev, "%s: ESCR 0x%08x\n", __func__, escr);
- rcar_du_crtc_write(rcrtc, rcrtc->index % 2 ? ESCR13 : ESCR02, escr);
- rcar_du_crtc_write(rcrtc, rcrtc->index % 2 ? OTAR13 : OTAR02, 0);
+ rcar_du_crtc_write(rcrtc, rcrtc->index % 2 ? ESCR13 : ESCR02, escr);
+ rcar_du_crtc_write(rcrtc, rcrtc->index % 2 ? OTAR13 : OTAR02, 0);
+ }
/* Signal polarities */
dsmr = ((mode->flags & DRM_MODE_FLAG_PVSYNC) ? DSMR_VSL : 0)
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
index d003e8d9e7a2..b9a94c5260e9 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
@@ -16,6 +16,7 @@
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/slab.h>
+#include <linux/sys_soc.h>
#include <linux/wait.h>
#include <drm/drm_atomic_helper.h>
@@ -386,6 +387,43 @@ static const struct rcar_du_device_info rcar_du_r8a7795_info = {
.dpll_mask = BIT(2) | BIT(1),
};
+static const struct rcar_du_device_info rcar_du_r8a7795_es1_info = {
+ .gen = 3,
+ .features = RCAR_DU_FEATURE_CRTC_IRQ
+ | RCAR_DU_FEATURE_CRTC_CLOCK
+ | RCAR_DU_FEATURE_VSP1_SOURCE
+ | RCAR_DU_FEATURE_INTERLACED
+ | RCAR_DU_FEATURE_TVM_SYNC,
+ .quirks = RCAR_DU_QUIRK_H3_ES1_PCLK_STABILITY
+ | RCAR_DU_QUIRK_H3_ES1_PLL,
+ .channels_mask = BIT(3) | BIT(2) | BIT(1) | BIT(0),
+ .routes = {
+ /*
+ * R8A7795 has one RGB output, two HDMI outputs and one
+ * LVDS output.
+ */
+ [RCAR_DU_OUTPUT_DPAD0] = {
+ .possible_crtcs = BIT(3),
+ .port = 0,
+ },
+ [RCAR_DU_OUTPUT_HDMI0] = {
+ .possible_crtcs = BIT(1),
+ .port = 1,
+ },
+ [RCAR_DU_OUTPUT_HDMI1] = {
+ .possible_crtcs = BIT(2),
+ .port = 2,
+ },
+ [RCAR_DU_OUTPUT_LVDS0] = {
+ .possible_crtcs = BIT(0),
+ .port = 3,
+ },
+ },
+ .num_lvds = 1,
+ .num_rpf = 5,
+ .dpll_mask = BIT(2) | BIT(1),
+};
+
static const struct rcar_du_device_info rcar_du_r8a7796_info = {
.gen = 3,
.features = RCAR_DU_FEATURE_CRTC_IRQ
@@ -504,7 +542,7 @@ static const struct rcar_du_device_info rcar_du_r8a7799x_info = {
};
static const struct rcar_du_device_info rcar_du_r8a779a0_info = {
- .gen = 3,
+ .gen = 4,
.features = RCAR_DU_FEATURE_CRTC_IRQ
| RCAR_DU_FEATURE_VSP1_SOURCE
| RCAR_DU_FEATURE_NO_BLENDING,
@@ -524,6 +562,27 @@ static const struct rcar_du_device_info rcar_du_r8a779a0_info = {
.dsi_clk_mask = BIT(1) | BIT(0),
};
+static const struct rcar_du_device_info rcar_du_r8a779g0_info = {
+ .gen = 4,
+ .features = RCAR_DU_FEATURE_CRTC_IRQ
+ | RCAR_DU_FEATURE_VSP1_SOURCE
+ | RCAR_DU_FEATURE_NO_BLENDING,
+ .channels_mask = BIT(1) | BIT(0),
+ .routes = {
+ /* R8A779G0 has two MIPI DSI outputs. */
+ [RCAR_DU_OUTPUT_DSI0] = {
+ .possible_crtcs = BIT(0),
+ .port = 0,
+ },
+ [RCAR_DU_OUTPUT_DSI1] = {
+ .possible_crtcs = BIT(1),
+ .port = 1,
+ },
+ },
+ .num_rpf = 5,
+ .dsi_clk_mask = BIT(1) | BIT(0),
+};
+
static const struct of_device_id rcar_du_of_table[] = {
{ .compatible = "renesas,du-r8a7742", .data = &rcar_du_r8a7790_info },
{ .compatible = "renesas,du-r8a7743", .data = &rzg1_du_r8a7743_info },
@@ -549,11 +608,17 @@ static const struct of_device_id rcar_du_of_table[] = {
{ .compatible = "renesas,du-r8a77990", .data = &rcar_du_r8a7799x_info },
{ .compatible = "renesas,du-r8a77995", .data = &rcar_du_r8a7799x_info },
{ .compatible = "renesas,du-r8a779a0", .data = &rcar_du_r8a779a0_info },
+ { .compatible = "renesas,du-r8a779g0", .data = &rcar_du_r8a779g0_info },
{ }
};
MODULE_DEVICE_TABLE(of, rcar_du_of_table);
+static const struct soc_device_attribute rcar_du_soc_table[] = {
+ { .soc_id = "r8a7795", .revision = "ES1.*", .data = &rcar_du_r8a7795_es1_info },
+ { /* sentinel */ }
+};
+
const char *rcar_du_output_name(enum rcar_du_output output)
{
static const char * const names[] = {
@@ -599,7 +664,6 @@ static const struct drm_driver rcar_du_driver = {
* Power management
*/
-#ifdef CONFIG_PM_SLEEP
static int rcar_du_pm_suspend(struct device *dev)
{
struct rcar_du_device *rcdu = dev_get_drvdata(dev);
@@ -613,11 +677,9 @@ static int rcar_du_pm_resume(struct device *dev)
return drm_mode_config_helper_resume(&rcdu->ddev);
}
-#endif
-static const struct dev_pm_ops rcar_du_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(rcar_du_pm_suspend, rcar_du_pm_resume)
-};
+static DEFINE_SIMPLE_DEV_PM_OPS(rcar_du_pm_ops,
+ rcar_du_pm_suspend, rcar_du_pm_resume);
/* -----------------------------------------------------------------------------
* Platform driver
@@ -645,6 +707,7 @@ static void rcar_du_shutdown(struct platform_device *pdev)
static int rcar_du_probe(struct platform_device *pdev)
{
+ const struct soc_device_attribute *soc_attr;
struct rcar_du_device *rcdu;
unsigned int mask;
int ret;
@@ -659,8 +722,13 @@ static int rcar_du_probe(struct platform_device *pdev)
return PTR_ERR(rcdu);
rcdu->dev = &pdev->dev;
+
rcdu->info = of_device_get_match_data(rcdu->dev);
+ soc_attr = soc_device_match(rcar_du_soc_table);
+ if (soc_attr)
+ rcdu->info = soc_attr->data;
+
platform_set_drvdata(pdev, rcdu);
/* I/O resources */
@@ -712,7 +780,7 @@ static struct platform_driver rcar_du_platform_driver = {
.shutdown = rcar_du_shutdown,
.driver = {
.name = "rcar-du",
- .pm = &rcar_du_pm_ops,
+ .pm = pm_sleep_ptr(&rcar_du_pm_ops),
.of_match_table = rcar_du_of_table,
},
};
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.h b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
index 5cfa2bb7ad93..acc3673fefe1 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
@@ -34,6 +34,8 @@ struct rcar_du_device;
#define RCAR_DU_FEATURE_NO_BLENDING BIT(5) /* PnMR.SPIM does not have ALP nor EOR bits */
#define RCAR_DU_QUIRK_ALIGN_128B BIT(0) /* Align pitches to 128 bytes */
+#define RCAR_DU_QUIRK_H3_ES1_PCLK_STABILITY BIT(1) /* H3 ES1 has pclk stability issue */
+#define RCAR_DU_QUIRK_H3_ES1_PLL BIT(2) /* H3 ES1 PLL setup differs from non-ES1 */
enum rcar_du_output {
RCAR_DU_OUTPUT_DPAD0,
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_group.c b/drivers/gpu/drm/rcar-du/rcar_du_group.c
index 1fe8581577ed..152602236377 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_group.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_group.c
@@ -107,7 +107,7 @@ static void rcar_du_group_setup_didsr(struct rcar_du_group *rgrp)
*/
rcrtc = rcdu->crtcs;
num_crtcs = rcdu->num_crtcs;
- } else if (rcdu->info->gen == 3 && rgrp->num_crtcs > 1) {
+ } else if (rcdu->info->gen >= 3 && rgrp->num_crtcs > 1) {
/*
* On Gen3 dot clocks are setup through per-group registers,
* only available when the group has two channels.
@@ -148,19 +148,23 @@ static void rcar_du_group_setup(struct rcar_du_group *rgrp)
}
rcar_du_group_write(rgrp, DEFR5, DEFR5_CODE | DEFR5_DEFE5);
- rcar_du_group_setup_pins(rgrp);
+ if (rcdu->info->gen < 4)
+ rcar_du_group_setup_pins(rgrp);
- /*
- * TODO: Handle routing of the DU output to CMM dynamically, as we
- * should bypass CMM completely when no color management feature is
- * used.
- */
- defr7 |= (rgrp->cmms_mask & BIT(1) ? DEFR7_CMME1 : 0) |
- (rgrp->cmms_mask & BIT(0) ? DEFR7_CMME0 : 0);
- rcar_du_group_write(rgrp, DEFR7, defr7);
+ if (rcdu->info->gen < 4) {
+ /*
+ * TODO: Handle routing of the DU output to CMM dynamically, as
+ * we should bypass CMM completely when no color management
+ * feature is used.
+ */
+ defr7 |= (rgrp->cmms_mask & BIT(1) ? DEFR7_CMME1 : 0) |
+ (rgrp->cmms_mask & BIT(0) ? DEFR7_CMME0 : 0);
+ rcar_du_group_write(rgrp, DEFR7, defr7);
+ }
if (rcdu->info->gen >= 2) {
- rcar_du_group_setup_defr8(rgrp);
+ if (rcdu->info->gen < 4)
+ rcar_du_group_setup_defr8(rgrp);
rcar_du_group_setup_didsr(rgrp);
}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
index 8c2719efda2a..adfb36b0e815 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
@@ -260,6 +260,24 @@ static const struct rcar_du_format_info rcar_du_format_infos[] = {
.planes = 1,
.hsub = 1,
}, {
+ .fourcc = DRM_FORMAT_RGBX1010102,
+ .v4l2 = V4L2_PIX_FMT_RGBX1010102,
+ .bpp = 32,
+ .planes = 1,
+ .hsub = 1,
+ }, {
+ .fourcc = DRM_FORMAT_RGBA1010102,
+ .v4l2 = V4L2_PIX_FMT_RGBA1010102,
+ .bpp = 32,
+ .planes = 1,
+ .hsub = 1,
+ }, {
+ .fourcc = DRM_FORMAT_ARGB2101010,
+ .v4l2 = V4L2_PIX_FMT_ARGB2101010,
+ .bpp = 32,
+ .planes = 1,
+ .hsub = 1,
+ }, {
.fourcc = DRM_FORMAT_YVYU,
.v4l2 = V4L2_PIX_FMT_YVYU,
.bpp = 16,
@@ -307,6 +325,18 @@ static const struct rcar_du_format_info rcar_du_format_infos[] = {
.bpp = 24,
.planes = 3,
.hsub = 1,
+ }, {
+ .fourcc = DRM_FORMAT_Y210,
+ .v4l2 = V4L2_PIX_FMT_Y210,
+ .bpp = 32,
+ .planes = 1,
+ .hsub = 2,
+ }, {
+ .fourcc = DRM_FORMAT_Y212,
+ .v4l2 = V4L2_PIX_FMT_Y212,
+ .bpp = 32,
+ .planes = 1,
+ .hsub = 2,
},
};
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_regs.h b/drivers/gpu/drm/rcar-du/rcar_du_regs.h
index c1bcb0e8b5b4..789ae9285108 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_regs.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_regs.h
@@ -283,12 +283,8 @@
#define DPLLCR 0x20044
#define DPLLCR_CODE (0x95 << 24)
#define DPLLCR_PLCS1 (1 << 23)
-/*
- * PLCS0 is bit 21, but H3 ES1.x requires bit 20 to be set as well. As bit 20
- * isn't implemented by other SoC in the Gen3 family it can safely be set
- * unconditionally.
- */
-#define DPLLCR_PLCS0 (3 << 20)
+#define DPLLCR_PLCS0_PLL (1 << 21)
+#define DPLLCR_PLCS0_H3ES1X_PLL1 (1 << 20)
#define DPLLCR_CLKE (1 << 18)
#define DPLLCR_FDPLL(n) ((n) << 12)
#define DPLLCR_N(n) ((n) << 5)
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
index e465aef41585..fe90be51d64e 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
@@ -139,6 +139,43 @@ static const u32 rcar_du_vsp_formats[] = {
DRM_FORMAT_YVU444,
};
+/*
+ * Gen4 supports the same formats as above, and additionally 2-10-10-10 RGB
+ * formats and Y210 & Y212 formats.
+ */
+static const u32 rcar_du_vsp_formats_gen4[] = {
+ DRM_FORMAT_RGB332,
+ DRM_FORMAT_ARGB4444,
+ DRM_FORMAT_XRGB4444,
+ DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_BGR888,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_BGRA8888,
+ DRM_FORMAT_BGRX8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_RGBX1010102,
+ DRM_FORMAT_RGBA1010102,
+ DRM_FORMAT_ARGB2101010,
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_YVYU,
+ DRM_FORMAT_NV12,
+ DRM_FORMAT_NV21,
+ DRM_FORMAT_NV16,
+ DRM_FORMAT_NV61,
+ DRM_FORMAT_YUV420,
+ DRM_FORMAT_YVU420,
+ DRM_FORMAT_YUV422,
+ DRM_FORMAT_YVU422,
+ DRM_FORMAT_YUV444,
+ DRM_FORMAT_YVU444,
+ DRM_FORMAT_Y210,
+ DRM_FORMAT_Y212,
+};
+
static void rcar_du_vsp_plane_setup(struct rcar_du_vsp_plane *plane)
{
struct rcar_du_vsp_plane_state *state =
@@ -436,14 +473,23 @@ int rcar_du_vsp_init(struct rcar_du_vsp *vsp, struct device_node *np,
? DRM_PLANE_TYPE_PRIMARY
: DRM_PLANE_TYPE_OVERLAY;
struct rcar_du_vsp_plane *plane = &vsp->planes[i];
+ unsigned int num_formats;
+ const u32 *formats;
+
+ if (rcdu->info->gen < 4) {
+ num_formats = ARRAY_SIZE(rcar_du_vsp_formats);
+ formats = rcar_du_vsp_formats;
+ } else {
+ num_formats = ARRAY_SIZE(rcar_du_vsp_formats_gen4);
+ formats = rcar_du_vsp_formats_gen4;
+ }
plane->vsp = vsp;
plane->index = i;
ret = drm_universal_plane_init(&rcdu->ddev, &plane->plane,
crtcs, &rcar_du_vsp_plane_funcs,
- rcar_du_vsp_formats,
- ARRAY_SIZE(rcar_du_vsp_formats),
+ formats, num_formats,
NULL, type, NULL);
if (ret < 0)
return ret;
diff --git a/drivers/gpu/drm/rcar-du/rcar_lvds.c b/drivers/gpu/drm/rcar-du/rcar_lvds.c
index 81a060c2fe3f..260ea5d8624e 100644
--- a/drivers/gpu/drm/rcar-du/rcar_lvds.c
+++ b/drivers/gpu/drm/rcar-du/rcar_lvds.c
@@ -16,6 +16,8 @@
#include <linux/of_device.h>
#include <linux/of_graph.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/sys_soc.h>
@@ -60,6 +62,7 @@ struct rcar_lvds_device_info {
struct rcar_lvds {
struct device *dev;
const struct rcar_lvds_device_info *info;
+ struct reset_control *rstc;
struct drm_bridge bridge;
@@ -80,6 +83,11 @@ struct rcar_lvds {
#define bridge_to_rcar_lvds(b) \
container_of(b, struct rcar_lvds, bridge)
+static u32 rcar_lvds_read(struct rcar_lvds *lvds, u32 reg)
+{
+ return ioread32(lvds->mmio + reg);
+}
+
static void rcar_lvds_write(struct rcar_lvds *lvds, u32 reg, u32 data)
{
iowrite32(data, lvds->mmio + reg);
@@ -316,8 +324,8 @@ int rcar_lvds_pclk_enable(struct drm_bridge *bridge, unsigned long freq)
dev_dbg(lvds->dev, "enabling LVDS PLL, freq=%luHz\n", freq);
- ret = clk_prepare_enable(lvds->clocks.mod);
- if (ret < 0)
+ ret = pm_runtime_resume_and_get(lvds->dev);
+ if (ret)
return ret;
__rcar_lvds_pll_setup_d3_e3(lvds, freq, true);
@@ -337,7 +345,7 @@ void rcar_lvds_pclk_disable(struct drm_bridge *bridge)
rcar_lvds_write(lvds, LVDPLLCR, 0);
- clk_disable_unprepare(lvds->clocks.mod);
+ pm_runtime_put_sync(lvds->dev);
}
EXPORT_SYMBOL_GPL(rcar_lvds_pclk_disable);
@@ -396,8 +404,8 @@ static void __rcar_lvds_atomic_enable(struct drm_bridge *bridge,
u32 lvdcr0;
int ret;
- ret = clk_prepare_enable(lvds->clocks.mod);
- if (ret < 0)
+ ret = pm_runtime_resume_and_get(lvds->dev);
+ if (ret)
return;
/* Enable the companion LVDS encoder in dual-link mode. */
@@ -541,6 +549,32 @@ static void rcar_lvds_atomic_disable(struct drm_bridge *bridge,
struct drm_bridge_state *old_bridge_state)
{
struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge);
+ u32 lvdcr0;
+
+ /*
+ * Clear the LVDCR0 bits in the order specified by the hardware
+ * documentation, ending with a write of 0 to the full register to
+ * clear all remaining bits.
+ */
+ lvdcr0 = rcar_lvds_read(lvds, LVDCR0);
+
+ lvdcr0 &= ~LVDCR0_LVRES;
+ rcar_lvds_write(lvds, LVDCR0, lvdcr0);
+
+ if (lvds->info->quirks & RCAR_LVDS_QUIRK_GEN3_LVEN) {
+ lvdcr0 &= ~LVDCR0_LVEN;
+ rcar_lvds_write(lvds, LVDCR0, lvdcr0);
+ }
+
+ if (lvds->info->quirks & RCAR_LVDS_QUIRK_PWD) {
+ lvdcr0 &= ~LVDCR0_PWD;
+ rcar_lvds_write(lvds, LVDCR0, lvdcr0);
+ }
+
+ if (!(lvds->info->quirks & RCAR_LVDS_QUIRK_EXT_PLL)) {
+ lvdcr0 &= ~LVDCR0_PLLON;
+ rcar_lvds_write(lvds, LVDCR0, lvdcr0);
+ }
rcar_lvds_write(lvds, LVDCR0, 0);
rcar_lvds_write(lvds, LVDCR1, 0);
@@ -551,7 +585,7 @@ static void rcar_lvds_atomic_disable(struct drm_bridge *bridge,
lvds->companion->funcs->atomic_disable(lvds->companion,
old_bridge_state);
- clk_disable_unprepare(lvds->clocks.mod);
+ pm_runtime_put_sync(lvds->dev);
}
static bool rcar_lvds_mode_fixup(struct drm_bridge *bridge,
@@ -844,6 +878,13 @@ static int rcar_lvds_probe(struct platform_device *pdev)
if (ret < 0)
return ret;
+ lvds->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
+ if (IS_ERR(lvds->rstc))
+ return dev_err_probe(&pdev->dev, PTR_ERR(lvds->rstc),
+ "failed to get cpg reset\n");
+
+ pm_runtime_enable(&pdev->dev);
+
drm_bridge_add(&lvds->bridge);
return 0;
@@ -855,6 +896,8 @@ static int rcar_lvds_remove(struct platform_device *pdev)
drm_bridge_remove(&lvds->bridge);
+ pm_runtime_disable(&pdev->dev);
+
return 0;
}
@@ -913,11 +956,48 @@ static const struct of_device_id rcar_lvds_of_table[] = {
MODULE_DEVICE_TABLE(of, rcar_lvds_of_table);
+static int rcar_lvds_runtime_suspend(struct device *dev)
+{
+ struct rcar_lvds *lvds = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(lvds->clocks.mod);
+
+ reset_control_assert(lvds->rstc);
+
+ return 0;
+}
+
+static int rcar_lvds_runtime_resume(struct device *dev)
+{
+ struct rcar_lvds *lvds = dev_get_drvdata(dev);
+ int ret;
+
+ ret = reset_control_deassert(lvds->rstc);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(lvds->clocks.mod);
+ if (ret < 0)
+ goto err_reset_assert;
+
+ return 0;
+
+err_reset_assert:
+ reset_control_assert(lvds->rstc);
+
+ return ret;
+}
+
+static const struct dev_pm_ops rcar_lvds_pm_ops = {
+ SET_RUNTIME_PM_OPS(rcar_lvds_runtime_suspend, rcar_lvds_runtime_resume, NULL)
+};
+
static struct platform_driver rcar_lvds_platform_driver = {
.probe = rcar_lvds_probe,
.remove = rcar_lvds_remove,
.driver = {
.name = "rcar-lvds",
+ .pm = &rcar_lvds_pm_ops,
.of_match_table = rcar_lvds_of_table,
},
};
diff --git a/drivers/gpu/drm/rcar-du/rcar_mipi_dsi.c b/drivers/gpu/drm/rcar-du/rcar_mipi_dsi.c
index a7f2b7f66a17..e10e4d4b89a2 100644
--- a/drivers/gpu/drm/rcar-du/rcar_mipi_dsi.c
+++ b/drivers/gpu/drm/rcar-du/rcar_mipi_dsi.c
@@ -9,6 +9,7 @@
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/iopoll.h>
+#include <linux/math64.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
@@ -28,6 +29,31 @@
#include "rcar_mipi_dsi.h"
#include "rcar_mipi_dsi_regs.h"
+#define MHZ(v) ((u32)((v) * 1000000U))
+
+enum rcar_mipi_dsi_hw_model {
+ RCAR_DSI_V3U,
+ RCAR_DSI_V4H,
+};
+
+struct rcar_mipi_dsi_device_info {
+ enum rcar_mipi_dsi_hw_model model;
+
+ const struct dsi_clk_config *clk_cfg;
+
+ u8 clockset2_m_offset;
+
+ u8 n_min;
+ u8 n_max;
+ u8 n_mul;
+ unsigned long fpfd_min;
+ unsigned long fpfd_max;
+ u16 m_min;
+ u16 m_max;
+ unsigned long fout_min;
+ unsigned long fout_max;
+};
+
struct rcar_mipi_dsi {
struct device *dev;
const struct rcar_mipi_dsi_device_info *info;
@@ -50,6 +76,17 @@ struct rcar_mipi_dsi {
unsigned int lanes;
};
+struct dsi_setup_info {
+ unsigned long hsfreq;
+ u16 hsfreqrange;
+
+ unsigned long fout;
+ u16 m;
+ u16 n;
+ u16 vclk_divider;
+ const struct dsi_clk_config *clkset;
+};
+
static inline struct rcar_mipi_dsi *
bridge_to_rcar_mipi_dsi(struct drm_bridge *bridge)
{
@@ -62,65 +99,78 @@ host_to_rcar_mipi_dsi(struct mipi_dsi_host *host)
return container_of(host, struct rcar_mipi_dsi, host);
}
-static const u32 phtw[] = {
- 0x01020114, 0x01600115, /* General testing */
- 0x01030116, 0x0102011d, /* General testing */
- 0x011101a4, 0x018601a4, /* 1Gbps testing */
- 0x014201a0, 0x010001a3, /* 1Gbps testing */
- 0x0101011f, /* 1Gbps testing */
-};
-
-static const u32 phtw2[] = {
- 0x010c0130, 0x010c0140, /* General testing */
- 0x010c0150, 0x010c0180, /* General testing */
- 0x010c0190,
- 0x010a0160, 0x010a0170,
- 0x01800164, 0x01800174, /* 1Gbps testing */
-};
-
static const u32 hsfreqrange_table[][2] = {
- { 80000000U, 0x00 }, { 90000000U, 0x10 }, { 100000000U, 0x20 },
- { 110000000U, 0x30 }, { 120000000U, 0x01 }, { 130000000U, 0x11 },
- { 140000000U, 0x21 }, { 150000000U, 0x31 }, { 160000000U, 0x02 },
- { 170000000U, 0x12 }, { 180000000U, 0x22 }, { 190000000U, 0x32 },
- { 205000000U, 0x03 }, { 220000000U, 0x13 }, { 235000000U, 0x23 },
- { 250000000U, 0x33 }, { 275000000U, 0x04 }, { 300000000U, 0x14 },
- { 325000000U, 0x25 }, { 350000000U, 0x35 }, { 400000000U, 0x05 },
- { 450000000U, 0x16 }, { 500000000U, 0x26 }, { 550000000U, 0x37 },
- { 600000000U, 0x07 }, { 650000000U, 0x18 }, { 700000000U, 0x28 },
- { 750000000U, 0x39 }, { 800000000U, 0x09 }, { 850000000U, 0x19 },
- { 900000000U, 0x29 }, { 950000000U, 0x3a }, { 1000000000U, 0x0a },
- { 1050000000U, 0x1a }, { 1100000000U, 0x2a }, { 1150000000U, 0x3b },
- { 1200000000U, 0x0b }, { 1250000000U, 0x1b }, { 1300000000U, 0x2b },
- { 1350000000U, 0x3c }, { 1400000000U, 0x0c }, { 1450000000U, 0x1c },
- { 1500000000U, 0x2c }, { 1550000000U, 0x3d }, { 1600000000U, 0x0d },
- { 1650000000U, 0x1d }, { 1700000000U, 0x2e }, { 1750000000U, 0x3e },
- { 1800000000U, 0x0e }, { 1850000000U, 0x1e }, { 1900000000U, 0x2f },
- { 1950000000U, 0x3f }, { 2000000000U, 0x0f }, { 2050000000U, 0x40 },
- { 2100000000U, 0x41 }, { 2150000000U, 0x42 }, { 2200000000U, 0x43 },
- { 2250000000U, 0x44 }, { 2300000000U, 0x45 }, { 2350000000U, 0x46 },
- { 2400000000U, 0x47 }, { 2450000000U, 0x48 }, { 2500000000U, 0x49 },
+ { MHZ(80), 0x00 }, { MHZ(90), 0x10 }, { MHZ(100), 0x20 },
+ { MHZ(110), 0x30 }, { MHZ(120), 0x01 }, { MHZ(130), 0x11 },
+ { MHZ(140), 0x21 }, { MHZ(150), 0x31 }, { MHZ(160), 0x02 },
+ { MHZ(170), 0x12 }, { MHZ(180), 0x22 }, { MHZ(190), 0x32 },
+ { MHZ(205), 0x03 }, { MHZ(220), 0x13 }, { MHZ(235), 0x23 },
+ { MHZ(250), 0x33 }, { MHZ(275), 0x04 }, { MHZ(300), 0x14 },
+ { MHZ(325), 0x25 }, { MHZ(350), 0x35 }, { MHZ(400), 0x05 },
+ { MHZ(450), 0x16 }, { MHZ(500), 0x26 }, { MHZ(550), 0x37 },
+ { MHZ(600), 0x07 }, { MHZ(650), 0x18 }, { MHZ(700), 0x28 },
+ { MHZ(750), 0x39 }, { MHZ(800), 0x09 }, { MHZ(850), 0x19 },
+ { MHZ(900), 0x29 }, { MHZ(950), 0x3a }, { MHZ(1000), 0x0a },
+ { MHZ(1050), 0x1a }, { MHZ(1100), 0x2a }, { MHZ(1150), 0x3b },
+ { MHZ(1200), 0x0b }, { MHZ(1250), 0x1b }, { MHZ(1300), 0x2b },
+ { MHZ(1350), 0x3c }, { MHZ(1400), 0x0c }, { MHZ(1450), 0x1c },
+ { MHZ(1500), 0x2c }, { MHZ(1550), 0x3d }, { MHZ(1600), 0x0d },
+ { MHZ(1650), 0x1d }, { MHZ(1700), 0x2e }, { MHZ(1750), 0x3e },
+ { MHZ(1800), 0x0e }, { MHZ(1850), 0x1e }, { MHZ(1900), 0x2f },
+ { MHZ(1950), 0x3f }, { MHZ(2000), 0x0f }, { MHZ(2050), 0x40 },
+ { MHZ(2100), 0x41 }, { MHZ(2150), 0x42 }, { MHZ(2200), 0x43 },
+ { MHZ(2250), 0x44 }, { MHZ(2300), 0x45 }, { MHZ(2350), 0x46 },
+ { MHZ(2400), 0x47 }, { MHZ(2450), 0x48 }, { MHZ(2500), 0x49 },
{ /* sentinel */ },
};
-struct vco_cntrl_value {
+struct dsi_clk_config {
u32 min_freq;
u32 max_freq;
- u16 value;
+ u8 vco_cntrl;
+ u8 cpbias_cntrl;
+ u8 gmp_cntrl;
+ u8 int_cntrl;
+ u8 prop_cntrl;
};
-static const struct vco_cntrl_value vco_cntrl_table[] = {
- { .min_freq = 40000000U, .max_freq = 55000000U, .value = 0x3f },
- { .min_freq = 52500000U, .max_freq = 80000000U, .value = 0x39 },
- { .min_freq = 80000000U, .max_freq = 110000000U, .value = 0x2f },
- { .min_freq = 105000000U, .max_freq = 160000000U, .value = 0x29 },
- { .min_freq = 160000000U, .max_freq = 220000000U, .value = 0x1f },
- { .min_freq = 210000000U, .max_freq = 320000000U, .value = 0x19 },
- { .min_freq = 320000000U, .max_freq = 440000000U, .value = 0x0f },
- { .min_freq = 420000000U, .max_freq = 660000000U, .value = 0x09 },
- { .min_freq = 630000000U, .max_freq = 1149000000U, .value = 0x03 },
- { .min_freq = 1100000000U, .max_freq = 1152000000U, .value = 0x01 },
- { .min_freq = 1150000000U, .max_freq = 1250000000U, .value = 0x01 },
+static const struct dsi_clk_config dsi_clk_cfg_v3u[] = {
+ { MHZ(40), MHZ(55), 0x3f, 0x10, 0x01, 0x00, 0x0b },
+ { MHZ(52.5), MHZ(80), 0x39, 0x10, 0x01, 0x00, 0x0b },
+ { MHZ(80), MHZ(110), 0x2f, 0x10, 0x01, 0x00, 0x0b },
+ { MHZ(105), MHZ(160), 0x29, 0x10, 0x01, 0x00, 0x0b },
+ { MHZ(160), MHZ(220), 0x1f, 0x10, 0x01, 0x00, 0x0b },
+ { MHZ(210), MHZ(320), 0x19, 0x10, 0x01, 0x00, 0x0b },
+ { MHZ(320), MHZ(440), 0x0f, 0x10, 0x01, 0x00, 0x0b },
+ { MHZ(420), MHZ(660), 0x09, 0x10, 0x01, 0x00, 0x0b },
+ { MHZ(630), MHZ(1149), 0x03, 0x10, 0x01, 0x00, 0x0b },
+ { MHZ(1100), MHZ(1152), 0x01, 0x10, 0x01, 0x00, 0x0b },
+ { MHZ(1150), MHZ(1250), 0x01, 0x10, 0x01, 0x00, 0x0c },
+ { /* sentinel */ },
+};
+
+static const struct dsi_clk_config dsi_clk_cfg_v4h[] = {
+ { MHZ(40), MHZ(45.31), 0x2b, 0x00, 0x00, 0x08, 0x0a },
+ { MHZ(45.31), MHZ(54.66), 0x28, 0x00, 0x00, 0x08, 0x0a },
+ { MHZ(54.66), MHZ(62.5), 0x28, 0x00, 0x00, 0x08, 0x0a },
+ { MHZ(62.5), MHZ(75), 0x27, 0x00, 0x00, 0x08, 0x0a },
+ { MHZ(75), MHZ(90.63), 0x23, 0x00, 0x00, 0x08, 0x0a },
+ { MHZ(90.63), MHZ(109.37), 0x20, 0x00, 0x00, 0x08, 0x0a },
+ { MHZ(109.37), MHZ(125), 0x20, 0x00, 0x00, 0x08, 0x0a },
+ { MHZ(125), MHZ(150), 0x1f, 0x00, 0x00, 0x08, 0x0a },
+ { MHZ(150), MHZ(181.25), 0x1b, 0x00, 0x00, 0x08, 0x0a },
+ { MHZ(181.25), MHZ(218.75), 0x18, 0x00, 0x00, 0x08, 0x0a },
+ { MHZ(218.75), MHZ(250), 0x18, 0x00, 0x00, 0x08, 0x0a },
+ { MHZ(250), MHZ(300), 0x17, 0x00, 0x00, 0x08, 0x0a },
+ { MHZ(300), MHZ(362.5), 0x13, 0x00, 0x00, 0x08, 0x0a },
+ { MHZ(362.5), MHZ(455.48), 0x10, 0x00, 0x00, 0x08, 0x0a },
+ { MHZ(455.48), MHZ(500), 0x10, 0x00, 0x00, 0x08, 0x0a },
+ { MHZ(500), MHZ(600), 0x0f, 0x00, 0x00, 0x08, 0x0a },
+ { MHZ(600), MHZ(725), 0x0b, 0x00, 0x00, 0x08, 0x0a },
+ { MHZ(725), MHZ(875), 0x08, 0x00, 0x00, 0x08, 0x0a },
+ { MHZ(875), MHZ(1000), 0x08, 0x00, 0x00, 0x08, 0x0a },
+ { MHZ(1000), MHZ(1200), 0x07, 0x00, 0x00, 0x08, 0x0a },
+ { MHZ(1200), MHZ(1250), 0x03, 0x00, 0x00, 0x08, 0x0a },
{ /* sentinel */ },
};
@@ -144,7 +194,7 @@ static void rcar_mipi_dsi_set(struct rcar_mipi_dsi *dsi, u32 reg, u32 set)
rcar_mipi_dsi_write(dsi, reg, rcar_mipi_dsi_read(dsi, reg) | set);
}
-static int rcar_mipi_dsi_phtw_test(struct rcar_mipi_dsi *dsi, u32 phtw)
+static int rcar_mipi_dsi_write_phtw(struct rcar_mipi_dsi *dsi, u32 phtw)
{
u32 status;
int ret;
@@ -163,32 +213,181 @@ static int rcar_mipi_dsi_phtw_test(struct rcar_mipi_dsi *dsi, u32 phtw)
return ret;
}
+static int rcar_mipi_dsi_write_phtw_arr(struct rcar_mipi_dsi *dsi,
+ const u32 *phtw, unsigned int size)
+{
+ for (unsigned int i = 0; i < size; i++) {
+ int ret = rcar_mipi_dsi_write_phtw(dsi, phtw[i]);
+
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+#define WRITE_PHTW(...) \
+ ({ \
+ static const u32 phtw[] = { __VA_ARGS__ }; \
+ int ret; \
+ ret = rcar_mipi_dsi_write_phtw_arr(dsi, phtw, \
+ ARRAY_SIZE(phtw)); \
+ ret; \
+ })
+
+static int rcar_mipi_dsi_init_phtw_v3u(struct rcar_mipi_dsi *dsi)
+{
+ return WRITE_PHTW(0x01020114, 0x01600115, 0x01030116, 0x0102011d,
+ 0x011101a4, 0x018601a4, 0x014201a0, 0x010001a3,
+ 0x0101011f);
+}
+
+static int rcar_mipi_dsi_post_init_phtw_v3u(struct rcar_mipi_dsi *dsi)
+{
+ return WRITE_PHTW(0x010c0130, 0x010c0140, 0x010c0150, 0x010c0180,
+ 0x010c0190, 0x010a0160, 0x010a0170, 0x01800164,
+ 0x01800174);
+}
+
+static int rcar_mipi_dsi_init_phtw_v4h(struct rcar_mipi_dsi *dsi,
+ const struct dsi_setup_info *setup_info)
+{
+ int ret;
+
+ if (setup_info->hsfreq < MHZ(450)) {
+ ret = WRITE_PHTW(0x01010100, 0x011b01ac);
+ if (ret)
+ return ret;
+ }
+
+ ret = WRITE_PHTW(0x01010100, 0x01030173, 0x01000174, 0x01500175,
+ 0x01030176, 0x01040166, 0x010201ad);
+ if (ret)
+ return ret;
+
+ if (setup_info->hsfreq <= MHZ(1000))
+ ret = WRITE_PHTW(0x01020100, 0x01910170, 0x01020171,
+ 0x01110172);
+ else if (setup_info->hsfreq <= MHZ(1500))
+ ret = WRITE_PHTW(0x01020100, 0x01980170, 0x01030171,
+ 0x01100172);
+ else if (setup_info->hsfreq <= MHZ(2500))
+ ret = WRITE_PHTW(0x01020100, 0x0144016b, 0x01000172);
+ else
+ return -EINVAL;
+
+ if (ret)
+ return ret;
+
+ if (dsi->lanes <= 1) {
+ ret = WRITE_PHTW(0x01070100, 0x010e010b);
+ if (ret)
+ return ret;
+ }
+
+ if (dsi->lanes <= 2) {
+ ret = WRITE_PHTW(0x01090100, 0x010e010b);
+ if (ret)
+ return ret;
+ }
+
+ if (dsi->lanes <= 3) {
+ ret = WRITE_PHTW(0x010b0100, 0x010e010b);
+ if (ret)
+ return ret;
+ }
+
+ if (setup_info->hsfreq <= MHZ(1500)) {
+ ret = WRITE_PHTW(0x01010100, 0x01c0016e);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+rcar_mipi_dsi_post_init_phtw_v4h(struct rcar_mipi_dsi *dsi,
+ const struct dsi_setup_info *setup_info)
+{
+ u32 status;
+ int ret;
+
+ if (setup_info->hsfreq <= MHZ(1500)) {
+ WRITE_PHTW(0x01020100, 0x00000180);
+
+ ret = read_poll_timeout(rcar_mipi_dsi_read, status,
+ status & PHTR_TEST, 2000, 10000, false,
+ dsi, PHTR);
+ if (ret < 0) {
+ dev_err(dsi->dev, "failed to test PHTR\n");
+ return ret;
+ }
+
+ WRITE_PHTW(0x01010100, 0x0100016e);
+ }
+
+ return 0;
+}
+
/* -----------------------------------------------------------------------------
* Hardware Setup
*/
-struct dsi_setup_info {
- unsigned long fout;
- u16 vco_cntrl;
- u16 prop_cntrl;
- u16 hsfreqrange;
- u16 div;
- unsigned int m;
- unsigned int n;
-};
+static void rcar_mipi_dsi_pll_calc(struct rcar_mipi_dsi *dsi,
+ unsigned long fin_rate,
+ unsigned long fout_target,
+ struct dsi_setup_info *setup_info)
+{
+ unsigned int best_err = -1;
+ const struct rcar_mipi_dsi_device_info *info = dsi->info;
+
+ for (unsigned int n = info->n_min; n <= info->n_max; n++) {
+ unsigned long fpfd;
+
+ fpfd = fin_rate / n;
+
+ if (fpfd < info->fpfd_min || fpfd > info->fpfd_max)
+ continue;
+
+ for (unsigned int m = info->m_min; m <= info->m_max; m++) {
+ unsigned int err;
+ u64 fout;
+
+ fout = div64_u64((u64)fpfd * m, dsi->info->n_mul);
+
+ if (fout < info->fout_min || fout > info->fout_max)
+ continue;
+
+ fout = div64_u64(fout, setup_info->vclk_divider);
+
+ if (fout < setup_info->clkset->min_freq ||
+ fout > setup_info->clkset->max_freq)
+ continue;
+
+ err = abs((long)(fout - fout_target) * 10000 /
+ (long)fout_target);
+ if (err < best_err) {
+ setup_info->m = m;
+ setup_info->n = n;
+ setup_info->fout = (unsigned long)fout;
+ best_err = err;
+
+ if (err == 0)
+ return;
+ }
+ }
+ }
+}
static void rcar_mipi_dsi_parameters_calc(struct rcar_mipi_dsi *dsi,
struct clk *clk, unsigned long target,
struct dsi_setup_info *setup_info)
{
- const struct vco_cntrl_value *vco_cntrl;
+ const struct dsi_clk_config *clk_cfg;
unsigned long fout_target;
- unsigned long fin, fout;
- unsigned long hsfreq;
- unsigned int best_err = -1;
- unsigned int divider;
- unsigned int n;
+ unsigned long fin_rate;
unsigned int i;
unsigned int err;
@@ -198,70 +397,53 @@ static void rcar_mipi_dsi_parameters_calc(struct rcar_mipi_dsi *dsi,
*/
fout_target = target * mipi_dsi_pixel_format_to_bpp(dsi->format)
/ (2 * dsi->lanes);
- if (fout_target < 40000000 || fout_target > 1250000000)
+ if (fout_target < MHZ(40) || fout_target > MHZ(1250))
return;
- /* Find vco_cntrl */
- for (vco_cntrl = vco_cntrl_table; vco_cntrl->min_freq != 0; vco_cntrl++) {
- if (fout_target > vco_cntrl->min_freq &&
- fout_target <= vco_cntrl->max_freq) {
- setup_info->vco_cntrl = vco_cntrl->value;
- if (fout_target >= 1150000000)
- setup_info->prop_cntrl = 0x0c;
- else
- setup_info->prop_cntrl = 0x0b;
+ /* Find PLL settings */
+ for (clk_cfg = dsi->info->clk_cfg; clk_cfg->min_freq != 0; clk_cfg++) {
+ if (fout_target > clk_cfg->min_freq &&
+ fout_target <= clk_cfg->max_freq) {
+ setup_info->clkset = clk_cfg;
break;
}
}
- /* Add divider */
- setup_info->div = (setup_info->vco_cntrl & 0x30) >> 4;
+ fin_rate = clk_get_rate(clk);
+
+ switch (dsi->info->model) {
+ case RCAR_DSI_V3U:
+ default:
+ setup_info->vclk_divider = 1 << ((clk_cfg->vco_cntrl >> 4) & 0x3);
+ break;
+
+ case RCAR_DSI_V4H:
+ setup_info->vclk_divider = 1 << (((clk_cfg->vco_cntrl >> 3) & 0x7) + 1);
+ break;
+ }
+
+ rcar_mipi_dsi_pll_calc(dsi, fin_rate, fout_target, setup_info);
/* Find hsfreqrange */
- hsfreq = fout_target * 2;
+ setup_info->hsfreq = setup_info->fout * 2;
for (i = 0; i < ARRAY_SIZE(hsfreqrange_table); i++) {
- if (hsfreqrange_table[i][0] >= hsfreq) {
+ if (hsfreqrange_table[i][0] >= setup_info->hsfreq) {
setup_info->hsfreqrange = hsfreqrange_table[i][1];
break;
}
}
- /*
- * Calculate n and m for PLL clock
- * Following the HW manual the ranges of n and m are
- * n = [3-8] and m = [64-625]
- */
- fin = clk_get_rate(clk);
- divider = 1 << setup_info->div;
- for (n = 3; n < 9; n++) {
- unsigned long fpfd;
- unsigned int m;
-
- fpfd = fin / n;
-
- for (m = 64; m < 626; m++) {
- fout = fpfd * m / divider;
- err = abs((long)(fout - fout_target) * 10000 /
- (long)fout_target);
- if (err < best_err) {
- setup_info->m = m - 2;
- setup_info->n = n - 1;
- setup_info->fout = fout;
- best_err = err;
- if (err == 0)
- goto done;
- }
- }
- }
+ err = abs((long)(setup_info->fout - fout_target) * 10000 / (long)fout_target);
-done:
dev_dbg(dsi->dev,
- "%pC %lu Hz -> Fout %lu Hz (target %lu Hz, error %d.%02u%%), PLL M/N/DIV %u/%u/%u\n",
- clk, fin, setup_info->fout, fout_target, best_err / 100,
- best_err % 100, setup_info->m, setup_info->n, setup_info->div);
+ "Fout = %u * %lu / (%u * %u * %u) = %lu (target %lu Hz, error %d.%02u%%)\n",
+ setup_info->m, fin_rate, dsi->info->n_mul, setup_info->n,
+ setup_info->vclk_divider, setup_info->fout, fout_target,
+ err / 100, err % 100);
+
dev_dbg(dsi->dev,
"vco_cntrl = 0x%x\tprop_cntrl = 0x%x\thsfreqrange = 0x%x\n",
- setup_info->vco_cntrl, setup_info->prop_cntrl,
+ clk_cfg->vco_cntrl, clk_cfg->prop_cntrl,
setup_info->hsfreqrange);
}
@@ -324,7 +506,7 @@ static int rcar_mipi_dsi_startup(struct rcar_mipi_dsi *dsi,
{
struct dsi_setup_info setup_info = {};
unsigned int timeout;
- int ret, i;
+ int ret;
int dsi_format;
u32 phy_setup;
u32 clockset2, clockset3;
@@ -360,10 +542,19 @@ static int rcar_mipi_dsi_startup(struct rcar_mipi_dsi *dsi,
phy_setup |= PHYSETUP_HSFREQRANGE(setup_info.hsfreqrange);
rcar_mipi_dsi_write(dsi, PHYSETUP, phy_setup);
- for (i = 0; i < ARRAY_SIZE(phtw); i++) {
- ret = rcar_mipi_dsi_phtw_test(dsi, phtw[i]);
+ switch (dsi->info->model) {
+ case RCAR_DSI_V3U:
+ default:
+ ret = rcar_mipi_dsi_init_phtw_v3u(dsi);
if (ret < 0)
return ret;
+ break;
+
+ case RCAR_DSI_V4H:
+ ret = rcar_mipi_dsi_init_phtw_v4h(dsi, &setup_info);
+ if (ret < 0)
+ return ret;
+ break;
}
/* PLL Clock Setting */
@@ -371,12 +562,13 @@ static int rcar_mipi_dsi_startup(struct rcar_mipi_dsi *dsi,
rcar_mipi_dsi_set(dsi, CLOCKSET1, CLOCKSET1_SHADOW_CLEAR);
rcar_mipi_dsi_clr(dsi, CLOCKSET1, CLOCKSET1_SHADOW_CLEAR);
- clockset2 = CLOCKSET2_M(setup_info.m) | CLOCKSET2_N(setup_info.n)
- | CLOCKSET2_VCO_CNTRL(setup_info.vco_cntrl);
- clockset3 = CLOCKSET3_PROP_CNTRL(setup_info.prop_cntrl)
- | CLOCKSET3_INT_CNTRL(0)
- | CLOCKSET3_CPBIAS_CNTRL(0x10)
- | CLOCKSET3_GMP_CNTRL(1);
+ clockset2 = CLOCKSET2_M(setup_info.m - dsi->info->clockset2_m_offset)
+ | CLOCKSET2_N(setup_info.n - 1)
+ | CLOCKSET2_VCO_CNTRL(setup_info.clkset->vco_cntrl);
+ clockset3 = CLOCKSET3_PROP_CNTRL(setup_info.clkset->prop_cntrl)
+ | CLOCKSET3_INT_CNTRL(setup_info.clkset->int_cntrl)
+ | CLOCKSET3_CPBIAS_CNTRL(setup_info.clkset->cpbias_cntrl)
+ | CLOCKSET3_GMP_CNTRL(setup_info.clkset->gmp_cntrl);
rcar_mipi_dsi_write(dsi, CLOCKSET2, clockset2);
rcar_mipi_dsi_write(dsi, CLOCKSET3, clockset3);
@@ -407,10 +599,19 @@ static int rcar_mipi_dsi_startup(struct rcar_mipi_dsi *dsi,
return -ETIMEDOUT;
}
- for (i = 0; i < ARRAY_SIZE(phtw2); i++) {
- ret = rcar_mipi_dsi_phtw_test(dsi, phtw2[i]);
+ switch (dsi->info->model) {
+ case RCAR_DSI_V3U:
+ default:
+ ret = rcar_mipi_dsi_post_init_phtw_v3u(dsi);
+ if (ret < 0)
+ return ret;
+ break;
+
+ case RCAR_DSI_V4H:
+ ret = rcar_mipi_dsi_post_init_phtw_v4h(dsi, &setup_info);
if (ret < 0)
return ret;
+ break;
}
/* Enable DOT clock */
@@ -427,8 +628,19 @@ static int rcar_mipi_dsi_startup(struct rcar_mipi_dsi *dsi,
dev_warn(dsi->dev, "unsupported format");
return -EINVAL;
}
- vclkset |= VCLKSET_COLOR_RGB | VCLKSET_DIV(setup_info.div)
- | VCLKSET_LANE(dsi->lanes - 1);
+
+ vclkset |= VCLKSET_COLOR_RGB | VCLKSET_LANE(dsi->lanes - 1);
+
+ switch (dsi->info->model) {
+ case RCAR_DSI_V3U:
+ default:
+ vclkset |= VCLKSET_DIV_V3U(__ffs(setup_info.vclk_divider));
+ break;
+
+ case RCAR_DSI_V4H:
+ vclkset |= VCLKSET_DIV_V4H(__ffs(setup_info.vclk_divider) - 1);
+ break;
+ }
rcar_mipi_dsi_write(dsi, VCLKSET, vclkset);
@@ -841,8 +1053,39 @@ static int rcar_mipi_dsi_remove(struct platform_device *pdev)
return 0;
}
+static const struct rcar_mipi_dsi_device_info v3u_data = {
+ .model = RCAR_DSI_V3U,
+ .clk_cfg = dsi_clk_cfg_v3u,
+ .clockset2_m_offset = 2,
+ .n_min = 3,
+ .n_max = 8,
+ .n_mul = 1,
+ .fpfd_min = MHZ(2),
+ .fpfd_max = MHZ(8),
+ .m_min = 64,
+ .m_max = 625,
+ .fout_min = MHZ(320),
+ .fout_max = MHZ(1250),
+};
+
+static const struct rcar_mipi_dsi_device_info v4h_data = {
+ .model = RCAR_DSI_V4H,
+ .clk_cfg = dsi_clk_cfg_v4h,
+ .clockset2_m_offset = 0,
+ .n_min = 1,
+ .n_max = 8,
+ .n_mul = 2,
+ .fpfd_min = MHZ(8),
+ .fpfd_max = MHZ(24),
+ .m_min = 167,
+ .m_max = 1000,
+ .fout_min = MHZ(2000),
+ .fout_max = MHZ(4000),
+};
+
static const struct of_device_id rcar_mipi_dsi_of_table[] = {
- { .compatible = "renesas,r8a779a0-dsi-csi2-tx" },
+ { .compatible = "renesas,r8a779a0-dsi-csi2-tx", .data = &v3u_data },
+ { .compatible = "renesas,r8a779g0-dsi-csi2-tx", .data = &v4h_data },
{ }
};
diff --git a/drivers/gpu/drm/rcar-du/rcar_mipi_dsi_regs.h b/drivers/gpu/drm/rcar-du/rcar_mipi_dsi_regs.h
index 2eaca54636f3..f8114d11f2d1 100644
--- a/drivers/gpu/drm/rcar-du/rcar_mipi_dsi_regs.h
+++ b/drivers/gpu/drm/rcar-du/rcar_mipi_dsi_regs.h
@@ -122,7 +122,8 @@
#define VCLKSET_CKEN (1 << 16)
#define VCLKSET_COLOR_RGB (0 << 8)
#define VCLKSET_COLOR_YCC (1 << 8)
-#define VCLKSET_DIV(x) (((x) & 0x3) << 4)
+#define VCLKSET_DIV_V3U(x) (((x) & 0x3) << 4)
+#define VCLKSET_DIV_V4H(x) (((x) & 0x7) << 4)
#define VCLKSET_BPP_16 (0 << 2)
#define VCLKSET_BPP_18 (1 << 2)
#define VCLKSET_BPP_18L (2 << 2)
@@ -166,6 +167,9 @@
#define PHTW_CWEN (1 << 8)
#define PHTW_TESTDIN_CODE(x) (((x) & 0xff) << 0)
+#define PHTR 0x1038
+#define PHTR_TEST (1 << 16)
+
#define PHTC 0x103c
#define PHTC_TESTCLR (1 << 0)
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
index 6edb7c52cb3d..8ea09d915c3c 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
@@ -251,8 +251,7 @@ static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj,
* We allocated a struct page table for rk_obj, so clear
* VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap().
*/
- vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
- vma->vm_flags &= ~VM_PFNMAP;
+ vm_flags_mod(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP, VM_PFNMAP);
vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
index 8cecf81a5ae0..ba3b81789509 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
@@ -25,7 +25,6 @@
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_blend.h>
#include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_flip_work.h>
#include <drm/drm_framebuffer.h>
diff --git a/drivers/gpu/drm/savage/Makefile b/drivers/gpu/drm/savage/Makefile
deleted file mode 100644
index 3e520763d259..000000000000
--- a/drivers/gpu/drm/savage/Makefile
+++ /dev/null
@@ -1,9 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# Makefile for the drm device driver. This driver provides support for the
-# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
-
-savage-y := savage_drv.o savage_bci.o savage_state.o
-
-obj-$(CONFIG_DRM_SAVAGE)+= savage.o
-
diff --git a/drivers/gpu/drm/savage/savage_bci.c b/drivers/gpu/drm/savage/savage_bci.c
deleted file mode 100644
index e33385dfe3ed..000000000000
--- a/drivers/gpu/drm/savage/savage_bci.c
+++ /dev/null
@@ -1,1082 +0,0 @@
-/* savage_bci.c -- BCI support for Savage
- *
- * Copyright 2004 Felix Kuehling
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sub license,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NON-INFRINGEMENT. IN NO EVENT SHALL FELIX KUEHLING BE LIABLE FOR
- * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
- * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include <linux/delay.h>
-#include <linux/pci.h>
-#include <linux/slab.h>
-#include <linux/uaccess.h>
-
-#include <drm/drm_device.h>
-#include <drm/drm_file.h>
-#include <drm/drm_print.h>
-#include <drm/savage_drm.h>
-
-#include "savage_drv.h"
-
-/* Need a long timeout for shadow status updates can take a while
- * and so can waiting for events when the queue is full. */
-#define SAVAGE_DEFAULT_USEC_TIMEOUT 1000000 /* 1s */
-#define SAVAGE_EVENT_USEC_TIMEOUT 5000000 /* 5s */
-#define SAVAGE_FREELIST_DEBUG 0
-
-static int savage_do_cleanup_bci(struct drm_device *dev);
-
-static int
-savage_bci_wait_fifo_shadow(drm_savage_private_t * dev_priv, unsigned int n)
-{
- uint32_t mask = dev_priv->status_used_mask;
- uint32_t threshold = dev_priv->bci_threshold_hi;
- uint32_t status;
- int i;
-
-#if SAVAGE_BCI_DEBUG
- if (n > dev_priv->cob_size + SAVAGE_BCI_FIFO_SIZE - threshold)
- DRM_ERROR("Trying to emit %d words "
- "(more than guaranteed space in COB)\n", n);
-#endif
-
- for (i = 0; i < SAVAGE_DEFAULT_USEC_TIMEOUT; i++) {
- mb();
- status = dev_priv->status_ptr[0];
- if ((status & mask) < threshold)
- return 0;
- udelay(1);
- }
-
-#if SAVAGE_BCI_DEBUG
- DRM_ERROR("failed!\n");
- DRM_INFO(" status=0x%08x, threshold=0x%08x\n", status, threshold);
-#endif
- return -EBUSY;
-}
-
-static int
-savage_bci_wait_fifo_s3d(drm_savage_private_t * dev_priv, unsigned int n)
-{
- uint32_t maxUsed = dev_priv->cob_size + SAVAGE_BCI_FIFO_SIZE - n;
- uint32_t status;
- int i;
-
- for (i = 0; i < SAVAGE_DEFAULT_USEC_TIMEOUT; i++) {
- status = SAVAGE_READ(SAVAGE_STATUS_WORD0);
- if ((status & SAVAGE_FIFO_USED_MASK_S3D) <= maxUsed)
- return 0;
- udelay(1);
- }
-
-#if SAVAGE_BCI_DEBUG
- DRM_ERROR("failed!\n");
- DRM_INFO(" status=0x%08x\n", status);
-#endif
- return -EBUSY;
-}
-
-static int
-savage_bci_wait_fifo_s4(drm_savage_private_t * dev_priv, unsigned int n)
-{
- uint32_t maxUsed = dev_priv->cob_size + SAVAGE_BCI_FIFO_SIZE - n;
- uint32_t status;
- int i;
-
- for (i = 0; i < SAVAGE_DEFAULT_USEC_TIMEOUT; i++) {
- status = SAVAGE_READ(SAVAGE_ALT_STATUS_WORD0);
- if ((status & SAVAGE_FIFO_USED_MASK_S4) <= maxUsed)
- return 0;
- udelay(1);
- }
-
-#if SAVAGE_BCI_DEBUG
- DRM_ERROR("failed!\n");
- DRM_INFO(" status=0x%08x\n", status);
-#endif
- return -EBUSY;
-}
-
-/*
- * Waiting for events.
- *
- * The BIOSresets the event tag to 0 on mode changes. Therefore we
- * never emit 0 to the event tag. If we find a 0 event tag we know the
- * BIOS stomped on it and return success assuming that the BIOS waited
- * for engine idle.
- *
- * Note: if the Xserver uses the event tag it has to follow the same
- * rule. Otherwise there may be glitches every 2^16 events.
- */
-static int
-savage_bci_wait_event_shadow(drm_savage_private_t * dev_priv, uint16_t e)
-{
- uint32_t status;
- int i;
-
- for (i = 0; i < SAVAGE_EVENT_USEC_TIMEOUT; i++) {
- mb();
- status = dev_priv->status_ptr[1];
- if ((((status & 0xffff) - e) & 0xffff) <= 0x7fff ||
- (status & 0xffff) == 0)
- return 0;
- udelay(1);
- }
-
-#if SAVAGE_BCI_DEBUG
- DRM_ERROR("failed!\n");
- DRM_INFO(" status=0x%08x, e=0x%04x\n", status, e);
-#endif
-
- return -EBUSY;
-}
-
-static int
-savage_bci_wait_event_reg(drm_savage_private_t * dev_priv, uint16_t e)
-{
- uint32_t status;
- int i;
-
- for (i = 0; i < SAVAGE_EVENT_USEC_TIMEOUT; i++) {
- status = SAVAGE_READ(SAVAGE_STATUS_WORD1);
- if ((((status & 0xffff) - e) & 0xffff) <= 0x7fff ||
- (status & 0xffff) == 0)
- return 0;
- udelay(1);
- }
-
-#if SAVAGE_BCI_DEBUG
- DRM_ERROR("failed!\n");
- DRM_INFO(" status=0x%08x, e=0x%04x\n", status, e);
-#endif
-
- return -EBUSY;
-}
-
-uint16_t savage_bci_emit_event(drm_savage_private_t * dev_priv,
- unsigned int flags)
-{
- uint16_t count;
- BCI_LOCALS;
-
- if (dev_priv->status_ptr) {
- /* coordinate with Xserver */
- count = dev_priv->status_ptr[1023];
- if (count < dev_priv->event_counter)
- dev_priv->event_wrap++;
- } else {
- count = dev_priv->event_counter;
- }
- count = (count + 1) & 0xffff;
- if (count == 0) {
- count++; /* See the comment above savage_wait_event_*. */
- dev_priv->event_wrap++;
- }
- dev_priv->event_counter = count;
- if (dev_priv->status_ptr)
- dev_priv->status_ptr[1023] = (uint32_t) count;
-
- if ((flags & (SAVAGE_WAIT_2D | SAVAGE_WAIT_3D))) {
- unsigned int wait_cmd = BCI_CMD_WAIT;
- if ((flags & SAVAGE_WAIT_2D))
- wait_cmd |= BCI_CMD_WAIT_2D;
- if ((flags & SAVAGE_WAIT_3D))
- wait_cmd |= BCI_CMD_WAIT_3D;
- BEGIN_BCI(2);
- BCI_WRITE(wait_cmd);
- } else {
- BEGIN_BCI(1);
- }
- BCI_WRITE(BCI_CMD_UPDATE_EVENT_TAG | (uint32_t) count);
-
- return count;
-}
-
-/*
- * Freelist management
- */
-static int savage_freelist_init(struct drm_device * dev)
-{
- drm_savage_private_t *dev_priv = dev->dev_private;
- struct drm_device_dma *dma = dev->dma;
- struct drm_buf *buf;
- drm_savage_buf_priv_t *entry;
- int i;
- DRM_DEBUG("count=%d\n", dma->buf_count);
-
- dev_priv->head.next = &dev_priv->tail;
- dev_priv->head.prev = NULL;
- dev_priv->head.buf = NULL;
-
- dev_priv->tail.next = NULL;
- dev_priv->tail.prev = &dev_priv->head;
- dev_priv->tail.buf = NULL;
-
- for (i = 0; i < dma->buf_count; i++) {
- buf = dma->buflist[i];
- entry = buf->dev_private;
-
- SET_AGE(&entry->age, 0, 0);
- entry->buf = buf;
-
- entry->next = dev_priv->head.next;
- entry->prev = &dev_priv->head;
- dev_priv->head.next->prev = entry;
- dev_priv->head.next = entry;
- }
-
- return 0;
-}
-
-static struct drm_buf *savage_freelist_get(struct drm_device * dev)
-{
- drm_savage_private_t *dev_priv = dev->dev_private;
- drm_savage_buf_priv_t *tail = dev_priv->tail.prev;
- uint16_t event;
- unsigned int wrap;
- DRM_DEBUG("\n");
-
- UPDATE_EVENT_COUNTER();
- if (dev_priv->status_ptr)
- event = dev_priv->status_ptr[1] & 0xffff;
- else
- event = SAVAGE_READ(SAVAGE_STATUS_WORD1) & 0xffff;
- wrap = dev_priv->event_wrap;
- if (event > dev_priv->event_counter)
- wrap--; /* hardware hasn't passed the last wrap yet */
-
- DRM_DEBUG(" tail=0x%04x %d\n", tail->age.event, tail->age.wrap);
- DRM_DEBUG(" head=0x%04x %d\n", event, wrap);
-
- if (tail->buf && (TEST_AGE(&tail->age, event, wrap) || event == 0)) {
- drm_savage_buf_priv_t *next = tail->next;
- drm_savage_buf_priv_t *prev = tail->prev;
- prev->next = next;
- next->prev = prev;
- tail->next = tail->prev = NULL;
- return tail->buf;
- }
-
- DRM_DEBUG("returning NULL, tail->buf=%p!\n", tail->buf);
- return NULL;
-}
-
-void savage_freelist_put(struct drm_device * dev, struct drm_buf * buf)
-{
- drm_savage_private_t *dev_priv = dev->dev_private;
- drm_savage_buf_priv_t *entry = buf->dev_private, *prev, *next;
-
- DRM_DEBUG("age=0x%04x wrap=%d\n", entry->age.event, entry->age.wrap);
-
- if (entry->next != NULL || entry->prev != NULL) {
- DRM_ERROR("entry already on freelist.\n");
- return;
- }
-
- prev = &dev_priv->head;
- next = prev->next;
- prev->next = entry;
- next->prev = entry;
- entry->prev = prev;
- entry->next = next;
-}
-
-/*
- * Command DMA
- */
-static int savage_dma_init(drm_savage_private_t * dev_priv)
-{
- unsigned int i;
-
- dev_priv->nr_dma_pages = dev_priv->cmd_dma->size /
- (SAVAGE_DMA_PAGE_SIZE * 4);
- dev_priv->dma_pages = kmalloc_array(dev_priv->nr_dma_pages,
- sizeof(drm_savage_dma_page_t),
- GFP_KERNEL);
- if (dev_priv->dma_pages == NULL)
- return -ENOMEM;
-
- for (i = 0; i < dev_priv->nr_dma_pages; ++i) {
- SET_AGE(&dev_priv->dma_pages[i].age, 0, 0);
- dev_priv->dma_pages[i].used = 0;
- dev_priv->dma_pages[i].flushed = 0;
- }
- SET_AGE(&dev_priv->last_dma_age, 0, 0);
-
- dev_priv->first_dma_page = 0;
- dev_priv->current_dma_page = 0;
-
- return 0;
-}
-
-void savage_dma_reset(drm_savage_private_t * dev_priv)
-{
- uint16_t event;
- unsigned int wrap, i;
- event = savage_bci_emit_event(dev_priv, 0);
- wrap = dev_priv->event_wrap;
- for (i = 0; i < dev_priv->nr_dma_pages; ++i) {
- SET_AGE(&dev_priv->dma_pages[i].age, event, wrap);
- dev_priv->dma_pages[i].used = 0;
- dev_priv->dma_pages[i].flushed = 0;
- }
- SET_AGE(&dev_priv->last_dma_age, event, wrap);
- dev_priv->first_dma_page = dev_priv->current_dma_page = 0;
-}
-
-void savage_dma_wait(drm_savage_private_t * dev_priv, unsigned int page)
-{
- uint16_t event;
- unsigned int wrap;
-
- /* Faked DMA buffer pages don't age. */
- if (dev_priv->cmd_dma == &dev_priv->fake_dma)
- return;
-
- UPDATE_EVENT_COUNTER();
- if (dev_priv->status_ptr)
- event = dev_priv->status_ptr[1] & 0xffff;
- else
- event = SAVAGE_READ(SAVAGE_STATUS_WORD1) & 0xffff;
- wrap = dev_priv->event_wrap;
- if (event > dev_priv->event_counter)
- wrap--; /* hardware hasn't passed the last wrap yet */
-
- if (dev_priv->dma_pages[page].age.wrap > wrap ||
- (dev_priv->dma_pages[page].age.wrap == wrap &&
- dev_priv->dma_pages[page].age.event > event)) {
- if (dev_priv->wait_evnt(dev_priv,
- dev_priv->dma_pages[page].age.event)
- < 0)
- DRM_ERROR("wait_evnt failed!\n");
- }
-}
-
-uint32_t *savage_dma_alloc(drm_savage_private_t * dev_priv, unsigned int n)
-{
- unsigned int cur = dev_priv->current_dma_page;
- unsigned int rest = SAVAGE_DMA_PAGE_SIZE -
- dev_priv->dma_pages[cur].used;
- unsigned int nr_pages = (n - rest + SAVAGE_DMA_PAGE_SIZE - 1) /
- SAVAGE_DMA_PAGE_SIZE;
- uint32_t *dma_ptr;
- unsigned int i;
-
- DRM_DEBUG("cur=%u, cur->used=%u, n=%u, rest=%u, nr_pages=%u\n",
- cur, dev_priv->dma_pages[cur].used, n, rest, nr_pages);
-
- if (cur + nr_pages < dev_priv->nr_dma_pages) {
- dma_ptr = (uint32_t *) dev_priv->cmd_dma->handle +
- cur * SAVAGE_DMA_PAGE_SIZE + dev_priv->dma_pages[cur].used;
- if (n < rest)
- rest = n;
- dev_priv->dma_pages[cur].used += rest;
- n -= rest;
- cur++;
- } else {
- dev_priv->dma_flush(dev_priv);
- nr_pages =
- (n + SAVAGE_DMA_PAGE_SIZE - 1) / SAVAGE_DMA_PAGE_SIZE;
- for (i = cur; i < dev_priv->nr_dma_pages; ++i) {
- dev_priv->dma_pages[i].age = dev_priv->last_dma_age;
- dev_priv->dma_pages[i].used = 0;
- dev_priv->dma_pages[i].flushed = 0;
- }
- dma_ptr = (uint32_t *) dev_priv->cmd_dma->handle;
- dev_priv->first_dma_page = cur = 0;
- }
- for (i = cur; nr_pages > 0; ++i, --nr_pages) {
-#if SAVAGE_DMA_DEBUG
- if (dev_priv->dma_pages[i].used) {
- DRM_ERROR("unflushed page %u: used=%u\n",
- i, dev_priv->dma_pages[i].used);
- }
-#endif
- if (n > SAVAGE_DMA_PAGE_SIZE)
- dev_priv->dma_pages[i].used = SAVAGE_DMA_PAGE_SIZE;
- else
- dev_priv->dma_pages[i].used = n;
- n -= SAVAGE_DMA_PAGE_SIZE;
- }
- dev_priv->current_dma_page = --i;
-
- DRM_DEBUG("cur=%u, cur->used=%u, n=%u\n",
- i, dev_priv->dma_pages[i].used, n);
-
- savage_dma_wait(dev_priv, dev_priv->current_dma_page);
-
- return dma_ptr;
-}
-
-static void savage_dma_flush(drm_savage_private_t * dev_priv)
-{
- unsigned int first = dev_priv->first_dma_page;
- unsigned int cur = dev_priv->current_dma_page;
- uint16_t event;
- unsigned int wrap, pad, align, len, i;
- unsigned long phys_addr;
- BCI_LOCALS;
-
- if (first == cur &&
- dev_priv->dma_pages[cur].used == dev_priv->dma_pages[cur].flushed)
- return;
-
- /* pad length to multiples of 2 entries
- * align start of next DMA block to multiles of 8 entries */
- pad = -dev_priv->dma_pages[cur].used & 1;
- align = -(dev_priv->dma_pages[cur].used + pad) & 7;
-
- DRM_DEBUG("first=%u, cur=%u, first->flushed=%u, cur->used=%u, "
- "pad=%u, align=%u\n",
- first, cur, dev_priv->dma_pages[first].flushed,
- dev_priv->dma_pages[cur].used, pad, align);
-
- /* pad with noops */
- if (pad) {
- uint32_t *dma_ptr = (uint32_t *) dev_priv->cmd_dma->handle +
- cur * SAVAGE_DMA_PAGE_SIZE + dev_priv->dma_pages[cur].used;
- dev_priv->dma_pages[cur].used += pad;
- while (pad != 0) {
- *dma_ptr++ = BCI_CMD_WAIT;
- pad--;
- }
- }
-
- mb();
-
- /* do flush ... */
- phys_addr = dev_priv->cmd_dma->offset +
- (first * SAVAGE_DMA_PAGE_SIZE +
- dev_priv->dma_pages[first].flushed) * 4;
- len = (cur - first) * SAVAGE_DMA_PAGE_SIZE +
- dev_priv->dma_pages[cur].used - dev_priv->dma_pages[first].flushed;
-
- DRM_DEBUG("phys_addr=%lx, len=%u\n",
- phys_addr | dev_priv->dma_type, len);
-
- BEGIN_BCI(3);
- BCI_SET_REGISTERS(SAVAGE_DMABUFADDR, 1);
- BCI_WRITE(phys_addr | dev_priv->dma_type);
- BCI_DMA(len);
-
- /* fix alignment of the start of the next block */
- dev_priv->dma_pages[cur].used += align;
-
- /* age DMA pages */
- event = savage_bci_emit_event(dev_priv, 0);
- wrap = dev_priv->event_wrap;
- for (i = first; i < cur; ++i) {
- SET_AGE(&dev_priv->dma_pages[i].age, event, wrap);
- dev_priv->dma_pages[i].used = 0;
- dev_priv->dma_pages[i].flushed = 0;
- }
- /* age the current page only when it's full */
- if (dev_priv->dma_pages[cur].used == SAVAGE_DMA_PAGE_SIZE) {
- SET_AGE(&dev_priv->dma_pages[cur].age, event, wrap);
- dev_priv->dma_pages[cur].used = 0;
- dev_priv->dma_pages[cur].flushed = 0;
- /* advance to next page */
- cur++;
- if (cur == dev_priv->nr_dma_pages)
- cur = 0;
- dev_priv->first_dma_page = dev_priv->current_dma_page = cur;
- } else {
- dev_priv->first_dma_page = cur;
- dev_priv->dma_pages[cur].flushed = dev_priv->dma_pages[i].used;
- }
- SET_AGE(&dev_priv->last_dma_age, event, wrap);
-
- DRM_DEBUG("first=cur=%u, cur->used=%u, cur->flushed=%u\n", cur,
- dev_priv->dma_pages[cur].used,
- dev_priv->dma_pages[cur].flushed);
-}
-
-static void savage_fake_dma_flush(drm_savage_private_t * dev_priv)
-{
- unsigned int i, j;
- BCI_LOCALS;
-
- if (dev_priv->first_dma_page == dev_priv->current_dma_page &&
- dev_priv->dma_pages[dev_priv->current_dma_page].used == 0)
- return;
-
- DRM_DEBUG("first=%u, cur=%u, cur->used=%u\n",
- dev_priv->first_dma_page, dev_priv->current_dma_page,
- dev_priv->dma_pages[dev_priv->current_dma_page].used);
-
- for (i = dev_priv->first_dma_page;
- i <= dev_priv->current_dma_page && dev_priv->dma_pages[i].used;
- ++i) {
- uint32_t *dma_ptr = (uint32_t *) dev_priv->cmd_dma->handle +
- i * SAVAGE_DMA_PAGE_SIZE;
-#if SAVAGE_DMA_DEBUG
- /* Sanity check: all pages except the last one must be full. */
- if (i < dev_priv->current_dma_page &&
- dev_priv->dma_pages[i].used != SAVAGE_DMA_PAGE_SIZE) {
- DRM_ERROR("partial DMA page %u: used=%u",
- i, dev_priv->dma_pages[i].used);
- }
-#endif
- BEGIN_BCI(dev_priv->dma_pages[i].used);
- for (j = 0; j < dev_priv->dma_pages[i].used; ++j) {
- BCI_WRITE(dma_ptr[j]);
- }
- dev_priv->dma_pages[i].used = 0;
- }
-
- /* reset to first page */
- dev_priv->first_dma_page = dev_priv->current_dma_page = 0;
-}
-
-int savage_driver_load(struct drm_device *dev, unsigned long chipset)
-{
- struct pci_dev *pdev = to_pci_dev(dev->dev);
- drm_savage_private_t *dev_priv;
-
- dev_priv = kzalloc(sizeof(drm_savage_private_t), GFP_KERNEL);
- if (dev_priv == NULL)
- return -ENOMEM;
-
- dev->dev_private = (void *)dev_priv;
-
- dev_priv->chipset = (enum savage_family)chipset;
-
- pci_set_master(pdev);
-
- return 0;
-}
-
-
-/*
- * Initialize mappings. On Savage4 and SavageIX the alignment
- * and size of the aperture is not suitable for automatic MTRR setup
- * in drm_legacy_addmap. Therefore we add them manually before the maps are
- * initialized, and tear them down on last close.
- */
-int savage_driver_firstopen(struct drm_device *dev)
-{
- drm_savage_private_t *dev_priv = dev->dev_private;
- struct pci_dev *pdev = to_pci_dev(dev->dev);
- unsigned long mmio_base, fb_base, fb_size, aperture_base;
- int ret = 0;
-
- if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
- fb_base = pci_resource_start(pdev, 0);
- fb_size = SAVAGE_FB_SIZE_S3;
- mmio_base = fb_base + SAVAGE_FB_SIZE_S3;
- aperture_base = fb_base + SAVAGE_APERTURE_OFFSET;
- /* this should always be true */
- if (pci_resource_len(pdev, 0) == 0x08000000) {
- /* Don't make MMIO write-cobining! We need 3
- * MTRRs. */
- dev_priv->mtrr_handles[0] =
- arch_phys_wc_add(fb_base, 0x01000000);
- dev_priv->mtrr_handles[1] =
- arch_phys_wc_add(fb_base + 0x02000000,
- 0x02000000);
- dev_priv->mtrr_handles[2] =
- arch_phys_wc_add(fb_base + 0x04000000,
- 0x04000000);
- } else {
- DRM_ERROR("strange pci_resource_len %08llx\n",
- (unsigned long long)
- pci_resource_len(pdev, 0));
- }
- } else if (dev_priv->chipset != S3_SUPERSAVAGE &&
- dev_priv->chipset != S3_SAVAGE2000) {
- mmio_base = pci_resource_start(pdev, 0);
- fb_base = pci_resource_start(pdev, 1);
- fb_size = SAVAGE_FB_SIZE_S4;
- aperture_base = fb_base + SAVAGE_APERTURE_OFFSET;
- /* this should always be true */
- if (pci_resource_len(pdev, 1) == 0x08000000) {
- /* Can use one MTRR to cover both fb and
- * aperture. */
- dev_priv->mtrr_handles[0] =
- arch_phys_wc_add(fb_base,
- 0x08000000);
- } else {
- DRM_ERROR("strange pci_resource_len %08llx\n",
- (unsigned long long)
- pci_resource_len(pdev, 1));
- }
- } else {
- mmio_base = pci_resource_start(pdev, 0);
- fb_base = pci_resource_start(pdev, 1);
- fb_size = pci_resource_len(pdev, 1);
- aperture_base = pci_resource_start(pdev, 2);
- /* Automatic MTRR setup will do the right thing. */
- }
-
- ret = drm_legacy_addmap(dev, mmio_base, SAVAGE_MMIO_SIZE,
- _DRM_REGISTERS, _DRM_READ_ONLY,
- &dev_priv->mmio);
- if (ret)
- return ret;
-
- ret = drm_legacy_addmap(dev, fb_base, fb_size, _DRM_FRAME_BUFFER,
- _DRM_WRITE_COMBINING, &dev_priv->fb);
- if (ret)
- return ret;
-
- ret = drm_legacy_addmap(dev, aperture_base, SAVAGE_APERTURE_SIZE,
- _DRM_FRAME_BUFFER, _DRM_WRITE_COMBINING,
- &dev_priv->aperture);
- return ret;
-}
-
-/*
- * Delete MTRRs and free device-private data.
- */
-void savage_driver_lastclose(struct drm_device *dev)
-{
- drm_savage_private_t *dev_priv = dev->dev_private;
- int i;
-
- for (i = 0; i < 3; ++i) {
- arch_phys_wc_del(dev_priv->mtrr_handles[i]);
- dev_priv->mtrr_handles[i] = 0;
- }
-}
-
-void savage_driver_unload(struct drm_device *dev)
-{
- drm_savage_private_t *dev_priv = dev->dev_private;
-
- kfree(dev_priv);
-}
-
-static int savage_do_init_bci(struct drm_device * dev, drm_savage_init_t * init)
-{
- drm_savage_private_t *dev_priv = dev->dev_private;
-
- if (init->fb_bpp != 16 && init->fb_bpp != 32) {
- DRM_ERROR("invalid frame buffer bpp %d!\n", init->fb_bpp);
- return -EINVAL;
- }
- if (init->depth_bpp != 16 && init->depth_bpp != 32) {
- DRM_ERROR("invalid depth buffer bpp %d!\n", init->fb_bpp);
- return -EINVAL;
- }
- if (init->dma_type != SAVAGE_DMA_AGP &&
- init->dma_type != SAVAGE_DMA_PCI) {
- DRM_ERROR("invalid dma memory type %d!\n", init->dma_type);
- return -EINVAL;
- }
-
- dev_priv->cob_size = init->cob_size;
- dev_priv->bci_threshold_lo = init->bci_threshold_lo;
- dev_priv->bci_threshold_hi = init->bci_threshold_hi;
- dev_priv->dma_type = init->dma_type;
-
- dev_priv->fb_bpp = init->fb_bpp;
- dev_priv->front_offset = init->front_offset;
- dev_priv->front_pitch = init->front_pitch;
- dev_priv->back_offset = init->back_offset;
- dev_priv->back_pitch = init->back_pitch;
- dev_priv->depth_bpp = init->depth_bpp;
- dev_priv->depth_offset = init->depth_offset;
- dev_priv->depth_pitch = init->depth_pitch;
-
- dev_priv->texture_offset = init->texture_offset;
- dev_priv->texture_size = init->texture_size;
-
- dev_priv->sarea = drm_legacy_getsarea(dev);
- if (!dev_priv->sarea) {
- DRM_ERROR("could not find sarea!\n");
- savage_do_cleanup_bci(dev);
- return -EINVAL;
- }
- if (init->status_offset != 0) {
- dev_priv->status = drm_legacy_findmap(dev, init->status_offset);
- if (!dev_priv->status) {
- DRM_ERROR("could not find shadow status region!\n");
- savage_do_cleanup_bci(dev);
- return -EINVAL;
- }
- } else {
- dev_priv->status = NULL;
- }
- if (dev_priv->dma_type == SAVAGE_DMA_AGP && init->buffers_offset) {
- dev->agp_buffer_token = init->buffers_offset;
- dev->agp_buffer_map = drm_legacy_findmap(dev,
- init->buffers_offset);
- if (!dev->agp_buffer_map) {
- DRM_ERROR("could not find DMA buffer region!\n");
- savage_do_cleanup_bci(dev);
- return -EINVAL;
- }
- drm_legacy_ioremap(dev->agp_buffer_map, dev);
- if (!dev->agp_buffer_map->handle) {
- DRM_ERROR("failed to ioremap DMA buffer region!\n");
- savage_do_cleanup_bci(dev);
- return -ENOMEM;
- }
- }
- if (init->agp_textures_offset) {
- dev_priv->agp_textures =
- drm_legacy_findmap(dev, init->agp_textures_offset);
- if (!dev_priv->agp_textures) {
- DRM_ERROR("could not find agp texture region!\n");
- savage_do_cleanup_bci(dev);
- return -EINVAL;
- }
- } else {
- dev_priv->agp_textures = NULL;
- }
-
- if (init->cmd_dma_offset) {
- if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
- DRM_ERROR("command DMA not supported on "
- "Savage3D/MX/IX.\n");
- savage_do_cleanup_bci(dev);
- return -EINVAL;
- }
- if (dev->dma && dev->dma->buflist) {
- DRM_ERROR("command and vertex DMA not supported "
- "at the same time.\n");
- savage_do_cleanup_bci(dev);
- return -EINVAL;
- }
- dev_priv->cmd_dma = drm_legacy_findmap(dev, init->cmd_dma_offset);
- if (!dev_priv->cmd_dma) {
- DRM_ERROR("could not find command DMA region!\n");
- savage_do_cleanup_bci(dev);
- return -EINVAL;
- }
- if (dev_priv->dma_type == SAVAGE_DMA_AGP) {
- if (dev_priv->cmd_dma->type != _DRM_AGP) {
- DRM_ERROR("AGP command DMA region is not a "
- "_DRM_AGP map!\n");
- savage_do_cleanup_bci(dev);
- return -EINVAL;
- }
- drm_legacy_ioremap(dev_priv->cmd_dma, dev);
- if (!dev_priv->cmd_dma->handle) {
- DRM_ERROR("failed to ioremap command "
- "DMA region!\n");
- savage_do_cleanup_bci(dev);
- return -ENOMEM;
- }
- } else if (dev_priv->cmd_dma->type != _DRM_CONSISTENT) {
- DRM_ERROR("PCI command DMA region is not a "
- "_DRM_CONSISTENT map!\n");
- savage_do_cleanup_bci(dev);
- return -EINVAL;
- }
- } else {
- dev_priv->cmd_dma = NULL;
- }
-
- dev_priv->dma_flush = savage_dma_flush;
- if (!dev_priv->cmd_dma) {
- DRM_DEBUG("falling back to faked command DMA.\n");
- dev_priv->fake_dma.offset = 0;
- dev_priv->fake_dma.size = SAVAGE_FAKE_DMA_SIZE;
- dev_priv->fake_dma.type = _DRM_SHM;
- dev_priv->fake_dma.handle = kmalloc(SAVAGE_FAKE_DMA_SIZE,
- GFP_KERNEL);
- if (!dev_priv->fake_dma.handle) {
- DRM_ERROR("could not allocate faked DMA buffer!\n");
- savage_do_cleanup_bci(dev);
- return -ENOMEM;
- }
- dev_priv->cmd_dma = &dev_priv->fake_dma;
- dev_priv->dma_flush = savage_fake_dma_flush;
- }
-
- dev_priv->sarea_priv =
- (drm_savage_sarea_t *) ((uint8_t *) dev_priv->sarea->handle +
- init->sarea_priv_offset);
-
- /* setup bitmap descriptors */
- {
- unsigned int color_tile_format;
- unsigned int depth_tile_format;
- unsigned int front_stride, back_stride, depth_stride;
- if (dev_priv->chipset <= S3_SAVAGE4) {
- color_tile_format = dev_priv->fb_bpp == 16 ?
- SAVAGE_BD_TILE_16BPP : SAVAGE_BD_TILE_32BPP;
- depth_tile_format = dev_priv->depth_bpp == 16 ?
- SAVAGE_BD_TILE_16BPP : SAVAGE_BD_TILE_32BPP;
- } else {
- color_tile_format = SAVAGE_BD_TILE_DEST;
- depth_tile_format = SAVAGE_BD_TILE_DEST;
- }
- front_stride = dev_priv->front_pitch / (dev_priv->fb_bpp / 8);
- back_stride = dev_priv->back_pitch / (dev_priv->fb_bpp / 8);
- depth_stride =
- dev_priv->depth_pitch / (dev_priv->depth_bpp / 8);
-
- dev_priv->front_bd = front_stride | SAVAGE_BD_BW_DISABLE |
- (dev_priv->fb_bpp << SAVAGE_BD_BPP_SHIFT) |
- (color_tile_format << SAVAGE_BD_TILE_SHIFT);
-
- dev_priv->back_bd = back_stride | SAVAGE_BD_BW_DISABLE |
- (dev_priv->fb_bpp << SAVAGE_BD_BPP_SHIFT) |
- (color_tile_format << SAVAGE_BD_TILE_SHIFT);
-
- dev_priv->depth_bd = depth_stride | SAVAGE_BD_BW_DISABLE |
- (dev_priv->depth_bpp << SAVAGE_BD_BPP_SHIFT) |
- (depth_tile_format << SAVAGE_BD_TILE_SHIFT);
- }
-
- /* setup status and bci ptr */
- dev_priv->event_counter = 0;
- dev_priv->event_wrap = 0;
- dev_priv->bci_ptr = (volatile uint32_t *)
- ((uint8_t *) dev_priv->mmio->handle + SAVAGE_BCI_OFFSET);
- if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
- dev_priv->status_used_mask = SAVAGE_FIFO_USED_MASK_S3D;
- } else {
- dev_priv->status_used_mask = SAVAGE_FIFO_USED_MASK_S4;
- }
- if (dev_priv->status != NULL) {
- dev_priv->status_ptr =
- (volatile uint32_t *)dev_priv->status->handle;
- dev_priv->wait_fifo = savage_bci_wait_fifo_shadow;
- dev_priv->wait_evnt = savage_bci_wait_event_shadow;
- dev_priv->status_ptr[1023] = dev_priv->event_counter;
- } else {
- dev_priv->status_ptr = NULL;
- if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
- dev_priv->wait_fifo = savage_bci_wait_fifo_s3d;
- } else {
- dev_priv->wait_fifo = savage_bci_wait_fifo_s4;
- }
- dev_priv->wait_evnt = savage_bci_wait_event_reg;
- }
-
- /* cliprect functions */
- if (S3_SAVAGE3D_SERIES(dev_priv->chipset))
- dev_priv->emit_clip_rect = savage_emit_clip_rect_s3d;
- else
- dev_priv->emit_clip_rect = savage_emit_clip_rect_s4;
-
- if (savage_freelist_init(dev) < 0) {
- DRM_ERROR("could not initialize freelist\n");
- savage_do_cleanup_bci(dev);
- return -ENOMEM;
- }
-
- if (savage_dma_init(dev_priv) < 0) {
- DRM_ERROR("could not initialize command DMA\n");
- savage_do_cleanup_bci(dev);
- return -ENOMEM;
- }
-
- return 0;
-}
-
-static int savage_do_cleanup_bci(struct drm_device * dev)
-{
- drm_savage_private_t *dev_priv = dev->dev_private;
-
- if (dev_priv->cmd_dma == &dev_priv->fake_dma) {
- kfree(dev_priv->fake_dma.handle);
- } else if (dev_priv->cmd_dma && dev_priv->cmd_dma->handle &&
- dev_priv->cmd_dma->type == _DRM_AGP &&
- dev_priv->dma_type == SAVAGE_DMA_AGP)
- drm_legacy_ioremapfree(dev_priv->cmd_dma, dev);
-
- if (dev_priv->dma_type == SAVAGE_DMA_AGP &&
- dev->agp_buffer_map && dev->agp_buffer_map->handle) {
- drm_legacy_ioremapfree(dev->agp_buffer_map, dev);
- /* make sure the next instance (which may be running
- * in PCI mode) doesn't try to use an old
- * agp_buffer_map. */
- dev->agp_buffer_map = NULL;
- }
-
- kfree(dev_priv->dma_pages);
-
- return 0;
-}
-
-static int savage_bci_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- drm_savage_init_t *init = data;
-
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- switch (init->func) {
- case SAVAGE_INIT_BCI:
- return savage_do_init_bci(dev, init);
- case SAVAGE_CLEANUP_BCI:
- return savage_do_cleanup_bci(dev);
- }
-
- return -EINVAL;
-}
-
-static int savage_bci_event_emit(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- drm_savage_private_t *dev_priv = dev->dev_private;
- drm_savage_event_emit_t *event = data;
-
- DRM_DEBUG("\n");
-
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- event->count = savage_bci_emit_event(dev_priv, event->flags);
- event->count |= dev_priv->event_wrap << 16;
-
- return 0;
-}
-
-static int savage_bci_event_wait(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- drm_savage_private_t *dev_priv = dev->dev_private;
- drm_savage_event_wait_t *event = data;
- unsigned int event_e, hw_e;
- unsigned int event_w, hw_w;
-
- DRM_DEBUG("\n");
-
- UPDATE_EVENT_COUNTER();
- if (dev_priv->status_ptr)
- hw_e = dev_priv->status_ptr[1] & 0xffff;
- else
- hw_e = SAVAGE_READ(SAVAGE_STATUS_WORD1) & 0xffff;
- hw_w = dev_priv->event_wrap;
- if (hw_e > dev_priv->event_counter)
- hw_w--; /* hardware hasn't passed the last wrap yet */
-
- event_e = event->count & 0xffff;
- event_w = event->count >> 16;
-
- /* Don't need to wait if
- * - event counter wrapped since the event was emitted or
- * - the hardware has advanced up to or over the event to wait for.
- */
- if (event_w < hw_w || (event_w == hw_w && event_e <= hw_e))
- return 0;
- else
- return dev_priv->wait_evnt(dev_priv, event_e);
-}
-
-/*
- * DMA buffer management
- */
-
-static int savage_bci_get_buffers(struct drm_device *dev,
- struct drm_file *file_priv,
- struct drm_dma *d)
-{
- struct drm_buf *buf;
- int i;
-
- for (i = d->granted_count; i < d->request_count; i++) {
- buf = savage_freelist_get(dev);
- if (!buf)
- return -EAGAIN;
-
- buf->file_priv = file_priv;
-
- if (copy_to_user(&d->request_indices[i],
- &buf->idx, sizeof(buf->idx)))
- return -EFAULT;
- if (copy_to_user(&d->request_sizes[i],
- &buf->total, sizeof(buf->total)))
- return -EFAULT;
-
- d->granted_count++;
- }
- return 0;
-}
-
-int savage_bci_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- struct drm_device_dma *dma = dev->dma;
- struct drm_dma *d = data;
- int ret = 0;
-
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- /* Please don't send us buffers.
- */
- if (d->send_count != 0) {
- DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n",
- task_pid_nr(current), d->send_count);
- return -EINVAL;
- }
-
- /* We'll send you buffers.
- */
- if (d->request_count < 0 || d->request_count > dma->buf_count) {
- DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
- task_pid_nr(current), d->request_count, dma->buf_count);
- return -EINVAL;
- }
-
- d->granted_count = 0;
-
- if (d->request_count) {
- ret = savage_bci_get_buffers(dev, file_priv, d);
- }
-
- return ret;
-}
-
-void savage_reclaim_buffers(struct drm_device *dev, struct drm_file *file_priv)
-{
- struct drm_device_dma *dma = dev->dma;
- drm_savage_private_t *dev_priv = dev->dev_private;
- int release_idlelock = 0;
- int i;
-
- if (!dma)
- return;
- if (!dev_priv)
- return;
- if (!dma->buflist)
- return;
-
- if (file_priv->master && file_priv->master->lock.hw_lock) {
- drm_legacy_idlelock_take(&file_priv->master->lock);
- release_idlelock = 1;
- }
-
- for (i = 0; i < dma->buf_count; i++) {
- struct drm_buf *buf = dma->buflist[i];
- drm_savage_buf_priv_t *buf_priv = buf->dev_private;
-
- if (buf->file_priv == file_priv && buf_priv &&
- buf_priv->next == NULL && buf_priv->prev == NULL) {
- uint16_t event;
- DRM_DEBUG("reclaimed from client\n");
- event = savage_bci_emit_event(dev_priv, SAVAGE_WAIT_3D);
- SET_AGE(&buf_priv->age, event, dev_priv->event_wrap);
- savage_freelist_put(dev, buf);
- }
- }
-
- if (release_idlelock)
- drm_legacy_idlelock_release(&file_priv->master->lock);
-}
-
-const struct drm_ioctl_desc savage_ioctls[] = {
- DRM_IOCTL_DEF_DRV(SAVAGE_BCI_INIT, savage_bci_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF_DRV(SAVAGE_BCI_CMDBUF, savage_bci_cmdbuf, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(SAVAGE_BCI_EVENT_EMIT, savage_bci_event_emit, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(SAVAGE_BCI_EVENT_WAIT, savage_bci_event_wait, DRM_AUTH),
-};
-
-int savage_max_ioctl = ARRAY_SIZE(savage_ioctls);
diff --git a/drivers/gpu/drm/savage/savage_drv.c b/drivers/gpu/drm/savage/savage_drv.c
deleted file mode 100644
index 799bd11adb9c..000000000000
--- a/drivers/gpu/drm/savage/savage_drv.c
+++ /dev/null
@@ -1,91 +0,0 @@
-/* savage_drv.c -- Savage driver for Linux
- *
- * Copyright 2004 Felix Kuehling
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sub license,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NON-INFRINGEMENT. IN NO EVENT SHALL FELIX KUEHLING BE LIABLE FOR
- * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
- * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include <linux/module.h>
-#include <linux/pci.h>
-
-#include <drm/drm_drv.h>
-#include <drm/drm_file.h>
-#include <drm/drm_pciids.h>
-
-#include "savage_drv.h"
-
-static struct pci_device_id pciidlist[] = {
- savage_PCI_IDS
-};
-
-static const struct file_operations savage_driver_fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = drm_ioctl,
- .mmap = drm_legacy_mmap,
- .poll = drm_poll,
- .compat_ioctl = drm_compat_ioctl,
- .llseek = noop_llseek,
-};
-
-static struct drm_driver driver = {
- .driver_features =
- DRIVER_USE_AGP | DRIVER_HAVE_DMA | DRIVER_PCI_DMA | DRIVER_LEGACY,
- .dev_priv_size = sizeof(drm_savage_buf_priv_t),
- .load = savage_driver_load,
- .firstopen = savage_driver_firstopen,
- .preclose = savage_reclaim_buffers,
- .lastclose = savage_driver_lastclose,
- .unload = savage_driver_unload,
- .ioctls = savage_ioctls,
- .dma_ioctl = savage_bci_buffers,
- .fops = &savage_driver_fops,
- .name = DRIVER_NAME,
- .desc = DRIVER_DESC,
- .date = DRIVER_DATE,
- .major = DRIVER_MAJOR,
- .minor = DRIVER_MINOR,
- .patchlevel = DRIVER_PATCHLEVEL,
-};
-
-static struct pci_driver savage_pci_driver = {
- .name = DRIVER_NAME,
- .id_table = pciidlist,
-};
-
-static int __init savage_init(void)
-{
- driver.num_ioctls = savage_max_ioctl;
- return drm_legacy_pci_init(&driver, &savage_pci_driver);
-}
-
-static void __exit savage_exit(void)
-{
- drm_legacy_pci_exit(&driver, &savage_pci_driver);
-}
-
-module_init(savage_init);
-module_exit(savage_exit);
-
-MODULE_AUTHOR(DRIVER_AUTHOR);
-MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/savage/savage_drv.h b/drivers/gpu/drm/savage/savage_drv.h
deleted file mode 100644
index b0081bb64776..000000000000
--- a/drivers/gpu/drm/savage/savage_drv.h
+++ /dev/null
@@ -1,580 +0,0 @@
-/* savage_drv.h -- Private header for the savage driver */
-/*
- * Copyright 2004 Felix Kuehling
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sub license,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NON-INFRINGEMENT. IN NO EVENT SHALL FELIX KUEHLING BE LIABLE FOR
- * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
- * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#ifndef __SAVAGE_DRV_H__
-#define __SAVAGE_DRV_H__
-
-#include <linux/io.h>
-
-#include <drm/drm_ioctl.h>
-#include <drm/drm_legacy.h>
-#include <drm/savage_drm.h>
-
-#define DRIVER_AUTHOR "Felix Kuehling"
-
-#define DRIVER_NAME "savage"
-#define DRIVER_DESC "Savage3D/MX/IX, Savage4, SuperSavage, Twister, ProSavage[DDR]"
-#define DRIVER_DATE "20050313"
-
-#define DRIVER_MAJOR 2
-#define DRIVER_MINOR 4
-#define DRIVER_PATCHLEVEL 1
-/* Interface history:
- *
- * 1.x The DRM driver from the VIA/S3 code drop, basically a dummy
- * 2.0 The first real DRM
- * 2.1 Scissors registers managed by the DRM, 3D operations clipped by
- * cliprects of the cmdbuf ioctl
- * 2.2 Implemented SAVAGE_CMD_DMA_IDX and SAVAGE_CMD_VB_IDX
- * 2.3 Event counters used by BCI_EVENT_EMIT/WAIT ioctls are now 32 bits
- * wide and thus very long lived (unlikely to ever wrap). The size
- * in the struct was 32 bits before, but only 16 bits were used
- * 2.4 Implemented command DMA. Now drm_savage_init_t.cmd_dma_offset is
- * actually used
- */
-
-typedef struct drm_savage_age {
- uint16_t event;
- unsigned int wrap;
-} drm_savage_age_t;
-
-typedef struct drm_savage_buf_priv {
- struct drm_savage_buf_priv *next;
- struct drm_savage_buf_priv *prev;
- drm_savage_age_t age;
- struct drm_buf *buf;
-} drm_savage_buf_priv_t;
-
-typedef struct drm_savage_dma_page {
- drm_savage_age_t age;
- unsigned int used, flushed;
-} drm_savage_dma_page_t;
-#define SAVAGE_DMA_PAGE_SIZE 1024 /* in dwords */
-/* Fake DMA buffer size in bytes. 4 pages. Allows a maximum command
- * size of 16kbytes or 4k entries. Minimum requirement would be
- * 10kbytes for 255 40-byte vertices in one drawing command. */
-#define SAVAGE_FAKE_DMA_SIZE (SAVAGE_DMA_PAGE_SIZE*4*4)
-
-/* interesting bits of hardware state that are saved in dev_priv */
-typedef union {
- struct drm_savage_common_state {
- uint32_t vbaddr;
- } common;
- struct {
- unsigned char pad[sizeof(struct drm_savage_common_state)];
- uint32_t texctrl, texaddr;
- uint32_t scstart, new_scstart;
- uint32_t scend, new_scend;
- } s3d;
- struct {
- unsigned char pad[sizeof(struct drm_savage_common_state)];
- uint32_t texdescr, texaddr0, texaddr1;
- uint32_t drawctrl0, new_drawctrl0;
- uint32_t drawctrl1, new_drawctrl1;
- } s4;
-} drm_savage_state_t;
-
-/* these chip tags should match the ones in the 2D driver in savage_regs.h. */
-enum savage_family {
- S3_UNKNOWN = 0,
- S3_SAVAGE3D,
- S3_SAVAGE_MX,
- S3_SAVAGE4,
- S3_PROSAVAGE,
- S3_TWISTER,
- S3_PROSAVAGEDDR,
- S3_SUPERSAVAGE,
- S3_SAVAGE2000,
- S3_LAST
-};
-
-extern const struct drm_ioctl_desc savage_ioctls[];
-extern int savage_max_ioctl;
-
-#define S3_SAVAGE3D_SERIES(chip) ((chip>=S3_SAVAGE3D) && (chip<=S3_SAVAGE_MX))
-
-#define S3_SAVAGE4_SERIES(chip) ((chip==S3_SAVAGE4) \
- || (chip==S3_PROSAVAGE) \
- || (chip==S3_TWISTER) \
- || (chip==S3_PROSAVAGEDDR))
-
-#define S3_SAVAGE_MOBILE_SERIES(chip) ((chip==S3_SAVAGE_MX) || (chip==S3_SUPERSAVAGE))
-
-#define S3_SAVAGE_SERIES(chip) ((chip>=S3_SAVAGE3D) && (chip<=S3_SAVAGE2000))
-
-#define S3_MOBILE_TWISTER_SERIES(chip) ((chip==S3_TWISTER) \
- ||(chip==S3_PROSAVAGEDDR))
-
-/* flags */
-#define SAVAGE_IS_AGP 1
-
-typedef struct drm_savage_private {
- drm_savage_sarea_t *sarea_priv;
-
- drm_savage_buf_priv_t head, tail;
-
- /* who am I? */
- enum savage_family chipset;
-
- unsigned int cob_size;
- unsigned int bci_threshold_lo, bci_threshold_hi;
- unsigned int dma_type;
-
- /* frame buffer layout */
- unsigned int fb_bpp;
- unsigned int front_offset, front_pitch;
- unsigned int back_offset, back_pitch;
- unsigned int depth_bpp;
- unsigned int depth_offset, depth_pitch;
-
- /* bitmap descriptors for swap and clear */
- unsigned int front_bd, back_bd, depth_bd;
-
- /* local textures */
- unsigned int texture_offset;
- unsigned int texture_size;
-
- /* memory regions in physical memory */
- drm_local_map_t *sarea;
- drm_local_map_t *mmio;
- drm_local_map_t *fb;
- drm_local_map_t *aperture;
- drm_local_map_t *status;
- drm_local_map_t *agp_textures;
- drm_local_map_t *cmd_dma;
- drm_local_map_t fake_dma;
-
- int mtrr_handles[3];
-
- /* BCI and status-related stuff */
- volatile uint32_t *status_ptr, *bci_ptr;
- uint32_t status_used_mask;
- uint16_t event_counter;
- unsigned int event_wrap;
-
- /* Savage4 command DMA */
- drm_savage_dma_page_t *dma_pages;
- unsigned int nr_dma_pages, first_dma_page, current_dma_page;
- drm_savage_age_t last_dma_age;
-
- /* saved hw state for global/local check on S3D */
- uint32_t hw_draw_ctrl, hw_zbuf_ctrl;
- /* and for scissors (global, so don't emit if not changed) */
- uint32_t hw_scissors_start, hw_scissors_end;
-
- drm_savage_state_t state;
-
- /* after emitting a wait cmd Savage3D needs 63 nops before next DMA */
- unsigned int waiting;
-
- /* config/hardware-dependent function pointers */
- int (*wait_fifo) (struct drm_savage_private * dev_priv, unsigned int n);
- int (*wait_evnt) (struct drm_savage_private * dev_priv, uint16_t e);
- /* Err, there is a macro wait_event in include/linux/wait.h.
- * Avoid unwanted macro expansion. */
- void (*emit_clip_rect) (struct drm_savage_private * dev_priv,
- const struct drm_clip_rect * pbox);
- void (*dma_flush) (struct drm_savage_private * dev_priv);
-} drm_savage_private_t;
-
-/* ioctls */
-extern int savage_bci_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_priv);
-extern int savage_bci_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv);
-
-/* BCI functions */
-extern uint16_t savage_bci_emit_event(drm_savage_private_t * dev_priv,
- unsigned int flags);
-extern void savage_freelist_put(struct drm_device * dev, struct drm_buf * buf);
-extern void savage_dma_reset(drm_savage_private_t * dev_priv);
-extern void savage_dma_wait(drm_savage_private_t * dev_priv, unsigned int page);
-extern uint32_t *savage_dma_alloc(drm_savage_private_t * dev_priv,
- unsigned int n);
-extern int savage_driver_load(struct drm_device *dev, unsigned long chipset);
-extern int savage_driver_firstopen(struct drm_device *dev);
-extern void savage_driver_lastclose(struct drm_device *dev);
-extern void savage_driver_unload(struct drm_device *dev);
-extern void savage_reclaim_buffers(struct drm_device *dev,
- struct drm_file *file_priv);
-
-/* state functions */
-extern void savage_emit_clip_rect_s3d(drm_savage_private_t * dev_priv,
- const struct drm_clip_rect * pbox);
-extern void savage_emit_clip_rect_s4(drm_savage_private_t * dev_priv,
- const struct drm_clip_rect * pbox);
-
-#define SAVAGE_FB_SIZE_S3 0x01000000 /* 16MB */
-#define SAVAGE_FB_SIZE_S4 0x02000000 /* 32MB */
-#define SAVAGE_MMIO_SIZE 0x00080000 /* 512kB */
-#define SAVAGE_APERTURE_OFFSET 0x02000000 /* 32MB */
-#define SAVAGE_APERTURE_SIZE 0x05000000 /* 5 tiled surfaces, 16MB each */
-
-#define SAVAGE_BCI_OFFSET 0x00010000 /* offset of the BCI region
- * inside the MMIO region */
-#define SAVAGE_BCI_FIFO_SIZE 32 /* number of entries in on-chip
- * BCI FIFO */
-
-/*
- * MMIO registers
- */
-#define SAVAGE_STATUS_WORD0 0x48C00
-#define SAVAGE_STATUS_WORD1 0x48C04
-#define SAVAGE_ALT_STATUS_WORD0 0x48C60
-
-#define SAVAGE_FIFO_USED_MASK_S3D 0x0001ffff
-#define SAVAGE_FIFO_USED_MASK_S4 0x001fffff
-
-/* Copied from savage_bci.h in the 2D driver with some renaming. */
-
-/* Bitmap descriptors */
-#define SAVAGE_BD_STRIDE_SHIFT 0
-#define SAVAGE_BD_BPP_SHIFT 16
-#define SAVAGE_BD_TILE_SHIFT 24
-#define SAVAGE_BD_BW_DISABLE (1<<28)
-/* common: */
-#define SAVAGE_BD_TILE_LINEAR 0
-/* savage4, MX, IX, 3D */
-#define SAVAGE_BD_TILE_16BPP 2
-#define SAVAGE_BD_TILE_32BPP 3
-/* twister, prosavage, DDR, supersavage, 2000 */
-#define SAVAGE_BD_TILE_DEST 1
-#define SAVAGE_BD_TILE_TEXTURE 2
-/* GBD - BCI enable */
-/* savage4, MX, IX, 3D */
-#define SAVAGE_GBD_BCI_ENABLE 8
-/* twister, prosavage, DDR, supersavage, 2000 */
-#define SAVAGE_GBD_BCI_ENABLE_TWISTER 0
-
-#define SAVAGE_GBD_BIG_ENDIAN 4
-#define SAVAGE_GBD_LITTLE_ENDIAN 0
-#define SAVAGE_GBD_64 1
-
-/* Global Bitmap Descriptor */
-#define SAVAGE_BCI_GLB_BD_LOW 0x8168
-#define SAVAGE_BCI_GLB_BD_HIGH 0x816C
-
-/*
- * BCI registers
- */
-/* Savage4/Twister/ProSavage 3D registers */
-#define SAVAGE_DRAWLOCALCTRL_S4 0x1e
-#define SAVAGE_TEXPALADDR_S4 0x1f
-#define SAVAGE_TEXCTRL0_S4 0x20
-#define SAVAGE_TEXCTRL1_S4 0x21
-#define SAVAGE_TEXADDR0_S4 0x22
-#define SAVAGE_TEXADDR1_S4 0x23
-#define SAVAGE_TEXBLEND0_S4 0x24
-#define SAVAGE_TEXBLEND1_S4 0x25
-#define SAVAGE_TEXXPRCLR_S4 0x26 /* never used */
-#define SAVAGE_TEXDESCR_S4 0x27
-#define SAVAGE_FOGTABLE_S4 0x28
-#define SAVAGE_FOGCTRL_S4 0x30
-#define SAVAGE_STENCILCTRL_S4 0x31
-#define SAVAGE_ZBUFCTRL_S4 0x32
-#define SAVAGE_ZBUFOFF_S4 0x33
-#define SAVAGE_DESTCTRL_S4 0x34
-#define SAVAGE_DRAWCTRL0_S4 0x35
-#define SAVAGE_DRAWCTRL1_S4 0x36
-#define SAVAGE_ZWATERMARK_S4 0x37
-#define SAVAGE_DESTTEXRWWATERMARK_S4 0x38
-#define SAVAGE_TEXBLENDCOLOR_S4 0x39
-/* Savage3D/MX/IX 3D registers */
-#define SAVAGE_TEXPALADDR_S3D 0x18
-#define SAVAGE_TEXXPRCLR_S3D 0x19 /* never used */
-#define SAVAGE_TEXADDR_S3D 0x1A
-#define SAVAGE_TEXDESCR_S3D 0x1B
-#define SAVAGE_TEXCTRL_S3D 0x1C
-#define SAVAGE_FOGTABLE_S3D 0x20
-#define SAVAGE_FOGCTRL_S3D 0x30
-#define SAVAGE_DRAWCTRL_S3D 0x31
-#define SAVAGE_ZBUFCTRL_S3D 0x32
-#define SAVAGE_ZBUFOFF_S3D 0x33
-#define SAVAGE_DESTCTRL_S3D 0x34
-#define SAVAGE_SCSTART_S3D 0x35
-#define SAVAGE_SCEND_S3D 0x36
-#define SAVAGE_ZWATERMARK_S3D 0x37
-#define SAVAGE_DESTTEXRWWATERMARK_S3D 0x38
-/* common stuff */
-#define SAVAGE_VERTBUFADDR 0x3e
-#define SAVAGE_BITPLANEWTMASK 0xd7
-#define SAVAGE_DMABUFADDR 0x51
-
-/* texture enable bits (needed for tex addr checking) */
-#define SAVAGE_TEXCTRL_TEXEN_MASK 0x00010000 /* S3D */
-#define SAVAGE_TEXDESCR_TEX0EN_MASK 0x02000000 /* S4 */
-#define SAVAGE_TEXDESCR_TEX1EN_MASK 0x04000000 /* S4 */
-
-/* Global fields in Savage4/Twister/ProSavage 3D registers:
- *
- * All texture registers and DrawLocalCtrl are local. All other
- * registers are global. */
-
-/* Global fields in Savage3D/MX/IX 3D registers:
- *
- * All texture registers are local. DrawCtrl and ZBufCtrl are
- * partially local. All other registers are global.
- *
- * DrawCtrl global fields: cullMode, alphaTestCmpFunc, alphaTestEn, alphaRefVal
- * ZBufCtrl global fields: zCmpFunc, zBufEn
- */
-#define SAVAGE_DRAWCTRL_S3D_GLOBAL 0x03f3c00c
-#define SAVAGE_ZBUFCTRL_S3D_GLOBAL 0x00000027
-
-/* Masks for scissor bits (drawCtrl[01] on s4, scissorStart/End on s3d)
- */
-#define SAVAGE_SCISSOR_MASK_S4 0x00fff7ff
-#define SAVAGE_SCISSOR_MASK_S3D 0x07ff07ff
-
-/*
- * BCI commands
- */
-#define BCI_CMD_NOP 0x40000000
-#define BCI_CMD_RECT 0x48000000
-#define BCI_CMD_RECT_XP 0x01000000
-#define BCI_CMD_RECT_YP 0x02000000
-#define BCI_CMD_SCANLINE 0x50000000
-#define BCI_CMD_LINE 0x5C000000
-#define BCI_CMD_LINE_LAST_PIXEL 0x58000000
-#define BCI_CMD_BYTE_TEXT 0x63000000
-#define BCI_CMD_NT_BYTE_TEXT 0x67000000
-#define BCI_CMD_BIT_TEXT 0x6C000000
-#define BCI_CMD_GET_ROP(cmd) (((cmd) >> 16) & 0xFF)
-#define BCI_CMD_SET_ROP(cmd, rop) ((cmd) |= ((rop & 0xFF) << 16))
-#define BCI_CMD_SEND_COLOR 0x00008000
-
-#define BCI_CMD_CLIP_NONE 0x00000000
-#define BCI_CMD_CLIP_CURRENT 0x00002000
-#define BCI_CMD_CLIP_LR 0x00004000
-#define BCI_CMD_CLIP_NEW 0x00006000
-
-#define BCI_CMD_DEST_GBD 0x00000000
-#define BCI_CMD_DEST_PBD 0x00000800
-#define BCI_CMD_DEST_PBD_NEW 0x00000C00
-#define BCI_CMD_DEST_SBD 0x00001000
-#define BCI_CMD_DEST_SBD_NEW 0x00001400
-
-#define BCI_CMD_SRC_TRANSPARENT 0x00000200
-#define BCI_CMD_SRC_SOLID 0x00000000
-#define BCI_CMD_SRC_GBD 0x00000020
-#define BCI_CMD_SRC_COLOR 0x00000040
-#define BCI_CMD_SRC_MONO 0x00000060
-#define BCI_CMD_SRC_PBD_COLOR 0x00000080
-#define BCI_CMD_SRC_PBD_MONO 0x000000A0
-#define BCI_CMD_SRC_PBD_COLOR_NEW 0x000000C0
-#define BCI_CMD_SRC_PBD_MONO_NEW 0x000000E0
-#define BCI_CMD_SRC_SBD_COLOR 0x00000100
-#define BCI_CMD_SRC_SBD_MONO 0x00000120
-#define BCI_CMD_SRC_SBD_COLOR_NEW 0x00000140
-#define BCI_CMD_SRC_SBD_MONO_NEW 0x00000160
-
-#define BCI_CMD_PAT_TRANSPARENT 0x00000010
-#define BCI_CMD_PAT_NONE 0x00000000
-#define BCI_CMD_PAT_COLOR 0x00000002
-#define BCI_CMD_PAT_MONO 0x00000003
-#define BCI_CMD_PAT_PBD_COLOR 0x00000004
-#define BCI_CMD_PAT_PBD_MONO 0x00000005
-#define BCI_CMD_PAT_PBD_COLOR_NEW 0x00000006
-#define BCI_CMD_PAT_PBD_MONO_NEW 0x00000007
-#define BCI_CMD_PAT_SBD_COLOR 0x00000008
-#define BCI_CMD_PAT_SBD_MONO 0x00000009
-#define BCI_CMD_PAT_SBD_COLOR_NEW 0x0000000A
-#define BCI_CMD_PAT_SBD_MONO_NEW 0x0000000B
-
-#define BCI_BD_BW_DISABLE 0x10000000
-#define BCI_BD_TILE_MASK 0x03000000
-#define BCI_BD_TILE_NONE 0x00000000
-#define BCI_BD_TILE_16 0x02000000
-#define BCI_BD_TILE_32 0x03000000
-#define BCI_BD_GET_BPP(bd) (((bd) >> 16) & 0xFF)
-#define BCI_BD_SET_BPP(bd, bpp) ((bd) |= (((bpp) & 0xFF) << 16))
-#define BCI_BD_GET_STRIDE(bd) ((bd) & 0xFFFF)
-#define BCI_BD_SET_STRIDE(bd, st) ((bd) |= ((st) & 0xFFFF))
-
-#define BCI_CMD_SET_REGISTER 0x96000000
-
-#define BCI_CMD_WAIT 0xC0000000
-#define BCI_CMD_WAIT_3D 0x00010000
-#define BCI_CMD_WAIT_2D 0x00020000
-
-#define BCI_CMD_UPDATE_EVENT_TAG 0x98000000
-
-#define BCI_CMD_DRAW_PRIM 0x80000000
-#define BCI_CMD_DRAW_INDEXED_PRIM 0x88000000
-#define BCI_CMD_DRAW_CONT 0x01000000
-#define BCI_CMD_DRAW_TRILIST 0x00000000
-#define BCI_CMD_DRAW_TRISTRIP 0x02000000
-#define BCI_CMD_DRAW_TRIFAN 0x04000000
-#define BCI_CMD_DRAW_SKIPFLAGS 0x000000ff
-#define BCI_CMD_DRAW_NO_Z 0x00000001
-#define BCI_CMD_DRAW_NO_W 0x00000002
-#define BCI_CMD_DRAW_NO_CD 0x00000004
-#define BCI_CMD_DRAW_NO_CS 0x00000008
-#define BCI_CMD_DRAW_NO_U0 0x00000010
-#define BCI_CMD_DRAW_NO_V0 0x00000020
-#define BCI_CMD_DRAW_NO_UV0 0x00000030
-#define BCI_CMD_DRAW_NO_U1 0x00000040
-#define BCI_CMD_DRAW_NO_V1 0x00000080
-#define BCI_CMD_DRAW_NO_UV1 0x000000c0
-
-#define BCI_CMD_DMA 0xa8000000
-
-#define BCI_W_H(w, h) ((((h) << 16) | (w)) & 0x0FFF0FFF)
-#define BCI_X_Y(x, y) ((((y) << 16) | (x)) & 0x0FFF0FFF)
-#define BCI_X_W(x, y) ((((w) << 16) | (x)) & 0x0FFF0FFF)
-#define BCI_CLIP_LR(l, r) ((((r) << 16) | (l)) & 0x0FFF0FFF)
-#define BCI_CLIP_TL(t, l) ((((t) << 16) | (l)) & 0x0FFF0FFF)
-#define BCI_CLIP_BR(b, r) ((((b) << 16) | (r)) & 0x0FFF0FFF)
-
-#define BCI_LINE_X_Y(x, y) (((y) << 16) | ((x) & 0xFFFF))
-#define BCI_LINE_STEPS(diag, axi) (((axi) << 16) | ((diag) & 0xFFFF))
-#define BCI_LINE_MISC(maj, ym, xp, yp, err) \
- (((maj) & 0x1FFF) | \
- ((ym) ? 1<<13 : 0) | \
- ((xp) ? 1<<14 : 0) | \
- ((yp) ? 1<<15 : 0) | \
- ((err) << 16))
-
-/*
- * common commands
- */
-#define BCI_SET_REGISTERS( first, n ) \
- BCI_WRITE(BCI_CMD_SET_REGISTER | \
- ((uint32_t)(n) & 0xff) << 16 | \
- ((uint32_t)(first) & 0xffff))
-#define DMA_SET_REGISTERS( first, n ) \
- DMA_WRITE(BCI_CMD_SET_REGISTER | \
- ((uint32_t)(n) & 0xff) << 16 | \
- ((uint32_t)(first) & 0xffff))
-
-#define BCI_DRAW_PRIMITIVE(n, type, skip) \
- BCI_WRITE(BCI_CMD_DRAW_PRIM | (type) | (skip) | \
- ((n) << 16))
-#define DMA_DRAW_PRIMITIVE(n, type, skip) \
- DMA_WRITE(BCI_CMD_DRAW_PRIM | (type) | (skip) | \
- ((n) << 16))
-
-#define BCI_DRAW_INDICES_S3D(n, type, i0) \
- BCI_WRITE(BCI_CMD_DRAW_INDEXED_PRIM | (type) | \
- ((n) << 16) | (i0))
-
-#define BCI_DRAW_INDICES_S4(n, type, skip) \
- BCI_WRITE(BCI_CMD_DRAW_INDEXED_PRIM | (type) | \
- (skip) | ((n) << 16))
-
-#define BCI_DMA(n) \
- BCI_WRITE(BCI_CMD_DMA | (((n) >> 1) - 1))
-
-/*
- * access to MMIO
- */
-#define SAVAGE_READ(reg) \
- readl(((void __iomem *)dev_priv->mmio->handle) + (reg))
-#define SAVAGE_WRITE(reg) \
- writel(val, ((void __iomem *)dev_priv->mmio->handle) + (reg))
-
-/*
- * access to the burst command interface (BCI)
- */
-#define SAVAGE_BCI_DEBUG 1
-
-#define BCI_LOCALS volatile uint32_t *bci_ptr;
-
-#define BEGIN_BCI( n ) do { \
- dev_priv->wait_fifo(dev_priv, (n)); \
- bci_ptr = dev_priv->bci_ptr; \
-} while(0)
-
-#define BCI_WRITE( val ) *bci_ptr++ = (uint32_t)(val)
-
-/*
- * command DMA support
- */
-#define SAVAGE_DMA_DEBUG 1
-
-#define DMA_LOCALS uint32_t *dma_ptr;
-
-#define BEGIN_DMA( n ) do { \
- unsigned int cur = dev_priv->current_dma_page; \
- unsigned int rest = SAVAGE_DMA_PAGE_SIZE - \
- dev_priv->dma_pages[cur].used; \
- if ((n) > rest) { \
- dma_ptr = savage_dma_alloc(dev_priv, (n)); \
- } else { /* fast path for small allocations */ \
- dma_ptr = (uint32_t *)dev_priv->cmd_dma->handle + \
- cur * SAVAGE_DMA_PAGE_SIZE + \
- dev_priv->dma_pages[cur].used; \
- if (dev_priv->dma_pages[cur].used == 0) \
- savage_dma_wait(dev_priv, cur); \
- dev_priv->dma_pages[cur].used += (n); \
- } \
-} while(0)
-
-#define DMA_WRITE( val ) *dma_ptr++ = (uint32_t)(val)
-
-#define DMA_COPY(src, n) do { \
- memcpy(dma_ptr, (src), (n)*4); \
- dma_ptr += n; \
-} while(0)
-
-#if SAVAGE_DMA_DEBUG
-#define DMA_COMMIT() do { \
- unsigned int cur = dev_priv->current_dma_page; \
- uint32_t *expected = (uint32_t *)dev_priv->cmd_dma->handle + \
- cur * SAVAGE_DMA_PAGE_SIZE + \
- dev_priv->dma_pages[cur].used; \
- if (dma_ptr != expected) { \
- DRM_ERROR("DMA allocation and use don't match: " \
- "%p != %p\n", expected, dma_ptr); \
- savage_dma_reset(dev_priv); \
- } \
-} while(0)
-#else
-#define DMA_COMMIT() do {/* nothing */} while(0)
-#endif
-
-#define DMA_FLUSH() dev_priv->dma_flush(dev_priv)
-
-/* Buffer aging via event tag
- */
-
-#define UPDATE_EVENT_COUNTER( ) do { \
- if (dev_priv->status_ptr) { \
- uint16_t count; \
- /* coordinate with Xserver */ \
- count = dev_priv->status_ptr[1023]; \
- if (count < dev_priv->event_counter) \
- dev_priv->event_wrap++; \
- dev_priv->event_counter = count; \
- } \
-} while(0)
-
-#define SET_AGE( age, e, w ) do { \
- (age)->event = e; \
- (age)->wrap = w; \
-} while(0)
-
-#define TEST_AGE( age, e, w ) \
- ( (age)->wrap < (w) || ( (age)->wrap == (w) && (age)->event <= (e) ) )
-
-#endif /* __SAVAGE_DRV_H__ */
diff --git a/drivers/gpu/drm/savage/savage_state.c b/drivers/gpu/drm/savage/savage_state.c
deleted file mode 100644
index e0d40ae67d54..000000000000
--- a/drivers/gpu/drm/savage/savage_state.c
+++ /dev/null
@@ -1,1169 +0,0 @@
-/* savage_state.c -- State and drawing support for Savage
- *
- * Copyright 2004 Felix Kuehling
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sub license,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NON-INFRINGEMENT. IN NO EVENT SHALL FELIX KUEHLING BE LIABLE FOR
- * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
- * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include <linux/slab.h>
-#include <linux/uaccess.h>
-
-#include <drm/drm_device.h>
-#include <drm/drm_file.h>
-#include <drm/drm_print.h>
-#include <drm/savage_drm.h>
-
-#include "savage_drv.h"
-
-void savage_emit_clip_rect_s3d(drm_savage_private_t * dev_priv,
- const struct drm_clip_rect * pbox)
-{
- uint32_t scstart = dev_priv->state.s3d.new_scstart;
- uint32_t scend = dev_priv->state.s3d.new_scend;
- scstart = (scstart & ~SAVAGE_SCISSOR_MASK_S3D) |
- ((uint32_t) pbox->x1 & 0x000007ff) |
- (((uint32_t) pbox->y1 << 16) & 0x07ff0000);
- scend = (scend & ~SAVAGE_SCISSOR_MASK_S3D) |
- (((uint32_t) pbox->x2 - 1) & 0x000007ff) |
- ((((uint32_t) pbox->y2 - 1) << 16) & 0x07ff0000);
- if (scstart != dev_priv->state.s3d.scstart ||
- scend != dev_priv->state.s3d.scend) {
- DMA_LOCALS;
- BEGIN_DMA(4);
- DMA_WRITE(BCI_CMD_WAIT | BCI_CMD_WAIT_3D);
- DMA_SET_REGISTERS(SAVAGE_SCSTART_S3D, 2);
- DMA_WRITE(scstart);
- DMA_WRITE(scend);
- dev_priv->state.s3d.scstart = scstart;
- dev_priv->state.s3d.scend = scend;
- dev_priv->waiting = 1;
- DMA_COMMIT();
- }
-}
-
-void savage_emit_clip_rect_s4(drm_savage_private_t * dev_priv,
- const struct drm_clip_rect * pbox)
-{
- uint32_t drawctrl0 = dev_priv->state.s4.new_drawctrl0;
- uint32_t drawctrl1 = dev_priv->state.s4.new_drawctrl1;
- drawctrl0 = (drawctrl0 & ~SAVAGE_SCISSOR_MASK_S4) |
- ((uint32_t) pbox->x1 & 0x000007ff) |
- (((uint32_t) pbox->y1 << 12) & 0x00fff000);
- drawctrl1 = (drawctrl1 & ~SAVAGE_SCISSOR_MASK_S4) |
- (((uint32_t) pbox->x2 - 1) & 0x000007ff) |
- ((((uint32_t) pbox->y2 - 1) << 12) & 0x00fff000);
- if (drawctrl0 != dev_priv->state.s4.drawctrl0 ||
- drawctrl1 != dev_priv->state.s4.drawctrl1) {
- DMA_LOCALS;
- BEGIN_DMA(4);
- DMA_WRITE(BCI_CMD_WAIT | BCI_CMD_WAIT_3D);
- DMA_SET_REGISTERS(SAVAGE_DRAWCTRL0_S4, 2);
- DMA_WRITE(drawctrl0);
- DMA_WRITE(drawctrl1);
- dev_priv->state.s4.drawctrl0 = drawctrl0;
- dev_priv->state.s4.drawctrl1 = drawctrl1;
- dev_priv->waiting = 1;
- DMA_COMMIT();
- }
-}
-
-static int savage_verify_texaddr(drm_savage_private_t * dev_priv, int unit,
- uint32_t addr)
-{
- if ((addr & 6) != 2) { /* reserved bits */
- DRM_ERROR("bad texAddr%d %08x (reserved bits)\n", unit, addr);
- return -EINVAL;
- }
- if (!(addr & 1)) { /* local */
- addr &= ~7;
- if (addr < dev_priv->texture_offset ||
- addr >= dev_priv->texture_offset + dev_priv->texture_size) {
- DRM_ERROR
- ("bad texAddr%d %08x (local addr out of range)\n",
- unit, addr);
- return -EINVAL;
- }
- } else { /* AGP */
- if (!dev_priv->agp_textures) {
- DRM_ERROR("bad texAddr%d %08x (AGP not available)\n",
- unit, addr);
- return -EINVAL;
- }
- addr &= ~7;
- if (addr < dev_priv->agp_textures->offset ||
- addr >= (dev_priv->agp_textures->offset +
- dev_priv->agp_textures->size)) {
- DRM_ERROR
- ("bad texAddr%d %08x (AGP addr out of range)\n",
- unit, addr);
- return -EINVAL;
- }
- }
- return 0;
-}
-
-#define SAVE_STATE(reg,where) \
- if(start <= reg && start+count > reg) \
- dev_priv->state.where = regs[reg - start]
-#define SAVE_STATE_MASK(reg,where,mask) do { \
- if(start <= reg && start+count > reg) { \
- uint32_t tmp; \
- tmp = regs[reg - start]; \
- dev_priv->state.where = (tmp & (mask)) | \
- (dev_priv->state.where & ~(mask)); \
- } \
-} while (0)
-
-static int savage_verify_state_s3d(drm_savage_private_t * dev_priv,
- unsigned int start, unsigned int count,
- const uint32_t *regs)
-{
- if (start < SAVAGE_TEXPALADDR_S3D ||
- start + count - 1 > SAVAGE_DESTTEXRWWATERMARK_S3D) {
- DRM_ERROR("invalid register range (0x%04x-0x%04x)\n",
- start, start + count - 1);
- return -EINVAL;
- }
-
- SAVE_STATE_MASK(SAVAGE_SCSTART_S3D, s3d.new_scstart,
- ~SAVAGE_SCISSOR_MASK_S3D);
- SAVE_STATE_MASK(SAVAGE_SCEND_S3D, s3d.new_scend,
- ~SAVAGE_SCISSOR_MASK_S3D);
-
- /* if any texture regs were changed ... */
- if (start <= SAVAGE_TEXCTRL_S3D &&
- start + count > SAVAGE_TEXPALADDR_S3D) {
- /* ... check texture state */
- SAVE_STATE(SAVAGE_TEXCTRL_S3D, s3d.texctrl);
- SAVE_STATE(SAVAGE_TEXADDR_S3D, s3d.texaddr);
- if (dev_priv->state.s3d.texctrl & SAVAGE_TEXCTRL_TEXEN_MASK)
- return savage_verify_texaddr(dev_priv, 0,
- dev_priv->state.s3d.texaddr);
- }
-
- return 0;
-}
-
-static int savage_verify_state_s4(drm_savage_private_t * dev_priv,
- unsigned int start, unsigned int count,
- const uint32_t *regs)
-{
- int ret = 0;
-
- if (start < SAVAGE_DRAWLOCALCTRL_S4 ||
- start + count - 1 > SAVAGE_TEXBLENDCOLOR_S4) {
- DRM_ERROR("invalid register range (0x%04x-0x%04x)\n",
- start, start + count - 1);
- return -EINVAL;
- }
-
- SAVE_STATE_MASK(SAVAGE_DRAWCTRL0_S4, s4.new_drawctrl0,
- ~SAVAGE_SCISSOR_MASK_S4);
- SAVE_STATE_MASK(SAVAGE_DRAWCTRL1_S4, s4.new_drawctrl1,
- ~SAVAGE_SCISSOR_MASK_S4);
-
- /* if any texture regs were changed ... */
- if (start <= SAVAGE_TEXDESCR_S4 &&
- start + count > SAVAGE_TEXPALADDR_S4) {
- /* ... check texture state */
- SAVE_STATE(SAVAGE_TEXDESCR_S4, s4.texdescr);
- SAVE_STATE(SAVAGE_TEXADDR0_S4, s4.texaddr0);
- SAVE_STATE(SAVAGE_TEXADDR1_S4, s4.texaddr1);
- if (dev_priv->state.s4.texdescr & SAVAGE_TEXDESCR_TEX0EN_MASK)
- ret |= savage_verify_texaddr(dev_priv, 0,
- dev_priv->state.s4.texaddr0);
- if (dev_priv->state.s4.texdescr & SAVAGE_TEXDESCR_TEX1EN_MASK)
- ret |= savage_verify_texaddr(dev_priv, 1,
- dev_priv->state.s4.texaddr1);
- }
-
- return ret;
-}
-
-#undef SAVE_STATE
-#undef SAVE_STATE_MASK
-
-static int savage_dispatch_state(drm_savage_private_t * dev_priv,
- const drm_savage_cmd_header_t * cmd_header,
- const uint32_t *regs)
-{
- unsigned int count = cmd_header->state.count;
- unsigned int start = cmd_header->state.start;
- unsigned int count2 = 0;
- unsigned int bci_size;
- int ret;
- DMA_LOCALS;
-
- if (!count)
- return 0;
-
- if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
- ret = savage_verify_state_s3d(dev_priv, start, count, regs);
- if (ret != 0)
- return ret;
- /* scissor regs are emitted in savage_dispatch_draw */
- if (start < SAVAGE_SCSTART_S3D) {
- if (start + count > SAVAGE_SCEND_S3D + 1)
- count2 = count - (SAVAGE_SCEND_S3D + 1 - start);
- if (start + count > SAVAGE_SCSTART_S3D)
- count = SAVAGE_SCSTART_S3D - start;
- } else if (start <= SAVAGE_SCEND_S3D) {
- if (start + count > SAVAGE_SCEND_S3D + 1) {
- count -= SAVAGE_SCEND_S3D + 1 - start;
- start = SAVAGE_SCEND_S3D + 1;
- } else
- return 0;
- }
- } else {
- ret = savage_verify_state_s4(dev_priv, start, count, regs);
- if (ret != 0)
- return ret;
- /* scissor regs are emitted in savage_dispatch_draw */
- if (start < SAVAGE_DRAWCTRL0_S4) {
- if (start + count > SAVAGE_DRAWCTRL1_S4 + 1)
- count2 = count -
- (SAVAGE_DRAWCTRL1_S4 + 1 - start);
- if (start + count > SAVAGE_DRAWCTRL0_S4)
- count = SAVAGE_DRAWCTRL0_S4 - start;
- } else if (start <= SAVAGE_DRAWCTRL1_S4) {
- if (start + count > SAVAGE_DRAWCTRL1_S4 + 1) {
- count -= SAVAGE_DRAWCTRL1_S4 + 1 - start;
- start = SAVAGE_DRAWCTRL1_S4 + 1;
- } else
- return 0;
- }
- }
-
- bci_size = count + (count + 254) / 255 + count2 + (count2 + 254) / 255;
-
- if (cmd_header->state.global) {
- BEGIN_DMA(bci_size + 1);
- DMA_WRITE(BCI_CMD_WAIT | BCI_CMD_WAIT_3D);
- dev_priv->waiting = 1;
- } else {
- BEGIN_DMA(bci_size);
- }
-
- do {
- while (count > 0) {
- unsigned int n = count < 255 ? count : 255;
- DMA_SET_REGISTERS(start, n);
- DMA_COPY(regs, n);
- count -= n;
- start += n;
- regs += n;
- }
- start += 2;
- regs += 2;
- count = count2;
- count2 = 0;
- } while (count);
-
- DMA_COMMIT();
-
- return 0;
-}
-
-static int savage_dispatch_dma_prim(drm_savage_private_t * dev_priv,
- const drm_savage_cmd_header_t * cmd_header,
- const struct drm_buf * dmabuf)
-{
- unsigned char reorder = 0;
- unsigned int prim = cmd_header->prim.prim;
- unsigned int skip = cmd_header->prim.skip;
- unsigned int n = cmd_header->prim.count;
- unsigned int start = cmd_header->prim.start;
- unsigned int i;
- BCI_LOCALS;
-
- if (!dmabuf) {
- DRM_ERROR("called without dma buffers!\n");
- return -EINVAL;
- }
-
- if (!n)
- return 0;
-
- switch (prim) {
- case SAVAGE_PRIM_TRILIST_201:
- reorder = 1;
- prim = SAVAGE_PRIM_TRILIST;
- fallthrough;
- case SAVAGE_PRIM_TRILIST:
- if (n % 3 != 0) {
- DRM_ERROR("wrong number of vertices %u in TRILIST\n",
- n);
- return -EINVAL;
- }
- break;
- case SAVAGE_PRIM_TRISTRIP:
- case SAVAGE_PRIM_TRIFAN:
- if (n < 3) {
- DRM_ERROR
- ("wrong number of vertices %u in TRIFAN/STRIP\n",
- n);
- return -EINVAL;
- }
- break;
- default:
- DRM_ERROR("invalid primitive type %u\n", prim);
- return -EINVAL;
- }
-
- if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
- if (skip != 0) {
- DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip);
- return -EINVAL;
- }
- } else {
- unsigned int size = 10 - (skip & 1) - (skip >> 1 & 1) -
- (skip >> 2 & 1) - (skip >> 3 & 1) - (skip >> 4 & 1) -
- (skip >> 5 & 1) - (skip >> 6 & 1) - (skip >> 7 & 1);
- if (skip > SAVAGE_SKIP_ALL_S4 || size != 8) {
- DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip);
- return -EINVAL;
- }
- if (reorder) {
- DRM_ERROR("TRILIST_201 used on Savage4 hardware\n");
- return -EINVAL;
- }
- }
-
- if (start + n > dmabuf->total / 32) {
- DRM_ERROR("vertex indices (%u-%u) out of range (0-%u)\n",
- start, start + n - 1, dmabuf->total / 32);
- return -EINVAL;
- }
-
- /* Vertex DMA doesn't work with command DMA at the same time,
- * so we use BCI_... to submit commands here. Flush buffered
- * faked DMA first. */
- DMA_FLUSH();
-
- if (dmabuf->bus_address != dev_priv->state.common.vbaddr) {
- BEGIN_BCI(2);
- BCI_SET_REGISTERS(SAVAGE_VERTBUFADDR, 1);
- BCI_WRITE(dmabuf->bus_address | dev_priv->dma_type);
- dev_priv->state.common.vbaddr = dmabuf->bus_address;
- }
- if (S3_SAVAGE3D_SERIES(dev_priv->chipset) && dev_priv->waiting) {
- /* Workaround for what looks like a hardware bug. If a
- * WAIT_3D_IDLE was emitted some time before the
- * indexed drawing command then the engine will lock
- * up. There are two known workarounds:
- * WAIT_IDLE_EMPTY or emit at least 63 NOPs. */
- BEGIN_BCI(63);
- for (i = 0; i < 63; ++i)
- BCI_WRITE(BCI_CMD_WAIT);
- dev_priv->waiting = 0;
- }
-
- prim <<= 25;
- while (n != 0) {
- /* Can emit up to 255 indices (85 triangles) at once. */
- unsigned int count = n > 255 ? 255 : n;
- if (reorder) {
- /* Need to reorder indices for correct flat
- * shading while preserving the clock sense
- * for correct culling. Only on Savage3D. */
- int reorder[3] = { -1, -1, -1 };
- reorder[start % 3] = 2;
-
- BEGIN_BCI((count + 1 + 1) / 2);
- BCI_DRAW_INDICES_S3D(count, prim, start + 2);
-
- for (i = start + 1; i + 1 < start + count; i += 2)
- BCI_WRITE((i + reorder[i % 3]) |
- ((i + 1 +
- reorder[(i + 1) % 3]) << 16));
- if (i < start + count)
- BCI_WRITE(i + reorder[i % 3]);
- } else if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
- BEGIN_BCI((count + 1 + 1) / 2);
- BCI_DRAW_INDICES_S3D(count, prim, start);
-
- for (i = start + 1; i + 1 < start + count; i += 2)
- BCI_WRITE(i | ((i + 1) << 16));
- if (i < start + count)
- BCI_WRITE(i);
- } else {
- BEGIN_BCI((count + 2 + 1) / 2);
- BCI_DRAW_INDICES_S4(count, prim, skip);
-
- for (i = start; i + 1 < start + count; i += 2)
- BCI_WRITE(i | ((i + 1) << 16));
- if (i < start + count)
- BCI_WRITE(i);
- }
-
- start += count;
- n -= count;
-
- prim |= BCI_CMD_DRAW_CONT;
- }
-
- return 0;
-}
-
-static int savage_dispatch_vb_prim(drm_savage_private_t * dev_priv,
- const drm_savage_cmd_header_t * cmd_header,
- const uint32_t *vtxbuf, unsigned int vb_size,
- unsigned int vb_stride)
-{
- unsigned char reorder = 0;
- unsigned int prim = cmd_header->prim.prim;
- unsigned int skip = cmd_header->prim.skip;
- unsigned int n = cmd_header->prim.count;
- unsigned int start = cmd_header->prim.start;
- unsigned int vtx_size;
- unsigned int i;
- DMA_LOCALS;
-
- if (!n)
- return 0;
-
- switch (prim) {
- case SAVAGE_PRIM_TRILIST_201:
- reorder = 1;
- prim = SAVAGE_PRIM_TRILIST;
- fallthrough;
- case SAVAGE_PRIM_TRILIST:
- if (n % 3 != 0) {
- DRM_ERROR("wrong number of vertices %u in TRILIST\n",
- n);
- return -EINVAL;
- }
- break;
- case SAVAGE_PRIM_TRISTRIP:
- case SAVAGE_PRIM_TRIFAN:
- if (n < 3) {
- DRM_ERROR
- ("wrong number of vertices %u in TRIFAN/STRIP\n",
- n);
- return -EINVAL;
- }
- break;
- default:
- DRM_ERROR("invalid primitive type %u\n", prim);
- return -EINVAL;
- }
-
- if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
- if (skip > SAVAGE_SKIP_ALL_S3D) {
- DRM_ERROR("invalid skip flags 0x%04x\n", skip);
- return -EINVAL;
- }
- vtx_size = 8; /* full vertex */
- } else {
- if (skip > SAVAGE_SKIP_ALL_S4) {
- DRM_ERROR("invalid skip flags 0x%04x\n", skip);
- return -EINVAL;
- }
- vtx_size = 10; /* full vertex */
- }
-
- vtx_size -= (skip & 1) + (skip >> 1 & 1) +
- (skip >> 2 & 1) + (skip >> 3 & 1) + (skip >> 4 & 1) +
- (skip >> 5 & 1) + (skip >> 6 & 1) + (skip >> 7 & 1);
-
- if (vtx_size > vb_stride) {
- DRM_ERROR("vertex size greater than vb stride (%u > %u)\n",
- vtx_size, vb_stride);
- return -EINVAL;
- }
-
- if (start + n > vb_size / (vb_stride * 4)) {
- DRM_ERROR("vertex indices (%u-%u) out of range (0-%u)\n",
- start, start + n - 1, vb_size / (vb_stride * 4));
- return -EINVAL;
- }
-
- prim <<= 25;
- while (n != 0) {
- /* Can emit up to 255 vertices (85 triangles) at once. */
- unsigned int count = n > 255 ? 255 : n;
- if (reorder) {
- /* Need to reorder vertices for correct flat
- * shading while preserving the clock sense
- * for correct culling. Only on Savage3D. */
- int reorder[3] = { -1, -1, -1 };
- reorder[start % 3] = 2;
-
- BEGIN_DMA(count * vtx_size + 1);
- DMA_DRAW_PRIMITIVE(count, prim, skip);
-
- for (i = start; i < start + count; ++i) {
- unsigned int j = i + reorder[i % 3];
- DMA_COPY(&vtxbuf[vb_stride * j], vtx_size);
- }
-
- DMA_COMMIT();
- } else {
- BEGIN_DMA(count * vtx_size + 1);
- DMA_DRAW_PRIMITIVE(count, prim, skip);
-
- if (vb_stride == vtx_size) {
- DMA_COPY(&vtxbuf[vb_stride * start],
- vtx_size * count);
- } else {
- for (i = start; i < start + count; ++i) {
- DMA_COPY(&vtxbuf [vb_stride * i],
- vtx_size);
- }
- }
-
- DMA_COMMIT();
- }
-
- start += count;
- n -= count;
-
- prim |= BCI_CMD_DRAW_CONT;
- }
-
- return 0;
-}
-
-static int savage_dispatch_dma_idx(drm_savage_private_t * dev_priv,
- const drm_savage_cmd_header_t * cmd_header,
- const uint16_t *idx,
- const struct drm_buf * dmabuf)
-{
- unsigned char reorder = 0;
- unsigned int prim = cmd_header->idx.prim;
- unsigned int skip = cmd_header->idx.skip;
- unsigned int n = cmd_header->idx.count;
- unsigned int i;
- BCI_LOCALS;
-
- if (!dmabuf) {
- DRM_ERROR("called without dma buffers!\n");
- return -EINVAL;
- }
-
- if (!n)
- return 0;
-
- switch (prim) {
- case SAVAGE_PRIM_TRILIST_201:
- reorder = 1;
- prim = SAVAGE_PRIM_TRILIST;
- fallthrough;
- case SAVAGE_PRIM_TRILIST:
- if (n % 3 != 0) {
- DRM_ERROR("wrong number of indices %u in TRILIST\n", n);
- return -EINVAL;
- }
- break;
- case SAVAGE_PRIM_TRISTRIP:
- case SAVAGE_PRIM_TRIFAN:
- if (n < 3) {
- DRM_ERROR
- ("wrong number of indices %u in TRIFAN/STRIP\n", n);
- return -EINVAL;
- }
- break;
- default:
- DRM_ERROR("invalid primitive type %u\n", prim);
- return -EINVAL;
- }
-
- if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
- if (skip != 0) {
- DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip);
- return -EINVAL;
- }
- } else {
- unsigned int size = 10 - (skip & 1) - (skip >> 1 & 1) -
- (skip >> 2 & 1) - (skip >> 3 & 1) - (skip >> 4 & 1) -
- (skip >> 5 & 1) - (skip >> 6 & 1) - (skip >> 7 & 1);
- if (skip > SAVAGE_SKIP_ALL_S4 || size != 8) {
- DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip);
- return -EINVAL;
- }
- if (reorder) {
- DRM_ERROR("TRILIST_201 used on Savage4 hardware\n");
- return -EINVAL;
- }
- }
-
- /* Vertex DMA doesn't work with command DMA at the same time,
- * so we use BCI_... to submit commands here. Flush buffered
- * faked DMA first. */
- DMA_FLUSH();
-
- if (dmabuf->bus_address != dev_priv->state.common.vbaddr) {
- BEGIN_BCI(2);
- BCI_SET_REGISTERS(SAVAGE_VERTBUFADDR, 1);
- BCI_WRITE(dmabuf->bus_address | dev_priv->dma_type);
- dev_priv->state.common.vbaddr = dmabuf->bus_address;
- }
- if (S3_SAVAGE3D_SERIES(dev_priv->chipset) && dev_priv->waiting) {
- /* Workaround for what looks like a hardware bug. If a
- * WAIT_3D_IDLE was emitted some time before the
- * indexed drawing command then the engine will lock
- * up. There are two known workarounds:
- * WAIT_IDLE_EMPTY or emit at least 63 NOPs. */
- BEGIN_BCI(63);
- for (i = 0; i < 63; ++i)
- BCI_WRITE(BCI_CMD_WAIT);
- dev_priv->waiting = 0;
- }
-
- prim <<= 25;
- while (n != 0) {
- /* Can emit up to 255 indices (85 triangles) at once. */
- unsigned int count = n > 255 ? 255 : n;
-
- /* check indices */
- for (i = 0; i < count; ++i) {
- if (idx[i] > dmabuf->total / 32) {
- DRM_ERROR("idx[%u]=%u out of range (0-%u)\n",
- i, idx[i], dmabuf->total / 32);
- return -EINVAL;
- }
- }
-
- if (reorder) {
- /* Need to reorder indices for correct flat
- * shading while preserving the clock sense
- * for correct culling. Only on Savage3D. */
- int reorder[3] = { 2, -1, -1 };
-
- BEGIN_BCI((count + 1 + 1) / 2);
- BCI_DRAW_INDICES_S3D(count, prim, idx[2]);
-
- for (i = 1; i + 1 < count; i += 2)
- BCI_WRITE(idx[i + reorder[i % 3]] |
- (idx[i + 1 +
- reorder[(i + 1) % 3]] << 16));
- if (i < count)
- BCI_WRITE(idx[i + reorder[i % 3]]);
- } else if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
- BEGIN_BCI((count + 1 + 1) / 2);
- BCI_DRAW_INDICES_S3D(count, prim, idx[0]);
-
- for (i = 1; i + 1 < count; i += 2)
- BCI_WRITE(idx[i] | (idx[i + 1] << 16));
- if (i < count)
- BCI_WRITE(idx[i]);
- } else {
- BEGIN_BCI((count + 2 + 1) / 2);
- BCI_DRAW_INDICES_S4(count, prim, skip);
-
- for (i = 0; i + 1 < count; i += 2)
- BCI_WRITE(idx[i] | (idx[i + 1] << 16));
- if (i < count)
- BCI_WRITE(idx[i]);
- }
-
- idx += count;
- n -= count;
-
- prim |= BCI_CMD_DRAW_CONT;
- }
-
- return 0;
-}
-
-static int savage_dispatch_vb_idx(drm_savage_private_t * dev_priv,
- const drm_savage_cmd_header_t * cmd_header,
- const uint16_t *idx,
- const uint32_t *vtxbuf,
- unsigned int vb_size, unsigned int vb_stride)
-{
- unsigned char reorder = 0;
- unsigned int prim = cmd_header->idx.prim;
- unsigned int skip = cmd_header->idx.skip;
- unsigned int n = cmd_header->idx.count;
- unsigned int vtx_size;
- unsigned int i;
- DMA_LOCALS;
-
- if (!n)
- return 0;
-
- switch (prim) {
- case SAVAGE_PRIM_TRILIST_201:
- reorder = 1;
- prim = SAVAGE_PRIM_TRILIST;
- fallthrough;
- case SAVAGE_PRIM_TRILIST:
- if (n % 3 != 0) {
- DRM_ERROR("wrong number of indices %u in TRILIST\n", n);
- return -EINVAL;
- }
- break;
- case SAVAGE_PRIM_TRISTRIP:
- case SAVAGE_PRIM_TRIFAN:
- if (n < 3) {
- DRM_ERROR
- ("wrong number of indices %u in TRIFAN/STRIP\n", n);
- return -EINVAL;
- }
- break;
- default:
- DRM_ERROR("invalid primitive type %u\n", prim);
- return -EINVAL;
- }
-
- if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
- if (skip > SAVAGE_SKIP_ALL_S3D) {
- DRM_ERROR("invalid skip flags 0x%04x\n", skip);
- return -EINVAL;
- }
- vtx_size = 8; /* full vertex */
- } else {
- if (skip > SAVAGE_SKIP_ALL_S4) {
- DRM_ERROR("invalid skip flags 0x%04x\n", skip);
- return -EINVAL;
- }
- vtx_size = 10; /* full vertex */
- }
-
- vtx_size -= (skip & 1) + (skip >> 1 & 1) +
- (skip >> 2 & 1) + (skip >> 3 & 1) + (skip >> 4 & 1) +
- (skip >> 5 & 1) + (skip >> 6 & 1) + (skip >> 7 & 1);
-
- if (vtx_size > vb_stride) {
- DRM_ERROR("vertex size greater than vb stride (%u > %u)\n",
- vtx_size, vb_stride);
- return -EINVAL;
- }
-
- prim <<= 25;
- while (n != 0) {
- /* Can emit up to 255 vertices (85 triangles) at once. */
- unsigned int count = n > 255 ? 255 : n;
-
- /* Check indices */
- for (i = 0; i < count; ++i) {
- if (idx[i] > vb_size / (vb_stride * 4)) {
- DRM_ERROR("idx[%u]=%u out of range (0-%u)\n",
- i, idx[i], vb_size / (vb_stride * 4));
- return -EINVAL;
- }
- }
-
- if (reorder) {
- /* Need to reorder vertices for correct flat
- * shading while preserving the clock sense
- * for correct culling. Only on Savage3D. */
- int reorder[3] = { 2, -1, -1 };
-
- BEGIN_DMA(count * vtx_size + 1);
- DMA_DRAW_PRIMITIVE(count, prim, skip);
-
- for (i = 0; i < count; ++i) {
- unsigned int j = idx[i + reorder[i % 3]];
- DMA_COPY(&vtxbuf[vb_stride * j], vtx_size);
- }
-
- DMA_COMMIT();
- } else {
- BEGIN_DMA(count * vtx_size + 1);
- DMA_DRAW_PRIMITIVE(count, prim, skip);
-
- for (i = 0; i < count; ++i) {
- unsigned int j = idx[i];
- DMA_COPY(&vtxbuf[vb_stride * j], vtx_size);
- }
-
- DMA_COMMIT();
- }
-
- idx += count;
- n -= count;
-
- prim |= BCI_CMD_DRAW_CONT;
- }
-
- return 0;
-}
-
-static int savage_dispatch_clear(drm_savage_private_t * dev_priv,
- const drm_savage_cmd_header_t * cmd_header,
- const drm_savage_cmd_header_t *data,
- unsigned int nbox,
- const struct drm_clip_rect *boxes)
-{
- unsigned int flags = cmd_header->clear0.flags;
- unsigned int clear_cmd;
- unsigned int i, nbufs;
- DMA_LOCALS;
-
- if (nbox == 0)
- return 0;
-
- clear_cmd = BCI_CMD_RECT | BCI_CMD_RECT_XP | BCI_CMD_RECT_YP |
- BCI_CMD_SEND_COLOR | BCI_CMD_DEST_PBD_NEW;
- BCI_CMD_SET_ROP(clear_cmd, 0xCC);
-
- nbufs = ((flags & SAVAGE_FRONT) ? 1 : 0) +
- ((flags & SAVAGE_BACK) ? 1 : 0) + ((flags & SAVAGE_DEPTH) ? 1 : 0);
- if (nbufs == 0)
- return 0;
-
- if (data->clear1.mask != 0xffffffff) {
- /* set mask */
- BEGIN_DMA(2);
- DMA_SET_REGISTERS(SAVAGE_BITPLANEWTMASK, 1);
- DMA_WRITE(data->clear1.mask);
- DMA_COMMIT();
- }
- for (i = 0; i < nbox; ++i) {
- unsigned int x, y, w, h;
- unsigned int buf;
- x = boxes[i].x1, y = boxes[i].y1;
- w = boxes[i].x2 - boxes[i].x1;
- h = boxes[i].y2 - boxes[i].y1;
- BEGIN_DMA(nbufs * 6);
- for (buf = SAVAGE_FRONT; buf <= SAVAGE_DEPTH; buf <<= 1) {
- if (!(flags & buf))
- continue;
- DMA_WRITE(clear_cmd);
- switch (buf) {
- case SAVAGE_FRONT:
- DMA_WRITE(dev_priv->front_offset);
- DMA_WRITE(dev_priv->front_bd);
- break;
- case SAVAGE_BACK:
- DMA_WRITE(dev_priv->back_offset);
- DMA_WRITE(dev_priv->back_bd);
- break;
- case SAVAGE_DEPTH:
- DMA_WRITE(dev_priv->depth_offset);
- DMA_WRITE(dev_priv->depth_bd);
- break;
- }
- DMA_WRITE(data->clear1.value);
- DMA_WRITE(BCI_X_Y(x, y));
- DMA_WRITE(BCI_W_H(w, h));
- }
- DMA_COMMIT();
- }
- if (data->clear1.mask != 0xffffffff) {
- /* reset mask */
- BEGIN_DMA(2);
- DMA_SET_REGISTERS(SAVAGE_BITPLANEWTMASK, 1);
- DMA_WRITE(0xffffffff);
- DMA_COMMIT();
- }
-
- return 0;
-}
-
-static int savage_dispatch_swap(drm_savage_private_t * dev_priv,
- unsigned int nbox, const struct drm_clip_rect *boxes)
-{
- unsigned int swap_cmd;
- unsigned int i;
- DMA_LOCALS;
-
- if (nbox == 0)
- return 0;
-
- swap_cmd = BCI_CMD_RECT | BCI_CMD_RECT_XP | BCI_CMD_RECT_YP |
- BCI_CMD_SRC_PBD_COLOR_NEW | BCI_CMD_DEST_GBD;
- BCI_CMD_SET_ROP(swap_cmd, 0xCC);
-
- for (i = 0; i < nbox; ++i) {
- BEGIN_DMA(6);
- DMA_WRITE(swap_cmd);
- DMA_WRITE(dev_priv->back_offset);
- DMA_WRITE(dev_priv->back_bd);
- DMA_WRITE(BCI_X_Y(boxes[i].x1, boxes[i].y1));
- DMA_WRITE(BCI_X_Y(boxes[i].x1, boxes[i].y1));
- DMA_WRITE(BCI_W_H(boxes[i].x2 - boxes[i].x1,
- boxes[i].y2 - boxes[i].y1));
- DMA_COMMIT();
- }
-
- return 0;
-}
-
-static int savage_dispatch_draw(drm_savage_private_t * dev_priv,
- const drm_savage_cmd_header_t *start,
- const drm_savage_cmd_header_t *end,
- const struct drm_buf * dmabuf,
- const unsigned int *vtxbuf,
- unsigned int vb_size, unsigned int vb_stride,
- unsigned int nbox,
- const struct drm_clip_rect *boxes)
-{
- unsigned int i, j;
- int ret;
-
- for (i = 0; i < nbox; ++i) {
- const drm_savage_cmd_header_t *cmdbuf;
- dev_priv->emit_clip_rect(dev_priv, &boxes[i]);
-
- cmdbuf = start;
- while (cmdbuf < end) {
- drm_savage_cmd_header_t cmd_header;
- cmd_header = *cmdbuf;
- cmdbuf++;
- switch (cmd_header.cmd.cmd) {
- case SAVAGE_CMD_DMA_PRIM:
- ret = savage_dispatch_dma_prim(
- dev_priv, &cmd_header, dmabuf);
- break;
- case SAVAGE_CMD_VB_PRIM:
- ret = savage_dispatch_vb_prim(
- dev_priv, &cmd_header,
- vtxbuf, vb_size, vb_stride);
- break;
- case SAVAGE_CMD_DMA_IDX:
- j = (cmd_header.idx.count + 3) / 4;
- /* j was check in savage_bci_cmdbuf */
- ret = savage_dispatch_dma_idx(dev_priv,
- &cmd_header, (const uint16_t *)cmdbuf,
- dmabuf);
- cmdbuf += j;
- break;
- case SAVAGE_CMD_VB_IDX:
- j = (cmd_header.idx.count + 3) / 4;
- /* j was check in savage_bci_cmdbuf */
- ret = savage_dispatch_vb_idx(dev_priv,
- &cmd_header, (const uint16_t *)cmdbuf,
- (const uint32_t *)vtxbuf, vb_size,
- vb_stride);
- cmdbuf += j;
- break;
- default:
- /* What's the best return code? EFAULT? */
- DRM_ERROR("IMPLEMENTATION ERROR: "
- "non-drawing-command %d\n",
- cmd_header.cmd.cmd);
- return -EINVAL;
- }
-
- if (ret != 0)
- return ret;
- }
- }
-
- return 0;
-}
-
-int savage_bci_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- drm_savage_private_t *dev_priv = dev->dev_private;
- struct drm_device_dma *dma = dev->dma;
- struct drm_buf *dmabuf;
- drm_savage_cmdbuf_t *cmdbuf = data;
- drm_savage_cmd_header_t *kcmd_addr = NULL;
- drm_savage_cmd_header_t *first_draw_cmd;
- unsigned int *kvb_addr = NULL;
- struct drm_clip_rect *kbox_addr = NULL;
- unsigned int i, j;
- int ret = 0;
-
- DRM_DEBUG("\n");
-
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- if (dma && dma->buflist) {
- if (cmdbuf->dma_idx >= dma->buf_count) {
- DRM_ERROR
- ("vertex buffer index %u out of range (0-%u)\n",
- cmdbuf->dma_idx, dma->buf_count - 1);
- return -EINVAL;
- }
- dmabuf = dma->buflist[cmdbuf->dma_idx];
- } else {
- dmabuf = NULL;
- }
-
- /* Copy the user buffers into kernel temporary areas. This hasn't been
- * a performance loss compared to VERIFYAREA_READ/
- * COPY_FROM_USER_UNCHECKED when done in other drivers, and is correct
- * for locking on FreeBSD.
- */
- if (cmdbuf->size) {
- kcmd_addr = kmalloc_array(cmdbuf->size, 8, GFP_KERNEL);
- if (kcmd_addr == NULL)
- return -ENOMEM;
-
- if (copy_from_user(kcmd_addr, cmdbuf->cmd_addr,
- cmdbuf->size * 8))
- {
- kfree(kcmd_addr);
- return -EFAULT;
- }
- cmdbuf->cmd_addr = kcmd_addr;
- }
- if (cmdbuf->vb_size) {
- kvb_addr = memdup_user(cmdbuf->vb_addr, cmdbuf->vb_size);
- if (IS_ERR(kvb_addr)) {
- ret = PTR_ERR(kvb_addr);
- kvb_addr = NULL;
- goto done;
- }
- cmdbuf->vb_addr = kvb_addr;
- }
- if (cmdbuf->nbox) {
- kbox_addr = kmalloc_array(cmdbuf->nbox, sizeof(struct drm_clip_rect),
- GFP_KERNEL);
- if (kbox_addr == NULL) {
- ret = -ENOMEM;
- goto done;
- }
-
- if (copy_from_user(kbox_addr, cmdbuf->box_addr,
- cmdbuf->nbox * sizeof(struct drm_clip_rect))) {
- ret = -EFAULT;
- goto done;
- }
- cmdbuf->box_addr = kbox_addr;
- }
-
- /* Make sure writes to DMA buffers are finished before sending
- * DMA commands to the graphics hardware. */
- mb();
-
- /* Coming from user space. Don't know if the Xserver has
- * emitted wait commands. Assuming the worst. */
- dev_priv->waiting = 1;
-
- i = 0;
- first_draw_cmd = NULL;
- while (i < cmdbuf->size) {
- drm_savage_cmd_header_t cmd_header;
- cmd_header = *(drm_savage_cmd_header_t *)cmdbuf->cmd_addr;
- cmdbuf->cmd_addr++;
- i++;
-
- /* Group drawing commands with same state to minimize
- * iterations over clip rects. */
- j = 0;
- switch (cmd_header.cmd.cmd) {
- case SAVAGE_CMD_DMA_IDX:
- case SAVAGE_CMD_VB_IDX:
- j = (cmd_header.idx.count + 3) / 4;
- if (i + j > cmdbuf->size) {
- DRM_ERROR("indexed drawing command extends "
- "beyond end of command buffer\n");
- DMA_FLUSH();
- ret = -EINVAL;
- goto done;
- }
- fallthrough;
- case SAVAGE_CMD_DMA_PRIM:
- case SAVAGE_CMD_VB_PRIM:
- if (!first_draw_cmd)
- first_draw_cmd = cmdbuf->cmd_addr - 1;
- cmdbuf->cmd_addr += j;
- i += j;
- break;
- default:
- if (first_draw_cmd) {
- ret = savage_dispatch_draw(
- dev_priv, first_draw_cmd,
- cmdbuf->cmd_addr - 1,
- dmabuf, cmdbuf->vb_addr, cmdbuf->vb_size,
- cmdbuf->vb_stride,
- cmdbuf->nbox, cmdbuf->box_addr);
- if (ret != 0)
- goto done;
- first_draw_cmd = NULL;
- }
- }
- if (first_draw_cmd)
- continue;
-
- switch (cmd_header.cmd.cmd) {
- case SAVAGE_CMD_STATE:
- j = (cmd_header.state.count + 1) / 2;
- if (i + j > cmdbuf->size) {
- DRM_ERROR("command SAVAGE_CMD_STATE extends "
- "beyond end of command buffer\n");
- DMA_FLUSH();
- ret = -EINVAL;
- goto done;
- }
- ret = savage_dispatch_state(dev_priv, &cmd_header,
- (const uint32_t *)cmdbuf->cmd_addr);
- cmdbuf->cmd_addr += j;
- i += j;
- break;
- case SAVAGE_CMD_CLEAR:
- if (i + 1 > cmdbuf->size) {
- DRM_ERROR("command SAVAGE_CMD_CLEAR extends "
- "beyond end of command buffer\n");
- DMA_FLUSH();
- ret = -EINVAL;
- goto done;
- }
- ret = savage_dispatch_clear(dev_priv, &cmd_header,
- cmdbuf->cmd_addr,
- cmdbuf->nbox,
- cmdbuf->box_addr);
- cmdbuf->cmd_addr++;
- i++;
- break;
- case SAVAGE_CMD_SWAP:
- ret = savage_dispatch_swap(dev_priv, cmdbuf->nbox,
- cmdbuf->box_addr);
- break;
- default:
- DRM_ERROR("invalid command 0x%x\n",
- cmd_header.cmd.cmd);
- DMA_FLUSH();
- ret = -EINVAL;
- goto done;
- }
-
- if (ret != 0) {
- DMA_FLUSH();
- goto done;
- }
- }
-
- if (first_draw_cmd) {
- ret = savage_dispatch_draw (
- dev_priv, first_draw_cmd, cmdbuf->cmd_addr, dmabuf,
- cmdbuf->vb_addr, cmdbuf->vb_size, cmdbuf->vb_stride,
- cmdbuf->nbox, cmdbuf->box_addr);
- if (ret != 0) {
- DMA_FLUSH();
- goto done;
- }
- }
-
- DMA_FLUSH();
-
- if (dmabuf && cmdbuf->discard) {
- drm_savage_buf_priv_t *buf_priv = dmabuf->dev_private;
- uint16_t event;
- event = savage_bci_emit_event(dev_priv, SAVAGE_WAIT_3D);
- SET_AGE(&buf_priv->age, event, dev_priv->event_wrap);
- savage_freelist_put(dev, dmabuf);
- }
-
-done:
- /* If we didn't need to allocate them, these'll be NULL */
- kfree(kcmd_addr);
- kfree(kvb_addr);
- kfree(kbox_addr);
-
- return ret;
-}
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index fd22d753b4ed..4e6ad6e122bc 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -551,10 +551,21 @@ void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
EXPORT_SYMBOL(drm_sched_start);
/**
- * drm_sched_resubmit_jobs - helper to relaunch jobs from the pending list
+ * drm_sched_resubmit_jobs - Deprecated, don't use in new code!
*
* @sched: scheduler instance
*
+ * Re-submitting jobs was a concept AMD came up as cheap way to implement
+ * recovery after a job timeout.
+ *
+ * This turned out to be not working very well. First of all there are many
+ * problem with the dma_fence implementation and requirements. Either the
+ * implementation is risking deadlocks with core memory management or violating
+ * documented implementation details of the dma_fence object.
+ *
+ * Drivers can still save and restore their state for recovery operations, but
+ * we shouldn't make this a general scheduler feature around the dma_fence
+ * interface.
*/
void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched)
{
@@ -895,6 +906,12 @@ drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
spin_unlock(&sched->job_list_lock);
+ if (job) {
+ job->entity->elapsed_ns += ktime_to_ns(
+ ktime_sub(job->s_fence->finished.timestamp,
+ job->s_fence->scheduled.timestamp));
+ }
+
return job;
}
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
index 4624c0aff51f..d354ab3077ce 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
@@ -16,6 +16,8 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_dma_helper.h>
+#include <drm/drm_modeset_helper.h>
+#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
#include <drm/drm_vblank.h>
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
index 3d511fa38913..faacfee24763 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_drv.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
@@ -15,7 +15,6 @@
#include <linux/pm.h>
#include <linux/slab.h>
-#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_module.h>
@@ -143,7 +142,6 @@ static const struct drm_driver shmob_drm_driver = {
* Power management
*/
-#ifdef CONFIG_PM_SLEEP
static int shmob_drm_pm_suspend(struct device *dev)
{
struct shmob_drm_device *sdev = dev_get_drvdata(dev);
@@ -165,11 +163,9 @@ static int shmob_drm_pm_resume(struct device *dev)
drm_kms_helper_poll_enable(sdev->ddev);
return 0;
}
-#endif
-static const struct dev_pm_ops shmob_drm_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(shmob_drm_pm_suspend, shmob_drm_pm_resume)
-};
+static DEFINE_SIMPLE_DEV_PM_OPS(shmob_drm_pm_ops,
+ shmob_drm_pm_suspend, shmob_drm_pm_resume);
/* -----------------------------------------------------------------------------
* Platform driver
@@ -292,7 +288,7 @@ static struct platform_driver shmob_drm_platform_driver = {
.remove = shmob_drm_remove,
.driver = {
.name = "shmob-drm",
- .pm = &shmob_drm_pm_ops,
+ .pm = pm_sleep_ptr(&shmob_drm_pm_ops),
},
};
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_plane.c b/drivers/gpu/drm/shmobile/shmob_drm_plane.c
index 6c5f0cbe7d95..604ae23825da 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_plane.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_plane.c
@@ -8,7 +8,6 @@
*/
#include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
diff --git a/drivers/gpu/drm/sis/Makefile b/drivers/gpu/drm/sis/Makefile
deleted file mode 100644
index 02b0253fda93..000000000000
--- a/drivers/gpu/drm/sis/Makefile
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# Makefile for the drm device driver. This driver provides support for the
-# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
-
-sis-y := sis_drv.o sis_mm.o
-
-obj-$(CONFIG_DRM_SIS) += sis.o
-
-
diff --git a/drivers/gpu/drm/sis/sis_drv.c b/drivers/gpu/drm/sis/sis_drv.c
deleted file mode 100644
index 6173020a9bf5..000000000000
--- a/drivers/gpu/drm/sis/sis_drv.c
+++ /dev/null
@@ -1,143 +0,0 @@
-/* sis.c -- sis driver -*- linux-c -*-
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <linux/module.h>
-#include <linux/pci.h>
-
-#include <drm/drm_drv.h>
-#include <drm/drm_file.h>
-#include <drm/drm_pciids.h>
-#include <drm/sis_drm.h>
-
-#include "sis_drv.h"
-
-static struct pci_device_id pciidlist[] = {
- sisdrv_PCI_IDS
-};
-
-static int sis_driver_load(struct drm_device *dev, unsigned long chipset)
-{
- struct pci_dev *pdev = to_pci_dev(dev->dev);
- drm_sis_private_t *dev_priv;
-
- pci_set_master(pdev);
-
- dev_priv = kzalloc(sizeof(drm_sis_private_t), GFP_KERNEL);
- if (dev_priv == NULL)
- return -ENOMEM;
-
- idr_init_base(&dev_priv->object_idr, 1);
- dev->dev_private = (void *)dev_priv;
- dev_priv->chipset = chipset;
-
- return 0;
-}
-
-static void sis_driver_unload(struct drm_device *dev)
-{
- drm_sis_private_t *dev_priv = dev->dev_private;
-
- idr_destroy(&dev_priv->object_idr);
-
- kfree(dev_priv);
-}
-
-static const struct file_operations sis_driver_fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = drm_ioctl,
- .mmap = drm_legacy_mmap,
- .poll = drm_poll,
- .compat_ioctl = drm_compat_ioctl,
- .llseek = noop_llseek,
-};
-
-static int sis_driver_open(struct drm_device *dev, struct drm_file *file)
-{
- struct sis_file_private *file_priv;
-
- DRM_DEBUG_DRIVER("\n");
- file_priv = kmalloc(sizeof(*file_priv), GFP_KERNEL);
- if (!file_priv)
- return -ENOMEM;
-
- file->driver_priv = file_priv;
-
- INIT_LIST_HEAD(&file_priv->obj_list);
-
- return 0;
-}
-
-static void sis_driver_postclose(struct drm_device *dev, struct drm_file *file)
-{
- struct sis_file_private *file_priv = file->driver_priv;
-
- kfree(file_priv);
-}
-
-static struct drm_driver driver = {
- .driver_features = DRIVER_USE_AGP | DRIVER_LEGACY,
- .load = sis_driver_load,
- .unload = sis_driver_unload,
- .open = sis_driver_open,
- .preclose = sis_reclaim_buffers_locked,
- .postclose = sis_driver_postclose,
- .dma_quiescent = sis_idle,
- .lastclose = sis_lastclose,
- .ioctls = sis_ioctls,
- .fops = &sis_driver_fops,
- .name = DRIVER_NAME,
- .desc = DRIVER_DESC,
- .date = DRIVER_DATE,
- .major = DRIVER_MAJOR,
- .minor = DRIVER_MINOR,
- .patchlevel = DRIVER_PATCHLEVEL,
-};
-
-static struct pci_driver sis_pci_driver = {
- .name = DRIVER_NAME,
- .id_table = pciidlist,
-};
-
-static int __init sis_init(void)
-{
- driver.num_ioctls = sis_max_ioctl;
- return drm_legacy_pci_init(&driver, &sis_pci_driver);
-}
-
-static void __exit sis_exit(void)
-{
- drm_legacy_pci_exit(&driver, &sis_pci_driver);
-}
-
-module_init(sis_init);
-module_exit(sis_exit);
-
-MODULE_AUTHOR(DRIVER_AUTHOR);
-MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/sis/sis_drv.h b/drivers/gpu/drm/sis/sis_drv.h
deleted file mode 100644
index 81339443b3b1..000000000000
--- a/drivers/gpu/drm/sis/sis_drv.h
+++ /dev/null
@@ -1,80 +0,0 @@
-/* sis_drv.h -- Private header for sis driver -*- linux-c -*- */
-/*
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef _SIS_DRV_H_
-#define _SIS_DRV_H_
-
-#include <drm/drm_ioctl.h>
-#include <drm/drm_legacy.h>
-#include <drm/drm_mm.h>
-
-/* General customization:
- */
-
-#define DRIVER_AUTHOR "SIS, Tungsten Graphics"
-#define DRIVER_NAME "sis"
-#define DRIVER_DESC "SIS 300/630/540 and XGI V3XE/V5/V8"
-#define DRIVER_DATE "20070626"
-#define DRIVER_MAJOR 1
-#define DRIVER_MINOR 3
-#define DRIVER_PATCHLEVEL 0
-
-enum sis_family {
- SIS_OTHER = 0,
- SIS_CHIP_315 = 1,
-};
-
-#define SIS_READ(reg) readl(((void __iomem *)dev_priv->mmio->handle) + (reg))
-#define SIS_WRITE(reg, val) writel(val, ((void __iomem *)dev_priv->mmio->handle) + (reg))
-
-typedef struct drm_sis_private {
- drm_local_map_t *mmio;
- unsigned int idle_fault;
- unsigned int chipset;
- int vram_initialized;
- int agp_initialized;
- unsigned long vram_offset;
- unsigned long agp_offset;
- struct drm_mm vram_mm;
- struct drm_mm agp_mm;
- /** Mapping of userspace keys to mm objects */
- struct idr object_idr;
-} drm_sis_private_t;
-
-struct sis_file_private {
- struct list_head obj_list;
-};
-
-extern int sis_idle(struct drm_device *dev);
-extern void sis_reclaim_buffers_locked(struct drm_device *dev,
- struct drm_file *file_priv);
-extern void sis_lastclose(struct drm_device *dev);
-
-extern const struct drm_ioctl_desc sis_ioctls[];
-extern int sis_max_ioctl;
-
-#endif
diff --git a/drivers/gpu/drm/sis/sis_mm.c b/drivers/gpu/drm/sis/sis_mm.c
deleted file mode 100644
index e51d4289a3d0..000000000000
--- a/drivers/gpu/drm/sis/sis_mm.c
+++ /dev/null
@@ -1,363 +0,0 @@
-/**************************************************************************
- *
- * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- *
- **************************************************************************/
-
-/*
- * Authors:
- * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
- */
-
-#include <video/sisfb.h>
-
-#include <drm/drm_device.h>
-#include <drm/drm_file.h>
-#include <drm/sis_drm.h>
-
-#include "sis_drv.h"
-
-
-#define VIDEO_TYPE 0
-#define AGP_TYPE 1
-
-
-struct sis_memblock {
- struct drm_mm_node mm_node;
- struct sis_memreq req;
- struct list_head owner_list;
-};
-
-#if defined(CONFIG_FB_SIS) || defined(CONFIG_FB_SIS_MODULE)
-/* fb management via fb device */
-
-#define SIS_MM_ALIGN_SHIFT 0
-#define SIS_MM_ALIGN_MASK 0
-
-#else /* CONFIG_FB_SIS[_MODULE] */
-
-#define SIS_MM_ALIGN_SHIFT 4
-#define SIS_MM_ALIGN_MASK ((1 << SIS_MM_ALIGN_SHIFT) - 1)
-
-#endif /* CONFIG_FB_SIS[_MODULE] */
-
-static int sis_fb_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- drm_sis_private_t *dev_priv = dev->dev_private;
- drm_sis_fb_t *fb = data;
-
- mutex_lock(&dev->struct_mutex);
- /* Unconditionally init the drm_mm, even though we don't use it when the
- * fb sis driver is available - make cleanup easier. */
- drm_mm_init(&dev_priv->vram_mm, 0, fb->size >> SIS_MM_ALIGN_SHIFT);
-
- dev_priv->vram_initialized = 1;
- dev_priv->vram_offset = fb->offset;
-
- mutex_unlock(&dev->struct_mutex);
- DRM_DEBUG("offset = %lu, size = %lu\n", fb->offset, fb->size);
-
- return 0;
-}
-
-static int sis_drm_alloc(struct drm_device *dev, struct drm_file *file,
- void *data, int pool)
-{
- drm_sis_private_t *dev_priv = dev->dev_private;
- drm_sis_mem_t *mem = data;
- int retval = 0, user_key;
- struct sis_memblock *item;
- struct sis_file_private *file_priv = file->driver_priv;
- unsigned long offset;
-
- mutex_lock(&dev->struct_mutex);
-
- if (0 == ((pool == 0) ? dev_priv->vram_initialized :
- dev_priv->agp_initialized)) {
- DRM_ERROR
- ("Attempt to allocate from uninitialized memory manager.\n");
- mutex_unlock(&dev->struct_mutex);
- return -EINVAL;
- }
-
- item = kzalloc(sizeof(*item), GFP_KERNEL);
- if (!item) {
- retval = -ENOMEM;
- goto fail_alloc;
- }
-
- mem->size = (mem->size + SIS_MM_ALIGN_MASK) >> SIS_MM_ALIGN_SHIFT;
- if (pool == AGP_TYPE) {
- retval = drm_mm_insert_node(&dev_priv->agp_mm,
- &item->mm_node,
- mem->size);
- offset = item->mm_node.start;
- } else {
-#if defined(CONFIG_FB_SIS) || defined(CONFIG_FB_SIS_MODULE)
- item->req.size = mem->size;
- sis_malloc(&item->req);
- if (item->req.size == 0)
- retval = -ENOMEM;
- offset = item->req.offset;
-#else
- retval = drm_mm_insert_node(&dev_priv->vram_mm,
- &item->mm_node,
- mem->size);
- offset = item->mm_node.start;
-#endif
- }
- if (retval)
- goto fail_alloc;
-
- retval = idr_alloc(&dev_priv->object_idr, item, 1, 0, GFP_KERNEL);
- if (retval < 0)
- goto fail_idr;
- user_key = retval;
-
- list_add(&item->owner_list, &file_priv->obj_list);
- mutex_unlock(&dev->struct_mutex);
-
- mem->offset = ((pool == 0) ?
- dev_priv->vram_offset : dev_priv->agp_offset) +
- (offset << SIS_MM_ALIGN_SHIFT);
- mem->free = user_key;
- mem->size = mem->size << SIS_MM_ALIGN_SHIFT;
-
- return 0;
-
-fail_idr:
- drm_mm_remove_node(&item->mm_node);
-fail_alloc:
- kfree(item);
- mutex_unlock(&dev->struct_mutex);
-
- mem->offset = 0;
- mem->size = 0;
- mem->free = 0;
-
- DRM_DEBUG("alloc %d, size = %ld, offset = %ld\n", pool, mem->size,
- mem->offset);
-
- return retval;
-}
-
-static int sis_drm_free(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- drm_sis_private_t *dev_priv = dev->dev_private;
- drm_sis_mem_t *mem = data;
- struct sis_memblock *obj;
-
- mutex_lock(&dev->struct_mutex);
- obj = idr_find(&dev_priv->object_idr, mem->free);
- if (obj == NULL) {
- mutex_unlock(&dev->struct_mutex);
- return -EINVAL;
- }
-
- idr_remove(&dev_priv->object_idr, mem->free);
- list_del(&obj->owner_list);
- if (drm_mm_node_allocated(&obj->mm_node))
- drm_mm_remove_node(&obj->mm_node);
-#if defined(CONFIG_FB_SIS) || defined(CONFIG_FB_SIS_MODULE)
- else
- sis_free(obj->req.offset);
-#endif
- kfree(obj);
- mutex_unlock(&dev->struct_mutex);
- DRM_DEBUG("free = 0x%lx\n", mem->free);
-
- return 0;
-}
-
-static int sis_fb_alloc(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- return sis_drm_alloc(dev, file_priv, data, VIDEO_TYPE);
-}
-
-static int sis_ioctl_agp_init(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- drm_sis_private_t *dev_priv = dev->dev_private;
- drm_sis_agp_t *agp = data;
- dev_priv = dev->dev_private;
-
- mutex_lock(&dev->struct_mutex);
- drm_mm_init(&dev_priv->agp_mm, 0, agp->size >> SIS_MM_ALIGN_SHIFT);
-
- dev_priv->agp_initialized = 1;
- dev_priv->agp_offset = agp->offset;
- mutex_unlock(&dev->struct_mutex);
-
- DRM_DEBUG("offset = %lu, size = %lu\n", agp->offset, agp->size);
- return 0;
-}
-
-static int sis_ioctl_agp_alloc(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
-
- return sis_drm_alloc(dev, file_priv, data, AGP_TYPE);
-}
-
-static drm_local_map_t *sis_reg_init(struct drm_device *dev)
-{
- struct drm_map_list *entry;
- drm_local_map_t *map;
-
- list_for_each_entry(entry, &dev->maplist, head) {
- map = entry->map;
- if (!map)
- continue;
- if (map->type == _DRM_REGISTERS)
- return map;
- }
- return NULL;
-}
-
-int sis_idle(struct drm_device *dev)
-{
- drm_sis_private_t *dev_priv = dev->dev_private;
- uint32_t idle_reg;
- unsigned long end;
- int i;
-
- if (dev_priv->idle_fault)
- return 0;
-
- if (dev_priv->mmio == NULL) {
- dev_priv->mmio = sis_reg_init(dev);
- if (dev_priv->mmio == NULL) {
- DRM_ERROR("Could not find register map.\n");
- return 0;
- }
- }
-
- /*
- * Implement a device switch here if needed
- */
-
- if (dev_priv->chipset != SIS_CHIP_315)
- return 0;
-
- /*
- * Timeout after 3 seconds. We cannot use DRM_WAIT_ON here
- * because its polling frequency is too low.
- */
-
- end = jiffies + (HZ * 3);
-
- for (i = 0; i < 4; ++i) {
- do {
- idle_reg = SIS_READ(0x85cc);
- } while (!time_after_eq(jiffies, end) &&
- ((idle_reg & 0x80000000) != 0x80000000));
- }
-
- if (time_after_eq(jiffies, end)) {
- DRM_ERROR("Graphics engine idle timeout. "
- "Disabling idle check\n");
- dev_priv->idle_fault = 1;
- }
-
- /*
- * The caller never sees an error code. It gets trapped
- * in libdrm.
- */
-
- return 0;
-}
-
-
-void sis_lastclose(struct drm_device *dev)
-{
- drm_sis_private_t *dev_priv = dev->dev_private;
-
- if (!dev_priv)
- return;
-
- mutex_lock(&dev->struct_mutex);
- if (dev_priv->vram_initialized) {
- drm_mm_takedown(&dev_priv->vram_mm);
- dev_priv->vram_initialized = 0;
- }
- if (dev_priv->agp_initialized) {
- drm_mm_takedown(&dev_priv->agp_mm);
- dev_priv->agp_initialized = 0;
- }
- dev_priv->mmio = NULL;
- mutex_unlock(&dev->struct_mutex);
-}
-
-void sis_reclaim_buffers_locked(struct drm_device *dev,
- struct drm_file *file)
-{
- struct sis_file_private *file_priv = file->driver_priv;
- struct sis_memblock *entry, *next;
-
- if (!(dev->master && file->master->lock.hw_lock))
- return;
-
- drm_legacy_idlelock_take(&file->master->lock);
-
- mutex_lock(&dev->struct_mutex);
- if (list_empty(&file_priv->obj_list)) {
- mutex_unlock(&dev->struct_mutex);
- drm_legacy_idlelock_release(&file->master->lock);
-
- return;
- }
-
- sis_idle(dev);
-
-
- list_for_each_entry_safe(entry, next, &file_priv->obj_list,
- owner_list) {
- list_del(&entry->owner_list);
- if (drm_mm_node_allocated(&entry->mm_node))
- drm_mm_remove_node(&entry->mm_node);
-#if defined(CONFIG_FB_SIS) || defined(CONFIG_FB_SIS_MODULE)
- else
- sis_free(entry->req.offset);
-#endif
- kfree(entry);
- }
- mutex_unlock(&dev->struct_mutex);
-
- drm_legacy_idlelock_release(&file->master->lock);
-
- return;
-}
-
-const struct drm_ioctl_desc sis_ioctls[] = {
- DRM_IOCTL_DEF_DRV(SIS_FB_ALLOC, sis_fb_alloc, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(SIS_FB_FREE, sis_drm_free, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(SIS_AGP_INIT, sis_ioctl_agp_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY),
- DRM_IOCTL_DEF_DRV(SIS_AGP_ALLOC, sis_ioctl_agp_alloc, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(SIS_AGP_FREE, sis_drm_free, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(SIS_FB_INIT, sis_fb_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY),
-};
-
-int sis_max_ioctl = ARRAY_SIZE(sis_ioctls);
diff --git a/drivers/gpu/drm/solomon/ssd130x.c b/drivers/gpu/drm/solomon/ssd130x.c
index 53464afc2b9a..8cbf5aa66e19 100644
--- a/drivers/gpu/drm/solomon/ssd130x.c
+++ b/drivers/gpu/drm/solomon/ssd130x.c
@@ -81,7 +81,7 @@
#define SSD130X_SET_PRECHARGE_PERIOD2_MASK GENMASK(7, 4)
#define SSD130X_SET_PRECHARGE_PERIOD2_SET(val) FIELD_PREP(SSD130X_SET_PRECHARGE_PERIOD2_MASK, (val))
#define SSD130X_SET_COM_PINS_CONFIG1_MASK GENMASK(4, 4)
-#define SSD130X_SET_COM_PINS_CONFIG1_SET(val) FIELD_PREP(SSD130X_SET_COM_PINS_CONFIG1_MASK, !(val))
+#define SSD130X_SET_COM_PINS_CONFIG1_SET(val) FIELD_PREP(SSD130X_SET_COM_PINS_CONFIG1_MASK, (val))
#define SSD130X_SET_COM_PINS_CONFIG2_MASK GENMASK(5, 5)
#define SSD130X_SET_COM_PINS_CONFIG2_SET(val) FIELD_PREP(SSD130X_SET_COM_PINS_CONFIG2_MASK, (val))
@@ -298,6 +298,7 @@ static void ssd130x_power_off(struct ssd130x_device *ssd130x)
static int ssd130x_init(struct ssd130x_device *ssd130x)
{
u32 precharge, dclk, com_invdir, compins, chargepump, seg_remap;
+ bool scan_mode;
int ret;
/* Set initial contrast */
@@ -360,7 +361,13 @@ static int ssd130x_init(struct ssd130x_device *ssd130x)
/* Set COM pins configuration */
compins = BIT(1);
- compins |= (SSD130X_SET_COM_PINS_CONFIG1_SET(ssd130x->com_seq) |
+ /*
+ * The COM scan mode field values are the inverse of the boolean DT
+ * property "solomon,com-seq". The value 0b means scan from COM0 to
+ * COM[N - 1] while 1b means scan from COM[N - 1] to COM0.
+ */
+ scan_mode = !ssd130x->com_seq;
+ compins |= (SSD130X_SET_COM_PINS_CONFIG1_SET(scan_mode) |
SSD130X_SET_COM_PINS_CONFIG2_SET(ssd130x->com_lrremap));
ret = ssd130x_write_cmd(ssd130x, 2, SSD130X_SET_COM_PINS_CONFIG, compins);
if (ret < 0)
@@ -656,18 +663,8 @@ static const struct drm_crtc_helper_funcs ssd130x_crtc_helper_funcs = {
.atomic_check = drm_crtc_helper_atomic_check,
};
-static void ssd130x_crtc_reset(struct drm_crtc *crtc)
-{
- struct drm_device *drm = crtc->dev;
- struct ssd130x_device *ssd130x = drm_to_ssd130x(drm);
-
- ssd130x_init(ssd130x);
-
- drm_atomic_helper_crtc_reset(crtc);
-}
-
static const struct drm_crtc_funcs ssd130x_crtc_funcs = {
- .reset = ssd130x_crtc_reset,
+ .reset = drm_atomic_helper_crtc_reset,
.destroy = drm_crtc_cleanup,
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
@@ -686,6 +683,12 @@ static void ssd130x_encoder_helper_atomic_enable(struct drm_encoder *encoder,
if (ret)
return;
+ ret = ssd130x_init(ssd130x);
+ if (ret) {
+ ssd130x_power_off(ssd130x);
+ return;
+ }
+
ssd130x_write_cmd(ssd130x, 1, SSD130X_DISPLAY_ON);
backlight_enable(ssd130x->bl_dev);
@@ -876,7 +879,7 @@ static int ssd130x_init_modeset(struct ssd130x_device *ssd130x)
drm->mode_config.max_width = max_width;
drm->mode_config.min_height = mode->vdisplay;
drm->mode_config.max_height = max_height;
- drm->mode_config.preferred_depth = 32;
+ drm->mode_config.preferred_depth = 24;
drm->mode_config.funcs = &ssd130x_mode_config_funcs;
/* Primary plane */
@@ -1006,7 +1009,7 @@ struct ssd130x_device *ssd130x_probe(struct device *dev, struct regmap *regmap)
if (ret)
return ERR_PTR(dev_err_probe(dev, ret, "DRM device register failed\n"));
- drm_fbdev_generic_setup(drm, 0);
+ drm_fbdev_generic_setup(drm, 32);
return ssd130x;
}
diff --git a/drivers/gpu/drm/sprd/sprd_dpu.c b/drivers/gpu/drm/sprd/sprd_dpu.c
index 88f4259680f1..b96fc6837b0d 100644
--- a/drivers/gpu/drm/sprd/sprd_dpu.c
+++ b/drivers/gpu/drm/sprd/sprd_dpu.c
@@ -18,7 +18,6 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
-#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_dma_helper.h>
@@ -803,10 +802,8 @@ static int sprd_dpu_context_init(struct sprd_dpu *dpu,
}
ctx->irq = platform_get_irq(pdev, 0);
- if (ctx->irq < 0) {
- dev_err(dev, "failed to get dpu irq\n");
+ if (ctx->irq < 0)
return ctx->irq;
- }
/* disable and clear interrupts before register dpu IRQ. */
writel(0x00, ctx->base + REG_DPU_INT_EN);
diff --git a/drivers/gpu/drm/sprd/sprd_drm.c b/drivers/gpu/drm/sprd/sprd_drm.c
index 9d42f17a5734..be60c0d546a3 100644
--- a/drivers/gpu/drm/sprd/sprd_drm.c
+++ b/drivers/gpu/drm/sprd/sprd_drm.c
@@ -11,7 +11,6 @@
#include <linux/of_platform.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
diff --git a/drivers/gpu/drm/sprd/sprd_dsi.c b/drivers/gpu/drm/sprd/sprd_dsi.c
index 12b67a5d5923..ab0e5cce7adb 100644
--- a/drivers/gpu/drm/sprd/sprd_dsi.c
+++ b/drivers/gpu/drm/sprd/sprd_dsi.c
@@ -13,7 +13,6 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
-#include <drm/drm_crtc_helper.h>
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
diff --git a/drivers/gpu/drm/sun4i/sun4i_tv.c b/drivers/gpu/drm/sun4i/sun4i_tv.c
index c65f0a89b6b0..9625a00a48ba 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tv.c
@@ -141,23 +141,14 @@ struct resync_parameters {
struct tv_mode {
char *name;
+ unsigned int tv_mode;
+
u32 mode;
u32 chroma_freq;
u16 back_porch;
u16 front_porch;
- u16 line_number;
u16 vblank_level;
- u32 hdisplay;
- u16 hfront_porch;
- u16 hsync_len;
- u16 hback_porch;
-
- u32 vdisplay;
- u16 vfront_porch;
- u16 vsync_len;
- u16 vback_porch;
-
bool yc_en;
bool dac3_en;
bool dac_bit25_en;
@@ -213,7 +204,7 @@ static const struct resync_parameters pal_resync_parameters = {
static const struct tv_mode tv_modes[] = {
{
- .name = "NTSC",
+ .tv_mode = DRM_MODE_TV_MODE_NTSC,
.mode = SUN4I_TVE_CFG0_RES_480i,
.chroma_freq = 0x21f07c1f,
.yc_en = true,
@@ -222,17 +213,6 @@ static const struct tv_mode tv_modes[] = {
.back_porch = 118,
.front_porch = 32,
- .line_number = 525,
-
- .hdisplay = 720,
- .hfront_porch = 18,
- .hsync_len = 2,
- .hback_porch = 118,
-
- .vdisplay = 480,
- .vfront_porch = 26,
- .vsync_len = 2,
- .vback_porch = 17,
.vblank_level = 240,
@@ -242,23 +222,12 @@ static const struct tv_mode tv_modes[] = {
.resync_params = &ntsc_resync_parameters,
},
{
- .name = "PAL",
+ .tv_mode = DRM_MODE_TV_MODE_PAL,
.mode = SUN4I_TVE_CFG0_RES_576i,
.chroma_freq = 0x2a098acb,
.back_porch = 138,
.front_porch = 24,
- .line_number = 625,
-
- .hdisplay = 720,
- .hfront_porch = 3,
- .hsync_len = 2,
- .hback_porch = 139,
-
- .vdisplay = 576,
- .vfront_porch = 28,
- .vsync_len = 2,
- .vback_porch = 19,
.vblank_level = 252,
@@ -276,63 +245,21 @@ drm_encoder_to_sun4i_tv(struct drm_encoder *encoder)
encoder);
}
-/*
- * FIXME: If only the drm_display_mode private field was usable, this
- * could go away...
- *
- * So far, it doesn't seem to be preserved when the mode is passed by
- * to mode_set for some reason.
- */
-static const struct tv_mode *sun4i_tv_find_tv_by_mode(const struct drm_display_mode *mode)
+static const struct tv_mode *
+sun4i_tv_find_tv_by_mode(unsigned int mode)
{
int i;
- /* First try to identify the mode by name */
for (i = 0; i < ARRAY_SIZE(tv_modes); i++) {
const struct tv_mode *tv_mode = &tv_modes[i];
- DRM_DEBUG_DRIVER("Comparing mode %s vs %s",
- mode->name, tv_mode->name);
-
- if (!strcmp(mode->name, tv_mode->name))
- return tv_mode;
- }
-
- /* Then by number of lines */
- for (i = 0; i < ARRAY_SIZE(tv_modes); i++) {
- const struct tv_mode *tv_mode = &tv_modes[i];
-
- DRM_DEBUG_DRIVER("Comparing mode %s vs %s (X: %d vs %d)",
- mode->name, tv_mode->name,
- mode->vdisplay, tv_mode->vdisplay);
-
- if (mode->vdisplay == tv_mode->vdisplay)
+ if (tv_mode->tv_mode == mode)
return tv_mode;
}
return NULL;
}
-static void sun4i_tv_mode_to_drm_mode(const struct tv_mode *tv_mode,
- struct drm_display_mode *mode)
-{
- DRM_DEBUG_DRIVER("Creating mode %s\n", mode->name);
-
- mode->type = DRM_MODE_TYPE_DRIVER;
- mode->clock = 13500;
- mode->flags = DRM_MODE_FLAG_INTERLACE;
-
- mode->hdisplay = tv_mode->hdisplay;
- mode->hsync_start = mode->hdisplay + tv_mode->hfront_porch;
- mode->hsync_end = mode->hsync_start + tv_mode->hsync_len;
- mode->htotal = mode->hsync_end + tv_mode->hback_porch;
-
- mode->vdisplay = tv_mode->vdisplay;
- mode->vsync_start = mode->vdisplay + tv_mode->vfront_porch;
- mode->vsync_end = mode->vsync_start + tv_mode->vsync_len;
- mode->vtotal = mode->vsync_end + tv_mode->vback_porch;
-}
-
static void sun4i_tv_disable(struct drm_encoder *encoder,
struct drm_atomic_state *state)
{
@@ -356,7 +283,11 @@ static void sun4i_tv_enable(struct drm_encoder *encoder,
struct drm_crtc_state *crtc_state =
drm_atomic_get_new_crtc_state(state, encoder->crtc);
struct drm_display_mode *mode = &crtc_state->mode;
- const struct tv_mode *tv_mode = sun4i_tv_find_tv_by_mode(mode);
+ struct drm_connector *connector = &tv->connector;
+ struct drm_connector_state *conn_state =
+ drm_atomic_get_new_connector_state(state, connector);
+ const struct tv_mode *tv_mode =
+ sun4i_tv_find_tv_by_mode(conn_state->tv.mode);
DRM_DEBUG_DRIVER("Enabling the TV Output\n");
@@ -404,7 +335,7 @@ static void sun4i_tv_enable(struct drm_encoder *encoder,
/* Set the lines setup */
regmap_write(tv->regs, SUN4I_TVE_LINE_REG,
SUN4I_TVE_LINE_FIRST(22) |
- SUN4I_TVE_LINE_NUMBER(tv_mode->line_number));
+ SUN4I_TVE_LINE_NUMBER(mode->vtotal));
regmap_write(tv->regs, SUN4I_TVE_LEVEL_REG,
SUN4I_TVE_LEVEL_BLANK(tv_mode->video_levels->blank) |
@@ -465,37 +396,21 @@ static const struct drm_encoder_helper_funcs sun4i_tv_helper_funcs = {
.atomic_enable = sun4i_tv_enable,
};
-static int sun4i_tv_comp_get_modes(struct drm_connector *connector)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(tv_modes); i++) {
- struct drm_display_mode *mode;
- const struct tv_mode *tv_mode = &tv_modes[i];
-
- mode = drm_mode_create(connector->dev);
- if (!mode) {
- DRM_ERROR("Failed to create a new display mode\n");
- return 0;
- }
-
- strcpy(mode->name, tv_mode->name);
-
- sun4i_tv_mode_to_drm_mode(tv_mode, mode);
- drm_mode_probed_add(connector, mode);
- }
-
- return i;
-}
-
static const struct drm_connector_helper_funcs sun4i_tv_comp_connector_helper_funcs = {
- .get_modes = sun4i_tv_comp_get_modes,
+ .atomic_check = drm_atomic_helper_connector_tv_check,
+ .get_modes = drm_connector_helper_tv_get_modes,
};
+static void sun4i_tv_connector_reset(struct drm_connector *connector)
+{
+ drm_atomic_helper_connector_reset(connector);
+ drm_atomic_helper_connector_tv_reset(connector);
+}
+
static const struct drm_connector_funcs sun4i_tv_comp_connector_funcs = {
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = drm_connector_cleanup,
- .reset = drm_atomic_helper_connector_reset,
+ .reset = sun4i_tv_connector_reset,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
@@ -587,8 +502,20 @@ static int sun4i_tv_bind(struct device *dev, struct device *master,
drm_connector_attach_encoder(&tv->connector, &tv->encoder);
+ ret = drm_mode_create_tv_properties(drm,
+ BIT(DRM_MODE_TV_MODE_NTSC) |
+ BIT(DRM_MODE_TV_MODE_PAL));
+ if (ret)
+ goto err_cleanup_connector;
+
+ drm_object_attach_property(&tv->connector.base,
+ drm->mode_config.tv_mode_property,
+ DRM_MODE_TV_MODE_NTSC);
+
return 0;
+err_cleanup_connector:
+ drm_connector_cleanup(&tv->connector);
err_cleanup_encoder:
drm_encoder_cleanup(&tv->encoder);
err_disable_clk:
diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
index 477cb6985b4d..7cab4213a680 100644
--- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
+++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
@@ -8,7 +8,7 @@
#include <linux/of_device.h>
#include <linux/platform_device.h>
-#include <drm/drm_crtc_helper.h>
+#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_of.h>
#include <drm/drm_simple_kms_helper.h>
diff --git a/drivers/gpu/drm/tdfx/Makefile b/drivers/gpu/drm/tdfx/Makefile
deleted file mode 100644
index 03b7d0f087b0..000000000000
--- a/drivers/gpu/drm/tdfx/Makefile
+++ /dev/null
@@ -1,8 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# Makefile for the drm device driver. This driver provides support for the
-# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
-
-tdfx-y := tdfx_drv.o
-
-obj-$(CONFIG_DRM_TDFX) += tdfx.o
diff --git a/drivers/gpu/drm/tdfx/tdfx_drv.c b/drivers/gpu/drm/tdfx/tdfx_drv.c
deleted file mode 100644
index 58c185c299f4..000000000000
--- a/drivers/gpu/drm/tdfx/tdfx_drv.c
+++ /dev/null
@@ -1,90 +0,0 @@
-/* tdfx_drv.c -- tdfx driver -*- linux-c -*-
- * Created: Thu Oct 7 10:38:32 1999 by faith@precisioninsight.com
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Rickard E. (Rik) Faith <faith@valinux.com>
- * Daryll Strauss <daryll@valinux.com>
- * Gareth Hughes <gareth@valinux.com>
- */
-
-#include <linux/module.h>
-#include <linux/pci.h>
-
-#include <drm/drm_drv.h>
-#include <drm/drm_file.h>
-#include <drm/drm_ioctl.h>
-#include <drm/drm_legacy.h>
-#include <drm/drm_pciids.h>
-
-#include "tdfx_drv.h"
-
-static struct pci_device_id pciidlist[] = {
- tdfx_PCI_IDS
-};
-
-static const struct file_operations tdfx_driver_fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = drm_ioctl,
- .mmap = drm_legacy_mmap,
- .poll = drm_poll,
- .compat_ioctl = drm_compat_ioctl,
- .llseek = noop_llseek,
-};
-
-static const struct drm_driver driver = {
- .driver_features = DRIVER_LEGACY,
- .fops = &tdfx_driver_fops,
- .name = DRIVER_NAME,
- .desc = DRIVER_DESC,
- .date = DRIVER_DATE,
- .major = DRIVER_MAJOR,
- .minor = DRIVER_MINOR,
- .patchlevel = DRIVER_PATCHLEVEL,
-};
-
-static struct pci_driver tdfx_pci_driver = {
- .name = DRIVER_NAME,
- .id_table = pciidlist,
-};
-
-static int __init tdfx_init(void)
-{
- return drm_legacy_pci_init(&driver, &tdfx_pci_driver);
-}
-
-static void __exit tdfx_exit(void)
-{
- drm_legacy_pci_exit(&driver, &tdfx_pci_driver);
-}
-
-module_init(tdfx_init);
-module_exit(tdfx_exit);
-
-MODULE_AUTHOR(DRIVER_AUTHOR);
-MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/tegra/dpaux.c b/drivers/gpu/drm/tegra/dpaux.c
index 7dc681e2ee90..3c84e73d5051 100644
--- a/drivers/gpu/drm/tegra/dpaux.c
+++ b/drivers/gpu/drm/tegra/dpaux.c
@@ -598,7 +598,6 @@ static int tegra_dpaux_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
static int tegra_dpaux_suspend(struct device *dev)
{
struct tegra_dpaux *dpaux = dev_get_drvdata(dev);
@@ -657,10 +656,9 @@ disable_clk:
clk_disable_unprepare(dpaux->clk);
return err;
}
-#endif
static const struct dev_pm_ops tegra_dpaux_pm_ops = {
- SET_RUNTIME_PM_OPS(tegra_dpaux_suspend, tegra_dpaux_resume, NULL)
+ RUNTIME_PM_OPS(tegra_dpaux_suspend, tegra_dpaux_resume, NULL)
};
static const struct tegra_dpaux_soc tegra124_dpaux_soc = {
@@ -694,7 +692,7 @@ struct platform_driver tegra_dpaux_driver = {
.driver = {
.name = "tegra-dpaux",
.of_match_table = tegra_dpaux_of_match,
- .pm = &tegra_dpaux_pm_ops,
+ .pm = pm_ptr(&tegra_dpaux_pm_ops),
},
.probe = tegra_dpaux_probe,
.remove = tegra_dpaux_remove,
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index 7bd2e65c2a16..6ca9f396e55b 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -1057,7 +1057,7 @@ void *tegra_drm_alloc(struct tegra_drm *tegra, size_t size, dma_addr_t *dma)
*dma = iova_dma_addr(&tegra->carveout.domain, alloc);
err = iommu_map(tegra->domain, *dma, virt_to_phys(virt),
- size, IOMMU_READ | IOMMU_WRITE);
+ size, IOMMU_READ | IOMMU_WRITE, GFP_KERNEL);
if (err < 0)
goto free_iova;
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
index a900300ae5bd..bfebe2786d61 100644
--- a/drivers/gpu/drm/tegra/fb.c
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -308,18 +308,18 @@ static struct tegra_fbdev *tegra_fbdev_create(struct drm_device *drm)
return ERR_PTR(-ENOMEM);
}
- drm_fb_helper_prepare(drm, &fbdev->base, &tegra_fb_helper_funcs);
+ drm_fb_helper_prepare(drm, &fbdev->base, 32, &tegra_fb_helper_funcs);
return fbdev;
}
static void tegra_fbdev_free(struct tegra_fbdev *fbdev)
{
+ drm_fb_helper_unprepare(&fbdev->base);
kfree(fbdev);
}
static int tegra_fbdev_init(struct tegra_fbdev *fbdev,
- unsigned int preferred_bpp,
unsigned int num_crtc,
unsigned int max_connectors)
{
@@ -333,7 +333,7 @@ static int tegra_fbdev_init(struct tegra_fbdev *fbdev,
return err;
}
- err = drm_fb_helper_initial_config(&fbdev->base, preferred_bpp);
+ err = drm_fb_helper_initial_config(&fbdev->base);
if (err < 0) {
dev_err(drm->dev, "failed to set initial configuration: %d\n",
err);
@@ -396,7 +396,7 @@ int tegra_drm_fb_init(struct drm_device *drm)
struct tegra_drm *tegra = drm->dev_private;
int err;
- err = tegra_fbdev_init(tegra->fbdev, 32, drm->mode_config.num_crtc,
+ err = tegra_fbdev_init(tegra->fbdev, drm->mode_config.num_crtc,
drm->mode_config.num_connector);
if (err < 0)
return err;
diff --git a/drivers/gpu/drm/tegra/firewall.c b/drivers/gpu/drm/tegra/firewall.c
index 1824d2db0e2c..d53f890fa689 100644
--- a/drivers/gpu/drm/tegra/firewall.c
+++ b/drivers/gpu/drm/tegra/firewall.c
@@ -97,6 +97,9 @@ static int fw_check_regs_imm(struct tegra_drm_firewall *fw, u32 offset)
{
bool is_addr;
+ if (!fw->client->ops->is_addr_reg)
+ return 0;
+
is_addr = fw->client->ops->is_addr_reg(fw->client->base.dev, fw->class,
offset);
if (is_addr)
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index 979e7bc902f6..bce991a2ccc0 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -574,7 +574,7 @@ int __tegra_gem_mmap(struct drm_gem_object *gem, struct vm_area_struct *vma)
* and set the vm_pgoff (used as a fake buffer offset by DRM)
* to 0 as we want to map the whole buffer.
*/
- vma->vm_flags &= ~VM_PFNMAP;
+ vm_flags_clear(vma, VM_PFNMAP);
vma->vm_pgoff = 0;
err = dma_mmap_wc(gem->dev->dev, vma, bo->vaddr, bo->iova,
@@ -588,8 +588,7 @@ int __tegra_gem_mmap(struct drm_gem_object *gem, struct vm_area_struct *vma)
} else {
pgprot_t prot = vm_get_page_prot(vma->vm_flags);
- vma->vm_flags |= VM_MIXEDMAP;
- vma->vm_flags &= ~VM_PFNMAP;
+ vm_flags_mod(vma, VM_MIXEDMAP, VM_PFNMAP);
vma->vm_page_prot = pgprot_writecombine(prot);
}
diff --git a/drivers/gpu/drm/tegra/nvdec.c b/drivers/gpu/drm/tegra/nvdec.c
index 10fd21517281..86c5818ac27b 100644
--- a/drivers/gpu/drm/tegra/nvdec.c
+++ b/drivers/gpu/drm/tegra/nvdec.c
@@ -67,26 +67,18 @@ static inline void nvdec_writel(struct nvdec *nvdec, u32 value,
static int nvdec_boot_falcon(struct nvdec *nvdec)
{
-#ifdef CONFIG_IOMMU_API
- struct iommu_fwspec *spec = dev_iommu_fwspec_get(nvdec->dev);
-#endif
+ u32 stream_id;
int err;
-#ifdef CONFIG_IOMMU_API
- if (nvdec->config->supports_sid && spec) {
+ if (nvdec->config->supports_sid && tegra_dev_iommu_get_stream_id(nvdec->dev, &stream_id)) {
u32 value;
value = TRANSCFG_ATT(1, TRANSCFG_SID_FALCON) | TRANSCFG_ATT(0, TRANSCFG_SID_HW);
nvdec_writel(nvdec, value, NVDEC_TFBIF_TRANSCFG);
- if (spec->num_ids > 0) {
- value = spec->ids[0] & 0xffff;
-
- nvdec_writel(nvdec, value, VIC_THI_STREAMID0);
- nvdec_writel(nvdec, value, VIC_THI_STREAMID1);
- }
+ nvdec_writel(nvdec, stream_id, VIC_THI_STREAMID0);
+ nvdec_writel(nvdec, stream_id, VIC_THI_STREAMID1);
}
-#endif
err = falcon_boot(&nvdec->falcon);
if (err < 0)
diff --git a/drivers/gpu/drm/tegra/submit.c b/drivers/gpu/drm/tegra/submit.c
index 066f88564169..2430fcc97448 100644
--- a/drivers/gpu/drm/tegra/submit.c
+++ b/drivers/gpu/drm/tegra/submit.c
@@ -609,21 +609,13 @@ int tegra_drm_ioctl_channel_submit(struct drm_device *drm, void *data,
host1x_memory_context_get(job->memory_context);
}
} else if (context->client->ops->get_streamid_offset) {
-#ifdef CONFIG_IOMMU_API
- struct iommu_fwspec *spec;
-
/*
* Job submission will need to temporarily change stream ID,
* so need to tell it what to change it back to.
*/
- spec = dev_iommu_fwspec_get(context->client->base.dev);
- if (spec && spec->num_ids > 0)
- job->engine_fallback_streamid = spec->ids[0] & 0xffff;
- else
- job->engine_fallback_streamid = 0x7f;
-#else
- job->engine_fallback_streamid = 0x7f;
-#endif
+ if (!tegra_dev_iommu_get_stream_id(context->client->base.dev,
+ &job->engine_fallback_streamid))
+ job->engine_fallback_streamid = TEGRA_STREAM_ID_BYPASS;
}
/* Boot engine. */
@@ -654,7 +646,7 @@ int tegra_drm_ioctl_channel_submit(struct drm_device *drm, void *data,
args->syncpt.value = job->syncpt_end;
if (syncobj) {
- struct dma_fence *fence = host1x_fence_create(job->syncpt, job->syncpt_end);
+ struct dma_fence *fence = host1x_fence_create(job->syncpt, job->syncpt_end, true);
if (IS_ERR(fence)) {
err = PTR_ERR(fence);
SUBMIT_ERR(context, "failed to create postfence: %d", err);
@@ -680,8 +672,7 @@ free_job_data:
kfree(job_data->used_mappings);
}
- if (job_data)
- kfree(job_data);
+ kfree(job_data);
put_bo:
gather_bo_put(&bo->base);
unlock:
diff --git a/drivers/gpu/drm/tegra/vic.c b/drivers/gpu/drm/tegra/vic.c
index 7382ee132eb7..531a71c72061 100644
--- a/drivers/gpu/drm/tegra/vic.c
+++ b/drivers/gpu/drm/tegra/vic.c
@@ -56,41 +56,30 @@ static void vic_writel(struct vic *vic, u32 value, unsigned int offset)
static int vic_boot(struct vic *vic)
{
-#ifdef CONFIG_IOMMU_API
- struct iommu_fwspec *spec = dev_iommu_fwspec_get(vic->dev);
-#endif
- u32 fce_ucode_size, fce_bin_data_offset;
+ u32 fce_ucode_size, fce_bin_data_offset, stream_id;
void *hdr;
int err = 0;
-#ifdef CONFIG_IOMMU_API
- if (vic->config->supports_sid && spec) {
+ if (vic->config->supports_sid && tegra_dev_iommu_get_stream_id(vic->dev, &stream_id)) {
u32 value;
value = TRANSCFG_ATT(1, TRANSCFG_SID_FALCON) |
TRANSCFG_ATT(0, TRANSCFG_SID_HW);
vic_writel(vic, value, VIC_TFBIF_TRANSCFG);
- if (spec->num_ids > 0) {
- value = spec->ids[0] & 0xffff;
-
- /*
- * STREAMID0 is used for input/output buffers.
- * Initialize it to SID_VIC in case context isolation
- * is not enabled, and SID_VIC is used for both firmware
- * and data buffers.
- *
- * If context isolation is enabled, it will be
- * overridden by the SETSTREAMID opcode as part of
- * each job.
- */
- vic_writel(vic, value, VIC_THI_STREAMID0);
-
- /* STREAMID1 is used for firmware loading. */
- vic_writel(vic, value, VIC_THI_STREAMID1);
- }
+ /*
+ * STREAMID0 is used for input/output buffers. Initialize it to SID_VIC in case
+ * context isolation is not enabled, and SID_VIC is used for both firmware and
+ * data buffers.
+ *
+ * If context isolation is enabled, it will be overridden by the SETSTREAMID
+ * opcode as part of each job.
+ */
+ vic_writel(vic, stream_id, VIC_THI_STREAMID0);
+
+ /* STREAMID1 is used for firmware loading. */
+ vic_writel(vic, stream_id, VIC_THI_STREAMID1);
}
-#endif
/* setup clockgating registers */
vic_writel(vic, CG_IDLE_CG_DLY_CNT(4) |
diff --git a/drivers/gpu/drm/tests/Makefile b/drivers/gpu/drm/tests/Makefile
index f896ef85c2f2..bca726a8f483 100644
--- a/drivers/gpu/drm/tests/Makefile
+++ b/drivers/gpu/drm/tests/Makefile
@@ -1,16 +1,22 @@
# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_DRM_KUNIT_TEST_HELPERS) += \
+ drm_kunit_helpers.o
+
obj-$(CONFIG_DRM_KUNIT_TEST) += \
drm_buddy_test.o \
drm_cmdline_parser_test.o \
+ drm_connector_test.o \
drm_damage_helper_test.o \
drm_dp_mst_helper_test.o \
drm_format_helper_test.o \
drm_format_test.o \
drm_framebuffer_test.o \
- drm_kunit_helpers.o \
+ drm_managed_test.o \
drm_mm_test.o \
+ drm_modes_test.o \
drm_plane_helper_test.o \
+ drm_probe_helper_test.o \
drm_rect_test.o
CFLAGS_drm_mm_test.o := $(DISABLE_STRUCTLEAK_PLUGIN)
diff --git a/drivers/gpu/drm/tests/drm_client_modeset_test.c b/drivers/gpu/drm/tests/drm_client_modeset_test.c
index 362a5fbd82f5..416a279b6dae 100644
--- a/drivers/gpu/drm/tests/drm_client_modeset_test.c
+++ b/drivers/gpu/drm/tests/drm_client_modeset_test.c
@@ -8,20 +8,39 @@
#include <drm/drm_connector.h>
#include <drm/drm_edid.h>
#include <drm/drm_drv.h>
+#include <drm/drm_kunit_helpers.h>
#include <drm/drm_modes.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_probe_helper.h>
-#include "drm_kunit_helpers.h"
-
struct drm_client_modeset_test_priv {
struct drm_device *drm;
+ struct device *dev;
struct drm_connector connector;
};
static int drm_client_modeset_connector_get_modes(struct drm_connector *connector)
{
- return drm_add_modes_noedid(connector, 1920, 1200);
+ struct drm_display_mode *mode;
+ int count;
+
+ count = drm_add_modes_noedid(connector, 1920, 1200);
+
+ mode = drm_mode_analog_ntsc_480i(connector->dev);
+ if (!mode)
+ return count;
+
+ drm_mode_probed_add(connector, mode);
+ count += 1;
+
+ mode = drm_mode_analog_pal_576i(connector->dev);
+ if (!mode)
+ return count;
+
+ drm_mode_probed_add(connector, mode);
+ count += 1;
+
+ return count;
}
static const struct drm_connector_helper_funcs drm_client_modeset_connector_helper_funcs = {
@@ -41,7 +60,12 @@ static int drm_client_modeset_test_init(struct kunit *test)
test->priv = priv;
- priv->drm = drm_kunit_device_init(test, DRIVER_MODESET, "drm-client-modeset-test");
+ priv->dev = drm_kunit_helper_alloc_device(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv->dev);
+
+ priv->drm = __drm_kunit_helper_alloc_drm_device(test, priv->dev,
+ sizeof(*priv->drm), 0,
+ DRIVER_MODESET);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv->drm);
ret = drmm_connector_init(priv->drm, &priv->connector,
@@ -52,9 +76,19 @@ static int drm_client_modeset_test_init(struct kunit *test)
drm_connector_helper_add(&priv->connector, &drm_client_modeset_connector_helper_funcs);
+ priv->connector.interlace_allowed = true;
+ priv->connector.doublescan_allowed = true;
+
return 0;
}
+static void drm_client_modeset_test_exit(struct kunit *test)
+{
+ struct drm_client_modeset_test_priv *priv = test->priv;
+
+ drm_kunit_helper_free_device(test, priv->dev);
+}
+
static void drm_test_pick_cmdline_res_1920_1080_60(struct kunit *test)
{
struct drm_client_modeset_test_priv *priv = test->priv;
@@ -84,15 +118,83 @@ static void drm_test_pick_cmdline_res_1920_1080_60(struct kunit *test)
KUNIT_EXPECT_TRUE(test, drm_mode_equal(expected_mode, mode));
}
+struct drm_connector_pick_cmdline_mode_test {
+ const char *cmdline;
+ struct drm_display_mode *(*func)(struct drm_device *drm);
+};
+
+#define TEST_CMDLINE(_cmdline, _fn) \
+ { \
+ .cmdline = _cmdline, \
+ .func = _fn, \
+ }
+
+static void drm_test_pick_cmdline_named(struct kunit *test)
+{
+ const struct drm_connector_pick_cmdline_mode_test *params = test->param_value;
+ struct drm_client_modeset_test_priv *priv = test->priv;
+ struct drm_device *drm = priv->drm;
+ struct drm_connector *connector = &priv->connector;
+ struct drm_cmdline_mode *cmdline_mode = &connector->cmdline_mode;
+ const struct drm_display_mode *expected_mode, *mode;
+ const char *cmdline = params->cmdline;
+ int ret;
+
+ KUNIT_ASSERT_TRUE(test,
+ drm_mode_parse_command_line_for_connector(cmdline,
+ connector,
+ cmdline_mode));
+
+ mutex_lock(&drm->mode_config.mutex);
+ ret = drm_helper_probe_single_connector_modes(connector, 1920, 1080);
+ mutex_unlock(&drm->mode_config.mutex);
+ KUNIT_ASSERT_GT(test, ret, 0);
+
+ mode = drm_connector_pick_cmdline_mode(connector);
+ KUNIT_ASSERT_NOT_NULL(test, mode);
+
+ expected_mode = params->func(drm);
+ KUNIT_ASSERT_NOT_NULL(test, expected_mode);
+
+ KUNIT_EXPECT_TRUE(test, drm_mode_equal(expected_mode, mode));
+}
+
+static const
+struct drm_connector_pick_cmdline_mode_test drm_connector_pick_cmdline_mode_tests[] = {
+ TEST_CMDLINE("NTSC", drm_mode_analog_ntsc_480i),
+ TEST_CMDLINE("NTSC-J", drm_mode_analog_ntsc_480i),
+ TEST_CMDLINE("PAL", drm_mode_analog_pal_576i),
+ TEST_CMDLINE("PAL-M", drm_mode_analog_ntsc_480i),
+};
+
+static void
+drm_connector_pick_cmdline_mode_desc(const struct drm_connector_pick_cmdline_mode_test *t,
+ char *desc)
+{
+ sprintf(desc, "%s", t->cmdline);
+}
+
+KUNIT_ARRAY_PARAM(drm_connector_pick_cmdline_mode,
+ drm_connector_pick_cmdline_mode_tests,
+ drm_connector_pick_cmdline_mode_desc);
+
static struct kunit_case drm_test_pick_cmdline_tests[] = {
KUNIT_CASE(drm_test_pick_cmdline_res_1920_1080_60),
+ KUNIT_CASE_PARAM(drm_test_pick_cmdline_named,
+ drm_connector_pick_cmdline_mode_gen_params),
{}
};
static struct kunit_suite drm_test_pick_cmdline_test_suite = {
.name = "drm_test_pick_cmdline",
.init = drm_client_modeset_test_init,
+ .exit = drm_client_modeset_test_exit,
.test_cases = drm_test_pick_cmdline_tests
};
kunit_test_suite(drm_test_pick_cmdline_test_suite);
+
+/*
+ * This file is included directly by drm_client_modeset.c so we can't
+ * use any MODULE_* macro here.
+ */
diff --git a/drivers/gpu/drm/tests/drm_cmdline_parser_test.c b/drivers/gpu/drm/tests/drm_cmdline_parser_test.c
index 34790e7a3760..88f7f518ffb3 100644
--- a/drivers/gpu/drm/tests/drm_cmdline_parser_test.c
+++ b/drivers/gpu/drm/tests/drm_cmdline_parser_test.c
@@ -927,6 +927,14 @@ static const struct drm_cmdline_invalid_test drm_cmdline_invalid_tests[] = {
.name = "invalid_option",
.cmdline = "720x480,test=42",
},
+ {
+ .name = "invalid_tv_option",
+ .cmdline = "720x480i,tv_mode=invalid",
+ },
+ {
+ .name = "truncated_tv_option",
+ .cmdline = "720x480i,tv_mode=NTS",
+ },
};
static void drm_cmdline_invalid_desc(const struct drm_cmdline_invalid_test *t,
@@ -937,6 +945,65 @@ static void drm_cmdline_invalid_desc(const struct drm_cmdline_invalid_test *t,
KUNIT_ARRAY_PARAM(drm_cmdline_invalid, drm_cmdline_invalid_tests, drm_cmdline_invalid_desc);
+struct drm_cmdline_tv_option_test {
+ const char *name;
+ const char *cmdline;
+ struct drm_display_mode *(*mode_fn)(struct drm_device *dev);
+ enum drm_connector_tv_mode tv_mode;
+};
+
+static void drm_test_cmdline_tv_options(struct kunit *test)
+{
+ const struct drm_cmdline_tv_option_test *params = test->param_value;
+ const struct drm_display_mode *expected_mode = params->mode_fn(NULL);
+ struct drm_cmdline_mode mode = { };
+
+ KUNIT_EXPECT_TRUE(test, drm_mode_parse_command_line_for_connector(params->cmdline,
+ &no_connector, &mode));
+ KUNIT_EXPECT_TRUE(test, mode.specified);
+ KUNIT_EXPECT_EQ(test, mode.xres, expected_mode->hdisplay);
+ KUNIT_EXPECT_EQ(test, mode.yres, expected_mode->vdisplay);
+ KUNIT_EXPECT_EQ(test, mode.tv_mode, params->tv_mode);
+
+ KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
+
+ KUNIT_EXPECT_FALSE(test, mode.bpp_specified);
+
+ KUNIT_EXPECT_FALSE(test, mode.rb);
+ KUNIT_EXPECT_FALSE(test, mode.cvt);
+ KUNIT_EXPECT_EQ(test, mode.interlace, !!(expected_mode->flags & DRM_MODE_FLAG_INTERLACE));
+ KUNIT_EXPECT_FALSE(test, mode.margins);
+ KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_UNSPECIFIED);
+}
+
+#define TV_OPT_TEST(_opt, _cmdline, _mode_fn) \
+ { \
+ .name = #_opt, \
+ .cmdline = _cmdline, \
+ .mode_fn = _mode_fn, \
+ .tv_mode = DRM_MODE_TV_MODE_ ## _opt, \
+ }
+
+static const struct drm_cmdline_tv_option_test drm_cmdline_tv_option_tests[] = {
+ TV_OPT_TEST(NTSC, "720x480i,tv_mode=NTSC", drm_mode_analog_ntsc_480i),
+ TV_OPT_TEST(NTSC_443, "720x480i,tv_mode=NTSC-443", drm_mode_analog_ntsc_480i),
+ TV_OPT_TEST(NTSC_J, "720x480i,tv_mode=NTSC-J", drm_mode_analog_ntsc_480i),
+ TV_OPT_TEST(PAL, "720x576i,tv_mode=PAL", drm_mode_analog_pal_576i),
+ TV_OPT_TEST(PAL_M, "720x480i,tv_mode=PAL-M", drm_mode_analog_ntsc_480i),
+ TV_OPT_TEST(PAL_N, "720x576i,tv_mode=PAL-N", drm_mode_analog_pal_576i),
+ TV_OPT_TEST(SECAM, "720x576i,tv_mode=SECAM", drm_mode_analog_pal_576i),
+};
+
+static void drm_cmdline_tv_option_desc(const struct drm_cmdline_tv_option_test *t,
+ char *desc)
+{
+ sprintf(desc, "%s", t->name);
+}
+
+KUNIT_ARRAY_PARAM(drm_cmdline_tv_option,
+ drm_cmdline_tv_option_tests,
+ drm_cmdline_tv_option_desc);
+
static struct kunit_case drm_cmdline_parser_tests[] = {
KUNIT_CASE(drm_test_cmdline_force_d_only),
KUNIT_CASE(drm_test_cmdline_force_D_only_dvi),
@@ -977,6 +1044,7 @@ static struct kunit_case drm_cmdline_parser_tests[] = {
KUNIT_CASE(drm_test_cmdline_freestanding_force_e_and_options),
KUNIT_CASE(drm_test_cmdline_panel_orientation),
KUNIT_CASE_PARAM(drm_test_cmdline_invalid, drm_cmdline_invalid_gen_params),
+ KUNIT_CASE_PARAM(drm_test_cmdline_tv_options, drm_cmdline_tv_option_gen_params),
{}
};
diff --git a/drivers/gpu/drm/tests/drm_connector_test.c b/drivers/gpu/drm/tests/drm_connector_test.c
new file mode 100644
index 000000000000..c66aa2dc8d9d
--- /dev/null
+++ b/drivers/gpu/drm/tests/drm_connector_test.c
@@ -0,0 +1,76 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Kunit test for drm_modes functions
+ */
+
+#include <drm/drm_connector.h>
+
+#include <kunit/test.h>
+
+struct drm_get_tv_mode_from_name_test {
+ const char *name;
+ enum drm_connector_tv_mode expected_mode;
+};
+
+#define TV_MODE_NAME(_name, _mode) \
+ { \
+ .name = _name, \
+ .expected_mode = _mode, \
+ }
+
+static void drm_test_get_tv_mode_from_name_valid(struct kunit *test)
+{
+ const struct drm_get_tv_mode_from_name_test *params = test->param_value;
+
+ KUNIT_EXPECT_EQ(test,
+ drm_get_tv_mode_from_name(params->name, strlen(params->name)),
+ params->expected_mode);
+}
+
+static const
+struct drm_get_tv_mode_from_name_test drm_get_tv_mode_from_name_valid_tests[] = {
+ TV_MODE_NAME("NTSC", DRM_MODE_TV_MODE_NTSC),
+ TV_MODE_NAME("NTSC-443", DRM_MODE_TV_MODE_NTSC_443),
+ TV_MODE_NAME("NTSC-J", DRM_MODE_TV_MODE_NTSC_J),
+ TV_MODE_NAME("PAL", DRM_MODE_TV_MODE_PAL),
+ TV_MODE_NAME("PAL-M", DRM_MODE_TV_MODE_PAL_M),
+ TV_MODE_NAME("PAL-N", DRM_MODE_TV_MODE_PAL_N),
+ TV_MODE_NAME("SECAM", DRM_MODE_TV_MODE_SECAM),
+};
+
+static void
+drm_get_tv_mode_from_name_valid_desc(const struct drm_get_tv_mode_from_name_test *t,
+ char *desc)
+{
+ sprintf(desc, "%s", t->name);
+}
+
+KUNIT_ARRAY_PARAM(drm_get_tv_mode_from_name_valid,
+ drm_get_tv_mode_from_name_valid_tests,
+ drm_get_tv_mode_from_name_valid_desc);
+
+static void drm_test_get_tv_mode_from_name_truncated(struct kunit *test)
+{
+ const char *name = "NTS";
+ int ret;
+
+ ret = drm_get_tv_mode_from_name(name, strlen(name));
+ KUNIT_EXPECT_LT(test, ret, 0);
+};
+
+static struct kunit_case drm_get_tv_mode_from_name_tests[] = {
+ KUNIT_CASE_PARAM(drm_test_get_tv_mode_from_name_valid,
+ drm_get_tv_mode_from_name_valid_gen_params),
+ KUNIT_CASE(drm_test_get_tv_mode_from_name_truncated),
+ { }
+};
+
+static struct kunit_suite drm_get_tv_mode_from_name_test_suite = {
+ .name = "drm_get_tv_mode_from_name",
+ .test_cases = drm_get_tv_mode_from_name_tests,
+};
+
+kunit_test_suite(drm_get_tv_mode_from_name_test_suite);
+
+MODULE_AUTHOR("Maxime Ripard <maxime@cerno.tech>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/tests/drm_format_helper_test.c b/drivers/gpu/drm/tests/drm_format_helper_test.c
index 567c71f95edc..34e80eb6d96e 100644
--- a/drivers/gpu/drm/tests/drm_format_helper_test.c
+++ b/drivers/gpu/drm/tests/drm_format_helper_test.c
@@ -32,16 +32,41 @@ struct convert_to_rgb565_result {
const u16 expected_swab[TEST_BUF_SIZE];
};
+struct convert_to_xrgb1555_result {
+ unsigned int dst_pitch;
+ const u16 expected[TEST_BUF_SIZE];
+};
+
+struct convert_to_argb1555_result {
+ unsigned int dst_pitch;
+ const u16 expected[TEST_BUF_SIZE];
+};
+
+struct convert_to_rgba5551_result {
+ unsigned int dst_pitch;
+ const u16 expected[TEST_BUF_SIZE];
+};
+
struct convert_to_rgb888_result {
unsigned int dst_pitch;
const u8 expected[TEST_BUF_SIZE];
};
+struct convert_to_argb8888_result {
+ unsigned int dst_pitch;
+ const u32 expected[TEST_BUF_SIZE];
+};
+
struct convert_to_xrgb2101010_result {
unsigned int dst_pitch;
const u32 expected[TEST_BUF_SIZE];
};
+struct convert_to_argb2101010_result {
+ unsigned int dst_pitch;
+ const u32 expected[TEST_BUF_SIZE];
+};
+
struct convert_xrgb8888_case {
const char *name;
unsigned int pitch;
@@ -50,8 +75,13 @@ struct convert_xrgb8888_case {
struct convert_to_gray8_result gray8_result;
struct convert_to_rgb332_result rgb332_result;
struct convert_to_rgb565_result rgb565_result;
+ struct convert_to_xrgb1555_result xrgb1555_result;
+ struct convert_to_argb1555_result argb1555_result;
+ struct convert_to_rgba5551_result rgba5551_result;
struct convert_to_rgb888_result rgb888_result;
+ struct convert_to_argb8888_result argb8888_result;
struct convert_to_xrgb2101010_result xrgb2101010_result;
+ struct convert_to_argb2101010_result argb2101010_result;
};
static struct convert_xrgb8888_case convert_xrgb8888_cases[] = {
@@ -73,14 +103,34 @@ static struct convert_xrgb8888_case convert_xrgb8888_cases[] = {
.expected = { 0xF800 },
.expected_swab = { 0x00F8 },
},
+ .xrgb1555_result = {
+ .dst_pitch = 0,
+ .expected = { 0x7C00 },
+ },
+ .argb1555_result = {
+ .dst_pitch = 0,
+ .expected = { 0xFC00 },
+ },
+ .rgba5551_result = {
+ .dst_pitch = 0,
+ .expected = { 0xF801 },
+ },
.rgb888_result = {
.dst_pitch = 0,
.expected = { 0x00, 0x00, 0xFF },
},
+ .argb8888_result = {
+ .dst_pitch = 0,
+ .expected = { 0xFFFF0000 },
+ },
.xrgb2101010_result = {
.dst_pitch = 0,
.expected = { 0x3FF00000 },
},
+ .argb2101010_result = {
+ .dst_pitch = 0,
+ .expected = { 0xFFF00000 },
+ },
},
{
.name = "single_pixel_clip_rectangle",
@@ -103,14 +153,34 @@ static struct convert_xrgb8888_case convert_xrgb8888_cases[] = {
.expected = { 0xF800 },
.expected_swab = { 0x00F8 },
},
+ .xrgb1555_result = {
+ .dst_pitch = 0,
+ .expected = { 0x7C00 },
+ },
+ .argb1555_result = {
+ .dst_pitch = 0,
+ .expected = { 0xFC00 },
+ },
+ .rgba5551_result = {
+ .dst_pitch = 0,
+ .expected = { 0xF801 },
+ },
.rgb888_result = {
.dst_pitch = 0,
.expected = { 0x00, 0x00, 0xFF },
},
+ .argb8888_result = {
+ .dst_pitch = 0,
+ .expected = { 0xFFFF0000 },
+ },
.xrgb2101010_result = {
.dst_pitch = 0,
.expected = { 0x3FF00000 },
},
+ .argb2101010_result = {
+ .dst_pitch = 0,
+ .expected = { 0xFFF00000 },
+ },
},
{
/* Well known colors: White, black, red, green, blue, magenta,
@@ -160,6 +230,33 @@ static struct convert_xrgb8888_case convert_xrgb8888_cases[] = {
0xE0FF, 0xFF07,
},
},
+ .xrgb1555_result = {
+ .dst_pitch = 0,
+ .expected = {
+ 0x7FFF, 0x0000,
+ 0x7C00, 0x03E0,
+ 0x001F, 0x7C1F,
+ 0x7FE0, 0x03FF,
+ },
+ },
+ .argb1555_result = {
+ .dst_pitch = 0,
+ .expected = {
+ 0xFFFF, 0x8000,
+ 0xFC00, 0x83E0,
+ 0x801F, 0xFC1F,
+ 0xFFE0, 0x83FF,
+ },
+ },
+ .rgba5551_result = {
+ .dst_pitch = 0,
+ .expected = {
+ 0xFFFF, 0x0001,
+ 0xF801, 0x07C1,
+ 0x003F, 0xF83F,
+ 0xFFC1, 0x07FF,
+ },
+ },
.rgb888_result = {
.dst_pitch = 0,
.expected = {
@@ -169,6 +266,15 @@ static struct convert_xrgb8888_case convert_xrgb8888_cases[] = {
0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0x00,
},
},
+ .argb8888_result = {
+ .dst_pitch = 0,
+ .expected = {
+ 0xFFFFFFFF, 0xFF000000,
+ 0xFFFF0000, 0xFF00FF00,
+ 0xFF0000FF, 0xFFFF00FF,
+ 0xFFFFFF00, 0xFF00FFFF,
+ },
+ },
.xrgb2101010_result = {
.dst_pitch = 0,
.expected = {
@@ -178,6 +284,15 @@ static struct convert_xrgb8888_case convert_xrgb8888_cases[] = {
0x3FFFFC00, 0x000FFFFF,
},
},
+ .argb2101010_result = {
+ .dst_pitch = 0,
+ .expected = {
+ 0xFFFFFFFF, 0xC0000000,
+ 0xFFF00000, 0xC00FFC00,
+ 0xC00003FF, 0xFFF003FF,
+ 0xFFFFFC00, 0xC00FFFFF,
+ },
+ },
},
{
/* Randomly picked colors. Full buffer within the clip area. */
@@ -218,6 +333,30 @@ static struct convert_xrgb8888_case convert_xrgb8888_cases[] = {
0x00A8, 0x8E6B, 0x330A, 0x0000, 0x0000,
},
},
+ .xrgb1555_result = {
+ .dst_pitch = 10,
+ .expected = {
+ 0x0513, 0x0920, 0x5400, 0x0000, 0x0000,
+ 0x35CE, 0x0513, 0x0920, 0x0000, 0x0000,
+ 0x5400, 0x35CE, 0x0513, 0x0000, 0x0000,
+ },
+ },
+ .argb1555_result = {
+ .dst_pitch = 10,
+ .expected = {
+ 0x8513, 0x8920, 0xD400, 0x0000, 0x0000,
+ 0xB5CE, 0x8513, 0x8920, 0x0000, 0x0000,
+ 0xD400, 0xB5CE, 0x8513, 0x0000, 0x0000,
+ },
+ },
+ .rgba5551_result = {
+ .dst_pitch = 10,
+ .expected = {
+ 0x0A27, 0x1241, 0xA801, 0x0000, 0x0000,
+ 0x6B9D, 0x0A27, 0x1241, 0x0000, 0x0000,
+ 0xA801, 0x6B9D, 0x0A27, 0x0000, 0x0000,
+ },
+ },
.rgb888_result = {
.dst_pitch = 15,
.expected = {
@@ -229,6 +368,14 @@ static struct convert_xrgb8888_case convert_xrgb8888_cases[] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
},
},
+ .argb8888_result = {
+ .dst_pitch = 20,
+ .expected = {
+ 0xFF0E449C, 0xFF114D05, 0xFFA80303, 0x00000000, 0x00000000,
+ 0xFF6C7073, 0xFF0E449C, 0xFF114D05, 0x00000000, 0x00000000,
+ 0xFFA80303, 0xFF6C7073, 0xFF0E449C, 0x00000000, 0x00000000,
+ },
+ },
.xrgb2101010_result = {
.dst_pitch = 20,
.expected = {
@@ -237,6 +384,14 @@ static struct convert_xrgb8888_case convert_xrgb8888_cases[] = {
0x2A20300C, 0x1B1705CD, 0x03844672, 0x00000000, 0x00000000,
},
},
+ .argb2101010_result = {
+ .dst_pitch = 20,
+ .expected = {
+ 0xC3844672, 0xC444D414, 0xEA20300C, 0x00000000, 0x00000000,
+ 0xDB1705CD, 0xC3844672, 0xC444D414, 0x00000000, 0x00000000,
+ 0xEA20300C, 0xDB1705CD, 0xC3844672, 0x00000000, 0x00000000,
+ },
+ },
},
};
@@ -264,7 +419,22 @@ static size_t conversion_buf_size(u32 dst_format, unsigned int dst_pitch,
return dst_pitch * drm_rect_height(clip);
}
-static u32 *le32buf_to_cpu(struct kunit *test, const u32 *buf, size_t buf_size)
+static u16 *le16buf_to_cpu(struct kunit *test, const __le16 *buf, size_t buf_size)
+{
+ u16 *dst = NULL;
+ int n;
+
+ dst = kunit_kzalloc(test, sizeof(*dst) * buf_size, GFP_KERNEL);
+ if (!dst)
+ return NULL;
+
+ for (n = 0; n < buf_size; n++)
+ dst[n] = le16_to_cpu(buf[n]);
+
+ return dst;
+}
+
+static u32 *le32buf_to_cpu(struct kunit *test, const __le32 *buf, size_t buf_size)
{
u32 *dst = NULL;
int n;
@@ -279,6 +449,21 @@ static u32 *le32buf_to_cpu(struct kunit *test, const u32 *buf, size_t buf_size)
return dst;
}
+static __le32 *cpubuf_to_le32(struct kunit *test, const u32 *buf, size_t buf_size)
+{
+ __le32 *dst = NULL;
+ int n;
+
+ dst = kunit_kzalloc(test, sizeof(*dst) * buf_size, GFP_KERNEL);
+ if (!dst)
+ return NULL;
+
+ for (n = 0; n < buf_size; n++)
+ dst[n] = cpu_to_le32(buf[n]);
+
+ return dst;
+}
+
static void convert_xrgb8888_case_desc(struct convert_xrgb8888_case *t,
char *desc)
{
@@ -293,8 +478,8 @@ static void drm_test_fb_xrgb8888_to_gray8(struct kunit *test)
const struct convert_xrgb8888_case *params = test->param_value;
const struct convert_to_gray8_result *result = &params->gray8_result;
size_t dst_size;
- __u8 *buf = NULL;
- __u32 *xrgb8888 = NULL;
+ u8 *buf = NULL;
+ __le32 *xrgb8888 = NULL;
struct iosys_map dst, src;
struct drm_framebuffer fb = {
@@ -310,7 +495,7 @@ static void drm_test_fb_xrgb8888_to_gray8(struct kunit *test)
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf);
iosys_map_set_vaddr(&dst, buf);
- xrgb8888 = le32buf_to_cpu(test, params->xrgb8888, TEST_BUF_SIZE);
+ xrgb8888 = cpubuf_to_le32(test, params->xrgb8888, TEST_BUF_SIZE);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xrgb8888);
iosys_map_set_vaddr(&src, xrgb8888);
@@ -323,8 +508,8 @@ static void drm_test_fb_xrgb8888_to_rgb332(struct kunit *test)
const struct convert_xrgb8888_case *params = test->param_value;
const struct convert_to_rgb332_result *result = &params->rgb332_result;
size_t dst_size;
- __u8 *buf = NULL;
- __u32 *xrgb8888 = NULL;
+ u8 *buf = NULL;
+ __le32 *xrgb8888 = NULL;
struct iosys_map dst, src;
struct drm_framebuffer fb = {
@@ -340,7 +525,7 @@ static void drm_test_fb_xrgb8888_to_rgb332(struct kunit *test)
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf);
iosys_map_set_vaddr(&dst, buf);
- xrgb8888 = le32buf_to_cpu(test, params->xrgb8888, TEST_BUF_SIZE);
+ xrgb8888 = cpubuf_to_le32(test, params->xrgb8888, TEST_BUF_SIZE);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xrgb8888);
iosys_map_set_vaddr(&src, xrgb8888);
@@ -353,8 +538,8 @@ static void drm_test_fb_xrgb8888_to_rgb565(struct kunit *test)
const struct convert_xrgb8888_case *params = test->param_value;
const struct convert_to_rgb565_result *result = &params->rgb565_result;
size_t dst_size;
- __u16 *buf = NULL;
- __u32 *xrgb8888 = NULL;
+ u16 *buf = NULL;
+ __le32 *xrgb8888 = NULL;
struct iosys_map dst, src;
struct drm_framebuffer fb = {
@@ -370,24 +555,120 @@ static void drm_test_fb_xrgb8888_to_rgb565(struct kunit *test)
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf);
iosys_map_set_vaddr(&dst, buf);
- xrgb8888 = le32buf_to_cpu(test, params->xrgb8888, TEST_BUF_SIZE);
+ xrgb8888 = cpubuf_to_le32(test, params->xrgb8888, TEST_BUF_SIZE);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xrgb8888);
iosys_map_set_vaddr(&src, xrgb8888);
drm_fb_xrgb8888_to_rgb565(&dst, &result->dst_pitch, &src, &fb, &params->clip, false);
+ buf = le16buf_to_cpu(test, (__force const __le16 *)buf, dst_size / sizeof(__le16));
KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
+ buf = dst.vaddr; /* restore original value of buf */
drm_fb_xrgb8888_to_rgb565(&dst, &result->dst_pitch, &src, &fb, &params->clip, true);
+ buf = le16buf_to_cpu(test, (__force const __le16 *)buf, dst_size / sizeof(__le16));
KUNIT_EXPECT_MEMEQ(test, buf, result->expected_swab, dst_size);
}
+static void drm_test_fb_xrgb8888_to_xrgb1555(struct kunit *test)
+{
+ const struct convert_xrgb8888_case *params = test->param_value;
+ const struct convert_to_xrgb1555_result *result = &params->xrgb1555_result;
+ size_t dst_size;
+ u16 *buf = NULL;
+ __le32 *xrgb8888 = NULL;
+ struct iosys_map dst, src;
+
+ struct drm_framebuffer fb = {
+ .format = drm_format_info(DRM_FORMAT_XRGB8888),
+ .pitches = { params->pitch, 0, 0 },
+ };
+
+ dst_size = conversion_buf_size(DRM_FORMAT_XRGB1555, result->dst_pitch,
+ &params->clip);
+ KUNIT_ASSERT_GT(test, dst_size, 0);
+
+ buf = kunit_kzalloc(test, dst_size, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf);
+ iosys_map_set_vaddr(&dst, buf);
+
+ xrgb8888 = cpubuf_to_le32(test, params->xrgb8888, TEST_BUF_SIZE);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xrgb8888);
+ iosys_map_set_vaddr(&src, xrgb8888);
+
+ drm_fb_xrgb8888_to_xrgb1555(&dst, &result->dst_pitch, &src, &fb, &params->clip);
+ buf = le16buf_to_cpu(test, (__force const __le16 *)buf, dst_size / sizeof(__le16));
+ KUNIT_EXPECT_EQ(test, memcmp(buf, result->expected, dst_size), 0);
+}
+
+static void drm_test_fb_xrgb8888_to_argb1555(struct kunit *test)
+{
+ const struct convert_xrgb8888_case *params = test->param_value;
+ const struct convert_to_argb1555_result *result = &params->argb1555_result;
+ size_t dst_size;
+ u16 *buf = NULL;
+ __le32 *xrgb8888 = NULL;
+ struct iosys_map dst, src;
+
+ struct drm_framebuffer fb = {
+ .format = drm_format_info(DRM_FORMAT_XRGB8888),
+ .pitches = { params->pitch, 0, 0 },
+ };
+
+ dst_size = conversion_buf_size(DRM_FORMAT_ARGB1555, result->dst_pitch,
+ &params->clip);
+ KUNIT_ASSERT_GT(test, dst_size, 0);
+
+ buf = kunit_kzalloc(test, dst_size, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf);
+ iosys_map_set_vaddr(&dst, buf);
+
+ xrgb8888 = cpubuf_to_le32(test, params->xrgb8888, TEST_BUF_SIZE);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xrgb8888);
+ iosys_map_set_vaddr(&src, xrgb8888);
+
+ drm_fb_xrgb8888_to_argb1555(&dst, &result->dst_pitch, &src, &fb, &params->clip);
+ buf = le16buf_to_cpu(test, (__force const __le16 *)buf, dst_size / sizeof(__le16));
+ KUNIT_EXPECT_EQ(test, memcmp(buf, result->expected, dst_size), 0);
+}
+
+static void drm_test_fb_xrgb8888_to_rgba5551(struct kunit *test)
+{
+ const struct convert_xrgb8888_case *params = test->param_value;
+ const struct convert_to_rgba5551_result *result = &params->rgba5551_result;
+ size_t dst_size;
+ u16 *buf = NULL;
+ __le32 *xrgb8888 = NULL;
+ struct iosys_map dst, src;
+
+ struct drm_framebuffer fb = {
+ .format = drm_format_info(DRM_FORMAT_XRGB8888),
+ .pitches = { params->pitch, 0, 0 },
+ };
+
+ dst_size = conversion_buf_size(DRM_FORMAT_RGBA5551, result->dst_pitch,
+ &params->clip);
+ KUNIT_ASSERT_GT(test, dst_size, 0);
+
+ buf = kunit_kzalloc(test, dst_size, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf);
+ iosys_map_set_vaddr(&dst, buf);
+
+ xrgb8888 = cpubuf_to_le32(test, params->xrgb8888, TEST_BUF_SIZE);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xrgb8888);
+ iosys_map_set_vaddr(&src, xrgb8888);
+
+ drm_fb_xrgb8888_to_rgba5551(&dst, &result->dst_pitch, &src, &fb, &params->clip);
+ buf = le16buf_to_cpu(test, (__force const __le16 *)buf, dst_size / sizeof(__le16));
+ KUNIT_EXPECT_EQ(test, memcmp(buf, result->expected, dst_size), 0);
+}
+
static void drm_test_fb_xrgb8888_to_rgb888(struct kunit *test)
{
const struct convert_xrgb8888_case *params = test->param_value;
const struct convert_to_rgb888_result *result = &params->rgb888_result;
size_t dst_size;
- __u8 *buf = NULL;
- __u32 *xrgb8888 = NULL;
+ u8 *buf = NULL;
+ __le32 *xrgb8888 = NULL;
struct iosys_map dst, src;
struct drm_framebuffer fb = {
@@ -403,21 +684,56 @@ static void drm_test_fb_xrgb8888_to_rgb888(struct kunit *test)
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf);
iosys_map_set_vaddr(&dst, buf);
- xrgb8888 = le32buf_to_cpu(test, params->xrgb8888, TEST_BUF_SIZE);
+ xrgb8888 = cpubuf_to_le32(test, params->xrgb8888, TEST_BUF_SIZE);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xrgb8888);
iosys_map_set_vaddr(&src, xrgb8888);
+ /*
+ * RGB888 expected results are already in little-endian
+ * order, so there's no need to convert the test output.
+ */
drm_fb_xrgb8888_to_rgb888(&dst, &result->dst_pitch, &src, &fb, &params->clip);
KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
}
+static void drm_test_fb_xrgb8888_to_argb8888(struct kunit *test)
+{
+ const struct convert_xrgb8888_case *params = test->param_value;
+ const struct convert_to_argb8888_result *result = &params->argb8888_result;
+ size_t dst_size;
+ u32 *buf = NULL;
+ __le32 *xrgb8888 = NULL;
+ struct iosys_map dst, src;
+
+ struct drm_framebuffer fb = {
+ .format = drm_format_info(DRM_FORMAT_XRGB8888),
+ .pitches = { params->pitch, 0, 0 },
+ };
+
+ dst_size = conversion_buf_size(DRM_FORMAT_ARGB8888,
+ result->dst_pitch, &params->clip);
+ KUNIT_ASSERT_GT(test, dst_size, 0);
+
+ buf = kunit_kzalloc(test, dst_size, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf);
+ iosys_map_set_vaddr(&dst, buf);
+
+ xrgb8888 = cpubuf_to_le32(test, params->xrgb8888, TEST_BUF_SIZE);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xrgb8888);
+ iosys_map_set_vaddr(&src, xrgb8888);
+
+ drm_fb_xrgb8888_to_argb8888(&dst, &result->dst_pitch, &src, &fb, &params->clip);
+ buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32));
+ KUNIT_EXPECT_EQ(test, memcmp(buf, result->expected, dst_size), 0);
+}
+
static void drm_test_fb_xrgb8888_to_xrgb2101010(struct kunit *test)
{
const struct convert_xrgb8888_case *params = test->param_value;
const struct convert_to_xrgb2101010_result *result = &params->xrgb2101010_result;
size_t dst_size;
- __u32 *buf = NULL;
- __u32 *xrgb8888 = NULL;
+ u32 *buf = NULL;
+ __le32 *xrgb8888 = NULL;
struct iosys_map dst, src;
struct drm_framebuffer fb = {
@@ -433,7 +749,7 @@ static void drm_test_fb_xrgb8888_to_xrgb2101010(struct kunit *test)
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf);
iosys_map_set_vaddr(&dst, buf);
- xrgb8888 = le32buf_to_cpu(test, params->xrgb8888, TEST_BUF_SIZE);
+ xrgb8888 = cpubuf_to_le32(test, params->xrgb8888, TEST_BUF_SIZE);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xrgb8888);
iosys_map_set_vaddr(&src, xrgb8888);
@@ -442,12 +758,48 @@ static void drm_test_fb_xrgb8888_to_xrgb2101010(struct kunit *test)
KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
}
+static void drm_test_fb_xrgb8888_to_argb2101010(struct kunit *test)
+{
+ const struct convert_xrgb8888_case *params = test->param_value;
+ const struct convert_to_argb2101010_result *result = &params->argb2101010_result;
+ size_t dst_size;
+ u32 *buf = NULL;
+ __le32 *xrgb8888 = NULL;
+ struct iosys_map dst, src;
+
+ struct drm_framebuffer fb = {
+ .format = drm_format_info(DRM_FORMAT_XRGB8888),
+ .pitches = { params->pitch, 0, 0 },
+ };
+
+ dst_size = conversion_buf_size(DRM_FORMAT_ARGB2101010,
+ result->dst_pitch, &params->clip);
+ KUNIT_ASSERT_GT(test, dst_size, 0);
+
+ buf = kunit_kzalloc(test, dst_size, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf);
+ iosys_map_set_vaddr(&dst, buf);
+
+ xrgb8888 = cpubuf_to_le32(test, params->xrgb8888, TEST_BUF_SIZE);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xrgb8888);
+ iosys_map_set_vaddr(&src, xrgb8888);
+
+ drm_fb_xrgb8888_to_argb2101010(&dst, &result->dst_pitch, &src, &fb, &params->clip);
+ buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32));
+ KUNIT_EXPECT_EQ(test, memcmp(buf, result->expected, dst_size), 0);
+}
+
static struct kunit_case drm_format_helper_test_cases[] = {
KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_gray8, convert_xrgb8888_gen_params),
KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_rgb332, convert_xrgb8888_gen_params),
KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_rgb565, convert_xrgb8888_gen_params),
+ KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_xrgb1555, convert_xrgb8888_gen_params),
+ KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_argb1555, convert_xrgb8888_gen_params),
+ KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_rgba5551, convert_xrgb8888_gen_params),
KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_rgb888, convert_xrgb8888_gen_params),
+ KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_argb8888, convert_xrgb8888_gen_params),
KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_xrgb2101010, convert_xrgb8888_gen_params),
+ KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_argb2101010, convert_xrgb8888_gen_params),
{}
};
diff --git a/drivers/gpu/drm/tests/drm_kunit_helpers.c b/drivers/gpu/drm/tests/drm_kunit_helpers.c
index f1662091f250..e98b4150f556 100644
--- a/drivers/gpu/drm/tests/drm_kunit_helpers.c
+++ b/drivers/gpu/drm/tests/drm_kunit_helpers.c
@@ -1,63 +1,101 @@
// SPDX-License-Identifier: GPL-2.0
#include <drm/drm_drv.h>
+#include <drm/drm_kunit_helpers.h>
#include <drm/drm_managed.h>
#include <kunit/resource.h>
#include <linux/device.h>
+#include <linux/platform_device.h>
-#include "drm_kunit_helpers.h"
-
-struct kunit_dev {
- struct drm_device base;
-};
+#define KUNIT_DEVICE_NAME "drm-kunit-mock-device"
static const struct drm_mode_config_funcs drm_mode_config_funcs = {
};
-static int dev_init(struct kunit_resource *res, void *ptr)
+static int fake_probe(struct platform_device *pdev)
{
- char *name = ptr;
- struct device *dev;
-
- dev = root_device_register(name);
- if (IS_ERR(dev))
- return PTR_ERR(dev);
+ return 0;
+}
- res->data = dev;
+static int fake_remove(struct platform_device *pdev)
+{
return 0;
}
-static void dev_free(struct kunit_resource *res)
+static struct platform_driver fake_platform_driver = {
+ .probe = fake_probe,
+ .remove = fake_remove,
+ .driver = {
+ .name = KUNIT_DEVICE_NAME,
+ },
+};
+
+/**
+ * drm_kunit_helper_alloc_device - Allocate a mock device for a KUnit test
+ * @test: The test context object
+ *
+ * This allocates a fake struct &device to create a mock for a KUnit
+ * test. The device will also be bound to a fake driver. It will thus be
+ * able to leverage the usual infrastructure and most notably the
+ * device-managed resources just like a "real" device.
+ *
+ * Callers need to make sure drm_kunit_helper_free_device() on the
+ * device when done.
+ *
+ * Returns:
+ * A pointer to the new device, or an ERR_PTR() otherwise.
+ */
+struct device *drm_kunit_helper_alloc_device(struct kunit *test)
{
- struct device *dev = res->data;
+ struct platform_device *pdev;
+ int ret;
- root_device_unregister(dev);
+ ret = platform_driver_register(&fake_platform_driver);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ pdev = platform_device_alloc(KUNIT_DEVICE_NAME, PLATFORM_DEVID_NONE);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pdev);
+
+ ret = platform_device_add(pdev);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ return &pdev->dev;
+}
+EXPORT_SYMBOL_GPL(drm_kunit_helper_alloc_device);
+
+/**
+ * drm_kunit_helper_free_device - Frees a mock device
+ * @test: The test context object
+ * @dev: The device to free
+ *
+ * Frees a device allocated with drm_kunit_helper_alloc_device().
+ */
+void drm_kunit_helper_free_device(struct kunit *test, struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+
+ platform_device_unregister(pdev);
+ platform_driver_unregister(&fake_platform_driver);
}
+EXPORT_SYMBOL_GPL(drm_kunit_helper_free_device);
-struct drm_device *drm_kunit_device_init(struct kunit *test, u32 features, char *name)
+struct drm_device *
+__drm_kunit_helper_alloc_drm_device_with_driver(struct kunit *test,
+ struct device *dev,
+ size_t size, size_t offset,
+ const struct drm_driver *driver)
{
- struct kunit_dev *kdev;
struct drm_device *drm;
- struct drm_driver *driver;
- struct device *dev;
+ void *container;
int ret;
- dev = kunit_alloc_resource(test, dev_init, dev_free, GFP_KERNEL, name);
- if (!dev)
- return ERR_PTR(-ENOMEM);
-
- driver = kunit_kzalloc(test, sizeof(*driver), GFP_KERNEL);
- if (!driver)
- return ERR_PTR(-ENOMEM);
-
- driver->driver_features = features;
- kdev = devm_drm_dev_alloc(dev, driver, struct kunit_dev, base);
- if (IS_ERR(kdev))
- return ERR_CAST(kdev);
+ container = __devm_drm_dev_alloc(dev, driver, size, offset);
+ if (IS_ERR(container))
+ return ERR_CAST(container);
- drm = &kdev->base;
+ drm = container + offset;
drm->mode_config.funcs = &drm_mode_config_funcs;
ret = drmm_mode_config_init(drm);
@@ -66,6 +104,7 @@ struct drm_device *drm_kunit_device_init(struct kunit *test, u32 features, char
return drm;
}
+EXPORT_SYMBOL_GPL(__drm_kunit_helper_alloc_drm_device_with_driver);
MODULE_AUTHOR("Maxime Ripard <maxime@cerno.tech>");
MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/tests/drm_kunit_helpers.h b/drivers/gpu/drm/tests/drm_kunit_helpers.h
deleted file mode 100644
index 20ab6eec4c89..000000000000
--- a/drivers/gpu/drm/tests/drm_kunit_helpers.h
+++ /dev/null
@@ -1,11 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-#ifndef DRM_KUNIT_HELPERS_H_
-#define DRM_KUNIT_HELPERS_H_
-
-struct drm_device;
-struct kunit;
-
-struct drm_device *drm_kunit_device_init(struct kunit *test, u32 features, char *name);
-
-#endif // DRM_KUNIT_HELPERS_H_
diff --git a/drivers/gpu/drm/tests/drm_managed_test.c b/drivers/gpu/drm/tests/drm_managed_test.c
new file mode 100644
index 000000000000..1652dca11d30
--- /dev/null
+++ b/drivers/gpu/drm/tests/drm_managed_test.c
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <drm/drm_drv.h>
+#include <drm/drm_kunit_helpers.h>
+#include <drm/drm_managed.h>
+
+#include <kunit/resource.h>
+
+#include <linux/device.h>
+
+/* Ought to be enough for anybody */
+#define TEST_TIMEOUT_MS 100
+
+struct managed_test_priv {
+ bool action_done;
+ wait_queue_head_t action_wq;
+};
+
+static void drm_action(struct drm_device *drm, void *ptr)
+{
+ struct managed_test_priv *priv = ptr;
+
+ priv->action_done = true;
+ wake_up_interruptible(&priv->action_wq);
+}
+
+static void drm_test_managed_run_action(struct kunit *test)
+{
+ struct managed_test_priv *priv;
+ struct drm_device *drm;
+ struct device *dev;
+ int ret;
+
+ priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv);
+ init_waitqueue_head(&priv->action_wq);
+
+ dev = drm_kunit_helper_alloc_device(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
+
+ drm = __drm_kunit_helper_alloc_drm_device(test, dev, sizeof(*drm), 0, DRIVER_MODESET);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, drm);
+
+ ret = drmm_add_action_or_reset(drm, drm_action, priv);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ ret = drm_dev_register(drm, 0);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ drm_dev_unregister(drm);
+ drm_kunit_helper_free_device(test, dev);
+
+ ret = wait_event_interruptible_timeout(priv->action_wq, priv->action_done,
+ msecs_to_jiffies(TEST_TIMEOUT_MS));
+ KUNIT_EXPECT_GT(test, ret, 0);
+}
+
+static struct kunit_case drm_managed_tests[] = {
+ KUNIT_CASE(drm_test_managed_run_action),
+ {}
+};
+
+static struct kunit_suite drm_managed_test_suite = {
+ .name = "drm-test-managed",
+ .test_cases = drm_managed_tests
+};
+
+kunit_test_suite(drm_managed_test_suite);
+
+MODULE_AUTHOR("Maxime Ripard <maxime@cerno.tech>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/tests/drm_modes_test.c b/drivers/gpu/drm/tests/drm_modes_test.c
new file mode 100644
index 000000000000..bc4aa2ce78be
--- /dev/null
+++ b/drivers/gpu/drm/tests/drm_modes_test.c
@@ -0,0 +1,158 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Kunit test for drm_modes functions
+ */
+
+#include <drm/drm_drv.h>
+#include <drm/drm_kunit_helpers.h>
+#include <drm/drm_modes.h>
+
+#include <kunit/test.h>
+
+#include <linux/units.h>
+
+struct drm_test_modes_priv {
+ struct drm_device *drm;
+ struct device *dev;
+};
+
+static int drm_test_modes_init(struct kunit *test)
+{
+ struct drm_test_modes_priv *priv;
+
+ priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, priv);
+
+ priv->dev = drm_kunit_helper_alloc_device(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv->dev);
+
+ priv->drm = __drm_kunit_helper_alloc_drm_device(test, priv->dev,
+ sizeof(*priv->drm), 0,
+ DRIVER_MODESET);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv->drm);
+
+ test->priv = priv;
+
+ return 0;
+}
+
+static void drm_test_modes_exit(struct kunit *test)
+{
+ struct drm_test_modes_priv *priv = test->priv;
+
+ drm_kunit_helper_free_device(test, priv->dev);
+}
+
+static void drm_test_modes_analog_tv_ntsc_480i(struct kunit *test)
+{
+ struct drm_test_modes_priv *priv = test->priv;
+ struct drm_display_mode *mode;
+
+ mode = drm_analog_tv_mode(priv->drm,
+ DRM_MODE_TV_MODE_NTSC,
+ 13500 * HZ_PER_KHZ, 720, 480,
+ true);
+ KUNIT_ASSERT_NOT_NULL(test, mode);
+
+ KUNIT_EXPECT_EQ(test, drm_mode_vrefresh(mode), 60);
+ KUNIT_EXPECT_EQ(test, mode->hdisplay, 720);
+
+ /* BT.601 defines hsync_start at 736 for 480i */
+ KUNIT_EXPECT_EQ(test, mode->hsync_start, 736);
+
+ /*
+ * The NTSC standard expects a line to take 63.556us. With a
+ * pixel clock of 13.5 MHz, a pixel takes around 74ns, so we
+ * need to have 63556ns / 74ns = 858.
+ *
+ * This is also mandated by BT.601.
+ */
+ KUNIT_EXPECT_EQ(test, mode->htotal, 858);
+
+ KUNIT_EXPECT_EQ(test, mode->vdisplay, 480);
+ KUNIT_EXPECT_EQ(test, mode->vtotal, 525);
+}
+
+static void drm_test_modes_analog_tv_ntsc_480i_inlined(struct kunit *test)
+{
+ struct drm_test_modes_priv *priv = test->priv;
+ struct drm_display_mode *expected, *mode;
+
+ expected = drm_analog_tv_mode(priv->drm,
+ DRM_MODE_TV_MODE_NTSC,
+ 13500 * HZ_PER_KHZ, 720, 480,
+ true);
+ KUNIT_ASSERT_NOT_NULL(test, expected);
+
+ mode = drm_mode_analog_ntsc_480i(priv->drm);
+ KUNIT_ASSERT_NOT_NULL(test, mode);
+
+ KUNIT_EXPECT_TRUE(test, drm_mode_equal(expected, mode));
+}
+
+static void drm_test_modes_analog_tv_pal_576i(struct kunit *test)
+{
+ struct drm_test_modes_priv *priv = test->priv;
+ struct drm_display_mode *mode;
+
+ mode = drm_analog_tv_mode(priv->drm,
+ DRM_MODE_TV_MODE_PAL,
+ 13500 * HZ_PER_KHZ, 720, 576,
+ true);
+ KUNIT_ASSERT_NOT_NULL(test, mode);
+
+ KUNIT_EXPECT_EQ(test, drm_mode_vrefresh(mode), 50);
+ KUNIT_EXPECT_EQ(test, mode->hdisplay, 720);
+
+ /* BT.601 defines hsync_start at 732 for 576i */
+ KUNIT_EXPECT_EQ(test, mode->hsync_start, 732);
+
+ /*
+ * The PAL standard expects a line to take 64us. With a pixel
+ * clock of 13.5 MHz, a pixel takes around 74ns, so we need to
+ * have 64000ns / 74ns = 864.
+ *
+ * This is also mandated by BT.601.
+ */
+ KUNIT_EXPECT_EQ(test, mode->htotal, 864);
+
+ KUNIT_EXPECT_EQ(test, mode->vdisplay, 576);
+ KUNIT_EXPECT_EQ(test, mode->vtotal, 625);
+}
+
+static void drm_test_modes_analog_tv_pal_576i_inlined(struct kunit *test)
+{
+ struct drm_test_modes_priv *priv = test->priv;
+ struct drm_display_mode *expected, *mode;
+
+ expected = drm_analog_tv_mode(priv->drm,
+ DRM_MODE_TV_MODE_PAL,
+ 13500 * HZ_PER_KHZ, 720, 576,
+ true);
+ KUNIT_ASSERT_NOT_NULL(test, expected);
+
+ mode = drm_mode_analog_pal_576i(priv->drm);
+ KUNIT_ASSERT_NOT_NULL(test, mode);
+
+ KUNIT_EXPECT_TRUE(test, drm_mode_equal(expected, mode));
+}
+
+static struct kunit_case drm_modes_analog_tv_tests[] = {
+ KUNIT_CASE(drm_test_modes_analog_tv_ntsc_480i),
+ KUNIT_CASE(drm_test_modes_analog_tv_ntsc_480i_inlined),
+ KUNIT_CASE(drm_test_modes_analog_tv_pal_576i),
+ KUNIT_CASE(drm_test_modes_analog_tv_pal_576i_inlined),
+ { }
+};
+
+static struct kunit_suite drm_modes_analog_tv_test_suite = {
+ .name = "drm_modes_analog_tv",
+ .init = drm_test_modes_init,
+ .exit = drm_test_modes_exit,
+ .test_cases = drm_modes_analog_tv_tests,
+};
+
+kunit_test_suite(drm_modes_analog_tv_test_suite);
+
+MODULE_AUTHOR("Maxime Ripard <maxime@cerno.tech>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/tests/drm_probe_helper_test.c b/drivers/gpu/drm/tests/drm_probe_helper_test.c
new file mode 100644
index 000000000000..0ee65828623e
--- /dev/null
+++ b/drivers/gpu/drm/tests/drm_probe_helper_test.c
@@ -0,0 +1,218 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Kunit test for drm_probe_helper functions
+ */
+
+#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_device.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_kunit_helpers.h>
+#include <drm/drm_mode.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_probe_helper.h>
+
+#include <kunit/test.h>
+
+struct drm_probe_helper_test_priv {
+ struct drm_device *drm;
+ struct device *dev;
+ struct drm_connector connector;
+};
+
+static const struct drm_connector_helper_funcs drm_probe_helper_connector_helper_funcs = {
+};
+
+static const struct drm_connector_funcs drm_probe_helper_connector_funcs = {
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .reset = drm_atomic_helper_connector_reset,
+};
+
+static int drm_probe_helper_test_init(struct kunit *test)
+{
+ struct drm_probe_helper_test_priv *priv;
+ struct drm_connector *connector;
+ int ret;
+
+ priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, priv);
+ test->priv = priv;
+
+ priv->dev = drm_kunit_helper_alloc_device(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv->dev);
+
+ priv->drm = __drm_kunit_helper_alloc_drm_device(test, priv->dev,
+ sizeof(*priv->drm), 0,
+ DRIVER_MODESET | DRIVER_ATOMIC);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv->drm);
+
+ connector = &priv->connector;
+ ret = drmm_connector_init(priv->drm, connector,
+ &drm_probe_helper_connector_funcs,
+ DRM_MODE_CONNECTOR_Unknown,
+ NULL);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ drm_connector_helper_add(connector, &drm_probe_helper_connector_helper_funcs);
+
+ return 0;
+}
+
+static void drm_probe_helper_test_exit(struct kunit *test)
+{
+ struct drm_probe_helper_test_priv *priv = test->priv;
+
+ drm_kunit_helper_free_device(test, priv->dev);
+}
+
+typedef struct drm_display_mode *(*expected_mode_func_t)(struct drm_device *);
+
+struct drm_connector_helper_tv_get_modes_test {
+ const char *name;
+ unsigned int supported_tv_modes;
+ enum drm_connector_tv_mode default_mode;
+ bool cmdline;
+ enum drm_connector_tv_mode cmdline_mode;
+ expected_mode_func_t *expected_modes;
+ unsigned int num_expected_modes;
+};
+
+#define _TV_MODE_TEST(_name, _supported, _default, _cmdline, _cmdline_mode, ...) \
+ { \
+ .name = _name, \
+ .supported_tv_modes = _supported, \
+ .default_mode = _default, \
+ .cmdline = _cmdline, \
+ .cmdline_mode = _cmdline_mode, \
+ .expected_modes = (expected_mode_func_t[]) { __VA_ARGS__ }, \
+ .num_expected_modes = sizeof((expected_mode_func_t[]) { __VA_ARGS__ }) / \
+ (sizeof(expected_mode_func_t)), \
+ }
+
+#define TV_MODE_TEST(_name, _supported, _default, ...) \
+ _TV_MODE_TEST(_name, _supported, _default, false, 0, __VA_ARGS__)
+
+#define TV_MODE_TEST_CMDLINE(_name, _supported, _default, _cmdline, ...) \
+ _TV_MODE_TEST(_name, _supported, _default, true, _cmdline, __VA_ARGS__)
+
+static void
+drm_test_connector_helper_tv_get_modes_check(struct kunit *test)
+{
+ const struct drm_connector_helper_tv_get_modes_test *params = test->param_value;
+ struct drm_probe_helper_test_priv *priv = test->priv;
+ struct drm_connector *connector = &priv->connector;
+ struct drm_cmdline_mode *cmdline = &connector->cmdline_mode;
+ struct drm_display_mode *mode;
+ const struct drm_display_mode *expected;
+ size_t len;
+ int ret;
+
+ if (params->cmdline) {
+ cmdline->tv_mode_specified = true;
+ cmdline->tv_mode = params->cmdline_mode;
+ }
+
+ ret = drm_mode_create_tv_properties(priv->drm, params->supported_tv_modes);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ drm_object_attach_property(&connector->base,
+ priv->drm->mode_config.tv_mode_property,
+ params->default_mode);
+
+ mutex_lock(&priv->drm->mode_config.mutex);
+
+ ret = drm_connector_helper_tv_get_modes(connector);
+ KUNIT_EXPECT_EQ(test, ret, params->num_expected_modes);
+
+ len = 0;
+ list_for_each_entry(mode, &connector->probed_modes, head)
+ len++;
+ KUNIT_EXPECT_EQ(test, len, params->num_expected_modes);
+
+ if (params->num_expected_modes >= 1) {
+ mode = list_first_entry_or_null(&connector->probed_modes,
+ struct drm_display_mode, head);
+ KUNIT_ASSERT_NOT_NULL(test, mode);
+
+ expected = params->expected_modes[0](priv->drm);
+ KUNIT_ASSERT_NOT_NULL(test, expected);
+
+ KUNIT_EXPECT_TRUE(test, drm_mode_equal(mode, expected));
+ KUNIT_EXPECT_TRUE(test, mode->type & DRM_MODE_TYPE_PREFERRED);
+ }
+
+ if (params->num_expected_modes >= 2) {
+ mode = list_next_entry(mode, head);
+ KUNIT_ASSERT_NOT_NULL(test, mode);
+
+ expected = params->expected_modes[1](priv->drm);
+ KUNIT_ASSERT_NOT_NULL(test, expected);
+
+ KUNIT_EXPECT_TRUE(test, drm_mode_equal(mode, expected));
+ KUNIT_EXPECT_FALSE(test, mode->type & DRM_MODE_TYPE_PREFERRED);
+ }
+
+ mutex_unlock(&priv->drm->mode_config.mutex);
+}
+
+static const
+struct drm_connector_helper_tv_get_modes_test drm_connector_helper_tv_get_modes_tests[] = {
+ { .name = "None" },
+ TV_MODE_TEST("PAL",
+ BIT(DRM_MODE_TV_MODE_PAL),
+ DRM_MODE_TV_MODE_PAL,
+ drm_mode_analog_pal_576i),
+ TV_MODE_TEST("NTSC",
+ BIT(DRM_MODE_TV_MODE_NTSC),
+ DRM_MODE_TV_MODE_NTSC,
+ drm_mode_analog_ntsc_480i),
+ TV_MODE_TEST("Both, NTSC Default",
+ BIT(DRM_MODE_TV_MODE_NTSC) | BIT(DRM_MODE_TV_MODE_PAL),
+ DRM_MODE_TV_MODE_NTSC,
+ drm_mode_analog_ntsc_480i, drm_mode_analog_pal_576i),
+ TV_MODE_TEST("Both, PAL Default",
+ BIT(DRM_MODE_TV_MODE_NTSC) | BIT(DRM_MODE_TV_MODE_PAL),
+ DRM_MODE_TV_MODE_PAL,
+ drm_mode_analog_pal_576i, drm_mode_analog_ntsc_480i),
+ TV_MODE_TEST_CMDLINE("Both, NTSC Default, with PAL on command-line",
+ BIT(DRM_MODE_TV_MODE_NTSC) | BIT(DRM_MODE_TV_MODE_PAL),
+ DRM_MODE_TV_MODE_NTSC,
+ DRM_MODE_TV_MODE_PAL,
+ drm_mode_analog_pal_576i, drm_mode_analog_ntsc_480i),
+ TV_MODE_TEST_CMDLINE("Both, PAL Default, with NTSC on command-line",
+ BIT(DRM_MODE_TV_MODE_NTSC) | BIT(DRM_MODE_TV_MODE_PAL),
+ DRM_MODE_TV_MODE_PAL,
+ DRM_MODE_TV_MODE_NTSC,
+ drm_mode_analog_ntsc_480i, drm_mode_analog_pal_576i),
+};
+
+static void
+drm_connector_helper_tv_get_modes_desc(const struct drm_connector_helper_tv_get_modes_test *t,
+ char *desc)
+{
+ sprintf(desc, "%s", t->name);
+}
+
+KUNIT_ARRAY_PARAM(drm_connector_helper_tv_get_modes,
+ drm_connector_helper_tv_get_modes_tests,
+ drm_connector_helper_tv_get_modes_desc);
+
+static struct kunit_case drm_test_connector_helper_tv_get_modes_tests[] = {
+ KUNIT_CASE_PARAM(drm_test_connector_helper_tv_get_modes_check,
+ drm_connector_helper_tv_get_modes_gen_params),
+ { }
+};
+
+static struct kunit_suite drm_test_connector_helper_tv_get_modes_suite = {
+ .name = "drm_connector_helper_tv_get_modes",
+ .init = drm_probe_helper_test_init,
+ .exit = drm_probe_helper_test_exit,
+ .test_cases = drm_test_connector_helper_tv_get_modes_tests,
+};
+
+kunit_test_suite(drm_test_connector_helper_tv_get_modes_suite);
+
+MODULE_AUTHOR("Maxime Ripard <maxime@cerno.tech>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/tidss/tidss_crtc.c b/drivers/gpu/drm/tidss/tidss_crtc.c
index cd3c43a6c806..5e5e466f35d1 100644
--- a/drivers/gpu/drm/tidss/tidss_crtc.c
+++ b/drivers/gpu/drm/tidss/tidss_crtc.c
@@ -7,7 +7,6 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_vblank.h>
diff --git a/drivers/gpu/drm/tidss/tidss_dispc.c b/drivers/gpu/drm/tidss/tidss_dispc.c
index ad93acc9abd2..165365b515e1 100644
--- a/drivers/gpu/drm/tidss/tidss_dispc.c
+++ b/drivers/gpu/drm/tidss/tidss_dispc.c
@@ -1858,8 +1858,8 @@ static const struct {
{ DRM_FORMAT_XBGR4444, 0x21, },
{ DRM_FORMAT_RGBX4444, 0x22, },
- { DRM_FORMAT_ARGB1555, 0x25, },
- { DRM_FORMAT_ABGR1555, 0x26, },
+ { DRM_FORMAT_XRGB1555, 0x25, },
+ { DRM_FORMAT_XBGR1555, 0x26, },
{ DRM_FORMAT_XRGB8888, 0x27, },
{ DRM_FORMAT_XBGR8888, 0x28, },
@@ -2686,6 +2686,8 @@ int dispc_init(struct tidss_device *tidss)
dev_warn(dev, "cannot set DMA masks to 48-bit\n");
}
+ dma_set_max_seg_size(dev, UINT_MAX);
+
dispc = devm_kzalloc(dev, sizeof(*dispc), GFP_KERNEL);
if (!dispc)
return -ENOMEM;
diff --git a/drivers/gpu/drm/tidss/tidss_drv.c b/drivers/gpu/drm/tidss/tidss_drv.c
index 07d94b1e8089..2dac8727d2f4 100644
--- a/drivers/gpu/drm/tidss/tidss_drv.c
+++ b/drivers/gpu/drm/tidss/tidss_drv.c
@@ -12,7 +12,6 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_generic.h>
#include <drm/drm_gem_dma_helper.h>
diff --git a/drivers/gpu/drm/tidss/tidss_encoder.c b/drivers/gpu/drm/tidss/tidss_encoder.c
index e278a9c89476..0d4865e9c03d 100644
--- a/drivers/gpu/drm/tidss/tidss_encoder.c
+++ b/drivers/gpu/drm/tidss/tidss_encoder.c
@@ -7,7 +7,7 @@
#include <linux/export.h>
#include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
+#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_panel.h>
#include <drm/drm_of.h>
diff --git a/drivers/gpu/drm/tidss/tidss_kms.c b/drivers/gpu/drm/tidss/tidss_kms.c
index 345bcc3011e4..ad2fa3c3d4a7 100644
--- a/drivers/gpu/drm/tidss/tidss_kms.c
+++ b/drivers/gpu/drm/tidss/tidss_kms.c
@@ -9,7 +9,6 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
-#include <drm/drm_crtc_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
diff --git a/drivers/gpu/drm/tidss/tidss_plane.c b/drivers/gpu/drm/tidss/tidss_plane.c
index 42d50ec5526d..fe2c41f0cd4f 100644
--- a/drivers/gpu/drm/tidss/tidss_plane.c
+++ b/drivers/gpu/drm/tidss/tidss_plane.c
@@ -8,7 +8,6 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_blend.h>
#include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
index 80615ecdae0b..4ca426007dc8 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
@@ -496,7 +496,6 @@ static const struct drm_driver tilcdc_driver = {
* Power management:
*/
-#ifdef CONFIG_PM_SLEEP
static int tilcdc_pm_suspend(struct device *dev)
{
struct drm_device *ddev = dev_get_drvdata(dev);
@@ -518,11 +517,9 @@ static int tilcdc_pm_resume(struct device *dev)
pinctrl_pm_select_default_state(dev);
return drm_mode_config_helper_resume(ddev);
}
-#endif
-static const struct dev_pm_ops tilcdc_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(tilcdc_pm_suspend, tilcdc_pm_resume)
-};
+static DEFINE_SIMPLE_DEV_PM_OPS(tilcdc_pm_ops,
+ tilcdc_pm_suspend, tilcdc_pm_resume);
/*
* Platform driver:
@@ -597,7 +594,7 @@ static struct platform_driver tilcdc_platform_driver = {
.remove = tilcdc_pdev_remove,
.driver = {
.name = "tilcdc",
- .pm = &tilcdc_pm_ops,
+ .pm = pm_sleep_ptr(&tilcdc_pm_ops),
.of_match_table = tilcdc_of_match,
},
};
diff --git a/drivers/gpu/drm/tiny/cirrus.c b/drivers/gpu/drm/tiny/cirrus.c
index 678c2ef1cae7..cf35b6090503 100644
--- a/drivers/gpu/drm/tiny/cirrus.c
+++ b/drivers/gpu/drm/tiny/cirrus.c
@@ -604,7 +604,7 @@ static int cirrus_pci_probe(struct pci_dev *pdev,
if (ret)
return ret;
- drm_fbdev_generic_setup(dev, dev->mode_config.preferred_depth);
+ drm_fbdev_generic_setup(dev, 16);
return 0;
}
diff --git a/drivers/gpu/drm/tiny/gm12u320.c b/drivers/gpu/drm/tiny/gm12u320.c
index 130fd07a967d..c5bb683e440c 100644
--- a/drivers/gpu/drm/tiny/gm12u320.c
+++ b/drivers/gpu/drm/tiny/gm12u320.c
@@ -4,6 +4,7 @@
*/
#include <linux/module.h>
+#include <linux/pm.h>
#include <linux/usb.h>
#include <drm/drm_atomic_helper.h>
@@ -718,15 +719,15 @@ static void gm12u320_usb_disconnect(struct usb_interface *interface)
drm_atomic_helper_shutdown(dev);
}
-static __maybe_unused int gm12u320_suspend(struct usb_interface *interface,
- pm_message_t message)
+static int gm12u320_suspend(struct usb_interface *interface,
+ pm_message_t message)
{
struct drm_device *dev = usb_get_intfdata(interface);
return drm_mode_config_helper_suspend(dev);
}
-static __maybe_unused int gm12u320_resume(struct usb_interface *interface)
+static int gm12u320_resume(struct usb_interface *interface)
{
struct drm_device *dev = usb_get_intfdata(interface);
struct gm12u320_device *gm12u320 = to_gm12u320(dev);
@@ -747,11 +748,9 @@ static struct usb_driver gm12u320_usb_driver = {
.probe = gm12u320_usb_probe,
.disconnect = gm12u320_usb_disconnect,
.id_table = id_table,
-#ifdef CONFIG_PM
- .suspend = gm12u320_suspend,
- .resume = gm12u320_resume,
- .reset_resume = gm12u320_resume,
-#endif
+ .suspend = pm_ptr(gm12u320_suspend),
+ .resume = pm_ptr(gm12u320_resume),
+ .reset_resume = pm_ptr(gm12u320_resume),
};
module_usb_driver(gm12u320_usb_driver);
diff --git a/drivers/gpu/drm/tiny/hx8357d.c b/drivers/gpu/drm/tiny/hx8357d.c
index 9f634f720817..cdc4486e059b 100644
--- a/drivers/gpu/drm/tiny/hx8357d.c
+++ b/drivers/gpu/drm/tiny/hx8357d.c
@@ -181,10 +181,7 @@ out_exit:
}
static const struct drm_simple_display_pipe_funcs hx8357d_pipe_funcs = {
- .mode_valid = mipi_dbi_pipe_mode_valid,
- .enable = yx240qv29_enable,
- .disable = mipi_dbi_pipe_disable,
- .update = mipi_dbi_pipe_update,
+ DRM_MIPI_DBI_SIMPLE_DISPLAY_PIPE_FUNCS(yx240qv29_enable),
};
static const struct drm_display_mode yx350hv15_mode = {
diff --git a/drivers/gpu/drm/tiny/ili9163.c b/drivers/gpu/drm/tiny/ili9163.c
index ca0451f79962..bc4384d410fc 100644
--- a/drivers/gpu/drm/tiny/ili9163.c
+++ b/drivers/gpu/drm/tiny/ili9163.c
@@ -100,11 +100,7 @@ out_exit:
}
static const struct drm_simple_display_pipe_funcs ili9163_pipe_funcs = {
- .mode_valid = mipi_dbi_pipe_mode_valid,
- .enable = yx240qv29_enable,
- .disable = mipi_dbi_pipe_disable,
- .update = mipi_dbi_pipe_update,
- .prepare_fb = drm_gem_simple_display_pipe_prepare_fb,
+ DRM_MIPI_DBI_SIMPLE_DISPLAY_PIPE_FUNCS(yx240qv29_enable),
};
static const struct drm_display_mode yx240qv29_mode = {
diff --git a/drivers/gpu/drm/tiny/ili9225.c b/drivers/gpu/drm/tiny/ili9225.c
index 815bab285823..077c6ff5a2e1 100644
--- a/drivers/gpu/drm/tiny/ili9225.c
+++ b/drivers/gpu/drm/tiny/ili9225.c
@@ -25,6 +25,7 @@
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_dma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_mipi_dbi.h>
#include <drm/drm_rect.h>
@@ -76,9 +77,9 @@ static inline int ili9225_command(struct mipi_dbi *dbi, u8 cmd, u16 data)
return mipi_dbi_command_buf(dbi, cmd, par, 2);
}
-static void ili9225_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect)
+static void ili9225_fb_dirty(struct iosys_map *src, struct drm_framebuffer *fb,
+ struct drm_rect *rect)
{
- struct drm_gem_dma_object *dma_obj = drm_fb_dma_get_gem_obj(fb, 0);
struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(fb->dev);
unsigned int height = rect->y2 - rect->y1;
unsigned int width = rect->x2 - rect->x1;
@@ -86,13 +87,10 @@ static void ili9225_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect)
bool swap = dbi->swap_bytes;
u16 x_start, y_start;
u16 x1, x2, y1, y2;
- int idx, ret = 0;
+ int ret = 0;
bool full;
void *tr;
- if (!drm_dev_enter(fb->dev, &idx))
- return;
-
full = width == fb->width && height == fb->height;
DRM_DEBUG_KMS("Flushing [FB:%d] " DRM_RECT_FMT "\n", fb->base.id, DRM_RECT_ARG(rect));
@@ -100,11 +98,11 @@ static void ili9225_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect)
if (!dbi->dc || !full || swap ||
fb->format->format == DRM_FORMAT_XRGB8888) {
tr = dbidev->tx_buf;
- ret = mipi_dbi_buf_copy(dbidev->tx_buf, fb, rect, swap);
+ ret = mipi_dbi_buf_copy(tr, src, fb, rect, swap);
if (ret)
goto err_msg;
} else {
- tr = dma_obj->vaddr;
+ tr = src->vaddr; /* TODO: Use mapping abstraction properly */
}
switch (dbidev->rotation) {
@@ -155,21 +153,27 @@ static void ili9225_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect)
err_msg:
if (ret)
dev_err_once(fb->dev->dev, "Failed to update display %d\n", ret);
-
- drm_dev_exit(idx);
}
static void ili9225_pipe_update(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *old_state)
{
struct drm_plane_state *state = pipe->plane.state;
+ struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(state);
+ struct drm_framebuffer *fb = state->fb;
struct drm_rect rect;
+ int idx;
if (!pipe->crtc.state->active)
return;
+ if (!drm_dev_enter(fb->dev, &idx))
+ return;
+
if (drm_atomic_helper_damage_merged(old_state, state, &rect))
- ili9225_fb_dirty(state->fb, &rect);
+ ili9225_fb_dirty(&shadow_plane_state->data[0], fb, &rect);
+
+ drm_dev_exit(idx);
}
static void ili9225_pipe_enable(struct drm_simple_display_pipe *pipe,
@@ -177,6 +181,7 @@ static void ili9225_pipe_enable(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *plane_state)
{
struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(pipe->crtc.dev);
+ struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
struct drm_framebuffer *fb = plane_state->fb;
struct device *dev = pipe->crtc.dev->dev;
struct mipi_dbi *dbi = &dbidev->dbi;
@@ -276,7 +281,8 @@ static void ili9225_pipe_enable(struct drm_simple_display_pipe *pipe,
ili9225_command(dbi, ILI9225_DISPLAY_CONTROL_1, 0x1017);
- ili9225_fb_dirty(fb, &rect);
+ ili9225_fb_dirty(&shadow_plane_state->data[0], fb, &rect);
+
out_exit:
drm_dev_exit(idx);
}
@@ -326,9 +332,15 @@ static int ili9225_dbi_command(struct mipi_dbi *dbi, u8 *cmd, u8 *par,
}
static const struct drm_simple_display_pipe_funcs ili9225_pipe_funcs = {
+ .mode_valid = mipi_dbi_pipe_mode_valid,
.enable = ili9225_pipe_enable,
.disable = ili9225_pipe_disable,
.update = ili9225_pipe_update,
+ .begin_fb_access = mipi_dbi_pipe_begin_fb_access,
+ .end_fb_access = mipi_dbi_pipe_end_fb_access,
+ .reset_plane = mipi_dbi_pipe_reset_plane,
+ .duplicate_plane_state = mipi_dbi_pipe_duplicate_plane_state,
+ .destroy_plane_state = mipi_dbi_pipe_destroy_plane_state,
};
static const struct drm_display_mode ili9225_mode = {
diff --git a/drivers/gpu/drm/tiny/ili9341.c b/drivers/gpu/drm/tiny/ili9341.c
index 420f6005a956..47b61c3bf145 100644
--- a/drivers/gpu/drm/tiny/ili9341.c
+++ b/drivers/gpu/drm/tiny/ili9341.c
@@ -137,10 +137,7 @@ out_exit:
}
static const struct drm_simple_display_pipe_funcs ili9341_pipe_funcs = {
- .mode_valid = mipi_dbi_pipe_mode_valid,
- .enable = yx240qv29_enable,
- .disable = mipi_dbi_pipe_disable,
- .update = mipi_dbi_pipe_update,
+ DRM_MIPI_DBI_SIMPLE_DISPLAY_PIPE_FUNCS(yx240qv29_enable),
};
static const struct drm_display_mode yx240qv29_mode = {
diff --git a/drivers/gpu/drm/tiny/ili9486.c b/drivers/gpu/drm/tiny/ili9486.c
index 1bb847466b10..02265c898816 100644
--- a/drivers/gpu/drm/tiny/ili9486.c
+++ b/drivers/gpu/drm/tiny/ili9486.c
@@ -43,6 +43,7 @@ static int waveshare_command(struct mipi_dbi *mipi, u8 *cmd, u8 *par,
size_t num)
{
struct spi_device *spi = mipi->spi;
+ unsigned int bpw = 8;
void *data = par;
u32 speed_hz;
int i, ret;
@@ -56,8 +57,6 @@ static int waveshare_command(struct mipi_dbi *mipi, u8 *cmd, u8 *par,
* The displays are Raspberry Pi HATs and connected to the 8-bit only
* SPI controller, so 16-bit command and parameters need byte swapping
* before being transferred as 8-bit on the big endian SPI bus.
- * Pixel data bytes have already been swapped before this function is
- * called.
*/
buf[0] = cpu_to_be16(*cmd);
gpiod_set_value_cansleep(mipi->dc, 0);
@@ -71,12 +70,18 @@ static int waveshare_command(struct mipi_dbi *mipi, u8 *cmd, u8 *par,
for (i = 0; i < num; i++)
buf[i] = cpu_to_be16(par[i]);
num *= 2;
- speed_hz = mipi_dbi_spi_cmd_max_speed(spi, num);
data = buf;
}
+ /*
+ * Check whether pixel data bytes needs to be swapped or not
+ */
+ if (*cmd == MIPI_DCS_WRITE_MEMORY_START && !mipi->swap_bytes)
+ bpw = 16;
+
gpiod_set_value_cansleep(mipi->dc, 1);
- ret = mipi_dbi_spi_transfer(spi, speed_hz, 8, data, num);
+ speed_hz = mipi_dbi_spi_cmd_max_speed(spi, num);
+ ret = mipi_dbi_spi_transfer(spi, speed_hz, bpw, data, num);
free:
kfree(buf);
@@ -150,10 +155,7 @@ static void waveshare_enable(struct drm_simple_display_pipe *pipe,
}
static const struct drm_simple_display_pipe_funcs waveshare_pipe_funcs = {
- .mode_valid = mipi_dbi_pipe_mode_valid,
- .enable = waveshare_enable,
- .disable = mipi_dbi_pipe_disable,
- .update = mipi_dbi_pipe_update,
+ DRM_MIPI_DBI_SIMPLE_DISPLAY_PIPE_FUNCS(waveshare_enable),
};
static const struct drm_display_mode waveshare_mode = {
@@ -183,6 +185,8 @@ MODULE_DEVICE_TABLE(of, ili9486_of_match);
static const struct spi_device_id ili9486_id[] = {
{ "ili9486", 0 },
+ { "rpi-lcd-35", 0 },
+ { "piscreen", 0 },
{ }
};
MODULE_DEVICE_TABLE(spi, ili9486_id);
diff --git a/drivers/gpu/drm/tiny/mi0283qt.c b/drivers/gpu/drm/tiny/mi0283qt.c
index 47df2b5a3048..01ff43c8ac3f 100644
--- a/drivers/gpu/drm/tiny/mi0283qt.c
+++ b/drivers/gpu/drm/tiny/mi0283qt.c
@@ -141,10 +141,7 @@ out_exit:
}
static const struct drm_simple_display_pipe_funcs mi0283qt_pipe_funcs = {
- .mode_valid = mipi_dbi_pipe_mode_valid,
- .enable = mi0283qt_enable,
- .disable = mipi_dbi_pipe_disable,
- .update = mipi_dbi_pipe_update,
+ DRM_MIPI_DBI_SIMPLE_DISPLAY_PIPE_FUNCS(mi0283qt_enable),
};
static const struct drm_display_mode mi0283qt_mode = {
diff --git a/drivers/gpu/drm/tiny/ofdrm.c b/drivers/gpu/drm/tiny/ofdrm.c
index dc9e4d71b12a..6e349ca42485 100644
--- a/drivers/gpu/drm/tiny/ofdrm.c
+++ b/drivers/gpu/drm/tiny/ofdrm.c
@@ -754,24 +754,6 @@ static void ofdrm_crtc_state_destroy(struct ofdrm_crtc_state *ofdrm_crtc_state)
kfree(ofdrm_crtc_state);
}
-/*
- * Support all formats of OF display and maybe more; in order
- * of preference. The display's update function will do any
- * conversion necessary.
- *
- * TODO: Add blit helpers for remaining formats and uncomment
- * constants.
- */
-static const uint32_t ofdrm_primary_plane_formats[] = {
- DRM_FORMAT_XRGB8888,
- DRM_FORMAT_RGB565,
- //DRM_FORMAT_XRGB1555,
- //DRM_FORMAT_C8,
- /* Big-endian formats below */
- DRM_FORMAT_BGRX8888,
- DRM_FORMAT_RGB565 | DRM_FORMAT_BIG_ENDIAN,
-};
-
static const uint64_t ofdrm_primary_plane_format_modifiers[] = {
DRM_FORMAT_MOD_LINEAR,
DRM_FORMAT_MOD_INVALID
@@ -1284,21 +1266,12 @@ static struct ofdrm_device *ofdrm_device_create(struct drm_driver *drv,
dev->mode_config.min_height = height;
dev->mode_config.max_height = max_height;
dev->mode_config.funcs = &ofdrm_mode_config_funcs;
- switch (depth) {
- case 32:
- dev->mode_config.preferred_depth = 24;
- break;
- default:
- dev->mode_config.preferred_depth = depth;
- break;
- }
+ dev->mode_config.preferred_depth = format->depth;
dev->mode_config.quirk_addfb_prefer_host_byte_order = true;
/* Primary plane */
nformats = drm_fb_build_fourcc_list(dev, &format->format, 1,
- ofdrm_primary_plane_formats,
- ARRAY_SIZE(ofdrm_primary_plane_formats),
odev->formats, ARRAY_SIZE(odev->formats));
primary_plane = &odev->primary_plane;
@@ -1379,6 +1352,7 @@ static int ofdrm_probe(struct platform_device *pdev)
{
struct ofdrm_device *odev;
struct drm_device *dev;
+ unsigned int color_mode;
int ret;
odev = ofdrm_device_create(&ofdrm_driver, pdev);
@@ -1390,11 +1364,11 @@ static int ofdrm_probe(struct platform_device *pdev)
if (ret)
return ret;
- /*
- * FIXME: 24-bit color depth does not work reliably with a 32-bpp
- * value. Force the bpp value of the scanout buffer's format.
- */
- drm_fbdev_generic_setup(dev, drm_format_info_bpp(odev->format, 0));
+ color_mode = drm_format_info_bpp(odev->format, 0);
+ if (color_mode == 16)
+ color_mode = odev->format->depth; // can be 15 or 16
+
+ drm_fbdev_generic_setup(dev, color_mode);
return 0;
}
diff --git a/drivers/gpu/drm/tiny/panel-mipi-dbi.c b/drivers/gpu/drm/tiny/panel-mipi-dbi.c
index 03a7d569cd56..eb9f13f18a02 100644
--- a/drivers/gpu/drm/tiny/panel-mipi-dbi.c
+++ b/drivers/gpu/drm/tiny/panel-mipi-dbi.c
@@ -212,10 +212,7 @@ out_exit:
}
static const struct drm_simple_display_pipe_funcs panel_mipi_dbi_pipe_funcs = {
- .mode_valid = mipi_dbi_pipe_mode_valid,
- .enable = panel_mipi_dbi_enable,
- .disable = mipi_dbi_pipe_disable,
- .update = mipi_dbi_pipe_update,
+ DRM_MIPI_DBI_SIMPLE_DISPLAY_PIPE_FUNCS(panel_mipi_dbi_enable),
};
DEFINE_DRM_GEM_DMA_FOPS(panel_mipi_dbi_fops);
@@ -297,6 +294,11 @@ static int panel_mipi_dbi_spi_probe(struct spi_device *spi)
return dev_err_probe(dev, PTR_ERR(dbidev->regulator),
"Failed to get regulator 'power'\n");
+ dbidev->io_regulator = devm_regulator_get(dev, "io");
+ if (IS_ERR(dbidev->io_regulator))
+ return dev_err_probe(dev, PTR_ERR(dbidev->io_regulator),
+ "Failed to get regulator 'io'\n");
+
dbidev->backlight = devm_of_find_backlight(dev);
if (IS_ERR(dbidev->backlight))
return dev_err_probe(dev, PTR_ERR(dbidev->backlight), "Failed to get backlight\n");
diff --git a/drivers/gpu/drm/tiny/simpledrm.c b/drivers/gpu/drm/tiny/simpledrm.c
index 162eb44dcba8..63881a3754f8 100644
--- a/drivers/gpu/drm/tiny/simpledrm.c
+++ b/drivers/gpu/drm/tiny/simpledrm.c
@@ -3,6 +3,7 @@
#include <linux/clk.h>
#include <linux/of_clk.h>
#include <linux/minmax.h>
+#include <linux/of_address.h>
#include <linux/platform_data/simplefb.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
@@ -184,6 +185,31 @@ simplefb_get_format_of(struct drm_device *dev, struct device_node *of_node)
return simplefb_get_validated_format(dev, format);
}
+static struct resource *
+simplefb_get_memory_of(struct drm_device *dev, struct device_node *of_node)
+{
+ struct device_node *np;
+ struct resource *res;
+ int err;
+
+ np = of_parse_phandle(of_node, "memory-region", 0);
+ if (!np)
+ return NULL;
+
+ res = devm_kzalloc(dev->dev, sizeof(*res), GFP_KERNEL);
+ if (!res)
+ return ERR_PTR(-ENOMEM);
+
+ err = of_address_to_resource(np, 0, res);
+ if (err)
+ return ERR_PTR(err);
+
+ if (of_get_property(of_node, "reg", NULL))
+ drm_warn(dev, "preferring \"memory-region\" over \"reg\" property\n");
+
+ return res;
+}
+
/*
* Simple Framebuffer device
*/
@@ -208,7 +234,7 @@ struct simpledrm_device {
unsigned int pitch;
/* memory management */
- void __iomem *screen_base;
+ struct iosys_map screen_base;
/* modesetting */
uint32_t formats[8];
@@ -446,25 +472,6 @@ static int simpledrm_device_init_regulators(struct simpledrm_device *sdev)
* Modesetting
*/
-/*
- * Support all formats of simplefb and maybe more; in order
- * of preference. The display's update function will do any
- * conversion necessary.
- *
- * TODO: Add blit helpers for remaining formats and uncomment
- * constants.
- */
-static const uint32_t simpledrm_primary_plane_formats[] = {
- DRM_FORMAT_XRGB8888,
- DRM_FORMAT_ARGB8888,
- DRM_FORMAT_RGB565,
- //DRM_FORMAT_XRGB1555,
- //DRM_FORMAT_ARGB1555,
- DRM_FORMAT_RGB888,
- DRM_FORMAT_XRGB2101010,
- DRM_FORMAT_ARGB2101010,
-};
-
static const uint64_t simpledrm_primary_plane_format_modifiers[] = {
DRM_FORMAT_MOD_LINEAR,
DRM_FORMAT_MOD_INVALID
@@ -492,15 +499,15 @@ static void simpledrm_primary_plane_helper_atomic_update(struct drm_plane *plane
drm_atomic_helper_damage_iter_init(&iter, old_plane_state, plane_state);
drm_atomic_for_each_plane_damage(&iter, &damage) {
- struct iosys_map dst = IOSYS_MAP_INIT_VADDR(sdev->screen_base);
struct drm_rect dst_clip = plane_state->dst;
+ struct iosys_map dst = sdev->screen_base;
if (!drm_rect_intersect(&dst_clip, &damage))
continue;
iosys_map_incr(&dst, drm_fb_clip_offset(sdev->pitch, sdev->format, &dst_clip));
- drm_fb_blit(&dst, &sdev->pitch, sdev->format->format, shadow_plane_state->data, fb,
- &damage);
+ drm_fb_blit(&dst, &sdev->pitch, sdev->format->format, shadow_plane_state->data,
+ fb, &damage);
}
drm_dev_exit(idx);
@@ -519,7 +526,7 @@ static void simpledrm_primary_plane_helper_atomic_disable(struct drm_plane *plan
return;
/* Clear screen to black if disabled */
- memset_io(sdev->screen_base, 0, sdev->pitch * sdev->mode.vdisplay);
+ iosys_map_memset(&sdev->screen_base, 0, 0, sdev->pitch * sdev->mode.vdisplay);
drm_dev_exit(idx);
}
@@ -623,8 +630,7 @@ static struct simpledrm_device *simpledrm_device_create(struct drm_driver *drv,
struct drm_device *dev;
int width, height, stride;
const struct drm_format_info *format;
- struct resource *res, *mem;
- void __iomem *screen_base;
+ struct resource *res, *mem = NULL;
struct drm_plane *primary_plane;
struct drm_crtc *crtc;
struct drm_encoder *encoder;
@@ -676,6 +682,9 @@ static struct simpledrm_device *simpledrm_device_create(struct drm_driver *drv,
format = simplefb_get_format_of(dev, of_node);
if (IS_ERR(format))
return ERR_CAST(format);
+ mem = simplefb_get_memory_of(dev, of_node);
+ if (IS_ERR(mem))
+ return ERR_CAST(mem);
} else {
drm_err(dev, "no simplefb configuration found\n");
return ERR_PTR(-ENODEV);
@@ -698,31 +707,55 @@ static struct simpledrm_device *simpledrm_device_create(struct drm_driver *drv,
* Memory management
*/
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return ERR_PTR(-EINVAL);
+ if (mem) {
+ void *screen_base;
- ret = devm_aperture_acquire_from_firmware(dev, res->start, resource_size(res));
- if (ret) {
- drm_err(dev, "could not acquire memory range %pr: error %d\n", res, ret);
- return ERR_PTR(ret);
- }
+ ret = devm_aperture_acquire_from_firmware(dev, mem->start, resource_size(mem));
+ if (ret) {
+ drm_err(dev, "could not acquire memory range %pr: %d\n", mem, ret);
+ return ERR_PTR(ret);
+ }
- mem = devm_request_mem_region(&pdev->dev, res->start, resource_size(res), drv->name);
- if (!mem) {
- /*
- * We cannot make this fatal. Sometimes this comes from magic
- * spaces our resource handlers simply don't know about. Use
- * the I/O-memory resource as-is and try to map that instead.
- */
- drm_warn(dev, "could not acquire memory region %pr\n", res);
- mem = res;
- }
+ drm_dbg(dev, "using system memory framebuffer at %pr\n", mem);
- screen_base = devm_ioremap_wc(&pdev->dev, mem->start, resource_size(mem));
- if (!screen_base)
- return ERR_PTR(-ENOMEM);
- sdev->screen_base = screen_base;
+ screen_base = devm_memremap(dev->dev, mem->start, resource_size(mem), MEMREMAP_WC);
+ if (IS_ERR(screen_base))
+ return screen_base;
+
+ iosys_map_set_vaddr(&sdev->screen_base, screen_base);
+ } else {
+ void __iomem *screen_base;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return ERR_PTR(-EINVAL);
+
+ ret = devm_aperture_acquire_from_firmware(dev, res->start, resource_size(res));
+ if (ret) {
+ drm_err(dev, "could not acquire memory range %pr: %d\n", &res, ret);
+ return ERR_PTR(ret);
+ }
+
+ drm_dbg(dev, "using I/O memory framebuffer at %pr\n", res);
+
+ mem = devm_request_mem_region(&pdev->dev, res->start, resource_size(res),
+ drv->name);
+ if (!mem) {
+ /*
+ * We cannot make this fatal. Sometimes this comes from magic
+ * spaces our resource handlers simply don't know about. Use
+ * the I/O-memory resource as-is and try to map that instead.
+ */
+ drm_warn(dev, "could not acquire memory region %pr\n", res);
+ mem = res;
+ }
+
+ screen_base = devm_ioremap_wc(&pdev->dev, mem->start, resource_size(mem));
+ if (!screen_base)
+ return ERR_PTR(-ENOMEM);
+
+ iosys_map_set_vaddr_iomem(&sdev->screen_base, screen_base);
+ }
/*
* Modesetting
@@ -739,14 +772,12 @@ static struct simpledrm_device *simpledrm_device_create(struct drm_driver *drv,
dev->mode_config.max_width = max_width;
dev->mode_config.min_height = height;
dev->mode_config.max_height = max_height;
- dev->mode_config.preferred_depth = format->cpp[0] * 8;
+ dev->mode_config.preferred_depth = format->depth;
dev->mode_config.funcs = &simpledrm_mode_config_funcs;
/* Primary plane */
nformats = drm_fb_build_fourcc_list(dev, &format->format, 1,
- simpledrm_primary_plane_formats,
- ARRAY_SIZE(simpledrm_primary_plane_formats),
sdev->formats, ARRAY_SIZE(sdev->formats));
primary_plane = &sdev->primary_plane;
@@ -823,6 +854,7 @@ static int simpledrm_probe(struct platform_device *pdev)
{
struct simpledrm_device *sdev;
struct drm_device *dev;
+ unsigned int color_mode;
int ret;
sdev = simpledrm_device_create(&simpledrm_driver, pdev);
@@ -834,7 +866,11 @@ static int simpledrm_probe(struct platform_device *pdev)
if (ret)
return ret;
- drm_fbdev_generic_setup(dev, 0);
+ color_mode = drm_format_info_bpp(sdev->format, 0);
+ if (color_mode == 16)
+ color_mode = sdev->format->depth; // can be 15 or 16
+
+ drm_fbdev_generic_setup(dev, color_mode);
return 0;
}
diff --git a/drivers/gpu/drm/tiny/st7586.c b/drivers/gpu/drm/tiny/st7586.c
index ce57fa9917e5..3cf4eec16a81 100644
--- a/drivers/gpu/drm/tiny/st7586.c
+++ b/drivers/gpu/drm/tiny/st7586.c
@@ -92,32 +92,28 @@ static void st7586_xrgb8888_to_gray332(u8 *dst, void *vaddr,
kfree(buf);
}
-static int st7586_buf_copy(void *dst, struct drm_framebuffer *fb,
+static int st7586_buf_copy(void *dst, struct iosys_map *src, struct drm_framebuffer *fb,
struct drm_rect *clip)
{
- struct drm_gem_dma_object *dma_obj = drm_fb_dma_get_gem_obj(fb, 0);
- void *src = dma_obj->vaddr;
- int ret = 0;
+ int ret;
ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE);
if (ret)
return ret;
- st7586_xrgb8888_to_gray332(dst, src, fb, clip);
+ st7586_xrgb8888_to_gray332(dst, src->vaddr, fb, clip);
drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
return 0;
}
-static void st7586_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect)
+static void st7586_fb_dirty(struct iosys_map *src, struct drm_framebuffer *fb,
+ struct drm_rect *rect)
{
struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(fb->dev);
struct mipi_dbi *dbi = &dbidev->dbi;
- int start, end, idx, ret = 0;
-
- if (!drm_dev_enter(fb->dev, &idx))
- return;
+ int start, end, ret = 0;
/* 3 pixels per byte, so grow clip to nearest multiple of 3 */
rect->x1 = rounddown(rect->x1, 3);
@@ -125,7 +121,7 @@ static void st7586_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect)
DRM_DEBUG_KMS("Flushing [FB:%d] " DRM_RECT_FMT "\n", fb->base.id, DRM_RECT_ARG(rect));
- ret = st7586_buf_copy(dbidev->tx_buf, fb, rect);
+ ret = st7586_buf_copy(dbidev->tx_buf, src, fb, rect);
if (ret)
goto err_msg;
@@ -146,21 +142,27 @@ static void st7586_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect)
err_msg:
if (ret)
dev_err_once(fb->dev->dev, "Failed to update display %d\n", ret);
-
- drm_dev_exit(idx);
}
static void st7586_pipe_update(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *old_state)
{
struct drm_plane_state *state = pipe->plane.state;
+ struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(state);
+ struct drm_framebuffer *fb = state->fb;
struct drm_rect rect;
+ int idx;
if (!pipe->crtc.state->active)
return;
+ if (!drm_dev_enter(fb->dev, &idx))
+ return;
+
if (drm_atomic_helper_damage_merged(old_state, state, &rect))
- st7586_fb_dirty(state->fb, &rect);
+ st7586_fb_dirty(&shadow_plane_state->data[0], fb, &rect);
+
+ drm_dev_exit(idx);
}
static void st7586_pipe_enable(struct drm_simple_display_pipe *pipe,
@@ -168,6 +170,7 @@ static void st7586_pipe_enable(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *plane_state)
{
struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(pipe->crtc.dev);
+ struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
struct drm_framebuffer *fb = plane_state->fb;
struct mipi_dbi *dbi = &dbidev->dbi;
struct drm_rect rect = {
@@ -235,7 +238,7 @@ static void st7586_pipe_enable(struct drm_simple_display_pipe *pipe,
msleep(100);
- st7586_fb_dirty(fb, &rect);
+ st7586_fb_dirty(&shadow_plane_state->data[0], fb, &rect);
mipi_dbi_command(dbi, MIPI_DCS_SET_DISPLAY_ON);
out_exit:
@@ -263,9 +266,15 @@ static const u32 st7586_formats[] = {
};
static const struct drm_simple_display_pipe_funcs st7586_pipe_funcs = {
+ .mode_valid = mipi_dbi_pipe_mode_valid,
.enable = st7586_pipe_enable,
.disable = st7586_pipe_disable,
.update = st7586_pipe_update,
+ .begin_fb_access = mipi_dbi_pipe_begin_fb_access,
+ .end_fb_access = mipi_dbi_pipe_end_fb_access,
+ .reset_plane = mipi_dbi_pipe_reset_plane,
+ .duplicate_plane_state = mipi_dbi_pipe_duplicate_plane_state,
+ .destroy_plane_state = mipi_dbi_pipe_destroy_plane_state,
};
static const struct drm_display_mode st7586_mode = {
diff --git a/drivers/gpu/drm/tiny/st7735r.c b/drivers/gpu/drm/tiny/st7735r.c
index 15d9cf283c66..477eb36fbb70 100644
--- a/drivers/gpu/drm/tiny/st7735r.c
+++ b/drivers/gpu/drm/tiny/st7735r.c
@@ -133,10 +133,7 @@ out_exit:
}
static const struct drm_simple_display_pipe_funcs st7735r_pipe_funcs = {
- .mode_valid = mipi_dbi_pipe_mode_valid,
- .enable = st7735r_pipe_enable,
- .disable = mipi_dbi_pipe_disable,
- .update = mipi_dbi_pipe_update,
+ DRM_MIPI_DBI_SIMPLE_DISPLAY_PIPE_FUNCS(st7735r_pipe_enable),
};
static const struct st7735r_cfg jd_t18003_t01_cfg = {
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index c3f4b33136e5..326a3d13a829 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -31,8 +31,10 @@
#define pr_fmt(fmt) "[TTM] " fmt
-#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_tt.h>
+
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/sched.h>
@@ -280,14 +282,13 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
ret = 0;
}
- if (ret || unlikely(list_empty(&bo->ddestroy))) {
+ if (ret) {
if (unlock_resv)
dma_resv_unlock(bo->base.resv);
spin_unlock(&bo->bdev->lru_lock);
return ret;
}
- list_del_init(&bo->ddestroy);
spin_unlock(&bo->bdev->lru_lock);
ttm_bo_cleanup_memtype_use(bo);
@@ -300,47 +301,21 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
}
/*
- * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
- * encountered buffers.
+ * Block for the dma_resv object to become idle, lock the buffer and clean up
+ * the resource and tt object.
*/
-bool ttm_bo_delayed_delete(struct ttm_device *bdev, bool remove_all)
+static void ttm_bo_delayed_delete(struct work_struct *work)
{
- struct list_head removed;
- bool empty;
-
- INIT_LIST_HEAD(&removed);
-
- spin_lock(&bdev->lru_lock);
- while (!list_empty(&bdev->ddestroy)) {
- struct ttm_buffer_object *bo;
-
- bo = list_first_entry(&bdev->ddestroy, struct ttm_buffer_object,
- ddestroy);
- list_move_tail(&bo->ddestroy, &removed);
- if (!ttm_bo_get_unless_zero(bo))
- continue;
-
- if (remove_all || bo->base.resv != &bo->base._resv) {
- spin_unlock(&bdev->lru_lock);
- dma_resv_lock(bo->base.resv, NULL);
-
- spin_lock(&bdev->lru_lock);
- ttm_bo_cleanup_refs(bo, false, !remove_all, true);
+ struct ttm_buffer_object *bo;
- } else if (dma_resv_trylock(bo->base.resv)) {
- ttm_bo_cleanup_refs(bo, false, !remove_all, true);
- } else {
- spin_unlock(&bdev->lru_lock);
- }
-
- ttm_bo_put(bo);
- spin_lock(&bdev->lru_lock);
- }
- list_splice_tail(&removed, &bdev->ddestroy);
- empty = list_empty(&bdev->ddestroy);
- spin_unlock(&bdev->lru_lock);
+ bo = container_of(work, typeof(*bo), delayed_delete);
- return empty;
+ dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP, false,
+ MAX_SCHEDULE_TIMEOUT);
+ dma_resv_lock(bo->base.resv, NULL);
+ ttm_bo_cleanup_memtype_use(bo);
+ dma_resv_unlock(bo->base.resv);
+ ttm_bo_put(bo);
}
static void ttm_bo_release(struct kref *kref)
@@ -369,69 +344,58 @@ static void ttm_bo_release(struct kref *kref)
drm_vma_offset_remove(bdev->vma_manager, &bo->base.vma_node);
ttm_mem_io_free(bdev, bo->resource);
- }
- if (!dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP) ||
- !dma_resv_trylock(bo->base.resv)) {
- /* The BO is not idle, resurrect it for delayed destroy */
- ttm_bo_flush_all_fences(bo);
- bo->deleted = true;
+ if (!dma_resv_test_signaled(bo->base.resv,
+ DMA_RESV_USAGE_BOOKKEEP) ||
+ !dma_resv_trylock(bo->base.resv)) {
+ /* The BO is not idle, resurrect it for delayed destroy */
+ ttm_bo_flush_all_fences(bo);
+ bo->deleted = true;
- spin_lock(&bo->bdev->lru_lock);
-
- /*
- * Make pinned bos immediately available to
- * shrinkers, now that they are queued for
- * destruction.
- *
- * FIXME: QXL is triggering this. Can be removed when the
- * driver is fixed.
- */
- if (bo->pin_count) {
- bo->pin_count = 0;
- ttm_resource_move_to_lru_tail(bo->resource);
- }
+ spin_lock(&bo->bdev->lru_lock);
- kref_init(&bo->kref);
- list_add_tail(&bo->ddestroy, &bdev->ddestroy);
- spin_unlock(&bo->bdev->lru_lock);
+ /*
+ * Make pinned bos immediately available to
+ * shrinkers, now that they are queued for
+ * destruction.
+ *
+ * FIXME: QXL is triggering this. Can be removed when the
+ * driver is fixed.
+ */
+ if (bo->pin_count) {
+ bo->pin_count = 0;
+ ttm_resource_move_to_lru_tail(bo->resource);
+ }
- schedule_delayed_work(&bdev->wq,
- ((HZ / 100) < 1) ? 1 : HZ / 100);
- return;
- }
+ kref_init(&bo->kref);
+ spin_unlock(&bo->bdev->lru_lock);
- spin_lock(&bo->bdev->lru_lock);
- list_del(&bo->ddestroy);
- spin_unlock(&bo->bdev->lru_lock);
+ INIT_WORK(&bo->delayed_delete, ttm_bo_delayed_delete);
+ queue_work(bdev->wq, &bo->delayed_delete);
+ return;
+ }
- ttm_bo_cleanup_memtype_use(bo);
- dma_resv_unlock(bo->base.resv);
+ ttm_bo_cleanup_memtype_use(bo);
+ dma_resv_unlock(bo->base.resv);
+ }
atomic_dec(&ttm_glob.bo_count);
bo->destroy(bo);
}
+/**
+ * ttm_bo_put
+ *
+ * @bo: The buffer object.
+ *
+ * Unreference a buffer object.
+ */
void ttm_bo_put(struct ttm_buffer_object *bo)
{
kref_put(&bo->kref, ttm_bo_release);
}
EXPORT_SYMBOL(ttm_bo_put);
-int ttm_bo_lock_delayed_workqueue(struct ttm_device *bdev)
-{
- return cancel_delayed_work_sync(&bdev->wq);
-}
-EXPORT_SYMBOL(ttm_bo_lock_delayed_workqueue);
-
-void ttm_bo_unlock_delayed_workqueue(struct ttm_device *bdev, int resched)
-{
- if (resched)
- schedule_delayed_work(&bdev->wq,
- ((HZ / 100) < 1) ? 1 : HZ / 100);
-}
-EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);
-
static int ttm_bo_bounce_temp_buffer(struct ttm_buffer_object *bo,
struct ttm_resource **mem,
struct ttm_operation_ctx *ctx,
@@ -475,7 +439,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo,
bdev->funcs->evict_flags(bo, &placement);
if (!placement.num_placement && !placement.num_busy_placement) {
- ret = ttm_bo_wait(bo, true, false);
+ ret = ttm_bo_wait_ctx(bo, ctx);
if (ret)
return ret;
@@ -512,6 +476,14 @@ out:
return ret;
}
+/**
+ * ttm_bo_eviction_valuable
+ *
+ * @bo: The buffer object to evict
+ * @place: the placement we need to make room for
+ *
+ * Check if it is valuable to evict the BO to make room for the given placement.
+ */
bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
const struct ttm_place *place)
{
@@ -771,13 +743,23 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
return ttm_bo_add_move_fence(bo, man, *mem, ctx->no_wait_gpu);
}
-/*
- * Creates space for memory region @mem according to its type.
+/**
+ * ttm_bo_mem_space
*
- * This function first searches for free space in compatible memory types in
- * the priority order defined by the driver. If free space isn't found, then
- * ttm_bo_mem_force_space is attempted in priority order to evict and find
- * space.
+ * @bo: Pointer to a struct ttm_buffer_object. the data of which
+ * we want to allocate space for.
+ * @proposed_placement: Proposed new placement for the buffer object.
+ * @mem: A struct ttm_resource.
+ * @ctx: if and how to sleep, lock buffers and alloc memory
+ *
+ * Allocate memory space for the buffer object pointed to by @bo, using
+ * the placement flags in @placement, potentially evicting other idle buffer objects.
+ * This function may sleep while waiting for space to become available.
+ * Returns:
+ * -EBUSY: No space available (only if no_wait == 1).
+ * -ENOMEM: Could not allocate memory for the buffer object, either due to
+ * fragmentation or concurrent allocators.
+ * -ERESTARTSYS: An interruptible sleep was interrupted by a signal.
*/
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
struct ttm_placement *placement,
@@ -883,6 +865,21 @@ out:
return ret;
}
+/**
+ * ttm_bo_validate
+ *
+ * @bo: The buffer object.
+ * @placement: Proposed placement for the buffer object.
+ * @ctx: validation parameters.
+ *
+ * Changes placement and caching policy of the buffer object
+ * according proposed placement.
+ * Returns
+ * -EINVAL on invalid proposed placement.
+ * -ENOMEM on out-of-memory condition.
+ * -EBUSY if no_wait is true and buffer busy.
+ * -ERESTARTSYS if interrupted by a signal.
+ */
int ttm_bo_validate(struct ttm_buffer_object *bo,
struct ttm_placement *placement,
struct ttm_operation_ctx *ctx)
@@ -960,7 +957,6 @@ int ttm_bo_init_reserved(struct ttm_device *bdev, struct ttm_buffer_object *bo,
int ret;
kref_init(&bo->kref);
- INIT_LIST_HEAD(&bo->ddestroy);
bo->bdev = bdev;
bo->type = type;
bo->page_alignment = alignment;
@@ -1076,6 +1072,11 @@ EXPORT_SYMBOL(ttm_bo_init_validate);
* buffer object vm functions.
*/
+/**
+ * ttm_bo_unmap_virtual
+ *
+ * @bo: tear down the virtual mappings for this BO
+ */
void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
{
struct ttm_device *bdev = bo->bdev;
@@ -1085,36 +1086,44 @@ void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
}
EXPORT_SYMBOL(ttm_bo_unmap_virtual);
-int ttm_bo_wait(struct ttm_buffer_object *bo,
- bool interruptible, bool no_wait)
+/**
+ * ttm_bo_wait_ctx - wait for buffer idle.
+ *
+ * @bo: The buffer object.
+ * @ctx: defines how to wait
+ *
+ * Waits for the buffer to be idle. Used timeout depends on the context.
+ * Returns -EBUSY if wait timed outt, -ERESTARTSYS if interrupted by a signal or
+ * zero on success.
+ */
+int ttm_bo_wait_ctx(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx)
{
- long timeout = 15 * HZ;
+ long ret;
- if (no_wait) {
- if (dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP))
+ if (ctx->no_wait_gpu) {
+ if (dma_resv_test_signaled(bo->base.resv,
+ DMA_RESV_USAGE_BOOKKEEP))
return 0;
else
return -EBUSY;
}
- timeout = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP,
- interruptible, timeout);
- if (timeout < 0)
- return timeout;
-
- if (timeout == 0)
+ ret = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP,
+ ctx->interruptible, 15 * HZ);
+ if (unlikely(ret < 0))
+ return ret;
+ if (unlikely(ret == 0))
return -EBUSY;
-
return 0;
}
-EXPORT_SYMBOL(ttm_bo_wait);
+EXPORT_SYMBOL(ttm_bo_wait_ctx);
int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
gfp_t gfp_flags)
{
struct ttm_place place;
bool locked;
- int ret;
+ long ret;
/*
* While the bo may already reside in SYSTEM placement, set
@@ -1169,7 +1178,7 @@ int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
/*
* Make sure BO is idle.
*/
- ret = ttm_bo_wait(bo, false, false);
+ ret = ttm_bo_wait_ctx(bo, ctx);
if (unlikely(ret != 0))
goto out;
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index da5493f789df..7635d7d6b13b 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -29,18 +29,13 @@
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*/
-#include <drm/ttm/ttm_bo_driver.h>
+#include <linux/vmalloc.h>
+
+#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_tt.h>
+
#include <drm/drm_cache.h>
-#include <drm/drm_vma_manager.h>
-#include <linux/iosys-map.h>
-#include <linux/io.h>
-#include <linux/highmem.h>
-#include <linux/wait.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-#include <linux/module.h>
-#include <linux/dma-resv.h>
struct ttm_transfer_obj {
struct ttm_buffer_object base;
@@ -128,6 +123,22 @@ void ttm_move_memcpy(bool clear,
}
EXPORT_SYMBOL(ttm_move_memcpy);
+/**
+ * ttm_bo_move_memcpy
+ *
+ * @bo: A pointer to a struct ttm_buffer_object.
+ * @ctx: operation context
+ * @dst_mem: struct ttm_resource indicating where to move.
+ *
+ * Fallback move function for a mappable buffer object in mappable memory.
+ * The function will, if successful,
+ * free any old aperture space, and set (@new_mem)->mm_node to NULL,
+ * and update the (@bo)->mem placement flags. If unsuccessful, the old
+ * data remains untouched, and it's up to the caller to free the
+ * memory space indicated by @new_mem.
+ * Returns:
+ * !0: Failure.
+ */
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
struct ttm_operation_ctx *ctx,
struct ttm_resource *dst_mem)
@@ -230,7 +241,6 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
*/
atomic_inc(&ttm_glob.bo_count);
- INIT_LIST_HEAD(&fbo->base.ddestroy);
drm_vma_node_reset(&fbo->base.base.vma_node);
kref_init(&fbo->base.kref);
@@ -267,6 +277,16 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
return 0;
}
+/**
+ * ttm_io_prot
+ *
+ * @bo: ttm buffer object
+ * @res: ttm resource object
+ * @tmp: Page protection flag for a normal, cached mapping.
+ *
+ * Utility function that returns the pgprot_t that should be used for
+ * setting up a PTE with the caching model indicated by @c_state.
+ */
pgprot_t ttm_io_prot(struct ttm_buffer_object *bo, struct ttm_resource *res,
pgprot_t tmp)
{
@@ -348,6 +368,22 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
return (!map->virtual) ? -ENOMEM : 0;
}
+/**
+ * ttm_bo_kmap
+ *
+ * @bo: The buffer object.
+ * @start_page: The first page to map.
+ * @num_pages: Number of pages to map.
+ * @map: pointer to a struct ttm_bo_kmap_obj representing the map.
+ *
+ * Sets up a kernel virtual mapping, using ioremap, vmap or kmap to the
+ * data in the buffer object. The ttm_kmap_obj_virtual function can then be
+ * used to obtain a virtual address to the data.
+ *
+ * Returns
+ * -ENOMEM: Out of memory.
+ * -EINVAL: Invalid range.
+ */
int ttm_bo_kmap(struct ttm_buffer_object *bo,
unsigned long start_page, unsigned long num_pages,
struct ttm_bo_kmap_obj *map)
@@ -375,6 +411,13 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo,
}
EXPORT_SYMBOL(ttm_bo_kmap);
+/**
+ * ttm_bo_kunmap
+ *
+ * @map: Object describing the map to unmap.
+ *
+ * Unmaps a kernel map set up by ttm_bo_kmap.
+ */
void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
if (!map->virtual)
@@ -400,6 +443,20 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
}
EXPORT_SYMBOL(ttm_bo_kunmap);
+/**
+ * ttm_bo_vmap
+ *
+ * @bo: The buffer object.
+ * @map: pointer to a struct iosys_map representing the map.
+ *
+ * Sets up a kernel virtual mapping, using ioremap or vmap to the
+ * data in the buffer object. The parameter @map returns the virtual
+ * address as struct iosys_map. Unmap the buffer with ttm_bo_vunmap().
+ *
+ * Returns
+ * -ENOMEM: Out of memory.
+ * -EINVAL: Invalid range.
+ */
int ttm_bo_vmap(struct ttm_buffer_object *bo, struct iosys_map *map)
{
struct ttm_resource *mem = bo->resource;
@@ -461,6 +518,14 @@ int ttm_bo_vmap(struct ttm_buffer_object *bo, struct iosys_map *map)
}
EXPORT_SYMBOL(ttm_bo_vmap);
+/**
+ * ttm_bo_vunmap
+ *
+ * @bo: The buffer object.
+ * @map: Object describing the map to unmap.
+ *
+ * Unmaps a kernel map set up by ttm_bo_vmap().
+ */
void ttm_bo_vunmap(struct ttm_buffer_object *bo, struct iosys_map *map)
{
struct ttm_resource *mem = bo->resource;
@@ -483,9 +548,13 @@ EXPORT_SYMBOL(ttm_bo_vunmap);
static int ttm_bo_wait_free_node(struct ttm_buffer_object *bo,
bool dst_use_tt)
{
- int ret;
- ret = ttm_bo_wait(bo, false, false);
- if (ret)
+ long ret;
+
+ ret = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP,
+ false, 15 * HZ);
+ if (ret == 0)
+ return -EBUSY;
+ if (ret < 0)
return ret;
if (!dst_use_tt)
@@ -554,6 +623,22 @@ static void ttm_bo_move_pipeline_evict(struct ttm_buffer_object *bo,
ttm_resource_free(bo, &bo->resource);
}
+/**
+ * ttm_bo_move_accel_cleanup - cleanup helper for hw copies
+ *
+ * @bo: A pointer to a struct ttm_buffer_object.
+ * @fence: A fence object that signals when moving is complete.
+ * @evict: This is an evict move. Don't return until the buffer is idle.
+ * @pipeline: evictions are to be pipelined.
+ * @new_mem: struct ttm_resource indicating where to move.
+ *
+ * Accelerated move function to be called when an accelerated move
+ * has been scheduled. The function will create a new temporary buffer object
+ * representing the old placement, and put the sync object on both buffer
+ * objects. After that the newly created buffer object is unref'd to be
+ * destroyed when the move is complete. This will help pipeline
+ * buffer moves.
+ */
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
struct dma_fence *fence,
bool evict,
@@ -582,6 +667,15 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
+/**
+ * ttm_bo_move_sync_cleanup - cleanup by waiting for the move to finish
+ *
+ * @bo: A pointer to a struct ttm_buffer_object.
+ * @new_mem: struct ttm_resource indicating where to move.
+ *
+ * Special case of ttm_bo_move_accel_cleanup where the bo is guaranteed
+ * by the caller to be idle. Typically used after memcpy buffer moves.
+ */
void ttm_bo_move_sync_cleanup(struct ttm_buffer_object *bo,
struct ttm_resource *new_mem)
{
@@ -621,8 +715,7 @@ int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
return ret;
/* If already idle, no need for ghost object dance. */
- ret = ttm_bo_wait(bo, false, true);
- if (ret != -EBUSY) {
+ if (dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP)) {
if (!bo->ttm) {
/* See comment below about clearing. */
ret = ttm_tt_create(bo, true);
@@ -659,8 +752,10 @@ int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
ret = dma_resv_copy_fences(&ghost->base._resv, bo->base.resv);
/* Last resort, wait for the BO to be idle when we are OOM */
- if (ret)
- ttm_bo_wait(bo, false, false);
+ if (ret) {
+ dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP,
+ false, MAX_SCHEDULE_TIMEOUT);
+ }
dma_resv_unlock(&ghost->base._resv);
ttm_bo_put(ghost);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 5a3e4b891377..ca7744b852f5 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -31,17 +31,12 @@
#define pr_fmt(fmt) "[TTM] " fmt
-#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>
-#include <drm/drm_vma_manager.h>
+#include <drm/ttm/ttm_tt.h>
+
#include <drm/drm_drv.h>
#include <drm/drm_managed.h>
-#include <linux/mm.h>
-#include <linux/pfn_t.h>
-#include <linux/rbtree.h>
-#include <linux/module.h>
-#include <linux/uaccess.h>
-#include <linux/mem_encrypt.h>
static vm_fault_t ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
struct vm_fault *vmf)
@@ -446,6 +441,14 @@ static const struct vm_operations_struct ttm_bo_vm_ops = {
.access = ttm_bo_vm_access,
};
+/**
+ * ttm_bo_mmap_obj - mmap memory backed by a ttm buffer object.
+ *
+ * @vma: vma as input from the fbdev mmap method.
+ * @bo: The bo backing the address space.
+ *
+ * Maps a buffer object.
+ */
int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
{
/* Enforce no COW since would have really strange behavior with it. */
@@ -468,8 +471,7 @@ int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
vma->vm_private_data = bo;
- vma->vm_flags |= VM_PFNMAP;
- vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+ vm_flags_set(vma, VM_PFNMAP | VM_IO | VM_DONTEXPAND | VM_DONTDUMP);
return 0;
}
EXPORT_SYMBOL(ttm_bo_mmap_obj);
diff --git a/drivers/gpu/drm/ttm/ttm_device.c b/drivers/gpu/drm/ttm/ttm_device.c
index e7147e304637..c7a1862f322a 100644
--- a/drivers/gpu/drm/ttm/ttm_device.c
+++ b/drivers/gpu/drm/ttm/ttm_device.c
@@ -29,10 +29,10 @@
#include <linux/mm.h>
+#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_device.h>
#include <drm/ttm/ttm_tt.h>
#include <drm/ttm/ttm_placement.h>
-#include <drm/ttm/ttm_bo_api.h>
#include "ttm_module.h"
@@ -175,16 +175,6 @@ int ttm_device_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx,
}
EXPORT_SYMBOL(ttm_device_swapout);
-static void ttm_device_delayed_workqueue(struct work_struct *work)
-{
- struct ttm_device *bdev =
- container_of(work, struct ttm_device, wq.work);
-
- if (!ttm_bo_delayed_delete(bdev, false))
- schedule_delayed_work(&bdev->wq,
- ((HZ / 100) < 1) ? 1 : HZ / 100);
-}
-
/**
* ttm_device_init
*
@@ -215,15 +205,19 @@ int ttm_device_init(struct ttm_device *bdev, struct ttm_device_funcs *funcs,
if (ret)
return ret;
+ bdev->wq = alloc_workqueue("ttm", WQ_MEM_RECLAIM | WQ_HIGHPRI, 16);
+ if (!bdev->wq) {
+ ttm_global_release();
+ return -ENOMEM;
+ }
+
bdev->funcs = funcs;
ttm_sys_man_init(bdev);
ttm_pool_init(&bdev->pool, dev, use_dma_alloc, use_dma32);
bdev->vma_manager = vma_manager;
- INIT_DELAYED_WORK(&bdev->wq, ttm_device_delayed_workqueue);
spin_lock_init(&bdev->lru_lock);
- INIT_LIST_HEAD(&bdev->ddestroy);
INIT_LIST_HEAD(&bdev->pinned);
bdev->dev_mapping = mapping;
mutex_lock(&ttm_global_mutex);
@@ -247,10 +241,8 @@ void ttm_device_fini(struct ttm_device *bdev)
list_del(&bdev->device_list);
mutex_unlock(&ttm_global_mutex);
- cancel_delayed_work_sync(&bdev->wq);
-
- if (ttm_bo_delayed_delete(bdev, true))
- pr_debug("Delayed destroy list was clean\n");
+ drain_workqueue(bdev->wq);
+ destroy_workqueue(bdev->wq);
spin_lock(&bdev->lru_lock);
for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index dbee34a058df..f1c60fa80c2d 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -27,11 +27,7 @@
**************************************************************************/
#include <drm/ttm/ttm_execbuf_util.h>
-#include <drm/ttm/ttm_bo_driver.h>
-#include <drm/ttm/ttm_placement.h>
-#include <linux/wait.h>
-#include <linux/sched.h>
-#include <linux/module.h>
+#include <drm/ttm/ttm_bo.h>
static void ttm_eu_backoff_reservation_reverse(struct list_head *list,
struct ttm_validate_buffer *entry)
diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c
index 9f6764bf3b15..aa116a7bbae3 100644
--- a/drivers/gpu/drm/ttm/ttm_pool.c
+++ b/drivers/gpu/drm/ttm/ttm_pool.c
@@ -33,6 +33,7 @@
#include <linux/module.h>
#include <linux/dma-mapping.h>
+#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/sched/mm.h>
@@ -41,8 +42,8 @@
#endif
#include <drm/ttm/ttm_pool.h>
-#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_tt.h>
+#include <drm/ttm/ttm_bo.h>
#include "ttm_module.h"
diff --git a/drivers/gpu/drm/ttm/ttm_range_manager.c b/drivers/gpu/drm/ttm/ttm_range_manager.c
index 0a8bc0b7f380..ae11d07eb63a 100644
--- a/drivers/gpu/drm/ttm/ttm_range_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_range_manager.c
@@ -32,7 +32,7 @@
#include <drm/ttm/ttm_device.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_range_manager.h>
-#include <drm/ttm/ttm_bo_api.h>
+#include <drm/ttm/ttm_bo.h>
#include <drm/drm_mm.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c
index 328391bb1d87..b8a826a24fb2 100644
--- a/drivers/gpu/drm/ttm/ttm_resource.c
+++ b/drivers/gpu/drm/ttm/ttm_resource.c
@@ -26,8 +26,9 @@
#include <linux/io-mapping.h>
#include <linux/scatterlist.h>
+#include <drm/ttm/ttm_bo.h>
+#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_resource.h>
-#include <drm/ttm/ttm_bo_driver.h>
/**
* ttm_lru_bulk_move_init - initialize a bulk move structure
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index d505603930a7..ab725d9d14a6 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -36,7 +36,8 @@
#include <linux/file.h>
#include <linux/module.h>
#include <drm/drm_cache.h>
-#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_bo.h>
+#include <drm/ttm/ttm_tt.h>
#include "ttm_module.h"
diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c
index e81352126a0f..1506094a8009 100644
--- a/drivers/gpu/drm/udl/udl_drv.c
+++ b/drivers/gpu/drm/udl/udl_drv.c
@@ -5,12 +5,12 @@
#include <linux/module.h>
-#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_generic.h>
#include <drm/drm_file.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_managed.h>
+#include <drm/drm_modeset_helper.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_print.h>
diff --git a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c
index 4b79d44752c9..aa02fd2789c3 100644
--- a/drivers/gpu/drm/udl/udl_modeset.c
+++ b/drivers/gpu/drm/udl/udl_modeset.c
@@ -12,7 +12,6 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
diff --git a/drivers/gpu/drm/v3d/v3d_debugfs.c b/drivers/gpu/drm/v3d/v3d_debugfs.c
index efbde124c296..330669f51fa7 100644
--- a/drivers/gpu/drm/v3d/v3d_debugfs.c
+++ b/drivers/gpu/drm/v3d/v3d_debugfs.c
@@ -79,8 +79,8 @@ static const struct v3d_reg_def v3d_csd_reg_defs[] = {
static int v3d_v3d_debugfs_regs(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = (struct drm_info_node *)m->private;
- struct drm_device *dev = node->minor->dev;
+ struct drm_debugfs_entry *entry = m->private;
+ struct drm_device *dev = entry->dev;
struct v3d_dev *v3d = to_v3d_dev(dev);
int i, core;
@@ -126,8 +126,8 @@ static int v3d_v3d_debugfs_regs(struct seq_file *m, void *unused)
static int v3d_v3d_debugfs_ident(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = (struct drm_info_node *)m->private;
- struct drm_device *dev = node->minor->dev;
+ struct drm_debugfs_entry *entry = m->private;
+ struct drm_device *dev = entry->dev;
struct v3d_dev *v3d = to_v3d_dev(dev);
u32 ident0, ident1, ident2, ident3, cores;
int core;
@@ -188,8 +188,8 @@ static int v3d_v3d_debugfs_ident(struct seq_file *m, void *unused)
static int v3d_debugfs_bo_stats(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = (struct drm_info_node *)m->private;
- struct drm_device *dev = node->minor->dev;
+ struct drm_debugfs_entry *entry = m->private;
+ struct drm_device *dev = entry->dev;
struct v3d_dev *v3d = to_v3d_dev(dev);
mutex_lock(&v3d->bo_lock);
@@ -204,8 +204,8 @@ static int v3d_debugfs_bo_stats(struct seq_file *m, void *unused)
static int v3d_measure_clock(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = (struct drm_info_node *)m->private;
- struct drm_device *dev = node->minor->dev;
+ struct drm_debugfs_entry *entry = m->private;
+ struct drm_device *dev = entry->dev;
struct v3d_dev *v3d = to_v3d_dev(dev);
uint32_t cycles;
int core = 0;
@@ -236,7 +236,7 @@ static int v3d_measure_clock(struct seq_file *m, void *unused)
return 0;
}
-static const struct drm_info_list v3d_debugfs_list[] = {
+static const struct drm_debugfs_info v3d_debugfs_list[] = {
{"v3d_ident", v3d_v3d_debugfs_ident, 0},
{"v3d_regs", v3d_v3d_debugfs_regs, 0},
{"measure_clock", v3d_measure_clock, 0},
@@ -246,7 +246,5 @@ static const struct drm_info_list v3d_debugfs_list[] = {
void
v3d_debugfs_init(struct drm_minor *minor)
{
- drm_debugfs_create_files(v3d_debugfs_list,
- ARRAY_SIZE(v3d_debugfs_list),
- minor->debugfs_root, minor);
+ drm_debugfs_add_files(minor->dev, v3d_debugfs_list, ARRAY_SIZE(v3d_debugfs_list));
}
diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c
index 96af1cb5202a..5da1806f3969 100644
--- a/drivers/gpu/drm/v3d/v3d_gem.c
+++ b/drivers/gpu/drm/v3d/v3d_gem.c
@@ -299,10 +299,6 @@ v3d_lookup_bos(struct drm_device *dev,
u64 bo_handles,
u32 bo_count)
{
- u32 *handles;
- int ret = 0;
- int i;
-
job->bo_count = bo_count;
if (!job->bo_count) {
@@ -313,48 +309,9 @@ v3d_lookup_bos(struct drm_device *dev,
return -EINVAL;
}
- job->bo = kvmalloc_array(job->bo_count,
- sizeof(struct drm_gem_dma_object *),
- GFP_KERNEL | __GFP_ZERO);
- if (!job->bo) {
- DRM_DEBUG("Failed to allocate validated BO pointers\n");
- return -ENOMEM;
- }
-
- handles = kvmalloc_array(job->bo_count, sizeof(u32), GFP_KERNEL);
- if (!handles) {
- ret = -ENOMEM;
- DRM_DEBUG("Failed to allocate incoming GEM handles\n");
- goto fail;
- }
-
- if (copy_from_user(handles,
- (void __user *)(uintptr_t)bo_handles,
- job->bo_count * sizeof(u32))) {
- ret = -EFAULT;
- DRM_DEBUG("Failed to copy in GEM handles\n");
- goto fail;
- }
-
- spin_lock(&file_priv->table_lock);
- for (i = 0; i < job->bo_count; i++) {
- struct drm_gem_object *bo = idr_find(&file_priv->object_idr,
- handles[i]);
- if (!bo) {
- DRM_DEBUG("Failed to look up GEM BO %d: %d\n",
- i, handles[i]);
- ret = -ENOENT;
- spin_unlock(&file_priv->table_lock);
- goto fail;
- }
- drm_gem_object_get(bo);
- job->bo[i] = bo;
- }
- spin_unlock(&file_priv->table_lock);
-
-fail:
- kvfree(handles);
- return ret;
+ return drm_gem_objects_lookup(file_priv,
+ (void __user *)(uintptr_t)bo_handles,
+ job->bo_count, &job->bo);
}
static void
@@ -363,11 +320,11 @@ v3d_job_free(struct kref *ref)
struct v3d_job *job = container_of(ref, struct v3d_job, refcount);
int i;
- for (i = 0; i < job->bo_count; i++) {
- if (job->bo[i])
+ if (job->bo) {
+ for (i = 0; i < job->bo_count; i++)
drm_gem_object_put(job->bo[i]);
+ kvfree(job->bo);
}
- kvfree(job->bo);
dma_fence_put(job->irq_fence);
dma_fence_put(job->done_fence);
@@ -904,7 +861,6 @@ v3d_submit_tfu_ioctl(struct drm_device *dev, void *data,
job->args = *args;
- spin_lock(&file_priv->table_lock);
for (job->base.bo_count = 0;
job->base.bo_count < ARRAY_SIZE(args->bo_handles);
job->base.bo_count++) {
@@ -913,20 +869,16 @@ v3d_submit_tfu_ioctl(struct drm_device *dev, void *data,
if (!args->bo_handles[job->base.bo_count])
break;
- bo = idr_find(&file_priv->object_idr,
- args->bo_handles[job->base.bo_count]);
+ bo = drm_gem_object_lookup(file_priv, args->bo_handles[job->base.bo_count]);
if (!bo) {
DRM_DEBUG("Failed to look up GEM BO %d: %d\n",
job->base.bo_count,
args->bo_handles[job->base.bo_count]);
ret = -ENOENT;
- spin_unlock(&file_priv->table_lock);
goto fail;
}
- drm_gem_object_get(bo);
job->base.bo[job->base.bo_count] = bo;
}
- spin_unlock(&file_priv->table_lock);
ret = v3d_lock_bo_reservations(&job->base, &acquire_ctx);
if (ret)
diff --git a/drivers/gpu/drm/vboxvideo/vbox_drv.c b/drivers/gpu/drm/vboxvideo/vbox_drv.c
index b450f449a3ab..4fee15c97c34 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_drv.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_drv.c
@@ -12,12 +12,12 @@
#include <linux/vt_kern.h>
#include <drm/drm_aperture.h>
-#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_generic.h>
#include <drm/drm_file.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_managed.h>
+#include <drm/drm_modeset_helper.h>
#include <drm/drm_module.h>
#include "vbox_drv.h"
@@ -102,7 +102,6 @@ static void vbox_pci_remove(struct pci_dev *pdev)
vbox_hw_fini(vbox);
}
-#ifdef CONFIG_PM_SLEEP
static int vbox_pm_suspend(struct device *dev)
{
struct vbox_private *vbox = dev_get_drvdata(dev);
@@ -160,16 +159,13 @@ static const struct dev_pm_ops vbox_pm_ops = {
.poweroff = vbox_pm_poweroff,
.restore = vbox_pm_resume,
};
-#endif
static struct pci_driver vbox_pci_driver = {
.name = DRIVER_NAME,
.id_table = pciidlist,
.probe = vbox_pci_probe,
.remove = vbox_pci_remove,
-#ifdef CONFIG_PM_SLEEP
- .driver.pm = &vbox_pm_ops,
-#endif
+ .driver.pm = pm_sleep_ptr(&vbox_pm_ops),
};
DEFINE_DRM_GEM_FOPS(vbox_fops);
diff --git a/drivers/gpu/drm/vboxvideo/vbox_main.c b/drivers/gpu/drm/vboxvideo/vbox_main.c
index 3b83e550f4df..42c2d8a99509 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_main.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_main.c
@@ -11,7 +11,6 @@
#include <linux/pci.h>
#include <linux/vbox_err.h>
-#include <drm/drm_crtc_helper.h>
#include <drm/drm_damage_helper.h>
#include "vbox_drv.h"
diff --git a/drivers/gpu/drm/vc4/Kconfig b/drivers/gpu/drm/vc4/Kconfig
index 246305d17a52..91dcf8d174d6 100644
--- a/drivers/gpu/drm/vc4/Kconfig
+++ b/drivers/gpu/drm/vc4/Kconfig
@@ -34,3 +34,19 @@ config DRM_VC4_HDMI_CEC
help
Choose this option if you have a Broadcom VC4 GPU
and want to use CEC.
+
+config DRM_VC4_KUNIT_TEST
+ tristate "KUnit tests for VC4" if !KUNIT_ALL_TESTS
+ depends on DRM_VC4 && KUNIT
+ select DRM_KUNIT_TEST_HELPERS
+ default KUNIT_ALL_TESTS
+ help
+ This builds unit tests for the VC4 DRM/KMS driver. This option is
+ not useful for distributions or general kernels, but only for kernel
+ developers working on the VC4 driver.
+
+ For more information on KUnit and unit tests in general,
+ please refer to the KUnit documentation in
+ Documentation/dev-tools/kunit/.
+
+ If in doubt, say "N".
diff --git a/drivers/gpu/drm/vc4/Makefile b/drivers/gpu/drm/vc4/Makefile
index d0163e18e9ca..c41f89a15a55 100644
--- a/drivers/gpu/drm/vc4/Makefile
+++ b/drivers/gpu/drm/vc4/Makefile
@@ -25,6 +25,13 @@ vc4-y := \
vc4_validate.o \
vc4_validate_shaders.o
+vc4-$(CONFIG_DRM_VC4_KUNIT_TEST) += \
+ tests/vc4_mock.o \
+ tests/vc4_mock_crtc.o \
+ tests/vc4_mock_output.o \
+ tests/vc4_mock_plane.o \
+ tests/vc4_test_pv_muxing.o
+
vc4-$(CONFIG_DEBUG_FS) += vc4_debugfs.o
obj-$(CONFIG_DRM_VC4) += vc4.o
diff --git a/drivers/gpu/drm/vc4/tests/.kunitconfig b/drivers/gpu/drm/vc4/tests/.kunitconfig
new file mode 100644
index 000000000000..b503e9036c7f
--- /dev/null
+++ b/drivers/gpu/drm/vc4/tests/.kunitconfig
@@ -0,0 +1,13 @@
+CONFIG_ARCH_BCM=y
+CONFIG_ARCH_BCM2835=y
+CONFIG_BCM2835_MBOX=y
+CONFIG_KUNIT=y
+CONFIG_DRM=y
+CONFIG_DRM_VC4=y
+CONFIG_DRM_VC4_KUNIT_TEST=y
+CONFIG_MAILBOX=y
+CONFIG_RASPBERRYPI_FIRMWARE=y
+CONFIG_SND=y
+CONFIG_SND_SOC=y
+CONFIG_SOUND=y
+CONFIG_COMMON_CLK=y
diff --git a/drivers/gpu/drm/vc4/tests/vc4_mock.c b/drivers/gpu/drm/vc4/tests/vc4_mock.c
new file mode 100644
index 000000000000..a4bed26af32f
--- /dev/null
+++ b/drivers/gpu/drm/vc4/tests/vc4_mock.c
@@ -0,0 +1,200 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <drm/drm_drv.h>
+#include <drm/drm_kunit_helpers.h>
+
+#include <kunit/test.h>
+
+#include "vc4_mock.h"
+
+struct vc4_mock_output_desc {
+ enum vc4_encoder_type vc4_encoder_type;
+ unsigned int encoder_type;
+ unsigned int connector_type;
+};
+
+#define VC4_MOCK_OUTPUT_DESC(_vc4_type, _etype, _ctype) \
+ { \
+ .vc4_encoder_type = _vc4_type, \
+ .encoder_type = _etype, \
+ .connector_type = _ctype, \
+ }
+
+struct vc4_mock_pipe_desc {
+ const struct vc4_crtc_data *data;
+ const struct vc4_mock_output_desc *outputs;
+ unsigned int noutputs;
+};
+
+#define VC4_MOCK_CRTC_DESC(_data, ...) \
+ { \
+ .data = _data, \
+ .outputs = (struct vc4_mock_output_desc[]) { __VA_ARGS__ }, \
+ .noutputs = sizeof((struct vc4_mock_output_desc[]) { __VA_ARGS__ }) / \
+ sizeof(struct vc4_mock_output_desc), \
+ }
+
+#define VC4_MOCK_PIXELVALVE_DESC(_data, ...) \
+ VC4_MOCK_CRTC_DESC(&(_data)->base, __VA_ARGS__)
+
+struct vc4_mock_desc {
+ const struct vc4_mock_pipe_desc *pipes;
+ unsigned int npipes;
+};
+
+#define VC4_MOCK_DESC(...) \
+ { \
+ .pipes = (struct vc4_mock_pipe_desc[]) { __VA_ARGS__ }, \
+ .npipes = sizeof((struct vc4_mock_pipe_desc[]) { __VA_ARGS__ }) / \
+ sizeof(struct vc4_mock_pipe_desc), \
+ }
+
+static const struct vc4_mock_desc vc4_mock =
+ VC4_MOCK_DESC(
+ VC4_MOCK_CRTC_DESC(&vc4_txp_crtc_data,
+ VC4_MOCK_OUTPUT_DESC(VC4_ENCODER_TYPE_TXP,
+ DRM_MODE_ENCODER_VIRTUAL,
+ DRM_MODE_CONNECTOR_WRITEBACK)),
+ VC4_MOCK_PIXELVALVE_DESC(&bcm2835_pv0_data,
+ VC4_MOCK_OUTPUT_DESC(VC4_ENCODER_TYPE_DSI0,
+ DRM_MODE_ENCODER_DSI,
+ DRM_MODE_CONNECTOR_DSI),
+ VC4_MOCK_OUTPUT_DESC(VC4_ENCODER_TYPE_DPI,
+ DRM_MODE_ENCODER_DPI,
+ DRM_MODE_CONNECTOR_DPI)),
+ VC4_MOCK_PIXELVALVE_DESC(&bcm2835_pv1_data,
+ VC4_MOCK_OUTPUT_DESC(VC4_ENCODER_TYPE_DSI1,
+ DRM_MODE_ENCODER_DSI,
+ DRM_MODE_CONNECTOR_DSI)),
+ VC4_MOCK_PIXELVALVE_DESC(&bcm2835_pv2_data,
+ VC4_MOCK_OUTPUT_DESC(VC4_ENCODER_TYPE_HDMI0,
+ DRM_MODE_ENCODER_TMDS,
+ DRM_MODE_CONNECTOR_HDMIA),
+ VC4_MOCK_OUTPUT_DESC(VC4_ENCODER_TYPE_VEC,
+ DRM_MODE_ENCODER_TVDAC,
+ DRM_MODE_CONNECTOR_Composite)),
+);
+
+static const struct vc4_mock_desc vc5_mock =
+ VC4_MOCK_DESC(
+ VC4_MOCK_CRTC_DESC(&vc4_txp_crtc_data,
+ VC4_MOCK_OUTPUT_DESC(VC4_ENCODER_TYPE_TXP,
+ DRM_MODE_ENCODER_VIRTUAL,
+ DRM_MODE_CONNECTOR_WRITEBACK)),
+ VC4_MOCK_PIXELVALVE_DESC(&bcm2711_pv0_data,
+ VC4_MOCK_OUTPUT_DESC(VC4_ENCODER_TYPE_DSI0,
+ DRM_MODE_ENCODER_DSI,
+ DRM_MODE_CONNECTOR_DSI),
+ VC4_MOCK_OUTPUT_DESC(VC4_ENCODER_TYPE_DPI,
+ DRM_MODE_ENCODER_DPI,
+ DRM_MODE_CONNECTOR_DPI)),
+ VC4_MOCK_PIXELVALVE_DESC(&bcm2711_pv1_data,
+ VC4_MOCK_OUTPUT_DESC(VC4_ENCODER_TYPE_DSI1,
+ DRM_MODE_ENCODER_DSI,
+ DRM_MODE_CONNECTOR_DSI)),
+ VC4_MOCK_PIXELVALVE_DESC(&bcm2711_pv2_data,
+ VC4_MOCK_OUTPUT_DESC(VC4_ENCODER_TYPE_HDMI0,
+ DRM_MODE_ENCODER_TMDS,
+ DRM_MODE_CONNECTOR_HDMIA)),
+ VC4_MOCK_PIXELVALVE_DESC(&bcm2711_pv3_data,
+ VC4_MOCK_OUTPUT_DESC(VC4_ENCODER_TYPE_VEC,
+ DRM_MODE_ENCODER_TVDAC,
+ DRM_MODE_CONNECTOR_Composite)),
+ VC4_MOCK_PIXELVALVE_DESC(&bcm2711_pv4_data,
+ VC4_MOCK_OUTPUT_DESC(VC4_ENCODER_TYPE_HDMI1,
+ DRM_MODE_ENCODER_TMDS,
+ DRM_MODE_CONNECTOR_HDMIA)),
+);
+
+static int __build_one_pipe(struct kunit *test, struct drm_device *drm,
+ const struct vc4_mock_pipe_desc *pipe)
+{
+ struct vc4_dummy_plane *dummy_plane;
+ struct drm_plane *plane;
+ struct vc4_dummy_crtc *dummy_crtc;
+ struct drm_crtc *crtc;
+ unsigned int i;
+
+ dummy_plane = vc4_dummy_plane(test, drm, DRM_PLANE_TYPE_PRIMARY);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dummy_plane);
+
+ plane = &dummy_plane->plane.base;
+ dummy_crtc = vc4_mock_pv(test, drm, plane, pipe->data);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dummy_crtc);
+
+ crtc = &dummy_crtc->crtc.base;
+ for (i = 0; i < pipe->noutputs; i++) {
+ const struct vc4_mock_output_desc *mock_output = &pipe->outputs[i];
+ struct vc4_dummy_output *dummy_output;
+
+ dummy_output = vc4_dummy_output(test, drm, crtc,
+ mock_output->vc4_encoder_type,
+ mock_output->encoder_type,
+ mock_output->connector_type);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dummy_output);
+ }
+
+ return 0;
+}
+
+static int __build_mock(struct kunit *test, struct drm_device *drm,
+ const struct vc4_mock_desc *mock)
+{
+ unsigned int i;
+
+ for (i = 0; i < mock->npipes; i++) {
+ const struct vc4_mock_pipe_desc *pipe = &mock->pipes[i];
+ int ret;
+
+ ret = __build_one_pipe(test, drm, pipe);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+ }
+
+ return 0;
+}
+
+static struct vc4_dev *__mock_device(struct kunit *test, bool is_vc5)
+{
+ struct drm_device *drm;
+ const struct drm_driver *drv = is_vc5 ? &vc5_drm_driver : &vc4_drm_driver;
+ const struct vc4_mock_desc *desc = is_vc5 ? &vc5_mock : &vc4_mock;
+ struct vc4_dev *vc4;
+ struct device *dev;
+ int ret;
+
+ dev = drm_kunit_helper_alloc_device(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
+
+ vc4 = drm_kunit_helper_alloc_drm_device_with_driver(test, dev,
+ struct vc4_dev, base,
+ drv);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, vc4);
+
+ vc4->dev = dev;
+ vc4->is_vc5 = is_vc5;
+
+ vc4->hvs = __vc4_hvs_alloc(vc4, NULL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, vc4->hvs);
+
+ drm = &vc4->base;
+ ret = __build_mock(test, drm, desc);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ ret = vc4_kms_load(drm);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ ret = drm_dev_register(drm, 0);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ return vc4;
+}
+
+struct vc4_dev *vc4_mock_device(struct kunit *test)
+{
+ return __mock_device(test, false);
+}
+
+struct vc4_dev *vc5_mock_device(struct kunit *test)
+{
+ return __mock_device(test, true);
+}
diff --git a/drivers/gpu/drm/vc4/tests/vc4_mock.h b/drivers/gpu/drm/vc4/tests/vc4_mock.h
new file mode 100644
index 000000000000..db8e9a141ef8
--- /dev/null
+++ b/drivers/gpu/drm/vc4/tests/vc4_mock.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef VC4_MOCK_H_
+#define VC4_MOCK_H_
+
+#include "../vc4_drv.h"
+
+static inline
+struct drm_crtc *vc4_find_crtc_for_encoder(struct kunit *test,
+ struct drm_device *drm,
+ struct drm_encoder *encoder)
+{
+ struct drm_crtc *crtc;
+
+ KUNIT_ASSERT_EQ(test, hweight32(encoder->possible_crtcs), 1);
+
+ drm_for_each_crtc(crtc, drm)
+ if (encoder->possible_crtcs & drm_crtc_mask(crtc))
+ return crtc;
+
+ return NULL;
+}
+
+struct vc4_dummy_plane {
+ struct vc4_plane plane;
+};
+
+struct vc4_dummy_plane *vc4_dummy_plane(struct kunit *test,
+ struct drm_device *drm,
+ enum drm_plane_type type);
+
+struct vc4_dummy_crtc {
+ struct vc4_crtc crtc;
+};
+
+struct vc4_dummy_crtc *vc4_mock_pv(struct kunit *test,
+ struct drm_device *drm,
+ struct drm_plane *plane,
+ const struct vc4_crtc_data *data);
+
+struct vc4_dummy_output {
+ struct vc4_encoder encoder;
+ struct drm_connector connector;
+};
+
+struct vc4_dummy_output *vc4_dummy_output(struct kunit *test,
+ struct drm_device *drm,
+ struct drm_crtc *crtc,
+ enum vc4_encoder_type vc4_encoder_type,
+ unsigned int kms_encoder_type,
+ unsigned int connector_type);
+
+struct vc4_dev *vc4_mock_device(struct kunit *test);
+struct vc4_dev *vc5_mock_device(struct kunit *test);
+
+int vc4_mock_atomic_add_output(struct kunit *test,
+ struct drm_atomic_state *state,
+ enum vc4_encoder_type type);
+int vc4_mock_atomic_del_output(struct kunit *test,
+ struct drm_atomic_state *state,
+ enum vc4_encoder_type type);
+
+#endif // VC4_MOCK_H_
diff --git a/drivers/gpu/drm/vc4/tests/vc4_mock_crtc.c b/drivers/gpu/drm/vc4/tests/vc4_mock_crtc.c
new file mode 100644
index 000000000000..5d12d7beef0e
--- /dev/null
+++ b/drivers/gpu/drm/vc4/tests/vc4_mock_crtc.c
@@ -0,0 +1,41 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_modeset_helper_vtables.h>
+
+#include <kunit/test.h>
+
+#include "vc4_mock.h"
+
+static const struct drm_crtc_helper_funcs vc4_dummy_crtc_helper_funcs = {
+ .atomic_check = vc4_crtc_atomic_check,
+};
+
+static const struct drm_crtc_funcs vc4_dummy_crtc_funcs = {
+ .atomic_destroy_state = vc4_crtc_destroy_state,
+ .atomic_duplicate_state = vc4_crtc_duplicate_state,
+ .reset = vc4_crtc_reset,
+};
+
+struct vc4_dummy_crtc *vc4_mock_pv(struct kunit *test,
+ struct drm_device *drm,
+ struct drm_plane *plane,
+ const struct vc4_crtc_data *data)
+{
+ struct vc4_dummy_crtc *dummy_crtc;
+ struct vc4_crtc *vc4_crtc;
+ int ret;
+
+ dummy_crtc = kunit_kzalloc(test, sizeof(*dummy_crtc), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, dummy_crtc);
+
+ vc4_crtc = &dummy_crtc->crtc;
+ ret = __vc4_crtc_init(drm, NULL,
+ vc4_crtc, data, plane,
+ &vc4_dummy_crtc_funcs,
+ &vc4_dummy_crtc_helper_funcs,
+ false);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ return dummy_crtc;
+}
diff --git a/drivers/gpu/drm/vc4/tests/vc4_mock_output.c b/drivers/gpu/drm/vc4/tests/vc4_mock_output.c
new file mode 100644
index 000000000000..8d33be828d9a
--- /dev/null
+++ b/drivers/gpu/drm/vc4/tests/vc4_mock_output.c
@@ -0,0 +1,138 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_atomic_uapi.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_encoder.h>
+#include <drm/drm_modeset_helper_vtables.h>
+
+#include <kunit/test.h>
+
+#include "vc4_mock.h"
+
+static const struct drm_connector_helper_funcs vc4_dummy_connector_helper_funcs = {
+};
+
+static const struct drm_connector_funcs vc4_dummy_connector_funcs = {
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .reset = drm_atomic_helper_connector_reset,
+};
+
+struct vc4_dummy_output *vc4_dummy_output(struct kunit *test,
+ struct drm_device *drm,
+ struct drm_crtc *crtc,
+ enum vc4_encoder_type vc4_encoder_type,
+ unsigned int kms_encoder_type,
+ unsigned int connector_type)
+{
+ struct vc4_dummy_output *dummy_output;
+ struct drm_connector *conn;
+ struct drm_encoder *enc;
+ int ret;
+
+ dummy_output = kunit_kzalloc(test, sizeof(*dummy_output), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dummy_output);
+ dummy_output->encoder.type = vc4_encoder_type;
+
+ enc = &dummy_output->encoder.base;
+ ret = drmm_encoder_init(drm, enc,
+ NULL,
+ kms_encoder_type,
+ NULL);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+ enc->possible_crtcs = drm_crtc_mask(crtc);
+
+ conn = &dummy_output->connector;
+ ret = drmm_connector_init(drm, conn,
+ &vc4_dummy_connector_funcs,
+ connector_type,
+ NULL);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ drm_connector_helper_add(conn, &vc4_dummy_connector_helper_funcs);
+ drm_connector_attach_encoder(conn, enc);
+
+ return dummy_output;
+}
+
+static const struct drm_display_mode default_mode = {
+ DRM_SIMPLE_MODE(640, 480, 64, 48)
+};
+
+int vc4_mock_atomic_add_output(struct kunit *test,
+ struct drm_atomic_state *state,
+ enum vc4_encoder_type type)
+{
+ struct drm_device *drm = state->dev;
+ struct drm_connector_state *conn_state;
+ struct drm_crtc_state *crtc_state;
+ struct vc4_dummy_output *output;
+ struct drm_connector *conn;
+ struct drm_encoder *encoder;
+ struct drm_crtc *crtc;
+ int ret;
+
+ encoder = vc4_find_encoder_by_type(drm, type);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, encoder);
+
+ crtc = vc4_find_crtc_for_encoder(test, drm, encoder);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc);
+
+ output = container_of(encoder, struct vc4_dummy_output, encoder.base);
+ conn = &output->connector;
+ conn_state = drm_atomic_get_connector_state(state, conn);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, conn_state);
+
+ ret = drm_atomic_set_crtc_for_connector(conn_state, crtc);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_state);
+
+ ret = drm_atomic_set_mode_for_crtc(crtc_state, &default_mode);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ crtc_state->active = true;
+
+ return 0;
+}
+
+int vc4_mock_atomic_del_output(struct kunit *test,
+ struct drm_atomic_state *state,
+ enum vc4_encoder_type type)
+{
+ struct drm_device *drm = state->dev;
+ struct drm_connector_state *conn_state;
+ struct drm_crtc_state *crtc_state;
+ struct vc4_dummy_output *output;
+ struct drm_connector *conn;
+ struct drm_encoder *encoder;
+ struct drm_crtc *crtc;
+ int ret;
+
+ encoder = vc4_find_encoder_by_type(drm, type);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, encoder);
+
+ crtc = vc4_find_crtc_for_encoder(test, drm, encoder);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc);
+
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_state);
+
+ crtc_state->active = false;
+
+ ret = drm_atomic_set_mode_for_crtc(crtc_state, NULL);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ output = container_of(encoder, struct vc4_dummy_output, encoder.base);
+ conn = &output->connector;
+ conn_state = drm_atomic_get_connector_state(state, conn);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, conn_state);
+
+ ret = drm_atomic_set_crtc_for_connector(conn_state, NULL);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/vc4/tests/vc4_mock_plane.c b/drivers/gpu/drm/vc4/tests/vc4_mock_plane.c
new file mode 100644
index 000000000000..62b18f5f41db
--- /dev/null
+++ b/drivers/gpu/drm/vc4/tests/vc4_mock_plane.c
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_plane.h>
+
+#include <kunit/test.h>
+
+#include "vc4_mock.h"
+
+static const struct drm_plane_helper_funcs vc4_dummy_plane_helper_funcs = {
+};
+
+static const struct drm_plane_funcs vc4_dummy_plane_funcs = {
+ .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+ .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+ .reset = drm_atomic_helper_plane_reset,
+};
+
+static const uint32_t vc4_dummy_plane_formats[] = {
+ DRM_FORMAT_XRGB8888,
+};
+
+struct vc4_dummy_plane *vc4_dummy_plane(struct kunit *test,
+ struct drm_device *drm,
+ enum drm_plane_type type)
+{
+ struct vc4_dummy_plane *dummy_plane;
+ struct drm_plane *plane;
+
+ dummy_plane = drmm_universal_plane_alloc(drm,
+ struct vc4_dummy_plane, plane.base,
+ 0,
+ &vc4_dummy_plane_funcs,
+ vc4_dummy_plane_formats,
+ ARRAY_SIZE(vc4_dummy_plane_formats),
+ NULL,
+ DRM_PLANE_TYPE_PRIMARY,
+ NULL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dummy_plane);
+
+ plane = &dummy_plane->plane.base;
+ drm_plane_helper_add(plane, &vc4_dummy_plane_helper_funcs);
+
+ return dummy_plane;
+}
diff --git a/drivers/gpu/drm/vc4/tests/vc4_test_pv_muxing.c b/drivers/gpu/drm/vc4/tests/vc4_test_pv_muxing.c
new file mode 100644
index 000000000000..ae0bd0f81698
--- /dev/null
+++ b/drivers/gpu/drm/vc4/tests/vc4_test_pv_muxing.c
@@ -0,0 +1,1039 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_atomic_uapi.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_kunit_helpers.h>
+#include <drm/drm_mode.h>
+#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_plane.h>
+
+#include <kunit/test.h>
+
+#include "../vc4_drv.h"
+
+#include "vc4_mock.h"
+
+struct pv_muxing_priv {
+ struct vc4_dev *vc4;
+ struct drm_modeset_acquire_ctx ctx;
+ struct drm_atomic_state *state;
+};
+
+static bool check_fifo_conflict(struct kunit *test,
+ const struct drm_atomic_state *state)
+{
+ struct vc4_hvs_state *hvs_state;
+ unsigned int used_fifos = 0;
+ unsigned int i;
+
+ hvs_state = vc4_hvs_get_new_global_state(state);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, hvs_state);
+
+ for (i = 0; i < HVS_NUM_CHANNELS; i++) {
+ if (!hvs_state->fifo_state[i].in_use)
+ continue;
+
+ KUNIT_EXPECT_FALSE(test, used_fifos & BIT(i));
+ used_fifos |= BIT(i);
+ }
+
+ return true;
+}
+
+struct encoder_constraint {
+ enum vc4_encoder_type type;
+ unsigned int *channels;
+ size_t nchannels;
+};
+
+#define ENCODER_CONSTRAINT(_type, ...) \
+ { \
+ .type = _type, \
+ .channels = (unsigned int[]) { __VA_ARGS__ }, \
+ .nchannels = sizeof((unsigned int[]) { __VA_ARGS__ }) / \
+ sizeof(unsigned int), \
+ }
+
+static bool __check_encoder_constraints(const struct encoder_constraint *constraints,
+ size_t nconstraints,
+ enum vc4_encoder_type type,
+ unsigned int channel)
+{
+ unsigned int i;
+
+ for (i = 0; i < nconstraints; i++) {
+ const struct encoder_constraint *constraint = &constraints[i];
+ unsigned int j;
+
+ if (constraint->type != type)
+ continue;
+
+ for (j = 0; j < constraint->nchannels; j++) {
+ unsigned int _channel = constraint->channels[j];
+
+ if (channel != _channel)
+ continue;
+
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static const struct encoder_constraint vc4_encoder_constraints[] = {
+ ENCODER_CONSTRAINT(VC4_ENCODER_TYPE_DPI, 0),
+ ENCODER_CONSTRAINT(VC4_ENCODER_TYPE_DSI0, 0),
+ ENCODER_CONSTRAINT(VC4_ENCODER_TYPE_HDMI0, 1),
+ ENCODER_CONSTRAINT(VC4_ENCODER_TYPE_VEC, 1),
+ ENCODER_CONSTRAINT(VC4_ENCODER_TYPE_TXP, 2),
+ ENCODER_CONSTRAINT(VC4_ENCODER_TYPE_DSI1, 2),
+};
+
+static const struct encoder_constraint vc5_encoder_constraints[] = {
+ ENCODER_CONSTRAINT(VC4_ENCODER_TYPE_DPI, 0),
+ ENCODER_CONSTRAINT(VC4_ENCODER_TYPE_DSI0, 0),
+ ENCODER_CONSTRAINT(VC4_ENCODER_TYPE_VEC, 1),
+ ENCODER_CONSTRAINT(VC4_ENCODER_TYPE_TXP, 0, 2),
+ ENCODER_CONSTRAINT(VC4_ENCODER_TYPE_DSI1, 0, 1, 2),
+ ENCODER_CONSTRAINT(VC4_ENCODER_TYPE_HDMI0, 0, 1, 2),
+ ENCODER_CONSTRAINT(VC4_ENCODER_TYPE_HDMI1, 0, 1, 2),
+};
+
+static bool check_vc4_encoder_constraints(enum vc4_encoder_type type, unsigned int channel)
+{
+ return __check_encoder_constraints(vc4_encoder_constraints,
+ ARRAY_SIZE(vc4_encoder_constraints),
+ type, channel);
+}
+
+static bool check_vc5_encoder_constraints(enum vc4_encoder_type type, unsigned int channel)
+{
+ return __check_encoder_constraints(vc5_encoder_constraints,
+ ARRAY_SIZE(vc5_encoder_constraints),
+ type, channel);
+}
+
+static struct vc4_crtc_state *
+get_vc4_crtc_state_for_encoder(struct kunit *test,
+ const struct drm_atomic_state *state,
+ enum vc4_encoder_type type)
+{
+ struct drm_device *drm = state->dev;
+ struct drm_crtc_state *new_crtc_state;
+ struct drm_encoder *encoder;
+ struct drm_crtc *crtc;
+
+ encoder = vc4_find_encoder_by_type(drm, type);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, encoder);
+
+ crtc = vc4_find_crtc_for_encoder(test, drm, encoder);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc);
+
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+ if (!new_crtc_state)
+ return NULL;
+
+ return to_vc4_crtc_state(new_crtc_state);
+}
+
+static bool check_channel_for_encoder(struct kunit *test,
+ const struct drm_atomic_state *state,
+ enum vc4_encoder_type type,
+ bool (*check_fn)(enum vc4_encoder_type type, unsigned int channel))
+{
+ struct vc4_crtc_state *new_vc4_crtc_state;
+ struct vc4_hvs_state *new_hvs_state;
+ unsigned int channel;
+
+ new_hvs_state = vc4_hvs_get_new_global_state(state);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, new_hvs_state);
+
+ new_vc4_crtc_state = get_vc4_crtc_state_for_encoder(test, state, type);
+ KUNIT_ASSERT_NOT_NULL(test, new_vc4_crtc_state);
+
+ channel = new_vc4_crtc_state->assigned_channel;
+ KUNIT_EXPECT_NE(test, channel, VC4_HVS_CHANNEL_DISABLED);
+
+ KUNIT_EXPECT_TRUE(test, new_hvs_state->fifo_state[channel].in_use);
+
+ KUNIT_EXPECT_TRUE(test, check_fn(type, channel));
+
+ return true;
+}
+
+struct pv_muxing_param {
+ const char *name;
+ struct vc4_dev *(*mock_fn)(struct kunit *test);
+ bool (*check_fn)(enum vc4_encoder_type type, unsigned int channel);
+ enum vc4_encoder_type *encoders;
+ size_t nencoders;
+};
+
+static void vc4_test_pv_muxing_desc(const struct pv_muxing_param *t, char *desc)
+{
+ strscpy(desc, t->name, KUNIT_PARAM_DESC_SIZE);
+}
+
+#define PV_MUXING_TEST(_name, _mock_fn, _check_fn, ...) \
+ { \
+ .name = _name, \
+ .mock_fn = &_mock_fn, \
+ .check_fn = &_check_fn, \
+ .encoders = (enum vc4_encoder_type[]) { __VA_ARGS__ }, \
+ .nencoders = sizeof((enum vc4_encoder_type[]) { __VA_ARGS__ }) / \
+ sizeof(enum vc4_encoder_type), \
+ }
+
+#define VC4_PV_MUXING_TEST(_name, ...) \
+ PV_MUXING_TEST(_name, vc4_mock_device, check_vc4_encoder_constraints, __VA_ARGS__)
+
+#define VC5_PV_MUXING_TEST(_name, ...) \
+ PV_MUXING_TEST(_name, vc5_mock_device, check_vc5_encoder_constraints, __VA_ARGS__)
+
+static const struct pv_muxing_param vc4_test_pv_muxing_params[] = {
+ VC4_PV_MUXING_TEST("1 output: DSI0",
+ VC4_ENCODER_TYPE_DSI0),
+ VC4_PV_MUXING_TEST("1 output: DPI",
+ VC4_ENCODER_TYPE_DPI),
+ VC4_PV_MUXING_TEST("1 output: HDMI0",
+ VC4_ENCODER_TYPE_HDMI0),
+ VC4_PV_MUXING_TEST("1 output: VEC",
+ VC4_ENCODER_TYPE_VEC),
+ VC4_PV_MUXING_TEST("1 output: DSI1",
+ VC4_ENCODER_TYPE_DSI1),
+ VC4_PV_MUXING_TEST("1 output: TXP",
+ VC4_ENCODER_TYPE_TXP),
+ VC4_PV_MUXING_TEST("2 outputs: DSI0, HDMI0",
+ VC4_ENCODER_TYPE_DSI0,
+ VC4_ENCODER_TYPE_HDMI0),
+ VC4_PV_MUXING_TEST("2 outputs: DSI0, VEC",
+ VC4_ENCODER_TYPE_DSI0,
+ VC4_ENCODER_TYPE_VEC),
+ VC4_PV_MUXING_TEST("2 outputs: DSI0, DSI1",
+ VC4_ENCODER_TYPE_DSI0,
+ VC4_ENCODER_TYPE_DSI1),
+ VC4_PV_MUXING_TEST("2 outputs: DSI0, TXP",
+ VC4_ENCODER_TYPE_DSI0,
+ VC4_ENCODER_TYPE_TXP),
+ VC4_PV_MUXING_TEST("2 outputs: DPI, HDMI0",
+ VC4_ENCODER_TYPE_DPI,
+ VC4_ENCODER_TYPE_HDMI0),
+ VC4_PV_MUXING_TEST("2 outputs: DPI, VEC",
+ VC4_ENCODER_TYPE_DPI,
+ VC4_ENCODER_TYPE_VEC),
+ VC4_PV_MUXING_TEST("2 outputs: DPI, DSI1",
+ VC4_ENCODER_TYPE_DPI,
+ VC4_ENCODER_TYPE_DSI1),
+ VC4_PV_MUXING_TEST("2 outputs: DPI, TXP",
+ VC4_ENCODER_TYPE_DPI,
+ VC4_ENCODER_TYPE_TXP),
+ VC4_PV_MUXING_TEST("2 outputs: HDMI0, DSI1",
+ VC4_ENCODER_TYPE_HDMI0,
+ VC4_ENCODER_TYPE_DSI1),
+ VC4_PV_MUXING_TEST("2 outputs: HDMI0, TXP",
+ VC4_ENCODER_TYPE_HDMI0,
+ VC4_ENCODER_TYPE_TXP),
+ VC4_PV_MUXING_TEST("2 outputs: VEC, DSI1",
+ VC4_ENCODER_TYPE_VEC,
+ VC4_ENCODER_TYPE_DSI1),
+ VC4_PV_MUXING_TEST("2 outputs: VEC, TXP",
+ VC4_ENCODER_TYPE_VEC,
+ VC4_ENCODER_TYPE_TXP),
+ VC4_PV_MUXING_TEST("3 outputs: DSI0, HDMI0, DSI1",
+ VC4_ENCODER_TYPE_DSI0,
+ VC4_ENCODER_TYPE_HDMI0,
+ VC4_ENCODER_TYPE_DSI1),
+ VC4_PV_MUXING_TEST("3 outputs: DSI0, HDMI0, TXP",
+ VC4_ENCODER_TYPE_DSI0,
+ VC4_ENCODER_TYPE_HDMI0,
+ VC4_ENCODER_TYPE_TXP),
+ VC4_PV_MUXING_TEST("3 outputs: DSI0, VEC, DSI1",
+ VC4_ENCODER_TYPE_DSI0,
+ VC4_ENCODER_TYPE_VEC,
+ VC4_ENCODER_TYPE_DSI1),
+ VC4_PV_MUXING_TEST("3 outputs: DSI0, VEC, TXP",
+ VC4_ENCODER_TYPE_DSI0,
+ VC4_ENCODER_TYPE_VEC,
+ VC4_ENCODER_TYPE_TXP),
+ VC4_PV_MUXING_TEST("3 outputs: DPI, HDMI0, DSI1",
+ VC4_ENCODER_TYPE_DPI,
+ VC4_ENCODER_TYPE_HDMI0,
+ VC4_ENCODER_TYPE_DSI1),
+ VC4_PV_MUXING_TEST("3 outputs: DPI, HDMI0, TXP",
+ VC4_ENCODER_TYPE_DPI,
+ VC4_ENCODER_TYPE_HDMI0,
+ VC4_ENCODER_TYPE_TXP),
+ VC4_PV_MUXING_TEST("3 outputs: DPI, VEC, DSI1",
+ VC4_ENCODER_TYPE_DPI,
+ VC4_ENCODER_TYPE_VEC,
+ VC4_ENCODER_TYPE_DSI1),
+ VC4_PV_MUXING_TEST("3 outputs: DPI, VEC, TXP",
+ VC4_ENCODER_TYPE_DPI,
+ VC4_ENCODER_TYPE_VEC,
+ VC4_ENCODER_TYPE_TXP),
+};
+
+KUNIT_ARRAY_PARAM(vc4_test_pv_muxing,
+ vc4_test_pv_muxing_params,
+ vc4_test_pv_muxing_desc);
+
+static const struct pv_muxing_param vc4_test_pv_muxing_invalid_params[] = {
+ VC4_PV_MUXING_TEST("DPI/DSI0 Conflict",
+ VC4_ENCODER_TYPE_DPI,
+ VC4_ENCODER_TYPE_DSI0),
+ VC4_PV_MUXING_TEST("TXP/DSI1 Conflict",
+ VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_DSI1),
+ VC4_PV_MUXING_TEST("HDMI0/VEC Conflict",
+ VC4_ENCODER_TYPE_HDMI0,
+ VC4_ENCODER_TYPE_VEC),
+ VC4_PV_MUXING_TEST("More than 3 outputs: DSI0, HDMI0, DSI1, TXP",
+ VC4_ENCODER_TYPE_DSI0,
+ VC4_ENCODER_TYPE_HDMI0,
+ VC4_ENCODER_TYPE_DSI1,
+ VC4_ENCODER_TYPE_TXP),
+ VC4_PV_MUXING_TEST("More than 3 outputs: DSI0, VEC, DSI1, TXP",
+ VC4_ENCODER_TYPE_DSI0,
+ VC4_ENCODER_TYPE_VEC,
+ VC4_ENCODER_TYPE_DSI1,
+ VC4_ENCODER_TYPE_TXP),
+ VC4_PV_MUXING_TEST("More than 3 outputs: DPI, HDMI0, DSI1, TXP",
+ VC4_ENCODER_TYPE_DPI,
+ VC4_ENCODER_TYPE_HDMI0,
+ VC4_ENCODER_TYPE_DSI1,
+ VC4_ENCODER_TYPE_TXP),
+ VC4_PV_MUXING_TEST("More than 3 outputs: DPI, VEC, DSI1, TXP",
+ VC4_ENCODER_TYPE_DPI,
+ VC4_ENCODER_TYPE_VEC,
+ VC4_ENCODER_TYPE_DSI1,
+ VC4_ENCODER_TYPE_TXP),
+};
+
+KUNIT_ARRAY_PARAM(vc4_test_pv_muxing_invalid,
+ vc4_test_pv_muxing_invalid_params,
+ vc4_test_pv_muxing_desc);
+
+static const struct pv_muxing_param vc5_test_pv_muxing_params[] = {
+ VC5_PV_MUXING_TEST("1 output: DPI",
+ VC4_ENCODER_TYPE_DPI),
+ VC5_PV_MUXING_TEST("1 output: DSI0",
+ VC4_ENCODER_TYPE_DSI0),
+ VC5_PV_MUXING_TEST("1 output: DSI1",
+ VC4_ENCODER_TYPE_DSI1),
+ VC5_PV_MUXING_TEST("1 output: HDMI0",
+ VC4_ENCODER_TYPE_HDMI0),
+ VC5_PV_MUXING_TEST("1 output: HDMI1",
+ VC4_ENCODER_TYPE_HDMI1),
+ VC5_PV_MUXING_TEST("1 output: VEC",
+ VC4_ENCODER_TYPE_VEC),
+ VC5_PV_MUXING_TEST("2 outputs: DPI, DSI1",
+ VC4_ENCODER_TYPE_DPI,
+ VC4_ENCODER_TYPE_DSI1),
+ VC5_PV_MUXING_TEST("2 outputs: DPI, HDMI0",
+ VC4_ENCODER_TYPE_DPI,
+ VC4_ENCODER_TYPE_HDMI0),
+ VC5_PV_MUXING_TEST("2 outputs: DPI, HDMI1",
+ VC4_ENCODER_TYPE_DPI,
+ VC4_ENCODER_TYPE_HDMI1),
+ VC5_PV_MUXING_TEST("2 outputs: DPI, TXP",
+ VC4_ENCODER_TYPE_DPI,
+ VC4_ENCODER_TYPE_TXP),
+ VC5_PV_MUXING_TEST("2 outputs: DPI, VEC",
+ VC4_ENCODER_TYPE_DPI,
+ VC4_ENCODER_TYPE_VEC),
+ VC5_PV_MUXING_TEST("2 outputs: DPI, DSI1",
+ VC4_ENCODER_TYPE_DPI,
+ VC4_ENCODER_TYPE_DSI1),
+ VC5_PV_MUXING_TEST("2 outputs: DSI0, DSI1",
+ VC4_ENCODER_TYPE_DSI0,
+ VC4_ENCODER_TYPE_DSI1),
+ VC5_PV_MUXING_TEST("2 outputs: DSI0, HDMI0",
+ VC4_ENCODER_TYPE_DSI0,
+ VC4_ENCODER_TYPE_HDMI0),
+ VC5_PV_MUXING_TEST("2 outputs: DSI0, HDMI1",
+ VC4_ENCODER_TYPE_DSI0,
+ VC4_ENCODER_TYPE_HDMI1),
+ VC5_PV_MUXING_TEST("2 outputs: DSI0, TXP",
+ VC4_ENCODER_TYPE_DSI0,
+ VC4_ENCODER_TYPE_TXP),
+ VC5_PV_MUXING_TEST("2 outputs: DSI0, VEC",
+ VC4_ENCODER_TYPE_DSI0,
+ VC4_ENCODER_TYPE_VEC),
+ VC5_PV_MUXING_TEST("2 outputs: DSI0, DSI1",
+ VC4_ENCODER_TYPE_DSI0,
+ VC4_ENCODER_TYPE_DSI1),
+ VC5_PV_MUXING_TEST("2 outputs: DSI1, VEC",
+ VC4_ENCODER_TYPE_DSI1,
+ VC4_ENCODER_TYPE_VEC),
+ VC5_PV_MUXING_TEST("2 outputs: DSI1, TXP",
+ VC4_ENCODER_TYPE_DSI1,
+ VC4_ENCODER_TYPE_TXP),
+ VC5_PV_MUXING_TEST("2 outputs: DSI1, HDMI0",
+ VC4_ENCODER_TYPE_DSI1,
+ VC4_ENCODER_TYPE_HDMI0),
+ VC5_PV_MUXING_TEST("2 outputs: DSI1, HDMI1",
+ VC4_ENCODER_TYPE_DSI1,
+ VC4_ENCODER_TYPE_HDMI1),
+ VC5_PV_MUXING_TEST("2 outputs: HDMI0, VEC",
+ VC4_ENCODER_TYPE_HDMI0,
+ VC4_ENCODER_TYPE_VEC),
+ VC5_PV_MUXING_TEST("2 outputs: HDMI0, TXP",
+ VC4_ENCODER_TYPE_HDMI0,
+ VC4_ENCODER_TYPE_TXP),
+ VC5_PV_MUXING_TEST("2 outputs: HDMI0, HDMI1",
+ VC4_ENCODER_TYPE_HDMI0,
+ VC4_ENCODER_TYPE_HDMI1),
+ VC5_PV_MUXING_TEST("2 outputs: HDMI1, VEC",
+ VC4_ENCODER_TYPE_HDMI1,
+ VC4_ENCODER_TYPE_VEC),
+ VC5_PV_MUXING_TEST("2 outputs: HDMI1, TXP",
+ VC4_ENCODER_TYPE_HDMI1,
+ VC4_ENCODER_TYPE_TXP),
+ VC5_PV_MUXING_TEST("2 outputs: TXP, VEC",
+ VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_VEC),
+ VC5_PV_MUXING_TEST("3 outputs: DPI, VEC, TXP",
+ VC4_ENCODER_TYPE_DPI,
+ VC4_ENCODER_TYPE_VEC,
+ VC4_ENCODER_TYPE_TXP),
+ VC5_PV_MUXING_TEST("3 outputs: DPI, VEC, DSI1",
+ VC4_ENCODER_TYPE_DPI,
+ VC4_ENCODER_TYPE_VEC,
+ VC4_ENCODER_TYPE_DSI1),
+ VC5_PV_MUXING_TEST("3 outputs: DPI, VEC, HDMI0",
+ VC4_ENCODER_TYPE_DPI,
+ VC4_ENCODER_TYPE_VEC,
+ VC4_ENCODER_TYPE_HDMI0),
+ VC5_PV_MUXING_TEST("3 outputs: DPI, VEC, HDMI1",
+ VC4_ENCODER_TYPE_DPI,
+ VC4_ENCODER_TYPE_VEC,
+ VC4_ENCODER_TYPE_HDMI1),
+ VC5_PV_MUXING_TEST("3 outputs: DPI, TXP, DSI1",
+ VC4_ENCODER_TYPE_DPI,
+ VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_DSI1),
+ VC5_PV_MUXING_TEST("3 outputs: DPI, TXP, HDMI0",
+ VC4_ENCODER_TYPE_DPI,
+ VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_HDMI0),
+ VC5_PV_MUXING_TEST("3 outputs: DPI, TXP, HDMI1",
+ VC4_ENCODER_TYPE_DPI,
+ VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_HDMI1),
+ VC5_PV_MUXING_TEST("3 outputs: DPI, DSI1, HDMI0",
+ VC4_ENCODER_TYPE_DPI,
+ VC4_ENCODER_TYPE_DSI1,
+ VC4_ENCODER_TYPE_HDMI0),
+ VC5_PV_MUXING_TEST("3 outputs: DPI, DSI1, HDMI1",
+ VC4_ENCODER_TYPE_DPI,
+ VC4_ENCODER_TYPE_DSI1,
+ VC4_ENCODER_TYPE_HDMI1),
+ VC5_PV_MUXING_TEST("3 outputs: DPI, HDMI0, HDMI1",
+ VC4_ENCODER_TYPE_DPI,
+ VC4_ENCODER_TYPE_HDMI0,
+ VC4_ENCODER_TYPE_HDMI1),
+ VC5_PV_MUXING_TEST("3 outputs: DSI0, VEC, TXP",
+ VC4_ENCODER_TYPE_DSI0,
+ VC4_ENCODER_TYPE_VEC,
+ VC4_ENCODER_TYPE_TXP),
+ VC5_PV_MUXING_TEST("3 outputs: DSI0, VEC, DSI1",
+ VC4_ENCODER_TYPE_DSI0,
+ VC4_ENCODER_TYPE_VEC,
+ VC4_ENCODER_TYPE_DSI1),
+ VC5_PV_MUXING_TEST("3 outputs: DSI0, VEC, HDMI0",
+ VC4_ENCODER_TYPE_DSI0,
+ VC4_ENCODER_TYPE_VEC,
+ VC4_ENCODER_TYPE_HDMI0),
+ VC5_PV_MUXING_TEST("3 outputs: DSI0, VEC, HDMI1",
+ VC4_ENCODER_TYPE_DSI0,
+ VC4_ENCODER_TYPE_VEC,
+ VC4_ENCODER_TYPE_HDMI1),
+ VC5_PV_MUXING_TEST("3 outputs: DSI0, TXP, DSI1",
+ VC4_ENCODER_TYPE_DSI0,
+ VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_DSI1),
+ VC5_PV_MUXING_TEST("3 outputs: DSI0, TXP, HDMI0",
+ VC4_ENCODER_TYPE_DSI0,
+ VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_HDMI0),
+ VC5_PV_MUXING_TEST("3 outputs: DSI0, TXP, HDMI1",
+ VC4_ENCODER_TYPE_DSI0,
+ VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_HDMI1),
+ VC5_PV_MUXING_TEST("3 outputs: DSI0, DSI1, HDMI0",
+ VC4_ENCODER_TYPE_DSI0,
+ VC4_ENCODER_TYPE_DSI1,
+ VC4_ENCODER_TYPE_HDMI0),
+ VC5_PV_MUXING_TEST("3 outputs: DSI0, DSI1, HDMI1",
+ VC4_ENCODER_TYPE_DSI0,
+ VC4_ENCODER_TYPE_DSI1,
+ VC4_ENCODER_TYPE_HDMI1),
+ VC5_PV_MUXING_TEST("3 outputs: DSI0, HDMI0, HDMI1",
+ VC4_ENCODER_TYPE_DSI0,
+ VC4_ENCODER_TYPE_HDMI0,
+ VC4_ENCODER_TYPE_HDMI1),
+};
+
+KUNIT_ARRAY_PARAM(vc5_test_pv_muxing,
+ vc5_test_pv_muxing_params,
+ vc4_test_pv_muxing_desc);
+
+static const struct pv_muxing_param vc5_test_pv_muxing_invalid_params[] = {
+ VC5_PV_MUXING_TEST("DPI/DSI0 Conflict",
+ VC4_ENCODER_TYPE_DPI,
+ VC4_ENCODER_TYPE_DSI0),
+ VC5_PV_MUXING_TEST("More than 3 outputs: DPI, VEC, TXP, DSI1",
+ VC4_ENCODER_TYPE_DPI,
+ VC4_ENCODER_TYPE_VEC,
+ VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_DSI1),
+ VC5_PV_MUXING_TEST("More than 3 outputs: DPI, VEC, TXP, HDMI0",
+ VC4_ENCODER_TYPE_DPI,
+ VC4_ENCODER_TYPE_VEC,
+ VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_HDMI0),
+ VC5_PV_MUXING_TEST("More than 3 outputs: DPI, VEC, TXP, HDMI1",
+ VC4_ENCODER_TYPE_DPI,
+ VC4_ENCODER_TYPE_VEC,
+ VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_HDMI1),
+ VC5_PV_MUXING_TEST("More than 3 outputs: DPI, VEC, DSI1, HDMI0",
+ VC4_ENCODER_TYPE_DPI,
+ VC4_ENCODER_TYPE_VEC,
+ VC4_ENCODER_TYPE_DSI1,
+ VC4_ENCODER_TYPE_HDMI0),
+ VC5_PV_MUXING_TEST("More than 3 outputs: DPI, VEC, DSI1, HDMI1",
+ VC4_ENCODER_TYPE_DPI,
+ VC4_ENCODER_TYPE_VEC,
+ VC4_ENCODER_TYPE_DSI1,
+ VC4_ENCODER_TYPE_HDMI1),
+ VC5_PV_MUXING_TEST("More than 3 outputs: DPI, VEC, HDMI0, HDMI1",
+ VC4_ENCODER_TYPE_DPI,
+ VC4_ENCODER_TYPE_VEC,
+ VC4_ENCODER_TYPE_HDMI0,
+ VC4_ENCODER_TYPE_HDMI1),
+ VC5_PV_MUXING_TEST("More than 3 outputs: DPI, TXP, DSI1, HDMI0",
+ VC4_ENCODER_TYPE_DPI,
+ VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_DSI1,
+ VC4_ENCODER_TYPE_HDMI0),
+ VC5_PV_MUXING_TEST("More than 3 outputs: DPI, TXP, DSI1, HDMI1",
+ VC4_ENCODER_TYPE_DPI,
+ VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_DSI1,
+ VC4_ENCODER_TYPE_HDMI1),
+ VC5_PV_MUXING_TEST("More than 3 outputs: DPI, TXP, HDMI0, HDMI1",
+ VC4_ENCODER_TYPE_DPI,
+ VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_HDMI0,
+ VC4_ENCODER_TYPE_HDMI1),
+ VC5_PV_MUXING_TEST("More than 3 outputs: DPI, DSI1, HDMI0, HDMI1",
+ VC4_ENCODER_TYPE_DPI,
+ VC4_ENCODER_TYPE_DSI1,
+ VC4_ENCODER_TYPE_HDMI0,
+ VC4_ENCODER_TYPE_HDMI1),
+ VC5_PV_MUXING_TEST("More than 3 outputs: DPI, VEC, TXP, DSI1, HDMI0",
+ VC4_ENCODER_TYPE_DPI,
+ VC4_ENCODER_TYPE_VEC,
+ VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_DSI1,
+ VC4_ENCODER_TYPE_HDMI0),
+ VC5_PV_MUXING_TEST("More than 3 outputs: DPI, VEC, TXP, DSI1, HDMI1",
+ VC4_ENCODER_TYPE_DPI,
+ VC4_ENCODER_TYPE_VEC,
+ VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_DSI1,
+ VC4_ENCODER_TYPE_HDMI1),
+ VC5_PV_MUXING_TEST("More than 3 outputs: DPI, VEC, TXP, HDMI0, HDMI1",
+ VC4_ENCODER_TYPE_DPI,
+ VC4_ENCODER_TYPE_VEC,
+ VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_HDMI0,
+ VC4_ENCODER_TYPE_HDMI1),
+ VC5_PV_MUXING_TEST("More than 3 outputs: DPI, VEC, DSI1, HDMI0, HDMI1",
+ VC4_ENCODER_TYPE_DPI,
+ VC4_ENCODER_TYPE_VEC,
+ VC4_ENCODER_TYPE_DSI1,
+ VC4_ENCODER_TYPE_HDMI0,
+ VC4_ENCODER_TYPE_HDMI1),
+ VC5_PV_MUXING_TEST("More than 3 outputs: DPI, TXP, DSI1, HDMI0, HDMI1",
+ VC4_ENCODER_TYPE_DPI,
+ VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_DSI1,
+ VC4_ENCODER_TYPE_HDMI0,
+ VC4_ENCODER_TYPE_HDMI1),
+ VC5_PV_MUXING_TEST("More than 3 outputs: DSI0, VEC, TXP, DSI1",
+ VC4_ENCODER_TYPE_DSI0,
+ VC4_ENCODER_TYPE_VEC,
+ VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_DSI1),
+ VC5_PV_MUXING_TEST("More than 3 outputs: DSI0, VEC, TXP, HDMI0",
+ VC4_ENCODER_TYPE_DSI0,
+ VC4_ENCODER_TYPE_VEC,
+ VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_HDMI0),
+ VC5_PV_MUXING_TEST("More than 3 outputs: DSI0, VEC, TXP, HDMI1",
+ VC4_ENCODER_TYPE_DSI0,
+ VC4_ENCODER_TYPE_VEC,
+ VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_HDMI1),
+ VC5_PV_MUXING_TEST("More than 3 outputs: DSI0, VEC, DSI1, HDMI0",
+ VC4_ENCODER_TYPE_DSI0,
+ VC4_ENCODER_TYPE_VEC,
+ VC4_ENCODER_TYPE_DSI1,
+ VC4_ENCODER_TYPE_HDMI0),
+ VC5_PV_MUXING_TEST("More than 3 outputs: DSI0, VEC, DSI1, HDMI1",
+ VC4_ENCODER_TYPE_DSI0,
+ VC4_ENCODER_TYPE_VEC,
+ VC4_ENCODER_TYPE_DSI1,
+ VC4_ENCODER_TYPE_HDMI1),
+ VC5_PV_MUXING_TEST("More than 3 outputs: DSI0, VEC, HDMI0, HDMI1",
+ VC4_ENCODER_TYPE_DSI0,
+ VC4_ENCODER_TYPE_VEC,
+ VC4_ENCODER_TYPE_HDMI0,
+ VC4_ENCODER_TYPE_HDMI1),
+ VC5_PV_MUXING_TEST("More than 3 outputs: DSI0, TXP, DSI1, HDMI0",
+ VC4_ENCODER_TYPE_DSI0,
+ VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_DSI1,
+ VC4_ENCODER_TYPE_HDMI0),
+ VC5_PV_MUXING_TEST("More than 3 outputs: DSI0, TXP, DSI1, HDMI1",
+ VC4_ENCODER_TYPE_DSI0,
+ VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_DSI1,
+ VC4_ENCODER_TYPE_HDMI1),
+ VC5_PV_MUXING_TEST("More than 3 outputs: DSI0, TXP, HDMI0, HDMI1",
+ VC4_ENCODER_TYPE_DSI0,
+ VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_HDMI0,
+ VC4_ENCODER_TYPE_HDMI1),
+ VC5_PV_MUXING_TEST("More than 3 outputs: DSI0, DSI1, HDMI0, HDMI1",
+ VC4_ENCODER_TYPE_DSI0,
+ VC4_ENCODER_TYPE_DSI1,
+ VC4_ENCODER_TYPE_HDMI0,
+ VC4_ENCODER_TYPE_HDMI1),
+ VC5_PV_MUXING_TEST("More than 3 outputs: DSI0, VEC, TXP, DSI1, HDMI0",
+ VC4_ENCODER_TYPE_DSI0,
+ VC4_ENCODER_TYPE_VEC,
+ VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_DSI1,
+ VC4_ENCODER_TYPE_HDMI0),
+ VC5_PV_MUXING_TEST("More than 3 outputs: DSI0, VEC, TXP, DSI1, HDMI1",
+ VC4_ENCODER_TYPE_DSI0,
+ VC4_ENCODER_TYPE_VEC,
+ VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_DSI1,
+ VC4_ENCODER_TYPE_HDMI1),
+ VC5_PV_MUXING_TEST("More than 3 outputs: DSI0, VEC, TXP, HDMI0, HDMI1",
+ VC4_ENCODER_TYPE_DSI0,
+ VC4_ENCODER_TYPE_VEC,
+ VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_HDMI0,
+ VC4_ENCODER_TYPE_HDMI1),
+ VC5_PV_MUXING_TEST("More than 3 outputs: DSI0, VEC, DSI1, HDMI0, HDMI1",
+ VC4_ENCODER_TYPE_DSI0,
+ VC4_ENCODER_TYPE_VEC,
+ VC4_ENCODER_TYPE_DSI1,
+ VC4_ENCODER_TYPE_HDMI0,
+ VC4_ENCODER_TYPE_HDMI1),
+ VC5_PV_MUXING_TEST("More than 3 outputs: DSI0, TXP, DSI1, HDMI0, HDMI1",
+ VC4_ENCODER_TYPE_DSI0,
+ VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_DSI1,
+ VC4_ENCODER_TYPE_HDMI0,
+ VC4_ENCODER_TYPE_HDMI1),
+ VC5_PV_MUXING_TEST("More than 3 outputs: VEC, TXP, DSI1, HDMI0, HDMI1",
+ VC4_ENCODER_TYPE_VEC,
+ VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_DSI1,
+ VC4_ENCODER_TYPE_HDMI0,
+ VC4_ENCODER_TYPE_HDMI1),
+ VC5_PV_MUXING_TEST("More than 3 outputs: DPI, VEC, TXP, DSI1, HDMI0, HDMI1",
+ VC4_ENCODER_TYPE_DPI,
+ VC4_ENCODER_TYPE_VEC,
+ VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_DSI1,
+ VC4_ENCODER_TYPE_HDMI0,
+ VC4_ENCODER_TYPE_HDMI1),
+ VC5_PV_MUXING_TEST("More than 3 outputs: DSI0, VEC, TXP, DSI1, HDMI0, HDMI1",
+ VC4_ENCODER_TYPE_DSI0,
+ VC4_ENCODER_TYPE_VEC,
+ VC4_ENCODER_TYPE_TXP,
+ VC4_ENCODER_TYPE_DSI1,
+ VC4_ENCODER_TYPE_HDMI0,
+ VC4_ENCODER_TYPE_HDMI1),
+};
+
+KUNIT_ARRAY_PARAM(vc5_test_pv_muxing_invalid,
+ vc5_test_pv_muxing_invalid_params,
+ vc4_test_pv_muxing_desc);
+
+static void drm_vc4_test_pv_muxing(struct kunit *test)
+{
+ const struct pv_muxing_param *params = test->param_value;
+ const struct pv_muxing_priv *priv = test->priv;
+ struct drm_atomic_state *state = priv->state;
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < params->nencoders; i++) {
+ enum vc4_encoder_type enc_type = params->encoders[i];
+
+ ret = vc4_mock_atomic_add_output(test, state, enc_type);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+ }
+
+ ret = drm_atomic_check_only(state);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ KUNIT_EXPECT_TRUE(test,
+ check_fifo_conflict(test, state));
+
+ for (i = 0; i < params->nencoders; i++) {
+ enum vc4_encoder_type enc_type = params->encoders[i];
+
+ KUNIT_EXPECT_TRUE(test, check_channel_for_encoder(test, state, enc_type,
+ params->check_fn));
+ }
+}
+
+static void drm_vc4_test_pv_muxing_invalid(struct kunit *test)
+{
+ const struct pv_muxing_param *params = test->param_value;
+ const struct pv_muxing_priv *priv = test->priv;
+ struct drm_atomic_state *state = priv->state;
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < params->nencoders; i++) {
+ enum vc4_encoder_type enc_type = params->encoders[i];
+
+ ret = vc4_mock_atomic_add_output(test, state, enc_type);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+ }
+
+ ret = drm_atomic_check_only(state);
+ KUNIT_EXPECT_LT(test, ret, 0);
+}
+
+static int vc4_pv_muxing_test_init(struct kunit *test)
+{
+ const struct pv_muxing_param *params = test->param_value;
+ struct drm_atomic_state *state;
+ struct pv_muxing_priv *priv;
+ struct drm_device *drm;
+ struct vc4_dev *vc4;
+
+ priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, priv);
+ test->priv = priv;
+
+ vc4 = params->mock_fn(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, vc4);
+ priv->vc4 = vc4;
+
+ drm_modeset_acquire_init(&priv->ctx, 0);
+
+ drm = &vc4->base;
+ state = drm_atomic_state_alloc(drm);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
+
+ state->acquire_ctx = &priv->ctx;
+
+ priv->state = state;
+
+ return 0;
+}
+
+static void vc4_pv_muxing_test_exit(struct kunit *test)
+{
+ struct pv_muxing_priv *priv = test->priv;
+ struct vc4_dev *vc4 = priv->vc4;
+ struct drm_device *drm = &vc4->base;
+ struct drm_atomic_state *state = priv->state;
+
+ drm_atomic_state_put(state);
+ drm_modeset_drop_locks(&priv->ctx);
+ drm_modeset_acquire_fini(&priv->ctx);
+ drm_dev_unregister(drm);
+ drm_kunit_helper_free_device(test, vc4->dev);
+}
+
+static struct kunit_case vc4_pv_muxing_tests[] = {
+ KUNIT_CASE_PARAM(drm_vc4_test_pv_muxing,
+ vc4_test_pv_muxing_gen_params),
+ KUNIT_CASE_PARAM(drm_vc4_test_pv_muxing_invalid,
+ vc4_test_pv_muxing_invalid_gen_params),
+ {}
+};
+
+static struct kunit_suite vc4_pv_muxing_test_suite = {
+ .name = "vc4-pv-muxing-combinations",
+ .init = vc4_pv_muxing_test_init,
+ .exit = vc4_pv_muxing_test_exit,
+ .test_cases = vc4_pv_muxing_tests,
+};
+
+static struct kunit_case vc5_pv_muxing_tests[] = {
+ KUNIT_CASE_PARAM(drm_vc4_test_pv_muxing,
+ vc5_test_pv_muxing_gen_params),
+ KUNIT_CASE_PARAM(drm_vc4_test_pv_muxing_invalid,
+ vc5_test_pv_muxing_invalid_gen_params),
+ {}
+};
+
+static struct kunit_suite vc5_pv_muxing_test_suite = {
+ .name = "vc5-pv-muxing-combinations",
+ .init = vc4_pv_muxing_test_init,
+ .exit = vc4_pv_muxing_test_exit,
+ .test_cases = vc5_pv_muxing_tests,
+};
+
+/* See
+ * https://lore.kernel.org/all/3e113525-aa89-b1e2-56b7-ca55bd41d057@samsung.com/
+ * and
+ * https://lore.kernel.org/dri-devel/20200917121623.42023-1-maxime@cerno.tech/
+ */
+static void drm_test_vc5_pv_muxing_bugs_subsequent_crtc_enable(struct kunit *test)
+{
+ struct drm_modeset_acquire_ctx ctx;
+ struct drm_atomic_state *state;
+ struct vc4_crtc_state *new_vc4_crtc_state;
+ struct vc4_hvs_state *new_hvs_state;
+ unsigned int hdmi0_channel;
+ unsigned int hdmi1_channel;
+ struct drm_device *drm;
+ struct vc4_dev *vc4;
+ int ret;
+
+ vc4 = vc5_mock_device(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, vc4);
+
+ drm_modeset_acquire_init(&ctx, 0);
+
+ drm = &vc4->base;
+ state = drm_atomic_state_alloc(drm);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
+
+ state->acquire_ctx = &ctx;
+
+ ret = vc4_mock_atomic_add_output(test, state, VC4_ENCODER_TYPE_HDMI0);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ ret = drm_atomic_check_only(state);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ new_hvs_state = vc4_hvs_get_new_global_state(state);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, new_hvs_state);
+
+ new_vc4_crtc_state = get_vc4_crtc_state_for_encoder(test, state,
+ VC4_ENCODER_TYPE_HDMI0);
+ KUNIT_ASSERT_NOT_NULL(test, new_vc4_crtc_state);
+
+ hdmi0_channel = new_vc4_crtc_state->assigned_channel;
+ KUNIT_ASSERT_NE(test, hdmi0_channel, VC4_HVS_CHANNEL_DISABLED);
+ KUNIT_ASSERT_TRUE(test, new_hvs_state->fifo_state[hdmi0_channel].in_use);
+
+ ret = drm_atomic_helper_swap_state(state, false);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ drm_atomic_state_put(state);
+
+ state = drm_atomic_state_alloc(drm);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
+
+ state->acquire_ctx = &ctx;
+
+ ret = vc4_mock_atomic_add_output(test, state, VC4_ENCODER_TYPE_HDMI1);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ ret = drm_atomic_check_only(state);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ new_hvs_state = vc4_hvs_get_new_global_state(state);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, new_hvs_state);
+
+ new_vc4_crtc_state = get_vc4_crtc_state_for_encoder(test, state,
+ VC4_ENCODER_TYPE_HDMI1);
+ KUNIT_ASSERT_NOT_NULL(test, new_vc4_crtc_state);
+
+ hdmi1_channel = new_vc4_crtc_state->assigned_channel;
+ KUNIT_ASSERT_NE(test, hdmi1_channel, VC4_HVS_CHANNEL_DISABLED);
+ KUNIT_ASSERT_TRUE(test, new_hvs_state->fifo_state[hdmi1_channel].in_use);
+
+ KUNIT_EXPECT_NE(test, hdmi0_channel, hdmi1_channel);
+
+ drm_atomic_state_put(state);
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+ drm_dev_unregister(drm);
+ drm_kunit_helper_free_device(test, vc4->dev);
+}
+
+static void drm_test_vc5_pv_muxing_bugs_stable_fifo(struct kunit *test)
+{
+ struct drm_modeset_acquire_ctx ctx;
+ struct drm_atomic_state *state;
+ struct vc4_crtc_state *new_vc4_crtc_state;
+ struct vc4_hvs_state *new_hvs_state;
+ unsigned int old_hdmi0_channel;
+ unsigned int old_hdmi1_channel;
+ struct drm_device *drm;
+ struct vc4_dev *vc4;
+ int ret;
+
+ vc4 = vc5_mock_device(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, vc4);
+
+ drm_modeset_acquire_init(&ctx, 0);
+
+ drm = &vc4->base;
+ state = drm_atomic_state_alloc(drm);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
+
+ state->acquire_ctx = &ctx;
+
+ ret = vc4_mock_atomic_add_output(test, state, VC4_ENCODER_TYPE_HDMI0);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ ret = vc4_mock_atomic_add_output(test, state, VC4_ENCODER_TYPE_HDMI1);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ ret = drm_atomic_check_only(state);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ new_hvs_state = vc4_hvs_get_new_global_state(state);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, new_hvs_state);
+
+ new_vc4_crtc_state = get_vc4_crtc_state_for_encoder(test, state,
+ VC4_ENCODER_TYPE_HDMI0);
+ KUNIT_ASSERT_NOT_NULL(test, new_vc4_crtc_state);
+
+ old_hdmi0_channel = new_vc4_crtc_state->assigned_channel;
+ KUNIT_ASSERT_NE(test, old_hdmi0_channel, VC4_HVS_CHANNEL_DISABLED);
+ KUNIT_ASSERT_TRUE(test, new_hvs_state->fifo_state[old_hdmi0_channel].in_use);
+
+ new_vc4_crtc_state = get_vc4_crtc_state_for_encoder(test, state,
+ VC4_ENCODER_TYPE_HDMI1);
+ KUNIT_ASSERT_NOT_NULL(test, new_vc4_crtc_state);
+
+ old_hdmi1_channel = new_vc4_crtc_state->assigned_channel;
+ KUNIT_ASSERT_NE(test, old_hdmi1_channel, VC4_HVS_CHANNEL_DISABLED);
+ KUNIT_ASSERT_TRUE(test, new_hvs_state->fifo_state[old_hdmi1_channel].in_use);
+
+ ret = drm_atomic_helper_swap_state(state, false);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ drm_atomic_state_put(state);
+
+ state = drm_atomic_state_alloc(drm);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
+
+ state->acquire_ctx = &ctx;
+
+ ret = vc4_mock_atomic_del_output(test, state, VC4_ENCODER_TYPE_HDMI0);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ ret = drm_atomic_check_only(state);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ new_hvs_state = vc4_hvs_get_new_global_state(state);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, new_hvs_state);
+
+ new_vc4_crtc_state = get_vc4_crtc_state_for_encoder(test, state,
+ VC4_ENCODER_TYPE_HDMI1);
+
+ if (new_vc4_crtc_state) {
+ unsigned int hdmi1_channel;
+
+ hdmi1_channel = new_vc4_crtc_state->assigned_channel;
+ KUNIT_ASSERT_NE(test, hdmi1_channel, VC4_HVS_CHANNEL_DISABLED);
+ KUNIT_ASSERT_TRUE(test, new_hvs_state->fifo_state[hdmi1_channel].in_use);
+
+ KUNIT_EXPECT_EQ(test, old_hdmi1_channel, hdmi1_channel);
+ }
+
+ drm_atomic_state_put(state);
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+ drm_dev_unregister(drm);
+ drm_kunit_helper_free_device(test, vc4->dev);
+}
+
+static void
+drm_test_vc5_pv_muxing_bugs_subsequent_crtc_enable_too_many_crtc_state(struct kunit *test)
+{
+ struct drm_modeset_acquire_ctx ctx;
+ struct drm_atomic_state *state;
+ struct vc4_crtc_state *new_vc4_crtc_state;
+ struct drm_device *drm;
+ struct vc4_dev *vc4;
+ int ret;
+
+ vc4 = vc5_mock_device(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, vc4);
+
+ drm_modeset_acquire_init(&ctx, 0);
+
+ drm = &vc4->base;
+ state = drm_atomic_state_alloc(drm);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
+
+ state->acquire_ctx = &ctx;
+
+ ret = vc4_mock_atomic_add_output(test, state, VC4_ENCODER_TYPE_HDMI0);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ ret = drm_atomic_check_only(state);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ ret = drm_atomic_helper_swap_state(state, false);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ drm_atomic_state_put(state);
+
+ state = drm_atomic_state_alloc(drm);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
+
+ state->acquire_ctx = &ctx;
+
+ ret = vc4_mock_atomic_add_output(test, state, VC4_ENCODER_TYPE_HDMI1);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ ret = drm_atomic_check_only(state);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ new_vc4_crtc_state = get_vc4_crtc_state_for_encoder(test, state,
+ VC4_ENCODER_TYPE_HDMI0);
+ KUNIT_EXPECT_NULL(test, new_vc4_crtc_state);
+
+ drm_atomic_state_put(state);
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+ drm_dev_unregister(drm);
+ drm_kunit_helper_free_device(test, vc4->dev);
+}
+
+static struct kunit_case vc5_pv_muxing_bugs_tests[] = {
+ KUNIT_CASE(drm_test_vc5_pv_muxing_bugs_subsequent_crtc_enable),
+ KUNIT_CASE(drm_test_vc5_pv_muxing_bugs_subsequent_crtc_enable_too_many_crtc_state),
+ KUNIT_CASE(drm_test_vc5_pv_muxing_bugs_stable_fifo),
+ {}
+};
+
+static struct kunit_suite vc5_pv_muxing_bugs_test_suite = {
+ .name = "vc5-pv-muxing-bugs",
+ .test_cases = vc5_pv_muxing_bugs_tests,
+};
+
+kunit_test_suites(
+ &vc4_pv_muxing_test_suite,
+ &vc5_pv_muxing_test_suite,
+ &vc5_pv_muxing_bugs_test_suite
+);
diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
index c5947ed8cc81..86d629e45307 100644
--- a/drivers/gpu/drm/vc4/vc4_bo.c
+++ b/drivers/gpu/drm/vc4/vc4_bo.c
@@ -69,8 +69,8 @@ static void vc4_bo_stats_print(struct drm_printer *p, struct vc4_dev *vc4)
static int vc4_bo_stats_debugfs(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = (struct drm_info_node *)m->private;
- struct drm_device *dev = node->minor->dev;
+ struct drm_debugfs_entry *entry = m->private;
+ struct drm_device *dev = entry->dev;
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct drm_printer p = drm_seq_file_printer(m);
@@ -991,15 +991,11 @@ int vc4_bo_debugfs_init(struct drm_minor *minor)
{
struct drm_device *drm = minor->dev;
struct vc4_dev *vc4 = to_vc4_dev(drm);
- int ret;
if (!vc4->v3d)
return -ENODEV;
- ret = vc4_debugfs_add_file(minor, "bo_stats",
- vc4_bo_stats_debugfs, NULL);
- if (ret)
- return ret;
+ drm_debugfs_add_file(drm, "bo_stats", vc4_bo_stats_debugfs, NULL);
return 0;
}
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index 0108613e79d5..bef9d45ef1df 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -50,8 +50,17 @@
#define HVS_FIFO_LATENCY_PIX 6
-#define CRTC_WRITE(offset, val) writel(val, vc4_crtc->regs + (offset))
-#define CRTC_READ(offset) readl(vc4_crtc->regs + (offset))
+#define CRTC_WRITE(offset, val) \
+ do { \
+ kunit_fail_current_test("Accessing a register in a unit test!\n"); \
+ writel(val, vc4_crtc->regs + (offset)); \
+ } while (0)
+
+#define CRTC_READ(offset) \
+ ({ \
+ kunit_fail_current_test("Accessing a register in a unit test!\n"); \
+ readl(vc4_crtc->regs + (offset)); \
+ })
static const struct debugfs_reg32 crtc_regs[] = {
VC4_REG32(PV_CONTROL),
@@ -326,8 +335,14 @@ static void vc4_crtc_config_pv(struct drm_crtc *crtc, struct drm_encoder *encode
bool is_dsi = (vc4_encoder->type == VC4_ENCODER_TYPE_DSI0 ||
vc4_encoder->type == VC4_ENCODER_TYPE_DSI1);
bool is_dsi1 = vc4_encoder->type == VC4_ENCODER_TYPE_DSI1;
+ bool is_vec = vc4_encoder->type == VC4_ENCODER_TYPE_VEC;
u32 format = is_dsi1 ? PV_CONTROL_FORMAT_DSIV_24 : PV_CONTROL_FORMAT_24;
u8 ppc = pv_data->pixels_per_clock;
+
+ u16 vert_bp = mode->crtc_vtotal - mode->crtc_vsync_end;
+ u16 vert_sync = mode->crtc_vsync_end - mode->crtc_vsync_start;
+ u16 vert_fp = mode->crtc_vsync_start - mode->crtc_vdisplay;
+
bool debug_dump_regs = false;
int idx;
@@ -355,49 +370,60 @@ static void vc4_crtc_config_pv(struct drm_crtc *crtc, struct drm_encoder *encode
VC4_SET_FIELD(mode->hdisplay * pixel_rep / ppc,
PV_HORZB_HACTIVE));
- CRTC_WRITE(PV_VERTA,
- VC4_SET_FIELD(mode->crtc_vtotal - mode->crtc_vsync_end +
- interlace,
- PV_VERTA_VBP) |
- VC4_SET_FIELD(mode->crtc_vsync_end - mode->crtc_vsync_start,
- PV_VERTA_VSYNC));
- CRTC_WRITE(PV_VERTB,
- VC4_SET_FIELD(mode->crtc_vsync_start - mode->crtc_vdisplay,
- PV_VERTB_VFP) |
- VC4_SET_FIELD(mode->crtc_vdisplay, PV_VERTB_VACTIVE));
-
if (interlace) {
+ bool odd_field_first = false;
+ u32 field_delay = mode->htotal * pixel_rep / (2 * ppc);
+ u16 vert_bp_even = vert_bp;
+ u16 vert_fp_even = vert_fp;
+
+ if (is_vec) {
+ /* VEC (composite output) */
+ ++field_delay;
+ if (mode->htotal == 858) {
+ /* 525-line mode (NTSC or PAL-M) */
+ odd_field_first = true;
+ }
+ }
+
+ if (odd_field_first)
+ ++vert_fp_even;
+ else
+ ++vert_bp;
+
CRTC_WRITE(PV_VERTA_EVEN,
- VC4_SET_FIELD(mode->crtc_vtotal -
- mode->crtc_vsync_end,
- PV_VERTA_VBP) |
- VC4_SET_FIELD(mode->crtc_vsync_end -
- mode->crtc_vsync_start,
- PV_VERTA_VSYNC));
+ VC4_SET_FIELD(vert_bp_even, PV_VERTA_VBP) |
+ VC4_SET_FIELD(vert_sync, PV_VERTA_VSYNC));
CRTC_WRITE(PV_VERTB_EVEN,
- VC4_SET_FIELD(mode->crtc_vsync_start -
- mode->crtc_vdisplay,
- PV_VERTB_VFP) |
+ VC4_SET_FIELD(vert_fp_even, PV_VERTB_VFP) |
VC4_SET_FIELD(mode->crtc_vdisplay, PV_VERTB_VACTIVE));
- /* We set up first field even mode for HDMI. VEC's
- * NTSC mode would want first field odd instead, once
- * we support it (to do so, set ODD_FIRST and put the
- * delay in VSYNCD_EVEN instead).
+ /* We set up first field even mode for HDMI and VEC's PAL.
+ * For NTSC, we need first field odd.
*/
CRTC_WRITE(PV_V_CONTROL,
PV_VCONTROL_CONTINUOUS |
(is_dsi ? PV_VCONTROL_DSI : 0) |
PV_VCONTROL_INTERLACE |
- VC4_SET_FIELD(mode->htotal * pixel_rep / (2 * ppc),
- PV_VCONTROL_ODD_DELAY));
- CRTC_WRITE(PV_VSYNCD_EVEN, 0);
+ (odd_field_first
+ ? PV_VCONTROL_ODD_FIRST
+ : VC4_SET_FIELD(field_delay,
+ PV_VCONTROL_ODD_DELAY)));
+ CRTC_WRITE(PV_VSYNCD_EVEN,
+ (odd_field_first ? field_delay : 0));
} else {
CRTC_WRITE(PV_V_CONTROL,
PV_VCONTROL_CONTINUOUS |
(is_dsi ? PV_VCONTROL_DSI : 0));
+ CRTC_WRITE(PV_VSYNCD_EVEN, 0);
}
+ CRTC_WRITE(PV_VERTA,
+ VC4_SET_FIELD(vert_bp, PV_VERTA_VBP) |
+ VC4_SET_FIELD(vert_sync, PV_VERTA_VSYNC));
+ CRTC_WRITE(PV_VERTB,
+ VC4_SET_FIELD(vert_fp, PV_VERTB_VFP) |
+ VC4_SET_FIELD(mode->crtc_vdisplay, PV_VERTB_VACTIVE));
+
if (is_dsi)
CRTC_WRITE(PV_HACT_ACT, mode->hdisplay * pixel_rep);
@@ -486,21 +512,6 @@ static int vc4_crtc_disable(struct drm_crtc *crtc,
return 0;
}
-static struct drm_encoder *vc4_crtc_get_encoder_by_type(struct drm_crtc *crtc,
- enum vc4_encoder_type type)
-{
- struct drm_encoder *encoder;
-
- drm_for_each_encoder(encoder, crtc->dev) {
- struct vc4_encoder *vc4_encoder = to_vc4_encoder(encoder);
-
- if (vc4_encoder->type == type)
- return encoder;
- }
-
- return NULL;
-}
-
int vc4_crtc_disable_at_boot(struct drm_crtc *crtc)
{
struct drm_device *drm = crtc->dev;
@@ -536,7 +547,7 @@ int vc4_crtc_disable_at_boot(struct drm_crtc *crtc)
pv_data = vc4_crtc_to_vc4_pv_data(vc4_crtc);
encoder_type = pv_data->encoder_types[encoder_sel];
- encoder = vc4_crtc_get_encoder_by_type(crtc, encoder_type);
+ encoder = vc4_find_encoder_by_type(drm, encoder_type);
if (WARN_ON(!encoder))
return 0;
@@ -690,8 +701,8 @@ void vc4_crtc_get_margins(struct drm_crtc_state *state,
}
}
-static int vc4_crtc_atomic_check(struct drm_crtc *crtc,
- struct drm_atomic_state *state)
+int vc4_crtc_atomic_check(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
{
struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
crtc);
@@ -711,7 +722,7 @@ static int vc4_crtc_atomic_check(struct drm_crtc *crtc,
struct vc4_encoder *vc4_encoder = to_vc4_encoder(encoder);
if (vc4_encoder->type == VC4_ENCODER_TYPE_HDMI0) {
- vc4_state->hvs_load = max(mode->clock * mode->hdisplay / mode->htotal + 1000,
+ vc4_state->hvs_load = max(mode->clock * mode->hdisplay / mode->htotal + 8000,
mode->clock * 9 / 10) * 1000;
} else {
vc4_state->hvs_load = mode->clock * 1000;
@@ -1096,12 +1107,9 @@ int vc4_crtc_late_register(struct drm_crtc *crtc)
struct drm_device *drm = crtc->dev;
struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
const struct vc4_crtc_data *crtc_data = vc4_crtc_to_vc4_crtc_data(vc4_crtc);
- int ret;
- ret = vc4_debugfs_add_regset32(drm->primary, crtc_data->debugfs_name,
- &vc4_crtc->regset);
- if (ret)
- return ret;
+ vc4_debugfs_add_regset32(drm, crtc_data->debugfs_name,
+ &vc4_crtc->regset);
return 0;
}
@@ -1131,8 +1139,9 @@ static const struct drm_crtc_helper_funcs vc4_crtc_helper_funcs = {
.get_scanout_position = vc4_crtc_get_scanout_position,
};
-static const struct vc4_pv_data bcm2835_pv0_data = {
+const struct vc4_pv_data bcm2835_pv0_data = {
.base = {
+ .name = "pixelvalve-0",
.debugfs_name = "crtc0_regs",
.hvs_available_channels = BIT(0),
.hvs_output = 0,
@@ -1145,8 +1154,9 @@ static const struct vc4_pv_data bcm2835_pv0_data = {
},
};
-static const struct vc4_pv_data bcm2835_pv1_data = {
+const struct vc4_pv_data bcm2835_pv1_data = {
.base = {
+ .name = "pixelvalve-1",
.debugfs_name = "crtc1_regs",
.hvs_available_channels = BIT(2),
.hvs_output = 2,
@@ -1159,8 +1169,9 @@ static const struct vc4_pv_data bcm2835_pv1_data = {
},
};
-static const struct vc4_pv_data bcm2835_pv2_data = {
+const struct vc4_pv_data bcm2835_pv2_data = {
.base = {
+ .name = "pixelvalve-2",
.debugfs_name = "crtc2_regs",
.hvs_available_channels = BIT(1),
.hvs_output = 1,
@@ -1173,8 +1184,9 @@ static const struct vc4_pv_data bcm2835_pv2_data = {
},
};
-static const struct vc4_pv_data bcm2711_pv0_data = {
+const struct vc4_pv_data bcm2711_pv0_data = {
.base = {
+ .name = "pixelvalve-0",
.debugfs_name = "crtc0_regs",
.hvs_available_channels = BIT(0),
.hvs_output = 0,
@@ -1187,8 +1199,9 @@ static const struct vc4_pv_data bcm2711_pv0_data = {
},
};
-static const struct vc4_pv_data bcm2711_pv1_data = {
+const struct vc4_pv_data bcm2711_pv1_data = {
.base = {
+ .name = "pixelvalve-1",
.debugfs_name = "crtc1_regs",
.hvs_available_channels = BIT(0) | BIT(1) | BIT(2),
.hvs_output = 3,
@@ -1201,8 +1214,9 @@ static const struct vc4_pv_data bcm2711_pv1_data = {
},
};
-static const struct vc4_pv_data bcm2711_pv2_data = {
+const struct vc4_pv_data bcm2711_pv2_data = {
.base = {
+ .name = "pixelvalve-2",
.debugfs_name = "crtc2_regs",
.hvs_available_channels = BIT(0) | BIT(1) | BIT(2),
.hvs_output = 4,
@@ -1214,8 +1228,9 @@ static const struct vc4_pv_data bcm2711_pv2_data = {
},
};
-static const struct vc4_pv_data bcm2711_pv3_data = {
+const struct vc4_pv_data bcm2711_pv3_data = {
.base = {
+ .name = "pixelvalve-3",
.debugfs_name = "crtc3_regs",
.hvs_available_channels = BIT(1),
.hvs_output = 1,
@@ -1227,8 +1242,9 @@ static const struct vc4_pv_data bcm2711_pv3_data = {
},
};
-static const struct vc4_pv_data bcm2711_pv4_data = {
+const struct vc4_pv_data bcm2711_pv4_data = {
.base = {
+ .name = "pixelvalve-4",
.debugfs_name = "crtc4_regs",
.hvs_available_channels = BIT(0) | BIT(1) | BIT(2),
.hvs_output = 5,
@@ -1278,31 +1294,44 @@ static void vc4_set_crtc_possible_masks(struct drm_device *drm,
}
}
-int vc4_crtc_init(struct drm_device *drm, struct vc4_crtc *vc4_crtc,
- const struct drm_crtc_funcs *crtc_funcs,
- const struct drm_crtc_helper_funcs *crtc_helper_funcs)
+/**
+ * __vc4_crtc_init - Initializes a CRTC
+ * @drm: DRM Device
+ * @pdev: CRTC Platform Device
+ * @vc4_crtc: CRTC Object to Initialize
+ * @data: Configuration data associated with this CRTC
+ * @primary_plane: Primary plane for CRTC
+ * @crtc_funcs: Callbacks for the new CRTC
+ * @crtc_helper_funcs: Helper Callbacks for the new CRTC
+ * @feeds_txp: Is this CRTC connected to the TXP?
+ *
+ * Initializes our private CRTC structure. This function is mostly
+ * relevant for KUnit testing, all other users should use
+ * vc4_crtc_init() instead.
+ *
+ * Returns:
+ * 0 on success, a negative error code on failure.
+ */
+int __vc4_crtc_init(struct drm_device *drm,
+ struct platform_device *pdev,
+ struct vc4_crtc *vc4_crtc,
+ const struct vc4_crtc_data *data,
+ struct drm_plane *primary_plane,
+ const struct drm_crtc_funcs *crtc_funcs,
+ const struct drm_crtc_helper_funcs *crtc_helper_funcs,
+ bool feeds_txp)
{
struct vc4_dev *vc4 = to_vc4_dev(drm);
struct drm_crtc *crtc = &vc4_crtc->base;
- struct drm_plane *primary_plane;
unsigned int i;
int ret;
- /* For now, we create just the primary and the legacy cursor
- * planes. We should be able to stack more planes on easily,
- * but to do that we would need to compute the bandwidth
- * requirement of the plane configuration, and reject ones
- * that will take too much.
- */
- primary_plane = vc4_plane_init(drm, DRM_PLANE_TYPE_PRIMARY, 0);
- if (IS_ERR(primary_plane)) {
- dev_err(drm->dev, "failed to construct primary plane\n");
- return PTR_ERR(primary_plane);
- }
-
+ vc4_crtc->data = data;
+ vc4_crtc->pdev = pdev;
+ vc4_crtc->feeds_txp = feeds_txp;
spin_lock_init(&vc4_crtc->irq_lock);
ret = drmm_crtc_init_with_planes(drm, crtc, primary_plane, NULL,
- crtc_funcs, NULL);
+ crtc_funcs, data->name);
if (ret)
return ret;
@@ -1328,6 +1357,31 @@ int vc4_crtc_init(struct drm_device *drm, struct vc4_crtc *vc4_crtc,
return 0;
}
+int vc4_crtc_init(struct drm_device *drm, struct platform_device *pdev,
+ struct vc4_crtc *vc4_crtc,
+ const struct vc4_crtc_data *data,
+ const struct drm_crtc_funcs *crtc_funcs,
+ const struct drm_crtc_helper_funcs *crtc_helper_funcs,
+ bool feeds_txp)
+{
+ struct drm_plane *primary_plane;
+
+ /* For now, we create just the primary and the legacy cursor
+ * planes. We should be able to stack more planes on easily,
+ * but to do that we would need to compute the bandwidth
+ * requirement of the plane configuration, and reject ones
+ * that will take too much.
+ */
+ primary_plane = vc4_plane_init(drm, DRM_PLANE_TYPE_PRIMARY, 0);
+ if (IS_ERR(primary_plane)) {
+ dev_err(drm->dev, "failed to construct primary plane\n");
+ return PTR_ERR(primary_plane);
+ }
+
+ return __vc4_crtc_init(drm, pdev, vc4_crtc, data, primary_plane,
+ crtc_funcs, crtc_helper_funcs, feeds_txp);
+}
+
static int vc4_crtc_bind(struct device *dev, struct device *master, void *data)
{
struct platform_device *pdev = to_platform_device(dev);
@@ -1345,8 +1399,6 @@ static int vc4_crtc_bind(struct device *dev, struct device *master, void *data)
pv_data = of_device_get_match_data(dev);
if (!pv_data)
return -ENODEV;
- vc4_crtc->data = &pv_data->base;
- vc4_crtc->pdev = pdev;
vc4_crtc->regs = vc4_ioremap_regs(pdev, 0);
if (IS_ERR(vc4_crtc->regs))
@@ -1356,8 +1408,9 @@ static int vc4_crtc_bind(struct device *dev, struct device *master, void *data)
vc4_crtc->regset.regs = crtc_regs;
vc4_crtc->regset.nregs = ARRAY_SIZE(crtc_regs);
- ret = vc4_crtc_init(drm, vc4_crtc,
- &vc4_crtc_funcs, &vc4_crtc_helper_funcs);
+ ret = vc4_crtc_init(drm, pdev, vc4_crtc, &pv_data->base,
+ &vc4_crtc_funcs, &vc4_crtc_helper_funcs,
+ false);
if (ret)
return ret;
vc4_set_crtc_possible_masks(drm, crtc);
diff --git a/drivers/gpu/drm/vc4/vc4_debugfs.c b/drivers/gpu/drm/vc4/vc4_debugfs.c
index 19cda4f91a82..fac624a663ea 100644
--- a/drivers/gpu/drm/vc4/vc4_debugfs.c
+++ b/drivers/gpu/drm/vc4/vc4_debugfs.c
@@ -34,9 +34,9 @@ vc4_debugfs_init(struct drm_minor *minor)
static int vc4_debugfs_regset32(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = (struct drm_info_node *)m->private;
- struct drm_device *drm = node->minor->dev;
- struct debugfs_regset32 *regset = node->info_ent->data;
+ struct drm_debugfs_entry *entry = m->private;
+ struct drm_device *drm = entry->dev;
+ struct debugfs_regset32 *regset = entry->file.data;
struct drm_printer p = drm_seq_file_printer(m);
int idx;
@@ -50,31 +50,9 @@ static int vc4_debugfs_regset32(struct seq_file *m, void *unused)
return 0;
}
-int vc4_debugfs_add_file(struct drm_minor *minor,
- const char *name,
- int (*show)(struct seq_file*, void*),
- void *data)
+void vc4_debugfs_add_regset32(struct drm_device *drm,
+ const char *name,
+ struct debugfs_regset32 *regset)
{
- struct drm_device *dev = minor->dev;
- struct dentry *root = minor->debugfs_root;
- struct drm_info_list *file;
-
- file = drmm_kzalloc(dev, sizeof(*file), GFP_KERNEL);
- if (!file)
- return -ENOMEM;
-
- file->name = name;
- file->show = show;
- file->data = data;
-
- drm_debugfs_create_files(file, 1, root, minor);
-
- return 0;
-}
-
-int vc4_debugfs_add_regset32(struct drm_minor *minor,
- const char *name,
- struct debugfs_regset32 *regset)
-{
- return vc4_debugfs_add_file(minor, name, vc4_debugfs_regset32, regset);
+ drm_debugfs_add_file(drm, name, vc4_debugfs_regset32, regset);
}
diff --git a/drivers/gpu/drm/vc4/vc4_dpi.c b/drivers/gpu/drm/vc4/vc4_dpi.c
index 1f8f44b7b5a5..f518d6e59ed6 100644
--- a/drivers/gpu/drm/vc4/vc4_dpi.c
+++ b/drivers/gpu/drm/vc4/vc4_dpi.c
@@ -103,8 +103,17 @@ to_vc4_dpi(struct drm_encoder *encoder)
return container_of(encoder, struct vc4_dpi, encoder.base);
}
-#define DPI_READ(offset) readl(dpi->regs + (offset))
-#define DPI_WRITE(offset, val) writel(val, dpi->regs + (offset))
+#define DPI_READ(offset) \
+ ({ \
+ kunit_fail_current_test("Accessing a register in a unit test!\n"); \
+ readl(dpi->regs + (offset)); \
+ })
+
+#define DPI_WRITE(offset, val) \
+ do { \
+ kunit_fail_current_test("Accessing a register in a unit test!\n"); \
+ writel(val, dpi->regs + (offset)); \
+ } while (0)
static const struct debugfs_reg32 dpi_regs[] = {
VC4_REG32(DPI_C),
@@ -150,8 +159,8 @@ static void vc4_dpi_encoder_enable(struct drm_encoder *encoder)
}
drm_connector_list_iter_end(&conn_iter);
- /* Default to 24bit if no connector or format found. */
- dpi_c |= VC4_SET_FIELD(DPI_FORMAT_24BIT_888_RGB, DPI_FORMAT);
+ /* Default to 18bit if no connector or format found. */
+ dpi_c |= VC4_SET_FIELD(DPI_FORMAT_18BIT_666_RGB_1, DPI_FORMAT);
if (connector) {
if (connector->display_info.num_bus_formats) {
@@ -170,16 +179,26 @@ static void vc4_dpi_encoder_enable(struct drm_encoder *encoder)
dpi_c |= VC4_SET_FIELD(DPI_ORDER_BGR,
DPI_ORDER);
break;
+ case MEDIA_BUS_FMT_BGR666_1X24_CPADHI:
+ dpi_c |= VC4_SET_FIELD(DPI_ORDER_BGR, DPI_ORDER);
+ fallthrough;
case MEDIA_BUS_FMT_RGB666_1X24_CPADHI:
dpi_c |= VC4_SET_FIELD(DPI_FORMAT_18BIT_666_RGB_2,
DPI_FORMAT);
break;
+ case MEDIA_BUS_FMT_BGR666_1X18:
+ dpi_c |= VC4_SET_FIELD(DPI_ORDER_BGR, DPI_ORDER);
+ fallthrough;
case MEDIA_BUS_FMT_RGB666_1X18:
dpi_c |= VC4_SET_FIELD(DPI_FORMAT_18BIT_666_RGB_1,
DPI_FORMAT);
break;
case MEDIA_BUS_FMT_RGB565_1X16:
- dpi_c |= VC4_SET_FIELD(DPI_FORMAT_16BIT_565_RGB_3,
+ dpi_c |= VC4_SET_FIELD(DPI_FORMAT_16BIT_565_RGB_1,
+ DPI_FORMAT);
+ break;
+ case MEDIA_BUS_FMT_RGB565_1X24_CPADHI:
+ dpi_c |= VC4_SET_FIELD(DPI_FORMAT_16BIT_565_RGB_2,
DPI_FORMAT);
break;
default:
@@ -248,11 +267,8 @@ static int vc4_dpi_late_register(struct drm_encoder *encoder)
{
struct drm_device *drm = encoder->dev;
struct vc4_dpi *dpi = to_vc4_dpi(encoder);
- int ret;
- ret = vc4_debugfs_add_regset32(drm->primary, "dpi_regs", &dpi->regset);
- if (ret)
- return ret;
+ vc4_debugfs_add_regset32(drm, "dpi_regs", &dpi->regset);
return 0;
}
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index 5990d8f8c363..0ccaee57fe9a 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -196,7 +196,7 @@ static const struct drm_ioctl_desc vc4_drm_ioctls[] = {
DRM_IOCTL_DEF_DRV(VC4_PERFMON_GET_VALUES, vc4_perfmon_get_values_ioctl, DRM_RENDER_ALLOW),
};
-static const struct drm_driver vc4_drm_driver = {
+const struct drm_driver vc4_drm_driver = {
.driver_features = (DRIVER_MODESET |
DRIVER_ATOMIC |
DRIVER_GEM |
@@ -225,7 +225,7 @@ static const struct drm_driver vc4_drm_driver = {
.patchlevel = DRIVER_PATCHLEVEL,
};
-static const struct drm_driver vc5_drm_driver = {
+const struct drm_driver vc5_drm_driver = {
.driver_features = (DRIVER_MODESET |
DRIVER_ATOMIC |
DRIVER_GEM),
@@ -320,7 +320,6 @@ static int vc4_drm_bind(struct device *dev)
drm = &vc4->base;
platform_set_drvdata(pdev, drm);
- INIT_LIST_HEAD(&vc4->debugfs_list);
if (!is_vc5) {
ret = drmm_mutex_init(drm, &vc4->bin_bo_lock);
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
index 515228682e8e..95069bb16821 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -19,11 +19,16 @@
#include <drm/drm_mm.h>
#include <drm/drm_modeset_lock.h>
+#include <kunit/test-bug.h>
+
#include "uapi/drm/vc4_drm.h"
struct drm_device;
struct drm_gem_object;
+extern const struct drm_driver vc4_drm_driver;
+extern const struct drm_driver vc5_drm_driver;
+
/* Don't forget to update vc4_bo.c: bo_type_names[] when adding to
* this.
*/
@@ -221,11 +226,6 @@ struct vc4_dev {
struct drm_private_obj hvs_channels;
struct drm_private_obj load_tracker;
- /* List of vc4_debugfs_info_entry for adding to debugfs once
- * the minor is available (after drm_dev_register()).
- */
- struct list_head debugfs_list;
-
/* Mutex for binner bo allocation. */
struct mutex bin_bo_lock;
/* Reference count for our binner bo. */
@@ -233,7 +233,7 @@ struct vc4_dev {
};
static inline struct vc4_dev *
-to_vc4_dev(struct drm_device *dev)
+to_vc4_dev(const struct drm_device *dev)
{
return container_of(dev, struct vc4_dev, base);
}
@@ -286,7 +286,7 @@ struct vc4_bo {
};
static inline struct vc4_bo *
-to_vc4_bo(struct drm_gem_object *bo)
+to_vc4_bo(const struct drm_gem_object *bo)
{
return container_of(to_drm_gem_dma_obj(bo), struct vc4_bo, base);
}
@@ -299,7 +299,7 @@ struct vc4_fence {
};
static inline struct vc4_fence *
-to_vc4_fence(struct dma_fence *fence)
+to_vc4_fence(const struct dma_fence *fence)
{
return container_of(fence, struct vc4_fence, base);
}
@@ -355,12 +355,35 @@ struct vc4_hvs {
bool vc5_hdmi_enable_4096by2160;
};
+#define HVS_NUM_CHANNELS 3
+
+struct vc4_hvs_state {
+ struct drm_private_state base;
+ unsigned long core_clock_rate;
+
+ struct {
+ unsigned in_use: 1;
+ unsigned long fifo_load;
+ struct drm_crtc_commit *pending_commit;
+ } fifo_state[HVS_NUM_CHANNELS];
+};
+
+static inline struct vc4_hvs_state *
+to_vc4_hvs_state(const struct drm_private_state *priv)
+{
+ return container_of(priv, struct vc4_hvs_state, base);
+}
+
+struct vc4_hvs_state *vc4_hvs_get_global_state(struct drm_atomic_state *state);
+struct vc4_hvs_state *vc4_hvs_get_old_global_state(const struct drm_atomic_state *state);
+struct vc4_hvs_state *vc4_hvs_get_new_global_state(const struct drm_atomic_state *state);
+
struct vc4_plane {
struct drm_plane base;
};
static inline struct vc4_plane *
-to_vc4_plane(struct drm_plane *plane)
+to_vc4_plane(const struct drm_plane *plane)
{
return container_of(plane, struct vc4_plane, base);
}
@@ -436,7 +459,7 @@ struct vc4_plane_state {
};
static inline struct vc4_plane_state *
-to_vc4_plane_state(struct drm_plane_state *state)
+to_vc4_plane_state(const struct drm_plane_state *state)
{
return container_of(state, struct vc4_plane_state, base);
}
@@ -450,6 +473,7 @@ enum vc4_encoder_type {
VC4_ENCODER_TYPE_DSI1,
VC4_ENCODER_TYPE_SMI,
VC4_ENCODER_TYPE_DPI,
+ VC4_ENCODER_TYPE_TXP,
};
struct vc4_encoder {
@@ -466,12 +490,30 @@ struct vc4_encoder {
};
static inline struct vc4_encoder *
-to_vc4_encoder(struct drm_encoder *encoder)
+to_vc4_encoder(const struct drm_encoder *encoder)
{
return container_of(encoder, struct vc4_encoder, base);
}
+static inline
+struct drm_encoder *vc4_find_encoder_by_type(struct drm_device *drm,
+ enum vc4_encoder_type type)
+{
+ struct drm_encoder *encoder;
+
+ drm_for_each_encoder(encoder, drm) {
+ struct vc4_encoder *vc4_encoder = to_vc4_encoder(encoder);
+
+ if (vc4_encoder->type == type)
+ return encoder;
+ }
+
+ return NULL;
+}
+
struct vc4_crtc_data {
+ const char *name;
+
const char *debugfs_name;
/* Bitmask of channels (FIFOs) of the HVS that the output can source from */
@@ -481,6 +523,8 @@ struct vc4_crtc_data {
int hvs_output;
};
+extern const struct vc4_crtc_data vc4_txp_crtc_data;
+
struct vc4_pv_data {
struct vc4_crtc_data base;
@@ -493,6 +537,15 @@ struct vc4_pv_data {
enum vc4_encoder_type encoder_types[4];
};
+extern const struct vc4_pv_data bcm2835_pv0_data;
+extern const struct vc4_pv_data bcm2835_pv1_data;
+extern const struct vc4_pv_data bcm2835_pv2_data;
+extern const struct vc4_pv_data bcm2711_pv0_data;
+extern const struct vc4_pv_data bcm2711_pv1_data;
+extern const struct vc4_pv_data bcm2711_pv2_data;
+extern const struct vc4_pv_data bcm2711_pv3_data;
+extern const struct vc4_pv_data bcm2711_pv4_data;
+
struct vc4_crtc {
struct drm_crtc base;
struct platform_device *pdev;
@@ -539,7 +592,7 @@ struct vc4_crtc {
};
static inline struct vc4_crtc *
-to_vc4_crtc(struct drm_crtc *crtc)
+to_vc4_crtc(const struct drm_crtc *crtc)
{
return container_of(crtc, struct vc4_crtc, base);
}
@@ -584,15 +637,34 @@ struct vc4_crtc_state {
#define VC4_HVS_CHANNEL_DISABLED ((unsigned int)-1)
static inline struct vc4_crtc_state *
-to_vc4_crtc_state(struct drm_crtc_state *crtc_state)
+to_vc4_crtc_state(const struct drm_crtc_state *crtc_state)
{
return container_of(crtc_state, struct vc4_crtc_state, base);
}
-#define V3D_READ(offset) readl(vc4->v3d->regs + offset)
-#define V3D_WRITE(offset, val) writel(val, vc4->v3d->regs + offset)
-#define HVS_READ(offset) readl(hvs->regs + offset)
-#define HVS_WRITE(offset, val) writel(val, hvs->regs + offset)
+#define V3D_READ(offset) \
+ ({ \
+ kunit_fail_current_test("Accessing a register in a unit test!\n"); \
+ readl(vc4->v3d->regs + (offset)); \
+ })
+
+#define V3D_WRITE(offset, val) \
+ do { \
+ kunit_fail_current_test("Accessing a register in a unit test!\n"); \
+ writel(val, vc4->v3d->regs + (offset)); \
+ } while (0)
+
+#define HVS_READ(offset) \
+ ({ \
+ kunit_fail_current_test("Accessing a register in a unit test!\n"); \
+ readl(hvs->regs + (offset)); \
+ })
+
+#define HVS_WRITE(offset, val) \
+ do { \
+ kunit_fail_current_test("Accessing a register in a unit test!\n"); \
+ writel(val, hvs->regs + (offset)); \
+ } while (0)
#define VC4_REG32(reg) { .name = #reg, .offset = reg }
@@ -862,14 +934,24 @@ int vc4_bo_debugfs_init(struct drm_minor *minor);
/* vc4_crtc.c */
extern struct platform_driver vc4_crtc_driver;
int vc4_crtc_disable_at_boot(struct drm_crtc *crtc);
-int vc4_crtc_init(struct drm_device *drm, struct vc4_crtc *vc4_crtc,
+int __vc4_crtc_init(struct drm_device *drm, struct platform_device *pdev,
+ struct vc4_crtc *vc4_crtc, const struct vc4_crtc_data *data,
+ struct drm_plane *primary_plane,
+ const struct drm_crtc_funcs *crtc_funcs,
+ const struct drm_crtc_helper_funcs *crtc_helper_funcs,
+ bool feeds_txp);
+int vc4_crtc_init(struct drm_device *drm, struct platform_device *pdev,
+ struct vc4_crtc *vc4_crtc, const struct vc4_crtc_data *data,
const struct drm_crtc_funcs *crtc_funcs,
- const struct drm_crtc_helper_funcs *crtc_helper_funcs);
+ const struct drm_crtc_helper_funcs *crtc_helper_funcs,
+ bool feeds_txp);
int vc4_page_flip(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_pending_vblank_event *event,
uint32_t flags,
struct drm_modeset_acquire_ctx *ctx);
+int vc4_crtc_atomic_check(struct drm_crtc *crtc,
+ struct drm_atomic_state *state);
struct drm_crtc_state *vc4_crtc_duplicate_state(struct drm_crtc *crtc);
void vc4_crtc_destroy_state(struct drm_crtc *crtc,
struct drm_crtc_state *state);
@@ -884,28 +966,15 @@ void vc4_crtc_get_margins(struct drm_crtc_state *state,
/* vc4_debugfs.c */
void vc4_debugfs_init(struct drm_minor *minor);
#ifdef CONFIG_DEBUG_FS
-int vc4_debugfs_add_file(struct drm_minor *minor,
- const char *filename,
- int (*show)(struct seq_file*, void*),
- void *data);
-int vc4_debugfs_add_regset32(struct drm_minor *minor,
- const char *filename,
- struct debugfs_regset32 *regset);
+void vc4_debugfs_add_regset32(struct drm_device *drm,
+ const char *filename,
+ struct debugfs_regset32 *regset);
#else
-static inline int vc4_debugfs_add_file(struct drm_minor *minor,
- const char *filename,
- int (*show)(struct seq_file*, void*),
- void *data)
-{
- return 0;
-}
-static inline int vc4_debugfs_add_regset32(struct drm_minor *minor,
- const char *filename,
- struct debugfs_regset32 *regset)
-{
- return 0;
-}
+static inline void vc4_debugfs_add_regset32(struct drm_device *drm,
+ const char *filename,
+ struct debugfs_regset32 *regset)
+{}
#endif
/* vc4_drv.c */
@@ -959,6 +1028,7 @@ void vc4_irq_reset(struct drm_device *dev);
/* vc4_hvs.c */
extern struct platform_driver vc4_hvs_driver;
+struct vc4_hvs *__vc4_hvs_alloc(struct vc4_dev *vc4, struct platform_device *pdev);
void vc4_hvs_stop_channel(struct vc4_hvs *hvs, unsigned int output);
int vc4_hvs_get_fifo_from_output(struct vc4_hvs *hvs, unsigned int output);
u8 vc4_hvs_get_fifo_frame_count(struct vc4_hvs *hvs, unsigned int fifo);
diff --git a/drivers/gpu/drm/vc4/vc4_dsi.c b/drivers/gpu/drm/vc4/vc4_dsi.c
index 878e05d79e81..a5c075f802e4 100644
--- a/drivers/gpu/drm/vc4/vc4_dsi.c
+++ b/drivers/gpu/drm/vc4/vc4_dsi.c
@@ -24,7 +24,6 @@
#include <linux/component.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
-#include <linux/i2c.h>
#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
@@ -556,8 +555,8 @@ struct vc4_dsi {
struct platform_device *pdev;
- struct drm_bridge *bridge;
- struct list_head bridge_chain;
+ struct drm_bridge *out_bridge;
+ struct drm_bridge bridge;
void __iomem *regs;
@@ -609,6 +608,12 @@ to_vc4_dsi(struct drm_encoder *encoder)
return container_of(encoder, struct vc4_dsi, encoder.base);
}
+static inline struct vc4_dsi *
+bridge_to_vc4_dsi(struct drm_bridge *bridge)
+{
+ return container_of(bridge, struct vc4_dsi, bridge);
+}
+
static inline void
dsi_dma_workaround_write(struct vc4_dsi *dsi, u32 offset, u32 val)
{
@@ -617,6 +622,8 @@ dsi_dma_workaround_write(struct vc4_dsi *dsi, u32 offset, u32 val)
dma_cookie_t cookie;
int ret;
+ kunit_fail_current_test("Accessing a register in a unit test!\n");
+
/* DSI0 should be able to write normally. */
if (!chan) {
writel(val, dsi->regs + offset);
@@ -645,7 +652,12 @@ dsi_dma_workaround_write(struct vc4_dsi *dsi, u32 offset, u32 val)
DRM_ERROR("Failed to wait for DMA: %d\n", ret);
}
-#define DSI_READ(offset) readl(dsi->regs + (offset))
+#define DSI_READ(offset) \
+ ({ \
+ kunit_fail_current_test("Accessing a register in a unit test!\n"); \
+ readl(dsi->regs + (offset)); \
+ })
+
#define DSI_WRITE(offset, val) dsi_dma_workaround_write(dsi, offset, val)
#define DSI_PORT_READ(offset) \
DSI_READ(dsi->variant->port ? DSI1_##offset : DSI0_##offset)
@@ -790,26 +802,22 @@ dsi_esc_timing(u32 ns)
return DIV_ROUND_UP(ns, ESC_TIME_NS);
}
-static void vc4_dsi_encoder_disable(struct drm_encoder *encoder)
+static void vc4_dsi_bridge_disable(struct drm_bridge *bridge,
+ struct drm_bridge_state *state)
{
- struct vc4_dsi *dsi = to_vc4_dsi(encoder);
- struct device *dev = &dsi->pdev->dev;
- struct drm_bridge *iter;
-
- list_for_each_entry_reverse(iter, &dsi->bridge_chain, chain_node) {
- if (iter->funcs->disable)
- iter->funcs->disable(iter);
+ struct vc4_dsi *dsi = bridge_to_vc4_dsi(bridge);
+ u32 disp0_ctrl;
- if (iter == dsi->bridge)
- break;
- }
-
- vc4_dsi_ulps(dsi, true);
+ disp0_ctrl = DSI_PORT_READ(DISP0_CTRL);
+ disp0_ctrl &= ~DSI_DISP0_ENABLE;
+ DSI_PORT_WRITE(DISP0_CTRL, disp0_ctrl);
+}
- list_for_each_entry_from(iter, &dsi->bridge_chain, chain_node) {
- if (iter->funcs->post_disable)
- iter->funcs->post_disable(iter);
- }
+static void vc4_dsi_bridge_post_disable(struct drm_bridge *bridge,
+ struct drm_bridge_state *state)
+{
+ struct vc4_dsi *dsi = bridge_to_vc4_dsi(bridge);
+ struct device *dev = &dsi->pdev->dev;
clk_disable_unprepare(dsi->pll_phy_clock);
clk_disable_unprepare(dsi->escape_clock);
@@ -831,11 +839,11 @@ static void vc4_dsi_encoder_disable(struct drm_encoder *encoder)
* higher-than-expected clock rate to the panel, but that's what the
* firmware does too.
*/
-static bool vc4_dsi_encoder_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+static bool vc4_dsi_bridge_mode_fixup(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
{
- struct vc4_dsi *dsi = to_vc4_dsi(encoder);
+ struct vc4_dsi *dsi = bridge_to_vc4_dsi(bridge);
struct clk *phy_parent = clk_get_parent(dsi->pll_phy_clock);
unsigned long parent_rate = clk_get_rate(phy_parent);
unsigned long pixel_clock_hz = mode->clock * 1000;
@@ -867,18 +875,22 @@ static bool vc4_dsi_encoder_mode_fixup(struct drm_encoder *encoder,
return true;
}
-static void vc4_dsi_encoder_enable(struct drm_encoder *encoder)
+static void vc4_dsi_bridge_pre_enable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_state)
{
- struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
- struct vc4_dsi *dsi = to_vc4_dsi(encoder);
+ struct drm_atomic_state *state = old_state->base.state;
+ struct vc4_dsi *dsi = bridge_to_vc4_dsi(bridge);
+ const struct drm_crtc_state *crtc_state;
struct device *dev = &dsi->pdev->dev;
+ const struct drm_display_mode *mode;
+ struct drm_connector *connector;
bool debug_dump_regs = false;
- struct drm_bridge *iter;
unsigned long hs_clock;
+ struct drm_crtc *crtc;
u32 ui_ns;
/* Minimum LP state duration in escape clock cycles. */
u32 lpx = dsi_esc_timing(60);
- unsigned long pixel_clock_hz = mode->clock * 1000;
+ unsigned long pixel_clock_hz;
unsigned long dsip_clock;
unsigned long phy_clock;
int ret;
@@ -895,6 +907,18 @@ static void vc4_dsi_encoder_enable(struct drm_encoder *encoder)
drm_print_regset32(&p, &dsi->regset);
}
+ /*
+ * Retrieve the CRTC adjusted mode. This requires a little dance to go
+ * from the bridge to the encoder, to the connector and to the CRTC.
+ */
+ connector = drm_atomic_get_new_connector_for_encoder(state,
+ bridge->encoder);
+ crtc = drm_atomic_get_new_connector_state(state, connector)->crtc;
+ crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+ mode = &crtc_state->adjusted_mode;
+
+ pixel_clock_hz = mode->clock * 1000;
+
/* Round up the clk_set_rate() request slightly, since
* PLLD_DSI1 is an integer divider and its rate selection will
* never round up.
@@ -1106,11 +1130,6 @@ static void vc4_dsi_encoder_enable(struct drm_encoder *encoder)
vc4_dsi_ulps(dsi, false);
- list_for_each_entry_reverse(iter, &dsi->bridge_chain, chain_node) {
- if (iter->funcs->pre_enable)
- iter->funcs->pre_enable(iter);
- }
-
if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO) {
DSI_PORT_WRITE(DISP0_CTRL,
VC4_SET_FIELD(dsi->divider,
@@ -1118,18 +1137,23 @@ static void vc4_dsi_encoder_enable(struct drm_encoder *encoder)
VC4_SET_FIELD(dsi->format, DSI_DISP0_PFORMAT) |
VC4_SET_FIELD(DSI_DISP0_LP_STOP_PERFRAME,
DSI_DISP0_LP_STOP_CTRL) |
- DSI_DISP0_ST_END |
- DSI_DISP0_ENABLE);
+ DSI_DISP0_ST_END);
} else {
DSI_PORT_WRITE(DISP0_CTRL,
- DSI_DISP0_COMMAND_MODE |
- DSI_DISP0_ENABLE);
+ DSI_DISP0_COMMAND_MODE);
}
+}
- list_for_each_entry(iter, &dsi->bridge_chain, chain_node) {
- if (iter->funcs->enable)
- iter->funcs->enable(iter);
- }
+static void vc4_dsi_bridge_enable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_state)
+{
+ struct vc4_dsi *dsi = bridge_to_vc4_dsi(bridge);
+ bool debug_dump_regs = false;
+ u32 disp0_ctrl;
+
+ disp0_ctrl = DSI_PORT_READ(DISP0_CTRL);
+ disp0_ctrl |= DSI_DISP0_ENABLE;
+ DSI_PORT_WRITE(DISP0_CTRL, disp0_ctrl);
if (debug_dump_regs) {
struct drm_printer p = drm_info_printer(&dsi->pdev->dev);
@@ -1138,6 +1162,16 @@ static void vc4_dsi_encoder_enable(struct drm_encoder *encoder)
}
}
+static int vc4_dsi_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
+{
+ struct vc4_dsi *dsi = bridge_to_vc4_dsi(bridge);
+
+ /* Attach the panel or bridge to the dsi bridge */
+ return drm_bridge_attach(bridge->encoder, dsi->out_bridge,
+ &dsi->bridge, flags);
+}
+
static ssize_t vc4_dsi_host_transfer(struct mipi_dsi_host *host,
const struct mipi_dsi_msg *msg)
{
@@ -1314,6 +1348,7 @@ static int vc4_dsi_host_attach(struct mipi_dsi_host *host,
struct mipi_dsi_device *device)
{
struct vc4_dsi *dsi = host_to_dsi(host);
+ int ret;
dsi->lanes = device->lanes;
dsi->channel = device->channel;
@@ -1348,7 +1383,15 @@ static int vc4_dsi_host_attach(struct mipi_dsi_host *host,
return 0;
}
- return component_add(&dsi->pdev->dev, &vc4_dsi_ops);
+ drm_bridge_add(&dsi->bridge);
+
+ ret = component_add(&dsi->pdev->dev, &vc4_dsi_ops);
+ if (ret) {
+ drm_bridge_remove(&dsi->bridge);
+ return ret;
+ }
+
+ return 0;
}
static int vc4_dsi_host_detach(struct mipi_dsi_host *host,
@@ -1357,6 +1400,7 @@ static int vc4_dsi_host_detach(struct mipi_dsi_host *host,
struct vc4_dsi *dsi = host_to_dsi(host);
component_del(&dsi->pdev->dev, &vc4_dsi_ops);
+ drm_bridge_remove(&dsi->bridge);
return 0;
}
@@ -1366,22 +1410,24 @@ static const struct mipi_dsi_host_ops vc4_dsi_host_ops = {
.transfer = vc4_dsi_host_transfer,
};
-static const struct drm_encoder_helper_funcs vc4_dsi_encoder_helper_funcs = {
- .disable = vc4_dsi_encoder_disable,
- .enable = vc4_dsi_encoder_enable,
- .mode_fixup = vc4_dsi_encoder_mode_fixup,
+static const struct drm_bridge_funcs vc4_dsi_bridge_funcs = {
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
+ .atomic_pre_enable = vc4_dsi_bridge_pre_enable,
+ .atomic_enable = vc4_dsi_bridge_enable,
+ .atomic_disable = vc4_dsi_bridge_disable,
+ .atomic_post_disable = vc4_dsi_bridge_post_disable,
+ .attach = vc4_dsi_bridge_attach,
+ .mode_fixup = vc4_dsi_bridge_mode_fixup,
};
static int vc4_dsi_late_register(struct drm_encoder *encoder)
{
struct drm_device *drm = encoder->dev;
struct vc4_dsi *dsi = to_vc4_dsi(encoder);
- int ret;
- ret = vc4_debugfs_add_regset32(drm->primary, dsi->variant->debugfs_name,
- &dsi->regset);
- if (ret)
- return ret;
+ vc4_debugfs_add_regset32(drm, dsi->variant->debugfs_name, &dsi->regset);
return 0;
}
@@ -1617,7 +1663,6 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
dsi->variant = of_device_get_match_data(dev);
- INIT_LIST_HEAD(&dsi->bridge_chain);
dsi->encoder.type = dsi->variant->port ?
VC4_ENCODER_TYPE_DSI1 : VC4_ENCODER_TYPE_DSI0;
@@ -1723,9 +1768,9 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
return ret;
}
- dsi->bridge = drmm_of_get_bridge(drm, dev->of_node, 0, 0);
- if (IS_ERR(dsi->bridge))
- return PTR_ERR(dsi->bridge);
+ dsi->out_bridge = drmm_of_get_bridge(drm, dev->of_node, 0, 0);
+ if (IS_ERR(dsi->out_bridge))
+ return PTR_ERR(dsi->out_bridge);
/* The esc clock rate is supposed to always be 100Mhz. */
ret = clk_set_rate(dsi->escape_clock, 100 * 1000000);
@@ -1745,41 +1790,19 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
if (ret)
return ret;
- drm_encoder_helper_add(encoder, &vc4_dsi_encoder_helper_funcs);
-
ret = devm_pm_runtime_enable(dev);
if (ret)
return ret;
- ret = drm_bridge_attach(encoder, dsi->bridge, NULL, 0);
+ ret = drm_bridge_attach(encoder, &dsi->bridge, NULL, 0);
if (ret)
return ret;
- /* Disable the atomic helper calls into the bridge. We
- * manually call the bridge pre_enable / enable / etc. calls
- * from our driver, since we need to sequence them within the
- * encoder's enable/disable paths.
- */
- list_splice_init(&encoder->bridge_chain, &dsi->bridge_chain);
return 0;
}
-static void vc4_dsi_unbind(struct device *dev, struct device *master,
- void *data)
-{
- struct vc4_dsi *dsi = dev_get_drvdata(dev);
- struct drm_encoder *encoder = &dsi->encoder.base;
-
- /*
- * Restore the bridge_chain so the bridge detach procedure can happen
- * normally.
- */
- list_splice_init(&dsi->bridge_chain, &encoder->bridge_chain);
-}
-
static const struct component_ops vc4_dsi_ops = {
.bind = vc4_dsi_bind,
- .unbind = vc4_dsi_unbind,
};
static int vc4_dsi_dev_probe(struct platform_device *pdev)
@@ -1793,7 +1816,13 @@ static int vc4_dsi_dev_probe(struct platform_device *pdev)
dev_set_drvdata(dev, dsi);
kref_init(&dsi->kref);
+
dsi->pdev = pdev;
+ dsi->bridge.funcs = &vc4_dsi_bridge_funcs;
+#ifdef CONFIG_OF
+ dsi->bridge.of_node = dev->of_node;
+#endif
+ dsi->bridge.type = DRM_MODE_CONNECTOR_DSI;
dsi->dsi_host.ops = &vc4_dsi_host_ops;
dsi->dsi_host.dev = dev;
mipi_dsi_host_register(&dsi->dsi_host);
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index 12a00d644b61..ea22c9bf223a 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -97,6 +97,10 @@
#define VC5_HDMI_GCP_WORD_1_GCP_SUBPACKET_BYTE_1_SHIFT 8
#define VC5_HDMI_GCP_WORD_1_GCP_SUBPACKET_BYTE_1_MASK VC4_MASK(15, 8)
+#define VC5_HDMI_GCP_WORD_1_GCP_SUBPACKET_BYTE_0_MASK VC4_MASK(7, 0)
+#define VC5_HDMI_GCP_WORD_1_GCP_SUBPACKET_BYTE_0_SET_AVMUTE BIT(0)
+#define VC5_HDMI_GCP_WORD_1_GCP_SUBPACKET_BYTE_0_CLEAR_AVMUTE BIT(4)
+
# define VC4_HD_M_SW_RST BIT(2)
# define VC4_HD_M_ENABLE BIT(0)
@@ -160,8 +164,8 @@ static bool vc4_hdmi_is_full_range_rgb(struct vc4_hdmi *vc4_hdmi,
static int vc4_hdmi_debugfs_regs(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = (struct drm_info_node *)m->private;
- struct vc4_hdmi *vc4_hdmi = node->info_ent->data;
+ struct drm_debugfs_entry *entry = m->private;
+ struct vc4_hdmi *vc4_hdmi = entry->file.data;
struct drm_device *drm = vc4_hdmi->connector.dev;
struct drm_printer p = drm_seq_file_printer(m);
int idx;
@@ -402,6 +406,7 @@ static void vc4_hdmi_handle_hotplug(struct vc4_hdmi *vc4_hdmi,
{
struct drm_connector *connector = &vc4_hdmi->connector;
struct edid *edid;
+ int ret;
/*
* NOTE: This function should really be called with
@@ -430,7 +435,15 @@ static void vc4_hdmi_handle_hotplug(struct vc4_hdmi *vc4_hdmi,
cec_s_phys_addr_from_edid(vc4_hdmi->cec_adap, edid);
kfree(edid);
- vc4_hdmi_reset_link(connector, ctx);
+ for (;;) {
+ ret = vc4_hdmi_reset_link(connector, ctx);
+ if (ret == -EDEADLK) {
+ drm_modeset_backoff(ctx);
+ continue;
+ }
+
+ break;
+ }
}
static int vc4_hdmi_connector_detect_ctx(struct drm_connector *connector,
@@ -1298,15 +1311,15 @@ static void vc5_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi,
VC4_SET_FIELD(mode->crtc_vdisplay, VC5_HDMI_VERTA_VAL));
u32 vertb = (VC4_SET_FIELD(mode->htotal >> (2 - pixel_rep),
VC5_HDMI_VERTB_VSPO) |
- VC4_SET_FIELD(mode->crtc_vtotal - mode->crtc_vsync_end,
+ VC4_SET_FIELD(mode->crtc_vtotal - mode->crtc_vsync_end +
+ interlaced,
VC4_HDMI_VERTB_VBP));
u32 vertb_even = (VC4_SET_FIELD(0, VC5_HDMI_VERTB_VSPO) |
VC4_SET_FIELD(mode->crtc_vtotal -
- mode->crtc_vsync_end - interlaced,
+ mode->crtc_vsync_end,
VC4_HDMI_VERTB_VBP));
unsigned long flags;
unsigned char gcp;
- bool gcp_en;
u32 reg;
int idx;
@@ -1341,16 +1354,13 @@ static void vc5_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi,
switch (vc4_state->output_bpc) {
case 12:
gcp = 6;
- gcp_en = true;
break;
case 10:
gcp = 5;
- gcp_en = true;
break;
case 8:
default:
- gcp = 4;
- gcp_en = false;
+ gcp = 0;
break;
}
@@ -1359,8 +1369,7 @@ static void vc5_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi,
* doesn't signal in GCP.
*/
if (vc4_state->output_format == VC4_HDMI_OUTPUT_YUV422) {
- gcp = 4;
- gcp_en = false;
+ gcp = 0;
}
reg = HDMI_READ(HDMI_DEEP_COLOR_CONFIG_1);
@@ -1373,11 +1382,12 @@ static void vc5_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi,
reg = HDMI_READ(HDMI_GCP_WORD_1);
reg &= ~VC5_HDMI_GCP_WORD_1_GCP_SUBPACKET_BYTE_1_MASK;
reg |= VC4_SET_FIELD(gcp, VC5_HDMI_GCP_WORD_1_GCP_SUBPACKET_BYTE_1);
+ reg &= ~VC5_HDMI_GCP_WORD_1_GCP_SUBPACKET_BYTE_0_MASK;
+ reg |= VC5_HDMI_GCP_WORD_1_GCP_SUBPACKET_BYTE_0_CLEAR_AVMUTE;
HDMI_WRITE(HDMI_GCP_WORD_1, reg);
reg = HDMI_READ(HDMI_GCP_CONFIG);
- reg &= ~VC5_HDMI_GCP_CONFIG_GCP_ENABLE;
- reg |= gcp_en ? VC5_HDMI_GCP_CONFIG_GCP_ENABLE : 0;
+ reg |= VC5_HDMI_GCP_CONFIG_GCP_ENABLE;
HDMI_WRITE(HDMI_GCP_CONFIG, reg);
reg = HDMI_READ(HDMI_MISC_CONTROL);
@@ -1995,13 +2005,9 @@ static int vc4_hdmi_late_register(struct drm_encoder *encoder)
struct drm_device *drm = encoder->dev;
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
const struct vc4_hdmi_variant *variant = vc4_hdmi->variant;
- int ret;
- ret = vc4_debugfs_add_file(drm->primary, variant->debugfs_name,
- vc4_hdmi_debugfs_regs,
- vc4_hdmi);
- if (ret)
- return ret;
+ drm_debugfs_add_file(drm, variant->debugfs_name,
+ vc4_hdmi_debugfs_regs, vc4_hdmi);
return 0;
}
@@ -3018,7 +3024,8 @@ static int vc4_hdmi_cec_init(struct vc4_hdmi *vc4_hdmi)
}
vc4_hdmi->cec_adap = cec_allocate_adapter(&vc4_hdmi_cec_adap_ops,
- vc4_hdmi, "vc4",
+ vc4_hdmi,
+ vc4_hdmi->variant->card_name,
CEC_CAP_DEFAULTS |
CEC_CAP_CONNECTOR_INFO, 1);
ret = PTR_ERR_OR_ZERO(vc4_hdmi->cec_adap);
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi_regs.h b/drivers/gpu/drm/vc4/vc4_hdmi_regs.h
index 48db438550b1..b04b2fc8d831 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi_regs.h
+++ b/drivers/gpu/drm/vc4/vc4_hdmi_regs.h
@@ -456,6 +456,8 @@ static inline u32 vc4_hdmi_read(struct vc4_hdmi *hdmi,
WARN_ON(pm_runtime_status_suspended(&hdmi->pdev->dev));
+ kunit_fail_current_test("Accessing an HDMI register in a unit test!\n");
+
if (reg >= variant->num_registers) {
dev_warn(&hdmi->pdev->dev,
"Invalid register ID %u\n", reg);
@@ -486,6 +488,8 @@ static inline void vc4_hdmi_write(struct vc4_hdmi *hdmi,
WARN_ON(pm_runtime_status_suspended(&hdmi->pdev->dev));
+ kunit_fail_current_test("Accessing an HDMI register in a unit test!\n");
+
if (reg >= variant->num_registers) {
dev_warn(&hdmi->pdev->dev,
"Invalid register ID %u\n", reg);
diff --git a/drivers/gpu/drm/vc4/vc4_hvs.c b/drivers/gpu/drm/vc4/vc4_hvs.c
index c4453a5ae163..4da66ef96783 100644
--- a/drivers/gpu/drm/vc4/vc4_hvs.c
+++ b/drivers/gpu/drm/vc4/vc4_hvs.c
@@ -93,8 +93,8 @@ void vc4_hvs_dump_state(struct vc4_hvs *hvs)
static int vc4_hvs_debugfs_underrun(struct seq_file *m, void *data)
{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
+ struct drm_debugfs_entry *entry = m->private;
+ struct drm_device *dev = entry->dev;
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct drm_printer p = drm_seq_file_printer(m);
@@ -105,8 +105,8 @@ static int vc4_hvs_debugfs_underrun(struct seq_file *m, void *data)
static int vc4_hvs_debugfs_dlist(struct seq_file *m, void *data)
{
- struct drm_info_node *node = m->private;
- struct drm_device *dev = node->minor->dev;
+ struct drm_debugfs_entry *entry = m->private;
+ struct drm_device *dev = entry->dev;
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct vc4_hvs *hvs = vc4->hvs;
struct drm_printer p = drm_seq_file_printer(m);
@@ -370,28 +370,30 @@ static int vc4_hvs_init_channel(struct vc4_hvs *hvs, struct drm_crtc *crtc,
* mode.
*/
dispctrl = SCALER_DISPCTRLX_ENABLE;
+ dispbkgndx = HVS_READ(SCALER_DISPBKGNDX(chan));
- if (!vc4->is_vc5)
+ if (!vc4->is_vc5) {
dispctrl |= VC4_SET_FIELD(mode->hdisplay,
SCALER_DISPCTRLX_WIDTH) |
VC4_SET_FIELD(mode->vdisplay,
SCALER_DISPCTRLX_HEIGHT) |
(oneshot ? SCALER_DISPCTRLX_ONESHOT : 0);
- else
+ dispbkgndx |= SCALER_DISPBKGND_AUTOHS;
+ } else {
dispctrl |= VC4_SET_FIELD(mode->hdisplay,
SCALER5_DISPCTRLX_WIDTH) |
VC4_SET_FIELD(mode->vdisplay,
SCALER5_DISPCTRLX_HEIGHT) |
(oneshot ? SCALER5_DISPCTRLX_ONESHOT : 0);
+ dispbkgndx &= ~SCALER5_DISPBKGND_BCK2BCK;
+ }
HVS_WRITE(SCALER_DISPCTRLX(chan), dispctrl);
- dispbkgndx = HVS_READ(SCALER_DISPBKGNDX(chan));
dispbkgndx &= ~SCALER_DISPBKGND_GAMMA;
dispbkgndx &= ~SCALER_DISPBKGND_INTERLACE;
HVS_WRITE(SCALER_DISPBKGNDX(chan), dispbkgndx |
- SCALER_DISPBKGND_AUTOHS |
((!vc4->is_vc5) ? SCALER_DISPBKGND_GAMMA : 0) |
(interlace ? SCALER_DISPBKGND_INTERLACE : 0));
@@ -568,6 +570,8 @@ void vc4_hvs_atomic_flush(struct drm_crtc *crtc,
bool enable_bg_fill = false;
u32 __iomem *dlist_start = vc4->hvs->dlist + vc4_state->mm.start;
u32 __iomem *dlist_next = dlist_start;
+ unsigned int zpos = 0;
+ bool found = false;
int idx;
if (!drm_dev_enter(dev, &idx)) {
@@ -575,29 +579,43 @@ void vc4_hvs_atomic_flush(struct drm_crtc *crtc,
return;
}
+ if (vc4_state->assigned_channel == VC4_HVS_CHANNEL_DISABLED)
+ return;
+
if (debug_dump_regs) {
DRM_INFO("CRTC %d HVS before:\n", drm_crtc_index(crtc));
vc4_hvs_dump_state(hvs);
}
/* Copy all the active planes' dlist contents to the hardware dlist. */
- drm_atomic_crtc_for_each_plane(plane, crtc) {
- /* Is this the first active plane? */
- if (dlist_next == dlist_start) {
- /* We need to enable background fill when a plane
- * could be alpha blending from the background, i.e.
- * where no other plane is underneath. It suffices to
- * consider the first active plane here since we set
- * needs_bg_fill such that either the first plane
- * already needs it or all planes on top blend from
- * the first or a lower plane.
- */
- vc4_plane_state = to_vc4_plane_state(plane->state);
- enable_bg_fill = vc4_plane_state->needs_bg_fill;
+ do {
+ found = false;
+
+ drm_atomic_crtc_for_each_plane(plane, crtc) {
+ if (plane->state->normalized_zpos != zpos)
+ continue;
+
+ /* Is this the first active plane? */
+ if (dlist_next == dlist_start) {
+ /* We need to enable background fill when a plane
+ * could be alpha blending from the background, i.e.
+ * where no other plane is underneath. It suffices to
+ * consider the first active plane here since we set
+ * needs_bg_fill such that either the first plane
+ * already needs it or all planes on top blend from
+ * the first or a lower plane.
+ */
+ vc4_plane_state = to_vc4_plane_state(plane->state);
+ enable_bg_fill = vc4_plane_state->needs_bg_fill;
+ }
+
+ dlist_next += vc4_plane_write_dlist(plane, dlist_next);
+
+ found = true;
}
- dlist_next += vc4_plane_write_dlist(plane, dlist_next);
- }
+ zpos++;
+ } while (found);
writel(SCALER_CTL0_END, dlist_next);
dlist_next++;
@@ -658,7 +676,8 @@ void vc4_hvs_mask_underrun(struct vc4_hvs *hvs, int channel)
return;
dispctrl = HVS_READ(SCALER_DISPCTRL);
- dispctrl &= ~SCALER_DISPCTRL_DSPEISLUR(channel);
+ dispctrl &= ~(hvs->vc4->is_vc5 ? SCALER5_DISPCTRL_DSPEISLUR(channel) :
+ SCALER_DISPCTRL_DSPEISLUR(channel));
HVS_WRITE(SCALER_DISPCTRL, dispctrl);
@@ -675,7 +694,8 @@ void vc4_hvs_unmask_underrun(struct vc4_hvs *hvs, int channel)
return;
dispctrl = HVS_READ(SCALER_DISPCTRL);
- dispctrl |= SCALER_DISPCTRL_DSPEISLUR(channel);
+ dispctrl |= (hvs->vc4->is_vc5 ? SCALER5_DISPCTRL_DSPEISLUR(channel) :
+ SCALER_DISPCTRL_DSPEISLUR(channel));
HVS_WRITE(SCALER_DISPSTAT,
SCALER_DISPSTAT_EUFLOW(channel));
@@ -701,6 +721,7 @@ static irqreturn_t vc4_hvs_irq_handler(int irq, void *data)
int channel;
u32 control;
u32 status;
+ u32 dspeislur;
/*
* NOTE: We don't need to protect the register access using
@@ -717,9 +738,11 @@ static irqreturn_t vc4_hvs_irq_handler(int irq, void *data)
control = HVS_READ(SCALER_DISPCTRL);
for (channel = 0; channel < SCALER_CHANNELS_COUNT; channel++) {
+ dspeislur = vc4->is_vc5 ? SCALER5_DISPCTRL_DSPEISLUR(channel) :
+ SCALER_DISPCTRL_DSPEISLUR(channel);
/* Interrupt masking is not always honored, so check it here. */
if (status & SCALER_DISPSTAT_EUFLOW(channel) &&
- control & SCALER_DISPCTRL_DSPEISLUR(channel)) {
+ control & dspeislur) {
vc4_hvs_mask_underrun(hvs, channel);
vc4_hvs_report_underrun(dev);
@@ -740,7 +763,6 @@ int vc4_hvs_debugfs_init(struct drm_minor *minor)
struct drm_device *drm = minor->dev;
struct vc4_dev *vc4 = to_vc4_dev(drm);
struct vc4_hvs *hvs = vc4->hvs;
- int ret;
if (!vc4->hvs)
return -ENODEV;
@@ -750,24 +772,55 @@ int vc4_hvs_debugfs_init(struct drm_minor *minor)
minor->debugfs_root,
&vc4->load_tracker_enabled);
- ret = vc4_debugfs_add_file(minor, "hvs_dlists",
- vc4_hvs_debugfs_dlist, NULL);
- if (ret)
- return ret;
+ drm_debugfs_add_file(drm, "hvs_dlists", vc4_hvs_debugfs_dlist, NULL);
- ret = vc4_debugfs_add_file(minor, "hvs_underrun",
- vc4_hvs_debugfs_underrun, NULL);
- if (ret)
- return ret;
+ drm_debugfs_add_file(drm, "hvs_underrun", vc4_hvs_debugfs_underrun, NULL);
- ret = vc4_debugfs_add_regset32(minor, "hvs_regs",
- &hvs->regset);
- if (ret)
- return ret;
+ vc4_debugfs_add_regset32(drm, "hvs_regs", &hvs->regset);
return 0;
}
+struct vc4_hvs *__vc4_hvs_alloc(struct vc4_dev *vc4, struct platform_device *pdev)
+{
+ struct drm_device *drm = &vc4->base;
+ struct vc4_hvs *hvs;
+
+ hvs = drmm_kzalloc(drm, sizeof(*hvs), GFP_KERNEL);
+ if (!hvs)
+ return ERR_PTR(-ENOMEM);
+
+ hvs->vc4 = vc4;
+ hvs->pdev = pdev;
+
+ spin_lock_init(&hvs->mm_lock);
+
+ /* Set up the HVS display list memory manager. We never
+ * overwrite the setup from the bootloader (just 128b out of
+ * our 16K), since we don't want to scramble the screen when
+ * transitioning from the firmware's boot setup to runtime.
+ */
+ drm_mm_init(&hvs->dlist_mm,
+ HVS_BOOTLOADER_DLIST_END,
+ (SCALER_DLIST_SIZE >> 2) - HVS_BOOTLOADER_DLIST_END);
+
+ /* Set up the HVS LBM memory manager. We could have some more
+ * complicated data structure that allowed reuse of LBM areas
+ * between planes when they don't overlap on the screen, but
+ * for now we just allocate globally.
+ */
+ if (!vc4->is_vc5)
+ /* 48k words of 2x12-bit pixels */
+ drm_mm_init(&hvs->lbm_mm, 0, 48 * 1024);
+ else
+ /* 60k words of 4x12-bit pixels */
+ drm_mm_init(&hvs->lbm_mm, 0, 60 * 1024);
+
+ vc4->hvs = hvs;
+
+ return hvs;
+}
+
static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
{
struct platform_device *pdev = to_platform_device(dev);
@@ -776,13 +829,11 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
struct vc4_hvs *hvs = NULL;
int ret;
u32 dispctrl;
- u32 reg;
+ u32 reg, top;
- hvs = drmm_kzalloc(drm, sizeof(*hvs), GFP_KERNEL);
- if (!hvs)
- return -ENOMEM;
- hvs->vc4 = vc4;
- hvs->pdev = pdev;
+ hvs = __vc4_hvs_alloc(vc4, NULL);
+ if (IS_ERR(hvs))
+ return PTR_ERR(hvs);
hvs->regs = vc4_ioremap_regs(pdev, 0);
if (IS_ERR(hvs->regs))
@@ -835,29 +886,6 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
else
hvs->dlist = hvs->regs + SCALER5_DLIST_START;
- spin_lock_init(&hvs->mm_lock);
-
- /* Set up the HVS display list memory manager. We never
- * overwrite the setup from the bootloader (just 128b out of
- * our 16K), since we don't want to scramble the screen when
- * transitioning from the firmware's boot setup to runtime.
- */
- drm_mm_init(&hvs->dlist_mm,
- HVS_BOOTLOADER_DLIST_END,
- (SCALER_DLIST_SIZE >> 2) - HVS_BOOTLOADER_DLIST_END);
-
- /* Set up the HVS LBM memory manager. We could have some more
- * complicated data structure that allowed reuse of LBM areas
- * between planes when they don't overlap on the screen, but
- * for now we just allocate globally.
- */
- if (!vc4->is_vc5)
- /* 48k words of 2x12-bit pixels */
- drm_mm_init(&hvs->lbm_mm, 0, 48 * 1024);
- else
- /* 60k words of 4x12-bit pixels */
- drm_mm_init(&hvs->lbm_mm, 0, 60 * 1024);
-
/* Upload filter kernels. We only have the one for now, so we
* keep it around for the lifetime of the driver.
*/
@@ -867,8 +895,6 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
if (ret)
return ret;
- vc4->hvs = hvs;
-
reg = HVS_READ(SCALER_DISPECTRL);
reg &= ~SCALER_DISPECTRL_DSP2_MUX_MASK;
HVS_WRITE(SCALER_DISPECTRL,
@@ -896,22 +922,102 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
SCALER_DISPCTRL_DISPEIRQ(1) |
SCALER_DISPCTRL_DISPEIRQ(2);
- dispctrl &= ~(SCALER_DISPCTRL_DMAEIRQ |
- SCALER_DISPCTRL_SLVWREIRQ |
- SCALER_DISPCTRL_SLVRDEIRQ |
- SCALER_DISPCTRL_DSPEIEOF(0) |
- SCALER_DISPCTRL_DSPEIEOF(1) |
- SCALER_DISPCTRL_DSPEIEOF(2) |
- SCALER_DISPCTRL_DSPEIEOLN(0) |
- SCALER_DISPCTRL_DSPEIEOLN(1) |
- SCALER_DISPCTRL_DSPEIEOLN(2) |
- SCALER_DISPCTRL_DSPEISLUR(0) |
- SCALER_DISPCTRL_DSPEISLUR(1) |
- SCALER_DISPCTRL_DSPEISLUR(2) |
- SCALER_DISPCTRL_SCLEIRQ);
+ if (!vc4->is_vc5)
+ dispctrl &= ~(SCALER_DISPCTRL_DMAEIRQ |
+ SCALER_DISPCTRL_SLVWREIRQ |
+ SCALER_DISPCTRL_SLVRDEIRQ |
+ SCALER_DISPCTRL_DSPEIEOF(0) |
+ SCALER_DISPCTRL_DSPEIEOF(1) |
+ SCALER_DISPCTRL_DSPEIEOF(2) |
+ SCALER_DISPCTRL_DSPEIEOLN(0) |
+ SCALER_DISPCTRL_DSPEIEOLN(1) |
+ SCALER_DISPCTRL_DSPEIEOLN(2) |
+ SCALER_DISPCTRL_DSPEISLUR(0) |
+ SCALER_DISPCTRL_DSPEISLUR(1) |
+ SCALER_DISPCTRL_DSPEISLUR(2) |
+ SCALER_DISPCTRL_SCLEIRQ);
+ else
+ dispctrl &= ~(SCALER_DISPCTRL_DMAEIRQ |
+ SCALER5_DISPCTRL_SLVEIRQ |
+ SCALER5_DISPCTRL_DSPEIEOF(0) |
+ SCALER5_DISPCTRL_DSPEIEOF(1) |
+ SCALER5_DISPCTRL_DSPEIEOF(2) |
+ SCALER5_DISPCTRL_DSPEIEOLN(0) |
+ SCALER5_DISPCTRL_DSPEIEOLN(1) |
+ SCALER5_DISPCTRL_DSPEIEOLN(2) |
+ SCALER5_DISPCTRL_DSPEISLUR(0) |
+ SCALER5_DISPCTRL_DSPEISLUR(1) |
+ SCALER5_DISPCTRL_DSPEISLUR(2) |
+ SCALER_DISPCTRL_SCLEIRQ);
+
+
+ /* Set AXI panic mode.
+ * VC4 panics when < 2 lines in FIFO.
+ * VC5 panics when less than 1 line in the FIFO.
+ */
+ dispctrl &= ~(SCALER_DISPCTRL_PANIC0_MASK |
+ SCALER_DISPCTRL_PANIC1_MASK |
+ SCALER_DISPCTRL_PANIC2_MASK);
+ dispctrl |= VC4_SET_FIELD(2, SCALER_DISPCTRL_PANIC0);
+ dispctrl |= VC4_SET_FIELD(2, SCALER_DISPCTRL_PANIC1);
+ dispctrl |= VC4_SET_FIELD(2, SCALER_DISPCTRL_PANIC2);
HVS_WRITE(SCALER_DISPCTRL, dispctrl);
+ /* Recompute Composite Output Buffer (COB) allocations for the displays
+ */
+ if (!vc4->is_vc5) {
+ /* The COB is 20736 pixels, or just over 10 lines at 2048 wide.
+ * The bottom 2048 pixels are full 32bpp RGBA (intended for the
+ * TXP composing RGBA to memory), whilst the remainder are only
+ * 24bpp RGB.
+ *
+ * Assign 3 lines to channels 1 & 2, and just over 4 lines to
+ * channel 0.
+ */
+ #define VC4_COB_SIZE 20736
+ #define VC4_COB_LINE_WIDTH 2048
+ #define VC4_COB_NUM_LINES 3
+ reg = 0;
+ top = VC4_COB_LINE_WIDTH * VC4_COB_NUM_LINES;
+ reg |= (top - 1) << 16;
+ HVS_WRITE(SCALER_DISPBASE2, reg);
+ reg = top;
+ top += VC4_COB_LINE_WIDTH * VC4_COB_NUM_LINES;
+ reg |= (top - 1) << 16;
+ HVS_WRITE(SCALER_DISPBASE1, reg);
+ reg = top;
+ top = VC4_COB_SIZE;
+ reg |= (top - 1) << 16;
+ HVS_WRITE(SCALER_DISPBASE0, reg);
+ } else {
+ /* The COB is 44416 pixels, or 10.8 lines at 4096 wide.
+ * The bottom 4096 pixels are full RGBA (intended for the TXP
+ * composing RGBA to memory), whilst the remainder are only
+ * RGB. Addressing is always pixel wide.
+ *
+ * Assign 3 lines of 4096 to channels 1 & 2, and just over 4
+ * lines. to channel 0.
+ */
+ #define VC5_COB_SIZE 44416
+ #define VC5_COB_LINE_WIDTH 4096
+ #define VC5_COB_NUM_LINES 3
+ reg = 0;
+ top = VC5_COB_LINE_WIDTH * VC5_COB_NUM_LINES;
+ reg |= top << 16;
+ HVS_WRITE(SCALER_DISPBASE2, reg);
+ top += 16;
+ reg = top;
+ top += VC5_COB_LINE_WIDTH * VC5_COB_NUM_LINES;
+ reg |= top << 16;
+ HVS_WRITE(SCALER_DISPBASE1, reg);
+ top += 16;
+ reg = top;
+ top = VC5_COB_SIZE;
+ reg |= top << 16;
+ HVS_WRITE(SCALER_DISPBASE0, reg);
+ }
+
ret = devm_request_irq(dev, platform_get_irq(pdev, 0),
vc4_hvs_irq_handler, 0, "vc4 hvs", drm);
if (ret)
diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c
index 8fbeecdf2ec4..a7e3d47c50f4 100644
--- a/drivers/gpu/drm/vc4/vc4_kms.c
+++ b/drivers/gpu/drm/vc4/vc4_kms.c
@@ -12,6 +12,7 @@
*/
#include <linux/clk.h>
+#include <linux/sort.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
@@ -24,8 +25,6 @@
#include "vc4_drv.h"
#include "vc4_regs.h"
-#define HVS_NUM_CHANNELS 3
-
struct vc4_ctm_state {
struct drm_private_state base;
struct drm_color_ctm *ctm;
@@ -38,23 +37,6 @@ to_vc4_ctm_state(const struct drm_private_state *priv)
return container_of(priv, struct vc4_ctm_state, base);
}
-struct vc4_hvs_state {
- struct drm_private_state base;
- unsigned long core_clock_rate;
-
- struct {
- unsigned in_use: 1;
- unsigned long fifo_load;
- struct drm_crtc_commit *pending_commit;
- } fifo_state[HVS_NUM_CHANNELS];
-};
-
-static struct vc4_hvs_state *
-to_vc4_hvs_state(const struct drm_private_state *priv)
-{
- return container_of(priv, struct vc4_hvs_state, base);
-}
-
struct vc4_load_tracker_state {
struct drm_private_state base;
u64 hvs_load;
@@ -190,8 +172,8 @@ vc4_ctm_commit(struct vc4_dev *vc4, struct drm_atomic_state *state)
VC4_SET_FIELD(ctm_state->fifo, SCALER_OLEDOFFS_DISPFIFO));
}
-static struct vc4_hvs_state *
-vc4_hvs_get_new_global_state(struct drm_atomic_state *state)
+struct vc4_hvs_state *
+vc4_hvs_get_new_global_state(const struct drm_atomic_state *state)
{
struct vc4_dev *vc4 = to_vc4_dev(state->dev);
struct drm_private_state *priv_state;
@@ -203,8 +185,8 @@ vc4_hvs_get_new_global_state(struct drm_atomic_state *state)
return to_vc4_hvs_state(priv_state);
}
-static struct vc4_hvs_state *
-vc4_hvs_get_old_global_state(struct drm_atomic_state *state)
+struct vc4_hvs_state *
+vc4_hvs_get_old_global_state(const struct drm_atomic_state *state)
{
struct vc4_dev *vc4 = to_vc4_dev(state->dev);
struct drm_private_state *priv_state;
@@ -216,7 +198,7 @@ vc4_hvs_get_old_global_state(struct drm_atomic_state *state)
return to_vc4_hvs_state(priv_state);
}
-static struct vc4_hvs_state *
+struct vc4_hvs_state *
vc4_hvs_get_global_state(struct drm_atomic_state *state)
{
struct vc4_dev *vc4 = to_vc4_dev(state->dev);
@@ -776,6 +758,20 @@ static int vc4_hvs_channels_obj_init(struct vc4_dev *vc4)
return drmm_add_action_or_reset(&vc4->base, vc4_hvs_channels_obj_fini, NULL);
}
+static int cmp_vc4_crtc_hvs_output(const void *a, const void *b)
+{
+ const struct vc4_crtc *crtc_a =
+ to_vc4_crtc(*(const struct drm_crtc **)a);
+ const struct vc4_crtc_data *data_a =
+ vc4_crtc_to_vc4_crtc_data(crtc_a);
+ const struct vc4_crtc *crtc_b =
+ to_vc4_crtc(*(const struct drm_crtc **)b);
+ const struct vc4_crtc_data *data_b =
+ vc4_crtc_to_vc4_crtc_data(crtc_b);
+
+ return data_a->hvs_output - data_b->hvs_output;
+}
+
/*
* The BCM2711 HVS has up to 7 outputs connected to the pixelvalves and
* the TXP (and therefore all the CRTCs found on that platform).
@@ -810,10 +806,11 @@ static int vc4_pv_muxing_atomic_check(struct drm_device *dev,
struct drm_atomic_state *state)
{
struct vc4_hvs_state *hvs_new_state;
- struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+ struct drm_crtc **sorted_crtcs;
struct drm_crtc *crtc;
unsigned int unassigned_channels = 0;
unsigned int i;
+ int ret;
hvs_new_state = vc4_hvs_get_global_state(state);
if (IS_ERR(hvs_new_state))
@@ -823,15 +820,59 @@ static int vc4_pv_muxing_atomic_check(struct drm_device *dev,
if (!hvs_new_state->fifo_state[i].in_use)
unassigned_channels |= BIT(i);
- for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
- struct vc4_crtc_state *old_vc4_crtc_state =
- to_vc4_crtc_state(old_crtc_state);
- struct vc4_crtc_state *new_vc4_crtc_state =
- to_vc4_crtc_state(new_crtc_state);
- struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
+ /*
+ * The problem we have to solve here is that we have up to 7
+ * encoders, connected to up to 6 CRTCs.
+ *
+ * Those CRTCs, depending on the instance, can be routed to 1, 2
+ * or 3 HVS FIFOs, and we need to set the muxing between FIFOs and
+ * outputs in the HVS accordingly.
+ *
+ * It would be pretty hard to come up with an algorithm that
+ * would generically solve this. However, the current routing
+ * trees we support allow us to simplify a bit the problem.
+ *
+ * Indeed, with the current supported layouts, if we try to
+ * assign in the ascending crtc index order the FIFOs, we can't
+ * fall into the situation where an earlier CRTC that had
+ * multiple routes is assigned one that was the only option for
+ * a later CRTC.
+ *
+ * If the layout changes and doesn't give us that in the future,
+ * we will need to have something smarter, but it works so far.
+ */
+ sorted_crtcs = kmalloc_array(dev->num_crtcs, sizeof(*sorted_crtcs), GFP_KERNEL);
+ if (!sorted_crtcs)
+ return -ENOMEM;
+
+ i = 0;
+ drm_for_each_crtc(crtc, dev)
+ sorted_crtcs[i++] = crtc;
+
+ sort(sorted_crtcs, i, sizeof(*sorted_crtcs), cmp_vc4_crtc_hvs_output, NULL);
+
+ for (i = 0; i < dev->num_crtcs; i++) {
+ struct vc4_crtc_state *old_vc4_crtc_state, *new_vc4_crtc_state;
+ struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+ struct vc4_crtc *vc4_crtc;
unsigned int matching_channels;
unsigned int channel;
+ crtc = sorted_crtcs[i];
+ if (!crtc)
+ continue;
+ vc4_crtc = to_vc4_crtc(crtc);
+
+ old_crtc_state = drm_atomic_get_old_crtc_state(state, crtc);
+ if (!old_crtc_state)
+ continue;
+ old_vc4_crtc_state = to_vc4_crtc_state(old_crtc_state);
+
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+ if (!new_crtc_state)
+ continue;
+ new_vc4_crtc_state = to_vc4_crtc_state(new_crtc_state);
+
drm_dbg(dev, "%s: Trying to find a channel.\n", crtc->name);
/* Nothing to do here, let's skip it */
@@ -860,33 +901,11 @@ static int vc4_pv_muxing_atomic_check(struct drm_device *dev,
continue;
}
- /*
- * The problem we have to solve here is that we have
- * up to 7 encoders, connected to up to 6 CRTCs.
- *
- * Those CRTCs, depending on the instance, can be
- * routed to 1, 2 or 3 HVS FIFOs, and we need to set
- * the change the muxing between FIFOs and outputs in
- * the HVS accordingly.
- *
- * It would be pretty hard to come up with an
- * algorithm that would generically solve
- * this. However, the current routing trees we support
- * allow us to simplify a bit the problem.
- *
- * Indeed, with the current supported layouts, if we
- * try to assign in the ascending crtc index order the
- * FIFOs, we can't fall into the situation where an
- * earlier CRTC that had multiple routes is assigned
- * one that was the only option for a later CRTC.
- *
- * If the layout changes and doesn't give us that in
- * the future, we will need to have something smarter,
- * but it works so far.
- */
matching_channels = unassigned_channels & vc4_crtc->data->hvs_available_channels;
- if (!matching_channels)
- return -EINVAL;
+ if (!matching_channels) {
+ ret = -EINVAL;
+ goto err_free_crtc_array;
+ }
channel = ffs(matching_channels) - 1;
@@ -896,7 +915,12 @@ static int vc4_pv_muxing_atomic_check(struct drm_device *dev,
hvs_new_state->fifo_state[channel].in_use = true;
}
+ kfree(sorted_crtcs);
return 0;
+
+err_free_crtc_array:
+ kfree(sorted_crtcs);
+ return ret;
}
static int
@@ -1050,6 +1074,7 @@ int vc4_kms_load(struct drm_device *dev)
dev->mode_config.helper_private = &vc4_mode_config_helpers;
dev->mode_config.preferred_depth = 24;
dev->mode_config.async_page_flip = true;
+ dev->mode_config.normalize_zpos = true;
ret = vc4_ctm_obj_init(vc4);
if (ret)
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index 8b92a45a3c89..97c84a3f5a46 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -65,78 +65,176 @@ static const struct hvs_format {
.drm = DRM_FORMAT_RGB565,
.hvs = HVS_PIXEL_FORMAT_RGB565,
.pixel_order = HVS_PIXEL_ORDER_XRGB,
+ .pixel_order_hvs5 = HVS_PIXEL_ORDER_XRGB,
},
{
.drm = DRM_FORMAT_BGR565,
.hvs = HVS_PIXEL_FORMAT_RGB565,
.pixel_order = HVS_PIXEL_ORDER_XBGR,
+ .pixel_order_hvs5 = HVS_PIXEL_ORDER_XBGR,
},
{
.drm = DRM_FORMAT_ARGB1555,
.hvs = HVS_PIXEL_FORMAT_RGBA5551,
.pixel_order = HVS_PIXEL_ORDER_ABGR,
+ .pixel_order_hvs5 = HVS_PIXEL_ORDER_ARGB,
},
{
.drm = DRM_FORMAT_XRGB1555,
.hvs = HVS_PIXEL_FORMAT_RGBA5551,
.pixel_order = HVS_PIXEL_ORDER_ABGR,
+ .pixel_order_hvs5 = HVS_PIXEL_ORDER_ARGB,
},
{
.drm = DRM_FORMAT_RGB888,
.hvs = HVS_PIXEL_FORMAT_RGB888,
.pixel_order = HVS_PIXEL_ORDER_XRGB,
+ .pixel_order_hvs5 = HVS_PIXEL_ORDER_XRGB,
},
{
.drm = DRM_FORMAT_BGR888,
.hvs = HVS_PIXEL_FORMAT_RGB888,
.pixel_order = HVS_PIXEL_ORDER_XBGR,
+ .pixel_order_hvs5 = HVS_PIXEL_ORDER_XBGR,
},
{
.drm = DRM_FORMAT_YUV422,
.hvs = HVS_PIXEL_FORMAT_YCBCR_YUV422_3PLANE,
.pixel_order = HVS_PIXEL_ORDER_XYCBCR,
+ .pixel_order_hvs5 = HVS_PIXEL_ORDER_XYCBCR,
},
{
.drm = DRM_FORMAT_YVU422,
.hvs = HVS_PIXEL_FORMAT_YCBCR_YUV422_3PLANE,
.pixel_order = HVS_PIXEL_ORDER_XYCRCB,
+ .pixel_order_hvs5 = HVS_PIXEL_ORDER_XYCRCB,
},
{
.drm = DRM_FORMAT_YUV420,
.hvs = HVS_PIXEL_FORMAT_YCBCR_YUV420_3PLANE,
.pixel_order = HVS_PIXEL_ORDER_XYCBCR,
+ .pixel_order_hvs5 = HVS_PIXEL_ORDER_XYCBCR,
},
{
.drm = DRM_FORMAT_YVU420,
.hvs = HVS_PIXEL_FORMAT_YCBCR_YUV420_3PLANE,
.pixel_order = HVS_PIXEL_ORDER_XYCRCB,
+ .pixel_order_hvs5 = HVS_PIXEL_ORDER_XYCRCB,
},
{
.drm = DRM_FORMAT_NV12,
.hvs = HVS_PIXEL_FORMAT_YCBCR_YUV420_2PLANE,
.pixel_order = HVS_PIXEL_ORDER_XYCBCR,
+ .pixel_order_hvs5 = HVS_PIXEL_ORDER_XYCBCR,
},
{
.drm = DRM_FORMAT_NV21,
.hvs = HVS_PIXEL_FORMAT_YCBCR_YUV420_2PLANE,
.pixel_order = HVS_PIXEL_ORDER_XYCRCB,
+ .pixel_order_hvs5 = HVS_PIXEL_ORDER_XYCRCB,
},
{
.drm = DRM_FORMAT_NV16,
.hvs = HVS_PIXEL_FORMAT_YCBCR_YUV422_2PLANE,
.pixel_order = HVS_PIXEL_ORDER_XYCBCR,
+ .pixel_order_hvs5 = HVS_PIXEL_ORDER_XYCBCR,
},
{
.drm = DRM_FORMAT_NV61,
.hvs = HVS_PIXEL_FORMAT_YCBCR_YUV422_2PLANE,
.pixel_order = HVS_PIXEL_ORDER_XYCRCB,
+ .pixel_order_hvs5 = HVS_PIXEL_ORDER_XYCRCB,
},
{
.drm = DRM_FORMAT_P030,
.hvs = HVS_PIXEL_FORMAT_YCBCR_10BIT,
- .pixel_order = HVS_PIXEL_ORDER_XYCBCR,
+ .pixel_order_hvs5 = HVS_PIXEL_ORDER_XYCBCR,
+ .hvs5_only = true,
+ },
+ {
+ .drm = DRM_FORMAT_XRGB2101010,
+ .hvs = HVS_PIXEL_FORMAT_RGBA1010102,
+ .pixel_order_hvs5 = HVS_PIXEL_ORDER_ARGB,
+ .hvs5_only = true,
+ },
+ {
+ .drm = DRM_FORMAT_ARGB2101010,
+ .hvs = HVS_PIXEL_FORMAT_RGBA1010102,
+ .pixel_order_hvs5 = HVS_PIXEL_ORDER_ARGB,
+ .hvs5_only = true,
+ },
+ {
+ .drm = DRM_FORMAT_ABGR2101010,
+ .hvs = HVS_PIXEL_FORMAT_RGBA1010102,
+ .pixel_order_hvs5 = HVS_PIXEL_ORDER_ABGR,
.hvs5_only = true,
},
+ {
+ .drm = DRM_FORMAT_XBGR2101010,
+ .hvs = HVS_PIXEL_FORMAT_RGBA1010102,
+ .pixel_order_hvs5 = HVS_PIXEL_ORDER_ABGR,
+ .hvs5_only = true,
+ },
+ {
+ .drm = DRM_FORMAT_RGB332,
+ .hvs = HVS_PIXEL_FORMAT_RGB332,
+ .pixel_order = HVS_PIXEL_ORDER_ARGB,
+ .pixel_order_hvs5 = HVS_PIXEL_ORDER_ARGB,
+ },
+ {
+ .drm = DRM_FORMAT_BGR233,
+ .hvs = HVS_PIXEL_FORMAT_RGB332,
+ .pixel_order = HVS_PIXEL_ORDER_ABGR,
+ .pixel_order_hvs5 = HVS_PIXEL_ORDER_ABGR,
+ },
+ {
+ .drm = DRM_FORMAT_XRGB4444,
+ .hvs = HVS_PIXEL_FORMAT_RGBA4444,
+ .pixel_order = HVS_PIXEL_ORDER_ABGR,
+ .pixel_order_hvs5 = HVS_PIXEL_ORDER_ARGB,
+ },
+ {
+ .drm = DRM_FORMAT_ARGB4444,
+ .hvs = HVS_PIXEL_FORMAT_RGBA4444,
+ .pixel_order = HVS_PIXEL_ORDER_ABGR,
+ .pixel_order_hvs5 = HVS_PIXEL_ORDER_ARGB,
+ },
+ {
+ .drm = DRM_FORMAT_XBGR4444,
+ .hvs = HVS_PIXEL_FORMAT_RGBA4444,
+ .pixel_order = HVS_PIXEL_ORDER_ARGB,
+ .pixel_order_hvs5 = HVS_PIXEL_ORDER_ABGR,
+ },
+ {
+ .drm = DRM_FORMAT_ABGR4444,
+ .hvs = HVS_PIXEL_FORMAT_RGBA4444,
+ .pixel_order = HVS_PIXEL_ORDER_ARGB,
+ .pixel_order_hvs5 = HVS_PIXEL_ORDER_ABGR,
+ },
+ {
+ .drm = DRM_FORMAT_BGRX4444,
+ .hvs = HVS_PIXEL_FORMAT_RGBA4444,
+ .pixel_order = HVS_PIXEL_ORDER_RGBA,
+ .pixel_order_hvs5 = HVS_PIXEL_ORDER_BGRA,
+ },
+ {
+ .drm = DRM_FORMAT_BGRA4444,
+ .hvs = HVS_PIXEL_FORMAT_RGBA4444,
+ .pixel_order = HVS_PIXEL_ORDER_RGBA,
+ .pixel_order_hvs5 = HVS_PIXEL_ORDER_BGRA,
+ },
+ {
+ .drm = DRM_FORMAT_RGBX4444,
+ .hvs = HVS_PIXEL_FORMAT_RGBA4444,
+ .pixel_order = HVS_PIXEL_ORDER_BGRA,
+ .pixel_order_hvs5 = HVS_PIXEL_ORDER_RGBA,
+ },
+ {
+ .drm = DRM_FORMAT_RGBA4444,
+ .hvs = HVS_PIXEL_FORMAT_RGBA4444,
+ .pixel_order = HVS_PIXEL_ORDER_BGRA,
+ .pixel_order_hvs5 = HVS_PIXEL_ORDER_RGBA,
+ },
};
static const struct hvs_format *vc4_get_hvs_format(u32 drm_format)
@@ -340,7 +438,7 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state)
{
struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
struct drm_framebuffer *fb = state->fb;
- struct drm_gem_dma_object *bo = drm_fb_dma_get_gem_obj(fb, 0);
+ struct drm_gem_dma_object *bo;
int num_planes = fb->format->num_planes;
struct drm_crtc_state *crtc_state;
u32 h_subsample = fb->format->hsub;
@@ -359,8 +457,10 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state)
if (ret)
return ret;
- for (i = 0; i < num_planes; i++)
+ for (i = 0; i < num_planes; i++) {
+ bo = drm_fb_dma_get_gem_obj(fb, i);
vc4_state->offsets[i] = bo->dma_addr + fb->offsets[i];
+ }
/*
* We don't support subpixel source positioning for scaling,
@@ -1001,15 +1101,10 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
vc4_dlist_write(vc4_state, 0xc0c0c0c0);
} else {
- u32 hvs_pixel_order = format->pixel_order;
-
- if (format->pixel_order_hvs5)
- hvs_pixel_order = format->pixel_order_hvs5;
-
/* Control word */
vc4_dlist_write(vc4_state,
SCALER_CTL0_VALID |
- (hvs_pixel_order << SCALER_CTL0_ORDER_SHIFT) |
+ (format->pixel_order_hvs5 << SCALER_CTL0_ORDER_SHIFT) |
(hvs_format << SCALER_CTL0_PIXEL_FORMAT_SHIFT) |
VC4_SET_FIELD(tiling, SCALER_CTL0_TILING) |
(vc4_state->is_unity ?
@@ -1488,6 +1583,16 @@ static bool vc4_format_mod_supported(struct drm_plane *plane,
case DRM_FORMAT_BGRX1010102:
case DRM_FORMAT_RGBA1010102:
case DRM_FORMAT_BGRA1010102:
+ case DRM_FORMAT_XRGB4444:
+ case DRM_FORMAT_ARGB4444:
+ case DRM_FORMAT_XBGR4444:
+ case DRM_FORMAT_ABGR4444:
+ case DRM_FORMAT_RGBX4444:
+ case DRM_FORMAT_RGBA4444:
+ case DRM_FORMAT_BGRX4444:
+ case DRM_FORMAT_BGRA4444:
+ case DRM_FORMAT_RGB332:
+ case DRM_FORMAT_BGR233:
case DRM_FORMAT_YUV422:
case DRM_FORMAT_YVU422:
case DRM_FORMAT_YUV420:
@@ -1568,9 +1673,14 @@ struct drm_plane *vc4_plane_init(struct drm_device *dev,
DRM_COLOR_YCBCR_BT709,
DRM_COLOR_YCBCR_LIMITED_RANGE);
+ if (type == DRM_PLANE_TYPE_PRIMARY)
+ drm_plane_create_zpos_immutable_property(plane, 0);
+
return plane;
}
+#define VC4_NUM_OVERLAY_PLANES 16
+
int vc4_plane_create_additional_planes(struct drm_device *drm)
{
struct drm_plane *cursor_plane;
@@ -1586,24 +1696,35 @@ int vc4_plane_create_additional_planes(struct drm_device *drm)
* modest number of planes to expose, that should hopefully
* still cover any sane usecase.
*/
- for (i = 0; i < 16; i++) {
+ for (i = 0; i < VC4_NUM_OVERLAY_PLANES; i++) {
struct drm_plane *plane =
vc4_plane_init(drm, DRM_PLANE_TYPE_OVERLAY,
GENMASK(drm->mode_config.num_crtc - 1, 0));
if (IS_ERR(plane))
continue;
+
+ /* Create zpos property. Max of all the overlays + 1 primary +
+ * 1 cursor plane on a crtc.
+ */
+ drm_plane_create_zpos_property(plane, i + 1, 1,
+ VC4_NUM_OVERLAY_PLANES + 1);
}
drm_for_each_crtc(crtc, drm) {
/* Set up the legacy cursor after overlay initialization,
- * since we overlay planes on the CRTC in the order they were
- * initialized.
+ * since the zpos fallback is that planes are rendered by plane
+ * ID order, and that then puts the cursor on top.
*/
cursor_plane = vc4_plane_init(drm, DRM_PLANE_TYPE_CURSOR,
drm_crtc_mask(crtc));
if (!IS_ERR(cursor_plane)) {
crtc->cursor = cursor_plane;
+
+ drm_plane_create_zpos_property(cursor_plane,
+ VC4_NUM_OVERLAY_PLANES + 1,
+ 1,
+ VC4_NUM_OVERLAY_PLANES + 1);
}
}
diff --git a/drivers/gpu/drm/vc4/vc4_regs.h b/drivers/gpu/drm/vc4/vc4_regs.h
index f0290fad991d..f3763bd600f6 100644
--- a/drivers/gpu/drm/vc4/vc4_regs.h
+++ b/drivers/gpu/drm/vc4/vc4_regs.h
@@ -220,6 +220,12 @@
#define SCALER_DISPCTRL 0x00000000
/* Global register for clock gating the HVS */
# define SCALER_DISPCTRL_ENABLE BIT(31)
+# define SCALER_DISPCTRL_PANIC0_MASK VC4_MASK(25, 24)
+# define SCALER_DISPCTRL_PANIC0_SHIFT 24
+# define SCALER_DISPCTRL_PANIC1_MASK VC4_MASK(27, 26)
+# define SCALER_DISPCTRL_PANIC1_SHIFT 26
+# define SCALER_DISPCTRL_PANIC2_MASK VC4_MASK(29, 28)
+# define SCALER_DISPCTRL_PANIC2_SHIFT 28
# define SCALER_DISPCTRL_DSP3_MUX_MASK VC4_MASK(19, 18)
# define SCALER_DISPCTRL_DSP3_MUX_SHIFT 18
@@ -228,15 +234,21 @@
* always enabled.
*/
# define SCALER_DISPCTRL_DSPEISLUR(x) BIT(13 + (x))
+# define SCALER5_DISPCTRL_DSPEISLUR(x) BIT(9 + ((x) * 4))
/* Enables Display 0 end-of-line-N contribution to
* SCALER_DISPSTAT_IRQDISP0
*/
# define SCALER_DISPCTRL_DSPEIEOLN(x) BIT(8 + ((x) * 2))
+# define SCALER5_DISPCTRL_DSPEIEOLN(x) BIT(8 + ((x) * 4))
/* Enables Display 0 EOF contribution to SCALER_DISPSTAT_IRQDISP0 */
# define SCALER_DISPCTRL_DSPEIEOF(x) BIT(7 + ((x) * 2))
+# define SCALER5_DISPCTRL_DSPEIEOF(x) BIT(7 + ((x) * 4))
-# define SCALER_DISPCTRL_SLVRDEIRQ BIT(6)
-# define SCALER_DISPCTRL_SLVWREIRQ BIT(5)
+# define SCALER5_DISPCTRL_DSPEIVST(x) BIT(6 + ((x) * 4))
+
+# define SCALER_DISPCTRL_SLVRDEIRQ BIT(6) /* HVS4 only */
+# define SCALER_DISPCTRL_SLVWREIRQ BIT(5) /* HVS4 only */
+# define SCALER5_DISPCTRL_SLVEIRQ BIT(5)
# define SCALER_DISPCTRL_DMAEIRQ BIT(4)
/* Enables interrupt generation on the enabled EOF/EOLN/EISLUR
* bits and short frames..
@@ -360,6 +372,7 @@
#define SCALER_DISPBKGND0 0x00000044
# define SCALER_DISPBKGND_AUTOHS BIT(31)
+# define SCALER5_DISPBKGND_BCK2BCK BIT(31)
# define SCALER_DISPBKGND_INTERLACE BIT(30)
# define SCALER_DISPBKGND_GAMMA BIT(29)
# define SCALER_DISPBKGND_TESTMODE_MASK VC4_MASK(28, 25)
@@ -835,16 +848,19 @@ enum hvs_pixel_format {
/* Note: the LSB is the rightmost character shown. Only valid for
* HVS_PIXEL_FORMAT_RGB8888, not RGB888.
*/
+/* For modes 332, 4444, 555, 5551, 6666, 8888, 10:10:10:2 */
#define HVS_PIXEL_ORDER_RGBA 0
#define HVS_PIXEL_ORDER_BGRA 1
#define HVS_PIXEL_ORDER_ARGB 2
#define HVS_PIXEL_ORDER_ABGR 3
+/* For modes 666 and 888 (4 & 5) */
#define HVS_PIXEL_ORDER_XBRG 0
#define HVS_PIXEL_ORDER_XRBG 1
#define HVS_PIXEL_ORDER_XRGB 2
#define HVS_PIXEL_ORDER_XBGR 3
+/* For YCbCr modes (8-12, and 17) */
#define HVS_PIXEL_ORDER_XYCBCR 0
#define HVS_PIXEL_ORDER_XYCRCB 1
#define HVS_PIXEL_ORDER_YXCBCR 2
diff --git a/drivers/gpu/drm/vc4/vc4_txp.c b/drivers/gpu/drm/vc4/vc4_txp.c
index bd181b5a7b52..ef5cab2a3aa9 100644
--- a/drivers/gpu/drm/vc4/vc4_txp.c
+++ b/drivers/gpu/drm/vc4/vc4_txp.c
@@ -145,14 +145,24 @@
/* Number of lines received and committed to memory. */
#define TXP_PROGRESS 0x10
-#define TXP_READ(offset) readl(txp->regs + (offset))
-#define TXP_WRITE(offset, val) writel(val, txp->regs + (offset))
+#define TXP_READ(offset) \
+ ({ \
+ kunit_fail_current_test("Accessing a register in a unit test!\n"); \
+ readl(txp->regs + (offset)); \
+ })
+
+#define TXP_WRITE(offset, val) \
+ do { \
+ kunit_fail_current_test("Accessing a register in a unit test!\n"); \
+ writel(val, txp->regs + (offset)); \
+ } while (0)
struct vc4_txp {
struct vc4_crtc base;
struct platform_device *pdev;
+ struct vc4_encoder encoder;
struct drm_writeback_connector connector;
void __iomem *regs;
@@ -160,7 +170,7 @@ struct vc4_txp {
static inline struct vc4_txp *encoder_to_vc4_txp(struct drm_encoder *encoder)
{
- return container_of(encoder, struct vc4_txp, connector.encoder);
+ return container_of(encoder, struct vc4_txp, encoder.base);
}
static inline struct vc4_txp *connector_to_vc4_txp(struct drm_connector *conn)
@@ -478,7 +488,8 @@ static irqreturn_t vc4_txp_interrupt(int irq, void *data)
return IRQ_HANDLED;
}
-static const struct vc4_crtc_data vc4_txp_crtc_data = {
+const struct vc4_crtc_data vc4_txp_crtc_data = {
+ .name = "txp",
.debugfs_name = "txp_regs",
.hvs_available_channels = BIT(2),
.hvs_output = 2,
@@ -488,10 +499,10 @@ static int vc4_txp_bind(struct device *dev, struct device *master, void *data)
{
struct platform_device *pdev = to_platform_device(dev);
struct drm_device *drm = dev_get_drvdata(master);
+ struct vc4_encoder *vc4_encoder;
+ struct drm_encoder *encoder;
struct vc4_crtc *vc4_crtc;
struct vc4_txp *txp;
- struct drm_crtc *crtc;
- struct drm_encoder *encoder;
int ret, irq;
irq = platform_get_irq(pdev, 0);
@@ -501,39 +512,42 @@ static int vc4_txp_bind(struct device *dev, struct device *master, void *data)
txp = drmm_kzalloc(drm, sizeof(*txp), GFP_KERNEL);
if (!txp)
return -ENOMEM;
- vc4_crtc = &txp->base;
- crtc = &vc4_crtc->base;
-
- vc4_crtc->pdev = pdev;
- vc4_crtc->data = &vc4_txp_crtc_data;
- vc4_crtc->feeds_txp = true;
txp->pdev = pdev;
-
txp->regs = vc4_ioremap_regs(pdev, 0);
if (IS_ERR(txp->regs))
return PTR_ERR(txp->regs);
+
+ vc4_crtc = &txp->base;
vc4_crtc->regset.base = txp->regs;
vc4_crtc->regset.regs = txp_regs;
vc4_crtc->regset.nregs = ARRAY_SIZE(txp_regs);
- drm_connector_helper_add(&txp->connector.base,
- &vc4_txp_connector_helper_funcs);
- ret = drm_writeback_connector_init(drm, &txp->connector,
- &vc4_txp_connector_funcs,
- &vc4_txp_encoder_helper_funcs,
- drm_fmts, ARRAY_SIZE(drm_fmts),
- 0);
+ ret = vc4_crtc_init(drm, pdev, vc4_crtc, &vc4_txp_crtc_data,
+ &vc4_txp_crtc_funcs, &vc4_txp_crtc_helper_funcs, true);
if (ret)
return ret;
- ret = vc4_crtc_init(drm, vc4_crtc,
- &vc4_txp_crtc_funcs, &vc4_txp_crtc_helper_funcs);
+ vc4_encoder = &txp->encoder;
+ txp->encoder.type = VC4_ENCODER_TYPE_TXP;
+
+ encoder = &vc4_encoder->base;
+ encoder->possible_crtcs = drm_crtc_mask(&vc4_crtc->base);
+
+ drm_encoder_helper_add(encoder, &vc4_txp_encoder_helper_funcs);
+
+ ret = drmm_encoder_init(drm, encoder, NULL, DRM_MODE_ENCODER_VIRTUAL, NULL);
if (ret)
return ret;
- encoder = &txp->connector.encoder;
- encoder->possible_crtcs = drm_crtc_mask(crtc);
+ drm_connector_helper_add(&txp->connector.base,
+ &vc4_txp_connector_helper_funcs);
+ ret = drm_writeback_connector_init_with_encoder(drm, &txp->connector,
+ encoder,
+ &vc4_txp_connector_funcs,
+ drm_fmts, ARRAY_SIZE(drm_fmts));
+ if (ret)
+ return ret;
ret = devm_request_irq(dev, irq, vc4_txp_interrupt, 0,
dev_name(dev), txp);
diff --git a/drivers/gpu/drm/vc4/vc4_v3d.c b/drivers/gpu/drm/vc4/vc4_v3d.c
index 56abb0d6bc39..29a664c8bf44 100644
--- a/drivers/gpu/drm/vc4/vc4_v3d.c
+++ b/drivers/gpu/drm/vc4/vc4_v3d.c
@@ -96,8 +96,8 @@ static const struct debugfs_reg32 v3d_regs[] = {
static int vc4_v3d_debugfs_ident(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = (struct drm_info_node *)m->private;
- struct drm_device *dev = node->minor->dev;
+ struct drm_debugfs_entry *entry = m->private;
+ struct drm_device *dev = entry->dev;
struct vc4_dev *vc4 = to_vc4_dev(dev);
int ret = vc4_v3d_pm_get(vc4);
@@ -404,19 +404,13 @@ int vc4_v3d_debugfs_init(struct drm_minor *minor)
struct drm_device *drm = minor->dev;
struct vc4_dev *vc4 = to_vc4_dev(drm);
struct vc4_v3d *v3d = vc4->v3d;
- int ret;
if (!vc4->v3d)
return -ENODEV;
- ret = vc4_debugfs_add_file(minor, "v3d_ident",
- vc4_v3d_debugfs_ident, NULL);
- if (ret)
- return ret;
+ drm_debugfs_add_file(drm, "v3d_ident", vc4_v3d_debugfs_ident, NULL);
- ret = vc4_debugfs_add_regset32(minor, "v3d_regs", &v3d->regset);
- if (ret)
- return ret;
+ vc4_debugfs_add_regset32(drm, "v3d_regs", &v3d->regset);
return 0;
}
diff --git a/drivers/gpu/drm/vc4/vc4_vec.c b/drivers/gpu/drm/vc4/vc4_vec.c
index 92c07e31d632..a3782d05cd66 100644
--- a/drivers/gpu/drm/vc4/vc4_vec.c
+++ b/drivers/gpu/drm/vc4/vc4_vec.c
@@ -46,6 +46,7 @@
#define VEC_CONFIG0_YDEL(x) ((x) << 26)
#define VEC_CONFIG0_CDEL_MASK GENMASK(25, 24)
#define VEC_CONFIG0_CDEL(x) ((x) << 24)
+#define VEC_CONFIG0_SECAM_STD BIT(21)
#define VEC_CONFIG0_PBPR_FIL BIT(18)
#define VEC_CONFIG0_CHROMA_GAIN_MASK GENMASK(17, 16)
#define VEC_CONFIG0_CHROMA_GAIN_UNITY (0 << 16)
@@ -76,6 +77,27 @@
#define VEC_SOFT_RESET 0x10c
#define VEC_CLMP0_START 0x144
#define VEC_CLMP0_END 0x148
+
+/*
+ * These set the color subcarrier frequency
+ * if VEC_CONFIG1_CUSTOM_FREQ is enabled.
+ *
+ * VEC_FREQ1_0 contains the most significant 16-bit half-word,
+ * VEC_FREQ3_2 contains the least significant 16-bit half-word.
+ * 0x80000000 seems to be equivalent to the pixel clock
+ * (which itself is the VEC clock divided by 8).
+ *
+ * Reference values (with the default pixel clock of 13.5 MHz):
+ *
+ * NTSC (3579545.[45] Hz) - 0x21F07C1F
+ * PAL (4433618.75 Hz) - 0x2A098ACB
+ * PAL-M (3575611.[888111] Hz) - 0x21E6EFE3
+ * PAL-N (3582056.25 Hz) - 0x21F69446
+ *
+ * NOTE: For SECAM, it is used as the Dr center frequency,
+ * regardless of whether VEC_CONFIG1_CUSTOM_FREQ is enabled or not;
+ * that is specified as 4406250 Hz, which corresponds to 0x29C71C72.
+ */
#define VEC_FREQ3_2 0x180
#define VEC_FREQ1_0 0x184
@@ -118,6 +140,14 @@
#define VEC_INTERRUPT_CONTROL 0x190
#define VEC_INTERRUPT_STATUS 0x194
+
+/*
+ * Db center frequency for SECAM; the clock for this is the same as for
+ * VEC_FREQ3_2/VEC_FREQ1_0, which is used for Dr center frequency.
+ *
+ * This is specified as 4250000 Hz, which corresponds to 0x284BDA13.
+ * That is also the default value, so no need to set it explicitly.
+ */
#define VEC_FCW_SECAM_B 0x198
#define VEC_SECAM_GAIN_VAL 0x19c
@@ -172,11 +202,22 @@ struct vc4_vec {
struct clk *clock;
+ struct drm_property *legacy_tv_mode_property;
+
struct debugfs_regset32 regset;
};
-#define VEC_READ(offset) readl(vec->regs + (offset))
-#define VEC_WRITE(offset, val) writel(val, vec->regs + (offset))
+#define VEC_READ(offset) \
+ ({ \
+ kunit_fail_current_test("Accessing a register in a unit test!\n"); \
+ readl(vec->regs + (offset)); \
+ })
+
+#define VEC_WRITE(offset, val) \
+ do { \
+ kunit_fail_current_test("Accessing a register in a unit test!\n"); \
+ writel(val, vec->regs + (offset)); \
+ } while (0)
static inline struct vc4_vec *
encoder_to_vc4_vec(struct drm_encoder *encoder)
@@ -184,15 +225,26 @@ encoder_to_vc4_vec(struct drm_encoder *encoder)
return container_of(encoder, struct vc4_vec, encoder.base);
}
+static inline struct vc4_vec *
+connector_to_vc4_vec(struct drm_connector *connector)
+{
+ return container_of(connector, struct vc4_vec, connector);
+}
+
enum vc4_vec_tv_mode_id {
VC4_VEC_TV_MODE_NTSC,
VC4_VEC_TV_MODE_NTSC_J,
VC4_VEC_TV_MODE_PAL,
VC4_VEC_TV_MODE_PAL_M,
+ VC4_VEC_TV_MODE_NTSC_443,
+ VC4_VEC_TV_MODE_PAL_60,
+ VC4_VEC_TV_MODE_PAL_N,
+ VC4_VEC_TV_MODE_SECAM,
};
struct vc4_vec_tv_mode {
- const struct drm_display_mode *mode;
+ unsigned int mode;
+ u16 expected_htotal;
u32 config0;
u32 config1;
u32 custom_freq;
@@ -225,41 +277,86 @@ static const struct debugfs_reg32 vec_regs[] = {
VC4_REG32(VEC_DAC_MISC),
};
-static const struct drm_display_mode ntsc_mode = {
- DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 13500,
- 720, 720 + 14, 720 + 14 + 64, 720 + 14 + 64 + 60, 0,
- 480, 480 + 7, 480 + 7 + 6, 525, 0,
- DRM_MODE_FLAG_INTERLACE)
-};
-
-static const struct drm_display_mode pal_mode = {
- DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 13500,
- 720, 720 + 20, 720 + 20 + 64, 720 + 20 + 64 + 60, 0,
- 576, 576 + 4, 576 + 4 + 6, 625, 0,
- DRM_MODE_FLAG_INTERLACE)
-};
-
static const struct vc4_vec_tv_mode vc4_vec_tv_modes[] = {
- [VC4_VEC_TV_MODE_NTSC] = {
- .mode = &ntsc_mode,
+ {
+ .mode = DRM_MODE_TV_MODE_NTSC,
+ .expected_htotal = 858,
.config0 = VEC_CONFIG0_NTSC_STD | VEC_CONFIG0_PDEN,
.config1 = VEC_CONFIG1_C_CVBS_CVBS,
},
- [VC4_VEC_TV_MODE_NTSC_J] = {
- .mode = &ntsc_mode,
+ {
+ .mode = DRM_MODE_TV_MODE_NTSC_443,
+ .expected_htotal = 858,
+ .config0 = VEC_CONFIG0_NTSC_STD,
+ .config1 = VEC_CONFIG1_C_CVBS_CVBS | VEC_CONFIG1_CUSTOM_FREQ,
+ .custom_freq = 0x2a098acb,
+ },
+ {
+ .mode = DRM_MODE_TV_MODE_NTSC_J,
+ .expected_htotal = 858,
.config0 = VEC_CONFIG0_NTSC_STD,
.config1 = VEC_CONFIG1_C_CVBS_CVBS,
},
- [VC4_VEC_TV_MODE_PAL] = {
- .mode = &pal_mode,
+ {
+ .mode = DRM_MODE_TV_MODE_PAL,
+ .expected_htotal = 864,
.config0 = VEC_CONFIG0_PAL_BDGHI_STD,
.config1 = VEC_CONFIG1_C_CVBS_CVBS,
},
- [VC4_VEC_TV_MODE_PAL_M] = {
- .mode = &ntsc_mode,
+ {
+ /* PAL-60 */
+ .mode = DRM_MODE_TV_MODE_PAL,
+ .expected_htotal = 858,
+ .config0 = VEC_CONFIG0_PAL_M_STD,
+ .config1 = VEC_CONFIG1_C_CVBS_CVBS | VEC_CONFIG1_CUSTOM_FREQ,
+ .custom_freq = 0x2a098acb,
+ },
+ {
+ .mode = DRM_MODE_TV_MODE_PAL_M,
+ .expected_htotal = 858,
.config0 = VEC_CONFIG0_PAL_M_STD,
.config1 = VEC_CONFIG1_C_CVBS_CVBS,
},
+ {
+ .mode = DRM_MODE_TV_MODE_PAL_N,
+ .expected_htotal = 864,
+ .config0 = VEC_CONFIG0_PAL_N_STD,
+ .config1 = VEC_CONFIG1_C_CVBS_CVBS,
+ },
+ {
+ .mode = DRM_MODE_TV_MODE_SECAM,
+ .expected_htotal = 864,
+ .config0 = VEC_CONFIG0_SECAM_STD,
+ .config1 = VEC_CONFIG1_C_CVBS_CVBS,
+ .custom_freq = 0x29c71c72,
+ },
+};
+
+static inline const struct vc4_vec_tv_mode *
+vc4_vec_tv_mode_lookup(unsigned int mode, u16 htotal)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(vc4_vec_tv_modes); i++) {
+ const struct vc4_vec_tv_mode *tv_mode = &vc4_vec_tv_modes[i];
+
+ if (tv_mode->mode == mode &&
+ tv_mode->expected_htotal == htotal)
+ return tv_mode;
+ }
+
+ return NULL;
+}
+
+static const struct drm_prop_enum_list legacy_tv_mode_names[] = {
+ { VC4_VEC_TV_MODE_NTSC, "NTSC", },
+ { VC4_VEC_TV_MODE_NTSC_443, "NTSC-443", },
+ { VC4_VEC_TV_MODE_NTSC_J, "NTSC-J", },
+ { VC4_VEC_TV_MODE_PAL, "PAL", },
+ { VC4_VEC_TV_MODE_PAL_60, "PAL-60", },
+ { VC4_VEC_TV_MODE_PAL_M, "PAL-M", },
+ { VC4_VEC_TV_MODE_PAL_N, "PAL-N", },
+ { VC4_VEC_TV_MODE_SECAM, "SECAM", },
};
static enum drm_connector_status
@@ -268,38 +365,126 @@ vc4_vec_connector_detect(struct drm_connector *connector, bool force)
return connector_status_unknown;
}
-static int vc4_vec_connector_get_modes(struct drm_connector *connector)
+static void vc4_vec_connector_reset(struct drm_connector *connector)
{
- struct drm_connector_state *state = connector->state;
- struct drm_display_mode *mode;
+ drm_atomic_helper_connector_reset(connector);
+ drm_atomic_helper_connector_tv_reset(connector);
+}
- mode = drm_mode_duplicate(connector->dev,
- vc4_vec_tv_modes[state->tv.mode].mode);
- if (!mode) {
- DRM_ERROR("Failed to create a new display mode\n");
- return -ENOMEM;
+static int
+vc4_vec_connector_set_property(struct drm_connector *connector,
+ struct drm_connector_state *state,
+ struct drm_property *property,
+ uint64_t val)
+{
+ struct vc4_vec *vec = connector_to_vc4_vec(connector);
+
+ if (property != vec->legacy_tv_mode_property)
+ return -EINVAL;
+
+ switch (val) {
+ case VC4_VEC_TV_MODE_NTSC:
+ state->tv.mode = DRM_MODE_TV_MODE_NTSC;
+ break;
+
+ case VC4_VEC_TV_MODE_NTSC_443:
+ state->tv.mode = DRM_MODE_TV_MODE_NTSC_443;
+ break;
+
+ case VC4_VEC_TV_MODE_NTSC_J:
+ state->tv.mode = DRM_MODE_TV_MODE_NTSC_J;
+ break;
+
+ case VC4_VEC_TV_MODE_PAL:
+ case VC4_VEC_TV_MODE_PAL_60:
+ state->tv.mode = DRM_MODE_TV_MODE_PAL;
+ break;
+
+ case VC4_VEC_TV_MODE_PAL_M:
+ state->tv.mode = DRM_MODE_TV_MODE_PAL_M;
+ break;
+
+ case VC4_VEC_TV_MODE_PAL_N:
+ state->tv.mode = DRM_MODE_TV_MODE_PAL_N;
+ break;
+
+ case VC4_VEC_TV_MODE_SECAM:
+ state->tv.mode = DRM_MODE_TV_MODE_SECAM;
+ break;
+
+ default:
+ return -EINVAL;
}
- drm_mode_probed_add(connector, mode);
+ return 0;
+}
+
+static int
+vc4_vec_connector_get_property(struct drm_connector *connector,
+ const struct drm_connector_state *state,
+ struct drm_property *property,
+ uint64_t *val)
+{
+ struct vc4_vec *vec = connector_to_vc4_vec(connector);
+
+ if (property != vec->legacy_tv_mode_property)
+ return -EINVAL;
+
+ switch (state->tv.mode) {
+ case DRM_MODE_TV_MODE_NTSC:
+ *val = VC4_VEC_TV_MODE_NTSC;
+ break;
+
+ case DRM_MODE_TV_MODE_NTSC_443:
+ *val = VC4_VEC_TV_MODE_NTSC_443;
+ break;
+
+ case DRM_MODE_TV_MODE_NTSC_J:
+ *val = VC4_VEC_TV_MODE_NTSC_J;
+ break;
- return 1;
+ case DRM_MODE_TV_MODE_PAL:
+ *val = VC4_VEC_TV_MODE_PAL;
+ break;
+
+ case DRM_MODE_TV_MODE_PAL_M:
+ *val = VC4_VEC_TV_MODE_PAL_M;
+ break;
+
+ case DRM_MODE_TV_MODE_PAL_N:
+ *val = VC4_VEC_TV_MODE_PAL_N;
+ break;
+
+ case DRM_MODE_TV_MODE_SECAM:
+ *val = VC4_VEC_TV_MODE_SECAM;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
}
static const struct drm_connector_funcs vc4_vec_connector_funcs = {
.detect = vc4_vec_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
- .reset = drm_atomic_helper_connector_reset,
+ .reset = vc4_vec_connector_reset,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .atomic_get_property = vc4_vec_connector_get_property,
+ .atomic_set_property = vc4_vec_connector_set_property,
};
static const struct drm_connector_helper_funcs vc4_vec_connector_helper_funcs = {
- .get_modes = vc4_vec_connector_get_modes,
+ .atomic_check = drm_atomic_helper_connector_tv_check,
+ .get_modes = drm_connector_helper_tv_get_modes,
};
static int vc4_vec_connector_init(struct drm_device *dev, struct vc4_vec *vec)
{
struct drm_connector *connector = &vec->connector;
+ struct drm_property *prop;
int ret;
connector->interlace_allowed = true;
@@ -313,7 +498,16 @@ static int vc4_vec_connector_init(struct drm_device *dev, struct vc4_vec *vec)
drm_object_attach_property(&connector->base,
dev->mode_config.tv_mode_property,
- VC4_VEC_TV_MODE_NTSC);
+ DRM_MODE_TV_MODE_NTSC);
+
+ prop = drm_property_create_enum(dev, 0, "mode",
+ legacy_tv_mode_names,
+ ARRAY_SIZE(legacy_tv_mode_names));
+ if (!prop)
+ return -ENOMEM;
+ vec->legacy_tv_mode_property = prop;
+
+ drm_object_attach_property(&connector->base, prop, VC4_VEC_TV_MODE_NTSC);
drm_connector_attach_encoder(connector, &vec->encoder.base);
@@ -360,14 +554,20 @@ static void vc4_vec_encoder_enable(struct drm_encoder *encoder,
struct drm_connector *connector = &vec->connector;
struct drm_connector_state *conn_state =
drm_atomic_get_new_connector_state(state, connector);
- const struct vc4_vec_tv_mode *tv_mode =
- &vc4_vec_tv_modes[conn_state->tv.mode];
+ struct drm_display_mode *adjusted_mode =
+ &encoder->crtc->state->adjusted_mode;
+ const struct vc4_vec_tv_mode *tv_mode;
int idx, ret;
if (!drm_dev_enter(drm, &idx))
return;
- ret = pm_runtime_get_sync(&vec->pdev->dev);
+ tv_mode = vc4_vec_tv_mode_lookup(conn_state->tv.mode,
+ adjusted_mode->htotal);
+ if (!tv_mode)
+ goto err_dev_exit;
+
+ ret = pm_runtime_resume_and_get(&vec->pdev->dev);
if (ret < 0) {
DRM_ERROR("Failed to retain power domain: %d\n", ret);
goto err_dev_exit;
@@ -413,7 +613,9 @@ static void vc4_vec_encoder_enable(struct drm_encoder *encoder,
VEC_WRITE(VEC_CLMP0_START, 0xac);
VEC_WRITE(VEC_CLMP0_END, 0xec);
VEC_WRITE(VEC_CONFIG2,
- VEC_CONFIG2_UV_DIG_DIS | VEC_CONFIG2_RGB_DIG_DIS);
+ VEC_CONFIG2_UV_DIG_DIS |
+ VEC_CONFIG2_RGB_DIG_DIS |
+ ((adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) ? 0 : VEC_CONFIG2_PROG_SCAN));
VEC_WRITE(VEC_CONFIG3, VEC_CONFIG3_HORIZ_LEN_STD);
VEC_WRITE(VEC_DAC_CONFIG, vec->variant->dac_config);
@@ -447,13 +649,61 @@ static int vc4_vec_encoder_atomic_check(struct drm_encoder *encoder,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
- const struct vc4_vec_tv_mode *vec_mode;
+ const struct drm_display_mode *mode = &crtc_state->adjusted_mode;
+ const struct vc4_vec_tv_mode *tv_mode;
+
+ tv_mode = vc4_vec_tv_mode_lookup(conn_state->tv.mode, mode->htotal);
+ if (!tv_mode)
+ return -EINVAL;
+
+ if (mode->crtc_hdisplay % 4)
+ return -EINVAL;
+
+ if (!(mode->crtc_hsync_end - mode->crtc_hsync_start))
+ return -EINVAL;
+
+ switch (mode->htotal) {
+ /* NTSC */
+ case 858:
+ if (mode->crtc_vtotal > 262)
+ return -EINVAL;
+
+ if (mode->crtc_vdisplay < 1 || mode->crtc_vdisplay > 253)
+ return -EINVAL;
+
+ if (!(mode->crtc_vsync_start - mode->crtc_vdisplay))
+ return -EINVAL;
+
+ if ((mode->crtc_vsync_end - mode->crtc_vsync_start) != 3)
+ return -EINVAL;
+
+ if ((mode->crtc_vtotal - mode->crtc_vsync_end) < 4)
+ return -EINVAL;
- vec_mode = &vc4_vec_tv_modes[conn_state->tv.mode];
+ break;
- if (conn_state->crtc &&
- !drm_mode_equal(vec_mode->mode, &crtc_state->adjusted_mode))
+ /* PAL/SECAM */
+ case 864:
+ if (mode->crtc_vtotal > 312)
+ return -EINVAL;
+
+ if (mode->crtc_vdisplay < 1 || mode->crtc_vdisplay > 305)
+ return -EINVAL;
+
+ if (!(mode->crtc_vsync_start - mode->crtc_vdisplay))
+ return -EINVAL;
+
+ if ((mode->crtc_vsync_end - mode->crtc_vsync_start) != 3)
+ return -EINVAL;
+
+ if ((mode->crtc_vtotal - mode->crtc_vsync_end) < 2)
+ return -EINVAL;
+
+ break;
+
+ default:
return -EINVAL;
+ }
return 0;
}
@@ -468,12 +718,8 @@ static int vc4_vec_late_register(struct drm_encoder *encoder)
{
struct drm_device *drm = encoder->dev;
struct vc4_vec *vec = encoder_to_vc4_vec(encoder);
- int ret;
- ret = vc4_debugfs_add_regset32(drm->primary, "vec_regs",
- &vec->regset);
- if (ret)
- return ret;
+ vc4_debugfs_add_regset32(drm, "vec_regs", &vec->regset);
return 0;
}
@@ -500,13 +746,6 @@ static const struct of_device_id vc4_vec_dt_match[] = {
{ /* sentinel */ },
};
-static const char * const tv_mode_names[] = {
- [VC4_VEC_TV_MODE_NTSC] = "NTSC",
- [VC4_VEC_TV_MODE_NTSC_J] = "NTSC-J",
- [VC4_VEC_TV_MODE_PAL] = "PAL",
- [VC4_VEC_TV_MODE_PAL_M] = "PAL-M",
-};
-
static int vc4_vec_bind(struct device *dev, struct device *master, void *data)
{
struct platform_device *pdev = to_platform_device(dev);
@@ -514,8 +753,14 @@ static int vc4_vec_bind(struct device *dev, struct device *master, void *data)
struct vc4_vec *vec;
int ret;
- ret = drm_mode_create_tv_properties(drm, ARRAY_SIZE(tv_mode_names),
- tv_mode_names);
+ ret = drm_mode_create_tv_properties(drm,
+ BIT(DRM_MODE_TV_MODE_NTSC) |
+ BIT(DRM_MODE_TV_MODE_NTSC_443) |
+ BIT(DRM_MODE_TV_MODE_NTSC_J) |
+ BIT(DRM_MODE_TV_MODE_PAL) |
+ BIT(DRM_MODE_TV_MODE_PAL_M) |
+ BIT(DRM_MODE_TV_MODE_PAL_N) |
+ BIT(DRM_MODE_TV_MODE_SECAM));
if (ret)
return ret;
diff --git a/drivers/gpu/drm/via/Makefile b/drivers/gpu/drm/via/Makefile
deleted file mode 100644
index 8b978dd51a25..000000000000
--- a/drivers/gpu/drm/via/Makefile
+++ /dev/null
@@ -1,8 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# Makefile for the drm device driver. This driver provides support for the
-# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
-
-via-y := via_dri1.o
-
-obj-$(CONFIG_DRM_VIA) +=via.o
diff --git a/drivers/gpu/drm/via/via_3d_reg.h b/drivers/gpu/drm/via/via_3d_reg.h
deleted file mode 100644
index eb848508b12b..000000000000
--- a/drivers/gpu/drm/via/via_3d_reg.h
+++ /dev/null
@@ -1,1771 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright 1998-2011 VIA Technologies, Inc. All Rights Reserved.
- * Copyright 2001-2011 S3 Graphics, Inc. All Rights Reserved.
- */
-
-#ifndef VIA_3D_REG_H
-#define VIA_3D_REG_H
-#define HC_REG_BASE 0x0400
-
-#define HC_REG_TRANS_SPACE 0x0040
-
-#define HC_ParaN_MASK 0xffffffff
-#define HC_Para_MASK 0x00ffffff
-#define HC_SubA_MASK 0xff000000
-#define HC_SubA_SHIFT 24
-/* Transmission Setting
- */
-#define HC_REG_TRANS_SET 0x003c
-#define HC_ParaSubType_MASK 0xff000000
-#define HC_ParaType_MASK 0x00ff0000
-#define HC_ParaOS_MASK 0x0000ff00
-#define HC_ParaAdr_MASK 0x000000ff
-#define HC_ParaSubType_SHIFT 24
-#define HC_ParaType_SHIFT 16
-#define HC_ParaOS_SHIFT 8
-#define HC_ParaAdr_SHIFT 0
-
-#define HC_ParaType_CmdVdata 0x0000
-#define HC_ParaType_NotTex 0x0001
-#define HC_ParaType_Tex 0x0002
-#define HC_ParaType_Palette 0x0003
-#define HC_ParaType_PreCR 0x0010
-#define HC_ParaType_Auto 0x00fe
-#define INV_ParaType_Dummy 0x00300000
-
-/* Transmission Space
- */
-#define HC_REG_Hpara0 0x0040
-#define HC_REG_HpataAF 0x02fc
-
-/* Read
- */
-#define HC_REG_HREngSt 0x0000
-#define HC_REG_HRFIFOempty 0x0004
-#define HC_REG_HRFIFOfull 0x0008
-#define HC_REG_HRErr 0x000c
-#define HC_REG_FIFOstatus 0x0010
-/* HC_REG_HREngSt 0x0000
- */
-#define HC_HDASZC_MASK 0x00010000
-#define HC_HSGEMI_MASK 0x0000f000
-#define HC_HLGEMISt_MASK 0x00000f00
-#define HC_HCRSt_MASK 0x00000080
-#define HC_HSE0St_MASK 0x00000040
-#define HC_HSE1St_MASK 0x00000020
-#define HC_HPESt_MASK 0x00000010
-#define HC_HXESt_MASK 0x00000008
-#define HC_HBESt_MASK 0x00000004
-#define HC_HE2St_MASK 0x00000002
-#define HC_HE3St_MASK 0x00000001
-/* HC_REG_HRFIFOempty 0x0004
- */
-#define HC_HRZDempty_MASK 0x00000010
-#define HC_HRTXAempty_MASK 0x00000008
-#define HC_HRTXDempty_MASK 0x00000004
-#define HC_HWZDempty_MASK 0x00000002
-#define HC_HWCDempty_MASK 0x00000001
-/* HC_REG_HRFIFOfull 0x0008
- */
-#define HC_HRZDfull_MASK 0x00000010
-#define HC_HRTXAfull_MASK 0x00000008
-#define HC_HRTXDfull_MASK 0x00000004
-#define HC_HWZDfull_MASK 0x00000002
-#define HC_HWCDfull_MASK 0x00000001
-/* HC_REG_HRErr 0x000c
- */
-#define HC_HAGPCMErr_MASK 0x80000000
-#define HC_HAGPCMErrC_MASK 0x70000000
-/* HC_REG_FIFOstatus 0x0010
- */
-#define HC_HRFIFOATall_MASK 0x80000000
-#define HC_HRFIFOATbusy_MASK 0x40000000
-#define HC_HRATFGMDo_MASK 0x00000100
-#define HC_HRATFGMDi_MASK 0x00000080
-#define HC_HRATFRZD_MASK 0x00000040
-#define HC_HRATFRTXA_MASK 0x00000020
-#define HC_HRATFRTXD_MASK 0x00000010
-#define HC_HRATFWZD_MASK 0x00000008
-#define HC_HRATFWCD_MASK 0x00000004
-#define HC_HRATTXTAG_MASK 0x00000002
-#define HC_HRATTXCH_MASK 0x00000001
-
-/* AGP Command Setting
- */
-#define HC_SubA_HAGPBstL 0x0060
-#define HC_SubA_HAGPBendL 0x0061
-#define HC_SubA_HAGPCMNT 0x0062
-#define HC_SubA_HAGPBpL 0x0063
-#define HC_SubA_HAGPBpH 0x0064
-/* HC_SubA_HAGPCMNT 0x0062
- */
-#define HC_HAGPCMNT_MASK 0x00800000
-#define HC_HCmdErrClr_MASK 0x00400000
-#define HC_HAGPBendH_MASK 0x0000ff00
-#define HC_HAGPBstH_MASK 0x000000ff
-#define HC_HAGPBendH_SHIFT 8
-#define HC_HAGPBstH_SHIFT 0
-/* HC_SubA_HAGPBpL 0x0063
- */
-#define HC_HAGPBpL_MASK 0x00fffffc
-#define HC_HAGPBpID_MASK 0x00000003
-#define HC_HAGPBpID_PAUSE 0x00000000
-#define HC_HAGPBpID_JUMP 0x00000001
-#define HC_HAGPBpID_STOP 0x00000002
-/* HC_SubA_HAGPBpH 0x0064
- */
-#define HC_HAGPBpH_MASK 0x00ffffff
-
-/* Miscellaneous Settings
- */
-#define HC_SubA_HClipTB 0x0070
-#define HC_SubA_HClipLR 0x0071
-#define HC_SubA_HFPClipTL 0x0072
-#define HC_SubA_HFPClipBL 0x0073
-#define HC_SubA_HFPClipLL 0x0074
-#define HC_SubA_HFPClipRL 0x0075
-#define HC_SubA_HFPClipTBH 0x0076
-#define HC_SubA_HFPClipLRH 0x0077
-#define HC_SubA_HLP 0x0078
-#define HC_SubA_HLPRF 0x0079
-#define HC_SubA_HSolidCL 0x007a
-#define HC_SubA_HPixGC 0x007b
-#define HC_SubA_HSPXYOS 0x007c
-#define HC_SubA_HVertexCNT 0x007d
-
-#define HC_HClipT_MASK 0x00fff000
-#define HC_HClipT_SHIFT 12
-#define HC_HClipB_MASK 0x00000fff
-#define HC_HClipB_SHIFT 0
-#define HC_HClipL_MASK 0x00fff000
-#define HC_HClipL_SHIFT 12
-#define HC_HClipR_MASK 0x00000fff
-#define HC_HClipR_SHIFT 0
-#define HC_HFPClipBH_MASK 0x0000ff00
-#define HC_HFPClipBH_SHIFT 8
-#define HC_HFPClipTH_MASK 0x000000ff
-#define HC_HFPClipTH_SHIFT 0
-#define HC_HFPClipRH_MASK 0x0000ff00
-#define HC_HFPClipRH_SHIFT 8
-#define HC_HFPClipLH_MASK 0x000000ff
-#define HC_HFPClipLH_SHIFT 0
-#define HC_HSolidCH_MASK 0x000000ff
-#define HC_HPixGC_MASK 0x00800000
-#define HC_HSPXOS_MASK 0x00fff000
-#define HC_HSPXOS_SHIFT 12
-#define HC_HSPYOS_MASK 0x00000fff
-
-/*
- * Command A
- */
-#define HC_HCmdHeader_MASK 0xfe000000 /*0xffe00000 */
-#define HC_HE3Fire_MASK 0x00100000
-#define HC_HPMType_MASK 0x000f0000
-#define HC_HEFlag_MASK 0x0000e000
-#define HC_HShading_MASK 0x00001c00
-#define HC_HPMValidN_MASK 0x00000200
-#define HC_HPLEND_MASK 0x00000100
-#define HC_HVCycle_MASK 0x000000ff
-#define HC_HVCycle_Style_MASK 0x000000c0
-#define HC_HVCycle_ChgA_MASK 0x00000030
-#define HC_HVCycle_ChgB_MASK 0x0000000c
-#define HC_HVCycle_ChgC_MASK 0x00000003
-#define HC_HPMType_Point 0x00000000
-#define HC_HPMType_Line 0x00010000
-#define HC_HPMType_Tri 0x00020000
-#define HC_HPMType_TriWF 0x00040000
-#define HC_HEFlag_NoAA 0x00000000
-#define HC_HEFlag_ab 0x00008000
-#define HC_HEFlag_bc 0x00004000
-#define HC_HEFlag_ca 0x00002000
-#define HC_HShading_Solid 0x00000000
-#define HC_HShading_FlatA 0x00000400
-#define HC_HShading_FlatB 0x00000800
-#define HC_HShading_FlatC 0x00000c00
-#define HC_HShading_Gouraud 0x00001000
-#define HC_HVCycle_Full 0x00000000
-#define HC_HVCycle_AFP 0x00000040
-#define HC_HVCycle_One 0x000000c0
-#define HC_HVCycle_NewA 0x00000000
-#define HC_HVCycle_AA 0x00000010
-#define HC_HVCycle_AB 0x00000020
-#define HC_HVCycle_AC 0x00000030
-#define HC_HVCycle_NewB 0x00000000
-#define HC_HVCycle_BA 0x00000004
-#define HC_HVCycle_BB 0x00000008
-#define HC_HVCycle_BC 0x0000000c
-#define HC_HVCycle_NewC 0x00000000
-#define HC_HVCycle_CA 0x00000001
-#define HC_HVCycle_CB 0x00000002
-#define HC_HVCycle_CC 0x00000003
-
-/* Command B
- */
-#define HC_HLPrst_MASK 0x00010000
-#define HC_HLLastP_MASK 0x00008000
-#define HC_HVPMSK_MASK 0x00007f80
-#define HC_HBFace_MASK 0x00000040
-#define HC_H2nd1VT_MASK 0x0000003f
-#define HC_HVPMSK_X 0x00004000
-#define HC_HVPMSK_Y 0x00002000
-#define HC_HVPMSK_Z 0x00001000
-#define HC_HVPMSK_W 0x00000800
-#define HC_HVPMSK_Cd 0x00000400
-#define HC_HVPMSK_Cs 0x00000200
-#define HC_HVPMSK_S 0x00000100
-#define HC_HVPMSK_T 0x00000080
-
-/* Enable Setting
- */
-#define HC_SubA_HEnable 0x0000
-#define HC_HenForce1P_MASK 0x00800000 /* [Force 1 Pipe] */
-#define HC_HenZDCheck_MASK 0x00400000 /* [Z dirty bit settings] */
-#define HC_HenTXEnvMap_MASK 0x00200000
-#define HC_HenVertexCNT_MASK 0x00100000
-#define HC_HenCPUDAZ_MASK 0x00080000
-#define HC_HenDASZWC_MASK 0x00040000
-#define HC_HenFBCull_MASK 0x00020000
-#define HC_HenCW_MASK 0x00010000
-#define HC_HenAA_MASK 0x00008000
-#define HC_HenST_MASK 0x00004000
-#define HC_HenZT_MASK 0x00002000
-#define HC_HenZW_MASK 0x00001000
-#define HC_HenAT_MASK 0x00000800
-#define HC_HenAW_MASK 0x00000400
-#define HC_HenSP_MASK 0x00000200
-#define HC_HenLP_MASK 0x00000100
-#define HC_HenTXCH_MASK 0x00000080
-#define HC_HenTXMP_MASK 0x00000040
-#define HC_HenTXPP_MASK 0x00000020
-#define HC_HenTXTR_MASK 0x00000010
-#define HC_HenCS_MASK 0x00000008
-#define HC_HenFOG_MASK 0x00000004
-#define HC_HenABL_MASK 0x00000002
-#define HC_HenDT_MASK 0x00000001
-
-/* Z Setting
- */
-#define HC_SubA_HZWBBasL 0x0010
-#define HC_SubA_HZWBBasH 0x0011
-#define HC_SubA_HZWBType 0x0012
-#define HC_SubA_HZBiasL 0x0013
-#define HC_SubA_HZWBend 0x0014
-#define HC_SubA_HZWTMD 0x0015
-#define HC_SubA_HZWCDL 0x0016
-#define HC_SubA_HZWCTAGnum 0x0017
-#define HC_SubA_HZCYNum 0x0018
-#define HC_SubA_HZWCFire 0x0019
-/* HC_SubA_HZWBType
- */
-#define HC_HZWBType_MASK 0x00800000
-#define HC_HZBiasedWB_MASK 0x00400000
-#define HC_HZONEasFF_MASK 0x00200000
-#define HC_HZOONEasFF_MASK 0x00100000
-#define HC_HZWBFM_MASK 0x00030000
-#define HC_HZWBLoc_MASK 0x0000c000
-#define HC_HZWBPit_MASK 0x00003fff
-#define HC_HZWBFM_16 0x00000000
-#define HC_HZWBFM_32 0x00020000
-#define HC_HZWBFM_24 0x00030000
-#define HC_HZWBLoc_Local 0x00000000
-#define HC_HZWBLoc_SyS 0x00004000
-/* HC_SubA_HZWBend
- */
-#define HC_HZWBend_MASK 0x00ffe000
-#define HC_HZBiasH_MASK 0x000000ff
-#define HC_HZWBend_SHIFT 10
-/* HC_SubA_HZWTMD
- */
-#define HC_HZWTMD_MASK 0x00070000
-#define HC_HEBEBias_MASK 0x00007f00
-#define HC_HZNF_MASK 0x000000ff
-#define HC_HZWTMD_NeverPass 0x00000000
-#define HC_HZWTMD_LT 0x00010000
-#define HC_HZWTMD_EQ 0x00020000
-#define HC_HZWTMD_LE 0x00030000
-#define HC_HZWTMD_GT 0x00040000
-#define HC_HZWTMD_NE 0x00050000
-#define HC_HZWTMD_GE 0x00060000
-#define HC_HZWTMD_AllPass 0x00070000
-#define HC_HEBEBias_SHIFT 8
-/* HC_SubA_HZWCDL 0x0016
- */
-#define HC_HZWCDL_MASK 0x00ffffff
-/* HC_SubA_HZWCTAGnum 0x0017
- */
-#define HC_HZWCTAGnum_MASK 0x00ff0000
-#define HC_HZWCTAGnum_SHIFT 16
-#define HC_HZWCDH_MASK 0x000000ff
-#define HC_HZWCDH_SHIFT 0
-/* HC_SubA_HZCYNum 0x0018
- */
-#define HC_HZCYNum_MASK 0x00030000
-#define HC_HZCYNum_SHIFT 16
-#define HC_HZWCQWnum_MASK 0x00003fff
-#define HC_HZWCQWnum_SHIFT 0
-/* HC_SubA_HZWCFire 0x0019
- */
-#define HC_ZWCFire_MASK 0x00010000
-#define HC_HZWCQWnumLast_MASK 0x00003fff
-#define HC_HZWCQWnumLast_SHIFT 0
-
-/* Stencil Setting
- */
-#define HC_SubA_HSTREF 0x0023
-#define HC_SubA_HSTMD 0x0024
-/* HC_SubA_HSBFM
- */
-#define HC_HSBFM_MASK 0x00030000
-#define HC_HSBLoc_MASK 0x0000c000
-#define HC_HSBPit_MASK 0x00003fff
-/* HC_SubA_HSTREF
- */
-#define HC_HSTREF_MASK 0x00ff0000
-#define HC_HSTOPMSK_MASK 0x0000ff00
-#define HC_HSTBMSK_MASK 0x000000ff
-#define HC_HSTREF_SHIFT 16
-#define HC_HSTOPMSK_SHIFT 8
-/* HC_SubA_HSTMD
- */
-#define HC_HSTMD_MASK 0x00070000
-#define HC_HSTOPSF_MASK 0x000001c0
-#define HC_HSTOPSPZF_MASK 0x00000038
-#define HC_HSTOPSPZP_MASK 0x00000007
-#define HC_HSTMD_NeverPass 0x00000000
-#define HC_HSTMD_LT 0x00010000
-#define HC_HSTMD_EQ 0x00020000
-#define HC_HSTMD_LE 0x00030000
-#define HC_HSTMD_GT 0x00040000
-#define HC_HSTMD_NE 0x00050000
-#define HC_HSTMD_GE 0x00060000
-#define HC_HSTMD_AllPass 0x00070000
-#define HC_HSTOPSF_KEEP 0x00000000
-#define HC_HSTOPSF_ZERO 0x00000040
-#define HC_HSTOPSF_REPLACE 0x00000080
-#define HC_HSTOPSF_INCRSAT 0x000000c0
-#define HC_HSTOPSF_DECRSAT 0x00000100
-#define HC_HSTOPSF_INVERT 0x00000140
-#define HC_HSTOPSF_INCR 0x00000180
-#define HC_HSTOPSF_DECR 0x000001c0
-#define HC_HSTOPSPZF_KEEP 0x00000000
-#define HC_HSTOPSPZF_ZERO 0x00000008
-#define HC_HSTOPSPZF_REPLACE 0x00000010
-#define HC_HSTOPSPZF_INCRSAT 0x00000018
-#define HC_HSTOPSPZF_DECRSAT 0x00000020
-#define HC_HSTOPSPZF_INVERT 0x00000028
-#define HC_HSTOPSPZF_INCR 0x00000030
-#define HC_HSTOPSPZF_DECR 0x00000038
-#define HC_HSTOPSPZP_KEEP 0x00000000
-#define HC_HSTOPSPZP_ZERO 0x00000001
-#define HC_HSTOPSPZP_REPLACE 0x00000002
-#define HC_HSTOPSPZP_INCRSAT 0x00000003
-#define HC_HSTOPSPZP_DECRSAT 0x00000004
-#define HC_HSTOPSPZP_INVERT 0x00000005
-#define HC_HSTOPSPZP_INCR 0x00000006
-#define HC_HSTOPSPZP_DECR 0x00000007
-
-/* Alpha Setting
- */
-#define HC_SubA_HABBasL 0x0030
-#define HC_SubA_HABBasH 0x0031
-#define HC_SubA_HABFM 0x0032
-#define HC_SubA_HATMD 0x0033
-#define HC_SubA_HABLCsat 0x0034
-#define HC_SubA_HABLCop 0x0035
-#define HC_SubA_HABLAsat 0x0036
-#define HC_SubA_HABLAop 0x0037
-#define HC_SubA_HABLRCa 0x0038
-#define HC_SubA_HABLRFCa 0x0039
-#define HC_SubA_HABLRCbias 0x003a
-#define HC_SubA_HABLRCb 0x003b
-#define HC_SubA_HABLRFCb 0x003c
-#define HC_SubA_HABLRAa 0x003d
-#define HC_SubA_HABLRAb 0x003e
-/* HC_SubA_HABFM
- */
-#define HC_HABFM_MASK 0x00030000
-#define HC_HABLoc_MASK 0x0000c000
-#define HC_HABPit_MASK 0x000007ff
-/* HC_SubA_HATMD
- */
-#define HC_HATMD_MASK 0x00000700
-#define HC_HATREF_MASK 0x000000ff
-#define HC_HATMD_NeverPass 0x00000000
-#define HC_HATMD_LT 0x00000100
-#define HC_HATMD_EQ 0x00000200
-#define HC_HATMD_LE 0x00000300
-#define HC_HATMD_GT 0x00000400
-#define HC_HATMD_NE 0x00000500
-#define HC_HATMD_GE 0x00000600
-#define HC_HATMD_AllPass 0x00000700
-/* HC_SubA_HABLCsat
- */
-#define HC_HABLCsat_MASK 0x00010000
-#define HC_HABLCa_MASK 0x0000fc00
-#define HC_HABLCa_C_MASK 0x0000c000
-#define HC_HABLCa_OPC_MASK 0x00003c00
-#define HC_HABLFCa_MASK 0x000003f0
-#define HC_HABLFCa_C_MASK 0x00000300
-#define HC_HABLFCa_OPC_MASK 0x000000f0
-#define HC_HABLCbias_MASK 0x0000000f
-#define HC_HABLCbias_C_MASK 0x00000008
-#define HC_HABLCbias_OPC_MASK 0x00000007
-/*-- Define the input color.
- */
-#define HC_XC_Csrc 0x00000000
-#define HC_XC_Cdst 0x00000001
-#define HC_XC_Asrc 0x00000002
-#define HC_XC_Adst 0x00000003
-#define HC_XC_Fog 0x00000004
-#define HC_XC_HABLRC 0x00000005
-#define HC_XC_minSrcDst 0x00000006
-#define HC_XC_maxSrcDst 0x00000007
-#define HC_XC_mimAsrcInvAdst 0x00000008
-#define HC_XC_OPC 0x00000000
-#define HC_XC_InvOPC 0x00000010
-#define HC_XC_OPCp5 0x00000020
-/*-- Define the input Alpha
- */
-#define HC_XA_OPA 0x00000000
-#define HC_XA_InvOPA 0x00000010
-#define HC_XA_OPAp5 0x00000020
-#define HC_XA_0 0x00000000
-#define HC_XA_Asrc 0x00000001
-#define HC_XA_Adst 0x00000002
-#define HC_XA_Fog 0x00000003
-#define HC_XA_minAsrcFog 0x00000004
-#define HC_XA_minAsrcAdst 0x00000005
-#define HC_XA_maxAsrcFog 0x00000006
-#define HC_XA_maxAsrcAdst 0x00000007
-#define HC_XA_HABLRA 0x00000008
-#define HC_XA_minAsrcInvAdst 0x00000008
-#define HC_XA_HABLFRA 0x00000009
-/*--
- */
-#define HC_HABLCa_OPC (HC_XC_OPC << 10)
-#define HC_HABLCa_InvOPC (HC_XC_InvOPC << 10)
-#define HC_HABLCa_OPCp5 (HC_XC_OPCp5 << 10)
-#define HC_HABLCa_Csrc (HC_XC_Csrc << 10)
-#define HC_HABLCa_Cdst (HC_XC_Cdst << 10)
-#define HC_HABLCa_Asrc (HC_XC_Asrc << 10)
-#define HC_HABLCa_Adst (HC_XC_Adst << 10)
-#define HC_HABLCa_Fog (HC_XC_Fog << 10)
-#define HC_HABLCa_HABLRCa (HC_XC_HABLRC << 10)
-#define HC_HABLCa_minSrcDst (HC_XC_minSrcDst << 10)
-#define HC_HABLCa_maxSrcDst (HC_XC_maxSrcDst << 10)
-#define HC_HABLFCa_OPC (HC_XC_OPC << 4)
-#define HC_HABLFCa_InvOPC (HC_XC_InvOPC << 4)
-#define HC_HABLFCa_OPCp5 (HC_XC_OPCp5 << 4)
-#define HC_HABLFCa_Csrc (HC_XC_Csrc << 4)
-#define HC_HABLFCa_Cdst (HC_XC_Cdst << 4)
-#define HC_HABLFCa_Asrc (HC_XC_Asrc << 4)
-#define HC_HABLFCa_Adst (HC_XC_Adst << 4)
-#define HC_HABLFCa_Fog (HC_XC_Fog << 4)
-#define HC_HABLFCa_HABLRCa (HC_XC_HABLRC << 4)
-#define HC_HABLFCa_minSrcDst (HC_XC_minSrcDst << 4)
-#define HC_HABLFCa_maxSrcDst (HC_XC_maxSrcDst << 4)
-#define HC_HABLFCa_mimAsrcInvAdst (HC_XC_mimAsrcInvAdst << 4)
-#define HC_HABLCbias_HABLRCbias 0x00000000
-#define HC_HABLCbias_Asrc 0x00000001
-#define HC_HABLCbias_Adst 0x00000002
-#define HC_HABLCbias_Fog 0x00000003
-#define HC_HABLCbias_Cin 0x00000004
-/* HC_SubA_HABLCop 0x0035
- */
-#define HC_HABLdot_MASK 0x00010000
-#define HC_HABLCop_MASK 0x00004000
-#define HC_HABLCb_MASK 0x00003f00
-#define HC_HABLCb_C_MASK 0x00003000
-#define HC_HABLCb_OPC_MASK 0x00000f00
-#define HC_HABLFCb_MASK 0x000000fc
-#define HC_HABLFCb_C_MASK 0x000000c0
-#define HC_HABLFCb_OPC_MASK 0x0000003c
-#define HC_HABLCshift_MASK 0x00000003
-#define HC_HABLCb_OPC (HC_XC_OPC << 8)
-#define HC_HABLCb_InvOPC (HC_XC_InvOPC << 8)
-#define HC_HABLCb_OPCp5 (HC_XC_OPCp5 << 8)
-#define HC_HABLCb_Csrc (HC_XC_Csrc << 8)
-#define HC_HABLCb_Cdst (HC_XC_Cdst << 8)
-#define HC_HABLCb_Asrc (HC_XC_Asrc << 8)
-#define HC_HABLCb_Adst (HC_XC_Adst << 8)
-#define HC_HABLCb_Fog (HC_XC_Fog << 8)
-#define HC_HABLCb_HABLRCa (HC_XC_HABLRC << 8)
-#define HC_HABLCb_minSrcDst (HC_XC_minSrcDst << 8)
-#define HC_HABLCb_maxSrcDst (HC_XC_maxSrcDst << 8)
-#define HC_HABLFCb_OPC (HC_XC_OPC << 2)
-#define HC_HABLFCb_InvOPC (HC_XC_InvOPC << 2)
-#define HC_HABLFCb_OPCp5 (HC_XC_OPCp5 << 2)
-#define HC_HABLFCb_Csrc (HC_XC_Csrc << 2)
-#define HC_HABLFCb_Cdst (HC_XC_Cdst << 2)
-#define HC_HABLFCb_Asrc (HC_XC_Asrc << 2)
-#define HC_HABLFCb_Adst (HC_XC_Adst << 2)
-#define HC_HABLFCb_Fog (HC_XC_Fog << 2)
-#define HC_HABLFCb_HABLRCb (HC_XC_HABLRC << 2)
-#define HC_HABLFCb_minSrcDst (HC_XC_minSrcDst << 2)
-#define HC_HABLFCb_maxSrcDst (HC_XC_maxSrcDst << 2)
-#define HC_HABLFCb_mimAsrcInvAdst (HC_XC_mimAsrcInvAdst << 2)
-/* HC_SubA_HABLAsat 0x0036
- */
-#define HC_HABLAsat_MASK 0x00010000
-#define HC_HABLAa_MASK 0x0000fc00
-#define HC_HABLAa_A_MASK 0x0000c000
-#define HC_HABLAa_OPA_MASK 0x00003c00
-#define HC_HABLFAa_MASK 0x000003f0
-#define HC_HABLFAa_A_MASK 0x00000300
-#define HC_HABLFAa_OPA_MASK 0x000000f0
-#define HC_HABLAbias_MASK 0x0000000f
-#define HC_HABLAbias_A_MASK 0x00000008
-#define HC_HABLAbias_OPA_MASK 0x00000007
-#define HC_HABLAa_OPA (HC_XA_OPA << 10)
-#define HC_HABLAa_InvOPA (HC_XA_InvOPA << 10)
-#define HC_HABLAa_OPAp5 (HC_XA_OPAp5 << 10)
-#define HC_HABLAa_0 (HC_XA_0 << 10)
-#define HC_HABLAa_Asrc (HC_XA_Asrc << 10)
-#define HC_HABLAa_Adst (HC_XA_Adst << 10)
-#define HC_HABLAa_Fog (HC_XA_Fog << 10)
-#define HC_HABLAa_minAsrcFog (HC_XA_minAsrcFog << 10)
-#define HC_HABLAa_minAsrcAdst (HC_XA_minAsrcAdst << 10)
-#define HC_HABLAa_maxAsrcFog (HC_XA_maxAsrcFog << 10)
-#define HC_HABLAa_maxAsrcAdst (HC_XA_maxAsrcAdst << 10)
-#define HC_HABLAa_HABLRA (HC_XA_HABLRA << 10)
-#define HC_HABLFAa_OPA (HC_XA_OPA << 4)
-#define HC_HABLFAa_InvOPA (HC_XA_InvOPA << 4)
-#define HC_HABLFAa_OPAp5 (HC_XA_OPAp5 << 4)
-#define HC_HABLFAa_0 (HC_XA_0 << 4)
-#define HC_HABLFAa_Asrc (HC_XA_Asrc << 4)
-#define HC_HABLFAa_Adst (HC_XA_Adst << 4)
-#define HC_HABLFAa_Fog (HC_XA_Fog << 4)
-#define HC_HABLFAa_minAsrcFog (HC_XA_minAsrcFog << 4)
-#define HC_HABLFAa_minAsrcAdst (HC_XA_minAsrcAdst << 4)
-#define HC_HABLFAa_maxAsrcFog (HC_XA_maxAsrcFog << 4)
-#define HC_HABLFAa_maxAsrcAdst (HC_XA_maxAsrcAdst << 4)
-#define HC_HABLFAa_minAsrcInvAdst (HC_XA_minAsrcInvAdst << 4)
-#define HC_HABLFAa_HABLFRA (HC_XA_HABLFRA << 4)
-#define HC_HABLAbias_HABLRAbias 0x00000000
-#define HC_HABLAbias_Asrc 0x00000001
-#define HC_HABLAbias_Adst 0x00000002
-#define HC_HABLAbias_Fog 0x00000003
-#define HC_HABLAbias_Aaa 0x00000004
-/* HC_SubA_HABLAop 0x0037
- */
-#define HC_HABLAop_MASK 0x00004000
-#define HC_HABLAb_MASK 0x00003f00
-#define HC_HABLAb_OPA_MASK 0x00000f00
-#define HC_HABLFAb_MASK 0x000000fc
-#define HC_HABLFAb_OPA_MASK 0x0000003c
-#define HC_HABLAshift_MASK 0x00000003
-#define HC_HABLAb_OPA (HC_XA_OPA << 8)
-#define HC_HABLAb_InvOPA (HC_XA_InvOPA << 8)
-#define HC_HABLAb_OPAp5 (HC_XA_OPAp5 << 8)
-#define HC_HABLAb_0 (HC_XA_0 << 8)
-#define HC_HABLAb_Asrc (HC_XA_Asrc << 8)
-#define HC_HABLAb_Adst (HC_XA_Adst << 8)
-#define HC_HABLAb_Fog (HC_XA_Fog << 8)
-#define HC_HABLAb_minAsrcFog (HC_XA_minAsrcFog << 8)
-#define HC_HABLAb_minAsrcAdst (HC_XA_minAsrcAdst << 8)
-#define HC_HABLAb_maxAsrcFog (HC_XA_maxAsrcFog << 8)
-#define HC_HABLAb_maxAsrcAdst (HC_XA_maxAsrcAdst << 8)
-#define HC_HABLAb_HABLRA (HC_XA_HABLRA << 8)
-#define HC_HABLFAb_OPA (HC_XA_OPA << 2)
-#define HC_HABLFAb_InvOPA (HC_XA_InvOPA << 2)
-#define HC_HABLFAb_OPAp5 (HC_XA_OPAp5 << 2)
-#define HC_HABLFAb_0 (HC_XA_0 << 2)
-#define HC_HABLFAb_Asrc (HC_XA_Asrc << 2)
-#define HC_HABLFAb_Adst (HC_XA_Adst << 2)
-#define HC_HABLFAb_Fog (HC_XA_Fog << 2)
-#define HC_HABLFAb_minAsrcFog (HC_XA_minAsrcFog << 2)
-#define HC_HABLFAb_minAsrcAdst (HC_XA_minAsrcAdst << 2)
-#define HC_HABLFAb_maxAsrcFog (HC_XA_maxAsrcFog << 2)
-#define HC_HABLFAb_maxAsrcAdst (HC_XA_maxAsrcAdst << 2)
-#define HC_HABLFAb_minAsrcInvAdst (HC_XA_minAsrcInvAdst << 2)
-#define HC_HABLFAb_HABLFRA (HC_XA_HABLFRA << 2)
-/* HC_SubA_HABLRAa 0x003d
- */
-#define HC_HABLRAa_MASK 0x00ff0000
-#define HC_HABLRFAa_MASK 0x0000ff00
-#define HC_HABLRAbias_MASK 0x000000ff
-#define HC_HABLRAa_SHIFT 16
-#define HC_HABLRFAa_SHIFT 8
-/* HC_SubA_HABLRAb 0x003e
- */
-#define HC_HABLRAb_MASK 0x0000ff00
-#define HC_HABLRFAb_MASK 0x000000ff
-#define HC_HABLRAb_SHIFT 8
-
-/* Destination Setting
- */
-#define HC_SubA_HDBBasL 0x0040
-#define HC_SubA_HDBBasH 0x0041
-#define HC_SubA_HDBFM 0x0042
-#define HC_SubA_HFBBMSKL 0x0043
-#define HC_SubA_HROP 0x0044
-/* HC_SubA_HDBFM 0x0042
- */
-#define HC_HDBFM_MASK 0x001f0000
-#define HC_HDBLoc_MASK 0x0000c000
-#define HC_HDBPit_MASK 0x00003fff
-#define HC_HDBFM_RGB555 0x00000000
-#define HC_HDBFM_RGB565 0x00010000
-#define HC_HDBFM_ARGB4444 0x00020000
-#define HC_HDBFM_ARGB1555 0x00030000
-#define HC_HDBFM_BGR555 0x00040000
-#define HC_HDBFM_BGR565 0x00050000
-#define HC_HDBFM_ABGR4444 0x00060000
-#define HC_HDBFM_ABGR1555 0x00070000
-#define HC_HDBFM_ARGB0888 0x00080000
-#define HC_HDBFM_ARGB8888 0x00090000
-#define HC_HDBFM_ABGR0888 0x000a0000
-#define HC_HDBFM_ABGR8888 0x000b0000
-#define HC_HDBLoc_Local 0x00000000
-#define HC_HDBLoc_Sys 0x00004000
-/* HC_SubA_HROP 0x0044
- */
-#define HC_HROP_MASK 0x00000f00
-#define HC_HFBBMSKH_MASK 0x000000ff
-#define HC_HROP_BLACK 0x00000000
-#define HC_HROP_DPon 0x00000100
-#define HC_HROP_DPna 0x00000200
-#define HC_HROP_Pn 0x00000300
-#define HC_HROP_PDna 0x00000400
-#define HC_HROP_Dn 0x00000500
-#define HC_HROP_DPx 0x00000600
-#define HC_HROP_DPan 0x00000700
-#define HC_HROP_DPa 0x00000800
-#define HC_HROP_DPxn 0x00000900
-#define HC_HROP_D 0x00000a00
-#define HC_HROP_DPno 0x00000b00
-#define HC_HROP_P 0x00000c00
-#define HC_HROP_PDno 0x00000d00
-#define HC_HROP_DPo 0x00000e00
-#define HC_HROP_WHITE 0x00000f00
-
-/* Fog Setting
- */
-#define HC_SubA_HFogLF 0x0050
-#define HC_SubA_HFogCL 0x0051
-#define HC_SubA_HFogCH 0x0052
-#define HC_SubA_HFogStL 0x0053
-#define HC_SubA_HFogStH 0x0054
-#define HC_SubA_HFogOOdMF 0x0055
-#define HC_SubA_HFogOOdEF 0x0056
-#define HC_SubA_HFogEndL 0x0057
-#define HC_SubA_HFogDenst 0x0058
-/* HC_SubA_FogLF 0x0050
- */
-#define HC_FogLF_MASK 0x00000010
-#define HC_FogEq_MASK 0x00000008
-#define HC_FogMD_MASK 0x00000007
-#define HC_FogMD_LocalFog 0x00000000
-#define HC_FogMD_LinearFog 0x00000002
-#define HC_FogMD_ExponentialFog 0x00000004
-#define HC_FogMD_Exponential2Fog 0x00000005
-/* #define HC_FogMD_FogTable 0x00000003 */
-
-/* HC_SubA_HFogDenst 0x0058
- */
-#define HC_FogDenst_MASK 0x001fff00
-#define HC_FogEndL_MASK 0x000000ff
-
-/* Texture subtype definitions
- */
-#define HC_SubType_Samp0 0x00000020
-#define HC_SubType_Samp1 0x00000021
-
-
-/* Texture subtype definitions
- */
-#define HC_SubType_Tex0 0x00000000
-#define HC_SubType_Tex1 0x00000001
-#define HC_SubType_TexGeneral 0x000000fe
-
-/* Attribute of texture n
- */
-#define HC_SubA_HTXnL0BasL 0x0000
-#define HC_SubA_HTXnL1BasL 0x0001
-#define HC_SubA_HTXnL2BasL 0x0002
-#define HC_SubA_HTXnL3BasL 0x0003
-#define HC_SubA_HTXnL4BasL 0x0004
-#define HC_SubA_HTXnL5BasL 0x0005
-#define HC_SubA_HTXnL6BasL 0x0006
-#define HC_SubA_HTXnL7BasL 0x0007
-#define HC_SubA_HTXnL8BasL 0x0008
-#define HC_SubA_HTXnL9BasL 0x0009
-#define HC_SubA_HTXnLaBasL 0x000a
-#define HC_SubA_HTXnLbBasL 0x000b
-#define HC_SubA_HTXnLcBasL 0x000c
-#define HC_SubA_HTXnLdBasL 0x000d
-#define HC_SubA_HTXnLeBasL 0x000e
-#define HC_SubA_HTXnLfBasL 0x000f
-#define HC_SubA_HTXnL10BasL 0x0010
-#define HC_SubA_HTXnL11BasL 0x0011
-#define HC_SubA_HTXnL012BasH 0x0020
-#define HC_SubA_HTXnL345BasH 0x0021
-#define HC_SubA_HTXnL678BasH 0x0022
-#define HC_SubA_HTXnL9abBasH 0x0023
-#define HC_SubA_HTXnLcdeBasH 0x0024
-#define HC_SubA_HTXnLf1011BasH 0x0025
-#define HC_SubA_HTXnL0Pit 0x002b
-#define HC_SubA_HTXnL1Pit 0x002c
-#define HC_SubA_HTXnL2Pit 0x002d
-#define HC_SubA_HTXnL3Pit 0x002e
-#define HC_SubA_HTXnL4Pit 0x002f
-#define HC_SubA_HTXnL5Pit 0x0030
-#define HC_SubA_HTXnL6Pit 0x0031
-#define HC_SubA_HTXnL7Pit 0x0032
-#define HC_SubA_HTXnL8Pit 0x0033
-#define HC_SubA_HTXnL9Pit 0x0034
-#define HC_SubA_HTXnLaPit 0x0035
-#define HC_SubA_HTXnLbPit 0x0036
-#define HC_SubA_HTXnLcPit 0x0037
-#define HC_SubA_HTXnLdPit 0x0038
-#define HC_SubA_HTXnLePit 0x0039
-#define HC_SubA_HTXnLfPit 0x003a
-#define HC_SubA_HTXnL10Pit 0x003b
-#define HC_SubA_HTXnL11Pit 0x003c
-#define HC_SubA_HTXnL0_5WE 0x004b
-#define HC_SubA_HTXnL6_bWE 0x004c
-#define HC_SubA_HTXnLc_11WE 0x004d
-#define HC_SubA_HTXnL0_5HE 0x0051
-#define HC_SubA_HTXnL6_bHE 0x0052
-#define HC_SubA_HTXnLc_11HE 0x0053
-#define HC_SubA_HTXnL0OS 0x0077
-#define HC_SubA_HTXnTB 0x0078
-#define HC_SubA_HTXnMPMD 0x0079
-#define HC_SubA_HTXnCLODu 0x007a
-#define HC_SubA_HTXnFM 0x007b
-#define HC_SubA_HTXnTRCH 0x007c
-#define HC_SubA_HTXnTRCL 0x007d
-#define HC_SubA_HTXnTBC 0x007e
-#define HC_SubA_HTXnTRAH 0x007f
-#define HC_SubA_HTXnTBLCsat 0x0080
-#define HC_SubA_HTXnTBLCop 0x0081
-#define HC_SubA_HTXnTBLMPfog 0x0082
-#define HC_SubA_HTXnTBLAsat 0x0083
-#define HC_SubA_HTXnTBLRCa 0x0085
-#define HC_SubA_HTXnTBLRCb 0x0086
-#define HC_SubA_HTXnTBLRCc 0x0087
-#define HC_SubA_HTXnTBLRCbias 0x0088
-#define HC_SubA_HTXnTBLRAa 0x0089
-#define HC_SubA_HTXnTBLRFog 0x008a
-#define HC_SubA_HTXnBumpM00 0x0090
-#define HC_SubA_HTXnBumpM01 0x0091
-#define HC_SubA_HTXnBumpM10 0x0092
-#define HC_SubA_HTXnBumpM11 0x0093
-#define HC_SubA_HTXnLScale 0x0094
-
-#define HC_SubA_HTXSMD 0x0000
-#define HC_SubA_HTXYUV2RGB1 0x0001
-#define HC_SubA_HTXYUV2RGB2 0x0002
-#define HC_SubA_HTXYUV2RGB3 0x0003
-#define HTXYUV2RGB4BT601 (1<<23)
-#define HTXYUV2RGB4BT709 (1<<22)
-/* HC_SubA_HTXnL012BasH 0x0020
- */
-#define HC_HTXnL0BasH_MASK 0x000000ff
-#define HC_HTXnL1BasH_MASK 0x0000ff00
-#define HC_HTXnL2BasH_MASK 0x00ff0000
-#define HC_HTXnL1BasH_SHIFT 8
-#define HC_HTXnL2BasH_SHIFT 16
-/* HC_SubA_HTXnL345BasH 0x0021
- */
-#define HC_HTXnL3BasH_MASK 0x000000ff
-#define HC_HTXnL4BasH_MASK 0x0000ff00
-#define HC_HTXnL5BasH_MASK 0x00ff0000
-#define HC_HTXnL4BasH_SHIFT 8
-#define HC_HTXnL5BasH_SHIFT 16
-/* HC_SubA_HTXnL678BasH 0x0022
- */
-#define HC_HTXnL6BasH_MASK 0x000000ff
-#define HC_HTXnL7BasH_MASK 0x0000ff00
-#define HC_HTXnL8BasH_MASK 0x00ff0000
-#define HC_HTXnL7BasH_SHIFT 8
-#define HC_HTXnL8BasH_SHIFT 16
-/* HC_SubA_HTXnL9abBasH 0x0023
- */
-#define HC_HTXnL9BasH_MASK 0x000000ff
-#define HC_HTXnLaBasH_MASK 0x0000ff00
-#define HC_HTXnLbBasH_MASK 0x00ff0000
-#define HC_HTXnLaBasH_SHIFT 8
-#define HC_HTXnLbBasH_SHIFT 16
-/* HC_SubA_HTXnLcdeBasH 0x0024
- */
-#define HC_HTXnLcBasH_MASK 0x000000ff
-#define HC_HTXnLdBasH_MASK 0x0000ff00
-#define HC_HTXnLeBasH_MASK 0x00ff0000
-#define HC_HTXnLdBasH_SHIFT 8
-#define HC_HTXnLeBasH_SHIFT 16
-/* HC_SubA_HTXnLcdeBasH 0x0025
- */
-#define HC_HTXnLfBasH_MASK 0x000000ff
-#define HC_HTXnL10BasH_MASK 0x0000ff00
-#define HC_HTXnL11BasH_MASK 0x00ff0000
-#define HC_HTXnL10BasH_SHIFT 8
-#define HC_HTXnL11BasH_SHIFT 16
-/* HC_SubA_HTXnL0Pit 0x002b
- */
-#define HC_HTXnLnPit_MASK 0x00003fff
-#define HC_HTXnEnPit_MASK 0x00080000
-#define HC_HTXnLnPitE_MASK 0x00f00000
-#define HC_HTXnLnPitE_SHIFT 20
-/* HC_SubA_HTXnL0_5WE 0x004b
- */
-#define HC_HTXnL0WE_MASK 0x0000000f
-#define HC_HTXnL1WE_MASK 0x000000f0
-#define HC_HTXnL2WE_MASK 0x00000f00
-#define HC_HTXnL3WE_MASK 0x0000f000
-#define HC_HTXnL4WE_MASK 0x000f0000
-#define HC_HTXnL5WE_MASK 0x00f00000
-#define HC_HTXnL1WE_SHIFT 4
-#define HC_HTXnL2WE_SHIFT 8
-#define HC_HTXnL3WE_SHIFT 12
-#define HC_HTXnL4WE_SHIFT 16
-#define HC_HTXnL5WE_SHIFT 20
-/* HC_SubA_HTXnL6_bWE 0x004c
- */
-#define HC_HTXnL6WE_MASK 0x0000000f
-#define HC_HTXnL7WE_MASK 0x000000f0
-#define HC_HTXnL8WE_MASK 0x00000f00
-#define HC_HTXnL9WE_MASK 0x0000f000
-#define HC_HTXnLaWE_MASK 0x000f0000
-#define HC_HTXnLbWE_MASK 0x00f00000
-#define HC_HTXnL7WE_SHIFT 4
-#define HC_HTXnL8WE_SHIFT 8
-#define HC_HTXnL9WE_SHIFT 12
-#define HC_HTXnLaWE_SHIFT 16
-#define HC_HTXnLbWE_SHIFT 20
-/* HC_SubA_HTXnLc_11WE 0x004d
- */
-#define HC_HTXnLcWE_MASK 0x0000000f
-#define HC_HTXnLdWE_MASK 0x000000f0
-#define HC_HTXnLeWE_MASK 0x00000f00
-#define HC_HTXnLfWE_MASK 0x0000f000
-#define HC_HTXnL10WE_MASK 0x000f0000
-#define HC_HTXnL11WE_MASK 0x00f00000
-#define HC_HTXnLdWE_SHIFT 4
-#define HC_HTXnLeWE_SHIFT 8
-#define HC_HTXnLfWE_SHIFT 12
-#define HC_HTXnL10WE_SHIFT 16
-#define HC_HTXnL11WE_SHIFT 20
-/* HC_SubA_HTXnL0_5HE 0x0051
- */
-#define HC_HTXnL0HE_MASK 0x0000000f
-#define HC_HTXnL1HE_MASK 0x000000f0
-#define HC_HTXnL2HE_MASK 0x00000f00
-#define HC_HTXnL3HE_MASK 0x0000f000
-#define HC_HTXnL4HE_MASK 0x000f0000
-#define HC_HTXnL5HE_MASK 0x00f00000
-#define HC_HTXnL1HE_SHIFT 4
-#define HC_HTXnL2HE_SHIFT 8
-#define HC_HTXnL3HE_SHIFT 12
-#define HC_HTXnL4HE_SHIFT 16
-#define HC_HTXnL5HE_SHIFT 20
-/* HC_SubA_HTXnL6_bHE 0x0052
- */
-#define HC_HTXnL6HE_MASK 0x0000000f
-#define HC_HTXnL7HE_MASK 0x000000f0
-#define HC_HTXnL8HE_MASK 0x00000f00
-#define HC_HTXnL9HE_MASK 0x0000f000
-#define HC_HTXnLaHE_MASK 0x000f0000
-#define HC_HTXnLbHE_MASK 0x00f00000
-#define HC_HTXnL7HE_SHIFT 4
-#define HC_HTXnL8HE_SHIFT 8
-#define HC_HTXnL9HE_SHIFT 12
-#define HC_HTXnLaHE_SHIFT 16
-#define HC_HTXnLbHE_SHIFT 20
-/* HC_SubA_HTXnLc_11HE 0x0053
- */
-#define HC_HTXnLcHE_MASK 0x0000000f
-#define HC_HTXnLdHE_MASK 0x000000f0
-#define HC_HTXnLeHE_MASK 0x00000f00
-#define HC_HTXnLfHE_MASK 0x0000f000
-#define HC_HTXnL10HE_MASK 0x000f0000
-#define HC_HTXnL11HE_MASK 0x00f00000
-#define HC_HTXnLdHE_SHIFT 4
-#define HC_HTXnLeHE_SHIFT 8
-#define HC_HTXnLfHE_SHIFT 12
-#define HC_HTXnL10HE_SHIFT 16
-#define HC_HTXnL11HE_SHIFT 20
-/* HC_SubA_HTXnL0OS 0x0077
- */
-#define HC_HTXnL0OS_MASK 0x003ff000
-#define HC_HTXnLVmax_MASK 0x00000fc0
-#define HC_HTXnLVmin_MASK 0x0000003f
-#define HC_HTXnL0OS_SHIFT 12
-#define HC_HTXnLVmax_SHIFT 6
-/* HC_SubA_HTXnTB 0x0078
- */
-#define HC_HTXnTB_MASK 0x00f00000
-#define HC_HTXnFLSe_MASK 0x0000e000
-#define HC_HTXnFLSs_MASK 0x00001c00
-#define HC_HTXnFLTe_MASK 0x00000380
-#define HC_HTXnFLTs_MASK 0x00000070
-#define HC_HTXnFLDs_MASK 0x0000000f
-#define HC_HTXnTB_NoTB 0x00000000
-#define HC_HTXnTB_TBC_S 0x00100000
-#define HC_HTXnTB_TBC_T 0x00200000
-#define HC_HTXnTB_TB_S 0x00400000
-#define HC_HTXnTB_TB_T 0x00800000
-#define HC_HTXnFLSe_Nearest 0x00000000
-#define HC_HTXnFLSe_Linear 0x00002000
-#define HC_HTXnFLSe_NonLinear 0x00004000
-#define HC_HTXnFLSe_Sharp 0x00008000
-#define HC_HTXnFLSe_Flat_Gaussian_Cubic 0x0000c000
-#define HC_HTXnFLSs_Nearest 0x00000000
-#define HC_HTXnFLSs_Linear 0x00000400
-#define HC_HTXnFLSs_NonLinear 0x00000800
-#define HC_HTXnFLSs_Flat_Gaussian_Cubic 0x00001800
-#define HC_HTXnFLTe_Nearest 0x00000000
-#define HC_HTXnFLTe_Linear 0x00000080
-#define HC_HTXnFLTe_NonLinear 0x00000100
-#define HC_HTXnFLTe_Sharp 0x00000180
-#define HC_HTXnFLTe_Flat_Gaussian_Cubic 0x00000300
-#define HC_HTXnFLTs_Nearest 0x00000000
-#define HC_HTXnFLTs_Linear 0x00000010
-#define HC_HTXnFLTs_NonLinear 0x00000020
-#define HC_HTXnFLTs_Flat_Gaussian_Cubic 0x00000060
-#define HC_HTXnFLDs_Tex0 0x00000000
-#define HC_HTXnFLDs_Nearest 0x00000001
-#define HC_HTXnFLDs_Linear 0x00000002
-#define HC_HTXnFLDs_NonLinear 0x00000003
-#define HC_HTXnFLDs_Dither 0x00000004
-#define HC_HTXnFLDs_ConstLOD 0x00000005
-#define HC_HTXnFLDs_Ani 0x00000006
-#define HC_HTXnFLDs_AniDither 0x00000007
-/* HC_SubA_HTXnMPMD 0x0079
- */
-#define HC_HTXnMPMD_SMASK 0x00070000
-#define HC_HTXnMPMD_TMASK 0x00380000
-#define HC_HTXnLODDTf_MASK 0x00000007
-#define HC_HTXnXY2ST_MASK 0x00000008
-#define HC_HTXnMPMD_Tsingle 0x00000000
-#define HC_HTXnMPMD_Tclamp 0x00080000
-#define HC_HTXnMPMD_Trepeat 0x00100000
-#define HC_HTXnMPMD_Tmirror 0x00180000
-#define HC_HTXnMPMD_Twrap 0x00200000
-#define HC_HTXnMPMD_Ssingle 0x00000000
-#define HC_HTXnMPMD_Sclamp 0x00010000
-#define HC_HTXnMPMD_Srepeat 0x00020000
-#define HC_HTXnMPMD_Smirror 0x00030000
-#define HC_HTXnMPMD_Swrap 0x00040000
-/* HC_SubA_HTXnCLODu 0x007a
- */
-#define HC_HTXnCLODu_MASK 0x000ffc00
-#define HC_HTXnCLODd_MASK 0x000003ff
-#define HC_HTXnCLODu_SHIFT 10
-/* HC_SubA_HTXnFM 0x007b
- */
-#define HC_HTXnFM_MASK 0x00ff0000
-#define HC_HTXnLoc_MASK 0x00000003
-#define HC_HTXnFM_INDEX 0x00000000
-#define HC_HTXnFM_Intensity 0x00080000
-#define HC_HTXnFM_Lum 0x00100000
-#define HC_HTXnFM_Alpha 0x00180000
-#define HC_HTXnFM_DX 0x00280000
-#define HC_HTXnFM_YUV 0x00300000
-#define HC_HTXnFM_ARGB16 0x00880000
-#define HC_HTXnFM_ARGB32 0x00980000
-#define HC_HTXnFM_ABGR16 0x00a80000
-#define HC_HTXnFM_ABGR32 0x00b80000
-#define HC_HTXnFM_RGBA16 0x00c80000
-#define HC_HTXnFM_RGBA32 0x00d80000
-#define HC_HTXnFM_BGRA16 0x00e80000
-#define HC_HTXnFM_BGRA32 0x00f80000
-#define HC_HTXnFM_BUMPMAP 0x00380000
-#define HC_HTXnFM_Index1 (HC_HTXnFM_INDEX | 0x00000000)
-#define HC_HTXnFM_Index2 (HC_HTXnFM_INDEX | 0x00010000)
-#define HC_HTXnFM_Index4 (HC_HTXnFM_INDEX | 0x00020000)
-#define HC_HTXnFM_Index8 (HC_HTXnFM_INDEX | 0x00030000)
-#define HC_HTXnFM_T1 (HC_HTXnFM_Intensity | 0x00000000)
-#define HC_HTXnFM_T2 (HC_HTXnFM_Intensity | 0x00010000)
-#define HC_HTXnFM_T4 (HC_HTXnFM_Intensity | 0x00020000)
-#define HC_HTXnFM_T8 (HC_HTXnFM_Intensity | 0x00030000)
-#define HC_HTXnFM_L1 (HC_HTXnFM_Lum | 0x00000000)
-#define HC_HTXnFM_L2 (HC_HTXnFM_Lum | 0x00010000)
-#define HC_HTXnFM_L4 (HC_HTXnFM_Lum | 0x00020000)
-#define HC_HTXnFM_L8 (HC_HTXnFM_Lum | 0x00030000)
-#define HC_HTXnFM_AL44 (HC_HTXnFM_Lum | 0x00040000)
-#define HC_HTXnFM_AL88 (HC_HTXnFM_Lum | 0x00050000)
-#define HC_HTXnFM_A1 (HC_HTXnFM_Alpha | 0x00000000)
-#define HC_HTXnFM_A2 (HC_HTXnFM_Alpha | 0x00010000)
-#define HC_HTXnFM_A4 (HC_HTXnFM_Alpha | 0x00020000)
-#define HC_HTXnFM_A8 (HC_HTXnFM_Alpha | 0x00030000)
-#define HC_HTXnFM_DX1 (HC_HTXnFM_DX | 0x00010000)
-#define HC_HTXnFM_DX23 (HC_HTXnFM_DX | 0x00020000)
-#define HC_HTXnFM_DX45 (HC_HTXnFM_DX | 0x00030000)
-/* YUV package mode */
-#define HC_HTXnFM_YUY2 (HC_HTXnFM_YUV | 0x00000000)
-/* YUV planner mode */
-#define HC_HTXnFM_YV12 (HC_HTXnFM_YUV | 0x00040000)
-/* YUV planner mode */
-#define HC_HTXnFM_IYUV (HC_HTXnFM_YUV | 0x00040000)
-#define HC_HTXnFM_RGB555 (HC_HTXnFM_ARGB16 | 0x00000000)
-#define HC_HTXnFM_RGB565 (HC_HTXnFM_ARGB16 | 0x00010000)
-#define HC_HTXnFM_ARGB1555 (HC_HTXnFM_ARGB16 | 0x00020000)
-#define HC_HTXnFM_ARGB4444 (HC_HTXnFM_ARGB16 | 0x00030000)
-#define HC_HTXnFM_ARGB0888 (HC_HTXnFM_ARGB32 | 0x00000000)
-#define HC_HTXnFM_ARGB8888 (HC_HTXnFM_ARGB32 | 0x00010000)
-#define HC_HTXnFM_BGR555 (HC_HTXnFM_ABGR16 | 0x00000000)
-#define HC_HTXnFM_BGR565 (HC_HTXnFM_ABGR16 | 0x00010000)
-#define HC_HTXnFM_ABGR1555 (HC_HTXnFM_ABGR16 | 0x00020000)
-#define HC_HTXnFM_ABGR4444 (HC_HTXnFM_ABGR16 | 0x00030000)
-#define HC_HTXnFM_ABGR0888 (HC_HTXnFM_ABGR32 | 0x00000000)
-#define HC_HTXnFM_ABGR8888 (HC_HTXnFM_ABGR32 | 0x00010000)
-#define HC_HTXnFM_RGBA5550 (HC_HTXnFM_RGBA16 | 0x00000000)
-#define HC_HTXnFM_RGBA5551 (HC_HTXnFM_RGBA16 | 0x00020000)
-#define HC_HTXnFM_RGBA4444 (HC_HTXnFM_RGBA16 | 0x00030000)
-#define HC_HTXnFM_RGBA8880 (HC_HTXnFM_RGBA32 | 0x00000000)
-#define HC_HTXnFM_RGBA8888 (HC_HTXnFM_RGBA32 | 0x00010000)
-#define HC_HTXnFM_BGRA5550 (HC_HTXnFM_BGRA16 | 0x00000000)
-#define HC_HTXnFM_BGRA5551 (HC_HTXnFM_BGRA16 | 0x00020000)
-#define HC_HTXnFM_BGRA4444 (HC_HTXnFM_BGRA16 | 0x00030000)
-#define HC_HTXnFM_BGRA8880 (HC_HTXnFM_BGRA32 | 0x00000000)
-#define HC_HTXnFM_BGRA8888 (HC_HTXnFM_BGRA32 | 0x00010000)
-#define HC_HTXnFM_VU88 (HC_HTXnFM_BUMPMAP | 0x00000000)
-#define HC_HTXnFM_LVU655 (HC_HTXnFM_BUMPMAP | 0x00010000)
-#define HC_HTXnFM_LVU888 (HC_HTXnFM_BUMPMAP | 0x00020000)
-#define HC_HTXnLoc_Local 0x00000000
-#define HC_HTXnLoc_Sys 0x00000002
-#define HC_HTXnLoc_AGP 0x00000003
-
-/* Video Texture */
-#define HC_HTXnYUV2RGBMode_RGB 0x00000000
-#define HC_HTXnYUV2RGBMode_SDTV 0x00000001
-#define HC_HTXnYUV2RGBMode_HDTV 0x00000002
-#define HC_HTXnYUV2RGBMode_TABLE 0x00000003
-
-/* HC_SubA_HTXnTRAH 0x007f
- */
-#define HC_HTXnTRAH_MASK 0x00ff0000
-#define HC_HTXnTRAL_MASK 0x0000ff00
-#define HC_HTXnTBA_MASK 0x000000ff
-#define HC_HTXnTRAH_SHIFT 16
-#define HC_HTXnTRAL_SHIFT 8
-/* HC_SubA_HTXnTBLCsat 0x0080
- *-- Define the input texture.
- */
-#define HC_XTC_TOPC 0x00000000
-#define HC_XTC_InvTOPC 0x00000010
-#define HC_XTC_TOPCp5 0x00000020
-#define HC_XTC_Cbias 0x00000000
-#define HC_XTC_InvCbias 0x00000010
-#define HC_XTC_0 0x00000000
-#define HC_XTC_Dif 0x00000001
-#define HC_XTC_Spec 0x00000002
-#define HC_XTC_Tex 0x00000003
-#define HC_XTC_Cur 0x00000004
-#define HC_XTC_Adif 0x00000005
-#define HC_XTC_Fog 0x00000006
-#define HC_XTC_Atex 0x00000007
-#define HC_XTC_Acur 0x00000008
-#define HC_XTC_HTXnTBLRC 0x00000009
-#define HC_XTC_Ctexnext 0x0000000a
-/*--
- */
-#define HC_HTXnTBLCsat_MASK 0x00800000
-#define HC_HTXnTBLCa_MASK 0x000fc000
-#define HC_HTXnTBLCb_MASK 0x00001f80
-#define HC_HTXnTBLCc_MASK 0x0000003f
-#define HC_HTXnTBLCa_TOPC (HC_XTC_TOPC << 14)
-#define HC_HTXnTBLCa_InvTOPC (HC_XTC_InvTOPC << 14)
-#define HC_HTXnTBLCa_TOPCp5 (HC_XTC_TOPCp5 << 14)
-#define HC_HTXnTBLCa_0 (HC_XTC_0 << 14)
-#define HC_HTXnTBLCa_Dif (HC_XTC_Dif << 14)
-#define HC_HTXnTBLCa_Spec (HC_XTC_Spec << 14)
-#define HC_HTXnTBLCa_Tex (HC_XTC_Tex << 14)
-#define HC_HTXnTBLCa_Cur (HC_XTC_Cur << 14)
-#define HC_HTXnTBLCa_Adif (HC_XTC_Adif << 14)
-#define HC_HTXnTBLCa_Fog (HC_XTC_Fog << 14)
-#define HC_HTXnTBLCa_Atex (HC_XTC_Atex << 14)
-#define HC_HTXnTBLCa_Acur (HC_XTC_Acur << 14)
-#define HC_HTXnTBLCa_HTXnTBLRC (HC_XTC_HTXnTBLRC << 14)
-#define HC_HTXnTBLCa_Ctexnext (HC_XTC_Ctexnext << 14)
-#define HC_HTXnTBLCb_TOPC (HC_XTC_TOPC << 7)
-#define HC_HTXnTBLCb_InvTOPC (HC_XTC_InvTOPC << 7)
-#define HC_HTXnTBLCb_TOPCp5 (HC_XTC_TOPCp5 << 7)
-#define HC_HTXnTBLCb_0 (HC_XTC_0 << 7)
-#define HC_HTXnTBLCb_Dif (HC_XTC_Dif << 7)
-#define HC_HTXnTBLCb_Spec (HC_XTC_Spec << 7)
-#define HC_HTXnTBLCb_Tex (HC_XTC_Tex << 7)
-#define HC_HTXnTBLCb_Cur (HC_XTC_Cur << 7)
-#define HC_HTXnTBLCb_Adif (HC_XTC_Adif << 7)
-#define HC_HTXnTBLCb_Fog (HC_XTC_Fog << 7)
-#define HC_HTXnTBLCb_Atex (HC_XTC_Atex << 7)
-#define HC_HTXnTBLCb_Acur (HC_XTC_Acur << 7)
-#define HC_HTXnTBLCb_HTXnTBLRC (HC_XTC_HTXnTBLRC << 7)
-#define HC_HTXnTBLCb_Ctexnext (HC_XTC_Ctexnext << 7)
-#define HC_HTXnTBLCc_TOPC (HC_XTC_TOPC << 0)
-#define HC_HTXnTBLCc_InvTOPC (HC_XTC_InvTOPC << 0)
-#define HC_HTXnTBLCc_TOPCp5 (HC_XTC_TOPCp5 << 0)
-#define HC_HTXnTBLCc_0 (HC_XTC_0 << 0)
-#define HC_HTXnTBLCc_Dif (HC_XTC_Dif << 0)
-#define HC_HTXnTBLCc_Spec (HC_XTC_Spec << 0)
-#define HC_HTXnTBLCc_Tex (HC_XTC_Tex << 0)
-#define HC_HTXnTBLCc_Cur (HC_XTC_Cur << 0)
-#define HC_HTXnTBLCc_Adif (HC_XTC_Adif << 0)
-#define HC_HTXnTBLCc_Fog (HC_XTC_Fog << 0)
-#define HC_HTXnTBLCc_Atex (HC_XTC_Atex << 0)
-#define HC_HTXnTBLCc_Acur (HC_XTC_Acur << 0)
-#define HC_HTXnTBLCc_HTXnTBLRC (HC_XTC_HTXnTBLRC << 0)
-#define HC_HTXnTBLCc_Ctexnext (HC_XTC_Ctexnext << 0)
-/* HC_SubA_HTXnTBLCop 0x0081
- */
-#define HC_HTXnTBLdot_MASK 0x00c00000
-#define HC_HTXnTBLCop_MASK 0x00380000
-#define HC_HTXnTBLCbias_MASK 0x0007c000
-#define HC_HTXnTBLCshift_MASK 0x00001800
-#define HC_HTXnTBLAop_MASK 0x00000380
-#define HC_HTXnTBLAbias_MASK 0x00000078
-#define HC_HTXnTBLAshift_MASK 0x00000003
-#define HC_HTXnTBLCop_Add 0x00000000
-#define HC_HTXnTBLCop_Sub 0x00080000
-#define HC_HTXnTBLCop_Min 0x00100000
-#define HC_HTXnTBLCop_Max 0x00180000
-#define HC_HTXnTBLCop_Mask 0x00200000
-#define HC_HTXnTBLCbias_Cbias (HC_XTC_Cbias << 14)
-#define HC_HTXnTBLCbias_InvCbias (HC_XTC_InvCbias << 14)
-#define HC_HTXnTBLCbias_0 (HC_XTC_0 << 14)
-#define HC_HTXnTBLCbias_Dif (HC_XTC_Dif << 14)
-#define HC_HTXnTBLCbias_Spec (HC_XTC_Spec << 14)
-#define HC_HTXnTBLCbias_Tex (HC_XTC_Tex << 14)
-#define HC_HTXnTBLCbias_Cur (HC_XTC_Cur << 14)
-#define HC_HTXnTBLCbias_Adif (HC_XTC_Adif << 14)
-#define HC_HTXnTBLCbias_Fog (HC_XTC_Fog << 14)
-#define HC_HTXnTBLCbias_Atex (HC_XTC_Atex << 14)
-#define HC_HTXnTBLCbias_Acur (HC_XTC_Acur << 14)
-#define HC_HTXnTBLCbias_HTXnTBLRC (HC_XTC_HTXnTBLRC << 14)
-#define HC_HTXnTBLCshift_1 0x00000000
-#define HC_HTXnTBLCshift_2 0x00000800
-#define HC_HTXnTBLCshift_No 0x00001000
-#define HC_HTXnTBLCshift_DotP 0x00001800
-/*=* John Sheng [2003.7.18] texture combine *=*/
-#define HC_HTXnTBLDOT3 0x00080000
-#define HC_HTXnTBLDOT4 0x000C0000
-
-#define HC_HTXnTBLAop_Add 0x00000000
-#define HC_HTXnTBLAop_Sub 0x00000080
-#define HC_HTXnTBLAop_Min 0x00000100
-#define HC_HTXnTBLAop_Max 0x00000180
-#define HC_HTXnTBLAop_Mask 0x00000200
-#define HC_HTXnTBLAbias_Inv 0x00000040
-#define HC_HTXnTBLAbias_Adif 0x00000000
-#define HC_HTXnTBLAbias_Fog 0x00000008
-#define HC_HTXnTBLAbias_Acur 0x00000010
-#define HC_HTXnTBLAbias_HTXnTBLRAbias 0x00000018
-#define HC_HTXnTBLAbias_Atex 0x00000020
-#define HC_HTXnTBLAshift_1 0x00000000
-#define HC_HTXnTBLAshift_2 0x00000001
-#define HC_HTXnTBLAshift_No 0x00000002
-/* #define HC_HTXnTBLAshift_DotP 0x00000003 */
-/* HC_SubA_HTXnTBLMPFog 0x0082
- */
-#define HC_HTXnTBLMPfog_MASK 0x00e00000
-#define HC_HTXnTBLMPfog_0 0x00000000
-#define HC_HTXnTBLMPfog_Adif 0x00200000
-#define HC_HTXnTBLMPfog_Fog 0x00400000
-#define HC_HTXnTBLMPfog_Atex 0x00600000
-#define HC_HTXnTBLMPfog_Acur 0x00800000
-#define HC_HTXnTBLMPfog_GHTXnTBLRFog 0x00a00000
-/* HC_SubA_HTXnTBLAsat 0x0083
- *-- Define the texture alpha input.
- */
-#define HC_XTA_TOPA 0x00000000
-#define HC_XTA_InvTOPA 0x00000008
-#define HC_XTA_TOPAp5 0x00000010
-#define HC_XTA_Adif 0x00000000
-#define HC_XTA_Fog 0x00000001
-#define HC_XTA_Acur 0x00000002
-#define HC_XTA_HTXnTBLRA 0x00000003
-#define HC_XTA_Atex 0x00000004
-#define HC_XTA_Atexnext 0x00000005
-/*--
- */
-#define HC_HTXnTBLAsat_MASK 0x00800000
-#define HC_HTXnTBLAMB_MASK 0x00700000
-#define HC_HTXnTBLAa_MASK 0x0007c000
-#define HC_HTXnTBLAb_MASK 0x00000f80
-#define HC_HTXnTBLAc_MASK 0x0000001f
-#define HC_HTXnTBLAMB_SHIFT 20
-#define HC_HTXnTBLAa_TOPA (HC_XTA_TOPA << 14)
-#define HC_HTXnTBLAa_InvTOPA (HC_XTA_InvTOPA << 14)
-#define HC_HTXnTBLAa_TOPAp5 (HC_XTA_TOPAp5 << 14)
-#define HC_HTXnTBLAa_Adif (HC_XTA_Adif << 14)
-#define HC_HTXnTBLAa_Fog (HC_XTA_Fog << 14)
-#define HC_HTXnTBLAa_Acur (HC_XTA_Acur << 14)
-#define HC_HTXnTBLAa_HTXnTBLRA (HC_XTA_HTXnTBLRA << 14)
-#define HC_HTXnTBLAa_Atex (HC_XTA_Atex << 14)
-#define HC_HTXnTBLAa_Atexnext (HC_XTA_Atexnext << 14)
-#define HC_HTXnTBLAb_TOPA (HC_XTA_TOPA << 7)
-#define HC_HTXnTBLAb_InvTOPA (HC_XTA_InvTOPA << 7)
-#define HC_HTXnTBLAb_TOPAp5 (HC_XTA_TOPAp5 << 7)
-#define HC_HTXnTBLAb_Adif (HC_XTA_Adif << 7)
-#define HC_HTXnTBLAb_Fog (HC_XTA_Fog << 7)
-#define HC_HTXnTBLAb_Acur (HC_XTA_Acur << 7)
-#define HC_HTXnTBLAb_HTXnTBLRA (HC_XTA_HTXnTBLRA << 7)
-#define HC_HTXnTBLAb_Atex (HC_XTA_Atex << 7)
-#define HC_HTXnTBLAb_Atexnext (HC_XTA_Atexnext << 7)
-#define HC_HTXnTBLAc_TOPA (HC_XTA_TOPA << 0)
-#define HC_HTXnTBLAc_InvTOPA (HC_XTA_InvTOPA << 0)
-#define HC_HTXnTBLAc_TOPAp5 (HC_XTA_TOPAp5 << 0)
-#define HC_HTXnTBLAc_Adif (HC_XTA_Adif << 0)
-#define HC_HTXnTBLAc_Fog (HC_XTA_Fog << 0)
-#define HC_HTXnTBLAc_Acur (HC_XTA_Acur << 0)
-#define HC_HTXnTBLAc_HTXnTBLRA (HC_XTA_HTXnTBLRA << 0)
-#define HC_HTXnTBLAc_Atex (HC_XTA_Atex << 0)
-#define HC_HTXnTBLAc_Atexnext (HC_XTA_Atexnext << 0)
-/* HC_SubA_HTXnTBLRAa 0x0089
- */
-#define HC_HTXnTBLRAa_MASK 0x00ff0000
-#define HC_HTXnTBLRAb_MASK 0x0000ff00
-#define HC_HTXnTBLRAc_MASK 0x000000ff
-#define HC_HTXnTBLRAa_SHIFT 16
-#define HC_HTXnTBLRAb_SHIFT 8
-#define HC_HTXnTBLRAc_SHIFT 0
-/* HC_SubA_HTXnTBLRFog 0x008a
- */
-#define HC_HTXnTBLRFog_MASK 0x0000ff00
-#define HC_HTXnTBLRAbias_MASK 0x000000ff
-#define HC_HTXnTBLRFog_SHIFT 8
-#define HC_HTXnTBLRAbias_SHIFT 0
-/* HC_SubA_HTXnLScale 0x0094
- */
-#define HC_HTXnLScale_MASK 0x0007fc00
-#define HC_HTXnLOff_MASK 0x000001ff
-#define HC_HTXnLScale_SHIFT 10
-/* HC_SubA_HTXSMD 0x0000
- */
-#define HC_HTXSMD_MASK 0x00000080
-#define HC_HTXTMD_MASK 0x00000040
-#define HC_HTXNum_MASK 0x00000038
-#define HC_HTXTRMD_MASK 0x00000006
-#define HC_HTXCHCLR_MASK 0x00000001
-#define HC_HTXNum_SHIFT 3
-
-/* Texture Palette n
- */
-#define HC_SubType_TexPalette0 0x00000000
-#define HC_SubType_TexPalette1 0x00000001
-#define HC_SubType_FogTable 0x00000010
-#define HC_SubType_Stipple 0x00000014
-/* HC_SubA_TexPalette0 0x0000
- */
-#define HC_HTPnA_MASK 0xff000000
-#define HC_HTPnR_MASK 0x00ff0000
-#define HC_HTPnG_MASK 0x0000ff00
-#define HC_HTPnB_MASK 0x000000ff
-/* HC_SubA_FogTable 0x0010
- */
-#define HC_HFPn3_MASK 0xff000000
-#define HC_HFPn2_MASK 0x00ff0000
-#define HC_HFPn1_MASK 0x0000ff00
-#define HC_HFPn_MASK 0x000000ff
-#define HC_HFPn3_SHIFT 24
-#define HC_HFPn2_SHIFT 16
-#define HC_HFPn1_SHIFT 8
-
-/* Auto Testing & Security
- */
-#define HC_SubA_HenFIFOAT 0x0000
-#define HC_SubA_HFBDrawFirst 0x0004
-#define HC_SubA_HFBBasL 0x0005
-#define HC_SubA_HFBDst 0x0006
-/* HC_SubA_HenFIFOAT 0x0000
- */
-#define HC_HenFIFOAT_MASK 0x00000020
-#define HC_HenGEMILock_MASK 0x00000010
-#define HC_HenFBASwap_MASK 0x00000008
-#define HC_HenOT_MASK 0x00000004
-#define HC_HenCMDQ_MASK 0x00000002
-#define HC_HenTXCTSU_MASK 0x00000001
-/* HC_SubA_HFBDrawFirst 0x0004
- */
-#define HC_HFBDrawFirst_MASK 0x00000800
-#define HC_HFBQueue_MASK 0x00000400
-#define HC_HFBLock_MASK 0x00000200
-#define HC_HEOF_MASK 0x00000100
-#define HC_HFBBasH_MASK 0x000000ff
-
-/* GEMI Setting
- */
-#define HC_SubA_HTArbRCM 0x0008
-#define HC_SubA_HTArbRZ 0x000a
-#define HC_SubA_HTArbWZ 0x000b
-#define HC_SubA_HTArbRTX 0x000c
-#define HC_SubA_HTArbRCW 0x000d
-#define HC_SubA_HTArbE2 0x000e
-#define HC_SubA_HArbRQCM 0x0010
-#define HC_SubA_HArbWQCM 0x0011
-#define HC_SubA_HGEMITout 0x0020
-#define HC_SubA_HFthRTXD 0x0040
-#define HC_SubA_HFthRTXA 0x0044
-#define HC_SubA_HCMDQstL 0x0050
-#define HC_SubA_HCMDQendL 0x0051
-#define HC_SubA_HCMDQLen 0x0052
-/* HC_SubA_HTArbRCM 0x0008
- */
-#define HC_HTArbRCM_MASK 0x0000ffff
-/* HC_SubA_HTArbRZ 0x000a
- */
-#define HC_HTArbRZ_MASK 0x0000ffff
-/* HC_SubA_HTArbWZ 0x000b
- */
-#define HC_HTArbWZ_MASK 0x0000ffff
-/* HC_SubA_HTArbRTX 0x000c
- */
-#define HC_HTArbRTX_MASK 0x0000ffff
-/* HC_SubA_HTArbRCW 0x000d
- */
-#define HC_HTArbRCW_MASK 0x0000ffff
-/* HC_SubA_HTArbE2 0x000e
- */
-#define HC_HTArbE2_MASK 0x0000ffff
-/* HC_SubA_HArbRQCM 0x0010
- */
-#define HC_HTArbRQCM_MASK 0x0000ffff
-/* HC_SubA_HArbWQCM 0x0011
- */
-#define HC_HArbWQCM_MASK 0x0000ffff
-/* HC_SubA_HGEMITout 0x0020
- */
-#define HC_HGEMITout_MASK 0x000f0000
-#define HC_HNPArbZC_MASK 0x0000ffff
-#define HC_HGEMITout_SHIFT 16
-/* HC_SubA_HFthRTXD 0x0040
- */
-#define HC_HFthRTXD_MASK 0x00ff0000
-#define HC_HFthRZD_MASK 0x0000ff00
-#define HC_HFthWZD_MASK 0x000000ff
-#define HC_HFthRTXD_SHIFT 16
-#define HC_HFthRZD_SHIFT 8
-/* HC_SubA_HFthRTXA 0x0044
- */
-#define HC_HFthRTXA_MASK 0x000000ff
-
-/****************************************************************************
- * Define the Halcyon Internal register access constants. For simulator only.
- ***************************************************************************/
-#define HC_SIMA_HAGPBstL 0x0000
-#define HC_SIMA_HAGPBendL 0x0001
-#define HC_SIMA_HAGPCMNT 0x0002
-#define HC_SIMA_HAGPBpL 0x0003
-#define HC_SIMA_HAGPBpH 0x0004
-#define HC_SIMA_HClipTB 0x0005
-#define HC_SIMA_HClipLR 0x0006
-#define HC_SIMA_HFPClipTL 0x0007
-#define HC_SIMA_HFPClipBL 0x0008
-#define HC_SIMA_HFPClipLL 0x0009
-#define HC_SIMA_HFPClipRL 0x000a
-#define HC_SIMA_HFPClipTBH 0x000b
-#define HC_SIMA_HFPClipLRH 0x000c
-#define HC_SIMA_HLP 0x000d
-#define HC_SIMA_HLPRF 0x000e
-#define HC_SIMA_HSolidCL 0x000f
-#define HC_SIMA_HPixGC 0x0010
-#define HC_SIMA_HSPXYOS 0x0011
-#define HC_SIMA_HCmdA 0x0012
-#define HC_SIMA_HCmdB 0x0013
-#define HC_SIMA_HEnable 0x0014
-#define HC_SIMA_HZWBBasL 0x0015
-#define HC_SIMA_HZWBBasH 0x0016
-#define HC_SIMA_HZWBType 0x0017
-#define HC_SIMA_HZBiasL 0x0018
-#define HC_SIMA_HZWBend 0x0019
-#define HC_SIMA_HZWTMD 0x001a
-#define HC_SIMA_HZWCDL 0x001b
-#define HC_SIMA_HZWCTAGnum 0x001c
-#define HC_SIMA_HZCYNum 0x001d
-#define HC_SIMA_HZWCFire 0x001e
-/* #define HC_SIMA_HSBBasL 0x001d */
-/* #define HC_SIMA_HSBBasH 0x001e */
-/* #define HC_SIMA_HSBFM 0x001f */
-#define HC_SIMA_HSTREF 0x0020
-#define HC_SIMA_HSTMD 0x0021
-#define HC_SIMA_HABBasL 0x0022
-#define HC_SIMA_HABBasH 0x0023
-#define HC_SIMA_HABFM 0x0024
-#define HC_SIMA_HATMD 0x0025
-#define HC_SIMA_HABLCsat 0x0026
-#define HC_SIMA_HABLCop 0x0027
-#define HC_SIMA_HABLAsat 0x0028
-#define HC_SIMA_HABLAop 0x0029
-#define HC_SIMA_HABLRCa 0x002a
-#define HC_SIMA_HABLRFCa 0x002b
-#define HC_SIMA_HABLRCbias 0x002c
-#define HC_SIMA_HABLRCb 0x002d
-#define HC_SIMA_HABLRFCb 0x002e
-#define HC_SIMA_HABLRAa 0x002f
-#define HC_SIMA_HABLRAb 0x0030
-#define HC_SIMA_HDBBasL 0x0031
-#define HC_SIMA_HDBBasH 0x0032
-#define HC_SIMA_HDBFM 0x0033
-#define HC_SIMA_HFBBMSKL 0x0034
-#define HC_SIMA_HROP 0x0035
-#define HC_SIMA_HFogLF 0x0036
-#define HC_SIMA_HFogCL 0x0037
-#define HC_SIMA_HFogCH 0x0038
-#define HC_SIMA_HFogStL 0x0039
-#define HC_SIMA_HFogStH 0x003a
-#define HC_SIMA_HFogOOdMF 0x003b
-#define HC_SIMA_HFogOOdEF 0x003c
-#define HC_SIMA_HFogEndL 0x003d
-#define HC_SIMA_HFogDenst 0x003e
-/*---- start of texture 0 setting ----
- */
-#define HC_SIMA_HTX0L0BasL 0x0040
-#define HC_SIMA_HTX0L1BasL 0x0041
-#define HC_SIMA_HTX0L2BasL 0x0042
-#define HC_SIMA_HTX0L3BasL 0x0043
-#define HC_SIMA_HTX0L4BasL 0x0044
-#define HC_SIMA_HTX0L5BasL 0x0045
-#define HC_SIMA_HTX0L6BasL 0x0046
-#define HC_SIMA_HTX0L7BasL 0x0047
-#define HC_SIMA_HTX0L8BasL 0x0048
-#define HC_SIMA_HTX0L9BasL 0x0049
-#define HC_SIMA_HTX0LaBasL 0x004a
-#define HC_SIMA_HTX0LbBasL 0x004b
-#define HC_SIMA_HTX0LcBasL 0x004c
-#define HC_SIMA_HTX0LdBasL 0x004d
-#define HC_SIMA_HTX0LeBasL 0x004e
-#define HC_SIMA_HTX0LfBasL 0x004f
-#define HC_SIMA_HTX0L10BasL 0x0050
-#define HC_SIMA_HTX0L11BasL 0x0051
-#define HC_SIMA_HTX0L012BasH 0x0052
-#define HC_SIMA_HTX0L345BasH 0x0053
-#define HC_SIMA_HTX0L678BasH 0x0054
-#define HC_SIMA_HTX0L9abBasH 0x0055
-#define HC_SIMA_HTX0LcdeBasH 0x0056
-#define HC_SIMA_HTX0Lf1011BasH 0x0057
-#define HC_SIMA_HTX0L0Pit 0x0058
-#define HC_SIMA_HTX0L1Pit 0x0059
-#define HC_SIMA_HTX0L2Pit 0x005a
-#define HC_SIMA_HTX0L3Pit 0x005b
-#define HC_SIMA_HTX0L4Pit 0x005c
-#define HC_SIMA_HTX0L5Pit 0x005d
-#define HC_SIMA_HTX0L6Pit 0x005e
-#define HC_SIMA_HTX0L7Pit 0x005f
-#define HC_SIMA_HTX0L8Pit 0x0060
-#define HC_SIMA_HTX0L9Pit 0x0061
-#define HC_SIMA_HTX0LaPit 0x0062
-#define HC_SIMA_HTX0LbPit 0x0063
-#define HC_SIMA_HTX0LcPit 0x0064
-#define HC_SIMA_HTX0LdPit 0x0065
-#define HC_SIMA_HTX0LePit 0x0066
-#define HC_SIMA_HTX0LfPit 0x0067
-#define HC_SIMA_HTX0L10Pit 0x0068
-#define HC_SIMA_HTX0L11Pit 0x0069
-#define HC_SIMA_HTX0L0_5WE 0x006a
-#define HC_SIMA_HTX0L6_bWE 0x006b
-#define HC_SIMA_HTX0Lc_11WE 0x006c
-#define HC_SIMA_HTX0L0_5HE 0x006d
-#define HC_SIMA_HTX0L6_bHE 0x006e
-#define HC_SIMA_HTX0Lc_11HE 0x006f
-#define HC_SIMA_HTX0L0OS 0x0070
-#define HC_SIMA_HTX0TB 0x0071
-#define HC_SIMA_HTX0MPMD 0x0072
-#define HC_SIMA_HTX0CLODu 0x0073
-#define HC_SIMA_HTX0FM 0x0074
-#define HC_SIMA_HTX0TRCH 0x0075
-#define HC_SIMA_HTX0TRCL 0x0076
-#define HC_SIMA_HTX0TBC 0x0077
-#define HC_SIMA_HTX0TRAH 0x0078
-#define HC_SIMA_HTX0TBLCsat 0x0079
-#define HC_SIMA_HTX0TBLCop 0x007a
-#define HC_SIMA_HTX0TBLMPfog 0x007b
-#define HC_SIMA_HTX0TBLAsat 0x007c
-#define HC_SIMA_HTX0TBLRCa 0x007d
-#define HC_SIMA_HTX0TBLRCb 0x007e
-#define HC_SIMA_HTX0TBLRCc 0x007f
-#define HC_SIMA_HTX0TBLRCbias 0x0080
-#define HC_SIMA_HTX0TBLRAa 0x0081
-#define HC_SIMA_HTX0TBLRFog 0x0082
-#define HC_SIMA_HTX0BumpM00 0x0083
-#define HC_SIMA_HTX0BumpM01 0x0084
-#define HC_SIMA_HTX0BumpM10 0x0085
-#define HC_SIMA_HTX0BumpM11 0x0086
-#define HC_SIMA_HTX0LScale 0x0087
-/*---- end of texture 0 setting ---- 0x008f
- */
-#define HC_SIMA_TX0TX1_OFF 0x0050
-/*---- start of texture 1 setting ----
- */
-#define HC_SIMA_HTX1L0BasL (HC_SIMA_HTX0L0BasL + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1L1BasL (HC_SIMA_HTX0L1BasL + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1L2BasL (HC_SIMA_HTX0L2BasL + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1L3BasL (HC_SIMA_HTX0L3BasL + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1L4BasL (HC_SIMA_HTX0L4BasL + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1L5BasL (HC_SIMA_HTX0L5BasL + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1L6BasL (HC_SIMA_HTX0L6BasL + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1L7BasL (HC_SIMA_HTX0L7BasL + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1L8BasL (HC_SIMA_HTX0L8BasL + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1L9BasL (HC_SIMA_HTX0L9BasL + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1LaBasL (HC_SIMA_HTX0LaBasL + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1LbBasL (HC_SIMA_HTX0LbBasL + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1LcBasL (HC_SIMA_HTX0LcBasL + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1LdBasL (HC_SIMA_HTX0LdBasL + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1LeBasL (HC_SIMA_HTX0LeBasL + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1LfBasL (HC_SIMA_HTX0LfBasL + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1L10BasL (HC_SIMA_HTX0L10BasL + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1L11BasL (HC_SIMA_HTX0L11BasL + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1L012BasH (HC_SIMA_HTX0L012BasH + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1L345BasH (HC_SIMA_HTX0L345BasH + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1L678BasH (HC_SIMA_HTX0L678BasH + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1L9abBasH (HC_SIMA_HTX0L9abBasH + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1LcdeBasH (HC_SIMA_HTX0LcdeBasH + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1Lf1011BasH (HC_SIMA_HTX0Lf1011BasH + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1L0Pit (HC_SIMA_HTX0L0Pit + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1L1Pit (HC_SIMA_HTX0L1Pit + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1L2Pit (HC_SIMA_HTX0L2Pit + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1L3Pit (HC_SIMA_HTX0L3Pit + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1L4Pit (HC_SIMA_HTX0L4Pit + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1L5Pit (HC_SIMA_HTX0L5Pit + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1L6Pit (HC_SIMA_HTX0L6Pit + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1L7Pit (HC_SIMA_HTX0L7Pit + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1L8Pit (HC_SIMA_HTX0L8Pit + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1L9Pit (HC_SIMA_HTX0L9Pit + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1LaPit (HC_SIMA_HTX0LaPit + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1LbPit (HC_SIMA_HTX0LbPit + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1LcPit (HC_SIMA_HTX0LcPit + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1LdPit (HC_SIMA_HTX0LdPit + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1LePit (HC_SIMA_HTX0LePit + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1LfPit (HC_SIMA_HTX0LfPit + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1L10Pit (HC_SIMA_HTX0L10Pit + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1L11Pit (HC_SIMA_HTX0L11Pit + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1L0_5WE (HC_SIMA_HTX0L0_5WE + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1L6_bWE (HC_SIMA_HTX0L6_bWE + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1Lc_11WE (HC_SIMA_HTX0Lc_11WE + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1L0_5HE (HC_SIMA_HTX0L0_5HE + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1L6_bHE (HC_SIMA_HTX0L6_bHE + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1Lc_11HE (HC_SIMA_HTX0Lc_11HE + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1L0OS (HC_SIMA_HTX0L0OS + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1TB (HC_SIMA_HTX0TB + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1MPMD (HC_SIMA_HTX0MPMD + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1CLODu (HC_SIMA_HTX0CLODu + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1FM (HC_SIMA_HTX0FM + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1TRCH (HC_SIMA_HTX0TRCH + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1TRCL (HC_SIMA_HTX0TRCL + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1TBC (HC_SIMA_HTX0TBC + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1TRAH (HC_SIMA_HTX0TRAH + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1LTC (HC_SIMA_HTX0LTC + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1LTA (HC_SIMA_HTX0LTA + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1TBLCsat (HC_SIMA_HTX0TBLCsat + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1TBLCop (HC_SIMA_HTX0TBLCop + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1TBLMPfog (HC_SIMA_HTX0TBLMPfog + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1TBLAsat (HC_SIMA_HTX0TBLAsat + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1TBLRCa (HC_SIMA_HTX0TBLRCa + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1TBLRCb (HC_SIMA_HTX0TBLRCb + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1TBLRCc (HC_SIMA_HTX0TBLRCc + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1TBLRCbias (HC_SIMA_HTX0TBLRCbias + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1TBLRAa (HC_SIMA_HTX0TBLRAa + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1TBLRFog (HC_SIMA_HTX0TBLRFog + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1BumpM00 (HC_SIMA_HTX0BumpM00 + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1BumpM01 (HC_SIMA_HTX0BumpM01 + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1BumpM10 (HC_SIMA_HTX0BumpM10 + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1BumpM11 (HC_SIMA_HTX0BumpM11 + HC_SIMA_TX0TX1_OFF)
-#define HC_SIMA_HTX1LScale (HC_SIMA_HTX0LScale + HC_SIMA_TX0TX1_OFF)
-/*---- end of texture 1 setting ---- 0xaf
- */
-#define HC_SIMA_HTXSMD 0x00b0
-#define HC_SIMA_HenFIFOAT 0x00b1
-#define HC_SIMA_HFBDrawFirst 0x00b2
-#define HC_SIMA_HFBBasL 0x00b3
-#define HC_SIMA_HTArbRCM 0x00b4
-#define HC_SIMA_HTArbRZ 0x00b5
-#define HC_SIMA_HTArbWZ 0x00b6
-#define HC_SIMA_HTArbRTX 0x00b7
-#define HC_SIMA_HTArbRCW 0x00b8
-#define HC_SIMA_HTArbE2 0x00b9
-#define HC_SIMA_HGEMITout 0x00ba
-#define HC_SIMA_HFthRTXD 0x00bb
-#define HC_SIMA_HFthRTXA 0x00bc
-/* Define the texture palette 0
- */
-#define HC_SIMA_HTP0 0x0100
-#define HC_SIMA_HTP1 0x0200
-#define HC_SIMA_FOGTABLE 0x0300
-#define HC_SIMA_STIPPLE 0x0400
-#define HC_SIMA_HE3Fire 0x0440
-#define HC_SIMA_TRANS_SET 0x0441
-#define HC_SIMA_HREngSt 0x0442
-#define HC_SIMA_HRFIFOempty 0x0443
-#define HC_SIMA_HRFIFOfull 0x0444
-#define HC_SIMA_HRErr 0x0445
-#define HC_SIMA_FIFOstatus 0x0446
-
-/****************************************************************************
- * Define the AGP command header.
- ***************************************************************************/
-#define HC_ACMD_MASK 0xfe000000
-#define HC_ACMD_SUB_MASK 0x0c000000
-#define HC_ACMD_HCmdA 0xee000000
-#define HC_ACMD_HCmdB 0xec000000
-#define HC_ACMD_HCmdC 0xea000000
-#define HC_ACMD_H1 0xf0000000
-#define HC_ACMD_H2 0xf2000000
-#define HC_ACMD_H3 0xf4000000
-#define HC_ACMD_H4 0xf6000000
-
-#define HC_ACMD_H1IO_MASK 0x000001ff
-#define HC_ACMD_H2IO1_MASK 0x001ff000
-#define HC_ACMD_H2IO2_MASK 0x000001ff
-#define HC_ACMD_H2IO1_SHIFT 12
-#define HC_ACMD_H2IO2_SHIFT 0
-#define HC_ACMD_H3IO_MASK 0x000001ff
-#define HC_ACMD_H3COUNT_MASK 0x01fff000
-#define HC_ACMD_H3COUNT_SHIFT 12
-#define HC_ACMD_H4ID_MASK 0x000001ff
-#define HC_ACMD_H4COUNT_MASK 0x01fffe00
-#define HC_ACMD_H4COUNT_SHIFT 9
-
-/*****************************************************************************
- * Define Header
- ****************************************************************************/
-#define HC_HEADER2 0xF210F110
-
-/*****************************************************************************
- * Define Dummy Value
- ****************************************************************************/
-#define HC_DUMMY 0xCCCCCCCC
-/*****************************************************************************
- * Define for DMA use
- ****************************************************************************/
-#define HALCYON_HEADER2 0XF210F110
-#define HALCYON_FIRECMD 0XEE100000
-#define HALCYON_FIREMASK 0XFFF00000
-#define HALCYON_CMDB 0XEC000000
-#define HALCYON_CMDBMASK 0XFFFE0000
-#define HALCYON_SUB_ADDR0 0X00000000
-#define HALCYON_HEADER1MASK 0XFFFFFC00
-#define HALCYON_HEADER1 0XF0000000
-#define HC_SubA_HAGPBstL 0x0060
-#define HC_SubA_HAGPBendL 0x0061
-#define HC_SubA_HAGPCMNT 0x0062
-#define HC_SubA_HAGPBpL 0x0063
-#define HC_SubA_HAGPBpH 0x0064
-#define HC_HAGPCMNT_MASK 0x00800000
-#define HC_HCmdErrClr_MASK 0x00400000
-#define HC_HAGPBendH_MASK 0x0000ff00
-#define HC_HAGPBstH_MASK 0x000000ff
-#define HC_HAGPBendH_SHIFT 8
-#define HC_HAGPBstH_SHIFT 0
-#define HC_HAGPBpL_MASK 0x00fffffc
-#define HC_HAGPBpID_MASK 0x00000003
-#define HC_HAGPBpID_PAUSE 0x00000000
-#define HC_HAGPBpID_JUMP 0x00000001
-#define HC_HAGPBpID_STOP 0x00000002
-#define HC_HAGPBpH_MASK 0x00ffffff
-
-
-#define VIA_VIDEO_HEADER5 0xFE040000
-#define VIA_VIDEO_HEADER6 0xFE050000
-#define VIA_VIDEO_HEADER7 0xFE060000
-#define VIA_VIDEOMASK 0xFFFF0000
-
-/*****************************************************************************
- * Define for H5 DMA use
- ****************************************************************************/
-#define H5_HC_DUMMY 0xCC000000
-
-/* Command Header Type */
-#define INV_DUMMY_MASK 0xFF000000
-#define INV_AGPHeader0 0xFE000000
-#define INV_AGPHeader1 0xFE010000
-#define INV_AGPHeader2 0xFE020000
-#define INV_AGPHeader3 0xFE030000
-#define INV_AGPHeader4 0xFE040000
-#define INV_AGPHeader5 0xFE050000
-#define INV_AGPHeader6 0xFE060000
-#define INV_AGPHeader7 0xFE070000
-#define INV_AGPHeader9 0xFE090000
-#define INV_AGPHeaderA 0xFE0A0000
-#define INV_AGPHeader40 0xFE400000
-#define INV_AGPHeader41 0xFE410000
-#define INV_AGPHeader43 0xFE430000
-#define INV_AGPHeader45 0xFE450000
-#define INV_AGPHeader47 0xFE470000
-#define INV_AGPHeader4A 0xFE4A0000
-#define INV_AGPHeader82 0xFE820000
-#define INV_AGPHeader83 0xFE830000
-#define INV_AGPHeader_MASK 0xFFFF0000
-#define INV_AGPHeader2A 0xFE2A0000
-#define INV_AGPHeader25 0xFE250000
-#define INV_AGPHeader20 0xFE200000
-#define INV_AGPHeader23 0xFE230000
-#define INV_AGPHeaderE2 0xFEE20000
-#define INV_AGPHeaderE3 0xFEE30000
-
-/*Transmission IO Space*/
-#define INV_REG_CR_TRANS 0x041C
-#define INV_REG_CR_BEGIN 0x0420
-#define INV_REG_CR_END 0x0438
-
-#define INV_REG_3D_TRANS 0x043C
-#define INV_REG_3D_BEGIN 0x0440
-#define INV_REG_3D_END 0x06FC
-
-#define INV_ParaType_CmdVdata 0x0000
-
-/* H5 Enable Setting
- */
-#define INV_HC_SubA_HEnable1 0x00
-
-#define INV_HC_HenAT4ALLRT_MASK 0x00100000
-#define INV_HC_HenATMRT3_MASK 0x00080000
-#define INV_HC_HenATMRT2_MASK 0x00040000
-#define INV_HC_HenATMRT1_MASK 0x00020000
-#define INV_HC_HenATMRT0_MASK 0x00010000
-#define INV_HC_HenSCMRT3_MASK 0x00008000
-#define INV_HC_HenSCMRT2_MASK 0x00004000
-#define INV_HC_HenSCMRT1_MASK 0x00002000
-#define INV_HC_HenSCMRT0_MASK 0x00001000
-#define INV_HC_HenFOGMRT3_MASK 0x00000800
-#define INV_HC_HenFOGMRT2_MASK 0x00000400
-#define INV_HC_HenFOGMRT1_MASK 0x00000200
-#define INV_HC_HenFOGMRT0_MASK 0x00000100
-#define INV_HC_HenABLMRT3_MASK 0x00000080
-#define INV_HC_HenABLMRT2_MASK 0x00000040
-#define INV_HC_HenABLMRT1_MASK 0x00000020
-#define INV_HC_HenABLMRT0_MASK 0x00000010
-#define INV_HC_HenDTMRT3_MASK 0x00000008
-#define INV_HC_HenDTMRT2_MASK 0x00000004
-#define INV_HC_HenDTMRT1_MASK 0x00000002
-#define INV_HC_HenDTMRT0_MASK 0x00000001
-
-#define INV_HC_SubA_HEnable2 0x01
-
-#define INV_HC_HenLUL2DR_MASK 0x00800000
-#define INV_HC_HenLDIAMOND_MASK 0x00400000
-#define INV_HC_HenPSPRITE_MASK 0x00200000
-#define INV_HC_HenC2S_MASK 0x00100000
-#define INV_HC_HenFOGPP_MASK 0x00080000
-#define INV_HC_HenSCPP_MASK 0x00040000
-#define INV_HC_HenCPP_MASK 0x00020000
-#define INV_HC_HenCZ_MASK 0x00002000
-#define INV_HC_HenVC_MASK 0x00001000
-#define INV_HC_HenCL_MASK 0x00000800
-#define INV_HC_HenPS_MASK 0x00000400
-#define INV_HC_HenWCZ_MASK 0x00000200
-#define INV_HC_HenTXCH_MASK 0x00000100
-#define INV_HC_HenBFCULL_MASK 0x00000080
-#define INV_HC_HenCW_MASK 0x00000040
-#define INV_HC_HenAA_MASK 0x00000020
-#define INV_HC_HenST_MASK 0x00000010
-#define INV_HC_HenZT_MASK 0x00000008
-#define INV_HC_HenZW_MASK 0x00000004
-#define INV_HC_HenSP_MASK 0x00000002
-#define INV_HC_HenLP_MASK 0x00000001
-
-/* H5 Miscellaneous Settings
- */
-#define INV_HC_SubA_HCClipTL 0x0080
-#define INV_HC_SubA_HCClipBL 0x0081
-#define INV_HC_SubA_HSClipTL 0x0082
-#define INV_HC_SubA_HSClipBL 0x0083
-#define INV_HC_SubA_HSolidCL 0x0086
-#define INV_HC_SubA_HSolidCH 0x0087
-#define INV_HC_SubA_HGBClipGL 0x0088
-#define INV_HC_SubA_HGBClipGR 0x0089
-
-
-#define INV_HC_ParaType_Vetex 0x00040000
-
-#endif
diff --git a/drivers/gpu/drm/via/via_dri1.c b/drivers/gpu/drm/via/via_dri1.c
deleted file mode 100644
index 217d1e84b0ea..000000000000
--- a/drivers/gpu/drm/via/via_dri1.c
+++ /dev/null
@@ -1,3630 +0,0 @@
-/*
- * Copyright 1998-2003 VIA Technologies, Inc. All Rights Reserved.
- * Copyright 2001-2003 S3 Graphics, Inc. All Rights Reserved.
- * Copyright 2002 Tungsten Graphics, Inc.
- * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. All Rights Reserved.
- * Copyright 2006 Tungsten Graphics Inc., Bismarck, ND., USA.
- * Copyright 2004 Digeo, Inc., Palo Alto, CA, U.S.A. All Rights Reserved.
- * Copyright 2004 The Unichrome project. All Rights Reserved.
- * Copyright 2004 BEAM Ltd.
- * Copyright 2005 Thomas Hellstrom. All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sub license,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * VIA, S3 GRAPHICS, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#include <linux/delay.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/vmalloc.h>
-
-#include <drm/drm_drv.h>
-#include <drm/drm_file.h>
-#include <drm/drm_ioctl.h>
-#include <drm/drm_legacy.h>
-#include <drm/drm_mm.h>
-#include <drm/drm_pciids.h>
-#include <drm/drm_print.h>
-#include <drm/drm_vblank.h>
-#include <drm/via_drm.h>
-
-#include "via_3d_reg.h"
-
-#define DRIVER_AUTHOR "Various"
-
-#define DRIVER_NAME "via"
-#define DRIVER_DESC "VIA Unichrome / Pro"
-#define DRIVER_DATE "20070202"
-
-#define DRIVER_MAJOR 2
-#define DRIVER_MINOR 11
-#define DRIVER_PATCHLEVEL 1
-
-typedef enum {
- no_sequence = 0,
- z_address,
- dest_address,
- tex_address
-} drm_via_sequence_t;
-
-typedef struct {
- unsigned texture;
- uint32_t z_addr;
- uint32_t d_addr;
- uint32_t t_addr[2][10];
- uint32_t pitch[2][10];
- uint32_t height[2][10];
- uint32_t tex_level_lo[2];
- uint32_t tex_level_hi[2];
- uint32_t tex_palette_size[2];
- uint32_t tex_npot[2];
- drm_via_sequence_t unfinished;
- int agp_texture;
- int multitex;
- struct drm_device *dev;
- drm_local_map_t *map_cache;
- uint32_t vertex_count;
- int agp;
- const uint32_t *buf_start;
-} drm_via_state_t;
-
-#define VIA_PCI_BUF_SIZE 60000
-#define VIA_FIRE_BUF_SIZE 1024
-#define VIA_NUM_IRQS 4
-
-
-#define VIA_NUM_BLIT_ENGINES 2
-#define VIA_NUM_BLIT_SLOTS 8
-
-struct _drm_via_descriptor;
-
-typedef struct _drm_via_sg_info {
- struct page **pages;
- unsigned long num_pages;
- struct _drm_via_descriptor **desc_pages;
- int num_desc_pages;
- int num_desc;
- enum dma_data_direction direction;
- unsigned char *bounce_buffer;
- dma_addr_t chain_start;
- uint32_t free_on_sequence;
- unsigned int descriptors_per_page;
- int aborted;
- enum {
- dr_via_device_mapped,
- dr_via_desc_pages_alloc,
- dr_via_pages_locked,
- dr_via_pages_alloc,
- dr_via_sg_init
- } state;
-} drm_via_sg_info_t;
-
-typedef struct _drm_via_blitq {
- struct drm_device *dev;
- uint32_t cur_blit_handle;
- uint32_t done_blit_handle;
- unsigned serviced;
- unsigned head;
- unsigned cur;
- unsigned num_free;
- unsigned num_outstanding;
- unsigned long end;
- int aborting;
- int is_active;
- drm_via_sg_info_t *blits[VIA_NUM_BLIT_SLOTS];
- spinlock_t blit_lock;
- wait_queue_head_t blit_queue[VIA_NUM_BLIT_SLOTS];
- wait_queue_head_t busy_queue;
- struct work_struct wq;
- struct timer_list poll_timer;
-} drm_via_blitq_t;
-
-typedef struct drm_via_ring_buffer {
- drm_local_map_t map;
- char *virtual_start;
-} drm_via_ring_buffer_t;
-
-typedef uint32_t maskarray_t[5];
-
-typedef struct drm_via_irq {
- atomic_t irq_received;
- uint32_t pending_mask;
- uint32_t enable_mask;
- wait_queue_head_t irq_queue;
-} drm_via_irq_t;
-
-typedef struct drm_via_private {
- drm_via_sarea_t *sarea_priv;
- drm_local_map_t *sarea;
- drm_local_map_t *fb;
- drm_local_map_t *mmio;
- unsigned long agpAddr;
- wait_queue_head_t decoder_queue[VIA_NR_XVMC_LOCKS];
- char *dma_ptr;
- unsigned int dma_low;
- unsigned int dma_high;
- unsigned int dma_offset;
- uint32_t dma_wrap;
- volatile uint32_t *last_pause_ptr;
- volatile uint32_t *hw_addr_ptr;
- drm_via_ring_buffer_t ring;
- ktime_t last_vblank;
- int last_vblank_valid;
- ktime_t nsec_per_vblank;
- atomic_t vbl_received;
- drm_via_state_t hc_state;
- char pci_buf[VIA_PCI_BUF_SIZE];
- const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
- uint32_t num_fire_offsets;
- int chipset;
- drm_via_irq_t via_irqs[VIA_NUM_IRQS];
- unsigned num_irqs;
- maskarray_t *irq_masks;
- uint32_t irq_enable_mask;
- uint32_t irq_pending_mask;
- int *irq_map;
- unsigned int idle_fault;
- int vram_initialized;
- struct drm_mm vram_mm;
- int agp_initialized;
- struct drm_mm agp_mm;
- /** Mapping of userspace keys to mm objects */
- struct idr object_idr;
- unsigned long vram_offset;
- unsigned long agp_offset;
- drm_via_blitq_t blit_queues[VIA_NUM_BLIT_ENGINES];
- uint32_t dma_diff;
-} drm_via_private_t;
-
-struct via_file_private {
- struct list_head obj_list;
-};
-
-enum via_family {
- VIA_OTHER = 0, /* Baseline */
- VIA_PRO_GROUP_A, /* Another video engine and DMA commands */
- VIA_DX9_0 /* Same video as pro_group_a, but 3D is unsupported */
-};
-
-/* VIA MMIO register access */
-static inline u32 via_read(struct drm_via_private *dev_priv, u32 reg)
-{
- return readl((void __iomem *)(dev_priv->mmio->handle + reg));
-}
-
-static inline void via_write(struct drm_via_private *dev_priv, u32 reg,
- u32 val)
-{
- writel(val, (void __iomem *)(dev_priv->mmio->handle + reg));
-}
-
-static inline void via_write8(struct drm_via_private *dev_priv, u32 reg,
- u32 val)
-{
- writeb(val, (void __iomem *)(dev_priv->mmio->handle + reg));
-}
-
-static inline void via_write8_mask(struct drm_via_private *dev_priv,
- u32 reg, u32 mask, u32 val)
-{
- u32 tmp;
-
- tmp = readb((void __iomem *)(dev_priv->mmio->handle + reg));
- tmp = (tmp & ~mask) | (val & mask);
- writeb(tmp, (void __iomem *)(dev_priv->mmio->handle + reg));
-}
-
-/*
- * Poll in a loop waiting for 'contidition' to be true.
- * Note: A direct replacement with wait_event_interruptible_timeout()
- * will not work unless driver is updated to emit wake_up()
- * in relevant places that can impact the 'condition'
- *
- * Returns:
- * ret keeps current value if 'condition' becomes true
- * ret = -BUSY if timeout happens
- * ret = -EINTR if a signal interrupted the waiting period
- */
-#define VIA_WAIT_ON( ret, queue, timeout, condition ) \
-do { \
- DECLARE_WAITQUEUE(entry, current); \
- unsigned long end = jiffies + (timeout); \
- add_wait_queue(&(queue), &entry); \
- \
- for (;;) { \
- __set_current_state(TASK_INTERRUPTIBLE); \
- if (condition) \
- break; \
- if (time_after_eq(jiffies, end)) { \
- ret = -EBUSY; \
- break; \
- } \
- schedule_timeout((HZ/100 > 1) ? HZ/100 : 1); \
- if (signal_pending(current)) { \
- ret = -EINTR; \
- break; \
- } \
- } \
- __set_current_state(TASK_RUNNING); \
- remove_wait_queue(&(queue), &entry); \
-} while (0)
-
-int via_do_cleanup_map(struct drm_device *dev);
-
-int via_dma_cleanup(struct drm_device *dev);
-int via_driver_dma_quiescent(struct drm_device *dev);
-
-#define CMDBUF_ALIGNMENT_SIZE (0x100)
-#define CMDBUF_ALIGNMENT_MASK (0x0ff)
-
-/* defines for VIA 3D registers */
-#define VIA_REG_STATUS 0x400
-#define VIA_REG_TRANSET 0x43C
-#define VIA_REG_TRANSPACE 0x440
-
-/* VIA_REG_STATUS(0x400): Engine Status */
-#define VIA_CMD_RGTR_BUSY 0x00000080 /* Command Regulator is busy */
-#define VIA_2D_ENG_BUSY 0x00000001 /* 2D Engine is busy */
-#define VIA_3D_ENG_BUSY 0x00000002 /* 3D Engine is busy */
-#define VIA_VR_QUEUE_BUSY 0x00020000 /* Virtual Queue is busy */
-
-#define SetReg2DAGP(nReg, nData) { \
- *((uint32_t *)(vb)) = ((nReg) >> 2) | HALCYON_HEADER1; \
- *((uint32_t *)(vb) + 1) = (nData); \
- vb = ((uint32_t *)vb) + 2; \
- dev_priv->dma_low += 8; \
-}
-
-#define via_flush_write_combine() mb()
-
-#define VIA_OUT_RING_QW(w1, w2) do { \
- *vb++ = (w1); \
- *vb++ = (w2); \
- dev_priv->dma_low += 8; \
-} while (0)
-
-#define VIA_MM_ALIGN_SHIFT 4
-#define VIA_MM_ALIGN_MASK ((1 << VIA_MM_ALIGN_SHIFT) - 1)
-
-struct via_memblock {
- struct drm_mm_node mm_node;
- struct list_head owner_list;
-};
-
-#define VIA_REG_INTERRUPT 0x200
-
-/* VIA_REG_INTERRUPT */
-#define VIA_IRQ_GLOBAL (1 << 31)
-#define VIA_IRQ_VBLANK_ENABLE (1 << 19)
-#define VIA_IRQ_VBLANK_PENDING (1 << 3)
-#define VIA_IRQ_HQV0_ENABLE (1 << 11)
-#define VIA_IRQ_HQV1_ENABLE (1 << 25)
-#define VIA_IRQ_HQV0_PENDING (1 << 9)
-#define VIA_IRQ_HQV1_PENDING (1 << 10)
-#define VIA_IRQ_DMA0_DD_ENABLE (1 << 20)
-#define VIA_IRQ_DMA0_TD_ENABLE (1 << 21)
-#define VIA_IRQ_DMA1_DD_ENABLE (1 << 22)
-#define VIA_IRQ_DMA1_TD_ENABLE (1 << 23)
-#define VIA_IRQ_DMA0_DD_PENDING (1 << 4)
-#define VIA_IRQ_DMA0_TD_PENDING (1 << 5)
-#define VIA_IRQ_DMA1_DD_PENDING (1 << 6)
-#define VIA_IRQ_DMA1_TD_PENDING (1 << 7)
-
-/*
- * PCI DMA Registers
- * Channels 2 & 3 don't seem to be implemented in hardware.
- */
-
-#define VIA_PCI_DMA_MAR0 0xE40 /* Memory Address Register of Channel 0 */
-#define VIA_PCI_DMA_DAR0 0xE44 /* Device Address Register of Channel 0 */
-#define VIA_PCI_DMA_BCR0 0xE48 /* Byte Count Register of Channel 0 */
-#define VIA_PCI_DMA_DPR0 0xE4C /* Descriptor Pointer Register of Channel 0 */
-
-#define VIA_PCI_DMA_MAR1 0xE50 /* Memory Address Register of Channel 1 */
-#define VIA_PCI_DMA_DAR1 0xE54 /* Device Address Register of Channel 1 */
-#define VIA_PCI_DMA_BCR1 0xE58 /* Byte Count Register of Channel 1 */
-#define VIA_PCI_DMA_DPR1 0xE5C /* Descriptor Pointer Register of Channel 1 */
-
-#define VIA_PCI_DMA_MAR2 0xE60 /* Memory Address Register of Channel 2 */
-#define VIA_PCI_DMA_DAR2 0xE64 /* Device Address Register of Channel 2 */
-#define VIA_PCI_DMA_BCR2 0xE68 /* Byte Count Register of Channel 2 */
-#define VIA_PCI_DMA_DPR2 0xE6C /* Descriptor Pointer Register of Channel 2 */
-
-#define VIA_PCI_DMA_MAR3 0xE70 /* Memory Address Register of Channel 3 */
-#define VIA_PCI_DMA_DAR3 0xE74 /* Device Address Register of Channel 3 */
-#define VIA_PCI_DMA_BCR3 0xE78 /* Byte Count Register of Channel 3 */
-#define VIA_PCI_DMA_DPR3 0xE7C /* Descriptor Pointer Register of Channel 3 */
-
-#define VIA_PCI_DMA_MR0 0xE80 /* Mode Register of Channel 0 */
-#define VIA_PCI_DMA_MR1 0xE84 /* Mode Register of Channel 1 */
-#define VIA_PCI_DMA_MR2 0xE88 /* Mode Register of Channel 2 */
-#define VIA_PCI_DMA_MR3 0xE8C /* Mode Register of Channel 3 */
-
-#define VIA_PCI_DMA_CSR0 0xE90 /* Command/Status Register of Channel 0 */
-#define VIA_PCI_DMA_CSR1 0xE94 /* Command/Status Register of Channel 1 */
-#define VIA_PCI_DMA_CSR2 0xE98 /* Command/Status Register of Channel 2 */
-#define VIA_PCI_DMA_CSR3 0xE9C /* Command/Status Register of Channel 3 */
-
-#define VIA_PCI_DMA_PTR 0xEA0 /* Priority Type Register */
-
-/* Define for DMA engine */
-/* DPR */
-#define VIA_DMA_DPR_EC (1<<1) /* end of chain */
-#define VIA_DMA_DPR_DDIE (1<<2) /* descriptor done interrupt enable */
-#define VIA_DMA_DPR_DT (1<<3) /* direction of transfer (RO) */
-
-/* MR */
-#define VIA_DMA_MR_CM (1<<0) /* chaining mode */
-#define VIA_DMA_MR_TDIE (1<<1) /* transfer done interrupt enable */
-#define VIA_DMA_MR_HENDMACMD (1<<7) /* ? */
-
-/* CSR */
-#define VIA_DMA_CSR_DE (1<<0) /* DMA enable */
-#define VIA_DMA_CSR_TS (1<<1) /* transfer start */
-#define VIA_DMA_CSR_TA (1<<2) /* transfer abort */
-#define VIA_DMA_CSR_TD (1<<3) /* transfer done */
-#define VIA_DMA_CSR_DD (1<<4) /* descriptor done */
-#define VIA_DMA_DPR_EC (1<<1) /* end of chain */
-
-/*
- * Device-specific IRQs go here. This type might need to be extended with
- * the register if there are multiple IRQ control registers.
- * Currently we activate the HQV interrupts of Unichrome Pro group A.
- */
-
-static maskarray_t via_pro_group_a_irqs[] = {
- {VIA_IRQ_HQV0_ENABLE, VIA_IRQ_HQV0_PENDING, 0x000003D0, 0x00008010,
- 0x00000000 },
- {VIA_IRQ_HQV1_ENABLE, VIA_IRQ_HQV1_PENDING, 0x000013D0, 0x00008010,
- 0x00000000 },
- {VIA_IRQ_DMA0_TD_ENABLE, VIA_IRQ_DMA0_TD_PENDING, VIA_PCI_DMA_CSR0,
- VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008},
- {VIA_IRQ_DMA1_TD_ENABLE, VIA_IRQ_DMA1_TD_PENDING, VIA_PCI_DMA_CSR1,
- VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008},
-};
-static int via_num_pro_group_a = ARRAY_SIZE(via_pro_group_a_irqs);
-static int via_irqmap_pro_group_a[] = {0, 1, -1, 2, -1, 3};
-
-static maskarray_t via_unichrome_irqs[] = {
- {VIA_IRQ_DMA0_TD_ENABLE, VIA_IRQ_DMA0_TD_PENDING, VIA_PCI_DMA_CSR0,
- VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008},
- {VIA_IRQ_DMA1_TD_ENABLE, VIA_IRQ_DMA1_TD_PENDING, VIA_PCI_DMA_CSR1,
- VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008}
-};
-static int via_num_unichrome = ARRAY_SIZE(via_unichrome_irqs);
-static int via_irqmap_unichrome[] = {-1, -1, -1, 0, -1, 1};
-
-
-/*
- * Unmaps the DMA mappings.
- * FIXME: Is this a NoOp on x86? Also
- * FIXME: What happens if this one is called and a pending blit has previously done
- * the same DMA mappings?
- */
-#define VIA_PGDN(x) (((unsigned long)(x)) & PAGE_MASK)
-#define VIA_PGOFF(x) (((unsigned long)(x)) & ~PAGE_MASK)
-#define VIA_PFN(x) ((unsigned long)(x) >> PAGE_SHIFT)
-
-typedef struct _drm_via_descriptor {
- uint32_t mem_addr;
- uint32_t dev_addr;
- uint32_t size;
- uint32_t next;
-} drm_via_descriptor_t;
-
-typedef enum {
- state_command,
- state_header2,
- state_header1,
- state_vheader5,
- state_vheader6,
- state_error
-} verifier_state_t;
-
-typedef enum {
- no_check = 0,
- check_for_header2,
- check_for_header1,
- check_for_header2_err,
- check_for_header1_err,
- check_for_fire,
- check_z_buffer_addr0,
- check_z_buffer_addr1,
- check_z_buffer_addr_mode,
- check_destination_addr0,
- check_destination_addr1,
- check_destination_addr_mode,
- check_for_dummy,
- check_for_dd,
- check_texture_addr0,
- check_texture_addr1,
- check_texture_addr2,
- check_texture_addr3,
- check_texture_addr4,
- check_texture_addr5,
- check_texture_addr6,
- check_texture_addr7,
- check_texture_addr8,
- check_texture_addr_mode,
- check_for_vertex_count,
- check_number_texunits,
- forbidden_command
-} hazard_t;
-
-/*
- * Associates each hazard above with a possible multi-command
- * sequence. For example an address that is split over multiple
- * commands and that needs to be checked at the first command
- * that does not include any part of the address.
- */
-
-static drm_via_sequence_t seqs[] = {
- no_sequence,
- no_sequence,
- no_sequence,
- no_sequence,
- no_sequence,
- no_sequence,
- z_address,
- z_address,
- z_address,
- dest_address,
- dest_address,
- dest_address,
- no_sequence,
- no_sequence,
- tex_address,
- tex_address,
- tex_address,
- tex_address,
- tex_address,
- tex_address,
- tex_address,
- tex_address,
- tex_address,
- tex_address,
- no_sequence
-};
-
-typedef struct {
- unsigned int code;
- hazard_t hz;
-} hz_init_t;
-
-static hz_init_t init_table1[] = {
- {0xf2, check_for_header2_err},
- {0xf0, check_for_header1_err},
- {0xee, check_for_fire},
- {0xcc, check_for_dummy},
- {0xdd, check_for_dd},
- {0x00, no_check},
- {0x10, check_z_buffer_addr0},
- {0x11, check_z_buffer_addr1},
- {0x12, check_z_buffer_addr_mode},
- {0x13, no_check},
- {0x14, no_check},
- {0x15, no_check},
- {0x23, no_check},
- {0x24, no_check},
- {0x33, no_check},
- {0x34, no_check},
- {0x35, no_check},
- {0x36, no_check},
- {0x37, no_check},
- {0x38, no_check},
- {0x39, no_check},
- {0x3A, no_check},
- {0x3B, no_check},
- {0x3C, no_check},
- {0x3D, no_check},
- {0x3E, no_check},
- {0x40, check_destination_addr0},
- {0x41, check_destination_addr1},
- {0x42, check_destination_addr_mode},
- {0x43, no_check},
- {0x44, no_check},
- {0x50, no_check},
- {0x51, no_check},
- {0x52, no_check},
- {0x53, no_check},
- {0x54, no_check},
- {0x55, no_check},
- {0x56, no_check},
- {0x57, no_check},
- {0x58, no_check},
- {0x70, no_check},
- {0x71, no_check},
- {0x78, no_check},
- {0x79, no_check},
- {0x7A, no_check},
- {0x7B, no_check},
- {0x7C, no_check},
- {0x7D, check_for_vertex_count}
-};
-
-static hz_init_t init_table2[] = {
- {0xf2, check_for_header2_err},
- {0xf0, check_for_header1_err},
- {0xee, check_for_fire},
- {0xcc, check_for_dummy},
- {0x00, check_texture_addr0},
- {0x01, check_texture_addr0},
- {0x02, check_texture_addr0},
- {0x03, check_texture_addr0},
- {0x04, check_texture_addr0},
- {0x05, check_texture_addr0},
- {0x06, check_texture_addr0},
- {0x07, check_texture_addr0},
- {0x08, check_texture_addr0},
- {0x09, check_texture_addr0},
- {0x20, check_texture_addr1},
- {0x21, check_texture_addr1},
- {0x22, check_texture_addr1},
- {0x23, check_texture_addr4},
- {0x2B, check_texture_addr3},
- {0x2C, check_texture_addr3},
- {0x2D, check_texture_addr3},
- {0x2E, check_texture_addr3},
- {0x2F, check_texture_addr3},
- {0x30, check_texture_addr3},
- {0x31, check_texture_addr3},
- {0x32, check_texture_addr3},
- {0x33, check_texture_addr3},
- {0x34, check_texture_addr3},
- {0x4B, check_texture_addr5},
- {0x4C, check_texture_addr6},
- {0x51, check_texture_addr7},
- {0x52, check_texture_addr8},
- {0x77, check_texture_addr2},
- {0x78, no_check},
- {0x79, no_check},
- {0x7A, no_check},
- {0x7B, check_texture_addr_mode},
- {0x7C, no_check},
- {0x7D, no_check},
- {0x7E, no_check},
- {0x7F, no_check},
- {0x80, no_check},
- {0x81, no_check},
- {0x82, no_check},
- {0x83, no_check},
- {0x85, no_check},
- {0x86, no_check},
- {0x87, no_check},
- {0x88, no_check},
- {0x89, no_check},
- {0x8A, no_check},
- {0x90, no_check},
- {0x91, no_check},
- {0x92, no_check},
- {0x93, no_check}
-};
-
-static hz_init_t init_table3[] = {
- {0xf2, check_for_header2_err},
- {0xf0, check_for_header1_err},
- {0xcc, check_for_dummy},
- {0x00, check_number_texunits}
-};
-
-static hazard_t table1[256];
-static hazard_t table2[256];
-static hazard_t table3[256];
-
-static __inline__ int
-eat_words(const uint32_t **buf, const uint32_t *buf_end, unsigned num_words)
-{
- if ((buf_end - *buf) >= num_words) {
- *buf += num_words;
- return 0;
- }
- DRM_ERROR("Illegal termination of DMA command buffer\n");
- return 1;
-}
-
-/*
- * Partially stolen from drm_memory.h
- */
-
-static __inline__ drm_local_map_t *via_drm_lookup_agp_map(drm_via_state_t *seq,
- unsigned long offset,
- unsigned long size,
- struct drm_device *dev)
-{
- struct drm_map_list *r_list;
- drm_local_map_t *map = seq->map_cache;
-
- if (map && map->offset <= offset
- && (offset + size) <= (map->offset + map->size)) {
- return map;
- }
-
- list_for_each_entry(r_list, &dev->maplist, head) {
- map = r_list->map;
- if (!map)
- continue;
- if (map->offset <= offset
- && (offset + size) <= (map->offset + map->size)
- && !(map->flags & _DRM_RESTRICTED)
- && (map->type == _DRM_AGP)) {
- seq->map_cache = map;
- return map;
- }
- }
- return NULL;
-}
-
-/*
- * Require that all AGP texture levels reside in the same AGP map which should
- * be mappable by the client. This is not a big restriction.
- * FIXME: To actually enforce this security policy strictly, drm_rmmap
- * would have to wait for dma quiescent before removing an AGP map.
- * The via_drm_lookup_agp_map call in reality seems to take
- * very little CPU time.
- */
-
-static __inline__ int finish_current_sequence(drm_via_state_t * cur_seq)
-{
- switch (cur_seq->unfinished) {
- case z_address:
- DRM_DEBUG("Z Buffer start address is 0x%x\n", cur_seq->z_addr);
- break;
- case dest_address:
- DRM_DEBUG("Destination start address is 0x%x\n",
- cur_seq->d_addr);
- break;
- case tex_address:
- if (cur_seq->agp_texture) {
- unsigned start =
- cur_seq->tex_level_lo[cur_seq->texture];
- unsigned end = cur_seq->tex_level_hi[cur_seq->texture];
- unsigned long lo = ~0, hi = 0, tmp;
- uint32_t *addr, *pitch, *height, tex;
- unsigned i;
- int npot;
-
- if (end > 9)
- end = 9;
- if (start > 9)
- start = 9;
-
- addr =
- &(cur_seq->t_addr[tex = cur_seq->texture][start]);
- pitch = &(cur_seq->pitch[tex][start]);
- height = &(cur_seq->height[tex][start]);
- npot = cur_seq->tex_npot[tex];
- for (i = start; i <= end; ++i) {
- tmp = *addr++;
- if (tmp < lo)
- lo = tmp;
- if (i == 0 && npot)
- tmp += (*height++ * *pitch++);
- else
- tmp += (*height++ << *pitch++);
- if (tmp > hi)
- hi = tmp;
- }
-
- if (!via_drm_lookup_agp_map
- (cur_seq, lo, hi - lo, cur_seq->dev)) {
- DRM_ERROR
- ("AGP texture is not in allowed map\n");
- return 2;
- }
- }
- break;
- default:
- break;
- }
- cur_seq->unfinished = no_sequence;
- return 0;
-}
-
-static __inline__ int
-investigate_hazard(uint32_t cmd, hazard_t hz, drm_via_state_t *cur_seq)
-{
- register uint32_t tmp, *tmp_addr;
-
- if (cur_seq->unfinished && (cur_seq->unfinished != seqs[hz])) {
- int ret;
- if ((ret = finish_current_sequence(cur_seq)))
- return ret;
- }
-
- switch (hz) {
- case check_for_header2:
- if (cmd == HALCYON_HEADER2)
- return 1;
- return 0;
- case check_for_header1:
- if ((cmd & HALCYON_HEADER1MASK) == HALCYON_HEADER1)
- return 1;
- return 0;
- case check_for_header2_err:
- if (cmd == HALCYON_HEADER2)
- return 1;
- DRM_ERROR("Illegal DMA HALCYON_HEADER2 command\n");
- break;
- case check_for_header1_err:
- if ((cmd & HALCYON_HEADER1MASK) == HALCYON_HEADER1)
- return 1;
- DRM_ERROR("Illegal DMA HALCYON_HEADER1 command\n");
- break;
- case check_for_fire:
- if ((cmd & HALCYON_FIREMASK) == HALCYON_FIRECMD)
- return 1;
- DRM_ERROR("Illegal DMA HALCYON_FIRECMD command\n");
- break;
- case check_for_dummy:
- if (HC_DUMMY == cmd)
- return 0;
- DRM_ERROR("Illegal DMA HC_DUMMY command\n");
- break;
- case check_for_dd:
- if (0xdddddddd == cmd)
- return 0;
- DRM_ERROR("Illegal DMA 0xdddddddd command\n");
- break;
- case check_z_buffer_addr0:
- cur_seq->unfinished = z_address;
- cur_seq->z_addr = (cur_seq->z_addr & 0xFF000000) |
- (cmd & 0x00FFFFFF);
- return 0;
- case check_z_buffer_addr1:
- cur_seq->unfinished = z_address;
- cur_seq->z_addr = (cur_seq->z_addr & 0x00FFFFFF) |
- ((cmd & 0xFF) << 24);
- return 0;
- case check_z_buffer_addr_mode:
- cur_seq->unfinished = z_address;
- if ((cmd & 0x0000C000) == 0)
- return 0;
- DRM_ERROR("Attempt to place Z buffer in system memory\n");
- return 2;
- case check_destination_addr0:
- cur_seq->unfinished = dest_address;
- cur_seq->d_addr = (cur_seq->d_addr & 0xFF000000) |
- (cmd & 0x00FFFFFF);
- return 0;
- case check_destination_addr1:
- cur_seq->unfinished = dest_address;
- cur_seq->d_addr = (cur_seq->d_addr & 0x00FFFFFF) |
- ((cmd & 0xFF) << 24);
- return 0;
- case check_destination_addr_mode:
- cur_seq->unfinished = dest_address;
- if ((cmd & 0x0000C000) == 0)
- return 0;
- DRM_ERROR
- ("Attempt to place 3D drawing buffer in system memory\n");
- return 2;
- case check_texture_addr0:
- cur_seq->unfinished = tex_address;
- tmp = (cmd >> 24);
- tmp_addr = &cur_seq->t_addr[cur_seq->texture][tmp];
- *tmp_addr = (*tmp_addr & 0xFF000000) | (cmd & 0x00FFFFFF);
- return 0;
- case check_texture_addr1:
- cur_seq->unfinished = tex_address;
- tmp = ((cmd >> 24) - 0x20);
- tmp += tmp << 1;
- tmp_addr = &cur_seq->t_addr[cur_seq->texture][tmp];
- *tmp_addr = (*tmp_addr & 0x00FFFFFF) | ((cmd & 0xFF) << 24);
- tmp_addr++;
- *tmp_addr = (*tmp_addr & 0x00FFFFFF) | ((cmd & 0xFF00) << 16);
- tmp_addr++;
- *tmp_addr = (*tmp_addr & 0x00FFFFFF) | ((cmd & 0xFF0000) << 8);
- return 0;
- case check_texture_addr2:
- cur_seq->unfinished = tex_address;
- cur_seq->tex_level_lo[tmp = cur_seq->texture] = cmd & 0x3F;
- cur_seq->tex_level_hi[tmp] = (cmd & 0xFC0) >> 6;
- return 0;
- case check_texture_addr3:
- cur_seq->unfinished = tex_address;
- tmp = ((cmd >> 24) - HC_SubA_HTXnL0Pit);
- if (tmp == 0 &&
- (cmd & HC_HTXnEnPit_MASK)) {
- cur_seq->pitch[cur_seq->texture][tmp] =
- (cmd & HC_HTXnLnPit_MASK);
- cur_seq->tex_npot[cur_seq->texture] = 1;
- } else {
- cur_seq->pitch[cur_seq->texture][tmp] =
- (cmd & HC_HTXnLnPitE_MASK) >> HC_HTXnLnPitE_SHIFT;
- cur_seq->tex_npot[cur_seq->texture] = 0;
- if (cmd & 0x000FFFFF) {
- DRM_ERROR
- ("Unimplemented texture level 0 pitch mode.\n");
- return 2;
- }
- }
- return 0;
- case check_texture_addr4:
- cur_seq->unfinished = tex_address;
- tmp_addr = &cur_seq->t_addr[cur_seq->texture][9];
- *tmp_addr = (*tmp_addr & 0x00FFFFFF) | ((cmd & 0xFF) << 24);
- return 0;
- case check_texture_addr5:
- case check_texture_addr6:
- cur_seq->unfinished = tex_address;
- /*
- * Texture width. We don't care since we have the pitch.
- */
- return 0;
- case check_texture_addr7:
- cur_seq->unfinished = tex_address;
- tmp_addr = &(cur_seq->height[cur_seq->texture][0]);
- tmp_addr[5] = 1 << ((cmd & 0x00F00000) >> 20);
- tmp_addr[4] = 1 << ((cmd & 0x000F0000) >> 16);
- tmp_addr[3] = 1 << ((cmd & 0x0000F000) >> 12);
- tmp_addr[2] = 1 << ((cmd & 0x00000F00) >> 8);
- tmp_addr[1] = 1 << ((cmd & 0x000000F0) >> 4);
- tmp_addr[0] = 1 << (cmd & 0x0000000F);
- return 0;
- case check_texture_addr8:
- cur_seq->unfinished = tex_address;
- tmp_addr = &(cur_seq->height[cur_seq->texture][0]);
- tmp_addr[9] = 1 << ((cmd & 0x0000F000) >> 12);
- tmp_addr[8] = 1 << ((cmd & 0x00000F00) >> 8);
- tmp_addr[7] = 1 << ((cmd & 0x000000F0) >> 4);
- tmp_addr[6] = 1 << (cmd & 0x0000000F);
- return 0;
- case check_texture_addr_mode:
- cur_seq->unfinished = tex_address;
- if (2 == (tmp = cmd & 0x00000003)) {
- DRM_ERROR
- ("Attempt to fetch texture from system memory.\n");
- return 2;
- }
- cur_seq->agp_texture = (tmp == 3);
- cur_seq->tex_palette_size[cur_seq->texture] =
- (cmd >> 16) & 0x000000007;
- return 0;
- case check_for_vertex_count:
- cur_seq->vertex_count = cmd & 0x0000FFFF;
- return 0;
- case check_number_texunits:
- cur_seq->multitex = (cmd >> 3) & 1;
- return 0;
- default:
- DRM_ERROR("Illegal DMA data: 0x%x\n", cmd);
- return 2;
- }
- return 2;
-}
-
-static __inline__ int
-via_check_prim_list(uint32_t const **buffer, const uint32_t * buf_end,
- drm_via_state_t *cur_seq)
-{
- drm_via_private_t *dev_priv =
- (drm_via_private_t *) cur_seq->dev->dev_private;
- uint32_t a_fire, bcmd, dw_count;
- int ret = 0;
- int have_fire;
- const uint32_t *buf = *buffer;
-
- while (buf < buf_end) {
- have_fire = 0;
- if ((buf_end - buf) < 2) {
- DRM_ERROR
- ("Unexpected termination of primitive list.\n");
- ret = 1;
- break;
- }
- if ((*buf & HC_ACMD_MASK) != HC_ACMD_HCmdB)
- break;
- bcmd = *buf++;
- if ((*buf & HC_ACMD_MASK) != HC_ACMD_HCmdA) {
- DRM_ERROR("Expected Vertex List A command, got 0x%x\n",
- *buf);
- ret = 1;
- break;
- }
- a_fire =
- *buf++ | HC_HPLEND_MASK | HC_HPMValidN_MASK |
- HC_HE3Fire_MASK;
-
- /*
- * How many dwords per vertex ?
- */
-
- if (cur_seq->agp && ((bcmd & (0xF << 11)) == 0)) {
- DRM_ERROR("Illegal B command vertex data for AGP.\n");
- ret = 1;
- break;
- }
-
- dw_count = 0;
- if (bcmd & (1 << 7))
- dw_count += (cur_seq->multitex) ? 2 : 1;
- if (bcmd & (1 << 8))
- dw_count += (cur_seq->multitex) ? 2 : 1;
- if (bcmd & (1 << 9))
- dw_count++;
- if (bcmd & (1 << 10))
- dw_count++;
- if (bcmd & (1 << 11))
- dw_count++;
- if (bcmd & (1 << 12))
- dw_count++;
- if (bcmd & (1 << 13))
- dw_count++;
- if (bcmd & (1 << 14))
- dw_count++;
-
- while (buf < buf_end) {
- if (*buf == a_fire) {
- if (dev_priv->num_fire_offsets >=
- VIA_FIRE_BUF_SIZE) {
- DRM_ERROR("Fire offset buffer full.\n");
- ret = 1;
- break;
- }
- dev_priv->fire_offsets[dev_priv->
- num_fire_offsets++] =
- buf;
- have_fire = 1;
- buf++;
- if (buf < buf_end && *buf == a_fire)
- buf++;
- break;
- }
- if ((*buf == HALCYON_HEADER2) ||
- ((*buf & HALCYON_FIREMASK) == HALCYON_FIRECMD)) {
- DRM_ERROR("Missing Vertex Fire command, "
- "Stray Vertex Fire command or verifier "
- "lost sync.\n");
- ret = 1;
- break;
- }
- if ((ret = eat_words(&buf, buf_end, dw_count)))
- break;
- }
- if (buf >= buf_end && !have_fire) {
- DRM_ERROR("Missing Vertex Fire command or verifier "
- "lost sync.\n");
- ret = 1;
- break;
- }
- if (cur_seq->agp && ((buf - cur_seq->buf_start) & 0x01)) {
- DRM_ERROR("AGP Primitive list end misaligned.\n");
- ret = 1;
- break;
- }
- }
- *buffer = buf;
- return ret;
-}
-
-static __inline__ verifier_state_t
-via_check_header2(uint32_t const **buffer, const uint32_t *buf_end,
- drm_via_state_t *hc_state)
-{
- uint32_t cmd;
- int hz_mode;
- hazard_t hz;
- const uint32_t *buf = *buffer;
- const hazard_t *hz_table;
-
- if ((buf_end - buf) < 2) {
- DRM_ERROR
- ("Illegal termination of DMA HALCYON_HEADER2 sequence.\n");
- return state_error;
- }
- buf++;
- cmd = (*buf++ & 0xFFFF0000) >> 16;
-
- switch (cmd) {
- case HC_ParaType_CmdVdata:
- if (via_check_prim_list(&buf, buf_end, hc_state))
- return state_error;
- *buffer = buf;
- return state_command;
- case HC_ParaType_NotTex:
- hz_table = table1;
- break;
- case HC_ParaType_Tex:
- hc_state->texture = 0;
- hz_table = table2;
- break;
- case (HC_ParaType_Tex | (HC_SubType_Tex1 << 8)):
- hc_state->texture = 1;
- hz_table = table2;
- break;
- case (HC_ParaType_Tex | (HC_SubType_TexGeneral << 8)):
- hz_table = table3;
- break;
- case HC_ParaType_Auto:
- if (eat_words(&buf, buf_end, 2))
- return state_error;
- *buffer = buf;
- return state_command;
- case (HC_ParaType_Palette | (HC_SubType_Stipple << 8)):
- if (eat_words(&buf, buf_end, 32))
- return state_error;
- *buffer = buf;
- return state_command;
- case (HC_ParaType_Palette | (HC_SubType_TexPalette0 << 8)):
- case (HC_ParaType_Palette | (HC_SubType_TexPalette1 << 8)):
- DRM_ERROR("Texture palettes are rejected because of "
- "lack of info how to determine their size.\n");
- return state_error;
- case (HC_ParaType_Palette | (HC_SubType_FogTable << 8)):
- DRM_ERROR("Fog factor palettes are rejected because of "
- "lack of info how to determine their size.\n");
- return state_error;
- default:
-
- /*
- * There are some unimplemented HC_ParaTypes here, that
- * need to be implemented if the Mesa driver is extended.
- */
-
- DRM_ERROR("Invalid or unimplemented HALCYON_HEADER2 "
- "DMA subcommand: 0x%x. Previous dword: 0x%x\n",
- cmd, *(buf - 2));
- *buffer = buf;
- return state_error;
- }
-
- while (buf < buf_end) {
- cmd = *buf++;
- if ((hz = hz_table[cmd >> 24])) {
- if ((hz_mode = investigate_hazard(cmd, hz, hc_state))) {
- if (hz_mode == 1) {
- buf--;
- break;
- }
- return state_error;
- }
- } else if (hc_state->unfinished &&
- finish_current_sequence(hc_state)) {
- return state_error;
- }
- }
- if (hc_state->unfinished && finish_current_sequence(hc_state))
- return state_error;
- *buffer = buf;
- return state_command;
-}
-
-static __inline__ verifier_state_t
-via_parse_header2(drm_via_private_t *dev_priv, uint32_t const **buffer,
- const uint32_t *buf_end, int *fire_count)
-{
- uint32_t cmd;
- const uint32_t *buf = *buffer;
- const uint32_t *next_fire;
- int burst = 0;
-
- next_fire = dev_priv->fire_offsets[*fire_count];
- buf++;
- cmd = (*buf & 0xFFFF0000) >> 16;
- via_write(dev_priv, HC_REG_TRANS_SET + HC_REG_BASE, *buf++);
- switch (cmd) {
- case HC_ParaType_CmdVdata:
- while ((buf < buf_end) &&
- (*fire_count < dev_priv->num_fire_offsets) &&
- (*buf & HC_ACMD_MASK) == HC_ACMD_HCmdB) {
- while (buf <= next_fire) {
- via_write(dev_priv, HC_REG_TRANS_SPACE + HC_REG_BASE +
- (burst & 63), *buf++);
- burst += 4;
- }
- if ((buf < buf_end)
- && ((*buf & HALCYON_FIREMASK) == HALCYON_FIRECMD))
- buf++;
-
- if (++(*fire_count) < dev_priv->num_fire_offsets)
- next_fire = dev_priv->fire_offsets[*fire_count];
- }
- break;
- default:
- while (buf < buf_end) {
-
- if (*buf == HC_HEADER2 ||
- (*buf & HALCYON_HEADER1MASK) == HALCYON_HEADER1 ||
- (*buf & VIA_VIDEOMASK) == VIA_VIDEO_HEADER5 ||
- (*buf & VIA_VIDEOMASK) == VIA_VIDEO_HEADER6)
- break;
-
- via_write(dev_priv, HC_REG_TRANS_SPACE + HC_REG_BASE +
- (burst & 63), *buf++);
- burst += 4;
- }
- }
- *buffer = buf;
- return state_command;
-}
-
-static __inline__ int verify_mmio_address(uint32_t address)
-{
- if ((address > 0x3FF) && (address < 0xC00)) {
- DRM_ERROR("Invalid VIDEO DMA command. "
- "Attempt to access 3D- or command burst area.\n");
- return 1;
- } else if ((address > 0xCFF) && (address < 0x1300)) {
- DRM_ERROR("Invalid VIDEO DMA command. "
- "Attempt to access PCI DMA area.\n");
- return 1;
- } else if (address > 0x13FF) {
- DRM_ERROR("Invalid VIDEO DMA command. "
- "Attempt to access VGA registers.\n");
- return 1;
- }
- return 0;
-}
-
-static __inline__ int
-verify_video_tail(uint32_t const **buffer, const uint32_t * buf_end,
- uint32_t dwords)
-{
- const uint32_t *buf = *buffer;
-
- if (buf_end - buf < dwords) {
- DRM_ERROR("Illegal termination of video command.\n");
- return 1;
- }
- while (dwords--) {
- if (*buf++) {
- DRM_ERROR("Illegal video command tail.\n");
- return 1;
- }
- }
- *buffer = buf;
- return 0;
-}
-
-static __inline__ verifier_state_t
-via_check_header1(uint32_t const **buffer, const uint32_t * buf_end)
-{
- uint32_t cmd;
- const uint32_t *buf = *buffer;
- verifier_state_t ret = state_command;
-
- while (buf < buf_end) {
- cmd = *buf;
- if ((cmd > ((0x3FF >> 2) | HALCYON_HEADER1)) &&
- (cmd < ((0xC00 >> 2) | HALCYON_HEADER1))) {
- if ((cmd & HALCYON_HEADER1MASK) != HALCYON_HEADER1)
- break;
- DRM_ERROR("Invalid HALCYON_HEADER1 command. "
- "Attempt to access 3D- or command burst area.\n");
- ret = state_error;
- break;
- } else if (cmd > ((0xCFF >> 2) | HALCYON_HEADER1)) {
- if ((cmd & HALCYON_HEADER1MASK) != HALCYON_HEADER1)
- break;
- DRM_ERROR("Invalid HALCYON_HEADER1 command. "
- "Attempt to access VGA registers.\n");
- ret = state_error;
- break;
- } else {
- buf += 2;
- }
- }
- *buffer = buf;
- return ret;
-}
-
-static __inline__ verifier_state_t
-via_parse_header1(drm_via_private_t *dev_priv, uint32_t const **buffer,
- const uint32_t *buf_end)
-{
- register uint32_t cmd;
- const uint32_t *buf = *buffer;
-
- while (buf < buf_end) {
- cmd = *buf;
- if ((cmd & HALCYON_HEADER1MASK) != HALCYON_HEADER1)
- break;
- via_write(dev_priv, (cmd & ~HALCYON_HEADER1MASK) << 2, *++buf);
- buf++;
- }
- *buffer = buf;
- return state_command;
-}
-
-static __inline__ verifier_state_t
-via_check_vheader5(uint32_t const **buffer, const uint32_t *buf_end)
-{
- uint32_t data;
- const uint32_t *buf = *buffer;
-
- if (buf_end - buf < 4) {
- DRM_ERROR("Illegal termination of video header5 command\n");
- return state_error;
- }
-
- data = *buf++ & ~VIA_VIDEOMASK;
- if (verify_mmio_address(data))
- return state_error;
-
- data = *buf++;
- if (*buf++ != 0x00F50000) {
- DRM_ERROR("Illegal header5 header data\n");
- return state_error;
- }
- if (*buf++ != 0x00000000) {
- DRM_ERROR("Illegal header5 header data\n");
- return state_error;
- }
- if (eat_words(&buf, buf_end, data))
- return state_error;
- if ((data & 3) && verify_video_tail(&buf, buf_end, 4 - (data & 3)))
- return state_error;
- *buffer = buf;
- return state_command;
-
-}
-
-static __inline__ verifier_state_t
-via_parse_vheader5(drm_via_private_t *dev_priv, uint32_t const **buffer,
- const uint32_t *buf_end)
-{
- uint32_t addr, count, i;
- const uint32_t *buf = *buffer;
-
- addr = *buf++ & ~VIA_VIDEOMASK;
- i = count = *buf;
- buf += 3;
- while (i--)
- via_write(dev_priv, addr, *buf++);
- if (count & 3)
- buf += 4 - (count & 3);
- *buffer = buf;
- return state_command;
-}
-
-static __inline__ verifier_state_t
-via_check_vheader6(uint32_t const **buffer, const uint32_t * buf_end)
-{
- uint32_t data;
- const uint32_t *buf = *buffer;
- uint32_t i;
-
- if (buf_end - buf < 4) {
- DRM_ERROR("Illegal termination of video header6 command\n");
- return state_error;
- }
- buf++;
- data = *buf++;
- if (*buf++ != 0x00F60000) {
- DRM_ERROR("Illegal header6 header data\n");
- return state_error;
- }
- if (*buf++ != 0x00000000) {
- DRM_ERROR("Illegal header6 header data\n");
- return state_error;
- }
- if ((buf_end - buf) < (data << 1)) {
- DRM_ERROR("Illegal termination of video header6 command\n");
- return state_error;
- }
- for (i = 0; i < data; ++i) {
- if (verify_mmio_address(*buf++))
- return state_error;
- buf++;
- }
- data <<= 1;
- if ((data & 3) && verify_video_tail(&buf, buf_end, 4 - (data & 3)))
- return state_error;
- *buffer = buf;
- return state_command;
-}
-
-static __inline__ verifier_state_t
-via_parse_vheader6(drm_via_private_t *dev_priv, uint32_t const **buffer,
- const uint32_t *buf_end)
-{
-
- uint32_t addr, count, i;
- const uint32_t *buf = *buffer;
-
- i = count = *++buf;
- buf += 3;
- while (i--) {
- addr = *buf++;
- via_write(dev_priv, addr, *buf++);
- }
- count <<= 1;
- if (count & 3)
- buf += 4 - (count & 3);
- *buffer = buf;
- return state_command;
-}
-
-static int
-via_verify_command_stream(const uint32_t * buf, unsigned int size,
- struct drm_device * dev, int agp)
-{
-
- drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
- drm_via_state_t *hc_state = &dev_priv->hc_state;
- drm_via_state_t saved_state = *hc_state;
- uint32_t cmd;
- const uint32_t *buf_end = buf + (size >> 2);
- verifier_state_t state = state_command;
- int cme_video;
- int supported_3d;
-
- cme_video = (dev_priv->chipset == VIA_PRO_GROUP_A ||
- dev_priv->chipset == VIA_DX9_0);
-
- supported_3d = dev_priv->chipset != VIA_DX9_0;
-
- hc_state->dev = dev;
- hc_state->unfinished = no_sequence;
- hc_state->map_cache = NULL;
- hc_state->agp = agp;
- hc_state->buf_start = buf;
- dev_priv->num_fire_offsets = 0;
-
- while (buf < buf_end) {
-
- switch (state) {
- case state_header2:
- state = via_check_header2(&buf, buf_end, hc_state);
- break;
- case state_header1:
- state = via_check_header1(&buf, buf_end);
- break;
- case state_vheader5:
- state = via_check_vheader5(&buf, buf_end);
- break;
- case state_vheader6:
- state = via_check_vheader6(&buf, buf_end);
- break;
- case state_command:
- cmd = *buf;
- if ((cmd == HALCYON_HEADER2) && supported_3d)
- state = state_header2;
- else if ((cmd & HALCYON_HEADER1MASK) == HALCYON_HEADER1)
- state = state_header1;
- else if (cme_video
- && (cmd & VIA_VIDEOMASK) == VIA_VIDEO_HEADER5)
- state = state_vheader5;
- else if (cme_video
- && (cmd & VIA_VIDEOMASK) == VIA_VIDEO_HEADER6)
- state = state_vheader6;
- else if ((cmd == HALCYON_HEADER2) && !supported_3d) {
- DRM_ERROR("Accelerated 3D is not supported on this chipset yet.\n");
- state = state_error;
- } else {
- DRM_ERROR
- ("Invalid / Unimplemented DMA HEADER command. 0x%x\n",
- cmd);
- state = state_error;
- }
- break;
- case state_error:
- default:
- *hc_state = saved_state;
- return -EINVAL;
- }
- }
- if (state == state_error) {
- *hc_state = saved_state;
- return -EINVAL;
- }
- return 0;
-}
-
-static int
-via_parse_command_stream(struct drm_device *dev, const uint32_t *buf,
- unsigned int size)
-{
-
- drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
- uint32_t cmd;
- const uint32_t *buf_end = buf + (size >> 2);
- verifier_state_t state = state_command;
- int fire_count = 0;
-
- while (buf < buf_end) {
-
- switch (state) {
- case state_header2:
- state =
- via_parse_header2(dev_priv, &buf, buf_end,
- &fire_count);
- break;
- case state_header1:
- state = via_parse_header1(dev_priv, &buf, buf_end);
- break;
- case state_vheader5:
- state = via_parse_vheader5(dev_priv, &buf, buf_end);
- break;
- case state_vheader6:
- state = via_parse_vheader6(dev_priv, &buf, buf_end);
- break;
- case state_command:
- cmd = *buf;
- if (cmd == HALCYON_HEADER2)
- state = state_header2;
- else if ((cmd & HALCYON_HEADER1MASK) == HALCYON_HEADER1)
- state = state_header1;
- else if ((cmd & VIA_VIDEOMASK) == VIA_VIDEO_HEADER5)
- state = state_vheader5;
- else if ((cmd & VIA_VIDEOMASK) == VIA_VIDEO_HEADER6)
- state = state_vheader6;
- else {
- DRM_ERROR
- ("Invalid / Unimplemented DMA HEADER command. 0x%x\n",
- cmd);
- state = state_error;
- }
- break;
- case state_error:
- default:
- return -EINVAL;
- }
- }
- if (state == state_error)
- return -EINVAL;
- return 0;
-}
-
-static void
-setup_hazard_table(hz_init_t init_table[], hazard_t table[], int size)
-{
- int i;
-
- for (i = 0; i < 256; ++i)
- table[i] = forbidden_command;
-
- for (i = 0; i < size; ++i)
- table[init_table[i].code] = init_table[i].hz;
-}
-
-static void via_init_command_verifier(void)
-{
- setup_hazard_table(init_table1, table1, ARRAY_SIZE(init_table1));
- setup_hazard_table(init_table2, table2, ARRAY_SIZE(init_table2));
- setup_hazard_table(init_table3, table3, ARRAY_SIZE(init_table3));
-}
-/*
- * Unmap a DMA mapping.
- */
-static void
-via_unmap_blit_from_device(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
-{
- int num_desc = vsg->num_desc;
- unsigned cur_descriptor_page = num_desc / vsg->descriptors_per_page;
- unsigned descriptor_this_page = num_desc % vsg->descriptors_per_page;
- drm_via_descriptor_t *desc_ptr = vsg->desc_pages[cur_descriptor_page] +
- descriptor_this_page;
- dma_addr_t next = vsg->chain_start;
-
- while (num_desc--) {
- if (descriptor_this_page-- == 0) {
- cur_descriptor_page--;
- descriptor_this_page = vsg->descriptors_per_page - 1;
- desc_ptr = vsg->desc_pages[cur_descriptor_page] +
- descriptor_this_page;
- }
- dma_unmap_single(&pdev->dev, next, sizeof(*desc_ptr), DMA_TO_DEVICE);
- dma_unmap_page(&pdev->dev, desc_ptr->mem_addr, desc_ptr->size, vsg->direction);
- next = (dma_addr_t) desc_ptr->next;
- desc_ptr--;
- }
-}
-
-/*
- * If mode = 0, count how many descriptors are needed.
- * If mode = 1, Map the DMA pages for the device, put together and map also the descriptors.
- * Descriptors are run in reverse order by the hardware because we are not allowed to update the
- * 'next' field without syncing calls when the descriptor is already mapped.
- */
-static void
-via_map_blit_for_device(struct pci_dev *pdev,
- const drm_via_dmablit_t *xfer,
- drm_via_sg_info_t *vsg,
- int mode)
-{
- unsigned cur_descriptor_page = 0;
- unsigned num_descriptors_this_page = 0;
- unsigned char *mem_addr = xfer->mem_addr;
- unsigned char *cur_mem;
- unsigned char *first_addr = (unsigned char *)VIA_PGDN(mem_addr);
- uint32_t fb_addr = xfer->fb_addr;
- uint32_t cur_fb;
- unsigned long line_len;
- unsigned remaining_len;
- int num_desc = 0;
- int cur_line;
- dma_addr_t next = 0 | VIA_DMA_DPR_EC;
- drm_via_descriptor_t *desc_ptr = NULL;
-
- if (mode == 1)
- desc_ptr = vsg->desc_pages[cur_descriptor_page];
-
- for (cur_line = 0; cur_line < xfer->num_lines; ++cur_line) {
-
- line_len = xfer->line_length;
- cur_fb = fb_addr;
- cur_mem = mem_addr;
-
- while (line_len > 0) {
-
- remaining_len = min(PAGE_SIZE-VIA_PGOFF(cur_mem), line_len);
- line_len -= remaining_len;
-
- if (mode == 1) {
- desc_ptr->mem_addr =
- dma_map_page(&pdev->dev,
- vsg->pages[VIA_PFN(cur_mem) -
- VIA_PFN(first_addr)],
- VIA_PGOFF(cur_mem), remaining_len,
- vsg->direction);
- desc_ptr->dev_addr = cur_fb;
-
- desc_ptr->size = remaining_len;
- desc_ptr->next = (uint32_t) next;
- next = dma_map_single(&pdev->dev, desc_ptr, sizeof(*desc_ptr),
- DMA_TO_DEVICE);
- desc_ptr++;
- if (++num_descriptors_this_page >= vsg->descriptors_per_page) {
- num_descriptors_this_page = 0;
- desc_ptr = vsg->desc_pages[++cur_descriptor_page];
- }
- }
-
- num_desc++;
- cur_mem += remaining_len;
- cur_fb += remaining_len;
- }
-
- mem_addr += xfer->mem_stride;
- fb_addr += xfer->fb_stride;
- }
-
- if (mode == 1) {
- vsg->chain_start = next;
- vsg->state = dr_via_device_mapped;
- }
- vsg->num_desc = num_desc;
-}
-
-/*
- * Function that frees up all resources for a blit. It is usable even if the
- * blit info has only been partially built as long as the status enum is consistent
- * with the actual status of the used resources.
- */
-static void
-via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
-{
- int i;
-
- switch (vsg->state) {
- case dr_via_device_mapped:
- via_unmap_blit_from_device(pdev, vsg);
- fallthrough;
- case dr_via_desc_pages_alloc:
- for (i = 0; i < vsg->num_desc_pages; ++i) {
- if (vsg->desc_pages[i] != NULL)
- free_page((unsigned long)vsg->desc_pages[i]);
- }
- kfree(vsg->desc_pages);
- fallthrough;
- case dr_via_pages_locked:
- unpin_user_pages_dirty_lock(vsg->pages, vsg->num_pages,
- (vsg->direction == DMA_FROM_DEVICE));
- fallthrough;
- case dr_via_pages_alloc:
- vfree(vsg->pages);
- fallthrough;
- default:
- vsg->state = dr_via_sg_init;
- }
- vfree(vsg->bounce_buffer);
- vsg->bounce_buffer = NULL;
- vsg->free_on_sequence = 0;
-}
-
-/*
- * Fire a blit engine.
- */
-static void
-via_fire_dmablit(struct drm_device *dev, drm_via_sg_info_t *vsg, int engine)
-{
- drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
-
- via_write(dev_priv, VIA_PCI_DMA_MAR0 + engine*0x10, 0);
- via_write(dev_priv, VIA_PCI_DMA_DAR0 + engine*0x10, 0);
- via_write(dev_priv, VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DD | VIA_DMA_CSR_TD |
- VIA_DMA_CSR_DE);
- via_write(dev_priv, VIA_PCI_DMA_MR0 + engine*0x04, VIA_DMA_MR_CM | VIA_DMA_MR_TDIE);
- via_write(dev_priv, VIA_PCI_DMA_BCR0 + engine*0x10, 0);
- via_write(dev_priv, VIA_PCI_DMA_DPR0 + engine*0x10, vsg->chain_start);
- wmb();
- via_write(dev_priv, VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DE | VIA_DMA_CSR_TS);
- via_read(dev_priv, VIA_PCI_DMA_CSR0 + engine*0x04);
-}
-
-/*
- * Obtain a page pointer array and lock all pages into system memory. A segmentation violation will
- * occur here if the calling user does not have access to the submitted address.
- */
-static int
-via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
-{
- int ret;
- unsigned long first_pfn = VIA_PFN(xfer->mem_addr);
- vsg->num_pages = VIA_PFN(xfer->mem_addr + (xfer->num_lines * xfer->mem_stride - 1)) -
- first_pfn + 1;
-
- vsg->pages = vzalloc(array_size(sizeof(struct page *), vsg->num_pages));
- if (NULL == vsg->pages)
- return -ENOMEM;
- ret = pin_user_pages_fast((unsigned long)xfer->mem_addr,
- vsg->num_pages,
- vsg->direction == DMA_FROM_DEVICE ? FOLL_WRITE : 0,
- vsg->pages);
- if (ret != vsg->num_pages) {
- if (ret < 0)
- return ret;
- vsg->state = dr_via_pages_locked;
- return -EINVAL;
- }
- vsg->state = dr_via_pages_locked;
- DRM_DEBUG("DMA pages locked\n");
- return 0;
-}
-
-/*
- * Allocate DMA capable memory for the blit descriptor chain, and an array that keeps track of the
- * pages we allocate. We don't want to use kmalloc for the descriptor chain because it may be
- * quite large for some blits, and pages don't need to be contiguous.
- */
-static int
-via_alloc_desc_pages(drm_via_sg_info_t *vsg)
-{
- int i;
-
- vsg->descriptors_per_page = PAGE_SIZE / sizeof(drm_via_descriptor_t);
- vsg->num_desc_pages = (vsg->num_desc + vsg->descriptors_per_page - 1) /
- vsg->descriptors_per_page;
-
- if (NULL == (vsg->desc_pages = kcalloc(vsg->num_desc_pages, sizeof(void *), GFP_KERNEL)))
- return -ENOMEM;
-
- vsg->state = dr_via_desc_pages_alloc;
- for (i = 0; i < vsg->num_desc_pages; ++i) {
- if (NULL == (vsg->desc_pages[i] =
- (drm_via_descriptor_t *) __get_free_page(GFP_KERNEL)))
- return -ENOMEM;
- }
- DRM_DEBUG("Allocated %d pages for %d descriptors.\n", vsg->num_desc_pages,
- vsg->num_desc);
- return 0;
-}
-
-static void
-via_abort_dmablit(struct drm_device *dev, int engine)
-{
- drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
-
- via_write(dev_priv, VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TA);
-}
-
-static void
-via_dmablit_engine_off(struct drm_device *dev, int engine)
-{
- drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
-
- via_write(dev_priv, VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TD | VIA_DMA_CSR_DD);
-}
-
-/*
- * The dmablit part of the IRQ handler. Trying to do only reasonably fast things here.
- * The rest, like unmapping and freeing memory for done blits is done in a separate workqueue
- * task. Basically the task of the interrupt handler is to submit a new blit to the engine, while
- * the workqueue task takes care of processing associated with the old blit.
- */
-static void
-via_dmablit_handler(struct drm_device *dev, int engine, int from_irq)
-{
- drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
- drm_via_blitq_t *blitq = dev_priv->blit_queues + engine;
- int cur;
- int done_transfer;
- unsigned long irqsave = 0;
- uint32_t status = 0;
-
- DRM_DEBUG("DMA blit handler called. engine = %d, from_irq = %d, blitq = 0x%lx\n",
- engine, from_irq, (unsigned long) blitq);
-
- if (from_irq)
- spin_lock(&blitq->blit_lock);
- else
- spin_lock_irqsave(&blitq->blit_lock, irqsave);
-
- done_transfer = blitq->is_active &&
- ((status = via_read(dev_priv, VIA_PCI_DMA_CSR0 + engine*0x04)) & VIA_DMA_CSR_TD);
- done_transfer = done_transfer || (blitq->aborting && !(status & VIA_DMA_CSR_DE));
-
- cur = blitq->cur;
- if (done_transfer) {
-
- blitq->blits[cur]->aborted = blitq->aborting;
- blitq->done_blit_handle++;
- wake_up(blitq->blit_queue + cur);
-
- cur++;
- if (cur >= VIA_NUM_BLIT_SLOTS)
- cur = 0;
- blitq->cur = cur;
-
- /*
- * Clear transfer done flag.
- */
-
- via_write(dev_priv, VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TD);
-
- blitq->is_active = 0;
- blitq->aborting = 0;
- schedule_work(&blitq->wq);
-
- } else if (blitq->is_active && time_after_eq(jiffies, blitq->end)) {
-
- /*
- * Abort transfer after one second.
- */
-
- via_abort_dmablit(dev, engine);
- blitq->aborting = 1;
- blitq->end = jiffies + HZ;
- }
-
- if (!blitq->is_active) {
- if (blitq->num_outstanding) {
- via_fire_dmablit(dev, blitq->blits[cur], engine);
- blitq->is_active = 1;
- blitq->cur = cur;
- blitq->num_outstanding--;
- blitq->end = jiffies + HZ;
- if (!timer_pending(&blitq->poll_timer))
- mod_timer(&blitq->poll_timer, jiffies + 1);
- } else {
- if (timer_pending(&blitq->poll_timer))
- del_timer(&blitq->poll_timer);
- via_dmablit_engine_off(dev, engine);
- }
- }
-
- if (from_irq)
- spin_unlock(&blitq->blit_lock);
- else
- spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
-}
-
-/*
- * Check whether this blit is still active, performing necessary locking.
- */
-static int
-via_dmablit_active(drm_via_blitq_t *blitq, int engine, uint32_t handle, wait_queue_head_t **queue)
-{
- unsigned long irqsave;
- uint32_t slot;
- int active;
-
- spin_lock_irqsave(&blitq->blit_lock, irqsave);
-
- /*
- * Allow for handle wraparounds.
- */
-
- active = ((blitq->done_blit_handle - handle) > (1 << 23)) &&
- ((blitq->cur_blit_handle - handle) <= (1 << 23));
-
- if (queue && active) {
- slot = handle - blitq->done_blit_handle + blitq->cur - 1;
- if (slot >= VIA_NUM_BLIT_SLOTS)
- slot -= VIA_NUM_BLIT_SLOTS;
- *queue = blitq->blit_queue + slot;
- }
-
- spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
-
- return active;
-}
-
-/*
- * Sync. Wait for at least three seconds for the blit to be performed.
- */
-static int
-via_dmablit_sync(struct drm_device *dev, uint32_t handle, int engine)
-{
-
- drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
- drm_via_blitq_t *blitq = dev_priv->blit_queues + engine;
- wait_queue_head_t *queue;
- int ret = 0;
-
- if (via_dmablit_active(blitq, engine, handle, &queue)) {
- VIA_WAIT_ON(ret, *queue, 3 * HZ,
- !via_dmablit_active(blitq, engine, handle, NULL));
- }
- DRM_DEBUG("DMA blit sync handle 0x%x engine %d returned %d\n",
- handle, engine, ret);
-
- return ret;
-}
-
-/*
- * A timer that regularly polls the blit engine in cases where we don't have interrupts:
- * a) Broken hardware (typically those that don't have any video capture facility).
- * b) Blit abort. The hardware doesn't send an interrupt when a blit is aborted.
- * The timer and hardware IRQ's can and do work in parallel. If the hardware has
- * irqs, it will shorten the latency somewhat.
- */
-static void
-via_dmablit_timer(struct timer_list *t)
-{
- drm_via_blitq_t *blitq = from_timer(blitq, t, poll_timer);
- struct drm_device *dev = blitq->dev;
- int engine = (int)
- (blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues);
-
- DRM_DEBUG("Polling timer called for engine %d, jiffies %lu\n", engine,
- (unsigned long) jiffies);
-
- via_dmablit_handler(dev, engine, 0);
-
- if (!timer_pending(&blitq->poll_timer)) {
- mod_timer(&blitq->poll_timer, jiffies + 1);
-
- /*
- * Rerun handler to delete timer if engines are off, and
- * to shorten abort latency. This is a little nasty.
- */
-
- via_dmablit_handler(dev, engine, 0);
-
- }
-}
-
-/*
- * Workqueue task that frees data and mappings associated with a blit.
- * Also wakes up waiting processes. Each of these tasks handles one
- * blit engine only and may not be called on each interrupt.
- */
-static void
-via_dmablit_workqueue(struct work_struct *work)
-{
- drm_via_blitq_t *blitq = container_of(work, drm_via_blitq_t, wq);
- struct drm_device *dev = blitq->dev;
- struct pci_dev *pdev = to_pci_dev(dev->dev);
- unsigned long irqsave;
- drm_via_sg_info_t *cur_sg;
- int cur_released;
-
-
- DRM_DEBUG("Workqueue task called for blit engine %ld\n", (unsigned long)
- (blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues));
-
- spin_lock_irqsave(&blitq->blit_lock, irqsave);
-
- while (blitq->serviced != blitq->cur) {
-
- cur_released = blitq->serviced++;
-
- DRM_DEBUG("Releasing blit slot %d\n", cur_released);
-
- if (blitq->serviced >= VIA_NUM_BLIT_SLOTS)
- blitq->serviced = 0;
-
- cur_sg = blitq->blits[cur_released];
- blitq->num_free++;
-
- spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
-
- wake_up(&blitq->busy_queue);
-
- via_free_sg_info(pdev, cur_sg);
- kfree(cur_sg);
-
- spin_lock_irqsave(&blitq->blit_lock, irqsave);
- }
-
- spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
-}
-
-/*
- * Init all blit engines. Currently we use two, but some hardware have 4.
- */
-static void
-via_init_dmablit(struct drm_device *dev)
-{
- int i, j;
- drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
- struct pci_dev *pdev = to_pci_dev(dev->dev);
- drm_via_blitq_t *blitq;
-
- pci_set_master(pdev);
-
- for (i = 0; i < VIA_NUM_BLIT_ENGINES; ++i) {
- blitq = dev_priv->blit_queues + i;
- blitq->dev = dev;
- blitq->cur_blit_handle = 0;
- blitq->done_blit_handle = 0;
- blitq->head = 0;
- blitq->cur = 0;
- blitq->serviced = 0;
- blitq->num_free = VIA_NUM_BLIT_SLOTS - 1;
- blitq->num_outstanding = 0;
- blitq->is_active = 0;
- blitq->aborting = 0;
- spin_lock_init(&blitq->blit_lock);
- for (j = 0; j < VIA_NUM_BLIT_SLOTS; ++j)
- init_waitqueue_head(blitq->blit_queue + j);
- init_waitqueue_head(&blitq->busy_queue);
- INIT_WORK(&blitq->wq, via_dmablit_workqueue);
- timer_setup(&blitq->poll_timer, via_dmablit_timer, 0);
- }
-}
-
-/*
- * Build all info and do all mappings required for a blit.
- */
-static int
-via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
-{
- struct pci_dev *pdev = to_pci_dev(dev->dev);
- int draw = xfer->to_fb;
- int ret = 0;
-
- vsg->direction = (draw) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
- vsg->bounce_buffer = NULL;
-
- vsg->state = dr_via_sg_init;
-
- if (xfer->num_lines <= 0 || xfer->line_length <= 0) {
- DRM_ERROR("Zero size bitblt.\n");
- return -EINVAL;
- }
-
- /*
- * Below check is a driver limitation, not a hardware one. We
- * don't want to lock unused pages, and don't want to incoporate the
- * extra logic of avoiding them. Make sure there are no.
- * (Not a big limitation anyway.)
- */
-
- if ((xfer->mem_stride - xfer->line_length) > 2*PAGE_SIZE) {
- DRM_ERROR("Too large system memory stride. Stride: %d, "
- "Length: %d\n", xfer->mem_stride, xfer->line_length);
- return -EINVAL;
- }
-
- if ((xfer->mem_stride == xfer->line_length) &&
- (xfer->fb_stride == xfer->line_length)) {
- xfer->mem_stride *= xfer->num_lines;
- xfer->line_length = xfer->mem_stride;
- xfer->fb_stride = xfer->mem_stride;
- xfer->num_lines = 1;
- }
-
- /*
- * Don't lock an arbitrary large number of pages, since that causes a
- * DOS security hole.
- */
-
- if (xfer->num_lines > 2048 || (xfer->num_lines*xfer->mem_stride > (2048*2048*4))) {
- DRM_ERROR("Too large PCI DMA bitblt.\n");
- return -EINVAL;
- }
-
- /*
- * we allow a negative fb stride to allow flipping of images in
- * transfer.
- */
-
- if (xfer->mem_stride < xfer->line_length ||
- abs(xfer->fb_stride) < xfer->line_length) {
- DRM_ERROR("Invalid frame-buffer / memory stride.\n");
- return -EINVAL;
- }
-
- /*
- * A hardware bug seems to be worked around if system memory addresses start on
- * 16 byte boundaries. This seems a bit restrictive however. VIA is contacted
- * about this. Meanwhile, impose the following restrictions:
- */
-
-#ifdef VIA_BUGFREE
- if ((((unsigned long)xfer->mem_addr & 3) != ((unsigned long)xfer->fb_addr & 3)) ||
- ((xfer->num_lines > 1) && ((xfer->mem_stride & 3) != (xfer->fb_stride & 3)))) {
- DRM_ERROR("Invalid DRM bitblt alignment.\n");
- return -EINVAL;
- }
-#else
- if ((((unsigned long)xfer->mem_addr & 15) ||
- ((unsigned long)xfer->fb_addr & 3)) ||
- ((xfer->num_lines > 1) &&
- ((xfer->mem_stride & 15) || (xfer->fb_stride & 3)))) {
- DRM_ERROR("Invalid DRM bitblt alignment.\n");
- return -EINVAL;
- }
-#endif
-
- if (0 != (ret = via_lock_all_dma_pages(vsg, xfer))) {
- DRM_ERROR("Could not lock DMA pages.\n");
- via_free_sg_info(pdev, vsg);
- return ret;
- }
-
- via_map_blit_for_device(pdev, xfer, vsg, 0);
- if (0 != (ret = via_alloc_desc_pages(vsg))) {
- DRM_ERROR("Could not allocate DMA descriptor pages.\n");
- via_free_sg_info(pdev, vsg);
- return ret;
- }
- via_map_blit_for_device(pdev, xfer, vsg, 1);
-
- return 0;
-}
-
-/*
- * Reserve one free slot in the blit queue. Will wait for one second for one
- * to become available. Otherwise -EBUSY is returned.
- */
-static int
-via_dmablit_grab_slot(drm_via_blitq_t *blitq, int engine)
-{
- int ret = 0;
- unsigned long irqsave;
-
- DRM_DEBUG("Num free is %d\n", blitq->num_free);
- spin_lock_irqsave(&blitq->blit_lock, irqsave);
- while (blitq->num_free == 0) {
- spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
-
- VIA_WAIT_ON(ret, blitq->busy_queue, HZ, blitq->num_free > 0);
- if (ret)
- return (-EINTR == ret) ? -EAGAIN : ret;
-
- spin_lock_irqsave(&blitq->blit_lock, irqsave);
- }
-
- blitq->num_free--;
- spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
-
- return 0;
-}
-
-/*
- * Hand back a free slot if we changed our mind.
- */
-static void
-via_dmablit_release_slot(drm_via_blitq_t *blitq)
-{
- unsigned long irqsave;
-
- spin_lock_irqsave(&blitq->blit_lock, irqsave);
- blitq->num_free++;
- spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
- wake_up(&blitq->busy_queue);
-}
-
-/*
- * Grab a free slot. Build blit info and queue a blit.
- */
-static int
-via_dmablit(struct drm_device *dev, drm_via_dmablit_t *xfer)
-{
- drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
- drm_via_sg_info_t *vsg;
- drm_via_blitq_t *blitq;
- int ret;
- int engine;
- unsigned long irqsave;
-
- if (dev_priv == NULL) {
- DRM_ERROR("Called without initialization.\n");
- return -EINVAL;
- }
-
- engine = (xfer->to_fb) ? 0 : 1;
- blitq = dev_priv->blit_queues + engine;
- if (0 != (ret = via_dmablit_grab_slot(blitq, engine)))
- return ret;
- if (NULL == (vsg = kmalloc(sizeof(*vsg), GFP_KERNEL))) {
- via_dmablit_release_slot(blitq);
- return -ENOMEM;
- }
- if (0 != (ret = via_build_sg_info(dev, vsg, xfer))) {
- via_dmablit_release_slot(blitq);
- kfree(vsg);
- return ret;
- }
- spin_lock_irqsave(&blitq->blit_lock, irqsave);
-
- blitq->blits[blitq->head++] = vsg;
- if (blitq->head >= VIA_NUM_BLIT_SLOTS)
- blitq->head = 0;
- blitq->num_outstanding++;
- xfer->sync.sync_handle = ++blitq->cur_blit_handle;
-
- spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
- xfer->sync.engine = engine;
-
- via_dmablit_handler(dev, engine, 0);
-
- return 0;
-}
-
-/*
- * Sync on a previously submitted blit. Note that the X server use signals extensively, and
- * that there is a very big probability that this IOCTL will be interrupted by a signal. In that
- * case it returns with -EAGAIN for the signal to be delivered.
- * The caller should then reissue the IOCTL. This is similar to what is being done for drmGetLock().
- */
-static int
-via_dma_blit_sync(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- drm_via_blitsync_t *sync = data;
- int err;
-
- if (sync->engine >= VIA_NUM_BLIT_ENGINES)
- return -EINVAL;
-
- err = via_dmablit_sync(dev, sync->sync_handle, sync->engine);
-
- if (-EINTR == err)
- err = -EAGAIN;
-
- return err;
-}
-
-/*
- * Queue a blit and hand back a handle to be used for sync. This IOCTL may be interrupted by a signal
- * while waiting for a free slot in the blit queue. In that case it returns with -EAGAIN and should
- * be reissued. See the above IOCTL code.
- */
-static int
-via_dma_blit(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- drm_via_dmablit_t *xfer = data;
- int err;
-
- err = via_dmablit(dev, xfer);
-
- return err;
-}
-
-static u32 via_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
-{
- drm_via_private_t *dev_priv = dev->dev_private;
-
- if (pipe != 0)
- return 0;
-
- return atomic_read(&dev_priv->vbl_received);
-}
-
-static irqreturn_t via_driver_irq_handler(int irq, void *arg)
-{
- struct drm_device *dev = (struct drm_device *) arg;
- drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
- u32 status;
- int handled = 0;
- ktime_t cur_vblank;
- drm_via_irq_t *cur_irq = dev_priv->via_irqs;
- int i;
-
- status = via_read(dev_priv, VIA_REG_INTERRUPT);
- if (status & VIA_IRQ_VBLANK_PENDING) {
- atomic_inc(&dev_priv->vbl_received);
- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
- cur_vblank = ktime_get();
- if (dev_priv->last_vblank_valid) {
- dev_priv->nsec_per_vblank =
- ktime_sub(cur_vblank,
- dev_priv->last_vblank) >> 4;
- }
- dev_priv->last_vblank = cur_vblank;
- dev_priv->last_vblank_valid = 1;
- }
- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
- DRM_DEBUG("nsec per vblank is: %llu\n",
- ktime_to_ns(dev_priv->nsec_per_vblank));
- }
- drm_handle_vblank(dev, 0);
- handled = 1;
- }
-
- for (i = 0; i < dev_priv->num_irqs; ++i) {
- if (status & cur_irq->pending_mask) {
- atomic_inc(&cur_irq->irq_received);
- wake_up(&cur_irq->irq_queue);
- handled = 1;
- if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
- via_dmablit_handler(dev, 0, 1);
- else if (dev_priv->irq_map[drm_via_irq_dma1_td] == i)
- via_dmablit_handler(dev, 1, 1);
- }
- cur_irq++;
- }
-
- /* Acknowledge interrupts */
- via_write(dev_priv, VIA_REG_INTERRUPT, status);
-
-
- if (handled)
- return IRQ_HANDLED;
- else
- return IRQ_NONE;
-}
-
-static __inline__ void viadrv_acknowledge_irqs(drm_via_private_t *dev_priv)
-{
- u32 status;
-
- if (dev_priv) {
- /* Acknowledge interrupts */
- status = via_read(dev_priv, VIA_REG_INTERRUPT);
- via_write(dev_priv, VIA_REG_INTERRUPT, status |
- dev_priv->irq_pending_mask);
- }
-}
-
-static int via_enable_vblank(struct drm_device *dev, unsigned int pipe)
-{
- drm_via_private_t *dev_priv = dev->dev_private;
- u32 status;
-
- if (pipe != 0) {
- DRM_ERROR("%s: bad crtc %u\n", __func__, pipe);
- return -EINVAL;
- }
-
- status = via_read(dev_priv, VIA_REG_INTERRUPT);
- via_write(dev_priv, VIA_REG_INTERRUPT, status | VIA_IRQ_VBLANK_ENABLE);
-
- via_write8(dev_priv, 0x83d4, 0x11);
- via_write8_mask(dev_priv, 0x83d5, 0x30, 0x30);
-
- return 0;
-}
-
-static void via_disable_vblank(struct drm_device *dev, unsigned int pipe)
-{
- drm_via_private_t *dev_priv = dev->dev_private;
- u32 status;
-
- status = via_read(dev_priv, VIA_REG_INTERRUPT);
- via_write(dev_priv, VIA_REG_INTERRUPT, status & ~VIA_IRQ_VBLANK_ENABLE);
-
- via_write8(dev_priv, 0x83d4, 0x11);
- via_write8_mask(dev_priv, 0x83d5, 0x30, 0);
-
- if (pipe != 0)
- DRM_ERROR("%s: bad crtc %u\n", __func__, pipe);
-}
-
-static int
-via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence,
- unsigned int *sequence)
-{
- drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
- unsigned int cur_irq_sequence;
- drm_via_irq_t *cur_irq;
- int ret = 0;
- maskarray_t *masks;
- int real_irq;
-
- DRM_DEBUG("\n");
-
- if (!dev_priv) {
- DRM_ERROR("called with no initialization\n");
- return -EINVAL;
- }
-
- if (irq >= drm_via_irq_num) {
- DRM_ERROR("Trying to wait on unknown irq %d\n", irq);
- return -EINVAL;
- }
-
- real_irq = dev_priv->irq_map[irq];
-
- if (real_irq < 0) {
- DRM_ERROR("Video IRQ %d not available on this hardware.\n",
- irq);
- return -EINVAL;
- }
-
- masks = dev_priv->irq_masks;
- cur_irq = dev_priv->via_irqs + real_irq;
-
- if (masks[real_irq][2] && !force_sequence) {
- VIA_WAIT_ON(ret, cur_irq->irq_queue, 3 * HZ,
- ((via_read(dev_priv, masks[irq][2]) & masks[irq][3]) ==
- masks[irq][4]));
- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
- } else {
- VIA_WAIT_ON(ret, cur_irq->irq_queue, 3 * HZ,
- (((cur_irq_sequence =
- atomic_read(&cur_irq->irq_received)) -
- *sequence) <= (1 << 23)));
- }
- *sequence = cur_irq_sequence;
- return ret;
-}
-
-
-/*
- * drm_dma.h hooks
- */
-
-static void via_driver_irq_preinstall(struct drm_device *dev)
-{
- drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
- u32 status;
- drm_via_irq_t *cur_irq;
- int i;
-
- DRM_DEBUG("dev_priv: %p\n", dev_priv);
- if (dev_priv) {
- cur_irq = dev_priv->via_irqs;
-
- dev_priv->irq_enable_mask = VIA_IRQ_VBLANK_ENABLE;
- dev_priv->irq_pending_mask = VIA_IRQ_VBLANK_PENDING;
-
- if (dev_priv->chipset == VIA_PRO_GROUP_A ||
- dev_priv->chipset == VIA_DX9_0) {
- dev_priv->irq_masks = via_pro_group_a_irqs;
- dev_priv->num_irqs = via_num_pro_group_a;
- dev_priv->irq_map = via_irqmap_pro_group_a;
- } else {
- dev_priv->irq_masks = via_unichrome_irqs;
- dev_priv->num_irqs = via_num_unichrome;
- dev_priv->irq_map = via_irqmap_unichrome;
- }
-
- for (i = 0; i < dev_priv->num_irqs; ++i) {
- atomic_set(&cur_irq->irq_received, 0);
- cur_irq->enable_mask = dev_priv->irq_masks[i][0];
- cur_irq->pending_mask = dev_priv->irq_masks[i][1];
- init_waitqueue_head(&cur_irq->irq_queue);
- dev_priv->irq_enable_mask |= cur_irq->enable_mask;
- dev_priv->irq_pending_mask |= cur_irq->pending_mask;
- cur_irq++;
-
- DRM_DEBUG("Initializing IRQ %d\n", i);
- }
-
- dev_priv->last_vblank_valid = 0;
-
- /* Clear VSync interrupt regs */
- status = via_read(dev_priv, VIA_REG_INTERRUPT);
- via_write(dev_priv, VIA_REG_INTERRUPT, status &
- ~(dev_priv->irq_enable_mask));
-
- /* Clear bits if they're already high */
- viadrv_acknowledge_irqs(dev_priv);
- }
-}
-
-static int via_driver_irq_postinstall(struct drm_device *dev)
-{
- drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
- u32 status;
-
- DRM_DEBUG("fun: %s\n", __func__);
- if (!dev_priv)
- return -EINVAL;
-
- status = via_read(dev_priv, VIA_REG_INTERRUPT);
- via_write(dev_priv, VIA_REG_INTERRUPT, status | VIA_IRQ_GLOBAL
- | dev_priv->irq_enable_mask);
-
- /* Some magic, oh for some data sheets ! */
- via_write8(dev_priv, 0x83d4, 0x11);
- via_write8_mask(dev_priv, 0x83d5, 0x30, 0x30);
-
- return 0;
-}
-
-static void via_driver_irq_uninstall(struct drm_device *dev)
-{
- drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
- u32 status;
-
- DRM_DEBUG("\n");
- if (dev_priv) {
-
- /* Some more magic, oh for some data sheets ! */
-
- via_write8(dev_priv, 0x83d4, 0x11);
- via_write8_mask(dev_priv, 0x83d5, 0x30, 0);
-
- status = via_read(dev_priv, VIA_REG_INTERRUPT);
- via_write(dev_priv, VIA_REG_INTERRUPT, status &
- ~(VIA_IRQ_VBLANK_ENABLE | dev_priv->irq_enable_mask));
- }
-}
-
-static int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- drm_via_irqwait_t *irqwait = data;
- struct timespec64 now;
- int ret = 0;
- drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
- drm_via_irq_t *cur_irq = dev_priv->via_irqs;
- int force_sequence;
-
- if (irqwait->request.irq >= dev_priv->num_irqs) {
- DRM_ERROR("Trying to wait on unknown irq %d\n",
- irqwait->request.irq);
- return -EINVAL;
- }
-
- cur_irq += irqwait->request.irq;
-
- switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
- case VIA_IRQ_RELATIVE:
- irqwait->request.sequence +=
- atomic_read(&cur_irq->irq_received);
- irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
- break;
- case VIA_IRQ_ABSOLUTE:
- break;
- default:
- return -EINVAL;
- }
-
- if (irqwait->request.type & VIA_IRQ_SIGNAL) {
- DRM_ERROR("Signals on Via IRQs not implemented yet.\n");
- return -EINVAL;
- }
-
- force_sequence = (irqwait->request.type & VIA_IRQ_FORCE_SEQUENCE);
-
- ret = via_driver_irq_wait(dev, irqwait->request.irq, force_sequence,
- &irqwait->request.sequence);
- ktime_get_ts64(&now);
- irqwait->reply.tval_sec = now.tv_sec;
- irqwait->reply.tval_usec = now.tv_nsec / NSEC_PER_USEC;
-
- return ret;
-}
-
-static void via_init_futex(drm_via_private_t *dev_priv)
-{
- unsigned int i;
-
- DRM_DEBUG("\n");
-
- for (i = 0; i < VIA_NR_XVMC_LOCKS; ++i) {
- init_waitqueue_head(&(dev_priv->decoder_queue[i]));
- XVMCLOCKPTR(dev_priv->sarea_priv, i)->lock = 0;
- }
-}
-
-static void via_cleanup_futex(drm_via_private_t *dev_priv)
-{
-}
-
-static void via_release_futex(drm_via_private_t *dev_priv, int context)
-{
- unsigned int i;
- volatile int *lock;
-
- if (!dev_priv->sarea_priv)
- return;
-
- for (i = 0; i < VIA_NR_XVMC_LOCKS; ++i) {
- lock = (volatile int *)XVMCLOCKPTR(dev_priv->sarea_priv, i);
- if ((_DRM_LOCKING_CONTEXT(*lock) == context)) {
- if (_DRM_LOCK_IS_HELD(*lock)
- && (*lock & _DRM_LOCK_CONT)) {
- wake_up(&(dev_priv->decoder_queue[i]));
- }
- *lock = 0;
- }
- }
-}
-
-static int via_decoder_futex(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- drm_via_futex_t *fx = data;
- volatile int *lock;
- drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
- drm_via_sarea_t *sAPriv = dev_priv->sarea_priv;
- int ret = 0;
-
- DRM_DEBUG("\n");
-
- if (fx->lock >= VIA_NR_XVMC_LOCKS)
- return -EFAULT;
-
- lock = (volatile int *)XVMCLOCKPTR(sAPriv, fx->lock);
-
- switch (fx->func) {
- case VIA_FUTEX_WAIT:
- VIA_WAIT_ON(ret, dev_priv->decoder_queue[fx->lock],
- (fx->ms / 10) * (HZ / 100), *lock != fx->val);
- return ret;
- case VIA_FUTEX_WAKE:
- wake_up(&(dev_priv->decoder_queue[fx->lock]));
- return 0;
- }
- return 0;
-}
-
-static int via_agp_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- drm_via_agp_t *agp = data;
- drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
-
- mutex_lock(&dev->struct_mutex);
- drm_mm_init(&dev_priv->agp_mm, 0, agp->size >> VIA_MM_ALIGN_SHIFT);
-
- dev_priv->agp_initialized = 1;
- dev_priv->agp_offset = agp->offset;
- mutex_unlock(&dev->struct_mutex);
-
- DRM_DEBUG("offset = %u, size = %u\n", agp->offset, agp->size);
- return 0;
-}
-
-static int via_fb_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- drm_via_fb_t *fb = data;
- drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
-
- mutex_lock(&dev->struct_mutex);
- drm_mm_init(&dev_priv->vram_mm, 0, fb->size >> VIA_MM_ALIGN_SHIFT);
-
- dev_priv->vram_initialized = 1;
- dev_priv->vram_offset = fb->offset;
-
- mutex_unlock(&dev->struct_mutex);
- DRM_DEBUG("offset = %u, size = %u\n", fb->offset, fb->size);
-
- return 0;
-
-}
-
-static int via_final_context(struct drm_device *dev, int context)
-{
- drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
-
- via_release_futex(dev_priv, context);
-
- /* Linux specific until context tracking code gets ported to BSD */
- /* Last context, perform cleanup */
- if (list_is_singular(&dev->ctxlist)) {
- DRM_DEBUG("Last Context\n");
- drm_legacy_irq_uninstall(dev);
- via_cleanup_futex(dev_priv);
- via_do_cleanup_map(dev);
- }
- return 1;
-}
-
-static void via_lastclose(struct drm_device *dev)
-{
- drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
-
- if (!dev_priv)
- return;
-
- mutex_lock(&dev->struct_mutex);
- if (dev_priv->vram_initialized) {
- drm_mm_takedown(&dev_priv->vram_mm);
- dev_priv->vram_initialized = 0;
- }
- if (dev_priv->agp_initialized) {
- drm_mm_takedown(&dev_priv->agp_mm);
- dev_priv->agp_initialized = 0;
- }
- mutex_unlock(&dev->struct_mutex);
-}
-
-static int via_mem_alloc(struct drm_device *dev, void *data,
- struct drm_file *file)
-{
- drm_via_mem_t *mem = data;
- int retval = 0, user_key;
- struct via_memblock *item;
- drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
- struct via_file_private *file_priv = file->driver_priv;
- unsigned long tmpSize;
-
- if (mem->type > VIA_MEM_AGP) {
- DRM_ERROR("Unknown memory type allocation\n");
- return -EINVAL;
- }
- mutex_lock(&dev->struct_mutex);
- if (0 == ((mem->type == VIA_MEM_VIDEO) ? dev_priv->vram_initialized :
- dev_priv->agp_initialized)) {
- mutex_unlock(&dev->struct_mutex);
- DRM_ERROR
- ("Attempt to allocate from uninitialized memory manager.\n");
- return -EINVAL;
- }
-
- item = kzalloc(sizeof(*item), GFP_KERNEL);
- if (!item) {
- retval = -ENOMEM;
- goto fail_alloc;
- }
-
- tmpSize = (mem->size + VIA_MM_ALIGN_MASK) >> VIA_MM_ALIGN_SHIFT;
- if (mem->type == VIA_MEM_AGP)
- retval = drm_mm_insert_node(&dev_priv->agp_mm,
- &item->mm_node,
- tmpSize);
- else
- retval = drm_mm_insert_node(&dev_priv->vram_mm,
- &item->mm_node,
- tmpSize);
- if (retval)
- goto fail_alloc;
-
- retval = idr_alloc(&dev_priv->object_idr, item, 1, 0, GFP_KERNEL);
- if (retval < 0)
- goto fail_idr;
- user_key = retval;
-
- list_add(&item->owner_list, &file_priv->obj_list);
- mutex_unlock(&dev->struct_mutex);
-
- mem->offset = ((mem->type == VIA_MEM_VIDEO) ?
- dev_priv->vram_offset : dev_priv->agp_offset) +
- ((item->mm_node.start) << VIA_MM_ALIGN_SHIFT);
- mem->index = user_key;
-
- return 0;
-
-fail_idr:
- drm_mm_remove_node(&item->mm_node);
-fail_alloc:
- kfree(item);
- mutex_unlock(&dev->struct_mutex);
-
- mem->offset = 0;
- mem->size = 0;
- mem->index = 0;
- DRM_DEBUG("Video memory allocation failed\n");
-
- return retval;
-}
-
-static int via_mem_free(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- drm_via_private_t *dev_priv = dev->dev_private;
- drm_via_mem_t *mem = data;
- struct via_memblock *obj;
-
- mutex_lock(&dev->struct_mutex);
- obj = idr_find(&dev_priv->object_idr, mem->index);
- if (obj == NULL) {
- mutex_unlock(&dev->struct_mutex);
- return -EINVAL;
- }
-
- idr_remove(&dev_priv->object_idr, mem->index);
- list_del(&obj->owner_list);
- drm_mm_remove_node(&obj->mm_node);
- kfree(obj);
- mutex_unlock(&dev->struct_mutex);
-
- DRM_DEBUG("free = 0x%lx\n", mem->index);
-
- return 0;
-}
-
-
-static void via_reclaim_buffers_locked(struct drm_device *dev,
- struct drm_file *file)
-{
- struct via_file_private *file_priv = file->driver_priv;
- struct via_memblock *entry, *next;
-
- if (!(dev->master && file->master->lock.hw_lock))
- return;
-
- drm_legacy_idlelock_take(&file->master->lock);
-
- mutex_lock(&dev->struct_mutex);
- if (list_empty(&file_priv->obj_list)) {
- mutex_unlock(&dev->struct_mutex);
- drm_legacy_idlelock_release(&file->master->lock);
-
- return;
- }
-
- via_driver_dma_quiescent(dev);
-
- list_for_each_entry_safe(entry, next, &file_priv->obj_list,
- owner_list) {
- list_del(&entry->owner_list);
- drm_mm_remove_node(&entry->mm_node);
- kfree(entry);
- }
- mutex_unlock(&dev->struct_mutex);
-
- drm_legacy_idlelock_release(&file->master->lock);
-
- return;
-}
-
-static int via_do_init_map(struct drm_device *dev, drm_via_init_t *init)
-{
- drm_via_private_t *dev_priv = dev->dev_private;
-
- DRM_DEBUG("\n");
-
- dev_priv->sarea = drm_legacy_getsarea(dev);
- if (!dev_priv->sarea) {
- DRM_ERROR("could not find sarea!\n");
- dev->dev_private = (void *)dev_priv;
- via_do_cleanup_map(dev);
- return -EINVAL;
- }
-
- dev_priv->fb = drm_legacy_findmap(dev, init->fb_offset);
- if (!dev_priv->fb) {
- DRM_ERROR("could not find framebuffer!\n");
- dev->dev_private = (void *)dev_priv;
- via_do_cleanup_map(dev);
- return -EINVAL;
- }
- dev_priv->mmio = drm_legacy_findmap(dev, init->mmio_offset);
- if (!dev_priv->mmio) {
- DRM_ERROR("could not find mmio region!\n");
- dev->dev_private = (void *)dev_priv;
- via_do_cleanup_map(dev);
- return -EINVAL;
- }
-
- dev_priv->sarea_priv =
- (drm_via_sarea_t *) ((u8 *) dev_priv->sarea->handle +
- init->sarea_priv_offset);
-
- dev_priv->agpAddr = init->agpAddr;
-
- via_init_futex(dev_priv);
-
- via_init_dmablit(dev);
-
- dev->dev_private = (void *)dev_priv;
- return 0;
-}
-
-int via_do_cleanup_map(struct drm_device *dev)
-{
- via_dma_cleanup(dev);
-
- return 0;
-}
-
-static int via_map_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- drm_via_init_t *init = data;
-
- DRM_DEBUG("\n");
-
- switch (init->func) {
- case VIA_INIT_MAP:
- return via_do_init_map(dev, init);
- case VIA_CLEANUP_MAP:
- return via_do_cleanup_map(dev);
- }
-
- return -EINVAL;
-}
-
-static int via_driver_load(struct drm_device *dev, unsigned long chipset)
-{
- struct pci_dev *pdev = to_pci_dev(dev->dev);
- drm_via_private_t *dev_priv;
- int ret = 0;
-
- dev_priv = kzalloc(sizeof(drm_via_private_t), GFP_KERNEL);
- if (dev_priv == NULL)
- return -ENOMEM;
-
- idr_init_base(&dev_priv->object_idr, 1);
- dev->dev_private = (void *)dev_priv;
-
- dev_priv->chipset = chipset;
-
- pci_set_master(pdev);
-
- ret = drm_vblank_init(dev, 1);
- if (ret) {
- kfree(dev_priv);
- return ret;
- }
-
- return 0;
-}
-
-static void via_driver_unload(struct drm_device *dev)
-{
- drm_via_private_t *dev_priv = dev->dev_private;
-
- idr_destroy(&dev_priv->object_idr);
-
- kfree(dev_priv);
-}
-
-static void via_cmdbuf_start(drm_via_private_t *dev_priv);
-static void via_cmdbuf_pause(drm_via_private_t *dev_priv);
-static void via_cmdbuf_reset(drm_via_private_t *dev_priv);
-static void via_cmdbuf_rewind(drm_via_private_t *dev_priv);
-static int via_wait_idle(drm_via_private_t *dev_priv);
-static void via_pad_cache(drm_via_private_t *dev_priv, int qwords);
-
-/*
- * Free space in command buffer.
- */
-
-static uint32_t via_cmdbuf_space(drm_via_private_t *dev_priv)
-{
- uint32_t agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
- uint32_t hw_addr = *(dev_priv->hw_addr_ptr) - agp_base;
-
- return ((hw_addr <= dev_priv->dma_low) ?
- (dev_priv->dma_high + hw_addr - dev_priv->dma_low) :
- (hw_addr - dev_priv->dma_low));
-}
-
-/*
- * How much does the command regulator lag behind?
- */
-
-static uint32_t via_cmdbuf_lag(drm_via_private_t *dev_priv)
-{
- uint32_t agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
- uint32_t hw_addr = *(dev_priv->hw_addr_ptr) - agp_base;
-
- return ((hw_addr <= dev_priv->dma_low) ?
- (dev_priv->dma_low - hw_addr) :
- (dev_priv->dma_wrap + dev_priv->dma_low - hw_addr));
-}
-
-/*
- * Check that the given size fits in the buffer, otherwise wait.
- */
-
-static inline int
-via_cmdbuf_wait(drm_via_private_t *dev_priv, unsigned int size)
-{
- uint32_t agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
- uint32_t cur_addr, hw_addr, next_addr;
- volatile uint32_t *hw_addr_ptr;
- uint32_t count;
- hw_addr_ptr = dev_priv->hw_addr_ptr;
- cur_addr = dev_priv->dma_low;
- next_addr = cur_addr + size + 512 * 1024;
- count = 1000000;
- do {
- hw_addr = *hw_addr_ptr - agp_base;
- if (count-- == 0) {
- DRM_ERROR
- ("via_cmdbuf_wait timed out hw %x cur_addr %x next_addr %x\n",
- hw_addr, cur_addr, next_addr);
- return -1;
- }
- if ((cur_addr < hw_addr) && (next_addr >= hw_addr))
- msleep(1);
- } while ((cur_addr < hw_addr) && (next_addr >= hw_addr));
- return 0;
-}
-
-/*
- * Checks whether buffer head has reach the end. Rewind the ring buffer
- * when necessary.
- *
- * Returns virtual pointer to ring buffer.
- */
-
-static inline uint32_t *via_check_dma(drm_via_private_t * dev_priv,
- unsigned int size)
-{
- if ((dev_priv->dma_low + size + 4 * CMDBUF_ALIGNMENT_SIZE) >
- dev_priv->dma_high) {
- via_cmdbuf_rewind(dev_priv);
- }
- if (via_cmdbuf_wait(dev_priv, size) != 0)
- return NULL;
-
- return (uint32_t *) (dev_priv->dma_ptr + dev_priv->dma_low);
-}
-
-int via_dma_cleanup(struct drm_device *dev)
-{
- if (dev->dev_private) {
- drm_via_private_t *dev_priv =
- (drm_via_private_t *) dev->dev_private;
-
- if (dev_priv->ring.virtual_start && dev_priv->mmio) {
- via_cmdbuf_reset(dev_priv);
-
- drm_legacy_ioremapfree(&dev_priv->ring.map, dev);
- dev_priv->ring.virtual_start = NULL;
- }
-
- }
-
- return 0;
-}
-
-static int via_initialize(struct drm_device *dev,
- drm_via_private_t *dev_priv,
- drm_via_dma_init_t *init)
-{
- if (!dev_priv || !dev_priv->mmio) {
- DRM_ERROR("via_dma_init called before via_map_init\n");
- return -EFAULT;
- }
-
- if (dev_priv->ring.virtual_start != NULL) {
- DRM_ERROR("called again without calling cleanup\n");
- return -EFAULT;
- }
-
- if (!dev->agp || !dev->agp->base) {
- DRM_ERROR("called with no agp memory available\n");
- return -EFAULT;
- }
-
- if (dev_priv->chipset == VIA_DX9_0) {
- DRM_ERROR("AGP DMA is not supported on this chip\n");
- return -EINVAL;
- }
-
- dev_priv->ring.map.offset = dev->agp->base + init->offset;
- dev_priv->ring.map.size = init->size;
- dev_priv->ring.map.type = 0;
- dev_priv->ring.map.flags = 0;
- dev_priv->ring.map.mtrr = 0;
-
- drm_legacy_ioremap(&dev_priv->ring.map, dev);
-
- if (dev_priv->ring.map.handle == NULL) {
- via_dma_cleanup(dev);
- DRM_ERROR("can not ioremap virtual address for"
- " ring buffer\n");
- return -ENOMEM;
- }
-
- dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
-
- dev_priv->dma_ptr = dev_priv->ring.virtual_start;
- dev_priv->dma_low = 0;
- dev_priv->dma_high = init->size;
- dev_priv->dma_wrap = init->size;
- dev_priv->dma_offset = init->offset;
- dev_priv->last_pause_ptr = NULL;
- dev_priv->hw_addr_ptr =
- (volatile uint32_t *)((char *)dev_priv->mmio->handle +
- init->reg_pause_addr);
-
- via_cmdbuf_start(dev_priv);
-
- return 0;
-}
-
-static int via_dma_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
- drm_via_dma_init_t *init = data;
- int retcode = 0;
-
- switch (init->func) {
- case VIA_INIT_DMA:
- if (!capable(CAP_SYS_ADMIN))
- retcode = -EPERM;
- else
- retcode = via_initialize(dev, dev_priv, init);
- break;
- case VIA_CLEANUP_DMA:
- if (!capable(CAP_SYS_ADMIN))
- retcode = -EPERM;
- else
- retcode = via_dma_cleanup(dev);
- break;
- case VIA_DMA_INITIALIZED:
- retcode = (dev_priv->ring.virtual_start != NULL) ?
- 0 : -EFAULT;
- break;
- default:
- retcode = -EINVAL;
- break;
- }
-
- return retcode;
-}
-
-static int via_dispatch_cmdbuffer(struct drm_device *dev, drm_via_cmdbuffer_t *cmd)
-{
- drm_via_private_t *dev_priv;
- uint32_t *vb;
- int ret;
-
- dev_priv = (drm_via_private_t *) dev->dev_private;
-
- if (dev_priv->ring.virtual_start == NULL) {
- DRM_ERROR("called without initializing AGP ring buffer.\n");
- return -EFAULT;
- }
-
- if (cmd->size > VIA_PCI_BUF_SIZE)
- return -ENOMEM;
-
- if (copy_from_user(dev_priv->pci_buf, cmd->buf, cmd->size))
- return -EFAULT;
-
- /*
- * Running this function on AGP memory is dead slow. Therefore
- * we run it on a temporary cacheable system memory buffer and
- * copy it to AGP memory when ready.
- */
-
- if ((ret =
- via_verify_command_stream((uint32_t *) dev_priv->pci_buf,
- cmd->size, dev, 1))) {
- return ret;
- }
-
- vb = via_check_dma(dev_priv, (cmd->size < 0x100) ? 0x102 : cmd->size);
- if (vb == NULL)
- return -EAGAIN;
-
- memcpy(vb, dev_priv->pci_buf, cmd->size);
-
- dev_priv->dma_low += cmd->size;
-
- /*
- * Small submissions somehow stalls the CPU. (AGP cache effects?)
- * pad to greater size.
- */
-
- if (cmd->size < 0x100)
- via_pad_cache(dev_priv, (0x100 - cmd->size) >> 3);
- via_cmdbuf_pause(dev_priv);
-
- return 0;
-}
-
-int via_driver_dma_quiescent(struct drm_device *dev)
-{
- drm_via_private_t *dev_priv = dev->dev_private;
-
- if (!via_wait_idle(dev_priv))
- return -EBUSY;
- return 0;
-}
-
-static int via_flush_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
-
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- return via_driver_dma_quiescent(dev);
-}
-
-static int via_cmdbuffer(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- drm_via_cmdbuffer_t *cmdbuf = data;
- int ret;
-
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- DRM_DEBUG("buf %p size %lu\n", cmdbuf->buf, cmdbuf->size);
-
- ret = via_dispatch_cmdbuffer(dev, cmdbuf);
- return ret;
-}
-
-static int via_dispatch_pci_cmdbuffer(struct drm_device *dev,
- drm_via_cmdbuffer_t *cmd)
-{
- drm_via_private_t *dev_priv = dev->dev_private;
- int ret;
-
- if (cmd->size > VIA_PCI_BUF_SIZE)
- return -ENOMEM;
- if (copy_from_user(dev_priv->pci_buf, cmd->buf, cmd->size))
- return -EFAULT;
-
- if ((ret =
- via_verify_command_stream((uint32_t *) dev_priv->pci_buf,
- cmd->size, dev, 0))) {
- return ret;
- }
-
- ret =
- via_parse_command_stream(dev, (const uint32_t *)dev_priv->pci_buf,
- cmd->size);
- return ret;
-}
-
-static int via_pci_cmdbuffer(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- drm_via_cmdbuffer_t *cmdbuf = data;
- int ret;
-
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- DRM_DEBUG("buf %p size %lu\n", cmdbuf->buf, cmdbuf->size);
-
- ret = via_dispatch_pci_cmdbuffer(dev, cmdbuf);
- return ret;
-}
-
-static inline uint32_t *via_align_buffer(drm_via_private_t *dev_priv,
- uint32_t * vb, int qw_count)
-{
- for (; qw_count > 0; --qw_count)
- VIA_OUT_RING_QW(HC_DUMMY, HC_DUMMY);
- return vb;
-}
-
-/*
- * This function is used internally by ring buffer management code.
- *
- * Returns virtual pointer to ring buffer.
- */
-static inline uint32_t *via_get_dma(drm_via_private_t *dev_priv)
-{
- return (uint32_t *) (dev_priv->dma_ptr + dev_priv->dma_low);
-}
-
-/*
- * Hooks a segment of data into the tail of the ring-buffer by
- * modifying the pause address stored in the buffer itself. If
- * the regulator has already paused, restart it.
- */
-static int via_hook_segment(drm_via_private_t *dev_priv,
- uint32_t pause_addr_hi, uint32_t pause_addr_lo,
- int no_pci_fire)
-{
- int paused, count;
- volatile uint32_t *paused_at = dev_priv->last_pause_ptr;
- uint32_t reader, ptr;
- uint32_t diff;
-
- paused = 0;
- via_flush_write_combine();
- (void) *(volatile uint32_t *)(via_get_dma(dev_priv) - 1);
-
- *paused_at = pause_addr_lo;
- via_flush_write_combine();
- (void) *paused_at;
-
- reader = *(dev_priv->hw_addr_ptr);
- ptr = ((volatile char *)paused_at - dev_priv->dma_ptr) +
- dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr + 4;
-
- dev_priv->last_pause_ptr = via_get_dma(dev_priv) - 1;
-
- /*
- * If there is a possibility that the command reader will
- * miss the new pause address and pause on the old one,
- * In that case we need to program the new start address
- * using PCI.
- */
-
- diff = (uint32_t) (ptr - reader) - dev_priv->dma_diff;
- count = 10000000;
- while (diff == 0 && count--) {
- paused = (via_read(dev_priv, 0x41c) & 0x80000000);
- if (paused)
- break;
- reader = *(dev_priv->hw_addr_ptr);
- diff = (uint32_t) (ptr - reader) - dev_priv->dma_diff;
- }
-
- paused = via_read(dev_priv, 0x41c) & 0x80000000;
-
- if (paused && !no_pci_fire) {
- reader = *(dev_priv->hw_addr_ptr);
- diff = (uint32_t) (ptr - reader) - dev_priv->dma_diff;
- diff &= (dev_priv->dma_high - 1);
- if (diff != 0 && diff < (dev_priv->dma_high >> 1)) {
- DRM_ERROR("Paused at incorrect address. "
- "0x%08x, 0x%08x 0x%08x\n",
- ptr, reader, dev_priv->dma_diff);
- } else if (diff == 0) {
- /*
- * There is a concern that these writes may stall the PCI bus
- * if the GPU is not idle. However, idling the GPU first
- * doesn't make a difference.
- */
-
- via_write(dev_priv, VIA_REG_TRANSET, (HC_ParaType_PreCR << 16));
- via_write(dev_priv, VIA_REG_TRANSPACE, pause_addr_hi);
- via_write(dev_priv, VIA_REG_TRANSPACE, pause_addr_lo);
- via_read(dev_priv, VIA_REG_TRANSPACE);
- }
- }
- return paused;
-}
-
-static int via_wait_idle(drm_via_private_t *dev_priv)
-{
- int count = 10000000;
-
- while (!(via_read(dev_priv, VIA_REG_STATUS) & VIA_VR_QUEUE_BUSY) && --count)
- ;
-
- while (count && (via_read(dev_priv, VIA_REG_STATUS) &
- (VIA_CMD_RGTR_BUSY | VIA_2D_ENG_BUSY |
- VIA_3D_ENG_BUSY)))
- --count;
- return count;
-}
-
-static uint32_t *via_align_cmd(drm_via_private_t *dev_priv, uint32_t cmd_type,
- uint32_t addr, uint32_t *cmd_addr_hi,
- uint32_t *cmd_addr_lo, int skip_wait)
-{
- uint32_t agp_base;
- uint32_t cmd_addr, addr_lo, addr_hi;
- uint32_t *vb;
- uint32_t qw_pad_count;
-
- if (!skip_wait)
- via_cmdbuf_wait(dev_priv, 2 * CMDBUF_ALIGNMENT_SIZE);
-
- vb = via_get_dma(dev_priv);
- VIA_OUT_RING_QW(HC_HEADER2 | ((VIA_REG_TRANSET >> 2) << 12) |
- (VIA_REG_TRANSPACE >> 2), HC_ParaType_PreCR << 16);
- agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
- qw_pad_count = (CMDBUF_ALIGNMENT_SIZE >> 3) -
- ((dev_priv->dma_low & CMDBUF_ALIGNMENT_MASK) >> 3);
-
- cmd_addr = (addr) ? addr :
- agp_base + dev_priv->dma_low - 8 + (qw_pad_count << 3);
- addr_lo = ((HC_SubA_HAGPBpL << 24) | (cmd_type & HC_HAGPBpID_MASK) |
- (cmd_addr & HC_HAGPBpL_MASK));
- addr_hi = ((HC_SubA_HAGPBpH << 24) | (cmd_addr >> 24));
-
- vb = via_align_buffer(dev_priv, vb, qw_pad_count - 1);
- VIA_OUT_RING_QW(*cmd_addr_hi = addr_hi, *cmd_addr_lo = addr_lo);
- return vb;
-}
-
-static void via_cmdbuf_start(drm_via_private_t *dev_priv)
-{
- uint32_t pause_addr_lo, pause_addr_hi;
- uint32_t start_addr, start_addr_lo;
- uint32_t end_addr, end_addr_lo;
- uint32_t command;
- uint32_t agp_base;
- uint32_t ptr;
- uint32_t reader;
- int count;
-
- dev_priv->dma_low = 0;
-
- agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
- start_addr = agp_base;
- end_addr = agp_base + dev_priv->dma_high;
-
- start_addr_lo = ((HC_SubA_HAGPBstL << 24) | (start_addr & 0xFFFFFF));
- end_addr_lo = ((HC_SubA_HAGPBendL << 24) | (end_addr & 0xFFFFFF));
- command = ((HC_SubA_HAGPCMNT << 24) | (start_addr >> 24) |
- ((end_addr & 0xff000000) >> 16));
-
- dev_priv->last_pause_ptr =
- via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0,
- &pause_addr_hi, &pause_addr_lo, 1) - 1;
-
- via_flush_write_combine();
- (void) *(volatile uint32_t *)dev_priv->last_pause_ptr;
-
- via_write(dev_priv, VIA_REG_TRANSET, (HC_ParaType_PreCR << 16));
- via_write(dev_priv, VIA_REG_TRANSPACE, command);
- via_write(dev_priv, VIA_REG_TRANSPACE, start_addr_lo);
- via_write(dev_priv, VIA_REG_TRANSPACE, end_addr_lo);
-
- via_write(dev_priv, VIA_REG_TRANSPACE, pause_addr_hi);
- via_write(dev_priv, VIA_REG_TRANSPACE, pause_addr_lo);
- wmb();
- via_write(dev_priv, VIA_REG_TRANSPACE, command | HC_HAGPCMNT_MASK);
- via_read(dev_priv, VIA_REG_TRANSPACE);
-
- dev_priv->dma_diff = 0;
-
- count = 10000000;
- while (!(via_read(dev_priv, 0x41c) & 0x80000000) && count--);
-
- reader = *(dev_priv->hw_addr_ptr);
- ptr = ((volatile char *)dev_priv->last_pause_ptr - dev_priv->dma_ptr) +
- dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr + 4;
-
- /*
- * This is the difference between where we tell the
- * command reader to pause and where it actually pauses.
- * This differs between hw implementation so we need to
- * detect it.
- */
-
- dev_priv->dma_diff = ptr - reader;
-}
-
-static void via_pad_cache(drm_via_private_t *dev_priv, int qwords)
-{
- uint32_t *vb;
-
- via_cmdbuf_wait(dev_priv, qwords + 2);
- vb = via_get_dma(dev_priv);
- VIA_OUT_RING_QW(HC_HEADER2, HC_ParaType_NotTex << 16);
- via_align_buffer(dev_priv, vb, qwords);
-}
-
-static inline void via_dummy_bitblt(drm_via_private_t *dev_priv)
-{
- uint32_t *vb = via_get_dma(dev_priv);
- SetReg2DAGP(0x0C, (0 | (0 << 16)));
- SetReg2DAGP(0x10, 0 | (0 << 16));
- SetReg2DAGP(0x0, 0x1 | 0x2000 | 0xAA000000);
-}
-
-static void via_cmdbuf_jump(drm_via_private_t *dev_priv)
-{
- uint32_t pause_addr_lo, pause_addr_hi;
- uint32_t jump_addr_lo, jump_addr_hi;
- volatile uint32_t *last_pause_ptr;
- uint32_t dma_low_save1, dma_low_save2;
-
- via_align_cmd(dev_priv, HC_HAGPBpID_JUMP, 0, &jump_addr_hi,
- &jump_addr_lo, 0);
-
- dev_priv->dma_wrap = dev_priv->dma_low;
-
- /*
- * Wrap command buffer to the beginning.
- */
-
- dev_priv->dma_low = 0;
- if (via_cmdbuf_wait(dev_priv, CMDBUF_ALIGNMENT_SIZE) != 0)
- DRM_ERROR("via_cmdbuf_jump failed\n");
-
- via_dummy_bitblt(dev_priv);
- via_dummy_bitblt(dev_priv);
-
- last_pause_ptr =
- via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
- &pause_addr_lo, 0) - 1;
- via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
- &pause_addr_lo, 0);
-
- *last_pause_ptr = pause_addr_lo;
- dma_low_save1 = dev_priv->dma_low;
-
- /*
- * Now, set a trap that will pause the regulator if it tries to rerun the old
- * command buffer. (Which may happen if via_hook_segment detecs a command regulator pause
- * and reissues the jump command over PCI, while the regulator has already taken the jump
- * and actually paused at the current buffer end).
- * There appears to be no other way to detect this condition, since the hw_addr_pointer
- * does not seem to get updated immediately when a jump occurs.
- */
-
- last_pause_ptr =
- via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
- &pause_addr_lo, 0) - 1;
- via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
- &pause_addr_lo, 0);
- *last_pause_ptr = pause_addr_lo;
-
- dma_low_save2 = dev_priv->dma_low;
- dev_priv->dma_low = dma_low_save1;
- via_hook_segment(dev_priv, jump_addr_hi, jump_addr_lo, 0);
- dev_priv->dma_low = dma_low_save2;
- via_hook_segment(dev_priv, pause_addr_hi, pause_addr_lo, 0);
-}
-
-
-static void via_cmdbuf_rewind(drm_via_private_t *dev_priv)
-{
- via_cmdbuf_jump(dev_priv);
-}
-
-static void via_cmdbuf_flush(drm_via_private_t *dev_priv, uint32_t cmd_type)
-{
- uint32_t pause_addr_lo, pause_addr_hi;
-
- via_align_cmd(dev_priv, cmd_type, 0, &pause_addr_hi, &pause_addr_lo, 0);
- via_hook_segment(dev_priv, pause_addr_hi, pause_addr_lo, 0);
-}
-
-static void via_cmdbuf_pause(drm_via_private_t *dev_priv)
-{
- via_cmdbuf_flush(dev_priv, HC_HAGPBpID_PAUSE);
-}
-
-static void via_cmdbuf_reset(drm_via_private_t *dev_priv)
-{
- via_cmdbuf_flush(dev_priv, HC_HAGPBpID_STOP);
- via_wait_idle(dev_priv);
-}
-
-/*
- * User interface to the space and lag functions.
- */
-
-static int via_cmdbuf_size(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- drm_via_cmdbuf_size_t *d_siz = data;
- int ret = 0;
- uint32_t tmp_size, count;
- drm_via_private_t *dev_priv;
-
- DRM_DEBUG("\n");
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
- dev_priv = (drm_via_private_t *) dev->dev_private;
-
- if (dev_priv->ring.virtual_start == NULL) {
- DRM_ERROR("called without initializing AGP ring buffer.\n");
- return -EFAULT;
- }
-
- count = 1000000;
- tmp_size = d_siz->size;
- switch (d_siz->func) {
- case VIA_CMDBUF_SPACE:
- while (((tmp_size = via_cmdbuf_space(dev_priv)) < d_siz->size)
- && --count) {
- if (!d_siz->wait)
- break;
- }
- if (!count) {
- DRM_ERROR("VIA_CMDBUF_SPACE timed out.\n");
- ret = -EAGAIN;
- }
- break;
- case VIA_CMDBUF_LAG:
- while (((tmp_size = via_cmdbuf_lag(dev_priv)) > d_siz->size)
- && --count) {
- if (!d_siz->wait)
- break;
- }
- if (!count) {
- DRM_ERROR("VIA_CMDBUF_LAG timed out.\n");
- ret = -EAGAIN;
- }
- break;
- default:
- ret = -EFAULT;
- }
- d_siz->size = tmp_size;
-
- return ret;
-}
-
-static const struct drm_ioctl_desc via_ioctls[] = {
- DRM_IOCTL_DEF_DRV(VIA_ALLOCMEM, via_mem_alloc, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(VIA_FREEMEM, via_mem_free, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(VIA_AGP_INIT, via_agp_init, DRM_AUTH|DRM_MASTER),
- DRM_IOCTL_DEF_DRV(VIA_FB_INIT, via_fb_init, DRM_AUTH|DRM_MASTER),
- DRM_IOCTL_DEF_DRV(VIA_MAP_INIT, via_map_init, DRM_AUTH|DRM_MASTER),
- DRM_IOCTL_DEF_DRV(VIA_DEC_FUTEX, via_decoder_futex, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(VIA_DMA_INIT, via_dma_init, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(VIA_CMDBUFFER, via_cmdbuffer, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(VIA_FLUSH, via_flush_ioctl, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(VIA_PCICMD, via_pci_cmdbuffer, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(VIA_CMDBUF_SIZE, via_cmdbuf_size, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(VIA_WAIT_IRQ, via_wait_irq, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(VIA_DMA_BLIT, via_dma_blit, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(VIA_BLIT_SYNC, via_dma_blit_sync, DRM_AUTH)
-};
-
-static int via_max_ioctl = ARRAY_SIZE(via_ioctls);
-static int via_driver_open(struct drm_device *dev, struct drm_file *file)
-{
- struct via_file_private *file_priv;
-
- DRM_DEBUG_DRIVER("\n");
- file_priv = kmalloc(sizeof(*file_priv), GFP_KERNEL);
- if (!file_priv)
- return -ENOMEM;
-
- file->driver_priv = file_priv;
-
- INIT_LIST_HEAD(&file_priv->obj_list);
-
- return 0;
-}
-
-static void via_driver_postclose(struct drm_device *dev, struct drm_file *file)
-{
- struct via_file_private *file_priv = file->driver_priv;
-
- kfree(file_priv);
-}
-
-static struct pci_device_id pciidlist[] = {
- viadrv_PCI_IDS
-};
-
-static const struct file_operations via_driver_fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = drm_ioctl,
- .mmap = drm_legacy_mmap,
- .poll = drm_poll,
- .compat_ioctl = drm_compat_ioctl,
- .llseek = noop_llseek,
-};
-
-static struct drm_driver driver = {
- .driver_features =
- DRIVER_USE_AGP | DRIVER_HAVE_IRQ | DRIVER_LEGACY,
- .load = via_driver_load,
- .unload = via_driver_unload,
- .open = via_driver_open,
- .preclose = via_reclaim_buffers_locked,
- .postclose = via_driver_postclose,
- .context_dtor = via_final_context,
- .get_vblank_counter = via_get_vblank_counter,
- .enable_vblank = via_enable_vblank,
- .disable_vblank = via_disable_vblank,
- .irq_preinstall = via_driver_irq_preinstall,
- .irq_postinstall = via_driver_irq_postinstall,
- .irq_uninstall = via_driver_irq_uninstall,
- .irq_handler = via_driver_irq_handler,
- .dma_quiescent = via_driver_dma_quiescent,
- .lastclose = via_lastclose,
- .ioctls = via_ioctls,
- .fops = &via_driver_fops,
- .name = DRIVER_NAME,
- .desc = DRIVER_DESC,
- .date = DRIVER_DATE,
- .major = DRIVER_MAJOR,
- .minor = DRIVER_MINOR,
- .patchlevel = DRIVER_PATCHLEVEL,
-};
-
-static struct pci_driver via_pci_driver = {
- .name = DRIVER_NAME,
- .id_table = pciidlist,
-};
-
-static int __init via_init(void)
-{
- driver.num_ioctls = via_max_ioctl;
- via_init_command_verifier();
- return drm_legacy_pci_init(&driver, &via_pci_driver);
-}
-
-static void __exit via_exit(void)
-{
- drm_legacy_pci_exit(&driver, &via_pci_driver);
-}
-
-module_init(via_init);
-module_exit(via_exit);
-
-MODULE_AUTHOR(DRIVER_AUTHOR);
-MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index b7a64c7dcc2c..af6ffb696086 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -165,6 +165,8 @@ struct virtio_gpu_vbuffer {
struct virtio_gpu_object_array *objs;
struct list_head list;
+
+ uint32_t seqno;
};
struct virtio_gpu_output {
@@ -194,6 +196,7 @@ struct virtio_gpu_queue {
spinlock_t qlock;
wait_queue_head_t ack_queue;
struct work_struct dequeue_work;
+ uint32_t seqno;
};
struct virtio_gpu_drv_capset {
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index 9f4a90493aea..da45215a933d 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -126,7 +126,6 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
void __user *user_bo_handles = NULL;
struct virtio_gpu_object_array *buflist = NULL;
struct sync_file *sync_file;
- int in_fence_fd = exbuf->fence_fd;
int out_fence_fd = -1;
void *buf;
uint64_t fence_ctx;
@@ -152,13 +151,11 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
ring_idx = exbuf->ring_idx;
}
- exbuf->fence_fd = -1;
-
virtio_gpu_create_context(dev, file);
if (exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_IN) {
struct dma_fence *in_fence;
- in_fence = sync_file_get_fence(in_fence_fd);
+ in_fence = sync_file_get_fence(exbuf->fence_fd);
if (!in_fence)
return -EINVAL;
diff --git a/drivers/gpu/drm/virtio/virtgpu_trace.h b/drivers/gpu/drm/virtio/virtgpu_trace.h
index 711ecc2bd241..031bc77689d5 100644
--- a/drivers/gpu/drm/virtio/virtgpu_trace.h
+++ b/drivers/gpu/drm/virtio/virtgpu_trace.h
@@ -9,40 +9,44 @@
#define TRACE_INCLUDE_FILE virtgpu_trace
DECLARE_EVENT_CLASS(virtio_gpu_cmd,
- TP_PROTO(struct virtqueue *vq, struct virtio_gpu_ctrl_hdr *hdr),
- TP_ARGS(vq, hdr),
+ TP_PROTO(struct virtqueue *vq, struct virtio_gpu_ctrl_hdr *hdr, u32 seqno),
+ TP_ARGS(vq, hdr, seqno),
TP_STRUCT__entry(
__field(int, dev)
__field(unsigned int, vq)
- __field(const char *, name)
+ __string(name, vq->name)
__field(u32, type)
__field(u32, flags)
__field(u64, fence_id)
__field(u32, ctx_id)
+ __field(u32, num_free)
+ __field(u32, seqno)
),
TP_fast_assign(
__entry->dev = vq->vdev->index;
__entry->vq = vq->index;
- __entry->name = vq->name;
+ __assign_str(name, vq->name);
__entry->type = le32_to_cpu(hdr->type);
__entry->flags = le32_to_cpu(hdr->flags);
__entry->fence_id = le64_to_cpu(hdr->fence_id);
__entry->ctx_id = le32_to_cpu(hdr->ctx_id);
+ __entry->num_free = vq->num_free;
+ __entry->seqno = seqno;
),
- TP_printk("vdev=%d vq=%u name=%s type=0x%x flags=0x%x fence_id=%llu ctx_id=%u",
- __entry->dev, __entry->vq, __entry->name,
+ TP_printk("vdev=%d vq=%u name=%s type=0x%x flags=0x%x fence_id=%llu ctx_id=%u num_free=%u seqno=%u",
+ __entry->dev, __entry->vq, __get_str(name),
__entry->type, __entry->flags, __entry->fence_id,
- __entry->ctx_id)
+ __entry->ctx_id, __entry->num_free, __entry->seqno)
);
DEFINE_EVENT(virtio_gpu_cmd, virtio_gpu_cmd_queue,
- TP_PROTO(struct virtqueue *vq, struct virtio_gpu_ctrl_hdr *hdr),
- TP_ARGS(vq, hdr)
+ TP_PROTO(struct virtqueue *vq, struct virtio_gpu_ctrl_hdr *hdr, u32 seqno),
+ TP_ARGS(vq, hdr, seqno)
);
DEFINE_EVENT(virtio_gpu_cmd, virtio_gpu_cmd_response,
- TP_PROTO(struct virtqueue *vq, struct virtio_gpu_ctrl_hdr *hdr),
- TP_ARGS(vq, hdr)
+ TP_PROTO(struct virtqueue *vq, struct virtio_gpu_ctrl_hdr *hdr, u32 seqno),
+ TP_ARGS(vq, hdr, seqno)
);
#endif
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 9ff8660b50ad..a04a9b20896d 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -215,7 +215,7 @@ void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
list_for_each_entry(entry, &reclaim_list, list) {
resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;
- trace_virtio_gpu_cmd_response(vgdev->ctrlq.vq, resp);
+ trace_virtio_gpu_cmd_response(vgdev->ctrlq.vq, resp, entry->seqno);
if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA)) {
if (le32_to_cpu(resp->type) >= VIRTIO_GPU_RESP_ERR_UNSPEC) {
@@ -261,6 +261,10 @@ void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
spin_unlock(&vgdev->cursorq.qlock);
list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
+ struct virtio_gpu_ctrl_hdr *resp =
+ (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;
+
+ trace_virtio_gpu_cmd_response(vgdev->cursorq.vq, resp, entry->seqno);
list_del(&entry->list);
free_vbuf(vgdev, entry);
}
@@ -353,7 +357,8 @@ again:
ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
WARN_ON(ret);
- trace_virtio_gpu_cmd_queue(vq, virtio_gpu_vbuf_ctrl_hdr(vbuf));
+ vbuf->seqno = ++vgdev->ctrlq.seqno;
+ trace_virtio_gpu_cmd_queue(vq, virtio_gpu_vbuf_ctrl_hdr(vbuf), vbuf->seqno);
atomic_inc(&vgdev->pending_commands);
@@ -465,8 +470,10 @@ retry:
spin_lock(&vgdev->cursorq.qlock);
goto retry;
} else {
+ vbuf->seqno = ++vgdev->cursorq.seqno;
trace_virtio_gpu_cmd_queue(vq,
- virtio_gpu_vbuf_ctrl_hdr(vbuf));
+ virtio_gpu_vbuf_ctrl_hdr(vbuf),
+ vbuf->seqno);
notify = virtqueue_kick_prepare(vq);
}
diff --git a/drivers/gpu/drm/virtio/virtgpu_vram.c b/drivers/gpu/drm/virtio/virtgpu_vram.c
index 6b45b0429fef..25df81c02783 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vram.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vram.c
@@ -46,7 +46,7 @@ static int virtio_gpu_vram_mmap(struct drm_gem_object *obj,
return -EINVAL;
vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);
- vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND;
+ vm_flags_set(vma, VM_MIXEDMAP | VM_DONTEXPAND);
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
vma->vm_ops = &virtio_gpu_vram_vm_ops;
diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c
index 293dbca50c31..6d3a2d57d992 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.c
+++ b/drivers/gpu/drm/vkms/vkms_drv.c
@@ -57,7 +57,8 @@ static void vkms_release(struct drm_device *dev)
{
struct vkms_device *vkms = drm_device_to_vkms_device(dev);
- destroy_workqueue(vkms->output.composer_workq);
+ if (vkms->output.composer_workq)
+ destroy_workqueue(vkms->output.composer_workq);
}
static void vkms_atomic_commit_tail(struct drm_atomic_state *old_state)
@@ -91,8 +92,8 @@ static void vkms_atomic_commit_tail(struct drm_atomic_state *old_state)
static int vkms_config_show(struct seq_file *m, void *data)
{
- struct drm_info_node *node = (struct drm_info_node *)m->private;
- struct drm_device *dev = node->minor->dev;
+ struct drm_debugfs_entry *entry = m->private;
+ struct drm_device *dev = entry->dev;
struct vkms_device *vkmsdev = drm_device_to_vkms_device(dev);
seq_printf(m, "writeback=%d\n", vkmsdev->config->writeback);
@@ -102,24 +103,16 @@ static int vkms_config_show(struct seq_file *m, void *data)
return 0;
}
-static const struct drm_info_list vkms_config_debugfs_list[] = {
+static const struct drm_debugfs_info vkms_config_debugfs_list[] = {
{ "vkms_config", vkms_config_show, 0 },
};
-static void vkms_config_debugfs_init(struct drm_minor *minor)
-{
- drm_debugfs_create_files(vkms_config_debugfs_list, ARRAY_SIZE(vkms_config_debugfs_list),
- minor->debugfs_root, minor);
-}
-
static const struct drm_driver vkms_driver = {
.driver_features = DRIVER_MODESET | DRIVER_ATOMIC | DRIVER_GEM,
.release = vkms_release,
.fops = &vkms_driver_fops,
DRM_GEM_SHMEM_DRIVER_OPS,
- .debugfs_init = vkms_config_debugfs_init,
-
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
@@ -201,6 +194,9 @@ static int vkms_create(struct vkms_config *config)
if (ret)
goto out_devres;
+ drm_debugfs_add_files(&vkms_device->drm, vkms_config_debugfs_list,
+ ARRAY_SIZE(vkms_config_debugfs_list));
+
ret = drm_dev_register(&vkms_device->drm, 0);
if (ret)
goto out_devres;
@@ -218,6 +214,7 @@ out_unregister:
static int __init vkms_init(void)
{
+ int ret;
struct vkms_config *config;
config = kmalloc(sizeof(*config), GFP_KERNEL);
@@ -230,7 +227,11 @@ static int __init vkms_init(void)
config->writeback = enable_writeback;
config->overlay = enable_overlay;
- return vkms_create(config);
+ ret = vkms_create(config);
+ if (ret)
+ kfree(config);
+
+ return ret;
}
static void vkms_destroy(struct vkms_config *config)
diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h
index 0a67b8073f7e..4a248567efb2 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.h
+++ b/drivers/gpu/drm/vkms/vkms_drv.h
@@ -12,8 +12,8 @@
#include <drm/drm_encoder.h>
#include <drm/drm_writeback.h>
-#define XRES_MIN 20
-#define YRES_MIN 20
+#define XRES_MIN 10
+#define YRES_MIN 10
#define XRES_DEF 1024
#define YRES_DEF 768
diff --git a/drivers/gpu/drm/vkms/vkms_plane.c b/drivers/gpu/drm/vkms/vkms_plane.c
index c3a845220e10..b3f8a115cc23 100644
--- a/drivers/gpu/drm/vkms/vkms_plane.c
+++ b/drivers/gpu/drm/vkms/vkms_plane.c
@@ -160,10 +160,44 @@ static int vkms_plane_atomic_check(struct drm_plane *plane,
return 0;
}
+static int vkms_prepare_fb(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct drm_shadow_plane_state *shadow_plane_state;
+ struct drm_framebuffer *fb = state->fb;
+ int ret;
+
+ if (!fb)
+ return 0;
+
+ shadow_plane_state = to_drm_shadow_plane_state(state);
+
+ ret = drm_gem_plane_helper_prepare_fb(plane, state);
+ if (ret)
+ return ret;
+
+ return drm_gem_fb_vmap(fb, shadow_plane_state->map, shadow_plane_state->data);
+}
+
+static void vkms_cleanup_fb(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct drm_shadow_plane_state *shadow_plane_state;
+ struct drm_framebuffer *fb = state->fb;
+
+ if (!fb)
+ return;
+
+ shadow_plane_state = to_drm_shadow_plane_state(state);
+
+ drm_gem_fb_vunmap(fb, shadow_plane_state->map);
+}
+
static const struct drm_plane_helper_funcs vkms_primary_helper_funcs = {
.atomic_update = vkms_plane_atomic_update,
.atomic_check = vkms_plane_atomic_check,
- DRM_GEM_SHADOW_PLANE_HELPER_FUNCS,
+ .prepare_fb = vkms_prepare_fb,
+ .cleanup_fb = vkms_cleanup_fb,
};
struct vkms_plane *vkms_plane_init(struct vkms_device *vkmsdev,
diff --git a/drivers/gpu/drm/vmwgfx/ttm_object.h b/drivers/gpu/drm/vmwgfx/ttm_object.h
index 8098a3846bae..e6b77ee33e55 100644
--- a/drivers/gpu/drm/vmwgfx/ttm_object.h
+++ b/drivers/gpu/drm/vmwgfx/ttm_object.h
@@ -42,6 +42,8 @@
#include <linux/list.h>
#include <linux/rcupdate.h>
+#include <drm/ttm/ttm_bo.h>
+
/**
* enum ttm_object_type
*
@@ -307,4 +309,12 @@ extern int ttm_prime_handle_to_fd(struct ttm_object_file *tfile,
#define ttm_prime_object_kfree(__obj, __prime) \
kfree_rcu(__obj, __prime.base.rhead)
+static inline int ttm_bo_wait(struct ttm_buffer_object *bo, bool intr,
+ bool no_wait)
+{
+ struct ttm_operation_ctx ctx = { intr, no_wait };
+
+ return ttm_bo_wait_ctx(bo, &ctx);
+}
+
#endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
index aa1cd5126a32..4dcf2eb7aa80 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
@@ -462,6 +462,9 @@ int vmw_bo_create(struct vmw_private *vmw,
return -ENOMEM;
}
+ /*
+ * vmw_bo_init will delete the *p_bo object if it fails
+ */
ret = vmw_bo_init(vmw, *p_bo, size,
placement, interruptible, pin,
bo_free);
@@ -470,7 +473,6 @@ int vmw_bo_create(struct vmw_private *vmw,
return ret;
out_error:
- kfree(*p_bo);
*p_bo = NULL;
return ret;
}
@@ -596,6 +598,7 @@ static int vmw_user_bo_synccpu_release(struct drm_file *filp,
ttm_bo_put(&vmw_bo->base);
}
+ drm_gem_object_put(&vmw_bo->base.base);
return ret;
}
@@ -636,6 +639,7 @@ int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
ret = vmw_user_bo_synccpu_grab(vbo, arg->flags);
vmw_bo_unreference(&vbo);
+ drm_gem_object_put(&vbo->base.base);
if (unlikely(ret != 0)) {
if (ret == -ERESTARTSYS || ret == -EBUSY)
return -EBUSY;
@@ -693,7 +697,7 @@ int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
* struct vmw_buffer_object should be placed.
* Return: Zero on success, Negative error code on error.
*
- * The vmw buffer object pointer will be refcounted.
+ * The vmw buffer object pointer will be refcounted (both ttm and gem)
*/
int vmw_user_bo_lookup(struct drm_file *filp,
uint32_t handle,
@@ -710,7 +714,6 @@ int vmw_user_bo_lookup(struct drm_file *filp,
*out = gem_to_vmw_bo(gobj);
ttm_bo_get(&(*out)->base);
- drm_gem_object_put(gobj);
return 0;
}
@@ -791,7 +794,8 @@ int vmw_dumb_create(struct drm_file *file_priv,
ret = vmw_gem_object_create_with_handle(dev_priv, file_priv,
args->size, &args->handle,
&vbo);
-
+ /* drop reference from allocate - handle holds it now */
+ drm_gem_object_put(&vbo->base.base);
return ret;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
index 3c06df2a5474..2b843ff4b437 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
@@ -28,7 +28,7 @@
#include <linux/dmapool.h>
#include <linux/pci.h>
-#include <drm/ttm/ttm_bo_api.h>
+#include <drm/ttm/ttm_bo.h>
#include "vmwgfx_drv.h"
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index bd02cb0e6837..9ad28346aff7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -40,7 +40,6 @@
#include <drm/drm_ioctl.h>
#include <drm/drm_module.h>
#include <drm/drm_sysfs.h>
-#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_range_manager.h>
#include <drm/ttm/ttm_placement.h>
#include <generated/utsrelease.h>
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 5acbf5849b27..203fa32cd4c1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -37,8 +37,10 @@
#include <drm/drm_file.h>
#include <drm/drm_rect.h>
-#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_execbuf_util.h>
+#include <drm/ttm/ttm_tt.h>
+#include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_bo.h>
#include "ttm_object.h"
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index a44d53e33cdb..0590bb22c73a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -29,7 +29,7 @@
#include "vmwgfx_drv.h"
#include "vmwgfx_reg.h"
-#include <drm/ttm/ttm_bo_api.h>
+#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>
#include "vmwgfx_so.h"
#include "vmwgfx_binding.h"
@@ -1160,6 +1160,7 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
}
ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, true, false);
ttm_bo_put(&vmw_bo->base);
+ drm_gem_object_put(&vmw_bo->base.base);
if (unlikely(ret != 0))
return ret;
@@ -1214,6 +1215,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
}
ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, false, false);
ttm_bo_put(&vmw_bo->base);
+ drm_gem_object_put(&vmw_bo->base.base);
if (unlikely(ret != 0))
return ret;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
index ce609e7d758f..4d2c28e39f4e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
@@ -146,14 +146,12 @@ int vmw_gem_object_create_with_handle(struct vmw_private *dev_priv,
&vmw_sys_placement :
&vmw_vram_sys_placement,
true, false, &vmw_gem_destroy, p_vbo);
-
- (*p_vbo)->base.base.funcs = &vmw_gem_object_funcs;
if (ret != 0)
goto out_no_bo;
+ (*p_vbo)->base.base.funcs = &vmw_gem_object_funcs;
+
ret = drm_gem_handle_create(filp, &(*p_vbo)->base.base, handle);
- /* drop reference from allocate - handle holds it now */
- drm_gem_object_put(&(*p_vbo)->base.base);
out_no_bo:
return ret;
}
@@ -180,6 +178,8 @@ int vmw_gem_object_create_ioctl(struct drm_device *dev, void *data,
rep->map_handle = drm_vma_node_offset_addr(&vbo->base.base.vma_node);
rep->cur_gmr_id = handle;
rep->cur_gmr_offset = 0;
+ /* drop reference from allocate - handle holds it now */
+ drm_gem_object_put(&vbo->base.base);
out_no_bo:
return ret;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
index c482e5298e11..20158a92acc7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
@@ -25,7 +25,6 @@
*
**************************************************************************/
-#include <drm/ttm/ttm_bo_driver.h>
#include "vmwgfx_drv.h"
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
index abd5e3323ebf..ceb4d3d3b965 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
@@ -29,7 +29,6 @@
*/
#include "vmwgfx_drv.h"
-#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/idr.h>
#include <linux/spinlock.h>
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 257f090071f1..445d619e1fdc 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -1815,8 +1815,10 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
err_out:
/* vmw_user_lookup_handle takes one ref so does new_fb */
- if (bo)
+ if (bo) {
vmw_bo_unreference(&bo);
+ drm_gem_object_put(&bo->base.base);
+ }
if (surface)
vmw_surface_unreference(&surface);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
index e9f5c89b4ca6..b5b311f2a91a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
@@ -458,6 +458,7 @@ int vmw_overlay_ioctl(struct drm_device *dev, void *data,
ret = vmw_overlay_update_stream(dev_priv, buf, arg, true);
vmw_bo_unreference(&buf);
+ drm_gem_object_put(&buf->base.base);
out_unlock:
mutex_unlock(&overlay->mutex);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
index 108a496b5d18..51e83dfa1cac 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -807,6 +807,7 @@ static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv,
num_output_sig, tfile, shader_handle);
out_bad_arg:
vmw_bo_unreference(&buffer);
+ drm_gem_object_put(&buffer->base.base);
return ret;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 3bc63ae768f3..dcfb003841b3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -683,7 +683,7 @@ static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
container_of(base, struct vmw_user_surface, prime.base);
struct vmw_resource *res = &user_srf->srf.res;
- if (base->shareable && res && res->backup)
+ if (res && res->backup)
drm_gem_object_put(&res->backup->base.base);
*p_base = NULL;
@@ -864,7 +864,11 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
goto out_unlock;
}
vmw_bo_reference(res->backup);
- drm_gem_object_get(&res->backup->base.base);
+ /*
+ * We don't expose the handle to the userspace and surface
+ * already holds a gem reference
+ */
+ drm_gem_handle_delete(file_priv, backup_handle);
}
tmp = vmw_resource_reference(&srf->res);
@@ -1568,8 +1572,6 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
drm_vma_node_offset_addr(&res->backup->base.base.vma_node);
rep->buffer_size = res->backup->base.base.size;
rep->buffer_handle = backup_handle;
- if (user_srf->prime.base.shareable)
- drm_gem_object_get(&res->backup->base.base);
} else {
rep->buffer_map_handle = 0;
rep->buffer_size = 0;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_system_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_system_manager.c
index d3007bf1b8f5..ee7964cbdaca 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_system_manager.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_system_manager.c
@@ -26,7 +26,6 @@
#include "vmwgfx_drv.h"
-#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_device.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_resource.h>
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
index 4e3938e62c08..856a352a72a6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
@@ -26,7 +26,6 @@
**************************************************************************/
#include "vmwgfx_drv.h"
-#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
static const struct ttm_place vram_placement_flags = {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
index 265f7c48d856..90097d04b45f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
@@ -97,7 +97,7 @@ int vmw_mmap(struct file *filp, struct vm_area_struct *vma)
/* Use VM_PFNMAP rather than VM_MIXEDMAP if not a COW mapping */
if (!is_cow_mapping(vma->vm_flags))
- vma->vm_flags = (vma->vm_flags & ~VM_MIXEDMAP) | VM_PFNMAP;
+ vm_flags_mod(vma, VM_PFNMAP, VM_MIXEDMAP);
ttm_bo_put(bo); /* release extra ref taken by ttm_bo_mmap_obj() */
diff --git a/drivers/gpu/drm/xen/xen_drm_front_gem.c b/drivers/gpu/drm/xen/xen_drm_front_gem.c
index 4c95ebcdcc2d..3ad2b4cfd1f0 100644
--- a/drivers/gpu/drm/xen/xen_drm_front_gem.c
+++ b/drivers/gpu/drm/xen/xen_drm_front_gem.c
@@ -69,8 +69,7 @@ static int xen_drm_front_gem_object_mmap(struct drm_gem_object *gem_obj,
* vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map
* the whole buffer.
*/
- vma->vm_flags &= ~VM_PFNMAP;
- vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND;
+ vm_flags_mod(vma, VM_MIXEDMAP | VM_DONTEXPAND, VM_PFNMAP);
vma->vm_pgoff = 0;
/*
diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c
index bdee16a0bb8e..bc7271a00a94 100644
--- a/drivers/gpu/host1x/bus.c
+++ b/drivers/gpu/host1x/bus.c
@@ -338,7 +338,7 @@ static int host1x_device_match(struct device *dev, struct device_driver *drv)
return strcmp(dev_name(dev), drv->name) == 0;
}
-static int host1x_device_uevent(struct device *dev,
+static int host1x_device_uevent(const struct device *dev,
struct kobj_uevent_env *env)
{
struct device_node *np = dev->parent->of_node;
diff --git a/drivers/gpu/host1x/cdma.c b/drivers/gpu/host1x/cdma.c
index 103fda055394..d1336e438f4f 100644
--- a/drivers/gpu/host1x/cdma.c
+++ b/drivers/gpu/host1x/cdma.c
@@ -105,7 +105,7 @@ static int host1x_pushbuffer_init(struct push_buffer *pb)
pb->dma = iova_dma_addr(&host1x->iova, alloc);
err = iommu_map(host1x->domain, pb->dma, pb->phys, size,
- IOMMU_READ);
+ IOMMU_READ, GFP_KERNEL);
if (err)
goto iommu_free_iova;
} else {
@@ -490,6 +490,15 @@ resume:
host1x_hw_cdma_resume(host1x, cdma, restart_addr);
}
+static void cdma_update_work(struct work_struct *work)
+{
+ struct host1x_cdma *cdma = container_of(work, struct host1x_cdma, update_work);
+
+ mutex_lock(&cdma->lock);
+ update_cdma_locked(cdma);
+ mutex_unlock(&cdma->lock);
+}
+
/*
* Create a cdma
*/
@@ -499,6 +508,7 @@ int host1x_cdma_init(struct host1x_cdma *cdma)
mutex_init(&cdma->lock);
init_completion(&cdma->complete);
+ INIT_WORK(&cdma->update_work, cdma_update_work);
INIT_LIST_HEAD(&cdma->sync_queue);
@@ -679,7 +689,5 @@ void host1x_cdma_end(struct host1x_cdma *cdma,
*/
void host1x_cdma_update(struct host1x_cdma *cdma)
{
- mutex_lock(&cdma->lock);
- update_cdma_locked(cdma);
- mutex_unlock(&cdma->lock);
+ schedule_work(&cdma->update_work);
}
diff --git a/drivers/gpu/host1x/cdma.h b/drivers/gpu/host1x/cdma.h
index 12c4327c4df0..7fd8168af4f9 100644
--- a/drivers/gpu/host1x/cdma.h
+++ b/drivers/gpu/host1x/cdma.h
@@ -11,6 +11,7 @@
#include <linux/sched.h>
#include <linux/completion.h>
#include <linux/list.h>
+#include <linux/workqueue.h>
struct host1x_syncpt;
struct host1x_userctx_timeout;
@@ -69,6 +70,7 @@ struct host1x_cdma {
struct buffer_timeout timeout; /* channel's timeout state/wq */
bool running;
bool torndown;
+ struct work_struct update_work;
};
#define cdma_to_channel(cdma) container_of(cdma, struct host1x_channel, cdma)
diff --git a/drivers/gpu/host1x/context.c b/drivers/gpu/host1x/context.c
index c8e7994c2c9c..8beedcf080ab 100644
--- a/drivers/gpu/host1x/context.c
+++ b/drivers/gpu/host1x/context.c
@@ -35,8 +35,6 @@ int host1x_memory_context_list_init(struct host1x *host1x)
cdl->len = err / 4;
for (i = 0; i < cdl->len; i++) {
- struct iommu_fwspec *fwspec;
-
ctx = &cdl->devs[i];
ctx->host = host1x;
@@ -70,14 +68,12 @@ int host1x_memory_context_list_init(struct host1x *host1x)
goto del_devices;
}
- fwspec = dev_iommu_fwspec_get(&ctx->dev);
- if (!fwspec || !device_iommu_mapped(&ctx->dev)) {
+ if (!tegra_dev_iommu_get_stream_id(&ctx->dev, &ctx->stream_id) ||
+ !device_iommu_mapped(&ctx->dev)) {
dev_err(host1x->dev, "Context device %d has no IOMMU!\n", i);
device_del(&ctx->dev);
goto del_devices;
}
-
- ctx->stream_id = fwspec->ids[0] & 0xffff;
}
return 0;
diff --git a/drivers/gpu/host1x/debug.c b/drivers/gpu/host1x/debug.c
index 6649b04b7131..a18cc8d8caf5 100644
--- a/drivers/gpu/host1x/debug.c
+++ b/drivers/gpu/host1x/debug.c
@@ -77,6 +77,7 @@ static int show_channel(struct host1x_channel *ch, void *data, bool show_fifo)
static void show_syncpts(struct host1x *m, struct output *o, bool show_all)
{
+ unsigned long irqflags;
struct list_head *pos;
unsigned int i;
int err;
@@ -92,10 +93,10 @@ static void show_syncpts(struct host1x *m, struct output *o, bool show_all)
u32 min = host1x_syncpt_load(m->syncpt + i);
unsigned int waiters = 0;
- spin_lock(&m->syncpt[i].intr.lock);
- list_for_each(pos, &m->syncpt[i].intr.wait_head)
+ spin_lock_irqsave(&m->syncpt[i].fences.lock, irqflags);
+ list_for_each(pos, &m->syncpt[i].fences.list)
waiters++;
- spin_unlock(&m->syncpt[i].intr.lock);
+ spin_unlock_irqrestore(&m->syncpt[i].fences.lock, irqflags);
if (!kref_read(&m->syncpt[i].ref))
continue;
diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c
index f31039aca03c..4872d183d860 100644
--- a/drivers/gpu/host1x/dev.c
+++ b/drivers/gpu/host1x/dev.c
@@ -516,7 +516,7 @@ static int host1x_probe(struct platform_device *pdev)
return PTR_ERR(host->regs);
}
- syncpt_irq = platform_get_irq(pdev, 0);
+ host->syncpt_irq = platform_get_irq(pdev, 0);
if (syncpt_irq < 0)
return syncpt_irq;
@@ -578,7 +578,7 @@ static int host1x_probe(struct platform_device *pdev)
goto free_contexts;
}
- err = host1x_intr_init(host, syncpt_irq);
+ err = host1x_intr_init(host);
if (err) {
dev_err(&pdev->dev, "failed to initialize interrupts\n");
goto deinit_syncpt;
diff --git a/drivers/gpu/host1x/dev.h b/drivers/gpu/host1x/dev.h
index 920e5548cfbc..75de50fe03d0 100644
--- a/drivers/gpu/host1x/dev.h
+++ b/drivers/gpu/host1x/dev.h
@@ -74,8 +74,7 @@ struct host1x_syncpt_ops {
};
struct host1x_intr_ops {
- int (*init_host_sync)(struct host1x *host, u32 cpm,
- void (*syncpt_thresh_work)(struct work_struct *work));
+ int (*init_host_sync)(struct host1x *host, u32 cpm);
void (*set_syncpt_threshold)(
struct host1x *host, unsigned int id, u32 thresh);
void (*enable_syncpt_intr)(struct host1x *host, unsigned int id);
@@ -125,6 +124,7 @@ struct host1x {
void __iomem *regs;
void __iomem *hv_regs; /* hypervisor region */
void __iomem *common_regs;
+ int syncpt_irq;
struct host1x_syncpt *syncpt;
struct host1x_syncpt_base *bases;
struct device *dev;
@@ -138,7 +138,6 @@ struct host1x {
dma_addr_t iova_end;
struct mutex intr_mutex;
- int intr_syncpt_irq;
const struct host1x_syncpt_ops *syncpt_op;
const struct host1x_intr_ops *intr_op;
@@ -216,10 +215,9 @@ static inline void host1x_hw_syncpt_enable_protection(struct host1x *host)
return host->syncpt_op->enable_protection(host);
}
-static inline int host1x_hw_intr_init_host_sync(struct host1x *host, u32 cpm,
- void (*syncpt_thresh_work)(struct work_struct *))
+static inline int host1x_hw_intr_init_host_sync(struct host1x *host, u32 cpm)
{
- return host->intr_op->init_host_sync(host, cpm, syncpt_thresh_work);
+ return host->intr_op->init_host_sync(host, cpm);
}
static inline void host1x_hw_intr_set_syncpt_threshold(struct host1x *host,
diff --git a/drivers/gpu/host1x/fence.c b/drivers/gpu/host1x/fence.c
index df428bcbae69..139ad1afd935 100644
--- a/drivers/gpu/host1x/fence.c
+++ b/drivers/gpu/host1x/fence.c
@@ -15,22 +15,6 @@
#include "intr.h"
#include "syncpt.h"
-static DEFINE_SPINLOCK(lock);
-
-struct host1x_syncpt_fence {
- struct dma_fence base;
-
- atomic_t signaling;
-
- struct host1x_syncpt *sp;
- u32 threshold;
-
- struct host1x_waitlist *waiter;
- void *waiter_ref;
-
- struct delayed_work timeout_work;
-};
-
static const char *host1x_syncpt_fence_get_driver_name(struct dma_fence *f)
{
return "host1x";
@@ -49,11 +33,11 @@ static struct host1x_syncpt_fence *to_host1x_fence(struct dma_fence *f)
static bool host1x_syncpt_fence_enable_signaling(struct dma_fence *f)
{
struct host1x_syncpt_fence *sf = to_host1x_fence(f);
- int err;
if (host1x_syncpt_is_expired(sf->sp, sf->threshold))
return false;
+ /* Reference for interrupt path. */
dma_fence_get(f);
/*
@@ -61,24 +45,17 @@ static bool host1x_syncpt_fence_enable_signaling(struct dma_fence *f)
* reference to any fences for which 'enable_signaling' has been
* called (and that have not been signalled).
*
- * We provide a userspace API to create arbitrary syncpoint fences,
- * so we cannot normally guarantee that all fences get signalled.
- * As such, setup a timeout, so that long-lasting fences will get
- * reaped eventually.
+ * We cannot currently always guarantee that all fences get signalled
+ * or cancelled. As such, for such situations, set up a timeout, so
+ * that long-lasting fences will get reaped eventually.
*/
- schedule_delayed_work(&sf->timeout_work, msecs_to_jiffies(30000));
-
- err = host1x_intr_add_action(sf->sp->host, sf->sp, sf->threshold,
- HOST1X_INTR_ACTION_SIGNAL_FENCE, f,
- sf->waiter, &sf->waiter_ref);
- if (err) {
- cancel_delayed_work_sync(&sf->timeout_work);
- dma_fence_put(f);
- return false;
+ if (sf->timeout) {
+ /* Reference for timeout path. */
+ dma_fence_get(f);
+ schedule_delayed_work(&sf->timeout_work, msecs_to_jiffies(30000));
}
- /* intr framework takes ownership of waiter */
- sf->waiter = NULL;
+ host1x_intr_add_fence_locked(sf->sp->host, sf);
/*
* The fence may get signalled at any time after the above call,
@@ -89,37 +66,32 @@ static bool host1x_syncpt_fence_enable_signaling(struct dma_fence *f)
return true;
}
-static void host1x_syncpt_fence_release(struct dma_fence *f)
-{
- struct host1x_syncpt_fence *sf = to_host1x_fence(f);
-
- if (sf->waiter)
- kfree(sf->waiter);
-
- dma_fence_free(f);
-}
-
static const struct dma_fence_ops host1x_syncpt_fence_ops = {
.get_driver_name = host1x_syncpt_fence_get_driver_name,
.get_timeline_name = host1x_syncpt_fence_get_timeline_name,
.enable_signaling = host1x_syncpt_fence_enable_signaling,
- .release = host1x_syncpt_fence_release,
};
void host1x_fence_signal(struct host1x_syncpt_fence *f)
{
- if (atomic_xchg(&f->signaling, 1))
+ if (atomic_xchg(&f->signaling, 1)) {
+ /*
+ * Already on timeout path, but we removed the fence before
+ * timeout path could, so drop interrupt path reference.
+ */
+ dma_fence_put(&f->base);
return;
+ }
- /*
- * Cancel pending timeout work - if it races, it will
- * not get 'f->signaling' and return.
- */
- cancel_delayed_work_sync(&f->timeout_work);
-
- host1x_intr_put_ref(f->sp->host, f->sp->id, f->waiter_ref, false);
+ if (f->timeout && cancel_delayed_work(&f->timeout_work)) {
+ /*
+ * We know that the timeout path will not be entered.
+ * Safe to drop the timeout path's reference now.
+ */
+ dma_fence_put(&f->base);
+ }
- dma_fence_signal(&f->base);
+ dma_fence_signal_locked(&f->base);
dma_fence_put(&f->base);
}
@@ -129,21 +101,29 @@ static void do_fence_timeout(struct work_struct *work)
struct host1x_syncpt_fence *f =
container_of(dwork, struct host1x_syncpt_fence, timeout_work);
- if (atomic_xchg(&f->signaling, 1))
+ if (atomic_xchg(&f->signaling, 1)) {
+ /* Already on interrupt path, drop timeout path reference if any. */
+ if (f->timeout)
+ dma_fence_put(&f->base);
return;
+ }
- /*
- * Cancel pending timeout work - if it races, it will
- * not get 'f->signaling' and return.
- */
- host1x_intr_put_ref(f->sp->host, f->sp->id, f->waiter_ref, true);
+ if (host1x_intr_remove_fence(f->sp->host, f)) {
+ /*
+ * Managed to remove fence from queue, so it's safe to drop
+ * the interrupt path's reference.
+ */
+ dma_fence_put(&f->base);
+ }
dma_fence_set_error(&f->base, -ETIMEDOUT);
dma_fence_signal(&f->base);
- dma_fence_put(&f->base);
+ if (f->timeout)
+ dma_fence_put(&f->base);
}
-struct dma_fence *host1x_fence_create(struct host1x_syncpt *sp, u32 threshold)
+struct dma_fence *host1x_fence_create(struct host1x_syncpt *sp, u32 threshold,
+ bool timeout)
{
struct host1x_syncpt_fence *fence;
@@ -151,16 +131,11 @@ struct dma_fence *host1x_fence_create(struct host1x_syncpt *sp, u32 threshold)
if (!fence)
return ERR_PTR(-ENOMEM);
- fence->waiter = kzalloc(sizeof(*fence->waiter), GFP_KERNEL);
- if (!fence->waiter) {
- kfree(fence);
- return ERR_PTR(-ENOMEM);
- }
-
fence->sp = sp;
fence->threshold = threshold;
+ fence->timeout = timeout;
- dma_fence_init(&fence->base, &host1x_syncpt_fence_ops, &lock,
+ dma_fence_init(&fence->base, &host1x_syncpt_fence_ops, &sp->fences.lock,
dma_fence_context_alloc(1), 0);
INIT_DELAYED_WORK(&fence->timeout_work, do_fence_timeout);
@@ -168,3 +143,12 @@ struct dma_fence *host1x_fence_create(struct host1x_syncpt *sp, u32 threshold)
return &fence->base;
}
EXPORT_SYMBOL(host1x_fence_create);
+
+void host1x_fence_cancel(struct dma_fence *f)
+{
+ struct host1x_syncpt_fence *sf = to_host1x_fence(f);
+
+ schedule_delayed_work(&sf->timeout_work, 0);
+ flush_delayed_work(&sf->timeout_work);
+}
+EXPORT_SYMBOL(host1x_fence_cancel);
diff --git a/drivers/gpu/host1x/fence.h b/drivers/gpu/host1x/fence.h
index 70c91de82f14..f3c644c73cad 100644
--- a/drivers/gpu/host1x/fence.h
+++ b/drivers/gpu/host1x/fence.h
@@ -6,7 +6,24 @@
#ifndef HOST1X_FENCE_H
#define HOST1X_FENCE_H
-struct host1x_syncpt_fence;
+struct host1x_syncpt_fence {
+ struct dma_fence base;
+
+ atomic_t signaling;
+
+ struct host1x_syncpt *sp;
+ u32 threshold;
+ bool timeout;
+
+ struct delayed_work timeout_work;
+
+ struct list_head list;
+};
+
+struct host1x_fence_list {
+ spinlock_t lock;
+ struct list_head list;
+};
void host1x_fence_signal(struct host1x_syncpt_fence *fence);
diff --git a/drivers/gpu/host1x/hw/channel_hw.c b/drivers/gpu/host1x/hw/channel_hw.c
index 732abe0750ff..d44b8de890be 100644
--- a/drivers/gpu/host1x/hw/channel_hw.c
+++ b/drivers/gpu/host1x/hw/channel_hw.c
@@ -179,14 +179,12 @@ static inline void synchronize_syncpt_base(struct host1x_job *job)
static void host1x_channel_set_streamid(struct host1x_channel *channel)
{
#if HOST1X_HW >= 6
- u32 sid = 0x7f;
-#ifdef CONFIG_IOMMU_API
- struct iommu_fwspec *spec = dev_iommu_fwspec_get(channel->dev->parent);
- if (spec)
- sid = spec->ids[0] & 0xffff;
-#endif
+ u32 stream_id;
+
+ if (!tegra_dev_iommu_get_stream_id(channel->dev->parent, &stream_id))
+ stream_id = TEGRA_STREAM_ID_BYPASS;
- host1x_ch_writel(channel, sid, HOST1X_CHANNEL_SMMU_STREAMID);
+ host1x_ch_writel(channel, stream_id, HOST1X_CHANNEL_SMMU_STREAMID);
#endif
}
@@ -278,6 +276,14 @@ static void channel_program_cdma(struct host1x_job *job)
#endif
}
+static void job_complete_callback(struct dma_fence *fence, struct dma_fence_cb *cb)
+{
+ struct host1x_job *job = container_of(cb, struct host1x_job, fence_cb);
+
+ /* Schedules CDMA update. */
+ host1x_cdma_update(&job->channel->cdma);
+}
+
static int channel_submit(struct host1x_job *job)
{
struct host1x_channel *ch = job->channel;
@@ -285,7 +291,6 @@ static int channel_submit(struct host1x_job *job)
u32 prev_max = 0;
u32 syncval;
int err;
- struct host1x_waitlist *completed_waiter = NULL;
struct host1x *host = dev_get_drvdata(ch->dev->parent);
trace_host1x_channel_submit(dev_name(ch->dev),
@@ -298,14 +303,7 @@ static int channel_submit(struct host1x_job *job)
/* get submit lock */
err = mutex_lock_interruptible(&ch->submitlock);
if (err)
- goto error;
-
- completed_waiter = kzalloc(sizeof(*completed_waiter), GFP_KERNEL);
- if (!completed_waiter) {
- mutex_unlock(&ch->submitlock);
- err = -ENOMEM;
- goto error;
- }
+ return err;
host1x_channel_set_streamid(ch);
host1x_enable_gather_filter(ch);
@@ -315,31 +313,37 @@ static int channel_submit(struct host1x_job *job)
err = host1x_cdma_begin(&ch->cdma, job);
if (err) {
mutex_unlock(&ch->submitlock);
- goto error;
+ return err;
}
channel_program_cdma(job);
syncval = host1x_syncpt_read_max(sp);
+ /*
+ * Create fence before submitting job to HW to avoid job completing
+ * before the fence is set up.
+ */
+ job->fence = host1x_fence_create(sp, syncval, true);
+ if (WARN(IS_ERR(job->fence), "Failed to create submit complete fence")) {
+ job->fence = NULL;
+ } else {
+ err = dma_fence_add_callback(job->fence, &job->fence_cb,
+ job_complete_callback);
+ }
+
/* end CDMA submit & stash pinned hMems into sync queue */
host1x_cdma_end(&ch->cdma, job);
trace_host1x_channel_submitted(dev_name(ch->dev), prev_max, syncval);
- /* schedule a submit complete interrupt */
- err = host1x_intr_add_action(host, sp, syncval,
- HOST1X_INTR_ACTION_SUBMIT_COMPLETE, ch,
- completed_waiter, &job->waiter);
- completed_waiter = NULL;
- WARN(err, "Failed to set submit complete interrupt");
-
mutex_unlock(&ch->submitlock);
- return 0;
+ if (err == -ENOENT)
+ host1x_cdma_update(&ch->cdma);
+ else
+ WARN(err, "Failed to set submit complete interrupt");
-error:
- kfree(completed_waiter);
- return err;
+ return 0;
}
static int host1x_channel_init(struct host1x_channel *ch, struct host1x *dev,
diff --git a/drivers/gpu/host1x/hw/hw_host1x06_uclass.h b/drivers/gpu/host1x/hw/hw_host1x06_uclass.h
index 5f831438d19b..50c32de452fb 100644
--- a/drivers/gpu/host1x/hw/hw_host1x06_uclass.h
+++ b/drivers/gpu/host1x/hw/hw_host1x06_uclass.h
@@ -53,7 +53,7 @@ static inline u32 host1x_uclass_incr_syncpt_cond_f(u32 v)
host1x_uclass_incr_syncpt_cond_f(v)
static inline u32 host1x_uclass_incr_syncpt_indx_f(u32 v)
{
- return (v & 0xff) << 0;
+ return (v & 0x3ff) << 0;
}
#define HOST1X_UCLASS_INCR_SYNCPT_INDX_F(v) \
host1x_uclass_incr_syncpt_indx_f(v)
diff --git a/drivers/gpu/host1x/hw/hw_host1x07_uclass.h b/drivers/gpu/host1x/hw/hw_host1x07_uclass.h
index 8cd2ef087d5d..887b878f92f7 100644
--- a/drivers/gpu/host1x/hw/hw_host1x07_uclass.h
+++ b/drivers/gpu/host1x/hw/hw_host1x07_uclass.h
@@ -53,7 +53,7 @@ static inline u32 host1x_uclass_incr_syncpt_cond_f(u32 v)
host1x_uclass_incr_syncpt_cond_f(v)
static inline u32 host1x_uclass_incr_syncpt_indx_f(u32 v)
{
- return (v & 0xff) << 0;
+ return (v & 0x3ff) << 0;
}
#define HOST1X_UCLASS_INCR_SYNCPT_INDX_F(v) \
host1x_uclass_incr_syncpt_indx_f(v)
diff --git a/drivers/gpu/host1x/hw/hw_host1x08_uclass.h b/drivers/gpu/host1x/hw/hw_host1x08_uclass.h
index 724cccd71aa1..4fb1d090edae 100644
--- a/drivers/gpu/host1x/hw/hw_host1x08_uclass.h
+++ b/drivers/gpu/host1x/hw/hw_host1x08_uclass.h
@@ -53,7 +53,7 @@ static inline u32 host1x_uclass_incr_syncpt_cond_f(u32 v)
host1x_uclass_incr_syncpt_cond_f(v)
static inline u32 host1x_uclass_incr_syncpt_indx_f(u32 v)
{
- return (v & 0xff) << 0;
+ return (v & 0x3ff) << 0;
}
#define HOST1X_UCLASS_INCR_SYNCPT_INDX_F(v) \
host1x_uclass_incr_syncpt_indx_f(v)
diff --git a/drivers/gpu/host1x/hw/intr_hw.c b/drivers/gpu/host1x/hw/intr_hw.c
index 9acccdb139e6..b915ef7d0348 100644
--- a/drivers/gpu/host1x/hw/intr_hw.c
+++ b/drivers/gpu/host1x/hw/intr_hw.c
@@ -13,23 +13,6 @@
#include "../intr.h"
#include "../dev.h"
-/*
- * Sync point threshold interrupt service function
- * Handles sync point threshold triggers, in interrupt context
- */
-static void host1x_intr_syncpt_handle(struct host1x_syncpt *syncpt)
-{
- unsigned int id = syncpt->id;
- struct host1x *host = syncpt->host;
-
- host1x_sync_writel(host, BIT(id % 32),
- HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE(id / 32));
- host1x_sync_writel(host, BIT(id % 32),
- HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(id / 32));
-
- schedule_work(&syncpt->intr.work);
-}
-
static irqreturn_t syncpt_thresh_isr(int irq, void *dev_id)
{
struct host1x *host = dev_id;
@@ -39,17 +22,20 @@ static irqreturn_t syncpt_thresh_isr(int irq, void *dev_id)
for (i = 0; i < DIV_ROUND_UP(host->info->nb_pts, 32); i++) {
reg = host1x_sync_readl(host,
HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(i));
- for_each_set_bit(id, &reg, 32) {
- struct host1x_syncpt *syncpt =
- host->syncpt + (i * 32 + id);
- host1x_intr_syncpt_handle(syncpt);
- }
+
+ host1x_sync_writel(host, reg,
+ HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE(i));
+ host1x_sync_writel(host, reg,
+ HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(i));
+
+ for_each_set_bit(id, &reg, 32)
+ host1x_intr_handle_interrupt(host, i * 32 + id);
}
return IRQ_HANDLED;
}
-static void _host1x_intr_disable_all_syncpt_intrs(struct host1x *host)
+static void host1x_intr_disable_all_syncpt_intrs(struct host1x *host)
{
unsigned int i;
@@ -90,45 +76,38 @@ static void intr_hw_init(struct host1x *host, u32 cpm)
}
static int
-_host1x_intr_init_host_sync(struct host1x *host, u32 cpm,
- void (*syncpt_thresh_work)(struct work_struct *))
+host1x_intr_init_host_sync(struct host1x *host, u32 cpm)
{
- unsigned int i;
int err;
host1x_hw_intr_disable_all_syncpt_intrs(host);
- for (i = 0; i < host->info->nb_pts; i++)
- INIT_WORK(&host->syncpt[i].intr.work, syncpt_thresh_work);
-
- err = devm_request_irq(host->dev, host->intr_syncpt_irq,
+ err = devm_request_irq(host->dev, host->syncpt_irq,
syncpt_thresh_isr, IRQF_SHARED,
"host1x_syncpt", host);
- if (err < 0) {
- WARN_ON(1);
+ if (err < 0)
return err;
- }
intr_hw_init(host, cpm);
return 0;
}
-static void _host1x_intr_set_syncpt_threshold(struct host1x *host,
+static void host1x_intr_set_syncpt_threshold(struct host1x *host,
unsigned int id,
u32 thresh)
{
host1x_sync_writel(host, thresh, HOST1X_SYNC_SYNCPT_INT_THRESH(id));
}
-static void _host1x_intr_enable_syncpt_intr(struct host1x *host,
+static void host1x_intr_enable_syncpt_intr(struct host1x *host,
unsigned int id)
{
host1x_sync_writel(host, BIT(id % 32),
HOST1X_SYNC_SYNCPT_THRESH_INT_ENABLE_CPU0(id / 32));
}
-static void _host1x_intr_disable_syncpt_intr(struct host1x *host,
+static void host1x_intr_disable_syncpt_intr(struct host1x *host,
unsigned int id)
{
host1x_sync_writel(host, BIT(id % 32),
@@ -137,23 +116,10 @@ static void _host1x_intr_disable_syncpt_intr(struct host1x *host,
HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(id / 32));
}
-static int _host1x_free_syncpt_irq(struct host1x *host)
-{
- unsigned int i;
-
- devm_free_irq(host->dev, host->intr_syncpt_irq, host);
-
- for (i = 0; i < host->info->nb_pts; i++)
- cancel_work_sync(&host->syncpt[i].intr.work);
-
- return 0;
-}
-
static const struct host1x_intr_ops host1x_intr_ops = {
- .init_host_sync = _host1x_intr_init_host_sync,
- .set_syncpt_threshold = _host1x_intr_set_syncpt_threshold,
- .enable_syncpt_intr = _host1x_intr_enable_syncpt_intr,
- .disable_syncpt_intr = _host1x_intr_disable_syncpt_intr,
- .disable_all_syncpt_intrs = _host1x_intr_disable_all_syncpt_intrs,
- .free_syncpt_irq = _host1x_free_syncpt_irq,
+ .init_host_sync = host1x_intr_init_host_sync,
+ .set_syncpt_threshold = host1x_intr_set_syncpt_threshold,
+ .enable_syncpt_intr = host1x_intr_enable_syncpt_intr,
+ .disable_syncpt_intr = host1x_intr_disable_syncpt_intr,
+ .disable_all_syncpt_intrs = host1x_intr_disable_all_syncpt_intrs,
};
diff --git a/drivers/gpu/host1x/hw/syncpt_hw.c b/drivers/gpu/host1x/hw/syncpt_hw.c
index dd39d67ccec3..8cf35b2eff3d 100644
--- a/drivers/gpu/host1x/hw/syncpt_hw.c
+++ b/drivers/gpu/host1x/hw/syncpt_hw.c
@@ -106,9 +106,6 @@ static void syncpt_assign_to_channel(struct host1x_syncpt *sp,
#if HOST1X_HW >= 6
struct host1x *host = sp->host;
- if (!host->hv_regs)
- return;
-
host1x_sync_writel(host,
HOST1X_SYNC_SYNCPT_CH_APP_CH(ch ? ch->id : 0xff),
HOST1X_SYNC_SYNCPT_CH_APP(sp->id));
diff --git a/drivers/gpu/host1x/intr.c b/drivers/gpu/host1x/intr.c
index 965ba21818b1..995bfa980837 100644
--- a/drivers/gpu/host1x/intr.c
+++ b/drivers/gpu/host1x/intr.c
@@ -2,299 +2,113 @@
/*
* Tegra host1x Interrupt Management
*
- * Copyright (c) 2010-2013, NVIDIA Corporation.
+ * Copyright (c) 2010-2021, NVIDIA Corporation.
*/
#include <linux/clk.h>
-#include <linux/interrupt.h>
-#include <linux/slab.h>
-#include <linux/irq.h>
-#include <trace/events/host1x.h>
-#include "channel.h"
#include "dev.h"
#include "fence.h"
#include "intr.h"
-/* Wait list management */
-
-enum waitlist_state {
- WLS_PENDING,
- WLS_REMOVED,
- WLS_CANCELLED,
- WLS_HANDLED
-};
-
-static void waiter_release(struct kref *kref)
-{
- kfree(container_of(kref, struct host1x_waitlist, refcount));
-}
-
-/*
- * add a waiter to a waiter queue, sorted by threshold
- * returns true if it was added at the head of the queue
- */
-static bool add_waiter_to_queue(struct host1x_waitlist *waiter,
- struct list_head *queue)
-{
- struct host1x_waitlist *pos;
- u32 thresh = waiter->thresh;
-
- list_for_each_entry_reverse(pos, queue, list)
- if ((s32)(pos->thresh - thresh) <= 0) {
- list_add(&waiter->list, &pos->list);
- return false;
- }
-
- list_add(&waiter->list, queue);
- return true;
-}
-
-/*
- * run through a waiter queue for a single sync point ID
- * and gather all completed waiters into lists by actions
- */
-static void remove_completed_waiters(struct list_head *head, u32 sync,
- struct list_head completed[HOST1X_INTR_ACTION_COUNT])
+static void host1x_intr_add_fence_to_list(struct host1x_fence_list *list,
+ struct host1x_syncpt_fence *fence)
{
- struct list_head *dest;
- struct host1x_waitlist *waiter, *next, *prev;
-
- list_for_each_entry_safe(waiter, next, head, list) {
- if ((s32)(waiter->thresh - sync) > 0)
- break;
+ struct host1x_syncpt_fence *fence_in_list;
- dest = completed + waiter->action;
-
- /* consolidate submit cleanups */
- if (waiter->action == HOST1X_INTR_ACTION_SUBMIT_COMPLETE &&
- !list_empty(dest)) {
- prev = list_entry(dest->prev,
- struct host1x_waitlist, list);
- if (prev->data == waiter->data) {
- prev->count++;
- dest = NULL;
- }
+ list_for_each_entry_reverse(fence_in_list, &list->list, list) {
+ if ((s32)(fence_in_list->threshold - fence->threshold) <= 0) {
+ /* Fence in list is before us, we can insert here */
+ list_add(&fence->list, &fence_in_list->list);
+ return;
}
-
- /* PENDING->REMOVED or CANCELLED->HANDLED */
- if (atomic_inc_return(&waiter->state) == WLS_HANDLED || !dest) {
- list_del(&waiter->list);
- kref_put(&waiter->refcount, waiter_release);
- } else
- list_move_tail(&waiter->list, dest);
}
-}
-
-static void reset_threshold_interrupt(struct host1x *host,
- struct list_head *head,
- unsigned int id)
-{
- u32 thresh =
- list_first_entry(head, struct host1x_waitlist, list)->thresh;
-
- host1x_hw_intr_set_syncpt_threshold(host, id, thresh);
- host1x_hw_intr_enable_syncpt_intr(host, id);
-}
-
-static void action_submit_complete(struct host1x_waitlist *waiter)
-{
- struct host1x_channel *channel = waiter->data;
-
- host1x_cdma_update(&channel->cdma);
-
- /* Add nr_completed to trace */
- trace_host1x_channel_submit_complete(dev_name(channel->dev),
- waiter->count, waiter->thresh);
-}
-static void action_wakeup(struct host1x_waitlist *waiter)
-{
- wait_queue_head_t *wq = waiter->data;
-
- wake_up(wq);
-}
-
-static void action_wakeup_interruptible(struct host1x_waitlist *waiter)
-{
- wait_queue_head_t *wq = waiter->data;
-
- wake_up_interruptible(wq);
+ /* Add as first in list */
+ list_add(&fence->list, &list->list);
}
-static void action_signal_fence(struct host1x_waitlist *waiter)
+static void host1x_intr_update_hw_state(struct host1x *host, struct host1x_syncpt *sp)
{
- struct host1x_syncpt_fence *f = waiter->data;
-
- host1x_fence_signal(f);
-}
+ struct host1x_syncpt_fence *fence;
-typedef void (*action_handler)(struct host1x_waitlist *waiter);
+ if (!list_empty(&sp->fences.list)) {
+ fence = list_first_entry(&sp->fences.list, struct host1x_syncpt_fence, list);
-static const action_handler action_handlers[HOST1X_INTR_ACTION_COUNT] = {
- action_submit_complete,
- action_wakeup,
- action_wakeup_interruptible,
- action_signal_fence,
-};
-
-static void run_handlers(struct list_head completed[HOST1X_INTR_ACTION_COUNT])
-{
- struct list_head *head = completed;
- unsigned int i;
-
- for (i = 0; i < HOST1X_INTR_ACTION_COUNT; ++i, ++head) {
- action_handler handler = action_handlers[i];
- struct host1x_waitlist *waiter, *next;
-
- list_for_each_entry_safe(waiter, next, head, list) {
- list_del(&waiter->list);
- handler(waiter);
- WARN_ON(atomic_xchg(&waiter->state, WLS_HANDLED) !=
- WLS_REMOVED);
- kref_put(&waiter->refcount, waiter_release);
- }
+ host1x_hw_intr_set_syncpt_threshold(host, sp->id, fence->threshold);
+ host1x_hw_intr_enable_syncpt_intr(host, sp->id);
+ } else {
+ host1x_hw_intr_disable_syncpt_intr(host, sp->id);
}
}
-/*
- * Remove & handle all waiters that have completed for the given syncpt
- */
-static int process_wait_list(struct host1x *host,
- struct host1x_syncpt *syncpt,
- u32 threshold)
+void host1x_intr_add_fence_locked(struct host1x *host, struct host1x_syncpt_fence *fence)
{
- struct list_head completed[HOST1X_INTR_ACTION_COUNT];
- unsigned int i;
- int empty;
-
- for (i = 0; i < HOST1X_INTR_ACTION_COUNT; ++i)
- INIT_LIST_HEAD(completed + i);
-
- spin_lock(&syncpt->intr.lock);
-
- remove_completed_waiters(&syncpt->intr.wait_head, threshold,
- completed);
-
- empty = list_empty(&syncpt->intr.wait_head);
- if (empty)
- host1x_hw_intr_disable_syncpt_intr(host, syncpt->id);
- else
- reset_threshold_interrupt(host, &syncpt->intr.wait_head,
- syncpt->id);
-
- spin_unlock(&syncpt->intr.lock);
-
- run_handlers(completed);
-
- return empty;
-}
+ struct host1x_fence_list *fence_list = &fence->sp->fences;
-/*
- * Sync point threshold interrupt service thread function
- * Handles sync point threshold triggers, in thread context
- */
+ INIT_LIST_HEAD(&fence->list);
-static void syncpt_thresh_work(struct work_struct *work)
-{
- struct host1x_syncpt_intr *syncpt_intr =
- container_of(work, struct host1x_syncpt_intr, work);
- struct host1x_syncpt *syncpt =
- container_of(syncpt_intr, struct host1x_syncpt, intr);
- unsigned int id = syncpt->id;
- struct host1x *host = syncpt->host;
-
- (void)process_wait_list(host, syncpt,
- host1x_syncpt_load(host->syncpt + id));
+ host1x_intr_add_fence_to_list(fence_list, fence);
+ host1x_intr_update_hw_state(host, fence->sp);
}
-int host1x_intr_add_action(struct host1x *host, struct host1x_syncpt *syncpt,
- u32 thresh, enum host1x_intr_action action,
- void *data, struct host1x_waitlist *waiter,
- void **ref)
+bool host1x_intr_remove_fence(struct host1x *host, struct host1x_syncpt_fence *fence)
{
- int queue_was_empty;
-
- if (waiter == NULL) {
- pr_warn("%s: NULL waiter\n", __func__);
- return -EINVAL;
- }
-
- /* initialize a new waiter */
- INIT_LIST_HEAD(&waiter->list);
- kref_init(&waiter->refcount);
- if (ref)
- kref_get(&waiter->refcount);
- waiter->thresh = thresh;
- waiter->action = action;
- atomic_set(&waiter->state, WLS_PENDING);
- waiter->data = data;
- waiter->count = 1;
-
- spin_lock(&syncpt->intr.lock);
+ struct host1x_fence_list *fence_list = &fence->sp->fences;
+ unsigned long irqflags;
- queue_was_empty = list_empty(&syncpt->intr.wait_head);
+ spin_lock_irqsave(&fence_list->lock, irqflags);
- if (add_waiter_to_queue(waiter, &syncpt->intr.wait_head)) {
- /* added at head of list - new threshold value */
- host1x_hw_intr_set_syncpt_threshold(host, syncpt->id, thresh);
-
- /* added as first waiter - enable interrupt */
- if (queue_was_empty)
- host1x_hw_intr_enable_syncpt_intr(host, syncpt->id);
+ if (list_empty(&fence->list)) {
+ spin_unlock_irqrestore(&fence_list->lock, irqflags);
+ return false;
}
- if (ref)
- *ref = waiter;
+ list_del_init(&fence->list);
+ host1x_intr_update_hw_state(host, fence->sp);
- spin_unlock(&syncpt->intr.lock);
+ spin_unlock_irqrestore(&fence_list->lock, irqflags);
- return 0;
+ return true;
}
-void host1x_intr_put_ref(struct host1x *host, unsigned int id, void *ref,
- bool flush)
+void host1x_intr_handle_interrupt(struct host1x *host, unsigned int id)
{
- struct host1x_waitlist *waiter = ref;
- struct host1x_syncpt *syncpt;
+ struct host1x_syncpt *sp = &host->syncpt[id];
+ struct host1x_syncpt_fence *fence, *tmp;
+ unsigned int value;
- atomic_cmpxchg(&waiter->state, WLS_PENDING, WLS_CANCELLED);
+ value = host1x_syncpt_load(sp);
- syncpt = host->syncpt + id;
+ spin_lock(&sp->fences.lock);
- spin_lock(&syncpt->intr.lock);
- if (atomic_cmpxchg(&waiter->state, WLS_CANCELLED, WLS_HANDLED) ==
- WLS_CANCELLED) {
- list_del(&waiter->list);
- kref_put(&waiter->refcount, waiter_release);
- }
- spin_unlock(&syncpt->intr.lock);
+ list_for_each_entry_safe(fence, tmp, &sp->fences.list, list) {
+ if (((value - fence->threshold) & 0x80000000U) != 0U) {
+ /* Fence is not yet expired, we are done */
+ break;
+ }
- if (flush) {
- /* Wait until any concurrently executing handler has finished. */
- while (atomic_read(&waiter->state) != WLS_HANDLED)
- schedule();
+ list_del_init(&fence->list);
+ host1x_fence_signal(fence);
}
- kref_put(&waiter->refcount, waiter_release);
+ /* Re-enable interrupt if necessary */
+ host1x_intr_update_hw_state(host, sp);
+
+ spin_unlock(&sp->fences.lock);
}
-int host1x_intr_init(struct host1x *host, unsigned int irq_sync)
+int host1x_intr_init(struct host1x *host)
{
unsigned int id;
- u32 nb_pts = host1x_syncpt_nb_pts(host);
mutex_init(&host->intr_mutex);
- host->intr_syncpt_irq = irq_sync;
- for (id = 0; id < nb_pts; ++id) {
- struct host1x_syncpt *syncpt = host->syncpt + id;
+ for (id = 0; id < host1x_syncpt_nb_pts(host); ++id) {
+ struct host1x_syncpt *syncpt = &host->syncpt[id];
- spin_lock_init(&syncpt->intr.lock);
- INIT_LIST_HEAD(&syncpt->intr.wait_head);
- snprintf(syncpt->intr.thresh_irq_name,
- sizeof(syncpt->intr.thresh_irq_name),
- "host1x_sp_%02u", id);
+ spin_lock_init(&syncpt->fences.lock);
+ INIT_LIST_HEAD(&syncpt->fences.list);
}
return 0;
@@ -310,8 +124,7 @@ void host1x_intr_start(struct host1x *host)
int err;
mutex_lock(&host->intr_mutex);
- err = host1x_hw_intr_init_host_sync(host, DIV_ROUND_UP(hz, 1000000),
- syncpt_thresh_work);
+ err = host1x_hw_intr_init_host_sync(host, DIV_ROUND_UP(hz, 1000000));
if (err) {
mutex_unlock(&host->intr_mutex);
return;
@@ -321,36 +134,5 @@ void host1x_intr_start(struct host1x *host)
void host1x_intr_stop(struct host1x *host)
{
- unsigned int id;
- struct host1x_syncpt *syncpt = host->syncpt;
- u32 nb_pts = host1x_syncpt_nb_pts(host);
-
- mutex_lock(&host->intr_mutex);
-
host1x_hw_intr_disable_all_syncpt_intrs(host);
-
- for (id = 0; id < nb_pts; ++id) {
- struct host1x_waitlist *waiter, *next;
-
- list_for_each_entry_safe(waiter, next,
- &syncpt[id].intr.wait_head, list) {
- if (atomic_cmpxchg(&waiter->state,
- WLS_CANCELLED, WLS_HANDLED) == WLS_CANCELLED) {
- list_del(&waiter->list);
- kref_put(&waiter->refcount, waiter_release);
- }
- }
-
- if (!list_empty(&syncpt[id].intr.wait_head)) {
- /* output diagnostics */
- mutex_unlock(&host->intr_mutex);
- pr_warn("%s cannot stop syncpt intr id=%u\n",
- __func__, id);
- return;
- }
- }
-
- host1x_hw_intr_free_syncpt_irq(host);
-
- mutex_unlock(&host->intr_mutex);
}
diff --git a/drivers/gpu/host1x/intr.h b/drivers/gpu/host1x/intr.h
index e4c346099273..3b5610b525e5 100644
--- a/drivers/gpu/host1x/intr.h
+++ b/drivers/gpu/host1x/intr.h
@@ -2,87 +2,17 @@
/*
* Tegra host1x Interrupt Management
*
- * Copyright (c) 2010-2013, NVIDIA Corporation.
+ * Copyright (c) 2010-2021, NVIDIA Corporation.
*/
#ifndef __HOST1X_INTR_H
#define __HOST1X_INTR_H
-#include <linux/interrupt.h>
-#include <linux/workqueue.h>
-
-struct host1x_syncpt;
struct host1x;
-
-enum host1x_intr_action {
- /*
- * Perform cleanup after a submit has completed.
- * 'data' points to a channel
- */
- HOST1X_INTR_ACTION_SUBMIT_COMPLETE = 0,
-
- /*
- * Wake up a task.
- * 'data' points to a wait_queue_head_t
- */
- HOST1X_INTR_ACTION_WAKEUP,
-
- /*
- * Wake up a interruptible task.
- * 'data' points to a wait_queue_head_t
- */
- HOST1X_INTR_ACTION_WAKEUP_INTERRUPTIBLE,
-
- HOST1X_INTR_ACTION_SIGNAL_FENCE,
-
- HOST1X_INTR_ACTION_COUNT
-};
-
-struct host1x_syncpt_intr {
- spinlock_t lock;
- struct list_head wait_head;
- char thresh_irq_name[12];
- struct work_struct work;
-};
-
-struct host1x_waitlist {
- struct list_head list;
- struct kref refcount;
- u32 thresh;
- enum host1x_intr_action action;
- atomic_t state;
- void *data;
- int count;
-};
-
-/*
- * Schedule an action to be taken when a sync point reaches the given threshold.
- *
- * @id the sync point
- * @thresh the threshold
- * @action the action to take
- * @data a pointer to extra data depending on action, see above
- * @waiter waiter structure - assumes ownership
- * @ref must be passed if cancellation is possible, else NULL
- *
- * This is a non-blocking api.
- */
-int host1x_intr_add_action(struct host1x *host, struct host1x_syncpt *syncpt,
- u32 thresh, enum host1x_intr_action action,
- void *data, struct host1x_waitlist *waiter,
- void **ref);
-
-/*
- * Unreference an action submitted to host1x_intr_add_action().
- * You must call this if you passed non-NULL as ref.
- * @ref the ref returned from host1x_intr_add_action()
- * @flush wait until any pending handlers have completed before returning.
- */
-void host1x_intr_put_ref(struct host1x *host, unsigned int id, void *ref,
- bool flush);
+struct host1x_syncpt_fence;
/* Initialize host1x sync point interrupt */
-int host1x_intr_init(struct host1x *host, unsigned int irq_sync);
+int host1x_intr_init(struct host1x *host);
/* Deinitialize host1x sync point interrupt */
void host1x_intr_deinit(struct host1x *host);
@@ -93,5 +23,10 @@ void host1x_intr_start(struct host1x *host);
/* Disable host1x sync point interrupt */
void host1x_intr_stop(struct host1x *host);
-irqreturn_t host1x_syncpt_thresh_fn(void *dev_id);
+void host1x_intr_handle_interrupt(struct host1x *host, unsigned int id);
+
+void host1x_intr_add_fence_locked(struct host1x *host, struct host1x_syncpt_fence *fence);
+
+bool host1x_intr_remove_fence(struct host1x *host, struct host1x_syncpt_fence *fence);
+
#endif
diff --git a/drivers/gpu/host1x/job.c b/drivers/gpu/host1x/job.c
index b2761aa03b95..3ed49e1fd933 100644
--- a/drivers/gpu/host1x/job.c
+++ b/drivers/gpu/host1x/job.c
@@ -88,9 +88,15 @@ static void job_free(struct kref *ref)
if (job->release)
job->release(job);
- if (job->waiter)
- host1x_intr_put_ref(job->syncpt->host, job->syncpt->id,
- job->waiter, false);
+ if (job->fence) {
+ /*
+ * remove_callback is atomic w.r.t. fence signaling, so
+ * after the call returns, we know that the callback is not
+ * in execution, and the fence can be safely freed.
+ */
+ dma_fence_remove_callback(job->fence, &job->fence_cb);
+ dma_fence_put(job->fence);
+ }
if (job->syncpt)
host1x_syncpt_put(job->syncpt);
diff --git a/drivers/gpu/host1x/syncpt.c b/drivers/gpu/host1x/syncpt.c
index f87a8705f518..2d2007760eac 100644
--- a/drivers/gpu/host1x/syncpt.c
+++ b/drivers/gpu/host1x/syncpt.c
@@ -7,6 +7,7 @@
#include <linux/module.h>
#include <linux/device.h>
+#include <linux/dma-fence.h>
#include <linux/slab.h>
#include <trace/events/host1x.h>
@@ -209,17 +210,6 @@ int host1x_syncpt_incr(struct host1x_syncpt *sp)
}
EXPORT_SYMBOL(host1x_syncpt_incr);
-/*
- * Updated sync point form hardware, and returns true if syncpoint is expired,
- * false if we may need to wait
- */
-static bool syncpt_load_min_is_expired(struct host1x_syncpt *sp, u32 thresh)
-{
- host1x_hw_syncpt_load(sp->host, sp);
-
- return host1x_syncpt_is_expired(sp, thresh);
-}
-
/**
* host1x_syncpt_wait() - wait for a syncpoint to reach a given value
* @sp: host1x syncpoint
@@ -230,10 +220,10 @@ static bool syncpt_load_min_is_expired(struct host1x_syncpt *sp, u32 thresh)
int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh, long timeout,
u32 *value)
{
- DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
- void *ref;
- struct host1x_waitlist *waiter;
- int err = 0, check_count = 0;
+ struct dma_fence *fence;
+ long wait_err;
+
+ host1x_hw_syncpt_load(sp->host, sp);
if (value)
*value = host1x_syncpt_load(sp);
@@ -241,73 +231,29 @@ int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh, long timeout,
if (host1x_syncpt_is_expired(sp, thresh))
return 0;
- if (!timeout) {
- err = -EAGAIN;
- goto done;
- }
-
- /* allocate a waiter */
- waiter = kzalloc(sizeof(*waiter), GFP_KERNEL);
- if (!waiter) {
- err = -ENOMEM;
- goto done;
- }
-
- /* schedule a wakeup when the syncpoint value is reached */
- err = host1x_intr_add_action(sp->host, sp, thresh,
- HOST1X_INTR_ACTION_WAKEUP_INTERRUPTIBLE,
- &wq, waiter, &ref);
- if (err)
- goto done;
-
- err = -EAGAIN;
- /* Caller-specified timeout may be impractically low */
if (timeout < 0)
timeout = LONG_MAX;
+ else if (timeout == 0)
+ return -EAGAIN;
- /* wait for the syncpoint, or timeout, or signal */
- while (timeout) {
- long check = min_t(long, SYNCPT_CHECK_PERIOD, timeout);
- int remain;
-
- remain = wait_event_interruptible_timeout(wq,
- syncpt_load_min_is_expired(sp, thresh),
- check);
- if (remain > 0 || host1x_syncpt_is_expired(sp, thresh)) {
- if (value)
- *value = host1x_syncpt_load(sp);
+ fence = host1x_fence_create(sp, thresh, false);
+ if (IS_ERR(fence))
+ return PTR_ERR(fence);
- err = 0;
+ wait_err = dma_fence_wait_timeout(fence, true, timeout);
+ if (wait_err == 0)
+ host1x_fence_cancel(fence);
+ dma_fence_put(fence);
- break;
- }
-
- if (remain < 0) {
- err = remain;
- break;
- }
-
- timeout -= check;
-
- if (timeout && check_count <= MAX_STUCK_CHECK_COUNT) {
- dev_warn(sp->host->dev,
- "%s: syncpoint id %u (%s) stuck waiting %d, timeout=%ld\n",
- current->comm, sp->id, sp->name,
- thresh, timeout);
-
- host1x_debug_dump_syncpts(sp->host);
-
- if (check_count == MAX_STUCK_CHECK_COUNT)
- host1x_debug_dump(sp->host);
-
- check_count++;
- }
- }
-
- host1x_intr_put_ref(sp->host, sp->id, ref, true);
+ if (value)
+ *value = host1x_syncpt_load(sp);
-done:
- return err;
+ if (wait_err == 0)
+ return -EAGAIN;
+ else if (wait_err < 0)
+ return wait_err;
+ else
+ return 0;
}
EXPORT_SYMBOL(host1x_syncpt_wait);
diff --git a/drivers/gpu/host1x/syncpt.h b/drivers/gpu/host1x/syncpt.h
index 95cd29b79d6d..4c3f3b2f0e9c 100644
--- a/drivers/gpu/host1x/syncpt.h
+++ b/drivers/gpu/host1x/syncpt.h
@@ -14,6 +14,7 @@
#include <linux/kref.h>
#include <linux/sched.h>
+#include "fence.h"
#include "intr.h"
struct host1x;
@@ -39,7 +40,7 @@ struct host1x_syncpt {
struct host1x_syncpt_base *base;
/* interrupt data */
- struct host1x_syncpt_intr intr;
+ struct host1x_fence_list fences;
/*
* If a submission incrementing this syncpoint fails, lock it so that
diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c
index 118318513e2d..c35eac1116f5 100644
--- a/drivers/gpu/ipu-v3/ipu-common.c
+++ b/drivers/gpu/ipu-v3/ipu-common.c
@@ -1165,6 +1165,7 @@ static int ipu_add_client_devices(struct ipu_soc *ipu, unsigned long ipu_base)
pdev = platform_device_alloc(reg->name, id++);
if (!pdev) {
ret = -ENOMEM;
+ of_node_put(of_node);
goto err_register;
}
diff --git a/drivers/greybus/core.c b/drivers/greybus/core.c
index e546c6431877..5714be740470 100644
--- a/drivers/greybus/core.c
+++ b/drivers/greybus/core.c
@@ -78,14 +78,14 @@ static int greybus_match_device(struct device *dev, struct device_driver *drv)
return 0;
}
-static int greybus_uevent(struct device *dev, struct kobj_uevent_env *env)
+static int greybus_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
- struct gb_host_device *hd;
- struct gb_module *module = NULL;
- struct gb_interface *intf = NULL;
- struct gb_control *control = NULL;
- struct gb_bundle *bundle = NULL;
- struct gb_svc *svc = NULL;
+ const struct gb_host_device *hd;
+ const struct gb_module *module = NULL;
+ const struct gb_interface *intf = NULL;
+ const struct gb_control *control = NULL;
+ const struct gb_bundle *bundle = NULL;
+ const struct gb_svc *svc = NULL;
if (is_gb_host_device(dev)) {
hd = to_gb_host_device(dev);
diff --git a/drivers/hid/.kunitconfig b/drivers/hid/.kunitconfig
index 04daeff5c970..675a8209c7ae 100644
--- a/drivers/hid/.kunitconfig
+++ b/drivers/hid/.kunitconfig
@@ -1,5 +1,6 @@
CONFIG_KUNIT=y
CONFIG_USB=y
CONFIG_USB_HID=y
+CONFIG_HID_BATTERY_STRENGTH=y
CONFIG_HID_UCLOGIC=y
CONFIG_HID_KUNIT_TEST=y
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index e2a5d30c8895..82f64fb31fda 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -2,13 +2,20 @@
#
# HID driver configuration
#
-menu "HID support"
- depends on INPUT
+menuconfig HID_SUPPORT
+ bool "HID bus support"
+ default y
+ depends on INPUT
+ help
+ This option adds core support for human interface device (HID).
+ You will also need drivers from the following menu to make use of it.
+
+if HID_SUPPORT
config HID
- tristate "HID bus support"
- depends on INPUT
+ tristate "HID bus core support"
default y
+ depends on INPUT
help
A human interface device (HID) is a type of computer device that
interacts directly with and takes input from humans. The term "HID"
@@ -329,6 +336,13 @@ config HID_ELO
Support for the ELO USB 4000/4500 touchscreens. Note that this is for
different devices than those handled by CONFIG_TOUCHSCREEN_USB_ELO.
+config HID_EVISION
+ tristate "EVision Keyboards Support"
+ depends on HID
+ help
+ Support for some EVision keyboards. Note that this is needed only when
+ applying customization using userspace programs.
+
config HID_EZKEY
tristate "Ezkey BTC 8193 keyboard"
default !EXPERT
@@ -1018,13 +1032,21 @@ config HID_SPEEDLINK
Support for Speedlink Vicious and Divine Cezanne mouse.
config HID_STEAM
- tristate "Steam Controller support"
+ tristate "Steam Controller/Deck support"
select POWER_SUPPLY
help
- Say Y here if you have a Steam Controller if you want to use it
+ Say Y here if you have a Steam Controller or Deck if you want to use it
without running the Steam Client. It supports both the wired and
the wireless adaptor.
+config STEAM_FF
+ bool "Steam Deck force feedback support"
+ depends on HID_STEAM
+ select INPUT_FF_MEMLESS
+ help
+ Say Y here if you want to enable force feedback support for the Steam
+ Deck.
+
config HID_STEELSERIES
tristate "Steelseries SRW-S1 steering wheel support"
help
@@ -1264,6 +1286,7 @@ config HID_MCP2221
config HID_KUNIT_TEST
tristate "KUnit tests for HID" if !KUNIT_ALL_TESTS
depends on KUNIT=y
+ depends on HID_BATTERY_STRENGTH
depends on HID_UCLOGIC
default KUNIT_ALL_TESTS
help
@@ -1279,6 +1302,8 @@ config HID_KUNIT_TEST
endmenu
+source "drivers/hid/bpf/Kconfig"
+
endif # HID
source "drivers/hid/usbhid/Kconfig"
@@ -1291,4 +1316,4 @@ source "drivers/hid/amd-sfh-hid/Kconfig"
source "drivers/hid/surface-hid/Kconfig"
-endmenu
+endif # HID_SUPPORT
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index e8014c1a2f8b..5d37cacbde33 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -5,6 +5,8 @@
hid-y := hid-core.o hid-input.o hid-quirks.o
hid-$(CONFIG_DEBUG_FS) += hid-debug.o
+obj-$(CONFIG_HID_BPF) += bpf/
+
obj-$(CONFIG_HID) += hid.o
obj-$(CONFIG_UHID) += uhid.o
@@ -45,6 +47,7 @@ obj-$(CONFIG_HID_EMS_FF) += hid-emsff.o
obj-$(CONFIG_HID_ELAN) += hid-elan.o
obj-$(CONFIG_HID_ELECOM) += hid-elecom.o
obj-$(CONFIG_HID_ELO) += hid-elo.o
+obj-$(CONFIG_HID_EVISION) += hid-evision.o
obj-$(CONFIG_HID_EZKEY) += hid-ezkey.o
obj-$(CONFIG_HID_FT260) += hid-ft260.o
obj-$(CONFIG_HID_GEMBIRD) += hid-gembird.o
diff --git a/drivers/hid/amd-sfh-hid/Kconfig b/drivers/hid/amd-sfh-hid/Kconfig
index db069a83e9a2..af752dd3a340 100644
--- a/drivers/hid/amd-sfh-hid/Kconfig
+++ b/drivers/hid/amd-sfh-hid/Kconfig
@@ -2,10 +2,10 @@
menu "AMD SFH HID Support"
depends on X86_64 || COMPILE_TEST
depends on PCI
- depends on HID
config AMD_SFH_HID
tristate "AMD Sensor Fusion Hub"
+ depends on HID
help
If you say yes to this option, support will be included for the
AMD Sensor Fusion Hub.
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_client.c b/drivers/hid/amd-sfh-hid/amd_sfh_client.c
index 1fb0f7105fb2..c751d12f5df8 100644
--- a/drivers/hid/amd-sfh-hid/amd_sfh_client.c
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_client.c
@@ -227,6 +227,7 @@ int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata)
cl_data->num_hid_devices = amd_mp2_get_sensor_num(privdata, &cl_data->sensor_idx[0]);
if (cl_data->num_hid_devices == 0)
return -ENODEV;
+ cl_data->is_any_sensor_enabled = false;
INIT_DELAYED_WORK(&cl_data->work, amd_sfh_work);
INIT_DELAYED_WORK(&cl_data->work_buffer, amd_sfh_work_buffer);
@@ -287,6 +288,7 @@ int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata)
status = amd_sfh_wait_for_response
(privdata, cl_data->sensor_idx[i], SENSOR_ENABLED);
if (status == SENSOR_ENABLED) {
+ cl_data->is_any_sensor_enabled = true;
cl_data->sensor_sts[i] = SENSOR_ENABLED;
rc = amdtp_hid_probe(cl_data->cur_hid_dev, cl_data);
if (rc) {
@@ -301,19 +303,26 @@ int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata)
cl_data->sensor_sts[i]);
goto cleanup;
}
+ } else {
+ cl_data->sensor_sts[i] = SENSOR_DISABLED;
+ dev_dbg(dev, "sid 0x%x (%s) status 0x%x\n",
+ cl_data->sensor_idx[i],
+ get_sensor_name(cl_data->sensor_idx[i]),
+ cl_data->sensor_sts[i]);
}
dev_dbg(dev, "sid 0x%x (%s) status 0x%x\n",
cl_data->sensor_idx[i], get_sensor_name(cl_data->sensor_idx[i]),
cl_data->sensor_sts[i]);
}
- if (mp2_ops->discovery_status && mp2_ops->discovery_status(privdata) == 0) {
+ if (!cl_data->is_any_sensor_enabled ||
+ (mp2_ops->discovery_status && mp2_ops->discovery_status(privdata) == 0)) {
amd_sfh_hid_client_deinit(privdata);
for (i = 0; i < cl_data->num_hid_devices; i++) {
devm_kfree(dev, cl_data->feature_report[i]);
devm_kfree(dev, in_data->input_report[i]);
devm_kfree(dev, cl_data->report_descr[i]);
}
- dev_warn(dev, "Failed to discover, sensors not enabled\n");
+ dev_warn(dev, "Failed to discover, sensors not enabled is %d\n", cl_data->is_any_sensor_enabled);
return -EOPNOTSUPP;
}
schedule_delayed_work(&cl_data->work_buffer, msecs_to_jiffies(AMD_SFH_IDLE_LOOP));
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_hid.c b/drivers/hid/amd-sfh-hid/amd_sfh_hid.c
index 1b18291fc5af..705b52337068 100644
--- a/drivers/hid/amd-sfh-hid/amd_sfh_hid.c
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_hid.c
@@ -112,7 +112,7 @@ void amdtp_hid_wakeup(struct hid_device *hid)
}
}
-static struct hid_ll_driver amdtp_hid_ll_driver = {
+static const struct hid_ll_driver amdtp_hid_ll_driver = {
.parse = amdtp_hid_parse,
.start = amdtp_hid_start,
.stop = amdtp_hid_stop,
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_hid.h b/drivers/hid/amd-sfh-hid/amd_sfh_hid.h
index 3754fb423e3a..528036892c9d 100644
--- a/drivers/hid/amd-sfh-hid/amd_sfh_hid.h
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_hid.h
@@ -32,6 +32,7 @@ struct amd_input_data {
struct amdtp_cl_data {
u8 init_done;
u32 cur_hid_dev;
+ bool is_any_sensor_enabled;
u32 hid_dev_count;
u32 num_hid_devices;
struct device_info *hid_devices;
diff --git a/drivers/hid/bpf/Kconfig b/drivers/hid/bpf/Kconfig
new file mode 100644
index 000000000000..83214bae6768
--- /dev/null
+++ b/drivers/hid/bpf/Kconfig
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: GPL-2.0-only
+menu "HID-BPF support"
+
+config HID_BPF
+ bool "HID-BPF support"
+ depends on BPF
+ depends on BPF_SYSCALL
+ depends on DYNAMIC_FTRACE_WITH_DIRECT_CALLS
+ help
+ This option allows to support eBPF programs on the HID subsystem.
+ eBPF programs can fix HID devices in a lighter way than a full
+ kernel patch and allow a lot more flexibility.
+
+ For documentation, see Documentation/hid/hid-bpf.rst
+
+endmenu
diff --git a/drivers/hid/bpf/Makefile b/drivers/hid/bpf/Makefile
new file mode 100644
index 000000000000..cf55120cf7d6
--- /dev/null
+++ b/drivers/hid/bpf/Makefile
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for HID-BPF
+#
+
+LIBBPF_INCLUDE = $(srctree)/tools/lib
+
+obj-$(CONFIG_HID_BPF) += hid_bpf.o
+CFLAGS_hid_bpf_dispatch.o += -I$(LIBBPF_INCLUDE)
+CFLAGS_hid_bpf_jmp_table.o += -I$(LIBBPF_INCLUDE)
+hid_bpf-objs += hid_bpf_dispatch.o hid_bpf_jmp_table.o
diff --git a/drivers/hid/bpf/entrypoints/Makefile b/drivers/hid/bpf/entrypoints/Makefile
new file mode 100644
index 000000000000..a12edcfa4fe3
--- /dev/null
+++ b/drivers/hid/bpf/entrypoints/Makefile
@@ -0,0 +1,93 @@
+# SPDX-License-Identifier: GPL-2.0
+OUTPUT := .output
+abs_out := $(abspath $(OUTPUT))
+
+CLANG ?= clang
+LLC ?= llc
+LLVM_STRIP ?= llvm-strip
+
+TOOLS_PATH := $(abspath ../../../../tools)
+BPFTOOL_SRC := $(TOOLS_PATH)/bpf/bpftool
+BPFTOOL_OUTPUT := $(abs_out)/bpftool
+DEFAULT_BPFTOOL := $(BPFTOOL_OUTPUT)/bootstrap/bpftool
+BPFTOOL ?= $(DEFAULT_BPFTOOL)
+
+LIBBPF_SRC := $(TOOLS_PATH)/lib/bpf
+LIBBPF_OUTPUT := $(abs_out)/libbpf
+LIBBPF_DESTDIR := $(LIBBPF_OUTPUT)
+LIBBPF_INCLUDE := $(LIBBPF_DESTDIR)/include
+BPFOBJ := $(LIBBPF_OUTPUT)/libbpf.a
+
+INCLUDES := -I$(OUTPUT) -I$(LIBBPF_INCLUDE) -I$(TOOLS_PATH)/include/uapi
+CFLAGS := -g -Wall
+
+VMLINUX_BTF_PATHS ?= $(if $(O),$(O)/vmlinux) \
+ $(if $(KBUILD_OUTPUT),$(KBUILD_OUTPUT)/vmlinux) \
+ ../../../../vmlinux \
+ /sys/kernel/btf/vmlinux \
+ /boot/vmlinux-$(shell uname -r)
+VMLINUX_BTF ?= $(abspath $(firstword $(wildcard $(VMLINUX_BTF_PATHS))))
+ifeq ($(VMLINUX_BTF),)
+$(error Cannot find a vmlinux for VMLINUX_BTF at any of "$(VMLINUX_BTF_PATHS)")
+endif
+
+ifeq ($(V),1)
+Q =
+msg =
+else
+Q = @
+msg = @printf ' %-8s %s%s\n' "$(1)" "$(notdir $(2))" "$(if $(3), $(3))";
+MAKEFLAGS += --no-print-directory
+submake_extras := feature_display=0
+endif
+
+.DELETE_ON_ERROR:
+
+.PHONY: all clean
+
+all: entrypoints.lskel.h
+
+clean:
+ $(call msg,CLEAN)
+ $(Q)rm -rf $(OUTPUT) entrypoints
+
+entrypoints.lskel.h: $(OUTPUT)/entrypoints.bpf.o | $(BPFTOOL)
+ $(call msg,GEN-SKEL,$@)
+ $(Q)$(BPFTOOL) gen skeleton -L $< > $@
+
+
+$(OUTPUT)/entrypoints.bpf.o: entrypoints.bpf.c $(OUTPUT)/vmlinux.h $(BPFOBJ) | $(OUTPUT)
+ $(call msg,BPF,$@)
+ $(Q)$(CLANG) -g -O2 -target bpf $(INCLUDES) \
+ -c $(filter %.c,$^) -o $@ && \
+ $(LLVM_STRIP) -g $@
+
+$(OUTPUT)/vmlinux.h: $(VMLINUX_BTF) $(BPFTOOL) | $(INCLUDE_DIR)
+ifeq ($(VMLINUX_H),)
+ $(call msg,GEN,,$@)
+ $(Q)$(BPFTOOL) btf dump file $(VMLINUX_BTF) format c > $@
+else
+ $(call msg,CP,,$@)
+ $(Q)cp "$(VMLINUX_H)" $@
+endif
+
+$(OUTPUT) $(LIBBPF_OUTPUT) $(BPFTOOL_OUTPUT):
+ $(call msg,MKDIR,$@)
+ $(Q)mkdir -p $@
+
+$(BPFOBJ): $(wildcard $(LIBBPF_SRC)/*.[ch] $(LIBBPF_SRC)/Makefile) | $(LIBBPF_OUTPUT)
+ $(Q)$(MAKE) $(submake_extras) -C $(LIBBPF_SRC) \
+ OUTPUT=$(abspath $(dir $@))/ prefix= \
+ DESTDIR=$(LIBBPF_DESTDIR) $(abspath $@) install_headers
+
+ifeq ($(CROSS_COMPILE),)
+$(DEFAULT_BPFTOOL): $(BPFOBJ) | $(BPFTOOL_OUTPUT)
+ $(Q)$(MAKE) $(submake_extras) -C $(BPFTOOL_SRC) \
+ OUTPUT=$(BPFTOOL_OUTPUT)/ \
+ LIBBPF_BOOTSTRAP_OUTPUT=$(LIBBPF_OUTPUT)/ \
+ LIBBPF_BOOTSTRAP_DESTDIR=$(LIBBPF_DESTDIR)/ bootstrap
+else
+$(DEFAULT_BPFTOOL): | $(BPFTOOL_OUTPUT)
+ $(Q)$(MAKE) $(submake_extras) -C $(BPFTOOL_SRC) \
+ OUTPUT=$(BPFTOOL_OUTPUT)/ bootstrap
+endif
diff --git a/drivers/hid/bpf/entrypoints/README b/drivers/hid/bpf/entrypoints/README
new file mode 100644
index 000000000000..147e0d41509f
--- /dev/null
+++ b/drivers/hid/bpf/entrypoints/README
@@ -0,0 +1,4 @@
+WARNING:
+If you change "entrypoints.bpf.c" do "make -j" in this directory to rebuild "entrypoints.skel.h".
+Make sure to have clang 10 installed.
+See Documentation/bpf/bpf_devel_QA.rst
diff --git a/drivers/hid/bpf/entrypoints/entrypoints.bpf.c b/drivers/hid/bpf/entrypoints/entrypoints.bpf.c
new file mode 100644
index 000000000000..c22921125a1a
--- /dev/null
+++ b/drivers/hid/bpf/entrypoints/entrypoints.bpf.c
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Benjamin Tissoires */
+
+#include ".output/vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+#define HID_BPF_MAX_PROGS 1024
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
+ __uint(max_entries, HID_BPF_MAX_PROGS);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(__u32));
+} hid_jmp_table SEC(".maps");
+
+SEC("fmod_ret/__hid_bpf_tail_call")
+int BPF_PROG(hid_tail_call, struct hid_bpf_ctx *hctx)
+{
+ bpf_tail_call(ctx, &hid_jmp_table, hctx->index);
+
+ return 0;
+}
+
+char LICENSE[] SEC("license") = "GPL";
diff --git a/drivers/hid/bpf/entrypoints/entrypoints.lskel.h b/drivers/hid/bpf/entrypoints/entrypoints.lskel.h
new file mode 100644
index 000000000000..35618051598c
--- /dev/null
+++ b/drivers/hid/bpf/entrypoints/entrypoints.lskel.h
@@ -0,0 +1,248 @@
+/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
+/* THIS FILE IS AUTOGENERATED BY BPFTOOL! */
+#ifndef __ENTRYPOINTS_BPF_SKEL_H__
+#define __ENTRYPOINTS_BPF_SKEL_H__
+
+#include <bpf/skel_internal.h>
+
+struct entrypoints_bpf {
+ struct bpf_loader_ctx ctx;
+ struct {
+ struct bpf_map_desc hid_jmp_table;
+ } maps;
+ struct {
+ struct bpf_prog_desc hid_tail_call;
+ } progs;
+ struct {
+ int hid_tail_call_fd;
+ } links;
+};
+
+static inline int
+entrypoints_bpf__hid_tail_call__attach(struct entrypoints_bpf *skel)
+{
+ int prog_fd = skel->progs.hid_tail_call.prog_fd;
+ int fd = skel_raw_tracepoint_open(NULL, prog_fd);
+
+ if (fd > 0)
+ skel->links.hid_tail_call_fd = fd;
+ return fd;
+}
+
+static inline int
+entrypoints_bpf__attach(struct entrypoints_bpf *skel)
+{
+ int ret = 0;
+
+ ret = ret < 0 ? ret : entrypoints_bpf__hid_tail_call__attach(skel);
+ return ret < 0 ? ret : 0;
+}
+
+static inline void
+entrypoints_bpf__detach(struct entrypoints_bpf *skel)
+{
+ skel_closenz(skel->links.hid_tail_call_fd);
+}
+static void
+entrypoints_bpf__destroy(struct entrypoints_bpf *skel)
+{
+ if (!skel)
+ return;
+ entrypoints_bpf__detach(skel);
+ skel_closenz(skel->progs.hid_tail_call.prog_fd);
+ skel_closenz(skel->maps.hid_jmp_table.map_fd);
+ skel_free(skel);
+}
+static inline struct entrypoints_bpf *
+entrypoints_bpf__open(void)
+{
+ struct entrypoints_bpf *skel;
+
+ skel = skel_alloc(sizeof(*skel));
+ if (!skel)
+ goto cleanup;
+ skel->ctx.sz = (void *)&skel->links - (void *)skel;
+ return skel;
+cleanup:
+ entrypoints_bpf__destroy(skel);
+ return NULL;
+}
+
+static inline int
+entrypoints_bpf__load(struct entrypoints_bpf *skel)
+{
+ struct bpf_load_and_run_opts opts = {};
+ int err;
+
+ opts.ctx = (struct bpf_loader_ctx *)skel;
+ opts.data_sz = 2856;
+ opts.data = (void *)"\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x9f\xeb\x01\0\
+\x18\0\0\0\0\0\0\0\x60\x02\0\0\x60\x02\0\0\x12\x02\0\0\0\0\0\0\0\0\0\x02\x03\0\
+\0\0\x01\0\0\0\0\0\0\x01\x04\0\0\0\x20\0\0\x01\0\0\0\0\0\0\0\x03\0\0\0\0\x02\0\
+\0\0\x04\0\0\0\x03\0\0\0\x05\0\0\0\0\0\0\x01\x04\0\0\0\x20\0\0\0\0\0\0\0\0\0\0\
+\x02\x06\0\0\0\0\0\0\0\0\0\0\x03\0\0\0\0\x02\0\0\0\x04\0\0\0\0\x04\0\0\0\0\0\0\
+\0\0\0\x02\x08\0\0\0\0\0\0\0\0\0\0\x03\0\0\0\0\x02\0\0\0\x04\0\0\0\x04\0\0\0\0\
+\0\0\0\x04\0\0\x04\x20\0\0\0\x19\0\0\0\x01\0\0\0\0\0\0\0\x1e\0\0\0\x05\0\0\0\
+\x40\0\0\0\x2a\0\0\0\x07\0\0\0\x80\0\0\0\x33\0\0\0\x07\0\0\0\xc0\0\0\0\x3e\0\0\
+\0\0\0\0\x0e\x09\0\0\0\x01\0\0\0\0\0\0\0\0\0\0\x02\x0c\0\0\0\x4c\0\0\0\0\0\0\
+\x01\x08\0\0\0\x40\0\0\0\0\0\0\0\x01\0\0\x0d\x02\0\0\0\x5f\0\0\0\x0b\0\0\0\x63\
+\0\0\0\x01\0\0\x0c\x0d\0\0\0\x09\x01\0\0\x05\0\0\x04\x20\0\0\0\x15\x01\0\0\x10\
+\0\0\0\0\0\0\0\x1b\x01\0\0\x12\0\0\0\x40\0\0\0\x1f\x01\0\0\x10\0\0\0\x80\0\0\0\
+\x2e\x01\0\0\x14\0\0\0\xa0\0\0\0\0\0\0\0\x15\0\0\0\xc0\0\0\0\x3a\x01\0\0\0\0\0\
+\x08\x11\0\0\0\x40\x01\0\0\0\0\0\x01\x04\0\0\0\x20\0\0\0\0\0\0\0\0\0\0\x02\x13\
+\0\0\0\0\0\0\0\0\0\0\x0a\x1c\0\0\0\x4d\x01\0\0\x04\0\0\x06\x04\0\0\0\x5d\x01\0\
+\0\0\0\0\0\x6e\x01\0\0\x01\0\0\0\x80\x01\0\0\x02\0\0\0\x93\x01\0\0\x03\0\0\0\0\
+\0\0\0\x02\0\0\x05\x04\0\0\0\xa4\x01\0\0\x16\0\0\0\0\0\0\0\xab\x01\0\0\x16\0\0\
+\0\0\0\0\0\xb0\x01\0\0\0\0\0\x08\x02\0\0\0\xec\x01\0\0\0\0\0\x01\x01\0\0\0\x08\
+\0\0\x01\0\0\0\0\0\0\0\x03\0\0\0\0\x17\0\0\0\x04\0\0\0\x04\0\0\0\xf1\x01\0\0\0\
+\0\0\x0e\x18\0\0\0\x01\0\0\0\xf9\x01\0\0\x01\0\0\x0f\x20\0\0\0\x0a\0\0\0\0\0\0\
+\0\x20\0\0\0\xff\x01\0\0\x01\0\0\x0f\x04\0\0\0\x19\0\0\0\0\0\0\0\x04\0\0\0\x07\
+\x02\0\0\0\0\0\x07\0\0\0\0\0\x69\x6e\x74\0\x5f\x5f\x41\x52\x52\x41\x59\x5f\x53\
+\x49\x5a\x45\x5f\x54\x59\x50\x45\x5f\x5f\0\x74\x79\x70\x65\0\x6d\x61\x78\x5f\
+\x65\x6e\x74\x72\x69\x65\x73\0\x6b\x65\x79\x5f\x73\x69\x7a\x65\0\x76\x61\x6c\
+\x75\x65\x5f\x73\x69\x7a\x65\0\x68\x69\x64\x5f\x6a\x6d\x70\x5f\x74\x61\x62\x6c\
+\x65\0\x75\x6e\x73\x69\x67\x6e\x65\x64\x20\x6c\x6f\x6e\x67\x20\x6c\x6f\x6e\x67\
+\0\x63\x74\x78\0\x68\x69\x64\x5f\x74\x61\x69\x6c\x5f\x63\x61\x6c\x6c\0\x66\x6d\
+\x6f\x64\x5f\x72\x65\x74\x2f\x5f\x5f\x68\x69\x64\x5f\x62\x70\x66\x5f\x74\x61\
+\x69\x6c\x5f\x63\x61\x6c\x6c\0\x2f\x68\x6f\x6d\x65\x2f\x62\x74\x69\x73\x73\x6f\
+\x69\x72\x2f\x53\x72\x63\x2f\x68\x69\x64\x2f\x64\x72\x69\x76\x65\x72\x73\x2f\
+\x68\x69\x64\x2f\x62\x70\x66\x2f\x65\x6e\x74\x72\x79\x70\x6f\x69\x6e\x74\x73\
+\x2f\x65\x6e\x74\x72\x79\x70\x6f\x69\x6e\x74\x73\x2e\x62\x70\x66\x2e\x63\0\x69\
+\x6e\x74\x20\x42\x50\x46\x5f\x50\x52\x4f\x47\x28\x68\x69\x64\x5f\x74\x61\x69\
+\x6c\x5f\x63\x61\x6c\x6c\x2c\x20\x73\x74\x72\x75\x63\x74\x20\x68\x69\x64\x5f\
+\x62\x70\x66\x5f\x63\x74\x78\x20\x2a\x68\x63\x74\x78\x29\0\x68\x69\x64\x5f\x62\
+\x70\x66\x5f\x63\x74\x78\0\x69\x6e\x64\x65\x78\0\x68\x69\x64\0\x61\x6c\x6c\x6f\
+\x63\x61\x74\x65\x64\x5f\x73\x69\x7a\x65\0\x72\x65\x70\x6f\x72\x74\x5f\x74\x79\
+\x70\x65\0\x5f\x5f\x75\x33\x32\0\x75\x6e\x73\x69\x67\x6e\x65\x64\x20\x69\x6e\
+\x74\0\x68\x69\x64\x5f\x72\x65\x70\x6f\x72\x74\x5f\x74\x79\x70\x65\0\x48\x49\
+\x44\x5f\x49\x4e\x50\x55\x54\x5f\x52\x45\x50\x4f\x52\x54\0\x48\x49\x44\x5f\x4f\
+\x55\x54\x50\x55\x54\x5f\x52\x45\x50\x4f\x52\x54\0\x48\x49\x44\x5f\x46\x45\x41\
+\x54\x55\x52\x45\x5f\x52\x45\x50\x4f\x52\x54\0\x48\x49\x44\x5f\x52\x45\x50\x4f\
+\x52\x54\x5f\x54\x59\x50\x45\x53\0\x72\x65\x74\x76\x61\x6c\0\x73\x69\x7a\x65\0\
+\x5f\x5f\x73\x33\x32\0\x30\x3a\x30\0\x09\x62\x70\x66\x5f\x74\x61\x69\x6c\x5f\
+\x63\x61\x6c\x6c\x28\x63\x74\x78\x2c\x20\x26\x68\x69\x64\x5f\x6a\x6d\x70\x5f\
+\x74\x61\x62\x6c\x65\x2c\x20\x68\x63\x74\x78\x2d\x3e\x69\x6e\x64\x65\x78\x29\
+\x3b\0\x63\x68\x61\x72\0\x4c\x49\x43\x45\x4e\x53\x45\0\x2e\x6d\x61\x70\x73\0\
+\x6c\x69\x63\x65\x6e\x73\x65\0\x68\x69\x64\x5f\x64\x65\x76\x69\x63\x65\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x8a\x04\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x03\
+\0\0\0\x04\0\0\0\x04\0\0\0\0\x04\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x68\x69\x64\x5f\
+\x6a\x6d\x70\x5f\x74\x61\x62\x6c\x65\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\x47\x50\x4c\0\0\0\0\0\x79\x12\0\0\0\0\0\0\x61\x23\0\0\0\0\
+\0\0\x18\x52\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x85\0\0\0\x0c\0\0\0\xb7\0\0\0\0\0\0\0\
+\x95\0\0\0\0\0\0\0\0\0\0\0\x0e\0\0\0\0\0\0\0\x8e\0\0\0\xd3\0\0\0\x05\x48\0\0\
+\x01\0\0\0\x8e\0\0\0\xba\x01\0\0\x02\x50\0\0\x05\0\0\0\x8e\0\0\0\xd3\0\0\0\x05\
+\x48\0\0\x08\0\0\0\x0f\0\0\0\xb6\x01\0\0\0\0\0\0\x1a\0\0\0\x07\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x68\x69\
+\x64\x5f\x74\x61\x69\x6c\x5f\x63\x61\x6c\x6c\0\0\0\0\0\0\0\x1a\0\0\0\0\0\0\0\
+\x08\0\0\0\0\0\0\0\0\0\0\0\x01\0\0\0\x10\0\0\0\0\0\0\0\0\0\0\0\x03\0\0\0\x01\0\
+\0\0\0\0\0\0\x01\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x10\0\0\0\0\0\0\0\x5f\
+\x5f\x68\x69\x64\x5f\x62\x70\x66\x5f\x74\x61\x69\x6c\x5f\x63\x61\x6c\x6c\0\0\0\
+\0\0";
+ opts.insns_sz = 1192;
+ opts.insns = (void *)"\
+\xbf\x16\0\0\0\0\0\0\xbf\xa1\0\0\0\0\0\0\x07\x01\0\0\x78\xff\xff\xff\xb7\x02\0\
+\0\x88\0\0\0\xb7\x03\0\0\0\0\0\0\x85\0\0\0\x71\0\0\0\x05\0\x11\0\0\0\0\0\x61\
+\xa1\x78\xff\0\0\0\0\xd5\x01\x01\0\0\0\0\0\x85\0\0\0\xa8\0\0\0\x61\xa1\x7c\xff\
+\0\0\0\0\xd5\x01\x01\0\0\0\0\0\x85\0\0\0\xa8\0\0\0\x61\xa1\x80\xff\0\0\0\0\xd5\
+\x01\x01\0\0\0\0\0\x85\0\0\0\xa8\0\0\0\x18\x60\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x61\
+\x01\0\0\0\0\0\0\xd5\x01\x02\0\0\0\0\0\xbf\x19\0\0\0\0\0\0\x85\0\0\0\xa8\0\0\0\
+\xbf\x70\0\0\0\0\0\0\x95\0\0\0\0\0\0\0\x61\x60\x08\0\0\0\0\0\x18\x61\0\0\0\0\0\
+\0\0\0\0\0\xa8\x09\0\0\x63\x01\0\0\0\0\0\0\x61\x60\x0c\0\0\0\0\0\x18\x61\0\0\0\
+\0\0\0\0\0\0\0\xa4\x09\0\0\x63\x01\0\0\0\0\0\0\x79\x60\x10\0\0\0\0\0\x18\x61\0\
+\0\0\0\0\0\0\0\0\0\x98\x09\0\0\x7b\x01\0\0\0\0\0\0\x18\x60\0\0\0\0\0\0\0\0\0\0\
+\0\x05\0\0\x18\x61\0\0\0\0\0\0\0\0\0\0\x90\x09\0\0\x7b\x01\0\0\0\0\0\0\xb7\x01\
+\0\0\x12\0\0\0\x18\x62\0\0\0\0\0\0\0\0\0\0\x90\x09\0\0\xb7\x03\0\0\x1c\0\0\0\
+\x85\0\0\0\xa6\0\0\0\xbf\x07\0\0\0\0\0\0\xc5\x07\xd7\xff\0\0\0\0\x63\x7a\x78\
+\xff\0\0\0\0\x61\x60\x1c\0\0\0\0\0\x15\0\x03\0\0\0\0\0\x18\x61\0\0\0\0\0\0\0\0\
+\0\0\xbc\x09\0\0\x63\x01\0\0\0\0\0\0\xb7\x01\0\0\0\0\0\0\x18\x62\0\0\0\0\0\0\0\
+\0\0\0\xb0\x09\0\0\xb7\x03\0\0\x48\0\0\0\x85\0\0\0\xa6\0\0\0\xbf\x07\0\0\0\0\0\
+\0\xc5\x07\xca\xff\0\0\0\0\x18\x61\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x63\x71\0\0\0\0\
+\0\0\x18\x60\0\0\0\0\0\0\0\0\0\0\xf8\x09\0\0\x18\x61\0\0\0\0\0\0\0\0\0\0\x90\
+\x0a\0\0\x7b\x01\0\0\0\0\0\0\x18\x60\0\0\0\0\0\0\0\0\0\0\0\x0a\0\0\x18\x61\0\0\
+\0\0\0\0\0\0\0\0\x88\x0a\0\0\x7b\x01\0\0\0\0\0\0\x18\x60\0\0\0\0\0\0\0\0\0\0\
+\x38\x0a\0\0\x18\x61\0\0\0\0\0\0\0\0\0\0\xd0\x0a\0\0\x7b\x01\0\0\0\0\0\0\x18\
+\x60\0\0\0\0\0\0\0\0\0\0\x40\x0a\0\0\x18\x61\0\0\0\0\0\0\0\0\0\0\xe0\x0a\0\0\
+\x7b\x01\0\0\0\0\0\0\x18\x60\0\0\0\0\0\0\0\0\0\0\x70\x0a\0\0\x18\x61\0\0\0\0\0\
+\0\0\0\0\0\0\x0b\0\0\x7b\x01\0\0\0\0\0\0\x18\x60\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\x18\x61\0\0\0\0\0\0\0\0\0\0\xf8\x0a\0\0\x7b\x01\0\0\0\0\0\0\x61\x60\x08\0\0\0\
+\0\0\x18\x61\0\0\0\0\0\0\0\0\0\0\x98\x0a\0\0\x63\x01\0\0\0\0\0\0\x61\x60\x0c\0\
+\0\0\0\0\x18\x61\0\0\0\0\0\0\0\0\0\0\x9c\x0a\0\0\x63\x01\0\0\0\0\0\0\x79\x60\
+\x10\0\0\0\0\0\x18\x61\0\0\0\0\0\0\0\0\0\0\xa0\x0a\0\0\x7b\x01\0\0\0\0\0\0\x61\
+\xa0\x78\xff\0\0\0\0\x18\x61\0\0\0\0\0\0\0\0\0\0\xc8\x0a\0\0\x63\x01\0\0\0\0\0\
+\0\x18\x61\0\0\0\0\0\0\0\0\0\0\x10\x0b\0\0\xb7\x02\0\0\x14\0\0\0\xb7\x03\0\0\
+\x0c\0\0\0\xb7\x04\0\0\0\0\0\0\x85\0\0\0\xa7\0\0\0\xbf\x07\0\0\0\0\0\0\xc5\x07\
+\x91\xff\0\0\0\0\x18\x60\0\0\0\0\0\0\0\0\0\0\x80\x0a\0\0\x63\x70\x6c\0\0\0\0\0\
+\x77\x07\0\0\x20\0\0\0\x63\x70\x70\0\0\0\0\0\xb7\x01\0\0\x05\0\0\0\x18\x62\0\0\
+\0\0\0\0\0\0\0\0\x80\x0a\0\0\xb7\x03\0\0\x8c\0\0\0\x85\0\0\0\xa6\0\0\0\xbf\x07\
+\0\0\0\0\0\0\x18\x60\0\0\0\0\0\0\0\0\0\0\xf0\x0a\0\0\x61\x01\0\0\0\0\0\0\xd5\
+\x01\x02\0\0\0\0\0\xbf\x19\0\0\0\0\0\0\x85\0\0\0\xa8\0\0\0\xc5\x07\x7f\xff\0\0\
+\0\0\x63\x7a\x80\xff\0\0\0\0\x61\xa1\x78\xff\0\0\0\0\xd5\x01\x02\0\0\0\0\0\xbf\
+\x19\0\0\0\0\0\0\x85\0\0\0\xa8\0\0\0\x61\xa0\x80\xff\0\0\0\0\x63\x06\x28\0\0\0\
+\0\0\x18\x61\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x61\x10\0\0\0\0\0\0\x63\x06\x18\0\0\0\
+\0\0\xb7\0\0\0\0\0\0\0\x95\0\0\0\0\0\0\0";
+ err = bpf_load_and_run(&opts);
+ if (err < 0)
+ return err;
+ return 0;
+}
+
+static inline struct entrypoints_bpf *
+entrypoints_bpf__open_and_load(void)
+{
+ struct entrypoints_bpf *skel;
+
+ skel = entrypoints_bpf__open();
+ if (!skel)
+ return NULL;
+ if (entrypoints_bpf__load(skel)) {
+ entrypoints_bpf__destroy(skel);
+ return NULL;
+ }
+ return skel;
+}
+
+__attribute__((unused)) static void
+entrypoints_bpf__assert(struct entrypoints_bpf *s __attribute__((unused)))
+{
+#ifdef __cplusplus
+#define _Static_assert static_assert
+#endif
+#ifdef __cplusplus
+#undef _Static_assert
+#endif
+}
+
+#endif /* __ENTRYPOINTS_BPF_SKEL_H__ */
diff --git a/drivers/hid/bpf/hid_bpf_dispatch.c b/drivers/hid/bpf/hid_bpf_dispatch.c
new file mode 100644
index 000000000000..8a034a555d4c
--- /dev/null
+++ b/drivers/hid/bpf/hid_bpf_dispatch.c
@@ -0,0 +1,551 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+/*
+ * HID-BPF support for Linux
+ *
+ * Copyright (c) 2022 Benjamin Tissoires
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/bitops.h>
+#include <linux/btf.h>
+#include <linux/btf_ids.h>
+#include <linux/filter.h>
+#include <linux/hid.h>
+#include <linux/hid_bpf.h>
+#include <linux/init.h>
+#include <linux/kfifo.h>
+#include <linux/minmax.h>
+#include <linux/module.h>
+#include <linux/workqueue.h>
+#include "hid_bpf_dispatch.h"
+#include "entrypoints/entrypoints.lskel.h"
+
+struct hid_bpf_ops *hid_bpf_ops;
+EXPORT_SYMBOL(hid_bpf_ops);
+
+/**
+ * hid_bpf_device_event - Called whenever an event is coming in from the device
+ *
+ * @ctx: The HID-BPF context
+ *
+ * @return %0 on success and keep processing; a positive value to change the
+ * incoming size buffer; a negative error code to interrupt the processing
+ * of this event
+ *
+ * Declare an %fmod_ret tracing bpf program to this function and attach this
+ * program through hid_bpf_attach_prog() to have this helper called for
+ * any incoming event from the device itself.
+ *
+ * The function is called while on IRQ context, so we can not sleep.
+ */
+/* never used by the kernel but declared so we can load and attach a tracepoint */
+__weak noinline int hid_bpf_device_event(struct hid_bpf_ctx *ctx)
+{
+ return 0;
+}
+
+u8 *
+dispatch_hid_bpf_device_event(struct hid_device *hdev, enum hid_report_type type, u8 *data,
+ u32 *size, int interrupt)
+{
+ struct hid_bpf_ctx_kern ctx_kern = {
+ .ctx = {
+ .hid = hdev,
+ .report_type = type,
+ .allocated_size = hdev->bpf.allocated_data,
+ .size = *size,
+ },
+ .data = hdev->bpf.device_data,
+ };
+ int ret;
+
+ if (type >= HID_REPORT_TYPES)
+ return ERR_PTR(-EINVAL);
+
+ /* no program has been attached yet */
+ if (!hdev->bpf.device_data)
+ return data;
+
+ memset(ctx_kern.data, 0, hdev->bpf.allocated_data);
+ memcpy(ctx_kern.data, data, *size);
+
+ ret = hid_bpf_prog_run(hdev, HID_BPF_PROG_TYPE_DEVICE_EVENT, &ctx_kern);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ if (ret) {
+ if (ret > ctx_kern.ctx.allocated_size)
+ return ERR_PTR(-EINVAL);
+
+ *size = ret;
+ }
+
+ return ctx_kern.data;
+}
+EXPORT_SYMBOL_GPL(dispatch_hid_bpf_device_event);
+
+/**
+ * hid_bpf_rdesc_fixup - Called when the probe function parses the report
+ * descriptor of the HID device
+ *
+ * @ctx: The HID-BPF context
+ *
+ * @return 0 on success and keep processing; a positive value to change the
+ * incoming size buffer; a negative error code to interrupt the processing
+ * of this event
+ *
+ * Declare an %fmod_ret tracing bpf program to this function and attach this
+ * program through hid_bpf_attach_prog() to have this helper called before any
+ * parsing of the report descriptor by HID.
+ */
+/* never used by the kernel but declared so we can load and attach a tracepoint */
+__weak noinline int hid_bpf_rdesc_fixup(struct hid_bpf_ctx *ctx)
+{
+ return 0;
+}
+
+u8 *call_hid_bpf_rdesc_fixup(struct hid_device *hdev, u8 *rdesc, unsigned int *size)
+{
+ int ret;
+ struct hid_bpf_ctx_kern ctx_kern = {
+ .ctx = {
+ .hid = hdev,
+ .size = *size,
+ .allocated_size = HID_MAX_DESCRIPTOR_SIZE,
+ },
+ };
+
+ ctx_kern.data = kzalloc(ctx_kern.ctx.allocated_size, GFP_KERNEL);
+ if (!ctx_kern.data)
+ goto ignore_bpf;
+
+ memcpy(ctx_kern.data, rdesc, min_t(unsigned int, *size, HID_MAX_DESCRIPTOR_SIZE));
+
+ ret = hid_bpf_prog_run(hdev, HID_BPF_PROG_TYPE_RDESC_FIXUP, &ctx_kern);
+ if (ret < 0)
+ goto ignore_bpf;
+
+ if (ret) {
+ if (ret > ctx_kern.ctx.allocated_size)
+ goto ignore_bpf;
+
+ *size = ret;
+ }
+
+ rdesc = krealloc(ctx_kern.data, *size, GFP_KERNEL);
+
+ return rdesc;
+
+ ignore_bpf:
+ kfree(ctx_kern.data);
+ return kmemdup(rdesc, *size, GFP_KERNEL);
+}
+EXPORT_SYMBOL_GPL(call_hid_bpf_rdesc_fixup);
+
+/**
+ * hid_bpf_get_data - Get the kernel memory pointer associated with the context @ctx
+ *
+ * @ctx: The HID-BPF context
+ * @offset: The offset within the memory
+ * @rdwr_buf_size: the const size of the buffer
+ *
+ * @returns %NULL on error, an %__u8 memory pointer on success
+ */
+noinline __u8 *
+hid_bpf_get_data(struct hid_bpf_ctx *ctx, unsigned int offset, const size_t rdwr_buf_size)
+{
+ struct hid_bpf_ctx_kern *ctx_kern;
+
+ if (!ctx)
+ return NULL;
+
+ ctx_kern = container_of(ctx, struct hid_bpf_ctx_kern, ctx);
+
+ if (rdwr_buf_size + offset > ctx->allocated_size)
+ return NULL;
+
+ return ctx_kern->data + offset;
+}
+
+/*
+ * The following set contains all functions we agree BPF programs
+ * can use.
+ */
+BTF_SET8_START(hid_bpf_kfunc_ids)
+BTF_ID_FLAGS(func, hid_bpf_get_data, KF_RET_NULL)
+BTF_SET8_END(hid_bpf_kfunc_ids)
+
+static const struct btf_kfunc_id_set hid_bpf_kfunc_set = {
+ .owner = THIS_MODULE,
+ .set = &hid_bpf_kfunc_ids,
+};
+
+static int device_match_id(struct device *dev, const void *id)
+{
+ struct hid_device *hdev = to_hid_device(dev);
+
+ return hdev->id == *(int *)id;
+}
+
+static int __hid_bpf_allocate_data(struct hid_device *hdev, u8 **data, u32 *size)
+{
+ u8 *alloc_data;
+ unsigned int i, j, max_report_len = 0;
+ size_t alloc_size = 0;
+
+ /* compute the maximum report length for this device */
+ for (i = 0; i < HID_REPORT_TYPES; i++) {
+ struct hid_report_enum *report_enum = hdev->report_enum + i;
+
+ for (j = 0; j < HID_MAX_IDS; j++) {
+ struct hid_report *report = report_enum->report_id_hash[j];
+
+ if (report)
+ max_report_len = max(max_report_len, hid_report_len(report));
+ }
+ }
+
+ /*
+ * Give us a little bit of extra space and some predictability in the
+ * buffer length we create. This way, we can tell users that they can
+ * work on chunks of 64 bytes of memory without having the bpf verifier
+ * scream at them.
+ */
+ alloc_size = DIV_ROUND_UP(max_report_len, 64) * 64;
+
+ alloc_data = kzalloc(alloc_size, GFP_KERNEL);
+ if (!alloc_data)
+ return -ENOMEM;
+
+ *data = alloc_data;
+ *size = alloc_size;
+
+ return 0;
+}
+
+static int hid_bpf_allocate_event_data(struct hid_device *hdev)
+{
+ /* hdev->bpf.device_data is already allocated, abort */
+ if (hdev->bpf.device_data)
+ return 0;
+
+ return __hid_bpf_allocate_data(hdev, &hdev->bpf.device_data, &hdev->bpf.allocated_data);
+}
+
+int hid_bpf_reconnect(struct hid_device *hdev)
+{
+ if (!test_and_set_bit(ffs(HID_STAT_REPROBED), &hdev->status))
+ return device_reprobe(&hdev->dev);
+
+ return 0;
+}
+
+/**
+ * hid_bpf_attach_prog - Attach the given @prog_fd to the given HID device
+ *
+ * @hid_id: the system unique identifier of the HID device
+ * @prog_fd: an fd in the user process representing the program to attach
+ * @flags: any logical OR combination of &enum hid_bpf_attach_flags
+ *
+ * @returns an fd of a bpf_link object on success (> %0), an error code otherwise.
+ * Closing this fd will detach the program from the HID device (unless the bpf_link
+ * is pinned to the BPF file system).
+ */
+/* called from syscall */
+noinline int
+hid_bpf_attach_prog(unsigned int hid_id, int prog_fd, __u32 flags)
+{
+ struct hid_device *hdev;
+ struct device *dev;
+ int fd, err, prog_type = hid_bpf_get_prog_attach_type(prog_fd);
+
+ if (!hid_bpf_ops)
+ return -EINVAL;
+
+ if (prog_type < 0)
+ return prog_type;
+
+ if (prog_type >= HID_BPF_PROG_TYPE_MAX)
+ return -EINVAL;
+
+ if ((flags & ~HID_BPF_FLAG_MASK))
+ return -EINVAL;
+
+ dev = bus_find_device(hid_bpf_ops->bus_type, NULL, &hid_id, device_match_id);
+ if (!dev)
+ return -EINVAL;
+
+ hdev = to_hid_device(dev);
+
+ if (prog_type == HID_BPF_PROG_TYPE_DEVICE_EVENT) {
+ err = hid_bpf_allocate_event_data(hdev);
+ if (err)
+ return err;
+ }
+
+ fd = __hid_bpf_attach_prog(hdev, prog_type, prog_fd, flags);
+ if (fd < 0)
+ return fd;
+
+ if (prog_type == HID_BPF_PROG_TYPE_RDESC_FIXUP) {
+ err = hid_bpf_reconnect(hdev);
+ if (err) {
+ close_fd(fd);
+ return err;
+ }
+ }
+
+ return fd;
+}
+
+/**
+ * hid_bpf_allocate_context - Allocate a context to the given HID device
+ *
+ * @hid_id: the system unique identifier of the HID device
+ *
+ * @returns A pointer to &struct hid_bpf_ctx on success, %NULL on error.
+ */
+noinline struct hid_bpf_ctx *
+hid_bpf_allocate_context(unsigned int hid_id)
+{
+ struct hid_device *hdev;
+ struct hid_bpf_ctx_kern *ctx_kern = NULL;
+ struct device *dev;
+
+ if (!hid_bpf_ops)
+ return NULL;
+
+ dev = bus_find_device(hid_bpf_ops->bus_type, NULL, &hid_id, device_match_id);
+ if (!dev)
+ return NULL;
+
+ hdev = to_hid_device(dev);
+
+ ctx_kern = kzalloc(sizeof(*ctx_kern), GFP_KERNEL);
+ if (!ctx_kern)
+ return NULL;
+
+ ctx_kern->ctx.hid = hdev;
+
+ return &ctx_kern->ctx;
+}
+
+/**
+ * hid_bpf_release_context - Release the previously allocated context @ctx
+ *
+ * @ctx: the HID-BPF context to release
+ *
+ */
+noinline void
+hid_bpf_release_context(struct hid_bpf_ctx *ctx)
+{
+ struct hid_bpf_ctx_kern *ctx_kern;
+
+ if (!ctx)
+ return;
+
+ ctx_kern = container_of(ctx, struct hid_bpf_ctx_kern, ctx);
+
+ kfree(ctx_kern);
+}
+
+/**
+ * hid_bpf_hw_request - Communicate with a HID device
+ *
+ * @ctx: the HID-BPF context previously allocated in hid_bpf_allocate_context()
+ * @buf: a %PTR_TO_MEM buffer
+ * @buf__sz: the size of the data to transfer
+ * @rtype: the type of the report (%HID_INPUT_REPORT, %HID_FEATURE_REPORT, %HID_OUTPUT_REPORT)
+ * @reqtype: the type of the request (%HID_REQ_GET_REPORT, %HID_REQ_SET_REPORT, ...)
+ *
+ * @returns %0 on success, a negative error code otherwise.
+ */
+noinline int
+hid_bpf_hw_request(struct hid_bpf_ctx *ctx, __u8 *buf, size_t buf__sz,
+ enum hid_report_type rtype, enum hid_class_request reqtype)
+{
+ struct hid_device *hdev;
+ struct hid_report *report;
+ struct hid_report_enum *report_enum;
+ u8 *dma_data;
+ u32 report_len;
+ int ret;
+
+ /* check arguments */
+ if (!ctx || !hid_bpf_ops || !buf)
+ return -EINVAL;
+
+ switch (rtype) {
+ case HID_INPUT_REPORT:
+ case HID_OUTPUT_REPORT:
+ case HID_FEATURE_REPORT:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (reqtype) {
+ case HID_REQ_GET_REPORT:
+ case HID_REQ_GET_IDLE:
+ case HID_REQ_GET_PROTOCOL:
+ case HID_REQ_SET_REPORT:
+ case HID_REQ_SET_IDLE:
+ case HID_REQ_SET_PROTOCOL:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (buf__sz < 1)
+ return -EINVAL;
+
+ hdev = (struct hid_device *)ctx->hid; /* discard const */
+
+ report_enum = hdev->report_enum + rtype;
+ report = hid_bpf_ops->hid_get_report(report_enum, buf);
+ if (!report)
+ return -EINVAL;
+
+ report_len = hid_report_len(report);
+
+ if (buf__sz > report_len)
+ buf__sz = report_len;
+
+ dma_data = kmemdup(buf, buf__sz, GFP_KERNEL);
+ if (!dma_data)
+ return -ENOMEM;
+
+ ret = hid_bpf_ops->hid_hw_raw_request(hdev,
+ dma_data[0],
+ dma_data,
+ buf__sz,
+ rtype,
+ reqtype);
+
+ if (ret > 0)
+ memcpy(buf, dma_data, ret);
+
+ kfree(dma_data);
+ return ret;
+}
+
+/* our HID-BPF entrypoints */
+BTF_SET8_START(hid_bpf_fmodret_ids)
+BTF_ID_FLAGS(func, hid_bpf_device_event)
+BTF_ID_FLAGS(func, hid_bpf_rdesc_fixup)
+BTF_ID_FLAGS(func, __hid_bpf_tail_call)
+BTF_SET8_END(hid_bpf_fmodret_ids)
+
+static const struct btf_kfunc_id_set hid_bpf_fmodret_set = {
+ .owner = THIS_MODULE,
+ .set = &hid_bpf_fmodret_ids,
+};
+
+/* for syscall HID-BPF */
+BTF_SET8_START(hid_bpf_syscall_kfunc_ids)
+BTF_ID_FLAGS(func, hid_bpf_attach_prog)
+BTF_ID_FLAGS(func, hid_bpf_allocate_context, KF_ACQUIRE | KF_RET_NULL)
+BTF_ID_FLAGS(func, hid_bpf_release_context, KF_RELEASE)
+BTF_ID_FLAGS(func, hid_bpf_hw_request)
+BTF_SET8_END(hid_bpf_syscall_kfunc_ids)
+
+static const struct btf_kfunc_id_set hid_bpf_syscall_kfunc_set = {
+ .owner = THIS_MODULE,
+ .set = &hid_bpf_syscall_kfunc_ids,
+};
+
+int hid_bpf_connect_device(struct hid_device *hdev)
+{
+ struct hid_bpf_prog_list *prog_list;
+
+ rcu_read_lock();
+ prog_list = rcu_dereference(hdev->bpf.progs[HID_BPF_PROG_TYPE_DEVICE_EVENT]);
+ rcu_read_unlock();
+
+ /* only allocate BPF data if there are programs attached */
+ if (!prog_list)
+ return 0;
+
+ return hid_bpf_allocate_event_data(hdev);
+}
+EXPORT_SYMBOL_GPL(hid_bpf_connect_device);
+
+void hid_bpf_disconnect_device(struct hid_device *hdev)
+{
+ kfree(hdev->bpf.device_data);
+ hdev->bpf.device_data = NULL;
+ hdev->bpf.allocated_data = 0;
+}
+EXPORT_SYMBOL_GPL(hid_bpf_disconnect_device);
+
+void hid_bpf_destroy_device(struct hid_device *hdev)
+{
+ if (!hdev)
+ return;
+
+ /* mark the device as destroyed in bpf so we don't reattach it */
+ hdev->bpf.destroyed = true;
+
+ __hid_bpf_destroy_device(hdev);
+}
+EXPORT_SYMBOL_GPL(hid_bpf_destroy_device);
+
+void hid_bpf_device_init(struct hid_device *hdev)
+{
+ spin_lock_init(&hdev->bpf.progs_lock);
+}
+EXPORT_SYMBOL_GPL(hid_bpf_device_init);
+
+static int __init hid_bpf_init(void)
+{
+ int err;
+
+ /* Note: if we exit with an error any time here, we would entirely break HID, which
+ * is probably not something we want. So we log an error and return success.
+ *
+ * This is not a big deal: the syscall allowing to attach a BPF program to a HID device
+ * will not be available, so nobody will be able to use the functionality.
+ */
+
+ err = register_btf_fmodret_id_set(&hid_bpf_fmodret_set);
+ if (err) {
+ pr_warn("error while registering fmodret entrypoints: %d", err);
+ return 0;
+ }
+
+ err = hid_bpf_preload_skel();
+ if (err) {
+ pr_warn("error while preloading HID BPF dispatcher: %d", err);
+ return 0;
+ }
+
+ /* register tracing kfuncs after we are sure we can load our preloaded bpf program */
+ err = register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &hid_bpf_kfunc_set);
+ if (err) {
+ pr_warn("error while setting HID BPF tracing kfuncs: %d", err);
+ return 0;
+ }
+
+ /* register syscalls after we are sure we can load our preloaded bpf program */
+ err = register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &hid_bpf_syscall_kfunc_set);
+ if (err) {
+ pr_warn("error while setting HID BPF syscall kfuncs: %d", err);
+ return 0;
+ }
+
+ return 0;
+}
+
+static void __exit hid_bpf_exit(void)
+{
+ /* HID depends on us, so if we hit that code, we are guaranteed that hid
+ * has been removed and thus we do not need to clear the HID devices
+ */
+ hid_bpf_free_links_and_skel();
+}
+
+late_initcall(hid_bpf_init);
+module_exit(hid_bpf_exit);
+MODULE_AUTHOR("Benjamin Tissoires");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hid/bpf/hid_bpf_dispatch.h b/drivers/hid/bpf/hid_bpf_dispatch.h
new file mode 100644
index 000000000000..63dfc8605cd2
--- /dev/null
+++ b/drivers/hid/bpf/hid_bpf_dispatch.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef _BPF_HID_BPF_DISPATCH_H
+#define _BPF_HID_BPF_DISPATCH_H
+
+#include <linux/hid.h>
+
+struct hid_bpf_ctx_kern {
+ struct hid_bpf_ctx ctx;
+ u8 *data;
+};
+
+int hid_bpf_preload_skel(void);
+void hid_bpf_free_links_and_skel(void);
+int hid_bpf_get_prog_attach_type(int prog_fd);
+int __hid_bpf_attach_prog(struct hid_device *hdev, enum hid_bpf_prog_type prog_type, int prog_fd,
+ __u32 flags);
+void __hid_bpf_destroy_device(struct hid_device *hdev);
+int hid_bpf_prog_run(struct hid_device *hdev, enum hid_bpf_prog_type type,
+ struct hid_bpf_ctx_kern *ctx_kern);
+int hid_bpf_reconnect(struct hid_device *hdev);
+
+struct bpf_prog;
+
+#endif
diff --git a/drivers/hid/bpf/hid_bpf_jmp_table.c b/drivers/hid/bpf/hid_bpf_jmp_table.c
new file mode 100644
index 000000000000..eca34b7372f9
--- /dev/null
+++ b/drivers/hid/bpf/hid_bpf_jmp_table.c
@@ -0,0 +1,565 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+/*
+ * HID-BPF support for Linux
+ *
+ * Copyright (c) 2022 Benjamin Tissoires
+ */
+
+#include <linux/bitops.h>
+#include <linux/btf.h>
+#include <linux/btf_ids.h>
+#include <linux/circ_buf.h>
+#include <linux/filter.h>
+#include <linux/hid.h>
+#include <linux/hid_bpf.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/workqueue.h>
+#include "hid_bpf_dispatch.h"
+#include "entrypoints/entrypoints.lskel.h"
+
+#define HID_BPF_MAX_PROGS 1024 /* keep this in sync with preloaded bpf,
+ * needs to be a power of 2 as we use it as
+ * a circular buffer
+ */
+
+#define NEXT(idx) (((idx) + 1) & (HID_BPF_MAX_PROGS - 1))
+#define PREV(idx) (((idx) - 1) & (HID_BPF_MAX_PROGS - 1))
+
+/*
+ * represents one attached program stored in the hid jump table
+ */
+struct hid_bpf_prog_entry {
+ struct bpf_prog *prog;
+ struct hid_device *hdev;
+ enum hid_bpf_prog_type type;
+ u16 idx;
+};
+
+struct hid_bpf_jmp_table {
+ struct bpf_map *map;
+ struct hid_bpf_prog_entry entries[HID_BPF_MAX_PROGS]; /* compacted list, circular buffer */
+ int tail, head;
+ struct bpf_prog *progs[HID_BPF_MAX_PROGS]; /* idx -> progs mapping */
+ unsigned long enabled[BITS_TO_LONGS(HID_BPF_MAX_PROGS)];
+};
+
+#define FOR_ENTRIES(__i, __start, __end) \
+ for (__i = __start; CIRC_CNT(__end, __i, HID_BPF_MAX_PROGS); __i = NEXT(__i))
+
+static struct hid_bpf_jmp_table jmp_table;
+
+static DEFINE_MUTEX(hid_bpf_attach_lock); /* held when attaching/detaching programs */
+
+static void hid_bpf_release_progs(struct work_struct *work);
+
+static DECLARE_WORK(release_work, hid_bpf_release_progs);
+
+BTF_ID_LIST(hid_bpf_btf_ids)
+BTF_ID(func, hid_bpf_device_event) /* HID_BPF_PROG_TYPE_DEVICE_EVENT */
+BTF_ID(func, hid_bpf_rdesc_fixup) /* HID_BPF_PROG_TYPE_RDESC_FIXUP */
+
+static int hid_bpf_max_programs(enum hid_bpf_prog_type type)
+{
+ switch (type) {
+ case HID_BPF_PROG_TYPE_DEVICE_EVENT:
+ return HID_BPF_MAX_PROGS_PER_DEV;
+ case HID_BPF_PROG_TYPE_RDESC_FIXUP:
+ return 1;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int hid_bpf_program_count(struct hid_device *hdev,
+ struct bpf_prog *prog,
+ enum hid_bpf_prog_type type)
+{
+ int i, n = 0;
+
+ if (type >= HID_BPF_PROG_TYPE_MAX)
+ return -EINVAL;
+
+ FOR_ENTRIES(i, jmp_table.tail, jmp_table.head) {
+ struct hid_bpf_prog_entry *entry = &jmp_table.entries[i];
+
+ if (type != HID_BPF_PROG_TYPE_UNDEF && entry->type != type)
+ continue;
+
+ if (hdev && entry->hdev != hdev)
+ continue;
+
+ if (prog && entry->prog != prog)
+ continue;
+
+ n++;
+ }
+
+ return n;
+}
+
+__weak noinline int __hid_bpf_tail_call(struct hid_bpf_ctx *ctx)
+{
+ return 0;
+}
+
+int hid_bpf_prog_run(struct hid_device *hdev, enum hid_bpf_prog_type type,
+ struct hid_bpf_ctx_kern *ctx_kern)
+{
+ struct hid_bpf_prog_list *prog_list;
+ int i, idx, err = 0;
+
+ rcu_read_lock();
+ prog_list = rcu_dereference(hdev->bpf.progs[type]);
+
+ if (!prog_list)
+ goto out_unlock;
+
+ for (i = 0; i < prog_list->prog_cnt; i++) {
+ idx = prog_list->prog_idx[i];
+
+ if (!test_bit(idx, jmp_table.enabled))
+ continue;
+
+ ctx_kern->ctx.index = idx;
+ err = __hid_bpf_tail_call(&ctx_kern->ctx);
+ if (err < 0)
+ break;
+ if (err)
+ ctx_kern->ctx.retval = err;
+ }
+
+ out_unlock:
+ rcu_read_unlock();
+
+ return err;
+}
+
+/*
+ * assign the list of programs attached to a given hid device.
+ */
+static void __hid_bpf_set_hdev_progs(struct hid_device *hdev, struct hid_bpf_prog_list *new_list,
+ enum hid_bpf_prog_type type)
+{
+ struct hid_bpf_prog_list *old_list;
+
+ spin_lock(&hdev->bpf.progs_lock);
+ old_list = rcu_dereference_protected(hdev->bpf.progs[type],
+ lockdep_is_held(&hdev->bpf.progs_lock));
+ rcu_assign_pointer(hdev->bpf.progs[type], new_list);
+ spin_unlock(&hdev->bpf.progs_lock);
+ synchronize_rcu();
+
+ kfree(old_list);
+}
+
+/*
+ * allocate and populate the list of programs attached to a given hid device.
+ *
+ * Must be called under lock.
+ */
+static int hid_bpf_populate_hdev(struct hid_device *hdev, enum hid_bpf_prog_type type)
+{
+ struct hid_bpf_prog_list *new_list;
+ int i;
+
+ if (type >= HID_BPF_PROG_TYPE_MAX || !hdev)
+ return -EINVAL;
+
+ if (hdev->bpf.destroyed)
+ return 0;
+
+ new_list = kzalloc(sizeof(*new_list), GFP_KERNEL);
+ if (!new_list)
+ return -ENOMEM;
+
+ FOR_ENTRIES(i, jmp_table.tail, jmp_table.head) {
+ struct hid_bpf_prog_entry *entry = &jmp_table.entries[i];
+
+ if (entry->type == type && entry->hdev == hdev &&
+ test_bit(entry->idx, jmp_table.enabled))
+ new_list->prog_idx[new_list->prog_cnt++] = entry->idx;
+ }
+
+ __hid_bpf_set_hdev_progs(hdev, new_list, type);
+
+ return 0;
+}
+
+static void __hid_bpf_do_release_prog(int map_fd, unsigned int idx)
+{
+ skel_map_delete_elem(map_fd, &idx);
+ jmp_table.progs[idx] = NULL;
+}
+
+static void hid_bpf_release_progs(struct work_struct *work)
+{
+ int i, j, n, map_fd = -1;
+
+ if (!jmp_table.map)
+ return;
+
+ /* retrieve a fd of our prog_array map in BPF */
+ map_fd = skel_map_get_fd_by_id(jmp_table.map->id);
+ if (map_fd < 0)
+ return;
+
+ mutex_lock(&hid_bpf_attach_lock); /* protects against attaching new programs */
+
+ /* detach unused progs from HID devices */
+ FOR_ENTRIES(i, jmp_table.tail, jmp_table.head) {
+ struct hid_bpf_prog_entry *entry = &jmp_table.entries[i];
+ enum hid_bpf_prog_type type;
+ struct hid_device *hdev;
+
+ if (test_bit(entry->idx, jmp_table.enabled))
+ continue;
+
+ /* we have an attached prog */
+ if (entry->hdev) {
+ hdev = entry->hdev;
+ type = entry->type;
+
+ hid_bpf_populate_hdev(hdev, type);
+
+ /* mark all other disabled progs from hdev of the given type as detached */
+ FOR_ENTRIES(j, i, jmp_table.head) {
+ struct hid_bpf_prog_entry *next;
+
+ next = &jmp_table.entries[j];
+
+ if (test_bit(next->idx, jmp_table.enabled))
+ continue;
+
+ if (next->hdev == hdev && next->type == type)
+ next->hdev = NULL;
+ }
+
+ /* if type was rdesc fixup, reconnect device */
+ if (type == HID_BPF_PROG_TYPE_RDESC_FIXUP)
+ hid_bpf_reconnect(hdev);
+ }
+ }
+
+ /* remove all unused progs from the jump table */
+ FOR_ENTRIES(i, jmp_table.tail, jmp_table.head) {
+ struct hid_bpf_prog_entry *entry = &jmp_table.entries[i];
+
+ if (test_bit(entry->idx, jmp_table.enabled))
+ continue;
+
+ if (entry->prog)
+ __hid_bpf_do_release_prog(map_fd, entry->idx);
+ }
+
+ /* compact the entry list */
+ n = jmp_table.tail;
+ FOR_ENTRIES(i, jmp_table.tail, jmp_table.head) {
+ struct hid_bpf_prog_entry *entry = &jmp_table.entries[i];
+
+ if (!test_bit(entry->idx, jmp_table.enabled))
+ continue;
+
+ jmp_table.entries[n] = jmp_table.entries[i];
+ n = NEXT(n);
+ }
+
+ jmp_table.head = n;
+
+ mutex_unlock(&hid_bpf_attach_lock);
+
+ if (map_fd >= 0)
+ close_fd(map_fd);
+}
+
+static void hid_bpf_release_prog_at(int idx)
+{
+ int map_fd = -1;
+
+ /* retrieve a fd of our prog_array map in BPF */
+ map_fd = skel_map_get_fd_by_id(jmp_table.map->id);
+ if (map_fd < 0)
+ return;
+
+ __hid_bpf_do_release_prog(map_fd, idx);
+
+ close(map_fd);
+}
+
+/*
+ * Insert the given BPF program represented by its fd in the jmp table.
+ * Returns the index in the jump table or a negative error.
+ */
+static int hid_bpf_insert_prog(int prog_fd, struct bpf_prog *prog)
+{
+ int i, index = -1, map_fd = -1, err = -EINVAL;
+
+ /* retrieve a fd of our prog_array map in BPF */
+ map_fd = skel_map_get_fd_by_id(jmp_table.map->id);
+
+ if (map_fd < 0) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ /* find the first available index in the jmp_table */
+ for (i = 0; i < HID_BPF_MAX_PROGS; i++) {
+ if (!jmp_table.progs[i] && index < 0) {
+ /* mark the index as used */
+ jmp_table.progs[i] = prog;
+ index = i;
+ __set_bit(i, jmp_table.enabled);
+ }
+ }
+ if (index < 0) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ /* insert the program in the jump table */
+ err = skel_map_update_elem(map_fd, &index, &prog_fd, 0);
+ if (err)
+ goto out;
+
+ /* return the index */
+ err = index;
+
+ out:
+ if (err < 0)
+ __hid_bpf_do_release_prog(map_fd, index);
+ if (map_fd >= 0)
+ close_fd(map_fd);
+ return err;
+}
+
+int hid_bpf_get_prog_attach_type(int prog_fd)
+{
+ struct bpf_prog *prog = NULL;
+ int i;
+ int prog_type = HID_BPF_PROG_TYPE_UNDEF;
+
+ prog = bpf_prog_get(prog_fd);
+ if (IS_ERR(prog))
+ return PTR_ERR(prog);
+
+ for (i = 0; i < HID_BPF_PROG_TYPE_MAX; i++) {
+ if (hid_bpf_btf_ids[i] == prog->aux->attach_btf_id) {
+ prog_type = i;
+ break;
+ }
+ }
+
+ bpf_prog_put(prog);
+
+ return prog_type;
+}
+
+static void hid_bpf_link_release(struct bpf_link *link)
+{
+ struct hid_bpf_link *hid_link =
+ container_of(link, struct hid_bpf_link, link);
+
+ __clear_bit(hid_link->hid_table_index, jmp_table.enabled);
+ schedule_work(&release_work);
+}
+
+static void hid_bpf_link_dealloc(struct bpf_link *link)
+{
+ struct hid_bpf_link *hid_link =
+ container_of(link, struct hid_bpf_link, link);
+
+ kfree(hid_link);
+}
+
+static void hid_bpf_link_show_fdinfo(const struct bpf_link *link,
+ struct seq_file *seq)
+{
+ seq_printf(seq,
+ "attach_type:\tHID-BPF\n");
+}
+
+static const struct bpf_link_ops hid_bpf_link_lops = {
+ .release = hid_bpf_link_release,
+ .dealloc = hid_bpf_link_dealloc,
+ .show_fdinfo = hid_bpf_link_show_fdinfo,
+};
+
+/* called from syscall */
+noinline int
+__hid_bpf_attach_prog(struct hid_device *hdev, enum hid_bpf_prog_type prog_type,
+ int prog_fd, __u32 flags)
+{
+ struct bpf_link_primer link_primer;
+ struct hid_bpf_link *link;
+ struct bpf_prog *prog = NULL;
+ struct hid_bpf_prog_entry *prog_entry;
+ int cnt, err = -EINVAL, prog_table_idx = -1;
+
+ /* take a ref on the prog itself */
+ prog = bpf_prog_get(prog_fd);
+ if (IS_ERR(prog))
+ return PTR_ERR(prog);
+
+ mutex_lock(&hid_bpf_attach_lock);
+
+ link = kzalloc(sizeof(*link), GFP_USER);
+ if (!link) {
+ err = -ENOMEM;
+ goto err_unlock;
+ }
+
+ bpf_link_init(&link->link, BPF_LINK_TYPE_UNSPEC,
+ &hid_bpf_link_lops, prog);
+
+ /* do not attach too many programs to a given HID device */
+ cnt = hid_bpf_program_count(hdev, NULL, prog_type);
+ if (cnt < 0) {
+ err = cnt;
+ goto err_unlock;
+ }
+
+ if (cnt >= hid_bpf_max_programs(prog_type)) {
+ err = -E2BIG;
+ goto err_unlock;
+ }
+
+ prog_table_idx = hid_bpf_insert_prog(prog_fd, prog);
+ /* if the jmp table is full, abort */
+ if (prog_table_idx < 0) {
+ err = prog_table_idx;
+ goto err_unlock;
+ }
+
+ if (flags & HID_BPF_FLAG_INSERT_HEAD) {
+ /* take the previous prog_entry slot */
+ jmp_table.tail = PREV(jmp_table.tail);
+ prog_entry = &jmp_table.entries[jmp_table.tail];
+ } else {
+ /* take the next prog_entry slot */
+ prog_entry = &jmp_table.entries[jmp_table.head];
+ jmp_table.head = NEXT(jmp_table.head);
+ }
+
+ /* we steal the ref here */
+ prog_entry->prog = prog;
+ prog_entry->idx = prog_table_idx;
+ prog_entry->hdev = hdev;
+ prog_entry->type = prog_type;
+
+ /* finally store the index in the device list */
+ err = hid_bpf_populate_hdev(hdev, prog_type);
+ if (err) {
+ hid_bpf_release_prog_at(prog_table_idx);
+ goto err_unlock;
+ }
+
+ link->hid_table_index = prog_table_idx;
+
+ err = bpf_link_prime(&link->link, &link_primer);
+ if (err)
+ goto err_unlock;
+
+ mutex_unlock(&hid_bpf_attach_lock);
+
+ return bpf_link_settle(&link_primer);
+
+ err_unlock:
+ mutex_unlock(&hid_bpf_attach_lock);
+
+ bpf_prog_put(prog);
+ kfree(link);
+
+ return err;
+}
+
+void __hid_bpf_destroy_device(struct hid_device *hdev)
+{
+ int type, i;
+ struct hid_bpf_prog_list *prog_list;
+
+ rcu_read_lock();
+
+ for (type = 0; type < HID_BPF_PROG_TYPE_MAX; type++) {
+ prog_list = rcu_dereference(hdev->bpf.progs[type]);
+
+ if (!prog_list)
+ continue;
+
+ for (i = 0; i < prog_list->prog_cnt; i++)
+ __clear_bit(prog_list->prog_idx[i], jmp_table.enabled);
+ }
+
+ rcu_read_unlock();
+
+ for (type = 0; type < HID_BPF_PROG_TYPE_MAX; type++)
+ __hid_bpf_set_hdev_progs(hdev, NULL, type);
+
+ /* schedule release of all detached progs */
+ schedule_work(&release_work);
+}
+
+#define HID_BPF_PROGS_COUNT 1
+
+static struct bpf_link *links[HID_BPF_PROGS_COUNT];
+static struct entrypoints_bpf *skel;
+
+void hid_bpf_free_links_and_skel(void)
+{
+ int i;
+
+ /* the following is enough to release all programs attached to hid */
+ if (jmp_table.map)
+ bpf_map_put_with_uref(jmp_table.map);
+
+ for (i = 0; i < ARRAY_SIZE(links); i++) {
+ if (!IS_ERR_OR_NULL(links[i]))
+ bpf_link_put(links[i]);
+ }
+ entrypoints_bpf__destroy(skel);
+}
+
+#define ATTACH_AND_STORE_LINK(__name) do { \
+ err = entrypoints_bpf__##__name##__attach(skel); \
+ if (err) \
+ goto out; \
+ \
+ links[idx] = bpf_link_get_from_fd(skel->links.__name##_fd); \
+ if (IS_ERR(links[idx])) { \
+ err = PTR_ERR(links[idx]); \
+ goto out; \
+ } \
+ \
+ /* Avoid taking over stdin/stdout/stderr of init process. Zeroing out \
+ * makes skel_closenz() a no-op later in iterators_bpf__destroy(). \
+ */ \
+ close_fd(skel->links.__name##_fd); \
+ skel->links.__name##_fd = 0; \
+ idx++; \
+} while (0)
+
+int hid_bpf_preload_skel(void)
+{
+ int err, idx = 0;
+
+ skel = entrypoints_bpf__open();
+ if (!skel)
+ return -ENOMEM;
+
+ err = entrypoints_bpf__load(skel);
+ if (err)
+ goto out;
+
+ jmp_table.map = bpf_map_get_with_uref(skel->maps.hid_jmp_table.map_fd);
+ if (IS_ERR(jmp_table.map)) {
+ err = PTR_ERR(jmp_table.map);
+ goto out;
+ }
+
+ ATTACH_AND_STORE_LINK(hid_tail_call);
+
+ return 0;
+out:
+ hid_bpf_free_links_and_skel();
+ return err;
+}
diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c
index f99752b998f3..d1094bb1aa42 100644
--- a/drivers/hid/hid-asus.c
+++ b/drivers/hid/hid-asus.c
@@ -98,6 +98,7 @@ struct asus_kbd_leds {
struct hid_device *hdev;
struct work_struct work;
unsigned int brightness;
+ spinlock_t lock;
bool removed;
};
@@ -490,21 +491,42 @@ static int rog_nkey_led_init(struct hid_device *hdev)
return ret;
}
+static void asus_schedule_work(struct asus_kbd_leds *led)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&led->lock, flags);
+ if (!led->removed)
+ schedule_work(&led->work);
+ spin_unlock_irqrestore(&led->lock, flags);
+}
+
static void asus_kbd_backlight_set(struct led_classdev *led_cdev,
enum led_brightness brightness)
{
struct asus_kbd_leds *led = container_of(led_cdev, struct asus_kbd_leds,
cdev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&led->lock, flags);
led->brightness = brightness;
- schedule_work(&led->work);
+ spin_unlock_irqrestore(&led->lock, flags);
+
+ asus_schedule_work(led);
}
static enum led_brightness asus_kbd_backlight_get(struct led_classdev *led_cdev)
{
struct asus_kbd_leds *led = container_of(led_cdev, struct asus_kbd_leds,
cdev);
+ enum led_brightness brightness;
+ unsigned long flags;
+
+ spin_lock_irqsave(&led->lock, flags);
+ brightness = led->brightness;
+ spin_unlock_irqrestore(&led->lock, flags);
- return led->brightness;
+ return brightness;
}
static void asus_kbd_backlight_work(struct work_struct *work)
@@ -512,11 +534,11 @@ static void asus_kbd_backlight_work(struct work_struct *work)
struct asus_kbd_leds *led = container_of(work, struct asus_kbd_leds, work);
u8 buf[] = { FEATURE_KBD_REPORT_ID, 0xba, 0xc5, 0xc4, 0x00 };
int ret;
+ unsigned long flags;
- if (led->removed)
- return;
-
+ spin_lock_irqsave(&led->lock, flags);
buf[4] = led->brightness;
+ spin_unlock_irqrestore(&led->lock, flags);
ret = asus_kbd_set_report(led->hdev, buf, sizeof(buf));
if (ret < 0)
@@ -584,6 +606,7 @@ static int asus_kbd_register_leds(struct hid_device *hdev)
drvdata->kbd_backlight->cdev.brightness_set = asus_kbd_backlight_set;
drvdata->kbd_backlight->cdev.brightness_get = asus_kbd_backlight_get;
INIT_WORK(&drvdata->kbd_backlight->work, asus_kbd_backlight_work);
+ spin_lock_init(&drvdata->kbd_backlight->lock);
ret = devm_led_classdev_register(&hdev->dev, &drvdata->kbd_backlight->cdev);
if (ret < 0) {
@@ -1119,9 +1142,13 @@ err_stop_hw:
static void asus_remove(struct hid_device *hdev)
{
struct asus_drvdata *drvdata = hid_get_drvdata(hdev);
+ unsigned long flags;
if (drvdata->kbd_backlight) {
+ spin_lock_irqsave(&drvdata->kbd_backlight->lock, flags);
drvdata->kbd_backlight->removed = true;
+ spin_unlock_irqrestore(&drvdata->kbd_backlight->lock, flags);
+
cancel_work_sync(&drvdata->kbd_backlight->work);
}
diff --git a/drivers/hid/hid-bigbenff.c b/drivers/hid/hid-bigbenff.c
index e8b16665860d..a02cb517b4c4 100644
--- a/drivers/hid/hid-bigbenff.c
+++ b/drivers/hid/hid-bigbenff.c
@@ -174,6 +174,7 @@ static __u8 pid0902_rdesc_fixed[] = {
struct bigben_device {
struct hid_device *hid;
struct hid_report *report;
+ spinlock_t lock;
bool removed;
u8 led_state; /* LED1 = 1 .. LED4 = 8 */
u8 right_motor_on; /* right motor off/on 0/1 */
@@ -184,18 +185,39 @@ struct bigben_device {
struct work_struct worker;
};
+static inline void bigben_schedule_work(struct bigben_device *bigben)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&bigben->lock, flags);
+ if (!bigben->removed)
+ schedule_work(&bigben->worker);
+ spin_unlock_irqrestore(&bigben->lock, flags);
+}
static void bigben_worker(struct work_struct *work)
{
struct bigben_device *bigben = container_of(work,
struct bigben_device, worker);
struct hid_field *report_field = bigben->report->field[0];
-
- if (bigben->removed || !report_field)
+ bool do_work_led = false;
+ bool do_work_ff = false;
+ u8 *buf;
+ u32 len;
+ unsigned long flags;
+
+ buf = hid_alloc_report_buf(bigben->report, GFP_KERNEL);
+ if (!buf)
return;
+ len = hid_report_len(bigben->report);
+
+ /* LED work */
+ spin_lock_irqsave(&bigben->lock, flags);
+
if (bigben->work_led) {
bigben->work_led = false;
+ do_work_led = true;
report_field->value[0] = 0x01; /* 1 = led message */
report_field->value[1] = 0x08; /* reserved value, always 8 */
report_field->value[2] = bigben->led_state;
@@ -204,11 +226,22 @@ static void bigben_worker(struct work_struct *work)
report_field->value[5] = 0x00; /* padding */
report_field->value[6] = 0x00; /* padding */
report_field->value[7] = 0x00; /* padding */
- hid_hw_request(bigben->hid, bigben->report, HID_REQ_SET_REPORT);
+ hid_output_report(bigben->report, buf);
+ }
+
+ spin_unlock_irqrestore(&bigben->lock, flags);
+
+ if (do_work_led) {
+ hid_hw_raw_request(bigben->hid, bigben->report->id, buf, len,
+ bigben->report->type, HID_REQ_SET_REPORT);
}
+ /* FF work */
+ spin_lock_irqsave(&bigben->lock, flags);
+
if (bigben->work_ff) {
bigben->work_ff = false;
+ do_work_ff = true;
report_field->value[0] = 0x02; /* 2 = rumble effect message */
report_field->value[1] = 0x08; /* reserved value, always 8 */
report_field->value[2] = bigben->right_motor_on;
@@ -217,8 +250,17 @@ static void bigben_worker(struct work_struct *work)
report_field->value[5] = 0x00; /* padding */
report_field->value[6] = 0x00; /* padding */
report_field->value[7] = 0x00; /* padding */
- hid_hw_request(bigben->hid, bigben->report, HID_REQ_SET_REPORT);
+ hid_output_report(bigben->report, buf);
+ }
+
+ spin_unlock_irqrestore(&bigben->lock, flags);
+
+ if (do_work_ff) {
+ hid_hw_raw_request(bigben->hid, bigben->report->id, buf, len,
+ bigben->report->type, HID_REQ_SET_REPORT);
}
+
+ kfree(buf);
}
static int hid_bigben_play_effect(struct input_dev *dev, void *data,
@@ -228,6 +270,7 @@ static int hid_bigben_play_effect(struct input_dev *dev, void *data,
struct bigben_device *bigben = hid_get_drvdata(hid);
u8 right_motor_on;
u8 left_motor_force;
+ unsigned long flags;
if (!bigben) {
hid_err(hid, "no device data\n");
@@ -242,10 +285,13 @@ static int hid_bigben_play_effect(struct input_dev *dev, void *data,
if (right_motor_on != bigben->right_motor_on ||
left_motor_force != bigben->left_motor_force) {
+ spin_lock_irqsave(&bigben->lock, flags);
bigben->right_motor_on = right_motor_on;
bigben->left_motor_force = left_motor_force;
bigben->work_ff = true;
- schedule_work(&bigben->worker);
+ spin_unlock_irqrestore(&bigben->lock, flags);
+
+ bigben_schedule_work(bigben);
}
return 0;
@@ -259,6 +305,7 @@ static void bigben_set_led(struct led_classdev *led,
struct bigben_device *bigben = hid_get_drvdata(hid);
int n;
bool work;
+ unsigned long flags;
if (!bigben) {
hid_err(hid, "no device data\n");
@@ -267,6 +314,7 @@ static void bigben_set_led(struct led_classdev *led,
for (n = 0; n < NUM_LEDS; n++) {
if (led == bigben->leds[n]) {
+ spin_lock_irqsave(&bigben->lock, flags);
if (value == LED_OFF) {
work = (bigben->led_state & BIT(n));
bigben->led_state &= ~BIT(n);
@@ -274,10 +322,11 @@ static void bigben_set_led(struct led_classdev *led,
work = !(bigben->led_state & BIT(n));
bigben->led_state |= BIT(n);
}
+ spin_unlock_irqrestore(&bigben->lock, flags);
if (work) {
bigben->work_led = true;
- schedule_work(&bigben->worker);
+ bigben_schedule_work(bigben);
}
return;
}
@@ -307,8 +356,12 @@ static enum led_brightness bigben_get_led(struct led_classdev *led)
static void bigben_remove(struct hid_device *hid)
{
struct bigben_device *bigben = hid_get_drvdata(hid);
+ unsigned long flags;
+ spin_lock_irqsave(&bigben->lock, flags);
bigben->removed = true;
+ spin_unlock_irqrestore(&bigben->lock, flags);
+
cancel_work_sync(&bigben->worker);
hid_hw_stop(hid);
}
@@ -318,7 +371,6 @@ static int bigben_probe(struct hid_device *hid,
{
struct bigben_device *bigben;
struct hid_input *hidinput;
- struct list_head *report_list;
struct led_classdev *led;
char *name;
size_t name_sz;
@@ -343,14 +395,12 @@ static int bigben_probe(struct hid_device *hid,
return error;
}
- report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
- if (list_empty(report_list)) {
+ bigben->report = hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 8);
+ if (!bigben->report) {
hid_err(hid, "no output report found\n");
error = -ENODEV;
goto error_hw_stop;
}
- bigben->report = list_entry(report_list->next,
- struct hid_report, list);
if (list_empty(&hid->inputs)) {
hid_err(hid, "no inputs found\n");
@@ -362,6 +412,7 @@ static int bigben_probe(struct hid_device *hid,
set_bit(FF_RUMBLE, hidinput->input->ffbit);
INIT_WORK(&bigben->worker, bigben_worker);
+ spin_lock_init(&bigben->lock);
error = input_ff_create_memless(hidinput->input, NULL,
hid_bigben_play_effect);
@@ -402,7 +453,7 @@ static int bigben_probe(struct hid_device *hid,
bigben->left_motor_force = 0;
bigben->work_led = true;
bigben->work_ff = true;
- schedule_work(&bigben->worker);
+ bigben_schedule_work(bigben);
hid_info(hid, "LED and force feedback support for BigBen gamepad\n");
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 3e1803592bd4..842afc88a949 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -41,11 +41,6 @@
#define DRIVER_DESC "HID core driver"
-int hid_debug = 0;
-module_param_named(debug, hid_debug, int, 0600);
-MODULE_PARM_DESC(debug, "toggle HID debugging messages");
-EXPORT_SYMBOL_GPL(hid_debug);
-
static int hid_ignore_special_drivers = 0;
module_param_named(ignore_special_drivers, hid_ignore_special_drivers, int, 0600);
MODULE_PARM_DESC(ignore_special_drivers, "Ignore any special drivers and handle all devices by generic driver");
@@ -804,7 +799,8 @@ static void hid_scan_collection(struct hid_parser *parser, unsigned type)
int i;
if (((parser->global.usage_page << 16) == HID_UP_SENSOR) &&
- type == HID_COLLECTION_PHYSICAL)
+ (type == HID_COLLECTION_PHYSICAL ||
+ type == HID_COLLECTION_APPLICATION))
hid->group = HID_GROUP_SENSOR_HUB;
if (hid->vendor == USB_VENDOR_ID_MICROSOFT &&
@@ -1202,6 +1198,7 @@ int hid_open_report(struct hid_device *device)
__u8 *end;
__u8 *next;
int ret;
+ int i;
static int (*dispatch_type[])(struct hid_parser *parser,
struct hid_item *item) = {
hid_parser_main,
@@ -1218,7 +1215,8 @@ int hid_open_report(struct hid_device *device)
return -ENODEV;
size = device->dev_rsize;
- buf = kmemdup(start, size, GFP_KERNEL);
+ /* call_hid_bpf_rdesc_fixup() ensures we work on a copy of rdesc */
+ buf = call_hid_bpf_rdesc_fixup(device, start, &size);
if (buf == NULL)
return -ENOMEM;
@@ -1252,6 +1250,8 @@ int hid_open_report(struct hid_device *device)
goto err;
}
device->collection_size = HID_DEFAULT_NUM_COLLECTIONS;
+ for (i = 0; i < HID_DEFAULT_NUM_COLLECTIONS; i++)
+ device->collection[i].parent_idx = -1;
ret = -EINVAL;
while ((next = fetch_item(start, end, &item)) != NULL) {
@@ -2043,6 +2043,12 @@ int hid_input_report(struct hid_device *hid, enum hid_report_type type, u8 *data
report_enum = hid->report_enum + type;
hdrv = hid->driver;
+ data = dispatch_hid_bpf_device_event(hid, type, data, &size, interrupt);
+ if (IS_ERR(data)) {
+ ret = PTR_ERR(data);
+ goto unlock;
+ }
+
if (!size) {
dbg_hid("empty report\n");
ret = -1;
@@ -2157,6 +2163,10 @@ int hid_connect(struct hid_device *hdev, unsigned int connect_mask)
int len;
int ret;
+ ret = hid_bpf_connect_device(hdev);
+ if (ret)
+ return ret;
+
if (hdev->quirks & HID_QUIRK_HIDDEV_FORCE)
connect_mask |= (HID_CONNECT_HIDDEV_FORCE | HID_CONNECT_HIDDEV);
if (hdev->quirks & HID_QUIRK_HIDINPUT_FORCE)
@@ -2258,6 +2268,8 @@ void hid_disconnect(struct hid_device *hdev)
if (hdev->claimed & HID_CLAIMED_HIDRAW)
hidraw_disconnect(hdev);
hdev->claimed = 0;
+
+ hid_bpf_disconnect_device(hdev);
}
EXPORT_SYMBOL_GPL(hid_disconnect);
@@ -2664,9 +2676,9 @@ static const struct attribute_group hid_dev_group = {
};
__ATTRIBUTE_GROUPS(hid_dev);
-static int hid_uevent(struct device *dev, struct kobj_uevent_env *env)
+static int hid_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
- struct hid_device *hdev = to_hid_device(dev);
+ const struct hid_device *hdev = to_hid_device(dev);
if (add_uevent_var(env, "HID_ID=%04X:%08X:%08X",
hdev->bus, hdev->vendor, hdev->product))
@@ -2793,6 +2805,8 @@ struct hid_device *hid_allocate_device(void)
sema_init(&hdev->driver_input_lock, 1);
mutex_init(&hdev->ll_open_lock);
+ hid_bpf_device_init(hdev);
+
return hdev;
}
EXPORT_SYMBOL_GPL(hid_allocate_device);
@@ -2819,6 +2833,7 @@ static void hid_remove_device(struct hid_device *hdev)
*/
void hid_destroy_device(struct hid_device *hdev)
{
+ hid_bpf_destroy_device(hdev);
hid_remove_device(hdev);
put_device(&hdev->dev);
}
@@ -2905,20 +2920,29 @@ int hid_check_keys_pressed(struct hid_device *hid)
}
EXPORT_SYMBOL_GPL(hid_check_keys_pressed);
+#ifdef CONFIG_HID_BPF
+static struct hid_bpf_ops hid_ops = {
+ .hid_get_report = hid_get_report,
+ .hid_hw_raw_request = hid_hw_raw_request,
+ .owner = THIS_MODULE,
+ .bus_type = &hid_bus_type,
+};
+#endif
+
static int __init hid_init(void)
{
int ret;
- if (hid_debug)
- pr_warn("hid_debug is now used solely for parser and driver debugging.\n"
- "debugfs is now used for inspecting the device (report descriptor, reports)\n");
-
ret = bus_register(&hid_bus_type);
if (ret) {
pr_err("can't register hid bus\n");
goto err;
}
+#ifdef CONFIG_HID_BPF
+ hid_bpf_ops = &hid_ops;
+#endif
+
ret = hidraw_init();
if (ret)
goto err_bus;
@@ -2934,6 +2958,9 @@ err:
static void __exit hid_exit(void)
{
+#ifdef CONFIG_HID_BPF
+ hid_bpf_ops = NULL;
+#endif
hid_debug_exit();
hidraw_exit();
bus_unregister(&hid_bus_type);
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
index e213bdde543a..e7ef1ea107c9 100644
--- a/drivers/hid/hid-debug.c
+++ b/drivers/hid/hid-debug.c
@@ -975,6 +975,7 @@ static const char *keys[KEY_MAX + 1] = {
[KEY_CAMERA_ACCESS_DISABLE] = "CameraAccessDisable",
[KEY_CAMERA_ACCESS_TOGGLE] = "CameraAccessToggle",
[KEY_DICTATE] = "Dictate",
+ [KEY_MICMUTE] = "MicrophoneMute",
[KEY_BRIGHTNESS_MIN] = "BrightnessMin",
[KEY_BRIGHTNESS_MAX] = "BrightnessMax",
[KEY_BRIGHTNESS_AUTO] = "BrightnessAuto",
diff --git a/drivers/hid/hid-elecom.c b/drivers/hid/hid-elecom.c
index e59e9911fc37..4fa45ee77503 100644
--- a/drivers/hid/hid-elecom.c
+++ b/drivers/hid/hid-elecom.c
@@ -12,6 +12,7 @@
* Copyright (c) 2017 Alex Manoussakis <amanou@gnu.org>
* Copyright (c) 2017 Tomasz Kramkowski <tk@the-tk.com>
* Copyright (c) 2020 YOSHIOKA Takuma <lo48576@hard-wi.red>
+ * Copyright (c) 2022 Takahiro Fujii <fujii@xaxxi.net>
*/
/*
@@ -89,7 +90,7 @@ static __u8 *elecom_report_fixup(struct hid_device *hdev, __u8 *rdesc,
case USB_DEVICE_ID_ELECOM_M_DT1URBK:
case USB_DEVICE_ID_ELECOM_M_DT1DRBK:
case USB_DEVICE_ID_ELECOM_M_HT1URBK:
- case USB_DEVICE_ID_ELECOM_M_HT1DRBK:
+ case USB_DEVICE_ID_ELECOM_M_HT1DRBK_010D:
/*
* Report descriptor format:
* 12: button bit count
@@ -99,6 +100,16 @@ static __u8 *elecom_report_fixup(struct hid_device *hdev, __u8 *rdesc,
*/
mouse_button_fixup(hdev, rdesc, *rsize, 12, 30, 14, 20, 8);
break;
+ case USB_DEVICE_ID_ELECOM_M_HT1DRBK_011C:
+ /*
+ * Report descriptor format:
+ * 22: button bit count
+ * 30: padding bit count
+ * 24: button report size
+ * 16: button usage maximum
+ */
+ mouse_button_fixup(hdev, rdesc, *rsize, 22, 30, 24, 16, 8);
+ break;
}
return rdesc;
}
@@ -112,7 +123,8 @@ static const struct hid_device_id elecom_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_DT1URBK) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_DT1DRBK) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_HT1URBK) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_HT1DRBK) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_HT1DRBK_010D) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_HT1DRBK_011C) },
{ }
};
MODULE_DEVICE_TABLE(hid, elecom_devices);
diff --git a/drivers/hid/hid-evision.c b/drivers/hid/hid-evision.c
new file mode 100644
index 000000000000..ef6b4b435215
--- /dev/null
+++ b/drivers/hid/hid-evision.c
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * HID driver for EVision devices
+ * For now, only ignore bogus consumer reports
+ * sent after the keyboard has been configured
+ *
+ * Copyright (c) 2022 Philippe Valembois
+ */
+
+#include <linux/device.h>
+#include <linux/input.h>
+#include <linux/hid.h>
+#include <linux/module.h>
+
+#include "hid-ids.h"
+
+static int evision_input_mapping(struct hid_device *hdev, struct hid_input *hi,
+ struct hid_field *field, struct hid_usage *usage,
+ unsigned long **bit, int *max)
+{
+ if ((usage->hid & HID_USAGE_PAGE) != HID_UP_CONSUMER)
+ return 0;
+
+ /* Ignore key down event */
+ if ((usage->hid & HID_USAGE) >> 8 == 0x05)
+ return -1;
+ /* Ignore key up event */
+ if ((usage->hid & HID_USAGE) >> 8 == 0x06)
+ return -1;
+
+ switch (usage->hid & HID_USAGE) {
+ /* Ignore configuration saved event */
+ case 0x0401: return -1;
+ /* Ignore reset event */
+ case 0x0402: return -1;
+ }
+ return 0;
+}
+
+static const struct hid_device_id evision_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_EVISION, USB_DEVICE_ID_EVISION_ICL01) },
+ { }
+};
+MODULE_DEVICE_TABLE(hid, evision_devices);
+
+static struct hid_driver evision_driver = {
+ .name = "evision",
+ .id_table = evision_devices,
+ .input_mapping = evision_input_mapping,
+};
+module_hid_driver(evision_driver);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-hyperv.c b/drivers/hid/hid-hyperv.c
index cf12f17e6533..49d4a26895e7 100644
--- a/drivers/hid/hid-hyperv.c
+++ b/drivers/hid/hid-hyperv.c
@@ -424,7 +424,7 @@ static int mousevsc_hid_raw_request(struct hid_device *hid,
return 0;
}
-static struct hid_ll_driver mousevsc_ll_driver = {
+static const struct hid_ll_driver mousevsc_ll_driver = {
.parse = mousevsc_hid_parse,
.open = mousevsc_hid_open,
.close = mousevsc_hid_close,
@@ -524,7 +524,7 @@ probe_err0:
}
-static int mousevsc_remove(struct hv_device *dev)
+static void mousevsc_remove(struct hv_device *dev)
{
struct mousevsc_dev *input_dev = hv_get_drvdata(dev);
@@ -533,8 +533,6 @@ static int mousevsc_remove(struct hv_device *dev)
hid_hw_stop(input_dev->hid_device);
hid_destroy_device(input_dev->hid_device);
mousevsc_free_device(input_dev);
-
- return 0;
}
static int mousevsc_suspend(struct hv_device *dev)
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 0f8c11842a3a..63545cd307e5 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -413,6 +413,8 @@
#define I2C_DEVICE_ID_HP_ENVY_X360_15T_DR100 0x29CF
#define I2C_DEVICE_ID_HP_ENVY_X360_EU0009NV 0x2CF9
#define I2C_DEVICE_ID_HP_SPECTRE_X360_15 0x2817
+#define I2C_DEVICE_ID_HP_SPECTRE_X360_13_AW0020NG 0x29DF
+#define I2C_DEVICE_ID_ASUS_TP420IA_TOUCHSCREEN 0x2BC8
#define USB_DEVICE_ID_ASUS_UX550VE_TOUCHSCREEN 0x2544
#define USB_DEVICE_ID_ASUS_UX550_TOUCHSCREEN 0x2706
#define I2C_DEVICE_ID_SURFACE_GO_TOUCHSCREEN 0x261A
@@ -428,7 +430,8 @@
#define USB_DEVICE_ID_ELECOM_M_DT1URBK 0x00fe
#define USB_DEVICE_ID_ELECOM_M_DT1DRBK 0x00ff
#define USB_DEVICE_ID_ELECOM_M_HT1URBK 0x010c
-#define USB_DEVICE_ID_ELECOM_M_HT1DRBK 0x010d
+#define USB_DEVICE_ID_ELECOM_M_HT1DRBK_010D 0x010d
+#define USB_DEVICE_ID_ELECOM_M_HT1DRBK_011C 0x011c
#define USB_VENDOR_ID_DREAM_CHEEKY 0x1d34
#define USB_DEVICE_ID_DREAM_CHEEKY_WN 0x0004
@@ -445,6 +448,9 @@
#define USB_VENDOR_ID_EMS 0x2006
#define USB_DEVICE_ID_EMS_TRIO_LINKER_PLUS_II 0x0118
+#define USB_VENDOR_ID_EVISION 0x320f
+#define USB_DEVICE_ID_EVISION_ICL01 0x5041
+
#define USB_VENDOR_ID_FLATFROG 0x25b5
#define USB_DEVICE_ID_MULTITOUCH_3200 0x0002
@@ -819,6 +825,7 @@
#define USB_DEVICE_ID_LOGITECH_G510_USB_AUDIO 0xc22e
#define USB_DEVICE_ID_LOGITECH_G29_WHEEL 0xc24f
#define USB_DEVICE_ID_LOGITECH_G920_WHEEL 0xc262
+#define USB_DEVICE_ID_LOGITECH_G923_XBOX_WHEEL 0xc26e
#define USB_DEVICE_ID_LOGITECH_WINGMAN_F3D 0xc283
#define USB_DEVICE_ID_LOGITECH_FORCE3D_PRO 0xc286
#define USB_DEVICE_ID_LOGITECH_FLIGHT_SYSTEM_G940 0xc287
@@ -1182,6 +1189,7 @@
#define USB_VENDOR_ID_VALVE 0x28de
#define USB_DEVICE_ID_STEAM_CONTROLLER 0x1102
#define USB_DEVICE_ID_STEAM_CONTROLLER_WIRELESS 0x1142
+#define USB_DEVICE_ID_STEAM_DECK 0x1205
#define USB_VENDOR_ID_STEELSERIES 0x1038
#define USB_DEVICE_ID_STEELSERIES_SRWS1 0x1410
@@ -1296,7 +1304,9 @@
#define USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO01 0x0042
#define USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO01_V2 0x0905
#define USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO_L 0x0935
+#define USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO_PRO_MW 0x0934
#define USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO_PRO_S 0x0909
+#define USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO_PRO_SW 0x0933
#define USB_DEVICE_ID_UGEE_XPPEN_TABLET_STAR06 0x0078
#define USB_DEVICE_ID_UGEE_TABLET_G5 0x0074
#define USB_DEVICE_ID_UGEE_TABLET_EX07S 0x0071
diff --git a/drivers/hid/hid-input-test.c b/drivers/hid/hid-input-test.c
new file mode 100644
index 000000000000..77c2d45ac62a
--- /dev/null
+++ b/drivers/hid/hid-input-test.c
@@ -0,0 +1,80 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * HID to Linux Input mapping
+ *
+ * Copyright (c) 2022 José Expósito <jose.exposito89@gmail.com>
+ */
+
+#include <kunit/test.h>
+
+static void hid_test_input_set_battery_charge_status(struct kunit *test)
+{
+ struct hid_device *dev;
+ bool handled;
+
+ dev = kunit_kzalloc(test, sizeof(*dev), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
+
+ handled = hidinput_set_battery_charge_status(dev, HID_DG_HEIGHT, 0);
+ KUNIT_EXPECT_FALSE(test, handled);
+ KUNIT_EXPECT_EQ(test, dev->battery_charge_status, POWER_SUPPLY_STATUS_UNKNOWN);
+
+ handled = hidinput_set_battery_charge_status(dev, HID_BAT_CHARGING, 0);
+ KUNIT_EXPECT_TRUE(test, handled);
+ KUNIT_EXPECT_EQ(test, dev->battery_charge_status, POWER_SUPPLY_STATUS_DISCHARGING);
+
+ handled = hidinput_set_battery_charge_status(dev, HID_BAT_CHARGING, 1);
+ KUNIT_EXPECT_TRUE(test, handled);
+ KUNIT_EXPECT_EQ(test, dev->battery_charge_status, POWER_SUPPLY_STATUS_CHARGING);
+}
+
+static void hid_test_input_get_battery_property(struct kunit *test)
+{
+ struct power_supply *psy;
+ struct hid_device *dev;
+ union power_supply_propval val;
+ int ret;
+
+ dev = kunit_kzalloc(test, sizeof(*dev), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
+ dev->battery_avoid_query = true;
+
+ psy = kunit_kzalloc(test, sizeof(*psy), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, psy);
+ psy->drv_data = dev;
+
+ dev->battery_status = HID_BATTERY_UNKNOWN;
+ dev->battery_charge_status = POWER_SUPPLY_STATUS_CHARGING;
+ ret = hidinput_get_battery_property(psy, POWER_SUPPLY_PROP_STATUS, &val);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+ KUNIT_EXPECT_EQ(test, val.intval, POWER_SUPPLY_STATUS_UNKNOWN);
+
+ dev->battery_status = HID_BATTERY_REPORTED;
+ dev->battery_charge_status = POWER_SUPPLY_STATUS_CHARGING;
+ ret = hidinput_get_battery_property(psy, POWER_SUPPLY_PROP_STATUS, &val);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+ KUNIT_EXPECT_EQ(test, val.intval, POWER_SUPPLY_STATUS_CHARGING);
+
+ dev->battery_status = HID_BATTERY_REPORTED;
+ dev->battery_charge_status = POWER_SUPPLY_STATUS_DISCHARGING;
+ ret = hidinput_get_battery_property(psy, POWER_SUPPLY_PROP_STATUS, &val);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+ KUNIT_EXPECT_EQ(test, val.intval, POWER_SUPPLY_STATUS_DISCHARGING);
+}
+
+static struct kunit_case hid_input_tests[] = {
+ KUNIT_CASE(hid_test_input_set_battery_charge_status),
+ KUNIT_CASE(hid_test_input_get_battery_property),
+ { }
+};
+
+static struct kunit_suite hid_input_test_suite = {
+ .name = "hid_input",
+ .test_cases = hid_input_tests,
+};
+
+kunit_test_suite(hid_input_test_suite);
+
+MODULE_DESCRIPTION("HID input KUnit tests");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("José Expósito <jose.exposito89@gmail.com>");
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index 9b59e436df0a..7fc967964dd8 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -370,12 +370,18 @@ static const struct hid_device_id hid_battery_quirks[] = {
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH,
USB_DEVICE_ID_LOGITECH_DINOVO_EDGE_KBD),
HID_BATTERY_QUIRK_IGNORE },
+ { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_ASUS_TP420IA_TOUCHSCREEN),
+ HID_BATTERY_QUIRK_IGNORE },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ASUS_UX550_TOUCHSCREEN),
HID_BATTERY_QUIRK_IGNORE },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ASUS_UX550VE_TOUCHSCREEN),
HID_BATTERY_QUIRK_IGNORE },
{ HID_USB_DEVICE(USB_VENDOR_ID_UGEE, USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO_L),
HID_BATTERY_QUIRK_AVOID_QUERY },
+ { HID_USB_DEVICE(USB_VENDOR_ID_UGEE, USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO_PRO_MW),
+ HID_BATTERY_QUIRK_AVOID_QUERY },
+ { HID_USB_DEVICE(USB_VENDOR_ID_UGEE, USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO_PRO_SW),
+ HID_BATTERY_QUIRK_AVOID_QUERY },
{ HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_ENVY_X360_15),
HID_BATTERY_QUIRK_IGNORE },
{ HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_ENVY_X360_15T_DR100),
@@ -384,6 +390,8 @@ static const struct hid_device_id hid_battery_quirks[] = {
HID_BATTERY_QUIRK_IGNORE },
{ HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_SPECTRE_X360_15),
HID_BATTERY_QUIRK_IGNORE },
+ { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_SPECTRE_X360_13_AW0020NG),
+ HID_BATTERY_QUIRK_IGNORE },
{ HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_SURFACE_GO_TOUCHSCREEN),
HID_BATTERY_QUIRK_IGNORE },
{ HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_SURFACE_GO2_TOUCHSCREEN),
@@ -482,7 +490,7 @@ static int hidinput_get_battery_property(struct power_supply *psy,
if (dev->battery_status == HID_BATTERY_UNKNOWN)
val->intval = POWER_SUPPLY_STATUS_UNKNOWN;
else
- val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
+ val->intval = dev->battery_charge_status;
break;
case POWER_SUPPLY_PROP_SCOPE:
@@ -550,6 +558,7 @@ static int hidinput_setup_battery(struct hid_device *dev, unsigned report_type,
dev->battery_max = max;
dev->battery_report_type = report_type;
dev->battery_report_id = field->report->id;
+ dev->battery_charge_status = POWER_SUPPLY_STATUS_DISCHARGING;
/*
* Stylus is normally not connected to the device and thus we
@@ -616,6 +625,20 @@ static void hidinput_update_battery(struct hid_device *dev, int value)
power_supply_changed(dev->battery);
}
}
+
+static bool hidinput_set_battery_charge_status(struct hid_device *dev,
+ unsigned int usage, int value)
+{
+ switch (usage) {
+ case HID_BAT_CHARGING:
+ dev->battery_charge_status = value ?
+ POWER_SUPPLY_STATUS_CHARGING :
+ POWER_SUPPLY_STATUS_DISCHARGING;
+ return true;
+ }
+
+ return false;
+}
#else /* !CONFIG_HID_BATTERY_STRENGTH */
static int hidinput_setup_battery(struct hid_device *dev, unsigned report_type,
struct hid_field *field, bool is_percentage)
@@ -630,6 +653,12 @@ static void hidinput_cleanup_battery(struct hid_device *dev)
static void hidinput_update_battery(struct hid_device *dev, int value)
{
}
+
+static bool hidinput_set_battery_charge_status(struct hid_device *dev,
+ unsigned int usage, int value)
+{
+ return false;
+}
#endif /* CONFIG_HID_BATTERY_STRENGTH */
static bool hidinput_field_in_collection(struct hid_device *device, struct hid_field *field,
@@ -789,6 +818,14 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
break;
}
+ if ((usage->hid & 0xf0) == 0xa0) { /* SystemControl */
+ switch (usage->hid & 0xf) {
+ case 0x9: map_key_clear(KEY_MICMUTE); break;
+ default: goto ignore;
+ }
+ break;
+ }
+
if ((usage->hid & 0xf0) == 0xb0) { /* SC - Display */
switch (usage->hid & 0xf) {
case 0x05: map_key_clear(KEY_SWITCHVIDEOMODE); break;
@@ -1219,6 +1256,9 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
hidinput_setup_battery(device, HID_INPUT_REPORT, field, true);
usage->type = EV_PWR;
return;
+ case HID_BAT_CHARGING:
+ usage->type = EV_PWR;
+ return;
}
goto unknown;
@@ -1461,7 +1501,11 @@ void hidinput_hid_event(struct hid_device *hid, struct hid_field *field, struct
return;
if (usage->type == EV_PWR) {
- hidinput_update_battery(hid, value);
+ bool handled = hidinput_set_battery_charge_status(hid, usage->hid, value);
+
+ if (!handled)
+ hidinput_update_battery(hid, value);
+
return;
}
@@ -2317,3 +2361,7 @@ void hidinput_disconnect(struct hid_device *hid)
cancel_work_sync(&hid->led_work);
}
EXPORT_SYMBOL_GPL(hidinput_disconnect);
+
+#ifdef CONFIG_HID_KUNIT_TEST
+#include "hid-input-test.c"
+#endif
diff --git a/drivers/hid/hid-letsketch.c b/drivers/hid/hid-letsketch.c
index 74d17cf518ba..97f047f18136 100644
--- a/drivers/hid/hid-letsketch.c
+++ b/drivers/hid/hid-letsketch.c
@@ -238,7 +238,7 @@ static int letsketch_probe(struct hid_device *hdev, const struct hid_device_id *
char buf[256];
int i, ret;
- if (!hid_is_using_ll_driver(hdev, &usb_hid_driver))
+ if (!hid_is_usb(hdev))
return -ENODEV;
intf = to_usb_interface(hdev->dev.parent);
diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
index c358778e070b..62180414efcc 100644
--- a/drivers/hid/hid-logitech-dj.c
+++ b/drivers/hid/hid-logitech-dj.c
@@ -554,7 +554,7 @@ static const u8 hid_reportid_size_map[NUMBER_OF_HID_REPORTS] = {
#define LOGITECH_DJ_INTERFACE_NUMBER 0x02
-static struct hid_ll_driver logi_dj_ll_driver;
+static const struct hid_ll_driver logi_dj_ll_driver;
static int logi_dj_recv_query_paired_devices(struct dj_receiver_dev *djrcv_dev);
static void delayedwork_callback(struct work_struct *work);
@@ -1506,7 +1506,7 @@ static bool logi_dj_ll_may_wakeup(struct hid_device *hid)
return hid_hw_may_wakeup(djrcv_dev->hidpp);
}
-static struct hid_ll_driver logi_dj_ll_driver = {
+static const struct hid_ll_driver logi_dj_ll_driver = {
.parse = logi_dj_ll_parse,
.start = logi_dj_ll_start,
.stop = logi_dj_ll_stop,
diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
index abf2c95e4d0b..25dcda76d6c7 100644
--- a/drivers/hid/hid-logitech-hidpp.c
+++ b/drivers/hid/hid-logitech-hidpp.c
@@ -30,11 +30,7 @@
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Benjamin Tissoires <benjamin.tissoires@gmail.com>");
MODULE_AUTHOR("Nestor Lopez Casado <nlopezcasad@logitech.com>");
-
-static bool disable_raw_mode;
-module_param(disable_raw_mode, bool, 0644);
-MODULE_PARM_DESC(disable_raw_mode,
- "Disable Raw mode reporting for touchpads and keep firmware gestures.");
+MODULE_AUTHOR("Bastien Nocera <hadess@hadess.net>");
static bool disable_tap_to_click;
module_param(disable_tap_to_click, bool, 0644);
@@ -71,12 +67,13 @@ MODULE_PARM_DESC(disable_tap_to_click,
/* bits 2..20 are reserved for classes */
/* #define HIDPP_QUIRK_CONNECT_EVENTS BIT(21) disabled */
#define HIDPP_QUIRK_WTP_PHYSICAL_BUTTONS BIT(22)
-#define HIDPP_QUIRK_NO_HIDINPUT BIT(23)
+#define HIDPP_QUIRK_DELAYED_INIT BIT(23)
#define HIDPP_QUIRK_FORCE_OUTPUT_REPORTS BIT(24)
#define HIDPP_QUIRK_UNIFYING BIT(25)
#define HIDPP_QUIRK_HIDPP_WHEELS BIT(26)
#define HIDPP_QUIRK_HIDPP_EXTRA_MOUSE_BTNS BIT(27)
#define HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS BIT(28)
+#define HIDPP_QUIRK_HI_RES_SCROLL_1P0 BIT(29)
/* These are just aliases for now */
#define HIDPP_QUIRK_KBD_SCROLL_WHEEL HIDPP_QUIRK_HIDPP_WHEELS
@@ -87,8 +84,6 @@ MODULE_PARM_DESC(disable_tap_to_click,
HIDPP_CAPABILITY_HIDPP20_HI_RES_SCROLL | \
HIDPP_CAPABILITY_HIDPP20_HI_RES_WHEEL)
-#define HIDPP_QUIRK_DELAYED_INIT HIDPP_QUIRK_NO_HIDINPUT
-
#define HIDPP_CAPABILITY_HIDPP10_BATTERY BIT(0)
#define HIDPP_CAPABILITY_HIDPP20_BATTERY BIT(1)
#define HIDPP_CAPABILITY_BATTERY_MILEAGE BIT(2)
@@ -225,6 +220,16 @@ struct hidpp_device {
#define HIDPP_ERROR_INVALID_PARAM_VALUE 0x0b
#define HIDPP_ERROR_WRONG_PIN_CODE 0x0c
/* HID++ 2.0 error codes */
+#define HIDPP20_ERROR_NO_ERROR 0x00
+#define HIDPP20_ERROR_UNKNOWN 0x01
+#define HIDPP20_ERROR_INVALID_ARGS 0x02
+#define HIDPP20_ERROR_OUT_OF_RANGE 0x03
+#define HIDPP20_ERROR_HW_ERROR 0x04
+#define HIDPP20_ERROR_LOGITECH_INTERNAL 0x05
+#define HIDPP20_ERROR_INVALID_FEATURE_INDEX 0x06
+#define HIDPP20_ERROR_INVALID_FUNCTION_ID 0x07
+#define HIDPP20_ERROR_BUSY 0x08
+#define HIDPP20_ERROR_UNSUPPORTED 0x09
#define HIDPP20_ERROR 0xff
static void hidpp_connect_event(struct hidpp_device *hidpp_dev);
@@ -279,6 +284,7 @@ static int hidpp_send_message_sync(struct hidpp_device *hidpp,
struct hidpp_report *response)
{
int ret;
+ int max_retries = 3;
mutex_lock(&hidpp->send_mutex);
@@ -291,34 +297,39 @@ static int hidpp_send_message_sync(struct hidpp_device *hidpp,
*/
*response = *message;
- ret = __hidpp_send_report(hidpp->hid_dev, message);
+ for (; max_retries != 0; max_retries--) {
+ ret = __hidpp_send_report(hidpp->hid_dev, message);
- if (ret) {
- dbg_hid("__hidpp_send_report returned err: %d\n", ret);
- memset(response, 0, sizeof(struct hidpp_report));
- goto exit;
- }
+ if (ret) {
+ dbg_hid("__hidpp_send_report returned err: %d\n", ret);
+ memset(response, 0, sizeof(struct hidpp_report));
+ goto exit;
+ }
- if (!wait_event_timeout(hidpp->wait, hidpp->answer_available,
- 5*HZ)) {
- dbg_hid("%s:timeout waiting for response\n", __func__);
- memset(response, 0, sizeof(struct hidpp_report));
- ret = -ETIMEDOUT;
- }
+ if (!wait_event_timeout(hidpp->wait, hidpp->answer_available,
+ 5*HZ)) {
+ dbg_hid("%s:timeout waiting for response\n", __func__);
+ memset(response, 0, sizeof(struct hidpp_report));
+ ret = -ETIMEDOUT;
+ }
- if (response->report_id == REPORT_ID_HIDPP_SHORT &&
- response->rap.sub_id == HIDPP_ERROR) {
- ret = response->rap.params[1];
- dbg_hid("%s:got hidpp error %02X\n", __func__, ret);
- goto exit;
- }
+ if (response->report_id == REPORT_ID_HIDPP_SHORT &&
+ response->rap.sub_id == HIDPP_ERROR) {
+ ret = response->rap.params[1];
+ dbg_hid("%s:got hidpp error %02X\n", __func__, ret);
+ goto exit;
+ }
- if ((response->report_id == REPORT_ID_HIDPP_LONG ||
- response->report_id == REPORT_ID_HIDPP_VERY_LONG) &&
- response->fap.feature_index == HIDPP20_ERROR) {
- ret = response->fap.params[1];
- dbg_hid("%s:got hidpp 2.0 error %02X\n", __func__, ret);
- goto exit;
+ if ((response->report_id == REPORT_ID_HIDPP_LONG ||
+ response->report_id == REPORT_ID_HIDPP_VERY_LONG) &&
+ response->fap.feature_index == HIDPP20_ERROR) {
+ ret = response->fap.params[1];
+ if (ret != HIDPP20_ERROR_BUSY) {
+ dbg_hid("%s:got hidpp 2.0 error %02X\n", __func__, ret);
+ goto exit;
+ }
+ dbg_hid("%s:got busy hidpp 2.0 error %02X, retrying\n", __func__, ret);
+ }
}
exit:
@@ -334,8 +345,13 @@ static int hidpp_send_fap_command_sync(struct hidpp_device *hidpp,
struct hidpp_report *message;
int ret;
- if (param_count > sizeof(message->fap.params))
+ if (param_count > sizeof(message->fap.params)) {
+ hid_dbg(hidpp->hid_dev,
+ "Invalid number of parameters passed to command (%d != %llu)\n",
+ param_count,
+ (unsigned long long) sizeof(message->fap.params));
return -EINVAL;
+ }
message = kzalloc(sizeof(struct hidpp_report), GFP_KERNEL);
if (!message)
@@ -3436,11 +3452,17 @@ static int hi_res_scroll_enable(struct hidpp_device *hidpp)
ret = hidpp10_enable_scrolling_acceleration(hidpp);
multiplier = 8;
}
- if (ret)
+ if (ret) {
+ hid_dbg(hidpp->hid_dev,
+ "Could not enable hi-res scrolling: %d\n", ret);
return ret;
+ }
- if (multiplier == 0)
+ if (multiplier == 0) {
+ hid_dbg(hidpp->hid_dev,
+ "Invalid multiplier 0 from device, setting it to 1\n");
multiplier = 1;
+ }
hidpp->vertical_wheel_counter.wheel_multiplier = multiplier;
hid_dbg(hidpp->hid_dev, "wheel multiplier = %d\n", multiplier);
@@ -3472,14 +3494,8 @@ static int hidpp_initialize_hires_scroll(struct hidpp_device *hidpp)
hid_dbg(hidpp->hid_dev, "Detected HID++ 2.0 hi-res scrolling\n");
}
} else {
- struct hidpp_report response;
-
- ret = hidpp_send_rap_command_sync(hidpp,
- REPORT_ID_HIDPP_SHORT,
- HIDPP_GET_REGISTER,
- HIDPP_ENABLE_FAST_SCROLL,
- NULL, 0, &response);
- if (!ret) {
+ /* We cannot detect fast scrolling support on HID++ 1.0 devices */
+ if (hidpp->quirks & HIDPP_QUIRK_HI_RES_SCROLL_1P0) {
hidpp->capabilities |= HIDPP_CAPABILITY_HIDPP10_FAST_SCROLL;
hid_dbg(hidpp->hid_dev, "Detected HID++ 1.0 fast scroll\n");
}
@@ -3978,7 +3994,8 @@ static void hidpp_connect_event(struct hidpp_device *hidpp)
}
hidpp_initialize_battery(hidpp);
- hidpp_initialize_hires_scroll(hidpp);
+ if (!hid_is_usb(hidpp->hid_dev))
+ hidpp_initialize_hires_scroll(hidpp);
/* forward current battery state */
if (hidpp->capabilities & HIDPP_CAPABILITY_HIDPP10_BATTERY) {
@@ -4001,7 +4018,7 @@ static void hidpp_connect_event(struct hidpp_device *hidpp)
if (hidpp->capabilities & HIDPP_CAPABILITY_HI_RES_SCROLL)
hi_res_scroll_enable(hidpp);
- if (!(hidpp->quirks & HIDPP_QUIRK_NO_HIDINPUT) || hidpp->delayed_input)
+ if (!(hidpp->quirks & HIDPP_QUIRK_DELAYED_INIT) || hidpp->delayed_input)
/* if the input nodes are already created, we can stop now */
return;
@@ -4106,6 +4123,7 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
bool connected;
unsigned int connect_mask = HID_CONNECT_DEFAULT;
struct hidpp_ff_private_data data;
+ bool will_restart = false;
/* report_fixup needs drvdata to be set before we call hid_parse */
hidpp = devm_kzalloc(&hdev->dev, sizeof(*hidpp), GFP_KERNEL);
@@ -4146,11 +4164,6 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
hidpp_application_equals(hdev, HID_GD_KEYBOARD))
hidpp->quirks |= HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS;
- if (disable_raw_mode) {
- hidpp->quirks &= ~HIDPP_QUIRK_CLASS_WTP;
- hidpp->quirks &= ~HIDPP_QUIRK_NO_HIDINPUT;
- }
-
if (hidpp->quirks & HIDPP_QUIRK_CLASS_WTP) {
ret = wtp_allocate(hdev, id);
if (ret)
@@ -4161,6 +4174,10 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
return ret;
}
+ if (hidpp->quirks & HIDPP_QUIRK_DELAYED_INIT ||
+ hidpp->quirks & HIDPP_QUIRK_UNIFYING)
+ will_restart = true;
+
INIT_WORK(&hidpp->work, delayed_work_cb);
mutex_init(&hidpp->send_mutex);
init_waitqueue_head(&hidpp->wait);
@@ -4175,7 +4192,7 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
* Plain USB connections need to actually call start and open
* on the transport driver to allow incoming data.
*/
- ret = hid_hw_start(hdev, 0);
+ ret = hid_hw_start(hdev, will_restart ? 0 : connect_mask);
if (ret) {
hid_err(hdev, "hw start failed\n");
goto hid_hw_start_fail;
@@ -4212,6 +4229,7 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
hidpp->wireless_feature_index = 0;
else if (ret)
goto hid_hw_init_fail;
+ ret = 0;
}
if (connected && (hidpp->quirks & HIDPP_QUIRK_CLASS_WTP)) {
@@ -4226,19 +4244,21 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
hidpp_connect_event(hidpp);
- /* Reset the HID node state */
- hid_device_io_stop(hdev);
- hid_hw_close(hdev);
- hid_hw_stop(hdev);
+ if (will_restart) {
+ /* Reset the HID node state */
+ hid_device_io_stop(hdev);
+ hid_hw_close(hdev);
+ hid_hw_stop(hdev);
- if (hidpp->quirks & HIDPP_QUIRK_NO_HIDINPUT)
- connect_mask &= ~HID_CONNECT_HIDINPUT;
+ if (hidpp->quirks & HIDPP_QUIRK_DELAYED_INIT)
+ connect_mask &= ~HID_CONNECT_HIDINPUT;
- /* Now export the actual inputs and hidraw nodes to the world */
- ret = hid_hw_start(hdev, connect_mask);
- if (ret) {
- hid_err(hdev, "%s:hid_hw_start returned error\n", __func__);
- goto hid_hw_start_fail;
+ /* Now export the actual inputs and hidraw nodes to the world */
+ ret = hid_hw_start(hdev, connect_mask);
+ if (ret) {
+ hid_err(hdev, "%s:hid_hw_start returned error\n", __func__);
+ goto hid_hw_start_fail;
+ }
}
if (hidpp->quirks & HIDPP_QUIRK_CLASS_G920) {
@@ -4296,9 +4316,15 @@ static const struct hid_device_id hidpp_devices[] = {
HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH,
USB_DEVICE_ID_LOGITECH_T651),
.driver_data = HIDPP_QUIRK_CLASS_WTP },
+ { /* Mouse Logitech Anywhere MX */
+ LDJ_DEVICE(0x1017), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_1P0 },
{ /* Mouse logitech M560 */
LDJ_DEVICE(0x402d),
.driver_data = HIDPP_QUIRK_DELAYED_INIT | HIDPP_QUIRK_CLASS_M560 },
+ { /* Mouse Logitech M705 (firmware RQM17) */
+ LDJ_DEVICE(0x101b), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_1P0 },
+ { /* Mouse Logitech Performance MX */
+ LDJ_DEVICE(0x101a), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_1P0 },
{ /* Keyboard logitech K400 */
LDJ_DEVICE(0x4024),
.driver_data = HIDPP_QUIRK_CLASS_K400 },
@@ -4347,6 +4373,9 @@ static const struct hid_device_id hidpp_devices[] = {
{ /* Logitech G920 Wheel over USB */
HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G920_WHEEL),
.driver_data = HIDPP_QUIRK_CLASS_G920 | HIDPP_QUIRK_FORCE_OUTPUT_REPORTS},
+ { /* Logitech G923 Wheel (Xbox version) over USB */
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G923_XBOX_WHEEL),
+ .driver_data = HIDPP_QUIRK_CLASS_G920 | HIDPP_QUIRK_FORCE_OUTPUT_REPORTS },
{ /* Logitech G Pro Gaming Mouse over USB */
HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC088) },
@@ -4366,6 +4395,8 @@ static const struct hid_device_id hidpp_devices[] = {
{ /* MX Ergo trackball over Bluetooth */
HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb01d) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb01e) },
+ { /* Signature M650 over Bluetooth */
+ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb02a) },
{ /* MX Master 3 mouse over Bluetooth */
HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb023) },
{}
diff --git a/drivers/hid/hid-mcp2221.c b/drivers/hid/hid-mcp2221.c
index e61dd039354b..f74a977cf8f8 100644
--- a/drivers/hid/hid-mcp2221.c
+++ b/drivers/hid/hid-mcp2221.c
@@ -922,6 +922,9 @@ static void mcp2221_hid_unregister(void *ptr)
/* This is needed to be sure hid_hw_stop() isn't called twice by the subsystem */
static void mcp2221_remove(struct hid_device *hdev)
{
+ struct mcp2221 *mcp = hid_get_drvdata(hdev);
+
+ cancel_delayed_work_sync(&mcp->init_work);
}
#if IS_REACHABLE(CONFIG_IIO)
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 372cbdd223e0..e31be0cb8b85 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -71,6 +71,7 @@ MODULE_LICENSE("GPL");
#define MT_QUIRK_SEPARATE_APP_REPORT BIT(19)
#define MT_QUIRK_FORCE_MULTI_INPUT BIT(20)
#define MT_QUIRK_DISABLE_WAKEUP BIT(21)
+#define MT_QUIRK_ORIENTATION_INVERT BIT(22)
#define MT_INPUTMODE_TOUCHSCREEN 0x02
#define MT_INPUTMODE_TOUCHPAD 0x03
@@ -1009,6 +1010,7 @@ static int mt_process_slot(struct mt_device *td, struct input_dev *input,
struct mt_usages *slot)
{
struct input_mt *mt = input->mt;
+ struct hid_device *hdev = td->hdev;
__s32 quirks = app->quirks;
bool valid = true;
bool confidence_state = true;
@@ -1086,6 +1088,10 @@ static int mt_process_slot(struct mt_device *td, struct input_dev *input,
int orientation = wide;
int max_azimuth;
int azimuth;
+ int x;
+ int y;
+ int cx;
+ int cy;
if (slot->a != DEFAULT_ZERO) {
/*
@@ -1104,6 +1110,9 @@ static int mt_process_slot(struct mt_device *td, struct input_dev *input,
if (azimuth > max_azimuth * 2)
azimuth -= max_azimuth * 4;
orientation = -azimuth;
+ if (quirks & MT_QUIRK_ORIENTATION_INVERT)
+ orientation = -orientation;
+
}
if (quirks & MT_QUIRK_TOUCH_SIZE_SCALING) {
@@ -1115,10 +1124,23 @@ static int mt_process_slot(struct mt_device *td, struct input_dev *input,
minor = minor >> 1;
}
- input_event(input, EV_ABS, ABS_MT_POSITION_X, *slot->x);
- input_event(input, EV_ABS, ABS_MT_POSITION_Y, *slot->y);
- input_event(input, EV_ABS, ABS_MT_TOOL_X, *slot->cx);
- input_event(input, EV_ABS, ABS_MT_TOOL_Y, *slot->cy);
+ x = hdev->quirks & HID_QUIRK_X_INVERT ?
+ input_abs_get_max(input, ABS_MT_POSITION_X) - *slot->x :
+ *slot->x;
+ y = hdev->quirks & HID_QUIRK_Y_INVERT ?
+ input_abs_get_max(input, ABS_MT_POSITION_Y) - *slot->y :
+ *slot->y;
+ cx = hdev->quirks & HID_QUIRK_X_INVERT ?
+ input_abs_get_max(input, ABS_MT_POSITION_X) - *slot->cx :
+ *slot->cx;
+ cy = hdev->quirks & HID_QUIRK_Y_INVERT ?
+ input_abs_get_max(input, ABS_MT_POSITION_Y) - *slot->cy :
+ *slot->cy;
+
+ input_event(input, EV_ABS, ABS_MT_POSITION_X, x);
+ input_event(input, EV_ABS, ABS_MT_POSITION_Y, y);
+ input_event(input, EV_ABS, ABS_MT_TOOL_X, cx);
+ input_event(input, EV_ABS, ABS_MT_TOOL_Y, cy);
input_event(input, EV_ABS, ABS_MT_DISTANCE, !*slot->tip_state);
input_event(input, EV_ABS, ABS_MT_ORIENTATION, orientation);
input_event(input, EV_ABS, ABS_MT_PRESSURE, *slot->p);
@@ -1735,6 +1757,15 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
if (id->vendor == HID_ANY_ID && id->product == HID_ANY_ID)
td->serial_maybe = true;
+
+ /* Orientation is inverted if the X or Y axes are
+ * flipped, but normalized if both are inverted.
+ */
+ if (hdev->quirks & (HID_QUIRK_X_INVERT | HID_QUIRK_Y_INVERT) &&
+ !((hdev->quirks & HID_QUIRK_X_INVERT)
+ && (hdev->quirks & HID_QUIRK_Y_INVERT)))
+ td->mtclass.quirks = MT_QUIRK_ORIENTATION_INVERT;
+
/* This allows the driver to correctly support devices
* that emit events over several HID messages.
*/
diff --git a/drivers/hid/hid-playstation.c b/drivers/hid/hid-playstation.c
index 27c40894acab..8ac8f7b8e317 100644
--- a/drivers/hid/hid-playstation.c
+++ b/drivers/hid/hid-playstation.c
@@ -993,19 +993,22 @@ static int dualsense_get_calibration_data(struct dualsense *ds)
*/
speed_2x = (gyro_speed_plus + gyro_speed_minus);
ds->gyro_calib_data[0].abs_code = ABS_RX;
- ds->gyro_calib_data[0].bias = gyro_pitch_bias;
+ ds->gyro_calib_data[0].bias = 0;
ds->gyro_calib_data[0].sens_numer = speed_2x*DS_GYRO_RES_PER_DEG_S;
- ds->gyro_calib_data[0].sens_denom = gyro_pitch_plus - gyro_pitch_minus;
+ ds->gyro_calib_data[0].sens_denom = abs(gyro_pitch_plus - gyro_pitch_bias) +
+ abs(gyro_pitch_minus - gyro_pitch_bias);
ds->gyro_calib_data[1].abs_code = ABS_RY;
- ds->gyro_calib_data[1].bias = gyro_yaw_bias;
+ ds->gyro_calib_data[1].bias = 0;
ds->gyro_calib_data[1].sens_numer = speed_2x*DS_GYRO_RES_PER_DEG_S;
- ds->gyro_calib_data[1].sens_denom = gyro_yaw_plus - gyro_yaw_minus;
+ ds->gyro_calib_data[1].sens_denom = abs(gyro_yaw_plus - gyro_yaw_bias) +
+ abs(gyro_yaw_minus - gyro_yaw_bias);
ds->gyro_calib_data[2].abs_code = ABS_RZ;
- ds->gyro_calib_data[2].bias = gyro_roll_bias;
+ ds->gyro_calib_data[2].bias = 0;
ds->gyro_calib_data[2].sens_numer = speed_2x*DS_GYRO_RES_PER_DEG_S;
- ds->gyro_calib_data[2].sens_denom = gyro_roll_plus - gyro_roll_minus;
+ ds->gyro_calib_data[2].sens_denom = abs(gyro_roll_plus - gyro_roll_bias) +
+ abs(gyro_roll_minus - gyro_roll_bias);
/*
* Sanity check gyro calibration data. This is needed to prevent crashes
@@ -1388,8 +1391,7 @@ static int dualsense_parse_report(struct ps_device *ps_dev, struct hid_report *r
for (i = 0; i < ARRAY_SIZE(ds_report->gyro); i++) {
int raw_data = (short)le16_to_cpu(ds_report->gyro[i]);
int calib_data = mult_frac(ds->gyro_calib_data[i].sens_numer,
- raw_data - ds->gyro_calib_data[i].bias,
- ds->gyro_calib_data[i].sens_denom);
+ raw_data, ds->gyro_calib_data[i].sens_denom);
input_report_abs(ds->sensors, ds->gyro_calib_data[i].abs_code, calib_data);
}
@@ -1792,11 +1794,10 @@ static int dualshock4_get_calibration_data(struct dualshock4 *ds4)
if (retries < 2) {
hid_warn(hdev, "Retrying DualShock 4 get calibration report (0x02) request\n");
continue;
- } else {
- ret = -EILSEQ;
- goto err_free;
}
+
hid_err(hdev, "Failed to retrieve DualShock4 calibration info: %d\n", ret);
+ ret = -EILSEQ;
goto err_free;
} else {
break;
@@ -1849,19 +1850,22 @@ static int dualshock4_get_calibration_data(struct dualshock4 *ds4)
*/
speed_2x = (gyro_speed_plus + gyro_speed_minus);
ds4->gyro_calib_data[0].abs_code = ABS_RX;
- ds4->gyro_calib_data[0].bias = gyro_pitch_bias;
+ ds4->gyro_calib_data[0].bias = 0;
ds4->gyro_calib_data[0].sens_numer = speed_2x*DS4_GYRO_RES_PER_DEG_S;
- ds4->gyro_calib_data[0].sens_denom = gyro_pitch_plus - gyro_pitch_minus;
+ ds4->gyro_calib_data[0].sens_denom = abs(gyro_pitch_plus - gyro_pitch_bias) +
+ abs(gyro_pitch_minus - gyro_pitch_bias);
ds4->gyro_calib_data[1].abs_code = ABS_RY;
- ds4->gyro_calib_data[1].bias = gyro_yaw_bias;
+ ds4->gyro_calib_data[1].bias = 0;
ds4->gyro_calib_data[1].sens_numer = speed_2x*DS4_GYRO_RES_PER_DEG_S;
- ds4->gyro_calib_data[1].sens_denom = gyro_yaw_plus - gyro_yaw_minus;
+ ds4->gyro_calib_data[1].sens_denom = abs(gyro_yaw_plus - gyro_yaw_bias) +
+ abs(gyro_yaw_minus - gyro_yaw_bias);
ds4->gyro_calib_data[2].abs_code = ABS_RZ;
- ds4->gyro_calib_data[2].bias = gyro_roll_bias;
+ ds4->gyro_calib_data[2].bias = 0;
ds4->gyro_calib_data[2].sens_numer = speed_2x*DS4_GYRO_RES_PER_DEG_S;
- ds4->gyro_calib_data[2].sens_denom = gyro_roll_plus - gyro_roll_minus;
+ ds4->gyro_calib_data[2].sens_denom = abs(gyro_roll_plus - gyro_roll_bias) +
+ abs(gyro_roll_minus - gyro_roll_bias);
/*
* Sanity check gyro calibration data. This is needed to prevent crashes
@@ -2242,8 +2246,7 @@ static int dualshock4_parse_report(struct ps_device *ps_dev, struct hid_report *
for (i = 0; i < ARRAY_SIZE(ds4_report->gyro); i++) {
int raw_data = (short)le16_to_cpu(ds4_report->gyro[i]);
int calib_data = mult_frac(ds4->gyro_calib_data[i].sens_numer,
- raw_data - ds4->gyro_calib_data[i].bias,
- ds4->gyro_calib_data[i].sens_denom);
+ raw_data, ds4->gyro_calib_data[i].sens_denom);
input_report_abs(ds4->sensors, ds4->gyro_calib_data[i].abs_code, calib_data);
}
diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
index be3ad02573de..66e64350f138 100644
--- a/drivers/hid/hid-quirks.c
+++ b/drivers/hid/hid-quirks.c
@@ -393,7 +393,8 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_DT1URBK) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_DT1DRBK) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_HT1URBK) },
- { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_HT1DRBK) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_HT1DRBK_010D) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_M_HT1DRBK_011C) },
#endif
#if IS_ENABLED(CONFIG_HID_ELO)
{ HID_USB_DEVICE(USB_VENDOR_ID_ELO, 0x0009) },
@@ -1236,7 +1237,7 @@ EXPORT_SYMBOL_GPL(hid_quirks_exit);
static unsigned long hid_gets_squirk(const struct hid_device *hdev)
{
const struct hid_device_id *bl_entry;
- unsigned long quirks = 0;
+ unsigned long quirks = hdev->initial_quirks;
if (hid_match_id(hdev, hid_ignore_list))
quirks |= HID_QUIRK_IGNORE;
diff --git a/drivers/hid/hid-sensor-custom.c b/drivers/hid/hid-sensor-custom.c
index f444e63e9f36..3e3f89e01d81 100644
--- a/drivers/hid/hid-sensor-custom.c
+++ b/drivers/hid/hid-sensor-custom.c
@@ -5,6 +5,7 @@
*/
#include <linux/ctype.h>
+#include <linux/dmi.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -750,114 +751,209 @@ static void hid_sensor_custom_dev_if_remove(struct hid_sensor_custom
}
-/* luid defined in FW (e.g. ISH). Maybe used to identify sensor. */
-static const char *const known_sensor_luid[] = { "020B000000000000" };
+/*
+ * Match a known custom sensor.
+ * tag and luid is mandatory.
+ */
+struct hid_sensor_custom_match {
+ const char *tag;
+ const char *luid;
+ const char *model;
+ const char *manufacturer;
+ bool check_dmi;
+ struct dmi_system_id dmi;
+};
-static int get_luid_table_index(unsigned char *usage_str)
-{
- int i;
+/*
+ * Custom sensor properties used for matching.
+ */
+struct hid_sensor_custom_properties {
+ u16 serial_num[HID_CUSTOM_MAX_FEATURE_BYTES];
+ u16 model[HID_CUSTOM_MAX_FEATURE_BYTES];
+ u16 manufacturer[HID_CUSTOM_MAX_FEATURE_BYTES];
+};
+
+static const struct hid_sensor_custom_match hid_sensor_custom_known_table[] = {
+ /*
+ * Intel Integrated Sensor Hub (ISH)
+ */
+ { /* Intel ISH hinge */
+ .tag = "INT",
+ .luid = "020B000000000000",
+ .manufacturer = "INTEL",
+ },
+ /*
+ * Lenovo Intelligent Sensing Solution (LISS)
+ */
+ { /* ambient light */
+ .tag = "LISS",
+ .luid = "0041010200000082",
+ .model = "STK3X3X Sensor",
+ .manufacturer = "Vendor 258",
+ .check_dmi = true,
+ .dmi.matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ }
+ },
+ { /* human presence */
+ .tag = "LISS",
+ .luid = "0226000171AC0081",
+ .model = "VL53L1_HOD Sensor",
+ .manufacturer = "ST_MICRO",
+ .check_dmi = true,
+ .dmi.matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ }
+ },
+ {}
+};
- for (i = 0; i < ARRAY_SIZE(known_sensor_luid); i++) {
- if (!strncmp(usage_str, known_sensor_luid[i],
- strlen(known_sensor_luid[i])))
- return i;
+static bool hid_sensor_custom_prop_match_str(const u16 *prop, const char *match,
+ size_t count)
+{
+ while (count-- && *prop && *match) {
+ if (*prop != (u16) *match)
+ return false;
+ prop++;
+ match++;
}
- return -ENODEV;
+ return (count == -1) || *prop == (u16)*match;
}
-static int get_known_custom_sensor_index(struct hid_sensor_hub_device *hsdev)
+static int hid_sensor_custom_get_prop(struct hid_sensor_hub_device *hsdev,
+ u32 prop_usage_id, size_t prop_size,
+ u16 *prop)
{
- struct hid_sensor_hub_attribute_info sensor_manufacturer = { 0 };
- struct hid_sensor_hub_attribute_info sensor_luid_info = { 0 };
- int report_size;
+ struct hid_sensor_hub_attribute_info prop_attr = { 0 };
int ret;
- static u16 w_buf[HID_CUSTOM_MAX_FEATURE_BYTES];
- static char buf[HID_CUSTOM_MAX_FEATURE_BYTES];
- int i;
- memset(w_buf, 0, sizeof(w_buf));
- memset(buf, 0, sizeof(buf));
+ memset(prop, 0, prop_size);
- /* get manufacturer info */
- ret = sensor_hub_input_get_attribute_info(hsdev,
- HID_FEATURE_REPORT, hsdev->usage,
- HID_USAGE_SENSOR_PROP_MANUFACTURER, &sensor_manufacturer);
+ ret = sensor_hub_input_get_attribute_info(hsdev, HID_FEATURE_REPORT,
+ hsdev->usage, prop_usage_id,
+ &prop_attr);
if (ret < 0)
return ret;
- report_size =
- sensor_hub_get_feature(hsdev, sensor_manufacturer.report_id,
- sensor_manufacturer.index, sizeof(w_buf),
- w_buf);
- if (report_size <= 0) {
- hid_err(hsdev->hdev,
- "Failed to get sensor manufacturer info %d\n",
- report_size);
- return -ENODEV;
+ ret = sensor_hub_get_feature(hsdev, prop_attr.report_id,
+ prop_attr.index, prop_size, prop);
+ if (ret < 0) {
+ hid_err(hsdev->hdev, "Failed to get sensor property %08x %d\n",
+ prop_usage_id, ret);
+ return ret;
}
- /* convert from wide char to char */
- for (i = 0; i < ARRAY_SIZE(buf) - 1 && w_buf[i]; i++)
- buf[i] = (char)w_buf[i];
+ return 0;
+}
- /* ensure it's ISH sensor */
- if (strncmp(buf, "INTEL", strlen("INTEL")))
- return -ENODEV;
+static bool
+hid_sensor_custom_do_match(struct hid_sensor_hub_device *hsdev,
+ const struct hid_sensor_custom_match *match,
+ const struct hid_sensor_custom_properties *prop)
+{
+ struct dmi_system_id dmi[] = { match->dmi, { 0 } };
- memset(w_buf, 0, sizeof(w_buf));
- memset(buf, 0, sizeof(buf));
+ if (!hid_sensor_custom_prop_match_str(prop->serial_num, "LUID:", 5) ||
+ !hid_sensor_custom_prop_match_str(prop->serial_num + 5, match->luid,
+ HID_CUSTOM_MAX_FEATURE_BYTES - 5))
+ return false;
- /* get real usage id */
- ret = sensor_hub_input_get_attribute_info(hsdev,
- HID_FEATURE_REPORT, hsdev->usage,
- HID_USAGE_SENSOR_PROP_SERIAL_NUM, &sensor_luid_info);
+ if (match->model &&
+ !hid_sensor_custom_prop_match_str(prop->model, match->model,
+ HID_CUSTOM_MAX_FEATURE_BYTES))
+ return false;
+
+ if (match->manufacturer &&
+ !hid_sensor_custom_prop_match_str(prop->manufacturer, match->manufacturer,
+ HID_CUSTOM_MAX_FEATURE_BYTES))
+ return false;
+
+ if (match->check_dmi && !dmi_check_system(dmi))
+ return false;
+
+ return true;
+}
+
+static int
+hid_sensor_custom_properties_get(struct hid_sensor_hub_device *hsdev,
+ struct hid_sensor_custom_properties *prop)
+{
+ int ret;
+
+ ret = hid_sensor_custom_get_prop(hsdev,
+ HID_USAGE_SENSOR_PROP_SERIAL_NUM,
+ HID_CUSTOM_MAX_FEATURE_BYTES,
+ prop->serial_num);
if (ret < 0)
return ret;
- report_size = sensor_hub_get_feature(hsdev, sensor_luid_info.report_id,
- sensor_luid_info.index, sizeof(w_buf),
- w_buf);
- if (report_size <= 0) {
- hid_err(hsdev->hdev, "Failed to get real usage info %d\n",
- report_size);
- return -ENODEV;
- }
+ /*
+ * Ignore errors on the following model and manufacturer properties.
+ * Because these are optional, it is not an error if they are missing.
+ */
- /* convert from wide char to char */
- for (i = 0; i < ARRAY_SIZE(buf) - 1 && w_buf[i]; i++)
- buf[i] = (char)w_buf[i];
+ hid_sensor_custom_get_prop(hsdev, HID_USAGE_SENSOR_PROP_MODEL,
+ HID_CUSTOM_MAX_FEATURE_BYTES,
+ prop->model);
- if (strlen(buf) != strlen(known_sensor_luid[0]) + 5) {
- hid_err(hsdev->hdev,
- "%s luid length not match %zu != (%zu + 5)\n", __func__,
- strlen(buf), strlen(known_sensor_luid[0]));
- return -ENODEV;
- }
+ hid_sensor_custom_get_prop(hsdev, HID_USAGE_SENSOR_PROP_MANUFACTURER,
+ HID_CUSTOM_MAX_FEATURE_BYTES,
+ prop->manufacturer);
- /* get table index with luid (not matching 'LUID: ' in luid) */
- return get_luid_table_index(&buf[5]);
+ return 0;
+}
+
+static int
+hid_sensor_custom_get_known(struct hid_sensor_hub_device *hsdev,
+ const struct hid_sensor_custom_match **known)
+{
+ int ret;
+ const struct hid_sensor_custom_match *match =
+ hid_sensor_custom_known_table;
+ struct hid_sensor_custom_properties *prop;
+
+ prop = kmalloc(sizeof(struct hid_sensor_custom_properties), GFP_KERNEL);
+ if (!prop)
+ return -ENOMEM;
+
+ ret = hid_sensor_custom_properties_get(hsdev, prop);
+ if (ret < 0)
+ goto out;
+
+ while (match->tag) {
+ if (hid_sensor_custom_do_match(hsdev, match, prop)) {
+ *known = match;
+ ret = 0;
+ goto out;
+ }
+ match++;
+ }
+ ret = -ENODATA;
+out:
+ kfree(prop);
+ return ret;
}
static struct platform_device *
hid_sensor_register_platform_device(struct platform_device *pdev,
struct hid_sensor_hub_device *hsdev,
- int index)
+ const struct hid_sensor_custom_match *match)
{
- char real_usage[HID_SENSOR_USAGE_LENGTH] = { 0 };
+ char real_usage[HID_SENSOR_USAGE_LENGTH];
struct platform_device *custom_pdev;
const char *dev_name;
char *c;
- /* copy real usage id */
- memcpy(real_usage, known_sensor_luid[index], 4);
+ memcpy(real_usage, match->luid, 4);
/* usage id are all lowcase */
for (c = real_usage; *c != '\0'; c++)
*c = tolower(*c);
- /* HID-SENSOR-INT-REAL_USAGE_ID */
- dev_name = kasprintf(GFP_KERNEL, "HID-SENSOR-INT-%s", real_usage);
+ /* HID-SENSOR-TAG-REAL_USAGE_ID */
+ dev_name = kasprintf(GFP_KERNEL, "HID-SENSOR-%s-%s",
+ match->tag, real_usage);
if (!dev_name)
return ERR_PTR(-ENOMEM);
@@ -873,7 +969,7 @@ static int hid_sensor_custom_probe(struct platform_device *pdev)
struct hid_sensor_custom *sensor_inst;
struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data;
int ret;
- int index;
+ const struct hid_sensor_custom_match *match;
sensor_inst = devm_kzalloc(&pdev->dev, sizeof(*sensor_inst),
GFP_KERNEL);
@@ -888,10 +984,10 @@ static int hid_sensor_custom_probe(struct platform_device *pdev)
mutex_init(&sensor_inst->mutex);
platform_set_drvdata(pdev, sensor_inst);
- index = get_known_custom_sensor_index(hsdev);
- if (index >= 0 && index < ARRAY_SIZE(known_sensor_luid)) {
+ ret = hid_sensor_custom_get_known(hsdev, &match);
+ if (!ret) {
sensor_inst->custom_pdev =
- hid_sensor_register_platform_device(pdev, hsdev, index);
+ hid_sensor_register_platform_device(pdev, hsdev, match);
ret = PTR_ERR_OR_ZERO(sensor_inst->custom_pdev);
if (ret) {
diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c
index 6abd3e2a9094..83237b86c8ff 100644
--- a/drivers/hid/hid-sensor-hub.c
+++ b/drivers/hid/hid-sensor-hub.c
@@ -397,7 +397,8 @@ int sensor_hub_input_get_attribute_info(struct hid_sensor_hub_device *hsdev,
for (i = 0; i < report->maxfield; ++i) {
field = report->field[i];
if (field->maxusage) {
- if (field->physical == usage_id &&
+ if ((field->physical == usage_id ||
+ field->application == usage_id) &&
(field->logical == attr_usage_id ||
field->usage[0].hid ==
attr_usage_id) &&
@@ -506,7 +507,8 @@ static int sensor_hub_raw_event(struct hid_device *hdev,
collection->usage);
callback = sensor_hub_get_callback(hdev,
- report->field[i]->physical,
+ report->field[i]->physical ? report->field[i]->physical :
+ report->field[i]->application,
report->field[i]->usage[0].collection_index,
&hsdev, &priv);
if (!callback) {
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index 13125997ab5e..dd942061fd77 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -49,38 +49,28 @@
#define SIXAXIS_CONTROLLER_BT BIT(2)
#define BUZZ_CONTROLLER BIT(3)
#define PS3REMOTE BIT(4)
-#define DUALSHOCK4_CONTROLLER_USB BIT(5)
-#define DUALSHOCK4_CONTROLLER_BT BIT(6)
-#define DUALSHOCK4_DONGLE BIT(7)
-#define MOTION_CONTROLLER_USB BIT(8)
-#define MOTION_CONTROLLER_BT BIT(9)
-#define NAVIGATION_CONTROLLER_USB BIT(10)
-#define NAVIGATION_CONTROLLER_BT BIT(11)
-#define SINO_LITE_CONTROLLER BIT(12)
-#define FUTUREMAX_DANCE_MAT BIT(13)
-#define NSG_MR5U_REMOTE_BT BIT(14)
-#define NSG_MR7U_REMOTE_BT BIT(15)
-#define SHANWAN_GAMEPAD BIT(16)
-#define GH_GUITAR_CONTROLLER BIT(17)
-#define GHL_GUITAR_PS3WIIU BIT(18)
-#define GHL_GUITAR_PS4 BIT(19)
+#define MOTION_CONTROLLER_USB BIT(5)
+#define MOTION_CONTROLLER_BT BIT(6)
+#define NAVIGATION_CONTROLLER_USB BIT(7)
+#define NAVIGATION_CONTROLLER_BT BIT(8)
+#define SINO_LITE_CONTROLLER BIT(9)
+#define FUTUREMAX_DANCE_MAT BIT(10)
+#define NSG_MR5U_REMOTE_BT BIT(11)
+#define NSG_MR7U_REMOTE_BT BIT(12)
+#define SHANWAN_GAMEPAD BIT(13)
+#define GH_GUITAR_CONTROLLER BIT(14)
+#define GHL_GUITAR_PS3WIIU BIT(15)
+#define GHL_GUITAR_PS4 BIT(16)
#define SIXAXIS_CONTROLLER (SIXAXIS_CONTROLLER_USB | SIXAXIS_CONTROLLER_BT)
#define MOTION_CONTROLLER (MOTION_CONTROLLER_USB | MOTION_CONTROLLER_BT)
#define NAVIGATION_CONTROLLER (NAVIGATION_CONTROLLER_USB |\
NAVIGATION_CONTROLLER_BT)
-#define DUALSHOCK4_CONTROLLER (DUALSHOCK4_CONTROLLER_USB |\
- DUALSHOCK4_CONTROLLER_BT | \
- DUALSHOCK4_DONGLE)
#define SONY_LED_SUPPORT (SIXAXIS_CONTROLLER | BUZZ_CONTROLLER |\
- DUALSHOCK4_CONTROLLER | MOTION_CONTROLLER |\
- NAVIGATION_CONTROLLER)
-#define SONY_BATTERY_SUPPORT (SIXAXIS_CONTROLLER | DUALSHOCK4_CONTROLLER |\
- MOTION_CONTROLLER_BT | NAVIGATION_CONTROLLER)
-#define SONY_FF_SUPPORT (SIXAXIS_CONTROLLER | DUALSHOCK4_CONTROLLER |\
- MOTION_CONTROLLER)
-#define SONY_BT_DEVICE (SIXAXIS_CONTROLLER_BT | DUALSHOCK4_CONTROLLER_BT |\
- MOTION_CONTROLLER_BT | NAVIGATION_CONTROLLER_BT)
+ MOTION_CONTROLLER | NAVIGATION_CONTROLLER)
+#define SONY_BATTERY_SUPPORT (SIXAXIS_CONTROLLER | MOTION_CONTROLLER_BT | NAVIGATION_CONTROLLER)
+#define SONY_FF_SUPPORT (SIXAXIS_CONTROLLER | MOTION_CONTROLLER)
+#define SONY_BT_DEVICE (SIXAXIS_CONTROLLER_BT | MOTION_CONTROLLER_BT | NAVIGATION_CONTROLLER_BT)
#define NSG_MRXU_REMOTE (NSG_MR5U_REMOTE_BT | NSG_MR7U_REMOTE_BT)
#define MAX_LEDS 4
@@ -428,36 +418,6 @@ static const unsigned int sixaxis_keymap[] = {
[0x11] = BTN_MODE, /* PS */
};
-static const unsigned int ds4_absmap[] = {
- [0x30] = ABS_X,
- [0x31] = ABS_Y,
- [0x32] = ABS_RX, /* right stick X */
- [0x33] = ABS_Z, /* L2 */
- [0x34] = ABS_RZ, /* R2 */
- [0x35] = ABS_RY, /* right stick Y */
-};
-
-static const unsigned int ds4_keymap[] = {
- [0x1] = BTN_WEST, /* Square */
- [0x2] = BTN_SOUTH, /* Cross */
- [0x3] = BTN_EAST, /* Circle */
- [0x4] = BTN_NORTH, /* Triangle */
- [0x5] = BTN_TL, /* L1 */
- [0x6] = BTN_TR, /* R1 */
- [0x7] = BTN_TL2, /* L2 */
- [0x8] = BTN_TR2, /* R2 */
- [0x9] = BTN_SELECT, /* Share */
- [0xa] = BTN_START, /* Options */
- [0xb] = BTN_THUMBL, /* L3 */
- [0xc] = BTN_THUMBR, /* R3 */
- [0xd] = BTN_MODE, /* PS */
-};
-
-static const struct {int x; int y; } ds4_hat_mapping[] = {
- {0, -1}, {1, -1}, {1, 0}, {1, 1}, {0, 1}, {-1, 1}, {-1, 0}, {-1, -1},
- {0, 0}
-};
-
static enum power_supply_property sony_battery_props[] = {
POWER_SUPPLY_PROP_PRESENT,
POWER_SUPPLY_PROP_CAPACITY,
@@ -502,35 +462,12 @@ struct motion_output_report_02 {
u8 rumble;
};
-#define DS4_FEATURE_REPORT_0x02_SIZE 37
-#define DS4_FEATURE_REPORT_0x05_SIZE 41
-#define DS4_FEATURE_REPORT_0x81_SIZE 7
-#define DS4_FEATURE_REPORT_0xA3_SIZE 49
-#define DS4_INPUT_REPORT_0x11_SIZE 78
-#define DS4_OUTPUT_REPORT_0x05_SIZE 32
-#define DS4_OUTPUT_REPORT_0x11_SIZE 78
#define SIXAXIS_REPORT_0xF2_SIZE 17
#define SIXAXIS_REPORT_0xF5_SIZE 8
#define MOTION_REPORT_0x02_SIZE 49
-/* Offsets relative to USB input report (0x1). Bluetooth (0x11) requires an
- * additional +2.
- */
-#define DS4_INPUT_REPORT_AXIS_OFFSET 1
-#define DS4_INPUT_REPORT_BUTTON_OFFSET 5
-#define DS4_INPUT_REPORT_TIMESTAMP_OFFSET 10
-#define DS4_INPUT_REPORT_GYRO_X_OFFSET 13
-#define DS4_INPUT_REPORT_BATTERY_OFFSET 30
-#define DS4_INPUT_REPORT_TOUCHPAD_OFFSET 33
-
#define SENSOR_SUFFIX " Motion Sensors"
-#define DS4_TOUCHPAD_SUFFIX " Touchpad"
-
-/* Default to 4ms poll interval, which is same as USB (not adjustable). */
-#define DS4_BT_DEFAULT_POLL_INTERVAL_MS 4
-#define DS4_BT_MAX_POLL_INTERVAL_MS 62
-#define DS4_GYRO_RES_PER_DEG_S 1024
-#define DS4_ACC_RES_PER_G 8192
+#define TOUCHPAD_SUFFIX " Touchpad"
#define SIXAXIS_INPUT_REPORT_ACC_X_OFFSET 41
#define SIXAXIS_ACC_RES_PER_G 113
@@ -539,28 +476,8 @@ static DEFINE_SPINLOCK(sony_dev_list_lock);
static LIST_HEAD(sony_device_list);
static DEFINE_IDA(sony_device_id_allocator);
-/* Used for calibration of DS4 accelerometer and gyro. */
-struct ds4_calibration_data {
- int abs_code;
- short bias;
- /* Calibration requires scaling against a sensitivity value, which is a
- * float. Store sensitivity as a fraction to limit floating point
- * calculations until final calibration.
- */
- int sens_numer;
- int sens_denom;
-};
-
-enum ds4_dongle_state {
- DONGLE_DISCONNECTED,
- DONGLE_CALIBRATING,
- DONGLE_CONNECTED,
- DONGLE_DISABLED
-};
-
enum sony_worker {
- SONY_WORKER_STATE,
- SONY_WORKER_HOTPLUG
+ SONY_WORKER_STATE
};
struct sony_sc {
@@ -571,16 +488,11 @@ struct sony_sc {
struct input_dev *sensor_dev;
struct led_classdev *leds[MAX_LEDS];
unsigned long quirks;
- struct work_struct hotplug_worker;
struct work_struct state_worker;
void (*send_output_report)(struct sony_sc *);
struct power_supply *battery;
struct power_supply_desc battery_desc;
int device_id;
- unsigned fw_version;
- bool fw_version_created;
- unsigned hw_version;
- bool hw_version_created;
u8 *output_report_dmabuf;
#ifdef CONFIG_SONY_FF
@@ -589,7 +501,6 @@ struct sony_sc {
#endif
u8 mac_address[6];
- u8 hotplug_worker_initialized;
u8 state_worker_initialized;
u8 defer_initialization;
u8 battery_capacity;
@@ -599,14 +510,6 @@ struct sony_sc {
u8 led_delay_off[MAX_LEDS];
u8 led_count;
- bool timestamp_initialized;
- u16 prev_timestamp;
- unsigned int timestamp_us;
-
- u8 ds4_bt_poll_interval;
- enum ds4_dongle_state ds4_dongle_state;
- /* DS4 calibration data */
- struct ds4_calibration_data ds4_calib_data[6];
/* GH Live */
struct urb *ghl_urb;
struct timer_list ghl_poke_timer;
@@ -626,10 +529,6 @@ static inline void sony_schedule_work(struct sony_sc *sc,
schedule_work(&sc->state_worker);
spin_unlock_irqrestore(&sc->lock, flags);
break;
- case SONY_WORKER_HOTPLUG:
- if (sc->hotplug_worker_initialized)
- schedule_work(&sc->hotplug_worker);
- break;
}
}
@@ -700,67 +599,6 @@ static int guitar_mapping(struct hid_device *hdev, struct hid_input *hi,
return 0;
}
-static ssize_t ds4_show_poll_interval(struct device *dev,
- struct device_attribute
- *attr, char *buf)
-{
- struct hid_device *hdev = to_hid_device(dev);
- struct sony_sc *sc = hid_get_drvdata(hdev);
-
- return snprintf(buf, PAGE_SIZE, "%i\n", sc->ds4_bt_poll_interval);
-}
-
-static ssize_t ds4_store_poll_interval(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct hid_device *hdev = to_hid_device(dev);
- struct sony_sc *sc = hid_get_drvdata(hdev);
- unsigned long flags;
- u8 interval;
-
- if (kstrtou8(buf, 0, &interval))
- return -EINVAL;
-
- if (interval > DS4_BT_MAX_POLL_INTERVAL_MS)
- return -EINVAL;
-
- spin_lock_irqsave(&sc->lock, flags);
- sc->ds4_bt_poll_interval = interval;
- spin_unlock_irqrestore(&sc->lock, flags);
-
- sony_schedule_work(sc, SONY_WORKER_STATE);
-
- return count;
-}
-
-static DEVICE_ATTR(bt_poll_interval, 0644, ds4_show_poll_interval,
- ds4_store_poll_interval);
-
-static ssize_t sony_show_firmware_version(struct device *dev,
- struct device_attribute
- *attr, char *buf)
-{
- struct hid_device *hdev = to_hid_device(dev);
- struct sony_sc *sc = hid_get_drvdata(hdev);
-
- return snprintf(buf, PAGE_SIZE, "0x%04x\n", sc->fw_version);
-}
-
-static DEVICE_ATTR(firmware_version, 0444, sony_show_firmware_version, NULL);
-
-static ssize_t sony_show_hardware_version(struct device *dev,
- struct device_attribute
- *attr, char *buf)
-{
- struct hid_device *hdev = to_hid_device(dev);
- struct sony_sc *sc = hid_get_drvdata(hdev);
-
- return snprintf(buf, PAGE_SIZE, "0x%04x\n", sc->hw_version);
-}
-
-static DEVICE_ATTR(hardware_version, 0444, sony_show_hardware_version, NULL);
-
static u8 *motion_fixup(struct hid_device *hdev, u8 *rdesc,
unsigned int *rsize)
{
@@ -905,37 +743,6 @@ static int sixaxis_mapping(struct hid_device *hdev, struct hid_input *hi,
return -1;
}
-static int ds4_mapping(struct hid_device *hdev, struct hid_input *hi,
- struct hid_field *field, struct hid_usage *usage,
- unsigned long **bit, int *max)
-{
- if ((usage->hid & HID_USAGE_PAGE) == HID_UP_BUTTON) {
- unsigned int key = usage->hid & HID_USAGE;
-
- if (key >= ARRAY_SIZE(ds4_keymap))
- return -1;
-
- key = ds4_keymap[key];
- hid_map_usage_clear(hi, usage, bit, max, EV_KEY, key);
- return 1;
- } else if ((usage->hid & HID_USAGE_PAGE) == HID_UP_GENDESK) {
- unsigned int abs = usage->hid & HID_USAGE;
-
- /* Let the HID parser deal with the HAT. */
- if (usage->hid == HID_GD_HATSWITCH)
- return 0;
-
- if (abs >= ARRAY_SIZE(ds4_absmap))
- return -1;
-
- abs = ds4_absmap[abs];
- hid_map_usage_clear(hi, usage, bit, max, EV_ABS, abs);
- return 1;
- }
-
- return 0;
-}
-
static u8 *sony_report_fixup(struct hid_device *hdev, u8 *rdesc,
unsigned int *rsize)
{
@@ -1034,216 +841,6 @@ static void sixaxis_parse_report(struct sony_sc *sc, u8 *rd, int size)
}
}
-static void dualshock4_parse_report(struct sony_sc *sc, u8 *rd, int size)
-{
- struct hid_input *hidinput = list_entry(sc->hdev->inputs.next,
- struct hid_input, list);
- struct input_dev *input_dev = hidinput->input;
- unsigned long flags;
- int n, m, offset, num_touch_data, max_touch_data;
- u8 cable_state, battery_capacity;
- int battery_status;
- u16 timestamp;
-
- /* When using Bluetooth the header is 2 bytes longer, so skip these. */
- int data_offset = (sc->quirks & DUALSHOCK4_CONTROLLER_BT) ? 2 : 0;
-
- /* Second bit of third button byte is for the touchpad button. */
- offset = data_offset + DS4_INPUT_REPORT_BUTTON_OFFSET;
- input_report_key(sc->touchpad, BTN_LEFT, rd[offset+2] & 0x2);
-
- /*
- * The default behavior of the Dualshock 4 is to send reports using
- * report type 1 when running over Bluetooth. However, when feature
- * report 2 is requested during the controller initialization it starts
- * sending input reports in report 17. Since report 17 is undefined
- * in the default HID descriptor, the HID layer won't generate events.
- * While it is possible (and this was done before) to fixup the HID
- * descriptor to add this mapping, it was better to do this manually.
- * The reason is there were various pieces software both open and closed
- * source, relying on the descriptors to be the same across various
- * operating systems. If the descriptors wouldn't match some
- * applications e.g. games on Wine would not be able to function due
- * to different descriptors, which such applications are not parsing.
- */
- if (rd[0] == 17) {
- int value;
-
- offset = data_offset + DS4_INPUT_REPORT_AXIS_OFFSET;
- input_report_abs(input_dev, ABS_X, rd[offset]);
- input_report_abs(input_dev, ABS_Y, rd[offset+1]);
- input_report_abs(input_dev, ABS_RX, rd[offset+2]);
- input_report_abs(input_dev, ABS_RY, rd[offset+3]);
-
- value = rd[offset+4] & 0xf;
- if (value > 7)
- value = 8; /* Center 0, 0 */
- input_report_abs(input_dev, ABS_HAT0X, ds4_hat_mapping[value].x);
- input_report_abs(input_dev, ABS_HAT0Y, ds4_hat_mapping[value].y);
-
- input_report_key(input_dev, BTN_WEST, rd[offset+4] & 0x10);
- input_report_key(input_dev, BTN_SOUTH, rd[offset+4] & 0x20);
- input_report_key(input_dev, BTN_EAST, rd[offset+4] & 0x40);
- input_report_key(input_dev, BTN_NORTH, rd[offset+4] & 0x80);
-
- input_report_key(input_dev, BTN_TL, rd[offset+5] & 0x1);
- input_report_key(input_dev, BTN_TR, rd[offset+5] & 0x2);
- input_report_key(input_dev, BTN_TL2, rd[offset+5] & 0x4);
- input_report_key(input_dev, BTN_TR2, rd[offset+5] & 0x8);
- input_report_key(input_dev, BTN_SELECT, rd[offset+5] & 0x10);
- input_report_key(input_dev, BTN_START, rd[offset+5] & 0x20);
- input_report_key(input_dev, BTN_THUMBL, rd[offset+5] & 0x40);
- input_report_key(input_dev, BTN_THUMBR, rd[offset+5] & 0x80);
-
- input_report_key(input_dev, BTN_MODE, rd[offset+6] & 0x1);
-
- input_report_abs(input_dev, ABS_Z, rd[offset+7]);
- input_report_abs(input_dev, ABS_RZ, rd[offset+8]);
-
- input_sync(input_dev);
- }
-
- /* Convert timestamp (in 5.33us unit) to timestamp_us */
- offset = data_offset + DS4_INPUT_REPORT_TIMESTAMP_OFFSET;
- timestamp = get_unaligned_le16(&rd[offset]);
- if (!sc->timestamp_initialized) {
- sc->timestamp_us = ((unsigned int)timestamp * 16) / 3;
- sc->timestamp_initialized = true;
- } else {
- u16 delta;
-
- if (sc->prev_timestamp > timestamp)
- delta = (U16_MAX - sc->prev_timestamp + timestamp + 1);
- else
- delta = timestamp - sc->prev_timestamp;
- sc->timestamp_us += (delta * 16) / 3;
- }
- sc->prev_timestamp = timestamp;
- input_event(sc->sensor_dev, EV_MSC, MSC_TIMESTAMP, sc->timestamp_us);
-
- offset = data_offset + DS4_INPUT_REPORT_GYRO_X_OFFSET;
- for (n = 0; n < 6; n++) {
- /* Store data in int for more precision during mult_frac. */
- int raw_data = (short)((rd[offset+1] << 8) | rd[offset]);
- struct ds4_calibration_data *calib = &sc->ds4_calib_data[n];
-
- /* High precision is needed during calibration, but the
- * calibrated values are within 32-bit.
- * Note: we swap numerator 'x' and 'numer' in mult_frac for
- * precision reasons so we don't need 64-bit.
- */
- int calib_data = mult_frac(calib->sens_numer,
- raw_data - calib->bias,
- calib->sens_denom);
-
- input_report_abs(sc->sensor_dev, calib->abs_code, calib_data);
- offset += 2;
- }
- input_sync(sc->sensor_dev);
-
- /*
- * The lower 4 bits of byte 30 (or 32 for BT) contain the battery level
- * and the 5th bit contains the USB cable state.
- */
- offset = data_offset + DS4_INPUT_REPORT_BATTERY_OFFSET;
- cable_state = (rd[offset] >> 4) & 0x01;
-
- /*
- * Interpretation of the battery_capacity data depends on the cable state.
- * When no cable is connected (bit4 is 0):
- * - 0:10: percentage in units of 10%.
- * When a cable is plugged in:
- * - 0-10: percentage in units of 10%.
- * - 11: battery is full
- * - 14: not charging due to Voltage or temperature error
- * - 15: charge error
- */
- if (cable_state) {
- u8 battery_data = rd[offset] & 0xf;
-
- if (battery_data < 10) {
- /* Take the mid-point for each battery capacity value,
- * because on the hardware side 0 = 0-9%, 1=10-19%, etc.
- * This matches official platform behavior, which does
- * the same.
- */
- battery_capacity = battery_data * 10 + 5;
- battery_status = POWER_SUPPLY_STATUS_CHARGING;
- } else if (battery_data == 10) {
- battery_capacity = 100;
- battery_status = POWER_SUPPLY_STATUS_CHARGING;
- } else if (battery_data == 11) {
- battery_capacity = 100;
- battery_status = POWER_SUPPLY_STATUS_FULL;
- } else { /* 14, 15 and undefined values */
- battery_capacity = 0;
- battery_status = POWER_SUPPLY_STATUS_UNKNOWN;
- }
- } else {
- u8 battery_data = rd[offset] & 0xf;
-
- if (battery_data < 10)
- battery_capacity = battery_data * 10 + 5;
- else /* 10 */
- battery_capacity = 100;
-
- battery_status = POWER_SUPPLY_STATUS_DISCHARGING;
- }
-
- spin_lock_irqsave(&sc->lock, flags);
- sc->battery_capacity = battery_capacity;
- sc->battery_status = battery_status;
- spin_unlock_irqrestore(&sc->lock, flags);
-
- /*
- * The Dualshock 4 multi-touch trackpad data starts at offset 33 on USB
- * and 35 on Bluetooth.
- * The first byte indicates the number of touch data in the report.
- * Trackpad data starts 2 bytes later (e.g. 35 for USB).
- */
- offset = data_offset + DS4_INPUT_REPORT_TOUCHPAD_OFFSET;
- max_touch_data = (sc->quirks & DUALSHOCK4_CONTROLLER_BT) ? 4 : 3;
- if (rd[offset] > 0 && rd[offset] <= max_touch_data)
- num_touch_data = rd[offset];
- else
- num_touch_data = 1;
- offset += 1;
-
- for (m = 0; m < num_touch_data; m++) {
- /* Skip past timestamp */
- offset += 1;
-
- /*
- * The first 7 bits of the first byte is a counter and bit 8 is
- * a touch indicator that is 0 when pressed and 1 when not
- * pressed.
- * The next 3 bytes are two 12 bit touch coordinates, X and Y.
- * The data for the second touch is in the same format and
- * immediately follows the data for the first.
- */
- for (n = 0; n < 2; n++) {
- u16 x, y;
- bool active;
-
- x = rd[offset+1] | ((rd[offset+2] & 0xF) << 8);
- y = ((rd[offset+2] & 0xF0) >> 4) | (rd[offset+3] << 4);
-
- active = !(rd[offset] >> 7);
- input_mt_slot(sc->touchpad, n);
- input_mt_report_slot_state(sc->touchpad, MT_TOOL_FINGER, active);
-
- if (active) {
- input_report_abs(sc->touchpad, ABS_MT_POSITION_X, x);
- input_report_abs(sc->touchpad, ABS_MT_POSITION_Y, y);
- }
-
- offset += 4;
- }
- input_mt_sync_frame(sc->touchpad);
- input_sync(sc->touchpad);
- }
-}
-
static void nsg_mrxu_parse_report(struct sony_sc *sc, u8 *rd, int size)
{
int n, offset, relx, rely;
@@ -1350,83 +947,6 @@ static int sony_raw_event(struct hid_device *hdev, struct hid_report *report,
} else if ((sc->quirks & NAVIGATION_CONTROLLER) && rd[0] == 0x01 &&
size == 49) {
sixaxis_parse_report(sc, rd, size);
- } else if ((sc->quirks & DUALSHOCK4_CONTROLLER_USB) && rd[0] == 0x01 &&
- size == 64) {
- dualshock4_parse_report(sc, rd, size);
- } else if (((sc->quirks & DUALSHOCK4_CONTROLLER_BT) && rd[0] == 0x11 &&
- size == 78)) {
- /* CRC check */
- u8 bthdr = 0xA1;
- u32 crc;
- u32 report_crc;
-
- crc = crc32_le(0xFFFFFFFF, &bthdr, 1);
- crc = ~crc32_le(crc, rd, DS4_INPUT_REPORT_0x11_SIZE-4);
- report_crc = get_unaligned_le32(&rd[DS4_INPUT_REPORT_0x11_SIZE-4]);
- if (crc != report_crc) {
- hid_dbg(sc->hdev, "DualShock 4 input report's CRC check failed, received crc 0x%0x != 0x%0x\n",
- report_crc, crc);
- return -EILSEQ;
- }
-
- dualshock4_parse_report(sc, rd, size);
- } else if ((sc->quirks & DUALSHOCK4_DONGLE) && rd[0] == 0x01 &&
- size == 64) {
- unsigned long flags;
- enum ds4_dongle_state dongle_state;
-
- /*
- * In the case of a DS4 USB dongle, bit[2] of byte 31 indicates
- * if a DS4 is actually connected (indicated by '0').
- * For non-dongle, this bit is always 0 (connected).
- */
- bool connected = (rd[31] & 0x04) ? false : true;
-
- spin_lock_irqsave(&sc->lock, flags);
- dongle_state = sc->ds4_dongle_state;
- spin_unlock_irqrestore(&sc->lock, flags);
-
- /*
- * The dongle always sends input reports even when no
- * DS4 is attached. When a DS4 is connected, we need to
- * obtain calibration data before we can use it.
- * The code below tracks dongle state and kicks of
- * calibration when needed and only allows us to process
- * input if a DS4 is actually connected.
- */
- if (dongle_state == DONGLE_DISCONNECTED && connected) {
- hid_info(sc->hdev, "DualShock 4 USB dongle: controller connected\n");
- sony_set_leds(sc);
-
- spin_lock_irqsave(&sc->lock, flags);
- sc->ds4_dongle_state = DONGLE_CALIBRATING;
- spin_unlock_irqrestore(&sc->lock, flags);
-
- sony_schedule_work(sc, SONY_WORKER_HOTPLUG);
-
- /* Don't process the report since we don't have
- * calibration data, but let hidraw have it anyway.
- */
- return 0;
- } else if ((dongle_state == DONGLE_CONNECTED ||
- dongle_state == DONGLE_DISABLED) && !connected) {
- hid_info(sc->hdev, "DualShock 4 USB dongle: controller disconnected\n");
-
- spin_lock_irqsave(&sc->lock, flags);
- sc->ds4_dongle_state = DONGLE_DISCONNECTED;
- spin_unlock_irqrestore(&sc->lock, flags);
-
- /* Return 0, so hidraw can get the report. */
- return 0;
- } else if (dongle_state == DONGLE_CALIBRATING ||
- dongle_state == DONGLE_DISABLED ||
- dongle_state == DONGLE_DISCONNECTED) {
- /* Return 0, so hidraw can get the report. */
- return 0;
- }
-
- dualshock4_parse_report(sc, rd, size);
-
} else if ((sc->quirks & NSG_MRXU_REMOTE) && rd[0] == 0x02) {
nsg_mrxu_parse_report(sc, rd, size);
return 1;
@@ -1478,9 +998,6 @@ static int sony_mapping(struct hid_device *hdev, struct hid_input *hi,
if (sc->quirks & SIXAXIS_CONTROLLER)
return sixaxis_mapping(hdev, hi, field, usage, bit, max);
- if (sc->quirks & DUALSHOCK4_CONTROLLER)
- return ds4_mapping(hdev, hi, field, usage, bit, max);
-
if (sc->quirks & GH_GUITAR_CONTROLLER)
return guitar_mapping(hdev, hi, field, usage, bit, max);
@@ -1508,14 +1025,17 @@ static int sony_register_touchpad(struct sony_sc *sc, int touch_count,
sc->touchpad->id.product = sc->hdev->product;
sc->touchpad->id.version = sc->hdev->version;
- /* Append a suffix to the controller name as there are various
- * DS4 compatible non-Sony devices with different names.
+ /* This suffix was originally apended when hid-sony also
+ * supported DS4 devices. The DS4 was implemented using multiple
+ * evdev nodes and hence had the need to separete them out using
+ * a suffix. Other devices which were added later like Sony TV remotes
+ * inhirited this suffix.
*/
- name_sz = strlen(sc->hdev->name) + sizeof(DS4_TOUCHPAD_SUFFIX);
+ name_sz = strlen(sc->hdev->name) + sizeof(TOUCHPAD_SUFFIX);
name = devm_kzalloc(&sc->hdev->dev, name_sz, GFP_KERNEL);
if (!name)
return -ENOMEM;
- snprintf(name, name_sz, "%s" DS4_TOUCHPAD_SUFFIX, sc->hdev->name);
+ snprintf(name, name_sz, "%s" TOUCHPAD_SUFFIX, sc->hdev->name);
sc->touchpad->name = name;
/* We map the button underneath the touchpad to BTN_LEFT. */
@@ -1557,7 +1077,6 @@ static int sony_register_sensors(struct sony_sc *sc)
size_t name_sz;
char *name;
int ret;
- int range;
sc->sensor_dev = devm_input_allocate_device(&sc->hdev->dev);
if (!sc->sensor_dev)
@@ -1595,25 +1114,6 @@ static int sony_register_sensors(struct sony_sc *sc)
input_abs_set_res(sc->sensor_dev, ABS_X, SIXAXIS_ACC_RES_PER_G);
input_abs_set_res(sc->sensor_dev, ABS_Y, SIXAXIS_ACC_RES_PER_G);
input_abs_set_res(sc->sensor_dev, ABS_Z, SIXAXIS_ACC_RES_PER_G);
- } else if (sc->quirks & DUALSHOCK4_CONTROLLER) {
- range = DS4_ACC_RES_PER_G*4;
- input_set_abs_params(sc->sensor_dev, ABS_X, -range, range, 16, 0);
- input_set_abs_params(sc->sensor_dev, ABS_Y, -range, range, 16, 0);
- input_set_abs_params(sc->sensor_dev, ABS_Z, -range, range, 16, 0);
- input_abs_set_res(sc->sensor_dev, ABS_X, DS4_ACC_RES_PER_G);
- input_abs_set_res(sc->sensor_dev, ABS_Y, DS4_ACC_RES_PER_G);
- input_abs_set_res(sc->sensor_dev, ABS_Z, DS4_ACC_RES_PER_G);
-
- range = DS4_GYRO_RES_PER_DEG_S*2048;
- input_set_abs_params(sc->sensor_dev, ABS_RX, -range, range, 16, 0);
- input_set_abs_params(sc->sensor_dev, ABS_RY, -range, range, 16, 0);
- input_set_abs_params(sc->sensor_dev, ABS_RZ, -range, range, 16, 0);
- input_abs_set_res(sc->sensor_dev, ABS_RX, DS4_GYRO_RES_PER_DEG_S);
- input_abs_set_res(sc->sensor_dev, ABS_RY, DS4_GYRO_RES_PER_DEG_S);
- input_abs_set_res(sc->sensor_dev, ABS_RZ, DS4_GYRO_RES_PER_DEG_S);
-
- __set_bit(EV_MSC, sc->sensor_dev->evbit);
- __set_bit(MSC_TIMESTAMP, sc->sensor_dev->mscbit);
}
__set_bit(INPUT_PROP_ACCELEROMETER, sc->sensor_dev->propbit);
@@ -1697,224 +1197,6 @@ static int sixaxis_set_operational_bt(struct hid_device *hdev)
return ret;
}
-/*
- * Request DS4 calibration data for the motion sensors.
- * For Bluetooth this also affects the operating mode (see below).
- */
-static int dualshock4_get_calibration_data(struct sony_sc *sc)
-{
- u8 *buf;
- int ret;
- short gyro_pitch_bias, gyro_pitch_plus, gyro_pitch_minus;
- short gyro_yaw_bias, gyro_yaw_plus, gyro_yaw_minus;
- short gyro_roll_bias, gyro_roll_plus, gyro_roll_minus;
- short gyro_speed_plus, gyro_speed_minus;
- short acc_x_plus, acc_x_minus;
- short acc_y_plus, acc_y_minus;
- short acc_z_plus, acc_z_minus;
- int speed_2x;
- int range_2g;
-
- /* For Bluetooth we use a different request, which supports CRC.
- * Note: in Bluetooth mode feature report 0x02 also changes the state
- * of the controller, so that it sends input reports of type 0x11.
- */
- if (sc->quirks & (DUALSHOCK4_CONTROLLER_USB | DUALSHOCK4_DONGLE)) {
- int retries;
-
- buf = kmalloc(DS4_FEATURE_REPORT_0x02_SIZE, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- /* We should normally receive the feature report data we asked
- * for, but hidraw applications such as Steam can issue feature
- * reports as well. In particular for Dongle reconnects, Steam
- * and this function are competing resulting in often receiving
- * data for a different HID report, so retry a few times.
- */
- for (retries = 0; retries < 3; retries++) {
- ret = hid_hw_raw_request(sc->hdev, 0x02, buf,
- DS4_FEATURE_REPORT_0x02_SIZE,
- HID_FEATURE_REPORT,
- HID_REQ_GET_REPORT);
- if (ret < 0)
- goto err_stop;
-
- if (buf[0] != 0x02) {
- if (retries < 2) {
- hid_warn(sc->hdev, "Retrying DualShock 4 get calibration report (0x02) request\n");
- continue;
- } else {
- ret = -EILSEQ;
- goto err_stop;
- }
- } else {
- break;
- }
- }
- } else {
- u8 bthdr = 0xA3;
- u32 crc;
- u32 report_crc;
- int retries;
-
- buf = kmalloc(DS4_FEATURE_REPORT_0x05_SIZE, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- for (retries = 0; retries < 3; retries++) {
- ret = hid_hw_raw_request(sc->hdev, 0x05, buf,
- DS4_FEATURE_REPORT_0x05_SIZE,
- HID_FEATURE_REPORT,
- HID_REQ_GET_REPORT);
- if (ret < 0)
- goto err_stop;
-
- /* CRC check */
- crc = crc32_le(0xFFFFFFFF, &bthdr, 1);
- crc = ~crc32_le(crc, buf, DS4_FEATURE_REPORT_0x05_SIZE-4);
- report_crc = get_unaligned_le32(&buf[DS4_FEATURE_REPORT_0x05_SIZE-4]);
- if (crc != report_crc) {
- hid_warn(sc->hdev, "DualShock 4 calibration report's CRC check failed, received crc 0x%0x != 0x%0x\n",
- report_crc, crc);
- if (retries < 2) {
- hid_warn(sc->hdev, "Retrying DualShock 4 get calibration report request\n");
- continue;
- } else {
- ret = -EILSEQ;
- goto err_stop;
- }
- } else {
- break;
- }
- }
- }
-
- gyro_pitch_bias = get_unaligned_le16(&buf[1]);
- gyro_yaw_bias = get_unaligned_le16(&buf[3]);
- gyro_roll_bias = get_unaligned_le16(&buf[5]);
- if (sc->quirks & DUALSHOCK4_CONTROLLER_USB) {
- gyro_pitch_plus = get_unaligned_le16(&buf[7]);
- gyro_pitch_minus = get_unaligned_le16(&buf[9]);
- gyro_yaw_plus = get_unaligned_le16(&buf[11]);
- gyro_yaw_minus = get_unaligned_le16(&buf[13]);
- gyro_roll_plus = get_unaligned_le16(&buf[15]);
- gyro_roll_minus = get_unaligned_le16(&buf[17]);
- } else {
- /* BT + Dongle */
- gyro_pitch_plus = get_unaligned_le16(&buf[7]);
- gyro_yaw_plus = get_unaligned_le16(&buf[9]);
- gyro_roll_plus = get_unaligned_le16(&buf[11]);
- gyro_pitch_minus = get_unaligned_le16(&buf[13]);
- gyro_yaw_minus = get_unaligned_le16(&buf[15]);
- gyro_roll_minus = get_unaligned_le16(&buf[17]);
- }
- gyro_speed_plus = get_unaligned_le16(&buf[19]);
- gyro_speed_minus = get_unaligned_le16(&buf[21]);
- acc_x_plus = get_unaligned_le16(&buf[23]);
- acc_x_minus = get_unaligned_le16(&buf[25]);
- acc_y_plus = get_unaligned_le16(&buf[27]);
- acc_y_minus = get_unaligned_le16(&buf[29]);
- acc_z_plus = get_unaligned_le16(&buf[31]);
- acc_z_minus = get_unaligned_le16(&buf[33]);
-
- /* Set gyroscope calibration and normalization parameters.
- * Data values will be normalized to 1/DS4_GYRO_RES_PER_DEG_S degree/s.
- */
- speed_2x = (gyro_speed_plus + gyro_speed_minus);
- sc->ds4_calib_data[0].abs_code = ABS_RX;
- sc->ds4_calib_data[0].bias = gyro_pitch_bias;
- sc->ds4_calib_data[0].sens_numer = speed_2x*DS4_GYRO_RES_PER_DEG_S;
- sc->ds4_calib_data[0].sens_denom = gyro_pitch_plus - gyro_pitch_minus;
-
- sc->ds4_calib_data[1].abs_code = ABS_RY;
- sc->ds4_calib_data[1].bias = gyro_yaw_bias;
- sc->ds4_calib_data[1].sens_numer = speed_2x*DS4_GYRO_RES_PER_DEG_S;
- sc->ds4_calib_data[1].sens_denom = gyro_yaw_plus - gyro_yaw_minus;
-
- sc->ds4_calib_data[2].abs_code = ABS_RZ;
- sc->ds4_calib_data[2].bias = gyro_roll_bias;
- sc->ds4_calib_data[2].sens_numer = speed_2x*DS4_GYRO_RES_PER_DEG_S;
- sc->ds4_calib_data[2].sens_denom = gyro_roll_plus - gyro_roll_minus;
-
- /* Set accelerometer calibration and normalization parameters.
- * Data values will be normalized to 1/DS4_ACC_RES_PER_G G.
- */
- range_2g = acc_x_plus - acc_x_minus;
- sc->ds4_calib_data[3].abs_code = ABS_X;
- sc->ds4_calib_data[3].bias = acc_x_plus - range_2g / 2;
- sc->ds4_calib_data[3].sens_numer = 2*DS4_ACC_RES_PER_G;
- sc->ds4_calib_data[3].sens_denom = range_2g;
-
- range_2g = acc_y_plus - acc_y_minus;
- sc->ds4_calib_data[4].abs_code = ABS_Y;
- sc->ds4_calib_data[4].bias = acc_y_plus - range_2g / 2;
- sc->ds4_calib_data[4].sens_numer = 2*DS4_ACC_RES_PER_G;
- sc->ds4_calib_data[4].sens_denom = range_2g;
-
- range_2g = acc_z_plus - acc_z_minus;
- sc->ds4_calib_data[5].abs_code = ABS_Z;
- sc->ds4_calib_data[5].bias = acc_z_plus - range_2g / 2;
- sc->ds4_calib_data[5].sens_numer = 2*DS4_ACC_RES_PER_G;
- sc->ds4_calib_data[5].sens_denom = range_2g;
-
-err_stop:
- kfree(buf);
- return ret;
-}
-
-static void dualshock4_calibration_work(struct work_struct *work)
-{
- struct sony_sc *sc = container_of(work, struct sony_sc, hotplug_worker);
- unsigned long flags;
- enum ds4_dongle_state dongle_state;
- int ret;
-
- ret = dualshock4_get_calibration_data(sc);
- if (ret < 0) {
- /* This call is very unlikely to fail for the dongle. When it
- * fails we are probably in a very bad state, so mark the
- * dongle as disabled. We will re-enable the dongle if a new
- * DS4 hotplug is detect from sony_raw_event as any issues
- * are likely resolved then (the dongle is quite stupid).
- */
- hid_err(sc->hdev, "DualShock 4 USB dongle: calibration failed, disabling device\n");
- dongle_state = DONGLE_DISABLED;
- } else {
- hid_info(sc->hdev, "DualShock 4 USB dongle: calibration completed\n");
- dongle_state = DONGLE_CONNECTED;
- }
-
- spin_lock_irqsave(&sc->lock, flags);
- sc->ds4_dongle_state = dongle_state;
- spin_unlock_irqrestore(&sc->lock, flags);
-}
-
-static int dualshock4_get_version_info(struct sony_sc *sc)
-{
- u8 *buf;
- int ret;
-
- buf = kmalloc(DS4_FEATURE_REPORT_0xA3_SIZE, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- ret = hid_hw_raw_request(sc->hdev, 0xA3, buf,
- DS4_FEATURE_REPORT_0xA3_SIZE,
- HID_FEATURE_REPORT,
- HID_REQ_GET_REPORT);
- if (ret < 0) {
- kfree(buf);
- return ret;
- }
-
- sc->hw_version = get_unaligned_le16(&buf[35]);
- sc->fw_version = get_unaligned_le16(&buf[41]);
-
- kfree(buf);
- return 0;
-}
-
static void sixaxis_set_leds_from_id(struct sony_sc *sc)
{
static const u8 sixaxis_leds[10][4] = {
@@ -1941,30 +1223,6 @@ static void sixaxis_set_leds_from_id(struct sony_sc *sc)
memcpy(sc->led_state, sixaxis_leds[id], sizeof(sixaxis_leds[id]));
}
-static void dualshock4_set_leds_from_id(struct sony_sc *sc)
-{
- /* The first 4 color/index entries match what the PS4 assigns */
- static const u8 color_code[7][3] = {
- /* Blue */ { 0x00, 0x00, 0x40 },
- /* Red */ { 0x40, 0x00, 0x00 },
- /* Green */ { 0x00, 0x40, 0x00 },
- /* Pink */ { 0x20, 0x00, 0x20 },
- /* Orange */ { 0x02, 0x01, 0x00 },
- /* Teal */ { 0x00, 0x01, 0x01 },
- /* White */ { 0x01, 0x01, 0x01 }
- };
-
- int id = sc->device_id;
-
- BUILD_BUG_ON(MAX_LEDS < ARRAY_SIZE(color_code[0]));
-
- if (id < 0)
- return;
-
- id %= 7;
- memcpy(sc->led_state, color_code[id], sizeof(color_code[id]));
-}
-
static void buzz_set_leds(struct sony_sc *sc)
{
struct hid_device *hdev = sc->hdev;
@@ -2110,13 +1368,13 @@ static int sony_leds_init(struct sony_sc *sc)
{
struct hid_device *hdev = sc->hdev;
int n, ret = 0;
- int use_ds4_names;
+ int use_color_names;
struct led_classdev *led;
size_t name_sz;
char *name;
size_t name_len;
const char *name_fmt;
- static const char * const ds4_name_str[] = { "red", "green", "blue",
+ static const char * const color_name_str[] = { "red", "green", "blue",
"global" };
u8 max_brightness[MAX_LEDS] = { [0 ... (MAX_LEDS - 1)] = 1 };
u8 use_hw_blink[MAX_LEDS] = { 0 };
@@ -2125,25 +1383,16 @@ static int sony_leds_init(struct sony_sc *sc)
if (sc->quirks & BUZZ_CONTROLLER) {
sc->led_count = 4;
- use_ds4_names = 0;
+ use_color_names = 0;
name_len = strlen("::buzz#");
name_fmt = "%s::buzz%d";
/* Validate expected report characteristics. */
if (!hid_validate_values(hdev, HID_OUTPUT_REPORT, 0, 0, 7))
return -ENODEV;
- } else if (sc->quirks & DUALSHOCK4_CONTROLLER) {
- dualshock4_set_leds_from_id(sc);
- sc->led_state[3] = 1;
- sc->led_count = 4;
- memset(max_brightness, 255, 3);
- use_hw_blink[3] = 1;
- use_ds4_names = 1;
- name_len = 0;
- name_fmt = "%s:%s";
} else if (sc->quirks & MOTION_CONTROLLER) {
sc->led_count = 3;
memset(max_brightness, 255, 3);
- use_ds4_names = 1;
+ use_color_names = 1;
name_len = 0;
name_fmt = "%s:%s";
} else if (sc->quirks & NAVIGATION_CONTROLLER) {
@@ -2152,14 +1401,14 @@ static int sony_leds_init(struct sony_sc *sc)
memcpy(sc->led_state, navigation_leds, sizeof(navigation_leds));
sc->led_count = 1;
memset(use_hw_blink, 1, 4);
- use_ds4_names = 0;
+ use_color_names = 0;
name_len = strlen("::sony#");
name_fmt = "%s::sony%d";
} else {
sixaxis_set_leds_from_id(sc);
sc->led_count = 4;
memset(use_hw_blink, 1, 4);
- use_ds4_names = 0;
+ use_color_names = 0;
name_len = strlen("::sony#");
name_fmt = "%s::sony%d";
}
@@ -2175,8 +1424,8 @@ static int sony_leds_init(struct sony_sc *sc)
for (n = 0; n < sc->led_count; n++) {
- if (use_ds4_names)
- name_sz = strlen(dev_name(&hdev->dev)) + strlen(ds4_name_str[n]) + 2;
+ if (use_color_names)
+ name_sz = strlen(dev_name(&hdev->dev)) + strlen(color_name_str[n]) + 2;
led = devm_kzalloc(&hdev->dev, sizeof(struct led_classdev) + name_sz, GFP_KERNEL);
if (!led) {
@@ -2185,9 +1434,9 @@ static int sony_leds_init(struct sony_sc *sc)
}
name = (void *)(&led[1]);
- if (use_ds4_names)
+ if (use_color_names)
snprintf(name, name_sz, name_fmt, dev_name(&hdev->dev),
- ds4_name_str[n]);
+ color_name_str[n]);
else
snprintf(name, name_sz, name_fmt, dev_name(&hdev->dev), n + 1);
led->name = name;
@@ -2273,68 +1522,6 @@ static void sixaxis_send_output_report(struct sony_sc *sc)
HID_OUTPUT_REPORT, HID_REQ_SET_REPORT);
}
-static void dualshock4_send_output_report(struct sony_sc *sc)
-{
- struct hid_device *hdev = sc->hdev;
- u8 *buf = sc->output_report_dmabuf;
- int offset;
-
- /*
- * NOTE: The lower 6 bits of buf[1] field of the Bluetooth report
- * control the interval at which Dualshock 4 reports data:
- * 0x00 - 1ms
- * 0x01 - 1ms
- * 0x02 - 2ms
- * 0x3E - 62ms
- * 0x3F - disabled
- */
- if (sc->quirks & (DUALSHOCK4_CONTROLLER_USB | DUALSHOCK4_DONGLE)) {
- memset(buf, 0, DS4_OUTPUT_REPORT_0x05_SIZE);
- buf[0] = 0x05;
- buf[1] = 0x07; /* blink + LEDs + motor */
- offset = 4;
- } else {
- memset(buf, 0, DS4_OUTPUT_REPORT_0x11_SIZE);
- buf[0] = 0x11;
- buf[1] = 0xC0 /* HID + CRC */ | sc->ds4_bt_poll_interval;
- buf[3] = 0x07; /* blink + LEDs + motor */
- offset = 6;
- }
-
-#ifdef CONFIG_SONY_FF
- buf[offset++] = sc->right;
- buf[offset++] = sc->left;
-#else
- offset += 2;
-#endif
-
- /* LED 3 is the global control */
- if (sc->led_state[3]) {
- buf[offset++] = sc->led_state[0];
- buf[offset++] = sc->led_state[1];
- buf[offset++] = sc->led_state[2];
- } else {
- offset += 3;
- }
-
- /* If both delay values are zero the DualShock 4 disables blinking. */
- buf[offset++] = sc->led_delay_on[3];
- buf[offset++] = sc->led_delay_off[3];
-
- if (sc->quirks & (DUALSHOCK4_CONTROLLER_USB | DUALSHOCK4_DONGLE))
- hid_hw_output_report(hdev, buf, DS4_OUTPUT_REPORT_0x05_SIZE);
- else {
- /* CRC generation */
- u8 bthdr = 0xA2;
- u32 crc;
-
- crc = crc32_le(0xFFFFFFFF, &bthdr, 1);
- crc = ~crc32_le(crc, buf, DS4_OUTPUT_REPORT_0x11_SIZE-4);
- put_unaligned_le32(crc, &buf[74]);
- hid_hw_output_report(hdev, buf, DS4_OUTPUT_REPORT_0x11_SIZE);
- }
-}
-
static void motion_send_output_report(struct sony_sc *sc)
{
struct hid_device *hdev = sc->hdev;
@@ -2378,14 +1565,6 @@ static int sony_allocate_output_report(struct sony_sc *sc)
devm_kmalloc(&sc->hdev->dev,
sizeof(union sixaxis_output_report_01),
GFP_KERNEL);
- else if (sc->quirks & DUALSHOCK4_CONTROLLER_BT)
- sc->output_report_dmabuf = devm_kmalloc(&sc->hdev->dev,
- DS4_OUTPUT_REPORT_0x11_SIZE,
- GFP_KERNEL);
- else if (sc->quirks & (DUALSHOCK4_CONTROLLER_USB | DUALSHOCK4_DONGLE))
- sc->output_report_dmabuf = devm_kmalloc(&sc->hdev->dev,
- DS4_OUTPUT_REPORT_0x05_SIZE,
- GFP_KERNEL);
else if (sc->quirks & MOTION_CONTROLLER)
sc->output_report_dmabuf = devm_kmalloc(&sc->hdev->dev,
MOTION_REPORT_0x02_SIZE,
@@ -2600,8 +1779,7 @@ static int sony_check_add(struct sony_sc *sc)
u8 *buf = NULL;
int n, ret;
- if ((sc->quirks & DUALSHOCK4_CONTROLLER_BT) ||
- (sc->quirks & MOTION_CONTROLLER_BT) ||
+ if ((sc->quirks & MOTION_CONTROLLER_BT) ||
(sc->quirks & NAVIGATION_CONTROLLER_BT) ||
(sc->quirks & SIXAXIS_CONTROLLER_BT)) {
/*
@@ -2614,30 +1792,6 @@ static int sony_check_add(struct sony_sc *sc)
hid_warn(sc->hdev, "UNIQ does not contain a MAC address; duplicate check skipped\n");
return 0;
}
- } else if (sc->quirks & (DUALSHOCK4_CONTROLLER_USB | DUALSHOCK4_DONGLE)) {
- buf = kmalloc(DS4_FEATURE_REPORT_0x81_SIZE, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- /*
- * The MAC address of a DS4 controller connected via USB can be
- * retrieved with feature report 0x81. The address begins at
- * offset 1.
- */
- ret = hid_hw_raw_request(sc->hdev, 0x81, buf,
- DS4_FEATURE_REPORT_0x81_SIZE, HID_FEATURE_REPORT,
- HID_REQ_GET_REPORT);
-
- if (ret != DS4_FEATURE_REPORT_0x81_SIZE) {
- hid_err(sc->hdev, "failed to retrieve feature report 0x81 with the DualShock 4 MAC address\n");
- ret = ret < 0 ? ret : -EINVAL;
- goto out_free;
- }
-
- memcpy(sc->mac_address, &buf[1], sizeof(sc->mac_address));
-
- snprintf(sc->hdev->uniq, sizeof(sc->hdev->uniq),
- "%pMR", sc->mac_address);
} else if ((sc->quirks & SIXAXIS_CONTROLLER_USB) ||
(sc->quirks & NAVIGATION_CONTROLLER_USB)) {
buf = kmalloc(SIXAXIS_REPORT_0xF2_SIZE, GFP_KERNEL);
@@ -2686,11 +1840,10 @@ static int sony_set_device_id(struct sony_sc *sc)
int ret;
/*
- * Only DualShock 4 or Sixaxis controllers get an id.
+ * Only Sixaxis controllers get an id.
* All others are set to -1.
*/
- if ((sc->quirks & SIXAXIS_CONTROLLER) ||
- (sc->quirks & DUALSHOCK4_CONTROLLER)) {
+ if (sc->quirks & SIXAXIS_CONTROLLER) {
ret = ida_simple_get(&sony_device_id_allocator, 0, 0,
GFP_KERNEL);
if (ret < 0) {
@@ -2728,8 +1881,6 @@ static inline void sony_cancel_work_sync(struct sony_sc *sc)
{
unsigned long flags;
- if (sc->hotplug_worker_initialized)
- cancel_work_sync(&sc->hotplug_worker);
if (sc->state_worker_initialized) {
spin_lock_irqsave(&sc->lock, flags);
sc->state_worker_initialized = 0;
@@ -2849,68 +2000,6 @@ static int sony_input_configured(struct hid_device *hdev,
}
sony_init_output_report(sc, sixaxis_send_output_report);
- } else if (sc->quirks & DUALSHOCK4_CONTROLLER) {
- ret = dualshock4_get_calibration_data(sc);
- if (ret < 0) {
- hid_err(hdev, "Failed to get calibration data from Dualshock 4\n");
- goto err_stop;
- }
-
- ret = dualshock4_get_version_info(sc);
- if (ret < 0) {
- hid_err(sc->hdev, "Failed to get version data from Dualshock 4\n");
- goto err_stop;
- }
-
- ret = device_create_file(&sc->hdev->dev, &dev_attr_firmware_version);
- if (ret) {
- hid_err(sc->hdev, "can't create sysfs firmware_version attribute err: %d\n", ret);
- goto err_stop;
- }
- sc->fw_version_created = true;
-
- ret = device_create_file(&sc->hdev->dev, &dev_attr_hardware_version);
- if (ret) {
- hid_err(sc->hdev, "can't create sysfs hardware_version attribute err: %d\n", ret);
- goto err_stop;
- }
- sc->hw_version_created = true;
-
- /*
- * The Dualshock 4 touchpad supports 2 touches and has a
- * resolution of 1920x942 (44.86 dots/mm).
- */
- ret = sony_register_touchpad(sc, 2, 1920, 942, 0, 0, 0);
- if (ret) {
- hid_err(sc->hdev,
- "Unable to initialize multi-touch slots: %d\n",
- ret);
- goto err_stop;
- }
-
- ret = sony_register_sensors(sc);
- if (ret) {
- hid_err(sc->hdev,
- "Unable to initialize motion sensors: %d\n", ret);
- goto err_stop;
- }
-
- if (sc->quirks & DUALSHOCK4_CONTROLLER_BT) {
- sc->ds4_bt_poll_interval = DS4_BT_DEFAULT_POLL_INTERVAL_MS;
- ret = device_create_file(&sc->hdev->dev, &dev_attr_bt_poll_interval);
- if (ret)
- hid_warn(sc->hdev,
- "can't create sysfs bt_poll_interval attribute err: %d\n",
- ret);
- }
-
- if (sc->quirks & DUALSHOCK4_DONGLE) {
- INIT_WORK(&sc->hotplug_worker, dualshock4_calibration_work);
- sc->hotplug_worker_initialized = 1;
- sc->ds4_dongle_state = DONGLE_DISCONNECTED;
- }
-
- sony_init_output_report(sc, dualshock4_send_output_report);
} else if (sc->quirks & NSG_MRXU_REMOTE) {
/*
* The NSG-MRxU touchpad supports 2 touches and has a
@@ -2960,16 +2049,6 @@ static int sony_input_configured(struct hid_device *hdev,
err_close:
hid_hw_close(hdev);
err_stop:
- /* Piggy back on the default ds4_bt_ poll_interval to determine
- * if we need to remove the file as we don't know for sure if we
- * executed that logic.
- */
- if (sc->ds4_bt_poll_interval)
- device_remove_file(&sc->hdev->dev, &dev_attr_bt_poll_interval);
- if (sc->fw_version_created)
- device_remove_file(&sc->hdev->dev, &dev_attr_firmware_version);
- if (sc->hw_version_created)
- device_remove_file(&sc->hdev->dev, &dev_attr_hardware_version);
sony_cancel_work_sync(sc);
sony_remove_dev_list(sc);
sony_release_device_id(sc);
@@ -3014,13 +2093,13 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
else if (sc->quirks & SIXAXIS_CONTROLLER)
connect_mask |= HID_CONNECT_HIDDEV_FORCE;
- /* Patch the hw version on DS3/4 compatible devices, so applications can
+ /* Patch the hw version on DS3 compatible devices, so applications can
* distinguish between the default HID mappings and the mappings defined
* by the Linux game controller spec. This is important for the SDL2
* library, which has a game controller database, which uses device ids
* in combination with version as a key.
*/
- if (sc->quirks & (SIXAXIS_CONTROLLER | DUALSHOCK4_CONTROLLER))
+ if (sc->quirks & SIXAXIS_CONTROLLER)
hdev->version |= 0x8000;
ret = hid_hw_start(hdev, connect_mask);
@@ -3091,15 +2170,6 @@ static void sony_remove(struct hid_device *hdev)
hid_hw_close(hdev);
- if (sc->quirks & DUALSHOCK4_CONTROLLER_BT)
- device_remove_file(&sc->hdev->dev, &dev_attr_bt_poll_interval);
-
- if (sc->fw_version_created)
- device_remove_file(&sc->hdev->dev, &dev_attr_firmware_version);
-
- if (sc->hw_version_created)
- device_remove_file(&sc->hdev->dev, &dev_attr_hardware_version);
-
sony_cancel_work_sync(sc);
sony_remove_dev_list(sc);
@@ -3180,17 +2250,6 @@ static const struct hid_device_id sony_devices[] = {
/* SMK-Link PS3 BD Remote Control */
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SMK, USB_DEVICE_ID_SMK_PS3_BDREMOTE),
.driver_data = PS3REMOTE },
- /* Sony Dualshock 4 controllers for PS4 */
- { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER),
- .driver_data = DUALSHOCK4_CONTROLLER_USB },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER),
- .driver_data = DUALSHOCK4_CONTROLLER_BT },
- { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_2),
- .driver_data = DUALSHOCK4_CONTROLLER_USB },
- { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_2),
- .driver_data = DUALSHOCK4_CONTROLLER_BT },
- { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_DONGLE),
- .driver_data = DUALSHOCK4_DONGLE },
/* Nyko Core Controller for PS3 */
{ HID_USB_DEVICE(USB_VENDOR_ID_SINO_LITE, USB_DEVICE_ID_SINO_LITE_CONTROLLER),
.driver_data = SIXAXIS_CONTROLLER_USB | SINO_LITE_CONTROLLER },
diff --git a/drivers/hid/hid-steam.c b/drivers/hid/hid-steam.c
index 8ee43cb225fc..b110818fc945 100644
--- a/drivers/hid/hid-steam.c
+++ b/drivers/hid/hid-steam.c
@@ -3,6 +3,7 @@
* HID driver for Valve Steam Controller
*
* Copyright (c) 2018 Rodrigo Rivas Costa <rodrigorivascosta@gmail.com>
+ * Copyright (c) 2022 Valve Software
*
* Supports both the wired and wireless interfaces.
*
@@ -53,6 +54,7 @@ static DEFINE_MUTEX(steam_devices_lock);
static LIST_HEAD(steam_devices);
#define STEAM_QUIRK_WIRELESS BIT(0)
+#define STEAM_QUIRK_DECK BIT(1)
/* Touch pads are 40 mm in diameter and 65535 units */
#define STEAM_PAD_RESOLUTION 1638
@@ -60,6 +62,10 @@ static LIST_HEAD(steam_devices);
#define STEAM_TRIGGER_RESOLUTION 51
/* Joystick runs are about 5 mm and 256 units */
#define STEAM_JOYSTICK_RESOLUTION 51
+/* Trigger runs are about 6 mm and 32768 units */
+#define STEAM_DECK_TRIGGER_RESOLUTION 5461
+/* Joystick runs are about 5 mm and 32768 units */
+#define STEAM_DECK_JOYSTICK_RESOLUTION 6553
#define STEAM_PAD_FUZZ 256
@@ -85,6 +91,7 @@ static LIST_HEAD(steam_devices);
#define STEAM_CMD_FORCEFEEDBAK 0x8f
#define STEAM_CMD_REQUEST_COMM_STATUS 0xb4
#define STEAM_CMD_GET_SERIAL 0xae
+#define STEAM_CMD_HAPTIC_RUMBLE 0xeb
/* Some useful register ids */
#define STEAM_REG_LPAD_MODE 0x07
@@ -92,11 +99,14 @@ static LIST_HEAD(steam_devices);
#define STEAM_REG_RPAD_MARGIN 0x18
#define STEAM_REG_LED 0x2d
#define STEAM_REG_GYRO_MODE 0x30
+#define STEAM_REG_LPAD_CLICK_PRESSURE 0x34
+#define STEAM_REG_RPAD_CLICK_PRESSURE 0x35
/* Raw event identifiers */
#define STEAM_EV_INPUT_DATA 0x01
#define STEAM_EV_CONNECT 0x03
#define STEAM_EV_BATTERY 0x04
+#define STEAM_EV_DECK_INPUT_DATA 0x09
/* Values for GYRO_MODE (bitmask) */
#define STEAM_GYRO_MODE_OFF 0x0000
@@ -124,6 +134,10 @@ struct steam_device {
struct power_supply __rcu *battery;
u8 battery_charge;
u16 voltage;
+ struct delayed_work heartbeat;
+ struct work_struct rumble_work;
+ u16 rumble_left;
+ u16 rumble_right;
};
static int steam_recv_report(struct steam_device *steam,
@@ -193,7 +207,7 @@ static int steam_send_report(struct steam_device *steam,
*/
do {
ret = hid_hw_raw_request(steam->hdev, 0,
- buf, size + 1,
+ buf, max(size, 64) + 1,
HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
if (ret != -EPIPE)
break;
@@ -219,6 +233,7 @@ static int steam_write_registers(struct steam_device *steam,
u8 reg;
u16 val;
u8 cmd[64] = {STEAM_CMD_WRITE_REGISTER, 0x00};
+ int ret;
va_list args;
va_start(args, steam);
@@ -234,7 +249,16 @@ static int steam_write_registers(struct steam_device *steam,
}
va_end(args);
- return steam_send_report(steam, cmd, 2 + cmd[1]);
+ ret = steam_send_report(steam, cmd, 2 + cmd[1]);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Sometimes a lingering report for this command can
+ * get read back instead of the last set report if
+ * this isn't explicitly queried
+ */
+ return steam_recv_report(steam, cmd, 2 + cmd[1]);
}
static int steam_get_serial(struct steam_device *steam)
@@ -270,6 +294,45 @@ static inline int steam_request_conn_status(struct steam_device *steam)
return steam_send_report_byte(steam, STEAM_CMD_REQUEST_COMM_STATUS);
}
+static inline int steam_haptic_rumble(struct steam_device *steam,
+ u16 intensity, u16 left_speed, u16 right_speed,
+ u8 left_gain, u8 right_gain)
+{
+ u8 report[11] = {STEAM_CMD_HAPTIC_RUMBLE, 9};
+
+ report[3] = intensity & 0xFF;
+ report[4] = intensity >> 8;
+ report[5] = left_speed & 0xFF;
+ report[6] = left_speed >> 8;
+ report[7] = right_speed & 0xFF;
+ report[8] = right_speed >> 8;
+ report[9] = left_gain;
+ report[10] = right_gain;
+
+ return steam_send_report(steam, report, sizeof(report));
+}
+
+static void steam_haptic_rumble_cb(struct work_struct *work)
+{
+ struct steam_device *steam = container_of(work, struct steam_device,
+ rumble_work);
+ steam_haptic_rumble(steam, 0, steam->rumble_left,
+ steam->rumble_right, 2, 0);
+}
+
+#ifdef CONFIG_STEAM_FF
+static int steam_play_effect(struct input_dev *dev, void *data,
+ struct ff_effect *effect)
+{
+ struct steam_device *steam = input_get_drvdata(dev);
+
+ steam->rumble_left = effect->u.rumble.strong_magnitude;
+ steam->rumble_right = effect->u.rumble.weak_magnitude;
+
+ return schedule_work(&steam->rumble_work);
+}
+#endif
+
static void steam_set_lizard_mode(struct steam_device *steam, bool enable)
{
if (enable) {
@@ -280,13 +343,33 @@ static void steam_set_lizard_mode(struct steam_device *steam, bool enable)
steam_write_registers(steam,
STEAM_REG_RPAD_MARGIN, 0x01, /* enable margin */
0);
+
+ cancel_delayed_work_sync(&steam->heartbeat);
} else {
/* disable esc, enter, cursor */
steam_send_report_byte(steam, STEAM_CMD_CLEAR_MAPPINGS);
- steam_write_registers(steam,
- STEAM_REG_RPAD_MODE, 0x07, /* disable mouse */
- STEAM_REG_RPAD_MARGIN, 0x00, /* disable margin */
- 0);
+
+ if (steam->quirks & STEAM_QUIRK_DECK) {
+ steam_write_registers(steam,
+ STEAM_REG_RPAD_MARGIN, 0x00, /* disable margin */
+ STEAM_REG_LPAD_MODE, 0x07, /* disable mouse */
+ STEAM_REG_RPAD_MODE, 0x07, /* disable mouse */
+ STEAM_REG_LPAD_CLICK_PRESSURE, 0xFFFF, /* disable clicky pad */
+ STEAM_REG_RPAD_CLICK_PRESSURE, 0xFFFF, /* disable clicky pad */
+ 0);
+ /*
+ * The Steam Deck has a watchdog that automatically enables
+ * lizard mode if it doesn't see any traffic for too long
+ */
+ if (!work_busy(&steam->heartbeat.work))
+ schedule_delayed_work(&steam->heartbeat, 5 * HZ);
+ } else {
+ steam_write_registers(steam,
+ STEAM_REG_RPAD_MARGIN, 0x00, /* disable margin */
+ STEAM_REG_LPAD_MODE, 0x07, /* disable mouse */
+ STEAM_REG_RPAD_MODE, 0x07, /* disable mouse */
+ 0);
+ }
}
}
@@ -413,8 +496,8 @@ static int steam_input_register(struct steam_device *steam)
input->open = steam_input_open;
input->close = steam_input_close;
- input->name = (steam->quirks & STEAM_QUIRK_WIRELESS) ?
- "Wireless Steam Controller" :
+ input->name = (steam->quirks & STEAM_QUIRK_WIRELESS) ? "Wireless Steam Controller" :
+ (steam->quirks & STEAM_QUIRK_DECK) ? "Steam Deck" :
"Steam Controller";
input->phys = hdev->phys;
input->uniq = steam->serial_no;
@@ -438,33 +521,76 @@ static int steam_input_register(struct steam_device *steam)
input_set_capability(input, EV_KEY, BTN_SELECT);
input_set_capability(input, EV_KEY, BTN_MODE);
input_set_capability(input, EV_KEY, BTN_START);
- input_set_capability(input, EV_KEY, BTN_GEAR_DOWN);
- input_set_capability(input, EV_KEY, BTN_GEAR_UP);
input_set_capability(input, EV_KEY, BTN_THUMBR);
input_set_capability(input, EV_KEY, BTN_THUMBL);
input_set_capability(input, EV_KEY, BTN_THUMB);
input_set_capability(input, EV_KEY, BTN_THUMB2);
+ if (steam->quirks & STEAM_QUIRK_DECK) {
+ input_set_capability(input, EV_KEY, BTN_BASE);
+ input_set_capability(input, EV_KEY, BTN_TRIGGER_HAPPY1);
+ input_set_capability(input, EV_KEY, BTN_TRIGGER_HAPPY2);
+ input_set_capability(input, EV_KEY, BTN_TRIGGER_HAPPY3);
+ input_set_capability(input, EV_KEY, BTN_TRIGGER_HAPPY4);
+ } else {
+ input_set_capability(input, EV_KEY, BTN_GEAR_DOWN);
+ input_set_capability(input, EV_KEY, BTN_GEAR_UP);
+ }
- input_set_abs_params(input, ABS_HAT2Y, 0, 255, 0, 0);
- input_set_abs_params(input, ABS_HAT2X, 0, 255, 0, 0);
input_set_abs_params(input, ABS_X, -32767, 32767, 0, 0);
input_set_abs_params(input, ABS_Y, -32767, 32767, 0, 0);
- input_set_abs_params(input, ABS_RX, -32767, 32767,
- STEAM_PAD_FUZZ, 0);
- input_set_abs_params(input, ABS_RY, -32767, 32767,
- STEAM_PAD_FUZZ, 0);
+
input_set_abs_params(input, ABS_HAT0X, -32767, 32767,
STEAM_PAD_FUZZ, 0);
input_set_abs_params(input, ABS_HAT0Y, -32767, 32767,
STEAM_PAD_FUZZ, 0);
- input_abs_set_res(input, ABS_X, STEAM_JOYSTICK_RESOLUTION);
- input_abs_set_res(input, ABS_Y, STEAM_JOYSTICK_RESOLUTION);
- input_abs_set_res(input, ABS_RX, STEAM_PAD_RESOLUTION);
- input_abs_set_res(input, ABS_RY, STEAM_PAD_RESOLUTION);
+
+ if (steam->quirks & STEAM_QUIRK_DECK) {
+ input_set_abs_params(input, ABS_HAT2Y, 0, 32767, 0, 0);
+ input_set_abs_params(input, ABS_HAT2X, 0, 32767, 0, 0);
+
+ input_set_abs_params(input, ABS_RX, -32767, 32767, 0, 0);
+ input_set_abs_params(input, ABS_RY, -32767, 32767, 0, 0);
+
+ input_set_abs_params(input, ABS_HAT1X, -32767, 32767,
+ STEAM_PAD_FUZZ, 0);
+ input_set_abs_params(input, ABS_HAT1Y, -32767, 32767,
+ STEAM_PAD_FUZZ, 0);
+
+ input_abs_set_res(input, ABS_X, STEAM_DECK_JOYSTICK_RESOLUTION);
+ input_abs_set_res(input, ABS_Y, STEAM_DECK_JOYSTICK_RESOLUTION);
+ input_abs_set_res(input, ABS_RX, STEAM_DECK_JOYSTICK_RESOLUTION);
+ input_abs_set_res(input, ABS_RY, STEAM_DECK_JOYSTICK_RESOLUTION);
+ input_abs_set_res(input, ABS_HAT1X, STEAM_PAD_RESOLUTION);
+ input_abs_set_res(input, ABS_HAT1Y, STEAM_PAD_RESOLUTION);
+ input_abs_set_res(input, ABS_HAT2Y, STEAM_DECK_TRIGGER_RESOLUTION);
+ input_abs_set_res(input, ABS_HAT2X, STEAM_DECK_TRIGGER_RESOLUTION);
+ } else {
+ input_set_abs_params(input, ABS_HAT2Y, 0, 255, 0, 0);
+ input_set_abs_params(input, ABS_HAT2X, 0, 255, 0, 0);
+
+ input_set_abs_params(input, ABS_RX, -32767, 32767,
+ STEAM_PAD_FUZZ, 0);
+ input_set_abs_params(input, ABS_RY, -32767, 32767,
+ STEAM_PAD_FUZZ, 0);
+
+ input_abs_set_res(input, ABS_X, STEAM_JOYSTICK_RESOLUTION);
+ input_abs_set_res(input, ABS_Y, STEAM_JOYSTICK_RESOLUTION);
+ input_abs_set_res(input, ABS_RX, STEAM_PAD_RESOLUTION);
+ input_abs_set_res(input, ABS_RY, STEAM_PAD_RESOLUTION);
+ input_abs_set_res(input, ABS_HAT2Y, STEAM_TRIGGER_RESOLUTION);
+ input_abs_set_res(input, ABS_HAT2X, STEAM_TRIGGER_RESOLUTION);
+ }
input_abs_set_res(input, ABS_HAT0X, STEAM_PAD_RESOLUTION);
input_abs_set_res(input, ABS_HAT0Y, STEAM_PAD_RESOLUTION);
- input_abs_set_res(input, ABS_HAT2Y, STEAM_TRIGGER_RESOLUTION);
- input_abs_set_res(input, ABS_HAT2X, STEAM_TRIGGER_RESOLUTION);
+
+#ifdef CONFIG_STEAM_FF
+ if (steam->quirks & STEAM_QUIRK_DECK) {
+ input_set_capability(input, EV_FF, FF_RUMBLE);
+ ret = input_ff_create_memless(input, NULL, steam_play_effect);
+ if (ret)
+ goto input_register_fail;
+ }
+#endif
ret = input_register_device(input);
if (ret)
@@ -612,6 +738,22 @@ static bool steam_is_valve_interface(struct hid_device *hdev)
return !list_empty(&rep_enum->report_list);
}
+static void steam_lizard_mode_heartbeat(struct work_struct *work)
+{
+ struct steam_device *steam = container_of(work, struct steam_device,
+ heartbeat.work);
+
+ mutex_lock(&steam->mutex);
+ if (!steam->client_opened && steam->client_hdev) {
+ steam_send_report_byte(steam, STEAM_CMD_CLEAR_MAPPINGS);
+ steam_write_registers(steam,
+ STEAM_REG_RPAD_MODE, 0x07, /* disable mouse */
+ 0);
+ schedule_delayed_work(&steam->heartbeat, 5 * HZ);
+ }
+ mutex_unlock(&steam->mutex);
+}
+
static int steam_client_ll_parse(struct hid_device *hdev)
{
struct steam_device *steam = hdev->driver_data;
@@ -674,7 +816,7 @@ static int steam_client_ll_raw_request(struct hid_device *hdev,
report_type, reqtype);
}
-static struct hid_ll_driver steam_client_ll_driver = {
+static const struct hid_ll_driver steam_client_ll_driver = {
.parse = steam_client_ll_parse,
.start = steam_client_ll_start,
.stop = steam_client_ll_stop,
@@ -750,6 +892,8 @@ static int steam_probe(struct hid_device *hdev,
steam->quirks = id->driver_data;
INIT_WORK(&steam->work_connect, steam_work_connect_cb);
INIT_LIST_HEAD(&steam->list);
+ INIT_DEFERRABLE_WORK(&steam->heartbeat, steam_lizard_mode_heartbeat);
+ INIT_WORK(&steam->rumble_work, steam_haptic_rumble_cb);
steam->client_hdev = steam_create_client_hid(hdev);
if (IS_ERR(steam->client_hdev)) {
@@ -805,6 +949,8 @@ hid_hw_start_fail:
hid_destroy_device(steam->client_hdev);
client_hdev_fail:
cancel_work_sync(&steam->work_connect);
+ cancel_delayed_work_sync(&steam->heartbeat);
+ cancel_work_sync(&steam->rumble_work);
steam_alloc_fail:
hid_err(hdev, "%s: failed with error %d\n",
__func__, ret);
@@ -821,7 +967,11 @@ static void steam_remove(struct hid_device *hdev)
}
hid_destroy_device(steam->client_hdev);
+ mutex_lock(&steam->mutex);
+ steam->client_hdev = NULL;
steam->client_opened = false;
+ cancel_delayed_work_sync(&steam->heartbeat);
+ mutex_unlock(&steam->mutex);
cancel_work_sync(&steam->work_connect);
if (steam->quirks & STEAM_QUIRK_WIRELESS) {
hid_info(hdev, "Steam wireless receiver disconnected");
@@ -906,10 +1056,10 @@ static inline s16 steam_le16(u8 *data)
* 8.5 | BTN_B | button B
* 8.6 | BTN_X | button X
* 8.7 | BTN_A | button A
- * 9.0 | BTN_DPAD_UP | lef-pad up
- * 9.1 | BTN_DPAD_RIGHT | lef-pad right
- * 9.2 | BTN_DPAD_LEFT | lef-pad left
- * 9.3 | BTN_DPAD_DOWN | lef-pad down
+ * 9.0 | BTN_DPAD_UP | left-pad up
+ * 9.1 | BTN_DPAD_RIGHT | left-pad right
+ * 9.2 | BTN_DPAD_LEFT | left-pad left
+ * 9.3 | BTN_DPAD_DOWN | left-pad down
* 9.4 | BTN_SELECT | menu left
* 9.5 | BTN_MODE | steam logo
* 9.6 | BTN_START | menu right
@@ -994,6 +1144,172 @@ static void steam_do_input_event(struct steam_device *steam,
}
/*
+ * The size for this message payload is 56.
+ * The known values are:
+ * Offset| Type | Mapped to |Meaning
+ * -------+-------+-----------+--------------------------
+ * 4-7 | u32 | -- | sequence number
+ * 8-15 | u64 | see below | buttons
+ * 16-17 | s16 | ABS_HAT0X | left-pad X value
+ * 18-19 | s16 | ABS_HAT0Y | left-pad Y value
+ * 20-21 | s16 | ABS_HAT1X | right-pad X value
+ * 22-23 | s16 | ABS_HAT1Y | right-pad Y value
+ * 24-25 | s16 | -- | accelerometer X value
+ * 26-27 | s16 | -- | accelerometer Y value
+ * 28-29 | s16 | -- | accelerometer Z value
+ * 30-31 | s16 | -- | gyro X value
+ * 32-33 | s16 | -- | gyro Y value
+ * 34-35 | s16 | -- | gyro Z value
+ * 36-37 | s16 | -- | quaternion W value
+ * 38-39 | s16 | -- | quaternion X value
+ * 40-41 | s16 | -- | quaternion Y value
+ * 42-43 | s16 | -- | quaternion Z value
+ * 44-45 | u16 | ABS_HAT2Y | left trigger (uncalibrated)
+ * 46-47 | u16 | ABS_HAT2X | right trigger (uncalibrated)
+ * 48-49 | s16 | ABS_X | left joystick X
+ * 50-51 | s16 | ABS_Y | left joystick Y
+ * 52-53 | s16 | ABS_RX | right joystick X
+ * 54-55 | s16 | ABS_RY | right joystick Y
+ * 56-57 | u16 | -- | left pad pressure
+ * 58-59 | u16 | -- | right pad pressure
+ *
+ * The buttons are:
+ * Bit | Mapped to | Description
+ * ------+------------+--------------------------------
+ * 8.0 | BTN_TR2 | right trigger fully pressed
+ * 8.1 | BTN_TL2 | left trigger fully pressed
+ * 8.2 | BTN_TR | right shoulder
+ * 8.3 | BTN_TL | left shoulder
+ * 8.4 | BTN_Y | button Y
+ * 8.5 | BTN_B | button B
+ * 8.6 | BTN_X | button X
+ * 8.7 | BTN_A | button A
+ * 9.0 | BTN_DPAD_UP | left-pad up
+ * 9.1 | BTN_DPAD_RIGHT | left-pad right
+ * 9.2 | BTN_DPAD_LEFT | left-pad left
+ * 9.3 | BTN_DPAD_DOWN | left-pad down
+ * 9.4 | BTN_SELECT | menu left
+ * 9.5 | BTN_MODE | steam logo
+ * 9.6 | BTN_START | menu right
+ * 9.7 | BTN_TRIGGER_HAPPY3 | left bottom grip button
+ * 10.0 | BTN_TRIGGER_HAPPY4 | right bottom grip button
+ * 10.1 | BTN_THUMB | left pad pressed
+ * 10.2 | BTN_THUMB2 | right pad pressed
+ * 10.3 | -- | left pad touched
+ * 10.4 | -- | right pad touched
+ * 10.5 | -- | unknown
+ * 10.6 | BTN_THUMBL | left joystick clicked
+ * 10.7 | -- | unknown
+ * 11.0 | -- | unknown
+ * 11.1 | -- | unknown
+ * 11.2 | BTN_THUMBR | right joystick clicked
+ * 11.3 | -- | unknown
+ * 11.4 | -- | unknown
+ * 11.5 | -- | unknown
+ * 11.6 | -- | unknown
+ * 11.7 | -- | unknown
+ * 12.0 | -- | unknown
+ * 12.1 | -- | unknown
+ * 12.2 | -- | unknown
+ * 12.3 | -- | unknown
+ * 12.4 | -- | unknown
+ * 12.5 | -- | unknown
+ * 12.6 | -- | unknown
+ * 12.7 | -- | unknown
+ * 13.0 | -- | unknown
+ * 13.1 | BTN_TRIGGER_HAPPY1 | left top grip button
+ * 13.2 | BTN_TRIGGER_HAPPY2 | right top grip button
+ * 13.3 | -- | unknown
+ * 13.4 | -- | unknown
+ * 13.5 | -- | unknown
+ * 13.6 | -- | left joystick touched
+ * 13.7 | -- | right joystick touched
+ * 14.0 | -- | unknown
+ * 14.1 | -- | unknown
+ * 14.2 | BTN_BASE | quick access button
+ * 14.3 | -- | unknown
+ * 14.4 | -- | unknown
+ * 14.5 | -- | unknown
+ * 14.6 | -- | unknown
+ * 14.7 | -- | unknown
+ * 15.0 | -- | unknown
+ * 15.1 | -- | unknown
+ * 15.2 | -- | unknown
+ * 15.3 | -- | unknown
+ * 15.4 | -- | unknown
+ * 15.5 | -- | unknown
+ * 15.6 | -- | unknown
+ * 15.7 | -- | unknown
+ */
+static void steam_do_deck_input_event(struct steam_device *steam,
+ struct input_dev *input, u8 *data)
+{
+ u8 b8, b9, b10, b11, b13, b14;
+ bool lpad_touched, rpad_touched;
+
+ b8 = data[8];
+ b9 = data[9];
+ b10 = data[10];
+ b11 = data[11];
+ b13 = data[13];
+ b14 = data[14];
+
+ lpad_touched = b10 & BIT(3);
+ rpad_touched = b10 & BIT(4);
+
+ if (lpad_touched) {
+ input_report_abs(input, ABS_HAT0X, steam_le16(data + 16));
+ input_report_abs(input, ABS_HAT0Y, steam_le16(data + 18));
+ } else {
+ input_report_abs(input, ABS_HAT0X, 0);
+ input_report_abs(input, ABS_HAT0Y, 0);
+ }
+
+ if (rpad_touched) {
+ input_report_abs(input, ABS_HAT1X, steam_le16(data + 20));
+ input_report_abs(input, ABS_HAT1Y, steam_le16(data + 22));
+ } else {
+ input_report_abs(input, ABS_HAT1X, 0);
+ input_report_abs(input, ABS_HAT1Y, 0);
+ }
+
+ input_report_abs(input, ABS_X, steam_le16(data + 48));
+ input_report_abs(input, ABS_Y, -steam_le16(data + 50));
+ input_report_abs(input, ABS_RX, steam_le16(data + 52));
+ input_report_abs(input, ABS_RY, -steam_le16(data + 54));
+
+ input_report_abs(input, ABS_HAT2Y, steam_le16(data + 44));
+ input_report_abs(input, ABS_HAT2X, steam_le16(data + 46));
+
+ input_event(input, EV_KEY, BTN_TR2, !!(b8 & BIT(0)));
+ input_event(input, EV_KEY, BTN_TL2, !!(b8 & BIT(1)));
+ input_event(input, EV_KEY, BTN_TR, !!(b8 & BIT(2)));
+ input_event(input, EV_KEY, BTN_TL, !!(b8 & BIT(3)));
+ input_event(input, EV_KEY, BTN_Y, !!(b8 & BIT(4)));
+ input_event(input, EV_KEY, BTN_B, !!(b8 & BIT(5)));
+ input_event(input, EV_KEY, BTN_X, !!(b8 & BIT(6)));
+ input_event(input, EV_KEY, BTN_A, !!(b8 & BIT(7)));
+ input_event(input, EV_KEY, BTN_SELECT, !!(b9 & BIT(4)));
+ input_event(input, EV_KEY, BTN_MODE, !!(b9 & BIT(5)));
+ input_event(input, EV_KEY, BTN_START, !!(b9 & BIT(6)));
+ input_event(input, EV_KEY, BTN_TRIGGER_HAPPY3, !!(b9 & BIT(7)));
+ input_event(input, EV_KEY, BTN_TRIGGER_HAPPY4, !!(b10 & BIT(0)));
+ input_event(input, EV_KEY, BTN_THUMBL, !!(b10 & BIT(6)));
+ input_event(input, EV_KEY, BTN_THUMBR, !!(b11 & BIT(2)));
+ input_event(input, EV_KEY, BTN_DPAD_UP, !!(b9 & BIT(0)));
+ input_event(input, EV_KEY, BTN_DPAD_RIGHT, !!(b9 & BIT(1)));
+ input_event(input, EV_KEY, BTN_DPAD_LEFT, !!(b9 & BIT(2)));
+ input_event(input, EV_KEY, BTN_DPAD_DOWN, !!(b9 & BIT(3)));
+ input_event(input, EV_KEY, BTN_THUMB, !!(b10 & BIT(1)));
+ input_event(input, EV_KEY, BTN_THUMB2, !!(b10 & BIT(2)));
+ input_event(input, EV_KEY, BTN_TRIGGER_HAPPY1, !!(b13 & BIT(1)));
+ input_event(input, EV_KEY, BTN_TRIGGER_HAPPY2, !!(b13 & BIT(2)));
+ input_event(input, EV_KEY, BTN_BASE, !!(b14 & BIT(2)));
+
+ input_sync(input);
+}
+
+/*
* The size for this message payload is 11.
* The known values are:
* Offset| Type | Meaning
@@ -1052,6 +1368,7 @@ static int steam_raw_event(struct hid_device *hdev,
* 0x01: input data (60 bytes)
* 0x03: wireless connect/disconnect (1 byte)
* 0x04: battery status (11 bytes)
+ * 0x09: Steam Deck input data (56 bytes)
*/
if (size != 64 || data[0] != 1 || data[1] != 0)
@@ -1067,6 +1384,15 @@ static int steam_raw_event(struct hid_device *hdev,
steam_do_input_event(steam, input, data);
rcu_read_unlock();
break;
+ case STEAM_EV_DECK_INPUT_DATA:
+ if (steam->client_opened)
+ return 0;
+ rcu_read_lock();
+ input = rcu_dereference(steam->input);
+ if (likely(input))
+ steam_do_deck_input_event(steam, input, data);
+ rcu_read_unlock();
+ break;
case STEAM_EV_CONNECT:
/*
* The payload of this event is a single byte:
@@ -1141,6 +1467,11 @@ static const struct hid_device_id steam_controllers[] = {
USB_DEVICE_ID_STEAM_CONTROLLER_WIRELESS),
.driver_data = STEAM_QUIRK_WIRELESS
},
+ { /* Steam Deck */
+ HID_USB_DEVICE(USB_VENDOR_ID_VALVE,
+ USB_DEVICE_ID_STEAM_DECK),
+ .driver_data = STEAM_QUIRK_DECK
+ },
{}
};
diff --git a/drivers/hid/hid-uclogic-core-test.c b/drivers/hid/hid-uclogic-core-test.c
new file mode 100644
index 000000000000..2bb916226a38
--- /dev/null
+++ b/drivers/hid/hid-uclogic-core-test.c
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/*
+ * HID driver for UC-Logic devices not fully compliant with HID standard
+ *
+ * Copyright (c) 2022 José Expósito <jose.exposito89@gmail.com>
+ */
+
+#include <kunit/test.h>
+#include "./hid-uclogic-params.h"
+
+#define MAX_EVENT_SIZE 12
+
+struct uclogic_raw_event_hook_test {
+ u8 event[MAX_EVENT_SIZE];
+ size_t size;
+ bool expected;
+};
+
+static struct uclogic_raw_event_hook_test hook_events[] = {
+ {
+ .event = { 0xA1, 0xB2, 0xC3, 0xD4 },
+ .size = 4,
+ },
+ {
+ .event = { 0x1F, 0x2E, 0x3D, 0x4C, 0x5B, 0x6A },
+ .size = 6,
+ },
+};
+
+static struct uclogic_raw_event_hook_test test_events[] = {
+ {
+ .event = { 0xA1, 0xB2, 0xC3, 0xD4 },
+ .size = 4,
+ .expected = true,
+ },
+ {
+ .event = { 0x1F, 0x2E, 0x3D, 0x4C, 0x5B, 0x6A },
+ .size = 6,
+ .expected = true,
+ },
+ {
+ .event = { 0xA1, 0xB2, 0xC3 },
+ .size = 3,
+ .expected = false,
+ },
+ {
+ .event = { 0xA1, 0xB2, 0xC3, 0xD4, 0x00 },
+ .size = 5,
+ .expected = false,
+ },
+ {
+ .event = { 0x2E, 0x3D, 0x4C, 0x5B, 0x6A, 0x1F },
+ .size = 6,
+ .expected = false,
+ },
+};
+
+static void hid_test_uclogic_exec_event_hook_test(struct kunit *test)
+{
+ struct uclogic_params p = {0, };
+ struct uclogic_raw_event_hook *filter;
+ bool res;
+ int n;
+
+ /* Initialize the list of events to hook */
+ p.event_hooks = kunit_kzalloc(test, sizeof(*p.event_hooks), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, p.event_hooks);
+ INIT_LIST_HEAD(&p.event_hooks->list);
+
+ for (n = 0; n < ARRAY_SIZE(hook_events); n++) {
+ filter = kunit_kzalloc(test, sizeof(*filter), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, filter);
+
+ filter->size = hook_events[n].size;
+ filter->event = kunit_kzalloc(test, filter->size, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, filter->event);
+ memcpy(filter->event, &hook_events[n].event[0], filter->size);
+
+ list_add_tail(&filter->list, &p.event_hooks->list);
+ }
+
+ /* Test uclogic_exec_event_hook() */
+ for (n = 0; n < ARRAY_SIZE(test_events); n++) {
+ res = uclogic_exec_event_hook(&p, &test_events[n].event[0],
+ test_events[n].size);
+ KUNIT_ASSERT_EQ(test, res, test_events[n].expected);
+ }
+}
+
+static struct kunit_case hid_uclogic_core_test_cases[] = {
+ KUNIT_CASE(hid_test_uclogic_exec_event_hook_test),
+ {}
+};
+
+static struct kunit_suite hid_uclogic_core_test_suite = {
+ .name = "hid_uclogic_core_test",
+ .test_cases = hid_uclogic_core_test_cases,
+};
+
+kunit_test_suite(hid_uclogic_core_test_suite);
+
+MODULE_DESCRIPTION("KUnit tests for the UC-Logic driver");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("José Expósito <jose.exposito89@gmail.com>");
diff --git a/drivers/hid/hid-uclogic-core.c b/drivers/hid/hid-uclogic-core.c
index cfbbc39807a6..f67835f9ed4c 100644
--- a/drivers/hid/hid-uclogic-core.c
+++ b/drivers/hid/hid-uclogic-core.c
@@ -22,25 +22,6 @@
#include "hid-ids.h"
-/* Driver data */
-struct uclogic_drvdata {
- /* Interface parameters */
- struct uclogic_params params;
- /* Pointer to the replacement report descriptor. NULL if none. */
- __u8 *desc_ptr;
- /*
- * Size of the replacement report descriptor.
- * Only valid if desc_ptr is not NULL
- */
- unsigned int desc_size;
- /* Pen input device */
- struct input_dev *pen_input;
- /* In-range timer */
- struct timer_list inrange_timer;
- /* Last rotary encoder state, or U8_MAX for none */
- u8 re_state;
-};
-
/**
* uclogic_inrange_timeout - handle pen in-range state timeout.
* Emulate input events normally generated when pen goes out of range for
@@ -202,6 +183,7 @@ static int uclogic_probe(struct hid_device *hdev,
}
timer_setup(&drvdata->inrange_timer, uclogic_inrange_timeout, 0);
drvdata->re_state = U8_MAX;
+ drvdata->quirks = id->driver_data;
hid_set_drvdata(hdev, drvdata);
/* Initialize the device and retrieve interface parameters */
@@ -268,6 +250,34 @@ static int uclogic_resume(struct hid_device *hdev)
#endif
/**
+ * uclogic_exec_event_hook - if the received event is hooked schedules the
+ * associated work.
+ *
+ * @p: Tablet interface report parameters.
+ * @event: Raw event.
+ * @size: The size of event.
+ *
+ * Returns:
+ * Whether the event was hooked or not.
+ */
+static bool uclogic_exec_event_hook(struct uclogic_params *p, u8 *event, int size)
+{
+ struct uclogic_raw_event_hook *curr;
+
+ if (!p->event_hooks)
+ return false;
+
+ list_for_each_entry(curr, &p->event_hooks->list, list) {
+ if (curr->size == size && memcmp(curr->event, event, size) == 0) {
+ schedule_work(&curr->work);
+ return true;
+ }
+ }
+
+ return false;
+}
+
+/**
* uclogic_raw_event_pen - handle raw pen events (pen HID reports).
*
* @drvdata: Driver data.
@@ -425,6 +435,9 @@ static int uclogic_raw_event(struct hid_device *hdev,
if (report->type != HID_INPUT_REPORT)
return 0;
+ if (uclogic_exec_event_hook(params, data, size))
+ return 0;
+
while (true) {
/* Tweak pen reports, if necessary */
if ((report_id == params->pen.id) && (size >= 2)) {
@@ -530,8 +543,14 @@ static const struct hid_device_id uclogic_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_UGEE,
USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO_L) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UGEE,
+ USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO_PRO_MW),
+ .driver_data = UCLOGIC_MOUSE_FRAME_QUIRK | UCLOGIC_BATTERY_QUIRK },
+ { HID_USB_DEVICE(USB_VENDOR_ID_UGEE,
USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO_PRO_S) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UGEE,
+ USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO_PRO_SW),
+ .driver_data = UCLOGIC_MOUSE_FRAME_QUIRK | UCLOGIC_BATTERY_QUIRK },
+ { HID_USB_DEVICE(USB_VENDOR_ID_UGEE,
USB_DEVICE_ID_UGEE_XPPEN_TABLET_STAR06) },
{ }
};
@@ -556,3 +575,7 @@ module_hid_driver(uclogic_driver);
MODULE_AUTHOR("Martin Rusko");
MODULE_AUTHOR("Nikolai Kondrashov");
MODULE_LICENSE("GPL");
+
+#ifdef CONFIG_HID_KUNIT_TEST
+#include "hid-uclogic-core-test.c"
+#endif
diff --git a/drivers/hid/hid-uclogic-params-test.c b/drivers/hid/hid-uclogic-params-test.c
index bfa7ccb7d1e8..678f50cbb160 100644
--- a/drivers/hid/hid-uclogic-params-test.c
+++ b/drivers/hid/hid-uclogic-params-test.c
@@ -174,9 +174,25 @@ static void hid_test_uclogic_parse_ugee_v2_desc(struct kunit *test)
KUNIT_EXPECT_EQ(test, params->frame_type, frame_type);
}
+static void hid_test_uclogic_params_cleanup_event_hooks(struct kunit *test)
+{
+ int res, n;
+ struct uclogic_params p = {0, };
+
+ res = uclogic_params_ugee_v2_init_event_hooks(NULL, &p);
+ KUNIT_ASSERT_EQ(test, res, 0);
+
+ /* Check that the function can be called repeatedly */
+ for (n = 0; n < 4; n++) {
+ uclogic_params_cleanup_event_hooks(&p);
+ KUNIT_EXPECT_PTR_EQ(test, p.event_hooks, NULL);
+ }
+}
+
static struct kunit_case hid_uclogic_params_test_cases[] = {
KUNIT_CASE_PARAM(hid_test_uclogic_parse_ugee_v2_desc,
uclogic_parse_ugee_v2_desc_gen_params),
+ KUNIT_CASE(hid_test_uclogic_params_cleanup_event_hooks),
{}
};
diff --git a/drivers/hid/hid-uclogic-params.c b/drivers/hid/hid-uclogic-params.c
index 3c5eea3df328..9859dad36495 100644
--- a/drivers/hid/hid-uclogic-params.c
+++ b/drivers/hid/hid-uclogic-params.c
@@ -616,6 +616,31 @@ cleanup:
}
/**
+ * uclogic_params_cleanup_event_hooks - free resources used by the list of raw
+ * event hooks.
+ * Can be called repeatedly.
+ *
+ * @params: Input parameters to cleanup. Cannot be NULL.
+ */
+static void uclogic_params_cleanup_event_hooks(struct uclogic_params *params)
+{
+ struct uclogic_raw_event_hook *curr, *n;
+
+ if (!params || !params->event_hooks)
+ return;
+
+ list_for_each_entry_safe(curr, n, &params->event_hooks->list, list) {
+ cancel_work_sync(&curr->work);
+ list_del(&curr->list);
+ kfree(curr->event);
+ kfree(curr);
+ }
+
+ kfree(params->event_hooks);
+ params->event_hooks = NULL;
+}
+
+/**
* uclogic_params_cleanup - free resources used by struct uclogic_params
* (tablet interface's parameters).
* Can be called repeatedly.
@@ -631,6 +656,7 @@ void uclogic_params_cleanup(struct uclogic_params *params)
for (i = 0; i < ARRAY_SIZE(params->frame_list); i++)
uclogic_params_frame_cleanup(&params->frame_list[i]);
+ uclogic_params_cleanup_event_hooks(params);
memset(params, 0, sizeof(*params));
}
}
@@ -1021,8 +1047,8 @@ cleanup:
* Returns:
* Zero, if successful. A negative errno code on error.
*/
-static int uclogic_probe_interface(struct hid_device *hdev, u8 *magic_arr,
- int magic_size, int endpoint)
+static int uclogic_probe_interface(struct hid_device *hdev, const u8 *magic_arr,
+ size_t magic_size, int endpoint)
{
struct usb_device *udev;
unsigned int pipe = 0;
@@ -1222,6 +1248,11 @@ static int uclogic_params_ugee_v2_init_frame_mouse(struct uclogic_params *p)
*/
static bool uclogic_params_ugee_v2_has_battery(struct hid_device *hdev)
{
+ struct uclogic_drvdata *drvdata = hid_get_drvdata(hdev);
+
+ if (drvdata->quirks & UCLOGIC_BATTERY_QUIRK)
+ return true;
+
/* The XP-PEN Deco LW vendor, product and version are identical to the
* Deco L. The only difference reported by their firmware is the product
* name. Add a quirk to support battery reporting on the wireless
@@ -1276,6 +1307,72 @@ static int uclogic_params_ugee_v2_init_battery(struct hid_device *hdev,
}
/**
+ * uclogic_params_ugee_v2_reconnect_work() - When a wireless tablet looses
+ * connection to the USB dongle and reconnects, either because of its physical
+ * distance or because it was switches off and on using the frame's switch,
+ * uclogic_probe_interface() needs to be called again to enable the tablet.
+ *
+ * @work: The work that triggered this function.
+ */
+static void uclogic_params_ugee_v2_reconnect_work(struct work_struct *work)
+{
+ struct uclogic_raw_event_hook *event_hook;
+
+ event_hook = container_of(work, struct uclogic_raw_event_hook, work);
+ uclogic_probe_interface(event_hook->hdev, uclogic_ugee_v2_probe_arr,
+ uclogic_ugee_v2_probe_size,
+ uclogic_ugee_v2_probe_endpoint);
+}
+
+/**
+ * uclogic_params_ugee_v2_init_event_hooks() - initialize the list of events
+ * to be hooked for UGEE v2 devices.
+ * @hdev: The HID device of the tablet interface to initialize and get
+ * parameters from.
+ * @p: Parameters to fill in, cannot be NULL.
+ *
+ * Returns:
+ * Zero, if successful. A negative errno code on error.
+ */
+static int uclogic_params_ugee_v2_init_event_hooks(struct hid_device *hdev,
+ struct uclogic_params *p)
+{
+ struct uclogic_raw_event_hook *event_hook;
+ __u8 reconnect_event[] = {
+ /* Event received on wireless tablet reconnection */
+ 0x02, 0xF8, 0x02, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+ };
+
+ if (!p)
+ return -EINVAL;
+
+ /* The reconnection event is only received if the tablet has battery */
+ if (!uclogic_params_ugee_v2_has_battery(hdev))
+ return 0;
+
+ p->event_hooks = kzalloc(sizeof(*p->event_hooks), GFP_KERNEL);
+ if (!p->event_hooks)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&p->event_hooks->list);
+
+ event_hook = kzalloc(sizeof(*event_hook), GFP_KERNEL);
+ if (!event_hook)
+ return -ENOMEM;
+
+ INIT_WORK(&event_hook->work, uclogic_params_ugee_v2_reconnect_work);
+ event_hook->hdev = hdev;
+ event_hook->size = ARRAY_SIZE(reconnect_event);
+ event_hook->event = kmemdup(reconnect_event, event_hook->size, GFP_KERNEL);
+ if (!event_hook->event)
+ return -ENOMEM;
+
+ list_add_tail(&event_hook->list, &p->event_hooks->list);
+
+ return 0;
+}
+
+/**
* uclogic_params_ugee_v2_init() - initialize a UGEE graphics tablets by
* discovering their parameters.
*
@@ -1298,6 +1395,7 @@ static int uclogic_params_ugee_v2_init(struct uclogic_params *params,
struct hid_device *hdev)
{
int rc = 0;
+ struct uclogic_drvdata *drvdata;
struct usb_interface *iface;
__u8 bInterfaceNumber;
const int str_desc_len = 12;
@@ -1305,9 +1403,6 @@ static int uclogic_params_ugee_v2_init(struct uclogic_params *params,
__u8 *rdesc_pen = NULL;
s32 desc_params[UCLOGIC_RDESC_PH_ID_NUM];
enum uclogic_params_frame_type frame_type;
- __u8 magic_arr[] = {
- 0x02, 0xb0, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
- };
/* The resulting parameters (noop) */
struct uclogic_params p = {0, };
@@ -1316,6 +1411,7 @@ static int uclogic_params_ugee_v2_init(struct uclogic_params *params,
goto cleanup;
}
+ drvdata = hid_get_drvdata(hdev);
iface = to_usb_interface(hdev->dev.parent);
bInterfaceNumber = iface->cur_altsetting->desc.bInterfaceNumber;
@@ -1337,7 +1433,9 @@ static int uclogic_params_ugee_v2_init(struct uclogic_params *params,
* The specific data was discovered by sniffing the Windows driver
* traffic.
*/
- rc = uclogic_probe_interface(hdev, magic_arr, sizeof(magic_arr), 0x03);
+ rc = uclogic_probe_interface(hdev, uclogic_ugee_v2_probe_arr,
+ uclogic_ugee_v2_probe_size,
+ uclogic_ugee_v2_probe_endpoint);
if (rc) {
uclogic_params_init_invalid(&p);
goto output;
@@ -1382,6 +1480,9 @@ static int uclogic_params_ugee_v2_init(struct uclogic_params *params,
p.pen.subreport_list[0].id = UCLOGIC_RDESC_V1_FRAME_ID;
/* Initialize the frame interface */
+ if (drvdata->quirks & UCLOGIC_MOUSE_FRAME_QUIRK)
+ frame_type = UCLOGIC_PARAMS_FRAME_MOUSE;
+
switch (frame_type) {
case UCLOGIC_PARAMS_FRAME_DIAL:
case UCLOGIC_PARAMS_FRAME_MOUSE:
@@ -1407,6 +1508,13 @@ static int uclogic_params_ugee_v2_init(struct uclogic_params *params,
}
}
+ /* Create a list of raw events to be ignored */
+ rc = uclogic_params_ugee_v2_init_event_hooks(hdev, &p);
+ if (rc) {
+ hid_err(hdev, "error initializing event hook list: %d\n", rc);
+ goto cleanup;
+ }
+
output:
/* Output parameters */
memcpy(params, &p, sizeof(*params));
@@ -1660,7 +1768,11 @@ int uclogic_params_init(struct uclogic_params *params,
case VID_PID(USB_VENDOR_ID_UGEE,
USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO_L):
case VID_PID(USB_VENDOR_ID_UGEE,
+ USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO_PRO_MW):
+ case VID_PID(USB_VENDOR_ID_UGEE,
USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO_PRO_S):
+ case VID_PID(USB_VENDOR_ID_UGEE,
+ USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO_PRO_SW):
rc = uclogic_params_ugee_v2_init(&p, hdev);
if (rc != 0)
goto cleanup;
diff --git a/drivers/hid/hid-uclogic-params.h b/drivers/hid/hid-uclogic-params.h
index a97477c02ff8..d6ffadb2f601 100644
--- a/drivers/hid/hid-uclogic-params.h
+++ b/drivers/hid/hid-uclogic-params.h
@@ -18,6 +18,10 @@
#include <linux/usb.h>
#include <linux/hid.h>
+#include <linux/list.h>
+
+#define UCLOGIC_MOUSE_FRAME_QUIRK BIT(0)
+#define UCLOGIC_BATTERY_QUIRK BIT(1)
/* Types of pen in-range reporting */
enum uclogic_params_pen_inrange {
@@ -174,6 +178,17 @@ struct uclogic_params_frame {
};
/*
+ * List of works to be performed when a certain raw event is received.
+ */
+struct uclogic_raw_event_hook {
+ struct hid_device *hdev;
+ __u8 *event;
+ size_t size;
+ struct work_struct work;
+ struct list_head list;
+};
+
+/*
* Tablet interface report parameters.
*
* Must use declarative (descriptive) language, not imperative, to simplify
@@ -213,6 +228,31 @@ struct uclogic_params {
* parts. Only valid, if "invalid" is false.
*/
struct uclogic_params_frame frame_list[3];
+ /*
+ * List of event hooks.
+ */
+ struct uclogic_raw_event_hook *event_hooks;
+};
+
+/* Driver data */
+struct uclogic_drvdata {
+ /* Interface parameters */
+ struct uclogic_params params;
+ /* Pointer to the replacement report descriptor. NULL if none. */
+ __u8 *desc_ptr;
+ /*
+ * Size of the replacement report descriptor.
+ * Only valid if desc_ptr is not NULL
+ */
+ unsigned int desc_size;
+ /* Pen input device */
+ struct input_dev *pen_input;
+ /* In-range timer */
+ struct timer_list inrange_timer;
+ /* Last rotary encoder state, or U8_MAX for none */
+ u8 re_state;
+ /* Device quirks */
+ unsigned long quirks;
};
/* Initialize a tablet interface and discover its parameters */
diff --git a/drivers/hid/hid-uclogic-rdesc-test.c b/drivers/hid/hid-uclogic-rdesc-test.c
index b429c541bf2f..90bf4e586e01 100644
--- a/drivers/hid/hid-uclogic-rdesc-test.c
+++ b/drivers/hid/hid-uclogic-rdesc-test.c
@@ -197,8 +197,7 @@ static void hid_test_uclogic_template(struct kunit *test)
params->param_list,
params->param_num);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, res);
- KUNIT_EXPECT_EQ(test, 0,
- memcmp(res, params->expected, params->template_size));
+ KUNIT_EXPECT_MEMEQ(test, res, params->expected, params->template_size);
kfree(res);
}
diff --git a/drivers/hid/hid-uclogic-rdesc.c b/drivers/hid/hid-uclogic-rdesc.c
index fb40775f5f5b..b6dfdf6356a6 100644
--- a/drivers/hid/hid-uclogic-rdesc.c
+++ b/drivers/hid/hid-uclogic-rdesc.c
@@ -859,6 +859,12 @@ const __u8 uclogic_rdesc_v2_frame_dial_arr[] = {
const size_t uclogic_rdesc_v2_frame_dial_size =
sizeof(uclogic_rdesc_v2_frame_dial_arr);
+const __u8 uclogic_ugee_v2_probe_arr[] = {
+ 0x02, 0xb0, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+const size_t uclogic_ugee_v2_probe_size = sizeof(uclogic_ugee_v2_probe_arr);
+const int uclogic_ugee_v2_probe_endpoint = 0x03;
+
/* Fixed report descriptor template for UGEE v2 pen reports */
const __u8 uclogic_rdesc_ugee_v2_pen_template_arr[] = {
0x05, 0x0d, /* Usage Page (Digitizers), */
diff --git a/drivers/hid/hid-uclogic-rdesc.h b/drivers/hid/hid-uclogic-rdesc.h
index a1f78c07293f..906d068f50a9 100644
--- a/drivers/hid/hid-uclogic-rdesc.h
+++ b/drivers/hid/hid-uclogic-rdesc.h
@@ -164,6 +164,11 @@ extern const size_t uclogic_rdesc_v2_frame_dial_size;
/* Report ID for tweaked UGEE v2 battery reports */
#define UCLOGIC_RDESC_UGEE_V2_BATTERY_ID 0xba
+/* Magic data expected by UGEEv2 devices on probe */
+extern const __u8 uclogic_ugee_v2_probe_arr[];
+extern const size_t uclogic_ugee_v2_probe_size;
+extern const int uclogic_ugee_v2_probe_endpoint;
+
/* Fixed report descriptor template for UGEE v2 pen reports */
extern const __u8 uclogic_rdesc_ugee_v2_pen_template_arr[];
extern const size_t uclogic_rdesc_ugee_v2_pen_template_size;
diff --git a/drivers/hid/i2c-hid/Kconfig b/drivers/hid/i2c-hid/Kconfig
index d65abe65ce73..4439be7fa74d 100644
--- a/drivers/hid/i2c-hid/Kconfig
+++ b/drivers/hid/i2c-hid/Kconfig
@@ -1,11 +1,15 @@
# SPDX-License-Identifier: GPL-2.0-only
-menu "I2C HID support"
- depends on I2C
+menuconfig I2C_HID
+ tristate "I2C HID support"
+ default y
+ depends on I2C && INPUT && HID
+
+if I2C_HID
config I2C_HID_ACPI
tristate "HID over I2C transport layer ACPI driver"
- default n
- depends on I2C && INPUT && ACPI
+ depends on ACPI
+ select I2C_HID_CORE
help
Say Y here if you use a keyboard, a touchpad, a touchscreen, or any
other HID based devices which is connected to your computer via I2C.
@@ -19,8 +23,8 @@ config I2C_HID_ACPI
config I2C_HID_OF
tristate "HID over I2C transport layer Open Firmware driver"
- default n
- depends on I2C && INPUT && OF
+ depends on OF
+ select I2C_HID_CORE
help
Say Y here if you use a keyboard, a touchpad, a touchscreen, or any
other HID based devices which is connected to your computer via I2C.
@@ -34,8 +38,8 @@ config I2C_HID_OF
config I2C_HID_OF_ELAN
tristate "Driver for Elan hid-i2c based devices on OF systems"
- default n
- depends on I2C && INPUT && OF
+ depends on OF
+ select I2C_HID_CORE
help
Say Y here if you want support for Elan i2c devices that use
the i2c-hid protocol on Open Firmware (Device Tree)-based
@@ -49,8 +53,8 @@ config I2C_HID_OF_ELAN
config I2C_HID_OF_GOODIX
tristate "Driver for Goodix hid-i2c based devices on OF systems"
- default n
- depends on I2C && INPUT && OF
+ depends on OF
+ select I2C_HID_CORE
help
Say Y here if you want support for Goodix i2c devices that use
the i2c-hid protocol on Open Firmware (Device Tree)-based
@@ -62,10 +66,7 @@ config I2C_HID_OF_GOODIX
will be called i2c-hid-of-goodix. It will also build/depend on
the module i2c-hid.
-endmenu
-
config I2C_HID_CORE
tristate
- default y if I2C_HID_ACPI=y || I2C_HID_OF=y || I2C_HID_OF_ELAN=y || I2C_HID_OF_GOODIX=y
- default m if I2C_HID_ACPI=m || I2C_HID_OF=m || I2C_HID_OF_ELAN=m || I2C_HID_OF_GOODIX=m
- select HID
+endif
+
diff --git a/drivers/hid/i2c-hid/i2c-hid-acpi.c b/drivers/hid/i2c-hid/i2c-hid-acpi.c
index 375c77c3db74..3a7e2bcb5412 100644
--- a/drivers/hid/i2c-hid/i2c-hid-acpi.c
+++ b/drivers/hid/i2c-hid/i2c-hid-acpi.c
@@ -39,8 +39,8 @@ static const struct acpi_device_id i2c_hid_acpi_blacklist[] = {
* The CHPN0001 ACPI device, which is used to describe the Chipone
* ICN8505 controller, has a _CID of PNP0C50 but is not HID compatible.
*/
- {"CHPN0001", 0 },
- { },
+ { "CHPN0001" },
+ { }
};
/* HID I²C Device: 3cdff6f7-4267-4555-ad05-b30a3d8938de */
@@ -48,8 +48,9 @@ static guid_t i2c_hid_guid =
GUID_INIT(0x3CDFF6F7, 0x4267, 0x4555,
0xAD, 0x05, 0xB3, 0x0A, 0x3D, 0x89, 0x38, 0xDE);
-static int i2c_hid_acpi_get_descriptor(struct acpi_device *adev)
+static int i2c_hid_acpi_get_descriptor(struct i2c_hid_acpi *ihid_acpi)
{
+ struct acpi_device *adev = ihid_acpi->adev;
acpi_handle handle = acpi_device_handle(adev);
union acpi_object *obj;
u16 hid_descriptor_address;
@@ -81,38 +82,31 @@ static int i2c_hid_acpi_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct i2c_hid_acpi *ihid_acpi;
- struct acpi_device *adev;
u16 hid_descriptor_address;
int ret;
- adev = ACPI_COMPANION(dev);
- if (!adev) {
- dev_err(&client->dev, "Error could not get ACPI device\n");
- return -ENODEV;
- }
-
ihid_acpi = devm_kzalloc(&client->dev, sizeof(*ihid_acpi), GFP_KERNEL);
if (!ihid_acpi)
return -ENOMEM;
- ihid_acpi->adev = adev;
+ ihid_acpi->adev = ACPI_COMPANION(dev);
ihid_acpi->ops.shutdown_tail = i2c_hid_acpi_shutdown_tail;
- ret = i2c_hid_acpi_get_descriptor(adev);
+ ret = i2c_hid_acpi_get_descriptor(ihid_acpi);
if (ret < 0)
return ret;
hid_descriptor_address = ret;
- acpi_device_fix_up_power(adev);
+ acpi_device_fix_up_power(ihid_acpi->adev);
return i2c_hid_core_probe(client, &ihid_acpi->ops,
hid_descriptor_address, 0);
}
static const struct acpi_device_id i2c_hid_acpi_match[] = {
- {"ACPI0C50", 0 },
- {"PNP0C50", 0 },
- { },
+ { "ACPI0C50" },
+ { "PNP0C50" },
+ { }
};
MODULE_DEVICE_TABLE(acpi, i2c_hid_acpi_match);
diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c
index b86b62f97108..efbba0465eef 100644
--- a/drivers/hid/i2c-hid/i2c-hid-core.c
+++ b/drivers/hid/i2c-hid/i2c-hid-core.c
@@ -67,16 +67,7 @@
#define I2C_HID_PWR_ON 0x00
#define I2C_HID_PWR_SLEEP 0x01
-/* debug option */
-static bool debug;
-module_param(debug, bool, 0444);
-MODULE_PARM_DESC(debug, "print a lot of debug information");
-
-#define i2c_hid_dbg(ihid, fmt, arg...) \
-do { \
- if (debug) \
- dev_printk(KERN_DEBUG, &(ihid)->client->dev, fmt, ##arg); \
-} while (0)
+#define i2c_hid_dbg(ihid, ...) dev_dbg(&(ihid)->client->dev, __VA_ARGS__)
struct i2c_hid_desc {
__le16 wHIDDescLength;
@@ -842,7 +833,7 @@ static void i2c_hid_close(struct hid_device *hid)
clear_bit(I2C_HID_STARTED, &ihid->flags);
}
-struct hid_ll_driver i2c_hid_ll_driver = {
+static const struct hid_ll_driver i2c_hid_ll_driver = {
.parse = i2c_hid_parse,
.start = i2c_hid_start,
.stop = i2c_hid_stop,
@@ -851,7 +842,6 @@ struct hid_ll_driver i2c_hid_ll_driver = {
.output_report = i2c_hid_output_report,
.raw_request = i2c_hid_raw_request,
};
-EXPORT_SYMBOL_GPL(i2c_hid_ll_driver);
static int i2c_hid_init_irq(struct i2c_client *client)
{
@@ -859,7 +849,7 @@ static int i2c_hid_init_irq(struct i2c_client *client)
unsigned long irqflags = 0;
int ret;
- dev_dbg(&client->dev, "Requesting IRQ: %d\n", client->irq);
+ i2c_hid_dbg(ihid, "Requesting IRQ: %d\n", client->irq);
if (!irq_get_trigger_type(client->irq))
irqflags = IRQF_TRIGGER_LOW;
@@ -1003,7 +993,7 @@ int i2c_hid_core_probe(struct i2c_client *client, struct i2chid_ops *ops,
/* Make sure there is something at this address */
ret = i2c_smbus_read_byte(client);
if (ret < 0) {
- dev_dbg(&client->dev, "nothing at this address: %d\n", ret);
+ i2c_hid_dbg(ihid, "nothing at this address: %d\n", ret);
ret = -ENXIO;
goto err_powered;
}
@@ -1035,6 +1025,10 @@ int i2c_hid_core_probe(struct i2c_client *client, struct i2chid_ops *ops,
hid->vendor = le16_to_cpu(ihid->hdesc.wVendorID);
hid->product = le16_to_cpu(ihid->hdesc.wProductID);
+ hid->initial_quirks = quirks;
+ hid->initial_quirks |= i2c_hid_get_dmi_quirks(hid->vendor,
+ hid->product);
+
snprintf(hid->name, sizeof(hid->name), "%s %04X:%04X",
client->name, (u16)hid->vendor, (u16)hid->product);
strscpy(hid->phys, dev_name(&client->dev), sizeof(hid->phys));
@@ -1048,8 +1042,6 @@ int i2c_hid_core_probe(struct i2c_client *client, struct i2chid_ops *ops,
goto err_mem_free;
}
- hid->quirks |= quirks;
-
return 0;
err_mem_free:
diff --git a/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c b/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c
index 8e0f67455c09..210f17c3a0be 100644
--- a/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c
+++ b/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c
@@ -10,8 +10,10 @@
#include <linux/types.h>
#include <linux/dmi.h>
#include <linux/mod_devicetable.h>
+#include <linux/hid.h>
#include "i2c-hid.h"
+#include "../hid-ids.h"
struct i2c_hid_desc_override {
@@ -416,6 +418,28 @@ static const struct dmi_system_id i2c_hid_dmi_desc_override_table[] = {
{ } /* Terminate list */
};
+static const struct hid_device_id i2c_hid_elan_flipped_quirks = {
+ HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8, USB_VENDOR_ID_ELAN, 0x2dcd),
+ HID_QUIRK_X_INVERT | HID_QUIRK_Y_INVERT
+};
+
+/*
+ * This list contains devices which have specific issues based on the system
+ * they're on and not just the device itself. The driver_data will have a
+ * specific hid device to match against.
+ */
+static const struct dmi_system_id i2c_hid_dmi_quirk_table[] = {
+ {
+ .ident = "DynaBook K50/FR",
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dynabook Inc."),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "dynabook K50/FR"),
+ },
+ .driver_data = (void *)&i2c_hid_elan_flipped_quirks,
+ },
+ { } /* Terminate list */
+};
+
struct i2c_hid_desc *i2c_hid_get_dmi_i2c_hid_desc_override(uint8_t *i2c_name)
{
@@ -450,3 +474,21 @@ char *i2c_hid_get_dmi_hid_report_desc_override(uint8_t *i2c_name,
*size = override->hid_report_desc_size;
return override->hid_report_desc;
}
+
+u32 i2c_hid_get_dmi_quirks(const u16 vendor, const u16 product)
+{
+ u32 quirks = 0;
+ const struct dmi_system_id *system_id =
+ dmi_first_match(i2c_hid_dmi_quirk_table);
+
+ if (system_id) {
+ const struct hid_device_id *device_id =
+ (struct hid_device_id *)(system_id->driver_data);
+
+ if (device_id && device_id->vendor == vendor &&
+ device_id->product == product)
+ quirks = device_id->driver_data;
+ }
+
+ return quirks;
+}
diff --git a/drivers/hid/i2c-hid/i2c-hid-of-goodix.c b/drivers/hid/i2c-hid/i2c-hid-of-goodix.c
index 29c6cb174032..0060e3dcd775 100644
--- a/drivers/hid/i2c-hid/i2c-hid-of-goodix.c
+++ b/drivers/hid/i2c-hid/i2c-hid-of-goodix.c
@@ -26,28 +26,33 @@ struct i2c_hid_of_goodix {
struct i2chid_ops ops;
struct regulator *vdd;
- struct notifier_block nb;
+ struct regulator *vddio;
struct gpio_desc *reset_gpio;
const struct goodix_i2c_hid_timing_data *timings;
};
-static void goodix_i2c_hid_deassert_reset(struct i2c_hid_of_goodix *ihid_goodix,
- bool regulator_just_turned_on)
+static int goodix_i2c_hid_power_up(struct i2chid_ops *ops)
{
- if (regulator_just_turned_on && ihid_goodix->timings->post_power_delay_ms)
+ struct i2c_hid_of_goodix *ihid_goodix =
+ container_of(ops, struct i2c_hid_of_goodix, ops);
+ int ret;
+
+ ret = regulator_enable(ihid_goodix->vdd);
+ if (ret)
+ return ret;
+
+ ret = regulator_enable(ihid_goodix->vddio);
+ if (ret)
+ return ret;
+
+ if (ihid_goodix->timings->post_power_delay_ms)
msleep(ihid_goodix->timings->post_power_delay_ms);
gpiod_set_value_cansleep(ihid_goodix->reset_gpio, 0);
if (ihid_goodix->timings->post_gpio_reset_delay_ms)
msleep(ihid_goodix->timings->post_gpio_reset_delay_ms);
-}
-
-static int goodix_i2c_hid_power_up(struct i2chid_ops *ops)
-{
- struct i2c_hid_of_goodix *ihid_goodix =
- container_of(ops, struct i2c_hid_of_goodix, ops);
- return regulator_enable(ihid_goodix->vdd);
+ return 0;
}
static void goodix_i2c_hid_power_down(struct i2chid_ops *ops)
@@ -55,42 +60,15 @@ static void goodix_i2c_hid_power_down(struct i2chid_ops *ops)
struct i2c_hid_of_goodix *ihid_goodix =
container_of(ops, struct i2c_hid_of_goodix, ops);
+ gpiod_set_value_cansleep(ihid_goodix->reset_gpio, 1);
+ regulator_disable(ihid_goodix->vddio);
regulator_disable(ihid_goodix->vdd);
}
-static int ihid_goodix_vdd_notify(struct notifier_block *nb,
- unsigned long event,
- void *ignored)
-{
- struct i2c_hid_of_goodix *ihid_goodix =
- container_of(nb, struct i2c_hid_of_goodix, nb);
- int ret = NOTIFY_OK;
-
- switch (event) {
- case REGULATOR_EVENT_PRE_DISABLE:
- gpiod_set_value_cansleep(ihid_goodix->reset_gpio, 1);
- break;
-
- case REGULATOR_EVENT_ENABLE:
- goodix_i2c_hid_deassert_reset(ihid_goodix, true);
- break;
-
- case REGULATOR_EVENT_ABORT_DISABLE:
- goodix_i2c_hid_deassert_reset(ihid_goodix, false);
- break;
-
- default:
- ret = NOTIFY_DONE;
- break;
- }
-
- return ret;
-}
-
static int i2c_hid_of_goodix_probe(struct i2c_client *client)
{
struct i2c_hid_of_goodix *ihid_goodix;
- int ret;
+
ihid_goodix = devm_kzalloc(&client->dev, sizeof(*ihid_goodix),
GFP_KERNEL);
if (!ihid_goodix)
@@ -109,41 +87,11 @@ static int i2c_hid_of_goodix_probe(struct i2c_client *client)
if (IS_ERR(ihid_goodix->vdd))
return PTR_ERR(ihid_goodix->vdd);
- ihid_goodix->timings = device_get_match_data(&client->dev);
+ ihid_goodix->vddio = devm_regulator_get(&client->dev, "mainboard-vddio");
+ if (IS_ERR(ihid_goodix->vddio))
+ return PTR_ERR(ihid_goodix->vddio);
- /*
- * We need to control the "reset" line in lockstep with the regulator
- * actually turning on an off instead of just when we make the request.
- * This matters if the regulator is shared with another consumer.
- * - If the regulator is off then we must assert reset. The reset
- * line is active low and on some boards it could cause a current
- * leak if left high.
- * - If the regulator is on then we don't want reset asserted for very
- * long. Holding the controller in reset apparently draws extra
- * power.
- */
- ihid_goodix->nb.notifier_call = ihid_goodix_vdd_notify;
- ret = devm_regulator_register_notifier(ihid_goodix->vdd, &ihid_goodix->nb);
- if (ret)
- return dev_err_probe(&client->dev, ret,
- "regulator notifier request failed\n");
-
- /*
- * If someone else is holding the regulator on (or the regulator is
- * an always-on one) we might never be told to deassert reset. Do it
- * now... and temporarily bump the regulator reference count just to
- * make sure it is impossible for this to race with our own notifier!
- * We also assume that someone else might have _just barely_ turned
- * the regulator on so we'll do the full "post_power_delay" just in
- * case.
- */
- if (ihid_goodix->reset_gpio && regulator_is_enabled(ihid_goodix->vdd)) {
- ret = regulator_enable(ihid_goodix->vdd);
- if (ret)
- return ret;
- goodix_i2c_hid_deassert_reset(ihid_goodix, true);
- regulator_disable(ihid_goodix->vdd);
- }
+ ihid_goodix->timings = device_get_match_data(&client->dev);
return i2c_hid_core_probe(client, &ihid_goodix->ops, 0x0001, 0);
}
diff --git a/drivers/hid/i2c-hid/i2c-hid.h b/drivers/hid/i2c-hid/i2c-hid.h
index 96c75510ad3f..2c7b66d5caa0 100644
--- a/drivers/hid/i2c-hid/i2c-hid.h
+++ b/drivers/hid/i2c-hid/i2c-hid.h
@@ -9,6 +9,7 @@
struct i2c_hid_desc *i2c_hid_get_dmi_i2c_hid_desc_override(uint8_t *i2c_name);
char *i2c_hid_get_dmi_hid_report_desc_override(uint8_t *i2c_name,
unsigned int *size);
+u32 i2c_hid_get_dmi_quirks(const u16 vendor, const u16 product);
#else
static inline struct i2c_hid_desc
*i2c_hid_get_dmi_i2c_hid_desc_override(uint8_t *i2c_name)
@@ -16,6 +17,8 @@ static inline struct i2c_hid_desc
static inline char *i2c_hid_get_dmi_hid_report_desc_override(uint8_t *i2c_name,
unsigned int *size)
{ return NULL; }
+static inline u32 i2c_hid_get_dmi_quirks(const u16 vendor, const u16 product)
+{ return 0; }
#endif
/**
diff --git a/drivers/hid/intel-ish-hid/Kconfig b/drivers/hid/intel-ish-hid/Kconfig
index 689da84a520d..253dc10d35ef 100644
--- a/drivers/hid/intel-ish-hid/Kconfig
+++ b/drivers/hid/intel-ish-hid/Kconfig
@@ -6,7 +6,7 @@ config INTEL_ISH_HID
tristate "Intel Integrated Sensor Hub"
default n
depends on X86
- select HID
+ depends on HID
help
The Integrated Sensor Hub (ISH) enables the ability to offload
sensor polling and algorithm processing to a dedicated low power
diff --git a/drivers/hid/intel-ish-hid/ishtp-hid.c b/drivers/hid/intel-ish-hid/ishtp-hid.c
index 14c271d7d8a9..00c6f0ebf356 100644
--- a/drivers/hid/intel-ish-hid/ishtp-hid.c
+++ b/drivers/hid/intel-ish-hid/ishtp-hid.c
@@ -183,7 +183,7 @@ void ishtp_hid_wakeup(struct hid_device *hid)
wake_up_interruptible(&hid_data->hid_wait);
}
-static struct hid_ll_driver ishtp_hid_ll_driver = {
+static const struct hid_ll_driver ishtp_hid_ll_driver = {
.parse = ishtp_hid_parse,
.start = ishtp_hid_start,
.stop = ishtp_hid_stop,
diff --git a/drivers/hid/intel-ish-hid/ishtp/bus.c b/drivers/hid/intel-ish-hid/ishtp/bus.c
index f68aba8794fe..81385ab37fa9 100644
--- a/drivers/hid/intel-ish-hid/ishtp/bus.c
+++ b/drivers/hid/intel-ish-hid/ishtp/bus.c
@@ -361,7 +361,7 @@ static struct attribute *ishtp_cl_dev_attrs[] = {
};
ATTRIBUTE_GROUPS(ishtp_cl_dev);
-static int ishtp_cl_uevent(struct device *dev, struct kobj_uevent_env *env)
+static int ishtp_cl_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
if (add_uevent_var(env, "MODALIAS=" ISHTP_MODULE_PREFIX "%s", dev_name(dev)))
return -ENOMEM;
diff --git a/drivers/hid/surface-hid/surface_hid.c b/drivers/hid/surface-hid/surface_hid.c
index d4aa8c81903a..61e5814b0ad7 100644
--- a/drivers/hid/surface-hid/surface_hid.c
+++ b/drivers/hid/surface-hid/surface_hid.c
@@ -80,7 +80,7 @@ static int ssam_hid_get_descriptor(struct surface_hid_device *shid, u8 entry, u8
rsp.length = 0;
- status = ssam_retry(ssam_request_sync_onstack, shid->ctrl, &rqst, &rsp,
+ status = ssam_retry(ssam_request_do_sync_onstack, shid->ctrl, &rqst, &rsp,
sizeof(*slice));
if (status)
return status;
@@ -131,7 +131,7 @@ static int ssam_hid_set_raw_report(struct surface_hid_device *shid, u8 rprt_id,
buf[0] = rprt_id;
- return ssam_retry(ssam_request_sync, shid->ctrl, &rqst, NULL);
+ return ssam_retry(ssam_request_do_sync, shid->ctrl, &rqst, NULL);
}
static int ssam_hid_get_raw_report(struct surface_hid_device *shid, u8 rprt_id, u8 *buf, size_t len)
@@ -151,7 +151,7 @@ static int ssam_hid_get_raw_report(struct surface_hid_device *shid, u8 rprt_id,
rsp.length = 0;
rsp.pointer = buf;
- return ssam_retry(ssam_request_sync_onstack, shid->ctrl, &rqst, &rsp, sizeof(rprt_id));
+ return ssam_retry(ssam_request_do_sync_onstack, shid->ctrl, &rqst, &rsp, sizeof(rprt_id));
}
static u32 ssam_hid_event_fn(struct ssam_event_notifier *nf, const struct ssam_event *event)
@@ -230,7 +230,7 @@ static void surface_hid_remove(struct ssam_device *sdev)
}
static const struct ssam_device_id surface_hid_match[] = {
- { SSAM_SDEV(HID, SSAM_ANY_TID, SSAM_ANY_IID, 0x00) },
+ { SSAM_SDEV(HID, ANY, SSAM_SSH_IID_ANY, 0x00) },
{ },
};
MODULE_DEVICE_TABLE(ssam, surface_hid_match);
diff --git a/drivers/hid/surface-hid/surface_hid_core.c b/drivers/hid/surface-hid/surface_hid_core.c
index 87637f813de2..a3e9cceddfac 100644
--- a/drivers/hid/surface-hid/surface_hid_core.c
+++ b/drivers/hid/surface-hid/surface_hid_core.c
@@ -174,7 +174,7 @@ static int surface_hid_raw_request(struct hid_device *hid, unsigned char reportn
return -EIO;
}
-static struct hid_ll_driver surface_hid_ll_driver = {
+static const struct hid_ll_driver surface_hid_ll_driver = {
.start = surface_hid_start,
.stop = surface_hid_stop,
.open = surface_hid_open,
diff --git a/drivers/hid/surface-hid/surface_kbd.c b/drivers/hid/surface-hid/surface_kbd.c
index 0635341bc517..4fbce201db6a 100644
--- a/drivers/hid/surface-hid/surface_kbd.c
+++ b/drivers/hid/surface-hid/surface_kbd.c
@@ -49,7 +49,7 @@ static int ssam_kbd_get_descriptor(struct surface_hid_device *shid, u8 entry, u8
rsp.length = 0;
rsp.pointer = buf;
- status = ssam_retry(ssam_request_sync_onstack, shid->ctrl, &rqst, &rsp, sizeof(entry));
+ status = ssam_retry(ssam_request_do_sync_onstack, shid->ctrl, &rqst, &rsp, sizeof(entry));
if (status)
return status;
@@ -75,7 +75,7 @@ static int ssam_kbd_set_caps_led(struct surface_hid_device *shid, bool value)
rqst.length = sizeof(value_u8);
rqst.payload = &value_u8;
- return ssam_retry(ssam_request_sync_onstack, shid->ctrl, &rqst, NULL, sizeof(value_u8));
+ return ssam_retry(ssam_request_do_sync_onstack, shid->ctrl, &rqst, NULL, sizeof(value_u8));
}
static int ssam_kbd_get_feature_report(struct surface_hid_device *shid, u8 *buf, size_t len)
@@ -97,7 +97,7 @@ static int ssam_kbd_get_feature_report(struct surface_hid_device *shid, u8 *buf,
rsp.length = 0;
rsp.pointer = buf;
- status = ssam_retry(ssam_request_sync_onstack, shid->ctrl, &rqst, &rsp, sizeof(payload));
+ status = ssam_retry(ssam_request_do_sync_onstack, shid->ctrl, &rqst, &rsp, sizeof(payload));
if (status)
return status;
@@ -250,7 +250,7 @@ static int surface_kbd_probe(struct platform_device *pdev)
shid->uid.domain = SSAM_DOMAIN_SERIALHUB;
shid->uid.category = SSAM_SSH_TC_KBD;
- shid->uid.target = 2;
+ shid->uid.target = SSAM_SSH_TID_KIP;
shid->uid.instance = 0;
shid->uid.function = 0;
diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c
index 2a918aeb0af1..f161c95a1ad2 100644
--- a/drivers/hid/uhid.c
+++ b/drivers/hid/uhid.c
@@ -387,7 +387,7 @@ static int uhid_hid_output_report(struct hid_device *hid, __u8 *buf,
return uhid_hid_output_raw(hid, buf, count, HID_OUTPUT_REPORT);
}
-struct hid_ll_driver uhid_hid_driver = {
+static const struct hid_ll_driver uhid_hid_driver = {
.start = uhid_hid_start,
.stop = uhid_hid_stop,
.open = uhid_hid_open,
@@ -396,7 +396,6 @@ struct hid_ll_driver uhid_hid_driver = {
.raw_request = uhid_hid_raw_request,
.output_report = uhid_hid_output_report,
};
-EXPORT_SYMBOL_GPL(uhid_hid_driver);
#ifdef CONFIG_COMPAT
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index be4c731aaa65..257dd73e37bf 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -1318,7 +1318,7 @@ static bool usbhid_may_wakeup(struct hid_device *hid)
return device_may_wakeup(&dev->dev);
}
-struct hid_ll_driver usb_hid_driver = {
+static const struct hid_ll_driver usb_hid_driver = {
.parse = usbhid_parse,
.start = usbhid_start,
.stop = usbhid_stop,
@@ -1332,7 +1332,12 @@ struct hid_ll_driver usb_hid_driver = {
.idle = usbhid_idle,
.may_wakeup = usbhid_may_wakeup,
};
-EXPORT_SYMBOL_GPL(usb_hid_driver);
+
+bool hid_is_usb(const struct hid_device *hdev)
+{
+ return hdev->ll_driver == &usb_hid_driver;
+}
+EXPORT_SYMBOL_GPL(hid_is_usb);
static int usbhid_probe(struct usb_interface *intf, const struct usb_device_id *id)
{
diff --git a/drivers/hsi/clients/cmt_speech.c b/drivers/hsi/clients/cmt_speech.c
index 8069f795c864..daa8e1bff5d9 100644
--- a/drivers/hsi/clients/cmt_speech.c
+++ b/drivers/hsi/clients/cmt_speech.c
@@ -1264,7 +1264,7 @@ static int cs_char_mmap(struct file *file, struct vm_area_struct *vma)
if (vma_pages(vma) != 1)
return -EINVAL;
- vma->vm_flags |= VM_IO | VM_DONTDUMP | VM_DONTEXPAND;
+ vm_flags_set(vma, VM_IO | VM_DONTDUMP | VM_DONTEXPAND);
vma->vm_ops = &cs_char_vm_ops;
vma->vm_private_data = file->private_data;
diff --git a/drivers/hsi/hsi_core.c b/drivers/hsi/hsi_core.c
index 884066109699..8fda8f1d064d 100644
--- a/drivers/hsi/hsi_core.c
+++ b/drivers/hsi/hsi_core.c
@@ -30,7 +30,7 @@ static struct attribute *hsi_bus_dev_attrs[] = {
};
ATTRIBUTE_GROUPS(hsi_bus_dev);
-static int hsi_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
+static int hsi_bus_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
add_uevent_var(env, "MODALIAS=hsi:%s", dev_name(dev));
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index 4d6480d57546..8b0dd8e5244d 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -147,7 +147,7 @@ int hv_synic_alloc(void)
* Synic message and event pages are allocated by paravisor.
* Skip these pages allocation here.
*/
- if (!hv_isolation_type_snp()) {
+ if (!hv_isolation_type_snp() && !hv_root_partition) {
hv_cpu->synic_message_page =
(void *)get_zeroed_page(GFP_ATOMIC);
if (hv_cpu->synic_message_page == NULL) {
@@ -216,7 +216,7 @@ void hv_synic_enable_regs(unsigned int cpu)
simp.as_uint64 = hv_get_register(HV_REGISTER_SIMP);
simp.simp_enabled = 1;
- if (hv_isolation_type_snp()) {
+ if (hv_isolation_type_snp() || hv_root_partition) {
hv_cpu->synic_message_page
= memremap(simp.base_simp_gpa << HV_HYP_PAGE_SHIFT,
HV_HYP_PAGE_SIZE, MEMREMAP_WB);
@@ -233,7 +233,7 @@ void hv_synic_enable_regs(unsigned int cpu)
siefp.as_uint64 = hv_get_register(HV_REGISTER_SIEFP);
siefp.siefp_enabled = 1;
- if (hv_isolation_type_snp()) {
+ if (hv_isolation_type_snp() || hv_root_partition) {
hv_cpu->synic_event_page =
memremap(siefp.base_siefp_gpa << HV_HYP_PAGE_SHIFT,
HV_HYP_PAGE_SIZE, MEMREMAP_WB);
@@ -315,20 +315,24 @@ void hv_synic_disable_regs(unsigned int cpu)
* addresses.
*/
simp.simp_enabled = 0;
- if (hv_isolation_type_snp())
+ if (hv_isolation_type_snp() || hv_root_partition) {
memunmap(hv_cpu->synic_message_page);
- else
+ hv_cpu->synic_message_page = NULL;
+ } else {
simp.base_simp_gpa = 0;
+ }
hv_set_register(HV_REGISTER_SIMP, simp.as_uint64);
siefp.as_uint64 = hv_get_register(HV_REGISTER_SIEFP);
siefp.siefp_enabled = 0;
- if (hv_isolation_type_snp())
+ if (hv_isolation_type_snp() || hv_root_partition) {
memunmap(hv_cpu->synic_event_page);
- else
+ hv_cpu->synic_event_page = NULL;
+ } else {
siefp.base_siefp_gpa = 0;
+ }
hv_set_register(HV_REGISTER_SIEFP, siefp.as_uint64);
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index cbe43e2567a7..dffcc894f117 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -1963,7 +1963,7 @@ static void hv_balloon_debugfs_init(struct hv_dynmem_device *b)
static void hv_balloon_debugfs_exit(struct hv_dynmem_device *b)
{
- debugfs_remove(debugfs_lookup("hv-balloon", NULL));
+ debugfs_lookup_and_remove("hv-balloon", NULL);
}
#else
@@ -2042,7 +2042,7 @@ connect_error:
return ret;
}
-static int balloon_remove(struct hv_device *dev)
+static void balloon_remove(struct hv_device *dev)
{
struct hv_dynmem_device *dm = hv_get_drvdata(dev);
struct hv_hotadd_state *has, *tmp;
@@ -2083,8 +2083,6 @@ static int balloon_remove(struct hv_device *dev)
kfree(has);
}
spin_unlock_irqrestore(&dm_device.ha_lock, flags);
-
- return 0;
}
static int balloon_suspend(struct hv_device *hv_dev)
diff --git a/drivers/hv/hv_common.c b/drivers/hv/hv_common.c
index ae68298c0dca..52a6f89ccdbd 100644
--- a/drivers/hv/hv_common.c
+++ b/drivers/hv/hv_common.c
@@ -25,17 +25,20 @@
#include <asm/mshyperv.h>
/*
- * hv_root_partition and ms_hyperv are defined here with other Hyper-V
- * specific globals so they are shared across all architectures and are
+ * hv_root_partition, ms_hyperv and hv_nested are defined here with other
+ * Hyper-V specific globals so they are shared across all architectures and are
* built only when CONFIG_HYPERV is defined. But on x86,
* ms_hyperv_init_platform() is built even when CONFIG_HYPERV is not
- * defined, and it uses these two variables. So mark them as __weak
+ * defined, and it uses these three variables. So mark them as __weak
* here, allowing for an overriding definition in the module containing
* ms_hyperv_init_platform().
*/
bool __weak hv_root_partition;
EXPORT_SYMBOL_GPL(hv_root_partition);
+bool __weak hv_nested;
+EXPORT_SYMBOL_GPL(hv_nested);
+
struct ms_hyperv_info __weak ms_hyperv;
EXPORT_SYMBOL_GPL(ms_hyperv);
diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c
index d776074b49cb..42aec2c5606a 100644
--- a/drivers/hv/hv_util.c
+++ b/drivers/hv/hv_util.c
@@ -602,7 +602,7 @@ error1:
return ret;
}
-static int util_remove(struct hv_device *dev)
+static void util_remove(struct hv_device *dev)
{
struct hv_util_service *srv = hv_get_drvdata(dev);
@@ -610,8 +610,6 @@ static int util_remove(struct hv_device *dev)
srv->util_deinit();
vmbus_close(dev->channel);
kfree(srv->recv_buffer);
-
- return 0;
}
/*
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 3146710d4ac6..d24dd65b33d4 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -711,9 +711,9 @@ __ATTRIBUTE_GROUPS(vmbus_bus);
* representation of the device guid (each byte of the guid will be
* represented with two hex characters.
*/
-static int vmbus_uevent(struct device *device, struct kobj_uevent_env *env)
+static int vmbus_uevent(const struct device *device, struct kobj_uevent_env *env)
{
- struct hv_device *dev = device_to_hv_device(device);
+ const struct hv_device *dev = device_to_hv_device(device);
const char *format = "MODALIAS=vmbus:%*phN";
return add_uevent_var(env, format, UUID_SIZE, &dev->dev_type);
@@ -2744,7 +2744,7 @@ static int __init hv_acpi_init(void)
if (!hv_is_hyperv_initialized())
return -ENODEV;
- if (hv_root_partition)
+ if (hv_root_partition && !hv_nested)
return 0;
/*
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 3176c33af6c6..5b3b76477b0e 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -714,6 +714,15 @@ config SENSORS_GPIO_FAN
This driver can also be built as a module. If so, the module
will be called gpio-fan.
+config SENSORS_GXP_FAN_CTRL
+ tristate "HPE GXP fan controller"
+ depends on ARCH_HPE_GXP || COMPILE_TEST
+ help
+ If you say yes here you get support for GXP fan control functionality.
+
+ The GXP controls fan function via the CPLD through the use of PWM
+ registers. This driver reports status and pwm setting of the fans.
+
config SENSORS_HIH6130
tristate "Honeywell Humidicon HIH-6130 humidity/temperature sensor"
depends on I2C
@@ -1166,6 +1175,13 @@ config SENSORS_MAX31790
This driver can also be built as a module. If so, the module
will be called max31790.
+config SENSORS_MC34VR500
+ tristate "NXP MC34VR500 hardware monitoring driver"
+ depends on I2C
+ help
+ If you say yes here you get support for the temperature and input
+ voltage sensors of the NXP MC34VR500.
+
config SENSORS_MCP3021
tristate "Microchip MCP3021 and compatibles"
depends on I2C
@@ -1516,7 +1532,7 @@ config SENSORS_NCT6775_CORE
config SENSORS_NCT6775
tristate "Platform driver for Nuvoton NCT6775F and compatibles"
depends on !PPC
- depends on ACPI_WMI || ACPI_WMI=n
+ depends on ACPI || ACPI=n
select HWMON_VID
select SENSORS_NCT6775_CORE
help
@@ -1749,23 +1765,6 @@ config SENSORS_SHTC1
This driver can also be built as a module. If so, the module
will be called shtc1.
-config SENSORS_S3C
- tristate "Samsung built-in ADC"
- depends on S3C_ADC
- help
- If you say yes here you get support for the on-board ADCs of
- the Samsung S3C24XX, S3C64XX and other series of SoC
-
- This driver can also be built as a module. If so, the module
- will be called s3c-hwmon.
-
-config SENSORS_S3C_RAW
- bool "Include raw channel attributes in sysfs"
- depends on SENSORS_S3C
- help
- Say Y here if you want to include raw copies of all the ADC
- channels in sysfs.
-
config SENSORS_SIS5595
tristate "Silicon Integrated Systems Corp. SiS5595"
depends on PCI
@@ -2341,7 +2340,7 @@ config SENSORS_XGENE
config SENSORS_INTEL_M10_BMC_HWMON
tristate "Intel MAX10 BMC Hardware Monitoring"
- depends on MFD_INTEL_M10_BMC
+ depends on MFD_INTEL_M10_BMC_CORE
help
This driver provides support for the hardware monitoring functionality
on Intel MAX10 BMC chip.
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index e2e4e87b282f..88712b5031c8 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -83,6 +83,7 @@ obj-$(CONFIG_SENSORS_GL518SM) += gl518sm.o
obj-$(CONFIG_SENSORS_GL520SM) += gl520sm.o
obj-$(CONFIG_SENSORS_GSC) += gsc-hwmon.o
obj-$(CONFIG_SENSORS_GPIO_FAN) += gpio-fan.o
+obj-$(CONFIG_SENSORS_GXP_FAN_CTRL) += gxp-fan-ctrl.o
obj-$(CONFIG_SENSORS_HIH6130) += hih6130.o
obj-$(CONFIG_SENSORS_ULTRA45) += ultra45_env.o
obj-$(CONFIG_SENSORS_I5500) += i5500_temp.o
@@ -149,6 +150,7 @@ obj-$(CONFIG_SENSORS_MAX6650) += max6650.o
obj-$(CONFIG_SENSORS_MAX6697) += max6697.o
obj-$(CONFIG_SENSORS_MAX31790) += max31790.o
obj-$(CONFIG_SENSORS_MC13783_ADC)+= mc13783-adc.o
+obj-$(CONFIG_SENSORS_MC34VR500) += mc34vr500.o
obj-$(CONFIG_SENSORS_MCP3021) += mcp3021.o
obj-$(CONFIG_SENSORS_TC654) += tc654.o
obj-$(CONFIG_SENSORS_TPS23861) += tps23861.o
@@ -174,7 +176,6 @@ obj-$(CONFIG_SENSORS_PCF8591) += pcf8591.o
obj-$(CONFIG_SENSORS_POWR1220) += powr1220.o
obj-$(CONFIG_SENSORS_PWM_FAN) += pwm-fan.o
obj-$(CONFIG_SENSORS_RASPBERRYPI_HWMON) += raspberrypi-hwmon.o
-obj-$(CONFIG_SENSORS_S3C) += s3c-hwmon.o
obj-$(CONFIG_SENSORS_SBTSI) += sbtsi_temp.o
obj-$(CONFIG_SENSORS_SBRMI) += sbrmi.o
obj-$(CONFIG_SENSORS_SCH56XX_COMMON)+= sch56xx-common.o
diff --git a/drivers/hwmon/aht10.c b/drivers/hwmon/aht10.c
index d76f3441ecf1..9babd69d54a3 100644
--- a/drivers/hwmon/aht10.c
+++ b/drivers/hwmon/aht10.c
@@ -79,7 +79,6 @@ struct aht10_data {
/**
* aht10_init() - Initialize an AHT10 chip
- * @client: the i2c client associated with the AHT10
* @data: the data associated with this AHT10 chip
* Return: 0 if succesfull, 1 if not
*/
@@ -124,7 +123,7 @@ static int aht10_polltime_expired(struct aht10_data *data)
/**
* aht10_read_values() - read and parse the raw data from the AHT10
- * @aht10_data: the struct aht10_data to use for the lock
+ * @data: the struct aht10_data to use for the lock
* Return: 0 if succesfull, 1 if not
*/
static int aht10_read_values(struct aht10_data *data)
diff --git a/drivers/hwmon/aquacomputer_d5next.c b/drivers/hwmon/aquacomputer_d5next.c
index 9cc10080160b..12682a610ce7 100644
--- a/drivers/hwmon/aquacomputer_d5next.c
+++ b/drivers/hwmon/aquacomputer_d5next.c
@@ -1,10 +1,11 @@
// SPDX-License-Identifier: GPL-2.0+
/*
* hwmon driver for Aquacomputer devices (D5 Next, Farbwerk, Farbwerk 360, Octo,
- * Quadro, High Flow Next)
+ * Quadro, High Flow Next, Aquaero, Aquastream Ultimate)
*
* Aquacomputer devices send HID reports (with ID 0x01) every second to report
- * sensor values.
+ * sensor values, except for devices that communicate through the
+ * legacy way (currently, Poweradjust 3).
*
* Copyright 2021 Aleksa Savic <savicaleksa83@gmail.com>
* Copyright 2022 Jack Doan <me@jackdoan.com>
@@ -21,14 +22,20 @@
#include <asm/unaligned.h>
#define USB_VENDOR_ID_AQUACOMPUTER 0x0c70
+#define USB_PRODUCT_ID_AQUAERO 0xf001
#define USB_PRODUCT_ID_FARBWERK 0xf00a
#define USB_PRODUCT_ID_QUADRO 0xf00d
#define USB_PRODUCT_ID_D5NEXT 0xf00e
#define USB_PRODUCT_ID_FARBWERK360 0xf010
#define USB_PRODUCT_ID_OCTO 0xf011
#define USB_PRODUCT_ID_HIGHFLOWNEXT 0xf012
+#define USB_PRODUCT_ID_AQUASTREAMULT 0xf00b
+#define USB_PRODUCT_ID_POWERADJUST3 0xf0bd
-enum kinds { d5next, farbwerk, farbwerk360, octo, quadro, highflownext };
+enum kinds {
+ d5next, farbwerk, farbwerk360, octo, quadro,
+ highflownext, aquaero, poweradjust3, aquastreamult
+};
static const char *const aqc_device_names[] = {
[d5next] = "d5next",
@@ -36,16 +43,17 @@ static const char *const aqc_device_names[] = {
[farbwerk360] = "farbwerk360",
[octo] = "octo",
[quadro] = "quadro",
- [highflownext] = "highflownext"
+ [highflownext] = "highflownext",
+ [aquaero] = "aquaero",
+ [aquastreamult] = "aquastreamultimate",
+ [poweradjust3] = "poweradjust3"
};
#define DRIVER_NAME "aquacomputer_d5next"
#define STATUS_REPORT_ID 0x01
#define STATUS_UPDATE_INTERVAL (2 * HZ) /* In seconds */
-#define SERIAL_FIRST_PART 3
-#define SERIAL_SECOND_PART 5
-#define FIRMWARE_VERSION 13
+#define SERIAL_PART_OFFSET 2
#define CTRL_REPORT_ID 0x03
@@ -59,8 +67,14 @@ static u8 secondary_ctrl_report[] = {
0x02, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x34, 0xC6
};
-/* Sensor sizes and offsets for all Aquacomputer devices */
-#define AQC_TEMP_SENSOR_SIZE 0x02
+/* Report IDs for legacy devices */
+#define POWERADJUST3_STATUS_REPORT_ID 0x03
+
+/* Info, sensor sizes and offsets for most Aquacomputer devices */
+#define AQC_SERIAL_START 0x3
+#define AQC_FIRMWARE_VERSION 0xD
+
+#define AQC_SENSOR_SIZE 0x02
#define AQC_TEMP_SENSOR_DISCONNECTED 0x7FFF
#define AQC_FAN_PERCENT_OFFSET 0x00
#define AQC_FAN_VOLTAGE_OFFSET 0x02
@@ -68,6 +82,26 @@ static u8 secondary_ctrl_report[] = {
#define AQC_FAN_POWER_OFFSET 0x06
#define AQC_FAN_SPEED_OFFSET 0x08
+/* Specs of the Aquaero fan controllers */
+#define AQUAERO_SERIAL_START 0x07
+#define AQUAERO_FIRMWARE_VERSION 0x0B
+#define AQUAERO_NUM_FANS 4
+#define AQUAERO_NUM_SENSORS 8
+#define AQUAERO_NUM_VIRTUAL_SENSORS 8
+#define AQUAERO_NUM_CALC_VIRTUAL_SENSORS 4
+#define AQUAERO_NUM_FLOW_SENSORS 2
+
+/* Sensor report offsets for Aquaero fan controllers */
+#define AQUAERO_SENSOR_START 0x65
+#define AQUAERO_VIRTUAL_SENSOR_START 0x85
+#define AQUAERO_CALC_VIRTUAL_SENSOR_START 0x95
+#define AQUAERO_FLOW_SENSORS_START 0xF9
+#define AQUAERO_FAN_VOLTAGE_OFFSET 0x04
+#define AQUAERO_FAN_CURRENT_OFFSET 0x06
+#define AQUAERO_FAN_POWER_OFFSET 0x08
+#define AQUAERO_FAN_SPEED_OFFSET 0x00
+static u16 aquaero_sensor_fan_offsets[] = { 0x167, 0x173, 0x17f, 0x18B };
+
/* Specs of the D5 Next pump */
#define D5NEXT_NUM_FANS 2
#define D5NEXT_NUM_SENSORS 1
@@ -82,12 +116,32 @@ static u8 secondary_ctrl_report[] = {
#define D5NEXT_5V_VOLTAGE 0x39
#define D5NEXT_12V_VOLTAGE 0x37
#define D5NEXT_VIRTUAL_SENSORS_START 0x3f
-static u8 d5next_sensor_fan_offsets[] = { D5NEXT_PUMP_OFFSET, D5NEXT_FAN_OFFSET };
+static u16 d5next_sensor_fan_offsets[] = { D5NEXT_PUMP_OFFSET, D5NEXT_FAN_OFFSET };
/* Control report offsets for the D5 Next pump */
#define D5NEXT_TEMP_CTRL_OFFSET 0x2D /* Temperature sensor offsets location */
static u16 d5next_ctrl_fan_offsets[] = { 0x97, 0x42 }; /* Pump and fan speed (from 0-100%) */
+/* Specs of the Aquastream Ultimate pump */
+/* Pump does not follow the standard structure, so only consider the fan */
+#define AQUASTREAMULT_NUM_FANS 1
+#define AQUASTREAMULT_NUM_SENSORS 2
+
+/* Sensor report offsets for the Aquastream Ultimate pump */
+#define AQUASTREAMULT_SENSOR_START 0x2D
+#define AQUASTREAMULT_PUMP_OFFSET 0x51
+#define AQUASTREAMULT_PUMP_VOLTAGE 0x3D
+#define AQUASTREAMULT_PUMP_CURRENT 0x53
+#define AQUASTREAMULT_PUMP_POWER 0x55
+#define AQUASTREAMULT_FAN_OFFSET 0x41
+#define AQUASTREAMULT_PRESSURE_OFFSET 0x57
+#define AQUASTREAMULT_FLOW_SENSOR_OFFSET 0x37
+#define AQUASTREAMULT_FAN_VOLTAGE_OFFSET 0x02
+#define AQUASTREAMULT_FAN_CURRENT_OFFSET 0x00
+#define AQUASTREAMULT_FAN_POWER_OFFSET 0x04
+#define AQUASTREAMULT_FAN_SPEED_OFFSET 0x06
+static u16 aquastreamult_sensor_fan_offsets[] = { AQUASTREAMULT_FAN_OFFSET };
+
/* Spec and sensor report offset for the Farbwerk RGB controller */
#define FARBWERK_NUM_SENSORS 4
#define FARBWERK_SENSOR_START 0x2f
@@ -114,7 +168,7 @@ static u16 d5next_ctrl_fan_offsets[] = { 0x97, 0x42 }; /* Pump and fan speed (fr
#define OCTO_POWER_CYCLES 0x18
#define OCTO_SENSOR_START 0x3D
#define OCTO_VIRTUAL_SENSORS_START 0x45
-static u8 octo_sensor_fan_offsets[] = { 0x7D, 0x8A, 0x97, 0xA4, 0xB1, 0xBE, 0xCB, 0xD8 };
+static u16 octo_sensor_fan_offsets[] = { 0x7D, 0x8A, 0x97, 0xA4, 0xB1, 0xBE, 0xCB, 0xD8 };
/* Control report offsets for the Octo */
#define OCTO_TEMP_CTRL_OFFSET 0xA
@@ -125,6 +179,7 @@ static u16 octo_ctrl_fan_offsets[] = { 0x5B, 0xB0, 0x105, 0x15A, 0x1AF, 0x204, 0
#define QUADRO_NUM_FANS 4
#define QUADRO_NUM_SENSORS 4
#define QUADRO_NUM_VIRTUAL_SENSORS 16
+#define QUADRO_NUM_FLOW_SENSORS 1
#define QUADRO_CTRL_REPORT_SIZE 0x3c1
/* Sensor report offsets for the Quadro */
@@ -132,7 +187,7 @@ static u16 octo_ctrl_fan_offsets[] = { 0x5B, 0xB0, 0x105, 0x15A, 0x1AF, 0x204, 0
#define QUADRO_SENSOR_START 0x34
#define QUADRO_VIRTUAL_SENSORS_START 0x3c
#define QUADRO_FLOW_SENSOR_OFFSET 0x6e
-static u8 quadro_sensor_fan_offsets[] = { 0x70, 0x7D, 0x8A, 0x97 };
+static u16 quadro_sensor_fan_offsets[] = { 0x70, 0x7D, 0x8A, 0x97 };
/* Control report offsets for the Quadro */
#define QUADRO_TEMP_CTRL_OFFSET 0xA
@@ -141,6 +196,7 @@ static u16 quadro_ctrl_fan_offsets[] = { 0x37, 0x8c, 0xe1, 0x136 }; /* Fan speed
/* Specs of High Flow Next flow sensor */
#define HIGHFLOWNEXT_NUM_SENSORS 2
+#define HIGHFLOWNEXT_NUM_FLOW_SENSORS 1
/* Sensor report offsets for the High Flow Next */
#define HIGHFLOWNEXT_SENSOR_START 85
@@ -151,6 +207,13 @@ static u16 quadro_ctrl_fan_offsets[] = { 0x37, 0x8c, 0xe1, 0x136 }; /* Fan speed
#define HIGHFLOWNEXT_5V_VOLTAGE 97
#define HIGHFLOWNEXT_5V_VOLTAGE_USB 99
+/* Specs of the Poweradjust 3 */
+#define POWERADJUST3_NUM_SENSORS 1
+#define POWERADJUST3_SENSOR_REPORT_SIZE 0x32
+
+/* Sensor report offsets for the Poweradjust 3 */
+#define POWERADJUST3_SENSOR_START 0x03
+
/* Labels for D5 Next */
static const char *const label_d5next_temp[] = {
"Coolant temp"
@@ -178,12 +241,16 @@ static const char *const label_d5next_current[] = {
"Fan current"
};
-/* Labels for Farbwerk, Farbwerk 360 and Octo and Quadro temperature sensors */
+/* Labels for Aquaero, Farbwerk, Farbwerk 360 and Octo and Quadro temperature sensors */
static const char *const label_temp_sensors[] = {
"Sensor 1",
"Sensor 2",
"Sensor 3",
- "Sensor 4"
+ "Sensor 4",
+ "Sensor 5",
+ "Sensor 6",
+ "Sensor 7",
+ "Sensor 8"
};
static const char *const label_virtual_temp_sensors[] = {
@@ -205,6 +272,13 @@ static const char *const label_virtual_temp_sensors[] = {
"Virtual sensor 16",
};
+static const char *const label_aquaero_calc_temp_sensors[] = {
+ "Calc. virtual sensor 1",
+ "Calc. virtual sensor 2",
+ "Calc. virtual sensor 3",
+ "Calc. virtual sensor 4"
+};
+
/* Labels for Octo and Quadro (except speed) */
static const char *const label_fan_speed[] = {
"Fan 1 speed",
@@ -259,6 +333,16 @@ static const char *const label_quadro_speeds[] = {
"Flow speed [dL/h]"
};
+/* Labels for Aquaero fan speeds */
+static const char *const label_aquaero_speeds[] = {
+ "Fan 1 speed",
+ "Fan 2 speed",
+ "Fan 3 speed",
+ "Fan 4 speed",
+ "Flow sensor 1 [dL/h]",
+ "Flow sensor 2 [dL/h]"
+};
+
/* Labels for High Flow Next */
static const char *const label_highflownext_temp_sensors[] = {
"Coolant temp",
@@ -280,6 +364,70 @@ static const char *const label_highflownext_voltage[] = {
"+5V USB voltage"
};
+/* Labels for Aquastream Ultimate */
+static const char *const label_aquastreamult_temp[] = {
+ "Coolant temp",
+ "External temp"
+};
+
+static const char *const label_aquastreamult_speeds[] = {
+ "Fan speed",
+ "Pump speed",
+ "Pressure [mbar]",
+ "Flow speed [dL/h]"
+};
+
+static const char *const label_aquastreamult_power[] = {
+ "Fan power",
+ "Pump power"
+};
+
+static const char *const label_aquastreamult_voltages[] = {
+ "Fan voltage",
+ "Pump voltage"
+};
+
+static const char *const label_aquastreamult_current[] = {
+ "Fan current",
+ "Pump current"
+};
+
+/* Labels for Poweradjust 3 */
+static const char *const label_poweradjust3_temp_sensors[] = {
+ "External sensor"
+};
+
+struct aqc_fan_structure_offsets {
+ u8 voltage;
+ u8 curr;
+ u8 power;
+ u8 speed;
+};
+
+/* Fan structure offsets for Aquaero */
+static struct aqc_fan_structure_offsets aqc_aquaero_fan_structure = {
+ .voltage = AQUAERO_FAN_VOLTAGE_OFFSET,
+ .curr = AQUAERO_FAN_CURRENT_OFFSET,
+ .power = AQUAERO_FAN_POWER_OFFSET,
+ .speed = AQUAERO_FAN_SPEED_OFFSET
+};
+
+/* Fan structure offsets for Aquastream Ultimate */
+static struct aqc_fan_structure_offsets aqc_aquastreamult_fan_structure = {
+ .voltage = AQUASTREAMULT_FAN_VOLTAGE_OFFSET,
+ .curr = AQUASTREAMULT_FAN_CURRENT_OFFSET,
+ .power = AQUASTREAMULT_FAN_POWER_OFFSET,
+ .speed = AQUASTREAMULT_FAN_SPEED_OFFSET
+};
+
+/* Fan structure offsets for all devices except those above */
+static struct aqc_fan_structure_offsets aqc_general_fan_structure = {
+ .voltage = AQC_FAN_VOLTAGE_OFFSET,
+ .curr = AQC_FAN_CURRENT_OFFSET,
+ .power = AQC_FAN_POWER_OFFSET,
+ .speed = AQC_FAN_SPEED_OFFSET
+};
+
struct aqc_data {
struct hid_device *hdev;
struct device *hwmon_dev;
@@ -288,6 +436,8 @@ struct aqc_data {
enum kinds kind;
const char *name;
+ int status_report_id; /* Used for legacy devices, report is stored in buffer */
+
int buffer_size;
u8 *buffer;
int checksum_start;
@@ -295,26 +445,32 @@ struct aqc_data {
int checksum_offset;
int num_fans;
- u8 *fan_sensor_offsets;
+ u16 *fan_sensor_offsets;
u16 *fan_ctrl_offsets;
int num_temp_sensors;
int temp_sensor_start_offset;
int num_virtual_temp_sensors;
int virtual_temp_sensor_start_offset;
+ int num_calc_virt_temp_sensors;
+ int calc_virt_temp_sensor_start_offset;
u16 temp_ctrl_offset;
u16 power_cycle_count_offset;
- u8 flow_sensor_offset;
+ int num_flow_sensors;
+ u8 flow_sensors_start_offset;
u8 flow_pulses_ctrl_offset;
+ struct aqc_fan_structure_offsets *fan_structure;
/* General info, same across all devices */
+ u8 serial_number_start_offset;
u32 serial_number[2];
+ u8 firmware_version_offset;
u16 firmware_version;
/* How many times the device was powered on, if available */
u32 power_cycles;
/* Sensor values */
- s32 temp_input[20]; /* Max 4 physical and 16 virtual */
+ s32 temp_input[20]; /* Max 4 physical and 16 virtual or 8 physical and 12 virtual */
u16 speed_input[8];
u32 power_input[8];
u16 voltage_input[8];
@@ -323,6 +479,7 @@ struct aqc_data {
/* Label values */
const char *const *temp_label;
const char *const *virtual_temp_label;
+ const char *const *calc_virt_temp_label; /* For Aquaero */
const char *const *speed_label;
const char *const *power_label;
const char *const *voltage_label;
@@ -443,7 +600,9 @@ static umode_t aqc_is_visible(const void *data, enum hwmon_sensor_types type, u3
}
}
- if (channel < priv->num_temp_sensors + priv->num_virtual_temp_sensors)
+ if (channel <
+ priv->num_temp_sensors + priv->num_virtual_temp_sensors +
+ priv->num_calc_virt_temp_sensors)
switch (attr) {
case hwmon_temp_label:
case hwmon_temp_input:
@@ -467,6 +626,14 @@ static umode_t aqc_is_visible(const void *data, enum hwmon_sensor_types type, u3
case hwmon_fan_input:
case hwmon_fan_label:
switch (priv->kind) {
+ case aquastreamult:
+ /*
+ * Special case to support pump RPM, fan RPM,
+ * pressure and flow sensor
+ */
+ if (channel < 4)
+ return 0444;
+ break;
case highflownext:
/* Special case to support flow sensor, water quality
* and conductivity
@@ -474,9 +641,10 @@ static umode_t aqc_is_visible(const void *data, enum hwmon_sensor_types type, u3
if (channel < 3)
return 0444;
break;
+ case aquaero:
case quadro:
- /* Special case to support flow sensor */
- if (channel < priv->num_fans + 1)
+ /* Special case to support flow sensors */
+ if (channel < priv->num_fans + priv->num_flow_sensors)
return 0444;
break;
default:
@@ -496,6 +664,11 @@ static umode_t aqc_is_visible(const void *data, enum hwmon_sensor_types type, u3
break;
case hwmon_power:
switch (priv->kind) {
+ case aquastreamult:
+ /* Special case to support pump and fan power */
+ if (channel < 2)
+ return 0444;
+ break;
case highflownext:
/* Special case to support one power sensor */
if (channel == 0)
@@ -508,8 +681,17 @@ static umode_t aqc_is_visible(const void *data, enum hwmon_sensor_types type, u3
}
break;
case hwmon_curr:
- if (channel < priv->num_fans)
- return 0444;
+ switch (priv->kind) {
+ case aquastreamult:
+ /* Special case to support pump and fan current */
+ if (channel < 2)
+ return 0444;
+ break;
+ default:
+ if (channel < priv->num_fans)
+ return 0444;
+ break;
+ }
break;
case hwmon_in:
switch (priv->kind) {
@@ -518,6 +700,7 @@ static umode_t aqc_is_visible(const void *data, enum hwmon_sensor_types type, u3
if (channel < priv->num_fans + 2)
return 0444;
break;
+ case aquastreamult:
case highflownext:
/* Special case to support two voltage sensors */
if (channel < 2)
@@ -536,14 +719,49 @@ static umode_t aqc_is_visible(const void *data, enum hwmon_sensor_types type, u3
return 0;
}
+/* Read device sensors by manually requesting the sensor report (legacy way) */
+static int aqc_legacy_read(struct aqc_data *priv)
+{
+ int ret, i, sensor_value;
+
+ mutex_lock(&priv->mutex);
+
+ memset(priv->buffer, 0x00, priv->buffer_size);
+ ret = hid_hw_raw_request(priv->hdev, priv->status_report_id, priv->buffer,
+ priv->buffer_size, HID_FEATURE_REPORT, HID_REQ_GET_REPORT);
+ if (ret < 0)
+ goto unlock_and_return;
+
+ /* Temperature sensor readings */
+ for (i = 0; i < priv->num_temp_sensors; i++) {
+ sensor_value = get_unaligned_le16(priv->buffer + priv->temp_sensor_start_offset +
+ i * AQC_SENSOR_SIZE);
+ priv->temp_input[i] = sensor_value * 10;
+ }
+
+ priv->updated = jiffies;
+
+unlock_and_return:
+ mutex_unlock(&priv->mutex);
+ return ret;
+}
+
static int aqc_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
int channel, long *val)
{
int ret;
struct aqc_data *priv = dev_get_drvdata(dev);
- if (time_after(jiffies, priv->updated + STATUS_UPDATE_INTERVAL))
- return -ENODATA;
+ if (time_after(jiffies, priv->updated + STATUS_UPDATE_INTERVAL)) {
+ if (priv->status_report_id != 0) {
+ /* Legacy devices require manual reads */
+ ret = aqc_legacy_read(priv);
+ if (ret < 0)
+ return -ENODATA;
+ } else {
+ return -ENODATA;
+ }
+ }
switch (type) {
case hwmon_temp:
@@ -557,7 +775,7 @@ static int aqc_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
case hwmon_temp_offset:
ret =
aqc_get_ctrl_val(priv, priv->temp_ctrl_offset +
- channel * AQC_TEMP_SENSOR_SIZE, val);
+ channel * AQC_SENSOR_SIZE, val);
if (ret < 0)
return ret;
@@ -611,12 +829,20 @@ static int aqc_read_string(struct device *dev, enum hwmon_sensor_types type, u32
{
struct aqc_data *priv = dev_get_drvdata(dev);
+ /* Number of sensors that are not calculated */
+ int num_non_calc_sensors = priv->num_temp_sensors + priv->num_virtual_temp_sensors;
+
switch (type) {
case hwmon_temp:
- if (channel < priv->num_temp_sensors)
+ if (channel < priv->num_temp_sensors) {
*str = priv->temp_label[channel];
- else
- *str = priv->virtual_temp_label[channel - priv->num_temp_sensors];
+ } else {
+ if (priv->kind == aquaero && channel >= num_non_calc_sensors)
+ *str =
+ priv->calc_virt_temp_label[channel - num_non_calc_sensors];
+ else
+ *str = priv->virtual_temp_label[channel - priv->num_temp_sensors];
+ }
break;
case hwmon_fan:
*str = priv->speed_label[channel];
@@ -651,7 +877,7 @@ static int aqc_write(struct device *dev, enum hwmon_sensor_types type, u32 attr,
val = clamp_val(val, -15000, 15000) / 10;
ret =
aqc_set_ctrl_val(priv, priv->temp_ctrl_offset +
- channel * AQC_TEMP_SENSOR_SIZE, val);
+ channel * AQC_SENSOR_SIZE, val);
if (ret < 0)
return ret;
break;
@@ -789,15 +1015,16 @@ static int aqc_raw_event(struct hid_device *hdev, struct hid_report *report, u8
priv = hid_get_drvdata(hdev);
/* Info provided with every report */
- priv->serial_number[0] = get_unaligned_be16(data + SERIAL_FIRST_PART);
- priv->serial_number[1] = get_unaligned_be16(data + SERIAL_SECOND_PART);
- priv->firmware_version = get_unaligned_be16(data + FIRMWARE_VERSION);
+ priv->serial_number[0] = get_unaligned_be16(data + priv->serial_number_start_offset);
+ priv->serial_number[1] = get_unaligned_be16(data + priv->serial_number_start_offset +
+ SERIAL_PART_OFFSET);
+ priv->firmware_version = get_unaligned_be16(data + priv->firmware_version_offset);
/* Physical temperature sensor readings */
for (i = 0; i < priv->num_temp_sensors; i++) {
sensor_value = get_unaligned_be16(data +
priv->temp_sensor_start_offset +
- i * AQC_TEMP_SENSOR_SIZE);
+ i * AQC_SENSOR_SIZE);
if (sensor_value == AQC_TEMP_SENSOR_DISCONNECTED)
priv->temp_input[i] = -ENODATA;
else
@@ -808,7 +1035,7 @@ static int aqc_raw_event(struct hid_device *hdev, struct hid_report *report, u8
for (j = 0; j < priv->num_virtual_temp_sensors; j++) {
sensor_value = get_unaligned_be16(data +
priv->virtual_temp_sensor_start_offset +
- j * AQC_TEMP_SENSOR_SIZE);
+ j * AQC_SENSOR_SIZE);
if (sensor_value == AQC_TEMP_SENSOR_DISCONNECTED)
priv->temp_input[i] = -ENODATA;
else
@@ -819,15 +1046,24 @@ static int aqc_raw_event(struct hid_device *hdev, struct hid_report *report, u8
/* Fan speed and related readings */
for (i = 0; i < priv->num_fans; i++) {
priv->speed_input[i] =
- get_unaligned_be16(data + priv->fan_sensor_offsets[i] + AQC_FAN_SPEED_OFFSET);
+ get_unaligned_be16(data + priv->fan_sensor_offsets[i] +
+ priv->fan_structure->speed);
priv->power_input[i] =
get_unaligned_be16(data + priv->fan_sensor_offsets[i] +
- AQC_FAN_POWER_OFFSET) * 10000;
+ priv->fan_structure->power) * 10000;
priv->voltage_input[i] =
get_unaligned_be16(data + priv->fan_sensor_offsets[i] +
- AQC_FAN_VOLTAGE_OFFSET) * 10;
+ priv->fan_structure->voltage) * 10;
priv->current_input[i] =
- get_unaligned_be16(data + priv->fan_sensor_offsets[i] + AQC_FAN_CURRENT_OFFSET);
+ get_unaligned_be16(data + priv->fan_sensor_offsets[i] +
+ priv->fan_structure->curr);
+ }
+
+ /* Flow sensor readings */
+ for (j = 0; j < priv->num_flow_sensors; j++) {
+ priv->speed_input[i] = get_unaligned_be16(data + priv->flow_sensors_start_offset +
+ j * AQC_SENSOR_SIZE);
+ i++;
}
if (priv->power_cycle_count_offset != 0)
@@ -835,13 +1071,35 @@ static int aqc_raw_event(struct hid_device *hdev, struct hid_report *report, u8
/* Special-case sensor readings */
switch (priv->kind) {
+ case aquaero:
+ /* Read calculated virtual temp sensors */
+ i = priv->num_temp_sensors + priv->num_virtual_temp_sensors;
+ for (j = 0; j < priv->num_calc_virt_temp_sensors; j++) {
+ sensor_value = get_unaligned_be16(data +
+ priv->calc_virt_temp_sensor_start_offset +
+ j * AQC_SENSOR_SIZE);
+ if (sensor_value == AQC_TEMP_SENSOR_DISCONNECTED)
+ priv->temp_input[i] = -ENODATA;
+ else
+ priv->temp_input[i] = sensor_value * 10;
+ i++;
+ }
+ break;
+ case aquastreamult:
+ priv->speed_input[1] = get_unaligned_be16(data + AQUASTREAMULT_PUMP_OFFSET);
+ priv->speed_input[2] = get_unaligned_be16(data + AQUASTREAMULT_PRESSURE_OFFSET);
+ priv->speed_input[3] = get_unaligned_be16(data + AQUASTREAMULT_FLOW_SENSOR_OFFSET);
+
+ priv->power_input[1] = get_unaligned_be16(data + AQUASTREAMULT_PUMP_POWER) * 10000;
+
+ priv->voltage_input[1] = get_unaligned_be16(data + AQUASTREAMULT_PUMP_VOLTAGE) * 10;
+
+ priv->current_input[1] = get_unaligned_be16(data + AQUASTREAMULT_PUMP_CURRENT);
+ break;
case d5next:
priv->voltage_input[2] = get_unaligned_be16(data + D5NEXT_5V_VOLTAGE) * 10;
priv->voltage_input[3] = get_unaligned_be16(data + D5NEXT_12V_VOLTAGE) * 10;
break;
- case quadro:
- priv->speed_input[4] = get_unaligned_be16(data + priv->flow_sensor_offset);
- break;
case highflownext:
/* If external temp sensor is not connected, its power reading is also N/A */
if (priv->temp_input[1] == -ENODATA)
@@ -854,7 +1112,6 @@ static int aqc_raw_event(struct hid_device *hdev, struct hid_report *report, u8
priv->voltage_input[1] =
get_unaligned_be16(data + HIGHFLOWNEXT_5V_VOLTAGE_USB) * 10;
- priv->speed_input[0] = get_unaligned_be16(data + HIGHFLOWNEXT_FLOW);
priv->speed_input[1] = get_unaligned_be16(data + HIGHFLOWNEXT_WATER_QUALITY);
priv->speed_input[2] = get_unaligned_be16(data + HIGHFLOWNEXT_CONDUCTIVITY);
break;
@@ -907,9 +1164,13 @@ static void aqc_debugfs_init(struct aqc_data *priv)
dev_name(&priv->hdev->dev));
priv->debugfs = debugfs_create_dir(name, NULL);
- debugfs_create_file("serial_number", 0444, priv->debugfs, priv, &serial_number_fops);
- debugfs_create_file("firmware_version", 0444, priv->debugfs, priv, &firmware_version_fops);
+ if (priv->serial_number_start_offset != 0)
+ debugfs_create_file("serial_number", 0444, priv->debugfs, priv,
+ &serial_number_fops);
+ if (priv->firmware_version_offset != 0)
+ debugfs_create_file("firmware_version", 0444, priv->debugfs, priv,
+ &firmware_version_fops);
if (priv->power_cycle_count_offset != 0)
debugfs_create_file("power_cycles", 0444, priv->debugfs, priv, &power_cycles_fops);
}
@@ -949,6 +1210,45 @@ static int aqc_probe(struct hid_device *hdev, const struct hid_device_id *id)
goto fail_and_stop;
switch (hdev->product) {
+ case USB_PRODUCT_ID_AQUAERO:
+ /*
+ * Aquaero presents itself as three HID devices under the same product ID:
+ * "aquaero keyboard/mouse", "aquaero System Control" and "aquaero Device",
+ * which is the one we want to communicate with. Unlike most other Aquacomputer
+ * devices, Aquaero does not return meaningful data when explicitly requested
+ * using GET_FEATURE_REPORT.
+ *
+ * The difference between "aquaero Device" and the other two is in the collections
+ * they present. The two other devices have the type of the second element in
+ * their respective collections set to 1, while the real device has it set to 0.
+ */
+ if (hdev->collection[1].type != 0) {
+ ret = -ENODEV;
+ goto fail_and_close;
+ }
+
+ priv->kind = aquaero;
+
+ priv->num_fans = AQUAERO_NUM_FANS;
+ priv->fan_sensor_offsets = aquaero_sensor_fan_offsets;
+
+ priv->num_temp_sensors = AQUAERO_NUM_SENSORS;
+ priv->temp_sensor_start_offset = AQUAERO_SENSOR_START;
+ priv->num_virtual_temp_sensors = AQUAERO_NUM_VIRTUAL_SENSORS;
+ priv->virtual_temp_sensor_start_offset = AQUAERO_VIRTUAL_SENSOR_START;
+ priv->num_calc_virt_temp_sensors = AQUAERO_NUM_CALC_VIRTUAL_SENSORS;
+ priv->calc_virt_temp_sensor_start_offset = AQUAERO_CALC_VIRTUAL_SENSOR_START;
+ priv->num_flow_sensors = AQUAERO_NUM_FLOW_SENSORS;
+ priv->flow_sensors_start_offset = AQUAERO_FLOW_SENSORS_START;
+
+ priv->temp_label = label_temp_sensors;
+ priv->virtual_temp_label = label_virtual_temp_sensors;
+ priv->calc_virt_temp_label = label_aquaero_calc_temp_sensors;
+ priv->speed_label = label_aquaero_speeds;
+ priv->power_label = label_fan_power;
+ priv->voltage_label = label_fan_voltage;
+ priv->current_label = label_fan_current;
+ break;
case USB_PRODUCT_ID_D5NEXT:
priv->kind = d5next;
@@ -1034,11 +1334,13 @@ static int aqc_probe(struct hid_device *hdev, const struct hid_device_id *id)
priv->temp_sensor_start_offset = QUADRO_SENSOR_START;
priv->num_virtual_temp_sensors = QUADRO_NUM_VIRTUAL_SENSORS;
priv->virtual_temp_sensor_start_offset = QUADRO_VIRTUAL_SENSORS_START;
+ priv->num_flow_sensors = QUADRO_NUM_FLOW_SENSORS;
+ priv->flow_sensors_start_offset = QUADRO_FLOW_SENSOR_OFFSET;
+
priv->temp_ctrl_offset = QUADRO_TEMP_CTRL_OFFSET;
priv->buffer_size = QUADRO_CTRL_REPORT_SIZE;
- priv->flow_sensor_offset = QUADRO_FLOW_SENSOR_OFFSET;
priv->flow_pulses_ctrl_offset = QUADRO_FLOW_PULSES_CTRL_OFFSET;
priv->power_cycle_count_offset = QUADRO_POWER_CYCLES;
@@ -1056,6 +1358,8 @@ static int aqc_probe(struct hid_device *hdev, const struct hid_device_id *id)
priv->num_temp_sensors = HIGHFLOWNEXT_NUM_SENSORS;
priv->temp_sensor_start_offset = HIGHFLOWNEXT_SENSOR_START;
+ priv->num_flow_sensors = HIGHFLOWNEXT_NUM_FLOW_SENSORS;
+ priv->flow_sensors_start_offset = HIGHFLOWNEXT_FLOW;
priv->power_cycle_count_offset = QUADRO_POWER_CYCLES;
@@ -1064,10 +1368,57 @@ static int aqc_probe(struct hid_device *hdev, const struct hid_device_id *id)
priv->power_label = label_highflownext_power;
priv->voltage_label = label_highflownext_voltage;
break;
+ case USB_PRODUCT_ID_AQUASTREAMULT:
+ priv->kind = aquastreamult;
+
+ priv->num_fans = AQUASTREAMULT_NUM_FANS;
+ priv->fan_sensor_offsets = aquastreamult_sensor_fan_offsets;
+
+ priv->num_temp_sensors = AQUASTREAMULT_NUM_SENSORS;
+ priv->temp_sensor_start_offset = AQUASTREAMULT_SENSOR_START;
+
+ priv->temp_label = label_aquastreamult_temp;
+ priv->speed_label = label_aquastreamult_speeds;
+ priv->power_label = label_aquastreamult_power;
+ priv->voltage_label = label_aquastreamult_voltages;
+ priv->current_label = label_aquastreamult_current;
+ break;
+ case USB_PRODUCT_ID_POWERADJUST3:
+ priv->kind = poweradjust3;
+
+ priv->num_fans = 0;
+
+ priv->num_temp_sensors = POWERADJUST3_NUM_SENSORS;
+ priv->temp_sensor_start_offset = POWERADJUST3_SENSOR_START;
+ priv->buffer_size = POWERADJUST3_SENSOR_REPORT_SIZE;
+
+ priv->temp_label = label_poweradjust3_temp_sensors;
+ break;
default:
break;
}
+ switch (priv->kind) {
+ case aquaero:
+ priv->serial_number_start_offset = AQUAERO_SERIAL_START;
+ priv->firmware_version_offset = AQUAERO_FIRMWARE_VERSION;
+
+ priv->fan_structure = &aqc_aquaero_fan_structure;
+ break;
+ case poweradjust3:
+ priv->status_report_id = POWERADJUST3_STATUS_REPORT_ID;
+ break;
+ default:
+ priv->serial_number_start_offset = AQC_SERIAL_START;
+ priv->firmware_version_offset = AQC_FIRMWARE_VERSION;
+
+ if (priv->kind == aquastreamult)
+ priv->fan_structure = &aqc_aquastreamult_fan_structure;
+ else
+ priv->fan_structure = &aqc_general_fan_structure;
+ break;
+ }
+
if (priv->buffer_size != 0) {
priv->checksum_start = 0x01;
priv->checksum_length = priv->buffer_size - 3;
@@ -1115,12 +1466,15 @@ static void aqc_remove(struct hid_device *hdev)
}
static const struct hid_device_id aqc_table[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_AQUACOMPUTER, USB_PRODUCT_ID_AQUAERO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_AQUACOMPUTER, USB_PRODUCT_ID_D5NEXT) },
{ HID_USB_DEVICE(USB_VENDOR_ID_AQUACOMPUTER, USB_PRODUCT_ID_FARBWERK) },
{ HID_USB_DEVICE(USB_VENDOR_ID_AQUACOMPUTER, USB_PRODUCT_ID_FARBWERK360) },
{ HID_USB_DEVICE(USB_VENDOR_ID_AQUACOMPUTER, USB_PRODUCT_ID_OCTO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_AQUACOMPUTER, USB_PRODUCT_ID_QUADRO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_AQUACOMPUTER, USB_PRODUCT_ID_HIGHFLOWNEXT) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_AQUACOMPUTER, USB_PRODUCT_ID_AQUASTREAMULT) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_AQUACOMPUTER, USB_PRODUCT_ID_POWERADJUST3) },
{ }
};
diff --git a/drivers/hwmon/asus-ec-sensors.c b/drivers/hwmon/asus-ec-sensors.c
index a901e4e33d81..2768b7511684 100644
--- a/drivers/hwmon/asus-ec-sensors.c
+++ b/drivers/hwmon/asus-ec-sensors.c
@@ -299,6 +299,7 @@ static const struct ec_board_info board_info_pro_art_x570_creator_wifi = {
.sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB | SENSOR_TEMP_VRM |
SENSOR_TEMP_T_SENSOR | SENSOR_FAN_CPU_OPT |
SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE,
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
.family = family_amd_500_series,
};
@@ -466,6 +467,8 @@ static const struct dmi_system_id dmi_table[] = {
&board_info_strix_z690_a_gaming_wifi_d4),
DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG ZENITH II EXTREME",
&board_info_zenith_ii_extreme),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG ZENITH II EXTREME ALPHA",
+ &board_info_zenith_ii_extreme),
{},
};
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index ca7a9b373bbd..30d77f451937 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -27,6 +27,7 @@
#include <asm/msr.h>
#include <asm/processor.h>
#include <asm/cpu_device_id.h>
+#include <linux/sched/isolation.h>
#define DRVNAME "coretemp"
@@ -502,6 +503,9 @@ static int create_core_data(struct platform_device *pdev, unsigned int cpu,
u32 eax, edx;
int err, index, attr_no;
+ if (!housekeeping_cpu(cpu, HK_TYPE_MISC))
+ return 0;
+
/*
* Find attr number for sysfs:
* We map the attr number to core id of the CPU
@@ -588,66 +592,49 @@ static void coretemp_remove_core(struct platform_data *pdata, int indx)
ida_free(&pdata->ida, indx - BASE_SYSFS_ATTR_NO);
}
-static int coretemp_probe(struct platform_device *pdev)
+static int coretemp_device_add(int zoneid)
{
- struct device *dev = &pdev->dev;
+ struct platform_device *pdev;
struct platform_data *pdata;
+ int err;
/* Initialize the per-zone data structures */
- pdata = devm_kzalloc(dev, sizeof(struct platform_data), GFP_KERNEL);
+ pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return -ENOMEM;
- pdata->pkg_id = pdev->id;
+ pdata->pkg_id = zoneid;
ida_init(&pdata->ida);
- platform_set_drvdata(pdev, pdata);
-
- pdata->hwmon_dev = devm_hwmon_device_register_with_groups(dev, DRVNAME,
- pdata, NULL);
- return PTR_ERR_OR_ZERO(pdata->hwmon_dev);
-}
-static int coretemp_remove(struct platform_device *pdev)
-{
- struct platform_data *pdata = platform_get_drvdata(pdev);
- int i;
+ pdev = platform_device_alloc(DRVNAME, zoneid);
+ if (!pdev) {
+ err = -ENOMEM;
+ goto err_free_pdata;
+ }
- for (i = MAX_CORE_DATA - 1; i >= 0; --i)
- if (pdata->core_data[i])
- coretemp_remove_core(pdata, i);
+ err = platform_device_add(pdev);
+ if (err)
+ goto err_put_dev;
- ida_destroy(&pdata->ida);
+ platform_set_drvdata(pdev, pdata);
+ zone_devices[zoneid] = pdev;
return 0;
-}
-static struct platform_driver coretemp_driver = {
- .driver = {
- .name = DRVNAME,
- },
- .probe = coretemp_probe,
- .remove = coretemp_remove,
-};
+err_put_dev:
+ platform_device_put(pdev);
+err_free_pdata:
+ kfree(pdata);
+ return err;
+}
-static struct platform_device *coretemp_device_add(unsigned int cpu)
+static void coretemp_device_remove(int zoneid)
{
- int err, zoneid = topology_logical_die_id(cpu);
- struct platform_device *pdev;
-
- if (zoneid < 0)
- return ERR_PTR(-ENOMEM);
-
- pdev = platform_device_alloc(DRVNAME, zoneid);
- if (!pdev)
- return ERR_PTR(-ENOMEM);
-
- err = platform_device_add(pdev);
- if (err) {
- platform_device_put(pdev);
- return ERR_PTR(err);
- }
+ struct platform_device *pdev = zone_devices[zoneid];
+ struct platform_data *pdata = platform_get_drvdata(pdev);
- zone_devices[zoneid] = pdev;
- return pdev;
+ ida_destroy(&pdata->ida);
+ kfree(pdata);
+ platform_device_unregister(pdev);
}
static int coretemp_cpu_online(unsigned int cpu)
@@ -671,7 +658,10 @@ static int coretemp_cpu_online(unsigned int cpu)
if (!cpu_has(c, X86_FEATURE_DTHERM))
return -ENODEV;
- if (!pdev) {
+ pdata = platform_get_drvdata(pdev);
+ if (!pdata->hwmon_dev) {
+ struct device *hwmon;
+
/* Check the microcode version of the CPU */
if (chk_ucode_version(cpu))
return -EINVAL;
@@ -682,9 +672,11 @@ static int coretemp_cpu_online(unsigned int cpu)
* online. So, initialize per-pkg data structures and
* then bring this core online.
*/
- pdev = coretemp_device_add(cpu);
- if (IS_ERR(pdev))
- return PTR_ERR(pdev);
+ hwmon = hwmon_device_register_with_groups(&pdev->dev, DRVNAME,
+ pdata, NULL);
+ if (IS_ERR(hwmon))
+ return PTR_ERR(hwmon);
+ pdata->hwmon_dev = hwmon;
/*
* Check whether pkgtemp support is available.
@@ -694,7 +686,6 @@ static int coretemp_cpu_online(unsigned int cpu)
coretemp_add_core(pdev, cpu, 1);
}
- pdata = platform_get_drvdata(pdev);
/*
* Check whether a thread sibling is already online. If not add the
* interface for this CPU core.
@@ -713,18 +704,14 @@ static int coretemp_cpu_offline(unsigned int cpu)
struct temp_data *tdata;
int i, indx = -1, target;
- /*
- * Don't execute this on suspend as the device remove locks
- * up the machine.
- */
+ /* No need to tear down any interfaces for suspend */
if (cpuhp_tasks_frozen)
return 0;
/* If the physical CPU device does not exist, just return */
- if (!pdev)
- return 0;
-
pd = platform_get_drvdata(pdev);
+ if (!pd->hwmon_dev)
+ return 0;
for (i = 0; i < NUM_REAL_CORES; i++) {
if (pd->cpu_map[i] == topology_core_id(cpu)) {
@@ -756,13 +743,14 @@ static int coretemp_cpu_offline(unsigned int cpu)
}
/*
- * If all cores in this pkg are offline, remove the device. This
- * will invoke the platform driver remove function, which cleans up
- * the rest.
+ * If all cores in this pkg are offline, remove the interface.
*/
+ tdata = pd->core_data[PKG_SYSFS_ATTR_NO];
if (cpumask_empty(&pd->cpumask)) {
- zone_devices[topology_logical_die_id(cpu)] = NULL;
- platform_device_unregister(pdev);
+ if (tdata)
+ coretemp_remove_core(pd, PKG_SYSFS_ATTR_NO);
+ hwmon_device_unregister(pd->hwmon_dev);
+ pd->hwmon_dev = NULL;
return 0;
}
@@ -770,7 +758,6 @@ static int coretemp_cpu_offline(unsigned int cpu)
* Check whether this core is the target for the package
* interface. We need to assign it to some other cpu.
*/
- tdata = pd->core_data[PKG_SYSFS_ATTR_NO];
if (tdata && tdata->cpu == cpu) {
target = cpumask_first(&pd->cpumask);
mutex_lock(&tdata->update_lock);
@@ -789,7 +776,7 @@ static enum cpuhp_state coretemp_hp_online;
static int __init coretemp_init(void)
{
- int err;
+ int i, err;
/*
* CPUID.06H.EAX[0] indicates whether the CPU has thermal
@@ -805,20 +792,22 @@ static int __init coretemp_init(void)
if (!zone_devices)
return -ENOMEM;
- err = platform_driver_register(&coretemp_driver);
- if (err)
- goto outzone;
+ for (i = 0; i < max_zones; i++) {
+ err = coretemp_device_add(i);
+ if (err)
+ goto outzone;
+ }
err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "hwmon/coretemp:online",
coretemp_cpu_online, coretemp_cpu_offline);
if (err < 0)
- goto outdrv;
+ goto outzone;
coretemp_hp_online = err;
return 0;
-outdrv:
- platform_driver_unregister(&coretemp_driver);
outzone:
+ while (i--)
+ coretemp_device_remove(i);
kfree(zone_devices);
return err;
}
@@ -826,8 +815,11 @@ module_init(coretemp_init)
static void __exit coretemp_exit(void)
{
+ int i;
+
cpuhp_remove_state(coretemp_hp_online);
- platform_driver_unregister(&coretemp_driver);
+ for (i = 0; i < max_zones; i++)
+ coretemp_device_remove(i);
kfree(zone_devices);
}
module_exit(coretemp_exit)
diff --git a/drivers/hwmon/drivetemp.c b/drivers/hwmon/drivetemp.c
index 5bac2b0fc7bb..8e5759b42390 100644
--- a/drivers/hwmon/drivetemp.c
+++ b/drivers/hwmon/drivetemp.c
@@ -164,7 +164,7 @@ static int drivetemp_scsi_command(struct drivetemp_data *st,
u8 lba_low, u8 lba_mid, u8 lba_high)
{
u8 scsi_cmd[MAX_COMMAND_SIZE];
- int data_dir;
+ enum req_op op;
memset(scsi_cmd, 0, sizeof(scsi_cmd));
scsi_cmd[0] = ATA_16;
@@ -175,7 +175,7 @@ static int drivetemp_scsi_command(struct drivetemp_data *st,
* field.
*/
scsi_cmd[2] = 0x06;
- data_dir = DMA_TO_DEVICE;
+ op = REQ_OP_DRV_OUT;
} else {
scsi_cmd[1] = (4 << 1); /* PIO Data-in */
/*
@@ -183,7 +183,7 @@ static int drivetemp_scsi_command(struct drivetemp_data *st,
* field.
*/
scsi_cmd[2] = 0x0e;
- data_dir = DMA_FROM_DEVICE;
+ op = REQ_OP_DRV_IN;
}
scsi_cmd[4] = feature;
scsi_cmd[6] = 1; /* 1 sector */
@@ -192,9 +192,8 @@ static int drivetemp_scsi_command(struct drivetemp_data *st,
scsi_cmd[12] = lba_high;
scsi_cmd[14] = ata_command;
- return scsi_execute_req(st->sdev, scsi_cmd, data_dir,
- st->smartdata, ATA_SECT_SIZE, NULL, HZ, 5,
- NULL);
+ return scsi_execute_cmd(st->sdev, scsi_cmd, op, st->smartdata,
+ ATA_SECT_SIZE, HZ, 5, NULL);
}
static int drivetemp_ata_command(struct drivetemp_data *st, u8 feature,
diff --git a/drivers/hwmon/emc2305.c b/drivers/hwmon/emc2305.c
index 6ad055e5868e..f65467fbd86c 100644
--- a/drivers/hwmon/emc2305.c
+++ b/drivers/hwmon/emc2305.c
@@ -59,10 +59,11 @@ static const struct i2c_device_id emc2305_ids[] = {
MODULE_DEVICE_TABLE(i2c, emc2305_ids);
/**
- * @cdev: cooling device;
- * @curr_state: cooling current state;
- * @last_hwmon_state: last cooling state updated by hwmon subsystem;
- * @last_thermal_state: last cooling state updated by thermal subsystem;
+ * struct emc2305_cdev_data - device-specific cooling device state
+ * @cdev: cooling device
+ * @cur_state: cooling current state
+ * @last_hwmon_state: last cooling state updated by hwmon subsystem
+ * @last_thermal_state: last cooling state updated by thermal subsystem
*
* The 'last_hwmon_state' and 'last_thermal_state' fields are provided to support fan low limit
* speed feature. The purpose of this feature is to provides ability to limit fan speed
@@ -86,13 +87,14 @@ struct emc2305_cdev_data {
};
/**
- * @client: i2c client;
- * @hwmon_dev: hwmon device;
- * @max_state: maximum cooling state of the cooling device;
- * @pwm_num: number of PWM channels;
- * @pwm_separate: separate PWM settings for every channel;
- * @pwm_min: array of minimum PWM per channel;
- * @cdev_data: array of cooling devices data;
+ * struct emc2305_data - device-specific data
+ * @client: i2c client
+ * @hwmon_dev: hwmon device
+ * @max_state: maximum cooling state of the cooling device
+ * @pwm_num: number of PWM channels
+ * @pwm_separate: separate PWM settings for every channel
+ * @pwm_min: array of minimum PWM per channel
+ * @cdev_data: array of cooling devices data
*/
struct emc2305_data {
struct i2c_client *client;
diff --git a/drivers/hwmon/ftsteutates.c b/drivers/hwmon/ftsteutates.c
index f5b8e724a8ca..25afd9167a34 100644
--- a/drivers/hwmon/ftsteutates.c
+++ b/drivers/hwmon/ftsteutates.c
@@ -6,17 +6,14 @@
* Thilo Cestonaro <thilo.cestonaro@ts.fujitsu.com>
*/
#include <linux/err.h>
-#include <linux/fs.h>
#include <linux/hwmon.h>
-#include <linux/hwmon-sysfs.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/jiffies.h>
+#include <linux/math.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
-#include <linux/sysfs.h>
-#include <linux/uaccess.h>
#include <linux/watchdog.h>
#define FTS_DEVICE_ID_REG 0x0000
@@ -48,6 +45,8 @@
#define FTS_NO_TEMP_SENSORS 0x10
#define FTS_NO_VOLT_SENSORS 0x04
+#define FTS_FAN_SOURCE_INVALID 0xff
+
static const unsigned short normal_i2c[] = { 0x73, I2C_CLIENT_END };
static const struct i2c_device_id fts_id[] = {
@@ -187,7 +186,7 @@ static int fts_update_device(struct fts_data *data)
data->fan_source[i] = err;
} else {
data->fan_input[i] = 0;
- data->fan_source[i] = 0;
+ data->fan_source[i] = FTS_FAN_SOURCE_INVALID;
}
}
@@ -336,373 +335,243 @@ static int fts_watchdog_init(struct fts_data *data)
/* max timeout 255 minutes. */
data->wdd.max_hw_heartbeat_ms = 0xFF * 60 * MSEC_PER_SEC;
- return watchdog_register_device(&data->wdd);
-}
-
-/*****************************************************************************/
-/* SysFS handler functions */
-/*****************************************************************************/
-static ssize_t in_value_show(struct device *dev,
- struct device_attribute *devattr, char *buf)
-{
- struct fts_data *data = dev_get_drvdata(dev);
- int index = to_sensor_dev_attr(devattr)->index;
- int err;
-
- err = fts_update_device(data);
- if (err < 0)
- return err;
-
- return sprintf(buf, "%u\n", data->volt[index]);
+ return devm_watchdog_register_device(&data->client->dev, &data->wdd);
}
-static ssize_t temp_value_show(struct device *dev,
- struct device_attribute *devattr, char *buf)
+static umode_t fts_is_visible(const void *devdata, enum hwmon_sensor_types type, u32 attr,
+ int channel)
{
- struct fts_data *data = dev_get_drvdata(dev);
- int index = to_sensor_dev_attr(devattr)->index;
- int err;
-
- err = fts_update_device(data);
- if (err < 0)
- return err;
-
- return sprintf(buf, "%u\n", data->temp_input[index]);
-}
-
-static ssize_t temp_fault_show(struct device *dev,
- struct device_attribute *devattr, char *buf)
-{
- struct fts_data *data = dev_get_drvdata(dev);
- int index = to_sensor_dev_attr(devattr)->index;
- int err;
-
- err = fts_update_device(data);
- if (err < 0)
- return err;
-
- /* 00h Temperature = Sensor Error */
- return sprintf(buf, "%d\n", data->temp_input[index] == 0);
-}
-
-static ssize_t temp_alarm_show(struct device *dev,
- struct device_attribute *devattr, char *buf)
-{
- struct fts_data *data = dev_get_drvdata(dev);
- int index = to_sensor_dev_attr(devattr)->index;
- int err;
-
- err = fts_update_device(data);
- if (err < 0)
- return err;
+ switch (type) {
+ case hwmon_temp:
+ switch (attr) {
+ case hwmon_temp_input:
+ case hwmon_temp_fault:
+ return 0444;
+ case hwmon_temp_alarm:
+ return 0644;
+ default:
+ break;
+ }
+ break;
+ case hwmon_fan:
+ switch (attr) {
+ case hwmon_fan_input:
+ case hwmon_fan_fault:
+ return 0444;
+ case hwmon_fan_alarm:
+ return 0644;
+ default:
+ break;
+ }
+ break;
+ case hwmon_pwm:
+ case hwmon_in:
+ return 0444;
+ default:
+ break;
+ }
- return sprintf(buf, "%u\n", !!(data->temp_alarm & BIT(index)));
+ return 0;
}
-static ssize_t
-temp_alarm_store(struct device *dev, struct device_attribute *devattr,
- const char *buf, size_t count)
+static int fts_read(struct device *dev, enum hwmon_sensor_types type, u32 attr, int channel,
+ long *val)
{
struct fts_data *data = dev_get_drvdata(dev);
- int index = to_sensor_dev_attr(devattr)->index;
- long ret;
+ int ret = fts_update_device(data);
- ret = fts_update_device(data);
if (ret < 0)
return ret;
- if (kstrtoul(buf, 10, &ret) || ret != 0)
- return -EINVAL;
-
- mutex_lock(&data->update_lock);
- ret = fts_read_byte(data->client, FTS_REG_TEMP_CONTROL(index));
- if (ret < 0)
- goto error;
-
- ret = fts_write_byte(data->client, FTS_REG_TEMP_CONTROL(index),
- ret | 0x1);
- if (ret < 0)
- goto error;
+ switch (type) {
+ case hwmon_temp:
+ switch (attr) {
+ case hwmon_temp_input:
+ *val = (data->temp_input[channel] - 64) * 1000;
- data->valid = false;
- ret = count;
-error:
- mutex_unlock(&data->update_lock);
- return ret;
-}
-
-static ssize_t fan_value_show(struct device *dev,
- struct device_attribute *devattr, char *buf)
-{
- struct fts_data *data = dev_get_drvdata(dev);
- int index = to_sensor_dev_attr(devattr)->index;
- int err;
-
- err = fts_update_device(data);
- if (err < 0)
- return err;
+ return 0;
+ case hwmon_temp_alarm:
+ *val = !!(data->temp_alarm & BIT(channel));
- return sprintf(buf, "%u\n", data->fan_input[index]);
-}
+ return 0;
+ case hwmon_temp_fault:
+ /* 00h Temperature = Sensor Error */;
+ *val = (data->temp_input[channel] == 0);
-static ssize_t fan_source_show(struct device *dev,
- struct device_attribute *devattr, char *buf)
-{
- struct fts_data *data = dev_get_drvdata(dev);
- int index = to_sensor_dev_attr(devattr)->index;
- int err;
+ return 0;
+ default:
+ break;
+ }
+ break;
+ case hwmon_fan:
+ switch (attr) {
+ case hwmon_fan_input:
+ *val = data->fan_input[channel] * 60;
- err = fts_update_device(data);
- if (err < 0)
- return err;
+ return 0;
+ case hwmon_fan_alarm:
+ *val = !!(data->fan_alarm & BIT(channel));
- return sprintf(buf, "%u\n", data->fan_source[index]);
-}
+ return 0;
+ case hwmon_fan_fault:
+ *val = !(data->fan_present & BIT(channel));
-static ssize_t fan_alarm_show(struct device *dev,
- struct device_attribute *devattr, char *buf)
-{
- struct fts_data *data = dev_get_drvdata(dev);
- int index = to_sensor_dev_attr(devattr)->index;
- int err;
+ return 0;
+ default:
+ break;
+ }
+ break;
+ case hwmon_pwm:
+ switch (attr) {
+ case hwmon_pwm_auto_channels_temp:
+ if (data->fan_source[channel] == FTS_FAN_SOURCE_INVALID)
+ *val = 0;
+ else
+ *val = BIT(data->fan_source[channel]);
+
+ return 0;
+ default:
+ break;
+ }
+ break;
+ case hwmon_in:
+ switch (attr) {
+ case hwmon_in_input:
+ *val = DIV_ROUND_CLOSEST(data->volt[channel] * 3300, 255);
- err = fts_update_device(data);
- if (err < 0)
- return err;
+ return 0;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
- return sprintf(buf, "%d\n", !!(data->fan_alarm & BIT(index)));
+ return -EOPNOTSUPP;
}
-static ssize_t
-fan_alarm_store(struct device *dev, struct device_attribute *devattr,
- const char *buf, size_t count)
+static int fts_write(struct device *dev, enum hwmon_sensor_types type, u32 attr, int channel,
+ long val)
{
struct fts_data *data = dev_get_drvdata(dev);
- int index = to_sensor_dev_attr(devattr)->index;
- long ret;
+ int ret = fts_update_device(data);
- ret = fts_update_device(data);
if (ret < 0)
return ret;
- if (kstrtoul(buf, 10, &ret) || ret != 0)
- return -EINVAL;
-
- mutex_lock(&data->update_lock);
- ret = fts_read_byte(data->client, FTS_REG_FAN_CONTROL(index));
- if (ret < 0)
- goto error;
-
- ret = fts_write_byte(data->client, FTS_REG_FAN_CONTROL(index),
- ret | 0x1);
- if (ret < 0)
- goto error;
+ switch (type) {
+ case hwmon_temp:
+ switch (attr) {
+ case hwmon_temp_alarm:
+ if (val)
+ return -EINVAL;
+
+ mutex_lock(&data->update_lock);
+ ret = fts_read_byte(data->client, FTS_REG_TEMP_CONTROL(channel));
+ if (ret >= 0)
+ ret = fts_write_byte(data->client, FTS_REG_TEMP_CONTROL(channel),
+ ret | 0x1);
+ if (ret >= 0)
+ data->valid = false;
+
+ mutex_unlock(&data->update_lock);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+ default:
+ break;
+ }
+ break;
+ case hwmon_fan:
+ switch (attr) {
+ case hwmon_fan_alarm:
+ if (val)
+ return -EINVAL;
+
+ mutex_lock(&data->update_lock);
+ ret = fts_read_byte(data->client, FTS_REG_FAN_CONTROL(channel));
+ if (ret >= 0)
+ ret = fts_write_byte(data->client, FTS_REG_FAN_CONTROL(channel),
+ ret | 0x1);
+ if (ret >= 0)
+ data->valid = false;
+
+ mutex_unlock(&data->update_lock);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
- data->valid = false;
- ret = count;
-error:
- mutex_unlock(&data->update_lock);
- return ret;
+ return -EOPNOTSUPP;
}
-/*****************************************************************************/
-/* SysFS structs */
-/*****************************************************************************/
-
-/* Temperature sensors */
-static SENSOR_DEVICE_ATTR_RO(temp1_input, temp_value, 0);
-static SENSOR_DEVICE_ATTR_RO(temp2_input, temp_value, 1);
-static SENSOR_DEVICE_ATTR_RO(temp3_input, temp_value, 2);
-static SENSOR_DEVICE_ATTR_RO(temp4_input, temp_value, 3);
-static SENSOR_DEVICE_ATTR_RO(temp5_input, temp_value, 4);
-static SENSOR_DEVICE_ATTR_RO(temp6_input, temp_value, 5);
-static SENSOR_DEVICE_ATTR_RO(temp7_input, temp_value, 6);
-static SENSOR_DEVICE_ATTR_RO(temp8_input, temp_value, 7);
-static SENSOR_DEVICE_ATTR_RO(temp9_input, temp_value, 8);
-static SENSOR_DEVICE_ATTR_RO(temp10_input, temp_value, 9);
-static SENSOR_DEVICE_ATTR_RO(temp11_input, temp_value, 10);
-static SENSOR_DEVICE_ATTR_RO(temp12_input, temp_value, 11);
-static SENSOR_DEVICE_ATTR_RO(temp13_input, temp_value, 12);
-static SENSOR_DEVICE_ATTR_RO(temp14_input, temp_value, 13);
-static SENSOR_DEVICE_ATTR_RO(temp15_input, temp_value, 14);
-static SENSOR_DEVICE_ATTR_RO(temp16_input, temp_value, 15);
-
-static SENSOR_DEVICE_ATTR_RO(temp1_fault, temp_fault, 0);
-static SENSOR_DEVICE_ATTR_RO(temp2_fault, temp_fault, 1);
-static SENSOR_DEVICE_ATTR_RO(temp3_fault, temp_fault, 2);
-static SENSOR_DEVICE_ATTR_RO(temp4_fault, temp_fault, 3);
-static SENSOR_DEVICE_ATTR_RO(temp5_fault, temp_fault, 4);
-static SENSOR_DEVICE_ATTR_RO(temp6_fault, temp_fault, 5);
-static SENSOR_DEVICE_ATTR_RO(temp7_fault, temp_fault, 6);
-static SENSOR_DEVICE_ATTR_RO(temp8_fault, temp_fault, 7);
-static SENSOR_DEVICE_ATTR_RO(temp9_fault, temp_fault, 8);
-static SENSOR_DEVICE_ATTR_RO(temp10_fault, temp_fault, 9);
-static SENSOR_DEVICE_ATTR_RO(temp11_fault, temp_fault, 10);
-static SENSOR_DEVICE_ATTR_RO(temp12_fault, temp_fault, 11);
-static SENSOR_DEVICE_ATTR_RO(temp13_fault, temp_fault, 12);
-static SENSOR_DEVICE_ATTR_RO(temp14_fault, temp_fault, 13);
-static SENSOR_DEVICE_ATTR_RO(temp15_fault, temp_fault, 14);
-static SENSOR_DEVICE_ATTR_RO(temp16_fault, temp_fault, 15);
-
-static SENSOR_DEVICE_ATTR_RW(temp1_alarm, temp_alarm, 0);
-static SENSOR_DEVICE_ATTR_RW(temp2_alarm, temp_alarm, 1);
-static SENSOR_DEVICE_ATTR_RW(temp3_alarm, temp_alarm, 2);
-static SENSOR_DEVICE_ATTR_RW(temp4_alarm, temp_alarm, 3);
-static SENSOR_DEVICE_ATTR_RW(temp5_alarm, temp_alarm, 4);
-static SENSOR_DEVICE_ATTR_RW(temp6_alarm, temp_alarm, 5);
-static SENSOR_DEVICE_ATTR_RW(temp7_alarm, temp_alarm, 6);
-static SENSOR_DEVICE_ATTR_RW(temp8_alarm, temp_alarm, 7);
-static SENSOR_DEVICE_ATTR_RW(temp9_alarm, temp_alarm, 8);
-static SENSOR_DEVICE_ATTR_RW(temp10_alarm, temp_alarm, 9);
-static SENSOR_DEVICE_ATTR_RW(temp11_alarm, temp_alarm, 10);
-static SENSOR_DEVICE_ATTR_RW(temp12_alarm, temp_alarm, 11);
-static SENSOR_DEVICE_ATTR_RW(temp13_alarm, temp_alarm, 12);
-static SENSOR_DEVICE_ATTR_RW(temp14_alarm, temp_alarm, 13);
-static SENSOR_DEVICE_ATTR_RW(temp15_alarm, temp_alarm, 14);
-static SENSOR_DEVICE_ATTR_RW(temp16_alarm, temp_alarm, 15);
-
-static struct attribute *fts_temp_attrs[] = {
- &sensor_dev_attr_temp1_input.dev_attr.attr,
- &sensor_dev_attr_temp2_input.dev_attr.attr,
- &sensor_dev_attr_temp3_input.dev_attr.attr,
- &sensor_dev_attr_temp4_input.dev_attr.attr,
- &sensor_dev_attr_temp5_input.dev_attr.attr,
- &sensor_dev_attr_temp6_input.dev_attr.attr,
- &sensor_dev_attr_temp7_input.dev_attr.attr,
- &sensor_dev_attr_temp8_input.dev_attr.attr,
- &sensor_dev_attr_temp9_input.dev_attr.attr,
- &sensor_dev_attr_temp10_input.dev_attr.attr,
- &sensor_dev_attr_temp11_input.dev_attr.attr,
- &sensor_dev_attr_temp12_input.dev_attr.attr,
- &sensor_dev_attr_temp13_input.dev_attr.attr,
- &sensor_dev_attr_temp14_input.dev_attr.attr,
- &sensor_dev_attr_temp15_input.dev_attr.attr,
- &sensor_dev_attr_temp16_input.dev_attr.attr,
-
- &sensor_dev_attr_temp1_fault.dev_attr.attr,
- &sensor_dev_attr_temp2_fault.dev_attr.attr,
- &sensor_dev_attr_temp3_fault.dev_attr.attr,
- &sensor_dev_attr_temp4_fault.dev_attr.attr,
- &sensor_dev_attr_temp5_fault.dev_attr.attr,
- &sensor_dev_attr_temp6_fault.dev_attr.attr,
- &sensor_dev_attr_temp7_fault.dev_attr.attr,
- &sensor_dev_attr_temp8_fault.dev_attr.attr,
- &sensor_dev_attr_temp9_fault.dev_attr.attr,
- &sensor_dev_attr_temp10_fault.dev_attr.attr,
- &sensor_dev_attr_temp11_fault.dev_attr.attr,
- &sensor_dev_attr_temp12_fault.dev_attr.attr,
- &sensor_dev_attr_temp13_fault.dev_attr.attr,
- &sensor_dev_attr_temp14_fault.dev_attr.attr,
- &sensor_dev_attr_temp15_fault.dev_attr.attr,
- &sensor_dev_attr_temp16_fault.dev_attr.attr,
-
- &sensor_dev_attr_temp1_alarm.dev_attr.attr,
- &sensor_dev_attr_temp2_alarm.dev_attr.attr,
- &sensor_dev_attr_temp3_alarm.dev_attr.attr,
- &sensor_dev_attr_temp4_alarm.dev_attr.attr,
- &sensor_dev_attr_temp5_alarm.dev_attr.attr,
- &sensor_dev_attr_temp6_alarm.dev_attr.attr,
- &sensor_dev_attr_temp7_alarm.dev_attr.attr,
- &sensor_dev_attr_temp8_alarm.dev_attr.attr,
- &sensor_dev_attr_temp9_alarm.dev_attr.attr,
- &sensor_dev_attr_temp10_alarm.dev_attr.attr,
- &sensor_dev_attr_temp11_alarm.dev_attr.attr,
- &sensor_dev_attr_temp12_alarm.dev_attr.attr,
- &sensor_dev_attr_temp13_alarm.dev_attr.attr,
- &sensor_dev_attr_temp14_alarm.dev_attr.attr,
- &sensor_dev_attr_temp15_alarm.dev_attr.attr,
- &sensor_dev_attr_temp16_alarm.dev_attr.attr,
- NULL
-};
-
-/* Fans */
-static SENSOR_DEVICE_ATTR_RO(fan1_input, fan_value, 0);
-static SENSOR_DEVICE_ATTR_RO(fan2_input, fan_value, 1);
-static SENSOR_DEVICE_ATTR_RO(fan3_input, fan_value, 2);
-static SENSOR_DEVICE_ATTR_RO(fan4_input, fan_value, 3);
-static SENSOR_DEVICE_ATTR_RO(fan5_input, fan_value, 4);
-static SENSOR_DEVICE_ATTR_RO(fan6_input, fan_value, 5);
-static SENSOR_DEVICE_ATTR_RO(fan7_input, fan_value, 6);
-static SENSOR_DEVICE_ATTR_RO(fan8_input, fan_value, 7);
-
-static SENSOR_DEVICE_ATTR_RO(fan1_source, fan_source, 0);
-static SENSOR_DEVICE_ATTR_RO(fan2_source, fan_source, 1);
-static SENSOR_DEVICE_ATTR_RO(fan3_source, fan_source, 2);
-static SENSOR_DEVICE_ATTR_RO(fan4_source, fan_source, 3);
-static SENSOR_DEVICE_ATTR_RO(fan5_source, fan_source, 4);
-static SENSOR_DEVICE_ATTR_RO(fan6_source, fan_source, 5);
-static SENSOR_DEVICE_ATTR_RO(fan7_source, fan_source, 6);
-static SENSOR_DEVICE_ATTR_RO(fan8_source, fan_source, 7);
-
-static SENSOR_DEVICE_ATTR_RW(fan1_alarm, fan_alarm, 0);
-static SENSOR_DEVICE_ATTR_RW(fan2_alarm, fan_alarm, 1);
-static SENSOR_DEVICE_ATTR_RW(fan3_alarm, fan_alarm, 2);
-static SENSOR_DEVICE_ATTR_RW(fan4_alarm, fan_alarm, 3);
-static SENSOR_DEVICE_ATTR_RW(fan5_alarm, fan_alarm, 4);
-static SENSOR_DEVICE_ATTR_RW(fan6_alarm, fan_alarm, 5);
-static SENSOR_DEVICE_ATTR_RW(fan7_alarm, fan_alarm, 6);
-static SENSOR_DEVICE_ATTR_RW(fan8_alarm, fan_alarm, 7);
-
-static struct attribute *fts_fan_attrs[] = {
- &sensor_dev_attr_fan1_input.dev_attr.attr,
- &sensor_dev_attr_fan2_input.dev_attr.attr,
- &sensor_dev_attr_fan3_input.dev_attr.attr,
- &sensor_dev_attr_fan4_input.dev_attr.attr,
- &sensor_dev_attr_fan5_input.dev_attr.attr,
- &sensor_dev_attr_fan6_input.dev_attr.attr,
- &sensor_dev_attr_fan7_input.dev_attr.attr,
- &sensor_dev_attr_fan8_input.dev_attr.attr,
-
- &sensor_dev_attr_fan1_source.dev_attr.attr,
- &sensor_dev_attr_fan2_source.dev_attr.attr,
- &sensor_dev_attr_fan3_source.dev_attr.attr,
- &sensor_dev_attr_fan4_source.dev_attr.attr,
- &sensor_dev_attr_fan5_source.dev_attr.attr,
- &sensor_dev_attr_fan6_source.dev_attr.attr,
- &sensor_dev_attr_fan7_source.dev_attr.attr,
- &sensor_dev_attr_fan8_source.dev_attr.attr,
-
- &sensor_dev_attr_fan1_alarm.dev_attr.attr,
- &sensor_dev_attr_fan2_alarm.dev_attr.attr,
- &sensor_dev_attr_fan3_alarm.dev_attr.attr,
- &sensor_dev_attr_fan4_alarm.dev_attr.attr,
- &sensor_dev_attr_fan5_alarm.dev_attr.attr,
- &sensor_dev_attr_fan6_alarm.dev_attr.attr,
- &sensor_dev_attr_fan7_alarm.dev_attr.attr,
- &sensor_dev_attr_fan8_alarm.dev_attr.attr,
- NULL
+static const struct hwmon_ops fts_ops = {
+ .is_visible = fts_is_visible,
+ .read = fts_read,
+ .write = fts_write,
};
-/* Voltages */
-static SENSOR_DEVICE_ATTR_RO(in1_input, in_value, 0);
-static SENSOR_DEVICE_ATTR_RO(in2_input, in_value, 1);
-static SENSOR_DEVICE_ATTR_RO(in3_input, in_value, 2);
-static SENSOR_DEVICE_ATTR_RO(in4_input, in_value, 3);
-static struct attribute *fts_voltage_attrs[] = {
- &sensor_dev_attr_in1_input.dev_attr.attr,
- &sensor_dev_attr_in2_input.dev_attr.attr,
- &sensor_dev_attr_in3_input.dev_attr.attr,
- &sensor_dev_attr_in4_input.dev_attr.attr,
+static const struct hwmon_channel_info *fts_info[] = {
+ HWMON_CHANNEL_INFO(chip, HWMON_C_REGISTER_TZ),
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT | HWMON_T_ALARM | HWMON_T_FAULT,
+ HWMON_T_INPUT | HWMON_T_ALARM | HWMON_T_FAULT,
+ HWMON_T_INPUT | HWMON_T_ALARM | HWMON_T_FAULT,
+ HWMON_T_INPUT | HWMON_T_ALARM | HWMON_T_FAULT,
+ HWMON_T_INPUT | HWMON_T_ALARM | HWMON_T_FAULT,
+ HWMON_T_INPUT | HWMON_T_ALARM | HWMON_T_FAULT,
+ HWMON_T_INPUT | HWMON_T_ALARM | HWMON_T_FAULT,
+ HWMON_T_INPUT | HWMON_T_ALARM | HWMON_T_FAULT,
+ HWMON_T_INPUT | HWMON_T_ALARM | HWMON_T_FAULT,
+ HWMON_T_INPUT | HWMON_T_ALARM | HWMON_T_FAULT,
+ HWMON_T_INPUT | HWMON_T_ALARM | HWMON_T_FAULT,
+ HWMON_T_INPUT | HWMON_T_ALARM | HWMON_T_FAULT,
+ HWMON_T_INPUT | HWMON_T_ALARM | HWMON_T_FAULT,
+ HWMON_T_INPUT | HWMON_T_ALARM | HWMON_T_FAULT,
+ HWMON_T_INPUT | HWMON_T_ALARM | HWMON_T_FAULT,
+ HWMON_T_INPUT | HWMON_T_ALARM | HWMON_T_FAULT
+ ),
+ HWMON_CHANNEL_INFO(fan,
+ HWMON_F_INPUT | HWMON_F_ALARM | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_ALARM | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_ALARM | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_ALARM | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_ALARM | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_ALARM | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_ALARM | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_ALARM | HWMON_F_FAULT
+ ),
+ HWMON_CHANNEL_INFO(pwm,
+ HWMON_PWM_AUTO_CHANNELS_TEMP,
+ HWMON_PWM_AUTO_CHANNELS_TEMP,
+ HWMON_PWM_AUTO_CHANNELS_TEMP,
+ HWMON_PWM_AUTO_CHANNELS_TEMP,
+ HWMON_PWM_AUTO_CHANNELS_TEMP,
+ HWMON_PWM_AUTO_CHANNELS_TEMP,
+ HWMON_PWM_AUTO_CHANNELS_TEMP,
+ HWMON_PWM_AUTO_CHANNELS_TEMP
+ ),
+ HWMON_CHANNEL_INFO(in,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT,
+ HWMON_I_INPUT
+ ),
NULL
};
-static const struct attribute_group fts_voltage_attr_group = {
- .attrs = fts_voltage_attrs
-};
-
-static const struct attribute_group fts_temp_attr_group = {
- .attrs = fts_temp_attrs
-};
-
-static const struct attribute_group fts_fan_attr_group = {
- .attrs = fts_fan_attrs
-};
-
-static const struct attribute_group *fts_attr_groups[] = {
- &fts_voltage_attr_group,
- &fts_temp_attr_group,
- &fts_fan_attr_group,
- NULL
+static const struct hwmon_chip_info fts_chip_info = {
+ .ops = &fts_ops,
+ .info = fts_info,
};
/*****************************************************************************/
@@ -744,13 +613,6 @@ static int fts_detect(struct i2c_client *client,
return 0;
}
-static void fts_remove(struct i2c_client *client)
-{
- struct fts_data *data = dev_get_drvdata(&client->dev);
-
- watchdog_unregister_device(&data->wdd);
-}
-
static int fts_probe(struct i2c_client *client)
{
u8 revision;
@@ -793,10 +655,8 @@ static int fts_probe(struct i2c_client *client)
return err;
revision = err;
- hwmon_dev = devm_hwmon_device_register_with_groups(&client->dev,
- "ftsteutates",
- data,
- fts_attr_groups);
+ hwmon_dev = devm_hwmon_device_register_with_info(&client->dev, "ftsteutates", data,
+ &fts_chip_info, NULL);
if (IS_ERR(hwmon_dev))
return PTR_ERR(hwmon_dev);
@@ -819,7 +679,6 @@ static struct i2c_driver fts_driver = {
},
.id_table = fts_id,
.probe_new = fts_probe,
- .remove = fts_remove,
.detect = fts_detect,
.address_list = normal_i2c,
};
diff --git a/drivers/hwmon/gxp-fan-ctrl.c b/drivers/hwmon/gxp-fan-ctrl.c
new file mode 100644
index 000000000000..0014b8b0fd41
--- /dev/null
+++ b/drivers/hwmon/gxp-fan-ctrl.c
@@ -0,0 +1,253 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2022 Hewlett-Packard Enterprise Development Company, L.P. */
+
+#include <linux/bits.h>
+#include <linux/err.h>
+#include <linux/hwmon.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#define OFS_FAN_INST 0 /* Is 0 because plreg base will be set at INST */
+#define OFS_FAN_FAIL 2 /* Is 2 bytes after base */
+#define OFS_SEVSTAT 0 /* Is 0 because fn2 base will be set at SEVSTAT */
+#define POWER_BIT 24
+
+struct gxp_fan_ctrl_drvdata {
+ void __iomem *base;
+ void __iomem *plreg;
+ void __iomem *fn2;
+};
+
+static bool fan_installed(struct device *dev, int fan)
+{
+ struct gxp_fan_ctrl_drvdata *drvdata = dev_get_drvdata(dev);
+ u8 val;
+
+ val = readb(drvdata->plreg + OFS_FAN_INST);
+
+ return !!(val & BIT(fan));
+}
+
+static long fan_failed(struct device *dev, int fan)
+{
+ struct gxp_fan_ctrl_drvdata *drvdata = dev_get_drvdata(dev);
+ u8 val;
+
+ val = readb(drvdata->plreg + OFS_FAN_FAIL);
+
+ return !!(val & BIT(fan));
+}
+
+static long fan_enabled(struct device *dev, int fan)
+{
+ struct gxp_fan_ctrl_drvdata *drvdata = dev_get_drvdata(dev);
+ u32 val;
+
+ /*
+ * Check the power status as if the platform is off the value
+ * reported for the PWM will be incorrect. Report fan as
+ * disabled.
+ */
+ val = readl(drvdata->fn2 + OFS_SEVSTAT);
+
+ return !!((val & BIT(POWER_BIT)) && fan_installed(dev, fan));
+}
+
+static int gxp_pwm_write(struct device *dev, u32 attr, int channel, long val)
+{
+ struct gxp_fan_ctrl_drvdata *drvdata = dev_get_drvdata(dev);
+
+ switch (attr) {
+ case hwmon_pwm_input:
+ if (val > 255 || val < 0)
+ return -EINVAL;
+ writeb(val, drvdata->base + channel);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int gxp_fan_ctrl_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
+{
+ switch (type) {
+ case hwmon_pwm:
+ return gxp_pwm_write(dev, attr, channel, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int gxp_fan_read(struct device *dev, u32 attr, int channel, long *val)
+{
+ switch (attr) {
+ case hwmon_fan_enable:
+ *val = fan_enabled(dev, channel);
+ return 0;
+ case hwmon_fan_fault:
+ *val = fan_failed(dev, channel);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int gxp_pwm_read(struct device *dev, u32 attr, int channel, long *val)
+{
+ struct gxp_fan_ctrl_drvdata *drvdata = dev_get_drvdata(dev);
+ u32 reg;
+
+ /*
+ * Check the power status of the platform. If the platform is off
+ * the value reported for the PWM will be incorrect. In this case
+ * report a PWM of zero.
+ */
+
+ reg = readl(drvdata->fn2 + OFS_SEVSTAT);
+
+ if (reg & BIT(POWER_BIT))
+ *val = fan_installed(dev, channel) ? readb(drvdata->base + channel) : 0;
+ else
+ *val = 0;
+
+ return 0;
+}
+
+static int gxp_fan_ctrl_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ switch (type) {
+ case hwmon_fan:
+ return gxp_fan_read(dev, attr, channel, val);
+ case hwmon_pwm:
+ return gxp_pwm_read(dev, attr, channel, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static umode_t gxp_fan_ctrl_is_visible(const void *_data,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ umode_t mode = 0;
+
+ switch (type) {
+ case hwmon_fan:
+ switch (attr) {
+ case hwmon_fan_enable:
+ case hwmon_fan_fault:
+ mode = 0444;
+ break;
+ default:
+ break;
+ }
+ break;
+ case hwmon_pwm:
+ switch (attr) {
+ case hwmon_pwm_input:
+ mode = 0644;
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return mode;
+}
+
+static const struct hwmon_ops gxp_fan_ctrl_ops = {
+ .is_visible = gxp_fan_ctrl_is_visible,
+ .read = gxp_fan_ctrl_read,
+ .write = gxp_fan_ctrl_write,
+};
+
+static const struct hwmon_channel_info *gxp_fan_ctrl_info[] = {
+ HWMON_CHANNEL_INFO(fan,
+ HWMON_F_FAULT | HWMON_F_ENABLE,
+ HWMON_F_FAULT | HWMON_F_ENABLE,
+ HWMON_F_FAULT | HWMON_F_ENABLE,
+ HWMON_F_FAULT | HWMON_F_ENABLE,
+ HWMON_F_FAULT | HWMON_F_ENABLE,
+ HWMON_F_FAULT | HWMON_F_ENABLE,
+ HWMON_F_FAULT | HWMON_F_ENABLE,
+ HWMON_F_FAULT | HWMON_F_ENABLE),
+ HWMON_CHANNEL_INFO(pwm,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT),
+ NULL
+};
+
+static const struct hwmon_chip_info gxp_fan_ctrl_chip_info = {
+ .ops = &gxp_fan_ctrl_ops,
+ .info = gxp_fan_ctrl_info,
+
+};
+
+static int gxp_fan_ctrl_probe(struct platform_device *pdev)
+{
+ struct gxp_fan_ctrl_drvdata *drvdata;
+ struct device *dev = &pdev->dev;
+ struct device *hwmon_dev;
+
+ drvdata = devm_kzalloc(dev, sizeof(struct gxp_fan_ctrl_drvdata),
+ GFP_KERNEL);
+ if (!drvdata)
+ return -ENOMEM;
+
+ drvdata->base = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
+ if (IS_ERR(drvdata->base))
+ return dev_err_probe(dev, PTR_ERR(drvdata->base),
+ "failed to map base\n");
+
+ drvdata->plreg = devm_platform_ioremap_resource_byname(pdev,
+ "pl");
+ if (IS_ERR(drvdata->plreg))
+ return dev_err_probe(dev, PTR_ERR(drvdata->plreg),
+ "failed to map plreg\n");
+
+ drvdata->fn2 = devm_platform_ioremap_resource_byname(pdev,
+ "fn2");
+ if (IS_ERR(drvdata->fn2))
+ return dev_err_probe(dev, PTR_ERR(drvdata->fn2),
+ "failed to map fn2\n");
+
+ hwmon_dev = devm_hwmon_device_register_with_info(&pdev->dev,
+ "hpe_gxp_fan_ctrl",
+ drvdata,
+ &gxp_fan_ctrl_chip_info,
+ NULL);
+
+ return PTR_ERR_OR_ZERO(hwmon_dev);
+}
+
+static const struct of_device_id gxp_fan_ctrl_of_match[] = {
+ { .compatible = "hpe,gxp-fan-ctrl", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, gxp_fan_ctrl_of_match);
+
+static struct platform_driver gxp_fan_ctrl_driver = {
+ .probe = gxp_fan_ctrl_probe,
+ .driver = {
+ .name = "gxp-fan-ctrl",
+ .of_match_table = gxp_fan_ctrl_of_match,
+ },
+};
+module_platform_driver(gxp_fan_ctrl_driver);
+
+MODULE_AUTHOR("Nick Hawkins <nick.hawkins@hpe.com>");
+MODULE_DESCRIPTION("HPE GXP fan controller");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/hih6130.c b/drivers/hwmon/hih6130.c
index d9394e19fea8..3a7582824f94 100644
--- a/drivers/hwmon/hih6130.c
+++ b/drivers/hwmon/hih6130.c
@@ -150,7 +150,7 @@ out:
}
/**
- * hih6130_show_temperature() - show temperature measurement value in sysfs
+ * hih6130_temperature_show() - show temperature measurement value in sysfs
* @dev: device
* @attr: device attribute
* @buf: sysfs buffer (PAGE_SIZE) where measurement values are written to
@@ -172,7 +172,7 @@ static ssize_t hih6130_temperature_show(struct device *dev,
}
/**
- * hih6130_show_humidity() - show humidity measurement value in sysfs
+ * hih6130_humidity_show() - show humidity measurement value in sysfs
* @dev: device
* @attr: device attribute
* @buf: sysfs buffer (PAGE_SIZE) where measurement values are written to
diff --git a/drivers/hwmon/ibmpex.c b/drivers/hwmon/ibmpex.c
index 1837cccd993c..db066b368918 100644
--- a/drivers/hwmon/ibmpex.c
+++ b/drivers/hwmon/ibmpex.c
@@ -546,7 +546,7 @@ static void ibmpex_bmc_gone(int iface)
static void ibmpex_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data)
{
- struct ibmpex_bmc_data *data = (struct ibmpex_bmc_data *)user_msg_data;
+ struct ibmpex_bmc_data *data = user_msg_data;
if (msg->msgid != data->tx_msgid) {
dev_err(data->bmc_device,
diff --git a/drivers/hwmon/iio_hwmon.c b/drivers/hwmon/iio_hwmon.c
index 3aa40893fc09..4c8a80847891 100644
--- a/drivers/hwmon/iio_hwmon.c
+++ b/drivers/hwmon/iio_hwmon.c
@@ -77,9 +77,11 @@ static int iio_hwmon_probe(struct platform_device *pdev)
channels = devm_iio_channel_get_all(dev);
if (IS_ERR(channels)) {
- if (PTR_ERR(channels) == -ENODEV)
- return -EPROBE_DEFER;
- return PTR_ERR(channels);
+ ret = PTR_ERR(channels);
+ if (ret == -ENODEV)
+ ret = -EPROBE_DEFER;
+ return dev_err_probe(dev, ret,
+ "Failed to get channels\n");
}
st = devm_kzalloc(dev, sizeof(*st), GFP_KERNEL);
diff --git a/drivers/hwmon/intel-m10-bmc-hwmon.c b/drivers/hwmon/intel-m10-bmc-hwmon.c
index 6e82f7200d1c..2f0323c14bab 100644
--- a/drivers/hwmon/intel-m10-bmc-hwmon.c
+++ b/drivers/hwmon/intel-m10-bmc-hwmon.c
@@ -340,6 +340,231 @@ static const struct m10bmc_hwmon_board_data n5010bmc_hwmon_bdata = {
.hinfo = n5010bmc_hinfo,
};
+static const struct m10bmc_sdata n6000bmc_temp_tbl[] = {
+ { 0x444, 0x448, 0x44c, 0x0, 0x0, 500, "FPGA E-TILE Temperature #1" },
+ { 0x450, 0x454, 0x458, 0x0, 0x0, 500, "FPGA E-TILE Temperature #2" },
+ { 0x45c, 0x460, 0x464, 0x0, 0x0, 500, "FPGA E-TILE Temperature #3" },
+ { 0x468, 0x46c, 0x470, 0x0, 0x0, 500, "FPGA E-TILE Temperature #4" },
+ { 0x474, 0x478, 0x47c, 0x0, 0x0, 500, "FPGA P-TILE Temperature" },
+ { 0x484, 0x488, 0x48c, 0x0, 0x0, 500, "FPGA FABRIC Digital Temperature #1" },
+ { 0x490, 0x494, 0x498, 0x0, 0x0, 500, "FPGA FABRIC Digital Temperature #2" },
+ { 0x49c, 0x4a0, 0x4a4, 0x0, 0x0, 500, "FPGA FABRIC Digital Temperature #3" },
+ { 0x4a8, 0x4ac, 0x4b0, 0x0, 0x0, 500, "FPGA FABRIC Digital Temperature #4" },
+ { 0x4b4, 0x4b8, 0x4bc, 0x0, 0x0, 500, "FPGA FABRIC Digital Temperature #5" },
+ { 0x4c0, 0x4c4, 0x4c8, 0x0, 0x0, 500, "FPGA FABRIC Remote Digital Temperature #1" },
+ { 0x4cc, 0x4d0, 0x4d4, 0x0, 0x0, 500, "FPGA FABRIC Remote Digital Temperature #2" },
+ { 0x4d8, 0x4dc, 0x4e0, 0x0, 0x0, 500, "FPGA FABRIC Remote Digital Temperature #3" },
+ { 0x4e4, 0x4e8, 0x4ec, 0x0, 0x0, 500, "FPGA FABRIC Remote Digital Temperature #4" },
+ { 0x4f0, 0x4f4, 0x4f8, 0x52c, 0x0, 500, "Board Top Near FPGA Temperature" },
+ { 0x4fc, 0x500, 0x504, 0x52c, 0x0, 500, "Board Bottom Near CVL Temperature" },
+ { 0x508, 0x50c, 0x510, 0x52c, 0x0, 500, "Board Top East Near VRs Temperature" },
+ { 0x514, 0x518, 0x51c, 0x52c, 0x0, 500, "Columbiaville Die Temperature" },
+ { 0x520, 0x524, 0x528, 0x52c, 0x0, 500, "Board Rear Side Temperature" },
+ { 0x530, 0x534, 0x538, 0x52c, 0x0, 500, "Board Front Side Temperature" },
+ { 0x53c, 0x540, 0x544, 0x0, 0x0, 500, "QSFP1 Case Temperature" },
+ { 0x548, 0x54c, 0x550, 0x0, 0x0, 500, "QSFP2 Case Temperature" },
+ { 0x554, 0x0, 0x0, 0x0, 0x0, 500, "FPGA Core Voltage Phase 0 VR Temperature" },
+ { 0x560, 0x0, 0x0, 0x0, 0x0, 500, "FPGA Core Voltage Phase 1 VR Temperature" },
+ { 0x56c, 0x0, 0x0, 0x0, 0x0, 500, "FPGA Core Voltage Phase 2 VR Temperature" },
+ { 0x578, 0x0, 0x0, 0x0, 0x0, 500, "FPGA Core Voltage VR Controller Temperature" },
+ { 0x584, 0x0, 0x0, 0x0, 0x0, 500, "FPGA VCCH VR Temperature" },
+ { 0x590, 0x0, 0x0, 0x0, 0x0, 500, "FPGA VCC_1V2 VR Temperature" },
+ { 0x59c, 0x0, 0x0, 0x0, 0x0, 500, "FPGA VCCH, VCC_1V2 VR Controller Temperature" },
+ { 0x5a8, 0x0, 0x0, 0x0, 0x0, 500, "3V3 VR Temperature" },
+ { 0x5b4, 0x0, 0x0, 0x0, 0x0, 500, "CVL Core Voltage VR Temperature" },
+ { 0x5c4, 0x5c8, 0x5cc, 0x5c0, 0x0, 500, "FPGA P-Tile Temperature [Remote]" },
+ { 0x5d0, 0x5d4, 0x5d8, 0x5c0, 0x0, 500, "FPGA E-Tile Temperature [Remote]" },
+ { 0x5dc, 0x5e0, 0x5e4, 0x5c0, 0x0, 500, "FPGA SDM Temperature [Remote]" },
+ { 0x5e8, 0x5ec, 0x5f0, 0x5c0, 0x0, 500, "FPGA Corner Temperature [Remote]" },
+};
+
+static const struct m10bmc_sdata n6000bmc_in_tbl[] = {
+ { 0x5f4, 0x0, 0x0, 0x0, 0x0, 1, "Inlet 12V PCIe Rail Voltage" },
+ { 0x60c, 0x0, 0x0, 0x0, 0x0, 1, "Inlet 12V Aux Rail Voltage" },
+ { 0x624, 0x0, 0x0, 0x0, 0x0, 1, "Inlet 3V3 PCIe Rail Voltage" },
+ { 0x63c, 0x0, 0x0, 0x0, 0x0, 1, "FPGA Core Voltage Rail Voltage" },
+ { 0x644, 0x0, 0x0, 0x0, 0x0, 1, "FPGA VCCH Rail Voltage" },
+ { 0x64c, 0x0, 0x0, 0x0, 0x0, 1, "FPGA VCC_1V2 Rail Voltage" },
+ { 0x654, 0x0, 0x0, 0x0, 0x0, 1, "FPGA VCCH_GXER_1V1, VCCA_1V8 Voltage" },
+ { 0x664, 0x0, 0x0, 0x0, 0x0, 1, "FPGA VCCIO_1V2 Voltage" },
+ { 0x674, 0x0, 0x0, 0x0, 0x0, 1, "CVL Non Core Rails Inlet Voltage" },
+ { 0x684, 0x0, 0x0, 0x0, 0x0, 1, "MAX10 & Board CLK PWR 3V3 Inlet Voltage" },
+ { 0x694, 0x0, 0x0, 0x0, 0x0, 1, "CVL Core Voltage Rail Voltage" },
+ { 0x6ac, 0x0, 0x0, 0x0, 0x0, 1, "Board 3V3 VR Voltage" },
+ { 0x6b4, 0x0, 0x0, 0x0, 0x0, 1, "QSFP 3V3 Rail Voltage" },
+ { 0x6c4, 0x0, 0x0, 0x0, 0x0, 1, "QSFP (Primary) Supply Rail Voltage" },
+ { 0x6c8, 0x0, 0x0, 0x0, 0x0, 1, "QSFP (Secondary) Supply Rail Voltage" },
+ { 0x6cc, 0x0, 0x0, 0x0, 0x0, 1, "VCCCLK_GXER_2V5 Voltage" },
+ { 0x6d0, 0x0, 0x0, 0x0, 0x0, 1, "AVDDH_1V1_CVL Voltage" },
+ { 0x6d4, 0x0, 0x0, 0x0, 0x0, 1, "VDDH_1V8_CVL Voltage" },
+ { 0x6d8, 0x0, 0x0, 0x0, 0x0, 1, "VCCA_PLL Voltage" },
+ { 0x6e0, 0x0, 0x0, 0x0, 0x0, 1, "VCCRT_GXER_0V9 Voltage" },
+ { 0x6e8, 0x0, 0x0, 0x0, 0x0, 1, "VCCRT_GXPL_0V9 Voltage" },
+ { 0x6f0, 0x0, 0x0, 0x0, 0x0, 1, "VCCH_GXPL_1V8 Voltage" },
+ { 0x6f4, 0x0, 0x0, 0x0, 0x0, 1, "VCCPT_1V8 Voltage" },
+ { 0x6fc, 0x0, 0x0, 0x0, 0x0, 1, "VCC_3V3_M10 Voltage" },
+ { 0x700, 0x0, 0x0, 0x0, 0x0, 1, "VCC_1V8_M10 Voltage" },
+ { 0x704, 0x0, 0x0, 0x0, 0x0, 1, "VCC_1V2_EMIF1_2_3 Voltage" },
+ { 0x70c, 0x0, 0x0, 0x0, 0x0, 1, "VCC_1V2_EMIF4_5 Voltage" },
+ { 0x714, 0x0, 0x0, 0x0, 0x0, 1, "VCCA_1V8 Voltage" },
+ { 0x718, 0x0, 0x0, 0x0, 0x0, 1, "VCCH_GXER_1V1 Voltage" },
+ { 0x71c, 0x0, 0x0, 0x0, 0x0, 1, "AVDD_ETH_0V9_CVL Voltage" },
+ { 0x720, 0x0, 0x0, 0x0, 0x0, 1, "AVDD_PCIE_0V9_CVL Voltage" },
+};
+
+static const struct m10bmc_sdata n6000bmc_curr_tbl[] = {
+ { 0x600, 0x604, 0x608, 0x0, 0x0, 1, "Inlet 12V PCIe Rail Current" },
+ { 0x618, 0x61c, 0x620, 0x0, 0x0, 1, "Inlet 12V Aux Rail Current" },
+ { 0x630, 0x634, 0x638, 0x0, 0x0, 1, "Inlet 3V3 PCIe Rail Current" },
+ { 0x640, 0x0, 0x0, 0x0, 0x0, 1, "FPGA Core Voltage Rail Current" },
+ { 0x648, 0x0, 0x0, 0x0, 0x0, 1, "FPGA VCCH Rail Current" },
+ { 0x650, 0x0, 0x0, 0x0, 0x0, 1, "FPGA VCC_1V2 Rail Current" },
+ { 0x658, 0x65c, 0x660, 0x0, 0x0, 1, "FPGA VCCH_GXER_1V1, VCCA_1V8 Current" },
+ { 0x668, 0x66c, 0x670, 0x0, 0x0, 1, "FPGA VCCIO_1V2 Current" },
+ { 0x678, 0x67c, 0x680, 0x0, 0x0, 1, "CVL Non Core Rails Inlet Current" },
+ { 0x688, 0x68c, 0x690, 0x0, 0x0, 1, "MAX10 & Board CLK PWR 3V3 Inlet Current" },
+ { 0x698, 0x0, 0x0, 0x0, 0x0, 1, "CVL Core Voltage Rail Current" },
+ { 0x6b0, 0x0, 0x0, 0x0, 0x0, 1, "Board 3V3 VR Current" },
+ { 0x6b8, 0x6bc, 0x6c0, 0x0, 0x0, 1, "QSFP 3V3 Rail Current" },
+};
+
+static const struct m10bmc_sdata n6000bmc_power_tbl[] = {
+ { 0x724, 0x0, 0x0, 0x0, 0x0, 1, "Board Power" },
+};
+
+static const struct hwmon_channel_info *n6000bmc_hinfo[] = {
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_CRIT |
+ HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_CRIT |
+ HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_CRIT |
+ HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_CRIT |
+ HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_CRIT |
+ HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_CRIT |
+ HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_CRIT |
+ HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_CRIT |
+ HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_CRIT |
+ HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_CRIT |
+ HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_CRIT |
+ HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_CRIT |
+ HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_CRIT |
+ HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_CRIT |
+ HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MAX_HYST |
+ HWMON_T_CRIT | HWMON_T_CRIT_HYST | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MAX_HYST |
+ HWMON_T_CRIT | HWMON_T_CRIT_HYST | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MAX_HYST |
+ HWMON_T_CRIT | HWMON_T_CRIT_HYST | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MAX_HYST |
+ HWMON_T_CRIT | HWMON_T_CRIT_HYST | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MAX_HYST |
+ HWMON_T_CRIT | HWMON_T_CRIT_HYST | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MAX_HYST |
+ HWMON_T_CRIT | HWMON_T_CRIT_HYST | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_CRIT |
+ HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_CRIT |
+ HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MAX_HYST |
+ HWMON_T_CRIT | HWMON_T_CRIT_HYST | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MAX_HYST |
+ HWMON_T_CRIT | HWMON_T_CRIT_HYST | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MAX_HYST |
+ HWMON_T_CRIT | HWMON_T_CRIT_HYST | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MAX_HYST |
+ HWMON_T_CRIT | HWMON_T_CRIT_HYST | HWMON_T_LABEL),
+ HWMON_CHANNEL_INFO(in,
+ HWMON_I_INPUT | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_LABEL,
+ HWMON_I_INPUT | HWMON_I_LABEL),
+ HWMON_CHANNEL_INFO(curr,
+ HWMON_C_INPUT | HWMON_C_MAX | HWMON_C_CRIT |
+ HWMON_C_LABEL,
+ HWMON_C_INPUT | HWMON_C_MAX | HWMON_C_CRIT |
+ HWMON_C_LABEL,
+ HWMON_C_INPUT | HWMON_C_MAX | HWMON_C_CRIT |
+ HWMON_C_LABEL,
+ HWMON_C_INPUT | HWMON_C_LABEL,
+ HWMON_C_INPUT | HWMON_C_LABEL,
+ HWMON_C_INPUT | HWMON_C_LABEL,
+ HWMON_C_INPUT | HWMON_C_MAX | HWMON_C_CRIT |
+ HWMON_C_LABEL,
+ HWMON_C_INPUT | HWMON_C_MAX | HWMON_C_CRIT |
+ HWMON_C_LABEL,
+ HWMON_C_INPUT | HWMON_C_MAX | HWMON_C_CRIT |
+ HWMON_C_LABEL,
+ HWMON_C_INPUT | HWMON_C_MAX | HWMON_C_CRIT |
+ HWMON_C_LABEL,
+ HWMON_C_INPUT | HWMON_C_LABEL,
+ HWMON_C_INPUT | HWMON_C_LABEL,
+ HWMON_C_INPUT | HWMON_C_MAX | HWMON_C_CRIT |
+ HWMON_C_LABEL),
+ HWMON_CHANNEL_INFO(power,
+ HWMON_P_INPUT | HWMON_P_LABEL),
+ NULL
+};
+
+static const struct m10bmc_hwmon_board_data n6000bmc_hwmon_bdata = {
+ .tables = {
+ [hwmon_temp] = n6000bmc_temp_tbl,
+ [hwmon_in] = n6000bmc_in_tbl,
+ [hwmon_curr] = n6000bmc_curr_tbl,
+ [hwmon_power] = n6000bmc_power_tbl,
+ },
+
+ .hinfo = n6000bmc_hinfo,
+};
+
static umode_t
m10bmc_hwmon_is_visible(const void *data, enum hwmon_sensor_types type,
u32 attr, int channel)
@@ -549,6 +774,10 @@ static const struct platform_device_id intel_m10bmc_hwmon_ids[] = {
.name = "n5010bmc-hwmon",
.driver_data = (unsigned long)&n5010bmc_hwmon_bdata,
},
+ {
+ .name = "n6000bmc-hwmon",
+ .driver_data = (unsigned long)&n6000bmc_hwmon_bdata,
+ },
{ }
};
diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c
index 9997f76b1f4a..66f7ceaa7c3f 100644
--- a/drivers/hwmon/it87.c
+++ b/drivers/hwmon/it87.c
@@ -34,6 +34,7 @@
* IT8786E Super I/O chip w/LPC interface
* IT8790E Super I/O chip w/LPC interface
* IT8792E Super I/O chip w/LPC interface
+ * IT87952E Super I/O chip w/LPC interface
* Sis950 A clone of the IT8705F
*
* Copyright (C) 2001 Chris Gauthron
@@ -63,15 +64,7 @@
enum chips { it87, it8712, it8716, it8718, it8720, it8721, it8728, it8732,
it8771, it8772, it8781, it8782, it8783, it8786, it8790,
- it8792, it8603, it8620, it8622, it8628 };
-
-static unsigned short force_id;
-module_param(force_id, ushort, 0);
-MODULE_PARM_DESC(force_id, "Override the detected device ID");
-
-static bool ignore_resource_conflict;
-module_param(ignore_resource_conflict, bool, 0);
-MODULE_PARM_DESC(ignore_resource_conflict, "Ignore ACPI resource conflict");
+ it8792, it8603, it8620, it8622, it8628, it87952 };
static struct platform_device *it87_pdev[2];
@@ -87,6 +80,14 @@ static struct platform_device *it87_pdev[2];
#define DEVID 0x20 /* Register: Device ID */
#define DEVREV 0x22 /* Register: Device Revision */
+static inline void __superio_enter(int ioreg)
+{
+ outb(0x87, ioreg);
+ outb(0x01, ioreg);
+ outb(0x55, ioreg);
+ outb(ioreg == REG_4E ? 0xaa : 0x55, ioreg);
+}
+
static inline int superio_inb(int ioreg, int reg)
{
outb(reg, ioreg);
@@ -124,17 +125,16 @@ static inline int superio_enter(int ioreg)
if (!request_muxed_region(ioreg, 2, DRVNAME))
return -EBUSY;
- outb(0x87, ioreg);
- outb(0x01, ioreg);
- outb(0x55, ioreg);
- outb(ioreg == REG_4E ? 0xaa : 0x55, ioreg);
+ __superio_enter(ioreg);
return 0;
}
-static inline void superio_exit(int ioreg)
+static inline void superio_exit(int ioreg, bool noexit)
{
- outb(0x02, ioreg);
- outb(0x02, ioreg + 1);
+ if (!noexit) {
+ outb(0x02, ioreg);
+ outb(0x02, ioreg + 1);
+ }
release_region(ioreg, 2);
}
@@ -161,6 +161,7 @@ static inline void superio_exit(int ioreg)
#define IT8622E_DEVID 0x8622
#define IT8623E_DEVID 0x8623
#define IT8628E_DEVID 0x8628
+#define IT87952E_DEVID 0x8695
#define IT87_ACT_REG 0x30
#define IT87_BASE_REG 0x60
@@ -176,6 +177,13 @@ static inline void superio_exit(int ioreg)
#define IT87_SIO_VID_REG 0xfc /* VID value */
#define IT87_SIO_BEEP_PIN_REG 0xf6 /* Beep pin mapping */
+/* Force chip IDs to specified values. Should only be used for testing */
+static unsigned short force_id[2];
+static unsigned int force_id_cnt;
+
+/* ACPI resource conflicts are ignored if this parameter is set to 1 */
+static bool ignore_resource_conflict;
+
/* Update battery voltage after every reading if true */
static bool update_vbat;
@@ -272,7 +280,7 @@ static const u8 IT87_REG_AUTO_BASE[] = { 0x60, 0x68, 0x70, 0x78, 0xa0, 0xa8 };
struct it87_devices {
const char *name;
- const char * const suffix;
+ const char * const model;
u32 features;
u8 peci_mask;
u8 old_peci_mask;
@@ -297,28 +305,35 @@ struct it87_devices {
#define FEAT_PWM_FREQ2 BIT(16) /* Separate pwm freq 2 */
#define FEAT_SIX_TEMP BIT(17) /* Up to 6 temp sensors */
#define FEAT_VIN3_5V BIT(18) /* VIN3 connected to +5V */
+/*
+ * Disabling configuration mode on some chips can result in system
+ * hang-ups and access failures to the Super-IO chip at the
+ * second SIO address. Never exit configuration mode on these
+ * chips to avoid the problem.
+ */
+#define FEAT_CONF_NOEXIT BIT(19) /* Chip should not exit conf mode */
static const struct it87_devices it87_devices[] = {
[it87] = {
.name = "it87",
- .suffix = "F",
+ .model = "IT87F",
.features = FEAT_OLD_AUTOPWM, /* may need to overwrite */
},
[it8712] = {
.name = "it8712",
- .suffix = "F",
+ .model = "IT8712F",
.features = FEAT_OLD_AUTOPWM | FEAT_VID,
/* may need to overwrite */
},
[it8716] = {
.name = "it8716",
- .suffix = "F",
+ .model = "IT8716F",
.features = FEAT_16BIT_FANS | FEAT_TEMP_OFFSET | FEAT_VID
| FEAT_FAN16_CONFIG | FEAT_FIVE_FANS | FEAT_PWM_FREQ2,
},
[it8718] = {
.name = "it8718",
- .suffix = "F",
+ .model = "IT8718F",
.features = FEAT_16BIT_FANS | FEAT_TEMP_OFFSET | FEAT_VID
| FEAT_TEMP_OLD_PECI | FEAT_FAN16_CONFIG | FEAT_FIVE_FANS
| FEAT_PWM_FREQ2,
@@ -326,7 +341,7 @@ static const struct it87_devices it87_devices[] = {
},
[it8720] = {
.name = "it8720",
- .suffix = "F",
+ .model = "IT8720F",
.features = FEAT_16BIT_FANS | FEAT_TEMP_OFFSET | FEAT_VID
| FEAT_TEMP_OLD_PECI | FEAT_FAN16_CONFIG | FEAT_FIVE_FANS
| FEAT_PWM_FREQ2,
@@ -334,7 +349,7 @@ static const struct it87_devices it87_devices[] = {
},
[it8721] = {
.name = "it8721",
- .suffix = "F",
+ .model = "IT8721F",
.features = FEAT_NEWER_AUTOPWM | FEAT_12MV_ADC | FEAT_16BIT_FANS
| FEAT_TEMP_OFFSET | FEAT_TEMP_OLD_PECI | FEAT_TEMP_PECI
| FEAT_FAN16_CONFIG | FEAT_FIVE_FANS | FEAT_IN7_INTERNAL
@@ -344,7 +359,7 @@ static const struct it87_devices it87_devices[] = {
},
[it8728] = {
.name = "it8728",
- .suffix = "F",
+ .model = "IT8728F",
.features = FEAT_NEWER_AUTOPWM | FEAT_12MV_ADC | FEAT_16BIT_FANS
| FEAT_TEMP_OFFSET | FEAT_TEMP_PECI | FEAT_FIVE_FANS
| FEAT_IN7_INTERNAL | FEAT_PWM_FREQ2,
@@ -352,7 +367,7 @@ static const struct it87_devices it87_devices[] = {
},
[it8732] = {
.name = "it8732",
- .suffix = "F",
+ .model = "IT8732F",
.features = FEAT_NEWER_AUTOPWM | FEAT_16BIT_FANS
| FEAT_TEMP_OFFSET | FEAT_TEMP_OLD_PECI | FEAT_TEMP_PECI
| FEAT_10_9MV_ADC | FEAT_IN7_INTERNAL,
@@ -361,7 +376,7 @@ static const struct it87_devices it87_devices[] = {
},
[it8771] = {
.name = "it8771",
- .suffix = "E",
+ .model = "IT8771E",
.features = FEAT_NEWER_AUTOPWM | FEAT_12MV_ADC | FEAT_16BIT_FANS
| FEAT_TEMP_OFFSET | FEAT_TEMP_PECI | FEAT_IN7_INTERNAL
| FEAT_PWM_FREQ2,
@@ -373,7 +388,7 @@ static const struct it87_devices it87_devices[] = {
},
[it8772] = {
.name = "it8772",
- .suffix = "E",
+ .model = "IT8772E",
.features = FEAT_NEWER_AUTOPWM | FEAT_12MV_ADC | FEAT_16BIT_FANS
| FEAT_TEMP_OFFSET | FEAT_TEMP_PECI | FEAT_IN7_INTERNAL
| FEAT_PWM_FREQ2,
@@ -385,28 +400,28 @@ static const struct it87_devices it87_devices[] = {
},
[it8781] = {
.name = "it8781",
- .suffix = "F",
+ .model = "IT8781F",
.features = FEAT_16BIT_FANS | FEAT_TEMP_OFFSET
| FEAT_TEMP_OLD_PECI | FEAT_FAN16_CONFIG | FEAT_PWM_FREQ2,
.old_peci_mask = 0x4,
},
[it8782] = {
.name = "it8782",
- .suffix = "F",
+ .model = "IT8782F",
.features = FEAT_16BIT_FANS | FEAT_TEMP_OFFSET
| FEAT_TEMP_OLD_PECI | FEAT_FAN16_CONFIG | FEAT_PWM_FREQ2,
.old_peci_mask = 0x4,
},
[it8783] = {
.name = "it8783",
- .suffix = "E/F",
+ .model = "IT8783E/F",
.features = FEAT_16BIT_FANS | FEAT_TEMP_OFFSET
| FEAT_TEMP_OLD_PECI | FEAT_FAN16_CONFIG | FEAT_PWM_FREQ2,
.old_peci_mask = 0x4,
},
[it8786] = {
.name = "it8786",
- .suffix = "E",
+ .model = "IT8786E",
.features = FEAT_NEWER_AUTOPWM | FEAT_12MV_ADC | FEAT_16BIT_FANS
| FEAT_TEMP_OFFSET | FEAT_TEMP_PECI | FEAT_IN7_INTERNAL
| FEAT_PWM_FREQ2,
@@ -414,24 +429,24 @@ static const struct it87_devices it87_devices[] = {
},
[it8790] = {
.name = "it8790",
- .suffix = "E",
+ .model = "IT8790E",
.features = FEAT_NEWER_AUTOPWM | FEAT_12MV_ADC | FEAT_16BIT_FANS
| FEAT_TEMP_OFFSET | FEAT_TEMP_PECI | FEAT_IN7_INTERNAL
- | FEAT_PWM_FREQ2,
+ | FEAT_PWM_FREQ2 | FEAT_CONF_NOEXIT,
.peci_mask = 0x07,
},
[it8792] = {
.name = "it8792",
- .suffix = "E",
+ .model = "IT8792E/IT8795E",
.features = FEAT_NEWER_AUTOPWM | FEAT_16BIT_FANS
| FEAT_TEMP_OFFSET | FEAT_TEMP_OLD_PECI | FEAT_TEMP_PECI
- | FEAT_10_9MV_ADC | FEAT_IN7_INTERNAL,
+ | FEAT_10_9MV_ADC | FEAT_IN7_INTERNAL | FEAT_CONF_NOEXIT,
.peci_mask = 0x07,
.old_peci_mask = 0x02, /* Actually reports PCH */
},
[it8603] = {
.name = "it8603",
- .suffix = "E",
+ .model = "IT8603E",
.features = FEAT_NEWER_AUTOPWM | FEAT_12MV_ADC | FEAT_16BIT_FANS
| FEAT_TEMP_OFFSET | FEAT_TEMP_PECI | FEAT_IN7_INTERNAL
| FEAT_AVCC3 | FEAT_PWM_FREQ2,
@@ -439,7 +454,7 @@ static const struct it87_devices it87_devices[] = {
},
[it8620] = {
.name = "it8620",
- .suffix = "E",
+ .model = "IT8620E",
.features = FEAT_NEWER_AUTOPWM | FEAT_12MV_ADC | FEAT_16BIT_FANS
| FEAT_TEMP_OFFSET | FEAT_TEMP_PECI | FEAT_SIX_FANS
| FEAT_IN7_INTERNAL | FEAT_SIX_PWM | FEAT_PWM_FREQ2
@@ -448,7 +463,7 @@ static const struct it87_devices it87_devices[] = {
},
[it8622] = {
.name = "it8622",
- .suffix = "E",
+ .model = "IT8622E",
.features = FEAT_NEWER_AUTOPWM | FEAT_12MV_ADC | FEAT_16BIT_FANS
| FEAT_TEMP_OFFSET | FEAT_TEMP_PECI | FEAT_FIVE_FANS
| FEAT_FIVE_PWM | FEAT_IN7_INTERNAL | FEAT_PWM_FREQ2
@@ -457,13 +472,22 @@ static const struct it87_devices it87_devices[] = {
},
[it8628] = {
.name = "it8628",
- .suffix = "E",
+ .model = "IT8628E",
.features = FEAT_NEWER_AUTOPWM | FEAT_12MV_ADC | FEAT_16BIT_FANS
| FEAT_TEMP_OFFSET | FEAT_TEMP_PECI | FEAT_SIX_FANS
| FEAT_IN7_INTERNAL | FEAT_SIX_PWM | FEAT_PWM_FREQ2
| FEAT_SIX_TEMP | FEAT_VIN3_5V,
.peci_mask = 0x07,
},
+ [it87952] = {
+ .name = "it87952",
+ .model = "IT87952E",
+ .features = FEAT_NEWER_AUTOPWM | FEAT_16BIT_FANS
+ | FEAT_TEMP_OFFSET | FEAT_TEMP_OLD_PECI | FEAT_TEMP_PECI
+ | FEAT_10_9MV_ADC | FEAT_IN7_INTERNAL | FEAT_CONF_NOEXIT,
+ .peci_mask = 0x07,
+ .old_peci_mask = 0x02, /* Actually reports PCH */
+ },
};
#define has_16bit_fans(data) ((data)->features & FEAT_16BIT_FANS)
@@ -490,6 +514,7 @@ static const struct it87_devices it87_devices[] = {
#define has_pwm_freq2(data) ((data)->features & FEAT_PWM_FREQ2)
#define has_six_temp(data) ((data)->features & FEAT_SIX_TEMP)
#define has_vin3_5v(data) ((data)->features & FEAT_VIN3_5V)
+#define has_conf_noexit(data) ((data)->features & FEAT_CONF_NOEXIT)
struct it87_sio_data {
int sioaddr;
@@ -2397,11 +2422,11 @@ static const struct attribute_group it87_group_auto_pwm = {
/* SuperIO detection - will change isa_address if a chip is found */
static int __init it87_find(int sioaddr, unsigned short *address,
- struct it87_sio_data *sio_data)
+ struct it87_sio_data *sio_data, int chip_cnt)
{
int err;
u16 chip_type;
- const struct it87_devices *config;
+ const struct it87_devices *config = NULL;
err = superio_enter(sioaddr);
if (err)
@@ -2413,8 +2438,12 @@ static int __init it87_find(int sioaddr, unsigned short *address,
if (chip_type == 0xffff)
goto exit;
- if (force_id)
- chip_type = force_id;
+ if (force_id_cnt == 1) {
+ /* If only one value given use for all chips */
+ if (force_id[0])
+ chip_type = force_id[0];
+ } else if (force_id[chip_cnt])
+ chip_type = force_id[chip_cnt];
switch (chip_type) {
case IT8705F_DEVID:
@@ -2479,6 +2508,9 @@ static int __init it87_find(int sioaddr, unsigned short *address,
case IT8628E_DEVID:
sio_data->type = it8628;
break;
+ case IT87952E_DEVID:
+ sio_data->type = it87952;
+ break;
case 0xffff: /* No device at all */
goto exit;
default:
@@ -2486,27 +2518,29 @@ static int __init it87_find(int sioaddr, unsigned short *address,
goto exit;
}
+ config = &it87_devices[sio_data->type];
+
superio_select(sioaddr, PME);
if (!(superio_inb(sioaddr, IT87_ACT_REG) & 0x01)) {
- pr_info("Device not activated, skipping\n");
+ pr_info("Device (chip %s ioreg 0x%x) not activated, skipping\n",
+ config->model, sioaddr);
goto exit;
}
*address = superio_inw(sioaddr, IT87_BASE_REG) & ~(IT87_EXTENT - 1);
if (*address == 0) {
- pr_info("Base address not set, skipping\n");
+ pr_info("Base address not set (chip %s ioreg 0x%x), skipping\n",
+ config->model, sioaddr);
goto exit;
}
err = 0;
sio_data->sioaddr = sioaddr;
sio_data->revision = superio_inb(sioaddr, DEVREV) & 0x0f;
- pr_info("Found IT%04x%s chip at 0x%x, revision %d\n", chip_type,
- it87_devices[sio_data->type].suffix,
+ pr_info("Found %s chip at 0x%x, revision %d\n",
+ it87_devices[sio_data->type].model,
*address, sio_data->revision);
- config = &it87_devices[sio_data->type];
-
/* in7 (VSB or VCCH5V) is always internal on some chips */
if (has_in7_internal(config))
sio_data->internal |= BIT(1);
@@ -2824,7 +2858,7 @@ static int __init it87_find(int sioaddr, unsigned short *address,
sio_data->skip_pwm |= dmi_data->skip_pwm;
exit:
- superio_exit(sioaddr);
+ superio_exit(sioaddr, config ? has_conf_noexit(config) : false);
return err;
}
@@ -3210,7 +3244,7 @@ static void it87_resume_sio(struct platform_device *pdev)
reg2c);
}
- superio_exit(data->sioaddr);
+ superio_exit(data->sioaddr, has_conf_noexit(data));
}
static int it87_resume(struct device *dev)
@@ -3311,6 +3345,27 @@ static int it87_dmi_cb(const struct dmi_system_id *dmi_entry)
}
/*
+ * On various Gigabyte AM4 boards (AB350, AX370), the second Super-IO chip
+ * (IT8792E) needs to be in configuration mode before accessing the first
+ * due to a bug in IT8792E which otherwise results in LPC bus access errors.
+ * This needs to be done before accessing the first Super-IO chip since
+ * the second chip may have been accessed prior to loading this driver.
+ *
+ * The problem is also reported to affect IT8795E, which is used on X299 boards
+ * and has the same chip ID as IT8792E (0x8733). It also appears to affect
+ * systems with IT8790E, which is used on some Z97X-Gaming boards as well as
+ * Z87X-OC.
+ * DMI entries for those systems will be added as they become available and
+ * as the problem is confirmed to affect those boards.
+ */
+static int it87_sio_force(const struct dmi_system_id *dmi_entry)
+{
+ __superio_enter(REG_4E);
+
+ return it87_dmi_cb(dmi_entry);
+};
+
+/*
* On the Shuttle SN68PT, FAN_CTL2 is apparently not
* connected to a fan, but to something else. One user
* has reported instant system power-off when changing
@@ -3332,7 +3387,34 @@ static struct it87_dmi_data nvidia_fn68pt = {
.driver_data = data, \
}
+#define IT87_DMI_MATCH_GBT(name, cb, data) \
+ IT87_DMI_MATCH_VND("Gigabyte Technology Co., Ltd.", name, cb, data)
+
static const struct dmi_system_id it87_dmi_table[] __initconst = {
+ IT87_DMI_MATCH_GBT("AB350", it87_sio_force, NULL),
+ /* ? + IT8792E/IT8795E */
+ IT87_DMI_MATCH_GBT("AX370", it87_sio_force, NULL),
+ /* ? + IT8792E/IT8795E */
+ IT87_DMI_MATCH_GBT("Z97X-Gaming G1", it87_sio_force, NULL),
+ /* ? + IT8790E */
+ IT87_DMI_MATCH_GBT("TRX40 AORUS XTREME", it87_sio_force, NULL),
+ /* IT8688E + IT8792E/IT8795E */
+ IT87_DMI_MATCH_GBT("Z390 AORUS ULTRA-CF", it87_sio_force, NULL),
+ /* IT8688E + IT8792E/IT8795E */
+ IT87_DMI_MATCH_GBT("B550 AORUS PRO AC", it87_sio_force, NULL),
+ /* IT8688E + IT8792E/IT8795E */
+ IT87_DMI_MATCH_GBT("X570 AORUS MASTER", it87_sio_force, NULL),
+ /* IT8688E + IT8792E/IT8795E */
+ IT87_DMI_MATCH_GBT("X570 AORUS PRO", it87_sio_force, NULL),
+ /* IT8688E + IT8792E/IT8795E */
+ IT87_DMI_MATCH_GBT("X570 AORUS PRO WIFI", it87_sio_force, NULL),
+ /* IT8688E + IT8792E/IT8795E */
+ IT87_DMI_MATCH_GBT("X570S AERO G", it87_sio_force, NULL),
+ /* IT8689E + IT87952E */
+ IT87_DMI_MATCH_GBT("Z690 AORUS PRO DDR4", it87_sio_force, NULL),
+ /* IT8689E + IT87952E */
+ IT87_DMI_MATCH_GBT("Z690 AORUS PRO", it87_sio_force, NULL),
+ /* IT8689E + IT87952E */
IT87_DMI_MATCH_VND("nVIDIA", "FN68PT", it87_dmi_cb, &nvidia_fn68pt),
{ }
@@ -3356,7 +3438,7 @@ static int __init sm_it87_init(void)
for (i = 0; i < ARRAY_SIZE(sioaddr); i++) {
memset(&sio_data, 0, sizeof(struct it87_sio_data));
isa_address[i] = 0;
- err = it87_find(sioaddr[i], &isa_address[i], &sio_data);
+ err = it87_find(sioaddr[i], &isa_address[i], &sio_data, i);
if (err || isa_address[i] == 0)
continue;
/*
@@ -3404,11 +3486,20 @@ static void __exit sm_it87_exit(void)
MODULE_AUTHOR("Chris Gauthron, Jean Delvare <jdelvare@suse.de>");
MODULE_DESCRIPTION("IT8705F/IT871xF/IT872xF hardware monitoring driver");
+
+module_param_array(force_id, ushort, &force_id_cnt, 0);
+MODULE_PARM_DESC(force_id, "Override one or more detected device ID(s)");
+
+module_param(ignore_resource_conflict, bool, 0);
+MODULE_PARM_DESC(ignore_resource_conflict, "Ignore ACPI resource conflict");
+
module_param(update_vbat, bool, 0);
MODULE_PARM_DESC(update_vbat, "Update vbat if set else return powerup value");
+
module_param(fix_pwm_polarity, bool, 0);
MODULE_PARM_DESC(fix_pwm_polarity,
"Force PWM polarity to active high (DANGEROUS)");
+
MODULE_LICENSE("GPL");
module_init(sm_it87_init);
diff --git a/drivers/hwmon/ltc2945.c b/drivers/hwmon/ltc2945.c
index 9adebb59f604..3494f7261ebf 100644
--- a/drivers/hwmon/ltc2945.c
+++ b/drivers/hwmon/ltc2945.c
@@ -58,6 +58,22 @@
#define CONTROL_MULT_SELECT (1 << 0)
#define CONTROL_TEST_MODE (1 << 4)
+static const struct of_device_id __maybe_unused ltc2945_of_match[] = {
+ { .compatible = "adi,ltc2945" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ltc2945_of_match);
+
+/**
+ * struct ltc2945_data - LTC2945 device data
+ * @regmap: regmap device
+ * @shunt_resistor: shunt resistor value in micro ohms (1000 by default)
+ */
+struct ltc2945_data {
+ struct regmap *regmap;
+ u32 shunt_resistor;
+};
+
static inline bool is_power_reg(u8 reg)
{
return reg < LTC2945_SENSE_H;
@@ -66,7 +82,9 @@ static inline bool is_power_reg(u8 reg)
/* Return the value from the given register in uW, mV, or mA */
static long long ltc2945_reg_to_val(struct device *dev, u8 reg)
{
- struct regmap *regmap = dev_get_drvdata(dev);
+ struct ltc2945_data *data = dev_get_drvdata(dev);
+ struct regmap *regmap = data->regmap;
+ u32 shunt_resistor = data->shunt_resistor;
unsigned int control;
u8 buf[3];
long long val;
@@ -78,10 +96,10 @@ static long long ltc2945_reg_to_val(struct device *dev, u8 reg)
return ret;
if (is_power_reg(reg)) {
- /* power */
+ /* 24-bit power */
val = (buf[0] << 16) + (buf[1] << 8) + buf[2];
} else {
- /* current, voltage */
+ /* 12-bit current, voltage */
val = (buf[0] << 4) + (buf[1] >> 4);
}
@@ -92,9 +110,7 @@ static long long ltc2945_reg_to_val(struct device *dev, u8 reg)
case LTC2945_MAX_POWER_THRES_H:
case LTC2945_MIN_POWER_THRES_H:
/*
- * Convert to uW by assuming current is measured with
- * an 1mOhm sense resistor, similar to current
- * measurements.
+ * Convert to uW
* Control register bit 0 selects if voltage at SENSE+/VDD
* or voltage at ADIN is used to measure power.
*/
@@ -108,6 +124,14 @@ static long long ltc2945_reg_to_val(struct device *dev, u8 reg)
/* 0.5 mV * 25 uV = 0.0125 uV resolution. */
val = (val * 25LL) >> 1;
}
+ val *= 1000;
+ /* Overflow check: Assuming max 24-bit power, val is at most 53 bits right now. */
+ val = DIV_ROUND_CLOSEST_ULL(val, shunt_resistor);
+ /*
+ * Overflow check: After division, depending on shunt resistor,
+ * val can still be > 32 bits so returning long long makes sense
+ */
+
break;
case LTC2945_VIN_H:
case LTC2945_MAX_VIN_H:
@@ -130,14 +154,11 @@ static long long ltc2945_reg_to_val(struct device *dev, u8 reg)
case LTC2945_MIN_SENSE_H:
case LTC2945_MAX_SENSE_THRES_H:
case LTC2945_MIN_SENSE_THRES_H:
- /*
- * 25 uV resolution. Convert to current as measured with
- * an 1 mOhm sense resistor, in mA. If a different sense
- * resistor is installed, calculate the actual current by
- * dividing the reported current by the sense resistor value
- * in mOhm.
- */
- val *= 25;
+ /* 25 uV resolution. Convert to mA. */
+ val *= 25 * 1000;
+ /* Overflow check: Assuming max 12-bit sense, val is at most 27 bits right now */
+ val = DIV_ROUND_CLOSEST_ULL(val, shunt_resistor);
+ /* Overflow check: After division, <= 27 bits */
break;
default:
return -EINVAL;
@@ -145,13 +166,18 @@ static long long ltc2945_reg_to_val(struct device *dev, u8 reg)
return val;
}
-static int ltc2945_val_to_reg(struct device *dev, u8 reg,
- unsigned long val)
+static long long ltc2945_val_to_reg(struct device *dev, u8 reg,
+ unsigned long long val)
{
- struct regmap *regmap = dev_get_drvdata(dev);
+ struct ltc2945_data *data = dev_get_drvdata(dev);
+ struct regmap *regmap = data->regmap;
+ u32 shunt_resistor = data->shunt_resistor;
unsigned int control;
int ret;
+ /* Ensure we don't overflow */
+ val = clamp_val(val, 0, U32_MAX);
+
switch (reg) {
case LTC2945_POWER_H:
case LTC2945_MAX_POWER_H:
@@ -159,9 +185,6 @@ static int ltc2945_val_to_reg(struct device *dev, u8 reg,
case LTC2945_MAX_POWER_THRES_H:
case LTC2945_MIN_POWER_THRES_H:
/*
- * Convert to register value by assuming current is measured
- * with an 1mOhm sense resistor, similar to current
- * measurements.
* Control register bit 0 selects if voltage at SENSE+/VDD
* or voltage at ADIN is used to measure power, which in turn
* determines register calculations.
@@ -171,14 +194,16 @@ static int ltc2945_val_to_reg(struct device *dev, u8 reg,
return ret;
if (control & CONTROL_MULT_SELECT) {
/* 25 mV * 25 uV = 0.625 uV resolution. */
- val = DIV_ROUND_CLOSEST(val, 625);
+ val *= shunt_resistor;
+ /* Overflow check: Assuming 32-bit val and shunt resistor, val <= 64bits */
+ val = DIV_ROUND_CLOSEST_ULL(val, 625 * 1000);
+ /* Overflow check: val is now <= 44 bits */
} else {
- /*
- * 0.5 mV * 25 uV = 0.0125 uV resolution.
- * Divide first to avoid overflow;
- * accept loss of accuracy.
- */
- val = DIV_ROUND_CLOSEST(val, 25) * 2;
+ /* 0.5 mV * 25 uV = 0.0125 uV resolution. */
+ val *= shunt_resistor;
+ /* Overflow check: Assuming 32-bit val and shunt resistor, val <= 64bits */
+ val = DIV_ROUND_CLOSEST_ULL(val, 25 * 1000) * 2;
+ /* Overflow check: val is now <= 51 bits */
}
break;
case LTC2945_VIN_H:
@@ -187,7 +212,7 @@ static int ltc2945_val_to_reg(struct device *dev, u8 reg,
case LTC2945_MAX_VIN_THRES_H:
case LTC2945_MIN_VIN_THRES_H:
/* 25 mV resolution. */
- val /= 25;
+ val = DIV_ROUND_CLOSEST_ULL(val, 25);
break;
case LTC2945_ADIN_H:
case LTC2945_MAX_ADIN_H:
@@ -202,14 +227,11 @@ static int ltc2945_val_to_reg(struct device *dev, u8 reg,
case LTC2945_MIN_SENSE_H:
case LTC2945_MAX_SENSE_THRES_H:
case LTC2945_MIN_SENSE_THRES_H:
- /*
- * 25 uV resolution. Convert to current as measured with
- * an 1 mOhm sense resistor, in mA. If a different sense
- * resistor is installed, calculate the actual current by
- * dividing the reported current by the sense resistor value
- * in mOhm.
- */
- val = DIV_ROUND_CLOSEST(val, 25);
+ /* 25 uV resolution. Convert to mA. */
+ val *= shunt_resistor;
+ /* Overflow check: Assuming 32-bit val and 32-bit shunt resistor, val is 64bits */
+ val = DIV_ROUND_CLOSEST_ULL(val, 25 * 1000);
+ /* Overflow check: val is now <= 50 bits */
break;
default:
return -EINVAL;
@@ -234,20 +256,23 @@ static ssize_t ltc2945_value_store(struct device *dev,
const char *buf, size_t count)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
- struct regmap *regmap = dev_get_drvdata(dev);
+ struct ltc2945_data *data = dev_get_drvdata(dev);
+ struct regmap *regmap = data->regmap;
u8 reg = attr->index;
- unsigned long val;
+ unsigned int val;
u8 regbuf[3];
int num_regs;
- int regval;
+ long long regval;
int ret;
- ret = kstrtoul(buf, 10, &val);
+ ret = kstrtouint(buf, 10, &val);
if (ret)
return ret;
/* convert to register value, then clamp and write result */
regval = ltc2945_val_to_reg(dev, reg, val);
+ if (regval < 0)
+ return regval;
if (is_power_reg(reg)) {
regval = clamp_val(regval, 0, 0xffffff);
regbuf[0] = regval >> 16;
@@ -269,7 +294,8 @@ static ssize_t ltc2945_history_store(struct device *dev,
const char *buf, size_t count)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
- struct regmap *regmap = dev_get_drvdata(dev);
+ struct ltc2945_data *data = dev_get_drvdata(dev);
+ struct regmap *regmap = data->regmap;
u8 reg = attr->index;
int num_regs = is_power_reg(reg) ? 3 : 2;
u8 buf_min[3] = { 0xff, 0xff, 0xff };
@@ -321,7 +347,8 @@ static ssize_t ltc2945_bool_show(struct device *dev,
struct device_attribute *da, char *buf)
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
- struct regmap *regmap = dev_get_drvdata(dev);
+ struct ltc2945_data *data = dev_get_drvdata(dev);
+ struct regmap *regmap = data->regmap;
unsigned int fault;
int ret;
@@ -450,6 +477,12 @@ static int ltc2945_probe(struct i2c_client *client)
struct device *dev = &client->dev;
struct device *hwmon_dev;
struct regmap *regmap;
+ struct ltc2945_data *data;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+ dev_set_drvdata(dev, data);
regmap = devm_regmap_init_i2c(client, &ltc2945_regmap_config);
if (IS_ERR(regmap)) {
@@ -457,11 +490,19 @@ static int ltc2945_probe(struct i2c_client *client)
return PTR_ERR(regmap);
}
+ data->regmap = regmap;
+ if (device_property_read_u32(dev, "shunt-resistor-micro-ohms",
+ &data->shunt_resistor))
+ data->shunt_resistor = 1000;
+
+ if (data->shunt_resistor == 0)
+ return -EINVAL;
+
/* Clear faults */
regmap_write(regmap, LTC2945_FAULT, 0x00);
hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
- regmap,
+ data,
ltc2945_groups);
return PTR_ERR_OR_ZERO(hwmon_dev);
}
@@ -475,8 +516,9 @@ MODULE_DEVICE_TABLE(i2c, ltc2945_id);
static struct i2c_driver ltc2945_driver = {
.driver = {
- .name = "ltc2945",
- },
+ .name = "ltc2945",
+ .of_match_table = of_match_ptr(ltc2945_of_match),
+ },
.probe_new = ltc2945_probe,
.id_table = ltc2945_id,
};
diff --git a/drivers/hwmon/mc34vr500.c b/drivers/hwmon/mc34vr500.c
new file mode 100644
index 000000000000..6268e973049c
--- /dev/null
+++ b/drivers/hwmon/mc34vr500.c
@@ -0,0 +1,263 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * An hwmon driver for the NXP MC34VR500 PMIC
+ *
+ * Author: Mario Kicherer <dev@kicherer.org>
+ */
+
+#include <linux/bits.h>
+#include <linux/dev_printk.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/hwmon.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/irqreturn.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+
+#define MC34VR500_I2C_ADDR 0x08
+#define MC34VR500_DEVICEID_VALUE 0x14
+
+/* INTSENSE0 */
+#define ENS_BIT BIT(0)
+#define LOWVINS_BIT BIT(1)
+#define THERM110S_BIT BIT(2)
+#define THERM120S_BIT BIT(3)
+#define THERM125S_BIT BIT(4)
+#define THERM130S_BIT BIT(5)
+
+#define MC34VR500_DEVICEID 0x00
+
+#define MC34VR500_SILICONREVID 0x03
+#define MC34VR500_FABID 0x04
+#define MC34VR500_INTSTAT0 0x05
+#define MC34VR500_INTMASK0 0x06
+#define MC34VR500_INTSENSE0 0x07
+
+struct mc34vr500_data {
+ struct device *hwmon_dev;
+ struct regmap *regmap;
+};
+
+static irqreturn_t mc34vr500_process_interrupt(int irq, void *userdata)
+{
+ struct mc34vr500_data *data = (struct mc34vr500_data *)userdata;
+ unsigned int reg;
+ int ret;
+
+ ret = regmap_read(data->regmap, MC34VR500_INTSTAT0, &reg);
+ if (ret < 0)
+ return IRQ_HANDLED;
+
+ if (reg) {
+ if (reg & LOWVINS_BIT)
+ hwmon_notify_event(data->hwmon_dev, hwmon_in,
+ hwmon_in_min_alarm, 0);
+
+ if (reg & THERM110S_BIT)
+ hwmon_notify_event(data->hwmon_dev, hwmon_temp,
+ hwmon_temp_max_alarm, 0);
+
+ if (reg & THERM120S_BIT)
+ hwmon_notify_event(data->hwmon_dev, hwmon_temp,
+ hwmon_temp_crit_alarm, 0);
+
+ if (reg & THERM130S_BIT)
+ hwmon_notify_event(data->hwmon_dev, hwmon_temp,
+ hwmon_temp_emergency_alarm, 0);
+
+ /* write 1 to clear */
+ regmap_write(data->regmap, MC34VR500_INTSTAT0, LOWVINS_BIT |
+ THERM110S_BIT | THERM120S_BIT | THERM130S_BIT);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static umode_t mc34vr500_is_visible(const void *data,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ switch (attr) {
+ case hwmon_in_min_alarm:
+ case hwmon_temp_max_alarm:
+ case hwmon_temp_crit_alarm:
+ case hwmon_temp_emergency_alarm:
+ return 0444;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int mc34vr500_alarm_read(struct mc34vr500_data *data, int index,
+ long *val)
+{
+ unsigned int reg;
+ int ret;
+
+ ret = regmap_read(data->regmap, MC34VR500_INTSENSE0, &reg);
+ if (ret < 0)
+ return ret;
+
+ *val = !!(reg & index);
+
+ return 0;
+}
+
+static int mc34vr500_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ struct mc34vr500_data *data = dev_get_drvdata(dev);
+
+ switch (type) {
+ case hwmon_in:
+ switch (attr) {
+ case hwmon_in_min_alarm:
+ return mc34vr500_alarm_read(data, LOWVINS_BIT, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+ case hwmon_temp:
+ switch (attr) {
+ case hwmon_temp_max_alarm:
+ return mc34vr500_alarm_read(data, THERM110S_BIT, val);
+ case hwmon_temp_crit_alarm:
+ return mc34vr500_alarm_read(data, THERM120S_BIT, val);
+ case hwmon_temp_emergency_alarm:
+ return mc34vr500_alarm_read(data, THERM130S_BIT, val);
+ default:
+ return -EOPNOTSUPP;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static const struct hwmon_channel_info *mc34vr500_info[] = {
+ HWMON_CHANNEL_INFO(in, HWMON_I_MIN_ALARM),
+ HWMON_CHANNEL_INFO(temp, HWMON_T_MAX_ALARM | HWMON_T_CRIT_ALARM
+ | HWMON_T_EMERGENCY_ALARM),
+ NULL,
+};
+
+static const struct hwmon_ops mc34vr500_hwmon_ops = {
+ .is_visible = mc34vr500_is_visible,
+ .read = mc34vr500_read,
+};
+
+static const struct hwmon_chip_info mc34vr500_chip_info = {
+ .ops = &mc34vr500_hwmon_ops,
+ .info = mc34vr500_info,
+};
+
+static const struct regmap_config mc34vr500_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = MC34VR500_INTSENSE0,
+};
+
+static int mc34vr500_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct mc34vr500_data *data;
+ struct device *hwmon_dev;
+ int ret;
+ unsigned int reg, revid, fabid;
+ struct regmap *regmap;
+
+ regmap = devm_regmap_init_i2c(client, &mc34vr500_regmap_config);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ data = devm_kzalloc(dev, sizeof(struct mc34vr500_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->regmap = regmap;
+
+ ret = regmap_read(regmap, MC34VR500_DEVICEID, &reg);
+ if (ret < 0)
+ return ret;
+
+ if (reg != MC34VR500_DEVICEID_VALUE)
+ return -ENODEV;
+
+ ret = regmap_read(regmap, MC34VR500_SILICONREVID, &revid);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_read(regmap, MC34VR500_FABID, &fabid);
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(dev, "mc34vr500: revid 0x%x fabid 0x%x\n", revid, fabid);
+
+ hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name,
+ data,
+ &mc34vr500_chip_info,
+ NULL);
+ if (IS_ERR(hwmon_dev))
+ return PTR_ERR(hwmon_dev);
+
+ data->hwmon_dev = hwmon_dev;
+
+ if (client->irq) {
+ ret = devm_request_threaded_irq(dev, client->irq, NULL,
+ mc34vr500_process_interrupt,
+ IRQF_TRIGGER_RISING |
+ IRQF_ONESHOT |
+ IRQF_SHARED,
+ dev_name(dev), data);
+ if (ret)
+ return ret;
+
+ /* write 1 to clear interrupts */
+ ret = regmap_write(regmap, MC34VR500_INTSTAT0, LOWVINS_BIT |
+ THERM110S_BIT | THERM120S_BIT |
+ THERM130S_BIT);
+ if (ret)
+ return ret;
+
+ /* unmask interrupts */
+ ret = regmap_write(regmap, MC34VR500_INTMASK0,
+ (unsigned int) ~(LOWVINS_BIT | THERM110S_BIT |
+ THERM120S_BIT | THERM130S_BIT));
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct i2c_device_id mc34vr500_id[] = {
+ { "mc34vr500", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, mc34vr500_id);
+
+static const struct of_device_id __maybe_unused mc34vr500_of_match[] = {
+ { .compatible = "nxp,mc34vr500" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, mc34vr500_of_match);
+
+static struct i2c_driver mc34vr500_driver = {
+ .driver = {
+ .name = "mc34vr500",
+ .of_match_table = of_match_ptr(mc34vr500_of_match),
+ },
+ .probe_new = mc34vr500_probe,
+ .id_table = mc34vr500_id,
+};
+
+module_i2c_driver(mc34vr500_driver);
+
+MODULE_AUTHOR("Mario Kicherer <dev@kicherer.org>");
+
+MODULE_DESCRIPTION("MC34VR500 driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/mlxreg-fan.c b/drivers/hwmon/mlxreg-fan.c
index b48bd7c961d6..96017cc8da7e 100644
--- a/drivers/hwmon/mlxreg-fan.c
+++ b/drivers/hwmon/mlxreg-fan.c
@@ -155,6 +155,12 @@ mlxreg_fan_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
if (err)
return err;
+ if (MLXREG_FAN_GET_FAULT(regval, tacho->mask)) {
+ /* FAN is broken - return zero for FAN speed. */
+ *val = 0;
+ return 0;
+ }
+
*val = MLXREG_FAN_GET_RPM(regval, fan->divider,
fan->samples);
break;
diff --git a/drivers/hwmon/nct6775-core.c b/drivers/hwmon/nct6775-core.c
index da9ec6983e13..c54233f0369b 100644
--- a/drivers/hwmon/nct6775-core.c
+++ b/drivers/hwmon/nct6775-core.c
@@ -1150,7 +1150,7 @@ static int nct6775_write_fan_div(struct nct6775_data *data, int nr)
if (err)
return err;
reg &= 0x70 >> oddshift;
- reg |= data->fan_div[nr] & (0x7 << oddshift);
+ reg |= (data->fan_div[nr] & 0x7) << oddshift;
return nct6775_write_value(data, fandiv_reg, reg);
}
diff --git a/drivers/hwmon/nct6775-platform.c b/drivers/hwmon/nct6775-platform.c
index bf43f73dc835..76c6b564d7fc 100644
--- a/drivers/hwmon/nct6775-platform.c
+++ b/drivers/hwmon/nct6775-platform.c
@@ -17,7 +17,6 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
-#include <linux/wmi.h>
#include "nct6775.h"
@@ -107,40 +106,51 @@ struct nct6775_sio_data {
void (*sio_exit)(struct nct6775_sio_data *sio_data);
};
-#define ASUSWMI_MONITORING_GUID "466747A0-70EC-11DE-8A39-0800200C9A66"
+#define ASUSWMI_METHOD "WMBD"
#define ASUSWMI_METHODID_RSIO 0x5253494F
#define ASUSWMI_METHODID_WSIO 0x5753494F
#define ASUSWMI_METHODID_RHWM 0x5248574D
#define ASUSWMI_METHODID_WHWM 0x5748574D
#define ASUSWMI_UNSUPPORTED_METHOD 0xFFFFFFFE
+#define ASUSWMI_DEVICE_HID "PNP0C14"
+#define ASUSWMI_DEVICE_UID "ASUSWMI"
+#define ASUSMSI_DEVICE_UID "AsusMbSwInterface"
+
+#if IS_ENABLED(CONFIG_ACPI)
+/*
+ * ASUS boards have only one device with WMI "WMBD" method and have provided
+ * access to only one SuperIO chip at 0x0290.
+ */
+static struct acpi_device *asus_acpi_dev;
+#endif
static int nct6775_asuswmi_evaluate_method(u32 method_id, u8 bank, u8 reg, u8 val, u32 *retval)
{
-#if IS_ENABLED(CONFIG_ACPI_WMI)
+#if IS_ENABLED(CONFIG_ACPI)
+ acpi_handle handle = acpi_device_handle(asus_acpi_dev);
u32 args = bank | (reg << 8) | (val << 16);
- struct acpi_buffer input = { (acpi_size) sizeof(args), &args };
- struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+ struct acpi_object_list input;
+ union acpi_object params[3];
+ unsigned long long result;
acpi_status status;
- union acpi_object *obj;
- u32 tmp = ASUSWMI_UNSUPPORTED_METHOD;
-
- status = wmi_evaluate_method(ASUSWMI_MONITORING_GUID, 0,
- method_id, &input, &output);
+ params[0].type = ACPI_TYPE_INTEGER;
+ params[0].integer.value = 0;
+ params[1].type = ACPI_TYPE_INTEGER;
+ params[1].integer.value = method_id;
+ params[2].type = ACPI_TYPE_BUFFER;
+ params[2].buffer.length = sizeof(args);
+ params[2].buffer.pointer = (void *)&args;
+ input.count = 3;
+ input.pointer = params;
+
+ status = acpi_evaluate_integer(handle, ASUSWMI_METHOD, &input, &result);
if (ACPI_FAILURE(status))
return -EIO;
- obj = output.pointer;
- if (obj && obj->type == ACPI_TYPE_INTEGER)
- tmp = obj->integer.value;
-
if (retval)
- *retval = tmp;
-
- kfree(obj);
+ *retval = (u32)result & 0xFFFFFFFF;
- if (tmp == ASUSWMI_UNSUPPORTED_METHOD)
- return -ENODEV;
return 0;
#else
return -EOPNOTSUPP;
@@ -1099,6 +1109,91 @@ static const char * const asus_wmi_boards[] = {
"TUF GAMING Z490-PLUS (WI-FI)",
};
+static const char * const asus_msi_boards[] = {
+ "EX-B660M-V5 PRO D4",
+ "PRIME B650-PLUS",
+ "PRIME B650M-A",
+ "PRIME B650M-A AX",
+ "PRIME B650M-A II",
+ "PRIME B650M-A WIFI",
+ "PRIME B650M-A WIFI II",
+ "PRIME B660M-A D4",
+ "PRIME B660M-A WIFI D4",
+ "PRIME X670-P",
+ "PRIME X670-P WIFI",
+ "PRIME X670E-PRO WIFI",
+ "Pro B660M-C-D4",
+ "ProArt B660-CREATOR D4",
+ "ProArt X670E-CREATOR WIFI",
+ "ROG CROSSHAIR X670E EXTREME",
+ "ROG CROSSHAIR X670E GENE",
+ "ROG CROSSHAIR X670E HERO",
+ "ROG MAXIMUS XIII EXTREME GLACIAL",
+ "ROG MAXIMUS Z690 EXTREME",
+ "ROG MAXIMUS Z690 EXTREME GLACIAL",
+ "ROG STRIX B650-A GAMING WIFI",
+ "ROG STRIX B650E-E GAMING WIFI",
+ "ROG STRIX B650E-F GAMING WIFI",
+ "ROG STRIX B650E-I GAMING WIFI",
+ "ROG STRIX B660-A GAMING WIFI D4",
+ "ROG STRIX B660-F GAMING WIFI",
+ "ROG STRIX B660-G GAMING WIFI",
+ "ROG STRIX B660-I GAMING WIFI",
+ "ROG STRIX X670E-A GAMING WIFI",
+ "ROG STRIX X670E-E GAMING WIFI",
+ "ROG STRIX X670E-F GAMING WIFI",
+ "ROG STRIX X670E-I GAMING WIFI",
+ "ROG STRIX Z590-A GAMING WIFI II",
+ "ROG STRIX Z690-A GAMING WIFI D4",
+ "TUF GAMING B650-PLUS",
+ "TUF GAMING B650-PLUS WIFI",
+ "TUF GAMING B650M-PLUS",
+ "TUF GAMING B650M-PLUS WIFI",
+ "TUF GAMING B660M-PLUS WIFI",
+ "TUF GAMING X670E-PLUS",
+ "TUF GAMING X670E-PLUS WIFI",
+ "TUF GAMING Z590-PLUS WIFI",
+};
+
+#if IS_ENABLED(CONFIG_ACPI)
+/*
+ * Callback for acpi_bus_for_each_dev() to find the right device
+ * by _UID and _HID and return 1 to stop iteration.
+ */
+static int nct6775_asuswmi_device_match(struct device *dev, void *data)
+{
+ struct acpi_device *adev = to_acpi_device(dev);
+ const char *uid = acpi_device_uid(adev);
+ const char *hid = acpi_device_hid(adev);
+
+ if (hid && !strcmp(hid, ASUSWMI_DEVICE_HID) && uid && !strcmp(uid, data)) {
+ asus_acpi_dev = adev;
+ return 1;
+ }
+
+ return 0;
+}
+#endif
+
+static enum sensor_access nct6775_determine_access(const char *device_uid)
+{
+#if IS_ENABLED(CONFIG_ACPI)
+ u8 tmp;
+
+ acpi_bus_for_each_dev(nct6775_asuswmi_device_match, (void *)device_uid);
+ if (!asus_acpi_dev)
+ return access_direct;
+
+ /* if reading chip id via ACPI succeeds, use WMI "WMBD" method for access */
+ if (!nct6775_asuswmi_read(0, NCT6775_PORT_CHIPID, &tmp) && tmp) {
+ pr_debug("Using Asus WMBD method of %s to access %#x chip.\n", device_uid, tmp);
+ return access_asuswmi;
+ }
+#endif
+
+ return access_direct;
+}
+
static int __init sensors_nct6775_platform_init(void)
{
int i, err;
@@ -1109,7 +1204,6 @@ static int __init sensors_nct6775_platform_init(void)
int sioaddr[2] = { 0x2e, 0x4e };
enum sensor_access access = access_direct;
const char *board_vendor, *board_name;
- u8 tmp;
err = platform_driver_register(&nct6775_driver);
if (err)
@@ -1122,15 +1216,13 @@ static int __init sensors_nct6775_platform_init(void)
!strcmp(board_vendor, "ASUSTeK COMPUTER INC.")) {
err = match_string(asus_wmi_boards, ARRAY_SIZE(asus_wmi_boards),
board_name);
- if (err >= 0) {
- /* if reading chip id via WMI succeeds, use WMI */
- if (!nct6775_asuswmi_read(0, NCT6775_PORT_CHIPID, &tmp) && tmp) {
- pr_info("Using Asus WMI to access %#x chip.\n", tmp);
- access = access_asuswmi;
- } else {
- pr_err("Can't read ChipID by Asus WMI.\n");
- }
- }
+ if (err >= 0)
+ access = nct6775_determine_access(ASUSWMI_DEVICE_UID);
+
+ err = match_string(asus_msi_boards, ARRAY_SIZE(asus_msi_boards),
+ board_name);
+ if (err >= 0)
+ access = nct6775_determine_access(ASUSMSI_DEVICE_UID);
}
/*
diff --git a/drivers/hwmon/nzxt-smart2.c b/drivers/hwmon/nzxt-smart2.c
index 533f38b0b4e9..2b93ba89610a 100644
--- a/drivers/hwmon/nzxt-smart2.c
+++ b/drivers/hwmon/nzxt-smart2.c
@@ -791,6 +791,7 @@ static const struct hid_device_id nzxt_smart2_hid_id_table[] = {
{ HID_USB_DEVICE(0x1e71, 0x2009) }, /* NZXT RGB & Fan Controller */
{ HID_USB_DEVICE(0x1e71, 0x200e) }, /* NZXT RGB & Fan Controller */
{ HID_USB_DEVICE(0x1e71, 0x2010) }, /* NZXT RGB & Fan Controller */
+ { HID_USB_DEVICE(0x1e71, 0x2019) }, /* NZXT RGB & Fan Controller */
{},
};
diff --git a/drivers/hwmon/oxp-sensors.c b/drivers/hwmon/oxp-sensors.c
index f84ec8f8eda9..36872b57912a 100644
--- a/drivers/hwmon/oxp-sensors.c
+++ b/drivers/hwmon/oxp-sensors.c
@@ -1,12 +1,12 @@
// SPDX-License-Identifier: GPL-2.0+
/*
- * Platform driver for OXP Handhelds that expose fan reading and control
- * via hwmon sysfs.
+ * Platform driver for OneXPlayer, AOK ZOE, and Aya Neo Handhelds that expose
+ * fan reading and control via hwmon sysfs.
*
- * Old boards have the same DMI strings and they are told appart by the
- * boot cpu vendor (Intel/AMD). Currently only AMD boards are supported
- * but the code is made to be simple to add other handheld boards in the
- * future.
+ * Old OXP boards have the same DMI strings and they are told apart by
+ * the boot cpu vendor (Intel/AMD). Currently only AMD boards are
+ * supported but the code is made to be simple to add other handheld
+ * boards in the future.
* Fan control is provided via pwm interface in the range [0-255].
* Old AMD boards use [0-100] as range in the EC, the written value is
* scaled to accommodate for that. Newer boards like the mini PRO and
@@ -42,6 +42,8 @@ static bool unlock_global_acpi_lock(void)
enum oxp_board {
aok_zoe_a1 = 1,
+ aya_neo_air,
+ aya_neo_air_pro,
oxp_mini_amd,
oxp_mini_amd_pro,
};
@@ -62,6 +64,20 @@ static const struct dmi_system_id dmi_table[] = {
},
{
.matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "AYANEO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "AIR"),
+ },
+ .driver_data = (void *) &(enum oxp_board) {aya_neo_air},
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "AYANEO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "AIR Pro"),
+ },
+ .driver_data = (void *) &(enum oxp_board) {aya_neo_air_pro},
+ },
+ {
+ .matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "ONE-NETBOOK"),
DMI_EXACT_MATCH(DMI_BOARD_NAME, "ONE XPLAYER"),
},
@@ -161,8 +177,17 @@ static int oxp_platform_read(struct device *dev, enum hwmon_sensor_types type,
ret = read_from_ec(OXP_SENSOR_PWM_REG, 1, val);
if (ret)
return ret;
- if (board == oxp_mini_amd)
+ switch (board) {
+ case aya_neo_air:
+ case aya_neo_air_pro:
+ case oxp_mini_amd:
*val = (*val * 255) / 100;
+ break;
+ case oxp_mini_amd_pro:
+ case aok_zoe_a1:
+ default:
+ break;
+ }
return 0;
case hwmon_pwm_enable:
return read_from_ec(OXP_SENSOR_PWM_ENABLE_REG, 1, val);
@@ -191,8 +216,17 @@ static int oxp_platform_write(struct device *dev, enum hwmon_sensor_types type,
case hwmon_pwm_input:
if (val < 0 || val > 255)
return -EINVAL;
- if (board == oxp_mini_amd)
+ switch (board) {
+ case aya_neo_air:
+ case aya_neo_air_pro:
+ case oxp_mini_amd:
val = (val * 100) / 255;
+ break;
+ case aok_zoe_a1:
+ case oxp_mini_amd_pro:
+ default:
+ break;
+ }
return write_to_ec(dev, OXP_SENSOR_PWM_REG, val);
default:
break;
@@ -233,7 +267,7 @@ static int oxp_platform_probe(struct platform_device *pdev)
/*
* Have to check for AMD processor here because DMI strings are the
- * same between Intel and AMD boards, the only way to tell them appart
+ * same between Intel and AMD boards, the only way to tell them apart
* is the CPU.
* Intel boards seem to have different EC registers and values to
* read/write.
diff --git a/drivers/hwmon/peci/cputemp.c b/drivers/hwmon/peci/cputemp.c
index 57470fda5f6c..30850a479f61 100644
--- a/drivers/hwmon/peci/cputemp.c
+++ b/drivers/hwmon/peci/cputemp.c
@@ -402,7 +402,7 @@ static int create_temp_label(struct peci_cputemp *priv)
unsigned long core_max = find_last_bit(priv->core_mask, CORE_NUMS_MAX);
int i;
- priv->coretemp_label = devm_kzalloc(priv->dev, core_max * sizeof(char *), GFP_KERNEL);
+ priv->coretemp_label = devm_kzalloc(priv->dev, (core_max + 1) * sizeof(char *), GFP_KERNEL);
if (!priv->coretemp_label)
return -ENOMEM;
diff --git a/drivers/hwmon/pmbus/Kconfig b/drivers/hwmon/pmbus/Kconfig
index 89668af67206..59d9a7430499 100644
--- a/drivers/hwmon/pmbus/Kconfig
+++ b/drivers/hwmon/pmbus/Kconfig
@@ -237,10 +237,10 @@ config SENSORS_MAX16064
be called max16064.
config SENSORS_MAX16601
- tristate "Maxim MAX16508, MAX16601, MAX16602"
+ tristate "Maxim MAX16508, MAX16600, MAX16601, and MAX16602"
help
If you say yes here you get hardware monitoring support for Maxim
- MAX16508, MAX16601 and MAX16602.
+ MAX16508, MAX16600, MAX16601, and MAX16602.
This driver can also be built as a module. If so, the module will
be called max16601.
@@ -317,6 +317,22 @@ config SENSORS_MP5023
This driver can also be built as a module. If so, the module will
be called mp5023.
+config SENSORS_MPQ7932_REGULATOR
+ bool "Regulator support for MPQ7932"
+ depends on SENSORS_MPQ7932 && REGULATOR
+ help
+ If you say yes here you get six integrated buck converter regulator
+ support for power management IC MPS MPQ7932.
+
+config SENSORS_MPQ7932
+ tristate "MPS MPQ7932"
+ help
+ If you say yes here you get hardware monitoring functionality support
+ for power management IC MPS MPQ7932.
+
+ This driver can also be built as a module. If so, the module will
+ be called mpq7932.
+
config SENSORS_PIM4328
tristate "Flex PIM4328 and compatibles"
help
@@ -379,6 +395,22 @@ config SENSORS_STPDDC60
This driver can also be built as a module. If so, the module will
be called stpddc60.
+config SENSORS_TDA38640
+ tristate "Infineon TDA38640"
+ help
+ If you say yes here you get hardware monitoring support for Infineon
+ TDA38640.
+
+ This driver can also be built as a module. If so, the module will
+ be called tda38640.
+
+config SENSORS_TDA38640_REGULATOR
+ bool "Regulator support for TDA38640 and compatibles"
+ depends on SENSORS_TDA38640 && REGULATOR
+ help
+ If you say yes here you get regulator support for Infineon
+ TDA38640 as regulator.
+
config SENSORS_TPS40422
tristate "TI TPS40422"
help
diff --git a/drivers/hwmon/pmbus/Makefile b/drivers/hwmon/pmbus/Makefile
index 0002dbe22d52..3ae019916267 100644
--- a/drivers/hwmon/pmbus/Makefile
+++ b/drivers/hwmon/pmbus/Makefile
@@ -34,11 +34,13 @@ obj-$(CONFIG_SENSORS_MAX8688) += max8688.o
obj-$(CONFIG_SENSORS_MP2888) += mp2888.o
obj-$(CONFIG_SENSORS_MP2975) += mp2975.o
obj-$(CONFIG_SENSORS_MP5023) += mp5023.o
+obj-$(CONFIG_SENSORS_MPQ7932) += mpq7932.o
obj-$(CONFIG_SENSORS_PLI1209BC) += pli1209bc.o
obj-$(CONFIG_SENSORS_PM6764TR) += pm6764tr.o
obj-$(CONFIG_SENSORS_PXE1610) += pxe1610.o
obj-$(CONFIG_SENSORS_Q54SJ108A2) += q54sj108a2.o
obj-$(CONFIG_SENSORS_STPDDC60) += stpddc60.o
+obj-$(CONFIG_SENSORS_TDA38640) += tda38640.o
obj-$(CONFIG_SENSORS_TPS40422) += tps40422.o
obj-$(CONFIG_SENSORS_TPS53679) += tps53679.o
obj-$(CONFIG_SENSORS_TPS546D24) += tps546d24.o
diff --git a/drivers/hwmon/pmbus/ltc2978.c b/drivers/hwmon/pmbus/ltc2978.c
index 79f480b4425d..91df8e895147 100644
--- a/drivers/hwmon/pmbus/ltc2978.c
+++ b/drivers/hwmon/pmbus/ltc2978.c
@@ -570,14 +570,14 @@ MODULE_DEVICE_TABLE(i2c, ltc2978_id);
#define LTC2978_N_VOLTAGES ((LTC2978_MAX_UV / LTC2978_UV_STEP) + 1)
static const struct regulator_desc ltc2978_reg_desc[] = {
- PMBUS_REGULATOR_STEP("vout", 0, LTC2978_N_VOLTAGES, LTC2978_UV_STEP),
- PMBUS_REGULATOR_STEP("vout", 1, LTC2978_N_VOLTAGES, LTC2978_UV_STEP),
- PMBUS_REGULATOR_STEP("vout", 2, LTC2978_N_VOLTAGES, LTC2978_UV_STEP),
- PMBUS_REGULATOR_STEP("vout", 3, LTC2978_N_VOLTAGES, LTC2978_UV_STEP),
- PMBUS_REGULATOR_STEP("vout", 4, LTC2978_N_VOLTAGES, LTC2978_UV_STEP),
- PMBUS_REGULATOR_STEP("vout", 5, LTC2978_N_VOLTAGES, LTC2978_UV_STEP),
- PMBUS_REGULATOR_STEP("vout", 6, LTC2978_N_VOLTAGES, LTC2978_UV_STEP),
- PMBUS_REGULATOR_STEP("vout", 7, LTC2978_N_VOLTAGES, LTC2978_UV_STEP),
+ PMBUS_REGULATOR_STEP("vout", 0, LTC2978_N_VOLTAGES, LTC2978_UV_STEP, 0),
+ PMBUS_REGULATOR_STEP("vout", 1, LTC2978_N_VOLTAGES, LTC2978_UV_STEP, 0),
+ PMBUS_REGULATOR_STEP("vout", 2, LTC2978_N_VOLTAGES, LTC2978_UV_STEP, 0),
+ PMBUS_REGULATOR_STEP("vout", 3, LTC2978_N_VOLTAGES, LTC2978_UV_STEP, 0),
+ PMBUS_REGULATOR_STEP("vout", 4, LTC2978_N_VOLTAGES, LTC2978_UV_STEP, 0),
+ PMBUS_REGULATOR_STEP("vout", 5, LTC2978_N_VOLTAGES, LTC2978_UV_STEP, 0),
+ PMBUS_REGULATOR_STEP("vout", 6, LTC2978_N_VOLTAGES, LTC2978_UV_STEP, 0),
+ PMBUS_REGULATOR_STEP("vout", 7, LTC2978_N_VOLTAGES, LTC2978_UV_STEP, 0),
};
static const struct regulator_desc ltc2978_reg_desc_default[] = {
diff --git a/drivers/hwmon/pmbus/max16601.c b/drivers/hwmon/pmbus/max16601.c
index b628405e6586..6724f723f74c 100644
--- a/drivers/hwmon/pmbus/max16601.c
+++ b/drivers/hwmon/pmbus/max16601.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Hardware monitoring driver for Maxim MAX16508, MAX16601 and MAX16602.
+ * Hardware monitoring driver for Maxim MAX16508, MAX16600, MAX16601,
+ * and MAX16602.
*
* Implementation notes:
*
@@ -31,7 +32,7 @@
#include "pmbus.h"
-enum chips { max16508, max16601, max16602 };
+enum chips { max16508, max16600, max16601, max16602 };
#define REG_DEFAULT_NUM_POP 0xc4
#define REG_SETPT_DVID 0xd1
@@ -202,7 +203,7 @@ static int max16601_identify(struct i2c_client *client,
else
info->vrm_version[0] = vr12;
- if (data->id != max16601 && data->id != max16602)
+ if (data->id != max16600 && data->id != max16601 && data->id != max16602)
return 0;
reg = i2c_smbus_read_byte_data(client, REG_DEFAULT_NUM_POP);
@@ -263,6 +264,7 @@ static void max16601_remove(void *_data)
static const struct i2c_device_id max16601_id[] = {
{"max16508", max16508},
+ {"max16600", max16600},
{"max16601", max16601},
{"max16602", max16602},
{}
@@ -281,11 +283,13 @@ static int max16601_get_id(struct i2c_client *client)
return -ENODEV;
/*
- * PMBUS_IC_DEVICE_ID is expected to return "MAX16601y.xx" or
- * MAX16602y.xx or "MAX16500y.xx".cdxxcccccccccc
+ * PMBUS_IC_DEVICE_ID is expected to return MAX1660[012]y.xx" or
+ * "MAX16500y.xx".cdxxcccccccccc
*/
if (!strncmp(buf, "MAX16500", 8)) {
id = max16508;
+ } else if (!strncmp(buf, "MAX16600", 8)) {
+ id = max16600;
} else if (!strncmp(buf, "MAX16601", 8)) {
id = max16601;
} else if (!strncmp(buf, "MAX16602", 8)) {
diff --git a/drivers/hwmon/pmbus/mpq7932.c b/drivers/hwmon/pmbus/mpq7932.c
new file mode 100644
index 000000000000..ff939881dc3b
--- /dev/null
+++ b/drivers/hwmon/pmbus/mpq7932.c
@@ -0,0 +1,156 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * mpq7932.c - hwmon with optional regulator driver for mps mpq7932
+ * Copyright 2022 Monolithic Power Systems, Inc
+ *
+ * Author: Saravanan Sekar <saravanan@linumiz.com>
+ */
+
+#include <linux/bits.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/pmbus.h>
+#include "pmbus.h"
+
+#define MPQ7932_BUCK_UV_MIN 206250
+#define MPQ7932_UV_STEP 6250
+#define MPQ7932_N_VOLTAGES 256
+#define MPQ7932_VOUT_MAX 0xFF
+#define MPQ7932_NUM_PAGES 6
+
+#define MPQ7932_TON_DELAY 0x60
+#define MPQ7932_VOUT_STARTUP_SLEW 0xA3
+#define MPQ7932_VOUT_SHUTDOWN_SLEW 0xA5
+#define MPQ7932_VOUT_SLEW_MASK GENMASK(1, 0)
+#define MPQ7932_TON_DELAY_MASK GENMASK(4, 0)
+
+struct mpq7932_data {
+ struct pmbus_driver_info info;
+ struct pmbus_platform_data pdata;
+};
+
+#if IS_ENABLED(CONFIG_SENSORS_MPQ7932_REGULATOR)
+static struct regulator_desc mpq7932_regulators_desc[] = {
+ PMBUS_REGULATOR_STEP("buck", 0, MPQ7932_N_VOLTAGES,
+ MPQ7932_UV_STEP, MPQ7932_BUCK_UV_MIN),
+ PMBUS_REGULATOR_STEP("buck", 1, MPQ7932_N_VOLTAGES,
+ MPQ7932_UV_STEP, MPQ7932_BUCK_UV_MIN),
+ PMBUS_REGULATOR_STEP("buck", 2, MPQ7932_N_VOLTAGES,
+ MPQ7932_UV_STEP, MPQ7932_BUCK_UV_MIN),
+ PMBUS_REGULATOR_STEP("buck", 3, MPQ7932_N_VOLTAGES,
+ MPQ7932_UV_STEP, MPQ7932_BUCK_UV_MIN),
+ PMBUS_REGULATOR_STEP("buck", 4, MPQ7932_N_VOLTAGES,
+ MPQ7932_UV_STEP, MPQ7932_BUCK_UV_MIN),
+ PMBUS_REGULATOR_STEP("buck", 5, MPQ7932_N_VOLTAGES,
+ MPQ7932_UV_STEP, MPQ7932_BUCK_UV_MIN),
+};
+#endif
+
+static int mpq7932_write_word_data(struct i2c_client *client, int page, int reg,
+ u16 word)
+{
+ switch (reg) {
+ /*
+ * chip supports only byte access for VOUT_COMMAND otherwise
+ * access results -EREMOTEIO
+ */
+ case PMBUS_VOUT_COMMAND:
+ return pmbus_write_byte_data(client, page, reg, word & 0xFF);
+
+ default:
+ return -ENODATA;
+ }
+}
+
+static int mpq7932_read_word_data(struct i2c_client *client, int page,
+ int phase, int reg)
+{
+ switch (reg) {
+ /*
+ * chip supports neither (PMBUS_VOUT_MARGIN_HIGH, PMBUS_VOUT_MARGIN_LOW)
+ * nor (PMBUS_MFR_VOUT_MIN, PMBUS_MFR_VOUT_MAX). As a result set voltage
+ * fails due to error in pmbus_regulator_get_low_margin, so faked.
+ */
+ case PMBUS_MFR_VOUT_MIN:
+ return 0;
+
+ case PMBUS_MFR_VOUT_MAX:
+ return MPQ7932_VOUT_MAX;
+
+ /*
+ * chip supports only byte access for VOUT_COMMAND otherwise
+ * access results in -EREMOTEIO
+ */
+ case PMBUS_READ_VOUT:
+ return pmbus_read_byte_data(client, page, PMBUS_VOUT_COMMAND);
+
+ default:
+ return -ENODATA;
+ }
+}
+
+static int mpq7932_probe(struct i2c_client *client)
+{
+ struct mpq7932_data *data;
+ struct pmbus_driver_info *info;
+ struct device *dev = &client->dev;
+ int i;
+
+ data = devm_kzalloc(dev, sizeof(struct mpq7932_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ info = &data->info;
+ info->pages = MPQ7932_NUM_PAGES;
+ info->format[PSC_VOLTAGE_OUT] = direct;
+ info->m[PSC_VOLTAGE_OUT] = 160;
+ info->b[PSC_VOLTAGE_OUT] = -33;
+ for (i = 0; i < info->pages; i++) {
+ info->func[i] = PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT
+ | PMBUS_HAVE_STATUS_TEMP;
+ }
+
+#if IS_ENABLED(CONFIG_SENSORS_MPQ7932_REGULATOR)
+ info->num_regulators = ARRAY_SIZE(mpq7932_regulators_desc);
+ info->reg_desc = mpq7932_regulators_desc;
+#endif
+
+ info->read_word_data = mpq7932_read_word_data;
+ info->write_word_data = mpq7932_write_word_data;
+
+ data->pdata.flags = PMBUS_NO_CAPABILITY;
+ dev->platform_data = &data->pdata;
+
+ return pmbus_do_probe(client, info);
+}
+
+static const struct of_device_id mpq7932_of_match[] = {
+ { .compatible = "mps,mpq7932"},
+ {},
+};
+MODULE_DEVICE_TABLE(of, mpq7932_of_match);
+
+static const struct i2c_device_id mpq7932_id[] = {
+ { "mpq7932", },
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, mpq7932_id);
+
+static struct i2c_driver mpq7932_regulator_driver = {
+ .driver = {
+ .name = "mpq7932",
+ .of_match_table = mpq7932_of_match,
+ },
+ .probe_new = mpq7932_probe,
+ .id_table = mpq7932_id,
+};
+module_i2c_driver(mpq7932_regulator_driver);
+
+MODULE_AUTHOR("Saravanan Sekar <saravanan@linumiz.com>");
+MODULE_DESCRIPTION("MPQ7932 PMIC regulator driver");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(PMBUS);
diff --git a/drivers/hwmon/pmbus/pmbus.h b/drivers/hwmon/pmbus/pmbus.h
index 10fb17879f8e..713ea7915425 100644
--- a/drivers/hwmon/pmbus/pmbus.h
+++ b/drivers/hwmon/pmbus/pmbus.h
@@ -464,7 +464,7 @@ struct pmbus_driver_info {
extern const struct regulator_ops pmbus_regulator_ops;
/* Macros for filling in array of struct regulator_desc */
-#define PMBUS_REGULATOR_STEP(_name, _id, _voltages, _step) \
+#define PMBUS_REGULATOR_STEP(_name, _id, _voltages, _step, _min_uV) \
[_id] = { \
.name = (_name # _id), \
.id = (_id), \
@@ -475,9 +475,10 @@ extern const struct regulator_ops pmbus_regulator_ops;
.owner = THIS_MODULE, \
.n_voltages = _voltages, \
.uV_step = _step, \
+ .min_uV = _min_uV, \
}
-#define PMBUS_REGULATOR(_name, _id) PMBUS_REGULATOR_STEP(_name, _id, 0, 0)
+#define PMBUS_REGULATOR(_name, _id) PMBUS_REGULATOR_STEP(_name, _id, 0, 0, 0)
/* Function declarations */
diff --git a/drivers/hwmon/pmbus/tda38640.c b/drivers/hwmon/pmbus/tda38640.c
new file mode 100644
index 000000000000..c3e781319cd1
--- /dev/null
+++ b/drivers/hwmon/pmbus/tda38640.c
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Hardware monitoring driver for Infineon TDA38640
+ *
+ * Copyright (c) 2023 9elements GmbH
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/regulator/driver.h>
+#include "pmbus.h"
+
+static const struct regulator_desc __maybe_unused tda38640_reg_desc[] = {
+ PMBUS_REGULATOR("vout", 0),
+};
+
+static struct pmbus_driver_info tda38640_info = {
+ .pages = 1,
+ .format[PSC_VOLTAGE_IN] = linear,
+ .format[PSC_VOLTAGE_OUT] = linear,
+ .format[PSC_CURRENT_OUT] = linear,
+ .format[PSC_CURRENT_IN] = linear,
+ .format[PSC_POWER] = linear,
+ .format[PSC_TEMPERATURE] = linear,
+
+ .func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_STATUS_INPUT
+ | PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP
+ | PMBUS_HAVE_IIN
+ | PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT
+ | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT
+ | PMBUS_HAVE_POUT | PMBUS_HAVE_PIN,
+#if IS_ENABLED(CONFIG_SENSORS_TDA38640_REGULATOR)
+ .num_regulators = 1,
+ .reg_desc = tda38640_reg_desc,
+#endif
+};
+
+static int tda38640_probe(struct i2c_client *client)
+{
+ return pmbus_do_probe(client, &tda38640_info);
+}
+
+static const struct i2c_device_id tda38640_id[] = {
+ {"tda38640", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, tda38640_id);
+
+static const struct of_device_id __maybe_unused tda38640_of_match[] = {
+ { .compatible = "infineon,tda38640"},
+ { },
+};
+MODULE_DEVICE_TABLE(of, tda38640_of_match);
+
+/* This is the driver that will be inserted */
+static struct i2c_driver tda38640_driver = {
+ .driver = {
+ .name = "tda38640",
+ .of_match_table = of_match_ptr(tda38640_of_match),
+ },
+ .probe_new = tda38640_probe,
+ .id_table = tda38640_id,
+};
+
+module_i2c_driver(tda38640_driver);
+
+MODULE_AUTHOR("Patrick Rudolph <patrick.rudolph@9elements.com>");
+MODULE_DESCRIPTION("PMBus driver for Infineon TDA38640");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(PMBUS);
diff --git a/drivers/hwmon/s3c-hwmon.c b/drivers/hwmon/s3c-hwmon.c
deleted file mode 100644
index 70ae665db477..000000000000
--- a/drivers/hwmon/s3c-hwmon.c
+++ /dev/null
@@ -1,379 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/* linux/drivers/hwmon/s3c-hwmon.c
- *
- * Copyright (C) 2005, 2008, 2009 Simtec Electronics
- * http://armlinux.simtec.co.uk/
- * Ben Dooks <ben@simtec.co.uk>
- *
- * S3C24XX/S3C64XX ADC hwmon support
-*/
-
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/io.h>
-#include <linux/init.h>
-#include <linux/err.h>
-#include <linux/clk.h>
-#include <linux/interrupt.h>
-#include <linux/platform_device.h>
-
-#include <linux/hwmon.h>
-#include <linux/hwmon-sysfs.h>
-
-#include <linux/soc/samsung/s3c-adc.h>
-#include <linux/platform_data/hwmon-s3c.h>
-
-struct s3c_hwmon_attr {
- struct sensor_device_attribute in;
- struct sensor_device_attribute label;
- char in_name[12];
- char label_name[12];
-};
-
-/**
- * struct s3c_hwmon - ADC hwmon client information
- * @lock: Access lock to serialise the conversions.
- * @client: The client we registered with the S3C ADC core.
- * @hwmon_dev: The hwmon device we created.
- * @attr: The holders for the channel attributes.
-*/
-struct s3c_hwmon {
- struct mutex lock;
- struct s3c_adc_client *client;
- struct device *hwmon_dev;
-
- struct s3c_hwmon_attr attrs[8];
-};
-
-/**
- * s3c_hwmon_read_ch - read a value from a given adc channel.
- * @dev: The device.
- * @hwmon: Our state.
- * @channel: The channel we're reading from.
- *
- * Read a value from the @channel with the proper locking and sleep until
- * either the read completes or we timeout awaiting the ADC core to get
- * back to us.
- */
-static int s3c_hwmon_read_ch(struct device *dev,
- struct s3c_hwmon *hwmon, int channel)
-{
- int ret;
-
- ret = mutex_lock_interruptible(&hwmon->lock);
- if (ret < 0)
- return ret;
-
- dev_dbg(dev, "reading channel %d\n", channel);
-
- ret = s3c_adc_read(hwmon->client, channel);
- mutex_unlock(&hwmon->lock);
-
- return ret;
-}
-
-#ifdef CONFIG_SENSORS_S3C_RAW
-/**
- * s3c_hwmon_show_raw - show a conversion from the raw channel number.
- * @dev: The device that the attribute belongs to.
- * @attr: The attribute being read.
- * @buf: The result buffer.
- *
- * This show deals with the raw attribute, registered for each possible
- * ADC channel. This does a conversion and returns the raw (un-scaled)
- * value returned from the hardware.
- */
-static ssize_t s3c_hwmon_show_raw(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct s3c_hwmon *adc = dev_get_drvdata(dev);
- struct sensor_device_attribute *sa = to_sensor_dev_attr(attr);
- int ret;
-
- ret = s3c_hwmon_read_ch(dev, adc, sa->index);
-
- return (ret < 0) ? ret : snprintf(buf, PAGE_SIZE, "%d\n", ret);
-}
-
-static SENSOR_DEVICE_ATTR(adc0_raw, S_IRUGO, s3c_hwmon_show_raw, NULL, 0);
-static SENSOR_DEVICE_ATTR(adc1_raw, S_IRUGO, s3c_hwmon_show_raw, NULL, 1);
-static SENSOR_DEVICE_ATTR(adc2_raw, S_IRUGO, s3c_hwmon_show_raw, NULL, 2);
-static SENSOR_DEVICE_ATTR(adc3_raw, S_IRUGO, s3c_hwmon_show_raw, NULL, 3);
-static SENSOR_DEVICE_ATTR(adc4_raw, S_IRUGO, s3c_hwmon_show_raw, NULL, 4);
-static SENSOR_DEVICE_ATTR(adc5_raw, S_IRUGO, s3c_hwmon_show_raw, NULL, 5);
-static SENSOR_DEVICE_ATTR(adc6_raw, S_IRUGO, s3c_hwmon_show_raw, NULL, 6);
-static SENSOR_DEVICE_ATTR(adc7_raw, S_IRUGO, s3c_hwmon_show_raw, NULL, 7);
-
-static struct attribute *s3c_hwmon_attrs[9] = {
- &sensor_dev_attr_adc0_raw.dev_attr.attr,
- &sensor_dev_attr_adc1_raw.dev_attr.attr,
- &sensor_dev_attr_adc2_raw.dev_attr.attr,
- &sensor_dev_attr_adc3_raw.dev_attr.attr,
- &sensor_dev_attr_adc4_raw.dev_attr.attr,
- &sensor_dev_attr_adc5_raw.dev_attr.attr,
- &sensor_dev_attr_adc6_raw.dev_attr.attr,
- &sensor_dev_attr_adc7_raw.dev_attr.attr,
- NULL,
-};
-
-static struct attribute_group s3c_hwmon_attrgroup = {
- .attrs = s3c_hwmon_attrs,
-};
-
-static inline int s3c_hwmon_add_raw(struct device *dev)
-{
- return sysfs_create_group(&dev->kobj, &s3c_hwmon_attrgroup);
-}
-
-static inline void s3c_hwmon_remove_raw(struct device *dev)
-{
- sysfs_remove_group(&dev->kobj, &s3c_hwmon_attrgroup);
-}
-
-#else
-
-static inline int s3c_hwmon_add_raw(struct device *dev) { return 0; }
-static inline void s3c_hwmon_remove_raw(struct device *dev) { }
-
-#endif /* CONFIG_SENSORS_S3C_RAW */
-
-/**
- * s3c_hwmon_ch_show - show value of a given channel
- * @dev: The device that the attribute belongs to.
- * @attr: The attribute being read.
- * @buf: The result buffer.
- *
- * Read a value from the ADC and scale it before returning it to the
- * caller. The scale factor is gained from the channel configuration
- * passed via the platform data when the device was registered.
- */
-static ssize_t s3c_hwmon_ch_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct sensor_device_attribute *sen_attr = to_sensor_dev_attr(attr);
- struct s3c_hwmon *hwmon = dev_get_drvdata(dev);
- struct s3c_hwmon_pdata *pdata = dev_get_platdata(dev);
- struct s3c_hwmon_chcfg *cfg;
- int ret;
-
- cfg = pdata->in[sen_attr->index];
-
- ret = s3c_hwmon_read_ch(dev, hwmon, sen_attr->index);
- if (ret < 0)
- return ret;
-
- ret *= cfg->mult;
- ret = DIV_ROUND_CLOSEST(ret, cfg->div);
-
- return sysfs_emit(buf, "%d\n", ret);
-}
-
-/**
- * s3c_hwmon_label_show - show label name of the given channel.
- * @dev: The device that the attribute belongs to.
- * @attr: The attribute being read.
- * @buf: The result buffer.
- *
- * Return the label name of a given channel
- */
-static ssize_t s3c_hwmon_label_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct sensor_device_attribute *sen_attr = to_sensor_dev_attr(attr);
- struct s3c_hwmon_pdata *pdata = dev_get_platdata(dev);
- struct s3c_hwmon_chcfg *cfg;
-
- cfg = pdata->in[sen_attr->index];
-
- return sysfs_emit(buf, "%s\n", cfg->name);
-}
-
-/**
- * s3c_hwmon_create_attr - create hwmon attribute for given channel.
- * @dev: The device to create the attribute on.
- * @cfg: The channel configuration passed from the platform data.
- * @channel: The ADC channel number to process.
- *
- * Create the scaled attribute for use with hwmon from the specified
- * platform data in @pdata. The sysfs entry is handled by the routine
- * s3c_hwmon_ch_show().
- *
- * The attribute name is taken from the configuration data if present
- * otherwise the name is taken by concatenating in_ with the channel
- * number.
- */
-static int s3c_hwmon_create_attr(struct device *dev,
- struct s3c_hwmon_chcfg *cfg,
- struct s3c_hwmon_attr *attrs,
- int channel)
-{
- struct sensor_device_attribute *attr;
- int ret;
-
- snprintf(attrs->in_name, sizeof(attrs->in_name), "in%d_input", channel);
-
- attr = &attrs->in;
- attr->index = channel;
- sysfs_attr_init(&attr->dev_attr.attr);
- attr->dev_attr.attr.name = attrs->in_name;
- attr->dev_attr.attr.mode = S_IRUGO;
- attr->dev_attr.show = s3c_hwmon_ch_show;
-
- ret = device_create_file(dev, &attr->dev_attr);
- if (ret < 0) {
- dev_err(dev, "failed to create input attribute\n");
- return ret;
- }
-
- /* if this has a name, add a label */
- if (cfg->name) {
- snprintf(attrs->label_name, sizeof(attrs->label_name),
- "in%d_label", channel);
-
- attr = &attrs->label;
- attr->index = channel;
- sysfs_attr_init(&attr->dev_attr.attr);
- attr->dev_attr.attr.name = attrs->label_name;
- attr->dev_attr.attr.mode = S_IRUGO;
- attr->dev_attr.show = s3c_hwmon_label_show;
-
- ret = device_create_file(dev, &attr->dev_attr);
- if (ret < 0) {
- device_remove_file(dev, &attrs->in.dev_attr);
- dev_err(dev, "failed to create label attribute\n");
- }
- }
-
- return ret;
-}
-
-static void s3c_hwmon_remove_attr(struct device *dev,
- struct s3c_hwmon_attr *attrs)
-{
- device_remove_file(dev, &attrs->in.dev_attr);
- device_remove_file(dev, &attrs->label.dev_attr);
-}
-
-/**
- * s3c_hwmon_probe - device probe entry.
- * @dev: The device being probed.
-*/
-static int s3c_hwmon_probe(struct platform_device *dev)
-{
- struct s3c_hwmon_pdata *pdata = dev_get_platdata(&dev->dev);
- struct s3c_hwmon *hwmon;
- int ret = 0;
- int i;
-
- if (!pdata) {
- dev_err(&dev->dev, "no platform data supplied\n");
- return -EINVAL;
- }
-
- hwmon = devm_kzalloc(&dev->dev, sizeof(struct s3c_hwmon), GFP_KERNEL);
- if (hwmon == NULL)
- return -ENOMEM;
-
- platform_set_drvdata(dev, hwmon);
-
- mutex_init(&hwmon->lock);
-
- /* Register with the core ADC driver. */
-
- hwmon->client = s3c_adc_register(dev, NULL, NULL, 0);
- if (IS_ERR(hwmon->client)) {
- dev_err(&dev->dev, "cannot register adc\n");
- return PTR_ERR(hwmon->client);
- }
-
- /* add attributes for our adc devices. */
-
- ret = s3c_hwmon_add_raw(&dev->dev);
- if (ret)
- goto err_registered;
-
- /* register with the hwmon core */
-
- hwmon->hwmon_dev = hwmon_device_register(&dev->dev);
- if (IS_ERR(hwmon->hwmon_dev)) {
- dev_err(&dev->dev, "error registering with hwmon\n");
- ret = PTR_ERR(hwmon->hwmon_dev);
- goto err_raw_attribute;
- }
-
- for (i = 0; i < ARRAY_SIZE(pdata->in); i++) {
- struct s3c_hwmon_chcfg *cfg = pdata->in[i];
-
- if (!cfg)
- continue;
-
- if (cfg->mult >= 0x10000)
- dev_warn(&dev->dev,
- "channel %d multiplier too large\n",
- i);
-
- if (cfg->div == 0) {
- dev_err(&dev->dev, "channel %d divider zero\n", i);
- continue;
- }
-
- ret = s3c_hwmon_create_attr(&dev->dev, pdata->in[i],
- &hwmon->attrs[i], i);
- if (ret) {
- dev_err(&dev->dev,
- "error creating channel %d\n", i);
-
- for (i--; i >= 0; i--)
- s3c_hwmon_remove_attr(&dev->dev,
- &hwmon->attrs[i]);
-
- goto err_hwmon_register;
- }
- }
-
- return 0;
-
- err_hwmon_register:
- hwmon_device_unregister(hwmon->hwmon_dev);
-
- err_raw_attribute:
- s3c_hwmon_remove_raw(&dev->dev);
-
- err_registered:
- s3c_adc_release(hwmon->client);
-
- return ret;
-}
-
-static int s3c_hwmon_remove(struct platform_device *dev)
-{
- struct s3c_hwmon *hwmon = platform_get_drvdata(dev);
- int i;
-
- s3c_hwmon_remove_raw(&dev->dev);
-
- for (i = 0; i < ARRAY_SIZE(hwmon->attrs); i++)
- s3c_hwmon_remove_attr(&dev->dev, &hwmon->attrs[i]);
-
- hwmon_device_unregister(hwmon->hwmon_dev);
- s3c_adc_release(hwmon->client);
-
- return 0;
-}
-
-static struct platform_driver s3c_hwmon_driver = {
- .driver = {
- .name = "s3c-hwmon",
- },
- .probe = s3c_hwmon_probe,
- .remove = s3c_hwmon_remove,
-};
-
-module_platform_driver(s3c_hwmon_driver);
-
-MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
-MODULE_DESCRIPTION("S3C ADC HWMon driver");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:s3c-hwmon");
diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
index ae4d14257a11..32a41fc56fc9 100644
--- a/drivers/hwmon/sht15.c
+++ b/drivers/hwmon/sht15.c
@@ -668,7 +668,7 @@ static inline int sht15_calc_humid(struct sht15_data *data)
}
/**
- * sht15_show_status() - show status information in sysfs
+ * sht15_status_show() - show status information in sysfs
* @dev: device.
* @attr: device attribute.
* @buf: sysfs buffer where information is written to.
@@ -690,7 +690,7 @@ static ssize_t sht15_status_show(struct device *dev,
}
/**
- * sht15_store_heater() - change heater state via sysfs
+ * sht15_status_store() - change heater state via sysfs
* @dev: device.
* @attr: device attribute.
* @buf: sysfs buffer to read the new heater state from.
@@ -725,7 +725,7 @@ static ssize_t sht15_status_store(struct device *dev,
}
/**
- * sht15_show_temp() - show temperature measurement value in sysfs
+ * sht15_temp_show() - show temperature measurement value in sysfs
* @dev: device.
* @attr: device attribute.
* @buf: sysfs buffer where measurement values are written to.
@@ -747,7 +747,7 @@ static ssize_t sht15_temp_show(struct device *dev,
}
/**
- * sht15_show_humidity() - show humidity measurement value in sysfs
+ * sht15_humidity_show() - show humidity measurement value in sysfs
* @dev: device.
* @attr: device attribute.
* @buf: sysfs buffer where measurement values are written to.
diff --git a/drivers/hwmon/sht21.c b/drivers/hwmon/sht21.c
index e23dbf287233..f50b90198f23 100644
--- a/drivers/hwmon/sht21.c
+++ b/drivers/hwmon/sht21.c
@@ -114,7 +114,7 @@ out:
}
/**
- * sht21_show_temperature() - show temperature measurement value in sysfs
+ * sht21_temperature_show() - show temperature measurement value in sysfs
* @dev: device
* @attr: device attribute
* @buf: sysfs buffer (PAGE_SIZE) where measurement values are written to
@@ -136,7 +136,7 @@ static ssize_t sht21_temperature_show(struct device *dev,
}
/**
- * sht21_show_humidity() - show humidity measurement value in sysfs
+ * sht21_humidity_show() - show humidity measurement value in sysfs
* @dev: device
* @attr: device attribute
* @buf: sysfs buffer (PAGE_SIZE) where measurement values are written to
diff --git a/drivers/hwtracing/coresight/Kconfig b/drivers/hwtracing/coresight/Kconfig
index 45c1eb5dfcb7..2b5bbfffbc4f 100644
--- a/drivers/hwtracing/coresight/Kconfig
+++ b/drivers/hwtracing/coresight/Kconfig
@@ -201,4 +201,39 @@ config CORESIGHT_TRBE
To compile this driver as a module, choose M here: the module will be
called coresight-trbe.
+
+config ULTRASOC_SMB
+ tristate "Ultrasoc system memory buffer drivers"
+ depends on ACPI || COMPILE_TEST
+ depends on ARM64 && CORESIGHT_LINKS_AND_SINKS
+ help
+ This driver provides support for the Ultrasoc system memory buffer (SMB).
+ SMB is responsible for receiving the trace data from Coresight ETM devices
+ and storing them to a system buffer.
+
+ To compile this driver as a module, choose M here: the module will be
+ called ultrasoc-smb.
+
+config CORESIGHT_TPDM
+ tristate "CoreSight Trace, Profiling & Diagnostics Monitor driver"
+ select CORESIGHT_LINKS_AND_SINKS
+ select CORESIGHT_TPDA
+ help
+ This driver provides support for configuring monitor. Monitors are
+ primarily responsible for data set collection and support the
+ ability to collect any permutation of data set types.
+
+ To compile this driver as a module, choose M here: the module will be
+ called coresight-tpdm.
+
+config CORESIGHT_TPDA
+ tristate "CoreSight Trace, Profiling & Diagnostics Aggregator driver"
+ help
+ This driver provides support for configuring aggregator. This is
+ primarily useful for pulling the data sets from one or more
+ attached monitors and pushing the resultant data out. Multiple
+ monitors are connected on different input ports of TPDA.
+
+ To compile this driver as a module, choose M here: the module will be
+ called coresight-tpda.
endif
diff --git a/drivers/hwtracing/coresight/Makefile b/drivers/hwtracing/coresight/Makefile
index b6c4a48140ec..33bcc3f7b8ae 100644
--- a/drivers/hwtracing/coresight/Makefile
+++ b/drivers/hwtracing/coresight/Makefile
@@ -6,7 +6,7 @@ obj-$(CONFIG_CORESIGHT) += coresight.o
coresight-y := coresight-core.o coresight-etm-perf.o coresight-platform.o \
coresight-sysfs.o coresight-syscfg.o coresight-config.o \
coresight-cfg-preload.o coresight-cfg-afdo.o \
- coresight-syscfg-configfs.o
+ coresight-syscfg-configfs.o coresight-trace-id.o
obj-$(CONFIG_CORESIGHT_LINK_AND_SINK_TMC) += coresight-tmc.o
coresight-tmc-y := coresight-tmc-core.o coresight-tmc-etf.o \
coresight-tmc-etr.o
@@ -25,5 +25,8 @@ obj-$(CONFIG_CORESIGHT_CPU_DEBUG) += coresight-cpu-debug.o
obj-$(CONFIG_CORESIGHT_CATU) += coresight-catu.o
obj-$(CONFIG_CORESIGHT_CTI) += coresight-cti.o
obj-$(CONFIG_CORESIGHT_TRBE) += coresight-trbe.o
+obj-$(CONFIG_CORESIGHT_TPDM) += coresight-tpdm.o
+obj-$(CONFIG_CORESIGHT_TPDA) += coresight-tpda.o
coresight-cti-y := coresight-cti-core.o coresight-cti-platform.o \
coresight-cti-sysfs.o
+obj-$(CONFIG_ULTRASOC_SMB) += ultrasoc-smb.o
diff --git a/drivers/hwtracing/coresight/coresight-core.c b/drivers/hwtracing/coresight/coresight-core.c
index f3068175ca9d..d3bf82c0de1d 100644
--- a/drivers/hwtracing/coresight/coresight-core.c
+++ b/drivers/hwtracing/coresight/coresight-core.c
@@ -8,6 +8,7 @@
#include <linux/types.h>
#include <linux/device.h>
#include <linux/io.h>
+#include <linux/idr.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/slab.h>
@@ -26,6 +27,13 @@
static DEFINE_MUTEX(coresight_mutex);
static DEFINE_PER_CPU(struct coresight_device *, csdev_sink);
+/*
+ * Use IDR to map the hash of the source's device name
+ * to the pointer of path for the source. The idr is for
+ * the sources which aren't associated with CPU.
+ */
+static DEFINE_IDR(path_idr);
+
/**
* struct coresight_node - elements of a path, from source to sink
* @csdev: Address of an element.
@@ -43,14 +51,6 @@ struct coresight_node {
static DEFINE_PER_CPU(struct list_head *, tracer_path);
/*
- * As of this writing only a single STM can be found in CS topologies. Since
- * there is no way to know if we'll ever see more and what kind of
- * configuration they will enact, for the time being only define a single path
- * for STM.
- */
-static struct list_head *stm_path;
-
-/*
* When losing synchronisation a new barrier packet needs to be inserted at the
* beginning of the data collected in a buffer. That way the decoder knows that
* it needs to look for another sync sequence.
@@ -112,45 +112,6 @@ struct coresight_device *coresight_get_percpu_sink(int cpu)
}
EXPORT_SYMBOL_GPL(coresight_get_percpu_sink);
-static int coresight_id_match(struct device *dev, void *data)
-{
- int trace_id, i_trace_id;
- struct coresight_device *csdev, *i_csdev;
-
- csdev = data;
- i_csdev = to_coresight_device(dev);
-
- /*
- * No need to care about oneself and components that are not
- * sources or not enabled
- */
- if (i_csdev == csdev || !i_csdev->enable ||
- i_csdev->type != CORESIGHT_DEV_TYPE_SOURCE)
- return 0;
-
- /* Get the source ID for both components */
- trace_id = source_ops(csdev)->trace_id(csdev);
- i_trace_id = source_ops(i_csdev)->trace_id(i_csdev);
-
- /* All you need is one */
- if (trace_id == i_trace_id)
- return 1;
-
- return 0;
-}
-
-static int coresight_source_is_unique(struct coresight_device *csdev)
-{
- int trace_id = source_ops(csdev)->trace_id(csdev);
-
- /* this shouldn't happen */
- if (trace_id < 0)
- return 0;
-
- return !bus_for_each_dev(&coresight_bustype, NULL,
- csdev, coresight_id_match);
-}
-
static int coresight_find_link_inport(struct coresight_device *csdev,
struct coresight_device *parent)
{
@@ -459,12 +420,6 @@ static int coresight_enable_source(struct coresight_device *csdev, u32 mode)
{
int ret;
- if (!coresight_source_is_unique(csdev)) {
- dev_warn(&csdev->dev, "traceID %d not unique\n",
- source_ops(csdev)->trace_id(csdev));
- return -EINVAL;
- }
-
if (!csdev->enable) {
if (source_ops(csdev)->enable) {
ret = coresight_control_assoc_ectdev(csdev, true);
@@ -1106,7 +1061,8 @@ static int coresight_validate_source(struct coresight_device *csdev,
}
if (subtype != CORESIGHT_DEV_SUBTYPE_SOURCE_PROC &&
- subtype != CORESIGHT_DEV_SUBTYPE_SOURCE_SOFTWARE) {
+ subtype != CORESIGHT_DEV_SUBTYPE_SOURCE_SOFTWARE &&
+ subtype != CORESIGHT_DEV_SUBTYPE_SOURCE_OTHERS) {
dev_err(&csdev->dev, "wrong device subtype in %s\n", function);
return -EINVAL;
}
@@ -1120,6 +1076,7 @@ int coresight_enable(struct coresight_device *csdev)
struct coresight_device *sink;
struct list_head *path;
enum coresight_dev_subtype_source subtype;
+ u32 hash;
subtype = csdev->subtype.source_subtype;
@@ -1174,7 +1131,15 @@ int coresight_enable(struct coresight_device *csdev)
per_cpu(tracer_path, cpu) = path;
break;
case CORESIGHT_DEV_SUBTYPE_SOURCE_SOFTWARE:
- stm_path = path;
+ case CORESIGHT_DEV_SUBTYPE_SOURCE_OTHERS:
+ /*
+ * Use the hash of source's device name as ID
+ * and map the ID to the pointer of the path.
+ */
+ hash = hashlen_hash(hashlen_string(NULL, dev_name(&csdev->dev)));
+ ret = idr_alloc_u32(&path_idr, path, &hash, hash, GFP_KERNEL);
+ if (ret)
+ goto err_source;
break;
default:
/* We can't be here */
@@ -1198,6 +1163,7 @@ void coresight_disable(struct coresight_device *csdev)
{
int cpu, ret;
struct list_head *path = NULL;
+ u32 hash;
mutex_lock(&coresight_mutex);
@@ -1215,8 +1181,15 @@ void coresight_disable(struct coresight_device *csdev)
per_cpu(tracer_path, cpu) = NULL;
break;
case CORESIGHT_DEV_SUBTYPE_SOURCE_SOFTWARE:
- path = stm_path;
- stm_path = NULL;
+ case CORESIGHT_DEV_SUBTYPE_SOURCE_OTHERS:
+ hash = hashlen_hash(hashlen_string(NULL, dev_name(&csdev->dev)));
+ /* Find the path by the hash. */
+ path = idr_find(&path_idr, hash);
+ if (path == NULL) {
+ pr_err("Path is not found for %s\n", dev_name(&csdev->dev));
+ goto out;
+ }
+ idr_remove(&path_idr, hash);
break;
default:
/* We can't be here */
diff --git a/drivers/hwtracing/coresight/coresight-cti-core.c b/drivers/hwtracing/coresight/coresight-cti-core.c
index d2cf4f4848e1..277c890a1f1f 100644
--- a/drivers/hwtracing/coresight/coresight-cti-core.c
+++ b/drivers/hwtracing/coresight/coresight-cti-core.c
@@ -107,12 +107,12 @@ static int cti_enable_hw(struct cti_drvdata *drvdata)
cti_write_all_hw_regs(drvdata);
config->hw_enabled = true;
- atomic_inc(&drvdata->config.enable_req_count);
+ drvdata->config.enable_req_count++;
spin_unlock_irqrestore(&drvdata->spinlock, flags);
return rc;
cti_state_unchanged:
- atomic_inc(&drvdata->config.enable_req_count);
+ drvdata->config.enable_req_count++;
/* cannot enable due to error */
cti_err_not_enabled:
@@ -129,7 +129,7 @@ static void cti_cpuhp_enable_hw(struct cti_drvdata *drvdata)
config->hw_powered = true;
/* no need to do anything if no enable request */
- if (!atomic_read(&drvdata->config.enable_req_count))
+ if (!drvdata->config.enable_req_count)
goto cti_hp_not_enabled;
/* try to claim the device */
@@ -151,11 +151,18 @@ static int cti_disable_hw(struct cti_drvdata *drvdata)
{
struct cti_config *config = &drvdata->config;
struct coresight_device *csdev = drvdata->csdev;
+ int ret = 0;
spin_lock(&drvdata->spinlock);
+ /* don't allow negative refcounts, return an error */
+ if (!drvdata->config.enable_req_count) {
+ ret = -EINVAL;
+ goto cti_not_disabled;
+ }
+
/* check refcount - disable on 0 */
- if (atomic_dec_return(&drvdata->config.enable_req_count) > 0)
+ if (--drvdata->config.enable_req_count > 0)
goto cti_not_disabled;
/* no need to do anything if disabled or cpu unpowered */
@@ -171,12 +178,12 @@ static int cti_disable_hw(struct cti_drvdata *drvdata)
coresight_disclaim_device_unlocked(csdev);
CS_LOCK(drvdata->base);
spin_unlock(&drvdata->spinlock);
- return 0;
+ return ret;
/* not disabled this call */
cti_not_disabled:
spin_unlock(&drvdata->spinlock);
- return 0;
+ return ret;
}
void cti_write_single_reg(struct cti_drvdata *drvdata, int offset, u32 value)
@@ -232,7 +239,7 @@ static void cti_set_default_config(struct device *dev,
/* Most regs default to 0 as zalloc'ed except...*/
config->trig_filter_enable = true;
config->ctigate = GENMASK(config->nr_ctm_channels - 1, 0);
- atomic_set(&config->enable_req_count, 0);
+ config->enable_req_count = 0;
}
/*
@@ -689,7 +696,7 @@ static int cti_cpu_pm_notify(struct notifier_block *nb, unsigned long cmd,
drvdata->config.hw_enabled = false;
/* check enable reference count to enable HW */
- if (atomic_read(&drvdata->config.enable_req_count)) {
+ if (drvdata->config.enable_req_count) {
/* check we can claim the device as we re-power */
if (coresight_claim_device(csdev))
goto cti_notify_exit;
diff --git a/drivers/hwtracing/coresight/coresight-cti-sysfs.c b/drivers/hwtracing/coresight/coresight-cti-sysfs.c
index 6d59c815ecf5..e528cff9d4e2 100644
--- a/drivers/hwtracing/coresight/coresight-cti-sysfs.c
+++ b/drivers/hwtracing/coresight/coresight-cti-sysfs.c
@@ -84,8 +84,8 @@ static ssize_t enable_show(struct device *dev,
bool enabled, powered;
struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
- enable_req = atomic_read(&drvdata->config.enable_req_count);
spin_lock(&drvdata->spinlock);
+ enable_req = drvdata->config.enable_req_count;
powered = drvdata->config.hw_powered;
enabled = drvdata->config.hw_enabled;
spin_unlock(&drvdata->spinlock);
@@ -108,10 +108,19 @@ static ssize_t enable_store(struct device *dev,
if (ret)
return ret;
- if (val)
+ if (val) {
+ ret = pm_runtime_resume_and_get(dev->parent);
+ if (ret)
+ return ret;
ret = cti_enable(drvdata->csdev);
- else
+ if (ret)
+ pm_runtime_put(dev->parent);
+ } else {
ret = cti_disable(drvdata->csdev);
+ if (!ret)
+ pm_runtime_put(dev->parent);
+ }
+
if (ret)
return ret;
return size;
diff --git a/drivers/hwtracing/coresight/coresight-cti.h b/drivers/hwtracing/coresight/coresight-cti.h
index acf7b545e6b9..8b106b13a244 100644
--- a/drivers/hwtracing/coresight/coresight-cti.h
+++ b/drivers/hwtracing/coresight/coresight-cti.h
@@ -141,7 +141,7 @@ struct cti_config {
int nr_trig_max;
/* cti enable control */
- atomic_t enable_req_count;
+ int enable_req_count;
bool hw_enabled;
bool hw_powered;
diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.c b/drivers/hwtracing/coresight/coresight-etm-perf.c
index 43bbd5dc3d3b..a48c97da8165 100644
--- a/drivers/hwtracing/coresight/coresight-etm-perf.c
+++ b/drivers/hwtracing/coresight/coresight-etm-perf.c
@@ -4,6 +4,7 @@
* Author: Mathieu Poirier <mathieu.poirier@linaro.org>
*/
+#include <linux/bitfield.h>
#include <linux/coresight.h>
#include <linux/coresight-pmu.h>
#include <linux/cpumask.h>
@@ -22,6 +23,7 @@
#include "coresight-etm-perf.h"
#include "coresight-priv.h"
#include "coresight-syscfg.h"
+#include "coresight-trace-id.h"
static struct pmu etm_pmu;
static bool etm_perf_up;
@@ -228,8 +230,12 @@ static void free_event_data(struct work_struct *work)
if (!(IS_ERR_OR_NULL(*ppath)))
coresight_release_path(*ppath);
*ppath = NULL;
+ coresight_trace_id_put_cpu_id(cpu);
}
+ /* mark perf event as done for trace id allocator */
+ coresight_trace_id_perf_stop();
+
free_percpu(event_data->path);
kfree(event_data);
}
@@ -300,6 +306,7 @@ static void *etm_setup_aux(struct perf_event *event, void **pages,
{
u32 id, cfg_hash;
int cpu = event->cpu;
+ int trace_id;
cpumask_t *mask;
struct coresight_device *sink = NULL;
struct coresight_device *user_sink = NULL, *last_sink = NULL;
@@ -316,6 +323,9 @@ static void *etm_setup_aux(struct perf_event *event, void **pages,
sink = user_sink = coresight_get_sink_by_id(id);
}
+ /* tell the trace ID allocator that a perf event is starting up */
+ coresight_trace_id_perf_start();
+
/* check if user wants a coresight configuration selected */
cfg_hash = (u32)((event->attr.config2 & GENMASK_ULL(63, 32)) >> 32);
if (cfg_hash) {
@@ -388,6 +398,13 @@ static void *etm_setup_aux(struct perf_event *event, void **pages,
continue;
}
+ /* ensure we can allocate a trace ID for this CPU */
+ trace_id = coresight_trace_id_get_cpu_id(cpu);
+ if (!IS_VALID_CS_TRACE_ID(trace_id)) {
+ cpumask_clear_cpu(cpu, mask);
+ continue;
+ }
+
*etm_event_cpu_path_ptr(event_data, cpu) = path;
}
@@ -432,6 +449,7 @@ static void etm_event_start(struct perf_event *event, int flags)
struct perf_output_handle *handle = &ctxt->handle;
struct coresight_device *sink, *csdev = per_cpu(csdev_src, cpu);
struct list_head *path;
+ u64 hw_id;
if (!csdev)
goto fail;
@@ -477,6 +495,19 @@ static void etm_event_start(struct perf_event *event, int flags)
if (source_ops(csdev)->enable(csdev, event, CS_MODE_PERF))
goto fail_disable_path;
+ /*
+ * output cpu / trace ID in perf record, once for the lifetime
+ * of the event.
+ */
+ if (!cpumask_test_cpu(cpu, &event_data->aux_hwid_done)) {
+ cpumask_set_cpu(cpu, &event_data->aux_hwid_done);
+ hw_id = FIELD_PREP(CS_AUX_HW_ID_VERSION_MASK,
+ CS_AUX_HW_ID_CURR_VERSION);
+ hw_id |= FIELD_PREP(CS_AUX_HW_ID_TRACE_ID_MASK,
+ coresight_trace_id_read_cpu_id(cpu));
+ perf_report_aux_output_id(event, hw_id);
+ }
+
out:
/* Tell the perf core the event is alive */
event->hw.state = 0;
diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.h b/drivers/hwtracing/coresight/coresight-etm-perf.h
index 468f7799ab4f..bebbadee2ceb 100644
--- a/drivers/hwtracing/coresight/coresight-etm-perf.h
+++ b/drivers/hwtracing/coresight/coresight-etm-perf.h
@@ -48,6 +48,7 @@ struct etm_filters {
* struct etm_event_data - Coresight specifics associated to an event
* @work: Handle to free allocated memory outside IRQ context.
* @mask: Hold the CPU(s) this event was set for.
+ * @aux_hwid_done: Whether a CPU has emitted the TraceID packet or not.
* @snk_config: The sink configuration.
* @cfg_hash: The hash id of any coresight config selected.
* @path: An array of path, each slot for one CPU.
@@ -55,6 +56,7 @@ struct etm_filters {
struct etm_event_data {
struct work_struct work;
cpumask_t mask;
+ cpumask_t aux_hwid_done;
void *snk_config;
u32 cfg_hash;
struct list_head * __percpu *path;
diff --git a/drivers/hwtracing/coresight/coresight-etm.h b/drivers/hwtracing/coresight/coresight-etm.h
index f3ab96eaf44e..9a0d08b092ae 100644
--- a/drivers/hwtracing/coresight/coresight-etm.h
+++ b/drivers/hwtracing/coresight/coresight-etm.h
@@ -283,8 +283,9 @@ static inline unsigned int etm_readl(struct etm_drvdata *drvdata, u32 off)
}
extern const struct attribute_group *coresight_etm_groups[];
-int etm_get_trace_id(struct etm_drvdata *drvdata);
void etm_set_default(struct etm_config *config);
void etm_config_trace_mode(struct etm_config *config);
struct etm_config *get_etm_config(struct etm_drvdata *drvdata);
+int etm_read_alloc_trace_id(struct etm_drvdata *drvdata);
+void etm_release_trace_id(struct etm_drvdata *drvdata);
#endif
diff --git a/drivers/hwtracing/coresight/coresight-etm3x-core.c b/drivers/hwtracing/coresight/coresight-etm3x-core.c
index d0ab9933472b..afc57195ee52 100644
--- a/drivers/hwtracing/coresight/coresight-etm3x-core.c
+++ b/drivers/hwtracing/coresight/coresight-etm3x-core.c
@@ -32,6 +32,7 @@
#include "coresight-etm.h"
#include "coresight-etm-perf.h"
+#include "coresight-trace-id.h"
/*
* Not really modular but using module_param is the easiest way to
@@ -454,52 +455,59 @@ static int etm_cpu_id(struct coresight_device *csdev)
return drvdata->cpu;
}
-int etm_get_trace_id(struct etm_drvdata *drvdata)
+int etm_read_alloc_trace_id(struct etm_drvdata *drvdata)
{
- unsigned long flags;
- int trace_id = -1;
- struct device *etm_dev;
+ int trace_id;
- if (!drvdata)
- goto out;
-
- etm_dev = drvdata->csdev->dev.parent;
- if (!local_read(&drvdata->mode))
- return drvdata->traceid;
-
- pm_runtime_get_sync(etm_dev);
-
- spin_lock_irqsave(&drvdata->spinlock, flags);
-
- CS_UNLOCK(drvdata->base);
- trace_id = (etm_readl(drvdata, ETMTRACEIDR) & ETM_TRACEID_MASK);
- CS_LOCK(drvdata->base);
-
- spin_unlock_irqrestore(&drvdata->spinlock, flags);
- pm_runtime_put(etm_dev);
-
-out:
+ /*
+ * This will allocate a trace ID to the cpu,
+ * or return the one currently allocated.
+ *
+ * trace id function has its own lock
+ */
+ trace_id = coresight_trace_id_get_cpu_id(drvdata->cpu);
+ if (IS_VALID_CS_TRACE_ID(trace_id))
+ drvdata->traceid = (u8)trace_id;
+ else
+ dev_err(&drvdata->csdev->dev,
+ "Failed to allocate trace ID for %s on CPU%d\n",
+ dev_name(&drvdata->csdev->dev), drvdata->cpu);
return trace_id;
-
}
-static int etm_trace_id(struct coresight_device *csdev)
+void etm_release_trace_id(struct etm_drvdata *drvdata)
{
- struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
-
- return etm_get_trace_id(drvdata);
+ coresight_trace_id_put_cpu_id(drvdata->cpu);
}
static int etm_enable_perf(struct coresight_device *csdev,
struct perf_event *event)
{
struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+ int trace_id;
if (WARN_ON_ONCE(drvdata->cpu != smp_processor_id()))
return -EINVAL;
/* Configure the tracer based on the session's specifics */
etm_parse_event_config(drvdata, event);
+
+ /*
+ * perf allocates cpu ids as part of _setup_aux() - device needs to use
+ * the allocated ID. This reads the current version without allocation.
+ *
+ * This does not use the trace id lock to prevent lock_dep issues
+ * with perf locks - we know the ID cannot change until perf shuts down
+ * the session
+ */
+ trace_id = coresight_trace_id_read_cpu_id(drvdata->cpu);
+ if (!IS_VALID_CS_TRACE_ID(trace_id)) {
+ dev_err(&drvdata->csdev->dev, "Failed to set trace ID for %s on CPU%d\n",
+ dev_name(&drvdata->csdev->dev), drvdata->cpu);
+ return -EINVAL;
+ }
+ drvdata->traceid = (u8)trace_id;
+
/* And enable it */
return etm_enable_hw(drvdata);
}
@@ -512,6 +520,11 @@ static int etm_enable_sysfs(struct coresight_device *csdev)
spin_lock(&drvdata->spinlock);
+ /* sysfs needs to allocate and set a trace ID */
+ ret = etm_read_alloc_trace_id(drvdata);
+ if (ret < 0)
+ goto unlock_enable_sysfs;
+
/*
* Configure the ETM only if the CPU is online. If it isn't online
* hw configuration will take place on the local CPU during bring up.
@@ -528,6 +541,10 @@ static int etm_enable_sysfs(struct coresight_device *csdev)
ret = -ENODEV;
}
+ if (ret)
+ etm_release_trace_id(drvdata);
+
+unlock_enable_sysfs:
spin_unlock(&drvdata->spinlock);
if (!ret)
@@ -611,6 +628,12 @@ static void etm_disable_perf(struct coresight_device *csdev)
coresight_disclaim_device_unlocked(csdev);
CS_LOCK(drvdata->base);
+
+ /*
+ * perf will release trace ids when _free_aux()
+ * is called at the end of the session
+ */
+
}
static void etm_disable_sysfs(struct coresight_device *csdev)
@@ -635,6 +658,13 @@ static void etm_disable_sysfs(struct coresight_device *csdev)
spin_unlock(&drvdata->spinlock);
cpus_read_unlock();
+ /*
+ * we only release trace IDs when resetting sysfs.
+ * This permits sysfs users to read the trace ID after the trace
+ * session has completed. This maintains operational behaviour with
+ * prior trace id allocation method
+ */
+
dev_dbg(&csdev->dev, "ETM tracing disabled\n");
}
@@ -671,7 +701,6 @@ static void etm_disable(struct coresight_device *csdev,
static const struct coresight_ops_source etm_source_ops = {
.cpu_id = etm_cpu_id,
- .trace_id = etm_trace_id,
.enable = etm_enable,
.disable = etm_disable,
};
@@ -781,11 +810,6 @@ static void etm_init_arch_data(void *info)
CS_LOCK(drvdata->base);
}
-static void etm_init_trace_id(struct etm_drvdata *drvdata)
-{
- drvdata->traceid = coresight_get_trace_id(drvdata->cpu);
-}
-
static int __init etm_hp_setup(void)
{
int ret;
@@ -871,7 +895,6 @@ static int etm_probe(struct amba_device *adev, const struct amba_id *id)
if (etm_arch_supported(drvdata->arch) == false)
return -EINVAL;
- etm_init_trace_id(drvdata);
etm_set_default(&drvdata->config);
pdata = coresight_get_platform_data(dev);
diff --git a/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c b/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c
index fd81eca3ec18..2f271b7fb048 100644
--- a/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c
+++ b/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c
@@ -85,6 +85,7 @@ static ssize_t reset_store(struct device *dev,
}
etm_set_default(config);
+ etm_release_trace_id(drvdata);
spin_unlock(&drvdata->spinlock);
}
@@ -1189,30 +1190,16 @@ static DEVICE_ATTR_RO(cpu);
static ssize_t traceid_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- unsigned long val;
- struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- val = etm_get_trace_id(drvdata);
-
- return sprintf(buf, "%#lx\n", val);
-}
-
-static ssize_t traceid_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- int ret;
- unsigned long val;
+ int trace_id;
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
- ret = kstrtoul(buf, 16, &val);
- if (ret)
- return ret;
+ trace_id = etm_read_alloc_trace_id(drvdata);
+ if (trace_id < 0)
+ return trace_id;
- drvdata->traceid = val & ETM_TRACEID_MASK;
- return size;
+ return sysfs_emit(buf, "%#x\n", trace_id);
}
-static DEVICE_ATTR_RW(traceid);
+static DEVICE_ATTR_RO(traceid);
static struct attribute *coresight_etm_attrs[] = {
&dev_attr_nr_addr_cmp.attr,
diff --git a/drivers/hwtracing/coresight/coresight-etm4x-core.c b/drivers/hwtracing/coresight/coresight-etm4x-core.c
index 1cc052979e01..1ea8f173cca0 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x-core.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x-core.c
@@ -42,6 +42,7 @@
#include "coresight-etm4x-cfg.h"
#include "coresight-self-hosted-trace.h"
#include "coresight-syscfg.h"
+#include "coresight-trace-id.h"
static int boot_enable;
module_param(boot_enable, int, 0444);
@@ -230,11 +231,28 @@ static int etm4_cpu_id(struct coresight_device *csdev)
return drvdata->cpu;
}
-static int etm4_trace_id(struct coresight_device *csdev)
+int etm4_read_alloc_trace_id(struct etmv4_drvdata *drvdata)
{
- struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+ int trace_id;
+
+ /*
+ * This will allocate a trace ID to the cpu,
+ * or return the one currently allocated.
+ * The trace id function has its own lock
+ */
+ trace_id = coresight_trace_id_get_cpu_id(drvdata->cpu);
+ if (IS_VALID_CS_TRACE_ID(trace_id))
+ drvdata->trcid = (u8)trace_id;
+ else
+ dev_err(&drvdata->csdev->dev,
+ "Failed to allocate trace ID for %s on CPU%d\n",
+ dev_name(&drvdata->csdev->dev), drvdata->cpu);
+ return trace_id;
+}
- return drvdata->trcid;
+void etm4_release_trace_id(struct etmv4_drvdata *drvdata)
+{
+ coresight_trace_id_put_cpu_id(drvdata->cpu);
}
struct etm4_enable_arg {
@@ -427,8 +445,10 @@ static int etm4_enable_hw(struct etmv4_drvdata *drvdata)
etm4x_relaxed_write32(csa, config->vipcssctlr, TRCVIPCSSCTLR);
for (i = 0; i < drvdata->nrseqstate - 1; i++)
etm4x_relaxed_write32(csa, config->seq_ctrl[i], TRCSEQEVRn(i));
- etm4x_relaxed_write32(csa, config->seq_rst, TRCSEQRSTEVR);
- etm4x_relaxed_write32(csa, config->seq_state, TRCSEQSTR);
+ if (drvdata->nrseqstate) {
+ etm4x_relaxed_write32(csa, config->seq_rst, TRCSEQRSTEVR);
+ etm4x_relaxed_write32(csa, config->seq_state, TRCSEQSTR);
+ }
etm4x_relaxed_write32(csa, config->ext_inp, TRCEXTINSELR);
for (i = 0; i < drvdata->nr_cntr; i++) {
etm4x_relaxed_write32(csa, config->cntrldvr[i], TRCCNTRLDVRn(i));
@@ -720,7 +740,7 @@ out:
static int etm4_enable_perf(struct coresight_device *csdev,
struct perf_event *event)
{
- int ret = 0;
+ int ret = 0, trace_id;
struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
if (WARN_ON_ONCE(drvdata->cpu != smp_processor_id())) {
@@ -732,6 +752,24 @@ static int etm4_enable_perf(struct coresight_device *csdev,
ret = etm4_parse_event_config(csdev, event);
if (ret)
goto out;
+
+ /*
+ * perf allocates cpu ids as part of _setup_aux() - device needs to use
+ * the allocated ID. This reads the current version without allocation.
+ *
+ * This does not use the trace id lock to prevent lock_dep issues
+ * with perf locks - we know the ID cannot change until perf shuts down
+ * the session
+ */
+ trace_id = coresight_trace_id_read_cpu_id(drvdata->cpu);
+ if (!IS_VALID_CS_TRACE_ID(trace_id)) {
+ dev_err(&drvdata->csdev->dev, "Failed to set trace ID for %s on CPU%d\n",
+ dev_name(&drvdata->csdev->dev), drvdata->cpu);
+ ret = -EINVAL;
+ goto out;
+ }
+ drvdata->trcid = (u8)trace_id;
+
/* And enable it */
ret = etm4_enable_hw(drvdata);
@@ -756,6 +794,11 @@ static int etm4_enable_sysfs(struct coresight_device *csdev)
spin_lock(&drvdata->spinlock);
+ /* sysfs needs to read and allocate a trace ID */
+ ret = etm4_read_alloc_trace_id(drvdata);
+ if (ret < 0)
+ goto unlock_sysfs_enable;
+
/*
* Executing etm4_enable_hw on the cpu whose ETM is being enabled
* ensures that register writes occur when cpu is powered.
@@ -767,6 +810,11 @@ static int etm4_enable_sysfs(struct coresight_device *csdev)
ret = arg.rc;
if (!ret)
drvdata->sticky_enable = true;
+
+ if (ret)
+ etm4_release_trace_id(drvdata);
+
+unlock_sysfs_enable:
spin_unlock(&drvdata->spinlock);
if (!ret)
@@ -898,6 +946,11 @@ static int etm4_disable_perf(struct coresight_device *csdev,
/* TRCVICTLR::SSSTATUS, bit[9] */
filters->ssstatus = (control & BIT(9));
+ /*
+ * perf will release trace ids when _free_aux() is
+ * called at the end of the session.
+ */
+
return 0;
}
@@ -923,6 +976,13 @@ static void etm4_disable_sysfs(struct coresight_device *csdev)
spin_unlock(&drvdata->spinlock);
cpus_read_unlock();
+ /*
+ * we only release trace IDs when resetting sysfs.
+ * This permits sysfs users to read the trace ID after the trace
+ * session has completed. This maintains operational behaviour with
+ * prior trace id allocation method
+ */
+
dev_dbg(&csdev->dev, "ETM tracing disabled\n");
}
@@ -956,7 +1016,6 @@ static void etm4_disable(struct coresight_device *csdev,
static const struct coresight_ops_source etm4_source_ops = {
.cpu_id = etm4_cpu_id,
- .trace_id = etm4_trace_id,
.enable = etm4_enable,
.disable = etm4_disable,
};
@@ -1565,11 +1624,6 @@ static int etm4_dying_cpu(unsigned int cpu)
return 0;
}
-static void etm4_init_trace_id(struct etmv4_drvdata *drvdata)
-{
- drvdata->trcid = coresight_get_trace_id(drvdata->cpu);
-}
-
static int __etm4_cpu_save(struct etmv4_drvdata *drvdata)
{
int i, ret = 0;
@@ -1634,8 +1688,10 @@ static int __etm4_cpu_save(struct etmv4_drvdata *drvdata)
for (i = 0; i < drvdata->nrseqstate - 1; i++)
state->trcseqevr[i] = etm4x_read32(csa, TRCSEQEVRn(i));
- state->trcseqrstevr = etm4x_read32(csa, TRCSEQRSTEVR);
- state->trcseqstr = etm4x_read32(csa, TRCSEQSTR);
+ if (drvdata->nrseqstate) {
+ state->trcseqrstevr = etm4x_read32(csa, TRCSEQRSTEVR);
+ state->trcseqstr = etm4x_read32(csa, TRCSEQSTR);
+ }
state->trcextinselr = etm4x_read32(csa, TRCEXTINSELR);
for (i = 0; i < drvdata->nr_cntr; i++) {
@@ -1763,8 +1819,10 @@ static void __etm4_cpu_restore(struct etmv4_drvdata *drvdata)
for (i = 0; i < drvdata->nrseqstate - 1; i++)
etm4x_relaxed_write32(csa, state->trcseqevr[i], TRCSEQEVRn(i));
- etm4x_relaxed_write32(csa, state->trcseqrstevr, TRCSEQRSTEVR);
- etm4x_relaxed_write32(csa, state->trcseqstr, TRCSEQSTR);
+ if (drvdata->nrseqstate) {
+ etm4x_relaxed_write32(csa, state->trcseqrstevr, TRCSEQRSTEVR);
+ etm4x_relaxed_write32(csa, state->trcseqstr, TRCSEQSTR);
+ }
etm4x_relaxed_write32(csa, state->trcextinselr, TRCEXTINSELR);
for (i = 0; i < drvdata->nr_cntr; i++) {
@@ -1946,7 +2004,6 @@ static int etm4_add_coresight_dev(struct etm4_init_arg *init_arg)
if (!desc.name)
return -ENOMEM;
- etm4_init_trace_id(drvdata);
etm4_set_default(&drvdata->config);
pdata = coresight_get_platform_data(dev);
diff --git a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
index 9cac848cffaf..5e62aa40ecd0 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
@@ -266,10 +266,11 @@ static ssize_t reset_store(struct device *dev,
config->vmid_mask0 = 0x0;
config->vmid_mask1 = 0x0;
- drvdata->trcid = drvdata->cpu + 1;
-
spin_unlock(&drvdata->spinlock);
+ /* for sysfs - only release trace id when resetting */
+ etm4_release_trace_id(drvdata);
+
cscfg_csdev_reset_feats(to_coresight_device(dev));
return size;
@@ -2392,6 +2393,26 @@ static struct attribute *coresight_etmv4_attrs[] = {
NULL,
};
+/*
+ * Trace ID allocated dynamically on enable - but also allocate on read
+ * in case sysfs or perf read before enable to ensure consistent metadata
+ * information for trace decode
+ */
+static ssize_t trctraceid_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int trace_id;
+ struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ trace_id = etm4_read_alloc_trace_id(drvdata);
+ if (trace_id < 0)
+ return trace_id;
+
+ return sysfs_emit(buf, "0x%x\n", trace_id);
+}
+static DEVICE_ATTR_RO(trctraceid);
+
struct etmv4_reg {
struct coresight_device *csdev;
u32 offset;
@@ -2528,7 +2549,7 @@ static struct attribute *coresight_etmv4_mgmt_attrs[] = {
coresight_etm4x_reg(trcpidr3, TRCPIDR3),
coresight_etm4x_reg(trcoslsr, TRCOSLSR),
coresight_etm4x_reg(trcconfig, TRCCONFIGR),
- coresight_etm4x_reg(trctraceid, TRCTRACEIDR),
+ &dev_attr_trctraceid.attr,
coresight_etm4x_reg(trcdevarch, TRCDEVARCH),
NULL,
};
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.h b/drivers/hwtracing/coresight/coresight-etm4x.h
index 4b21bb79f168..434f4e95ee17 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.h
+++ b/drivers/hwtracing/coresight/coresight-etm4x.h
@@ -1095,4 +1095,7 @@ static inline bool etm4x_is_ete(struct etmv4_drvdata *drvdata)
{
return drvdata->arch >= ETM_ARCH_ETE;
}
+
+int etm4_read_alloc_trace_id(struct etmv4_drvdata *drvdata);
+void etm4_release_trace_id(struct etmv4_drvdata *drvdata);
#endif
diff --git a/drivers/hwtracing/coresight/coresight-stm.c b/drivers/hwtracing/coresight/coresight-stm.c
index 463f449cfb79..66a614c5492c 100644
--- a/drivers/hwtracing/coresight/coresight-stm.c
+++ b/drivers/hwtracing/coresight/coresight-stm.c
@@ -31,6 +31,7 @@
#include <linux/stm.h>
#include "coresight-priv.h"
+#include "coresight-trace-id.h"
#define STMDMASTARTR 0xc04
#define STMDMASTOPR 0xc08
@@ -280,15 +281,7 @@ static void stm_disable(struct coresight_device *csdev,
}
}
-static int stm_trace_id(struct coresight_device *csdev)
-{
- struct stm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
-
- return drvdata->traceid;
-}
-
static const struct coresight_ops_source stm_source_ops = {
- .trace_id = stm_trace_id,
.enable = stm_enable,
.disable = stm_disable,
};
@@ -615,24 +608,7 @@ static ssize_t traceid_show(struct device *dev,
val = drvdata->traceid;
return sprintf(buf, "%#lx\n", val);
}
-
-static ssize_t traceid_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- int ret;
- unsigned long val;
- struct stm_drvdata *drvdata = dev_get_drvdata(dev->parent);
-
- ret = kstrtoul(buf, 16, &val);
- if (ret)
- return ret;
-
- /* traceid field is 7bit wide on STM32 */
- drvdata->traceid = val & 0x7f;
- return size;
-}
-static DEVICE_ATTR_RW(traceid);
+static DEVICE_ATTR_RO(traceid);
static struct attribute *coresight_stm_attrs[] = {
&dev_attr_hwevent_enable.attr,
@@ -803,14 +779,6 @@ static void stm_init_default_data(struct stm_drvdata *drvdata)
*/
drvdata->stmsper = ~0x0;
- /*
- * The trace ID value for *ETM* tracers start at CPU_ID * 2 + 0x10 and
- * anything equal to or higher than 0x70 is reserved. Since 0x00 is
- * also reserved the STM trace ID needs to be higher than 0x00 and
- * lowner than 0x10.
- */
- drvdata->traceid = 0x1;
-
/* Set invariant transaction timing on all channels */
bitmap_clear(drvdata->chs.guaranteed, 0, drvdata->numsp);
}
@@ -838,7 +806,7 @@ static void stm_init_generic_data(struct stm_drvdata *drvdata,
static int stm_probe(struct amba_device *adev, const struct amba_id *id)
{
- int ret;
+ int ret, trace_id;
void __iomem *base;
struct device *dev = &adev->dev;
struct coresight_platform_data *pdata = NULL;
@@ -922,12 +890,22 @@ static int stm_probe(struct amba_device *adev, const struct amba_id *id)
goto stm_unregister;
}
+ trace_id = coresight_trace_id_get_system_id();
+ if (trace_id < 0) {
+ ret = trace_id;
+ goto cs_unregister;
+ }
+ drvdata->traceid = (u8)trace_id;
+
pm_runtime_put(&adev->dev);
dev_info(&drvdata->csdev->dev, "%s initialized\n",
(char *)coresight_get_uci_data(id));
return 0;
+cs_unregister:
+ coresight_unregister(drvdata->csdev);
+
stm_unregister:
stm_unregister_device(&drvdata->stm);
return ret;
@@ -937,6 +915,7 @@ static void stm_remove(struct amba_device *adev)
{
struct stm_drvdata *drvdata = dev_get_drvdata(&adev->dev);
+ coresight_trace_id_put_system_id(drvdata->traceid);
coresight_unregister(drvdata->csdev);
stm_unregister_device(&drvdata->stm);
diff --git a/drivers/hwtracing/coresight/coresight-tmc-core.c b/drivers/hwtracing/coresight/coresight-tmc-core.c
index 07abf28ad725..c106d142e632 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-core.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-core.c
@@ -31,7 +31,7 @@ DEFINE_CORESIGHT_DEVLIST(etb_devs, "tmc_etb");
DEFINE_CORESIGHT_DEVLIST(etf_devs, "tmc_etf");
DEFINE_CORESIGHT_DEVLIST(etr_devs, "tmc_etr");
-void tmc_wait_for_tmcready(struct tmc_drvdata *drvdata)
+int tmc_wait_for_tmcready(struct tmc_drvdata *drvdata)
{
struct coresight_device *csdev = drvdata->csdev;
struct csdev_access *csa = &csdev->access;
@@ -40,7 +40,9 @@ void tmc_wait_for_tmcready(struct tmc_drvdata *drvdata)
if (coresight_timeout(csa, TMC_STS, TMC_STS_TMCREADY_BIT, 1)) {
dev_err(&csdev->dev,
"timeout while waiting for TMC to be Ready\n");
+ return -EBUSY;
}
+ return 0;
}
void tmc_flush_and_stop(struct tmc_drvdata *drvdata)
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etf.c b/drivers/hwtracing/coresight/coresight-tmc-etf.c
index 4c4cbd1f7258..0ab1f73c2d06 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etf.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etf.c
@@ -16,12 +16,20 @@
static int tmc_set_etf_buffer(struct coresight_device *csdev,
struct perf_output_handle *handle);
-static void __tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
+static int __tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
{
+ int rc = 0;
+
CS_UNLOCK(drvdata->base);
/* Wait for TMCSReady bit to be set */
- tmc_wait_for_tmcready(drvdata);
+ rc = tmc_wait_for_tmcready(drvdata);
+ if (rc) {
+ dev_err(&drvdata->csdev->dev,
+ "Failed to enable: TMC not ready\n");
+ CS_LOCK(drvdata->base);
+ return rc;
+ }
writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);
writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI |
@@ -33,6 +41,7 @@ static void __tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
tmc_enable_hw(drvdata);
CS_LOCK(drvdata->base);
+ return rc;
}
static int tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
@@ -42,8 +51,10 @@ static int tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
if (rc)
return rc;
- __tmc_etb_enable_hw(drvdata);
- return 0;
+ rc = __tmc_etb_enable_hw(drvdata);
+ if (rc)
+ coresight_disclaim_device(drvdata->csdev);
+ return rc;
}
static void tmc_etb_dump_hw(struct tmc_drvdata *drvdata)
@@ -91,12 +102,20 @@ static void tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
coresight_disclaim_device(drvdata->csdev);
}
-static void __tmc_etf_enable_hw(struct tmc_drvdata *drvdata)
+static int __tmc_etf_enable_hw(struct tmc_drvdata *drvdata)
{
+ int rc = 0;
+
CS_UNLOCK(drvdata->base);
/* Wait for TMCSReady bit to be set */
- tmc_wait_for_tmcready(drvdata);
+ rc = tmc_wait_for_tmcready(drvdata);
+ if (rc) {
+ dev_err(&drvdata->csdev->dev,
+ "Failed to enable : TMC is not ready\n");
+ CS_LOCK(drvdata->base);
+ return rc;
+ }
writel_relaxed(TMC_MODE_HARDWARE_FIFO, drvdata->base + TMC_MODE);
writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI,
@@ -105,6 +124,7 @@ static void __tmc_etf_enable_hw(struct tmc_drvdata *drvdata)
tmc_enable_hw(drvdata);
CS_LOCK(drvdata->base);
+ return rc;
}
static int tmc_etf_enable_hw(struct tmc_drvdata *drvdata)
@@ -114,8 +134,10 @@ static int tmc_etf_enable_hw(struct tmc_drvdata *drvdata)
if (rc)
return rc;
- __tmc_etf_enable_hw(drvdata);
- return 0;
+ rc = __tmc_etf_enable_hw(drvdata);
+ if (rc)
+ coresight_disclaim_device(drvdata->csdev);
+ return rc;
}
static void tmc_etf_disable_hw(struct tmc_drvdata *drvdata)
@@ -639,6 +661,7 @@ int tmc_read_unprepare_etb(struct tmc_drvdata *drvdata)
char *buf = NULL;
enum tmc_mode mode;
unsigned long flags;
+ int rc = 0;
/* config types are set a boot time and never change */
if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETB &&
@@ -664,7 +687,11 @@ int tmc_read_unprepare_etb(struct tmc_drvdata *drvdata)
* can't be NULL.
*/
memset(drvdata->buf, 0, drvdata->size);
- __tmc_etb_enable_hw(drvdata);
+ rc = __tmc_etb_enable_hw(drvdata);
+ if (rc) {
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ return rc;
+ }
} else {
/*
* The ETB/ETF is not tracing and the buffer was just read.
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c
index 867ad8bb9b0c..918d461fcf4a 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etr.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c
@@ -983,15 +983,22 @@ static void tmc_sync_etr_buf(struct tmc_drvdata *drvdata)
etr_buf->ops->sync(etr_buf, rrp, rwp);
}
-static void __tmc_etr_enable_hw(struct tmc_drvdata *drvdata)
+static int __tmc_etr_enable_hw(struct tmc_drvdata *drvdata)
{
u32 axictl, sts;
struct etr_buf *etr_buf = drvdata->etr_buf;
+ int rc = 0;
CS_UNLOCK(drvdata->base);
/* Wait for TMCSReady bit to be set */
- tmc_wait_for_tmcready(drvdata);
+ rc = tmc_wait_for_tmcready(drvdata);
+ if (rc) {
+ dev_err(&drvdata->csdev->dev,
+ "Failed to enable : TMC not ready\n");
+ CS_LOCK(drvdata->base);
+ return rc;
+ }
writel_relaxed(etr_buf->size / 4, drvdata->base + TMC_RSZ);
writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);
@@ -1032,6 +1039,7 @@ static void __tmc_etr_enable_hw(struct tmc_drvdata *drvdata)
tmc_enable_hw(drvdata);
CS_LOCK(drvdata->base);
+ return rc;
}
static int tmc_etr_enable_hw(struct tmc_drvdata *drvdata,
@@ -1060,7 +1068,12 @@ static int tmc_etr_enable_hw(struct tmc_drvdata *drvdata,
rc = coresight_claim_device(drvdata->csdev);
if (!rc) {
drvdata->etr_buf = etr_buf;
- __tmc_etr_enable_hw(drvdata);
+ rc = __tmc_etr_enable_hw(drvdata);
+ if (rc) {
+ drvdata->etr_buf = NULL;
+ coresight_disclaim_device(drvdata->csdev);
+ tmc_etr_disable_catu(drvdata);
+ }
}
return rc;
diff --git a/drivers/hwtracing/coresight/coresight-tmc.h b/drivers/hwtracing/coresight/coresight-tmc.h
index 66959557cf39..01c0382a29c0 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.h
+++ b/drivers/hwtracing/coresight/coresight-tmc.h
@@ -255,7 +255,7 @@ struct tmc_sg_table {
};
/* Generic functions */
-void tmc_wait_for_tmcready(struct tmc_drvdata *drvdata);
+int tmc_wait_for_tmcready(struct tmc_drvdata *drvdata);
void tmc_flush_and_stop(struct tmc_drvdata *drvdata);
void tmc_enable_hw(struct tmc_drvdata *drvdata);
void tmc_disable_hw(struct tmc_drvdata *drvdata);
diff --git a/drivers/hwtracing/coresight/coresight-tpda.c b/drivers/hwtracing/coresight/coresight-tpda.c
new file mode 100644
index 000000000000..f712e112ecff
--- /dev/null
+++ b/drivers/hwtracing/coresight/coresight-tpda.c
@@ -0,0 +1,211 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/amba/bus.h>
+#include <linux/bitfield.h>
+#include <linux/coresight.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/fs.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include "coresight-priv.h"
+#include "coresight-tpda.h"
+#include "coresight-trace-id.h"
+
+DEFINE_CORESIGHT_DEVLIST(tpda_devs, "tpda");
+
+/* Settings pre enabling port control register */
+static void tpda_enable_pre_port(struct tpda_drvdata *drvdata)
+{
+ u32 val;
+
+ val = readl_relaxed(drvdata->base + TPDA_CR);
+ val &= ~TPDA_CR_ATID;
+ val |= FIELD_PREP(TPDA_CR_ATID, drvdata->atid);
+ writel_relaxed(val, drvdata->base + TPDA_CR);
+}
+
+static void tpda_enable_port(struct tpda_drvdata *drvdata, int port)
+{
+ u32 val;
+
+ val = readl_relaxed(drvdata->base + TPDA_Pn_CR(port));
+ /* Enable the port */
+ val |= TPDA_Pn_CR_ENA;
+ writel_relaxed(val, drvdata->base + TPDA_Pn_CR(port));
+}
+
+static void __tpda_enable(struct tpda_drvdata *drvdata, int port)
+{
+ CS_UNLOCK(drvdata->base);
+
+ if (!drvdata->csdev->enable)
+ tpda_enable_pre_port(drvdata);
+
+ tpda_enable_port(drvdata, port);
+
+ CS_LOCK(drvdata->base);
+}
+
+static int tpda_enable(struct coresight_device *csdev, int inport, int outport)
+{
+ struct tpda_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+ spin_lock(&drvdata->spinlock);
+ if (atomic_read(&csdev->refcnt[inport]) == 0)
+ __tpda_enable(drvdata, inport);
+
+ atomic_inc(&csdev->refcnt[inport]);
+ spin_unlock(&drvdata->spinlock);
+
+ dev_dbg(drvdata->dev, "TPDA inport %d enabled.\n", inport);
+ return 0;
+}
+
+static void __tpda_disable(struct tpda_drvdata *drvdata, int port)
+{
+ u32 val;
+
+ CS_UNLOCK(drvdata->base);
+
+ val = readl_relaxed(drvdata->base + TPDA_Pn_CR(port));
+ val &= ~TPDA_Pn_CR_ENA;
+ writel_relaxed(val, drvdata->base + TPDA_Pn_CR(port));
+
+ CS_LOCK(drvdata->base);
+}
+
+static void tpda_disable(struct coresight_device *csdev, int inport,
+ int outport)
+{
+ struct tpda_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+ spin_lock(&drvdata->spinlock);
+ if (atomic_dec_return(&csdev->refcnt[inport]) == 0)
+ __tpda_disable(drvdata, inport);
+
+ spin_unlock(&drvdata->spinlock);
+
+ dev_dbg(drvdata->dev, "TPDA inport %d disabled\n", inport);
+}
+
+static const struct coresight_ops_link tpda_link_ops = {
+ .enable = tpda_enable,
+ .disable = tpda_disable,
+};
+
+static const struct coresight_ops tpda_cs_ops = {
+ .link_ops = &tpda_link_ops,
+};
+
+static int tpda_init_default_data(struct tpda_drvdata *drvdata)
+{
+ int atid;
+ /*
+ * TPDA must has a unique atid. This atid can uniquely
+ * identify the TPDM trace source connected to the TPDA.
+ * The TPDMs which are connected to same TPDA share the
+ * same trace-id. When TPDA does packetization, different
+ * port will have unique channel number for decoding.
+ */
+ atid = coresight_trace_id_get_system_id();
+ if (atid < 0)
+ return atid;
+
+ drvdata->atid = atid;
+ return 0;
+}
+
+static int tpda_probe(struct amba_device *adev, const struct amba_id *id)
+{
+ int ret;
+ struct device *dev = &adev->dev;
+ struct coresight_platform_data *pdata;
+ struct tpda_drvdata *drvdata;
+ struct coresight_desc desc = { 0 };
+ void __iomem *base;
+
+ pdata = coresight_get_platform_data(dev);
+ if (IS_ERR(pdata))
+ return PTR_ERR(pdata);
+ adev->dev.platform_data = pdata;
+
+ drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
+ if (!drvdata)
+ return -ENOMEM;
+
+ drvdata->dev = &adev->dev;
+ dev_set_drvdata(dev, drvdata);
+
+ base = devm_ioremap_resource(dev, &adev->res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+ drvdata->base = base;
+
+ spin_lock_init(&drvdata->spinlock);
+
+ ret = tpda_init_default_data(drvdata);
+ if (ret)
+ return ret;
+
+ desc.name = coresight_alloc_device_name(&tpda_devs, dev);
+ if (!desc.name)
+ return -ENOMEM;
+ desc.type = CORESIGHT_DEV_TYPE_LINK;
+ desc.subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_MERG;
+ desc.ops = &tpda_cs_ops;
+ desc.pdata = adev->dev.platform_data;
+ desc.dev = &adev->dev;
+ desc.access = CSDEV_ACCESS_IOMEM(base);
+ drvdata->csdev = coresight_register(&desc);
+ if (IS_ERR(drvdata->csdev))
+ return PTR_ERR(drvdata->csdev);
+
+ pm_runtime_put(&adev->dev);
+
+ dev_dbg(drvdata->dev, "TPDA initialized\n");
+ return 0;
+}
+
+static void tpda_remove(struct amba_device *adev)
+{
+ struct tpda_drvdata *drvdata = dev_get_drvdata(&adev->dev);
+
+ coresight_trace_id_put_system_id(drvdata->atid);
+ coresight_unregister(drvdata->csdev);
+}
+
+/*
+ * Different TPDA has different periph id.
+ * The difference is 0-7 bits' value. So ignore 0-7 bits.
+ */
+static struct amba_id tpda_ids[] = {
+ {
+ .id = 0x000f0f00,
+ .mask = 0x000fff00,
+ },
+ { 0, 0},
+};
+
+static struct amba_driver tpda_driver = {
+ .drv = {
+ .name = "coresight-tpda",
+ .owner = THIS_MODULE,
+ .suppress_bind_attrs = true,
+ },
+ .probe = tpda_probe,
+ .remove = tpda_remove,
+ .id_table = tpda_ids,
+};
+
+module_amba_driver(tpda_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Trace, Profiling & Diagnostic Aggregator driver");
diff --git a/drivers/hwtracing/coresight/coresight-tpda.h b/drivers/hwtracing/coresight/coresight-tpda.h
new file mode 100644
index 000000000000..0399678df312
--- /dev/null
+++ b/drivers/hwtracing/coresight/coresight-tpda.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _CORESIGHT_CORESIGHT_TPDA_H
+#define _CORESIGHT_CORESIGHT_TPDA_H
+
+#define TPDA_CR (0x000)
+#define TPDA_Pn_CR(n) (0x004 + (n * 4))
+/* Aggregator port enable bit */
+#define TPDA_Pn_CR_ENA BIT(0)
+
+#define TPDA_MAX_INPORTS 32
+
+/* Bits 6 ~ 12 is for atid value */
+#define TPDA_CR_ATID GENMASK(12, 6)
+
+/**
+ * struct tpda_drvdata - specifics associated to an TPDA component
+ * @base: memory mapped base address for this component.
+ * @dev: The device entity associated to this component.
+ * @csdev: component vitals needed by the framework.
+ * @spinlock: lock for the drvdata value.
+ * @enable: enable status of the component.
+ */
+struct tpda_drvdata {
+ void __iomem *base;
+ struct device *dev;
+ struct coresight_device *csdev;
+ spinlock_t spinlock;
+ u8 atid;
+};
+
+#endif /* _CORESIGHT_CORESIGHT_TPDA_H */
diff --git a/drivers/hwtracing/coresight/coresight-tpdm.c b/drivers/hwtracing/coresight/coresight-tpdm.c
new file mode 100644
index 000000000000..9479a5e8c672
--- /dev/null
+++ b/drivers/hwtracing/coresight/coresight-tpdm.c
@@ -0,0 +1,259 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/amba/bus.h>
+#include <linux/bitmap.h>
+#include <linux/coresight.h>
+#include <linux/coresight-pmu.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/fs.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+
+#include "coresight-priv.h"
+#include "coresight-tpdm.h"
+
+DEFINE_CORESIGHT_DEVLIST(tpdm_devs, "tpdm");
+
+static void tpdm_enable_dsb(struct tpdm_drvdata *drvdata)
+{
+ u32 val;
+
+ /* Set the enable bit of DSB control register to 1 */
+ val = readl_relaxed(drvdata->base + TPDM_DSB_CR);
+ val |= TPDM_DSB_CR_ENA;
+ writel_relaxed(val, drvdata->base + TPDM_DSB_CR);
+}
+
+/* TPDM enable operations */
+static void __tpdm_enable(struct tpdm_drvdata *drvdata)
+{
+ CS_UNLOCK(drvdata->base);
+
+ /* Check if DSB datasets is present for TPDM. */
+ if (drvdata->datasets & TPDM_PIDR0_DS_DSB)
+ tpdm_enable_dsb(drvdata);
+
+ CS_LOCK(drvdata->base);
+}
+
+static int tpdm_enable(struct coresight_device *csdev,
+ struct perf_event *event, u32 mode)
+{
+ struct tpdm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+ spin_lock(&drvdata->spinlock);
+ if (drvdata->enable) {
+ spin_unlock(&drvdata->spinlock);
+ return -EBUSY;
+ }
+
+ __tpdm_enable(drvdata);
+ drvdata->enable = true;
+ spin_unlock(&drvdata->spinlock);
+
+ dev_dbg(drvdata->dev, "TPDM tracing enabled\n");
+ return 0;
+}
+
+static void tpdm_disable_dsb(struct tpdm_drvdata *drvdata)
+{
+ u32 val;
+
+ /* Set the enable bit of DSB control register to 0 */
+ val = readl_relaxed(drvdata->base + TPDM_DSB_CR);
+ val &= ~TPDM_DSB_CR_ENA;
+ writel_relaxed(val, drvdata->base + TPDM_DSB_CR);
+}
+
+/* TPDM disable operations */
+static void __tpdm_disable(struct tpdm_drvdata *drvdata)
+{
+ CS_UNLOCK(drvdata->base);
+
+ /* Check if DSB datasets is present for TPDM. */
+ if (drvdata->datasets & TPDM_PIDR0_DS_DSB)
+ tpdm_disable_dsb(drvdata);
+
+ CS_LOCK(drvdata->base);
+}
+
+static void tpdm_disable(struct coresight_device *csdev,
+ struct perf_event *event)
+{
+ struct tpdm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+ spin_lock(&drvdata->spinlock);
+ if (!drvdata->enable) {
+ spin_unlock(&drvdata->spinlock);
+ return;
+ }
+
+ __tpdm_disable(drvdata);
+ drvdata->enable = false;
+ spin_unlock(&drvdata->spinlock);
+
+ dev_dbg(drvdata->dev, "TPDM tracing disabled\n");
+}
+
+static const struct coresight_ops_source tpdm_source_ops = {
+ .enable = tpdm_enable,
+ .disable = tpdm_disable,
+};
+
+static const struct coresight_ops tpdm_cs_ops = {
+ .source_ops = &tpdm_source_ops,
+};
+
+static void tpdm_init_default_data(struct tpdm_drvdata *drvdata)
+{
+ u32 pidr;
+
+ CS_UNLOCK(drvdata->base);
+ /* Get the datasets present on the TPDM. */
+ pidr = readl_relaxed(drvdata->base + CORESIGHT_PERIPHIDR0);
+ drvdata->datasets |= pidr & GENMASK(TPDM_DATASETS - 1, 0);
+ CS_LOCK(drvdata->base);
+}
+
+/*
+ * value 1: 64 bits test data
+ * value 2: 32 bits test data
+ */
+static ssize_t integration_test_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t size)
+{
+ int i, ret = 0;
+ unsigned long val;
+ struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ ret = kstrtoul(buf, 10, &val);
+ if (ret)
+ return ret;
+
+ if (val != 1 && val != 2)
+ return -EINVAL;
+
+ if (!drvdata->enable)
+ return -EINVAL;
+
+ if (val == 1)
+ val = ATBCNTRL_VAL_64;
+ else
+ val = ATBCNTRL_VAL_32;
+ CS_UNLOCK(drvdata->base);
+ writel_relaxed(0x1, drvdata->base + TPDM_ITCNTRL);
+
+ for (i = 0; i < INTEGRATION_TEST_CYCLE; i++)
+ writel_relaxed(val, drvdata->base + TPDM_ITATBCNTRL);
+
+ writel_relaxed(0, drvdata->base + TPDM_ITCNTRL);
+ CS_LOCK(drvdata->base);
+ return size;
+}
+static DEVICE_ATTR_WO(integration_test);
+
+static struct attribute *tpdm_attrs[] = {
+ &dev_attr_integration_test.attr,
+ NULL,
+};
+
+static struct attribute_group tpdm_attr_grp = {
+ .attrs = tpdm_attrs,
+};
+
+static const struct attribute_group *tpdm_attr_grps[] = {
+ &tpdm_attr_grp,
+ NULL,
+};
+
+static int tpdm_probe(struct amba_device *adev, const struct amba_id *id)
+{
+ void __iomem *base;
+ struct device *dev = &adev->dev;
+ struct coresight_platform_data *pdata;
+ struct tpdm_drvdata *drvdata;
+ struct coresight_desc desc = { 0 };
+
+ pdata = coresight_get_platform_data(dev);
+ if (IS_ERR(pdata))
+ return PTR_ERR(pdata);
+ adev->dev.platform_data = pdata;
+
+ /* driver data*/
+ drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
+ if (!drvdata)
+ return -ENOMEM;
+ drvdata->dev = &adev->dev;
+ dev_set_drvdata(dev, drvdata);
+
+ base = devm_ioremap_resource(dev, &adev->res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ drvdata->base = base;
+
+ /* Set up coresight component description */
+ desc.name = coresight_alloc_device_name(&tpdm_devs, dev);
+ if (!desc.name)
+ return -ENOMEM;
+ desc.type = CORESIGHT_DEV_TYPE_SOURCE;
+ desc.subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_OTHERS;
+ desc.ops = &tpdm_cs_ops;
+ desc.pdata = adev->dev.platform_data;
+ desc.dev = &adev->dev;
+ desc.access = CSDEV_ACCESS_IOMEM(base);
+ desc.groups = tpdm_attr_grps;
+ drvdata->csdev = coresight_register(&desc);
+ if (IS_ERR(drvdata->csdev))
+ return PTR_ERR(drvdata->csdev);
+
+ spin_lock_init(&drvdata->spinlock);
+ tpdm_init_default_data(drvdata);
+ /* Decrease pm refcount when probe is done.*/
+ pm_runtime_put(&adev->dev);
+
+ return 0;
+}
+
+static void tpdm_remove(struct amba_device *adev)
+{
+ struct tpdm_drvdata *drvdata = dev_get_drvdata(&adev->dev);
+
+ coresight_unregister(drvdata->csdev);
+}
+
+/*
+ * Different TPDM has different periph id.
+ * The difference is 0-7 bits' value. So ignore 0-7 bits.
+ */
+static struct amba_id tpdm_ids[] = {
+ {
+ .id = 0x000f0e00,
+ .mask = 0x000fff00,
+ },
+ { 0, 0},
+};
+
+static struct amba_driver tpdm_driver = {
+ .drv = {
+ .name = "coresight-tpdm",
+ .owner = THIS_MODULE,
+ .suppress_bind_attrs = true,
+ },
+ .probe = tpdm_probe,
+ .id_table = tpdm_ids,
+ .remove = tpdm_remove,
+};
+
+module_amba_driver(tpdm_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Trace, Profiling & Diagnostic Monitor driver");
diff --git a/drivers/hwtracing/coresight/coresight-tpdm.h b/drivers/hwtracing/coresight/coresight-tpdm.h
new file mode 100644
index 000000000000..543854043a2d
--- /dev/null
+++ b/drivers/hwtracing/coresight/coresight-tpdm.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _CORESIGHT_CORESIGHT_TPDM_H
+#define _CORESIGHT_CORESIGHT_TPDM_H
+
+/* The max number of the datasets that TPDM supports */
+#define TPDM_DATASETS 7
+
+/* DSB Subunit Registers */
+#define TPDM_DSB_CR (0x780)
+/* Enable bit for DSB subunit */
+#define TPDM_DSB_CR_ENA BIT(0)
+
+/* TPDM integration test registers */
+#define TPDM_ITATBCNTRL (0xEF0)
+#define TPDM_ITCNTRL (0xF00)
+
+/* Register value for integration test */
+#define ATBCNTRL_VAL_32 0xC00F1409
+#define ATBCNTRL_VAL_64 0xC01F1409
+
+/*
+ * Number of cycles to write value when
+ * integration test.
+ */
+#define INTEGRATION_TEST_CYCLE 10
+
+/**
+ * The bits of PERIPHIDR0 register.
+ * The fields [6:0] of PERIPHIDR0 are used to determine what
+ * interfaces and subunits are present on a given TPDM.
+ *
+ * PERIPHIDR0[0] : Fix to 1 if ImplDef subunit present, else 0
+ * PERIPHIDR0[1] : Fix to 1 if DSB subunit present, else 0
+ */
+
+#define TPDM_PIDR0_DS_IMPDEF BIT(0)
+#define TPDM_PIDR0_DS_DSB BIT(1)
+
+/**
+ * struct tpdm_drvdata - specifics associated to an TPDM component
+ * @base: memory mapped base address for this component.
+ * @dev: The device entity associated to this component.
+ * @csdev: component vitals needed by the framework.
+ * @spinlock: lock for the drvdata value.
+ * @enable: enable status of the component.
+ * @datasets: The datasets types present of the TPDM.
+ */
+
+struct tpdm_drvdata {
+ void __iomem *base;
+ struct device *dev;
+ struct coresight_device *csdev;
+ spinlock_t spinlock;
+ bool enable;
+ unsigned long datasets;
+};
+
+#endif /* _CORESIGHT_CORESIGHT_TPDM_H */
diff --git a/drivers/hwtracing/coresight/coresight-trace-id.c b/drivers/hwtracing/coresight/coresight-trace-id.c
new file mode 100644
index 000000000000..af5b4ef59cea
--- /dev/null
+++ b/drivers/hwtracing/coresight/coresight-trace-id.c
@@ -0,0 +1,297 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2022, Linaro Limited, All rights reserved.
+ * Author: Mike Leach <mike.leach@linaro.org>
+ */
+#include <linux/coresight-pmu.h>
+#include <linux/cpumask.h>
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#include "coresight-trace-id.h"
+
+/* Default trace ID map. Used on systems that don't require per sink mappings */
+static struct coresight_trace_id_map id_map_default;
+
+/* maintain a record of the mapping of IDs and pending releases per cpu */
+static DEFINE_PER_CPU(atomic_t, cpu_id) = ATOMIC_INIT(0);
+static cpumask_t cpu_id_release_pending;
+
+/* perf session active counter */
+static atomic_t perf_cs_etm_session_active = ATOMIC_INIT(0);
+
+/* lock to protect id_map and cpu data */
+static DEFINE_SPINLOCK(id_map_lock);
+
+/* #define TRACE_ID_DEBUG 1 */
+#if defined(TRACE_ID_DEBUG) || defined(CONFIG_COMPILE_TEST)
+
+static void coresight_trace_id_dump_table(struct coresight_trace_id_map *id_map,
+ const char *func_name)
+{
+ pr_debug("%s id_map::\n", func_name);
+ pr_debug("Used = %*pb\n", CORESIGHT_TRACE_IDS_MAX, id_map->used_ids);
+ pr_debug("Pend = %*pb\n", CORESIGHT_TRACE_IDS_MAX, id_map->pend_rel_ids);
+}
+#define DUMP_ID_MAP(map) coresight_trace_id_dump_table(map, __func__)
+#define DUMP_ID_CPU(cpu, id) pr_debug("%s called; cpu=%d, id=%d\n", __func__, cpu, id)
+#define DUMP_ID(id) pr_debug("%s called; id=%d\n", __func__, id)
+#define PERF_SESSION(n) pr_debug("%s perf count %d\n", __func__, n)
+#else
+#define DUMP_ID_MAP(map)
+#define DUMP_ID(id)
+#define DUMP_ID_CPU(cpu, id)
+#define PERF_SESSION(n)
+#endif
+
+/* unlocked read of current trace ID value for given CPU */
+static int _coresight_trace_id_read_cpu_id(int cpu)
+{
+ return atomic_read(&per_cpu(cpu_id, cpu));
+}
+
+/* look for next available odd ID, return 0 if none found */
+static int coresight_trace_id_find_odd_id(struct coresight_trace_id_map *id_map)
+{
+ int found_id = 0, bit = 1, next_id;
+
+ while ((bit < CORESIGHT_TRACE_ID_RES_TOP) && !found_id) {
+ /*
+ * bitmap length of CORESIGHT_TRACE_ID_RES_TOP,
+ * search from offset `bit`.
+ */
+ next_id = find_next_zero_bit(id_map->used_ids,
+ CORESIGHT_TRACE_ID_RES_TOP, bit);
+ if ((next_id < CORESIGHT_TRACE_ID_RES_TOP) && (next_id & 0x1))
+ found_id = next_id;
+ else
+ bit = next_id + 1;
+ }
+ return found_id;
+}
+
+/*
+ * Allocate new ID and set in use
+ *
+ * if @preferred_id is a valid id then try to use that value if available.
+ * if @preferred_id is not valid and @prefer_odd_id is true, try for odd id.
+ *
+ * Otherwise allocate next available ID.
+ */
+static int coresight_trace_id_alloc_new_id(struct coresight_trace_id_map *id_map,
+ int preferred_id, bool prefer_odd_id)
+{
+ int id = 0;
+
+ /* for backwards compatibility, cpu IDs may use preferred value */
+ if (IS_VALID_CS_TRACE_ID(preferred_id) &&
+ !test_bit(preferred_id, id_map->used_ids)) {
+ id = preferred_id;
+ goto trace_id_allocated;
+ } else if (prefer_odd_id) {
+ /* may use odd ids to avoid preferred legacy cpu IDs */
+ id = coresight_trace_id_find_odd_id(id_map);
+ if (id)
+ goto trace_id_allocated;
+ }
+
+ /*
+ * skip reserved bit 0, look at bitmap length of
+ * CORESIGHT_TRACE_ID_RES_TOP from offset of bit 1.
+ */
+ id = find_next_zero_bit(id_map->used_ids, CORESIGHT_TRACE_ID_RES_TOP, 1);
+ if (id >= CORESIGHT_TRACE_ID_RES_TOP)
+ return -EINVAL;
+
+ /* mark as used */
+trace_id_allocated:
+ set_bit(id, id_map->used_ids);
+ return id;
+}
+
+static void coresight_trace_id_free(int id, struct coresight_trace_id_map *id_map)
+{
+ if (WARN(!IS_VALID_CS_TRACE_ID(id), "Invalid Trace ID %d\n", id))
+ return;
+ if (WARN(!test_bit(id, id_map->used_ids), "Freeing unused ID %d\n", id))
+ return;
+ clear_bit(id, id_map->used_ids);
+}
+
+static void coresight_trace_id_set_pend_rel(int id, struct coresight_trace_id_map *id_map)
+{
+ if (WARN(!IS_VALID_CS_TRACE_ID(id), "Invalid Trace ID %d\n", id))
+ return;
+ set_bit(id, id_map->pend_rel_ids);
+}
+
+/*
+ * release all pending IDs for all current maps & clear CPU associations
+ *
+ * This currently operates on the default id map, but may be extended to
+ * operate on all registered id maps if per sink id maps are used.
+ */
+static void coresight_trace_id_release_all_pending(void)
+{
+ struct coresight_trace_id_map *id_map = &id_map_default;
+ unsigned long flags;
+ int cpu, bit;
+
+ spin_lock_irqsave(&id_map_lock, flags);
+ for_each_set_bit(bit, id_map->pend_rel_ids, CORESIGHT_TRACE_ID_RES_TOP) {
+ clear_bit(bit, id_map->used_ids);
+ clear_bit(bit, id_map->pend_rel_ids);
+ }
+ for_each_cpu(cpu, &cpu_id_release_pending) {
+ atomic_set(&per_cpu(cpu_id, cpu), 0);
+ cpumask_clear_cpu(cpu, &cpu_id_release_pending);
+ }
+ spin_unlock_irqrestore(&id_map_lock, flags);
+ DUMP_ID_MAP(id_map);
+}
+
+static int coresight_trace_id_map_get_cpu_id(int cpu, struct coresight_trace_id_map *id_map)
+{
+ unsigned long flags;
+ int id;
+
+ spin_lock_irqsave(&id_map_lock, flags);
+
+ /* check for existing allocation for this CPU */
+ id = _coresight_trace_id_read_cpu_id(cpu);
+ if (id)
+ goto get_cpu_id_clr_pend;
+
+ /*
+ * Find a new ID.
+ *
+ * Use legacy values where possible in the dynamic trace ID allocator to
+ * allow older tools to continue working if they are not upgraded at the
+ * same time as the kernel drivers.
+ *
+ * If the generated legacy ID is invalid, or not available then the next
+ * available dynamic ID will be used.
+ */
+ id = coresight_trace_id_alloc_new_id(id_map,
+ CORESIGHT_LEGACY_CPU_TRACE_ID(cpu),
+ false);
+ if (!IS_VALID_CS_TRACE_ID(id))
+ goto get_cpu_id_out_unlock;
+
+ /* allocate the new id to the cpu */
+ atomic_set(&per_cpu(cpu_id, cpu), id);
+
+get_cpu_id_clr_pend:
+ /* we are (re)using this ID - so ensure it is not marked for release */
+ cpumask_clear_cpu(cpu, &cpu_id_release_pending);
+ clear_bit(id, id_map->pend_rel_ids);
+
+get_cpu_id_out_unlock:
+ spin_unlock_irqrestore(&id_map_lock, flags);
+
+ DUMP_ID_CPU(cpu, id);
+ DUMP_ID_MAP(id_map);
+ return id;
+}
+
+static void coresight_trace_id_map_put_cpu_id(int cpu, struct coresight_trace_id_map *id_map)
+{
+ unsigned long flags;
+ int id;
+
+ /* check for existing allocation for this CPU */
+ id = _coresight_trace_id_read_cpu_id(cpu);
+ if (!id)
+ return;
+
+ spin_lock_irqsave(&id_map_lock, flags);
+
+ if (atomic_read(&perf_cs_etm_session_active)) {
+ /* set release at pending if perf still active */
+ coresight_trace_id_set_pend_rel(id, id_map);
+ cpumask_set_cpu(cpu, &cpu_id_release_pending);
+ } else {
+ /* otherwise clear id */
+ coresight_trace_id_free(id, id_map);
+ atomic_set(&per_cpu(cpu_id, cpu), 0);
+ }
+
+ spin_unlock_irqrestore(&id_map_lock, flags);
+ DUMP_ID_CPU(cpu, id);
+ DUMP_ID_MAP(id_map);
+}
+
+static int coresight_trace_id_map_get_system_id(struct coresight_trace_id_map *id_map)
+{
+ unsigned long flags;
+ int id;
+
+ spin_lock_irqsave(&id_map_lock, flags);
+ /* prefer odd IDs for system components to avoid legacy CPU IDS */
+ id = coresight_trace_id_alloc_new_id(id_map, 0, true);
+ spin_unlock_irqrestore(&id_map_lock, flags);
+
+ DUMP_ID(id);
+ DUMP_ID_MAP(id_map);
+ return id;
+}
+
+static void coresight_trace_id_map_put_system_id(struct coresight_trace_id_map *id_map, int id)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&id_map_lock, flags);
+ coresight_trace_id_free(id, id_map);
+ spin_unlock_irqrestore(&id_map_lock, flags);
+
+ DUMP_ID(id);
+ DUMP_ID_MAP(id_map);
+}
+
+/* API functions */
+
+int coresight_trace_id_get_cpu_id(int cpu)
+{
+ return coresight_trace_id_map_get_cpu_id(cpu, &id_map_default);
+}
+EXPORT_SYMBOL_GPL(coresight_trace_id_get_cpu_id);
+
+void coresight_trace_id_put_cpu_id(int cpu)
+{
+ coresight_trace_id_map_put_cpu_id(cpu, &id_map_default);
+}
+EXPORT_SYMBOL_GPL(coresight_trace_id_put_cpu_id);
+
+int coresight_trace_id_read_cpu_id(int cpu)
+{
+ return _coresight_trace_id_read_cpu_id(cpu);
+}
+EXPORT_SYMBOL_GPL(coresight_trace_id_read_cpu_id);
+
+int coresight_trace_id_get_system_id(void)
+{
+ return coresight_trace_id_map_get_system_id(&id_map_default);
+}
+EXPORT_SYMBOL_GPL(coresight_trace_id_get_system_id);
+
+void coresight_trace_id_put_system_id(int id)
+{
+ coresight_trace_id_map_put_system_id(&id_map_default, id);
+}
+EXPORT_SYMBOL_GPL(coresight_trace_id_put_system_id);
+
+void coresight_trace_id_perf_start(void)
+{
+ atomic_inc(&perf_cs_etm_session_active);
+ PERF_SESSION(atomic_read(&perf_cs_etm_session_active));
+}
+EXPORT_SYMBOL_GPL(coresight_trace_id_perf_start);
+
+void coresight_trace_id_perf_stop(void)
+{
+ if (!atomic_dec_return(&perf_cs_etm_session_active))
+ coresight_trace_id_release_all_pending();
+ PERF_SESSION(atomic_read(&perf_cs_etm_session_active));
+}
+EXPORT_SYMBOL_GPL(coresight_trace_id_perf_stop);
diff --git a/drivers/hwtracing/coresight/coresight-trace-id.h b/drivers/hwtracing/coresight/coresight-trace-id.h
new file mode 100644
index 000000000000..3797777d367e
--- /dev/null
+++ b/drivers/hwtracing/coresight/coresight-trace-id.h
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright(C) 2022 Linaro Limited. All rights reserved.
+ * Author: Mike Leach <mike.leach@linaro.org>
+ */
+
+#ifndef _CORESIGHT_TRACE_ID_H
+#define _CORESIGHT_TRACE_ID_H
+
+/*
+ * Coresight trace ID allocation API
+ *
+ * With multi cpu systems, and more additional trace sources a scalable
+ * trace ID reservation system is required.
+ *
+ * The system will allocate Ids on a demand basis, and allow them to be
+ * released when done.
+ *
+ * In order to ensure that a consistent cpu / ID matching is maintained
+ * throughout a perf cs_etm event session - a session in progress flag will
+ * be maintained, and released IDs not cleared until the perf session is
+ * complete. This allows the same CPU to be re-allocated its prior ID.
+ *
+ *
+ * Trace ID maps will be created and initialised to prevent architecturally
+ * reserved IDs from being allocated.
+ *
+ * API permits multiple maps to be maintained - for large systems where
+ * different sets of cpus trace into different independent sinks.
+ */
+
+#include <linux/bitops.h>
+#include <linux/types.h>
+
+
+/* architecturally we have 128 IDs some of which are reserved */
+#define CORESIGHT_TRACE_IDS_MAX 128
+
+/* ID 0 is reserved */
+#define CORESIGHT_TRACE_ID_RES_0 0
+
+/* ID 0x70 onwards are reserved */
+#define CORESIGHT_TRACE_ID_RES_TOP 0x70
+
+/* check an ID is in the valid range */
+#define IS_VALID_CS_TRACE_ID(id) \
+ ((id > CORESIGHT_TRACE_ID_RES_0) && (id < CORESIGHT_TRACE_ID_RES_TOP))
+
+/**
+ * Trace ID map.
+ *
+ * @used_ids: Bitmap to register available (bit = 0) and in use (bit = 1) IDs.
+ * Initialised so that the reserved IDs are permanently marked as
+ * in use.
+ * @pend_rel_ids: CPU IDs that have been released by the trace source but not
+ * yet marked as available, to allow re-allocation to the same
+ * CPU during a perf session.
+ */
+struct coresight_trace_id_map {
+ DECLARE_BITMAP(used_ids, CORESIGHT_TRACE_IDS_MAX);
+ DECLARE_BITMAP(pend_rel_ids, CORESIGHT_TRACE_IDS_MAX);
+};
+
+/* Allocate and release IDs for a single default trace ID map */
+
+/**
+ * Read and optionally allocate a CoreSight trace ID and associate with a CPU.
+ *
+ * Function will read the current trace ID for the associated CPU,
+ * allocating an new ID if one is not currently allocated.
+ *
+ * Numeric ID values allocated use legacy allocation algorithm if possible,
+ * otherwise any available ID is used.
+ *
+ * @cpu: The CPU index to allocate for.
+ *
+ * return: CoreSight trace ID or -EINVAL if allocation impossible.
+ */
+int coresight_trace_id_get_cpu_id(int cpu);
+
+/**
+ * Release an allocated trace ID associated with the CPU.
+ *
+ * This will release the CoreSight trace ID associated with the CPU,
+ * unless a perf session is in operation.
+ *
+ * If a perf session is in operation then the ID will be marked as pending
+ * release.
+ *
+ * @cpu: The CPU index to release the associated trace ID.
+ */
+void coresight_trace_id_put_cpu_id(int cpu);
+
+/**
+ * Read the current allocated CoreSight Trace ID value for the CPU.
+ *
+ * Fast read of the current value that does not allocate if no ID allocated
+ * for the CPU.
+ *
+ * Used in perf context where it is known that the value for the CPU will not
+ * be changing, when perf starts and event on a core and outputs the Trace ID
+ * for the CPU as a packet in the data file. IDs cannot change during a perf
+ * session.
+ *
+ * This function does not take the lock protecting the ID lists, avoiding
+ * locking dependency issues with perf locks.
+ *
+ * @cpu: The CPU index to read.
+ *
+ * return: current value, will be 0 if unallocated.
+ */
+int coresight_trace_id_read_cpu_id(int cpu);
+
+/**
+ * Allocate a CoreSight trace ID for a system component.
+ *
+ * Unconditionally allocates a Trace ID, without associating the ID with a CPU.
+ *
+ * Used to allocate IDs for system trace sources such as STM.
+ *
+ * return: Trace ID or -EINVAL if allocation is impossible.
+ */
+int coresight_trace_id_get_system_id(void);
+
+/**
+ * Release an allocated system trace ID.
+ *
+ * Unconditionally release a trace ID allocated to a system component.
+ *
+ * @id: value of trace ID allocated.
+ */
+void coresight_trace_id_put_system_id(int id);
+
+/* notifiers for perf session start and stop */
+
+/**
+ * Notify the Trace ID allocator that a perf session is starting.
+ *
+ * Increase the perf session reference count - called by perf when setting up
+ * a trace event.
+ *
+ * This reference count is used by the ID allocator to ensure that trace IDs
+ * associated with a CPU cannot change or be released during a perf session.
+ */
+void coresight_trace_id_perf_start(void);
+
+/**
+ * Notify the ID allocator that a perf session is stopping.
+ *
+ * Decrease the perf session reference count.
+ * if this causes the count to go to zero, then all Trace IDs marked as pending
+ * release, will be released.
+ */
+void coresight_trace_id_perf_stop(void);
+
+#endif /* _CORESIGHT_TRACE_ID_H */
diff --git a/drivers/hwtracing/coresight/ultrasoc-smb.c b/drivers/hwtracing/coresight/ultrasoc-smb.c
new file mode 100644
index 000000000000..b317342c7ce5
--- /dev/null
+++ b/drivers/hwtracing/coresight/ultrasoc-smb.c
@@ -0,0 +1,648 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Siemens System Memory Buffer driver.
+ * Copyright(c) 2022, HiSilicon Limited.
+ */
+
+#include <linux/atomic.h>
+#include <linux/acpi.h>
+#include <linux/circ_buf.h>
+#include <linux/err.h>
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+
+#include "coresight-etm-perf.h"
+#include "coresight-priv.h"
+#include "ultrasoc-smb.h"
+
+DEFINE_CORESIGHT_DEVLIST(sink_devs, "ultra_smb");
+
+#define ULTRASOC_SMB_DSM_UUID "82ae1283-7f6a-4cbe-aa06-53e8fb24db18"
+
+static bool smb_buffer_not_empty(struct smb_drv_data *drvdata)
+{
+ u32 buf_status = readl(drvdata->base + SMB_LB_INT_STS_REG);
+
+ return FIELD_GET(SMB_LB_INT_STS_NOT_EMPTY_MSK, buf_status);
+}
+
+static void smb_update_data_size(struct smb_drv_data *drvdata)
+{
+ struct smb_data_buffer *sdb = &drvdata->sdb;
+ u32 buf_wrptr;
+
+ buf_wrptr = readl(drvdata->base + SMB_LB_WR_ADDR_REG) -
+ sdb->buf_hw_base;
+
+ /* Buffer is full */
+ if (buf_wrptr == sdb->buf_rdptr && smb_buffer_not_empty(drvdata)) {
+ sdb->data_size = sdb->buf_size;
+ return;
+ }
+
+ /* The buffer mode is circular buffer mode */
+ sdb->data_size = CIRC_CNT(buf_wrptr, sdb->buf_rdptr,
+ sdb->buf_size);
+}
+
+/*
+ * The read pointer adds @nbytes bytes (may round up to the beginning)
+ * after the data is read or discarded, while needing to update the
+ * available data size.
+ */
+static void smb_update_read_ptr(struct smb_drv_data *drvdata, u32 nbytes)
+{
+ struct smb_data_buffer *sdb = &drvdata->sdb;
+
+ sdb->buf_rdptr += nbytes;
+ sdb->buf_rdptr %= sdb->buf_size;
+ writel(sdb->buf_hw_base + sdb->buf_rdptr,
+ drvdata->base + SMB_LB_RD_ADDR_REG);
+
+ sdb->data_size -= nbytes;
+}
+
+static void smb_reset_buffer(struct smb_drv_data *drvdata)
+{
+ struct smb_data_buffer *sdb = &drvdata->sdb;
+ u32 write_ptr;
+
+ /*
+ * We must flush and discard any data left in hardware path
+ * to avoid corrupting the next session.
+ * Note: The write pointer will never exceed the read pointer.
+ */
+ writel(SMB_LB_PURGE_PURGED, drvdata->base + SMB_LB_PURGE_REG);
+
+ /* Reset SMB logical buffer status flags */
+ writel(SMB_LB_INT_STS_RESET, drvdata->base + SMB_LB_INT_STS_REG);
+
+ write_ptr = readl(drvdata->base + SMB_LB_WR_ADDR_REG);
+
+ /* Do nothing, not data left in hardware path */
+ if (!write_ptr || write_ptr == sdb->buf_rdptr + sdb->buf_hw_base)
+ return;
+
+ /*
+ * The SMB_LB_WR_ADDR_REG register is read-only,
+ * Synchronize the read pointer to write pointer.
+ */
+ writel(write_ptr, drvdata->base + SMB_LB_RD_ADDR_REG);
+ sdb->buf_rdptr = write_ptr - sdb->buf_hw_base;
+}
+
+static int smb_open(struct inode *inode, struct file *file)
+{
+ struct smb_drv_data *drvdata = container_of(file->private_data,
+ struct smb_drv_data, miscdev);
+ int ret = 0;
+
+ mutex_lock(&drvdata->mutex);
+
+ if (drvdata->reading) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ if (atomic_read(drvdata->csdev->refcnt)) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ smb_update_data_size(drvdata);
+
+ drvdata->reading = true;
+out:
+ mutex_unlock(&drvdata->mutex);
+
+ return ret;
+}
+
+static ssize_t smb_read(struct file *file, char __user *data, size_t len,
+ loff_t *ppos)
+{
+ struct smb_drv_data *drvdata = container_of(file->private_data,
+ struct smb_drv_data, miscdev);
+ struct smb_data_buffer *sdb = &drvdata->sdb;
+ struct device *dev = &drvdata->csdev->dev;
+ ssize_t to_copy = 0;
+
+ if (!len)
+ return 0;
+
+ mutex_lock(&drvdata->mutex);
+
+ if (!sdb->data_size)
+ goto out;
+
+ to_copy = min(sdb->data_size, len);
+
+ /* Copy parts of trace data when read pointer wrap around SMB buffer */
+ if (sdb->buf_rdptr + to_copy > sdb->buf_size)
+ to_copy = sdb->buf_size - sdb->buf_rdptr;
+
+ if (copy_to_user(data, sdb->buf_base + sdb->buf_rdptr, to_copy)) {
+ dev_dbg(dev, "Failed to copy data to user\n");
+ to_copy = -EFAULT;
+ goto out;
+ }
+
+ *ppos += to_copy;
+
+ smb_update_read_ptr(drvdata, to_copy);
+
+ dev_dbg(dev, "%zu bytes copied\n", to_copy);
+out:
+ if (!sdb->data_size)
+ smb_reset_buffer(drvdata);
+ mutex_unlock(&drvdata->mutex);
+
+ return to_copy;
+}
+
+static int smb_release(struct inode *inode, struct file *file)
+{
+ struct smb_drv_data *drvdata = container_of(file->private_data,
+ struct smb_drv_data, miscdev);
+
+ mutex_lock(&drvdata->mutex);
+ drvdata->reading = false;
+ mutex_unlock(&drvdata->mutex);
+
+ return 0;
+}
+
+static const struct file_operations smb_fops = {
+ .owner = THIS_MODULE,
+ .open = smb_open,
+ .read = smb_read,
+ .release = smb_release,
+ .llseek = no_llseek,
+};
+
+static ssize_t buf_size_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct smb_drv_data *drvdata = dev_get_drvdata(dev->parent);
+
+ return sysfs_emit(buf, "0x%lx\n", drvdata->sdb.buf_size);
+}
+static DEVICE_ATTR_RO(buf_size);
+
+static struct attribute *smb_sink_attrs[] = {
+ coresight_simple_reg32(read_pos, SMB_LB_RD_ADDR_REG),
+ coresight_simple_reg32(write_pos, SMB_LB_WR_ADDR_REG),
+ coresight_simple_reg32(buf_status, SMB_LB_INT_STS_REG),
+ &dev_attr_buf_size.attr,
+ NULL
+};
+
+static const struct attribute_group smb_sink_group = {
+ .attrs = smb_sink_attrs,
+ .name = "mgmt",
+};
+
+static const struct attribute_group *smb_sink_groups[] = {
+ &smb_sink_group,
+ NULL
+};
+
+static void smb_enable_hw(struct smb_drv_data *drvdata)
+{
+ writel(SMB_GLB_EN_HW_ENABLE, drvdata->base + SMB_GLB_EN_REG);
+}
+
+static void smb_disable_hw(struct smb_drv_data *drvdata)
+{
+ writel(0x0, drvdata->base + SMB_GLB_EN_REG);
+}
+
+static void smb_enable_sysfs(struct coresight_device *csdev)
+{
+ struct smb_drv_data *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+ if (drvdata->mode != CS_MODE_DISABLED)
+ return;
+
+ smb_enable_hw(drvdata);
+ drvdata->mode = CS_MODE_SYSFS;
+}
+
+static int smb_enable_perf(struct coresight_device *csdev, void *data)
+{
+ struct smb_drv_data *drvdata = dev_get_drvdata(csdev->dev.parent);
+ struct perf_output_handle *handle = data;
+ struct cs_buffers *buf = etm_perf_sink_config(handle);
+ pid_t pid;
+
+ if (!buf)
+ return -EINVAL;
+
+ /* Get a handle on the pid of the target process */
+ pid = buf->pid;
+
+ /* Device is already in used by other session */
+ if (drvdata->pid != -1 && drvdata->pid != pid)
+ return -EBUSY;
+
+ if (drvdata->pid == -1) {
+ smb_enable_hw(drvdata);
+ drvdata->pid = pid;
+ drvdata->mode = CS_MODE_PERF;
+ }
+
+ return 0;
+}
+
+static int smb_enable(struct coresight_device *csdev, u32 mode, void *data)
+{
+ struct smb_drv_data *drvdata = dev_get_drvdata(csdev->dev.parent);
+ int ret = 0;
+
+ mutex_lock(&drvdata->mutex);
+
+ /* Do nothing, the trace data is reading by other interface now */
+ if (drvdata->reading) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ /* Do nothing, the SMB is already enabled as other mode */
+ if (drvdata->mode != CS_MODE_DISABLED && drvdata->mode != mode) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ switch (mode) {
+ case CS_MODE_SYSFS:
+ smb_enable_sysfs(csdev);
+ break;
+ case CS_MODE_PERF:
+ ret = smb_enable_perf(csdev, data);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ if (ret)
+ goto out;
+
+ atomic_inc(csdev->refcnt);
+
+ dev_dbg(&csdev->dev, "Ultrasoc SMB enabled\n");
+out:
+ mutex_unlock(&drvdata->mutex);
+
+ return ret;
+}
+
+static int smb_disable(struct coresight_device *csdev)
+{
+ struct smb_drv_data *drvdata = dev_get_drvdata(csdev->dev.parent);
+ int ret = 0;
+
+ mutex_lock(&drvdata->mutex);
+
+ if (drvdata->reading) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ if (atomic_dec_return(csdev->refcnt)) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ /* Complain if we (somehow) got out of sync */
+ WARN_ON_ONCE(drvdata->mode == CS_MODE_DISABLED);
+
+ smb_disable_hw(drvdata);
+
+ /* Dissociate from the target process. */
+ drvdata->pid = -1;
+ drvdata->mode = CS_MODE_DISABLED;
+
+ dev_dbg(&csdev->dev, "Ultrasoc SMB disabled\n");
+out:
+ mutex_unlock(&drvdata->mutex);
+
+ return ret;
+}
+
+static void *smb_alloc_buffer(struct coresight_device *csdev,
+ struct perf_event *event, void **pages,
+ int nr_pages, bool overwrite)
+{
+ struct cs_buffers *buf;
+ int node;
+
+ node = (event->cpu == -1) ? NUMA_NO_NODE : cpu_to_node(event->cpu);
+ buf = kzalloc_node(sizeof(struct cs_buffers), GFP_KERNEL, node);
+ if (!buf)
+ return NULL;
+
+ buf->snapshot = overwrite;
+ buf->nr_pages = nr_pages;
+ buf->data_pages = pages;
+ buf->pid = task_pid_nr(event->owner);
+
+ return buf;
+}
+
+static void smb_free_buffer(void *config)
+{
+ struct cs_buffers *buf = config;
+
+ kfree(buf);
+}
+
+static void smb_sync_perf_buffer(struct smb_drv_data *drvdata,
+ struct cs_buffers *buf,
+ unsigned long head)
+{
+ struct smb_data_buffer *sdb = &drvdata->sdb;
+ char **dst_pages = (char **)buf->data_pages;
+ unsigned long to_copy;
+ long pg_idx, pg_offset;
+
+ pg_idx = head >> PAGE_SHIFT;
+ pg_offset = head & (PAGE_SIZE - 1);
+
+ while (sdb->data_size) {
+ unsigned long pg_space = PAGE_SIZE - pg_offset;
+
+ to_copy = min(sdb->data_size, pg_space);
+
+ /* Copy parts of trace data when read pointer wrap around */
+ if (sdb->buf_rdptr + to_copy > sdb->buf_size)
+ to_copy = sdb->buf_size - sdb->buf_rdptr;
+
+ memcpy(dst_pages[pg_idx] + pg_offset,
+ sdb->buf_base + sdb->buf_rdptr, to_copy);
+
+ pg_offset += to_copy;
+ if (pg_offset >= PAGE_SIZE) {
+ pg_offset = 0;
+ pg_idx++;
+ pg_idx %= buf->nr_pages;
+ }
+ smb_update_read_ptr(drvdata, to_copy);
+ }
+
+ smb_reset_buffer(drvdata);
+}
+
+static unsigned long smb_update_buffer(struct coresight_device *csdev,
+ struct perf_output_handle *handle,
+ void *sink_config)
+{
+ struct smb_drv_data *drvdata = dev_get_drvdata(csdev->dev.parent);
+ struct smb_data_buffer *sdb = &drvdata->sdb;
+ struct cs_buffers *buf = sink_config;
+ unsigned long data_size = 0;
+ bool lost = false;
+
+ if (!buf)
+ return 0;
+
+ mutex_lock(&drvdata->mutex);
+
+ /* Don't do anything if another tracer is using this sink. */
+ if (atomic_read(csdev->refcnt) != 1)
+ goto out;
+
+ smb_disable_hw(drvdata);
+ smb_update_data_size(drvdata);
+
+ /*
+ * The SMB buffer may be bigger than the space available in the
+ * perf ring buffer (handle->size). If so advance the offset so
+ * that we get the latest trace data.
+ */
+ if (sdb->data_size > handle->size) {
+ smb_update_read_ptr(drvdata, sdb->data_size - handle->size);
+ lost = true;
+ }
+
+ data_size = sdb->data_size;
+ smb_sync_perf_buffer(drvdata, buf, handle->head);
+ if (!buf->snapshot && lost)
+ perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
+out:
+ mutex_unlock(&drvdata->mutex);
+
+ return data_size;
+}
+
+static const struct coresight_ops_sink smb_cs_ops = {
+ .enable = smb_enable,
+ .disable = smb_disable,
+ .alloc_buffer = smb_alloc_buffer,
+ .free_buffer = smb_free_buffer,
+ .update_buffer = smb_update_buffer,
+};
+
+static const struct coresight_ops cs_ops = {
+ .sink_ops = &smb_cs_ops,
+};
+
+static int smb_init_data_buffer(struct platform_device *pdev,
+ struct smb_data_buffer *sdb)
+{
+ struct resource *res;
+ void *base;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, SMB_BUF_ADDR_RES);
+ if (!res) {
+ dev_err(&pdev->dev, "SMB device failed to get resource\n");
+ return -EINVAL;
+ }
+
+ sdb->buf_rdptr = 0;
+ sdb->buf_hw_base = FIELD_GET(SMB_BUF_ADDR_LO_MSK, res->start);
+ sdb->buf_size = resource_size(res);
+ if (sdb->buf_size == 0)
+ return -EINVAL;
+
+ /*
+ * This is a chunk of memory, use classic mapping with better
+ * performance.
+ */
+ base = devm_memremap(&pdev->dev, sdb->buf_hw_base, sdb->buf_size,
+ MEMREMAP_WB);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ sdb->buf_base = base;
+
+ return 0;
+}
+
+static void smb_init_hw(struct smb_drv_data *drvdata)
+{
+ smb_disable_hw(drvdata);
+ smb_reset_buffer(drvdata);
+
+ writel(SMB_LB_CFG_LO_DEFAULT, drvdata->base + SMB_LB_CFG_LO_REG);
+ writel(SMB_LB_CFG_HI_DEFAULT, drvdata->base + SMB_LB_CFG_HI_REG);
+ writel(SMB_GLB_CFG_DEFAULT, drvdata->base + SMB_GLB_CFG_REG);
+ writel(SMB_GLB_INT_CFG, drvdata->base + SMB_GLB_INT_REG);
+ writel(SMB_LB_INT_CTRL_CFG, drvdata->base + SMB_LB_INT_CTRL_REG);
+}
+
+static int smb_register_sink(struct platform_device *pdev,
+ struct smb_drv_data *drvdata)
+{
+ struct coresight_platform_data *pdata = NULL;
+ struct coresight_desc desc = { 0 };
+ int ret;
+
+ pdata = coresight_get_platform_data(&pdev->dev);
+ if (IS_ERR(pdata))
+ return PTR_ERR(pdata);
+
+ desc.type = CORESIGHT_DEV_TYPE_SINK;
+ desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;
+ desc.ops = &cs_ops;
+ desc.pdata = pdata;
+ desc.dev = &pdev->dev;
+ desc.groups = smb_sink_groups;
+ desc.name = coresight_alloc_device_name(&sink_devs, &pdev->dev);
+ if (!desc.name) {
+ dev_err(&pdev->dev, "Failed to alloc coresight device name");
+ return -ENOMEM;
+ }
+ desc.access = CSDEV_ACCESS_IOMEM(drvdata->base);
+
+ drvdata->csdev = coresight_register(&desc);
+ if (IS_ERR(drvdata->csdev))
+ return PTR_ERR(drvdata->csdev);
+
+ drvdata->miscdev.name = desc.name;
+ drvdata->miscdev.minor = MISC_DYNAMIC_MINOR;
+ drvdata->miscdev.fops = &smb_fops;
+ ret = misc_register(&drvdata->miscdev);
+ if (ret) {
+ coresight_unregister(drvdata->csdev);
+ dev_err(&pdev->dev, "Failed to register misc, ret=%d\n", ret);
+ }
+
+ return ret;
+}
+
+static void smb_unregister_sink(struct smb_drv_data *drvdata)
+{
+ misc_deregister(&drvdata->miscdev);
+ coresight_unregister(drvdata->csdev);
+}
+
+static int smb_config_inport(struct device *dev, bool enable)
+{
+ u64 func = enable ? 1 : 0;
+ union acpi_object *obj;
+ guid_t guid;
+ u64 rev = 0;
+
+ /*
+ * Using DSM calls to enable/disable ultrasoc hardwares on
+ * tracing path, to prevent ultrasoc packet format being exposed.
+ */
+ if (guid_parse(ULTRASOC_SMB_DSM_UUID, &guid)) {
+ dev_err(dev, "Get GUID failed\n");
+ return -EINVAL;
+ }
+
+ obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), &guid, rev, func, NULL);
+ if (!obj) {
+ dev_err(dev, "ACPI handle failed\n");
+ return -ENODEV;
+ }
+
+ ACPI_FREE(obj);
+
+ return 0;
+}
+
+static int smb_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct smb_drv_data *drvdata;
+ int ret;
+
+ drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
+ if (!drvdata)
+ return -ENOMEM;
+
+ drvdata->base = devm_platform_ioremap_resource(pdev, SMB_REG_ADDR_RES);
+ if (IS_ERR(drvdata->base)) {
+ dev_err(dev, "Failed to ioremap resource\n");
+ return PTR_ERR(drvdata->base);
+ }
+
+ smb_init_hw(drvdata);
+
+ ret = smb_init_data_buffer(pdev, &drvdata->sdb);
+ if (ret) {
+ dev_err(dev, "Failed to init buffer, ret = %d\n", ret);
+ return ret;
+ }
+
+ mutex_init(&drvdata->mutex);
+ drvdata->pid = -1;
+
+ ret = smb_register_sink(pdev, drvdata);
+ if (ret) {
+ dev_err(dev, "Failed to register SMB sink\n");
+ return ret;
+ }
+
+ ret = smb_config_inport(dev, true);
+ if (ret) {
+ smb_unregister_sink(drvdata);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, drvdata);
+
+ return 0;
+}
+
+static int smb_remove(struct platform_device *pdev)
+{
+ struct smb_drv_data *drvdata = platform_get_drvdata(pdev);
+ int ret;
+
+ ret = smb_config_inport(&pdev->dev, false);
+ if (ret)
+ return ret;
+
+ smb_unregister_sink(drvdata);
+
+ return 0;
+}
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id ultrasoc_smb_acpi_match[] = {
+ {"HISI03A1", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, ultrasoc_smb_acpi_match);
+#endif
+
+static struct platform_driver smb_driver = {
+ .driver = {
+ .name = "ultrasoc-smb",
+ .acpi_match_table = ACPI_PTR(ultrasoc_smb_acpi_match),
+ .suppress_bind_attrs = true,
+ },
+ .probe = smb_probe,
+ .remove = smb_remove,
+};
+module_platform_driver(smb_driver);
+
+MODULE_DESCRIPTION("UltraSoc SMB CoreSight driver");
+MODULE_LICENSE("Dual MIT/GPL");
+MODULE_AUTHOR("Jonathan Zhou <jonathan.zhouwen@huawei.com>");
+MODULE_AUTHOR("Qi Liu <liuqi115@huawei.com>");
diff --git a/drivers/hwtracing/coresight/ultrasoc-smb.h b/drivers/hwtracing/coresight/ultrasoc-smb.h
new file mode 100644
index 000000000000..7dfbe42e37a0
--- /dev/null
+++ b/drivers/hwtracing/coresight/ultrasoc-smb.h
@@ -0,0 +1,125 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Siemens System Memory Buffer driver.
+ * Copyright(c) 2022, HiSilicon Limited.
+ */
+
+#ifndef _ULTRASOC_SMB_H
+#define _ULTRASOC_SMB_H
+
+#include <linux/miscdevice.h>
+#include <linux/mutex.h>
+
+/* Offset of SMB global registers */
+#define SMB_GLB_CFG_REG 0x00
+#define SMB_GLB_EN_REG 0x04
+#define SMB_GLB_INT_REG 0x08
+
+/* Offset of SMB logical buffer registers */
+#define SMB_LB_CFG_LO_REG 0x40
+#define SMB_LB_CFG_HI_REG 0x44
+#define SMB_LB_INT_CTRL_REG 0x48
+#define SMB_LB_INT_STS_REG 0x4c
+#define SMB_LB_RD_ADDR_REG 0x5c
+#define SMB_LB_WR_ADDR_REG 0x60
+#define SMB_LB_PURGE_REG 0x64
+
+/* Set global config register */
+#define SMB_GLB_CFG_BURST_LEN_MSK GENMASK(11, 4)
+#define SMB_GLB_CFG_IDLE_PRD_MSK GENMASK(15, 12)
+#define SMB_GLB_CFG_MEM_WR_MSK GENMASK(21, 16)
+#define SMB_GLB_CFG_MEM_RD_MSK GENMASK(27, 22)
+#define SMB_GLB_CFG_DEFAULT (FIELD_PREP(SMB_GLB_CFG_BURST_LEN_MSK, 0xf) | \
+ FIELD_PREP(SMB_GLB_CFG_IDLE_PRD_MSK, 0xf) | \
+ FIELD_PREP(SMB_GLB_CFG_MEM_WR_MSK, 0x3) | \
+ FIELD_PREP(SMB_GLB_CFG_MEM_RD_MSK, 0x1b))
+
+#define SMB_GLB_EN_HW_ENABLE BIT(0)
+
+/* Set global interrupt control register */
+#define SMB_GLB_INT_EN BIT(0)
+#define SMB_GLB_INT_PULSE BIT(1) /* Interrupt type: 1 - Pulse */
+#define SMB_GLB_INT_ACT_H BIT(2) /* Interrupt polarity: 1 - Active high */
+#define SMB_GLB_INT_CFG (SMB_GLB_INT_EN | SMB_GLB_INT_PULSE | \
+ SMB_GLB_INT_ACT_H)
+
+/* Set logical buffer config register lower 32 bits */
+#define SMB_LB_CFG_LO_EN BIT(0)
+#define SMB_LB_CFG_LO_SINGLE_END BIT(1)
+#define SMB_LB_CFG_LO_INIT BIT(8)
+#define SMB_LB_CFG_LO_CONT BIT(11)
+#define SMB_LB_CFG_LO_FLOW_MSK GENMASK(19, 16)
+#define SMB_LB_CFG_LO_DEFAULT (SMB_LB_CFG_LO_EN | SMB_LB_CFG_LO_SINGLE_END | \
+ SMB_LB_CFG_LO_INIT | SMB_LB_CFG_LO_CONT | \
+ FIELD_PREP(SMB_LB_CFG_LO_FLOW_MSK, 0xf))
+
+/* Set logical buffer config register upper 32 bits */
+#define SMB_LB_CFG_HI_RANGE_UP_MSK GENMASK(15, 8)
+#define SMB_LB_CFG_HI_DEFAULT FIELD_PREP(SMB_LB_CFG_HI_RANGE_UP_MSK, 0xff)
+
+/*
+ * Set logical buffer interrupt control register.
+ * The register control the validity of both real-time events and
+ * interrupts. When logical buffer status changes causes to issue
+ * an interrupt at the same time as it issues a real-time event.
+ * Real-time events are used in SMB driver, which needs to get the buffer
+ * status. Interrupts are used in debugger mode.
+ * SMB_LB_INT_CTRL_BUF_NOTE_MASK control which events flags or interrupts
+ * are valid.
+ */
+#define SMB_LB_INT_CTRL_EN BIT(0)
+#define SMB_LB_INT_CTRL_BUF_NOTE_MSK GENMASK(11, 8)
+#define SMB_LB_INT_CTRL_CFG (SMB_LB_INT_CTRL_EN | \
+ FIELD_PREP(SMB_LB_INT_CTRL_BUF_NOTE_MSK, 0xf))
+
+/* Set logical buffer interrupt status register */
+#define SMB_LB_INT_STS_NOT_EMPTY_MSK BIT(0)
+#define SMB_LB_INT_STS_BUF_RESET_MSK GENMASK(3, 0)
+#define SMB_LB_INT_STS_RESET FIELD_PREP(SMB_LB_INT_STS_BUF_RESET_MSK, 0xf)
+
+#define SMB_LB_PURGE_PURGED BIT(0)
+
+#define SMB_REG_ADDR_RES 0
+#define SMB_BUF_ADDR_RES 1
+#define SMB_BUF_ADDR_LO_MSK GENMASK(31, 0)
+
+/**
+ * struct smb_data_buffer - Details of the buffer used by SMB
+ * @buf_base: Memory mapped base address of SMB.
+ * @buf_hw_base: SMB buffer start Physical base address, only used 32bits.
+ * @buf_size: Size of the buffer.
+ * @data_size: Size of the available trace data for SMB.
+ * @buf_rdptr: Current read position (index) within the buffer.
+ */
+struct smb_data_buffer {
+ void *buf_base;
+ u32 buf_hw_base;
+ unsigned long buf_size;
+ unsigned long data_size;
+ unsigned long buf_rdptr;
+};
+
+/**
+ * struct smb_drv_data - specifics associated to an SMB component
+ * @base: Memory mapped base address for SMB component.
+ * @csdev: Component vitals needed by the framework.
+ * @sdb: Data buffer for SMB.
+ * @miscdev: Specifics to handle "/dev/xyz.smb" entry.
+ * @mutex: Control data access to one at a time.
+ * @reading: Synchronise user space access to SMB buffer.
+ * @pid: Process ID of the process being monitored by the
+ * session that is using this component.
+ * @mode: How this SMB is being used, perf mode or sysfs mode.
+ */
+struct smb_drv_data {
+ void __iomem *base;
+ struct coresight_device *csdev;
+ struct smb_data_buffer sdb;
+ struct miscdevice miscdev;
+ struct mutex mutex;
+ bool reading;
+ pid_t pid;
+ u32 mode;
+};
+
+#endif
diff --git a/drivers/hwtracing/intel_th/core.c b/drivers/hwtracing/intel_th/core.c
index 7e753a75d23b..cc7f879bb175 100644
--- a/drivers/hwtracing/intel_th/core.c
+++ b/drivers/hwtracing/intel_th/core.c
@@ -185,11 +185,11 @@ static struct device_type intel_th_source_device_type = {
.release = intel_th_device_release,
};
-static char *intel_th_output_devnode(struct device *dev, umode_t *mode,
+static char *intel_th_output_devnode(const struct device *dev, umode_t *mode,
kuid_t *uid, kgid_t *gid)
{
- struct intel_th_device *thdev = to_intel_th_device(dev);
- struct intel_th *th = to_intel_th(thdev);
+ const struct intel_th_device *thdev = to_intel_th_device(dev);
+ const struct intel_th *th = to_intel_th(thdev);
char *node;
if (thdev->id >= 0)
diff --git a/drivers/hwtracing/intel_th/intel_th.h b/drivers/hwtracing/intel_th/intel_th.h
index 0ffb42990175..6cbba733f259 100644
--- a/drivers/hwtracing/intel_th/intel_th.h
+++ b/drivers/hwtracing/intel_th/intel_th.h
@@ -205,7 +205,7 @@ struct intel_th_driver {
* INTEL_TH_SWITCH and INTEL_TH_SOURCE are children of the intel_th device.
*/
static inline struct intel_th_device *
-to_intel_th_parent(struct intel_th_device *thdev)
+to_intel_th_parent(const struct intel_th_device *thdev)
{
struct device *parent = thdev->dev.parent;
@@ -215,7 +215,7 @@ to_intel_th_parent(struct intel_th_device *thdev)
return to_intel_th_device(parent);
}
-static inline struct intel_th *to_intel_th(struct intel_th_device *thdev)
+static inline struct intel_th *to_intel_th(const struct intel_th_device *thdev)
{
if (thdev->type == INTEL_TH_OUTPUT)
thdev = to_intel_th_parent(thdev);
diff --git a/drivers/hwtracing/intel_th/msu.c b/drivers/hwtracing/intel_th/msu.c
index 6c8215a47a60..9621efe0e95c 100644
--- a/drivers/hwtracing/intel_th/msu.c
+++ b/drivers/hwtracing/intel_th/msu.c
@@ -1659,7 +1659,7 @@ out:
atomic_dec(&msc->user_count);
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- vma->vm_flags |= VM_DONTEXPAND | VM_DONTCOPY;
+ vm_flags_set(vma, VM_DONTEXPAND | VM_DONTCOPY);
vma->vm_ops = &msc_mmap_ops;
return ret;
}
diff --git a/drivers/hwtracing/ptt/hisi_ptt.c b/drivers/hwtracing/ptt/hisi_ptt.c
index 5d5526aa60c4..30f1525639b5 100644
--- a/drivers/hwtracing/ptt/hisi_ptt.c
+++ b/drivers/hwtracing/ptt/hisi_ptt.c
@@ -356,8 +356,18 @@ static int hisi_ptt_register_irq(struct hisi_ptt *hisi_ptt)
static int hisi_ptt_init_filters(struct pci_dev *pdev, void *data)
{
+ struct pci_dev *root_port = pcie_find_root_port(pdev);
struct hisi_ptt_filter_desc *filter;
struct hisi_ptt *hisi_ptt = data;
+ u32 port_devid;
+
+ if (!root_port)
+ return 0;
+
+ port_devid = PCI_DEVID(root_port->bus->number, root_port->devfn);
+ if (port_devid < hisi_ptt->lower_bdf ||
+ port_devid > hisi_ptt->upper_bdf)
+ return 0;
/*
* We won't fail the probe if filter allocation failed here. The filters
diff --git a/drivers/hwtracing/stm/Kconfig b/drivers/hwtracing/stm/Kconfig
index aad594fe79cc..eda6b11d40a1 100644
--- a/drivers/hwtracing/stm/Kconfig
+++ b/drivers/hwtracing/stm/Kconfig
@@ -2,7 +2,6 @@
config STM
tristate "System Trace Module devices"
select CONFIGFS_FS
- select SRCU
help
A System Trace Module (STM) is a device exporting data in System
Trace Protocol (STP) format as defined by MIPI STP standards.
diff --git a/drivers/hwtracing/stm/core.c b/drivers/hwtracing/stm/core.c
index 2712e699ba08..534fbefc7f6a 100644
--- a/drivers/hwtracing/stm/core.c
+++ b/drivers/hwtracing/stm/core.c
@@ -715,7 +715,7 @@ static int stm_char_mmap(struct file *file, struct vm_area_struct *vma)
pm_runtime_get_sync(&stm->dev);
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+ vm_flags_set(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP);
vma->vm_ops = &stm_mmap_vmops;
vm_iomap_memory(vma, phys, size);
diff --git a/drivers/i2c/algos/i2c-algo-bit.c b/drivers/i2c/algos/i2c-algo-bit.c
index fc90293afcbf..eddf25b90ca8 100644
--- a/drivers/i2c/algos/i2c-algo-bit.c
+++ b/drivers/i2c/algos/i2c-algo-bit.c
@@ -184,8 +184,9 @@ static int i2c_outb(struct i2c_adapter *i2c_adap, unsigned char c)
/* read ack: SDA should be pulled down by slave, or it may
* NAK (usually to report problems with the data we wrote).
+ * Always report ACK if SDA is write-only.
*/
- ack = !getsda(adap); /* ack: sda is pulled low -> success */
+ ack = !adap->getsda || !getsda(adap); /* ack: sda is pulled low -> success */
bit_dbg(2, &i2c_adap->dev, "i2c_outb: 0x%02x %s\n", (int)c,
ack ? "A" : "NA");
@@ -238,71 +239,55 @@ static int test_bus(struct i2c_adapter *i2c_adap)
return -ENODEV;
}
+ if (adap->getsda == NULL)
+ pr_info("%s: SDA is write-only, testing not possible\n", name);
if (adap->getscl == NULL)
- pr_info("%s: Testing SDA only, SCL is not readable\n", name);
+ pr_info("%s: SCL is write-only, testing not possible\n", name);
- sda = getsda(adap);
- scl = (adap->getscl == NULL) ? 1 : getscl(adap);
+ sda = adap->getsda ? getsda(adap) : 1;
+ scl = adap->getscl ? getscl(adap) : 1;
if (!scl || !sda) {
- printk(KERN_WARNING
- "%s: bus seems to be busy (scl=%d, sda=%d)\n",
- name, scl, sda);
+ pr_warn("%s: bus seems to be busy (scl=%d, sda=%d)\n", name, scl, sda);
goto bailout;
}
sdalo(adap);
- sda = getsda(adap);
- scl = (adap->getscl == NULL) ? 1 : getscl(adap);
- if (sda) {
- printk(KERN_WARNING "%s: SDA stuck high!\n", name);
+ if (adap->getsda && getsda(adap)) {
+ pr_warn("%s: SDA stuck high!\n", name);
goto bailout;
}
- if (!scl) {
- printk(KERN_WARNING
- "%s: SCL unexpected low while pulling SDA low!\n",
- name);
+ if (adap->getscl && !getscl(adap)) {
+ pr_warn("%s: SCL unexpected low while pulling SDA low!\n", name);
goto bailout;
}
sdahi(adap);
- sda = getsda(adap);
- scl = (adap->getscl == NULL) ? 1 : getscl(adap);
- if (!sda) {
- printk(KERN_WARNING "%s: SDA stuck low!\n", name);
+ if (adap->getsda && !getsda(adap)) {
+ pr_warn("%s: SDA stuck low!\n", name);
goto bailout;
}
- if (!scl) {
- printk(KERN_WARNING
- "%s: SCL unexpected low while pulling SDA high!\n",
- name);
+ if (adap->getscl && !getscl(adap)) {
+ pr_warn("%s: SCL unexpected low while pulling SDA high!\n", name);
goto bailout;
}
scllo(adap);
- sda = getsda(adap);
- scl = (adap->getscl == NULL) ? 0 : getscl(adap);
- if (scl) {
- printk(KERN_WARNING "%s: SCL stuck high!\n", name);
+ if (adap->getscl && getscl(adap)) {
+ pr_warn("%s: SCL stuck high!\n", name);
goto bailout;
}
- if (!sda) {
- printk(KERN_WARNING
- "%s: SDA unexpected low while pulling SCL low!\n",
- name);
+ if (adap->getsda && !getsda(adap)) {
+ pr_warn("%s: SDA unexpected low while pulling SCL low!\n", name);
goto bailout;
}
sclhi(adap);
- sda = getsda(adap);
- scl = (adap->getscl == NULL) ? 1 : getscl(adap);
- if (!scl) {
- printk(KERN_WARNING "%s: SCL stuck low!\n", name);
+ if (adap->getscl && !getscl(adap)) {
+ pr_warn("%s: SCL stuck low!\n", name);
goto bailout;
}
- if (!sda) {
- printk(KERN_WARNING
- "%s: SDA unexpected low while pulling SCL high!\n",
- name);
+ if (adap->getsda && !getsda(adap)) {
+ pr_warn("%s: SDA unexpected low while pulling SCL high!\n", name);
goto bailout;
}
@@ -420,6 +405,10 @@ static int readbytes(struct i2c_adapter *i2c_adap, struct i2c_msg *msg)
unsigned char *temp = msg->buf;
int count = msg->len;
const unsigned flags = msg->flags;
+ struct i2c_algo_bit_data *adap = i2c_adap->algo_data;
+
+ if (!adap->getsda)
+ return -EOPNOTSUPP;
while (count > 0) {
inval = i2c_inb(i2c_adap);
@@ -670,11 +659,15 @@ static int __i2c_bit_add_bus(struct i2c_adapter *adap,
if (ret < 0)
return ret;
- /* Complain if SCL can't be read */
- if (bit_adap->getscl == NULL) {
+ if (bit_adap->getsda == NULL)
+ dev_warn(&adap->dev, "Not I2C compliant: can't read SDA\n");
+
+ if (bit_adap->getscl == NULL)
dev_warn(&adap->dev, "Not I2C compliant: can't read SCL\n");
+
+ if (bit_adap->getsda == NULL || bit_adap->getscl == NULL)
dev_warn(&adap->dev, "Bus may be unreliable\n");
- }
+
return 0;
}
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index a7bfddf08fa7..9b8e84f20604 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -659,6 +659,13 @@ config I2C_GPIO_FAULT_INJECTOR
faults to an I2C bus, so another bus master can be stress-tested.
This is for debugging. If unsure, say 'no'.
+config I2C_GXP
+ tristate "GXP I2C Interface"
+ depends on ARCH_HPE_GXP || COMPILE_TEST
+ help
+ This enables support for GXP I2C interface. The I2C engines can be
+ either I2C master or I2C slaves.
+
config I2C_HIGHLANDER
tristate "Highlander FPGA SMBus interface"
depends on SH_HIGHLANDER || COMPILE_TEST
@@ -723,11 +730,11 @@ config I2C_IMX_LPI2C
will be called i2c-imx-lpi2c.
config I2C_IOP3XX
- tristate "Intel IOPx3xx and IXP4xx on-chip I2C interface"
- depends on ARCH_IOP32X || ARCH_IXP4XX || COMPILE_TEST
+ tristate "Intel IXP4xx on-chip I2C interface"
+ depends on ARCH_IXP4XX || COMPILE_TEST
help
Say Y here if you want to use the IIC bus controller on
- the Intel IOPx3xx I/O Processors or IXP4xx Network Processors.
+ the Intel IXP4xx Network Processors.
This driver can also be built as a module. If so, the module
will be called i2c-iop3xx.
@@ -761,6 +768,17 @@ config I2C_LPC2K
This driver can also be built as a module. If so, the module
will be called i2c-lpc2k.
+config I2C_LS2X
+ tristate "Loongson LS2X I2C adapter"
+ depends on MACH_LOONGSON64 || COMPILE_TEST
+ help
+ If you say yes to this option, support will be included for the
+ I2C interface on the Loongson-2K SoCs and Loongson LS7A bridge
+ chip.
+
+ This driver can also be built as a module. If so, the module
+ will be called i2c-ls2x.
+
config I2C_MLXBF
tristate "Mellanox BlueField I2C controller"
depends on MELLANOX_PLATFORM && ARM64
@@ -874,7 +892,7 @@ config I2C_OCORES
config I2C_OMAP
tristate "OMAP I2C adapter"
depends on ARCH_OMAP || ARCH_K3 || COMPILE_TEST
- default y if MACH_OMAP_H3 || MACH_OMAP_OSK
+ default MACH_OMAP_OSK
help
If you say yes to this option, support will be included for the
I2C interface on the Texas Instruments OMAP1/2 family of processors.
@@ -1010,8 +1028,7 @@ config I2C_RZV2M
config I2C_S3C2410
tristate "S3C/Exynos I2C Driver"
- depends on ARCH_EXYNOS || ARCH_S3C24XX || ARCH_S3C64XX || \
- ARCH_S5PV210 || COMPILE_TEST
+ depends on ARCH_EXYNOS || ARCH_S3C64XX || ARCH_S5PV210 || COMPILE_TEST
help
Say Y here to include support for I2C controller in the
Samsung SoCs (S3C, S5Pv210, Exynos).
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index e73cdb1d2b5a..af56fe2c75c0 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -77,6 +77,7 @@ obj-$(CONFIG_I2C_IOP3XX) += i2c-iop3xx.o
obj-$(CONFIG_I2C_JZ4780) += i2c-jz4780.o
obj-$(CONFIG_I2C_KEMPLD) += i2c-kempld.o
obj-$(CONFIG_I2C_LPC2K) += i2c-lpc2k.o
+obj-$(CONFIG_I2C_LS2X) += i2c-ls2x.o
obj-$(CONFIG_I2C_MESON) += i2c-meson.o
obj-$(CONFIG_I2C_MICROCHIP_CORE) += i2c-microchip-corei2c.o
obj-$(CONFIG_I2C_MPC) += i2c-mpc.o
@@ -127,6 +128,7 @@ obj-$(CONFIG_I2C_THUNDERX) += i2c-thunderx.o
obj-$(CONFIG_I2C_XILINX) += i2c-xiic.o
obj-$(CONFIG_I2C_XLP9XX) += i2c-xlp9xx.o
obj-$(CONFIG_I2C_RCAR) += i2c-rcar.o
+obj-$(CONFIG_I2C_GXP) += i2c-gxp.o
# External I2C/SMBus adapter drivers
obj-$(CONFIG_I2C_DIOLAN_U2C) += i2c-diolan-u2c.o
diff --git a/drivers/i2c/busses/i2c-aspeed.c b/drivers/i2c/busses/i2c-aspeed.c
index c64c381b69b7..d3c99c5b3247 100644
--- a/drivers/i2c/busses/i2c-aspeed.c
+++ b/drivers/i2c/busses/i2c-aspeed.c
@@ -979,15 +979,13 @@ static int aspeed_i2c_probe_bus(struct platform_device *pdev)
const struct of_device_id *match;
struct aspeed_i2c_bus *bus;
struct clk *parent_clk;
- struct resource *res;
int irq, ret;
bus = devm_kzalloc(&pdev->dev, sizeof(*bus), GFP_KERNEL);
if (!bus)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- bus->base = devm_ioremap_resource(&pdev->dev, res);
+ bus->base = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
if (IS_ERR(bus->base))
return PTR_ERR(bus->base);
diff --git a/drivers/i2c/busses/i2c-au1550.c b/drivers/i2c/busses/i2c-au1550.c
index 99bd24d0e6a5..7b42d35b1294 100644
--- a/drivers/i2c/busses/i2c-au1550.c
+++ b/drivers/i2c/busses/i2c-au1550.c
@@ -302,7 +302,6 @@ static int
i2c_au1550_probe(struct platform_device *pdev)
{
struct i2c_au1550_data *priv;
- struct resource *r;
int ret;
priv = devm_kzalloc(&pdev->dev, sizeof(struct i2c_au1550_data),
@@ -310,8 +309,7 @@ i2c_au1550_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->psc_base = devm_ioremap_resource(&pdev->dev, r);
+ priv->psc_base = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
if (IS_ERR(priv->psc_base))
return PTR_ERR(priv->psc_base);
diff --git a/drivers/i2c/busses/i2c-bcm2835.c b/drivers/i2c/busses/i2c-bcm2835.c
index f72c6576d8a3..09a077b31bfe 100644
--- a/drivers/i2c/busses/i2c-bcm2835.c
+++ b/drivers/i2c/busses/i2c-bcm2835.c
@@ -407,7 +407,6 @@ static const struct i2c_adapter_quirks bcm2835_i2c_quirks = {
static int bcm2835_i2c_probe(struct platform_device *pdev)
{
struct bcm2835_i2c_dev *i2c_dev;
- struct resource *mem;
int ret;
struct i2c_adapter *adap;
struct clk *mclk;
@@ -420,8 +419,7 @@ static int bcm2835_i2c_probe(struct platform_device *pdev)
i2c_dev->dev = &pdev->dev;
init_completion(&i2c_dev->completion);
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- i2c_dev->regs = devm_ioremap_resource(&pdev->dev, mem);
+ i2c_dev->regs = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
if (IS_ERR(i2c_dev->regs))
return PTR_ERR(i2c_dev->regs);
diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c
index f58943cb1341..b5d22e7282c2 100644
--- a/drivers/i2c/busses/i2c-cadence.c
+++ b/drivers/i2c/busses/i2c-cadence.c
@@ -115,8 +115,6 @@
#define CNDS_I2C_PM_TIMEOUT 1000 /* ms */
#define CDNS_I2C_FIFO_DEPTH 16
-/* FIFO depth at which the DATA interrupt occurs */
-#define CDNS_I2C_DATA_INTR_DEPTH (CDNS_I2C_FIFO_DEPTH - 2)
#define CDNS_I2C_MAX_TRANSFER_SIZE 255
/* Transfer size in multiples of data interrupt depth */
#define CDNS_I2C_TRANSFER_SIZE (CDNS_I2C_MAX_TRANSFER_SIZE - 3)
@@ -175,7 +173,6 @@ enum cdns_i2c_slave_state {
* @send_count: Number of bytes still expected to send
* @recv_count: Number of bytes still expected to receive
* @curr_recv_count: Number of bytes to be received in current transfer
- * @irq: IRQ number
* @input_clk: Input clock to I2C controller
* @i2c_clk: Maximum I2C clock speed
* @bus_hold_flag: Flag used in repeated start for clearing HOLD bit
@@ -200,7 +197,6 @@ struct cdns_i2c {
unsigned int send_count;
unsigned int recv_count;
unsigned int curr_recv_count;
- int irq;
unsigned long input_clk;
unsigned int i2c_clk;
unsigned int bus_hold_flag;
@@ -616,9 +612,7 @@ static void cdns_i2c_mrecv(struct cdns_i2c *id)
}
/* Determine hold_clear based on number of bytes to receive and hold flag */
- if (!id->bus_hold_flag &&
- ((id->p_msg->flags & I2C_M_RECV_LEN) != I2C_M_RECV_LEN) &&
- (id->recv_count <= CDNS_I2C_FIFO_DEPTH)) {
+ if (!id->bus_hold_flag && id->recv_count <= CDNS_I2C_FIFO_DEPTH) {
if (cdns_i2c_readreg(CDNS_I2C_CR_OFFSET) & CDNS_I2C_CR_HOLD) {
hold_clear = true;
if (id->quirks & CDNS_I2C_BROKEN_HOLD_BIT)
@@ -1246,7 +1240,7 @@ static int cdns_i2c_probe(struct platform_device *pdev)
{
struct resource *r_mem;
struct cdns_i2c *id;
- int ret;
+ int ret, irq;
const struct of_device_id *match;
id = devm_kzalloc(&pdev->dev, sizeof(*id), GFP_KERNEL);
@@ -1277,10 +1271,9 @@ static int cdns_i2c_probe(struct platform_device *pdev)
if (IS_ERR(id->membase))
return PTR_ERR(id->membase);
- ret = platform_get_irq(pdev, 0);
- if (ret < 0)
- return ret;
- id->irq = ret;
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
id->adap.owner = THIS_MODULE;
id->adap.dev.of_node = pdev->dev.of_node;
@@ -1331,10 +1324,10 @@ static int cdns_i2c_probe(struct platform_device *pdev)
goto err_clk_dis;
}
- ret = devm_request_irq(&pdev->dev, id->irq, cdns_i2c_isr, 0,
+ ret = devm_request_irq(&pdev->dev, irq, cdns_i2c_isr, 0,
DRIVER_NAME, id);
if (ret) {
- dev_err(&pdev->dev, "cannot get irq %d\n", id->irq);
+ dev_err(&pdev->dev, "cannot get irq %d\n", irq);
goto err_clk_dis;
}
cdns_i2c_init(id);
@@ -1344,7 +1337,7 @@ static int cdns_i2c_probe(struct platform_device *pdev)
goto err_clk_dis;
dev_info(&pdev->dev, "%u kHz mmio %08lx irq %d\n",
- id->i2c_clk / 1000, (unsigned long)r_mem->start, id->irq);
+ id->i2c_clk / 1000, (unsigned long)r_mem->start, irq);
return 0;
diff --git a/drivers/i2c/busses/i2c-cht-wc.c b/drivers/i2c/busses/i2c-cht-wc.c
index 190abdc46dd3..2b2c3d090089 100644
--- a/drivers/i2c/busses/i2c-cht-wc.c
+++ b/drivers/i2c/busses/i2c-cht-wc.c
@@ -380,6 +380,49 @@ static struct i2c_board_info lenovo_yogabook1_board_info = {
.platform_data = &bq2589x_pdata,
};
+/********** Lenovo Yogabook YT3-X90F charger settings **********/
+static const char * const lenovo_yt3_bq25892_1_suppliers[] = { "cht_wcove_pwrsrc" };
+
+/*
+ * bq25892 charger settings for the round li-ion cells in the hinge,
+ * this is the main / biggest battery.
+ */
+static const struct property_entry lenovo_yt3_bq25892_1_props[] = {
+ PROPERTY_ENTRY_STRING_ARRAY("supplied-from", lenovo_yt3_bq25892_1_suppliers),
+ PROPERTY_ENTRY_STRING("linux,secondary-charger-name", "bq25890-charger-0"),
+ PROPERTY_ENTRY_U32("linux,iinlim-percentage", 60),
+ PROPERTY_ENTRY_U32("linux,pump-express-vbus-max", 12000000),
+ PROPERTY_ENTRY_BOOL("linux,skip-reset"),
+ /*
+ * The firmware sets everything to the defaults, leading to a low(ish)
+ * charge-current and battery-voltage of 2048mA resp 4.2V. Use the
+ * Android values instead of "linux,read-back-settings" to fix this.
+ */
+ PROPERTY_ENTRY_U32("ti,charge-current", 3072000),
+ PROPERTY_ENTRY_U32("ti,battery-regulation-voltage", 4352000),
+ PROPERTY_ENTRY_U32("ti,termination-current", 128000),
+ PROPERTY_ENTRY_U32("ti,precharge-current", 128000),
+ PROPERTY_ENTRY_U32("ti,minimum-sys-voltage", 3700000),
+ PROPERTY_ENTRY_BOOL("ti,use-ilim-pin"),
+ /* Set 5V boost current-limit to 1.2A (MAX/POR values are 2.45A/1.4A) */
+ PROPERTY_ENTRY_U32("ti,boost-voltage", 4998000),
+ PROPERTY_ENTRY_U32("ti,boost-max-current", 1200000),
+ { }
+};
+
+static const struct software_node lenovo_yt3_bq25892_1_node = {
+ .properties = lenovo_yt3_bq25892_1_props,
+};
+
+/* bq25892 charger for the round li-ion cells in the hinge */
+static struct i2c_board_info lenovo_yoga_tab3_board_info = {
+ .type = "bq25892",
+ .addr = 0x6b,
+ .dev_name = "bq25892_1",
+ .swnode = &lenovo_yt3_bq25892_1_node,
+ .platform_data = &bq2589x_pdata,
+};
+
static int cht_wc_i2c_adap_i2c_probe(struct platform_device *pdev)
{
struct intel_soc_pmic *pmic = dev_get_drvdata(pdev->dev.parent);
@@ -459,6 +502,9 @@ static int cht_wc_i2c_adap_i2c_probe(struct platform_device *pdev)
case INTEL_CHT_WC_LENOVO_YOGABOOK1:
board_info = &lenovo_yogabook1_board_info;
break;
+ case INTEL_CHT_WC_LENOVO_YT3_X90:
+ board_info = &lenovo_yoga_tab3_board_info;
+ break;
default:
dev_warn(&pdev->dev, "Unknown model, not instantiating charger device\n");
break;
diff --git a/drivers/i2c/busses/i2c-designware-common.c b/drivers/i2c/busses/i2c-designware-common.c
index 581e02cc979a..0dc6b1ce663f 100644
--- a/drivers/i2c/busses/i2c-designware-common.c
+++ b/drivers/i2c/busses/i2c-designware-common.c
@@ -391,7 +391,7 @@ u32 i2c_dw_scl_lcnt(u32 ic_clk, u32 tLOW, u32 tf, int offset)
int i2c_dw_set_sda_hold(struct dw_i2c_dev *dev)
{
- u32 reg;
+ unsigned int reg;
int ret;
ret = i2c_dw_acquire_lock(dev);
@@ -442,7 +442,7 @@ err_release_lock:
void __i2c_dw_disable(struct dw_i2c_dev *dev)
{
int timeout = 100;
- u32 status;
+ unsigned int status;
do {
__i2c_dw_disable_nowait(dev);
@@ -465,7 +465,7 @@ void __i2c_dw_disable(struct dw_i2c_dev *dev)
dev_warn(dev->dev, "timeout in disabling adapter\n");
}
-unsigned long i2c_dw_clk_rate(struct dw_i2c_dev *dev)
+u32 i2c_dw_clk_rate(struct dw_i2c_dev *dev)
{
/*
* Clock is not necessary if we got LCNT/HCNT values directly from
@@ -527,7 +527,7 @@ void i2c_dw_release_lock(struct dw_i2c_dev *dev)
*/
int i2c_dw_wait_bus_not_busy(struct dw_i2c_dev *dev)
{
- u32 status;
+ unsigned int status;
int ret;
ret = regmap_read_poll_timeout(dev->map, DW_IC_STATUS, status,
@@ -571,7 +571,8 @@ int i2c_dw_handle_tx_abort(struct dw_i2c_dev *dev)
int i2c_dw_set_fifo_size(struct dw_i2c_dev *dev)
{
- u32 param, tx_fifo_depth, rx_fifo_depth;
+ u32 tx_fifo_depth, rx_fifo_depth;
+ unsigned int param;
int ret;
/*
@@ -611,7 +612,7 @@ u32 i2c_dw_func(struct i2c_adapter *adap)
void i2c_dw_disable(struct dw_i2c_dev *dev)
{
- u32 dummy;
+ unsigned int dummy;
int ret;
ret = i2c_dw_acquire_lock(dev);
diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h
index 95ebc5eaa5d1..050d8c63ad3c 100644
--- a/drivers/i2c/busses/i2c-designware-core.h
+++ b/drivers/i2c/busses/i2c-designware-core.h
@@ -37,6 +37,7 @@
#define DW_IC_CON_STOP_DET_IFADDRESSED BIT(7)
#define DW_IC_CON_TX_EMPTY_CTRL BIT(8)
#define DW_IC_CON_RX_FIFO_FULL_HLD_CTRL BIT(9)
+#define DW_IC_CON_BUS_CLEAR_CTRL BIT(11)
#define DW_IC_DATA_CMD_DAT GENMASK(7, 0)
@@ -264,7 +265,7 @@ struct dw_i2c_dev {
u8 *rx_buf;
int msg_err;
unsigned int status;
- u32 abort_source;
+ unsigned int abort_source;
int irq;
u32 flags;
struct i2c_adapter adapter;
@@ -320,7 +321,7 @@ int i2c_dw_init_regmap(struct dw_i2c_dev *dev);
u32 i2c_dw_scl_hcnt(u32 ic_clk, u32 tSYMBOL, u32 tf, int cond, int offset);
u32 i2c_dw_scl_lcnt(u32 ic_clk, u32 tLOW, u32 tf, int offset);
int i2c_dw_set_sda_hold(struct dw_i2c_dev *dev);
-unsigned long i2c_dw_clk_rate(struct dw_i2c_dev *dev);
+u32 i2c_dw_clk_rate(struct dw_i2c_dev *dev);
int i2c_dw_prepare_clk(struct dw_i2c_dev *dev, bool prepare);
int i2c_dw_acquire_lock(struct dw_i2c_dev *dev);
void i2c_dw_release_lock(struct dw_i2c_dev *dev);
diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c
index 45f569155bfe..55ea91a63382 100644
--- a/drivers/i2c/busses/i2c-designware-master.c
+++ b/drivers/i2c/busses/i2c-designware-master.c
@@ -39,7 +39,7 @@ static void i2c_dw_configure_fifo_master(struct dw_i2c_dev *dev)
static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev)
{
- u32 comp_param1;
+ unsigned int comp_param1;
u32 sda_falling_time, scl_falling_time;
struct i2c_timings *t = &dev->timings;
const char *fp_str = "";
@@ -211,7 +211,7 @@ static void i2c_dw_xfer_init(struct dw_i2c_dev *dev)
{
struct i2c_msg *msgs = dev->msgs;
u32 ic_con = 0, ic_tar = 0;
- u32 dummy;
+ unsigned int dummy;
/* Disable the adapter */
__i2c_dw_disable(dev);
@@ -287,7 +287,7 @@ static int amd_i2c_dw_xfer_quirk(struct i2c_adapter *adap, struct i2c_msg *msgs,
int msg_wrt_idx, msg_itr_lmt, buf_len, data_idx;
int cmd = 0, status;
u8 *tx_buf;
- u32 val;
+ unsigned int val;
/*
* In order to enable the interrupt for UCSI i.e. AMD NAVI GPU card,
@@ -505,7 +505,8 @@ i2c_dw_read(struct dw_i2c_dev *dev)
unsigned int rx_valid;
for (; dev->msg_read_idx < dev->msgs_num; dev->msg_read_idx++) {
- u32 len, tmp;
+ unsigned int tmp;
+ u32 len;
u8 *buf;
if (!(msgs[dev->msg_read_idx].flags & I2C_M_RD))
@@ -653,7 +654,7 @@ static const struct i2c_adapter_quirks i2c_dw_quirks = {
static u32 i2c_dw_read_clear_intrbits(struct dw_i2c_dev *dev)
{
- u32 stat, dummy;
+ unsigned int stat, dummy;
/*
* The IC_INTR_STAT register just indicates "enabled" interrupts.
@@ -714,7 +715,7 @@ static u32 i2c_dw_read_clear_intrbits(struct dw_i2c_dev *dev)
static irqreturn_t i2c_dw_isr(int this_irq, void *dev_id)
{
struct dw_i2c_dev *dev = dev_id;
- u32 stat, enabled;
+ unsigned int stat, enabled;
regmap_read(dev->map, DW_IC_ENABLE, &enabled);
regmap_read(dev->map, DW_IC_RAW_INTR_STAT, &stat);
@@ -865,6 +866,7 @@ int i2c_dw_probe_master(struct dw_i2c_dev *dev)
{
struct i2c_adapter *adap = &dev->adapter;
unsigned long irq_flags;
+ unsigned int ic_con;
int ret;
init_completion(&dev->cmd_complete);
@@ -884,6 +886,25 @@ int i2c_dw_probe_master(struct dw_i2c_dev *dev)
if (ret)
return ret;
+ /* Lock the bus for accessing DW_IC_CON */
+ ret = i2c_dw_acquire_lock(dev);
+ if (ret)
+ return ret;
+
+ /*
+ * On AMD platforms BIOS advertises the bus clear feature
+ * and enables the SCL/SDA stuck low. SMU FW does the
+ * bus recovery process. Driver should not ignore this BIOS
+ * advertisement of bus clear feature.
+ */
+ ret = regmap_read(dev->map, DW_IC_CON, &ic_con);
+ i2c_dw_release_lock(dev);
+ if (ret)
+ return ret;
+
+ if (ic_con & DW_IC_CON_BUS_CLEAR_CTRL)
+ dev->master_cfg |= DW_IC_CON_BUS_CLEAR_CTRL;
+
ret = dev->init(dev);
if (ret)
return ret;
diff --git a/drivers/i2c/busses/i2c-designware-slave.c b/drivers/i2c/busses/i2c-designware-slave.c
index c6d2e4c2ac23..cec25054bb24 100644
--- a/drivers/i2c/busses/i2c-designware-slave.c
+++ b/drivers/i2c/busses/i2c-designware-slave.c
@@ -98,7 +98,7 @@ static int i2c_dw_unreg_slave(struct i2c_client *slave)
static u32 i2c_dw_read_clear_intrbits_slave(struct dw_i2c_dev *dev)
{
- u32 stat, dummy;
+ unsigned int stat, dummy;
/*
* The IC_INTR_STAT register just indicates "enabled" interrupts.
@@ -150,7 +150,7 @@ static u32 i2c_dw_read_clear_intrbits_slave(struct dw_i2c_dev *dev)
static irqreturn_t i2c_dw_isr_slave(int this_irq, void *dev_id)
{
struct dw_i2c_dev *dev = dev_id;
- u32 raw_stat, stat, enabled, tmp;
+ unsigned int raw_stat, stat, enabled, tmp;
u8 val = 0, slave_activity;
regmap_read(dev->map, DW_IC_ENABLE, &enabled);
diff --git a/drivers/i2c/busses/i2c-gpio.c b/drivers/i2c/busses/i2c-gpio.c
index 0e4385a9bcf7..1794c0399f22 100644
--- a/drivers/i2c/busses/i2c-gpio.c
+++ b/drivers/i2c/busses/i2c-gpio.c
@@ -13,9 +13,9 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
-#include <linux/of.h>
#include <linux/platform_data/i2c-gpio.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/slab.h>
struct i2c_gpio_private_data {
@@ -300,22 +300,29 @@ static inline void i2c_gpio_fault_injector_init(struct platform_device *pdev) {}
static inline void i2c_gpio_fault_injector_exit(struct platform_device *pdev) {}
#endif /* CONFIG_I2C_GPIO_FAULT_INJECTOR*/
-static void of_i2c_gpio_get_props(struct device_node *np,
- struct i2c_gpio_platform_data *pdata)
+/* Get i2c-gpio properties from DT or ACPI table */
+static void i2c_gpio_get_properties(struct device *dev,
+ struct i2c_gpio_platform_data *pdata)
{
u32 reg;
- of_property_read_u32(np, "i2c-gpio,delay-us", &pdata->udelay);
+ device_property_read_u32(dev, "i2c-gpio,delay-us", &pdata->udelay);
- if (!of_property_read_u32(np, "i2c-gpio,timeout-ms", &reg))
+ if (!device_property_read_u32(dev, "i2c-gpio,timeout-ms", &reg))
pdata->timeout = msecs_to_jiffies(reg);
pdata->sda_is_open_drain =
- of_property_read_bool(np, "i2c-gpio,sda-open-drain");
+ device_property_read_bool(dev, "i2c-gpio,sda-open-drain");
pdata->scl_is_open_drain =
- of_property_read_bool(np, "i2c-gpio,scl-open-drain");
+ device_property_read_bool(dev, "i2c-gpio,scl-open-drain");
pdata->scl_is_output_only =
- of_property_read_bool(np, "i2c-gpio,scl-output-only");
+ device_property_read_bool(dev, "i2c-gpio,scl-output-only");
+ pdata->sda_is_output_only =
+ device_property_read_bool(dev, "i2c-gpio,sda-output-only");
+ pdata->sda_has_no_pullup =
+ device_property_read_bool(dev, "i2c-gpio,sda-has-no-pullup");
+ pdata->scl_has_no_pullup =
+ device_property_read_bool(dev, "i2c-gpio,scl-has-no-pullup");
}
static struct gpio_desc *i2c_gpio_get_desc(struct device *dev,
@@ -361,7 +368,7 @@ static int i2c_gpio_probe(struct platform_device *pdev)
struct i2c_algo_bit_data *bit_data;
struct i2c_adapter *adap;
struct device *dev = &pdev->dev;
- struct device_node *np = dev->of_node;
+ struct fwnode_handle *fwnode = dev_fwnode(dev);
enum gpiod_flags gflags;
int ret;
@@ -373,8 +380,8 @@ static int i2c_gpio_probe(struct platform_device *pdev)
bit_data = &priv->bit_data;
pdata = &priv->pdata;
- if (np) {
- of_i2c_gpio_get_props(np, pdata);
+ if (fwnode) {
+ i2c_gpio_get_properties(dev, pdata);
} else {
/*
* If all platform data settings are zero it is OK
@@ -392,7 +399,7 @@ static int i2c_gpio_probe(struct platform_device *pdev)
* handle them as we handle any other output. Else we enforce open
* drain as this is required for an I2C bus.
*/
- if (pdata->sda_is_open_drain)
+ if (pdata->sda_is_open_drain || pdata->sda_has_no_pullup)
gflags = GPIOD_OUT_HIGH;
else
gflags = GPIOD_OUT_HIGH_OPEN_DRAIN;
@@ -400,7 +407,7 @@ static int i2c_gpio_probe(struct platform_device *pdev)
if (IS_ERR(priv->sda))
return PTR_ERR(priv->sda);
- if (pdata->scl_is_open_drain)
+ if (pdata->scl_is_open_drain || pdata->scl_has_no_pullup)
gflags = GPIOD_OUT_HIGH;
else
gflags = GPIOD_OUT_HIGH_OPEN_DRAIN;
@@ -418,7 +425,8 @@ static int i2c_gpio_probe(struct platform_device *pdev)
if (!pdata->scl_is_output_only)
bit_data->getscl = i2c_gpio_getscl;
- bit_data->getsda = i2c_gpio_getsda;
+ if (!pdata->sda_is_output_only)
+ bit_data->getsda = i2c_gpio_getsda;
if (pdata->udelay)
bit_data->udelay = pdata->udelay;
@@ -435,7 +443,7 @@ static int i2c_gpio_probe(struct platform_device *pdev)
bit_data->data = priv;
adap->owner = THIS_MODULE;
- if (np)
+ if (fwnode)
strscpy(adap->name, dev_name(dev), sizeof(adap->name));
else
snprintf(adap->name, sizeof(adap->name), "i2c-gpio%d", pdev->id);
@@ -443,7 +451,7 @@ static int i2c_gpio_probe(struct platform_device *pdev)
adap->algo_data = bit_data;
adap->class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
adap->dev.parent = dev;
- adap->dev.of_node = np;
+ device_set_node(&adap->dev, fwnode);
adap->nr = pdev->id;
ret = i2c_bit_add_numbered_bus(adap);
@@ -489,10 +497,17 @@ static const struct of_device_id i2c_gpio_dt_ids[] = {
MODULE_DEVICE_TABLE(of, i2c_gpio_dt_ids);
+static const struct acpi_device_id i2c_gpio_acpi_match[] = {
+ { "LOON0005" }, /* LoongArch */
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, i2c_gpio_acpi_match);
+
static struct platform_driver i2c_gpio_driver = {
.driver = {
.name = "i2c-gpio",
.of_match_table = i2c_gpio_dt_ids,
+ .acpi_match_table = i2c_gpio_acpi_match,
},
.probe = i2c_gpio_probe,
.remove = i2c_gpio_remove,
diff --git a/drivers/i2c/busses/i2c-gxp.c b/drivers/i2c/busses/i2c-gxp.c
new file mode 100644
index 000000000000..da4c8e5a8039
--- /dev/null
+++ b/drivers/i2c/busses/i2c-gxp.c
@@ -0,0 +1,620 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2022 Hewlett-Packard Enterprise Development Company, L.P. */
+
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+
+#define GXP_MAX_I2C_ENGINE 10
+static const char * const gxp_i2c_name[] = {
+ "gxp-i2c0", "gxp-i2c1", "gxp-i2c2", "gxp-i2c3",
+ "gxp-i2c4", "gxp-i2c5", "gxp-i2c6", "gxp-i2c7",
+ "gxp-i2c8", "gxp-i2c9" };
+
+/* GXP I2C Global interrupt status/enable register*/
+#define GXP_I2CINTSTAT 0x00
+#define GXP_I2CINTEN 0x04
+
+/* GXP I2C registers */
+#define GXP_I2CSTAT 0x00
+#define MASK_STOP_EVENT 0x20
+#define MASK_ACK 0x08
+#define MASK_RW 0x04
+#define GXP_I2CEVTERR 0x01
+#define MASK_SLAVE_CMD_EVENT 0x01
+#define MASK_SLAVE_DATA_EVENT 0x02
+#define MASK_MASTER_EVENT 0x10
+#define GXP_I2CSNPDAT 0x02
+#define GXP_I2CMCMD 0x04
+#define GXP_I2CSCMD 0x06
+#define GXP_I2CSNPAA 0x09
+#define GXP_I2CADVFEAT 0x0A
+#define GXP_I2COWNADR 0x0B
+#define GXP_I2CFREQDIV 0x0C
+#define GXP_I2CFLTFAIR 0x0D
+#define GXP_I2CTMOEDG 0x0E
+#define GXP_I2CCYCTIM 0x0F
+
+/* I2CSCMD Bits */
+#define SNOOP_EVT_CLR 0x80
+#define SLAVE_EVT_CLR 0x40
+#define SNOOP_EVT_MASK 0x20
+#define SLAVE_EVT_MASK 0x10
+#define SLAVE_ACK_ENAB 0x08
+#define SLAVE_EVT_STALL 0x01
+
+/* I2CMCMD Bits */
+#define MASTER_EVT_CLR 0x80
+#define MASTER_ACK_ENAB 0x08
+#define RW_CMD 0x04
+#define STOP_CMD 0x02
+#define START_CMD 0x01
+
+/* I2CTMOEDG value */
+#define GXP_DATA_EDGE_RST_CTRL 0x0a /* 30ns */
+
+/* I2CFLTFAIR Bits */
+#define FILTER_CNT 0x30
+#define FAIRNESS_CNT 0x02
+
+enum {
+ GXP_I2C_IDLE = 0,
+ GXP_I2C_ADDR_PHASE,
+ GXP_I2C_RDATA_PHASE,
+ GXP_I2C_WDATA_PHASE,
+ GXP_I2C_ADDR_NACK,
+ GXP_I2C_DATA_NACK,
+ GXP_I2C_ERROR,
+ GXP_I2C_COMP
+};
+
+struct gxp_i2c_drvdata {
+ struct device *dev;
+ void __iomem *base;
+ struct i2c_timings t;
+ u32 engine;
+ int irq;
+ struct completion completion;
+ struct i2c_adapter adapter;
+ struct i2c_msg *curr_msg;
+ int msgs_remaining;
+ int msgs_num;
+ u8 *buf;
+ size_t buf_remaining;
+ unsigned char state;
+ struct i2c_client *slave;
+ unsigned char stopped;
+};
+
+static struct regmap *i2cg_map;
+
+static void gxp_i2c_start(struct gxp_i2c_drvdata *drvdata)
+{
+ u16 value;
+
+ drvdata->buf = drvdata->curr_msg->buf;
+ drvdata->buf_remaining = drvdata->curr_msg->len;
+
+ /* Note: Address in struct i2c_msg is 7 bits */
+ value = drvdata->curr_msg->addr << 9;
+
+ /* Read or Write */
+ value |= drvdata->curr_msg->flags & I2C_M_RD ? RW_CMD | START_CMD : START_CMD;
+
+ drvdata->state = GXP_I2C_ADDR_PHASE;
+ writew(value, drvdata->base + GXP_I2CMCMD);
+}
+
+static int gxp_i2c_master_xfer(struct i2c_adapter *adapter,
+ struct i2c_msg *msgs, int num)
+{
+ int ret;
+ struct gxp_i2c_drvdata *drvdata = i2c_get_adapdata(adapter);
+ unsigned long time_left;
+
+ drvdata->msgs_remaining = num;
+ drvdata->curr_msg = msgs;
+ drvdata->msgs_num = num;
+ reinit_completion(&drvdata->completion);
+
+ gxp_i2c_start(drvdata);
+
+ time_left = wait_for_completion_timeout(&drvdata->completion,
+ adapter->timeout);
+ ret = num - drvdata->msgs_remaining;
+ if (time_left == 0) {
+ switch (drvdata->state) {
+ case GXP_I2C_WDATA_PHASE:
+ break;
+ case GXP_I2C_RDATA_PHASE:
+ break;
+ case GXP_I2C_ADDR_PHASE:
+ break;
+ default:
+ break;
+ }
+ return -ETIMEDOUT;
+ }
+
+ if (drvdata->state == GXP_I2C_ADDR_NACK ||
+ drvdata->state == GXP_I2C_DATA_NACK)
+ return -EIO;
+
+ return ret;
+}
+
+static u32 gxp_i2c_func(struct i2c_adapter *adap)
+{
+ if (IS_ENABLED(CONFIG_I2C_SLAVE))
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_SLAVE;
+
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+#if IS_ENABLED(CONFIG_I2C_SLAVE)
+static int gxp_i2c_reg_slave(struct i2c_client *slave)
+{
+ struct gxp_i2c_drvdata *drvdata = i2c_get_adapdata(slave->adapter);
+
+ if (drvdata->slave)
+ return -EBUSY;
+
+ if (slave->flags & I2C_CLIENT_TEN)
+ return -EAFNOSUPPORT;
+
+ drvdata->slave = slave;
+
+ writeb(slave->addr << 1, drvdata->base + GXP_I2COWNADR);
+ writeb(SLAVE_EVT_CLR | SNOOP_EVT_MASK | SLAVE_ACK_ENAB |
+ SLAVE_EVT_STALL, drvdata->base + GXP_I2CSCMD);
+
+ return 0;
+}
+
+static int gxp_i2c_unreg_slave(struct i2c_client *slave)
+{
+ struct gxp_i2c_drvdata *drvdata = i2c_get_adapdata(slave->adapter);
+
+ WARN_ON(!drvdata->slave);
+
+ writeb(0x00, drvdata->base + GXP_I2COWNADR);
+ writeb(SNOOP_EVT_CLR | SLAVE_EVT_CLR | SNOOP_EVT_MASK |
+ SLAVE_EVT_MASK, drvdata->base + GXP_I2CSCMD);
+
+ drvdata->slave = NULL;
+
+ return 0;
+}
+#endif
+
+static const struct i2c_algorithm gxp_i2c_algo = {
+ .master_xfer = gxp_i2c_master_xfer,
+ .functionality = gxp_i2c_func,
+#if IS_ENABLED(CONFIG_I2C_SLAVE)
+ .reg_slave = gxp_i2c_reg_slave,
+ .unreg_slave = gxp_i2c_unreg_slave,
+#endif
+};
+
+static void gxp_i2c_stop(struct gxp_i2c_drvdata *drvdata)
+{
+ /* Clear event and send stop */
+ writeb(MASTER_EVT_CLR | STOP_CMD, drvdata->base + GXP_I2CMCMD);
+
+ complete(&drvdata->completion);
+}
+
+static void gxp_i2c_restart(struct gxp_i2c_drvdata *drvdata)
+{
+ u16 value;
+
+ drvdata->buf = drvdata->curr_msg->buf;
+ drvdata->buf_remaining = drvdata->curr_msg->len;
+
+ value = drvdata->curr_msg->addr << 9;
+
+ if (drvdata->curr_msg->flags & I2C_M_RD) {
+ /* Read and clear master event */
+ value |= MASTER_EVT_CLR | RW_CMD | START_CMD;
+ } else {
+ /* Write and clear master event */
+ value |= MASTER_EVT_CLR | START_CMD;
+ }
+
+ drvdata->state = GXP_I2C_ADDR_PHASE;
+
+ writew(value, drvdata->base + GXP_I2CMCMD);
+}
+
+static void gxp_i2c_chk_addr_ack(struct gxp_i2c_drvdata *drvdata)
+{
+ u16 value;
+
+ value = readb(drvdata->base + GXP_I2CSTAT);
+ if (!(value & MASK_ACK)) {
+ /* Got no ack, stop */
+ drvdata->state = GXP_I2C_ADDR_NACK;
+ gxp_i2c_stop(drvdata);
+ return;
+ }
+
+ if (drvdata->curr_msg->flags & I2C_M_RD) {
+ /* Start to read data from slave */
+ if (drvdata->buf_remaining == 0) {
+ /* No more data to read, stop */
+ drvdata->msgs_remaining--;
+ drvdata->state = GXP_I2C_COMP;
+ gxp_i2c_stop(drvdata);
+ return;
+ }
+ drvdata->state = GXP_I2C_RDATA_PHASE;
+
+ if (drvdata->buf_remaining == 1) {
+ /* The last data, do not ack */
+ writeb(MASTER_EVT_CLR | RW_CMD,
+ drvdata->base + GXP_I2CMCMD);
+ } else {
+ /* Read data and ack it */
+ writeb(MASTER_EVT_CLR | MASTER_ACK_ENAB |
+ RW_CMD, drvdata->base + GXP_I2CMCMD);
+ }
+ } else {
+ /* Start to write first data to slave */
+ if (drvdata->buf_remaining == 0) {
+ /* No more data to write, stop */
+ drvdata->msgs_remaining--;
+ drvdata->state = GXP_I2C_COMP;
+ gxp_i2c_stop(drvdata);
+ return;
+ }
+ value = *drvdata->buf;
+ value = value << 8;
+ /* Clear master event */
+ value |= MASTER_EVT_CLR;
+ drvdata->buf++;
+ drvdata->buf_remaining--;
+ drvdata->state = GXP_I2C_WDATA_PHASE;
+ writew(value, drvdata->base + GXP_I2CMCMD);
+ }
+}
+
+static void gxp_i2c_ack_data(struct gxp_i2c_drvdata *drvdata)
+{
+ u8 value;
+
+ /* Store the data returned */
+ value = readb(drvdata->base + GXP_I2CSNPDAT);
+ *drvdata->buf = value;
+ drvdata->buf++;
+ drvdata->buf_remaining--;
+
+ if (drvdata->buf_remaining == 0) {
+ /* No more data, this message is completed. */
+ drvdata->msgs_remaining--;
+
+ if (drvdata->msgs_remaining == 0) {
+ /* No more messages, stop */
+ drvdata->state = GXP_I2C_COMP;
+ gxp_i2c_stop(drvdata);
+ return;
+ }
+ /* Move to next message and start transfer */
+ drvdata->curr_msg++;
+ gxp_i2c_restart(drvdata);
+ return;
+ }
+
+ /* Ack the slave to make it send next byte */
+ drvdata->state = GXP_I2C_RDATA_PHASE;
+ if (drvdata->buf_remaining == 1) {
+ /* The last data, do not ack */
+ writeb(MASTER_EVT_CLR | RW_CMD,
+ drvdata->base + GXP_I2CMCMD);
+ } else {
+ /* Read data and ack it */
+ writeb(MASTER_EVT_CLR | MASTER_ACK_ENAB |
+ RW_CMD, drvdata->base + GXP_I2CMCMD);
+ }
+}
+
+static void gxp_i2c_chk_data_ack(struct gxp_i2c_drvdata *drvdata)
+{
+ u16 value;
+
+ value = readb(drvdata->base + GXP_I2CSTAT);
+ if (!(value & MASK_ACK)) {
+ /* Received No ack, stop */
+ drvdata->state = GXP_I2C_DATA_NACK;
+ gxp_i2c_stop(drvdata);
+ return;
+ }
+
+ /* Got ack, check if there is more data to write */
+ if (drvdata->buf_remaining == 0) {
+ /* No more data, this message is completed */
+ drvdata->msgs_remaining--;
+
+ if (drvdata->msgs_remaining == 0) {
+ /* No more messages, stop */
+ drvdata->state = GXP_I2C_COMP;
+ gxp_i2c_stop(drvdata);
+ return;
+ }
+ /* Move to next message and start transfer */
+ drvdata->curr_msg++;
+ gxp_i2c_restart(drvdata);
+ return;
+ }
+
+ /* Write data to slave */
+ value = *drvdata->buf;
+ value = value << 8;
+
+ /* Clear master event */
+ value |= MASTER_EVT_CLR;
+ drvdata->buf++;
+ drvdata->buf_remaining--;
+ drvdata->state = GXP_I2C_WDATA_PHASE;
+ writew(value, drvdata->base + GXP_I2CMCMD);
+}
+
+#if IS_ENABLED(CONFIG_I2C_SLAVE)
+static bool gxp_i2c_slave_irq_handler(struct gxp_i2c_drvdata *drvdata)
+{
+ u8 value;
+ u8 buf;
+ int ret;
+
+ value = readb(drvdata->base + GXP_I2CEVTERR);
+
+ /* Received start or stop event */
+ if (value & MASK_SLAVE_CMD_EVENT) {
+ value = readb(drvdata->base + GXP_I2CSTAT);
+ /* Master sent stop */
+ if (value & MASK_STOP_EVENT) {
+ if (drvdata->stopped == 0)
+ i2c_slave_event(drvdata->slave, I2C_SLAVE_STOP, &buf);
+ writeb(SLAVE_EVT_CLR | SNOOP_EVT_MASK |
+ SLAVE_ACK_ENAB | SLAVE_EVT_STALL, drvdata->base + GXP_I2CSCMD);
+ drvdata->stopped = 1;
+ } else {
+ /* Master sent start and wants to read */
+ drvdata->stopped = 0;
+ if (value & MASK_RW) {
+ i2c_slave_event(drvdata->slave,
+ I2C_SLAVE_READ_REQUESTED, &buf);
+ value = buf << 8 | (SLAVE_EVT_CLR | SNOOP_EVT_MASK |
+ SLAVE_EVT_STALL);
+ writew(value, drvdata->base + GXP_I2CSCMD);
+ } else {
+ /* Master wants to write to us */
+ ret = i2c_slave_event(drvdata->slave,
+ I2C_SLAVE_WRITE_REQUESTED, &buf);
+ if (!ret) {
+ /* Ack next byte from master */
+ writeb(SLAVE_EVT_CLR | SNOOP_EVT_MASK |
+ SLAVE_ACK_ENAB | SLAVE_EVT_STALL,
+ drvdata->base + GXP_I2CSCMD);
+ } else {
+ /* Nack next byte from master */
+ writeb(SLAVE_EVT_CLR | SNOOP_EVT_MASK |
+ SLAVE_EVT_STALL, drvdata->base + GXP_I2CSCMD);
+ }
+ }
+ }
+ } else if (value & MASK_SLAVE_DATA_EVENT) {
+ value = readb(drvdata->base + GXP_I2CSTAT);
+ /* Master wants to read */
+ if (value & MASK_RW) {
+ /* Master wants another byte */
+ if (value & MASK_ACK) {
+ i2c_slave_event(drvdata->slave,
+ I2C_SLAVE_READ_PROCESSED, &buf);
+ value = buf << 8 | (SLAVE_EVT_CLR | SNOOP_EVT_MASK |
+ SLAVE_EVT_STALL);
+ writew(value, drvdata->base + GXP_I2CSCMD);
+ } else {
+ /* No more bytes needed */
+ writew(SLAVE_EVT_CLR | SNOOP_EVT_MASK |
+ SLAVE_ACK_ENAB | SLAVE_EVT_STALL,
+ drvdata->base + GXP_I2CSCMD);
+ }
+ } else {
+ /* Master wants to write to us */
+ value = readb(drvdata->base + GXP_I2CSNPDAT);
+ buf = (uint8_t)value;
+ ret = i2c_slave_event(drvdata->slave,
+ I2C_SLAVE_WRITE_RECEIVED, &buf);
+ if (!ret) {
+ /* Ack next byte from master */
+ writeb(SLAVE_EVT_CLR | SNOOP_EVT_MASK |
+ SLAVE_ACK_ENAB | SLAVE_EVT_STALL,
+ drvdata->base + GXP_I2CSCMD);
+ } else {
+ /* Nack next byte from master */
+ writeb(SLAVE_EVT_CLR | SNOOP_EVT_MASK |
+ SLAVE_EVT_STALL, drvdata->base + GXP_I2CSCMD);
+ }
+ }
+ } else {
+ return false;
+ }
+
+ return true;
+}
+#endif
+
+static irqreturn_t gxp_i2c_irq_handler(int irq, void *_drvdata)
+{
+ struct gxp_i2c_drvdata *drvdata = (struct gxp_i2c_drvdata *)_drvdata;
+ u32 value;
+
+ /* Check if the interrupt is for the current engine */
+ regmap_read(i2cg_map, GXP_I2CINTSTAT, &value);
+ if (!(value & BIT(drvdata->engine)))
+ return IRQ_NONE;
+
+ value = readb(drvdata->base + GXP_I2CEVTERR);
+
+ /* Error */
+ if (value & ~(MASK_MASTER_EVENT | MASK_SLAVE_CMD_EVENT |
+ MASK_SLAVE_DATA_EVENT)) {
+ /* Clear all events */
+ writeb(0x00, drvdata->base + GXP_I2CEVTERR);
+ drvdata->state = GXP_I2C_ERROR;
+ gxp_i2c_stop(drvdata);
+ return IRQ_HANDLED;
+ }
+
+ if (IS_ENABLED(CONFIG_I2C_SLAVE)) {
+ /* Slave mode */
+ if (value & (MASK_SLAVE_CMD_EVENT | MASK_SLAVE_DATA_EVENT)) {
+ if (gxp_i2c_slave_irq_handler(drvdata))
+ return IRQ_HANDLED;
+ return IRQ_NONE;
+ }
+ }
+
+ /* Master mode */
+ switch (drvdata->state) {
+ case GXP_I2C_ADDR_PHASE:
+ gxp_i2c_chk_addr_ack(drvdata);
+ break;
+
+ case GXP_I2C_RDATA_PHASE:
+ gxp_i2c_ack_data(drvdata);
+ break;
+
+ case GXP_I2C_WDATA_PHASE:
+ gxp_i2c_chk_data_ack(drvdata);
+ break;
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void gxp_i2c_init(struct gxp_i2c_drvdata *drvdata)
+{
+ drvdata->state = GXP_I2C_IDLE;
+ writeb(2000000 / drvdata->t.bus_freq_hz,
+ drvdata->base + GXP_I2CFREQDIV);
+ writeb(FILTER_CNT | FAIRNESS_CNT,
+ drvdata->base + GXP_I2CFLTFAIR);
+ writeb(GXP_DATA_EDGE_RST_CTRL, drvdata->base + GXP_I2CTMOEDG);
+ writeb(0x00, drvdata->base + GXP_I2CCYCTIM);
+ writeb(0x00, drvdata->base + GXP_I2CSNPAA);
+ writeb(0x00, drvdata->base + GXP_I2CADVFEAT);
+ writeb(SNOOP_EVT_CLR | SLAVE_EVT_CLR | SNOOP_EVT_MASK |
+ SLAVE_EVT_MASK, drvdata->base + GXP_I2CSCMD);
+ writeb(MASTER_EVT_CLR, drvdata->base + GXP_I2CMCMD);
+ writeb(0x00, drvdata->base + GXP_I2CEVTERR);
+ writeb(0x00, drvdata->base + GXP_I2COWNADR);
+}
+
+static int gxp_i2c_probe(struct platform_device *pdev)
+{
+ struct gxp_i2c_drvdata *drvdata;
+ int rc;
+ struct i2c_adapter *adapter;
+
+ if (!i2cg_map) {
+ i2cg_map = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+ "hpe,sysreg");
+ if (IS_ERR(i2cg_map)) {
+ return dev_err_probe(&pdev->dev, IS_ERR(i2cg_map),
+ "failed to map i2cg_handle\n");
+ }
+
+ /* Disable interrupt */
+ regmap_update_bits(i2cg_map, GXP_I2CINTEN, 0x00000FFF, 0);
+ }
+
+ drvdata = devm_kzalloc(&pdev->dev, sizeof(*drvdata),
+ GFP_KERNEL);
+ if (!drvdata)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, drvdata);
+ drvdata->dev = &pdev->dev;
+ init_completion(&drvdata->completion);
+
+ drvdata->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(drvdata->base))
+ return PTR_ERR(drvdata->base);
+
+ /* Use physical memory address to determine which I2C engine this is. */
+ drvdata->engine = ((size_t)drvdata->base & 0xf00) >> 8;
+
+ if (drvdata->engine >= GXP_MAX_I2C_ENGINE) {
+ return dev_err_probe(&pdev->dev, -EINVAL, "i2c engine% is unsupported\n",
+ drvdata->engine);
+ }
+
+ rc = platform_get_irq(pdev, 0);
+ if (rc < 0)
+ return rc;
+
+ drvdata->irq = rc;
+ rc = devm_request_irq(&pdev->dev, drvdata->irq, gxp_i2c_irq_handler,
+ IRQF_SHARED, gxp_i2c_name[drvdata->engine], drvdata);
+ if (rc < 0)
+ return dev_err_probe(&pdev->dev, rc, "irq request failed\n");
+
+ i2c_parse_fw_timings(&pdev->dev, &drvdata->t, true);
+
+ gxp_i2c_init(drvdata);
+
+ /* Enable interrupt */
+ regmap_update_bits(i2cg_map, GXP_I2CINTEN, BIT(drvdata->engine),
+ BIT(drvdata->engine));
+
+ adapter = &drvdata->adapter;
+ i2c_set_adapdata(adapter, drvdata);
+
+ adapter->owner = THIS_MODULE;
+ strscpy(adapter->name, "HPE GXP I2C adapter", sizeof(adapter->name));
+ adapter->algo = &gxp_i2c_algo;
+ adapter->dev.parent = &pdev->dev;
+ adapter->dev.of_node = pdev->dev.of_node;
+
+ rc = i2c_add_adapter(adapter);
+ if (rc)
+ return dev_err_probe(&pdev->dev, rc, "i2c add adapter failed\n");
+
+ return 0;
+}
+
+static int gxp_i2c_remove(struct platform_device *pdev)
+{
+ struct gxp_i2c_drvdata *drvdata = platform_get_drvdata(pdev);
+
+ /* Disable interrupt */
+ regmap_update_bits(i2cg_map, GXP_I2CINTEN, BIT(drvdata->engine), 0);
+ i2c_del_adapter(&drvdata->adapter);
+
+ return 0;
+}
+
+static const struct of_device_id gxp_i2c_of_match[] = {
+ { .compatible = "hpe,gxp-i2c" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, gxp_i2c_of_match);
+
+static struct platform_driver gxp_i2c_driver = {
+ .probe = gxp_i2c_probe,
+ .remove = gxp_i2c_remove,
+ .driver = {
+ .name = "gxp-i2c",
+ .of_match_table = gxp_i2c_of_match,
+ },
+};
+module_platform_driver(gxp_i2c_driver);
+
+MODULE_AUTHOR("Nick Hawkins <nick.hawkins@hpe.com>");
+MODULE_DESCRIPTION("HPE GXP I2C bus driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 1fda1eaa6d6a..ac5326747c51 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -434,7 +434,7 @@ static int i801_wait_intr(struct i801_priv *priv)
busy = status & SMBHSTSTS_HOST_BUSY;
status &= STATUS_ERROR_FLAGS | SMBHSTSTS_INTR;
if (!busy && status)
- return status;
+ return status & STATUS_ERROR_FLAGS;
} while (time_is_after_eq_jiffies(timeout));
return -ETIMEDOUT;
@@ -458,26 +458,20 @@ static int i801_wait_byte_done(struct i801_priv *priv)
static int i801_transaction(struct i801_priv *priv, int xact)
{
- int status;
unsigned long result;
const struct i2c_adapter *adap = &priv->adapter;
- status = i801_check_pre(priv);
- if (status < 0)
- return status;
-
if (priv->features & FEATURE_IRQ) {
reinit_completion(&priv->done);
outb_p(xact | SMBHSTCNT_INTREN | SMBHSTCNT_START,
SMBHSTCNT(priv));
result = wait_for_completion_timeout(&priv->done, adap->timeout);
- return i801_check_post(priv, result ? priv->status : -ETIMEDOUT);
+ return result ? priv->status : -ETIMEDOUT;
}
outb_p(xact | SMBHSTCNT_START, SMBHSTCNT(priv));
- status = i801_wait_intr(priv);
- return i801_check_post(priv, status);
+ return i801_wait_intr(priv);
}
static int i801_block_transaction_by_block(struct i801_priv *priv,
@@ -511,19 +505,23 @@ static int i801_block_transaction_by_block(struct i801_priv *priv,
status = i801_transaction(priv, xact);
if (status)
- return status;
+ goto out;
if (read_write == I2C_SMBUS_READ ||
command == I2C_SMBUS_BLOCK_PROC_CALL) {
len = inb_p(SMBHSTDAT0(priv));
- if (len < 1 || len > I2C_SMBUS_BLOCK_MAX)
- return -EPROTO;
+ if (len < 1 || len > I2C_SMBUS_BLOCK_MAX) {
+ status = -EPROTO;
+ goto out;
+ }
data->block[0] = len;
for (i = 0; i < len; i++)
data->block[i + 1] = inb_p(SMBBLKDAT(priv));
}
- return 0;
+out:
+ outb_p(inb_p(SMBAUXCTL(priv)) & ~SMBAUXCTL_E32B, SMBAUXCTL(priv));
+ return status;
}
static void i801_isr_byte_done(struct i801_priv *priv)
@@ -558,9 +556,6 @@ static void i801_isr_byte_done(struct i801_priv *priv)
/* Write next byte, except for IRQ after last byte */
outb_p(priv->data[++priv->count], SMBBLKDAT(priv));
}
-
- /* Clear BYTE_DONE to continue with next byte */
- outb_p(SMBHSTSTS_BYTE_DONE, SMBHSTSTS(priv));
}
static irqreturn_t i801_host_notify_isr(struct i801_priv *priv)
@@ -590,7 +585,6 @@ static irqreturn_t i801_host_notify_isr(struct i801_priv *priv)
* BUS_ERR - SMI# transaction collision
* FAILED - transaction was canceled due to a KILL request
* When any of these occur, update ->status and signal completion.
- * ->status must be cleared before kicking off the next transaction.
*
* 2) For byte-by-byte (I2C read/write) transactions, one BYTE_DONE interrupt
* occurs for each byte of a byte-by-byte to prepare the next byte.
@@ -615,25 +609,20 @@ static irqreturn_t i801_isr(int irq, void *dev_id)
}
status = inb_p(SMBHSTSTS(priv));
- if (status & SMBHSTSTS_BYTE_DONE)
+ if ((status & (SMBHSTSTS_BYTE_DONE | STATUS_ERROR_FLAGS)) == SMBHSTSTS_BYTE_DONE)
i801_isr_byte_done(priv);
/*
- * Clear remaining IRQ sources: Completion of last command, errors
- * and the SMB_ALERT signal. SMB_ALERT status is set after signal
- * assertion independently of the interrupt generation being blocked
- * or not so clear it always when the status is set.
- */
- status &= SMBHSTSTS_INTR | STATUS_ERROR_FLAGS | SMBHSTSTS_SMBALERT_STS;
- if (status)
- outb_p(status, SMBHSTSTS(priv));
- status &= ~SMBHSTSTS_SMBALERT_STS; /* SMB_ALERT not reported */
- /*
- * Report transaction result.
- * ->status must be cleared before the next transaction is started.
+ * Clear IRQ sources: SMB_ALERT status is set after signal assertion
+ * independently of the interrupt generation being blocked or not
+ * so clear it always when the status is set.
*/
+ status &= STATUS_FLAGS | SMBHSTSTS_SMBALERT_STS;
+ outb_p(status, SMBHSTSTS(priv));
+
+ status &= STATUS_ERROR_FLAGS | SMBHSTSTS_INTR;
if (status) {
- priv->status = status;
+ priv->status = status & STATUS_ERROR_FLAGS;
complete(&priv->done);
}
@@ -658,10 +647,6 @@ static int i801_block_transaction_byte_by_byte(struct i801_priv *priv,
if (command == I2C_SMBUS_BLOCK_PROC_CALL)
return -EOPNOTSUPP;
- status = i801_check_pre(priv);
- if (status < 0)
- return status;
-
len = data->block[0];
if (read_write == I2C_SMBUS_WRITE) {
@@ -687,7 +672,7 @@ static int i801_block_transaction_byte_by_byte(struct i801_priv *priv,
reinit_completion(&priv->done);
outb_p(priv->cmd | SMBHSTCNT_START, SMBHSTCNT(priv));
result = wait_for_completion_timeout(&priv->done, adap->timeout);
- return i801_check_post(priv, result ? priv->status : -ETIMEDOUT);
+ return result ? priv->status : -ETIMEDOUT;
}
for (i = 1; i <= len; i++) {
@@ -701,7 +686,7 @@ static int i801_block_transaction_byte_by_byte(struct i801_priv *priv,
status = i801_wait_byte_done(priv);
if (status)
- goto exit;
+ return status;
if (i == 1 && read_write == I2C_SMBUS_READ
&& command != I2C_SMBUS_I2C_BLOCK_DATA) {
@@ -731,14 +716,82 @@ static int i801_block_transaction_byte_by_byte(struct i801_priv *priv,
outb_p(SMBHSTSTS_BYTE_DONE, SMBHSTSTS(priv));
}
- status = i801_wait_intr(priv);
-exit:
- return i801_check_post(priv, status);
+ return i801_wait_intr(priv);
+}
+
+static void i801_set_hstadd(struct i801_priv *priv, u8 addr, char read_write)
+{
+ outb_p((addr << 1) | (read_write & 0x01), SMBHSTADD(priv));
+}
+
+/* Single value transaction function */
+static int i801_simple_transaction(struct i801_priv *priv, union i2c_smbus_data *data,
+ u8 addr, u8 hstcmd, char read_write, int command)
+{
+ int xact, ret;
+
+ switch (command) {
+ case I2C_SMBUS_QUICK:
+ i801_set_hstadd(priv, addr, read_write);
+ xact = I801_QUICK;
+ break;
+ case I2C_SMBUS_BYTE:
+ i801_set_hstadd(priv, addr, read_write);
+ if (read_write == I2C_SMBUS_WRITE)
+ outb_p(hstcmd, SMBHSTCMD(priv));
+ xact = I801_BYTE;
+ break;
+ case I2C_SMBUS_BYTE_DATA:
+ i801_set_hstadd(priv, addr, read_write);
+ if (read_write == I2C_SMBUS_WRITE)
+ outb_p(data->byte, SMBHSTDAT0(priv));
+ outb_p(hstcmd, SMBHSTCMD(priv));
+ xact = I801_BYTE_DATA;
+ break;
+ case I2C_SMBUS_WORD_DATA:
+ i801_set_hstadd(priv, addr, read_write);
+ if (read_write == I2C_SMBUS_WRITE) {
+ outb_p(data->word & 0xff, SMBHSTDAT0(priv));
+ outb_p((data->word & 0xff00) >> 8, SMBHSTDAT1(priv));
+ }
+ outb_p(hstcmd, SMBHSTCMD(priv));
+ xact = I801_WORD_DATA;
+ break;
+ case I2C_SMBUS_PROC_CALL:
+ i801_set_hstadd(priv, addr, I2C_SMBUS_WRITE);
+ outb_p(data->word & 0xff, SMBHSTDAT0(priv));
+ outb_p((data->word & 0xff00) >> 8, SMBHSTDAT1(priv));
+ outb_p(hstcmd, SMBHSTCMD(priv));
+ read_write = I2C_SMBUS_READ;
+ xact = I801_PROC_CALL;
+ break;
+ default:
+ pci_err(priv->pci_dev, "Unsupported transaction %d\n", command);
+ return -EOPNOTSUPP;
+ }
+
+ ret = i801_transaction(priv, xact);
+ if (ret || read_write == I2C_SMBUS_WRITE)
+ return ret;
+
+ switch (command) {
+ case I2C_SMBUS_BYTE:
+ case I2C_SMBUS_BYTE_DATA:
+ data->byte = inb_p(SMBHSTDAT0(priv));
+ break;
+ case I2C_SMBUS_WORD_DATA:
+ case I2C_SMBUS_PROC_CALL:
+ data->word = inb_p(SMBHSTDAT0(priv)) +
+ (inb_p(SMBHSTDAT1(priv)) << 8);
+ break;
+ }
+
+ return 0;
}
/* Block transaction function */
static int i801_block_transaction(struct i801_priv *priv, union i2c_smbus_data *data,
- char read_write, int command)
+ u8 addr, u8 hstcmd, char read_write, int command)
{
int result = 0;
unsigned char hostc;
@@ -748,7 +801,29 @@ static int i801_block_transaction(struct i801_priv *priv, union i2c_smbus_data *
else if (data->block[0] < 1 || data->block[0] > I2C_SMBUS_BLOCK_MAX)
return -EPROTO;
- if (command == I2C_SMBUS_I2C_BLOCK_DATA) {
+ switch (command) {
+ case I2C_SMBUS_BLOCK_DATA:
+ i801_set_hstadd(priv, addr, read_write);
+ outb_p(hstcmd, SMBHSTCMD(priv));
+ break;
+ case I2C_SMBUS_I2C_BLOCK_DATA:
+ /*
+ * NB: page 240 of ICH5 datasheet shows that the R/#W
+ * bit should be cleared here, even when reading.
+ * However if SPD Write Disable is set (Lynx Point and later),
+ * the read will fail if we don't set the R/#W bit.
+ */
+ i801_set_hstadd(priv, addr,
+ priv->original_hstcfg & SMBHSTCFG_SPD_WD ?
+ read_write : I2C_SMBUS_WRITE);
+ if (read_write == I2C_SMBUS_READ) {
+ /* NB: page 240 of ICH5 datasheet also shows
+ * that DATA1 is the cmd field when reading
+ */
+ outb_p(hstcmd, SMBHSTDAT1(priv));
+ } else
+ outb_p(hstcmd, SMBHSTCMD(priv));
+
if (read_write == I2C_SMBUS_WRITE) {
/* set I2C_EN bit in configuration register */
pci_read_config_byte(priv->pci_dev, SMBHSTCFG, &hostc);
@@ -759,6 +834,12 @@ static int i801_block_transaction(struct i801_priv *priv, union i2c_smbus_data *
"I2C block read is unsupported!\n");
return -EOPNOTSUPP;
}
+ break;
+ case I2C_SMBUS_BLOCK_PROC_CALL:
+ /* Needs to be flagged as write transaction */
+ i801_set_hstadd(priv, addr, I2C_SMBUS_WRITE);
+ outb_p(hstcmd, SMBHSTCMD(priv));
+ break;
}
/* Experience has shown that the block buffer can only be used for
@@ -787,9 +868,7 @@ static s32 i801_access(struct i2c_adapter *adap, u16 addr,
unsigned short flags, char read_write, u8 command,
int size, union i2c_smbus_data *data)
{
- int hwpec;
- int block = 0;
- int ret, xact;
+ int hwpec, ret;
struct i801_priv *priv = i2c_get_adapdata(adap);
mutex_lock(&priv->acpi_lock);
@@ -800,127 +879,34 @@ static s32 i801_access(struct i2c_adapter *adap, u16 addr,
pm_runtime_get_sync(&priv->pci_dev->dev);
+ ret = i801_check_pre(priv);
+ if (ret)
+ goto out;
+
hwpec = (priv->features & FEATURE_SMBUS_PEC) && (flags & I2C_CLIENT_PEC)
&& size != I2C_SMBUS_QUICK
&& size != I2C_SMBUS_I2C_BLOCK_DATA;
- switch (size) {
- case I2C_SMBUS_QUICK:
- outb_p(((addr & 0x7f) << 1) | (read_write & 0x01),
- SMBHSTADD(priv));
- xact = I801_QUICK;
- break;
- case I2C_SMBUS_BYTE:
- outb_p(((addr & 0x7f) << 1) | (read_write & 0x01),
- SMBHSTADD(priv));
- if (read_write == I2C_SMBUS_WRITE)
- outb_p(command, SMBHSTCMD(priv));
- xact = I801_BYTE;
- break;
- case I2C_SMBUS_BYTE_DATA:
- outb_p(((addr & 0x7f) << 1) | (read_write & 0x01),
- SMBHSTADD(priv));
- outb_p(command, SMBHSTCMD(priv));
- if (read_write == I2C_SMBUS_WRITE)
- outb_p(data->byte, SMBHSTDAT0(priv));
- xact = I801_BYTE_DATA;
- break;
- case I2C_SMBUS_WORD_DATA:
- outb_p(((addr & 0x7f) << 1) | (read_write & 0x01),
- SMBHSTADD(priv));
- outb_p(command, SMBHSTCMD(priv));
- if (read_write == I2C_SMBUS_WRITE) {
- outb_p(data->word & 0xff, SMBHSTDAT0(priv));
- outb_p((data->word & 0xff00) >> 8, SMBHSTDAT1(priv));
- }
- xact = I801_WORD_DATA;
- break;
- case I2C_SMBUS_PROC_CALL:
- outb_p((addr & 0x7f) << 1, SMBHSTADD(priv));
- outb_p(command, SMBHSTCMD(priv));
- outb_p(data->word & 0xff, SMBHSTDAT0(priv));
- outb_p((data->word & 0xff00) >> 8, SMBHSTDAT1(priv));
- xact = I801_PROC_CALL;
- read_write = I2C_SMBUS_READ;
- break;
- case I2C_SMBUS_BLOCK_DATA:
- outb_p(((addr & 0x7f) << 1) | (read_write & 0x01),
- SMBHSTADD(priv));
- outb_p(command, SMBHSTCMD(priv));
- block = 1;
- break;
- case I2C_SMBUS_I2C_BLOCK_DATA:
- /*
- * NB: page 240 of ICH5 datasheet shows that the R/#W
- * bit should be cleared here, even when reading.
- * However if SPD Write Disable is set (Lynx Point and later),
- * the read will fail if we don't set the R/#W bit.
- */
- outb_p(((addr & 0x7f) << 1) |
- ((priv->original_hstcfg & SMBHSTCFG_SPD_WD) ?
- (read_write & 0x01) : 0),
- SMBHSTADD(priv));
- if (read_write == I2C_SMBUS_READ) {
- /* NB: page 240 of ICH5 datasheet also shows
- * that DATA1 is the cmd field when reading */
- outb_p(command, SMBHSTDAT1(priv));
- } else
- outb_p(command, SMBHSTCMD(priv));
- block = 1;
- break;
- case I2C_SMBUS_BLOCK_PROC_CALL:
- /*
- * Bit 0 of the slave address register always indicate a write
- * command.
- */
- outb_p((addr & 0x7f) << 1, SMBHSTADD(priv));
- outb_p(command, SMBHSTCMD(priv));
- block = 1;
- break;
- default:
- dev_err(&priv->pci_dev->dev, "Unsupported transaction %d\n",
- size);
- ret = -EOPNOTSUPP;
- goto out;
- }
-
if (hwpec) /* enable/disable hardware PEC */
outb_p(inb_p(SMBAUXCTL(priv)) | SMBAUXCTL_CRC, SMBAUXCTL(priv));
else
outb_p(inb_p(SMBAUXCTL(priv)) & (~SMBAUXCTL_CRC),
SMBAUXCTL(priv));
- if (block)
- ret = i801_block_transaction(priv, data, read_write, size);
+ if (size == I2C_SMBUS_BLOCK_DATA ||
+ size == I2C_SMBUS_I2C_BLOCK_DATA ||
+ size == I2C_SMBUS_BLOCK_PROC_CALL)
+ ret = i801_block_transaction(priv, data, addr, command, read_write, size);
else
- ret = i801_transaction(priv, xact);
-
- /* Some BIOSes don't like it when PEC is enabled at reboot or resume
- time, so we forcibly disable it after every transaction. Turn off
- E32B for the same reason. */
- if (hwpec || block)
- outb_p(inb_p(SMBAUXCTL(priv)) &
- ~(SMBAUXCTL_CRC | SMBAUXCTL_E32B), SMBAUXCTL(priv));
-
- if (block)
- goto out;
- if (ret)
- goto out;
- if ((read_write == I2C_SMBUS_WRITE) || (xact == I801_QUICK))
- goto out;
+ ret = i801_simple_transaction(priv, data, addr, command, read_write, size);
- switch (xact) {
- case I801_BYTE: /* Result put in SMBHSTDAT0 */
- case I801_BYTE_DATA:
- data->byte = inb_p(SMBHSTDAT0(priv));
- break;
- case I801_WORD_DATA:
- case I801_PROC_CALL:
- data->word = inb_p(SMBHSTDAT0(priv)) +
- (inb_p(SMBHSTDAT1(priv)) << 8);
- break;
- }
+ ret = i801_check_post(priv, ret);
+ /* Some BIOSes don't like it when PEC is enabled at reboot or resume
+ * time, so we forcibly disable it after every transaction.
+ */
+ if (hwpec)
+ outb_p(inb_p(SMBAUXCTL(priv)) & ~SMBAUXCTL_CRC, SMBAUXCTL(priv));
out:
/*
* Unlock the SMBus device for use by BIOS/ACPI,
@@ -1667,6 +1653,10 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
}
priv->features &= ~disable_features;
+ /* The block process call uses block buffer mode */
+ if (!(priv->features & FEATURE_BLOCK_BUFFER))
+ priv->features &= ~FEATURE_BLOCK_PROC;
+
err = pcim_enable_device(dev);
if (err) {
dev_err(&dev->dev, "Failed to enable SMBus PCI device (%d)\n",
@@ -1714,11 +1704,6 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
outb_p(inb_p(SMBAUXCTL(priv)) &
~(SMBAUXCTL_CRC | SMBAUXCTL_E32B), SMBAUXCTL(priv));
- /* Remember original Interrupt and Host Notify settings */
- priv->original_hstcnt = inb_p(SMBHSTCNT(priv)) & ~SMBHSTCNT_KILL;
- if (priv->features & FEATURE_HOST_NOTIFY)
- priv->original_slvcmd = inb_p(SMBSLVCMD(priv));
-
/* Default timeout in interrupt mode: 200 ms */
priv->adapter.timeout = HZ / 5;
@@ -1748,6 +1733,15 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
dev_info(&dev->dev, "SMBus using %s\n",
priv->features & FEATURE_IRQ ? "PCI interrupt" : "polling");
+ /* Host notification uses an interrupt */
+ if (!(priv->features & FEATURE_IRQ))
+ priv->features &= ~FEATURE_HOST_NOTIFY;
+
+ /* Remember original Interrupt and Host Notify settings */
+ priv->original_hstcnt = inb_p(SMBHSTCNT(priv)) & ~SMBHSTCNT_KILL;
+ if (priv->features & FEATURE_HOST_NOTIFY)
+ priv->original_slvcmd = inb_p(SMBSLVCMD(priv));
+
i801_add_tco(priv);
snprintf(priv->adapter.name, sizeof(priv->adapter.name),
diff --git a/drivers/i2c/busses/i2c-ls2x.c b/drivers/i2c/busses/i2c-ls2x.c
new file mode 100644
index 000000000000..ebae6035701d
--- /dev/null
+++ b/drivers/i2c/busses/i2c-ls2x.c
@@ -0,0 +1,370 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Loongson-2K/Loongson LS7A I2C master mode driver
+ *
+ * Copyright (C) 2013 Loongson Technology Corporation Limited.
+ * Copyright (C) 2014-2017 Lemote, Inc.
+ * Copyright (C) 2018-2022 Loongson Technology Corporation Limited.
+ *
+ * Originally written by liushaozong
+ * Rewritten for mainline by Binbin Zhou <zhoubinbin@loongson.cn>
+ */
+
+#include <linux/bits.h>
+#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/iopoll.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/units.h>
+
+/* I2C Registers */
+#define I2C_LS2X_PRER 0x0 /* Freq Division Register(16 bits) */
+#define I2C_LS2X_CTR 0x2 /* Control Register */
+#define I2C_LS2X_TXR 0x3 /* Transport Data Register */
+#define I2C_LS2X_RXR 0x3 /* Receive Data Register */
+#define I2C_LS2X_CR 0x4 /* Command Control Register */
+#define I2C_LS2X_SR 0x4 /* State Register */
+
+/* Command Control Register Bit */
+#define LS2X_CR_START BIT(7) /* Start signal */
+#define LS2X_CR_STOP BIT(6) /* Stop signal */
+#define LS2X_CR_READ BIT(5) /* Read signal */
+#define LS2X_CR_WRITE BIT(4) /* Write signal */
+#define LS2X_CR_ACK BIT(3) /* Response signal */
+#define LS2X_CR_IACK BIT(0) /* Interrupt response signal */
+
+/* State Register Bit */
+#define LS2X_SR_NOACK BIT(7) /* Receive NACK */
+#define LS2X_SR_BUSY BIT(6) /* Bus busy state */
+#define LS2X_SR_AL BIT(5) /* Arbitration lost */
+#define LS2X_SR_TIP BIT(1) /* Transmission state */
+#define LS2X_SR_IF BIT(0) /* Interrupt flag */
+
+/* Control Register Bit */
+#define LS2X_CTR_EN BIT(7) /* 0: I2c frequency setting 1: Normal */
+#define LS2X_CTR_IEN BIT(6) /* Enable i2c interrupt */
+#define LS2X_CTR_MST BIT(5) /* 0: Slave mode 1: Master mode */
+#define CTR_FREQ_MASK GENMASK(7, 6)
+#define CTR_READY_MASK GENMASK(7, 5)
+
+/* The PCLK frequency from LPB */
+#define LS2X_I2C_PCLK_FREQ (50 * HZ_PER_MHZ)
+
+/* The default bus frequency, which is an empirical value */
+#define LS2X_I2C_FREQ_STD (33 * HZ_PER_KHZ)
+
+struct ls2x_i2c_priv {
+ struct i2c_adapter adapter;
+ void __iomem *base;
+ struct i2c_timings i2c_t;
+ struct completion cmd_complete;
+};
+
+/*
+ * Interrupt service routine.
+ * This gets called whenever an I2C interrupt occurs.
+ */
+static irqreturn_t ls2x_i2c_isr(int this_irq, void *dev_id)
+{
+ struct ls2x_i2c_priv *priv = dev_id;
+
+ if (!(readb(priv->base + I2C_LS2X_SR) & LS2X_SR_IF))
+ return IRQ_NONE;
+
+ writeb(LS2X_CR_IACK, priv->base + I2C_LS2X_CR);
+ complete(&priv->cmd_complete);
+ return IRQ_HANDLED;
+}
+
+/*
+ * The ls2x i2c controller supports standard mode and fast mode, so the
+ * maximum bus frequency is '400kHz'.
+ * The bus frequency is set to the empirical value of '33KHz' by default,
+ * but it can also be taken from ACPI or FDT for compatibility with more
+ * devices.
+ */
+static void ls2x_i2c_adjust_bus_speed(struct ls2x_i2c_priv *priv)
+{
+ struct i2c_timings *t = &priv->i2c_t;
+ struct device *dev = priv->adapter.dev.parent;
+ u32 acpi_speed = i2c_acpi_find_bus_speed(dev);
+
+ i2c_parse_fw_timings(dev, t, false);
+
+ if (acpi_speed || t->bus_freq_hz)
+ t->bus_freq_hz = max(t->bus_freq_hz, acpi_speed);
+ else
+ t->bus_freq_hz = LS2X_I2C_FREQ_STD;
+
+ /* Calculate and set i2c frequency. */
+ writew(LS2X_I2C_PCLK_FREQ / (5 * t->bus_freq_hz) - 1,
+ priv->base + I2C_LS2X_PRER);
+}
+
+static void ls2x_i2c_init(struct ls2x_i2c_priv *priv)
+{
+ /* Set i2c frequency setting mode and disable interrupts. */
+ writeb(readb(priv->base + I2C_LS2X_CTR) & ~CTR_FREQ_MASK,
+ priv->base + I2C_LS2X_CTR);
+
+ ls2x_i2c_adjust_bus_speed(priv);
+
+ /* Set i2c normal operating mode and enable interrupts. */
+ writeb(readb(priv->base + I2C_LS2X_CTR) | CTR_READY_MASK,
+ priv->base + I2C_LS2X_CTR);
+}
+
+static int ls2x_i2c_xfer_byte(struct ls2x_i2c_priv *priv, u8 txdata, u8 *rxdatap)
+{
+ u8 rxdata;
+ unsigned long time_left;
+
+ writeb(txdata, priv->base + I2C_LS2X_CR);
+
+ time_left = wait_for_completion_timeout(&priv->cmd_complete,
+ priv->adapter.timeout);
+ if (!time_left)
+ return -ETIMEDOUT;
+
+ rxdata = readb(priv->base + I2C_LS2X_SR);
+ if (rxdatap)
+ *rxdatap = rxdata;
+
+ return 0;
+}
+
+static int ls2x_i2c_send_byte(struct ls2x_i2c_priv *priv, u8 txdata)
+{
+ int ret;
+ u8 rxdata;
+
+ ret = ls2x_i2c_xfer_byte(priv, txdata, &rxdata);
+ if (ret)
+ return ret;
+
+ if (rxdata & LS2X_SR_AL)
+ return -EAGAIN;
+
+ if (rxdata & LS2X_SR_NOACK)
+ return -ENXIO;
+
+ return 0;
+}
+
+static int ls2x_i2c_stop(struct ls2x_i2c_priv *priv)
+{
+ u8 value;
+
+ writeb(LS2X_CR_STOP, priv->base + I2C_LS2X_CR);
+ return readb_poll_timeout(priv->base + I2C_LS2X_SR, value,
+ !(value & LS2X_SR_BUSY), 100,
+ jiffies_to_usecs(priv->adapter.timeout));
+}
+
+static int ls2x_i2c_start(struct ls2x_i2c_priv *priv, struct i2c_msg *msgs)
+{
+ reinit_completion(&priv->cmd_complete);
+
+ writeb(i2c_8bit_addr_from_msg(msgs), priv->base + I2C_LS2X_TXR);
+ return ls2x_i2c_send_byte(priv, LS2X_CR_START | LS2X_CR_WRITE);
+}
+
+static int ls2x_i2c_rx(struct ls2x_i2c_priv *priv, struct i2c_msg *msg)
+{
+ int ret;
+ u8 rxdata, *buf = msg->buf;
+ u16 len = msg->len;
+
+ /* Contains steps to send start condition and address. */
+ ret = ls2x_i2c_start(priv, msg);
+ if (ret)
+ return ret;
+
+ while (len--) {
+ ret = ls2x_i2c_xfer_byte(priv,
+ LS2X_CR_READ | (len ? 0 : LS2X_CR_ACK),
+ &rxdata);
+ if (ret)
+ return ret;
+
+ *buf++ = readb(priv->base + I2C_LS2X_RXR);
+ }
+
+ return 0;
+}
+
+static int ls2x_i2c_tx(struct ls2x_i2c_priv *priv, struct i2c_msg *msg)
+{
+ int ret;
+ u8 *buf = msg->buf;
+ u16 len = msg->len;
+
+ /* Contains steps to send start condition and address. */
+ ret = ls2x_i2c_start(priv, msg);
+ if (ret)
+ return ret;
+
+ while (len--) {
+ writeb(*buf++, priv->base + I2C_LS2X_TXR);
+
+ ret = ls2x_i2c_send_byte(priv, LS2X_CR_WRITE);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ls2x_i2c_xfer_one(struct ls2x_i2c_priv *priv,
+ struct i2c_msg *msg, bool stop)
+{
+ int ret;
+
+ if (msg->flags & I2C_M_RD)
+ ret = ls2x_i2c_rx(priv, msg);
+ else
+ ret = ls2x_i2c_tx(priv, msg);
+
+ if (ret < 0) {
+ /* Fatel error. Needs reinit. */
+ if (ret == -ETIMEDOUT)
+ ls2x_i2c_init(priv);
+
+ return ret;
+ }
+
+ if (stop) {
+ /* Failed to issue STOP. Needs reinit. */
+ ret = ls2x_i2c_stop(priv);
+ if (ret)
+ ls2x_i2c_init(priv);
+ }
+
+ return ret;
+}
+
+static int ls2x_i2c_master_xfer(struct i2c_adapter *adap,
+ struct i2c_msg *msgs, int num)
+{
+ int ret;
+ struct i2c_msg *msg, *emsg = msgs + num;
+ struct ls2x_i2c_priv *priv = i2c_get_adapdata(adap);
+
+ for (msg = msgs; msg < emsg; msg++) {
+ ret = ls2x_i2c_xfer_one(priv, msg, msg == emsg - 1);
+ if (ret)
+ return ret;
+ }
+
+ return num;
+}
+
+static unsigned int ls2x_i2c_func(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static const struct i2c_algorithm ls2x_i2c_algo = {
+ .master_xfer = ls2x_i2c_master_xfer,
+ .functionality = ls2x_i2c_func,
+};
+
+static int ls2x_i2c_probe(struct platform_device *pdev)
+{
+ int ret, irq;
+ struct i2c_adapter *adap;
+ struct ls2x_i2c_priv *priv;
+ struct device *dev = &pdev->dev;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ /* Map hardware registers */
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ /* Add the i2c adapter */
+ adap = &priv->adapter;
+ adap->retries = 5;
+ adap->nr = pdev->id;
+ adap->dev.parent = dev;
+ adap->owner = THIS_MODULE;
+ adap->algo = &ls2x_i2c_algo;
+ adap->timeout = msecs_to_jiffies(100);
+ device_set_node(&adap->dev, dev_fwnode(dev));
+ i2c_set_adapdata(adap, priv);
+ strscpy(adap->name, pdev->name, sizeof(adap->name));
+ init_completion(&priv->cmd_complete);
+ platform_set_drvdata(pdev, priv);
+
+ ls2x_i2c_init(priv);
+
+ ret = devm_request_irq(dev, irq, ls2x_i2c_isr, IRQF_SHARED, "ls2x-i2c",
+ priv);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Unable to request irq %d\n", irq);
+
+ return devm_i2c_add_adapter(dev, adap);
+}
+
+static int ls2x_i2c_suspend(struct device *dev)
+{
+ struct ls2x_i2c_priv *priv = dev_get_drvdata(dev);
+
+ /* Disable interrupts */
+ writeb(readb(priv->base + I2C_LS2X_CTR) & ~LS2X_CTR_IEN,
+ priv->base + I2C_LS2X_CTR);
+
+ return 0;
+}
+
+static int ls2x_i2c_resume(struct device *dev)
+{
+ ls2x_i2c_init(dev_get_drvdata(dev));
+ return 0;
+}
+
+static DEFINE_RUNTIME_DEV_PM_OPS(ls2x_i2c_pm_ops,
+ ls2x_i2c_suspend, ls2x_i2c_resume, NULL);
+
+static const struct of_device_id ls2x_i2c_id_table[] = {
+ { .compatible = "loongson,ls2k-i2c" },
+ { .compatible = "loongson,ls7a-i2c" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ls2x_i2c_id_table);
+
+static const struct acpi_device_id ls2x_i2c_acpi_match[] = {
+ { "LOON0004" }, /* Loongson LS7A */
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, ls2x_i2c_acpi_match);
+
+static struct platform_driver ls2x_i2c_driver = {
+ .probe = ls2x_i2c_probe,
+ .driver = {
+ .name = "ls2x-i2c",
+ .pm = pm_sleep_ptr(&ls2x_i2c_pm_ops),
+ .of_match_table = ls2x_i2c_id_table,
+ .acpi_match_table = ls2x_i2c_acpi_match,
+ },
+};
+module_platform_driver(ls2x_i2c_driver);
+
+MODULE_DESCRIPTION("Loongson LS2X I2C Bus driver");
+MODULE_AUTHOR("Loongson Technology Corporation Limited");
+MODULE_LICENSE("GPL");
diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c
index d80e59340d97..43dd966d5ef5 100644
--- a/drivers/i2c/busses/i2c-mt65xx.c
+++ b/drivers/i2c/busses/i2c-mt65xx.c
@@ -1366,20 +1366,17 @@ static int mtk_i2c_probe(struct platform_device *pdev)
{
int ret = 0;
struct mtk_i2c *i2c;
- struct resource *res;
int i, irq, speed_clk;
i2c = devm_kzalloc(&pdev->dev, sizeof(*i2c), GFP_KERNEL);
if (!i2c)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- i2c->base = devm_ioremap_resource(&pdev->dev, res);
+ i2c->base = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
if (IS_ERR(i2c->base))
return PTR_ERR(i2c->base);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- i2c->pdmabase = devm_ioremap_resource(&pdev->dev, res);
+ i2c->pdmabase = devm_platform_get_and_ioremap_resource(pdev, 1, NULL);
if (IS_ERR(i2c->pdmabase))
return PTR_ERR(i2c->pdmabase);
diff --git a/drivers/i2c/busses/i2c-nvidia-gpu.c b/drivers/i2c/busses/i2c-nvidia-gpu.c
index 12e330cd7635..a8b99e7f6262 100644
--- a/drivers/i2c/busses/i2c-nvidia-gpu.c
+++ b/drivers/i2c/busses/i2c-nvidia-gpu.c
@@ -259,8 +259,8 @@ static const struct pci_device_id gpu_i2c_ids[] = {
MODULE_DEVICE_TABLE(pci, gpu_i2c_ids);
static const struct property_entry ccgx_props[] = {
- /* Use FW built for NVIDIA (nv) only */
- PROPERTY_ENTRY_U16("ccgx,firmware-build", ('n' << 8) | 'v'),
+ /* Use FW built for NVIDIA GPU only */
+ PROPERTY_ENTRY_STRING("firmware-name", "nvidia,gpu"),
{ }
};
diff --git a/drivers/i2c/busses/i2c-qcom-cci.c b/drivers/i2c/busses/i2c-qcom-cci.c
index a4b97fe3c3a5..01358472680c 100644
--- a/drivers/i2c/busses/i2c-qcom-cci.c
+++ b/drivers/i2c/busses/i2c-qcom-cci.c
@@ -811,9 +811,15 @@ static const struct cci_data cci_v2_data = {
static const struct of_device_id cci_dt_match[] = {
{ .compatible = "qcom,msm8226-cci", .data = &cci_v1_data},
- { .compatible = "qcom,msm8916-cci", .data = &cci_v1_data},
{ .compatible = "qcom,msm8974-cci", .data = &cci_v1_5_data},
{ .compatible = "qcom,msm8996-cci", .data = &cci_v2_data},
+
+
+ /*
+ * Legacy compatibles kept for backwards compatibility.
+ * Do not add any new ones unless they introduce a new config
+ */
+ { .compatible = "qcom,msm8916-cci", .data = &cci_v1_data},
{ .compatible = "qcom,sdm845-cci", .data = &cci_v2_data},
{ .compatible = "qcom,sm8250-cci", .data = &cci_v2_data},
{ .compatible = "qcom,sm8450-cci", .data = &cci_v2_data},
diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c
index fd70794bfcee..83909b02a03e 100644
--- a/drivers/i2c/busses/i2c-qcom-geni.c
+++ b/drivers/i2c/busses/i2c-qcom-geni.c
@@ -14,7 +14,7 @@
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
-#include <linux/qcom-geni-se.h>
+#include <linux/soc/qcom/geni-se.h>
#include <linux/spinlock.h>
#define SE_I2C_TX_TRANS_LEN 0x26c
@@ -1025,7 +1025,7 @@ static const struct dev_pm_ops geni_i2c_pm_ops = {
NULL)
};
-const struct geni_i2c_desc i2c_master_hub = {
+static const struct geni_i2c_desc i2c_master_hub = {
.has_core_clk = true,
.icc_ddr = NULL,
.no_dma_support = true,
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index 36dab9cd208c..45e9df81345a 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -116,9 +116,6 @@ struct s3c24xx_i2c {
struct s3c2410_platform_i2c *pdata;
struct gpio_desc *gpios[2];
struct pinctrl *pctrl;
-#if defined(CONFIG_ARM_S3C24XX_CPUFREQ)
- struct notifier_block freq_transition;
-#endif
struct regmap *sysreg;
unsigned int sys_i2c_cfg;
};
@@ -885,65 +882,6 @@ static int s3c24xx_i2c_clockrate(struct s3c24xx_i2c *i2c, unsigned int *got)
return 0;
}
-#if defined(CONFIG_ARM_S3C24XX_CPUFREQ)
-
-#define freq_to_i2c(_n) container_of(_n, struct s3c24xx_i2c, freq_transition)
-
-static int s3c24xx_i2c_cpufreq_transition(struct notifier_block *nb,
- unsigned long val, void *data)
-{
- struct s3c24xx_i2c *i2c = freq_to_i2c(nb);
- unsigned int got;
- int delta_f;
- int ret;
-
- delta_f = clk_get_rate(i2c->clk) - i2c->clkrate;
-
- /* if we're post-change and the input clock has slowed down
- * or at pre-change and the clock is about to speed up, then
- * adjust our clock rate. <0 is slow, >0 speedup.
- */
-
- if ((val == CPUFREQ_POSTCHANGE && delta_f < 0) ||
- (val == CPUFREQ_PRECHANGE && delta_f > 0)) {
- i2c_lock_bus(&i2c->adap, I2C_LOCK_ROOT_ADAPTER);
- ret = s3c24xx_i2c_clockrate(i2c, &got);
- i2c_unlock_bus(&i2c->adap, I2C_LOCK_ROOT_ADAPTER);
-
- if (ret < 0)
- dev_err(i2c->dev, "cannot find frequency (%d)\n", ret);
- else
- dev_info(i2c->dev, "setting freq %d\n", got);
- }
-
- return 0;
-}
-
-static inline int s3c24xx_i2c_register_cpufreq(struct s3c24xx_i2c *i2c)
-{
- i2c->freq_transition.notifier_call = s3c24xx_i2c_cpufreq_transition;
-
- return cpufreq_register_notifier(&i2c->freq_transition,
- CPUFREQ_TRANSITION_NOTIFIER);
-}
-
-static inline void s3c24xx_i2c_deregister_cpufreq(struct s3c24xx_i2c *i2c)
-{
- cpufreq_unregister_notifier(&i2c->freq_transition,
- CPUFREQ_TRANSITION_NOTIFIER);
-}
-
-#else
-static inline int s3c24xx_i2c_register_cpufreq(struct s3c24xx_i2c *i2c)
-{
- return 0;
-}
-
-static inline void s3c24xx_i2c_deregister_cpufreq(struct s3c24xx_i2c *i2c)
-{
-}
-#endif
-
#ifdef CONFIG_OF
static int s3c24xx_i2c_parse_dt_gpio(struct s3c24xx_i2c *i2c)
{
@@ -1152,13 +1090,6 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
}
}
- ret = s3c24xx_i2c_register_cpufreq(i2c);
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to register cpufreq notifier\n");
- clk_unprepare(i2c->clk);
- return ret;
- }
-
/*
* Note, previous versions of the driver used i2c_add_adapter()
* to add the bus at any number. We now pass the bus number via
@@ -1175,7 +1106,6 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
ret = i2c_add_numbered_adapter(&i2c->adap);
if (ret < 0) {
pm_runtime_disable(&pdev->dev);
- s3c24xx_i2c_deregister_cpufreq(i2c);
clk_unprepare(i2c->clk);
return ret;
}
@@ -1192,8 +1122,6 @@ static int s3c24xx_i2c_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
- s3c24xx_i2c_deregister_cpufreq(i2c);
-
i2c_del_adapter(&i2c->adap);
return 0;
diff --git a/drivers/i2c/busses/i2c-st.c b/drivers/i2c/busses/i2c-st.c
index 88482316d22a..f823913b75a6 100644
--- a/drivers/i2c/busses/i2c-st.c
+++ b/drivers/i2c/busses/i2c-st.c
@@ -740,7 +740,6 @@ static int st_i2c_xfer(struct i2c_adapter *i2c_adap,
return (ret < 0) ? ret : i;
}
-#ifdef CONFIG_PM_SLEEP
static int st_i2c_suspend(struct device *dev)
{
struct st_i2c_dev *i2c_dev = dev_get_drvdata(dev);
@@ -762,11 +761,7 @@ static int st_i2c_resume(struct device *dev)
return 0;
}
-static SIMPLE_DEV_PM_OPS(st_i2c_pm, st_i2c_suspend, st_i2c_resume);
-#define ST_I2C_PM (&st_i2c_pm)
-#else
-#define ST_I2C_PM NULL
-#endif
+static DEFINE_SIMPLE_DEV_PM_OPS(st_i2c_pm, st_i2c_suspend, st_i2c_resume);
static u32 st_i2c_func(struct i2c_adapter *adap)
{
@@ -901,7 +896,7 @@ static struct platform_driver st_i2c_driver = {
.driver = {
.name = "st-i2c",
.of_match_table = st_i2c_match,
- .pm = ST_I2C_PM,
+ .pm = pm_sleep_ptr(&st_i2c_pm),
},
.probe = st_i2c_probe,
.remove = st_i2c_remove,
diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c
index bee5a2ef1f22..dbb792fc197e 100644
--- a/drivers/i2c/busses/i2c-xiic.c
+++ b/drivers/i2c/busses/i2c-xiic.c
@@ -32,6 +32,8 @@
#include <linux/pm_runtime.h>
#define DRIVER_NAME "xiic-i2c"
+#define DYNAMIC_MODE_READ_BROKEN_BIT BIT(0)
+#define SMBUS_BLOCK_READ_MIN_LEN 3
enum xilinx_i2c_state {
STATE_DONE,
@@ -44,6 +46,12 @@ enum xiic_endian {
BIG
};
+enum i2c_scl_freq {
+ REG_VALUES_100KHZ = 0,
+ REG_VALUES_400KHZ = 1,
+ REG_VALUES_1MHZ = 2
+};
+
/**
* struct xiic_i2c - Internal representation of the XIIC I2C bus
* @dev: Pointer to device structure
@@ -60,6 +68,12 @@ enum xiic_endian {
* @clk: Pointer to AXI4-lite input clock
* @state: See STATE_
* @singlemaster: Indicates bus is single master
+ * @dynamic: Mode of controller
+ * @prev_msg_tx: Previous message is Tx
+ * @quirks: To hold platform specific bug info
+ * @smbus_block_read: Flag to handle block read
+ * @input_clk: Input clock to I2C controller
+ * @i2c_clk: I2C SCL frequency
*/
struct xiic_i2c {
struct device *dev;
@@ -76,6 +90,39 @@ struct xiic_i2c {
struct clk *clk;
enum xilinx_i2c_state state;
bool singlemaster;
+ bool dynamic;
+ bool prev_msg_tx;
+ u32 quirks;
+ bool smbus_block_read;
+ unsigned long input_clk;
+ unsigned int i2c_clk;
+};
+
+struct xiic_version_data {
+ u32 quirks;
+};
+
+/**
+ * struct timing_regs - AXI I2C timing registers that depend on I2C spec
+ * @tsusta: setup time for a repeated START condition
+ * @tsusto: setup time for a STOP condition
+ * @thdsta: hold time for a repeated START condition
+ * @tsudat: setup time for data
+ * @tbuf: bus free time between STOP and START
+ */
+struct timing_regs {
+ unsigned int tsusta;
+ unsigned int tsusto;
+ unsigned int thdsta;
+ unsigned int tsudat;
+ unsigned int tbuf;
+};
+
+/* Reg values in ns derived from I2C spec and AXI I2C PG for different frequencies */
+static const struct timing_regs timing_reg_values[] = {
+ { 5700, 5000, 4300, 550, 5000 }, /* Reg values for 100KHz */
+ { 900, 900, 900, 400, 1600 }, /* Reg values for 400KHz */
+ { 380, 380, 380, 170, 620 }, /* Reg values for 1MHz */
};
#define XIIC_MSB_OFFSET 0
@@ -96,6 +143,19 @@ struct xiic_i2c {
#define XIIC_RFD_REG_OFFSET (0x20 + XIIC_REG_OFFSET) /* Rx FIFO Depth reg */
#define XIIC_GPO_REG_OFFSET (0x24 + XIIC_REG_OFFSET) /* Output Register */
+/*
+ * Timing register offsets from RegisterBase. These are used only for
+ * setting i2c clock frequency for the line.
+ */
+#define XIIC_TSUSTA_REG_OFFSET (0x28 + XIIC_REG_OFFSET) /* TSUSTA Register */
+#define XIIC_TSUSTO_REG_OFFSET (0x2C + XIIC_REG_OFFSET) /* TSUSTO Register */
+#define XIIC_THDSTA_REG_OFFSET (0x30 + XIIC_REG_OFFSET) /* THDSTA Register */
+#define XIIC_TSUDAT_REG_OFFSET (0x34 + XIIC_REG_OFFSET) /* TSUDAT Register */
+#define XIIC_TBUF_REG_OFFSET (0x38 + XIIC_REG_OFFSET) /* TBUF Register */
+#define XIIC_THIGH_REG_OFFSET (0x3C + XIIC_REG_OFFSET) /* THIGH Register */
+#define XIIC_TLOW_REG_OFFSET (0x40 + XIIC_REG_OFFSET) /* TLOW Register */
+#define XIIC_THDDAT_REG_OFFSET (0x44 + XIIC_REG_OFFSET) /* THDDAT Register */
+
/* Control Register masks */
#define XIIC_CR_ENABLE_DEVICE_MASK 0x01 /* Device enable = 1 */
#define XIIC_CR_TX_FIFO_RESET_MASK 0x02 /* Transmit FIFO reset=1 */
@@ -143,6 +203,9 @@ struct xiic_i2c {
#define XIIC_TX_DYN_START_MASK 0x0100 /* 1 = Set dynamic start */
#define XIIC_TX_DYN_STOP_MASK 0x0200 /* 1 = Set dynamic stop */
+/* Dynamic mode constants */
+#define MAX_READ_LENGTH_DYNAMIC 255 /* Max length for dynamic read */
+
/*
* The following constants define the register offsets for the Interrupt
* registers. There are some holes in the memory map for reserved addresses
@@ -275,12 +338,120 @@ static int xiic_clear_rx_fifo(struct xiic_i2c *i2c)
return 0;
}
+static int xiic_wait_tx_empty(struct xiic_i2c *i2c)
+{
+ u8 isr;
+ unsigned long timeout;
+
+ timeout = jiffies + XIIC_I2C_TIMEOUT;
+ for (isr = xiic_getreg32(i2c, XIIC_IISR_OFFSET);
+ !(isr & XIIC_INTR_TX_EMPTY_MASK);
+ isr = xiic_getreg32(i2c, XIIC_IISR_OFFSET)) {
+ if (time_after(jiffies, timeout)) {
+ dev_err(i2c->dev, "Timeout waiting at Tx empty\n");
+ return -ETIMEDOUT;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * xiic_setclk - Sets the configured clock rate
+ * @i2c: Pointer to the xiic device structure
+ *
+ * The timing register values are calculated according to the input clock
+ * frequency and configured scl frequency. For details, please refer the
+ * AXI I2C PG and NXP I2C Spec.
+ * Supported frequencies are 100KHz, 400KHz and 1MHz.
+ *
+ * Return: 0 on success (Supported frequency selected or not configurable in SW)
+ * -EINVAL on failure (scl frequency not supported or THIGH is 0)
+ */
+static int xiic_setclk(struct xiic_i2c *i2c)
+{
+ unsigned int clk_in_mhz;
+ unsigned int index = 0;
+ u32 reg_val;
+
+ dev_dbg(i2c->adap.dev.parent,
+ "%s entry, i2c->input_clk: %ld, i2c->i2c_clk: %d\n",
+ __func__, i2c->input_clk, i2c->i2c_clk);
+
+ /* If not specified in DT, do not configure in SW. Rely only on Vivado design */
+ if (!i2c->i2c_clk || !i2c->input_clk)
+ return 0;
+
+ clk_in_mhz = DIV_ROUND_UP(i2c->input_clk, 1000000);
+
+ switch (i2c->i2c_clk) {
+ case I2C_MAX_FAST_MODE_PLUS_FREQ:
+ index = REG_VALUES_1MHZ;
+ break;
+ case I2C_MAX_FAST_MODE_FREQ:
+ index = REG_VALUES_400KHZ;
+ break;
+ case I2C_MAX_STANDARD_MODE_FREQ:
+ index = REG_VALUES_100KHZ;
+ break;
+ default:
+ dev_warn(i2c->adap.dev.parent, "Unsupported scl frequency\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Value to be stored in a register is the number of clock cycles required
+ * for the time duration. So the time is divided by the input clock time
+ * period to get the number of clock cycles required. Refer Xilinx AXI I2C
+ * PG document and I2C specification for further details.
+ */
+
+ /* THIGH - Depends on SCL clock frequency(i2c_clk) as below */
+ reg_val = (DIV_ROUND_UP(i2c->input_clk, 2 * i2c->i2c_clk)) - 7;
+ if (reg_val == 0)
+ return -EINVAL;
+
+ xiic_setreg32(i2c, XIIC_THIGH_REG_OFFSET, reg_val - 1);
+
+ /* TLOW - Value same as THIGH */
+ xiic_setreg32(i2c, XIIC_TLOW_REG_OFFSET, reg_val - 1);
+
+ /* TSUSTA */
+ reg_val = (timing_reg_values[index].tsusta * clk_in_mhz) / 1000;
+ xiic_setreg32(i2c, XIIC_TSUSTA_REG_OFFSET, reg_val - 1);
+
+ /* TSUSTO */
+ reg_val = (timing_reg_values[index].tsusto * clk_in_mhz) / 1000;
+ xiic_setreg32(i2c, XIIC_TSUSTO_REG_OFFSET, reg_val - 1);
+
+ /* THDSTA */
+ reg_val = (timing_reg_values[index].thdsta * clk_in_mhz) / 1000;
+ xiic_setreg32(i2c, XIIC_THDSTA_REG_OFFSET, reg_val - 1);
+
+ /* TSUDAT */
+ reg_val = (timing_reg_values[index].tsudat * clk_in_mhz) / 1000;
+ xiic_setreg32(i2c, XIIC_TSUDAT_REG_OFFSET, reg_val - 1);
+
+ /* TBUF */
+ reg_val = (timing_reg_values[index].tbuf * clk_in_mhz) / 1000;
+ xiic_setreg32(i2c, XIIC_TBUF_REG_OFFSET, reg_val - 1);
+
+ /* THDDAT */
+ xiic_setreg32(i2c, XIIC_THDDAT_REG_OFFSET, 1);
+
+ return 0;
+}
+
static int xiic_reinit(struct xiic_i2c *i2c)
{
int ret;
xiic_setreg32(i2c, XIIC_RESETR_OFFSET, XIIC_RESET_MASK);
+ ret = xiic_setclk(i2c);
+ if (ret)
+ return ret;
+
/* Set receive Fifo depth to maximum (zero based). */
xiic_setreg8(i2c, XIIC_RFD_REG_OFFSET, IIC_RX_FIFO_DEPTH - 1);
@@ -314,15 +485,72 @@ static void xiic_deinit(struct xiic_i2c *i2c)
xiic_setreg8(i2c, XIIC_CR_REG_OFFSET, cr & ~XIIC_CR_ENABLE_DEVICE_MASK);
}
+static void xiic_smbus_block_read_setup(struct xiic_i2c *i2c)
+{
+ u8 rxmsg_len, rfd_set = 0;
+
+ /*
+ * Clear the I2C_M_RECV_LEN flag to avoid setting
+ * message length again
+ */
+ i2c->rx_msg->flags &= ~I2C_M_RECV_LEN;
+
+ /* Set smbus_block_read flag to identify in isr */
+ i2c->smbus_block_read = true;
+
+ /* Read byte from rx fifo and set message length */
+ rxmsg_len = xiic_getreg8(i2c, XIIC_DRR_REG_OFFSET);
+
+ i2c->rx_msg->buf[i2c->rx_pos++] = rxmsg_len;
+
+ /* Check if received length is valid */
+ if (rxmsg_len <= I2C_SMBUS_BLOCK_MAX) {
+ /* Set Receive fifo depth */
+ if (rxmsg_len > IIC_RX_FIFO_DEPTH) {
+ /*
+ * When Rx msg len greater than or equal to Rx fifo capacity
+ * Receive fifo depth should set to Rx fifo capacity minus 1
+ */
+ rfd_set = IIC_RX_FIFO_DEPTH - 1;
+ i2c->rx_msg->len = rxmsg_len + 1;
+ } else if ((rxmsg_len == 1) ||
+ (rxmsg_len == 0)) {
+ /*
+ * Minimum of 3 bytes required to exit cleanly. 1 byte
+ * already received, Second byte is being received. Have
+ * to set NACK in read_rx before receiving the last byte
+ */
+ rfd_set = 0;
+ i2c->rx_msg->len = SMBUS_BLOCK_READ_MIN_LEN;
+ } else {
+ /*
+ * When Rx msg len less than Rx fifo capacity
+ * Receive fifo depth should set to Rx msg len minus 2
+ */
+ rfd_set = rxmsg_len - 2;
+ i2c->rx_msg->len = rxmsg_len + 1;
+ }
+ xiic_setreg8(i2c, XIIC_RFD_REG_OFFSET, rfd_set);
+
+ return;
+ }
+
+ /* Invalid message length, trigger STATE_ERROR with tx_msg_len in ISR */
+ i2c->tx_msg->len = 3;
+ i2c->smbus_block_read = false;
+ dev_err(i2c->adap.dev.parent, "smbus_block_read Invalid msg length\n");
+}
+
static void xiic_read_rx(struct xiic_i2c *i2c)
{
- u8 bytes_in_fifo;
+ u8 bytes_in_fifo, cr = 0, bytes_to_read = 0;
+ u32 bytes_rem = 0;
int i;
bytes_in_fifo = xiic_getreg8(i2c, XIIC_RFO_REG_OFFSET) + 1;
dev_dbg(i2c->adap.dev.parent,
- "%s entry, bytes in fifo: %d, msg: %d, SR: 0x%x, CR: 0x%x\n",
+ "%s entry, bytes in fifo: %d, rem: %d, SR: 0x%x, CR: 0x%x\n",
__func__, bytes_in_fifo, xiic_rx_space(i2c),
xiic_getreg8(i2c, XIIC_SR_REG_OFFSET),
xiic_getreg8(i2c, XIIC_CR_REG_OFFSET));
@@ -330,13 +558,58 @@ static void xiic_read_rx(struct xiic_i2c *i2c)
if (bytes_in_fifo > xiic_rx_space(i2c))
bytes_in_fifo = xiic_rx_space(i2c);
- for (i = 0; i < bytes_in_fifo; i++)
+ bytes_to_read = bytes_in_fifo;
+
+ if (!i2c->dynamic) {
+ bytes_rem = xiic_rx_space(i2c) - bytes_in_fifo;
+
+ /* Set msg length if smbus_block_read */
+ if (i2c->rx_msg->flags & I2C_M_RECV_LEN) {
+ xiic_smbus_block_read_setup(i2c);
+ return;
+ }
+
+ if (bytes_rem > IIC_RX_FIFO_DEPTH) {
+ bytes_to_read = bytes_in_fifo;
+ } else if (bytes_rem > 1) {
+ bytes_to_read = bytes_rem - 1;
+ } else if (bytes_rem == 1) {
+ bytes_to_read = 1;
+ /* Set NACK in CR to indicate slave transmitter */
+ cr = xiic_getreg8(i2c, XIIC_CR_REG_OFFSET);
+ xiic_setreg8(i2c, XIIC_CR_REG_OFFSET, cr |
+ XIIC_CR_NO_ACK_MASK);
+ } else if (bytes_rem == 0) {
+ bytes_to_read = bytes_in_fifo;
+
+ /* Generate stop on the bus if it is last message */
+ if (i2c->nmsgs == 1) {
+ cr = xiic_getreg8(i2c, XIIC_CR_REG_OFFSET);
+ xiic_setreg8(i2c, XIIC_CR_REG_OFFSET, cr &
+ ~XIIC_CR_MSMS_MASK);
+ }
+
+ /* Make TXACK=0, clean up for next transaction */
+ cr = xiic_getreg8(i2c, XIIC_CR_REG_OFFSET);
+ xiic_setreg8(i2c, XIIC_CR_REG_OFFSET, cr &
+ ~XIIC_CR_NO_ACK_MASK);
+ }
+ }
+
+ /* Read the fifo */
+ for (i = 0; i < bytes_to_read; i++) {
i2c->rx_msg->buf[i2c->rx_pos++] =
xiic_getreg8(i2c, XIIC_DRR_REG_OFFSET);
+ }
- xiic_setreg8(i2c, XIIC_RFD_REG_OFFSET,
- (xiic_rx_space(i2c) > IIC_RX_FIFO_DEPTH) ?
- IIC_RX_FIFO_DEPTH - 1 : xiic_rx_space(i2c) - 1);
+ if (i2c->dynamic) {
+ u8 bytes;
+
+ /* Receive remaining bytes if less than fifo depth */
+ bytes = min_t(u8, xiic_rx_space(i2c), IIC_RX_FIFO_DEPTH);
+ bytes--;
+ xiic_setreg8(i2c, XIIC_RFD_REG_OFFSET, bytes);
+ }
}
static int xiic_tx_fifo_space(struct xiic_i2c *i2c)
@@ -360,7 +633,22 @@ static void xiic_fill_tx_fifo(struct xiic_i2c *i2c)
if (!xiic_tx_space(i2c) && i2c->nmsgs == 1) {
/* last message in transfer -> STOP */
- data |= XIIC_TX_DYN_STOP_MASK;
+ if (i2c->dynamic) {
+ data |= XIIC_TX_DYN_STOP_MASK;
+ } else {
+ u8 cr;
+ int status;
+
+ /* Wait till FIFO is empty so STOP is sent last */
+ status = xiic_wait_tx_empty(i2c);
+ if (status)
+ return;
+
+ /* Write to CR to stop */
+ cr = xiic_getreg8(i2c, XIIC_CR_REG_OFFSET);
+ xiic_setreg8(i2c, XIIC_CR_REG_OFFSET, cr &
+ ~XIIC_CR_MSMS_MASK);
+ }
dev_dbg(i2c->adap.dev.parent, "%s TX STOP\n", __func__);
}
xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET, data);
@@ -401,7 +689,9 @@ static irqreturn_t xiic_process(int irq, void *dev_id)
dev_dbg(i2c->adap.dev.parent, "%s: SR: 0x%x, msg: %p, nmsgs: %d\n",
__func__, xiic_getreg8(i2c, XIIC_SR_REG_OFFSET),
i2c->tx_msg, i2c->nmsgs);
-
+ dev_dbg(i2c->adap.dev.parent, "%s, ISR: 0x%x, CR: 0x%x\n",
+ __func__, xiic_getreg32(i2c, XIIC_IISR_OFFSET),
+ xiic_getreg8(i2c, XIIC_CR_REG_OFFSET));
/* Service requesting interrupt */
if ((pend & XIIC_INTR_ARB_LOST_MASK) ||
@@ -512,6 +802,12 @@ static irqreturn_t xiic_process(int irq, void *dev_id)
/* The bus is not busy, disable BusNotBusy interrupt */
xiic_irq_dis(i2c, XIIC_INTR_BNB_MASK);
+ if (i2c->tx_msg && i2c->smbus_block_read) {
+ i2c->smbus_block_read = false;
+ /* Set requested message len=1 to indicate STATE_DONE */
+ i2c->tx_msg->len = 1;
+ }
+
if (!i2c->tx_msg)
goto out;
@@ -579,31 +875,113 @@ static int xiic_busy(struct xiic_i2c *i2c)
static void xiic_start_recv(struct xiic_i2c *i2c)
{
u16 rx_watermark;
+ u8 cr = 0, rfd_set = 0;
struct i2c_msg *msg = i2c->rx_msg = i2c->tx_msg;
- /* Clear and enable Rx full interrupt. */
- xiic_irq_clr_en(i2c, XIIC_INTR_RX_FULL_MASK | XIIC_INTR_TX_ERROR_MASK);
+ dev_dbg(i2c->adap.dev.parent, "%s entry, ISR: 0x%x, CR: 0x%x\n",
+ __func__, xiic_getreg32(i2c, XIIC_IISR_OFFSET),
+ xiic_getreg8(i2c, XIIC_CR_REG_OFFSET));
- /* we want to get all but last byte, because the TX_ERROR IRQ is used
- * to inidicate error ACK on the address, and negative ack on the last
- * received byte, so to not mix them receive all but last.
- * In the case where there is only one byte to receive
- * we can check if ERROR and RX full is set at the same time
- */
- rx_watermark = msg->len;
- if (rx_watermark > IIC_RX_FIFO_DEPTH)
- rx_watermark = IIC_RX_FIFO_DEPTH;
- xiic_setreg8(i2c, XIIC_RFD_REG_OFFSET, (u8)(rx_watermark - 1));
+ /* Disable Tx interrupts */
+ xiic_irq_dis(i2c, XIIC_INTR_TX_HALF_MASK | XIIC_INTR_TX_EMPTY_MASK);
+
+ if (i2c->dynamic) {
+ u8 bytes;
+ u16 val;
+
+ /* Clear and enable Rx full interrupt. */
+ xiic_irq_clr_en(i2c, XIIC_INTR_RX_FULL_MASK |
+ XIIC_INTR_TX_ERROR_MASK);
+
+ /*
+ * We want to get all but last byte, because the TX_ERROR IRQ
+ * is used to indicate error ACK on the address, and
+ * negative ack on the last received byte, so to not mix
+ * them receive all but last.
+ * In the case where there is only one byte to receive
+ * we can check if ERROR and RX full is set at the same time
+ */
+ rx_watermark = msg->len;
+ bytes = min_t(u8, rx_watermark, IIC_RX_FIFO_DEPTH);
+
+ if (rx_watermark > 0)
+ bytes--;
+ xiic_setreg8(i2c, XIIC_RFD_REG_OFFSET, bytes);
- if (!(msg->flags & I2C_M_NOSTART))
/* write the address */
xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET,
- i2c_8bit_addr_from_msg(msg) | XIIC_TX_DYN_START_MASK);
+ i2c_8bit_addr_from_msg(msg) |
+ XIIC_TX_DYN_START_MASK);
+
+ /* If last message, include dynamic stop bit with length */
+ val = (i2c->nmsgs == 1) ? XIIC_TX_DYN_STOP_MASK : 0;
+ val |= msg->len;
+
+ xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET, val);
+
+ xiic_irq_clr_en(i2c, XIIC_INTR_BNB_MASK);
+ } else {
+ /*
+ * If previous message is Tx, make sure that Tx FIFO is empty
+ * before starting a new transfer as the repeated start in
+ * standard mode can corrupt the transaction if there are
+ * still bytes to be transmitted in FIFO
+ */
+ if (i2c->prev_msg_tx) {
+ int status;
+
+ status = xiic_wait_tx_empty(i2c);
+ if (status)
+ return;
+ }
- xiic_irq_clr_en(i2c, XIIC_INTR_BNB_MASK);
+ cr = xiic_getreg8(i2c, XIIC_CR_REG_OFFSET);
- xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET,
- msg->len | ((i2c->nmsgs == 1) ? XIIC_TX_DYN_STOP_MASK : 0));
+ /* Set Receive fifo depth */
+ rx_watermark = msg->len;
+ if (rx_watermark > IIC_RX_FIFO_DEPTH) {
+ rfd_set = IIC_RX_FIFO_DEPTH - 1;
+ } else if (rx_watermark == 1) {
+ rfd_set = rx_watermark - 1;
+
+ /* Set No_ACK, except for smbus_block_read */
+ if (!(i2c->rx_msg->flags & I2C_M_RECV_LEN)) {
+ /* Handle single byte transfer separately */
+ cr |= XIIC_CR_NO_ACK_MASK;
+ }
+ } else if (rx_watermark == 0) {
+ rfd_set = rx_watermark;
+ } else {
+ rfd_set = rx_watermark - 2;
+ }
+ /* Check if RSTA should be set */
+ if (cr & XIIC_CR_MSMS_MASK) {
+ /* Already a master, RSTA should be set */
+ xiic_setreg8(i2c, XIIC_CR_REG_OFFSET, (cr |
+ XIIC_CR_REPEATED_START_MASK) &
+ ~(XIIC_CR_DIR_IS_TX_MASK));
+ }
+
+ xiic_setreg8(i2c, XIIC_RFD_REG_OFFSET, rfd_set);
+
+ /* Clear and enable Rx full and transmit complete interrupts */
+ xiic_irq_clr_en(i2c, XIIC_INTR_RX_FULL_MASK |
+ XIIC_INTR_TX_ERROR_MASK);
+
+ /* Write the address */
+ xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET,
+ i2c_8bit_addr_from_msg(msg));
+
+ /* Write to Control Register,to start transaction in Rx mode */
+ if ((cr & XIIC_CR_MSMS_MASK) == 0) {
+ xiic_setreg8(i2c, XIIC_CR_REG_OFFSET, (cr |
+ XIIC_CR_MSMS_MASK)
+ & ~(XIIC_CR_DIR_IS_TX_MASK));
+ }
+ dev_dbg(i2c->adap.dev.parent, "%s end, ISR: 0x%x, CR: 0x%x\n",
+ __func__, xiic_getreg32(i2c, XIIC_IISR_OFFSET),
+ xiic_getreg8(i2c, XIIC_CR_REG_OFFSET));
+ }
if (i2c->nmsgs == 1)
/* very last, enable bus not busy as well */
@@ -611,10 +989,17 @@ static void xiic_start_recv(struct xiic_i2c *i2c)
/* the message is tx:ed */
i2c->tx_pos = msg->len;
+
+ /* Enable interrupts */
+ xiic_setreg32(i2c, XIIC_DGIER_OFFSET, XIIC_GINTR_ENABLE_MASK);
+
+ i2c->prev_msg_tx = false;
}
static void xiic_start_send(struct xiic_i2c *i2c)
{
+ u8 cr = 0;
+ u16 data;
struct i2c_msg *msg = i2c->tx_msg;
dev_dbg(i2c->adap.dev.parent, "%s entry, msg: %p, len: %d",
@@ -623,24 +1008,70 @@ static void xiic_start_send(struct xiic_i2c *i2c)
__func__, xiic_getreg32(i2c, XIIC_IISR_OFFSET),
xiic_getreg8(i2c, XIIC_CR_REG_OFFSET));
- if (!(msg->flags & I2C_M_NOSTART)) {
+ if (i2c->dynamic) {
/* write the address */
- u16 data = i2c_8bit_addr_from_msg(msg) |
- XIIC_TX_DYN_START_MASK;
- if ((i2c->nmsgs == 1) && msg->len == 0)
+ data = i2c_8bit_addr_from_msg(msg) |
+ XIIC_TX_DYN_START_MASK;
+
+ if (i2c->nmsgs == 1 && msg->len == 0)
/* no data and last message -> add STOP */
data |= XIIC_TX_DYN_STOP_MASK;
xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET, data);
- }
- /* Clear any pending Tx empty, Tx Error and then enable them. */
- xiic_irq_clr_en(i2c, XIIC_INTR_TX_EMPTY_MASK | XIIC_INTR_TX_ERROR_MASK |
- XIIC_INTR_BNB_MASK |
- ((i2c->nmsgs > 1 || xiic_tx_space(i2c)) ?
- XIIC_INTR_TX_HALF_MASK : 0));
+ /* Clear any pending Tx empty, Tx Error and then enable them */
+ xiic_irq_clr_en(i2c, XIIC_INTR_TX_EMPTY_MASK |
+ XIIC_INTR_TX_ERROR_MASK |
+ XIIC_INTR_BNB_MASK |
+ ((i2c->nmsgs > 1 || xiic_tx_space(i2c)) ?
+ XIIC_INTR_TX_HALF_MASK : 0));
+
+ xiic_fill_tx_fifo(i2c);
+ } else {
+ /*
+ * If previous message is Tx, make sure that Tx FIFO is empty
+ * before starting a new transfer as the repeated start in
+ * standard mode can corrupt the transaction if there are
+ * still bytes to be transmitted in FIFO
+ */
+ if (i2c->prev_msg_tx) {
+ int status;
+
+ status = xiic_wait_tx_empty(i2c);
+ if (status)
+ return;
+ }
+ /* Check if RSTA should be set */
+ cr = xiic_getreg8(i2c, XIIC_CR_REG_OFFSET);
+ if (cr & XIIC_CR_MSMS_MASK) {
+ /* Already a master, RSTA should be set */
+ xiic_setreg8(i2c, XIIC_CR_REG_OFFSET, (cr |
+ XIIC_CR_REPEATED_START_MASK |
+ XIIC_CR_DIR_IS_TX_MASK) &
+ ~(XIIC_CR_NO_ACK_MASK));
+ }
+
+ /* Write address to FIFO */
+ data = i2c_8bit_addr_from_msg(msg);
+ xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET, data);
+
+ /* Fill fifo */
+ xiic_fill_tx_fifo(i2c);
- xiic_fill_tx_fifo(i2c);
+ if ((cr & XIIC_CR_MSMS_MASK) == 0) {
+ /* Start Tx by writing to CR */
+ cr = xiic_getreg8(i2c, XIIC_CR_REG_OFFSET);
+ xiic_setreg8(i2c, XIIC_CR_REG_OFFSET, cr |
+ XIIC_CR_MSMS_MASK |
+ XIIC_CR_DIR_IS_TX_MASK);
+ }
+
+ /* Clear any pending Tx empty, Tx Error and then enable them */
+ xiic_irq_clr_en(i2c, XIIC_INTR_TX_EMPTY_MASK |
+ XIIC_INTR_TX_ERROR_MASK |
+ XIIC_INTR_BNB_MASK);
+ }
+ i2c->prev_msg_tx = true;
}
static void __xiic_start_xfer(struct xiic_i2c *i2c)
@@ -666,7 +1097,8 @@ static void __xiic_start_xfer(struct xiic_i2c *i2c)
static int xiic_start_xfer(struct xiic_i2c *i2c, struct i2c_msg *msgs, int num)
{
- int ret;
+ bool broken_read, max_read_len, smbus_blk_read;
+ int ret, count;
mutex_lock(&i2c->lock);
@@ -679,6 +1111,34 @@ static int xiic_start_xfer(struct xiic_i2c *i2c, struct i2c_msg *msgs, int num)
i2c->nmsgs = num;
init_completion(&i2c->completion);
+ /* Decide standard mode or Dynamic mode */
+ i2c->dynamic = true;
+
+ /* Initialize prev message type */
+ i2c->prev_msg_tx = false;
+
+ /*
+ * Scan through nmsgs, use dynamic mode when none of the below three
+ * conditions occur. We need standard mode even if one condition holds
+ * true in the entire array of messages in a single transfer.
+ * If read transaction as dynamic mode is broken for delayed reads
+ * in xlnx,axi-iic-2.0 / xlnx,xps-iic-2.00.a IP versions.
+ * If read length is > 255 bytes.
+ * If smbus_block_read transaction.
+ */
+ for (count = 0; count < i2c->nmsgs; count++) {
+ broken_read = (i2c->quirks & DYNAMIC_MODE_READ_BROKEN_BIT) &&
+ (i2c->tx_msg[count].flags & I2C_M_RD);
+ max_read_len = (i2c->tx_msg[count].flags & I2C_M_RD) &&
+ (i2c->tx_msg[count].len > MAX_READ_LENGTH_DYNAMIC);
+ smbus_blk_read = (i2c->tx_msg[count].flags & I2C_M_RECV_LEN);
+
+ if (broken_read || max_read_len || smbus_blk_read) {
+ i2c->dynamic = false;
+ break;
+ }
+ }
+
ret = xiic_reinit(i2c);
if (!ret)
__xiic_start_xfer(i2c);
@@ -714,10 +1174,6 @@ static int xiic_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
i2c->rx_msg = NULL;
i2c->nmsgs = 0;
err = -ETIMEDOUT;
- } else if (err < 0) { /* Completion error */
- i2c->tx_msg = NULL;
- i2c->rx_msg = NULL;
- i2c->nmsgs = 0;
} else {
err = (i2c->state == STATE_DONE) ? num : -EIO;
}
@@ -729,7 +1185,7 @@ static int xiic_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
static u32 xiic_func(struct i2c_adapter *adap)
{
- return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_SMBUS_BLOCK_DATA;
}
static const struct i2c_algorithm xiic_algorithm = {
@@ -737,21 +1193,30 @@ static const struct i2c_algorithm xiic_algorithm = {
.functionality = xiic_func,
};
-static const struct i2c_adapter_quirks xiic_quirks = {
- .max_read_len = 255,
-};
-
static const struct i2c_adapter xiic_adapter = {
.owner = THIS_MODULE,
.class = I2C_CLASS_DEPRECATED,
.algo = &xiic_algorithm,
- .quirks = &xiic_quirks,
};
+static const struct xiic_version_data xiic_2_00 = {
+ .quirks = DYNAMIC_MODE_READ_BROKEN_BIT,
+};
+
+#if defined(CONFIG_OF)
+static const struct of_device_id xiic_of_match[] = {
+ { .compatible = "xlnx,xps-iic-2.00.a", .data = &xiic_2_00 },
+ { .compatible = "xlnx,axi-iic-2.1", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, xiic_of_match);
+#endif
+
static int xiic_i2c_probe(struct platform_device *pdev)
{
struct xiic_i2c *i2c;
struct xiic_i2c_platform_data *pdata;
+ const struct of_device_id *match;
struct resource *res;
int ret, irq;
u8 i;
@@ -761,6 +1226,13 @@ static int xiic_i2c_probe(struct platform_device *pdev)
if (!i2c)
return -ENOMEM;
+ match = of_match_node(xiic_of_match, pdev->dev.of_node);
+ if (match && match->data) {
+ const struct xiic_version_data *data = match->data;
+
+ i2c->quirks = data->quirks;
+ }
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
i2c->base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(i2c->base))
@@ -798,6 +1270,15 @@ static int xiic_i2c_probe(struct platform_device *pdev)
pm_runtime_use_autosuspend(i2c->dev);
pm_runtime_set_active(i2c->dev);
pm_runtime_enable(i2c->dev);
+
+ /* SCL frequency configuration */
+ i2c->input_clk = clk_get_rate(i2c->clk);
+ ret = of_property_read_u32(pdev->dev.of_node, "clock-frequency",
+ &i2c->i2c_clk);
+ /* If clock-frequency not specified in DT, do not configure in SW */
+ if (ret || i2c->i2c_clk > I2C_MAX_FAST_MODE_PLUS_FREQ)
+ i2c->i2c_clk = 0;
+
ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
xiic_process, IRQF_ONESHOT,
pdev->name, i2c);
@@ -841,6 +1322,9 @@ static int xiic_i2c_probe(struct platform_device *pdev)
i2c_new_client_device(&i2c->adap, pdata->devices + i);
}
+ dev_dbg(&pdev->dev, "mmio %08lx irq %d scl clock frequency %d\n",
+ (unsigned long)res->start, irq, i2c->i2c_clk);
+
return 0;
err_clk_dis:
@@ -875,14 +1359,6 @@ static int xiic_i2c_remove(struct platform_device *pdev)
return 0;
}
-#if defined(CONFIG_OF)
-static const struct of_device_id xiic_of_match[] = {
- { .compatible = "xlnx,xps-iic-2.00.a", },
- {},
-};
-MODULE_DEVICE_TABLE(of, xiic_of_match);
-#endif
-
static int __maybe_unused xiic_i2c_runtime_suspend(struct device *dev)
{
struct xiic_i2c *i2c = dev_get_drvdata(dev);
diff --git a/drivers/i2c/i2c-core-acpi.c b/drivers/i2c/i2c-core-acpi.c
index 4dd777cc0c89..d6037a328669 100644
--- a/drivers/i2c/i2c-core-acpi.c
+++ b/drivers/i2c/i2c-core-acpi.c
@@ -442,18 +442,7 @@ EXPORT_SYMBOL_GPL(i2c_acpi_find_adapter_by_handle);
static struct i2c_client *i2c_acpi_find_client_by_adev(struct acpi_device *adev)
{
- struct device *dev;
- struct i2c_client *client;
-
- dev = bus_find_device_by_acpi_dev(&i2c_bus_type, adev);
- if (!dev)
- return NULL;
-
- client = i2c_verify_client(dev);
- if (!client)
- put_device(dev);
-
- return client;
+ return i2c_find_device_by_fwnode(acpi_fwnode_handle(adev));
}
static int i2c_acpi_notify(struct notifier_block *nb, unsigned long value,
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index 087e480b624c..cb5fa971d67e 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -34,6 +34,7 @@
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/pinctrl/consumer.h>
+#include <linux/pinctrl/devinfo.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeirq.h>
@@ -136,9 +137,9 @@ static int i2c_device_match(struct device *dev, struct device_driver *drv)
return 0;
}
-static int i2c_device_uevent(struct device *dev, struct kobj_uevent_env *env)
+static int i2c_device_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
- struct i2c_client *client = to_i2c_client(dev);
+ const struct i2c_client *client = to_i2c_client(dev);
int rc;
rc = of_device_uevent_modalias(dev, env);
@@ -282,7 +283,9 @@ static void i2c_gpio_init_pinctrl_recovery(struct i2c_adapter *adap)
{
struct i2c_bus_recovery_info *bri = adap->bus_recovery_info;
struct device *dev = &adap->dev;
- struct pinctrl *p = bri->pinctrl;
+ struct pinctrl *p = bri->pinctrl ?: dev_pinctrl(dev->parent);
+
+ bri->pinctrl = p;
/*
* we can't change states without pinctrl, so remove the states if
@@ -1012,6 +1015,35 @@ void i2c_unregister_device(struct i2c_client *client)
}
EXPORT_SYMBOL_GPL(i2c_unregister_device);
+/**
+ * i2c_find_device_by_fwnode() - find an i2c_client for the fwnode
+ * @fwnode: &struct fwnode_handle corresponding to the &struct i2c_client
+ *
+ * Look up and return the &struct i2c_client corresponding to the @fwnode.
+ * If no client can be found, or @fwnode is NULL, this returns NULL.
+ *
+ * The user must call put_device(&client->dev) once done with the i2c client.
+ */
+struct i2c_client *i2c_find_device_by_fwnode(struct fwnode_handle *fwnode)
+{
+ struct i2c_client *client;
+ struct device *dev;
+
+ if (!fwnode)
+ return NULL;
+
+ dev = bus_find_device_by_fwnode(&i2c_bus_type, fwnode);
+ if (!dev)
+ return NULL;
+
+ client = i2c_verify_client(dev);
+ if (!client)
+ put_device(dev);
+
+ return client;
+}
+EXPORT_SYMBOL(i2c_find_device_by_fwnode);
+
static const struct i2c_device_id dummy_id[] = {
{ "dummy", 0 },
@@ -1761,6 +1793,75 @@ int devm_i2c_add_adapter(struct device *dev, struct i2c_adapter *adapter)
}
EXPORT_SYMBOL_GPL(devm_i2c_add_adapter);
+static int i2c_dev_or_parent_fwnode_match(struct device *dev, const void *data)
+{
+ if (dev_fwnode(dev) == data)
+ return 1;
+
+ if (dev->parent && dev_fwnode(dev->parent) == data)
+ return 1;
+
+ return 0;
+}
+
+/**
+ * i2c_find_adapter_by_fwnode() - find an i2c_adapter for the fwnode
+ * @fwnode: &struct fwnode_handle corresponding to the &struct i2c_adapter
+ *
+ * Look up and return the &struct i2c_adapter corresponding to the @fwnode.
+ * If no adapter can be found, or @fwnode is NULL, this returns NULL.
+ *
+ * The user must call put_device(&adapter->dev) once done with the i2c adapter.
+ */
+struct i2c_adapter *i2c_find_adapter_by_fwnode(struct fwnode_handle *fwnode)
+{
+ struct i2c_adapter *adapter;
+ struct device *dev;
+
+ if (!fwnode)
+ return NULL;
+
+ dev = bus_find_device(&i2c_bus_type, NULL, fwnode,
+ i2c_dev_or_parent_fwnode_match);
+ if (!dev)
+ return NULL;
+
+ adapter = i2c_verify_adapter(dev);
+ if (!adapter)
+ put_device(dev);
+
+ return adapter;
+}
+EXPORT_SYMBOL(i2c_find_adapter_by_fwnode);
+
+/**
+ * i2c_get_adapter_by_fwnode() - find an i2c_adapter for the fwnode
+ * @fwnode: &struct fwnode_handle corresponding to the &struct i2c_adapter
+ *
+ * Look up and return the &struct i2c_adapter corresponding to the @fwnode,
+ * and increment the adapter module's use count. If no adapter can be found,
+ * or @fwnode is NULL, this returns NULL.
+ *
+ * The user must call i2c_put_adapter(adapter) once done with the i2c adapter.
+ * Note that this is different from i2c_find_adapter_by_node().
+ */
+struct i2c_adapter *i2c_get_adapter_by_fwnode(struct fwnode_handle *fwnode)
+{
+ struct i2c_adapter *adapter;
+
+ adapter = i2c_find_adapter_by_fwnode(fwnode);
+ if (!adapter)
+ return NULL;
+
+ if (!try_module_get(adapter->owner)) {
+ put_device(&adapter->dev);
+ adapter = NULL;
+ }
+
+ return adapter;
+}
+EXPORT_SYMBOL(i2c_get_adapter_by_fwnode);
+
static void i2c_parse_timing(struct device *dev, char *prop_name, u32 *cur_val_p,
u32 def_val, bool use_def)
{
diff --git a/drivers/i2c/i2c-core-of.c b/drivers/i2c/i2c-core-of.c
index 3ed74aa4b44b..bce6b796e04c 100644
--- a/drivers/i2c/i2c-core-of.c
+++ b/drivers/i2c/i2c-core-of.c
@@ -113,72 +113,6 @@ void of_i2c_register_devices(struct i2c_adapter *adap)
of_node_put(bus);
}
-static int of_dev_or_parent_node_match(struct device *dev, const void *data)
-{
- if (dev->of_node == data)
- return 1;
-
- if (dev->parent)
- return dev->parent->of_node == data;
-
- return 0;
-}
-
-/* must call put_device() when done with returned i2c_client device */
-struct i2c_client *of_find_i2c_device_by_node(struct device_node *node)
-{
- struct device *dev;
- struct i2c_client *client;
-
- dev = bus_find_device_by_of_node(&i2c_bus_type, node);
- if (!dev)
- return NULL;
-
- client = i2c_verify_client(dev);
- if (!client)
- put_device(dev);
-
- return client;
-}
-EXPORT_SYMBOL(of_find_i2c_device_by_node);
-
-/* must call put_device() when done with returned i2c_adapter device */
-struct i2c_adapter *of_find_i2c_adapter_by_node(struct device_node *node)
-{
- struct device *dev;
- struct i2c_adapter *adapter;
-
- dev = bus_find_device(&i2c_bus_type, NULL, node,
- of_dev_or_parent_node_match);
- if (!dev)
- return NULL;
-
- adapter = i2c_verify_adapter(dev);
- if (!adapter)
- put_device(dev);
-
- return adapter;
-}
-EXPORT_SYMBOL(of_find_i2c_adapter_by_node);
-
-/* must call i2c_put_adapter() when done with returned i2c_adapter device */
-struct i2c_adapter *of_get_i2c_adapter_by_node(struct device_node *node)
-{
- struct i2c_adapter *adapter;
-
- adapter = of_find_i2c_adapter_by_node(node);
- if (!adapter)
- return NULL;
-
- if (!try_module_get(adapter->owner)) {
- put_device(&adapter->dev);
- adapter = NULL;
- }
-
- return adapter;
-}
-EXPORT_SYMBOL(of_get_i2c_adapter_by_node);
-
static const struct of_device_id*
i2c_of_match_device_sysfs(const struct of_device_id *matches,
struct i2c_client *client)
diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
index ab0adaa130da..107623c4cc14 100644
--- a/drivers/i2c/i2c-dev.c
+++ b/drivers/i2c/i2c-dev.c
@@ -653,12 +653,12 @@ static int i2cdev_attach_adapter(struct device *dev, void *dummy)
int res;
if (dev->type != &i2c_adapter_type)
- return 0;
+ return NOTIFY_DONE;
adap = to_i2c_adapter(dev);
i2c_dev = get_free_i2c_dev(adap);
if (IS_ERR(i2c_dev))
- return PTR_ERR(i2c_dev);
+ return NOTIFY_DONE;
cdev_init(&i2c_dev->cdev, &i2cdev_fops);
i2c_dev->cdev.owner = THIS_MODULE;
@@ -678,11 +678,11 @@ static int i2cdev_attach_adapter(struct device *dev, void *dummy)
goto err_put_i2c_dev;
pr_debug("adapter [%s] registered as minor %d\n", adap->name, adap->nr);
- return 0;
+ return NOTIFY_OK;
err_put_i2c_dev:
put_i2c_dev(i2c_dev, false);
- return res;
+ return NOTIFY_DONE;
}
static int i2cdev_detach_adapter(struct device *dev, void *dummy)
@@ -691,17 +691,17 @@ static int i2cdev_detach_adapter(struct device *dev, void *dummy)
struct i2c_dev *i2c_dev;
if (dev->type != &i2c_adapter_type)
- return 0;
+ return NOTIFY_DONE;
adap = to_i2c_adapter(dev);
i2c_dev = i2c_dev_get_by_minor(adap->nr);
if (!i2c_dev) /* attach_adapter must have failed */
- return 0;
+ return NOTIFY_DONE;
put_i2c_dev(i2c_dev, true);
pr_debug("adapter [%s] unregistered\n", adap->name);
- return 0;
+ return NOTIFY_OK;
}
static int i2cdev_notifier_call(struct notifier_block *nb, unsigned long action,
@@ -716,7 +716,7 @@ static int i2cdev_notifier_call(struct notifier_block *nb, unsigned long action,
return i2cdev_detach_adapter(dev, NULL);
}
- return 0;
+ return NOTIFY_DONE;
}
static struct notifier_block i2cdev_notifier = {
diff --git a/drivers/i3c/device.c b/drivers/i3c/device.c
index 9762630b917e..1a6a8703dbc3 100644
--- a/drivers/i3c/device.c
+++ b/drivers/i3c/device.c
@@ -78,7 +78,7 @@ EXPORT_SYMBOL_GPL(i3c_device_do_setdasa);
*
* Retrieve I3C dev info.
*/
-void i3c_device_get_info(struct i3c_device *dev,
+void i3c_device_get_info(const struct i3c_device *dev,
struct i3c_device_info *info)
{
if (!info)
@@ -209,18 +209,6 @@ struct device *i3cdev_to_dev(struct i3c_device *i3cdev)
EXPORT_SYMBOL_GPL(i3cdev_to_dev);
/**
- * dev_to_i3cdev() - Returns the I3C device containing @dev
- * @dev: device object
- *
- * Return: a pointer to an I3C device object.
- */
-struct i3c_device *dev_to_i3cdev(struct device *dev)
-{
- return container_of(dev, struct i3c_device, dev);
-}
-EXPORT_SYMBOL_GPL(dev_to_i3cdev);
-
-/**
* i3c_device_match_id() - Returns the i3c_device_id entry matching @i3cdev
* @i3cdev: I3C device
* @id_table: I3C device match table
diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c
index d7e6f6c99aea..7a60e1c5e587 100644
--- a/drivers/i3c/master.c
+++ b/drivers/i3c/master.c
@@ -273,9 +273,9 @@ static struct attribute *i3c_device_attrs[] = {
};
ATTRIBUTE_GROUPS(i3c_device);
-static int i3c_device_uevent(struct device *dev, struct kobj_uevent_env *env)
+static int i3c_device_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
- struct i3c_device *i3cdev = dev_to_i3cdev(dev);
+ const struct i3c_device *i3cdev = dev_to_i3cdev(dev);
struct i3c_device_info devinfo;
u16 manuf, part, ext;
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index cfeb24d40d37..938c17f25d94 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -168,13 +168,7 @@ static __cpuidle int intel_idle_irq(struct cpuidle_device *dev,
raw_local_irq_enable();
ret = __intel_idle(dev, drv, index);
-
- /*
- * The lockdep hardirqs state may be changed to 'on' with timer
- * tick interrupt followed by __do_softirq(). Use local_irq_disable()
- * to keep the hardirqs state correct.
- */
- local_irq_disable();
+ raw_local_irq_disable();
return ret;
}
@@ -187,12 +181,12 @@ static __cpuidle int intel_idle_ibrs(struct cpuidle_device *dev,
int ret;
if (smt_active)
- wrmsrl(MSR_IA32_SPEC_CTRL, 0);
+ native_wrmsrl(MSR_IA32_SPEC_CTRL, 0);
ret = __intel_idle(dev, drv, index);
if (smt_active)
- wrmsrl(MSR_IA32_SPEC_CTRL, spec_ctrl);
+ native_wrmsrl(MSR_IA32_SPEC_CTRL, spec_ctrl);
return ret;
}
@@ -1430,6 +1424,7 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = {
X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, &idle_cpu_adl_l),
X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_N, &idle_cpu_adl_n),
X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, &idle_cpu_spr),
+ X86_MATCH_INTEL_FAM6_MODEL(EMERALDRAPIDS_X, &idle_cpu_spr),
X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNL, &idle_cpu_knl),
X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNM, &idle_cpu_knl),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT, &idle_cpu_bxt),
@@ -1843,6 +1838,9 @@ static bool __init intel_idle_verify_cstate(unsigned int mwait_hint)
return true;
}
+static bool force_irq_on __read_mostly;
+module_param(force_irq_on, bool, 0444);
+
static void __init intel_idle_init_cstates_icpu(struct cpuidle_driver *drv)
{
int cstate;
@@ -1862,6 +1860,7 @@ static void __init intel_idle_init_cstates_icpu(struct cpuidle_driver *drv)
skx_idle_state_table_update();
break;
case INTEL_FAM6_SAPPHIRERAPIDS_X:
+ case INTEL_FAM6_EMERALDRAPIDS_X:
spr_idle_state_table_update();
break;
case INTEL_FAM6_ALDERLAKE:
@@ -1895,8 +1894,10 @@ static void __init intel_idle_init_cstates_icpu(struct cpuidle_driver *drv)
/* Structure copy. */
drv->states[drv->state_count] = cpuidle_state_table[cstate];
- if (cpuidle_state_table[cstate].flags & CPUIDLE_FLAG_IRQ_ENABLE)
+ if ((cpuidle_state_table[cstate].flags & CPUIDLE_FLAG_IRQ_ENABLE) || force_irq_on) {
+ printk("intel_idle: forced intel_idle_irq for state %d\n", cstate);
drv->states[drv->state_count].enter = intel_idle_irq;
+ }
if (cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS) &&
cpuidle_state_table[cstate].flags & CPUIDLE_FLAG_IBRS) {
diff --git a/drivers/iio/accel/Kconfig b/drivers/iio/accel/Kconfig
index 03ac410c162e..b6b45d359f28 100644
--- a/drivers/iio/accel/Kconfig
+++ b/drivers/iio/accel/Kconfig
@@ -380,7 +380,7 @@ config IIO_ST_ACCEL_3AXIS
select IIO_TRIGGERED_BUFFER if (IIO_BUFFER)
help
Say yes here to build support for STMicroelectronics accelerometers:
- LSM303DLH, LSM303DLHC, LIS3DH, LSM330D, LSM330DL, LSM330DLC,
+ LSM303C, LSM303DLH, LSM303DLHC, LIS3DH, LSM330D, LSM330DL, LSM330DLC,
LIS331DLH, LSM303DL, LSM303DLM, LSM330, LIS2DH12, H3LIS331DL,
LNG2DM, LIS3DE, LIS2DE12, LIS2HH12
diff --git a/drivers/iio/accel/bma400.h b/drivers/iio/accel/bma400.h
index 36edbaff4f7f..932358b45f17 100644
--- a/drivers/iio/accel/bma400.h
+++ b/drivers/iio/accel/bma400.h
@@ -141,10 +141,6 @@
#define BMA400_SCALE_MIN 9577
#define BMA400_SCALE_MAX 76617
-#define BMA400_NUM_REGULATORS 2
-#define BMA400_VDD_REGULATOR 0
-#define BMA400_VDDIO_REGULATOR 1
-
extern const struct regmap_config bma400_regmap_config;
int bma400_probe(struct device *dev, struct regmap *regmap, int irq,
diff --git a/drivers/iio/accel/bma400_core.c b/drivers/iio/accel/bma400_core.c
index b612d0146d4d..623f37cbaf50 100644
--- a/drivers/iio/accel/bma400_core.c
+++ b/drivers/iio/accel/bma400_core.c
@@ -98,7 +98,6 @@ enum bma400_activity {
struct bma400_data {
struct device *dev;
struct regmap *regmap;
- struct regulator_bulk_data regulators[BMA400_NUM_REGULATORS];
struct mutex mutex; /* data register lock */
struct iio_mount_matrix orientation;
enum bma400_power_mode power_mode;
@@ -832,13 +831,6 @@ static void bma400_init_tables(void)
}
}
-static void bma400_regulators_disable(void *data_ptr)
-{
- struct bma400_data *data = data_ptr;
-
- regulator_bulk_disable(ARRAY_SIZE(data->regulators), data->regulators);
-}
-
static void bma400_power_disable(void *data_ptr)
{
struct bma400_data *data = data_ptr;
@@ -868,30 +860,17 @@ static enum iio_modifier bma400_act_to_mod(enum bma400_activity activity)
static int bma400_init(struct bma400_data *data)
{
+ static const char * const regulator_names[] = { "vdd", "vddio" };
unsigned int val;
int ret;
- data->regulators[BMA400_VDD_REGULATOR].supply = "vdd";
- data->regulators[BMA400_VDDIO_REGULATOR].supply = "vddio";
- ret = devm_regulator_bulk_get(data->dev,
- ARRAY_SIZE(data->regulators),
- data->regulators);
+ ret = devm_regulator_bulk_get_enable(data->dev,
+ ARRAY_SIZE(regulator_names),
+ regulator_names);
if (ret)
return dev_err_probe(data->dev, ret, "Failed to get regulators: %d\n",
ret);
- ret = regulator_bulk_enable(ARRAY_SIZE(data->regulators),
- data->regulators);
- if (ret) {
- dev_err(data->dev, "Failed to enable regulators: %d\n",
- ret);
- return ret;
- }
-
- ret = devm_add_action_or_reset(data->dev, bma400_regulators_disable, data);
- if (ret)
- return ret;
-
/* Try to read chip_id register. It must return 0x90. */
ret = regmap_read(data->regmap, BMA400_CHIP_ID_REG, &val);
if (ret) {
diff --git a/drivers/iio/accel/hid-sensor-accel-3d.c b/drivers/iio/accel/hid-sensor-accel-3d.c
index a2def6f9380a..5eac7ea19993 100644
--- a/drivers/iio/accel/hid-sensor-accel-3d.c
+++ b/drivers/iio/accel/hid-sensor-accel-3d.c
@@ -280,6 +280,7 @@ static int accel_3d_capture_sample(struct hid_sensor_hub_device *hsdev,
hid_sensor_convert_timestamp(
&accel_state->common_attributes,
*(int64_t *)raw_data);
+ ret = 0;
break;
default:
break;
diff --git a/drivers/iio/accel/mma9551_core.c b/drivers/iio/accel/mma9551_core.c
index 64ca7d7a9673..b898f865fb87 100644
--- a/drivers/iio/accel/mma9551_core.c
+++ b/drivers/iio/accel/mma9551_core.c
@@ -296,9 +296,12 @@ int mma9551_read_config_word(struct i2c_client *client, u8 app_id,
ret = mma9551_transfer(client, app_id, MMA9551_CMD_READ_CONFIG,
reg, NULL, 0, (u8 *)&v, 2);
+ if (ret < 0)
+ return ret;
+
*val = be16_to_cpu(v);
- return ret;
+ return 0;
}
EXPORT_SYMBOL_NS(mma9551_read_config_word, IIO_MMA9551);
@@ -354,9 +357,12 @@ int mma9551_read_status_word(struct i2c_client *client, u8 app_id,
ret = mma9551_transfer(client, app_id, MMA9551_CMD_READ_STATUS,
reg, NULL, 0, (u8 *)&v, 2);
+ if (ret < 0)
+ return ret;
+
*val = be16_to_cpu(v);
- return ret;
+ return 0;
}
EXPORT_SYMBOL_NS(mma9551_read_status_word, IIO_MMA9551);
diff --git a/drivers/iio/accel/st_accel.h b/drivers/iio/accel/st_accel.h
index 5b0f54e33d9e..56ed0c776d4a 100644
--- a/drivers/iio/accel/st_accel.h
+++ b/drivers/iio/accel/st_accel.h
@@ -37,6 +37,7 @@
#define LIS2DE12_ACCEL_DEV_NAME "lis2de12"
#define LIS2HH12_ACCEL_DEV_NAME "lis2hh12"
#define LIS302DL_ACCEL_DEV_NAME "lis302dl"
+#define LSM303C_ACCEL_DEV_NAME "lsm303c_accel"
#define SC7A20_ACCEL_DEV_NAME "sc7a20"
diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c
index c8c8eb15c34e..6b8562f684d5 100644
--- a/drivers/iio/accel/st_accel_core.c
+++ b/drivers/iio/accel/st_accel_core.c
@@ -929,6 +929,7 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
.wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
.sensors_supported = {
[0] = LIS2HH12_ACCEL_DEV_NAME,
+ [1] = LSM303C_ACCEL_DEV_NAME,
},
.ch = (struct iio_chan_spec *)st_accel_16bit_channels,
.odr = {
diff --git a/drivers/iio/accel/st_accel_i2c.c b/drivers/iio/accel/st_accel_i2c.c
index 45ee0ddc133c..3f02fd5d5946 100644
--- a/drivers/iio/accel/st_accel_i2c.c
+++ b/drivers/iio/accel/st_accel_i2c.c
@@ -112,6 +112,10 @@ static const struct of_device_id st_accel_of_match[] = {
.data = LIS302DL_ACCEL_DEV_NAME,
},
{
+ .compatible = "st,lsm303c-accel",
+ .data = LSM303C_ACCEL_DEV_NAME,
+ },
+ {
.compatible = "silan,sc7a20",
.data = SC7A20_ACCEL_DEV_NAME,
},
@@ -151,6 +155,7 @@ static const struct i2c_device_id st_accel_id_table[] = {
{ LIS2DE12_ACCEL_DEV_NAME },
{ LIS2HH12_ACCEL_DEV_NAME },
{ LIS302DL_ACCEL_DEV_NAME },
+ { LSM303C_ACCEL_DEV_NAME },
{ SC7A20_ACCEL_DEV_NAME },
{},
};
diff --git a/drivers/iio/accel/st_accel_spi.c b/drivers/iio/accel/st_accel_spi.c
index 6c0917750288..5740dc1820bd 100644
--- a/drivers/iio/accel/st_accel_spi.c
+++ b/drivers/iio/accel/st_accel_spi.c
@@ -96,6 +96,10 @@ static const struct of_device_id st_accel_of_match[] = {
.compatible = "st,lis302dl",
.data = LIS302DL_ACCEL_DEV_NAME,
},
+ {
+ .compatible = "st,lsm303c-accel",
+ .data = LSM303C_ACCEL_DEV_NAME,
+ },
{}
};
MODULE_DEVICE_TABLE(of, st_accel_of_match);
@@ -152,6 +156,7 @@ static const struct spi_device_id st_accel_id_table[] = {
{ LIS3DHH_ACCEL_DEV_NAME },
{ LIS3DE_ACCEL_DEV_NAME },
{ LIS302DL_ACCEL_DEV_NAME },
+ { LSM303C_ACCEL_DEV_NAME },
{},
};
MODULE_DEVICE_TABLE(spi, st_accel_id_table);
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index 63f80d747cbd..45af2302be53 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -441,7 +441,8 @@ config ENVELOPE_DETECTOR
config EP93XX_ADC
tristate "Cirrus Logic EP93XX ADC driver"
- depends on ARCH_EP93XX
+ depends on ARCH_EP93XX || COMPILE_TEST
+ depends on HAS_IOMEM
help
Driver for the ADC module on the EP93XX series of SoC from Cirrus Logic.
It's recommended to switch on CONFIG_HIGH_RES_TIMERS option, in this
@@ -452,11 +453,11 @@ config EP93XX_ADC
config EXYNOS_ADC
tristate "Exynos ADC driver support"
- depends on ARCH_EXYNOS || ARCH_S3C24XX || ARCH_S3C64XX || ARCH_S5PV210 || (OF && COMPILE_TEST)
+ depends on ARCH_EXYNOS || ARCH_S3C64XX || ARCH_S5PV210 || (OF && COMPILE_TEST)
depends on HAS_IOMEM
help
- Driver for the ADC block found in the Samsung S3C (S3C2410, S3C2416,
- S3C2440, S3C2443, S3C6410), S5Pv210 and Exynos SoCs.
+ Driver for the ADC block found in the Samsung S3C6410, S5Pv210 and
+ Exynos SoCs.
Choose Y here only if you build for such Samsung SoC.
To compile this driver as a module, choose M here: the module will be
@@ -565,6 +566,16 @@ config IMX8QXP_ADC
This driver can also be built as a module. If so, the module will be
called imx8qxp-adc.
+config IMX93_ADC
+ tristate "IMX93 ADC driver"
+ depends on ARCH_MXC || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ Say yes here to build support for IMX93 ADC.
+
+ This driver can also be built as a module. If so, the module will be
+ called imx93_adc.
+
config LP8788_ADC
tristate "LP8788 ADC driver"
depends on MFD_LP8788
@@ -1207,6 +1218,17 @@ config TI_ADS1015
This driver can also be built as a module. If so, the module will be
called ti-ads1015.
+config TI_ADS7924
+ tristate "Texas Instruments ADS7924 ADC"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ If you say yes here you get support for Texas Instruments ADS7924
+ 4 channels, 12-bit I2C ADC chip.
+
+ This driver can also be built as a module. If so, the module will be
+ called ti-ads7924.
+
config TI_ADS7950
tristate "Texas Instruments ADS7950 ADC driver"
depends on SPI && GPIOLIB
@@ -1274,6 +1296,16 @@ config TI_AM335X_ADC
To compile this driver as a module, choose M here: the module will be
called ti_am335x_adc.
+config TI_LMP92064
+ tristate "Texas Instruments LMP92064 ADC driver"
+ depends on SPI
+ help
+ Say yes here to build support for the LMP92064 Precision Current and Voltage
+ sensor.
+
+ This driver can also be built as a module. If so, the module will be called
+ ti-lmp92064.
+
config TI_TLC4541
tristate "Texas Instruments TLC4541 ADC driver"
depends on SPI
diff --git a/drivers/iio/adc/Makefile b/drivers/iio/adc/Makefile
index 4ef41a7dfac6..36c18177322a 100644
--- a/drivers/iio/adc/Makefile
+++ b/drivers/iio/adc/Makefile
@@ -49,6 +49,7 @@ obj-$(CONFIG_HI8435) += hi8435.o
obj-$(CONFIG_HX711) += hx711.o
obj-$(CONFIG_IMX7D_ADC) += imx7d_adc.o
obj-$(CONFIG_IMX8QXP_ADC) += imx8qxp-adc.o
+obj-$(CONFIG_IMX93_ADC) += imx93_adc.o
obj-$(CONFIG_INA2XX_ADC) += ina2xx-adc.o
obj-$(CONFIG_INGENIC_ADC) += ingenic-adc.o
obj-$(CONFIG_INTEL_MRFLD_ADC) += intel_mrfld_adc.o
@@ -107,12 +108,14 @@ obj-$(CONFIG_TI_ADC108S102) += ti-adc108s102.o
obj-$(CONFIG_TI_ADC128S052) += ti-adc128s052.o
obj-$(CONFIG_TI_ADC161S626) += ti-adc161s626.o
obj-$(CONFIG_TI_ADS1015) += ti-ads1015.o
+obj-$(CONFIG_TI_ADS7924) += ti-ads7924.o
obj-$(CONFIG_TI_ADS7950) += ti-ads7950.o
obj-$(CONFIG_TI_ADS8344) += ti-ads8344.o
obj-$(CONFIG_TI_ADS8688) += ti-ads8688.o
obj-$(CONFIG_TI_ADS124S08) += ti-ads124s08.o
obj-$(CONFIG_TI_ADS131E08) += ti-ads131e08.o
obj-$(CONFIG_TI_AM335X_ADC) += ti_am335x_adc.o
+obj-$(CONFIG_TI_LMP92064) += ti-lmp92064.o
obj-$(CONFIG_TI_TLC4541) += ti-tlc4541.o
obj-$(CONFIG_TI_TSC2046) += ti-tsc2046.o
obj-$(CONFIG_TWL4030_MADC) += twl4030-madc.o
diff --git a/drivers/iio/adc/ad7291.c b/drivers/iio/adc/ad7291.c
index 3dd0105f63d7..f9ee189925de 100644
--- a/drivers/iio/adc/ad7291.c
+++ b/drivers/iio/adc/ad7291.c
@@ -179,7 +179,7 @@ static unsigned int ad7291_threshold_reg(const struct iio_chan_spec *chan,
offset = AD7291_VOLTAGE_OFFSET;
break;
default:
- return 0;
+ return 0;
}
switch (info) {
diff --git a/drivers/iio/adc/at91-sama5d2_adc.c b/drivers/iio/adc/at91-sama5d2_adc.c
index ed4f8501bda8..50d02e5fc6fc 100644
--- a/drivers/iio/adc/at91-sama5d2_adc.c
+++ b/drivers/iio/adc/at91-sama5d2_adc.c
@@ -2181,7 +2181,7 @@ static ssize_t at91_adc_get_fifo_state(struct device *dev,
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct at91_adc_state *st = iio_priv(indio_dev);
- return scnprintf(buf, PAGE_SIZE, "%d\n", !!st->dma_st.dma_chan);
+ return sysfs_emit(buf, "%d\n", !!st->dma_st.dma_chan);
}
static ssize_t at91_adc_get_watermark(struct device *dev,
@@ -2190,7 +2190,7 @@ static ssize_t at91_adc_get_watermark(struct device *dev,
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct at91_adc_state *st = iio_priv(indio_dev);
- return scnprintf(buf, PAGE_SIZE, "%d\n", st->dma_st.watermark);
+ return sysfs_emit(buf, "%d\n", st->dma_st.watermark);
}
static IIO_DEVICE_ATTR(hwfifo_enabled, 0444,
diff --git a/drivers/iio/adc/berlin2-adc.c b/drivers/iio/adc/berlin2-adc.c
index 3d2e8b4db61a..a4e7c7eff5ac 100644
--- a/drivers/iio/adc/berlin2-adc.c
+++ b/drivers/iio/adc/berlin2-adc.c
@@ -298,8 +298,10 @@ static int berlin2_adc_probe(struct platform_device *pdev)
int ret;
indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*priv));
- if (!indio_dev)
+ if (!indio_dev) {
+ of_node_put(parent_np);
return -ENOMEM;
+ }
priv = iio_priv(indio_dev);
diff --git a/drivers/iio/adc/ep93xx_adc.c b/drivers/iio/adc/ep93xx_adc.c
index fd5a9404c8dc..a35e6cead67d 100644
--- a/drivers/iio/adc/ep93xx_adc.c
+++ b/drivers/iio/adc/ep93xx_adc.c
@@ -21,6 +21,7 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
+#include <linux/of.h>
/*
* This code could benefit from real HR Timers, but jiffy granularity would
@@ -227,9 +228,16 @@ static int ep93xx_adc_remove(struct platform_device *pdev)
return 0;
}
+static const struct of_device_id ep93xx_adc_of_ids[] = {
+ { .compatible = "cirrus,ep9301-adc" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, ep93xx_adc_of_ids);
+
static struct platform_driver ep93xx_adc_driver = {
.driver = {
.name = "ep93xx-adc",
+ .of_match_table = ep93xx_adc_of_ids,
},
.probe = ep93xx_adc_probe,
.remove = ep93xx_adc_remove,
diff --git a/drivers/iio/adc/imx8qxp-adc.c b/drivers/iio/adc/imx8qxp-adc.c
index 36777b827165..f5a0fc9e64c5 100644
--- a/drivers/iio/adc/imx8qxp-adc.c
+++ b/drivers/iio/adc/imx8qxp-adc.c
@@ -86,6 +86,8 @@
#define IMX8QXP_ADC_TIMEOUT msecs_to_jiffies(100)
+#define IMX8QXP_ADC_MAX_FIFO_SIZE 16
+
struct imx8qxp_adc {
struct device *dev;
void __iomem *regs;
@@ -95,6 +97,7 @@ struct imx8qxp_adc {
/* Serialise ADC channel reads */
struct mutex lock;
struct completion completion;
+ u32 fifo[IMX8QXP_ADC_MAX_FIFO_SIZE];
};
#define IMX8QXP_ADC_CHAN(_idx) { \
@@ -238,8 +241,7 @@ static int imx8qxp_adc_read_raw(struct iio_dev *indio_dev,
return ret;
}
- *val = FIELD_GET(IMX8QXP_ADC_RESFIFO_VAL_MASK,
- readl(adc->regs + IMX8QXP_ADR_ADC_RESFIFO));
+ *val = adc->fifo[0];
mutex_unlock(&adc->lock);
return IIO_VAL_INT;
@@ -265,10 +267,15 @@ static irqreturn_t imx8qxp_adc_isr(int irq, void *dev_id)
{
struct imx8qxp_adc *adc = dev_id;
u32 fifo_count;
+ int i;
fifo_count = FIELD_GET(IMX8QXP_ADC_FCTRL_FCOUNT_MASK,
readl(adc->regs + IMX8QXP_ADR_ADC_FCTRL));
+ for (i = 0; i < fifo_count; i++)
+ adc->fifo[i] = FIELD_GET(IMX8QXP_ADC_RESFIFO_VAL_MASK,
+ readl_relaxed(adc->regs + IMX8QXP_ADR_ADC_RESFIFO));
+
if (fifo_count)
complete(&adc->completion);
diff --git a/drivers/iio/adc/imx93_adc.c b/drivers/iio/adc/imx93_adc.c
new file mode 100644
index 000000000000..a775d2e40567
--- /dev/null
+++ b/drivers/iio/adc/imx93_adc.c
@@ -0,0 +1,484 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * NXP i.MX93 ADC driver
+ *
+ * Copyright 2023 NXP
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/err.h>
+#include <linux/iio/iio.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
+
+#define IMX93_ADC_DRIVER_NAME "imx93-adc"
+
+/* Register map definition */
+#define IMX93_ADC_MCR 0x00
+#define IMX93_ADC_MSR 0x04
+#define IMX93_ADC_ISR 0x10
+#define IMX93_ADC_IMR 0x20
+#define IMX93_ADC_CIMR0 0x24
+#define IMX93_ADC_CTR0 0x94
+#define IMX93_ADC_NCMR0 0xA4
+#define IMX93_ADC_PCDR0 0x100
+#define IMX93_ADC_PCDR1 0x104
+#define IMX93_ADC_PCDR2 0x108
+#define IMX93_ADC_PCDR3 0x10c
+#define IMX93_ADC_PCDR4 0x110
+#define IMX93_ADC_PCDR5 0x114
+#define IMX93_ADC_PCDR6 0x118
+#define IMX93_ADC_PCDR7 0x11c
+#define IMX93_ADC_CALSTAT 0x39C
+
+/* ADC bit shift */
+#define IMX93_ADC_MCR_MODE_MASK BIT(29)
+#define IMX93_ADC_MCR_NSTART_MASK BIT(24)
+#define IMX93_ADC_MCR_CALSTART_MASK BIT(14)
+#define IMX93_ADC_MCR_ADCLKSE_MASK BIT(8)
+#define IMX93_ADC_MCR_PWDN_MASK BIT(0)
+#define IMX93_ADC_MSR_CALFAIL_MASK BIT(30)
+#define IMX93_ADC_MSR_CALBUSY_MASK BIT(29)
+#define IMX93_ADC_MSR_ADCSTATUS_MASK GENMASK(2, 0)
+#define IMX93_ADC_ISR_ECH_MASK BIT(0)
+#define IMX93_ADC_ISR_EOC_MASK BIT(1)
+#define IMX93_ADC_ISR_EOC_ECH_MASK (IMX93_ADC_ISR_EOC_MASK | \
+ IMX93_ADC_ISR_ECH_MASK)
+#define IMX93_ADC_IMR_JEOC_MASK BIT(3)
+#define IMX93_ADC_IMR_JECH_MASK BIT(2)
+#define IMX93_ADC_IMR_EOC_MASK BIT(1)
+#define IMX93_ADC_IMR_ECH_MASK BIT(0)
+#define IMX93_ADC_PCDR_CDATA_MASK GENMASK(11, 0)
+
+/* ADC status */
+#define IMX93_ADC_MSR_ADCSTATUS_IDLE 0
+#define IMX93_ADC_MSR_ADCSTATUS_POWER_DOWN 1
+#define IMX93_ADC_MSR_ADCSTATUS_WAIT_STATE 2
+#define IMX93_ADC_MSR_ADCSTATUS_BUSY_IN_CALIBRATION 3
+#define IMX93_ADC_MSR_ADCSTATUS_SAMPLE 4
+#define IMX93_ADC_MSR_ADCSTATUS_CONVERSION 6
+
+#define IMX93_ADC_TIMEOUT msecs_to_jiffies(100)
+
+struct imx93_adc {
+ struct device *dev;
+ void __iomem *regs;
+ struct clk *ipg_clk;
+ int irq;
+ struct regulator *vref;
+ /* lock to protect against multiple access to the device */
+ struct mutex lock;
+ struct completion completion;
+};
+
+#define IMX93_ADC_CHAN(_idx) { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .channel = (_idx), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+}
+
+static const struct iio_chan_spec imx93_adc_iio_channels[] = {
+ IMX93_ADC_CHAN(0),
+ IMX93_ADC_CHAN(1),
+ IMX93_ADC_CHAN(2),
+ IMX93_ADC_CHAN(3),
+};
+
+static void imx93_adc_power_down(struct imx93_adc *adc)
+{
+ u32 mcr, msr;
+ int ret;
+
+ mcr = readl(adc->regs + IMX93_ADC_MCR);
+ mcr |= FIELD_PREP(IMX93_ADC_MCR_PWDN_MASK, 1);
+ writel(mcr, adc->regs + IMX93_ADC_MCR);
+
+ ret = readl_poll_timeout(adc->regs + IMX93_ADC_MSR, msr,
+ ((msr & IMX93_ADC_MSR_ADCSTATUS_MASK) ==
+ IMX93_ADC_MSR_ADCSTATUS_POWER_DOWN),
+ 1, 50);
+ if (ret == -ETIMEDOUT)
+ dev_warn(adc->dev,
+ "ADC do not in power down mode, current MSR is %x\n",
+ msr);
+}
+
+static void imx93_adc_power_up(struct imx93_adc *adc)
+{
+ u32 mcr;
+
+ /* bring ADC out of power down state, in idle state */
+ mcr = readl(adc->regs + IMX93_ADC_MCR);
+ mcr &= ~FIELD_PREP(IMX93_ADC_MCR_PWDN_MASK, 1);
+ writel(mcr, adc->regs + IMX93_ADC_MCR);
+}
+
+static void imx93_adc_config_ad_clk(struct imx93_adc *adc)
+{
+ u32 mcr;
+
+ /* put adc in power down mode */
+ imx93_adc_power_down(adc);
+
+ /* config the AD_CLK equal to bus clock */
+ mcr = readl(adc->regs + IMX93_ADC_MCR);
+ mcr |= FIELD_PREP(IMX93_ADC_MCR_ADCLKSE_MASK, 1);
+ writel(mcr, adc->regs + IMX93_ADC_MCR);
+
+ imx93_adc_power_up(adc);
+}
+
+static int imx93_adc_calibration(struct imx93_adc *adc)
+{
+ u32 mcr, msr;
+ int ret;
+
+ /* make sure ADC in power down mode */
+ imx93_adc_power_down(adc);
+
+ /* config SAR controller operating clock */
+ mcr = readl(adc->regs + IMX93_ADC_MCR);
+ mcr &= ~FIELD_PREP(IMX93_ADC_MCR_ADCLKSE_MASK, 1);
+ writel(mcr, adc->regs + IMX93_ADC_MCR);
+
+ imx93_adc_power_up(adc);
+
+ /*
+ * TODO: we use the default TSAMP/NRSMPL/AVGEN in MCR,
+ * can add the setting of these bit if need in future.
+ */
+
+ /* run calibration */
+ mcr = readl(adc->regs + IMX93_ADC_MCR);
+ mcr |= FIELD_PREP(IMX93_ADC_MCR_CALSTART_MASK, 1);
+ writel(mcr, adc->regs + IMX93_ADC_MCR);
+
+ /* wait calibration to be finished */
+ ret = readl_poll_timeout(adc->regs + IMX93_ADC_MSR, msr,
+ !(msr & IMX93_ADC_MSR_CALBUSY_MASK), 1000, 2000000);
+ if (ret == -ETIMEDOUT) {
+ dev_warn(adc->dev, "ADC do not finish calibration in 2 min!\n");
+ imx93_adc_power_down(adc);
+ return ret;
+ }
+
+ /* check whether calbration is success or not */
+ msr = readl(adc->regs + IMX93_ADC_MSR);
+ if (msr & IMX93_ADC_MSR_CALFAIL_MASK) {
+ dev_warn(adc->dev, "ADC calibration failed!\n");
+ imx93_adc_power_down(adc);
+ return -EAGAIN;
+ }
+
+ return 0;
+}
+
+static int imx93_adc_read_channel_conversion(struct imx93_adc *adc,
+ int channel_number,
+ int *result)
+{
+ u32 channel;
+ u32 imr, mcr, pcda;
+ long ret;
+
+ reinit_completion(&adc->completion);
+
+ /* config channel mask register */
+ channel = 1 << channel_number;
+ writel(channel, adc->regs + IMX93_ADC_NCMR0);
+
+ /* TODO: can config desired sample time in CTRn if need */
+
+ /* config interrupt mask */
+ imr = FIELD_PREP(IMX93_ADC_IMR_EOC_MASK, 1);
+ writel(imr, adc->regs + IMX93_ADC_IMR);
+ writel(channel, adc->regs + IMX93_ADC_CIMR0);
+
+ /* config one-shot mode */
+ mcr = readl(adc->regs + IMX93_ADC_MCR);
+ mcr &= ~FIELD_PREP(IMX93_ADC_MCR_MODE_MASK, 1);
+ writel(mcr, adc->regs + IMX93_ADC_MCR);
+
+ /* start normal conversion */
+ mcr = readl(adc->regs + IMX93_ADC_MCR);
+ mcr |= FIELD_PREP(IMX93_ADC_MCR_NSTART_MASK, 1);
+ writel(mcr, adc->regs + IMX93_ADC_MCR);
+
+ ret = wait_for_completion_interruptible_timeout(&adc->completion,
+ IMX93_ADC_TIMEOUT);
+ if (ret == 0)
+ return -ETIMEDOUT;
+
+ if (ret < 0)
+ return ret;
+
+ pcda = readl(adc->regs + IMX93_ADC_PCDR0 + channel_number * 4);
+
+ *result = FIELD_GET(IMX93_ADC_PCDR_CDATA_MASK, pcda);
+
+ return ret;
+}
+
+static int imx93_adc_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct imx93_adc *adc = iio_priv(indio_dev);
+ struct device *dev = adc->dev;
+ long ret;
+ u32 vref_uv;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ pm_runtime_get_sync(dev);
+ mutex_lock(&adc->lock);
+ ret = imx93_adc_read_channel_conversion(adc, chan->channel, val);
+ mutex_unlock(&adc->lock);
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_sync_autosuspend(dev);
+ if (ret < 0)
+ return ret;
+
+ return IIO_VAL_INT;
+
+ case IIO_CHAN_INFO_SCALE:
+ ret = vref_uv = regulator_get_voltage(adc->vref);
+ if (ret < 0)
+ return ret;
+ *val = vref_uv / 1000;
+ *val2 = 12;
+ return IIO_VAL_FRACTIONAL_LOG2;
+
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ *val = clk_get_rate(adc->ipg_clk);
+ return IIO_VAL_INT;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static irqreturn_t imx93_adc_isr(int irq, void *dev_id)
+{
+ struct imx93_adc *adc = dev_id;
+ u32 isr, eoc, unexpected;
+
+ isr = readl(adc->regs + IMX93_ADC_ISR);
+
+ if (FIELD_GET(IMX93_ADC_ISR_EOC_ECH_MASK, isr)) {
+ eoc = isr & IMX93_ADC_ISR_EOC_ECH_MASK;
+ writel(eoc, adc->regs + IMX93_ADC_ISR);
+ complete(&adc->completion);
+ }
+
+ unexpected = isr & ~IMX93_ADC_ISR_EOC_ECH_MASK;
+ if (unexpected) {
+ writel(unexpected, adc->regs + IMX93_ADC_ISR);
+ dev_err(adc->dev, "Unexpected interrupt 0x%08x.\n", unexpected);
+ return IRQ_NONE;
+ }
+
+ return IRQ_HANDLED;
+}
+
+static const struct iio_info imx93_adc_iio_info = {
+ .read_raw = &imx93_adc_read_raw,
+};
+
+static int imx93_adc_probe(struct platform_device *pdev)
+{
+ struct imx93_adc *adc;
+ struct iio_dev *indio_dev;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*adc));
+ if (!indio_dev)
+ return dev_err_probe(dev, -ENOMEM,
+ "Failed allocating iio device\n");
+
+ adc = iio_priv(indio_dev);
+ adc->dev = dev;
+
+ mutex_init(&adc->lock);
+ adc->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(adc->regs))
+ return dev_err_probe(dev, PTR_ERR(adc->regs),
+ "Failed getting ioremap resource\n");
+
+ /* The third irq is for ADC conversion usage */
+ adc->irq = platform_get_irq(pdev, 2);
+ if (adc->irq < 0)
+ return adc->irq;
+
+ adc->ipg_clk = devm_clk_get(dev, "ipg");
+ if (IS_ERR(adc->ipg_clk))
+ return dev_err_probe(dev, PTR_ERR(adc->ipg_clk),
+ "Failed getting clock.\n");
+
+ adc->vref = devm_regulator_get(dev, "vref");
+ if (IS_ERR(adc->vref))
+ return dev_err_probe(dev, PTR_ERR(adc->vref),
+ "Failed getting reference voltage.\n");
+
+ ret = regulator_enable(adc->vref);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to enable reference voltage.\n");
+
+ platform_set_drvdata(pdev, indio_dev);
+
+ init_completion(&adc->completion);
+
+ indio_dev->name = "imx93-adc";
+ indio_dev->info = &imx93_adc_iio_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = imx93_adc_iio_channels;
+ indio_dev->num_channels = ARRAY_SIZE(imx93_adc_iio_channels);
+
+ ret = clk_prepare_enable(adc->ipg_clk);
+ if (ret) {
+ dev_err_probe(dev, ret,
+ "Failed to enable ipg clock.\n");
+ goto error_regulator_disable;
+ }
+
+ ret = request_irq(adc->irq, imx93_adc_isr, 0, IMX93_ADC_DRIVER_NAME, adc);
+ if (ret < 0) {
+ dev_err_probe(dev, ret,
+ "Failed requesting irq, irq = %d\n", adc->irq);
+ goto error_ipg_clk_disable;
+ }
+
+ ret = imx93_adc_calibration(adc);
+ if (ret < 0)
+ goto error_free_adc_irq;
+
+ imx93_adc_config_ad_clk(adc);
+
+ ret = iio_device_register(indio_dev);
+ if (ret) {
+ dev_err_probe(dev, ret,
+ "Failed to register this iio device.\n");
+ goto error_adc_power_down;
+ }
+
+ pm_runtime_set_active(dev);
+ pm_runtime_set_autosuspend_delay(dev, 50);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_enable(dev);
+
+ return 0;
+
+error_adc_power_down:
+ imx93_adc_power_down(adc);
+error_free_adc_irq:
+ free_irq(adc->irq, adc);
+error_ipg_clk_disable:
+ clk_disable_unprepare(adc->ipg_clk);
+error_regulator_disable:
+ regulator_disable(adc->vref);
+
+ return ret;
+}
+
+static int imx93_adc_remove(struct platform_device *pdev)
+{
+ struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+ struct imx93_adc *adc = iio_priv(indio_dev);
+ struct device *dev = adc->dev;
+
+ /* adc power down need clock on */
+ pm_runtime_get_sync(dev);
+
+ pm_runtime_disable(dev);
+ pm_runtime_dont_use_autosuspend(dev);
+ pm_runtime_put_noidle(dev);
+
+ iio_device_unregister(indio_dev);
+ imx93_adc_power_down(adc);
+ free_irq(adc->irq, adc);
+ clk_disable_unprepare(adc->ipg_clk);
+ regulator_disable(adc->vref);
+
+ return 0;
+}
+
+static int imx93_adc_runtime_suspend(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct imx93_adc *adc = iio_priv(indio_dev);
+
+ imx93_adc_power_down(adc);
+ clk_disable_unprepare(adc->ipg_clk);
+ regulator_disable(adc->vref);
+
+ return 0;
+}
+
+static int imx93_adc_runtime_resume(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct imx93_adc *adc = iio_priv(indio_dev);
+ int ret;
+
+ ret = regulator_enable(adc->vref);
+ if (ret) {
+ dev_err(dev,
+ "Can't enable adc reference top voltage, err = %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(adc->ipg_clk);
+ if (ret) {
+ dev_err(dev, "Could not prepare or enable clock.\n");
+ goto err_disable_reg;
+ }
+
+ imx93_adc_power_up(adc);
+
+ return 0;
+
+err_disable_reg:
+ regulator_disable(adc->vref);
+
+ return ret;
+}
+
+static DEFINE_RUNTIME_DEV_PM_OPS(imx93_adc_pm_ops,
+ imx93_adc_runtime_suspend,
+ imx93_adc_runtime_resume, NULL);
+
+static const struct of_device_id imx93_adc_match[] = {
+ { .compatible = "nxp,imx93-adc", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, imx93_adc_match);
+
+static struct platform_driver imx93_adc_driver = {
+ .probe = imx93_adc_probe,
+ .remove = imx93_adc_remove,
+ .driver = {
+ .name = IMX93_ADC_DRIVER_NAME,
+ .of_match_table = imx93_adc_match,
+ .pm = pm_ptr(&imx93_adc_pm_ops),
+ },
+};
+
+module_platform_driver(imx93_adc_driver);
+
+MODULE_DESCRIPTION("NXP i.MX93 ADC driver");
+MODULE_AUTHOR("Haibo Chen <haibo.chen@nxp.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/adc/max11410.c b/drivers/iio/adc/max11410.c
index fdc9f03135b5..b74b689ee7de 100644
--- a/drivers/iio/adc/max11410.c
+++ b/drivers/iio/adc/max11410.c
@@ -4,7 +4,6 @@
*
* Copyright 2022 Analog Devices Inc.
*/
-#include <asm-generic/unaligned.h>
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/device.h>
@@ -16,6 +15,8 @@
#include <linux/regulator/consumer.h>
#include <linux/spi/spi.h>
+#include <asm/unaligned.h>
+
#include <linux/iio/buffer.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/trigger.h>
diff --git a/drivers/iio/adc/qcom-spmi-adc5.c b/drivers/iio/adc/qcom-spmi-adc5.c
index 821fee60a765..e90c299c913a 100644
--- a/drivers/iio/adc/qcom-spmi-adc5.c
+++ b/drivers/iio/adc/qcom-spmi-adc5.c
@@ -543,6 +543,8 @@ static const struct adc5_channels adc5_chans_pmic[ADC5_MAX_CHANNEL] = {
SCALE_HW_CALIB_DEFAULT)
[ADC5_XO_THERM_100K_PU] = ADC5_CHAN_TEMP("xo_therm", 0,
SCALE_HW_CALIB_XOTHERM)
+ [ADC5_BAT_ID_100K_PU] = ADC5_CHAN_TEMP("bat_id", 0,
+ SCALE_HW_CALIB_DEFAULT)
[ADC5_AMUX_THM1_100K_PU] = ADC5_CHAN_TEMP("amux_thm1_100k_pu", 0,
SCALE_HW_CALIB_THERM_100K_PULLUP)
[ADC5_AMUX_THM2_100K_PU] = ADC5_CHAN_TEMP("amux_thm2_100k_pu", 0,
@@ -894,10 +896,8 @@ static int adc5_probe(struct platform_device *pdev)
mutex_init(&adc->lock);
ret = adc5_get_fw_data(adc);
- if (ret) {
- dev_err(dev, "adc get dt data failed\n");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "adc get dt data failed\n");
irq_eoc = platform_get_irq(pdev, 0);
if (irq_eoc < 0) {
diff --git a/drivers/iio/adc/stm32-dfsdm-adc.c b/drivers/iio/adc/stm32-dfsdm-adc.c
index 6d21ea84fa82..a428bdb567d5 100644
--- a/drivers/iio/adc/stm32-dfsdm-adc.c
+++ b/drivers/iio/adc/stm32-dfsdm-adc.c
@@ -1520,6 +1520,7 @@ static const struct of_device_id stm32_dfsdm_adc_match[] = {
},
{}
};
+MODULE_DEVICE_TABLE(of, stm32_dfsdm_adc_match);
static int stm32_dfsdm_adc_probe(struct platform_device *pdev)
{
diff --git a/drivers/iio/adc/stm32-dfsdm-core.c b/drivers/iio/adc/stm32-dfsdm-core.c
index a3d4de6ba4c2..0362df285a57 100644
--- a/drivers/iio/adc/stm32-dfsdm-core.c
+++ b/drivers/iio/adc/stm32-dfsdm-core.c
@@ -6,6 +6,7 @@
* Author(s): Arnaud Pouliquen <arnaud.pouliquen@st.com> for STMicroelectronics.
*/
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
@@ -19,7 +20,15 @@
#include "stm32-dfsdm.h"
+/**
+ * struct stm32_dfsdm_dev_data - DFSDM compatible configuration data
+ * @ipid: DFSDM identification number. Used only if hardware provides identification registers
+ * @num_filters: DFSDM number of filters. Unused if identification registers are available
+ * @num_channels: DFSDM number of channels. Unused if identification registers are available
+ * @regmap_cfg: SAI register map configuration pointer
+ */
struct stm32_dfsdm_dev_data {
+ u32 ipid;
unsigned int num_filters;
unsigned int num_channels;
const struct regmap_config *regmap_cfg;
@@ -27,8 +36,6 @@ struct stm32_dfsdm_dev_data {
#define STM32H7_DFSDM_NUM_FILTERS 4
#define STM32H7_DFSDM_NUM_CHANNELS 8
-#define STM32MP1_DFSDM_NUM_FILTERS 6
-#define STM32MP1_DFSDM_NUM_CHANNELS 8
static bool stm32_dfsdm_volatile_reg(struct device *dev, unsigned int reg)
{
@@ -75,8 +82,7 @@ static const struct regmap_config stm32mp1_dfsdm_regmap_cfg = {
};
static const struct stm32_dfsdm_dev_data stm32mp1_dfsdm_data = {
- .num_filters = STM32MP1_DFSDM_NUM_FILTERS,
- .num_channels = STM32MP1_DFSDM_NUM_CHANNELS,
+ .ipid = STM32MP15_IPIDR_NUMBER,
.regmap_cfg = &stm32mp1_dfsdm_regmap_cfg,
};
@@ -295,6 +301,65 @@ static const struct of_device_id stm32_dfsdm_of_match[] = {
};
MODULE_DEVICE_TABLE(of, stm32_dfsdm_of_match);
+static int stm32_dfsdm_probe_identification(struct platform_device *pdev,
+ struct dfsdm_priv *priv,
+ const struct stm32_dfsdm_dev_data *dev_data)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct device_node *child;
+ struct stm32_dfsdm *dfsdm = &priv->dfsdm;
+ const char *compat;
+ int ret, count = 0;
+ u32 id, val;
+
+ if (!dev_data->ipid) {
+ dfsdm->num_fls = dev_data->num_filters;
+ dfsdm->num_chs = dev_data->num_channels;
+ return 0;
+ }
+
+ ret = regmap_read(dfsdm->regmap, DFSDM_IPIDR, &id);
+ if (ret)
+ return ret;
+
+ if (id != dev_data->ipid) {
+ dev_err(&pdev->dev, "Unexpected IP version: 0x%x", id);
+ return -EINVAL;
+ }
+
+ for_each_child_of_node(np, child) {
+ ret = of_property_read_string(child, "compatible", &compat);
+ if (ret)
+ continue;
+ /* Count only child nodes with dfsdm compatible */
+ if (strstr(compat, "dfsdm"))
+ count++;
+ }
+
+ ret = regmap_read(dfsdm->regmap, DFSDM_HWCFGR, &val);
+ if (ret)
+ return ret;
+
+ dfsdm->num_fls = FIELD_GET(DFSDM_HWCFGR_NBF_MASK, val);
+ dfsdm->num_chs = FIELD_GET(DFSDM_HWCFGR_NBT_MASK, val);
+
+ if (count > dfsdm->num_fls) {
+ dev_err(&pdev->dev, "Unexpected child number: %d", count);
+ return -EINVAL;
+ }
+
+ ret = regmap_read(dfsdm->regmap, DFSDM_VERR, &val);
+ if (ret)
+ return ret;
+
+ dev_dbg(&pdev->dev, "DFSDM version: %lu.%lu. %d channels/%d filters\n",
+ FIELD_GET(DFSDM_VERR_MAJREV_MASK, val),
+ FIELD_GET(DFSDM_VERR_MINREV_MASK, val),
+ dfsdm->num_chs, dfsdm->num_fls);
+
+ return 0;
+}
+
static int stm32_dfsdm_probe(struct platform_device *pdev)
{
struct dfsdm_priv *priv;
@@ -311,18 +376,6 @@ static int stm32_dfsdm_probe(struct platform_device *pdev)
dev_data = of_device_get_match_data(&pdev->dev);
dfsdm = &priv->dfsdm;
- dfsdm->fl_list = devm_kcalloc(&pdev->dev, dev_data->num_filters,
- sizeof(*dfsdm->fl_list), GFP_KERNEL);
- if (!dfsdm->fl_list)
- return -ENOMEM;
-
- dfsdm->num_fls = dev_data->num_filters;
- dfsdm->ch_list = devm_kcalloc(&pdev->dev, dev_data->num_channels,
- sizeof(*dfsdm->ch_list),
- GFP_KERNEL);
- if (!dfsdm->ch_list)
- return -ENOMEM;
- dfsdm->num_chs = dev_data->num_channels;
ret = stm32_dfsdm_parse_of(pdev, priv);
if (ret < 0)
@@ -338,6 +391,20 @@ static int stm32_dfsdm_probe(struct platform_device *pdev)
return ret;
}
+ ret = stm32_dfsdm_probe_identification(pdev, priv, dev_data);
+ if (ret < 0)
+ return ret;
+
+ dfsdm->fl_list = devm_kcalloc(&pdev->dev, dfsdm->num_fls,
+ sizeof(*dfsdm->fl_list), GFP_KERNEL);
+ if (!dfsdm->fl_list)
+ return -ENOMEM;
+
+ dfsdm->ch_list = devm_kcalloc(&pdev->dev, dfsdm->num_chs,
+ sizeof(*dfsdm->ch_list), GFP_KERNEL);
+ if (!dfsdm->ch_list)
+ return -ENOMEM;
+
platform_set_drvdata(pdev, dfsdm);
ret = stm32_dfsdm_clk_prepare_enable(dfsdm);
diff --git a/drivers/iio/adc/stm32-dfsdm.h b/drivers/iio/adc/stm32-dfsdm.h
index 4afc1f528b78..570a1552aec4 100644
--- a/drivers/iio/adc/stm32-dfsdm.h
+++ b/drivers/iio/adc/stm32-dfsdm.h
@@ -13,25 +13,29 @@
/*
* STM32 DFSDM - global register map
- * ________________________________________________________
- * | Offset | Registers block |
- * --------------------------------------------------------
- * | 0x000 | CHANNEL 0 + COMMON CHANNEL FIELDS |
- * --------------------------------------------------------
- * | 0x020 | CHANNEL 1 |
- * --------------------------------------------------------
- * | ... | ..... |
- * --------------------------------------------------------
- * | 0x0E0 | CHANNEL 7 |
- * --------------------------------------------------------
- * | 0x100 | FILTER 0 + COMMON FILTER FIELDs |
- * --------------------------------------------------------
- * | 0x200 | FILTER 1 |
- * --------------------------------------------------------
- * | 0x300 | FILTER 2 |
- * --------------------------------------------------------
- * | 0x400 | FILTER 3 |
- * --------------------------------------------------------
+ * __________________________________________________________
+ * | Offset | Registers block |
+ * ----------------------------------------------------------
+ * | 0x000 | CHANNEL 0 + COMMON CHANNEL FIELDS |
+ * ----------------------------------------------------------
+ * | 0x020 | CHANNEL 1 |
+ * ----------------------------------------------------------
+ * | ... | ..... |
+ * ----------------------------------------------------------
+ * | 0x20 x n | CHANNEL n |
+ * ----------------------------------------------------------
+ * | 0x100 | FILTER 0 + COMMON FILTER FIELDs |
+ * ----------------------------------------------------------
+ * | 0x200 | FILTER 1 |
+ * ----------------------------------------------------------
+ * | | ..... |
+ * ----------------------------------------------------------
+ * | 0x100 x m | FILTER m |
+ * ----------------------------------------------------------
+ * | | ..... |
+ * ----------------------------------------------------------
+ * | 0x7F0-7FC | Identification registers |
+ * ----------------------------------------------------------
*/
/*
@@ -231,6 +235,24 @@
#define DFSDM_AWCFR_AWHTF_MASK GENMASK(15, 8)
#define DFSDM_AWCFR_AWHTF(v) FIELD_PREP(DFSDM_AWCFR_AWHTF_MASK, v)
+/*
+ * Identification register definitions
+ */
+#define DFSDM_HWCFGR 0x7F0
+#define DFSDM_VERR 0x7F4
+#define DFSDM_IPIDR 0x7F8
+#define DFSDM_SIDR 0x7FC
+
+/* HWCFGR: Hardware configuration register */
+#define DFSDM_HWCFGR_NBT_MASK GENMASK(7, 0)
+#define DFSDM_HWCFGR_NBF_MASK GENMASK(15, 8)
+
+/* VERR: Version register */
+#define DFSDM_VERR_MINREV_MASK GENMASK(3, 0)
+#define DFSDM_VERR_MAJREV_MASK GENMASK(7, 4)
+
+#define STM32MP15_IPIDR_NUMBER 0x00110031
+
/* DFSDM filter order */
enum stm32_dfsdm_sinc_order {
DFSDM_FASTSINC_ORDER, /* FastSinc filter type */
diff --git a/drivers/iio/adc/ti-adc128s052.c b/drivers/iio/adc/ti-adc128s052.c
index b3d5b9b7255b..a456ea78462f 100644
--- a/drivers/iio/adc/ti-adc128s052.c
+++ b/drivers/iio/adc/ti-adc128s052.c
@@ -9,14 +9,13 @@
* https://www.ti.com/lit/ds/symlink/adc124s021.pdf
*/
-#include <linux/acpi.h>
#include <linux/err.h>
-#include <linux/spi/spi.h>
-#include <linux/module.h>
-#include <linux/mod_devicetable.h>
#include <linux/iio/iio.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
#include <linux/property.h>
#include <linux/regulator/consumer.h>
+#include <linux/spi/spi.h>
struct adc128_configuration {
const struct iio_chan_spec *channels;
@@ -139,16 +138,11 @@ static void adc128_disable_regulator(void *reg)
static int adc128_probe(struct spi_device *spi)
{
+ const struct adc128_configuration *config;
struct iio_dev *indio_dev;
- unsigned int config;
struct adc128 *adc;
int ret;
- if (dev_fwnode(&spi->dev))
- config = (unsigned long) device_get_match_data(&spi->dev);
- else
- config = spi_get_device_id(spi)->driver_data;
-
indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*adc));
if (!indio_dev)
return -ENOMEM;
@@ -160,8 +154,10 @@ static int adc128_probe(struct spi_device *spi)
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->info = &adc128_info;
- indio_dev->channels = adc128_config[config].channels;
- indio_dev->num_channels = adc128_config[config].num_channels;
+ config = spi_get_device_match_data(spi);
+
+ indio_dev->channels = config->channels;
+ indio_dev->num_channels = config->num_channels;
adc->reg = devm_regulator_get(&spi->dev, "vref");
if (IS_ERR(adc->reg))
@@ -181,42 +177,40 @@ static int adc128_probe(struct spi_device *spi)
}
static const struct of_device_id adc128_of_match[] = {
- { .compatible = "ti,adc128s052", .data = (void*)0L, },
- { .compatible = "ti,adc122s021", .data = (void*)1L, },
- { .compatible = "ti,adc122s051", .data = (void*)1L, },
- { .compatible = "ti,adc122s101", .data = (void*)1L, },
- { .compatible = "ti,adc124s021", .data = (void*)2L, },
- { .compatible = "ti,adc124s051", .data = (void*)2L, },
- { .compatible = "ti,adc124s101", .data = (void*)2L, },
+ { .compatible = "ti,adc128s052", .data = &adc128_config[0] },
+ { .compatible = "ti,adc122s021", .data = &adc128_config[1] },
+ { .compatible = "ti,adc122s051", .data = &adc128_config[1] },
+ { .compatible = "ti,adc122s101", .data = &adc128_config[1] },
+ { .compatible = "ti,adc124s021", .data = &adc128_config[2] },
+ { .compatible = "ti,adc124s051", .data = &adc128_config[2] },
+ { .compatible = "ti,adc124s101", .data = &adc128_config[2] },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, adc128_of_match);
static const struct spi_device_id adc128_id[] = {
- { "adc128s052", 0 }, /* index into adc128_config */
- { "adc122s021", 1 },
- { "adc122s051", 1 },
- { "adc122s101", 1 },
- { "adc124s021", 2 },
- { "adc124s051", 2 },
- { "adc124s101", 2 },
+ { "adc128s052", (kernel_ulong_t)&adc128_config[0] },
+ { "adc122s021", (kernel_ulong_t)&adc128_config[1] },
+ { "adc122s051", (kernel_ulong_t)&adc128_config[1] },
+ { "adc122s101", (kernel_ulong_t)&adc128_config[1] },
+ { "adc124s021", (kernel_ulong_t)&adc128_config[2] },
+ { "adc124s051", (kernel_ulong_t)&adc128_config[2] },
+ { "adc124s101", (kernel_ulong_t)&adc128_config[2] },
{ }
};
MODULE_DEVICE_TABLE(spi, adc128_id);
-#ifdef CONFIG_ACPI
static const struct acpi_device_id adc128_acpi_match[] = {
- { "AANT1280", 2 }, /* ADC124S021 compatible ACPI ID */
+ { "AANT1280", (kernel_ulong_t)&adc128_config[2] },
{ }
};
MODULE_DEVICE_TABLE(acpi, adc128_acpi_match);
-#endif
static struct spi_driver adc128_driver = {
.driver = {
.name = "adc128s052",
.of_match_table = adc128_of_match,
- .acpi_match_table = ACPI_PTR(adc128_acpi_match),
+ .acpi_match_table = adc128_acpi_match,
},
.probe = adc128_probe,
.id_table = adc128_id,
diff --git a/drivers/iio/adc/ti-ads7924.c b/drivers/iio/adc/ti-ads7924.c
new file mode 100644
index 000000000000..b02abb026966
--- /dev/null
+++ b/drivers/iio/adc/ti-ads7924.c
@@ -0,0 +1,474 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * IIO driver for Texas Instruments ADS7924 ADC, 12-bit, 4-Channels, I2C
+ *
+ * Author: Hugo Villeneuve <hvilleneuve@dimonoff.com>
+ * Copyright 2022 DimOnOff
+ *
+ * based on iio/adc/ti-ads1015.c
+ * Copyright (c) 2016, Intel Corporation.
+ *
+ * Datasheet: https://www.ti.com/lit/gpn/ads7924
+ */
+
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/types.h>
+
+#define ADS7924_CHANNELS 4
+#define ADS7924_BITS 12
+#define ADS7924_DATA_SHIFT 4
+
+/* Registers. */
+#define ADS7924_MODECNTRL_REG 0x00
+#define ADS7924_INTCNTRL_REG 0x01
+#define ADS7924_DATA0_U_REG 0x02
+#define ADS7924_DATA0_L_REG 0x03
+#define ADS7924_DATA1_U_REG 0x04
+#define ADS7924_DATA1_L_REG 0x05
+#define ADS7924_DATA2_U_REG 0x06
+#define ADS7924_DATA2_L_REG 0x07
+#define ADS7924_DATA3_U_REG 0x08
+#define ADS7924_DATA3_L_REG 0x09
+#define ADS7924_ULR0_REG 0x0A
+#define ADS7924_LLR0_REG 0x0B
+#define ADS7924_ULR1_REG 0x0C
+#define ADS7924_LLR1_REG 0x0D
+#define ADS7924_ULR2_REG 0x0E
+#define ADS7924_LLR2_REG 0x0F
+#define ADS7924_ULR3_REG 0x10
+#define ADS7924_LLR3_REG 0x11
+#define ADS7924_INTCONFIG_REG 0x12
+#define ADS7924_SLPCONFIG_REG 0x13
+#define ADS7924_ACQCONFIG_REG 0x14
+#define ADS7924_PWRCONFIG_REG 0x15
+#define ADS7924_RESET_REG 0x16
+
+/*
+ * Register address INC bit: when set to '1', the register address is
+ * automatically incremented after every register read which allows convenient
+ * reading of multiple registers. Set INC to '0' when reading a single register.
+ */
+#define ADS7924_AUTO_INCREMENT_BIT BIT(7)
+
+#define ADS7924_MODECNTRL_MODE_MASK GENMASK(7, 2)
+
+#define ADS7924_MODECNTRL_SEL_MASK GENMASK(1, 0)
+
+#define ADS7924_CFG_INTPOL_BIT 1
+#define ADS7924_CFG_INTTRIG_BIT 0
+
+#define ADS7924_CFG_INTPOL_MASK BIT(ADS7924_CFG_INTPOL_BIT)
+#define ADS7924_CFG_INTTRIG_MASK BIT(ADS7924_CFG_INTTRIG_BIT)
+
+/* Interrupt pin polarity */
+#define ADS7924_CFG_INTPOL_LOW 0
+#define ADS7924_CFG_INTPOL_HIGH 1
+
+/* Interrupt pin signaling */
+#define ADS7924_CFG_INTTRIG_LEVEL 0
+#define ADS7924_CFG_INTTRIG_EDGE 1
+
+/* Mode control values */
+#define ADS7924_MODECNTRL_IDLE 0x00
+#define ADS7924_MODECNTRL_AWAKE 0x20
+#define ADS7924_MODECNTRL_MANUAL_SINGLE 0x30
+#define ADS7924_MODECNTRL_MANUAL_SCAN 0x32
+#define ADS7924_MODECNTRL_AUTO_SINGLE 0x31
+#define ADS7924_MODECNTRL_AUTO_SCAN 0x33
+#define ADS7924_MODECNTRL_AUTO_SINGLE_SLEEP 0x39
+#define ADS7924_MODECNTRL_AUTO_SCAN_SLEEP 0x3B
+#define ADS7924_MODECNTRL_AUTO_BURST_SLEEP 0x3F
+
+#define ADS7924_ACQTIME_MASK GENMASK(4, 0)
+
+#define ADS7924_PWRUPTIME_MASK GENMASK(4, 0)
+
+/*
+ * The power-up time is allowed to elapse whenever the device has been shutdown
+ * in idle mode. Power-up time can allow external circuits, such as an
+ * operational amplifier, between the MUXOUT and ADCIN pins to turn on.
+ * The nominal time programmed by the PUTIME[4:0] register bits is given by:
+ * t PU = PWRUPTIME[4:0] × 2 μs
+ * If a power-up time is not required, set the bits to '0' to effectively bypass.
+ */
+#define ADS7924_PWRUPTIME_US 0 /* Bypass (0us). */
+
+/*
+ * Acquisition Time according to ACQTIME[4:0] register bits.
+ * The Acquisition Time is given by:
+ * t ACQ = (ACQTIME[4:0] × 2 μs) + 6 μs
+ * Using default value of 0 for ACQTIME[4:0] results in a minimum acquisition
+ * time of 6us.
+ */
+#define ADS7924_ACQTIME_US 6
+
+/* The conversion time is always 4μs and cannot be programmed by the user. */
+#define ADS7924_CONVTIME_US 4
+
+#define ADS7924_TOTAL_CONVTIME_US (ADS7924_PWRUPTIME_US + ADS7924_ACQTIME_US + \
+ ADS7924_CONVTIME_US)
+
+#define ADS7924_V_CHAN(_chan, _addr) { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .channel = _chan, \
+ .address = _addr, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .datasheet_name = "AIN"#_chan, \
+}
+
+struct ads7924_data {
+ struct device *dev;
+ struct regmap *regmap;
+ struct regulator *vref_reg;
+
+ /* GPIO descriptor for device hard-reset pin. */
+ struct gpio_desc *reset_gpio;
+
+ /*
+ * Protects ADC ops, e.g: concurrent sysfs/buffered
+ * data reads, configuration updates
+ */
+ struct mutex lock;
+
+ /*
+ * Set to true when the ADC is switched to the continuous-conversion
+ * mode and exits from a power-down state. This flag is used to avoid
+ * getting the stale result from the conversion register.
+ */
+ bool conv_invalid;
+};
+
+static bool ads7924_is_writeable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case ADS7924_MODECNTRL_REG:
+ case ADS7924_INTCNTRL_REG:
+ case ADS7924_ULR0_REG:
+ case ADS7924_LLR0_REG:
+ case ADS7924_ULR1_REG:
+ case ADS7924_LLR1_REG:
+ case ADS7924_ULR2_REG:
+ case ADS7924_LLR2_REG:
+ case ADS7924_ULR3_REG:
+ case ADS7924_LLR3_REG:
+ case ADS7924_INTCONFIG_REG:
+ case ADS7924_SLPCONFIG_REG:
+ case ADS7924_ACQCONFIG_REG:
+ case ADS7924_PWRCONFIG_REG:
+ case ADS7924_RESET_REG:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static const struct regmap_config ads7924_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = ADS7924_RESET_REG,
+ .writeable_reg = ads7924_is_writeable_reg,
+};
+
+static const struct iio_chan_spec ads7924_channels[] = {
+ ADS7924_V_CHAN(0, ADS7924_DATA0_U_REG),
+ ADS7924_V_CHAN(1, ADS7924_DATA1_U_REG),
+ ADS7924_V_CHAN(2, ADS7924_DATA2_U_REG),
+ ADS7924_V_CHAN(3, ADS7924_DATA3_U_REG),
+};
+
+static int ads7924_get_adc_result(struct ads7924_data *data,
+ struct iio_chan_spec const *chan, int *val)
+{
+ int ret;
+ __be16 be_val;
+
+ if (chan->channel < 0 || chan->channel >= ADS7924_CHANNELS)
+ return -EINVAL;
+
+ if (data->conv_invalid) {
+ int conv_time;
+
+ conv_time = ADS7924_TOTAL_CONVTIME_US;
+ /* Allow 10% for internal clock inaccuracy. */
+ conv_time += conv_time / 10;
+ usleep_range(conv_time, conv_time + 1);
+ data->conv_invalid = false;
+ }
+
+ ret = regmap_raw_read(data->regmap, ADS7924_AUTO_INCREMENT_BIT |
+ chan->address, &be_val, sizeof(be_val));
+ if (ret)
+ return ret;
+
+ *val = be16_to_cpu(be_val) >> ADS7924_DATA_SHIFT;
+
+ return 0;
+}
+
+static int ads7924_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val,
+ int *val2, long mask)
+{
+ int ret, vref_uv;
+ struct ads7924_data *data = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ mutex_lock(&data->lock);
+ ret = ads7924_get_adc_result(data, chan, val);
+ mutex_unlock(&data->lock);
+ if (ret < 0)
+ return ret;
+
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ vref_uv = regulator_get_voltage(data->vref_reg);
+ if (vref_uv < 0)
+ return vref_uv;
+
+ *val = vref_uv / 1000; /* Convert reg voltage to mV */
+ *val2 = ADS7924_BITS;
+ return IIO_VAL_FRACTIONAL_LOG2;
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_info ads7924_info = {
+ .read_raw = ads7924_read_raw,
+};
+
+static int ads7924_get_channels_config(struct i2c_client *client,
+ struct iio_dev *indio_dev)
+{
+ struct ads7924_data *priv = iio_priv(indio_dev);
+ struct device *dev = priv->dev;
+ struct fwnode_handle *node;
+ int num_channels = 0;
+
+ device_for_each_child_node(dev, node) {
+ u32 pval;
+ unsigned int channel;
+
+ if (fwnode_property_read_u32(node, "reg", &pval)) {
+ dev_err(dev, "invalid reg on %pfw\n", node);
+ continue;
+ }
+
+ channel = pval;
+ if (channel >= ADS7924_CHANNELS) {
+ dev_err(dev, "invalid channel index %d on %pfw\n",
+ channel, node);
+ continue;
+ }
+
+ num_channels++;
+ }
+
+ if (!num_channels)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int ads7924_set_conv_mode(struct ads7924_data *data, int mode)
+{
+ int ret;
+ unsigned int mode_field;
+ struct device *dev = data->dev;
+
+ /*
+ * When switching between modes, be sure to first select the Awake mode
+ * and then switch to the desired mode. This procedure ensures the
+ * internal control logic is properly synchronized.
+ */
+ if (mode != ADS7924_MODECNTRL_IDLE) {
+ mode_field = FIELD_PREP(ADS7924_MODECNTRL_MODE_MASK,
+ ADS7924_MODECNTRL_AWAKE);
+
+ ret = regmap_update_bits(data->regmap, ADS7924_MODECNTRL_REG,
+ ADS7924_MODECNTRL_MODE_MASK,
+ mode_field);
+ if (ret) {
+ dev_err(dev, "failed to set awake mode (%pe)\n",
+ ERR_PTR(ret));
+ return ret;
+ }
+ }
+
+ mode_field = FIELD_PREP(ADS7924_MODECNTRL_MODE_MASK, mode);
+
+ ret = regmap_update_bits(data->regmap, ADS7924_MODECNTRL_REG,
+ ADS7924_MODECNTRL_MODE_MASK, mode_field);
+ if (ret)
+ dev_err(dev, "failed to set mode %d (%pe)\n", mode,
+ ERR_PTR(ret));
+
+ return ret;
+}
+
+static int ads7924_reset(struct iio_dev *indio_dev)
+{
+ struct ads7924_data *data = iio_priv(indio_dev);
+
+ if (data->reset_gpio) {
+ gpiod_set_value(data->reset_gpio, 1); /* Assert. */
+ /* Educated guess: assert time not specified in datasheet... */
+ mdelay(100);
+ gpiod_set_value(data->reset_gpio, 0); /* Deassert. */
+ return 0;
+ }
+
+ /*
+ * A write of 10101010 to this register will generate a
+ * software reset of the ADS7924.
+ */
+ return regmap_write(data->regmap, ADS7924_RESET_REG, 0b10101010);
+};
+
+static void ads7924_reg_disable(void *data)
+{
+ regulator_disable(data);
+}
+
+static void ads7924_set_idle_mode(void *data)
+{
+ ads7924_set_conv_mode(data, ADS7924_MODECNTRL_IDLE);
+}
+
+static int ads7924_probe(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev;
+ struct ads7924_data *data;
+ struct device *dev = &client->dev;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (!indio_dev)
+ return dev_err_probe(dev, -ENOMEM,
+ "failed to allocate iio device\n");
+
+ data = iio_priv(indio_dev);
+
+ data->dev = dev;
+
+ /* Initialize the reset GPIO as output with an initial value of 0. */
+ data->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(data->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(data->reset_gpio),
+ "failed to get request reset GPIO\n");
+
+ mutex_init(&data->lock);
+
+ indio_dev->name = "ads7924";
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ indio_dev->channels = ads7924_channels;
+ indio_dev->num_channels = ARRAY_SIZE(ads7924_channels);
+ indio_dev->info = &ads7924_info;
+
+ ret = ads7924_get_channels_config(client, indio_dev);
+ if (ret < 0)
+ return dev_err_probe(dev, ret,
+ "failed to get channels configuration\n");
+
+ data->regmap = devm_regmap_init_i2c(client, &ads7924_regmap_config);
+ if (IS_ERR(data->regmap))
+ return dev_err_probe(dev, PTR_ERR(data->regmap),
+ "failed to init regmap\n");
+
+ data->vref_reg = devm_regulator_get(dev, "vref");
+ if (IS_ERR(data->vref_reg))
+ return dev_err_probe(dev, PTR_ERR(data->vref_reg),
+ "failed to get vref regulator\n");
+
+ ret = regulator_enable(data->vref_reg);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "failed to enable regulator\n");
+
+ ret = devm_add_action_or_reset(dev, ads7924_reg_disable, data->vref_reg);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "failed to add regulator disable action\n");
+
+ ret = ads7924_reset(indio_dev);
+ if (ret < 0)
+ return dev_err_probe(dev, ret,
+ "failed to reset device\n");
+
+ ret = ads7924_set_conv_mode(data, ADS7924_MODECNTRL_AUTO_SCAN);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "failed to set conversion mode\n");
+
+ ret = devm_add_action_or_reset(dev, ads7924_set_idle_mode, data);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "failed to add idle mode action\n");
+
+ /* Use minimum signal acquire time. */
+ ret = regmap_update_bits(data->regmap, ADS7924_ACQCONFIG_REG,
+ ADS7924_ACQTIME_MASK,
+ FIELD_PREP(ADS7924_ACQTIME_MASK, 0));
+ if (ret < 0)
+ return dev_err_probe(dev, ret,
+ "failed to configure signal acquire time\n");
+
+ /* Disable power-up time. */
+ ret = regmap_update_bits(data->regmap, ADS7924_PWRCONFIG_REG,
+ ADS7924_PWRUPTIME_MASK,
+ FIELD_PREP(ADS7924_PWRUPTIME_MASK, 0));
+ if (ret < 0)
+ return dev_err_probe(dev, ret,
+ "failed to configure power-up time\n");
+
+ data->conv_invalid = true;
+
+ ret = devm_iio_device_register(dev, indio_dev);
+ if (ret < 0)
+ return dev_err_probe(dev, ret,
+ "failed to register IIO device\n");
+
+ return 0;
+}
+
+static const struct i2c_device_id ads7924_id[] = {
+ { "ads7924", 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, ads7924_id);
+
+static const struct of_device_id ads7924_of_match[] = {
+ { .compatible = "ti,ads7924", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, ads7924_of_match);
+
+static struct i2c_driver ads7924_driver = {
+ .driver = {
+ .name = "ads7924",
+ .of_match_table = ads7924_of_match,
+ },
+ .probe_new = ads7924_probe,
+ .id_table = ads7924_id,
+};
+
+module_i2c_driver(ads7924_driver);
+
+MODULE_AUTHOR("Hugo Villeneuve <hvilleneuve@dimonoff.com>");
+MODULE_DESCRIPTION("Texas Instruments ADS7924 ADC I2C driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/adc/ti-lmp92064.c b/drivers/iio/adc/ti-lmp92064.c
new file mode 100644
index 000000000000..c30ed824924f
--- /dev/null
+++ b/drivers/iio/adc/ti-lmp92064.c
@@ -0,0 +1,332 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Texas Instruments LMP92064 SPI ADC driver
+ *
+ * Copyright (c) 2022 Leonard Göhrs <kernel@pengutronix.de>, Pengutronix
+ *
+ * Based on linux/drivers/iio/adc/ti-tsc2046.c
+ * Copyright (c) 2021 Oleksij Rempel <kernel@pengutronix.de>, Pengutronix
+ */
+
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/spi/spi.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/driver.h>
+
+#define TI_LMP92064_REG_CONFIG_A 0x0000
+#define TI_LMP92064_REG_CONFIG_B 0x0001
+#define TI_LMP92064_REG_CHIP_REV 0x0006
+
+#define TI_LMP92064_REG_MFR_ID1 0x000C
+#define TI_LMP92064_REG_MFR_ID2 0x000D
+
+#define TI_LMP92064_REG_REG_UPDATE 0x000F
+#define TI_LMP92064_REG_CONFIG_REG 0x0100
+#define TI_LMP92064_REG_STATUS 0x0103
+
+#define TI_LMP92064_REG_DATA_VOUT_LSB 0x0200
+#define TI_LMP92064_REG_DATA_VOUT_MSB 0x0201
+#define TI_LMP92064_REG_DATA_COUT_LSB 0x0202
+#define TI_LMP92064_REG_DATA_COUT_MSB 0x0203
+
+#define TI_LMP92064_VAL_CONFIG_A 0x99
+#define TI_LMP92064_VAL_CONFIG_B 0x00
+#define TI_LMP92064_VAL_STATUS_OK 0x01
+
+/*
+ * Channel number definitions for the two channels of the device
+ * - IN Current (INC)
+ * - IN Voltage (INV)
+ */
+#define TI_LMP92064_CHAN_INC 0
+#define TI_LMP92064_CHAN_INV 1
+
+static const struct regmap_range lmp92064_readable_reg_ranges[] = {
+ regmap_reg_range(TI_LMP92064_REG_CONFIG_A, TI_LMP92064_REG_CHIP_REV),
+ regmap_reg_range(TI_LMP92064_REG_MFR_ID1, TI_LMP92064_REG_MFR_ID2),
+ regmap_reg_range(TI_LMP92064_REG_REG_UPDATE, TI_LMP92064_REG_REG_UPDATE),
+ regmap_reg_range(TI_LMP92064_REG_CONFIG_REG, TI_LMP92064_REG_CONFIG_REG),
+ regmap_reg_range(TI_LMP92064_REG_STATUS, TI_LMP92064_REG_STATUS),
+ regmap_reg_range(TI_LMP92064_REG_DATA_VOUT_LSB, TI_LMP92064_REG_DATA_COUT_MSB),
+};
+
+static const struct regmap_access_table lmp92064_readable_regs = {
+ .yes_ranges = lmp92064_readable_reg_ranges,
+ .n_yes_ranges = ARRAY_SIZE(lmp92064_readable_reg_ranges),
+};
+
+static const struct regmap_range lmp92064_writable_reg_ranges[] = {
+ regmap_reg_range(TI_LMP92064_REG_CONFIG_A, TI_LMP92064_REG_CONFIG_B),
+ regmap_reg_range(TI_LMP92064_REG_REG_UPDATE, TI_LMP92064_REG_REG_UPDATE),
+ regmap_reg_range(TI_LMP92064_REG_CONFIG_REG, TI_LMP92064_REG_CONFIG_REG),
+};
+
+static const struct regmap_access_table lmp92064_writable_regs = {
+ .yes_ranges = lmp92064_writable_reg_ranges,
+ .n_yes_ranges = ARRAY_SIZE(lmp92064_writable_reg_ranges),
+};
+
+static const struct regmap_config lmp92064_spi_regmap_config = {
+ .reg_bits = 16,
+ .val_bits = 8,
+ .max_register = TI_LMP92064_REG_DATA_COUT_MSB,
+ .rd_table = &lmp92064_readable_regs,
+ .wr_table = &lmp92064_writable_regs,
+};
+
+struct lmp92064_adc_priv {
+ int shunt_resistor_uohm;
+ struct spi_device *spi;
+ struct regmap *regmap;
+};
+
+static const struct iio_chan_spec lmp92064_adc_channels[] = {
+ {
+ .type = IIO_CURRENT,
+ .address = TI_LMP92064_CHAN_INC,
+ .info_mask_separate =
+ BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
+ .datasheet_name = "INC",
+ },
+ {
+ .type = IIO_VOLTAGE,
+ .address = TI_LMP92064_CHAN_INV,
+ .info_mask_separate =
+ BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
+ .datasheet_name = "INV",
+ },
+};
+
+static int lmp92064_read_meas(struct lmp92064_adc_priv *priv, u16 *res)
+{
+ __be16 raw[2];
+ int ret;
+
+ /*
+ * The ADC only latches in new samples if all DATA registers are read
+ * in descending sequential order.
+ * The ADC auto-decrements the register index with each clocked byte.
+ * Read both channels in single SPI transfer by selecting the highest
+ * register using the command below and clocking out all four data
+ * bytes.
+ */
+
+ ret = regmap_bulk_read(priv->regmap, TI_LMP92064_REG_DATA_COUT_MSB,
+ &raw, sizeof(raw));
+
+ if (ret) {
+ dev_err(&priv->spi->dev, "regmap_bulk_read failed: %pe\n",
+ ERR_PTR(ret));
+ return ret;
+ }
+
+ res[0] = be16_to_cpu(raw[0]);
+ res[1] = be16_to_cpu(raw[1]);
+
+ return 0;
+}
+
+static int lmp92064_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val,
+ int *val2, long mask)
+{
+ struct lmp92064_adc_priv *priv = iio_priv(indio_dev);
+ u16 raw[2];
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = lmp92064_read_meas(priv, raw);
+ if (ret < 0)
+ return ret;
+
+ *val = (chan->address == TI_LMP92064_CHAN_INC) ? raw[0] : raw[1];
+
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ if (chan->address == TI_LMP92064_CHAN_INC) {
+ /*
+ * processed (mA) = raw * current_lsb (mA)
+ * current_lsb (mA) = shunt_voltage_lsb (nV) / shunt_resistor (uOhm)
+ * shunt_voltage_lsb (nV) = 81920000 / 4096 = 20000
+ */
+ *val = 20000;
+ *val2 = priv->shunt_resistor_uohm;
+ } else {
+ /*
+ * processed (mV) = raw * voltage_lsb (mV)
+ * voltage_lsb (mV) = 2048 / 4096
+ */
+ *val = 2048;
+ *val2 = 4096;
+ }
+ return IIO_VAL_FRACTIONAL;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int lmp92064_reset(struct lmp92064_adc_priv *priv,
+ struct gpio_desc *gpio_reset)
+{
+ unsigned int status;
+ int ret, i;
+
+ if (gpio_reset) {
+ /*
+ * Perform a hard reset if gpio_reset is available.
+ * The datasheet specifies a very low 3.5ns reset pulse duration and does not
+ * specify how long to wait after a reset to access the device.
+ * Use more conservative pulse lengths to allow analog RC filtering of the
+ * reset line at the board level (as recommended in the datasheet).
+ */
+ gpiod_set_value_cansleep(gpio_reset, 1);
+ usleep_range(1, 10);
+ gpiod_set_value_cansleep(gpio_reset, 0);
+ usleep_range(500, 750);
+ } else {
+ /*
+ * Perform a soft-reset if not.
+ * Also write default values to the config registers that are not
+ * affected by soft reset.
+ */
+ ret = regmap_write(priv->regmap, TI_LMP92064_REG_CONFIG_A,
+ TI_LMP92064_VAL_CONFIG_A);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_write(priv->regmap, TI_LMP92064_REG_CONFIG_B,
+ TI_LMP92064_VAL_CONFIG_B);
+ if (ret < 0)
+ return ret;
+ }
+
+ /*
+ * Wait for the device to signal readiness to prevent reading bogus data
+ * and make sure device is actually connected.
+ * The datasheet does not specify how long this takes but usually it is
+ * not more than 3-4 iterations of this loop.
+ */
+ for (i = 0; i < 10; i++) {
+ ret = regmap_read(priv->regmap, TI_LMP92064_REG_STATUS, &status);
+ if (ret < 0)
+ return ret;
+
+ if (status == TI_LMP92064_VAL_STATUS_OK)
+ return 0;
+
+ usleep_range(1000, 2000);
+ }
+
+ /*
+ * No (correct) response received.
+ * Device is mostly likely not connected to the bus.
+ */
+ return -ENXIO;
+}
+
+static const struct iio_info lmp92064_adc_info = {
+ .read_raw = lmp92064_read_raw,
+};
+
+static int lmp92064_adc_probe(struct spi_device *spi)
+{
+ struct device *dev = &spi->dev;
+ struct lmp92064_adc_priv *priv;
+ struct gpio_desc *gpio_reset;
+ struct iio_dev *indio_dev;
+ u32 shunt_resistor_uohm;
+ struct regmap *regmap;
+ int ret;
+
+ ret = spi_setup(spi);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Error in SPI setup\n");
+
+ regmap = devm_regmap_init_spi(spi, &lmp92064_spi_regmap_config);
+ if (IS_ERR(regmap))
+ return dev_err_probe(dev, PTR_ERR(regmap),
+ "Failed to set up SPI regmap\n");
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*priv));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ priv = iio_priv(indio_dev);
+
+ priv->spi = spi;
+ priv->regmap = regmap;
+
+ ret = device_property_read_u32(dev, "shunt-resistor-micro-ohms",
+ &shunt_resistor_uohm);
+ if (ret < 0)
+ return dev_err_probe(dev, ret,
+ "Failed to get shunt-resistor value\n");
+
+ /*
+ * The shunt resistance is passed to userspace as the denominator of an iio
+ * fraction. Make sure it is in range for that.
+ */
+ if (shunt_resistor_uohm == 0 || shunt_resistor_uohm > INT_MAX) {
+ dev_err(dev, "Shunt resistance is out of range\n");
+ return -EINVAL;
+ }
+
+ priv->shunt_resistor_uohm = shunt_resistor_uohm;
+
+ ret = devm_regulator_get_enable(dev, "vdd");
+ if (ret)
+ return ret;
+
+ ret = devm_regulator_get_enable(dev, "vdig");
+ if (ret)
+ return ret;
+
+ gpio_reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(gpio_reset))
+ return dev_err_probe(dev, PTR_ERR(gpio_reset),
+ "Failed to get GPIO reset pin\n");
+
+ ret = lmp92064_reset(priv, gpio_reset);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Failed to reset device\n");
+
+ indio_dev->name = "lmp92064";
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = lmp92064_adc_channels;
+ indio_dev->num_channels = ARRAY_SIZE(lmp92064_adc_channels);
+ indio_dev->info = &lmp92064_adc_info;
+
+ return devm_iio_device_register(dev, indio_dev);
+}
+
+static const struct spi_device_id lmp92064_id_table[] = {
+ { "lmp92064" },
+ {}
+};
+MODULE_DEVICE_TABLE(spi, lmp92064_id_table);
+
+static const struct of_device_id lmp92064_of_table[] = {
+ { .compatible = "ti,lmp92064" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, lmp92064_of_table);
+
+static struct spi_driver lmp92064_adc_driver = {
+ .driver = {
+ .name = "lmp92064",
+ .of_match_table = lmp92064_of_table,
+ },
+ .probe = lmp92064_adc_probe,
+ .id_table = lmp92064_id_table,
+};
+module_spi_driver(lmp92064_adc_driver);
+
+MODULE_AUTHOR("Leonard Göhrs <kernel@pengutronix.de>");
+MODULE_DESCRIPTION("TI LMP92064 ADC");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/adc/twl6030-gpadc.c b/drivers/iio/adc/twl6030-gpadc.c
index f53e8558b560..32873fb5f367 100644
--- a/drivers/iio/adc/twl6030-gpadc.c
+++ b/drivers/iio/adc/twl6030-gpadc.c
@@ -57,6 +57,18 @@
#define TWL6030_GPADCS BIT(1)
#define TWL6030_GPADCR BIT(0)
+#define USB_VBUS_CTRL_SET 0x04
+#define USB_ID_CTRL_SET 0x06
+
+#define TWL6030_MISC1 0xE4
+#define VBUS_MEAS 0x01
+#define ID_MEAS 0x01
+
+#define VAC_MEAS 0x04
+#define VBAT_MEAS 0x02
+#define BB_MEAS 0x01
+
+
/**
* struct twl6030_chnl_calib - channel calibration
* @gain: slope coefficient for ideal curve
@@ -927,6 +939,26 @@ static int twl6030_gpadc_probe(struct platform_device *pdev)
return ret;
}
+ ret = twl_i2c_write_u8(TWL_MODULE_USB, VBUS_MEAS, USB_VBUS_CTRL_SET);
+ if (ret < 0) {
+ dev_err(dev, "failed to wire up inputs\n");
+ return ret;
+ }
+
+ ret = twl_i2c_write_u8(TWL_MODULE_USB, ID_MEAS, USB_ID_CTRL_SET);
+ if (ret < 0) {
+ dev_err(dev, "failed to wire up inputs\n");
+ return ret;
+ }
+
+ ret = twl_i2c_write_u8(TWL6030_MODULE_ID0,
+ VBAT_MEAS | BB_MEAS | VAC_MEAS,
+ TWL6030_MISC1);
+ if (ret < 0) {
+ dev_err(dev, "failed to wire up inputs\n");
+ return ret;
+ }
+
indio_dev->name = DRIVER_NAME;
indio_dev->info = &twl6030_gpadc_iio_info;
indio_dev->modes = INDIO_DIRECT_MODE;
diff --git a/drivers/iio/adc/xilinx-ams.c b/drivers/iio/adc/xilinx-ams.c
index 5b4bdf3a26bb..34cf336b3490 100644
--- a/drivers/iio/adc/xilinx-ams.c
+++ b/drivers/iio/adc/xilinx-ams.c
@@ -1220,8 +1220,7 @@ static int ams_init_module(struct iio_dev *indio_dev,
int num_channels = 0;
int ret;
- if (fwnode_property_match_string(fwnode, "compatible",
- "xlnx,zynqmp-ams-ps") == 0) {
+ if (fwnode_device_is_compatible(fwnode, "xlnx,zynqmp-ams-ps")) {
ams->ps_base = fwnode_iomap(fwnode, 0);
if (!ams->ps_base)
return -ENXIO;
@@ -1232,8 +1231,7 @@ static int ams_init_module(struct iio_dev *indio_dev,
/* add PS channels to iio device channels */
memcpy(channels, ams_ps_channels, sizeof(ams_ps_channels));
num_channels = ARRAY_SIZE(ams_ps_channels);
- } else if (fwnode_property_match_string(fwnode, "compatible",
- "xlnx,zynqmp-ams-pl") == 0) {
+ } else if (fwnode_device_is_compatible(fwnode, "xlnx,zynqmp-ams-pl")) {
ams->pl_base = fwnode_iomap(fwnode, 0);
if (!ams->pl_base)
return -ENXIO;
@@ -1247,8 +1245,7 @@ static int ams_init_module(struct iio_dev *indio_dev,
num_channels += AMS_PL_MAX_FIXED_CHANNEL;
num_channels = ams_get_ext_chan(fwnode, channels,
num_channels);
- } else if (fwnode_property_match_string(fwnode, "compatible",
- "xlnx,zynqmp-ams") == 0) {
+ } else if (fwnode_device_is_compatible(fwnode, "xlnx,zynqmp-ams")) {
/* add AMS channels to iio device channels */
memcpy(channels, ams_ctrl_channels, sizeof(ams_ctrl_channels));
num_channels += ARRAY_SIZE(ams_ctrl_channels);
@@ -1329,7 +1326,7 @@ static int ams_parse_firmware(struct iio_dev *indio_dev)
dev_channels = devm_krealloc(dev, ams_channels, dev_size, GFP_KERNEL);
if (!dev_channels)
- ret = -ENOMEM;
+ return -ENOMEM;
indio_dev->channels = dev_channels;
indio_dev->num_channels = num_channels;
diff --git a/drivers/iio/cdc/ad7746.c b/drivers/iio/cdc/ad7746.c
index 6f68651ce1d5..a1db5469f2d1 100644
--- a/drivers/iio/cdc/ad7746.c
+++ b/drivers/iio/cdc/ad7746.c
@@ -285,8 +285,7 @@ static int ad7746_select_channel(struct iio_dev *indio_dev,
if (ret < 0)
return ret;
- if (chip->capdac_set != chan->channel)
- chip->capdac_set = chan->channel;
+ chip->capdac_set = chan->channel;
break;
case IIO_VOLTAGE:
case IIO_TEMP:
diff --git a/drivers/iio/chemical/scd30_core.c b/drivers/iio/chemical/scd30_core.c
index 682fca39d14d..7be5a45cf71a 100644
--- a/drivers/iio/chemical/scd30_core.c
+++ b/drivers/iio/chemical/scd30_core.c
@@ -354,7 +354,7 @@ static ssize_t sampling_frequency_available_show(struct device *dev, struct devi
ssize_t len = 0;
do {
- len += scnprintf(buf + len, PAGE_SIZE - len, "0.%09u ", 1000000000 / i);
+ len += sysfs_emit_at(buf, len, "0.%09u ", 1000000000 / i);
/*
* Not all values fit PAGE_SIZE buffer hence print every 6th
* (each frequency differs by 6s in time domain from the
@@ -380,7 +380,7 @@ static ssize_t calibration_auto_enable_show(struct device *dev, struct device_at
ret = scd30_command_read(state, CMD_ASC, &val);
mutex_unlock(&state->lock);
- return ret ?: sprintf(buf, "%d\n", val);
+ return ret ?: sysfs_emit(buf, "%d\n", val);
}
static ssize_t calibration_auto_enable_store(struct device *dev, struct device_attribute *attr,
@@ -414,7 +414,7 @@ static ssize_t calibration_forced_value_show(struct device *dev, struct device_a
ret = scd30_command_read(state, CMD_FRC, &val);
mutex_unlock(&state->lock);
- return ret ?: sprintf(buf, "%d\n", val);
+ return ret ?: sysfs_emit(buf, "%d\n", val);
}
static ssize_t calibration_forced_value_store(struct device *dev, struct device_attribute *attr,
@@ -642,10 +642,8 @@ static int scd30_setup_trigger(struct iio_dev *indio_dev)
trig = devm_iio_trigger_alloc(dev, "%s-dev%d", indio_dev->name,
iio_device_id(indio_dev));
- if (!trig) {
- dev_err(dev, "failed to allocate trigger\n");
- return -ENOMEM;
- }
+ if (!trig)
+ return dev_err_probe(dev, -ENOMEM, "failed to allocate trigger\n");
trig->ops = &scd30_trigger_ops;
iio_trigger_set_drvdata(trig, indio_dev);
@@ -667,9 +665,9 @@ static int scd30_setup_trigger(struct iio_dev *indio_dev)
IRQF_NO_AUTOEN,
indio_dev->name, indio_dev);
if (ret)
- dev_err(dev, "failed to request irq\n");
+ return dev_err_probe(dev, ret, "failed to request irq\n");
- return ret;
+ return 0;
}
int scd30_probe(struct device *dev, int irq, const char *name, void *priv,
@@ -717,17 +715,13 @@ int scd30_probe(struct device *dev, int irq, const char *name, void *priv,
return ret;
ret = scd30_reset(state);
- if (ret) {
- dev_err(dev, "failed to reset device: %d\n", ret);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to reset device\n");
if (state->irq > 0) {
ret = scd30_setup_trigger(indio_dev);
- if (ret) {
- dev_err(dev, "failed to setup trigger: %d\n", ret);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to setup trigger\n");
}
ret = devm_iio_triggered_buffer_setup(dev, indio_dev, NULL, scd30_trigger_handler, NULL);
@@ -735,23 +729,17 @@ int scd30_probe(struct device *dev, int irq, const char *name, void *priv,
return ret;
ret = scd30_command_read(state, CMD_FW_VERSION, &val);
- if (ret) {
- dev_err(dev, "failed to read firmware version: %d\n", ret);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to read firmware version\n");
dev_info(dev, "firmware version: %d.%d\n", val >> 8, (char)val);
ret = scd30_command_write(state, CMD_MEAS_INTERVAL, state->meas_interval);
- if (ret) {
- dev_err(dev, "failed to set measurement interval: %d\n", ret);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to set measurement interval\n");
ret = scd30_command_write(state, CMD_START_MEAS, state->pressure_comp);
- if (ret) {
- dev_err(dev, "failed to start measurement: %d\n", ret);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to start measurement\n");
ret = devm_add_action_or_reset(dev, scd30_stop_meas, state);
if (ret)
diff --git a/drivers/iio/common/scmi_sensors/scmi_iio.c b/drivers/iio/common/scmi_sensors/scmi_iio.c
index d92f7f651f7b..0c2caf3570db 100644
--- a/drivers/iio/common/scmi_sensors/scmi_iio.c
+++ b/drivers/iio/common/scmi_sensors/scmi_iio.c
@@ -400,12 +400,12 @@ static ssize_t scmi_iio_get_raw_available(struct iio_dev *iio_dev,
rem = do_div(resolution,
int_pow(10, abs(exponent))
);
- len = scnprintf(buf, PAGE_SIZE,
+ len = sysfs_emit(buf,
"[%lld %llu.%llu %lld]\n", min_range,
resolution, rem, max_range);
} else {
resolution = resolution * int_pow(10, exponent);
- len = scnprintf(buf, PAGE_SIZE, "[%lld %llu %lld]\n",
+ len = sysfs_emit(buf, "[%lld %llu %lld]\n",
min_range, resolution, max_range);
}
}
diff --git a/drivers/iio/dac/Kconfig b/drivers/iio/dac/Kconfig
index 80521bd28d0f..d3f90cf86143 100644
--- a/drivers/iio/dac/Kconfig
+++ b/drivers/iio/dac/Kconfig
@@ -162,10 +162,10 @@ config AD5696_I2C
depends on I2C
select AD5686
help
- Say yes here to build support for Analog Devices AD5311R, AD5338R,
- AD5671R, AD5673R, AD5675R, AD5677R, AD5691R, AD5692R, AD5693, AD5693R,
- AD5694, AD5694R, AD5695R, AD5696, and AD5696R Digital to Analog
- converters.
+ Say yes here to build support for Analog Devices AD5311R, AD5337,
+ AD5338R, AD5671R, AD5673R, AD5675R, AD5677R, AD5691R, AD5692R, AD5693,
+ AD5693R, AD5694, AD5694R, AD5695R, AD5696, and AD5696R Digital to
+ Analog converters.
To compile this driver as a module, choose M here: the module will be
called ad5696.
@@ -357,6 +357,19 @@ config MAX517
This driver can also be built as a module. If so, the module
will be called max517.
+config MAX5522
+ tristate "Maxim MAX5522 DAC driver"
+ depends on SPI_MASTER
+ select REGMAP_SPI
+ help
+ Say Y here if you want to build a driver for the Maxim MAX5522.
+
+ MAX5522 is a dual, ultra-low-power, 10-Bit, voltage-output
+ digital to analog converter (DAC) offering rail-to-rail buffered
+ voltage outputs.
+
+ If compiled as a module, it will be called max5522.
+
config MAX5821
tristate "Maxim MAX5821 DAC driver"
depends on I2C
diff --git a/drivers/iio/dac/Makefile b/drivers/iio/dac/Makefile
index ec3e42713f00..6c74fea21736 100644
--- a/drivers/iio/dac/Makefile
+++ b/drivers/iio/dac/Makefile
@@ -38,6 +38,7 @@ obj-$(CONFIG_LTC2632) += ltc2632.o
obj-$(CONFIG_LTC2688) += ltc2688.o
obj-$(CONFIG_M62332) += m62332.o
obj-$(CONFIG_MAX517) += max517.o
+obj-$(CONFIG_MAX5522) += max5522.o
obj-$(CONFIG_MAX5821) += max5821.o
obj-$(CONFIG_MCP4725) += mcp4725.o
obj-$(CONFIG_MCP4922) += mcp4922.o
diff --git a/drivers/iio/dac/ad5686.c b/drivers/iio/dac/ad5686.c
index 15361d8bbf94..57cc0f0eedc6 100644
--- a/drivers/iio/dac/ad5686.c
+++ b/drivers/iio/dac/ad5686.c
@@ -258,6 +258,7 @@ static const struct iio_chan_spec name[] = { \
DECLARE_AD5693_CHANNELS(ad5310r_channels, 10, 2);
DECLARE_AD5693_CHANNELS(ad5311r_channels, 10, 6);
+DECLARE_AD5338_CHANNELS(ad5337r_channels, 8, 8);
DECLARE_AD5338_CHANNELS(ad5338r_channels, 10, 6);
DECLARE_AD5676_CHANNELS(ad5672_channels, 12, 4);
DECLARE_AD5679_CHANNELS(ad5674r_channels, 12, 4);
@@ -283,6 +284,12 @@ static const struct ad5686_chip_info ad5686_chip_info_tbl[] = {
.num_channels = 1,
.regmap_type = AD5693_REGMAP,
},
+ [ID_AD5337R] = {
+ .channels = ad5337r_channels,
+ .int_vref_mv = 2500,
+ .num_channels = 2,
+ .regmap_type = AD5686_REGMAP,
+ },
[ID_AD5338R] = {
.channels = ad5338r_channels,
.int_vref_mv = 2500,
diff --git a/drivers/iio/dac/ad5686.h b/drivers/iio/dac/ad5686.h
index b7ade3a6b9b6..760f852911df 100644
--- a/drivers/iio/dac/ad5686.h
+++ b/drivers/iio/dac/ad5686.h
@@ -54,6 +54,7 @@
enum ad5686_supported_device_ids {
ID_AD5310R,
ID_AD5311R,
+ ID_AD5337R,
ID_AD5338R,
ID_AD5671R,
ID_AD5672R,
diff --git a/drivers/iio/dac/ad5696-i2c.c b/drivers/iio/dac/ad5696-i2c.c
index 160e80cf9135..8a95f0278018 100644
--- a/drivers/iio/dac/ad5696-i2c.c
+++ b/drivers/iio/dac/ad5696-i2c.c
@@ -72,6 +72,7 @@ static void ad5686_i2c_remove(struct i2c_client *i2c)
static const struct i2c_device_id ad5686_i2c_id[] = {
{"ad5311r", ID_AD5311R},
+ {"ad5337r", ID_AD5337R},
{"ad5338r", ID_AD5338R},
{"ad5671r", ID_AD5671R},
{"ad5673r", ID_AD5673R},
@@ -92,6 +93,7 @@ MODULE_DEVICE_TABLE(i2c, ad5686_i2c_id);
static const struct of_device_id ad5686_of_match[] = {
{ .compatible = "adi,ad5311r" },
+ { .compatible = "adi,ad5337r" },
{ .compatible = "adi,ad5338r" },
{ .compatible = "adi,ad5671r" },
{ .compatible = "adi,ad5675r" },
diff --git a/drivers/iio/dac/max5522.c b/drivers/iio/dac/max5522.c
new file mode 100644
index 000000000000..00ba4e98fb9c
--- /dev/null
+++ b/drivers/iio/dac/max5522.c
@@ -0,0 +1,207 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Maxim MAX5522
+ * Dual, Ultra-Low-Power 10-Bit, Voltage-Output DACs
+ *
+ * Copyright 2022 Timesys Corp.
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+
+#include <linux/iio/iio.h>
+
+#define MAX5522_MAX_ADDR 15
+#define MAX5522_CTRL_NONE 0
+#define MAX5522_CTRL_LOAD_IN_A 9
+#define MAX5522_CTRL_LOAD_IN_B 10
+
+#define MAX5522_REG_DATA(x) ((x) + MAX5522_CTRL_LOAD_IN_A)
+
+struct max5522_chip_info {
+ const char *name;
+ const struct iio_chan_spec *channels;
+ unsigned int num_channels;
+};
+
+struct max5522_state {
+ struct regmap *regmap;
+ const struct max5522_chip_info *chip_info;
+ unsigned short dac_cache[2];
+ struct regulator *vrefin_reg;
+};
+
+#define MAX5522_CHANNEL(chan) { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .output = 1, \
+ .channel = chan, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE), \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = 10, \
+ .storagebits = 16, \
+ .shift = 2, \
+ } \
+}
+
+const struct iio_chan_spec max5522_channels[] = {
+ MAX5522_CHANNEL(0),
+ MAX5522_CHANNEL(1),
+};
+
+enum max5522_type {
+ ID_MAX5522,
+};
+
+static const struct max5522_chip_info max5522_chip_info_tbl[] = {
+ [ID_MAX5522] = {
+ .name = "max5522",
+ .channels = max5522_channels,
+ .num_channels = 2,
+ },
+};
+
+static inline int max5522_info_to_reg(struct iio_chan_spec const *chan)
+{
+ return MAX5522_REG_DATA(chan->channel);
+}
+
+static int max5522_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long info)
+{
+ struct max5522_state *state = iio_priv(indio_dev);
+ int ret;
+
+ switch (info) {
+ case IIO_CHAN_INFO_RAW:
+ *val = state->dac_cache[chan->channel];
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ ret = regulator_get_voltage(state->vrefin_reg);
+ if (ret < 0)
+ return -EINVAL;
+ *val = ret / 1000;
+ *val2 = 10;
+ return IIO_VAL_FRACTIONAL_LOG2;
+ default:
+ return -EINVAL;
+ }
+
+ return -EINVAL;
+}
+
+static int max5522_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long info)
+{
+ struct max5522_state *state = iio_priv(indio_dev);
+ int rval;
+
+ if (val > 1023 || val < 0)
+ return -EINVAL;
+
+ rval = regmap_write(state->regmap, max5522_info_to_reg(chan),
+ val << chan->scan_type.shift);
+ if (rval < 0)
+ return rval;
+
+ state->dac_cache[chan->channel] = val;
+
+ return 0;
+}
+
+static const struct iio_info max5522_info = {
+ .read_raw = max5522_read_raw,
+ .write_raw = max5522_write_raw,
+};
+
+static const struct regmap_config max5522_regmap_config = {
+ .reg_bits = 4,
+ .val_bits = 12,
+ .max_register = MAX5522_MAX_ADDR,
+};
+
+static int max5522_spi_probe(struct spi_device *spi)
+{
+ const struct spi_device_id *id = spi_get_device_id(spi);
+ struct iio_dev *indio_dev;
+ struct max5522_state *state;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*state));
+ if (indio_dev == NULL) {
+ dev_err(&spi->dev, "failed to allocate iio device\n");
+ return -ENOMEM;
+ }
+
+ state = iio_priv(indio_dev);
+ state->chip_info = device_get_match_data(&spi->dev);
+ if (!state->chip_info) {
+ state->chip_info =
+ (struct max5522_chip_info *)(id->driver_data);
+ if (!state->chip_info)
+ return -EINVAL;
+ }
+
+ state->vrefin_reg = devm_regulator_get(&spi->dev, "vrefin");
+ if (IS_ERR(state->vrefin_reg))
+ return dev_err_probe(&spi->dev, PTR_ERR(state->vrefin_reg),
+ "Vrefin regulator not specified\n");
+
+ ret = regulator_enable(state->vrefin_reg);
+ if (ret) {
+ return dev_err_probe(&spi->dev, ret,
+ "Failed to enable vref regulators\n");
+ }
+
+ state->regmap = devm_regmap_init_spi(spi, &max5522_regmap_config);
+
+ if (IS_ERR(state->regmap))
+ return PTR_ERR(state->regmap);
+
+ indio_dev->info = &max5522_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = max5522_channels;
+ indio_dev->num_channels = ARRAY_SIZE(max5522_channels);
+ indio_dev->name = max5522_chip_info_tbl[ID_MAX5522].name;
+
+ return devm_iio_device_register(&spi->dev, indio_dev);
+}
+
+static const struct spi_device_id max5522_ids[] = {
+ { "max5522", (kernel_ulong_t)&max5522_chip_info_tbl[ID_MAX5522] },
+ {}
+};
+MODULE_DEVICE_TABLE(spi, max5522_ids);
+
+static const struct of_device_id max5522_of_match[] = {
+ {
+ .compatible = "maxim,max5522",
+ .data = &max5522_chip_info_tbl[ID_MAX5522],
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, max5522_of_match);
+
+static struct spi_driver max5522_spi_driver = {
+ .driver = {
+ .name = "max5522",
+ .of_match_table = max5522_of_match,
+ },
+ .probe = max5522_spi_probe,
+ .id_table = max5522_ids,
+};
+module_spi_driver(max5522_spi_driver);
+
+MODULE_AUTHOR("Angelo Dureghello <angelo.dureghello@timesys.com");
+MODULE_DESCRIPTION("MAX5522 DAC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/gyro/hid-sensor-gyro-3d.c b/drivers/iio/gyro/hid-sensor-gyro-3d.c
index 8f0ad022c7f1..698c50da1f10 100644
--- a/drivers/iio/gyro/hid-sensor-gyro-3d.c
+++ b/drivers/iio/gyro/hid-sensor-gyro-3d.c
@@ -231,6 +231,7 @@ static int gyro_3d_capture_sample(struct hid_sensor_hub_device *hsdev,
gyro_state->timestamp =
hid_sensor_convert_timestamp(&gyro_state->common_attributes,
*(s64 *)raw_data);
+ ret = 0;
break;
default:
break;
diff --git a/drivers/iio/imu/bno055/bno055_ser_trace.c b/drivers/iio/imu/bno055/bno055_ser_trace.c
index 48397b66daef..ab564186d19c 100644
--- a/drivers/iio/imu/bno055/bno055_ser_trace.c
+++ b/drivers/iio/imu/bno055/bno055_ser_trace.c
@@ -1,4 +1,4 @@
-//SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0
/*
* bno055_ser Trace Support
diff --git a/drivers/iio/imu/fxos8700_core.c b/drivers/iio/imu/fxos8700_core.c
index 423cfe526f2a..6d189c4b9ff9 100644
--- a/drivers/iio/imu/fxos8700_core.c
+++ b/drivers/iio/imu/fxos8700_core.c
@@ -10,6 +10,7 @@
#include <linux/regmap.h>
#include <linux/acpi.h>
#include <linux/bitops.h>
+#include <linux/bitfield.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
@@ -144,9 +145,8 @@
#define FXOS8700_NVM_DATA_BNK0 0xa7
/* Bit definitions for FXOS8700_CTRL_REG1 */
-#define FXOS8700_CTRL_ODR_MSK 0x38
#define FXOS8700_CTRL_ODR_MAX 0x00
-#define FXOS8700_CTRL_ODR_MIN GENMASK(4, 3)
+#define FXOS8700_CTRL_ODR_MSK GENMASK(5, 3)
/* Bit definitions for FXOS8700_M_CTRL_REG1 */
#define FXOS8700_HMS_MASK GENMASK(1, 0)
@@ -320,7 +320,7 @@ static enum fxos8700_sensor fxos8700_to_sensor(enum iio_chan_type iio_type)
switch (iio_type) {
case IIO_ACCEL:
return FXOS8700_ACCEL;
- case IIO_ANGL_VEL:
+ case IIO_MAGN:
return FXOS8700_MAGN;
default:
return -EINVAL;
@@ -345,15 +345,35 @@ static int fxos8700_set_active_mode(struct fxos8700_data *data,
static int fxos8700_set_scale(struct fxos8700_data *data,
enum fxos8700_sensor t, int uscale)
{
- int i;
+ int i, ret, val;
+ bool active_mode;
static const int scale_num = ARRAY_SIZE(fxos8700_accel_scale);
struct device *dev = regmap_get_device(data->regmap);
if (t == FXOS8700_MAGN) {
- dev_err(dev, "Magnetometer scale is locked at 1200uT\n");
+ dev_err(dev, "Magnetometer scale is locked at 0.001Gs\n");
return -EINVAL;
}
+ /*
+ * When device is in active mode, it failed to set an ACCEL
+ * full-scale range(2g/4g/8g) in FXOS8700_XYZ_DATA_CFG.
+ * This is not align with the datasheet, but it is a fxos8700
+ * chip behavier. Set the device in standby mode before setting
+ * an ACCEL full-scale range.
+ */
+ ret = regmap_read(data->regmap, FXOS8700_CTRL_REG1, &val);
+ if (ret)
+ return ret;
+
+ active_mode = val & FXOS8700_ACTIVE;
+ if (active_mode) {
+ ret = regmap_write(data->regmap, FXOS8700_CTRL_REG1,
+ val & ~FXOS8700_ACTIVE);
+ if (ret)
+ return ret;
+ }
+
for (i = 0; i < scale_num; i++)
if (fxos8700_accel_scale[i].uscale == uscale)
break;
@@ -361,8 +381,12 @@ static int fxos8700_set_scale(struct fxos8700_data *data,
if (i == scale_num)
return -EINVAL;
- return regmap_write(data->regmap, FXOS8700_XYZ_DATA_CFG,
+ ret = regmap_write(data->regmap, FXOS8700_XYZ_DATA_CFG,
fxos8700_accel_scale[i].bits);
+ if (ret)
+ return ret;
+ return regmap_write(data->regmap, FXOS8700_CTRL_REG1,
+ active_mode);
}
static int fxos8700_get_scale(struct fxos8700_data *data,
@@ -372,7 +396,7 @@ static int fxos8700_get_scale(struct fxos8700_data *data,
static const int scale_num = ARRAY_SIZE(fxos8700_accel_scale);
if (t == FXOS8700_MAGN) {
- *uscale = 1200; /* Magnetometer is locked at 1200uT */
+ *uscale = 1000; /* Magnetometer is locked at 0.001Gs */
return 0;
}
@@ -394,22 +418,61 @@ static int fxos8700_get_data(struct fxos8700_data *data, int chan_type,
int axis, int *val)
{
u8 base, reg;
+ s16 tmp;
int ret;
- enum fxos8700_sensor type = fxos8700_to_sensor(chan_type);
- base = type ? FXOS8700_OUT_X_MSB : FXOS8700_M_OUT_X_MSB;
+ /*
+ * Different register base addresses varies with channel types.
+ * This bug hasn't been noticed before because using an enum is
+ * really hard to read. Use an a switch statement to take over that.
+ */
+ switch (chan_type) {
+ case IIO_ACCEL:
+ base = FXOS8700_OUT_X_MSB;
+ break;
+ case IIO_MAGN:
+ base = FXOS8700_M_OUT_X_MSB;
+ break;
+ default:
+ return -EINVAL;
+ }
/* Block read 6 bytes of device output registers to avoid data loss */
ret = regmap_bulk_read(data->regmap, base, data->buf,
- FXOS8700_DATA_BUF_SIZE);
+ sizeof(data->buf));
if (ret)
return ret;
/* Convert axis to buffer index */
reg = axis - IIO_MOD_X;
+ /*
+ * Convert to native endianness. The accel data and magn data
+ * are signed, so a forced type conversion is needed.
+ */
+ tmp = be16_to_cpu(data->buf[reg]);
+
+ /*
+ * ACCEL output data registers contain the X-axis, Y-axis, and Z-axis
+ * 14-bit left-justified sample data and MAGN output data registers
+ * contain the X-axis, Y-axis, and Z-axis 16-bit sample data. Apply
+ * a signed 2 bits right shift to the readback raw data from ACCEL
+ * output data register and keep that from MAGN sensor as the origin.
+ * Value should be extended to 32 bit.
+ */
+ switch (chan_type) {
+ case IIO_ACCEL:
+ tmp = tmp >> 2;
+ break;
+ case IIO_MAGN:
+ /* Nothing to do */
+ break;
+ default:
+ return -EINVAL;
+ }
+
/* Convert to native endianness */
- *val = sign_extend32(be16_to_cpu(data->buf[reg]), 15);
+ *val = sign_extend32(tmp, 15);
return 0;
}
@@ -445,10 +508,9 @@ static int fxos8700_set_odr(struct fxos8700_data *data, enum fxos8700_sensor t,
if (i >= odr_num)
return -EINVAL;
- return regmap_update_bits(data->regmap,
- FXOS8700_CTRL_REG1,
- FXOS8700_CTRL_ODR_MSK + FXOS8700_ACTIVE,
- fxos8700_odr[i].bits << 3 | active_mode);
+ val &= ~FXOS8700_CTRL_ODR_MSK;
+ val |= FIELD_PREP(FXOS8700_CTRL_ODR_MSK, fxos8700_odr[i].bits) | FXOS8700_ACTIVE;
+ return regmap_write(data->regmap, FXOS8700_CTRL_REG1, val);
}
static int fxos8700_get_odr(struct fxos8700_data *data, enum fxos8700_sensor t,
@@ -461,7 +523,7 @@ static int fxos8700_get_odr(struct fxos8700_data *data, enum fxos8700_sensor t,
if (ret)
return ret;
- val &= FXOS8700_CTRL_ODR_MSK;
+ val = FIELD_GET(FXOS8700_CTRL_ODR_MSK, val);
for (i = 0; i < odr_num; i++)
if (val == fxos8700_odr[i].bits)
@@ -526,7 +588,7 @@ static IIO_CONST_ATTR(in_accel_sampling_frequency_available,
static IIO_CONST_ATTR(in_magn_sampling_frequency_available,
"1.5625 6.25 12.5 50 100 200 400 800");
static IIO_CONST_ATTR(in_accel_scale_available, "0.000244 0.000488 0.000976");
-static IIO_CONST_ATTR(in_magn_scale_available, "0.000001200");
+static IIO_CONST_ATTR(in_magn_scale_available, "0.001000");
static struct attribute *fxos8700_attrs[] = {
&iio_const_attr_in_accel_sampling_frequency_available.dev_attr.attr,
@@ -592,14 +654,19 @@ static int fxos8700_chip_init(struct fxos8700_data *data, bool use_spi)
if (ret)
return ret;
- /* Max ODR (800Hz individual or 400Hz hybrid), active mode */
- ret = regmap_write(data->regmap, FXOS8700_CTRL_REG1,
- FXOS8700_CTRL_ODR_MAX | FXOS8700_ACTIVE);
+ /*
+ * Set max full-scale range (+/-8G) for ACCEL sensor in chip
+ * initialization then activate the device.
+ */
+ ret = regmap_write(data->regmap, FXOS8700_XYZ_DATA_CFG, MODE_8G);
if (ret)
return ret;
- /* Set for max full-scale range (+/-8G) */
- return regmap_write(data->regmap, FXOS8700_XYZ_DATA_CFG, MODE_8G);
+ /* Max ODR (800Hz individual or 400Hz hybrid), active mode */
+ return regmap_update_bits(data->regmap, FXOS8700_CTRL_REG1,
+ FXOS8700_CTRL_ODR_MSK | FXOS8700_ACTIVE,
+ FIELD_PREP(FXOS8700_CTRL_ODR_MSK, FXOS8700_CTRL_ODR_MAX) |
+ FXOS8700_ACTIVE);
}
static void fxos8700_chip_uninit(void *data)
diff --git a/drivers/iio/imu/kmx61.c b/drivers/iio/imu/kmx61.c
index e692dfeeda44..53ba020fa5d0 100644
--- a/drivers/iio/imu/kmx61.c
+++ b/drivers/iio/imu/kmx61.c
@@ -649,7 +649,7 @@ static int kmx61_chip_update_thresholds(struct kmx61_data *data)
KMX61_REG_WUF_TIMER,
data->wake_duration);
if (ret < 0) {
- dev_err(&data->client->dev, "Errow writing reg_wuf_timer\n");
+ dev_err(&data->client->dev, "Error writing reg_wuf_timer\n");
return ret;
}
diff --git a/drivers/iio/imu/st_lsm6dsx/Kconfig b/drivers/iio/imu/st_lsm6dsx/Kconfig
index f6660847fb58..8c16cdacf2f2 100644
--- a/drivers/iio/imu/st_lsm6dsx/Kconfig
+++ b/drivers/iio/imu/st_lsm6dsx/Kconfig
@@ -4,6 +4,7 @@ config IIO_ST_LSM6DSX
tristate "ST_LSM6DSx driver for STM 6-axis IMU MEMS sensors"
depends on (I2C || SPI || I3C)
select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
select IIO_KFIFO_BUF
select IIO_ST_LSM6DSX_I2C if (I2C)
select IIO_ST_LSM6DSX_SPI if (SPI_MASTER)
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
index 5b6f195748fc..499fcf8875b4 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
@@ -93,7 +93,7 @@ enum st_lsm6dsx_hw_id {
.endianness = IIO_LE, \
}, \
.event_spec = &st_lsm6dsx_event, \
- .ext_info = st_lsm6dsx_accel_ext_info, \
+ .ext_info = st_lsm6dsx_ext_info, \
.num_event_specs = 1, \
}
@@ -113,6 +113,7 @@ enum st_lsm6dsx_hw_id {
.storagebits = 16, \
.endianness = IIO_LE, \
}, \
+ .ext_info = st_lsm6dsx_ext_info, \
}
struct st_lsm6dsx_reg {
@@ -528,7 +529,7 @@ st_lsm6dsx_device_set_enable(struct st_lsm6dsx_sensor *sensor, bool enable)
}
static const
-struct iio_chan_spec_ext_info __maybe_unused st_lsm6dsx_accel_ext_info[] = {
+struct iio_chan_spec_ext_info __maybe_unused st_lsm6dsx_ext_info[] = {
IIO_MOUNT_MATRIX(IIO_SHARED_BY_ALL, st_lsm6dsx_get_mount_matrix),
{ }
};
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c
index f2b64b4956a3..c1b444520d2a 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c
@@ -704,18 +704,18 @@ static ssize_t st_lsm6dsx_shub_scale_avail(struct device *dev,
static IIO_DEV_ATTR_SAMP_FREQ_AVAIL(st_lsm6dsx_shub_sampling_freq_avail);
static IIO_DEVICE_ATTR(in_scale_available, 0444,
st_lsm6dsx_shub_scale_avail, NULL, 0);
-static struct attribute *st_lsm6dsx_ext_attributes[] = {
+static struct attribute *st_lsm6dsx_shub_attributes[] = {
&iio_dev_attr_sampling_frequency_available.dev_attr.attr,
&iio_dev_attr_in_scale_available.dev_attr.attr,
NULL,
};
-static const struct attribute_group st_lsm6dsx_ext_attribute_group = {
- .attrs = st_lsm6dsx_ext_attributes,
+static const struct attribute_group st_lsm6dsx_shub_attribute_group = {
+ .attrs = st_lsm6dsx_shub_attributes,
};
-static const struct iio_info st_lsm6dsx_ext_info = {
- .attrs = &st_lsm6dsx_ext_attribute_group,
+static const struct iio_info st_lsm6dsx_shub_info = {
+ .attrs = &st_lsm6dsx_shub_attribute_group,
.read_raw = st_lsm6dsx_shub_read_raw,
.write_raw = st_lsm6dsx_shub_write_raw,
.hwfifo_set_watermark = st_lsm6dsx_set_watermark,
@@ -737,7 +737,7 @@ st_lsm6dsx_shub_alloc_iiodev(struct st_lsm6dsx_hw *hw,
return NULL;
iio_dev->modes = INDIO_DIRECT_MODE;
- iio_dev->info = &st_lsm6dsx_ext_info;
+ iio_dev->info = &st_lsm6dsx_shub_info;
sensor = iio_priv(iio_dev);
sensor->id = id;
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index 52e690f031cb..c117f50d0cf3 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -8,30 +8,32 @@
#define pr_fmt(fmt) "iio-core: " fmt
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/idr.h>
-#include <linux/kdev_t.h>
-#include <linux/err.h>
+#include <linux/anon_inodes.h>
+#include <linux/cdev.h>
+#include <linux/debugfs.h>
#include <linux/device.h>
+#include <linux/err.h>
#include <linux/fs.h>
+#include <linux/idr.h>
+#include <linux/kdev_t.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/property.h>
#include <linux/sched.h>
-#include <linux/wait.h>
-#include <linux/cdev.h>
#include <linux/slab.h>
-#include <linux/anon_inodes.h>
-#include <linux/debugfs.h>
-#include <linux/mutex.h>
-#include <linux/iio/iio.h>
+#include <linux/wait.h>
+
+#include <linux/iio/buffer.h>
+#include <linux/iio/buffer_impl.h>
+#include <linux/iio/events.h>
#include <linux/iio/iio-opaque.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+
#include "iio_core.h"
#include "iio_core_trigger.h"
-#include <linux/iio/sysfs.h>
-#include <linux/iio/events.h>
-#include <linux/iio/buffer.h>
-#include <linux/iio/buffer_impl.h>
/* IDA to assign each registered device a unique id */
static DEFINE_IDA(iio_ida);
@@ -205,36 +207,6 @@ bool iio_buffer_enabled(struct iio_dev *indio_dev)
}
EXPORT_SYMBOL_GPL(iio_buffer_enabled);
-/**
- * iio_sysfs_match_string_with_gaps - matches given string in an array with gaps
- * @array: array of strings
- * @n: number of strings in the array
- * @str: string to match with
- *
- * Returns index of @str in the @array or -EINVAL, similar to match_string().
- * Uses sysfs_streq instead of strcmp for matching.
- *
- * This routine will look for a string in an array of strings.
- * The search will continue until the element is found or the n-th element
- * is reached, regardless of any NULL elements in the array.
- */
-static int iio_sysfs_match_string_with_gaps(const char * const *array, size_t n,
- const char *str)
-{
- const char *item;
- int index;
-
- for (index = 0; index < n; index++) {
- item = array[index];
- if (!item)
- continue;
- if (sysfs_streq(item, str))
- return index;
- }
-
- return -EINVAL;
-}
-
#if defined(CONFIG_DEBUG_FS)
/*
* There's also a CONFIG_DEBUG_FS guard in include/linux/iio/iio.h for
@@ -569,7 +541,7 @@ ssize_t iio_enum_write(struct iio_dev *indio_dev,
if (!e->set)
return -EINVAL;
- ret = iio_sysfs_match_string_with_gaps(e->items, e->num_items, buf);
+ ret = __sysfs_match_string(e->items, e->num_items, buf);
if (ret < 0)
return ret;
diff --git a/drivers/iio/light/Makefile b/drivers/iio/light/Makefile
index 6f23817fae6f..d74d2b5ff14c 100644
--- a/drivers/iio/light/Makefile
+++ b/drivers/iio/light/Makefile
@@ -39,7 +39,6 @@ obj-$(CONFIG_NOA1305) += noa1305.o
obj-$(CONFIG_OPT3001) += opt3001.o
obj-$(CONFIG_PA12203001) += pa12203001.o
obj-$(CONFIG_RPR0521) += rpr0521.o
-obj-$(CONFIG_SENSORS_TSL2563) += tsl2563.o
obj-$(CONFIG_SI1133) += si1133.o
obj-$(CONFIG_SI1145) += si1145.o
obj-$(CONFIG_STK3310) += stk3310.o
@@ -48,6 +47,7 @@ obj-$(CONFIG_ST_UVIS25_I2C) += st_uvis25_i2c.o
obj-$(CONFIG_ST_UVIS25_SPI) += st_uvis25_spi.o
obj-$(CONFIG_TCS3414) += tcs3414.o
obj-$(CONFIG_TCS3472) += tcs3472.o
+obj-$(CONFIG_SENSORS_TSL2563) += tsl2563.o
obj-$(CONFIG_TSL2583) += tsl2583.o
obj-$(CONFIG_TSL2591) += tsl2591.o
obj-$(CONFIG_TSL2772) += tsl2772.o
diff --git a/drivers/iio/light/cm32181.c b/drivers/iio/light/cm32181.c
index 001055d09750..b1674a5bfa36 100644
--- a/drivers/iio/light/cm32181.c
+++ b/drivers/iio/light/cm32181.c
@@ -440,6 +440,8 @@ static int cm32181_probe(struct i2c_client *client)
if (!indio_dev)
return -ENOMEM;
+ i2c_set_clientdata(client, indio_dev);
+
/*
* Some ACPI systems list 2 I2C resources for the CM3218 sensor, the
* SMBus Alert Response Address (ARA, 0x0c) and the actual I2C address.
@@ -460,8 +462,6 @@ static int cm32181_probe(struct i2c_client *client)
return PTR_ERR(client);
}
- i2c_set_clientdata(client, indio_dev);
-
cm32181 = iio_priv(indio_dev);
cm32181->client = client;
cm32181->dev = dev;
@@ -490,7 +490,8 @@ static int cm32181_probe(struct i2c_client *client)
static int cm32181_suspend(struct device *dev)
{
- struct i2c_client *client = to_i2c_client(dev);
+ struct cm32181_chip *cm32181 = iio_priv(dev_get_drvdata(dev));
+ struct i2c_client *client = cm32181->client;
return i2c_smbus_write_word_data(client, CM32181_REG_ADDR_CMD,
CM32181_CMD_ALS_DISABLE);
@@ -498,8 +499,8 @@ static int cm32181_suspend(struct device *dev)
static int cm32181_resume(struct device *dev)
{
- struct i2c_client *client = to_i2c_client(dev);
struct cm32181_chip *cm32181 = iio_priv(dev_get_drvdata(dev));
+ struct i2c_client *client = cm32181->client;
return i2c_smbus_write_word_data(client, CM32181_REG_ADDR_CMD,
cm32181->conf_regs[CM32181_REG_ADDR_CMD]);
diff --git a/drivers/iio/light/hid-sensor-als.c b/drivers/iio/light/hid-sensor-als.c
index 5a1a625d8d16..eb1aedad7edc 100644
--- a/drivers/iio/light/hid-sensor-als.c
+++ b/drivers/iio/light/hid-sensor-als.c
@@ -86,6 +86,7 @@ static int als_read_raw(struct iio_dev *indio_dev,
long mask)
{
struct als_state *als_state = iio_priv(indio_dev);
+ struct hid_sensor_hub_device *hsdev = als_state->common_attributes.hsdev;
int report_id = -1;
u32 address;
int ret_type;
@@ -110,11 +111,8 @@ static int als_read_raw(struct iio_dev *indio_dev,
hid_sensor_power_state(&als_state->common_attributes,
true);
*val = sensor_hub_input_attr_get_raw_value(
- als_state->common_attributes.hsdev,
- HID_USAGE_SENSOR_ALS, address,
- report_id,
- SENSOR_HUB_SYNC,
- min < 0);
+ hsdev, hsdev->usage, address, report_id,
+ SENSOR_HUB_SYNC, min < 0);
hid_sensor_power_state(&als_state->common_attributes,
false);
} else {
@@ -259,9 +257,7 @@ static int als_parse_report(struct platform_device *pdev,
dev_dbg(&pdev->dev, "als %x:%x\n", st->als_illum.index,
st->als_illum.report_id);
- st->scale_precision = hid_sensor_format_scale(
- HID_USAGE_SENSOR_ALS,
- &st->als_illum,
+ st->scale_precision = hid_sensor_format_scale(usage_id, &st->als_illum,
&st->scale_pre_decml, &st->scale_post_decml);
return ret;
@@ -285,7 +281,8 @@ static int hid_als_probe(struct platform_device *pdev)
als_state->common_attributes.hsdev = hsdev;
als_state->common_attributes.pdev = pdev;
- ret = hid_sensor_parse_common_attributes(hsdev, HID_USAGE_SENSOR_ALS,
+ ret = hid_sensor_parse_common_attributes(hsdev,
+ hsdev->usage,
&als_state->common_attributes,
als_sensitivity_addresses,
ARRAY_SIZE(als_sensitivity_addresses));
@@ -303,7 +300,8 @@ static int hid_als_probe(struct platform_device *pdev)
ret = als_parse_report(pdev, hsdev,
(struct iio_chan_spec *)indio_dev->channels,
- HID_USAGE_SENSOR_ALS, als_state);
+ hsdev->usage,
+ als_state);
if (ret) {
dev_err(&pdev->dev, "failed to setup attributes\n");
return ret;
@@ -333,8 +331,7 @@ static int hid_als_probe(struct platform_device *pdev)
als_state->callbacks.send_event = als_proc_event;
als_state->callbacks.capture_sample = als_capture_sample;
als_state->callbacks.pdev = pdev;
- ret = sensor_hub_register_callback(hsdev, HID_USAGE_SENSOR_ALS,
- &als_state->callbacks);
+ ret = sensor_hub_register_callback(hsdev, hsdev->usage, &als_state->callbacks);
if (ret < 0) {
dev_err(&pdev->dev, "callback reg failed\n");
goto error_iio_unreg;
@@ -356,7 +353,7 @@ static int hid_als_remove(struct platform_device *pdev)
struct iio_dev *indio_dev = platform_get_drvdata(pdev);
struct als_state *als_state = iio_priv(indio_dev);
- sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_ALS);
+ sensor_hub_remove_callback(hsdev, hsdev->usage);
iio_device_unregister(indio_dev);
hid_sensor_remove_trigger(indio_dev, &als_state->common_attributes);
@@ -368,6 +365,10 @@ static const struct platform_device_id hid_als_ids[] = {
/* Format: HID-SENSOR-usage_id_in_hex_lowercase */
.name = "HID-SENSOR-200041",
},
+ {
+ /* Format: HID-SENSOR-custom_sensor_tag-usage_id_in_hex_lowercase */
+ .name = "HID-SENSOR-LISS-0041",
+ },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(platform, hid_als_ids);
diff --git a/drivers/iio/light/hid-sensor-prox.c b/drivers/iio/light/hid-sensor-prox.c
index f10fa2abfe72..a47591e1bad9 100644
--- a/drivers/iio/light/hid-sensor-prox.c
+++ b/drivers/iio/light/hid-sensor-prox.c
@@ -61,6 +61,7 @@ static int prox_read_raw(struct iio_dev *indio_dev,
long mask)
{
struct prox_state *prox_state = iio_priv(indio_dev);
+ struct hid_sensor_hub_device *hsdev;
int report_id = -1;
u32 address;
int ret_type;
@@ -75,6 +76,7 @@ static int prox_read_raw(struct iio_dev *indio_dev,
report_id = prox_state->prox_attr.report_id;
min = prox_state->prox_attr.logical_minimum;
address = HID_USAGE_SENSOR_HUMAN_PRESENCE;
+ hsdev = prox_state->common_attributes.hsdev;
break;
default:
report_id = -1;
@@ -84,11 +86,8 @@ static int prox_read_raw(struct iio_dev *indio_dev,
hid_sensor_power_state(&prox_state->common_attributes,
true);
*val = sensor_hub_input_attr_get_raw_value(
- prox_state->common_attributes.hsdev,
- HID_USAGE_SENSOR_PROX, address,
- report_id,
- SENSOR_HUB_SYNC,
- min < 0);
+ hsdev, hsdev->usage, address, report_id,
+ SENSOR_HUB_SYNC, min < 0);
hid_sensor_power_state(&prox_state->common_attributes,
false);
} else {
@@ -191,10 +190,16 @@ static int prox_capture_sample(struct hid_sensor_hub_device *hsdev,
switch (usage_id) {
case HID_USAGE_SENSOR_HUMAN_PRESENCE:
- prox_state->human_presence = *(u32 *)raw_data;
- ret = 0;
- break;
- default:
+ switch (raw_len) {
+ case 1:
+ prox_state->human_presence = *(u8 *)raw_data;
+ return 0;
+ case 4:
+ prox_state->human_presence = *(u32 *)raw_data;
+ return 0;
+ default:
+ break;
+ }
break;
}
@@ -244,7 +249,7 @@ static int hid_prox_probe(struct platform_device *pdev)
prox_state->common_attributes.hsdev = hsdev;
prox_state->common_attributes.pdev = pdev;
- ret = hid_sensor_parse_common_attributes(hsdev, HID_USAGE_SENSOR_PROX,
+ ret = hid_sensor_parse_common_attributes(hsdev, hsdev->usage,
&prox_state->common_attributes,
prox_sensitivity_addresses,
ARRAY_SIZE(prox_sensitivity_addresses));
@@ -262,7 +267,7 @@ static int hid_prox_probe(struct platform_device *pdev)
ret = prox_parse_report(pdev, hsdev,
(struct iio_chan_spec *)indio_dev->channels,
- HID_USAGE_SENSOR_PROX, prox_state);
+ hsdev->usage, prox_state);
if (ret) {
dev_err(&pdev->dev, "failed to setup attributes\n");
return ret;
@@ -291,8 +296,8 @@ static int hid_prox_probe(struct platform_device *pdev)
prox_state->callbacks.send_event = prox_proc_event;
prox_state->callbacks.capture_sample = prox_capture_sample;
prox_state->callbacks.pdev = pdev;
- ret = sensor_hub_register_callback(hsdev, HID_USAGE_SENSOR_PROX,
- &prox_state->callbacks);
+ ret = sensor_hub_register_callback(hsdev, hsdev->usage,
+ &prox_state->callbacks);
if (ret < 0) {
dev_err(&pdev->dev, "callback reg failed\n");
goto error_iio_unreg;
@@ -314,7 +319,7 @@ static int hid_prox_remove(struct platform_device *pdev)
struct iio_dev *indio_dev = platform_get_drvdata(pdev);
struct prox_state *prox_state = iio_priv(indio_dev);
- sensor_hub_remove_callback(hsdev, HID_USAGE_SENSOR_PROX);
+ sensor_hub_remove_callback(hsdev, hsdev->usage);
iio_device_unregister(indio_dev);
hid_sensor_remove_trigger(indio_dev, &prox_state->common_attributes);
@@ -326,6 +331,10 @@ static const struct platform_device_id hid_prox_ids[] = {
/* Format: HID-SENSOR-usage_id_in_hex_lowercase */
.name = "HID-SENSOR-200011",
},
+ {
+ /* Format: HID-SENSOR-tag-usage_id_in_hex_lowercase */
+ .name = "HID-SENSOR-LISS-0226",
+ },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(platform, hid_prox_ids);
diff --git a/drivers/iio/light/max44009.c b/drivers/iio/light/max44009.c
index 801e5a0ad496..3dadace09fe2 100644
--- a/drivers/iio/light/max44009.c
+++ b/drivers/iio/light/max44009.c
@@ -487,8 +487,7 @@ static irqreturn_t max44009_threaded_irq_handler(int irq, void *p)
return IRQ_NONE;
}
-static int max44009_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int max44009_probe(struct i2c_client *client)
{
struct max44009_data *data;
struct iio_dev *indio_dev;
@@ -538,7 +537,7 @@ static struct i2c_driver max44009_driver = {
.driver = {
.name = MAX44009_DRV_NAME,
},
- .probe = max44009_probe,
+ .probe_new = max44009_probe,
.id_table = max44009_id,
};
module_i2c_driver(max44009_driver);
diff --git a/drivers/iio/light/tsl2563.c b/drivers/iio/light/tsl2563.c
index d0e42b73203a..f2f55239a072 100644
--- a/drivers/iio/light/tsl2563.c
+++ b/drivers/iio/light/tsl2563.c
@@ -11,69 +11,63 @@
* Amit Kucheria <amit.kucheria@verdurent.com>
*/
-#include <linux/module.h>
-#include <linux/mod_devicetable.h>
-#include <linux/property.h>
+#include <linux/bits.h>
+#include <linux/delay.h>
+#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
-#include <linux/sched.h>
+#include <linux/math.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
#include <linux/mutex.h>
-#include <linux/delay.h>
#include <linux/pm.h>
-#include <linux/err.h>
+#include <linux/property.h>
+#include <linux/sched.h>
#include <linux/slab.h>
+#include <linux/iio/events.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
-#include <linux/iio/events.h>
-#include <linux/platform_data/tsl2563.h>
/* Use this many bits for fraction part. */
#define ADC_FRAC_BITS 14
/* Given number of 1/10000's in ADC_FRAC_BITS precision. */
-#define FRAC10K(f) (((f) * (1L << (ADC_FRAC_BITS))) / (10000))
+#define FRAC10K(f) (((f) * BIT(ADC_FRAC_BITS)) / (10000))
/* Bits used for fraction in calibration coefficients.*/
#define CALIB_FRAC_BITS 10
-/* 0.5 in CALIB_FRAC_BITS precision */
-#define CALIB_FRAC_HALF (1 << (CALIB_FRAC_BITS - 1))
-/* Make a fraction from a number n that was multiplied with b. */
-#define CALIB_FRAC(n, b) (((n) << CALIB_FRAC_BITS) / (b))
/* Decimal 10^(digits in sysfs presentation) */
#define CALIB_BASE_SYSFS 1000
-#define TSL2563_CMD 0x80
-#define TSL2563_CLEARINT 0x40
+#define TSL2563_CMD BIT(7)
+#define TSL2563_CLEARINT BIT(6)
#define TSL2563_REG_CTRL 0x00
#define TSL2563_REG_TIMING 0x01
-#define TSL2563_REG_LOWLOW 0x02 /* data0 low threshold, 2 bytes */
-#define TSL2563_REG_LOWHIGH 0x03
-#define TSL2563_REG_HIGHLOW 0x04 /* data0 high threshold, 2 bytes */
-#define TSL2563_REG_HIGHHIGH 0x05
+#define TSL2563_REG_LOW 0x02 /* data0 low threshold, 2 bytes */
+#define TSL2563_REG_HIGH 0x04 /* data0 high threshold, 2 bytes */
#define TSL2563_REG_INT 0x06
#define TSL2563_REG_ID 0x0a
-#define TSL2563_REG_DATA0LOW 0x0c /* broadband sensor value, 2 bytes */
-#define TSL2563_REG_DATA0HIGH 0x0d
-#define TSL2563_REG_DATA1LOW 0x0e /* infrared sensor value, 2 bytes */
-#define TSL2563_REG_DATA1HIGH 0x0f
+#define TSL2563_REG_DATA0 0x0c /* broadband sensor value, 2 bytes */
+#define TSL2563_REG_DATA1 0x0e /* infrared sensor value, 2 bytes */
#define TSL2563_CMD_POWER_ON 0x03
#define TSL2563_CMD_POWER_OFF 0x00
-#define TSL2563_CTRL_POWER_MASK 0x03
+#define TSL2563_CTRL_POWER_MASK GENMASK(1, 0)
#define TSL2563_TIMING_13MS 0x00
#define TSL2563_TIMING_100MS 0x01
#define TSL2563_TIMING_400MS 0x02
-#define TSL2563_TIMING_MASK 0x03
+#define TSL2563_TIMING_MASK GENMASK(1, 0)
#define TSL2563_TIMING_GAIN16 0x10
#define TSL2563_TIMING_GAIN1 0x00
#define TSL2563_INT_DISABLED 0x00
#define TSL2563_INT_LEVEL 0x10
-#define TSL2563_INT_PERSIST(n) ((n) & 0x0F)
+#define TSL2563_INT_MASK GENMASK(5, 4)
+#define TSL2563_INT_PERSIST(n) ((n) & GENMASK(3, 0))
struct tsl2563_gainlevel_coeff {
u8 gaintime;
@@ -161,24 +155,16 @@ static int tsl2563_configure(struct tsl2563_chip *chip)
chip->gainlevel->gaintime);
if (ret)
goto error_ret;
- ret = i2c_smbus_write_byte_data(chip->client,
- TSL2563_CMD | TSL2563_REG_HIGHLOW,
- chip->high_thres & 0xFF);
- if (ret)
- goto error_ret;
- ret = i2c_smbus_write_byte_data(chip->client,
- TSL2563_CMD | TSL2563_REG_HIGHHIGH,
- (chip->high_thres >> 8) & 0xFF);
+ ret = i2c_smbus_write_word_data(chip->client,
+ TSL2563_CMD | TSL2563_REG_HIGH,
+ chip->high_thres);
if (ret)
goto error_ret;
- ret = i2c_smbus_write_byte_data(chip->client,
- TSL2563_CMD | TSL2563_REG_LOWLOW,
- chip->low_thres & 0xFF);
+ ret = i2c_smbus_write_word_data(chip->client,
+ TSL2563_CMD | TSL2563_REG_LOW,
+ chip->low_thres);
if (ret)
goto error_ret;
- ret = i2c_smbus_write_byte_data(chip->client,
- TSL2563_CMD | TSL2563_REG_LOWHIGH,
- (chip->low_thres >> 8) & 0xFF);
/*
* Interrupt register is automatically written anyway if it is relevant
* so is not here.
@@ -223,6 +209,24 @@ static int tsl2563_read_id(struct tsl2563_chip *chip, u8 *id)
return 0;
}
+static int tsl2563_configure_irq(struct tsl2563_chip *chip, bool enable)
+{
+ int ret;
+
+ chip->intr &= ~TSL2563_INT_MASK;
+ if (enable)
+ chip->intr |= TSL2563_INT_LEVEL;
+
+ ret = i2c_smbus_write_byte_data(chip->client,
+ TSL2563_CMD | TSL2563_REG_INT,
+ chip->intr);
+ if (ret < 0)
+ return ret;
+
+ chip->int_enabled = enable;
+ return 0;
+}
+
/*
* "Normalized" ADC value is one obtained with 400ms of integration time and
* 16x gain. This function returns the number of bits of shift needed to
@@ -325,13 +329,13 @@ static int tsl2563_get_adc(struct tsl2563_chip *chip)
while (retry) {
ret = i2c_smbus_read_word_data(client,
- TSL2563_CMD | TSL2563_REG_DATA0LOW);
+ TSL2563_CMD | TSL2563_REG_DATA0);
if (ret < 0)
goto out;
adc0 = ret;
ret = i2c_smbus_read_word_data(client,
- TSL2563_CMD | TSL2563_REG_DATA1LOW);
+ TSL2563_CMD | TSL2563_REG_DATA1);
if (ret < 0)
goto out;
adc1 = ret;
@@ -352,12 +356,12 @@ out:
static inline int tsl2563_calib_to_sysfs(u32 calib)
{
- return (int) (((calib * CALIB_BASE_SYSFS) +
- CALIB_FRAC_HALF) >> CALIB_FRAC_BITS);
+ return (int)DIV_ROUND_CLOSEST(calib * CALIB_BASE_SYSFS, BIT(CALIB_FRAC_BITS));
}
static inline u32 tsl2563_calib_from_sysfs(int value)
{
+ /* Make a fraction from a number n that was multiplied with b. */
return (((u32) value) << CALIB_FRAC_BITS) / CALIB_BASE_SYSFS;
}
@@ -584,20 +588,18 @@ static int tsl2563_write_thresh(struct iio_dev *indio_dev,
{
struct tsl2563_chip *chip = iio_priv(indio_dev);
int ret;
- u8 address;
+
+ mutex_lock(&chip->lock);
if (dir == IIO_EV_DIR_RISING)
- address = TSL2563_REG_HIGHLOW;
+ ret = i2c_smbus_write_word_data(chip->client,
+ TSL2563_CMD | TSL2563_REG_HIGH, val);
else
- address = TSL2563_REG_LOWLOW;
- mutex_lock(&chip->lock);
- ret = i2c_smbus_write_byte_data(chip->client, TSL2563_CMD | address,
- val & 0xFF);
+ ret = i2c_smbus_write_word_data(chip->client,
+ TSL2563_CMD | TSL2563_REG_LOW, val);
if (ret)
goto error_ret;
- ret = i2c_smbus_write_byte_data(chip->client,
- TSL2563_CMD | (address + 1),
- (val >> 8) & 0xFF);
+
if (dir == IIO_EV_DIR_RISING)
chip->high_thres = val;
else
@@ -634,9 +636,7 @@ static int tsl2563_write_interrupt_config(struct iio_dev *indio_dev,
int ret = 0;
mutex_lock(&chip->lock);
- if (state && !(chip->intr & 0x30)) {
- chip->intr &= ~0x30;
- chip->intr |= 0x10;
+ if (state && !(chip->intr & TSL2563_INT_MASK)) {
/* ensure the chip is actually on */
cancel_delayed_work_sync(&chip->poweroff_work);
if (!tsl2563_get_power(chip)) {
@@ -647,18 +647,11 @@ static int tsl2563_write_interrupt_config(struct iio_dev *indio_dev,
if (ret)
goto out;
}
- ret = i2c_smbus_write_byte_data(chip->client,
- TSL2563_CMD | TSL2563_REG_INT,
- chip->intr);
- chip->int_enabled = true;
+ ret = tsl2563_configure_irq(chip, true);
}
- if (!state && (chip->intr & 0x30)) {
- chip->intr &= ~0x30;
- ret = i2c_smbus_write_byte_data(chip->client,
- TSL2563_CMD | TSL2563_REG_INT,
- chip->intr);
- chip->int_enabled = false;
+ if (!state && (chip->intr & TSL2563_INT_MASK)) {
+ ret = tsl2563_configure_irq(chip, false);
/* now the interrupt is not enabled, we can go to sleep */
schedule_delayed_work(&chip->poweroff_work, 5 * HZ);
}
@@ -682,7 +675,7 @@ static int tsl2563_read_interrupt_config(struct iio_dev *indio_dev,
if (ret < 0)
return ret;
- return !!(ret & 0x30);
+ return !!(ret & TSL2563_INT_MASK);
}
static const struct iio_info tsl2563_info_no_irq = {
@@ -701,13 +694,14 @@ static const struct iio_info tsl2563_info = {
static int tsl2563_probe(struct i2c_client *client)
{
+ struct device *dev = &client->dev;
struct iio_dev *indio_dev;
struct tsl2563_chip *chip;
- struct tsl2563_platform_data *pdata = client->dev.platform_data;
- int err = 0;
+ unsigned long irq_flags;
u8 id = 0;
+ int err;
- indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*chip));
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*chip));
if (!indio_dev)
return -ENOMEM;
@@ -717,16 +711,12 @@ static int tsl2563_probe(struct i2c_client *client)
chip->client = client;
err = tsl2563_detect(chip);
- if (err) {
- dev_err(&client->dev, "detect error %d\n", -err);
- return err;
- }
+ if (err)
+ return dev_err_probe(dev, err, "detect error\n");
err = tsl2563_read_id(chip, &id);
- if (err) {
- dev_err(&client->dev, "read id error %d\n", -err);
- return err;
- }
+ if (err)
+ return dev_err_probe(dev, err, "read id error\n");
mutex_init(&chip->lock);
@@ -738,16 +728,10 @@ static int tsl2563_probe(struct i2c_client *client)
chip->calib0 = tsl2563_calib_from_sysfs(CALIB_BASE_SYSFS);
chip->calib1 = tsl2563_calib_from_sysfs(CALIB_BASE_SYSFS);
- if (pdata) {
- chip->cover_comp_gain = pdata->cover_comp_gain;
- } else {
- err = device_property_read_u32(&client->dev, "amstaos,cover-comp-gain",
- &chip->cover_comp_gain);
- if (err)
- chip->cover_comp_gain = 1;
- }
+ chip->cover_comp_gain = 1;
+ device_property_read_u32(dev, "amstaos,cover-comp-gain", &chip->cover_comp_gain);
- dev_info(&client->dev, "model %d, rev. %d\n", id >> 4, id & 0x0f);
+ dev_info(dev, "model %d, rev. %d\n", id >> 4, id & 0x0f);
indio_dev->name = client->name;
indio_dev->channels = tsl2563_channels;
indio_dev->num_channels = ARRAY_SIZE(tsl2563_channels);
@@ -759,23 +743,24 @@ static int tsl2563_probe(struct i2c_client *client)
indio_dev->info = &tsl2563_info_no_irq;
if (client->irq) {
- err = devm_request_threaded_irq(&client->dev, client->irq,
+ irq_flags = irq_get_trigger_type(client->irq);
+ if (irq_flags == IRQF_TRIGGER_NONE)
+ irq_flags = IRQF_TRIGGER_RISING;
+ irq_flags |= IRQF_ONESHOT;
+
+ err = devm_request_threaded_irq(dev, client->irq,
NULL,
&tsl2563_event_handler,
- IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+ irq_flags,
"tsl2563_event",
indio_dev);
- if (err) {
- dev_err(&client->dev, "irq request error %d\n", -err);
- return err;
- }
+ if (err)
+ return dev_err_probe(dev, err, "irq request error\n");
}
err = tsl2563_configure(chip);
- if (err) {
- dev_err(&client->dev, "configure error %d\n", -err);
- return err;
- }
+ if (err)
+ return dev_err_probe(dev, err, "configure error\n");
INIT_DELAYED_WORK(&chip->poweroff_work, tsl2563_poweroff_work);
@@ -784,7 +769,7 @@ static int tsl2563_probe(struct i2c_client *client)
err = iio_device_register(indio_dev);
if (err) {
- dev_err(&client->dev, "iio registration error %d\n", -err);
+ dev_err_probe(dev, err, "iio registration error\n");
goto fail;
}
@@ -804,15 +789,13 @@ static void tsl2563_remove(struct i2c_client *client)
if (!chip->int_enabled)
cancel_delayed_work_sync(&chip->poweroff_work);
/* Ensure that interrupts are disabled - then flush any bottom halves */
- chip->intr &= ~0x30;
- i2c_smbus_write_byte_data(chip->client, TSL2563_CMD | TSL2563_REG_INT,
- chip->intr);
+ tsl2563_configure_irq(chip, false);
tsl2563_set_power(chip, 0);
}
static int tsl2563_suspend(struct device *dev)
{
- struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct tsl2563_chip *chip = iio_priv(indio_dev);
int ret;
@@ -831,7 +814,7 @@ out:
static int tsl2563_resume(struct device *dev)
{
- struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct tsl2563_chip *chip = iio_priv(indio_dev);
int ret;
diff --git a/drivers/iio/light/vcnl4000.c b/drivers/iio/light/vcnl4000.c
index cc1a2062e76d..6bdfce9747f9 100644
--- a/drivers/iio/light/vcnl4000.c
+++ b/drivers/iio/light/vcnl4000.c
@@ -60,8 +60,11 @@
#define VCNL4200_AL_CONF 0x00 /* Ambient light configuration */
#define VCNL4200_PS_CONF1 0x03 /* Proximity configuration */
+#define VCNL4040_PS_THDL_LM 0x06 /* Proximity threshold low */
+#define VCNL4040_PS_THDH_LM 0x07 /* Proximity threshold high */
#define VCNL4200_PS_DATA 0x08 /* Proximity data */
#define VCNL4200_AL_DATA 0x09 /* Ambient light data */
+#define VCNL4040_INT_FLAGS 0x0b /* Interrupt register */
#define VCNL4200_DEV_ID 0x0e /* Device ID, slave address and version */
#define VCNL4040_DEV_ID 0x0c /* Device ID and version */
@@ -78,6 +81,9 @@
#define VCNL4040_ALS_CONF_ALS_SHUTDOWN BIT(0)
#define VCNL4040_PS_CONF1_PS_SHUTDOWN BIT(0)
#define VCNL4040_PS_CONF2_PS_IT GENMASK(3, 1) /* Proximity integration time */
+#define VCNL4040_PS_CONF2_PS_INT GENMASK(9, 8) /* Proximity interrupt mode */
+#define VCNL4040_PS_IF_AWAY BIT(8) /* Proximity event cross low threshold */
+#define VCNL4040_PS_IF_CLOSE BIT(9) /* Proximity event cross high threshold */
/* Bit masks for interrupt registers. */
#define VCNL4010_INT_THR_SEL BIT(0) /* Select threshold interrupt source */
@@ -138,6 +144,7 @@ struct vcnl4000_data {
enum vcnl4000_device_ids id;
int rev;
int al_scale;
+ u8 ps_int; /* proximity interrupt mode */
const struct vcnl4000_chip_spec *chip_spec;
struct mutex vcnl4000_lock;
struct vcnl4200_channel vcnl4200_al;
@@ -150,11 +157,13 @@ struct vcnl4000_chip_spec {
struct iio_chan_spec const *channels;
const int num_channels;
const struct iio_info *info;
- bool irq_support;
+ const struct iio_buffer_setup_ops *buffer_setup_ops;
int (*init)(struct vcnl4000_data *data);
int (*measure_light)(struct vcnl4000_data *data, int *val);
int (*measure_proximity)(struct vcnl4000_data *data, int *val);
int (*set_power_state)(struct vcnl4000_data *data, bool on);
+ irqreturn_t (*irq_thread)(int irq, void *priv);
+ irqreturn_t (*trig_buffer_func)(int irq, void *priv);
};
static const struct i2c_device_id vcnl4000_id[] = {
@@ -254,6 +263,10 @@ static int vcnl4200_set_power_state(struct vcnl4000_data *data, bool on)
{
int ret;
+ /* Do not power down if interrupts are enabled */
+ if (!on && data->ps_int)
+ return 0;
+
ret = vcnl4000_write_als_enable(data, on);
if (ret < 0)
return ret;
@@ -295,6 +308,7 @@ static int vcnl4200_init(struct vcnl4000_data *data)
dev_dbg(&data->client->dev, "device id 0x%x", id);
data->rev = (ret >> 8) & 0xf;
+ data->ps_int = 0;
data->vcnl4200_al.reg = VCNL4200_AL_DATA;
data->vcnl4200_ps.reg = VCNL4200_PS_DATA;
@@ -795,6 +809,64 @@ static int vcnl4010_write_event(struct iio_dev *indio_dev,
}
}
+static int vcnl4040_read_event(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info,
+ int *val, int *val2)
+{
+ int ret;
+ struct vcnl4000_data *data = iio_priv(indio_dev);
+
+ switch (dir) {
+ case IIO_EV_DIR_RISING:
+ ret = i2c_smbus_read_word_data(data->client,
+ VCNL4040_PS_THDH_LM);
+ if (ret < 0)
+ return ret;
+ *val = ret;
+ return IIO_VAL_INT;
+ case IIO_EV_DIR_FALLING:
+ ret = i2c_smbus_read_word_data(data->client,
+ VCNL4040_PS_THDL_LM);
+ if (ret < 0)
+ return ret;
+ *val = ret;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int vcnl4040_write_event(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info,
+ int val, int val2)
+{
+ int ret;
+ struct vcnl4000_data *data = iio_priv(indio_dev);
+
+ switch (dir) {
+ case IIO_EV_DIR_RISING:
+ ret = i2c_smbus_write_word_data(data->client,
+ VCNL4040_PS_THDH_LM, val);
+ if (ret < 0)
+ return ret;
+ return IIO_VAL_INT;
+ case IIO_EV_DIR_FALLING:
+ ret = i2c_smbus_write_word_data(data->client,
+ VCNL4040_PS_THDL_LM, val);
+ if (ret < 0)
+ return ret;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+}
+
static bool vcnl4010_is_thr_enabled(struct vcnl4000_data *data)
{
int ret;
@@ -877,6 +949,86 @@ static int vcnl4010_write_event_config(struct iio_dev *indio_dev,
}
}
+static int vcnl4040_read_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir)
+{
+ int ret;
+ struct vcnl4000_data *data = iio_priv(indio_dev);
+
+ ret = i2c_smbus_read_word_data(data->client, VCNL4200_PS_CONF1);
+ if (ret < 0)
+ return ret;
+
+ data->ps_int = FIELD_GET(VCNL4040_PS_CONF2_PS_INT, ret);
+
+ return (dir == IIO_EV_DIR_RISING) ?
+ FIELD_GET(VCNL4040_PS_IF_AWAY, ret) :
+ FIELD_GET(VCNL4040_PS_IF_CLOSE, ret);
+}
+
+static int vcnl4040_write_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir, int state)
+{
+ int ret;
+ u16 val, mask;
+ struct vcnl4000_data *data = iio_priv(indio_dev);
+
+ mutex_lock(&data->vcnl4000_lock);
+
+ ret = i2c_smbus_read_word_data(data->client, VCNL4200_PS_CONF1);
+ if (ret < 0)
+ goto out;
+
+ if (dir == IIO_EV_DIR_RISING)
+ mask = VCNL4040_PS_IF_AWAY;
+ else
+ mask = VCNL4040_PS_IF_CLOSE;
+
+ val = state ? (ret | mask) : (ret & ~mask);
+
+ data->ps_int = FIELD_GET(VCNL4040_PS_CONF2_PS_INT, val);
+ ret = i2c_smbus_write_word_data(data->client, VCNL4200_PS_CONF1, val);
+
+out:
+ mutex_unlock(&data->vcnl4000_lock);
+ data->chip_spec->set_power_state(data, data->ps_int != 0);
+
+ return ret;
+}
+
+static irqreturn_t vcnl4040_irq_thread(int irq, void *p)
+{
+ struct iio_dev *indio_dev = p;
+ struct vcnl4000_data *data = iio_priv(indio_dev);
+ int ret;
+
+ ret = i2c_smbus_read_word_data(data->client, VCNL4040_INT_FLAGS);
+ if (ret < 0)
+ return IRQ_HANDLED;
+
+ if (ret & VCNL4040_PS_IF_CLOSE) {
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(IIO_PROXIMITY, 0,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_RISING),
+ iio_get_time_ns(indio_dev));
+ }
+
+ if (ret & VCNL4040_PS_IF_AWAY) {
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(IIO_PROXIMITY, 0,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_FALLING),
+ iio_get_time_ns(indio_dev));
+ }
+
+ return IRQ_HANDLED;
+}
+
static ssize_t vcnl4000_read_near_level(struct iio_dev *indio_dev,
uintptr_t priv,
const struct iio_chan_spec *chan,
@@ -887,6 +1039,134 @@ static ssize_t vcnl4000_read_near_level(struct iio_dev *indio_dev,
return sprintf(buf, "%u\n", data->near_level);
}
+static irqreturn_t vcnl4010_irq_thread(int irq, void *p)
+{
+ struct iio_dev *indio_dev = p;
+ struct vcnl4000_data *data = iio_priv(indio_dev);
+ unsigned long isr;
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(data->client, VCNL4010_ISR);
+ if (ret < 0)
+ goto end;
+
+ isr = ret;
+
+ if (isr & VCNL4010_INT_THR) {
+ if (test_bit(VCNL4010_INT_THR_LOW, &isr)) {
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(
+ IIO_PROXIMITY,
+ 1,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_FALLING),
+ iio_get_time_ns(indio_dev));
+ }
+
+ if (test_bit(VCNL4010_INT_THR_HIGH, &isr)) {
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(
+ IIO_PROXIMITY,
+ 1,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_RISING),
+ iio_get_time_ns(indio_dev));
+ }
+
+ i2c_smbus_write_byte_data(data->client, VCNL4010_ISR,
+ isr & VCNL4010_INT_THR);
+ }
+
+ if (isr & VCNL4010_INT_DRDY && iio_buffer_enabled(indio_dev))
+ iio_trigger_poll_chained(indio_dev->trig);
+
+end:
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t vcnl4010_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct vcnl4000_data *data = iio_priv(indio_dev);
+ const unsigned long *active_scan_mask = indio_dev->active_scan_mask;
+ u16 buffer[8] __aligned(8) = {0}; /* 1x16-bit + naturally aligned ts */
+ bool data_read = false;
+ unsigned long isr;
+ int val = 0;
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(data->client, VCNL4010_ISR);
+ if (ret < 0)
+ goto end;
+
+ isr = ret;
+
+ if (test_bit(0, active_scan_mask)) {
+ if (test_bit(VCNL4010_INT_PROXIMITY, &isr)) {
+ ret = vcnl4000_read_data(data,
+ VCNL4000_PS_RESULT_HI,
+ &val);
+ if (ret < 0)
+ goto end;
+
+ buffer[0] = val;
+ data_read = true;
+ }
+ }
+
+ ret = i2c_smbus_write_byte_data(data->client, VCNL4010_ISR,
+ isr & VCNL4010_INT_DRDY);
+ if (ret < 0)
+ goto end;
+
+ if (!data_read)
+ goto end;
+
+ iio_push_to_buffers_with_timestamp(indio_dev, buffer,
+ iio_get_time_ns(indio_dev));
+
+end:
+ iio_trigger_notify_done(indio_dev->trig);
+ return IRQ_HANDLED;
+}
+
+static int vcnl4010_buffer_postenable(struct iio_dev *indio_dev)
+{
+ struct vcnl4000_data *data = iio_priv(indio_dev);
+ int ret;
+ int cmd;
+
+ /* Do not enable the buffer if we are already capturing events. */
+ if (vcnl4010_is_in_periodic_mode(data))
+ return -EBUSY;
+
+ ret = i2c_smbus_write_byte_data(data->client, VCNL4010_INT_CTRL,
+ VCNL4010_INT_PROX_EN);
+ if (ret < 0)
+ return ret;
+
+ cmd = VCNL4000_SELF_TIMED_EN | VCNL4000_PROX_EN;
+ return i2c_smbus_write_byte_data(data->client, VCNL4000_COMMAND, cmd);
+}
+
+static int vcnl4010_buffer_predisable(struct iio_dev *indio_dev)
+{
+ struct vcnl4000_data *data = iio_priv(indio_dev);
+ int ret;
+
+ ret = i2c_smbus_write_byte_data(data->client, VCNL4010_INT_CTRL, 0);
+ if (ret < 0)
+ return ret;
+
+ return i2c_smbus_write_byte_data(data->client, VCNL4000_COMMAND, 0);
+}
+
+static const struct iio_buffer_setup_ops vcnl4010_buffer_ops = {
+ .postenable = &vcnl4010_buffer_postenable,
+ .predisable = &vcnl4010_buffer_predisable,
+};
+
static const struct iio_chan_spec_ext_info vcnl4000_ext_info[] = {
{
.name = "nearlevel",
@@ -912,6 +1192,18 @@ static const struct iio_event_spec vcnl4000_event_spec[] = {
}
};
+static const struct iio_event_spec vcnl4040_event_spec[] = {
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_RISING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE) | BIT(IIO_EV_INFO_ENABLE),
+ }, {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_FALLING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE) | BIT(IIO_EV_INFO_ENABLE),
+ },
+};
+
static const struct iio_chan_spec vcnl4000_channels[] = {
{
.type = IIO_LIGHT,
@@ -960,6 +1252,8 @@ static const struct iio_chan_spec vcnl4040_channels[] = {
BIT(IIO_CHAN_INFO_INT_TIME),
.info_mask_separate_available = BIT(IIO_CHAN_INFO_INT_TIME),
.ext_info = vcnl4000_ext_info,
+ .event_spec = vcnl4040_event_spec,
+ .num_event_specs = ARRAY_SIZE(vcnl4040_event_spec),
}
};
@@ -980,6 +1274,10 @@ static const struct iio_info vcnl4010_info = {
static const struct iio_info vcnl4040_info = {
.read_raw = vcnl4000_read_raw,
.write_raw = vcnl4040_write_raw,
+ .read_event_value = vcnl4040_read_event,
+ .write_event_value = vcnl4040_write_event,
+ .read_event_config = vcnl4040_read_event_config,
+ .write_event_config = vcnl4040_write_event_config,
.read_avail = vcnl4040_read_avail,
};
@@ -993,7 +1291,6 @@ static const struct vcnl4000_chip_spec vcnl4000_chip_spec_cfg[] = {
.channels = vcnl4000_channels,
.num_channels = ARRAY_SIZE(vcnl4000_channels),
.info = &vcnl4000_info,
- .irq_support = false,
},
[VCNL4010] = {
.prod = "VCNL4010/4020",
@@ -1004,7 +1301,9 @@ static const struct vcnl4000_chip_spec vcnl4000_chip_spec_cfg[] = {
.channels = vcnl4010_channels,
.num_channels = ARRAY_SIZE(vcnl4010_channels),
.info = &vcnl4010_info,
- .irq_support = true,
+ .irq_thread = vcnl4010_irq_thread,
+ .trig_buffer_func = vcnl4010_trigger_handler,
+ .buffer_setup_ops = &vcnl4010_buffer_ops,
},
[VCNL4040] = {
.prod = "VCNL4040",
@@ -1015,7 +1314,7 @@ static const struct vcnl4000_chip_spec vcnl4000_chip_spec_cfg[] = {
.channels = vcnl4040_channels,
.num_channels = ARRAY_SIZE(vcnl4040_channels),
.info = &vcnl4040_info,
- .irq_support = false,
+ .irq_thread = vcnl4040_irq_thread,
},
[VCNL4200] = {
.prod = "VCNL4200",
@@ -1026,138 +1325,9 @@ static const struct vcnl4000_chip_spec vcnl4000_chip_spec_cfg[] = {
.channels = vcnl4000_channels,
.num_channels = ARRAY_SIZE(vcnl4000_channels),
.info = &vcnl4000_info,
- .irq_support = false,
},
};
-static irqreturn_t vcnl4010_irq_thread(int irq, void *p)
-{
- struct iio_dev *indio_dev = p;
- struct vcnl4000_data *data = iio_priv(indio_dev);
- unsigned long isr;
- int ret;
-
- ret = i2c_smbus_read_byte_data(data->client, VCNL4010_ISR);
- if (ret < 0)
- goto end;
-
- isr = ret;
-
- if (isr & VCNL4010_INT_THR) {
- if (test_bit(VCNL4010_INT_THR_LOW, &isr)) {
- iio_push_event(indio_dev,
- IIO_UNMOD_EVENT_CODE(
- IIO_PROXIMITY,
- 1,
- IIO_EV_TYPE_THRESH,
- IIO_EV_DIR_FALLING),
- iio_get_time_ns(indio_dev));
- }
-
- if (test_bit(VCNL4010_INT_THR_HIGH, &isr)) {
- iio_push_event(indio_dev,
- IIO_UNMOD_EVENT_CODE(
- IIO_PROXIMITY,
- 1,
- IIO_EV_TYPE_THRESH,
- IIO_EV_DIR_RISING),
- iio_get_time_ns(indio_dev));
- }
-
- i2c_smbus_write_byte_data(data->client, VCNL4010_ISR,
- isr & VCNL4010_INT_THR);
- }
-
- if (isr & VCNL4010_INT_DRDY && iio_buffer_enabled(indio_dev))
- iio_trigger_poll_chained(indio_dev->trig);
-
-end:
- return IRQ_HANDLED;
-}
-
-static irqreturn_t vcnl4010_trigger_handler(int irq, void *p)
-{
- struct iio_poll_func *pf = p;
- struct iio_dev *indio_dev = pf->indio_dev;
- struct vcnl4000_data *data = iio_priv(indio_dev);
- const unsigned long *active_scan_mask = indio_dev->active_scan_mask;
- u16 buffer[8] __aligned(8) = {0}; /* 1x16-bit + naturally aligned ts */
- bool data_read = false;
- unsigned long isr;
- int val = 0;
- int ret;
-
- ret = i2c_smbus_read_byte_data(data->client, VCNL4010_ISR);
- if (ret < 0)
- goto end;
-
- isr = ret;
-
- if (test_bit(0, active_scan_mask)) {
- if (test_bit(VCNL4010_INT_PROXIMITY, &isr)) {
- ret = vcnl4000_read_data(data,
- VCNL4000_PS_RESULT_HI,
- &val);
- if (ret < 0)
- goto end;
-
- buffer[0] = val;
- data_read = true;
- }
- }
-
- ret = i2c_smbus_write_byte_data(data->client, VCNL4010_ISR,
- isr & VCNL4010_INT_DRDY);
- if (ret < 0)
- goto end;
-
- if (!data_read)
- goto end;
-
- iio_push_to_buffers_with_timestamp(indio_dev, buffer,
- iio_get_time_ns(indio_dev));
-
-end:
- iio_trigger_notify_done(indio_dev->trig);
- return IRQ_HANDLED;
-}
-
-static int vcnl4010_buffer_postenable(struct iio_dev *indio_dev)
-{
- struct vcnl4000_data *data = iio_priv(indio_dev);
- int ret;
- int cmd;
-
- /* Do not enable the buffer if we are already capturing events. */
- if (vcnl4010_is_in_periodic_mode(data))
- return -EBUSY;
-
- ret = i2c_smbus_write_byte_data(data->client, VCNL4010_INT_CTRL,
- VCNL4010_INT_PROX_EN);
- if (ret < 0)
- return ret;
-
- cmd = VCNL4000_SELF_TIMED_EN | VCNL4000_PROX_EN;
- return i2c_smbus_write_byte_data(data->client, VCNL4000_COMMAND, cmd);
-}
-
-static int vcnl4010_buffer_predisable(struct iio_dev *indio_dev)
-{
- struct vcnl4000_data *data = iio_priv(indio_dev);
- int ret;
-
- ret = i2c_smbus_write_byte_data(data->client, VCNL4010_INT_CTRL, 0);
- if (ret < 0)
- return ret;
-
- return i2c_smbus_write_byte_data(data->client, VCNL4000_COMMAND, 0);
-}
-
-static const struct iio_buffer_setup_ops vcnl4010_buffer_ops = {
- .postenable = &vcnl4010_buffer_postenable,
- .predisable = &vcnl4010_buffer_predisable,
-};
-
static const struct iio_trigger_ops vcnl4010_trigger_ops = {
.validate_device = iio_trigger_validate_own_device,
};
@@ -1214,22 +1384,25 @@ static int vcnl4000_probe(struct i2c_client *client)
indio_dev->name = VCNL4000_DRV_NAME;
indio_dev->modes = INDIO_DIRECT_MODE;
- if (client->irq && data->chip_spec->irq_support) {
+ if (data->chip_spec->trig_buffer_func &&
+ data->chip_spec->buffer_setup_ops) {
ret = devm_iio_triggered_buffer_setup(&client->dev, indio_dev,
NULL,
- vcnl4010_trigger_handler,
- &vcnl4010_buffer_ops);
+ data->chip_spec->trig_buffer_func,
+ data->chip_spec->buffer_setup_ops);
if (ret < 0) {
dev_err(&client->dev,
"unable to setup iio triggered buffer\n");
return ret;
}
+ }
+ if (client->irq && data->chip_spec->irq_thread) {
ret = devm_request_threaded_irq(&client->dev, client->irq,
- NULL, vcnl4010_irq_thread,
+ NULL, data->chip_spec->irq_thread,
IRQF_TRIGGER_FALLING |
IRQF_ONESHOT,
- "vcnl4010_irq",
+ "vcnl4000_irq",
indio_dev);
if (ret < 0) {
dev_err(&client->dev, "irq request failed\n");
diff --git a/drivers/iio/magnetometer/Kconfig b/drivers/iio/magnetometer/Kconfig
index b91fc5e6a26e..38532d840f2a 100644
--- a/drivers/iio/magnetometer/Kconfig
+++ b/drivers/iio/magnetometer/Kconfig
@@ -119,7 +119,7 @@ config IIO_ST_MAGN_3AXIS
select IIO_TRIGGERED_BUFFER if (IIO_BUFFER)
help
Say yes here to build support for STMicroelectronics magnetometers:
- LSM303DLHC, LSM303DLM, LIS3MDL.
+ LSM303C, LSM303DLHC, LSM303DLM, LIS3MDL.
Also need to enable at least one of I2C and SPI interface drivers
below.
@@ -208,6 +208,18 @@ config SENSORS_RM3100_SPI
To compile this driver as a module, choose M here: the module
will be called rm3100-spi.
+config TI_TMAG5273
+ tristate "TI TMAG5273 Low-Power Linear 3D Hall-Effect Sensor"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ Say Y here to add support for the TI TMAG5273 Low-Power
+ Linear 3D Hall-Effect Sensor.
+
+ This driver can also be compiled as a module.
+ To compile this driver as a module, choose M here: the module
+ will be called tmag5273.
+
config YAMAHA_YAS530
tristate "Yamaha YAS530 family of 3-Axis Magnetometers (I2C)"
depends on I2C
diff --git a/drivers/iio/magnetometer/Makefile b/drivers/iio/magnetometer/Makefile
index b9f45b7fafc3..b1c784ea71c8 100644
--- a/drivers/iio/magnetometer/Makefile
+++ b/drivers/iio/magnetometer/Makefile
@@ -29,4 +29,6 @@ obj-$(CONFIG_SENSORS_RM3100) += rm3100-core.o
obj-$(CONFIG_SENSORS_RM3100_I2C) += rm3100-i2c.o
obj-$(CONFIG_SENSORS_RM3100_SPI) += rm3100-spi.o
+obj-$(CONFIG_TI_TMAG5273) += tmag5273.o
+
obj-$(CONFIG_YAMAHA_YAS530) += yamaha-yas530.o
diff --git a/drivers/iio/magnetometer/st_magn.h b/drivers/iio/magnetometer/st_magn.h
index 785b7f7b8b06..89945984d966 100644
--- a/drivers/iio/magnetometer/st_magn.h
+++ b/drivers/iio/magnetometer/st_magn.h
@@ -22,6 +22,7 @@
#define LIS2MDL_MAGN_DEV_NAME "lis2mdl"
#define LSM9DS1_MAGN_DEV_NAME "lsm9ds1_magn"
#define IIS2MDC_MAGN_DEV_NAME "iis2mdc"
+#define LSM303C_MAGN_DEV_NAME "lsm303c_magn"
#ifdef CONFIG_IIO_BUFFER
int st_magn_allocate_ring(struct iio_dev *indio_dev);
diff --git a/drivers/iio/magnetometer/st_magn_core.c b/drivers/iio/magnetometer/st_magn_core.c
index e2fd233b3626..8faa7409d9e1 100644
--- a/drivers/iio/magnetometer/st_magn_core.c
+++ b/drivers/iio/magnetometer/st_magn_core.c
@@ -305,6 +305,7 @@ static const struct st_sensor_settings st_magn_sensors_settings[] = {
.sensors_supported = {
[0] = LIS3MDL_MAGN_DEV_NAME,
[1] = LSM9DS1_MAGN_DEV_NAME,
+ [2] = LSM303C_MAGN_DEV_NAME,
},
.ch = (struct iio_chan_spec *)st_magn_2_16bit_channels,
.odr = {
diff --git a/drivers/iio/magnetometer/st_magn_i2c.c b/drivers/iio/magnetometer/st_magn_i2c.c
index b4098d3b3813..cc0e0e94b129 100644
--- a/drivers/iio/magnetometer/st_magn_i2c.c
+++ b/drivers/iio/magnetometer/st_magn_i2c.c
@@ -50,6 +50,10 @@ static const struct of_device_id st_magn_of_match[] = {
.compatible = "st,iis2mdc",
.data = IIS2MDC_MAGN_DEV_NAME,
},
+ {
+ .compatible = "st,lsm303c-magn",
+ .data = LSM303C_MAGN_DEV_NAME,
+ },
{},
};
MODULE_DEVICE_TABLE(of, st_magn_of_match);
@@ -97,6 +101,7 @@ static const struct i2c_device_id st_magn_id_table[] = {
{ LIS2MDL_MAGN_DEV_NAME },
{ LSM9DS1_MAGN_DEV_NAME },
{ IIS2MDC_MAGN_DEV_NAME },
+ { LSM303C_MAGN_DEV_NAME },
{},
};
MODULE_DEVICE_TABLE(i2c, st_magn_id_table);
diff --git a/drivers/iio/magnetometer/st_magn_spi.c b/drivers/iio/magnetometer/st_magn_spi.c
index 6ddc4318564a..f203e1f87eec 100644
--- a/drivers/iio/magnetometer/st_magn_spi.c
+++ b/drivers/iio/magnetometer/st_magn_spi.c
@@ -45,6 +45,10 @@ static const struct of_device_id st_magn_of_match[] = {
.compatible = "st,iis2mdc",
.data = IIS2MDC_MAGN_DEV_NAME,
},
+ {
+ .compatible = "st,lsm303c-magn",
+ .data = LSM303C_MAGN_DEV_NAME,
+ },
{}
};
MODULE_DEVICE_TABLE(of, st_magn_of_match);
@@ -89,6 +93,7 @@ static const struct spi_device_id st_magn_id_table[] = {
{ LIS2MDL_MAGN_DEV_NAME },
{ LSM9DS1_MAGN_DEV_NAME },
{ IIS2MDC_MAGN_DEV_NAME },
+ { LSM303C_MAGN_DEV_NAME },
{},
};
MODULE_DEVICE_TABLE(spi, st_magn_id_table);
diff --git a/drivers/iio/magnetometer/tmag5273.c b/drivers/iio/magnetometer/tmag5273.c
new file mode 100644
index 000000000000..28bb7efe8df8
--- /dev/null
+++ b/drivers/iio/magnetometer/tmag5273.c
@@ -0,0 +1,743 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Driver for the TI TMAG5273 Low-Power Linear 3D Hall-Effect Sensor
+ *
+ * Copyright (C) 2022 WolfVision GmbH
+ *
+ * Author: Gerald Loacker <gerald.loacker@wolfvision.net>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/regmap.h>
+#include <linux/pm_runtime.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+
+#define TMAG5273_DEVICE_CONFIG_1 0x00
+#define TMAG5273_DEVICE_CONFIG_2 0x01
+#define TMAG5273_SENSOR_CONFIG_1 0x02
+#define TMAG5273_SENSOR_CONFIG_2 0x03
+#define TMAG5273_X_THR_CONFIG 0x04
+#define TMAG5273_Y_THR_CONFIG 0x05
+#define TMAG5273_Z_THR_CONFIG 0x06
+#define TMAG5273_T_CONFIG 0x07
+#define TMAG5273_INT_CONFIG_1 0x08
+#define TMAG5273_MAG_GAIN_CONFIG 0x09
+#define TMAG5273_MAG_OFFSET_CONFIG_1 0x0A
+#define TMAG5273_MAG_OFFSET_CONFIG_2 0x0B
+#define TMAG5273_I2C_ADDRESS 0x0C
+#define TMAG5273_DEVICE_ID 0x0D
+#define TMAG5273_MANUFACTURER_ID_LSB 0x0E
+#define TMAG5273_MANUFACTURER_ID_MSB 0x0F
+#define TMAG5273_T_MSB_RESULT 0x10
+#define TMAG5273_T_LSB_RESULT 0x11
+#define TMAG5273_X_MSB_RESULT 0x12
+#define TMAG5273_X_LSB_RESULT 0x13
+#define TMAG5273_Y_MSB_RESULT 0x14
+#define TMAG5273_Y_LSB_RESULT 0x15
+#define TMAG5273_Z_MSB_RESULT 0x16
+#define TMAG5273_Z_LSB_RESULT 0x17
+#define TMAG5273_CONV_STATUS 0x18
+#define TMAG5273_ANGLE_RESULT_MSB 0x19
+#define TMAG5273_ANGLE_RESULT_LSB 0x1A
+#define TMAG5273_MAGNITUDE_RESULT 0x1B
+#define TMAG5273_DEVICE_STATUS 0x1C
+#define TMAG5273_MAX_REG TMAG5273_DEVICE_STATUS
+
+#define TMAG5273_AUTOSLEEP_DELAY_MS 5000
+#define TMAG5273_MAX_AVERAGE 32
+
+/*
+ * bits in the TMAG5273_MANUFACTURER_ID_LSB / MSB register
+ * 16-bit unique manufacturer ID 0x49 / 0x54 = "TI"
+ */
+#define TMAG5273_MANUFACTURER_ID 0x5449
+
+/* bits in the TMAG5273_DEVICE_CONFIG_1 register */
+#define TMAG5273_AVG_MODE_MASK GENMASK(4, 2)
+#define TMAG5273_AVG_1_MODE FIELD_PREP(TMAG5273_AVG_MODE_MASK, 0)
+#define TMAG5273_AVG_2_MODE FIELD_PREP(TMAG5273_AVG_MODE_MASK, 1)
+#define TMAG5273_AVG_4_MODE FIELD_PREP(TMAG5273_AVG_MODE_MASK, 2)
+#define TMAG5273_AVG_8_MODE FIELD_PREP(TMAG5273_AVG_MODE_MASK, 3)
+#define TMAG5273_AVG_16_MODE FIELD_PREP(TMAG5273_AVG_MODE_MASK, 4)
+#define TMAG5273_AVG_32_MODE FIELD_PREP(TMAG5273_AVG_MODE_MASK, 5)
+
+/* bits in the TMAG5273_DEVICE_CONFIG_2 register */
+#define TMAG5273_OP_MODE_MASK GENMASK(1, 0)
+#define TMAG5273_OP_MODE_STANDBY FIELD_PREP(TMAG5273_OP_MODE_MASK, 0)
+#define TMAG5273_OP_MODE_SLEEP FIELD_PREP(TMAG5273_OP_MODE_MASK, 1)
+#define TMAG5273_OP_MODE_CONT FIELD_PREP(TMAG5273_OP_MODE_MASK, 2)
+#define TMAG5273_OP_MODE_WAKEUP FIELD_PREP(TMAG5273_OP_MODE_MASK, 3)
+
+/* bits in the TMAG5273_SENSOR_CONFIG_1 register */
+#define TMAG5273_MAG_CH_EN_MASK GENMASK(7, 4)
+#define TMAG5273_MAG_CH_EN_X_Y_Z 7
+
+/* bits in the TMAG5273_SENSOR_CONFIG_2 register */
+#define TMAG5273_Z_RANGE_MASK BIT(0)
+#define TMAG5273_X_Y_RANGE_MASK BIT(1)
+#define TMAG5273_ANGLE_EN_MASK GENMASK(3, 2)
+#define TMAG5273_ANGLE_EN_OFF 0
+#define TMAG5273_ANGLE_EN_X_Y 1
+#define TMAG5273_ANGLE_EN_Y_Z 2
+#define TMAG5273_ANGLE_EN_X_Z 3
+
+/* bits in the TMAG5273_T_CONFIG register */
+#define TMAG5273_T_CH_EN BIT(0)
+
+/* bits in the TMAG5273_DEVICE_ID register */
+#define TMAG5273_VERSION_MASK GENMASK(1, 0)
+
+/* bits in the TMAG5273_CONV_STATUS register */
+#define TMAG5273_CONV_STATUS_COMPLETE BIT(0)
+
+enum tmag5273_channels {
+ TEMPERATURE = 0,
+ AXIS_X,
+ AXIS_Y,
+ AXIS_Z,
+ ANGLE,
+ MAGNITUDE,
+};
+
+enum tmag5273_scale_index {
+ MAGN_RANGE_LOW = 0,
+ MAGN_RANGE_HIGH,
+ MAGN_RANGE_NUM
+};
+
+/* state container for the TMAG5273 driver */
+struct tmag5273_data {
+ struct device *dev;
+ unsigned int devid;
+ unsigned int version;
+ char name[16];
+ unsigned int conv_avg;
+ unsigned int scale;
+ enum tmag5273_scale_index scale_index;
+ unsigned int angle_measurement;
+ struct regmap *map;
+ struct regulator *vcc;
+
+ /*
+ * Locks the sensor for exclusive use during a measurement (which
+ * involves several register transactions so the regmap lock is not
+ * enough) so that measurements get serialized in a
+ * first-come-first-serve manner.
+ */
+ struct mutex lock;
+};
+
+static const char *const tmag5273_angle_names[] = { "off", "x-y", "y-z", "x-z" };
+
+/*
+ * Averaging enables additional sampling of the sensor data to reduce the noise
+ * effect, but also increases conversion time.
+ */
+static const unsigned int tmag5273_avg_table[] = {
+ 1, 2, 4, 8, 16, 32,
+};
+
+/*
+ * Magnetic resolution in Gauss for different TMAG5273 versions.
+ * Scale[Gauss] = Range[mT] * 1000 / 2^15 * 10, (1 mT = 10 Gauss)
+ * Only version 1 and 2 are valid, version 0 and 3 are reserved.
+ */
+static const struct iio_val_int_plus_micro tmag5273_scale[][MAGN_RANGE_NUM] = {
+ { { 0, 0 }, { 0, 0 } },
+ { { 0, 12200 }, { 0, 24400 } },
+ { { 0, 40600 }, { 0, 81200 } },
+ { { 0, 0 }, { 0, 0 } },
+};
+
+static int tmag5273_get_measure(struct tmag5273_data *data, s16 *t, s16 *x,
+ s16 *y, s16 *z, u16 *angle, u16 *magnitude)
+{
+ unsigned int status, val;
+ __be16 reg_data[4];
+ int ret;
+
+ mutex_lock(&data->lock);
+
+ /*
+ * Max. conversion time is 2425 us in 32x averaging mode for all three
+ * channels. Since we are in continuous measurement mode, a measurement
+ * may already be there, so poll for completed measurement with
+ * timeout.
+ */
+ ret = regmap_read_poll_timeout(data->map, TMAG5273_CONV_STATUS, status,
+ status & TMAG5273_CONV_STATUS_COMPLETE,
+ 100, 10000);
+ if (ret) {
+ dev_err(data->dev, "timeout waiting for measurement\n");
+ goto out_unlock;
+ }
+
+ ret = regmap_bulk_read(data->map, TMAG5273_T_MSB_RESULT, reg_data,
+ sizeof(reg_data));
+ if (ret)
+ goto out_unlock;
+ *t = be16_to_cpu(reg_data[0]);
+ *x = be16_to_cpu(reg_data[1]);
+ *y = be16_to_cpu(reg_data[2]);
+ *z = be16_to_cpu(reg_data[3]);
+
+ ret = regmap_bulk_read(data->map, TMAG5273_ANGLE_RESULT_MSB,
+ &reg_data[0], sizeof(reg_data[0]));
+ if (ret)
+ goto out_unlock;
+ /*
+ * angle has 9 bits integer value and 4 bits fractional part
+ * 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0
+ * 0 0 0 a a a a a a a a a f f f f
+ */
+ *angle = be16_to_cpu(reg_data[0]);
+
+ ret = regmap_read(data->map, TMAG5273_MAGNITUDE_RESULT, &val);
+ if (ret < 0)
+ goto out_unlock;
+ *magnitude = val;
+
+out_unlock:
+ mutex_unlock(&data->lock);
+ return ret;
+}
+
+static int tmag5273_write_osr(struct tmag5273_data *data, int val)
+{
+ int i;
+
+ if (val == data->conv_avg)
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(tmag5273_avg_table); i++) {
+ if (tmag5273_avg_table[i] == val)
+ break;
+ }
+ if (i == ARRAY_SIZE(tmag5273_avg_table))
+ return -EINVAL;
+ data->conv_avg = val;
+
+ return regmap_update_bits(data->map, TMAG5273_DEVICE_CONFIG_1,
+ TMAG5273_AVG_MODE_MASK,
+ FIELD_PREP(TMAG5273_AVG_MODE_MASK, i));
+}
+
+static int tmag5273_write_scale(struct tmag5273_data *data, int scale_micro)
+{
+ u32 value;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(tmag5273_scale[0]); i++) {
+ if (tmag5273_scale[data->version][i].micro == scale_micro)
+ break;
+ }
+ if (i == ARRAY_SIZE(tmag5273_scale[0]))
+ return -EINVAL;
+ data->scale_index = i;
+
+ if (data->scale_index == MAGN_RANGE_LOW)
+ value = 0;
+ else
+ value = TMAG5273_Z_RANGE_MASK | TMAG5273_X_Y_RANGE_MASK;
+
+ return regmap_update_bits(data->map, TMAG5273_SENSOR_CONFIG_2,
+ TMAG5273_Z_RANGE_MASK | TMAG5273_X_Y_RANGE_MASK, value);
+}
+
+static int tmag5273_read_avail(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ const int **vals, int *type, int *length,
+ long mask)
+{
+ struct tmag5273_data *data = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ *vals = tmag5273_avg_table;
+ *type = IIO_VAL_INT;
+ *length = ARRAY_SIZE(tmag5273_avg_table);
+ return IIO_AVAIL_LIST;
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_MAGN:
+ *type = IIO_VAL_INT_PLUS_MICRO;
+ *vals = (int *)tmag5273_scale[data->version];
+ *length = ARRAY_SIZE(tmag5273_scale[data->version]) *
+ MAGN_RANGE_NUM;
+ return IIO_AVAIL_LIST;
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static int tmag5273_read_raw(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, int *val,
+ int *val2, long mask)
+{
+ struct tmag5273_data *data = iio_priv(indio_dev);
+ s16 t, x, y, z;
+ u16 angle, magnitude;
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_PROCESSED:
+ case IIO_CHAN_INFO_RAW:
+ ret = pm_runtime_resume_and_get(data->dev);
+ if (ret < 0)
+ return ret;
+
+ ret = tmag5273_get_measure(data, &t, &x, &y, &z, &angle, &magnitude);
+ if (ret)
+ return ret;
+
+ pm_runtime_mark_last_busy(data->dev);
+ pm_runtime_put_autosuspend(data->dev);
+
+ switch (chan->address) {
+ case TEMPERATURE:
+ *val = t;
+ return IIO_VAL_INT;
+ case AXIS_X:
+ *val = x;
+ return IIO_VAL_INT;
+ case AXIS_Y:
+ *val = y;
+ return IIO_VAL_INT;
+ case AXIS_Z:
+ *val = z;
+ return IIO_VAL_INT;
+ case ANGLE:
+ *val = angle;
+ return IIO_VAL_INT;
+ case MAGNITUDE:
+ *val = magnitude;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_TEMP:
+ /*
+ * Convert device specific value to millicelsius.
+ * Resolution from the sensor is 60.1 LSB/celsius and
+ * the reference value at 25 celsius is 17508 LSBs.
+ */
+ *val = 10000;
+ *val2 = 601;
+ return IIO_VAL_FRACTIONAL;
+ case IIO_MAGN:
+ /* Magnetic resolution in uT */
+ *val = 0;
+ *val2 = tmag5273_scale[data->version]
+ [data->scale_index].micro;
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_ANGL:
+ /*
+ * Angle is in degrees and has four fractional bits,
+ * therefore use 1/16 * pi/180 to convert to radians.
+ */
+ *val = 1000;
+ *val2 = 916732;
+ return IIO_VAL_FRACTIONAL;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_OFFSET:
+ switch (chan->type) {
+ case IIO_TEMP:
+ *val = -266314;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ *val = data->conv_avg;
+ return IIO_VAL_INT;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int tmag5273_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int val,
+ int val2, long mask)
+{
+ struct tmag5273_data *data = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ return tmag5273_write_osr(data, val);
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_MAGN:
+ if (val)
+ return -EINVAL;
+ return tmag5273_write_scale(data, val2);
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+#define TMAG5273_AXIS_CHANNEL(axis, index) \
+ { \
+ .type = IIO_MAGN, \
+ .modified = 1, \
+ .channel2 = IIO_MOD_##axis, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE), \
+ .info_mask_shared_by_type_available = \
+ BIT(IIO_CHAN_INFO_SCALE), \
+ .info_mask_shared_by_all = \
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
+ .info_mask_shared_by_all_available = \
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
+ .address = index, \
+ .scan_index = index, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 16, \
+ .storagebits = 16, \
+ .endianness = IIO_CPU, \
+ }, \
+ }
+
+static const struct iio_chan_spec tmag5273_channels[] = {
+ {
+ .type = IIO_TEMP,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_OFFSET),
+ .address = TEMPERATURE,
+ .scan_index = TEMPERATURE,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 16,
+ .storagebits = 16,
+ .endianness = IIO_CPU,
+ },
+ },
+ TMAG5273_AXIS_CHANNEL(X, AXIS_X),
+ TMAG5273_AXIS_CHANNEL(Y, AXIS_Y),
+ TMAG5273_AXIS_CHANNEL(Z, AXIS_Z),
+ {
+ .type = IIO_ANGL,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
+ .info_mask_shared_by_all =
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
+ .info_mask_shared_by_all_available =
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
+ .address = ANGLE,
+ .scan_index = ANGLE,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 16,
+ .storagebits = 16,
+ .endianness = IIO_CPU,
+ },
+ },
+ {
+ .type = IIO_DISTANCE,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_all =
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
+ .info_mask_shared_by_all_available =
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
+ .address = MAGNITUDE,
+ .scan_index = MAGNITUDE,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 16,
+ .storagebits = 16,
+ .endianness = IIO_CPU,
+ },
+ },
+ IIO_CHAN_SOFT_TIMESTAMP(6),
+};
+
+static const struct iio_info tmag5273_info = {
+ .read_avail = tmag5273_read_avail,
+ .read_raw = tmag5273_read_raw,
+ .write_raw = tmag5273_write_raw,
+};
+
+static bool tmag5273_volatile_reg(struct device *dev, unsigned int reg)
+{
+ return reg >= TMAG5273_T_MSB_RESULT && reg <= TMAG5273_MAGNITUDE_RESULT;
+}
+
+static const struct regmap_config tmag5273_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = TMAG5273_MAX_REG,
+ .volatile_reg = tmag5273_volatile_reg,
+};
+
+static int tmag5273_set_operating_mode(struct tmag5273_data *data,
+ unsigned int val)
+{
+ return regmap_write(data->map, TMAG5273_DEVICE_CONFIG_2, val);
+}
+
+static void tmag5273_read_device_property(struct tmag5273_data *data)
+{
+ struct device *dev = data->dev;
+ const char *str;
+ int ret;
+
+ data->angle_measurement = TMAG5273_ANGLE_EN_X_Y;
+
+ ret = device_property_read_string(dev, "ti,angle-measurement", &str);
+ if (ret)
+ return;
+
+ ret = match_string(tmag5273_angle_names,
+ ARRAY_SIZE(tmag5273_angle_names), str);
+ if (ret >= 0)
+ data->angle_measurement = ret;
+}
+
+static void tmag5273_wake_up(struct tmag5273_data *data)
+{
+ int val;
+
+ /* Wake up the chip by sending a dummy I2C command */
+ regmap_read(data->map, TMAG5273_DEVICE_ID, &val);
+ /*
+ * Time to go to stand-by mode from sleep mode is 50us
+ * typically, during this time no I2C access is possible.
+ */
+ usleep_range(80, 200);
+}
+
+static int tmag5273_chip_init(struct tmag5273_data *data)
+{
+ int ret;
+
+ ret = regmap_write(data->map, TMAG5273_DEVICE_CONFIG_1,
+ TMAG5273_AVG_32_MODE);
+ if (ret)
+ return ret;
+ data->conv_avg = 32;
+
+ ret = regmap_write(data->map, TMAG5273_DEVICE_CONFIG_2,
+ TMAG5273_OP_MODE_CONT);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(data->map, TMAG5273_SENSOR_CONFIG_1,
+ FIELD_PREP(TMAG5273_MAG_CH_EN_MASK,
+ TMAG5273_MAG_CH_EN_X_Y_Z));
+ if (ret)
+ return ret;
+
+ ret = regmap_write(data->map, TMAG5273_SENSOR_CONFIG_2,
+ FIELD_PREP(TMAG5273_ANGLE_EN_MASK,
+ data->angle_measurement));
+ if (ret)
+ return ret;
+ data->scale_index = MAGN_RANGE_LOW;
+
+ return regmap_write(data->map, TMAG5273_T_CONFIG, TMAG5273_T_CH_EN);
+}
+
+static int tmag5273_check_device_id(struct tmag5273_data *data)
+{
+ __le16 devid;
+ int val, ret;
+
+ ret = regmap_read(data->map, TMAG5273_DEVICE_ID, &val);
+ if (ret)
+ return dev_err_probe(data->dev, ret, "failed to power on device\n");
+ data->version = FIELD_PREP(TMAG5273_VERSION_MASK, val);
+
+ ret = regmap_bulk_read(data->map, TMAG5273_MANUFACTURER_ID_LSB, &devid,
+ sizeof(devid));
+ if (ret)
+ return dev_err_probe(data->dev, ret, "failed to read device ID\n");
+ data->devid = le16_to_cpu(devid);
+
+ switch (data->devid) {
+ case TMAG5273_MANUFACTURER_ID:
+ /*
+ * The device name matches the orderable part number. 'x' stands
+ * for A, B, C or D devices, which have different I2C addresses.
+ * Versions 1 or 2 (0 and 3 is reserved) stands for different
+ * magnetic strengths.
+ */
+ snprintf(data->name, sizeof(data->name), "tmag5273x%1u", data->version);
+ if (data->version < 1 || data->version > 2)
+ dev_warn(data->dev, "Unsupported device %s\n", data->name);
+ return 0;
+ default:
+ /*
+ * Only print warning in case of unknown device ID to allow
+ * fallback compatible in device tree.
+ */
+ dev_warn(data->dev, "Unknown device ID 0x%x\n", data->devid);
+ return 0;
+ }
+}
+
+static void tmag5273_power_down(void *data)
+{
+ tmag5273_set_operating_mode(data, TMAG5273_OP_MODE_SLEEP);
+}
+
+static int tmag5273_probe(struct i2c_client *i2c)
+{
+ struct device *dev = &i2c->dev;
+ struct tmag5273_data *data;
+ struct iio_dev *indio_dev;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ data->dev = dev;
+ i2c_set_clientdata(i2c, indio_dev);
+
+ data->map = devm_regmap_init_i2c(i2c, &tmag5273_regmap_config);
+ if (IS_ERR(data->map))
+ return dev_err_probe(dev, PTR_ERR(data->map),
+ "failed to allocate register map\n");
+
+ mutex_init(&data->lock);
+
+ ret = devm_regulator_get_enable(dev, "vcc");
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to enable regulator\n");
+
+ tmag5273_wake_up(data);
+
+ ret = tmag5273_check_device_id(data);
+ if (ret)
+ return ret;
+
+ ret = tmag5273_set_operating_mode(data, TMAG5273_OP_MODE_CONT);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to power on device\n");
+
+ /*
+ * Register powerdown deferred callback which suspends the chip
+ * after module unloaded.
+ *
+ * TMAG5273 should be in SUSPEND mode in the two cases:
+ * 1) When driver is loaded, but we do not have any data or
+ * configuration requests to it (we are solving it using
+ * autosuspend feature).
+ * 2) When driver is unloaded and device is not used (devm action is
+ * used in this case).
+ */
+ ret = devm_add_action_or_reset(dev, tmag5273_power_down, data);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to add powerdown action\n");
+
+ ret = pm_runtime_set_active(dev);
+ if (ret < 0)
+ return ret;
+
+ ret = devm_pm_runtime_enable(dev);
+ if (ret)
+ return ret;
+
+ pm_runtime_get_noresume(dev);
+ pm_runtime_set_autosuspend_delay(dev, TMAG5273_AUTOSLEEP_DELAY_MS);
+ pm_runtime_use_autosuspend(dev);
+
+ tmag5273_read_device_property(data);
+
+ ret = tmag5273_chip_init(data);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to init device\n");
+
+ indio_dev->info = &tmag5273_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->name = data->name;
+ indio_dev->channels = tmag5273_channels;
+ indio_dev->num_channels = ARRAY_SIZE(tmag5273_channels);
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
+ ret = devm_iio_device_register(dev, indio_dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "device register failed\n");
+
+ return 0;
+}
+
+static int tmag5273_runtime_suspend(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct tmag5273_data *data = iio_priv(indio_dev);
+ int ret;
+
+ ret = tmag5273_set_operating_mode(data, TMAG5273_OP_MODE_SLEEP);
+ if (ret)
+ dev_err(dev, "failed to power off device (%pe)\n", ERR_PTR(ret));
+
+ return ret;
+}
+
+static int tmag5273_runtime_resume(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct tmag5273_data *data = iio_priv(indio_dev);
+ int ret;
+
+ tmag5273_wake_up(data);
+
+ ret = tmag5273_set_operating_mode(data, TMAG5273_OP_MODE_CONT);
+ if (ret)
+ dev_err(dev, "failed to power on device (%pe)\n", ERR_PTR(ret));
+
+ return ret;
+}
+
+static DEFINE_RUNTIME_DEV_PM_OPS(tmag5273_pm_ops,
+ tmag5273_runtime_suspend, tmag5273_runtime_resume,
+ NULL);
+
+static const struct i2c_device_id tmag5273_id[] = {
+ { "tmag5273" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(i2c, tmag5273_id);
+
+static const struct of_device_id tmag5273_of_match[] = {
+ { .compatible = "ti,tmag5273" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, tmag5273_of_match);
+
+static struct i2c_driver tmag5273_driver = {
+ .driver = {
+ .name = "tmag5273",
+ .of_match_table = tmag5273_of_match,
+ .pm = pm_ptr(&tmag5273_pm_ops),
+ },
+ .probe_new = tmag5273_probe,
+ .id_table = tmag5273_id,
+};
+module_i2c_driver(tmag5273_driver);
+
+MODULE_DESCRIPTION("TI TMAG5273 Low-Power Linear 3D Hall-Effect Sensor driver");
+MODULE_AUTHOR("Gerald Loacker <gerald.loacker@wolfvision.net>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/pressure/ms5611.h b/drivers/iio/pressure/ms5611.h
index 550b75b7186f..a78acd3fb18f 100644
--- a/drivers/iio/pressure/ms5611.h
+++ b/drivers/iio/pressure/ms5611.h
@@ -13,8 +13,6 @@
#include <linux/iio/iio.h>
#include <linux/mutex.h>
-struct regulator;
-
#define MS5611_RESET 0x1e
#define MS5611_READ_ADC 0x00
#define MS5611_READ_PROM_WORD 0xA0
@@ -52,11 +50,9 @@ struct ms5611_state {
int (*compensate_temp_and_pressure)(struct ms5611_state *st, s32 *temp,
s32 *pressure);
- struct regulator *vdd;
};
int ms5611_probe(struct iio_dev *indio_dev, struct device *dev,
const char *name, int type);
-void ms5611_remove(struct iio_dev *indio_dev);
#endif /* _MS5611_H */
diff --git a/drivers/iio/pressure/ms5611_core.c b/drivers/iio/pressure/ms5611_core.c
index c564a1d6cafe..627497e61a63 100644
--- a/drivers/iio/pressure/ms5611_core.c
+++ b/drivers/iio/pressure/ms5611_core.c
@@ -380,40 +380,21 @@ static const struct iio_info ms5611_info = {
static int ms5611_init(struct iio_dev *indio_dev)
{
int ret;
- struct ms5611_state *st = iio_priv(indio_dev);
/* Enable attached regulator if any. */
- st->vdd = devm_regulator_get(indio_dev->dev.parent, "vdd");
- if (IS_ERR(st->vdd))
- return PTR_ERR(st->vdd);
-
- ret = regulator_enable(st->vdd);
- if (ret) {
- dev_err(indio_dev->dev.parent,
- "failed to enable Vdd supply: %d\n", ret);
+ ret = devm_regulator_get_enable(indio_dev->dev.parent, "vdd");
+ if (ret)
return ret;
- }
ret = ms5611_reset(indio_dev);
if (ret < 0)
- goto err_regulator_disable;
+ return ret;
ret = ms5611_read_prom(indio_dev);
if (ret < 0)
- goto err_regulator_disable;
+ return ret;
return 0;
-
-err_regulator_disable:
- regulator_disable(st->vdd);
- return ret;
-}
-
-static void ms5611_fini(const struct iio_dev *indio_dev)
-{
- const struct ms5611_state *st = iio_priv(indio_dev);
-
- regulator_disable(st->vdd);
}
int ms5611_probe(struct iio_dev *indio_dev, struct device *dev,
@@ -453,37 +434,23 @@ int ms5611_probe(struct iio_dev *indio_dev, struct device *dev,
if (ret < 0)
return ret;
- ret = iio_triggered_buffer_setup(indio_dev, NULL,
+ ret = devm_iio_triggered_buffer_setup(dev, indio_dev, NULL,
ms5611_trigger_handler, NULL);
if (ret < 0) {
dev_err(dev, "iio triggered buffer setup failed\n");
- goto err_fini;
+ return ret;
}
- ret = iio_device_register(indio_dev);
+ ret = devm_iio_device_register(dev, indio_dev);
if (ret < 0) {
dev_err(dev, "unable to register iio device\n");
- goto err_buffer_cleanup;
+ return ret;
}
return 0;
-
-err_buffer_cleanup:
- iio_triggered_buffer_cleanup(indio_dev);
-err_fini:
- ms5611_fini(indio_dev);
- return ret;
}
EXPORT_SYMBOL_NS(ms5611_probe, IIO_MS5611);
-void ms5611_remove(struct iio_dev *indio_dev)
-{
- iio_device_unregister(indio_dev);
- iio_triggered_buffer_cleanup(indio_dev);
- ms5611_fini(indio_dev);
-}
-EXPORT_SYMBOL_NS(ms5611_remove, IIO_MS5611);
-
MODULE_AUTHOR("Tomasz Duszynski <tduszyns@gmail.com>");
MODULE_DESCRIPTION("MS5611 core driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/pressure/ms5611_i2c.c b/drivers/iio/pressure/ms5611_i2c.c
index caf882497656..e3c68a3ed76a 100644
--- a/drivers/iio/pressure/ms5611_i2c.c
+++ b/drivers/iio/pressure/ms5611_i2c.c
@@ -105,11 +105,6 @@ static int ms5611_i2c_probe(struct i2c_client *client)
return ms5611_probe(indio_dev, &client->dev, id->name, id->driver_data);
}
-static void ms5611_i2c_remove(struct i2c_client *client)
-{
- ms5611_remove(i2c_get_clientdata(client));
-}
-
static const struct of_device_id ms5611_i2c_matches[] = {
{ .compatible = "meas,ms5611" },
{ .compatible = "meas,ms5607" },
@@ -131,7 +126,6 @@ static struct i2c_driver ms5611_driver = {
},
.id_table = ms5611_id,
.probe_new = ms5611_i2c_probe,
- .remove = ms5611_i2c_remove,
};
module_i2c_driver(ms5611_driver);
diff --git a/drivers/iio/pressure/ms5611_spi.c b/drivers/iio/pressure/ms5611_spi.c
index a0a7205c9c3a..cc9d1f68c53c 100644
--- a/drivers/iio/pressure/ms5611_spi.c
+++ b/drivers/iio/pressure/ms5611_spi.c
@@ -107,11 +107,6 @@ static int ms5611_spi_probe(struct spi_device *spi)
spi_get_device_id(spi)->driver_data);
}
-static void ms5611_spi_remove(struct spi_device *spi)
-{
- ms5611_remove(spi_get_drvdata(spi));
-}
-
static const struct of_device_id ms5611_spi_matches[] = {
{ .compatible = "meas,ms5611" },
{ .compatible = "meas,ms5607" },
@@ -133,7 +128,6 @@ static struct spi_driver ms5611_driver = {
},
.id_table = ms5611_id,
.probe = ms5611_spi_probe,
- .remove = ms5611_spi_remove,
};
module_spi_driver(ms5611_driver);
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 68721ff10255..308155937713 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -479,13 +479,20 @@ static int compare_netdev_and_ip(int ifindex_a, struct sockaddr *sa,
if (sa->sa_family != sb->sa_family)
return sa->sa_family - sb->sa_family;
- if (sa->sa_family == AF_INET)
- return memcmp((char *)&((struct sockaddr_in *)sa)->sin_addr,
- (char *)&((struct sockaddr_in *)sb)->sin_addr,
+ if (sa->sa_family == AF_INET &&
+ __builtin_object_size(sa, 0) >= sizeof(struct sockaddr_in)) {
+ return memcmp(&((struct sockaddr_in *)sa)->sin_addr,
+ &((struct sockaddr_in *)sb)->sin_addr,
sizeof(((struct sockaddr_in *)sa)->sin_addr));
+ }
+
+ if (sa->sa_family == AF_INET6 &&
+ __builtin_object_size(sa, 0) >= sizeof(struct sockaddr_in6)) {
+ return ipv6_addr_cmp(&((struct sockaddr_in6 *)sa)->sin6_addr,
+ &((struct sockaddr_in6 *)sb)->sin6_addr);
+ }
- return ipv6_addr_cmp(&((struct sockaddr_in6 *)sa)->sin6_addr,
- &((struct sockaddr_in6 *)sb)->sin6_addr);
+ return -1;
}
static int cma_add_id_to_tree(struct rdma_id_private *node_id_priv)
@@ -2819,8 +2826,8 @@ int rdma_set_min_rnr_timer(struct rdma_cm_id *id, u8 min_rnr_timer)
}
EXPORT_SYMBOL(rdma_set_min_rnr_timer);
-static void route_set_path_rec_inbound(struct cma_work *work,
- struct sa_path_rec *path_rec)
+static int route_set_path_rec_inbound(struct cma_work *work,
+ struct sa_path_rec *path_rec)
{
struct rdma_route *route = &work->id->id.route;
@@ -2828,14 +2835,15 @@ static void route_set_path_rec_inbound(struct cma_work *work,
route->path_rec_inbound =
kzalloc(sizeof(*route->path_rec_inbound), GFP_KERNEL);
if (!route->path_rec_inbound)
- return;
+ return -ENOMEM;
}
*route->path_rec_inbound = *path_rec;
+ return 0;
}
-static void route_set_path_rec_outbound(struct cma_work *work,
- struct sa_path_rec *path_rec)
+static int route_set_path_rec_outbound(struct cma_work *work,
+ struct sa_path_rec *path_rec)
{
struct rdma_route *route = &work->id->id.route;
@@ -2843,14 +2851,15 @@ static void route_set_path_rec_outbound(struct cma_work *work,
route->path_rec_outbound =
kzalloc(sizeof(*route->path_rec_outbound), GFP_KERNEL);
if (!route->path_rec_outbound)
- return;
+ return -ENOMEM;
}
*route->path_rec_outbound = *path_rec;
+ return 0;
}
static void cma_query_handler(int status, struct sa_path_rec *path_rec,
- int num_prs, void *context)
+ unsigned int num_prs, void *context)
{
struct cma_work *work = context;
struct rdma_route *route;
@@ -2865,13 +2874,15 @@ static void cma_query_handler(int status, struct sa_path_rec *path_rec,
if (!path_rec[i].flags || (path_rec[i].flags & IB_PATH_GMP))
*route->path_rec = path_rec[i];
else if (path_rec[i].flags & IB_PATH_INBOUND)
- route_set_path_rec_inbound(work, &path_rec[i]);
+ status = route_set_path_rec_inbound(work, &path_rec[i]);
else if (path_rec[i].flags & IB_PATH_OUTBOUND)
- route_set_path_rec_outbound(work, &path_rec[i]);
- }
- if (!route->path_rec) {
- status = -EINVAL;
- goto fail;
+ status = route_set_path_rec_outbound(work,
+ &path_rec[i]);
+ else
+ status = -EINVAL;
+
+ if (status)
+ goto fail;
}
route->num_pri_alt_paths = 1;
@@ -3541,121 +3552,6 @@ err:
return ret;
}
-static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
- const struct sockaddr *dst_addr)
-{
- struct sockaddr_storage zero_sock = {};
-
- if (src_addr && src_addr->sa_family)
- return rdma_bind_addr(id, src_addr);
-
- /*
- * When the src_addr is not specified, automatically supply an any addr
- */
- zero_sock.ss_family = dst_addr->sa_family;
- if (IS_ENABLED(CONFIG_IPV6) && dst_addr->sa_family == AF_INET6) {
- struct sockaddr_in6 *src_addr6 =
- (struct sockaddr_in6 *)&zero_sock;
- struct sockaddr_in6 *dst_addr6 =
- (struct sockaddr_in6 *)dst_addr;
-
- src_addr6->sin6_scope_id = dst_addr6->sin6_scope_id;
- if (ipv6_addr_type(&dst_addr6->sin6_addr) & IPV6_ADDR_LINKLOCAL)
- id->route.addr.dev_addr.bound_dev_if =
- dst_addr6->sin6_scope_id;
- } else if (dst_addr->sa_family == AF_IB) {
- ((struct sockaddr_ib *)&zero_sock)->sib_pkey =
- ((struct sockaddr_ib *)dst_addr)->sib_pkey;
- }
- return rdma_bind_addr(id, (struct sockaddr *)&zero_sock);
-}
-
-/*
- * If required, resolve the source address for bind and leave the id_priv in
- * state RDMA_CM_ADDR_BOUND. This oddly uses the state to determine the prior
- * calls made by ULP, a previously bound ID will not be re-bound and src_addr is
- * ignored.
- */
-static int resolve_prepare_src(struct rdma_id_private *id_priv,
- struct sockaddr *src_addr,
- const struct sockaddr *dst_addr)
-{
- int ret;
-
- memcpy(cma_dst_addr(id_priv), dst_addr, rdma_addr_size(dst_addr));
- if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY)) {
- /* For a well behaved ULP state will be RDMA_CM_IDLE */
- ret = cma_bind_addr(&id_priv->id, src_addr, dst_addr);
- if (ret)
- goto err_dst;
- if (WARN_ON(!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND,
- RDMA_CM_ADDR_QUERY))) {
- ret = -EINVAL;
- goto err_dst;
- }
- }
-
- if (cma_family(id_priv) != dst_addr->sa_family) {
- ret = -EINVAL;
- goto err_state;
- }
- return 0;
-
-err_state:
- cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND);
-err_dst:
- memset(cma_dst_addr(id_priv), 0, rdma_addr_size(dst_addr));
- return ret;
-}
-
-int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
- const struct sockaddr *dst_addr, unsigned long timeout_ms)
-{
- struct rdma_id_private *id_priv =
- container_of(id, struct rdma_id_private, id);
- int ret;
-
- ret = resolve_prepare_src(id_priv, src_addr, dst_addr);
- if (ret)
- return ret;
-
- if (cma_any_addr(dst_addr)) {
- ret = cma_resolve_loopback(id_priv);
- } else {
- if (dst_addr->sa_family == AF_IB) {
- ret = cma_resolve_ib_addr(id_priv);
- } else {
- /*
- * The FSM can return back to RDMA_CM_ADDR_BOUND after
- * rdma_resolve_ip() is called, eg through the error
- * path in addr_handler(). If this happens the existing
- * request must be canceled before issuing a new one.
- * Since canceling a request is a bit slow and this
- * oddball path is rare, keep track once a request has
- * been issued. The track turns out to be a permanent
- * state since this is the only cancel as it is
- * immediately before rdma_resolve_ip().
- */
- if (id_priv->used_resolve_ip)
- rdma_addr_cancel(&id->route.addr.dev_addr);
- else
- id_priv->used_resolve_ip = 1;
- ret = rdma_resolve_ip(cma_src_addr(id_priv), dst_addr,
- &id->route.addr.dev_addr,
- timeout_ms, addr_handler,
- false, id_priv);
- }
- }
- if (ret)
- goto err;
-
- return 0;
-err:
- cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND);
- return ret;
-}
-EXPORT_SYMBOL(rdma_resolve_addr);
-
int rdma_set_reuseaddr(struct rdma_cm_id *id, int reuse)
{
struct rdma_id_private *id_priv;
@@ -4058,27 +3954,26 @@ err:
}
EXPORT_SYMBOL(rdma_listen);
-int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
+static int rdma_bind_addr_dst(struct rdma_id_private *id_priv,
+ struct sockaddr *addr, const struct sockaddr *daddr)
{
- struct rdma_id_private *id_priv;
+ struct sockaddr *id_daddr;
int ret;
- struct sockaddr *daddr;
if (addr->sa_family != AF_INET && addr->sa_family != AF_INET6 &&
addr->sa_family != AF_IB)
return -EAFNOSUPPORT;
- id_priv = container_of(id, struct rdma_id_private, id);
if (!cma_comp_exch(id_priv, RDMA_CM_IDLE, RDMA_CM_ADDR_BOUND))
return -EINVAL;
- ret = cma_check_linklocal(&id->route.addr.dev_addr, addr);
+ ret = cma_check_linklocal(&id_priv->id.route.addr.dev_addr, addr);
if (ret)
goto err1;
memcpy(cma_src_addr(id_priv), addr, rdma_addr_size(addr));
if (!cma_any_addr(addr)) {
- ret = cma_translate_addr(addr, &id->route.addr.dev_addr);
+ ret = cma_translate_addr(addr, &id_priv->id.route.addr.dev_addr);
if (ret)
goto err1;
@@ -4098,8 +3993,10 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
}
#endif
}
- daddr = cma_dst_addr(id_priv);
- daddr->sa_family = addr->sa_family;
+ id_daddr = cma_dst_addr(id_priv);
+ if (daddr != id_daddr)
+ memcpy(id_daddr, daddr, rdma_addr_size(addr));
+ id_daddr->sa_family = addr->sa_family;
ret = cma_get_port(id_priv);
if (ret)
@@ -4115,6 +4012,127 @@ err1:
cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_IDLE);
return ret;
}
+
+static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
+ const struct sockaddr *dst_addr)
+{
+ struct rdma_id_private *id_priv =
+ container_of(id, struct rdma_id_private, id);
+ struct sockaddr_storage zero_sock = {};
+
+ if (src_addr && src_addr->sa_family)
+ return rdma_bind_addr_dst(id_priv, src_addr, dst_addr);
+
+ /*
+ * When the src_addr is not specified, automatically supply an any addr
+ */
+ zero_sock.ss_family = dst_addr->sa_family;
+ if (IS_ENABLED(CONFIG_IPV6) && dst_addr->sa_family == AF_INET6) {
+ struct sockaddr_in6 *src_addr6 =
+ (struct sockaddr_in6 *)&zero_sock;
+ struct sockaddr_in6 *dst_addr6 =
+ (struct sockaddr_in6 *)dst_addr;
+
+ src_addr6->sin6_scope_id = dst_addr6->sin6_scope_id;
+ if (ipv6_addr_type(&dst_addr6->sin6_addr) & IPV6_ADDR_LINKLOCAL)
+ id->route.addr.dev_addr.bound_dev_if =
+ dst_addr6->sin6_scope_id;
+ } else if (dst_addr->sa_family == AF_IB) {
+ ((struct sockaddr_ib *)&zero_sock)->sib_pkey =
+ ((struct sockaddr_ib *)dst_addr)->sib_pkey;
+ }
+ return rdma_bind_addr_dst(id_priv, (struct sockaddr *)&zero_sock, dst_addr);
+}
+
+/*
+ * If required, resolve the source address for bind and leave the id_priv in
+ * state RDMA_CM_ADDR_BOUND. This oddly uses the state to determine the prior
+ * calls made by ULP, a previously bound ID will not be re-bound and src_addr is
+ * ignored.
+ */
+static int resolve_prepare_src(struct rdma_id_private *id_priv,
+ struct sockaddr *src_addr,
+ const struct sockaddr *dst_addr)
+{
+ int ret;
+
+ if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY)) {
+ /* For a well behaved ULP state will be RDMA_CM_IDLE */
+ ret = cma_bind_addr(&id_priv->id, src_addr, dst_addr);
+ if (ret)
+ return ret;
+ if (WARN_ON(!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND,
+ RDMA_CM_ADDR_QUERY)))
+ return -EINVAL;
+
+ }
+
+ if (cma_family(id_priv) != dst_addr->sa_family) {
+ ret = -EINVAL;
+ goto err_state;
+ }
+ return 0;
+
+err_state:
+ cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND);
+ return ret;
+}
+
+int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
+ const struct sockaddr *dst_addr, unsigned long timeout_ms)
+{
+ struct rdma_id_private *id_priv =
+ container_of(id, struct rdma_id_private, id);
+ int ret;
+
+ ret = resolve_prepare_src(id_priv, src_addr, dst_addr);
+ if (ret)
+ return ret;
+
+ if (cma_any_addr(dst_addr)) {
+ ret = cma_resolve_loopback(id_priv);
+ } else {
+ if (dst_addr->sa_family == AF_IB) {
+ ret = cma_resolve_ib_addr(id_priv);
+ } else {
+ /*
+ * The FSM can return back to RDMA_CM_ADDR_BOUND after
+ * rdma_resolve_ip() is called, eg through the error
+ * path in addr_handler(). If this happens the existing
+ * request must be canceled before issuing a new one.
+ * Since canceling a request is a bit slow and this
+ * oddball path is rare, keep track once a request has
+ * been issued. The track turns out to be a permanent
+ * state since this is the only cancel as it is
+ * immediately before rdma_resolve_ip().
+ */
+ if (id_priv->used_resolve_ip)
+ rdma_addr_cancel(&id->route.addr.dev_addr);
+ else
+ id_priv->used_resolve_ip = 1;
+ ret = rdma_resolve_ip(cma_src_addr(id_priv), dst_addr,
+ &id->route.addr.dev_addr,
+ timeout_ms, addr_handler,
+ false, id_priv);
+ }
+ }
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND);
+ return ret;
+}
+EXPORT_SYMBOL(rdma_resolve_addr);
+
+int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
+{
+ struct rdma_id_private *id_priv =
+ container_of(id, struct rdma_id_private, id);
+
+ return rdma_bind_addr_dst(id_priv, addr, cma_dst_addr(id_priv));
+}
EXPORT_SYMBOL(rdma_bind_addr);
static int cma_format_hdr(void *hdr, struct rdma_id_private *id_priv)
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 0de83d9a4985..59179cfc20ef 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -106,7 +106,7 @@ struct ib_sa_device {
struct ib_sa_query {
void (*callback)(struct ib_sa_query *sa_query, int status,
- int num_prs, struct ib_sa_mad *mad);
+ struct ib_sa_mad *mad);
void (*release)(struct ib_sa_query *);
struct ib_sa_client *client;
struct ib_sa_port *port;
@@ -118,12 +118,6 @@ struct ib_sa_query {
u32 seq; /* Local svc request sequence number */
unsigned long timeout; /* Local svc timeout */
u8 path_use; /* How will the pathrecord be used */
-
- /* A separate buffer to save pathrecords of a response, as in cases
- * like IB/netlink, mulptiple pathrecords are supported, so that
- * mad->data is not large enough to hold them
- */
- void *resp_pr_data;
};
#define IB_SA_ENABLE_LOCAL_SERVICE 0x00000001
@@ -132,7 +126,7 @@ struct ib_sa_query {
struct ib_sa_path_query {
void (*callback)(int status, struct sa_path_rec *rec,
- int num_paths, void *context);
+ unsigned int num_paths, void *context);
void *context;
struct ib_sa_query sa_query;
struct sa_path_rec *conv_pr;
@@ -690,6 +684,8 @@ static const struct ib_field guidinfo_rec_table[] = {
.size_bits = 512 },
};
+#define RDMA_PRIMARY_PATH_MAX_REC_NUM 3
+
static inline void ib_sa_disable_local_svc(struct ib_sa_query *query)
{
query->flags &= ~IB_SA_ENABLE_LOCAL_SERVICE;
@@ -874,30 +870,21 @@ static void send_handler(struct ib_mad_agent *agent,
static void ib_nl_process_good_resolve_rsp(struct ib_sa_query *query,
const struct nlmsghdr *nlh)
{
- struct ib_path_rec_data *srec, *drec;
+ struct sa_path_rec recs[RDMA_PRIMARY_PATH_MAX_REC_NUM];
struct ib_sa_path_query *path_query;
+ struct ib_path_rec_data *rec_data;
struct ib_mad_send_wc mad_send_wc;
const struct nlattr *head, *curr;
struct ib_sa_mad *mad = NULL;
- int len, rem, num_prs = 0;
+ int len, rem, status = -EIO;
+ unsigned int num_prs = 0;
u32 mask = 0;
- int status = -EIO;
if (!query->callback)
goto out;
path_query = container_of(query, struct ib_sa_path_query, sa_query);
mad = query->mad_buf->mad;
- if (!path_query->conv_pr &&
- (be16_to_cpu(mad->mad_hdr.attr_id) == IB_SA_ATTR_PATH_REC)) {
- /* Need a larger buffer for possible multiple PRs */
- query->resp_pr_data = kvcalloc(RDMA_PRIMARY_PATH_MAX_REC_NUM,
- sizeof(*drec), GFP_KERNEL);
- if (!query->resp_pr_data) {
- query->callback(query, -ENOMEM, 0, NULL);
- return;
- }
- }
head = (const struct nlattr *) nlmsg_data(nlh);
len = nlmsg_len(nlh);
@@ -917,36 +904,41 @@ static void ib_nl_process_good_resolve_rsp(struct ib_sa_query *query,
break;
}
- drec = (struct ib_path_rec_data *)query->resp_pr_data;
nla_for_each_attr(curr, head, len, rem) {
if (curr->nla_type != LS_NLA_TYPE_PATH_RECORD)
continue;
- srec = nla_data(curr);
- if ((srec->flags & mask) != mask)
+ rec_data = nla_data(curr);
+ if ((rec_data->flags & mask) != mask)
continue;
- status = 0;
- if (!drec) {
- memcpy(mad->data, srec->path_rec,
- sizeof(srec->path_rec));
- num_prs = 1;
- break;
+ if ((query->flags & IB_SA_QUERY_OPA) ||
+ path_query->conv_pr) {
+ mad->mad_hdr.method |= IB_MGMT_METHOD_RESP;
+ memcpy(mad->data, rec_data->path_rec,
+ sizeof(rec_data->path_rec));
+ query->callback(query, 0, mad);
+ goto out;
}
- memcpy(drec, srec, sizeof(*drec));
- drec++;
+ status = 0;
+ ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table),
+ rec_data->path_rec, &recs[num_prs]);
+ recs[num_prs].flags = rec_data->flags;
+ recs[num_prs].rec_type = SA_PATH_REC_TYPE_IB;
+ sa_path_set_dmac_zero(&recs[num_prs]);
+
num_prs++;
if (num_prs >= RDMA_PRIMARY_PATH_MAX_REC_NUM)
break;
}
- if (!status)
+ if (!status) {
mad->mad_hdr.method |= IB_MGMT_METHOD_RESP;
-
- query->callback(query, status, num_prs, mad);
- kvfree(query->resp_pr_data);
- query->resp_pr_data = NULL;
+ path_query->callback(status, recs, num_prs,
+ path_query->context);
+ } else
+ query->callback(query, status, mad);
out:
mad_send_wc.send_buf = query->mad_buf;
@@ -1451,11 +1443,26 @@ static int opa_pr_query_possible(struct ib_sa_client *client,
return PR_IB_SUPPORTED;
}
-static void ib_sa_pr_callback_single(struct ib_sa_path_query *query,
- int status, struct ib_sa_mad *mad)
+static void ib_sa_path_rec_callback(struct ib_sa_query *sa_query,
+ int status, struct ib_sa_mad *mad)
{
+ struct ib_sa_path_query *query =
+ container_of(sa_query, struct ib_sa_path_query, sa_query);
struct sa_path_rec rec = {};
+ if (!mad) {
+ query->callback(status, NULL, 0, query->context);
+ return;
+ }
+
+ if (sa_query->flags & IB_SA_QUERY_OPA) {
+ ib_unpack(opa_path_rec_table, ARRAY_SIZE(opa_path_rec_table),
+ mad->data, &rec);
+ rec.rec_type = SA_PATH_REC_TYPE_OPA;
+ query->callback(status, &rec, 1, query->context);
+ return;
+ }
+
ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table),
mad->data, &rec);
rec.rec_type = SA_PATH_REC_TYPE_IB;
@@ -1472,71 +1479,6 @@ static void ib_sa_pr_callback_single(struct ib_sa_path_query *query,
}
}
-/**
- * ib_sa_pr_callback_multiple() - Parse path records then do callback.
- *
- * In a multiple-PR case the PRs are saved in "query->resp_pr_data"
- * (instead of"mad->data") and with "ib_path_rec_data" structure format,
- * so that rec->flags can be set to indicate the type of PR.
- * This is valid only in IB fabric.
- */
-static void ib_sa_pr_callback_multiple(struct ib_sa_path_query *query,
- int status, int num_prs,
- struct ib_path_rec_data *rec_data)
-{
- struct sa_path_rec *rec;
- int i;
-
- rec = kvcalloc(num_prs, sizeof(*rec), GFP_KERNEL);
- if (!rec) {
- query->callback(-ENOMEM, NULL, 0, query->context);
- return;
- }
-
- for (i = 0; i < num_prs; i++) {
- ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table),
- rec_data[i].path_rec, rec + i);
- rec[i].rec_type = SA_PATH_REC_TYPE_IB;
- sa_path_set_dmac_zero(rec + i);
- rec[i].flags = rec_data[i].flags;
- }
-
- query->callback(status, rec, num_prs, query->context);
- kvfree(rec);
-}
-
-static void ib_sa_path_rec_callback(struct ib_sa_query *sa_query,
- int status, int num_prs,
- struct ib_sa_mad *mad)
-{
- struct ib_sa_path_query *query =
- container_of(sa_query, struct ib_sa_path_query, sa_query);
- struct sa_path_rec rec;
-
- if (!mad || !num_prs) {
- query->callback(status, NULL, 0, query->context);
- return;
- }
-
- if (sa_query->flags & IB_SA_QUERY_OPA) {
- if (num_prs != 1) {
- query->callback(-EINVAL, NULL, 0, query->context);
- return;
- }
-
- ib_unpack(opa_path_rec_table, ARRAY_SIZE(opa_path_rec_table),
- mad->data, &rec);
- rec.rec_type = SA_PATH_REC_TYPE_OPA;
- query->callback(status, &rec, num_prs, query->context);
- } else {
- if (!sa_query->resp_pr_data)
- ib_sa_pr_callback_single(query, status, mad);
- else
- ib_sa_pr_callback_multiple(query, status, num_prs,
- sa_query->resp_pr_data);
- }
-}
-
static void ib_sa_path_rec_release(struct ib_sa_query *sa_query)
{
struct ib_sa_path_query *query =
@@ -1578,7 +1520,7 @@ int ib_sa_path_rec_get(struct ib_sa_client *client,
unsigned long timeout_ms, gfp_t gfp_mask,
void (*callback)(int status,
struct sa_path_rec *resp,
- int num_paths, void *context),
+ unsigned int num_paths, void *context),
void *context,
struct ib_sa_query **sa_query)
{
@@ -1677,8 +1619,7 @@ err1:
EXPORT_SYMBOL(ib_sa_path_rec_get);
static void ib_sa_mcmember_rec_callback(struct ib_sa_query *sa_query,
- int status, int num_prs,
- struct ib_sa_mad *mad)
+ int status, struct ib_sa_mad *mad)
{
struct ib_sa_mcmember_query *query =
container_of(sa_query, struct ib_sa_mcmember_query, sa_query);
@@ -1769,8 +1710,7 @@ err1:
/* Support GuidInfoRecord */
static void ib_sa_guidinfo_rec_callback(struct ib_sa_query *sa_query,
- int status, int num_paths,
- struct ib_sa_mad *mad)
+ int status, struct ib_sa_mad *mad)
{
struct ib_sa_guidinfo_query *query =
container_of(sa_query, struct ib_sa_guidinfo_query, sa_query);
@@ -1879,8 +1819,7 @@ static void ib_classportinfo_cb(void *context)
}
static void ib_sa_classport_info_rec_callback(struct ib_sa_query *sa_query,
- int status, int num_prs,
- struct ib_sa_mad *mad)
+ int status, struct ib_sa_mad *mad)
{
unsigned long flags;
struct ib_sa_classport_info_query *query =
@@ -2055,13 +1994,13 @@ static void send_handler(struct ib_mad_agent *agent,
/* No callback -- already got recv */
break;
case IB_WC_RESP_TIMEOUT_ERR:
- query->callback(query, -ETIMEDOUT, 0, NULL);
+ query->callback(query, -ETIMEDOUT, NULL);
break;
case IB_WC_WR_FLUSH_ERR:
- query->callback(query, -EINTR, 0, NULL);
+ query->callback(query, -EINTR, NULL);
break;
default:
- query->callback(query, -EIO, 0, NULL);
+ query->callback(query, -EIO, NULL);
break;
}
@@ -2089,10 +2028,10 @@ static void recv_handler(struct ib_mad_agent *mad_agent,
if (mad_recv_wc->wc->status == IB_WC_SUCCESS)
query->callback(query,
mad_recv_wc->recv_buf.mad->mad_hdr.status ?
- -EINVAL : 0, 1,
+ -EINVAL : 0,
(struct ib_sa_mad *) mad_recv_wc->recv_buf.mad);
else
- query->callback(query, -EIO, 0, NULL);
+ query->callback(query, -EIO, NULL);
}
ib_free_recv_mad(mad_recv_wc);
diff --git a/drivers/infiniband/core/umem_dmabuf.c b/drivers/infiniband/core/umem_dmabuf.c
index 43b26bc12288..39357dc2d229 100644
--- a/drivers/infiniband/core/umem_dmabuf.c
+++ b/drivers/infiniband/core/umem_dmabuf.c
@@ -26,8 +26,8 @@ int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf)
if (umem_dmabuf->sgt)
goto wait_fence;
- sgt = dma_buf_map_attachment_unlocked(umem_dmabuf->attach,
- DMA_BIDIRECTIONAL);
+ sgt = dma_buf_map_attachment(umem_dmabuf->attach,
+ DMA_BIDIRECTIONAL);
if (IS_ERR(sgt))
return PTR_ERR(sgt);
@@ -103,8 +103,8 @@ void ib_umem_dmabuf_unmap_pages(struct ib_umem_dmabuf *umem_dmabuf)
umem_dmabuf->last_sg_trim = 0;
}
- dma_buf_unmap_attachment_unlocked(umem_dmabuf->attach, umem_dmabuf->sgt,
- DMA_BIDIRECTIONAL);
+ dma_buf_unmap_attachment(umem_dmabuf->attach, umem_dmabuf->sgt,
+ DMA_BIDIRECTIONAL);
umem_dmabuf->sgt = NULL;
}
diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
index 785c37cae3c0..5a2baf49ecaa 100644
--- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h
+++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
@@ -89,13 +89,6 @@ struct bnxt_re_ring_attr {
u8 mode;
};
-struct bnxt_re_work {
- struct work_struct work;
- unsigned long event;
- struct bnxt_re_dev *rdev;
- struct net_device *vlan_dev;
-};
-
struct bnxt_re_sqp_entries {
struct bnxt_qplib_sge sge;
u64 wrid;
@@ -132,10 +125,10 @@ struct bnxt_re_dev {
#define BNXT_RE_FLAG_ERR_DEVICE_DETACHED 17
#define BNXT_RE_FLAG_ISSUE_ROCE_STATS 29
struct net_device *netdev;
+ struct notifier_block nb;
unsigned int version, major, minor;
struct bnxt_qplib_chip_ctx *chip_ctx;
struct bnxt_en_dev *en_dev;
- struct bnxt_msix_entry msix_entries[BNXT_RE_MAX_MSIX];
int num_msix;
int id;
@@ -194,5 +187,4 @@ static inline struct device *rdev_to_dev(struct bnxt_re_dev *rdev)
return &rdev->ibdev.dev;
return NULL;
}
-
#endif
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index 8c0c80a8d338..c5867e78f231 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -48,6 +48,7 @@
#include <net/ipv6.h>
#include <net/addrconf.h>
#include <linux/if_ether.h>
+#include <linux/auxiliary_bus.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
@@ -74,14 +75,14 @@ MODULE_DESCRIPTION(BNXT_RE_DESC " Driver");
MODULE_LICENSE("Dual BSD/GPL");
/* globals */
-static struct list_head bnxt_re_dev_list = LIST_HEAD_INIT(bnxt_re_dev_list);
-/* Mutex to protect the list of bnxt_re devices added */
-static DEFINE_MUTEX(bnxt_re_dev_lock);
-static struct workqueue_struct *bnxt_re_wq;
-static void bnxt_re_remove_device(struct bnxt_re_dev *rdev);
-static void bnxt_re_dealloc_driver(struct ib_device *ib_dev);
+static DEFINE_MUTEX(bnxt_re_mutex);
+
static void bnxt_re_stop_irq(void *handle);
static void bnxt_re_dev_stop(struct bnxt_re_dev *rdev);
+static int bnxt_re_netdev_event(struct notifier_block *notifier,
+ unsigned long event, void *ptr);
+static struct bnxt_re_dev *bnxt_re_from_netdev(struct net_device *netdev);
+static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev);
static void bnxt_re_set_drv_mode(struct bnxt_re_dev *rdev, u8 mode)
{
@@ -111,16 +112,14 @@ static int bnxt_re_setup_chip_ctx(struct bnxt_re_dev *rdev, u8 wqe_mode)
{
struct bnxt_qplib_chip_ctx *chip_ctx;
struct bnxt_en_dev *en_dev;
- struct bnxt *bp;
en_dev = rdev->en_dev;
- bp = netdev_priv(en_dev->net);
chip_ctx = kzalloc(sizeof(*chip_ctx), GFP_KERNEL);
if (!chip_ctx)
return -ENOMEM;
- chip_ctx->chip_num = bp->chip_num;
- chip_ctx->hw_stats_size = bp->hw_ring_stats_size;
+ chip_ctx->chip_num = en_dev->chip_num;
+ chip_ctx->hw_stats_size = en_dev->hw_ring_stats_size;
rdev->chip_ctx = chip_ctx;
/* rest members to follow eventually */
@@ -128,7 +127,7 @@ static int bnxt_re_setup_chip_ctx(struct bnxt_re_dev *rdev, u8 wqe_mode)
rdev->qplib_res.cctx = rdev->chip_ctx;
rdev->rcfw.res = &rdev->qplib_res;
rdev->qplib_res.dattr = &rdev->dev_attr;
- rdev->qplib_res.is_vf = BNXT_VF(bp);
+ rdev->qplib_res.is_vf = BNXT_EN_VF(en_dev);
bnxt_re_set_drv_mode(rdev, wqe_mode);
if (bnxt_qplib_determine_atomics(en_dev->pdev))
@@ -141,10 +140,7 @@ static int bnxt_re_setup_chip_ctx(struct bnxt_re_dev *rdev, u8 wqe_mode)
static void bnxt_re_get_sriov_func_type(struct bnxt_re_dev *rdev)
{
- struct bnxt *bp;
-
- bp = netdev_priv(rdev->en_dev->net);
- if (BNXT_VF(bp))
+ if (BNXT_EN_VF(rdev->en_dev))
rdev->is_virtfn = 1;
}
@@ -225,56 +221,12 @@ static void bnxt_re_set_resource_limits(struct bnxt_re_dev *rdev)
bnxt_re_limit_vf_res(&rdev->qplib_ctx, num_vfs);
}
-/* for handling bnxt_en callbacks later */
-static void bnxt_re_stop(void *p)
-{
- struct bnxt_re_dev *rdev = p;
- struct bnxt *bp;
-
- if (!rdev)
- return;
- ASSERT_RTNL();
-
- /* L2 driver invokes this callback during device error/crash or device
- * reset. Current RoCE driver doesn't recover the device in case of
- * error. Handle the error by dispatching fatal events to all qps
- * ie. by calling bnxt_re_dev_stop and release the MSIx vectors as
- * L2 driver want to modify the MSIx table.
- */
- bp = netdev_priv(rdev->netdev);
-
- ibdev_info(&rdev->ibdev, "Handle device stop call from L2 driver");
- /* Check the current device state from L2 structure and move the
- * device to detached state if FW_FATAL_COND is set.
- * This prevents more commands to HW during clean-up,
- * in case the device is already in error.
- */
- if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
- set_bit(ERR_DEVICE_DETACHED, &rdev->rcfw.cmdq.flags);
-
- bnxt_re_dev_stop(rdev);
- bnxt_re_stop_irq(rdev);
- /* Move the device states to detached and avoid sending any more
- * commands to HW
- */
- set_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags);
- set_bit(ERR_DEVICE_DETACHED, &rdev->rcfw.cmdq.flags);
-}
-
-static void bnxt_re_start(void *p)
-{
-}
-
-static void bnxt_re_sriov_config(void *p, int num_vfs)
+static void bnxt_re_vf_res_config(struct bnxt_re_dev *rdev)
{
- struct bnxt_re_dev *rdev = p;
-
- if (!rdev)
- return;
if (test_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags))
return;
- rdev->num_vfs = num_vfs;
+ rdev->num_vfs = pci_sriov_get_totalvfs(rdev->en_dev->pdev);
if (!bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx)) {
bnxt_re_set_resource_limits(rdev);
bnxt_qplib_set_func_resources(&rdev->qplib_res, &rdev->rcfw,
@@ -282,16 +234,14 @@ static void bnxt_re_sriov_config(void *p, int num_vfs)
}
}
-static void bnxt_re_shutdown(void *p)
+static void bnxt_re_shutdown(struct auxiliary_device *adev)
{
- struct bnxt_re_dev *rdev = p;
+ struct bnxt_re_dev *rdev = auxiliary_get_drvdata(adev);
if (!rdev)
return;
- ASSERT_RTNL();
- /* Release the MSIx vectors before queuing unregister */
- bnxt_re_stop_irq(rdev);
- ib_unregister_device_queued(&rdev->ibdev);
+ ib_unregister_device(&rdev->ibdev);
+ bnxt_re_dev_uninit(rdev);
}
static void bnxt_re_stop_irq(void *handle)
@@ -312,7 +262,7 @@ static void bnxt_re_stop_irq(void *handle)
static void bnxt_re_start_irq(void *handle, struct bnxt_msix_entry *ent)
{
struct bnxt_re_dev *rdev = (struct bnxt_re_dev *)handle;
- struct bnxt_msix_entry *msix_ent = rdev->msix_entries;
+ struct bnxt_msix_entry *msix_ent = rdev->en_dev->msix_entries;
struct bnxt_qplib_rcfw *rcfw = &rdev->rcfw;
struct bnxt_qplib_nq *nq;
int indx, rc;
@@ -331,7 +281,7 @@ static void bnxt_re_start_irq(void *handle, struct bnxt_msix_entry *ent)
* in device sctructure.
*/
for (indx = 0; indx < rdev->num_msix; indx++)
- rdev->msix_entries[indx].vector = ent[indx].vector;
+ rdev->en_dev->msix_entries[indx].vector = ent[indx].vector;
bnxt_qplib_rcfw_start_irq(rcfw, msix_ent[BNXT_RE_AEQ_IDX].vector,
false);
@@ -346,93 +296,22 @@ static void bnxt_re_start_irq(void *handle, struct bnxt_msix_entry *ent)
}
static struct bnxt_ulp_ops bnxt_re_ulp_ops = {
- .ulp_async_notifier = NULL,
- .ulp_stop = bnxt_re_stop,
- .ulp_start = bnxt_re_start,
- .ulp_sriov_config = bnxt_re_sriov_config,
- .ulp_shutdown = bnxt_re_shutdown,
.ulp_irq_stop = bnxt_re_stop_irq,
.ulp_irq_restart = bnxt_re_start_irq
};
/* RoCE -> Net driver */
-/* Driver registration routines used to let the networking driver (bnxt_en)
- * to know that the RoCE driver is now installed
- */
-static int bnxt_re_unregister_netdev(struct bnxt_re_dev *rdev)
-{
- struct bnxt_en_dev *en_dev;
- int rc;
-
- if (!rdev)
- return -EINVAL;
-
- en_dev = rdev->en_dev;
-
- rc = en_dev->en_ops->bnxt_unregister_device(rdev->en_dev,
- BNXT_ROCE_ULP);
- return rc;
-}
-
static int bnxt_re_register_netdev(struct bnxt_re_dev *rdev)
{
struct bnxt_en_dev *en_dev;
int rc = 0;
- if (!rdev)
- return -EINVAL;
-
en_dev = rdev->en_dev;
- rc = en_dev->en_ops->bnxt_register_device(en_dev, BNXT_ROCE_ULP,
- &bnxt_re_ulp_ops, rdev);
- rdev->qplib_res.pdev = rdev->en_dev->pdev;
- return rc;
-}
-
-static int bnxt_re_free_msix(struct bnxt_re_dev *rdev)
-{
- struct bnxt_en_dev *en_dev;
- int rc;
-
- if (!rdev)
- return -EINVAL;
-
- en_dev = rdev->en_dev;
-
-
- rc = en_dev->en_ops->bnxt_free_msix(rdev->en_dev, BNXT_ROCE_ULP);
-
- return rc;
-}
-
-static int bnxt_re_request_msix(struct bnxt_re_dev *rdev)
-{
- int rc = 0, num_msix_want = BNXT_RE_MAX_MSIX, num_msix_got;
- struct bnxt_en_dev *en_dev;
-
- if (!rdev)
- return -EINVAL;
-
- en_dev = rdev->en_dev;
-
- num_msix_want = min_t(u32, BNXT_RE_MAX_MSIX, num_online_cpus());
-
- num_msix_got = en_dev->en_ops->bnxt_request_msix(en_dev, BNXT_ROCE_ULP,
- rdev->msix_entries,
- num_msix_want);
- if (num_msix_got < BNXT_RE_MIN_MSIX) {
- rc = -EINVAL;
- goto done;
- }
- if (num_msix_got != num_msix_want) {
- ibdev_warn(&rdev->ibdev,
- "Requested %d MSI-X vectors, got %d\n",
- num_msix_want, num_msix_got);
- }
- rdev->num_msix = num_msix_got;
-done:
+ rc = bnxt_register_dev(en_dev, &bnxt_re_ulp_ops, rdev);
+ if (!rc)
+ rdev->qplib_res.pdev = rdev->en_dev->pdev;
return rc;
}
@@ -458,12 +337,17 @@ static void bnxt_re_fill_fw_msg(struct bnxt_fw_msg *fw_msg, void *msg,
static int bnxt_re_net_ring_free(struct bnxt_re_dev *rdev,
u16 fw_ring_id, int type)
{
- struct bnxt_en_dev *en_dev = rdev->en_dev;
+ struct bnxt_en_dev *en_dev;
struct hwrm_ring_free_input req = {0};
struct hwrm_ring_free_output resp;
struct bnxt_fw_msg fw_msg;
int rc = -EINVAL;
+ if (!rdev)
+ return rc;
+
+ en_dev = rdev->en_dev;
+
if (!en_dev)
return rc;
@@ -477,7 +361,7 @@ static int bnxt_re_net_ring_free(struct bnxt_re_dev *rdev,
req.ring_id = cpu_to_le16(fw_ring_id);
bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
- rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
+ rc = bnxt_send_msg(en_dev, &fw_msg);
if (rc)
ibdev_err(&rdev->ibdev, "Failed to free HW ring:%d :%#x",
req.ring_id, rc);
@@ -514,7 +398,7 @@ static int bnxt_re_net_ring_alloc(struct bnxt_re_dev *rdev,
req.int_mode = ring_attr->mode;
bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
- rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
+ rc = bnxt_send_msg(en_dev, &fw_msg);
if (!rc)
*fw_ring_id = le16_to_cpu(resp.ring_id);
@@ -542,7 +426,7 @@ static int bnxt_re_net_stats_ctx_free(struct bnxt_re_dev *rdev,
req.stat_ctx_id = cpu_to_le32(fw_stats_ctx_id);
bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
- rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
+ rc = bnxt_send_msg(en_dev, &fw_msg);
if (rc)
ibdev_err(&rdev->ibdev, "Failed to free HW stats context %#x",
rc);
@@ -575,7 +459,7 @@ static int bnxt_re_net_stats_ctx_alloc(struct bnxt_re_dev *rdev,
req.stat_ctx_flags = STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_ROCE;
bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
- rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
+ rc = bnxt_send_msg(en_dev, &fw_msg);
if (!rc)
*fw_stats_ctx_id = le32_to_cpu(resp.stat_ctx_id);
@@ -584,21 +468,6 @@ static int bnxt_re_net_stats_ctx_alloc(struct bnxt_re_dev *rdev,
/* Device */
-static bool is_bnxt_re_dev(struct net_device *netdev)
-{
- struct ethtool_drvinfo drvinfo;
-
- if (netdev->ethtool_ops && netdev->ethtool_ops->get_drvinfo) {
- memset(&drvinfo, 0, sizeof(drvinfo));
- netdev->ethtool_ops->get_drvinfo(netdev, &drvinfo);
-
- if (strcmp(drvinfo.driver, "bnxt_en"))
- return false;
- return true;
- }
- return false;
-}
-
static struct bnxt_re_dev *bnxt_re_from_netdev(struct net_device *netdev)
{
struct ib_device *ibdev =
@@ -609,31 +478,6 @@ static struct bnxt_re_dev *bnxt_re_from_netdev(struct net_device *netdev)
return container_of(ibdev, struct bnxt_re_dev, ibdev);
}
-static struct bnxt_en_dev *bnxt_re_dev_probe(struct net_device *netdev)
-{
- struct bnxt_en_dev *en_dev;
- struct pci_dev *pdev;
-
- en_dev = bnxt_ulp_probe(netdev);
- if (IS_ERR(en_dev))
- return en_dev;
-
- pdev = en_dev->pdev;
- if (!pdev)
- return ERR_PTR(-EINVAL);
-
- if (!(en_dev->flags & BNXT_EN_FLAG_ROCE_CAP)) {
- dev_info(&pdev->dev,
- "%s: probe error: RoCE is not supported on this device",
- ROCE_DRV_MODULE_NAME);
- return ERR_PTR(-ENODEV);
- }
-
- dev_hold(netdev);
-
- return en_dev;
-}
-
static ssize_t hw_rev_show(struct device *device, struct device_attribute *attr,
char *buf)
{
@@ -679,7 +523,6 @@ static const struct ib_device_ops bnxt_re_dev_ops = {
.create_qp = bnxt_re_create_qp,
.create_srq = bnxt_re_create_srq,
.create_user_ah = bnxt_re_create_ah,
- .dealloc_driver = bnxt_re_dealloc_driver,
.dealloc_pd = bnxt_re_dealloc_pd,
.dealloc_ucontext = bnxt_re_dealloc_ucontext,
.del_gid = bnxt_re_del_gid,
@@ -744,18 +587,7 @@ static int bnxt_re_register_ib(struct bnxt_re_dev *rdev)
return ib_register_device(ibdev, "bnxt_re%d", &rdev->en_dev->pdev->dev);
}
-static void bnxt_re_dev_remove(struct bnxt_re_dev *rdev)
-{
- dev_put(rdev->netdev);
- rdev->netdev = NULL;
- mutex_lock(&bnxt_re_dev_lock);
- list_del_rcu(&rdev->list);
- mutex_unlock(&bnxt_re_dev_lock);
-
- synchronize_rcu();
-}
-
-static struct bnxt_re_dev *bnxt_re_dev_add(struct net_device *netdev,
+static struct bnxt_re_dev *bnxt_re_dev_add(struct bnxt_aux_priv *aux_priv,
struct bnxt_en_dev *en_dev)
{
struct bnxt_re_dev *rdev;
@@ -768,8 +600,8 @@ static struct bnxt_re_dev *bnxt_re_dev_add(struct net_device *netdev,
return NULL;
}
/* Default values */
- rdev->netdev = netdev;
- dev_hold(rdev->netdev);
+ rdev->nb.notifier_call = NULL;
+ rdev->netdev = en_dev->net;
rdev->en_dev = en_dev;
rdev->id = rdev->en_dev->pdev->devfn;
INIT_LIST_HEAD(&rdev->qp_list);
@@ -784,9 +616,6 @@ static struct bnxt_re_dev *bnxt_re_dev_add(struct net_device *netdev,
rdev->cosq[0] = 0xFFFF;
rdev->cosq[1] = 0xFFFF;
- mutex_lock(&bnxt_re_dev_lock);
- list_add_tail_rcu(&rdev->list, &bnxt_re_dev_list);
- mutex_unlock(&bnxt_re_dev_lock);
return rdev;
}
@@ -930,7 +759,7 @@ static u32 bnxt_re_get_nqdb_offset(struct bnxt_re_dev *rdev, u16 indx)
return bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx) ?
(rdev->is_virtfn ? BNXT_RE_GEN_P5_VF_NQ_DB :
BNXT_RE_GEN_P5_PF_NQ_DB) :
- rdev->msix_entries[indx].db_offset;
+ rdev->en_dev->msix_entries[indx].db_offset;
}
static void bnxt_re_cleanup_res(struct bnxt_re_dev *rdev)
@@ -955,7 +784,7 @@ static int bnxt_re_init_res(struct bnxt_re_dev *rdev)
for (i = 1; i < rdev->num_msix ; i++) {
db_offt = bnxt_re_get_nqdb_offset(rdev, i);
rc = bnxt_qplib_enable_nq(rdev->en_dev->pdev, &rdev->nq[i - 1],
- i - 1, rdev->msix_entries[i].vector,
+ i - 1, rdev->en_dev->msix_entries[i].vector,
db_offt, &bnxt_re_cqn_handler,
&bnxt_re_srqn_handler);
if (rc) {
@@ -1042,7 +871,7 @@ static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev)
rattr.type = type;
rattr.mode = RING_ALLOC_REQ_INT_MODE_MSIX;
rattr.depth = BNXT_QPLIB_NQE_MAX_CNT - 1;
- rattr.lrid = rdev->msix_entries[i + 1].ring_idx;
+ rattr.lrid = rdev->en_dev->msix_entries[i + 1].ring_idx;
rc = bnxt_re_net_ring_alloc(rdev, &rattr, &nq->ring_id);
if (rc) {
ibdev_err(&rdev->ibdev,
@@ -1095,7 +924,6 @@ static int bnxt_re_query_hwrm_pri2cos(struct bnxt_re_dev *rdev, u8 dir,
u64 *cid_map)
{
struct hwrm_queue_pri2cos_qcfg_input req = {0};
- struct bnxt *bp = netdev_priv(rdev->netdev);
struct hwrm_queue_pri2cos_qcfg_output resp;
struct bnxt_en_dev *en_dev = rdev->en_dev;
struct bnxt_fw_msg fw_msg;
@@ -1112,11 +940,11 @@ static int bnxt_re_query_hwrm_pri2cos(struct bnxt_re_dev *rdev, u8 dir,
flags |= (dir & 0x01);
flags |= HWRM_QUEUE_PRI2COS_QCFG_INPUT_FLAGS_IVLAN;
req.flags = cpu_to_le32(flags);
- req.port_id = bp->pf.port_id;
+ req.port_id = en_dev->pf_port_id;
bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
- rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
+ rc = bnxt_send_msg(en_dev, &fw_msg);
if (rc)
return rc;
@@ -1299,7 +1127,7 @@ static void bnxt_re_query_hwrm_intf_version(struct bnxt_re_dev *rdev)
req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
- rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
+ rc = bnxt_send_msg(en_dev, &fw_msg);
if (rc) {
ibdev_err(&rdev->ibdev, "Failed to query HW version, rc = 0x%x",
rc);
@@ -1323,7 +1151,7 @@ static int bnxt_re_ib_init(struct bnxt_re_dev *rdev)
pr_err("Failed to register with IB: %#x\n", rc);
return rc;
}
- dev_info(rdev_to_dev(rdev), "Device registered successfully");
+ dev_info(rdev_to_dev(rdev), "Device registered with IB successfully");
ib_get_eth_speed(&rdev->ibdev, 1, &rdev->active_speed,
&rdev->active_width);
set_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS, &rdev->flags);
@@ -1362,20 +1190,12 @@ static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev)
bnxt_re_net_ring_free(rdev, rdev->rcfw.creq.ring_id, type);
bnxt_qplib_free_rcfw_channel(&rdev->rcfw);
}
- if (test_and_clear_bit(BNXT_RE_FLAG_GOT_MSIX, &rdev->flags)) {
- rc = bnxt_re_free_msix(rdev);
- if (rc)
- ibdev_warn(&rdev->ibdev,
- "Failed to free MSI-X vectors: %#x", rc);
- }
+ if (test_and_clear_bit(BNXT_RE_FLAG_GOT_MSIX, &rdev->flags))
+ rdev->num_msix = 0;
bnxt_re_destroy_chip_ctx(rdev);
- if (test_and_clear_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags)) {
- rc = bnxt_re_unregister_netdev(rdev);
- if (rc)
- ibdev_warn(&rdev->ibdev,
- "Failed to unregister with netdev: %#x", rc);
- }
+ if (test_and_clear_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags))
+ bnxt_unregister_dev(rdev->en_dev);
}
/* worker thread for polling periodic events. Now used for QoS programming*/
@@ -1416,13 +1236,15 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 wqe_mode)
/* Check whether VF or PF */
bnxt_re_get_sriov_func_type(rdev);
- rc = bnxt_re_request_msix(rdev);
- if (rc) {
+ if (!rdev->en_dev->ulp_tbl->msix_requested) {
ibdev_err(&rdev->ibdev,
"Failed to get MSI-X vectors: %#x\n", rc);
rc = -EINVAL;
goto fail;
}
+ ibdev_dbg(&rdev->ibdev, "Got %d MSI-X vectors\n",
+ rdev->en_dev->ulp_tbl->msix_requested);
+ rdev->num_msix = rdev->en_dev->ulp_tbl->msix_requested;
set_bit(BNXT_RE_FLAG_GOT_MSIX, &rdev->flags);
bnxt_re_query_hwrm_intf_version(rdev);
@@ -1446,14 +1268,14 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 wqe_mode)
rattr.type = type;
rattr.mode = RING_ALLOC_REQ_INT_MODE_MSIX;
rattr.depth = BNXT_QPLIB_CREQE_MAX_CNT - 1;
- rattr.lrid = rdev->msix_entries[BNXT_RE_AEQ_IDX].ring_idx;
+ rattr.lrid = rdev->en_dev->msix_entries[BNXT_RE_AEQ_IDX].ring_idx;
rc = bnxt_re_net_ring_alloc(rdev, &rattr, &creq->ring_id);
if (rc) {
ibdev_err(&rdev->ibdev, "Failed to allocate CREQ: %#x\n", rc);
goto free_rcfw;
}
db_offt = bnxt_re_get_nqdb_offset(rdev, BNXT_RE_AEQ_IDX);
- vid = rdev->msix_entries[BNXT_RE_AEQ_IDX].vector;
+ vid = rdev->en_dev->msix_entries[BNXT_RE_AEQ_IDX].vector;
rc = bnxt_qplib_enable_rcfw_channel(&rdev->rcfw,
vid, db_offt, rdev->is_virtfn,
&bnxt_re_aeq_handler);
@@ -1521,6 +1343,11 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 wqe_mode)
INIT_DELAYED_WORK(&rdev->worker, bnxt_re_worker);
set_bit(BNXT_RE_FLAG_QOS_WORK_REG, &rdev->flags);
schedule_delayed_work(&rdev->worker, msecs_to_jiffies(30000));
+ /*
+ * Use the total VF count since the actual VF count may not be
+ * available at this point.
+ */
+ bnxt_re_vf_res_config(rdev);
}
return 0;
@@ -1541,135 +1368,43 @@ fail:
return rc;
}
-static void bnxt_re_dev_unreg(struct bnxt_re_dev *rdev)
-{
- struct net_device *netdev = rdev->netdev;
-
- bnxt_re_dev_remove(rdev);
-
- if (netdev)
- dev_put(netdev);
-}
-
-static int bnxt_re_dev_reg(struct bnxt_re_dev **rdev, struct net_device *netdev)
+static int bnxt_re_add_device(struct auxiliary_device *adev, u8 wqe_mode)
{
+ struct bnxt_aux_priv *aux_priv =
+ container_of(adev, struct bnxt_aux_priv, aux_dev);
struct bnxt_en_dev *en_dev;
+ struct bnxt_re_dev *rdev;
int rc = 0;
- if (!is_bnxt_re_dev(netdev))
- return -ENODEV;
+ /* en_dev should never be NULL as long as adev and aux_dev are valid. */
+ en_dev = aux_priv->edev;
- en_dev = bnxt_re_dev_probe(netdev);
- if (IS_ERR(en_dev)) {
- if (en_dev != ERR_PTR(-ENODEV))
- ibdev_err(&(*rdev)->ibdev, "%s: Failed to probe\n",
- ROCE_DRV_MODULE_NAME);
- rc = PTR_ERR(en_dev);
- goto exit;
- }
- *rdev = bnxt_re_dev_add(netdev, en_dev);
- if (!*rdev) {
+ rdev = bnxt_re_dev_add(aux_priv, en_dev);
+ if (!rdev || !rdev_to_dev(rdev)) {
rc = -ENOMEM;
- dev_put(netdev);
goto exit;
}
-exit:
- return rc;
-}
-static void bnxt_re_remove_device(struct bnxt_re_dev *rdev)
-{
- bnxt_re_dev_uninit(rdev);
- pci_dev_put(rdev->en_dev->pdev);
- bnxt_re_dev_unreg(rdev);
-}
-
-static int bnxt_re_add_device(struct bnxt_re_dev **rdev,
- struct net_device *netdev, u8 wqe_mode)
-{
- int rc;
-
- rc = bnxt_re_dev_reg(rdev, netdev);
- if (rc == -ENODEV)
- return rc;
- if (rc) {
- pr_err("Failed to register with the device %s: %#x\n",
- netdev->name, rc);
- return rc;
- }
+ rc = bnxt_re_dev_init(rdev, wqe_mode);
+ if (rc)
+ goto re_dev_dealloc;
- pci_dev_get((*rdev)->en_dev->pdev);
- rc = bnxt_re_dev_init(*rdev, wqe_mode);
+ rc = bnxt_re_ib_init(rdev);
if (rc) {
- pci_dev_put((*rdev)->en_dev->pdev);
- bnxt_re_dev_unreg(*rdev);
+ pr_err("Failed to register with IB: %s",
+ aux_priv->aux_dev.name);
+ goto re_dev_uninit;
}
+ auxiliary_set_drvdata(adev, rdev);
- return rc;
-}
-
-static void bnxt_re_dealloc_driver(struct ib_device *ib_dev)
-{
- struct bnxt_re_dev *rdev =
- container_of(ib_dev, struct bnxt_re_dev, ibdev);
-
- dev_info(rdev_to_dev(rdev), "Unregistering Device");
-
- rtnl_lock();
- bnxt_re_remove_device(rdev);
- rtnl_unlock();
-}
-
-/* Handle all deferred netevents tasks */
-static void bnxt_re_task(struct work_struct *work)
-{
- struct bnxt_re_work *re_work;
- struct bnxt_re_dev *rdev;
- int rc = 0;
-
- re_work = container_of(work, struct bnxt_re_work, work);
- rdev = re_work->rdev;
-
- if (re_work->event == NETDEV_REGISTER) {
- rc = bnxt_re_ib_init(rdev);
- if (rc) {
- ibdev_err(&rdev->ibdev,
- "Failed to register with IB: %#x", rc);
- rtnl_lock();
- bnxt_re_remove_device(rdev);
- rtnl_unlock();
- goto exit;
- }
- goto exit;
- }
-
- if (!ib_device_try_get(&rdev->ibdev))
- goto exit;
+ return 0;
- switch (re_work->event) {
- case NETDEV_UP:
- bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1,
- IB_EVENT_PORT_ACTIVE);
- break;
- case NETDEV_DOWN:
- bnxt_re_dev_stop(rdev);
- break;
- case NETDEV_CHANGE:
- if (!netif_carrier_ok(rdev->netdev))
- bnxt_re_dev_stop(rdev);
- else if (netif_carrier_ok(rdev->netdev))
- bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1,
- IB_EVENT_PORT_ACTIVE);
- ib_get_eth_speed(&rdev->ibdev, 1, &rdev->active_speed,
- &rdev->active_width);
- break;
- default:
- break;
- }
- ib_device_put(&rdev->ibdev);
+re_dev_uninit:
+ bnxt_re_dev_uninit(rdev);
+re_dev_dealloc:
+ ib_dealloc_device(&rdev->ibdev);
exit:
- put_device(&rdev->ibdev.dev);
- kfree(re_work);
+ return rc;
}
/*
@@ -1690,109 +1425,189 @@ static int bnxt_re_netdev_event(struct notifier_block *notifier,
unsigned long event, void *ptr)
{
struct net_device *real_dev, *netdev = netdev_notifier_info_to_dev(ptr);
- struct bnxt_re_work *re_work;
struct bnxt_re_dev *rdev;
- int rc = 0;
- bool sch_work = false;
- bool release = true;
real_dev = rdma_vlan_dev_real_dev(netdev);
if (!real_dev)
real_dev = netdev;
- rdev = bnxt_re_from_netdev(real_dev);
- if (!rdev && event != NETDEV_REGISTER)
- return NOTIFY_OK;
-
if (real_dev != netdev)
goto exit;
- switch (event) {
- case NETDEV_REGISTER:
- if (rdev)
- break;
- rc = bnxt_re_add_device(&rdev, real_dev,
- BNXT_QPLIB_WQE_MODE_STATIC);
- if (!rc)
- sch_work = true;
- release = false;
- break;
+ rdev = bnxt_re_from_netdev(real_dev);
+ if (!rdev)
+ return NOTIFY_DONE;
- case NETDEV_UNREGISTER:
- ib_unregister_device_queued(&rdev->ibdev);
- break;
+ switch (event) {
+ case NETDEV_UP:
+ case NETDEV_DOWN:
+ case NETDEV_CHANGE:
+ bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1,
+ netif_carrier_ok(real_dev) ?
+ IB_EVENT_PORT_ACTIVE :
+ IB_EVENT_PORT_ERR);
+ break;
default:
- sch_work = true;
break;
}
- if (sch_work) {
- /* Allocate for the deferred task */
- re_work = kzalloc(sizeof(*re_work), GFP_KERNEL);
- if (re_work) {
- get_device(&rdev->ibdev.dev);
- re_work->rdev = rdev;
- re_work->event = event;
- re_work->vlan_dev = (real_dev == netdev ?
- NULL : netdev);
- INIT_WORK(&re_work->work, bnxt_re_task);
- queue_work(bnxt_re_wq, &re_work->work);
- }
- }
-
+ ib_device_put(&rdev->ibdev);
exit:
- if (rdev && release)
- ib_device_put(&rdev->ibdev);
return NOTIFY_DONE;
}
-static struct notifier_block bnxt_re_netdev_notifier = {
- .notifier_call = bnxt_re_netdev_event
-};
+#define BNXT_ADEV_NAME "bnxt_en"
-static int __init bnxt_re_mod_init(void)
+static void bnxt_re_remove(struct auxiliary_device *adev)
{
- int rc = 0;
+ struct bnxt_re_dev *rdev = auxiliary_get_drvdata(adev);
- pr_info("%s: %s", ROCE_DRV_MODULE_NAME, version);
+ if (!rdev)
+ return;
- bnxt_re_wq = create_singlethread_workqueue("bnxt_re");
- if (!bnxt_re_wq)
- return -ENOMEM;
+ mutex_lock(&bnxt_re_mutex);
+ if (rdev->nb.notifier_call) {
+ unregister_netdevice_notifier(&rdev->nb);
+ rdev->nb.notifier_call = NULL;
+ } else {
+ /* If notifier is null, we should have already done a
+ * clean up before coming here.
+ */
+ goto skip_remove;
+ }
+
+ ib_unregister_device(&rdev->ibdev);
+ ib_dealloc_device(&rdev->ibdev);
+ bnxt_re_dev_uninit(rdev);
+skip_remove:
+ mutex_unlock(&bnxt_re_mutex);
+}
+
+static int bnxt_re_probe(struct auxiliary_device *adev,
+ const struct auxiliary_device_id *id)
+{
+ struct bnxt_re_dev *rdev;
+ int rc;
- INIT_LIST_HEAD(&bnxt_re_dev_list);
+ mutex_lock(&bnxt_re_mutex);
+ rc = bnxt_re_add_device(adev, BNXT_QPLIB_WQE_MODE_STATIC);
+ if (rc) {
+ mutex_unlock(&bnxt_re_mutex);
+ return rc;
+ }
+
+ rdev = auxiliary_get_drvdata(adev);
- rc = register_netdevice_notifier(&bnxt_re_netdev_notifier);
+ rdev->nb.notifier_call = bnxt_re_netdev_event;
+ rc = register_netdevice_notifier(&rdev->nb);
if (rc) {
+ rdev->nb.notifier_call = NULL;
pr_err("%s: Cannot register to netdevice_notifier",
ROCE_DRV_MODULE_NAME);
- goto err_netdev;
+ goto err;
}
+
+ mutex_unlock(&bnxt_re_mutex);
return 0;
-err_netdev:
- destroy_workqueue(bnxt_re_wq);
+err:
+ mutex_unlock(&bnxt_re_mutex);
+ bnxt_re_remove(adev);
return rc;
}
-static void __exit bnxt_re_mod_exit(void)
+static int bnxt_re_suspend(struct auxiliary_device *adev, pm_message_t state)
{
- struct bnxt_re_dev *rdev;
+ struct bnxt_re_dev *rdev = auxiliary_get_drvdata(adev);
- unregister_netdevice_notifier(&bnxt_re_netdev_notifier);
- if (bnxt_re_wq)
- destroy_workqueue(bnxt_re_wq);
- list_for_each_entry(rdev, &bnxt_re_dev_list, list) {
- /* VF device removal should be called before the removal
- * of PF device. Queue VFs unregister first, so that VFs
- * shall be removed before the PF during the call of
- * ib_unregister_driver.
- */
- if (rdev->is_virtfn)
- ib_unregister_device(&rdev->ibdev);
+ if (!rdev)
+ return 0;
+
+ mutex_lock(&bnxt_re_mutex);
+ /* L2 driver may invoke this callback during device error/crash or device
+ * reset. Current RoCE driver doesn't recover the device in case of
+ * error. Handle the error by dispatching fatal events to all qps
+ * ie. by calling bnxt_re_dev_stop and release the MSIx vectors as
+ * L2 driver want to modify the MSIx table.
+ */
+
+ ibdev_info(&rdev->ibdev, "Handle device suspend call");
+ /* Check the current device state from bnxt_en_dev and move the
+ * device to detached state if FW_FATAL_COND is set.
+ * This prevents more commands to HW during clean-up,
+ * in case the device is already in error.
+ */
+ if (test_bit(BNXT_STATE_FW_FATAL_COND, &rdev->en_dev->en_state))
+ set_bit(ERR_DEVICE_DETACHED, &rdev->rcfw.cmdq.flags);
+
+ bnxt_re_dev_stop(rdev);
+ bnxt_re_stop_irq(rdev);
+ /* Move the device states to detached and avoid sending any more
+ * commands to HW
+ */
+ set_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags);
+ set_bit(ERR_DEVICE_DETACHED, &rdev->rcfw.cmdq.flags);
+ mutex_unlock(&bnxt_re_mutex);
+
+ return 0;
+}
+
+static int bnxt_re_resume(struct auxiliary_device *adev)
+{
+ struct bnxt_re_dev *rdev = auxiliary_get_drvdata(adev);
+
+ if (!rdev)
+ return 0;
+
+ mutex_lock(&bnxt_re_mutex);
+ /* L2 driver may invoke this callback during device recovery, resume.
+ * reset. Current RoCE driver doesn't recover the device in case of
+ * error. Handle the error by dispatching fatal events to all qps
+ * ie. by calling bnxt_re_dev_stop and release the MSIx vectors as
+ * L2 driver want to modify the MSIx table.
+ */
+
+ ibdev_info(&rdev->ibdev, "Handle device resume call");
+ mutex_unlock(&bnxt_re_mutex);
+
+ return 0;
+}
+
+static const struct auxiliary_device_id bnxt_re_id_table[] = {
+ { .name = BNXT_ADEV_NAME ".rdma", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(auxiliary, bnxt_re_id_table);
+
+static struct auxiliary_driver bnxt_re_driver = {
+ .name = "rdma",
+ .probe = bnxt_re_probe,
+ .remove = bnxt_re_remove,
+ .shutdown = bnxt_re_shutdown,
+ .suspend = bnxt_re_suspend,
+ .resume = bnxt_re_resume,
+ .id_table = bnxt_re_id_table,
+};
+
+static int __init bnxt_re_mod_init(void)
+{
+ int rc = 0;
+
+ pr_info("%s: %s", ROCE_DRV_MODULE_NAME, version);
+ rc = auxiliary_driver_register(&bnxt_re_driver);
+ if (rc) {
+ pr_err("%s: Failed to register auxiliary driver\n",
+ ROCE_DRV_MODULE_NAME);
+ return rc;
}
- ib_unregister_driver(RDMA_DRIVER_BNXT_RE);
+ return 0;
+}
+
+static void __exit bnxt_re_mod_exit(void)
+{
+ auxiliary_driver_unregister(&bnxt_re_driver);
}
module_init(bnxt_re_mod_init);
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 499a425a3379..ced615b5ea09 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -2676,6 +2676,9 @@ static int pass_establish(struct c4iw_dev *dev, struct sk_buff *skb)
u16 tcp_opt = ntohs(req->tcp_opt);
ep = get_ep_from_tid(dev, tid);
+ if (!ep)
+ return 0;
+
pr_debug("ep %p tid %u\n", ep, ep->hwtid);
ep->snd_seq = be32_to_cpu(req->snd_isn);
ep->rcv_seq = be32_to_cpu(req->rcv_isn);
@@ -4144,6 +4147,10 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
if (neigh->dev->flags & IFF_LOOPBACK) {
pdev = ip_dev_find(&init_net, iph->daddr);
+ if (!pdev) {
+ pr_err("%s - failed to find device!\n", __func__);
+ goto free_dst;
+ }
e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh,
pdev, 0);
pi = (struct port_info *)netdev_priv(pdev);
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index c7e8d7b3baa1..7e2835dcbc1c 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -767,7 +767,7 @@ static int __c4iw_poll_cq_one(struct c4iw_cq *chp, struct c4iw_qp *qhp,
goto out;
wc->wr_id = cookie;
- wc->qp = qhp ? &qhp->ibqp : NULL;
+ wc->qp = &qhp->ibqp;
wc->vendor_err = CQE_STATUS(&cqe);
wc->wc_flags = 0;
diff --git a/drivers/infiniband/hw/cxgb4/restrack.c b/drivers/infiniband/hw/cxgb4/restrack.c
index ff645b955a08..fd22c85d35f4 100644
--- a/drivers/infiniband/hw/cxgb4/restrack.c
+++ b/drivers/infiniband/hw/cxgb4/restrack.c
@@ -238,7 +238,7 @@ int c4iw_fill_res_cm_id_entry(struct sk_buff *msg,
if (rdma_nl_put_driver_u64_hex(msg, "history", epcp->history))
goto err_cancel_table;
- if (epcp->state == LISTEN) {
+ if (listen_ep) {
if (rdma_nl_put_driver_u32(msg, "stid", listen_ep->stid))
goto err_cancel_table;
if (rdma_nl_put_driver_u32(msg, "backlog", listen_ep->backlog))
diff --git a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
index a2f5e29ef226..1f79537fc8d1 100644
--- a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
+++ b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
@@ -122,9 +122,7 @@ struct fw_ri_dsgl {
__be16 nsge;
__be32 len0;
__be64 addr0;
-#ifndef C99_NOT_SUPPORTED
struct fw_ri_dsge_pair sge[];
-#endif
};
struct fw_ri_sge {
@@ -138,9 +136,7 @@ struct fw_ri_isgl {
__u8 r1;
__be16 nsge;
__be32 r2;
-#ifndef C99_NOT_SUPPORTED
struct fw_ri_sge sge[];
-#endif
};
struct fw_ri_immd {
@@ -148,9 +144,7 @@ struct fw_ri_immd {
__u8 r1;
__be16 r2;
__be32 immdlen;
-#ifndef C99_NOT_SUPPORTED
__u8 data[];
-#endif
};
struct fw_ri_tpte {
@@ -320,9 +314,7 @@ struct fw_ri_res_wr {
__be32 op_nres;
__be32 len16_pkd;
__u64 cookie;
-#ifndef C99_NOT_SUPPORTED
struct fw_ri_res res[];
-#endif
};
#define FW_RI_RES_WR_NRES_S 0
@@ -562,12 +554,10 @@ struct fw_ri_rdma_write_wr {
__be32 plen;
__be32 stag_sink;
__be64 to_sink;
-#ifndef C99_NOT_SUPPORTED
union {
- struct fw_ri_immd immd_src[0];
- struct fw_ri_isgl isgl_src[0];
+ DECLARE_FLEX_ARRAY(struct fw_ri_immd, immd_src);
+ DECLARE_FLEX_ARRAY(struct fw_ri_isgl, isgl_src);
} u;
-#endif
};
struct fw_ri_send_wr {
@@ -581,12 +571,10 @@ struct fw_ri_send_wr {
__be32 plen;
__be32 r3;
__be64 r4;
-#ifndef C99_NOT_SUPPORTED
union {
- struct fw_ri_immd immd_src[0];
- struct fw_ri_isgl isgl_src[0];
+ DECLARE_FLEX_ARRAY(struct fw_ri_immd, immd_src);
+ DECLARE_FLEX_ARRAY(struct fw_ri_isgl, isgl_src);
} u;
-#endif
};
#define FW_RI_SEND_WR_SENDOP_S 0
@@ -618,12 +606,10 @@ struct fw_ri_rdma_write_cmpl_wr {
struct fw_ri_isgl isgl_src;
} u_cmpl;
__be64 r3;
-#ifndef C99_NOT_SUPPORTED
union fw_ri_write {
- struct fw_ri_immd immd_src[0];
- struct fw_ri_isgl isgl_src[0];
+ DECLARE_FLEX_ARRAY(struct fw_ri_immd, immd_src);
+ DECLARE_FLEX_ARRAY(struct fw_ri_isgl, isgl_src);
} u;
-#endif
};
struct fw_ri_rdma_read_wr {
diff --git a/drivers/infiniband/hw/erdma/erdma_cm.c b/drivers/infiniband/hw/erdma/erdma_cm.c
index 74f6348f240a..771059a8eb7d 100644
--- a/drivers/infiniband/hw/erdma/erdma_cm.c
+++ b/drivers/infiniband/hw/erdma/erdma_cm.c
@@ -11,6 +11,7 @@
/* Copyright (c) 2017, Open Grid Computing, Inc. */
#include <linux/workqueue.h>
+#include <trace/events/sock.h>
#include "erdma.h"
#include "erdma_cm.h"
@@ -925,6 +926,8 @@ static void erdma_cm_llp_data_ready(struct sock *sk)
{
struct erdma_cep *cep;
+ trace_sk_data_ready(sk);
+
read_lock(&sk->sk_callback_lock);
cep = sk_to_cep(sk);
diff --git a/drivers/infiniband/hw/erdma/erdma_hw.h b/drivers/infiniband/hw/erdma/erdma_hw.h
index ab371fec610c..4c38d99c73f1 100644
--- a/drivers/infiniband/hw/erdma/erdma_hw.h
+++ b/drivers/infiniband/hw/erdma/erdma_hw.h
@@ -397,7 +397,7 @@ struct erdma_write_sqe {
__le32 rsvd;
- struct erdma_sge sgl[0];
+ struct erdma_sge sgl[];
};
struct erdma_send_sqe {
@@ -408,7 +408,7 @@ struct erdma_send_sqe {
};
__le32 length;
- struct erdma_sge sgl[0];
+ struct erdma_sge sgl[];
};
struct erdma_readreq_sqe {
diff --git a/drivers/infiniband/hw/erdma/erdma_verbs.c b/drivers/infiniband/hw/erdma/erdma_verbs.c
index 5dab1e87975b..9c30d78730aa 100644
--- a/drivers/infiniband/hw/erdma/erdma_verbs.c
+++ b/drivers/infiniband/hw/erdma/erdma_verbs.c
@@ -1110,12 +1110,14 @@ int erdma_mmap(struct ib_ucontext *ctx, struct vm_area_struct *vma)
prot = pgprot_device(vma->vm_page_prot);
break;
default:
- return -EINVAL;
+ err = -EINVAL;
+ goto put_entry;
}
err = rdma_user_mmap_io(ctx, vma, PFN_DOWN(entry->address), PAGE_SIZE,
prot, rdma_entry);
+put_entry:
rdma_user_mmap_entry_put(rdma_entry);
return err;
}
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index ebe970f76232..90b672feed83 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -1056,7 +1056,7 @@ static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr);
static void handle_temp_err(struct hfi1_devdata *dd);
static void dc_shutdown(struct hfi1_devdata *dd);
static void dc_start(struct hfi1_devdata *dd);
-static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
+static int qos_rmt_entries(unsigned int n_krcv_queues, unsigned int *mp,
unsigned int *np);
static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd);
static int wait_link_transfer_active(struct hfi1_devdata *dd, int wait_ms);
@@ -13362,7 +13362,6 @@ static int set_up_context_variables(struct hfi1_devdata *dd)
int ret;
unsigned ngroups;
int rmt_count;
- int user_rmt_reduced;
u32 n_usr_ctxts;
u32 send_contexts = chip_send_contexts(dd);
u32 rcv_contexts = chip_rcv_contexts(dd);
@@ -13421,28 +13420,34 @@ static int set_up_context_variables(struct hfi1_devdata *dd)
(num_kernel_contexts + n_usr_ctxts),
&node_affinity.real_cpu_mask);
/*
- * The RMT entries are currently allocated as shown below:
- * 1. QOS (0 to 128 entries);
- * 2. FECN (num_kernel_context - 1 + num_user_contexts +
- * num_netdev_contexts);
- * 3. netdev (num_netdev_contexts).
- * It should be noted that FECN oversubscribe num_netdev_contexts
- * entries of RMT because both netdev and PSM could allocate any receive
- * context between dd->first_dyn_alloc_text and dd->num_rcv_contexts,
- * and PSM FECN must reserve an RMT entry for each possible PSM receive
- * context.
+ * RMT entries are allocated as follows:
+ * 1. QOS (0 to 128 entries)
+ * 2. FECN (num_kernel_context - 1 [a] + num_user_contexts +
+ * num_netdev_contexts [b])
+ * 3. netdev (NUM_NETDEV_MAP_ENTRIES)
+ *
+ * Notes:
+ * [a] Kernel contexts (except control) are included in FECN if kernel
+ * TID_RDMA is active.
+ * [b] Netdev and user contexts are randomly allocated from the same
+ * context pool, so FECN must cover all contexts in the pool.
*/
- rmt_count = qos_rmt_entries(dd, NULL, NULL) + (num_netdev_contexts * 2);
- if (HFI1_CAP_IS_KSET(TID_RDMA))
- rmt_count += num_kernel_contexts - 1;
- if (rmt_count + n_usr_ctxts > NUM_MAP_ENTRIES) {
- user_rmt_reduced = NUM_MAP_ENTRIES - rmt_count;
- dd_dev_err(dd,
- "RMT size is reducing the number of user receive contexts from %u to %d\n",
- n_usr_ctxts,
- user_rmt_reduced);
- /* recalculate */
- n_usr_ctxts = user_rmt_reduced;
+ rmt_count = qos_rmt_entries(num_kernel_contexts - 1, NULL, NULL)
+ + (HFI1_CAP_IS_KSET(TID_RDMA) ? num_kernel_contexts - 1
+ : 0)
+ + n_usr_ctxts
+ + num_netdev_contexts
+ + NUM_NETDEV_MAP_ENTRIES;
+ if (rmt_count > NUM_MAP_ENTRIES) {
+ int over = rmt_count - NUM_MAP_ENTRIES;
+ /* try to squish user contexts, minimum of 1 */
+ if (over >= n_usr_ctxts) {
+ dd_dev_err(dd, "RMT overflow: reduce the requested number of contexts\n");
+ return -EINVAL;
+ }
+ dd_dev_err(dd, "RMT overflow: reducing # user contexts from %u to %u\n",
+ n_usr_ctxts, n_usr_ctxts - over);
+ n_usr_ctxts -= over;
}
/* the first N are kernel contexts, the rest are user/netdev contexts */
@@ -14299,15 +14304,15 @@ static void clear_rsm_rule(struct hfi1_devdata *dd, u8 rule_index)
}
/* return the number of RSM map table entries that will be used for QOS */
-static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
+static int qos_rmt_entries(unsigned int n_krcv_queues, unsigned int *mp,
unsigned int *np)
{
int i;
unsigned int m, n;
- u8 max_by_vl = 0;
+ uint max_by_vl = 0;
/* is QOS active at all? */
- if (dd->n_krcv_queues <= MIN_KERNEL_KCTXTS ||
+ if (n_krcv_queues < MIN_KERNEL_KCTXTS ||
num_vls == 1 ||
krcvqsset <= 1)
goto no_qos;
@@ -14365,7 +14370,7 @@ static void init_qos(struct hfi1_devdata *dd, struct rsm_map_table *rmt)
if (!rmt)
goto bail;
- rmt_entries = qos_rmt_entries(dd, &m, &n);
+ rmt_entries = qos_rmt_entries(dd->n_krcv_queues - 1, &m, &n);
if (rmt_entries == 0)
goto bail;
qpns_per_vl = 1 << m;
diff --git a/drivers/infiniband/hw/hfi1/exp_rcv.h b/drivers/infiniband/hw/hfi1/exp_rcv.h
index c6291bbf723c..41f7fe5d1839 100644
--- a/drivers/infiniband/hw/hfi1/exp_rcv.h
+++ b/drivers/infiniband/hw/hfi1/exp_rcv.h
@@ -133,12 +133,13 @@ static inline struct tid_group *tid_group_pop(struct exp_tid_set *set)
return grp;
}
-static inline u32 rcventry2tidinfo(u32 rcventry)
+static inline u32 create_tid(u32 rcventry, u32 npages)
{
u32 pair = rcventry & ~0x1;
return EXP_TID_SET(IDX, pair >> 1) |
- EXP_TID_SET(CTRL, 1 << (rcventry - pair));
+ EXP_TID_SET(CTRL, 1 << (rcventry - pair)) |
+ EXP_TID_SET(LEN, npages);
}
/**
diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
index f5f9269fdc16..b1d6ca7e9708 100644
--- a/drivers/infiniband/hw/hfi1/file_ops.c
+++ b/drivers/infiniband/hw/hfi1/file_ops.c
@@ -306,6 +306,17 @@ static ssize_t hfi1_write_iter(struct kiocb *kiocb, struct iov_iter *from)
return reqs;
}
+static inline void mmap_cdbg(u16 ctxt, u8 subctxt, u8 type, u8 mapio, u8 vmf,
+ u64 memaddr, void *memvirt, dma_addr_t memdma,
+ ssize_t memlen, struct vm_area_struct *vma)
+{
+ hfi1_cdbg(PROC,
+ "%u:%u type:%u io/vf/dma:%d/%d/%d, addr:0x%llx, len:%lu(%lu), flags:0x%lx",
+ ctxt, subctxt, type, mapio, vmf, !!memdma,
+ memaddr ?: (u64)memvirt, memlen,
+ vma->vm_end - vma->vm_start, vma->vm_flags);
+}
+
static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma)
{
struct hfi1_filedata *fd = fp->private_data;
@@ -315,6 +326,7 @@ static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma)
u64 token = vma->vm_pgoff << PAGE_SHIFT,
memaddr = 0;
void *memvirt = NULL;
+ dma_addr_t memdma = 0;
u8 subctxt, mapio = 0, vmf = 0, type;
ssize_t memlen = 0;
int ret = 0;
@@ -334,6 +346,11 @@ static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma)
goto done;
}
+ /*
+ * vm_pgoff is used as a buffer selector cookie. Always mmap from
+ * the beginning.
+ */
+ vma->vm_pgoff = 0;
flags = vma->vm_flags;
switch (type) {
@@ -355,7 +372,8 @@ static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma)
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
mapio = 1;
break;
- case PIO_CRED:
+ case PIO_CRED: {
+ u64 cr_page_offset;
if (flags & VM_WRITE) {
ret = -EPERM;
goto done;
@@ -365,10 +383,11 @@ static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma)
* second or third page allocated for credit returns (if number
* of enabled contexts > 64 and 128 respectively).
*/
- memvirt = dd->cr_base[uctxt->numa_id].va;
- memaddr = virt_to_phys(memvirt) +
- (((u64)uctxt->sc->hw_free -
- (u64)dd->cr_base[uctxt->numa_id].va) & PAGE_MASK);
+ cr_page_offset = ((u64)uctxt->sc->hw_free -
+ (u64)dd->cr_base[uctxt->numa_id].va) &
+ PAGE_MASK;
+ memvirt = dd->cr_base[uctxt->numa_id].va + cr_page_offset;
+ memdma = dd->cr_base[uctxt->numa_id].dma + cr_page_offset;
memlen = PAGE_SIZE;
flags &= ~VM_MAYWRITE;
flags |= VM_DONTCOPY | VM_DONTEXPAND;
@@ -378,14 +397,16 @@ static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma)
* memory been flagged as non-cached?
*/
/* vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); */
- mapio = 1;
break;
+ }
case RCV_HDRQ:
memlen = rcvhdrq_size(uctxt);
memvirt = uctxt->rcvhdrq;
+ memdma = uctxt->rcvhdrq_dma;
break;
case RCV_EGRBUF: {
- unsigned long addr;
+ unsigned long vm_start_save;
+ unsigned long vm_end_save;
int i;
/*
* The RcvEgr buffer need to be handled differently
@@ -403,25 +424,35 @@ static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma)
ret = -EPERM;
goto done;
}
- vma->vm_flags &= ~VM_MAYWRITE;
- addr = vma->vm_start;
+ vm_flags_clear(vma, VM_MAYWRITE);
+ /*
+ * Mmap multiple separate allocations into a single vma. From
+ * here, dma_mmap_coherent() calls dma_direct_mmap(), which
+ * requires the mmap to exactly fill the vma starting at
+ * vma_start. Adjust the vma start and end for each eager
+ * buffer segment mapped. Restore the originals when done.
+ */
+ vm_start_save = vma->vm_start;
+ vm_end_save = vma->vm_end;
+ vma->vm_end = vma->vm_start;
for (i = 0 ; i < uctxt->egrbufs.numbufs; i++) {
memlen = uctxt->egrbufs.buffers[i].len;
memvirt = uctxt->egrbufs.buffers[i].addr;
- ret = remap_pfn_range(
- vma, addr,
- /*
- * virt_to_pfn() does the same, but
- * it's not available on x86_64
- * when CONFIG_MMU is enabled.
- */
- PFN_DOWN(__pa(memvirt)),
- memlen,
- vma->vm_page_prot);
- if (ret < 0)
+ memdma = uctxt->egrbufs.buffers[i].dma;
+ vma->vm_end += memlen;
+ mmap_cdbg(ctxt, subctxt, type, mapio, vmf, memaddr,
+ memvirt, memdma, memlen, vma);
+ ret = dma_mmap_coherent(&dd->pcidev->dev, vma,
+ memvirt, memdma, memlen);
+ if (ret < 0) {
+ vma->vm_start = vm_start_save;
+ vma->vm_end = vm_end_save;
goto done;
- addr += memlen;
+ }
+ vma->vm_start += memlen;
}
+ vma->vm_start = vm_start_save;
+ vma->vm_end = vm_end_save;
ret = 0;
goto done;
}
@@ -481,6 +512,7 @@ static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma)
}
memlen = PAGE_SIZE;
memvirt = (void *)hfi1_rcvhdrtail_kvaddr(uctxt);
+ memdma = uctxt->rcvhdrqtailaddr_dma;
flags &= ~VM_MAYWRITE;
break;
case SUBCTXT_UREGS:
@@ -528,15 +560,16 @@ static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma)
goto done;
}
- vma->vm_flags = flags;
- hfi1_cdbg(PROC,
- "%u:%u type:%u io/vf:%d/%d, addr:0x%llx, len:%lu(%lu), flags:0x%lx\n",
- ctxt, subctxt, type, mapio, vmf, memaddr, memlen,
- vma->vm_end - vma->vm_start, vma->vm_flags);
+ vm_flags_reset(vma, flags);
+ mmap_cdbg(ctxt, subctxt, type, mapio, vmf, memaddr, memvirt, memdma,
+ memlen, vma);
if (vmf) {
vma->vm_pgoff = PFN_DOWN(memaddr);
vma->vm_ops = &vm_ops;
ret = 0;
+ } else if (memdma) {
+ ret = dma_mmap_coherent(&dd->pcidev->dev, vma,
+ memvirt, memdma, memlen);
} else if (mapio) {
ret = io_remap_pfn_range(vma, vma->vm_start,
PFN_DOWN(memaddr),
@@ -1318,12 +1351,15 @@ static int user_exp_rcv_setup(struct hfi1_filedata *fd, unsigned long arg,
addr = arg + offsetof(struct hfi1_tid_info, tidcnt);
if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
sizeof(tinfo.tidcnt)))
- return -EFAULT;
+ ret = -EFAULT;
addr = arg + offsetof(struct hfi1_tid_info, length);
- if (copy_to_user((void __user *)addr, &tinfo.length,
+ if (!ret && copy_to_user((void __user *)addr, &tinfo.length,
sizeof(tinfo.length)))
ret = -EFAULT;
+
+ if (ret)
+ hfi1_user_exp_rcv_invalid(fd, &tinfo);
}
return ret;
diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
index 24c0f0d257fc..62b6c5020039 100644
--- a/drivers/infiniband/hw/hfi1/init.c
+++ b/drivers/infiniband/hw/hfi1/init.c
@@ -464,7 +464,7 @@ bail:
*
* This wrapper is the free function that matches hfi1_create_ctxtdata().
* When a context is done being used (kernel or user), this function is called
- * for the "final" put to match the kref init from hf1i_create_ctxtdata().
+ * for the "final" put to match the kref init from hfi1_create_ctxtdata().
* Other users of the context do a get/put sequence to make sure that the
* structure isn't removed while in use.
*/
diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
index a95b654f5254..8ed20392e9f0 100644
--- a/drivers/infiniband/hw/hfi1/sdma.c
+++ b/drivers/infiniband/hw/hfi1/sdma.c
@@ -3160,8 +3160,7 @@ int _pad_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx)
{
int rval = 0;
- tx->num_desc++;
- if ((unlikely(tx->num_desc == tx->desc_limit))) {
+ if ((unlikely(tx->num_desc + 1 == tx->desc_limit))) {
rval = _extend_sdma_tx_descs(dd, tx);
if (rval) {
__sdma_txclean(dd, tx);
@@ -3174,6 +3173,7 @@ int _pad_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx)
SDMA_MAP_NONE,
dd->sdma_pad_phys,
sizeof(u32) - (tx->packet_len & (sizeof(u32) - 1)));
+ tx->num_desc++;
_sdma_close_tx(dd, tx);
return rval;
}
diff --git a/drivers/infiniband/hw/hfi1/sdma.h b/drivers/infiniband/hw/hfi1/sdma.h
index d8170fcbfbdd..b023fc461bd5 100644
--- a/drivers/infiniband/hw/hfi1/sdma.h
+++ b/drivers/infiniband/hw/hfi1/sdma.h
@@ -631,14 +631,13 @@ static inline void sdma_txclean(struct hfi1_devdata *dd, struct sdma_txreq *tx)
static inline void _sdma_close_tx(struct hfi1_devdata *dd,
struct sdma_txreq *tx)
{
- tx->descp[tx->num_desc].qw[0] |=
- SDMA_DESC0_LAST_DESC_FLAG;
- tx->descp[tx->num_desc].qw[1] |=
- dd->default_desc1;
+ u16 last_desc = tx->num_desc - 1;
+
+ tx->descp[last_desc].qw[0] |= SDMA_DESC0_LAST_DESC_FLAG;
+ tx->descp[last_desc].qw[1] |= dd->default_desc1;
if (tx->flags & SDMA_TXREQ_F_URGENT)
- tx->descp[tx->num_desc].qw[1] |=
- (SDMA_DESC1_HEAD_TO_HOST_FLAG |
- SDMA_DESC1_INT_REQ_FLAG);
+ tx->descp[last_desc].qw[1] |= (SDMA_DESC1_HEAD_TO_HOST_FLAG |
+ SDMA_DESC1_INT_REQ_FLAG);
}
static inline int _sdma_txadd_daddr(
@@ -655,6 +654,7 @@ static inline int _sdma_txadd_daddr(
type,
addr, len);
WARN_ON(len > tx->tlen);
+ tx->num_desc++;
tx->tlen -= len;
/* special cases for last */
if (!tx->tlen) {
@@ -666,7 +666,6 @@ static inline int _sdma_txadd_daddr(
_sdma_close_tx(dd, tx);
}
}
- tx->num_desc++;
return rval;
}
diff --git a/drivers/infiniband/hw/hfi1/user_exp_rcv.c b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
index b02f2f0809c8..96058baf36ed 100644
--- a/drivers/infiniband/hw/hfi1/user_exp_rcv.c
+++ b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
@@ -27,8 +27,7 @@ static bool tid_cover_invalidate(struct mmu_interval_notifier *mni,
const struct mmu_notifier_range *range,
unsigned long cur_seq);
static int program_rcvarray(struct hfi1_filedata *fd, struct tid_user_buf *,
- struct tid_group *grp,
- unsigned int start, u16 count,
+ struct tid_group *grp, u16 count,
u32 *tidlist, unsigned int *tididx,
unsigned int *pmapped);
static int unprogram_rcvarray(struct hfi1_filedata *fd, u32 tidinfo);
@@ -160,16 +159,11 @@ static void unpin_rcv_pages(struct hfi1_filedata *fd,
static int pin_rcv_pages(struct hfi1_filedata *fd, struct tid_user_buf *tidbuf)
{
int pinned;
- unsigned int npages;
+ unsigned int npages = tidbuf->npages;
unsigned long vaddr = tidbuf->vaddr;
struct page **pages = NULL;
struct hfi1_devdata *dd = fd->uctxt->dd;
- /* Get the number of pages the user buffer spans */
- npages = num_user_pages(vaddr, tidbuf->length);
- if (!npages)
- return -EINVAL;
-
if (npages > fd->uctxt->expected_count) {
dd_dev_err(dd, "Expected buffer too big\n");
return -EINVAL;
@@ -196,7 +190,6 @@ static int pin_rcv_pages(struct hfi1_filedata *fd, struct tid_user_buf *tidbuf)
return pinned;
}
tidbuf->pages = pages;
- tidbuf->npages = npages;
fd->tid_n_pinned += pinned;
return pinned;
}
@@ -256,7 +249,7 @@ int hfi1_user_exp_rcv_setup(struct hfi1_filedata *fd,
int ret = 0, need_group = 0, pinned;
struct hfi1_ctxtdata *uctxt = fd->uctxt;
struct hfi1_devdata *dd = uctxt->dd;
- unsigned int ngroups, pageidx = 0, pageset_count,
+ unsigned int ngroups, pageset_count,
tididx = 0, mapped, mapped_pages = 0;
u32 *tidlist = NULL;
struct tid_user_buf *tidbuf;
@@ -274,6 +267,7 @@ int hfi1_user_exp_rcv_setup(struct hfi1_filedata *fd,
mutex_init(&tidbuf->cover_mutex);
tidbuf->vaddr = tinfo->vaddr;
tidbuf->length = tinfo->length;
+ tidbuf->npages = num_user_pages(tidbuf->vaddr, tidbuf->length);
tidbuf->psets = kcalloc(uctxt->expected_count, sizeof(*tidbuf->psets),
GFP_KERNEL);
if (!tidbuf->psets) {
@@ -337,7 +331,7 @@ int hfi1_user_exp_rcv_setup(struct hfi1_filedata *fd,
tid_group_pop(&uctxt->tid_group_list);
ret = program_rcvarray(fd, tidbuf, grp,
- pageidx, dd->rcv_entries.group_size,
+ dd->rcv_entries.group_size,
tidlist, &tididx, &mapped);
/*
* If there was a failure to program the RcvArray
@@ -353,11 +347,10 @@ int hfi1_user_exp_rcv_setup(struct hfi1_filedata *fd,
tid_group_add_tail(grp, &uctxt->tid_full_list);
ngroups--;
- pageidx += ret;
mapped_pages += mapped;
}
- while (pageidx < pageset_count) {
+ while (tididx < pageset_count) {
struct tid_group *grp, *ptr;
/*
* If we don't have any partially used tid groups, check
@@ -379,11 +372,11 @@ int hfi1_user_exp_rcv_setup(struct hfi1_filedata *fd,
*/
list_for_each_entry_safe(grp, ptr, &uctxt->tid_used_list.list,
list) {
- unsigned use = min_t(unsigned, pageset_count - pageidx,
+ unsigned use = min_t(unsigned, pageset_count - tididx,
grp->size - grp->used);
ret = program_rcvarray(fd, tidbuf, grp,
- pageidx, use, tidlist,
+ use, tidlist,
&tididx, &mapped);
if (ret < 0) {
hfi1_cdbg(TID,
@@ -395,11 +388,10 @@ int hfi1_user_exp_rcv_setup(struct hfi1_filedata *fd,
tid_group_move(grp,
&uctxt->tid_used_list,
&uctxt->tid_full_list);
- pageidx += ret;
mapped_pages += mapped;
need_group = 0;
/* Check if we are done so we break out early */
- if (pageidx >= pageset_count)
+ if (tididx >= pageset_count)
break;
} else if (WARN_ON(ret == 0)) {
/*
@@ -643,7 +635,6 @@ static u32 find_phys_blocks(struct tid_user_buf *tidbuf, unsigned int npages)
* struct tid_pageset holding information on physically contiguous
* chunks from the user buffer), and other fields.
* @grp: RcvArray group
- * @start: starting index into sets array
* @count: number of struct tid_pageset's to program
* @tidlist: the array of u32 elements when the information about the
* programmed RcvArray entries is to be encoded.
@@ -663,14 +654,14 @@ static u32 find_phys_blocks(struct tid_user_buf *tidbuf, unsigned int npages)
* number of RcvArray entries programmed.
*/
static int program_rcvarray(struct hfi1_filedata *fd, struct tid_user_buf *tbuf,
- struct tid_group *grp,
- unsigned int start, u16 count,
+ struct tid_group *grp, u16 count,
u32 *tidlist, unsigned int *tididx,
unsigned int *pmapped)
{
struct hfi1_ctxtdata *uctxt = fd->uctxt;
struct hfi1_devdata *dd = uctxt->dd;
u16 idx;
+ unsigned int start = *tididx;
u32 tidinfo = 0, rcventry, useidx = 0;
int mapped = 0;
@@ -715,8 +706,7 @@ static int program_rcvarray(struct hfi1_filedata *fd, struct tid_user_buf *tbuf,
return ret;
mapped += npages;
- tidinfo = rcventry2tidinfo(rcventry - uctxt->expected_base) |
- EXP_TID_SET(LEN, npages);
+ tidinfo = create_tid(rcventry - uctxt->expected_base, npages);
tidlist[(*tididx)++] = tidinfo;
grp->used++;
grp->map |= 1 << useidx++;
@@ -800,20 +790,20 @@ static int unprogram_rcvarray(struct hfi1_filedata *fd, u32 tidinfo)
struct hfi1_ctxtdata *uctxt = fd->uctxt;
struct hfi1_devdata *dd = uctxt->dd;
struct tid_rb_node *node;
- u8 tidctrl = EXP_TID_GET(tidinfo, CTRL);
+ u32 tidctrl = EXP_TID_GET(tidinfo, CTRL);
u32 tididx = EXP_TID_GET(tidinfo, IDX) << 1, rcventry;
- if (tididx >= uctxt->expected_count) {
- dd_dev_err(dd, "Invalid RcvArray entry (%u) index for ctxt %u\n",
- tididx, uctxt->ctxt);
- return -EINVAL;
- }
-
- if (tidctrl == 0x3)
+ if (tidctrl == 0x3 || tidctrl == 0x0)
return -EINVAL;
rcventry = tididx + (tidctrl - 1);
+ if (rcventry >= uctxt->expected_count) {
+ dd_dev_err(dd, "Invalid RcvArray entry (%u) index for ctxt %u\n",
+ rcventry, uctxt->ctxt);
+ return -EINVAL;
+ }
+
node = fd->entry_to_rb[rcventry];
if (!node || node->rcventry != (uctxt->expected_base + rcventry))
return -EBADF;
@@ -925,9 +915,8 @@ static bool tid_rb_invalidate(struct mmu_interval_notifier *mni,
spin_lock(&fdata->invalid_lock);
if (fdata->invalid_tid_idx < uctxt->expected_count) {
fdata->invalid_tids[fdata->invalid_tid_idx] =
- rcventry2tidinfo(node->rcventry - uctxt->expected_base);
- fdata->invalid_tids[fdata->invalid_tid_idx] |=
- EXP_TID_SET(LEN, node->npages);
+ create_tid(node->rcventry - uctxt->expected_base,
+ node->npages);
if (!fdata->invalid_tid_idx) {
unsigned long *ev;
diff --git a/drivers/infiniband/hw/hfi1/user_pages.c b/drivers/infiniband/hw/hfi1/user_pages.c
index 7bce963e2ae6..36aaedc65145 100644
--- a/drivers/infiniband/hw/hfi1/user_pages.c
+++ b/drivers/infiniband/hw/hfi1/user_pages.c
@@ -29,33 +29,52 @@ MODULE_PARM_DESC(cache_size, "Send and receive side cache size limit (in MB)");
bool hfi1_can_pin_pages(struct hfi1_devdata *dd, struct mm_struct *mm,
u32 nlocked, u32 npages)
{
- unsigned long ulimit = rlimit(RLIMIT_MEMLOCK), pinned, cache_limit,
- size = (cache_size * (1UL << 20)); /* convert to bytes */
- unsigned int usr_ctxts =
- dd->num_rcv_contexts - dd->first_dyn_alloc_ctxt;
- bool can_lock = capable(CAP_IPC_LOCK);
+ unsigned long ulimit_pages;
+ unsigned long cache_limit_pages;
+ unsigned int usr_ctxts;
/*
- * Calculate per-cache size. The calculation below uses only a quarter
- * of the available per-context limit. This leaves space for other
- * pinning. Should we worry about shared ctxts?
+ * Perform RLIMIT_MEMLOCK based checks unless CAP_IPC_LOCK is present.
*/
- cache_limit = (ulimit / usr_ctxts) / 4;
-
- /* If ulimit isn't set to "unlimited" and is smaller than cache_size. */
- if (ulimit != (-1UL) && size > cache_limit)
- size = cache_limit;
-
- /* Convert to number of pages */
- size = DIV_ROUND_UP(size, PAGE_SIZE);
-
- pinned = atomic64_read(&mm->pinned_vm);
+ if (!capable(CAP_IPC_LOCK)) {
+ ulimit_pages =
+ DIV_ROUND_DOWN_ULL(rlimit(RLIMIT_MEMLOCK), PAGE_SIZE);
+
+ /*
+ * Pinning these pages would exceed this process's locked memory
+ * limit.
+ */
+ if (atomic64_read(&mm->pinned_vm) + npages > ulimit_pages)
+ return false;
+
+ /*
+ * Only allow 1/4 of the user's RLIMIT_MEMLOCK to be used for HFI
+ * caches. This fraction is then equally distributed among all
+ * existing user contexts. Note that if RLIMIT_MEMLOCK is
+ * 'unlimited' (-1), the value of this limit will be > 2^42 pages
+ * (2^64 / 2^12 / 2^8 / 2^2).
+ *
+ * The effectiveness of this check may be reduced if I/O occurs on
+ * some user contexts before all user contexts are created. This
+ * check assumes that this process is the only one using this
+ * context (e.g., the corresponding fd was not passed to another
+ * process for concurrent access) as there is no per-context,
+ * per-process tracking of pinned pages. It also assumes that each
+ * user context has only one cache to limit.
+ */
+ usr_ctxts = dd->num_rcv_contexts - dd->first_dyn_alloc_ctxt;
+ if (nlocked + npages > (ulimit_pages / usr_ctxts / 4))
+ return false;
+ }
- /* First, check the absolute limit against all pinned pages. */
- if (pinned + npages >= ulimit && !can_lock)
+ /*
+ * Pinning these pages would exceed the size limit for this cache.
+ */
+ cache_limit_pages = cache_size * (1024 * 1024) / PAGE_SIZE;
+ if (nlocked + npages > cache_limit_pages)
return false;
- return ((nlocked + npages) <= size) || can_lock;
+ return true;
}
int hfi1_acquire_user_pages(struct mm_struct *mm, unsigned long vaddr, size_t npages,
diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
index e6e17984553c..7f6d7fc7951d 100644
--- a/drivers/infiniband/hw/hfi1/verbs.c
+++ b/drivers/infiniband/hw/hfi1/verbs.c
@@ -1598,13 +1598,11 @@ static const char * const driver_cntr_names[] = {
"DRIVER_EgrHdrFull"
};
-static DEFINE_MUTEX(cntr_names_lock); /* protects the *_cntr_names bufers */
static struct rdma_stat_desc *dev_cntr_descs;
static struct rdma_stat_desc *port_cntr_descs;
int num_driver_cntrs = ARRAY_SIZE(driver_cntr_names);
static int num_dev_cntrs;
static int num_port_cntrs;
-static int cntr_names_initialized;
/*
* Convert a list of names separated by '\n' into an array of NULL terminated
@@ -1615,8 +1613,8 @@ static int init_cntr_names(const char *names_in, const size_t names_len,
int num_extra_names, int *num_cntrs,
struct rdma_stat_desc **cntr_descs)
{
- struct rdma_stat_desc *q;
- char *names_out, *p;
+ struct rdma_stat_desc *names_out;
+ char *p;
int i, n;
n = 0;
@@ -1624,65 +1622,45 @@ static int init_cntr_names(const char *names_in, const size_t names_len,
if (names_in[i] == '\n')
n++;
- names_out =
- kzalloc((n + num_extra_names) * sizeof(*q) + names_len,
- GFP_KERNEL);
+ names_out = kzalloc((n + num_extra_names) * sizeof(*names_out)
+ + names_len,
+ GFP_KERNEL);
if (!names_out) {
*num_cntrs = 0;
*cntr_descs = NULL;
return -ENOMEM;
}
- p = names_out + (n + num_extra_names) * sizeof(*q);
+ p = (char *)&names_out[n + num_extra_names];
memcpy(p, names_in, names_len);
- q = (struct rdma_stat_desc *)names_out;
for (i = 0; i < n; i++) {
- q[i].name = p;
+ names_out[i].name = p;
p = strchr(p, '\n');
*p++ = '\0';
}
*num_cntrs = n;
- *cntr_descs = (struct rdma_stat_desc *)names_out;
+ *cntr_descs = names_out;
return 0;
}
-static int init_counters(struct ib_device *ibdev)
-{
- struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
- int i, err = 0;
-
- mutex_lock(&cntr_names_lock);
- if (cntr_names_initialized)
- goto out_unlock;
-
- err = init_cntr_names(dd->cntrnames, dd->cntrnameslen, num_driver_cntrs,
- &num_dev_cntrs, &dev_cntr_descs);
- if (err)
- goto out_unlock;
-
- for (i = 0; i < num_driver_cntrs; i++)
- dev_cntr_descs[num_dev_cntrs + i].name = driver_cntr_names[i];
-
- err = init_cntr_names(dd->portcntrnames, dd->portcntrnameslen, 0,
- &num_port_cntrs, &port_cntr_descs);
- if (err) {
- kfree(dev_cntr_descs);
- dev_cntr_descs = NULL;
- goto out_unlock;
- }
- cntr_names_initialized = 1;
-
-out_unlock:
- mutex_unlock(&cntr_names_lock);
- return err;
-}
-
static struct rdma_hw_stats *hfi1_alloc_hw_device_stats(struct ib_device *ibdev)
{
- if (init_counters(ibdev))
- return NULL;
+ if (!dev_cntr_descs) {
+ struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
+ int i, err;
+
+ err = init_cntr_names(dd->cntrnames, dd->cntrnameslen,
+ num_driver_cntrs,
+ &num_dev_cntrs, &dev_cntr_descs);
+ if (err)
+ return NULL;
+
+ for (i = 0; i < num_driver_cntrs; i++)
+ dev_cntr_descs[num_dev_cntrs + i].name =
+ driver_cntr_names[i];
+ }
return rdma_alloc_hw_stats_struct(dev_cntr_descs,
num_dev_cntrs + num_driver_cntrs,
RDMA_HW_STATS_DEFAULT_LIFESPAN);
@@ -1691,8 +1669,16 @@ static struct rdma_hw_stats *hfi1_alloc_hw_device_stats(struct ib_device *ibdev)
static struct rdma_hw_stats *hfi_alloc_hw_port_stats(struct ib_device *ibdev,
u32 port_num)
{
- if (init_counters(ibdev))
- return NULL;
+ if (!port_cntr_descs) {
+ struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
+ int err;
+
+ err = init_cntr_names(dd->portcntrnames, dd->portcntrnameslen,
+ 0,
+ &num_port_cntrs, &port_cntr_descs);
+ if (err)
+ return NULL;
+ }
return rdma_alloc_hw_stats_struct(port_cntr_descs, num_port_cntrs,
RDMA_HW_STATS_DEFAULT_LIFESPAN);
}
@@ -1917,13 +1903,10 @@ void hfi1_unregister_ib_device(struct hfi1_devdata *dd)
del_timer_sync(&dev->mem_timer);
verbs_txreq_exit(dev);
- mutex_lock(&cntr_names_lock);
kfree(dev_cntr_descs);
kfree(port_cntr_descs);
dev_cntr_descs = NULL;
port_cntr_descs = NULL;
- cntr_names_initialized = 0;
- mutex_unlock(&cntr_names_lock);
}
void hfi1_cnp_rcv(struct hfi1_packet *packet)
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index f701cc86896b..84239b907de2 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -144,6 +144,7 @@ enum {
HNS_ROCE_CAP_FLAG_DIRECT_WQE = BIT(12),
HNS_ROCE_CAP_FLAG_SDI_MODE = BIT(14),
HNS_ROCE_CAP_FLAG_STASH = BIT(17),
+ HNS_ROCE_CAP_FLAG_CQE_INLINE = BIT(19),
};
#define HNS_ROCE_DB_TYPE_COUNT 2
@@ -567,21 +568,6 @@ struct hns_roce_mbox_msg {
struct hns_roce_dev;
-struct hns_roce_rinl_sge {
- void *addr;
- u32 len;
-};
-
-struct hns_roce_rinl_wqe {
- struct hns_roce_rinl_sge *sg_list;
- u32 sge_cnt;
-};
-
-struct hns_roce_rinl_buf {
- struct hns_roce_rinl_wqe *wqe_list;
- u32 wqe_cnt;
-};
-
enum {
HNS_ROCE_FLUSH_FLAG = 0,
};
@@ -632,7 +618,6 @@ struct hns_roce_qp {
/* 0: flush needed, 1: unneeded */
unsigned long flush_flag;
struct hns_roce_work flush_work;
- struct hns_roce_rinl_buf rq_inl_buf;
struct list_head node; /* all qps are on a list */
struct list_head rq_node; /* all recv qps are on a list */
struct list_head sq_node; /* all send qps are on a list */
@@ -887,7 +872,7 @@ struct hns_roce_hw {
u32 step_idx);
int (*modify_qp)(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
int attr_mask, enum ib_qp_state cur_state,
- enum ib_qp_state new_state);
+ enum ib_qp_state new_state, struct ib_udata *udata);
int (*qp_flow_control_init)(struct hns_roce_dev *hr_dev,
struct hns_roce_qp *hr_qp);
void (*dereg_mr)(struct hns_roce_dev *hr_dev);
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index b2421883993b..dbf97fe5948f 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -821,22 +821,10 @@ static void fill_recv_sge_to_wqe(const struct ib_recv_wr *wr, void *wqe,
static void fill_rq_wqe(struct hns_roce_qp *hr_qp, const struct ib_recv_wr *wr,
u32 wqe_idx, u32 max_sge)
{
- struct hns_roce_rinl_sge *sge_list;
void *wqe = NULL;
- u32 i;
wqe = hns_roce_get_recv_wqe(hr_qp, wqe_idx);
fill_recv_sge_to_wqe(wr, wqe, max_sge, hr_qp->rq.rsv_sge);
-
- /* rq support inline data */
- if (hr_qp->rq_inl_buf.wqe_cnt) {
- sge_list = hr_qp->rq_inl_buf.wqe_list[wqe_idx].sg_list;
- hr_qp->rq_inl_buf.wqe_list[wqe_idx].sge_cnt = (u32)wr->num_sge;
- for (i = 0; i < wr->num_sge; i++) {
- sge_list[i].addr = (void *)(u64)wr->sg_list[i].addr;
- sge_list[i].len = wr->sg_list[i].length;
- }
- }
}
static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
@@ -2849,7 +2837,7 @@ static int free_mr_modify_rsv_qp(struct hns_roce_dev *hr_dev,
attr->port_num = 1;
attr->qp_access_flags = IB_ACCESS_REMOTE_WRITE;
ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, attr, mask, IB_QPS_INIT,
- IB_QPS_INIT);
+ IB_QPS_INIT, NULL);
if (ret) {
ibdev_err(ibdev, "failed to modify qp to init, ret = %d.\n",
ret);
@@ -2871,7 +2859,7 @@ static int free_mr_modify_rsv_qp(struct hns_roce_dev *hr_dev,
rdma_ah_set_sl(&attr->ah_attr, (u8)sl_num);
ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, attr, mask, IB_QPS_INIT,
- IB_QPS_RTR);
+ IB_QPS_RTR, NULL);
hr_dev->loop_idc = loopback;
if (ret) {
ibdev_err(ibdev, "failed to modify qp to rtr, ret = %d.\n",
@@ -2886,7 +2874,7 @@ static int free_mr_modify_rsv_qp(struct hns_roce_dev *hr_dev,
attr->retry_cnt = HNS_ROCE_FREE_MR_USED_QP_RETRY_CNT;
attr->timeout = HNS_ROCE_FREE_MR_USED_QP_TIMEOUT;
ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, attr, mask, IB_QPS_RTR,
- IB_QPS_RTS);
+ IB_QPS_RTS, NULL);
if (ret)
ibdev_err(ibdev, "failed to modify qp to rts, ret = %d.\n",
ret);
@@ -3730,39 +3718,6 @@ static int hns_roce_v2_req_notify_cq(struct ib_cq *ibcq,
return 0;
}
-static int hns_roce_handle_recv_inl_wqe(struct hns_roce_v2_cqe *cqe,
- struct hns_roce_qp *qp,
- struct ib_wc *wc)
-{
- struct hns_roce_rinl_sge *sge_list;
- u32 wr_num, wr_cnt, sge_num;
- u32 sge_cnt, data_len, size;
- void *wqe_buf;
-
- wr_num = hr_reg_read(cqe, CQE_WQE_IDX);
- wr_cnt = wr_num & (qp->rq.wqe_cnt - 1);
-
- sge_list = qp->rq_inl_buf.wqe_list[wr_cnt].sg_list;
- sge_num = qp->rq_inl_buf.wqe_list[wr_cnt].sge_cnt;
- wqe_buf = hns_roce_get_recv_wqe(qp, wr_cnt);
- data_len = wc->byte_len;
-
- for (sge_cnt = 0; (sge_cnt < sge_num) && (data_len); sge_cnt++) {
- size = min(sge_list[sge_cnt].len, data_len);
- memcpy((void *)sge_list[sge_cnt].addr, wqe_buf, size);
-
- data_len -= size;
- wqe_buf += size;
- }
-
- if (unlikely(data_len)) {
- wc->status = IB_WC_LOC_LEN_ERR;
- return -EAGAIN;
- }
-
- return 0;
-}
-
static int sw_comp(struct hns_roce_qp *hr_qp, struct hns_roce_wq *wq,
int num_entries, struct ib_wc *wc)
{
@@ -3974,22 +3929,10 @@ static void fill_send_wc(struct ib_wc *wc, struct hns_roce_v2_cqe *cqe)
wc->opcode = ib_opcode;
}
-static inline bool is_rq_inl_enabled(struct ib_wc *wc, u32 hr_opcode,
- struct hns_roce_v2_cqe *cqe)
-{
- return wc->qp->qp_type != IB_QPT_UD && wc->qp->qp_type != IB_QPT_GSI &&
- (hr_opcode == HNS_ROCE_V2_OPCODE_SEND ||
- hr_opcode == HNS_ROCE_V2_OPCODE_SEND_WITH_IMM ||
- hr_opcode == HNS_ROCE_V2_OPCODE_SEND_WITH_INV) &&
- hr_reg_read(cqe, CQE_RQ_INLINE);
-}
-
static int fill_recv_wc(struct ib_wc *wc, struct hns_roce_v2_cqe *cqe)
{
- struct hns_roce_qp *qp = to_hr_qp(wc->qp);
u32 hr_opcode;
int ib_opcode;
- int ret;
wc->byte_len = le32_to_cpu(cqe->byte_cnt);
@@ -4014,12 +3957,6 @@ static int fill_recv_wc(struct ib_wc *wc, struct hns_roce_v2_cqe *cqe)
else
wc->opcode = ib_opcode;
- if (is_rq_inl_enabled(wc, hr_opcode, cqe)) {
- ret = hns_roce_handle_recv_inl_wqe(cqe, qp, wc);
- if (unlikely(ret))
- return ret;
- }
-
wc->sl = hr_reg_read(cqe, CQE_SL);
wc->src_qp = hr_reg_read(cqe, CQE_RMT_QPN);
wc->slid = 0;
@@ -4445,10 +4382,6 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp,
hr_reg_write(context, QPC_RQ_DB_RECORD_ADDR_H,
upper_32_bits(hr_qp->rdb.dma));
- if (ibqp->qp_type != IB_QPT_UD && ibqp->qp_type != IB_QPT_GSI)
- hr_reg_write_bool(context, QPC_RQIE,
- hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE);
-
hr_reg_write(context, QPC_RX_CQN, get_cqn(ibqp->recv_cq));
if (ibqp->srq) {
@@ -4639,8 +4572,11 @@ static inline enum ib_mtu get_mtu(struct ib_qp *ibqp,
static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
const struct ib_qp_attr *attr, int attr_mask,
struct hns_roce_v2_qp_context *context,
- struct hns_roce_v2_qp_context *qpc_mask)
+ struct hns_roce_v2_qp_context *qpc_mask,
+ struct ib_udata *udata)
{
+ struct hns_roce_ucontext *uctx = rdma_udata_to_drv_context(udata,
+ struct hns_roce_ucontext, ibucontext);
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
struct ib_device *ibdev = &hr_dev->ib_dev;
@@ -4760,6 +4696,26 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
hr_reg_write(context, QPC_LP_SGEN_INI, 3);
hr_reg_clear(qpc_mask, QPC_LP_SGEN_INI);
+ if (udata && ibqp->qp_type == IB_QPT_RC &&
+ (uctx->config & HNS_ROCE_RQ_INLINE_FLAGS)) {
+ hr_reg_write_bool(context, QPC_RQIE,
+ hr_dev->caps.flags &
+ HNS_ROCE_CAP_FLAG_RQ_INLINE);
+ hr_reg_clear(qpc_mask, QPC_RQIE);
+ }
+
+ if (udata &&
+ (ibqp->qp_type == IB_QPT_RC || ibqp->qp_type == IB_QPT_XRC_TGT) &&
+ (uctx->config & HNS_ROCE_CQE_INLINE_FLAGS)) {
+ hr_reg_write_bool(context, QPC_CQEIE,
+ hr_dev->caps.flags &
+ HNS_ROCE_CAP_FLAG_CQE_INLINE);
+ hr_reg_clear(qpc_mask, QPC_CQEIE);
+
+ hr_reg_write(context, QPC_CQEIS, 0);
+ hr_reg_clear(qpc_mask, QPC_CQEIS);
+ }
+
return 0;
}
@@ -5107,7 +5063,8 @@ static int hns_roce_v2_set_abs_fields(struct ib_qp *ibqp,
enum ib_qp_state cur_state,
enum ib_qp_state new_state,
struct hns_roce_v2_qp_context *context,
- struct hns_roce_v2_qp_context *qpc_mask)
+ struct hns_roce_v2_qp_context *qpc_mask,
+ struct ib_udata *udata)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
int ret = 0;
@@ -5124,7 +5081,7 @@ static int hns_roce_v2_set_abs_fields(struct ib_qp *ibqp,
modify_qp_init_to_init(ibqp, attr, context, qpc_mask);
} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
ret = modify_qp_init_to_rtr(ibqp, attr, attr_mask, context,
- qpc_mask);
+ qpc_mask, udata);
} else if (cur_state == IB_QPS_RTR && new_state == IB_QPS_RTS) {
ret = modify_qp_rtr_to_rts(ibqp, attr, attr_mask, context,
qpc_mask);
@@ -5329,7 +5286,7 @@ static void v2_set_flushed_fields(struct ib_qp *ibqp,
static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
const struct ib_qp_attr *attr,
int attr_mask, enum ib_qp_state cur_state,
- enum ib_qp_state new_state)
+ enum ib_qp_state new_state, struct ib_udata *udata)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
@@ -5352,7 +5309,7 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
memset(qpc_mask, 0xff, hr_dev->caps.qpc_sz);
ret = hns_roce_v2_set_abs_fields(ibqp, attr, attr_mask, cur_state,
- new_state, context, qpc_mask);
+ new_state, context, qpc_mask, udata);
if (ret)
goto out;
@@ -5555,7 +5512,7 @@ static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
if (modify_qp_is_ok(hr_qp)) {
/* Modify qp to reset before destroying qp */
ret = hns_roce_v2_modify_qp(&hr_qp->ibqp, NULL, 0,
- hr_qp->state, IB_QPS_RESET);
+ hr_qp->state, IB_QPS_RESET, udata);
if (ret)
ibdev_err(ibdev,
"failed to modify QP to RST, ret = %d.\n",
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
index b1b3e1e0b84e..af9d00225cdf 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
@@ -531,7 +531,8 @@ struct hns_roce_v2_qp_context {
#define QPC_RQ_RTY_TX_ERR QPC_FIELD_LOC(607, 607)
#define QPC_RX_CQN QPC_FIELD_LOC(631, 608)
#define QPC_XRC_QP_TYPE QPC_FIELD_LOC(632, 632)
-#define QPC_RSV3 QPC_FIELD_LOC(634, 633)
+#define QPC_CQEIE QPC_FIELD_LOC(633, 633)
+#define QPC_CQEIS QPC_FIELD_LOC(634, 634)
#define QPC_MIN_RNR_TIME QPC_FIELD_LOC(639, 635)
#define QPC_RQ_PRODUCER_IDX QPC_FIELD_LOC(655, 640)
#define QPC_RQ_CONSUMER_IDX QPC_FIELD_LOC(671, 656)
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index 8ba68ac12388..485e110ca433 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -379,6 +379,18 @@ static int hns_roce_alloc_ucontext(struct ib_ucontext *uctx,
resp.max_inline_data = hr_dev->caps.max_sq_inline;
}
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) {
+ context->config |= ucmd.config & HNS_ROCE_RQ_INLINE_FLAGS;
+ if (context->config & HNS_ROCE_RQ_INLINE_FLAGS)
+ resp.config |= HNS_ROCE_RSP_RQ_INLINE_FLAGS;
+ }
+
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_CQE_INLINE) {
+ context->config |= ucmd.config & HNS_ROCE_CQE_INLINE_FLAGS;
+ if (context->config & HNS_ROCE_CQE_INLINE_FLAGS)
+ resp.config |= HNS_ROCE_RSP_CQE_INLINE_FLAGS;
+ }
+
ret = hns_roce_uar_alloc(hr_dev, &context->uar);
if (ret)
goto error_fail_uar_alloc;
@@ -443,14 +455,15 @@ static int hns_roce_mmap(struct ib_ucontext *uctx, struct vm_area_struct *vma)
prot = pgprot_device(vma->vm_page_prot);
break;
default:
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
ret = rdma_user_mmap_io(uctx, vma, pfn, rdma_entry->npages * PAGE_SIZE,
prot, rdma_entry);
+out:
rdma_user_mmap_entry_put(rdma_entry);
-
return ret;
}
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index 0ae335fb205c..d855a917f4cf 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -433,7 +433,6 @@ static int set_rq_size(struct hns_roce_dev *hr_dev, struct ib_qp_cap *cap,
if (!has_rq) {
hr_qp->rq.wqe_cnt = 0;
hr_qp->rq.max_gs = 0;
- hr_qp->rq_inl_buf.wqe_cnt = 0;
cap->max_recv_wr = 0;
cap->max_recv_sge = 0;
@@ -463,12 +462,6 @@ static int set_rq_size(struct hns_roce_dev *hr_dev, struct ib_qp_cap *cap,
hr_qp->rq.max_gs);
hr_qp->rq.wqe_cnt = cnt;
- if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE &&
- hr_qp->ibqp.qp_type != IB_QPT_UD &&
- hr_qp->ibqp.qp_type != IB_QPT_GSI)
- hr_qp->rq_inl_buf.wqe_cnt = cnt;
- else
- hr_qp->rq_inl_buf.wqe_cnt = 0;
cap->max_recv_wr = cnt;
cap->max_recv_sge = hr_qp->rq.max_gs - hr_qp->rq.rsv_sge;
@@ -732,49 +725,6 @@ static int hns_roce_qp_has_rq(struct ib_qp_init_attr *attr)
return 1;
}
-static int alloc_rq_inline_buf(struct hns_roce_qp *hr_qp,
- struct ib_qp_init_attr *init_attr)
-{
- u32 max_recv_sge = init_attr->cap.max_recv_sge;
- u32 wqe_cnt = hr_qp->rq_inl_buf.wqe_cnt;
- struct hns_roce_rinl_wqe *wqe_list;
- int i;
-
- /* allocate recv inline buf */
- wqe_list = kcalloc(wqe_cnt, sizeof(struct hns_roce_rinl_wqe),
- GFP_KERNEL);
- if (!wqe_list)
- goto err;
-
- /* Allocate a continuous buffer for all inline sge we need */
- wqe_list[0].sg_list = kcalloc(wqe_cnt, (max_recv_sge *
- sizeof(struct hns_roce_rinl_sge)),
- GFP_KERNEL);
- if (!wqe_list[0].sg_list)
- goto err_wqe_list;
-
- /* Assign buffers of sg_list to each inline wqe */
- for (i = 1; i < wqe_cnt; i++)
- wqe_list[i].sg_list = &wqe_list[0].sg_list[i * max_recv_sge];
-
- hr_qp->rq_inl_buf.wqe_list = wqe_list;
-
- return 0;
-
-err_wqe_list:
- kfree(wqe_list);
-
-err:
- return -ENOMEM;
-}
-
-static void free_rq_inline_buf(struct hns_roce_qp *hr_qp)
-{
- if (hr_qp->rq_inl_buf.wqe_list)
- kfree(hr_qp->rq_inl_buf.wqe_list[0].sg_list);
- kfree(hr_qp->rq_inl_buf.wqe_list);
-}
-
static int alloc_qp_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
struct ib_qp_init_attr *init_attr,
struct ib_udata *udata, unsigned long addr)
@@ -783,18 +733,6 @@ static int alloc_qp_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
struct hns_roce_buf_attr buf_attr = {};
int ret;
- if (!udata && hr_qp->rq_inl_buf.wqe_cnt) {
- ret = alloc_rq_inline_buf(hr_qp, init_attr);
- if (ret) {
- ibdev_err(ibdev,
- "failed to alloc inline buf, ret = %d.\n",
- ret);
- return ret;
- }
- } else {
- hr_qp->rq_inl_buf.wqe_list = NULL;
- }
-
ret = set_wqe_buf_attr(hr_dev, hr_qp, &buf_attr);
if (ret) {
ibdev_err(ibdev, "failed to split WQE buf, ret = %d.\n", ret);
@@ -814,7 +752,6 @@ static int alloc_qp_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
return 0;
err_inline:
- free_rq_inline_buf(hr_qp);
return ret;
}
@@ -822,7 +759,6 @@ err_inline:
static void free_qp_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
hns_roce_mtr_destroy(hr_dev, &hr_qp->mtr);
- free_rq_inline_buf(hr_qp);
}
static inline bool user_qp_has_sdb(struct hns_roce_dev *hr_dev,
@@ -1410,7 +1346,7 @@ int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
goto out;
ret = hr_dev->hw->modify_qp(ibqp, attr, attr_mask, cur_state,
- new_state);
+ new_state, udata);
out:
mutex_unlock(&hr_qp->mutex);
diff --git a/drivers/infiniband/hw/irdma/cm.c b/drivers/infiniband/hw/irdma/cm.c
index 7b086fe63a24..195aa9ea18b6 100644
--- a/drivers/infiniband/hw/irdma/cm.c
+++ b/drivers/infiniband/hw/irdma/cm.c
@@ -1722,6 +1722,9 @@ static int irdma_add_mqh_4(struct irdma_device *iwdev,
continue;
idev = in_dev_get(ip_dev);
+ if (!idev)
+ continue;
+
in_dev_for_each_ifa_rtnl(ifa, idev) {
ibdev_dbg(&iwdev->ibdev,
"CM: Allocating child CM Listener forIP=%pI4, vlan_id=%d, MAC=%pM\n",
diff --git a/drivers/infiniband/hw/irdma/hw.c b/drivers/infiniband/hw/irdma/hw.c
index ab246447520b..2e1e2bad0401 100644
--- a/drivers/infiniband/hw/irdma/hw.c
+++ b/drivers/infiniband/hw/irdma/hw.c
@@ -483,6 +483,8 @@ static int irdma_save_msix_info(struct irdma_pci_f *rf)
iw_qvlist->num_vectors = rf->msix_count;
if (rf->msix_count <= num_online_cpus())
rf->msix_shared = true;
+ else if (rf->msix_count > num_online_cpus() + 1)
+ rf->msix_count = num_online_cpus() + 1;
pmsix = rf->msix_entries;
for (i = 0, ceq_idx = 0; i < rf->msix_count; i++, iw_qvinfo++) {
diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
index f6973ea55eda..1b2e3e800c9a 100644
--- a/drivers/infiniband/hw/irdma/verbs.c
+++ b/drivers/infiniband/hw/irdma/verbs.c
@@ -2745,6 +2745,162 @@ static int irdma_hwreg_mr(struct irdma_device *iwdev, struct irdma_mr *iwmr,
return ret;
}
+static int irdma_reg_user_mr_type_mem(struct irdma_mr *iwmr, int access)
+{
+ struct irdma_device *iwdev = to_iwdev(iwmr->ibmr.device);
+ struct irdma_pbl *iwpbl = &iwmr->iwpbl;
+ bool use_pbles;
+ u32 stag;
+ int err;
+
+ use_pbles = iwmr->page_cnt != 1;
+
+ err = irdma_setup_pbles(iwdev->rf, iwmr, use_pbles, false);
+ if (err)
+ return err;
+
+ if (use_pbles) {
+ err = irdma_check_mr_contiguous(&iwpbl->pble_alloc,
+ iwmr->page_size);
+ if (err) {
+ irdma_free_pble(iwdev->rf->pble_rsrc, &iwpbl->pble_alloc);
+ iwpbl->pbl_allocated = false;
+ }
+ }
+
+ stag = irdma_create_stag(iwdev);
+ if (!stag) {
+ err = -ENOMEM;
+ goto free_pble;
+ }
+
+ iwmr->stag = stag;
+ iwmr->ibmr.rkey = stag;
+ iwmr->ibmr.lkey = stag;
+ err = irdma_hwreg_mr(iwdev, iwmr, access);
+ if (err)
+ goto err_hwreg;
+
+ return 0;
+
+err_hwreg:
+ irdma_free_stag(iwdev, stag);
+
+free_pble:
+ if (iwpbl->pble_alloc.level != PBLE_LEVEL_0 && iwpbl->pbl_allocated)
+ irdma_free_pble(iwdev->rf->pble_rsrc, &iwpbl->pble_alloc);
+
+ return err;
+}
+
+static struct irdma_mr *irdma_alloc_iwmr(struct ib_umem *region,
+ struct ib_pd *pd, u64 virt,
+ enum irdma_memreg_type reg_type)
+{
+ struct irdma_device *iwdev = to_iwdev(pd->device);
+ struct irdma_pbl *iwpbl = NULL;
+ struct irdma_mr *iwmr = NULL;
+ unsigned long pgsz_bitmap;
+
+ iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
+ if (!iwmr)
+ return ERR_PTR(-ENOMEM);
+
+ iwpbl = &iwmr->iwpbl;
+ iwpbl->iwmr = iwmr;
+ iwmr->region = region;
+ iwmr->ibmr.pd = pd;
+ iwmr->ibmr.device = pd->device;
+ iwmr->ibmr.iova = virt;
+ iwmr->type = reg_type;
+
+ pgsz_bitmap = (reg_type == IRDMA_MEMREG_TYPE_MEM) ?
+ iwdev->rf->sc_dev.hw_attrs.page_size_cap : PAGE_SIZE;
+
+ iwmr->page_size = ib_umem_find_best_pgsz(region, pgsz_bitmap, virt);
+ if (unlikely(!iwmr->page_size)) {
+ kfree(iwmr);
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+
+ iwmr->len = region->length;
+ iwpbl->user_base = virt;
+ iwmr->page_cnt = ib_umem_num_dma_blocks(region, iwmr->page_size);
+
+ return iwmr;
+}
+
+static void irdma_free_iwmr(struct irdma_mr *iwmr)
+{
+ kfree(iwmr);
+}
+
+static int irdma_reg_user_mr_type_qp(struct irdma_mem_reg_req req,
+ struct ib_udata *udata,
+ struct irdma_mr *iwmr)
+{
+ struct irdma_device *iwdev = to_iwdev(iwmr->ibmr.device);
+ struct irdma_pbl *iwpbl = &iwmr->iwpbl;
+ struct irdma_ucontext *ucontext = NULL;
+ unsigned long flags;
+ bool use_pbles;
+ u32 total;
+ int err;
+
+ total = req.sq_pages + req.rq_pages + 1;
+ if (total > iwmr->page_cnt)
+ return -EINVAL;
+
+ total = req.sq_pages + req.rq_pages;
+ use_pbles = total > 2;
+ err = irdma_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
+ if (err)
+ return err;
+
+ ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext,
+ ibucontext);
+ spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
+ list_add_tail(&iwpbl->list, &ucontext->qp_reg_mem_list);
+ iwpbl->on_list = true;
+ spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
+
+ return 0;
+}
+
+static int irdma_reg_user_mr_type_cq(struct irdma_mem_reg_req req,
+ struct ib_udata *udata,
+ struct irdma_mr *iwmr)
+{
+ struct irdma_device *iwdev = to_iwdev(iwmr->ibmr.device);
+ struct irdma_pbl *iwpbl = &iwmr->iwpbl;
+ struct irdma_ucontext *ucontext = NULL;
+ u8 shadow_pgcnt = 1;
+ unsigned long flags;
+ bool use_pbles;
+ u32 total;
+ int err;
+
+ if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_CQ_RESIZE)
+ shadow_pgcnt = 0;
+ total = req.cq_pages + shadow_pgcnt;
+ if (total > iwmr->page_cnt)
+ return -EINVAL;
+
+ use_pbles = req.cq_pages > 1;
+ err = irdma_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
+ if (err)
+ return err;
+
+ ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext,
+ ibucontext);
+ spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
+ list_add_tail(&iwpbl->list, &ucontext->cq_reg_mem_list);
+ iwpbl->on_list = true;
+ spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
+
+ return 0;
+}
+
/**
* irdma_reg_user_mr - Register a user memory region
* @pd: ptr of pd
@@ -2760,18 +2916,10 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
{
#define IRDMA_MEM_REG_MIN_REQ_LEN offsetofend(struct irdma_mem_reg_req, sq_pages)
struct irdma_device *iwdev = to_iwdev(pd->device);
- struct irdma_ucontext *ucontext;
- struct irdma_pble_alloc *palloc;
- struct irdma_pbl *iwpbl;
- struct irdma_mr *iwmr;
- struct ib_umem *region;
- struct irdma_mem_reg_req req;
- u32 total, stag = 0;
- u8 shadow_pgcnt = 1;
- bool use_pbles = false;
- unsigned long flags;
- int err = -EINVAL;
- int ret;
+ struct irdma_mem_reg_req req = {};
+ struct ib_umem *region = NULL;
+ struct irdma_mr *iwmr = NULL;
+ int err;
if (len > iwdev->rf->sc_dev.hw_attrs.max_mr_size)
return ERR_PTR(-EINVAL);
@@ -2792,122 +2940,80 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
return ERR_PTR(-EFAULT);
}
- iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
- if (!iwmr) {
+ iwmr = irdma_alloc_iwmr(region, pd, virt, req.reg_type);
+ if (IS_ERR(iwmr)) {
ib_umem_release(region);
- return ERR_PTR(-ENOMEM);
- }
-
- iwpbl = &iwmr->iwpbl;
- iwpbl->iwmr = iwmr;
- iwmr->region = region;
- iwmr->ibmr.pd = pd;
- iwmr->ibmr.device = pd->device;
- iwmr->ibmr.iova = virt;
- iwmr->page_size = PAGE_SIZE;
-
- if (req.reg_type == IRDMA_MEMREG_TYPE_MEM) {
- iwmr->page_size = ib_umem_find_best_pgsz(region,
- iwdev->rf->sc_dev.hw_attrs.page_size_cap,
- virt);
- if (unlikely(!iwmr->page_size)) {
- kfree(iwmr);
- ib_umem_release(region);
- return ERR_PTR(-EOPNOTSUPP);
- }
+ return (struct ib_mr *)iwmr;
}
- iwmr->len = region->length;
- iwpbl->user_base = virt;
- palloc = &iwpbl->pble_alloc;
- iwmr->type = req.reg_type;
- iwmr->page_cnt = ib_umem_num_dma_blocks(region, iwmr->page_size);
switch (req.reg_type) {
case IRDMA_MEMREG_TYPE_QP:
- total = req.sq_pages + req.rq_pages + shadow_pgcnt;
- if (total > iwmr->page_cnt) {
- err = -EINVAL;
- goto error;
- }
- total = req.sq_pages + req.rq_pages;
- use_pbles = (total > 2);
- err = irdma_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
+ err = irdma_reg_user_mr_type_qp(req, udata, iwmr);
if (err)
goto error;
- ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext,
- ibucontext);
- spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
- list_add_tail(&iwpbl->list, &ucontext->qp_reg_mem_list);
- iwpbl->on_list = true;
- spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags);
break;
case IRDMA_MEMREG_TYPE_CQ:
- if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_CQ_RESIZE)
- shadow_pgcnt = 0;
- total = req.cq_pages + shadow_pgcnt;
- if (total > iwmr->page_cnt) {
- err = -EINVAL;
- goto error;
- }
-
- use_pbles = (req.cq_pages > 1);
- err = irdma_handle_q_mem(iwdev, &req, iwpbl, use_pbles);
+ err = irdma_reg_user_mr_type_cq(req, udata, iwmr);
if (err)
goto error;
-
- ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext,
- ibucontext);
- spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
- list_add_tail(&iwpbl->list, &ucontext->cq_reg_mem_list);
- iwpbl->on_list = true;
- spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags);
break;
case IRDMA_MEMREG_TYPE_MEM:
- use_pbles = (iwmr->page_cnt != 1);
-
- err = irdma_setup_pbles(iwdev->rf, iwmr, use_pbles, false);
+ err = irdma_reg_user_mr_type_mem(iwmr, access);
if (err)
goto error;
- if (use_pbles) {
- ret = irdma_check_mr_contiguous(palloc,
- iwmr->page_size);
- if (ret) {
- irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
- iwpbl->pbl_allocated = false;
- }
- }
-
- stag = irdma_create_stag(iwdev);
- if (!stag) {
- err = -ENOMEM;
- goto error;
- }
-
- iwmr->stag = stag;
- iwmr->ibmr.rkey = stag;
- iwmr->ibmr.lkey = stag;
- err = irdma_hwreg_mr(iwdev, iwmr, access);
- if (err) {
- irdma_free_stag(iwdev, stag);
- goto error;
- }
-
break;
default:
+ err = -EINVAL;
goto error;
}
- iwmr->type = req.reg_type;
-
return &iwmr->ibmr;
-
error:
- if (palloc->level != PBLE_LEVEL_0 && iwpbl->pbl_allocated)
- irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
ib_umem_release(region);
- kfree(iwmr);
+ irdma_free_iwmr(iwmr);
+
+ return ERR_PTR(err);
+}
+
+static struct ib_mr *irdma_reg_user_mr_dmabuf(struct ib_pd *pd, u64 start,
+ u64 len, u64 virt,
+ int fd, int access,
+ struct ib_udata *udata)
+{
+ struct irdma_device *iwdev = to_iwdev(pd->device);
+ struct ib_umem_dmabuf *umem_dmabuf;
+ struct irdma_mr *iwmr;
+ int err;
+
+ if (len > iwdev->rf->sc_dev.hw_attrs.max_mr_size)
+ return ERR_PTR(-EINVAL);
+
+ umem_dmabuf = ib_umem_dmabuf_get_pinned(pd->device, start, len, fd, access);
+ if (IS_ERR(umem_dmabuf)) {
+ err = PTR_ERR(umem_dmabuf);
+ ibdev_dbg(&iwdev->ibdev, "Failed to get dmabuf umem[%d]\n", err);
+ return ERR_PTR(err);
+ }
+
+ iwmr = irdma_alloc_iwmr(&umem_dmabuf->umem, pd, virt, IRDMA_MEMREG_TYPE_MEM);
+ if (IS_ERR(iwmr)) {
+ err = PTR_ERR(iwmr);
+ goto err_release;
+ }
+
+ err = irdma_reg_user_mr_type_mem(iwmr, access);
+ if (err)
+ goto err_iwmr;
+
+ return &iwmr->ibmr;
+
+err_iwmr:
+ irdma_free_iwmr(iwmr);
+
+err_release:
+ ib_umem_release(&umem_dmabuf->umem);
return ERR_PTR(err);
}
@@ -4418,6 +4524,7 @@ static const struct ib_device_ops irdma_dev_ops = {
.query_port = irdma_query_port,
.query_qp = irdma_query_qp,
.reg_user_mr = irdma_reg_user_mr,
+ .reg_user_mr_dmabuf = irdma_reg_user_mr_dmabuf,
.req_notify_cq = irdma_req_notify_cq,
.resize_cq = irdma_resize_cq,
INIT_RDMA_OBJ_SIZE(ib_pd, irdma_pd, ibpd),
diff --git a/drivers/infiniband/hw/mana/main.c b/drivers/infiniband/hw/mana/main.c
index 8b3bc302d6f3..7be4c3adb4e2 100644
--- a/drivers/infiniband/hw/mana/main.c
+++ b/drivers/infiniband/hw/mana/main.c
@@ -249,7 +249,8 @@ static int
mana_ib_gd_first_dma_region(struct mana_ib_dev *dev,
struct gdma_context *gc,
struct gdma_create_dma_region_req *create_req,
- size_t num_pages, mana_handle_t *gdma_region)
+ size_t num_pages, mana_handle_t *gdma_region,
+ u32 expected_status)
{
struct gdma_create_dma_region_resp create_resp = {};
unsigned int create_req_msg_size;
@@ -261,7 +262,7 @@ mana_ib_gd_first_dma_region(struct mana_ib_dev *dev,
err = mana_gd_send_request(gc, create_req_msg_size, create_req,
sizeof(create_resp), &create_resp);
- if (err || create_resp.hdr.status) {
+ if (err || create_resp.hdr.status != expected_status) {
ibdev_dbg(&dev->ib_dev,
"Failed to create DMA region: %d, 0x%x\n",
err, create_resp.hdr.status);
@@ -372,14 +373,21 @@ int mana_ib_gd_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
page_addr_list = create_req->page_addr_list;
rdma_umem_for_each_dma_block(umem, &biter, page_sz) {
+ u32 expected_status = 0;
+
page_addr_list[tail++] = rdma_block_iter_dma_address(&biter);
if (tail < num_pages_to_handle)
continue;
+ if (num_pages_processed + num_pages_to_handle <
+ num_pages_total)
+ expected_status = GDMA_STATUS_MORE_ENTRIES;
+
if (!num_pages_processed) {
/* First create message */
err = mana_ib_gd_first_dma_region(dev, gc, create_req,
- tail, gdma_region);
+ tail, gdma_region,
+ expected_status);
if (err)
goto out;
@@ -392,14 +400,8 @@ int mana_ib_gd_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
page_addr_list = add_req->page_addr_list;
} else {
/* Subsequent create messages */
- u32 expected_s = 0;
-
- if (num_pages_processed + num_pages_to_handle <
- num_pages_total)
- expected_s = GDMA_STATUS_MORE_ENTRIES;
-
err = mana_ib_gd_add_dma_region(dev, gc, add_req, tail,
- expected_s);
+ expected_status);
if (err)
break;
}
diff --git a/drivers/infiniband/hw/mana/qp.c b/drivers/infiniband/hw/mana/qp.c
index ea15ec77e321..54b61930a7fd 100644
--- a/drivers/infiniband/hw/mana/qp.c
+++ b/drivers/infiniband/hw/mana/qp.c
@@ -289,7 +289,7 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
/* IB ports start with 1, MANA Ethernet ports start with 0 */
port = ucmd.port;
- if (ucmd.port > mc->num_ports)
+ if (port < 1 || port > mc->num_ports)
return -EINVAL;
if (attr->cap.max_send_wr > MAX_SEND_BUFFERS_PER_QUEUE) {
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index dceebcd885bb..b18e9f2adc82 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -3303,6 +3303,10 @@ static int __init mlx4_ib_init(void)
if (!wq)
return -ENOMEM;
+ err = mlx4_ib_qp_event_init();
+ if (err)
+ goto clean_qp_event;
+
err = mlx4_ib_cm_init();
if (err)
goto clean_wq;
@@ -3324,6 +3328,9 @@ clean_cm:
mlx4_ib_cm_destroy();
clean_wq:
+ mlx4_ib_qp_event_cleanup();
+
+clean_qp_event:
destroy_workqueue(wq);
return err;
}
@@ -3333,6 +3340,7 @@ static void __exit mlx4_ib_cleanup(void)
mlx4_unregister_interface(&mlx4_ib_interface);
mlx4_ib_mcg_destroy();
mlx4_ib_cm_destroy();
+ mlx4_ib_qp_event_cleanup();
destroy_workqueue(wq);
}
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index 6a3b0f121045..17fee1e73a45 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -940,4 +940,7 @@ int mlx4_ib_umem_calc_optimal_mtt_size(struct ib_umem *umem, u64 start_va,
int mlx4_ib_cm_init(void);
void mlx4_ib_cm_destroy(void);
+int mlx4_ib_qp_event_init(void);
+void mlx4_ib_qp_event_cleanup(void);
+
#endif /* MLX4_IB_H */
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index b17d6ebc5b70..884825b2e5f7 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -102,6 +102,14 @@ enum mlx4_ib_source_type {
MLX4_IB_RWQ_SRC = 1,
};
+struct mlx4_ib_qp_event_work {
+ struct work_struct work;
+ struct mlx4_qp *qp;
+ enum mlx4_event type;
+};
+
+static struct workqueue_struct *mlx4_ib_qp_event_wq;
+
static int is_tunnel_qp(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
{
if (!mlx4_is_master(dev->dev))
@@ -200,50 +208,77 @@ static void stamp_send_wqe(struct mlx4_ib_qp *qp, int n)
}
}
+static void mlx4_ib_handle_qp_event(struct work_struct *_work)
+{
+ struct mlx4_ib_qp_event_work *qpe_work =
+ container_of(_work, struct mlx4_ib_qp_event_work, work);
+ struct ib_qp *ibqp = &to_mibqp(qpe_work->qp)->ibqp;
+ struct ib_event event = {};
+
+ event.device = ibqp->device;
+ event.element.qp = ibqp;
+
+ switch (qpe_work->type) {
+ case MLX4_EVENT_TYPE_PATH_MIG:
+ event.event = IB_EVENT_PATH_MIG;
+ break;
+ case MLX4_EVENT_TYPE_COMM_EST:
+ event.event = IB_EVENT_COMM_EST;
+ break;
+ case MLX4_EVENT_TYPE_SQ_DRAINED:
+ event.event = IB_EVENT_SQ_DRAINED;
+ break;
+ case MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE:
+ event.event = IB_EVENT_QP_LAST_WQE_REACHED;
+ break;
+ case MLX4_EVENT_TYPE_WQ_CATAS_ERROR:
+ event.event = IB_EVENT_QP_FATAL;
+ break;
+ case MLX4_EVENT_TYPE_PATH_MIG_FAILED:
+ event.event = IB_EVENT_PATH_MIG_ERR;
+ break;
+ case MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
+ event.event = IB_EVENT_QP_REQ_ERR;
+ break;
+ case MLX4_EVENT_TYPE_WQ_ACCESS_ERROR:
+ event.event = IB_EVENT_QP_ACCESS_ERR;
+ break;
+ default:
+ pr_warn("Unexpected event type %d on QP %06x\n",
+ qpe_work->type, qpe_work->qp->qpn);
+ goto out;
+ }
+
+ ibqp->event_handler(&event, ibqp->qp_context);
+
+out:
+ mlx4_put_qp(qpe_work->qp);
+ kfree(qpe_work);
+}
+
static void mlx4_ib_qp_event(struct mlx4_qp *qp, enum mlx4_event type)
{
- struct ib_event event;
struct ib_qp *ibqp = &to_mibqp(qp)->ibqp;
+ struct mlx4_ib_qp_event_work *qpe_work;
if (type == MLX4_EVENT_TYPE_PATH_MIG)
to_mibqp(qp)->port = to_mibqp(qp)->alt_port;
- if (ibqp->event_handler) {
- event.device = ibqp->device;
- event.element.qp = ibqp;
- switch (type) {
- case MLX4_EVENT_TYPE_PATH_MIG:
- event.event = IB_EVENT_PATH_MIG;
- break;
- case MLX4_EVENT_TYPE_COMM_EST:
- event.event = IB_EVENT_COMM_EST;
- break;
- case MLX4_EVENT_TYPE_SQ_DRAINED:
- event.event = IB_EVENT_SQ_DRAINED;
- break;
- case MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE:
- event.event = IB_EVENT_QP_LAST_WQE_REACHED;
- break;
- case MLX4_EVENT_TYPE_WQ_CATAS_ERROR:
- event.event = IB_EVENT_QP_FATAL;
- break;
- case MLX4_EVENT_TYPE_PATH_MIG_FAILED:
- event.event = IB_EVENT_PATH_MIG_ERR;
- break;
- case MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
- event.event = IB_EVENT_QP_REQ_ERR;
- break;
- case MLX4_EVENT_TYPE_WQ_ACCESS_ERROR:
- event.event = IB_EVENT_QP_ACCESS_ERR;
- break;
- default:
- pr_warn("Unexpected event type %d "
- "on QP %06x\n", type, qp->qpn);
- return;
- }
+ if (!ibqp->event_handler)
+ goto out_no_handler;
- ibqp->event_handler(&event, ibqp->qp_context);
- }
+ qpe_work = kzalloc(sizeof(*qpe_work), GFP_ATOMIC);
+ if (!qpe_work)
+ goto out_no_handler;
+
+ qpe_work->qp = qp;
+ qpe_work->type = type;
+ INIT_WORK(&qpe_work->work, mlx4_ib_handle_qp_event);
+ queue_work(mlx4_ib_qp_event_wq, &qpe_work->work);
+ return;
+
+out_no_handler:
+ mlx4_put_qp(qp);
}
static void mlx4_ib_wq_event(struct mlx4_qp *qp, enum mlx4_event type)
@@ -4468,3 +4503,17 @@ void mlx4_ib_drain_rq(struct ib_qp *qp)
handle_drain_completion(cq, &rdrain, dev);
}
+
+int mlx4_ib_qp_event_init(void)
+{
+ mlx4_ib_qp_event_wq = alloc_ordered_workqueue("mlx4_ib_qp_event_wq", 0);
+ if (!mlx4_ib_qp_event_wq)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void mlx4_ib_qp_event_cleanup(void)
+{
+ destroy_workqueue(mlx4_ib_qp_event_wq);
+}
diff --git a/drivers/infiniband/hw/mlx5/cmd.c b/drivers/infiniband/hw/mlx5/cmd.c
index ff3742b0460a..1d0c8d5e745b 100644
--- a/drivers/infiniband/hw/mlx5/cmd.c
+++ b/drivers/infiniband/hw/mlx5/cmd.c
@@ -5,34 +5,41 @@
#include "cmd.h"
-int mlx5_cmd_dump_fill_mkey(struct mlx5_core_dev *dev, u32 *mkey)
+int mlx5r_cmd_query_special_mkeys(struct mlx5_ib_dev *dev)
{
u32 out[MLX5_ST_SZ_DW(query_special_contexts_out)] = {};
u32 in[MLX5_ST_SZ_DW(query_special_contexts_in)] = {};
+ bool is_terminate, is_dump, is_null;
int err;
- MLX5_SET(query_special_contexts_in, in, opcode,
- MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS);
- err = mlx5_cmd_exec_inout(dev, query_special_contexts, in, out);
- if (!err)
- *mkey = MLX5_GET(query_special_contexts_out, out,
- dump_fill_mkey);
- return err;
-}
+ is_terminate = MLX5_CAP_GEN(dev->mdev, terminate_scatter_list_mkey);
+ is_dump = MLX5_CAP_GEN(dev->mdev, dump_fill_mkey);
+ is_null = MLX5_CAP_GEN(dev->mdev, null_mkey);
-int mlx5_cmd_null_mkey(struct mlx5_core_dev *dev, u32 *null_mkey)
-{
- u32 out[MLX5_ST_SZ_DW(query_special_contexts_out)] = {};
- u32 in[MLX5_ST_SZ_DW(query_special_contexts_in)] = {};
- int err;
+ dev->mkeys.terminate_scatter_list_mkey = MLX5_TERMINATE_SCATTER_LIST_LKEY;
+ if (!is_terminate && !is_dump && !is_null)
+ return 0;
MLX5_SET(query_special_contexts_in, in, opcode,
MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS);
- err = mlx5_cmd_exec_inout(dev, query_special_contexts, in, out);
- if (!err)
- *null_mkey = MLX5_GET(query_special_contexts_out, out,
- null_mkey);
- return err;
+ err = mlx5_cmd_exec_inout(dev->mdev, query_special_contexts, in, out);
+ if (err)
+ return err;
+
+ if (is_dump)
+ dev->mkeys.dump_fill_mkey = MLX5_GET(query_special_contexts_out,
+ out, dump_fill_mkey);
+
+ if (is_null)
+ dev->mkeys.null_mkey = cpu_to_be32(
+ MLX5_GET(query_special_contexts_out, out, null_mkey));
+
+ if (is_terminate)
+ dev->mkeys.terminate_scatter_list_mkey =
+ cpu_to_be32(MLX5_GET(query_special_contexts_out, out,
+ terminate_scatter_list_mkey));
+
+ return 0;
}
int mlx5_cmd_query_cong_params(struct mlx5_core_dev *dev, int cong_point,
diff --git a/drivers/infiniband/hw/mlx5/cmd.h b/drivers/infiniband/hw/mlx5/cmd.h
index ee46638db5de..93a971a40d11 100644
--- a/drivers/infiniband/hw/mlx5/cmd.h
+++ b/drivers/infiniband/hw/mlx5/cmd.h
@@ -37,8 +37,7 @@
#include <linux/kernel.h>
#include <linux/mlx5/driver.h>
-int mlx5_cmd_dump_fill_mkey(struct mlx5_core_dev *dev, u32 *mkey);
-int mlx5_cmd_null_mkey(struct mlx5_core_dev *dev, u32 *null_mkey);
+int mlx5r_cmd_query_special_mkeys(struct mlx5_ib_dev *dev);
int mlx5_cmd_query_cong_params(struct mlx5_core_dev *dev, int cong_point,
void *out);
int mlx5_cmd_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn, u16 uid);
diff --git a/drivers/infiniband/hw/mlx5/cong.c b/drivers/infiniband/hw/mlx5/cong.c
index 290ea8ac3838..f87531318feb 100644
--- a/drivers/infiniband/hw/mlx5/cong.c
+++ b/drivers/infiniband/hw/mlx5/cong.c
@@ -38,6 +38,7 @@
enum mlx5_ib_cong_node_type {
MLX5_IB_RROCE_ECN_RP = 1,
MLX5_IB_RROCE_ECN_NP = 2,
+ MLX5_IB_RROCE_GENERAL = 3,
};
static const char * const mlx5_ib_dbg_cc_name[] = {
@@ -61,6 +62,8 @@ static const char * const mlx5_ib_dbg_cc_name[] = {
"np_cnp_dscp",
"np_cnp_prio_mode",
"np_cnp_prio",
+ "rtt_resp_dscp_valid",
+ "rtt_resp_dscp",
};
#define MLX5_IB_RP_CLAMP_TGT_RATE_ATTR BIT(1)
@@ -84,14 +87,18 @@ static const char * const mlx5_ib_dbg_cc_name[] = {
#define MLX5_IB_NP_CNP_DSCP_ATTR BIT(3)
#define MLX5_IB_NP_CNP_PRIO_MODE_ATTR BIT(4)
+#define MLX5_IB_GENERAL_RTT_RESP_DSCP_ATTR BIT(0)
+
static enum mlx5_ib_cong_node_type
mlx5_ib_param_to_node(enum mlx5_ib_dbg_cc_types param_offset)
{
- if (param_offset >= MLX5_IB_DBG_CC_RP_CLAMP_TGT_RATE &&
- param_offset <= MLX5_IB_DBG_CC_RP_GD)
+ if (param_offset <= MLX5_IB_DBG_CC_RP_GD)
return MLX5_IB_RROCE_ECN_RP;
- else
+
+ if (param_offset <= MLX5_IB_DBG_CC_NP_CNP_PRIO)
return MLX5_IB_RROCE_ECN_NP;
+
+ return MLX5_IB_RROCE_GENERAL;
}
static u32 mlx5_get_cc_param_val(void *field, int offset)
@@ -157,6 +164,12 @@ static u32 mlx5_get_cc_param_val(void *field, int offset)
case MLX5_IB_DBG_CC_NP_CNP_PRIO:
return MLX5_GET(cong_control_r_roce_ecn_np, field,
cnp_802p_prio);
+ case MLX5_IB_DBG_CC_GENERAL_RTT_RESP_DSCP_VALID:
+ return MLX5_GET(cong_control_r_roce_general, field,
+ rtt_resp_dscp_valid);
+ case MLX5_IB_DBG_CC_GENERAL_RTT_RESP_DSCP:
+ return MLX5_GET(cong_control_r_roce_general, field,
+ rtt_resp_dscp);
default:
return 0;
}
@@ -264,6 +277,15 @@ static void mlx5_ib_set_cc_param_mask_val(void *field, int offset,
MLX5_SET(cong_control_r_roce_ecn_np, field, cnp_prio_mode, 0);
MLX5_SET(cong_control_r_roce_ecn_np, field, cnp_802p_prio, var);
break;
+ case MLX5_IB_DBG_CC_GENERAL_RTT_RESP_DSCP_VALID:
+ *attr_mask |= MLX5_IB_GENERAL_RTT_RESP_DSCP_ATTR;
+ MLX5_SET(cong_control_r_roce_general, field, rtt_resp_dscp_valid, var);
+ break;
+ case MLX5_IB_DBG_CC_GENERAL_RTT_RESP_DSCP:
+ *attr_mask |= MLX5_IB_GENERAL_RTT_RESP_DSCP_ATTR;
+ MLX5_SET(cong_control_r_roce_general, field, rtt_resp_dscp_valid, 1);
+ MLX5_SET(cong_control_r_roce_general, field, rtt_resp_dscp, var);
+ break;
}
}
diff --git a/drivers/infiniband/hw/mlx5/ib_rep.c b/drivers/infiniband/hw/mlx5/ib_rep.c
index 52821485371a..ddcfc116b19a 100644
--- a/drivers/infiniband/hw/mlx5/ib_rep.c
+++ b/drivers/infiniband/hw/mlx5/ib_rep.c
@@ -37,6 +37,7 @@ mlx5_ib_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
const struct mlx5_ib_profile *profile;
struct mlx5_core_dev *peer_dev;
struct mlx5_ib_dev *ibdev;
+ int second_uplink = false;
u32 peer_num_ports;
int vport_index;
int ret;
@@ -47,17 +48,24 @@ mlx5_ib_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
peer_dev = mlx5_lag_get_peer_mdev(dev);
peer_num_ports = mlx5_eswitch_get_total_vports(peer_dev);
if (mlx5_lag_is_master(dev)) {
- /* Only 1 ib port is the representor for both uplinks */
- num_ports += peer_num_ports - 1;
+ if (mlx5_lag_is_mpesw(dev))
+ num_ports += peer_num_ports;
+ else
+ num_ports += peer_num_ports - 1;
+
} else {
- if (rep->vport == MLX5_VPORT_UPLINK)
- return 0;
+ if (rep->vport == MLX5_VPORT_UPLINK) {
+ if (!mlx5_lag_is_mpesw(dev))
+ return 0;
+ second_uplink = true;
+ }
+
vport_index += peer_num_ports;
dev = peer_dev;
}
}
- if (rep->vport == MLX5_VPORT_UPLINK)
+ if (rep->vport == MLX5_VPORT_UPLINK && !second_uplink)
profile = &raw_eth_profile;
else
return mlx5_ib_set_vport_rep(dev, rep, vport_index);
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index c669ef6e47e7..5b988db66b8f 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -1756,13 +1756,9 @@ static int set_ucontext_resp(struct ib_ucontext *uctx,
struct mlx5_ib_dev *dev = to_mdev(ibdev);
struct mlx5_ib_ucontext *context = to_mucontext(uctx);
struct mlx5_bfreg_info *bfregi = &context->bfregi;
- int err;
if (MLX5_CAP_GEN(dev->mdev, dump_fill_mkey)) {
- err = mlx5_cmd_dump_fill_mkey(dev->mdev,
- &resp->dump_fill_mkey);
- if (err)
- return err;
+ resp->dump_fill_mkey = dev->mkeys.dump_fill_mkey;
resp->comp_mask |=
MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_DUMP_FILL_MKEY;
}
@@ -2087,7 +2083,7 @@ static int mlx5_ib_mmap_clock_info_page(struct mlx5_ib_dev *dev,
if (vma->vm_flags & (VM_WRITE | VM_EXEC))
return -EPERM;
- vma->vm_flags &= ~VM_MAYWRITE;
+ vm_flags_clear(vma, VM_MAYWRITE);
if (!dev->mdev->clock_info)
return -EOPNOTSUPP;
@@ -2311,7 +2307,7 @@ static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vm
if (vma->vm_flags & VM_WRITE)
return -EPERM;
- vma->vm_flags &= ~VM_MAYWRITE;
+ vm_flags_clear(vma, VM_MAYWRITE);
/* Don't expose to user-space information it shouldn't have */
if (PAGE_SIZE > 4096)
@@ -3012,26 +3008,63 @@ static void mlx5_eth_lag_cleanup(struct mlx5_ib_dev *dev)
}
}
-static int mlx5_add_netdev_notifier(struct mlx5_ib_dev *dev, u32 port_num)
+static void mlx5_netdev_notifier_register(struct mlx5_roce *roce,
+ struct net_device *netdev)
{
int err;
- dev->port[port_num].roce.nb.notifier_call = mlx5_netdev_event;
- err = register_netdevice_notifier(&dev->port[port_num].roce.nb);
- if (err) {
- dev->port[port_num].roce.nb.notifier_call = NULL;
- return err;
- }
+ if (roce->tracking_netdev)
+ return;
+ roce->tracking_netdev = netdev;
+ roce->nb.notifier_call = mlx5_netdev_event;
+ err = register_netdevice_notifier_dev_net(netdev, &roce->nb, &roce->nn);
+ WARN_ON(err);
+}
- return 0;
+static void mlx5_netdev_notifier_unregister(struct mlx5_roce *roce)
+{
+ if (!roce->tracking_netdev)
+ return;
+ unregister_netdevice_notifier_dev_net(roce->tracking_netdev, &roce->nb,
+ &roce->nn);
+ roce->tracking_netdev = NULL;
}
-static void mlx5_remove_netdev_notifier(struct mlx5_ib_dev *dev, u32 port_num)
+static int mlx5e_mdev_notifier_event(struct notifier_block *nb,
+ unsigned long event, void *data)
{
- if (dev->port[port_num].roce.nb.notifier_call) {
- unregister_netdevice_notifier(&dev->port[port_num].roce.nb);
- dev->port[port_num].roce.nb.notifier_call = NULL;
+ struct mlx5_roce *roce = container_of(nb, struct mlx5_roce, mdev_nb);
+ struct net_device *netdev = data;
+
+ switch (event) {
+ case MLX5_DRIVER_EVENT_UPLINK_NETDEV:
+ if (netdev)
+ mlx5_netdev_notifier_register(roce, netdev);
+ else
+ mlx5_netdev_notifier_unregister(roce);
+ break;
+ default:
+ return NOTIFY_DONE;
}
+
+ return NOTIFY_OK;
+}
+
+static void mlx5_mdev_netdev_track(struct mlx5_ib_dev *dev, u32 port_num)
+{
+ struct mlx5_roce *roce = &dev->port[port_num].roce;
+
+ roce->mdev_nb.notifier_call = mlx5e_mdev_notifier_event;
+ mlx5_blocking_notifier_register(dev->mdev, &roce->mdev_nb);
+ mlx5_core_uplink_netdev_event_replay(dev->mdev);
+}
+
+static void mlx5_mdev_netdev_untrack(struct mlx5_ib_dev *dev, u32 port_num)
+{
+ struct mlx5_roce *roce = &dev->port[port_num].roce;
+
+ mlx5_blocking_notifier_unregister(dev->mdev, &roce->mdev_nb);
+ mlx5_netdev_notifier_unregister(roce);
}
static int mlx5_enable_eth(struct mlx5_ib_dev *dev)
@@ -3138,7 +3171,7 @@ static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev,
if (mpi->mdev_events.notifier_call)
mlx5_notifier_unregister(mpi->mdev, &mpi->mdev_events);
mpi->mdev_events.notifier_call = NULL;
- mlx5_remove_netdev_notifier(ibdev, port_num);
+ mlx5_mdev_netdev_untrack(ibdev, port_num);
spin_lock(&port->mp.mpi_lock);
comps = mpi->mdev_refcnt;
@@ -3196,12 +3229,7 @@ static bool mlx5_ib_bind_slave_port(struct mlx5_ib_dev *ibdev,
if (err)
goto unbind;
- err = mlx5_add_netdev_notifier(ibdev, port_num);
- if (err) {
- mlx5_ib_err(ibdev, "failed adding netdev notifier for port %u\n",
- port_num + 1);
- goto unbind;
- }
+ mlx5_mdev_netdev_track(ibdev, port_num);
mpi->mdev_events.notifier_call = mlx5_ib_event_slave_port;
mlx5_notifier_register(mpi->mdev, &mpi->mdev_events);
@@ -3634,6 +3662,10 @@ static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
dev->port[i].roce.last_port_state = IB_PORT_DOWN;
}
+ err = mlx5r_cmd_query_special_mkeys(dev);
+ if (err)
+ return err;
+
err = mlx5_ib_init_multiport_master(dev);
if (err)
return err;
@@ -3909,9 +3941,7 @@ static int mlx5_ib_roce_init(struct mlx5_ib_dev *dev)
port_num = mlx5_core_native_port_num(dev->mdev) - 1;
/* Register only for native ports */
- err = mlx5_add_netdev_notifier(dev, port_num);
- if (err)
- return err;
+ mlx5_mdev_netdev_track(dev, port_num);
err = mlx5_enable_eth(dev);
if (err)
@@ -3920,7 +3950,7 @@ static int mlx5_ib_roce_init(struct mlx5_ib_dev *dev)
return 0;
cleanup:
- mlx5_remove_netdev_notifier(dev, port_num);
+ mlx5_mdev_netdev_untrack(dev, port_num);
return err;
}
@@ -3938,7 +3968,7 @@ static void mlx5_ib_roce_cleanup(struct mlx5_ib_dev *dev)
mlx5_disable_eth(dev);
port_num = mlx5_core_native_port_num(dev->mdev) - 1;
- mlx5_remove_netdev_notifier(dev, port_num);
+ mlx5_mdev_netdev_untrack(dev, port_num);
}
}
@@ -4000,12 +4030,7 @@ static int mlx5_ib_stage_ib_reg_init(struct mlx5_ib_dev *dev)
static void mlx5_ib_stage_pre_ib_reg_umr_cleanup(struct mlx5_ib_dev *dev)
{
- int err;
-
- err = mlx5_mkey_cache_cleanup(dev);
- if (err)
- mlx5_ib_warn(dev, "mr cache cleanup failed\n");
-
+ mlx5_mkey_cache_cleanup(dev);
mlx5r_umr_resource_cleanup(dev);
}
@@ -4403,6 +4428,10 @@ static int __init mlx5_ib_init(void)
return -ENOMEM;
}
+ ret = mlx5_ib_qp_event_init();
+ if (ret)
+ goto qp_event_err;
+
mlx5_ib_odp_init();
ret = mlx5r_rep_init();
if (ret)
@@ -4420,6 +4449,8 @@ drv_err:
mp_err:
mlx5r_rep_cleanup();
rep_err:
+ mlx5_ib_qp_event_cleanup();
+qp_event_err:
destroy_workqueue(mlx5_ib_event_wq);
free_page((unsigned long)xlt_emergency_page);
return ret;
@@ -4431,6 +4462,7 @@ static void __exit mlx5_ib_cleanup(void)
auxiliary_driver_unregister(&mlx5r_mp_driver);
mlx5r_rep_cleanup();
+ mlx5_ib_qp_event_cleanup();
destroy_workqueue(mlx5_ib_event_wq);
free_page((unsigned long)xlt_emergency_page);
}
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 8b91babdd4c0..efa4dc6e7dee 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -617,12 +617,21 @@ enum mlx5_mkey_type {
MLX5_MKEY_INDIRECT_DEVX,
};
+struct mlx5r_cache_rb_key {
+ u8 ats:1;
+ unsigned int access_mode;
+ unsigned int access_flags;
+ unsigned int ndescs;
+};
+
struct mlx5_ib_mkey {
u32 key;
enum mlx5_mkey_type type;
unsigned int ndescs;
struct wait_queue_head wait;
refcount_t usecount;
+ /* User Mkey must hold either a rb_key or a cache_ent. */
+ struct mlx5r_cache_rb_key rb_key;
struct mlx5_cache_ent *cache_ent;
};
@@ -737,11 +746,11 @@ struct mlx5_cache_ent {
unsigned long reserved;
char name[4];
- u32 order;
- u32 access_mode;
- u32 page;
- unsigned int ndescs;
+ struct rb_node node;
+ struct mlx5r_cache_rb_key rb_key;
+
+ u8 is_tmp:1;
u8 disabled:1;
u8 fill_to_high_water:1;
@@ -771,9 +780,11 @@ struct mlx5r_async_create_mkey {
struct mlx5_mkey_cache {
struct workqueue_struct *wq;
- struct mlx5_cache_ent ent[MAX_MKEY_CACHE_ENTRIES];
- struct dentry *root;
+ struct rb_root rb_root;
+ struct mutex rb_lock;
+ struct dentry *fs_root;
unsigned long last_add;
+ struct delayed_work remove_ent_dwork;
};
struct mlx5_ib_port_resources {
@@ -832,6 +843,9 @@ struct mlx5_roce {
rwlock_t netdev_lock;
struct net_device *netdev;
struct notifier_block nb;
+ struct netdev_net_notifier nn;
+ struct notifier_block mdev_nb;
+ struct net_device *tracking_netdev;
atomic_t tx_port_affinity;
enum ib_port_state last_port_state;
struct mlx5_ib_dev *dev;
@@ -874,6 +888,8 @@ enum mlx5_ib_dbg_cc_types {
MLX5_IB_DBG_CC_NP_CNP_DSCP,
MLX5_IB_DBG_CC_NP_CNP_PRIO_MODE,
MLX5_IB_DBG_CC_NP_CNP_PRIO,
+ MLX5_IB_DBG_CC_GENERAL_RTT_RESP_DSCP_VALID,
+ MLX5_IB_DBG_CC_GENERAL_RTT_RESP_DSCP,
MLX5_IB_DBG_CC_MAX,
};
@@ -1051,6 +1067,13 @@ struct mlx5_port_caps {
u8 ext_port_cap;
};
+
+struct mlx5_special_mkeys {
+ u32 dump_fill_mkey;
+ __be32 null_mkey;
+ __be32 terminate_scatter_list_mkey;
+};
+
struct mlx5_ib_dev {
struct ib_device ib_dev;
struct mlx5_core_dev *mdev;
@@ -1081,7 +1104,6 @@ struct mlx5_ib_dev {
struct xarray odp_mkeys;
- u32 null_mkey;
struct mlx5_ib_flow_db *flow_db;
/* protect resources needed as part of reset flow */
spinlock_t reset_flow_resource_lock;
@@ -1110,6 +1132,7 @@ struct mlx5_ib_dev {
struct mlx5_port_caps port_caps[MLX5_MAX_PORTS];
u16 pkey_table_len;
u8 lag_ports;
+ struct mlx5_special_mkeys mkeys;
};
static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
@@ -1316,11 +1339,15 @@ void mlx5_ib_populate_pas(struct ib_umem *umem, size_t page_size, __be64 *pas,
void mlx5_ib_copy_pas(u64 *old, u64 *new, int step, int num);
int mlx5_ib_get_cqe_size(struct ib_cq *ibcq);
int mlx5_mkey_cache_init(struct mlx5_ib_dev *dev);
-int mlx5_mkey_cache_cleanup(struct mlx5_ib_dev *dev);
+void mlx5_mkey_cache_cleanup(struct mlx5_ib_dev *dev);
+struct mlx5_cache_ent *
+mlx5r_cache_create_ent_locked(struct mlx5_ib_dev *dev,
+ struct mlx5r_cache_rb_key rb_key,
+ bool persistent_entry);
struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
- struct mlx5_cache_ent *ent,
- int access_flags);
+ int access_flags, int access_mode,
+ int ndescs);
int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
struct ib_mr_status *mr_status);
@@ -1344,7 +1371,7 @@ int mlx5r_odp_create_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq);
void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *ibdev);
int __init mlx5_ib_odp_init(void);
void mlx5_ib_odp_cleanup(void);
-void mlx5_odp_init_mkey_cache_entry(struct mlx5_cache_ent *ent);
+int mlx5_odp_init_mkey_cache(struct mlx5_ib_dev *dev);
void mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
struct mlx5_ib_mr *mr, int flags);
@@ -1363,7 +1390,10 @@ static inline int mlx5r_odp_create_eq(struct mlx5_ib_dev *dev,
static inline void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *ibdev) {}
static inline int mlx5_ib_odp_init(void) { return 0; }
static inline void mlx5_ib_odp_cleanup(void) {}
-static inline void mlx5_odp_init_mkey_cache_entry(struct mlx5_cache_ent *ent) {}
+static inline int mlx5_odp_init_mkey_cache(struct mlx5_ib_dev *dev)
+{
+ return 0;
+}
static inline void mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
struct mlx5_ib_mr *mr, int flags) {}
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 053fe946e45a..67356f515261 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -140,19 +140,16 @@ static void create_mkey_warn(struct mlx5_ib_dev *dev, int status, void *out)
mlx5_cmd_out_err(dev->mdev, MLX5_CMD_OP_CREATE_MKEY, 0, out);
}
-
-static int push_mkey(struct mlx5_cache_ent *ent, bool limit_pendings,
- void *to_store)
+static int push_mkey_locked(struct mlx5_cache_ent *ent, bool limit_pendings,
+ void *to_store)
{
XA_STATE(xas, &ent->mkeys, 0);
void *curr;
- xa_lock_irq(&ent->mkeys);
if (limit_pendings &&
- (ent->reserved - ent->stored) > MAX_PENDING_REG_MR) {
- xa_unlock_irq(&ent->mkeys);
+ (ent->reserved - ent->stored) > MAX_PENDING_REG_MR)
return -EAGAIN;
- }
+
while (1) {
/*
* This is cmpxchg (NULL, XA_ZERO_ENTRY) however this version
@@ -191,6 +188,7 @@ static int push_mkey(struct mlx5_cache_ent *ent, bool limit_pendings,
break;
xa_lock_irq(&ent->mkeys);
}
+ xa_lock_irq(&ent->mkeys);
if (xas_error(&xas))
return xas_error(&xas);
if (WARN_ON(curr))
@@ -198,6 +196,17 @@ static int push_mkey(struct mlx5_cache_ent *ent, bool limit_pendings,
return 0;
}
+static int push_mkey(struct mlx5_cache_ent *ent, bool limit_pendings,
+ void *to_store)
+{
+ int ret;
+
+ xa_lock_irq(&ent->mkeys);
+ ret = push_mkey_locked(ent, limit_pendings, to_store);
+ xa_unlock_irq(&ent->mkeys);
+ return ret;
+}
+
static void undo_push_reserve_mkey(struct mlx5_cache_ent *ent)
{
void *old;
@@ -292,12 +301,14 @@ static void set_cache_mkc(struct mlx5_cache_ent *ent, void *mkc)
set_mkc_access_pd_addr_fields(mkc, 0, 0, ent->dev->umrc.pd);
MLX5_SET(mkc, mkc, free, 1);
MLX5_SET(mkc, mkc, umr_en, 1);
- MLX5_SET(mkc, mkc, access_mode_1_0, ent->access_mode & 0x3);
- MLX5_SET(mkc, mkc, access_mode_4_2, (ent->access_mode >> 2) & 0x7);
+ MLX5_SET(mkc, mkc, access_mode_1_0, ent->rb_key.access_mode & 0x3);
+ MLX5_SET(mkc, mkc, access_mode_4_2,
+ (ent->rb_key.access_mode >> 2) & 0x7);
MLX5_SET(mkc, mkc, translations_octword_size,
- get_mkc_octo_size(ent->access_mode, ent->ndescs));
- MLX5_SET(mkc, mkc, log_page_size, ent->page);
+ get_mkc_octo_size(ent->rb_key.access_mode,
+ ent->rb_key.ndescs));
+ MLX5_SET(mkc, mkc, log_page_size, PAGE_SHIFT);
}
/* Asynchronously schedule new MRs to be populated in the cache. */
@@ -515,18 +526,22 @@ static const struct file_operations limit_fops = {
static bool someone_adding(struct mlx5_mkey_cache *cache)
{
- unsigned int i;
-
- for (i = 0; i < MAX_MKEY_CACHE_ENTRIES; i++) {
- struct mlx5_cache_ent *ent = &cache->ent[i];
- bool ret;
+ struct mlx5_cache_ent *ent;
+ struct rb_node *node;
+ bool ret;
+ mutex_lock(&cache->rb_lock);
+ for (node = rb_first(&cache->rb_root); node; node = rb_next(node)) {
+ ent = rb_entry(node, struct mlx5_cache_ent, node);
xa_lock_irq(&ent->mkeys);
ret = ent->stored < ent->limit;
xa_unlock_irq(&ent->mkeys);
- if (ret)
+ if (ret) {
+ mutex_unlock(&cache->rb_lock);
return true;
+ }
}
+ mutex_unlock(&cache->rb_lock);
return false;
}
@@ -539,7 +554,7 @@ static void queue_adjust_cache_locked(struct mlx5_cache_ent *ent)
{
lockdep_assert_held(&ent->mkeys.xa_lock);
- if (ent->disabled || READ_ONCE(ent->dev->fill_delay))
+ if (ent->disabled || READ_ONCE(ent->dev->fill_delay) || ent->is_tmp)
return;
if (ent->stored < ent->limit) {
ent->fill_to_high_water = true;
@@ -590,8 +605,8 @@ static void __cache_work_func(struct mlx5_cache_ent *ent)
if (err != -EAGAIN) {
mlx5_ib_warn(
dev,
- "command failed order %d, err %d\n",
- ent->order, err);
+ "add keys command failed, err %d\n",
+ err);
queue_delayed_work(cache->wq, &ent->dwork,
msecs_to_jiffies(1000));
}
@@ -637,16 +652,99 @@ static void delayed_cache_work_func(struct work_struct *work)
__cache_work_func(ent);
}
-struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
- struct mlx5_cache_ent *ent,
- int access_flags)
+static int cache_ent_key_cmp(struct mlx5r_cache_rb_key key1,
+ struct mlx5r_cache_rb_key key2)
+{
+ int res;
+
+ res = key1.ats - key2.ats;
+ if (res)
+ return res;
+
+ res = key1.access_mode - key2.access_mode;
+ if (res)
+ return res;
+
+ res = key1.access_flags - key2.access_flags;
+ if (res)
+ return res;
+
+ /*
+ * keep ndescs the last in the compare table since the find function
+ * searches for an exact match on all properties and only closest
+ * match in size.
+ */
+ return key1.ndescs - key2.ndescs;
+}
+
+static int mlx5_cache_ent_insert(struct mlx5_mkey_cache *cache,
+ struct mlx5_cache_ent *ent)
+{
+ struct rb_node **new = &cache->rb_root.rb_node, *parent = NULL;
+ struct mlx5_cache_ent *cur;
+ int cmp;
+
+ /* Figure out where to put new node */
+ while (*new) {
+ cur = rb_entry(*new, struct mlx5_cache_ent, node);
+ parent = *new;
+ cmp = cache_ent_key_cmp(cur->rb_key, ent->rb_key);
+ if (cmp > 0)
+ new = &((*new)->rb_left);
+ if (cmp < 0)
+ new = &((*new)->rb_right);
+ if (cmp == 0) {
+ mutex_unlock(&cache->rb_lock);
+ return -EEXIST;
+ }
+ }
+
+ /* Add new node and rebalance tree. */
+ rb_link_node(&ent->node, parent, new);
+ rb_insert_color(&ent->node, &cache->rb_root);
+
+ return 0;
+}
+
+static struct mlx5_cache_ent *
+mkey_cache_ent_from_rb_key(struct mlx5_ib_dev *dev,
+ struct mlx5r_cache_rb_key rb_key)
+{
+ struct rb_node *node = dev->cache.rb_root.rb_node;
+ struct mlx5_cache_ent *cur, *smallest = NULL;
+ int cmp;
+
+ /*
+ * Find the smallest ent with order >= requested_order.
+ */
+ while (node) {
+ cur = rb_entry(node, struct mlx5_cache_ent, node);
+ cmp = cache_ent_key_cmp(cur->rb_key, rb_key);
+ if (cmp > 0) {
+ smallest = cur;
+ node = node->rb_left;
+ }
+ if (cmp < 0)
+ node = node->rb_right;
+ if (cmp == 0)
+ return cur;
+ }
+
+ return (smallest &&
+ smallest->rb_key.access_mode == rb_key.access_mode &&
+ smallest->rb_key.access_flags == rb_key.access_flags &&
+ smallest->rb_key.ats == rb_key.ats) ?
+ smallest :
+ NULL;
+}
+
+static struct mlx5_ib_mr *_mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
+ struct mlx5_cache_ent *ent,
+ int access_flags)
{
struct mlx5_ib_mr *mr;
int err;
- if (!mlx5r_umr_can_reconfig(dev, 0, access_flags))
- return ERR_PTR(-EOPNOTSUPP);
-
mr = kzalloc(sizeof(*mr), GFP_KERNEL);
if (!mr)
return ERR_PTR(-ENOMEM);
@@ -677,10 +775,48 @@ struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
return mr;
}
-static void clean_keys(struct mlx5_ib_dev *dev, int c)
+static int get_unchangeable_access_flags(struct mlx5_ib_dev *dev,
+ int access_flags)
+{
+ int ret = 0;
+
+ if ((access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
+ MLX5_CAP_GEN(dev->mdev, atomic) &&
+ MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled))
+ ret |= IB_ACCESS_REMOTE_ATOMIC;
+
+ if ((access_flags & IB_ACCESS_RELAXED_ORDERING) &&
+ MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write) &&
+ !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write_umr))
+ ret |= IB_ACCESS_RELAXED_ORDERING;
+
+ if ((access_flags & IB_ACCESS_RELAXED_ORDERING) &&
+ MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read) &&
+ !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read_umr))
+ ret |= IB_ACCESS_RELAXED_ORDERING;
+
+ return ret;
+}
+
+struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
+ int access_flags, int access_mode,
+ int ndescs)
+{
+ struct mlx5r_cache_rb_key rb_key = {
+ .ndescs = ndescs,
+ .access_mode = access_mode,
+ .access_flags = get_unchangeable_access_flags(dev, access_flags)
+ };
+ struct mlx5_cache_ent *ent = mkey_cache_ent_from_rb_key(dev, rb_key);
+
+ if (!ent)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ return _mlx5_mr_cache_alloc(dev, ent, access_flags);
+}
+
+static void clean_keys(struct mlx5_ib_dev *dev, struct mlx5_cache_ent *ent)
{
- struct mlx5_mkey_cache *cache = &dev->cache;
- struct mlx5_cache_ent *ent = &cache->ent[c];
u32 mkey;
cancel_delayed_work(&ent->dwork);
@@ -699,31 +835,39 @@ static void mlx5_mkey_cache_debugfs_cleanup(struct mlx5_ib_dev *dev)
if (!mlx5_debugfs_root || dev->is_rep)
return;
- debugfs_remove_recursive(dev->cache.root);
- dev->cache.root = NULL;
+ debugfs_remove_recursive(dev->cache.fs_root);
+ dev->cache.fs_root = NULL;
}
-static void mlx5_mkey_cache_debugfs_init(struct mlx5_ib_dev *dev)
+static void mlx5_mkey_cache_debugfs_add_ent(struct mlx5_ib_dev *dev,
+ struct mlx5_cache_ent *ent)
{
- struct mlx5_mkey_cache *cache = &dev->cache;
- struct mlx5_cache_ent *ent;
+ int order = order_base_2(ent->rb_key.ndescs);
struct dentry *dir;
- int i;
if (!mlx5_debugfs_root || dev->is_rep)
return;
- cache->root = debugfs_create_dir("mr_cache", mlx5_debugfs_get_dev_root(dev->mdev));
+ if (ent->rb_key.access_mode == MLX5_MKC_ACCESS_MODE_KSM)
+ order = MLX5_IMR_KSM_CACHE_ENTRY + 2;
- for (i = 0; i < MAX_MKEY_CACHE_ENTRIES; i++) {
- ent = &cache->ent[i];
- sprintf(ent->name, "%d", ent->order);
- dir = debugfs_create_dir(ent->name, cache->root);
- debugfs_create_file("size", 0600, dir, ent, &size_fops);
- debugfs_create_file("limit", 0600, dir, ent, &limit_fops);
- debugfs_create_ulong("cur", 0400, dir, &ent->stored);
- debugfs_create_u32("miss", 0600, dir, &ent->miss);
- }
+ sprintf(ent->name, "%d", order);
+ dir = debugfs_create_dir(ent->name, dev->cache.fs_root);
+ debugfs_create_file("size", 0600, dir, ent, &size_fops);
+ debugfs_create_file("limit", 0600, dir, ent, &limit_fops);
+ debugfs_create_ulong("cur", 0400, dir, &ent->stored);
+ debugfs_create_u32("miss", 0600, dir, &ent->miss);
+}
+
+static void mlx5_mkey_cache_debugfs_init(struct mlx5_ib_dev *dev)
+{
+ struct dentry *dbg_root = mlx5_debugfs_get_dev_root(dev->mdev);
+ struct mlx5_mkey_cache *cache = &dev->cache;
+
+ if (!mlx5_debugfs_root || dev->is_rep)
+ return;
+
+ cache->fs_root = debugfs_create_dir("mr_cache", dbg_root);
}
static void delay_time_func(struct timer_list *t)
@@ -733,13 +877,100 @@ static void delay_time_func(struct timer_list *t)
WRITE_ONCE(dev->fill_delay, 0);
}
+struct mlx5_cache_ent *
+mlx5r_cache_create_ent_locked(struct mlx5_ib_dev *dev,
+ struct mlx5r_cache_rb_key rb_key,
+ bool persistent_entry)
+{
+ struct mlx5_cache_ent *ent;
+ int order;
+ int ret;
+
+ ent = kzalloc(sizeof(*ent), GFP_KERNEL);
+ if (!ent)
+ return ERR_PTR(-ENOMEM);
+
+ xa_init_flags(&ent->mkeys, XA_FLAGS_LOCK_IRQ);
+ ent->rb_key = rb_key;
+ ent->dev = dev;
+ ent->is_tmp = !persistent_entry;
+
+ INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func);
+
+ ret = mlx5_cache_ent_insert(&dev->cache, ent);
+ if (ret) {
+ kfree(ent);
+ return ERR_PTR(ret);
+ }
+
+ if (persistent_entry) {
+ if (rb_key.access_mode == MLX5_MKC_ACCESS_MODE_KSM)
+ order = MLX5_IMR_KSM_CACHE_ENTRY;
+ else
+ order = order_base_2(rb_key.ndescs) - 2;
+
+ if ((dev->mdev->profile.mask & MLX5_PROF_MASK_MR_CACHE) &&
+ !dev->is_rep && mlx5_core_is_pf(dev->mdev) &&
+ mlx5r_umr_can_load_pas(dev, 0))
+ ent->limit = dev->mdev->profile.mr_cache[order].limit;
+ else
+ ent->limit = 0;
+
+ mlx5_mkey_cache_debugfs_add_ent(dev, ent);
+ } else {
+ mod_delayed_work(ent->dev->cache.wq,
+ &ent->dev->cache.remove_ent_dwork,
+ msecs_to_jiffies(30 * 1000));
+ }
+
+ return ent;
+}
+
+static void remove_ent_work_func(struct work_struct *work)
+{
+ struct mlx5_mkey_cache *cache;
+ struct mlx5_cache_ent *ent;
+ struct rb_node *cur;
+
+ cache = container_of(work, struct mlx5_mkey_cache,
+ remove_ent_dwork.work);
+ mutex_lock(&cache->rb_lock);
+ cur = rb_last(&cache->rb_root);
+ while (cur) {
+ ent = rb_entry(cur, struct mlx5_cache_ent, node);
+ cur = rb_prev(cur);
+ mutex_unlock(&cache->rb_lock);
+
+ xa_lock_irq(&ent->mkeys);
+ if (!ent->is_tmp) {
+ xa_unlock_irq(&ent->mkeys);
+ mutex_lock(&cache->rb_lock);
+ continue;
+ }
+ xa_unlock_irq(&ent->mkeys);
+
+ clean_keys(ent->dev, ent);
+ mutex_lock(&cache->rb_lock);
+ }
+ mutex_unlock(&cache->rb_lock);
+}
+
int mlx5_mkey_cache_init(struct mlx5_ib_dev *dev)
{
struct mlx5_mkey_cache *cache = &dev->cache;
+ struct rb_root *root = &dev->cache.rb_root;
+ struct mlx5r_cache_rb_key rb_key = {
+ .access_mode = MLX5_MKC_ACCESS_MODE_MTT,
+ };
struct mlx5_cache_ent *ent;
+ struct rb_node *node;
+ int ret;
int i;
mutex_init(&dev->slow_path_mutex);
+ mutex_init(&dev->cache.rb_lock);
+ dev->cache.rb_root = RB_ROOT;
+ INIT_DELAYED_WORK(&dev->cache.remove_ent_dwork, remove_ent_work_func);
cache->wq = alloc_ordered_workqueue("mkey_cache", WQ_MEM_RECLAIM);
if (!cache->wq) {
mlx5_ib_warn(dev, "failed to create work queue\n");
@@ -748,52 +979,51 @@ int mlx5_mkey_cache_init(struct mlx5_ib_dev *dev)
mlx5_cmd_init_async_ctx(dev->mdev, &dev->async_ctx);
timer_setup(&dev->delay_timer, delay_time_func, 0);
- for (i = 0; i < MAX_MKEY_CACHE_ENTRIES; i++) {
- ent = &cache->ent[i];
- xa_init_flags(&ent->mkeys, XA_FLAGS_LOCK_IRQ);
- ent->order = i + 2;
- ent->dev = dev;
- ent->limit = 0;
-
- INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func);
-
- if (i > MKEY_CACHE_LAST_STD_ENTRY) {
- mlx5_odp_init_mkey_cache_entry(ent);
- continue;
+ mlx5_mkey_cache_debugfs_init(dev);
+ mutex_lock(&cache->rb_lock);
+ for (i = 0; i <= mkey_cache_max_order(dev); i++) {
+ rb_key.ndescs = 1 << (i + 2);
+ ent = mlx5r_cache_create_ent_locked(dev, rb_key, true);
+ if (IS_ERR(ent)) {
+ ret = PTR_ERR(ent);
+ goto err;
}
+ }
- if (ent->order > mkey_cache_max_order(dev))
- continue;
+ ret = mlx5_odp_init_mkey_cache(dev);
+ if (ret)
+ goto err;
- ent->page = PAGE_SHIFT;
- ent->ndescs = 1 << ent->order;
- ent->access_mode = MLX5_MKC_ACCESS_MODE_MTT;
- if ((dev->mdev->profile.mask & MLX5_PROF_MASK_MR_CACHE) &&
- !dev->is_rep && mlx5_core_is_pf(dev->mdev) &&
- mlx5r_umr_can_load_pas(dev, 0))
- ent->limit = dev->mdev->profile.mr_cache[i].limit;
- else
- ent->limit = 0;
+ mutex_unlock(&cache->rb_lock);
+ for (node = rb_first(root); node; node = rb_next(node)) {
+ ent = rb_entry(node, struct mlx5_cache_ent, node);
xa_lock_irq(&ent->mkeys);
queue_adjust_cache_locked(ent);
xa_unlock_irq(&ent->mkeys);
}
- mlx5_mkey_cache_debugfs_init(dev);
-
return 0;
+
+err:
+ mutex_unlock(&cache->rb_lock);
+ mlx5_mkey_cache_debugfs_cleanup(dev);
+ mlx5_ib_warn(dev, "failed to create mkey cache entry\n");
+ return ret;
}
-int mlx5_mkey_cache_cleanup(struct mlx5_ib_dev *dev)
+void mlx5_mkey_cache_cleanup(struct mlx5_ib_dev *dev)
{
- unsigned int i;
+ struct rb_root *root = &dev->cache.rb_root;
+ struct mlx5_cache_ent *ent;
+ struct rb_node *node;
if (!dev->cache.wq)
- return 0;
-
- for (i = 0; i < MAX_MKEY_CACHE_ENTRIES; i++) {
- struct mlx5_cache_ent *ent = &dev->cache.ent[i];
+ return;
+ cancel_delayed_work_sync(&dev->cache.remove_ent_dwork);
+ mutex_lock(&dev->cache.rb_lock);
+ for (node = rb_first(root); node; node = rb_next(node)) {
+ ent = rb_entry(node, struct mlx5_cache_ent, node);
xa_lock_irq(&ent->mkeys);
ent->disabled = true;
xa_unlock_irq(&ent->mkeys);
@@ -803,13 +1033,18 @@ int mlx5_mkey_cache_cleanup(struct mlx5_ib_dev *dev)
mlx5_mkey_cache_debugfs_cleanup(dev);
mlx5_cmd_cleanup_async_ctx(&dev->async_ctx);
- for (i = 0; i < MAX_MKEY_CACHE_ENTRIES; i++)
- clean_keys(dev, i);
+ node = rb_first(root);
+ while (node) {
+ ent = rb_entry(node, struct mlx5_cache_ent, node);
+ node = rb_next(node);
+ clean_keys(dev, ent);
+ rb_erase(&ent->node, root);
+ kfree(ent);
+ }
+ mutex_unlock(&dev->cache.rb_lock);
destroy_workqueue(dev->cache.wq);
del_timer_sync(&dev->delay_timer);
-
- return 0;
}
struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc)
@@ -873,23 +1108,10 @@ static int get_octo_len(u64 addr, u64 len, int page_shift)
static int mkey_cache_max_order(struct mlx5_ib_dev *dev)
{
if (MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset))
- return MKEY_CACHE_LAST_STD_ENTRY + 2;
+ return MKEY_CACHE_LAST_STD_ENTRY;
return MLX5_MAX_UMR_SHIFT;
}
-static struct mlx5_cache_ent *mkey_cache_ent_from_order(struct mlx5_ib_dev *dev,
- unsigned int order)
-{
- struct mlx5_mkey_cache *cache = &dev->cache;
-
- if (order < cache->ent[0].order)
- return &cache->ent[0];
- order = order - cache->ent[0].order;
- if (order > MKEY_CACHE_LAST_STD_ENTRY)
- return NULL;
- return &cache->ent[order];
-}
-
static void set_mr_fields(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
u64 length, int access_flags, u64 iova)
{
@@ -916,6 +1138,9 @@ static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd,
struct ib_umem *umem, u64 iova,
int access_flags)
{
+ struct mlx5r_cache_rb_key rb_key = {
+ .access_mode = MLX5_MKC_ACCESS_MODE_MTT,
+ };
struct mlx5_ib_dev *dev = to_mdev(pd->device);
struct mlx5_cache_ent *ent;
struct mlx5_ib_mr *mr;
@@ -928,22 +1153,26 @@ static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd,
0, iova);
if (WARN_ON(!page_size))
return ERR_PTR(-EINVAL);
- ent = mkey_cache_ent_from_order(
- dev, order_base_2(ib_umem_num_dma_blocks(umem, page_size)));
+
+ rb_key.ndescs = ib_umem_num_dma_blocks(umem, page_size);
+ rb_key.ats = mlx5_umem_needs_ats(dev, umem, access_flags);
+ rb_key.access_flags = get_unchangeable_access_flags(dev, access_flags);
+ ent = mkey_cache_ent_from_rb_key(dev, rb_key);
/*
- * Matches access in alloc_cache_mr(). If the MR can't come from the
- * cache then synchronously create an uncached one.
+ * If the MR can't come from the cache then synchronously create an uncached
+ * one.
*/
- if (!ent || ent->limit == 0 ||
- !mlx5r_umr_can_reconfig(dev, 0, access_flags) ||
- mlx5_umem_needs_ats(dev, umem, access_flags)) {
+ if (!ent) {
mutex_lock(&dev->slow_path_mutex);
mr = reg_create(pd, umem, iova, access_flags, page_size, false);
mutex_unlock(&dev->slow_path_mutex);
+ if (IS_ERR(mr))
+ return mr;
+ mr->mmkey.rb_key = rb_key;
return mr;
}
- mr = mlx5_mr_cache_alloc(dev, ent, access_flags);
+ mr = _mlx5_mr_cache_alloc(dev, ent, access_flags);
if (IS_ERR(mr))
return mr;
@@ -1030,6 +1259,7 @@ static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, struct ib_umem *umem,
goto err_2;
}
mr->mmkey.type = MLX5_MKEY_MR;
+ mr->mmkey.ndescs = get_octo_len(iova, umem->length, mr->page_shift);
mr->umem = umem;
set_mr_fields(dev, mr, umem->length, access_flags, iova);
kvfree(in);
@@ -1372,7 +1602,7 @@ static bool can_use_umr_rereg_pas(struct mlx5_ib_mr *mr,
mlx5_umem_find_best_pgsz(new_umem, mkc, log_page_size, 0, iova);
if (WARN_ON(!*page_size))
return false;
- return (1ULL << mr->mmkey.cache_ent->order) >=
+ return (mr->mmkey.cache_ent->rb_key.ndescs) >=
ib_umem_num_dma_blocks(new_umem, *page_size);
}
@@ -1567,6 +1797,49 @@ mlx5_free_priv_descs(struct mlx5_ib_mr *mr)
}
}
+static int cache_ent_find_and_store(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_mr *mr)
+{
+ struct mlx5_mkey_cache *cache = &dev->cache;
+ struct mlx5_cache_ent *ent;
+ int ret;
+
+ if (mr->mmkey.cache_ent) {
+ xa_lock_irq(&mr->mmkey.cache_ent->mkeys);
+ mr->mmkey.cache_ent->in_use--;
+ goto end;
+ }
+
+ mutex_lock(&cache->rb_lock);
+ ent = mkey_cache_ent_from_rb_key(dev, mr->mmkey.rb_key);
+ if (ent) {
+ if (ent->rb_key.ndescs == mr->mmkey.rb_key.ndescs) {
+ if (ent->disabled) {
+ mutex_unlock(&cache->rb_lock);
+ return -EOPNOTSUPP;
+ }
+ mr->mmkey.cache_ent = ent;
+ xa_lock_irq(&mr->mmkey.cache_ent->mkeys);
+ mutex_unlock(&cache->rb_lock);
+ goto end;
+ }
+ }
+
+ ent = mlx5r_cache_create_ent_locked(dev, mr->mmkey.rb_key, false);
+ mutex_unlock(&cache->rb_lock);
+ if (IS_ERR(ent))
+ return PTR_ERR(ent);
+
+ mr->mmkey.cache_ent = ent;
+ xa_lock_irq(&mr->mmkey.cache_ent->mkeys);
+
+end:
+ ret = push_mkey_locked(mr->mmkey.cache_ent, false,
+ xa_mk_value(mr->mmkey.key));
+ xa_unlock_irq(&mr->mmkey.cache_ent->mkeys);
+ return ret;
+}
+
int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
{
struct mlx5_ib_mr *mr = to_mmr(ibmr);
@@ -1612,16 +1885,11 @@ int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
}
/* Stop DMA */
- if (mr->mmkey.cache_ent) {
- xa_lock_irq(&mr->mmkey.cache_ent->mkeys);
- mr->mmkey.cache_ent->in_use--;
- xa_unlock_irq(&mr->mmkey.cache_ent->mkeys);
-
+ if (mr->umem && mlx5r_umr_can_load_pas(dev, mr->umem->length))
if (mlx5r_umr_revoke_mr(mr) ||
- push_mkey(mr->mmkey.cache_ent, false,
- xa_mk_value(mr->mmkey.key)))
+ cache_ent_find_and_store(dev, mr))
mr->mmkey.cache_ent = NULL;
- }
+
if (!mr->mmkey.cache_ent) {
rc = destroy_mkey(to_mdev(mr->ibmr.device), mr);
if (rc)
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index e6e021af6aa9..4a04cbc5b78a 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -104,7 +104,7 @@ static void populate_klm(struct mlx5_klm *pklm, size_t idx, size_t nentries,
if (flags & MLX5_IB_UPD_XLT_ZAP) {
for (; pklm != end; pklm++, idx++) {
pklm->bcount = cpu_to_be32(MLX5_IMR_MTT_SIZE);
- pklm->key = cpu_to_be32(mr_to_mdev(imr)->null_mkey);
+ pklm->key = mr_to_mdev(imr)->mkeys.null_mkey;
pklm->va = 0;
}
return;
@@ -137,7 +137,7 @@ static void populate_klm(struct mlx5_klm *pklm, size_t idx, size_t nentries,
pklm->key = cpu_to_be32(mtt->ibmr.lkey);
pklm->va = cpu_to_be64(idx * MLX5_IMR_MTT_SIZE);
} else {
- pklm->key = cpu_to_be32(mr_to_mdev(imr)->null_mkey);
+ pklm->key = mr_to_mdev(imr)->mkeys.null_mkey;
pklm->va = 0;
}
}
@@ -417,8 +417,9 @@ static struct mlx5_ib_mr *implicit_get_child_mr(struct mlx5_ib_mr *imr,
if (IS_ERR(odp))
return ERR_CAST(odp);
- mr = mlx5_mr_cache_alloc(dev, &dev->cache.ent[MLX5_IMR_MTT_CACHE_ENTRY],
- imr->access_flags);
+ mr = mlx5_mr_cache_alloc(dev, imr->access_flags,
+ MLX5_MKC_ACCESS_MODE_MTT,
+ MLX5_IMR_MTT_ENTRIES);
if (IS_ERR(mr)) {
ib_umem_odp_release(odp);
return mr;
@@ -492,9 +493,8 @@ struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
if (IS_ERR(umem_odp))
return ERR_CAST(umem_odp);
- imr = mlx5_mr_cache_alloc(dev,
- &dev->cache.ent[MLX5_IMR_KSM_CACHE_ENTRY],
- access_flags);
+ imr = mlx5_mr_cache_alloc(dev, access_flags, MLX5_MKC_ACCESS_MODE_KSM,
+ mlx5_imr_ksm_entries);
if (IS_ERR(imr)) {
ib_umem_odp_release(umem_odp);
return imr;
@@ -986,7 +986,7 @@ static int pagefault_data_segments(struct mlx5_ib_dev *dev,
{
int ret = 0, npages = 0;
u64 io_virt;
- u32 key;
+ __be32 key;
u32 byte_count;
size_t bcnt;
int inline_segment;
@@ -1000,7 +1000,7 @@ static int pagefault_data_segments(struct mlx5_ib_dev *dev,
struct mlx5_wqe_data_seg *dseg = wqe;
io_virt = be64_to_cpu(dseg->addr);
- key = be32_to_cpu(dseg->lkey);
+ key = dseg->lkey;
byte_count = be32_to_cpu(dseg->byte_count);
inline_segment = !!(byte_count & MLX5_INLINE_SEG);
bcnt = byte_count & ~MLX5_INLINE_SEG;
@@ -1014,7 +1014,8 @@ static int pagefault_data_segments(struct mlx5_ib_dev *dev,
}
/* receive WQE end of sg list. */
- if (receive_queue && bcnt == 0 && key == MLX5_INVALID_LKEY &&
+ if (receive_queue && bcnt == 0 &&
+ key == dev->mkeys.terminate_scatter_list_mkey &&
io_virt == 0)
break;
@@ -1034,7 +1035,7 @@ static int pagefault_data_segments(struct mlx5_ib_dev *dev,
continue;
}
- ret = pagefault_single_data_segment(dev, NULL, key,
+ ret = pagefault_single_data_segment(dev, NULL, be32_to_cpu(key),
io_virt, bcnt,
&pfault->bytes_committed,
bytes_mapped);
@@ -1587,26 +1588,22 @@ mlx5_ib_odp_destroy_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq)
return err;
}
-void mlx5_odp_init_mkey_cache_entry(struct mlx5_cache_ent *ent)
+int mlx5_odp_init_mkey_cache(struct mlx5_ib_dev *dev)
{
- if (!(ent->dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT))
- return;
+ struct mlx5r_cache_rb_key rb_key = {
+ .access_mode = MLX5_MKC_ACCESS_MODE_KSM,
+ .ndescs = mlx5_imr_ksm_entries,
+ };
+ struct mlx5_cache_ent *ent;
- switch (ent->order - 2) {
- case MLX5_IMR_MTT_CACHE_ENTRY:
- ent->page = PAGE_SHIFT;
- ent->ndescs = MLX5_IMR_MTT_ENTRIES;
- ent->access_mode = MLX5_MKC_ACCESS_MODE_MTT;
- ent->limit = 0;
- break;
+ if (!(dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT))
+ return 0;
- case MLX5_IMR_KSM_CACHE_ENTRY:
- ent->page = MLX5_KSM_PAGE_SHIFT;
- ent->ndescs = mlx5_imr_ksm_entries;
- ent->access_mode = MLX5_MKC_ACCESS_MODE_KSM;
- ent->limit = 0;
- break;
- }
+ ent = mlx5r_cache_create_ent_locked(dev, rb_key, true);
+ if (IS_ERR(ent))
+ return PTR_ERR(ent);
+
+ return 0;
}
static const struct ib_device_ops mlx5_ib_dev_odp_ops = {
@@ -1615,25 +1612,15 @@ static const struct ib_device_ops mlx5_ib_dev_odp_ops = {
int mlx5_ib_odp_init_one(struct mlx5_ib_dev *dev)
{
- int ret = 0;
-
internal_fill_odp_caps(dev);
if (!(dev->odp_caps.general_caps & IB_ODP_SUPPORT))
- return ret;
+ return 0;
ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_odp_ops);
- if (dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT) {
- ret = mlx5_cmd_null_mkey(dev->mdev, &dev->null_mkey);
- if (ret) {
- mlx5_ib_err(dev, "Error getting null_mkey %d\n", ret);
- return ret;
- }
- }
-
mutex_init(&dev->odp_eq_mutex);
- return ret;
+ return 0;
}
void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *dev)
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index cf953d23d18d..7cc3b973dec7 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -71,6 +71,14 @@ struct mlx5_modify_raw_qp_param {
u32 port;
};
+struct mlx5_ib_qp_event_work {
+ struct work_struct work;
+ struct mlx5_core_qp *qp;
+ int type;
+};
+
+static struct workqueue_struct *mlx5_ib_qp_event_wq;
+
static void get_cqs(enum ib_qp_type qp_type,
struct ib_cq *ib_send_cq, struct ib_cq *ib_recv_cq,
struct mlx5_ib_cq **send_cq, struct mlx5_ib_cq **recv_cq);
@@ -302,51 +310,120 @@ int mlx5_ib_read_wqe_srq(struct mlx5_ib_srq *srq, int wqe_index, void *buffer,
return mlx5_ib_read_user_wqe_srq(srq, wqe_index, buffer, buflen, bc);
}
+static void mlx5_ib_qp_err_syndrome(struct ib_qp *ibqp)
+{
+ struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
+ int outlen = MLX5_ST_SZ_BYTES(query_qp_out);
+ struct mlx5_ib_qp *qp = to_mqp(ibqp);
+ void *pas_ext_union, *err_syn;
+ u32 *outb;
+ int err;
+
+ if (!MLX5_CAP_GEN(dev->mdev, qpc_extension) ||
+ !MLX5_CAP_GEN(dev->mdev, qp_error_syndrome))
+ return;
+
+ outb = kzalloc(outlen, GFP_KERNEL);
+ if (!outb)
+ return;
+
+ err = mlx5_core_qp_query(dev, &qp->trans_qp.base.mqp, outb, outlen,
+ true);
+ if (err)
+ goto out;
+
+ pas_ext_union =
+ MLX5_ADDR_OF(query_qp_out, outb, qp_pas_or_qpc_ext_and_pas);
+ err_syn = MLX5_ADDR_OF(qpc_extension_and_pas_list_in, pas_ext_union,
+ qpc_data_extension.error_syndrome);
+
+ pr_err("%s/%d: QP %d error: %s (0x%x 0x%x 0x%x)\n",
+ ibqp->device->name, ibqp->port, ibqp->qp_num,
+ ib_wc_status_msg(
+ MLX5_GET(cqe_error_syndrome, err_syn, syndrome)),
+ MLX5_GET(cqe_error_syndrome, err_syn, vendor_error_syndrome),
+ MLX5_GET(cqe_error_syndrome, err_syn, hw_syndrome_type),
+ MLX5_GET(cqe_error_syndrome, err_syn, hw_error_syndrome));
+out:
+ kfree(outb);
+}
+
+static void mlx5_ib_handle_qp_event(struct work_struct *_work)
+{
+ struct mlx5_ib_qp_event_work *qpe_work =
+ container_of(_work, struct mlx5_ib_qp_event_work, work);
+ struct ib_qp *ibqp = &to_mibqp(qpe_work->qp)->ibqp;
+ struct ib_event event = {};
+
+ event.device = ibqp->device;
+ event.element.qp = ibqp;
+ switch (qpe_work->type) {
+ case MLX5_EVENT_TYPE_PATH_MIG:
+ event.event = IB_EVENT_PATH_MIG;
+ break;
+ case MLX5_EVENT_TYPE_COMM_EST:
+ event.event = IB_EVENT_COMM_EST;
+ break;
+ case MLX5_EVENT_TYPE_SQ_DRAINED:
+ event.event = IB_EVENT_SQ_DRAINED;
+ break;
+ case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
+ event.event = IB_EVENT_QP_LAST_WQE_REACHED;
+ break;
+ case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
+ event.event = IB_EVENT_QP_FATAL;
+ break;
+ case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
+ event.event = IB_EVENT_PATH_MIG_ERR;
+ break;
+ case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
+ event.event = IB_EVENT_QP_REQ_ERR;
+ break;
+ case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
+ event.event = IB_EVENT_QP_ACCESS_ERR;
+ break;
+ default:
+ pr_warn("mlx5_ib: Unexpected event type %d on QP %06x\n",
+ qpe_work->type, qpe_work->qp->qpn);
+ goto out;
+ }
+
+ if ((event.event == IB_EVENT_QP_FATAL) ||
+ (event.event == IB_EVENT_QP_ACCESS_ERR))
+ mlx5_ib_qp_err_syndrome(ibqp);
+
+ ibqp->event_handler(&event, ibqp->qp_context);
+
+out:
+ mlx5_core_res_put(&qpe_work->qp->common);
+ kfree(qpe_work);
+}
+
static void mlx5_ib_qp_event(struct mlx5_core_qp *qp, int type)
{
struct ib_qp *ibqp = &to_mibqp(qp)->ibqp;
- struct ib_event event;
+ struct mlx5_ib_qp_event_work *qpe_work;
if (type == MLX5_EVENT_TYPE_PATH_MIG) {
/* This event is only valid for trans_qps */
to_mibqp(qp)->port = to_mibqp(qp)->trans_qp.alt_port;
}
- if (ibqp->event_handler) {
- event.device = ibqp->device;
- event.element.qp = ibqp;
- switch (type) {
- case MLX5_EVENT_TYPE_PATH_MIG:
- event.event = IB_EVENT_PATH_MIG;
- break;
- case MLX5_EVENT_TYPE_COMM_EST:
- event.event = IB_EVENT_COMM_EST;
- break;
- case MLX5_EVENT_TYPE_SQ_DRAINED:
- event.event = IB_EVENT_SQ_DRAINED;
- break;
- case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
- event.event = IB_EVENT_QP_LAST_WQE_REACHED;
- break;
- case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
- event.event = IB_EVENT_QP_FATAL;
- break;
- case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
- event.event = IB_EVENT_PATH_MIG_ERR;
- break;
- case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
- event.event = IB_EVENT_QP_REQ_ERR;
- break;
- case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
- event.event = IB_EVENT_QP_ACCESS_ERR;
- break;
- default:
- pr_warn("mlx5_ib: Unexpected event type %d on QP %06x\n", type, qp->qpn);
- return;
- }
+ if (!ibqp->event_handler)
+ goto out_no_handler;
- ibqp->event_handler(&event, ibqp->qp_context);
- }
+ qpe_work = kzalloc(sizeof(*qpe_work), GFP_ATOMIC);
+ if (!qpe_work)
+ goto out_no_handler;
+
+ qpe_work->qp = qp;
+ qpe_work->type = type;
+ INIT_WORK(&qpe_work->work, mlx5_ib_handle_qp_event);
+ queue_work(mlx5_ib_qp_event_wq, &qpe_work->work);
+ return;
+
+out_no_handler:
+ mlx5_core_res_put(&qp->common);
}
static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap,
@@ -4827,7 +4904,8 @@ static int query_qp_attr(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
if (!outb)
return -ENOMEM;
- err = mlx5_core_qp_query(dev, &qp->trans_qp.base.mqp, outb, outlen);
+ err = mlx5_core_qp_query(dev, &qp->trans_qp.base.mqp, outb, outlen,
+ false);
if (err)
goto out;
@@ -5720,3 +5798,17 @@ out:
mutex_unlock(&mqp->mutex);
return err;
}
+
+int mlx5_ib_qp_event_init(void)
+{
+ mlx5_ib_qp_event_wq = alloc_ordered_workqueue("mlx5_ib_qp_event_wq", 0);
+ if (!mlx5_ib_qp_event_wq)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void mlx5_ib_qp_event_cleanup(void)
+{
+ destroy_workqueue(mlx5_ib_qp_event_wq);
+}
diff --git a/drivers/infiniband/hw/mlx5/qp.h b/drivers/infiniband/hw/mlx5/qp.h
index 5d4e140db99c..77f9b4a54816 100644
--- a/drivers/infiniband/hw/mlx5/qp.h
+++ b/drivers/infiniband/hw/mlx5/qp.h
@@ -20,7 +20,7 @@ int mlx5_core_qp_modify(struct mlx5_ib_dev *dev, u16 opcode, u32 opt_param_mask,
int mlx5_core_destroy_qp(struct mlx5_ib_dev *dev, struct mlx5_core_qp *qp);
int mlx5_core_destroy_dct(struct mlx5_ib_dev *dev, struct mlx5_core_dct *dct);
int mlx5_core_qp_query(struct mlx5_ib_dev *dev, struct mlx5_core_qp *qp,
- u32 *out, int outlen);
+ u32 *out, int outlen, bool qpc_ext);
int mlx5_core_dct_query(struct mlx5_ib_dev *dev, struct mlx5_core_dct *dct,
u32 *out, int outlen);
@@ -44,4 +44,6 @@ void mlx5_core_res_put(struct mlx5_core_rsc_common *res);
int mlx5_core_xrcd_alloc(struct mlx5_ib_dev *dev, u32 *xrcdn);
int mlx5_core_xrcd_dealloc(struct mlx5_ib_dev *dev, u32 xrcdn);
int mlx5_ib_qp_set_counter(struct ib_qp *qp, struct rdma_counter *counter);
+int mlx5_ib_qp_event_init(void);
+void mlx5_ib_qp_event_cleanup(void);
#endif /* _MLX5_IB_QP_H */
diff --git a/drivers/infiniband/hw/mlx5/qpc.c b/drivers/infiniband/hw/mlx5/qpc.c
index 542e4c63a8de..bae0334d6e7f 100644
--- a/drivers/infiniband/hw/mlx5/qpc.c
+++ b/drivers/infiniband/hw/mlx5/qpc.c
@@ -135,7 +135,8 @@ static int rsc_event_notifier(struct notifier_block *nb,
case MLX5_RES_SQ:
qp = (struct mlx5_core_qp *)common;
qp->event(qp, event_type);
- break;
+ /* Need to put resource in event handler */
+ return NOTIFY_OK;
case MLX5_RES_DCT:
dct = (struct mlx5_core_dct *)common;
if (event_type == MLX5_EVENT_TYPE_DCT_DRAINED)
@@ -504,12 +505,14 @@ void mlx5_cleanup_qp_table(struct mlx5_ib_dev *dev)
}
int mlx5_core_qp_query(struct mlx5_ib_dev *dev, struct mlx5_core_qp *qp,
- u32 *out, int outlen)
+ u32 *out, int outlen, bool qpc_ext)
{
u32 in[MLX5_ST_SZ_DW(query_qp_in)] = {};
MLX5_SET(query_qp_in, in, opcode, MLX5_CMD_OP_QUERY_QP);
MLX5_SET(query_qp_in, in, qpn, qp->qpn);
+ MLX5_SET(query_qp_in, in, qpc_ext, qpc_ext);
+
return mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, outlen);
}
diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c
index 09b365a98bbf..a056ea835da5 100644
--- a/drivers/infiniband/hw/mlx5/srq.c
+++ b/drivers/infiniband/hw/mlx5/srq.c
@@ -447,7 +447,7 @@ int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
if (i < srq->msrq.max_avail_gather) {
scat[i].byte_count = 0;
- scat[i].lkey = cpu_to_be32(MLX5_INVALID_LKEY);
+ scat[i].lkey = dev->mkeys.terminate_scatter_list_mkey;
scat[i].addr = 0;
}
}
diff --git a/drivers/infiniband/hw/mlx5/umr.c b/drivers/infiniband/hw/mlx5/umr.c
index 029e9536ec28..55f4e048d947 100644
--- a/drivers/infiniband/hw/mlx5/umr.c
+++ b/drivers/infiniband/hw/mlx5/umr.c
@@ -636,9 +636,7 @@ int mlx5r_umr_update_mr_pas(struct mlx5_ib_mr *mr, unsigned int flags)
mlx5r_umr_set_update_xlt_data_seg(&wqe.data_seg, &sg);
cur_mtt = mtt;
- rdma_for_each_block(mr->umem->sgt_append.sgt.sgl, &biter,
- mr->umem->sgt_append.sgt.nents,
- BIT(mr->page_shift)) {
+ rdma_umem_for_each_dma_block(mr->umem, &biter, BIT(mr->page_shift)) {
if (cur_mtt == (void *)mtt + sg.length) {
dma_sync_single_for_device(ddev, sg.addr, sg.length,
DMA_TO_DEVICE);
diff --git a/drivers/infiniband/hw/mlx5/wr.c b/drivers/infiniband/hw/mlx5/wr.c
index 855f3f4fefad..df1d1b0a3ef7 100644
--- a/drivers/infiniband/hw/mlx5/wr.c
+++ b/drivers/infiniband/hw/mlx5/wr.c
@@ -1252,7 +1252,7 @@ int mlx5_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
if (i < qp->rq.max_gs) {
scat[i].byte_count = 0;
- scat[i].lkey = cpu_to_be32(MLX5_INVALID_LKEY);
+ scat[i].lkey = dev->mkeys.terminate_scatter_list_mkey;
scat[i].addr = 0;
}
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
index 3937144b2ae5..80fe92a21f96 100644
--- a/drivers/infiniband/hw/qib/qib_file_ops.c
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -733,7 +733,7 @@ static int qib_mmap_mem(struct vm_area_struct *vma, struct qib_ctxtdata *rcd,
}
/* don't allow them to later change with mprotect */
- vma->vm_flags &= ~VM_MAYWRITE;
+ vm_flags_clear(vma, VM_MAYWRITE);
}
pfn = virt_to_phys(kvaddr) >> PAGE_SHIFT;
@@ -769,7 +769,7 @@ static int mmap_ureg(struct vm_area_struct *vma, struct qib_devdata *dd,
phys = dd->physaddr + ureg;
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
+ vm_flags_set(vma, VM_DONTCOPY | VM_DONTEXPAND);
ret = io_remap_pfn_range(vma, vma->vm_start,
phys >> PAGE_SHIFT,
vma->vm_end - vma->vm_start,
@@ -810,8 +810,7 @@ static int mmap_piobufs(struct vm_area_struct *vma,
* don't allow them to later change to readable with mprotect (for when
* not initially mapped readable, as is normally the case)
*/
- vma->vm_flags &= ~VM_MAYREAD;
- vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
+ vm_flags_mod(vma, VM_DONTCOPY | VM_DONTEXPAND, VM_MAYREAD);
/* We used PAT if wc_cookie == 0 */
if (!dd->wc_cookie)
@@ -852,7 +851,7 @@ static int mmap_rcvegrbufs(struct vm_area_struct *vma,
goto bail;
}
/* don't allow them to later change to writable with mprotect */
- vma->vm_flags &= ~VM_MAYWRITE;
+ vm_flags_clear(vma, VM_MAYWRITE);
start = vma->vm_start;
@@ -944,7 +943,7 @@ static int mmap_kvaddr(struct vm_area_struct *vma, u64 pgaddr,
* Don't allow permission to later change to writable
* with mprotect.
*/
- vma->vm_flags &= ~VM_MAYWRITE;
+ vm_flags_clear(vma, VM_MAYWRITE);
} else
goto bail;
len = vma->vm_end - vma->vm_start;
@@ -955,7 +954,7 @@ static int mmap_kvaddr(struct vm_area_struct *vma, u64 pgaddr,
vma->vm_pgoff = (unsigned long) addr >> PAGE_SHIFT;
vma->vm_ops = &qib_file_vm_ops;
- vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+ vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
ret = 1;
bail:
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
index 6e8c4fbb8083..6289238cc5af 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
@@ -672,7 +672,7 @@ int usnic_ib_mmap(struct ib_ucontext *context,
usnic_dbg("\n");
us_ibdev = to_usdev(context->device);
- vma->vm_flags |= VM_IO;
+ vm_flags_set(vma, VM_IO);
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vfid = vma->vm_pgoff;
usnic_dbg("Page Offset %lu PAGE_SHIFT %u VFID %u\n",
diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.c b/drivers/infiniband/hw/usnic/usnic_uiom.c
index c301b3be9f30..2a5cac2658ec 100644
--- a/drivers/infiniband/hw/usnic/usnic_uiom.c
+++ b/drivers/infiniband/hw/usnic/usnic_uiom.c
@@ -277,7 +277,7 @@ iter_chunk:
usnic_dbg("va 0x%lx pa %pa size 0x%zx flags 0x%x",
va_start, &pa_start, size, flags);
err = iommu_map(pd->domain, va_start, pa_start,
- size, flags);
+ size, flags, GFP_ATOMIC);
if (err) {
usnic_err("Failed to map va 0x%lx pa %pa size 0x%zx with err %d\n",
va_start, &pa_start, size, err);
@@ -294,7 +294,7 @@ iter_chunk:
usnic_dbg("va 0x%lx pa %pa size 0x%zx flags 0x%x\n",
va_start, &pa_start, size, flags);
err = iommu_map(pd->domain, va_start, pa_start,
- size, flags);
+ size, flags, GFP_ATOMIC);
if (err) {
usnic_err("Failed to map va 0x%lx pa %pa size 0x%zx with err %d\n",
va_start, &pa_start, size, err);
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
index 19176583dbde..9f54aa90a35a 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
@@ -408,7 +408,7 @@ int pvrdma_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
}
/* Map UAR to kernel space, VM_LOCKED? */
- vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
+ vm_flags_set(vma, VM_DONTCOPY | VM_DONTEXPAND);
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
if (io_remap_pfn_range(vma, start, context->uar.pfn, size,
vma->vm_page_prot))
diff --git a/drivers/infiniband/sw/rxe/rxe.h b/drivers/infiniband/sw/rxe/rxe.h
index ab334900fcc3..2415f3704f57 100644
--- a/drivers/infiniband/sw/rxe/rxe.h
+++ b/drivers/infiniband/sw/rxe/rxe.h
@@ -57,6 +57,44 @@
#define rxe_dbg_mw(mw, fmt, ...) ibdev_dbg((mw)->ibmw.device, \
"mw#%d %s: " fmt, (mw)->elem.index, __func__, ##__VA_ARGS__)
+/* responder states */
+enum resp_states {
+ RESPST_NONE,
+ RESPST_GET_REQ,
+ RESPST_CHK_PSN,
+ RESPST_CHK_OP_SEQ,
+ RESPST_CHK_OP_VALID,
+ RESPST_CHK_RESOURCE,
+ RESPST_CHK_LENGTH,
+ RESPST_CHK_RKEY,
+ RESPST_EXECUTE,
+ RESPST_READ_REPLY,
+ RESPST_ATOMIC_REPLY,
+ RESPST_ATOMIC_WRITE_REPLY,
+ RESPST_PROCESS_FLUSH,
+ RESPST_COMPLETE,
+ RESPST_ACKNOWLEDGE,
+ RESPST_CLEANUP,
+ RESPST_DUPLICATE_REQUEST,
+ RESPST_ERR_MALFORMED_WQE,
+ RESPST_ERR_UNSUPPORTED_OPCODE,
+ RESPST_ERR_MISALIGNED_ATOMIC,
+ RESPST_ERR_PSN_OUT_OF_SEQ,
+ RESPST_ERR_MISSING_OPCODE_FIRST,
+ RESPST_ERR_MISSING_OPCODE_LAST_C,
+ RESPST_ERR_MISSING_OPCODE_LAST_D1E,
+ RESPST_ERR_TOO_MANY_RDMA_ATM_REQ,
+ RESPST_ERR_RNR,
+ RESPST_ERR_RKEY_VIOLATION,
+ RESPST_ERR_INVALIDATE_RKEY,
+ RESPST_ERR_LENGTH,
+ RESPST_ERR_CQ_OVERFLOW,
+ RESPST_ERROR,
+ RESPST_RESET,
+ RESPST_DONE,
+ RESPST_EXIT,
+};
+
void rxe_set_mtu(struct rxe_dev *rxe, unsigned int dev_mtu);
int rxe_add(struct rxe_dev *rxe, unsigned int mtu, const char *ibdev_name);
diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h
index 948ce4902b10..1bb0cb479eb1 100644
--- a/drivers/infiniband/sw/rxe/rxe_loc.h
+++ b/drivers/infiniband/sw/rxe/rxe_loc.h
@@ -64,12 +64,16 @@ void rxe_mr_init_dma(int access, struct rxe_mr *mr);
int rxe_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length, u64 iova,
int access, struct rxe_mr *mr);
int rxe_mr_init_fast(int max_pages, struct rxe_mr *mr);
-int rxe_flush_pmem_iova(struct rxe_mr *mr, u64 iova, int length);
-int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
- enum rxe_mr_copy_dir dir);
+int rxe_flush_pmem_iova(struct rxe_mr *mr, u64 iova, unsigned int length);
+int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr,
+ unsigned int length, enum rxe_mr_copy_dir dir);
int copy_data(struct rxe_pd *pd, int access, struct rxe_dma_info *dma,
void *addr, int length, enum rxe_mr_copy_dir dir);
-void *iova_to_vaddr(struct rxe_mr *mr, u64 iova, int length);
+int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
+ int sg_nents, unsigned int *sg_offset);
+int rxe_mr_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
+ u64 compare, u64 swap_add, u64 *orig_val);
+int rxe_mr_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value);
struct rxe_mr *lookup_mr(struct rxe_pd *pd, int access, u32 key,
enum rxe_mr_lookup_type type);
int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length);
diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
index 072eac4b65d2..b10aa1580a64 100644
--- a/drivers/infiniband/sw/rxe/rxe_mr.c
+++ b/drivers/infiniband/sw/rxe/rxe_mr.c
@@ -26,22 +26,22 @@ u8 rxe_get_next_key(u32 last_key)
int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length)
{
-
-
switch (mr->ibmr.type) {
case IB_MR_TYPE_DMA:
return 0;
case IB_MR_TYPE_USER:
case IB_MR_TYPE_MEM_REG:
- if (iova < mr->ibmr.iova || length > mr->ibmr.length ||
- iova > mr->ibmr.iova + mr->ibmr.length - length)
- return -EFAULT;
+ if (iova < mr->ibmr.iova ||
+ iova + length > mr->ibmr.iova + mr->ibmr.length) {
+ rxe_dbg_mr(mr, "iova/length out of range");
+ return -EINVAL;
+ }
return 0;
default:
- rxe_dbg_mr(mr, "type (%d) not supported\n", mr->ibmr.type);
- return -EFAULT;
+ rxe_dbg_mr(mr, "mr type not supported\n");
+ return -EINVAL;
}
}
@@ -62,57 +62,31 @@ static void rxe_mr_init(int access, struct rxe_mr *mr)
mr->lkey = mr->ibmr.lkey = lkey;
mr->rkey = mr->ibmr.rkey = rkey;
+ mr->access = access;
+ mr->ibmr.page_size = PAGE_SIZE;
+ mr->page_mask = PAGE_MASK;
+ mr->page_shift = PAGE_SHIFT;
mr->state = RXE_MR_STATE_INVALID;
}
-static int rxe_mr_alloc(struct rxe_mr *mr, int num_buf)
-{
- int i;
- int num_map;
- struct rxe_map **map = mr->map;
-
- num_map = (num_buf + RXE_BUF_PER_MAP - 1) / RXE_BUF_PER_MAP;
-
- mr->map = kmalloc_array(num_map, sizeof(*map), GFP_KERNEL);
- if (!mr->map)
- goto err1;
-
- for (i = 0; i < num_map; i++) {
- mr->map[i] = kmalloc(sizeof(**map), GFP_KERNEL);
- if (!mr->map[i])
- goto err2;
- }
-
- BUILD_BUG_ON(!is_power_of_2(RXE_BUF_PER_MAP));
-
- mr->map_shift = ilog2(RXE_BUF_PER_MAP);
- mr->map_mask = RXE_BUF_PER_MAP - 1;
-
- mr->num_buf = num_buf;
- mr->num_map = num_map;
- mr->max_buf = num_map * RXE_BUF_PER_MAP;
-
- return 0;
-
-err2:
- for (i--; i >= 0; i--)
- kfree(mr->map[i]);
-
- kfree(mr->map);
- mr->map = NULL;
-err1:
- return -ENOMEM;
-}
-
void rxe_mr_init_dma(int access, struct rxe_mr *mr)
{
rxe_mr_init(access, mr);
- mr->access = access;
mr->state = RXE_MR_STATE_VALID;
mr->ibmr.type = IB_MR_TYPE_DMA;
}
+static unsigned long rxe_mr_iova_to_index(struct rxe_mr *mr, u64 iova)
+{
+ return (iova >> mr->page_shift) - (mr->ibmr.iova >> mr->page_shift);
+}
+
+static unsigned long rxe_mr_iova_to_page_offset(struct rxe_mr *mr, u64 iova)
+{
+ return iova & (mr_page_size(mr) - 1);
+}
+
static bool is_pmem_page(struct page *pg)
{
unsigned long paddr = page_to_phys(pg);
@@ -122,86 +96,98 @@ static bool is_pmem_page(struct page *pg)
IORES_DESC_PERSISTENT_MEMORY);
}
+static int rxe_mr_fill_pages_from_sgt(struct rxe_mr *mr, struct sg_table *sgt)
+{
+ XA_STATE(xas, &mr->page_list, 0);
+ struct sg_page_iter sg_iter;
+ struct page *page;
+ bool persistent = !!(mr->access & IB_ACCESS_FLUSH_PERSISTENT);
+
+ __sg_page_iter_start(&sg_iter, sgt->sgl, sgt->orig_nents, 0);
+ if (!__sg_page_iter_next(&sg_iter))
+ return 0;
+
+ do {
+ xas_lock(&xas);
+ while (true) {
+ page = sg_page_iter_page(&sg_iter);
+
+ if (persistent && !is_pmem_page(page)) {
+ rxe_dbg_mr(mr, "Page can't be persistent\n");
+ xas_set_err(&xas, -EINVAL);
+ break;
+ }
+
+ xas_store(&xas, page);
+ if (xas_error(&xas))
+ break;
+ xas_next(&xas);
+ if (!__sg_page_iter_next(&sg_iter))
+ break;
+ }
+ xas_unlock(&xas);
+ } while (xas_nomem(&xas, GFP_KERNEL));
+
+ return xas_error(&xas);
+}
+
int rxe_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length, u64 iova,
int access, struct rxe_mr *mr)
{
- struct rxe_map **map;
- struct rxe_phys_buf *buf = NULL;
- struct ib_umem *umem;
- struct sg_page_iter sg_iter;
- int num_buf;
- void *vaddr;
+ struct ib_umem *umem;
int err;
+ rxe_mr_init(access, mr);
+
+ xa_init(&mr->page_list);
+
umem = ib_umem_get(&rxe->ib_dev, start, length, access);
if (IS_ERR(umem)) {
rxe_dbg_mr(mr, "Unable to pin memory region err = %d\n",
(int)PTR_ERR(umem));
- err = PTR_ERR(umem);
- goto err_out;
+ return PTR_ERR(umem);
}
- num_buf = ib_umem_num_pages(umem);
-
- rxe_mr_init(access, mr);
-
- err = rxe_mr_alloc(mr, num_buf);
+ err = rxe_mr_fill_pages_from_sgt(mr, &umem->sgt_append.sgt);
if (err) {
- rxe_dbg_mr(mr, "Unable to allocate memory for map\n");
- goto err_release_umem;
+ ib_umem_release(umem);
+ return err;
}
- mr->page_shift = PAGE_SHIFT;
- mr->page_mask = PAGE_SIZE - 1;
-
- num_buf = 0;
- map = mr->map;
- if (length > 0) {
- bool persistent_access = access & IB_ACCESS_FLUSH_PERSISTENT;
-
- buf = map[0]->buf;
- for_each_sgtable_page (&umem->sgt_append.sgt, &sg_iter, 0) {
- struct page *pg = sg_page_iter_page(&sg_iter);
+ mr->umem = umem;
+ mr->ibmr.type = IB_MR_TYPE_USER;
+ mr->state = RXE_MR_STATE_VALID;
- if (persistent_access && !is_pmem_page(pg)) {
- rxe_dbg_mr(mr, "Unable to register persistent access to non-pmem device\n");
- err = -EINVAL;
- goto err_release_umem;
- }
+ return 0;
+}
- if (num_buf >= RXE_BUF_PER_MAP) {
- map++;
- buf = map[0]->buf;
- num_buf = 0;
- }
+static int rxe_mr_alloc(struct rxe_mr *mr, int num_buf)
+{
+ XA_STATE(xas, &mr->page_list, 0);
+ int i = 0;
+ int err;
- vaddr = page_address(pg);
- if (!vaddr) {
- rxe_dbg_mr(mr, "Unable to get virtual address\n");
- err = -ENOMEM;
- goto err_release_umem;
- }
- buf->addr = (uintptr_t)vaddr;
- buf->size = PAGE_SIZE;
- num_buf++;
- buf++;
+ xa_init(&mr->page_list);
+ do {
+ xas_lock(&xas);
+ while (i != num_buf) {
+ xas_store(&xas, XA_ZERO_ENTRY);
+ if (xas_error(&xas))
+ break;
+ xas_next(&xas);
+ i++;
}
- }
+ xas_unlock(&xas);
+ } while (xas_nomem(&xas, GFP_KERNEL));
- mr->umem = umem;
- mr->access = access;
- mr->offset = ib_umem_offset(umem);
- mr->state = RXE_MR_STATE_VALID;
- mr->ibmr.type = IB_MR_TYPE_USER;
- mr->ibmr.page_size = PAGE_SIZE;
+ err = xas_error(&xas);
+ if (err)
+ return err;
- return 0;
+ mr->num_buf = num_buf;
-err_release_umem:
- ib_umem_release(umem);
-err_out:
- return err;
+ return 0;
}
int rxe_mr_init_fast(int max_pages, struct rxe_mr *mr)
@@ -215,7 +201,6 @@ int rxe_mr_init_fast(int max_pages, struct rxe_mr *mr)
if (err)
goto err1;
- mr->max_buf = max_pages;
mr->state = RXE_MR_STATE_FREE;
mr->ibmr.type = IB_MR_TYPE_MEM_REG;
@@ -225,187 +210,125 @@ err1:
return err;
}
-static void lookup_iova(struct rxe_mr *mr, u64 iova, int *m_out, int *n_out,
- size_t *offset_out)
+static int rxe_set_page(struct ib_mr *ibmr, u64 iova)
{
- size_t offset = iova - mr->ibmr.iova + mr->offset;
- int map_index;
- int buf_index;
- u64 length;
-
- if (likely(mr->page_shift)) {
- *offset_out = offset & mr->page_mask;
- offset >>= mr->page_shift;
- *n_out = offset & mr->map_mask;
- *m_out = offset >> mr->map_shift;
- } else {
- map_index = 0;
- buf_index = 0;
+ struct rxe_mr *mr = to_rmr(ibmr);
+ struct page *page = virt_to_page(iova & mr->page_mask);
+ bool persistent = !!(mr->access & IB_ACCESS_FLUSH_PERSISTENT);
+ int err;
- length = mr->map[map_index]->buf[buf_index].size;
+ if (persistent && !is_pmem_page(page)) {
+ rxe_dbg_mr(mr, "Page cannot be persistent\n");
+ return -EINVAL;
+ }
- while (offset >= length) {
- offset -= length;
- buf_index++;
+ if (unlikely(mr->nbuf == mr->num_buf))
+ return -ENOMEM;
- if (buf_index == RXE_BUF_PER_MAP) {
- map_index++;
- buf_index = 0;
- }
- length = mr->map[map_index]->buf[buf_index].size;
- }
+ err = xa_err(xa_store(&mr->page_list, mr->nbuf, page, GFP_KERNEL));
+ if (err)
+ return err;
- *m_out = map_index;
- *n_out = buf_index;
- *offset_out = offset;
- }
+ mr->nbuf++;
+ return 0;
}
-void *iova_to_vaddr(struct rxe_mr *mr, u64 iova, int length)
+int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sgl,
+ int sg_nents, unsigned int *sg_offset)
{
- size_t offset;
- int m, n;
- void *addr;
-
- if (mr->state != RXE_MR_STATE_VALID) {
- rxe_dbg_mr(mr, "Not in valid state\n");
- addr = NULL;
- goto out;
- }
-
- if (!mr->map) {
- addr = (void *)(uintptr_t)iova;
- goto out;
- }
-
- if (mr_check_range(mr, iova, length)) {
- rxe_dbg_mr(mr, "Range violation\n");
- addr = NULL;
- goto out;
- }
-
- lookup_iova(mr, iova, &m, &n, &offset);
-
- if (offset + length > mr->map[m]->buf[n].size) {
- rxe_dbg_mr(mr, "Crosses page boundary\n");
- addr = NULL;
- goto out;
- }
+ struct rxe_mr *mr = to_rmr(ibmr);
+ unsigned int page_size = mr_page_size(mr);
- addr = (void *)(uintptr_t)mr->map[m]->buf[n].addr + offset;
+ mr->nbuf = 0;
+ mr->page_shift = ilog2(page_size);
+ mr->page_mask = ~((u64)page_size - 1);
+ mr->page_offset = mr->ibmr.iova & (page_size - 1);
-out:
- return addr;
+ return ib_sg_to_pages(ibmr, sgl, sg_nents, sg_offset, rxe_set_page);
}
-int rxe_flush_pmem_iova(struct rxe_mr *mr, u64 iova, int length)
+static int rxe_mr_copy_xarray(struct rxe_mr *mr, u64 iova, void *addr,
+ unsigned int length, enum rxe_mr_copy_dir dir)
{
- size_t offset;
-
- if (length == 0)
- return 0;
-
- if (mr->ibmr.type == IB_MR_TYPE_DMA)
- return -EFAULT;
+ unsigned int page_offset = rxe_mr_iova_to_page_offset(mr, iova);
+ unsigned long index = rxe_mr_iova_to_index(mr, iova);
+ unsigned int bytes;
+ struct page *page;
+ void *va;
- offset = (iova - mr->ibmr.iova + mr->offset) & mr->page_mask;
- while (length > 0) {
- u8 *va;
- int bytes;
-
- bytes = mr->ibmr.page_size - offset;
- if (bytes > length)
- bytes = length;
-
- va = iova_to_vaddr(mr, iova, length);
- if (!va)
+ while (length) {
+ page = xa_load(&mr->page_list, index);
+ if (!page)
return -EFAULT;
- arch_wb_cache_pmem(va, bytes);
-
+ bytes = min_t(unsigned int, length,
+ mr_page_size(mr) - page_offset);
+ va = kmap_local_page(page);
+ if (dir == RXE_FROM_MR_OBJ)
+ memcpy(addr, va + page_offset, bytes);
+ else
+ memcpy(va + page_offset, addr, bytes);
+ kunmap_local(va);
+
+ page_offset = 0;
+ addr += bytes;
length -= bytes;
- iova += bytes;
- offset = 0;
+ index++;
}
return 0;
}
-/* copy data from a range (vaddr, vaddr+length-1) to or from
- * a mr object starting at iova.
- */
-int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
- enum rxe_mr_copy_dir dir)
+static void rxe_mr_copy_dma(struct rxe_mr *mr, u64 iova, void *addr,
+ unsigned int length, enum rxe_mr_copy_dir dir)
{
- int err;
- int bytes;
- u8 *va;
- struct rxe_map **map;
- struct rxe_phys_buf *buf;
- int m;
- int i;
- size_t offset;
+ unsigned int page_offset = iova & (PAGE_SIZE - 1);
+ unsigned int bytes;
+ struct page *page;
+ u8 *va;
- if (length == 0)
- return 0;
-
- if (mr->ibmr.type == IB_MR_TYPE_DMA) {
- u8 *src, *dest;
+ while (length) {
+ page = virt_to_page(iova & mr->page_mask);
+ bytes = min_t(unsigned int, length,
+ PAGE_SIZE - page_offset);
+ va = kmap_local_page(page);
+
+ if (dir == RXE_TO_MR_OBJ)
+ memcpy(va + page_offset, addr, bytes);
+ else
+ memcpy(addr, va + page_offset, bytes);
+
+ kunmap_local(va);
+ page_offset = 0;
+ iova += bytes;
+ addr += bytes;
+ length -= bytes;
+ }
+}
- src = (dir == RXE_TO_MR_OBJ) ? addr : ((void *)(uintptr_t)iova);
+int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr,
+ unsigned int length, enum rxe_mr_copy_dir dir)
+{
+ int err;
- dest = (dir == RXE_TO_MR_OBJ) ? ((void *)(uintptr_t)iova) : addr;
+ if (length == 0)
+ return 0;
- memcpy(dest, src, length);
+ if (WARN_ON(!mr))
+ return -EINVAL;
+ if (mr->ibmr.type == IB_MR_TYPE_DMA) {
+ rxe_mr_copy_dma(mr, iova, addr, length, dir);
return 0;
}
- WARN_ON_ONCE(!mr->map);
-
err = mr_check_range(mr, iova, length);
- if (err) {
- err = -EFAULT;
- goto err1;
- }
-
- lookup_iova(mr, iova, &m, &i, &offset);
-
- map = mr->map + m;
- buf = map[0]->buf + i;
-
- while (length > 0) {
- u8 *src, *dest;
-
- va = (u8 *)(uintptr_t)buf->addr + offset;
- src = (dir == RXE_TO_MR_OBJ) ? addr : va;
- dest = (dir == RXE_TO_MR_OBJ) ? va : addr;
-
- bytes = buf->size - offset;
-
- if (bytes > length)
- bytes = length;
-
- memcpy(dest, src, bytes);
-
- length -= bytes;
- addr += bytes;
-
- offset = 0;
- buf++;
- i++;
-
- if (i == RXE_BUF_PER_MAP) {
- i = 0;
- map++;
- buf = map[0]->buf;
- }
+ if (unlikely(err)) {
+ rxe_dbg_mr(mr, "iova out of range");
+ return err;
}
- return 0;
-
-err1:
- return err;
+ return rxe_mr_copy_xarray(mr, iova, addr, length, dir);
}
/* copy data in or out of a wqe, i.e. sg list
@@ -477,7 +400,6 @@ int copy_data(
if (bytes > 0) {
iova = sge->addr + offset;
-
err = rxe_mr_copy(mr, iova, addr, bytes, dir);
if (err)
goto err2;
@@ -504,6 +426,165 @@ err1:
return err;
}
+int rxe_flush_pmem_iova(struct rxe_mr *mr, u64 iova, unsigned int length)
+{
+ unsigned int page_offset;
+ unsigned long index;
+ struct page *page;
+ unsigned int bytes;
+ int err;
+ u8 *va;
+
+ /* mr must be valid even if length is zero */
+ if (WARN_ON(!mr))
+ return -EINVAL;
+
+ if (length == 0)
+ return 0;
+
+ if (mr->ibmr.type == IB_MR_TYPE_DMA)
+ return -EFAULT;
+
+ err = mr_check_range(mr, iova, length);
+ if (err)
+ return err;
+
+ while (length > 0) {
+ index = rxe_mr_iova_to_index(mr, iova);
+ page = xa_load(&mr->page_list, index);
+ page_offset = rxe_mr_iova_to_page_offset(mr, iova);
+ if (!page)
+ return -EFAULT;
+ bytes = min_t(unsigned int, length,
+ mr_page_size(mr) - page_offset);
+
+ va = kmap_local_page(page);
+ arch_wb_cache_pmem(va + page_offset, bytes);
+ kunmap_local(va);
+
+ length -= bytes;
+ iova += bytes;
+ page_offset = 0;
+ }
+
+ return 0;
+}
+
+/* Guarantee atomicity of atomic operations at the machine level. */
+static DEFINE_SPINLOCK(atomic_ops_lock);
+
+int rxe_mr_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
+ u64 compare, u64 swap_add, u64 *orig_val)
+{
+ unsigned int page_offset;
+ struct page *page;
+ u64 value;
+ u64 *va;
+
+ if (unlikely(mr->state != RXE_MR_STATE_VALID)) {
+ rxe_dbg_mr(mr, "mr not in valid state");
+ return RESPST_ERR_RKEY_VIOLATION;
+ }
+
+ if (mr->ibmr.type == IB_MR_TYPE_DMA) {
+ page_offset = iova & (PAGE_SIZE - 1);
+ page = virt_to_page(iova & PAGE_MASK);
+ } else {
+ unsigned long index;
+ int err;
+
+ err = mr_check_range(mr, iova, sizeof(value));
+ if (err) {
+ rxe_dbg_mr(mr, "iova out of range");
+ return RESPST_ERR_RKEY_VIOLATION;
+ }
+ page_offset = rxe_mr_iova_to_page_offset(mr, iova);
+ index = rxe_mr_iova_to_index(mr, iova);
+ page = xa_load(&mr->page_list, index);
+ if (!page)
+ return RESPST_ERR_RKEY_VIOLATION;
+ }
+
+ if (unlikely(page_offset & 0x7)) {
+ rxe_dbg_mr(mr, "iova not aligned");
+ return RESPST_ERR_MISALIGNED_ATOMIC;
+ }
+
+ va = kmap_local_page(page);
+
+ spin_lock_bh(&atomic_ops_lock);
+ value = *orig_val = va[page_offset >> 3];
+
+ if (opcode == IB_OPCODE_RC_COMPARE_SWAP) {
+ if (value == compare)
+ va[page_offset >> 3] = swap_add;
+ } else {
+ value += swap_add;
+ va[page_offset >> 3] = value;
+ }
+ spin_unlock_bh(&atomic_ops_lock);
+
+ kunmap_local(va);
+
+ return 0;
+}
+
+#if defined CONFIG_64BIT
+/* only implemented or called for 64 bit architectures */
+int rxe_mr_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value)
+{
+ unsigned int page_offset;
+ struct page *page;
+ u64 *va;
+
+ /* See IBA oA19-28 */
+ if (unlikely(mr->state != RXE_MR_STATE_VALID)) {
+ rxe_dbg_mr(mr, "mr not in valid state");
+ return RESPST_ERR_RKEY_VIOLATION;
+ }
+
+ if (mr->ibmr.type == IB_MR_TYPE_DMA) {
+ page_offset = iova & (PAGE_SIZE - 1);
+ page = virt_to_page(iova & PAGE_MASK);
+ } else {
+ unsigned long index;
+ int err;
+
+ /* See IBA oA19-28 */
+ err = mr_check_range(mr, iova, sizeof(value));
+ if (unlikely(err)) {
+ rxe_dbg_mr(mr, "iova out of range");
+ return RESPST_ERR_RKEY_VIOLATION;
+ }
+ page_offset = rxe_mr_iova_to_page_offset(mr, iova);
+ index = rxe_mr_iova_to_index(mr, iova);
+ page = xa_load(&mr->page_list, index);
+ if (!page)
+ return RESPST_ERR_RKEY_VIOLATION;
+ }
+
+ /* See IBA A19.4.2 */
+ if (unlikely(page_offset & 0x7)) {
+ rxe_dbg_mr(mr, "misaligned address");
+ return RESPST_ERR_MISALIGNED_ATOMIC;
+ }
+
+ va = kmap_local_page(page);
+
+ /* Do atomic write after all prior operations have completed */
+ smp_store_release(&va[page_offset >> 3], value);
+
+ kunmap_local(va);
+
+ return 0;
+}
+#else
+int rxe_mr_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value)
+{
+ return RESPST_ERR_UNSUPPORTED_OPCODE;
+}
+#endif
+
int advance_dma_data(struct rxe_dma_info *dma, unsigned int length)
{
struct rxe_sge *sge = &dma->sge[dma->cur_sge];
@@ -537,12 +618,6 @@ int advance_dma_data(struct rxe_dma_info *dma, unsigned int length)
return 0;
}
-/* (1) find the mr corresponding to lkey/rkey
- * depending on lookup_type
- * (2) verify that the (qp) pd matches the mr pd
- * (3) verify that the mr can support the requested access
- * (4) verify that mr state is valid
- */
struct rxe_mr *lookup_mr(struct rxe_pd *pd, int access, u32 key,
enum rxe_mr_lookup_type type)
{
@@ -656,22 +731,17 @@ int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
return -EINVAL;
rxe_cleanup(mr);
-
+ kfree_rcu(mr);
return 0;
}
void rxe_mr_cleanup(struct rxe_pool_elem *elem)
{
struct rxe_mr *mr = container_of(elem, typeof(*mr), elem);
- int i;
rxe_put(mr_pd(mr));
ib_umem_release(mr->umem);
- if (mr->map) {
- for (i = 0; i < mr->num_map; i++)
- kfree(mr->map[i]);
-
- kfree(mr->map);
- }
+ if (mr->ibmr.type != IB_MR_TYPE_DMA)
+ xa_destroy(&mr->page_list);
}
diff --git a/drivers/infiniband/sw/rxe/rxe_pool.c b/drivers/infiniband/sw/rxe/rxe_pool.c
index 1151c0b5ccea..6215c6de3a84 100644
--- a/drivers/infiniband/sw/rxe/rxe_pool.c
+++ b/drivers/infiniband/sw/rxe/rxe_pool.c
@@ -116,55 +116,12 @@ void rxe_pool_cleanup(struct rxe_pool *pool)
WARN_ON(!xa_empty(&pool->xa));
}
-void *rxe_alloc(struct rxe_pool *pool)
-{
- struct rxe_pool_elem *elem;
- void *obj;
- int err;
-
- if (WARN_ON(!(pool->type == RXE_TYPE_MR)))
- return NULL;
-
- if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
- goto err_cnt;
-
- obj = kzalloc(pool->elem_size, GFP_KERNEL);
- if (!obj)
- goto err_cnt;
-
- elem = (struct rxe_pool_elem *)((u8 *)obj + pool->elem_offset);
-
- elem->pool = pool;
- elem->obj = obj;
- kref_init(&elem->ref_cnt);
- init_completion(&elem->complete);
-
- /* allocate index in array but leave pointer as NULL so it
- * can't be looked up until rxe_finalize() is called
- */
- err = xa_alloc_cyclic(&pool->xa, &elem->index, NULL, pool->limit,
- &pool->next, GFP_KERNEL);
- if (err < 0)
- goto err_free;
-
- return obj;
-
-err_free:
- kfree(obj);
-err_cnt:
- atomic_dec(&pool->num_elem);
- return NULL;
-}
-
int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_elem *elem,
bool sleepable)
{
int err;
gfp_t gfp_flags;
- if (WARN_ON(pool->type == RXE_TYPE_MR))
- return -EINVAL;
-
if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
goto err_cnt;
@@ -275,9 +232,6 @@ int __rxe_cleanup(struct rxe_pool_elem *elem, bool sleepable)
if (pool->cleanup)
pool->cleanup(elem);
- if (pool->type == RXE_TYPE_MR)
- kfree_rcu(elem->obj);
-
atomic_dec(&pool->num_elem);
return err;
diff --git a/drivers/infiniband/sw/rxe/rxe_pool.h b/drivers/infiniband/sw/rxe/rxe_pool.h
index 9d83cb32092f..b42e26427a70 100644
--- a/drivers/infiniband/sw/rxe/rxe_pool.h
+++ b/drivers/infiniband/sw/rxe/rxe_pool.h
@@ -54,9 +54,6 @@ void rxe_pool_init(struct rxe_dev *rxe, struct rxe_pool *pool,
/* free resources from object pool */
void rxe_pool_cleanup(struct rxe_pool *pool);
-/* allocate an object from pool */
-void *rxe_alloc(struct rxe_pool *pool);
-
/* connect already allocated object to pool */
int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_elem *elem,
bool sleepable);
diff --git a/drivers/infiniband/sw/rxe/rxe_queue.h b/drivers/infiniband/sw/rxe/rxe_queue.h
index ed44042782fa..c711cb98b949 100644
--- a/drivers/infiniband/sw/rxe/rxe_queue.h
+++ b/drivers/infiniband/sw/rxe/rxe_queue.h
@@ -35,19 +35,26 @@
/**
* enum queue_type - type of queue
* @QUEUE_TYPE_TO_CLIENT: Queue is written by rxe driver and
- * read by client. Used by rxe driver only.
+ * read by client which may be a user space
+ * application or a kernel ulp.
+ * Used by rxe internals only.
* @QUEUE_TYPE_FROM_CLIENT: Queue is written by client and
- * read by rxe driver. Used by rxe driver only.
- * @QUEUE_TYPE_TO_DRIVER: Queue is written by client and
- * read by rxe driver. Used by kernel client only.
- * @QUEUE_TYPE_FROM_DRIVER: Queue is written by rxe driver and
- * read by client. Used by kernel client only.
+ * read by rxe driver.
+ * Used by rxe internals only.
+ * @QUEUE_TYPE_FROM_ULP: Queue is written by kernel ulp and
+ * read by rxe driver.
+ * Used by kernel verbs APIs only on
+ * behalf of ulps.
+ * @QUEUE_TYPE_TO_ULP: Queue is written by rxe driver and
+ * read by kernel ulp.
+ * Used by kernel verbs APIs only on
+ * behalf of ulps.
*/
enum queue_type {
QUEUE_TYPE_TO_CLIENT,
QUEUE_TYPE_FROM_CLIENT,
- QUEUE_TYPE_TO_DRIVER,
- QUEUE_TYPE_FROM_DRIVER,
+ QUEUE_TYPE_FROM_ULP,
+ QUEUE_TYPE_TO_ULP,
};
struct rxe_queue_buf;
@@ -62,9 +69,9 @@ struct rxe_queue {
u32 index_mask;
enum queue_type type;
/* private copy of index for shared queues between
- * kernel space and user space. Kernel reads and writes
+ * driver and clients. Driver reads and writes
* this copy and then replicates to rxe_queue_buf
- * for read access by user space.
+ * for read access by clients.
*/
u32 index;
};
@@ -97,19 +104,21 @@ static inline u32 queue_get_producer(const struct rxe_queue *q,
switch (type) {
case QUEUE_TYPE_FROM_CLIENT:
- /* protect user index */
+ /* used by rxe, client owns the index */
prod = smp_load_acquire(&q->buf->producer_index);
break;
case QUEUE_TYPE_TO_CLIENT:
+ /* used by rxe which owns the index */
prod = q->index;
break;
- case QUEUE_TYPE_FROM_DRIVER:
- /* protect driver index */
- prod = smp_load_acquire(&q->buf->producer_index);
- break;
- case QUEUE_TYPE_TO_DRIVER:
+ case QUEUE_TYPE_FROM_ULP:
+ /* used by ulp which owns the index */
prod = q->buf->producer_index;
break;
+ case QUEUE_TYPE_TO_ULP:
+ /* used by ulp, rxe owns the index */
+ prod = smp_load_acquire(&q->buf->producer_index);
+ break;
}
return prod;
@@ -122,19 +131,21 @@ static inline u32 queue_get_consumer(const struct rxe_queue *q,
switch (type) {
case QUEUE_TYPE_FROM_CLIENT:
+ /* used by rxe which owns the index */
cons = q->index;
break;
case QUEUE_TYPE_TO_CLIENT:
- /* protect user index */
+ /* used by rxe, client owns the index */
cons = smp_load_acquire(&q->buf->consumer_index);
break;
- case QUEUE_TYPE_FROM_DRIVER:
- cons = q->buf->consumer_index;
- break;
- case QUEUE_TYPE_TO_DRIVER:
- /* protect driver index */
+ case QUEUE_TYPE_FROM_ULP:
+ /* used by ulp, rxe owns the index */
cons = smp_load_acquire(&q->buf->consumer_index);
break;
+ case QUEUE_TYPE_TO_ULP:
+ /* used by ulp which owns the index */
+ cons = q->buf->consumer_index;
+ break;
}
return cons;
@@ -172,24 +183,31 @@ static inline void queue_advance_producer(struct rxe_queue *q,
switch (type) {
case QUEUE_TYPE_FROM_CLIENT:
- pr_warn("%s: attempt to advance client index\n",
- __func__);
+ /* used by rxe, client owns the index */
+ if (WARN_ON(1))
+ pr_warn("%s: attempt to advance client index\n",
+ __func__);
break;
case QUEUE_TYPE_TO_CLIENT:
+ /* used by rxe which owns the index */
prod = q->index;
prod = (prod + 1) & q->index_mask;
q->index = prod;
- /* protect user index */
+ /* release so client can read it safely */
smp_store_release(&q->buf->producer_index, prod);
break;
- case QUEUE_TYPE_FROM_DRIVER:
- pr_warn("%s: attempt to advance driver index\n",
- __func__);
- break;
- case QUEUE_TYPE_TO_DRIVER:
+ case QUEUE_TYPE_FROM_ULP:
+ /* used by ulp which owns the index */
prod = q->buf->producer_index;
prod = (prod + 1) & q->index_mask;
- q->buf->producer_index = prod;
+ /* release so rxe can read it safely */
+ smp_store_release(&q->buf->producer_index, prod);
+ break;
+ case QUEUE_TYPE_TO_ULP:
+ /* used by ulp, rxe owns the index */
+ if (WARN_ON(1))
+ pr_warn("%s: attempt to advance driver index\n",
+ __func__);
break;
}
}
@@ -201,24 +219,30 @@ static inline void queue_advance_consumer(struct rxe_queue *q,
switch (type) {
case QUEUE_TYPE_FROM_CLIENT:
- cons = q->index;
- cons = (cons + 1) & q->index_mask;
+ /* used by rxe which owns the index */
+ cons = (q->index + 1) & q->index_mask;
q->index = cons;
- /* protect user index */
+ /* release so client can read it safely */
smp_store_release(&q->buf->consumer_index, cons);
break;
case QUEUE_TYPE_TO_CLIENT:
- pr_warn("%s: attempt to advance client index\n",
- __func__);
+ /* used by rxe, client owns the index */
+ if (WARN_ON(1))
+ pr_warn("%s: attempt to advance client index\n",
+ __func__);
+ break;
+ case QUEUE_TYPE_FROM_ULP:
+ /* used by ulp, rxe owns the index */
+ if (WARN_ON(1))
+ pr_warn("%s: attempt to advance driver index\n",
+ __func__);
break;
- case QUEUE_TYPE_FROM_DRIVER:
+ case QUEUE_TYPE_TO_ULP:
+ /* used by ulp which owns the index */
cons = q->buf->consumer_index;
cons = (cons + 1) & q->index_mask;
- q->buf->consumer_index = cons;
- break;
- case QUEUE_TYPE_TO_DRIVER:
- pr_warn("%s: attempt to advance driver index\n",
- __func__);
+ /* release so rxe can read it safely */
+ smp_store_release(&q->buf->consumer_index, cons);
break;
}
}
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index c74972244f08..0cc1ba91d48c 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -10,43 +10,6 @@
#include "rxe_loc.h"
#include "rxe_queue.h"
-enum resp_states {
- RESPST_NONE,
- RESPST_GET_REQ,
- RESPST_CHK_PSN,
- RESPST_CHK_OP_SEQ,
- RESPST_CHK_OP_VALID,
- RESPST_CHK_RESOURCE,
- RESPST_CHK_LENGTH,
- RESPST_CHK_RKEY,
- RESPST_EXECUTE,
- RESPST_READ_REPLY,
- RESPST_ATOMIC_REPLY,
- RESPST_ATOMIC_WRITE_REPLY,
- RESPST_PROCESS_FLUSH,
- RESPST_COMPLETE,
- RESPST_ACKNOWLEDGE,
- RESPST_CLEANUP,
- RESPST_DUPLICATE_REQUEST,
- RESPST_ERR_MALFORMED_WQE,
- RESPST_ERR_UNSUPPORTED_OPCODE,
- RESPST_ERR_MISALIGNED_ATOMIC,
- RESPST_ERR_PSN_OUT_OF_SEQ,
- RESPST_ERR_MISSING_OPCODE_FIRST,
- RESPST_ERR_MISSING_OPCODE_LAST_C,
- RESPST_ERR_MISSING_OPCODE_LAST_D1E,
- RESPST_ERR_TOO_MANY_RDMA_ATM_REQ,
- RESPST_ERR_RNR,
- RESPST_ERR_RKEY_VIOLATION,
- RESPST_ERR_INVALIDATE_RKEY,
- RESPST_ERR_LENGTH,
- RESPST_ERR_CQ_OVERFLOW,
- RESPST_ERROR,
- RESPST_RESET,
- RESPST_DONE,
- RESPST_EXIT,
-};
-
static char *resp_state_name[] = {
[RESPST_NONE] = "NONE",
[RESPST_GET_REQ] = "GET_REQ",
@@ -457,13 +420,23 @@ static enum resp_states rxe_resp_check_length(struct rxe_qp *qp,
return RESPST_CHK_RKEY;
}
+/* if the reth length field is zero we can assume nothing
+ * about the rkey value and should not validate or use it.
+ * Instead set qp->resp.rkey to 0 which is an invalid rkey
+ * value since the minimum index part is 1.
+ */
static void qp_resp_from_reth(struct rxe_qp *qp, struct rxe_pkt_info *pkt)
{
+ unsigned int length = reth_len(pkt);
+
qp->resp.va = reth_va(pkt);
qp->resp.offset = 0;
- qp->resp.rkey = reth_rkey(pkt);
- qp->resp.resid = reth_len(pkt);
- qp->resp.length = reth_len(pkt);
+ qp->resp.resid = length;
+ qp->resp.length = length;
+ if (pkt->mask & RXE_READ_OR_WRITE_MASK && length == 0)
+ qp->resp.rkey = 0;
+ else
+ qp->resp.rkey = reth_rkey(pkt);
}
static void qp_resp_from_atmeth(struct rxe_qp *qp, struct rxe_pkt_info *pkt)
@@ -474,6 +447,10 @@ static void qp_resp_from_atmeth(struct rxe_qp *qp, struct rxe_pkt_info *pkt)
qp->resp.resid = sizeof(u64);
}
+/* resolve the packet rkey to qp->resp.mr or set qp->resp.mr to NULL
+ * if an invalid rkey is received or the rdma length is zero. For middle
+ * or last packets use the stored value of mr.
+ */
static enum resp_states check_rkey(struct rxe_qp *qp,
struct rxe_pkt_info *pkt)
{
@@ -510,10 +487,12 @@ static enum resp_states check_rkey(struct rxe_qp *qp,
return RESPST_EXECUTE;
}
- /* A zero-byte op is not required to set an addr or rkey. See C9-88 */
+ /* A zero-byte read or write op is not required to
+ * set an addr or rkey. See C9-88
+ */
if ((pkt->mask & RXE_READ_OR_WRITE_MASK) &&
- (pkt->mask & RXE_RETH_MASK) &&
- reth_len(pkt) == 0) {
+ (pkt->mask & RXE_RETH_MASK) && reth_len(pkt) == 0) {
+ qp->resp.mr = NULL;
return RESPST_EXECUTE;
}
@@ -592,6 +571,7 @@ skip_check_range:
return RESPST_EXECUTE;
err:
+ qp->resp.mr = NULL;
if (mr)
rxe_put(mr);
if (mw)
@@ -725,17 +705,12 @@ static enum resp_states process_flush(struct rxe_qp *qp,
return RESPST_ACKNOWLEDGE;
}
-/* Guarantee atomicity of atomic operations at the machine level. */
-static DEFINE_SPINLOCK(atomic_ops_lock);
-
static enum resp_states atomic_reply(struct rxe_qp *qp,
- struct rxe_pkt_info *pkt)
+ struct rxe_pkt_info *pkt)
{
- u64 *vaddr;
- enum resp_states ret;
struct rxe_mr *mr = qp->resp.mr;
struct resp_res *res = qp->resp.res;
- u64 value;
+ int err;
if (!res) {
res = rxe_prepare_res(qp, pkt, RXE_ATOMIC_MASK);
@@ -743,32 +718,14 @@ static enum resp_states atomic_reply(struct rxe_qp *qp,
}
if (!res->replay) {
- if (mr->state != RXE_MR_STATE_VALID) {
- ret = RESPST_ERR_RKEY_VIOLATION;
- goto out;
- }
-
- vaddr = iova_to_vaddr(mr, qp->resp.va + qp->resp.offset,
- sizeof(u64));
-
- /* check vaddr is 8 bytes aligned. */
- if (!vaddr || (uintptr_t)vaddr & 7) {
- ret = RESPST_ERR_MISALIGNED_ATOMIC;
- goto out;
- }
+ u64 iova = qp->resp.va + qp->resp.offset;
- spin_lock_bh(&atomic_ops_lock);
- res->atomic.orig_val = value = *vaddr;
-
- if (pkt->opcode == IB_OPCODE_RC_COMPARE_SWAP) {
- if (value == atmeth_comp(pkt))
- value = atmeth_swap_add(pkt);
- } else {
- value += atmeth_swap_add(pkt);
- }
-
- *vaddr = value;
- spin_unlock_bh(&atomic_ops_lock);
+ err = rxe_mr_do_atomic_op(mr, iova, pkt->opcode,
+ atmeth_comp(pkt),
+ atmeth_swap_add(pkt),
+ &res->atomic.orig_val);
+ if (err)
+ return err;
qp->resp.msn++;
@@ -780,35 +737,35 @@ static enum resp_states atomic_reply(struct rxe_qp *qp,
qp->resp.status = IB_WC_SUCCESS;
}
- ret = RESPST_ACKNOWLEDGE;
-out:
- return ret;
+ return RESPST_ACKNOWLEDGE;
}
-#ifdef CONFIG_64BIT
-static enum resp_states do_atomic_write(struct rxe_qp *qp,
- struct rxe_pkt_info *pkt)
+static enum resp_states atomic_write_reply(struct rxe_qp *qp,
+ struct rxe_pkt_info *pkt)
{
- struct rxe_mr *mr = qp->resp.mr;
- int payload = payload_size(pkt);
- u64 src, *dst;
-
- if (mr->state != RXE_MR_STATE_VALID)
- return RESPST_ERR_RKEY_VIOLATION;
+ struct resp_res *res = qp->resp.res;
+ struct rxe_mr *mr;
+ u64 value;
+ u64 iova;
+ int err;
- memcpy(&src, payload_addr(pkt), payload);
+ if (!res) {
+ res = rxe_prepare_res(qp, pkt, RXE_ATOMIC_WRITE_MASK);
+ qp->resp.res = res;
+ }
- dst = iova_to_vaddr(mr, qp->resp.va + qp->resp.offset, payload);
- /* check vaddr is 8 bytes aligned. */
- if (!dst || (uintptr_t)dst & 7)
- return RESPST_ERR_MISALIGNED_ATOMIC;
+ if (res->replay)
+ return RESPST_ACKNOWLEDGE;
- /* Do atomic write after all prior operations have completed */
- smp_store_release(dst, src);
+ mr = qp->resp.mr;
+ value = *(u64 *)payload_addr(pkt);
+ iova = qp->resp.va + qp->resp.offset;
- /* decrease resp.resid to zero */
- qp->resp.resid -= sizeof(payload);
+ err = rxe_mr_do_atomic_write(mr, iova, value);
+ if (err)
+ return err;
+ qp->resp.resid = 0;
qp->resp.msn++;
/* next expected psn, read handles this separately */
@@ -817,29 +774,8 @@ static enum resp_states do_atomic_write(struct rxe_qp *qp,
qp->resp.opcode = pkt->opcode;
qp->resp.status = IB_WC_SUCCESS;
- return RESPST_ACKNOWLEDGE;
-}
-#else
-static enum resp_states do_atomic_write(struct rxe_qp *qp,
- struct rxe_pkt_info *pkt)
-{
- return RESPST_ERR_UNSUPPORTED_OPCODE;
-}
-#endif /* CONFIG_64BIT */
-static enum resp_states atomic_write_reply(struct rxe_qp *qp,
- struct rxe_pkt_info *pkt)
-{
- struct resp_res *res = qp->resp.res;
-
- if (!res) {
- res = rxe_prepare_res(qp, pkt, RXE_ATOMIC_WRITE_MASK);
- qp->resp.res = res;
- }
-
- if (res->replay)
- return RESPST_ACKNOWLEDGE;
- return do_atomic_write(qp, pkt);
+ return RESPST_ACKNOWLEDGE;
}
static struct sk_buff *prepare_ack_packet(struct rxe_qp *qp,
@@ -966,7 +902,11 @@ static enum resp_states read_reply(struct rxe_qp *qp,
}
if (res->state == rdatm_res_state_new) {
- if (!res->replay) {
+ if (!res->replay || qp->resp.length == 0) {
+ /* if length == 0 mr will be NULL (is ok)
+ * otherwise qp->resp.mr holds a ref on mr
+ * which we transfer to mr and drop below.
+ */
mr = qp->resp.mr;
qp->resp.mr = NULL;
} else {
@@ -980,6 +920,10 @@ static enum resp_states read_reply(struct rxe_qp *qp,
else
opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST;
} else {
+ /* re-lookup mr from rkey on all later packets.
+ * length will be non-zero. This can fail if someone
+ * modifies or destroys the mr since the first packet.
+ */
mr = rxe_recheck_mr(qp, res->read.rkey);
if (!mr)
return RESPST_ERR_RKEY_VIOLATION;
@@ -997,18 +941,16 @@ static enum resp_states read_reply(struct rxe_qp *qp,
skb = prepare_ack_packet(qp, &ack_pkt, opcode, payload,
res->cur_psn, AETH_ACK_UNLIMITED);
if (!skb) {
- if (mr)
- rxe_put(mr);
- return RESPST_ERR_RNR;
+ state = RESPST_ERR_RNR;
+ goto err_out;
}
err = rxe_mr_copy(mr, res->read.va, payload_addr(&ack_pkt),
payload, RXE_FROM_MR_OBJ);
- if (mr)
- rxe_put(mr);
if (err) {
kfree_skb(skb);
- return RESPST_ERR_RKEY_VIOLATION;
+ state = RESPST_ERR_RKEY_VIOLATION;
+ goto err_out;
}
if (bth_pad(&ack_pkt)) {
@@ -1017,9 +959,12 @@ static enum resp_states read_reply(struct rxe_qp *qp,
memset(pad, 0, bth_pad(&ack_pkt));
}
+ /* rxe_xmit_packet always consumes the skb */
err = rxe_xmit_packet(qp, &ack_pkt, skb);
- if (err)
- return RESPST_ERR_RNR;
+ if (err) {
+ state = RESPST_ERR_RNR;
+ goto err_out;
+ }
res->read.va += payload;
res->read.resid -= payload;
@@ -1036,6 +981,9 @@ static enum resp_states read_reply(struct rxe_qp *qp,
state = RESPST_CLEANUP;
}
+err_out:
+ if (mr)
+ rxe_put(mr);
return state;
}
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index 025b35bf014e..e14050a69276 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -245,7 +245,7 @@ static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr)
int num_sge = ibwr->num_sge;
int full;
- full = queue_full(rq->queue, QUEUE_TYPE_TO_DRIVER);
+ full = queue_full(rq->queue, QUEUE_TYPE_FROM_ULP);
if (unlikely(full))
return -ENOMEM;
@@ -256,7 +256,7 @@ static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr)
for (i = 0; i < num_sge; i++)
length += ibwr->sg_list[i].length;
- recv_wqe = queue_producer_addr(rq->queue, QUEUE_TYPE_TO_DRIVER);
+ recv_wqe = queue_producer_addr(rq->queue, QUEUE_TYPE_FROM_ULP);
recv_wqe->wr_id = ibwr->wr_id;
memcpy(recv_wqe->dma.sge, ibwr->sg_list,
@@ -268,7 +268,7 @@ static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr)
recv_wqe->dma.cur_sge = 0;
recv_wqe->dma.sge_offset = 0;
- queue_advance_producer(rq->queue, QUEUE_TYPE_TO_DRIVER);
+ queue_advance_producer(rq->queue, QUEUE_TYPE_FROM_ULP);
return 0;
}
@@ -623,17 +623,17 @@ static int post_one_send(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
spin_lock_irqsave(&qp->sq.sq_lock, flags);
- full = queue_full(sq->queue, QUEUE_TYPE_TO_DRIVER);
+ full = queue_full(sq->queue, QUEUE_TYPE_FROM_ULP);
if (unlikely(full)) {
spin_unlock_irqrestore(&qp->sq.sq_lock, flags);
return -ENOMEM;
}
- send_wqe = queue_producer_addr(sq->queue, QUEUE_TYPE_TO_DRIVER);
+ send_wqe = queue_producer_addr(sq->queue, QUEUE_TYPE_FROM_ULP);
init_send_wqe(qp, ibwr, mask, length, send_wqe);
- queue_advance_producer(sq->queue, QUEUE_TYPE_TO_DRIVER);
+ queue_advance_producer(sq->queue, QUEUE_TYPE_FROM_ULP);
spin_unlock_irqrestore(&qp->sq.sq_lock, flags);
@@ -821,12 +821,12 @@ static int rxe_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
spin_lock_irqsave(&cq->cq_lock, flags);
for (i = 0; i < num_entries; i++) {
- cqe = queue_head(cq->queue, QUEUE_TYPE_FROM_DRIVER);
+ cqe = queue_head(cq->queue, QUEUE_TYPE_TO_ULP);
if (!cqe)
break;
memcpy(wc++, &cqe->ibwc, sizeof(*wc));
- queue_advance_consumer(cq->queue, QUEUE_TYPE_FROM_DRIVER);
+ queue_advance_consumer(cq->queue, QUEUE_TYPE_TO_ULP);
}
spin_unlock_irqrestore(&cq->cq_lock, flags);
@@ -838,7 +838,7 @@ static int rxe_peek_cq(struct ib_cq *ibcq, int wc_cnt)
struct rxe_cq *cq = to_rcq(ibcq);
int count;
- count = queue_count(cq->queue, QUEUE_TYPE_FROM_DRIVER);
+ count = queue_count(cq->queue, QUEUE_TYPE_TO_ULP);
return (count > wc_cnt) ? wc_cnt : count;
}
@@ -854,7 +854,7 @@ static int rxe_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
if (cq->notify != IB_CQ_NEXT_COMP)
cq->notify = flags & IB_CQ_SOLICITED_MASK;
- empty = queue_empty(cq->queue, QUEUE_TYPE_FROM_DRIVER);
+ empty = queue_empty(cq->queue, QUEUE_TYPE_TO_ULP);
if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && !empty)
ret = 1;
@@ -869,10 +869,17 @@ static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
struct rxe_dev *rxe = to_rdev(ibpd->device);
struct rxe_pd *pd = to_rpd(ibpd);
struct rxe_mr *mr;
+ int err;
- mr = rxe_alloc(&rxe->mr_pool);
- if (!mr)
- return ERR_PTR(-ENOMEM);
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ err = rxe_add_to_pool(&rxe->mr_pool, mr);
+ if (err)
+ goto err_free;
rxe_get(pd);
mr->ibmr.pd = ibpd;
@@ -880,8 +887,12 @@ static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
rxe_mr_init_dma(access, mr);
rxe_finalize(mr);
-
return &mr->ibmr;
+
+err_free:
+ kfree(mr);
+err_out:
+ return ERR_PTR(err);
}
static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
@@ -895,9 +906,15 @@ static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
struct rxe_pd *pd = to_rpd(ibpd);
struct rxe_mr *mr;
- mr = rxe_alloc(&rxe->mr_pool);
- if (!mr)
- return ERR_PTR(-ENOMEM);
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ err = rxe_add_to_pool(&rxe->mr_pool, mr);
+ if (err)
+ goto err_free;
rxe_get(pd);
mr->ibmr.pd = ibpd;
@@ -905,14 +922,16 @@ static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
err = rxe_mr_init_user(rxe, start, length, iova, access, mr);
if (err)
- goto err1;
+ goto err_cleanup;
rxe_finalize(mr);
-
return &mr->ibmr;
-err1:
+err_cleanup:
rxe_cleanup(mr);
+err_free:
+ kfree(mr);
+err_out:
return ERR_PTR(err);
}
@@ -927,9 +946,15 @@ static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
if (mr_type != IB_MR_TYPE_MEM_REG)
return ERR_PTR(-EINVAL);
- mr = rxe_alloc(&rxe->mr_pool);
- if (!mr)
- return ERR_PTR(-ENOMEM);
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ err = rxe_add_to_pool(&rxe->mr_pool, mr);
+ if (err)
+ goto err_free;
rxe_get(pd);
mr->ibmr.pd = ibpd;
@@ -937,53 +962,19 @@ static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
err = rxe_mr_init_fast(max_num_sg, mr);
if (err)
- goto err1;
+ goto err_cleanup;
rxe_finalize(mr);
-
return &mr->ibmr;
-err1:
+err_cleanup:
rxe_cleanup(mr);
+err_free:
+ kfree(mr);
+err_out:
return ERR_PTR(err);
}
-static int rxe_set_page(struct ib_mr *ibmr, u64 addr)
-{
- struct rxe_mr *mr = to_rmr(ibmr);
- struct rxe_map *map;
- struct rxe_phys_buf *buf;
-
- if (unlikely(mr->nbuf == mr->num_buf))
- return -ENOMEM;
-
- map = mr->map[mr->nbuf / RXE_BUF_PER_MAP];
- buf = &map->buf[mr->nbuf % RXE_BUF_PER_MAP];
-
- buf->addr = addr;
- buf->size = ibmr->page_size;
- mr->nbuf++;
-
- return 0;
-}
-
-static int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
- int sg_nents, unsigned int *sg_offset)
-{
- struct rxe_mr *mr = to_rmr(ibmr);
- int n;
-
- mr->nbuf = 0;
-
- n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rxe_set_page);
-
- mr->page_shift = ilog2(ibmr->page_size);
- mr->page_mask = ibmr->page_size - 1;
- mr->offset = ibmr->iova & mr->page_mask;
-
- return n;
-}
-
static ssize_t parent_show(struct device *device,
struct device_attribute *attr, char *buf)
{
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h
index 19ddfa890480..c269ae2a3224 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.h
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.h
@@ -283,17 +283,6 @@ enum rxe_mr_lookup_type {
RXE_LOOKUP_REMOTE,
};
-#define RXE_BUF_PER_MAP (PAGE_SIZE / sizeof(struct rxe_phys_buf))
-
-struct rxe_phys_buf {
- u64 addr;
- u64 size;
-};
-
-struct rxe_map {
- struct rxe_phys_buf buf[RXE_BUF_PER_MAP];
-};
-
static inline int rkey_is_mw(u32 rkey)
{
u32 index = rkey >> 8;
@@ -310,25 +299,24 @@ struct rxe_mr {
u32 lkey;
u32 rkey;
enum rxe_mr_state state;
- u32 offset;
int access;
+ atomic_t num_mw;
- int page_shift;
- int page_mask;
- int map_shift;
- int map_mask;
+ unsigned int page_offset;
+ unsigned int page_shift;
+ u64 page_mask;
u32 num_buf;
u32 nbuf;
- u32 max_buf;
- u32 num_map;
-
- atomic_t num_mw;
-
- struct rxe_map **map;
+ struct xarray page_list;
};
+static inline unsigned int mr_page_size(struct rxe_mr *mr)
+{
+ return mr ? mr->ibmr.page_size : PAGE_SIZE;
+}
+
enum rxe_mw_state {
RXE_MW_STATE_INVALID = RXE_MR_STATE_INVALID,
RXE_MW_STATE_FREE = RXE_MR_STATE_FREE,
diff --git a/drivers/infiniband/sw/siw/siw_cm.c b/drivers/infiniband/sw/siw/siw_cm.c
index f88d2971c2c6..da530c0404da 100644
--- a/drivers/infiniband/sw/siw/siw_cm.c
+++ b/drivers/infiniband/sw/siw/siw_cm.c
@@ -16,6 +16,7 @@
#include <net/tcp.h>
#include <linux/inet.h>
#include <linux/tcp.h>
+#include <trace/events/sock.h>
#include <rdma/iw_cm.h>
#include <rdma/ib_verbs.h>
@@ -109,6 +110,8 @@ static void siw_rtr_data_ready(struct sock *sk)
struct siw_qp *qp = NULL;
read_descriptor_t rd_desc;
+ trace_sk_data_ready(sk);
+
read_lock(&sk->sk_callback_lock);
cep = sk_to_cep(sk);
@@ -1216,6 +1219,8 @@ static void siw_cm_llp_data_ready(struct sock *sk)
{
struct siw_cep *cep;
+ trace_sk_data_ready(sk);
+
read_lock(&sk->sk_callback_lock);
cep = sk_to_cep(sk);
diff --git a/drivers/infiniband/sw/siw/siw_mem.c b/drivers/infiniband/sw/siw/siw_mem.c
index b2b33dd3b4fa..f51ab2ccf151 100644
--- a/drivers/infiniband/sw/siw/siw_mem.c
+++ b/drivers/infiniband/sw/siw/siw_mem.c
@@ -398,7 +398,7 @@ struct siw_umem *siw_umem_get(u64 start, u64 len, bool writable)
mlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
- if (num_pages + atomic64_read(&mm_s->pinned_vm) > mlock_limit) {
+ if (atomic64_add_return(num_pages, &mm_s->pinned_vm) > mlock_limit) {
rv = -ENOMEM;
goto out_sem_up;
}
@@ -411,30 +411,27 @@ struct siw_umem *siw_umem_get(u64 start, u64 len, bool writable)
goto out_sem_up;
}
for (i = 0; num_pages; i++) {
- int got, nents = min_t(int, num_pages, PAGES_PER_CHUNK);
-
- umem->page_chunk[i].plist =
+ int nents = min_t(int, num_pages, PAGES_PER_CHUNK);
+ struct page **plist =
kcalloc(nents, sizeof(struct page *), GFP_KERNEL);
- if (!umem->page_chunk[i].plist) {
+
+ if (!plist) {
rv = -ENOMEM;
goto out_sem_up;
}
- got = 0;
+ umem->page_chunk[i].plist = plist;
while (nents) {
- struct page **plist = &umem->page_chunk[i].plist[got];
-
rv = pin_user_pages(first_page_va, nents, foll_flags,
plist, NULL);
if (rv < 0)
goto out_sem_up;
umem->num_pages += rv;
- atomic64_add(rv, &mm_s->pinned_vm);
first_page_va += rv * PAGE_SIZE;
+ plist += rv;
nents -= rv;
- got += rv;
+ num_pages -= rv;
}
- num_pages -= got;
}
out_sem_up:
mmap_read_unlock(mm_s);
@@ -442,6 +439,10 @@ out_sem_up:
if (rv > 0)
return umem;
+ /* Adjust accounting for pages not pinned */
+ if (num_pages)
+ atomic64_sub(num_pages, &mm_s->pinned_vm);
+
siw_umem_release(umem, false);
return ERR_PTR(rv);
diff --git a/drivers/infiniband/sw/siw/siw_qp.c b/drivers/infiniband/sw/siw/siw_qp.c
index e6f634971228..81e9bbd9ebda 100644
--- a/drivers/infiniband/sw/siw/siw_qp.c
+++ b/drivers/infiniband/sw/siw/siw_qp.c
@@ -10,6 +10,7 @@
#include <linux/llist.h>
#include <asm/barrier.h>
#include <net/tcp.h>
+#include <trace/events/sock.h>
#include "siw.h"
#include "siw_verbs.h"
@@ -94,6 +95,8 @@ void siw_qp_llp_data_ready(struct sock *sk)
{
struct siw_qp *qp;
+ trace_sk_data_ready(sk);
+
read_lock(&sk->sk_callback_lock);
if (unlikely(!sk->sk_user_data || !sk_to_qp(sk)))
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index ac25fc80fb33..cf8b0822f5c8 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -742,7 +742,7 @@ void ipoib_flush_paths(struct net_device *dev)
static void path_rec_completion(int status,
struct sa_path_rec *pathrec,
- int num_prs, void *path_ptr)
+ unsigned int num_prs, void *path_ptr)
{
struct ipoib_path *path = path_ptr;
struct net_device *dev = path->dev;
@@ -2200,6 +2200,14 @@ int ipoib_intf_init(struct ib_device *hca, u32 port, const char *name,
rn->attach_mcast = ipoib_mcast_attach;
rn->detach_mcast = ipoib_mcast_detach;
rn->hca = hca;
+
+ rc = netif_set_real_num_tx_queues(dev, 1);
+ if (rc)
+ goto out;
+
+ rc = netif_set_real_num_rx_queues(dev, 1);
+ if (rc)
+ goto out;
}
priv->rn_ops = dev->netdev_ops;
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 620ae5b2d80d..6b7603765383 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -446,7 +446,7 @@ iscsi_iser_conn_create(struct iscsi_cls_session *cls_session,
* @is_leading: indicate if this is the session leading connection (MCS)
*
* Return: zero on success, $error if iscsi_conn_bind fails and
- * -EINVAL in case end-point doesn't exsits anymore or iser connection
+ * -EINVAL in case end-point doesn't exists anymore or iser connection
* state is not UP (teardown already started).
*/
static int iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c b/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c
index c76ba29da1e2..5adba0f754b6 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c
@@ -312,9 +312,8 @@ void rtrs_srv_destroy_path_files(struct rtrs_srv_path *srv_path)
if (srv_path->kobj.state_in_sysfs) {
sysfs_remove_group(&srv_path->kobj, &rtrs_srv_path_attr_group);
- kobject_del(&srv_path->kobj);
kobject_put(&srv_path->kobj);
+ rtrs_srv_destroy_once_sysfs_root_folders(srv_path);
}
- rtrs_srv_destroy_once_sysfs_root_folders(srv_path);
}
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index b4d6a4a5ae81..df21b30b7735 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -699,7 +699,7 @@ static void srp_free_ch_ib(struct srp_target_port *target,
static void srp_path_rec_completion(int status,
struct sa_path_rec *pathrec,
- int num_paths, void *ch_ptr)
+ unsigned int num_paths, void *ch_ptr)
{
struct srp_rdma_ch *ch = ch_ptr;
struct srp_target_port *target = ch->target;
diff --git a/drivers/input/input.c b/drivers/input/input.c
index ca2e3dd7188b..0336e799d713 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -1372,7 +1372,7 @@ INPUT_DEV_STRING_ATTR_SHOW(phys);
INPUT_DEV_STRING_ATTR_SHOW(uniq);
static int input_print_modalias_bits(char *buf, int size,
- char name, unsigned long *bm,
+ char name, const unsigned long *bm,
unsigned int min_bit, unsigned int max_bit)
{
int len = 0, i;
@@ -1384,7 +1384,7 @@ static int input_print_modalias_bits(char *buf, int size,
return len;
}
-static int input_print_modalias(char *buf, int size, struct input_dev *id,
+static int input_print_modalias(char *buf, int size, const struct input_dev *id,
int add_cr)
{
int len;
@@ -1432,7 +1432,7 @@ static ssize_t input_dev_show_modalias(struct device *dev,
}
static DEVICE_ATTR(modalias, S_IRUGO, input_dev_show_modalias, NULL);
-static int input_print_bitmap(char *buf, int buf_size, unsigned long *bitmap,
+static int input_print_bitmap(char *buf, int buf_size, const unsigned long *bitmap,
int max, int add_cr);
static ssize_t input_dev_show_properties(struct device *dev,
@@ -1524,7 +1524,7 @@ static const struct attribute_group input_dev_id_attr_group = {
.attrs = input_dev_id_attrs,
};
-static int input_print_bitmap(char *buf, int buf_size, unsigned long *bitmap,
+static int input_print_bitmap(char *buf, int buf_size, const unsigned long *bitmap,
int max, int add_cr)
{
int i;
@@ -1621,7 +1621,7 @@ static void input_dev_release(struct device *device)
* device bitfields.
*/
static int input_add_uevent_bm_var(struct kobj_uevent_env *env,
- const char *name, unsigned long *bitmap, int max)
+ const char *name, const unsigned long *bitmap, int max)
{
int len;
@@ -1639,7 +1639,7 @@ static int input_add_uevent_bm_var(struct kobj_uevent_env *env,
}
static int input_add_uevent_modalias_var(struct kobj_uevent_env *env,
- struct input_dev *dev)
+ const struct input_dev *dev)
{
int len;
@@ -1677,9 +1677,9 @@ static int input_add_uevent_modalias_var(struct kobj_uevent_env *env,
return err; \
} while (0)
-static int input_dev_uevent(struct device *device, struct kobj_uevent_env *env)
+static int input_dev_uevent(const struct device *device, struct kobj_uevent_env *env)
{
- struct input_dev *dev = to_input_dev(device);
+ const struct input_dev *dev = to_input_dev(device);
INPUT_ADD_HOTPLUG_VAR("PRODUCT=%x/%x/%x/%x",
dev->id.bustype, dev->id.vendor,
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index 84490915ae4d..d98650426dc2 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -550,15 +550,6 @@ config KEYBOARD_PXA27x
To compile this driver as a module, choose M here: the
module will be called pxa27x_keypad.
-config KEYBOARD_PXA930_ROTARY
- tristate "PXA930/PXA935 Enhanced Rotary Controller Support"
- depends on CPU_PXA930 || CPU_PXA935
- help
- Enable support for PXA930/PXA935 Enhanced Rotary Controller.
-
- To compile this driver as a module, choose M here: the
- module will be called pxa930_rotary.
-
config KEYBOARD_PMIC8XXX
tristate "Qualcomm PMIC8XXX keypad support"
depends on MFD_PM8XXX
@@ -657,16 +648,6 @@ config KEYBOARD_SUN4I_LRADC
To compile this driver as a module, choose M here: the
module will be called sun4i-lradc-keys.
-config KEYBOARD_DAVINCI
- tristate "TI DaVinci Key Scan"
- depends on ARCH_DAVINCI_DM365
- help
- Say Y to enable keypad module support for the TI DaVinci
- platforms (DM365).
-
- To compile this driver as a module, choose M here: the
- module will be called davinci_keyscan.
-
config KEYBOARD_IPAQ_MICRO
tristate "Buttons on Micro SoC (iPaq h3100,h3600,h3700)"
depends on MFD_IPAQ_MICRO
diff --git a/drivers/input/keyboard/Makefile b/drivers/input/keyboard/Makefile
index 5f67196bb2c1..aecef00c5d09 100644
--- a/drivers/input/keyboard/Makefile
+++ b/drivers/input/keyboard/Makefile
@@ -18,7 +18,6 @@ obj-$(CONFIG_KEYBOARD_CAP11XX) += cap11xx.o
obj-$(CONFIG_KEYBOARD_CLPS711X) += clps711x-keypad.o
obj-$(CONFIG_KEYBOARD_CROS_EC) += cros_ec_keyb.o
obj-$(CONFIG_KEYBOARD_CYPRESS_SF) += cypress-sf.o
-obj-$(CONFIG_KEYBOARD_DAVINCI) += davinci_keyscan.o
obj-$(CONFIG_KEYBOARD_DLINK_DIR685) += dlink-dir685-touchkeys.o
obj-$(CONFIG_KEYBOARD_EP93XX) += ep93xx_keypad.o
obj-$(CONFIG_KEYBOARD_GOLDFISH_EVENTS) += goldfish_events.o
@@ -55,7 +54,6 @@ obj-$(CONFIG_KEYBOARD_OPENCORES) += opencores-kbd.o
obj-$(CONFIG_KEYBOARD_PINEPHONE) += pinephone-keyboard.o
obj-$(CONFIG_KEYBOARD_PMIC8XXX) += pmic8xxx-keypad.o
obj-$(CONFIG_KEYBOARD_PXA27x) += pxa27x_keypad.o
-obj-$(CONFIG_KEYBOARD_PXA930_ROTARY) += pxa930_rotary.o
obj-$(CONFIG_KEYBOARD_QT1050) += qt1050.o
obj-$(CONFIG_KEYBOARD_QT1070) += qt1070.o
obj-$(CONFIG_KEYBOARD_QT2160) += qt2160.o
diff --git a/drivers/input/keyboard/davinci_keyscan.c b/drivers/input/keyboard/davinci_keyscan.c
deleted file mode 100644
index f489cd585b33..000000000000
--- a/drivers/input/keyboard/davinci_keyscan.c
+++ /dev/null
@@ -1,315 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * DaVinci Key Scan Driver for TI platforms
- *
- * Copyright (C) 2009 Texas Instruments, Inc
- *
- * Author: Miguel Aguilar <miguel.aguilar@ridgerun.com>
- *
- * Initial Code: Sandeep Paulraj <s-paulraj@ti.com>
- */
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/types.h>
-#include <linux/input.h>
-#include <linux/kernel.h>
-#include <linux/delay.h>
-#include <linux/platform_device.h>
-#include <linux/errno.h>
-#include <linux/slab.h>
-
-#include <linux/platform_data/keyscan-davinci.h>
-
-/* Key scan registers */
-#define DAVINCI_KEYSCAN_KEYCTRL 0x0000
-#define DAVINCI_KEYSCAN_INTENA 0x0004
-#define DAVINCI_KEYSCAN_INTFLAG 0x0008
-#define DAVINCI_KEYSCAN_INTCLR 0x000c
-#define DAVINCI_KEYSCAN_STRBWIDTH 0x0010
-#define DAVINCI_KEYSCAN_INTERVAL 0x0014
-#define DAVINCI_KEYSCAN_CONTTIME 0x0018
-#define DAVINCI_KEYSCAN_CURRENTST 0x001c
-#define DAVINCI_KEYSCAN_PREVSTATE 0x0020
-#define DAVINCI_KEYSCAN_EMUCTRL 0x0024
-#define DAVINCI_KEYSCAN_IODFTCTRL 0x002c
-
-/* Key Control Register (KEYCTRL) */
-#define DAVINCI_KEYSCAN_KEYEN 0x00000001
-#define DAVINCI_KEYSCAN_PREVMODE 0x00000002
-#define DAVINCI_KEYSCAN_CHATOFF 0x00000004
-#define DAVINCI_KEYSCAN_AUTODET 0x00000008
-#define DAVINCI_KEYSCAN_SCANMODE 0x00000010
-#define DAVINCI_KEYSCAN_OUTTYPE 0x00000020
-
-/* Masks for the interrupts */
-#define DAVINCI_KEYSCAN_INT_CONT 0x00000008
-#define DAVINCI_KEYSCAN_INT_OFF 0x00000004
-#define DAVINCI_KEYSCAN_INT_ON 0x00000002
-#define DAVINCI_KEYSCAN_INT_CHANGE 0x00000001
-#define DAVINCI_KEYSCAN_INT_ALL 0x0000000f
-
-struct davinci_ks {
- struct input_dev *input;
- struct davinci_ks_platform_data *pdata;
- int irq;
- void __iomem *base;
- resource_size_t pbase;
- size_t base_size;
- unsigned short keymap[];
-};
-
-/* Initializing the kp Module */
-static int __init davinci_ks_initialize(struct davinci_ks *davinci_ks)
-{
- struct device *dev = &davinci_ks->input->dev;
- struct davinci_ks_platform_data *pdata = davinci_ks->pdata;
- u32 matrix_ctrl;
-
- /* Enable all interrupts */
- __raw_writel(DAVINCI_KEYSCAN_INT_ALL,
- davinci_ks->base + DAVINCI_KEYSCAN_INTENA);
-
- /* Clear interrupts if any */
- __raw_writel(DAVINCI_KEYSCAN_INT_ALL,
- davinci_ks->base + DAVINCI_KEYSCAN_INTCLR);
-
- /* Setup the scan period = strobe + interval */
- __raw_writel(pdata->strobe,
- davinci_ks->base + DAVINCI_KEYSCAN_STRBWIDTH);
- __raw_writel(pdata->interval,
- davinci_ks->base + DAVINCI_KEYSCAN_INTERVAL);
- __raw_writel(0x01,
- davinci_ks->base + DAVINCI_KEYSCAN_CONTTIME);
-
- /* Define matrix type */
- switch (pdata->matrix_type) {
- case DAVINCI_KEYSCAN_MATRIX_4X4:
- matrix_ctrl = 0;
- break;
- case DAVINCI_KEYSCAN_MATRIX_5X3:
- matrix_ctrl = (1 << 6);
- break;
- default:
- dev_err(dev->parent, "wrong matrix type\n");
- return -EINVAL;
- }
-
- /* Enable key scan module and set matrix type */
- __raw_writel(DAVINCI_KEYSCAN_AUTODET | DAVINCI_KEYSCAN_KEYEN |
- matrix_ctrl, davinci_ks->base + DAVINCI_KEYSCAN_KEYCTRL);
-
- return 0;
-}
-
-static irqreturn_t davinci_ks_interrupt(int irq, void *dev_id)
-{
- struct davinci_ks *davinci_ks = dev_id;
- struct device *dev = &davinci_ks->input->dev;
- unsigned short *keymap = davinci_ks->keymap;
- int keymapsize = davinci_ks->pdata->keymapsize;
- u32 prev_status, new_status, changed;
- bool release;
- int keycode = KEY_UNKNOWN;
- int i;
-
- /* Disable interrupt */
- __raw_writel(0x0, davinci_ks->base + DAVINCI_KEYSCAN_INTENA);
-
- /* Reading previous and new status of the key scan */
- prev_status = __raw_readl(davinci_ks->base + DAVINCI_KEYSCAN_PREVSTATE);
- new_status = __raw_readl(davinci_ks->base + DAVINCI_KEYSCAN_CURRENTST);
-
- changed = prev_status ^ new_status;
-
- if (changed) {
- /*
- * It goes through all bits in 'changed' to ensure
- * that no key changes are being missed
- */
- for (i = 0 ; i < keymapsize; i++) {
- if ((changed>>i) & 0x1) {
- keycode = keymap[i];
- release = (new_status >> i) & 0x1;
- dev_dbg(dev->parent, "key %d %s\n", keycode,
- release ? "released" : "pressed");
- input_report_key(davinci_ks->input, keycode,
- !release);
- input_sync(davinci_ks->input);
- }
- }
- /* Clearing interrupt */
- __raw_writel(DAVINCI_KEYSCAN_INT_ALL,
- davinci_ks->base + DAVINCI_KEYSCAN_INTCLR);
- }
-
- /* Enable interrupts */
- __raw_writel(0x1, davinci_ks->base + DAVINCI_KEYSCAN_INTENA);
-
- return IRQ_HANDLED;
-}
-
-static int __init davinci_ks_probe(struct platform_device *pdev)
-{
- struct davinci_ks *davinci_ks;
- struct input_dev *key_dev;
- struct resource *res, *mem;
- struct device *dev = &pdev->dev;
- struct davinci_ks_platform_data *pdata = dev_get_platdata(dev);
- int error, i;
-
- if (pdata->device_enable) {
- error = pdata->device_enable(dev);
- if (error < 0) {
- dev_dbg(dev, "device enable function failed\n");
- return error;
- }
- }
-
- if (!pdata->keymap) {
- dev_dbg(dev, "no keymap from pdata\n");
- return -EINVAL;
- }
-
- davinci_ks = kzalloc(sizeof(struct davinci_ks) +
- sizeof(unsigned short) * pdata->keymapsize, GFP_KERNEL);
- if (!davinci_ks) {
- dev_dbg(dev, "could not allocate memory for private data\n");
- return -ENOMEM;
- }
-
- memcpy(davinci_ks->keymap, pdata->keymap,
- sizeof(unsigned short) * pdata->keymapsize);
-
- key_dev = input_allocate_device();
- if (!key_dev) {
- dev_dbg(dev, "could not allocate input device\n");
- error = -ENOMEM;
- goto fail1;
- }
-
- davinci_ks->input = key_dev;
-
- davinci_ks->irq = platform_get_irq(pdev, 0);
- if (davinci_ks->irq < 0) {
- error = davinci_ks->irq;
- goto fail2;
- }
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(dev, "no mem resource\n");
- error = -EINVAL;
- goto fail2;
- }
-
- davinci_ks->pbase = res->start;
- davinci_ks->base_size = resource_size(res);
-
- mem = request_mem_region(davinci_ks->pbase, davinci_ks->base_size,
- pdev->name);
- if (!mem) {
- dev_err(dev, "key scan registers at %08x are not free\n",
- davinci_ks->pbase);
- error = -EBUSY;
- goto fail2;
- }
-
- davinci_ks->base = ioremap(davinci_ks->pbase, davinci_ks->base_size);
- if (!davinci_ks->base) {
- dev_err(dev, "can't ioremap MEM resource.\n");
- error = -ENOMEM;
- goto fail3;
- }
-
- /* Enable auto repeat feature of Linux input subsystem */
- if (pdata->rep)
- __set_bit(EV_REP, key_dev->evbit);
-
- /* Setup input device */
- __set_bit(EV_KEY, key_dev->evbit);
-
- /* Setup the platform data */
- davinci_ks->pdata = pdata;
-
- for (i = 0; i < davinci_ks->pdata->keymapsize; i++)
- __set_bit(davinci_ks->pdata->keymap[i], key_dev->keybit);
-
- key_dev->name = "davinci_keyscan";
- key_dev->phys = "davinci_keyscan/input0";
- key_dev->dev.parent = dev;
- key_dev->id.bustype = BUS_HOST;
- key_dev->id.vendor = 0x0001;
- key_dev->id.product = 0x0001;
- key_dev->id.version = 0x0001;
- key_dev->keycode = davinci_ks->keymap;
- key_dev->keycodesize = sizeof(davinci_ks->keymap[0]);
- key_dev->keycodemax = davinci_ks->pdata->keymapsize;
-
- error = input_register_device(davinci_ks->input);
- if (error < 0) {
- dev_err(dev, "unable to register davinci key scan device\n");
- goto fail4;
- }
-
- error = request_irq(davinci_ks->irq, davinci_ks_interrupt,
- 0, pdev->name, davinci_ks);
- if (error < 0) {
- dev_err(dev, "unable to register davinci key scan interrupt\n");
- goto fail5;
- }
-
- error = davinci_ks_initialize(davinci_ks);
- if (error < 0) {
- dev_err(dev, "unable to initialize davinci key scan device\n");
- goto fail6;
- }
-
- platform_set_drvdata(pdev, davinci_ks);
- return 0;
-
-fail6:
- free_irq(davinci_ks->irq, davinci_ks);
-fail5:
- input_unregister_device(davinci_ks->input);
- key_dev = NULL;
-fail4:
- iounmap(davinci_ks->base);
-fail3:
- release_mem_region(davinci_ks->pbase, davinci_ks->base_size);
-fail2:
- input_free_device(key_dev);
-fail1:
- kfree(davinci_ks);
-
- return error;
-}
-
-static int davinci_ks_remove(struct platform_device *pdev)
-{
- struct davinci_ks *davinci_ks = platform_get_drvdata(pdev);
-
- free_irq(davinci_ks->irq, davinci_ks);
-
- input_unregister_device(davinci_ks->input);
-
- iounmap(davinci_ks->base);
- release_mem_region(davinci_ks->pbase, davinci_ks->base_size);
-
- kfree(davinci_ks);
-
- return 0;
-}
-
-static struct platform_driver davinci_ks_driver = {
- .driver = {
- .name = "davinci_keyscan",
- },
- .remove = davinci_ks_remove,
-};
-
-module_platform_driver_probe(davinci_ks_driver, davinci_ks_probe);
-
-MODULE_AUTHOR("Miguel Aguilar");
-MODULE_DESCRIPTION("Texas Instruments DaVinci Key Scan Driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/input/keyboard/pxa930_rotary.c b/drivers/input/keyboard/pxa930_rotary.c
deleted file mode 100644
index 2fe9dcfe0a6f..000000000000
--- a/drivers/input/keyboard/pxa930_rotary.c
+++ /dev/null
@@ -1,195 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Driver for the enhanced rotary controller on pxa930 and pxa935
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/input.h>
-#include <linux/platform_device.h>
-#include <linux/io.h>
-#include <linux/slab.h>
-
-#include <linux/platform_data/keyboard-pxa930_rotary.h>
-
-#define SBCR (0x04)
-#define ERCR (0x0c)
-
-#define SBCR_ERSB (1 << 5)
-
-struct pxa930_rotary {
- struct input_dev *input_dev;
- void __iomem *mmio_base;
- int last_ercr;
-
- struct pxa930_rotary_platform_data *pdata;
-};
-
-static void clear_sbcr(struct pxa930_rotary *r)
-{
- uint32_t sbcr = __raw_readl(r->mmio_base + SBCR);
-
- __raw_writel(sbcr | SBCR_ERSB, r->mmio_base + SBCR);
- __raw_writel(sbcr & ~SBCR_ERSB, r->mmio_base + SBCR);
-}
-
-static irqreturn_t rotary_irq(int irq, void *dev_id)
-{
- struct pxa930_rotary *r = dev_id;
- struct pxa930_rotary_platform_data *pdata = r->pdata;
- int ercr, delta, key;
-
- ercr = __raw_readl(r->mmio_base + ERCR) & 0xf;
- clear_sbcr(r);
-
- delta = ercr - r->last_ercr;
- if (delta == 0)
- return IRQ_HANDLED;
-
- r->last_ercr = ercr;
-
- if (pdata->up_key && pdata->down_key) {
- key = (delta > 0) ? pdata->up_key : pdata->down_key;
- input_report_key(r->input_dev, key, 1);
- input_sync(r->input_dev);
- input_report_key(r->input_dev, key, 0);
- } else
- input_report_rel(r->input_dev, pdata->rel_code, delta);
-
- input_sync(r->input_dev);
-
- return IRQ_HANDLED;
-}
-
-static int pxa930_rotary_open(struct input_dev *dev)
-{
- struct pxa930_rotary *r = input_get_drvdata(dev);
-
- clear_sbcr(r);
-
- return 0;
-}
-
-static void pxa930_rotary_close(struct input_dev *dev)
-{
- struct pxa930_rotary *r = input_get_drvdata(dev);
-
- clear_sbcr(r);
-}
-
-static int pxa930_rotary_probe(struct platform_device *pdev)
-{
- struct pxa930_rotary_platform_data *pdata =
- dev_get_platdata(&pdev->dev);
- struct pxa930_rotary *r;
- struct input_dev *input_dev;
- struct resource *res;
- int irq;
- int err;
-
- irq = platform_get_irq(pdev, 0);
- if (irq < 0)
- return -ENXIO;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "no I/O memory defined\n");
- return -ENXIO;
- }
-
- if (!pdata) {
- dev_err(&pdev->dev, "no platform data defined\n");
- return -EINVAL;
- }
-
- r = kzalloc(sizeof(struct pxa930_rotary), GFP_KERNEL);
- if (!r)
- return -ENOMEM;
-
- r->mmio_base = ioremap(res->start, resource_size(res));
- if (r->mmio_base == NULL) {
- dev_err(&pdev->dev, "failed to remap IO memory\n");
- err = -ENXIO;
- goto failed_free;
- }
-
- r->pdata = pdata;
- platform_set_drvdata(pdev, r);
-
- /* allocate and register the input device */
- input_dev = input_allocate_device();
- if (!input_dev) {
- dev_err(&pdev->dev, "failed to allocate input device\n");
- err = -ENOMEM;
- goto failed_free_io;
- }
-
- input_dev->name = pdev->name;
- input_dev->id.bustype = BUS_HOST;
- input_dev->open = pxa930_rotary_open;
- input_dev->close = pxa930_rotary_close;
- input_dev->dev.parent = &pdev->dev;
-
- if (pdata->up_key && pdata->down_key) {
- __set_bit(pdata->up_key, input_dev->keybit);
- __set_bit(pdata->down_key, input_dev->keybit);
- __set_bit(EV_KEY, input_dev->evbit);
- } else {
- __set_bit(pdata->rel_code, input_dev->relbit);
- __set_bit(EV_REL, input_dev->evbit);
- }
-
- r->input_dev = input_dev;
- input_set_drvdata(input_dev, r);
-
- err = request_irq(irq, rotary_irq, 0,
- "enhanced rotary", r);
- if (err) {
- dev_err(&pdev->dev, "failed to request IRQ\n");
- goto failed_free_input;
- }
-
- err = input_register_device(input_dev);
- if (err) {
- dev_err(&pdev->dev, "failed to register input device\n");
- goto failed_free_irq;
- }
-
- return 0;
-
-failed_free_irq:
- free_irq(irq, r);
-failed_free_input:
- input_free_device(input_dev);
-failed_free_io:
- iounmap(r->mmio_base);
-failed_free:
- kfree(r);
- return err;
-}
-
-static int pxa930_rotary_remove(struct platform_device *pdev)
-{
- struct pxa930_rotary *r = platform_get_drvdata(pdev);
-
- free_irq(platform_get_irq(pdev, 0), r);
- input_unregister_device(r->input_dev);
- iounmap(r->mmio_base);
- kfree(r);
-
- return 0;
-}
-
-static struct platform_driver pxa930_rotary_driver = {
- .driver = {
- .name = "pxa930-rotary",
- },
- .probe = pxa930_rotary_probe,
- .remove = pxa930_rotary_remove,
-};
-module_platform_driver(pxa930_rotary_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Driver for PXA93x Enhanced Rotary Controller");
-MODULE_AUTHOR("Yao Yong <yaoyong@marvell.com>");
diff --git a/drivers/input/mouse/Kconfig b/drivers/input/mouse/Kconfig
index 63c9cda555c3..32cc4c62a716 100644
--- a/drivers/input/mouse/Kconfig
+++ b/drivers/input/mouse/Kconfig
@@ -393,12 +393,6 @@ config MOUSE_GPIO
To compile this driver as a module, choose M here: the
module will be called gpio_mouse.
-config MOUSE_PXA930_TRKBALL
- tristate "PXA930 Trackball mouse"
- depends on CPU_PXA930 || CPU_PXA935
- help
- Say Y here to support PXA930 Trackball mouse.
-
config MOUSE_MAPLE
tristate "Maple mouse (for the Dreamcast)"
depends on MAPLE
diff --git a/drivers/input/mouse/Makefile b/drivers/input/mouse/Makefile
index e49f08565076..92b3204ce84e 100644
--- a/drivers/input/mouse/Makefile
+++ b/drivers/input/mouse/Makefile
@@ -18,7 +18,6 @@ obj-$(CONFIG_MOUSE_MAPLE) += maplemouse.o
obj-$(CONFIG_MOUSE_NAVPOINT_PXA27x) += navpoint.o
obj-$(CONFIG_MOUSE_PC110PAD) += pc110pad.o
obj-$(CONFIG_MOUSE_PS2) += psmouse.o
-obj-$(CONFIG_MOUSE_PXA930_TRKBALL) += pxa930_trkball.o
obj-$(CONFIG_MOUSE_RISCPC) += rpcmouse.o
obj-$(CONFIG_MOUSE_SERIAL) += sermouse.o
obj-$(CONFIG_MOUSE_SYNAPTICS_I2C) += synaptics_i2c.o
diff --git a/drivers/input/mouse/pxa930_trkball.c b/drivers/input/mouse/pxa930_trkball.c
deleted file mode 100644
index f04ba12dbfa8..000000000000
--- a/drivers/input/mouse/pxa930_trkball.c
+++ /dev/null
@@ -1,250 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * PXA930 track ball mouse driver
- *
- * Copyright (C) 2007 Marvell International Ltd.
- * 2008-02-28: Yong Yao <yaoyong@marvell.com>
- * initial version
- */
-
-#include <linux/input.h>
-#include <linux/interrupt.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/delay.h>
-#include <linux/io.h>
-#include <linux/slab.h>
-
-#include <linux/platform_data/mouse-pxa930_trkball.h>
-
-/* Trackball Controller Register Definitions */
-#define TBCR (0x000C)
-#define TBCNTR (0x0010)
-#define TBSBC (0x0014)
-
-#define TBCR_TBRST (1 << 1)
-#define TBCR_TBSB (1 << 10)
-
-#define TBCR_Y_FLT(n) (((n) & 0xf) << 6)
-#define TBCR_X_FLT(n) (((n) & 0xf) << 2)
-
-#define TBCNTR_YM(n) (((n) >> 24) & 0xff)
-#define TBCNTR_YP(n) (((n) >> 16) & 0xff)
-#define TBCNTR_XM(n) (((n) >> 8) & 0xff)
-#define TBCNTR_XP(n) ((n) & 0xff)
-
-#define TBSBC_TBSBC (0x1)
-
-struct pxa930_trkball {
- struct pxa930_trkball_platform_data *pdata;
-
- /* Memory Mapped Register */
- struct resource *mem;
- void __iomem *mmio_base;
-
- struct input_dev *input;
-};
-
-static irqreturn_t pxa930_trkball_interrupt(int irq, void *dev_id)
-{
- struct pxa930_trkball *trkball = dev_id;
- struct input_dev *input = trkball->input;
- int tbcntr, x, y;
-
- /* According to the spec software must read TBCNTR twice:
- * if the read value is the same, the reading is valid
- */
- tbcntr = __raw_readl(trkball->mmio_base + TBCNTR);
-
- if (tbcntr == __raw_readl(trkball->mmio_base + TBCNTR)) {
- x = (TBCNTR_XP(tbcntr) - TBCNTR_XM(tbcntr)) / 2;
- y = (TBCNTR_YP(tbcntr) - TBCNTR_YM(tbcntr)) / 2;
-
- input_report_rel(input, REL_X, x);
- input_report_rel(input, REL_Y, y);
- input_sync(input);
- }
-
- __raw_writel(TBSBC_TBSBC, trkball->mmio_base + TBSBC);
- __raw_writel(0, trkball->mmio_base + TBSBC);
-
- return IRQ_HANDLED;
-}
-
-/* For TBCR, we need to wait for a while to make sure it has been modified. */
-static int write_tbcr(struct pxa930_trkball *trkball, int v)
-{
- int i = 100;
-
- __raw_writel(v, trkball->mmio_base + TBCR);
-
- while (--i) {
- if (__raw_readl(trkball->mmio_base + TBCR) == v)
- break;
- msleep(1);
- }
-
- if (i == 0) {
- pr_err("%s: timed out writing TBCR(%x)!\n", __func__, v);
- return -ETIMEDOUT;
- }
-
- return 0;
-}
-
-static void pxa930_trkball_config(struct pxa930_trkball *trkball)
-{
- uint32_t tbcr;
-
- /* According to spec, need to write the filters of x,y to 0xf first! */
- tbcr = __raw_readl(trkball->mmio_base + TBCR);
- write_tbcr(trkball, tbcr | TBCR_X_FLT(0xf) | TBCR_Y_FLT(0xf));
- write_tbcr(trkball, TBCR_X_FLT(trkball->pdata->x_filter) |
- TBCR_Y_FLT(trkball->pdata->y_filter));
-
- /* According to spec, set TBCR_TBRST first, before clearing it! */
- tbcr = __raw_readl(trkball->mmio_base + TBCR);
- write_tbcr(trkball, tbcr | TBCR_TBRST);
- write_tbcr(trkball, tbcr & ~TBCR_TBRST);
-
- __raw_writel(TBSBC_TBSBC, trkball->mmio_base + TBSBC);
- __raw_writel(0, trkball->mmio_base + TBSBC);
-
- pr_debug("%s: final TBCR=%x!\n", __func__,
- __raw_readl(trkball->mmio_base + TBCR));
-}
-
-static int pxa930_trkball_open(struct input_dev *dev)
-{
- struct pxa930_trkball *trkball = input_get_drvdata(dev);
-
- pxa930_trkball_config(trkball);
-
- return 0;
-}
-
-static void pxa930_trkball_disable(struct pxa930_trkball *trkball)
-{
- uint32_t tbcr = __raw_readl(trkball->mmio_base + TBCR);
-
- /* Held in reset, gate the 32-KHz input clock off */
- write_tbcr(trkball, tbcr | TBCR_TBRST);
-}
-
-static void pxa930_trkball_close(struct input_dev *dev)
-{
- struct pxa930_trkball *trkball = input_get_drvdata(dev);
-
- pxa930_trkball_disable(trkball);
-}
-
-static int pxa930_trkball_probe(struct platform_device *pdev)
-{
- struct pxa930_trkball *trkball;
- struct input_dev *input;
- struct resource *res;
- int irq, error;
-
- irq = platform_get_irq(pdev, 0);
- if (irq < 0)
- return -ENXIO;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "failed to get register memory\n");
- return -ENXIO;
- }
-
- trkball = kzalloc(sizeof(struct pxa930_trkball), GFP_KERNEL);
- if (!trkball)
- return -ENOMEM;
-
- trkball->pdata = dev_get_platdata(&pdev->dev);
- if (!trkball->pdata) {
- dev_err(&pdev->dev, "no platform data defined\n");
- error = -EINVAL;
- goto failed;
- }
-
- trkball->mmio_base = ioremap(res->start, resource_size(res));
- if (!trkball->mmio_base) {
- dev_err(&pdev->dev, "failed to ioremap registers\n");
- error = -ENXIO;
- goto failed;
- }
-
- /* held the module in reset, will be enabled in open() */
- pxa930_trkball_disable(trkball);
-
- error = request_irq(irq, pxa930_trkball_interrupt, 0,
- pdev->name, trkball);
- if (error) {
- dev_err(&pdev->dev, "failed to request irq: %d\n", error);
- goto failed_free_io;
- }
-
- platform_set_drvdata(pdev, trkball);
-
- input = input_allocate_device();
- if (!input) {
- dev_err(&pdev->dev, "failed to allocate input device\n");
- error = -ENOMEM;
- goto failed_free_irq;
- }
-
- input->name = pdev->name;
- input->id.bustype = BUS_HOST;
- input->open = pxa930_trkball_open;
- input->close = pxa930_trkball_close;
- input->dev.parent = &pdev->dev;
- input_set_drvdata(input, trkball);
-
- trkball->input = input;
-
- input_set_capability(input, EV_REL, REL_X);
- input_set_capability(input, EV_REL, REL_Y);
-
- error = input_register_device(input);
- if (error) {
- dev_err(&pdev->dev, "unable to register input device\n");
- goto failed_free_input;
- }
-
- return 0;
-
-failed_free_input:
- input_free_device(input);
-failed_free_irq:
- free_irq(irq, trkball);
-failed_free_io:
- iounmap(trkball->mmio_base);
-failed:
- kfree(trkball);
- return error;
-}
-
-static int pxa930_trkball_remove(struct platform_device *pdev)
-{
- struct pxa930_trkball *trkball = platform_get_drvdata(pdev);
- int irq = platform_get_irq(pdev, 0);
-
- input_unregister_device(trkball->input);
- free_irq(irq, trkball);
- iounmap(trkball->mmio_base);
- kfree(trkball);
-
- return 0;
-}
-
-static struct platform_driver pxa930_trkball_driver = {
- .driver = {
- .name = "pxa930-trkball",
- },
- .probe = pxa930_trkball_probe,
- .remove = pxa930_trkball_remove,
-};
-module_platform_driver(pxa930_trkball_driver);
-
-MODULE_AUTHOR("Yong Yao <yaoyong@marvell.com>");
-MODULE_DESCRIPTION("PXA930 Trackball Mouse Driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/input/serio/hyperv-keyboard.c b/drivers/input/serio/hyperv-keyboard.c
index d62aefb2e245..31def6ce5157 100644
--- a/drivers/input/serio/hyperv-keyboard.c
+++ b/drivers/input/serio/hyperv-keyboard.c
@@ -369,7 +369,7 @@ err_free_mem:
return error;
}
-static int hv_kbd_remove(struct hv_device *hv_dev)
+static void hv_kbd_remove(struct hv_device *hv_dev)
{
struct hv_kbd_dev *kbd_dev = hv_get_drvdata(hv_dev);
@@ -378,8 +378,6 @@ static int hv_kbd_remove(struct hv_device *hv_dev)
kfree(kbd_dev);
hv_set_drvdata(hv_dev, NULL);
-
- return 0;
}
static int hv_kbd_suspend(struct hv_device *hv_dev)
diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
index 15ce3202322f..767fc9efb4a8 100644
--- a/drivers/input/serio/serio.c
+++ b/drivers/input/serio/serio.c
@@ -895,9 +895,9 @@ static int serio_bus_match(struct device *dev, struct device_driver *drv)
return err; \
} while (0)
-static int serio_uevent(struct device *dev, struct kobj_uevent_env *env)
+static int serio_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
- struct serio *serio;
+ const struct serio *serio;
if (!dev)
return -ENODEV;
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index 68d99a112e14..1a2049b336a6 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -490,18 +490,6 @@ config TOUCHSCREEN_IPROC
To compile this driver as a module, choose M here: the
module will be called bcm_iproc_tsc.
-config TOUCHSCREEN_S3C2410
- tristate "Samsung S3C2410/generic touchscreen input driver"
- depends on ARCH_S3C24XX || SAMSUNG_DEV_TS
- depends on S3C_ADC
- help
- Say Y here if you have the s3c2410 touchscreen.
-
- If unsure, say N.
-
- To compile this driver as a module, choose M here: the
- module will be called s3c2410_ts.
-
config TOUCHSCREEN_S6SY761
tristate "Samsung S6SY761 Touchscreen driver"
depends on I2C
@@ -839,22 +827,6 @@ config TOUCHSCREEN_TI_AM335X_TSC
To compile this driver as a module, choose M here: the
module will be called ti_am335x_tsc.
-config TOUCHSCREEN_UCB1400
- tristate "Philips UCB1400 touchscreen"
- depends on AC97_BUS
- depends on UCB1400_CORE
- help
- This enables support for the Philips UCB1400 touchscreen interface.
- The UCB1400 is an AC97 audio codec. The touchscreen interface
- will be initialized only after the ALSA subsystem has been
- brought up and the UCB1400 detected. You therefore have to
- configure ALSA support as well (either built-in or modular,
- independently of whether this driver is itself built-in or
- modular) for this driver to work.
-
- To compile this driver as a module, choose M here: the
- module will be called ucb1400_ts.
-
config TOUCHSCREEN_PIXCIR
tristate "PIXCIR I2C touchscreens"
depends on I2C
@@ -940,20 +912,6 @@ config TOUCHSCREEN_WM97XX_MAINSTONE
To compile this driver as a module, choose M here: the
module will be called mainstone-wm97xx.
-config TOUCHSCREEN_WM97XX_ZYLONITE
- tristate "Zylonite accelerated touch"
- depends on TOUCHSCREEN_WM97XX && MACH_ZYLONITE
- depends on SND_PXA2XX_LIB_AC97
- select TOUCHSCREEN_WM9713
- help
- Say Y here for support for streaming mode with the touchscreen
- on Zylonite systems.
-
- If unsure, say N.
-
- To compile this driver as a module, choose M here: the
- module will be called zylonite-wm97xx.
-
config TOUCHSCREEN_USB_COMPOSITE
tristate "USB Touchscreen Driver"
depends on USB_ARCH_HAS_HCD
diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile
index 4968c370479a..f2fd28cc34a6 100644
--- a/drivers/input/touchscreen/Makefile
+++ b/drivers/input/touchscreen/Makefile
@@ -76,7 +76,6 @@ obj-$(CONFIG_TOUCHSCREEN_PCAP) += pcap_ts.o
obj-$(CONFIG_TOUCHSCREEN_PENMOUNT) += penmount.o
obj-$(CONFIG_TOUCHSCREEN_PIXCIR) += pixcir_i2c_ts.o
obj-$(CONFIG_TOUCHSCREEN_RM_TS) += raydium_i2c_ts.o
-obj-$(CONFIG_TOUCHSCREEN_S3C2410) += s3c2410_ts.o
obj-$(CONFIG_TOUCHSCREEN_S6SY761) += s6sy761.o
obj-$(CONFIG_TOUCHSCREEN_SILEAD) += silead.o
obj-$(CONFIG_TOUCHSCREEN_SIS_I2C) += sis_i2c.o
@@ -98,7 +97,6 @@ obj-$(CONFIG_TOUCHSCREEN_TSC2005) += tsc2005.o
tsc2007-y := tsc2007_core.o
tsc2007-$(CONFIG_TOUCHSCREEN_TSC2007_IIO) += tsc2007_iio.o
obj-$(CONFIG_TOUCHSCREEN_TSC2007) += tsc2007.o
-obj-$(CONFIG_TOUCHSCREEN_UCB1400) += ucb1400_ts.o
obj-$(CONFIG_TOUCHSCREEN_WACOM_W8001) += wacom_w8001.o
obj-$(CONFIG_TOUCHSCREEN_WACOM_I2C) += wacom_i2c.o
obj-$(CONFIG_TOUCHSCREEN_WDT87XX_I2C) += wdt87xx_i2c.o
@@ -108,7 +106,6 @@ wm97xx-ts-$(CONFIG_TOUCHSCREEN_WM9705) += wm9705.o
wm97xx-ts-$(CONFIG_TOUCHSCREEN_WM9712) += wm9712.o
wm97xx-ts-$(CONFIG_TOUCHSCREEN_WM9713) += wm9713.o
obj-$(CONFIG_TOUCHSCREEN_WM97XX_MAINSTONE) += mainstone-wm97xx.o
-obj-$(CONFIG_TOUCHSCREEN_WM97XX_ZYLONITE) += zylonite-wm97xx.o
obj-$(CONFIG_TOUCHSCREEN_SX8654) += sx8654.o
obj-$(CONFIG_TOUCHSCREEN_TPS6507X) += tps6507x-ts.o
obj-$(CONFIG_TOUCHSCREEN_ZET6223) += zet6223.o
diff --git a/drivers/input/touchscreen/mainstone-wm97xx.c b/drivers/input/touchscreen/mainstone-wm97xx.c
index c39f49720fe4..85b95ed461e7 100644
--- a/drivers/input/touchscreen/mainstone-wm97xx.c
+++ b/drivers/input/touchscreen/mainstone-wm97xx.c
@@ -187,16 +187,6 @@ static int wm97xx_acc_startup(struct wm97xx *wm)
"mainstone accelerated touchscreen driver, %d samples/sec\n",
cinfo[sp_idx].speed);
- /* IRQ driven touchscreen is used on Palm hardware */
- if (machine_is_palmt5() || machine_is_palmtx() || machine_is_palmld()) {
- pen_int = 1;
- /* There is some obscure mutant of WM9712 interbred with WM9713
- * used on Palm HW */
- wm->variant = WM97xx_WM1613;
- } else if (machine_is_zylonite()) {
- pen_int = 1;
- }
-
if (pen_int) {
gpiod_irq = gpiod_get(wm->dev, "touch", GPIOD_IN);
if (IS_ERR(gpiod_irq))
diff --git a/drivers/input/touchscreen/s3c2410_ts.c b/drivers/input/touchscreen/s3c2410_ts.c
deleted file mode 100644
index 2e70c0b79444..000000000000
--- a/drivers/input/touchscreen/s3c2410_ts.c
+++ /dev/null
@@ -1,464 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Samsung S3C24XX touchscreen driver
- *
- * Copyright 2004 Arnaud Patard <arnaud.patard@rtp-net.org>
- * Copyright 2008 Ben Dooks <ben-linux@fluff.org>
- * Copyright 2009 Simtec Electronics <linux@simtec.co.uk>
- *
- * Additional work by Herbert Pötzl <herbert@13thfloor.at> and
- * Harald Welte <laforge@openmoko.org>
- */
-
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/input.h>
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-#include <linux/platform_device.h>
-#include <linux/clk.h>
-#include <linux/io.h>
-
-#include <linux/soc/samsung/s3c-adc.h>
-#include <linux/platform_data/touchscreen-s3c2410.h>
-
-#define S3C2410_ADCCON (0x00)
-#define S3C2410_ADCTSC (0x04)
-#define S3C2410_ADCDLY (0x08)
-#define S3C2410_ADCDAT0 (0x0C)
-#define S3C2410_ADCDAT1 (0x10)
-#define S3C64XX_ADCUPDN (0x14)
-#define S3C2443_ADCMUX (0x18)
-#define S3C64XX_ADCCLRINT (0x18)
-#define S5P_ADCMUX (0x1C)
-#define S3C64XX_ADCCLRINTPNDNUP (0x20)
-
-/* ADCTSC Register Bits */
-#define S3C2443_ADCTSC_UD_SEN (1 << 8)
-#define S3C2410_ADCTSC_YM_SEN (1<<7)
-#define S3C2410_ADCTSC_YP_SEN (1<<6)
-#define S3C2410_ADCTSC_XM_SEN (1<<5)
-#define S3C2410_ADCTSC_XP_SEN (1<<4)
-#define S3C2410_ADCTSC_PULL_UP_DISABLE (1<<3)
-#define S3C2410_ADCTSC_AUTO_PST (1<<2)
-#define S3C2410_ADCTSC_XY_PST(x) (((x)&0x3)<<0)
-
-/* ADCDAT0 Bits */
-#define S3C2410_ADCDAT0_UPDOWN (1<<15)
-#define S3C2410_ADCDAT0_AUTO_PST (1<<14)
-#define S3C2410_ADCDAT0_XY_PST (0x3<<12)
-#define S3C2410_ADCDAT0_XPDATA_MASK (0x03FF)
-
-/* ADCDAT1 Bits */
-#define S3C2410_ADCDAT1_UPDOWN (1<<15)
-#define S3C2410_ADCDAT1_AUTO_PST (1<<14)
-#define S3C2410_ADCDAT1_XY_PST (0x3<<12)
-#define S3C2410_ADCDAT1_YPDATA_MASK (0x03FF)
-
-
-#define TSC_SLEEP (S3C2410_ADCTSC_PULL_UP_DISABLE | S3C2410_ADCTSC_XY_PST(0))
-
-#define INT_DOWN (0)
-#define INT_UP (1 << 8)
-
-#define WAIT4INT (S3C2410_ADCTSC_YM_SEN | \
- S3C2410_ADCTSC_YP_SEN | \
- S3C2410_ADCTSC_XP_SEN | \
- S3C2410_ADCTSC_XY_PST(3))
-
-#define AUTOPST (S3C2410_ADCTSC_YM_SEN | \
- S3C2410_ADCTSC_YP_SEN | \
- S3C2410_ADCTSC_XP_SEN | \
- S3C2410_ADCTSC_AUTO_PST | \
- S3C2410_ADCTSC_XY_PST(0))
-
-#define FEAT_PEN_IRQ (1 << 0) /* HAS ADCCLRINTPNDNUP */
-
-/* Per-touchscreen data. */
-
-/**
- * struct s3c2410ts - driver touchscreen state.
- * @client: The ADC client we registered with the core driver.
- * @dev: The device we are bound to.
- * @input: The input device we registered with the input subsystem.
- * @clock: The clock for the adc.
- * @io: Pointer to the IO base.
- * @xp: The accumulated X position data.
- * @yp: The accumulated Y position data.
- * @irq_tc: The interrupt number for pen up/down interrupt
- * @count: The number of samples collected.
- * @shift: The log2 of the maximum count to read in one go.
- * @features: The features supported by the TSADC MOdule.
- */
-struct s3c2410ts {
- struct s3c_adc_client *client;
- struct device *dev;
- struct input_dev *input;
- struct clk *clock;
- void __iomem *io;
- unsigned long xp;
- unsigned long yp;
- int irq_tc;
- int count;
- int shift;
- int features;
-};
-
-static struct s3c2410ts ts;
-
-/**
- * get_down - return the down state of the pen
- * @data0: The data read from ADCDAT0 register.
- * @data1: The data read from ADCDAT1 register.
- *
- * Return non-zero if both readings show that the pen is down.
- */
-static inline bool get_down(unsigned long data0, unsigned long data1)
-{
- /* returns true if both data values show stylus down */
- return (!(data0 & S3C2410_ADCDAT0_UPDOWN) &&
- !(data1 & S3C2410_ADCDAT0_UPDOWN));
-}
-
-static void touch_timer_fire(struct timer_list *unused)
-{
- unsigned long data0;
- unsigned long data1;
- bool down;
-
- data0 = readl(ts.io + S3C2410_ADCDAT0);
- data1 = readl(ts.io + S3C2410_ADCDAT1);
-
- down = get_down(data0, data1);
-
- if (down) {
- if (ts.count == (1 << ts.shift)) {
- ts.xp >>= ts.shift;
- ts.yp >>= ts.shift;
-
- dev_dbg(ts.dev, "%s: X=%lu, Y=%lu, count=%d\n",
- __func__, ts.xp, ts.yp, ts.count);
-
- input_report_abs(ts.input, ABS_X, ts.xp);
- input_report_abs(ts.input, ABS_Y, ts.yp);
-
- input_report_key(ts.input, BTN_TOUCH, 1);
- input_sync(ts.input);
-
- ts.xp = 0;
- ts.yp = 0;
- ts.count = 0;
- }
-
- s3c_adc_start(ts.client, 0, 1 << ts.shift);
- } else {
- ts.xp = 0;
- ts.yp = 0;
- ts.count = 0;
-
- input_report_key(ts.input, BTN_TOUCH, 0);
- input_sync(ts.input);
-
- writel(WAIT4INT | INT_DOWN, ts.io + S3C2410_ADCTSC);
- }
-}
-
-static DEFINE_TIMER(touch_timer, touch_timer_fire);
-
-/**
- * stylus_irq - touchscreen stylus event interrupt
- * @irq: The interrupt number
- * @dev_id: The device ID.
- *
- * Called when the IRQ_TC is fired for a pen up or down event.
- */
-static irqreturn_t stylus_irq(int irq, void *dev_id)
-{
- unsigned long data0;
- unsigned long data1;
- bool down;
-
- data0 = readl(ts.io + S3C2410_ADCDAT0);
- data1 = readl(ts.io + S3C2410_ADCDAT1);
-
- down = get_down(data0, data1);
-
- /* TODO we should never get an interrupt with down set while
- * the timer is running, but maybe we ought to verify that the
- * timer isn't running anyways. */
-
- if (down)
- s3c_adc_start(ts.client, 0, 1 << ts.shift);
- else
- dev_dbg(ts.dev, "%s: count=%d\n", __func__, ts.count);
-
- if (ts.features & FEAT_PEN_IRQ) {
- /* Clear pen down/up interrupt */
- writel(0x0, ts.io + S3C64XX_ADCCLRINTPNDNUP);
- }
-
- return IRQ_HANDLED;
-}
-
-/**
- * s3c24xx_ts_conversion - ADC conversion callback
- * @client: The client that was registered with the ADC core.
- * @data0: The reading from ADCDAT0.
- * @data1: The reading from ADCDAT1.
- * @left: The number of samples left.
- *
- * Called when a conversion has finished.
- */
-static void s3c24xx_ts_conversion(struct s3c_adc_client *client,
- unsigned data0, unsigned data1,
- unsigned *left)
-{
- dev_dbg(ts.dev, "%s: %d,%d\n", __func__, data0, data1);
-
- ts.xp += data0;
- ts.yp += data1;
-
- ts.count++;
-
- /* From tests, it seems that it is unlikely to get a pen-up
- * event during the conversion process which means we can
- * ignore any pen-up events with less than the requisite
- * count done.
- *
- * In several thousand conversions, no pen-ups where detected
- * before count completed.
- */
-}
-
-/**
- * s3c24xx_ts_select - ADC selection callback.
- * @client: The client that was registered with the ADC core.
- * @select: The reason for select.
- *
- * Called when the ADC core selects (or deslects) us as a client.
- */
-static void s3c24xx_ts_select(struct s3c_adc_client *client, unsigned select)
-{
- if (select) {
- writel(S3C2410_ADCTSC_PULL_UP_DISABLE | AUTOPST,
- ts.io + S3C2410_ADCTSC);
- } else {
- mod_timer(&touch_timer, jiffies+1);
- writel(WAIT4INT | INT_UP, ts.io + S3C2410_ADCTSC);
- }
-}
-
-/**
- * s3c2410ts_probe - device core probe entry point
- * @pdev: The device we are being bound to.
- *
- * Initialise, find and allocate any resources we need to run and then
- * register with the ADC and input systems.
- */
-static int s3c2410ts_probe(struct platform_device *pdev)
-{
- struct s3c2410_ts_mach_info *info;
- struct device *dev = &pdev->dev;
- struct input_dev *input_dev;
- struct resource *res;
- int ret = -EINVAL;
-
- /* Initialise input stuff */
- memset(&ts, 0, sizeof(struct s3c2410ts));
-
- ts.dev = dev;
-
- info = dev_get_platdata(dev);
- if (!info) {
- dev_err(dev, "no platform data, cannot attach\n");
- return -EINVAL;
- }
-
- dev_dbg(dev, "initialising touchscreen\n");
-
- ts.clock = clk_get(dev, "adc");
- if (IS_ERR(ts.clock)) {
- dev_err(dev, "cannot get adc clock source\n");
- return -ENOENT;
- }
-
- ret = clk_prepare_enable(ts.clock);
- if (ret) {
- dev_err(dev, "Failed! to enabled clocks\n");
- goto err_clk_get;
- }
- dev_dbg(dev, "got and enabled clocks\n");
-
- ts.irq_tc = ret = platform_get_irq(pdev, 0);
- if (ret < 0) {
- dev_err(dev, "no resource for interrupt\n");
- goto err_clk;
- }
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(dev, "no resource for registers\n");
- ret = -ENOENT;
- goto err_clk;
- }
-
- ts.io = ioremap(res->start, resource_size(res));
- if (ts.io == NULL) {
- dev_err(dev, "cannot map registers\n");
- ret = -ENOMEM;
- goto err_clk;
- }
-
- /* inititalise the gpio */
- if (info->cfg_gpio)
- info->cfg_gpio(to_platform_device(ts.dev));
-
- ts.client = s3c_adc_register(pdev, s3c24xx_ts_select,
- s3c24xx_ts_conversion, 1);
- if (IS_ERR(ts.client)) {
- dev_err(dev, "failed to register adc client\n");
- ret = PTR_ERR(ts.client);
- goto err_iomap;
- }
-
- /* Initialise registers */
- if ((info->delay & 0xffff) > 0)
- writel(info->delay & 0xffff, ts.io + S3C2410_ADCDLY);
-
- writel(WAIT4INT | INT_DOWN, ts.io + S3C2410_ADCTSC);
-
- input_dev = input_allocate_device();
- if (!input_dev) {
- dev_err(dev, "Unable to allocate the input device !!\n");
- ret = -ENOMEM;
- goto err_iomap;
- }
-
- ts.input = input_dev;
- ts.input->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
- ts.input->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
- input_set_abs_params(ts.input, ABS_X, 0, 0x3FF, 0, 0);
- input_set_abs_params(ts.input, ABS_Y, 0, 0x3FF, 0, 0);
-
- ts.input->name = "S3C24XX TouchScreen";
- ts.input->id.bustype = BUS_HOST;
- ts.input->id.vendor = 0xDEAD;
- ts.input->id.product = 0xBEEF;
- ts.input->id.version = 0x0102;
-
- ts.shift = info->oversampling_shift;
- ts.features = platform_get_device_id(pdev)->driver_data;
-
- ret = request_irq(ts.irq_tc, stylus_irq, 0,
- "s3c2410_ts_pen", ts.input);
- if (ret) {
- dev_err(dev, "cannot get TC interrupt\n");
- goto err_inputdev;
- }
-
- dev_info(dev, "driver attached, registering input device\n");
-
- /* All went ok, so register to the input system */
- ret = input_register_device(ts.input);
- if (ret < 0) {
- dev_err(dev, "failed to register input device\n");
- ret = -EIO;
- goto err_tcirq;
- }
-
- return 0;
-
- err_tcirq:
- free_irq(ts.irq_tc, ts.input);
- err_inputdev:
- input_free_device(ts.input);
- err_iomap:
- iounmap(ts.io);
- err_clk:
- clk_disable_unprepare(ts.clock);
- del_timer_sync(&touch_timer);
- err_clk_get:
- clk_put(ts.clock);
- return ret;
-}
-
-/**
- * s3c2410ts_remove - device core removal entry point
- * @pdev: The device we are being removed from.
- *
- * Free up our state ready to be removed.
- */
-static int s3c2410ts_remove(struct platform_device *pdev)
-{
- free_irq(ts.irq_tc, ts.input);
- del_timer_sync(&touch_timer);
-
- clk_disable_unprepare(ts.clock);
- clk_put(ts.clock);
-
- input_unregister_device(ts.input);
- iounmap(ts.io);
-
- return 0;
-}
-
-#ifdef CONFIG_PM
-static int s3c2410ts_suspend(struct device *dev)
-{
- writel(TSC_SLEEP, ts.io + S3C2410_ADCTSC);
- disable_irq(ts.irq_tc);
- clk_disable(ts.clock);
-
- return 0;
-}
-
-static int s3c2410ts_resume(struct device *dev)
-{
- struct platform_device *pdev = to_platform_device(dev);
- struct s3c2410_ts_mach_info *info = dev_get_platdata(&pdev->dev);
-
- clk_enable(ts.clock);
- enable_irq(ts.irq_tc);
-
- /* Initialise registers */
- if ((info->delay & 0xffff) > 0)
- writel(info->delay & 0xffff, ts.io + S3C2410_ADCDLY);
-
- writel(WAIT4INT | INT_DOWN, ts.io + S3C2410_ADCTSC);
-
- return 0;
-}
-
-static const struct dev_pm_ops s3c_ts_pmops = {
- .suspend = s3c2410ts_suspend,
- .resume = s3c2410ts_resume,
-};
-#endif
-
-static const struct platform_device_id s3cts_driver_ids[] = {
- { "s3c2410-ts", 0 },
- { "s3c2440-ts", 0 },
- { "s3c64xx-ts", FEAT_PEN_IRQ },
- { }
-};
-MODULE_DEVICE_TABLE(platform, s3cts_driver_ids);
-
-static struct platform_driver s3c_ts_driver = {
- .driver = {
- .name = "samsung-ts",
-#ifdef CONFIG_PM
- .pm = &s3c_ts_pmops,
-#endif
- },
- .id_table = s3cts_driver_ids,
- .probe = s3c2410ts_probe,
- .remove = s3c2410ts_remove,
-};
-module_platform_driver(s3c_ts_driver);
-
-MODULE_AUTHOR("Arnaud Patard <arnaud.patard@rtp-net.org>, "
- "Ben Dooks <ben@simtec.co.uk>, "
- "Simtec Electronics <linux@simtec.co.uk>");
-MODULE_DESCRIPTION("S3C24XX Touchscreen driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/input/touchscreen/ucb1400_ts.c b/drivers/input/touchscreen/ucb1400_ts.c
deleted file mode 100644
index dfd3b35590c3..000000000000
--- a/drivers/input/touchscreen/ucb1400_ts.c
+++ /dev/null
@@ -1,458 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Philips UCB1400 touchscreen driver
- *
- * Author: Nicolas Pitre
- * Created: September 25, 2006
- * Copyright: MontaVista Software, Inc.
- *
- * Spliting done by: Marek Vasut <marek.vasut@gmail.com>
- * If something doesn't work and it worked before spliting, e-mail me,
- * dont bother Nicolas please ;-)
- *
- * This code is heavily based on ucb1x00-*.c copyrighted by Russell King
- * covering the UCB1100, UCB1200 and UCB1300.. Support for the UCB1400 has
- * been made separate from ucb1x00-core/ucb1x00-ts on Russell's request.
- */
-
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/sched.h>
-#include <linux/wait.h>
-#include <linux/input.h>
-#include <linux/device.h>
-#include <linux/interrupt.h>
-#include <linux/ucb1400.h>
-
-#define UCB1400_TS_POLL_PERIOD 10 /* ms */
-
-static bool adcsync;
-static int ts_delay = 55; /* us */
-static int ts_delay_pressure; /* us */
-
-/* Switch to interrupt mode. */
-static void ucb1400_ts_mode_int(struct ucb1400_ts *ucb)
-{
- ucb1400_reg_write(ucb->ac97, UCB_TS_CR,
- UCB_TS_CR_TSMX_POW | UCB_TS_CR_TSPX_POW |
- UCB_TS_CR_TSMY_GND | UCB_TS_CR_TSPY_GND |
- UCB_TS_CR_MODE_INT);
-}
-
-/*
- * Switch to pressure mode, and read pressure. We don't need to wait
- * here, since both plates are being driven.
- */
-static unsigned int ucb1400_ts_read_pressure(struct ucb1400_ts *ucb)
-{
- ucb1400_reg_write(ucb->ac97, UCB_TS_CR,
- UCB_TS_CR_TSMX_POW | UCB_TS_CR_TSPX_POW |
- UCB_TS_CR_TSMY_GND | UCB_TS_CR_TSPY_GND |
- UCB_TS_CR_MODE_PRES | UCB_TS_CR_BIAS_ENA);
-
- udelay(ts_delay_pressure);
-
- return ucb1400_adc_read(ucb->ac97, UCB_ADC_INP_TSPY, adcsync);
-}
-
-/*
- * Switch to X position mode and measure Y plate. We switch the plate
- * configuration in pressure mode, then switch to position mode. This
- * gives a faster response time. Even so, we need to wait about 55us
- * for things to stabilise.
- */
-static unsigned int ucb1400_ts_read_xpos(struct ucb1400_ts *ucb)
-{
- ucb1400_reg_write(ucb->ac97, UCB_TS_CR,
- UCB_TS_CR_TSMX_GND | UCB_TS_CR_TSPX_POW |
- UCB_TS_CR_MODE_PRES | UCB_TS_CR_BIAS_ENA);
- ucb1400_reg_write(ucb->ac97, UCB_TS_CR,
- UCB_TS_CR_TSMX_GND | UCB_TS_CR_TSPX_POW |
- UCB_TS_CR_MODE_PRES | UCB_TS_CR_BIAS_ENA);
- ucb1400_reg_write(ucb->ac97, UCB_TS_CR,
- UCB_TS_CR_TSMX_GND | UCB_TS_CR_TSPX_POW |
- UCB_TS_CR_MODE_POS | UCB_TS_CR_BIAS_ENA);
-
- udelay(ts_delay);
-
- return ucb1400_adc_read(ucb->ac97, UCB_ADC_INP_TSPY, adcsync);
-}
-
-/*
- * Switch to Y position mode and measure X plate. We switch the plate
- * configuration in pressure mode, then switch to position mode. This
- * gives a faster response time. Even so, we need to wait about 55us
- * for things to stabilise.
- */
-static int ucb1400_ts_read_ypos(struct ucb1400_ts *ucb)
-{
- ucb1400_reg_write(ucb->ac97, UCB_TS_CR,
- UCB_TS_CR_TSMY_GND | UCB_TS_CR_TSPY_POW |
- UCB_TS_CR_MODE_PRES | UCB_TS_CR_BIAS_ENA);
- ucb1400_reg_write(ucb->ac97, UCB_TS_CR,
- UCB_TS_CR_TSMY_GND | UCB_TS_CR_TSPY_POW |
- UCB_TS_CR_MODE_PRES | UCB_TS_CR_BIAS_ENA);
- ucb1400_reg_write(ucb->ac97, UCB_TS_CR,
- UCB_TS_CR_TSMY_GND | UCB_TS_CR_TSPY_POW |
- UCB_TS_CR_MODE_POS | UCB_TS_CR_BIAS_ENA);
-
- udelay(ts_delay);
-
- return ucb1400_adc_read(ucb->ac97, UCB_ADC_INP_TSPX, adcsync);
-}
-
-/*
- * Switch to X plate resistance mode. Set MX to ground, PX to
- * supply. Measure current.
- */
-static unsigned int ucb1400_ts_read_xres(struct ucb1400_ts *ucb)
-{
- ucb1400_reg_write(ucb->ac97, UCB_TS_CR,
- UCB_TS_CR_TSMX_GND | UCB_TS_CR_TSPX_POW |
- UCB_TS_CR_MODE_PRES | UCB_TS_CR_BIAS_ENA);
- return ucb1400_adc_read(ucb->ac97, 0, adcsync);
-}
-
-/*
- * Switch to Y plate resistance mode. Set MY to ground, PY to
- * supply. Measure current.
- */
-static unsigned int ucb1400_ts_read_yres(struct ucb1400_ts *ucb)
-{
- ucb1400_reg_write(ucb->ac97, UCB_TS_CR,
- UCB_TS_CR_TSMY_GND | UCB_TS_CR_TSPY_POW |
- UCB_TS_CR_MODE_PRES | UCB_TS_CR_BIAS_ENA);
- return ucb1400_adc_read(ucb->ac97, 0, adcsync);
-}
-
-static int ucb1400_ts_pen_up(struct ucb1400_ts *ucb)
-{
- unsigned short val = ucb1400_reg_read(ucb->ac97, UCB_TS_CR);
-
- return val & (UCB_TS_CR_TSPX_LOW | UCB_TS_CR_TSMX_LOW);
-}
-
-static void ucb1400_ts_irq_enable(struct ucb1400_ts *ucb)
-{
- ucb1400_reg_write(ucb->ac97, UCB_IE_CLEAR, UCB_IE_TSPX);
- ucb1400_reg_write(ucb->ac97, UCB_IE_CLEAR, 0);
- ucb1400_reg_write(ucb->ac97, UCB_IE_FAL, UCB_IE_TSPX);
-}
-
-static void ucb1400_ts_irq_disable(struct ucb1400_ts *ucb)
-{
- ucb1400_reg_write(ucb->ac97, UCB_IE_FAL, 0);
-}
-
-static void ucb1400_ts_report_event(struct input_dev *idev, u16 pressure, u16 x, u16 y)
-{
- input_report_abs(idev, ABS_X, x);
- input_report_abs(idev, ABS_Y, y);
- input_report_abs(idev, ABS_PRESSURE, pressure);
- input_report_key(idev, BTN_TOUCH, 1);
- input_sync(idev);
-}
-
-static void ucb1400_ts_event_release(struct input_dev *idev)
-{
- input_report_abs(idev, ABS_PRESSURE, 0);
- input_report_key(idev, BTN_TOUCH, 0);
- input_sync(idev);
-}
-
-static void ucb1400_clear_pending_irq(struct ucb1400_ts *ucb)
-{
- unsigned int isr;
-
- isr = ucb1400_reg_read(ucb->ac97, UCB_IE_STATUS);
- ucb1400_reg_write(ucb->ac97, UCB_IE_CLEAR, isr);
- ucb1400_reg_write(ucb->ac97, UCB_IE_CLEAR, 0);
-
- if (isr & UCB_IE_TSPX)
- ucb1400_ts_irq_disable(ucb);
- else
- dev_dbg(&ucb->ts_idev->dev,
- "ucb1400: unexpected IE_STATUS = %#x\n", isr);
-}
-
-/*
- * A restriction with interrupts exists when using the ucb1400, as
- * the codec read/write routines may sleep while waiting for codec
- * access completion and uses semaphores for access control to the
- * AC97 bus. Therefore the driver is forced to use threaded interrupt
- * handler.
- */
-static irqreturn_t ucb1400_irq(int irqnr, void *devid)
-{
- struct ucb1400_ts *ucb = devid;
- unsigned int x, y, p;
-
- if (unlikely(irqnr != ucb->irq))
- return IRQ_NONE;
-
- ucb1400_clear_pending_irq(ucb);
-
- /* Start with a small delay before checking pendown state */
- msleep(UCB1400_TS_POLL_PERIOD);
-
- while (!ucb->stopped && !ucb1400_ts_pen_up(ucb)) {
- ucb1400_adc_enable(ucb->ac97);
- x = ucb1400_ts_read_xpos(ucb);
- y = ucb1400_ts_read_ypos(ucb);
- p = ucb1400_ts_read_pressure(ucb);
- ucb1400_adc_disable(ucb->ac97);
-
- ucb1400_ts_report_event(ucb->ts_idev, p, x, y);
-
- wait_event_timeout(ucb->ts_wait, ucb->stopped,
- msecs_to_jiffies(UCB1400_TS_POLL_PERIOD));
- }
-
- ucb1400_ts_event_release(ucb->ts_idev);
-
- if (!ucb->stopped) {
- /* Switch back to interrupt mode. */
- ucb1400_ts_mode_int(ucb);
- ucb1400_ts_irq_enable(ucb);
- }
-
- return IRQ_HANDLED;
-}
-
-static void ucb1400_ts_stop(struct ucb1400_ts *ucb)
-{
- /* Signal IRQ thread to stop polling and disable the handler. */
- ucb->stopped = true;
- mb();
- wake_up(&ucb->ts_wait);
- disable_irq(ucb->irq);
-
- ucb1400_ts_irq_disable(ucb);
- ucb1400_reg_write(ucb->ac97, UCB_TS_CR, 0);
-}
-
-/* Must be called with ts->lock held */
-static void ucb1400_ts_start(struct ucb1400_ts *ucb)
-{
- /* Tell IRQ thread that it may poll the device. */
- ucb->stopped = false;
- mb();
-
- ucb1400_ts_mode_int(ucb);
- ucb1400_ts_irq_enable(ucb);
-
- enable_irq(ucb->irq);
-}
-
-static int ucb1400_ts_open(struct input_dev *idev)
-{
- struct ucb1400_ts *ucb = input_get_drvdata(idev);
-
- ucb1400_ts_start(ucb);
-
- return 0;
-}
-
-static void ucb1400_ts_close(struct input_dev *idev)
-{
- struct ucb1400_ts *ucb = input_get_drvdata(idev);
-
- ucb1400_ts_stop(ucb);
-}
-
-#ifndef NO_IRQ
-#define NO_IRQ 0
-#endif
-
-/*
- * Try to probe our interrupt, rather than relying on lots of
- * hard-coded machine dependencies.
- */
-static int ucb1400_ts_detect_irq(struct ucb1400_ts *ucb,
- struct platform_device *pdev)
-{
- unsigned long mask, timeout;
-
- mask = probe_irq_on();
-
- /* Enable the ADC interrupt. */
- ucb1400_reg_write(ucb->ac97, UCB_IE_RIS, UCB_IE_ADC);
- ucb1400_reg_write(ucb->ac97, UCB_IE_FAL, UCB_IE_ADC);
- ucb1400_reg_write(ucb->ac97, UCB_IE_CLEAR, 0xffff);
- ucb1400_reg_write(ucb->ac97, UCB_IE_CLEAR, 0);
-
- /* Cause an ADC interrupt. */
- ucb1400_reg_write(ucb->ac97, UCB_ADC_CR, UCB_ADC_ENA);
- ucb1400_reg_write(ucb->ac97, UCB_ADC_CR, UCB_ADC_ENA | UCB_ADC_START);
-
- /* Wait for the conversion to complete. */
- timeout = jiffies + HZ/2;
- while (!(ucb1400_reg_read(ucb->ac97, UCB_ADC_DATA) &
- UCB_ADC_DAT_VALID)) {
- cpu_relax();
- if (time_after(jiffies, timeout)) {
- dev_err(&pdev->dev, "timed out in IRQ probe\n");
- probe_irq_off(mask);
- return -ENODEV;
- }
- }
- ucb1400_reg_write(ucb->ac97, UCB_ADC_CR, 0);
-
- /* Disable and clear interrupt. */
- ucb1400_reg_write(ucb->ac97, UCB_IE_RIS, 0);
- ucb1400_reg_write(ucb->ac97, UCB_IE_FAL, 0);
- ucb1400_reg_write(ucb->ac97, UCB_IE_CLEAR, 0xffff);
- ucb1400_reg_write(ucb->ac97, UCB_IE_CLEAR, 0);
-
- /* Read triggered interrupt. */
- ucb->irq = probe_irq_off(mask);
- if (ucb->irq < 0 || ucb->irq == NO_IRQ)
- return -ENODEV;
-
- return 0;
-}
-
-static int ucb1400_ts_probe(struct platform_device *pdev)
-{
- struct ucb1400_ts *ucb = dev_get_platdata(&pdev->dev);
- int error, x_res, y_res;
- u16 fcsr;
-
- ucb->ts_idev = input_allocate_device();
- if (!ucb->ts_idev) {
- error = -ENOMEM;
- goto err;
- }
-
- /* Only in case the IRQ line wasn't supplied, try detecting it */
- if (ucb->irq < 0) {
- error = ucb1400_ts_detect_irq(ucb, pdev);
- if (error) {
- dev_err(&pdev->dev, "IRQ probe failed\n");
- goto err_free_devs;
- }
- }
- dev_dbg(&pdev->dev, "found IRQ %d\n", ucb->irq);
-
- init_waitqueue_head(&ucb->ts_wait);
-
- input_set_drvdata(ucb->ts_idev, ucb);
-
- ucb->ts_idev->dev.parent = &pdev->dev;
- ucb->ts_idev->name = "UCB1400 touchscreen interface";
- ucb->ts_idev->id.vendor = ucb1400_reg_read(ucb->ac97,
- AC97_VENDOR_ID1);
- ucb->ts_idev->id.product = ucb->id;
- ucb->ts_idev->open = ucb1400_ts_open;
- ucb->ts_idev->close = ucb1400_ts_close;
- ucb->ts_idev->evbit[0] = BIT_MASK(EV_ABS) | BIT_MASK(EV_KEY);
- ucb->ts_idev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
-
- /*
- * Enable ADC filter to prevent horrible jitter on Colibri.
- * This also further reduces jitter on boards where ADCSYNC
- * pin is connected.
- */
- fcsr = ucb1400_reg_read(ucb->ac97, UCB_FCSR);
- ucb1400_reg_write(ucb->ac97, UCB_FCSR, fcsr | UCB_FCSR_AVE);
-
- ucb1400_adc_enable(ucb->ac97);
- x_res = ucb1400_ts_read_xres(ucb);
- y_res = ucb1400_ts_read_yres(ucb);
- ucb1400_adc_disable(ucb->ac97);
- dev_dbg(&pdev->dev, "x/y = %d/%d\n", x_res, y_res);
-
- input_set_abs_params(ucb->ts_idev, ABS_X, 0, x_res, 0, 0);
- input_set_abs_params(ucb->ts_idev, ABS_Y, 0, y_res, 0, 0);
- input_set_abs_params(ucb->ts_idev, ABS_PRESSURE, 0, 0, 0, 0);
-
- ucb1400_ts_stop(ucb);
-
- error = request_threaded_irq(ucb->irq, NULL, ucb1400_irq,
- IRQF_TRIGGER_RISING | IRQF_ONESHOT,
- "UCB1400", ucb);
- if (error) {
- dev_err(&pdev->dev,
- "unable to grab irq%d: %d\n", ucb->irq, error);
- goto err_free_devs;
- }
-
- error = input_register_device(ucb->ts_idev);
- if (error)
- goto err_free_irq;
-
- return 0;
-
-err_free_irq:
- free_irq(ucb->irq, ucb);
-err_free_devs:
- input_free_device(ucb->ts_idev);
-err:
- return error;
-}
-
-static int ucb1400_ts_remove(struct platform_device *pdev)
-{
- struct ucb1400_ts *ucb = dev_get_platdata(&pdev->dev);
-
- free_irq(ucb->irq, ucb);
- input_unregister_device(ucb->ts_idev);
-
- return 0;
-}
-
-static int __maybe_unused ucb1400_ts_suspend(struct device *dev)
-{
- struct ucb1400_ts *ucb = dev_get_platdata(dev);
- struct input_dev *idev = ucb->ts_idev;
-
- mutex_lock(&idev->mutex);
-
- if (input_device_enabled(idev))
- ucb1400_ts_stop(ucb);
-
- mutex_unlock(&idev->mutex);
- return 0;
-}
-
-static int __maybe_unused ucb1400_ts_resume(struct device *dev)
-{
- struct ucb1400_ts *ucb = dev_get_platdata(dev);
- struct input_dev *idev = ucb->ts_idev;
-
- mutex_lock(&idev->mutex);
-
- if (input_device_enabled(idev))
- ucb1400_ts_start(ucb);
-
- mutex_unlock(&idev->mutex);
- return 0;
-}
-
-static SIMPLE_DEV_PM_OPS(ucb1400_ts_pm_ops,
- ucb1400_ts_suspend, ucb1400_ts_resume);
-
-static struct platform_driver ucb1400_ts_driver = {
- .probe = ucb1400_ts_probe,
- .remove = ucb1400_ts_remove,
- .driver = {
- .name = "ucb1400_ts",
- .pm = &ucb1400_ts_pm_ops,
- },
-};
-module_platform_driver(ucb1400_ts_driver);
-
-module_param(adcsync, bool, 0444);
-MODULE_PARM_DESC(adcsync, "Synchronize touch readings with ADCSYNC pin.");
-
-module_param(ts_delay, int, 0444);
-MODULE_PARM_DESC(ts_delay, "Delay between panel setup and"
- " position read. Default = 55us.");
-
-module_param(ts_delay_pressure, int, 0444);
-MODULE_PARM_DESC(ts_delay_pressure,
- "delay between panel setup and pressure read."
- " Default = 0us.");
-
-MODULE_DESCRIPTION("Philips UCB1400 touchscreen driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/input/touchscreen/zylonite-wm97xx.c b/drivers/input/touchscreen/zylonite-wm97xx.c
deleted file mode 100644
index a70fe4abe520..000000000000
--- a/drivers/input/touchscreen/zylonite-wm97xx.c
+++ /dev/null
@@ -1,220 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * zylonite-wm97xx.c -- Zylonite Continuous Touch screen driver
- *
- * Copyright 2004, 2007, 2008 Wolfson Microelectronics PLC.
- * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
- * Parts Copyright : Ian Molton <spyro@f2s.com>
- * Andrew Zabolotny <zap@homelink.ru>
- *
- * Notes:
- * This is a wm97xx extended touch driver supporting interrupt driven
- * and continuous operation on Marvell Zylonite development systems
- * (which have a WM9713 on board).
- */
-
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/kernel.h>
-#include <linux/delay.h>
-#include <linux/gpio/consumer.h>
-#include <linux/irq.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/soc/pxa/cpu.h>
-#include <linux/wm97xx.h>
-
-#include <sound/pxa2xx-lib.h>
-
-struct continuous {
- u16 id; /* codec id */
- u8 code; /* continuous code */
- u8 reads; /* number of coord reads per read cycle */
- u32 speed; /* number of coords per second */
-};
-
-#define WM_READS(sp) ((sp / HZ) + 1)
-
-static const struct continuous cinfo[] = {
- { WM9713_ID2, 0, WM_READS(94), 94 },
- { WM9713_ID2, 1, WM_READS(120), 120 },
- { WM9713_ID2, 2, WM_READS(154), 154 },
- { WM9713_ID2, 3, WM_READS(188), 188 },
-};
-
-/* continuous speed index */
-static int sp_idx;
-
-/*
- * Pen sampling frequency (Hz) in continuous mode.
- */
-static int cont_rate = 200;
-module_param(cont_rate, int, 0);
-MODULE_PARM_DESC(cont_rate, "Sampling rate in continuous mode (Hz)");
-
-/*
- * Pressure readback.
- *
- * Set to 1 to read back pen down pressure
- */
-static int pressure;
-module_param(pressure, int, 0);
-MODULE_PARM_DESC(pressure, "Pressure readback (1 = pressure, 0 = no pressure)");
-
-/*
- * AC97 touch data slot.
- *
- * Touch screen readback data ac97 slot
- */
-static int ac97_touch_slot = 5;
-module_param(ac97_touch_slot, int, 0);
-MODULE_PARM_DESC(ac97_touch_slot, "Touch screen data slot AC97 number");
-
-
-/* flush AC97 slot 5 FIFO machines */
-static void wm97xx_acc_pen_up(struct wm97xx *wm)
-{
- int i;
-
- msleep(1);
-
- for (i = 0; i < 16; i++)
- pxa2xx_ac97_read_modr();
-}
-
-static int wm97xx_acc_pen_down(struct wm97xx *wm)
-{
- u16 x, y, p = 0x100 | WM97XX_ADCSEL_PRES;
- int reads = 0;
- static u16 last, tries;
-
- /* When the AC97 queue has been drained we need to allow time
- * to buffer up samples otherwise we end up spinning polling
- * for samples. The controller can't have a suitably low
- * threshold set to use the notifications it gives.
- */
- msleep(1);
-
- if (tries > 5) {
- tries = 0;
- return RC_PENUP;
- }
-
- x = pxa2xx_ac97_read_modr();
- if (x == last) {
- tries++;
- return RC_AGAIN;
- }
- last = x;
- do {
- if (reads)
- x = pxa2xx_ac97_read_modr();
- y = pxa2xx_ac97_read_modr();
- if (pressure)
- p = pxa2xx_ac97_read_modr();
-
- dev_dbg(wm->dev, "Raw coordinates: x=%x, y=%x, p=%x\n",
- x, y, p);
-
- /* are samples valid */
- if ((x & WM97XX_ADCSEL_MASK) != WM97XX_ADCSEL_X ||
- (y & WM97XX_ADCSEL_MASK) != WM97XX_ADCSEL_Y ||
- (p & WM97XX_ADCSEL_MASK) != WM97XX_ADCSEL_PRES)
- goto up;
-
- /* coordinate is good */
- tries = 0;
- input_report_abs(wm->input_dev, ABS_X, x & 0xfff);
- input_report_abs(wm->input_dev, ABS_Y, y & 0xfff);
- input_report_abs(wm->input_dev, ABS_PRESSURE, p & 0xfff);
- input_report_key(wm->input_dev, BTN_TOUCH, (p != 0));
- input_sync(wm->input_dev);
- reads++;
- } while (reads < cinfo[sp_idx].reads);
-up:
- return RC_PENDOWN | RC_AGAIN;
-}
-
-static int wm97xx_acc_startup(struct wm97xx *wm)
-{
- int idx;
-
- /* check we have a codec */
- if (wm->ac97 == NULL)
- return -ENODEV;
-
- /* Go you big red fire engine */
- for (idx = 0; idx < ARRAY_SIZE(cinfo); idx++) {
- if (wm->id != cinfo[idx].id)
- continue;
- sp_idx = idx;
- if (cont_rate <= cinfo[idx].speed)
- break;
- }
- wm->acc_rate = cinfo[sp_idx].code;
- wm->acc_slot = ac97_touch_slot;
- dev_info(wm->dev,
- "zylonite accelerated touchscreen driver, %d samples/sec\n",
- cinfo[sp_idx].speed);
-
- return 0;
-}
-
-static struct wm97xx_mach_ops zylonite_mach_ops = {
- .acc_enabled = 1,
- .acc_pen_up = wm97xx_acc_pen_up,
- .acc_pen_down = wm97xx_acc_pen_down,
- .acc_startup = wm97xx_acc_startup,
- .irq_gpio = WM97XX_GPIO_2,
-};
-
-static int zylonite_wm97xx_probe(struct platform_device *pdev)
-{
- struct wm97xx *wm = platform_get_drvdata(pdev);
- struct gpio_desc *gpio_touch_irq;
- int err;
-
- gpio_touch_irq = devm_gpiod_get(&pdev->dev, "touch", GPIOD_IN);
- err = PTR_ERR_OR_ZERO(gpio_touch_irq);
- if (err) {
- dev_err(&pdev->dev, "Cannot get irq gpio: %d\n", err);
- return err;
- }
-
- wm->pen_irq = gpiod_to_irq(gpio_touch_irq);
- irq_set_irq_type(wm->pen_irq, IRQ_TYPE_EDGE_BOTH);
-
- wm97xx_config_gpio(wm, WM97XX_GPIO_13, WM97XX_GPIO_IN,
- WM97XX_GPIO_POL_HIGH,
- WM97XX_GPIO_STICKY,
- WM97XX_GPIO_WAKE);
- wm97xx_config_gpio(wm, WM97XX_GPIO_2, WM97XX_GPIO_OUT,
- WM97XX_GPIO_POL_HIGH,
- WM97XX_GPIO_NOTSTICKY,
- WM97XX_GPIO_NOWAKE);
-
- return wm97xx_register_mach_ops(wm, &zylonite_mach_ops);
-}
-
-static int zylonite_wm97xx_remove(struct platform_device *pdev)
-{
- struct wm97xx *wm = platform_get_drvdata(pdev);
-
- wm97xx_unregister_mach_ops(wm);
-
- return 0;
-}
-
-static struct platform_driver zylonite_wm97xx_driver = {
- .probe = zylonite_wm97xx_probe,
- .remove = zylonite_wm97xx_remove,
- .driver = {
- .name = "wm97xx-touch",
- },
-};
-module_platform_driver(zylonite_wm97xx_driver);
-
-/* Module information */
-MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
-MODULE_DESCRIPTION("wm97xx continuous touch driver for Zylonite");
-MODULE_LICENSE("GPL");
diff --git a/drivers/interconnect/core.c b/drivers/interconnect/core.c
index 25debded65a8..0f392f59b135 100644
--- a/drivers/interconnect/core.c
+++ b/drivers/interconnect/core.c
@@ -1079,15 +1079,19 @@ void icc_provider_del(struct icc_provider *provider)
}
EXPORT_SYMBOL_GPL(icc_provider_del);
+static const struct of_device_id __maybe_unused ignore_list[] = {
+ { .compatible = "qcom,sc7180-ipa-virt" },
+ { .compatible = "qcom,sc8180x-ipa-virt" },
+ { .compatible = "qcom,sdx55-ipa-virt" },
+ { .compatible = "qcom,sm8150-ipa-virt" },
+ { .compatible = "qcom,sm8250-ipa-virt" },
+ {}
+};
+
static int of_count_icc_providers(struct device_node *np)
{
struct device_node *child;
int count = 0;
- const struct of_device_id __maybe_unused ignore_list[] = {
- { .compatible = "qcom,sc7180-ipa-virt" },
- { .compatible = "qcom,sdx55-ipa-virt" },
- {}
- };
for_each_available_child_of_node(np, child) {
if (of_property_read_bool(child, "#interconnect-cells") &&
diff --git a/drivers/interconnect/qcom/Kconfig b/drivers/interconnect/qcom/Kconfig
index 1a1c941635a2..92d65c7bda23 100644
--- a/drivers/interconnect/qcom/Kconfig
+++ b/drivers/interconnect/qcom/Kconfig
@@ -69,6 +69,15 @@ config INTERCONNECT_QCOM_QCS404
This is a driver for the Qualcomm Network-on-Chip on qcs404-based
platforms.
+config INTERCONNECT_QCOM_QDU1000
+ tristate "Qualcomm QDU1000/QRU1000 interconnect driver"
+ depends on INTERCONNECT_QCOM_RPMH_POSSIBLE
+ select INTERCONNECT_QCOM_RPMH
+ select INTERCONNECT_QCOM_BCM_VOTER
+ help
+ This is a driver for the Qualcomm Network-on-Chip on QDU1000-based
+ and QRU1000-based platforms.
+
config INTERCONNECT_QCOM_RPMH_POSSIBLE
tristate
default INTERCONNECT_QCOM
@@ -83,6 +92,15 @@ config INTERCONNECT_QCOM_RPMH_POSSIBLE
config INTERCONNECT_QCOM_RPMH
tristate
+config INTERCONNECT_QCOM_SA8775P
+ tristate "Qualcomm SA8775P interconnect driver"
+ depends on INTERCONNECT_QCOM_RPMH_POSSIBLE
+ select INTERCONNECT_QCOM_RPMH
+ select INTERCONNECT_QCOM_BCM_VOTER
+ help
+ This is a driver for the Qualcomm Network-on-Chip on sa8775p-based
+ platforms.
+
config INTERCONNECT_QCOM_SC7180
tristate "Qualcomm SC7180 interconnect driver"
depends on INTERCONNECT_QCOM_RPMH_POSSIBLE
@@ -128,6 +146,15 @@ config INTERCONNECT_QCOM_SDM660
This is a driver for the Qualcomm Network-on-Chip on sdm660-based
platforms.
+config INTERCONNECT_QCOM_SDM670
+ tristate "Qualcomm SDM670 interconnect driver"
+ depends on INTERCONNECT_QCOM_RPMH_POSSIBLE
+ select INTERCONNECT_QCOM_RPMH
+ select INTERCONNECT_QCOM_BCM_VOTER
+ help
+ This is a driver for the Qualcomm Network-on-Chip on sdm670-based
+ platforms.
+
config INTERCONNECT_QCOM_SDM845
tristate "Qualcomm SDM845 interconnect driver"
depends on INTERCONNECT_QCOM_RPMH_POSSIBLE
@@ -200,5 +227,14 @@ config INTERCONNECT_QCOM_SM8450
This is a driver for the Qualcomm Network-on-Chip on SM8450-based
platforms.
+config INTERCONNECT_QCOM_SM8550
+ tristate "Qualcomm SM8550 interconnect driver"
+ depends on INTERCONNECT_QCOM_RPMH_POSSIBLE
+ select INTERCONNECT_QCOM_RPMH
+ select INTERCONNECT_QCOM_BCM_VOTER
+ help
+ This is a driver for the Qualcomm Network-on-Chip on SM8550-based
+ platforms.
+
config INTERCONNECT_QCOM_SMD_RPM
tristate
diff --git a/drivers/interconnect/qcom/Makefile b/drivers/interconnect/qcom/Makefile
index 8e357528185d..ab988926433c 100644
--- a/drivers/interconnect/qcom/Makefile
+++ b/drivers/interconnect/qcom/Makefile
@@ -11,12 +11,15 @@ qnoc-msm8996-objs := msm8996.o
icc-osm-l3-objs := osm-l3.o
qnoc-qcm2290-objs := qcm2290.o
qnoc-qcs404-objs := qcs404.o
+qnoc-qdu1000-objs := qdu1000.o
icc-rpmh-obj := icc-rpmh.o
+qnoc-sa8775p-objs := sa8775p.o
qnoc-sc7180-objs := sc7180.o
qnoc-sc7280-objs := sc7280.o
qnoc-sc8180x-objs := sc8180x.o
qnoc-sc8280xp-objs := sc8280xp.o
qnoc-sdm660-objs := sdm660.o
+qnoc-sdm670-objs := sdm670.o
qnoc-sdm845-objs := sdm845.o
qnoc-sdx55-objs := sdx55.o
qnoc-sdx65-objs := sdx65.o
@@ -25,6 +28,7 @@ qnoc-sm8150-objs := sm8150.o
qnoc-sm8250-objs := sm8250.o
qnoc-sm8350-objs := sm8350.o
qnoc-sm8450-objs := sm8450.o
+qnoc-sm8550-objs := sm8550.o
icc-smd-rpm-objs := smd-rpm.o icc-rpm.o
obj-$(CONFIG_INTERCONNECT_QCOM_BCM_VOTER) += icc-bcm-voter.o
@@ -35,12 +39,15 @@ obj-$(CONFIG_INTERCONNECT_QCOM_MSM8996) += qnoc-msm8996.o
obj-$(CONFIG_INTERCONNECT_QCOM_OSM_L3) += icc-osm-l3.o
obj-$(CONFIG_INTERCONNECT_QCOM_QCM2290) += qnoc-qcm2290.o
obj-$(CONFIG_INTERCONNECT_QCOM_QCS404) += qnoc-qcs404.o
+obj-$(CONFIG_INTERCONNECT_QCOM_QDU1000) += qnoc-qdu1000.o
obj-$(CONFIG_INTERCONNECT_QCOM_RPMH) += icc-rpmh.o
+obj-$(CONFIG_INTERCONNECT_QCOM_SA8775P) += qnoc-sa8775p.o
obj-$(CONFIG_INTERCONNECT_QCOM_SC7180) += qnoc-sc7180.o
obj-$(CONFIG_INTERCONNECT_QCOM_SC7280) += qnoc-sc7280.o
obj-$(CONFIG_INTERCONNECT_QCOM_SC8180X) += qnoc-sc8180x.o
obj-$(CONFIG_INTERCONNECT_QCOM_SC8280XP) += qnoc-sc8280xp.o
obj-$(CONFIG_INTERCONNECT_QCOM_SDM660) += qnoc-sdm660.o
+obj-$(CONFIG_INTERCONNECT_QCOM_SDM670) += qnoc-sdm670.o
obj-$(CONFIG_INTERCONNECT_QCOM_SDM845) += qnoc-sdm845.o
obj-$(CONFIG_INTERCONNECT_QCOM_SDX55) += qnoc-sdx55.o
obj-$(CONFIG_INTERCONNECT_QCOM_SDX65) += qnoc-sdx65.o
@@ -49,4 +56,5 @@ obj-$(CONFIG_INTERCONNECT_QCOM_SM8150) += qnoc-sm8150.o
obj-$(CONFIG_INTERCONNECT_QCOM_SM8250) += qnoc-sm8250.o
obj-$(CONFIG_INTERCONNECT_QCOM_SM8350) += qnoc-sm8350.o
obj-$(CONFIG_INTERCONNECT_QCOM_SM8450) += qnoc-sm8450.o
+obj-$(CONFIG_INTERCONNECT_QCOM_SM8550) += qnoc-sm8550.o
obj-$(CONFIG_INTERCONNECT_QCOM_SMD_RPM) += icc-smd-rpm.o
diff --git a/drivers/interconnect/qcom/qdu1000.c b/drivers/interconnect/qcom/qdu1000.c
new file mode 100644
index 000000000000..a4cf559de2b0
--- /dev/null
+++ b/drivers/interconnect/qcom/qdu1000.c
@@ -0,0 +1,1067 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/interconnect.h>
+#include <linux/interconnect-provider.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <dt-bindings/interconnect/qcom,qdu1000-rpmh.h>
+
+#include "bcm-voter.h"
+#include "icc-common.h"
+#include "icc-rpmh.h"
+#include "qdu1000.h"
+
+static struct qcom_icc_node qup0_core_master = {
+ .name = "qup0_core_master",
+ .id = QDU1000_MASTER_QUP_CORE_0,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { QDU1000_SLAVE_QUP_CORE_0 },
+};
+
+static struct qcom_icc_node qup1_core_master = {
+ .name = "qup1_core_master",
+ .id = QDU1000_MASTER_QUP_CORE_1,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { QDU1000_SLAVE_QUP_CORE_1 },
+};
+
+static struct qcom_icc_node alm_sys_tcu = {
+ .name = "alm_sys_tcu",
+ .id = QDU1000_MASTER_SYS_TCU,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 2,
+ .links = { QDU1000_SLAVE_GEM_NOC_CNOC, QDU1000_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node chm_apps = {
+ .name = "chm_apps",
+ .id = QDU1000_MASTER_APPSS_PROC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 4,
+ .links = { QDU1000_SLAVE_GEM_NOC_CNOC, QDU1000_SLAVE_LLCC,
+ QDU1000_SLAVE_GEMNOC_MODEM_CNOC, QDU1000_SLAVE_MEM_NOC_PCIE_SNOC
+ },
+};
+
+static struct qcom_icc_node qnm_ecpri_dma = {
+ .name = "qnm_ecpri_dma",
+ .id = QDU1000_MASTER_GEMNOC_ECPRI_DMA,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 2,
+ .links = { QDU1000_SLAVE_GEM_NOC_CNOC, QDU1000_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node qnm_fec_2_gemnoc = {
+ .name = "qnm_fec_2_gemnoc",
+ .id = QDU1000_MASTER_FEC_2_GEMNOC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 2,
+ .links = { QDU1000_SLAVE_GEM_NOC_CNOC, QDU1000_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node qnm_pcie = {
+ .name = "qnm_pcie",
+ .id = QDU1000_MASTER_ANOC_PCIE_GEM_NOC,
+ .channels = 1,
+ .buswidth = 64,
+ .num_links = 3,
+ .links = { QDU1000_SLAVE_GEM_NOC_CNOC, QDU1000_SLAVE_LLCC,
+ QDU1000_SLAVE_GEMNOC_MODEM_CNOC
+ },
+};
+
+static struct qcom_icc_node qnm_snoc_gc = {
+ .name = "qnm_snoc_gc",
+ .id = QDU1000_MASTER_SNOC_GC_MEM_NOC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { QDU1000_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node qnm_snoc_sf = {
+ .name = "qnm_snoc_sf",
+ .id = QDU1000_MASTER_SNOC_SF_MEM_NOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 4,
+ .links = { QDU1000_SLAVE_GEM_NOC_CNOC, QDU1000_SLAVE_LLCC,
+ QDU1000_SLAVE_GEMNOC_MODEM_CNOC, QDU1000_SLAVE_MEM_NOC_PCIE_SNOC
+ },
+};
+
+static struct qcom_icc_node qxm_mdsp = {
+ .name = "qxm_mdsp",
+ .id = QDU1000_MASTER_MSS_PROC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 3,
+ .links = { QDU1000_SLAVE_GEM_NOC_CNOC, QDU1000_SLAVE_LLCC,
+ QDU1000_SLAVE_MEM_NOC_PCIE_SNOC
+ },
+};
+
+static struct qcom_icc_node llcc_mc = {
+ .name = "llcc_mc",
+ .id = QDU1000_MASTER_LLCC,
+ .channels = 8,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { QDU1000_SLAVE_EBI1 },
+};
+
+static struct qcom_icc_node qhm_gic = {
+ .name = "qhm_gic",
+ .id = QDU1000_MASTER_GIC_AHB,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { QDU1000_SLAVE_SNOC_GEM_NOC_SF },
+};
+
+static struct qcom_icc_node qhm_qdss_bam = {
+ .name = "qhm_qdss_bam",
+ .id = QDU1000_MASTER_QDSS_BAM,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { QDU1000_SLAVE_SNOC_GEM_NOC_SF },
+};
+
+static struct qcom_icc_node qhm_qpic = {
+ .name = "qhm_qpic",
+ .id = QDU1000_MASTER_QPIC,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { QDU1000_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node qhm_qspi = {
+ .name = "qhm_qspi",
+ .id = QDU1000_MASTER_QSPI_0,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { QDU1000_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node qhm_qup0 = {
+ .name = "qhm_qup0",
+ .id = QDU1000_MASTER_QUP_0,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { QDU1000_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node qhm_qup1 = {
+ .name = "qhm_qup1",
+ .id = QDU1000_MASTER_QUP_1,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { QDU1000_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node qhm_system_noc_cfg = {
+ .name = "qhm_system_noc_cfg",
+ .id = QDU1000_MASTER_SNOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { QDU1000_SLAVE_SERVICE_SNOC },
+};
+
+static struct qcom_icc_node qnm_aggre_noc = {
+ .name = "qnm_aggre_noc",
+ .id = QDU1000_MASTER_ANOC_SNOC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { QDU1000_SLAVE_SNOC_GEM_NOC_SF },
+};
+
+static struct qcom_icc_node qnm_aggre_noc_gsi = {
+ .name = "qnm_aggre_noc_gsi",
+ .id = QDU1000_MASTER_ANOC_GSI,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { QDU1000_SLAVE_SNOC_GEM_NOC_GC },
+};
+
+static struct qcom_icc_node qnm_gemnoc_cnoc = {
+ .name = "qnm_gemnoc_cnoc",
+ .id = QDU1000_MASTER_GEM_NOC_CNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 36,
+ .links = { QDU1000_SLAVE_AHB2PHY_SOUTH, QDU1000_SLAVE_AHB2PHY_NORTH,
+ QDU1000_SLAVE_AHB2PHY_EAST, QDU1000_SLAVE_AOSS,
+ QDU1000_SLAVE_CLK_CTL, QDU1000_SLAVE_RBCPR_CX_CFG,
+ QDU1000_SLAVE_RBCPR_MX_CFG, QDU1000_SLAVE_CRYPTO_0_CFG,
+ QDU1000_SLAVE_ECPRI_CFG, QDU1000_SLAVE_IMEM_CFG,
+ QDU1000_SLAVE_IPC_ROUTER_CFG, QDU1000_SLAVE_CNOC_MSS,
+ QDU1000_SLAVE_PCIE_CFG, QDU1000_SLAVE_PDM,
+ QDU1000_SLAVE_PIMEM_CFG, QDU1000_SLAVE_PRNG,
+ QDU1000_SLAVE_QDSS_CFG, QDU1000_SLAVE_QPIC,
+ QDU1000_SLAVE_QSPI_0, QDU1000_SLAVE_QUP_0,
+ QDU1000_SLAVE_QUP_1, QDU1000_SLAVE_SDCC_2,
+ QDU1000_SLAVE_SMBUS_CFG, QDU1000_SLAVE_SNOC_CFG,
+ QDU1000_SLAVE_TCSR, QDU1000_SLAVE_TLMM,
+ QDU1000_SLAVE_TME_CFG, QDU1000_SLAVE_TSC_CFG,
+ QDU1000_SLAVE_USB3_0, QDU1000_SLAVE_VSENSE_CTRL_CFG,
+ QDU1000_SLAVE_DDRSS_CFG, QDU1000_SLAVE_IMEM,
+ QDU1000_SLAVE_PIMEM, QDU1000_SLAVE_ETHERNET_SS,
+ QDU1000_SLAVE_QDSS_STM, QDU1000_SLAVE_TCU
+ },
+};
+
+static struct qcom_icc_node qnm_gemnoc_modem_slave = {
+ .name = "qnm_gemnoc_modem_slave",
+ .id = QDU1000_MASTER_GEMNOC_MODEM_CNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { QDU1000_SLAVE_MODEM_OFFLINE },
+};
+
+static struct qcom_icc_node qnm_gemnoc_pcie = {
+ .name = "qnm_gemnoc_pcie",
+ .id = QDU1000_MASTER_GEM_NOC_PCIE_SNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { QDU1000_SLAVE_PCIE_0 },
+};
+
+static struct qcom_icc_node qxm_crypto = {
+ .name = "qxm_crypto",
+ .id = QDU1000_MASTER_CRYPTO,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { QDU1000_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node qxm_ecpri_gsi = {
+ .name = "qxm_ecpri_gsi",
+ .id = QDU1000_MASTER_ECPRI_GSI,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 2,
+ .links = { QDU1000_SLAVE_ANOC_SNOC_GSI, QDU1000_SLAVE_PCIE_0 },
+};
+
+static struct qcom_icc_node qxm_pimem = {
+ .name = "qxm_pimem",
+ .id = QDU1000_MASTER_PIMEM,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { QDU1000_SLAVE_SNOC_GEM_NOC_GC },
+};
+
+static struct qcom_icc_node xm_ecpri_dma = {
+ .name = "xm_ecpri_dma",
+ .id = QDU1000_MASTER_SNOC_ECPRI_DMA,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 2,
+ .links = { QDU1000_SLAVE_ECPRI_GEMNOC, QDU1000_SLAVE_PCIE_0 },
+};
+
+static struct qcom_icc_node xm_gic = {
+ .name = "xm_gic",
+ .id = QDU1000_MASTER_GIC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { QDU1000_SLAVE_SNOC_GEM_NOC_GC },
+};
+
+static struct qcom_icc_node xm_pcie = {
+ .name = "xm_pcie",
+ .id = QDU1000_MASTER_PCIE,
+ .channels = 1,
+ .buswidth = 64,
+ .num_links = 1,
+ .links = { QDU1000_SLAVE_ANOC_PCIE_GEM_NOC },
+};
+
+static struct qcom_icc_node xm_qdss_etr0 = {
+ .name = "xm_qdss_etr0",
+ .id = QDU1000_MASTER_QDSS_ETR,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { QDU1000_SLAVE_SNOC_GEM_NOC_SF },
+};
+
+static struct qcom_icc_node xm_qdss_etr1 = {
+ .name = "xm_qdss_etr1",
+ .id = QDU1000_MASTER_QDSS_ETR_1,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { QDU1000_SLAVE_SNOC_GEM_NOC_SF },
+};
+
+static struct qcom_icc_node xm_sdc = {
+ .name = "xm_sdc",
+ .id = QDU1000_MASTER_SDCC_1,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { QDU1000_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_usb3 = {
+ .name = "xm_usb3",
+ .id = QDU1000_MASTER_USB3,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { QDU1000_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node qup0_core_slave = {
+ .name = "qup0_core_slave",
+ .id = QDU1000_SLAVE_QUP_CORE_0,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qup1_core_slave = {
+ .name = "qup1_core_slave",
+ .id = QDU1000_SLAVE_QUP_CORE_1,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qns_gem_noc_cnoc = {
+ .name = "qns_gem_noc_cnoc",
+ .id = QDU1000_SLAVE_GEM_NOC_CNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { QDU1000_MASTER_GEM_NOC_CNOC },
+};
+
+static struct qcom_icc_node qns_llcc = {
+ .name = "qns_llcc",
+ .id = QDU1000_SLAVE_LLCC,
+ .channels = 8,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { QDU1000_MASTER_LLCC },
+};
+
+static struct qcom_icc_node qns_modem_slave = {
+ .name = "qns_modem_slave",
+ .id = QDU1000_SLAVE_GEMNOC_MODEM_CNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { QDU1000_MASTER_GEMNOC_MODEM_CNOC },
+};
+
+static struct qcom_icc_node qns_pcie = {
+ .name = "qns_pcie",
+ .id = QDU1000_SLAVE_MEM_NOC_PCIE_SNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { QDU1000_MASTER_GEM_NOC_PCIE_SNOC },
+};
+
+static struct qcom_icc_node ebi = {
+ .name = "ebi",
+ .id = QDU1000_SLAVE_EBI1,
+ .channels = 8,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_ahb2phy0_south = {
+ .name = "qhs_ahb2phy0_south",
+ .id = QDU1000_SLAVE_AHB2PHY_SOUTH,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_ahb2phy1_north = {
+ .name = "qhs_ahb2phy1_north",
+ .id = QDU1000_SLAVE_AHB2PHY_NORTH,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_ahb2phy2_east = {
+ .name = "qhs_ahb2phy2_east",
+ .id = QDU1000_SLAVE_AHB2PHY_EAST,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_aoss = {
+ .name = "qhs_aoss",
+ .id = QDU1000_SLAVE_AOSS,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_clk_ctl = {
+ .name = "qhs_clk_ctl",
+ .id = QDU1000_SLAVE_CLK_CTL,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_cpr_cx = {
+ .name = "qhs_cpr_cx",
+ .id = QDU1000_SLAVE_RBCPR_CX_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_cpr_mx = {
+ .name = "qhs_cpr_mx",
+ .id = QDU1000_SLAVE_RBCPR_MX_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_crypto_cfg = {
+ .name = "qhs_crypto_cfg",
+ .id = QDU1000_SLAVE_CRYPTO_0_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_ecpri_cfg = {
+ .name = "qhs_ecpri_cfg",
+ .id = QDU1000_SLAVE_ECPRI_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_imem_cfg = {
+ .name = "qhs_imem_cfg",
+ .id = QDU1000_SLAVE_IMEM_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_ipc_router = {
+ .name = "qhs_ipc_router",
+ .id = QDU1000_SLAVE_IPC_ROUTER_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_mss_cfg = {
+ .name = "qhs_mss_cfg",
+ .id = QDU1000_SLAVE_CNOC_MSS,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_pcie_cfg = {
+ .name = "qhs_pcie_cfg",
+ .id = QDU1000_SLAVE_PCIE_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_pdm = {
+ .name = "qhs_pdm",
+ .id = QDU1000_SLAVE_PDM,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_pimem_cfg = {
+ .name = "qhs_pimem_cfg",
+ .id = QDU1000_SLAVE_PIMEM_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_prng = {
+ .name = "qhs_prng",
+ .id = QDU1000_SLAVE_PRNG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_qdss_cfg = {
+ .name = "qhs_qdss_cfg",
+ .id = QDU1000_SLAVE_QDSS_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_qpic = {
+ .name = "qhs_qpic",
+ .id = QDU1000_SLAVE_QPIC,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_qspi = {
+ .name = "qhs_qspi",
+ .id = QDU1000_SLAVE_QSPI_0,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_qup0 = {
+ .name = "qhs_qup0",
+ .id = QDU1000_SLAVE_QUP_0,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_qup1 = {
+ .name = "qhs_qup1",
+ .id = QDU1000_SLAVE_QUP_1,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_sdc2 = {
+ .name = "qhs_sdc2",
+ .id = QDU1000_SLAVE_SDCC_2,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_smbus_cfg = {
+ .name = "qhs_smbus_cfg",
+ .id = QDU1000_SLAVE_SMBUS_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_system_noc_cfg = {
+ .name = "qhs_system_noc_cfg",
+ .id = QDU1000_SLAVE_SNOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { QDU1000_MASTER_SNOC_CFG },
+};
+
+static struct qcom_icc_node qhs_tcsr = {
+ .name = "qhs_tcsr",
+ .id = QDU1000_SLAVE_TCSR,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_tlmm = {
+ .name = "qhs_tlmm",
+ .id = QDU1000_SLAVE_TLMM,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_tme_cfg = {
+ .name = "qhs_tme_cfg",
+ .id = QDU1000_SLAVE_TME_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_tsc_cfg = {
+ .name = "qhs_tsc_cfg",
+ .id = QDU1000_SLAVE_TSC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_usb3 = {
+ .name = "qhs_usb3",
+ .id = QDU1000_SLAVE_USB3_0,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_vsense_ctrl_cfg = {
+ .name = "qhs_vsense_ctrl_cfg",
+ .id = QDU1000_SLAVE_VSENSE_CTRL_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qns_a1noc_snoc = {
+ .name = "qns_a1noc_snoc",
+ .id = QDU1000_SLAVE_A1NOC_SNOC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { QDU1000_MASTER_ANOC_SNOC },
+};
+
+static struct qcom_icc_node qns_anoc_snoc_gsi = {
+ .name = "qns_anoc_snoc_gsi",
+ .id = QDU1000_SLAVE_ANOC_SNOC_GSI,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { QDU1000_MASTER_ANOC_GSI },
+};
+
+static struct qcom_icc_node qns_ddrss_cfg = {
+ .name = "qns_ddrss_cfg",
+ .id = QDU1000_SLAVE_DDRSS_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qns_ecpri_gemnoc = {
+ .name = "qns_ecpri_gemnoc",
+ .id = QDU1000_SLAVE_ECPRI_GEMNOC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { QDU1000_MASTER_GEMNOC_ECPRI_DMA },
+};
+
+static struct qcom_icc_node qns_gemnoc_gc = {
+ .name = "qns_gemnoc_gc",
+ .id = QDU1000_SLAVE_SNOC_GEM_NOC_GC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { QDU1000_MASTER_SNOC_GC_MEM_NOC },
+};
+
+static struct qcom_icc_node qns_gemnoc_sf = {
+ .name = "qns_gemnoc_sf",
+ .id = QDU1000_SLAVE_SNOC_GEM_NOC_SF,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { QDU1000_MASTER_SNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qns_modem = {
+ .name = "qns_modem",
+ .id = QDU1000_SLAVE_MODEM_OFFLINE,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qns_pcie_gemnoc = {
+ .name = "qns_pcie_gemnoc",
+ .id = QDU1000_SLAVE_ANOC_PCIE_GEM_NOC,
+ .channels = 1,
+ .buswidth = 64,
+ .num_links = 1,
+ .links = { QDU1000_MASTER_ANOC_PCIE_GEM_NOC },
+};
+
+static struct qcom_icc_node qxs_imem = {
+ .name = "qxs_imem",
+ .id = QDU1000_SLAVE_IMEM,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qxs_pimem = {
+ .name = "qxs_pimem",
+ .id = QDU1000_SLAVE_PIMEM,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node srvc_system_noc = {
+ .name = "srvc_system_noc",
+ .id = QDU1000_SLAVE_SERVICE_SNOC,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node xs_ethernet_ss = {
+ .name = "xs_ethernet_ss",
+ .id = QDU1000_SLAVE_ETHERNET_SS,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node xs_pcie = {
+ .name = "xs_pcie",
+ .id = QDU1000_SLAVE_PCIE_0,
+ .channels = 1,
+ .buswidth = 64,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node xs_qdss_stm = {
+ .name = "xs_qdss_stm",
+ .id = QDU1000_SLAVE_QDSS_STM,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node xs_sys_tcu_cfg = {
+ .name = "xs_sys_tcu_cfg",
+ .id = QDU1000_SLAVE_TCU,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 0,
+};
+
+static struct qcom_icc_bcm bcm_acv = {
+ .name = "ACV",
+ .num_nodes = 1,
+ .nodes = { &ebi },
+};
+
+static struct qcom_icc_bcm bcm_ce0 = {
+ .name = "CE0",
+ .num_nodes = 1,
+ .nodes = { &qxm_crypto },
+};
+
+static struct qcom_icc_bcm bcm_cn0 = {
+ .name = "CN0",
+ .num_nodes = 44,
+ .nodes = { &qhm_qpic, &qhm_qspi,
+ &qnm_gemnoc_cnoc, &qnm_gemnoc_modem_slave,
+ &qnm_gemnoc_pcie, &xm_sdc,
+ &xm_usb3, &qhs_ahb2phy0_south,
+ &qhs_ahb2phy1_north, &qhs_ahb2phy2_east,
+ &qhs_aoss, &qhs_clk_ctl,
+ &qhs_cpr_cx, &qhs_cpr_mx,
+ &qhs_crypto_cfg, &qhs_ecpri_cfg,
+ &qhs_imem_cfg, &qhs_ipc_router,
+ &qhs_mss_cfg, &qhs_pcie_cfg,
+ &qhs_pdm, &qhs_pimem_cfg,
+ &qhs_prng, &qhs_qdss_cfg,
+ &qhs_qpic, &qhs_qspi,
+ &qhs_qup0, &qhs_qup1,
+ &qhs_sdc2, &qhs_smbus_cfg,
+ &qhs_system_noc_cfg, &qhs_tcsr,
+ &qhs_tlmm, &qhs_tme_cfg,
+ &qhs_tsc_cfg, &qhs_usb3,
+ &qhs_vsense_ctrl_cfg, &qns_ddrss_cfg,
+ &qns_modem, &qxs_imem,
+ &qxs_pimem, &xs_ethernet_ss,
+ &xs_qdss_stm, &xs_sys_tcu_cfg
+ },
+};
+
+static struct qcom_icc_bcm bcm_mc0 = {
+ .name = "MC0",
+ .num_nodes = 1,
+ .nodes = { &ebi },
+};
+
+static struct qcom_icc_bcm bcm_qup0 = {
+ .name = "QUP0",
+ .num_nodes = 2,
+ .nodes = { &qup0_core_slave, &qup1_core_slave },
+};
+
+static struct qcom_icc_bcm bcm_sh0 = {
+ .name = "SH0",
+ .num_nodes = 1,
+ .nodes = { &qns_llcc },
+};
+
+static struct qcom_icc_bcm bcm_sh1 = {
+ .name = "SH1",
+ .num_nodes = 11,
+ .nodes = { &alm_sys_tcu, &chm_apps,
+ &qnm_ecpri_dma, &qnm_fec_2_gemnoc,
+ &qnm_pcie, &qnm_snoc_gc,
+ &qnm_snoc_sf, &qxm_mdsp,
+ &qns_gem_noc_cnoc, &qns_modem_slave,
+ &qns_pcie
+ },
+};
+
+static struct qcom_icc_bcm bcm_sn0 = {
+ .name = "SN0",
+ .num_nodes = 1,
+ .nodes = { &qns_gemnoc_sf },
+};
+
+static struct qcom_icc_bcm bcm_sn1 = {
+ .name = "SN1",
+ .num_nodes = 6,
+ .nodes = { &qhm_gic, &qxm_pimem,
+ &xm_gic, &xm_qdss_etr0,
+ &xm_qdss_etr1, &qns_gemnoc_gc
+ },
+};
+
+static struct qcom_icc_bcm bcm_sn2 = {
+ .name = "SN2",
+ .num_nodes = 5,
+ .nodes = { &qnm_aggre_noc, &qxm_ecpri_gsi,
+ &xm_ecpri_dma, &qns_anoc_snoc_gsi,
+ &qns_ecpri_gemnoc
+ },
+};
+
+static struct qcom_icc_bcm bcm_sn7 = {
+ .name = "SN7",
+ .num_nodes = 2,
+ .nodes = { &qns_pcie_gemnoc, &xs_pcie },
+};
+
+static struct qcom_icc_bcm * const clk_virt_bcms[] = {
+ &bcm_qup0,
+};
+
+static struct qcom_icc_node * const clk_virt_nodes[] = {
+ [MASTER_QUP_CORE_0] = &qup0_core_master,
+ [MASTER_QUP_CORE_1] = &qup1_core_master,
+ [SLAVE_QUP_CORE_0] = &qup0_core_slave,
+ [SLAVE_QUP_CORE_1] = &qup1_core_slave,
+};
+
+static const struct qcom_icc_desc qdu1000_clk_virt = {
+ .nodes = clk_virt_nodes,
+ .num_nodes = ARRAY_SIZE(clk_virt_nodes),
+ .bcms = clk_virt_bcms,
+ .num_bcms = ARRAY_SIZE(clk_virt_bcms),
+};
+
+static struct qcom_icc_bcm * const gem_noc_bcms[] = {
+ &bcm_sh0,
+ &bcm_sh1,
+};
+
+static struct qcom_icc_node * const gem_noc_nodes[] = {
+ [MASTER_SYS_TCU] = &alm_sys_tcu,
+ [MASTER_APPSS_PROC] = &chm_apps,
+ [MASTER_GEMNOC_ECPRI_DMA] = &qnm_ecpri_dma,
+ [MASTER_FEC_2_GEMNOC] = &qnm_fec_2_gemnoc,
+ [MASTER_ANOC_PCIE_GEM_NOC] = &qnm_pcie,
+ [MASTER_SNOC_GC_MEM_NOC] = &qnm_snoc_gc,
+ [MASTER_SNOC_SF_MEM_NOC] = &qnm_snoc_sf,
+ [MASTER_MSS_PROC] = &qxm_mdsp,
+ [SLAVE_GEM_NOC_CNOC] = &qns_gem_noc_cnoc,
+ [SLAVE_LLCC] = &qns_llcc,
+ [SLAVE_GEMNOC_MODEM_CNOC] = &qns_modem_slave,
+ [SLAVE_MEM_NOC_PCIE_SNOC] = &qns_pcie,
+};
+
+static const struct qcom_icc_desc qdu1000_gem_noc = {
+ .nodes = gem_noc_nodes,
+ .num_nodes = ARRAY_SIZE(gem_noc_nodes),
+ .bcms = gem_noc_bcms,
+ .num_bcms = ARRAY_SIZE(gem_noc_bcms),
+};
+
+static struct qcom_icc_bcm * const mc_virt_bcms[] = {
+ &bcm_acv,
+ &bcm_mc0,
+};
+
+static struct qcom_icc_node * const mc_virt_nodes[] = {
+ [MASTER_LLCC] = &llcc_mc,
+ [SLAVE_EBI1] = &ebi,
+};
+
+static const struct qcom_icc_desc qdu1000_mc_virt = {
+ .nodes = mc_virt_nodes,
+ .num_nodes = ARRAY_SIZE(mc_virt_nodes),
+ .bcms = mc_virt_bcms,
+ .num_bcms = ARRAY_SIZE(mc_virt_bcms),
+};
+
+static struct qcom_icc_bcm * const system_noc_bcms[] = {
+ &bcm_ce0,
+ &bcm_cn0,
+ &bcm_sn0,
+ &bcm_sn1,
+ &bcm_sn2,
+ &bcm_sn7,
+};
+
+static struct qcom_icc_node * const system_noc_nodes[] = {
+ [MASTER_GIC_AHB] = &qhm_gic,
+ [MASTER_QDSS_BAM] = &qhm_qdss_bam,
+ [MASTER_QPIC] = &qhm_qpic,
+ [MASTER_QSPI_0] = &qhm_qspi,
+ [MASTER_QUP_0] = &qhm_qup0,
+ [MASTER_QUP_1] = &qhm_qup1,
+ [MASTER_SNOC_CFG] = &qhm_system_noc_cfg,
+ [MASTER_ANOC_SNOC] = &qnm_aggre_noc,
+ [MASTER_ANOC_GSI] = &qnm_aggre_noc_gsi,
+ [MASTER_GEM_NOC_CNOC] = &qnm_gemnoc_cnoc,
+ [MASTER_GEMNOC_MODEM_CNOC] = &qnm_gemnoc_modem_slave,
+ [MASTER_GEM_NOC_PCIE_SNOC] = &qnm_gemnoc_pcie,
+ [MASTER_CRYPTO] = &qxm_crypto,
+ [MASTER_ECPRI_GSI] = &qxm_ecpri_gsi,
+ [MASTER_PIMEM] = &qxm_pimem,
+ [MASTER_SNOC_ECPRI_DMA] = &xm_ecpri_dma,
+ [MASTER_GIC] = &xm_gic,
+ [MASTER_PCIE] = &xm_pcie,
+ [MASTER_QDSS_ETR] = &xm_qdss_etr0,
+ [MASTER_QDSS_ETR_1] = &xm_qdss_etr1,
+ [MASTER_SDCC_1] = &xm_sdc,
+ [MASTER_USB3] = &xm_usb3,
+ [SLAVE_AHB2PHY_SOUTH] = &qhs_ahb2phy0_south,
+ [SLAVE_AHB2PHY_NORTH] = &qhs_ahb2phy1_north,
+ [SLAVE_AHB2PHY_EAST] = &qhs_ahb2phy2_east,
+ [SLAVE_AOSS] = &qhs_aoss,
+ [SLAVE_CLK_CTL] = &qhs_clk_ctl,
+ [SLAVE_RBCPR_CX_CFG] = &qhs_cpr_cx,
+ [SLAVE_RBCPR_MX_CFG] = &qhs_cpr_mx,
+ [SLAVE_CRYPTO_0_CFG] = &qhs_crypto_cfg,
+ [SLAVE_ECPRI_CFG] = &qhs_ecpri_cfg,
+ [SLAVE_IMEM_CFG] = &qhs_imem_cfg,
+ [SLAVE_IPC_ROUTER_CFG] = &qhs_ipc_router,
+ [SLAVE_CNOC_MSS] = &qhs_mss_cfg,
+ [SLAVE_PCIE_CFG] = &qhs_pcie_cfg,
+ [SLAVE_PDM] = &qhs_pdm,
+ [SLAVE_PIMEM_CFG] = &qhs_pimem_cfg,
+ [SLAVE_PRNG] = &qhs_prng,
+ [SLAVE_QDSS_CFG] = &qhs_qdss_cfg,
+ [SLAVE_QPIC] = &qhs_qpic,
+ [SLAVE_QSPI_0] = &qhs_qspi,
+ [SLAVE_QUP_0] = &qhs_qup0,
+ [SLAVE_QUP_1] = &qhs_qup1,
+ [SLAVE_SDCC_2] = &qhs_sdc2,
+ [SLAVE_SMBUS_CFG] = &qhs_smbus_cfg,
+ [SLAVE_SNOC_CFG] = &qhs_system_noc_cfg,
+ [SLAVE_TCSR] = &qhs_tcsr,
+ [SLAVE_TLMM] = &qhs_tlmm,
+ [SLAVE_TME_CFG] = &qhs_tme_cfg,
+ [SLAVE_TSC_CFG] = &qhs_tsc_cfg,
+ [SLAVE_USB3_0] = &qhs_usb3,
+ [SLAVE_VSENSE_CTRL_CFG] = &qhs_vsense_ctrl_cfg,
+ [SLAVE_A1NOC_SNOC] = &qns_a1noc_snoc,
+ [SLAVE_ANOC_SNOC_GSI] = &qns_anoc_snoc_gsi,
+ [SLAVE_DDRSS_CFG] = &qns_ddrss_cfg,
+ [SLAVE_ECPRI_GEMNOC] = &qns_ecpri_gemnoc,
+ [SLAVE_SNOC_GEM_NOC_GC] = &qns_gemnoc_gc,
+ [SLAVE_SNOC_GEM_NOC_SF] = &qns_gemnoc_sf,
+ [SLAVE_MODEM_OFFLINE] = &qns_modem,
+ [SLAVE_ANOC_PCIE_GEM_NOC] = &qns_pcie_gemnoc,
+ [SLAVE_IMEM] = &qxs_imem,
+ [SLAVE_PIMEM] = &qxs_pimem,
+ [SLAVE_SERVICE_SNOC] = &srvc_system_noc,
+ [SLAVE_ETHERNET_SS] = &xs_ethernet_ss,
+ [SLAVE_PCIE_0] = &xs_pcie,
+ [SLAVE_QDSS_STM] = &xs_qdss_stm,
+ [SLAVE_TCU] = &xs_sys_tcu_cfg,
+};
+
+static const struct qcom_icc_desc qdu1000_system_noc = {
+ .nodes = system_noc_nodes,
+ .num_nodes = ARRAY_SIZE(system_noc_nodes),
+ .bcms = system_noc_bcms,
+ .num_bcms = ARRAY_SIZE(system_noc_bcms),
+};
+
+static int qnoc_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ ret = qcom_icc_rpmh_probe(pdev);
+ if (ret)
+ dev_err(&pdev->dev, "failed to register ICC provider\n");
+
+ return ret;
+}
+
+static const struct of_device_id qnoc_of_match[] = {
+ { .compatible = "qcom,qdu1000-clk-virt",
+ .data = &qdu1000_clk_virt
+ },
+ { .compatible = "qcom,qdu1000-gem-noc",
+ .data = &qdu1000_gem_noc
+ },
+ { .compatible = "qcom,qdu1000-mc-virt",
+ .data = &qdu1000_mc_virt
+ },
+ { .compatible = "qcom,qdu1000-system-noc",
+ .data = &qdu1000_system_noc
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, qnoc_of_match);
+
+static struct platform_driver qnoc_driver = {
+ .probe = qnoc_probe,
+ .remove = qcom_icc_rpmh_remove,
+ .driver = {
+ .name = "qnoc-qdu1000",
+ .of_match_table = qnoc_of_match,
+ },
+};
+
+static int __init qnoc_driver_init(void)
+{
+ return platform_driver_register(&qnoc_driver);
+}
+core_initcall(qnoc_driver_init);
+
+static void __exit qnoc_driver_exit(void)
+{
+ platform_driver_unregister(&qnoc_driver);
+}
+module_exit(qnoc_driver_exit);
+
+MODULE_DESCRIPTION("QDU1000 NoC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/interconnect/qcom/qdu1000.h b/drivers/interconnect/qcom/qdu1000.h
new file mode 100644
index 000000000000..e75a6419df23
--- /dev/null
+++ b/drivers/interconnect/qcom/qdu1000.h
@@ -0,0 +1,95 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __DRIVERS_INTERCONNECT_QCOM_QDU1000_H
+#define __DRIVERS_INTERCONNECT_QCOM_QDU1000_H
+
+#define QDU1000_MASTER_SYS_TCU 0
+#define QDU1000_MASTER_APPSS_PROC 1
+#define QDU1000_MASTER_LLCC 2
+#define QDU1000_MASTER_GIC_AHB 3
+#define QDU1000_MASTER_QDSS_BAM 4
+#define QDU1000_MASTER_QPIC 5
+#define QDU1000_MASTER_QSPI_0 6
+#define QDU1000_MASTER_QUP_0 7
+#define QDU1000_MASTER_QUP_1 8
+#define QDU1000_MASTER_SNOC_CFG 9
+#define QDU1000_MASTER_ANOC_SNOC 10
+#define QDU1000_MASTER_ANOC_GSI 11
+#define QDU1000_MASTER_GEMNOC_ECPRI_DMA 12
+#define QDU1000_MASTER_FEC_2_GEMNOC 13
+#define QDU1000_MASTER_GEM_NOC_CNOC 14
+#define QDU1000_MASTER_GEMNOC_MODEM_CNOC 15
+#define QDU1000_MASTER_GEM_NOC_PCIE_SNOC 16
+#define QDU1000_MASTER_ANOC_PCIE_GEM_NOC 17
+#define QDU1000_MASTER_SNOC_GC_MEM_NOC 18
+#define QDU1000_MASTER_SNOC_SF_MEM_NOC 19
+#define QDU1000_MASTER_QUP_CORE_0 20
+#define QDU1000_MASTER_QUP_CORE_1 21
+#define QDU1000_MASTER_CRYPTO 22
+#define QDU1000_MASTER_ECPRI_GSI 23
+#define QDU1000_MASTER_MSS_PROC 24
+#define QDU1000_MASTER_PIMEM 25
+#define QDU1000_MASTER_SNOC_ECPRI_DMA 26
+#define QDU1000_MASTER_GIC 27
+#define QDU1000_MASTER_PCIE 28
+#define QDU1000_MASTER_QDSS_ETR 29
+#define QDU1000_MASTER_QDSS_ETR_1 30
+#define QDU1000_MASTER_SDCC_1 31
+#define QDU1000_MASTER_USB3 32
+#define QDU1000_SLAVE_EBI1 512
+#define QDU1000_SLAVE_AHB2PHY_SOUTH 513
+#define QDU1000_SLAVE_AHB2PHY_NORTH 514
+#define QDU1000_SLAVE_AHB2PHY_EAST 515
+#define QDU1000_SLAVE_AOSS 516
+#define QDU1000_SLAVE_CLK_CTL 517
+#define QDU1000_SLAVE_RBCPR_CX_CFG 518
+#define QDU1000_SLAVE_RBCPR_MX_CFG 519
+#define QDU1000_SLAVE_CRYPTO_0_CFG 520
+#define QDU1000_SLAVE_ECPRI_CFG 521
+#define QDU1000_SLAVE_IMEM_CFG 522
+#define QDU1000_SLAVE_IPC_ROUTER_CFG 523
+#define QDU1000_SLAVE_CNOC_MSS 524
+#define QDU1000_SLAVE_PCIE_CFG 525
+#define QDU1000_SLAVE_PDM 526
+#define QDU1000_SLAVE_PIMEM_CFG 527
+#define QDU1000_SLAVE_PRNG 528
+#define QDU1000_SLAVE_QDSS_CFG 529
+#define QDU1000_SLAVE_QPIC 530
+#define QDU1000_SLAVE_QSPI_0 531
+#define QDU1000_SLAVE_QUP_0 532
+#define QDU1000_SLAVE_QUP_1 533
+#define QDU1000_SLAVE_SDCC_2 534
+#define QDU1000_SLAVE_SMBUS_CFG 535
+#define QDU1000_SLAVE_SNOC_CFG 536
+#define QDU1000_SLAVE_TCSR 537
+#define QDU1000_SLAVE_TLMM 538
+#define QDU1000_SLAVE_TME_CFG 539
+#define QDU1000_SLAVE_TSC_CFG 540
+#define QDU1000_SLAVE_USB3_0 541
+#define QDU1000_SLAVE_VSENSE_CTRL_CFG 542
+#define QDU1000_SLAVE_A1NOC_SNOC 543
+#define QDU1000_SLAVE_ANOC_SNOC_GSI 544
+#define QDU1000_SLAVE_DDRSS_CFG 545
+#define QDU1000_SLAVE_ECPRI_GEMNOC 546
+#define QDU1000_SLAVE_GEM_NOC_CNOC 547
+#define QDU1000_SLAVE_SNOC_GEM_NOC_GC 548
+#define QDU1000_SLAVE_SNOC_GEM_NOC_SF 549
+#define QDU1000_SLAVE_LLCC 550
+#define QDU1000_SLAVE_MODEM_OFFLINE 551
+#define QDU1000_SLAVE_GEMNOC_MODEM_CNOC 552
+#define QDU1000_SLAVE_MEM_NOC_PCIE_SNOC 553
+#define QDU1000_SLAVE_ANOC_PCIE_GEM_NOC 554
+#define QDU1000_SLAVE_QUP_CORE_0 555
+#define QDU1000_SLAVE_QUP_CORE_1 556
+#define QDU1000_SLAVE_IMEM 557
+#define QDU1000_SLAVE_PIMEM 558
+#define QDU1000_SLAVE_SERVICE_SNOC 559
+#define QDU1000_SLAVE_ETHERNET_SS 560
+#define QDU1000_SLAVE_PCIE_0 561
+#define QDU1000_SLAVE_QDSS_STM 562
+#define QDU1000_SLAVE_TCU 563
+
+#endif
diff --git a/drivers/interconnect/qcom/sa8775p.c b/drivers/interconnect/qcom/sa8775p.c
new file mode 100644
index 000000000000..da21cc31a580
--- /dev/null
+++ b/drivers/interconnect/qcom/sa8775p.c
@@ -0,0 +1,2541 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#include <linux/device.h>
+#include <linux/interconnect.h>
+#include <linux/interconnect-provider.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <dt-bindings/interconnect/qcom,sa8775p-rpmh.h>
+
+#include "bcm-voter.h"
+#include "icc-rpmh.h"
+
+#define SA8775P_MASTER_GPU_TCU 0
+#define SA8775P_MASTER_PCIE_TCU 1
+#define SA8775P_MASTER_SYS_TCU 2
+#define SA8775P_MASTER_APPSS_PROC 3
+#define SA8775P_MASTER_LLCC 4
+#define SA8775P_MASTER_CNOC_LPASS_AG_NOC 5
+#define SA8775P_MASTER_GIC_AHB 6
+#define SA8775P_MASTER_CDSP_NOC_CFG 7
+#define SA8775P_MASTER_CDSPB_NOC_CFG 8
+#define SA8775P_MASTER_QDSS_BAM 9
+#define SA8775P_MASTER_QUP_0 10
+#define SA8775P_MASTER_QUP_1 11
+#define SA8775P_MASTER_QUP_2 12
+#define SA8775P_MASTER_A1NOC_SNOC 13
+#define SA8775P_MASTER_A2NOC_SNOC 14
+#define SA8775P_MASTER_CAMNOC_HF 15
+#define SA8775P_MASTER_CAMNOC_ICP 16
+#define SA8775P_MASTER_CAMNOC_SF 17
+#define SA8775P_MASTER_COMPUTE_NOC 18
+#define SA8775P_MASTER_COMPUTE_NOC_1 19
+#define SA8775P_MASTER_CNOC_A2NOC 20
+#define SA8775P_MASTER_CNOC_DC_NOC 21
+#define SA8775P_MASTER_GEM_NOC_CFG 22
+#define SA8775P_MASTER_GEM_NOC_CNOC 23
+#define SA8775P_MASTER_GEM_NOC_PCIE_SNOC 24
+#define SA8775P_MASTER_GPDSP_SAIL 25
+#define SA8775P_MASTER_GFX3D 26
+#define SA8775P_MASTER_LPASS_ANOC 27
+#define SA8775P_MASTER_MDP0 28
+#define SA8775P_MASTER_MDP1 29
+#define SA8775P_MASTER_MDP_CORE1_0 30
+#define SA8775P_MASTER_MDP_CORE1_1 31
+#define SA8775P_MASTER_MNOC_HF_MEM_NOC 32
+#define SA8775P_MASTER_CNOC_MNOC_HF_CFG 33
+#define SA8775P_MASTER_MNOC_SF_MEM_NOC 34
+#define SA8775P_MASTER_CNOC_MNOC_SF_CFG 35
+#define SA8775P_MASTER_ANOC_PCIE_GEM_NOC 36
+#define SA8775P_MASTER_SNOC_CFG 37
+#define SA8775P_MASTER_SNOC_GC_MEM_NOC 38
+#define SA8775P_MASTER_SNOC_SF_MEM_NOC 39
+#define SA8775P_MASTER_VIDEO_P0 40
+#define SA8775P_MASTER_VIDEO_P1 41
+#define SA8775P_MASTER_VIDEO_PROC 42
+#define SA8775P_MASTER_VIDEO_V_PROC 43
+#define SA8775P_MASTER_QUP_CORE_0 44
+#define SA8775P_MASTER_QUP_CORE_1 45
+#define SA8775P_MASTER_QUP_CORE_2 46
+#define SA8775P_MASTER_QUP_CORE_3 47
+#define SA8775P_MASTER_CRYPTO_CORE0 48
+#define SA8775P_MASTER_CRYPTO_CORE1 49
+#define SA8775P_MASTER_DSP0 50
+#define SA8775P_MASTER_DSP1 51
+#define SA8775P_MASTER_IPA 52
+#define SA8775P_MASTER_LPASS_PROC 53
+#define SA8775P_MASTER_CDSP_PROC 54
+#define SA8775P_MASTER_CDSP_PROC_B 55
+#define SA8775P_MASTER_PIMEM 56
+#define SA8775P_MASTER_QUP_3 57
+#define SA8775P_MASTER_EMAC 58
+#define SA8775P_MASTER_EMAC_1 59
+#define SA8775P_MASTER_GIC 60
+#define SA8775P_MASTER_PCIE_0 61
+#define SA8775P_MASTER_PCIE_1 62
+#define SA8775P_MASTER_QDSS_ETR_0 63
+#define SA8775P_MASTER_QDSS_ETR_1 64
+#define SA8775P_MASTER_SDC 65
+#define SA8775P_MASTER_UFS_CARD 66
+#define SA8775P_MASTER_UFS_MEM 67
+#define SA8775P_MASTER_USB2 68
+#define SA8775P_MASTER_USB3_0 69
+#define SA8775P_MASTER_USB3_1 70
+#define SA8775P_SLAVE_EBI1 512
+#define SA8775P_SLAVE_AHB2PHY_0 513
+#define SA8775P_SLAVE_AHB2PHY_1 514
+#define SA8775P_SLAVE_AHB2PHY_2 515
+#define SA8775P_SLAVE_AHB2PHY_3 516
+#define SA8775P_SLAVE_ANOC_THROTTLE_CFG 517
+#define SA8775P_SLAVE_AOSS 518
+#define SA8775P_SLAVE_APPSS 519
+#define SA8775P_SLAVE_BOOT_ROM 520
+#define SA8775P_SLAVE_CAMERA_CFG 521
+#define SA8775P_SLAVE_CAMERA_NRT_THROTTLE_CFG 522
+#define SA8775P_SLAVE_CAMERA_RT_THROTTLE_CFG 523
+#define SA8775P_SLAVE_CLK_CTL 524
+#define SA8775P_SLAVE_CDSP_CFG 525
+#define SA8775P_SLAVE_CDSP1_CFG 526
+#define SA8775P_SLAVE_RBCPR_CX_CFG 527
+#define SA8775P_SLAVE_RBCPR_MMCX_CFG 528
+#define SA8775P_SLAVE_RBCPR_MX_CFG 529
+#define SA8775P_SLAVE_CPR_NSPCX 530
+#define SA8775P_SLAVE_CRYPTO_0_CFG 531
+#define SA8775P_SLAVE_CX_RDPM 532
+#define SA8775P_SLAVE_DISPLAY_CFG 533
+#define SA8775P_SLAVE_DISPLAY_RT_THROTTLE_CFG 534
+#define SA8775P_SLAVE_DISPLAY1_CFG 535
+#define SA8775P_SLAVE_DISPLAY1_RT_THROTTLE_CFG 536
+#define SA8775P_SLAVE_EMAC_CFG 537
+#define SA8775P_SLAVE_EMAC1_CFG 538
+#define SA8775P_SLAVE_GP_DSP0_CFG 539
+#define SA8775P_SLAVE_GP_DSP1_CFG 540
+#define SA8775P_SLAVE_GPDSP0_THROTTLE_CFG 541
+#define SA8775P_SLAVE_GPDSP1_THROTTLE_CFG 542
+#define SA8775P_SLAVE_GPU_TCU_THROTTLE_CFG 543
+#define SA8775P_SLAVE_GFX3D_CFG 544
+#define SA8775P_SLAVE_HWKM 545
+#define SA8775P_SLAVE_IMEM_CFG 546
+#define SA8775P_SLAVE_IPA_CFG 547
+#define SA8775P_SLAVE_IPC_ROUTER_CFG 548
+#define SA8775P_SLAVE_LLCC_CFG 549
+#define SA8775P_SLAVE_LPASS 550
+#define SA8775P_SLAVE_LPASS_CORE_CFG 551
+#define SA8775P_SLAVE_LPASS_LPI_CFG 552
+#define SA8775P_SLAVE_LPASS_MPU_CFG 553
+#define SA8775P_SLAVE_LPASS_THROTTLE_CFG 554
+#define SA8775P_SLAVE_LPASS_TOP_CFG 555
+#define SA8775P_SLAVE_MX_RDPM 556
+#define SA8775P_SLAVE_MXC_RDPM 557
+#define SA8775P_SLAVE_PCIE_0_CFG 558
+#define SA8775P_SLAVE_PCIE_1_CFG 559
+#define SA8775P_SLAVE_PCIE_RSC_CFG 560
+#define SA8775P_SLAVE_PCIE_TCU_THROTTLE_CFG 561
+#define SA8775P_SLAVE_PCIE_THROTTLE_CFG 562
+#define SA8775P_SLAVE_PDM 563
+#define SA8775P_SLAVE_PIMEM_CFG 564
+#define SA8775P_SLAVE_PKA_WRAPPER_CFG 565
+#define SA8775P_SLAVE_QDSS_CFG 566
+#define SA8775P_SLAVE_QM_CFG 567
+#define SA8775P_SLAVE_QM_MPU_CFG 568
+#define SA8775P_SLAVE_QUP_0 569
+#define SA8775P_SLAVE_QUP_1 570
+#define SA8775P_SLAVE_QUP_2 571
+#define SA8775P_SLAVE_QUP_3 572
+#define SA8775P_SLAVE_SAIL_THROTTLE_CFG 573
+#define SA8775P_SLAVE_SDC1 574
+#define SA8775P_SLAVE_SECURITY 575
+#define SA8775P_SLAVE_SNOC_THROTTLE_CFG 576
+#define SA8775P_SLAVE_TCSR 577
+#define SA8775P_SLAVE_TLMM 578
+#define SA8775P_SLAVE_TSC_CFG 579
+#define SA8775P_SLAVE_UFS_CARD_CFG 580
+#define SA8775P_SLAVE_UFS_MEM_CFG 581
+#define SA8775P_SLAVE_USB2 582
+#define SA8775P_SLAVE_USB3_0 583
+#define SA8775P_SLAVE_USB3_1 584
+#define SA8775P_SLAVE_VENUS_CFG 585
+#define SA8775P_SLAVE_VENUS_CVP_THROTTLE_CFG 586
+#define SA8775P_SLAVE_VENUS_V_CPU_THROTTLE_CFG 587
+#define SA8775P_SLAVE_VENUS_VCODEC_THROTTLE_CFG 588
+#define SA8775P_SLAVE_A1NOC_SNOC 589
+#define SA8775P_SLAVE_A2NOC_SNOC 590
+#define SA8775P_SLAVE_DDRSS_CFG 591
+#define SA8775P_SLAVE_GEM_NOC_CNOC 592
+#define SA8775P_SLAVE_GEM_NOC_CFG 593
+#define SA8775P_SLAVE_SNOC_GEM_NOC_GC 594
+#define SA8775P_SLAVE_SNOC_GEM_NOC_SF 595
+#define SA8775P_SLAVE_GP_DSP_SAIL_NOC 596
+#define SA8775P_SLAVE_GPDSP_NOC_CFG 597
+#define SA8775P_SLAVE_HCP_A 598
+#define SA8775P_SLAVE_LLCC 599
+#define SA8775P_SLAVE_MNOC_HF_MEM_NOC 600
+#define SA8775P_SLAVE_MNOC_SF_MEM_NOC 601
+#define SA8775P_SLAVE_CNOC_MNOC_HF_CFG 602
+#define SA8775P_SLAVE_CNOC_MNOC_SF_CFG 603
+#define SA8775P_SLAVE_CDSP_MEM_NOC 604
+#define SA8775P_SLAVE_CDSPB_MEM_NOC 605
+#define SA8775P_SLAVE_HCP_B 606
+#define SA8775P_SLAVE_GEM_NOC_PCIE_CNOC 607
+#define SA8775P_SLAVE_PCIE_ANOC_CFG 608
+#define SA8775P_SLAVE_ANOC_PCIE_GEM_NOC 609
+#define SA8775P_SLAVE_SNOC_CFG 610
+#define SA8775P_SLAVE_LPASS_SNOC 611
+#define SA8775P_SLAVE_QUP_CORE_0 612
+#define SA8775P_SLAVE_QUP_CORE_1 613
+#define SA8775P_SLAVE_QUP_CORE_2 614
+#define SA8775P_SLAVE_QUP_CORE_3 615
+#define SA8775P_SLAVE_BOOT_IMEM 616
+#define SA8775P_SLAVE_IMEM 617
+#define SA8775P_SLAVE_PIMEM 618
+#define SA8775P_SLAVE_SERVICE_NSP_NOC 619
+#define SA8775P_SLAVE_SERVICE_NSPB_NOC 620
+#define SA8775P_SLAVE_SERVICE_GEM_NOC_1 621
+#define SA8775P_SLAVE_SERVICE_MNOC_HF 622
+#define SA8775P_SLAVE_SERVICE_MNOC_SF 623
+#define SA8775P_SLAVE_SERVICES_LPASS_AML_NOC 624
+#define SA8775P_SLAVE_SERVICE_LPASS_AG_NOC 625
+#define SA8775P_SLAVE_SERVICE_GEM_NOC_2 626
+#define SA8775P_SLAVE_SERVICE_SNOC 627
+#define SA8775P_SLAVE_SERVICE_GEM_NOC 628
+#define SA8775P_SLAVE_SERVICE_GEM_NOC2 629
+#define SA8775P_SLAVE_PCIE_0 630
+#define SA8775P_SLAVE_PCIE_1 631
+#define SA8775P_SLAVE_QDSS_STM 632
+#define SA8775P_SLAVE_TCU 633
+
+static struct qcom_icc_node qxm_qup3 = {
+ .name = "qxm_qup3",
+ .id = SA8775P_MASTER_QUP_3,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SA8775P_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_emac_0 = {
+ .name = "xm_emac_0",
+ .id = SA8775P_MASTER_EMAC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SA8775P_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_emac_1 = {
+ .name = "xm_emac_1",
+ .id = SA8775P_MASTER_EMAC_1,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SA8775P_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_sdc1 = {
+ .name = "xm_sdc1",
+ .id = SA8775P_MASTER_SDC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SA8775P_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_ufs_mem = {
+ .name = "xm_ufs_mem",
+ .id = SA8775P_MASTER_UFS_MEM,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SA8775P_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_usb2_2 = {
+ .name = "xm_usb2_2",
+ .id = SA8775P_MASTER_USB2,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SA8775P_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_usb3_0 = {
+ .name = "xm_usb3_0",
+ .id = SA8775P_MASTER_USB3_0,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SA8775P_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_usb3_1 = {
+ .name = "xm_usb3_1",
+ .id = SA8775P_MASTER_USB3_1,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SA8775P_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node qhm_qdss_bam = {
+ .name = "qhm_qdss_bam",
+ .id = SA8775P_MASTER_QDSS_BAM,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SA8775P_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node qhm_qup0 = {
+ .name = "qhm_qup0",
+ .id = SA8775P_MASTER_QUP_0,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SA8775P_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node qhm_qup1 = {
+ .name = "qhm_qup1",
+ .id = SA8775P_MASTER_QUP_1,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SA8775P_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node qhm_qup2 = {
+ .name = "qhm_qup2",
+ .id = SA8775P_MASTER_QUP_2,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SA8775P_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node qnm_cnoc_datapath = {
+ .name = "qnm_cnoc_datapath",
+ .id = SA8775P_MASTER_CNOC_A2NOC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SA8775P_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node qxm_crypto_0 = {
+ .name = "qxm_crypto_0",
+ .id = SA8775P_MASTER_CRYPTO_CORE0,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SA8775P_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node qxm_crypto_1 = {
+ .name = "qxm_crypto_1",
+ .id = SA8775P_MASTER_CRYPTO_CORE1,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SA8775P_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node qxm_ipa = {
+ .name = "qxm_ipa",
+ .id = SA8775P_MASTER_IPA,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SA8775P_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_qdss_etr_0 = {
+ .name = "xm_qdss_etr_0",
+ .id = SA8775P_MASTER_QDSS_ETR_0,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SA8775P_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_qdss_etr_1 = {
+ .name = "xm_qdss_etr_1",
+ .id = SA8775P_MASTER_QDSS_ETR_1,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SA8775P_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_ufs_card = {
+ .name = "xm_ufs_card",
+ .id = SA8775P_MASTER_UFS_CARD,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SA8775P_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node qup0_core_master = {
+ .name = "qup0_core_master",
+ .id = SA8775P_MASTER_QUP_CORE_0,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SA8775P_SLAVE_QUP_CORE_0 },
+};
+
+static struct qcom_icc_node qup1_core_master = {
+ .name = "qup1_core_master",
+ .id = SA8775P_MASTER_QUP_CORE_1,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SA8775P_SLAVE_QUP_CORE_1 },
+};
+
+static struct qcom_icc_node qup2_core_master = {
+ .name = "qup2_core_master",
+ .id = SA8775P_MASTER_QUP_CORE_2,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SA8775P_SLAVE_QUP_CORE_2 },
+};
+
+static struct qcom_icc_node qup3_core_master = {
+ .name = "qup3_core_master",
+ .id = SA8775P_MASTER_QUP_CORE_3,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SA8775P_SLAVE_QUP_CORE_3 },
+};
+
+static struct qcom_icc_node qnm_gemnoc_cnoc = {
+ .name = "qnm_gemnoc_cnoc",
+ .id = SA8775P_MASTER_GEM_NOC_CNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 82,
+ .links = { SA8775P_SLAVE_AHB2PHY_0,
+ SA8775P_SLAVE_AHB2PHY_1,
+ SA8775P_SLAVE_AHB2PHY_2,
+ SA8775P_SLAVE_AHB2PHY_3,
+ SA8775P_SLAVE_ANOC_THROTTLE_CFG,
+ SA8775P_SLAVE_AOSS,
+ SA8775P_SLAVE_APPSS,
+ SA8775P_SLAVE_BOOT_ROM,
+ SA8775P_SLAVE_CAMERA_CFG,
+ SA8775P_SLAVE_CAMERA_NRT_THROTTLE_CFG,
+ SA8775P_SLAVE_CAMERA_RT_THROTTLE_CFG,
+ SA8775P_SLAVE_CLK_CTL,
+ SA8775P_SLAVE_CDSP_CFG,
+ SA8775P_SLAVE_CDSP1_CFG,
+ SA8775P_SLAVE_RBCPR_CX_CFG,
+ SA8775P_SLAVE_RBCPR_MMCX_CFG,
+ SA8775P_SLAVE_RBCPR_MX_CFG,
+ SA8775P_SLAVE_CPR_NSPCX,
+ SA8775P_SLAVE_CRYPTO_0_CFG,
+ SA8775P_SLAVE_CX_RDPM,
+ SA8775P_SLAVE_DISPLAY_CFG,
+ SA8775P_SLAVE_DISPLAY_RT_THROTTLE_CFG,
+ SA8775P_SLAVE_DISPLAY1_CFG,
+ SA8775P_SLAVE_DISPLAY1_RT_THROTTLE_CFG,
+ SA8775P_SLAVE_EMAC_CFG,
+ SA8775P_SLAVE_EMAC1_CFG,
+ SA8775P_SLAVE_GP_DSP0_CFG,
+ SA8775P_SLAVE_GP_DSP1_CFG,
+ SA8775P_SLAVE_GPDSP0_THROTTLE_CFG,
+ SA8775P_SLAVE_GPDSP1_THROTTLE_CFG,
+ SA8775P_SLAVE_GPU_TCU_THROTTLE_CFG,
+ SA8775P_SLAVE_GFX3D_CFG,
+ SA8775P_SLAVE_HWKM,
+ SA8775P_SLAVE_IMEM_CFG,
+ SA8775P_SLAVE_IPA_CFG,
+ SA8775P_SLAVE_IPC_ROUTER_CFG,
+ SA8775P_SLAVE_LPASS,
+ SA8775P_SLAVE_LPASS_THROTTLE_CFG,
+ SA8775P_SLAVE_MX_RDPM,
+ SA8775P_SLAVE_MXC_RDPM,
+ SA8775P_SLAVE_PCIE_0_CFG,
+ SA8775P_SLAVE_PCIE_1_CFG,
+ SA8775P_SLAVE_PCIE_RSC_CFG,
+ SA8775P_SLAVE_PCIE_TCU_THROTTLE_CFG,
+ SA8775P_SLAVE_PCIE_THROTTLE_CFG,
+ SA8775P_SLAVE_PDM,
+ SA8775P_SLAVE_PIMEM_CFG,
+ SA8775P_SLAVE_PKA_WRAPPER_CFG,
+ SA8775P_SLAVE_QDSS_CFG,
+ SA8775P_SLAVE_QM_CFG,
+ SA8775P_SLAVE_QM_MPU_CFG,
+ SA8775P_SLAVE_QUP_0,
+ SA8775P_SLAVE_QUP_1,
+ SA8775P_SLAVE_QUP_2,
+ SA8775P_SLAVE_QUP_3,
+ SA8775P_SLAVE_SAIL_THROTTLE_CFG,
+ SA8775P_SLAVE_SDC1,
+ SA8775P_SLAVE_SECURITY,
+ SA8775P_SLAVE_SNOC_THROTTLE_CFG,
+ SA8775P_SLAVE_TCSR,
+ SA8775P_SLAVE_TLMM,
+ SA8775P_SLAVE_TSC_CFG,
+ SA8775P_SLAVE_UFS_CARD_CFG,
+ SA8775P_SLAVE_UFS_MEM_CFG,
+ SA8775P_SLAVE_USB2,
+ SA8775P_SLAVE_USB3_0,
+ SA8775P_SLAVE_USB3_1,
+ SA8775P_SLAVE_VENUS_CFG,
+ SA8775P_SLAVE_VENUS_CVP_THROTTLE_CFG,
+ SA8775P_SLAVE_VENUS_V_CPU_THROTTLE_CFG,
+ SA8775P_SLAVE_VENUS_VCODEC_THROTTLE_CFG,
+ SA8775P_SLAVE_DDRSS_CFG,
+ SA8775P_SLAVE_GPDSP_NOC_CFG,
+ SA8775P_SLAVE_CNOC_MNOC_HF_CFG,
+ SA8775P_SLAVE_CNOC_MNOC_SF_CFG,
+ SA8775P_SLAVE_PCIE_ANOC_CFG,
+ SA8775P_SLAVE_SNOC_CFG,
+ SA8775P_SLAVE_BOOT_IMEM,
+ SA8775P_SLAVE_IMEM,
+ SA8775P_SLAVE_PIMEM,
+ SA8775P_SLAVE_QDSS_STM,
+ SA8775P_SLAVE_TCU
+ },
+};
+
+static struct qcom_icc_node qnm_gemnoc_pcie = {
+ .name = "qnm_gemnoc_pcie",
+ .id = SA8775P_MASTER_GEM_NOC_PCIE_SNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 2,
+ .links = { SA8775P_SLAVE_PCIE_0,
+ SA8775P_SLAVE_PCIE_1
+ },
+};
+
+static struct qcom_icc_node qnm_cnoc_dc_noc = {
+ .name = "qnm_cnoc_dc_noc",
+ .id = SA8775P_MASTER_CNOC_DC_NOC,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 2,
+ .links = { SA8775P_SLAVE_LLCC_CFG,
+ SA8775P_SLAVE_GEM_NOC_CFG
+ },
+};
+
+static struct qcom_icc_node alm_gpu_tcu = {
+ .name = "alm_gpu_tcu",
+ .id = SA8775P_MASTER_GPU_TCU,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 2,
+ .links = { SA8775P_SLAVE_GEM_NOC_CNOC,
+ SA8775P_SLAVE_LLCC
+ },
+};
+
+static struct qcom_icc_node alm_pcie_tcu = {
+ .name = "alm_pcie_tcu",
+ .id = SA8775P_MASTER_PCIE_TCU,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 2,
+ .links = { SA8775P_SLAVE_GEM_NOC_CNOC,
+ SA8775P_SLAVE_LLCC
+ },
+};
+
+static struct qcom_icc_node alm_sys_tcu = {
+ .name = "alm_sys_tcu",
+ .id = SA8775P_MASTER_SYS_TCU,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 2,
+ .links = { SA8775P_SLAVE_GEM_NOC_CNOC,
+ SA8775P_SLAVE_LLCC
+ },
+};
+
+static struct qcom_icc_node chm_apps = {
+ .name = "chm_apps",
+ .id = SA8775P_MASTER_APPSS_PROC,
+ .channels = 4,
+ .buswidth = 32,
+ .num_links = 3,
+ .links = { SA8775P_SLAVE_GEM_NOC_CNOC,
+ SA8775P_SLAVE_LLCC,
+ SA8775P_SLAVE_GEM_NOC_PCIE_CNOC
+ },
+};
+
+static struct qcom_icc_node qnm_cmpnoc0 = {
+ .name = "qnm_cmpnoc0",
+ .id = SA8775P_MASTER_COMPUTE_NOC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 2,
+ .links = { SA8775P_SLAVE_GEM_NOC_CNOC,
+ SA8775P_SLAVE_LLCC
+ },
+};
+
+static struct qcom_icc_node qnm_cmpnoc1 = {
+ .name = "qnm_cmpnoc1",
+ .id = SA8775P_MASTER_COMPUTE_NOC_1,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 2,
+ .links = { SA8775P_SLAVE_GEM_NOC_CNOC,
+ SA8775P_SLAVE_LLCC
+ },
+};
+
+static struct qcom_icc_node qnm_gemnoc_cfg = {
+ .name = "qnm_gemnoc_cfg",
+ .id = SA8775P_MASTER_GEM_NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 4,
+ .links = { SA8775P_SLAVE_SERVICE_GEM_NOC_1,
+ SA8775P_SLAVE_SERVICE_GEM_NOC_2,
+ SA8775P_SLAVE_SERVICE_GEM_NOC,
+ SA8775P_SLAVE_SERVICE_GEM_NOC2
+ },
+};
+
+static struct qcom_icc_node qnm_gpdsp_sail = {
+ .name = "qnm_gpdsp_sail",
+ .id = SA8775P_MASTER_GPDSP_SAIL,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 2,
+ .links = { SA8775P_SLAVE_GEM_NOC_CNOC,
+ SA8775P_SLAVE_LLCC
+ },
+};
+
+static struct qcom_icc_node qnm_gpu = {
+ .name = "qnm_gpu",
+ .id = SA8775P_MASTER_GFX3D,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 2,
+ .links = { SA8775P_SLAVE_GEM_NOC_CNOC,
+ SA8775P_SLAVE_LLCC
+ },
+};
+
+static struct qcom_icc_node qnm_mnoc_hf = {
+ .name = "qnm_mnoc_hf",
+ .id = SA8775P_MASTER_MNOC_HF_MEM_NOC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 2,
+ .links = { SA8775P_SLAVE_LLCC,
+ SA8775P_SLAVE_GEM_NOC_PCIE_CNOC
+ },
+};
+
+static struct qcom_icc_node qnm_mnoc_sf = {
+ .name = "qnm_mnoc_sf",
+ .id = SA8775P_MASTER_MNOC_SF_MEM_NOC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 3,
+ .links = { SA8775P_SLAVE_GEM_NOC_CNOC,
+ SA8775P_SLAVE_LLCC,
+ SA8775P_SLAVE_GEM_NOC_PCIE_CNOC
+ },
+};
+
+static struct qcom_icc_node qnm_pcie = {
+ .name = "qnm_pcie",
+ .id = SA8775P_MASTER_ANOC_PCIE_GEM_NOC,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 2,
+ .links = { SA8775P_SLAVE_GEM_NOC_CNOC,
+ SA8775P_SLAVE_LLCC
+ },
+};
+
+static struct qcom_icc_node qnm_snoc_gc = {
+ .name = "qnm_snoc_gc",
+ .id = SA8775P_MASTER_SNOC_GC_MEM_NOC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SA8775P_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node qnm_snoc_sf = {
+ .name = "qnm_snoc_sf",
+ .id = SA8775P_MASTER_SNOC_SF_MEM_NOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 3,
+ .links = { SA8775P_SLAVE_GEM_NOC_CNOC,
+ SA8775P_SLAVE_LLCC,
+ SA8775P_SLAVE_GEM_NOC_PCIE_CNOC },
+};
+
+static struct qcom_icc_node qxm_dsp0 = {
+ .name = "qxm_dsp0",
+ .id = SA8775P_MASTER_DSP0,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SA8775P_SLAVE_GP_DSP_SAIL_NOC },
+};
+
+static struct qcom_icc_node qxm_dsp1 = {
+ .name = "qxm_dsp1",
+ .id = SA8775P_MASTER_DSP1,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SA8775P_SLAVE_GP_DSP_SAIL_NOC },
+};
+
+static struct qcom_icc_node qhm_config_noc = {
+ .name = "qhm_config_noc",
+ .id = SA8775P_MASTER_CNOC_LPASS_AG_NOC,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 6,
+ .links = { SA8775P_SLAVE_LPASS_CORE_CFG,
+ SA8775P_SLAVE_LPASS_LPI_CFG,
+ SA8775P_SLAVE_LPASS_MPU_CFG,
+ SA8775P_SLAVE_LPASS_TOP_CFG,
+ SA8775P_SLAVE_SERVICES_LPASS_AML_NOC,
+ SA8775P_SLAVE_SERVICE_LPASS_AG_NOC
+ },
+};
+
+static struct qcom_icc_node qxm_lpass_dsp = {
+ .name = "qxm_lpass_dsp",
+ .id = SA8775P_MASTER_LPASS_PROC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 4,
+ .links = { SA8775P_SLAVE_LPASS_TOP_CFG,
+ SA8775P_SLAVE_LPASS_SNOC,
+ SA8775P_SLAVE_SERVICES_LPASS_AML_NOC,
+ SA8775P_SLAVE_SERVICE_LPASS_AG_NOC
+ },
+};
+
+static struct qcom_icc_node llcc_mc = {
+ .name = "llcc_mc",
+ .id = SA8775P_MASTER_LLCC,
+ .channels = 8,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SA8775P_SLAVE_EBI1 },
+};
+
+static struct qcom_icc_node qnm_camnoc_hf = {
+ .name = "qnm_camnoc_hf",
+ .id = SA8775P_MASTER_CAMNOC_HF,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SA8775P_SLAVE_MNOC_HF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_camnoc_icp = {
+ .name = "qnm_camnoc_icp",
+ .id = SA8775P_MASTER_CAMNOC_ICP,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SA8775P_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_camnoc_sf = {
+ .name = "qnm_camnoc_sf",
+ .id = SA8775P_MASTER_CAMNOC_SF,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SA8775P_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_mdp0_0 = {
+ .name = "qnm_mdp0_0",
+ .id = SA8775P_MASTER_MDP0,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SA8775P_SLAVE_MNOC_HF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_mdp0_1 = {
+ .name = "qnm_mdp0_1",
+ .id = SA8775P_MASTER_MDP1,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SA8775P_SLAVE_MNOC_HF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_mdp1_0 = {
+ .name = "qnm_mdp1_0",
+ .id = SA8775P_MASTER_MDP_CORE1_0,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SA8775P_SLAVE_MNOC_HF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_mdp1_1 = {
+ .name = "qnm_mdp1_1",
+ .id = SA8775P_MASTER_MDP_CORE1_1,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SA8775P_SLAVE_MNOC_HF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_mnoc_hf_cfg = {
+ .name = "qnm_mnoc_hf_cfg",
+ .id = SA8775P_MASTER_CNOC_MNOC_HF_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SA8775P_SLAVE_SERVICE_MNOC_HF },
+};
+
+static struct qcom_icc_node qnm_mnoc_sf_cfg = {
+ .name = "qnm_mnoc_sf_cfg",
+ .id = SA8775P_MASTER_CNOC_MNOC_SF_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SA8775P_SLAVE_SERVICE_MNOC_SF },
+};
+
+static struct qcom_icc_node qnm_video0 = {
+ .name = "qnm_video0",
+ .id = SA8775P_MASTER_VIDEO_P0,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SA8775P_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_video1 = {
+ .name = "qnm_video1",
+ .id = SA8775P_MASTER_VIDEO_P1,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SA8775P_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_video_cvp = {
+ .name = "qnm_video_cvp",
+ .id = SA8775P_MASTER_VIDEO_PROC,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SA8775P_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_video_v_cpu = {
+ .name = "qnm_video_v_cpu",
+ .id = SA8775P_MASTER_VIDEO_V_PROC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SA8775P_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qhm_nsp_noc_config = {
+ .name = "qhm_nsp_noc_config",
+ .id = SA8775P_MASTER_CDSP_NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SA8775P_SLAVE_SERVICE_NSP_NOC },
+};
+
+static struct qcom_icc_node qxm_nsp = {
+ .name = "qxm_nsp",
+ .id = SA8775P_MASTER_CDSP_PROC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 2,
+ .links = { SA8775P_SLAVE_HCP_A, SLAVE_CDSP_MEM_NOC },
+};
+
+static struct qcom_icc_node qhm_nspb_noc_config = {
+ .name = "qhm_nspb_noc_config",
+ .id = SA8775P_MASTER_CDSPB_NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SA8775P_SLAVE_SERVICE_NSPB_NOC },
+};
+
+static struct qcom_icc_node qxm_nspb = {
+ .name = "qxm_nspb",
+ .id = SA8775P_MASTER_CDSP_PROC_B,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 2,
+ .links = { SA8775P_SLAVE_HCP_B, SLAVE_CDSPB_MEM_NOC },
+};
+
+static struct qcom_icc_node xm_pcie3_0 = {
+ .name = "xm_pcie3_0",
+ .id = SA8775P_MASTER_PCIE_0,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SA8775P_SLAVE_ANOC_PCIE_GEM_NOC },
+};
+
+static struct qcom_icc_node xm_pcie3_1 = {
+ .name = "xm_pcie3_1",
+ .id = SA8775P_MASTER_PCIE_1,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SA8775P_SLAVE_ANOC_PCIE_GEM_NOC },
+};
+
+static struct qcom_icc_node qhm_gic = {
+ .name = "qhm_gic",
+ .id = SA8775P_MASTER_GIC_AHB,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SA8775P_SLAVE_SNOC_GEM_NOC_SF },
+};
+
+static struct qcom_icc_node qnm_aggre1_noc = {
+ .name = "qnm_aggre1_noc",
+ .id = SA8775P_MASTER_A1NOC_SNOC,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SA8775P_SLAVE_SNOC_GEM_NOC_SF },
+};
+
+static struct qcom_icc_node qnm_aggre2_noc = {
+ .name = "qnm_aggre2_noc",
+ .id = SA8775P_MASTER_A2NOC_SNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SA8775P_SLAVE_SNOC_GEM_NOC_SF },
+};
+
+static struct qcom_icc_node qnm_lpass_noc = {
+ .name = "qnm_lpass_noc",
+ .id = SA8775P_MASTER_LPASS_ANOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SA8775P_SLAVE_SNOC_GEM_NOC_SF },
+};
+
+static struct qcom_icc_node qnm_snoc_cfg = {
+ .name = "qnm_snoc_cfg",
+ .id = SA8775P_MASTER_SNOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SA8775P_SLAVE_SERVICE_SNOC },
+};
+
+static struct qcom_icc_node qxm_pimem = {
+ .name = "qxm_pimem",
+ .id = SA8775P_MASTER_PIMEM,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SA8775P_SLAVE_SNOC_GEM_NOC_GC },
+};
+
+static struct qcom_icc_node xm_gic = {
+ .name = "xm_gic",
+ .id = SA8775P_MASTER_GIC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SA8775P_SLAVE_SNOC_GEM_NOC_GC },
+};
+
+static struct qcom_icc_node qns_a1noc_snoc = {
+ .name = "qns_a1noc_snoc",
+ .id = SA8775P_SLAVE_A1NOC_SNOC,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SA8775P_MASTER_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node qns_a2noc_snoc = {
+ .name = "qns_a2noc_snoc",
+ .id = SA8775P_SLAVE_A2NOC_SNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SA8775P_MASTER_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node qup0_core_slave = {
+ .name = "qup0_core_slave",
+ .id = SA8775P_SLAVE_QUP_CORE_0,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qup1_core_slave = {
+ .name = "qup1_core_slave",
+ .id = SA8775P_SLAVE_QUP_CORE_1,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qup2_core_slave = {
+ .name = "qup2_core_slave",
+ .id = SA8775P_SLAVE_QUP_CORE_2,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qup3_core_slave = {
+ .name = "qup3_core_slave",
+ .id = SA8775P_SLAVE_QUP_CORE_3,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_ahb2phy0 = {
+ .name = "qhs_ahb2phy0",
+ .id = SA8775P_SLAVE_AHB2PHY_0,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_ahb2phy1 = {
+ .name = "qhs_ahb2phy1",
+ .id = SA8775P_SLAVE_AHB2PHY_1,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_ahb2phy2 = {
+ .name = "qhs_ahb2phy2",
+ .id = SA8775P_SLAVE_AHB2PHY_2,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_ahb2phy3 = {
+ .name = "qhs_ahb2phy3",
+ .id = SA8775P_SLAVE_AHB2PHY_3,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_anoc_throttle_cfg = {
+ .name = "qhs_anoc_throttle_cfg",
+ .id = SA8775P_SLAVE_ANOC_THROTTLE_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_aoss = {
+ .name = "qhs_aoss",
+ .id = SA8775P_SLAVE_AOSS,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_apss = {
+ .name = "qhs_apss",
+ .id = SA8775P_SLAVE_APPSS,
+ .channels = 1,
+ .buswidth = 8,
+};
+
+static struct qcom_icc_node qhs_boot_rom = {
+ .name = "qhs_boot_rom",
+ .id = SA8775P_SLAVE_BOOT_ROM,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_camera_cfg = {
+ .name = "qhs_camera_cfg",
+ .id = SA8775P_SLAVE_CAMERA_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_camera_nrt_throttle_cfg = {
+ .name = "qhs_camera_nrt_throttle_cfg",
+ .id = SA8775P_SLAVE_CAMERA_NRT_THROTTLE_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_camera_rt_throttle_cfg = {
+ .name = "qhs_camera_rt_throttle_cfg",
+ .id = SA8775P_SLAVE_CAMERA_RT_THROTTLE_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_clk_ctl = {
+ .name = "qhs_clk_ctl",
+ .id = SA8775P_SLAVE_CLK_CTL,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_compute0_cfg = {
+ .name = "qhs_compute0_cfg",
+ .id = SA8775P_SLAVE_CDSP_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SA8775P_MASTER_CDSP_NOC_CFG },
+};
+
+static struct qcom_icc_node qhs_compute1_cfg = {
+ .name = "qhs_compute1_cfg",
+ .id = SA8775P_SLAVE_CDSP1_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SA8775P_MASTER_CDSPB_NOC_CFG },
+};
+
+static struct qcom_icc_node qhs_cpr_cx = {
+ .name = "qhs_cpr_cx",
+ .id = SA8775P_SLAVE_RBCPR_CX_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_cpr_mmcx = {
+ .name = "qhs_cpr_mmcx",
+ .id = SA8775P_SLAVE_RBCPR_MMCX_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_cpr_mx = {
+ .name = "qhs_cpr_mx",
+ .id = SA8775P_SLAVE_RBCPR_MX_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_cpr_nspcx = {
+ .name = "qhs_cpr_nspcx",
+ .id = SA8775P_SLAVE_CPR_NSPCX,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_crypto0_cfg = {
+ .name = "qhs_crypto0_cfg",
+ .id = SA8775P_SLAVE_CRYPTO_0_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_cx_rdpm = {
+ .name = "qhs_cx_rdpm",
+ .id = SA8775P_SLAVE_CX_RDPM,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_display0_cfg = {
+ .name = "qhs_display0_cfg",
+ .id = SA8775P_SLAVE_DISPLAY_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_display0_rt_throttle_cfg = {
+ .name = "qhs_display0_rt_throttle_cfg",
+ .id = SA8775P_SLAVE_DISPLAY_RT_THROTTLE_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_display1_cfg = {
+ .name = "qhs_display1_cfg",
+ .id = SA8775P_SLAVE_DISPLAY1_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_display1_rt_throttle_cfg = {
+ .name = "qhs_display1_rt_throttle_cfg",
+ .id = SA8775P_SLAVE_DISPLAY1_RT_THROTTLE_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_emac0_cfg = {
+ .name = "qhs_emac0_cfg",
+ .id = SA8775P_SLAVE_EMAC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_emac1_cfg = {
+ .name = "qhs_emac1_cfg",
+ .id = SA8775P_SLAVE_EMAC1_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_gp_dsp0_cfg = {
+ .name = "qhs_gp_dsp0_cfg",
+ .id = SA8775P_SLAVE_GP_DSP0_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_gp_dsp1_cfg = {
+ .name = "qhs_gp_dsp1_cfg",
+ .id = SA8775P_SLAVE_GP_DSP1_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_gpdsp0_throttle_cfg = {
+ .name = "qhs_gpdsp0_throttle_cfg",
+ .id = SA8775P_SLAVE_GPDSP0_THROTTLE_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_gpdsp1_throttle_cfg = {
+ .name = "qhs_gpdsp1_throttle_cfg",
+ .id = SA8775P_SLAVE_GPDSP1_THROTTLE_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_gpu_tcu_throttle_cfg = {
+ .name = "qhs_gpu_tcu_throttle_cfg",
+ .id = SA8775P_SLAVE_GPU_TCU_THROTTLE_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_gpuss_cfg = {
+ .name = "qhs_gpuss_cfg",
+ .id = SA8775P_SLAVE_GFX3D_CFG,
+ .channels = 1,
+ .buswidth = 8,
+};
+
+static struct qcom_icc_node qhs_hwkm = {
+ .name = "qhs_hwkm",
+ .id = SA8775P_SLAVE_HWKM,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_imem_cfg = {
+ .name = "qhs_imem_cfg",
+ .id = SA8775P_SLAVE_IMEM_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_ipa = {
+ .name = "qhs_ipa",
+ .id = SA8775P_SLAVE_IPA_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_ipc_router = {
+ .name = "qhs_ipc_router",
+ .id = SA8775P_SLAVE_IPC_ROUTER_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_lpass_cfg = {
+ .name = "qhs_lpass_cfg",
+ .id = SA8775P_SLAVE_LPASS,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SA8775P_MASTER_CNOC_LPASS_AG_NOC },
+};
+
+static struct qcom_icc_node qhs_lpass_throttle_cfg = {
+ .name = "qhs_lpass_throttle_cfg",
+ .id = SA8775P_SLAVE_LPASS_THROTTLE_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_mx_rdpm = {
+ .name = "qhs_mx_rdpm",
+ .id = SA8775P_SLAVE_MX_RDPM,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_mxc_rdpm = {
+ .name = "qhs_mxc_rdpm",
+ .id = SA8775P_SLAVE_MXC_RDPM,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_pcie0_cfg = {
+ .name = "qhs_pcie0_cfg",
+ .id = SA8775P_SLAVE_PCIE_0_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_pcie1_cfg = {
+ .name = "qhs_pcie1_cfg",
+ .id = SA8775P_SLAVE_PCIE_1_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_pcie_rsc_cfg = {
+ .name = "qhs_pcie_rsc_cfg",
+ .id = SA8775P_SLAVE_PCIE_RSC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_pcie_tcu_throttle_cfg = {
+ .name = "qhs_pcie_tcu_throttle_cfg",
+ .id = SA8775P_SLAVE_PCIE_TCU_THROTTLE_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_pcie_throttle_cfg = {
+ .name = "qhs_pcie_throttle_cfg",
+ .id = SA8775P_SLAVE_PCIE_THROTTLE_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_pdm = {
+ .name = "qhs_pdm",
+ .id = SA8775P_SLAVE_PDM,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_pimem_cfg = {
+ .name = "qhs_pimem_cfg",
+ .id = SA8775P_SLAVE_PIMEM_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_pke_wrapper_cfg = {
+ .name = "qhs_pke_wrapper_cfg",
+ .id = SA8775P_SLAVE_PKA_WRAPPER_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_qdss_cfg = {
+ .name = "qhs_qdss_cfg",
+ .id = SA8775P_SLAVE_QDSS_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_qm_cfg = {
+ .name = "qhs_qm_cfg",
+ .id = SA8775P_SLAVE_QM_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_qm_mpu_cfg = {
+ .name = "qhs_qm_mpu_cfg",
+ .id = SA8775P_SLAVE_QM_MPU_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_qup0 = {
+ .name = "qhs_qup0",
+ .id = SA8775P_SLAVE_QUP_0,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_qup1 = {
+ .name = "qhs_qup1",
+ .id = SA8775P_SLAVE_QUP_1,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_qup2 = {
+ .name = "qhs_qup2",
+ .id = SA8775P_SLAVE_QUP_2,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_qup3 = {
+ .name = "qhs_qup3",
+ .id = SA8775P_SLAVE_QUP_3,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_sail_throttle_cfg = {
+ .name = "qhs_sail_throttle_cfg",
+ .id = SA8775P_SLAVE_SAIL_THROTTLE_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_sdc1 = {
+ .name = "qhs_sdc1",
+ .id = SA8775P_SLAVE_SDC1,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_security = {
+ .name = "qhs_security",
+ .id = SA8775P_SLAVE_SECURITY,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_snoc_throttle_cfg = {
+ .name = "qhs_snoc_throttle_cfg",
+ .id = SA8775P_SLAVE_SNOC_THROTTLE_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_tcsr = {
+ .name = "qhs_tcsr",
+ .id = SA8775P_SLAVE_TCSR,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_tlmm = {
+ .name = "qhs_tlmm",
+ .id = SA8775P_SLAVE_TLMM,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_tsc_cfg = {
+ .name = "qhs_tsc_cfg",
+ .id = SA8775P_SLAVE_TSC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_ufs_card_cfg = {
+ .name = "qhs_ufs_card_cfg",
+ .id = SA8775P_SLAVE_UFS_CARD_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_ufs_mem_cfg = {
+ .name = "qhs_ufs_mem_cfg",
+ .id = SA8775P_SLAVE_UFS_MEM_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_usb2_0 = {
+ .name = "qhs_usb2_0",
+ .id = SA8775P_SLAVE_USB2,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_usb3_0 = {
+ .name = "qhs_usb3_0",
+ .id = SA8775P_SLAVE_USB3_0,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_usb3_1 = {
+ .name = "qhs_usb3_1",
+ .id = SA8775P_SLAVE_USB3_1,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_venus_cfg = {
+ .name = "qhs_venus_cfg",
+ .id = SA8775P_SLAVE_VENUS_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_venus_cvp_throttle_cfg = {
+ .name = "qhs_venus_cvp_throttle_cfg",
+ .id = SA8775P_SLAVE_VENUS_CVP_THROTTLE_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_venus_v_cpu_throttle_cfg = {
+ .name = "qhs_venus_v_cpu_throttle_cfg",
+ .id = SA8775P_SLAVE_VENUS_V_CPU_THROTTLE_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_venus_vcodec_throttle_cfg = {
+ .name = "qhs_venus_vcodec_throttle_cfg",
+ .id = SA8775P_SLAVE_VENUS_VCODEC_THROTTLE_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qns_ddrss_cfg = {
+ .name = "qns_ddrss_cfg",
+ .id = SA8775P_SLAVE_DDRSS_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SA8775P_MASTER_CNOC_DC_NOC },
+};
+
+static struct qcom_icc_node qns_gpdsp_noc_cfg = {
+ .name = "qns_gpdsp_noc_cfg",
+ .id = SA8775P_SLAVE_GPDSP_NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qns_mnoc_hf_cfg = {
+ .name = "qns_mnoc_hf_cfg",
+ .id = SA8775P_SLAVE_CNOC_MNOC_HF_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SA8775P_MASTER_CNOC_MNOC_HF_CFG },
+};
+
+static struct qcom_icc_node qns_mnoc_sf_cfg = {
+ .name = "qns_mnoc_sf_cfg",
+ .id = SA8775P_SLAVE_CNOC_MNOC_SF_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SA8775P_MASTER_CNOC_MNOC_SF_CFG },
+};
+
+static struct qcom_icc_node qns_pcie_anoc_cfg = {
+ .name = "qns_pcie_anoc_cfg",
+ .id = SA8775P_SLAVE_PCIE_ANOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qns_snoc_cfg = {
+ .name = "qns_snoc_cfg",
+ .id = SA8775P_SLAVE_SNOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SA8775P_MASTER_SNOC_CFG },
+};
+
+static struct qcom_icc_node qxs_boot_imem = {
+ .name = "qxs_boot_imem",
+ .id = SA8775P_SLAVE_BOOT_IMEM,
+ .channels = 1,
+ .buswidth = 16,
+};
+
+static struct qcom_icc_node qxs_imem = {
+ .name = "qxs_imem",
+ .id = SA8775P_SLAVE_IMEM,
+ .channels = 1,
+ .buswidth = 8,
+};
+
+static struct qcom_icc_node qxs_pimem = {
+ .name = "qxs_pimem",
+ .id = SA8775P_SLAVE_PIMEM,
+ .channels = 1,
+ .buswidth = 8,
+};
+
+static struct qcom_icc_node xs_pcie_0 = {
+ .name = "xs_pcie_0",
+ .id = SA8775P_SLAVE_PCIE_0,
+ .channels = 1,
+ .buswidth = 16,
+};
+
+static struct qcom_icc_node xs_pcie_1 = {
+ .name = "xs_pcie_1",
+ .id = SA8775P_SLAVE_PCIE_1,
+ .channels = 1,
+ .buswidth = 32,
+};
+
+static struct qcom_icc_node xs_qdss_stm = {
+ .name = "xs_qdss_stm",
+ .id = SA8775P_SLAVE_QDSS_STM,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node xs_sys_tcu_cfg = {
+ .name = "xs_sys_tcu_cfg",
+ .id = SA8775P_SLAVE_TCU,
+ .channels = 1,
+ .buswidth = 8,
+};
+
+static struct qcom_icc_node qhs_llcc = {
+ .name = "qhs_llcc",
+ .id = SA8775P_SLAVE_LLCC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qns_gemnoc = {
+ .name = "qns_gemnoc",
+ .id = SA8775P_SLAVE_GEM_NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SA8775P_MASTER_GEM_NOC_CFG },
+};
+
+static struct qcom_icc_node qns_gem_noc_cnoc = {
+ .name = "qns_gem_noc_cnoc",
+ .id = SA8775P_SLAVE_GEM_NOC_CNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SA8775P_MASTER_GEM_NOC_CNOC },
+};
+
+static struct qcom_icc_node qns_llcc = {
+ .name = "qns_llcc",
+ .id = SA8775P_SLAVE_LLCC,
+ .channels = 6,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SA8775P_MASTER_LLCC },
+};
+
+static struct qcom_icc_node qns_pcie = {
+ .name = "qns_pcie",
+ .id = SA8775P_SLAVE_GEM_NOC_PCIE_CNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SA8775P_MASTER_GEM_NOC_PCIE_SNOC },
+};
+
+static struct qcom_icc_node srvc_even_gemnoc = {
+ .name = "srvc_even_gemnoc",
+ .id = SA8775P_SLAVE_SERVICE_GEM_NOC_1,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node srvc_odd_gemnoc = {
+ .name = "srvc_odd_gemnoc",
+ .id = SA8775P_SLAVE_SERVICE_GEM_NOC_2,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node srvc_sys_gemnoc = {
+ .name = "srvc_sys_gemnoc",
+ .id = SA8775P_SLAVE_SERVICE_GEM_NOC,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node srvc_sys_gemnoc_2 = {
+ .name = "srvc_sys_gemnoc_2",
+ .id = SA8775P_SLAVE_SERVICE_GEM_NOC2,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qns_gp_dsp_sail_noc = {
+ .name = "qns_gp_dsp_sail_noc",
+ .id = SA8775P_SLAVE_GP_DSP_SAIL_NOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SA8775P_MASTER_GPDSP_SAIL },
+};
+
+static struct qcom_icc_node qhs_lpass_core = {
+ .name = "qhs_lpass_core",
+ .id = SA8775P_SLAVE_LPASS_CORE_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_lpass_lpi = {
+ .name = "qhs_lpass_lpi",
+ .id = SA8775P_SLAVE_LPASS_LPI_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_lpass_mpu = {
+ .name = "qhs_lpass_mpu",
+ .id = SA8775P_SLAVE_LPASS_MPU_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_lpass_top = {
+ .name = "qhs_lpass_top",
+ .id = SA8775P_SLAVE_LPASS_TOP_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qns_sysnoc = {
+ .name = "qns_sysnoc",
+ .id = SA8775P_SLAVE_LPASS_SNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SA8775P_MASTER_LPASS_ANOC },
+};
+
+static struct qcom_icc_node srvc_niu_aml_noc = {
+ .name = "srvc_niu_aml_noc",
+ .id = SA8775P_SLAVE_SERVICES_LPASS_AML_NOC,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node srvc_niu_lpass_agnoc = {
+ .name = "srvc_niu_lpass_agnoc",
+ .id = SA8775P_SLAVE_SERVICE_LPASS_AG_NOC,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node ebi = {
+ .name = "ebi",
+ .id = SA8775P_SLAVE_EBI1,
+ .channels = 8,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qns_mem_noc_hf = {
+ .name = "qns_mem_noc_hf",
+ .id = SA8775P_SLAVE_MNOC_HF_MEM_NOC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SA8775P_MASTER_MNOC_HF_MEM_NOC },
+};
+
+static struct qcom_icc_node qns_mem_noc_sf = {
+ .name = "qns_mem_noc_sf",
+ .id = SA8775P_SLAVE_MNOC_SF_MEM_NOC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SA8775P_MASTER_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node srvc_mnoc_hf = {
+ .name = "srvc_mnoc_hf",
+ .id = SA8775P_SLAVE_SERVICE_MNOC_HF,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node srvc_mnoc_sf = {
+ .name = "srvc_mnoc_sf",
+ .id = SA8775P_SLAVE_SERVICE_MNOC_SF,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qns_hcp = {
+ .name = "qns_hcp",
+ .id = SA8775P_SLAVE_HCP_A,
+ .channels = 2,
+ .buswidth = 32,
+};
+
+static struct qcom_icc_node qns_nsp_gemnoc = {
+ .name = "qns_nsp_gemnoc",
+ .id = SA8775P_SLAVE_CDSP_MEM_NOC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SA8775P_MASTER_COMPUTE_NOC },
+};
+
+static struct qcom_icc_node service_nsp_noc = {
+ .name = "service_nsp_noc",
+ .id = SA8775P_SLAVE_SERVICE_NSP_NOC,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qns_nspb_gemnoc = {
+ .name = "qns_nspb_gemnoc",
+ .id = SA8775P_SLAVE_CDSPB_MEM_NOC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SA8775P_MASTER_COMPUTE_NOC_1 },
+};
+
+static struct qcom_icc_node qns_nspb_hcp = {
+ .name = "qns_nspb_hcp",
+ .id = SA8775P_SLAVE_HCP_B,
+ .channels = 2,
+ .buswidth = 32,
+};
+
+static struct qcom_icc_node service_nspb_noc = {
+ .name = "service_nspb_noc",
+ .id = SA8775P_SLAVE_SERVICE_NSPB_NOC,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qns_pcie_mem_noc = {
+ .name = "qns_pcie_mem_noc",
+ .id = SA8775P_SLAVE_ANOC_PCIE_GEM_NOC,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SA8775P_MASTER_ANOC_PCIE_GEM_NOC },
+};
+
+static struct qcom_icc_node qns_gemnoc_gc = {
+ .name = "qns_gemnoc_gc",
+ .id = SA8775P_SLAVE_SNOC_GEM_NOC_GC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SA8775P_MASTER_SNOC_GC_MEM_NOC },
+};
+
+static struct qcom_icc_node qns_gemnoc_sf = {
+ .name = "qns_gemnoc_sf",
+ .id = SA8775P_SLAVE_SNOC_GEM_NOC_SF,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SA8775P_MASTER_SNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node srvc_snoc = {
+ .name = "srvc_snoc",
+ .id = SA8775P_SLAVE_SERVICE_SNOC,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_bcm bcm_acv = {
+ .name = "ACV",
+ .num_nodes = 1,
+ .nodes = { &ebi },
+};
+
+static struct qcom_icc_bcm bcm_ce0 = {
+ .name = "CE0",
+ .num_nodes = 2,
+ .nodes = { &qxm_crypto_0, &qxm_crypto_1 },
+};
+
+static struct qcom_icc_bcm bcm_cn0 = {
+ .name = "CN0",
+ .keepalive = true,
+ .num_nodes = 2,
+ .nodes = { &qnm_gemnoc_cnoc, &qnm_gemnoc_pcie },
+};
+
+static struct qcom_icc_bcm bcm_cn1 = {
+ .name = "CN1",
+ .num_nodes = 76,
+ .nodes = { &qhs_ahb2phy0, &qhs_ahb2phy1,
+ &qhs_ahb2phy2, &qhs_ahb2phy3,
+ &qhs_anoc_throttle_cfg, &qhs_aoss,
+ &qhs_apss, &qhs_boot_rom,
+ &qhs_camera_cfg, &qhs_camera_nrt_throttle_cfg,
+ &qhs_camera_rt_throttle_cfg, &qhs_clk_ctl,
+ &qhs_compute0_cfg, &qhs_compute1_cfg,
+ &qhs_cpr_cx, &qhs_cpr_mmcx,
+ &qhs_cpr_mx, &qhs_cpr_nspcx,
+ &qhs_crypto0_cfg, &qhs_cx_rdpm,
+ &qhs_display0_cfg, &qhs_display0_rt_throttle_cfg,
+ &qhs_display1_cfg, &qhs_display1_rt_throttle_cfg,
+ &qhs_emac0_cfg, &qhs_emac1_cfg,
+ &qhs_gp_dsp0_cfg, &qhs_gp_dsp1_cfg,
+ &qhs_gpdsp0_throttle_cfg, &qhs_gpdsp1_throttle_cfg,
+ &qhs_gpu_tcu_throttle_cfg, &qhs_gpuss_cfg,
+ &qhs_hwkm, &qhs_imem_cfg,
+ &qhs_ipa, &qhs_ipc_router,
+ &qhs_lpass_cfg, &qhs_lpass_throttle_cfg,
+ &qhs_mx_rdpm, &qhs_mxc_rdpm,
+ &qhs_pcie0_cfg, &qhs_pcie1_cfg,
+ &qhs_pcie_rsc_cfg, &qhs_pcie_tcu_throttle_cfg,
+ &qhs_pcie_throttle_cfg, &qhs_pdm,
+ &qhs_pimem_cfg, &qhs_pke_wrapper_cfg,
+ &qhs_qdss_cfg, &qhs_qm_cfg,
+ &qhs_qm_mpu_cfg, &qhs_sail_throttle_cfg,
+ &qhs_sdc1, &qhs_security,
+ &qhs_snoc_throttle_cfg, &qhs_tcsr,
+ &qhs_tlmm, &qhs_tsc_cfg,
+ &qhs_ufs_card_cfg, &qhs_ufs_mem_cfg,
+ &qhs_usb2_0, &qhs_usb3_0,
+ &qhs_usb3_1, &qhs_venus_cfg,
+ &qhs_venus_cvp_throttle_cfg, &qhs_venus_v_cpu_throttle_cfg,
+ &qhs_venus_vcodec_throttle_cfg, &qns_ddrss_cfg,
+ &qns_gpdsp_noc_cfg, &qns_mnoc_hf_cfg,
+ &qns_mnoc_sf_cfg, &qns_pcie_anoc_cfg,
+ &qns_snoc_cfg, &qxs_boot_imem,
+ &qxs_imem, &xs_sys_tcu_cfg },
+};
+
+static struct qcom_icc_bcm bcm_cn2 = {
+ .name = "CN2",
+ .num_nodes = 4,
+ .nodes = { &qhs_qup0, &qhs_qup1,
+ &qhs_qup2, &qhs_qup3 },
+};
+
+static struct qcom_icc_bcm bcm_cn3 = {
+ .name = "CN3",
+ .num_nodes = 2,
+ .nodes = { &xs_pcie_0, &xs_pcie_1 },
+};
+
+static struct qcom_icc_bcm bcm_gna0 = {
+ .name = "GNA0",
+ .num_nodes = 1,
+ .nodes = { &qxm_dsp0 },
+};
+
+static struct qcom_icc_bcm bcm_gnb0 = {
+ .name = "GNB0",
+ .num_nodes = 1,
+ .nodes = { &qxm_dsp1 },
+};
+
+static struct qcom_icc_bcm bcm_mc0 = {
+ .name = "MC0",
+ .keepalive = true,
+ .num_nodes = 1,
+ .nodes = { &ebi },
+};
+
+static struct qcom_icc_bcm bcm_mm0 = {
+ .name = "MM0",
+ .keepalive = true,
+ .num_nodes = 5,
+ .nodes = { &qnm_camnoc_hf, &qnm_mdp0_0,
+ &qnm_mdp0_1, &qnm_mdp1_0,
+ &qns_mem_noc_hf },
+};
+
+static struct qcom_icc_bcm bcm_mm1 = {
+ .name = "MM1",
+ .num_nodes = 7,
+ .nodes = { &qnm_camnoc_icp, &qnm_camnoc_sf,
+ &qnm_video0, &qnm_video1,
+ &qnm_video_cvp, &qnm_video_v_cpu,
+ &qns_mem_noc_sf },
+};
+
+static struct qcom_icc_bcm bcm_nsa0 = {
+ .name = "NSA0",
+ .num_nodes = 2,
+ .nodes = { &qns_hcp, &qns_nsp_gemnoc },
+};
+
+static struct qcom_icc_bcm bcm_nsa1 = {
+ .name = "NSA1",
+ .num_nodes = 1,
+ .nodes = { &qxm_nsp },
+};
+
+static struct qcom_icc_bcm bcm_nsb0 = {
+ .name = "NSB0",
+ .num_nodes = 2,
+ .nodes = { &qns_nspb_gemnoc, &qns_nspb_hcp },
+};
+
+static struct qcom_icc_bcm bcm_nsb1 = {
+ .name = "NSB1",
+ .num_nodes = 1,
+ .nodes = { &qxm_nspb },
+};
+
+static struct qcom_icc_bcm bcm_pci0 = {
+ .name = "PCI0",
+ .num_nodes = 1,
+ .nodes = { &qns_pcie_mem_noc },
+};
+
+static struct qcom_icc_bcm bcm_qup0 = {
+ .name = "QUP0",
+ .vote_scale = 1,
+ .num_nodes = 1,
+ .nodes = { &qup0_core_slave },
+};
+
+static struct qcom_icc_bcm bcm_qup1 = {
+ .name = "QUP1",
+ .vote_scale = 1,
+ .num_nodes = 1,
+ .nodes = { &qup1_core_slave },
+};
+
+static struct qcom_icc_bcm bcm_qup2 = {
+ .name = "QUP2",
+ .vote_scale = 1,
+ .num_nodes = 2,
+ .nodes = { &qup2_core_slave, &qup3_core_slave },
+};
+
+static struct qcom_icc_bcm bcm_sh0 = {
+ .name = "SH0",
+ .keepalive = true,
+ .num_nodes = 1,
+ .nodes = { &qns_llcc },
+};
+
+static struct qcom_icc_bcm bcm_sh2 = {
+ .name = "SH2",
+ .num_nodes = 1,
+ .nodes = { &chm_apps },
+};
+
+static struct qcom_icc_bcm bcm_sn0 = {
+ .name = "SN0",
+ .keepalive = true,
+ .num_nodes = 1,
+ .nodes = { &qns_gemnoc_sf },
+};
+
+static struct qcom_icc_bcm bcm_sn1 = {
+ .name = "SN1",
+ .num_nodes = 1,
+ .nodes = { &qns_gemnoc_gc },
+};
+
+static struct qcom_icc_bcm bcm_sn2 = {
+ .name = "SN2",
+ .num_nodes = 1,
+ .nodes = { &qxs_pimem },
+};
+
+static struct qcom_icc_bcm bcm_sn3 = {
+ .name = "SN3",
+ .num_nodes = 2,
+ .nodes = { &qns_a1noc_snoc, &qnm_aggre1_noc },
+};
+
+static struct qcom_icc_bcm bcm_sn4 = {
+ .name = "SN4",
+ .num_nodes = 2,
+ .nodes = { &qns_a2noc_snoc, &qnm_aggre2_noc },
+};
+
+static struct qcom_icc_bcm bcm_sn9 = {
+ .name = "SN9",
+ .num_nodes = 2,
+ .nodes = { &qns_sysnoc, &qnm_lpass_noc },
+};
+
+static struct qcom_icc_bcm bcm_sn10 = {
+ .name = "SN10",
+ .num_nodes = 1,
+ .nodes = { &xs_qdss_stm },
+};
+
+static struct qcom_icc_bcm *aggre1_noc_bcms[] = {
+ &bcm_sn3,
+};
+
+static struct qcom_icc_node *aggre1_noc_nodes[] = {
+ [MASTER_QUP_3] = &qxm_qup3,
+ [MASTER_EMAC] = &xm_emac_0,
+ [MASTER_EMAC_1] = &xm_emac_1,
+ [MASTER_SDC] = &xm_sdc1,
+ [MASTER_UFS_MEM] = &xm_ufs_mem,
+ [MASTER_USB2] = &xm_usb2_2,
+ [MASTER_USB3_0] = &xm_usb3_0,
+ [MASTER_USB3_1] = &xm_usb3_1,
+ [SLAVE_A1NOC_SNOC] = &qns_a1noc_snoc,
+};
+
+static const struct qcom_icc_desc sa8775p_aggre1_noc = {
+ .nodes = aggre1_noc_nodes,
+ .num_nodes = ARRAY_SIZE(aggre1_noc_nodes),
+ .bcms = aggre1_noc_bcms,
+ .num_bcms = ARRAY_SIZE(aggre1_noc_bcms),
+};
+
+static struct qcom_icc_bcm *aggre2_noc_bcms[] = {
+ &bcm_ce0,
+ &bcm_sn4,
+};
+
+static struct qcom_icc_node *aggre2_noc_nodes[] = {
+ [MASTER_QDSS_BAM] = &qhm_qdss_bam,
+ [MASTER_QUP_0] = &qhm_qup0,
+ [MASTER_QUP_1] = &qhm_qup1,
+ [MASTER_QUP_2] = &qhm_qup2,
+ [MASTER_CNOC_A2NOC] = &qnm_cnoc_datapath,
+ [MASTER_CRYPTO_CORE0] = &qxm_crypto_0,
+ [MASTER_CRYPTO_CORE1] = &qxm_crypto_1,
+ [MASTER_IPA] = &qxm_ipa,
+ [MASTER_QDSS_ETR_0] = &xm_qdss_etr_0,
+ [MASTER_QDSS_ETR_1] = &xm_qdss_etr_1,
+ [MASTER_UFS_CARD] = &xm_ufs_card,
+ [SLAVE_A2NOC_SNOC] = &qns_a2noc_snoc,
+};
+
+static const struct qcom_icc_desc sa8775p_aggre2_noc = {
+ .nodes = aggre2_noc_nodes,
+ .num_nodes = ARRAY_SIZE(aggre2_noc_nodes),
+ .bcms = aggre2_noc_bcms,
+ .num_bcms = ARRAY_SIZE(aggre2_noc_bcms),
+};
+
+static struct qcom_icc_bcm *clk_virt_bcms[] = {
+ &bcm_qup0,
+ &bcm_qup1,
+ &bcm_qup2,
+};
+
+static struct qcom_icc_node *clk_virt_nodes[] = {
+ [MASTER_QUP_CORE_0] = &qup0_core_master,
+ [MASTER_QUP_CORE_1] = &qup1_core_master,
+ [MASTER_QUP_CORE_2] = &qup2_core_master,
+ [MASTER_QUP_CORE_3] = &qup3_core_master,
+ [SLAVE_QUP_CORE_0] = &qup0_core_slave,
+ [SLAVE_QUP_CORE_1] = &qup1_core_slave,
+ [SLAVE_QUP_CORE_2] = &qup2_core_slave,
+ [SLAVE_QUP_CORE_3] = &qup3_core_slave,
+};
+
+static const struct qcom_icc_desc sa8775p_clk_virt = {
+ .nodes = clk_virt_nodes,
+ .num_nodes = ARRAY_SIZE(clk_virt_nodes),
+ .bcms = clk_virt_bcms,
+ .num_bcms = ARRAY_SIZE(clk_virt_bcms),
+};
+
+static struct qcom_icc_bcm *config_noc_bcms[] = {
+ &bcm_cn0,
+ &bcm_cn1,
+ &bcm_cn2,
+ &bcm_cn3,
+ &bcm_sn2,
+ &bcm_sn10,
+};
+
+static struct qcom_icc_node *config_noc_nodes[] = {
+ [MASTER_GEM_NOC_CNOC] = &qnm_gemnoc_cnoc,
+ [MASTER_GEM_NOC_PCIE_SNOC] = &qnm_gemnoc_pcie,
+ [SLAVE_AHB2PHY_0] = &qhs_ahb2phy0,
+ [SLAVE_AHB2PHY_1] = &qhs_ahb2phy1,
+ [SLAVE_AHB2PHY_2] = &qhs_ahb2phy2,
+ [SLAVE_AHB2PHY_3] = &qhs_ahb2phy3,
+ [SLAVE_ANOC_THROTTLE_CFG] = &qhs_anoc_throttle_cfg,
+ [SLAVE_AOSS] = &qhs_aoss,
+ [SLAVE_APPSS] = &qhs_apss,
+ [SLAVE_BOOT_ROM] = &qhs_boot_rom,
+ [SLAVE_CAMERA_CFG] = &qhs_camera_cfg,
+ [SLAVE_CAMERA_NRT_THROTTLE_CFG] = &qhs_camera_nrt_throttle_cfg,
+ [SLAVE_CAMERA_RT_THROTTLE_CFG] = &qhs_camera_rt_throttle_cfg,
+ [SLAVE_CLK_CTL] = &qhs_clk_ctl,
+ [SLAVE_CDSP_CFG] = &qhs_compute0_cfg,
+ [SLAVE_CDSP1_CFG] = &qhs_compute1_cfg,
+ [SLAVE_RBCPR_CX_CFG] = &qhs_cpr_cx,
+ [SLAVE_RBCPR_MMCX_CFG] = &qhs_cpr_mmcx,
+ [SLAVE_RBCPR_MX_CFG] = &qhs_cpr_mx,
+ [SLAVE_CPR_NSPCX] = &qhs_cpr_nspcx,
+ [SLAVE_CRYPTO_0_CFG] = &qhs_crypto0_cfg,
+ [SLAVE_CX_RDPM] = &qhs_cx_rdpm,
+ [SLAVE_DISPLAY_CFG] = &qhs_display0_cfg,
+ [SLAVE_DISPLAY_RT_THROTTLE_CFG] = &qhs_display0_rt_throttle_cfg,
+ [SLAVE_DISPLAY1_CFG] = &qhs_display1_cfg,
+ [SLAVE_DISPLAY1_RT_THROTTLE_CFG] = &qhs_display1_rt_throttle_cfg,
+ [SLAVE_EMAC_CFG] = &qhs_emac0_cfg,
+ [SLAVE_EMAC1_CFG] = &qhs_emac1_cfg,
+ [SLAVE_GP_DSP0_CFG] = &qhs_gp_dsp0_cfg,
+ [SLAVE_GP_DSP1_CFG] = &qhs_gp_dsp1_cfg,
+ [SLAVE_GPDSP0_THROTTLE_CFG] = &qhs_gpdsp0_throttle_cfg,
+ [SLAVE_GPDSP1_THROTTLE_CFG] = &qhs_gpdsp1_throttle_cfg,
+ [SLAVE_GPU_TCU_THROTTLE_CFG] = &qhs_gpu_tcu_throttle_cfg,
+ [SLAVE_GFX3D_CFG] = &qhs_gpuss_cfg,
+ [SLAVE_HWKM] = &qhs_hwkm,
+ [SLAVE_IMEM_CFG] = &qhs_imem_cfg,
+ [SLAVE_IPA_CFG] = &qhs_ipa,
+ [SLAVE_IPC_ROUTER_CFG] = &qhs_ipc_router,
+ [SLAVE_LPASS] = &qhs_lpass_cfg,
+ [SLAVE_LPASS_THROTTLE_CFG] = &qhs_lpass_throttle_cfg,
+ [SLAVE_MX_RDPM] = &qhs_mx_rdpm,
+ [SLAVE_MXC_RDPM] = &qhs_mxc_rdpm,
+ [SLAVE_PCIE_0_CFG] = &qhs_pcie0_cfg,
+ [SLAVE_PCIE_1_CFG] = &qhs_pcie1_cfg,
+ [SLAVE_PCIE_RSC_CFG] = &qhs_pcie_rsc_cfg,
+ [SLAVE_PCIE_TCU_THROTTLE_CFG] = &qhs_pcie_tcu_throttle_cfg,
+ [SLAVE_PCIE_THROTTLE_CFG] = &qhs_pcie_throttle_cfg,
+ [SLAVE_PDM] = &qhs_pdm,
+ [SLAVE_PIMEM_CFG] = &qhs_pimem_cfg,
+ [SLAVE_PKA_WRAPPER_CFG] = &qhs_pke_wrapper_cfg,
+ [SLAVE_QDSS_CFG] = &qhs_qdss_cfg,
+ [SLAVE_QM_CFG] = &qhs_qm_cfg,
+ [SLAVE_QM_MPU_CFG] = &qhs_qm_mpu_cfg,
+ [SLAVE_QUP_0] = &qhs_qup0,
+ [SLAVE_QUP_1] = &qhs_qup1,
+ [SLAVE_QUP_2] = &qhs_qup2,
+ [SLAVE_QUP_3] = &qhs_qup3,
+ [SLAVE_SAIL_THROTTLE_CFG] = &qhs_sail_throttle_cfg,
+ [SLAVE_SDC1] = &qhs_sdc1,
+ [SLAVE_SECURITY] = &qhs_security,
+ [SLAVE_SNOC_THROTTLE_CFG] = &qhs_snoc_throttle_cfg,
+ [SLAVE_TCSR] = &qhs_tcsr,
+ [SLAVE_TLMM] = &qhs_tlmm,
+ [SLAVE_TSC_CFG] = &qhs_tsc_cfg,
+ [SLAVE_UFS_CARD_CFG] = &qhs_ufs_card_cfg,
+ [SLAVE_UFS_MEM_CFG] = &qhs_ufs_mem_cfg,
+ [SLAVE_USB2] = &qhs_usb2_0,
+ [SLAVE_USB3_0] = &qhs_usb3_0,
+ [SLAVE_USB3_1] = &qhs_usb3_1,
+ [SLAVE_VENUS_CFG] = &qhs_venus_cfg,
+ [SLAVE_VENUS_CVP_THROTTLE_CFG] = &qhs_venus_cvp_throttle_cfg,
+ [SLAVE_VENUS_V_CPU_THROTTLE_CFG] = &qhs_venus_v_cpu_throttle_cfg,
+ [SLAVE_VENUS_VCODEC_THROTTLE_CFG] = &qhs_venus_vcodec_throttle_cfg,
+ [SLAVE_DDRSS_CFG] = &qns_ddrss_cfg,
+ [SLAVE_GPDSP_NOC_CFG] = &qns_gpdsp_noc_cfg,
+ [SLAVE_CNOC_MNOC_HF_CFG] = &qns_mnoc_hf_cfg,
+ [SLAVE_CNOC_MNOC_SF_CFG] = &qns_mnoc_sf_cfg,
+ [SLAVE_PCIE_ANOC_CFG] = &qns_pcie_anoc_cfg,
+ [SLAVE_SNOC_CFG] = &qns_snoc_cfg,
+ [SLAVE_BOOT_IMEM] = &qxs_boot_imem,
+ [SLAVE_IMEM] = &qxs_imem,
+ [SLAVE_PIMEM] = &qxs_pimem,
+ [SLAVE_PCIE_0] = &xs_pcie_0,
+ [SLAVE_PCIE_1] = &xs_pcie_1,
+ [SLAVE_QDSS_STM] = &xs_qdss_stm,
+ [SLAVE_TCU] = &xs_sys_tcu_cfg,
+};
+
+static const struct qcom_icc_desc sa8775p_config_noc = {
+ .nodes = config_noc_nodes,
+ .num_nodes = ARRAY_SIZE(config_noc_nodes),
+ .bcms = config_noc_bcms,
+ .num_bcms = ARRAY_SIZE(config_noc_bcms),
+};
+
+static struct qcom_icc_bcm *dc_noc_bcms[] = {
+};
+
+static struct qcom_icc_node *dc_noc_nodes[] = {
+ [MASTER_CNOC_DC_NOC] = &qnm_cnoc_dc_noc,
+ [SLAVE_LLCC_CFG] = &qhs_llcc,
+ [SLAVE_GEM_NOC_CFG] = &qns_gemnoc,
+};
+
+static const struct qcom_icc_desc sa8775p_dc_noc = {
+ .nodes = dc_noc_nodes,
+ .num_nodes = ARRAY_SIZE(dc_noc_nodes),
+ .bcms = dc_noc_bcms,
+ .num_bcms = ARRAY_SIZE(dc_noc_bcms),
+};
+
+static struct qcom_icc_bcm *gem_noc_bcms[] = {
+ &bcm_sh0,
+ &bcm_sh2,
+};
+
+static struct qcom_icc_node *gem_noc_nodes[] = {
+ [MASTER_GPU_TCU] = &alm_gpu_tcu,
+ [MASTER_PCIE_TCU] = &alm_pcie_tcu,
+ [MASTER_SYS_TCU] = &alm_sys_tcu,
+ [MASTER_APPSS_PROC] = &chm_apps,
+ [MASTER_COMPUTE_NOC] = &qnm_cmpnoc0,
+ [MASTER_COMPUTE_NOC_1] = &qnm_cmpnoc1,
+ [MASTER_GEM_NOC_CFG] = &qnm_gemnoc_cfg,
+ [MASTER_GPDSP_SAIL] = &qnm_gpdsp_sail,
+ [MASTER_GFX3D] = &qnm_gpu,
+ [MASTER_MNOC_HF_MEM_NOC] = &qnm_mnoc_hf,
+ [MASTER_MNOC_SF_MEM_NOC] = &qnm_mnoc_sf,
+ [MASTER_ANOC_PCIE_GEM_NOC] = &qnm_pcie,
+ [MASTER_SNOC_GC_MEM_NOC] = &qnm_snoc_gc,
+ [MASTER_SNOC_SF_MEM_NOC] = &qnm_snoc_sf,
+ [SLAVE_GEM_NOC_CNOC] = &qns_gem_noc_cnoc,
+ [SLAVE_LLCC] = &qns_llcc,
+ [SLAVE_GEM_NOC_PCIE_CNOC] = &qns_pcie,
+ [SLAVE_SERVICE_GEM_NOC_1] = &srvc_even_gemnoc,
+ [SLAVE_SERVICE_GEM_NOC_2] = &srvc_odd_gemnoc,
+ [SLAVE_SERVICE_GEM_NOC] = &srvc_sys_gemnoc,
+ [SLAVE_SERVICE_GEM_NOC2] = &srvc_sys_gemnoc_2,
+};
+
+static const struct qcom_icc_desc sa8775p_gem_noc = {
+ .nodes = gem_noc_nodes,
+ .num_nodes = ARRAY_SIZE(gem_noc_nodes),
+ .bcms = gem_noc_bcms,
+ .num_bcms = ARRAY_SIZE(gem_noc_bcms),
+};
+
+static struct qcom_icc_bcm *gpdsp_anoc_bcms[] = {
+ &bcm_gna0,
+ &bcm_gnb0,
+};
+
+static struct qcom_icc_node *gpdsp_anoc_nodes[] = {
+ [MASTER_DSP0] = &qxm_dsp0,
+ [MASTER_DSP1] = &qxm_dsp1,
+ [SLAVE_GP_DSP_SAIL_NOC] = &qns_gp_dsp_sail_noc,
+};
+
+static const struct qcom_icc_desc sa8775p_gpdsp_anoc = {
+ .nodes = gpdsp_anoc_nodes,
+ .num_nodes = ARRAY_SIZE(gpdsp_anoc_nodes),
+ .bcms = gpdsp_anoc_bcms,
+ .num_bcms = ARRAY_SIZE(gpdsp_anoc_bcms),
+};
+
+static struct qcom_icc_bcm *lpass_ag_noc_bcms[] = {
+ &bcm_sn9,
+};
+
+static struct qcom_icc_node *lpass_ag_noc_nodes[] = {
+ [MASTER_CNOC_LPASS_AG_NOC] = &qhm_config_noc,
+ [MASTER_LPASS_PROC] = &qxm_lpass_dsp,
+ [SLAVE_LPASS_CORE_CFG] = &qhs_lpass_core,
+ [SLAVE_LPASS_LPI_CFG] = &qhs_lpass_lpi,
+ [SLAVE_LPASS_MPU_CFG] = &qhs_lpass_mpu,
+ [SLAVE_LPASS_TOP_CFG] = &qhs_lpass_top,
+ [SLAVE_LPASS_SNOC] = &qns_sysnoc,
+ [SLAVE_SERVICES_LPASS_AML_NOC] = &srvc_niu_aml_noc,
+ [SLAVE_SERVICE_LPASS_AG_NOC] = &srvc_niu_lpass_agnoc,
+};
+
+static const struct qcom_icc_desc sa8775p_lpass_ag_noc = {
+ .nodes = lpass_ag_noc_nodes,
+ .num_nodes = ARRAY_SIZE(lpass_ag_noc_nodes),
+ .bcms = lpass_ag_noc_bcms,
+ .num_bcms = ARRAY_SIZE(lpass_ag_noc_bcms),
+};
+
+static struct qcom_icc_bcm *mc_virt_bcms[] = {
+ &bcm_acv,
+ &bcm_mc0,
+};
+
+static struct qcom_icc_node *mc_virt_nodes[] = {
+ [MASTER_LLCC] = &llcc_mc,
+ [SLAVE_EBI1] = &ebi,
+};
+
+static const struct qcom_icc_desc sa8775p_mc_virt = {
+ .nodes = mc_virt_nodes,
+ .num_nodes = ARRAY_SIZE(mc_virt_nodes),
+ .bcms = mc_virt_bcms,
+ .num_bcms = ARRAY_SIZE(mc_virt_bcms),
+};
+
+static struct qcom_icc_bcm *mmss_noc_bcms[] = {
+ &bcm_mm0,
+ &bcm_mm1,
+};
+
+static struct qcom_icc_node *mmss_noc_nodes[] = {
+ [MASTER_CAMNOC_HF] = &qnm_camnoc_hf,
+ [MASTER_CAMNOC_ICP] = &qnm_camnoc_icp,
+ [MASTER_CAMNOC_SF] = &qnm_camnoc_sf,
+ [MASTER_MDP0] = &qnm_mdp0_0,
+ [MASTER_MDP1] = &qnm_mdp0_1,
+ [MASTER_MDP_CORE1_0] = &qnm_mdp1_0,
+ [MASTER_MDP_CORE1_1] = &qnm_mdp1_1,
+ [MASTER_CNOC_MNOC_HF_CFG] = &qnm_mnoc_hf_cfg,
+ [MASTER_CNOC_MNOC_SF_CFG] = &qnm_mnoc_sf_cfg,
+ [MASTER_VIDEO_P0] = &qnm_video0,
+ [MASTER_VIDEO_P1] = &qnm_video1,
+ [MASTER_VIDEO_PROC] = &qnm_video_cvp,
+ [MASTER_VIDEO_V_PROC] = &qnm_video_v_cpu,
+ [SLAVE_MNOC_HF_MEM_NOC] = &qns_mem_noc_hf,
+ [SLAVE_MNOC_SF_MEM_NOC] = &qns_mem_noc_sf,
+ [SLAVE_SERVICE_MNOC_HF] = &srvc_mnoc_hf,
+ [SLAVE_SERVICE_MNOC_SF] = &srvc_mnoc_sf,
+};
+
+static const struct qcom_icc_desc sa8775p_mmss_noc = {
+ .nodes = mmss_noc_nodes,
+ .num_nodes = ARRAY_SIZE(mmss_noc_nodes),
+ .bcms = mmss_noc_bcms,
+ .num_bcms = ARRAY_SIZE(mmss_noc_bcms),
+};
+
+static struct qcom_icc_bcm *nspa_noc_bcms[] = {
+ &bcm_nsa0,
+ &bcm_nsa1,
+};
+
+static struct qcom_icc_node *nspa_noc_nodes[] = {
+ [MASTER_CDSP_NOC_CFG] = &qhm_nsp_noc_config,
+ [MASTER_CDSP_PROC] = &qxm_nsp,
+ [SLAVE_HCP_A] = &qns_hcp,
+ [SLAVE_CDSP_MEM_NOC] = &qns_nsp_gemnoc,
+ [SLAVE_SERVICE_NSP_NOC] = &service_nsp_noc,
+};
+
+static const struct qcom_icc_desc sa8775p_nspa_noc = {
+ .nodes = nspa_noc_nodes,
+ .num_nodes = ARRAY_SIZE(nspa_noc_nodes),
+ .bcms = nspa_noc_bcms,
+ .num_bcms = ARRAY_SIZE(nspa_noc_bcms),
+};
+
+static struct qcom_icc_bcm *nspb_noc_bcms[] = {
+ &bcm_nsb0,
+ &bcm_nsb1,
+};
+
+static struct qcom_icc_node *nspb_noc_nodes[] = {
+ [MASTER_CDSPB_NOC_CFG] = &qhm_nspb_noc_config,
+ [MASTER_CDSP_PROC_B] = &qxm_nspb,
+ [SLAVE_CDSPB_MEM_NOC] = &qns_nspb_gemnoc,
+ [SLAVE_HCP_B] = &qns_nspb_hcp,
+ [SLAVE_SERVICE_NSPB_NOC] = &service_nspb_noc,
+};
+
+static const struct qcom_icc_desc sa8775p_nspb_noc = {
+ .nodes = nspb_noc_nodes,
+ .num_nodes = ARRAY_SIZE(nspb_noc_nodes),
+ .bcms = nspb_noc_bcms,
+ .num_bcms = ARRAY_SIZE(nspb_noc_bcms),
+};
+
+static struct qcom_icc_bcm *pcie_anoc_bcms[] = {
+ &bcm_pci0,
+};
+
+static struct qcom_icc_node *pcie_anoc_nodes[] = {
+ [MASTER_PCIE_0] = &xm_pcie3_0,
+ [MASTER_PCIE_1] = &xm_pcie3_1,
+ [SLAVE_ANOC_PCIE_GEM_NOC] = &qns_pcie_mem_noc,
+};
+
+static const struct qcom_icc_desc sa8775p_pcie_anoc = {
+ .nodes = pcie_anoc_nodes,
+ .num_nodes = ARRAY_SIZE(pcie_anoc_nodes),
+ .bcms = pcie_anoc_bcms,
+ .num_bcms = ARRAY_SIZE(pcie_anoc_bcms),
+};
+
+static struct qcom_icc_bcm *system_noc_bcms[] = {
+ &bcm_sn0,
+ &bcm_sn1,
+ &bcm_sn3,
+ &bcm_sn4,
+ &bcm_sn9,
+};
+
+static struct qcom_icc_node *system_noc_nodes[] = {
+ [MASTER_GIC_AHB] = &qhm_gic,
+ [MASTER_A1NOC_SNOC] = &qnm_aggre1_noc,
+ [MASTER_A2NOC_SNOC] = &qnm_aggre2_noc,
+ [MASTER_LPASS_ANOC] = &qnm_lpass_noc,
+ [MASTER_SNOC_CFG] = &qnm_snoc_cfg,
+ [MASTER_PIMEM] = &qxm_pimem,
+ [MASTER_GIC] = &xm_gic,
+ [SLAVE_SNOC_GEM_NOC_GC] = &qns_gemnoc_gc,
+ [SLAVE_SNOC_GEM_NOC_SF] = &qns_gemnoc_sf,
+ [SLAVE_SERVICE_SNOC] = &srvc_snoc,
+};
+
+static const struct qcom_icc_desc sa8775p_system_noc = {
+ .nodes = system_noc_nodes,
+ .num_nodes = ARRAY_SIZE(system_noc_nodes),
+ .bcms = system_noc_bcms,
+ .num_bcms = ARRAY_SIZE(system_noc_bcms),
+};
+
+static const struct of_device_id qnoc_of_match[] = {
+ { .compatible = "qcom,sa8775p-aggre1-noc", .data = &sa8775p_aggre1_noc, },
+ { .compatible = "qcom,sa8775p-aggre2-noc", .data = &sa8775p_aggre2_noc, },
+ { .compatible = "qcom,sa8775p-clk-virt", .data = &sa8775p_clk_virt, },
+ { .compatible = "qcom,sa8775p-config-noc", .data = &sa8775p_config_noc, },
+ { .compatible = "qcom,sa8775p-dc-noc", .data = &sa8775p_dc_noc, },
+ { .compatible = "qcom,sa8775p-gem-noc", .data = &sa8775p_gem_noc, },
+ { .compatible = "qcom,sa8775p-gpdsp-anoc", .data = &sa8775p_gpdsp_anoc, },
+ { .compatible = "qcom,sa8775p-lpass-ag-noc", .data = &sa8775p_lpass_ag_noc, },
+ { .compatible = "qcom,sa8775p-mc-virt", .data = &sa8775p_mc_virt, },
+ { .compatible = "qcom,sa8775p-mmss-noc", .data = &sa8775p_mmss_noc, },
+ { .compatible = "qcom,sa8775p-nspa-noc", .data = &sa8775p_nspa_noc, },
+ { .compatible = "qcom,sa8775p-nspb-noc", .data = &sa8775p_nspb_noc, },
+ { .compatible = "qcom,sa8775p-pcie-anoc", .data = &sa8775p_pcie_anoc, },
+ { .compatible = "qcom,sa8775p-system-noc", .data = &sa8775p_system_noc, },
+ { }
+};
+MODULE_DEVICE_TABLE(of, qnoc_of_match);
+
+static struct platform_driver qnoc_driver = {
+ .probe = qcom_icc_rpmh_probe,
+ .remove = qcom_icc_rpmh_remove,
+ .driver = {
+ .name = "qnoc-sa8775p",
+ .of_match_table = qnoc_of_match,
+ .sync_state = icc_sync_state,
+ },
+};
+
+static int __init qnoc_driver_init(void)
+{
+ return platform_driver_register(&qnoc_driver);
+}
+core_initcall(qnoc_driver_init);
+
+static void __exit qnoc_driver_exit(void)
+{
+ platform_driver_unregister(&qnoc_driver);
+}
+module_exit(qnoc_driver_exit);
+
+MODULE_DESCRIPTION("Qualcomm Technologies, Inc. SA8775P NoC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/interconnect/qcom/sc7180.h b/drivers/interconnect/qcom/sc7180.h
index c6212a10c2f6..7a2b3eb00923 100644
--- a/drivers/interconnect/qcom/sc7180.h
+++ b/drivers/interconnect/qcom/sc7180.h
@@ -11,7 +11,7 @@
#define SC7180_MASTER_APPSS_PROC 0
#define SC7180_MASTER_SYS_TCU 1
#define SC7180_MASTER_NPU_SYS 2
-#define SC7180_MASTER_IPA_CORE 3
+/* 3 was used by MASTER_IPA_CORE, now represented as RPMh clock */
#define SC7180_MASTER_LLCC 4
#define SC7180_MASTER_A1NOC_CFG 5
#define SC7180_MASTER_A2NOC_CFG 6
@@ -58,7 +58,7 @@
#define SC7180_MASTER_USB3 47
#define SC7180_MASTER_EMMC 48
#define SC7180_SLAVE_EBI1 49
-#define SC7180_SLAVE_IPA_CORE 50
+/* 50 was used by SLAVE_IPA_CORE, now represented as RPMh clock */
#define SC7180_SLAVE_A1NOC_CFG 51
#define SC7180_SLAVE_A2NOC_CFG 52
#define SC7180_SLAVE_AHB2PHY_SOUTH 53
diff --git a/drivers/interconnect/qcom/sc8180x.c b/drivers/interconnect/qcom/sc8180x.c
index 0f515bf10bd7..c76e3a6a98cd 100644
--- a/drivers/interconnect/qcom/sc8180x.c
+++ b/drivers/interconnect/qcom/sc8180x.c
@@ -469,15 +469,6 @@ static struct qcom_icc_node mas_qxm_ecc = {
.links = { SC8180X_SLAVE_LLCC }
};
-static struct qcom_icc_node mas_ipa_core_master = {
- .name = "mas_ipa_core_master",
- .id = SC8180X_MASTER_IPA_CORE,
- .channels = 1,
- .buswidth = 8,
- .num_links = 1,
- .links = { SC8180X_SLAVE_IPA_CORE }
-};
-
static struct qcom_icc_node mas_llcc_mc = {
.name = "mas_llcc_mc",
.id = SC8180X_MASTER_LLCC,
@@ -1201,13 +1192,6 @@ static struct qcom_icc_node slv_srvc_gemnoc1 = {
.buswidth = 4
};
-static struct qcom_icc_node slv_ipa_core_slave = {
- .name = "slv_ipa_core_slave",
- .id = SC8180X_SLAVE_IPA_CORE,
- .channels = 1,
- .buswidth = 8
-};
-
static struct qcom_icc_node slv_ebi = {
.name = "slv_ebi",
.id = SC8180X_SLAVE_EBI_CH0,
@@ -1524,11 +1508,6 @@ static struct qcom_icc_bcm bcm_co2 = {
.nodes = { &mas_qnm_npu }
};
-static struct qcom_icc_bcm bcm_ip0 = {
- .name = "IP0",
- .nodes = { &slv_ipa_core_slave }
-};
-
static struct qcom_icc_bcm bcm_sn3 = {
.name = "SN3",
.keepalive = true,
@@ -1604,10 +1583,6 @@ static struct qcom_icc_bcm * const gem_noc_bcms[] = {
&bcm_sh3,
};
-static struct qcom_icc_bcm * const ipa_virt_bcms[] = {
- &bcm_ip0,
-};
-
static struct qcom_icc_bcm * const mc_virt_bcms[] = {
&bcm_mc0,
&bcm_acv,
@@ -1766,11 +1741,6 @@ static struct qcom_icc_node * const gem_noc_nodes[] = {
[SLAVE_SERVICE_GEM_NOC_1] = &slv_srvc_gemnoc1,
};
-static struct qcom_icc_node * const ipa_virt_nodes[] = {
- [MASTER_IPA_CORE] = &mas_ipa_core_master,
- [SLAVE_IPA_CORE] = &slv_ipa_core_slave,
-};
-
static struct qcom_icc_node * const mc_virt_nodes[] = {
[MASTER_LLCC] = &mas_llcc_mc,
[SLAVE_EBI_CH0] = &slv_ebi,
@@ -1857,13 +1827,6 @@ static const struct qcom_icc_desc sc8180x_gem_noc = {
.num_bcms = ARRAY_SIZE(gem_noc_bcms),
};
-static const struct qcom_icc_desc sc8180x_ipa_virt = {
- .nodes = ipa_virt_nodes,
- .num_nodes = ARRAY_SIZE(ipa_virt_nodes),
- .bcms = ipa_virt_bcms,
- .num_bcms = ARRAY_SIZE(ipa_virt_bcms),
-};
-
static const struct qcom_icc_desc sc8180x_mc_virt = {
.nodes = mc_virt_nodes,
.num_nodes = ARRAY_SIZE(mc_virt_nodes),
@@ -1913,7 +1876,6 @@ static const struct of_device_id qnoc_of_match[] = {
{ .compatible = "qcom,sc8180x-config-noc", .data = &sc8180x_config_noc },
{ .compatible = "qcom,sc8180x-dc-noc", .data = &sc8180x_dc_noc },
{ .compatible = "qcom,sc8180x-gem-noc", .data = &sc8180x_gem_noc },
- { .compatible = "qcom,sc8180x-ipa-virt", .data = &sc8180x_ipa_virt },
{ .compatible = "qcom,sc8180x-mc-virt", .data = &sc8180x_mc_virt },
{ .compatible = "qcom,sc8180x-mmss-noc", .data = &sc8180x_mmss_noc },
{ .compatible = "qcom,sc8180x-qup-virt", .data = &sc8180x_qup_virt },
diff --git a/drivers/interconnect/qcom/sc8180x.h b/drivers/interconnect/qcom/sc8180x.h
index 2eafd35543c7..c138dcd350f1 100644
--- a/drivers/interconnect/qcom/sc8180x.h
+++ b/drivers/interconnect/qcom/sc8180x.h
@@ -51,7 +51,7 @@
#define SC8180X_MASTER_SNOC_GC_MEM_NOC 41
#define SC8180X_MASTER_SNOC_SF_MEM_NOC 42
#define SC8180X_MASTER_ECC 43
-#define SC8180X_MASTER_IPA_CORE 44
+/* 44 was used by MASTER_IPA_CORE, now represented as RPMh clock */
#define SC8180X_MASTER_LLCC 45
#define SC8180X_MASTER_CNOC_MNOC_CFG 46
#define SC8180X_MASTER_CAMNOC_HF0 47
@@ -146,7 +146,7 @@
#define SC8180X_SLAVE_LLCC 136
#define SC8180X_SLAVE_SERVICE_GEM_NOC 137
#define SC8180X_SLAVE_SERVICE_GEM_NOC_1 138
-#define SC8180X_SLAVE_IPA_CORE 139
+/* 139 was used by SLAVE_IPA_CORE, now represented as RPMh clock */
#define SC8180X_SLAVE_EBI_CH0 140
#define SC8180X_SLAVE_MNOC_SF_MEM_NOC 141
#define SC8180X_SLAVE_MNOC_HF_MEM_NOC 142
diff --git a/drivers/interconnect/qcom/sc8280xp.c b/drivers/interconnect/qcom/sc8280xp.c
index 507fe5f89791..e56df893ec3e 100644
--- a/drivers/interconnect/qcom/sc8280xp.c
+++ b/drivers/interconnect/qcom/sc8280xp.c
@@ -284,15 +284,6 @@ static struct qcom_icc_node xm_ufs_card = {
.links = { SC8280XP_SLAVE_A2NOC_SNOC },
};
-static struct qcom_icc_node ipa_core_master = {
- .name = "ipa_core_master",
- .id = SC8280XP_MASTER_IPA_CORE,
- .channels = 1,
- .buswidth = 8,
- .num_links = 1,
- .links = { SC8280XP_SLAVE_IPA_CORE },
-};
-
static struct qcom_icc_node qup0_core_master = {
.name = "qup0_core_master",
.id = SC8280XP_MASTER_QUP_CORE_0,
@@ -882,13 +873,6 @@ static struct qcom_icc_node srvc_aggre2_noc = {
.buswidth = 4,
};
-static struct qcom_icc_node ipa_core_slave = {
- .name = "ipa_core_slave",
- .id = SC8280XP_SLAVE_IPA_CORE,
- .channels = 1,
- .buswidth = 8,
-};
-
static struct qcom_icc_node qup0_core_slave = {
.name = "qup0_core_slave",
.id = SC8280XP_SLAVE_QUP_CORE_0,
@@ -1845,12 +1829,6 @@ static struct qcom_icc_bcm bcm_cn3 = {
},
};
-static struct qcom_icc_bcm bcm_ip0 = {
- .name = "IP0",
- .num_nodes = 1,
- .nodes = { &ipa_core_slave },
-};
-
static struct qcom_icc_bcm bcm_mc0 = {
.name = "MC0",
.keepalive = true,
@@ -2077,18 +2055,15 @@ static const struct qcom_icc_desc sc8280xp_aggre2_noc = {
};
static struct qcom_icc_bcm * const clk_virt_bcms[] = {
- &bcm_ip0,
&bcm_qup0,
&bcm_qup1,
&bcm_qup2,
};
static struct qcom_icc_node * const clk_virt_nodes[] = {
- [MASTER_IPA_CORE] = &ipa_core_master,
[MASTER_QUP_CORE_0] = &qup0_core_master,
[MASTER_QUP_CORE_1] = &qup1_core_master,
[MASTER_QUP_CORE_2] = &qup2_core_master,
- [SLAVE_IPA_CORE] = &ipa_core_slave,
[SLAVE_QUP_CORE_0] = &qup0_core_slave,
[SLAVE_QUP_CORE_1] = &qup1_core_slave,
[SLAVE_QUP_CORE_2] = &qup2_core_slave,
diff --git a/drivers/interconnect/qcom/sc8280xp.h b/drivers/interconnect/qcom/sc8280xp.h
index 74d8fa412d65..c5c410fd5ec3 100644
--- a/drivers/interconnect/qcom/sc8280xp.h
+++ b/drivers/interconnect/qcom/sc8280xp.h
@@ -10,7 +10,7 @@
#define SC8280XP_MASTER_PCIE_TCU 1
#define SC8280XP_MASTER_SYS_TCU 2
#define SC8280XP_MASTER_APPSS_PROC 3
-#define SC8280XP_MASTER_IPA_CORE 4
+/* 4 was used by SLAVE_IPA_CORE, now represented as RPMh clock */
#define SC8280XP_MASTER_LLCC 5
#define SC8280XP_MASTER_CNOC_LPASS_AG_NOC 6
#define SC8280XP_MASTER_CDSP_NOC_CFG 7
@@ -84,7 +84,7 @@
#define SC8280XP_MASTER_USB4_0 75
#define SC8280XP_MASTER_USB4_1 76
#define SC8280XP_SLAVE_EBI1 512
-#define SC8280XP_SLAVE_IPA_CORE 513
+/* 513 was used by SLAVE_IPA_CORE, now represented as RPMh clock */
#define SC8280XP_SLAVE_AHB2PHY_0 514
#define SC8280XP_SLAVE_AHB2PHY_1 515
#define SC8280XP_SLAVE_AHB2PHY_2 516
diff --git a/drivers/interconnect/qcom/sdm670.c b/drivers/interconnect/qcom/sdm670.c
new file mode 100644
index 000000000000..bda955035518
--- /dev/null
+++ b/drivers/interconnect/qcom/sdm670.c
@@ -0,0 +1,440 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/device.h>
+#include <linux/interconnect.h>
+#include <linux/interconnect-provider.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <dt-bindings/interconnect/qcom,sdm670-rpmh.h>
+
+#include "bcm-voter.h"
+#include "icc-rpmh.h"
+#include "sdm670.h"
+
+DEFINE_QNODE(qhm_a1noc_cfg, SDM670_MASTER_A1NOC_CFG, 1, 4, SDM670_SLAVE_SERVICE_A1NOC);
+DEFINE_QNODE(qhm_qup1, SDM670_MASTER_BLSP_1, 1, 4, SDM670_SLAVE_A1NOC_SNOC);
+DEFINE_QNODE(qhm_tsif, SDM670_MASTER_TSIF, 1, 4, SDM670_SLAVE_A1NOC_SNOC);
+DEFINE_QNODE(xm_emmc, SDM670_MASTER_EMMC, 1, 8, SDM670_SLAVE_A1NOC_SNOC);
+DEFINE_QNODE(xm_sdc2, SDM670_MASTER_SDCC_2, 1, 8, SDM670_SLAVE_A1NOC_SNOC);
+DEFINE_QNODE(xm_sdc4, SDM670_MASTER_SDCC_4, 1, 8, SDM670_SLAVE_A1NOC_SNOC);
+DEFINE_QNODE(xm_ufs_mem, SDM670_MASTER_UFS_MEM, 1, 8, SDM670_SLAVE_A1NOC_SNOC);
+DEFINE_QNODE(qhm_a2noc_cfg, SDM670_MASTER_A2NOC_CFG, 1, 4, SDM670_SLAVE_SERVICE_A2NOC);
+DEFINE_QNODE(qhm_qdss_bam, SDM670_MASTER_QDSS_BAM, 1, 4, SDM670_SLAVE_A2NOC_SNOC);
+DEFINE_QNODE(qhm_qup2, SDM670_MASTER_BLSP_2, 1, 4, SDM670_SLAVE_A2NOC_SNOC);
+DEFINE_QNODE(qnm_cnoc, SDM670_MASTER_CNOC_A2NOC, 1, 8, SDM670_SLAVE_A2NOC_SNOC);
+DEFINE_QNODE(qxm_crypto, SDM670_MASTER_CRYPTO_CORE_0, 1, 8, SDM670_SLAVE_A2NOC_SNOC);
+DEFINE_QNODE(qxm_ipa, SDM670_MASTER_IPA, 1, 8, SDM670_SLAVE_A2NOC_SNOC);
+DEFINE_QNODE(xm_qdss_etr, SDM670_MASTER_QDSS_ETR, 1, 8, SDM670_SLAVE_A2NOC_SNOC);
+DEFINE_QNODE(xm_usb3_0, SDM670_MASTER_USB3, 1, 8, SDM670_SLAVE_A2NOC_SNOC);
+DEFINE_QNODE(qxm_camnoc_hf0_uncomp, SDM670_MASTER_CAMNOC_HF0_UNCOMP, 1, 32, SDM670_SLAVE_CAMNOC_UNCOMP);
+DEFINE_QNODE(qxm_camnoc_hf1_uncomp, SDM670_MASTER_CAMNOC_HF1_UNCOMP, 1, 32, SDM670_SLAVE_CAMNOC_UNCOMP);
+DEFINE_QNODE(qxm_camnoc_sf_uncomp, SDM670_MASTER_CAMNOC_SF_UNCOMP, 1, 32, SDM670_SLAVE_CAMNOC_UNCOMP);
+DEFINE_QNODE(qhm_spdm, SDM670_MASTER_SPDM, 1, 4, SDM670_SLAVE_CNOC_A2NOC);
+DEFINE_QNODE(qnm_snoc, SDM670_MASTER_SNOC_CNOC, 1, 8, SDM670_SLAVE_TLMM_SOUTH, SDM670_SLAVE_CAMERA_CFG, SDM670_SLAVE_SDCC_4, SDM670_SLAVE_SDCC_2, SDM670_SLAVE_CNOC_MNOC_CFG, SDM670_SLAVE_UFS_MEM_CFG, SDM670_SLAVE_GLM, SDM670_SLAVE_PDM, SDM670_SLAVE_A2NOC_CFG, SDM670_SLAVE_QDSS_CFG, SDM670_SLAVE_DISPLAY_CFG, SDM670_SLAVE_TCSR, SDM670_SLAVE_DCC_CFG, SDM670_SLAVE_CNOC_DDRSS, SDM670_SLAVE_SNOC_CFG, SDM670_SLAVE_SOUTH_PHY_CFG, SDM670_SLAVE_GRAPHICS_3D_CFG, SDM670_SLAVE_VENUS_CFG, SDM670_SLAVE_TSIF, SDM670_SLAVE_CDSP_CFG, SDM670_SLAVE_AOP, SDM670_SLAVE_BLSP_2, SDM670_SLAVE_SERVICE_CNOC, SDM670_SLAVE_USB3, SDM670_SLAVE_IPA_CFG, SDM670_SLAVE_RBCPR_CX_CFG, SDM670_SLAVE_A1NOC_CFG, SDM670_SLAVE_AOSS, SDM670_SLAVE_PRNG, SDM670_SLAVE_VSENSE_CTRL_CFG, SDM670_SLAVE_EMMC_CFG, SDM670_SLAVE_BLSP_1, SDM670_SLAVE_SPDM_WRAPPER, SDM670_SLAVE_CRYPTO_0_CFG, SDM670_SLAVE_PIMEM_CFG, SDM670_SLAVE_TLMM_NORTH, SDM670_SLAVE_CLK_CTL, SDM670_SLAVE_IMEM_CFG);
+DEFINE_QNODE(qhm_cnoc, SDM670_MASTER_CNOC_DC_NOC, 1, 4, SDM670_SLAVE_MEM_NOC_CFG, SDM670_SLAVE_LLCC_CFG);
+DEFINE_QNODE(acm_l3, SDM670_MASTER_AMPSS_M0, 1, 16, SDM670_SLAVE_SERVICE_GNOC, SDM670_SLAVE_GNOC_SNOC, SDM670_SLAVE_GNOC_MEM_NOC);
+DEFINE_QNODE(pm_gnoc_cfg, SDM670_MASTER_GNOC_CFG, 1, 4, SDM670_SLAVE_SERVICE_GNOC);
+DEFINE_QNODE(llcc_mc, SDM670_MASTER_LLCC, 2, 4, SDM670_SLAVE_EBI_CH0);
+DEFINE_QNODE(acm_tcu, SDM670_MASTER_TCU_0, 1, 8, SDM670_SLAVE_MEM_NOC_GNOC, SDM670_SLAVE_LLCC, SDM670_SLAVE_MEM_NOC_SNOC);
+DEFINE_QNODE(qhm_memnoc_cfg, SDM670_MASTER_MEM_NOC_CFG, 1, 4, SDM670_SLAVE_SERVICE_MEM_NOC, SDM670_SLAVE_MSS_PROC_MS_MPU_CFG);
+DEFINE_QNODE(qnm_apps, SDM670_MASTER_GNOC_MEM_NOC, 2, 32, SDM670_SLAVE_LLCC);
+DEFINE_QNODE(qnm_mnoc_hf, SDM670_MASTER_MNOC_HF_MEM_NOC, 2, 32, SDM670_SLAVE_LLCC);
+DEFINE_QNODE(qnm_mnoc_sf, SDM670_MASTER_MNOC_SF_MEM_NOC, 1, 32, SDM670_SLAVE_MEM_NOC_GNOC, SDM670_SLAVE_LLCC, SDM670_SLAVE_MEM_NOC_SNOC);
+DEFINE_QNODE(qnm_snoc_gc, SDM670_MASTER_SNOC_GC_MEM_NOC, 1, 8, SDM670_SLAVE_LLCC);
+DEFINE_QNODE(qnm_snoc_sf, SDM670_MASTER_SNOC_SF_MEM_NOC, 1, 16, SDM670_SLAVE_MEM_NOC_GNOC, SDM670_SLAVE_LLCC);
+DEFINE_QNODE(qxm_gpu, SDM670_MASTER_GRAPHICS_3D, 2, 32, SDM670_SLAVE_MEM_NOC_GNOC, SDM670_SLAVE_LLCC, SDM670_SLAVE_MEM_NOC_SNOC);
+DEFINE_QNODE(qhm_mnoc_cfg, SDM670_MASTER_CNOC_MNOC_CFG, 1, 4, SDM670_SLAVE_SERVICE_MNOC);
+DEFINE_QNODE(qxm_camnoc_hf0, SDM670_MASTER_CAMNOC_HF0, 1, 32, SDM670_SLAVE_MNOC_HF_MEM_NOC);
+DEFINE_QNODE(qxm_camnoc_hf1, SDM670_MASTER_CAMNOC_HF1, 1, 32, SDM670_SLAVE_MNOC_HF_MEM_NOC);
+DEFINE_QNODE(qxm_camnoc_sf, SDM670_MASTER_CAMNOC_SF, 1, 32, SDM670_SLAVE_MNOC_SF_MEM_NOC);
+DEFINE_QNODE(qxm_mdp0, SDM670_MASTER_MDP_PORT0, 1, 32, SDM670_SLAVE_MNOC_HF_MEM_NOC);
+DEFINE_QNODE(qxm_mdp1, SDM670_MASTER_MDP_PORT1, 1, 32, SDM670_SLAVE_MNOC_HF_MEM_NOC);
+DEFINE_QNODE(qxm_rot, SDM670_MASTER_ROTATOR, 1, 32, SDM670_SLAVE_MNOC_SF_MEM_NOC);
+DEFINE_QNODE(qxm_venus0, SDM670_MASTER_VIDEO_P0, 1, 32, SDM670_SLAVE_MNOC_SF_MEM_NOC);
+DEFINE_QNODE(qxm_venus1, SDM670_MASTER_VIDEO_P1, 1, 32, SDM670_SLAVE_MNOC_SF_MEM_NOC);
+DEFINE_QNODE(qxm_venus_arm9, SDM670_MASTER_VIDEO_PROC, 1, 8, SDM670_SLAVE_MNOC_SF_MEM_NOC);
+DEFINE_QNODE(qhm_snoc_cfg, SDM670_MASTER_SNOC_CFG, 1, 4, SDM670_SLAVE_SERVICE_SNOC);
+DEFINE_QNODE(qnm_aggre1_noc, SDM670_MASTER_A1NOC_SNOC, 1, 16, SDM670_SLAVE_PIMEM, SDM670_SLAVE_SNOC_MEM_NOC_SF, SDM670_SLAVE_OCIMEM, SDM670_SLAVE_APPSS, SDM670_SLAVE_SNOC_CNOC, SDM670_SLAVE_QDSS_STM);
+DEFINE_QNODE(qnm_aggre2_noc, SDM670_MASTER_A2NOC_SNOC, 1, 16, SDM670_SLAVE_PIMEM, SDM670_SLAVE_SNOC_MEM_NOC_SF, SDM670_SLAVE_OCIMEM, SDM670_SLAVE_APPSS, SDM670_SLAVE_SNOC_CNOC, SDM670_SLAVE_TCU, SDM670_SLAVE_QDSS_STM);
+DEFINE_QNODE(qnm_gladiator_sodv, SDM670_MASTER_GNOC_SNOC, 1, 8, SDM670_SLAVE_PIMEM, SDM670_SLAVE_OCIMEM, SDM670_SLAVE_APPSS, SDM670_SLAVE_SNOC_CNOC, SDM670_SLAVE_TCU, SDM670_SLAVE_QDSS_STM);
+DEFINE_QNODE(qnm_memnoc, SDM670_MASTER_MEM_NOC_SNOC, 1, 8, SDM670_SLAVE_OCIMEM, SDM670_SLAVE_APPSS, SDM670_SLAVE_PIMEM, SDM670_SLAVE_SNOC_CNOC, SDM670_SLAVE_QDSS_STM);
+DEFINE_QNODE(qxm_pimem, SDM670_MASTER_PIMEM, 1, 8, SDM670_SLAVE_OCIMEM, SDM670_SLAVE_SNOC_MEM_NOC_GC);
+DEFINE_QNODE(xm_gic, SDM670_MASTER_GIC, 1, 8, SDM670_SLAVE_OCIMEM, SDM670_SLAVE_SNOC_MEM_NOC_GC);
+DEFINE_QNODE(qns_a1noc_snoc, SDM670_SLAVE_A1NOC_SNOC, 1, 16, SDM670_MASTER_A1NOC_SNOC);
+DEFINE_QNODE(srvc_aggre1_noc, SDM670_SLAVE_SERVICE_A1NOC, 1, 4);
+DEFINE_QNODE(qns_a2noc_snoc, SDM670_SLAVE_A2NOC_SNOC, 1, 16, SDM670_MASTER_A2NOC_SNOC);
+DEFINE_QNODE(srvc_aggre2_noc, SDM670_SLAVE_SERVICE_A2NOC, 1, 4);
+DEFINE_QNODE(qns_camnoc_uncomp, SDM670_SLAVE_CAMNOC_UNCOMP, 1, 32);
+DEFINE_QNODE(qhs_a1_noc_cfg, SDM670_SLAVE_A1NOC_CFG, 1, 4, SDM670_MASTER_A1NOC_CFG);
+DEFINE_QNODE(qhs_a2_noc_cfg, SDM670_SLAVE_A2NOC_CFG, 1, 4, SDM670_MASTER_A2NOC_CFG);
+DEFINE_QNODE(qhs_aop, SDM670_SLAVE_AOP, 1, 4);
+DEFINE_QNODE(qhs_aoss, SDM670_SLAVE_AOSS, 1, 4);
+DEFINE_QNODE(qhs_camera_cfg, SDM670_SLAVE_CAMERA_CFG, 1, 4);
+DEFINE_QNODE(qhs_clk_ctl, SDM670_SLAVE_CLK_CTL, 1, 4);
+DEFINE_QNODE(qhs_compute_dsp_cfg, SDM670_SLAVE_CDSP_CFG, 1, 4);
+DEFINE_QNODE(qhs_cpr_cx, SDM670_SLAVE_RBCPR_CX_CFG, 1, 4);
+DEFINE_QNODE(qhs_crypto0_cfg, SDM670_SLAVE_CRYPTO_0_CFG, 1, 4);
+DEFINE_QNODE(qhs_dcc_cfg, SDM670_SLAVE_DCC_CFG, 1, 4, SDM670_MASTER_CNOC_DC_NOC);
+DEFINE_QNODE(qhs_ddrss_cfg, SDM670_SLAVE_CNOC_DDRSS, 1, 4);
+DEFINE_QNODE(qhs_display_cfg, SDM670_SLAVE_DISPLAY_CFG, 1, 4);
+DEFINE_QNODE(qhs_emmc_cfg, SDM670_SLAVE_EMMC_CFG, 1, 4);
+DEFINE_QNODE(qhs_glm, SDM670_SLAVE_GLM, 1, 4);
+DEFINE_QNODE(qhs_gpuss_cfg, SDM670_SLAVE_GRAPHICS_3D_CFG, 1, 8);
+DEFINE_QNODE(qhs_imem_cfg, SDM670_SLAVE_IMEM_CFG, 1, 4);
+DEFINE_QNODE(qhs_ipa, SDM670_SLAVE_IPA_CFG, 1, 4);
+DEFINE_QNODE(qhs_mnoc_cfg, SDM670_SLAVE_CNOC_MNOC_CFG, 1, 4, SDM670_MASTER_CNOC_MNOC_CFG);
+DEFINE_QNODE(qhs_pdm, SDM670_SLAVE_PDM, 1, 4);
+DEFINE_QNODE(qhs_phy_refgen_south, SDM670_SLAVE_SOUTH_PHY_CFG, 1, 4);
+DEFINE_QNODE(qhs_pimem_cfg, SDM670_SLAVE_PIMEM_CFG, 1, 4);
+DEFINE_QNODE(qhs_prng, SDM670_SLAVE_PRNG, 1, 4);
+DEFINE_QNODE(qhs_qdss_cfg, SDM670_SLAVE_QDSS_CFG, 1, 4);
+DEFINE_QNODE(qhs_qupv3_north, SDM670_SLAVE_BLSP_2, 1, 4);
+DEFINE_QNODE(qhs_qupv3_south, SDM670_SLAVE_BLSP_1, 1, 4);
+DEFINE_QNODE(qhs_sdc2, SDM670_SLAVE_SDCC_2, 1, 4);
+DEFINE_QNODE(qhs_sdc4, SDM670_SLAVE_SDCC_4, 1, 4);
+DEFINE_QNODE(qhs_snoc_cfg, SDM670_SLAVE_SNOC_CFG, 1, 4, SDM670_MASTER_SNOC_CFG);
+DEFINE_QNODE(qhs_spdm, SDM670_SLAVE_SPDM_WRAPPER, 1, 4);
+DEFINE_QNODE(qhs_tcsr, SDM670_SLAVE_TCSR, 1, 4);
+DEFINE_QNODE(qhs_tlmm_north, SDM670_SLAVE_TLMM_NORTH, 1, 4);
+DEFINE_QNODE(qhs_tlmm_south, SDM670_SLAVE_TLMM_SOUTH, 1, 4);
+DEFINE_QNODE(qhs_tsif, SDM670_SLAVE_TSIF, 1, 4);
+DEFINE_QNODE(qhs_ufs_mem_cfg, SDM670_SLAVE_UFS_MEM_CFG, 1, 4);
+DEFINE_QNODE(qhs_usb3_0, SDM670_SLAVE_USB3, 1, 4);
+DEFINE_QNODE(qhs_venus_cfg, SDM670_SLAVE_VENUS_CFG, 1, 4);
+DEFINE_QNODE(qhs_vsense_ctrl_cfg, SDM670_SLAVE_VSENSE_CTRL_CFG, 1, 4);
+DEFINE_QNODE(qns_cnoc_a2noc, SDM670_SLAVE_CNOC_A2NOC, 1, 8, SDM670_MASTER_CNOC_A2NOC);
+DEFINE_QNODE(srvc_cnoc, SDM670_SLAVE_SERVICE_CNOC, 1, 4);
+DEFINE_QNODE(qhs_llcc, SDM670_SLAVE_LLCC_CFG, 1, 4);
+DEFINE_QNODE(qhs_memnoc, SDM670_SLAVE_MEM_NOC_CFG, 1, 4, SDM670_MASTER_MEM_NOC_CFG);
+DEFINE_QNODE(qns_gladiator_sodv, SDM670_SLAVE_GNOC_SNOC, 1, 8, SDM670_MASTER_GNOC_SNOC);
+DEFINE_QNODE(qns_gnoc_memnoc, SDM670_SLAVE_GNOC_MEM_NOC, 2, 32, SDM670_MASTER_GNOC_MEM_NOC);
+DEFINE_QNODE(srvc_gnoc, SDM670_SLAVE_SERVICE_GNOC, 1, 4);
+DEFINE_QNODE(ebi, SDM670_SLAVE_EBI_CH0, 2, 4);
+DEFINE_QNODE(qhs_mdsp_ms_mpu_cfg, SDM670_SLAVE_MSS_PROC_MS_MPU_CFG, 1, 4);
+DEFINE_QNODE(qns_apps_io, SDM670_SLAVE_MEM_NOC_GNOC, 1, 32);
+DEFINE_QNODE(qns_llcc, SDM670_SLAVE_LLCC, 2, 16, SDM670_MASTER_LLCC);
+DEFINE_QNODE(qns_memnoc_snoc, SDM670_SLAVE_MEM_NOC_SNOC, 1, 8, SDM670_MASTER_MEM_NOC_SNOC);
+DEFINE_QNODE(srvc_memnoc, SDM670_SLAVE_SERVICE_MEM_NOC, 1, 4);
+DEFINE_QNODE(qns2_mem_noc, SDM670_SLAVE_MNOC_SF_MEM_NOC, 1, 32, SDM670_MASTER_MNOC_SF_MEM_NOC);
+DEFINE_QNODE(qns_mem_noc_hf, SDM670_SLAVE_MNOC_HF_MEM_NOC, 2, 32, SDM670_MASTER_MNOC_HF_MEM_NOC);
+DEFINE_QNODE(srvc_mnoc, SDM670_SLAVE_SERVICE_MNOC, 1, 4);
+DEFINE_QNODE(qhs_apss, SDM670_SLAVE_APPSS, 1, 8);
+DEFINE_QNODE(qns_cnoc, SDM670_SLAVE_SNOC_CNOC, 1, 8, SDM670_MASTER_SNOC_CNOC);
+DEFINE_QNODE(qns_memnoc_gc, SDM670_SLAVE_SNOC_MEM_NOC_GC, 1, 8, SDM670_MASTER_SNOC_GC_MEM_NOC);
+DEFINE_QNODE(qns_memnoc_sf, SDM670_SLAVE_SNOC_MEM_NOC_SF, 1, 16, SDM670_MASTER_SNOC_SF_MEM_NOC);
+DEFINE_QNODE(qxs_imem, SDM670_SLAVE_OCIMEM, 1, 8);
+DEFINE_QNODE(qxs_pimem, SDM670_SLAVE_PIMEM, 1, 8);
+DEFINE_QNODE(srvc_snoc, SDM670_SLAVE_SERVICE_SNOC, 1, 4);
+DEFINE_QNODE(xs_qdss_stm, SDM670_SLAVE_QDSS_STM, 1, 4);
+DEFINE_QNODE(xs_sys_tcu_cfg, SDM670_SLAVE_TCU, 1, 8);
+
+DEFINE_QBCM(bcm_acv, "ACV", false, &ebi);
+DEFINE_QBCM(bcm_mc0, "MC0", true, &ebi);
+DEFINE_QBCM(bcm_sh0, "SH0", true, &qns_llcc);
+DEFINE_QBCM(bcm_mm0, "MM0", true, &qns_mem_noc_hf);
+DEFINE_QBCM(bcm_sh1, "SH1", false, &qns_apps_io);
+DEFINE_QBCM(bcm_mm1, "MM1", true, &qxm_camnoc_hf0_uncomp, &qxm_camnoc_hf1_uncomp, &qxm_camnoc_sf_uncomp, &qxm_camnoc_hf0, &qxm_camnoc_hf1, &qxm_mdp0, &qxm_mdp1);
+DEFINE_QBCM(bcm_sh2, "SH2", false, &qns_memnoc_snoc);
+DEFINE_QBCM(bcm_mm2, "MM2", false, &qns2_mem_noc);
+DEFINE_QBCM(bcm_sh3, "SH3", false, &acm_tcu);
+DEFINE_QBCM(bcm_mm3, "MM3", false, &qxm_camnoc_sf, &qxm_rot, &qxm_venus0, &qxm_venus1, &qxm_venus_arm9);
+DEFINE_QBCM(bcm_sh5, "SH5", false, &qnm_apps);
+DEFINE_QBCM(bcm_sn0, "SN0", true, &qns_memnoc_sf);
+DEFINE_QBCM(bcm_ce0, "CE0", false, &qxm_crypto);
+DEFINE_QBCM(bcm_cn0, "CN0", true, &qhm_spdm, &qnm_snoc, &qhs_a1_noc_cfg, &qhs_a2_noc_cfg, &qhs_aop, &qhs_aoss, &qhs_camera_cfg, &qhs_clk_ctl, &qhs_compute_dsp_cfg, &qhs_cpr_cx, &qhs_crypto0_cfg, &qhs_dcc_cfg, &qhs_ddrss_cfg, &qhs_display_cfg, &qhs_emmc_cfg, &qhs_glm, &qhs_gpuss_cfg, &qhs_imem_cfg, &qhs_ipa, &qhs_mnoc_cfg, &qhs_pdm, &qhs_phy_refgen_south, &qhs_pimem_cfg, &qhs_prng, &qhs_qdss_cfg, &qhs_qupv3_north, &qhs_qupv3_south, &qhs_sdc2, &qhs_sdc4, &qhs_snoc_cfg, &qhs_spdm, &qhs_tcsr, &qhs_tlmm_north, &qhs_tlmm_south, &qhs_tsif, &qhs_ufs_mem_cfg, &qhs_usb3_0, &qhs_venus_cfg, &qhs_vsense_ctrl_cfg, &qns_cnoc_a2noc, &srvc_cnoc);
+DEFINE_QBCM(bcm_qup0, "QUP0", false, &qhm_qup1, &qhm_qup2);
+DEFINE_QBCM(bcm_sn1, "SN1", false, &qxs_imem);
+DEFINE_QBCM(bcm_sn2, "SN2", false, &qns_memnoc_gc);
+DEFINE_QBCM(bcm_sn3, "SN3", false, &qns_cnoc);
+DEFINE_QBCM(bcm_sn4, "SN4", false, &qxm_pimem, &qxs_pimem);
+DEFINE_QBCM(bcm_sn5, "SN5", false, &xs_qdss_stm);
+DEFINE_QBCM(bcm_sn8, "SN8", false, &qnm_aggre1_noc, &srvc_aggre1_noc);
+DEFINE_QBCM(bcm_sn10, "SN10", false, &qnm_aggre2_noc, &srvc_aggre2_noc);
+DEFINE_QBCM(bcm_sn11, "SN11", false, &qnm_gladiator_sodv, &xm_gic);
+DEFINE_QBCM(bcm_sn13, "SN13", false, &qnm_memnoc);
+
+static struct qcom_icc_bcm * const aggre1_noc_bcms[] = {
+ &bcm_qup0,
+ &bcm_sn8,
+};
+
+static struct qcom_icc_node * const aggre1_noc_nodes[] = {
+ [MASTER_A1NOC_CFG] = &qhm_a1noc_cfg,
+ [MASTER_BLSP_1] = &qhm_qup1,
+ [MASTER_TSIF] = &qhm_tsif,
+ [MASTER_EMMC] = &xm_emmc,
+ [MASTER_SDCC_2] = &xm_sdc2,
+ [MASTER_SDCC_4] = &xm_sdc4,
+ [MASTER_UFS_MEM] = &xm_ufs_mem,
+ [SLAVE_A1NOC_SNOC] = &qns_a1noc_snoc,
+ [SLAVE_SERVICE_A1NOC] = &srvc_aggre1_noc,
+};
+
+static const struct qcom_icc_desc sdm670_aggre1_noc = {
+ .nodes = aggre1_noc_nodes,
+ .num_nodes = ARRAY_SIZE(aggre1_noc_nodes),
+ .bcms = aggre1_noc_bcms,
+ .num_bcms = ARRAY_SIZE(aggre1_noc_bcms),
+};
+
+static struct qcom_icc_bcm * const aggre2_noc_bcms[] = {
+ &bcm_ce0,
+ &bcm_qup0,
+ &bcm_sn10,
+};
+
+static struct qcom_icc_node * const aggre2_noc_nodes[] = {
+ [MASTER_A2NOC_CFG] = &qhm_a2noc_cfg,
+ [MASTER_QDSS_BAM] = &qhm_qdss_bam,
+ [MASTER_BLSP_2] = &qhm_qup2,
+ [MASTER_CNOC_A2NOC] = &qnm_cnoc,
+ [MASTER_CRYPTO_CORE_0] = &qxm_crypto,
+ [MASTER_IPA] = &qxm_ipa,
+ [MASTER_QDSS_ETR] = &xm_qdss_etr,
+ [MASTER_USB3] = &xm_usb3_0,
+ [SLAVE_A2NOC_SNOC] = &qns_a2noc_snoc,
+ [SLAVE_SERVICE_A2NOC] = &srvc_aggre2_noc,
+};
+
+static const struct qcom_icc_desc sdm670_aggre2_noc = {
+ .nodes = aggre2_noc_nodes,
+ .num_nodes = ARRAY_SIZE(aggre2_noc_nodes),
+ .bcms = aggre2_noc_bcms,
+ .num_bcms = ARRAY_SIZE(aggre2_noc_bcms),
+};
+
+static struct qcom_icc_bcm * const config_noc_bcms[] = {
+ &bcm_cn0,
+};
+
+static struct qcom_icc_node * const config_noc_nodes[] = {
+ [MASTER_SPDM] = &qhm_spdm,
+ [MASTER_SNOC_CNOC] = &qnm_snoc,
+ [SLAVE_A1NOC_CFG] = &qhs_a1_noc_cfg,
+ [SLAVE_A2NOC_CFG] = &qhs_a2_noc_cfg,
+ [SLAVE_AOP] = &qhs_aop,
+ [SLAVE_AOSS] = &qhs_aoss,
+ [SLAVE_CAMERA_CFG] = &qhs_camera_cfg,
+ [SLAVE_CLK_CTL] = &qhs_clk_ctl,
+ [SLAVE_CDSP_CFG] = &qhs_compute_dsp_cfg,
+ [SLAVE_RBCPR_CX_CFG] = &qhs_cpr_cx,
+ [SLAVE_CRYPTO_0_CFG] = &qhs_crypto0_cfg,
+ [SLAVE_DCC_CFG] = &qhs_dcc_cfg,
+ [SLAVE_CNOC_DDRSS] = &qhs_ddrss_cfg,
+ [SLAVE_DISPLAY_CFG] = &qhs_display_cfg,
+ [SLAVE_EMMC_CFG] = &qhs_emmc_cfg,
+ [SLAVE_GLM] = &qhs_glm,
+ [SLAVE_GRAPHICS_3D_CFG] = &qhs_gpuss_cfg,
+ [SLAVE_IMEM_CFG] = &qhs_imem_cfg,
+ [SLAVE_IPA_CFG] = &qhs_ipa,
+ [SLAVE_CNOC_MNOC_CFG] = &qhs_mnoc_cfg,
+ [SLAVE_PDM] = &qhs_pdm,
+ [SLAVE_SOUTH_PHY_CFG] = &qhs_phy_refgen_south,
+ [SLAVE_PIMEM_CFG] = &qhs_pimem_cfg,
+ [SLAVE_PRNG] = &qhs_prng,
+ [SLAVE_QDSS_CFG] = &qhs_qdss_cfg,
+ [SLAVE_BLSP_2] = &qhs_qupv3_north,
+ [SLAVE_BLSP_1] = &qhs_qupv3_south,
+ [SLAVE_SDCC_2] = &qhs_sdc2,
+ [SLAVE_SDCC_4] = &qhs_sdc4,
+ [SLAVE_SNOC_CFG] = &qhs_snoc_cfg,
+ [SLAVE_SPDM_WRAPPER] = &qhs_spdm,
+ [SLAVE_TCSR] = &qhs_tcsr,
+ [SLAVE_TLMM_NORTH] = &qhs_tlmm_north,
+ [SLAVE_TLMM_SOUTH] = &qhs_tlmm_south,
+ [SLAVE_TSIF] = &qhs_tsif,
+ [SLAVE_UFS_MEM_CFG] = &qhs_ufs_mem_cfg,
+ [SLAVE_USB3] = &qhs_usb3_0,
+ [SLAVE_VENUS_CFG] = &qhs_venus_cfg,
+ [SLAVE_VSENSE_CTRL_CFG] = &qhs_vsense_ctrl_cfg,
+ [SLAVE_CNOC_A2NOC] = &qns_cnoc_a2noc,
+ [SLAVE_SERVICE_CNOC] = &srvc_cnoc,
+};
+
+static const struct qcom_icc_desc sdm670_config_noc = {
+ .nodes = config_noc_nodes,
+ .num_nodes = ARRAY_SIZE(config_noc_nodes),
+ .bcms = config_noc_bcms,
+ .num_bcms = ARRAY_SIZE(config_noc_bcms),
+};
+
+static struct qcom_icc_bcm * const dc_noc_bcms[] = {
+};
+
+static struct qcom_icc_node * const dc_noc_nodes[] = {
+ [MASTER_CNOC_DC_NOC] = &qhm_cnoc,
+ [SLAVE_LLCC_CFG] = &qhs_llcc,
+ [SLAVE_MEM_NOC_CFG] = &qhs_memnoc,
+};
+
+static const struct qcom_icc_desc sdm670_dc_noc = {
+ .nodes = dc_noc_nodes,
+ .num_nodes = ARRAY_SIZE(dc_noc_nodes),
+ .bcms = dc_noc_bcms,
+ .num_bcms = ARRAY_SIZE(dc_noc_bcms),
+};
+
+static struct qcom_icc_bcm * const gladiator_noc_bcms[] = {
+};
+
+static struct qcom_icc_node * const gladiator_noc_nodes[] = {
+ [MASTER_AMPSS_M0] = &acm_l3,
+ [MASTER_GNOC_CFG] = &pm_gnoc_cfg,
+ [SLAVE_GNOC_SNOC] = &qns_gladiator_sodv,
+ [SLAVE_GNOC_MEM_NOC] = &qns_gnoc_memnoc,
+ [SLAVE_SERVICE_GNOC] = &srvc_gnoc,
+};
+
+static const struct qcom_icc_desc sdm670_gladiator_noc = {
+ .nodes = gladiator_noc_nodes,
+ .num_nodes = ARRAY_SIZE(gladiator_noc_nodes),
+ .bcms = gladiator_noc_bcms,
+ .num_bcms = ARRAY_SIZE(gladiator_noc_bcms),
+};
+
+static struct qcom_icc_bcm * const mem_noc_bcms[] = {
+ &bcm_acv,
+ &bcm_mc0,
+ &bcm_sh0,
+ &bcm_sh1,
+ &bcm_sh2,
+ &bcm_sh3,
+ &bcm_sh5,
+};
+
+static struct qcom_icc_node * const mem_noc_nodes[] = {
+ [MASTER_TCU_0] = &acm_tcu,
+ [MASTER_MEM_NOC_CFG] = &qhm_memnoc_cfg,
+ [MASTER_GNOC_MEM_NOC] = &qnm_apps,
+ [MASTER_MNOC_HF_MEM_NOC] = &qnm_mnoc_hf,
+ [MASTER_MNOC_SF_MEM_NOC] = &qnm_mnoc_sf,
+ [MASTER_SNOC_GC_MEM_NOC] = &qnm_snoc_gc,
+ [MASTER_SNOC_SF_MEM_NOC] = &qnm_snoc_sf,
+ [MASTER_GRAPHICS_3D] = &qxm_gpu,
+ [SLAVE_MSS_PROC_MS_MPU_CFG] = &qhs_mdsp_ms_mpu_cfg,
+ [SLAVE_MEM_NOC_GNOC] = &qns_apps_io,
+ [SLAVE_LLCC] = &qns_llcc,
+ [SLAVE_MEM_NOC_SNOC] = &qns_memnoc_snoc,
+ [SLAVE_SERVICE_MEM_NOC] = &srvc_memnoc,
+ [MASTER_LLCC] = &llcc_mc,
+ [SLAVE_EBI_CH0] = &ebi,
+};
+
+static const struct qcom_icc_desc sdm670_mem_noc = {
+ .nodes = mem_noc_nodes,
+ .num_nodes = ARRAY_SIZE(mem_noc_nodes),
+ .bcms = mem_noc_bcms,
+ .num_bcms = ARRAY_SIZE(mem_noc_bcms),
+};
+
+static struct qcom_icc_bcm * const mmss_noc_bcms[] = {
+ &bcm_mm0,
+ &bcm_mm1,
+ &bcm_mm2,
+ &bcm_mm3,
+};
+
+static struct qcom_icc_node * const mmss_noc_nodes[] = {
+ [MASTER_CNOC_MNOC_CFG] = &qhm_mnoc_cfg,
+ [MASTER_CAMNOC_HF0] = &qxm_camnoc_hf0,
+ [MASTER_CAMNOC_HF1] = &qxm_camnoc_hf1,
+ [MASTER_CAMNOC_SF] = &qxm_camnoc_sf,
+ [MASTER_MDP_PORT0] = &qxm_mdp0,
+ [MASTER_MDP_PORT1] = &qxm_mdp1,
+ [MASTER_ROTATOR] = &qxm_rot,
+ [MASTER_VIDEO_P0] = &qxm_venus0,
+ [MASTER_VIDEO_P1] = &qxm_venus1,
+ [MASTER_VIDEO_PROC] = &qxm_venus_arm9,
+ [SLAVE_MNOC_SF_MEM_NOC] = &qns2_mem_noc,
+ [SLAVE_MNOC_HF_MEM_NOC] = &qns_mem_noc_hf,
+ [SLAVE_SERVICE_MNOC] = &srvc_mnoc,
+};
+
+static const struct qcom_icc_desc sdm670_mmss_noc = {
+ .nodes = mmss_noc_nodes,
+ .num_nodes = ARRAY_SIZE(mmss_noc_nodes),
+ .bcms = mmss_noc_bcms,
+ .num_bcms = ARRAY_SIZE(mmss_noc_bcms),
+};
+
+static struct qcom_icc_bcm * const system_noc_bcms[] = {
+ &bcm_mm1,
+ &bcm_sn0,
+ &bcm_sn1,
+ &bcm_sn10,
+ &bcm_sn11,
+ &bcm_sn13,
+ &bcm_sn2,
+ &bcm_sn3,
+ &bcm_sn4,
+ &bcm_sn5,
+ &bcm_sn8,
+};
+
+static struct qcom_icc_node * const system_noc_nodes[] = {
+ [MASTER_SNOC_CFG] = &qhm_snoc_cfg,
+ [MASTER_A1NOC_SNOC] = &qnm_aggre1_noc,
+ [MASTER_A2NOC_SNOC] = &qnm_aggre2_noc,
+ [MASTER_GNOC_SNOC] = &qnm_gladiator_sodv,
+ [MASTER_MEM_NOC_SNOC] = &qnm_memnoc,
+ [MASTER_PIMEM] = &qxm_pimem,
+ [MASTER_GIC] = &xm_gic,
+ [SLAVE_APPSS] = &qhs_apss,
+ [SLAVE_SNOC_CNOC] = &qns_cnoc,
+ [SLAVE_SNOC_MEM_NOC_GC] = &qns_memnoc_gc,
+ [SLAVE_SNOC_MEM_NOC_SF] = &qns_memnoc_sf,
+ [SLAVE_OCIMEM] = &qxs_imem,
+ [SLAVE_PIMEM] = &qxs_pimem,
+ [SLAVE_SERVICE_SNOC] = &srvc_snoc,
+ [SLAVE_QDSS_STM] = &xs_qdss_stm,
+ [SLAVE_TCU] = &xs_sys_tcu_cfg,
+ [MASTER_CAMNOC_HF0_UNCOMP] = &qxm_camnoc_hf0_uncomp,
+ [MASTER_CAMNOC_HF1_UNCOMP] = &qxm_camnoc_hf1_uncomp,
+ [MASTER_CAMNOC_SF_UNCOMP] = &qxm_camnoc_sf_uncomp,
+ [SLAVE_CAMNOC_UNCOMP] = &qns_camnoc_uncomp,
+};
+
+static const struct qcom_icc_desc sdm670_system_noc = {
+ .nodes = system_noc_nodes,
+ .num_nodes = ARRAY_SIZE(system_noc_nodes),
+ .bcms = system_noc_bcms,
+ .num_bcms = ARRAY_SIZE(system_noc_bcms),
+};
+
+static const struct of_device_id qnoc_of_match[] = {
+ { .compatible = "qcom,sdm670-aggre1-noc",
+ .data = &sdm670_aggre1_noc},
+ { .compatible = "qcom,sdm670-aggre2-noc",
+ .data = &sdm670_aggre2_noc},
+ { .compatible = "qcom,sdm670-config-noc",
+ .data = &sdm670_config_noc},
+ { .compatible = "qcom,sdm670-dc-noc",
+ .data = &sdm670_dc_noc},
+ { .compatible = "qcom,sdm670-gladiator-noc",
+ .data = &sdm670_gladiator_noc},
+ { .compatible = "qcom,sdm670-mem-noc",
+ .data = &sdm670_mem_noc},
+ { .compatible = "qcom,sdm670-mmss-noc",
+ .data = &sdm670_mmss_noc},
+ { .compatible = "qcom,sdm670-system-noc",
+ .data = &sdm670_system_noc},
+ { }
+};
+MODULE_DEVICE_TABLE(of, qnoc_of_match);
+
+static struct platform_driver qnoc_driver = {
+ .probe = qcom_icc_rpmh_probe,
+ .remove = qcom_icc_rpmh_remove,
+ .driver = {
+ .name = "qnoc-sdm670",
+ .of_match_table = qnoc_of_match,
+ .sync_state = icc_sync_state,
+ },
+};
+module_platform_driver(qnoc_driver);
+
+MODULE_DESCRIPTION("Qualcomm SDM670 NoC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/interconnect/qcom/sdm670.h b/drivers/interconnect/qcom/sdm670.h
new file mode 100644
index 000000000000..14155f244c43
--- /dev/null
+++ b/drivers/interconnect/qcom/sdm670.h
@@ -0,0 +1,128 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Qualcomm #define SDM670 interconnect IDs
+ *
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __DRIVERS_INTERCONNECT_QCOM_SDM670_H
+#define __DRIVERS_INTERCONNECT_QCOM_SDM670_H
+
+#define SDM670_MASTER_A1NOC_CFG 0
+#define SDM670_MASTER_A1NOC_SNOC 1
+#define SDM670_MASTER_A2NOC_CFG 2
+#define SDM670_MASTER_A2NOC_SNOC 3
+#define SDM670_MASTER_AMPSS_M0 4
+#define SDM670_MASTER_BLSP_1 5
+#define SDM670_MASTER_BLSP_2 6
+#define SDM670_MASTER_CAMNOC_HF0 7
+#define SDM670_MASTER_CAMNOC_HF0_UNCOMP 8
+#define SDM670_MASTER_CAMNOC_HF1 9
+#define SDM670_MASTER_CAMNOC_HF1_UNCOMP 10
+#define SDM670_MASTER_CAMNOC_SF 11
+#define SDM670_MASTER_CAMNOC_SF_UNCOMP 12
+#define SDM670_MASTER_CNOC_A2NOC 13
+#define SDM670_MASTER_CNOC_DC_NOC 14
+#define SDM670_MASTER_CNOC_MNOC_CFG 15
+#define SDM670_MASTER_CRYPTO_CORE_0 16
+#define SDM670_MASTER_EMMC 17
+#define SDM670_MASTER_GIC 18
+#define SDM670_MASTER_GNOC_CFG 19
+#define SDM670_MASTER_GNOC_MEM_NOC 20
+#define SDM670_MASTER_GNOC_SNOC 21
+#define SDM670_MASTER_GRAPHICS_3D 22
+#define SDM670_MASTER_IPA 23
+#define SDM670_MASTER_LLCC 24
+#define SDM670_MASTER_MDP_PORT0 25
+#define SDM670_MASTER_MDP_PORT1 26
+#define SDM670_MASTER_MEM_NOC_CFG 27
+#define SDM670_MASTER_MEM_NOC_SNOC 28
+#define SDM670_MASTER_MNOC_HF_MEM_NOC 29
+#define SDM670_MASTER_MNOC_SF_MEM_NOC 30
+#define SDM670_MASTER_PIMEM 31
+#define SDM670_MASTER_QDSS_BAM 32
+#define SDM670_MASTER_QDSS_ETR 33
+#define SDM670_MASTER_ROTATOR 34
+#define SDM670_MASTER_SDCC_2 35
+#define SDM670_MASTER_SDCC_4 36
+#define SDM670_MASTER_SNOC_CFG 37
+#define SDM670_MASTER_SNOC_CNOC 38
+#define SDM670_MASTER_SNOC_GC_MEM_NOC 39
+#define SDM670_MASTER_SNOC_SF_MEM_NOC 40
+#define SDM670_MASTER_SPDM 41
+#define SDM670_MASTER_TCU_0 42
+#define SDM670_MASTER_TSIF 43
+#define SDM670_MASTER_UFS_MEM 44
+#define SDM670_MASTER_USB3 45
+#define SDM670_MASTER_VIDEO_P0 46
+#define SDM670_MASTER_VIDEO_P1 47
+#define SDM670_MASTER_VIDEO_PROC 48
+#define SDM670_SLAVE_A1NOC_CFG 49
+#define SDM670_SLAVE_A1NOC_SNOC 50
+#define SDM670_SLAVE_A2NOC_CFG 51
+#define SDM670_SLAVE_A2NOC_SNOC 52
+#define SDM670_SLAVE_AOP 53
+#define SDM670_SLAVE_AOSS 54
+#define SDM670_SLAVE_APPSS 55
+#define SDM670_SLAVE_BLSP_1 56
+#define SDM670_SLAVE_BLSP_2 57
+#define SDM670_SLAVE_CAMERA_CFG 58
+#define SDM670_SLAVE_CAMNOC_UNCOMP 59
+#define SDM670_SLAVE_CDSP_CFG 60
+#define SDM670_SLAVE_CLK_CTL 61
+#define SDM670_SLAVE_CNOC_A2NOC 62
+#define SDM670_SLAVE_CNOC_DDRSS 63
+#define SDM670_SLAVE_CNOC_MNOC_CFG 64
+#define SDM670_SLAVE_CRYPTO_0_CFG 65
+#define SDM670_SLAVE_DCC_CFG 66
+#define SDM670_SLAVE_DISPLAY_CFG 67
+#define SDM670_SLAVE_EBI_CH0 68
+#define SDM670_SLAVE_EMMC_CFG 69
+#define SDM670_SLAVE_GLM 70
+#define SDM670_SLAVE_GNOC_MEM_NOC 71
+#define SDM670_SLAVE_GNOC_SNOC 72
+#define SDM670_SLAVE_GRAPHICS_3D_CFG 73
+#define SDM670_SLAVE_IMEM_CFG 74
+#define SDM670_SLAVE_IPA_CFG 75
+#define SDM670_SLAVE_LLCC 76
+#define SDM670_SLAVE_LLCC_CFG 77
+#define SDM670_SLAVE_MEM_NOC_CFG 78
+#define SDM670_SLAVE_MEM_NOC_GNOC 79
+#define SDM670_SLAVE_MEM_NOC_SNOC 80
+#define SDM670_SLAVE_MNOC_HF_MEM_NOC 81
+#define SDM670_SLAVE_MNOC_SF_MEM_NOC 82
+#define SDM670_SLAVE_MSS_PROC_MS_MPU_CFG 83
+#define SDM670_SLAVE_OCIMEM 84
+#define SDM670_SLAVE_PDM 85
+#define SDM670_SLAVE_PIMEM 86
+#define SDM670_SLAVE_PIMEM_CFG 87
+#define SDM670_SLAVE_PRNG 88
+#define SDM670_SLAVE_QDSS_CFG 89
+#define SDM670_SLAVE_QDSS_STM 90
+#define SDM670_SLAVE_RBCPR_CX_CFG 91
+#define SDM670_SLAVE_SDCC_2 92
+#define SDM670_SLAVE_SDCC_4 93
+#define SDM670_SLAVE_SERVICE_A1NOC 94
+#define SDM670_SLAVE_SERVICE_A2NOC 95
+#define SDM670_SLAVE_SERVICE_CNOC 96
+#define SDM670_SLAVE_SERVICE_GNOC 97
+#define SDM670_SLAVE_SERVICE_MEM_NOC 98
+#define SDM670_SLAVE_SERVICE_MNOC 99
+#define SDM670_SLAVE_SERVICE_SNOC 100
+#define SDM670_SLAVE_SNOC_CFG 101
+#define SDM670_SLAVE_SNOC_CNOC 102
+#define SDM670_SLAVE_SNOC_MEM_NOC_GC 103
+#define SDM670_SLAVE_SNOC_MEM_NOC_SF 104
+#define SDM670_SLAVE_SOUTH_PHY_CFG 105
+#define SDM670_SLAVE_SPDM_WRAPPER 106
+#define SDM670_SLAVE_TCSR 107
+#define SDM670_SLAVE_TCU 108
+#define SDM670_SLAVE_TLMM_NORTH 109
+#define SDM670_SLAVE_TLMM_SOUTH 110
+#define SDM670_SLAVE_TSIF 111
+#define SDM670_SLAVE_UFS_MEM_CFG 112
+#define SDM670_SLAVE_USB3 113
+#define SDM670_SLAVE_VENUS_CFG 114
+#define SDM670_SLAVE_VSENSE_CTRL_CFG 115
+
+#endif
diff --git a/drivers/interconnect/qcom/sdx55.h b/drivers/interconnect/qcom/sdx55.h
index deff8afe0631..46cbabec8aa1 100644
--- a/drivers/interconnect/qcom/sdx55.h
+++ b/drivers/interconnect/qcom/sdx55.h
@@ -6,7 +6,7 @@
#ifndef __DRIVERS_INTERCONNECT_QCOM_SDX55_H
#define __DRIVERS_INTERCONNECT_QCOM_SDX55_H
-#define SDX55_MASTER_IPA_CORE 0
+/* 0 was used by MASTER_IPA_CORE, now represented as RPMh clock */
#define SDX55_MASTER_LLCC 1
#define SDX55_MASTER_TCU_0 2
#define SDX55_MASTER_SNOC_GC_MEM_NOC 3
@@ -28,7 +28,7 @@
#define SDX55_MASTER_QDSS_ETR 19
#define SDX55_MASTER_SDCC_1 20
#define SDX55_MASTER_USB3 21
-#define SDX55_SLAVE_IPA_CORE 22
+/* 22 was used by SLAVE_IPA_CORE, now represented as RPMh clock */
#define SDX55_SLAVE_EBI_CH0 23
#define SDX55_SLAVE_LLCC 24
#define SDX55_SLAVE_MEM_NOC_SNOC 25
diff --git a/drivers/interconnect/qcom/sm8150.c b/drivers/interconnect/qcom/sm8150.c
index 1d04a4bfea80..c5ab29322164 100644
--- a/drivers/interconnect/qcom/sm8150.c
+++ b/drivers/interconnect/qcom/sm8150.c
@@ -56,7 +56,6 @@ DEFINE_QNODE(qnm_pcie, SM8150_MASTER_GEM_NOC_PCIE_SNOC, 1, 16, SM8150_SLAVE_LLCC
DEFINE_QNODE(qnm_snoc_gc, SM8150_MASTER_SNOC_GC_MEM_NOC, 1, 8, SM8150_SLAVE_LLCC);
DEFINE_QNODE(qnm_snoc_sf, SM8150_MASTER_SNOC_SF_MEM_NOC, 1, 16, SM8150_SLAVE_LLCC);
DEFINE_QNODE(qxm_ecc, SM8150_MASTER_ECC, 2, 32, SM8150_SLAVE_LLCC);
-DEFINE_QNODE(ipa_core_master, SM8150_MASTER_IPA_CORE, 1, 8, SM8150_SLAVE_IPA_CORE);
DEFINE_QNODE(llcc_mc, SM8150_MASTER_LLCC, 4, 4, SM8150_SLAVE_EBI_CH0);
DEFINE_QNODE(qhm_mnoc_cfg, SM8150_MASTER_CNOC_MNOC_CFG, 1, 4, SM8150_SLAVE_SERVICE_MNOC);
DEFINE_QNODE(qxm_camnoc_hf0, SM8150_MASTER_CAMNOC_HF0, 1, 32, SM8150_SLAVE_MNOC_HF_MEM_NOC);
@@ -139,7 +138,6 @@ DEFINE_QNODE(qns_ecc, SM8150_SLAVE_ECC, 1, 32);
DEFINE_QNODE(qns_gem_noc_snoc, SM8150_SLAVE_GEM_NOC_SNOC, 1, 8, SM8150_MASTER_GEM_NOC_SNOC);
DEFINE_QNODE(qns_llcc, SM8150_SLAVE_LLCC, 4, 16, SM8150_MASTER_LLCC);
DEFINE_QNODE(srvc_gemnoc, SM8150_SLAVE_SERVICE_GEM_NOC, 1, 4);
-DEFINE_QNODE(ipa_core_slave, SM8150_SLAVE_IPA_CORE, 1, 8);
DEFINE_QNODE(ebi, SM8150_SLAVE_EBI_CH0, 4, 4);
DEFINE_QNODE(qns2_mem_noc, SM8150_SLAVE_MNOC_SF_MEM_NOC, 1, 32, SM8150_MASTER_MNOC_SF_MEM_NOC);
DEFINE_QNODE(qns_mem_noc_hf, SM8150_SLAVE_MNOC_HF_MEM_NOC, 2, 32, SM8150_MASTER_MNOC_HF_MEM_NOC);
@@ -172,7 +170,6 @@ DEFINE_QBCM(bcm_co0, "CO0", false, &qns_cdsp_mem_noc);
DEFINE_QBCM(bcm_ce0, "CE0", false, &qxm_crypto);
DEFINE_QBCM(bcm_sn1, "SN1", false, &qxs_imem);
DEFINE_QBCM(bcm_co1, "CO1", false, &qnm_npu);
-DEFINE_QBCM(bcm_ip0, "IP0", false, &ipa_core_slave);
DEFINE_QBCM(bcm_cn0, "CN0", true, &qhm_spdm, &qnm_snoc, &qhs_a1_noc_cfg, &qhs_a2_noc_cfg, &qhs_ahb2phy_south, &qhs_aop, &qhs_aoss, &qhs_camera_cfg, &qhs_clk_ctl, &qhs_compute_dsp, &qhs_cpr_cx, &qhs_cpr_mmcx, &qhs_cpr_mx, &qhs_crypto0_cfg, &qhs_ddrss_cfg, &qhs_display_cfg, &qhs_emac_cfg, &qhs_glm, &qhs_gpuss_cfg, &qhs_imem_cfg, &qhs_ipa, &qhs_mnoc_cfg, &qhs_npu_cfg, &qhs_pcie0_cfg, &qhs_pcie1_cfg, &qhs_phy_refgen_north, &qhs_pimem_cfg, &qhs_prng, &qhs_qdss_cfg, &qhs_qspi, &qhs_qupv3_east, &qhs_qupv3_north, &qhs_qupv3_south, &qhs_sdc2, &qhs_sdc4, &qhs_snoc_cfg, &qhs_spdm, &qhs_spss_cfg, &qhs_ssc_cfg, &qhs_tcsr, &qhs_tlmm_east, &qhs_tlmm_north, &qhs_tlmm_south, &qhs_tlmm_west, &qhs_tsif, &qhs_ufs_card_cfg, &qhs_ufs_mem_cfg, &qhs_usb3_0, &qhs_usb3_1, &qhs_venus_cfg, &qhs_vsense_ctrl_cfg, &qns_cnoc_a2noc, &srvc_cnoc);
DEFINE_QBCM(bcm_qup0, "QUP0", false, &qhm_qup0, &qhm_qup1, &qhm_qup2);
DEFINE_QBCM(bcm_sn2, "SN2", false, &qns_gemnoc_gc);
@@ -398,22 +395,6 @@ static const struct qcom_icc_desc sm8150_gem_noc = {
.num_bcms = ARRAY_SIZE(gem_noc_bcms),
};
-static struct qcom_icc_bcm * const ipa_virt_bcms[] = {
- &bcm_ip0,
-};
-
-static struct qcom_icc_node * const ipa_virt_nodes[] = {
- [MASTER_IPA_CORE] = &ipa_core_master,
- [SLAVE_IPA_CORE] = &ipa_core_slave,
-};
-
-static const struct qcom_icc_desc sm8150_ipa_virt = {
- .nodes = ipa_virt_nodes,
- .num_nodes = ARRAY_SIZE(ipa_virt_nodes),
- .bcms = ipa_virt_bcms,
- .num_bcms = ARRAY_SIZE(ipa_virt_bcms),
-};
-
static struct qcom_icc_bcm * const mc_virt_bcms[] = {
&bcm_acv,
&bcm_mc0,
@@ -517,8 +498,6 @@ static const struct of_device_id qnoc_of_match[] = {
.data = &sm8150_dc_noc},
{ .compatible = "qcom,sm8150-gem-noc",
.data = &sm8150_gem_noc},
- { .compatible = "qcom,sm8150-ipa-virt",
- .data = &sm8150_ipa_virt},
{ .compatible = "qcom,sm8150-mc-virt",
.data = &sm8150_mc_virt},
{ .compatible = "qcom,sm8150-mmss-noc",
diff --git a/drivers/interconnect/qcom/sm8150.h b/drivers/interconnect/qcom/sm8150.h
index 97996f64d799..023161681fb8 100644
--- a/drivers/interconnect/qcom/sm8150.h
+++ b/drivers/interconnect/qcom/sm8150.h
@@ -35,7 +35,7 @@
#define SM8150_MASTER_GPU_TCU 24
#define SM8150_MASTER_GRAPHICS_3D 25
#define SM8150_MASTER_IPA 26
-#define SM8150_MASTER_IPA_CORE 27
+/* 27 was used by SLAVE_IPA_CORE, now represented as RPMh clock */
#define SM8150_MASTER_LLCC 28
#define SM8150_MASTER_MDP_PORT0 29
#define SM8150_MASTER_MDP_PORT1 30
@@ -94,7 +94,7 @@
#define SM8150_SLAVE_GRAPHICS_3D_CFG 83
#define SM8150_SLAVE_IMEM_CFG 84
#define SM8150_SLAVE_IPA_CFG 85
-#define SM8150_SLAVE_IPA_CORE 86
+/* 86 was used by SLAVE_IPA_CORE, now represented as RPMh clock */
#define SM8150_SLAVE_LLCC 87
#define SM8150_SLAVE_LLCC_CFG 88
#define SM8150_SLAVE_MNOC_HF_MEM_NOC 89
diff --git a/drivers/interconnect/qcom/sm8250.c b/drivers/interconnect/qcom/sm8250.c
index 5cdb058fa095..e3bb008cb219 100644
--- a/drivers/interconnect/qcom/sm8250.c
+++ b/drivers/interconnect/qcom/sm8250.c
@@ -51,7 +51,6 @@ DEFINE_QNODE(qnm_mnoc_sf, SM8250_MASTER_MNOC_SF_MEM_NOC, 2, 32, SM8250_SLAVE_LLC
DEFINE_QNODE(qnm_pcie, SM8250_MASTER_ANOC_PCIE_GEM_NOC, 1, 16, SM8250_SLAVE_LLCC, SM8250_SLAVE_GEM_NOC_SNOC);
DEFINE_QNODE(qnm_snoc_gc, SM8250_MASTER_SNOC_GC_MEM_NOC, 1, 8, SM8250_SLAVE_LLCC);
DEFINE_QNODE(qnm_snoc_sf, SM8250_MASTER_SNOC_SF_MEM_NOC, 1, 16, SM8250_SLAVE_LLCC, SM8250_SLAVE_GEM_NOC_SNOC, SM8250_SLAVE_MEM_NOC_PCIE_SNOC);
-DEFINE_QNODE(ipa_core_master, SM8250_MASTER_IPA_CORE, 1, 8, SM8250_SLAVE_IPA_CORE);
DEFINE_QNODE(llcc_mc, SM8250_MASTER_LLCC, 4, 4, SM8250_SLAVE_EBI_CH0);
DEFINE_QNODE(qhm_mnoc_cfg, SM8250_MASTER_CNOC_MNOC_CFG, 1, 4, SM8250_SLAVE_SERVICE_MNOC);
DEFINE_QNODE(qnm_camnoc_hf, SM8250_MASTER_CAMNOC_HF, 2, 32, SM8250_SLAVE_MNOC_HF_MEM_NOC);
@@ -138,7 +137,6 @@ DEFINE_QNODE(qns_sys_pcie, SM8250_SLAVE_MEM_NOC_PCIE_SNOC, 1, 8, SM8250_MASTER_G
DEFINE_QNODE(srvc_even_gemnoc, SM8250_SLAVE_SERVICE_GEM_NOC_1, 1, 4);
DEFINE_QNODE(srvc_odd_gemnoc, SM8250_SLAVE_SERVICE_GEM_NOC_2, 1, 4);
DEFINE_QNODE(srvc_sys_gemnoc, SM8250_SLAVE_SERVICE_GEM_NOC, 1, 4);
-DEFINE_QNODE(ipa_core_slave, SM8250_SLAVE_IPA_CORE, 1, 8);
DEFINE_QNODE(ebi, SM8250_SLAVE_EBI_CH0, 4, 4);
DEFINE_QNODE(qns_mem_noc_hf, SM8250_SLAVE_MNOC_HF_MEM_NOC, 2, 32, SM8250_MASTER_MNOC_HF_MEM_NOC);
DEFINE_QNODE(qns_mem_noc_sf, SM8250_SLAVE_MNOC_SF_MEM_NOC, 2, 32, SM8250_MASTER_MNOC_SF_MEM_NOC);
@@ -171,7 +169,6 @@ DEFINE_QBCM(bcm_mc0, "MC0", true, &ebi);
DEFINE_QBCM(bcm_sh0, "SH0", true, &qns_llcc);
DEFINE_QBCM(bcm_mm0, "MM0", true, &qns_mem_noc_hf);
DEFINE_QBCM(bcm_ce0, "CE0", false, &qxm_crypto);
-DEFINE_QBCM(bcm_ip0, "IP0", false, &ipa_core_slave);
DEFINE_QBCM(bcm_mm1, "MM1", false, &qnm_camnoc_hf, &qxm_mdp0, &qxm_mdp1);
DEFINE_QBCM(bcm_sh2, "SH2", false, &alm_gpu_tcu, &alm_sys_tcu);
DEFINE_QBCM(bcm_mm2, "MM2", false, &qns_mem_noc_sf);
@@ -386,22 +383,6 @@ static const struct qcom_icc_desc sm8250_gem_noc = {
.num_bcms = ARRAY_SIZE(gem_noc_bcms),
};
-static struct qcom_icc_bcm * const ipa_virt_bcms[] = {
- &bcm_ip0,
-};
-
-static struct qcom_icc_node * const ipa_virt_nodes[] = {
- [MASTER_IPA_CORE] = &ipa_core_master,
- [SLAVE_IPA_CORE] = &ipa_core_slave,
-};
-
-static const struct qcom_icc_desc sm8250_ipa_virt = {
- .nodes = ipa_virt_nodes,
- .num_nodes = ARRAY_SIZE(ipa_virt_nodes),
- .bcms = ipa_virt_bcms,
- .num_bcms = ARRAY_SIZE(ipa_virt_bcms),
-};
-
static struct qcom_icc_bcm * const mc_virt_bcms[] = {
&bcm_acv,
&bcm_mc0,
@@ -531,8 +512,6 @@ static const struct of_device_id qnoc_of_match[] = {
.data = &sm8250_dc_noc},
{ .compatible = "qcom,sm8250-gem-noc",
.data = &sm8250_gem_noc},
- { .compatible = "qcom,sm8250-ipa-virt",
- .data = &sm8250_ipa_virt},
{ .compatible = "qcom,sm8250-mc-virt",
.data = &sm8250_mc_virt},
{ .compatible = "qcom,sm8250-mmss-noc",
diff --git a/drivers/interconnect/qcom/sm8250.h b/drivers/interconnect/qcom/sm8250.h
index b31fb431a20f..e3fc56bc7ca0 100644
--- a/drivers/interconnect/qcom/sm8250.h
+++ b/drivers/interconnect/qcom/sm8250.h
@@ -31,7 +31,7 @@
#define SM8250_MASTER_GPU_TCU 20
#define SM8250_MASTER_GRAPHICS_3D 21
#define SM8250_MASTER_IPA 22
-#define SM8250_MASTER_IPA_CORE 23
+/* 23 was used by MASTER_IPA_CORE, now represented as RPMh clock */
#define SM8250_MASTER_LLCC 24
#define SM8250_MASTER_MDP_PORT0 25
#define SM8250_MASTER_MDP_PORT1 26
@@ -92,7 +92,7 @@
#define SM8250_SLAVE_GRAPHICS_3D_CFG 81
#define SM8250_SLAVE_IMEM_CFG 82
#define SM8250_SLAVE_IPA_CFG 83
-#define SM8250_SLAVE_IPA_CORE 84
+/* 84 was used by SLAVE_IPA_CORE, now represented as RPMh clock */
#define SM8250_SLAVE_IPC_ROUTER_CFG 85
#define SM8250_SLAVE_ISENSE_CFG 86
#define SM8250_SLAVE_LLCC 87
diff --git a/drivers/interconnect/qcom/sm8550.c b/drivers/interconnect/qcom/sm8550.c
new file mode 100644
index 000000000000..54fa027ab961
--- /dev/null
+++ b/drivers/interconnect/qcom/sm8550.c
@@ -0,0 +1,2318 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022, Linaro Limited
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/interconnect.h>
+#include <linux/interconnect-provider.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <dt-bindings/interconnect/qcom,sm8550-rpmh.h>
+
+#include "bcm-voter.h"
+#include "icc-common.h"
+#include "icc-rpmh.h"
+#include "sm8550.h"
+
+static struct qcom_icc_node qhm_qspi = {
+ .name = "qhm_qspi",
+ .id = SM8550_MASTER_QSPI_0,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8550_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node qhm_qup1 = {
+ .name = "qhm_qup1",
+ .id = SM8550_MASTER_QUP_1,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8550_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_sdc4 = {
+ .name = "xm_sdc4",
+ .id = SM8550_MASTER_SDCC_4,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8550_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_ufs_mem = {
+ .name = "xm_ufs_mem",
+ .id = SM8550_MASTER_UFS_MEM,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8550_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_usb3_0 = {
+ .name = "xm_usb3_0",
+ .id = SM8550_MASTER_USB3_0,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8550_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node qhm_qdss_bam = {
+ .name = "qhm_qdss_bam",
+ .id = SM8550_MASTER_QDSS_BAM,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8550_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node qhm_qup2 = {
+ .name = "qhm_qup2",
+ .id = SM8550_MASTER_QUP_2,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8550_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node qxm_crypto = {
+ .name = "qxm_crypto",
+ .id = SM8550_MASTER_CRYPTO,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8550_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node qxm_ipa = {
+ .name = "qxm_ipa",
+ .id = SM8550_MASTER_IPA,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8550_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node qxm_sp = {
+ .name = "qxm_sp",
+ .id = SM8550_MASTER_SP,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8550_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_qdss_etr_0 = {
+ .name = "xm_qdss_etr_0",
+ .id = SM8550_MASTER_QDSS_ETR,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8550_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_qdss_etr_1 = {
+ .name = "xm_qdss_etr_1",
+ .id = SM8550_MASTER_QDSS_ETR_1,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8550_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_sdc2 = {
+ .name = "xm_sdc2",
+ .id = SM8550_MASTER_SDCC_2,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8550_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node qup0_core_master = {
+ .name = "qup0_core_master",
+ .id = SM8550_MASTER_QUP_CORE_0,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8550_SLAVE_QUP_CORE_0 },
+};
+
+static struct qcom_icc_node qup1_core_master = {
+ .name = "qup1_core_master",
+ .id = SM8550_MASTER_QUP_CORE_1,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8550_SLAVE_QUP_CORE_1 },
+};
+
+static struct qcom_icc_node qup2_core_master = {
+ .name = "qup2_core_master",
+ .id = SM8550_MASTER_QUP_CORE_2,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8550_SLAVE_QUP_CORE_2 },
+};
+
+static struct qcom_icc_node qsm_cfg = {
+ .name = "qsm_cfg",
+ .id = SM8550_MASTER_CNOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 44,
+ .links = { SM8550_SLAVE_AHB2PHY_SOUTH, SM8550_SLAVE_AHB2PHY_NORTH,
+ SM8550_SLAVE_APPSS, SM8550_SLAVE_CAMERA_CFG,
+ SM8550_SLAVE_CLK_CTL, SM8550_SLAVE_RBCPR_CX_CFG,
+ SM8550_SLAVE_RBCPR_MMCX_CFG, SM8550_SLAVE_RBCPR_MXA_CFG,
+ SM8550_SLAVE_RBCPR_MXC_CFG, SM8550_SLAVE_CPR_NSPCX,
+ SM8550_SLAVE_CRYPTO_0_CFG, SM8550_SLAVE_CX_RDPM,
+ SM8550_SLAVE_DISPLAY_CFG, SM8550_SLAVE_GFX3D_CFG,
+ SM8550_SLAVE_I2C, SM8550_SLAVE_IMEM_CFG,
+ SM8550_SLAVE_IPA_CFG, SM8550_SLAVE_IPC_ROUTER_CFG,
+ SM8550_SLAVE_CNOC_MSS, SM8550_SLAVE_MX_RDPM,
+ SM8550_SLAVE_PCIE_0_CFG, SM8550_SLAVE_PCIE_1_CFG,
+ SM8550_SLAVE_PDM, SM8550_SLAVE_PIMEM_CFG,
+ SM8550_SLAVE_PRNG, SM8550_SLAVE_QDSS_CFG,
+ SM8550_SLAVE_QSPI_0, SM8550_SLAVE_QUP_1,
+ SM8550_SLAVE_QUP_2, SM8550_SLAVE_SDCC_2,
+ SM8550_SLAVE_SDCC_4, SM8550_SLAVE_SPSS_CFG,
+ SM8550_SLAVE_TCSR, SM8550_SLAVE_TLMM,
+ SM8550_SLAVE_UFS_MEM_CFG, SM8550_SLAVE_USB3_0,
+ SM8550_SLAVE_VENUS_CFG, SM8550_SLAVE_VSENSE_CTRL_CFG,
+ SM8550_SLAVE_LPASS_QTB_CFG, SM8550_SLAVE_CNOC_MNOC_CFG,
+ SM8550_SLAVE_NSP_QTB_CFG, SM8550_SLAVE_PCIE_ANOC_CFG,
+ SM8550_SLAVE_QDSS_STM, SM8550_SLAVE_TCU },
+};
+
+static struct qcom_icc_node qnm_gemnoc_cnoc = {
+ .name = "qnm_gemnoc_cnoc",
+ .id = SM8550_MASTER_GEM_NOC_CNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 6,
+ .links = { SM8550_SLAVE_AOSS, SM8550_SLAVE_TME_CFG,
+ SM8550_SLAVE_CNOC_CFG, SM8550_SLAVE_DDRSS_CFG,
+ SM8550_SLAVE_BOOT_IMEM, SM8550_SLAVE_IMEM },
+};
+
+static struct qcom_icc_node qnm_gemnoc_pcie = {
+ .name = "qnm_gemnoc_pcie",
+ .id = SM8550_MASTER_GEM_NOC_PCIE_SNOC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 2,
+ .links = { SM8550_SLAVE_PCIE_0, SM8550_SLAVE_PCIE_1 },
+};
+
+static struct qcom_icc_node alm_gpu_tcu = {
+ .name = "alm_gpu_tcu",
+ .id = SM8550_MASTER_GPU_TCU,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 2,
+ .links = { SM8550_SLAVE_GEM_NOC_CNOC, SM8550_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node alm_sys_tcu = {
+ .name = "alm_sys_tcu",
+ .id = SM8550_MASTER_SYS_TCU,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 2,
+ .links = { SM8550_SLAVE_GEM_NOC_CNOC, SM8550_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node chm_apps = {
+ .name = "chm_apps",
+ .id = SM8550_MASTER_APPSS_PROC,
+ .channels = 3,
+ .buswidth = 32,
+ .num_links = 3,
+ .links = { SM8550_SLAVE_GEM_NOC_CNOC, SM8550_SLAVE_LLCC,
+ SM8550_SLAVE_MEM_NOC_PCIE_SNOC },
+};
+
+static struct qcom_icc_node qnm_gpu = {
+ .name = "qnm_gpu",
+ .id = SM8550_MASTER_GFX3D,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 2,
+ .links = { SM8550_SLAVE_GEM_NOC_CNOC, SM8550_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node qnm_lpass_gemnoc = {
+ .name = "qnm_lpass_gemnoc",
+ .id = SM8550_MASTER_LPASS_GEM_NOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 3,
+ .links = { SM8550_SLAVE_GEM_NOC_CNOC, SM8550_SLAVE_LLCC,
+ SM8550_SLAVE_MEM_NOC_PCIE_SNOC },
+};
+
+static struct qcom_icc_node qnm_mdsp = {
+ .name = "qnm_mdsp",
+ .id = SM8550_MASTER_MSS_PROC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 3,
+ .links = { SM8550_SLAVE_GEM_NOC_CNOC, SM8550_SLAVE_LLCC,
+ SM8550_SLAVE_MEM_NOC_PCIE_SNOC },
+};
+
+static struct qcom_icc_node qnm_mnoc_hf = {
+ .name = "qnm_mnoc_hf",
+ .id = SM8550_MASTER_MNOC_HF_MEM_NOC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 2,
+ .links = { SM8550_SLAVE_GEM_NOC_CNOC, SM8550_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node qnm_mnoc_sf = {
+ .name = "qnm_mnoc_sf",
+ .id = SM8550_MASTER_MNOC_SF_MEM_NOC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 2,
+ .links = { SM8550_SLAVE_GEM_NOC_CNOC, SM8550_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node qnm_nsp_gemnoc = {
+ .name = "qnm_nsp_gemnoc",
+ .id = SM8550_MASTER_COMPUTE_NOC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 2,
+ .links = { SM8550_SLAVE_GEM_NOC_CNOC, SM8550_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node qnm_pcie = {
+ .name = "qnm_pcie",
+ .id = SM8550_MASTER_ANOC_PCIE_GEM_NOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 2,
+ .links = { SM8550_SLAVE_GEM_NOC_CNOC, SM8550_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node qnm_snoc_gc = {
+ .name = "qnm_snoc_gc",
+ .id = SM8550_MASTER_SNOC_GC_MEM_NOC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8550_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node qnm_snoc_sf = {
+ .name = "qnm_snoc_sf",
+ .id = SM8550_MASTER_SNOC_SF_MEM_NOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 3,
+ .links = { SM8550_SLAVE_GEM_NOC_CNOC, SM8550_SLAVE_LLCC,
+ SM8550_SLAVE_MEM_NOC_PCIE_SNOC },
+};
+
+static struct qcom_icc_node qnm_lpiaon_noc = {
+ .name = "qnm_lpiaon_noc",
+ .id = SM8550_MASTER_LPIAON_NOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8550_SLAVE_LPASS_GEM_NOC },
+};
+
+static struct qcom_icc_node qnm_lpass_lpinoc = {
+ .name = "qnm_lpass_lpinoc",
+ .id = SM8550_MASTER_LPASS_LPINOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8550_SLAVE_LPIAON_NOC_LPASS_AG_NOC },
+};
+
+static struct qcom_icc_node qxm_lpinoc_dsp_axim = {
+ .name = "qxm_lpinoc_dsp_axim",
+ .id = SM8550_MASTER_LPASS_PROC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8550_SLAVE_LPICX_NOC_LPIAON_NOC },
+};
+
+static struct qcom_icc_node llcc_mc = {
+ .name = "llcc_mc",
+ .id = SM8550_MASTER_LLCC,
+ .channels = 4,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8550_SLAVE_EBI1 },
+};
+
+static struct qcom_icc_node qnm_camnoc_hf = {
+ .name = "qnm_camnoc_hf",
+ .id = SM8550_MASTER_CAMNOC_HF,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8550_SLAVE_MNOC_HF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_camnoc_icp = {
+ .name = "qnm_camnoc_icp",
+ .id = SM8550_MASTER_CAMNOC_ICP,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8550_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_camnoc_sf = {
+ .name = "qnm_camnoc_sf",
+ .id = SM8550_MASTER_CAMNOC_SF,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8550_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_mdp = {
+ .name = "qnm_mdp",
+ .id = SM8550_MASTER_MDP,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8550_SLAVE_MNOC_HF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_vapss_hcp = {
+ .name = "qnm_vapss_hcp",
+ .id = SM8550_MASTER_CDSP_HCP,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8550_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_video = {
+ .name = "qnm_video",
+ .id = SM8550_MASTER_VIDEO,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8550_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_video_cv_cpu = {
+ .name = "qnm_video_cv_cpu",
+ .id = SM8550_MASTER_VIDEO_CV_PROC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8550_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_video_cvp = {
+ .name = "qnm_video_cvp",
+ .id = SM8550_MASTER_VIDEO_PROC,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8550_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_video_v_cpu = {
+ .name = "qnm_video_v_cpu",
+ .id = SM8550_MASTER_VIDEO_V_PROC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8550_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qsm_mnoc_cfg = {
+ .name = "qsm_mnoc_cfg",
+ .id = SM8550_MASTER_CNOC_MNOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8550_SLAVE_SERVICE_MNOC },
+};
+
+static struct qcom_icc_node qxm_nsp = {
+ .name = "qxm_nsp",
+ .id = SM8550_MASTER_CDSP_PROC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8550_SLAVE_CDSP_MEM_NOC },
+};
+
+static struct qcom_icc_node qsm_pcie_anoc_cfg = {
+ .name = "qsm_pcie_anoc_cfg",
+ .id = SM8550_MASTER_PCIE_ANOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8550_SLAVE_SERVICE_PCIE_ANOC },
+};
+
+static struct qcom_icc_node xm_pcie3_0 = {
+ .name = "xm_pcie3_0",
+ .id = SM8550_MASTER_PCIE_0,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8550_SLAVE_ANOC_PCIE_GEM_NOC },
+};
+
+static struct qcom_icc_node xm_pcie3_1 = {
+ .name = "xm_pcie3_1",
+ .id = SM8550_MASTER_PCIE_1,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8550_SLAVE_ANOC_PCIE_GEM_NOC },
+};
+
+static struct qcom_icc_node qhm_gic = {
+ .name = "qhm_gic",
+ .id = SM8550_MASTER_GIC_AHB,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8550_SLAVE_SNOC_GEM_NOC_SF },
+};
+
+static struct qcom_icc_node qnm_aggre1_noc = {
+ .name = "qnm_aggre1_noc",
+ .id = SM8550_MASTER_A1NOC_SNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8550_SLAVE_SNOC_GEM_NOC_SF },
+};
+
+static struct qcom_icc_node qnm_aggre2_noc = {
+ .name = "qnm_aggre2_noc",
+ .id = SM8550_MASTER_A2NOC_SNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8550_SLAVE_SNOC_GEM_NOC_SF },
+};
+
+static struct qcom_icc_node xm_gic = {
+ .name = "xm_gic",
+ .id = SM8550_MASTER_GIC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8550_SLAVE_SNOC_GEM_NOC_GC },
+};
+
+static struct qcom_icc_node qnm_mnoc_hf_disp = {
+ .name = "qnm_mnoc_hf_disp",
+ .id = SM8550_MASTER_MNOC_HF_MEM_NOC_DISP,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8550_SLAVE_LLCC_DISP },
+};
+
+static struct qcom_icc_node qnm_pcie_disp = {
+ .name = "qnm_pcie_disp",
+ .id = SM8550_MASTER_ANOC_PCIE_GEM_NOC_DISP,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8550_SLAVE_LLCC_DISP },
+};
+
+static struct qcom_icc_node llcc_mc_disp = {
+ .name = "llcc_mc_disp",
+ .id = SM8550_MASTER_LLCC_DISP,
+ .channels = 4,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8550_SLAVE_EBI1_DISP },
+};
+
+static struct qcom_icc_node qnm_mdp_disp = {
+ .name = "qnm_mdp_disp",
+ .id = SM8550_MASTER_MDP_DISP,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8550_SLAVE_MNOC_HF_MEM_NOC_DISP },
+};
+
+static struct qcom_icc_node qnm_mnoc_hf_cam_ife_0 = {
+ .name = "qnm_mnoc_hf_cam_ife_0",
+ .id = SM8550_MASTER_MNOC_HF_MEM_NOC_CAM_IFE_0,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8550_SLAVE_LLCC_CAM_IFE_0 },
+};
+
+static struct qcom_icc_node qnm_mnoc_sf_cam_ife_0 = {
+ .name = "qnm_mnoc_sf_cam_ife_0",
+ .id = SM8550_MASTER_MNOC_SF_MEM_NOC_CAM_IFE_0,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8550_SLAVE_LLCC_CAM_IFE_0 },
+};
+
+static struct qcom_icc_node qnm_pcie_cam_ife_0 = {
+ .name = "qnm_pcie_cam_ife_0",
+ .id = SM8550_MASTER_ANOC_PCIE_GEM_NOC_CAM_IFE_0,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8550_SLAVE_LLCC_CAM_IFE_0 },
+};
+
+static struct qcom_icc_node llcc_mc_cam_ife_0 = {
+ .name = "llcc_mc_cam_ife_0",
+ .id = SM8550_MASTER_LLCC_CAM_IFE_0,
+ .channels = 4,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8550_SLAVE_EBI1_CAM_IFE_0 },
+};
+
+static struct qcom_icc_node qnm_camnoc_hf_cam_ife_0 = {
+ .name = "qnm_camnoc_hf_cam_ife_0",
+ .id = SM8550_MASTER_CAMNOC_HF_CAM_IFE_0,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8550_SLAVE_MNOC_HF_MEM_NOC_CAM_IFE_0 },
+};
+
+static struct qcom_icc_node qnm_camnoc_icp_cam_ife_0 = {
+ .name = "qnm_camnoc_icp_cam_ife_0",
+ .id = SM8550_MASTER_CAMNOC_ICP_CAM_IFE_0,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8550_SLAVE_MNOC_SF_MEM_NOC_CAM_IFE_0 },
+};
+
+static struct qcom_icc_node qnm_camnoc_sf_cam_ife_0 = {
+ .name = "qnm_camnoc_sf_cam_ife_0",
+ .id = SM8550_MASTER_CAMNOC_SF_CAM_IFE_0,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8550_SLAVE_MNOC_SF_MEM_NOC_CAM_IFE_0 },
+};
+
+static struct qcom_icc_node qnm_mnoc_hf_cam_ife_1 = {
+ .name = "qnm_mnoc_hf_cam_ife_1",
+ .id = SM8550_MASTER_MNOC_HF_MEM_NOC_CAM_IFE_1,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8550_SLAVE_LLCC_CAM_IFE_1 },
+};
+
+static struct qcom_icc_node qnm_mnoc_sf_cam_ife_1 = {
+ .name = "qnm_mnoc_sf_cam_ife_1",
+ .id = SM8550_MASTER_MNOC_SF_MEM_NOC_CAM_IFE_1,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8550_SLAVE_LLCC_CAM_IFE_1 },
+};
+
+static struct qcom_icc_node qnm_pcie_cam_ife_1 = {
+ .name = "qnm_pcie_cam_ife_1",
+ .id = SM8550_MASTER_ANOC_PCIE_GEM_NOC_CAM_IFE_1,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8550_SLAVE_LLCC_CAM_IFE_1 },
+};
+
+static struct qcom_icc_node llcc_mc_cam_ife_1 = {
+ .name = "llcc_mc_cam_ife_1",
+ .id = SM8550_MASTER_LLCC_CAM_IFE_1,
+ .channels = 4,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8550_SLAVE_EBI1_CAM_IFE_1 },
+};
+
+static struct qcom_icc_node qnm_camnoc_hf_cam_ife_1 = {
+ .name = "qnm_camnoc_hf_cam_ife_1",
+ .id = SM8550_MASTER_CAMNOC_HF_CAM_IFE_1,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8550_SLAVE_MNOC_HF_MEM_NOC_CAM_IFE_1 },
+};
+
+static struct qcom_icc_node qnm_camnoc_icp_cam_ife_1 = {
+ .name = "qnm_camnoc_icp_cam_ife_1",
+ .id = SM8550_MASTER_CAMNOC_ICP_CAM_IFE_1,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8550_SLAVE_MNOC_SF_MEM_NOC_CAM_IFE_1 },
+};
+
+static struct qcom_icc_node qnm_camnoc_sf_cam_ife_1 = {
+ .name = "qnm_camnoc_sf_cam_ife_1",
+ .id = SM8550_MASTER_CAMNOC_SF_CAM_IFE_1,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8550_SLAVE_MNOC_SF_MEM_NOC_CAM_IFE_1 },
+};
+
+static struct qcom_icc_node qnm_mnoc_hf_cam_ife_2 = {
+ .name = "qnm_mnoc_hf_cam_ife_2",
+ .id = SM8550_MASTER_MNOC_HF_MEM_NOC_CAM_IFE_2,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8550_SLAVE_LLCC_CAM_IFE_2 },
+};
+
+static struct qcom_icc_node qnm_mnoc_sf_cam_ife_2 = {
+ .name = "qnm_mnoc_sf_cam_ife_2",
+ .id = SM8550_MASTER_MNOC_SF_MEM_NOC_CAM_IFE_2,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8550_SLAVE_LLCC_CAM_IFE_2 },
+};
+
+static struct qcom_icc_node qnm_pcie_cam_ife_2 = {
+ .name = "qnm_pcie_cam_ife_2",
+ .id = SM8550_MASTER_ANOC_PCIE_GEM_NOC_CAM_IFE_2,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8550_SLAVE_LLCC_CAM_IFE_2 },
+};
+
+static struct qcom_icc_node llcc_mc_cam_ife_2 = {
+ .name = "llcc_mc_cam_ife_2",
+ .id = SM8550_MASTER_LLCC_CAM_IFE_2,
+ .channels = 4,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8550_SLAVE_EBI1_CAM_IFE_2 },
+};
+
+static struct qcom_icc_node qnm_camnoc_hf_cam_ife_2 = {
+ .name = "qnm_camnoc_hf_cam_ife_2",
+ .id = SM8550_MASTER_CAMNOC_HF_CAM_IFE_2,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8550_SLAVE_MNOC_HF_MEM_NOC_CAM_IFE_2 },
+};
+
+static struct qcom_icc_node qnm_camnoc_icp_cam_ife_2 = {
+ .name = "qnm_camnoc_icp_cam_ife_2",
+ .id = SM8550_MASTER_CAMNOC_ICP_CAM_IFE_2,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8550_SLAVE_MNOC_SF_MEM_NOC_CAM_IFE_2 },
+};
+
+static struct qcom_icc_node qnm_camnoc_sf_cam_ife_2 = {
+ .name = "qnm_camnoc_sf_cam_ife_2",
+ .id = SM8550_MASTER_CAMNOC_SF_CAM_IFE_2,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8550_SLAVE_MNOC_SF_MEM_NOC_CAM_IFE_2 },
+};
+
+static struct qcom_icc_node qns_a1noc_snoc = {
+ .name = "qns_a1noc_snoc",
+ .id = SM8550_SLAVE_A1NOC_SNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8550_MASTER_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node qns_a2noc_snoc = {
+ .name = "qns_a2noc_snoc",
+ .id = SM8550_SLAVE_A2NOC_SNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8550_MASTER_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node qup0_core_slave = {
+ .name = "qup0_core_slave",
+ .id = SM8550_SLAVE_QUP_CORE_0,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qup1_core_slave = {
+ .name = "qup1_core_slave",
+ .id = SM8550_SLAVE_QUP_CORE_1,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qup2_core_slave = {
+ .name = "qup2_core_slave",
+ .id = SM8550_SLAVE_QUP_CORE_2,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_ahb2phy0 = {
+ .name = "qhs_ahb2phy0",
+ .id = SM8550_SLAVE_AHB2PHY_SOUTH,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_ahb2phy1 = {
+ .name = "qhs_ahb2phy1",
+ .id = SM8550_SLAVE_AHB2PHY_NORTH,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_apss = {
+ .name = "qhs_apss",
+ .id = SM8550_SLAVE_APPSS,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_camera_cfg = {
+ .name = "qhs_camera_cfg",
+ .id = SM8550_SLAVE_CAMERA_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_clk_ctl = {
+ .name = "qhs_clk_ctl",
+ .id = SM8550_SLAVE_CLK_CTL,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_cpr_cx = {
+ .name = "qhs_cpr_cx",
+ .id = SM8550_SLAVE_RBCPR_CX_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_cpr_mmcx = {
+ .name = "qhs_cpr_mmcx",
+ .id = SM8550_SLAVE_RBCPR_MMCX_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_cpr_mxa = {
+ .name = "qhs_cpr_mxa",
+ .id = SM8550_SLAVE_RBCPR_MXA_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_cpr_mxc = {
+ .name = "qhs_cpr_mxc",
+ .id = SM8550_SLAVE_RBCPR_MXC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_cpr_nspcx = {
+ .name = "qhs_cpr_nspcx",
+ .id = SM8550_SLAVE_CPR_NSPCX,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_crypto0_cfg = {
+ .name = "qhs_crypto0_cfg",
+ .id = SM8550_SLAVE_CRYPTO_0_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_cx_rdpm = {
+ .name = "qhs_cx_rdpm",
+ .id = SM8550_SLAVE_CX_RDPM,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_display_cfg = {
+ .name = "qhs_display_cfg",
+ .id = SM8550_SLAVE_DISPLAY_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_gpuss_cfg = {
+ .name = "qhs_gpuss_cfg",
+ .id = SM8550_SLAVE_GFX3D_CFG,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_i2c = {
+ .name = "qhs_i2c",
+ .id = SM8550_SLAVE_I2C,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_imem_cfg = {
+ .name = "qhs_imem_cfg",
+ .id = SM8550_SLAVE_IMEM_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_ipa = {
+ .name = "qhs_ipa",
+ .id = SM8550_SLAVE_IPA_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_ipc_router = {
+ .name = "qhs_ipc_router",
+ .id = SM8550_SLAVE_IPC_ROUTER_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_mss_cfg = {
+ .name = "qhs_mss_cfg",
+ .id = SM8550_SLAVE_CNOC_MSS,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_mx_rdpm = {
+ .name = "qhs_mx_rdpm",
+ .id = SM8550_SLAVE_MX_RDPM,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_pcie0_cfg = {
+ .name = "qhs_pcie0_cfg",
+ .id = SM8550_SLAVE_PCIE_0_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_pcie1_cfg = {
+ .name = "qhs_pcie1_cfg",
+ .id = SM8550_SLAVE_PCIE_1_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_pdm = {
+ .name = "qhs_pdm",
+ .id = SM8550_SLAVE_PDM,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_pimem_cfg = {
+ .name = "qhs_pimem_cfg",
+ .id = SM8550_SLAVE_PIMEM_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_prng = {
+ .name = "qhs_prng",
+ .id = SM8550_SLAVE_PRNG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_qdss_cfg = {
+ .name = "qhs_qdss_cfg",
+ .id = SM8550_SLAVE_QDSS_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_qspi = {
+ .name = "qhs_qspi",
+ .id = SM8550_SLAVE_QSPI_0,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_qup1 = {
+ .name = "qhs_qup1",
+ .id = SM8550_SLAVE_QUP_1,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_qup2 = {
+ .name = "qhs_qup2",
+ .id = SM8550_SLAVE_QUP_2,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_sdc2 = {
+ .name = "qhs_sdc2",
+ .id = SM8550_SLAVE_SDCC_2,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_sdc4 = {
+ .name = "qhs_sdc4",
+ .id = SM8550_SLAVE_SDCC_4,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_spss_cfg = {
+ .name = "qhs_spss_cfg",
+ .id = SM8550_SLAVE_SPSS_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_tcsr = {
+ .name = "qhs_tcsr",
+ .id = SM8550_SLAVE_TCSR,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_tlmm = {
+ .name = "qhs_tlmm",
+ .id = SM8550_SLAVE_TLMM,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_ufs_mem_cfg = {
+ .name = "qhs_ufs_mem_cfg",
+ .id = SM8550_SLAVE_UFS_MEM_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_usb3_0 = {
+ .name = "qhs_usb3_0",
+ .id = SM8550_SLAVE_USB3_0,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_venus_cfg = {
+ .name = "qhs_venus_cfg",
+ .id = SM8550_SLAVE_VENUS_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_vsense_ctrl_cfg = {
+ .name = "qhs_vsense_ctrl_cfg",
+ .id = SM8550_SLAVE_VSENSE_CTRL_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qss_lpass_qtb_cfg = {
+ .name = "qss_lpass_qtb_cfg",
+ .id = SM8550_SLAVE_LPASS_QTB_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qss_mnoc_cfg = {
+ .name = "qss_mnoc_cfg",
+ .id = SM8550_SLAVE_CNOC_MNOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8550_MASTER_CNOC_MNOC_CFG },
+};
+
+static struct qcom_icc_node qss_nsp_qtb_cfg = {
+ .name = "qss_nsp_qtb_cfg",
+ .id = SM8550_SLAVE_NSP_QTB_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qss_pcie_anoc_cfg = {
+ .name = "qss_pcie_anoc_cfg",
+ .id = SM8550_SLAVE_PCIE_ANOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8550_MASTER_PCIE_ANOC_CFG },
+};
+
+static struct qcom_icc_node xs_qdss_stm = {
+ .name = "xs_qdss_stm",
+ .id = SM8550_SLAVE_QDSS_STM,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node xs_sys_tcu_cfg = {
+ .name = "xs_sys_tcu_cfg",
+ .id = SM8550_SLAVE_TCU,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_aoss = {
+ .name = "qhs_aoss",
+ .id = SM8550_SLAVE_AOSS,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_tme_cfg = {
+ .name = "qhs_tme_cfg",
+ .id = SM8550_SLAVE_TME_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qss_cfg = {
+ .name = "qss_cfg",
+ .id = SM8550_SLAVE_CNOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8550_MASTER_CNOC_CFG },
+};
+
+static struct qcom_icc_node qss_ddrss_cfg = {
+ .name = "qss_ddrss_cfg",
+ .id = SM8550_SLAVE_DDRSS_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qxs_boot_imem = {
+ .name = "qxs_boot_imem",
+ .id = SM8550_SLAVE_BOOT_IMEM,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qxs_imem = {
+ .name = "qxs_imem",
+ .id = SM8550_SLAVE_IMEM,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node xs_pcie_0 = {
+ .name = "xs_pcie_0",
+ .id = SM8550_SLAVE_PCIE_0,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node xs_pcie_1 = {
+ .name = "xs_pcie_1",
+ .id = SM8550_SLAVE_PCIE_1,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qns_gem_noc_cnoc = {
+ .name = "qns_gem_noc_cnoc",
+ .id = SM8550_SLAVE_GEM_NOC_CNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8550_MASTER_GEM_NOC_CNOC },
+};
+
+static struct qcom_icc_node qns_llcc = {
+ .name = "qns_llcc",
+ .id = SM8550_SLAVE_LLCC,
+ .channels = 4,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8550_MASTER_LLCC },
+};
+
+static struct qcom_icc_node qns_pcie = {
+ .name = "qns_pcie",
+ .id = SM8550_SLAVE_MEM_NOC_PCIE_SNOC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8550_MASTER_GEM_NOC_PCIE_SNOC },
+};
+
+static struct qcom_icc_node qns_lpass_ag_noc_gemnoc = {
+ .name = "qns_lpass_ag_noc_gemnoc",
+ .id = SM8550_SLAVE_LPASS_GEM_NOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8550_MASTER_LPASS_GEM_NOC },
+};
+
+static struct qcom_icc_node qns_lpass_aggnoc = {
+ .name = "qns_lpass_aggnoc",
+ .id = SM8550_SLAVE_LPIAON_NOC_LPASS_AG_NOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8550_MASTER_LPIAON_NOC },
+};
+
+static struct qcom_icc_node qns_lpi_aon_noc = {
+ .name = "qns_lpi_aon_noc",
+ .id = SM8550_SLAVE_LPICX_NOC_LPIAON_NOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8550_MASTER_LPASS_LPINOC },
+};
+
+static struct qcom_icc_node ebi = {
+ .name = "ebi",
+ .id = SM8550_SLAVE_EBI1,
+ .channels = 4,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qns_mem_noc_hf = {
+ .name = "qns_mem_noc_hf",
+ .id = SM8550_SLAVE_MNOC_HF_MEM_NOC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8550_MASTER_MNOC_HF_MEM_NOC },
+};
+
+static struct qcom_icc_node qns_mem_noc_sf = {
+ .name = "qns_mem_noc_sf",
+ .id = SM8550_SLAVE_MNOC_SF_MEM_NOC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8550_MASTER_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node srvc_mnoc = {
+ .name = "srvc_mnoc",
+ .id = SM8550_SLAVE_SERVICE_MNOC,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qns_nsp_gemnoc = {
+ .name = "qns_nsp_gemnoc",
+ .id = SM8550_SLAVE_CDSP_MEM_NOC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8550_MASTER_COMPUTE_NOC },
+};
+
+static struct qcom_icc_node qns_pcie_mem_noc = {
+ .name = "qns_pcie_mem_noc",
+ .id = SM8550_SLAVE_ANOC_PCIE_GEM_NOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8550_MASTER_ANOC_PCIE_GEM_NOC },
+};
+
+static struct qcom_icc_node srvc_pcie_aggre_noc = {
+ .name = "srvc_pcie_aggre_noc",
+ .id = SM8550_SLAVE_SERVICE_PCIE_ANOC,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qns_gemnoc_gc = {
+ .name = "qns_gemnoc_gc",
+ .id = SM8550_SLAVE_SNOC_GEM_NOC_GC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8550_MASTER_SNOC_GC_MEM_NOC },
+};
+
+static struct qcom_icc_node qns_gemnoc_sf = {
+ .name = "qns_gemnoc_sf",
+ .id = SM8550_SLAVE_SNOC_GEM_NOC_SF,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8550_MASTER_SNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qns_llcc_disp = {
+ .name = "qns_llcc_disp",
+ .id = SM8550_SLAVE_LLCC_DISP,
+ .channels = 4,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8550_MASTER_LLCC_DISP },
+};
+
+static struct qcom_icc_node ebi_disp = {
+ .name = "ebi_disp",
+ .id = SM8550_SLAVE_EBI1_DISP,
+ .channels = 4,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qns_mem_noc_hf_disp = {
+ .name = "qns_mem_noc_hf_disp",
+ .id = SM8550_SLAVE_MNOC_HF_MEM_NOC_DISP,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8550_MASTER_MNOC_HF_MEM_NOC_DISP },
+};
+
+static struct qcom_icc_node qns_llcc_cam_ife_0 = {
+ .name = "qns_llcc_cam_ife_0",
+ .id = SM8550_SLAVE_LLCC_CAM_IFE_0,
+ .channels = 4,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8550_MASTER_LLCC_CAM_IFE_0 },
+};
+
+static struct qcom_icc_node ebi_cam_ife_0 = {
+ .name = "ebi_cam_ife_0",
+ .id = SM8550_SLAVE_EBI1_CAM_IFE_0,
+ .channels = 4,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qns_mem_noc_hf_cam_ife_0 = {
+ .name = "qns_mem_noc_hf_cam_ife_0",
+ .id = SM8550_SLAVE_MNOC_HF_MEM_NOC_CAM_IFE_0,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8550_MASTER_MNOC_HF_MEM_NOC_CAM_IFE_0 },
+};
+
+static struct qcom_icc_node qns_mem_noc_sf_cam_ife_0 = {
+ .name = "qns_mem_noc_sf_cam_ife_0",
+ .id = SM8550_SLAVE_MNOC_SF_MEM_NOC_CAM_IFE_0,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8550_MASTER_MNOC_SF_MEM_NOC_CAM_IFE_0 },
+};
+
+static struct qcom_icc_node qns_llcc_cam_ife_1 = {
+ .name = "qns_llcc_cam_ife_1",
+ .id = SM8550_SLAVE_LLCC_CAM_IFE_1,
+ .channels = 4,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8550_MASTER_LLCC_CAM_IFE_1 },
+};
+
+static struct qcom_icc_node ebi_cam_ife_1 = {
+ .name = "ebi_cam_ife_1",
+ .id = SM8550_SLAVE_EBI1_CAM_IFE_1,
+ .channels = 4,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qns_mem_noc_hf_cam_ife_1 = {
+ .name = "qns_mem_noc_hf_cam_ife_1",
+ .id = SM8550_SLAVE_MNOC_HF_MEM_NOC_CAM_IFE_1,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8550_MASTER_MNOC_HF_MEM_NOC_CAM_IFE_1 },
+};
+
+static struct qcom_icc_node qns_mem_noc_sf_cam_ife_1 = {
+ .name = "qns_mem_noc_sf_cam_ife_1",
+ .id = SM8550_SLAVE_MNOC_SF_MEM_NOC_CAM_IFE_1,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8550_MASTER_MNOC_SF_MEM_NOC_CAM_IFE_1 },
+};
+
+static struct qcom_icc_node qns_llcc_cam_ife_2 = {
+ .name = "qns_llcc_cam_ife_2",
+ .id = SM8550_SLAVE_LLCC_CAM_IFE_2,
+ .channels = 4,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8550_MASTER_LLCC_CAM_IFE_2 },
+};
+
+static struct qcom_icc_node ebi_cam_ife_2 = {
+ .name = "ebi_cam_ife_2",
+ .id = SM8550_SLAVE_EBI1_CAM_IFE_2,
+ .channels = 4,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qns_mem_noc_hf_cam_ife_2 = {
+ .name = "qns_mem_noc_hf_cam_ife_2",
+ .id = SM8550_SLAVE_MNOC_HF_MEM_NOC_CAM_IFE_2,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8550_MASTER_MNOC_HF_MEM_NOC_CAM_IFE_2 },
+};
+
+static struct qcom_icc_node qns_mem_noc_sf_cam_ife_2 = {
+ .name = "qns_mem_noc_sf_cam_ife_2",
+ .id = SM8550_SLAVE_MNOC_SF_MEM_NOC_CAM_IFE_2,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8550_MASTER_MNOC_SF_MEM_NOC_CAM_IFE_2 },
+};
+
+static struct qcom_icc_bcm bcm_acv = {
+ .name = "ACV",
+ .num_nodes = 1,
+ .nodes = { &ebi },
+};
+
+static struct qcom_icc_bcm bcm_ce0 = {
+ .name = "CE0",
+ .num_nodes = 1,
+ .nodes = { &qxm_crypto },
+};
+
+static struct qcom_icc_bcm bcm_cn0 = {
+ .name = "CN0",
+ .keepalive = true,
+ .num_nodes = 54,
+ .nodes = { &qsm_cfg, &qhs_ahb2phy0,
+ &qhs_ahb2phy1, &qhs_apss,
+ &qhs_camera_cfg, &qhs_clk_ctl,
+ &qhs_cpr_cx, &qhs_cpr_mmcx,
+ &qhs_cpr_mxa, &qhs_cpr_mxc,
+ &qhs_cpr_nspcx, &qhs_crypto0_cfg,
+ &qhs_cx_rdpm, &qhs_gpuss_cfg,
+ &qhs_i2c, &qhs_imem_cfg,
+ &qhs_ipa, &qhs_ipc_router,
+ &qhs_mss_cfg, &qhs_mx_rdpm,
+ &qhs_pcie0_cfg, &qhs_pcie1_cfg,
+ &qhs_pdm, &qhs_pimem_cfg,
+ &qhs_prng, &qhs_qdss_cfg,
+ &qhs_qspi, &qhs_qup1,
+ &qhs_qup2, &qhs_sdc2,
+ &qhs_sdc4, &qhs_spss_cfg,
+ &qhs_tcsr, &qhs_tlmm,
+ &qhs_ufs_mem_cfg, &qhs_usb3_0,
+ &qhs_venus_cfg, &qhs_vsense_ctrl_cfg,
+ &qss_lpass_qtb_cfg, &qss_mnoc_cfg,
+ &qss_nsp_qtb_cfg, &qss_pcie_anoc_cfg,
+ &xs_qdss_stm, &xs_sys_tcu_cfg,
+ &qnm_gemnoc_cnoc, &qnm_gemnoc_pcie,
+ &qhs_aoss, &qhs_tme_cfg,
+ &qss_cfg, &qss_ddrss_cfg,
+ &qxs_boot_imem, &qxs_imem,
+ &xs_pcie_0, &xs_pcie_1 },
+};
+
+static struct qcom_icc_bcm bcm_cn1 = {
+ .name = "CN1",
+ .num_nodes = 1,
+ .nodes = { &qhs_display_cfg },
+};
+
+static struct qcom_icc_bcm bcm_co0 = {
+ .name = "CO0",
+ .num_nodes = 2,
+ .nodes = { &qxm_nsp, &qns_nsp_gemnoc },
+};
+
+static struct qcom_icc_bcm bcm_lp0 = {
+ .name = "LP0",
+ .num_nodes = 2,
+ .nodes = { &qnm_lpass_lpinoc, &qns_lpass_aggnoc },
+};
+
+static struct qcom_icc_bcm bcm_mc0 = {
+ .name = "MC0",
+ .keepalive = true,
+ .num_nodes = 1,
+ .nodes = { &ebi },
+};
+
+static struct qcom_icc_bcm bcm_mm0 = {
+ .name = "MM0",
+ .num_nodes = 1,
+ .nodes = { &qns_mem_noc_hf },
+};
+
+static struct qcom_icc_bcm bcm_mm1 = {
+ .name = "MM1",
+ .num_nodes = 8,
+ .nodes = { &qnm_camnoc_hf, &qnm_camnoc_icp,
+ &qnm_camnoc_sf, &qnm_vapss_hcp,
+ &qnm_video_cv_cpu, &qnm_video_cvp,
+ &qnm_video_v_cpu, &qns_mem_noc_sf },
+};
+
+static struct qcom_icc_bcm bcm_qup0 = {
+ .name = "QUP0",
+ .keepalive = true,
+ .vote_scale = 1,
+ .num_nodes = 1,
+ .nodes = { &qup0_core_slave },
+};
+
+static struct qcom_icc_bcm bcm_qup1 = {
+ .name = "QUP1",
+ .keepalive = true,
+ .vote_scale = 1,
+ .num_nodes = 1,
+ .nodes = { &qup1_core_slave },
+};
+
+static struct qcom_icc_bcm bcm_qup2 = {
+ .name = "QUP2",
+ .keepalive = true,
+ .vote_scale = 1,
+ .num_nodes = 1,
+ .nodes = { &qup2_core_slave },
+};
+
+static struct qcom_icc_bcm bcm_sh0 = {
+ .name = "SH0",
+ .keepalive = true,
+ .num_nodes = 1,
+ .nodes = { &qns_llcc },
+};
+
+static struct qcom_icc_bcm bcm_sh1 = {
+ .name = "SH1",
+ .num_nodes = 13,
+ .nodes = { &alm_gpu_tcu, &alm_sys_tcu,
+ &chm_apps, &qnm_gpu,
+ &qnm_mdsp, &qnm_mnoc_hf,
+ &qnm_mnoc_sf, &qnm_nsp_gemnoc,
+ &qnm_pcie, &qnm_snoc_gc,
+ &qnm_snoc_sf, &qns_gem_noc_cnoc,
+ &qns_pcie },
+};
+
+static struct qcom_icc_bcm bcm_sn0 = {
+ .name = "SN0",
+ .keepalive = true,
+ .num_nodes = 1,
+ .nodes = { &qns_gemnoc_sf },
+};
+
+static struct qcom_icc_bcm bcm_sn1 = {
+ .name = "SN1",
+ .num_nodes = 3,
+ .nodes = { &qhm_gic, &xm_gic,
+ &qns_gemnoc_gc },
+};
+
+static struct qcom_icc_bcm bcm_sn2 = {
+ .name = "SN2",
+ .num_nodes = 1,
+ .nodes = { &qnm_aggre1_noc },
+};
+
+static struct qcom_icc_bcm bcm_sn3 = {
+ .name = "SN3",
+ .num_nodes = 1,
+ .nodes = { &qnm_aggre2_noc },
+};
+
+static struct qcom_icc_bcm bcm_sn7 = {
+ .name = "SN7",
+ .num_nodes = 1,
+ .nodes = { &qns_pcie_mem_noc },
+};
+
+static struct qcom_icc_bcm bcm_acv_disp = {
+ .name = "ACV",
+ .num_nodes = 1,
+ .nodes = { &ebi_disp },
+};
+
+static struct qcom_icc_bcm bcm_mc0_disp = {
+ .name = "MC0",
+ .num_nodes = 1,
+ .nodes = { &ebi_disp },
+};
+
+static struct qcom_icc_bcm bcm_mm0_disp = {
+ .name = "MM0",
+ .num_nodes = 1,
+ .nodes = { &qns_mem_noc_hf_disp },
+};
+
+static struct qcom_icc_bcm bcm_sh0_disp = {
+ .name = "SH0",
+ .num_nodes = 1,
+ .nodes = { &qns_llcc_disp },
+};
+
+static struct qcom_icc_bcm bcm_sh1_disp = {
+ .name = "SH1",
+ .num_nodes = 2,
+ .nodes = { &qnm_mnoc_hf_disp, &qnm_pcie_disp },
+};
+
+static struct qcom_icc_bcm bcm_acv_cam_ife_0 = {
+ .name = "ACV",
+ .num_nodes = 1,
+ .nodes = { &ebi_cam_ife_0 },
+};
+
+static struct qcom_icc_bcm bcm_mc0_cam_ife_0 = {
+ .name = "MC0",
+ .num_nodes = 1,
+ .nodes = { &ebi_cam_ife_0 },
+};
+
+static struct qcom_icc_bcm bcm_mm0_cam_ife_0 = {
+ .name = "MM0",
+ .num_nodes = 1,
+ .nodes = { &qns_mem_noc_hf_cam_ife_0 },
+};
+
+static struct qcom_icc_bcm bcm_mm1_cam_ife_0 = {
+ .name = "MM1",
+ .num_nodes = 4,
+ .nodes = { &qnm_camnoc_hf_cam_ife_0, &qnm_camnoc_icp_cam_ife_0,
+ &qnm_camnoc_sf_cam_ife_0, &qns_mem_noc_sf_cam_ife_0 },
+};
+
+static struct qcom_icc_bcm bcm_sh0_cam_ife_0 = {
+ .name = "SH0",
+ .num_nodes = 1,
+ .nodes = { &qns_llcc_cam_ife_0 },
+};
+
+static struct qcom_icc_bcm bcm_sh1_cam_ife_0 = {
+ .name = "SH1",
+ .num_nodes = 3,
+ .nodes = { &qnm_mnoc_hf_cam_ife_0, &qnm_mnoc_sf_cam_ife_0,
+ &qnm_pcie_cam_ife_0 },
+};
+
+static struct qcom_icc_bcm bcm_acv_cam_ife_1 = {
+ .name = "ACV",
+ .num_nodes = 1,
+ .nodes = { &ebi_cam_ife_1 },
+};
+
+static struct qcom_icc_bcm bcm_mc0_cam_ife_1 = {
+ .name = "MC0",
+ .num_nodes = 1,
+ .nodes = { &ebi_cam_ife_1 },
+};
+
+static struct qcom_icc_bcm bcm_mm0_cam_ife_1 = {
+ .name = "MM0",
+ .num_nodes = 1,
+ .nodes = { &qns_mem_noc_hf_cam_ife_1 },
+};
+
+static struct qcom_icc_bcm bcm_mm1_cam_ife_1 = {
+ .name = "MM1",
+ .num_nodes = 4,
+ .nodes = { &qnm_camnoc_hf_cam_ife_1, &qnm_camnoc_icp_cam_ife_1,
+ &qnm_camnoc_sf_cam_ife_1, &qns_mem_noc_sf_cam_ife_1 },
+};
+
+static struct qcom_icc_bcm bcm_sh0_cam_ife_1 = {
+ .name = "SH0",
+ .num_nodes = 1,
+ .nodes = { &qns_llcc_cam_ife_1 },
+};
+
+static struct qcom_icc_bcm bcm_sh1_cam_ife_1 = {
+ .name = "SH1",
+ .num_nodes = 3,
+ .nodes = { &qnm_mnoc_hf_cam_ife_1, &qnm_mnoc_sf_cam_ife_1,
+ &qnm_pcie_cam_ife_1 },
+};
+
+static struct qcom_icc_bcm bcm_acv_cam_ife_2 = {
+ .name = "ACV",
+ .num_nodes = 1,
+ .nodes = { &ebi_cam_ife_2 },
+};
+
+static struct qcom_icc_bcm bcm_mc0_cam_ife_2 = {
+ .name = "MC0",
+ .num_nodes = 1,
+ .nodes = { &ebi_cam_ife_2 },
+};
+
+static struct qcom_icc_bcm bcm_mm0_cam_ife_2 = {
+ .name = "MM0",
+ .num_nodes = 1,
+ .nodes = { &qns_mem_noc_hf_cam_ife_2 },
+};
+
+static struct qcom_icc_bcm bcm_mm1_cam_ife_2 = {
+ .name = "MM1",
+ .num_nodes = 4,
+ .nodes = { &qnm_camnoc_hf_cam_ife_2, &qnm_camnoc_icp_cam_ife_2,
+ &qnm_camnoc_sf_cam_ife_2, &qns_mem_noc_sf_cam_ife_2 },
+};
+
+static struct qcom_icc_bcm bcm_sh0_cam_ife_2 = {
+ .name = "SH0",
+ .num_nodes = 1,
+ .nodes = { &qns_llcc_cam_ife_2 },
+};
+
+static struct qcom_icc_bcm bcm_sh1_cam_ife_2 = {
+ .name = "SH1",
+ .num_nodes = 3,
+ .nodes = { &qnm_mnoc_hf_cam_ife_2, &qnm_mnoc_sf_cam_ife_2,
+ &qnm_pcie_cam_ife_2 },
+};
+
+static struct qcom_icc_bcm * const aggre1_noc_bcms[] = {
+};
+
+static struct qcom_icc_node * const aggre1_noc_nodes[] = {
+ [MASTER_QSPI_0] = &qhm_qspi,
+ [MASTER_QUP_1] = &qhm_qup1,
+ [MASTER_SDCC_4] = &xm_sdc4,
+ [MASTER_UFS_MEM] = &xm_ufs_mem,
+ [MASTER_USB3_0] = &xm_usb3_0,
+ [SLAVE_A1NOC_SNOC] = &qns_a1noc_snoc,
+};
+
+static const struct qcom_icc_desc sm8550_aggre1_noc = {
+ .nodes = aggre1_noc_nodes,
+ .num_nodes = ARRAY_SIZE(aggre1_noc_nodes),
+ .bcms = aggre1_noc_bcms,
+ .num_bcms = ARRAY_SIZE(aggre1_noc_bcms),
+};
+
+static struct qcom_icc_bcm * const aggre2_noc_bcms[] = {
+ &bcm_ce0,
+};
+
+static struct qcom_icc_node * const aggre2_noc_nodes[] = {
+ [MASTER_QDSS_BAM] = &qhm_qdss_bam,
+ [MASTER_QUP_2] = &qhm_qup2,
+ [MASTER_CRYPTO] = &qxm_crypto,
+ [MASTER_IPA] = &qxm_ipa,
+ [MASTER_SP] = &qxm_sp,
+ [MASTER_QDSS_ETR] = &xm_qdss_etr_0,
+ [MASTER_QDSS_ETR_1] = &xm_qdss_etr_1,
+ [MASTER_SDCC_2] = &xm_sdc2,
+ [SLAVE_A2NOC_SNOC] = &qns_a2noc_snoc,
+};
+
+static const struct qcom_icc_desc sm8550_aggre2_noc = {
+ .nodes = aggre2_noc_nodes,
+ .num_nodes = ARRAY_SIZE(aggre2_noc_nodes),
+ .bcms = aggre2_noc_bcms,
+ .num_bcms = ARRAY_SIZE(aggre2_noc_bcms),
+};
+
+static struct qcom_icc_bcm * const clk_virt_bcms[] = {
+ &bcm_qup0,
+ &bcm_qup1,
+ &bcm_qup2,
+};
+
+static struct qcom_icc_node * const clk_virt_nodes[] = {
+ [MASTER_QUP_CORE_0] = &qup0_core_master,
+ [MASTER_QUP_CORE_1] = &qup1_core_master,
+ [MASTER_QUP_CORE_2] = &qup2_core_master,
+ [SLAVE_QUP_CORE_0] = &qup0_core_slave,
+ [SLAVE_QUP_CORE_1] = &qup1_core_slave,
+ [SLAVE_QUP_CORE_2] = &qup2_core_slave,
+};
+
+static const struct qcom_icc_desc sm8550_clk_virt = {
+ .nodes = clk_virt_nodes,
+ .num_nodes = ARRAY_SIZE(clk_virt_nodes),
+ .bcms = clk_virt_bcms,
+ .num_bcms = ARRAY_SIZE(clk_virt_bcms),
+};
+
+static struct qcom_icc_bcm * const config_noc_bcms[] = {
+ &bcm_cn0,
+ &bcm_cn1,
+};
+
+static struct qcom_icc_node * const config_noc_nodes[] = {
+ [MASTER_CNOC_CFG] = &qsm_cfg,
+ [SLAVE_AHB2PHY_SOUTH] = &qhs_ahb2phy0,
+ [SLAVE_AHB2PHY_NORTH] = &qhs_ahb2phy1,
+ [SLAVE_APPSS] = &qhs_apss,
+ [SLAVE_CAMERA_CFG] = &qhs_camera_cfg,
+ [SLAVE_CLK_CTL] = &qhs_clk_ctl,
+ [SLAVE_RBCPR_CX_CFG] = &qhs_cpr_cx,
+ [SLAVE_RBCPR_MMCX_CFG] = &qhs_cpr_mmcx,
+ [SLAVE_RBCPR_MXA_CFG] = &qhs_cpr_mxa,
+ [SLAVE_RBCPR_MXC_CFG] = &qhs_cpr_mxc,
+ [SLAVE_CPR_NSPCX] = &qhs_cpr_nspcx,
+ [SLAVE_CRYPTO_0_CFG] = &qhs_crypto0_cfg,
+ [SLAVE_CX_RDPM] = &qhs_cx_rdpm,
+ [SLAVE_DISPLAY_CFG] = &qhs_display_cfg,
+ [SLAVE_GFX3D_CFG] = &qhs_gpuss_cfg,
+ [SLAVE_I2C] = &qhs_i2c,
+ [SLAVE_IMEM_CFG] = &qhs_imem_cfg,
+ [SLAVE_IPA_CFG] = &qhs_ipa,
+ [SLAVE_IPC_ROUTER_CFG] = &qhs_ipc_router,
+ [SLAVE_CNOC_MSS] = &qhs_mss_cfg,
+ [SLAVE_MX_RDPM] = &qhs_mx_rdpm,
+ [SLAVE_PCIE_0_CFG] = &qhs_pcie0_cfg,
+ [SLAVE_PCIE_1_CFG] = &qhs_pcie1_cfg,
+ [SLAVE_PDM] = &qhs_pdm,
+ [SLAVE_PIMEM_CFG] = &qhs_pimem_cfg,
+ [SLAVE_PRNG] = &qhs_prng,
+ [SLAVE_QDSS_CFG] = &qhs_qdss_cfg,
+ [SLAVE_QSPI_0] = &qhs_qspi,
+ [SLAVE_QUP_1] = &qhs_qup1,
+ [SLAVE_QUP_2] = &qhs_qup2,
+ [SLAVE_SDCC_2] = &qhs_sdc2,
+ [SLAVE_SDCC_4] = &qhs_sdc4,
+ [SLAVE_SPSS_CFG] = &qhs_spss_cfg,
+ [SLAVE_TCSR] = &qhs_tcsr,
+ [SLAVE_TLMM] = &qhs_tlmm,
+ [SLAVE_UFS_MEM_CFG] = &qhs_ufs_mem_cfg,
+ [SLAVE_USB3_0] = &qhs_usb3_0,
+ [SLAVE_VENUS_CFG] = &qhs_venus_cfg,
+ [SLAVE_VSENSE_CTRL_CFG] = &qhs_vsense_ctrl_cfg,
+ [SLAVE_LPASS_QTB_CFG] = &qss_lpass_qtb_cfg,
+ [SLAVE_CNOC_MNOC_CFG] = &qss_mnoc_cfg,
+ [SLAVE_NSP_QTB_CFG] = &qss_nsp_qtb_cfg,
+ [SLAVE_PCIE_ANOC_CFG] = &qss_pcie_anoc_cfg,
+ [SLAVE_QDSS_STM] = &xs_qdss_stm,
+ [SLAVE_TCU] = &xs_sys_tcu_cfg,
+};
+
+static const struct qcom_icc_desc sm8550_config_noc = {
+ .nodes = config_noc_nodes,
+ .num_nodes = ARRAY_SIZE(config_noc_nodes),
+ .bcms = config_noc_bcms,
+ .num_bcms = ARRAY_SIZE(config_noc_bcms),
+};
+
+static struct qcom_icc_bcm * const cnoc_main_bcms[] = {
+ &bcm_cn0,
+};
+
+static struct qcom_icc_node * const cnoc_main_nodes[] = {
+ [MASTER_GEM_NOC_CNOC] = &qnm_gemnoc_cnoc,
+ [MASTER_GEM_NOC_PCIE_SNOC] = &qnm_gemnoc_pcie,
+ [SLAVE_AOSS] = &qhs_aoss,
+ [SLAVE_TME_CFG] = &qhs_tme_cfg,
+ [SLAVE_CNOC_CFG] = &qss_cfg,
+ [SLAVE_DDRSS_CFG] = &qss_ddrss_cfg,
+ [SLAVE_BOOT_IMEM] = &qxs_boot_imem,
+ [SLAVE_IMEM] = &qxs_imem,
+ [SLAVE_PCIE_0] = &xs_pcie_0,
+ [SLAVE_PCIE_1] = &xs_pcie_1,
+};
+
+static const struct qcom_icc_desc sm8550_cnoc_main = {
+ .nodes = cnoc_main_nodes,
+ .num_nodes = ARRAY_SIZE(cnoc_main_nodes),
+ .bcms = cnoc_main_bcms,
+ .num_bcms = ARRAY_SIZE(cnoc_main_bcms),
+};
+
+static struct qcom_icc_bcm * const gem_noc_bcms[] = {
+ &bcm_sh0,
+ &bcm_sh1,
+ &bcm_sh0_disp,
+ &bcm_sh1_disp,
+ &bcm_sh0_cam_ife_0,
+ &bcm_sh1_cam_ife_0,
+ &bcm_sh0_cam_ife_1,
+ &bcm_sh1_cam_ife_1,
+ &bcm_sh0_cam_ife_2,
+ &bcm_sh1_cam_ife_2,
+};
+
+static struct qcom_icc_node * const gem_noc_nodes[] = {
+ [MASTER_GPU_TCU] = &alm_gpu_tcu,
+ [MASTER_SYS_TCU] = &alm_sys_tcu,
+ [MASTER_APPSS_PROC] = &chm_apps,
+ [MASTER_GFX3D] = &qnm_gpu,
+ [MASTER_LPASS_GEM_NOC] = &qnm_lpass_gemnoc,
+ [MASTER_MSS_PROC] = &qnm_mdsp,
+ [MASTER_MNOC_HF_MEM_NOC] = &qnm_mnoc_hf,
+ [MASTER_MNOC_SF_MEM_NOC] = &qnm_mnoc_sf,
+ [MASTER_COMPUTE_NOC] = &qnm_nsp_gemnoc,
+ [MASTER_ANOC_PCIE_GEM_NOC] = &qnm_pcie,
+ [MASTER_SNOC_GC_MEM_NOC] = &qnm_snoc_gc,
+ [MASTER_SNOC_SF_MEM_NOC] = &qnm_snoc_sf,
+ [SLAVE_GEM_NOC_CNOC] = &qns_gem_noc_cnoc,
+ [SLAVE_LLCC] = &qns_llcc,
+ [SLAVE_MEM_NOC_PCIE_SNOC] = &qns_pcie,
+ [MASTER_MNOC_HF_MEM_NOC_DISP] = &qnm_mnoc_hf_disp,
+ [MASTER_ANOC_PCIE_GEM_NOC_DISP] = &qnm_pcie_disp,
+ [SLAVE_LLCC_DISP] = &qns_llcc_disp,
+ [MASTER_MNOC_HF_MEM_NOC_CAM_IFE_0] = &qnm_mnoc_hf_cam_ife_0,
+ [MASTER_MNOC_SF_MEM_NOC_CAM_IFE_0] = &qnm_mnoc_sf_cam_ife_0,
+ [MASTER_ANOC_PCIE_GEM_NOC_CAM_IFE_0] = &qnm_pcie_cam_ife_0,
+ [SLAVE_LLCC_CAM_IFE_0] = &qns_llcc_cam_ife_0,
+ [MASTER_MNOC_HF_MEM_NOC_CAM_IFE_1] = &qnm_mnoc_hf_cam_ife_1,
+ [MASTER_MNOC_SF_MEM_NOC_CAM_IFE_1] = &qnm_mnoc_sf_cam_ife_1,
+ [MASTER_ANOC_PCIE_GEM_NOC_CAM_IFE_1] = &qnm_pcie_cam_ife_1,
+ [SLAVE_LLCC_CAM_IFE_1] = &qns_llcc_cam_ife_1,
+ [MASTER_MNOC_HF_MEM_NOC_CAM_IFE_2] = &qnm_mnoc_hf_cam_ife_2,
+ [MASTER_MNOC_SF_MEM_NOC_CAM_IFE_2] = &qnm_mnoc_sf_cam_ife_2,
+ [MASTER_ANOC_PCIE_GEM_NOC_CAM_IFE_2] = &qnm_pcie_cam_ife_2,
+ [SLAVE_LLCC_CAM_IFE_2] = &qns_llcc_cam_ife_2,
+};
+
+static const struct qcom_icc_desc sm8550_gem_noc = {
+ .nodes = gem_noc_nodes,
+ .num_nodes = ARRAY_SIZE(gem_noc_nodes),
+ .bcms = gem_noc_bcms,
+ .num_bcms = ARRAY_SIZE(gem_noc_bcms),
+};
+
+static struct qcom_icc_bcm * const lpass_ag_noc_bcms[] = {
+};
+
+static struct qcom_icc_node * const lpass_ag_noc_nodes[] = {
+ [MASTER_LPIAON_NOC] = &qnm_lpiaon_noc,
+ [SLAVE_LPASS_GEM_NOC] = &qns_lpass_ag_noc_gemnoc,
+};
+
+static const struct qcom_icc_desc sm8550_lpass_ag_noc = {
+ .nodes = lpass_ag_noc_nodes,
+ .num_nodes = ARRAY_SIZE(lpass_ag_noc_nodes),
+ .bcms = lpass_ag_noc_bcms,
+ .num_bcms = ARRAY_SIZE(lpass_ag_noc_bcms),
+};
+
+static struct qcom_icc_bcm * const lpass_lpiaon_noc_bcms[] = {
+ &bcm_lp0,
+};
+
+static struct qcom_icc_node * const lpass_lpiaon_noc_nodes[] = {
+ [MASTER_LPASS_LPINOC] = &qnm_lpass_lpinoc,
+ [SLAVE_LPIAON_NOC_LPASS_AG_NOC] = &qns_lpass_aggnoc,
+};
+
+static const struct qcom_icc_desc sm8550_lpass_lpiaon_noc = {
+ .nodes = lpass_lpiaon_noc_nodes,
+ .num_nodes = ARRAY_SIZE(lpass_lpiaon_noc_nodes),
+ .bcms = lpass_lpiaon_noc_bcms,
+ .num_bcms = ARRAY_SIZE(lpass_lpiaon_noc_bcms),
+};
+
+static struct qcom_icc_bcm * const lpass_lpicx_noc_bcms[] = {
+};
+
+static struct qcom_icc_node * const lpass_lpicx_noc_nodes[] = {
+ [MASTER_LPASS_PROC] = &qxm_lpinoc_dsp_axim,
+ [SLAVE_LPICX_NOC_LPIAON_NOC] = &qns_lpi_aon_noc,
+};
+
+static const struct qcom_icc_desc sm8550_lpass_lpicx_noc = {
+ .nodes = lpass_lpicx_noc_nodes,
+ .num_nodes = ARRAY_SIZE(lpass_lpicx_noc_nodes),
+ .bcms = lpass_lpicx_noc_bcms,
+ .num_bcms = ARRAY_SIZE(lpass_lpicx_noc_bcms),
+};
+
+static struct qcom_icc_bcm * const mc_virt_bcms[] = {
+ &bcm_acv,
+ &bcm_mc0,
+ &bcm_acv_disp,
+ &bcm_mc0_disp,
+ &bcm_acv_cam_ife_0,
+ &bcm_mc0_cam_ife_0,
+ &bcm_acv_cam_ife_1,
+ &bcm_mc0_cam_ife_1,
+ &bcm_acv_cam_ife_2,
+ &bcm_mc0_cam_ife_2,
+};
+
+static struct qcom_icc_node * const mc_virt_nodes[] = {
+ [MASTER_LLCC] = &llcc_mc,
+ [SLAVE_EBI1] = &ebi,
+ [MASTER_LLCC_DISP] = &llcc_mc_disp,
+ [SLAVE_EBI1_DISP] = &ebi_disp,
+ [MASTER_LLCC_CAM_IFE_0] = &llcc_mc_cam_ife_0,
+ [SLAVE_EBI1_CAM_IFE_0] = &ebi_cam_ife_0,
+ [MASTER_LLCC_CAM_IFE_1] = &llcc_mc_cam_ife_1,
+ [SLAVE_EBI1_CAM_IFE_1] = &ebi_cam_ife_1,
+ [MASTER_LLCC_CAM_IFE_2] = &llcc_mc_cam_ife_2,
+ [SLAVE_EBI1_CAM_IFE_2] = &ebi_cam_ife_2,
+};
+
+static const struct qcom_icc_desc sm8550_mc_virt = {
+ .nodes = mc_virt_nodes,
+ .num_nodes = ARRAY_SIZE(mc_virt_nodes),
+ .bcms = mc_virt_bcms,
+ .num_bcms = ARRAY_SIZE(mc_virt_bcms),
+};
+
+static struct qcom_icc_bcm * const mmss_noc_bcms[] = {
+ &bcm_mm0,
+ &bcm_mm1,
+ &bcm_mm0_disp,
+ &bcm_mm0_cam_ife_0,
+ &bcm_mm1_cam_ife_0,
+ &bcm_mm0_cam_ife_1,
+ &bcm_mm1_cam_ife_1,
+ &bcm_mm0_cam_ife_2,
+ &bcm_mm1_cam_ife_2,
+};
+
+static struct qcom_icc_node * const mmss_noc_nodes[] = {
+ [MASTER_CAMNOC_HF] = &qnm_camnoc_hf,
+ [MASTER_CAMNOC_ICP] = &qnm_camnoc_icp,
+ [MASTER_CAMNOC_SF] = &qnm_camnoc_sf,
+ [MASTER_MDP] = &qnm_mdp,
+ [MASTER_CDSP_HCP] = &qnm_vapss_hcp,
+ [MASTER_VIDEO] = &qnm_video,
+ [MASTER_VIDEO_CV_PROC] = &qnm_video_cv_cpu,
+ [MASTER_VIDEO_PROC] = &qnm_video_cvp,
+ [MASTER_VIDEO_V_PROC] = &qnm_video_v_cpu,
+ [MASTER_CNOC_MNOC_CFG] = &qsm_mnoc_cfg,
+ [SLAVE_MNOC_HF_MEM_NOC] = &qns_mem_noc_hf,
+ [SLAVE_MNOC_SF_MEM_NOC] = &qns_mem_noc_sf,
+ [SLAVE_SERVICE_MNOC] = &srvc_mnoc,
+ [MASTER_MDP_DISP] = &qnm_mdp_disp,
+ [SLAVE_MNOC_HF_MEM_NOC_DISP] = &qns_mem_noc_hf_disp,
+ [MASTER_CAMNOC_HF_CAM_IFE_0] = &qnm_camnoc_hf_cam_ife_0,
+ [MASTER_CAMNOC_ICP_CAM_IFE_0] = &qnm_camnoc_icp_cam_ife_0,
+ [MASTER_CAMNOC_SF_CAM_IFE_0] = &qnm_camnoc_sf_cam_ife_0,
+ [SLAVE_MNOC_HF_MEM_NOC_CAM_IFE_0] = &qns_mem_noc_hf_cam_ife_0,
+ [SLAVE_MNOC_SF_MEM_NOC_CAM_IFE_0] = &qns_mem_noc_sf_cam_ife_0,
+ [MASTER_CAMNOC_HF_CAM_IFE_1] = &qnm_camnoc_hf_cam_ife_1,
+ [MASTER_CAMNOC_ICP_CAM_IFE_1] = &qnm_camnoc_icp_cam_ife_1,
+ [MASTER_CAMNOC_SF_CAM_IFE_1] = &qnm_camnoc_sf_cam_ife_1,
+ [SLAVE_MNOC_HF_MEM_NOC_CAM_IFE_1] = &qns_mem_noc_hf_cam_ife_1,
+ [SLAVE_MNOC_SF_MEM_NOC_CAM_IFE_1] = &qns_mem_noc_sf_cam_ife_1,
+ [MASTER_CAMNOC_HF_CAM_IFE_2] = &qnm_camnoc_hf_cam_ife_2,
+ [MASTER_CAMNOC_ICP_CAM_IFE_2] = &qnm_camnoc_icp_cam_ife_2,
+ [MASTER_CAMNOC_SF_CAM_IFE_2] = &qnm_camnoc_sf_cam_ife_2,
+ [SLAVE_MNOC_HF_MEM_NOC_CAM_IFE_2] = &qns_mem_noc_hf_cam_ife_2,
+ [SLAVE_MNOC_SF_MEM_NOC_CAM_IFE_2] = &qns_mem_noc_sf_cam_ife_2,
+};
+
+static const struct qcom_icc_desc sm8550_mmss_noc = {
+ .nodes = mmss_noc_nodes,
+ .num_nodes = ARRAY_SIZE(mmss_noc_nodes),
+ .bcms = mmss_noc_bcms,
+ .num_bcms = ARRAY_SIZE(mmss_noc_bcms),
+};
+
+static struct qcom_icc_bcm * const nsp_noc_bcms[] = {
+ &bcm_co0,
+};
+
+static struct qcom_icc_node * const nsp_noc_nodes[] = {
+ [MASTER_CDSP_PROC] = &qxm_nsp,
+ [SLAVE_CDSP_MEM_NOC] = &qns_nsp_gemnoc,
+};
+
+static const struct qcom_icc_desc sm8550_nsp_noc = {
+ .nodes = nsp_noc_nodes,
+ .num_nodes = ARRAY_SIZE(nsp_noc_nodes),
+ .bcms = nsp_noc_bcms,
+ .num_bcms = ARRAY_SIZE(nsp_noc_bcms),
+};
+
+static struct qcom_icc_bcm * const pcie_anoc_bcms[] = {
+ &bcm_sn7,
+};
+
+static struct qcom_icc_node * const pcie_anoc_nodes[] = {
+ [MASTER_PCIE_ANOC_CFG] = &qsm_pcie_anoc_cfg,
+ [MASTER_PCIE_0] = &xm_pcie3_0,
+ [MASTER_PCIE_1] = &xm_pcie3_1,
+ [SLAVE_ANOC_PCIE_GEM_NOC] = &qns_pcie_mem_noc,
+ [SLAVE_SERVICE_PCIE_ANOC] = &srvc_pcie_aggre_noc,
+};
+
+static const struct qcom_icc_desc sm8550_pcie_anoc = {
+ .nodes = pcie_anoc_nodes,
+ .num_nodes = ARRAY_SIZE(pcie_anoc_nodes),
+ .bcms = pcie_anoc_bcms,
+ .num_bcms = ARRAY_SIZE(pcie_anoc_bcms),
+};
+
+static struct qcom_icc_bcm * const system_noc_bcms[] = {
+ &bcm_sn0,
+ &bcm_sn1,
+ &bcm_sn2,
+ &bcm_sn3,
+};
+
+static struct qcom_icc_node * const system_noc_nodes[] = {
+ [MASTER_GIC_AHB] = &qhm_gic,
+ [MASTER_A1NOC_SNOC] = &qnm_aggre1_noc,
+ [MASTER_A2NOC_SNOC] = &qnm_aggre2_noc,
+ [MASTER_GIC] = &xm_gic,
+ [SLAVE_SNOC_GEM_NOC_GC] = &qns_gemnoc_gc,
+ [SLAVE_SNOC_GEM_NOC_SF] = &qns_gemnoc_sf,
+};
+
+static const struct qcom_icc_desc sm8550_system_noc = {
+ .nodes = system_noc_nodes,
+ .num_nodes = ARRAY_SIZE(system_noc_nodes),
+ .bcms = system_noc_bcms,
+ .num_bcms = ARRAY_SIZE(system_noc_bcms),
+};
+
+static int qnoc_probe(struct platform_device *pdev)
+{
+ const struct qcom_icc_desc *desc;
+ struct icc_onecell_data *data;
+ struct icc_provider *provider;
+ struct qcom_icc_node * const *qnodes;
+ struct qcom_icc_provider *qp;
+ struct icc_node *node;
+ size_t num_nodes, i;
+ int ret;
+
+ desc = device_get_match_data(&pdev->dev);
+ if (!desc)
+ return -EINVAL;
+
+ qnodes = desc->nodes;
+ num_nodes = desc->num_nodes;
+
+ qp = devm_kzalloc(&pdev->dev, sizeof(*qp), GFP_KERNEL);
+ if (!qp)
+ return -ENOMEM;
+
+ data = devm_kcalloc(&pdev->dev, num_nodes, sizeof(*node), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ provider = &qp->provider;
+ provider->dev = &pdev->dev;
+ provider->set = qcom_icc_set;
+ provider->pre_aggregate = qcom_icc_pre_aggregate;
+ provider->aggregate = qcom_icc_aggregate;
+ provider->xlate_extended = qcom_icc_xlate_extended;
+ INIT_LIST_HEAD(&provider->nodes);
+ provider->data = data;
+
+ qp->dev = &pdev->dev;
+ qp->bcms = desc->bcms;
+ qp->num_bcms = desc->num_bcms;
+
+ qp->voter = of_bcm_voter_get(qp->dev, NULL);
+ if (IS_ERR(qp->voter))
+ return PTR_ERR(qp->voter);
+
+ ret = icc_provider_add(provider);
+ if (ret) {
+ dev_err_probe(&pdev->dev, ret,
+ "error adding interconnect provider\n");
+ return ret;
+ }
+
+ for (i = 0; i < qp->num_bcms; i++)
+ qcom_icc_bcm_init(qp->bcms[i], &pdev->dev);
+
+ for (i = 0; i < num_nodes; i++) {
+ size_t j;
+
+ if (!qnodes[i])
+ continue;
+
+ node = icc_node_create(qnodes[i]->id);
+ if (IS_ERR(node)) {
+ ret = PTR_ERR(node);
+ goto err;
+ }
+
+ node->name = qnodes[i]->name;
+ node->data = qnodes[i];
+ icc_node_add(node, provider);
+
+ for (j = 0; j < qnodes[i]->num_links; j++)
+ icc_link_create(node, qnodes[i]->links[j]);
+
+ data->nodes[i] = node;
+ }
+ data->num_nodes = num_nodes;
+
+ platform_set_drvdata(pdev, qp);
+
+ return 0;
+err:
+ icc_nodes_remove(provider);
+ icc_provider_del(provider);
+ return ret;
+}
+
+static int qnoc_remove(struct platform_device *pdev)
+{
+ struct qcom_icc_provider *qp = platform_get_drvdata(pdev);
+
+ icc_nodes_remove(&qp->provider);
+ icc_provider_del(&qp->provider);
+
+ return 0;
+}
+
+static const struct of_device_id qnoc_of_match[] = {
+ { .compatible = "qcom,sm8550-aggre1-noc",
+ .data = &sm8550_aggre1_noc},
+ { .compatible = "qcom,sm8550-aggre2-noc",
+ .data = &sm8550_aggre2_noc},
+ { .compatible = "qcom,sm8550-clk-virt",
+ .data = &sm8550_clk_virt},
+ { .compatible = "qcom,sm8550-config-noc",
+ .data = &sm8550_config_noc},
+ { .compatible = "qcom,sm8550-cnoc-main",
+ .data = &sm8550_cnoc_main},
+ { .compatible = "qcom,sm8550-gem-noc",
+ .data = &sm8550_gem_noc},
+ { .compatible = "qcom,sm8550-lpass-ag-noc",
+ .data = &sm8550_lpass_ag_noc},
+ { .compatible = "qcom,sm8550-lpass-lpiaon-noc",
+ .data = &sm8550_lpass_lpiaon_noc},
+ { .compatible = "qcom,sm8550-lpass-lpicx-noc",
+ .data = &sm8550_lpass_lpicx_noc},
+ { .compatible = "qcom,sm8550-mc-virt",
+ .data = &sm8550_mc_virt},
+ { .compatible = "qcom,sm8550-mmss-noc",
+ .data = &sm8550_mmss_noc},
+ { .compatible = "qcom,sm8550-nsp-noc",
+ .data = &sm8550_nsp_noc},
+ { .compatible = "qcom,sm8550-pcie-anoc",
+ .data = &sm8550_pcie_anoc},
+ { .compatible = "qcom,sm8550-system-noc",
+ .data = &sm8550_system_noc},
+ { }
+};
+MODULE_DEVICE_TABLE(of, qnoc_of_match);
+
+static struct platform_driver qnoc_driver = {
+ .probe = qnoc_probe,
+ .remove = qnoc_remove,
+ .driver = {
+ .name = "qnoc-sm8550",
+ .of_match_table = qnoc_of_match,
+ },
+};
+
+static int __init qnoc_driver_init(void)
+{
+ return platform_driver_register(&qnoc_driver);
+}
+core_initcall(qnoc_driver_init);
+
+static void __exit qnoc_driver_exit(void)
+{
+ platform_driver_unregister(&qnoc_driver);
+}
+module_exit(qnoc_driver_exit);
+
+MODULE_DESCRIPTION("sm8550 NoC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/interconnect/qcom/sm8550.h b/drivers/interconnect/qcom/sm8550.h
new file mode 100644
index 000000000000..8d5862c04bca
--- /dev/null
+++ b/drivers/interconnect/qcom/sm8550.h
@@ -0,0 +1,178 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * SM8450 interconnect IDs
+ *
+ * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021, Linaro Limited
+ */
+
+#ifndef __DRIVERS_INTERCONNECT_QCOM_SM8450_H
+#define __DRIVERS_INTERCONNECT_QCOM_SM8450_H
+
+#define SM8550_MASTER_A1NOC_SNOC 0
+#define SM8550_MASTER_A2NOC_SNOC 1
+#define SM8550_MASTER_ANOC_PCIE_GEM_NOC 2
+#define SM8550_MASTER_ANOC_PCIE_GEM_NOC_CAM_IFE_0 3
+#define SM8550_MASTER_ANOC_PCIE_GEM_NOC_CAM_IFE_1 4
+#define SM8550_MASTER_ANOC_PCIE_GEM_NOC_CAM_IFE_2 5
+#define SM8550_MASTER_ANOC_PCIE_GEM_NOC_DISP 6
+#define SM8550_MASTER_APPSS_PROC 7
+#define SM8550_MASTER_CAMNOC_HF 8
+#define SM8550_MASTER_CAMNOC_HF_CAM_IFE_0 9
+#define SM8550_MASTER_CAMNOC_HF_CAM_IFE_1 10
+#define SM8550_MASTER_CAMNOC_HF_CAM_IFE_2 11
+#define SM8550_MASTER_CAMNOC_ICP 12
+#define SM8550_MASTER_CAMNOC_ICP_CAM_IFE_0 13
+#define SM8550_MASTER_CAMNOC_ICP_CAM_IFE_1 14
+#define SM8550_MASTER_CAMNOC_ICP_CAM_IFE_2 15
+#define SM8550_MASTER_CAMNOC_SF 16
+#define SM8550_MASTER_CAMNOC_SF_CAM_IFE_0 17
+#define SM8550_MASTER_CAMNOC_SF_CAM_IFE_1 18
+#define SM8550_MASTER_CAMNOC_SF_CAM_IFE_2 19
+#define SM8550_MASTER_CDSP_HCP 20
+#define SM8550_MASTER_CDSP_PROC 21
+#define SM8550_MASTER_CNOC_CFG 22
+#define SM8550_MASTER_CNOC_MNOC_CFG 23
+#define SM8550_MASTER_COMPUTE_NOC 24
+#define SM8550_MASTER_CRYPTO 25
+#define SM8550_MASTER_GEM_NOC_CNOC 26
+#define SM8550_MASTER_GEM_NOC_PCIE_SNOC 27
+#define SM8550_MASTER_GFX3D 28
+#define SM8550_MASTER_GIC 29
+#define SM8550_MASTER_GIC_AHB 30
+#define SM8550_MASTER_GPU_TCU 31
+#define SM8550_MASTER_IPA 32
+#define SM8550_MASTER_LLCC 33
+#define SM8550_MASTER_LLCC_CAM_IFE_0 34
+#define SM8550_MASTER_LLCC_CAM_IFE_1 35
+#define SM8550_MASTER_LLCC_CAM_IFE_2 36
+#define SM8550_MASTER_LLCC_DISP 37
+#define SM8550_MASTER_LPASS_GEM_NOC 38
+#define SM8550_MASTER_LPASS_LPINOC 39
+#define SM8550_MASTER_LPASS_PROC 40
+#define SM8550_MASTER_LPIAON_NOC 41
+#define SM8550_MASTER_MDP 42
+#define SM8550_MASTER_MDP_DISP 43
+#define SM8550_MASTER_MNOC_HF_MEM_NOC 44
+#define SM8550_MASTER_MNOC_HF_MEM_NOC_CAM_IFE_0 45
+#define SM8550_MASTER_MNOC_HF_MEM_NOC_CAM_IFE_1 46
+#define SM8550_MASTER_MNOC_HF_MEM_NOC_CAM_IFE_2 47
+#define SM8550_MASTER_MNOC_HF_MEM_NOC_DISP 48
+#define SM8550_MASTER_MNOC_SF_MEM_NOC 49
+#define SM8550_MASTER_MNOC_SF_MEM_NOC_CAM_IFE_0 50
+#define SM8550_MASTER_MNOC_SF_MEM_NOC_CAM_IFE_1 51
+#define SM8550_MASTER_MNOC_SF_MEM_NOC_CAM_IFE_2 52
+#define SM8550_MASTER_MSS_PROC 53
+#define SM8550_MASTER_PCIE_0 54
+#define SM8550_MASTER_PCIE_1 55
+#define SM8550_MASTER_PCIE_ANOC_CFG 56
+#define SM8550_MASTER_QDSS_BAM 57
+#define SM8550_MASTER_QDSS_ETR 58
+#define SM8550_MASTER_QDSS_ETR_1 59
+#define SM8550_MASTER_QSPI_0 60
+#define SM8550_MASTER_QUP_1 61
+#define SM8550_MASTER_QUP_2 62
+#define SM8550_MASTER_QUP_CORE_0 63
+#define SM8550_MASTER_QUP_CORE_1 64
+#define SM8550_MASTER_QUP_CORE_2 65
+#define SM8550_MASTER_SDCC_2 66
+#define SM8550_MASTER_SDCC_4 67
+#define SM8550_MASTER_SNOC_GC_MEM_NOC 68
+#define SM8550_MASTER_SNOC_SF_MEM_NOC 69
+#define SM8550_MASTER_SP 70
+#define SM8550_MASTER_SYS_TCU 71
+#define SM8550_MASTER_UFS_MEM 72
+#define SM8550_MASTER_USB3_0 73
+#define SM8550_MASTER_VIDEO 74
+#define SM8550_MASTER_VIDEO_CV_PROC 75
+#define SM8550_MASTER_VIDEO_PROC 76
+#define SM8550_MASTER_VIDEO_V_PROC 77
+#define SM8550_SLAVE_A1NOC_SNOC 78
+#define SM8550_SLAVE_A2NOC_SNOC 79
+#define SM8550_SLAVE_AHB2PHY_NORTH 80
+#define SM8550_SLAVE_AHB2PHY_SOUTH 81
+#define SM8550_SLAVE_ANOC_PCIE_GEM_NOC 82
+#define SM8550_SLAVE_AOSS 83
+#define SM8550_SLAVE_APPSS 84
+#define SM8550_SLAVE_BOOT_IMEM 85
+#define SM8550_SLAVE_CAMERA_CFG 86
+#define SM8550_SLAVE_CDSP_MEM_NOC 87
+#define SM8550_SLAVE_CLK_CTL 88
+#define SM8550_SLAVE_CNOC_CFG 89
+#define SM8550_SLAVE_CNOC_MNOC_CFG 90
+#define SM8550_SLAVE_CNOC_MSS 91
+#define SM8550_SLAVE_CPR_NSPCX 92
+#define SM8550_SLAVE_CRYPTO_0_CFG 93
+#define SM8550_SLAVE_CX_RDPM 94
+#define SM8550_SLAVE_DDRSS_CFG 95
+#define SM8550_SLAVE_DISPLAY_CFG 96
+#define SM8550_SLAVE_EBI1 97
+#define SM8550_SLAVE_EBI1_CAM_IFE_0 98
+#define SM8550_SLAVE_EBI1_CAM_IFE_1 99
+#define SM8550_SLAVE_EBI1_CAM_IFE_2 100
+#define SM8550_SLAVE_EBI1_DISP 101
+#define SM8550_SLAVE_GEM_NOC_CNOC 102
+#define SM8550_SLAVE_GFX3D_CFG 103
+#define SM8550_SLAVE_I2C 104
+#define SM8550_SLAVE_IMEM 105
+#define SM8550_SLAVE_IMEM_CFG 106
+#define SM8550_SLAVE_IPA_CFG 107
+#define SM8550_SLAVE_IPC_ROUTER_CFG 108
+#define SM8550_SLAVE_LLCC 109
+#define SM8550_SLAVE_LLCC_CAM_IFE_0 110
+#define SM8550_SLAVE_LLCC_CAM_IFE_1 111
+#define SM8550_SLAVE_LLCC_CAM_IFE_2 112
+#define SM8550_SLAVE_LLCC_DISP 113
+#define SM8550_SLAVE_LPASS_GEM_NOC 114
+#define SM8550_SLAVE_LPASS_QTB_CFG 115
+#define SM8550_SLAVE_LPIAON_NOC_LPASS_AG_NOC 116
+#define SM8550_SLAVE_LPICX_NOC_LPIAON_NOC 117
+#define SM8550_SLAVE_MEM_NOC_PCIE_SNOC 118
+#define SM8550_SLAVE_MNOC_HF_MEM_NOC 119
+#define SM8550_SLAVE_MNOC_HF_MEM_NOC_CAM_IFE_0 120
+#define SM8550_SLAVE_MNOC_HF_MEM_NOC_CAM_IFE_1 121
+#define SM8550_SLAVE_MNOC_HF_MEM_NOC_CAM_IFE_2 122
+#define SM8550_SLAVE_MNOC_HF_MEM_NOC_DISP 123
+#define SM8550_SLAVE_MNOC_SF_MEM_NOC 124
+#define SM8550_SLAVE_MNOC_SF_MEM_NOC_CAM_IFE_0 125
+#define SM8550_SLAVE_MNOC_SF_MEM_NOC_CAM_IFE_1 126
+#define SM8550_SLAVE_MNOC_SF_MEM_NOC_CAM_IFE_2 127
+#define SM8550_SLAVE_MX_RDPM 128
+#define SM8550_SLAVE_NSP_QTB_CFG 129
+#define SM8550_SLAVE_PCIE_0 130
+#define SM8550_SLAVE_PCIE_0_CFG 131
+#define SM8550_SLAVE_PCIE_1 132
+#define SM8550_SLAVE_PCIE_1_CFG 133
+#define SM8550_SLAVE_PCIE_ANOC_CFG 134
+#define SM8550_SLAVE_PDM 135
+#define SM8550_SLAVE_PIMEM_CFG 136
+#define SM8550_SLAVE_PRNG 137
+#define SM8550_SLAVE_QDSS_CFG 138
+#define SM8550_SLAVE_QDSS_STM 139
+#define SM8550_SLAVE_QSPI_0 140
+#define SM8550_SLAVE_QUP_1 141
+#define SM8550_SLAVE_QUP_2 142
+#define SM8550_SLAVE_QUP_CORE_0 143
+#define SM8550_SLAVE_QUP_CORE_1 144
+#define SM8550_SLAVE_QUP_CORE_2 145
+#define SM8550_SLAVE_RBCPR_CX_CFG 146
+#define SM8550_SLAVE_RBCPR_MMCX_CFG 147
+#define SM8550_SLAVE_RBCPR_MXA_CFG 148
+#define SM8550_SLAVE_RBCPR_MXC_CFG 149
+#define SM8550_SLAVE_SDCC_2 150
+#define SM8550_SLAVE_SDCC_4 151
+#define SM8550_SLAVE_SERVICE_MNOC 152
+#define SM8550_SLAVE_SERVICE_PCIE_ANOC 153
+#define SM8550_SLAVE_SNOC_GEM_NOC_GC 154
+#define SM8550_SLAVE_SNOC_GEM_NOC_SF 155
+#define SM8550_SLAVE_SPSS_CFG 156
+#define SM8550_SLAVE_TCSR 157
+#define SM8550_SLAVE_TCU 158
+#define SM8550_SLAVE_TLMM 159
+#define SM8550_SLAVE_TME_CFG 160
+#define SM8550_SLAVE_UFS_MEM_CFG 161
+#define SM8550_SLAVE_USB3_0 162
+#define SM8550_SLAVE_VENUS_CFG 163
+#define SM8550_SLAVE_VSENSE_CTRL_CFG 164
+
+#endif
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 79707685d54a..889c7efd050b 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -32,7 +32,8 @@ config IOMMU_IO_PGTABLE
config IOMMU_IO_PGTABLE_LPAE
bool "ARMv7/v8 Long Descriptor Format"
select IOMMU_IO_PGTABLE
- depends on ARM || ARM64 || (COMPILE_TEST && !GENERIC_ATOMIC64)
+ depends on ARM || ARM64 || COMPILE_TEST
+ depends on !GENERIC_ATOMIC64 # for cpmxchg64()
help
Enable support for the ARM long descriptor pagetable format.
This allocator supports 4K/2M/1G, 16K/32M and 64K/512M page
@@ -70,7 +71,8 @@ config IOMMU_IO_PGTABLE_ARMV7S_SELFTEST
config IOMMU_IO_PGTABLE_DART
bool "Apple DART Formats"
select IOMMU_IO_PGTABLE
- depends on ARM64 || (COMPILE_TEST && !GENERIC_ATOMIC64)
+ depends on ARM64 || COMPILE_TEST
+ depends on !GENERIC_ATOMIC64 # for cpmxchg64()
help
Enable support for the Apple DART pagetable formats. These include
the t8020 and t6000/t8110 DART formats used in Apple M1/M2 family
@@ -284,7 +286,8 @@ config EXYNOS_IOMMU_DEBUG
config IPMMU_VMSA
bool "Renesas VMSA-compatible IPMMU"
- depends on ARCH_RENESAS || (COMPILE_TEST && !GENERIC_ATOMIC64)
+ depends on ARCH_RENESAS || COMPILE_TEST
+ depends on !GENERIC_ATOMIC64 # for IOMMU_IO_PGTABLE_LPAE
select IOMMU_API
select IOMMU_IO_PGTABLE_LPAE
select ARM_DMA_USE_IOMMU
@@ -304,7 +307,8 @@ config SPAPR_TCE_IOMMU
config APPLE_DART
tristate "Apple DART IOMMU Support"
- depends on ARCH_APPLE || (COMPILE_TEST && !GENERIC_ATOMIC64)
+ depends on ARCH_APPLE || COMPILE_TEST
+ depends on !GENERIC_ATOMIC64 # for IOMMU_IO_PGTABLE_DART
select IOMMU_API
select IOMMU_IO_PGTABLE_DART
default ARCH_APPLE
@@ -319,7 +323,8 @@ config APPLE_DART
# ARM IOMMU support
config ARM_SMMU
tristate "ARM Ltd. System MMU (SMMU) Support"
- depends on ARM64 || ARM || (COMPILE_TEST && !GENERIC_ATOMIC64)
+ depends on ARM64 || ARM || COMPILE_TEST
+ depends on !GENERIC_ATOMIC64 # for IOMMU_IO_PGTABLE_LPAE
select IOMMU_API
select IOMMU_IO_PGTABLE_LPAE
select ARM_DMA_USE_IOMMU if ARM
@@ -466,7 +471,8 @@ config MTK_IOMMU_V1
config QCOM_IOMMU
# Note: iommu drivers cannot (yet?) be built as modules
bool "Qualcomm IOMMU Support"
- depends on ARCH_QCOM || (COMPILE_TEST && !GENERIC_ATOMIC64)
+ depends on ARCH_QCOM || COMPILE_TEST
+ depends on !GENERIC_ATOMIC64 # for IOMMU_IO_PGTABLE_LPAE
select QCOM_SCM
select IOMMU_API
select IOMMU_IO_PGTABLE_LPAE
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index 467b194975b3..19a46b9f7357 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -3475,15 +3475,26 @@ found:
return 1;
}
+#define ACPIID_LEN (ACPIHID_UID_LEN + ACPIHID_HID_LEN)
+
static int __init parse_ivrs_acpihid(char *str)
{
u32 seg = 0, bus, dev, fn;
char *hid, *uid, *p, *addr;
- char acpiid[ACPIHID_UID_LEN + ACPIHID_HID_LEN] = {0};
+ char acpiid[ACPIID_LEN] = {0};
int i;
addr = strchr(str, '@');
if (!addr) {
+ addr = strchr(str, '=');
+ if (!addr)
+ goto not_found;
+
+ ++addr;
+
+ if (strlen(addr) > ACPIID_LEN)
+ goto not_found;
+
if (sscanf(str, "[%x:%x.%x]=%s", &bus, &dev, &fn, acpiid) == 4 ||
sscanf(str, "[%x:%x:%x.%x]=%s", &seg, &bus, &dev, &fn, acpiid) == 5) {
pr_warn("ivrs_acpihid%s option format deprecated; use ivrs_acpihid=%s@%04x:%02x:%02x.%d instead\n",
@@ -3496,6 +3507,9 @@ static int __init parse_ivrs_acpihid(char *str)
/* We have the '@', make it the terminator to get just the acpiid */
*addr++ = 0;
+ if (strlen(str) > ACPIID_LEN + 1)
+ goto not_found;
+
if (sscanf(str, "=%s", acpiid) != 1)
goto not_found;
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index cbeaab55c0db..5a505ba5467e 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -558,6 +558,15 @@ static void amd_iommu_report_page_fault(struct amd_iommu *iommu,
* prevent logging it.
*/
if (IS_IOMMU_MEM_TRANSACTION(flags)) {
+ /* Device not attached to domain properly */
+ if (dev_data->domain == NULL) {
+ pr_err_ratelimited("Event logged [Device not attached to domain properly]\n");
+ pr_err_ratelimited(" device=%04x:%02x:%02x.%x domain=0x%04x\n",
+ iommu->pci_seg->id, PCI_BUS_NUM(devid), PCI_SLOT(devid),
+ PCI_FUNC(devid), domain_id);
+ goto out;
+ }
+
if (!report_iommu_fault(&dev_data->domain->domain,
&pdev->dev, address,
IS_WRITE_REQUEST(flags) ?
@@ -667,7 +676,14 @@ retry:
event[0], event[1], event[2], event[3]);
}
- memset(__evt, 0, 4 * sizeof(u32));
+ /*
+ * To detect the hardware errata 732 we need to clear the
+ * entry back to zero. This issue does not exist on SNP
+ * enabled system. Also this buffer is not writeable on
+ * SNP enabled system.
+ */
+ if (!amd_iommu_snp_en)
+ memset(__evt, 0, 4 * sizeof(u32));
}
static void iommu_poll_events(struct amd_iommu *iommu)
@@ -736,10 +752,13 @@ static void iommu_poll_ppr_log(struct amd_iommu *iommu)
entry[1] = raw[1];
/*
- * To detect the hardware bug we need to clear the entry
- * back to zero.
+ * To detect the hardware errata 733 we need to clear the
+ * entry back to zero. This issue does not exist on SNP
+ * enabled system. Also this buffer is not writeable on
+ * SNP enabled system.
*/
- raw[0] = raw[1] = 0UL;
+ if (!amd_iommu_snp_en)
+ raw[0] = raw[1] = 0UL;
/* Update head pointer of hardware ring-buffer */
head = (head + PPR_ENTRY_SIZE) % PPR_LOG_SIZE;
@@ -1702,27 +1721,29 @@ static int pdev_pri_ats_enable(struct pci_dev *pdev)
/* Only allow access to user-accessible pages */
ret = pci_enable_pasid(pdev, 0);
if (ret)
- goto out_err;
+ return ret;
/* First reset the PRI state of the device */
ret = pci_reset_pri(pdev);
if (ret)
- goto out_err;
+ goto out_err_pasid;
/* Enable PRI */
/* FIXME: Hardcode number of outstanding requests for now */
ret = pci_enable_pri(pdev, 32);
if (ret)
- goto out_err;
+ goto out_err_pasid;
ret = pci_enable_ats(pdev, PAGE_SHIFT);
if (ret)
- goto out_err;
+ goto out_err_pri;
return 0;
-out_err:
+out_err_pri:
pci_disable_pri(pdev);
+
+out_err_pasid:
pci_disable_pasid(pdev);
return ret;
@@ -2072,6 +2093,10 @@ static struct protection_domain *protection_domain_alloc(unsigned int type)
if (ret)
goto out_err;
+ /* No need to allocate io pgtable ops in passthrough mode */
+ if (type == IOMMU_DOMAIN_IDENTITY)
+ return domain;
+
pgtbl_ops = alloc_io_pgtable_ops(pgtable, &domain->iop.pgtbl_cfg, domain);
if (!pgtbl_ops) {
domain_id_free(domain->id);
@@ -2126,31 +2151,6 @@ static void amd_iommu_domain_free(struct iommu_domain *dom)
protection_domain_free(domain);
}
-static void amd_iommu_detach_device(struct iommu_domain *dom,
- struct device *dev)
-{
- struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev);
- struct amd_iommu *iommu;
-
- if (!check_device(dev))
- return;
-
- if (dev_data->domain != NULL)
- detach_device(dev);
-
- iommu = rlookup_amd_iommu(dev);
- if (!iommu)
- return;
-
-#ifdef CONFIG_IRQ_REMAP
- if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) &&
- (dom->type == IOMMU_DOMAIN_UNMANAGED))
- dev_data->use_vapic = 0;
-#endif
-
- iommu_completion_wait(iommu);
-}
-
static int amd_iommu_attach_device(struct iommu_domain *dom,
struct device *dev)
{
@@ -2159,6 +2159,13 @@ static int amd_iommu_attach_device(struct iommu_domain *dom,
struct amd_iommu *iommu = rlookup_amd_iommu(dev);
int ret;
+ /*
+ * Skip attach device to domain if new domain is same as
+ * devices current domain
+ */
+ if (dev_data->domain == domain)
+ return 0;
+
dev_data->defer_attach = false;
if (dev_data->domain)
@@ -2271,8 +2278,6 @@ static bool amd_iommu_capable(struct device *dev, enum iommu_cap cap)
switch (cap) {
case IOMMU_CAP_CACHE_COHERENCY:
return true;
- case IOMMU_CAP_INTR_REMAP:
- return (irq_remapping_enabled == 1);
case IOMMU_CAP_NOEXEC:
return false;
case IOMMU_CAP_PRE_BOOT_PROTECTION:
@@ -2387,12 +2392,17 @@ static int amd_iommu_def_domain_type(struct device *dev)
return 0;
/*
- * Do not identity map IOMMUv2 capable devices when memory encryption is
- * active, because some of those devices (AMD GPUs) don't have the
- * encryption bit in their DMA-mask and require remapping.
+ * Do not identity map IOMMUv2 capable devices when:
+ * - memory encryption is active, because some of those devices
+ * (AMD GPUs) don't have the encryption bit in their DMA-mask
+ * and require remapping.
+ * - SNP is enabled, because it prohibits DTE[Mode]=0.
*/
- if (!cc_platform_has(CC_ATTR_MEM_ENCRYPT) && dev_data->iommu_v2)
+ if (dev_data->iommu_v2 &&
+ !cc_platform_has(CC_ATTR_MEM_ENCRYPT) &&
+ !amd_iommu_snp_en) {
return IOMMU_DOMAIN_IDENTITY;
+ }
return 0;
}
@@ -2416,7 +2426,6 @@ const struct iommu_ops amd_iommu_ops = {
.def_domain_type = amd_iommu_def_domain_type,
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = amd_iommu_attach_device,
- .detach_dev = amd_iommu_detach_device,
.map_pages = amd_iommu_map_pages,
.unmap_pages = amd_iommu_unmap_pages,
.iotlb_sync_map = amd_iommu_iotlb_sync_map,
@@ -3671,7 +3680,8 @@ int amd_iommu_create_irq_domain(struct amd_iommu *iommu)
}
irq_domain_update_bus_token(iommu->ir_domain, DOMAIN_BUS_AMDVI);
- iommu->ir_domain->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT;
+ iommu->ir_domain->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT |
+ IRQ_DOMAIN_FLAG_ISOLATED_MSI;
if (amd_iommu_np_cache)
iommu->ir_domain->msi_parent_ops = &virt_amdvi_msi_parent_ops;
diff --git a/drivers/iommu/apple-dart.c b/drivers/iommu/apple-dart.c
index 4f4a323be0d0..06169d36eab8 100644
--- a/drivers/iommu/apple-dart.c
+++ b/drivers/iommu/apple-dart.c
@@ -34,57 +34,154 @@
#include "dma-iommu.h"
-#define DART_MAX_STREAMS 16
+#define DART_MAX_STREAMS 256
#define DART_MAX_TTBR 4
#define MAX_DARTS_PER_DEVICE 2
-#define DART_STREAM_ALL 0xffff
+/* Common registers */
#define DART_PARAMS1 0x00
-#define DART_PARAMS_PAGE_SHIFT GENMASK(27, 24)
+#define DART_PARAMS1_PAGE_SHIFT GENMASK(27, 24)
#define DART_PARAMS2 0x04
-#define DART_PARAMS_BYPASS_SUPPORT BIT(0)
+#define DART_PARAMS2_BYPASS_SUPPORT BIT(0)
-#define DART_STREAM_COMMAND 0x20
-#define DART_STREAM_COMMAND_BUSY BIT(2)
-#define DART_STREAM_COMMAND_INVALIDATE BIT(20)
+/* T8020/T6000 registers */
-#define DART_STREAM_SELECT 0x34
+#define DART_T8020_STREAM_COMMAND 0x20
+#define DART_T8020_STREAM_COMMAND_BUSY BIT(2)
+#define DART_T8020_STREAM_COMMAND_INVALIDATE BIT(20)
-#define DART_ERROR 0x40
-#define DART_ERROR_STREAM GENMASK(27, 24)
-#define DART_ERROR_CODE GENMASK(11, 0)
-#define DART_ERROR_FLAG BIT(31)
+#define DART_T8020_STREAM_SELECT 0x34
-#define DART_ERROR_READ_FAULT BIT(4)
-#define DART_ERROR_WRITE_FAULT BIT(3)
-#define DART_ERROR_NO_PTE BIT(2)
-#define DART_ERROR_NO_PMD BIT(1)
-#define DART_ERROR_NO_TTBR BIT(0)
+#define DART_T8020_ERROR 0x40
+#define DART_T8020_ERROR_STREAM GENMASK(27, 24)
+#define DART_T8020_ERROR_CODE GENMASK(11, 0)
+#define DART_T8020_ERROR_FLAG BIT(31)
-#define DART_CONFIG 0x60
-#define DART_CONFIG_LOCK BIT(15)
+#define DART_T8020_ERROR_READ_FAULT BIT(4)
+#define DART_T8020_ERROR_WRITE_FAULT BIT(3)
+#define DART_T8020_ERROR_NO_PTE BIT(2)
+#define DART_T8020_ERROR_NO_PMD BIT(1)
+#define DART_T8020_ERROR_NO_TTBR BIT(0)
-#define DART_STREAM_COMMAND_BUSY_TIMEOUT 100
-
-#define DART_ERROR_ADDR_HI 0x54
-#define DART_ERROR_ADDR_LO 0x50
-
-#define DART_STREAMS_ENABLE 0xfc
+#define DART_T8020_CONFIG 0x60
+#define DART_T8020_CONFIG_LOCK BIT(15)
-#define DART_TCR(sid) (0x100 + 4 * (sid))
-#define DART_TCR_TRANSLATE_ENABLE BIT(7)
-#define DART_TCR_BYPASS0_ENABLE BIT(8)
-#define DART_TCR_BYPASS1_ENABLE BIT(12)
+#define DART_STREAM_COMMAND_BUSY_TIMEOUT 100
-#define DART_TTBR(sid, idx) (0x200 + 16 * (sid) + 4 * (idx))
-#define DART_TTBR_VALID BIT(31)
-#define DART_TTBR_SHIFT 12
+#define DART_T8020_ERROR_ADDR_HI 0x54
+#define DART_T8020_ERROR_ADDR_LO 0x50
+
+#define DART_T8020_STREAMS_ENABLE 0xfc
+
+#define DART_T8020_TCR 0x100
+#define DART_T8020_TCR_TRANSLATE_ENABLE BIT(7)
+#define DART_T8020_TCR_BYPASS_DART BIT(8)
+#define DART_T8020_TCR_BYPASS_DAPF BIT(12)
+
+#define DART_T8020_TTBR 0x200
+#define DART_T8020_TTBR_VALID BIT(31)
+#define DART_T8020_TTBR_ADDR_FIELD_SHIFT 0
+#define DART_T8020_TTBR_SHIFT 12
+
+/* T8110 registers */
+
+#define DART_T8110_PARAMS3 0x08
+#define DART_T8110_PARAMS3_PA_WIDTH GENMASK(29, 24)
+#define DART_T8110_PARAMS3_VA_WIDTH GENMASK(21, 16)
+#define DART_T8110_PARAMS3_VER_MAJ GENMASK(15, 8)
+#define DART_T8110_PARAMS3_VER_MIN GENMASK(7, 0)
+
+#define DART_T8110_PARAMS4 0x0c
+#define DART_T8110_PARAMS4_NUM_CLIENTS GENMASK(24, 16)
+#define DART_T8110_PARAMS4_NUM_SIDS GENMASK(8, 0)
+
+#define DART_T8110_TLB_CMD 0x80
+#define DART_T8110_TLB_CMD_BUSY BIT(31)
+#define DART_T8110_TLB_CMD_OP GENMASK(10, 8)
+#define DART_T8110_TLB_CMD_OP_FLUSH_ALL 0
+#define DART_T8110_TLB_CMD_OP_FLUSH_SID 1
+#define DART_T8110_TLB_CMD_STREAM GENMASK(7, 0)
+
+#define DART_T8110_ERROR 0x100
+#define DART_T8110_ERROR_STREAM GENMASK(27, 20)
+#define DART_T8110_ERROR_CODE GENMASK(14, 0)
+#define DART_T8110_ERROR_FLAG BIT(31)
+
+#define DART_T8110_ERROR_MASK 0x104
+
+#define DART_T8110_ERROR_READ_FAULT BIT(5)
+#define DART_T8110_ERROR_WRITE_FAULT BIT(4)
+#define DART_T8110_ERROR_NO_PTE BIT(3)
+#define DART_T8110_ERROR_NO_PMD BIT(2)
+#define DART_T8110_ERROR_NO_PGD BIT(1)
+#define DART_T8110_ERROR_NO_TTBR BIT(0)
+
+#define DART_T8110_ERROR_ADDR_LO 0x170
+#define DART_T8110_ERROR_ADDR_HI 0x174
+
+#define DART_T8110_PROTECT 0x200
+#define DART_T8110_UNPROTECT 0x204
+#define DART_T8110_PROTECT_LOCK 0x208
+#define DART_T8110_PROTECT_TTBR_TCR BIT(0)
+
+#define DART_T8110_ENABLE_STREAMS 0xc00
+#define DART_T8110_DISABLE_STREAMS 0xc20
+
+#define DART_T8110_TCR 0x1000
+#define DART_T8110_TCR_REMAP GENMASK(11, 8)
+#define DART_T8110_TCR_REMAP_EN BIT(7)
+#define DART_T8110_TCR_BYPASS_DAPF BIT(2)
+#define DART_T8110_TCR_BYPASS_DART BIT(1)
+#define DART_T8110_TCR_TRANSLATE_ENABLE BIT(0)
+
+#define DART_T8110_TTBR 0x1400
+#define DART_T8110_TTBR_VALID BIT(0)
+#define DART_T8110_TTBR_ADDR_FIELD_SHIFT 2
+#define DART_T8110_TTBR_SHIFT 14
+
+#define DART_TCR(dart, sid) ((dart)->hw->tcr + ((sid) << 2))
+
+#define DART_TTBR(dart, sid, idx) ((dart)->hw->ttbr + \
+ (((dart)->hw->ttbr_count * (sid)) << 2) + \
+ ((idx) << 2))
+
+struct apple_dart_stream_map;
+
+enum dart_type {
+ DART_T8020,
+ DART_T6000,
+ DART_T8110,
+};
struct apple_dart_hw {
+ enum dart_type type;
+ irqreturn_t (*irq_handler)(int irq, void *dev);
+ int (*invalidate_tlb)(struct apple_dart_stream_map *stream_map);
+
u32 oas;
enum io_pgtable_fmt fmt;
+
+ int max_sid_count;
+
+ u64 lock;
+ u64 lock_bit;
+
+ u64 error;
+
+ u64 enable_streams;
+
+ u64 tcr;
+ u64 tcr_enabled;
+ u64 tcr_disabled;
+ u64 tcr_bypass;
+
+ u64 ttbr;
+ u64 ttbr_valid;
+ u64 ttbr_addr_field_shift;
+ u64 ttbr_shift;
+ int ttbr_count;
};
/*
@@ -115,12 +212,18 @@ struct apple_dart {
spinlock_t lock;
+ u32 ias;
+ u32 oas;
u32 pgsize;
+ u32 num_streams;
u32 supports_bypass : 1;
u32 force_bypass : 1;
struct iommu_group *sid2group[DART_MAX_STREAMS];
struct iommu_device iommu;
+
+ u32 save_tcr[DART_MAX_STREAMS];
+ u32 save_ttbr[DART_MAX_STREAMS][DART_MAX_TTBR];
};
/*
@@ -140,11 +243,11 @@ struct apple_dart {
*/
struct apple_dart_stream_map {
struct apple_dart *dart;
- unsigned long sidmap;
+ DECLARE_BITMAP(sidmap, DART_MAX_STREAMS);
};
struct apple_dart_atomic_stream_map {
struct apple_dart *dart;
- atomic64_t sidmap;
+ atomic_long_t sidmap[BITS_TO_LONGS(DART_MAX_STREAMS)];
};
/*
@@ -202,50 +305,55 @@ static struct apple_dart_domain *to_dart_domain(struct iommu_domain *dom)
static void
apple_dart_hw_enable_translation(struct apple_dart_stream_map *stream_map)
{
+ struct apple_dart *dart = stream_map->dart;
int sid;
- for_each_set_bit(sid, &stream_map->sidmap, DART_MAX_STREAMS)
- writel(DART_TCR_TRANSLATE_ENABLE,
- stream_map->dart->regs + DART_TCR(sid));
+ for_each_set_bit(sid, stream_map->sidmap, dart->num_streams)
+ writel(dart->hw->tcr_enabled, dart->regs + DART_TCR(dart, sid));
}
static void apple_dart_hw_disable_dma(struct apple_dart_stream_map *stream_map)
{
+ struct apple_dart *dart = stream_map->dart;
int sid;
- for_each_set_bit(sid, &stream_map->sidmap, DART_MAX_STREAMS)
- writel(0, stream_map->dart->regs + DART_TCR(sid));
+ for_each_set_bit(sid, stream_map->sidmap, dart->num_streams)
+ writel(dart->hw->tcr_disabled, dart->regs + DART_TCR(dart, sid));
}
static void
apple_dart_hw_enable_bypass(struct apple_dart_stream_map *stream_map)
{
+ struct apple_dart *dart = stream_map->dart;
int sid;
WARN_ON(!stream_map->dart->supports_bypass);
- for_each_set_bit(sid, &stream_map->sidmap, DART_MAX_STREAMS)
- writel(DART_TCR_BYPASS0_ENABLE | DART_TCR_BYPASS1_ENABLE,
- stream_map->dart->regs + DART_TCR(sid));
+ for_each_set_bit(sid, stream_map->sidmap, dart->num_streams)
+ writel(dart->hw->tcr_bypass,
+ dart->regs + DART_TCR(dart, sid));
}
static void apple_dart_hw_set_ttbr(struct apple_dart_stream_map *stream_map,
u8 idx, phys_addr_t paddr)
{
+ struct apple_dart *dart = stream_map->dart;
int sid;
- WARN_ON(paddr & ((1 << DART_TTBR_SHIFT) - 1));
- for_each_set_bit(sid, &stream_map->sidmap, DART_MAX_STREAMS)
- writel(DART_TTBR_VALID | (paddr >> DART_TTBR_SHIFT),
- stream_map->dart->regs + DART_TTBR(sid, idx));
+ WARN_ON(paddr & ((1 << dart->hw->ttbr_shift) - 1));
+ for_each_set_bit(sid, stream_map->sidmap, dart->num_streams)
+ writel(dart->hw->ttbr_valid |
+ (paddr >> dart->hw->ttbr_shift) << dart->hw->ttbr_addr_field_shift,
+ dart->regs + DART_TTBR(dart, sid, idx));
}
static void apple_dart_hw_clear_ttbr(struct apple_dart_stream_map *stream_map,
u8 idx)
{
+ struct apple_dart *dart = stream_map->dart;
int sid;
- for_each_set_bit(sid, &stream_map->sidmap, DART_MAX_STREAMS)
- writel(0, stream_map->dart->regs + DART_TTBR(sid, idx));
+ for_each_set_bit(sid, stream_map->sidmap, dart->num_streams)
+ writel(0, dart->regs + DART_TTBR(dart, sid, idx));
}
static void
@@ -253,12 +361,12 @@ apple_dart_hw_clear_all_ttbrs(struct apple_dart_stream_map *stream_map)
{
int i;
- for (i = 0; i < DART_MAX_TTBR; ++i)
+ for (i = 0; i < stream_map->dart->hw->ttbr_count; ++i)
apple_dart_hw_clear_ttbr(stream_map, i);
}
static int
-apple_dart_hw_stream_command(struct apple_dart_stream_map *stream_map,
+apple_dart_t8020_hw_stream_command(struct apple_dart_stream_map *stream_map,
u32 command)
{
unsigned long flags;
@@ -267,12 +375,12 @@ apple_dart_hw_stream_command(struct apple_dart_stream_map *stream_map,
spin_lock_irqsave(&stream_map->dart->lock, flags);
- writel(stream_map->sidmap, stream_map->dart->regs + DART_STREAM_SELECT);
- writel(command, stream_map->dart->regs + DART_STREAM_COMMAND);
+ writel(stream_map->sidmap[0], stream_map->dart->regs + DART_T8020_STREAM_SELECT);
+ writel(command, stream_map->dart->regs + DART_T8020_STREAM_COMMAND);
ret = readl_poll_timeout_atomic(
- stream_map->dart->regs + DART_STREAM_COMMAND, command_reg,
- !(command_reg & DART_STREAM_COMMAND_BUSY), 1,
+ stream_map->dart->regs + DART_T8020_STREAM_COMMAND, command_reg,
+ !(command_reg & DART_T8020_STREAM_COMMAND_BUSY), 1,
DART_STREAM_COMMAND_BUSY_TIMEOUT);
spin_unlock_irqrestore(&stream_map->dart->lock, flags);
@@ -280,7 +388,45 @@ apple_dart_hw_stream_command(struct apple_dart_stream_map *stream_map,
if (ret) {
dev_err(stream_map->dart->dev,
"busy bit did not clear after command %x for streams %lx\n",
- command, stream_map->sidmap);
+ command, stream_map->sidmap[0]);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+apple_dart_t8110_hw_tlb_command(struct apple_dart_stream_map *stream_map,
+ u32 command)
+{
+ struct apple_dart *dart = stream_map->dart;
+ unsigned long flags;
+ int ret = 0;
+ int sid;
+
+ spin_lock_irqsave(&dart->lock, flags);
+
+ for_each_set_bit(sid, stream_map->sidmap, dart->num_streams) {
+ u32 val = FIELD_PREP(DART_T8110_TLB_CMD_OP, command) |
+ FIELD_PREP(DART_T8110_TLB_CMD_STREAM, sid);
+ writel(val, dart->regs + DART_T8110_TLB_CMD);
+
+ ret = readl_poll_timeout_atomic(
+ dart->regs + DART_T8110_TLB_CMD, val,
+ !(val & DART_T8110_TLB_CMD_BUSY), 1,
+ DART_STREAM_COMMAND_BUSY_TIMEOUT);
+
+ if (ret)
+ break;
+
+ }
+
+ spin_unlock_irqrestore(&dart->lock, flags);
+
+ if (ret) {
+ dev_err(stream_map->dart->dev,
+ "busy bit did not clear after command %x for stream %d\n",
+ command, sid);
return ret;
}
@@ -288,48 +434,64 @@ apple_dart_hw_stream_command(struct apple_dart_stream_map *stream_map,
}
static int
-apple_dart_hw_invalidate_tlb(struct apple_dart_stream_map *stream_map)
+apple_dart_t8020_hw_invalidate_tlb(struct apple_dart_stream_map *stream_map)
+{
+ return apple_dart_t8020_hw_stream_command(
+ stream_map, DART_T8020_STREAM_COMMAND_INVALIDATE);
+}
+
+static int
+apple_dart_t8110_hw_invalidate_tlb(struct apple_dart_stream_map *stream_map)
{
- return apple_dart_hw_stream_command(stream_map,
- DART_STREAM_COMMAND_INVALIDATE);
+ return apple_dart_t8110_hw_tlb_command(
+ stream_map, DART_T8110_TLB_CMD_OP_FLUSH_SID);
}
static int apple_dart_hw_reset(struct apple_dart *dart)
{
u32 config;
struct apple_dart_stream_map stream_map;
+ int i;
- config = readl(dart->regs + DART_CONFIG);
- if (config & DART_CONFIG_LOCK) {
+ config = readl(dart->regs + dart->hw->lock);
+ if (config & dart->hw->lock_bit) {
dev_err(dart->dev, "DART is locked down until reboot: %08x\n",
config);
return -EINVAL;
}
stream_map.dart = dart;
- stream_map.sidmap = DART_STREAM_ALL;
+ bitmap_zero(stream_map.sidmap, DART_MAX_STREAMS);
+ bitmap_set(stream_map.sidmap, 0, dart->num_streams);
apple_dart_hw_disable_dma(&stream_map);
apple_dart_hw_clear_all_ttbrs(&stream_map);
/* enable all streams globally since TCR is used to control isolation */
- writel(DART_STREAM_ALL, dart->regs + DART_STREAMS_ENABLE);
+ for (i = 0; i < BITS_TO_U32(dart->num_streams); i++)
+ writel(U32_MAX, dart->regs + dart->hw->enable_streams + 4 * i);
/* clear any pending errors before the interrupt is unmasked */
- writel(readl(dart->regs + DART_ERROR), dart->regs + DART_ERROR);
+ writel(readl(dart->regs + dart->hw->error), dart->regs + dart->hw->error);
+
+ if (dart->hw->type == DART_T8110)
+ writel(0, dart->regs + DART_T8110_ERROR_MASK);
- return apple_dart_hw_invalidate_tlb(&stream_map);
+ return dart->hw->invalidate_tlb(&stream_map);
}
static void apple_dart_domain_flush_tlb(struct apple_dart_domain *domain)
{
- int i;
+ int i, j;
struct apple_dart_atomic_stream_map *domain_stream_map;
struct apple_dart_stream_map stream_map;
for_each_stream_map(i, domain, domain_stream_map) {
stream_map.dart = domain_stream_map->dart;
- stream_map.sidmap = atomic64_read(&domain_stream_map->sidmap);
- apple_dart_hw_invalidate_tlb(&stream_map);
+
+ for (j = 0; j < BITS_TO_LONGS(stream_map.dart->num_streams); j++)
+ stream_map.sidmap[j] = atomic_long_read(&domain_stream_map->sidmap[j]);
+
+ stream_map.dart->hw->invalidate_tlb(&stream_map);
}
}
@@ -399,11 +561,11 @@ apple_dart_setup_translation(struct apple_dart_domain *domain,
for (i = 0; i < pgtbl_cfg->apple_dart_cfg.n_ttbrs; ++i)
apple_dart_hw_set_ttbr(stream_map, i,
pgtbl_cfg->apple_dart_cfg.ttbr[i]);
- for (; i < DART_MAX_TTBR; ++i)
+ for (; i < stream_map->dart->hw->ttbr_count; ++i)
apple_dart_hw_clear_ttbr(stream_map, i);
apple_dart_hw_enable_translation(stream_map);
- apple_dart_hw_invalidate_tlb(stream_map);
+ stream_map->dart->hw->invalidate_tlb(stream_map);
}
static int apple_dart_finalize_domain(struct iommu_domain *domain,
@@ -413,7 +575,7 @@ static int apple_dart_finalize_domain(struct iommu_domain *domain,
struct apple_dart *dart = cfg->stream_maps[0].dart;
struct io_pgtable_cfg pgtbl_cfg;
int ret = 0;
- int i;
+ int i, j;
mutex_lock(&dart_domain->init_lock);
@@ -422,14 +584,15 @@ static int apple_dart_finalize_domain(struct iommu_domain *domain,
for (i = 0; i < MAX_DARTS_PER_DEVICE; ++i) {
dart_domain->stream_maps[i].dart = cfg->stream_maps[i].dart;
- atomic64_set(&dart_domain->stream_maps[i].sidmap,
- cfg->stream_maps[i].sidmap);
+ for (j = 0; j < BITS_TO_LONGS(dart->num_streams); j++)
+ atomic_long_set(&dart_domain->stream_maps[i].sidmap[j],
+ cfg->stream_maps[i].sidmap[j]);
}
pgtbl_cfg = (struct io_pgtable_cfg){
.pgsize_bitmap = dart->pgsize,
- .ias = 32,
- .oas = dart->hw->oas,
+ .ias = dart->ias,
+ .oas = dart->oas,
.coherent_walk = 1,
.iommu_dev = dart->dev,
};
@@ -443,7 +606,7 @@ static int apple_dart_finalize_domain(struct iommu_domain *domain,
domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
domain->geometry.aperture_start = 0;
- domain->geometry.aperture_end = DMA_BIT_MASK(32);
+ domain->geometry.aperture_end = (dma_addr_t)DMA_BIT_MASK(dart->ias);
domain->geometry.force_aperture = true;
dart_domain->finalized = true;
@@ -458,7 +621,7 @@ apple_dart_mod_streams(struct apple_dart_atomic_stream_map *domain_maps,
struct apple_dart_stream_map *master_maps,
bool add_streams)
{
- int i;
+ int i, j;
for (i = 0; i < MAX_DARTS_PER_DEVICE; ++i) {
if (domain_maps[i].dart != master_maps[i].dart)
@@ -468,12 +631,14 @@ apple_dart_mod_streams(struct apple_dart_atomic_stream_map *domain_maps,
for (i = 0; i < MAX_DARTS_PER_DEVICE; ++i) {
if (!domain_maps[i].dart)
break;
- if (add_streams)
- atomic64_or(master_maps[i].sidmap,
- &domain_maps[i].sidmap);
- else
- atomic64_and(~master_maps[i].sidmap,
- &domain_maps[i].sidmap);
+ for (j = 0; j < BITS_TO_LONGS(domain_maps[i].dart->num_streams); j++) {
+ if (add_streams)
+ atomic_long_or(master_maps[i].sidmap[j],
+ &domain_maps[i].sidmap[j]);
+ else
+ atomic_long_and(~master_maps[i].sidmap[j],
+ &domain_maps[i].sidmap[j]);
+ }
}
return 0;
@@ -486,13 +651,6 @@ static int apple_dart_domain_add_streams(struct apple_dart_domain *domain,
true);
}
-static int apple_dart_domain_remove_streams(struct apple_dart_domain *domain,
- struct apple_dart_master_cfg *cfg)
-{
- return apple_dart_mod_streams(domain->stream_maps, cfg->stream_maps,
- false);
-}
-
static int apple_dart_attach_dev(struct iommu_domain *domain,
struct device *dev)
{
@@ -535,22 +693,6 @@ static int apple_dart_attach_dev(struct iommu_domain *domain,
return ret;
}
-static void apple_dart_detach_dev(struct iommu_domain *domain,
- struct device *dev)
-{
- int i;
- struct apple_dart_stream_map *stream_map;
- struct apple_dart_master_cfg *cfg = dev_iommu_priv_get(dev);
- struct apple_dart_domain *dart_domain = to_dart_domain(domain);
-
- for_each_stream_map(i, cfg, stream_map)
- apple_dart_hw_disable_dma(stream_map);
-
- if (domain->type == IOMMU_DOMAIN_DMA ||
- domain->type == IOMMU_DOMAIN_UNMANAGED)
- apple_dart_domain_remove_streams(dart_domain, cfg);
-}
-
static struct iommu_device *apple_dart_probe_device(struct device *dev)
{
struct apple_dart_master_cfg *cfg = dev_iommu_priv_get(dev);
@@ -637,14 +779,14 @@ static int apple_dart_of_xlate(struct device *dev, struct of_phandle_args *args)
for (i = 0; i < MAX_DARTS_PER_DEVICE; ++i) {
if (cfg->stream_maps[i].dart == dart) {
- cfg->stream_maps[i].sidmap |= 1 << sid;
+ set_bit(sid, cfg->stream_maps[i].sidmap);
return 0;
}
}
for (i = 0; i < MAX_DARTS_PER_DEVICE; ++i) {
if (!cfg->stream_maps[i].dart) {
cfg->stream_maps[i].dart = dart;
- cfg->stream_maps[i].sidmap = 1 << sid;
+ set_bit(sid, cfg->stream_maps[i].sidmap);
return 0;
}
}
@@ -663,13 +805,36 @@ static void apple_dart_release_group(void *iommu_data)
mutex_lock(&apple_dart_groups_lock);
for_each_stream_map(i, group_master_cfg, stream_map)
- for_each_set_bit(sid, &stream_map->sidmap, DART_MAX_STREAMS)
+ for_each_set_bit(sid, stream_map->sidmap, stream_map->dart->num_streams)
stream_map->dart->sid2group[sid] = NULL;
kfree(iommu_data);
mutex_unlock(&apple_dart_groups_lock);
}
+static int apple_dart_merge_master_cfg(struct apple_dart_master_cfg *dst,
+ struct apple_dart_master_cfg *src)
+{
+ /*
+ * We know that this function is only called for groups returned from
+ * pci_device_group and that all Apple Silicon platforms never spread
+ * PCIe devices from the same bus across multiple DARTs such that we can
+ * just assume that both src and dst only have the same single DART.
+ */
+ if (src->stream_maps[1].dart)
+ return -EINVAL;
+ if (dst->stream_maps[1].dart)
+ return -EINVAL;
+ if (src->stream_maps[0].dart != dst->stream_maps[0].dart)
+ return -EINVAL;
+
+ bitmap_or(dst->stream_maps[0].sidmap,
+ dst->stream_maps[0].sidmap,
+ src->stream_maps[0].sidmap,
+ dst->stream_maps[0].dart->num_streams);
+ return 0;
+}
+
static struct iommu_group *apple_dart_device_group(struct device *dev)
{
int i, sid;
@@ -682,7 +847,7 @@ static struct iommu_group *apple_dart_device_group(struct device *dev)
mutex_lock(&apple_dart_groups_lock);
for_each_stream_map(i, cfg, stream_map) {
- for_each_set_bit(sid, &stream_map->sidmap, DART_MAX_STREAMS) {
+ for_each_set_bit(sid, stream_map->sidmap, stream_map->dart->num_streams) {
struct iommu_group *stream_group =
stream_map->dart->sid2group[sid];
@@ -711,17 +876,31 @@ static struct iommu_group *apple_dart_device_group(struct device *dev)
if (!group)
goto out;
- group_master_cfg = kmemdup(cfg, sizeof(*group_master_cfg), GFP_KERNEL);
- if (!group_master_cfg) {
- iommu_group_put(group);
- goto out;
- }
+ group_master_cfg = iommu_group_get_iommudata(group);
+ if (group_master_cfg) {
+ int ret;
+
+ ret = apple_dart_merge_master_cfg(group_master_cfg, cfg);
+ if (ret) {
+ dev_err(dev, "Failed to merge DART IOMMU grups.\n");
+ iommu_group_put(group);
+ res = ERR_PTR(ret);
+ goto out;
+ }
+ } else {
+ group_master_cfg = kmemdup(cfg, sizeof(*group_master_cfg),
+ GFP_KERNEL);
+ if (!group_master_cfg) {
+ iommu_group_put(group);
+ goto out;
+ }
- iommu_group_set_iommudata(group, group_master_cfg,
- apple_dart_release_group);
+ iommu_group_set_iommudata(group, group_master_cfg,
+ apple_dart_release_group);
+ }
for_each_stream_map(i, cfg, stream_map)
- for_each_set_bit(sid, &stream_map->sidmap, DART_MAX_STREAMS)
+ for_each_set_bit(sid, stream_map->sidmap, stream_map->dart->num_streams)
stream_map->dart->sid2group[sid] = group;
res = group;
@@ -780,7 +959,6 @@ static const struct iommu_ops apple_dart_iommu_ops = {
.owner = THIS_MODULE,
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = apple_dart_attach_dev,
- .detach_dev = apple_dart_detach_dev,
.map_pages = apple_dart_map_pages,
.unmap_pages = apple_dart_unmap_pages,
.flush_iotlb_all = apple_dart_flush_iotlb_all,
@@ -791,30 +969,69 @@ static const struct iommu_ops apple_dart_iommu_ops = {
}
};
-static irqreturn_t apple_dart_irq(int irq, void *dev)
+static irqreturn_t apple_dart_t8020_irq(int irq, void *dev)
+{
+ struct apple_dart *dart = dev;
+ const char *fault_name = NULL;
+ u32 error = readl(dart->regs + DART_T8020_ERROR);
+ u32 error_code = FIELD_GET(DART_T8020_ERROR_CODE, error);
+ u32 addr_lo = readl(dart->regs + DART_T8020_ERROR_ADDR_LO);
+ u32 addr_hi = readl(dart->regs + DART_T8020_ERROR_ADDR_HI);
+ u64 addr = addr_lo | (((u64)addr_hi) << 32);
+ u8 stream_idx = FIELD_GET(DART_T8020_ERROR_STREAM, error);
+
+ if (!(error & DART_T8020_ERROR_FLAG))
+ return IRQ_NONE;
+
+ /* there should only be a single bit set but let's use == to be sure */
+ if (error_code == DART_T8020_ERROR_READ_FAULT)
+ fault_name = "READ FAULT";
+ else if (error_code == DART_T8020_ERROR_WRITE_FAULT)
+ fault_name = "WRITE FAULT";
+ else if (error_code == DART_T8020_ERROR_NO_PTE)
+ fault_name = "NO PTE FOR IOVA";
+ else if (error_code == DART_T8020_ERROR_NO_PMD)
+ fault_name = "NO PMD FOR IOVA";
+ else if (error_code == DART_T8020_ERROR_NO_TTBR)
+ fault_name = "NO TTBR FOR IOVA";
+ else
+ fault_name = "unknown";
+
+ dev_err_ratelimited(
+ dart->dev,
+ "translation fault: status:0x%x stream:%d code:0x%x (%s) at 0x%llx",
+ error, stream_idx, error_code, fault_name, addr);
+
+ writel(error, dart->regs + DART_T8020_ERROR);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t apple_dart_t8110_irq(int irq, void *dev)
{
struct apple_dart *dart = dev;
const char *fault_name = NULL;
- u32 error = readl(dart->regs + DART_ERROR);
- u32 error_code = FIELD_GET(DART_ERROR_CODE, error);
- u32 addr_lo = readl(dart->regs + DART_ERROR_ADDR_LO);
- u32 addr_hi = readl(dart->regs + DART_ERROR_ADDR_HI);
+ u32 error = readl(dart->regs + DART_T8110_ERROR);
+ u32 error_code = FIELD_GET(DART_T8110_ERROR_CODE, error);
+ u32 addr_lo = readl(dart->regs + DART_T8110_ERROR_ADDR_LO);
+ u32 addr_hi = readl(dart->regs + DART_T8110_ERROR_ADDR_HI);
u64 addr = addr_lo | (((u64)addr_hi) << 32);
- u8 stream_idx = FIELD_GET(DART_ERROR_STREAM, error);
+ u8 stream_idx = FIELD_GET(DART_T8110_ERROR_STREAM, error);
- if (!(error & DART_ERROR_FLAG))
+ if (!(error & DART_T8110_ERROR_FLAG))
return IRQ_NONE;
/* there should only be a single bit set but let's use == to be sure */
- if (error_code == DART_ERROR_READ_FAULT)
+ if (error_code == DART_T8110_ERROR_READ_FAULT)
fault_name = "READ FAULT";
- else if (error_code == DART_ERROR_WRITE_FAULT)
+ else if (error_code == DART_T8110_ERROR_WRITE_FAULT)
fault_name = "WRITE FAULT";
- else if (error_code == DART_ERROR_NO_PTE)
+ else if (error_code == DART_T8110_ERROR_NO_PTE)
fault_name = "NO PTE FOR IOVA";
- else if (error_code == DART_ERROR_NO_PMD)
+ else if (error_code == DART_T8110_ERROR_NO_PMD)
fault_name = "NO PMD FOR IOVA";
- else if (error_code == DART_ERROR_NO_TTBR)
+ else if (error_code == DART_T8110_ERROR_NO_PGD)
+ fault_name = "NO PGD FOR IOVA";
+ else if (error_code == DART_T8110_ERROR_NO_TTBR)
fault_name = "NO TTBR FOR IOVA";
else
fault_name = "unknown";
@@ -824,14 +1041,14 @@ static irqreturn_t apple_dart_irq(int irq, void *dev)
"translation fault: status:0x%x stream:%d code:0x%x (%s) at 0x%llx",
error, stream_idx, error_code, fault_name, addr);
- writel(error, dart->regs + DART_ERROR);
+ writel(error, dart->regs + DART_T8110_ERROR);
return IRQ_HANDLED;
}
static int apple_dart_probe(struct platform_device *pdev)
{
int ret;
- u32 dart_params[2];
+ u32 dart_params[4];
struct resource *res;
struct apple_dart *dart;
struct device *dev = &pdev->dev;
@@ -866,17 +1083,42 @@ static int apple_dart_probe(struct platform_device *pdev)
if (ret)
return ret;
- ret = apple_dart_hw_reset(dart);
- if (ret)
- goto err_clk_disable;
-
dart_params[0] = readl(dart->regs + DART_PARAMS1);
dart_params[1] = readl(dart->regs + DART_PARAMS2);
- dart->pgsize = 1 << FIELD_GET(DART_PARAMS_PAGE_SHIFT, dart_params[0]);
- dart->supports_bypass = dart_params[1] & DART_PARAMS_BYPASS_SUPPORT;
+ dart->pgsize = 1 << FIELD_GET(DART_PARAMS1_PAGE_SHIFT, dart_params[0]);
+ dart->supports_bypass = dart_params[1] & DART_PARAMS2_BYPASS_SUPPORT;
+
+ switch (dart->hw->type) {
+ case DART_T8020:
+ case DART_T6000:
+ dart->ias = 32;
+ dart->oas = dart->hw->oas;
+ dart->num_streams = dart->hw->max_sid_count;
+ break;
+
+ case DART_T8110:
+ dart_params[2] = readl(dart->regs + DART_T8110_PARAMS3);
+ dart_params[3] = readl(dart->regs + DART_T8110_PARAMS4);
+ dart->ias = FIELD_GET(DART_T8110_PARAMS3_VA_WIDTH, dart_params[2]);
+ dart->oas = FIELD_GET(DART_T8110_PARAMS3_PA_WIDTH, dart_params[2]);
+ dart->num_streams = FIELD_GET(DART_T8110_PARAMS4_NUM_SIDS, dart_params[3]);
+ break;
+ }
+
+ if (dart->num_streams > DART_MAX_STREAMS) {
+ dev_err(&pdev->dev, "Too many streams (%d > %d)\n",
+ dart->num_streams, DART_MAX_STREAMS);
+ ret = -EINVAL;
+ goto err_clk_disable;
+ }
+
dart->force_bypass = dart->pgsize > PAGE_SIZE;
- ret = request_irq(dart->irq, apple_dart_irq, IRQF_SHARED,
+ ret = apple_dart_hw_reset(dart);
+ if (ret)
+ goto err_clk_disable;
+
+ ret = request_irq(dart->irq, dart->hw->irq_handler, IRQF_SHARED,
"apple-dart fault handler", dart);
if (ret)
goto err_clk_disable;
@@ -894,8 +1136,8 @@ static int apple_dart_probe(struct platform_device *pdev)
dev_info(
&pdev->dev,
- "DART [pagesize %x, bypass support: %d, bypass forced: %d] initialized\n",
- dart->pgsize, dart->supports_bypass, dart->force_bypass);
+ "DART [pagesize %x, %d streams, bypass support: %d, bypass forced: %d] initialized\n",
+ dart->pgsize, dart->num_streams, dart->supports_bypass, dart->force_bypass);
return 0;
err_sysfs_remove:
@@ -924,16 +1166,123 @@ static int apple_dart_remove(struct platform_device *pdev)
}
static const struct apple_dart_hw apple_dart_hw_t8103 = {
+ .type = DART_T8020,
+ .irq_handler = apple_dart_t8020_irq,
+ .invalidate_tlb = apple_dart_t8020_hw_invalidate_tlb,
.oas = 36,
.fmt = APPLE_DART,
+ .max_sid_count = 16,
+
+ .enable_streams = DART_T8020_STREAMS_ENABLE,
+ .lock = DART_T8020_CONFIG,
+ .lock_bit = DART_T8020_CONFIG_LOCK,
+
+ .error = DART_T8020_ERROR,
+
+ .tcr = DART_T8020_TCR,
+ .tcr_enabled = DART_T8020_TCR_TRANSLATE_ENABLE,
+ .tcr_disabled = 0,
+ .tcr_bypass = DART_T8020_TCR_BYPASS_DAPF | DART_T8020_TCR_BYPASS_DART,
+
+ .ttbr = DART_T8020_TTBR,
+ .ttbr_valid = DART_T8020_TTBR_VALID,
+ .ttbr_addr_field_shift = DART_T8020_TTBR_ADDR_FIELD_SHIFT,
+ .ttbr_shift = DART_T8020_TTBR_SHIFT,
+ .ttbr_count = 4,
};
static const struct apple_dart_hw apple_dart_hw_t6000 = {
+ .type = DART_T6000,
+ .irq_handler = apple_dart_t8020_irq,
+ .invalidate_tlb = apple_dart_t8020_hw_invalidate_tlb,
.oas = 42,
.fmt = APPLE_DART2,
+ .max_sid_count = 16,
+
+ .enable_streams = DART_T8020_STREAMS_ENABLE,
+ .lock = DART_T8020_CONFIG,
+ .lock_bit = DART_T8020_CONFIG_LOCK,
+
+ .error = DART_T8020_ERROR,
+
+ .tcr = DART_T8020_TCR,
+ .tcr_enabled = DART_T8020_TCR_TRANSLATE_ENABLE,
+ .tcr_disabled = 0,
+ .tcr_bypass = DART_T8020_TCR_BYPASS_DAPF | DART_T8020_TCR_BYPASS_DART,
+
+ .ttbr = DART_T8020_TTBR,
+ .ttbr_valid = DART_T8020_TTBR_VALID,
+ .ttbr_addr_field_shift = DART_T8020_TTBR_ADDR_FIELD_SHIFT,
+ .ttbr_shift = DART_T8020_TTBR_SHIFT,
+ .ttbr_count = 4,
};
+static const struct apple_dart_hw apple_dart_hw_t8110 = {
+ .type = DART_T8110,
+ .irq_handler = apple_dart_t8110_irq,
+ .invalidate_tlb = apple_dart_t8110_hw_invalidate_tlb,
+ .fmt = APPLE_DART2,
+ .max_sid_count = 256,
+
+ .enable_streams = DART_T8110_ENABLE_STREAMS,
+ .lock = DART_T8110_PROTECT,
+ .lock_bit = DART_T8110_PROTECT_TTBR_TCR,
+
+ .error = DART_T8110_ERROR,
+
+ .tcr = DART_T8110_TCR,
+ .tcr_enabled = DART_T8110_TCR_TRANSLATE_ENABLE,
+ .tcr_disabled = 0,
+ .tcr_bypass = DART_T8110_TCR_BYPASS_DAPF | DART_T8110_TCR_BYPASS_DART,
+
+ .ttbr = DART_T8110_TTBR,
+ .ttbr_valid = DART_T8110_TTBR_VALID,
+ .ttbr_addr_field_shift = DART_T8110_TTBR_ADDR_FIELD_SHIFT,
+ .ttbr_shift = DART_T8110_TTBR_SHIFT,
+ .ttbr_count = 1,
+};
+
+static __maybe_unused int apple_dart_suspend(struct device *dev)
+{
+ struct apple_dart *dart = dev_get_drvdata(dev);
+ unsigned int sid, idx;
+
+ for (sid = 0; sid < dart->num_streams; sid++) {
+ dart->save_tcr[sid] = readl_relaxed(dart->regs + DART_TCR(dart, sid));
+ for (idx = 0; idx < dart->hw->ttbr_count; idx++)
+ dart->save_ttbr[sid][idx] =
+ readl(dart->regs + DART_TTBR(dart, sid, idx));
+ }
+
+ return 0;
+}
+
+static __maybe_unused int apple_dart_resume(struct device *dev)
+{
+ struct apple_dart *dart = dev_get_drvdata(dev);
+ unsigned int sid, idx;
+ int ret;
+
+ ret = apple_dart_hw_reset(dart);
+ if (ret) {
+ dev_err(dev, "Failed to reset DART on resume\n");
+ return ret;
+ }
+
+ for (sid = 0; sid < dart->num_streams; sid++) {
+ for (idx = 0; idx < dart->hw->ttbr_count; idx++)
+ writel(dart->save_ttbr[sid][idx],
+ dart->regs + DART_TTBR(dart, sid, idx));
+ writel(dart->save_tcr[sid], dart->regs + DART_TCR(dart, sid));
+ }
+
+ return 0;
+}
+
+DEFINE_SIMPLE_DEV_PM_OPS(apple_dart_pm_ops, apple_dart_suspend, apple_dart_resume);
+
static const struct of_device_id apple_dart_of_match[] = {
{ .compatible = "apple,t8103-dart", .data = &apple_dart_hw_t8103 },
+ { .compatible = "apple,t8110-dart", .data = &apple_dart_hw_t8110 },
{ .compatible = "apple,t6000-dart", .data = &apple_dart_hw_t6000 },
{},
};
@@ -944,6 +1293,7 @@ static struct platform_driver apple_dart_driver = {
.name = "apple-dart",
.of_match_table = apple_dart_of_match,
.suppress_bind_attrs = true,
+ .pm = pm_sleep_ptr(&apple_dart_pm_ops),
},
.probe = apple_dart_probe,
.remove = apple_dart_remove,
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
index 91d404deb115..d7ad49aa997e 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
@@ -250,6 +250,8 @@ static const struct of_device_id qcom_smmu_client_of_match[] __maybe_unused = {
{ .compatible = "qcom,sc7280-mdss" },
{ .compatible = "qcom,sc7280-mss-pil" },
{ .compatible = "qcom,sc8180x-mdss" },
+ { .compatible = "qcom,sc8280xp-mdss" },
+ { .compatible = "qcom,sm8150-mdss" },
{ .compatible = "qcom,sm8250-mdss" },
{ .compatible = "qcom,sdm845-mdss" },
{ .compatible = "qcom,sdm845-mss-pil" },
diff --git a/drivers/iommu/arm/arm-smmu/qcom_iommu.c b/drivers/iommu/arm/arm-smmu/qcom_iommu.c
index 270c3d9128ba..d7be3adee426 100644
--- a/drivers/iommu/arm/arm-smmu/qcom_iommu.c
+++ b/drivers/iommu/arm/arm-smmu/qcom_iommu.c
@@ -387,28 +387,6 @@ static int qcom_iommu_attach_dev(struct iommu_domain *domain, struct device *dev
return 0;
}
-static void qcom_iommu_detach_dev(struct iommu_domain *domain, struct device *dev)
-{
- struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
- struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
- struct qcom_iommu_dev *qcom_iommu = to_iommu(dev);
- unsigned i;
-
- if (WARN_ON(!qcom_domain->iommu))
- return;
-
- pm_runtime_get_sync(qcom_iommu->dev);
- for (i = 0; i < fwspec->num_ids; i++) {
- struct qcom_iommu_ctx *ctx = to_ctx(qcom_domain, fwspec->ids[i]);
-
- /* Disable the context bank: */
- iommu_writel(ctx, ARM_SMMU_CB_SCTLR, 0);
-
- ctx->domain = NULL;
- }
- pm_runtime_put_sync(qcom_iommu->dev);
-}
-
static int qcom_iommu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t pgsize, size_t pgcount,
int prot, gfp_t gfp, size_t *mapped)
@@ -583,7 +561,6 @@ static const struct iommu_ops qcom_iommu_ops = {
.pgsize_bitmap = SZ_4K | SZ_64K | SZ_1M | SZ_16M,
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = qcom_iommu_attach_dev,
- .detach_dev = qcom_iommu_detach_dev,
.map_pages = qcom_iommu_map,
.unmap_pages = qcom_iommu_unmap,
.flush_iotlb_all = qcom_iommu_flush_iotlb_all,
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index f798c44e0903..99b2646cb5c7 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -23,6 +23,7 @@
#include <linux/memremap.h>
#include <linux/mm.h>
#include <linux/mutex.h>
+#include <linux/of_iommu.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/spinlock.h>
@@ -391,6 +392,8 @@ void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
if (!is_of_node(dev_iommu_fwspec_get(dev)->iommu_fwnode))
iort_iommu_get_resv_regions(dev, list);
+ if (dev->of_node)
+ of_iommu_get_resv_regions(dev, list);
}
EXPORT_SYMBOL(iommu_dma_get_resv_regions);
@@ -713,7 +716,7 @@ static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
if (!iova)
return DMA_MAPPING_ERROR;
- if (iommu_map_atomic(domain, iova, phys - iova_off, size, prot)) {
+ if (iommu_map(domain, iova, phys - iova_off, size, prot, GFP_ATOMIC)) {
iommu_dma_free_iova(cookie, iova, size, NULL);
return DMA_MAPPING_ERROR;
}
@@ -822,7 +825,14 @@ static struct page **__iommu_dma_alloc_noncontiguous(struct device *dev,
if (!iova)
goto out_free_pages;
- if (sg_alloc_table_from_pages(sgt, pages, count, 0, size, GFP_KERNEL))
+ /*
+ * Remove the zone/policy flags from the GFP - these are applied to the
+ * __iommu_dma_alloc_pages() but are not used for the supporting
+ * internal allocations that follow.
+ */
+ gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM | __GFP_COMP);
+
+ if (sg_alloc_table_from_pages(sgt, pages, count, 0, size, gfp))
goto out_free_iova;
if (!(ioprot & IOMMU_CACHE)) {
@@ -833,7 +843,8 @@ static struct page **__iommu_dma_alloc_noncontiguous(struct device *dev,
arch_dma_prep_coherent(sg_page(sg), sg->length);
}
- ret = iommu_map_sg_atomic(domain, iova, sgt->sgl, sgt->orig_nents, ioprot);
+ ret = iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents, ioprot,
+ gfp);
if (ret < 0 || ret < size)
goto out_free_sg;
@@ -1281,7 +1292,7 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
* We'll leave any physical concatenation to the IOMMU driver's
* implementation - it knows better than we do.
*/
- ret = iommu_map_sg_atomic(domain, iova, sg, nents, prot);
+ ret = iommu_map_sg(domain, iova, sg, nents, prot, GFP_ATOMIC);
if (ret < 0 || ret < iova_len)
goto out_free_iova;
@@ -1615,7 +1626,7 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
if (!iova)
goto out_free_page;
- if (iommu_map(domain, iova, msi_addr, size, prot))
+ if (iommu_map(domain, iova, msi_addr, size, prot, GFP_KERNEL))
goto out_free_iova;
INIT_LIST_HEAD(&msi_page->list);
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index b0cde2211987..483aaaeb6dae 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -185,38 +185,43 @@ static sysmmu_pte_t *page_entry(sysmmu_pte_t *sent, sysmmu_iova_t iova)
lv2table_base(sent)) + lv2ent_offset(iova);
}
-/*
- * IOMMU fault information register
- */
-struct sysmmu_fault_info {
- unsigned int bit; /* bit number in STATUS register */
- unsigned short addr_reg; /* register to read VA fault address */
+struct sysmmu_fault {
+ sysmmu_iova_t addr; /* IOVA address that caused fault */
+ const char *name; /* human readable fault name */
+ unsigned int type; /* fault type for report_iommu_fault() */
+};
+
+struct sysmmu_v1_fault_info {
+ unsigned short addr_reg; /* register to read IOVA fault address */
const char *name; /* human readable fault name */
unsigned int type; /* fault type for report_iommu_fault */
};
-static const struct sysmmu_fault_info sysmmu_faults[] = {
- { 0, REG_PAGE_FAULT_ADDR, "PAGE", IOMMU_FAULT_READ },
- { 1, REG_AR_FAULT_ADDR, "AR MULTI-HIT", IOMMU_FAULT_READ },
- { 2, REG_AW_FAULT_ADDR, "AW MULTI-HIT", IOMMU_FAULT_WRITE },
- { 3, REG_DEFAULT_SLAVE_ADDR, "BUS ERROR", IOMMU_FAULT_READ },
- { 4, REG_AR_FAULT_ADDR, "AR SECURITY PROTECTION", IOMMU_FAULT_READ },
- { 5, REG_AR_FAULT_ADDR, "AR ACCESS PROTECTION", IOMMU_FAULT_READ },
- { 6, REG_AW_FAULT_ADDR, "AW SECURITY PROTECTION", IOMMU_FAULT_WRITE },
- { 7, REG_AW_FAULT_ADDR, "AW ACCESS PROTECTION", IOMMU_FAULT_WRITE },
+static const struct sysmmu_v1_fault_info sysmmu_v1_faults[] = {
+ { REG_PAGE_FAULT_ADDR, "PAGE", IOMMU_FAULT_READ },
+ { REG_AR_FAULT_ADDR, "MULTI-HIT", IOMMU_FAULT_READ },
+ { REG_AW_FAULT_ADDR, "MULTI-HIT", IOMMU_FAULT_WRITE },
+ { REG_DEFAULT_SLAVE_ADDR, "BUS ERROR", IOMMU_FAULT_READ },
+ { REG_AR_FAULT_ADDR, "SECURITY PROTECTION", IOMMU_FAULT_READ },
+ { REG_AR_FAULT_ADDR, "ACCESS PROTECTION", IOMMU_FAULT_READ },
+ { REG_AW_FAULT_ADDR, "SECURITY PROTECTION", IOMMU_FAULT_WRITE },
+ { REG_AW_FAULT_ADDR, "ACCESS PROTECTION", IOMMU_FAULT_WRITE },
+};
+
+/* SysMMU v5 has the same faults for AR (0..4 bits) and AW (16..20 bits) */
+static const char * const sysmmu_v5_fault_names[] = {
+ "PTW",
+ "PAGE",
+ "MULTI-HIT",
+ "ACCESS PROTECTION",
+ "SECURITY PROTECTION"
};
-static const struct sysmmu_fault_info sysmmu_v5_faults[] = {
- { 0, REG_V5_FAULT_AR_VA, "AR PTW", IOMMU_FAULT_READ },
- { 1, REG_V5_FAULT_AR_VA, "AR PAGE", IOMMU_FAULT_READ },
- { 2, REG_V5_FAULT_AR_VA, "AR MULTI-HIT", IOMMU_FAULT_READ },
- { 3, REG_V5_FAULT_AR_VA, "AR ACCESS PROTECTION", IOMMU_FAULT_READ },
- { 4, REG_V5_FAULT_AR_VA, "AR SECURITY PROTECTION", IOMMU_FAULT_READ },
- { 16, REG_V5_FAULT_AW_VA, "AW PTW", IOMMU_FAULT_WRITE },
- { 17, REG_V5_FAULT_AW_VA, "AW PAGE", IOMMU_FAULT_WRITE },
- { 18, REG_V5_FAULT_AW_VA, "AW MULTI-HIT", IOMMU_FAULT_WRITE },
- { 19, REG_V5_FAULT_AW_VA, "AW ACCESS PROTECTION", IOMMU_FAULT_WRITE },
- { 20, REG_V5_FAULT_AW_VA, "AW SECURITY PROTECTION", IOMMU_FAULT_WRITE },
+static const char * const sysmmu_v7_fault_names[] = {
+ "PTW",
+ "PAGE",
+ "ACCESS PROTECTION",
+ "RESERVED"
};
/*
@@ -246,9 +251,12 @@ struct exynos_iommu_domain {
struct iommu_domain domain; /* generic domain data structure */
};
+struct sysmmu_drvdata;
+
/*
* SysMMU version specific data. Contains offsets for the registers which can
* be found in different SysMMU variants, but have different offset values.
+ * Also contains version specific callbacks to abstract the hardware.
*/
struct sysmmu_variant {
u32 pt_base; /* page table base address (physical) */
@@ -259,6 +267,11 @@ struct sysmmu_variant {
u32 flush_end; /* end address of range invalidation */
u32 int_status; /* interrupt status information */
u32 int_clear; /* clear the interrupt */
+ u32 fault_va; /* IOVA address that caused fault */
+ u32 fault_info; /* fault transaction info */
+
+ int (*get_fault_info)(struct sysmmu_drvdata *data, unsigned int itype,
+ struct sysmmu_fault *fault);
};
/*
@@ -293,6 +306,59 @@ struct sysmmu_drvdata {
#define SYSMMU_REG(data, reg) ((data)->sfrbase + (data)->variant->reg)
+static int exynos_sysmmu_v1_get_fault_info(struct sysmmu_drvdata *data,
+ unsigned int itype,
+ struct sysmmu_fault *fault)
+{
+ const struct sysmmu_v1_fault_info *finfo;
+
+ if (itype >= ARRAY_SIZE(sysmmu_v1_faults))
+ return -ENXIO;
+
+ finfo = &sysmmu_v1_faults[itype];
+ fault->addr = readl(data->sfrbase + finfo->addr_reg);
+ fault->name = finfo->name;
+ fault->type = finfo->type;
+
+ return 0;
+}
+
+static int exynos_sysmmu_v5_get_fault_info(struct sysmmu_drvdata *data,
+ unsigned int itype,
+ struct sysmmu_fault *fault)
+{
+ unsigned int addr_reg;
+
+ if (itype < ARRAY_SIZE(sysmmu_v5_fault_names)) {
+ fault->type = IOMMU_FAULT_READ;
+ addr_reg = REG_V5_FAULT_AR_VA;
+ } else if (itype >= 16 && itype <= 20) {
+ fault->type = IOMMU_FAULT_WRITE;
+ addr_reg = REG_V5_FAULT_AW_VA;
+ itype -= 16;
+ } else {
+ return -ENXIO;
+ }
+
+ fault->name = sysmmu_v5_fault_names[itype];
+ fault->addr = readl(data->sfrbase + addr_reg);
+
+ return 0;
+}
+
+static int exynos_sysmmu_v7_get_fault_info(struct sysmmu_drvdata *data,
+ unsigned int itype,
+ struct sysmmu_fault *fault)
+{
+ u32 info = readl(SYSMMU_REG(data, fault_info));
+
+ fault->addr = readl(SYSMMU_REG(data, fault_va));
+ fault->name = sysmmu_v7_fault_names[itype % 4];
+ fault->type = (info & BIT(20)) ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;
+
+ return 0;
+}
+
/* SysMMU v1..v3 */
static const struct sysmmu_variant sysmmu_v1_variant = {
.flush_all = 0x0c,
@@ -300,9 +366,11 @@ static const struct sysmmu_variant sysmmu_v1_variant = {
.pt_base = 0x14,
.int_status = 0x18,
.int_clear = 0x1c,
+
+ .get_fault_info = exynos_sysmmu_v1_get_fault_info,
};
-/* SysMMU v5 and v7 (non-VM capable) */
+/* SysMMU v5 */
static const struct sysmmu_variant sysmmu_v5_variant = {
.pt_base = 0x0c,
.flush_all = 0x10,
@@ -312,9 +380,27 @@ static const struct sysmmu_variant sysmmu_v5_variant = {
.flush_end = 0x24,
.int_status = 0x60,
.int_clear = 0x64,
+
+ .get_fault_info = exynos_sysmmu_v5_get_fault_info,
};
-/* SysMMU v7: VM capable register set */
+/* SysMMU v7: non-VM capable register layout */
+static const struct sysmmu_variant sysmmu_v7_variant = {
+ .pt_base = 0x0c,
+ .flush_all = 0x10,
+ .flush_entry = 0x14,
+ .flush_range = 0x18,
+ .flush_start = 0x20,
+ .flush_end = 0x24,
+ .int_status = 0x60,
+ .int_clear = 0x64,
+ .fault_va = 0x70,
+ .fault_info = 0x78,
+
+ .get_fault_info = exynos_sysmmu_v7_get_fault_info,
+};
+
+/* SysMMU v7: VM capable register layout */
static const struct sysmmu_variant sysmmu_v7_vm_variant = {
.pt_base = 0x800c,
.flush_all = 0x8010,
@@ -324,6 +410,10 @@ static const struct sysmmu_variant sysmmu_v7_vm_variant = {
.flush_end = 0x8024,
.int_status = 0x60,
.int_clear = 0x64,
+ .fault_va = 0x1000,
+ .fault_info = 0x1004,
+
+ .get_fault_info = exynos_sysmmu_v7_get_fault_info,
};
static struct exynos_iommu_domain *to_exynos_domain(struct iommu_domain *dom)
@@ -446,75 +536,63 @@ static void __sysmmu_get_version(struct sysmmu_drvdata *data)
if (data->has_vcr)
data->variant = &sysmmu_v7_vm_variant;
else
- data->variant = &sysmmu_v5_variant;
+ data->variant = &sysmmu_v7_variant;
}
__sysmmu_disable_clocks(data);
}
static void show_fault_information(struct sysmmu_drvdata *data,
- const struct sysmmu_fault_info *finfo,
- sysmmu_iova_t fault_addr)
+ const struct sysmmu_fault *fault)
{
sysmmu_pte_t *ent;
- dev_err(data->sysmmu, "%s: %s FAULT occurred at %#x\n",
- dev_name(data->master), finfo->name, fault_addr);
+ dev_err(data->sysmmu, "%s: [%s] %s FAULT occurred at %#x\n",
+ dev_name(data->master),
+ fault->type == IOMMU_FAULT_READ ? "READ" : "WRITE",
+ fault->name, fault->addr);
dev_dbg(data->sysmmu, "Page table base: %pa\n", &data->pgtable);
- ent = section_entry(phys_to_virt(data->pgtable), fault_addr);
+ ent = section_entry(phys_to_virt(data->pgtable), fault->addr);
dev_dbg(data->sysmmu, "\tLv1 entry: %#x\n", *ent);
if (lv1ent_page(ent)) {
- ent = page_entry(ent, fault_addr);
+ ent = page_entry(ent, fault->addr);
dev_dbg(data->sysmmu, "\t Lv2 entry: %#x\n", *ent);
}
}
static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
{
- /* SYSMMU is in blocked state when interrupt occurred. */
struct sysmmu_drvdata *data = dev_id;
- const struct sysmmu_fault_info *finfo;
- unsigned int i, n, itype;
- sysmmu_iova_t fault_addr;
+ unsigned int itype;
+ struct sysmmu_fault fault;
int ret = -ENOSYS;
WARN_ON(!data->active);
- if (MMU_MAJ_VER(data->version) < 5) {
- finfo = sysmmu_faults;
- n = ARRAY_SIZE(sysmmu_faults);
- } else {
- finfo = sysmmu_v5_faults;
- n = ARRAY_SIZE(sysmmu_v5_faults);
- }
-
spin_lock(&data->lock);
-
clk_enable(data->clk_master);
itype = __ffs(readl(SYSMMU_REG(data, int_status)));
- for (i = 0; i < n; i++, finfo++)
- if (finfo->bit == itype)
- break;
- /* unknown/unsupported fault */
- BUG_ON(i == n);
-
- /* print debug message */
- fault_addr = readl(data->sfrbase + finfo->addr_reg);
- show_fault_information(data, finfo, fault_addr);
-
- if (data->domain)
- ret = report_iommu_fault(&data->domain->domain,
- data->master, fault_addr, finfo->type);
- /* fault is not recovered by fault handler */
- BUG_ON(ret != 0);
+ ret = data->variant->get_fault_info(data, itype, &fault);
+ if (ret) {
+ dev_err(data->sysmmu, "Unhandled interrupt bit %u\n", itype);
+ goto out;
+ }
+ show_fault_information(data, &fault);
+ if (data->domain) {
+ ret = report_iommu_fault(&data->domain->domain, data->master,
+ fault.addr, fault.type);
+ }
+ if (ret)
+ panic("Unrecoverable System MMU Fault!");
+
+out:
writel(1 << itype, SYSMMU_REG(data, int_clear));
+ /* SysMMU is in blocked state when interrupt occurred */
sysmmu_unblock(data);
-
clk_disable(data->clk_master);
-
spin_unlock(&data->lock);
return IRQ_HANDLED;
@@ -1346,8 +1424,10 @@ static void exynos_iommu_release_device(struct device *dev)
struct iommu_group *group = iommu_group_get(dev);
if (group) {
+#ifndef CONFIG_ARM
WARN_ON(owner->domain !=
iommu_group_default_domain(group));
+#endif
exynos_iommu_detach_device(owner->domain, dev);
iommu_group_put(group);
}
@@ -1398,13 +1478,15 @@ static int exynos_iommu_of_xlate(struct device *dev,
static const struct iommu_ops exynos_iommu_ops = {
.domain_alloc = exynos_iommu_domain_alloc,
.device_group = generic_device_group,
+#ifdef CONFIG_ARM
+ .set_platform_dma_ops = exynos_iommu_release_device,
+#endif
.probe_device = exynos_iommu_probe_device,
.release_device = exynos_iommu_release_device,
.pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE,
.of_xlate = exynos_iommu_of_xlate,
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = exynos_iommu_attach_device,
- .detach_dev = exynos_iommu_detach_device,
.map = exynos_iommu_map,
.unmap = exynos_iommu_unmap,
.iova_to_phys = exynos_iommu_iova_to_phys,
@@ -1446,7 +1528,7 @@ static int __init exynos_iommu_init(void)
return 0;
err_reg_driver:
- platform_driver_unregister(&exynos_sysmmu_driver);
+ kmem_cache_free(lv2table_kmem_cache, zero_lv2_table);
err_zero_lv2:
kmem_cache_destroy(lv2table_kmem_cache);
return ret;
diff --git a/drivers/iommu/fsl_pamu_domain.c b/drivers/iommu/fsl_pamu_domain.c
index 4408ac3c49b6..bce372297099 100644
--- a/drivers/iommu/fsl_pamu_domain.c
+++ b/drivers/iommu/fsl_pamu_domain.c
@@ -283,9 +283,9 @@ static int fsl_pamu_attach_device(struct iommu_domain *domain,
return ret;
}
-static void fsl_pamu_detach_device(struct iommu_domain *domain,
- struct device *dev)
+static void fsl_pamu_set_platform_dma(struct device *dev)
{
+ struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
const u32 *prop;
int len;
@@ -452,9 +452,9 @@ static const struct iommu_ops fsl_pamu_ops = {
.domain_alloc = fsl_pamu_domain_alloc,
.probe_device = fsl_pamu_probe_device,
.device_group = fsl_pamu_device_group,
+ .set_platform_dma_ops = fsl_pamu_set_platform_dma,
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = fsl_pamu_attach_device,
- .detach_dev = fsl_pamu_detach_device,
.iova_to_phys = fsl_pamu_iova_to_phys,
.free = fsl_pamu_domain_free,
}
diff --git a/drivers/iommu/intel/Kconfig b/drivers/iommu/intel/Kconfig
index b7dff5092fd2..12e1e90fdae1 100644
--- a/drivers/iommu/intel/Kconfig
+++ b/drivers/iommu/intel/Kconfig
@@ -96,4 +96,15 @@ config INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON
passing intel_iommu=sm_on to the kernel. If not sure, please use
the default value.
+config INTEL_IOMMU_PERF_EVENTS
+ def_bool y
+ bool "Intel IOMMU performance events"
+ depends on INTEL_IOMMU && PERF_EVENTS
+ help
+ Selecting this option will enable the performance monitoring
+ infrastructure in the Intel IOMMU. It collects information about
+ key events occurring during operation of the remapping hardware,
+ to aid performance tuning and debug. These are available on modern
+ processors which support Intel VT-d 4.0 and later.
+
endif # INTEL_IOMMU
diff --git a/drivers/iommu/intel/Makefile b/drivers/iommu/intel/Makefile
index fa0dae16441c..7af3b8a4f2a0 100644
--- a/drivers/iommu/intel/Makefile
+++ b/drivers/iommu/intel/Makefile
@@ -6,3 +6,4 @@ obj-$(CONFIG_DMAR_PERF) += perf.o
obj-$(CONFIG_INTEL_IOMMU_DEBUGFS) += debugfs.o
obj-$(CONFIG_INTEL_IOMMU_SVM) += svm.o
obj-$(CONFIG_IRQ_REMAP) += irq_remapping.o
+obj-$(CONFIG_INTEL_IOMMU_PERF_EVENTS) += perfmon.o
diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c
index b00a0ceb2d13..6acfe879589c 100644
--- a/drivers/iommu/intel/dmar.c
+++ b/drivers/iommu/intel/dmar.c
@@ -34,6 +34,7 @@
#include "../irq_remapping.h"
#include "perf.h"
#include "trace.h"
+#include "perfmon.h"
typedef int (*dmar_res_handler_t)(struct acpi_dmar_header *, void *);
struct dmar_res_callback {
@@ -427,6 +428,8 @@ static int dmar_parse_one_drhd(struct acpi_dmar_header *header, void *arg)
memcpy(dmaru->hdr, header, header->length);
dmaru->reg_base_addr = drhd->address;
dmaru->segment = drhd->segment;
+ /* The size of the register set is 2 ^ N 4 KB pages. */
+ dmaru->reg_size = 1UL << (drhd->size + 12);
dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */
dmaru->devices = dmar_alloc_dev_scope((void *)(drhd + 1),
((void *)drhd) + drhd->header.length,
@@ -956,17 +959,18 @@ static void unmap_iommu(struct intel_iommu *iommu)
/**
* map_iommu: map the iommu's registers
* @iommu: the iommu to map
- * @phys_addr: the physical address of the base resgister
+ * @drhd: DMA remapping hardware definition structure
*
* Memory map the iommu's registers. Start w/ a single page, and
* possibly expand if that turns out to be insufficent.
*/
-static int map_iommu(struct intel_iommu *iommu, u64 phys_addr)
+static int map_iommu(struct intel_iommu *iommu, struct dmar_drhd_unit *drhd)
{
+ u64 phys_addr = drhd->reg_base_addr;
int map_size, err=0;
iommu->reg_phys = phys_addr;
- iommu->reg_size = VTD_PAGE_SIZE;
+ iommu->reg_size = drhd->reg_size;
if (!request_mem_region(iommu->reg_phys, iommu->reg_size, iommu->name)) {
pr_err("Can't reserve memory\n");
@@ -1013,6 +1017,16 @@ static int map_iommu(struct intel_iommu *iommu, u64 phys_addr)
goto release;
}
}
+
+ if (cap_ecmds(iommu->cap)) {
+ int i;
+
+ for (i = 0; i < DMA_MAX_NUM_ECMDCAP; i++) {
+ iommu->ecmdcap[i] = dmar_readq(iommu->reg + DMAR_ECCAP_REG +
+ i * DMA_ECMD_REG_STEP);
+ }
+ }
+
err = 0;
goto out;
@@ -1050,7 +1064,7 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
}
sprintf(iommu->name, "dmar%d", iommu->seq_id);
- err = map_iommu(iommu, drhd->reg_base_addr);
+ err = map_iommu(iommu, drhd);
if (err) {
pr_err("Failed to map %s\n", iommu->name);
goto error_free_seq_id;
@@ -1103,6 +1117,9 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
if (sts & DMA_GSTS_QIES)
iommu->gcmd |= DMA_GCMD_QIE;
+ if (alloc_iommu_pmu(iommu))
+ pr_debug("Cannot alloc PMU for iommu (seq_id = %d)\n", iommu->seq_id);
+
raw_spin_lock_init(&iommu->register_lock);
/*
@@ -1127,6 +1144,8 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
err = iommu_device_register(&iommu->iommu, &intel_iommu_ops, NULL);
if (err)
goto err_sysfs;
+
+ iommu_pmu_register(iommu);
}
drhd->iommu = iommu;
@@ -1137,6 +1156,7 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
err_sysfs:
iommu_device_sysfs_remove(&iommu->iommu);
err_unmap:
+ free_iommu_pmu(iommu);
unmap_iommu(iommu);
error_free_seq_id:
ida_free(&dmar_seq_ids, iommu->seq_id);
@@ -1148,10 +1168,13 @@ error:
static void free_iommu(struct intel_iommu *iommu)
{
if (intel_iommu_enabled && !iommu->drhd->ignored) {
+ iommu_pmu_unregister(iommu);
iommu_device_unregister(&iommu->iommu);
iommu_device_sysfs_remove(&iommu->iommu);
}
+ free_iommu_pmu(iommu);
+
if (iommu->irq) {
if (iommu->pr_irq) {
free_irq(iommu->pr_irq, iommu);
@@ -1859,6 +1882,8 @@ static inline int dmar_msi_reg(struct intel_iommu *iommu, int irq)
return DMAR_FECTL_REG;
else if (iommu->pr_irq == irq)
return DMAR_PECTL_REG;
+ else if (iommu->perf_irq == irq)
+ return DMAR_PERFINTRCTL_REG;
else
BUG();
}
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 59df7e42fd53..7c2f4bd33582 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -16,7 +16,6 @@
#include <linux/crash_dump.h>
#include <linux/dma-direct.h>
#include <linux/dmi.h>
-#include <linux/intel-svm.h>
#include <linux/memory.h>
#include <linux/pci.h>
#include <linux/pci-ats.h>
@@ -30,6 +29,7 @@
#include "../iommu-sva.h"
#include "pasid.h"
#include "cap_audit.h"
+#include "perfmon.h"
#define ROOT_SIZE VTD_PAGE_SIZE
#define CONTEXT_SIZE VTD_PAGE_SIZE
@@ -362,12 +362,12 @@ static int __init intel_iommu_setup(char *str)
}
__setup("intel_iommu=", intel_iommu_setup);
-void *alloc_pgtable_page(int node)
+void *alloc_pgtable_page(int node, gfp_t gfp)
{
struct page *page;
void *vaddr = NULL;
- page = alloc_pages_node(node, GFP_ATOMIC | __GFP_ZERO, 0);
+ page = alloc_pages_node(node, gfp | __GFP_ZERO, 0);
if (page)
vaddr = page_address(page);
return vaddr;
@@ -612,7 +612,7 @@ struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
if (!alloc)
return NULL;
- context = alloc_pgtable_page(iommu->node);
+ context = alloc_pgtable_page(iommu->node, GFP_ATOMIC);
if (!context)
return NULL;
@@ -908,7 +908,8 @@ pgtable_walk:
#endif
static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
- unsigned long pfn, int *target_level)
+ unsigned long pfn, int *target_level,
+ gfp_t gfp)
{
struct dma_pte *parent, *pte;
int level = agaw_to_level(domain->agaw);
@@ -935,7 +936,7 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
if (!dma_pte_present(pte)) {
uint64_t pteval;
- tmp_page = alloc_pgtable_page(domain->nid);
+ tmp_page = alloc_pgtable_page(domain->nid, gfp);
if (!tmp_page)
return NULL;
@@ -1186,7 +1187,7 @@ static int iommu_alloc_root_entry(struct intel_iommu *iommu)
{
struct root_entry *root;
- root = (struct root_entry *)alloc_pgtable_page(iommu->node);
+ root = (struct root_entry *)alloc_pgtable_page(iommu->node, GFP_ATOMIC);
if (!root) {
pr_err("Allocating root entry for %s failed\n",
iommu->name);
@@ -2150,7 +2151,8 @@ static void switch_to_super_page(struct dmar_domain *domain,
while (start_pfn <= end_pfn) {
if (!pte)
- pte = pfn_to_dma_pte(domain, start_pfn, &level);
+ pte = pfn_to_dma_pte(domain, start_pfn, &level,
+ GFP_ATOMIC);
if (dma_pte_present(pte)) {
dma_pte_free_pagetable(domain, start_pfn,
@@ -2172,7 +2174,8 @@ static void switch_to_super_page(struct dmar_domain *domain,
static int
__domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
- unsigned long phys_pfn, unsigned long nr_pages, int prot)
+ unsigned long phys_pfn, unsigned long nr_pages, int prot,
+ gfp_t gfp)
{
struct dma_pte *first_pte = NULL, *pte = NULL;
unsigned int largepage_lvl = 0;
@@ -2202,7 +2205,8 @@ __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
largepage_lvl = hardware_largepage_caps(domain, iov_pfn,
phys_pfn, nr_pages);
- pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl);
+ pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl,
+ gfp);
if (!pte)
return -ENOMEM;
first_pte = pte;
@@ -2368,7 +2372,7 @@ static int iommu_domain_identity_map(struct dmar_domain *domain,
return __domain_mapping(domain, first_vpfn,
first_vpfn, last_vpfn - first_vpfn + 1,
- DMA_PTE_READ|DMA_PTE_WRITE);
+ DMA_PTE_READ|DMA_PTE_WRITE, GFP_KERNEL);
}
static int md_domain_init(struct dmar_domain *domain, int guest_width);
@@ -2676,7 +2680,7 @@ static int copy_context_table(struct intel_iommu *iommu,
if (!old_ce)
goto out;
- new_ce = alloc_pgtable_page(iommu->node);
+ new_ce = alloc_pgtable_page(iommu->node, GFP_KERNEL);
if (!new_ce)
goto out_unmap;
@@ -4005,7 +4009,8 @@ int __init intel_iommu_init(void)
* is likely to be much lower than the overhead of synchronizing
* the virtual and physical IOMMU page-tables.
*/
- if (cap_caching_mode(iommu->cap)) {
+ if (cap_caching_mode(iommu->cap) &&
+ !first_level_by_default(IOMMU_DOMAIN_DMA)) {
pr_info_once("IOMMU batching disallowed due to virtualization\n");
iommu_set_dma_strict();
}
@@ -4013,6 +4018,8 @@ int __init intel_iommu_init(void)
intel_iommu_groups,
"%s", iommu->name);
iommu_device_register(&iommu->iommu, &intel_iommu_ops, NULL);
+
+ iommu_pmu_register(iommu);
}
up_read(&dmar_global_lock);
@@ -4136,7 +4143,7 @@ static int md_domain_init(struct dmar_domain *domain, int guest_width)
domain->max_addr = 0;
/* always allocate the top pgd */
- domain->pgd = alloc_pgtable_page(domain->nid);
+ domain->pgd = alloc_pgtable_page(domain->nid, GFP_ATOMIC);
if (!domain->pgd)
return -ENOMEM;
domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
@@ -4298,7 +4305,7 @@ static int intel_iommu_map(struct iommu_domain *domain,
the low bits of hpa would take us onto the next page */
size = aligned_nrpages(hpa, size);
return __domain_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
- hpa >> VTD_PAGE_SHIFT, size, prot);
+ hpa >> VTD_PAGE_SHIFT, size, prot, gfp);
}
static int intel_iommu_map_pages(struct iommu_domain *domain,
@@ -4333,7 +4340,8 @@ static size_t intel_iommu_unmap(struct iommu_domain *domain,
/* Cope with horrid API which requires us to unmap more than the
size argument if it happens to be a large-page mapping. */
- BUG_ON(!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level));
+ BUG_ON(!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level,
+ GFP_ATOMIC));
if (size < VTD_PAGE_SIZE << level_to_offset_bits(level))
size = VTD_PAGE_SIZE << level_to_offset_bits(level);
@@ -4346,7 +4354,12 @@ static size_t intel_iommu_unmap(struct iommu_domain *domain,
if (dmar_domain->max_addr == iova + size)
dmar_domain->max_addr = iova;
- iommu_iotlb_gather_add_page(domain, gather, iova, size);
+ /*
+ * We do not use page-selective IOTLB invalidation in flush queue,
+ * so there is no need to track page and sync iotlb.
+ */
+ if (!iommu_iotlb_gather_queued(gather))
+ iommu_iotlb_gather_add_page(domain, gather, iova, size);
return size;
}
@@ -4392,7 +4405,8 @@ static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
int level = 0;
u64 phys = 0;
- pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level);
+ pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level,
+ GFP_ATOMIC);
if (pte && dma_pte_present(pte))
phys = dma_pte_addr(pte) +
(iova & (BIT_MASK(level_to_offset_bits(level) +
@@ -4464,8 +4478,6 @@ static bool intel_iommu_capable(struct device *dev, enum iommu_cap cap)
switch (cap) {
case IOMMU_CAP_CACHE_COHERENCY:
return true;
- case IOMMU_CAP_INTR_REMAP:
- return irq_remapping_enabled == 1;
case IOMMU_CAP_PRE_BOOT_PROTECTION:
return dmar_platform_optin();
case IOMMU_CAP_ENFORCE_CACHE_COHERENCY:
@@ -4642,8 +4654,12 @@ static int intel_iommu_enable_sva(struct device *dev)
return -EINVAL;
ret = iopf_queue_add_device(iommu->iopf_queue, dev);
- if (!ret)
- ret = iommu_register_device_fault_handler(dev, iommu_queue_iopf, dev);
+ if (ret)
+ return ret;
+
+ ret = iommu_register_device_fault_handler(dev, iommu_queue_iopf, dev);
+ if (ret)
+ iopf_queue_remove_device(iommu->iopf_queue, dev);
return ret;
}
@@ -4655,8 +4671,12 @@ static int intel_iommu_disable_sva(struct device *dev)
int ret;
ret = iommu_unregister_device_fault_handler(dev);
- if (!ret)
- ret = iopf_queue_remove_device(iommu->iopf_queue, dev);
+ if (ret)
+ return ret;
+
+ ret = iopf_queue_remove_device(iommu->iopf_queue, dev);
+ if (ret)
+ iommu_register_device_fault_handler(dev, iommu_queue_iopf, dev);
return ret;
}
@@ -5023,3 +5043,59 @@ void quirk_extra_dev_tlb_flush(struct device_domain_info *info,
pasid, qdep, address, mask);
}
}
+
+#define ecmd_get_status_code(res) (((res) & 0xff) >> 1)
+
+/*
+ * Function to submit a command to the enhanced command interface. The
+ * valid enhanced command descriptions are defined in Table 47 of the
+ * VT-d spec. The VT-d hardware implementation may support some but not
+ * all commands, which can be determined by checking the Enhanced
+ * Command Capability Register.
+ *
+ * Return values:
+ * - 0: Command successful without any error;
+ * - Negative: software error value;
+ * - Nonzero positive: failure status code defined in Table 48.
+ */
+int ecmd_submit_sync(struct intel_iommu *iommu, u8 ecmd, u64 oa, u64 ob)
+{
+ unsigned long flags;
+ u64 res;
+ int ret;
+
+ if (!cap_ecmds(iommu->cap))
+ return -ENODEV;
+
+ raw_spin_lock_irqsave(&iommu->register_lock, flags);
+
+ res = dmar_readq(iommu->reg + DMAR_ECRSP_REG);
+ if (res & DMA_ECMD_ECRSP_IP) {
+ ret = -EBUSY;
+ goto err;
+ }
+
+ /*
+ * Unconditionally write the operand B, because
+ * - There is no side effect if an ecmd doesn't require an
+ * operand B, but we set the register to some value.
+ * - It's not invoked in any critical path. The extra MMIO
+ * write doesn't bring any performance concerns.
+ */
+ dmar_writeq(iommu->reg + DMAR_ECEO_REG, ob);
+ dmar_writeq(iommu->reg + DMAR_ECMD_REG, ecmd | (oa << DMA_ECMD_OA_SHIFT));
+
+ IOMMU_WAIT_OP(iommu, DMAR_ECRSP_REG, dmar_readq,
+ !(res & DMA_ECMD_ECRSP_IP), res);
+
+ if (res & DMA_ECMD_ECRSP_IP) {
+ ret = -ETIMEDOUT;
+ goto err;
+ }
+
+ ret = ecmd_get_status_code(res);
+err:
+ raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
+
+ return ret;
+}
diff --git a/drivers/iommu/intel/iommu.h b/drivers/iommu/intel/iommu.h
index 06e61e474856..d6df3b865812 100644
--- a/drivers/iommu/intel/iommu.h
+++ b/drivers/iommu/intel/iommu.h
@@ -22,6 +22,7 @@
#include <linux/ioasid.h>
#include <linux/bitfield.h>
#include <linux/xarray.h>
+#include <linux/perf_event.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>
@@ -125,6 +126,17 @@
#define DMAR_MTRR_PHYSMASK8_REG 0x208
#define DMAR_MTRR_PHYSBASE9_REG 0x210
#define DMAR_MTRR_PHYSMASK9_REG 0x218
+#define DMAR_PERFCAP_REG 0x300
+#define DMAR_PERFCFGOFF_REG 0x310
+#define DMAR_PERFOVFOFF_REG 0x318
+#define DMAR_PERFCNTROFF_REG 0x31c
+#define DMAR_PERFINTRSTS_REG 0x324
+#define DMAR_PERFINTRCTL_REG 0x328
+#define DMAR_PERFEVNTCAP_REG 0x380
+#define DMAR_ECMD_REG 0x400
+#define DMAR_ECEO_REG 0x408
+#define DMAR_ECRSP_REG 0x410
+#define DMAR_ECCAP_REG 0x430
#define DMAR_VCCAP_REG 0xe30 /* Virtual command capability register */
#define DMAR_VCMD_REG 0xe00 /* Virtual command register */
#define DMAR_VCRSP_REG 0xe10 /* Virtual command response register */
@@ -148,6 +160,7 @@
*/
#define cap_esrtps(c) (((c) >> 63) & 1)
#define cap_esirtps(c) (((c) >> 62) & 1)
+#define cap_ecmds(c) (((c) >> 61) & 1)
#define cap_fl5lp_support(c) (((c) >> 60) & 1)
#define cap_pi_support(c) (((c) >> 59) & 1)
#define cap_fl1gp_support(c) (((c) >> 56) & 1)
@@ -179,7 +192,8 @@
* Extended Capability Register
*/
-#define ecap_rps(e) (((e) >> 49) & 0x1)
+#define ecap_pms(e) (((e) >> 51) & 0x1)
+#define ecap_rps(e) (((e) >> 49) & 0x1)
#define ecap_smpwc(e) (((e) >> 48) & 0x1)
#define ecap_flts(e) (((e) >> 47) & 0x1)
#define ecap_slts(e) (((e) >> 46) & 0x1)
@@ -210,6 +224,22 @@
#define ecap_max_handle_mask(e) (((e) >> 20) & 0xf)
#define ecap_sc_support(e) (((e) >> 7) & 0x1) /* Snooping Control */
+/*
+ * Decoding Perf Capability Register
+ */
+#define pcap_num_cntr(p) ((p) & 0xffff)
+#define pcap_cntr_width(p) (((p) >> 16) & 0x7f)
+#define pcap_num_event_group(p) (((p) >> 24) & 0x1f)
+#define pcap_filters_mask(p) (((p) >> 32) & 0x1f)
+#define pcap_interrupt(p) (((p) >> 50) & 0x1)
+/* The counter stride is calculated as 2 ^ (x+10) bytes */
+#define pcap_cntr_stride(p) (1ULL << ((((p) >> 52) & 0x7) + 10))
+
+/*
+ * Decoding Perf Event Capability Register
+ */
+#define pecap_es(p) ((p) & 0xfffffff)
+
/* Virtual command interface capability */
#define vccap_pasid(v) (((v) & DMA_VCS_PAS)) /* PASID allocation */
@@ -281,6 +311,26 @@
#define DMA_CCMD_SID(s) (((u64)((s) & 0xffff)) << 16)
#define DMA_CCMD_DID(d) ((u64)((d) & 0xffff))
+/* ECMD_REG */
+#define DMA_MAX_NUM_ECMD 256
+#define DMA_MAX_NUM_ECMDCAP (DMA_MAX_NUM_ECMD / 64)
+#define DMA_ECMD_REG_STEP 8
+#define DMA_ECMD_ENABLE 0xf0
+#define DMA_ECMD_DISABLE 0xf1
+#define DMA_ECMD_FREEZE 0xf4
+#define DMA_ECMD_UNFREEZE 0xf5
+#define DMA_ECMD_OA_SHIFT 16
+#define DMA_ECMD_ECRSP_IP 0x1
+#define DMA_ECMD_ECCAP3 3
+#define DMA_ECMD_ECCAP3_ECNTS BIT_ULL(48)
+#define DMA_ECMD_ECCAP3_DCNTS BIT_ULL(49)
+#define DMA_ECMD_ECCAP3_FCNTS BIT_ULL(52)
+#define DMA_ECMD_ECCAP3_UFCNTS BIT_ULL(53)
+#define DMA_ECMD_ECCAP3_ESSENTIAL (DMA_ECMD_ECCAP3_ECNTS | \
+ DMA_ECMD_ECCAP3_DCNTS | \
+ DMA_ECMD_ECCAP3_FCNTS | \
+ DMA_ECMD_ECCAP3_UFCNTS)
+
/* FECTL_REG */
#define DMA_FECTL_IM (((u32)1) << 31)
@@ -309,6 +359,9 @@
#define DMA_VCS_PAS ((u64)1)
+/* PERFINTRSTS_REG */
+#define DMA_PERFINTRSTS_PIS ((u32)1)
+
#define IOMMU_WAIT_OP(iommu, offset, op, cond, sts) \
do { \
cycles_t start_time = get_cycles(); \
@@ -438,6 +491,11 @@ struct q_inval {
int free_cnt;
};
+/* Page Request Queue depth */
+#define PRQ_ORDER 4
+#define PRQ_RING_MASK ((0x1000 << PRQ_ORDER) - 0x20)
+#define PRQ_DEPTH ((0x1000 << PRQ_ORDER) >> 5)
+
struct dmar_pci_notify_info;
#ifdef CONFIG_IRQ_REMAP
@@ -554,6 +612,40 @@ struct dmar_domain {
iommu core */
};
+/*
+ * In theory, the VT-d 4.0 spec can support up to 2 ^ 16 counters.
+ * But in practice, there are only 14 counters for the existing
+ * platform. Setting the max number of counters to 64 should be good
+ * enough for a long time. Also, supporting more than 64 counters
+ * requires more extras, e.g., extra freeze and overflow registers,
+ * which is not necessary for now.
+ */
+#define IOMMU_PMU_IDX_MAX 64
+
+struct iommu_pmu {
+ struct intel_iommu *iommu;
+ u32 num_cntr; /* Number of counters */
+ u32 num_eg; /* Number of event group */
+ u32 cntr_width; /* Counter width */
+ u32 cntr_stride; /* Counter Stride */
+ u32 filter; /* Bitmask of filter support */
+ void __iomem *base; /* the PerfMon base address */
+ void __iomem *cfg_reg; /* counter configuration base address */
+ void __iomem *cntr_reg; /* counter 0 address*/
+ void __iomem *overflow; /* overflow status register */
+
+ u64 *evcap; /* Indicates all supported events */
+ u32 **cntr_evcap; /* Supported events of each counter. */
+
+ struct pmu pmu;
+ DECLARE_BITMAP(used_mask, IOMMU_PMU_IDX_MAX);
+ struct perf_event *event_list[IOMMU_PMU_IDX_MAX];
+ unsigned char irq_name[16];
+};
+
+#define IOMMU_IRQ_ID_OFFSET_PRQ (DMAR_UNITS_SUPPORTED)
+#define IOMMU_IRQ_ID_OFFSET_PERF (2 * DMAR_UNITS_SUPPORTED)
+
struct intel_iommu {
void __iomem *reg; /* Pointer to hardware regs, virtual addr */
u64 reg_phys; /* physical address of hw register set */
@@ -561,12 +653,13 @@ struct intel_iommu {
u64 cap;
u64 ecap;
u64 vccap;
+ u64 ecmdcap[DMA_MAX_NUM_ECMDCAP];
u32 gcmd; /* Holds TE, EAFL. Don't need SRTP, SFL, WBF */
raw_spinlock_t register_lock; /* protect register handling */
int seq_id; /* sequence id of the iommu */
int agaw; /* agaw of this iommu */
int msagaw; /* max sagaw of this iommu */
- unsigned int irq, pr_irq;
+ unsigned int irq, pr_irq, perf_irq;
u16 segment; /* PCI segment# */
unsigned char name[13]; /* Device Name */
@@ -600,6 +693,8 @@ struct intel_iommu {
struct dmar_drhd_unit *drhd;
void *perf_statistic;
+
+ struct iommu_pmu *pmu;
};
/* PCI domain-device relationship */
@@ -737,7 +832,7 @@ int qi_submit_sync(struct intel_iommu *iommu, struct qi_desc *desc,
extern int dmar_ir_support(void);
-void *alloc_pgtable_page(int node);
+void *alloc_pgtable_page(int node, gfp_t gfp);
void free_pgtable_page(void *vaddr);
void iommu_flush_write_buffer(struct intel_iommu *iommu);
struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn);
@@ -756,19 +851,13 @@ struct intel_svm_dev {
struct rcu_head rcu;
struct device *dev;
struct intel_iommu *iommu;
- struct iommu_sva sva;
- u32 pasid;
- int users;
u16 did;
- u16 dev_iotlb:1;
u16 sid, qdep;
};
struct intel_svm {
struct mmu_notifier notifier;
struct mm_struct *mm;
-
- unsigned int flags;
u32 pasid;
struct list_head devs;
};
@@ -800,6 +889,14 @@ extern const struct iommu_ops intel_iommu_ops;
extern int intel_iommu_sm;
extern int iommu_calculate_agaw(struct intel_iommu *iommu);
extern int iommu_calculate_max_sagaw(struct intel_iommu *iommu);
+int ecmd_submit_sync(struct intel_iommu *iommu, u8 ecmd, u64 oa, u64 ob);
+
+static inline bool ecmd_has_pmu_essential(struct intel_iommu *iommu)
+{
+ return (iommu->ecmdcap[DMA_ECMD_ECCAP3] & DMA_ECMD_ECCAP3_ESSENTIAL) ==
+ DMA_ECMD_ECCAP3_ESSENTIAL;
+}
+
extern int dmar_disabled;
extern int intel_iommu_enabled;
#else
diff --git a/drivers/iommu/intel/irq_remapping.c b/drivers/iommu/intel/irq_remapping.c
index f58f5f57af78..6d01fa078c36 100644
--- a/drivers/iommu/intel/irq_remapping.c
+++ b/drivers/iommu/intel/irq_remapping.c
@@ -573,7 +573,8 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu)
}
irq_domain_update_bus_token(iommu->ir_domain, DOMAIN_BUS_DMAR);
- iommu->ir_domain->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT;
+ iommu->ir_domain->flags |= IRQ_DOMAIN_FLAG_MSI_PARENT |
+ IRQ_DOMAIN_FLAG_ISOLATED_MSI;
if (cap_caching_mode(iommu->cap))
iommu->ir_domain->msi_parent_ops = &virt_dmar_msi_parent_ops;
diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c
index fb3c7020028d..633e0a4a01e7 100644
--- a/drivers/iommu/intel/pasid.c
+++ b/drivers/iommu/intel/pasid.c
@@ -128,6 +128,9 @@ int intel_pasid_alloc_table(struct device *dev)
pasid_table->max_pasid = 1 << (order + PAGE_SHIFT + 3);
info->pasid_table = pasid_table;
+ if (!ecap_coherent(info->iommu->ecap))
+ clflush_cache_range(pasid_table->table, size);
+
return 0;
}
@@ -200,7 +203,7 @@ static struct pasid_entry *intel_pasid_get_entry(struct device *dev, u32 pasid)
retry:
entries = get_pasid_table_from_pde(&dir[dir_index]);
if (!entries) {
- entries = alloc_pgtable_page(info->iommu->node);
+ entries = alloc_pgtable_page(info->iommu->node, GFP_ATOMIC);
if (!entries)
return NULL;
@@ -215,6 +218,10 @@ retry:
free_pgtable_page(entries);
goto retry;
}
+ if (!ecap_coherent(info->iommu->ecap)) {
+ clflush_cache_range(entries, VTD_PAGE_SIZE);
+ clflush_cache_range(&dir[dir_index].val, sizeof(*dir));
+ }
}
return &entries[index];
@@ -365,6 +372,16 @@ static inline void pasid_set_page_snoop(struct pasid_entry *pe, bool value)
}
/*
+ * Setup No Execute Enable bit (Bit 133) of a scalable mode PASID
+ * entry. It is required when XD bit of the first level page table
+ * entry is about to be set.
+ */
+static inline void pasid_set_nxe(struct pasid_entry *pe)
+{
+ pasid_set_bits(&pe->val[2], 1 << 5, 1 << 5);
+}
+
+/*
* Setup the Page Snoop (PGSNP) field (Bit 88) of a scalable mode
* PASID entry.
*/
@@ -557,6 +574,7 @@ int intel_pasid_setup_first_level(struct intel_iommu *iommu,
pasid_set_domain_id(pte, did);
pasid_set_address_width(pte, iommu->agaw);
pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));
+ pasid_set_nxe(pte);
/* Setup Present and PASID Granular Transfer Type: */
pasid_set_translation_type(pte, PASID_ENTRY_PGTT_FL_ONLY);
diff --git a/drivers/iommu/intel/perfmon.c b/drivers/iommu/intel/perfmon.c
new file mode 100644
index 000000000000..e17d9743a0d8
--- /dev/null
+++ b/drivers/iommu/intel/perfmon.c
@@ -0,0 +1,877 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Support Intel IOMMU PerfMon
+ * Copyright(c) 2023 Intel Corporation.
+ */
+#define pr_fmt(fmt) "DMAR: " fmt
+#define dev_fmt(fmt) pr_fmt(fmt)
+
+#include <linux/dmar.h>
+#include "iommu.h"
+#include "perfmon.h"
+
+PMU_FORMAT_ATTR(event, "config:0-27"); /* ES: Events Select */
+PMU_FORMAT_ATTR(event_group, "config:28-31"); /* EGI: Event Group Index */
+
+static struct attribute *iommu_pmu_format_attrs[] = {
+ &format_attr_event_group.attr,
+ &format_attr_event.attr,
+ NULL
+};
+
+static struct attribute_group iommu_pmu_format_attr_group = {
+ .name = "format",
+ .attrs = iommu_pmu_format_attrs,
+};
+
+/* The available events are added in attr_update later */
+static struct attribute *attrs_empty[] = {
+ NULL
+};
+
+static struct attribute_group iommu_pmu_events_attr_group = {
+ .name = "events",
+ .attrs = attrs_empty,
+};
+
+static cpumask_t iommu_pmu_cpu_mask;
+
+static ssize_t
+cpumask_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return cpumap_print_to_pagebuf(true, buf, &iommu_pmu_cpu_mask);
+}
+static DEVICE_ATTR_RO(cpumask);
+
+static struct attribute *iommu_pmu_cpumask_attrs[] = {
+ &dev_attr_cpumask.attr,
+ NULL
+};
+
+static struct attribute_group iommu_pmu_cpumask_attr_group = {
+ .attrs = iommu_pmu_cpumask_attrs,
+};
+
+static const struct attribute_group *iommu_pmu_attr_groups[] = {
+ &iommu_pmu_format_attr_group,
+ &iommu_pmu_events_attr_group,
+ &iommu_pmu_cpumask_attr_group,
+ NULL
+};
+
+static inline struct iommu_pmu *dev_to_iommu_pmu(struct device *dev)
+{
+ /*
+ * The perf_event creates its own dev for each PMU.
+ * See pmu_dev_alloc()
+ */
+ return container_of(dev_get_drvdata(dev), struct iommu_pmu, pmu);
+}
+
+#define IOMMU_PMU_ATTR(_name, _format, _filter) \
+ PMU_FORMAT_ATTR(_name, _format); \
+ \
+static struct attribute *_name##_attr[] = { \
+ &format_attr_##_name.attr, \
+ NULL \
+}; \
+ \
+static umode_t \
+_name##_is_visible(struct kobject *kobj, struct attribute *attr, int i) \
+{ \
+ struct device *dev = kobj_to_dev(kobj); \
+ struct iommu_pmu *iommu_pmu = dev_to_iommu_pmu(dev); \
+ \
+ if (!iommu_pmu) \
+ return 0; \
+ return (iommu_pmu->filter & _filter) ? attr->mode : 0; \
+} \
+ \
+static struct attribute_group _name = { \
+ .name = "format", \
+ .attrs = _name##_attr, \
+ .is_visible = _name##_is_visible, \
+};
+
+IOMMU_PMU_ATTR(filter_requester_id_en, "config1:0", IOMMU_PMU_FILTER_REQUESTER_ID);
+IOMMU_PMU_ATTR(filter_domain_en, "config1:1", IOMMU_PMU_FILTER_DOMAIN);
+IOMMU_PMU_ATTR(filter_pasid_en, "config1:2", IOMMU_PMU_FILTER_PASID);
+IOMMU_PMU_ATTR(filter_ats_en, "config1:3", IOMMU_PMU_FILTER_ATS);
+IOMMU_PMU_ATTR(filter_page_table_en, "config1:4", IOMMU_PMU_FILTER_PAGE_TABLE);
+IOMMU_PMU_ATTR(filter_requester_id, "config1:16-31", IOMMU_PMU_FILTER_REQUESTER_ID);
+IOMMU_PMU_ATTR(filter_domain, "config1:32-47", IOMMU_PMU_FILTER_DOMAIN);
+IOMMU_PMU_ATTR(filter_pasid, "config2:0-21", IOMMU_PMU_FILTER_PASID);
+IOMMU_PMU_ATTR(filter_ats, "config2:24-28", IOMMU_PMU_FILTER_ATS);
+IOMMU_PMU_ATTR(filter_page_table, "config2:32-36", IOMMU_PMU_FILTER_PAGE_TABLE);
+
+#define iommu_pmu_en_requester_id(e) ((e) & 0x1)
+#define iommu_pmu_en_domain(e) (((e) >> 1) & 0x1)
+#define iommu_pmu_en_pasid(e) (((e) >> 2) & 0x1)
+#define iommu_pmu_en_ats(e) (((e) >> 3) & 0x1)
+#define iommu_pmu_en_page_table(e) (((e) >> 4) & 0x1)
+#define iommu_pmu_get_requester_id(filter) (((filter) >> 16) & 0xffff)
+#define iommu_pmu_get_domain(filter) (((filter) >> 32) & 0xffff)
+#define iommu_pmu_get_pasid(filter) ((filter) & 0x3fffff)
+#define iommu_pmu_get_ats(filter) (((filter) >> 24) & 0x1f)
+#define iommu_pmu_get_page_table(filter) (((filter) >> 32) & 0x1f)
+
+#define iommu_pmu_set_filter(_name, _config, _filter, _idx, _econfig) \
+{ \
+ if ((iommu_pmu->filter & _filter) && iommu_pmu_en_##_name(_econfig)) { \
+ dmar_writel(iommu_pmu->cfg_reg + _idx * IOMMU_PMU_CFG_OFFSET + \
+ IOMMU_PMU_CFG_SIZE + \
+ (ffs(_filter) - 1) * IOMMU_PMU_CFG_FILTERS_OFFSET, \
+ iommu_pmu_get_##_name(_config) | IOMMU_PMU_FILTER_EN);\
+ } \
+}
+
+#define iommu_pmu_clear_filter(_filter, _idx) \
+{ \
+ if (iommu_pmu->filter & _filter) { \
+ dmar_writel(iommu_pmu->cfg_reg + _idx * IOMMU_PMU_CFG_OFFSET + \
+ IOMMU_PMU_CFG_SIZE + \
+ (ffs(_filter) - 1) * IOMMU_PMU_CFG_FILTERS_OFFSET, \
+ 0); \
+ } \
+}
+
+/*
+ * Define the event attr related functions
+ * Input: _name: event attr name
+ * _string: string of the event in sysfs
+ * _g_idx: event group encoding
+ * _event: event encoding
+ */
+#define IOMMU_PMU_EVENT_ATTR(_name, _string, _g_idx, _event) \
+ PMU_EVENT_ATTR_STRING(_name, event_attr_##_name, _string) \
+ \
+static struct attribute *_name##_attr[] = { \
+ &event_attr_##_name.attr.attr, \
+ NULL \
+}; \
+ \
+static umode_t \
+_name##_is_visible(struct kobject *kobj, struct attribute *attr, int i) \
+{ \
+ struct device *dev = kobj_to_dev(kobj); \
+ struct iommu_pmu *iommu_pmu = dev_to_iommu_pmu(dev); \
+ \
+ if (!iommu_pmu) \
+ return 0; \
+ return (iommu_pmu->evcap[_g_idx] & _event) ? attr->mode : 0; \
+} \
+ \
+static struct attribute_group _name = { \
+ .name = "events", \
+ .attrs = _name##_attr, \
+ .is_visible = _name##_is_visible, \
+};
+
+IOMMU_PMU_EVENT_ATTR(iommu_clocks, "event_group=0x0,event=0x001", 0x0, 0x001)
+IOMMU_PMU_EVENT_ATTR(iommu_requests, "event_group=0x0,event=0x002", 0x0, 0x002)
+IOMMU_PMU_EVENT_ATTR(pw_occupancy, "event_group=0x0,event=0x004", 0x0, 0x004)
+IOMMU_PMU_EVENT_ATTR(ats_blocked, "event_group=0x0,event=0x008", 0x0, 0x008)
+IOMMU_PMU_EVENT_ATTR(iommu_mrds, "event_group=0x1,event=0x001", 0x1, 0x001)
+IOMMU_PMU_EVENT_ATTR(iommu_mem_blocked, "event_group=0x1,event=0x020", 0x1, 0x020)
+IOMMU_PMU_EVENT_ATTR(pg_req_posted, "event_group=0x1,event=0x040", 0x1, 0x040)
+IOMMU_PMU_EVENT_ATTR(ctxt_cache_lookup, "event_group=0x2,event=0x001", 0x2, 0x001)
+IOMMU_PMU_EVENT_ATTR(ctxt_cache_hit, "event_group=0x2,event=0x002", 0x2, 0x002)
+IOMMU_PMU_EVENT_ATTR(pasid_cache_lookup, "event_group=0x2,event=0x004", 0x2, 0x004)
+IOMMU_PMU_EVENT_ATTR(pasid_cache_hit, "event_group=0x2,event=0x008", 0x2, 0x008)
+IOMMU_PMU_EVENT_ATTR(ss_nonleaf_lookup, "event_group=0x2,event=0x010", 0x2, 0x010)
+IOMMU_PMU_EVENT_ATTR(ss_nonleaf_hit, "event_group=0x2,event=0x020", 0x2, 0x020)
+IOMMU_PMU_EVENT_ATTR(fs_nonleaf_lookup, "event_group=0x2,event=0x040", 0x2, 0x040)
+IOMMU_PMU_EVENT_ATTR(fs_nonleaf_hit, "event_group=0x2,event=0x080", 0x2, 0x080)
+IOMMU_PMU_EVENT_ATTR(hpt_nonleaf_lookup, "event_group=0x2,event=0x100", 0x2, 0x100)
+IOMMU_PMU_EVENT_ATTR(hpt_nonleaf_hit, "event_group=0x2,event=0x200", 0x2, 0x200)
+IOMMU_PMU_EVENT_ATTR(iotlb_lookup, "event_group=0x3,event=0x001", 0x3, 0x001)
+IOMMU_PMU_EVENT_ATTR(iotlb_hit, "event_group=0x3,event=0x002", 0x3, 0x002)
+IOMMU_PMU_EVENT_ATTR(hpt_leaf_lookup, "event_group=0x3,event=0x004", 0x3, 0x004)
+IOMMU_PMU_EVENT_ATTR(hpt_leaf_hit, "event_group=0x3,event=0x008", 0x3, 0x008)
+IOMMU_PMU_EVENT_ATTR(int_cache_lookup, "event_group=0x4,event=0x001", 0x4, 0x001)
+IOMMU_PMU_EVENT_ATTR(int_cache_hit_nonposted, "event_group=0x4,event=0x002", 0x4, 0x002)
+IOMMU_PMU_EVENT_ATTR(int_cache_hit_posted, "event_group=0x4,event=0x004", 0x4, 0x004)
+
+static const struct attribute_group *iommu_pmu_attr_update[] = {
+ &filter_requester_id_en,
+ &filter_domain_en,
+ &filter_pasid_en,
+ &filter_ats_en,
+ &filter_page_table_en,
+ &filter_requester_id,
+ &filter_domain,
+ &filter_pasid,
+ &filter_ats,
+ &filter_page_table,
+ &iommu_clocks,
+ &iommu_requests,
+ &pw_occupancy,
+ &ats_blocked,
+ &iommu_mrds,
+ &iommu_mem_blocked,
+ &pg_req_posted,
+ &ctxt_cache_lookup,
+ &ctxt_cache_hit,
+ &pasid_cache_lookup,
+ &pasid_cache_hit,
+ &ss_nonleaf_lookup,
+ &ss_nonleaf_hit,
+ &fs_nonleaf_lookup,
+ &fs_nonleaf_hit,
+ &hpt_nonleaf_lookup,
+ &hpt_nonleaf_hit,
+ &iotlb_lookup,
+ &iotlb_hit,
+ &hpt_leaf_lookup,
+ &hpt_leaf_hit,
+ &int_cache_lookup,
+ &int_cache_hit_nonposted,
+ &int_cache_hit_posted,
+ NULL
+};
+
+static inline void __iomem *
+iommu_event_base(struct iommu_pmu *iommu_pmu, int idx)
+{
+ return iommu_pmu->cntr_reg + idx * iommu_pmu->cntr_stride;
+}
+
+static inline void __iomem *
+iommu_config_base(struct iommu_pmu *iommu_pmu, int idx)
+{
+ return iommu_pmu->cfg_reg + idx * IOMMU_PMU_CFG_OFFSET;
+}
+
+static inline struct iommu_pmu *iommu_event_to_pmu(struct perf_event *event)
+{
+ return container_of(event->pmu, struct iommu_pmu, pmu);
+}
+
+static inline u64 iommu_event_config(struct perf_event *event)
+{
+ u64 config = event->attr.config;
+
+ return (iommu_event_select(config) << IOMMU_EVENT_CFG_ES_SHIFT) |
+ (iommu_event_group(config) << IOMMU_EVENT_CFG_EGI_SHIFT) |
+ IOMMU_EVENT_CFG_INT;
+}
+
+static inline bool is_iommu_pmu_event(struct iommu_pmu *iommu_pmu,
+ struct perf_event *event)
+{
+ return event->pmu == &iommu_pmu->pmu;
+}
+
+static int iommu_pmu_validate_event(struct perf_event *event)
+{
+ struct iommu_pmu *iommu_pmu = iommu_event_to_pmu(event);
+ u32 event_group = iommu_event_group(event->attr.config);
+
+ if (event_group >= iommu_pmu->num_eg)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int iommu_pmu_validate_group(struct perf_event *event)
+{
+ struct iommu_pmu *iommu_pmu = iommu_event_to_pmu(event);
+ struct perf_event *sibling;
+ int nr = 0;
+
+ /*
+ * All events in a group must be scheduled simultaneously.
+ * Check whether there is enough counters for all the events.
+ */
+ for_each_sibling_event(sibling, event->group_leader) {
+ if (!is_iommu_pmu_event(iommu_pmu, sibling) ||
+ sibling->state <= PERF_EVENT_STATE_OFF)
+ continue;
+
+ if (++nr > iommu_pmu->num_cntr)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int iommu_pmu_event_init(struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+
+ if (event->attr.type != event->pmu->type)
+ return -ENOENT;
+
+ /* sampling not supported */
+ if (event->attr.sample_period)
+ return -EINVAL;
+
+ if (event->cpu < 0)
+ return -EINVAL;
+
+ if (iommu_pmu_validate_event(event))
+ return -EINVAL;
+
+ hwc->config = iommu_event_config(event);
+
+ return iommu_pmu_validate_group(event);
+}
+
+static void iommu_pmu_event_update(struct perf_event *event)
+{
+ struct iommu_pmu *iommu_pmu = iommu_event_to_pmu(event);
+ struct hw_perf_event *hwc = &event->hw;
+ u64 prev_count, new_count, delta;
+ int shift = 64 - iommu_pmu->cntr_width;
+
+again:
+ prev_count = local64_read(&hwc->prev_count);
+ new_count = dmar_readq(iommu_event_base(iommu_pmu, hwc->idx));
+ if (local64_xchg(&hwc->prev_count, new_count) != prev_count)
+ goto again;
+
+ /*
+ * The counter width is enumerated. Always shift the counter
+ * before using it.
+ */
+ delta = (new_count << shift) - (prev_count << shift);
+ delta >>= shift;
+
+ local64_add(delta, &event->count);
+}
+
+static void iommu_pmu_start(struct perf_event *event, int flags)
+{
+ struct iommu_pmu *iommu_pmu = iommu_event_to_pmu(event);
+ struct intel_iommu *iommu = iommu_pmu->iommu;
+ struct hw_perf_event *hwc = &event->hw;
+ u64 count;
+
+ if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
+ return;
+
+ if (WARN_ON_ONCE(hwc->idx < 0 || hwc->idx >= IOMMU_PMU_IDX_MAX))
+ return;
+
+ if (flags & PERF_EF_RELOAD)
+ WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
+
+ hwc->state = 0;
+
+ /* Always reprogram the period */
+ count = dmar_readq(iommu_event_base(iommu_pmu, hwc->idx));
+ local64_set((&hwc->prev_count), count);
+
+ /*
+ * The error of ecmd will be ignored.
+ * - The existing perf_event subsystem doesn't handle the error.
+ * Only IOMMU PMU returns runtime HW error. We don't want to
+ * change the existing generic interfaces for the specific case.
+ * - It's a corner case caused by HW, which is very unlikely to
+ * happen. There is nothing SW can do.
+ * - The worst case is that the user will get <not count> with
+ * perf command, which can give the user some hints.
+ */
+ ecmd_submit_sync(iommu, DMA_ECMD_ENABLE, hwc->idx, 0);
+
+ perf_event_update_userpage(event);
+}
+
+static void iommu_pmu_stop(struct perf_event *event, int flags)
+{
+ struct iommu_pmu *iommu_pmu = iommu_event_to_pmu(event);
+ struct intel_iommu *iommu = iommu_pmu->iommu;
+ struct hw_perf_event *hwc = &event->hw;
+
+ if (!(hwc->state & PERF_HES_STOPPED)) {
+ ecmd_submit_sync(iommu, DMA_ECMD_DISABLE, hwc->idx, 0);
+
+ iommu_pmu_event_update(event);
+
+ hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
+ }
+}
+
+static inline int
+iommu_pmu_validate_per_cntr_event(struct iommu_pmu *iommu_pmu,
+ int idx, struct perf_event *event)
+{
+ u32 event_group = iommu_event_group(event->attr.config);
+ u32 select = iommu_event_select(event->attr.config);
+
+ if (!(iommu_pmu->cntr_evcap[idx][event_group] & select))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int iommu_pmu_assign_event(struct iommu_pmu *iommu_pmu,
+ struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ int idx;
+
+ /*
+ * The counters which support limited events are usually at the end.
+ * Schedule them first to accommodate more events.
+ */
+ for (idx = iommu_pmu->num_cntr - 1; idx >= 0; idx--) {
+ if (test_and_set_bit(idx, iommu_pmu->used_mask))
+ continue;
+ /* Check per-counter event capabilities */
+ if (!iommu_pmu_validate_per_cntr_event(iommu_pmu, idx, event))
+ break;
+ clear_bit(idx, iommu_pmu->used_mask);
+ }
+ if (idx < 0)
+ return -EINVAL;
+
+ iommu_pmu->event_list[idx] = event;
+ hwc->idx = idx;
+
+ /* config events */
+ dmar_writeq(iommu_config_base(iommu_pmu, idx), hwc->config);
+
+ iommu_pmu_set_filter(requester_id, event->attr.config1,
+ IOMMU_PMU_FILTER_REQUESTER_ID, idx,
+ event->attr.config1);
+ iommu_pmu_set_filter(domain, event->attr.config1,
+ IOMMU_PMU_FILTER_DOMAIN, idx,
+ event->attr.config1);
+ iommu_pmu_set_filter(pasid, event->attr.config1,
+ IOMMU_PMU_FILTER_PASID, idx,
+ event->attr.config1);
+ iommu_pmu_set_filter(ats, event->attr.config2,
+ IOMMU_PMU_FILTER_ATS, idx,
+ event->attr.config1);
+ iommu_pmu_set_filter(page_table, event->attr.config2,
+ IOMMU_PMU_FILTER_PAGE_TABLE, idx,
+ event->attr.config1);
+
+ return 0;
+}
+
+static int iommu_pmu_add(struct perf_event *event, int flags)
+{
+ struct iommu_pmu *iommu_pmu = iommu_event_to_pmu(event);
+ struct hw_perf_event *hwc = &event->hw;
+ int ret;
+
+ ret = iommu_pmu_assign_event(iommu_pmu, event);
+ if (ret < 0)
+ return ret;
+
+ hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
+
+ if (flags & PERF_EF_START)
+ iommu_pmu_start(event, 0);
+
+ return 0;
+}
+
+static void iommu_pmu_del(struct perf_event *event, int flags)
+{
+ struct iommu_pmu *iommu_pmu = iommu_event_to_pmu(event);
+ int idx = event->hw.idx;
+
+ iommu_pmu_stop(event, PERF_EF_UPDATE);
+
+ iommu_pmu_clear_filter(IOMMU_PMU_FILTER_REQUESTER_ID, idx);
+ iommu_pmu_clear_filter(IOMMU_PMU_FILTER_DOMAIN, idx);
+ iommu_pmu_clear_filter(IOMMU_PMU_FILTER_PASID, idx);
+ iommu_pmu_clear_filter(IOMMU_PMU_FILTER_ATS, idx);
+ iommu_pmu_clear_filter(IOMMU_PMU_FILTER_PAGE_TABLE, idx);
+
+ iommu_pmu->event_list[idx] = NULL;
+ event->hw.idx = -1;
+ clear_bit(idx, iommu_pmu->used_mask);
+
+ perf_event_update_userpage(event);
+}
+
+static void iommu_pmu_enable(struct pmu *pmu)
+{
+ struct iommu_pmu *iommu_pmu = container_of(pmu, struct iommu_pmu, pmu);
+ struct intel_iommu *iommu = iommu_pmu->iommu;
+
+ ecmd_submit_sync(iommu, DMA_ECMD_UNFREEZE, 0, 0);
+}
+
+static void iommu_pmu_disable(struct pmu *pmu)
+{
+ struct iommu_pmu *iommu_pmu = container_of(pmu, struct iommu_pmu, pmu);
+ struct intel_iommu *iommu = iommu_pmu->iommu;
+
+ ecmd_submit_sync(iommu, DMA_ECMD_FREEZE, 0, 0);
+}
+
+static void iommu_pmu_counter_overflow(struct iommu_pmu *iommu_pmu)
+{
+ struct perf_event *event;
+ u64 status;
+ int i;
+
+ /*
+ * Two counters may be overflowed very close. Always check
+ * whether there are more to handle.
+ */
+ while ((status = dmar_readq(iommu_pmu->overflow))) {
+ for_each_set_bit(i, (unsigned long *)&status, iommu_pmu->num_cntr) {
+ /*
+ * Find the assigned event of the counter.
+ * Accumulate the value into the event->count.
+ */
+ event = iommu_pmu->event_list[i];
+ if (!event) {
+ pr_warn_once("Cannot find the assigned event for counter %d\n", i);
+ continue;
+ }
+ iommu_pmu_event_update(event);
+ }
+
+ dmar_writeq(iommu_pmu->overflow, status);
+ }
+}
+
+static irqreturn_t iommu_pmu_irq_handler(int irq, void *dev_id)
+{
+ struct intel_iommu *iommu = dev_id;
+
+ if (!dmar_readl(iommu->reg + DMAR_PERFINTRSTS_REG))
+ return IRQ_NONE;
+
+ iommu_pmu_counter_overflow(iommu->pmu);
+
+ /* Clear the status bit */
+ dmar_writel(iommu->reg + DMAR_PERFINTRSTS_REG, DMA_PERFINTRSTS_PIS);
+
+ return IRQ_HANDLED;
+}
+
+static int __iommu_pmu_register(struct intel_iommu *iommu)
+{
+ struct iommu_pmu *iommu_pmu = iommu->pmu;
+
+ iommu_pmu->pmu.name = iommu->name;
+ iommu_pmu->pmu.task_ctx_nr = perf_invalid_context;
+ iommu_pmu->pmu.event_init = iommu_pmu_event_init;
+ iommu_pmu->pmu.pmu_enable = iommu_pmu_enable;
+ iommu_pmu->pmu.pmu_disable = iommu_pmu_disable;
+ iommu_pmu->pmu.add = iommu_pmu_add;
+ iommu_pmu->pmu.del = iommu_pmu_del;
+ iommu_pmu->pmu.start = iommu_pmu_start;
+ iommu_pmu->pmu.stop = iommu_pmu_stop;
+ iommu_pmu->pmu.read = iommu_pmu_event_update;
+ iommu_pmu->pmu.attr_groups = iommu_pmu_attr_groups;
+ iommu_pmu->pmu.attr_update = iommu_pmu_attr_update;
+ iommu_pmu->pmu.capabilities = PERF_PMU_CAP_NO_EXCLUDE;
+ iommu_pmu->pmu.module = THIS_MODULE;
+
+ return perf_pmu_register(&iommu_pmu->pmu, iommu_pmu->pmu.name, -1);
+}
+
+static inline void __iomem *
+get_perf_reg_address(struct intel_iommu *iommu, u32 offset)
+{
+ u32 off = dmar_readl(iommu->reg + offset);
+
+ return iommu->reg + off;
+}
+
+int alloc_iommu_pmu(struct intel_iommu *iommu)
+{
+ struct iommu_pmu *iommu_pmu;
+ int i, j, ret;
+ u64 perfcap;
+ u32 cap;
+
+ if (!ecap_pms(iommu->ecap))
+ return 0;
+
+ /* The IOMMU PMU requires the ECMD support as well */
+ if (!cap_ecmds(iommu->cap))
+ return -ENODEV;
+
+ perfcap = dmar_readq(iommu->reg + DMAR_PERFCAP_REG);
+ /* The performance monitoring is not supported. */
+ if (!perfcap)
+ return -ENODEV;
+
+ /* Sanity check for the number of the counters and event groups */
+ if (!pcap_num_cntr(perfcap) || !pcap_num_event_group(perfcap))
+ return -ENODEV;
+
+ /* The interrupt on overflow is required */
+ if (!pcap_interrupt(perfcap))
+ return -ENODEV;
+
+ /* Check required Enhanced Command Capability */
+ if (!ecmd_has_pmu_essential(iommu))
+ return -ENODEV;
+
+ iommu_pmu = kzalloc(sizeof(*iommu_pmu), GFP_KERNEL);
+ if (!iommu_pmu)
+ return -ENOMEM;
+
+ iommu_pmu->num_cntr = pcap_num_cntr(perfcap);
+ if (iommu_pmu->num_cntr > IOMMU_PMU_IDX_MAX) {
+ pr_warn_once("The number of IOMMU counters %d > max(%d), clipping!",
+ iommu_pmu->num_cntr, IOMMU_PMU_IDX_MAX);
+ iommu_pmu->num_cntr = IOMMU_PMU_IDX_MAX;
+ }
+
+ iommu_pmu->cntr_width = pcap_cntr_width(perfcap);
+ iommu_pmu->filter = pcap_filters_mask(perfcap);
+ iommu_pmu->cntr_stride = pcap_cntr_stride(perfcap);
+ iommu_pmu->num_eg = pcap_num_event_group(perfcap);
+
+ iommu_pmu->evcap = kcalloc(iommu_pmu->num_eg, sizeof(u64), GFP_KERNEL);
+ if (!iommu_pmu->evcap) {
+ ret = -ENOMEM;
+ goto free_pmu;
+ }
+
+ /* Parse event group capabilities */
+ for (i = 0; i < iommu_pmu->num_eg; i++) {
+ u64 pcap;
+
+ pcap = dmar_readq(iommu->reg + DMAR_PERFEVNTCAP_REG +
+ i * IOMMU_PMU_CAP_REGS_STEP);
+ iommu_pmu->evcap[i] = pecap_es(pcap);
+ }
+
+ iommu_pmu->cntr_evcap = kcalloc(iommu_pmu->num_cntr, sizeof(u32 *), GFP_KERNEL);
+ if (!iommu_pmu->cntr_evcap) {
+ ret = -ENOMEM;
+ goto free_pmu_evcap;
+ }
+ for (i = 0; i < iommu_pmu->num_cntr; i++) {
+ iommu_pmu->cntr_evcap[i] = kcalloc(iommu_pmu->num_eg, sizeof(u32), GFP_KERNEL);
+ if (!iommu_pmu->cntr_evcap[i]) {
+ ret = -ENOMEM;
+ goto free_pmu_cntr_evcap;
+ }
+ /*
+ * Set to the global capabilities, will adjust according
+ * to per-counter capabilities later.
+ */
+ for (j = 0; j < iommu_pmu->num_eg; j++)
+ iommu_pmu->cntr_evcap[i][j] = (u32)iommu_pmu->evcap[j];
+ }
+
+ iommu_pmu->cfg_reg = get_perf_reg_address(iommu, DMAR_PERFCFGOFF_REG);
+ iommu_pmu->cntr_reg = get_perf_reg_address(iommu, DMAR_PERFCNTROFF_REG);
+ iommu_pmu->overflow = get_perf_reg_address(iommu, DMAR_PERFOVFOFF_REG);
+
+ /*
+ * Check per-counter capabilities. All counters should have the
+ * same capabilities on Interrupt on Overflow Support and Counter
+ * Width.
+ */
+ for (i = 0; i < iommu_pmu->num_cntr; i++) {
+ cap = dmar_readl(iommu_pmu->cfg_reg +
+ i * IOMMU_PMU_CFG_OFFSET +
+ IOMMU_PMU_CFG_CNTRCAP_OFFSET);
+ if (!iommu_cntrcap_pcc(cap))
+ continue;
+
+ /*
+ * It's possible that some counters have a different
+ * capability because of e.g., HW bug. Check the corner
+ * case here and simply drop those counters.
+ */
+ if ((iommu_cntrcap_cw(cap) != iommu_pmu->cntr_width) ||
+ !iommu_cntrcap_ios(cap)) {
+ iommu_pmu->num_cntr = i;
+ pr_warn("PMU counter capability inconsistent, counter number reduced to %d\n",
+ iommu_pmu->num_cntr);
+ }
+
+ /* Clear the pre-defined events group */
+ for (j = 0; j < iommu_pmu->num_eg; j++)
+ iommu_pmu->cntr_evcap[i][j] = 0;
+
+ /* Override with per-counter event capabilities */
+ for (j = 0; j < iommu_cntrcap_egcnt(cap); j++) {
+ cap = dmar_readl(iommu_pmu->cfg_reg + i * IOMMU_PMU_CFG_OFFSET +
+ IOMMU_PMU_CFG_CNTREVCAP_OFFSET +
+ (j * IOMMU_PMU_OFF_REGS_STEP));
+ iommu_pmu->cntr_evcap[i][iommu_event_group(cap)] = iommu_event_select(cap);
+ /*
+ * Some events may only be supported by a specific counter.
+ * Track them in the evcap as well.
+ */
+ iommu_pmu->evcap[iommu_event_group(cap)] |= iommu_event_select(cap);
+ }
+ }
+
+ iommu_pmu->iommu = iommu;
+ iommu->pmu = iommu_pmu;
+
+ return 0;
+
+free_pmu_cntr_evcap:
+ for (i = 0; i < iommu_pmu->num_cntr; i++)
+ kfree(iommu_pmu->cntr_evcap[i]);
+ kfree(iommu_pmu->cntr_evcap);
+free_pmu_evcap:
+ kfree(iommu_pmu->evcap);
+free_pmu:
+ kfree(iommu_pmu);
+
+ return ret;
+}
+
+void free_iommu_pmu(struct intel_iommu *iommu)
+{
+ struct iommu_pmu *iommu_pmu = iommu->pmu;
+
+ if (!iommu_pmu)
+ return;
+
+ if (iommu_pmu->evcap) {
+ int i;
+
+ for (i = 0; i < iommu_pmu->num_cntr; i++)
+ kfree(iommu_pmu->cntr_evcap[i]);
+ kfree(iommu_pmu->cntr_evcap);
+ }
+ kfree(iommu_pmu->evcap);
+ kfree(iommu_pmu);
+ iommu->pmu = NULL;
+}
+
+static int iommu_pmu_set_interrupt(struct intel_iommu *iommu)
+{
+ struct iommu_pmu *iommu_pmu = iommu->pmu;
+ int irq, ret;
+
+ irq = dmar_alloc_hwirq(IOMMU_IRQ_ID_OFFSET_PERF + iommu->seq_id, iommu->node, iommu);
+ if (irq <= 0)
+ return -EINVAL;
+
+ snprintf(iommu_pmu->irq_name, sizeof(iommu_pmu->irq_name), "dmar%d-perf", iommu->seq_id);
+
+ iommu->perf_irq = irq;
+ ret = request_threaded_irq(irq, NULL, iommu_pmu_irq_handler,
+ IRQF_ONESHOT, iommu_pmu->irq_name, iommu);
+ if (ret) {
+ dmar_free_hwirq(irq);
+ iommu->perf_irq = 0;
+ return ret;
+ }
+ return 0;
+}
+
+static void iommu_pmu_unset_interrupt(struct intel_iommu *iommu)
+{
+ if (!iommu->perf_irq)
+ return;
+
+ free_irq(iommu->perf_irq, iommu);
+ dmar_free_hwirq(iommu->perf_irq);
+ iommu->perf_irq = 0;
+}
+
+static int iommu_pmu_cpu_online(unsigned int cpu)
+{
+ if (cpumask_empty(&iommu_pmu_cpu_mask))
+ cpumask_set_cpu(cpu, &iommu_pmu_cpu_mask);
+
+ return 0;
+}
+
+static int iommu_pmu_cpu_offline(unsigned int cpu)
+{
+ struct dmar_drhd_unit *drhd;
+ struct intel_iommu *iommu;
+ int target;
+
+ if (!cpumask_test_and_clear_cpu(cpu, &iommu_pmu_cpu_mask))
+ return 0;
+
+ target = cpumask_any_but(cpu_online_mask, cpu);
+
+ if (target < nr_cpu_ids)
+ cpumask_set_cpu(target, &iommu_pmu_cpu_mask);
+ else
+ target = -1;
+
+ rcu_read_lock();
+
+ for_each_iommu(iommu, drhd) {
+ if (!iommu->pmu)
+ continue;
+ perf_pmu_migrate_context(&iommu->pmu->pmu, cpu, target);
+ }
+ rcu_read_unlock();
+
+ return 0;
+}
+
+static int nr_iommu_pmu;
+
+static int iommu_pmu_cpuhp_setup(struct iommu_pmu *iommu_pmu)
+{
+ int ret;
+
+ if (nr_iommu_pmu++)
+ return 0;
+
+ ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_IOMMU_PERF_ONLINE,
+ "driver/iommu/intel/perfmon:online",
+ iommu_pmu_cpu_online,
+ iommu_pmu_cpu_offline);
+ if (ret)
+ nr_iommu_pmu = 0;
+
+ return ret;
+}
+
+static void iommu_pmu_cpuhp_free(struct iommu_pmu *iommu_pmu)
+{
+ if (--nr_iommu_pmu)
+ return;
+
+ cpuhp_remove_state(CPUHP_AP_PERF_X86_IOMMU_PERF_ONLINE);
+}
+
+void iommu_pmu_register(struct intel_iommu *iommu)
+{
+ struct iommu_pmu *iommu_pmu = iommu->pmu;
+
+ if (!iommu_pmu)
+ return;
+
+ if (__iommu_pmu_register(iommu))
+ goto err;
+
+ if (iommu_pmu_cpuhp_setup(iommu_pmu))
+ goto unregister;
+
+ /* Set interrupt for overflow */
+ if (iommu_pmu_set_interrupt(iommu))
+ goto cpuhp_free;
+
+ return;
+
+cpuhp_free:
+ iommu_pmu_cpuhp_free(iommu_pmu);
+unregister:
+ perf_pmu_unregister(&iommu_pmu->pmu);
+err:
+ pr_err("Failed to register PMU for iommu (seq_id = %d)\n", iommu->seq_id);
+ free_iommu_pmu(iommu);
+}
+
+void iommu_pmu_unregister(struct intel_iommu *iommu)
+{
+ struct iommu_pmu *iommu_pmu = iommu->pmu;
+
+ if (!iommu_pmu)
+ return;
+
+ iommu_pmu_unset_interrupt(iommu);
+ iommu_pmu_cpuhp_free(iommu_pmu);
+ perf_pmu_unregister(&iommu_pmu->pmu);
+}
diff --git a/drivers/iommu/intel/perfmon.h b/drivers/iommu/intel/perfmon.h
new file mode 100644
index 000000000000..58606af9a2b9
--- /dev/null
+++ b/drivers/iommu/intel/perfmon.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * PERFCFGOFF_REG, PERFFRZOFF_REG
+ * PERFOVFOFF_REG, PERFCNTROFF_REG
+ */
+#define IOMMU_PMU_NUM_OFF_REGS 4
+#define IOMMU_PMU_OFF_REGS_STEP 4
+
+#define IOMMU_PMU_FILTER_REQUESTER_ID 0x01
+#define IOMMU_PMU_FILTER_DOMAIN 0x02
+#define IOMMU_PMU_FILTER_PASID 0x04
+#define IOMMU_PMU_FILTER_ATS 0x08
+#define IOMMU_PMU_FILTER_PAGE_TABLE 0x10
+
+#define IOMMU_PMU_FILTER_EN BIT(31)
+
+#define IOMMU_PMU_CFG_OFFSET 0x100
+#define IOMMU_PMU_CFG_CNTRCAP_OFFSET 0x80
+#define IOMMU_PMU_CFG_CNTREVCAP_OFFSET 0x84
+#define IOMMU_PMU_CFG_SIZE 0x8
+#define IOMMU_PMU_CFG_FILTERS_OFFSET 0x4
+
+#define IOMMU_PMU_CAP_REGS_STEP 8
+
+#define iommu_cntrcap_pcc(p) ((p) & 0x1)
+#define iommu_cntrcap_cw(p) (((p) >> 8) & 0xff)
+#define iommu_cntrcap_ios(p) (((p) >> 16) & 0x1)
+#define iommu_cntrcap_egcnt(p) (((p) >> 28) & 0xf)
+
+#define IOMMU_EVENT_CFG_EGI_SHIFT 8
+#define IOMMU_EVENT_CFG_ES_SHIFT 32
+#define IOMMU_EVENT_CFG_INT BIT_ULL(1)
+
+#define iommu_event_select(p) ((p) & 0xfffffff)
+#define iommu_event_group(p) (((p) >> 28) & 0xf)
+
+#ifdef CONFIG_INTEL_IOMMU_PERF_EVENTS
+int alloc_iommu_pmu(struct intel_iommu *iommu);
+void free_iommu_pmu(struct intel_iommu *iommu);
+void iommu_pmu_register(struct intel_iommu *iommu);
+void iommu_pmu_unregister(struct intel_iommu *iommu);
+#else
+static inline int
+alloc_iommu_pmu(struct intel_iommu *iommu)
+{
+ return 0;
+}
+
+static inline void
+free_iommu_pmu(struct intel_iommu *iommu)
+{
+}
+
+static inline void
+iommu_pmu_register(struct intel_iommu *iommu)
+{
+}
+
+static inline void
+iommu_pmu_unregister(struct intel_iommu *iommu)
+{
+}
+#endif /* CONFIG_INTEL_IOMMU_PERF_EVENTS */
diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c
index c76b66263467..7367f56c3bad 100644
--- a/drivers/iommu/intel/svm.c
+++ b/drivers/iommu/intel/svm.c
@@ -9,7 +9,6 @@
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
-#include <linux/intel-svm.h>
#include <linux/rculist.h>
#include <linux/pci.h>
#include <linux/pci-ats.h>
@@ -79,7 +78,7 @@ int intel_svm_enable_prq(struct intel_iommu *iommu)
}
iommu->prq = page_address(pages);
- irq = dmar_alloc_hwirq(DMAR_UNITS_SUPPORTED + iommu->seq_id, iommu->node, iommu);
+ irq = dmar_alloc_hwirq(IOMMU_IRQ_ID_OFFSET_PRQ + iommu->seq_id, iommu->node, iommu);
if (irq <= 0) {
pr_err("IOMMU: %s: Failed to create IRQ vector for page request queue\n",
iommu->name);
@@ -299,9 +298,8 @@ out:
return 0;
}
-static struct iommu_sva *intel_svm_bind_mm(struct intel_iommu *iommu,
- struct device *dev,
- struct mm_struct *mm)
+static int intel_svm_bind_mm(struct intel_iommu *iommu, struct device *dev,
+ struct mm_struct *mm)
{
struct device_domain_info *info = dev_iommu_priv_get(dev);
struct intel_svm_dev *sdev;
@@ -313,7 +311,7 @@ static struct iommu_sva *intel_svm_bind_mm(struct intel_iommu *iommu,
if (!svm) {
svm = kzalloc(sizeof(*svm), GFP_KERNEL);
if (!svm)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
svm->pasid = mm->pasid;
svm->mm = mm;
@@ -323,24 +321,17 @@ static struct iommu_sva *intel_svm_bind_mm(struct intel_iommu *iommu,
ret = mmu_notifier_register(&svm->notifier, mm);
if (ret) {
kfree(svm);
- return ERR_PTR(ret);
+ return ret;
}
ret = pasid_private_add(svm->pasid, svm);
if (ret) {
mmu_notifier_unregister(&svm->notifier, mm);
kfree(svm);
- return ERR_PTR(ret);
+ return ret;
}
}
- /* Find the matching device in svm list */
- sdev = svm_lookup_device_by_dev(svm, dev);
- if (sdev) {
- sdev->users++;
- goto success;
- }
-
sdev = kzalloc(sizeof(*sdev), GFP_KERNEL);
if (!sdev) {
ret = -ENOMEM;
@@ -351,12 +342,8 @@ static struct iommu_sva *intel_svm_bind_mm(struct intel_iommu *iommu,
sdev->iommu = iommu;
sdev->did = FLPT_DEFAULT_DID;
sdev->sid = PCI_DEVID(info->bus, info->devfn);
- sdev->users = 1;
- sdev->pasid = svm->pasid;
- sdev->sva.dev = dev;
init_rcu_head(&sdev->rcu);
if (info->ats_enabled) {
- sdev->dev_iotlb = 1;
sdev->qdep = info->ats_qdep;
if (sdev->qdep >= QI_DEV_EIOTLB_MAX_INVS)
sdev->qdep = 0;
@@ -370,8 +357,8 @@ static struct iommu_sva *intel_svm_bind_mm(struct intel_iommu *iommu,
goto free_sdev;
list_add_rcu(&sdev->list, &svm->devs);
-success:
- return &sdev->sva;
+
+ return 0;
free_sdev:
kfree(sdev);
@@ -382,7 +369,7 @@ free_svm:
kfree(svm);
}
- return ERR_PTR(ret);
+ return ret;
}
/* Caller must hold pasid_mutex */
@@ -404,32 +391,32 @@ static int intel_svm_unbind_mm(struct device *dev, u32 pasid)
mm = svm->mm;
if (sdev) {
- sdev->users--;
- if (!sdev->users) {
- list_del_rcu(&sdev->list);
- /* Flush the PASID cache and IOTLB for this device.
- * Note that we do depend on the hardware *not* using
- * the PASID any more. Just as we depend on other
- * devices never using PASIDs that they have no right
- * to use. We have a *shared* PASID table, because it's
- * large and has to be physically contiguous. So it's
- * hard to be as defensive as we might like. */
- intel_pasid_tear_down_entry(iommu, dev,
- svm->pasid, false);
- intel_svm_drain_prq(dev, svm->pasid);
- kfree_rcu(sdev, rcu);
-
- if (list_empty(&svm->devs)) {
- if (svm->notifier.ops)
- mmu_notifier_unregister(&svm->notifier, mm);
- pasid_private_remove(svm->pasid);
- /* We mandate that no page faults may be outstanding
- * for the PASID when intel_svm_unbind_mm() is called.
- * If that is not obeyed, subtle errors will happen.
- * Let's make them less subtle... */
- memset(svm, 0x6b, sizeof(*svm));
- kfree(svm);
- }
+ list_del_rcu(&sdev->list);
+ /*
+ * Flush the PASID cache and IOTLB for this device.
+ * Note that we do depend on the hardware *not* using
+ * the PASID any more. Just as we depend on other
+ * devices never using PASIDs that they have no right
+ * to use. We have a *shared* PASID table, because it's
+ * large and has to be physically contiguous. So it's
+ * hard to be as defensive as we might like.
+ */
+ intel_pasid_tear_down_entry(iommu, dev, svm->pasid, false);
+ intel_svm_drain_prq(dev, svm->pasid);
+ kfree_rcu(sdev, rcu);
+
+ if (list_empty(&svm->devs)) {
+ if (svm->notifier.ops)
+ mmu_notifier_unregister(&svm->notifier, mm);
+ pasid_private_remove(svm->pasid);
+ /*
+ * We mandate that no page faults may be outstanding
+ * for the PASID when intel_svm_unbind_mm() is called.
+ * If that is not obeyed, subtle errors will happen.
+ * Let's make them less subtle...
+ */
+ memset(svm, 0x6b, sizeof(*svm));
+ kfree(svm);
}
}
out:
@@ -854,13 +841,10 @@ static int intel_svm_set_dev_pasid(struct iommu_domain *domain,
struct device_domain_info *info = dev_iommu_priv_get(dev);
struct intel_iommu *iommu = info->iommu;
struct mm_struct *mm = domain->mm;
- struct iommu_sva *sva;
- int ret = 0;
+ int ret;
mutex_lock(&pasid_mutex);
- sva = intel_svm_bind_mm(iommu, dev, mm);
- if (IS_ERR(sva))
- ret = PTR_ERR(sva);
+ ret = intel_svm_bind_mm(iommu, dev, mm);
mutex_unlock(&pasid_mutex);
return ret;
diff --git a/drivers/iommu/iommu-traces.c b/drivers/iommu/iommu-traces.c
index 1e9ca7789de1..23416bf76df9 100644
--- a/drivers/iommu/iommu-traces.c
+++ b/drivers/iommu/iommu-traces.c
@@ -18,7 +18,6 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(remove_device_from_group);
/* iommu_device_event */
EXPORT_TRACEPOINT_SYMBOL_GPL(attach_device_to_domain);
-EXPORT_TRACEPOINT_SYMBOL_GPL(detach_device_from_domain);
/* iommu_map_unmap */
EXPORT_TRACEPOINT_SYMBOL_GPL(map);
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 5f6a85aea501..10db680acaed 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -30,6 +30,7 @@
#include <linux/cc_platform.h>
#include <trace/events/iommu.h>
#include <linux/sched/mm.h>
+#include <linux/msi.h>
#include "dma-iommu.h"
@@ -371,6 +372,30 @@ err_unlock:
return ret;
}
+static bool iommu_is_attach_deferred(struct device *dev)
+{
+ const struct iommu_ops *ops = dev_iommu_ops(dev);
+
+ if (ops->is_attach_deferred)
+ return ops->is_attach_deferred(dev);
+
+ return false;
+}
+
+static int iommu_group_do_dma_first_attach(struct device *dev, void *data)
+{
+ struct iommu_domain *domain = data;
+
+ lockdep_assert_held(&dev->iommu_group->mutex);
+
+ if (iommu_is_attach_deferred(dev)) {
+ dev->iommu->attach_deferred = 1;
+ return 0;
+ }
+
+ return __iommu_attach_device(domain, dev);
+}
+
int iommu_probe_device(struct device *dev)
{
const struct iommu_ops *ops;
@@ -401,7 +426,7 @@ int iommu_probe_device(struct device *dev)
* attach the default domain.
*/
if (group->default_domain && !group->owner) {
- ret = __iommu_attach_device(group->default_domain, dev);
+ ret = iommu_group_do_dma_first_attach(dev, group->default_domain);
if (ret) {
mutex_unlock(&group->mutex);
iommu_group_put(group);
@@ -774,12 +799,16 @@ struct iommu_group *iommu_group_alloc(void)
ret = iommu_group_create_file(group,
&iommu_group_attr_reserved_regions);
- if (ret)
+ if (ret) {
+ kobject_put(group->devices_kobj);
return ERR_PTR(ret);
+ }
ret = iommu_group_create_file(group, &iommu_group_attr_type);
- if (ret)
+ if (ret) {
+ kobject_put(group->devices_kobj);
return ERR_PTR(ret);
+ }
pr_debug("Allocated group %d\n", group->id);
@@ -930,7 +959,7 @@ map_end:
if (map_size) {
ret = iommu_map(domain, addr - map_size,
addr - map_size, map_size,
- entry->prot);
+ entry->prot, GFP_KERNEL);
if (ret)
goto out;
map_size = 0;
@@ -947,16 +976,6 @@ out:
return ret;
}
-static bool iommu_is_attach_deferred(struct device *dev)
-{
- const struct iommu_ops *ops = dev_iommu_ops(dev);
-
- if (ops->is_attach_deferred)
- return ops->is_attach_deferred(dev);
-
- return false;
-}
-
/**
* iommu_group_add_device - add a device to an iommu group
* @group: the group into which to add the device (reference should be held)
@@ -1009,8 +1028,8 @@ rename:
mutex_lock(&group->mutex);
list_add_tail(&device->list, &group->devices);
- if (group->domain && !iommu_is_attach_deferred(dev))
- ret = __iommu_attach_device(group->domain, dev);
+ if (group->domain)
+ ret = iommu_group_do_dma_first_attach(dev, group->domain);
mutex_unlock(&group->mutex);
if (ret)
goto err_put_group;
@@ -1776,21 +1795,10 @@ static void probe_alloc_default_domain(struct bus_type *bus,
}
-static int iommu_group_do_dma_attach(struct device *dev, void *data)
-{
- struct iommu_domain *domain = data;
- int ret = 0;
-
- if (!iommu_is_attach_deferred(dev))
- ret = __iommu_attach_device(domain, dev);
-
- return ret;
-}
-
-static int __iommu_group_dma_attach(struct iommu_group *group)
+static int __iommu_group_dma_first_attach(struct iommu_group *group)
{
return __iommu_group_for_each_dev(group, group->default_domain,
- iommu_group_do_dma_attach);
+ iommu_group_do_dma_first_attach);
}
static int iommu_group_do_probe_finalize(struct device *dev, void *data)
@@ -1855,7 +1863,7 @@ int bus_iommu_probe(struct bus_type *bus)
iommu_group_create_direct_mappings(group);
- ret = __iommu_group_dma_attach(group);
+ ret = __iommu_group_dma_first_attach(group);
mutex_unlock(&group->mutex);
@@ -1898,6 +1906,29 @@ bool device_iommu_capable(struct device *dev, enum iommu_cap cap)
EXPORT_SYMBOL_GPL(device_iommu_capable);
/**
+ * iommu_group_has_isolated_msi() - Compute msi_device_has_isolated_msi()
+ * for a group
+ * @group: Group to query
+ *
+ * IOMMU groups should not have differing values of
+ * msi_device_has_isolated_msi() for devices in a group. However nothing
+ * directly prevents this, so ensure mistakes don't result in isolation failures
+ * by checking that all the devices are the same.
+ */
+bool iommu_group_has_isolated_msi(struct iommu_group *group)
+{
+ struct group_device *group_dev;
+ bool ret = true;
+
+ mutex_lock(&group->mutex);
+ list_for_each_entry(group_dev, &group->devices, list)
+ ret &= msi_device_has_isolated_msi(group_dev->dev);
+ mutex_unlock(&group->mutex);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(iommu_group_has_isolated_msi);
+
+/**
* iommu_set_fault_handler() - set a fault handler for an iommu domain
* @domain: iommu domain
* @handler: fault handler
@@ -1987,9 +2018,11 @@ static int __iommu_attach_device(struct iommu_domain *domain,
return -ENODEV;
ret = domain->ops->attach_dev(domain, dev);
- if (!ret)
- trace_attach_device_to_domain(dev);
- return ret;
+ if (ret)
+ return ret;
+ dev->iommu->attach_deferred = 0;
+ trace_attach_device_to_domain(dev);
+ return 0;
}
/**
@@ -2034,22 +2067,12 @@ EXPORT_SYMBOL_GPL(iommu_attach_device);
int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain)
{
- if (iommu_is_attach_deferred(dev))
+ if (dev->iommu && dev->iommu->attach_deferred)
return __iommu_attach_device(domain, dev);
return 0;
}
-static void __iommu_detach_device(struct iommu_domain *domain,
- struct device *dev)
-{
- if (iommu_is_attach_deferred(dev))
- return;
-
- domain->ops->detach_dev(domain, dev);
- trace_detach_device_from_domain(dev);
-}
-
void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
{
struct iommu_group *group;
@@ -2124,8 +2147,22 @@ static int __iommu_attach_group(struct iommu_domain *domain,
ret = __iommu_group_for_each_dev(group, domain,
iommu_group_do_attach_device);
- if (ret == 0)
+ if (ret == 0) {
group->domain = domain;
+ } else {
+ /*
+ * To recover from the case when certain device within the
+ * group fails to attach to the new domain, we need force
+ * attaching all devices back to the old domain. The old
+ * domain is compatible for all devices in the group,
+ * hence the iommu driver should always return success.
+ */
+ struct iommu_domain *old_domain = group->domain;
+
+ group->domain = NULL;
+ WARN(__iommu_group_set_domain(group, old_domain),
+ "iommu driver failed to attach a compatible domain");
+ }
return ret;
}
@@ -2154,11 +2191,12 @@ int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group)
}
EXPORT_SYMBOL_GPL(iommu_attach_group);
-static int iommu_group_do_detach_device(struct device *dev, void *data)
+static int iommu_group_do_set_platform_dma(struct device *dev, void *data)
{
- struct iommu_domain *domain = data;
+ const struct iommu_ops *ops = dev_iommu_ops(dev);
- __iommu_detach_device(domain, dev);
+ if (!WARN_ON(!ops->set_platform_dma_ops))
+ ops->set_platform_dma_ops(dev);
return 0;
}
@@ -2172,15 +2210,13 @@ static int __iommu_group_set_domain(struct iommu_group *group,
return 0;
/*
- * New drivers should support default domains and so the detach_dev() op
- * will never be called. Otherwise the NULL domain represents some
+ * New drivers should support default domains, so set_platform_dma()
+ * op will never be called. Otherwise the NULL domain represents some
* platform specific behavior.
*/
if (!new_domain) {
- if (WARN_ON(!group->domain->ops->detach_dev))
- return -EINVAL;
- __iommu_group_for_each_dev(group, group->domain,
- iommu_group_do_detach_device);
+ __iommu_group_for_each_dev(group, NULL,
+ iommu_group_do_set_platform_dma);
group->domain = NULL;
return 0;
}
@@ -2360,34 +2396,27 @@ static int __iommu_map(struct iommu_domain *domain, unsigned long iova,
return ret;
}
-static int _iommu_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
+int iommu_map(struct iommu_domain *domain, unsigned long iova,
+ phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
const struct iommu_domain_ops *ops = domain->ops;
int ret;
+ might_sleep_if(gfpflags_allow_blocking(gfp));
+
+ /* Discourage passing strange GFP flags */
+ if (WARN_ON_ONCE(gfp & (__GFP_COMP | __GFP_DMA | __GFP_DMA32 |
+ __GFP_HIGHMEM)))
+ return -EINVAL;
+
ret = __iommu_map(domain, iova, paddr, size, prot, gfp);
if (ret == 0 && ops->iotlb_sync_map)
ops->iotlb_sync_map(domain, iova, size);
return ret;
}
-
-int iommu_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot)
-{
- might_sleep();
- return _iommu_map(domain, iova, paddr, size, prot, GFP_KERNEL);
-}
EXPORT_SYMBOL_GPL(iommu_map);
-int iommu_map_atomic(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot)
-{
- return _iommu_map(domain, iova, paddr, size, prot, GFP_ATOMIC);
-}
-EXPORT_SYMBOL_GPL(iommu_map_atomic);
-
static size_t __iommu_unmap_pages(struct iommu_domain *domain,
unsigned long iova, size_t size,
struct iommu_iotlb_gather *iotlb_gather)
@@ -2477,9 +2506,9 @@ size_t iommu_unmap_fast(struct iommu_domain *domain,
}
EXPORT_SYMBOL_GPL(iommu_unmap_fast);
-static ssize_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
- struct scatterlist *sg, unsigned int nents, int prot,
- gfp_t gfp)
+ssize_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
+ struct scatterlist *sg, unsigned int nents, int prot,
+ gfp_t gfp)
{
const struct iommu_domain_ops *ops = domain->ops;
size_t len = 0, mapped = 0;
@@ -2487,6 +2516,13 @@ static ssize_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
unsigned int i = 0;
int ret;
+ might_sleep_if(gfpflags_allow_blocking(gfp));
+
+ /* Discourage passing strange GFP flags */
+ if (WARN_ON_ONCE(gfp & (__GFP_COMP | __GFP_DMA | __GFP_DMA32 |
+ __GFP_HIGHMEM)))
+ return -EINVAL;
+
while (i <= nents) {
phys_addr_t s_phys = sg_phys(sg);
@@ -2526,21 +2562,8 @@ out_err:
return ret;
}
-
-ssize_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
- struct scatterlist *sg, unsigned int nents, int prot)
-{
- might_sleep();
- return __iommu_map_sg(domain, iova, sg, nents, prot, GFP_KERNEL);
-}
EXPORT_SYMBOL_GPL(iommu_map_sg);
-ssize_t iommu_map_sg_atomic(struct iommu_domain *domain, unsigned long iova,
- struct scatterlist *sg, unsigned int nents, int prot)
-{
- return __iommu_map_sg(domain, iova, sg, nents, prot, GFP_ATOMIC);
-}
-
/**
* report_iommu_fault() - report about an IOMMU fault to the IOMMU framework
* @domain: the iommu domain where the fault has happened
diff --git a/drivers/iommu/iommufd/Kconfig b/drivers/iommu/iommufd/Kconfig
index 8306616b6d81..ada693ea51a7 100644
--- a/drivers/iommu/iommufd/Kconfig
+++ b/drivers/iommu/iommufd/Kconfig
@@ -23,7 +23,7 @@ config IOMMUFD_VFIO_CONTAINER
removed.
IOMMUFD VFIO container emulation is known to lack certain features
- of the native VFIO container, such as no-IOMMU support, peer-to-peer
+ of the native VFIO container, such as peer-to-peer
DMA mapping, PPC IOMMU support, as well as other potentially
undiscovered gaps. This option is currently intended for the
purpose of testing IOMMUFD with unmodified userspace supporting VFIO
diff --git a/drivers/iommu/iommufd/device.c b/drivers/iommu/iommufd/device.c
index d81f93a321af..a0c66f47a65a 100644
--- a/drivers/iommu/iommufd/device.c
+++ b/drivers/iommu/iommufd/device.c
@@ -4,7 +4,6 @@
#include <linux/iommufd.h>
#include <linux/slab.h>
#include <linux/iommu.h>
-#include <linux/irqdomain.h>
#include "io_pagetable.h"
#include "iommufd_private.h"
@@ -169,8 +168,7 @@ static int iommufd_device_setup_msi(struct iommufd_device *idev,
* operation from the device (eg a simple DMA) cannot trigger an
* interrupt outside this iommufd context.
*/
- if (!device_iommu_capable(idev->dev, IOMMU_CAP_INTR_REMAP) &&
- !irq_domain_check_msi_remap()) {
+ if (!iommu_group_has_isolated_msi(idev->group)) {
if (!allow_unsafe_interrupts)
return -EPERM;
@@ -346,10 +344,6 @@ int iommufd_device_attach(struct iommufd_device *idev, u32 *pt_id)
rc = iommufd_device_do_attach(idev, hwpt);
if (rc)
goto out_put_pt_obj;
-
- mutex_lock(&hwpt->ioas->mutex);
- list_add_tail(&hwpt->hwpt_item, &hwpt->ioas->hwpt_list);
- mutex_unlock(&hwpt->ioas->mutex);
break;
}
case IOMMUFD_OBJ_IOAS: {
diff --git a/drivers/iommu/iommufd/iommufd_private.h b/drivers/iommu/iommufd/iommufd_private.h
index 222e86591f8a..9d7f71510ca1 100644
--- a/drivers/iommu/iommufd/iommufd_private.h
+++ b/drivers/iommu/iommufd/iommufd_private.h
@@ -18,6 +18,8 @@ struct iommufd_ctx {
struct xarray objects;
u8 account_mode;
+ /* Compatibility with VFIO no iommu */
+ u8 no_iommu_mode;
struct iommufd_ioas *vfio_ioas;
};
diff --git a/drivers/iommu/iommufd/main.c b/drivers/iommu/iommufd/main.c
index 083e6fcbe10a..3fbe636c3d8a 100644
--- a/drivers/iommu/iommufd/main.c
+++ b/drivers/iommu/iommufd/main.c
@@ -252,9 +252,12 @@ union ucmd_buffer {
struct iommu_destroy destroy;
struct iommu_ioas_alloc alloc;
struct iommu_ioas_allow_iovas allow_iovas;
+ struct iommu_ioas_copy ioas_copy;
struct iommu_ioas_iova_ranges iova_ranges;
struct iommu_ioas_map map;
struct iommu_ioas_unmap unmap;
+ struct iommu_option option;
+ struct iommu_vfio_ioas vfio_ioas;
#ifdef CONFIG_IOMMUFD_TEST
struct iommu_test_cmd test;
#endif
diff --git a/drivers/iommu/iommufd/pages.c b/drivers/iommu/iommufd/pages.c
index 1e1d3509efae..f8d92c9bb65b 100644
--- a/drivers/iommu/iommufd/pages.c
+++ b/drivers/iommu/iommufd/pages.c
@@ -456,7 +456,8 @@ static int batch_iommu_map_small(struct iommu_domain *domain,
size % PAGE_SIZE);
while (size) {
- rc = iommu_map(domain, iova, paddr, PAGE_SIZE, prot);
+ rc = iommu_map(domain, iova, paddr, PAGE_SIZE, prot,
+ GFP_KERNEL_ACCOUNT);
if (rc)
goto err_unmap;
iova += PAGE_SIZE;
@@ -500,7 +501,8 @@ static int batch_to_domain(struct pfn_batch *batch, struct iommu_domain *domain,
else
rc = iommu_map(domain, iova,
PFN_PHYS(batch->pfns[cur]) + page_offset,
- next_iova - iova, area->iommu_prot);
+ next_iova - iova, area->iommu_prot,
+ GFP_KERNEL_ACCOUNT);
if (rc)
goto err_unmap;
iova = next_iova;
diff --git a/drivers/iommu/iommufd/vfio_compat.c b/drivers/iommu/iommufd/vfio_compat.c
index 3ceca0e8311c..514494a0025b 100644
--- a/drivers/iommu/iommufd/vfio_compat.c
+++ b/drivers/iommu/iommufd/vfio_compat.c
@@ -26,39 +26,84 @@ out_unlock:
}
/**
- * iommufd_vfio_compat_ioas_id - Return the IOAS ID that vfio should use
+ * iommufd_vfio_compat_ioas_get_id - Ensure a compat IOAS exists
+ * @ictx: Context to operate on
+ * @out_ioas_id: The IOAS ID of the compatibility IOAS
+ *
+ * Return the ID of the current compatibility IOAS. The ID can be passed into
+ * other functions that take an ioas_id.
+ */
+int iommufd_vfio_compat_ioas_get_id(struct iommufd_ctx *ictx, u32 *out_ioas_id)
+{
+ struct iommufd_ioas *ioas;
+
+ ioas = get_compat_ioas(ictx);
+ if (IS_ERR(ioas))
+ return PTR_ERR(ioas);
+ *out_ioas_id = ioas->obj.id;
+ iommufd_put_object(&ioas->obj);
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(iommufd_vfio_compat_ioas_get_id, IOMMUFD_VFIO);
+
+/**
+ * iommufd_vfio_compat_set_no_iommu - Called when a no-iommu device is attached
+ * @ictx: Context to operate on
+ *
+ * This allows selecting the VFIO_NOIOMMU_IOMMU and blocks normal types.
+ */
+int iommufd_vfio_compat_set_no_iommu(struct iommufd_ctx *ictx)
+{
+ int ret;
+
+ xa_lock(&ictx->objects);
+ if (!ictx->vfio_ioas) {
+ ictx->no_iommu_mode = 1;
+ ret = 0;
+ } else {
+ ret = -EINVAL;
+ }
+ xa_unlock(&ictx->objects);
+ return ret;
+}
+EXPORT_SYMBOL_NS_GPL(iommufd_vfio_compat_set_no_iommu, IOMMUFD_VFIO);
+
+/**
+ * iommufd_vfio_compat_ioas_create - Ensure the compat IOAS is created
* @ictx: Context to operate on
- * @out_ioas_id: The ioas_id the caller should use
*
* The compatibility IOAS is the IOAS that the vfio compatibility ioctls operate
* on since they do not have an IOAS ID input in their ABI. Only attaching a
- * group should cause a default creation of the internal ioas, this returns the
- * existing ioas if it has already been assigned somehow.
+ * group should cause a default creation of the internal ioas, this does nothing
+ * if an existing ioas has already been assigned somehow.
*/
-int iommufd_vfio_compat_ioas_id(struct iommufd_ctx *ictx, u32 *out_ioas_id)
+int iommufd_vfio_compat_ioas_create(struct iommufd_ctx *ictx)
{
struct iommufd_ioas *ioas = NULL;
- struct iommufd_ioas *out_ioas;
+ int ret;
ioas = iommufd_ioas_alloc(ictx);
if (IS_ERR(ioas))
return PTR_ERR(ioas);
xa_lock(&ictx->objects);
- if (ictx->vfio_ioas && iommufd_lock_obj(&ictx->vfio_ioas->obj))
- out_ioas = ictx->vfio_ioas;
- else {
- out_ioas = ioas;
- ictx->vfio_ioas = ioas;
+ /*
+ * VFIO won't allow attaching a container to both iommu and no iommu
+ * operation
+ */
+ if (ictx->no_iommu_mode) {
+ ret = -EINVAL;
+ goto out_abort;
}
- xa_unlock(&ictx->objects);
- *out_ioas_id = out_ioas->obj.id;
- if (out_ioas != ioas) {
- iommufd_put_object(&out_ioas->obj);
- iommufd_object_abort(ictx, &ioas->obj);
- return 0;
+ if (ictx->vfio_ioas && iommufd_lock_obj(&ictx->vfio_ioas->obj)) {
+ ret = 0;
+ iommufd_put_object(&ictx->vfio_ioas->obj);
+ goto out_abort;
}
+ ictx->vfio_ioas = ioas;
+ xa_unlock(&ictx->objects);
+
/*
* An automatically created compat IOAS is treated as a userspace
* created object. Userspace can learn the ID via IOMMU_VFIO_IOAS_GET,
@@ -67,8 +112,13 @@ int iommufd_vfio_compat_ioas_id(struct iommufd_ctx *ictx, u32 *out_ioas_id)
*/
iommufd_object_finalize(ictx, &ioas->obj);
return 0;
+
+out_abort:
+ xa_unlock(&ictx->objects);
+ iommufd_object_abort(ictx, &ioas->obj);
+ return ret;
}
-EXPORT_SYMBOL_NS_GPL(iommufd_vfio_compat_ioas_id, IOMMUFD_VFIO);
+EXPORT_SYMBOL_NS_GPL(iommufd_vfio_compat_ioas_create, IOMMUFD_VFIO);
int iommufd_vfio_ioas(struct iommufd_ucmd *ucmd)
{
@@ -235,6 +285,9 @@ static int iommufd_vfio_check_extension(struct iommufd_ctx *ictx,
case VFIO_UNMAP_ALL:
return 1;
+ case VFIO_NOIOMMU_IOMMU:
+ return IS_ENABLED(CONFIG_VFIO_NOIOMMU);
+
case VFIO_DMA_CC_IOMMU:
return iommufd_vfio_cc_iommu(ictx);
@@ -261,10 +314,24 @@ static int iommufd_vfio_check_extension(struct iommufd_ctx *ictx,
static int iommufd_vfio_set_iommu(struct iommufd_ctx *ictx, unsigned long type)
{
+ bool no_iommu_mode = READ_ONCE(ictx->no_iommu_mode);
struct iommufd_ioas *ioas = NULL;
int rc = 0;
- if (type != VFIO_TYPE1_IOMMU && type != VFIO_TYPE1v2_IOMMU)
+ /*
+ * Emulation for NOIOMMU is imperfect in that VFIO blocks almost all
+ * other ioctls. We let them keep working but they mostly fail since no
+ * IOAS should exist.
+ */
+ if (IS_ENABLED(CONFIG_VFIO_NOIOMMU) && type == VFIO_NOIOMMU_IOMMU &&
+ no_iommu_mode) {
+ if (!capable(CAP_SYS_RAWIO))
+ return -EPERM;
+ return 0;
+ }
+
+ if ((type != VFIO_TYPE1_IOMMU && type != VFIO_TYPE1v2_IOMMU) ||
+ no_iommu_mode)
return -EINVAL;
/* VFIO fails the set_iommu if there is no group */
@@ -381,7 +448,7 @@ static int iommufd_vfio_iommu_get_info(struct iommufd_ctx *ictx,
};
size_t minsz = offsetofend(struct vfio_iommu_type1_info, iova_pgsizes);
struct vfio_info_cap_header __user *last_cap = NULL;
- struct vfio_iommu_type1_info info;
+ struct vfio_iommu_type1_info info = {};
struct iommufd_ioas *ioas;
size_t total_cap_size;
int rc;
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index a003bd5fc65c..bdf1a4e5eae0 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -299,18 +299,6 @@ static void ipmmu_utlb_enable(struct ipmmu_vmsa_domain *domain,
mmu->utlb_ctx[utlb] = domain->context_id;
}
-/*
- * Disable MMU translation for the microTLB.
- */
-static void ipmmu_utlb_disable(struct ipmmu_vmsa_domain *domain,
- unsigned int utlb)
-{
- struct ipmmu_vmsa_device *mmu = domain->mmu;
-
- ipmmu_imuctr_write(mmu, utlb, 0);
- mmu->utlb_ctx[utlb] = IPMMU_CTX_INVALID;
-}
-
static void ipmmu_tlb_flush_all(void *cookie)
{
struct ipmmu_vmsa_domain *domain = cookie;
@@ -643,21 +631,6 @@ static int ipmmu_attach_device(struct iommu_domain *io_domain,
return 0;
}
-static void ipmmu_detach_device(struct iommu_domain *io_domain,
- struct device *dev)
-{
- struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
- struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
- unsigned int i;
-
- for (i = 0; i < fwspec->num_ids; ++i)
- ipmmu_utlb_disable(domain, fwspec->ids[i]);
-
- /*
- * TODO: Optimize by disabling the context when no device is attached.
- */
-}
-
static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova,
phys_addr_t paddr, size_t pgsize, size_t pgcount,
int prot, gfp_t gfp, size_t *mapped)
@@ -876,7 +849,6 @@ static const struct iommu_ops ipmmu_ops = {
.of_xlate = ipmmu_of_xlate,
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = ipmmu_attach_device,
- .detach_dev = ipmmu_detach_device,
.map_pages = ipmmu_map,
.unmap_pages = ipmmu_unmap,
.flush_iotlb_all = ipmmu_flush_iotlb_all,
diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c
index c60624910872..454f6331c889 100644
--- a/drivers/iommu/msm_iommu.c
+++ b/drivers/iommu/msm_iommu.c
@@ -443,9 +443,9 @@ fail:
return ret;
}
-static void msm_iommu_detach_dev(struct iommu_domain *domain,
- struct device *dev)
+static void msm_iommu_set_platform_dma(struct device *dev)
{
+ struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
struct msm_priv *priv = to_msm_priv(domain);
unsigned long flags;
struct msm_iommu_dev *iommu;
@@ -678,11 +678,11 @@ static struct iommu_ops msm_iommu_ops = {
.domain_alloc = msm_iommu_domain_alloc,
.probe_device = msm_iommu_probe_device,
.device_group = generic_device_group,
+ .set_platform_dma_ops = msm_iommu_set_platform_dma,
.pgsize_bitmap = MSM_IOMMU_PGSIZES,
.of_xlate = qcom_iommu_of_xlate,
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = msm_iommu_attach_dev,
- .detach_dev = msm_iommu_detach_dev,
.map_pages = msm_iommu_map,
.unmap_pages = msm_iommu_unmap,
/*
diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
index 2badd6acfb23..d5a4955910ff 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -710,14 +710,6 @@ err_unlock:
return ret;
}
-static void mtk_iommu_detach_device(struct iommu_domain *domain,
- struct device *dev)
-{
- struct mtk_iommu_data *data = dev_iommu_priv_get(dev);
-
- mtk_iommu_config(data, dev, false, 0);
-}
-
static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t pgsize, size_t pgcount,
int prot, gfp_t gfp, size_t *mapped)
@@ -946,7 +938,6 @@ static const struct iommu_ops mtk_iommu_ops = {
.owner = THIS_MODULE,
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = mtk_iommu_attach_device,
- .detach_dev = mtk_iommu_detach_device,
.map_pages = mtk_iommu_map,
.unmap_pages = mtk_iommu_unmap,
.flush_iotlb_all = mtk_iommu_flush_iotlb_all,
diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c
index ca581ff1c769..43e4c8f89e23 100644
--- a/drivers/iommu/mtk_iommu_v1.c
+++ b/drivers/iommu/mtk_iommu_v1.c
@@ -319,7 +319,7 @@ static int mtk_iommu_v1_attach_device(struct iommu_domain *domain, struct device
return 0;
}
-static void mtk_iommu_v1_detach_device(struct iommu_domain *domain, struct device *dev)
+static void mtk_iommu_v1_set_platform_dma(struct device *dev)
{
struct mtk_iommu_v1_data *data = dev_iommu_priv_get(dev);
@@ -585,10 +585,10 @@ static const struct iommu_ops mtk_iommu_v1_ops = {
.def_domain_type = mtk_iommu_v1_def_domain_type,
.device_group = generic_device_group,
.pgsize_bitmap = MT2701_IOMMU_PAGE_SIZE,
+ .set_platform_dma_ops = mtk_iommu_v1_set_platform_dma,
.owner = THIS_MODULE,
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = mtk_iommu_v1_attach_device,
- .detach_dev = mtk_iommu_v1_detach_device,
.map_pages = mtk_iommu_v1_map,
.unmap_pages = mtk_iommu_v1_unmap,
.iova_to_phys = mtk_iommu_v1_iova_to_phys,
diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c
index 00d98f08732f..40f57d293a79 100644
--- a/drivers/iommu/of_iommu.c
+++ b/drivers/iommu/of_iommu.c
@@ -10,6 +10,7 @@
#include <linux/limits.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/of_iommu.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
@@ -171,3 +172,98 @@ const struct iommu_ops *of_iommu_configure(struct device *dev,
return ops;
}
+
+static enum iommu_resv_type __maybe_unused
+iommu_resv_region_get_type(struct device *dev,
+ struct resource *phys,
+ phys_addr_t start, size_t length)
+{
+ phys_addr_t end = start + length - 1;
+
+ /*
+ * IOMMU regions without an associated physical region cannot be
+ * mapped and are simply reservations.
+ */
+ if (phys->start >= phys->end)
+ return IOMMU_RESV_RESERVED;
+
+ /* may be IOMMU_RESV_DIRECT_RELAXABLE for certain cases */
+ if (start == phys->start && end == phys->end)
+ return IOMMU_RESV_DIRECT;
+
+ dev_warn(dev, "treating non-direct mapping [%pr] -> [%pap-%pap] as reservation\n", &phys,
+ &start, &end);
+ return IOMMU_RESV_RESERVED;
+}
+
+/**
+ * of_iommu_get_resv_regions - reserved region driver helper for device tree
+ * @dev: device for which to get reserved regions
+ * @list: reserved region list
+ *
+ * IOMMU drivers can use this to implement their .get_resv_regions() callback
+ * for memory regions attached to a device tree node. See the reserved-memory
+ * device tree bindings on how to use these:
+ *
+ * Documentation/devicetree/bindings/reserved-memory/reserved-memory.txt
+ */
+void of_iommu_get_resv_regions(struct device *dev, struct list_head *list)
+{
+#if IS_ENABLED(CONFIG_OF_ADDRESS)
+ struct of_phandle_iterator it;
+ int err;
+
+ of_for_each_phandle(&it, err, dev->of_node, "memory-region", NULL, 0) {
+ const __be32 *maps, *end;
+ struct resource phys;
+ int size;
+
+ memset(&phys, 0, sizeof(phys));
+
+ /*
+ * The "reg" property is optional and can be omitted by reserved-memory regions
+ * that represent reservations in the IOVA space, which are regions that should
+ * not be mapped.
+ */
+ if (of_find_property(it.node, "reg", NULL)) {
+ err = of_address_to_resource(it.node, 0, &phys);
+ if (err < 0) {
+ dev_err(dev, "failed to parse memory region %pOF: %d\n",
+ it.node, err);
+ continue;
+ }
+ }
+
+ maps = of_get_property(it.node, "iommu-addresses", &size);
+ if (!maps)
+ continue;
+
+ end = maps + size / sizeof(__be32);
+
+ while (maps < end) {
+ struct device_node *np;
+ u32 phandle;
+
+ phandle = be32_to_cpup(maps++);
+ np = of_find_node_by_phandle(phandle);
+
+ if (np == dev->of_node) {
+ int prot = IOMMU_READ | IOMMU_WRITE;
+ struct iommu_resv_region *region;
+ enum iommu_resv_type type;
+ phys_addr_t iova;
+ size_t length;
+
+ maps = of_translate_dma_region(np, maps, &iova, &length);
+ type = iommu_resv_region_get_type(dev, &phys, iova, length);
+
+ region = iommu_alloc_resv_region(iova, length, prot, type,
+ GFP_KERNEL);
+ if (region)
+ list_add_tail(&region->list, list);
+ }
+ }
+ }
+#endif
+}
+EXPORT_SYMBOL(of_iommu_get_resv_regions);
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index 2fd7702c6709..3ab078112a7c 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -1556,9 +1556,9 @@ static void _omap_iommu_detach_dev(struct omap_iommu_domain *omap_domain,
omap_domain->dev = NULL;
}
-static void omap_iommu_detach_dev(struct iommu_domain *domain,
- struct device *dev)
+static void omap_iommu_set_platform_dma(struct device *dev)
{
+ struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
spin_lock(&omap_domain->lock);
@@ -1737,10 +1737,10 @@ static const struct iommu_ops omap_iommu_ops = {
.probe_device = omap_iommu_probe_device,
.release_device = omap_iommu_release_device,
.device_group = omap_iommu_device_group,
+ .set_platform_dma_ops = omap_iommu_set_platform_dma,
.pgsize_bitmap = OMAP_IOMMU_PGSIZES,
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = omap_iommu_attach_dev,
- .detach_dev = omap_iommu_detach_dev,
.map = omap_iommu_map,
.unmap = omap_iommu_unmap,
.iova_to_phys = omap_iommu_iova_to_phys,
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index a68eadd64f38..f30db22ea5d7 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -1192,7 +1192,6 @@ static const struct iommu_ops rk_iommu_ops = {
.of_xlate = rk_iommu_of_xlate,
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = rk_iommu_attach_device,
- .detach_dev = rk_iommu_detach_device,
.map = rk_iommu_map,
.unmap = rk_iommu_unmap,
.iova_to_phys = rk_iommu_iova_to_phys,
diff --git a/drivers/iommu/s390-iommu.c b/drivers/iommu/s390-iommu.c
index ed33c6cce083..fbf59a8db29b 100644
--- a/drivers/iommu/s390-iommu.c
+++ b/drivers/iommu/s390-iommu.c
@@ -34,8 +34,6 @@ static bool s390_iommu_capable(struct device *dev, enum iommu_cap cap)
switch (cap) {
case IOMMU_CAP_CACHE_COHERENCY:
return true;
- case IOMMU_CAP_INTR_REMAP:
- return true;
default:
return false;
}
@@ -52,7 +50,7 @@ static struct iommu_domain *s390_domain_alloc(unsigned domain_type)
if (!s390_domain)
return NULL;
- s390_domain->dma_table = dma_alloc_cpu_table();
+ s390_domain->dma_table = dma_alloc_cpu_table(GFP_KERNEL);
if (!s390_domain->dma_table) {
kfree(s390_domain);
return NULL;
@@ -144,13 +142,10 @@ static int s390_iommu_attach_device(struct iommu_domain *domain,
return 0;
}
-static void s390_iommu_detach_device(struct iommu_domain *domain,
- struct device *dev)
+static void s390_iommu_set_platform_dma(struct device *dev)
{
struct zpci_dev *zdev = to_zpci_dev(dev);
- WARN_ON(zdev->s390_domain != to_s390_domain(domain));
-
__s390_iommu_detach_device(zdev);
zpci_dma_init_device(zdev);
}
@@ -260,7 +255,8 @@ static void s390_iommu_iotlb_sync_map(struct iommu_domain *domain,
static int s390_iommu_validate_trans(struct s390_domain *s390_domain,
phys_addr_t pa, dma_addr_t dma_addr,
- unsigned long nr_pages, int flags)
+ unsigned long nr_pages, int flags,
+ gfp_t gfp)
{
phys_addr_t page_addr = pa & PAGE_MASK;
unsigned long *entry;
@@ -268,7 +264,8 @@ static int s390_iommu_validate_trans(struct s390_domain *s390_domain,
int rc;
for (i = 0; i < nr_pages; i++) {
- entry = dma_walk_cpu_trans(s390_domain->dma_table, dma_addr);
+ entry = dma_walk_cpu_trans(s390_domain->dma_table, dma_addr,
+ gfp);
if (unlikely(!entry)) {
rc = -ENOMEM;
goto undo_cpu_trans;
@@ -284,7 +281,7 @@ undo_cpu_trans:
while (i-- > 0) {
dma_addr -= PAGE_SIZE;
entry = dma_walk_cpu_trans(s390_domain->dma_table,
- dma_addr);
+ dma_addr, gfp);
if (!entry)
break;
dma_update_cpu_trans(entry, 0, ZPCI_PTE_INVALID);
@@ -301,7 +298,8 @@ static int s390_iommu_invalidate_trans(struct s390_domain *s390_domain,
int rc = 0;
for (i = 0; i < nr_pages; i++) {
- entry = dma_walk_cpu_trans(s390_domain->dma_table, dma_addr);
+ entry = dma_walk_cpu_trans(s390_domain->dma_table, dma_addr,
+ GFP_ATOMIC);
if (unlikely(!entry)) {
rc = -EINVAL;
break;
@@ -339,7 +337,7 @@ static int s390_iommu_map_pages(struct iommu_domain *domain,
flags |= ZPCI_TABLE_PROTECTED;
rc = s390_iommu_validate_trans(s390_domain, paddr, iova,
- pgcount, flags);
+ pgcount, flags, gfp);
if (!rc)
*mapped = size;
@@ -435,11 +433,11 @@ static const struct iommu_ops s390_iommu_ops = {
.probe_device = s390_iommu_probe_device,
.release_device = s390_iommu_release_device,
.device_group = generic_device_group,
+ .set_platform_dma_ops = s390_iommu_set_platform_dma,
.pgsize_bitmap = SZ_4K,
.get_resv_regions = s390_iommu_get_resv_regions,
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = s390_iommu_attach_device,
- .detach_dev = s390_iommu_detach_device,
.map_pages = s390_iommu_map_pages,
.unmap_pages = s390_iommu_unmap_pages,
.flush_iotlb_all = s390_iommu_flush_iotlb_all,
diff --git a/drivers/iommu/sprd-iommu.c b/drivers/iommu/sprd-iommu.c
index 219bfa11f7f4..ae94d74b73f4 100644
--- a/drivers/iommu/sprd-iommu.c
+++ b/drivers/iommu/sprd-iommu.c
@@ -255,21 +255,6 @@ static int sprd_iommu_attach_device(struct iommu_domain *domain,
return 0;
}
-static void sprd_iommu_detach_device(struct iommu_domain *domain,
- struct device *dev)
-{
- struct sprd_iommu_domain *dom = to_sprd_domain(domain);
- struct sprd_iommu_device *sdev = dom->sdev;
- size_t pgt_size = sprd_iommu_pgt_size(domain);
-
- if (!sdev)
- return;
-
- dma_free_coherent(sdev->dev, pgt_size, dom->pgt_va, dom->pgt_pa);
- sprd_iommu_hw_en(sdev, false);
- dom->sdev = NULL;
-}
-
static int sprd_iommu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t pgsize, size_t pgcount,
int prot, gfp_t gfp, size_t *mapped)
@@ -414,7 +399,6 @@ static const struct iommu_ops sprd_iommu_ops = {
.owner = THIS_MODULE,
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = sprd_iommu_attach_device,
- .detach_dev = sprd_iommu_detach_device,
.map_pages = sprd_iommu_map,
.unmap_pages = sprd_iommu_unmap,
.iotlb_sync_map = sprd_iommu_sync_map,
diff --git a/drivers/iommu/sun50i-iommu.c b/drivers/iommu/sun50i-iommu.c
index 5b585eace3d4..2d993d0cea7d 100644
--- a/drivers/iommu/sun50i-iommu.c
+++ b/drivers/iommu/sun50i-iommu.c
@@ -834,7 +834,6 @@ static const struct iommu_ops sun50i_iommu_ops = {
.probe_device = sun50i_iommu_probe_device,
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = sun50i_iommu_attach_device,
- .detach_dev = sun50i_iommu_detach_device,
.flush_iotlb_all = sun50i_iommu_flush_iotlb_all,
.iotlb_sync_map = sun50i_iommu_iotlb_sync_map,
.iotlb_sync = sun50i_iommu_iotlb_sync,
diff --git a/drivers/iommu/tegra-gart.c b/drivers/iommu/tegra-gart.c
index ed53279d1106..a482ff838b53 100644
--- a/drivers/iommu/tegra-gart.c
+++ b/drivers/iommu/tegra-gart.c
@@ -124,9 +124,9 @@ static int gart_iommu_attach_dev(struct iommu_domain *domain,
return ret;
}
-static void gart_iommu_detach_dev(struct iommu_domain *domain,
- struct device *dev)
+static void gart_iommu_set_platform_dma(struct device *dev)
{
+ struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
struct gart_device *gart = gart_handle;
spin_lock(&gart->dom_lock);
@@ -270,11 +270,11 @@ static const struct iommu_ops gart_iommu_ops = {
.domain_alloc = gart_iommu_domain_alloc,
.probe_device = gart_iommu_probe_device,
.device_group = generic_device_group,
+ .set_platform_dma_ops = gart_iommu_set_platform_dma,
.pgsize_bitmap = GART_IOMMU_PGSIZES,
.of_xlate = gart_iommu_of_xlate,
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = gart_iommu_attach_dev,
- .detach_dev = gart_iommu_detach_dev,
.map = gart_iommu_map,
.unmap = gart_iommu_unmap,
.iova_to_phys = gart_iommu_iova_to_phys,
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index 5b1af40221ec..1cbf063ccf14 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -511,8 +511,9 @@ disable:
return err;
}
-static void tegra_smmu_detach_dev(struct iommu_domain *domain, struct device *dev)
+static void tegra_smmu_set_platform_dma(struct device *dev)
{
+ struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
struct tegra_smmu_as *as = to_smmu_as(domain);
struct tegra_smmu *smmu = as->smmu;
@@ -671,12 +672,12 @@ static struct page *as_get_pde_page(struct tegra_smmu_as *as,
* allocate page in a sleeping context if GFP flags permit. Hence
* spinlock needs to be unlocked and re-locked after allocation.
*/
- if (!(gfp & __GFP_ATOMIC))
+ if (gfpflags_allow_blocking(gfp))
spin_unlock_irqrestore(&as->lock, *flags);
page = alloc_page(gfp | __GFP_DMA | __GFP_ZERO);
- if (!(gfp & __GFP_ATOMIC))
+ if (gfpflags_allow_blocking(gfp))
spin_lock_irqsave(&as->lock, *flags);
/*
@@ -965,11 +966,11 @@ static const struct iommu_ops tegra_smmu_ops = {
.domain_alloc = tegra_smmu_domain_alloc,
.probe_device = tegra_smmu_probe_device,
.device_group = tegra_smmu_device_group,
+ .set_platform_dma_ops = tegra_smmu_set_platform_dma,
.of_xlate = tegra_smmu_of_xlate,
.pgsize_bitmap = SZ_4K,
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = tegra_smmu_attach_dev,
- .detach_dev = tegra_smmu_detach_dev,
.map = tegra_smmu_map,
.unmap = tegra_smmu_unmap,
.iova_to_phys = tegra_smmu_iova_to_phys,
diff --git a/drivers/ipack/devices/ipoctal.c b/drivers/ipack/devices/ipoctal.c
index fc00274070b6..a01c15812b70 100644
--- a/drivers/ipack/devices/ipoctal.c
+++ b/drivers/ipack/devices/ipoctal.c
@@ -253,7 +253,7 @@ static void ipoctal_irq_channel(struct ipoctal_channel *channel)
static irqreturn_t ipoctal_irq_handler(void *arg)
{
unsigned int i;
- struct ipoctal *ipoctal = (struct ipoctal *) arg;
+ struct ipoctal *ipoctal = arg;
/* Clear the IPack device interrupt */
readw(ipoctal->int_space + ACK_INT_REQ0);
@@ -647,7 +647,7 @@ static void ipoctal_hangup(struct tty_struct *tty)
tty_port_hangup(&channel->tty_port);
ipoctal_reset_channel(channel);
- tty_port_set_initialized(&channel->tty_port, 0);
+ tty_port_set_initialized(&channel->tty_port, false);
wake_up_interruptible(&channel->tty_port.open_wait);
}
@@ -659,7 +659,7 @@ static void ipoctal_shutdown(struct tty_struct *tty)
return;
ipoctal_reset_channel(channel);
- tty_port_set_initialized(&channel->tty_port, 0);
+ tty_port_set_initialized(&channel->tty_port, false);
}
static void ipoctal_cleanup(struct tty_struct *tty)
diff --git a/drivers/ipack/ipack.c b/drivers/ipack/ipack.c
index 74d449858a61..cc1ecfd49928 100644
--- a/drivers/ipack/ipack.c
+++ b/drivers/ipack/ipack.c
@@ -76,9 +76,9 @@ static void ipack_bus_remove(struct device *device)
drv->ops->remove(dev);
}
-static int ipack_uevent(struct device *dev, struct kobj_uevent_env *env)
+static int ipack_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
- struct ipack_device *idev;
+ const struct ipack_device *idev;
if (!dev)
return -ENODEV;
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index caa952c40ff9..7dc990eb2c9b 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -136,11 +136,6 @@ config BRCMSTB_L2_IRQ
select GENERIC_IRQ_CHIP
select IRQ_DOMAIN
-config DAVINCI_AINTC
- bool
- select GENERIC_IRQ_CHIP
- select IRQ_DOMAIN
-
config DAVINCI_CP_INTC
bool
select GENERIC_IRQ_CHIP
@@ -389,7 +384,7 @@ config LS_EXTIRQ
config LS_SCFG_MSI
def_bool y if SOC_LS1021A || ARCH_LAYERSCAPE
- depends on PCI && PCI_MSI
+ depends on PCI_MSI
config PARTITION_PERCPU
bool
@@ -658,6 +653,7 @@ config APPLE_AIC
bool "Apple Interrupt Controller (AIC)"
depends on ARM64
depends on ARCH_APPLE || COMPILE_TEST
+ select GENERIC_IRQ_IPI_MUX
help
Support for the Apple Interrupt Controller found on Apple Silicon SoCs,
such as the M1.
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index 87b49a10962c..ffd945fe71aa 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -8,7 +8,6 @@ obj-$(CONFIG_ATH79) += irq-ath79-misc.o
obj-$(CONFIG_ARCH_BCM2835) += irq-bcm2835.o
obj-$(CONFIG_ARCH_BCM2835) += irq-bcm2836.o
obj-$(CONFIG_ARCH_ACTIONS) += irq-owl-sirq.o
-obj-$(CONFIG_DAVINCI_AINTC) += irq-davinci-aintc.o
obj-$(CONFIG_DAVINCI_CP_INTC) += irq-davinci-cp-intc.o
obj-$(CONFIG_EXYNOS_IRQ_COMBINER) += exynos-combiner.o
obj-$(CONFIG_FARADAY_FTINTC010) += irq-ftintc010.o
diff --git a/drivers/irqchip/irq-alpine-msi.c b/drivers/irqchip/irq-alpine-msi.c
index 5ddb8e578ac6..9c8b1349ee17 100644
--- a/drivers/irqchip/irq-alpine-msi.c
+++ b/drivers/irqchip/irq-alpine-msi.c
@@ -199,21 +199,20 @@ static int alpine_msix_init_domains(struct alpine_msix_data *priv,
}
gic_domain = irq_find_host(gic_node);
+ of_node_put(gic_node);
if (!gic_domain) {
pr_err("Failed to find the GIC domain\n");
return -ENXIO;
}
- middle_domain = irq_domain_add_tree(NULL,
- &alpine_msix_middle_domain_ops,
- priv);
+ middle_domain = irq_domain_add_hierarchy(gic_domain, 0, 0, NULL,
+ &alpine_msix_middle_domain_ops,
+ priv);
if (!middle_domain) {
pr_err("Failed to create the MSIX middle domain\n");
return -ENOMEM;
}
- middle_domain->parent = gic_domain;
-
msi_domain = pci_msi_create_irq_domain(of_node_to_fwnode(node),
&alpine_msix_domain_info,
middle_domain);
diff --git a/drivers/irqchip/irq-apple-aic.c b/drivers/irqchip/irq-apple-aic.c
index ae3437f03e6c..eabb3b92965b 100644
--- a/drivers/irqchip/irq-apple-aic.c
+++ b/drivers/irqchip/irq-apple-aic.c
@@ -292,7 +292,6 @@ struct aic_irq_chip {
void __iomem *base;
void __iomem *event;
struct irq_domain *hw_domain;
- struct irq_domain *ipi_domain;
struct {
cpumask_t aff;
} *fiq_aff[AIC_NR_FIQ];
@@ -307,9 +306,6 @@ struct aic_irq_chip {
static DEFINE_PER_CPU(uint32_t, aic_fiq_unmasked);
-static DEFINE_PER_CPU(atomic_t, aic_vipi_flag);
-static DEFINE_PER_CPU(atomic_t, aic_vipi_enable);
-
static struct aic_irq_chip *aic_irqc;
static void aic_handle_ipi(struct pt_regs *regs);
@@ -751,98 +747,8 @@ static void aic_ipi_send_fast(int cpu)
isb();
}
-static void aic_ipi_mask(struct irq_data *d)
-{
- u32 irq_bit = BIT(irqd_to_hwirq(d));
-
- /* No specific ordering requirements needed here. */
- atomic_andnot(irq_bit, this_cpu_ptr(&aic_vipi_enable));
-}
-
-static void aic_ipi_unmask(struct irq_data *d)
-{
- struct aic_irq_chip *ic = irq_data_get_irq_chip_data(d);
- u32 irq_bit = BIT(irqd_to_hwirq(d));
-
- atomic_or(irq_bit, this_cpu_ptr(&aic_vipi_enable));
-
- /*
- * The atomic_or() above must complete before the atomic_read()
- * below to avoid racing aic_ipi_send_mask().
- */
- smp_mb__after_atomic();
-
- /*
- * If a pending vIPI was unmasked, raise a HW IPI to ourselves.
- * No barriers needed here since this is a self-IPI.
- */
- if (atomic_read(this_cpu_ptr(&aic_vipi_flag)) & irq_bit) {
- if (static_branch_likely(&use_fast_ipi))
- aic_ipi_send_fast(smp_processor_id());
- else
- aic_ic_write(ic, AIC_IPI_SEND, AIC_IPI_SEND_CPU(smp_processor_id()));
- }
-}
-
-static void aic_ipi_send_mask(struct irq_data *d, const struct cpumask *mask)
-{
- struct aic_irq_chip *ic = irq_data_get_irq_chip_data(d);
- u32 irq_bit = BIT(irqd_to_hwirq(d));
- u32 send = 0;
- int cpu;
- unsigned long pending;
-
- for_each_cpu(cpu, mask) {
- /*
- * This sequence is the mirror of the one in aic_ipi_unmask();
- * see the comment there. Additionally, release semantics
- * ensure that the vIPI flag set is ordered after any shared
- * memory accesses that precede it. This therefore also pairs
- * with the atomic_fetch_andnot in aic_handle_ipi().
- */
- pending = atomic_fetch_or_release(irq_bit, per_cpu_ptr(&aic_vipi_flag, cpu));
-
- /*
- * The atomic_fetch_or_release() above must complete before the
- * atomic_read() below to avoid racing aic_ipi_unmask().
- */
- smp_mb__after_atomic();
-
- if (!(pending & irq_bit) &&
- (atomic_read(per_cpu_ptr(&aic_vipi_enable, cpu)) & irq_bit)) {
- if (static_branch_likely(&use_fast_ipi))
- aic_ipi_send_fast(cpu);
- else
- send |= AIC_IPI_SEND_CPU(cpu);
- }
- }
-
- /*
- * The flag writes must complete before the physical IPI is issued
- * to another CPU. This is implied by the control dependency on
- * the result of atomic_read_acquire() above, which is itself
- * already ordered after the vIPI flag write.
- */
- if (send)
- aic_ic_write(ic, AIC_IPI_SEND, send);
-}
-
-static struct irq_chip ipi_chip = {
- .name = "AIC-IPI",
- .irq_mask = aic_ipi_mask,
- .irq_unmask = aic_ipi_unmask,
- .ipi_send_mask = aic_ipi_send_mask,
-};
-
-/*
- * IPI IRQ domain
- */
-
static void aic_handle_ipi(struct pt_regs *regs)
{
- int i;
- unsigned long enabled, firing;
-
/*
* Ack the IPI. We need to order this after the AIC event read, but
* that is enforced by normal MMIO ordering guarantees.
@@ -857,27 +763,7 @@ static void aic_handle_ipi(struct pt_regs *regs)
aic_ic_write(aic_irqc, AIC_IPI_ACK, AIC_IPI_OTHER);
}
- /*
- * The mask read does not need to be ordered. Only we can change
- * our own mask anyway, so no races are possible here, as long as
- * we are properly in the interrupt handler (which is covered by
- * the barrier that is part of the top-level AIC handler's readl()).
- */
- enabled = atomic_read(this_cpu_ptr(&aic_vipi_enable));
-
- /*
- * Clear the IPIs we are about to handle. This pairs with the
- * atomic_fetch_or_release() in aic_ipi_send_mask(), and needs to be
- * ordered after the aic_ic_write() above (to avoid dropping vIPIs) and
- * before IPI handling code (to avoid races handling vIPIs before they
- * are signaled). The former is taken care of by the release semantics
- * of the write portion, while the latter is taken care of by the
- * acquire semantics of the read portion.
- */
- firing = atomic_fetch_andnot(enabled, this_cpu_ptr(&aic_vipi_flag)) & enabled;
-
- for_each_set_bit(i, &firing, AIC_NR_SWIPI)
- generic_handle_domain_irq(aic_irqc->ipi_domain, i);
+ ipi_mux_process();
/*
* No ordering needed here; at worst this just changes the timing of
@@ -887,55 +773,24 @@ static void aic_handle_ipi(struct pt_regs *regs)
aic_ic_write(aic_irqc, AIC_IPI_MASK_CLR, AIC_IPI_OTHER);
}
-static int aic_ipi_alloc(struct irq_domain *d, unsigned int virq,
- unsigned int nr_irqs, void *args)
+static void aic_ipi_send_single(unsigned int cpu)
{
- int i;
-
- for (i = 0; i < nr_irqs; i++) {
- irq_set_percpu_devid(virq + i);
- irq_domain_set_info(d, virq + i, i, &ipi_chip, d->host_data,
- handle_percpu_devid_irq, NULL, NULL);
- }
-
- return 0;
-}
-
-static void aic_ipi_free(struct irq_domain *d, unsigned int virq, unsigned int nr_irqs)
-{
- /* Not freeing IPIs */
+ if (static_branch_likely(&use_fast_ipi))
+ aic_ipi_send_fast(cpu);
+ else
+ aic_ic_write(aic_irqc, AIC_IPI_SEND, AIC_IPI_SEND_CPU(cpu));
}
-static const struct irq_domain_ops aic_ipi_domain_ops = {
- .alloc = aic_ipi_alloc,
- .free = aic_ipi_free,
-};
-
static int __init aic_init_smp(struct aic_irq_chip *irqc, struct device_node *node)
{
- struct irq_domain *ipi_domain;
int base_ipi;
- ipi_domain = irq_domain_create_linear(irqc->hw_domain->fwnode, AIC_NR_SWIPI,
- &aic_ipi_domain_ops, irqc);
- if (WARN_ON(!ipi_domain))
- return -ENODEV;
-
- ipi_domain->flags |= IRQ_DOMAIN_FLAG_IPI_SINGLE;
- irq_domain_update_bus_token(ipi_domain, DOMAIN_BUS_IPI);
-
- base_ipi = __irq_domain_alloc_irqs(ipi_domain, -1, AIC_NR_SWIPI,
- NUMA_NO_NODE, NULL, false, NULL);
-
- if (WARN_ON(!base_ipi)) {
- irq_domain_remove(ipi_domain);
+ base_ipi = ipi_mux_create(AIC_NR_SWIPI, aic_ipi_send_single);
+ if (WARN_ON(base_ipi <= 0))
return -ENODEV;
- }
set_smp_ipi_range(base_ipi, AIC_NR_SWIPI);
- irqc->ipi_domain = ipi_domain;
-
return 0;
}
diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c
index ee18eb3e72b7..a55528469278 100644
--- a/drivers/irqchip/irq-armada-370-xp.c
+++ b/drivers/irqchip/irq-armada-370-xp.c
@@ -454,8 +454,7 @@ static __init void armada_xp_ipi_init(struct device_node *node)
return;
irq_domain_update_bus_token(ipi_domain, DOMAIN_BUS_IPI);
- base_ipi = __irq_domain_alloc_irqs(ipi_domain, -1, IPI_DOORBELL_END,
- NUMA_NO_NODE, NULL, false, NULL);
+ base_ipi = irq_domain_alloc_irqs(ipi_domain, IPI_DOORBELL_END, NUMA_NO_NODE, NULL);
if (WARN_ON(!base_ipi))
return;
diff --git a/drivers/irqchip/irq-aspeed-scu-ic.c b/drivers/irqchip/irq-aspeed-scu-ic.c
index 279e92cf0b16..94a7223e95df 100644
--- a/drivers/irqchip/irq-aspeed-scu-ic.c
+++ b/drivers/irqchip/irq-aspeed-scu-ic.c
@@ -17,8 +17,9 @@
#define ASPEED_SCU_IC_REG 0x018
#define ASPEED_SCU_IC_SHIFT 0
-#define ASPEED_SCU_IC_ENABLE GENMASK(6, ASPEED_SCU_IC_SHIFT)
+#define ASPEED_SCU_IC_ENABLE GENMASK(15, ASPEED_SCU_IC_SHIFT)
#define ASPEED_SCU_IC_NUM_IRQS 7
+#define ASPEED_SCU_IC_STATUS GENMASK(28, 16)
#define ASPEED_SCU_IC_STATUS_SHIFT 16
#define ASPEED_AST2600_SCU_IC0_REG 0x560
@@ -155,6 +156,8 @@ static int aspeed_scu_ic_of_init_common(struct aspeed_scu_ic *scu_ic,
rc = PTR_ERR(scu_ic->scu);
goto err;
}
+ regmap_write_bits(scu_ic->scu, scu_ic->reg, ASPEED_SCU_IC_STATUS, ASPEED_SCU_IC_STATUS);
+ regmap_write_bits(scu_ic->scu, scu_ic->reg, ASPEED_SCU_IC_ENABLE, 0);
irq = irq_of_parse_and_map(node, 0);
if (!irq) {
diff --git a/drivers/irqchip/irq-bcm2836.c b/drivers/irqchip/irq-bcm2836.c
index 51491c3c6fdd..e5f1059b989f 100644
--- a/drivers/irqchip/irq-bcm2836.c
+++ b/drivers/irqchip/irq-bcm2836.c
@@ -268,10 +268,7 @@ static void __init bcm2836_arm_irqchip_smp_init(void)
ipi_domain->flags |= IRQ_DOMAIN_FLAG_IPI_SINGLE;
irq_domain_update_bus_token(ipi_domain, DOMAIN_BUS_IPI);
- base_ipi = __irq_domain_alloc_irqs(ipi_domain, -1, BITS_PER_MBOX,
- NUMA_NO_NODE, NULL,
- false, NULL);
-
+ base_ipi = irq_domain_alloc_irqs(ipi_domain, BITS_PER_MBOX, NUMA_NO_NODE, NULL);
if (WARN_ON(!base_ipi))
return;
diff --git a/drivers/irqchip/irq-bcm7120-l2.c b/drivers/irqchip/irq-bcm7120-l2.c
index bb6609cebdbc..1e9dab6e0d86 100644
--- a/drivers/irqchip/irq-bcm7120-l2.c
+++ b/drivers/irqchip/irq-bcm7120-l2.c
@@ -279,7 +279,8 @@ static int __init bcm7120_l2_intc_probe(struct device_node *dn,
flags |= IRQ_GC_BE_IO;
ret = irq_alloc_domain_generic_chips(data->domain, IRQS_PER_WORD, 1,
- dn->full_name, handle_level_irq, clr, 0, flags);
+ dn->full_name, handle_level_irq, clr,
+ IRQ_LEVEL, flags);
if (ret) {
pr_err("failed to allocate generic irq chip\n");
goto out_free_domain;
diff --git a/drivers/irqchip/irq-brcmstb-l2.c b/drivers/irqchip/irq-brcmstb-l2.c
index e4efc08ac594..091b0fe7e324 100644
--- a/drivers/irqchip/irq-brcmstb-l2.c
+++ b/drivers/irqchip/irq-brcmstb-l2.c
@@ -161,6 +161,7 @@ static int __init brcmstb_l2_intc_of_init(struct device_node *np,
*init_params)
{
unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
+ unsigned int set = 0;
struct brcmstb_l2_intc_data *data;
struct irq_chip_type *ct;
int ret;
@@ -208,9 +209,12 @@ static int __init brcmstb_l2_intc_of_init(struct device_node *np,
if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
flags |= IRQ_GC_BE_IO;
+ if (init_params->handler == handle_level_irq)
+ set |= IRQ_LEVEL;
+
/* Allocate a single Generic IRQ chip for this node */
ret = irq_alloc_domain_generic_chips(data->domain, 32, 1,
- np->full_name, init_params->handler, clr, 0, flags);
+ np->full_name, init_params->handler, clr, set, flags);
if (ret) {
pr_err("failed to allocate generic irq chip\n");
goto out_free_domain;
diff --git a/drivers/irqchip/irq-davinci-aintc.c b/drivers/irqchip/irq-davinci-aintc.c
deleted file mode 100644
index 123eb7bfc117..000000000000
--- a/drivers/irqchip/irq-davinci-aintc.c
+++ /dev/null
@@ -1,163 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-//
-// Copyright (C) 2006, 2019 Texas Instruments.
-//
-// Interrupt handler for DaVinci boards.
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/irqchip/irq-davinci-aintc.h>
-#include <linux/io.h>
-#include <linux/irqdomain.h>
-
-#include <asm/exception.h>
-
-#define DAVINCI_AINTC_FIQ_REG0 0x00
-#define DAVINCI_AINTC_FIQ_REG1 0x04
-#define DAVINCI_AINTC_IRQ_REG0 0x08
-#define DAVINCI_AINTC_IRQ_REG1 0x0c
-#define DAVINCI_AINTC_IRQ_IRQENTRY 0x14
-#define DAVINCI_AINTC_IRQ_ENT_REG0 0x18
-#define DAVINCI_AINTC_IRQ_ENT_REG1 0x1c
-#define DAVINCI_AINTC_IRQ_INCTL_REG 0x20
-#define DAVINCI_AINTC_IRQ_EABASE_REG 0x24
-#define DAVINCI_AINTC_IRQ_INTPRI0_REG 0x30
-#define DAVINCI_AINTC_IRQ_INTPRI7_REG 0x4c
-
-static void __iomem *davinci_aintc_base;
-static struct irq_domain *davinci_aintc_irq_domain;
-
-static inline void davinci_aintc_writel(unsigned long value, int offset)
-{
- writel_relaxed(value, davinci_aintc_base + offset);
-}
-
-static inline unsigned long davinci_aintc_readl(int offset)
-{
- return readl_relaxed(davinci_aintc_base + offset);
-}
-
-static __init void
-davinci_aintc_setup_gc(void __iomem *base,
- unsigned int irq_start, unsigned int num)
-{
- struct irq_chip_generic *gc;
- struct irq_chip_type *ct;
-
- gc = irq_get_domain_generic_chip(davinci_aintc_irq_domain, irq_start);
- gc->reg_base = base;
- gc->irq_base = irq_start;
-
- ct = gc->chip_types;
- ct->chip.irq_ack = irq_gc_ack_set_bit;
- ct->chip.irq_mask = irq_gc_mask_clr_bit;
- ct->chip.irq_unmask = irq_gc_mask_set_bit;
-
- ct->regs.ack = DAVINCI_AINTC_IRQ_REG0;
- ct->regs.mask = DAVINCI_AINTC_IRQ_ENT_REG0;
- irq_setup_generic_chip(gc, IRQ_MSK(num), IRQ_GC_INIT_MASK_CACHE,
- IRQ_NOREQUEST | IRQ_NOPROBE, 0);
-}
-
-static asmlinkage void __exception_irq_entry
-davinci_aintc_handle_irq(struct pt_regs *regs)
-{
- int irqnr = davinci_aintc_readl(DAVINCI_AINTC_IRQ_IRQENTRY);
-
- /*
- * Use the formula for entry vector index generation from section
- * 8.3.3 of the manual.
- */
- irqnr >>= 2;
- irqnr -= 1;
-
- generic_handle_domain_irq(davinci_aintc_irq_domain, irqnr);
-}
-
-/* ARM Interrupt Controller Initialization */
-void __init davinci_aintc_init(const struct davinci_aintc_config *config)
-{
- unsigned int irq_off, reg_off, prio, shift;
- void __iomem *req;
- int ret, irq_base;
- const u8 *prios;
-
- req = request_mem_region(config->reg.start,
- resource_size(&config->reg),
- "davinci-cp-intc");
- if (!req) {
- pr_err("%s: register range busy\n", __func__);
- return;
- }
-
- davinci_aintc_base = ioremap(config->reg.start,
- resource_size(&config->reg));
- if (!davinci_aintc_base) {
- pr_err("%s: unable to ioremap register range\n", __func__);
- return;
- }
-
- /* Clear all interrupt requests */
- davinci_aintc_writel(~0x0, DAVINCI_AINTC_FIQ_REG0);
- davinci_aintc_writel(~0x0, DAVINCI_AINTC_FIQ_REG1);
- davinci_aintc_writel(~0x0, DAVINCI_AINTC_IRQ_REG0);
- davinci_aintc_writel(~0x0, DAVINCI_AINTC_IRQ_REG1);
-
- /* Disable all interrupts */
- davinci_aintc_writel(0x0, DAVINCI_AINTC_IRQ_ENT_REG0);
- davinci_aintc_writel(0x0, DAVINCI_AINTC_IRQ_ENT_REG1);
-
- /* Interrupts disabled immediately, IRQ entry reflects all */
- davinci_aintc_writel(0x0, DAVINCI_AINTC_IRQ_INCTL_REG);
-
- /* we don't use the hardware vector table, just its entry addresses */
- davinci_aintc_writel(0, DAVINCI_AINTC_IRQ_EABASE_REG);
-
- /* Clear all interrupt requests */
- davinci_aintc_writel(~0x0, DAVINCI_AINTC_FIQ_REG0);
- davinci_aintc_writel(~0x0, DAVINCI_AINTC_FIQ_REG1);
- davinci_aintc_writel(~0x0, DAVINCI_AINTC_IRQ_REG0);
- davinci_aintc_writel(~0x0, DAVINCI_AINTC_IRQ_REG1);
-
- prios = config->prios;
- for (reg_off = DAVINCI_AINTC_IRQ_INTPRI0_REG;
- reg_off <= DAVINCI_AINTC_IRQ_INTPRI7_REG; reg_off += 4) {
- for (shift = 0, prio = 0; shift < 32; shift += 4, prios++)
- prio |= (*prios & 0x07) << shift;
- davinci_aintc_writel(prio, reg_off);
- }
-
- irq_base = irq_alloc_descs(-1, 0, config->num_irqs, 0);
- if (irq_base < 0) {
- pr_err("%s: unable to allocate interrupt descriptors: %d\n",
- __func__, irq_base);
- return;
- }
-
- davinci_aintc_irq_domain = irq_domain_add_legacy(NULL,
- config->num_irqs, irq_base, 0,
- &irq_domain_simple_ops, NULL);
- if (!davinci_aintc_irq_domain) {
- pr_err("%s: unable to create interrupt domain\n", __func__);
- return;
- }
-
- ret = irq_alloc_domain_generic_chips(davinci_aintc_irq_domain, 32, 1,
- "AINTC", handle_edge_irq,
- IRQ_NOREQUEST | IRQ_NOPROBE, 0, 0);
- if (ret) {
- pr_err("%s: unable to allocate generic irq chips for domain\n",
- __func__);
- return;
- }
-
- for (irq_off = 0, reg_off = 0;
- irq_off < config->num_irqs;
- irq_off += 32, reg_off += 0x04)
- davinci_aintc_setup_gc(davinci_aintc_base + reg_off,
- irq_base + irq_off, 32);
-
- set_handle_irq(davinci_aintc_handle_irq);
-}
diff --git a/drivers/irqchip/irq-gic-v2m.c b/drivers/irqchip/irq-gic-v2m.c
index f4d7eeb13951..f1e75b35a52a 100644
--- a/drivers/irqchip/irq-gic-v2m.c
+++ b/drivers/irqchip/irq-gic-v2m.c
@@ -287,15 +287,14 @@ static __init int gicv2m_allocate_domains(struct irq_domain *parent)
if (!v2m)
return 0;
- inner_domain = irq_domain_create_tree(v2m->fwnode,
- &gicv2m_domain_ops, v2m);
+ inner_domain = irq_domain_create_hierarchy(parent, 0, 0, v2m->fwnode,
+ &gicv2m_domain_ops, v2m);
if (!inner_domain) {
pr_err("Failed to create GICv2m domain\n");
return -ENOMEM;
}
irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_NEXUS);
- inner_domain->parent = parent;
pci_domain = pci_msi_create_irq_domain(v2m->fwnode,
&gicv2m_msi_domain_info,
inner_domain);
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 973ede0197e3..586271b8aa39 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -4692,7 +4692,7 @@ static bool __maybe_unused its_enable_quirk_socionext_synquacer(void *data)
}
/* the pre-ITS breaks isolation, so disable MSI remapping */
- its->msi_domain_flags &= ~IRQ_DOMAIN_FLAG_MSI_REMAP;
+ its->msi_domain_flags &= ~IRQ_DOMAIN_FLAG_ISOLATED_MSI;
return true;
}
return false;
@@ -4909,18 +4909,19 @@ static int its_init_domain(struct fwnode_handle *handle, struct its_node *its)
if (!info)
return -ENOMEM;
- inner_domain = irq_domain_create_tree(handle, &its_domain_ops, its);
+ info->ops = &its_msi_domain_ops;
+ info->data = its;
+
+ inner_domain = irq_domain_create_hierarchy(its_parent,
+ its->msi_domain_flags, 0,
+ handle, &its_domain_ops,
+ info);
if (!inner_domain) {
kfree(info);
return -ENOMEM;
}
- inner_domain->parent = its_parent;
irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_NEXUS);
- inner_domain->flags |= its->msi_domain_flags;
- info->ops = &its_msi_domain_ops;
- info->data = its;
- inner_domain->host_data = info;
return 0;
}
@@ -5074,7 +5075,7 @@ static int __init its_probe_one(struct resource *res,
its->cmd_write = its->cmd_base;
its->fwnode_handle = handle;
its->get_msi_base = its_irq_get_msi_base;
- its->msi_domain_flags = IRQ_DOMAIN_FLAG_MSI_REMAP;
+ its->msi_domain_flags = IRQ_DOMAIN_FLAG_ISOLATED_MSI;
its_enable_quirks(its);
diff --git a/drivers/irqchip/irq-gic-v3-mbi.c b/drivers/irqchip/irq-gic-v3-mbi.c
index e1efdec9e9ac..dbb8b1efda44 100644
--- a/drivers/irqchip/irq-gic-v3-mbi.c
+++ b/drivers/irqchip/irq-gic-v3-mbi.c
@@ -233,13 +233,12 @@ static int mbi_allocate_domains(struct irq_domain *parent)
struct irq_domain *nexus_domain, *pci_domain, *plat_domain;
int err;
- nexus_domain = irq_domain_create_tree(parent->fwnode,
- &mbi_domain_ops, NULL);
+ nexus_domain = irq_domain_create_hierarchy(parent, 0, 0, parent->fwnode,
+ &mbi_domain_ops, NULL);
if (!nexus_domain)
return -ENOMEM;
irq_domain_update_bus_token(nexus_domain, DOMAIN_BUS_NEXUS);
- nexus_domain->parent = parent;
err = mbi_allocate_pci_domain(nexus_domain, &pci_domain);
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 997104d4338e..fd134e1f481a 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -89,15 +89,6 @@ static DEFINE_STATIC_KEY_TRUE(supports_deactivate_key);
*/
static DEFINE_STATIC_KEY_FALSE(supports_pseudo_nmis);
-/*
- * Global static key controlling whether an update to PMR allowing more
- * interrupts requires to be propagated to the redistributor (DSB SY).
- * And this needs to be exported for modules to be able to enable
- * interrupts...
- */
-DEFINE_STATIC_KEY_FALSE(gic_pmr_sync);
-EXPORT_SYMBOL(gic_pmr_sync);
-
DEFINE_STATIC_KEY_FALSE(gic_nonsecure_priorities);
EXPORT_SYMBOL(gic_nonsecure_priorities);
@@ -1310,9 +1301,7 @@ static void __init gic_smp_init(void)
gic_starting_cpu, NULL);
/* Register all 8 non-secure SGIs */
- base_sgi = __irq_domain_alloc_irqs(gic_data.domain, -1, 8,
- NUMA_NO_NODE, &sgi_fwspec,
- false, NULL);
+ base_sgi = irq_domain_alloc_irqs(gic_data.domain, 8, NUMA_NO_NODE, &sgi_fwspec);
if (WARN_ON(base_sgi <= 0))
return;
@@ -1768,16 +1757,8 @@ static void gic_enable_nmi_support(void)
for (i = 0; i < gic_data.ppi_nr; i++)
refcount_set(&ppi_nmi_refs[i], 0);
- /*
- * Linux itself doesn't use 1:N distribution, so has no need to
- * set PMHE. The only reason to have it set is if EL3 requires it
- * (and we can't change it).
- */
- if (gic_read_ctlr() & ICC_CTLR_EL1_PMHE_MASK)
- static_branch_enable(&gic_pmr_sync);
-
pr_info("Pseudo-NMIs enabled using %s ICC_PMR_EL1 synchronisation\n",
- static_branch_unlikely(&gic_pmr_sync) ? "forced" : "relaxed");
+ gic_has_relaxed_pmr_sync() ? "relaxed" : "forced");
/*
* How priority values are used by the GIC depends on two things:
diff --git a/drivers/irqchip/irq-gic-v4.c b/drivers/irqchip/irq-gic-v4.c
index a6277dea4c7a..94d56a03b175 100644
--- a/drivers/irqchip/irq-gic-v4.c
+++ b/drivers/irqchip/irq-gic-v4.c
@@ -139,9 +139,7 @@ static int its_alloc_vcpu_sgis(struct its_vpe *vpe, int idx)
if (!vpe->sgi_domain)
goto err;
- sgi_base = __irq_domain_alloc_irqs(vpe->sgi_domain, -1, 16,
- NUMA_NO_NODE, vpe,
- false, NULL);
+ sgi_base = irq_domain_alloc_irqs(vpe->sgi_domain, 16, NUMA_NO_NODE, vpe);
if (sgi_base <= 0)
goto err;
@@ -176,9 +174,8 @@ int its_alloc_vcpu_irqs(struct its_vm *vm)
vm->vpes[i]->idai = true;
}
- vpe_base_irq = __irq_domain_alloc_irqs(vm->domain, -1, vm->nr_vpes,
- NUMA_NO_NODE, vm,
- false, NULL);
+ vpe_base_irq = irq_domain_alloc_irqs(vm->domain, vm->nr_vpes,
+ NUMA_NO_NODE, vm);
if (vpe_base_irq <= 0)
goto err;
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 210bc2f4d555..95e3d2a71db6 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -54,7 +54,7 @@
static void gic_check_cpu_features(void)
{
- WARN_TAINT_ONCE(this_cpu_has_cap(ARM64_HAS_SYSREG_GIC_CPUIF),
+ WARN_TAINT_ONCE(this_cpu_has_cap(ARM64_HAS_GIC_CPUIF_SYSREGS),
TAINT_CPU_OUT_OF_SPEC,
"GICv3 system registers enabled, broken firmware!\n");
}
@@ -868,9 +868,7 @@ static __init void gic_smp_init(void)
"irqchip/arm/gic:starting",
gic_starting_cpu, NULL);
- base_sgi = __irq_domain_alloc_irqs(gic_data[0].domain, -1, 8,
- NUMA_NO_NODE, &sgi_fwspec,
- false, NULL);
+ base_sgi = irq_domain_alloc_irqs(gic_data[0].domain, 8, NUMA_NO_NODE, &sgi_fwspec);
if (WARN_ON(base_sgi <= 0))
return;
diff --git a/drivers/irqchip/irq-imx-gpcv2.c b/drivers/irqchip/irq-imx-gpcv2.c
index b9c22f764b4d..8a0e82067924 100644
--- a/drivers/irqchip/irq-imx-gpcv2.c
+++ b/drivers/irqchip/irq-imx-gpcv2.c
@@ -283,6 +283,7 @@ static int __init imx_gpcv2_irqchip_init(struct device_node *node,
* later the GPC power domain driver will not be skipped.
*/
of_node_clear_flag(node, OF_POPULATED);
+ fwnode_dev_initialized(domain->fwnode, false);
return 0;
}
diff --git a/drivers/irqchip/irq-loongson-liointc.c b/drivers/irqchip/irq-loongson-liointc.c
index 85b754f7f4e6..8d00a9ad5b00 100644
--- a/drivers/irqchip/irq-loongson-liointc.c
+++ b/drivers/irqchip/irq-loongson-liointc.c
@@ -55,6 +55,8 @@ struct liointc_priv {
struct liointc_handler_data handler[LIOINTC_NUM_PARENT];
void __iomem *core_isr[LIOINTC_NUM_CORES];
u8 map_cache[LIOINTC_CHIP_IRQ];
+ u32 int_pol;
+ u32 int_edge;
bool has_lpc_irq_errata;
};
@@ -138,6 +140,14 @@ static int liointc_set_type(struct irq_data *data, unsigned int type)
return 0;
}
+static void liointc_suspend(struct irq_chip_generic *gc)
+{
+ struct liointc_priv *priv = gc->private;
+
+ priv->int_pol = readl(gc->reg_base + LIOINTC_REG_INTC_POL);
+ priv->int_edge = readl(gc->reg_base + LIOINTC_REG_INTC_EDGE);
+}
+
static void liointc_resume(struct irq_chip_generic *gc)
{
struct liointc_priv *priv = gc->private;
@@ -150,6 +160,8 @@ static void liointc_resume(struct irq_chip_generic *gc)
/* Restore map cache */
for (i = 0; i < LIOINTC_CHIP_IRQ; i++)
writeb(priv->map_cache[i], gc->reg_base + i);
+ writel(priv->int_pol, gc->reg_base + LIOINTC_REG_INTC_POL);
+ writel(priv->int_edge, gc->reg_base + LIOINTC_REG_INTC_EDGE);
/* Restore mask cache */
writel(gc->mask_cache, gc->reg_base + LIOINTC_REG_INTC_ENABLE);
irq_gc_unlock_irqrestore(gc, flags);
@@ -269,6 +281,7 @@ static int liointc_init(phys_addr_t addr, unsigned long size, int revision,
gc->private = priv;
gc->reg_base = base;
gc->domain = domain;
+ gc->suspend = liointc_suspend;
gc->resume = liointc_resume;
ct = gc->chip_types;
diff --git a/drivers/irqchip/irq-loongson-pch-msi.c b/drivers/irqchip/irq-loongson-pch-msi.c
index a72ede90ffc6..6e1e1f011bb2 100644
--- a/drivers/irqchip/irq-loongson-pch-msi.c
+++ b/drivers/irqchip/irq-loongson-pch-msi.c
@@ -163,16 +163,15 @@ static int pch_msi_init_domains(struct pch_msi_data *priv,
{
struct irq_domain *middle_domain, *msi_domain;
- middle_domain = irq_domain_create_linear(domain_handle,
- priv->num_irqs,
- &pch_msi_middle_domain_ops,
- priv);
+ middle_domain = irq_domain_create_hierarchy(parent, 0, priv->num_irqs,
+ domain_handle,
+ &pch_msi_middle_domain_ops,
+ priv);
if (!middle_domain) {
pr_err("Failed to create the MSI middle domain\n");
return -ENOMEM;
}
- middle_domain->parent = parent;
irq_domain_update_bus_token(middle_domain, DOMAIN_BUS_NEXUS);
msi_domain = pci_msi_create_irq_domain(domain_handle,
diff --git a/drivers/irqchip/irq-mvebu-gicp.c b/drivers/irqchip/irq-mvebu-gicp.c
index fe88a782173d..c43a345061d5 100644
--- a/drivers/irqchip/irq-mvebu-gicp.c
+++ b/drivers/irqchip/irq-mvebu-gicp.c
@@ -221,6 +221,7 @@ static int mvebu_gicp_probe(struct platform_device *pdev)
}
parent_domain = irq_find_host(irq_parent_dn);
+ of_node_put(irq_parent_dn);
if (!parent_domain) {
dev_err(&pdev->dev, "failed to find parent IRQ domain\n");
return -ENODEV;
diff --git a/drivers/irqchip/irq-mvebu-odmi.c b/drivers/irqchip/irq-mvebu-odmi.c
index dc4145abdd6f..108091533e10 100644
--- a/drivers/irqchip/irq-mvebu-odmi.c
+++ b/drivers/irqchip/irq-mvebu-odmi.c
@@ -161,7 +161,7 @@ static struct msi_domain_info odmi_msi_domain_info = {
static int __init mvebu_odmi_init(struct device_node *node,
struct device_node *parent)
{
- struct irq_domain *inner_domain, *plat_domain;
+ struct irq_domain *parent_domain, *inner_domain, *plat_domain;
int ret, i;
if (of_property_read_u32(node, "marvell,odmi-frames", &odmis_count))
@@ -197,16 +197,17 @@ static int __init mvebu_odmi_init(struct device_node *node,
}
}
- inner_domain = irq_domain_create_linear(of_node_to_fwnode(node),
- odmis_count * NODMIS_PER_FRAME,
- &odmi_domain_ops, NULL);
+ parent_domain = irq_find_host(parent);
+
+ inner_domain = irq_domain_create_hierarchy(parent_domain, 0,
+ odmis_count * NODMIS_PER_FRAME,
+ of_node_to_fwnode(node),
+ &odmi_domain_ops, NULL);
if (!inner_domain) {
ret = -ENOMEM;
goto err_unmap;
}
- inner_domain->parent = irq_find_host(parent);
-
plat_domain = platform_msi_create_irq_domain(of_node_to_fwnode(node),
&odmi_msi_domain_info,
inner_domain);
diff --git a/drivers/irqchip/irq-ti-sci-intr.c b/drivers/irqchip/irq-ti-sci-intr.c
index fe8fad22bcf9..020ddf29efb8 100644
--- a/drivers/irqchip/irq-ti-sci-intr.c
+++ b/drivers/irqchip/irq-ti-sci-intr.c
@@ -236,6 +236,7 @@ static int ti_sci_intr_irq_domain_probe(struct platform_device *pdev)
}
parent_domain = irq_find_host(parent_node);
+ of_node_put(parent_node);
if (!parent_domain) {
dev_err(dev, "Failed to find IRQ parent domain\n");
return -ENODEV;
diff --git a/drivers/irqchip/irqchip.c b/drivers/irqchip/irqchip.c
index 3570f0a588c4..7899607fbee8 100644
--- a/drivers/irqchip/irqchip.c
+++ b/drivers/irqchip/irqchip.c
@@ -38,8 +38,10 @@ int platform_irqchip_probe(struct platform_device *pdev)
struct device_node *par_np = of_irq_find_parent(np);
of_irq_init_cb_t irq_init_cb = of_device_get_match_data(&pdev->dev);
- if (!irq_init_cb)
+ if (!irq_init_cb) {
+ of_node_put(par_np);
return -EINVAL;
+ }
if (par_np == np)
par_np = NULL;
@@ -52,8 +54,10 @@ int platform_irqchip_probe(struct platform_device *pdev)
* interrupt controller. The actual initialization callback of this
* interrupt controller can check for specific domains as necessary.
*/
- if (par_np && !irq_find_matching_host(par_np, DOMAIN_BUS_ANY))
+ if (par_np && !irq_find_matching_host(par_np, DOMAIN_BUS_ANY)) {
+ of_node_put(par_np);
return -EPROBE_DEFER;
+ }
return irq_init_cb(np, par_np);
}
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 499d0f215a8b..9dbce09eabac 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -244,14 +244,6 @@ config LEDS_MT6323
This option enables support for on-chip LED drivers found on
Mediatek MT6323 PMIC.
-config LEDS_S3C24XX
- tristate "LED Support for Samsung S3C24XX GPIO LEDs"
- depends on LEDS_CLASS
- depends on ARCH_S3C24XX || COMPILE_TEST
- help
- This option enables support for LEDs connected to GPIO lines
- on Samsung S3C24XX series CPUs, such as the S3C2410 and S3C2440.
-
config LEDS_NET48XX
tristate "LED Support for Soekris net48xx series Error LED"
depends on LEDS_CLASS
@@ -631,17 +623,6 @@ config LEDS_NETXBIG
and 5Big Network v2 boards. The LEDs are wired to a CPLD and are
controlled through a GPIO extension bus.
-config LEDS_ASIC3
- bool "LED support for the HTC ASIC3"
- depends on LEDS_CLASS=y
- depends on MFD_ASIC3
- default y
- help
- This option enables support for the LEDs on the HTC ASIC3. The HTC
- ASIC3 LED GPIOs are inputs, not outputs, thus the leds-gpio driver
- cannot be used. This driver supports hardware blinking with an on+off
- period from 62ms to 125s. Say Y to enable LEDs on the HP iPAQ hx4700.
-
config LEDS_TCA6507
tristate "LED Support for TCA6507 I2C chip"
depends on LEDS_CLASS && I2C
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index 4fd2f92cd198..d30395d11fd8 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -14,7 +14,6 @@ obj-$(CONFIG_LEDS_ADP5520) += leds-adp5520.o
obj-$(CONFIG_LEDS_AN30259A) += leds-an30259a.o
obj-$(CONFIG_LEDS_APU) += leds-apu.o
obj-$(CONFIG_LEDS_ARIEL) += leds-ariel.o
-obj-$(CONFIG_LEDS_ASIC3) += leds-asic3.o
obj-$(CONFIG_LEDS_AW2013) += leds-aw2013.o
obj-$(CONFIG_LEDS_BCM6328) += leds-bcm6328.o
obj-$(CONFIG_LEDS_BCM6358) += leds-bcm6358.o
@@ -74,7 +73,6 @@ obj-$(CONFIG_LEDS_PM8058) += leds-pm8058.o
obj-$(CONFIG_LEDS_POWERNV) += leds-powernv.o
obj-$(CONFIG_LEDS_PWM) += leds-pwm.o
obj-$(CONFIG_LEDS_REGULATOR) += leds-regulator.o
-obj-$(CONFIG_LEDS_S3C24XX) += leds-s3c24xx.o
obj-$(CONFIG_LEDS_SC27XX_BLTC) += leds-sc27xx-bltc.o
obj-$(CONFIG_LEDS_SUNFIRE) += leds-sunfire.o
obj-$(CONFIG_LEDS_SYSCON) += leds-syscon.o
diff --git a/drivers/leds/flash/leds-mt6360.c b/drivers/leds/flash/leds-mt6360.c
index e1066a52d2d2..1af6c5898343 100644
--- a/drivers/leds/flash/leds-mt6360.c
+++ b/drivers/leds/flash/leds-mt6360.c
@@ -71,10 +71,6 @@ enum {
#define MT6360_STRBTO_STEPUS 32000
#define MT6360_STRBTO_MAXUS 2432000
-#define STATE_OFF 0
-#define STATE_KEEP 1
-#define STATE_ON 2
-
struct mt6360_led {
union {
struct led_classdev isnk;
@@ -84,7 +80,7 @@ struct mt6360_led {
struct v4l2_flash *v4l2_flash;
struct mt6360_priv *priv;
u32 led_no;
- u32 default_state;
+ enum led_default_state default_state;
};
struct mt6360_priv {
@@ -405,10 +401,10 @@ static int mt6360_isnk_init_default_state(struct mt6360_led *led)
level = LED_OFF;
switch (led->default_state) {
- case STATE_ON:
+ case LEDS_DEFSTATE_ON:
led->isnk.brightness = led->isnk.max_brightness;
break;
- case STATE_KEEP:
+ case LEDS_DEFSTATE_KEEP:
led->isnk.brightness = min(level, led->isnk.max_brightness);
break;
default:
@@ -443,10 +439,10 @@ static int mt6360_flash_init_default_state(struct mt6360_led *led)
level = LED_OFF;
switch (led->default_state) {
- case STATE_ON:
+ case LEDS_DEFSTATE_ON:
flash->led_cdev.brightness = flash->led_cdev.max_brightness;
break;
- case STATE_KEEP:
+ case LEDS_DEFSTATE_KEEP:
flash->led_cdev.brightness =
min(level, flash->led_cdev.max_brightness);
break;
@@ -760,25 +756,6 @@ static int mt6360_init_flash_properties(struct mt6360_led *led,
return 0;
}
-static int mt6360_init_common_properties(struct mt6360_led *led,
- struct led_init_data *init_data)
-{
- const char *const states[] = { "off", "keep", "on" };
- const char *str;
- int ret;
-
- if (!fwnode_property_read_string(init_data->fwnode,
- "default-state", &str)) {
- ret = match_string(states, ARRAY_SIZE(states), str);
- if (ret < 0)
- ret = STATE_OFF;
-
- led->default_state = ret;
- }
-
- return 0;
-}
-
static void mt6360_v4l2_flash_release(struct mt6360_priv *priv)
{
int i;
@@ -852,10 +829,7 @@ static int mt6360_led_probe(struct platform_device *pdev)
led->led_no = reg;
led->priv = priv;
-
- ret = mt6360_init_common_properties(led, &init_data);
- if (ret)
- goto out_flash_release;
+ led->default_state = led_init_default_state_get(child);
if (reg == MT6360_VIRTUAL_MULTICOLOR ||
reg <= MT6360_LED_ISNKML)
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index 6a8ea94834fa..a6b3adcd044a 100644
--- a/drivers/leds/led-class.c
+++ b/drivers/leds/led-class.c
@@ -23,6 +23,8 @@
#include "leds.h"
static struct class *leds_class;
+static DEFINE_MUTEX(leds_lookup_lock);
+static LIST_HEAD(leds_lookup_list);
static ssize_t brightness_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -215,6 +217,23 @@ static int led_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(leds_class_dev_pm_ops, led_suspend, led_resume);
+static struct led_classdev *led_module_get(struct device *led_dev)
+{
+ struct led_classdev *led_cdev;
+
+ if (!led_dev)
+ return ERR_PTR(-EPROBE_DEFER);
+
+ led_cdev = dev_get_drvdata(led_dev);
+
+ if (!try_module_get(led_cdev->dev->parent->driver->owner)) {
+ put_device(led_cdev->dev);
+ return ERR_PTR(-ENODEV);
+ }
+
+ return led_cdev;
+}
+
/**
* of_led_get() - request a LED device via the LED framework
* @np: device node to get the LED device from
@@ -226,7 +245,6 @@ static SIMPLE_DEV_PM_OPS(leds_class_dev_pm_ops, led_suspend, led_resume);
struct led_classdev *of_led_get(struct device_node *np, int index)
{
struct device *led_dev;
- struct led_classdev *led_cdev;
struct device_node *led_node;
led_node = of_parse_phandle(np, "leds", index);
@@ -235,16 +253,9 @@ struct led_classdev *of_led_get(struct device_node *np, int index)
led_dev = class_find_device_by_of_node(leds_class, led_node);
of_node_put(led_node);
+ put_device(led_dev);
- if (!led_dev)
- return ERR_PTR(-EPROBE_DEFER);
-
- led_cdev = dev_get_drvdata(led_dev);
-
- if (!try_module_get(led_cdev->dev->parent->driver->owner))
- return ERR_PTR(-ENODEV);
-
- return led_cdev;
+ return led_module_get(led_dev);
}
EXPORT_SYMBOL_GPL(of_led_get);
@@ -255,6 +266,7 @@ EXPORT_SYMBOL_GPL(of_led_get);
void led_put(struct led_classdev *led_cdev)
{
module_put(led_cdev->dev->parent->driver->owner);
+ put_device(led_cdev->dev);
}
EXPORT_SYMBOL_GPL(led_put);
@@ -265,6 +277,22 @@ static void devm_led_release(struct device *dev, void *res)
led_put(*p);
}
+static struct led_classdev *__devm_led_get(struct device *dev, struct led_classdev *led)
+{
+ struct led_classdev **dr;
+
+ dr = devres_alloc(devm_led_release, sizeof(struct led_classdev *), GFP_KERNEL);
+ if (!dr) {
+ led_put(led);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ *dr = led;
+ devres_add(dev, dr);
+
+ return led;
+}
+
/**
* devm_of_led_get - Resource-managed request of a LED device
* @dev: LED consumer
@@ -280,7 +308,6 @@ struct led_classdev *__must_check devm_of_led_get(struct device *dev,
int index)
{
struct led_classdev *led;
- struct led_classdev **dr;
if (!dev)
return ERR_PTR(-EINVAL);
@@ -289,19 +316,91 @@ struct led_classdev *__must_check devm_of_led_get(struct device *dev,
if (IS_ERR(led))
return led;
- dr = devres_alloc(devm_led_release, sizeof(struct led_classdev *),
- GFP_KERNEL);
- if (!dr) {
- led_put(led);
- return ERR_PTR(-ENOMEM);
+ return __devm_led_get(dev, led);
+}
+EXPORT_SYMBOL_GPL(devm_of_led_get);
+
+/**
+ * led_get() - request a LED device via the LED framework
+ * @dev: device for which to get the LED device
+ * @con_id: name of the LED from the device's point of view
+ *
+ * @return a pointer to a LED device or ERR_PTR(errno) on failure.
+ */
+struct led_classdev *led_get(struct device *dev, char *con_id)
+{
+ struct led_lookup_data *lookup;
+ const char *provider = NULL;
+ struct device *led_dev;
+
+ mutex_lock(&leds_lookup_lock);
+ list_for_each_entry(lookup, &leds_lookup_list, list) {
+ if (!strcmp(lookup->dev_id, dev_name(dev)) &&
+ !strcmp(lookup->con_id, con_id)) {
+ provider = kstrdup_const(lookup->provider, GFP_KERNEL);
+ break;
+ }
}
+ mutex_unlock(&leds_lookup_lock);
- *dr = led;
- devres_add(dev, dr);
+ if (!provider)
+ return ERR_PTR(-ENOENT);
- return led;
+ led_dev = class_find_device_by_name(leds_class, provider);
+ kfree_const(provider);
+
+ return led_module_get(led_dev);
}
-EXPORT_SYMBOL_GPL(devm_of_led_get);
+EXPORT_SYMBOL_GPL(led_get);
+
+/**
+ * devm_led_get() - request a LED device via the LED framework
+ * @dev: device for which to get the LED device
+ * @con_id: name of the LED from the device's point of view
+ *
+ * The LED device returned from this function is automatically released
+ * on driver detach.
+ *
+ * @return a pointer to a LED device or ERR_PTR(errno) on failure.
+ */
+struct led_classdev *devm_led_get(struct device *dev, char *con_id)
+{
+ struct led_classdev *led;
+
+ led = led_get(dev, con_id);
+ if (IS_ERR(led))
+ return led;
+
+ return __devm_led_get(dev, led);
+}
+EXPORT_SYMBOL_GPL(devm_led_get);
+
+/**
+ * led_add_lookup() - Add a LED lookup table entry
+ * @led_lookup: the lookup table entry to add
+ *
+ * Add a LED lookup table entry. On systems without devicetree the lookup table
+ * is used by led_get() to find LEDs.
+ */
+void led_add_lookup(struct led_lookup_data *led_lookup)
+{
+ mutex_lock(&leds_lookup_lock);
+ list_add_tail(&led_lookup->list, &leds_lookup_list);
+ mutex_unlock(&leds_lookup_lock);
+}
+EXPORT_SYMBOL_GPL(led_add_lookup);
+
+/**
+ * led_remove_lookup() - Remove a LED lookup table entry
+ * @led_lookup: the lookup table entry to remove
+ */
+void led_remove_lookup(struct led_lookup_data *led_lookup)
+{
+ mutex_lock(&leds_lookup_lock);
+ list_del(&led_lookup->list);
+ mutex_unlock(&leds_lookup_lock);
+}
+EXPORT_SYMBOL_GPL(led_remove_lookup);
static int led_classdev_next_name(const char *init_name, char *name,
size_t len)
diff --git a/drivers/leds/leds-an30259a.c b/drivers/leds/leds-an30259a.c
index e072ee5409f7..89df267853a9 100644
--- a/drivers/leds/leds-an30259a.c
+++ b/drivers/leds/leds-an30259a.c
@@ -55,10 +55,6 @@
#define AN30259A_NAME "an30259a"
-#define STATE_OFF 0
-#define STATE_KEEP 1
-#define STATE_ON 2
-
struct an30259a;
struct an30259a_led {
@@ -66,7 +62,7 @@ struct an30259a_led {
struct fwnode_handle *fwnode;
struct led_classdev cdev;
u32 num;
- u32 default_state;
+ enum led_default_state default_state;
bool sloping;
};
@@ -205,7 +201,6 @@ static int an30259a_dt_init(struct i2c_client *client,
struct device_node *np = dev_of_node(&client->dev), *child;
int count, ret;
int i = 0;
- const char *str;
struct an30259a_led *led;
count = of_get_available_child_count(np);
@@ -228,15 +223,7 @@ static int an30259a_dt_init(struct i2c_client *client,
led->num = source;
led->chip = chip;
led->fwnode = of_fwnode_handle(child);
-
- if (!of_property_read_string(child, "default-state", &str)) {
- if (!strcmp(str, "on"))
- led->default_state = STATE_ON;
- else if (!strcmp(str, "keep"))
- led->default_state = STATE_KEEP;
- else
- led->default_state = STATE_OFF;
- }
+ led->default_state = led_init_default_state_get(led->fwnode);
i++;
}
@@ -261,10 +248,10 @@ static void an30259a_init_default_state(struct an30259a_led *led)
int led_on, err;
switch (led->default_state) {
- case STATE_ON:
+ case LEDS_DEFSTATE_ON:
led->cdev.brightness = LED_FULL;
break;
- case STATE_KEEP:
+ case LEDS_DEFSTATE_KEEP:
err = regmap_read(chip->regmap, AN30259A_REG_LED_ON, &led_on);
if (err)
break;
diff --git a/drivers/leds/leds-asic3.c b/drivers/leds/leds-asic3.c
deleted file mode 100644
index 8cbc1b8bafa5..000000000000
--- a/drivers/leds/leds-asic3.c
+++ /dev/null
@@ -1,177 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2011 Paul Parsons <lost.distance@yahoo.com>
- */
-
-#include <linux/kernel.h>
-#include <linux/platform_device.h>
-#include <linux/leds.h>
-#include <linux/slab.h>
-
-#include <linux/mfd/asic3.h>
-#include <linux/mfd/core.h>
-#include <linux/module.h>
-
-/*
- * The HTC ASIC3 LED GPIOs are inputs, not outputs.
- * Hence we turn the LEDs on/off via the TimeBase register.
- */
-
-/*
- * When TimeBase is 4 the clock resolution is about 32Hz.
- * This driver supports hardware blinking with an on+off
- * period from 62ms (2 clocks) to 125s (4000 clocks).
- */
-#define MS_TO_CLK(ms) DIV_ROUND_CLOSEST(((ms)*1024), 32000)
-#define CLK_TO_MS(clk) (((clk)*32000)/1024)
-#define MAX_CLK 4000 /* Fits into 12-bit Time registers */
-#define MAX_MS CLK_TO_MS(MAX_CLK)
-
-static const unsigned int led_n_base[ASIC3_NUM_LEDS] = {
- [0] = ASIC3_LED_0_Base,
- [1] = ASIC3_LED_1_Base,
- [2] = ASIC3_LED_2_Base,
-};
-
-static void brightness_set(struct led_classdev *cdev,
- enum led_brightness value)
-{
- struct platform_device *pdev = to_platform_device(cdev->dev->parent);
- const struct mfd_cell *cell = mfd_get_cell(pdev);
- struct asic3 *asic = dev_get_drvdata(pdev->dev.parent);
- u32 timebase;
- unsigned int base;
-
- timebase = (value == LED_OFF) ? 0 : (LED_EN|0x4);
-
- base = led_n_base[cell->id];
- asic3_write_register(asic, (base + ASIC3_LED_PeriodTime), 32);
- asic3_write_register(asic, (base + ASIC3_LED_DutyTime), 32);
- asic3_write_register(asic, (base + ASIC3_LED_AutoStopCount), 0);
- asic3_write_register(asic, (base + ASIC3_LED_TimeBase), timebase);
-}
-
-static int blink_set(struct led_classdev *cdev,
- unsigned long *delay_on,
- unsigned long *delay_off)
-{
- struct platform_device *pdev = to_platform_device(cdev->dev->parent);
- const struct mfd_cell *cell = mfd_get_cell(pdev);
- struct asic3 *asic = dev_get_drvdata(pdev->dev.parent);
- u32 on;
- u32 off;
- unsigned int base;
-
- if (*delay_on > MAX_MS || *delay_off > MAX_MS)
- return -EINVAL;
-
- if (*delay_on == 0 && *delay_off == 0) {
- /* If both are zero then a sensible default should be chosen */
- on = MS_TO_CLK(500);
- off = MS_TO_CLK(500);
- } else {
- on = MS_TO_CLK(*delay_on);
- off = MS_TO_CLK(*delay_off);
- if ((on + off) > MAX_CLK)
- return -EINVAL;
- }
-
- base = led_n_base[cell->id];
- asic3_write_register(asic, (base + ASIC3_LED_PeriodTime), (on + off));
- asic3_write_register(asic, (base + ASIC3_LED_DutyTime), on);
- asic3_write_register(asic, (base + ASIC3_LED_AutoStopCount), 0);
- asic3_write_register(asic, (base + ASIC3_LED_TimeBase), (LED_EN|0x4));
-
- *delay_on = CLK_TO_MS(on);
- *delay_off = CLK_TO_MS(off);
-
- return 0;
-}
-
-static int asic3_led_probe(struct platform_device *pdev)
-{
- struct asic3_led *led = dev_get_platdata(&pdev->dev);
- int ret;
-
- ret = mfd_cell_enable(pdev);
- if (ret < 0)
- return ret;
-
- led->cdev = devm_kzalloc(&pdev->dev, sizeof(struct led_classdev),
- GFP_KERNEL);
- if (!led->cdev) {
- ret = -ENOMEM;
- goto out;
- }
-
- led->cdev->name = led->name;
- led->cdev->flags = LED_CORE_SUSPENDRESUME;
- led->cdev->brightness_set = brightness_set;
- led->cdev->blink_set = blink_set;
- led->cdev->default_trigger = led->default_trigger;
-
- ret = led_classdev_register(&pdev->dev, led->cdev);
- if (ret < 0)
- goto out;
-
- return 0;
-
-out:
- (void) mfd_cell_disable(pdev);
- return ret;
-}
-
-static int asic3_led_remove(struct platform_device *pdev)
-{
- struct asic3_led *led = dev_get_platdata(&pdev->dev);
-
- led_classdev_unregister(led->cdev);
-
- return mfd_cell_disable(pdev);
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int asic3_led_suspend(struct device *dev)
-{
- struct platform_device *pdev = to_platform_device(dev);
- const struct mfd_cell *cell = mfd_get_cell(pdev);
- int ret;
-
- ret = 0;
- if (cell->suspend)
- ret = (*cell->suspend)(pdev);
-
- return ret;
-}
-
-static int asic3_led_resume(struct device *dev)
-{
- struct platform_device *pdev = to_platform_device(dev);
- const struct mfd_cell *cell = mfd_get_cell(pdev);
- int ret;
-
- ret = 0;
- if (cell->resume)
- ret = (*cell->resume)(pdev);
-
- return ret;
-}
-#endif
-
-static SIMPLE_DEV_PM_OPS(asic3_led_pm_ops, asic3_led_suspend, asic3_led_resume);
-
-static struct platform_driver asic3_led_driver = {
- .probe = asic3_led_probe,
- .remove = asic3_led_remove,
- .driver = {
- .name = "leds-asic3",
- .pm = &asic3_led_pm_ops,
- },
-};
-
-module_platform_driver(asic3_led_driver);
-
-MODULE_AUTHOR("Paul Parsons <lost.distance@yahoo.com>");
-MODULE_DESCRIPTION("HTC ASIC3 LED driver");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:leds-asic3");
diff --git a/drivers/leds/leds-bcm6328.c b/drivers/leds/leds-bcm6328.c
index 2d4d87957a30..246f1296ab09 100644
--- a/drivers/leds/leds-bcm6328.c
+++ b/drivers/leds/leds-bcm6328.c
@@ -330,7 +330,9 @@ static int bcm6328_led(struct device *dev, struct device_node *nc, u32 reg,
{
struct led_init_data init_data = {};
struct bcm6328_led *led;
- const char *state;
+ enum led_default_state state;
+ unsigned long val, shift;
+ void __iomem *mode;
int rc;
led = devm_kzalloc(dev, sizeof(*led), GFP_KERNEL);
@@ -346,31 +348,29 @@ static int bcm6328_led(struct device *dev, struct device_node *nc, u32 reg,
if (of_property_read_bool(nc, "active-low"))
led->active_low = true;
- if (!of_property_read_string(nc, "default-state", &state)) {
- if (!strcmp(state, "on")) {
+ init_data.fwnode = of_fwnode_handle(nc);
+
+ state = led_init_default_state_get(init_data.fwnode);
+ switch (state) {
+ case LEDS_DEFSTATE_ON:
+ led->cdev.brightness = LED_FULL;
+ break;
+ case LEDS_DEFSTATE_KEEP:
+ shift = bcm6328_pin2shift(led->pin);
+ if (shift / 16)
+ mode = mem + BCM6328_REG_MODE_HI;
+ else
+ mode = mem + BCM6328_REG_MODE_LO;
+
+ val = bcm6328_led_read(mode) >> BCM6328_LED_SHIFT(shift % 16);
+ val &= BCM6328_LED_MODE_MASK;
+ if ((led->active_low && val == BCM6328_LED_MODE_OFF) ||
+ (!led->active_low && val == BCM6328_LED_MODE_ON))
led->cdev.brightness = LED_FULL;
- } else if (!strcmp(state, "keep")) {
- void __iomem *mode;
- unsigned long val, shift;
-
- shift = bcm6328_pin2shift(led->pin);
- if (shift / 16)
- mode = mem + BCM6328_REG_MODE_HI;
- else
- mode = mem + BCM6328_REG_MODE_LO;
-
- val = bcm6328_led_read(mode) >>
- BCM6328_LED_SHIFT(shift % 16);
- val &= BCM6328_LED_MODE_MASK;
- if ((led->active_low && val == BCM6328_LED_MODE_OFF) ||
- (!led->active_low && val == BCM6328_LED_MODE_ON))
- led->cdev.brightness = LED_FULL;
- else
- led->cdev.brightness = LED_OFF;
- } else {
+ else
led->cdev.brightness = LED_OFF;
- }
- } else {
+ break;
+ default:
led->cdev.brightness = LED_OFF;
}
@@ -378,7 +378,6 @@ static int bcm6328_led(struct device *dev, struct device_node *nc, u32 reg,
led->cdev.brightness_set = bcm6328_led_set;
led->cdev.blink_set = bcm6328_blink_set;
- init_data.fwnode = of_fwnode_handle(nc);
rc = devm_led_classdev_register_ext(dev, &led->cdev, &init_data);
if (rc < 0)
diff --git a/drivers/leds/leds-bcm6358.c b/drivers/leds/leds-bcm6358.c
index 9d2e487fa08a..86e51d44a5a7 100644
--- a/drivers/leds/leds-bcm6358.c
+++ b/drivers/leds/leds-bcm6358.c
@@ -96,7 +96,8 @@ static int bcm6358_led(struct device *dev, struct device_node *nc, u32 reg,
{
struct led_init_data init_data = {};
struct bcm6358_led *led;
- const char *state;
+ enum led_default_state state;
+ unsigned long val;
int rc;
led = devm_kzalloc(dev, sizeof(*led), GFP_KERNEL);
@@ -110,29 +111,28 @@ static int bcm6358_led(struct device *dev, struct device_node *nc, u32 reg,
if (of_property_read_bool(nc, "active-low"))
led->active_low = true;
- if (!of_property_read_string(nc, "default-state", &state)) {
- if (!strcmp(state, "on")) {
+ init_data.fwnode = of_fwnode_handle(nc);
+
+ state = led_init_default_state_get(init_data.fwnode);
+ switch (state) {
+ case LEDS_DEFSTATE_ON:
+ led->cdev.brightness = LED_FULL;
+ break;
+ case LEDS_DEFSTATE_KEEP:
+ val = bcm6358_led_read(led->mem + BCM6358_REG_MODE);
+ val &= BIT(led->pin);
+ if ((led->active_low && !val) || (!led->active_low && val))
led->cdev.brightness = LED_FULL;
- } else if (!strcmp(state, "keep")) {
- unsigned long val;
- val = bcm6358_led_read(led->mem + BCM6358_REG_MODE);
- val &= BIT(led->pin);
- if ((led->active_low && !val) ||
- (!led->active_low && val))
- led->cdev.brightness = LED_FULL;
- else
- led->cdev.brightness = LED_OFF;
- } else {
+ else
led->cdev.brightness = LED_OFF;
- }
- } else {
+ break;
+ default:
led->cdev.brightness = LED_OFF;
}
bcm6358_led_set(&led->cdev, led->cdev.brightness);
led->cdev.brightness_set = bcm6358_led_set;
- init_data.fwnode = of_fwnode_handle(nc);
rc = devm_led_classdev_register_ext(dev, &led->cdev, &init_data);
if (rc < 0)
diff --git a/drivers/leds/leds-bd2802.c b/drivers/leds/leds-bd2802.c
index 2b6678f6bd56..601185ddabcc 100644
--- a/drivers/leds/leds-bd2802.c
+++ b/drivers/leds/leds-bd2802.c
@@ -656,8 +656,7 @@ static void bd2802_unregister_led_classdev(struct bd2802_led *led)
led_classdev_unregister(&led->cdev_led1r);
}
-static int bd2802_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int bd2802_probe(struct i2c_client *client)
{
struct bd2802_led *led;
int ret, i;
@@ -787,7 +786,7 @@ static struct i2c_driver bd2802_i2c_driver = {
.name = "BD2802",
.pm = &bd2802_pm,
},
- .probe = bd2802_probe,
+ .probe_new = bd2802_probe,
.remove = bd2802_remove,
.id_table = bd2802_id,
};
diff --git a/drivers/leds/leds-blinkm.c b/drivers/leds/leds-blinkm.c
index e19cc8a7b7ca..37f2f32ae42d 100644
--- a/drivers/leds/leds-blinkm.c
+++ b/drivers/leds/leds-blinkm.c
@@ -565,8 +565,7 @@ static int blinkm_detect(struct i2c_client *client, struct i2c_board_info *info)
return 0;
}
-static int blinkm_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int blinkm_probe(struct i2c_client *client)
{
struct blinkm_data *data;
struct blinkm_led *led[3];
@@ -731,7 +730,7 @@ static struct i2c_driver blinkm_driver = {
.driver = {
.name = "blinkm",
},
- .probe = blinkm_probe,
+ .probe_new = blinkm_probe,
.remove = blinkm_remove,
.id_table = blinkm_id,
.detect = blinkm_detect,
diff --git a/drivers/leds/leds-is31fl319x.c b/drivers/leds/leds-is31fl319x.c
index b2f4c4ec7c56..7c908414ac7e 100644
--- a/drivers/leds/leds-is31fl319x.c
+++ b/drivers/leds/leds-is31fl319x.c
@@ -495,6 +495,11 @@ static inline int is31fl3196_db_to_gain(u32 dezibel)
return dezibel / IS31FL3196_AUDIO_GAIN_DB_STEP;
}
+static void is31f1319x_mutex_destroy(void *lock)
+{
+ mutex_destroy(lock);
+}
+
static int is31fl319x_probe(struct i2c_client *client)
{
struct is31fl319x_chip *is31;
@@ -511,7 +516,7 @@ static int is31fl319x_probe(struct i2c_client *client)
return -ENOMEM;
mutex_init(&is31->lock);
- err = devm_add_action(dev, (void (*)(void *))mutex_destroy, &is31->lock);
+ err = devm_add_action_or_reset(dev, is31f1319x_mutex_destroy, &is31->lock);
if (err)
return err;
diff --git a/drivers/leds/leds-is31fl32xx.c b/drivers/leds/leds-is31fl32xx.c
index 0d219c1ac3b5..799191859ce0 100644
--- a/drivers/leds/leds-is31fl32xx.c
+++ b/drivers/leds/leds-is31fl32xx.c
@@ -422,8 +422,7 @@ static const struct of_device_id of_is31fl32xx_match[] = {
MODULE_DEVICE_TABLE(of, of_is31fl32xx_match);
-static int is31fl32xx_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int is31fl32xx_probe(struct i2c_client *client)
{
const struct is31fl32xx_chipdef *cdef;
struct device *dev = &client->dev;
@@ -489,7 +488,7 @@ static struct i2c_driver is31fl32xx_driver = {
.name = "is31fl32xx",
.of_match_table = of_is31fl32xx_match,
},
- .probe = is31fl32xx_probe,
+ .probe_new = is31fl32xx_probe,
.remove = is31fl32xx_remove,
.id_table = is31fl32xx_id,
};
diff --git a/drivers/leds/leds-lm3530.c b/drivers/leds/leds-lm3530.c
index ba906c253c7f..a9a2018592ff 100644
--- a/drivers/leds/leds-lm3530.c
+++ b/drivers/leds/leds-lm3530.c
@@ -405,8 +405,7 @@ static struct attribute *lm3530_attrs[] = {
};
ATTRIBUTE_GROUPS(lm3530);
-static int lm3530_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int lm3530_probe(struct i2c_client *client)
{
struct lm3530_platform_data *pdata = dev_get_platdata(&client->dev);
struct lm3530_data *drvdata;
@@ -485,7 +484,7 @@ static const struct i2c_device_id lm3530_id[] = {
MODULE_DEVICE_TABLE(i2c, lm3530_id);
static struct i2c_driver lm3530_i2c_driver = {
- .probe = lm3530_probe,
+ .probe_new = lm3530_probe,
.remove = lm3530_remove,
.id_table = lm3530_id,
.driver = {
diff --git a/drivers/leds/leds-lm3532.c b/drivers/leds/leds-lm3532.c
index db64d44bcbbf..a08c09129a68 100644
--- a/drivers/leds/leds-lm3532.c
+++ b/drivers/leds/leds-lm3532.c
@@ -663,8 +663,7 @@ child_out:
return ret;
}
-static int lm3532_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int lm3532_probe(struct i2c_client *client)
{
struct lm3532_data *drvdata;
int ret = 0;
@@ -727,7 +726,7 @@ static const struct i2c_device_id lm3532_id[] = {
MODULE_DEVICE_TABLE(i2c, lm3532_id);
static struct i2c_driver lm3532_i2c_driver = {
- .probe = lm3532_probe,
+ .probe_new = lm3532_probe,
.remove = lm3532_remove,
.id_table = lm3532_id,
.driver = {
diff --git a/drivers/leds/leds-lm355x.c b/drivers/leds/leds-lm355x.c
index daa35927b301..612873070ca4 100644
--- a/drivers/leds/leds-lm355x.c
+++ b/drivers/leds/leds-lm355x.c
@@ -396,9 +396,9 @@ static const struct regmap_config lm355x_regmap = {
};
/* module initialize */
-static int lm355x_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int lm355x_probe(struct i2c_client *client)
{
+ const struct i2c_device_id *id = i2c_client_get_device_id(client);
struct lm355x_platform_data *pdata = dev_get_platdata(&client->dev);
struct lm355x_chip_data *chip;
@@ -516,7 +516,7 @@ static struct i2c_driver lm355x_i2c_driver = {
.name = LM355x_NAME,
.pm = NULL,
},
- .probe = lm355x_probe,
+ .probe_new = lm355x_probe,
.remove = lm355x_remove,
.id_table = lm355x_id,
};
diff --git a/drivers/leds/leds-lm3642.c b/drivers/leds/leds-lm3642.c
index 428a5d928150..b75ee3546c2e 100644
--- a/drivers/leds/leds-lm3642.c
+++ b/drivers/leds/leds-lm3642.c
@@ -289,8 +289,7 @@ static struct attribute *lm3642_torch_attrs[] = {
};
ATTRIBUTE_GROUPS(lm3642_torch);
-static int lm3642_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int lm3642_probe(struct i2c_client *client)
{
struct lm3642_platform_data *pdata = dev_get_platdata(&client->dev);
struct lm3642_chip_data *chip;
@@ -402,7 +401,7 @@ static struct i2c_driver lm3642_i2c_driver = {
.name = LM3642_NAME,
.pm = NULL,
},
- .probe = lm3642_probe,
+ .probe_new = lm3642_probe,
.remove = lm3642_remove,
.id_table = lm3642_id,
};
diff --git a/drivers/leds/leds-lm3692x.c b/drivers/leds/leds-lm3692x.c
index 54b4662bff41..66126d0666f5 100644
--- a/drivers/leds/leds-lm3692x.c
+++ b/drivers/leds/leds-lm3692x.c
@@ -456,9 +456,9 @@ static int lm3692x_probe_dt(struct lm3692x_led *led)
return ret;
}
-static int lm3692x_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int lm3692x_probe(struct i2c_client *client)
{
+ const struct i2c_device_id *id = i2c_client_get_device_id(client);
struct lm3692x_led *led;
int ret;
@@ -518,7 +518,7 @@ static struct i2c_driver lm3692x_driver = {
.name = "lm3692x",
.of_match_table = of_lm3692x_leds_match,
},
- .probe = lm3692x_probe,
+ .probe_new = lm3692x_probe,
.remove = lm3692x_remove,
.id_table = lm3692x_id,
};
diff --git a/drivers/leds/leds-lm3697.c b/drivers/leds/leds-lm3697.c
index 71231a60eebc..10e904bf40a0 100644
--- a/drivers/leds/leds-lm3697.c
+++ b/drivers/leds/leds-lm3697.c
@@ -299,8 +299,7 @@ child_out:
return ret;
}
-static int lm3697_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int lm3697_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct lm3697 *led;
@@ -377,7 +376,7 @@ static struct i2c_driver lm3697_driver = {
.name = "lm3697",
.of_match_table = of_lm3697_leds_match,
},
- .probe = lm3697_probe,
+ .probe_new = lm3697_probe,
.remove = lm3697_remove,
.id_table = lm3697_id,
};
diff --git a/drivers/leds/leds-lp3944.c b/drivers/leds/leds-lp3944.c
index 673ad8c04f41..be47c66b2e00 100644
--- a/drivers/leds/leds-lp3944.c
+++ b/drivers/leds/leds-lp3944.c
@@ -359,8 +359,7 @@ exit:
return err;
}
-static int lp3944_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int lp3944_probe(struct i2c_client *client)
{
struct lp3944_platform_data *lp3944_pdata =
dev_get_platdata(&client->dev);
@@ -428,7 +427,7 @@ static struct i2c_driver lp3944_driver = {
.driver = {
.name = "lp3944",
},
- .probe = lp3944_probe,
+ .probe_new = lp3944_probe,
.remove = lp3944_remove,
.id_table = lp3944_id,
};
diff --git a/drivers/leds/leds-lp3952.c b/drivers/leds/leds-lp3952.c
index bf0ad1b5ce24..24b2e0f9080d 100644
--- a/drivers/leds/leds-lp3952.c
+++ b/drivers/leds/leds-lp3952.c
@@ -207,8 +207,7 @@ static const struct regmap_config lp3952_regmap = {
.cache_type = REGCACHE_RBTREE,
};
-static int lp3952_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int lp3952_probe(struct i2c_client *client)
{
int status;
struct lp3952_led_array *priv;
@@ -274,7 +273,7 @@ static struct i2c_driver lp3952_i2c_driver = {
.driver = {
.name = LP3952_NAME,
},
- .probe = lp3952_probe,
+ .probe_new = lp3952_probe,
.remove = lp3952_remove,
.id_table = lp3952_id,
};
diff --git a/drivers/leds/leds-lp5521.c b/drivers/leds/leds-lp5521.c
index 19478d9c19a7..a004af8e22c7 100644
--- a/drivers/leds/leds-lp5521.c
+++ b/drivers/leds/leds-lp5521.c
@@ -516,9 +516,9 @@ static struct lp55xx_device_config lp5521_cfg = {
.dev_attr_group = &lp5521_group,
};
-static int lp5521_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int lp5521_probe(struct i2c_client *client)
{
+ const struct i2c_device_id *id = i2c_client_get_device_id(client);
int ret;
struct lp55xx_chip *chip;
struct lp55xx_led *led;
@@ -608,7 +608,7 @@ static struct i2c_driver lp5521_driver = {
.name = "lp5521",
.of_match_table = of_match_ptr(of_lp5521_leds_match),
},
- .probe = lp5521_probe,
+ .probe_new = lp5521_probe,
.remove = lp5521_remove,
.id_table = lp5521_id,
};
diff --git a/drivers/leds/leds-lp5523.c b/drivers/leds/leds-lp5523.c
index e08e3de1428d..55da914b8e5c 100644
--- a/drivers/leds/leds-lp5523.c
+++ b/drivers/leds/leds-lp5523.c
@@ -887,9 +887,9 @@ static struct lp55xx_device_config lp5523_cfg = {
.dev_attr_group = &lp5523_group,
};
-static int lp5523_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int lp5523_probe(struct i2c_client *client)
{
+ const struct i2c_device_id *id = i2c_client_get_device_id(client);
int ret;
struct lp55xx_chip *chip;
struct lp55xx_led *led;
@@ -983,7 +983,7 @@ static struct i2c_driver lp5523_driver = {
.name = "lp5523x",
.of_match_table = of_match_ptr(of_lp5523_leds_match),
},
- .probe = lp5523_probe,
+ .probe_new = lp5523_probe,
.remove = lp5523_remove,
.id_table = lp5523_id,
};
diff --git a/drivers/leds/leds-lp5562.c b/drivers/leds/leds-lp5562.c
index 0e490085ff35..b5d877faf6d7 100644
--- a/drivers/leds/leds-lp5562.c
+++ b/drivers/leds/leds-lp5562.c
@@ -511,8 +511,7 @@ static struct lp55xx_device_config lp5562_cfg = {
.dev_attr_group = &lp5562_group,
};
-static int lp5562_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int lp5562_probe(struct i2c_client *client)
{
int ret;
struct lp55xx_chip *chip;
@@ -604,7 +603,7 @@ static struct i2c_driver lp5562_driver = {
.name = "lp5562",
.of_match_table = of_match_ptr(of_lp5562_leds_match),
},
- .probe = lp5562_probe,
+ .probe_new = lp5562_probe,
.remove = lp5562_remove,
.id_table = lp5562_id,
};
diff --git a/drivers/leds/leds-lp8501.c b/drivers/leds/leds-lp8501.c
index ae11a02c0ab2..165d6423a928 100644
--- a/drivers/leds/leds-lp8501.c
+++ b/drivers/leds/leds-lp8501.c
@@ -299,9 +299,9 @@ static struct lp55xx_device_config lp8501_cfg = {
.run_engine = lp8501_run_engine,
};
-static int lp8501_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int lp8501_probe(struct i2c_client *client)
{
+ const struct i2c_device_id *id = i2c_client_get_device_id(client);
int ret;
struct lp55xx_chip *chip;
struct lp55xx_led *led;
@@ -392,7 +392,7 @@ static struct i2c_driver lp8501_driver = {
.name = "lp8501",
.of_match_table = of_match_ptr(of_lp8501_leds_match),
},
- .probe = lp8501_probe,
+ .probe_new = lp8501_probe,
.remove = lp8501_remove,
.id_table = lp8501_id,
};
diff --git a/drivers/leds/leds-lp8860.c b/drivers/leds/leds-lp8860.c
index e2b36d3187eb..b66ed5ac1aa5 100644
--- a/drivers/leds/leds-lp8860.c
+++ b/drivers/leds/leds-lp8860.c
@@ -375,8 +375,7 @@ static const struct regmap_config lp8860_eeprom_regmap_config = {
.cache_type = REGCACHE_NONE,
};
-static int lp8860_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int lp8860_probe(struct i2c_client *client)
{
int ret;
struct lp8860_led *led;
@@ -480,7 +479,7 @@ static struct i2c_driver lp8860_driver = {
.name = "lp8860",
.of_match_table = of_lp8860_leds_match,
},
- .probe = lp8860_probe,
+ .probe_new = lp8860_probe,
.remove = lp8860_remove,
.id_table = lp8860_id,
};
diff --git a/drivers/leds/leds-mt6323.c b/drivers/leds/leds-mt6323.c
index f59e0e8bda8b..17ee88043f52 100644
--- a/drivers/leds/leds-mt6323.c
+++ b/drivers/leds/leds-mt6323.c
@@ -339,23 +339,23 @@ static int mt6323_led_set_dt_default(struct led_classdev *cdev,
struct device_node *np)
{
struct mt6323_led *led = container_of(cdev, struct mt6323_led, cdev);
- const char *state;
+ enum led_default_state state;
int ret = 0;
- state = of_get_property(np, "default-state", NULL);
- if (state) {
- if (!strcmp(state, "keep")) {
- ret = mt6323_get_led_hw_brightness(cdev);
- if (ret < 0)
- return ret;
- led->current_brightness = ret;
- ret = 0;
- } else if (!strcmp(state, "on")) {
- ret =
- mt6323_led_set_brightness(cdev, cdev->max_brightness);
- } else {
- ret = mt6323_led_set_brightness(cdev, LED_OFF);
- }
+ state = led_init_default_state_get(of_fwnode_handle(np));
+ switch (state) {
+ case LEDS_DEFSTATE_ON:
+ ret = mt6323_led_set_brightness(cdev, cdev->max_brightness);
+ break;
+ case LEDS_DEFSTATE_KEEP:
+ ret = mt6323_get_led_hw_brightness(cdev);
+ if (ret < 0)
+ return ret;
+ led->current_brightness = ret;
+ ret = 0;
+ break;
+ default:
+ ret = mt6323_led_set_brightness(cdev, LED_OFF);
}
return ret;
diff --git a/drivers/leds/leds-pca9532.c b/drivers/leds/leds-pca9532.c
index df83d97cb479..15b1acfa442e 100644
--- a/drivers/leds/leds-pca9532.c
+++ b/drivers/leds/leds-pca9532.c
@@ -50,8 +50,7 @@ struct pca9532_data {
u8 psc[2];
};
-static int pca9532_probe(struct i2c_client *client,
- const struct i2c_device_id *id);
+static int pca9532_probe(struct i2c_client *client);
static void pca9532_remove(struct i2c_client *client);
enum {
@@ -103,7 +102,7 @@ static struct i2c_driver pca9532_driver = {
.name = "leds-pca953x",
.of_match_table = of_match_ptr(of_pca9532_leds_match),
},
- .probe = pca9532_probe,
+ .probe_new = pca9532_probe,
.remove = pca9532_remove,
.id_table = pca9532_id,
};
@@ -504,9 +503,9 @@ pca9532_of_populate_pdata(struct device *dev, struct device_node *np)
return pdata;
}
-static int pca9532_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int pca9532_probe(struct i2c_client *client)
{
+ const struct i2c_device_id *id = i2c_client_get_device_id(client);
int devid;
struct pca9532_data *data = i2c_get_clientdata(client);
struct pca9532_platform_data *pca9532_pdata =
diff --git a/drivers/leds/leds-pca955x.c b/drivers/leds/leds-pca955x.c
index 33ec4543fb4f..1edd092e7894 100644
--- a/drivers/leds/leds-pca955x.c
+++ b/drivers/leds/leds-pca955x.c
@@ -130,7 +130,7 @@ struct pca955x_led {
struct led_classdev led_cdev;
int led_num; /* 0 .. 15 potentially */
u32 type;
- int default_state;
+ enum led_default_state default_state;
struct fwnode_handle *fwnode;
};
@@ -437,7 +437,6 @@ pca955x_get_pdata(struct i2c_client *client, struct pca955x_chipdef *chip)
return ERR_PTR(-ENOMEM);
device_for_each_child_node(&client->dev, child) {
- const char *state;
u32 reg;
int res;
@@ -448,19 +447,9 @@ pca955x_get_pdata(struct i2c_client *client, struct pca955x_chipdef *chip)
led = &pdata->leds[reg];
led->type = PCA955X_TYPE_LED;
led->fwnode = child;
- fwnode_property_read_u32(child, "type", &led->type);
+ led->default_state = led_init_default_state_get(child);
- if (!fwnode_property_read_string(child, "default-state",
- &state)) {
- if (!strcmp(state, "keep"))
- led->default_state = LEDS_GPIO_DEFSTATE_KEEP;
- else if (!strcmp(state, "on"))
- led->default_state = LEDS_GPIO_DEFSTATE_ON;
- else
- led->default_state = LEDS_GPIO_DEFSTATE_OFF;
- } else {
- led->default_state = LEDS_GPIO_DEFSTATE_OFF;
- }
+ fwnode_property_read_u32(child, "type", &led->type);
}
pdata->num_leds = chip->bits;
@@ -572,13 +561,11 @@ static int pca955x_probe(struct i2c_client *client)
led->brightness_set_blocking = pca955x_led_set;
led->brightness_get = pca955x_led_get;
- if (pdata->leds[i].default_state ==
- LEDS_GPIO_DEFSTATE_OFF) {
+ if (pdata->leds[i].default_state == LEDS_DEFSTATE_OFF) {
err = pca955x_led_set(led, LED_OFF);
if (err)
return err;
- } else if (pdata->leds[i].default_state ==
- LEDS_GPIO_DEFSTATE_ON) {
+ } else if (pdata->leds[i].default_state == LEDS_DEFSTATE_ON) {
err = pca955x_led_set(led, LED_FULL);
if (err)
return err;
@@ -617,8 +604,7 @@ static int pca955x_probe(struct i2c_client *client)
* brightness to see if it's using PWM1. If so, PWM1
* should not be written below.
*/
- if (pdata->leds[i].default_state ==
- LEDS_GPIO_DEFSTATE_KEEP) {
+ if (pdata->leds[i].default_state == LEDS_DEFSTATE_KEEP) {
if (led->brightness != LED_FULL &&
led->brightness != LED_OFF &&
led->brightness != LED_HALF)
diff --git a/drivers/leds/leds-pca963x.c b/drivers/leds/leds-pca963x.c
index a7e052c1db53..9cd476db601f 100644
--- a/drivers/leds/leds-pca963x.c
+++ b/drivers/leds/leds-pca963x.c
@@ -389,9 +389,9 @@ static const struct of_device_id of_pca963x_match[] = {
};
MODULE_DEVICE_TABLE(of, of_pca963x_match);
-static int pca963x_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int pca963x_probe(struct i2c_client *client)
{
+ const struct i2c_device_id *id = i2c_client_get_device_id(client);
struct device *dev = &client->dev;
struct pca963x_chipdef *chipdef;
struct pca963x *chip;
@@ -431,7 +431,7 @@ static struct i2c_driver pca963x_driver = {
.name = "leds-pca963x",
.of_match_table = of_pca963x_match,
},
- .probe = pca963x_probe,
+ .probe_new = pca963x_probe,
.id_table = pca963x_id,
};
diff --git a/drivers/leds/leds-pm8058.c b/drivers/leds/leds-pm8058.c
index fb2ab72c0c40..b9233f14b646 100644
--- a/drivers/leds/leds-pm8058.c
+++ b/drivers/leds/leds-pm8058.c
@@ -93,8 +93,8 @@ static int pm8058_led_probe(struct platform_device *pdev)
struct device_node *np;
int ret;
struct regmap *map;
- const char *state;
enum led_brightness maxbright;
+ enum led_default_state state;
led = devm_kzalloc(dev, sizeof(*led), GFP_KERNEL);
if (!led)
@@ -125,25 +125,26 @@ static int pm8058_led_probe(struct platform_device *pdev)
maxbright = 15; /* 4 bits */
led->cdev.max_brightness = maxbright;
- state = of_get_property(np, "default-state", NULL);
- if (state) {
- if (!strcmp(state, "keep")) {
- led->cdev.brightness = pm8058_led_get(&led->cdev);
- } else if (!strcmp(state, "on")) {
- led->cdev.brightness = maxbright;
- pm8058_led_set(&led->cdev, maxbright);
- } else {
- led->cdev.brightness = LED_OFF;
- pm8058_led_set(&led->cdev, LED_OFF);
- }
+ init_data.fwnode = of_fwnode_handle(np);
+
+ state = led_init_default_state_get(init_data.fwnode);
+ switch (state) {
+ case LEDS_DEFSTATE_ON:
+ led->cdev.brightness = maxbright;
+ pm8058_led_set(&led->cdev, maxbright);
+ break;
+ case LEDS_DEFSTATE_KEEP:
+ led->cdev.brightness = pm8058_led_get(&led->cdev);
+ break;
+ default:
+ led->cdev.brightness = LED_OFF;
+ pm8058_led_set(&led->cdev, LED_OFF);
}
if (led->ledtype == PM8058_LED_TYPE_KEYPAD ||
led->ledtype == PM8058_LED_TYPE_FLASH)
led->cdev.flags = LED_CORE_SUSPENDRESUME;
- init_data.fwnode = of_fwnode_handle(np);
-
ret = devm_led_classdev_register_ext(dev, &led->cdev, &init_data);
if (ret)
dev_err(dev, "Failed to register LED for %pOF\n", np);
diff --git a/drivers/leds/leds-pwm.c b/drivers/leds/leds-pwm.c
index 6832180c1c54..29194cc382af 100644
--- a/drivers/leds/leds-pwm.c
+++ b/drivers/leds/leds-pwm.c
@@ -138,9 +138,9 @@ static int led_pwm_create_fwnode(struct device *dev, struct led_pwm_priv *priv)
struct led_pwm led;
int ret;
- memset(&led, 0, sizeof(led));
-
device_for_each_child_node(dev, fwnode) {
+ memset(&led, 0, sizeof(led));
+
ret = fwnode_property_read_string(fwnode, "label", &led.name);
if (ret && is_of_node(fwnode))
led.name = to_of_node(fwnode)->name;
diff --git a/drivers/leds/leds-s3c24xx.c b/drivers/leds/leds-s3c24xx.c
deleted file mode 100644
index 3c0c7aa63b8c..000000000000
--- a/drivers/leds/leds-s3c24xx.c
+++ /dev/null
@@ -1,83 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/* drivers/leds/leds-s3c24xx.c
- *
- * (c) 2006 Simtec Electronics
- * http://armlinux.simtec.co.uk/
- * Ben Dooks <ben@simtec.co.uk>
- *
- * S3C24XX - LEDs GPIO driver
-*/
-
-#include <linux/kernel.h>
-#include <linux/platform_device.h>
-#include <linux/leds.h>
-#include <linux/gpio/consumer.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/platform_data/leds-s3c24xx.h>
-
-/* our context */
-
-struct s3c24xx_gpio_led {
- struct led_classdev cdev;
- struct s3c24xx_led_platdata *pdata;
- struct gpio_desc *gpiod;
-};
-
-static inline struct s3c24xx_gpio_led *to_gpio(struct led_classdev *led_cdev)
-{
- return container_of(led_cdev, struct s3c24xx_gpio_led, cdev);
-}
-
-static void s3c24xx_led_set(struct led_classdev *led_cdev,
- enum led_brightness value)
-{
- struct s3c24xx_gpio_led *led = to_gpio(led_cdev);
-
- gpiod_set_value(led->gpiod, !!value);
-}
-
-static int s3c24xx_led_probe(struct platform_device *dev)
-{
- struct s3c24xx_led_platdata *pdata = dev_get_platdata(&dev->dev);
- struct s3c24xx_gpio_led *led;
- int ret;
-
- led = devm_kzalloc(&dev->dev, sizeof(struct s3c24xx_gpio_led),
- GFP_KERNEL);
- if (!led)
- return -ENOMEM;
-
- led->cdev.brightness_set = s3c24xx_led_set;
- led->cdev.default_trigger = pdata->def_trigger;
- led->cdev.name = pdata->name;
- led->cdev.flags |= LED_CORE_SUSPENDRESUME;
-
- led->pdata = pdata;
-
- /* Default to off */
- led->gpiod = devm_gpiod_get(&dev->dev, NULL, GPIOD_OUT_LOW);
- if (IS_ERR(led->gpiod))
- return PTR_ERR(led->gpiod);
-
- /* register our new led device */
- ret = devm_led_classdev_register(&dev->dev, &led->cdev);
- if (ret < 0)
- dev_err(&dev->dev, "led_classdev_register failed\n");
-
- return ret;
-}
-
-static struct platform_driver s3c24xx_led_driver = {
- .probe = s3c24xx_led_probe,
- .driver = {
- .name = "s3c24xx_led",
- },
-};
-
-module_platform_driver(s3c24xx_led_driver);
-
-MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
-MODULE_DESCRIPTION("S3C24XX LED driver");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:s3c24xx_led");
diff --git a/drivers/leds/leds-syscon.c b/drivers/leds/leds-syscon.c
index 7eddb8ecb44e..e38abb5e60c1 100644
--- a/drivers/leds/leds-syscon.c
+++ b/drivers/leds/leds-syscon.c
@@ -61,7 +61,8 @@ static int syscon_led_probe(struct platform_device *pdev)
struct device *parent;
struct regmap *map;
struct syscon_led *sled;
- const char *state;
+ enum led_default_state state;
+ u32 value;
int ret;
parent = dev->parent;
@@ -86,34 +87,30 @@ static int syscon_led_probe(struct platform_device *pdev)
if (of_property_read_u32(np, "mask", &sled->mask))
return -EINVAL;
- state = of_get_property(np, "default-state", NULL);
- if (state) {
- if (!strcmp(state, "keep")) {
- u32 val;
-
- ret = regmap_read(map, sled->offset, &val);
- if (ret < 0)
- return ret;
- sled->state = !!(val & sled->mask);
- } else if (!strcmp(state, "on")) {
- sled->state = true;
- ret = regmap_update_bits(map, sled->offset,
- sled->mask,
- sled->mask);
- if (ret < 0)
- return ret;
- } else {
- sled->state = false;
- ret = regmap_update_bits(map, sled->offset,
- sled->mask, 0);
- if (ret < 0)
- return ret;
- }
+ init_data.fwnode = of_fwnode_handle(np);
+
+ state = led_init_default_state_get(init_data.fwnode);
+ switch (state) {
+ case LEDS_DEFSTATE_ON:
+ ret = regmap_update_bits(map, sled->offset, sled->mask, sled->mask);
+ if (ret < 0)
+ return ret;
+ sled->state = true;
+ break;
+ case LEDS_DEFSTATE_KEEP:
+ ret = regmap_read(map, sled->offset, &value);
+ if (ret < 0)
+ return ret;
+ sled->state = !!(value & sled->mask);
+ break;
+ default:
+ ret = regmap_update_bits(map, sled->offset, sled->mask, 0);
+ if (ret < 0)
+ return ret;
+ sled->state = false;
}
sled->cdev.brightness_set = syscon_led_set;
- init_data.fwnode = of_fwnode_handle(np);
-
ret = devm_led_classdev_register_ext(dev, &sled->cdev, &init_data);
if (ret < 0)
return ret;
diff --git a/drivers/leds/leds-tca6507.c b/drivers/leds/leds-tca6507.c
index 161bef65c6b7..07dd12686a69 100644
--- a/drivers/leds/leds-tca6507.c
+++ b/drivers/leds/leds-tca6507.c
@@ -695,8 +695,7 @@ tca6507_led_dt_init(struct device *dev)
&led.default_trigger);
led.flags = 0;
- if (fwnode_property_match_string(child, "compatible",
- "gpio") >= 0)
+ if (fwnode_device_is_compatible(child, "gpio"))
led.flags |= TCA6507_MAKE_GPIO;
ret = fwnode_property_read_u32(child, "reg", &reg);
@@ -728,8 +727,7 @@ static const struct of_device_id __maybe_unused of_tca6507_leds_match[] = {
};
MODULE_DEVICE_TABLE(of, of_tca6507_leds_match);
-static int tca6507_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int tca6507_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct i2c_adapter *adapter;
@@ -809,7 +807,7 @@ static struct i2c_driver tca6507_driver = {
.name = "leds-tca6507",
.of_match_table = of_match_ptr(of_tca6507_leds_match),
},
- .probe = tca6507_probe,
+ .probe_new = tca6507_probe,
.remove = tca6507_remove,
.id_table = tca6507_id,
};
diff --git a/drivers/leds/leds-tlc591xx.c b/drivers/leds/leds-tlc591xx.c
index cb7bd1353f9f..ec25e0c16bea 100644
--- a/drivers/leds/leds-tlc591xx.c
+++ b/drivers/leds/leds-tlc591xx.c
@@ -145,8 +145,7 @@ static const struct of_device_id of_tlc591xx_leds_match[] = {
MODULE_DEVICE_TABLE(of, of_tlc591xx_leds_match);
static int
-tlc591xx_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+tlc591xx_probe(struct i2c_client *client)
{
struct device_node *np, *child;
struct device *dev = &client->dev;
@@ -231,7 +230,7 @@ static struct i2c_driver tlc591xx_driver = {
.name = "tlc591xx",
.of_match_table = of_match_ptr(of_tlc591xx_leds_match),
},
- .probe = tlc591xx_probe,
+ .probe_new = tlc591xx_probe,
.id_table = tlc591xx_id,
};
diff --git a/drivers/leds/leds-turris-omnia.c b/drivers/leds/leds-turris-omnia.c
index c7c9851c894a..013f551b32b2 100644
--- a/drivers/leds/leds-turris-omnia.c
+++ b/drivers/leds/leds-turris-omnia.c
@@ -201,8 +201,7 @@ static struct attribute *omnia_led_controller_attrs[] = {
};
ATTRIBUTE_GROUPS(omnia_led_controller);
-static int omnia_leds_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int omnia_leds_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct device_node *np = dev_of_node(dev), *child;
@@ -272,7 +271,7 @@ static const struct i2c_device_id omnia_id[] = {
MODULE_DEVICE_TABLE(i2c, omnia_id);
static struct i2c_driver omnia_leds_driver = {
- .probe = omnia_leds_probe,
+ .probe_new = omnia_leds_probe,
.remove = omnia_leds_remove,
.id_table = omnia_id,
.driver = {
diff --git a/drivers/leds/leds.h b/drivers/leds/leds.h
index aa64757a4d89..345062ccabda 100644
--- a/drivers/leds/leds.h
+++ b/drivers/leds/leds.h
@@ -27,7 +27,6 @@ ssize_t led_trigger_read(struct file *filp, struct kobject *kobj,
ssize_t led_trigger_write(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf,
loff_t pos, size_t count);
-enum led_default_state led_init_default_state_get(struct fwnode_handle *fwnode);
extern struct rw_semaphore leds_list_lock;
extern struct list_head leds_list;
diff --git a/drivers/leds/simple/simatic-ipc-leds-gpio.c b/drivers/leds/simple/simatic-ipc-leds-gpio.c
index 07f0d79d604d..e8d329b5a68c 100644
--- a/drivers/leds/simple/simatic-ipc-leds-gpio.c
+++ b/drivers/leds/simple/simatic-ipc-leds-gpio.c
@@ -77,6 +77,8 @@ static int simatic_ipc_leds_gpio_probe(struct platform_device *pdev)
switch (plat->devmode) {
case SIMATIC_IPC_DEVICE_127E:
+ if (!IS_ENABLED(CONFIG_PINCTRL_BROXTON))
+ return -ENODEV;
simatic_ipc_led_gpio_table = &simatic_ipc_led_gpio_table_127e;
break;
case SIMATIC_IPC_DEVICE_227G:
diff --git a/drivers/leds/trigger/ledtrig-disk.c b/drivers/leds/trigger/ledtrig-disk.c
index 0741910785bb..0b7dfbd04273 100644
--- a/drivers/leds/trigger/ledtrig-disk.c
+++ b/drivers/leds/trigger/ledtrig-disk.c
@@ -16,7 +16,6 @@
DEFINE_LED_TRIGGER(ledtrig_disk);
DEFINE_LED_TRIGGER(ledtrig_disk_read);
DEFINE_LED_TRIGGER(ledtrig_disk_write);
-DEFINE_LED_TRIGGER(ledtrig_ide);
void ledtrig_disk_activity(bool write)
{
@@ -24,8 +23,6 @@ void ledtrig_disk_activity(bool write)
led_trigger_blink_oneshot(ledtrig_disk,
&blink_delay, &blink_delay, 0);
- led_trigger_blink_oneshot(ledtrig_ide,
- &blink_delay, &blink_delay, 0);
if (write)
led_trigger_blink_oneshot(ledtrig_disk_write,
&blink_delay, &blink_delay, 0);
@@ -40,7 +37,6 @@ static int __init ledtrig_disk_init(void)
led_trigger_register_simple("disk-activity", &ledtrig_disk);
led_trigger_register_simple("disk-read", &ledtrig_disk_read);
led_trigger_register_simple("disk-write", &ledtrig_disk_write);
- led_trigger_register_simple("ide-disk", &ledtrig_ide);
return 0;
}
diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
index 3bc1f374e657..211ed9aa9edc 100644
--- a/drivers/macintosh/macio_asic.c
+++ b/drivers/macintosh/macio_asic.c
@@ -128,12 +128,17 @@ static int macio_device_resume(struct device * dev)
return 0;
}
+static int macio_device_modalias(const struct device *dev, struct kobj_uevent_env *env)
+{
+ return of_device_uevent_modalias(dev, env);
+}
+
extern const struct attribute_group *macio_dev_groups[];
struct bus_type macio_bus_type = {
.name = "macio",
.match = macio_bus_match,
- .uevent = of_device_uevent_modalias,
+ .uevent = macio_device_modalias,
.probe = macio_device_probe,
.remove = macio_device_remove,
.shutdown = macio_device_shutdown,
diff --git a/drivers/mcb/mcb-core.c b/drivers/mcb/mcb-core.c
index b8ad4f16b4ac..978fdfc19a06 100644
--- a/drivers/mcb/mcb-core.c
+++ b/drivers/mcb/mcb-core.c
@@ -41,9 +41,9 @@ static int mcb_match(struct device *dev, struct device_driver *drv)
return 0;
}
-static int mcb_uevent(struct device *dev, struct kobj_uevent_env *env)
+static int mcb_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
- struct mcb_device *mdev = to_mcb_device(dev);
+ const struct mcb_device *mdev = to_mcb_device(dev);
int ret;
ret = add_uevent_var(env, "MODALIAS=mcb:16z%03d", mdev->id);
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 998a5cfdbc4e..5f1e2593fad7 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -6,7 +6,6 @@
menuconfig MD
bool "Multiple devices driver support (RAID and LVM)"
depends on BLOCK
- select SRCU
help
Support multiple physical spindles through a single logical device.
Required for RAID and logical volume management.
diff --git a/drivers/md/dm-audit.c b/drivers/md/dm-audit.c
index 3049dfe67e50..2e979eeb1116 100644
--- a/drivers/md/dm-audit.c
+++ b/drivers/md/dm-audit.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Creating audit records for mapped devices.
*
diff --git a/drivers/md/dm-bio-prison-v1.c b/drivers/md/dm-bio-prison-v1.c
index 1f8f98efd97a..c4c05d5d8909 100644
--- a/drivers/md/dm-bio-prison-v1.c
+++ b/drivers/md/dm-bio-prison-v1.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2012 Red Hat, Inc.
*
@@ -77,9 +78,9 @@ static void __setup_new_cell(struct dm_cell_key *key,
struct bio *holder,
struct dm_bio_prison_cell *cell)
{
- memcpy(&cell->key, key, sizeof(cell->key));
- cell->holder = holder;
- bio_list_init(&cell->bios);
+ memcpy(&cell->key, key, sizeof(cell->key));
+ cell->holder = holder;
+ bio_list_init(&cell->bios);
}
static int cmp_keys(struct dm_cell_key *lhs,
@@ -285,14 +286,14 @@ EXPORT_SYMBOL_GPL(dm_cell_promote_or_release);
struct dm_deferred_entry {
struct dm_deferred_set *ds;
- unsigned count;
+ unsigned int count;
struct list_head work_items;
};
struct dm_deferred_set {
spinlock_t lock;
- unsigned current_entry;
- unsigned sweeper;
+ unsigned int current_entry;
+ unsigned int sweeper;
struct dm_deferred_entry entries[DEFERRED_SET_SIZE];
};
@@ -338,7 +339,7 @@ struct dm_deferred_entry *dm_deferred_entry_inc(struct dm_deferred_set *ds)
}
EXPORT_SYMBOL_GPL(dm_deferred_entry_inc);
-static unsigned ds_next(unsigned index)
+static unsigned int ds_next(unsigned int index)
{
return (index + 1) % DEFERRED_SET_SIZE;
}
@@ -373,7 +374,7 @@ EXPORT_SYMBOL_GPL(dm_deferred_entry_dec);
int dm_deferred_set_add_work(struct dm_deferred_set *ds, struct list_head *work)
{
int r = 1;
- unsigned next_entry;
+ unsigned int next_entry;
spin_lock_irq(&ds->lock);
if ((ds->sweeper == ds->current_entry) &&
@@ -432,7 +433,7 @@ static int __init dm_bio_prison_init(void)
return 0;
- bad:
+bad:
while (i--)
_exits[i]();
diff --git a/drivers/md/dm-bio-prison-v1.h b/drivers/md/dm-bio-prison-v1.h
index cec52ac5e1ae..dfbf1e94cb75 100644
--- a/drivers/md/dm-bio-prison-v1.h
+++ b/drivers/md/dm-bio-prison-v1.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2011-2017 Red Hat, Inc.
*
diff --git a/drivers/md/dm-bio-prison-v2.c b/drivers/md/dm-bio-prison-v2.c
index 9dec3b61cf70..fd852981ef9c 100644
--- a/drivers/md/dm-bio-prison-v2.c
+++ b/drivers/md/dm-bio-prison-v2.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2012-2017 Red Hat, Inc.
*
@@ -148,7 +149,7 @@ static bool __find_or_insert(struct dm_bio_prison_v2 *prison,
static bool __get(struct dm_bio_prison_v2 *prison,
struct dm_cell_key_v2 *key,
- unsigned lock_level,
+ unsigned int lock_level,
struct bio *inmate,
struct dm_bio_prison_cell_v2 *cell_prealloc,
struct dm_bio_prison_cell_v2 **cell)
@@ -171,7 +172,7 @@ static bool __get(struct dm_bio_prison_v2 *prison,
bool dm_cell_get_v2(struct dm_bio_prison_v2 *prison,
struct dm_cell_key_v2 *key,
- unsigned lock_level,
+ unsigned int lock_level,
struct bio *inmate,
struct dm_bio_prison_cell_v2 *cell_prealloc,
struct dm_bio_prison_cell_v2 **cell_result)
@@ -194,7 +195,7 @@ static bool __put(struct dm_bio_prison_v2 *prison,
// FIXME: shared locks granted above the lock level could starve this
if (!cell->shared_count) {
- if (cell->exclusive_lock){
+ if (cell->exclusive_lock) {
if (cell->quiesce_continuation) {
queue_work(prison->wq, cell->quiesce_continuation);
cell->quiesce_continuation = NULL;
@@ -224,7 +225,7 @@ EXPORT_SYMBOL_GPL(dm_cell_put_v2);
static int __lock(struct dm_bio_prison_v2 *prison,
struct dm_cell_key_v2 *key,
- unsigned lock_level,
+ unsigned int lock_level,
struct dm_bio_prison_cell_v2 *cell_prealloc,
struct dm_bio_prison_cell_v2 **cell_result)
{
@@ -255,7 +256,7 @@ static int __lock(struct dm_bio_prison_v2 *prison,
int dm_cell_lock_v2(struct dm_bio_prison_v2 *prison,
struct dm_cell_key_v2 *key,
- unsigned lock_level,
+ unsigned int lock_level,
struct dm_bio_prison_cell_v2 *cell_prealloc,
struct dm_bio_prison_cell_v2 **cell_result)
{
@@ -291,7 +292,7 @@ EXPORT_SYMBOL_GPL(dm_cell_quiesce_v2);
static int __promote(struct dm_bio_prison_v2 *prison,
struct dm_bio_prison_cell_v2 *cell,
- unsigned new_lock_level)
+ unsigned int new_lock_level)
{
if (!cell->exclusive_lock)
return -EINVAL;
@@ -302,7 +303,7 @@ static int __promote(struct dm_bio_prison_v2 *prison,
int dm_cell_lock_promote_v2(struct dm_bio_prison_v2 *prison,
struct dm_bio_prison_cell_v2 *cell,
- unsigned new_lock_level)
+ unsigned int new_lock_level)
{
int r;
diff --git a/drivers/md/dm-bio-prison-v2.h b/drivers/md/dm-bio-prison-v2.h
index 6e04234268db..ce2cca5ef2f5 100644
--- a/drivers/md/dm-bio-prison-v2.h
+++ b/drivers/md/dm-bio-prison-v2.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2011-2017 Red Hat, Inc.
*
@@ -44,8 +45,8 @@ struct dm_cell_key_v2 {
struct dm_bio_prison_cell_v2 {
// FIXME: pack these
bool exclusive_lock;
- unsigned exclusive_level;
- unsigned shared_count;
+ unsigned int exclusive_level;
+ unsigned int shared_count;
struct work_struct *quiesce_continuation;
struct rb_node node;
@@ -86,7 +87,7 @@ void dm_bio_prison_free_cell_v2(struct dm_bio_prison_v2 *prison,
*/
bool dm_cell_get_v2(struct dm_bio_prison_v2 *prison,
struct dm_cell_key_v2 *key,
- unsigned lock_level,
+ unsigned int lock_level,
struct bio *inmate,
struct dm_bio_prison_cell_v2 *cell_prealloc,
struct dm_bio_prison_cell_v2 **cell_result);
@@ -114,7 +115,7 @@ bool dm_cell_put_v2(struct dm_bio_prison_v2 *prison,
*/
int dm_cell_lock_v2(struct dm_bio_prison_v2 *prison,
struct dm_cell_key_v2 *key,
- unsigned lock_level,
+ unsigned int lock_level,
struct dm_bio_prison_cell_v2 *cell_prealloc,
struct dm_bio_prison_cell_v2 **cell_result);
@@ -132,7 +133,7 @@ void dm_cell_quiesce_v2(struct dm_bio_prison_v2 *prison,
*/
int dm_cell_lock_promote_v2(struct dm_bio_prison_v2 *prison,
struct dm_bio_prison_cell_v2 *cell,
- unsigned new_lock_level);
+ unsigned int new_lock_level);
/*
* Adds any held bios to the bio list.
diff --git a/drivers/md/dm-bio-record.h b/drivers/md/dm-bio-record.h
index 745e3ab4aa0a..04d2f0cda324 100644
--- a/drivers/md/dm-bio-record.h
+++ b/drivers/md/dm-bio-record.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
*
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index bb786c39545e..cf077f9b30c3 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2009-2011 Red Hat, Inc.
*
@@ -89,18 +90,18 @@ struct dm_bufio_client {
unsigned long n_buffers[LIST_SIZE];
struct block_device *bdev;
- unsigned block_size;
+ unsigned int block_size;
s8 sectors_per_block_bits;
- void (*alloc_callback)(struct dm_buffer *);
- void (*write_callback)(struct dm_buffer *);
+ void (*alloc_callback)(struct dm_buffer *buf);
+ void (*write_callback)(struct dm_buffer *buf);
struct kmem_cache *slab_buffer;
struct kmem_cache *slab_cache;
struct dm_io_client *dm_io;
struct list_head reserved_buffers;
- unsigned need_reserved_buffers;
+ unsigned int need_reserved_buffers;
- unsigned minimum_buffers;
+ unsigned int minimum_buffers;
struct rb_root buffer_tree;
wait_queue_head_t free_buffer_wait;
@@ -145,17 +146,17 @@ struct dm_buffer {
unsigned char list_mode; /* LIST_* */
blk_status_t read_error;
blk_status_t write_error;
- unsigned accessed;
- unsigned hold_count;
+ unsigned int accessed;
+ unsigned int hold_count;
unsigned long state;
unsigned long last_accessed;
- unsigned dirty_start;
- unsigned dirty_end;
- unsigned write_start;
- unsigned write_end;
+ unsigned int dirty_start;
+ unsigned int dirty_end;
+ unsigned int write_start;
+ unsigned int write_end;
struct dm_bufio_client *c;
struct list_head write_list;
- void (*end_io)(struct dm_buffer *, blk_status_t);
+ void (*end_io)(struct dm_buffer *buf, blk_status_t stat);
#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
#define MAX_STACK 10
unsigned int stack_len;
@@ -215,12 +216,12 @@ static DEFINE_SPINLOCK(global_spinlock);
static LIST_HEAD(global_queue);
-static unsigned long global_num = 0;
+static unsigned long global_num;
/*
* Buffers are freed after this timeout
*/
-static unsigned dm_bufio_max_age = DM_BUFIO_DEFAULT_AGE_SECS;
+static unsigned int dm_bufio_max_age = DM_BUFIO_DEFAULT_AGE_SECS;
static unsigned long dm_bufio_retain_bytes = DM_BUFIO_DEFAULT_RETAIN_BYTES;
static unsigned long dm_bufio_peak_allocated;
@@ -258,9 +259,11 @@ static void buffer_record_stack(struct dm_buffer *b)
}
#endif
-/*----------------------------------------------------------------
+/*
+ *----------------------------------------------------------------
* A red/black tree acts as an index for all the buffers.
- *--------------------------------------------------------------*/
+ *----------------------------------------------------------------
+ */
static struct dm_buffer *__find(struct dm_bufio_client *c, sector_t block)
{
struct rb_node *n = c->buffer_tree.rb_node;
@@ -438,7 +441,7 @@ static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
* as if GFP_NOIO was specified.
*/
if (gfp_mask & __GFP_NORETRY) {
- unsigned noio_flag = memalloc_noio_save();
+ unsigned int noio_flag = memalloc_noio_save();
void *ptr = __vmalloc(c->block_size, gfp_mask);
memalloc_noio_restore(noio_flag);
@@ -561,7 +564,8 @@ static void __relink_lru(struct dm_buffer *b, int dirty)
b->last_accessed = jiffies;
}
-/*----------------------------------------------------------------
+/*
+ *--------------------------------------------------------------------------
* Submit I/O on the buffer.
*
* Bio interface is faster but it has some problems:
@@ -577,7 +581,8 @@ static void __relink_lru(struct dm_buffer *b, int dirty)
* rejects the bio because it is too large, use dm-io layer to do the I/O.
* The dm-io layer splits the I/O into multiple requests, avoiding the above
* shortcomings.
- *--------------------------------------------------------------*/
+ *--------------------------------------------------------------------------
+ */
/*
* dm-io completion routine. It just calls b->bio.bi_end_io, pretending
@@ -591,7 +596,7 @@ static void dmio_complete(unsigned long error, void *context)
}
static void use_dmio(struct dm_buffer *b, enum req_op op, sector_t sector,
- unsigned n_sectors, unsigned offset)
+ unsigned int n_sectors, unsigned int offset)
{
int r;
struct dm_io_request io_req = {
@@ -623,17 +628,18 @@ static void bio_complete(struct bio *bio)
{
struct dm_buffer *b = bio->bi_private;
blk_status_t status = bio->bi_status;
+
bio_uninit(bio);
kfree(bio);
b->end_io(b, status);
}
static void use_bio(struct dm_buffer *b, enum req_op op, sector_t sector,
- unsigned n_sectors, unsigned offset)
+ unsigned int n_sectors, unsigned int offset)
{
struct bio *bio;
char *ptr;
- unsigned vec_size, len;
+ unsigned int vec_size, len;
vec_size = b->c->block_size >> PAGE_SHIFT;
if (unlikely(b->c->sectors_per_block_bits < PAGE_SHIFT - SECTOR_SHIFT))
@@ -654,7 +660,8 @@ dmio:
len = n_sectors << SECTOR_SHIFT;
do {
- unsigned this_step = min((unsigned)(PAGE_SIZE - offset_in_page(ptr)), len);
+ unsigned int this_step = min((unsigned int)(PAGE_SIZE - offset_in_page(ptr)), len);
+
if (!bio_add_page(bio, virt_to_page(ptr), this_step,
offset_in_page(ptr))) {
bio_put(bio);
@@ -684,9 +691,9 @@ static inline sector_t block_to_sector(struct dm_bufio_client *c, sector_t block
static void submit_io(struct dm_buffer *b, enum req_op op,
void (*end_io)(struct dm_buffer *, blk_status_t))
{
- unsigned n_sectors;
+ unsigned int n_sectors;
sector_t sector;
- unsigned offset, end;
+ unsigned int offset, end;
b->end_io = end_io;
@@ -716,9 +723,11 @@ static void submit_io(struct dm_buffer *b, enum req_op op,
use_dmio(b, op, sector, n_sectors, offset);
}
-/*----------------------------------------------------------------
+/*
+ *--------------------------------------------------------------
* Writing dirty buffers
- *--------------------------------------------------------------*/
+ *--------------------------------------------------------------
+ */
/*
* The endio routine for write.
@@ -775,6 +784,7 @@ static void __write_dirty_buffer(struct dm_buffer *b,
static void __flush_write_list(struct list_head *write_list)
{
struct blk_plug plug;
+
blk_start_plug(&plug);
while (!list_empty(write_list)) {
struct dm_buffer *b =
@@ -998,9 +1008,11 @@ static void __check_watermark(struct dm_bufio_client *c,
__write_dirty_buffers_async(c, 1, write_list);
}
-/*----------------------------------------------------------------
+/*
+ *--------------------------------------------------------------
* Getting a buffer
- *--------------------------------------------------------------*/
+ *--------------------------------------------------------------
+ */
static struct dm_buffer *__bufio_new(struct dm_bufio_client *c, sector_t block,
enum new_flag nf, int *need_submit,
@@ -1156,7 +1168,7 @@ void *dm_bufio_new(struct dm_bufio_client *c, sector_t block,
EXPORT_SYMBOL_GPL(dm_bufio_new);
void dm_bufio_prefetch(struct dm_bufio_client *c,
- sector_t block, unsigned n_blocks)
+ sector_t block, unsigned int n_blocks)
{
struct blk_plug plug;
@@ -1170,6 +1182,7 @@ void dm_bufio_prefetch(struct dm_bufio_client *c,
for (; n_blocks--; block++) {
int need_submit;
struct dm_buffer *b;
+
b = __bufio_new(c, block, NF_PREFETCH, &need_submit,
&write_list);
if (unlikely(!list_empty(&write_list))) {
@@ -1232,7 +1245,7 @@ void dm_bufio_release(struct dm_buffer *b)
EXPORT_SYMBOL_GPL(dm_bufio_release);
void dm_bufio_mark_partial_buffer_dirty(struct dm_buffer *b,
- unsigned start, unsigned end)
+ unsigned int start, unsigned int end)
{
struct dm_bufio_client *c = b->c;
@@ -1454,6 +1467,7 @@ retry:
__link_buffer(b, new_block, LIST_DIRTY);
} else {
sector_t old_block;
+
wait_on_bit_lock_io(&b->state, B_WRITING,
TASK_UNINTERRUPTIBLE);
/*
@@ -1529,13 +1543,13 @@ void dm_bufio_forget_buffers(struct dm_bufio_client *c, sector_t block, sector_t
}
EXPORT_SYMBOL_GPL(dm_bufio_forget_buffers);
-void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned n)
+void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned int n)
{
c->minimum_buffers = n;
}
EXPORT_SYMBOL_GPL(dm_bufio_set_minimum_buffers);
-unsigned dm_bufio_get_block_size(struct dm_bufio_client *c)
+unsigned int dm_bufio_get_block_size(struct dm_bufio_client *c)
{
return c->block_size;
}
@@ -1544,6 +1558,7 @@ EXPORT_SYMBOL_GPL(dm_bufio_get_block_size);
sector_t dm_bufio_get_device_size(struct dm_bufio_client *c)
{
sector_t s = bdev_nr_sectors(c->bdev);
+
if (s >= c->start)
s -= c->start;
else
@@ -1659,10 +1674,12 @@ static bool __try_evict_buffer(struct dm_buffer *b, gfp_t gfp)
static unsigned long get_retain_buffers(struct dm_bufio_client *c)
{
unsigned long retain_bytes = READ_ONCE(dm_bufio_retain_bytes);
+
if (likely(c->sectors_per_block_bits >= 0))
retain_bytes >>= c->sectors_per_block_bits + SECTOR_SHIFT;
else
retain_bytes /= c->block_size;
+
return retain_bytes;
}
@@ -1734,15 +1751,15 @@ static unsigned long dm_bufio_shrink_count(struct shrinker *shrink, struct shrin
/*
* Create the buffering interface
*/
-struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsigned block_size,
- unsigned reserved_buffers, unsigned aux_size,
+struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsigned int block_size,
+ unsigned int reserved_buffers, unsigned int aux_size,
void (*alloc_callback)(struct dm_buffer *),
void (*write_callback)(struct dm_buffer *),
unsigned int flags)
{
int r;
struct dm_bufio_client *c;
- unsigned i;
+ unsigned int i;
char slab_name[27];
if (!block_size || block_size & ((1 << SECTOR_SHIFT) - 1)) {
@@ -1796,8 +1813,9 @@ struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsign
if (block_size <= KMALLOC_MAX_SIZE &&
(block_size < PAGE_SIZE || !is_power_of_2(block_size))) {
- unsigned align = min(1U << __ffs(block_size), (unsigned)PAGE_SIZE);
- snprintf(slab_name, sizeof slab_name, "dm_bufio_cache-%u", block_size);
+ unsigned int align = min(1U << __ffs(block_size), (unsigned int)PAGE_SIZE);
+
+ snprintf(slab_name, sizeof(slab_name), "dm_bufio_cache-%u", block_size);
c->slab_cache = kmem_cache_create(slab_name, block_size, align,
SLAB_RECLAIM_ACCOUNT, NULL);
if (!c->slab_cache) {
@@ -1806,9 +1824,9 @@ struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsign
}
}
if (aux_size)
- snprintf(slab_name, sizeof slab_name, "dm_bufio_buffer-%u", aux_size);
+ snprintf(slab_name, sizeof(slab_name), "dm_bufio_buffer-%u", aux_size);
else
- snprintf(slab_name, sizeof slab_name, "dm_bufio_buffer");
+ snprintf(slab_name, sizeof(slab_name), "dm_bufio_buffer");
c->slab_buffer = kmem_cache_create(slab_name, sizeof(struct dm_buffer) + aux_size,
0, SLAB_RECLAIM_ACCOUNT, NULL);
if (!c->slab_buffer) {
@@ -1833,7 +1851,7 @@ struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsign
c->shrinker.scan_objects = dm_bufio_shrink_scan;
c->shrinker.seeks = 1;
c->shrinker.batch = 0;
- r = register_shrinker(&c->shrinker, "md-%s:(%u:%u)", slab_name,
+ r = register_shrinker(&c->shrinker, "dm-bufio:(%u:%u)",
MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev));
if (r)
goto bad;
@@ -1872,7 +1890,7 @@ EXPORT_SYMBOL_GPL(dm_bufio_client_create);
*/
void dm_bufio_client_destroy(struct dm_bufio_client *c)
{
- unsigned i;
+ unsigned int i;
drop_buffers(c);
@@ -1920,9 +1938,9 @@ void dm_bufio_set_sector_offset(struct dm_bufio_client *c, sector_t start)
}
EXPORT_SYMBOL_GPL(dm_bufio_set_sector_offset);
-static unsigned get_max_age_hz(void)
+static unsigned int get_max_age_hz(void)
{
- unsigned max_age = READ_ONCE(dm_bufio_max_age);
+ unsigned int max_age = READ_ONCE(dm_bufio_max_age);
if (max_age > UINT_MAX / HZ)
max_age = UINT_MAX / HZ;
@@ -1973,7 +1991,7 @@ static void do_global_cleanup(struct work_struct *w)
struct dm_bufio_client *locked_client = NULL;
struct dm_bufio_client *current_client;
struct dm_buffer *b;
- unsigned spinlock_hold_count;
+ unsigned int spinlock_hold_count;
unsigned long threshold = dm_bufio_cache_size -
dm_bufio_cache_size / DM_BUFIO_LOW_WATERMARK_RATIO;
unsigned long loops = global_num * 2;
@@ -2059,9 +2077,11 @@ static void work_fn(struct work_struct *w)
DM_BUFIO_WORK_TIMER_SECS * HZ);
}
-/*----------------------------------------------------------------
+/*
+ *--------------------------------------------------------------
* Module setup
- *--------------------------------------------------------------*/
+ *--------------------------------------------------------------
+ */
/*
* This is called only once for the whole dm_bufio module.
@@ -2145,28 +2165,28 @@ static void __exit dm_bufio_exit(void)
module_init(dm_bufio_init)
module_exit(dm_bufio_exit)
-module_param_named(max_cache_size_bytes, dm_bufio_cache_size, ulong, S_IRUGO | S_IWUSR);
+module_param_named(max_cache_size_bytes, dm_bufio_cache_size, ulong, 0644);
MODULE_PARM_DESC(max_cache_size_bytes, "Size of metadata cache");
-module_param_named(max_age_seconds, dm_bufio_max_age, uint, S_IRUGO | S_IWUSR);
+module_param_named(max_age_seconds, dm_bufio_max_age, uint, 0644);
MODULE_PARM_DESC(max_age_seconds, "Max age of a buffer in seconds");
-module_param_named(retain_bytes, dm_bufio_retain_bytes, ulong, S_IRUGO | S_IWUSR);
+module_param_named(retain_bytes, dm_bufio_retain_bytes, ulong, 0644);
MODULE_PARM_DESC(retain_bytes, "Try to keep at least this many bytes cached in memory");
-module_param_named(peak_allocated_bytes, dm_bufio_peak_allocated, ulong, S_IRUGO | S_IWUSR);
+module_param_named(peak_allocated_bytes, dm_bufio_peak_allocated, ulong, 0644);
MODULE_PARM_DESC(peak_allocated_bytes, "Tracks the maximum allocated memory");
-module_param_named(allocated_kmem_cache_bytes, dm_bufio_allocated_kmem_cache, ulong, S_IRUGO);
+module_param_named(allocated_kmem_cache_bytes, dm_bufio_allocated_kmem_cache, ulong, 0444);
MODULE_PARM_DESC(allocated_kmem_cache_bytes, "Memory allocated with kmem_cache_alloc");
-module_param_named(allocated_get_free_pages_bytes, dm_bufio_allocated_get_free_pages, ulong, S_IRUGO);
+module_param_named(allocated_get_free_pages_bytes, dm_bufio_allocated_get_free_pages, ulong, 0444);
MODULE_PARM_DESC(allocated_get_free_pages_bytes, "Memory allocated with get_free_pages");
-module_param_named(allocated_vmalloc_bytes, dm_bufio_allocated_vmalloc, ulong, S_IRUGO);
+module_param_named(allocated_vmalloc_bytes, dm_bufio_allocated_vmalloc, ulong, 0444);
MODULE_PARM_DESC(allocated_vmalloc_bytes, "Memory allocated with vmalloc");
-module_param_named(current_allocated_bytes, dm_bufio_current_allocated, ulong, S_IRUGO);
+module_param_named(current_allocated_bytes, dm_bufio_current_allocated, ulong, 0444);
MODULE_PARM_DESC(current_allocated_bytes, "Memory currently used by the cache");
MODULE_AUTHOR("Mikulas Patocka <dm-devel@redhat.com>");
diff --git a/drivers/md/dm-builtin.c b/drivers/md/dm-builtin.c
index 8eb52e425141..e51076ea629e 100644
--- a/drivers/md/dm-builtin.c
+++ b/drivers/md/dm-builtin.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0-only
#include "dm-core.h"
/*
@@ -45,5 +45,4 @@ void dm_kobject_release(struct kobject *kobj)
{
complete(dm_get_completion_from_kobject(kobj));
}
-
EXPORT_SYMBOL(dm_kobject_release);
diff --git a/drivers/md/dm-cache-background-tracker.c b/drivers/md/dm-cache-background-tracker.c
index 84814e819e4c..9c5308298cf1 100644
--- a/drivers/md/dm-cache-background-tracker.c
+++ b/drivers/md/dm-cache-background-tracker.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2017 Red Hat. All rights reserved.
*
@@ -17,7 +18,7 @@ struct bt_work {
};
struct background_tracker {
- unsigned max_work;
+ unsigned int max_work;
atomic_t pending_promotes;
atomic_t pending_writebacks;
atomic_t pending_demotes;
@@ -29,7 +30,7 @@ struct background_tracker {
struct kmem_cache *work_cache;
};
-struct background_tracker *btracker_create(unsigned max_work)
+struct background_tracker *btracker_create(unsigned int max_work)
{
struct background_tracker *b = kmalloc(sizeof(*b), GFP_KERNEL);
@@ -60,6 +61,14 @@ EXPORT_SYMBOL_GPL(btracker_create);
void btracker_destroy(struct background_tracker *b)
{
+ struct bt_work *w, *tmp;
+
+ BUG_ON(!list_empty(&b->issued));
+ list_for_each_entry_safe (w, tmp, &b->queued, list) {
+ list_del(&w->list);
+ kmem_cache_free(b->work_cache, w);
+ }
+
kmem_cache_destroy(b->work_cache);
kfree(b);
}
@@ -147,13 +156,13 @@ static void update_stats(struct background_tracker *b, struct policy_work *w, in
}
}
-unsigned btracker_nr_writebacks_queued(struct background_tracker *b)
+unsigned int btracker_nr_writebacks_queued(struct background_tracker *b)
{
return atomic_read(&b->pending_writebacks);
}
EXPORT_SYMBOL_GPL(btracker_nr_writebacks_queued);
-unsigned btracker_nr_demotions_queued(struct background_tracker *b)
+unsigned int btracker_nr_demotions_queued(struct background_tracker *b)
{
return atomic_read(&b->pending_demotes);
}
diff --git a/drivers/md/dm-cache-background-tracker.h b/drivers/md/dm-cache-background-tracker.h
index 27ab90dbc275..5b8f5c667b81 100644
--- a/drivers/md/dm-cache-background-tracker.h
+++ b/drivers/md/dm-cache-background-tracker.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2017 Red Hat. All rights reserved.
*
@@ -12,19 +13,44 @@
/*----------------------------------------------------------------*/
+/*
+ * The cache policy decides what background work should be performed,
+ * such as promotions, demotions and writebacks. The core cache target
+ * is in charge of performing the work, and does so when it sees fit.
+ *
+ * The background_tracker acts as a go between. Keeping track of future
+ * work that the policy has decided upon, and handing (issuing) it to
+ * the core target when requested.
+ *
+ * There is no locking in this, so calls will probably need to be
+ * protected with a spinlock.
+ */
+
struct background_work;
struct background_tracker;
/*
- * FIXME: discuss lack of locking in all methods.
+ * Create a new tracker, it will not be able to queue more than
+ * 'max_work' entries.
+ */
+struct background_tracker *btracker_create(unsigned int max_work);
+
+/*
+ * Destroy the tracker. No issued, but not complete, work should
+ * exist when this is called. It is fine to have queued but unissued
+ * work.
*/
-struct background_tracker *btracker_create(unsigned max_work);
void btracker_destroy(struct background_tracker *b);
-unsigned btracker_nr_writebacks_queued(struct background_tracker *b);
-unsigned btracker_nr_demotions_queued(struct background_tracker *b);
+unsigned int btracker_nr_writebacks_queued(struct background_tracker *b);
+unsigned int btracker_nr_demotions_queued(struct background_tracker *b);
/*
+ * Queue some work within the tracker. 'work' should point to the work
+ * to queue, this will be copied (ownership doesn't pass). If pwork
+ * is not NULL then it will be set to point to the tracker's internal
+ * copy of the work.
+ *
* returns -EINVAL iff the work is already queued. -ENOMEM if the work
* couldn't be queued for another reason.
*/
@@ -33,11 +59,20 @@ int btracker_queue(struct background_tracker *b,
struct policy_work **pwork);
/*
+ * Hands out the next piece of work to be performed.
* Returns -ENODATA if there's no work.
*/
int btracker_issue(struct background_tracker *b, struct policy_work **work);
-void btracker_complete(struct background_tracker *b,
- struct policy_work *op);
+
+/*
+ * Informs the tracker that the work has been completed and it may forget
+ * about it.
+ */
+void btracker_complete(struct background_tracker *b, struct policy_work *op);
+
+/*
+ * Predicate to see if an origin block is already scheduled for promotion.
+ */
bool btracker_promotion_already_present(struct background_tracker *b,
dm_oblock_t oblock);
diff --git a/drivers/md/dm-cache-block-types.h b/drivers/md/dm-cache-block-types.h
index 389c9e8ac785..57d2534c59ce 100644
--- a/drivers/md/dm-cache-block-types.h
+++ b/drivers/md/dm-cache-block-types.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2012 Red Hat, Inc.
*
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
index 83a5975bcc72..acffed750e3e 100644
--- a/drivers/md/dm-cache-metadata.c
+++ b/drivers/md/dm-cache-metadata.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2012 Red Hat, Inc.
*
@@ -104,7 +105,7 @@ struct dm_cache_metadata {
refcount_t ref_count;
struct list_head list;
- unsigned version;
+ unsigned int version;
struct block_device *bdev;
struct dm_block_manager *bm;
struct dm_space_map *metadata_sm;
@@ -129,7 +130,7 @@ struct dm_cache_metadata {
bool clean_when_opened:1;
char policy_name[CACHE_POLICY_NAME_SIZE];
- unsigned policy_version[CACHE_POLICY_VERSION_SIZE];
+ unsigned int policy_version[CACHE_POLICY_VERSION_SIZE];
size_t policy_hint_size;
struct dm_cache_statistics stats;
@@ -162,10 +163,11 @@ struct dm_cache_metadata {
struct dm_bitset_cursor dirty_cursor;
};
-/*-------------------------------------------------------------------
+/*
+ *-----------------------------------------------------------------
* superblock validator
- *-----------------------------------------------------------------*/
-
+ *-----------------------------------------------------------------
+ */
#define SUPERBLOCK_CSUM_XOR 9031977
static void sb_prepare_for_write(struct dm_block_validator *v,
@@ -201,15 +203,15 @@ static int sb_check(struct dm_block_validator *v,
__le32 csum_le;
if (dm_block_location(b) != le64_to_cpu(disk_super->blocknr)) {
- DMERR("sb_check failed: blocknr %llu: wanted %llu",
- le64_to_cpu(disk_super->blocknr),
+ DMERR("%s failed: blocknr %llu: wanted %llu",
+ __func__, le64_to_cpu(disk_super->blocknr),
(unsigned long long)dm_block_location(b));
return -ENOTBLK;
}
if (le64_to_cpu(disk_super->magic) != CACHE_SUPERBLOCK_MAGIC) {
- DMERR("sb_check failed: magic %llu: wanted %llu",
- le64_to_cpu(disk_super->magic),
+ DMERR("%s failed: magic %llu: wanted %llu",
+ __func__, le64_to_cpu(disk_super->magic),
(unsigned long long)CACHE_SUPERBLOCK_MAGIC);
return -EILSEQ;
}
@@ -218,8 +220,8 @@ static int sb_check(struct dm_block_validator *v,
sb_block_size - sizeof(__le32),
SUPERBLOCK_CSUM_XOR));
if (csum_le != disk_super->csum) {
- DMERR("sb_check failed: csum %u: wanted %u",
- le32_to_cpu(csum_le), le32_to_cpu(disk_super->csum));
+ DMERR("%s failed: csum %u: wanted %u",
+ __func__, le32_to_cpu(csum_le), le32_to_cpu(disk_super->csum));
return -EILSEQ;
}
@@ -260,10 +262,10 @@ static int superblock_lock(struct dm_cache_metadata *cmd,
static int __superblock_all_zeroes(struct dm_block_manager *bm, bool *result)
{
int r;
- unsigned i;
+ unsigned int i;
struct dm_block *b;
__le64 *data_le, zero = cpu_to_le64(0);
- unsigned sb_block_size = dm_bm_block_size(bm) / sizeof(__le64);
+ unsigned int sb_block_size = dm_bm_block_size(bm) / sizeof(__le64);
/*
* We can't use a validator here - it may be all zeroes.
@@ -533,6 +535,7 @@ static int __create_persistent_data_objects(struct dm_cache_metadata *cmd,
bool may_format_device)
{
int r;
+
cmd->bm = dm_block_manager_create(cmd->bdev, DM_CACHE_METADATA_BLOCK_SIZE << SECTOR_SHIFT,
CACHE_MAX_CONCURRENT_LOCKS);
if (IS_ERR(cmd->bm)) {
@@ -566,6 +569,7 @@ static void update_flags(struct cache_disk_superblock *disk_super,
flags_mutator mutator)
{
uint32_t sb_flags = mutator(le32_to_cpu(disk_super->flags));
+
disk_super->flags = cpu_to_le32(sb_flags);
}
@@ -727,18 +731,20 @@ static int __commit_transaction(struct dm_cache_metadata *cmd,
*/
#define FLAGS_MASK ((1 << 16) - 1)
-static __le64 pack_value(dm_oblock_t block, unsigned flags)
+static __le64 pack_value(dm_oblock_t block, unsigned int flags)
{
uint64_t value = from_oblock(block);
+
value <<= 16;
value = value | (flags & FLAGS_MASK);
return cpu_to_le64(value);
}
-static void unpack_value(__le64 value_le, dm_oblock_t *block, unsigned *flags)
+static void unpack_value(__le64 value_le, dm_oblock_t *block, unsigned int *flags)
{
uint64_t value = le64_to_cpu(value_le);
uint64_t b = value >> 16;
+
*block = to_oblock(b);
*flags = value & FLAGS_MASK;
}
@@ -749,7 +755,7 @@ static struct dm_cache_metadata *metadata_open(struct block_device *bdev,
sector_t data_block_size,
bool may_format_device,
size_t policy_hint_size,
- unsigned metadata_version)
+ unsigned int metadata_version)
{
int r;
struct dm_cache_metadata *cmd;
@@ -810,7 +816,7 @@ static struct dm_cache_metadata *lookup_or_open(struct block_device *bdev,
sector_t data_block_size,
bool may_format_device,
size_t policy_hint_size,
- unsigned metadata_version)
+ unsigned int metadata_version)
{
struct dm_cache_metadata *cmd, *cmd2;
@@ -855,7 +861,7 @@ struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
sector_t data_block_size,
bool may_format_device,
size_t policy_hint_size,
- unsigned metadata_version)
+ unsigned int metadata_version)
{
struct dm_cache_metadata *cmd = lookup_or_open(bdev, data_block_size, may_format_device,
policy_hint_size, metadata_version);
@@ -890,7 +896,7 @@ static int block_clean_combined_dirty(struct dm_cache_metadata *cmd, dm_cblock_t
int r;
__le64 value;
dm_oblock_t ob;
- unsigned flags;
+ unsigned int flags;
r = dm_array_get_value(&cmd->info, cmd->root, from_cblock(b), &value);
if (r)
@@ -1009,13 +1015,13 @@ static bool cmd_write_lock(struct dm_cache_metadata *cmd)
do { \
if (!cmd_write_lock((cmd))) \
return -EINVAL; \
- } while(0)
+ } while (0)
#define WRITE_LOCK_VOID(cmd) \
do { \
if (!cmd_write_lock((cmd))) \
return; \
- } while(0)
+ } while (0)
#define WRITE_UNLOCK(cmd) \
up_write(&(cmd)->root_lock)
@@ -1034,13 +1040,13 @@ static bool cmd_read_lock(struct dm_cache_metadata *cmd)
do { \
if (!cmd_read_lock((cmd))) \
return -EINVAL; \
- } while(0)
+ } while (0)
#define READ_LOCK_VOID(cmd) \
do { \
if (!cmd_read_lock((cmd))) \
return; \
- } while(0)
+ } while (0)
#define READ_UNLOCK(cmd) \
up_read(&(cmd)->root_lock)
@@ -1252,6 +1258,7 @@ static int __insert(struct dm_cache_metadata *cmd,
{
int r;
__le64 value = pack_value(oblock, M_VALID);
+
__dm_bless_for_disk(&value);
r = dm_array_set_value(&cmd->info, cmd->root, from_cblock(cblock),
@@ -1288,7 +1295,7 @@ static bool policy_unchanged(struct dm_cache_metadata *cmd,
struct dm_cache_policy *policy)
{
const char *policy_name = dm_cache_policy_get_name(policy);
- const unsigned *policy_version = dm_cache_policy_get_version(policy);
+ const unsigned int *policy_version = dm_cache_policy_get_version(policy);
size_t policy_hint_size = dm_cache_policy_get_hint_size(policy);
/*
@@ -1339,7 +1346,7 @@ static int __load_mapping_v1(struct dm_cache_metadata *cmd,
__le32 *hint_value_le;
dm_oblock_t oblock;
- unsigned flags;
+ unsigned int flags;
bool dirty = true;
dm_array_cursor_get_value(mapping_cursor, (void **) &mapping_value_le);
@@ -1381,7 +1388,7 @@ static int __load_mapping_v2(struct dm_cache_metadata *cmd,
__le32 *hint_value_le;
dm_oblock_t oblock;
- unsigned flags;
+ unsigned int flags;
bool dirty = true;
dm_array_cursor_get_value(mapping_cursor, (void **) &mapping_value_le);
@@ -1513,7 +1520,7 @@ static int __dump_mapping(void *context, uint64_t cblock, void *leaf)
{
__le64 value;
dm_oblock_t oblock;
- unsigned flags;
+ unsigned int flags;
memcpy(&value, leaf, sizeof(value));
unpack_value(value, &oblock, &flags);
@@ -1547,7 +1554,7 @@ int dm_cache_changed_this_transaction(struct dm_cache_metadata *cmd)
static int __dirty(struct dm_cache_metadata *cmd, dm_cblock_t cblock, bool dirty)
{
int r;
- unsigned flags;
+ unsigned int flags;
dm_oblock_t oblock;
__le64 value;
@@ -1574,10 +1581,11 @@ static int __dirty(struct dm_cache_metadata *cmd, dm_cblock_t cblock, bool dirty
}
-static int __set_dirty_bits_v1(struct dm_cache_metadata *cmd, unsigned nr_bits, unsigned long *bits)
+static int __set_dirty_bits_v1(struct dm_cache_metadata *cmd, unsigned int nr_bits, unsigned long *bits)
{
int r;
- unsigned i;
+ unsigned int i;
+
for (i = 0; i < nr_bits; i++) {
r = __dirty(cmd, to_cblock(i), test_bit(i, bits));
if (r)
@@ -1594,7 +1602,7 @@ static int is_dirty_callback(uint32_t index, bool *value, void *context)
return 0;
}
-static int __set_dirty_bits_v2(struct dm_cache_metadata *cmd, unsigned nr_bits, unsigned long *bits)
+static int __set_dirty_bits_v2(struct dm_cache_metadata *cmd, unsigned int nr_bits, unsigned long *bits)
{
int r = 0;
@@ -1613,7 +1621,7 @@ static int __set_dirty_bits_v2(struct dm_cache_metadata *cmd, unsigned nr_bits,
}
int dm_cache_set_dirty_bits(struct dm_cache_metadata *cmd,
- unsigned nr_bits,
+ unsigned int nr_bits,
unsigned long *bits)
{
int r;
@@ -1712,7 +1720,7 @@ static int write_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *po
int r;
size_t hint_size;
const char *policy_name = dm_cache_policy_get_name(policy);
- const unsigned *policy_version = dm_cache_policy_get_version(policy);
+ const unsigned int *policy_version = dm_cache_policy_get_version(policy);
if (!policy_name[0] ||
(strlen(policy_name) > sizeof(cmd->policy_name) - 1))
diff --git a/drivers/md/dm-cache-metadata.h b/drivers/md/dm-cache-metadata.h
index 0905f2c1615e..57afc7047947 100644
--- a/drivers/md/dm-cache-metadata.h
+++ b/drivers/md/dm-cache-metadata.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2012 Red Hat, Inc.
*
@@ -60,7 +61,7 @@ struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
sector_t data_block_size,
bool may_format_device,
size_t policy_hint_size,
- unsigned metadata_version);
+ unsigned int metadata_version);
void dm_cache_metadata_close(struct dm_cache_metadata *cmd);
@@ -96,7 +97,7 @@ int dm_cache_load_mappings(struct dm_cache_metadata *cmd,
void *context);
int dm_cache_set_dirty_bits(struct dm_cache_metadata *cmd,
- unsigned nr_bits, unsigned long *bits);
+ unsigned int nr_bits, unsigned long *bits);
struct dm_cache_statistics {
uint32_t read_hits;
diff --git a/drivers/md/dm-cache-policy-internal.h b/drivers/md/dm-cache-policy-internal.h
index 56f0a23f698c..476a4f6794fc 100644
--- a/drivers/md/dm-cache-policy-internal.h
+++ b/drivers/md/dm-cache-policy-internal.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2012 Red Hat. All rights reserved.
*
@@ -85,9 +86,10 @@ static inline void policy_tick(struct dm_cache_policy *p, bool can_block)
}
static inline int policy_emit_config_values(struct dm_cache_policy *p, char *result,
- unsigned maxlen, ssize_t *sz_ptr)
+ unsigned int maxlen, ssize_t *sz_ptr)
{
ssize_t sz = *sz_ptr;
+
if (p->emit_config_values)
return p->emit_config_values(p, result, maxlen, sz_ptr);
@@ -112,20 +114,22 @@ static inline void policy_allow_migrations(struct dm_cache_policy *p, bool allow
/*
* Some utility functions commonly used by policies and the core target.
*/
-static inline size_t bitset_size_in_bytes(unsigned nr_entries)
+static inline size_t bitset_size_in_bytes(unsigned int nr_entries)
{
return sizeof(unsigned long) * dm_div_up(nr_entries, BITS_PER_LONG);
}
-static inline unsigned long *alloc_bitset(unsigned nr_entries)
+static inline unsigned long *alloc_bitset(unsigned int nr_entries)
{
size_t s = bitset_size_in_bytes(nr_entries);
+
return vzalloc(s);
}
-static inline void clear_bitset(void *bitset, unsigned nr_entries)
+static inline void clear_bitset(void *bitset, unsigned int nr_entries)
{
size_t s = bitset_size_in_bytes(nr_entries);
+
memset(bitset, 0, s);
}
@@ -154,7 +158,7 @@ void dm_cache_policy_destroy(struct dm_cache_policy *p);
*/
const char *dm_cache_policy_get_name(struct dm_cache_policy *p);
-const unsigned *dm_cache_policy_get_version(struct dm_cache_policy *p);
+const unsigned int *dm_cache_policy_get_version(struct dm_cache_policy *p);
size_t dm_cache_policy_get_hint_size(struct dm_cache_policy *p);
diff --git a/drivers/md/dm-cache-policy-smq.c b/drivers/md/dm-cache-policy-smq.c
index a3d281fc14c3..493a8715dc8f 100644
--- a/drivers/md/dm-cache-policy-smq.c
+++ b/drivers/md/dm-cache-policy-smq.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2015 Red Hat. All rights reserved.
*
@@ -23,12 +24,12 @@
/*
* Safe division functions that return zero on divide by zero.
*/
-static unsigned safe_div(unsigned n, unsigned d)
+static unsigned int safe_div(unsigned int n, unsigned int d)
{
return d ? n / d : 0u;
}
-static unsigned safe_mod(unsigned n, unsigned d)
+static unsigned int safe_mod(unsigned int n, unsigned int d)
{
return d ? n % d : 0u;
}
@@ -36,10 +37,10 @@ static unsigned safe_mod(unsigned n, unsigned d)
/*----------------------------------------------------------------*/
struct entry {
- unsigned hash_next:28;
- unsigned prev:28;
- unsigned next:28;
- unsigned level:6;
+ unsigned int hash_next:28;
+ unsigned int prev:28;
+ unsigned int next:28;
+ unsigned int level:6;
bool dirty:1;
bool allocated:1;
bool sentinel:1;
@@ -62,7 +63,7 @@ struct entry_space {
struct entry *end;
};
-static int space_init(struct entry_space *es, unsigned nr_entries)
+static int space_init(struct entry_space *es, unsigned int nr_entries)
{
if (!nr_entries) {
es->begin = es->end = NULL;
@@ -82,7 +83,7 @@ static void space_exit(struct entry_space *es)
vfree(es->begin);
}
-static struct entry *__get_entry(struct entry_space *es, unsigned block)
+static struct entry *__get_entry(struct entry_space *es, unsigned int block)
{
struct entry *e;
@@ -92,13 +93,13 @@ static struct entry *__get_entry(struct entry_space *es, unsigned block)
return e;
}
-static unsigned to_index(struct entry_space *es, struct entry *e)
+static unsigned int to_index(struct entry_space *es, struct entry *e)
{
BUG_ON(e < es->begin || e >= es->end);
return e - es->begin;
}
-static struct entry *to_entry(struct entry_space *es, unsigned block)
+static struct entry *to_entry(struct entry_space *es, unsigned int block)
{
if (block == INDEXER_NULL)
return NULL;
@@ -109,8 +110,8 @@ static struct entry *to_entry(struct entry_space *es, unsigned block)
/*----------------------------------------------------------------*/
struct ilist {
- unsigned nr_elts; /* excluding sentinel entries */
- unsigned head, tail;
+ unsigned int nr_elts; /* excluding sentinel entries */
+ unsigned int head, tail;
};
static void l_init(struct ilist *l)
@@ -252,23 +253,23 @@ static struct entry *l_pop_tail(struct entry_space *es, struct ilist *l)
struct queue {
struct entry_space *es;
- unsigned nr_elts;
- unsigned nr_levels;
+ unsigned int nr_elts;
+ unsigned int nr_levels;
struct ilist qs[MAX_LEVELS];
/*
* We maintain a count of the number of entries we would like in each
* level.
*/
- unsigned last_target_nr_elts;
- unsigned nr_top_levels;
- unsigned nr_in_top_levels;
- unsigned target_count[MAX_LEVELS];
+ unsigned int last_target_nr_elts;
+ unsigned int nr_top_levels;
+ unsigned int nr_in_top_levels;
+ unsigned int target_count[MAX_LEVELS];
};
-static void q_init(struct queue *q, struct entry_space *es, unsigned nr_levels)
+static void q_init(struct queue *q, struct entry_space *es, unsigned int nr_levels)
{
- unsigned i;
+ unsigned int i;
q->es = es;
q->nr_elts = 0;
@@ -284,7 +285,7 @@ static void q_init(struct queue *q, struct entry_space *es, unsigned nr_levels)
q->nr_in_top_levels = 0u;
}
-static unsigned q_size(struct queue *q)
+static unsigned int q_size(struct queue *q)
{
return q->nr_elts;
}
@@ -332,9 +333,9 @@ static void q_del(struct queue *q, struct entry *e)
/*
* Return the oldest entry of the lowest populated level.
*/
-static struct entry *q_peek(struct queue *q, unsigned max_level, bool can_cross_sentinel)
+static struct entry *q_peek(struct queue *q, unsigned int max_level, bool can_cross_sentinel)
{
- unsigned level;
+ unsigned int level;
struct entry *e;
max_level = min(max_level, q->nr_levels);
@@ -369,7 +370,7 @@ static struct entry *q_pop(struct queue *q)
* used by redistribute, so we know this is true. It also doesn't adjust
* the q->nr_elts count.
*/
-static struct entry *__redist_pop_from(struct queue *q, unsigned level)
+static struct entry *__redist_pop_from(struct queue *q, unsigned int level)
{
struct entry *e;
@@ -383,9 +384,10 @@ static struct entry *__redist_pop_from(struct queue *q, unsigned level)
return NULL;
}
-static void q_set_targets_subrange_(struct queue *q, unsigned nr_elts, unsigned lbegin, unsigned lend)
+static void q_set_targets_subrange_(struct queue *q, unsigned int nr_elts,
+ unsigned int lbegin, unsigned int lend)
{
- unsigned level, nr_levels, entries_per_level, remainder;
+ unsigned int level, nr_levels, entries_per_level, remainder;
BUG_ON(lbegin > lend);
BUG_ON(lend > q->nr_levels);
@@ -426,7 +428,7 @@ static void q_set_targets(struct queue *q)
static void q_redistribute(struct queue *q)
{
- unsigned target, level;
+ unsigned int target, level;
struct ilist *l, *l_above;
struct entry *e;
@@ -467,12 +469,12 @@ static void q_redistribute(struct queue *q)
}
}
-static void q_requeue(struct queue *q, struct entry *e, unsigned extra_levels,
+static void q_requeue(struct queue *q, struct entry *e, unsigned int extra_levels,
struct entry *s1, struct entry *s2)
{
struct entry *de;
- unsigned sentinels_passed = 0;
- unsigned new_level = min(q->nr_levels - 1u, e->level + extra_levels);
+ unsigned int sentinels_passed = 0;
+ unsigned int new_level = min(q->nr_levels - 1u, e->level + extra_levels);
/* try and find an entry to swap with */
if (extra_levels && (e->level < q->nr_levels - 1u)) {
@@ -512,9 +514,9 @@ static void q_requeue(struct queue *q, struct entry *e, unsigned extra_levels,
#define EIGHTH (1u << (FP_SHIFT - 3u))
struct stats {
- unsigned hit_threshold;
- unsigned hits;
- unsigned misses;
+ unsigned int hit_threshold;
+ unsigned int hits;
+ unsigned int misses;
};
enum performance {
@@ -523,7 +525,7 @@ enum performance {
Q_WELL
};
-static void stats_init(struct stats *s, unsigned nr_levels)
+static void stats_init(struct stats *s, unsigned int nr_levels)
{
s->hit_threshold = (nr_levels * 3u) / 4u;
s->hits = 0u;
@@ -535,7 +537,7 @@ static void stats_reset(struct stats *s)
s->hits = s->misses = 0u;
}
-static void stats_level_accessed(struct stats *s, unsigned level)
+static void stats_level_accessed(struct stats *s, unsigned int level)
{
if (level >= s->hit_threshold)
s->hits++;
@@ -556,7 +558,7 @@ static void stats_miss(struct stats *s)
*/
static enum performance stats_assess(struct stats *s)
{
- unsigned confidence = safe_div(s->hits << FP_SHIFT, s->hits + s->misses);
+ unsigned int confidence = safe_div(s->hits << FP_SHIFT, s->hits + s->misses);
if (confidence < SIXTEENTH)
return Q_POOR;
@@ -573,16 +575,16 @@ static enum performance stats_assess(struct stats *s)
struct smq_hash_table {
struct entry_space *es;
unsigned long long hash_bits;
- unsigned *buckets;
+ unsigned int *buckets;
};
/*
* All cache entries are stored in a chained hash table. To save space we
* use indexing again, and only store indexes to the next entry.
*/
-static int h_init(struct smq_hash_table *ht, struct entry_space *es, unsigned nr_entries)
+static int h_init(struct smq_hash_table *ht, struct entry_space *es, unsigned int nr_entries)
{
- unsigned i, nr_buckets;
+ unsigned int i, nr_buckets;
ht->es = es;
nr_buckets = roundup_pow_of_two(max(nr_entries / 4u, 16u));
@@ -603,7 +605,7 @@ static void h_exit(struct smq_hash_table *ht)
vfree(ht->buckets);
}
-static struct entry *h_head(struct smq_hash_table *ht, unsigned bucket)
+static struct entry *h_head(struct smq_hash_table *ht, unsigned int bucket)
{
return to_entry(ht->es, ht->buckets[bucket]);
}
@@ -613,7 +615,7 @@ static struct entry *h_next(struct smq_hash_table *ht, struct entry *e)
return to_entry(ht->es, e->hash_next);
}
-static void __h_insert(struct smq_hash_table *ht, unsigned bucket, struct entry *e)
+static void __h_insert(struct smq_hash_table *ht, unsigned int bucket, struct entry *e)
{
e->hash_next = ht->buckets[bucket];
ht->buckets[bucket] = to_index(ht->es, e);
@@ -621,11 +623,12 @@ static void __h_insert(struct smq_hash_table *ht, unsigned bucket, struct entry
static void h_insert(struct smq_hash_table *ht, struct entry *e)
{
- unsigned h = hash_64(from_oblock(e->oblock), ht->hash_bits);
+ unsigned int h = hash_64(from_oblock(e->oblock), ht->hash_bits);
+
__h_insert(ht, h, e);
}
-static struct entry *__h_lookup(struct smq_hash_table *ht, unsigned h, dm_oblock_t oblock,
+static struct entry *__h_lookup(struct smq_hash_table *ht, unsigned int h, dm_oblock_t oblock,
struct entry **prev)
{
struct entry *e;
@@ -641,7 +644,7 @@ static struct entry *__h_lookup(struct smq_hash_table *ht, unsigned h, dm_oblock
return NULL;
}
-static void __h_unlink(struct smq_hash_table *ht, unsigned h,
+static void __h_unlink(struct smq_hash_table *ht, unsigned int h,
struct entry *e, struct entry *prev)
{
if (prev)
@@ -656,7 +659,7 @@ static void __h_unlink(struct smq_hash_table *ht, unsigned h,
static struct entry *h_lookup(struct smq_hash_table *ht, dm_oblock_t oblock)
{
struct entry *e, *prev;
- unsigned h = hash_64(from_oblock(oblock), ht->hash_bits);
+ unsigned int h = hash_64(from_oblock(oblock), ht->hash_bits);
e = __h_lookup(ht, h, oblock, &prev);
if (e && prev) {
@@ -673,7 +676,7 @@ static struct entry *h_lookup(struct smq_hash_table *ht, dm_oblock_t oblock)
static void h_remove(struct smq_hash_table *ht, struct entry *e)
{
- unsigned h = hash_64(from_oblock(e->oblock), ht->hash_bits);
+ unsigned int h = hash_64(from_oblock(e->oblock), ht->hash_bits);
struct entry *prev;
/*
@@ -689,16 +692,16 @@ static void h_remove(struct smq_hash_table *ht, struct entry *e)
struct entry_alloc {
struct entry_space *es;
- unsigned begin;
+ unsigned int begin;
- unsigned nr_allocated;
+ unsigned int nr_allocated;
struct ilist free;
};
static void init_allocator(struct entry_alloc *ea, struct entry_space *es,
- unsigned begin, unsigned end)
+ unsigned int begin, unsigned int end)
{
- unsigned i;
+ unsigned int i;
ea->es = es;
ea->nr_allocated = 0u;
@@ -742,7 +745,7 @@ static struct entry *alloc_entry(struct entry_alloc *ea)
/*
* This assumes the cblock hasn't already been allocated.
*/
-static struct entry *alloc_particular_entry(struct entry_alloc *ea, unsigned i)
+static struct entry *alloc_particular_entry(struct entry_alloc *ea, unsigned int i)
{
struct entry *e = __get_entry(ea->es, ea->begin + i);
@@ -770,12 +773,12 @@ static bool allocator_empty(struct entry_alloc *ea)
return l_empty(&ea->free);
}
-static unsigned get_index(struct entry_alloc *ea, struct entry *e)
+static unsigned int get_index(struct entry_alloc *ea, struct entry *e)
{
return to_index(ea->es, e) - ea->begin;
}
-static struct entry *get_entry(struct entry_alloc *ea, unsigned index)
+static struct entry *get_entry(struct entry_alloc *ea, unsigned int index)
{
return __get_entry(ea->es, ea->begin + index);
}
@@ -800,9 +803,9 @@ struct smq_policy {
sector_t cache_block_size;
sector_t hotspot_block_size;
- unsigned nr_hotspot_blocks;
- unsigned cache_blocks_per_hotspot_block;
- unsigned hotspot_level_jump;
+ unsigned int nr_hotspot_blocks;
+ unsigned int cache_blocks_per_hotspot_block;
+ unsigned int hotspot_level_jump;
struct entry_space es;
struct entry_alloc writeback_sentinel_alloc;
@@ -831,7 +834,7 @@ struct smq_policy {
* Keeps track of time, incremented by the core. We use this to
* avoid attributing multiple hits within the same tick.
*/
- unsigned tick;
+ unsigned int tick;
/*
* The hash tables allows us to quickly find an entry by origin
@@ -846,8 +849,8 @@ struct smq_policy {
bool current_demote_sentinels;
unsigned long next_demote_period;
- unsigned write_promote_level;
- unsigned read_promote_level;
+ unsigned int write_promote_level;
+ unsigned int read_promote_level;
unsigned long next_hotspot_period;
unsigned long next_cache_period;
@@ -859,24 +862,24 @@ struct smq_policy {
/*----------------------------------------------------------------*/
-static struct entry *get_sentinel(struct entry_alloc *ea, unsigned level, bool which)
+static struct entry *get_sentinel(struct entry_alloc *ea, unsigned int level, bool which)
{
return get_entry(ea, which ? level : NR_CACHE_LEVELS + level);
}
-static struct entry *writeback_sentinel(struct smq_policy *mq, unsigned level)
+static struct entry *writeback_sentinel(struct smq_policy *mq, unsigned int level)
{
return get_sentinel(&mq->writeback_sentinel_alloc, level, mq->current_writeback_sentinels);
}
-static struct entry *demote_sentinel(struct smq_policy *mq, unsigned level)
+static struct entry *demote_sentinel(struct smq_policy *mq, unsigned int level)
{
return get_sentinel(&mq->demote_sentinel_alloc, level, mq->current_demote_sentinels);
}
static void __update_writeback_sentinels(struct smq_policy *mq)
{
- unsigned level;
+ unsigned int level;
struct queue *q = &mq->dirty;
struct entry *sentinel;
@@ -889,7 +892,7 @@ static void __update_writeback_sentinels(struct smq_policy *mq)
static void __update_demote_sentinels(struct smq_policy *mq)
{
- unsigned level;
+ unsigned int level;
struct queue *q = &mq->clean;
struct entry *sentinel;
@@ -917,7 +920,7 @@ static void update_sentinels(struct smq_policy *mq)
static void __sentinels_init(struct smq_policy *mq)
{
- unsigned level;
+ unsigned int level;
struct entry *sentinel;
for (level = 0; level < NR_CACHE_LEVELS; level++) {
@@ -1008,7 +1011,7 @@ static void requeue(struct smq_policy *mq, struct entry *e)
}
}
-static unsigned default_promote_level(struct smq_policy *mq)
+static unsigned int default_promote_level(struct smq_policy *mq)
{
/*
* The promote level depends on the current performance of the
@@ -1030,9 +1033,9 @@ static unsigned default_promote_level(struct smq_policy *mq)
1, 1, 1, 2, 4, 6, 7, 8, 7, 6, 4, 4, 3, 3, 2, 2, 1
};
- unsigned hits = mq->cache_stats.hits;
- unsigned misses = mq->cache_stats.misses;
- unsigned index = safe_div(hits << 4u, hits + misses);
+ unsigned int hits = mq->cache_stats.hits;
+ unsigned int misses = mq->cache_stats.misses;
+ unsigned int index = safe_div(hits << 4u, hits + misses);
return table[index];
}
@@ -1042,7 +1045,7 @@ static void update_promote_levels(struct smq_policy *mq)
* If there are unused cache entries then we want to be really
* eager to promote.
*/
- unsigned threshold_level = allocator_empty(&mq->cache_alloc) ?
+ unsigned int threshold_level = allocator_empty(&mq->cache_alloc) ?
default_promote_level(mq) : (NR_HOTSPOT_LEVELS / 2u);
threshold_level = max(threshold_level, NR_HOTSPOT_LEVELS);
@@ -1124,7 +1127,7 @@ static void end_cache_period(struct smq_policy *mq)
#define CLEAN_TARGET 25u
#define FREE_TARGET 25u
-static unsigned percent_to_target(struct smq_policy *mq, unsigned p)
+static unsigned int percent_to_target(struct smq_policy *mq, unsigned int p)
{
return from_cblock(mq->cache_size) * p / 100u;
}
@@ -1150,7 +1153,7 @@ static bool clean_target_met(struct smq_policy *mq, bool idle)
static bool free_target_met(struct smq_policy *mq)
{
- unsigned nr_free;
+ unsigned int nr_free;
nr_free = from_cblock(mq->cache_size) - mq->cache_alloc.nr_allocated;
return (nr_free + btracker_nr_demotions_queued(mq->bg_work)) >=
@@ -1300,7 +1303,7 @@ static dm_oblock_t to_hblock(struct smq_policy *mq, dm_oblock_t b)
static struct entry *update_hotspot_queue(struct smq_policy *mq, dm_oblock_t b)
{
- unsigned hi;
+ unsigned int hi;
dm_oblock_t hb = to_hblock(mq, b);
struct entry *e = h_lookup(&mq->hotspot_table, hb);
@@ -1549,7 +1552,7 @@ static void smq_clear_dirty(struct dm_cache_policy *p, dm_cblock_t cblock)
spin_unlock_irqrestore(&mq->lock, flags);
}
-static unsigned random_level(dm_cblock_t cblock)
+static unsigned int random_level(dm_cblock_t cblock)
{
return hash_32(from_cblock(cblock), 9) & (NR_CACHE_LEVELS - 1);
}
@@ -1631,6 +1634,7 @@ static void smq_tick(struct dm_cache_policy *p, bool can_block)
static void smq_allow_migrations(struct dm_cache_policy *p, bool allow)
{
struct smq_policy *mq = to_smq_policy(p);
+
mq->migrations_allowed = allow;
}
@@ -1660,7 +1664,7 @@ static int mq_set_config_value(struct dm_cache_policy *p,
}
static int mq_emit_config_values(struct dm_cache_policy *p, char *result,
- unsigned maxlen, ssize_t *sz_ptr)
+ unsigned int maxlen, ssize_t *sz_ptr)
{
ssize_t sz = *sz_ptr;
@@ -1699,16 +1703,16 @@ static void init_policy_functions(struct smq_policy *mq, bool mimic_mq)
static bool too_many_hotspot_blocks(sector_t origin_size,
sector_t hotspot_block_size,
- unsigned nr_hotspot_blocks)
+ unsigned int nr_hotspot_blocks)
{
return (hotspot_block_size * nr_hotspot_blocks) > origin_size;
}
static void calc_hotspot_params(sector_t origin_size,
sector_t cache_block_size,
- unsigned nr_cache_blocks,
+ unsigned int nr_cache_blocks,
sector_t *hotspot_block_size,
- unsigned *nr_hotspot_blocks)
+ unsigned int *nr_hotspot_blocks)
{
*hotspot_block_size = cache_block_size * 16u;
*nr_hotspot_blocks = max(nr_cache_blocks / 4u, 1024u);
@@ -1724,9 +1728,9 @@ static struct dm_cache_policy *__smq_create(dm_cblock_t cache_size,
bool mimic_mq,
bool migrations_allowed)
{
- unsigned i;
- unsigned nr_sentinels_per_queue = 2u * NR_CACHE_LEVELS;
- unsigned total_sentinels = 2u * nr_sentinels_per_queue;
+ unsigned int i;
+ unsigned int nr_sentinels_per_queue = 2u * NR_CACHE_LEVELS;
+ unsigned int total_sentinels = 2u * nr_sentinels_per_queue;
struct smq_policy *mq = kzalloc(sizeof(*mq), GFP_KERNEL);
if (!mq)
diff --git a/drivers/md/dm-cache-policy.c b/drivers/md/dm-cache-policy.c
index c1a3cee99b44..9330d5748895 100644
--- a/drivers/md/dm-cache-policy.c
+++ b/drivers/md/dm-cache-policy.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2012 Red Hat. All rights reserved.
*
@@ -154,7 +155,7 @@ const char *dm_cache_policy_get_name(struct dm_cache_policy *p)
}
EXPORT_SYMBOL_GPL(dm_cache_policy_get_name);
-const unsigned *dm_cache_policy_get_version(struct dm_cache_policy *p)
+const unsigned int *dm_cache_policy_get_version(struct dm_cache_policy *p)
{
struct dm_cache_policy_type *t = p->private;
diff --git a/drivers/md/dm-cache-policy.h b/drivers/md/dm-cache-policy.h
index 06eb31af626f..a1eedcc42677 100644
--- a/drivers/md/dm-cache-policy.h
+++ b/drivers/md/dm-cache-policy.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2012 Red Hat. All rights reserved.
*
@@ -75,7 +76,7 @@ struct dm_cache_policy {
* background work.
*/
int (*get_background_work)(struct dm_cache_policy *p, bool idle,
- struct policy_work **result);
+ struct policy_work **result);
/*
* You must pass in the same work pointer that you were given, not
@@ -128,7 +129,7 @@ struct dm_cache_policy {
* Configuration.
*/
int (*emit_config_values)(struct dm_cache_policy *p, char *result,
- unsigned maxlen, ssize_t *sz_ptr);
+ unsigned int maxlen, ssize_t *sz_ptr);
int (*set_config_value)(struct dm_cache_policy *p,
const char *key, const char *value);
@@ -157,7 +158,7 @@ struct dm_cache_policy_type {
* what gets passed on the target line to select your policy.
*/
char name[CACHE_POLICY_NAME_SIZE];
- unsigned version[CACHE_POLICY_VERSION_SIZE];
+ unsigned int version[CACHE_POLICY_VERSION_SIZE];
/*
* For use by an alias dm_cache_policy_type to point to the
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 5e92fac90b67..dbbcfa580078 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2012 Red Hat. All rights reserved.
*
@@ -180,15 +181,15 @@ static void continue_after_commit(struct batcher *b, struct continuation *k)
*/
static void issue_after_commit(struct batcher *b, struct bio *bio)
{
- bool commit_scheduled;
+ bool commit_scheduled;
- spin_lock_irq(&b->lock);
- commit_scheduled = b->commit_scheduled;
- bio_list_add(&b->bios, bio);
- spin_unlock_irq(&b->lock);
+ spin_lock_irq(&b->lock);
+ commit_scheduled = b->commit_scheduled;
+ bio_list_add(&b->bios, bio);
+ spin_unlock_irq(&b->lock);
- if (commit_scheduled)
- async_commit(b);
+ if (commit_scheduled)
+ async_commit(b);
}
/*
@@ -275,7 +276,7 @@ enum cache_io_mode {
struct cache_features {
enum cache_metadata_mode mode;
enum cache_io_mode io_mode;
- unsigned metadata_version;
+ unsigned int metadata_version;
bool discard_passdown:1;
};
@@ -362,7 +363,7 @@ struct cache {
* Rather than reconstructing the table line for the status we just
* save it and regurgitate.
*/
- unsigned nr_ctr_args;
+ unsigned int nr_ctr_args;
const char **ctr_args;
struct dm_kcopyd_client *copier;
@@ -378,7 +379,7 @@ struct cache {
unsigned long *dirty_bitset;
atomic_t nr_dirty;
- unsigned policy_nr_args;
+ unsigned int policy_nr_args;
struct dm_cache_policy *policy;
/*
@@ -409,7 +410,7 @@ struct cache {
struct per_bio_data {
bool tick:1;
- unsigned req_nr:2;
+ unsigned int req_nr:2;
struct dm_bio_prison_cell_v2 *cell;
struct dm_hook_info hook_info;
sector_t len;
@@ -517,20 +518,23 @@ static void build_key(dm_oblock_t begin, dm_oblock_t end, struct dm_cell_key_v2
#define WRITE_LOCK_LEVEL 0
#define READ_WRITE_LOCK_LEVEL 1
-static unsigned lock_level(struct bio *bio)
+static unsigned int lock_level(struct bio *bio)
{
return bio_data_dir(bio) == WRITE ?
WRITE_LOCK_LEVEL :
READ_WRITE_LOCK_LEVEL;
}
-/*----------------------------------------------------------------
+/*
+ *--------------------------------------------------------------
* Per bio data
- *--------------------------------------------------------------*/
+ *--------------------------------------------------------------
+ */
static struct per_bio_data *get_per_bio_data(struct bio *bio)
{
struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));
+
BUG_ON(!pb);
return pb;
}
@@ -687,6 +691,7 @@ static void clear_discard(struct cache *cache, dm_dblock_t b)
static bool is_discarded(struct cache *cache, dm_dblock_t b)
{
int r;
+
spin_lock_irq(&cache->lock);
r = test_bit(from_dblock(b), cache->discard_bitset);
spin_unlock_irq(&cache->lock);
@@ -697,6 +702,7 @@ static bool is_discarded(struct cache *cache, dm_dblock_t b)
static bool is_discarded_oblock(struct cache *cache, dm_oblock_t b)
{
int r;
+
spin_lock_irq(&cache->lock);
r = test_bit(from_dblock(oblock_to_dblock(cache, b)),
cache->discard_bitset);
@@ -705,9 +711,11 @@ static bool is_discarded_oblock(struct cache *cache, dm_oblock_t b)
return r;
}
-/*----------------------------------------------------------------
+/*
+ * -------------------------------------------------------------
* Remapping
- *--------------------------------------------------------------*/
+ *--------------------------------------------------------------
+ */
static void remap_to_origin(struct cache *cache, struct bio *bio)
{
bio_set_dev(bio, cache->origin_dev->bdev);
@@ -809,6 +817,7 @@ static void accounted_request(struct cache *cache, struct bio *bio)
static void issue_op(struct bio *bio, void *context)
{
struct cache *cache = context;
+
accounted_request(cache, bio);
}
@@ -833,9 +842,11 @@ static void remap_to_origin_and_cache(struct cache *cache, struct bio *bio,
remap_to_cache(cache, bio, cblock);
}
-/*----------------------------------------------------------------
+/*
+ *--------------------------------------------------------------
* Failure modes
- *--------------------------------------------------------------*/
+ *--------------------------------------------------------------
+ */
static enum cache_metadata_mode get_cache_mode(struct cache *cache)
{
return cache->features.mode;
@@ -848,7 +859,7 @@ static const char *cache_device_name(struct cache *cache)
static void notify_mode_switch(struct cache *cache, enum cache_metadata_mode mode)
{
- const char *descs[] = {
+ static const char *descs[] = {
"write",
"read-only",
"fail"
@@ -972,13 +983,14 @@ static void update_stats(struct cache_stats *stats, enum policy_operation op)
}
}
-/*----------------------------------------------------------------
+/*
+ *---------------------------------------------------------------------
* Migration processing
*
* Migration covers moving data from the origin device to the cache, or
* vice versa.
- *--------------------------------------------------------------*/
-
+ *---------------------------------------------------------------------
+ */
static void inc_io_migrations(struct cache *cache)
{
atomic_inc(&cache->nr_io_migrations);
@@ -1066,6 +1078,7 @@ static void quiesce(struct dm_cache_migration *mg,
static struct dm_cache_migration *ws_to_mg(struct work_struct *ws)
{
struct continuation *k = container_of(ws, struct continuation, ws);
+
return container_of(k, struct dm_cache_migration, k);
}
@@ -1217,6 +1230,7 @@ static void mg_complete(struct dm_cache_migration *mg, bool success)
static void mg_success(struct work_struct *ws)
{
struct dm_cache_migration *mg = ws_to_mg(ws);
+
mg_complete(mg, mg->k.input == 0);
}
@@ -1355,6 +1369,7 @@ static void mg_copy(struct work_struct *ws)
* Fallback to a real full copy after doing some tidying up.
*/
bool rb = bio_detain_shared(mg->cache, mg->op->oblock, mg->overwrite_bio);
+
BUG_ON(rb); /* An exclussive lock must _not_ be held for this block */
mg->overwrite_bio = NULL;
inc_io_migrations(mg->cache);
@@ -1430,9 +1445,11 @@ static int mg_start(struct cache *cache, struct policy_work *op, struct bio *bio
return mg_lock_writes(mg);
}
-/*----------------------------------------------------------------
+/*
+ *--------------------------------------------------------------
* invalidation processing
- *--------------------------------------------------------------*/
+ *--------------------------------------------------------------
+ */
static void invalidate_complete(struct dm_cache_migration *mg, bool success)
{
@@ -1455,12 +1472,15 @@ static void invalidate_complete(struct dm_cache_migration *mg, bool success)
static void invalidate_completed(struct work_struct *ws)
{
struct dm_cache_migration *mg = ws_to_mg(ws);
+
invalidate_complete(mg, !mg->k.input);
}
static int invalidate_cblock(struct cache *cache, dm_cblock_t cblock)
{
- int r = policy_invalidate_mapping(cache->policy, cblock);
+ int r;
+
+ r = policy_invalidate_mapping(cache->policy, cblock);
if (!r) {
r = dm_cache_remove_mapping(cache->cmd, cblock);
if (r) {
@@ -1553,9 +1573,11 @@ static int invalidate_start(struct cache *cache, dm_cblock_t cblock,
return invalidate_lock(mg);
}
-/*----------------------------------------------------------------
+/*
+ *--------------------------------------------------------------
* bio processing
- *--------------------------------------------------------------*/
+ *--------------------------------------------------------------
+ */
enum busy {
IDLE,
@@ -1763,9 +1785,11 @@ static bool process_discard_bio(struct cache *cache, struct bio *bio)
{
dm_dblock_t b, e;
- // FIXME: do we need to lock the region? Or can we just assume the
- // user wont be so foolish as to issue discard concurrently with
- // other IO?
+ /*
+ * FIXME: do we need to lock the region? Or can we just assume the
+ * user wont be so foolish as to issue discard concurrently with
+ * other IO?
+ */
calc_discard_block_range(cache, bio, &b, &e);
while (b != e) {
set_discard(cache, b);
@@ -1805,16 +1829,18 @@ static void process_deferred_bios(struct work_struct *ws)
else
commit_needed = process_bio(cache, bio) || commit_needed;
+ cond_resched();
}
if (commit_needed)
schedule_commit(&cache->committer);
}
-/*----------------------------------------------------------------
+/*
+ *--------------------------------------------------------------
* Main worker loop
- *--------------------------------------------------------------*/
-
+ *--------------------------------------------------------------
+ */
static void requeue_deferred_bios(struct cache *cache)
{
struct bio *bio;
@@ -1827,6 +1853,7 @@ static void requeue_deferred_bios(struct cache *cache)
while ((bio = bio_list_pop(&bios))) {
bio->bi_status = BLK_STS_DM_REQUEUE;
bio_endio(bio);
+ cond_resched();
}
}
@@ -1867,12 +1894,16 @@ static void check_migrations(struct work_struct *ws)
r = mg_start(cache, op, NULL);
if (r)
break;
+
+ cond_resched();
}
}
-/*----------------------------------------------------------------
+/*
+ *--------------------------------------------------------------
* Target methods
- *--------------------------------------------------------------*/
+ *--------------------------------------------------------------
+ */
/*
* This function gets called on the error paths of the constructor, so we
@@ -1880,7 +1911,7 @@ static void check_migrations(struct work_struct *ws)
*/
static void destroy(struct cache *cache)
{
- unsigned i;
+ unsigned int i;
mempool_exit(&cache->migration_pool);
@@ -2120,7 +2151,7 @@ static int parse_features(struct cache_args *ca, struct dm_arg_set *as,
};
int r, mode_ctr = 0;
- unsigned argc;
+ unsigned int argc;
const char *arg;
struct cache_features *cf = &ca->features;
@@ -2540,7 +2571,7 @@ bad:
static int copy_ctr_args(struct cache *cache, int argc, const char **argv)
{
- unsigned i;
+ unsigned int i;
const char **copy;
copy = kcalloc(argc, sizeof(*copy), GFP_KERNEL);
@@ -2562,7 +2593,7 @@ static int copy_ctr_args(struct cache *cache, int argc, const char **argv)
return 0;
}
-static int cache_ctr(struct dm_target *ti, unsigned argc, char **argv)
+static int cache_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
int r = -EINVAL;
struct cache_args *ca;
@@ -2665,7 +2696,7 @@ static int write_dirty_bitset(struct cache *cache)
static int write_discard_bitset(struct cache *cache)
{
- unsigned i, r;
+ unsigned int i, r;
if (get_cache_mode(cache) >= CM_READ_ONLY)
return -EINVAL;
@@ -2979,11 +3010,11 @@ static void cache_resume(struct dm_target *ti)
}
static void emit_flags(struct cache *cache, char *result,
- unsigned maxlen, ssize_t *sz_ptr)
+ unsigned int maxlen, ssize_t *sz_ptr)
{
ssize_t sz = *sz_ptr;
struct cache_features *cf = &cache->features;
- unsigned count = (cf->metadata_version == 2) + !cf->discard_passdown + 1;
+ unsigned int count = (cf->metadata_version == 2) + !cf->discard_passdown + 1;
DMEMIT("%u ", count);
@@ -3023,10 +3054,10 @@ static void emit_flags(struct cache *cache, char *result,
* <policy name> <#policy args> <policy args>* <cache metadata mode> <needs_check>
*/
static void cache_status(struct dm_target *ti, status_type_t type,
- unsigned status_flags, char *result, unsigned maxlen)
+ unsigned int status_flags, char *result, unsigned int maxlen)
{
int r = 0;
- unsigned i;
+ unsigned int i;
ssize_t sz = 0;
dm_block_t nr_free_blocks_metadata = 0;
dm_block_t nr_blocks_metadata = 0;
@@ -3063,18 +3094,18 @@ static void cache_status(struct dm_target *ti, status_type_t type,
residency = policy_residency(cache->policy);
DMEMIT("%u %llu/%llu %llu %llu/%llu %u %u %u %u %u %u %lu ",
- (unsigned)DM_CACHE_METADATA_BLOCK_SIZE,
+ (unsigned int)DM_CACHE_METADATA_BLOCK_SIZE,
(unsigned long long)(nr_blocks_metadata - nr_free_blocks_metadata),
(unsigned long long)nr_blocks_metadata,
(unsigned long long)cache->sectors_per_block,
(unsigned long long) from_cblock(residency),
(unsigned long long) from_cblock(cache->cache_size),
- (unsigned) atomic_read(&cache->stats.read_hit),
- (unsigned) atomic_read(&cache->stats.read_miss),
- (unsigned) atomic_read(&cache->stats.write_hit),
- (unsigned) atomic_read(&cache->stats.write_miss),
- (unsigned) atomic_read(&cache->stats.demotion),
- (unsigned) atomic_read(&cache->stats.promotion),
+ (unsigned int) atomic_read(&cache->stats.read_hit),
+ (unsigned int) atomic_read(&cache->stats.read_miss),
+ (unsigned int) atomic_read(&cache->stats.write_hit),
+ (unsigned int) atomic_read(&cache->stats.write_miss),
+ (unsigned int) atomic_read(&cache->stats.demotion),
+ (unsigned int) atomic_read(&cache->stats.promotion),
(unsigned long) atomic_read(&cache->nr_dirty));
emit_flags(cache, result, maxlen, &sz);
@@ -3253,11 +3284,11 @@ static int request_invalidation(struct cache *cache, struct cblock_range *range)
return r;
}
-static int process_invalidate_cblocks_message(struct cache *cache, unsigned count,
+static int process_invalidate_cblocks_message(struct cache *cache, unsigned int count,
const char **cblock_ranges)
{
int r = 0;
- unsigned i;
+ unsigned int i;
struct cblock_range range;
if (!passthrough_mode(cache)) {
@@ -3294,8 +3325,8 @@ static int process_invalidate_cblocks_message(struct cache *cache, unsigned coun
*
* The key migration_threshold is supported by the cache target core.
*/
-static int cache_message(struct dm_target *ti, unsigned argc, char **argv,
- char *result, unsigned maxlen)
+static int cache_message(struct dm_target *ti, unsigned int argc, char **argv,
+ char *result, unsigned int maxlen)
{
struct cache *cache = ti->private;
diff --git a/drivers/md/dm-clone-target.c b/drivers/md/dm-clone-target.c
index 29e0b85eeaf0..f38a27604c7a 100644
--- a/drivers/md/dm-clone-target.c
+++ b/drivers/md/dm-clone-target.c
@@ -580,7 +580,7 @@ static int hash_table_init(struct clone *clone)
sz = 1 << HASH_TABLE_BITS;
- clone->ht = kvmalloc(sz * sizeof(struct hash_table_bucket), GFP_KERNEL);
+ clone->ht = kvmalloc_array(sz, sizeof(struct hash_table_bucket), GFP_KERNEL);
if (!clone->ht)
return -ENOMEM;
diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
index 6c6bd24774f2..aecab0c0720f 100644
--- a/drivers/md/dm-core.h
+++ b/drivers/md/dm-core.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Internal header file _only_ for device mapper core
*
@@ -119,7 +120,7 @@ struct mapped_device {
struct dm_stats stats;
/* the number of internal suspends */
- unsigned internal_suspend_count;
+ unsigned int internal_suspend_count;
int swap_bios;
struct semaphore swap_bios_semaphore;
@@ -216,7 +217,7 @@ struct dm_table {
struct list_head devices;
/* events get handed up using this callback */
- void (*event_fn)(void *);
+ void (*event_fn)(void *data);
void *event_context;
struct dm_md_mempools *mempools;
@@ -326,9 +327,9 @@ static inline struct completion *dm_get_completion_from_kobject(struct kobject *
return &container_of(kobj, struct dm_kobject_holder, kobj)->completion;
}
-unsigned __dm_get_module_param(unsigned *module_param, unsigned def, unsigned max);
+unsigned int __dm_get_module_param(unsigned int *module_param, unsigned int def, unsigned int max);
-static inline bool dm_message_test_buffer_overflow(char *result, unsigned maxlen)
+static inline bool dm_message_test_buffer_overflow(char *result, unsigned int maxlen)
{
return !maxlen || strlen(result) + 1 >= maxlen;
}
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 2653516bcdef..40cb1719ae4d 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2003 Jana Saout <jana@saout.de>
* Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org>
@@ -171,14 +172,14 @@ struct crypt_config {
} iv_gen_private;
u64 iv_offset;
unsigned int iv_size;
- unsigned short int sector_size;
+ unsigned short sector_size;
unsigned char sector_shift;
union {
struct crypto_skcipher **tfms;
struct crypto_aead **tfms_aead;
} cipher_tfm;
- unsigned tfms_count;
+ unsigned int tfms_count;
unsigned long cipher_flags;
/*
@@ -212,7 +213,7 @@ struct crypt_config {
* pool for per bio private data, crypto requests,
* encryption requeusts/buffer pages and integrity tags
*/
- unsigned tag_pool_max_sectors;
+ unsigned int tag_pool_max_sectors;
mempool_t tag_pool;
mempool_t req_pool;
mempool_t page_pool;
@@ -229,7 +230,7 @@ struct crypt_config {
#define POOL_ENTRY_SIZE 512
static DEFINE_SPINLOCK(dm_crypt_clients_lock);
-static unsigned dm_crypt_clients_n = 0;
+static unsigned int dm_crypt_clients_n;
static volatile unsigned long dm_crypt_pages_per_client;
#define DM_CRYPT_MEMORY_PERCENT 2
#define DM_CRYPT_MIN_PAGES_PER_CLIENT (BIO_MAX_VECS * 16)
@@ -354,7 +355,7 @@ static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv,
static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
const char *opts)
{
- unsigned bs;
+ unsigned int bs;
int log;
if (crypt_integrity_aead(cc))
@@ -363,9 +364,10 @@ static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
bs = crypto_skcipher_blocksize(any_tfm(cc));
log = ilog2(bs);
- /* we need to calculate how far we must shift the sector count
- * to get the cipher block count, we use this shift in _gen */
-
+ /*
+ * We need to calculate how far we must shift the sector count
+ * to get the cipher block count, we use this shift in _gen.
+ */
if (1 << log != bs) {
ti->error = "cypher blocksize is not a power of 2";
return -EINVAL;
@@ -531,9 +533,9 @@ static int crypt_iv_lmk_gen(struct crypt_config *cc, u8 *iv,
if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
sg = crypt_get_sg_data(cc, dmreq->sg_in);
- src = kmap_atomic(sg_page(sg));
+ src = kmap_local_page(sg_page(sg));
r = crypt_iv_lmk_one(cc, iv, dmreq, src + sg->offset);
- kunmap_atomic(src);
+ kunmap_local(src);
} else
memset(iv, 0, cc->iv_size);
@@ -551,14 +553,14 @@ static int crypt_iv_lmk_post(struct crypt_config *cc, u8 *iv,
return 0;
sg = crypt_get_sg_data(cc, dmreq->sg_out);
- dst = kmap_atomic(sg_page(sg));
+ dst = kmap_local_page(sg_page(sg));
r = crypt_iv_lmk_one(cc, iv, dmreq, dst + sg->offset);
/* Tweak the first block of plaintext sector */
if (!r)
crypto_xor(dst + sg->offset, iv, cc->iv_size);
- kunmap_atomic(dst);
+ kunmap_local(dst);
return r;
}
@@ -681,9 +683,9 @@ static int crypt_iv_tcw_gen(struct crypt_config *cc, u8 *iv,
/* Remove whitening from ciphertext */
if (bio_data_dir(dmreq->ctx->bio_in) != WRITE) {
sg = crypt_get_sg_data(cc, dmreq->sg_in);
- src = kmap_atomic(sg_page(sg));
+ src = kmap_local_page(sg_page(sg));
r = crypt_iv_tcw_whitening(cc, dmreq, src + sg->offset);
- kunmap_atomic(src);
+ kunmap_local(src);
}
/* Calculate IV */
@@ -707,9 +709,9 @@ static int crypt_iv_tcw_post(struct crypt_config *cc, u8 *iv,
/* Apply whitening on ciphertext */
sg = crypt_get_sg_data(cc, dmreq->sg_out);
- dst = kmap_atomic(sg_page(sg));
+ dst = kmap_local_page(sg_page(sg));
r = crypt_iv_tcw_whitening(cc, dmreq, dst + sg->offset);
- kunmap_atomic(dst);
+ kunmap_local(dst);
return r;
}
@@ -731,8 +733,7 @@ static int crypt_iv_eboiv_ctr(struct crypt_config *cc, struct dm_target *ti,
}
if (crypto_skcipher_blocksize(any_tfm(cc)) != cc->iv_size) {
- ti->error = "Block size of EBOIV cipher does "
- "not match IV size of block cipher";
+ ti->error = "Block size of EBOIV cipher does not match IV size of block cipher";
return -EINVAL;
}
@@ -974,35 +975,35 @@ static int crypt_iv_elephant(struct crypt_config *cc, struct dm_crypt_request *d
goto out;
sg = crypt_get_sg_data(cc, dmreq->sg_out);
- data = kmap_atomic(sg_page(sg));
+ data = kmap_local_page(sg_page(sg));
data_offset = data + sg->offset;
/* Cannot modify original bio, copy to sg_out and apply Elephant to it */
if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
sg2 = crypt_get_sg_data(cc, dmreq->sg_in);
- data2 = kmap_atomic(sg_page(sg2));
+ data2 = kmap_local_page(sg_page(sg2));
memcpy(data_offset, data2 + sg2->offset, cc->sector_size);
- kunmap_atomic(data2);
+ kunmap_local(data2);
}
if (bio_data_dir(dmreq->ctx->bio_in) != WRITE) {
- diffuser_disk_to_cpu((u32*)data_offset, cc->sector_size / sizeof(u32));
- diffuser_b_decrypt((u32*)data_offset, cc->sector_size / sizeof(u32));
- diffuser_a_decrypt((u32*)data_offset, cc->sector_size / sizeof(u32));
- diffuser_cpu_to_disk((__le32*)data_offset, cc->sector_size / sizeof(u32));
+ diffuser_disk_to_cpu((u32 *)data_offset, cc->sector_size / sizeof(u32));
+ diffuser_b_decrypt((u32 *)data_offset, cc->sector_size / sizeof(u32));
+ diffuser_a_decrypt((u32 *)data_offset, cc->sector_size / sizeof(u32));
+ diffuser_cpu_to_disk((__le32 *)data_offset, cc->sector_size / sizeof(u32));
}
for (i = 0; i < (cc->sector_size / 32); i++)
crypto_xor(data_offset + i * 32, ks, 32);
if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
- diffuser_disk_to_cpu((u32*)data_offset, cc->sector_size / sizeof(u32));
- diffuser_a_encrypt((u32*)data_offset, cc->sector_size / sizeof(u32));
- diffuser_b_encrypt((u32*)data_offset, cc->sector_size / sizeof(u32));
- diffuser_cpu_to_disk((__le32*)data_offset, cc->sector_size / sizeof(u32));
+ diffuser_disk_to_cpu((u32 *)data_offset, cc->sector_size / sizeof(u32));
+ diffuser_a_encrypt((u32 *)data_offset, cc->sector_size / sizeof(u32));
+ diffuser_b_encrypt((u32 *)data_offset, cc->sector_size / sizeof(u32));
+ diffuser_cpu_to_disk((__le32 *)data_offset, cc->sector_size / sizeof(u32));
}
- kunmap_atomic(data);
+ kunmap_local(data);
out:
kfree_sensitive(ks);
kfree_sensitive(es);
@@ -1255,6 +1256,7 @@ static __le64 *org_sector_of_dmreq(struct crypt_config *cc,
struct dm_crypt_request *dmreq)
{
u8 *ptr = iv_of_dmreq(cc, dmreq) + cc->iv_size + cc->iv_size;
+
return (__le64 *) ptr;
}
@@ -1263,7 +1265,8 @@ static unsigned int *org_tag_of_dmreq(struct crypt_config *cc,
{
u8 *ptr = iv_of_dmreq(cc, dmreq) + cc->iv_size +
cc->iv_size + sizeof(uint64_t);
- return (unsigned int*)ptr;
+
+ return (unsigned int *)ptr;
}
static void *tag_from_dmreq(struct crypt_config *cc,
@@ -1458,13 +1461,12 @@ static int crypt_convert_block_skcipher(struct crypt_config *cc,
return r;
}
-static void kcryptd_async_done(struct crypto_async_request *async_req,
- int error);
+static void kcryptd_async_done(void *async_req, int error);
static int crypt_alloc_req_skcipher(struct crypt_config *cc,
struct convert_context *ctx)
{
- unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1);
+ unsigned int key_index = ctx->cc_sector & (cc->tfms_count - 1);
if (!ctx->r.req) {
ctx->r.req = mempool_alloc(&cc->req_pool, in_interrupt() ? GFP_ATOMIC : GFP_NOIO);
@@ -1658,13 +1660,13 @@ static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone);
* non-blocking allocations without a mutex first but on failure we fallback
* to blocking allocations with a mutex.
*/
-static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
+static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned int size)
{
struct crypt_config *cc = io->cc;
struct bio *clone;
unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
gfp_t gfp_mask = GFP_NOWAIT | __GFP_HIGHMEM;
- unsigned i, len, remaining_size;
+ unsigned int i, len, remaining_size;
struct page *page;
retry:
@@ -1739,6 +1741,7 @@ static void crypt_inc_pending(struct dm_crypt_io *io)
static void kcryptd_io_bio_endio(struct work_struct *work)
{
struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
+
bio_endio(io->base_bio);
}
@@ -1804,7 +1807,7 @@ static void crypt_endio(struct bio *clone)
{
struct dm_crypt_io *io = clone->bi_private;
struct crypt_config *cc = io->cc;
- unsigned rw = bio_data_dir(clone);
+ unsigned int rw = bio_data_dir(clone);
blk_status_t error;
/*
@@ -2147,10 +2150,9 @@ static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
crypt_dec_pending(io);
}
-static void kcryptd_async_done(struct crypto_async_request *async_req,
- int error)
+static void kcryptd_async_done(void *data, int error)
{
- struct dm_crypt_request *dmreq = async_req->data;
+ struct dm_crypt_request *dmreq = data;
struct convert_context *ctx = dmreq->ctx;
struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);
struct crypt_config *cc = io->cc;
@@ -2257,7 +2259,7 @@ static void crypt_free_tfms_aead(struct crypt_config *cc)
static void crypt_free_tfms_skcipher(struct crypt_config *cc)
{
- unsigned i;
+ unsigned int i;
if (!cc->cipher_tfm.tfms)
return;
@@ -2282,7 +2284,7 @@ static void crypt_free_tfms(struct crypt_config *cc)
static int crypt_alloc_tfms_skcipher(struct crypt_config *cc, char *ciphermode)
{
- unsigned i;
+ unsigned int i;
int err;
cc->cipher_tfm.tfms = kcalloc(cc->tfms_count,
@@ -2340,12 +2342,12 @@ static int crypt_alloc_tfms(struct crypt_config *cc, char *ciphermode)
return crypt_alloc_tfms_skcipher(cc, ciphermode);
}
-static unsigned crypt_subkey_size(struct crypt_config *cc)
+static unsigned int crypt_subkey_size(struct crypt_config *cc)
{
return (cc->key_size - cc->key_extra_size) >> ilog2(cc->tfms_count);
}
-static unsigned crypt_authenckey_size(struct crypt_config *cc)
+static unsigned int crypt_authenckey_size(struct crypt_config *cc)
{
return crypt_subkey_size(cc) + RTA_SPACE(sizeof(struct crypto_authenc_key_param));
}
@@ -2356,7 +2358,7 @@ static unsigned crypt_authenckey_size(struct crypt_config *cc)
* This funcion converts cc->key to this special format.
*/
static void crypt_copy_authenckey(char *p, const void *key,
- unsigned enckeylen, unsigned authkeylen)
+ unsigned int enckeylen, unsigned int authkeylen)
{
struct crypto_authenc_key_param *param;
struct rtattr *rta;
@@ -2374,7 +2376,7 @@ static void crypt_copy_authenckey(char *p, const void *key,
static int crypt_setkey(struct crypt_config *cc)
{
- unsigned subkey_size;
+ unsigned int subkey_size;
int err = 0, i, r;
/* Ignore extra keys (which are used for IV etc) */
@@ -2487,7 +2489,7 @@ static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string
}
/* look for next ':' separating key_type from key_description */
- key_desc = strpbrk(key_string, ":");
+ key_desc = strchr(key_string, ':');
if (!key_desc || key_desc == key_string || !strlen(key_desc + 1))
return -EINVAL;
@@ -2502,7 +2504,7 @@ static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string
type = &key_type_encrypted;
set_key = set_key_encrypted;
} else if (IS_ENABLED(CONFIG_TRUSTED_KEYS) &&
- !strncmp(key_string, "trusted:", key_desc - key_string + 1)) {
+ !strncmp(key_string, "trusted:", key_desc - key_string + 1)) {
type = &key_type_trusted;
set_key = set_key_trusted;
} else {
@@ -3413,11 +3415,14 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector));
if (cc->on_disk_tag_size) {
- unsigned tag_len = cc->on_disk_tag_size * (bio_sectors(bio) >> cc->sector_shift);
+ unsigned int tag_len = cc->on_disk_tag_size * (bio_sectors(bio) >> cc->sector_shift);
+
+ if (unlikely(tag_len > KMALLOC_MAX_SIZE))
+ io->integrity_metadata = NULL;
+ else
+ io->integrity_metadata = kmalloc(tag_len, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
- if (unlikely(tag_len > KMALLOC_MAX_SIZE) ||
- unlikely(!(io->integrity_metadata = kmalloc(tag_len,
- GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN)))) {
+ if (unlikely(!io->integrity_metadata)) {
if (bio_sectors(bio) > cc->tag_pool_max_sectors)
dm_accept_partial_bio(bio, cc->tag_pool_max_sectors);
io->integrity_metadata = mempool_alloc(&cc->tag_pool, GFP_NOIO);
@@ -3441,14 +3446,14 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
static char hex2asc(unsigned char c)
{
- return c + '0' + ((unsigned)(9 - c) >> 4 & 0x27);
+ return c + '0' + ((unsigned int)(9 - c) >> 4 & 0x27);
}
static void crypt_status(struct dm_target *ti, status_type_t type,
- unsigned status_flags, char *result, unsigned maxlen)
+ unsigned int status_flags, char *result, unsigned int maxlen)
{
struct crypt_config *cc = ti->private;
- unsigned i, sz = 0;
+ unsigned int i, sz = 0;
int num_feature_args = 0;
switch (type) {
@@ -3564,8 +3569,8 @@ static void crypt_resume(struct dm_target *ti)
* key set <key>
* key wipe
*/
-static int crypt_message(struct dm_target *ti, unsigned argc, char **argv,
- char *result, unsigned maxlen)
+static int crypt_message(struct dm_target *ti, unsigned int argc, char **argv,
+ char *result, unsigned int maxlen)
{
struct crypt_config *cc = ti->private;
int key_size, ret = -EINVAL;
@@ -3626,10 +3631,10 @@ static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
limits->max_segment_size = PAGE_SIZE;
limits->logical_block_size =
- max_t(unsigned, limits->logical_block_size, cc->sector_size);
+ max_t(unsigned int, limits->logical_block_size, cc->sector_size);
limits->physical_block_size =
- max_t(unsigned, limits->physical_block_size, cc->sector_size);
- limits->io_min = max_t(unsigned, limits->io_min, cc->sector_size);
+ max_t(unsigned int, limits->physical_block_size, cc->sector_size);
+ limits->io_min = max_t(unsigned int, limits->io_min, cc->sector_size);
limits->dma_alignment = limits->logical_block_size - 1;
}
diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
index 869afef5654a..a425046f88c7 100644
--- a/drivers/md/dm-delay.c
+++ b/drivers/md/dm-delay.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2005-2007 Red Hat GmbH
*
@@ -20,8 +21,8 @@
struct delay_class {
struct dm_dev *dev;
sector_t start;
- unsigned delay;
- unsigned ops;
+ unsigned int delay;
+ unsigned int ops;
};
struct delay_c {
@@ -305,7 +306,7 @@ static int delay_map(struct dm_target *ti, struct bio *bio)
DMEMIT("%s %llu %u", (c)->dev->name, (unsigned long long)(c)->start, (c)->delay)
static void delay_status(struct dm_target *ti, status_type_t type,
- unsigned status_flags, char *result, unsigned maxlen)
+ unsigned int status_flags, char *result, unsigned int maxlen)
{
struct delay_c *dc = ti->private;
int sz = 0;
diff --git a/drivers/md/dm-dust.c b/drivers/md/dm-dust.c
index 03672204b0e3..7ae9936752de 100644
--- a/drivers/md/dm-dust.c
+++ b/drivers/md/dm-dust.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2018 Red Hat, Inc.
*
diff --git a/drivers/md/dm-ebs-target.c b/drivers/md/dm-ebs-target.c
index 512cc6cea095..b1068a68bc46 100644
--- a/drivers/md/dm-ebs-target.c
+++ b/drivers/md/dm-ebs-target.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2020 Red Hat GmbH
*
@@ -241,7 +242,7 @@ static void __ebs_process_bios(struct work_struct *ws)
* <offset>: offset in 512 bytes sectors into <dev_path>
* <ebs>: emulated block size in units of 512 bytes exposed to the upper layer
* [<ubs>]: underlying block size in units of 512 bytes imposed on the lower layer;
- * optional, if not supplied, retrieve logical block size from underlying device
+ * optional, if not supplied, retrieve logical block size from underlying device
*/
static int ebs_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
@@ -390,7 +391,7 @@ static int ebs_map(struct dm_target *ti, struct bio *bio)
}
static void ebs_status(struct dm_target *ti, status_type_t type,
- unsigned status_flags, char *result, unsigned maxlen)
+ unsigned int status_flags, char *result, unsigned int maxlen)
{
struct ebs_c *ec = ti->private;
diff --git a/drivers/md/dm-era-target.c b/drivers/md/dm-era-target.c
index e92c1afc3677..c2e7780cdd2d 100644
--- a/drivers/md/dm-era-target.c
+++ b/drivers/md/dm-era-target.c
@@ -22,9 +22,11 @@
#define INVALID_WRITESET_ROOT SUPERBLOCK_LOCATION
#define MIN_BLOCK_SIZE 8
-/*----------------------------------------------------------------
+/*
+ *--------------------------------------------------------------
* Writeset
- *--------------------------------------------------------------*/
+ *--------------------------------------------------------------
+ */
struct writeset_metadata {
uint32_t nr_bits;
dm_block_t root;
@@ -51,7 +53,7 @@ static void writeset_free(struct writeset *ws)
}
static int setup_on_disk_bitset(struct dm_disk_bitset *info,
- unsigned nr_bits, dm_block_t *root)
+ unsigned int nr_bits, dm_block_t *root)
{
int r;
@@ -62,7 +64,7 @@ static int setup_on_disk_bitset(struct dm_disk_bitset *info,
return dm_bitset_resize(info, *root, 0, nr_bits, false, root);
}
-static size_t bitset_size(unsigned nr_bits)
+static size_t bitset_size(unsigned int nr_bits)
{
return sizeof(unsigned long) * dm_div_up(nr_bits, BITS_PER_LONG);
}
@@ -110,13 +112,14 @@ static int writeset_marked_on_disk(struct dm_disk_bitset *info,
struct writeset_metadata *m, dm_block_t block,
bool *result)
{
+ int r;
dm_block_t old = m->root;
/*
* The bitset was flushed when it was archived, so we know there'll
* be no change to the root.
*/
- int r = dm_bitset_test_bit(info, m->root, block, &m->root, result);
+ r = dm_bitset_test_bit(info, m->root, block, &m->root, result);
if (r) {
DMERR("%s: dm_bitset_test_bit failed", __func__);
return r;
@@ -148,9 +151,11 @@ static int writeset_test_and_set(struct dm_disk_bitset *info,
return 1;
}
-/*----------------------------------------------------------------
+/*
+ *--------------------------------------------------------------
* On disk metadata layout
- *--------------------------------------------------------------*/
+ *--------------------------------------------------------------
+ */
#define SPACE_MAP_ROOT_SIZE 128
#define UUID_LEN 16
@@ -186,9 +191,11 @@ struct superblock_disk {
__le64 metadata_snap;
} __packed;
-/*----------------------------------------------------------------
+/*
+ *--------------------------------------------------------------
* Superblock validation
- *--------------------------------------------------------------*/
+ *--------------------------------------------------------------
+ */
static void sb_prepare_for_write(struct dm_block_validator *v,
struct dm_block *b,
size_t sb_block_size)
@@ -204,6 +211,7 @@ static void sb_prepare_for_write(struct dm_block_validator *v,
static int check_metadata_version(struct superblock_disk *disk)
{
uint32_t metadata_version = le32_to_cpu(disk->version);
+
if (metadata_version < MIN_ERA_VERSION || metadata_version > MAX_ERA_VERSION) {
DMERR("Era metadata version %u found, but only versions between %u and %u supported.",
metadata_version, MIN_ERA_VERSION, MAX_ERA_VERSION);
@@ -221,15 +229,15 @@ static int sb_check(struct dm_block_validator *v,
__le32 csum_le;
if (dm_block_location(b) != le64_to_cpu(disk->blocknr)) {
- DMERR("sb_check failed: blocknr %llu: wanted %llu",
- le64_to_cpu(disk->blocknr),
+ DMERR("%s failed: blocknr %llu: wanted %llu",
+ __func__, le64_to_cpu(disk->blocknr),
(unsigned long long)dm_block_location(b));
return -ENOTBLK;
}
if (le64_to_cpu(disk->magic) != SUPERBLOCK_MAGIC) {
- DMERR("sb_check failed: magic %llu: wanted %llu",
- le64_to_cpu(disk->magic),
+ DMERR("%s failed: magic %llu: wanted %llu",
+ __func__, le64_to_cpu(disk->magic),
(unsigned long long) SUPERBLOCK_MAGIC);
return -EILSEQ;
}
@@ -238,8 +246,8 @@ static int sb_check(struct dm_block_validator *v,
sb_block_size - sizeof(__le32),
SUPERBLOCK_CSUM_XOR));
if (csum_le != disk->csum) {
- DMERR("sb_check failed: csum %u: wanted %u",
- le32_to_cpu(csum_le), le32_to_cpu(disk->csum));
+ DMERR("%s failed: csum %u: wanted %u",
+ __func__, le32_to_cpu(csum_le), le32_to_cpu(disk->csum));
return -EILSEQ;
}
@@ -252,9 +260,11 @@ static struct dm_block_validator sb_validator = {
.check = sb_check
};
-/*----------------------------------------------------------------
+/*
+ *--------------------------------------------------------------
* Low level metadata handling
- *--------------------------------------------------------------*/
+ *--------------------------------------------------------------
+ */
#define DM_ERA_METADATA_BLOCK_SIZE 4096
#define ERA_MAX_CONCURRENT_LOCKS 5
@@ -323,10 +333,10 @@ static int superblock_lock(struct era_metadata *md,
static int superblock_all_zeroes(struct dm_block_manager *bm, bool *result)
{
int r;
- unsigned i;
+ unsigned int i;
struct dm_block *b;
__le64 *data_le, zero = cpu_to_le64(0);
- unsigned sb_block_size = dm_bm_block_size(bm) / sizeof(__le64);
+ unsigned int sb_block_size = dm_bm_block_size(bm) / sizeof(__le64);
/*
* We can't use a validator here - it may be all zeroes.
@@ -363,12 +373,12 @@ static void ws_unpack(const struct writeset_disk *disk, struct writeset_metadata
core->root = le64_to_cpu(disk->root);
}
-static void ws_inc(void *context, const void *value, unsigned count)
+static void ws_inc(void *context, const void *value, unsigned int count)
{
struct era_metadata *md = context;
struct writeset_disk ws_d;
dm_block_t b;
- unsigned i;
+ unsigned int i;
for (i = 0; i < count; i++) {
memcpy(&ws_d, value + (i * sizeof(ws_d)), sizeof(ws_d));
@@ -377,12 +387,12 @@ static void ws_inc(void *context, const void *value, unsigned count)
}
}
-static void ws_dec(void *context, const void *value, unsigned count)
+static void ws_dec(void *context, const void *value, unsigned int count)
{
struct era_metadata *md = context;
struct writeset_disk ws_d;
dm_block_t b;
- unsigned i;
+ unsigned int i;
for (i = 0; i < count; i++) {
memcpy(&ws_d, value + (i * sizeof(ws_d)), sizeof(ws_d));
@@ -401,6 +411,7 @@ static int ws_eq(void *context, const void *value1, const void *value2)
static void setup_writeset_tree_info(struct era_metadata *md)
{
struct dm_btree_value_type *vt = &md->writeset_tree_info.value_type;
+
md->writeset_tree_info.tm = md->tm;
md->writeset_tree_info.levels = 1;
vt->context = md;
@@ -411,9 +422,9 @@ static void setup_writeset_tree_info(struct era_metadata *md)
}
static void setup_era_array_info(struct era_metadata *md)
-
{
struct dm_btree_value_type vt;
+
vt.context = NULL;
vt.size = sizeof(__le32);
vt.inc = NULL;
@@ -658,21 +669,23 @@ static void swap_writeset(struct era_metadata *md, struct writeset *new_writeset
synchronize_rcu();
}
-/*----------------------------------------------------------------
+/*
+ *------------------------------------------------------------------------
* Writesets get 'digested' into the main era array.
*
* We're using a coroutine here so the worker thread can do the digestion,
* thus avoiding synchronisation of the metadata. Digesting a whole
* writeset in one go would cause too much latency.
- *--------------------------------------------------------------*/
+ *------------------------------------------------------------------------
+ */
struct digest {
uint32_t era;
- unsigned nr_bits, current_bit;
+ unsigned int nr_bits, current_bit;
struct writeset_metadata writeset;
__le32 value;
struct dm_disk_bitset info;
- int (*step)(struct era_metadata *, struct digest *);
+ int (*step)(struct era_metadata *md, struct digest *d);
};
static int metadata_digest_lookup_writeset(struct era_metadata *md,
@@ -702,7 +715,7 @@ static int metadata_digest_transcribe_writeset(struct era_metadata *md,
{
int r;
bool marked;
- unsigned b, e = min(d->current_bit + INSERTS_PER_STEP, d->nr_bits);
+ unsigned int b, e = min(d->current_bit + INSERTS_PER_STEP, d->nr_bits);
for (b = d->current_bit; b < e; b++) {
r = writeset_marked_on_disk(&d->info, &d->writeset, b, &marked);
@@ -784,10 +797,12 @@ static int metadata_digest_start(struct era_metadata *md, struct digest *d)
return 0;
}
-/*----------------------------------------------------------------
- * High level metadata interface. Target methods should use these, and not
- * the lower level ones.
- *--------------------------------------------------------------*/
+/*
+ *-----------------------------------------------------------------
+ * High level metadata interface. Target methods should use these,
+ * and not the lower level ones.
+ *-----------------------------------------------------------------
+ */
static struct era_metadata *metadata_open(struct block_device *bdev,
sector_t block_size,
bool may_format)
@@ -1181,17 +1196,19 @@ struct era {
struct rpc {
struct list_head list;
- int (*fn0)(struct era_metadata *);
- int (*fn1)(struct era_metadata *, void *);
+ int (*fn0)(struct era_metadata *md);
+ int (*fn1)(struct era_metadata *md, void *ref);
void *arg;
int result;
struct completion complete;
};
-/*----------------------------------------------------------------
+/*
+ *---------------------------------------------------------------
* Remapping.
- *---------------------------------------------------------------*/
+ *---------------------------------------------------------------
+ */
static bool block_size_is_power_of_two(struct era *era)
{
return era->sectors_per_block_shift >= 0;
@@ -1214,9 +1231,11 @@ static void remap_to_origin(struct era *era, struct bio *bio)
bio_set_dev(bio, era->origin_dev->bdev);
}
-/*----------------------------------------------------------------
+/*
+ *--------------------------------------------------------------
* Worker thread
- *--------------------------------------------------------------*/
+ *--------------------------------------------------------------
+ */
static void wake_worker(struct era *era)
{
if (!atomic_read(&era->suspended))
@@ -1372,9 +1391,10 @@ static int perform_rpc(struct era *era, struct rpc *rpc)
return rpc->result;
}
-static int in_worker0(struct era *era, int (*fn)(struct era_metadata *))
+static int in_worker0(struct era *era, int (*fn)(struct era_metadata *md))
{
struct rpc rpc;
+
rpc.fn0 = fn;
rpc.fn1 = NULL;
@@ -1382,9 +1402,10 @@ static int in_worker0(struct era *era, int (*fn)(struct era_metadata *))
}
static int in_worker1(struct era *era,
- int (*fn)(struct era_metadata *, void *), void *arg)
+ int (*fn)(struct era_metadata *md, void *ref), void *arg)
{
struct rpc rpc;
+
rpc.fn0 = NULL;
rpc.fn1 = fn;
rpc.arg = arg;
@@ -1403,9 +1424,11 @@ static void stop_worker(struct era *era)
drain_workqueue(era->wq);
}
-/*----------------------------------------------------------------
+/*
+ *--------------------------------------------------------------
* Target methods
- *--------------------------------------------------------------*/
+ *--------------------------------------------------------------
+ */
static void era_destroy(struct era *era)
{
if (era->md)
@@ -1439,7 +1462,7 @@ static bool valid_block_size(dm_block_t block_size)
/*
* <metadata dev> <data dev> <data block size (sectors)>
*/
-static int era_ctr(struct dm_target *ti, unsigned argc, char **argv)
+static int era_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
int r;
char dummy;
@@ -1618,7 +1641,7 @@ static int era_preresume(struct dm_target *ti)
* <current era> <held metadata root | '-'>
*/
static void era_status(struct dm_target *ti, status_type_t type,
- unsigned status_flags, char *result, unsigned maxlen)
+ unsigned int status_flags, char *result, unsigned int maxlen)
{
int r;
struct era *era = ti->private;
@@ -1633,10 +1656,10 @@ static void era_status(struct dm_target *ti, status_type_t type,
goto err;
DMEMIT("%u %llu/%llu %u",
- (unsigned) (DM_ERA_METADATA_BLOCK_SIZE >> SECTOR_SHIFT),
+ (unsigned int) (DM_ERA_METADATA_BLOCK_SIZE >> SECTOR_SHIFT),
(unsigned long long) stats.used,
(unsigned long long) stats.total,
- (unsigned) stats.era);
+ (unsigned int) stats.era);
if (stats.snap != SUPERBLOCK_LOCATION)
DMEMIT(" %llu", stats.snap);
@@ -1662,8 +1685,8 @@ err:
DMEMIT("Error");
}
-static int era_message(struct dm_target *ti, unsigned argc, char **argv,
- char *result, unsigned maxlen)
+static int era_message(struct dm_target *ti, unsigned int argc, char **argv,
+ char *result, unsigned int maxlen)
{
struct era *era = ti->private;
@@ -1694,6 +1717,7 @@ static int era_iterate_devices(struct dm_target *ti,
iterate_devices_callout_fn fn, void *data)
{
struct era *era = ti->private;
+
return fn(ti, era->origin_dev, 0, get_dev_size(era->origin_dev), data);
}
diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c
index 3997f34cfebc..c3799757bf4a 100644
--- a/drivers/md/dm-exception-store.c
+++ b/drivers/md/dm-exception-store.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2001-2002 Sistina Software (UK) Limited.
* Copyright (C) 2006-2008 Red Hat GmbH
@@ -142,7 +143,7 @@ EXPORT_SYMBOL(dm_exception_store_type_unregister);
static int set_chunk_size(struct dm_exception_store *store,
const char *chunk_size_arg, char **error)
{
- unsigned chunk_size;
+ unsigned int chunk_size;
if (kstrtouint(chunk_size_arg, 10, &chunk_size)) {
*error = "Invalid chunk size";
@@ -158,7 +159,7 @@ static int set_chunk_size(struct dm_exception_store *store,
}
int dm_exception_store_set_chunk_size(struct dm_exception_store *store,
- unsigned chunk_size,
+ unsigned int chunk_size,
char **error)
{
/* Check chunk_size is a power of 2 */
@@ -190,7 +191,7 @@ int dm_exception_store_set_chunk_size(struct dm_exception_store *store,
int dm_exception_store_create(struct dm_target *ti, int argc, char **argv,
struct dm_snapshot *snap,
- unsigned *args_used,
+ unsigned int *args_used,
struct dm_exception_store **store)
{
int r = 0;
diff --git a/drivers/md/dm-exception-store.h b/drivers/md/dm-exception-store.h
index b5f20eba3641..b67976637538 100644
--- a/drivers/md/dm-exception-store.h
+++ b/drivers/md/dm-exception-store.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2001-2002 Sistina Software (UK) Limited.
* Copyright (C) 2008 Red Hat, Inc. All rights reserved.
@@ -43,36 +44,36 @@ struct dm_exception_store_type {
const char *name;
struct module *module;
- int (*ctr) (struct dm_exception_store *store, char *options);
+ int (*ctr)(struct dm_exception_store *store, char *options);
/*
* Destroys this object when you've finished with it.
*/
- void (*dtr) (struct dm_exception_store *store);
+ void (*dtr)(struct dm_exception_store *store);
/*
* The target shouldn't read the COW device until this is
* called. As exceptions are read from the COW, they are
* reported back via the callback.
*/
- int (*read_metadata) (struct dm_exception_store *store,
- int (*callback)(void *callback_context,
- chunk_t old, chunk_t new),
- void *callback_context);
+ int (*read_metadata)(struct dm_exception_store *store,
+ int (*callback)(void *callback_context,
+ chunk_t old, chunk_t new),
+ void *callback_context);
/*
* Find somewhere to store the next exception.
*/
- int (*prepare_exception) (struct dm_exception_store *store,
- struct dm_exception *e);
+ int (*prepare_exception)(struct dm_exception_store *store,
+ struct dm_exception *e);
/*
* Update the metadata with this exception.
*/
- void (*commit_exception) (struct dm_exception_store *store,
- struct dm_exception *e, int valid,
- void (*callback) (void *, int success),
- void *callback_context);
+ void (*commit_exception)(struct dm_exception_store *store,
+ struct dm_exception *e, int valid,
+ void (*callback)(void *, int success),
+ void *callback_context);
/*
* Returns 0 if the exception store is empty.
@@ -82,30 +83,30 @@ struct dm_exception_store_type {
* still-to-be-merged chunk and returns the number of
* consecutive previous ones.
*/
- int (*prepare_merge) (struct dm_exception_store *store,
- chunk_t *last_old_chunk, chunk_t *last_new_chunk);
+ int (*prepare_merge)(struct dm_exception_store *store,
+ chunk_t *last_old_chunk, chunk_t *last_new_chunk);
/*
* Clear the last n exceptions.
* nr_merged must be <= the value returned by prepare_merge.
*/
- int (*commit_merge) (struct dm_exception_store *store, int nr_merged);
+ int (*commit_merge)(struct dm_exception_store *store, int nr_merged);
/*
* The snapshot is invalid, note this in the metadata.
*/
- void (*drop_snapshot) (struct dm_exception_store *store);
+ void (*drop_snapshot)(struct dm_exception_store *store);
- unsigned (*status) (struct dm_exception_store *store,
- status_type_t status, char *result,
- unsigned maxlen);
+ unsigned int (*status)(struct dm_exception_store *store,
+ status_type_t status, char *result,
+ unsigned int maxlen);
/*
* Return how full the snapshot is.
*/
- void (*usage) (struct dm_exception_store *store,
- sector_t *total_sectors, sector_t *sectors_allocated,
- sector_t *metadata_sectors);
+ void (*usage)(struct dm_exception_store *store,
+ sector_t *total_sectors, sector_t *sectors_allocated,
+ sector_t *metadata_sectors);
/* For internal device-mapper use only. */
struct list_head list;
@@ -118,9 +119,9 @@ struct dm_exception_store {
struct dm_snapshot *snap;
/* Size of data blocks saved - must be a power of 2 */
- unsigned chunk_size;
- unsigned chunk_mask;
- unsigned chunk_shift;
+ unsigned int chunk_size;
+ unsigned int chunk_mask;
+ unsigned int chunk_shift;
void *context;
@@ -144,7 +145,7 @@ static inline chunk_t dm_chunk_number(chunk_t chunk)
return chunk & (chunk_t)((1ULL << DM_CHUNK_NUMBER_BITS) - 1ULL);
}
-static inline unsigned dm_consecutive_chunk_count(struct dm_exception *e)
+static inline unsigned int dm_consecutive_chunk_count(struct dm_exception *e)
{
return e->new_chunk >> DM_CHUNK_NUMBER_BITS;
}
@@ -181,12 +182,12 @@ int dm_exception_store_type_register(struct dm_exception_store_type *type);
int dm_exception_store_type_unregister(struct dm_exception_store_type *type);
int dm_exception_store_set_chunk_size(struct dm_exception_store *store,
- unsigned chunk_size,
+ unsigned int chunk_size,
char **error);
int dm_exception_store_create(struct dm_target *ti, int argc, char **argv,
struct dm_snapshot *snap,
- unsigned *args_used,
+ unsigned int *args_used,
struct dm_exception_store **store);
void dm_exception_store_destroy(struct dm_exception_store *store);
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index 89fa7a68c6c4..5b7556d2a9d9 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2003 Sistina Software (UK) Limited.
* Copyright (C) 2004, 2010-2011 Red Hat, Inc. All rights reserved.
@@ -26,12 +27,12 @@ struct flakey_c {
struct dm_dev *dev;
unsigned long start_time;
sector_t start;
- unsigned up_interval;
- unsigned down_interval;
+ unsigned int up_interval;
+ unsigned int down_interval;
unsigned long flags;
- unsigned corrupt_bio_byte;
- unsigned corrupt_bio_rw;
- unsigned corrupt_bio_value;
+ unsigned int corrupt_bio_byte;
+ unsigned int corrupt_bio_rw;
+ unsigned int corrupt_bio_value;
blk_opf_t corrupt_bio_flags;
};
@@ -48,7 +49,7 @@ static int parse_features(struct dm_arg_set *as, struct flakey_c *fc,
struct dm_target *ti)
{
int r;
- unsigned argc;
+ unsigned int argc;
const char *arg_name;
static const struct dm_arg _args[] = {
@@ -148,7 +149,7 @@ static int parse_features(struct dm_arg_set *as, struct flakey_c *fc,
BUILD_BUG_ON(sizeof(fc->corrupt_bio_flags) !=
sizeof(unsigned int));
r = dm_read_arg(_args + 3, as,
- (__force unsigned *)&fc->corrupt_bio_flags,
+ (__force unsigned int *)&fc->corrupt_bio_flags,
&ti->error);
if (r)
return r;
@@ -303,9 +304,13 @@ static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc)
*/
bio_for_each_segment(bvec, bio, iter) {
if (bio_iter_len(bio, iter) > corrupt_bio_byte) {
- char *segment = (page_address(bio_iter_page(bio, iter))
- + bio_iter_offset(bio, iter));
+ char *segment;
+ struct page *page = bio_iter_page(bio, iter);
+ if (unlikely(page == ZERO_PAGE(0)))
+ break;
+ segment = bvec_kmap_local(&bvec);
segment[corrupt_bio_byte] = fc->corrupt_bio_value;
+ kunmap_local(segment);
DMDEBUG("Corrupting data bio=%p by writing %u to byte %u "
"(rw=%c bi_opf=%u bi_sector=%llu size=%u)\n",
bio, fc->corrupt_bio_value, fc->corrupt_bio_byte,
@@ -320,8 +325,9 @@ static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc)
static int flakey_map(struct dm_target *ti, struct bio *bio)
{
struct flakey_c *fc = ti->private;
- unsigned elapsed;
+ unsigned int elapsed;
struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));
+
pb->bio_submitted = false;
if (op_is_zone_mgmt(bio_op(bio)))
@@ -352,8 +358,7 @@ static int flakey_map(struct dm_target *ti, struct bio *bio)
if (test_bit(DROP_WRITES, &fc->flags)) {
bio_endio(bio);
return DM_MAPIO_SUBMITTED;
- }
- else if (test_bit(ERROR_WRITES, &fc->flags)) {
+ } else if (test_bit(ERROR_WRITES, &fc->flags)) {
bio_io_error(bio);
return DM_MAPIO_SUBMITTED;
}
@@ -361,9 +366,11 @@ static int flakey_map(struct dm_target *ti, struct bio *bio)
/*
* Corrupt matching writes.
*/
- if (fc->corrupt_bio_byte && (fc->corrupt_bio_rw == WRITE)) {
- if (all_corrupt_bio_flags_match(bio, fc))
- corrupt_bio_data(bio, fc);
+ if (fc->corrupt_bio_byte) {
+ if (fc->corrupt_bio_rw == WRITE) {
+ if (all_corrupt_bio_flags_match(bio, fc))
+ corrupt_bio_data(bio, fc);
+ }
goto map_bio;
}
@@ -389,13 +396,14 @@ static int flakey_end_io(struct dm_target *ti, struct bio *bio,
return DM_ENDIO_DONE;
if (!*error && pb->bio_submitted && (bio_data_dir(bio) == READ)) {
- if (fc->corrupt_bio_byte && (fc->corrupt_bio_rw == READ) &&
- all_corrupt_bio_flags_match(bio, fc)) {
- /*
- * Corrupt successful matching READs while in down state.
- */
- corrupt_bio_data(bio, fc);
-
+ if (fc->corrupt_bio_byte) {
+ if ((fc->corrupt_bio_rw == READ) &&
+ all_corrupt_bio_flags_match(bio, fc)) {
+ /*
+ * Corrupt successful matching READs while in down state.
+ */
+ corrupt_bio_data(bio, fc);
+ }
} else if (!test_bit(DROP_WRITES, &fc->flags) &&
!test_bit(ERROR_WRITES, &fc->flags)) {
/*
@@ -410,11 +418,11 @@ static int flakey_end_io(struct dm_target *ti, struct bio *bio,
}
static void flakey_status(struct dm_target *ti, status_type_t type,
- unsigned status_flags, char *result, unsigned maxlen)
+ unsigned int status_flags, char *result, unsigned int maxlen)
{
- unsigned sz = 0;
+ unsigned int sz = 0;
struct flakey_c *fc = ti->private;
- unsigned drop_writes, error_writes;
+ unsigned int drop_writes, error_writes;
switch (type) {
case STATUSTYPE_INFO:
diff --git a/drivers/md/dm-ima.c b/drivers/md/dm-ima.c
index a1bd7cd52b1b..b90f34259fbb 100644
--- a/drivers/md/dm-ima.c
+++ b/drivers/md/dm-ima.c
@@ -1,11 +1,10 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2021 Microsoft Corporation
*
* Author: Tushar Sugandhi <tusharsu@linux.microsoft.com>
*
- * File: dm-ima.c
- * Enables IMA measurements for DM targets
+ * Enables IMA measurements for DM targets
*/
#include "dm-core.h"
diff --git a/drivers/md/dm-ima.h b/drivers/md/dm-ima.h
index b8c3b614670b..568870a1a145 100644
--- a/drivers/md/dm-ima.h
+++ b/drivers/md/dm-ima.h
@@ -1,11 +1,10 @@
-/* SPDX-License-Identifier: GPL-2.0
- *
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
* Copyright (C) 2021 Microsoft Corporation
*
* Author: Tushar Sugandhi <tusharsu@linux.microsoft.com>
*
- * File: dm-ima.h
- * Header file for device mapper IMA measurements.
+ * Header file for device mapper IMA measurements.
*/
#ifndef DM_IMA_H
diff --git a/drivers/md/dm-init.c b/drivers/md/dm-init.c
index dc4381d68313..d369457dbed0 100644
--- a/drivers/md/dm-init.c
+++ b/drivers/md/dm-init.c
@@ -1,7 +1,6 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0-only
/*
- * dm-init.c
* Copyright (C) 2017 The Chromium OS Authors <chromium-os-dev@chromium.org>
*
* This file is released under the GPLv2.
@@ -296,7 +295,7 @@ static int __init dm_init_init(void)
if (waitfor[i]) {
DMINFO("waiting for device %s ...", waitfor[i]);
while (!dm_get_dev_t(waitfor[i]))
- msleep(5);
+ fsleep(5000);
}
}
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index 1388ee35571e..b0d5057fbdd9 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2016-2017 Red Hat, Inc. All rights reserved.
* Copyright (C) 2016-2017 Milan Broz
@@ -112,9 +113,9 @@ struct journal_entry {
#endif
#define journal_entry_get_sector(je) le64_to_cpu((je)->u.sector)
#define journal_entry_is_unused(je) ((je)->u.s.sector_hi == cpu_to_le32(-1))
-#define journal_entry_set_unused(je) do { ((je)->u.s.sector_hi = cpu_to_le32(-1)); } while (0)
+#define journal_entry_set_unused(je) ((je)->u.s.sector_hi = cpu_to_le32(-1))
#define journal_entry_is_inprogress(je) ((je)->u.s.sector_hi == cpu_to_le32(-2))
-#define journal_entry_set_inprogress(je) do { ((je)->u.s.sector_hi = cpu_to_le32(-2)); } while (0)
+#define journal_entry_set_inprogress(je) ((je)->u.s.sector_hi = cpu_to_le32(-2))
#define JOURNAL_BLOCK_SECTORS 8
#define JOURNAL_SECTOR_DATA ((1 << SECTOR_SHIFT) - sizeof(commit_id_t))
@@ -157,13 +158,13 @@ struct alg_spec {
char *alg_string;
char *key_string;
__u8 *key;
- unsigned key_size;
+ unsigned int key_size;
};
struct dm_integrity_c {
struct dm_dev *dev;
struct dm_dev *meta_dev;
- unsigned tag_size;
+ unsigned int tag_size;
__s8 log2_tag_size;
sector_t start;
mempool_t journal_io_mempool;
@@ -171,8 +172,8 @@ struct dm_integrity_c {
struct dm_bufio_client *bufio;
struct workqueue_struct *metadata_wq;
struct superblock *sb;
- unsigned journal_pages;
- unsigned n_bitmap_blocks;
+ unsigned int journal_pages;
+ unsigned int n_bitmap_blocks;
struct page_list *journal;
struct page_list *journal_io;
@@ -180,7 +181,7 @@ struct dm_integrity_c {
struct page_list *recalc_bitmap;
struct page_list *may_write_bitmap;
struct bitmap_block_status *bbs;
- unsigned bitmap_flush_interval;
+ unsigned int bitmap_flush_interval;
int synchronous_mode;
struct bio_list synchronous_bios;
struct delayed_work bitmap_flush_work;
@@ -201,12 +202,12 @@ struct dm_integrity_c {
unsigned char journal_entries_per_sector;
unsigned char journal_section_entries;
unsigned short journal_section_sectors;
- unsigned journal_sections;
- unsigned journal_entries;
+ unsigned int journal_sections;
+ unsigned int journal_entries;
sector_t data_device_sectors;
sector_t meta_device_sectors;
- unsigned initial_sectors;
- unsigned metadata_run;
+ unsigned int initial_sectors;
+ unsigned int metadata_run;
__s8 log2_metadata_run;
__u8 log2_buffer_sectors;
__u8 sectors_per_block;
@@ -230,17 +231,17 @@ struct dm_integrity_c {
unsigned char commit_seq;
commit_id_t commit_ids[N_COMMIT_IDS];
- unsigned committed_section;
- unsigned n_committed_sections;
+ unsigned int committed_section;
+ unsigned int n_committed_sections;
- unsigned uncommitted_section;
- unsigned n_uncommitted_sections;
+ unsigned int uncommitted_section;
+ unsigned int n_uncommitted_sections;
- unsigned free_section;
+ unsigned int free_section;
unsigned char free_section_entry;
- unsigned free_sectors;
+ unsigned int free_sectors;
- unsigned free_sectors_threshold;
+ unsigned int free_sectors_threshold;
struct workqueue_struct *commit_wq;
struct work_struct commit_work;
@@ -257,7 +258,7 @@ struct dm_integrity_c {
unsigned long autocommit_jiffies;
struct timer_list autocommit_timer;
- unsigned autocommit_msec;
+ unsigned int autocommit_msec;
wait_queue_head_t copy_to_journal_wait;
@@ -305,7 +306,7 @@ struct dm_integrity_io {
struct dm_integrity_range range;
sector_t metadata_block;
- unsigned metadata_offset;
+ unsigned int metadata_offset;
atomic_t in_flight;
blk_status_t bi_status;
@@ -329,7 +330,7 @@ struct journal_io {
struct bitmap_block_status {
struct work_struct work;
struct dm_integrity_c *ic;
- unsigned idx;
+ unsigned int idx;
unsigned long *bitmap;
struct bio_list bio_queue;
spinlock_t bio_queue_lock;
@@ -345,6 +346,7 @@ static struct kmem_cache *journal_io_cache;
static void __DEBUG_bytes(__u8 *bytes, size_t len, const char *msg, ...)
{
va_list args;
+
va_start(args, msg);
vprintk(msg, args);
va_end(args);
@@ -410,8 +412,8 @@ static bool dm_integrity_disable_recalculate(struct dm_integrity_c *ic)
return false;
}
-static commit_id_t dm_integrity_commit_id(struct dm_integrity_c *ic, unsigned i,
- unsigned j, unsigned char seq)
+static commit_id_t dm_integrity_commit_id(struct dm_integrity_c *ic, unsigned int i,
+ unsigned int j, unsigned char seq)
{
/*
* Xor the number with section and sector, so that if a piece of
@@ -426,7 +428,7 @@ static void get_area_and_offset(struct dm_integrity_c *ic, sector_t data_sector,
if (!ic->meta_dev) {
__u8 log2_interleave_sectors = ic->sb->log2_interleave_sectors;
*area = data_sector >> log2_interleave_sectors;
- *offset = (unsigned)data_sector & ((1U << log2_interleave_sectors) - 1);
+ *offset = (unsigned int)data_sector & ((1U << log2_interleave_sectors) - 1);
} else {
*area = 0;
*offset = data_sector;
@@ -435,15 +437,15 @@ static void get_area_and_offset(struct dm_integrity_c *ic, sector_t data_sector,
#define sector_to_block(ic, n) \
do { \
- BUG_ON((n) & (unsigned)((ic)->sectors_per_block - 1)); \
+ BUG_ON((n) & (unsigned int)((ic)->sectors_per_block - 1)); \
(n) >>= (ic)->sb->log2_sectors_per_block; \
} while (0)
static __u64 get_metadata_sector_and_offset(struct dm_integrity_c *ic, sector_t area,
- sector_t offset, unsigned *metadata_offset)
+ sector_t offset, unsigned int *metadata_offset)
{
__u64 ms;
- unsigned mo;
+ unsigned int mo;
ms = area << ic->sb->log2_interleave_sectors;
if (likely(ic->log2_metadata_run >= 0))
@@ -484,7 +486,7 @@ static sector_t get_data_sector(struct dm_integrity_c *ic, sector_t area, sector
return result;
}
-static void wraparound_section(struct dm_integrity_c *ic, unsigned *sec_ptr)
+static void wraparound_section(struct dm_integrity_c *ic, unsigned int *sec_ptr)
{
if (unlikely(*sec_ptr >= ic->journal_sections))
*sec_ptr -= ic->journal_sections;
@@ -508,7 +510,7 @@ static int sb_mac(struct dm_integrity_c *ic, bool wr)
{
SHASH_DESC_ON_STACK(desc, ic->journal_mac);
int r;
- unsigned size = crypto_shash_digestsize(ic->journal_mac);
+ unsigned int size = crypto_shash_digestsize(ic->journal_mac);
if (sizeof(struct superblock) + size > 1 << SECTOR_SHIFT) {
dm_integrity_io_error(ic, "digest is too long", -EINVAL);
@@ -537,6 +539,7 @@ static int sb_mac(struct dm_integrity_c *ic, bool wr)
}
} else {
__u8 result[HASH_MAX_DIGESTSIZE];
+
r = crypto_shash_final(desc, result);
if (unlikely(r < 0)) {
dm_integrity_io_error(ic, "crypto_shash_final", r);
@@ -627,11 +630,10 @@ static bool block_bitmap_op(struct dm_integrity_c *ic, struct page_list *bitmap,
end_bit %= PAGE_SIZE * 8;
repeat:
- if (page < end_page) {
+ if (page < end_page)
this_end_bit = PAGE_SIZE * 8 - 1;
- } else {
+ else
this_end_bit = end_bit;
- }
data = lowmem_page_address(bitmap[page].page);
@@ -678,16 +680,18 @@ repeat:
} else if (mode == BITMAP_OP_CLEAR) {
if (!bit && this_end_bit == PAGE_SIZE * 8 - 1)
clear_page(data);
- else while (bit <= this_end_bit) {
- if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) {
- do {
- data[bit / BITS_PER_LONG] = 0;
- bit += BITS_PER_LONG;
- } while (this_end_bit >= bit + BITS_PER_LONG - 1);
- continue;
+ else {
+ while (bit <= this_end_bit) {
+ if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) {
+ do {
+ data[bit / BITS_PER_LONG] = 0;
+ bit += BITS_PER_LONG;
+ } while (this_end_bit >= bit + BITS_PER_LONG - 1);
+ continue;
+ }
+ __clear_bit(bit, data);
+ bit++;
}
- __clear_bit(bit, data);
- bit++;
}
} else {
BUG();
@@ -704,30 +708,31 @@ repeat:
static void block_bitmap_copy(struct dm_integrity_c *ic, struct page_list *dst, struct page_list *src)
{
- unsigned n_bitmap_pages = DIV_ROUND_UP(ic->n_bitmap_blocks, PAGE_SIZE / BITMAP_BLOCK_SIZE);
- unsigned i;
+ unsigned int n_bitmap_pages = DIV_ROUND_UP(ic->n_bitmap_blocks, PAGE_SIZE / BITMAP_BLOCK_SIZE);
+ unsigned int i;
for (i = 0; i < n_bitmap_pages; i++) {
unsigned long *dst_data = lowmem_page_address(dst[i].page);
unsigned long *src_data = lowmem_page_address(src[i].page);
+
copy_page(dst_data, src_data);
}
}
static struct bitmap_block_status *sector_to_bitmap_block(struct dm_integrity_c *ic, sector_t sector)
{
- unsigned bit = sector >> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
- unsigned bitmap_block = bit / (BITMAP_BLOCK_SIZE * 8);
+ unsigned int bit = sector >> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
+ unsigned int bitmap_block = bit / (BITMAP_BLOCK_SIZE * 8);
BUG_ON(bitmap_block >= ic->n_bitmap_blocks);
return &ic->bbs[bitmap_block];
}
-static void access_journal_check(struct dm_integrity_c *ic, unsigned section, unsigned offset,
+static void access_journal_check(struct dm_integrity_c *ic, unsigned int section, unsigned int offset,
bool e, const char *function)
{
#if defined(CONFIG_DM_DEBUG) || defined(INTERNAL_VERIFY)
- unsigned limit = e ? ic->journal_section_entries : ic->journal_section_sectors;
+ unsigned int limit = e ? ic->journal_section_entries : ic->journal_section_sectors;
if (unlikely(section >= ic->journal_sections) ||
unlikely(offset >= limit)) {
@@ -738,10 +743,10 @@ static void access_journal_check(struct dm_integrity_c *ic, unsigned section, un
#endif
}
-static void page_list_location(struct dm_integrity_c *ic, unsigned section, unsigned offset,
- unsigned *pl_index, unsigned *pl_offset)
+static void page_list_location(struct dm_integrity_c *ic, unsigned int section, unsigned int offset,
+ unsigned int *pl_index, unsigned int *pl_offset)
{
- unsigned sector;
+ unsigned int sector;
access_journal_check(ic, section, offset, false, "page_list_location");
@@ -752,9 +757,9 @@ static void page_list_location(struct dm_integrity_c *ic, unsigned section, unsi
}
static struct journal_sector *access_page_list(struct dm_integrity_c *ic, struct page_list *pl,
- unsigned section, unsigned offset, unsigned *n_sectors)
+ unsigned int section, unsigned int offset, unsigned int *n_sectors)
{
- unsigned pl_index, pl_offset;
+ unsigned int pl_index, pl_offset;
char *va;
page_list_location(ic, section, offset, &pl_index, &pl_offset);
@@ -767,14 +772,14 @@ static struct journal_sector *access_page_list(struct dm_integrity_c *ic, struct
return (struct journal_sector *)(va + pl_offset);
}
-static struct journal_sector *access_journal(struct dm_integrity_c *ic, unsigned section, unsigned offset)
+static struct journal_sector *access_journal(struct dm_integrity_c *ic, unsigned int section, unsigned int offset)
{
return access_page_list(ic, ic->journal, section, offset, NULL);
}
-static struct journal_entry *access_journal_entry(struct dm_integrity_c *ic, unsigned section, unsigned n)
+static struct journal_entry *access_journal_entry(struct dm_integrity_c *ic, unsigned int section, unsigned int n)
{
- unsigned rel_sector, offset;
+ unsigned int rel_sector, offset;
struct journal_sector *js;
access_journal_check(ic, section, n, true, "access_journal_entry");
@@ -786,7 +791,7 @@ static struct journal_entry *access_journal_entry(struct dm_integrity_c *ic, uns
return (struct journal_entry *)((char *)js + offset * ic->journal_entry_size);
}
-static struct journal_sector *access_journal_data(struct dm_integrity_c *ic, unsigned section, unsigned n)
+static struct journal_sector *access_journal_data(struct dm_integrity_c *ic, unsigned int section, unsigned int n)
{
n <<= ic->sb->log2_sectors_per_block;
@@ -797,11 +802,11 @@ static struct journal_sector *access_journal_data(struct dm_integrity_c *ic, uns
return access_journal(ic, section, n);
}
-static void section_mac(struct dm_integrity_c *ic, unsigned section, __u8 result[JOURNAL_MAC_SIZE])
+static void section_mac(struct dm_integrity_c *ic, unsigned int section, __u8 result[JOURNAL_MAC_SIZE])
{
SHASH_DESC_ON_STACK(desc, ic->journal_mac);
int r;
- unsigned j, size;
+ unsigned int j, size;
desc->tfm = ic->journal_mac;
@@ -821,7 +826,7 @@ static void section_mac(struct dm_integrity_c *ic, unsigned section, __u8 result
}
section_le = cpu_to_le64(section);
- r = crypto_shash_update(desc, (__u8 *)&section_le, sizeof section_le);
+ r = crypto_shash_update(desc, (__u8 *)&section_le, sizeof(section_le));
if (unlikely(r < 0)) {
dm_integrity_io_error(ic, "crypto_shash_update", r);
goto err;
@@ -830,7 +835,8 @@ static void section_mac(struct dm_integrity_c *ic, unsigned section, __u8 result
for (j = 0; j < ic->journal_section_entries; j++) {
struct journal_entry *je = access_journal_entry(ic, section, j);
- r = crypto_shash_update(desc, (__u8 *)&je->u.sector, sizeof je->u.sector);
+
+ r = crypto_shash_update(desc, (__u8 *)&je->u.sector, sizeof(je->u.sector));
if (unlikely(r < 0)) {
dm_integrity_io_error(ic, "crypto_shash_update", r);
goto err;
@@ -866,10 +872,10 @@ err:
memset(result, 0, JOURNAL_MAC_SIZE);
}
-static void rw_section_mac(struct dm_integrity_c *ic, unsigned section, bool wr)
+static void rw_section_mac(struct dm_integrity_c *ic, unsigned int section, bool wr)
{
__u8 result[JOURNAL_MAC_SIZE];
- unsigned j;
+ unsigned int j;
if (!ic->journal_mac)
return;
@@ -893,17 +899,18 @@ static void rw_section_mac(struct dm_integrity_c *ic, unsigned section, bool wr)
static void complete_journal_op(void *context)
{
struct journal_completion *comp = context;
+
BUG_ON(!atomic_read(&comp->in_flight));
if (likely(atomic_dec_and_test(&comp->in_flight)))
complete(&comp->comp);
}
-static void xor_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section,
- unsigned n_sections, struct journal_completion *comp)
+static void xor_journal(struct dm_integrity_c *ic, bool encrypt, unsigned int section,
+ unsigned int n_sections, struct journal_completion *comp)
{
struct async_submit_ctl submit;
size_t n_bytes = (size_t)(n_sections * ic->journal_section_sectors) << SECTOR_SHIFT;
- unsigned pl_index, pl_offset, section_index;
+ unsigned int pl_index, pl_offset, section_index;
struct page_list *source_pl, *target_pl;
if (likely(encrypt)) {
@@ -928,7 +935,8 @@ static void xor_journal(struct dm_integrity_c *ic, bool encrypt, unsigned sectio
struct page *dst_page;
while (unlikely(pl_index == section_index)) {
- unsigned dummy;
+ unsigned int dummy;
+
if (likely(encrypt))
rw_section_mac(ic, section, true);
section++;
@@ -955,9 +963,10 @@ static void xor_journal(struct dm_integrity_c *ic, bool encrypt, unsigned sectio
async_tx_issue_pending_all();
}
-static void complete_journal_encrypt(struct crypto_async_request *req, int err)
+static void complete_journal_encrypt(void *data, int err)
{
- struct journal_completion *comp = req->data;
+ struct journal_completion *comp = data;
+
if (unlikely(err)) {
if (likely(err == -EINPROGRESS)) {
complete(&comp->ic->crypto_backoff);
@@ -971,6 +980,7 @@ static void complete_journal_encrypt(struct crypto_async_request *req, int err)
static bool do_crypt(bool encrypt, struct skcipher_request *req, struct journal_completion *comp)
{
int r;
+
skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
complete_journal_encrypt, comp);
if (likely(encrypt))
@@ -990,8 +1000,8 @@ static bool do_crypt(bool encrypt, struct skcipher_request *req, struct journal_
return false;
}
-static void crypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section,
- unsigned n_sections, struct journal_completion *comp)
+static void crypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned int section,
+ unsigned int n_sections, struct journal_completion *comp)
{
struct scatterlist **source_sg;
struct scatterlist **target_sg;
@@ -1008,7 +1018,7 @@ static void crypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned sect
do {
struct skcipher_request *req;
- unsigned ivsize;
+ unsigned int ivsize;
char *iv;
if (likely(encrypt))
@@ -1034,8 +1044,8 @@ static void crypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned sect
complete_journal_op(comp);
}
-static void encrypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section,
- unsigned n_sections, struct journal_completion *comp)
+static void encrypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned int section,
+ unsigned int n_sections, struct journal_completion *comp)
{
if (ic->journal_xor)
return xor_journal(ic, encrypt, section, n_sections, comp);
@@ -1046,18 +1056,19 @@ static void encrypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned se
static void complete_journal_io(unsigned long error, void *context)
{
struct journal_completion *comp = context;
+
if (unlikely(error != 0))
dm_integrity_io_error(comp->ic, "writing journal", -EIO);
complete_journal_op(comp);
}
static void rw_journal_sectors(struct dm_integrity_c *ic, blk_opf_t opf,
- unsigned sector, unsigned n_sectors,
+ unsigned int sector, unsigned int n_sectors,
struct journal_completion *comp)
{
struct dm_io_request io_req;
struct dm_io_region io_loc;
- unsigned pl_index, pl_offset;
+ unsigned int pl_index, pl_offset;
int r;
if (unlikely(dm_integrity_failed(ic))) {
@@ -1099,10 +1110,10 @@ static void rw_journal_sectors(struct dm_integrity_c *ic, blk_opf_t opf,
}
static void rw_journal(struct dm_integrity_c *ic, blk_opf_t opf,
- unsigned section, unsigned n_sections,
+ unsigned int section, unsigned int n_sections,
struct journal_completion *comp)
{
- unsigned sector, n_sectors;
+ unsigned int sector, n_sectors;
sector = section * ic->journal_section_sectors;
n_sectors = n_sections * ic->journal_section_sectors;
@@ -1110,12 +1121,12 @@ static void rw_journal(struct dm_integrity_c *ic, blk_opf_t opf,
rw_journal_sectors(ic, opf, sector, n_sectors, comp);
}
-static void write_journal(struct dm_integrity_c *ic, unsigned commit_start, unsigned commit_sections)
+static void write_journal(struct dm_integrity_c *ic, unsigned int commit_start, unsigned int commit_sections)
{
struct journal_completion io_comp;
struct journal_completion crypt_comp_1;
struct journal_completion crypt_comp_2;
- unsigned i;
+ unsigned int i;
io_comp.ic = ic;
init_completion(&io_comp.comp);
@@ -1135,7 +1146,8 @@ static void write_journal(struct dm_integrity_c *ic, unsigned commit_start, unsi
rw_journal(ic, REQ_OP_WRITE | REQ_FUA | REQ_SYNC, commit_start,
commit_sections, &io_comp);
} else {
- unsigned to_end;
+ unsigned int to_end;
+
io_comp.in_flight = (atomic_t)ATOMIC_INIT(2);
to_end = ic->journal_sections - commit_start;
if (ic->journal_io) {
@@ -1172,15 +1184,15 @@ static void write_journal(struct dm_integrity_c *ic, unsigned commit_start, unsi
wait_for_completion_io(&io_comp.comp);
}
-static void copy_from_journal(struct dm_integrity_c *ic, unsigned section, unsigned offset,
- unsigned n_sectors, sector_t target, io_notify_fn fn, void *data)
+static void copy_from_journal(struct dm_integrity_c *ic, unsigned int section, unsigned int offset,
+ unsigned int n_sectors, sector_t target, io_notify_fn fn, void *data)
{
struct dm_io_request io_req;
struct dm_io_region io_loc;
int r;
- unsigned sector, pl_index, pl_offset;
+ unsigned int sector, pl_index, pl_offset;
- BUG_ON((target | n_sectors | offset) & (unsigned)(ic->sectors_per_block - 1));
+ BUG_ON((target | n_sectors | offset) & (unsigned int)(ic->sectors_per_block - 1));
if (unlikely(dm_integrity_failed(ic))) {
fn(-1UL, data);
@@ -1221,10 +1233,11 @@ static bool add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *
struct rb_node **n = &ic->in_progress.rb_node;
struct rb_node *parent;
- BUG_ON((new_range->logical_sector | new_range->n_sectors) & (unsigned)(ic->sectors_per_block - 1));
+ BUG_ON((new_range->logical_sector | new_range->n_sectors) & (unsigned int)(ic->sectors_per_block - 1));
if (likely(check_waiting)) {
struct dm_integrity_range *range;
+
list_for_each_entry(range, &ic->wait_list, wait_entry) {
if (unlikely(ranges_overlap(range, new_range)))
return false;
@@ -1237,13 +1250,12 @@ static bool add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *
struct dm_integrity_range *range = container_of(*n, struct dm_integrity_range, node);
parent = *n;
- if (new_range->logical_sector + new_range->n_sectors <= range->logical_sector) {
+ if (new_range->logical_sector + new_range->n_sectors <= range->logical_sector)
n = &range->node.rb_left;
- } else if (new_range->logical_sector >= range->logical_sector + range->n_sectors) {
+ else if (new_range->logical_sector >= range->logical_sector + range->n_sectors)
n = &range->node.rb_right;
- } else {
+ else
return false;
- }
}
rb_link_node(&new_range->node, parent, n);
@@ -1259,6 +1271,7 @@ static void remove_range_unlocked(struct dm_integrity_c *ic, struct dm_integrity
struct dm_integrity_range *last_range =
list_first_entry(&ic->wait_list, struct dm_integrity_range, wait_entry);
struct task_struct *last_range_task;
+
last_range_task = last_range->task;
list_del(&last_range->wait_entry);
if (!add_new_range(ic, last_range, false)) {
@@ -1318,6 +1331,7 @@ static void add_journal_node(struct dm_integrity_c *ic, struct journal_node *nod
while (*link) {
struct journal_node *j;
+
parent = *link;
j = container_of(parent, struct journal_node, node);
if (sector < j->sector)
@@ -1339,28 +1353,29 @@ static void remove_journal_node(struct dm_integrity_c *ic, struct journal_node *
#define NOT_FOUND (-1U)
-static unsigned find_journal_node(struct dm_integrity_c *ic, sector_t sector, sector_t *next_sector)
+static unsigned int find_journal_node(struct dm_integrity_c *ic, sector_t sector, sector_t *next_sector)
{
struct rb_node *n = ic->journal_tree_root.rb_node;
- unsigned found = NOT_FOUND;
+ unsigned int found = NOT_FOUND;
+
*next_sector = (sector_t)-1;
while (n) {
struct journal_node *j = container_of(n, struct journal_node, node);
- if (sector == j->sector) {
+
+ if (sector == j->sector)
found = j - ic->journal_tree;
- }
+
if (sector < j->sector) {
*next_sector = j->sector;
n = j->node.rb_left;
- } else {
+ } else
n = j->node.rb_right;
- }
}
return found;
}
-static bool test_journal_node(struct dm_integrity_c *ic, unsigned pos, sector_t sector)
+static bool test_journal_node(struct dm_integrity_c *ic, unsigned int pos, sector_t sector)
{
struct journal_node *node, *next_node;
struct rb_node *next;
@@ -1385,7 +1400,7 @@ static bool find_newer_committed_node(struct dm_integrity_c *ic, struct journal_
{
struct rb_node *next;
struct journal_node *next_node;
- unsigned next_section;
+ unsigned int next_section;
BUG_ON(RB_EMPTY_NODE(&node->node));
@@ -1398,7 +1413,7 @@ static bool find_newer_committed_node(struct dm_integrity_c *ic, struct journal_
if (next_node->sector != node->sector)
return false;
- next_section = (unsigned)(next_node - ic->journal_tree) / ic->journal_section_entries;
+ next_section = (unsigned int)(next_node - ic->journal_tree) / ic->journal_section_entries;
if (next_section >= ic->committed_section &&
next_section < ic->committed_section + ic->n_committed_sections)
return true;
@@ -1413,17 +1428,17 @@ static bool find_newer_committed_node(struct dm_integrity_c *ic, struct journal_
#define TAG_CMP 2
static int dm_integrity_rw_tag(struct dm_integrity_c *ic, unsigned char *tag, sector_t *metadata_block,
- unsigned *metadata_offset, unsigned total_size, int op)
+ unsigned int *metadata_offset, unsigned int total_size, int op)
{
#define MAY_BE_FILLER 1
#define MAY_BE_HASH 2
- unsigned hash_offset = 0;
- unsigned may_be = MAY_BE_HASH | (ic->discard ? MAY_BE_FILLER : 0);
+ unsigned int hash_offset = 0;
+ unsigned int may_be = MAY_BE_HASH | (ic->discard ? MAY_BE_FILLER : 0);
do {
unsigned char *data, *dp;
struct dm_buffer *b;
- unsigned to_copy;
+ unsigned int to_copy;
int r;
r = dm_integrity_failed(ic);
@@ -1453,7 +1468,7 @@ static int dm_integrity_rw_tag(struct dm_integrity_c *ic, unsigned char *tag, se
goto thorough_test;
}
} else {
- unsigned i, ts;
+ unsigned int i, ts;
thorough_test:
ts = total_size;
@@ -1483,9 +1498,8 @@ thorough_test:
*metadata_offset = 0;
}
- if (unlikely(!is_power_of_2(ic->tag_size))) {
+ if (unlikely(!is_power_of_2(ic->tag_size)))
hash_offset = (hash_offset + to_copy) % ic->tag_size;
- }
total_size -= to_copy;
} while (unlikely(total_size));
@@ -1505,6 +1519,7 @@ struct flush_request {
static void flush_notify(unsigned long error, void *fr_)
{
struct flush_request *fr = fr_;
+
if (unlikely(error != 0))
dm_integrity_io_error(fr->ic, "flushing disk cache", -EIO);
complete(&fr->comp);
@@ -1513,7 +1528,6 @@ static void flush_notify(unsigned long error, void *fr_)
static void dm_integrity_flush_buffers(struct dm_integrity_c *ic, bool flush_data)
{
int r;
-
struct flush_request fr;
if (!ic->meta_dev)
@@ -1545,6 +1559,7 @@ static void dm_integrity_flush_buffers(struct dm_integrity_c *ic, bool flush_dat
static void sleep_on_endio_wait(struct dm_integrity_c *ic)
{
DECLARE_WAITQUEUE(wait, current);
+
__add_wait_queue(&ic->endio_wait, &wait);
__set_current_state(TASK_UNINTERRUPTIBLE);
spin_unlock_irq(&ic->endio_wait.lock);
@@ -1582,11 +1597,14 @@ static void submit_flush_bio(struct dm_integrity_c *ic, struct dm_integrity_io *
static void do_endio(struct dm_integrity_c *ic, struct bio *bio)
{
- int r = dm_integrity_failed(ic);
+ int r;
+
+ r = dm_integrity_failed(ic);
if (unlikely(r) && !bio->bi_status)
bio->bi_status = errno_to_blk_status(r);
if (unlikely(ic->synchronous_mode) && bio_op(bio) == REQ_OP_WRITE) {
unsigned long flags;
+
spin_lock_irqsave(&ic->endio_wait.lock, flags);
bio_list_add(&ic->synchronous_bios, bio);
queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, 0);
@@ -1618,7 +1636,6 @@ static void dec_in_flight(struct dm_integrity_io *dio)
schedule_autocommit(ic);
bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
-
if (unlikely(dio->bi_status) && !bio->bi_status)
bio->bi_status = dio->bi_status;
if (likely(!bio->bi_status) && unlikely(bio_sectors(bio) != dio->range.n_sectors)) {
@@ -1652,7 +1669,7 @@ static void integrity_sector_checksum(struct dm_integrity_c *ic, sector_t sector
__le64 sector_le = cpu_to_le64(sector);
SHASH_DESC_ON_STACK(req, ic->internal_hash);
int r;
- unsigned digest_size;
+ unsigned int digest_size;
req->tfm = ic->internal_hash;
@@ -1670,7 +1687,7 @@ static void integrity_sector_checksum(struct dm_integrity_c *ic, sector_t sector
}
}
- r = crypto_shash_update(req, (const __u8 *)&sector_le, sizeof sector_le);
+ r = crypto_shash_update(req, (const __u8 *)&sector_le, sizeof(sector_le));
if (unlikely(r < 0)) {
dm_integrity_io_error(ic, "crypto_shash_update", r);
goto failed;
@@ -1709,13 +1726,13 @@ static void integrity_metadata(struct work_struct *w)
if (ic->internal_hash) {
struct bvec_iter iter;
struct bio_vec bv;
- unsigned digest_size = crypto_shash_digestsize(ic->internal_hash);
+ unsigned int digest_size = crypto_shash_digestsize(ic->internal_hash);
struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
char *checksums;
- unsigned extra_space = unlikely(digest_size > ic->tag_size) ? digest_size - ic->tag_size : 0;
- char checksums_onstack[max((size_t)HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
+ unsigned int extra_space = unlikely(digest_size > ic->tag_size) ? digest_size - ic->tag_size : 0;
+ char checksums_onstack[max_t(size_t, HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
sector_t sector;
- unsigned sectors_to_process;
+ unsigned int sectors_to_process;
if (unlikely(ic->mode == 'R'))
goto skip_io;
@@ -1735,14 +1752,15 @@ static void integrity_metadata(struct work_struct *w)
}
if (unlikely(dio->op == REQ_OP_DISCARD)) {
- sector_t bi_sector = dio->bio_details.bi_iter.bi_sector;
- unsigned bi_size = dio->bio_details.bi_iter.bi_size;
- unsigned max_size = likely(checksums != checksums_onstack) ? PAGE_SIZE : HASH_MAX_DIGESTSIZE;
- unsigned max_blocks = max_size / ic->tag_size;
+ unsigned int bi_size = dio->bio_details.bi_iter.bi_size;
+ unsigned int max_size = likely(checksums != checksums_onstack) ? PAGE_SIZE : HASH_MAX_DIGESTSIZE;
+ unsigned int max_blocks = max_size / ic->tag_size;
+
memset(checksums, DISCARD_FILLER, max_size);
while (bi_size) {
- unsigned this_step_blocks = bi_size >> (SECTOR_SHIFT + ic->sb->log2_sectors_per_block);
+ unsigned int this_step_blocks = bi_size >> (SECTOR_SHIFT + ic->sb->log2_sectors_per_block);
+
this_step_blocks = min(this_step_blocks, max_blocks);
r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset,
this_step_blocks * ic->tag_size, TAG_WRITE);
@@ -1752,13 +1770,7 @@ static void integrity_metadata(struct work_struct *w)
goto error;
}
- /*if (bi_size < this_step_blocks << (SECTOR_SHIFT + ic->sb->log2_sectors_per_block)) {
- printk("BUGG: bi_sector: %llx, bi_size: %u\n", bi_sector, bi_size);
- printk("BUGG: this_step_blocks: %u\n", this_step_blocks);
- BUG();
- }*/
bi_size -= this_step_blocks << (SECTOR_SHIFT + ic->sb->log2_sectors_per_block);
- bi_sector += this_step_blocks << ic->sb->log2_sectors_per_block;
}
if (likely(checksums != checksums_onstack))
@@ -1770,7 +1782,7 @@ static void integrity_metadata(struct work_struct *w)
sectors_to_process = dio->range.n_sectors;
__bio_for_each_segment(bv, bio, iter, dio->bio_details.bi_iter) {
- unsigned pos;
+ unsigned int pos;
char *mem, *checksums_ptr;
again:
@@ -1823,13 +1835,14 @@ again:
if (bip) {
struct bio_vec biv;
struct bvec_iter iter;
- unsigned data_to_process = dio->range.n_sectors;
+ unsigned int data_to_process = dio->range.n_sectors;
+
sector_to_block(ic, data_to_process);
data_to_process *= ic->tag_size;
bip_for_each_vec(biv, bip, iter) {
unsigned char *tag;
- unsigned this_len;
+ unsigned int this_len;
BUG_ON(PageHighMem(biv.bv_page));
tag = bvec_virt(&biv);
@@ -1867,11 +1880,13 @@ static int dm_integrity_map(struct dm_target *ti, struct bio *bio)
if (unlikely(dio->op == REQ_OP_DISCARD)) {
if (ti->max_io_len) {
sector_t sec = dm_target_offset(ti, bio->bi_iter.bi_sector);
- unsigned log2_max_io_len = __fls(ti->max_io_len);
+ unsigned int log2_max_io_len = __fls(ti->max_io_len);
sector_t start_boundary = sec >> log2_max_io_len;
sector_t end_boundary = (sec + bio_sectors(bio) - 1) >> log2_max_io_len;
+
if (start_boundary < end_boundary) {
sector_t len = ti->max_io_len - (sec & (ti->max_io_len - 1));
+
dm_accept_partial_bio(bio, len);
}
}
@@ -1897,7 +1912,7 @@ static int dm_integrity_map(struct dm_target *ti, struct bio *bio)
ic->provided_data_sectors);
return DM_MAPIO_KILL;
}
- if (unlikely((dio->range.logical_sector | bio_sectors(bio)) & (unsigned)(ic->sectors_per_block - 1))) {
+ if (unlikely((dio->range.logical_sector | bio_sectors(bio)) & (unsigned int)(ic->sectors_per_block - 1))) {
DMERR("Bio not aligned on %u sectors: 0x%llx, 0x%x",
ic->sectors_per_block,
dio->range.logical_sector, bio_sectors(bio));
@@ -1907,6 +1922,7 @@ static int dm_integrity_map(struct dm_target *ti, struct bio *bio)
if (ic->sectors_per_block > 1 && likely(dio->op != REQ_OP_DISCARD)) {
struct bvec_iter iter;
struct bio_vec bv;
+
bio_for_each_segment(bv, bio, iter) {
if (unlikely(bv.bv_len & ((ic->sectors_per_block << SECTOR_SHIFT) - 1))) {
DMERR("Bio vector (%u,%u) is not aligned on %u-sector boundary",
@@ -1919,7 +1935,8 @@ static int dm_integrity_map(struct dm_target *ti, struct bio *bio)
bip = bio_integrity(bio);
if (!ic->internal_hash) {
if (bip) {
- unsigned wanted_tag_size = bio_sectors(bio) >> ic->sb->log2_sectors_per_block;
+ unsigned int wanted_tag_size = bio_sectors(bio) >> ic->sb->log2_sectors_per_block;
+
if (ic->log2_tag_size >= 0)
wanted_tag_size <<= ic->log2_tag_size;
else
@@ -1949,11 +1966,11 @@ static int dm_integrity_map(struct dm_target *ti, struct bio *bio)
}
static bool __journal_read_write(struct dm_integrity_io *dio, struct bio *bio,
- unsigned journal_section, unsigned journal_entry)
+ unsigned int journal_section, unsigned int journal_entry)
{
struct dm_integrity_c *ic = dio->ic;
sector_t logical_sector;
- unsigned n_sectors;
+ unsigned int n_sectors;
logical_sector = dio->range.logical_sector;
n_sectors = dio->range.n_sectors;
@@ -1976,7 +1993,7 @@ retry_kmap:
if (unlikely(dio->op == REQ_OP_READ)) {
struct journal_sector *js;
char *mem_ptr;
- unsigned s;
+ unsigned int s;
if (unlikely(journal_entry_is_inprogress(je))) {
flush_dcache_page(bv.bv_page);
@@ -1998,7 +2015,7 @@ retry_kmap:
} while (++s < ic->sectors_per_block);
#ifdef INTERNAL_VERIFY
if (ic->internal_hash) {
- char checksums_onstack[max((size_t)HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
+ char checksums_onstack[max_t(size_t, HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
integrity_sector_checksum(ic, logical_sector, mem + bv.bv_offset, checksums_onstack);
if (unlikely(memcmp(checksums_onstack, journal_entry_tag(ic, je), ic->tag_size))) {
@@ -2013,31 +2030,32 @@ retry_kmap:
if (!ic->internal_hash) {
struct bio_integrity_payload *bip = bio_integrity(bio);
- unsigned tag_todo = ic->tag_size;
+ unsigned int tag_todo = ic->tag_size;
char *tag_ptr = journal_entry_tag(ic, je);
- if (bip) do {
- struct bio_vec biv = bvec_iter_bvec(bip->bip_vec, bip->bip_iter);
- unsigned tag_now = min(biv.bv_len, tag_todo);
- char *tag_addr;
- BUG_ON(PageHighMem(biv.bv_page));
- tag_addr = bvec_virt(&biv);
- if (likely(dio->op == REQ_OP_WRITE))
- memcpy(tag_ptr, tag_addr, tag_now);
- else
- memcpy(tag_addr, tag_ptr, tag_now);
- bvec_iter_advance(bip->bip_vec, &bip->bip_iter, tag_now);
- tag_ptr += tag_now;
- tag_todo -= tag_now;
- } while (unlikely(tag_todo)); else {
- if (likely(dio->op == REQ_OP_WRITE))
- memset(tag_ptr, 0, tag_todo);
- }
+ if (bip) {
+ do {
+ struct bio_vec biv = bvec_iter_bvec(bip->bip_vec, bip->bip_iter);
+ unsigned int tag_now = min(biv.bv_len, tag_todo);
+ char *tag_addr;
+
+ BUG_ON(PageHighMem(biv.bv_page));
+ tag_addr = bvec_virt(&biv);
+ if (likely(dio->op == REQ_OP_WRITE))
+ memcpy(tag_ptr, tag_addr, tag_now);
+ else
+ memcpy(tag_addr, tag_ptr, tag_now);
+ bvec_iter_advance(bip->bip_vec, &bip->bip_iter, tag_now);
+ tag_ptr += tag_now;
+ tag_todo -= tag_now;
+ } while (unlikely(tag_todo));
+ } else if (likely(dio->op == REQ_OP_WRITE))
+ memset(tag_ptr, 0, tag_todo);
}
if (likely(dio->op == REQ_OP_WRITE)) {
struct journal_sector *js;
- unsigned s;
+ unsigned int s;
js = access_journal_data(ic, journal_section, journal_entry);
memcpy(js, mem + bv.bv_offset, ic->sectors_per_block << SECTOR_SHIFT);
@@ -2048,9 +2066,11 @@ retry_kmap:
} while (++s < ic->sectors_per_block);
if (ic->internal_hash) {
- unsigned digest_size = crypto_shash_digestsize(ic->internal_hash);
+ unsigned int digest_size = crypto_shash_digestsize(ic->internal_hash);
+
if (unlikely(digest_size > ic->tag_size)) {
char checksums_onstack[HASH_MAX_DIGESTSIZE];
+
integrity_sector_checksum(ic, logical_sector, (char *)js, checksums_onstack);
memcpy(journal_entry_tag(ic, je), checksums_onstack, ic->tag_size);
} else
@@ -2080,14 +2100,12 @@ retry_kmap:
smp_mb();
if (unlikely(waitqueue_active(&ic->copy_to_journal_wait)))
wake_up(&ic->copy_to_journal_wait);
- if (READ_ONCE(ic->free_sectors) <= ic->free_sectors_threshold) {
+ if (READ_ONCE(ic->free_sectors) <= ic->free_sectors_threshold)
queue_work(ic->commit_wq, &ic->commit_work);
- } else {
+ else
schedule_autocommit(ic);
- }
- } else {
+ } else
remove_range(ic, &dio->range);
- }
if (unlikely(bio->bi_iter.bi_size)) {
sector_t area, offset;
@@ -2105,11 +2123,12 @@ static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map
{
struct dm_integrity_c *ic = dio->ic;
struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
- unsigned journal_section, journal_entry;
- unsigned journal_read_pos;
+ unsigned int journal_section, journal_entry;
+ unsigned int journal_read_pos;
struct completion read_comp;
bool discard_retried = false;
bool need_sync_io = ic->internal_hash && dio->op == REQ_OP_READ;
+
if (unlikely(dio->op == REQ_OP_DISCARD) && ic->mode != 'D')
need_sync_io = true;
@@ -2131,8 +2150,8 @@ retry:
journal_read_pos = NOT_FOUND;
if (ic->mode == 'J' && likely(dio->op != REQ_OP_DISCARD)) {
if (dio->op == REQ_OP_WRITE) {
- unsigned next_entry, i, pos;
- unsigned ws, we, range_sectors;
+ unsigned int next_entry, i, pos;
+ unsigned int ws, we, range_sectors;
dio->range.n_sectors = min(dio->range.n_sectors,
(sector_t)ic->free_sectors << ic->sb->log2_sectors_per_block);
@@ -2180,13 +2199,15 @@ retry:
goto journal_read_write;
} else {
sector_t next_sector;
+
journal_read_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
if (likely(journal_read_pos == NOT_FOUND)) {
if (unlikely(dio->range.n_sectors > next_sector - dio->range.logical_sector))
dio->range.n_sectors = next_sector - dio->range.logical_sector;
} else {
- unsigned i;
- unsigned jp = journal_read_pos + 1;
+ unsigned int i;
+ unsigned int jp = journal_read_pos + 1;
+
for (i = ic->sectors_per_block; i < dio->range.n_sectors; i += ic->sectors_per_block, jp++) {
if (!test_journal_node(ic, jp, dio->range.logical_sector + i))
break;
@@ -2218,7 +2239,9 @@ offload_to_thread:
*/
if (journal_read_pos != NOT_FOUND) {
sector_t next_sector;
- unsigned new_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
+ unsigned int new_pos;
+
+ new_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
if (unlikely(new_pos != journal_read_pos)) {
remove_range_unlocked(ic, &dio->range);
goto retry;
@@ -2227,7 +2250,9 @@ offload_to_thread:
}
if (ic->mode == 'J' && likely(dio->op == REQ_OP_DISCARD) && !discard_retried) {
sector_t next_sector;
- unsigned new_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
+ unsigned int new_pos;
+
+ new_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
if (unlikely(new_pos != NOT_FOUND) ||
unlikely(next_sector < dio->range.logical_sector - dio->range.n_sectors)) {
remove_range_unlocked(ic, &dio->range);
@@ -2307,7 +2332,6 @@ offload_to_thread:
else
skip_check:
dec_in_flight(dio);
-
} else {
INIT_WORK(&dio->work, integrity_metadata);
queue_work(ic->metadata_wq, &dio->work);
@@ -2354,8 +2378,8 @@ static void pad_uncommitted(struct dm_integrity_c *ic)
static void integrity_commit(struct work_struct *w)
{
struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, commit_work);
- unsigned commit_start, commit_sections;
- unsigned i, j, n;
+ unsigned int commit_start, commit_sections;
+ unsigned int i, j, n;
struct bio *flushes;
del_timer(&ic->autocommit_timer);
@@ -2382,11 +2406,13 @@ static void integrity_commit(struct work_struct *w)
for (n = 0; n < commit_sections; n++) {
for (j = 0; j < ic->journal_section_entries; j++) {
struct journal_entry *je;
+
je = access_journal_entry(ic, i, j);
io_wait_event(ic->copy_to_journal_wait, !journal_entry_is_inprogress(je));
}
for (j = 0; j < ic->journal_section_sectors; j++) {
struct journal_sector *js;
+
js = access_journal(ic, i, j);
js->commit_id = dm_integrity_commit_id(ic, i, j, ic->commit_seq);
}
@@ -2412,6 +2438,7 @@ static void integrity_commit(struct work_struct *w)
release_flush_bios:
while (flushes) {
struct bio *next = flushes->bi_next;
+
flushes->bi_next = NULL;
do_endio(ic, flushes);
flushes = next;
@@ -2423,6 +2450,7 @@ static void complete_copy_from_journal(unsigned long error, void *context)
struct journal_io *io = context;
struct journal_completion *comp = io->comp;
struct dm_integrity_c *ic = comp->ic;
+
remove_range(ic, &io->range);
mempool_free(io, &ic->journal_io_mempool);
if (unlikely(error != 0))
@@ -2433,17 +2461,18 @@ static void complete_copy_from_journal(unsigned long error, void *context)
static void restore_last_bytes(struct dm_integrity_c *ic, struct journal_sector *js,
struct journal_entry *je)
{
- unsigned s = 0;
+ unsigned int s = 0;
+
do {
js->commit_id = je->last_bytes[s];
js++;
} while (++s < ic->sectors_per_block);
}
-static void do_journal_write(struct dm_integrity_c *ic, unsigned write_start,
- unsigned write_sections, bool from_replay)
+static void do_journal_write(struct dm_integrity_c *ic, unsigned int write_start,
+ unsigned int write_sections, bool from_replay)
{
- unsigned i, j, n;
+ unsigned int i, j, n;
struct journal_completion comp;
struct blk_plug plug;
@@ -2462,9 +2491,9 @@ static void do_journal_write(struct dm_integrity_c *ic, unsigned write_start,
for (j = 0; j < ic->journal_section_entries; j++) {
struct journal_entry *je = access_journal_entry(ic, i, j);
sector_t sec, area, offset;
- unsigned k, l, next_loop;
+ unsigned int k, l, next_loop;
sector_t metadata_block;
- unsigned metadata_offset;
+ unsigned int metadata_offset;
struct journal_io *io;
if (journal_entry_is_unused(je))
@@ -2472,7 +2501,7 @@ static void do_journal_write(struct dm_integrity_c *ic, unsigned write_start,
BUG_ON(unlikely(journal_entry_is_inprogress(je)) && !from_replay);
sec = journal_entry_get_sector(je);
if (unlikely(from_replay)) {
- if (unlikely(sec & (unsigned)(ic->sectors_per_block - 1))) {
+ if (unlikely(sec & (unsigned int)(ic->sectors_per_block - 1))) {
dm_integrity_io_error(ic, "invalid sector in journal", -EIO);
sec &= ~(sector_t)(ic->sectors_per_block - 1);
}
@@ -2486,6 +2515,7 @@ static void do_journal_write(struct dm_integrity_c *ic, unsigned write_start,
for (k = j + 1; k < ic->journal_section_entries; k++) {
struct journal_entry *je2 = access_journal_entry(ic, i, k);
sector_t sec2, area2, offset2;
+
if (journal_entry_is_unused(je2))
break;
BUG_ON(unlikely(journal_entry_is_inprogress(je2)) && !from_replay);
@@ -2533,9 +2563,8 @@ static void do_journal_write(struct dm_integrity_c *ic, unsigned write_start,
mempool_free(io, &ic->journal_io_mempool);
goto skip_io;
}
- for (l = j; l < k; l++) {
+ for (l = j; l < k; l++)
remove_journal_node(ic, &section_node[l]);
- }
}
spin_unlock_irq(&ic->endio_wait.lock);
@@ -2562,9 +2591,8 @@ static void do_journal_write(struct dm_integrity_c *ic, unsigned write_start,
journal_entry_set_unused(je2);
r = dm_integrity_rw_tag(ic, journal_entry_tag(ic, je2), &metadata_block, &metadata_offset,
ic->tag_size, TAG_WRITE);
- if (unlikely(r)) {
+ if (unlikely(r))
dm_integrity_io_error(ic, "reading tags", r);
- }
}
atomic_inc(&comp.in_flight);
@@ -2590,9 +2618,8 @@ skip_io:
static void integrity_writer(struct work_struct *w)
{
struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, writer_work);
- unsigned write_start, write_sections;
-
- unsigned prev_free_sectors;
+ unsigned int write_start, write_sections;
+ unsigned int prev_free_sectors;
spin_lock_irq(&ic->endio_wait.lock);
write_start = ic->committed_section;
@@ -2639,12 +2666,12 @@ static void integrity_recalc(struct work_struct *w)
struct dm_io_region io_loc;
sector_t area, offset;
sector_t metadata_block;
- unsigned metadata_offset;
+ unsigned int metadata_offset;
sector_t logical_sector, n_sectors;
__u8 *t;
- unsigned i;
+ unsigned int i;
int r;
- unsigned super_counter = 0;
+ unsigned int super_counter = 0;
DEBUG_print("start recalculation... (position %llx)\n", le64_to_cpu(ic->sb->recalc_sector));
@@ -2668,7 +2695,7 @@ next_chunk:
get_area_and_offset(ic, range.logical_sector, &area, &offset);
range.n_sectors = min((sector_t)RECALC_SECTORS, ic->provided_data_sectors - range.logical_sector);
if (!ic->meta_dev)
- range.n_sectors = min(range.n_sectors, ((sector_t)1U << ic->sb->log2_interleave_sectors) - (unsigned)offset);
+ range.n_sectors = min(range.n_sectors, ((sector_t)1U << ic->sb->log2_interleave_sectors) - (unsigned int)offset);
add_new_range_and_wait(ic, &range);
spin_unlock_irq(&ic->endio_wait.lock);
@@ -2676,9 +2703,9 @@ next_chunk:
n_sectors = range.n_sectors;
if (ic->mode == 'B') {
- if (block_bitmap_op(ic, ic->recalc_bitmap, logical_sector, n_sectors, BITMAP_OP_TEST_ALL_CLEAR)) {
+ if (block_bitmap_op(ic, ic->recalc_bitmap, logical_sector, n_sectors, BITMAP_OP_TEST_ALL_CLEAR))
goto advance_and_next;
- }
+
while (block_bitmap_op(ic, ic->recalc_bitmap, logical_sector,
ic->sectors_per_block, BITMAP_OP_TEST_ALL_CLEAR)) {
logical_sector += ic->sectors_per_block;
@@ -2697,9 +2724,9 @@ next_chunk:
if (unlikely(++super_counter == RECALC_WRITE_SUPER)) {
recalc_write_super(ic);
- if (ic->mode == 'B') {
+ if (ic->mode == 'B')
queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, ic->bitmap_flush_interval);
- }
+
super_counter = 0;
}
@@ -2737,6 +2764,7 @@ next_chunk:
if (ic->mode == 'B') {
sector_t start, end;
+
start = (range.logical_sector >>
(ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit)) <<
(ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
@@ -2859,10 +2887,10 @@ static void bitmap_flush_work(struct work_struct *work)
}
-static void init_journal(struct dm_integrity_c *ic, unsigned start_section,
- unsigned n_sections, unsigned char commit_seq)
+static void init_journal(struct dm_integrity_c *ic, unsigned int start_section,
+ unsigned int n_sections, unsigned char commit_seq)
{
- unsigned i, j, n;
+ unsigned int i, j, n;
if (!n_sections)
return;
@@ -2872,12 +2900,14 @@ static void init_journal(struct dm_integrity_c *ic, unsigned start_section,
wraparound_section(ic, &i);
for (j = 0; j < ic->journal_section_sectors; j++) {
struct journal_sector *js = access_journal(ic, i, j);
+
BUILD_BUG_ON(sizeof(js->sectors) != JOURNAL_SECTOR_DATA);
memset(&js->sectors, 0, sizeof(js->sectors));
js->commit_id = dm_integrity_commit_id(ic, i, j, commit_seq);
}
for (j = 0; j < ic->journal_section_entries; j++) {
struct journal_entry *je = access_journal_entry(ic, i, j);
+
journal_entry_set_unused(je);
}
}
@@ -2885,9 +2915,10 @@ static void init_journal(struct dm_integrity_c *ic, unsigned start_section,
write_journal(ic, start_section, n_sections);
}
-static int find_commit_seq(struct dm_integrity_c *ic, unsigned i, unsigned j, commit_id_t id)
+static int find_commit_seq(struct dm_integrity_c *ic, unsigned int i, unsigned int j, commit_id_t id)
{
unsigned char k;
+
for (k = 0; k < N_COMMIT_IDS; k++) {
if (dm_integrity_commit_id(ic, i, j, k) == id)
return k;
@@ -2898,11 +2929,11 @@ static int find_commit_seq(struct dm_integrity_c *ic, unsigned i, unsigned j, co
static void replay_journal(struct dm_integrity_c *ic)
{
- unsigned i, j;
+ unsigned int i, j;
bool used_commit_ids[N_COMMIT_IDS];
- unsigned max_commit_id_sections[N_COMMIT_IDS];
- unsigned write_start, write_sections;
- unsigned continue_section;
+ unsigned int max_commit_id_sections[N_COMMIT_IDS];
+ unsigned int write_start, write_sections;
+ unsigned int continue_section;
bool journal_empty;
unsigned char unused, last_used, want_commit_seq;
@@ -2922,6 +2953,7 @@ static void replay_journal(struct dm_integrity_c *ic)
DEBUG_bytes(lowmem_page_address(ic->journal_io[0].page), 64, "read journal");
if (ic->journal_io) {
struct journal_completion crypt_comp;
+
crypt_comp.ic = ic;
init_completion(&crypt_comp.comp);
crypt_comp.in_flight = (atomic_t)ATOMIC_INIT(0);
@@ -2935,12 +2967,13 @@ static void replay_journal(struct dm_integrity_c *ic)
goto clear_journal;
journal_empty = true;
- memset(used_commit_ids, 0, sizeof used_commit_ids);
- memset(max_commit_id_sections, 0, sizeof max_commit_id_sections);
+ memset(used_commit_ids, 0, sizeof(used_commit_ids));
+ memset(max_commit_id_sections, 0, sizeof(max_commit_id_sections));
for (i = 0; i < ic->journal_sections; i++) {
for (j = 0; j < ic->journal_section_sectors; j++) {
int k;
struct journal_sector *js = access_journal(ic, i, j);
+
k = find_commit_seq(ic, i, j, js->commit_id);
if (k < 0)
goto clear_journal;
@@ -2950,6 +2983,7 @@ static void replay_journal(struct dm_integrity_c *ic)
if (journal_empty) {
for (j = 0; j < ic->journal_section_entries; j++) {
struct journal_entry *je = access_journal_entry(ic, i, j);
+
if (!journal_entry_is_unused(je)) {
journal_empty = false;
break;
@@ -3020,8 +3054,9 @@ brk:
ic->commit_seq = want_commit_seq;
DEBUG_print("continuing from section %u, commit seq %d\n", write_start, ic->commit_seq);
} else {
- unsigned s;
+ unsigned int s;
unsigned char erase_seq;
+
clear_journal:
DEBUG_print("clearing journal\n");
@@ -3058,7 +3093,7 @@ clear_journal:
static void dm_integrity_enter_synchronous_mode(struct dm_integrity_c *ic)
{
- DEBUG_print("dm_integrity_enter_synchronous_mode\n");
+ DEBUG_print("%s\n", __func__);
if (ic->mode == 'B') {
ic->bitmap_flush_interval = msecs_to_jiffies(10) + 1;
@@ -3074,7 +3109,7 @@ static int dm_integrity_reboot(struct notifier_block *n, unsigned long code, voi
{
struct dm_integrity_c *ic = container_of(n, struct dm_integrity_c, reboot_notifier);
- DEBUG_print("dm_integrity_reboot\n");
+ DEBUG_print("%s\n", __func__);
dm_integrity_enter_synchronous_mode(ic);
@@ -3231,6 +3266,7 @@ static void dm_integrity_resume(struct dm_target *ti)
DEBUG_print("testing recalc: %x\n", ic->sb->flags);
if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
__u64 recalc_pos = le64_to_cpu(ic->sb->recalc_sector);
+
DEBUG_print("recalc pos: %llx / %llx\n", recalc_pos, ic->provided_data_sectors);
if (recalc_pos < ic->provided_data_sectors) {
queue_work(ic->recalc_wq, &ic->recalc_work);
@@ -3252,10 +3288,10 @@ static void dm_integrity_resume(struct dm_target *ti)
}
static void dm_integrity_status(struct dm_target *ti, status_type_t type,
- unsigned status_flags, char *result, unsigned maxlen)
+ unsigned int status_flags, char *result, unsigned int maxlen)
{
struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private;
- unsigned arg_count;
+ unsigned int arg_count;
size_t sz = 0;
switch (type) {
@@ -3271,6 +3307,7 @@ static void dm_integrity_status(struct dm_target *ti, status_type_t type,
case STATUSTYPE_TABLE: {
__u64 watermark_percentage = (__u64)(ic->journal_entries - ic->free_sectors_threshold) * 100;
+
watermark_percentage += ic->journal_entries / 2;
do_div(watermark_percentage, ic->journal_entries);
arg_count = 3;
@@ -3305,7 +3342,7 @@ static void dm_integrity_status(struct dm_target *ti, status_type_t type,
DMEMIT(" interleave_sectors:%u", 1U << ic->sb->log2_interleave_sectors);
DMEMIT(" buffer_sectors:%u", 1U << ic->log2_buffer_sectors);
if (ic->mode == 'J') {
- DMEMIT(" journal_watermark:%u", (unsigned)watermark_percentage);
+ DMEMIT(" journal_watermark:%u", (unsigned int)watermark_percentage);
DMEMIT(" commit_time:%u", ic->autocommit_msec);
}
if (ic->mode == 'B') {
@@ -3384,7 +3421,7 @@ static void dm_integrity_io_hints(struct dm_target *ti, struct queue_limits *lim
static void calculate_journal_section_size(struct dm_integrity_c *ic)
{
- unsigned sector_space = JOURNAL_SECTOR_DATA;
+ unsigned int sector_space = JOURNAL_SECTOR_DATA;
ic->journal_sections = le32_to_cpu(ic->sb->journal_sections);
ic->journal_entry_size = roundup(offsetof(struct journal_entry, last_bytes[ic->sectors_per_block]) + ic->tag_size,
@@ -3430,6 +3467,7 @@ static int calculate_device_limits(struct dm_integrity_c *ic)
return -EINVAL;
} else {
__u64 meta_size = (ic->provided_data_sectors >> ic->sb->log2_sectors_per_block) * ic->tag_size;
+
meta_size = (meta_size + ((1U << (ic->log2_buffer_sectors + SECTOR_SHIFT)) - 1))
>> (ic->log2_buffer_sectors + SECTOR_SHIFT);
meta_size <<= ic->log2_buffer_sectors;
@@ -3447,6 +3485,7 @@ static void get_provided_data_sectors(struct dm_integrity_c *ic)
{
if (!ic->meta_dev) {
int test_bit;
+
ic->provided_data_sectors = 0;
for (test_bit = fls64(ic->meta_device_sectors) - 1; test_bit >= 3; test_bit--) {
__u64 prev_data_sectors = ic->provided_data_sectors;
@@ -3461,9 +3500,10 @@ static void get_provided_data_sectors(struct dm_integrity_c *ic)
}
}
-static int initialize_superblock(struct dm_integrity_c *ic, unsigned journal_sectors, unsigned interleave_sectors)
+static int initialize_superblock(struct dm_integrity_c *ic,
+ unsigned int journal_sectors, unsigned int interleave_sectors)
{
- unsigned journal_sections;
+ unsigned int journal_sections;
int test_bit;
memset(ic->sb, 0, SB_SECTORS << SECTOR_SHIFT);
@@ -3490,8 +3530,8 @@ static int initialize_superblock(struct dm_integrity_c *ic, unsigned journal_sec
if (!interleave_sectors)
interleave_sectors = DEFAULT_INTERLEAVE_SECTORS;
ic->sb->log2_interleave_sectors = __fls(interleave_sectors);
- ic->sb->log2_interleave_sectors = max((__u8)MIN_LOG2_INTERLEAVE_SECTORS, ic->sb->log2_interleave_sectors);
- ic->sb->log2_interleave_sectors = min((__u8)MAX_LOG2_INTERLEAVE_SECTORS, ic->sb->log2_interleave_sectors);
+ ic->sb->log2_interleave_sectors = max_t(__u8, MIN_LOG2_INTERLEAVE_SECTORS, ic->sb->log2_interleave_sectors);
+ ic->sb->log2_interleave_sectors = min_t(__u8, MAX_LOG2_INTERLEAVE_SECTORS, ic->sb->log2_interleave_sectors);
get_provided_data_sectors(ic);
if (!ic->provided_data_sectors)
@@ -3508,6 +3548,7 @@ try_smaller_buffer:
for (test_bit = fls(journal_sections) - 1; test_bit >= 0; test_bit--) {
__u32 prev_journal_sections = le32_to_cpu(ic->sb->journal_sections);
__u32 test_journal_sections = prev_journal_sections | (1U << test_bit);
+
if (test_journal_sections > journal_sections)
continue;
ic->sb->journal_sections = cpu_to_le32(test_journal_sections);
@@ -3548,7 +3589,7 @@ static void dm_integrity_set(struct dm_target *ti, struct dm_integrity_c *ic)
static void dm_integrity_free_page_list(struct page_list *pl)
{
- unsigned i;
+ unsigned int i;
if (!pl)
return;
@@ -3557,10 +3598,10 @@ static void dm_integrity_free_page_list(struct page_list *pl)
kvfree(pl);
}
-static struct page_list *dm_integrity_alloc_page_list(unsigned n_pages)
+static struct page_list *dm_integrity_alloc_page_list(unsigned int n_pages)
{
struct page_list *pl;
- unsigned i;
+ unsigned int i;
pl = kvmalloc_array(n_pages + 1, sizeof(struct page_list), GFP_KERNEL | __GFP_ZERO);
if (!pl)
@@ -3583,7 +3624,8 @@ static struct page_list *dm_integrity_alloc_page_list(unsigned n_pages)
static void dm_integrity_free_journal_scatterlist(struct dm_integrity_c *ic, struct scatterlist **sl)
{
- unsigned i;
+ unsigned int i;
+
for (i = 0; i < ic->journal_sections; i++)
kvfree(sl[i]);
kvfree(sl);
@@ -3593,7 +3635,7 @@ static struct scatterlist **dm_integrity_alloc_journal_scatterlist(struct dm_int
struct page_list *pl)
{
struct scatterlist **sl;
- unsigned i;
+ unsigned int i;
sl = kvmalloc_array(ic->journal_sections,
sizeof(struct scatterlist *),
@@ -3603,10 +3645,10 @@ static struct scatterlist **dm_integrity_alloc_journal_scatterlist(struct dm_int
for (i = 0; i < ic->journal_sections; i++) {
struct scatterlist *s;
- unsigned start_index, start_offset;
- unsigned end_index, end_offset;
- unsigned n_pages;
- unsigned idx;
+ unsigned int start_index, start_offset;
+ unsigned int end_index, end_offset;
+ unsigned int n_pages;
+ unsigned int idx;
page_list_location(ic, i, 0, &start_index, &start_offset);
page_list_location(ic, i, ic->journal_section_sectors - 1,
@@ -3624,7 +3666,8 @@ static struct scatterlist **dm_integrity_alloc_journal_scatterlist(struct dm_int
sg_init_table(s, n_pages);
for (idx = start_index; idx <= end_index; idx++) {
char *va = lowmem_page_address(pl[idx].page);
- unsigned start = 0, end = PAGE_SIZE;
+ unsigned int start = 0, end = PAGE_SIZE;
+
if (idx == start_index)
start = start_offset;
if (idx == end_index)
@@ -3642,7 +3685,7 @@ static void free_alg(struct alg_spec *a)
{
kfree_sensitive(a->alg_string);
kfree_sensitive(a->key);
- memset(a, 0, sizeof *a);
+ memset(a, 0, sizeof(*a));
}
static int get_alg_and_key(const char *arg, struct alg_spec *a, char **error, char *error_inval)
@@ -3711,7 +3754,7 @@ static int get_mac(struct crypto_shash **hash, struct alg_spec *a, char **error,
static int create_journal(struct dm_integrity_c *ic, char **error)
{
int r = 0;
- unsigned i;
+ unsigned int i;
__u64 journal_pages, journal_desc_size, journal_tree_size;
unsigned char *crypt_data = NULL, *crypt_iv = NULL;
struct skcipher_request *req = NULL;
@@ -3738,7 +3781,7 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
goto bad;
}
if (ic->journal_crypt_alg.alg_string) {
- unsigned ivsize, blocksize;
+ unsigned int ivsize, blocksize;
struct journal_completion comp;
comp.ic = ic;
@@ -3805,13 +3848,14 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
sg_init_table(sg, ic->journal_pages + 1);
for (i = 0; i < ic->journal_pages; i++) {
char *va = lowmem_page_address(ic->journal_xor[i].page);
+
clear_page(va);
sg_set_buf(&sg[i], va, PAGE_SIZE);
}
- sg_set_buf(&sg[i], &ic->commit_ids, sizeof ic->commit_ids);
+ sg_set_buf(&sg[i], &ic->commit_ids, sizeof(ic->commit_ids));
skcipher_request_set_crypt(req, sg, sg,
- PAGE_SIZE * ic->journal_pages + sizeof ic->commit_ids, crypt_iv);
+ PAGE_SIZE * ic->journal_pages + sizeof(ic->commit_ids), crypt_iv);
init_completion(&comp.comp);
comp.in_flight = (atomic_t)ATOMIC_INIT(1);
if (do_crypt(true, req, &comp))
@@ -3827,7 +3871,7 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
crypto_free_skcipher(ic->journal_crypt);
ic->journal_crypt = NULL;
} else {
- unsigned crypt_len = roundup(ivsize, blocksize);
+ unsigned int crypt_len = roundup(ivsize, blocksize);
req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
if (!req) {
@@ -3877,7 +3921,7 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
memset(crypt_iv, 0x00, ivsize);
memset(crypt_data, 0x00, crypt_len);
- memcpy(crypt_data, &section_le, min((size_t)crypt_len, sizeof(section_le)));
+ memcpy(crypt_data, &section_le, min_t(size_t, crypt_len, sizeof(section_le)));
sg_init_one(&sg, crypt_data, crypt_len);
skcipher_request_set_crypt(req, &sg, &sg, crypt_len, crypt_iv);
@@ -3915,7 +3959,8 @@ static int create_journal(struct dm_integrity_c *ic, char **error)
}
for (i = 0; i < N_COMMIT_IDS; i++) {
- unsigned j;
+ unsigned int j;
+
retest_commit_id:
for (j = 0; j < i; j++) {
if (ic->commit_ids[j] == ic->commit_ids[i]) {
@@ -3969,17 +4014,17 @@ bad:
* journal_mac
* recalculate
*/
-static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
+static int dm_integrity_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
struct dm_integrity_c *ic;
char dummy;
int r;
- unsigned extra_args;
+ unsigned int extra_args;
struct dm_arg_set as;
static const struct dm_arg _args[] = {
{0, 18, "Invalid number of feature args"},
};
- unsigned journal_sectors, interleave_sectors, buffer_sectors, journal_watermark, sync_msec;
+ unsigned int journal_sectors, interleave_sectors, buffer_sectors, journal_watermark, sync_msec;
bool should_write_sb;
__u64 threshold;
unsigned long long start;
@@ -4058,8 +4103,9 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
while (extra_args--) {
const char *opt_string;
- unsigned val;
+ unsigned int val;
unsigned long long llval;
+
opt_string = dm_shift_arg(&as);
if (!opt_string) {
r = -EINVAL;
@@ -4090,7 +4136,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
} else if (sscanf(opt_string, "block_size:%u%c", &val, &dummy) == 1) {
if (val < 1 << SECTOR_SHIFT ||
val > MAX_SECTORS_PER_BLOCK << SECTOR_SHIFT ||
- (val & (val -1))) {
+ (val & (val - 1))) {
r = -EINVAL;
ti->error = "Invalid block_size argument";
goto bad;
@@ -4363,9 +4409,9 @@ try_smaller_buffer:
log2_blocks_per_bitmap_bit = log2_sectors_per_bitmap_bit - ic->sb->log2_sectors_per_block;
ic->log2_blocks_per_bitmap_bit = log2_blocks_per_bitmap_bit;
- if (should_write_sb) {
+ if (should_write_sb)
ic->sb->log2_blocks_per_bitmap_bit = log2_blocks_per_bitmap_bit;
- }
+
n_bitmap_bits = ((ic->provided_data_sectors >> ic->sb->log2_sectors_per_block)
+ (((sector_t)1 << log2_blocks_per_bitmap_bit) - 1)) >> log2_blocks_per_bitmap_bit;
ic->n_bitmap_blocks = DIV_ROUND_UP(n_bitmap_bits, BITMAP_BLOCK_SIZE * 8);
@@ -4391,7 +4437,7 @@ try_smaller_buffer:
DEBUG_print(" journal_entries_per_sector %u\n", ic->journal_entries_per_sector);
DEBUG_print(" journal_section_entries %u\n", ic->journal_section_entries);
DEBUG_print(" journal_section_sectors %u\n", ic->journal_section_sectors);
- DEBUG_print(" journal_sections %u\n", (unsigned)le32_to_cpu(ic->sb->journal_sections));
+ DEBUG_print(" journal_sections %u\n", (unsigned int)le32_to_cpu(ic->sb->journal_sections));
DEBUG_print(" journal_entries %u\n", ic->journal_entries);
DEBUG_print(" log2_interleave_sectors %d\n", ic->sb->log2_interleave_sectors);
DEBUG_print(" data_device_sectors 0x%llx\n", bdev_nr_sectors(ic->dev->bdev));
@@ -4409,8 +4455,9 @@ try_smaller_buffer:
if (ic->internal_hash) {
size_t recalc_tags_size;
+
ic->recalc_wq = alloc_workqueue("dm-integrity-recalc", WQ_MEM_RECLAIM, 1);
- if (!ic->recalc_wq ) {
+ if (!ic->recalc_wq) {
ti->error = "Cannot allocate workqueue";
r = -ENOMEM;
goto bad;
@@ -4465,8 +4512,8 @@ try_smaller_buffer:
}
if (ic->mode == 'B') {
- unsigned i;
- unsigned n_bitmap_pages = DIV_ROUND_UP(ic->n_bitmap_blocks, PAGE_SIZE / BITMAP_BLOCK_SIZE);
+ unsigned int i;
+ unsigned int n_bitmap_pages = DIV_ROUND_UP(ic->n_bitmap_blocks, PAGE_SIZE / BITMAP_BLOCK_SIZE);
ic->recalc_bitmap = dm_integrity_alloc_page_list(n_bitmap_pages);
if (!ic->recalc_bitmap) {
@@ -4486,7 +4533,7 @@ try_smaller_buffer:
INIT_DELAYED_WORK(&ic->bitmap_flush_work, bitmap_flush_work);
for (i = 0; i < ic->n_bitmap_blocks; i++) {
struct bitmap_block_status *bbs = &ic->bbs[i];
- unsigned sector, pl_index, pl_offset;
+ unsigned int sector, pl_index, pl_offset;
INIT_WORK(&bbs->work, bitmap_block_work);
bbs->ic = ic;
@@ -4523,7 +4570,9 @@ try_smaller_buffer:
goto bad;
}
if (ic->mode == 'B') {
- unsigned max_io_len = ((sector_t)ic->sectors_per_block << ic->log2_blocks_per_bitmap_bit) * (BITMAP_BLOCK_SIZE * 8);
+ unsigned int max_io_len;
+
+ max_io_len = ((sector_t)ic->sectors_per_block << ic->log2_blocks_per_bitmap_bit) * (BITMAP_BLOCK_SIZE * 8);
if (!max_io_len)
max_io_len = 1U << 31;
DEBUG_print("max_io_len: old %u, new %u\n", ti->max_io_len, max_io_len);
@@ -4594,10 +4643,12 @@ static void dm_integrity_dtr(struct dm_target *ti)
if (ic->journal_io_scatterlist)
dm_integrity_free_journal_scatterlist(ic, ic->journal_io_scatterlist);
if (ic->sk_requests) {
- unsigned i;
+ unsigned int i;
for (i = 0; i < ic->journal_sections; i++) {
- struct skcipher_request *req = ic->sk_requests[i];
+ struct skcipher_request *req;
+
+ req = ic->sk_requests[i];
if (req) {
kfree_sensitive(req->iv);
skcipher_request_free(req);
diff --git a/drivers/md/dm-io-rewind.c b/drivers/md/dm-io-rewind.c
index 0db53ccb94ba..6155b0117c9d 100644
--- a/drivers/md/dm-io-rewind.c
+++ b/drivers/md/dm-io-rewind.c
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2022 Red Hat, Inc.
*/
@@ -57,7 +57,7 @@ static void dm_bio_integrity_rewind(struct bio *bio, unsigned int bytes_done)
{
struct bio_integrity_payload *bip = bio_integrity(bio);
struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
- unsigned bytes = bio_integrity_bytes(bi, bytes_done >> 9);
+ unsigned int bytes = bio_integrity_bytes(bi, bytes_done >> 9);
bip->bip_iter.bi_sector -= bio_integrity_intervals(bi, bytes_done >> 9);
dm_bvec_iter_rewind(bip->bip_vec, &bip->bip_iter, bytes);
@@ -68,7 +68,6 @@ static void dm_bio_integrity_rewind(struct bio *bio, unsigned int bytes_done)
static inline void dm_bio_integrity_rewind(struct bio *bio,
unsigned int bytes_done)
{
- return;
}
#endif
@@ -104,7 +103,6 @@ static void dm_bio_crypt_rewind(struct bio *bio, unsigned int bytes)
static inline void dm_bio_crypt_rewind(struct bio *bio, unsigned int bytes)
{
- return;
}
#endif
@@ -131,7 +129,7 @@ static inline void dm_bio_rewind_iter(const struct bio *bio,
* rewinding from end of bio and restoring its original position.
* Caller is also responsibile for restoring bio's size.
*/
-static void dm_bio_rewind(struct bio *bio, unsigned bytes)
+static void dm_bio_rewind(struct bio *bio, unsigned int bytes)
{
if (bio_integrity(bio))
dm_bio_integrity_rewind(bio, bytes);
diff --git a/drivers/md/dm-io-tracker.h b/drivers/md/dm-io-tracker.h
index bdcc6273ebf0..bea1ca11855e 100644
--- a/drivers/md/dm-io-tracker.h
+++ b/drivers/md/dm-io-tracker.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2021 Red Hat, Inc. All rights reserved.
*
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 783564533459..dc2df76999b0 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2003 Sistina Software
* Copyright (C) 2006 Red Hat GmbH
@@ -38,7 +39,7 @@ struct io {
void *context;
void *vma_invalidate_address;
unsigned long vma_invalidate_size;
-} __attribute__((aligned(DM_IO_MAX_REGIONS)));
+} __aligned(DM_IO_MAX_REGIONS);
static struct kmem_cache *_dm_io_cache;
@@ -48,7 +49,7 @@ static struct kmem_cache *_dm_io_cache;
struct dm_io_client *dm_io_client_create(void)
{
struct dm_io_client *client;
- unsigned min_ios = dm_get_reserved_bio_based_ios();
+ unsigned int min_ios = dm_get_reserved_bio_based_ios();
int ret;
client = kzalloc(sizeof(*client), GFP_KERNEL);
@@ -65,7 +66,7 @@ struct dm_io_client *dm_io_client_create(void)
return client;
- bad:
+bad:
mempool_exit(&client->pool);
kfree(client);
return ERR_PTR(ret);
@@ -80,15 +81,17 @@ void dm_io_client_destroy(struct dm_io_client *client)
}
EXPORT_SYMBOL(dm_io_client_destroy);
-/*-----------------------------------------------------------------
+/*
+ *-------------------------------------------------------------------
* We need to keep track of which region a bio is doing io for.
* To avoid a memory allocation to store just 5 or 6 bits, we
* ensure the 'struct io' pointer is aligned so enough low bits are
* always zero and then combine it with the region number directly in
* bi_private.
- *---------------------------------------------------------------*/
+ *-------------------------------------------------------------------
+ */
static void store_io_and_region_in_bio(struct bio *bio, struct io *io,
- unsigned region)
+ unsigned int region)
{
if (unlikely(!IS_ALIGNED((unsigned long)io, DM_IO_MAX_REGIONS))) {
DMCRIT("Unaligned struct io pointer %p", io);
@@ -99,7 +102,7 @@ static void store_io_and_region_in_bio(struct bio *bio, struct io *io,
}
static void retrieve_io_and_region_from_bio(struct bio *bio, struct io **io,
- unsigned *region)
+ unsigned int *region)
{
unsigned long val = (unsigned long)bio->bi_private;
@@ -107,10 +110,12 @@ static void retrieve_io_and_region_from_bio(struct bio *bio, struct io **io,
*region = val & (DM_IO_MAX_REGIONS - 1);
}
-/*-----------------------------------------------------------------
+/*
+ *--------------------------------------------------------------
* We need an io object to keep track of the number of bios that
* have been dispatched for a particular io.
- *---------------------------------------------------------------*/
+ *--------------------------------------------------------------
+ */
static void complete_io(struct io *io)
{
unsigned long error_bits = io->error_bits;
@@ -137,7 +142,7 @@ static void dec_count(struct io *io, unsigned int region, blk_status_t error)
static void endio(struct bio *bio)
{
struct io *io;
- unsigned region;
+ unsigned int region;
blk_status_t error;
if (bio->bi_status && bio_data_dir(bio) == READ)
@@ -154,17 +159,19 @@ static void endio(struct bio *bio)
dec_count(io, region, error);
}
-/*-----------------------------------------------------------------
+/*
+ *--------------------------------------------------------------
* These little objects provide an abstraction for getting a new
* destination page for io.
- *---------------------------------------------------------------*/
+ *--------------------------------------------------------------
+ */
struct dpages {
void (*get_page)(struct dpages *dp,
- struct page **p, unsigned long *len, unsigned *offset);
+ struct page **p, unsigned long *len, unsigned int *offset);
void (*next_page)(struct dpages *dp);
union {
- unsigned context_u;
+ unsigned int context_u;
struct bvec_iter context_bi;
};
void *context_ptr;
@@ -177,9 +184,9 @@ struct dpages {
* Functions for getting the pages from a list.
*/
static void list_get_page(struct dpages *dp,
- struct page **p, unsigned long *len, unsigned *offset)
+ struct page **p, unsigned long *len, unsigned int *offset)
{
- unsigned o = dp->context_u;
+ unsigned int o = dp->context_u;
struct page_list *pl = (struct page_list *) dp->context_ptr;
*p = pl->page;
@@ -190,11 +197,12 @@ static void list_get_page(struct dpages *dp,
static void list_next_page(struct dpages *dp)
{
struct page_list *pl = (struct page_list *) dp->context_ptr;
+
dp->context_ptr = pl->next;
dp->context_u = 0;
}
-static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offset)
+static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned int offset)
{
dp->get_page = list_get_page;
dp->next_page = list_next_page;
@@ -206,7 +214,7 @@ static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offse
* Functions for getting the pages from a bvec.
*/
static void bio_get_page(struct dpages *dp, struct page **p,
- unsigned long *len, unsigned *offset)
+ unsigned long *len, unsigned int *offset)
{
struct bio_vec bvec = bvec_iter_bvec((struct bio_vec *)dp->context_ptr,
dp->context_bi);
@@ -244,7 +252,7 @@ static void bio_dp_init(struct dpages *dp, struct bio *bio)
* Functions for getting the pages from a VMA.
*/
static void vm_get_page(struct dpages *dp,
- struct page **p, unsigned long *len, unsigned *offset)
+ struct page **p, unsigned long *len, unsigned int *offset)
{
*p = vmalloc_to_page(dp->context_ptr);
*offset = dp->context_u;
@@ -269,7 +277,7 @@ static void vm_dp_init(struct dpages *dp, void *data)
* Functions for getting the pages from kernel memory.
*/
static void km_get_page(struct dpages *dp, struct page **p, unsigned long *len,
- unsigned *offset)
+ unsigned int *offset)
{
*p = virt_to_page(dp->context_ptr);
*offset = dp->context_u;
@@ -290,18 +298,20 @@ static void km_dp_init(struct dpages *dp, void *data)
dp->context_ptr = data;
}
-/*-----------------------------------------------------------------
+/*
+ *---------------------------------------------------------------
* IO routines that accept a list of pages.
- *---------------------------------------------------------------*/
-static void do_region(const blk_opf_t opf, unsigned region,
+ *---------------------------------------------------------------
+ */
+static void do_region(const blk_opf_t opf, unsigned int region,
struct dm_io_region *where, struct dpages *dp,
struct io *io)
{
struct bio *bio;
struct page *page;
unsigned long len;
- unsigned offset;
- unsigned num_bvecs;
+ unsigned int offset;
+ unsigned int num_bvecs;
sector_t remaining = where->count;
struct request_queue *q = bdev_get_queue(where->bdev);
sector_t num_sectors;
@@ -350,18 +360,20 @@ static void do_region(const blk_opf_t opf, unsigned region,
num_sectors = min_t(sector_t, special_cmd_max_sectors, remaining);
bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;
remaining -= num_sectors;
- } else while (remaining) {
- /*
- * Try and add as many pages as possible.
- */
- dp->get_page(dp, &page, &len, &offset);
- len = min(len, to_bytes(remaining));
- if (!bio_add_page(bio, page, len, offset))
- break;
-
- offset = 0;
- remaining -= to_sector(len);
- dp->next_page(dp);
+ } else {
+ while (remaining) {
+ /*
+ * Try and add as many pages as possible.
+ */
+ dp->get_page(dp, &page, &len, &offset);
+ len = min(len, to_bytes(remaining));
+ if (!bio_add_page(bio, page, len, offset))
+ break;
+
+ offset = 0;
+ remaining -= to_sector(len);
+ dp->next_page(dp);
+ }
}
atomic_inc(&io->count);
@@ -508,7 +520,7 @@ static int dp_init(struct dm_io_request *io_req, struct dpages *dp,
return 0;
}
-int dm_io(struct dm_io_request *io_req, unsigned num_regions,
+int dm_io(struct dm_io_request *io_req, unsigned int num_regions,
struct dm_io_region *where, unsigned long *sync_error_bits)
{
int r;
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index 36fc6ae4737a..50a1259294d1 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
* Copyright (C) 2004 - 2006 Red Hat, Inc. All rights reserved.
@@ -31,13 +32,15 @@ struct dm_file {
* poll will wait until the global event number is greater than
* this value.
*/
- volatile unsigned global_event_nr;
+ volatile unsigned int global_event_nr;
};
-/*-----------------------------------------------------------------
+/*
+ *---------------------------------------------------------------
* The ioctl interface needs to be able to look up devices by
* name or uuid.
- *---------------------------------------------------------------*/
+ *---------------------------------------------------------------
+ */
struct hash_cell {
struct rb_node name_node;
struct rb_node uuid_node;
@@ -51,10 +54,10 @@ struct hash_cell {
};
struct vers_iter {
- size_t param_size;
- struct dm_target_versions *vers, *old_vers;
- char *end;
- uint32_t flags;
+ size_t param_size;
+ struct dm_target_versions *vers, *old_vers;
+ char *end;
+ uint32_t flags;
};
@@ -78,16 +81,20 @@ static void dm_hash_exit(void)
dm_hash_remove_all(false, false, false);
}
-/*-----------------------------------------------------------------
+/*
+ *---------------------------------------------------------------
* Code for looking up a device by name
- *---------------------------------------------------------------*/
+ *---------------------------------------------------------------
+ */
static struct hash_cell *__get_name_cell(const char *str)
{
struct rb_node *n = name_rb_tree.rb_node;
while (n) {
struct hash_cell *hc = container_of(n, struct hash_cell, name_node);
- int c = strcmp(hc->name, str);
+ int c;
+
+ c = strcmp(hc->name, str);
if (!c) {
dm_get(hc->md);
return hc;
@@ -104,7 +111,9 @@ static struct hash_cell *__get_uuid_cell(const char *str)
while (n) {
struct hash_cell *hc = container_of(n, struct hash_cell, uuid_node);
- int c = strcmp(hc->uuid, str);
+ int c;
+
+ c = strcmp(hc->uuid, str);
if (!c) {
dm_get(hc->md);
return hc;
@@ -144,7 +153,9 @@ static void __link_name(struct hash_cell *new_hc)
while (*n) {
struct hash_cell *hc = container_of(*n, struct hash_cell, name_node);
- int c = strcmp(hc->name, new_hc->name);
+ int c;
+
+ c = strcmp(hc->name, new_hc->name);
BUG_ON(!c);
parent = *n;
n = c >= 0 ? &hc->name_node.rb_left : &hc->name_node.rb_right;
@@ -167,7 +178,9 @@ static void __link_uuid(struct hash_cell *new_hc)
while (*n) {
struct hash_cell *hc = container_of(*n, struct hash_cell, uuid_node);
- int c = strcmp(hc->uuid, new_hc->uuid);
+ int c;
+
+ c = strcmp(hc->uuid, new_hc->uuid);
BUG_ON(!c);
parent = *n;
n = c > 0 ? &hc->uuid_node.rb_left : &hc->uuid_node.rb_right;
@@ -195,9 +208,11 @@ static struct hash_cell *__get_dev_cell(uint64_t dev)
return hc;
}
-/*-----------------------------------------------------------------
+/*
+ *---------------------------------------------------------------
* Inserting, removing and renaming a device.
- *---------------------------------------------------------------*/
+ *---------------------------------------------------------------
+ */
static struct hash_cell *alloc_cell(const char *name, const char *uuid,
struct mapped_device *md)
{
@@ -295,6 +310,8 @@ static struct dm_table *__hash_remove(struct hash_cell *hc)
struct dm_table *table;
int srcu_idx;
+ lockdep_assert_held(&_hash_lock);
+
/* remove from the dev trees */
__unlink_name(hc);
__unlink_uuid(hc);
@@ -413,7 +430,7 @@ static struct mapped_device *dm_hash_rename(struct dm_ioctl *param,
struct hash_cell *hc;
struct dm_table *table;
struct mapped_device *md;
- unsigned change_uuid = (param->flags & DM_UUID_FLAG) ? 1 : 0;
+ unsigned int change_uuid = (param->flags & DM_UUID_FLAG) ? 1 : 0;
int srcu_idx;
/*
@@ -434,8 +451,7 @@ static struct mapped_device *dm_hash_rename(struct dm_ioctl *param,
hc = __get_name_cell(new);
if (hc) {
- DMERR("Unable to change %s on mapped device %s to one that "
- "already exists: %s",
+ DMERR("Unable to change %s on mapped device %s to one that already exists: %s",
change_uuid ? "uuid" : "name",
param->name, new);
dm_put(hc->md);
@@ -482,7 +498,7 @@ static struct mapped_device *dm_hash_rename(struct dm_ioctl *param,
dm_table_event(table);
dm_put_live_table(hc->md, srcu_idx);
- if (!dm_kobject_uevent(hc->md, KOBJ_CHANGE, param->event_nr))
+ if (!dm_kobject_uevent(hc->md, KOBJ_CHANGE, param->event_nr, false))
param->flags |= DM_UEVENT_GENERATED_FLAG;
md = hc->md;
@@ -500,9 +516,11 @@ void dm_deferred_remove(void)
dm_hash_remove_all(true, false, true);
}
-/*-----------------------------------------------------------------
+/*
+ *---------------------------------------------------------------
* Implementation of the ioctl commands
- *---------------------------------------------------------------*/
+ *---------------------------------------------------------------
+ */
/*
* All the ioctl commands get dispatched to functions with this
* prototype.
@@ -612,6 +630,7 @@ static int list_devices(struct file *filp, struct dm_ioctl *param, size_t param_
*/
for (n = rb_first(&name_rb_tree); n; n = rb_next(n)) {
void *uuid_ptr;
+
hc = container_of(n, struct hash_cell, name_node);
if (!filter_device(hc, param->name, param->uuid))
continue;
@@ -652,36 +671,34 @@ static int list_devices(struct file *filp, struct dm_ioctl *param, size_t param_
static void list_version_get_needed(struct target_type *tt, void *needed_param)
{
- size_t *needed = needed_param;
+ size_t *needed = needed_param;
- *needed += sizeof(struct dm_target_versions);
- *needed += strlen(tt->name) + 1;
- *needed += ALIGN_MASK;
+ *needed += sizeof(struct dm_target_versions);
+ *needed += strlen(tt->name) + 1;
+ *needed += ALIGN_MASK;
}
static void list_version_get_info(struct target_type *tt, void *param)
{
- struct vers_iter *info = param;
+ struct vers_iter *info = param;
- /* Check space - it might have changed since the first iteration */
- if ((char *)info->vers + sizeof(tt->version) + strlen(tt->name) + 1 >
- info->end) {
+ /* Check space - it might have changed since the first iteration */
+ if ((char *)info->vers + sizeof(tt->version) + strlen(tt->name) + 1 > info->end) {
+ info->flags = DM_BUFFER_FULL_FLAG;
+ return;
+ }
- info->flags = DM_BUFFER_FULL_FLAG;
- return;
- }
+ if (info->old_vers)
+ info->old_vers->next = (uint32_t) ((void *)info->vers - (void *)info->old_vers);
- if (info->old_vers)
- info->old_vers->next = (uint32_t) ((void *)info->vers -
- (void *)info->old_vers);
- info->vers->version[0] = tt->version[0];
- info->vers->version[1] = tt->version[1];
- info->vers->version[2] = tt->version[2];
- info->vers->next = 0;
- strcpy(info->vers->name, tt->name);
+ info->vers->version[0] = tt->version[0];
+ info->vers->version[1] = tt->version[1];
+ info->vers->version[2] = tt->version[2];
+ info->vers->next = 0;
+ strcpy(info->vers->name, tt->name);
- info->old_vers = info->vers;
- info->vers = align_ptr((void *)(info->vers + 1) + strlen(tt->name) + 1);
+ info->old_vers = info->vers;
+ info->vers = align_ptr((void *)(info->vers + 1) + strlen(tt->name) + 1);
}
static int __list_versions(struct dm_ioctl *param, size_t param_size, const char *name)
@@ -772,7 +789,7 @@ static struct dm_table *dm_get_inactive_table(struct mapped_device *md, int *src
down_read(&_hash_lock);
hc = dm_get_mdptr(md);
- if (!hc || hc->md != md) {
+ if (!hc) {
DMERR("device has been removed from the dev hash table.");
goto out;
}
@@ -841,6 +858,7 @@ static void __dev_status(struct mapped_device *md, struct dm_ioctl *param)
if (param->flags & DM_QUERY_INACTIVE_TABLE_FLAG) {
int srcu_idx;
+
table = dm_get_inactive_table(md, &srcu_idx);
if (table) {
if (!(dm_table_get_mode(table) & FMODE_WRITE))
@@ -921,9 +939,9 @@ static struct hash_cell *__find_device_hash_cell(struct dm_ioctl *param)
* Sneakily write in both the name and the uuid
* while we have the cell.
*/
- strlcpy(param->name, hc->name, sizeof(param->name));
+ strscpy(param->name, hc->name, sizeof(param->name));
if (hc->uuid)
- strlcpy(param->uuid, hc->uuid, sizeof(param->uuid));
+ strscpy(param->uuid, hc->uuid, sizeof(param->uuid));
else
param->uuid[0] = '\0';
@@ -995,7 +1013,7 @@ static int dev_remove(struct file *filp, struct dm_ioctl *param, size_t param_si
dm_ima_measure_on_device_remove(md, false);
- if (!dm_kobject_uevent(md, KOBJ_REMOVE, param->event_nr))
+ if (!dm_kobject_uevent(md, KOBJ_REMOVE, param->event_nr, false))
param->flags |= DM_UEVENT_GENERATED_FLAG;
dm_put(md);
@@ -1021,7 +1039,7 @@ static int dev_rename(struct file *filp, struct dm_ioctl *param, size_t param_si
int r;
char *new_data = (char *) param + param->data_start;
struct mapped_device *md;
- unsigned change_uuid = (param->flags & DM_UUID_FLAG) ? 1 : 0;
+ unsigned int change_uuid = (param->flags & DM_UUID_FLAG) ? 1 : 0;
if (new_data < param->data ||
invalid_str(new_data, (void *) param + param_size) || !*new_data ||
@@ -1073,8 +1091,7 @@ static int dev_set_geometry(struct file *filp, struct dm_ioctl *param, size_t pa
goto out;
}
- if (indata[0] > 65535 || indata[1] > 255 ||
- indata[2] > 255 || indata[3] > ULONG_MAX) {
+ if (indata[0] > 65535 || indata[1] > 255 || indata[2] > 255) {
DMERR("Geometry exceeds range limits.");
goto out;
}
@@ -1096,7 +1113,7 @@ out:
static int do_suspend(struct dm_ioctl *param)
{
int r = 0;
- unsigned suspend_flags = DM_SUSPEND_LOCKFS_FLAG;
+ unsigned int suspend_flags = DM_SUSPEND_LOCKFS_FLAG;
struct mapped_device *md;
md = find_device(param);
@@ -1125,10 +1142,11 @@ out:
static int do_resume(struct dm_ioctl *param)
{
int r = 0;
- unsigned suspend_flags = DM_SUSPEND_LOCKFS_FLAG;
+ unsigned int suspend_flags = DM_SUSPEND_LOCKFS_FLAG;
struct hash_cell *hc;
struct mapped_device *md;
struct dm_table *new_map, *old_map = NULL;
+ bool need_resize_uevent = false;
down_write(&_hash_lock);
@@ -1149,6 +1167,8 @@ static int do_resume(struct dm_ioctl *param)
/* Do we need to load a new map ? */
if (new_map) {
+ sector_t old_size, new_size;
+
/* Suspend if it isn't already suspended */
if (param->flags & DM_SKIP_LOCKFS_FLAG)
suspend_flags &= ~DM_SUSPEND_LOCKFS_FLAG;
@@ -1157,6 +1177,7 @@ static int do_resume(struct dm_ioctl *param)
if (!dm_suspended_md(md))
dm_suspend(md, suspend_flags);
+ old_size = dm_get_size(md);
old_map = dm_swap_table(md, new_map);
if (IS_ERR(old_map)) {
dm_sync_table(md);
@@ -1164,6 +1185,9 @@ static int do_resume(struct dm_ioctl *param)
dm_put(md);
return PTR_ERR(old_map);
}
+ new_size = dm_get_size(md);
+ if (old_size && new_size && old_size != new_size)
+ need_resize_uevent = true;
if (dm_table_get_mode(new_map) & FMODE_WRITE)
set_disk_ro(dm_disk(md), 0);
@@ -1176,7 +1200,7 @@ static int do_resume(struct dm_ioctl *param)
if (!r) {
dm_ima_measure_on_device_resume(md, new_map ? true : false);
- if (!dm_kobject_uevent(md, KOBJ_CHANGE, param->event_nr))
+ if (!dm_kobject_uevent(md, KOBJ_CHANGE, param->event_nr, need_resize_uevent))
param->flags |= DM_UEVENT_GENERATED_FLAG;
}
}
@@ -1236,7 +1260,7 @@ static void retrieve_status(struct dm_table *table,
char *outbuf, *outptr;
status_type_t type;
size_t remaining, len, used = 0;
- unsigned status_flags = 0;
+ unsigned int status_flags = 0;
outptr = outbuf = get_result_buffer(param, param_size, &len);
@@ -1387,7 +1411,7 @@ static int populate_table(struct dm_table *table,
char *target_params;
if (!param->target_count) {
- DMERR("populate_table: no targets specified");
+ DMERR("%s: no targets specified", __func__);
return -EINVAL;
}
@@ -1476,7 +1500,7 @@ static int table_load(struct file *filp, struct dm_ioctl *param, size_t param_si
/* stage inactive table */
down_write(&_hash_lock);
hc = dm_get_mdptr(md);
- if (!hc || hc->md != md) {
+ if (!hc) {
DMERR("device has been removed from the dev hash table.");
up_write(&_hash_lock);
r = -ENXIO;
@@ -1564,7 +1588,7 @@ static void retrieve_deps(struct dm_table *table,
/*
* Count the devices.
*/
- list_for_each (tmp, dm_table_get_devices(table))
+ list_for_each(tmp, dm_table_get_devices(table))
count++;
/*
@@ -1581,7 +1605,7 @@ static void retrieve_deps(struct dm_table *table,
*/
deps->count = count;
count = 0;
- list_for_each_entry (dd, dm_table_get_devices(table), list)
+ list_for_each_entry(dd, dm_table_get_devices(table), list)
deps->dev[count++] = huge_encode_dev(dd->dm_dev->bdev->bd_dev);
param->data_size = param->data_start + needed;
@@ -1641,8 +1665,8 @@ static int table_status(struct file *filp, struct dm_ioctl *param, size_t param_
* Returns a number <= 1 if message was processed by device mapper.
* Returns 2 if message should be delivered to the target.
*/
-static int message_for_md(struct mapped_device *md, unsigned argc, char **argv,
- char *result, unsigned maxlen)
+static int message_for_md(struct mapped_device *md, unsigned int argc, char **argv,
+ char *result, unsigned int maxlen)
{
int r;
@@ -1757,10 +1781,11 @@ static int target_message(struct file *filp, struct dm_ioctl *param, size_t para
#define IOCTL_FLAGS_NO_PARAMS 1
#define IOCTL_FLAGS_ISSUE_GLOBAL_EVENT 2
-/*-----------------------------------------------------------------
- * Implementation of open/close/ioctl on the special char
- * device.
- *---------------------------------------------------------------*/
+/*
+ *---------------------------------------------------------------
+ * Implementation of open/close/ioctl on the special char device.
+ *---------------------------------------------------------------
+ */
static ioctl_fn lookup_ioctl(unsigned int cmd, int *ioctl_flags)
{
static const struct {
@@ -1812,10 +1837,9 @@ static int check_version(unsigned int cmd, struct dm_ioctl __user *user)
if (copy_from_user(version, user->version, sizeof(version)))
return -EFAULT;
- if ((DM_VERSION_MAJOR != version[0]) ||
- (DM_VERSION_MINOR < version[1])) {
- DMERR("ioctl interface mismatch: "
- "kernel(%u.%u.%u), user(%u.%u.%u), cmd(%d)",
+ if ((version[0] != DM_VERSION_MAJOR) ||
+ (version[1] > DM_VERSION_MINOR)) {
+ DMERR("ioctl interface mismatch: kernel(%u.%u.%u), user(%u.%u.%u), cmd(%d)",
DM_VERSION_MAJOR, DM_VERSION_MINOR,
DM_VERSION_PATCHLEVEL,
version[0], version[1], version[2], cmd);
@@ -1852,7 +1876,7 @@ static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl *param_kern
struct dm_ioctl *dmi;
int secure_data;
const size_t minimum_data_size = offsetof(struct dm_ioctl, data);
- unsigned noio_flag;
+ unsigned int noio_flag;
if (copy_from_user(param_kernel, user, minimum_data_size))
return -EFAULT;
@@ -2079,9 +2103,9 @@ static const struct file_operations _ctl_fops = {
static struct miscdevice _dm_misc = {
.minor = MAPPER_CTRL_MINOR,
- .name = DM_NAME,
+ .name = DM_NAME,
.nodename = DM_DIR "/" DM_CONTROL_NODE,
- .fops = &_ctl_fops
+ .fops = &_ctl_fops
};
MODULE_ALIAS_MISCDEV(MAPPER_CTRL_MINOR);
@@ -2128,7 +2152,7 @@ int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid)
mutex_lock(&dm_hash_cells_mutex);
hc = dm_get_mdptr(md);
- if (!hc || hc->md != md) {
+ if (!hc) {
r = -ENXIO;
goto out;
}
@@ -2241,7 +2265,9 @@ int __init dm_early_create(struct dm_ioctl *dmi,
err_destroy_table:
dm_table_destroy(t);
err_hash_remove:
+ down_write(&_hash_lock);
(void) __hash_remove(__get_name_cell(dmi->name));
+ up_write(&_hash_lock);
/* release reference from __get_name_cell */
dm_put(md);
err_destroy_dm:
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index 4d3bbbea2e9a..a158c6e5fbd7 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2002 Sistina Software (UK) Limited.
* Copyright (C) 2006 Red Hat GmbH
@@ -34,14 +35,14 @@
#define DEFAULT_SUB_JOB_SIZE_KB 512
#define MAX_SUB_JOB_SIZE_KB 1024
-static unsigned kcopyd_subjob_size_kb = DEFAULT_SUB_JOB_SIZE_KB;
+static unsigned int kcopyd_subjob_size_kb = DEFAULT_SUB_JOB_SIZE_KB;
-module_param(kcopyd_subjob_size_kb, uint, S_IRUGO | S_IWUSR);
+module_param(kcopyd_subjob_size_kb, uint, 0644);
MODULE_PARM_DESC(kcopyd_subjob_size_kb, "Sub-job size for dm-kcopyd clients");
-static unsigned dm_get_kcopyd_subjob_size(void)
+static unsigned int dm_get_kcopyd_subjob_size(void)
{
- unsigned sub_job_size_kb;
+ unsigned int sub_job_size_kb;
sub_job_size_kb = __dm_get_module_param(&kcopyd_subjob_size_kb,
DEFAULT_SUB_JOB_SIZE_KB,
@@ -50,15 +51,17 @@ static unsigned dm_get_kcopyd_subjob_size(void)
return sub_job_size_kb << 1;
}
-/*-----------------------------------------------------------------
+/*
+ *----------------------------------------------------------------
* Each kcopyd client has its own little pool of preallocated
* pages for kcopyd io.
- *---------------------------------------------------------------*/
+ *---------------------------------------------------------------
+ */
struct dm_kcopyd_client {
struct page_list *pages;
- unsigned nr_reserved_pages;
- unsigned nr_free_pages;
- unsigned sub_job_size;
+ unsigned int nr_reserved_pages;
+ unsigned int nr_free_pages;
+ unsigned int sub_job_size;
struct dm_io_client *io_client;
@@ -109,7 +112,7 @@ static DEFINE_SPINLOCK(throttle_spinlock);
* The reason for this is unknown but possibly due to jiffies rounding errors
* or read/write cache inside the disk.
*/
-#define SLEEP_MSEC 100
+#define SLEEP_USEC 100000
/*
* Maximum number of sleep events. There is a theoretical livelock if more
@@ -119,7 +122,7 @@ static DEFINE_SPINLOCK(throttle_spinlock);
static void io_job_start(struct dm_kcopyd_throttle *t)
{
- unsigned throttle, now, difference;
+ unsigned int throttle, now, difference;
int slept = 0, skew;
if (unlikely(!t))
@@ -148,6 +151,7 @@ try_again:
if (unlikely(t->total_period >= (1 << ACCOUNT_INTERVAL_SHIFT))) {
int shift = fls(t->total_period >> ACCOUNT_INTERVAL_SHIFT);
+
t->total_period >>= shift;
t->io_period >>= shift;
}
@@ -157,7 +161,7 @@ try_again:
if (unlikely(skew > 0) && slept < MAX_SLEEPS) {
slept++;
spin_unlock_irq(&throttle_spinlock);
- msleep(SLEEP_MSEC);
+ fsleep(SLEEP_USEC);
goto try_again;
}
@@ -182,7 +186,7 @@ static void io_job_finish(struct dm_kcopyd_throttle *t)
goto skip_limit;
if (!t->num_io_jobs) {
- unsigned now, difference;
+ unsigned int now, difference;
now = jiffies;
difference = now - t->last_jiffies;
@@ -303,9 +307,9 @@ static void drop_pages(struct page_list *pl)
/*
* Allocate and reserve nr_pages for the use of a specific client.
*/
-static int client_reserve_pages(struct dm_kcopyd_client *kc, unsigned nr_pages)
+static int client_reserve_pages(struct dm_kcopyd_client *kc, unsigned int nr_pages)
{
- unsigned i;
+ unsigned int i;
struct page_list *pl = NULL, *next;
for (i = 0; i < nr_pages; i++) {
@@ -333,15 +337,17 @@ static void client_free_pages(struct dm_kcopyd_client *kc)
kc->nr_free_pages = kc->nr_reserved_pages = 0;
}
-/*-----------------------------------------------------------------
+/*
+ *---------------------------------------------------------------
* kcopyd_jobs need to be allocated by the *clients* of kcopyd,
* for this reason we use a mempool to prevent the client from
* ever having to do io (which could cause a deadlock).
- *---------------------------------------------------------------*/
+ *---------------------------------------------------------------
+ */
struct kcopyd_job {
struct dm_kcopyd_client *kc;
struct list_head list;
- unsigned flags;
+ unsigned int flags;
/*
* Error state of the job.
@@ -582,7 +588,7 @@ static int run_io_job(struct kcopyd_job *job)
static int run_pages_job(struct kcopyd_job *job)
{
int r;
- unsigned nr_pages = dm_div_up(job->dests[0].count, PAGE_SIZE >> 9);
+ unsigned int nr_pages = dm_div_up(job->dests[0].count, PAGE_SIZE >> 9);
r = kcopyd_get_pages(job->kc, nr_pages, &job->pages);
if (!r) {
@@ -603,7 +609,7 @@ static int run_pages_job(struct kcopyd_job *job)
* of successful jobs.
*/
static int process_jobs(struct list_head *jobs, struct dm_kcopyd_client *kc,
- int (*fn) (struct kcopyd_job *))
+ int (*fn)(struct kcopyd_job *))
{
struct kcopyd_job *job;
int r, count = 0;
@@ -673,6 +679,7 @@ static void do_work(struct work_struct *work)
static void dispatch_job(struct kcopyd_job *job)
{
struct dm_kcopyd_client *kc = job->kc;
+
atomic_inc(&kc->nr_jobs);
if (unlikely(!job->source.count))
push(&kc->callback_jobs, job);
@@ -819,7 +826,7 @@ void dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
job->pages = NULL;
job->op = REQ_OP_READ;
} else {
- memset(&job->source, 0, sizeof job->source);
+ memset(&job->source, 0, sizeof(job->source));
job->source.count = job->dests[0].count;
job->pages = &zero_page_list;
@@ -849,8 +856,8 @@ void dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
EXPORT_SYMBOL(dm_kcopyd_copy);
void dm_kcopyd_zero(struct dm_kcopyd_client *kc,
- unsigned num_dests, struct dm_io_region *dests,
- unsigned flags, dm_kcopyd_notify_fn fn, void *context)
+ unsigned int num_dests, struct dm_io_region *dests,
+ unsigned int flags, dm_kcopyd_notify_fn fn, void *context)
{
dm_kcopyd_copy(kc, NULL, num_dests, dests, flags, fn, context);
}
@@ -900,13 +907,15 @@ int kcopyd_cancel(struct kcopyd_job *job, int block)
}
#endif /* 0 */
-/*-----------------------------------------------------------------
+/*
+ *---------------------------------------------------------------
* Client setup
- *---------------------------------------------------------------*/
+ *---------------------------------------------------------------
+ */
struct dm_kcopyd_client *dm_kcopyd_client_create(struct dm_kcopyd_throttle *throttle)
{
int r;
- unsigned reserve_pages;
+ unsigned int reserve_pages;
struct dm_kcopyd_client *kc;
kc = kzalloc(sizeof(*kc), GFP_KERNEL);
diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
index 3212ef6aa81b..3e622dcc9dbd 100644
--- a/drivers/md/dm-linear.c
+++ b/drivers/md/dm-linear.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2001-2003 Sistina Software (UK) Limited.
*
@@ -64,7 +65,7 @@ static int linear_ctr(struct dm_target *ti, unsigned int argc, char **argv)
ti->private = lc;
return 0;
- bad:
+bad:
kfree(lc);
return ret;
}
@@ -95,7 +96,7 @@ static int linear_map(struct dm_target *ti, struct bio *bio)
}
static void linear_status(struct dm_target *ti, status_type_t type,
- unsigned status_flags, char *result, unsigned maxlen)
+ unsigned int status_flags, char *result, unsigned int maxlen)
{
struct linear_c *lc = (struct linear_c *) ti->private;
size_t sz = 0;
diff --git a/drivers/md/dm-log-userspace-base.c b/drivers/md/dm-log-userspace-base.c
index 9ab93ebea889..5aace6ee6d47 100644
--- a/drivers/md/dm-log-userspace-base.c
+++ b/drivers/md/dm-log-userspace-base.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2006-2009 Red Hat, Inc.
*
@@ -123,7 +124,7 @@ retry:
}
static int build_constructor_string(struct dm_target *ti,
- unsigned argc, char **argv,
+ unsigned int argc, char **argv,
char **ctr_str)
{
int i, str_size;
@@ -188,7 +189,7 @@ static void do_flush(struct work_struct *work)
* to the userspace ctr function.
*/
static int userspace_ctr(struct dm_dirty_log *log, struct dm_target *ti,
- unsigned argc, char **argv)
+ unsigned int argc, char **argv)
{
int r = 0;
int str_size;
@@ -345,8 +346,6 @@ static void userspace_dtr(struct dm_dirty_log *log)
kfree(lc->usr_argv_str);
kfree(lc);
-
- return;
}
static int userspace_presuspend(struct dm_dirty_log *log)
@@ -660,8 +659,6 @@ static void userspace_mark_region(struct dm_dirty_log *log, region_t region)
fe->region = region;
list_add(&fe->list, &lc->mark_list);
spin_unlock_irqrestore(&lc->flush_lock, flags);
-
- return;
}
/*
@@ -697,8 +694,6 @@ static void userspace_clear_region(struct dm_dirty_log *log, region_t region)
fe->region = region;
list_add(&fe->list, &lc->clear_list);
spin_unlock_irqrestore(&lc->flush_lock, flags);
-
- return;
}
/*
@@ -755,7 +750,6 @@ static void userspace_set_region_sync(struct dm_dirty_log *log,
* It would be nice to be able to report failures.
* However, it is easy enough to detect and resolve.
*/
- return;
}
/*
@@ -792,7 +786,7 @@ static region_t userspace_get_sync_count(struct dm_dirty_log *log)
* Returns: amount of space consumed
*/
static int userspace_status(struct dm_dirty_log *log, status_type_t status_type,
- char *result, unsigned maxlen)
+ char *result, unsigned int maxlen)
{
int r = 0;
char *table_args;
@@ -926,7 +920,6 @@ static void __exit userspace_dirty_log_exit(void)
kmem_cache_destroy(_flush_entry_cache);
DMINFO("version " DM_LOG_USERSPACE_VSN " unloaded");
- return;
}
module_init(userspace_dirty_log_init);
diff --git a/drivers/md/dm-log-userspace-transfer.c b/drivers/md/dm-log-userspace-transfer.c
index fdf8ec304f8d..f125b0f553fa 100644
--- a/drivers/md/dm-log-userspace-transfer.c
+++ b/drivers/md/dm-log-userspace-transfer.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2006-2009 Red Hat, Inc.
*
@@ -108,9 +109,8 @@ static int fill_pkg(struct cn_msg *msg, struct dm_ulog_request *tfr)
if (pkg->error != -EAGAIN)
*(pkg->data_size) = 0;
} else if (tfr->data_size > *(pkg->data_size)) {
- DMERR("Insufficient space to receive package [%u] "
- "(%u vs %zu)", tfr->request_type,
- tfr->data_size, *(pkg->data_size));
+ DMERR("Insufficient space to receive package [%u] (%u vs %zu)",
+ tfr->request_type, tfr->data_size, *(pkg->data_size));
*(pkg->data_size) = 0;
pkg->error = -ENOSPC;
@@ -142,7 +142,7 @@ static void cn_ulog_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
fill_pkg(msg, NULL);
else if (msg->len < sizeof(*tfr))
DMERR("Incomplete message received (expected %u, got %u): [%u]",
- (unsigned)sizeof(*tfr), msg->len, msg->seq);
+ (unsigned int)sizeof(*tfr), msg->len, msg->seq);
else
fill_pkg(NULL, tfr);
spin_unlock(&receiving_list_lock);
diff --git a/drivers/md/dm-log-userspace-transfer.h b/drivers/md/dm-log-userspace-transfer.h
index 04ee874f9153..f2f970af906f 100644
--- a/drivers/md/dm-log-userspace-transfer.h
+++ b/drivers/md/dm-log-userspace-transfer.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2006-2009 Red Hat, Inc.
*
diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c
index 178e13a5b059..cbd0f81f4a35 100644
--- a/drivers/md/dm-log-writes.c
+++ b/drivers/md/dm-log-writes.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2014 Facebook. All rights reserved.
*
@@ -231,13 +232,13 @@ static int write_metadata(struct log_writes_c *lc, void *entry,
goto error;
}
- ptr = kmap_atomic(page);
+ ptr = kmap_local_page(page);
memcpy(ptr, entry, entrylen);
if (datalen)
memcpy(ptr + entrylen, data, datalen);
memset(ptr + entrylen + datalen, 0,
lc->sectorsize - entrylen - datalen);
- kunmap_atomic(ptr);
+ kunmap_local(ptr);
ret = bio_add_page(bio, page, lc->sectorsize, 0);
if (ret != lc->sectorsize) {
@@ -286,11 +287,11 @@ static int write_inline_data(struct log_writes_c *lc, void *entry,
goto error_bio;
}
- ptr = kmap_atomic(page);
+ ptr = kmap_local_page(page);
memcpy(ptr, data, pg_datalen);
if (pg_sectorlen > pg_datalen)
memset(ptr + pg_datalen, 0, pg_sectorlen - pg_datalen);
- kunmap_atomic(ptr);
+ kunmap_local(ptr);
ret = bio_add_page(bio, page, pg_sectorlen, 0);
if (ret != pg_sectorlen) {
@@ -742,9 +743,9 @@ static int log_writes_map(struct dm_target *ti, struct bio *bio)
return DM_MAPIO_KILL;
}
- dst = kmap_atomic(page);
+ dst = kmap_local_page(page);
memcpy_from_bvec(dst, &bv);
- kunmap_atomic(dst);
+ kunmap_local(dst);
block->vecs[i].bv_page = page;
block->vecs[i].bv_len = bv.bv_len;
block->vec_cnt++;
@@ -792,10 +793,10 @@ static int normal_end_io(struct dm_target *ti, struct bio *bio,
* INFO format: <logged entries> <highest allocated sector>
*/
static void log_writes_status(struct dm_target *ti, status_type_t type,
- unsigned status_flags, char *result,
- unsigned maxlen)
+ unsigned int status_flags, char *result,
+ unsigned int maxlen)
{
- unsigned sz = 0;
+ unsigned int sz = 0;
struct log_writes_c *lc = ti->private;
switch (type) {
@@ -844,8 +845,8 @@ static int log_writes_iterate_devices(struct dm_target *ti,
* Messages supported:
* mark <mark data> - specify the marked data.
*/
-static int log_writes_message(struct dm_target *ti, unsigned argc, char **argv,
- char *result, unsigned maxlen)
+static int log_writes_message(struct dm_target *ti, unsigned int argc, char **argv,
+ char *result, unsigned int maxlen)
{
int r = -EINVAL;
struct log_writes_c *lc = ti->private;
diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c
index cf10fa667797..afd94d2e7295 100644
--- a/drivers/md/dm-log.c
+++ b/drivers/md/dm-log.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2003 Sistina Software
* Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
@@ -182,10 +183,12 @@ void dm_dirty_log_destroy(struct dm_dirty_log *log)
}
EXPORT_SYMBOL(dm_dirty_log_destroy);
-/*-----------------------------------------------------------------
+/*
+ *---------------------------------------------------------------
* Persistent and core logs share a lot of their implementation.
* FIXME: need a reload method to be called from a resume
- *---------------------------------------------------------------*/
+ *---------------------------------------------------------------
+ */
/*
* Magic for persistent mirrors: "MiRr"
*/
@@ -223,7 +226,7 @@ struct log_c {
unsigned int region_count;
region_t sync_count;
- unsigned bitset_uint32_count;
+ unsigned int bitset_uint32_count;
uint32_t *clean_bits;
uint32_t *sync_bits;
uint32_t *recovering_bits; /* FIXME: this seems excessive */
@@ -255,28 +258,30 @@ struct log_c {
* The touched member needs to be updated every time we access
* one of the bitsets.
*/
-static inline int log_test_bit(uint32_t *bs, unsigned bit)
+static inline int log_test_bit(uint32_t *bs, unsigned int bit)
{
return test_bit_le(bit, bs) ? 1 : 0;
}
static inline void log_set_bit(struct log_c *l,
- uint32_t *bs, unsigned bit)
+ uint32_t *bs, unsigned int bit)
{
__set_bit_le(bit, bs);
l->touched_cleaned = 1;
}
static inline void log_clear_bit(struct log_c *l,
- uint32_t *bs, unsigned bit)
+ uint32_t *bs, unsigned int bit)
{
__clear_bit_le(bit, bs);
l->touched_dirtied = 1;
}
-/*----------------------------------------------------------------
+/*
+ *---------------------------------------------------------------
* Header IO
- *--------------------------------------------------------------*/
+ *--------------------------------------------------------------
+ */
static void header_to_disk(struct log_header_core *core, struct log_header_disk *disk)
{
disk->magic = cpu_to_le32(core->magic);
@@ -352,11 +357,13 @@ static int _check_region_size(struct dm_target *ti, uint32_t region_size)
return 1;
}
-/*----------------------------------------------------------------
+/*
+ *--------------------------------------------------------------
* core log constructor/destructor
*
* argv contains region_size followed optionally by [no]sync
- *--------------------------------------------------------------*/
+ *--------------------------------------------------------------
+ */
#define BYTE_SHIFT 3
static int create_log_context(struct dm_dirty_log *log, struct dm_target *ti,
unsigned int argc, char **argv,
@@ -382,8 +389,7 @@ static int create_log_context(struct dm_dirty_log *log, struct dm_target *ti,
else if (!strcmp(argv[1], "nosync"))
sync = NOSYNC;
else {
- DMWARN("unrecognised sync argument to "
- "dirty region log: %s", argv[1]);
+ DMWARN("unrecognised sync argument to dirty region log: %s", argv[1]);
return -EINVAL;
}
}
@@ -441,8 +447,7 @@ static int create_log_context(struct dm_dirty_log *log, struct dm_target *ti,
*/
buf_size =
dm_round_up((LOG_OFFSET << SECTOR_SHIFT) + bitset_size,
- bdev_logical_block_size(lc->header_location.
- bdev));
+ bdev_logical_block_size(lc->header_location.bdev));
if (buf_size > bdev_nr_bytes(dev->bdev)) {
DMWARN("log device %s too small: need %llu bytes",
@@ -531,11 +536,13 @@ static void core_dtr(struct dm_dirty_log *log)
destroy_log_context(lc);
}
-/*----------------------------------------------------------------
+/*
+ *---------------------------------------------------------------------
* disk log constructor/destructor
*
* argv contains log_device region_size followed optionally by [no]sync
- *--------------------------------------------------------------*/
+ *---------------------------------------------------------------------
+ */
static int disk_ctr(struct dm_dirty_log *log, struct dm_target *ti,
unsigned int argc, char **argv)
{
@@ -582,7 +589,7 @@ static void fail_log_device(struct log_c *lc)
static int disk_resume(struct dm_dirty_log *log)
{
int r;
- unsigned i;
+ unsigned int i;
struct log_c *lc = (struct log_c *) log->context;
size_t size = lc->bitset_uint32_count * sizeof(uint32_t);
@@ -646,12 +653,14 @@ static int disk_resume(struct dm_dirty_log *log)
static uint32_t core_get_region_size(struct dm_dirty_log *log)
{
struct log_c *lc = (struct log_c *) log->context;
+
return lc->region_size;
}
static int core_resume(struct dm_dirty_log *log)
{
struct log_c *lc = (struct log_c *) log->context;
+
lc->sync_search = 0;
return 0;
}
@@ -659,12 +668,14 @@ static int core_resume(struct dm_dirty_log *log)
static int core_is_clean(struct dm_dirty_log *log, region_t region)
{
struct log_c *lc = (struct log_c *) log->context;
+
return log_test_bit(lc->clean_bits, region);
}
static int core_in_sync(struct dm_dirty_log *log, region_t region, int block)
{
struct log_c *lc = (struct log_c *) log->context;
+
return log_test_bit(lc->sync_bits, region);
}
@@ -717,12 +728,14 @@ static int disk_flush(struct dm_dirty_log *log)
static void core_mark_region(struct dm_dirty_log *log, region_t region)
{
struct log_c *lc = (struct log_c *) log->context;
+
log_clear_bit(lc, lc->clean_bits, region);
}
static void core_clear_region(struct dm_dirty_log *log, region_t region)
{
struct log_c *lc = (struct log_c *) log->context;
+
if (likely(!lc->flush_failed))
log_set_bit(lc, lc->clean_bits, region);
}
@@ -757,8 +770,8 @@ static void core_set_region_sync(struct dm_dirty_log *log, region_t region,
log_clear_bit(lc, lc->recovering_bits, region);
if (in_sync) {
log_set_bit(lc, lc->sync_bits, region);
- lc->sync_count++;
- } else if (log_test_bit(lc->sync_bits, region)) {
+ lc->sync_count++;
+ } else if (log_test_bit(lc->sync_bits, region)) {
lc->sync_count--;
log_clear_bit(lc, lc->sync_bits, region);
}
@@ -766,14 +779,16 @@ static void core_set_region_sync(struct dm_dirty_log *log, region_t region,
static region_t core_get_sync_count(struct dm_dirty_log *log)
{
- struct log_c *lc = (struct log_c *) log->context;
+ struct log_c *lc = (struct log_c *) log->context;
- return lc->sync_count;
+ return lc->sync_count;
}
#define DMEMIT_SYNC \
- if (lc->sync != DEFAULTSYNC) \
- DMEMIT("%ssync ", lc->sync == NOSYNC ? "no" : "")
+ do { \
+ if (lc->sync != DEFAULTSYNC) \
+ DMEMIT("%ssync ", lc->sync == NOSYNC ? "no" : ""); \
+ } while (0)
static int core_status(struct dm_dirty_log *log, status_type_t status,
char *result, unsigned int maxlen)
@@ -781,7 +796,7 @@ static int core_status(struct dm_dirty_log *log, status_type_t status,
int sz = 0;
struct log_c *lc = log->context;
- switch(status) {
+ switch (status) {
case STATUSTYPE_INFO:
DMEMIT("1 %s", log->type->name);
break;
@@ -806,7 +821,7 @@ static int disk_status(struct dm_dirty_log *log, status_type_t status,
int sz = 0;
struct log_c *lc = log->context;
- switch(status) {
+ switch (status) {
case STATUSTYPE_INFO:
DMEMIT("3 %s %s %c", log->type->name, lc->log_dev->name,
lc->log_dev_flush_failed ? 'F' :
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 0e325469a252..61ab1a8d2c9c 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2003 Sistina Software Limited.
* Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
@@ -27,9 +28,11 @@
#include <linux/atomic.h>
#include <linux/blk-mq.h>
+static struct workqueue_struct *dm_mpath_wq;
+
#define DM_MSG_PREFIX "multipath"
#define DM_PG_INIT_DELAY_MSECS 2000
-#define DM_PG_INIT_DELAY_DEFAULT ((unsigned) -1)
+#define DM_PG_INIT_DELAY_DEFAULT ((unsigned int) -1)
#define QUEUE_IF_NO_PATH_TIMEOUT_DEFAULT 0
static unsigned long queue_if_no_path_timeout_secs = QUEUE_IF_NO_PATH_TIMEOUT_DEFAULT;
@@ -39,7 +42,7 @@ struct pgpath {
struct list_head list;
struct priority_group *pg; /* Owning PG */
- unsigned fail_count; /* Cumulative failure count */
+ unsigned int fail_count; /* Cumulative failure count */
struct dm_path path;
struct delayed_work activate_path;
@@ -59,8 +62,8 @@ struct priority_group {
struct multipath *m; /* Owning multipath instance */
struct path_selector ps;
- unsigned pg_num; /* Reference number */
- unsigned nr_pgpaths; /* Number of paths in PG */
+ unsigned int pg_num; /* Reference number */
+ unsigned int nr_pgpaths; /* Number of paths in PG */
struct list_head pgpaths;
bool bypassed:1; /* Temporarily bypass this PG? */
@@ -78,14 +81,14 @@ struct multipath {
struct priority_group *next_pg; /* Switch to this PG if set */
atomic_t nr_valid_paths; /* Total number of usable paths */
- unsigned nr_priority_groups;
+ unsigned int nr_priority_groups;
struct list_head priority_groups;
const char *hw_handler_name;
char *hw_handler_params;
wait_queue_head_t pg_init_wait; /* Wait for pg_init completion */
- unsigned pg_init_retries; /* Number of times to retry pg_init */
- unsigned pg_init_delay_msecs; /* Number of msecs before pg_init retry */
+ unsigned int pg_init_retries; /* Number of times to retry pg_init */
+ unsigned int pg_init_delay_msecs; /* Number of msecs before pg_init retry */
atomic_t pg_init_in_progress; /* Only one pg_init allowed at once */
atomic_t pg_init_count; /* Number of times pg_init called */
@@ -117,10 +120,11 @@ static void activate_path_work(struct work_struct *work);
static void process_queued_bios(struct work_struct *work);
static void queue_if_no_path_timeout_work(struct timer_list *t);
-/*-----------------------------------------------
+/*
+ *-----------------------------------------------
* Multipath state flags.
- *-----------------------------------------------*/
-
+ *-----------------------------------------------
+ */
#define MPATHF_QUEUE_IO 0 /* Must we queue all I/O? */
#define MPATHF_QUEUE_IF_NO_PATH 1 /* Queue I/O if last path fails? */
#define MPATHF_SAVED_QUEUE_IF_NO_PATH 2 /* Saved state during suspension */
@@ -135,6 +139,7 @@ static bool mpath_double_check_test_bit(int MPATHF_bit, struct multipath *m)
if (r) {
unsigned long flags;
+
spin_lock_irqsave(&m->lock, flags);
r = test_bit(MPATHF_bit, &m->flags);
spin_unlock_irqrestore(&m->lock, flags);
@@ -143,10 +148,11 @@ static bool mpath_double_check_test_bit(int MPATHF_bit, struct multipath *m)
return r;
}
-/*-----------------------------------------------
+/*
+ *-----------------------------------------------
* Allocation routines
- *-----------------------------------------------*/
-
+ *-----------------------------------------------
+ */
static struct pgpath *alloc_pgpath(void)
{
struct pgpath *pgpath = kzalloc(sizeof(*pgpath), GFP_KERNEL);
@@ -302,10 +308,11 @@ static void multipath_init_per_bio_data(struct bio *bio, struct dm_mpath_io **mp
dm_bio_record(bio_details, bio);
}
-/*-----------------------------------------------
+/*
+ *-----------------------------------------------
* Path selection
- *-----------------------------------------------*/
-
+ *-----------------------------------------------
+ */
static int __pg_init_all_paths(struct multipath *m)
{
struct pgpath *pgpath;
@@ -397,7 +404,7 @@ static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes)
unsigned long flags;
struct priority_group *pg;
struct pgpath *pgpath;
- unsigned bypassed = 1;
+ unsigned int bypassed = 1;
if (!atomic_read(&m->nr_valid_paths)) {
spin_lock_irqsave(&m->lock, flags);
@@ -467,13 +474,11 @@ failed:
* it has been invoked.
*/
#define dm_report_EIO(m) \
-do { \
DMDEBUG_LIMIT("%s: returning EIO; QIFNP = %d; SQIFNP = %d; DNFS = %d", \
dm_table_device_name((m)->ti->table), \
test_bit(MPATHF_QUEUE_IF_NO_PATH, &(m)->flags), \
test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &(m)->flags), \
- dm_noflush_suspending((m)->ti)); \
-} while (0)
+ dm_noflush_suspending((m)->ti))
/*
* Check whether bios must be queued in the device-mapper core rather
@@ -707,6 +712,7 @@ static void process_queued_bios(struct work_struct *work)
blk_start_plug(&plug);
while ((bio = bio_list_pop(&bios))) {
struct dm_mpath_io *mpio = get_mpio_from_bio(bio);
+
dm_bio_restore(get_bio_details_from_mpio(mpio), bio);
r = __multipath_map_bio(m, bio, mpio);
switch (r) {
@@ -733,15 +739,15 @@ static void process_queued_bios(struct work_struct *work)
/*
* If we run out of usable paths, should we queue I/O or error it?
*/
-static int queue_if_no_path(struct multipath *m, bool queue_if_no_path,
+static int queue_if_no_path(struct multipath *m, bool f_queue_if_no_path,
bool save_old_value, const char *caller)
{
unsigned long flags;
bool queue_if_no_path_bit, saved_queue_if_no_path_bit;
const char *dm_dev_name = dm_table_device_name(m->ti->table);
- DMDEBUG("%s: %s caller=%s queue_if_no_path=%d save_old_value=%d",
- dm_dev_name, __func__, caller, queue_if_no_path, save_old_value);
+ DMDEBUG("%s: %s caller=%s f_queue_if_no_path=%d save_old_value=%d",
+ dm_dev_name, __func__, caller, f_queue_if_no_path, save_old_value);
spin_lock_irqsave(&m->lock, flags);
@@ -754,11 +760,11 @@ static int queue_if_no_path(struct multipath *m, bool queue_if_no_path,
dm_dev_name);
} else
assign_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags, queue_if_no_path_bit);
- } else if (!queue_if_no_path && saved_queue_if_no_path_bit) {
+ } else if (!f_queue_if_no_path && saved_queue_if_no_path_bit) {
/* due to "fail_if_no_path" message, need to honor it. */
clear_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
}
- assign_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags, queue_if_no_path);
+ assign_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags, f_queue_if_no_path);
DMDEBUG("%s: after %s changes; QIFNP = %d; SQIFNP = %d; DNFS = %d",
dm_dev_name, __func__,
@@ -768,7 +774,7 @@ static int queue_if_no_path(struct multipath *m, bool queue_if_no_path,
spin_unlock_irqrestore(&m->lock, flags);
- if (!queue_if_no_path) {
+ if (!f_queue_if_no_path) {
dm_table_run_md_queue_async(m->ti->table);
process_queued_io_list(m);
}
@@ -825,7 +831,8 @@ static void trigger_event(struct work_struct *work)
dm_table_event(m->ti->table);
}
-/*-----------------------------------------------------------------
+/*
+ *---------------------------------------------------------------
* Constructor/argument parsing:
* <#multipath feature args> [<arg>]*
* <#hw_handler args> [hw_handler [<arg>]*]
@@ -834,13 +841,14 @@ static void trigger_event(struct work_struct *work)
* [<selector> <#selector args> [<arg>]*
* <#paths> <#per-path selector args>
* [<path> [<arg>]* ]+ ]+
- *---------------------------------------------------------------*/
+ *---------------------------------------------------------------
+ */
static int parse_path_selector(struct dm_arg_set *as, struct priority_group *pg,
struct dm_target *ti)
{
int r;
struct path_selector_type *pst;
- unsigned ps_argc;
+ unsigned int ps_argc;
static const struct dm_arg _args[] = {
{0, 1024, "invalid number of path selector args"},
@@ -983,7 +991,7 @@ static struct priority_group *parse_priority_group(struct dm_arg_set *as,
};
int r;
- unsigned i, nr_selector_args, nr_args;
+ unsigned int i, nr_selector_args, nr_args;
struct priority_group *pg;
struct dm_target *ti = m->ti;
@@ -1049,7 +1057,7 @@ static struct priority_group *parse_priority_group(struct dm_arg_set *as,
static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m)
{
- unsigned hw_argc;
+ unsigned int hw_argc;
int ret;
struct dm_target *ti = m->ti;
@@ -1086,7 +1094,7 @@ static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m)
goto fail;
}
j = sprintf(p, "%d", hw_argc - 1);
- for (i = 0, p+=j+1; i <= hw_argc - 2; i++, p+=j+1)
+ for (i = 0, p += j + 1; i <= hw_argc - 2; i++, p += j + 1)
j = sprintf(p, "%s", as->argv[i]);
}
dm_consume_args(as, hw_argc - 1);
@@ -1101,7 +1109,7 @@ fail:
static int parse_features(struct dm_arg_set *as, struct multipath *m)
{
int r;
- unsigned argc;
+ unsigned int argc;
struct dm_target *ti = m->ti;
const char *arg_name;
@@ -1170,7 +1178,7 @@ static int parse_features(struct dm_arg_set *as, struct multipath *m)
return r;
}
-static int multipath_ctr(struct dm_target *ti, unsigned argc, char **argv)
+static int multipath_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
/* target arguments */
static const struct dm_arg _args[] = {
@@ -1181,8 +1189,8 @@ static int multipath_ctr(struct dm_target *ti, unsigned argc, char **argv)
int r;
struct multipath *m;
struct dm_arg_set as;
- unsigned pg_count = 0;
- unsigned next_pg_num;
+ unsigned int pg_count = 0;
+ unsigned int next_pg_num;
unsigned long flags;
as.argc = argc;
@@ -1224,7 +1232,7 @@ static int multipath_ctr(struct dm_target *ti, unsigned argc, char **argv)
/* parse the priority groups */
while (as.argc) {
struct priority_group *pg;
- unsigned nr_valid_paths = atomic_read(&m->nr_valid_paths);
+ unsigned int nr_valid_paths = atomic_read(&m->nr_valid_paths);
pg = parse_priority_group(&as, m);
if (IS_ERR(pg)) {
@@ -1347,7 +1355,7 @@ static int fail_path(struct pgpath *pgpath)
dm_path_uevent(DM_UEVENT_PATH_FAILED, m->ti,
pgpath->path.dev->name, atomic_read(&m->nr_valid_paths));
- schedule_work(&m->trigger_event);
+ queue_work(dm_mpath_wq, &m->trigger_event);
enable_nopath_timeout(m);
@@ -1365,7 +1373,7 @@ static int reinstate_path(struct pgpath *pgpath)
int r = 0, run_queue = 0;
unsigned long flags;
struct multipath *m = pgpath->pg->m;
- unsigned nr_valid_paths;
+ unsigned int nr_valid_paths;
spin_lock_irqsave(&m->lock, flags);
@@ -1454,13 +1462,13 @@ static void bypass_pg(struct multipath *m, struct priority_group *pg,
static int switch_pg_num(struct multipath *m, const char *pgstr)
{
struct priority_group *pg;
- unsigned pgnum;
+ unsigned int pgnum;
unsigned long flags;
char dummy;
if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
!m->nr_priority_groups || (pgnum > m->nr_priority_groups)) {
- DMWARN("invalid PG number supplied to switch_pg_num");
+ DMWARN("invalid PG number supplied to %s", __func__);
return -EINVAL;
}
@@ -1487,7 +1495,7 @@ static int switch_pg_num(struct multipath *m, const char *pgstr)
static int bypass_pg_num(struct multipath *m, const char *pgstr, bool bypassed)
{
struct priority_group *pg;
- unsigned pgnum;
+ unsigned int pgnum;
char dummy;
if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
@@ -1789,14 +1797,14 @@ static void multipath_resume(struct dm_target *ti)
* num_paths num_selector_args [path_dev [selector_args]* ]+ ]+
*/
static void multipath_status(struct dm_target *ti, status_type_t type,
- unsigned status_flags, char *result, unsigned maxlen)
+ unsigned int status_flags, char *result, unsigned int maxlen)
{
int sz = 0, pg_counter, pgpath_counter;
unsigned long flags;
struct multipath *m = ti->private;
struct priority_group *pg;
struct pgpath *p;
- unsigned pg_num;
+ unsigned int pg_num;
char state;
spin_lock_irqsave(&m->lock, flags);
@@ -1821,7 +1829,7 @@ static void multipath_status(struct dm_target *ti, status_type_t type,
if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags))
DMEMIT("retain_attached_hw_handler ");
if (m->queue_mode != DM_TYPE_REQUEST_BASED) {
- switch(m->queue_mode) {
+ switch (m->queue_mode) {
case DM_TYPE_BIO_BASED:
DMEMIT("queue_mode bio ");
break;
@@ -1948,8 +1956,8 @@ static void multipath_status(struct dm_target *ti, status_type_t type,
spin_unlock_irqrestore(&m->lock, flags);
}
-static int multipath_message(struct dm_target *ti, unsigned argc, char **argv,
- char *result, unsigned maxlen)
+static int multipath_message(struct dm_target *ti, unsigned int argc, char **argv,
+ char *result, unsigned int maxlen)
{
int r = -EINVAL;
struct dm_dev *dev;
@@ -2116,6 +2124,7 @@ static int multipath_busy(struct dm_target *ti)
/* no paths available, for blk-mq: rely on IO mapping to delay requeue */
if (!atomic_read(&m->nr_valid_paths)) {
unsigned long flags;
+
spin_lock_irqsave(&m->lock, flags);
if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
spin_unlock_irqrestore(&m->lock, flags);
@@ -2168,9 +2177,11 @@ static int multipath_busy(struct dm_target *ti)
return busy;
}
-/*-----------------------------------------------------------------
+/*
+ *---------------------------------------------------------------
* Module setup
- *---------------------------------------------------------------*/
+ *---------------------------------------------------------------
+ */
static struct target_type multipath_target = {
.name = "multipath",
.version = {1, 14, 0},
@@ -2196,12 +2207,11 @@ static struct target_type multipath_target = {
static int __init dm_multipath_init(void)
{
- int r;
+ int r = -ENOMEM;
kmultipathd = alloc_workqueue("kmpathd", WQ_MEM_RECLAIM, 0);
if (!kmultipathd) {
DMERR("failed to create workqueue kmpathd");
- r = -ENOMEM;
goto bad_alloc_kmultipathd;
}
@@ -2215,10 +2225,15 @@ static int __init dm_multipath_init(void)
WQ_MEM_RECLAIM);
if (!kmpath_handlerd) {
DMERR("failed to create workqueue kmpath_handlerd");
- r = -ENOMEM;
goto bad_alloc_kmpath_handlerd;
}
+ dm_mpath_wq = alloc_workqueue("dm_mpath_wq", 0, 0);
+ if (!dm_mpath_wq) {
+ DMERR("failed to create workqueue dm_mpath_wq");
+ goto bad_alloc_dm_mpath_wq;
+ }
+
r = dm_register_target(&multipath_target);
if (r < 0) {
DMERR("request-based register failed %d", r);
@@ -2229,6 +2244,8 @@ static int __init dm_multipath_init(void)
return 0;
bad_register_target:
+ destroy_workqueue(dm_mpath_wq);
+bad_alloc_dm_mpath_wq:
destroy_workqueue(kmpath_handlerd);
bad_alloc_kmpath_handlerd:
destroy_workqueue(kmultipathd);
@@ -2238,6 +2255,7 @@ bad_alloc_kmultipathd:
static void __exit dm_multipath_exit(void)
{
+ destroy_workqueue(dm_mpath_wq);
destroy_workqueue(kmpath_handlerd);
destroy_workqueue(kmultipathd);
@@ -2247,8 +2265,7 @@ static void __exit dm_multipath_exit(void)
module_init(dm_multipath_init);
module_exit(dm_multipath_exit);
-module_param_named(queue_if_no_path_timeout_secs,
- queue_if_no_path_timeout_secs, ulong, S_IRUGO | S_IWUSR);
+module_param_named(queue_if_no_path_timeout_secs, queue_if_no_path_timeout_secs, ulong, 0644);
MODULE_PARM_DESC(queue_if_no_path_timeout_secs, "No available paths queue IO timeout in seconds");
MODULE_DESCRIPTION(DM_NAME " multipath target");
diff --git a/drivers/md/dm-mpath.h b/drivers/md/dm-mpath.h
index e230f7196259..0e168e0c82dd 100644
--- a/drivers/md/dm-mpath.h
+++ b/drivers/md/dm-mpath.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2004 Red Hat, Inc. All rights reserved.
*
@@ -17,6 +18,6 @@ struct dm_path {
};
/* Callback for hwh_pg_init_fn to use when complete */
-void dm_pg_init_complete(struct dm_path *path, unsigned err_flags);
+void dm_pg_init_complete(struct dm_path *path, unsigned int err_flags);
#endif
diff --git a/drivers/md/dm-path-selector.c b/drivers/md/dm-path-selector.c
index fa0ccc585cb4..3e4cb81ce512 100644
--- a/drivers/md/dm-path-selector.c
+++ b/drivers/md/dm-path-selector.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2003 Sistina Software.
* Copyright (C) 2004 Red Hat, Inc. All rights reserved.
@@ -114,6 +115,7 @@ int dm_register_path_selector(struct path_selector_type *pst)
return r;
}
+EXPORT_SYMBOL_GPL(dm_register_path_selector);
int dm_unregister_path_selector(struct path_selector_type *pst)
{
@@ -135,6 +137,4 @@ int dm_unregister_path_selector(struct path_selector_type *pst)
return 0;
}
-
-EXPORT_SYMBOL_GPL(dm_register_path_selector);
EXPORT_SYMBOL_GPL(dm_unregister_path_selector);
diff --git a/drivers/md/dm-path-selector.h b/drivers/md/dm-path-selector.h
index 83cac2b04b66..3861b2d8b963 100644
--- a/drivers/md/dm-path-selector.h
+++ b/drivers/md/dm-path-selector.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2003 Sistina Software.
* Copyright (C) 2004 Red Hat, Inc. All rights reserved.
@@ -52,44 +53,43 @@ struct path_selector_type {
/*
* Constructs a path selector object, takes custom arguments
*/
- int (*create) (struct path_selector *ps, unsigned argc, char **argv);
- void (*destroy) (struct path_selector *ps);
+ int (*create)(struct path_selector *ps, unsigned int argc, char **argv);
+ void (*destroy)(struct path_selector *ps);
/*
* Add an opaque path object, along with some selector specific
* path args (eg, path priority).
*/
- int (*add_path) (struct path_selector *ps, struct dm_path *path,
- int argc, char **argv, char **error);
+ int (*add_path)(struct path_selector *ps, struct dm_path *path,
+ int argc, char **argv, char **error);
/*
* Chooses a path for this io, if no paths are available then
* NULL will be returned.
*/
- struct dm_path *(*select_path) (struct path_selector *ps,
- size_t nr_bytes);
+ struct dm_path *(*select_path)(struct path_selector *ps, size_t nr_bytes);
/*
* Notify the selector that a path has failed.
*/
- void (*fail_path) (struct path_selector *ps, struct dm_path *p);
+ void (*fail_path)(struct path_selector *ps, struct dm_path *p);
/*
* Ask selector to reinstate a path.
*/
- int (*reinstate_path) (struct path_selector *ps, struct dm_path *p);
+ int (*reinstate_path)(struct path_selector *ps, struct dm_path *p);
/*
* Table content based on parameters added in ps_add_path_fn
* or path selector status
*/
- int (*status) (struct path_selector *ps, struct dm_path *path,
- status_type_t type, char *result, unsigned int maxlen);
+ int (*status)(struct path_selector *ps, struct dm_path *path,
+ status_type_t type, char *result, unsigned int maxlen);
- int (*start_io) (struct path_selector *ps, struct dm_path *path,
- size_t nr_bytes);
- int (*end_io) (struct path_selector *ps, struct dm_path *path,
- size_t nr_bytes, u64 start_time);
+ int (*start_io)(struct path_selector *ps, struct dm_path *path,
+ size_t nr_bytes);
+ int (*end_io)(struct path_selector *ps, struct dm_path *path,
+ size_t nr_bytes, u64 start_time);
};
/* Register a path selector */
diff --git a/drivers/md/dm-ps-historical-service-time.c b/drivers/md/dm-ps-historical-service-time.c
index 1d82c95d323d..b49e10d76d03 100644
--- a/drivers/md/dm-ps-historical-service-time.c
+++ b/drivers/md/dm-ps-historical-service-time.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Historical Service Time
*
diff --git a/drivers/md/dm-ps-io-affinity.c b/drivers/md/dm-ps-io-affinity.c
index f74501e65a8e..461ee6b2044d 100644
--- a/drivers/md/dm-ps-io-affinity.c
+++ b/drivers/md/dm-ps-io-affinity.c
@@ -108,7 +108,7 @@ free_pi:
return ret;
}
-static int ioa_create(struct path_selector *ps, unsigned argc, char **argv)
+static int ioa_create(struct path_selector *ps, unsigned int argc, char **argv)
{
struct selector *s;
@@ -138,7 +138,7 @@ free_selector:
static void ioa_destroy(struct path_selector *ps)
{
struct selector *s = ps->context;
- unsigned cpu;
+ unsigned int cpu;
for_each_cpu(cpu, s->path_mask)
ioa_free_path(s, cpu);
@@ -162,7 +162,7 @@ static int ioa_status(struct path_selector *ps, struct dm_path *path,
return sz;
}
- switch(type) {
+ switch (type) {
case STATUSTYPE_INFO:
DMEMIT("%d ", atomic_read(&s->map_misses));
break;
diff --git a/drivers/md/dm-ps-queue-length.c b/drivers/md/dm-ps-queue-length.c
index cef70657bbbc..e305f05ad1e5 100644
--- a/drivers/md/dm-ps-queue-length.c
+++ b/drivers/md/dm-ps-queue-length.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2004-2005 IBM Corp. All Rights Reserved.
* Copyright (C) 2006-2009 NEC Corporation.
@@ -35,7 +36,7 @@ struct selector {
struct path_info {
struct list_head list;
struct dm_path *path;
- unsigned repeat_count;
+ unsigned int repeat_count;
atomic_t qlen; /* the number of in-flight I/Os */
};
@@ -52,7 +53,7 @@ static struct selector *alloc_selector(void)
return s;
}
-static int ql_create(struct path_selector *ps, unsigned argc, char **argv)
+static int ql_create(struct path_selector *ps, unsigned int argc, char **argv)
{
struct selector *s = alloc_selector();
@@ -84,9 +85,9 @@ static void ql_destroy(struct path_selector *ps)
}
static int ql_status(struct path_selector *ps, struct dm_path *path,
- status_type_t type, char *result, unsigned maxlen)
+ status_type_t type, char *result, unsigned int maxlen)
{
- unsigned sz = 0;
+ unsigned int sz = 0;
struct path_info *pi;
/* When called with NULL path, return selector status/args. */
@@ -116,14 +117,14 @@ static int ql_add_path(struct path_selector *ps, struct dm_path *path,
{
struct selector *s = ps->context;
struct path_info *pi;
- unsigned repeat_count = QL_MIN_IO;
+ unsigned int repeat_count = QL_MIN_IO;
char dummy;
unsigned long flags;
/*
* Arguments: [<repeat_count>]
- * <repeat_count>: The number of I/Os before switching path.
- * If not given, default (QL_MIN_IO) is used.
+ * <repeat_count>: The number of I/Os before switching path.
+ * If not given, default (QL_MIN_IO) is used.
*/
if (argc > 1) {
*error = "queue-length ps: incorrect number of arguments";
diff --git a/drivers/md/dm-ps-round-robin.c b/drivers/md/dm-ps-round-robin.c
index 27f44c5fa04e..0f04b673597a 100644
--- a/drivers/md/dm-ps-round-robin.c
+++ b/drivers/md/dm-ps-round-robin.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2003 Sistina Software.
* Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
@@ -20,13 +21,15 @@
#define RR_MIN_IO 1
#define RR_VERSION "1.2.0"
-/*-----------------------------------------------------------------
+/*
+ *---------------------------------------------------------------
* Path-handling code, paths are held in lists
- *---------------------------------------------------------------*/
+ *---------------------------------------------------------------
+ */
struct path_info {
struct list_head list;
struct dm_path *path;
- unsigned repeat_count;
+ unsigned int repeat_count;
};
static void free_paths(struct list_head *paths)
@@ -39,10 +42,11 @@ static void free_paths(struct list_head *paths)
}
}
-/*-----------------------------------------------------------------
+/*
+ *---------------------------------------------------------------
* Round-robin selector
- *---------------------------------------------------------------*/
-
+ *---------------------------------------------------------------
+ */
struct selector {
struct list_head valid_paths;
struct list_head invalid_paths;
@@ -62,7 +66,7 @@ static struct selector *alloc_selector(void)
return s;
}
-static int rr_create(struct path_selector *ps, unsigned argc, char **argv)
+static int rr_create(struct path_selector *ps, unsigned int argc, char **argv)
{
struct selector *s;
@@ -93,7 +97,7 @@ static int rr_status(struct path_selector *ps, struct dm_path *path,
if (!path)
DMEMIT("0 ");
else {
- switch(type) {
+ switch (type) {
case STATUSTYPE_INFO:
break;
case STATUSTYPE_TABLE:
@@ -119,7 +123,7 @@ static int rr_add_path(struct path_selector *ps, struct dm_path *path,
{
struct selector *s = ps->context;
struct path_info *pi;
- unsigned repeat_count = RR_MIN_IO;
+ unsigned int repeat_count = RR_MIN_IO;
char dummy;
unsigned long flags;
diff --git a/drivers/md/dm-ps-service-time.c b/drivers/md/dm-ps-service-time.c
index 3ec9c33265c5..969d31c40272 100644
--- a/drivers/md/dm-ps-service-time.c
+++ b/drivers/md/dm-ps-service-time.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2007-2009 NEC Corporation. All Rights Reserved.
*
@@ -30,8 +31,8 @@ struct selector {
struct path_info {
struct list_head list;
struct dm_path *path;
- unsigned repeat_count;
- unsigned relative_throughput;
+ unsigned int repeat_count;
+ unsigned int relative_throughput;
atomic_t in_flight_size; /* Total size of in-flight I/Os */
};
@@ -48,7 +49,7 @@ static struct selector *alloc_selector(void)
return s;
}
-static int st_create(struct path_selector *ps, unsigned argc, char **argv)
+static int st_create(struct path_selector *ps, unsigned int argc, char **argv)
{
struct selector *s = alloc_selector();
@@ -80,9 +81,9 @@ static void st_destroy(struct path_selector *ps)
}
static int st_status(struct path_selector *ps, struct dm_path *path,
- status_type_t type, char *result, unsigned maxlen)
+ status_type_t type, char *result, unsigned int maxlen)
{
- unsigned sz = 0;
+ unsigned int sz = 0;
struct path_info *pi;
if (!path)
@@ -113,22 +114,21 @@ static int st_add_path(struct path_selector *ps, struct dm_path *path,
{
struct selector *s = ps->context;
struct path_info *pi;
- unsigned repeat_count = ST_MIN_IO;
- unsigned relative_throughput = 1;
+ unsigned int repeat_count = ST_MIN_IO;
+ unsigned int relative_throughput = 1;
char dummy;
unsigned long flags;
/*
* Arguments: [<repeat_count> [<relative_throughput>]]
- * <repeat_count>: The number of I/Os before switching path.
- * If not given, default (ST_MIN_IO) is used.
- * <relative_throughput>: The relative throughput value of
+ * <repeat_count>: The number of I/Os before switching path.
+ * If not given, default (ST_MIN_IO) is used.
+ * <relative_throughput>: The relative throughput value of
* the path among all paths in the path-group.
- * The valid range: 0-<ST_MAX_RELATIVE_THROUGHPUT>
+ * The valid range: 0-<ST_MAX_RELATIVE_THROUGHPUT>
* If not given, minimum value '1' is used.
* If '0' is given, the path isn't selected while
- * other paths having a positive value are
- * available.
+ * other paths having a positive value are available.
*/
if (argc > 2) {
*error = "service-time ps: incorrect number of arguments";
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 54263679a7b1..60632b409b80 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2010-2011 Neil Brown
* Copyright (C) 2010-2018 Red Hat, Inc. All rights reserved.
@@ -29,10 +30,10 @@
*/
#define MIN_RAID456_JOURNAL_SPACE (4*2048)
-static bool devices_handle_discard_safely = false;
+static bool devices_handle_discard_safely;
/*
- * The following flags are used by dm-raid.c to set up the array state.
+ * The following flags are used by dm-raid to set up the array state.
* They must be cleared before md_run is called.
*/
#define FirstUse 10 /* rdev flag */
@@ -362,8 +363,8 @@ static struct {
const int mode;
const char *param;
} _raid456_journal_mode[] = {
- { R5C_JOURNAL_MODE_WRITE_THROUGH , "writethrough" },
- { R5C_JOURNAL_MODE_WRITE_BACK , "writeback" }
+ { R5C_JOURNAL_MODE_WRITE_THROUGH, "writethrough" },
+ { R5C_JOURNAL_MODE_WRITE_BACK, "writeback" }
};
/* Return MD raid4/5/6 journal mode for dm @journal_mode one */
@@ -1081,7 +1082,7 @@ static int validate_raid_redundancy(struct raid_set *rs)
if ((!rs->dev[i].rdev.sb_page ||
!test_bit(In_sync, &rs->dev[i].rdev.flags)) &&
(++rebuilds_per_group >= copies))
- goto too_many;
+ goto too_many;
}
break;
default:
@@ -1114,7 +1115,7 @@ too_many:
* [stripe_cache <sectors>] Stripe cache size for higher RAIDs
* [region_size <sectors>] Defines granularity of bitmap
* [journal_dev <dev>] raid4/5/6 journaling deviice
- * (i.e. write hole closing log)
+ * (i.e. write hole closing log)
*
* RAID10-only options:
* [raid10_copies <# copies>] Number of copies. (Default: 2)
@@ -1988,7 +1989,7 @@ struct dm_raid_superblock {
__le64 sectors; /* Used device size in sectors */
/*
- * Additonal Bit field of devices indicating failures to support
+ * Additional Bit field of devices indicating failures to support
* up to 256 devices with the 1.9.0 on-disk metadata format
*/
__le64 extended_failed_devices[DISKS_ARRAY_ELEMS - 1];
@@ -2855,7 +2856,7 @@ static int rs_setup_reshape(struct raid_set *rs)
*
* - in case of adding disk(s), array size has
* to grow after the disk adding reshape,
- * which'll hapen in the event handler;
+ * which'll happen in the event handler;
* reshape will happen forward, so space has to
* be available at the beginning of each disk
*
@@ -3148,7 +3149,7 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
* If a takeover is needed, userspace sets any additional
* devices to rebuild and we can check for a valid request here.
*
- * If acceptible, set the level to the new requested
+ * If acceptable, set the level to the new requested
* one, prohibit requesting recovery, allow the raid
* set to run and store superblocks during resume.
*/
@@ -3183,12 +3184,12 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
/* Out-of-place space has to be available to allow for a reshape unless raid1! */
if (reshape_sectors || rs_is_raid1(rs)) {
/*
- * We can only prepare for a reshape here, because the
- * raid set needs to run to provide the repective reshape
- * check functions via its MD personality instance.
- *
- * So do the reshape check after md_run() succeeded.
- */
+ * We can only prepare for a reshape here, because the
+ * raid set needs to run to provide the repective reshape
+ * check functions via its MD personality instance.
+ *
+ * So do the reshape check after md_run() succeeded.
+ */
r = rs_prepare_reshape(rs);
if (r)
goto bad;
@@ -3712,7 +3713,7 @@ static void raid_status(struct dm_target *ti, status_type_t type,
}
static int raid_message(struct dm_target *ti, unsigned int argc, char **argv,
- char *result, unsigned maxlen)
+ char *result, unsigned int maxlen)
{
struct raid_set *rs = ti->private;
struct mddev *mddev = &rs->md;
@@ -4001,7 +4002,7 @@ static int raid_preresume(struct dm_target *ti)
}
/* Resize bitmap to adjust to changed region size (aka MD bitmap chunksize) or grown device size */
- if (test_bit(RT_FLAG_RS_BITMAP_LOADED, &rs->runtime_flags) && mddev->bitmap &&
+ if (test_bit(RT_FLAG_RS_BITMAP_LOADED, &rs->runtime_flags) && mddev->bitmap &&
(test_bit(RT_FLAG_RS_GROW, &rs->runtime_flags) ||
(rs->requested_bitmap_chunk_sectors &&
mddev->bitmap_info.chunksize != to_bytes(rs->requested_bitmap_chunk_sectors)))) {
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 06a38dc32025..bc417a5e5b89 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2003 Sistina Software Limited.
* Copyright (C) 2005-2008 Red Hat, Inc. All rights reserved.
@@ -19,6 +20,8 @@
#include <linux/dm-kcopyd.h>
#include <linux/dm-region-hash.h>
+static struct workqueue_struct *dm_raid1_wq;
+
#define DM_MSG_PREFIX "raid1"
#define MAX_RECOVERY 1 /* Maximum number of regions recovered in parallel. */
@@ -32,9 +35,11 @@
static DECLARE_WAIT_QUEUE_HEAD(_kmirrord_recovery_stopped);
-/*-----------------------------------------------------------------
+/*
+ *---------------------------------------------------------------
* Mirror set structures.
- *---------------------------------------------------------------*/
+ *---------------------------------------------------------------
+ */
enum dm_raid1_error {
DM_RAID1_WRITE_ERROR,
DM_RAID1_FLUSH_ERROR,
@@ -82,7 +87,7 @@ struct mirror_set {
struct work_struct trigger_event;
- unsigned nr_mirrors;
+ unsigned int nr_mirrors;
struct mirror mirror[];
};
@@ -236,8 +241,8 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
* Better to issue requests to same failing device
* than to risk returning corrupt data.
*/
- DMERR("Primary mirror (%s) failed while out-of-sync: "
- "Reads may fail.", m->dev->name);
+ DMERR("Primary mirror (%s) failed while out-of-sync: Reads may fail.",
+ m->dev->name);
goto out;
}
@@ -248,7 +253,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
DMWARN("All sides of mirror have failed.");
out:
- schedule_work(&ms->trigger_event);
+ queue_work(dm_raid1_wq, &ms->trigger_event);
}
static int mirror_flush(struct dm_target *ti)
@@ -285,13 +290,15 @@ static int mirror_flush(struct dm_target *ti)
return 0;
}
-/*-----------------------------------------------------------------
+/*
+ *---------------------------------------------------------------
* Recovery.
*
* When a mirror is first activated we may find that some regions
* are in the no-sync state. We have to recover these by
* recopying from the default mirror to all the others.
- *---------------------------------------------------------------*/
+ *---------------------------------------------------------------
+ */
static void recovery_complete(int read_err, unsigned long write_err,
void *context)
{
@@ -327,7 +334,7 @@ static void recovery_complete(int read_err, unsigned long write_err,
static void recover(struct mirror_set *ms, struct dm_region *reg)
{
- unsigned i;
+ unsigned int i;
struct dm_io_region from, to[DM_KCOPYD_MAX_REGIONS], *dest;
struct mirror *m;
unsigned long flags = 0;
@@ -408,9 +415,11 @@ static void do_recovery(struct mirror_set *ms)
}
}
-/*-----------------------------------------------------------------
+/*
+ *---------------------------------------------------------------
* Reads
- *---------------------------------------------------------------*/
+ *---------------------------------------------------------------
+ */
static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
{
struct mirror *m = get_default_mirror(ms);
@@ -498,9 +507,11 @@ static void hold_bio(struct mirror_set *ms, struct bio *bio)
spin_unlock_irq(&ms->lock);
}
-/*-----------------------------------------------------------------
+/*
+ *---------------------------------------------------------------
* Reads
- *---------------------------------------------------------------*/
+ *---------------------------------------------------------------
+ */
static void read_callback(unsigned long error, void *context)
{
struct bio *bio = context;
@@ -517,8 +528,7 @@ static void read_callback(unsigned long error, void *context)
fail_mirror(m, DM_RAID1_READ_ERROR);
if (likely(default_ok(m)) || mirror_available(m->ms, bio)) {
- DMWARN_LIMIT("Read failure on mirror device %s. "
- "Trying alternative device.",
+ DMWARN_LIMIT("Read failure on mirror device %s. Trying alternative device.",
m->dev->name);
queue_bio(m->ms, bio, bio_data_dir(bio));
return;
@@ -579,21 +589,21 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
}
}
-/*-----------------------------------------------------------------
+/*
+ *---------------------------------------------------------------------
* Writes.
*
* We do different things with the write io depending on the
* state of the region that it's in:
*
- * SYNC: increment pending, use kcopyd to write to *all* mirrors
+ * SYNC: increment pending, use kcopyd to write to *all* mirrors
* RECOVERING: delay the io until recovery completes
* NOSYNC: increment pending, just write to the default mirror
- *---------------------------------------------------------------*/
-
-
+ *---------------------------------------------------------------------
+ */
static void write_callback(unsigned long error, void *context)
{
- unsigned i;
+ unsigned int i;
struct bio *bio = (struct bio *) context;
struct mirror_set *ms;
int should_wake = 0;
@@ -842,9 +852,11 @@ static void trigger_event(struct work_struct *work)
dm_table_event(ms->ti->table);
}
-/*-----------------------------------------------------------------
+/*
+ *---------------------------------------------------------------
* kmirrord
- *---------------------------------------------------------------*/
+ *---------------------------------------------------------------
+ */
static void do_mirror(struct work_struct *work)
{
struct mirror_set *ms = container_of(work, struct mirror_set,
@@ -868,9 +880,11 @@ static void do_mirror(struct work_struct *work)
do_failures(ms, &failures);
}
-/*-----------------------------------------------------------------
+/*
+ *---------------------------------------------------------------
* Target functions
- *---------------------------------------------------------------*/
+ *---------------------------------------------------------------
+ */
static struct mirror_set *alloc_context(unsigned int nr_mirrors,
uint32_t region_size,
struct dm_target *ti,
@@ -903,7 +917,7 @@ static struct mirror_set *alloc_context(unsigned int nr_mirrors,
if (IS_ERR(ms->io_client)) {
ti->error = "Error creating dm_io client";
kfree(ms);
- return NULL;
+ return NULL;
}
ms->rh = dm_region_hash_create(ms, dispatch_bios, wakeup_mirrord,
@@ -963,10 +977,10 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
* Create dirty log: log_type #log_params <log_params>
*/
static struct dm_dirty_log *create_dirty_log(struct dm_target *ti,
- unsigned argc, char **argv,
- unsigned *args_used)
+ unsigned int argc, char **argv,
+ unsigned int *args_used)
{
- unsigned param_count;
+ unsigned int param_count;
struct dm_dirty_log *dl;
char dummy;
@@ -997,10 +1011,10 @@ static struct dm_dirty_log *create_dirty_log(struct dm_target *ti,
return dl;
}
-static int parse_features(struct mirror_set *ms, unsigned argc, char **argv,
- unsigned *args_used)
+static int parse_features(struct mirror_set *ms, unsigned int argc, char **argv,
+ unsigned int *args_used)
{
- unsigned num_features;
+ unsigned int num_features;
struct dm_target *ti = ms->ti;
char dummy;
int i;
@@ -1389,7 +1403,7 @@ static char device_status_char(struct mirror *m)
static void mirror_status(struct dm_target *ti, status_type_t type,
- unsigned status_flags, char *result, unsigned maxlen)
+ unsigned int status_flags, char *result, unsigned int maxlen)
{
unsigned int m, sz = 0;
int num_feature_args = 0;
@@ -1458,7 +1472,7 @@ static int mirror_iterate_devices(struct dm_target *ti,
{
struct mirror_set *ms = ti->private;
int ret = 0;
- unsigned i;
+ unsigned int i;
for (i = 0; !ret && i < ms->nr_mirrors; i++)
ret = fn(ti, ms->mirror[i].dev,
@@ -1484,22 +1498,28 @@ static struct target_type mirror_target = {
static int __init dm_mirror_init(void)
{
- int r;
+ int r = -ENOMEM;
+
+ dm_raid1_wq = alloc_workqueue("dm_raid1_wq", 0, 0);
+ if (!dm_raid1_wq)
+ goto bad_target;
r = dm_register_target(&mirror_target);
if (r < 0) {
- DMERR("Failed to register mirror target");
+ destroy_workqueue(dm_raid1_wq);
goto bad_target;
}
return 0;
bad_target:
+ DMERR("Failed to register mirror target");
return r;
}
static void __exit dm_mirror_exit(void)
{
+ destroy_workqueue(dm_raid1_wq);
dm_unregister_target(&mirror_target);
}
diff --git a/drivers/md/dm-region-hash.c b/drivers/md/dm-region-hash.c
index 1f760451e6f4..852cfa37d48a 100644
--- a/drivers/md/dm-region-hash.c
+++ b/drivers/md/dm-region-hash.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2003 Sistina Software Limited.
* Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
@@ -18,7 +19,8 @@
#define DM_MSG_PREFIX "region hash"
-/*-----------------------------------------------------------------
+/*
+ *------------------------------------------------------------------
* Region hash
*
* The mirror splits itself up into discrete regions. Each
@@ -53,20 +55,21 @@
* lists in the region_hash, with the 'state', 'list' and
* 'delayed_bios' fields of the regions. This is used from irq
* context, so all other uses will have to suspend local irqs.
- *---------------------------------------------------------------*/
+ *------------------------------------------------------------------
+ */
struct dm_region_hash {
uint32_t region_size;
- unsigned region_shift;
+ unsigned int region_shift;
/* holds persistent region state */
struct dm_dirty_log *log;
/* hash table */
rwlock_t hash_lock;
- unsigned mask;
- unsigned nr_buckets;
- unsigned prime;
- unsigned shift;
+ unsigned int mask;
+ unsigned int nr_buckets;
+ unsigned int prime;
+ unsigned int shift;
struct list_head *buckets;
/*
@@ -74,7 +77,7 @@ struct dm_region_hash {
*/
int flush_failure;
- unsigned max_recovery; /* Max # of regions to recover in parallel */
+ unsigned int max_recovery; /* Max # of regions to recover in parallel */
spinlock_t region_lock;
atomic_t recovery_in_flight;
@@ -163,12 +166,12 @@ struct dm_region_hash *dm_region_hash_create(
struct bio_list *bios),
void (*wakeup_workers)(void *context),
void (*wakeup_all_recovery_waiters)(void *context),
- sector_t target_begin, unsigned max_recovery,
+ sector_t target_begin, unsigned int max_recovery,
struct dm_dirty_log *log, uint32_t region_size,
region_t nr_regions)
{
struct dm_region_hash *rh;
- unsigned nr_buckets, max_buckets;
+ unsigned int nr_buckets, max_buckets;
size_t i;
int ret;
@@ -236,7 +239,7 @@ EXPORT_SYMBOL_GPL(dm_region_hash_create);
void dm_region_hash_destroy(struct dm_region_hash *rh)
{
- unsigned h;
+ unsigned int h;
struct dm_region *reg, *nreg;
BUG_ON(!list_empty(&rh->quiesced_regions));
@@ -263,9 +266,9 @@ struct dm_dirty_log *dm_rh_dirty_log(struct dm_region_hash *rh)
}
EXPORT_SYMBOL_GPL(dm_rh_dirty_log);
-static unsigned rh_hash(struct dm_region_hash *rh, region_t region)
+static unsigned int rh_hash(struct dm_region_hash *rh, region_t region)
{
- return (unsigned) ((region * rh->prime) >> rh->shift) & rh->mask;
+ return (unsigned int) ((region * rh->prime) >> rh->shift) & rh->mask;
}
static struct dm_region *__rh_lookup(struct dm_region_hash *rh, region_t region)
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index a41209a43506..f7e9a3632eb3 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2016 Red Hat, Inc. All rights reserved.
*
@@ -23,33 +24,33 @@ struct dm_rq_target_io {
union map_info info;
struct dm_stats_aux stats_aux;
unsigned long duration_jiffies;
- unsigned n_sectors;
- unsigned completed;
+ unsigned int n_sectors;
+ unsigned int completed;
};
#define DM_MQ_NR_HW_QUEUES 1
#define DM_MQ_QUEUE_DEPTH 2048
-static unsigned dm_mq_nr_hw_queues = DM_MQ_NR_HW_QUEUES;
-static unsigned dm_mq_queue_depth = DM_MQ_QUEUE_DEPTH;
+static unsigned int dm_mq_nr_hw_queues = DM_MQ_NR_HW_QUEUES;
+static unsigned int dm_mq_queue_depth = DM_MQ_QUEUE_DEPTH;
/*
* Request-based DM's mempools' reserved IOs set by the user.
*/
#define RESERVED_REQUEST_BASED_IOS 256
-static unsigned reserved_rq_based_ios = RESERVED_REQUEST_BASED_IOS;
+static unsigned int reserved_rq_based_ios = RESERVED_REQUEST_BASED_IOS;
-unsigned dm_get_reserved_rq_based_ios(void)
+unsigned int dm_get_reserved_rq_based_ios(void)
{
return __dm_get_module_param(&reserved_rq_based_ios,
RESERVED_REQUEST_BASED_IOS, DM_RESERVED_MAX_IOS);
}
-static unsigned dm_get_blk_mq_nr_hw_queues(void)
+static unsigned int dm_get_blk_mq_nr_hw_queues(void)
{
return __dm_get_module_param(&dm_mq_nr_hw_queues, 1, 32);
}
-static unsigned dm_get_blk_mq_queue_depth(void)
+static unsigned int dm_get_blk_mq_queue_depth(void)
{
return __dm_get_module_param(&dm_mq_queue_depth,
DM_MQ_QUEUE_DEPTH, BLK_MQ_MAX_DEPTH);
@@ -127,6 +128,7 @@ static void rq_end_stats(struct mapped_device *md, struct request *orig)
{
if (unlikely(dm_stats_used(&md->stats))) {
struct dm_rq_target_io *tio = tio_from_request(orig);
+
tio->duration_jiffies = jiffies - tio->duration_jiffies;
dm_stats_account_io(&md->stats, rq_data_dir(orig),
blk_rq_pos(orig), tio->n_sectors, true,
@@ -434,6 +436,7 @@ static void dm_start_request(struct mapped_device *md, struct request *orig)
if (unlikely(dm_stats_used(&md->stats))) {
struct dm_rq_target_io *tio = tio_from_request(orig);
+
tio->duration_jiffies = jiffies;
tio->n_sectors = blk_rq_sectors(orig);
dm_stats_account_io(&md->stats, rq_data_dir(orig),
@@ -581,16 +584,16 @@ void dm_mq_cleanup_mapped_device(struct mapped_device *md)
}
}
-module_param(reserved_rq_based_ios, uint, S_IRUGO | S_IWUSR);
+module_param(reserved_rq_based_ios, uint, 0644);
MODULE_PARM_DESC(reserved_rq_based_ios, "Reserved IOs in request-based mempools");
/* Unused, but preserved for userspace compatibility */
static bool use_blk_mq = true;
-module_param(use_blk_mq, bool, S_IRUGO | S_IWUSR);
+module_param(use_blk_mq, bool, 0644);
MODULE_PARM_DESC(use_blk_mq, "Use block multiqueue for request-based DM devices");
-module_param(dm_mq_nr_hw_queues, uint, S_IRUGO | S_IWUSR);
+module_param(dm_mq_nr_hw_queues, uint, 0644);
MODULE_PARM_DESC(dm_mq_nr_hw_queues, "Number of hardware queues for request-based dm-mq devices");
-module_param(dm_mq_queue_depth, uint, S_IRUGO | S_IWUSR);
+module_param(dm_mq_queue_depth, uint, 0644);
MODULE_PARM_DESC(dm_mq_queue_depth, "Queue depth for request-based dm-mq devices");
diff --git a/drivers/md/dm-rq.h b/drivers/md/dm-rq.h
index 1eea0da641db..1f73e194e792 100644
--- a/drivers/md/dm-rq.h
+++ b/drivers/md/dm-rq.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Internal header file for device mapper
*
@@ -38,7 +39,7 @@ void dm_stop_queue(struct request_queue *q);
void dm_mq_kick_requeue_list(struct mapped_device *md);
-unsigned dm_get_reserved_rq_based_ios(void);
+unsigned int dm_get_reserved_rq_based_ios(void);
ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf);
ssize_t dm_attr_rq_based_seq_io_merge_deadline_store(struct mapped_device *md,
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index 680cc05ec654..f14e5df27874 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2001-2002 Sistina Software (UK) Limited.
* Copyright (C) 2006-2008 Red Hat GmbH
@@ -21,10 +22,12 @@
#define DM_PREFETCH_CHUNKS 12
-/*-----------------------------------------------------------------
+/*
+ *---------------------------------------------------------------
* Persistent snapshots, by persistent we mean that the snapshot
* will survive a reboot.
- *---------------------------------------------------------------*/
+ *---------------------------------------------------------------
+ */
/*
* We need to store a record of which parts of the origin have
@@ -92,7 +95,7 @@ struct core_exception {
};
struct commit_callback {
- void (*callback)(void *, int success);
+ void (*callback)(void *ref, int success);
void *context;
};
@@ -273,6 +276,7 @@ static void skip_metadata(struct pstore *ps)
{
uint32_t stride = ps->exceptions_per_area + 1;
chunk_t next_free = ps->next_free;
+
if (sector_div(next_free, stride) == NUM_SNAPSHOT_HDR_CHUNKS)
ps->next_free++;
}
@@ -303,7 +307,7 @@ static int read_header(struct pstore *ps, int *new_snapshot)
{
int r;
struct disk_header *dh;
- unsigned chunk_size;
+ unsigned int chunk_size;
int chunk_size_supplied = 1;
char *chunk_err;
@@ -354,8 +358,7 @@ static int read_header(struct pstore *ps, int *new_snapshot)
return 0;
if (chunk_size_supplied)
- DMWARN("chunk size %u in device metadata overrides "
- "table chunk size of %u.",
+ DMWARN("chunk size %u in device metadata overrides table chunk size of %u.",
chunk_size, ps->store->chunk_size);
/* We had a bogus chunk_size. Fix stuff up. */
@@ -515,15 +518,18 @@ static int read_exceptions(struct pstore *ps,
if (unlikely(prefetch_area < ps->current_area))
prefetch_area = ps->current_area;
- if (DM_PREFETCH_CHUNKS) do {
- chunk_t pf_chunk = area_location(ps, prefetch_area);
- if (unlikely(pf_chunk >= dm_bufio_get_device_size(client)))
- break;
- dm_bufio_prefetch(client, pf_chunk, 1);
- prefetch_area++;
- if (unlikely(!prefetch_area))
- break;
- } while (prefetch_area <= ps->current_area + DM_PREFETCH_CHUNKS);
+ if (DM_PREFETCH_CHUNKS) {
+ do {
+ chunk_t pf_chunk = area_location(ps, prefetch_area);
+
+ if (unlikely(pf_chunk >= dm_bufio_get_device_size(client)))
+ break;
+ dm_bufio_prefetch(client, pf_chunk, 1);
+ prefetch_area++;
+ if (unlikely(!prefetch_area))
+ break;
+ } while (prefetch_area <= ps->current_area + DM_PREFETCH_CHUNKS);
+ }
chunk = area_location(ps, ps->current_area);
@@ -690,7 +696,7 @@ static int persistent_prepare_exception(struct dm_exception_store *store,
static void persistent_commit_exception(struct dm_exception_store *store,
struct dm_exception *e, int valid,
- void (*callback) (void *, int success),
+ void (*callback)(void *, int success),
void *callback_context)
{
unsigned int i;
@@ -874,6 +880,7 @@ static int persistent_ctr(struct dm_exception_store *store, char *options)
if (options) {
char overflow = toupper(options[0]);
+
if (overflow == 'O')
store->userspace_supports_overflow = true;
else {
@@ -895,11 +902,11 @@ err_workqueue:
return r;
}
-static unsigned persistent_status(struct dm_exception_store *store,
+static unsigned int persistent_status(struct dm_exception_store *store,
status_type_t status, char *result,
- unsigned maxlen)
+ unsigned int maxlen)
{
- unsigned sz = 0;
+ unsigned int sz = 0;
switch (status) {
case STATUSTYPE_INFO:
@@ -958,8 +965,7 @@ int dm_persistent_snapshot_init(void)
r = dm_exception_store_type_register(&_persistent_compat_type);
if (r) {
- DMERR("Unable to register old-style persistent exception "
- "store type");
+ DMERR("Unable to register old-style persistent exception store type");
dm_exception_store_type_unregister(&_persistent_type);
return r;
}
diff --git a/drivers/md/dm-snap-transient.c b/drivers/md/dm-snap-transient.c
index 0e0ae4c36b37..1e07a745bedd 100644
--- a/drivers/md/dm-snap-transient.c
+++ b/drivers/md/dm-snap-transient.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2001-2002 Sistina Software (UK) Limited.
* Copyright (C) 2006-2008 Red Hat GmbH
@@ -16,9 +17,11 @@
#define DM_MSG_PREFIX "transient snapshot"
-/*-----------------------------------------------------------------
+/*
+ *---------------------------------------------------------------
* Implementation of the store for non-persistent snapshots.
- *---------------------------------------------------------------*/
+ *---------------------------------------------------------------
+ */
struct transient_c {
sector_t next_free;
};
@@ -53,7 +56,7 @@ static int transient_prepare_exception(struct dm_exception_store *store,
static void transient_commit_exception(struct dm_exception_store *store,
struct dm_exception *e, int valid,
- void (*callback) (void *, int success),
+ void (*callback)(void *, int success),
void *callback_context)
{
/* Just succeed */
@@ -84,11 +87,11 @@ static int transient_ctr(struct dm_exception_store *store, char *options)
return 0;
}
-static unsigned transient_status(struct dm_exception_store *store,
+static unsigned int transient_status(struct dm_exception_store *store,
status_type_t status, char *result,
- unsigned maxlen)
+ unsigned int maxlen)
{
- unsigned sz = 0;
+ unsigned int sz = 0;
switch (status) {
case STATUSTYPE_INFO:
@@ -140,8 +143,7 @@ int dm_transient_snapshot_init(void)
r = dm_exception_store_type_register(&_transient_compat_type);
if (r) {
- DMWARN("Unable to register old-style transient "
- "exception store type");
+ DMWARN("Unable to register old-style transient exception store type");
dm_exception_store_type_unregister(&_transient_type);
return r;
}
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index d1c2f84d27e3..f766c21408f1 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2001-2002 Sistina Software (UK) Limited.
*
@@ -41,7 +42,7 @@ static const char dm_snapshot_merge_target_name[] = "snapshot-merge";
struct dm_exception_table {
uint32_t hash_mask;
- unsigned hash_shift;
+ unsigned int hash_shift;
struct hlist_bl_head *table;
};
@@ -106,7 +107,7 @@ struct dm_snapshot {
/* The on disk metadata handler */
struct dm_exception_store *store;
- unsigned in_progress;
+ unsigned int in_progress;
struct wait_queue_head in_progress_wait;
struct dm_kcopyd_client *kcopyd_client;
@@ -122,11 +123,11 @@ struct dm_snapshot {
* The merge operation failed if this flag is set.
* Failure modes are handled as follows:
* - I/O error reading the header
- * => don't load the target; abort.
+ * => don't load the target; abort.
* - Header does not have "valid" flag set
- * => use the origin; forget about the snapshot.
+ * => use the origin; forget about the snapshot.
* - I/O error when reading exceptions
- * => don't load the target; abort.
+ * => don't load the target; abort.
* (We can't use the intermediate origin state.)
* - I/O error while merging
* => stop merging; set merge_failed; process I/O normally.
@@ -161,7 +162,7 @@ struct dm_snapshot {
*/
#define DEFAULT_COW_THRESHOLD 2048
-static unsigned cow_threshold = DEFAULT_COW_THRESHOLD;
+static unsigned int cow_threshold = DEFAULT_COW_THRESHOLD;
module_param_named(snapshot_cow_threshold, cow_threshold, uint, 0644);
MODULE_PARM_DESC(snapshot_cow_threshold, "Maximum number of chunks being copied on write");
@@ -244,12 +245,14 @@ struct dm_snap_tracked_chunk {
static void init_tracked_chunk(struct bio *bio)
{
struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));
+
INIT_HLIST_NODE(&c->node);
}
static bool is_bio_tracked(struct bio *bio)
{
struct dm_snap_tracked_chunk *c = dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));
+
return !hlist_unhashed(&c->node);
}
@@ -297,12 +300,12 @@ static int __chunk_is_tracked(struct dm_snapshot *s, chunk_t chunk)
/*
* This conflicting I/O is extremely improbable in the caller,
- * so msleep(1) is sufficient and there is no need for a wait queue.
+ * so fsleep(1000) is sufficient and there is no need for a wait queue.
*/
static void __check_for_conflicting_io(struct dm_snapshot *s, chunk_t chunk)
{
while (__chunk_is_tracked(s, chunk))
- msleep(1);
+ fsleep(1000);
}
/*
@@ -324,7 +327,7 @@ struct origin {
struct dm_origin {
struct dm_dev *dev;
struct dm_target *ti;
- unsigned split_boundary;
+ unsigned int split_boundary;
struct list_head hash_list;
};
@@ -377,7 +380,7 @@ static void exit_origin_hash(void)
kfree(_dm_origins);
}
-static unsigned origin_hash(struct block_device *bdev)
+static unsigned int origin_hash(struct block_device *bdev)
{
return bdev->bd_dev & ORIGIN_MASK;
}
@@ -388,7 +391,7 @@ static struct origin *__lookup_origin(struct block_device *origin)
struct origin *o;
ol = &_origins[origin_hash(origin)];
- list_for_each_entry (o, ol, hash_list)
+ list_for_each_entry(o, ol, hash_list)
if (bdev_equal(o->bdev, origin))
return o;
@@ -398,6 +401,7 @@ static struct origin *__lookup_origin(struct block_device *origin)
static void __insert_origin(struct origin *o)
{
struct list_head *sl = &_origins[origin_hash(o->bdev)];
+
list_add_tail(&o->hash_list, sl);
}
@@ -407,7 +411,7 @@ static struct dm_origin *__lookup_dm_origin(struct block_device *origin)
struct dm_origin *o;
ol = &_dm_origins[origin_hash(origin)];
- list_for_each_entry (o, ol, hash_list)
+ list_for_each_entry(o, ol, hash_list)
if (bdev_equal(o->dev->bdev, origin))
return o;
@@ -417,6 +421,7 @@ static struct dm_origin *__lookup_dm_origin(struct block_device *origin)
static void __insert_dm_origin(struct dm_origin *o)
{
struct list_head *sl = &_dm_origins[origin_hash(o->dev->bdev)];
+
list_add_tail(&o->hash_list, sl);
}
@@ -490,8 +495,7 @@ static int __validate_exception_handover(struct dm_snapshot *snap)
if ((__find_snapshots_sharing_cow(snap, &snap_src, &snap_dest,
&snap_merge) == 2) ||
snap_dest) {
- snap->ti->error = "Snapshot cow pairing for exception "
- "table handover failed";
+ snap->ti->error = "Snapshot cow pairing for exception table handover failed";
return -EINVAL;
}
@@ -518,8 +522,7 @@ static int __validate_exception_handover(struct dm_snapshot *snap)
if (!snap_src->store->type->prepare_merge ||
!snap_src->store->type->commit_merge) {
- snap->ti->error = "Snapshot exception store does not "
- "support snapshot-merge.";
+ snap->ti->error = "Snapshot exception store does not support snapshot-merge.";
return -EINVAL;
}
@@ -652,7 +655,7 @@ static void dm_exception_table_unlock(struct dm_exception_table_lock *lock)
}
static int dm_exception_table_init(struct dm_exception_table *et,
- uint32_t size, unsigned hash_shift)
+ uint32_t size, unsigned int hash_shift)
{
unsigned int i;
@@ -850,7 +853,7 @@ static int dm_add_exception(void *context, chunk_t old, chunk_t new)
static uint32_t __minimum_chunk_size(struct origin *o)
{
struct dm_snapshot *snap;
- unsigned chunk_size = rounddown_pow_of_two(UINT_MAX);
+ unsigned int chunk_size = rounddown_pow_of_two(UINT_MAX);
if (o)
list_for_each_entry(snap, &o->snapshots, list)
@@ -867,6 +870,7 @@ static int calc_max_buckets(void)
{
/* use a fixed size of 2MB */
unsigned long mem = 2 * 1024 * 1024;
+
mem /= sizeof(struct hlist_bl_head);
return mem;
@@ -937,8 +941,7 @@ static int __remove_single_exception_chunk(struct dm_snapshot *s,
e = dm_lookup_exception(&s->complete, old_chunk);
if (!e) {
- DMERR("Corruption detected: exception for block %llu is "
- "on disk but not in memory",
+ DMERR("Corruption detected: exception for block %llu is on disk but not in memory",
(unsigned long long)old_chunk);
return -EINVAL;
}
@@ -965,8 +968,7 @@ static int __remove_single_exception_chunk(struct dm_snapshot *s,
e->new_chunk++;
} else if (old_chunk != e->old_chunk +
dm_consecutive_chunk_count(e)) {
- DMERR("Attempt to merge block %llu from the "
- "middle of a chunk range [%llu - %llu]",
+ DMERR("Attempt to merge block %llu from the middle of a chunk range [%llu - %llu]",
(unsigned long long)old_chunk,
(unsigned long long)e->old_chunk,
(unsigned long long)
@@ -1010,7 +1012,7 @@ out:
}
static int origin_write_extent(struct dm_snapshot *merging_snap,
- sector_t sector, unsigned chunk_size);
+ sector_t sector, unsigned int chunk_size);
static void merge_callback(int read_err, unsigned long write_err,
void *context);
@@ -1059,8 +1061,7 @@ static void snapshot_merge_next_chunks(struct dm_snapshot *s)
&new_chunk);
if (linear_chunks <= 0) {
if (linear_chunks < 0) {
- DMERR("Read error in exception store: "
- "shutting down merge");
+ DMERR("Read error in exception store: shutting down merge");
down_write(&s->lock);
s->merge_failed = true;
up_write(&s->lock);
@@ -1183,7 +1184,7 @@ static int parse_snapshot_features(struct dm_arg_set *as, struct dm_snapshot *s,
struct dm_target *ti)
{
int r;
- unsigned argc;
+ unsigned int argc;
const char *arg_name;
static const struct dm_arg _args[] = {
@@ -1241,7 +1242,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
int r = -EINVAL;
char *origin_path, *cow_path;
dev_t origin_dev, cow_dev;
- unsigned args_used, num_flush_bios = 1;
+ unsigned int args_used, num_flush_bios = 1;
fmode_t origin_mode = FMODE_READ;
if (argc < 4) {
@@ -1493,7 +1494,7 @@ static void snapshot_dtr(struct dm_target *ti)
unregister_snapshot(s);
while (atomic_read(&s->pending_exceptions_count))
- msleep(1);
+ fsleep(1000);
/*
* Ensure instructions in mempool_exit aren't reordered
* before atomic_read.
@@ -1551,6 +1552,7 @@ static bool wait_for_in_progress(struct dm_snapshot *s, bool unlock_origins)
* throttling is unlikely to negatively impact performance.
*/
DECLARE_WAITQUEUE(wait, current);
+
__add_wait_queue(&s->in_progress_wait, &wait);
__set_current_state(TASK_UNINTERRUPTIBLE);
spin_unlock(&s->in_progress_wait.lock);
@@ -2208,12 +2210,10 @@ static int snapshot_preresume(struct dm_target *ti)
if (snap_src && snap_dest) {
down_read(&snap_src->lock);
if (s == snap_src) {
- DMERR("Unable to resume snapshot source until "
- "handover completes.");
+ DMERR("Unable to resume snapshot source until handover completes.");
r = -EINVAL;
} else if (!dm_suspended(snap_src->ti)) {
- DMERR("Unable to perform snapshot handover until "
- "source is suspended.");
+ DMERR("Unable to perform snapshot handover until source is suspended.");
r = -EINVAL;
}
up_read(&snap_src->lock);
@@ -2315,11 +2315,11 @@ static void snapshot_merge_resume(struct dm_target *ti)
}
static void snapshot_status(struct dm_target *ti, status_type_t type,
- unsigned status_flags, char *result, unsigned maxlen)
+ unsigned int status_flags, char *result, unsigned int maxlen)
{
- unsigned sz = 0;
+ unsigned int sz = 0;
struct dm_snapshot *snap = ti->private;
- unsigned num_features;
+ unsigned int num_features;
switch (type) {
case STATUSTYPE_INFO:
@@ -2344,8 +2344,7 @@ static void snapshot_status(struct dm_target *ti, status_type_t type,
(unsigned long long)sectors_allocated,
(unsigned long long)total_sectors,
(unsigned long long)metadata_sectors);
- }
- else
+ } else
DMEMIT("Unknown");
}
@@ -2419,10 +2418,11 @@ static void snapshot_io_hints(struct dm_target *ti, struct queue_limits *limits)
}
}
-/*-----------------------------------------------------------------
+/*
+ *---------------------------------------------------------------
* Origin methods
- *---------------------------------------------------------------*/
-
+ *---------------------------------------------------------------
+ */
/*
* If no exceptions need creating, DM_MAPIO_REMAPPED is returned and any
* supplied bio was ignored. The caller may submit it immediately.
@@ -2446,7 +2446,7 @@ static int __origin_write(struct list_head *snapshots, sector_t sector,
chunk_t chunk;
/* Do all the snapshots on this origin */
- list_for_each_entry (snap, snapshots, list) {
+ list_for_each_entry(snap, snapshots, list) {
/*
* Don't make new exceptions in a merging snapshot
* because it has effectively been deleted
@@ -2566,6 +2566,7 @@ again:
if (o) {
if (limit) {
struct dm_snapshot *s;
+
list_for_each_entry(s, &o->snapshots, list)
if (unlikely(!wait_for_in_progress(s, true)))
goto again;
@@ -2592,7 +2593,7 @@ again:
* size must be a multiple of merging_snap's chunk_size.
*/
static int origin_write_extent(struct dm_snapshot *merging_snap,
- sector_t sector, unsigned size)
+ sector_t sector, unsigned int size)
{
int must_wait = 0;
sector_t n;
@@ -2668,7 +2669,7 @@ static void origin_dtr(struct dm_target *ti)
static int origin_map(struct dm_target *ti, struct bio *bio)
{
struct dm_origin *o = ti->private;
- unsigned available_sectors;
+ unsigned int available_sectors;
bio_set_dev(bio, o->dev->bdev);
@@ -2679,7 +2680,7 @@ static int origin_map(struct dm_target *ti, struct bio *bio)
return DM_MAPIO_REMAPPED;
available_sectors = o->split_boundary -
- ((unsigned)bio->bi_iter.bi_sector & (o->split_boundary - 1));
+ ((unsigned int)bio->bi_iter.bi_sector & (o->split_boundary - 1));
if (bio_sectors(bio) > available_sectors)
dm_accept_partial_bio(bio, available_sectors);
@@ -2713,7 +2714,7 @@ static void origin_postsuspend(struct dm_target *ti)
}
static void origin_status(struct dm_target *ti, status_type_t type,
- unsigned status_flags, char *result, unsigned maxlen)
+ unsigned int status_flags, char *result, unsigned int maxlen)
{
struct dm_origin *o = ti->private;
diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
index f105a71915ab..c21a19ab73f7 100644
--- a/drivers/md/dm-stats.c
+++ b/drivers/md/dm-stats.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0-only
#include <linux/errno.h>
#include <linux/numa.h>
#include <linux/slab.h>
@@ -42,12 +42,12 @@ struct dm_stat_shared {
struct dm_stat {
struct list_head list_entry;
int id;
- unsigned stat_flags;
+ unsigned int stat_flags;
size_t n_entries;
sector_t start;
sector_t end;
sector_t step;
- unsigned n_histogram_entries;
+ unsigned int n_histogram_entries;
unsigned long long *histogram_boundaries;
const char *program_id;
const char *aux_data;
@@ -63,7 +63,7 @@ struct dm_stat {
struct dm_stats_last_position {
sector_t last_sector;
- unsigned last_rw;
+ unsigned int last_rw;
};
/*
@@ -250,8 +250,8 @@ static void dm_stats_recalc_precise_timestamps(struct dm_stats *stats)
}
static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end,
- sector_t step, unsigned stat_flags,
- unsigned n_histogram_entries,
+ sector_t step, unsigned int stat_flags,
+ unsigned int n_histogram_entries,
unsigned long long *histogram_boundaries,
const char *program_id, const char *aux_data,
void (*suspend_callback)(struct mapped_device *),
@@ -336,6 +336,7 @@ static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end,
if (s->n_histogram_entries) {
unsigned long long *hi;
+
hi = dm_kvzalloc(s->histogram_alloc_size, NUMA_NO_NODE);
if (!hi) {
r = -ENOMEM;
@@ -357,6 +358,7 @@ static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end,
s->stat_percpu[cpu] = p;
if (s->n_histogram_entries) {
unsigned long long *hi;
+
hi = dm_kvzalloc(s->histogram_alloc_size, cpu_to_node(cpu));
if (!hi) {
r = -ENOMEM;
@@ -470,11 +472,11 @@ do_sync_free:
}
static int dm_stats_list(struct dm_stats *stats, const char *program,
- char *result, unsigned maxlen)
+ char *result, unsigned int maxlen)
{
struct dm_stat *s;
sector_t len;
- unsigned sz = 0;
+ unsigned int sz = 0;
/*
* Output format:
@@ -494,7 +496,8 @@ static int dm_stats_list(struct dm_stats *stats, const char *program,
if (s->stat_flags & STAT_PRECISE_TIMESTAMPS)
DMEMIT(" precise_timestamps");
if (s->n_histogram_entries) {
- unsigned i;
+ unsigned int i;
+
DMEMIT(" histogram:");
for (i = 0; i < s->n_histogram_entries; i++) {
if (i)
@@ -518,7 +521,7 @@ static void dm_stat_round(struct dm_stat *s, struct dm_stat_shared *shared,
* This is racy, but so is part_round_stats_single.
*/
unsigned long long now, difference;
- unsigned in_flight_read, in_flight_write;
+ unsigned int in_flight_read, in_flight_write;
if (likely(!(s->stat_flags & STAT_PRECISE_TIMESTAMPS)))
now = jiffies;
@@ -529,8 +532,8 @@ static void dm_stat_round(struct dm_stat *s, struct dm_stat_shared *shared,
if (!difference)
return;
- in_flight_read = (unsigned)atomic_read(&shared->in_flight[READ]);
- in_flight_write = (unsigned)atomic_read(&shared->in_flight[WRITE]);
+ in_flight_read = (unsigned int)atomic_read(&shared->in_flight[READ]);
+ in_flight_write = (unsigned int)atomic_read(&shared->in_flight[WRITE]);
if (in_flight_read)
p->io_ticks[READ] += difference;
if (in_flight_write)
@@ -567,6 +570,7 @@ static void dm_stat_for_entry(struct dm_stat *s, size_t entry,
*/
#if BITS_PER_LONG == 32
unsigned long flags;
+
local_irq_save(flags);
#else
preempt_disable();
@@ -578,6 +582,7 @@ static void dm_stat_for_entry(struct dm_stat *s, size_t entry,
atomic_inc(&shared->in_flight[idx]);
} else {
unsigned long long duration;
+
dm_stat_round(s, shared, p);
atomic_dec(&shared->in_flight[idx]);
p->sectors[idx] += len;
@@ -591,15 +596,15 @@ static void dm_stat_for_entry(struct dm_stat *s, size_t entry,
duration = stats_aux->duration_ns;
}
if (s->n_histogram_entries) {
- unsigned lo = 0, hi = s->n_histogram_entries + 1;
+ unsigned int lo = 0, hi = s->n_histogram_entries + 1;
+
while (lo + 1 < hi) {
- unsigned mid = (lo + hi) / 2;
- if (s->histogram_boundaries[mid - 1] > duration) {
+ unsigned int mid = (lo + hi) / 2;
+
+ if (s->histogram_boundaries[mid - 1] > duration)
hi = mid;
- } else {
+ else
lo = mid;
- }
-
}
p->histogram[lo]++;
}
@@ -651,7 +656,7 @@ static void __dm_stat_bio(struct dm_stat *s, int bi_rw,
}
void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw,
- sector_t bi_sector, unsigned bi_sectors, bool end,
+ sector_t bi_sector, unsigned int bi_sectors, bool end,
unsigned long start_time,
struct dm_stats_aux *stats_aux)
{
@@ -740,7 +745,8 @@ static void __dm_stat_init_temporary_percpu_totals(struct dm_stat_shared *shared
shared->tmp.io_ticks_total += READ_ONCE(p->io_ticks_total);
shared->tmp.time_in_queue += READ_ONCE(p->time_in_queue);
if (s->n_histogram_entries) {
- unsigned i;
+ unsigned int i;
+
for (i = 0; i < s->n_histogram_entries + 1; i++)
shared->tmp.histogram[i] += READ_ONCE(p->histogram[i]);
}
@@ -774,7 +780,8 @@ static void __dm_stat_clear(struct dm_stat *s, size_t idx_start, size_t idx_end,
p->time_in_queue -= shared->tmp.time_in_queue;
local_irq_enable();
if (s->n_histogram_entries) {
- unsigned i;
+ unsigned int i;
+
for (i = 0; i < s->n_histogram_entries + 1; i++) {
local_irq_disable();
p = &s->stat_percpu[smp_processor_id()][x];
@@ -811,7 +818,7 @@ static int dm_stats_clear(struct dm_stats *stats, int id)
static unsigned long long dm_jiffies_to_msec64(struct dm_stat *s, unsigned long long j)
{
unsigned long long result;
- unsigned mult;
+ unsigned int mult;
if (s->stat_flags & STAT_PRECISE_TIMESTAMPS)
return j;
@@ -831,9 +838,9 @@ static unsigned long long dm_jiffies_to_msec64(struct dm_stat *s, unsigned long
static int dm_stats_print(struct dm_stats *stats, int id,
size_t idx_start, size_t idx_len,
- bool clear, char *result, unsigned maxlen)
+ bool clear, char *result, unsigned int maxlen)
{
- unsigned sz = 0;
+ unsigned int sz = 0;
struct dm_stat *s;
size_t x;
sector_t start, end, step;
@@ -889,10 +896,10 @@ static int dm_stats_print(struct dm_stats *stats, int id,
dm_jiffies_to_msec64(s, shared->tmp.io_ticks[READ]),
dm_jiffies_to_msec64(s, shared->tmp.io_ticks[WRITE]));
if (s->n_histogram_entries) {
- unsigned i;
- for (i = 0; i < s->n_histogram_entries + 1; i++) {
+ unsigned int i;
+
+ for (i = 0; i < s->n_histogram_entries + 1; i++)
DMEMIT("%s%llu", !i ? " " : ":", shared->tmp.histogram[i]);
- }
}
DMEMIT("\n");
@@ -938,11 +945,11 @@ static int dm_stats_set_aux(struct dm_stats *stats, int id, const char *aux_data
return 0;
}
-static int parse_histogram(const char *h, unsigned *n_histogram_entries,
+static int parse_histogram(const char *h, unsigned int *n_histogram_entries,
unsigned long long **histogram_boundaries)
{
const char *q;
- unsigned n;
+ unsigned int n;
unsigned long long last;
*n_histogram_entries = 1;
@@ -962,6 +969,7 @@ static int parse_histogram(const char *h, unsigned *n_histogram_entries,
unsigned long long hi;
int s;
char ch;
+
s = sscanf(h, "%llu%c", &hi, &ch);
if (!s || (s == 2 && ch != ','))
return -EINVAL;
@@ -977,23 +985,21 @@ static int parse_histogram(const char *h, unsigned *n_histogram_entries,
}
static int message_stats_create(struct mapped_device *md,
- unsigned argc, char **argv,
- char *result, unsigned maxlen)
+ unsigned int argc, char **argv,
+ char *result, unsigned int maxlen)
{
int r;
int id;
char dummy;
unsigned long long start, end, len, step;
- unsigned divisor;
+ unsigned int divisor;
const char *program_id, *aux_data;
- unsigned stat_flags = 0;
-
- unsigned n_histogram_entries = 0;
+ unsigned int stat_flags = 0;
+ unsigned int n_histogram_entries = 0;
unsigned long long *histogram_boundaries = NULL;
-
struct dm_arg_set as, as_backup;
const char *a;
- unsigned feature_args;
+ unsigned int feature_args;
/*
* Input format:
@@ -1046,7 +1052,8 @@ static int message_stats_create(struct mapped_device *md,
else if (!strncasecmp(a, "histogram:", 10)) {
if (n_histogram_entries)
goto ret_einval;
- if ((r = parse_histogram(a + 10, &n_histogram_entries, &histogram_boundaries)))
+ r = parse_histogram(a + 10, &n_histogram_entries, &histogram_boundaries);
+ if (r)
goto ret;
} else
goto ret_einval;
@@ -1102,7 +1109,7 @@ ret:
}
static int message_stats_delete(struct mapped_device *md,
- unsigned argc, char **argv)
+ unsigned int argc, char **argv)
{
int id;
char dummy;
@@ -1117,7 +1124,7 @@ static int message_stats_delete(struct mapped_device *md,
}
static int message_stats_clear(struct mapped_device *md,
- unsigned argc, char **argv)
+ unsigned int argc, char **argv)
{
int id;
char dummy;
@@ -1132,8 +1139,8 @@ static int message_stats_clear(struct mapped_device *md,
}
static int message_stats_list(struct mapped_device *md,
- unsigned argc, char **argv,
- char *result, unsigned maxlen)
+ unsigned int argc, char **argv,
+ char *result, unsigned int maxlen)
{
int r;
const char *program = NULL;
@@ -1155,8 +1162,8 @@ static int message_stats_list(struct mapped_device *md,
}
static int message_stats_print(struct mapped_device *md,
- unsigned argc, char **argv, bool clear,
- char *result, unsigned maxlen)
+ unsigned int argc, char **argv, bool clear,
+ char *result, unsigned int maxlen)
{
int id;
char dummy;
@@ -1182,7 +1189,7 @@ static int message_stats_print(struct mapped_device *md,
}
static int message_stats_set_aux(struct mapped_device *md,
- unsigned argc, char **argv)
+ unsigned int argc, char **argv)
{
int id;
char dummy;
@@ -1196,8 +1203,8 @@ static int message_stats_set_aux(struct mapped_device *md,
return dm_stats_set_aux(dm_get_stats(md), id, argv[2]);
}
-int dm_stats_message(struct mapped_device *md, unsigned argc, char **argv,
- char *result, unsigned maxlen)
+int dm_stats_message(struct mapped_device *md, unsigned int argc, char **argv,
+ char *result, unsigned int maxlen)
{
int r;
@@ -1240,5 +1247,5 @@ void dm_statistics_exit(void)
DMCRIT("shared_memory_amount leaked: %lu", shared_memory_amount);
}
-module_param_named(stats_current_allocated_bytes, shared_memory_amount, ulong, S_IRUGO);
+module_param_named(stats_current_allocated_bytes, shared_memory_amount, ulong, 0444);
MODULE_PARM_DESC(stats_current_allocated_bytes, "Memory currently used by statistics");
diff --git a/drivers/md/dm-stats.h b/drivers/md/dm-stats.h
index 09c81a1ec057..0bc152c8e4f3 100644
--- a/drivers/md/dm-stats.h
+++ b/drivers/md/dm-stats.h
@@ -26,11 +26,11 @@ void dm_stats_cleanup(struct dm_stats *st);
struct mapped_device;
-int dm_stats_message(struct mapped_device *md, unsigned argc, char **argv,
- char *result, unsigned maxlen);
+int dm_stats_message(struct mapped_device *md, unsigned int argc, char **argv,
+ char *result, unsigned int maxlen);
void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw,
- sector_t bi_sector, unsigned bi_sectors, bool end,
+ sector_t bi_sector, unsigned int bi_sectors, bool end,
unsigned long start_time,
struct dm_stats_aux *aux);
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index baa085cc67bd..8d6951157106 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2001-2003 Sistina Software (UK) Limited.
*
@@ -15,6 +16,8 @@
#include <linux/slab.h>
#include <linux/log2.h>
+static struct workqueue_struct *dm_stripe_wq;
+
#define DM_MSG_PREFIX "striped"
#define DM_IO_ERROR_THRESHOLD 15
@@ -108,15 +111,13 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
width = ti->len;
if (sector_div(width, stripes)) {
- ti->error = "Target length not divisible by "
- "number of stripes";
+ ti->error = "Target length not divisible by number of stripes";
return -EINVAL;
}
tmp_len = width;
if (sector_div(tmp_len, chunk_size)) {
- ti->error = "Target length not divisible by "
- "chunk size";
+ ti->error = "Target length not divisible by chunk size";
return -EINVAL;
}
@@ -124,15 +125,13 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
* Do we have enough arguments for that many stripes ?
*/
if (argc != (2 + 2 * stripes)) {
- ti->error = "Not enough destinations "
- "specified";
+ ti->error = "Not enough destinations specified";
return -EINVAL;
}
sc = kmalloc(struct_size(sc, stripe, stripes), GFP_KERNEL);
if (!sc) {
- ti->error = "Memory allocation for striped context "
- "failed";
+ ti->error = "Memory allocation for striped context failed";
return -ENOMEM;
}
@@ -262,18 +261,18 @@ static int stripe_map_range(struct stripe_c *sc, struct bio *bio,
sc->stripe[target_stripe].physical_start;
bio->bi_iter.bi_size = to_bytes(end - begin);
return DM_MAPIO_REMAPPED;
- } else {
- /* The range doesn't map to the target stripe */
- bio_endio(bio);
- return DM_MAPIO_SUBMITTED;
}
+
+ /* The range doesn't map to the target stripe */
+ bio_endio(bio);
+ return DM_MAPIO_SUBMITTED;
}
static int stripe_map(struct dm_target *ti, struct bio *bio)
{
struct stripe_c *sc = ti->private;
uint32_t stripe;
- unsigned target_bio_nr;
+ unsigned int target_bio_nr;
if (bio->bi_opf & REQ_PREFLUSH) {
target_bio_nr = dm_bio_get_target_bio_nr(bio);
@@ -359,7 +358,7 @@ static size_t stripe_dax_recovery_write(struct dm_target *ti, pgoff_t pgoff,
*/
static void stripe_status(struct dm_target *ti, status_type_t type,
- unsigned status_flags, char *result, unsigned maxlen)
+ unsigned int status_flags, char *result, unsigned int maxlen)
{
struct stripe_c *sc = (struct stripe_c *) ti->private;
unsigned int sz = 0;
@@ -368,14 +367,12 @@ static void stripe_status(struct dm_target *ti, status_type_t type,
switch (type) {
case STATUSTYPE_INFO:
DMEMIT("%d ", sc->stripes);
- for (i = 0; i < sc->stripes; i++) {
+ for (i = 0; i < sc->stripes; i++)
DMEMIT("%s ", sc->stripe[i].dev->name);
- }
+
DMEMIT("1 ");
- for (i = 0; i < sc->stripes; i++) {
- DMEMIT("%c", atomic_read(&(sc->stripe[i].error_count)) ?
- 'D' : 'A');
- }
+ for (i = 0; i < sc->stripes; i++)
+ DMEMIT("%c", atomic_read(&(sc->stripe[i].error_count)) ? 'D' : 'A');
break;
case STATUSTYPE_TABLE:
@@ -406,7 +403,7 @@ static void stripe_status(struct dm_target *ti, status_type_t type,
static int stripe_end_io(struct dm_target *ti, struct bio *bio,
blk_status_t *error)
{
- unsigned i;
+ unsigned int i;
char major_minor[16];
struct stripe_c *sc = ti->private;
@@ -433,7 +430,7 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
atomic_inc(&(sc->stripe[i].error_count));
if (atomic_read(&(sc->stripe[i].error_count)) <
DM_IO_ERROR_THRESHOLD)
- schedule_work(&sc->trigger_event);
+ queue_work(dm_stripe_wq, &sc->trigger_event);
}
return DM_ENDIO_DONE;
@@ -444,7 +441,7 @@ static int stripe_iterate_devices(struct dm_target *ti,
{
struct stripe_c *sc = ti->private;
int ret = 0;
- unsigned i = 0;
+ unsigned int i = 0;
do {
ret = fn(ti, sc->stripe[i].dev,
@@ -459,7 +456,7 @@ static void stripe_io_hints(struct dm_target *ti,
struct queue_limits *limits)
{
struct stripe_c *sc = ti->private;
- unsigned chunk_size = sc->chunk_size << SECTOR_SHIFT;
+ unsigned int chunk_size = sc->chunk_size << SECTOR_SHIFT;
blk_limits_io_min(limits, chunk_size);
blk_limits_io_opt(limits, chunk_size * sc->stripes);
@@ -486,9 +483,14 @@ int __init dm_stripe_init(void)
{
int r;
+ dm_stripe_wq = alloc_workqueue("dm_stripe_wq", 0, 0);
+ if (!dm_stripe_wq)
+ return -ENOMEM;
r = dm_register_target(&stripe_target);
- if (r < 0)
+ if (r < 0) {
+ destroy_workqueue(dm_stripe_wq);
DMWARN("target registration failed");
+ }
return r;
}
@@ -496,4 +498,5 @@ int __init dm_stripe_init(void)
void dm_stripe_exit(void)
{
dm_unregister_target(&stripe_target);
+ destroy_workqueue(dm_stripe_wq);
}
diff --git a/drivers/md/dm-switch.c b/drivers/md/dm-switch.c
index 534dc2ca8bb0..ee2432927e90 100644
--- a/drivers/md/dm-switch.c
+++ b/drivers/md/dm-switch.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2010-2012 by Dell Inc. All rights reserved.
* Copyright (C) 2011-2013 Red Hat, Inc.
@@ -38,9 +39,9 @@ struct switch_path {
struct switch_ctx {
struct dm_target *ti;
- unsigned nr_paths; /* Number of paths in path_list. */
+ unsigned int nr_paths; /* Number of paths in path_list. */
- unsigned region_size; /* Region size in 512-byte sectors */
+ unsigned int region_size; /* Region size in 512-byte sectors */
unsigned long nr_regions; /* Number of regions making up the device */
signed char region_size_bits; /* log2 of region_size or -1 */
@@ -56,8 +57,8 @@ struct switch_ctx {
struct switch_path path_list[];
};
-static struct switch_ctx *alloc_switch_ctx(struct dm_target *ti, unsigned nr_paths,
- unsigned region_size)
+static struct switch_ctx *alloc_switch_ctx(struct dm_target *ti, unsigned int nr_paths,
+ unsigned int region_size)
{
struct switch_ctx *sctx;
@@ -73,7 +74,7 @@ static struct switch_ctx *alloc_switch_ctx(struct dm_target *ti, unsigned nr_pat
return sctx;
}
-static int alloc_region_table(struct dm_target *ti, unsigned nr_paths)
+static int alloc_region_table(struct dm_target *ti, unsigned int nr_paths)
{
struct switch_ctx *sctx = ti->private;
sector_t nr_regions = ti->len;
@@ -124,7 +125,7 @@ static int alloc_region_table(struct dm_target *ti, unsigned nr_paths)
}
static void switch_get_position(struct switch_ctx *sctx, unsigned long region_nr,
- unsigned long *region_index, unsigned *bit)
+ unsigned long *region_index, unsigned int *bit)
{
if (sctx->region_entries_per_slot_bits >= 0) {
*region_index = region_nr >> sctx->region_entries_per_slot_bits;
@@ -137,10 +138,10 @@ static void switch_get_position(struct switch_ctx *sctx, unsigned long region_nr
*bit *= sctx->region_table_entry_bits;
}
-static unsigned switch_region_table_read(struct switch_ctx *sctx, unsigned long region_nr)
+static unsigned int switch_region_table_read(struct switch_ctx *sctx, unsigned long region_nr)
{
unsigned long region_index;
- unsigned bit;
+ unsigned int bit;
switch_get_position(sctx, region_nr, &region_index, &bit);
@@ -151,9 +152,9 @@ static unsigned switch_region_table_read(struct switch_ctx *sctx, unsigned long
/*
* Find which path to use at given offset.
*/
-static unsigned switch_get_path_nr(struct switch_ctx *sctx, sector_t offset)
+static unsigned int switch_get_path_nr(struct switch_ctx *sctx, sector_t offset)
{
- unsigned path_nr;
+ unsigned int path_nr;
sector_t p;
p = offset;
@@ -172,10 +173,10 @@ static unsigned switch_get_path_nr(struct switch_ctx *sctx, sector_t offset)
}
static void switch_region_table_write(struct switch_ctx *sctx, unsigned long region_nr,
- unsigned value)
+ unsigned int value)
{
unsigned long region_index;
- unsigned bit;
+ unsigned int bit;
region_table_slot_t pte;
switch_get_position(sctx, region_nr, &region_index, &bit);
@@ -191,7 +192,7 @@ static void switch_region_table_write(struct switch_ctx *sctx, unsigned long reg
*/
static void initialise_region_table(struct switch_ctx *sctx)
{
- unsigned path_nr = 0;
+ unsigned int path_nr = 0;
unsigned long region_nr;
for (region_nr = 0; region_nr < sctx->nr_regions; region_nr++) {
@@ -249,7 +250,7 @@ static void switch_dtr(struct dm_target *ti)
* Optional args are to allow for future extension: currently this
* parameter must be 0.
*/
-static int switch_ctr(struct dm_target *ti, unsigned argc, char **argv)
+static int switch_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
static const struct dm_arg _args[] = {
{1, (KMALLOC_MAX_SIZE - sizeof(struct switch_ctx)) / sizeof(struct switch_path), "Invalid number of paths"},
@@ -259,7 +260,7 @@ static int switch_ctr(struct dm_target *ti, unsigned argc, char **argv)
struct switch_ctx *sctx;
struct dm_arg_set as;
- unsigned nr_paths, region_size, nr_optional_args;
+ unsigned int nr_paths, region_size, nr_optional_args;
int r;
as.argc = argc;
@@ -320,7 +321,7 @@ static int switch_map(struct dm_target *ti, struct bio *bio)
{
struct switch_ctx *sctx = ti->private;
sector_t offset = dm_target_offset(ti, bio->bi_iter.bi_sector);
- unsigned path_nr = switch_get_path_nr(sctx, offset);
+ unsigned int path_nr = switch_get_path_nr(sctx, offset);
bio_set_dev(bio, sctx->path_list[path_nr].dmdev->bdev);
bio->bi_iter.bi_sector = sctx->path_list[path_nr].start + offset;
@@ -371,9 +372,9 @@ static __always_inline unsigned long parse_hex(const char **string)
}
static int process_set_region_mappings(struct switch_ctx *sctx,
- unsigned argc, char **argv)
+ unsigned int argc, char **argv)
{
- unsigned i;
+ unsigned int i;
unsigned long region_index = 0;
for (i = 1; i < argc; i++) {
@@ -466,8 +467,8 @@ static int process_set_region_mappings(struct switch_ctx *sctx,
*
* Only set_region_mappings is supported.
*/
-static int switch_message(struct dm_target *ti, unsigned argc, char **argv,
- char *result, unsigned maxlen)
+static int switch_message(struct dm_target *ti, unsigned int argc, char **argv,
+ char *result, unsigned int maxlen)
{
static DEFINE_MUTEX(message_mutex);
@@ -487,10 +488,10 @@ static int switch_message(struct dm_target *ti, unsigned argc, char **argv,
}
static void switch_status(struct dm_target *ti, status_type_t type,
- unsigned status_flags, char *result, unsigned maxlen)
+ unsigned int status_flags, char *result, unsigned int maxlen)
{
struct switch_ctx *sctx = ti->private;
- unsigned sz = 0;
+ unsigned int sz = 0;
int path_nr;
switch (type) {
@@ -519,7 +520,7 @@ static void switch_status(struct dm_target *ti, status_type_t type,
static int switch_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
{
struct switch_ctx *sctx = ti->private;
- unsigned path_nr;
+ unsigned int path_nr;
path_nr = switch_get_path_nr(sctx, 0);
diff --git a/drivers/md/dm-sysfs.c b/drivers/md/dm-sysfs.c
index e28c92478536..bfaef27ca79f 100644
--- a/drivers/md/dm-sysfs.c
+++ b/drivers/md/dm-sysfs.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2008 Red Hat, Inc. All rights reserved.
*
@@ -11,13 +12,13 @@
struct dm_sysfs_attr {
struct attribute attr;
- ssize_t (*show)(struct mapped_device *, char *);
- ssize_t (*store)(struct mapped_device *, const char *, size_t count);
+ ssize_t (*show)(struct mapped_device *md, char *p);
+ ssize_t (*store)(struct mapped_device *md, const char *p, size_t count);
};
#define DM_ATTR_RO(_name) \
struct dm_sysfs_attr dm_attr_##_name = \
- __ATTR(_name, S_IRUGO, dm_attr_##_name##_show, NULL)
+ __ATTR(_name, 0444, dm_attr_##_name##_show, NULL)
static ssize_t dm_attr_show(struct kobject *kobj, struct attribute *attr,
char *page)
@@ -42,7 +43,7 @@ static ssize_t dm_attr_show(struct kobject *kobj, struct attribute *attr,
#define DM_ATTR_RW(_name) \
struct dm_sysfs_attr dm_attr_##_name = \
- __ATTR(_name, S_IRUGO | S_IWUSR, dm_attr_##_name##_show, dm_attr_##_name##_store)
+ __ATTR(_name, 0644, dm_attr_##_name##_show, dm_attr_##_name##_store)
static ssize_t dm_attr_store(struct kobject *kobj, struct attribute *attr,
const char *page, size_t count)
@@ -119,7 +120,7 @@ static const struct sysfs_ops dm_sysfs_ops = {
.store = dm_attr_store,
};
-static struct kobj_type dm_ktype = {
+static const struct kobj_type dm_ktype = {
.sysfs_ops = &dm_sysfs_ops,
.default_groups = dm_groups,
.release = dm_kobject_release,
@@ -142,6 +143,7 @@ int dm_sysfs_init(struct mapped_device *md)
void dm_sysfs_exit(struct mapped_device *md)
{
struct kobject *kobj = dm_kobject(md);
+
kobject_put(kobj);
wait_for_completion(dm_get_completion_from_kobject(kobj));
}
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 8541d5688f3a..2055a758541d 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2001 Sistina Software (UK) Limited.
* Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
@@ -72,7 +73,7 @@ static sector_t high(struct dm_table *t, unsigned int l, unsigned int n)
n = get_child(n, CHILDREN_PER_NODE - 1);
if (n >= t->counts[l])
- return (sector_t) - 1;
+ return (sector_t) -1;
return get_node(t, l, n)[KEYS_PER_NODE - 1];
}
@@ -126,7 +127,7 @@ static int alloc_targets(struct dm_table *t, unsigned int num)
}
int dm_table_create(struct dm_table **result, fmode_t mode,
- unsigned num_targets, struct mapped_device *md)
+ unsigned int num_targets, struct mapped_device *md)
{
struct dm_table *t = kzalloc(sizeof(*t), GFP_KERNEL);
@@ -211,7 +212,7 @@ static struct dm_dev_internal *find_device(struct list_head *l, dev_t dev)
{
struct dm_dev_internal *dd;
- list_for_each_entry (dd, l, list)
+ list_for_each_entry(dd, l, list)
if (dd->dm_dev->bdev->bd_dev == dev)
return dd;
@@ -234,8 +235,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
return 0;
if ((start >= dev_size) || (start + len > dev_size)) {
- DMERR("%s: %pg too small for target: "
- "start=%llu, len=%llu, dev_size=%llu",
+ DMERR("%s: %pg too small for target: start=%llu, len=%llu, dev_size=%llu",
dm_device_name(ti->table->md), bdev,
(unsigned long long)start,
(unsigned long long)len,
@@ -280,8 +280,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
return 0;
if (start & (logical_block_size_sectors - 1)) {
- DMERR("%s: start=%llu not aligned to h/w "
- "logical block size %u of %pg",
+ DMERR("%s: start=%llu not aligned to h/w logical block size %u of %pg",
dm_device_name(ti->table->md),
(unsigned long long)start,
limits->logical_block_size, bdev);
@@ -289,8 +288,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
}
if (len & (logical_block_size_sectors - 1)) {
- DMERR("%s: len=%llu not aligned to h/w "
- "logical block size %u of %pg",
+ DMERR("%s: len=%llu not aligned to h/w logical block size %u of %pg",
dm_device_name(ti->table->md),
(unsigned long long)len,
limits->logical_block_size, bdev);
@@ -364,6 +362,8 @@ int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
if (!dev)
return -ENODEV;
}
+ if (dev == disk_devt(t->md->disk))
+ return -EINVAL;
dd = find_device(&t->devices, dev);
if (!dd) {
@@ -371,7 +371,8 @@ int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
if (!dd)
return -ENOMEM;
- if ((r = dm_get_table_device(t->md, dev, mode, &dd->dm_dev))) {
+ r = dm_get_table_device(t->md, dev, mode, &dd->dm_dev);
+ if (r) {
kfree(dd);
return r;
}
@@ -470,10 +471,10 @@ static int adjoin(struct dm_table *t, struct dm_target *ti)
* On the other hand, dm-switch needs to process bulk data using messages and
* excessive use of GFP_NOIO could cause trouble.
*/
-static char **realloc_argv(unsigned *size, char **old_argv)
+static char **realloc_argv(unsigned int *size, char **old_argv)
{
char **argv;
- unsigned new_size;
+ unsigned int new_size;
gfp_t gfp;
if (*size) {
@@ -499,7 +500,7 @@ static char **realloc_argv(unsigned *size, char **old_argv)
int dm_split_args(int *argc, char ***argvp, char *input)
{
char *start, *end = input, *out, **argv = NULL;
- unsigned array_size = 0;
+ unsigned int array_size = 0;
*argc = 0;
@@ -732,9 +733,8 @@ int dm_table_add_target(struct dm_table *t, const char *type,
/*
* Target argument parsing helpers.
*/
-static int validate_next_arg(const struct dm_arg *arg,
- struct dm_arg_set *arg_set,
- unsigned *value, char **error, unsigned grouped)
+static int validate_next_arg(const struct dm_arg *arg, struct dm_arg_set *arg_set,
+ unsigned int *value, char **error, unsigned int grouped)
{
const char *arg_str = dm_shift_arg(arg_set);
char dummy;
@@ -752,14 +752,14 @@ static int validate_next_arg(const struct dm_arg *arg,
}
int dm_read_arg(const struct dm_arg *arg, struct dm_arg_set *arg_set,
- unsigned *value, char **error)
+ unsigned int *value, char **error)
{
return validate_next_arg(arg, arg_set, value, error, 0);
}
EXPORT_SYMBOL(dm_read_arg);
int dm_read_arg_group(const struct dm_arg *arg, struct dm_arg_set *arg_set,
- unsigned *value, char **error)
+ unsigned int *value, char **error)
{
return validate_next_arg(arg, arg_set, value, error, 1);
}
@@ -780,7 +780,7 @@ const char *dm_shift_arg(struct dm_arg_set *as)
}
EXPORT_SYMBOL(dm_shift_arg);
-void dm_consume_args(struct dm_arg_set *as, unsigned num_args)
+void dm_consume_args(struct dm_arg_set *as, unsigned int num_args)
{
BUG_ON(as->argc < num_args);
as->argc -= num_args;
@@ -856,7 +856,7 @@ static int device_is_rq_stackable(struct dm_target *ti, struct dm_dev *dev,
static int dm_table_determine_type(struct dm_table *t)
{
- unsigned bio_based = 0, request_based = 0, hybrid = 0;
+ unsigned int bio_based = 0, request_based = 0, hybrid = 0;
struct dm_target *ti;
struct list_head *devices = dm_table_get_devices(t);
enum dm_queue_mode live_md_type = dm_get_md_type(t->md);
@@ -881,8 +881,7 @@ static int dm_table_determine_type(struct dm_table *t)
bio_based = 1;
if (bio_based && request_based) {
- DMERR("Inconsistent table: different target types"
- " can't be mixed up");
+ DMERR("Inconsistent table: different target types can't be mixed up");
return -EINVAL;
}
}
@@ -1185,8 +1184,7 @@ static int dm_table_register_integrity(struct dm_table *t)
* profile the new profile should not conflict.
*/
if (blk_integrity_compare(dm_disk(md), template_disk) < 0) {
- DMERR("%s: conflict with existing integrity profile: "
- "%s profile mismatch",
+ DMERR("%s: conflict with existing integrity profile: %s profile mismatch",
dm_device_name(t->md),
template_disk->disk_name);
return 1;
@@ -1527,7 +1525,7 @@ static bool dm_table_any_dev_attr(struct dm_table *t,
if (ti->type->iterate_devices &&
ti->type->iterate_devices(ti, func, data))
return true;
- }
+ }
return false;
}
@@ -1535,7 +1533,7 @@ static bool dm_table_any_dev_attr(struct dm_table *t,
static int count_device(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{
- unsigned *num_devices = data;
+ unsigned int *num_devices = data;
(*num_devices)++;
@@ -1565,7 +1563,7 @@ bool dm_table_has_no_data_devices(struct dm_table *t)
{
for (unsigned int i = 0; i < t->num_targets; i++) {
struct dm_target *ti = dm_table_get_target(t, i);
- unsigned num_devices = 0;
+ unsigned int num_devices = 0;
if (!ti->type->iterate_devices)
return false;
@@ -1708,8 +1706,7 @@ combine_limits:
* for the table.
*/
if (blk_stack_limits(limits, &ti_limits, 0) < 0)
- DMWARN("%s: adding target device "
- "(start sect %llu len %llu) "
+ DMWARN("%s: adding target device (start sect %llu len %llu) "
"caused an alignment inconsistency",
dm_device_name(t->md),
(unsigned long long) ti->begin,
@@ -1971,8 +1968,7 @@ int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
blk_queue_flag_set(QUEUE_FLAG_DAX, q);
if (dm_table_supports_dax(t, device_not_dax_synchronous_capable))
set_dax_synchronous(t->md->dax_dev);
- }
- else
+ } else
blk_queue_flag_clear(QUEUE_FLAG_DAX, q);
if (dm_table_any_dev_attr(t, device_dax_write_cache_enabled, NULL))
diff --git a/drivers/md/dm-target.c b/drivers/md/dm-target.c
index 8cd5184e62f0..26ea22b1a0d7 100644
--- a/drivers/md/dm-target.c
+++ b/drivers/md/dm-target.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2001 Sistina Software (UK) Limited
*
@@ -92,6 +93,7 @@ int dm_register_target(struct target_type *tt)
up_write(&_lock);
return rv;
}
+EXPORT_SYMBOL(dm_register_target);
void dm_unregister_target(struct target_type *tt)
{
@@ -105,6 +107,7 @@ void dm_unregister_target(struct target_type *tt)
up_write(&_lock);
}
+EXPORT_SYMBOL(dm_unregister_target);
/*
* io-err: always fails an io, useful for bringing
@@ -170,6 +173,3 @@ void dm_target_exit(void)
{
dm_unregister_target(&error_target);
}
-
-EXPORT_SYMBOL(dm_register_target);
-EXPORT_SYMBOL(dm_unregister_target);
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index 6bcc4c4786d8..fd464fb024c3 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2011-2012 Red Hat, Inc.
*
@@ -14,7 +15,8 @@
#include <linux/device-mapper.h>
#include <linux/workqueue.h>
-/*--------------------------------------------------------------------------
+/*
+ *--------------------------------------------------------------------------
* As far as the metadata goes, there is:
*
* - A superblock in block zero, taking up fewer than 512 bytes for
@@ -70,7 +72,8 @@
*
* All metadata io is in THIN_METADATA_BLOCK_SIZE sized/aligned chunks
* from the block manager.
- *--------------------------------------------------------------------------*/
+ *--------------------------------------------------------------------------
+ */
#define DM_MSG_PREFIX "thin metadata"
@@ -239,10 +242,11 @@ struct dm_thin_device {
uint32_t snapshotted_time;
};
-/*----------------------------------------------------------------
+/*
+ *--------------------------------------------------------------
* superblock validator
- *--------------------------------------------------------------*/
-
+ *--------------------------------------------------------------
+ */
#define SUPERBLOCK_CSUM_XOR 160774
static void sb_prepare_for_write(struct dm_block_validator *v,
@@ -265,15 +269,15 @@ static int sb_check(struct dm_block_validator *v,
__le32 csum_le;
if (dm_block_location(b) != le64_to_cpu(disk_super->blocknr)) {
- DMERR("sb_check failed: blocknr %llu: "
- "wanted %llu", le64_to_cpu(disk_super->blocknr),
+ DMERR("%s failed: blocknr %llu: wanted %llu",
+ __func__, le64_to_cpu(disk_super->blocknr),
(unsigned long long)dm_block_location(b));
return -ENOTBLK;
}
if (le64_to_cpu(disk_super->magic) != THIN_SUPERBLOCK_MAGIC) {
- DMERR("sb_check failed: magic %llu: "
- "wanted %llu", le64_to_cpu(disk_super->magic),
+ DMERR("%s failed: magic %llu: wanted %llu",
+ __func__, le64_to_cpu(disk_super->magic),
(unsigned long long)THIN_SUPERBLOCK_MAGIC);
return -EILSEQ;
}
@@ -282,8 +286,8 @@ static int sb_check(struct dm_block_validator *v,
block_size - sizeof(__le32),
SUPERBLOCK_CSUM_XOR));
if (csum_le != disk_super->csum) {
- DMERR("sb_check failed: csum %u: wanted %u",
- le32_to_cpu(csum_le), le32_to_cpu(disk_super->csum));
+ DMERR("%s failed: csum %u: wanted %u",
+ __func__, le32_to_cpu(csum_le), le32_to_cpu(disk_super->csum));
return -EILSEQ;
}
@@ -296,10 +300,11 @@ static struct dm_block_validator sb_validator = {
.check = sb_check
};
-/*----------------------------------------------------------------
+/*
+ *--------------------------------------------------------------
* Methods for the btree value types
- *--------------------------------------------------------------*/
-
+ *--------------------------------------------------------------
+ */
static uint64_t pack_block_time(dm_block_t b, uint32_t t)
{
return (b << 24) | t;
@@ -318,12 +323,12 @@ static void unpack_block_time(uint64_t v, dm_block_t *b, uint32_t *t)
*/
typedef int (*run_fn)(struct dm_space_map *, dm_block_t, dm_block_t);
-static void with_runs(struct dm_space_map *sm, const __le64 *value_le, unsigned count, run_fn fn)
+static void with_runs(struct dm_space_map *sm, const __le64 *value_le, unsigned int count, run_fn fn)
{
uint64_t b, begin, end;
uint32_t t;
bool in_run = false;
- unsigned i;
+ unsigned int i;
for (i = 0; i < count; i++, value_le++) {
/* We know value_le is 8 byte aligned */
@@ -348,13 +353,13 @@ static void with_runs(struct dm_space_map *sm, const __le64 *value_le, unsigned
fn(sm, begin, end);
}
-static void data_block_inc(void *context, const void *value_le, unsigned count)
+static void data_block_inc(void *context, const void *value_le, unsigned int count)
{
with_runs((struct dm_space_map *) context,
(const __le64 *) value_le, count, dm_sm_inc_blocks);
}
-static void data_block_dec(void *context, const void *value_le, unsigned count)
+static void data_block_dec(void *context, const void *value_le, unsigned int count)
{
with_runs((struct dm_space_map *) context,
(const __le64 *) value_le, count, dm_sm_dec_blocks);
@@ -374,21 +379,21 @@ static int data_block_equal(void *context, const void *value1_le, const void *va
return b1 == b2;
}
-static void subtree_inc(void *context, const void *value, unsigned count)
+static void subtree_inc(void *context, const void *value, unsigned int count)
{
struct dm_btree_info *info = context;
const __le64 *root_le = value;
- unsigned i;
+ unsigned int i;
for (i = 0; i < count; i++, root_le++)
dm_tm_inc(info->tm, le64_to_cpu(*root_le));
}
-static void subtree_dec(void *context, const void *value, unsigned count)
+static void subtree_dec(void *context, const void *value, unsigned int count)
{
struct dm_btree_info *info = context;
const __le64 *root_le = value;
- unsigned i;
+ unsigned int i;
for (i = 0; i < count; i++, root_le++)
if (dm_btree_del(info, le64_to_cpu(*root_le)))
@@ -398,6 +403,7 @@ static void subtree_dec(void *context, const void *value, unsigned count)
static int subtree_equal(void *context, const void *value1_le, const void *value2_le)
{
__le64 v1_le, v2_le;
+
memcpy(&v1_le, value1_le, sizeof(v1_le));
memcpy(&v2_le, value2_le, sizeof(v2_le));
@@ -448,10 +454,10 @@ static int superblock_lock(struct dm_pool_metadata *pmd,
static int __superblock_all_zeroes(struct dm_block_manager *bm, int *result)
{
int r;
- unsigned i;
+ unsigned int i;
struct dm_block *b;
__le64 *data_le, zero = cpu_to_le64(0);
- unsigned block_size = dm_bm_block_size(bm) / sizeof(__le64);
+ unsigned int block_size = dm_bm_block_size(bm) / sizeof(__le64);
/*
* We can't use a validator here - it may be all zeroes.
@@ -971,7 +977,7 @@ struct dm_pool_metadata *dm_pool_metadata_open(struct block_device *bdev,
int dm_pool_metadata_close(struct dm_pool_metadata *pmd)
{
int r;
- unsigned open_devices = 0;
+ unsigned int open_devices = 0;
struct dm_thin_device *td, *tmp;
down_read(&pmd->root_lock);
@@ -1530,9 +1536,9 @@ static int __find_block(struct dm_thin_device *td, dm_block_t block,
dm_block_t keys[2] = { td->id, block };
struct dm_btree_info *info;
- if (can_issue_io) {
+ if (can_issue_io)
info = &pmd->info;
- } else
+ else
info = &pmd->nb_info;
r = dm_btree_lookup(info, pmd->root, keys, &value);
@@ -1606,8 +1612,8 @@ static int __find_mapped_range(struct dm_thin_device *td,
if (r) {
if (r == -ENODATA)
break;
- else
- return r;
+
+ return r;
}
if ((lookup.block != pool_end) ||
@@ -1679,7 +1685,7 @@ int dm_thin_insert_block(struct dm_thin_device *td, dm_block_t block,
static int __remove_range(struct dm_thin_device *td, dm_block_t begin, dm_block_t end)
{
int r;
- unsigned count, total_count = 0;
+ unsigned int count, total_count = 0;
struct dm_pool_metadata *pmd = td->pmd;
dm_block_t keys[1] = { td->id };
__le64 value;
diff --git a/drivers/md/dm-thin-metadata.h b/drivers/md/dm-thin-metadata.h
index 4d7a2caf21d9..2f64f48b5f19 100644
--- a/drivers/md/dm-thin-metadata.h
+++ b/drivers/md/dm-thin-metadata.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2010-2011 Red Hat, Inc.
*
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 64cfcf46881d..6cd105c1cef3 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2011-2012 Red Hat UK.
*
@@ -32,7 +33,7 @@
#define COMMIT_PERIOD HZ
#define NO_SPACE_TIMEOUT_SECS 60
-static unsigned no_space_timeout_secs = NO_SPACE_TIMEOUT_SECS;
+static unsigned int no_space_timeout_secs = NO_SPACE_TIMEOUT_SECS;
DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
"A percentage of time allocated for copy on write");
@@ -254,7 +255,7 @@ struct pool {
struct delayed_work no_space_timeout;
unsigned long last_commit_jiffies;
- unsigned ref_count;
+ unsigned int ref_count;
spinlock_t lock;
struct bio_list deferred_flush_bios;
@@ -293,7 +294,7 @@ static enum pool_mode get_pool_mode(struct pool *pool)
static void notify_of_pool_mode_change(struct pool *pool)
{
- const char *descs[] = {
+ static const char *descs[] = {
"write",
"out-of-data-space",
"read-only",
@@ -1037,6 +1038,7 @@ out:
static void free_discard_mapping(struct dm_thin_new_mapping *m)
{
struct thin_c *tc = m->tc;
+
if (m->cell)
cell_defer_no_holder(tc, m->cell);
mempool_free(m, &tc->pool->mapping_pool);
@@ -1180,9 +1182,9 @@ static void process_prepared_discard_passdown_pt1(struct dm_thin_new_mapping *m)
discard_parent = bio_alloc(NULL, 1, 0, GFP_NOIO);
discard_parent->bi_end_io = passdown_endio;
discard_parent->bi_private = m;
- if (m->maybe_shared)
- passdown_double_checking_shared_status(m, discard_parent);
- else {
+ if (m->maybe_shared)
+ passdown_double_checking_shared_status(m, discard_parent);
+ else {
struct discard_op op;
begin_discard(&op, tc, discard_parent);
@@ -2159,7 +2161,7 @@ static void process_thin_deferred_bios(struct thin_c *tc)
struct bio *bio;
struct bio_list bios;
struct blk_plug plug;
- unsigned count = 0;
+ unsigned int count = 0;
if (tc->requeue_mode) {
error_thin_bio_list(tc, &tc->deferred_bio_list,
@@ -2207,6 +2209,7 @@ static void process_thin_deferred_bios(struct thin_c *tc)
throttle_work_update(&pool->throttle);
dm_pool_issue_prefetches(pool->pmd);
}
+ cond_resched();
}
blk_finish_plug(&plug);
}
@@ -2228,9 +2231,9 @@ static int cmp_cells(const void *lhs, const void *rhs)
return 0;
}
-static unsigned sort_cells(struct pool *pool, struct list_head *cells)
+static unsigned int sort_cells(struct pool *pool, struct list_head *cells)
{
- unsigned count = 0;
+ unsigned int count = 0;
struct dm_bio_prison_cell *cell, *tmp;
list_for_each_entry_safe(cell, tmp, cells, user_list) {
@@ -2251,7 +2254,7 @@ static void process_thin_deferred_cells(struct thin_c *tc)
struct pool *pool = tc->pool;
struct list_head cells;
struct dm_bio_prison_cell *cell;
- unsigned i, j, count;
+ unsigned int i, j, count;
INIT_LIST_HEAD(&cells);
@@ -2289,6 +2292,7 @@ static void process_thin_deferred_cells(struct thin_c *tc)
else
pool->process_cell(tc, cell);
}
+ cond_resched();
} while (!list_empty(&cells));
}
@@ -2411,6 +2415,7 @@ static void do_worker(struct work_struct *ws)
static void do_waker(struct work_struct *ws)
{
struct pool *pool = container_of(to_delayed_work(ws), struct pool, waker);
+
wake_worker(pool);
queue_delayed_work(pool->wq, &pool->waker, COMMIT_PERIOD);
}
@@ -2473,6 +2478,7 @@ static struct noflush_work *to_noflush(struct work_struct *ws)
static void do_noflush_start(struct work_struct *ws)
{
struct noflush_work *w = to_noflush(ws);
+
w->tc->requeue_mode = true;
requeue_io(w->tc);
pool_work_complete(&w->pw);
@@ -2481,6 +2487,7 @@ static void do_noflush_start(struct work_struct *ws)
static void do_noflush_stop(struct work_struct *ws)
{
struct noflush_work *w = to_noflush(ws);
+
w->tc->requeue_mode = false;
pool_work_complete(&w->pw);
}
@@ -2799,9 +2806,11 @@ static void requeue_bios(struct pool *pool)
rcu_read_unlock();
}
-/*----------------------------------------------------------------
+/*
+ *--------------------------------------------------------------
* Binding of control targets to a pool object
- *--------------------------------------------------------------*/
+ *--------------------------------------------------------------
+ */
static bool is_factor(sector_t block_size, uint32_t n)
{
return !sector_div(block_size, n);
@@ -2865,9 +2874,11 @@ static void unbind_control_target(struct pool *pool, struct dm_target *ti)
pool->ti = NULL;
}
-/*----------------------------------------------------------------
+/*
+ *--------------------------------------------------------------
* Pool creation
- *--------------------------------------------------------------*/
+ *--------------------------------------------------------------
+ */
/* Initialize pool features. */
static void pool_features_init(struct pool_features *pf)
{
@@ -3091,9 +3102,11 @@ static struct pool *__pool_find(struct mapped_device *pool_md,
return pool;
}
-/*----------------------------------------------------------------
+/*
+ *--------------------------------------------------------------
* Pool target methods
- *--------------------------------------------------------------*/
+ *--------------------------------------------------------------
+ */
static void pool_dtr(struct dm_target *ti)
{
struct pool_c *pt = ti->private;
@@ -3113,7 +3126,7 @@ static int parse_pool_features(struct dm_arg_set *as, struct pool_features *pf,
struct dm_target *ti)
{
int r;
- unsigned argc;
+ unsigned int argc;
const char *arg_name;
static const struct dm_arg _args[] = {
@@ -3234,6 +3247,7 @@ static dm_block_t calc_metadata_threshold(struct pool_c *pt)
* delete after you've grown the device).
*/
dm_block_t quarter = get_metadata_dev_size_in_blocks(pt->metadata_dev->bdev) / 4;
+
return min((dm_block_t)1024ULL /* 4M */, quarter);
}
@@ -3250,7 +3264,7 @@ static dm_block_t calc_metadata_threshold(struct pool_c *pt)
* read_only: Don't allow any changes to be made to the pool metadata.
* error_if_no_space: error IOs, instead of queueing, if no space.
*/
-static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
+static int pool_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
int r, pool_created = 0;
struct pool_c *pt;
@@ -3645,7 +3659,7 @@ static void pool_postsuspend(struct dm_target *ti)
(void) commit(pool);
}
-static int check_arg_count(unsigned argc, unsigned args_required)
+static int check_arg_count(unsigned int argc, unsigned int args_required)
{
if (argc != args_required) {
DMWARN("Message received with %u arguments instead of %u.",
@@ -3668,7 +3682,7 @@ static int read_dev_id(char *arg, dm_thin_id *dev_id, int warning)
return -EINVAL;
}
-static int process_create_thin_mesg(unsigned argc, char **argv, struct pool *pool)
+static int process_create_thin_mesg(unsigned int argc, char **argv, struct pool *pool)
{
dm_thin_id dev_id;
int r;
@@ -3691,7 +3705,7 @@ static int process_create_thin_mesg(unsigned argc, char **argv, struct pool *poo
return 0;
}
-static int process_create_snap_mesg(unsigned argc, char **argv, struct pool *pool)
+static int process_create_snap_mesg(unsigned int argc, char **argv, struct pool *pool)
{
dm_thin_id dev_id;
dm_thin_id origin_dev_id;
@@ -3719,7 +3733,7 @@ static int process_create_snap_mesg(unsigned argc, char **argv, struct pool *poo
return 0;
}
-static int process_delete_mesg(unsigned argc, char **argv, struct pool *pool)
+static int process_delete_mesg(unsigned int argc, char **argv, struct pool *pool)
{
dm_thin_id dev_id;
int r;
@@ -3739,7 +3753,7 @@ static int process_delete_mesg(unsigned argc, char **argv, struct pool *pool)
return r;
}
-static int process_set_transaction_id_mesg(unsigned argc, char **argv, struct pool *pool)
+static int process_set_transaction_id_mesg(unsigned int argc, char **argv, struct pool *pool)
{
dm_thin_id old_id, new_id;
int r;
@@ -3768,7 +3782,7 @@ static int process_set_transaction_id_mesg(unsigned argc, char **argv, struct po
return 0;
}
-static int process_reserve_metadata_snap_mesg(unsigned argc, char **argv, struct pool *pool)
+static int process_reserve_metadata_snap_mesg(unsigned int argc, char **argv, struct pool *pool)
{
int r;
@@ -3785,7 +3799,7 @@ static int process_reserve_metadata_snap_mesg(unsigned argc, char **argv, struct
return r;
}
-static int process_release_metadata_snap_mesg(unsigned argc, char **argv, struct pool *pool)
+static int process_release_metadata_snap_mesg(unsigned int argc, char **argv, struct pool *pool)
{
int r;
@@ -3809,8 +3823,8 @@ static int process_release_metadata_snap_mesg(unsigned argc, char **argv, struct
* reserve_metadata_snap
* release_metadata_snap
*/
-static int pool_message(struct dm_target *ti, unsigned argc, char **argv,
- char *result, unsigned maxlen)
+static int pool_message(struct dm_target *ti, unsigned int argc, char **argv,
+ char *result, unsigned int maxlen)
{
int r = -EINVAL;
struct pool_c *pt = ti->private;
@@ -3850,9 +3864,9 @@ static int pool_message(struct dm_target *ti, unsigned argc, char **argv,
}
static void emit_flags(struct pool_features *pf, char *result,
- unsigned sz, unsigned maxlen)
+ unsigned int sz, unsigned int maxlen)
{
- unsigned count = !pf->zero_new_blocks + !pf->discard_enabled +
+ unsigned int count = !pf->zero_new_blocks + !pf->discard_enabled +
!pf->discard_passdown + (pf->mode == PM_READ_ONLY) +
pf->error_if_no_space;
DMEMIT("%u ", count);
@@ -3880,10 +3894,10 @@ static void emit_flags(struct pool_features *pf, char *result,
* <pool mode> <discard config> <no space config> <needs_check>
*/
static void pool_status(struct dm_target *ti, status_type_t type,
- unsigned status_flags, char *result, unsigned maxlen)
+ unsigned int status_flags, char *result, unsigned int maxlen)
{
int r;
- unsigned sz = 0;
+ unsigned int sz = 0;
uint64_t transaction_id;
dm_block_t nr_free_blocks_data;
dm_block_t nr_free_blocks_metadata;
@@ -4095,9 +4109,11 @@ static struct target_type pool_target = {
.io_hints = pool_io_hints,
};
-/*----------------------------------------------------------------
+/*
+ *--------------------------------------------------------------
* Thin target methods
- *--------------------------------------------------------------*/
+ *--------------------------------------------------------------
+ */
static void thin_get(struct thin_c *tc)
{
refcount_inc(&tc->refcount);
@@ -4145,7 +4161,7 @@ static void thin_dtr(struct dm_target *ti)
* If the pool device has discards disabled, they get disabled for the thin
* device as well.
*/
-static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
+static int thin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
int r;
struct thin_c *tc;
@@ -4367,7 +4383,7 @@ static int thin_preresume(struct dm_target *ti)
* <nr mapped sectors> <highest mapped sector>
*/
static void thin_status(struct dm_target *ti, status_type_t type,
- unsigned status_flags, char *result, unsigned maxlen)
+ unsigned int status_flags, char *result, unsigned int maxlen)
{
int r;
ssize_t sz = 0;
@@ -4518,7 +4534,7 @@ static void dm_thin_exit(void)
module_init(dm_thin_init);
module_exit(dm_thin_exit);
-module_param_named(no_space_timeout, no_space_timeout_secs, uint, S_IRUGO | S_IWUSR);
+module_param_named(no_space_timeout, no_space_timeout_secs, uint, 0644);
MODULE_PARM_DESC(no_space_timeout, "Out of data space queue IO timeout in seconds");
MODULE_DESCRIPTION(DM_NAME " thin provisioning target");
diff --git a/drivers/md/dm-uevent.c b/drivers/md/dm-uevent.c
index 8671267200d8..491738263292 100644
--- a/drivers/md/dm-uevent.c
+++ b/drivers/md/dm-uevent.c
@@ -3,7 +3,7 @@
* Device Mapper Uevent Support (dm-uevent)
*
* Copyright IBM Corporation, 2007
- * Author: Mike Anderson <andmike@linux.vnet.ibm.com>
+ * Author: Mike Anderson <andmike@linux.vnet.ibm.com>
*/
#include <linux/list.h>
#include <linux/slab.h>
@@ -60,7 +60,7 @@ static struct dm_uevent *dm_build_path_uevent(struct mapped_device *md,
enum kobject_action action,
const char *dm_action,
const char *path,
- unsigned nr_valid_paths)
+ unsigned int nr_valid_paths)
{
struct dm_uevent *event;
@@ -168,7 +168,7 @@ EXPORT_SYMBOL_GPL(dm_send_uevents);
*
*/
void dm_path_uevent(enum dm_uevent_type event_type, struct dm_target *ti,
- const char *path, unsigned nr_valid_paths)
+ const char *path, unsigned int nr_valid_paths)
{
struct mapped_device *md = dm_table_get_md(ti->table);
struct dm_uevent *event;
diff --git a/drivers/md/dm-uevent.h b/drivers/md/dm-uevent.h
index d30d226f2a18..12a5d4fb7d44 100644
--- a/drivers/md/dm-uevent.h
+++ b/drivers/md/dm-uevent.h
@@ -3,7 +3,7 @@
* Device Mapper Uevent Support
*
* Copyright IBM Corporation, 2007
- * Author: Mike Anderson <andmike@linux.vnet.ibm.com>
+ * Author: Mike Anderson <andmike@linux.vnet.ibm.com>
*/
#ifndef DM_UEVENT_H
#define DM_UEVENT_H
@@ -20,7 +20,7 @@ extern void dm_uevent_exit(void);
extern void dm_send_uevents(struct list_head *events, struct kobject *kobj);
extern void dm_path_uevent(enum dm_uevent_type event_type,
struct dm_target *ti, const char *path,
- unsigned nr_valid_paths);
+ unsigned int nr_valid_paths);
#else
@@ -37,7 +37,7 @@ static inline void dm_send_uevents(struct list_head *events,
}
static inline void dm_path_uevent(enum dm_uevent_type event_type,
struct dm_target *ti, const char *path,
- unsigned nr_valid_paths)
+ unsigned int nr_valid_paths)
{
}
diff --git a/drivers/md/dm-unstripe.c b/drivers/md/dm-unstripe.c
index fdc8921e5c19..e7b7d5983a16 100644
--- a/drivers/md/dm-unstripe.c
+++ b/drivers/md/dm-unstripe.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2017 Intel Corporation.
*
diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c
index 23cffce56403..962fc32c947c 100644
--- a/drivers/md/dm-verity-fec.c
+++ b/drivers/md/dm-verity-fec.c
@@ -59,14 +59,14 @@ static int fec_decode_rs8(struct dm_verity *v, struct dm_verity_fec_io *fio,
* to the data block. Caller is responsible for releasing buf.
*/
static u8 *fec_read_parity(struct dm_verity *v, u64 rsb, int index,
- unsigned *offset, struct dm_buffer **buf)
+ unsigned int *offset, struct dm_buffer **buf)
{
u64 position, block, rem;
u8 *res;
position = (index + rsb) * v->fec->roots;
block = div64_u64_rem(position, v->fec->io_size, &rem);
- *offset = (unsigned)rem;
+ *offset = (unsigned int)rem;
res = dm_bufio_read(v->fec->bufio, block, buf);
if (IS_ERR(res)) {
@@ -102,7 +102,7 @@ static u8 *fec_read_parity(struct dm_verity *v, u64 rsb, int index,
*/
static inline u8 *fec_buffer_rs_block(struct dm_verity *v,
struct dm_verity_fec_io *fio,
- unsigned i, unsigned j)
+ unsigned int i, unsigned int j)
{
return &fio->bufs[i][j * v->fec->rsn];
}
@@ -111,7 +111,7 @@ static inline u8 *fec_buffer_rs_block(struct dm_verity *v,
* Return an index to the current RS block when called inside
* fec_for_each_buffer_rs_block.
*/
-static inline unsigned fec_buffer_rs_index(unsigned i, unsigned j)
+static inline unsigned int fec_buffer_rs_index(unsigned int i, unsigned int j)
{
return (i << DM_VERITY_FEC_BUF_RS_BITS) + j;
}
@@ -121,12 +121,12 @@ static inline unsigned fec_buffer_rs_index(unsigned i, unsigned j)
* starting from block_offset.
*/
static int fec_decode_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio,
- u64 rsb, int byte_index, unsigned block_offset,
+ u64 rsb, int byte_index, unsigned int block_offset,
int neras)
{
int r, corrected = 0, res;
struct dm_buffer *buf;
- unsigned n, i, offset;
+ unsigned int n, i, offset;
u8 *par, *block;
par = fec_read_parity(v, rsb, block_offset, &offset, &buf);
@@ -197,7 +197,7 @@ static int fec_is_erasure(struct dm_verity *v, struct dm_verity_io *io,
* fits into buffers. Check for erasure locations if @neras is non-NULL.
*/
static int fec_read_bufs(struct dm_verity *v, struct dm_verity_io *io,
- u64 rsb, u64 target, unsigned block_offset,
+ u64 rsb, u64 target, unsigned int block_offset,
int *neras)
{
bool is_zero;
@@ -208,7 +208,7 @@ static int fec_read_bufs(struct dm_verity *v, struct dm_verity_io *io,
u64 block, ileaved;
u8 *bbuf, *rs_block;
u8 want_digest[HASH_MAX_DIGESTSIZE];
- unsigned n, k;
+ unsigned int n, k;
if (neras)
*neras = 0;
@@ -304,7 +304,7 @@ done:
*/
static int fec_alloc_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio)
{
- unsigned n;
+ unsigned int n;
if (!fio->rs)
fio->rs = mempool_alloc(&v->fec->rs_pool, GFP_NOIO);
@@ -344,7 +344,7 @@ static int fec_alloc_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio)
*/
static void fec_init_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio)
{
- unsigned n;
+ unsigned int n;
fec_for_each_buffer(fio, n)
memset(fio->bufs[n], 0, v->fec->rsn << DM_VERITY_FEC_BUF_RS_BITS);
@@ -362,7 +362,7 @@ static int fec_decode_rsb(struct dm_verity *v, struct dm_verity_io *io,
bool use_erasures)
{
int r, neras = 0;
- unsigned pos;
+ unsigned int pos;
r = fec_alloc_bufs(v, fio);
if (unlikely(r < 0))
@@ -484,7 +484,7 @@ done:
*/
void verity_fec_finish_io(struct dm_verity_io *io)
{
- unsigned n;
+ unsigned int n;
struct dm_verity_fec *f = io->v->fec;
struct dm_verity_fec_io *fio = fec_io(io);
@@ -522,8 +522,8 @@ void verity_fec_init_io(struct dm_verity_io *io)
/*
* Append feature arguments and values to the status table.
*/
-unsigned verity_fec_status_table(struct dm_verity *v, unsigned sz,
- char *result, unsigned maxlen)
+unsigned int verity_fec_status_table(struct dm_verity *v, unsigned int sz,
+ char *result, unsigned int maxlen)
{
if (!verity_fec_is_enabled(v))
return sz;
@@ -589,7 +589,7 @@ bool verity_is_fec_opt_arg(const char *arg_name)
}
int verity_fec_parse_opt_args(struct dm_arg_set *as, struct dm_verity *v,
- unsigned *argc, const char *arg_name)
+ unsigned int *argc, const char *arg_name)
{
int r;
struct dm_target *ti = v->ti;
diff --git a/drivers/md/dm-verity-fec.h b/drivers/md/dm-verity-fec.h
index 3c46c8d61883..8454070d2824 100644
--- a/drivers/md/dm-verity-fec.h
+++ b/drivers/md/dm-verity-fec.h
@@ -55,10 +55,10 @@ struct dm_verity_fec_io {
struct rs_control *rs; /* Reed-Solomon state */
int erasures[DM_VERITY_FEC_MAX_RSN]; /* erasures for decode_rs8 */
u8 *bufs[DM_VERITY_FEC_BUF_MAX]; /* bufs for deinterleaving */
- unsigned nbufs; /* number of buffers allocated */
+ unsigned int nbufs; /* number of buffers allocated */
u8 *output; /* buffer for corrected output */
size_t output_pos;
- unsigned level; /* recursion level */
+ unsigned int level; /* recursion level */
};
#ifdef CONFIG_DM_VERITY_FEC
@@ -72,15 +72,15 @@ extern int verity_fec_decode(struct dm_verity *v, struct dm_verity_io *io,
enum verity_block_type type, sector_t block,
u8 *dest, struct bvec_iter *iter);
-extern unsigned verity_fec_status_table(struct dm_verity *v, unsigned sz,
- char *result, unsigned maxlen);
+extern unsigned int verity_fec_status_table(struct dm_verity *v, unsigned int sz,
+ char *result, unsigned int maxlen);
extern void verity_fec_finish_io(struct dm_verity_io *io);
extern void verity_fec_init_io(struct dm_verity_io *io);
extern bool verity_is_fec_opt_arg(const char *arg_name);
extern int verity_fec_parse_opt_args(struct dm_arg_set *as,
- struct dm_verity *v, unsigned *argc,
+ struct dm_verity *v, unsigned int *argc,
const char *arg_name);
extern void verity_fec_dtr(struct dm_verity *v);
@@ -106,9 +106,9 @@ static inline int verity_fec_decode(struct dm_verity *v,
return -EOPNOTSUPP;
}
-static inline unsigned verity_fec_status_table(struct dm_verity *v,
- unsigned sz, char *result,
- unsigned maxlen)
+static inline unsigned int verity_fec_status_table(struct dm_verity *v,
+ unsigned int sz, char *result,
+ unsigned int maxlen)
{
return sz;
}
@@ -128,7 +128,7 @@ static inline bool verity_is_fec_opt_arg(const char *arg_name)
static inline int verity_fec_parse_opt_args(struct dm_arg_set *as,
struct dm_verity *v,
- unsigned *argc,
+ unsigned int *argc,
const char *arg_name)
{
return -EINVAL;
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index ccf5b852fbf7..ade83ef3b439 100644
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -41,9 +41,9 @@
#define DM_VERITY_OPTS_MAX (4 + DM_VERITY_OPTS_FEC + \
DM_VERITY_ROOT_HASH_VERIFICATION_OPTS)
-static unsigned dm_verity_prefetch_cluster = DM_VERITY_DEFAULT_PREFETCH_SIZE;
+static unsigned int dm_verity_prefetch_cluster = DM_VERITY_DEFAULT_PREFETCH_SIZE;
-module_param_named(prefetch_cluster, dm_verity_prefetch_cluster, uint, S_IRUGO | S_IWUSR);
+module_param_named(prefetch_cluster, dm_verity_prefetch_cluster, uint, 0644);
static DEFINE_STATIC_KEY_FALSE(use_tasklet_enabled);
@@ -51,7 +51,7 @@ struct dm_verity_prefetch_work {
struct work_struct work;
struct dm_verity *v;
sector_t block;
- unsigned n_blocks;
+ unsigned int n_blocks;
};
/*
@@ -110,22 +110,24 @@ static int verity_hash_update(struct dm_verity *v, struct ahash_request *req,
sg_init_one(&sg, data, len);
ahash_request_set_crypt(req, &sg, NULL, len);
return crypto_wait_req(crypto_ahash_update(req), wait);
- } else {
- do {
- int r;
- size_t this_step = min_t(size_t, len, PAGE_SIZE - offset_in_page(data));
- flush_kernel_vmap_range((void *)data, this_step);
- sg_init_table(&sg, 1);
- sg_set_page(&sg, vmalloc_to_page(data), this_step, offset_in_page(data));
- ahash_request_set_crypt(req, &sg, NULL, this_step);
- r = crypto_wait_req(crypto_ahash_update(req), wait);
- if (unlikely(r))
- return r;
- data += this_step;
- len -= this_step;
- } while (len);
- return 0;
}
+
+ do {
+ int r;
+ size_t this_step = min_t(size_t, len, PAGE_SIZE - offset_in_page(data));
+
+ flush_kernel_vmap_range((void *)data, this_step);
+ sg_init_table(&sg, 1);
+ sg_set_page(&sg, vmalloc_to_page(data), this_step, offset_in_page(data));
+ ahash_request_set_crypt(req, &sg, NULL, this_step);
+ r = crypto_wait_req(crypto_ahash_update(req), wait);
+ if (unlikely(r))
+ return r;
+ data += this_step;
+ len -= this_step;
+ } while (len);
+
+ return 0;
}
/*
@@ -164,7 +166,7 @@ static int verity_hash_final(struct dm_verity *v, struct ahash_request *req,
r = verity_hash_update(v, req, v->salt, v->salt_size, wait);
if (r < 0) {
- DMERR("verity_hash_final failed updating salt: %d", r);
+ DMERR("%s failed updating salt: %d", __func__, r);
goto out;
}
}
@@ -196,10 +198,10 @@ out:
}
static void verity_hash_at_level(struct dm_verity *v, sector_t block, int level,
- sector_t *hash_block, unsigned *offset)
+ sector_t *hash_block, unsigned int *offset)
{
sector_t position = verity_position_at_level(v, block, level);
- unsigned idx;
+ unsigned int idx;
*hash_block = v->hash_level_block[level] + (position >> v->hash_per_block_bits);
@@ -287,7 +289,7 @@ static int verity_verify_level(struct dm_verity *v, struct dm_verity_io *io,
u8 *data;
int r;
sector_t hash_block;
- unsigned offset;
+ unsigned int offset;
verity_hash_at_level(v, block, level, &hash_block, &offset);
@@ -332,10 +334,8 @@ static int verity_verify_level(struct dm_verity *v, struct dm_verity_io *io,
*/
r = -EAGAIN;
goto release_ret_r;
- }
- else if (verity_fec_decode(v, io,
- DM_VERITY_BLOCK_TYPE_METADATA,
- hash_block, data, NULL) == 0)
+ } else if (verity_fec_decode(v, io, DM_VERITY_BLOCK_TYPE_METADATA,
+ hash_block, data, NULL) == 0)
aux->hash_verified = 1;
else if (verity_handle_err(v,
DM_VERITY_BLOCK_TYPE_METADATA,
@@ -424,7 +424,7 @@ static int verity_for_io_block(struct dm_verity *v, struct dm_verity_io *io,
r = crypto_wait_req(crypto_ahash_update(req), wait);
if (unlikely(r < 0)) {
- DMERR("verity_for_io_block crypto op failed: %d", r);
+ DMERR("%s crypto op failed: %d", __func__, r);
return r;
}
@@ -445,13 +445,13 @@ int verity_for_bv_block(struct dm_verity *v, struct dm_verity_io *io,
struct dm_verity_io *io, u8 *data,
size_t len))
{
- unsigned todo = 1 << v->data_dev_block_bits;
+ unsigned int todo = 1 << v->data_dev_block_bits;
struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size);
do {
int r;
u8 *page;
- unsigned len;
+ unsigned int len;
struct bio_vec bv = bio_iter_iovec(bio, *iter);
page = bvec_kmap_local(&bv);
@@ -685,10 +685,12 @@ static void verity_prefetch_io(struct work_struct *work)
for (i = v->levels - 2; i >= 0; i--) {
sector_t hash_block_start;
sector_t hash_block_end;
+
verity_hash_at_level(v, pw->block, i, &hash_block_start, NULL);
verity_hash_at_level(v, pw->block + pw->n_blocks - 1, i, &hash_block_end, NULL);
+
if (!i) {
- unsigned cluster = READ_ONCE(dm_verity_prefetch_cluster);
+ unsigned int cluster = READ_ONCE(dm_verity_prefetch_cluster);
cluster >>= v->data_dev_block_bits;
if (unlikely(!cluster))
@@ -753,7 +755,7 @@ static int verity_map(struct dm_target *ti, struct bio *bio)
bio_set_dev(bio, v->data_dev->bdev);
bio->bi_iter.bi_sector = verity_map_sector(v, bio->bi_iter.bi_sector);
- if (((unsigned)bio->bi_iter.bi_sector | bio_sectors(bio)) &
+ if (((unsigned int)bio->bi_iter.bi_sector | bio_sectors(bio)) &
((1 << (v->data_dev_block_bits - SECTOR_SHIFT)) - 1)) {
DMERR_LIMIT("unaligned io");
return DM_MAPIO_KILL;
@@ -789,12 +791,12 @@ static int verity_map(struct dm_target *ti, struct bio *bio)
* Status: V (valid) or C (corruption found)
*/
static void verity_status(struct dm_target *ti, status_type_t type,
- unsigned status_flags, char *result, unsigned maxlen)
+ unsigned int status_flags, char *result, unsigned int maxlen)
{
struct dm_verity *v = ti->private;
- unsigned args = 0;
- unsigned sz = 0;
- unsigned x;
+ unsigned int args = 0;
+ unsigned int sz = 0;
+ unsigned int x;
switch (type) {
case STATUSTYPE_INFO:
@@ -1054,7 +1056,7 @@ static int verity_parse_opt_args(struct dm_arg_set *as, struct dm_verity *v,
bool only_modifier_opts)
{
int r = 0;
- unsigned argc;
+ unsigned int argc;
struct dm_target *ti = v->ti;
const char *arg_name;
@@ -1156,13 +1158,12 @@ static int verity_parse_opt_args(struct dm_arg_set *as, struct dm_verity *v,
* <digest>
* <salt> Hex string or "-" if no salt.
*/
-static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
+static int verity_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
struct dm_verity *v;
struct dm_verity_sig_opts verify_args = {0};
struct dm_arg_set as;
unsigned int num;
- unsigned int wq_flags;
unsigned long long num_ll;
int r;
int i;
@@ -1370,6 +1371,7 @@ static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
hash_position = v->hash_start;
for (i = v->levels - 1; i >= 0; i--) {
sector_t s;
+
v->hash_level_block[i] = hash_position;
s = (v->data_blocks + ((sector_t)1 << ((i + 1) * v->hash_per_block_bits)) - 1)
>> ((i + 1) * v->hash_per_block_bits);
@@ -1399,8 +1401,6 @@ static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
goto bad;
}
- /* WQ_UNBOUND greatly improves performance when running on ramdisk */
- wq_flags = WQ_MEM_RECLAIM | WQ_UNBOUND;
/*
* Using WQ_HIGHPRI improves throughput and completion latency by
* reducing wait times when reading from a dm-verity device.
@@ -1410,8 +1410,7 @@ static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
* will fall-back to using it for error handling (or if the bufio cache
* doesn't have required hashes).
*/
- wq_flags |= WQ_HIGHPRI;
- v->verify_wq = alloc_workqueue("kverityd", wq_flags, num_online_cpus());
+ v->verify_wq = alloc_workqueue("kverityd", WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
if (!v->verify_wq) {
ti->error = "Cannot allocate workqueue";
r = -ENOMEM;
diff --git a/drivers/md/dm-verity-verify-sig.c b/drivers/md/dm-verity-verify-sig.c
index db61a1f43ae9..4836508ea50c 100644
--- a/drivers/md/dm-verity-verify-sig.c
+++ b/drivers/md/dm-verity-verify-sig.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2019 Microsoft Corporation.
*
diff --git a/drivers/md/dm-verity-verify-sig.h b/drivers/md/dm-verity-verify-sig.h
index 3987c7141f79..f36ea92127bf 100644
--- a/drivers/md/dm-verity-verify-sig.h
+++ b/drivers/md/dm-verity-verify-sig.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2019 Microsoft Corporation.
*
diff --git a/drivers/md/dm-verity.h b/drivers/md/dm-verity.h
index 98f306ec6a33..2f555b420367 100644
--- a/drivers/md/dm-verity.h
+++ b/drivers/md/dm-verity.h
@@ -42,7 +42,7 @@ struct dm_verity {
u8 *root_digest; /* digest of the root block */
u8 *salt; /* salt: its size is salt_size */
u8 *zero_digest; /* digest for a zero block */
- unsigned salt_size;
+ unsigned int salt_size;
sector_t data_start; /* data offset in 512-byte sectors */
sector_t hash_start; /* hash start in blocks */
sector_t data_blocks; /* the number of data blocks */
@@ -54,10 +54,10 @@ struct dm_verity {
unsigned char version;
bool hash_failed:1; /* set if hash of any block failed */
bool use_tasklet:1; /* try to verify in tasklet before work-queue */
- unsigned digest_size; /* digest size for the current hash algorithm */
+ unsigned int digest_size; /* digest size for the current hash algorithm */
unsigned int ahash_reqsize;/* the size of temporary space for crypto */
enum verity_mode mode; /* mode for handling verification errors */
- unsigned corrupted_errs;/* Number of errors for corrupted blocks */
+ unsigned int corrupted_errs;/* Number of errors for corrupted blocks */
struct workqueue_struct *verify_wq;
@@ -77,7 +77,7 @@ struct dm_verity_io {
bio_end_io_t *orig_bi_end_io;
sector_t block;
- unsigned n_blocks;
+ unsigned int n_blocks;
bool in_tasklet;
struct bvec_iter iter;
diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c
index 96a003eb7323..3aa5874f0aef 100644
--- a/drivers/md/dm-writecache.c
+++ b/drivers/md/dm-writecache.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2018 Red Hat. All rights reserved.
*
@@ -83,16 +83,13 @@ struct wc_entry {
struct rb_node rb_node;
struct list_head lru;
unsigned short wc_list_contiguous;
- bool write_in_progress
#if BITS_PER_LONG == 64
- :1
-#endif
- ;
- unsigned long index
-#if BITS_PER_LONG == 64
- :47
+ bool write_in_progress : 1;
+ unsigned long index : 47;
+#else
+ bool write_in_progress;
+ unsigned long index;
#endif
- ;
unsigned long age;
#ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
uint64_t original_sector;
@@ -128,9 +125,9 @@ struct dm_writecache {
unsigned long max_age;
unsigned long pause;
- unsigned uncommitted_blocks;
- unsigned autocommit_blocks;
- unsigned max_writeback_jobs;
+ unsigned int uncommitted_blocks;
+ unsigned int autocommit_blocks;
+ unsigned int max_writeback_jobs;
int error;
@@ -155,7 +152,7 @@ struct dm_writecache {
sector_t data_device_sectors;
void *block_start;
struct wc_entry *entries;
- unsigned block_size;
+ unsigned int block_size;
unsigned char block_size_bits;
bool pmem_mode:1;
@@ -178,13 +175,13 @@ struct dm_writecache {
bool metadata_only:1;
bool pause_set:1;
- unsigned high_wm_percent_value;
- unsigned low_wm_percent_value;
- unsigned autocommit_time_value;
- unsigned max_age_value;
- unsigned pause_value;
+ unsigned int high_wm_percent_value;
+ unsigned int low_wm_percent_value;
+ unsigned int autocommit_time_value;
+ unsigned int max_age_value;
+ unsigned int pause_value;
- unsigned writeback_all;
+ unsigned int writeback_all;
struct workqueue_struct *writeback_wq;
struct work_struct writeback_work;
struct work_struct flush_work;
@@ -202,7 +199,7 @@ struct dm_writecache {
struct dm_kcopyd_client *dm_kcopyd;
unsigned long *dirty_bitmap;
- unsigned dirty_bitmap_size;
+ unsigned int dirty_bitmap_size;
struct bio_set bio_set;
mempool_t copy_pool;
@@ -227,7 +224,7 @@ struct writeback_struct {
struct list_head endio_entry;
struct dm_writecache *wc;
struct wc_entry **wc_list;
- unsigned wc_list_n;
+ unsigned int wc_list_n;
struct wc_entry *wc_list_inline[WB_LIST_INLINE];
struct bio bio;
};
@@ -236,7 +233,7 @@ struct copy_struct {
struct list_head endio_entry;
struct dm_writecache *wc;
struct wc_entry *e;
- unsigned n_entries;
+ unsigned int n_entries;
int error;
};
@@ -300,6 +297,7 @@ static int persistent_memory_claim(struct dm_writecache *wc)
}
if (da != p) {
long i;
+
wc->memory_map = NULL;
pages = kvmalloc_array(p, sizeof(struct page *), GFP_KERNEL);
if (!pages) {
@@ -309,6 +307,7 @@ static int persistent_memory_claim(struct dm_writecache *wc)
i = 0;
do {
long daa;
+
daa = dax_direct_access(wc->ssd_dev->dax_dev, offset + i,
p - i, DAX_ACCESS, NULL, &pfn);
if (daa <= 0) {
@@ -369,7 +368,7 @@ static struct page *persistent_memory_page(void *addr)
return virt_to_page(addr);
}
-static unsigned persistent_memory_page_offset(void *addr)
+static unsigned int persistent_memory_page_offset(void *addr)
{
return (unsigned long)addr & (PAGE_SIZE - 1);
}
@@ -502,11 +501,12 @@ static void ssd_commit_flushed(struct dm_writecache *wc, bool wait_for_ios)
COMPLETION_INITIALIZER_ONSTACK(endio.c),
ATOMIC_INIT(1),
};
- unsigned bitmap_bits = wc->dirty_bitmap_size * 8;
- unsigned i = 0;
+ unsigned int bitmap_bits = wc->dirty_bitmap_size * 8;
+ unsigned int i = 0;
while (1) {
- unsigned j;
+ unsigned int j;
+
i = find_next_bit(wc->dirty_bitmap, bitmap_bits, i);
if (unlikely(i == bitmap_bits))
break;
@@ -531,7 +531,7 @@ static void ssd_commit_flushed(struct dm_writecache *wc, bool wait_for_ios)
req.notify.context = &endio;
/* writing via async dm-io (implied by notify.fn above) won't return an error */
- (void) dm_io(&req, 1, &region, NULL);
+ (void) dm_io(&req, 1, &region, NULL);
i = j;
}
@@ -623,20 +623,21 @@ static struct wc_entry *writecache_find_entry(struct dm_writecache *wc,
if (unlikely(!node)) {
if (!(flags & WFE_RETURN_FOLLOWING))
return NULL;
- if (read_original_sector(wc, e) >= block) {
+ if (read_original_sector(wc, e) >= block)
return e;
- } else {
- node = rb_next(&e->rb_node);
- if (unlikely(!node))
- return NULL;
- e = container_of(node, struct wc_entry, rb_node);
- return e;
- }
+
+ node = rb_next(&e->rb_node);
+ if (unlikely(!node))
+ return NULL;
+
+ e = container_of(node, struct wc_entry, rb_node);
+ return e;
}
}
while (1) {
struct wc_entry *e2;
+
if (flags & WFE_LOWEST_SEQ)
node = rb_prev(&e->rb_node);
else
@@ -679,6 +680,7 @@ static void writecache_add_to_freelist(struct dm_writecache *wc, struct wc_entry
{
if (WC_MODE_SORT_FREELIST(wc)) {
struct rb_node **node = &wc->freetree.rb_node, *parent = NULL;
+
if (unlikely(!*node))
wc->current_free = e;
while (*node) {
@@ -718,6 +720,7 @@ static struct wc_entry *writecache_pop_from_freelist(struct dm_writecache *wc, s
if (WC_MODE_SORT_FREELIST(wc)) {
struct rb_node *next;
+
if (unlikely(!wc->current_free))
return NULL;
e = wc->current_free;
@@ -769,7 +772,7 @@ static void writecache_poison_lists(struct dm_writecache *wc)
/*
* Catch incorrect access to these values while the device is suspended.
*/
- memset(&wc->tree, -1, sizeof wc->tree);
+ memset(&wc->tree, -1, sizeof(wc->tree));
wc->lru.next = LIST_POISON1;
wc->lru.prev = LIST_POISON2;
wc->freelist.next = LIST_POISON1;
@@ -864,6 +867,7 @@ static void writecache_flush_work(struct work_struct *work)
static void writecache_autocommit_timer(struct timer_list *t)
{
struct dm_writecache *wc = from_timer(wc, t, autocommit_timer);
+
if (!writecache_has_error(wc))
queue_work(wc->writeback_wq, &wc->flush_work);
}
@@ -941,7 +945,8 @@ static void writecache_suspend(struct dm_target *ti)
wc_lock(wc);
if (flush_on_suspend)
wc->writeback_all--;
- while (writecache_wait_for_writeback(wc));
+ while (writecache_wait_for_writeback(wc))
+ ;
if (WC_MODE_PMEM(wc))
persistent_memory_flush_cache(wc->memory_map, wc->memory_map_size);
@@ -962,6 +967,7 @@ static int writecache_alloc_entries(struct dm_writecache *wc)
return -ENOMEM;
for (b = 0; b < wc->n_blocks; b++) {
struct wc_entry *e = &wc->entries[b];
+
e->index = b;
e->write_in_progress = false;
cond_resched();
@@ -1005,6 +1011,7 @@ static void writecache_resume(struct dm_target *ti)
r = writecache_read_metadata(wc, wc->metadata_sectors);
if (r) {
size_t sb_entries_offset;
+
writecache_error(wc, r, "unable to read metadata: %d", r);
sb_entries_offset = offsetof(struct wc_memory_superblock, entries);
memset((char *)wc->memory_map + sb_entries_offset, -1,
@@ -1034,6 +1041,7 @@ static void writecache_resume(struct dm_target *ti)
for (b = 0; b < wc->n_blocks; b++) {
struct wc_entry *e = &wc->entries[b];
struct wc_memory_entry wme;
+
if (writecache_has_error(wc)) {
e->original_sector = -1;
e->seq_count = -1;
@@ -1055,6 +1063,7 @@ static void writecache_resume(struct dm_target *ti)
#endif
for (b = 0; b < wc->n_blocks; b++) {
struct wc_entry *e = &wc->entries[b];
+
if (!writecache_entry_is_committed(wc, e)) {
if (read_seq_count(wc, e) != -1) {
erase_this:
@@ -1100,7 +1109,7 @@ erase_this:
wc_unlock(wc);
}
-static int process_flush_mesg(unsigned argc, char **argv, struct dm_writecache *wc)
+static int process_flush_mesg(unsigned int argc, char **argv, struct dm_writecache *wc)
{
if (argc != 1)
return -EINVAL;
@@ -1133,7 +1142,7 @@ static int process_flush_mesg(unsigned argc, char **argv, struct dm_writecache *
return 0;
}
-static int process_flush_on_suspend_mesg(unsigned argc, char **argv, struct dm_writecache *wc)
+static int process_flush_on_suspend_mesg(unsigned int argc, char **argv, struct dm_writecache *wc)
{
if (argc != 1)
return -EINVAL;
@@ -1153,7 +1162,7 @@ static void activate_cleaner(struct dm_writecache *wc)
wc->freelist_low_watermark = wc->n_blocks;
}
-static int process_cleaner_mesg(unsigned argc, char **argv, struct dm_writecache *wc)
+static int process_cleaner_mesg(unsigned int argc, char **argv, struct dm_writecache *wc)
{
if (argc != 1)
return -EINVAL;
@@ -1167,20 +1176,20 @@ static int process_cleaner_mesg(unsigned argc, char **argv, struct dm_writecache
return 0;
}
-static int process_clear_stats_mesg(unsigned argc, char **argv, struct dm_writecache *wc)
+static int process_clear_stats_mesg(unsigned int argc, char **argv, struct dm_writecache *wc)
{
if (argc != 1)
return -EINVAL;
wc_lock(wc);
- memset(&wc->stats, 0, sizeof wc->stats);
+ memset(&wc->stats, 0, sizeof(wc->stats));
wc_unlock(wc);
return 0;
}
-static int writecache_message(struct dm_target *ti, unsigned argc, char **argv,
- char *result, unsigned maxlen)
+static int writecache_message(struct dm_target *ti, unsigned int argc, char **argv,
+ char *result, unsigned int maxlen)
{
int r = -EINVAL;
struct dm_writecache *wc = ti->private;
@@ -1238,12 +1247,13 @@ static void memcpy_flushcache_optimized(void *dest, void *source, size_t size)
static void bio_copy_block(struct dm_writecache *wc, struct bio *bio, void *data)
{
void *buf;
- unsigned size;
+ unsigned int size;
int rw = bio_data_dir(bio);
- unsigned remaining_size = wc->block_size;
+ unsigned int remaining_size = wc->block_size;
do {
struct bio_vec bv = bio_iter_iovec(bio, bio->bi_iter);
+
buf = bvec_kmap_local(&bv);
size = bv.bv_len;
if (unlikely(size > remaining_size))
@@ -1251,6 +1261,7 @@ static void bio_copy_block(struct dm_writecache *wc, struct bio *bio, void *data
if (rw == READ) {
int r;
+
r = copy_mc_to_kernel(buf, data, size);
flush_dcache_page(bio_page(bio));
if (unlikely(r)) {
@@ -1371,13 +1382,14 @@ read_next_block:
static void writecache_bio_copy_ssd(struct dm_writecache *wc, struct bio *bio,
struct wc_entry *e, bool search_used)
{
- unsigned bio_size = wc->block_size;
+ unsigned int bio_size = wc->block_size;
sector_t start_cache_sec = cache_sector(wc, e);
sector_t current_cache_sec = start_cache_sec + (bio_size >> SECTOR_SHIFT);
while (bio_size < bio->bi_iter.bi_size) {
if (!search_used) {
struct wc_entry *f = writecache_pop_from_freelist(wc, current_cache_sec);
+
if (!f)
break;
write_original_sector_seq_count(wc, f, bio->bi_iter.bi_sector +
@@ -1387,6 +1399,7 @@ static void writecache_bio_copy_ssd(struct dm_writecache *wc, struct bio *bio,
} else {
struct wc_entry *f;
struct rb_node *next = rb_next(&e->rb_node);
+
if (!next)
break;
f = container_of(next, struct wc_entry, rb_node);
@@ -1427,6 +1440,7 @@ static enum wc_map_op writecache_map_write(struct dm_writecache *wc, struct bio
do {
bool found_entry = false;
bool search_used = false;
+
if (writecache_has_error(wc)) {
wc->stats.writes += bio->bi_iter.bi_size >> wc->block_size_bits;
return WC_MAP_ERROR;
@@ -1540,7 +1554,7 @@ static int writecache_map(struct dm_target *ti, struct bio *bio)
bio->bi_iter.bi_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);
- if (unlikely((((unsigned)bio->bi_iter.bi_sector | bio_sectors(bio)) &
+ if (unlikely((((unsigned int)bio->bi_iter.bi_sector | bio_sectors(bio)) &
(wc->block_size / 512 - 1)) != 0)) {
DMERR("I/O is not aligned, sector %llu, size %u, block size %u",
(unsigned long long)bio->bi_iter.bi_sector,
@@ -1605,6 +1619,7 @@ static int writecache_end_io(struct dm_target *ti, struct bio *bio, blk_status_t
if (bio->bi_private == (void *)1) {
int dir = bio_data_dir(bio);
+
if (atomic_dec_and_test(&wc->bio_in_progress[dir]))
if (unlikely(waitqueue_active(&wc->bio_in_progress_wait[dir])))
wake_up(&wc->bio_in_progress_wait[dir]);
@@ -1666,7 +1681,7 @@ static void writecache_copy_endio(int read_err, unsigned long write_err, void *p
static void __writecache_endio_pmem(struct dm_writecache *wc, struct list_head *list)
{
- unsigned i;
+ unsigned int i;
struct writeback_struct *wb;
struct wc_entry *e;
unsigned long n_walked = 0;
@@ -1782,7 +1797,7 @@ pop_from_list:
static bool wc_add_block(struct writeback_struct *wb, struct wc_entry *e)
{
struct dm_writecache *wc = wb->wc;
- unsigned block_size = wc->block_size;
+ unsigned int block_size = wc->block_size;
void *address = memory_data(wc, e);
persistent_memory_flush_cache(address, block_size);
@@ -1817,7 +1832,7 @@ static void __writecache_writeback_pmem(struct dm_writecache *wc, struct writeba
struct wc_entry *e, *f;
struct bio *bio;
struct writeback_struct *wb;
- unsigned max_pages;
+ unsigned int max_pages;
while (wbl->size) {
wbl->size--;
@@ -1832,10 +1847,13 @@ static void __writecache_writeback_pmem(struct dm_writecache *wc, struct writeba
wb->wc = wc;
bio->bi_end_io = writecache_writeback_endio;
bio->bi_iter.bi_sector = read_original_sector(wc, e);
- if (max_pages <= WB_LIST_INLINE ||
- unlikely(!(wb->wc_list = kmalloc_array(max_pages, sizeof(struct wc_entry *),
- GFP_NOIO | __GFP_NORETRY |
- __GFP_NOMEMALLOC | __GFP_NOWARN)))) {
+
+ if (unlikely(max_pages > WB_LIST_INLINE))
+ wb->wc_list = kmalloc_array(max_pages, sizeof(struct wc_entry *),
+ GFP_NOIO | __GFP_NORETRY |
+ __GFP_NOMEMALLOC | __GFP_NOWARN);
+
+ if (likely(max_pages <= WB_LIST_INLINE) || unlikely(!wb->wc_list)) {
wb->wc_list = wb->wc_list_inline;
max_pages = WB_LIST_INLINE;
}
@@ -1880,7 +1898,7 @@ static void __writecache_writeback_ssd(struct dm_writecache *wc, struct writebac
struct copy_struct *c;
while (wbl->size) {
- unsigned n_sectors;
+ unsigned int n_sectors;
wbl->size--;
e = container_of(wbl->list.prev, struct wc_entry, lru);
@@ -1940,6 +1958,7 @@ static void writecache_writeback(struct work_struct *work)
if (likely(wc->pause != 0)) {
while (1) {
unsigned long idle;
+
if (unlikely(wc->cleaner) || unlikely(wc->writeback_all) ||
unlikely(dm_suspended(wc->ti)))
break;
@@ -1965,9 +1984,8 @@ restart:
goto restart;
}
- if (wc->overwrote_committed) {
+ if (wc->overwrote_committed)
writecache_wait_for_ios(wc, WRITE);
- }
n_walked = 0;
INIT_LIST_HEAD(&skipped);
@@ -1996,9 +2014,9 @@ restart:
} else
e = container_of(wc->lru.prev, struct wc_entry, lru);
BUG_ON(e->write_in_progress);
- if (unlikely(!writecache_entry_is_committed(wc, e))) {
+ if (unlikely(!writecache_entry_is_committed(wc, e)))
writecache_flush(wc);
- }
+
node = rb_prev(&e->rb_node);
if (node) {
f = container_of(node, struct wc_entry, rb_node);
@@ -2087,12 +2105,13 @@ restart:
if (unlikely(wc->writeback_all)) {
wc_lock(wc);
- while (writecache_wait_for_writeback(wc));
+ while (writecache_wait_for_writeback(wc))
+ ;
wc_unlock(wc);
}
}
-static int calculate_memory_size(uint64_t device_size, unsigned block_size,
+static int calculate_memory_size(uint64_t device_size, unsigned int block_size,
size_t *n_blocks_p, size_t *n_metadata_blocks_p)
{
uint64_t n_blocks, offset;
@@ -2155,7 +2174,7 @@ static int init_memory(struct dm_writecache *wc)
writecache_flush_all_metadata(wc);
writecache_commit_flushed(wc, false);
pmem_assign(sb(wc)->magic, cpu_to_le32(MEMORY_SUPERBLOCK_MAGIC));
- writecache_flush_region(wc, &sb(wc)->magic, sizeof sb(wc)->magic);
+ writecache_flush_region(wc, &sb(wc)->magic, sizeof(sb(wc)->magic));
writecache_commit_flushed(wc, false);
return 0;
@@ -2207,12 +2226,12 @@ static void writecache_dtr(struct dm_target *ti)
kfree(wc);
}
-static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv)
+static int writecache_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
struct dm_writecache *wc;
struct dm_arg_set as;
const char *string;
- unsigned opt_params;
+ unsigned int opt_params;
size_t offset, data_size;
int i, r;
char dummy;
@@ -2384,6 +2403,7 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv)
string = dm_shift_arg(&as), opt_params--;
if (!strcasecmp(string, "start_sector") && opt_params >= 1) {
unsigned long long start_sector;
+
string = dm_shift_arg(&as), opt_params--;
if (sscanf(string, "%llu%c", &start_sector, &dummy) != 1)
goto invalid_optional;
@@ -2419,7 +2439,8 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv)
goto invalid_optional;
wc->autocommit_blocks_set = true;
} else if (!strcasecmp(string, "autocommit_time") && opt_params >= 1) {
- unsigned autocommit_msecs;
+ unsigned int autocommit_msecs;
+
string = dm_shift_arg(&as), opt_params--;
if (sscanf(string, "%u%c", &autocommit_msecs, &dummy) != 1)
goto invalid_optional;
@@ -2429,7 +2450,8 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv)
wc->autocommit_time_value = autocommit_msecs;
wc->autocommit_time_set = true;
} else if (!strcasecmp(string, "max_age") && opt_params >= 1) {
- unsigned max_age_msecs;
+ unsigned int max_age_msecs;
+
string = dm_shift_arg(&as), opt_params--;
if (sscanf(string, "%u%c", &max_age_msecs, &dummy) != 1)
goto invalid_optional;
@@ -2445,16 +2467,19 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv)
if (WC_MODE_PMEM(wc)) {
wc->writeback_fua = true;
wc->writeback_fua_set = true;
- } else goto invalid_optional;
+ } else
+ goto invalid_optional;
} else if (!strcasecmp(string, "nofua")) {
if (WC_MODE_PMEM(wc)) {
wc->writeback_fua = false;
wc->writeback_fua_set = true;
- } else goto invalid_optional;
+ } else
+ goto invalid_optional;
} else if (!strcasecmp(string, "metadata_only")) {
wc->metadata_only = true;
} else if (!strcasecmp(string, "pause_writeback") && opt_params >= 1) {
- unsigned pause_msecs;
+ unsigned int pause_msecs;
+
if (WC_MODE_PMEM(wc))
goto invalid_optional;
string = dm_shift_arg(&as), opt_params--;
@@ -2653,11 +2678,11 @@ bad:
}
static void writecache_status(struct dm_target *ti, status_type_t type,
- unsigned status_flags, char *result, unsigned maxlen)
+ unsigned int status_flags, char *result, unsigned int maxlen)
{
struct dm_writecache *wc = ti->private;
- unsigned extra_args;
- unsigned sz = 0;
+ unsigned int extra_args;
+ unsigned int sz = 0;
switch (type) {
case STATUSTYPE_INFO:
diff --git a/drivers/md/dm-zero.c b/drivers/md/dm-zero.c
index faa1dbffc8b4..2601cd520384 100644
--- a/drivers/md/dm-zero.c
+++ b/drivers/md/dm-zero.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2003 Jana Saout <jana@saout.de>
*
diff --git a/drivers/md/dm-zone.c b/drivers/md/dm-zone.c
index 3dafc0e8b7a9..4b82b7798ce4 100644
--- a/drivers/md/dm-zone.c
+++ b/drivers/md/dm-zone.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2021 Western Digital Corporation or its affiliates.
*/
diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c
index 0278482fac94..cf9402064aba 100644
--- a/drivers/md/dm-zoned-metadata.c
+++ b/drivers/md/dm-zoned-metadata.c
@@ -1013,11 +1013,9 @@ static int dmz_check_sb(struct dmz_metadata *zmd, struct dmz_sb *dsb,
}
sb_block = le64_to_cpu(sb->sb_block);
- if (sb_block != (u64)dsb->zone->id << zmd->zone_nr_blocks_shift ) {
- dmz_dev_err(dev, "Invalid superblock position "
- "(is %llu expected %llu)",
- sb_block,
- (u64)dsb->zone->id << zmd->zone_nr_blocks_shift);
+ if (sb_block != (u64)dsb->zone->id << zmd->zone_nr_blocks_shift) {
+ dmz_dev_err(dev, "Invalid superblock position (is %llu expected %llu)",
+ sb_block, (u64)dsb->zone->id << zmd->zone_nr_blocks_shift);
return -EINVAL;
}
if (zmd->sb_version > 1) {
@@ -1030,16 +1028,14 @@ static int dmz_check_sb(struct dmz_metadata *zmd, struct dmz_sb *dsb,
} else if (uuid_is_null(&zmd->uuid)) {
uuid_copy(&zmd->uuid, &sb_uuid);
} else if (!uuid_equal(&zmd->uuid, &sb_uuid)) {
- dmz_dev_err(dev, "mismatching DM-Zoned uuid, "
- "is %pUl expected %pUl",
+ dmz_dev_err(dev, "mismatching DM-Zoned uuid, is %pUl expected %pUl",
&sb_uuid, &zmd->uuid);
return -ENXIO;
}
if (!strlen(zmd->label))
memcpy(zmd->label, sb->dmz_label, BDEVNAME_SIZE);
else if (memcmp(zmd->label, sb->dmz_label, BDEVNAME_SIZE)) {
- dmz_dev_err(dev, "mismatching DM-Zoned label, "
- "is %s expected %s",
+ dmz_dev_err(dev, "mismatching DM-Zoned label, is %s expected %s",
sb->dmz_label, zmd->label);
return -ENXIO;
}
@@ -1346,7 +1342,7 @@ static int dmz_load_sb(struct dmz_metadata *zmd)
if (ret == -EINVAL)
goto out_kfree;
}
- out_kfree:
+out_kfree:
kfree(sb);
}
return ret;
@@ -1430,7 +1426,7 @@ static int dmz_emulate_zones(struct dmz_metadata *zmd, struct dmz_dev *dev)
int idx;
sector_t zone_offset = 0;
- for(idx = 0; idx < dev->nr_zones; idx++) {
+ for (idx = 0; idx < dev->nr_zones; idx++) {
struct dm_zone *zone;
zone = dmz_insert(zmd, idx, dev);
@@ -1457,7 +1453,7 @@ static void dmz_drop_zones(struct dmz_metadata *zmd)
{
int idx;
- for(idx = 0; idx < zmd->nr_zones; idx++) {
+ for (idx = 0; idx < zmd->nr_zones; idx++) {
struct dm_zone *zone = xa_load(&zmd->zones, idx);
kfree(zone);
@@ -2945,7 +2941,7 @@ int dmz_ctr_metadata(struct dmz_dev *dev, int num_dev,
zmd->mblk_shrinker.seeks = DEFAULT_SEEKS;
/* Metadata cache shrinker */
- ret = register_shrinker(&zmd->mblk_shrinker, "md-meta:(%u:%u)",
+ ret = register_shrinker(&zmd->mblk_shrinker, "dm-zoned-meta:(%u:%u)",
MAJOR(dev->bdev->bd_dev),
MINOR(dev->bdev->bd_dev));
if (ret) {
diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
index 95b132b52f33..ad4764dcd013 100644
--- a/drivers/md/dm-zoned-target.c
+++ b/drivers/md/dm-zoned-target.c
@@ -1119,7 +1119,6 @@ static void dmz_status(struct dm_target *ti, status_type_t type,
*result = '\0';
break;
}
- return;
}
static int dmz_message(struct dm_target *ti, unsigned int argc, char **argv,
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index b424a6ee27ba..eace45a18d45 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
* Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
@@ -49,8 +50,8 @@
static const char *_name = DM_NAME;
-static unsigned int major = 0;
-static unsigned int _major = 0;
+static unsigned int major;
+static unsigned int _major;
static DEFINE_IDR(_minor_idr);
@@ -83,7 +84,7 @@ struct clone_info {
struct bio *bio;
struct dm_io *io;
sector_t sector;
- unsigned sector_count;
+ unsigned int sector_count;
bool is_abnormal_io:1;
bool submit_as_polled:1;
};
@@ -104,6 +105,7 @@ EXPORT_SYMBOL_GPL(dm_per_bio_data);
struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size)
{
struct dm_io *io = (struct dm_io *)((char *)data + data_size);
+
if (io->magic == DM_IO_MAGIC)
return (struct bio *)((char *)io + DM_IO_BIO_OFFSET);
BUG_ON(io->magic != DM_TIO_MAGIC);
@@ -111,7 +113,7 @@ struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size)
}
EXPORT_SYMBOL_GPL(dm_bio_from_per_bio_data);
-unsigned dm_bio_get_target_bio_nr(const struct bio *bio)
+unsigned int dm_bio_get_target_bio_nr(const struct bio *bio)
{
return container_of(bio, struct dm_target_io, clone)->target_bio_nr;
}
@@ -127,6 +129,7 @@ static int swap_bios = DEFAULT_SWAP_BIOS;
static int get_swap_bios(void)
{
int latch = READ_ONCE(swap_bios);
+
if (unlikely(latch <= 0))
latch = DEFAULT_SWAP_BIOS;
return latch;
@@ -142,7 +145,7 @@ struct table_device {
* Bio-based DM's mempools' reserved IOs set by the user.
*/
#define RESERVED_BIO_BASED_IOS 16
-static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS;
+static unsigned int reserved_bio_based_ios = RESERVED_BIO_BASED_IOS;
static int __dm_get_module_param_int(int *module_param, int min, int max)
{
@@ -165,11 +168,10 @@ static int __dm_get_module_param_int(int *module_param, int min, int max)
return param;
}
-unsigned __dm_get_module_param(unsigned *module_param,
- unsigned def, unsigned max)
+unsigned int __dm_get_module_param(unsigned int *module_param, unsigned int def, unsigned int max)
{
- unsigned param = READ_ONCE(*module_param);
- unsigned modified_param = 0;
+ unsigned int param = READ_ONCE(*module_param);
+ unsigned int modified_param = 0;
if (!param)
modified_param = def;
@@ -184,14 +186,14 @@ unsigned __dm_get_module_param(unsigned *module_param,
return param;
}
-unsigned dm_get_reserved_bio_based_ios(void)
+unsigned int dm_get_reserved_bio_based_ios(void)
{
return __dm_get_module_param(&reserved_bio_based_ios,
RESERVED_BIO_BASED_IOS, DM_RESERVED_MAX_IOS);
}
EXPORT_SYMBOL_GPL(dm_get_reserved_bio_based_ios);
-static unsigned dm_get_numa_node(void)
+static unsigned int dm_get_numa_node(void)
{
return __dm_get_module_param_int(&dm_numa_node,
DM_NUMA_NODE, num_online_nodes() - 1);
@@ -231,7 +233,6 @@ out_uevent_exit:
static void local_exit(void)
{
- flush_scheduled_work();
destroy_workqueue(deferred_remove_workqueue);
unregister_blkdev(_major, _name);
@@ -435,7 +436,7 @@ retry:
r = ti->type->prepare_ioctl(ti, bdev);
if (r == -ENOTCONN && !fatal_signal_pending(current)) {
dm_put_live_table(md, *srcu_idx);
- msleep(10);
+ fsleep(10000);
goto retry;
}
@@ -604,7 +605,7 @@ static void free_io(struct dm_io *io)
}
static struct bio *alloc_tio(struct clone_info *ci, struct dm_target *ti,
- unsigned target_bio_nr, unsigned *len, gfp_t gfp_mask)
+ unsigned int target_bio_nr, unsigned int *len, gfp_t gfp_mask)
{
struct mapped_device *md = ci->io->md;
struct dm_target_io *tio;
@@ -1008,6 +1009,7 @@ static void dm_wq_requeue_work(struct work_struct *work)
io->next = NULL;
__dm_io_complete(io, false);
io = next;
+ cond_resched();
}
}
@@ -1115,6 +1117,7 @@ static void clone_endio(struct bio *bio)
if (endio) {
int r = endio(ti, bio, &error);
+
switch (r) {
case DM_ENDIO_REQUEUE:
if (static_branch_unlikely(&zoned_enabled)) {
@@ -1314,11 +1317,11 @@ out:
* the partially processed part (the sum of regions 1+2) must be the same for all
* copies of the bio.
*/
-void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
+void dm_accept_partial_bio(struct bio *bio, unsigned int n_sectors)
{
struct dm_target_io *tio = clone_to_tio(bio);
struct dm_io *io = tio->io;
- unsigned bio_sectors = bio_sectors(bio);
+ unsigned int bio_sectors = bio_sectors(bio);
BUG_ON(dm_tio_flagged(tio, DM_TIO_IS_DUPLICATE_BIO));
BUG_ON(op_is_zone_mgmt(bio_op(bio)));
@@ -1403,6 +1406,7 @@ static void __map_bio(struct bio *clone)
if (static_branch_unlikely(&swap_bios_enabled) &&
unlikely(swap_bios_limit(ti, clone))) {
int latch = get_swap_bios();
+
if (unlikely(latch != md->swap_bios))
__set_swap_bios_limit(md, latch);
down(&md->swap_bios_semaphore);
@@ -1447,7 +1451,7 @@ static void __map_bio(struct bio *clone)
}
}
-static void setup_split_accounting(struct clone_info *ci, unsigned len)
+static void setup_split_accounting(struct clone_info *ci, unsigned int len)
{
struct dm_io *io = ci->io;
@@ -1463,7 +1467,7 @@ static void setup_split_accounting(struct clone_info *ci, unsigned len)
}
static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci,
- struct dm_target *ti, unsigned num_bios)
+ struct dm_target *ti, unsigned int num_bios)
{
struct bio *bio;
int try;
@@ -1492,7 +1496,7 @@ static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci,
}
static int __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
- unsigned int num_bios, unsigned *len)
+ unsigned int num_bios, unsigned int *len)
{
struct bio_list blist = BIO_EMPTY_LIST;
struct bio *clone;
@@ -1558,10 +1562,9 @@ static void __send_empty_flush(struct clone_info *ci)
}
static void __send_changing_extent_only(struct clone_info *ci, struct dm_target *ti,
- unsigned num_bios)
+ unsigned int num_bios)
{
- unsigned len;
- unsigned int bios;
+ unsigned int len, bios;
len = min_t(sector_t, ci->sector_count,
max_io_len_target_boundary(ti, dm_target_offset(ti, ci->sector)));
@@ -1599,7 +1602,7 @@ static bool is_abnormal_io(struct bio *bio)
static blk_status_t __process_abnormal_io(struct clone_info *ci,
struct dm_target *ti)
{
- unsigned num_bios = 0;
+ unsigned int num_bios = 0;
switch (bio_op(ci->bio)) {
case REQ_OP_DISCARD:
@@ -1677,7 +1680,7 @@ static blk_status_t __split_and_process_bio(struct clone_info *ci)
{
struct bio *clone;
struct dm_target *ti;
- unsigned len;
+ unsigned int len;
ti = dm_table_find_target(ci->map, ci->sector);
if (unlikely(!ti))
@@ -1874,9 +1877,11 @@ static int dm_poll_bio(struct bio *bio, struct io_comp_batch *iob,
return 1;
}
-/*-----------------------------------------------------------------
+/*
+ *---------------------------------------------------------------
* An IDR is used to keep track of allocated minor numbers.
- *---------------------------------------------------------------*/
+ *---------------------------------------------------------------
+ */
static void free_minor(int minor)
{
spin_lock(&_minor_lock);
@@ -2139,7 +2144,7 @@ static void event_callback(void *context)
{
unsigned long flags;
LIST_HEAD(uevents);
- struct mapped_device *md = (struct mapped_device *) context;
+ struct mapped_device *md = context;
spin_lock_irqsave(&md->uevent_lock, flags);
list_splice_init(&md->uevent_list, &uevents);
@@ -2172,10 +2177,7 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
if (size != dm_get_size(md))
memset(&md->geometry, 0, sizeof(md->geometry));
- if (!get_capacity(md->disk))
- set_capacity(md->disk, size);
- else
- set_capacity_and_notify(md->disk, size);
+ set_capacity(md->disk, size);
dm_table_event_callback(t, event_callback, md);
@@ -2375,7 +2377,7 @@ out_undo_holders:
struct mapped_device *dm_get_md(dev_t dev)
{
struct mapped_device *md;
- unsigned minor = MINOR(dev);
+ unsigned int minor = MINOR(dev);
if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
return NULL;
@@ -2457,7 +2459,7 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
set_bit(DMF_POST_SUSPENDING, &md->flags);
dm_table_postsuspend_targets(map);
}
- /* dm_put_live_table must be before msleep, otherwise deadlock is possible */
+ /* dm_put_live_table must be before fsleep, otherwise deadlock is possible */
dm_put_live_table(md, srcu_idx);
mutex_unlock(&md->suspend_lock);
@@ -2469,7 +2471,7 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
*/
if (wait)
while (atomic_read(&md->holders))
- msleep(1);
+ fsleep(1000);
else if (atomic_read(&md->holders))
DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)",
dm_device_name(md), atomic_read(&md->holders));
@@ -2546,7 +2548,7 @@ static int dm_wait_for_completion(struct mapped_device *md, unsigned int task_st
break;
}
- msleep(5);
+ fsleep(5000);
}
return r;
@@ -2569,6 +2571,7 @@ static void dm_wq_work(struct work_struct *work)
break;
submit_bio_noacct(bio);
+ cond_resched();
}
}
@@ -2657,7 +2660,7 @@ static void unlock_fs(struct mapped_device *md)
* are being added to md->deferred list.
*/
static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
- unsigned suspend_flags, unsigned int task_state,
+ unsigned int suspend_flags, unsigned int task_state,
int dmf_suspended_flag)
{
bool do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG;
@@ -2764,7 +2767,7 @@ static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
*
* To abort suspend, start the request_queue.
*/
-int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
+int dm_suspend(struct mapped_device *md, unsigned int suspend_flags)
{
struct dm_table *map = NULL;
int r = 0;
@@ -2805,6 +2808,7 @@ static int __dm_resume(struct mapped_device *md, struct dm_table *map)
{
if (map) {
int r = dm_table_resume_targets(map);
+
if (r)
return r;
}
@@ -2866,7 +2870,7 @@ out:
* It may be used only from the kernel.
*/
-static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_flags)
+static void __dm_internal_suspend(struct mapped_device *md, unsigned int suspend_flags)
{
struct dm_table *map = NULL;
@@ -2964,27 +2968,31 @@ done:
}
EXPORT_SYMBOL_GPL(dm_internal_resume_fast);
-/*-----------------------------------------------------------------
+/*
+ *---------------------------------------------------------------
* Event notification.
- *---------------------------------------------------------------*/
+ *---------------------------------------------------------------
+ */
int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
- unsigned cookie)
+ unsigned int cookie, bool need_resize_uevent)
{
int r;
- unsigned noio_flag;
+ unsigned int noio_flag;
char udev_cookie[DM_COOKIE_LENGTH];
- char *envp[] = { udev_cookie, NULL };
-
- noio_flag = memalloc_noio_save();
-
- if (!cookie)
- r = kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
- else {
+ char *envp[3] = { NULL, NULL, NULL };
+ char **envpp = envp;
+ if (cookie) {
snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
DM_COOKIE_ENV_VAR_NAME, cookie);
- r = kobject_uevent_env(&disk_to_dev(md->disk)->kobj,
- action, envp);
+ *envpp++ = udev_cookie;
}
+ if (need_resize_uevent) {
+ *envpp++ = "RESIZE=1";
+ }
+
+ noio_flag = memalloc_noio_save();
+
+ r = kobject_uevent_env(&disk_to_dev(md->disk)->kobj, action, envp);
memalloc_noio_restore(noio_flag);
@@ -3382,13 +3390,13 @@ module_exit(dm_exit);
module_param(major, uint, 0);
MODULE_PARM_DESC(major, "The major number of the device mapper");
-module_param(reserved_bio_based_ios, uint, S_IRUGO | S_IWUSR);
+module_param(reserved_bio_based_ios, uint, 0644);
MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools");
-module_param(dm_numa_node, int, S_IRUGO | S_IWUSR);
+module_param(dm_numa_node, int, 0644);
MODULE_PARM_DESC(dm_numa_node, "NUMA node for DM device memory allocations");
-module_param(swap_bios, int, S_IRUGO | S_IWUSR);
+module_param(swap_bios, int, 0644);
MODULE_PARM_DESC(swap_bios, "Maximum allowed inflight swap IOs");
MODULE_DESCRIPTION(DM_NAME " driver");
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index 5201df03ce40..22eaed188907 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -48,9 +48,11 @@ struct dm_md_mempools;
struct dm_target_io;
struct dm_io;
-/*-----------------------------------------------------------------
+/*
+ *---------------------------------------------------------------
* Internal table functions.
- *---------------------------------------------------------------*/
+ *---------------------------------------------------------------
+ */
void dm_table_event_callback(struct dm_table *t,
void (*fn)(void *), void *context);
struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector);
@@ -119,9 +121,11 @@ static inline int dm_zone_map_bio(struct dm_target_io *tio)
}
#endif
-/*-----------------------------------------------------------------
+/*
+ *---------------------------------------------------------------
* A registry of target types.
- *---------------------------------------------------------------*/
+ *---------------------------------------------------------------
+ */
int dm_target_init(void);
void dm_target_exit(void);
struct target_type *dm_get_target_type(const char *name);
@@ -203,7 +207,7 @@ int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode,
void dm_put_table_device(struct mapped_device *md, struct dm_dev *d);
int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
- unsigned cookie);
+ unsigned int cookie, bool need_resize_uevent);
void dm_internal_suspend(struct mapped_device *md);
void dm_internal_resume(struct mapped_device *md);
@@ -222,6 +226,6 @@ void dm_free_md_mempools(struct dm_md_mempools *pools);
/*
* Various helpers
*/
-unsigned dm_get_reserved_bio_based_ios(void);
+unsigned int dm_get_reserved_bio_based_ios(void);
#endif
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 02b0240e7c71..927a43db5dfb 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -380,6 +380,10 @@ EXPORT_SYMBOL_GPL(md_new_event);
static LIST_HEAD(all_mddevs);
static DEFINE_SPINLOCK(all_mddevs_lock);
+static bool is_md_suspended(struct mddev *mddev)
+{
+ return percpu_ref_is_dying(&mddev->active_io);
+}
/* Rather than calling directly into the personality make_request function,
* IO requests come here first so that we can check if the device is
* being suspended pending a reconfiguration.
@@ -389,7 +393,7 @@ static DEFINE_SPINLOCK(all_mddevs_lock);
*/
static bool is_suspended(struct mddev *mddev, struct bio *bio)
{
- if (mddev->suspended)
+ if (is_md_suspended(mddev))
return true;
if (bio_data_dir(bio) != WRITE)
return false;
@@ -405,12 +409,10 @@ static bool is_suspended(struct mddev *mddev, struct bio *bio)
void md_handle_request(struct mddev *mddev, struct bio *bio)
{
check_suspended:
- rcu_read_lock();
if (is_suspended(mddev, bio)) {
DEFINE_WAIT(__wait);
/* Bail out if REQ_NOWAIT is set for the bio */
if (bio->bi_opf & REQ_NOWAIT) {
- rcu_read_unlock();
bio_wouldblock_error(bio);
return;
}
@@ -419,23 +421,19 @@ check_suspended:
TASK_UNINTERRUPTIBLE);
if (!is_suspended(mddev, bio))
break;
- rcu_read_unlock();
schedule();
- rcu_read_lock();
}
finish_wait(&mddev->sb_wait, &__wait);
}
- atomic_inc(&mddev->active_io);
- rcu_read_unlock();
+ if (!percpu_ref_tryget_live(&mddev->active_io))
+ goto check_suspended;
if (!mddev->pers->make_request(mddev, bio)) {
- atomic_dec(&mddev->active_io);
- wake_up(&mddev->sb_wait);
+ percpu_ref_put(&mddev->active_io);
goto check_suspended;
}
- if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
- wake_up(&mddev->sb_wait);
+ percpu_ref_put(&mddev->active_io);
}
EXPORT_SYMBOL(md_handle_request);
@@ -483,11 +481,10 @@ void mddev_suspend(struct mddev *mddev)
lockdep_assert_held(&mddev->reconfig_mutex);
if (mddev->suspended++)
return;
- synchronize_rcu();
wake_up(&mddev->sb_wait);
set_bit(MD_ALLOW_SB_UPDATE, &mddev->flags);
- smp_mb__after_atomic();
- wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0);
+ percpu_ref_kill(&mddev->active_io);
+ wait_event(mddev->sb_wait, percpu_ref_is_zero(&mddev->active_io));
mddev->pers->quiesce(mddev, 1);
clear_bit_unlock(MD_ALLOW_SB_UPDATE, &mddev->flags);
wait_event(mddev->sb_wait, !test_bit(MD_UPDATING_SB, &mddev->flags));
@@ -505,6 +502,7 @@ void mddev_resume(struct mddev *mddev)
lockdep_assert_held(&mddev->reconfig_mutex);
if (--mddev->suspended)
return;
+ percpu_ref_resurrect(&mddev->active_io);
wake_up(&mddev->sb_wait);
mddev->pers->quiesce(mddev, 0);
@@ -683,7 +681,6 @@ void mddev_init(struct mddev *mddev)
timer_setup(&mddev->safemode_timer, md_safemode_timeout, 0);
atomic_set(&mddev->active, 1);
atomic_set(&mddev->openers, 0);
- atomic_set(&mddev->active_io, 0);
spin_lock_init(&mddev->lock);
atomic_set(&mddev->flush_pending, 0);
init_waitqueue_head(&mddev->sb_wait);
@@ -5760,6 +5757,12 @@ static void md_safemode_timeout(struct timer_list *t)
}
static int start_dirty_degraded;
+static void active_io_release(struct percpu_ref *ref)
+{
+ struct mddev *mddev = container_of(ref, struct mddev, active_io);
+
+ wake_up(&mddev->sb_wait);
+}
int md_run(struct mddev *mddev)
{
@@ -5840,10 +5843,15 @@ int md_run(struct mddev *mddev)
nowait = nowait && bdev_nowait(rdev->bdev);
}
+ err = percpu_ref_init(&mddev->active_io, active_io_release,
+ PERCPU_REF_ALLOW_REINIT, GFP_KERNEL);
+ if (err)
+ return err;
+
if (!bioset_initialized(&mddev->bio_set)) {
err = bioset_init(&mddev->bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
if (err)
- return err;
+ goto exit_active_io;
}
if (!bioset_initialized(&mddev->sync_set)) {
err = bioset_init(&mddev->sync_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
@@ -6031,6 +6039,8 @@ abort:
bioset_exit(&mddev->sync_set);
exit_bio_set:
bioset_exit(&mddev->bio_set);
+exit_active_io:
+ percpu_ref_exit(&mddev->active_io);
return err;
}
EXPORT_SYMBOL_GPL(md_run);
@@ -6156,7 +6166,7 @@ static void md_clean(struct mddev *mddev)
mddev->new_level = LEVEL_NONE;
mddev->new_layout = 0;
mddev->new_chunk_sectors = 0;
- mddev->curr_resync = 0;
+ mddev->curr_resync = MD_RESYNC_NONE;
atomic64_set(&mddev->resync_mismatches, 0);
mddev->suspend_lo = mddev->suspend_hi = 0;
mddev->sync_speed_min = mddev->sync_speed_max = 0;
@@ -6219,7 +6229,7 @@ EXPORT_SYMBOL_GPL(md_stop_writes);
static void mddev_detach(struct mddev *mddev)
{
md_bitmap_wait_behind_writes(mddev);
- if (mddev->pers && mddev->pers->quiesce && !mddev->suspended) {
+ if (mddev->pers && mddev->pers->quiesce && !is_md_suspended(mddev)) {
mddev->pers->quiesce(mddev, 1);
mddev->pers->quiesce(mddev, 0);
}
@@ -6255,6 +6265,8 @@ void md_stop(struct mddev *mddev)
*/
__md_stop_writes(mddev);
__md_stop(mddev);
+ percpu_ref_exit(&mddev->writes_pending);
+ percpu_ref_exit(&mddev->active_io);
bioset_exit(&mddev->bio_set);
bioset_exit(&mddev->sync_set);
}
@@ -7828,6 +7840,7 @@ static void md_free_disk(struct gendisk *disk)
struct mddev *mddev = disk->private_data;
percpu_ref_exit(&mddev->writes_pending);
+ percpu_ref_exit(&mddev->active_io);
bioset_exit(&mddev->bio_set);
bioset_exit(&mddev->sync_set);
@@ -8531,7 +8544,7 @@ bool md_write_start(struct mddev *mddev, struct bio *bi)
return true;
wait_event(mddev->sb_wait,
!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags) ||
- mddev->suspended);
+ is_md_suspended(mddev));
if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
percpu_ref_put(&mddev->writes_pending);
return false;
@@ -8615,12 +8628,15 @@ static void md_end_io_acct(struct bio *bio)
{
struct md_io_acct *md_io_acct = bio->bi_private;
struct bio *orig_bio = md_io_acct->orig_bio;
+ struct mddev *mddev = md_io_acct->mddev;
orig_bio->bi_status = bio->bi_status;
bio_end_io_acct(orig_bio, md_io_acct->start_time);
bio_put(bio);
bio_endio(orig_bio);
+
+ percpu_ref_put(&mddev->active_io);
}
/*
@@ -8636,10 +8652,13 @@ void md_account_bio(struct mddev *mddev, struct bio **bio)
if (!blk_queue_io_stat(bdev->bd_disk->queue))
return;
+ percpu_ref_get(&mddev->active_io);
+
clone = bio_alloc_clone(bdev, *bio, GFP_NOIO, &mddev->io_acct_set);
md_io_acct = container_of(clone, struct md_io_acct, bio_clone);
md_io_acct->orig_bio = *bio;
md_io_acct->start_time = bio_start_io_acct(*bio);
+ md_io_acct->mddev = mddev;
clone->bi_end_io = md_end_io_acct;
clone->bi_private = md_io_acct;
@@ -8883,7 +8902,7 @@ void md_do_sync(struct md_thread *thread)
atomic_set(&mddev->recovery_active, 0);
last_check = 0;
- if (j>2) {
+ if (j >= MD_RESYNC_ACTIVE) {
pr_debug("md: resuming %s of %s from checkpoint.\n",
desc, mdname(mddev));
mddev->curr_resync = j;
@@ -8955,7 +8974,7 @@ void md_do_sync(struct md_thread *thread)
if (j > max_sectors)
/* when skipping, extra large numbers can be returned. */
j = max_sectors;
- if (j > 2)
+ if (j >= MD_RESYNC_ACTIVE)
mddev->curr_resync = j;
mddev->curr_mark_cnt = io_sectors;
if (last_check == 0)
@@ -9030,7 +9049,7 @@ void md_do_sync(struct md_thread *thread)
mddev->pers->sync_request(mddev, max_sectors, &skipped);
if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
- mddev->curr_resync >= MD_RESYNC_ACTIVE) {
+ mddev->curr_resync > MD_RESYNC_ACTIVE) {
if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
if (mddev->curr_resync >= mddev->recovery_cp) {
@@ -9259,7 +9278,7 @@ void md_check_recovery(struct mddev *mddev)
wake_up(&mddev->sb_wait);
}
- if (mddev->suspended)
+ if (is_md_suspended(mddev))
return;
if (mddev->bitmap)
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 554a9026669a..e148e3c83b0d 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -315,7 +315,7 @@ struct mddev {
unsigned long sb_flags;
int suspended;
- atomic_t active_io;
+ struct percpu_ref active_io;
int ro;
int sysfs_active; /* set when sysfs deletes
* are happening, so run/
@@ -710,9 +710,10 @@ struct md_thread {
};
struct md_io_acct {
- struct bio *orig_bio;
- unsigned long start_time;
- struct bio bio_clone;
+ struct mddev *mddev;
+ struct bio *orig_bio;
+ unsigned long start_time;
+ struct bio bio_clone;
};
#define THREAD_WAKEUP 0
diff --git a/drivers/md/persistent-data/dm-array.c b/drivers/md/persistent-data/dm-array.c
index 3a963d783a86..798c9c53a343 100644
--- a/drivers/md/persistent-data/dm-array.c
+++ b/drivers/md/persistent-data/dm-array.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2012 Red Hat, Inc.
*
@@ -57,7 +58,7 @@ static int array_block_check(struct dm_block_validator *v,
__le32 csum_disk;
if (dm_block_location(b) != le64_to_cpu(bh_le->blocknr)) {
- DMERR_LIMIT("array_block_check failed: blocknr %llu != wanted %llu",
+ DMERR_LIMIT("%s failed: blocknr %llu != wanted %llu", __func__,
(unsigned long long) le64_to_cpu(bh_le->blocknr),
(unsigned long long) dm_block_location(b));
return -ENOTBLK;
@@ -67,9 +68,9 @@ static int array_block_check(struct dm_block_validator *v,
size_of_block - sizeof(__le32),
CSUM_XOR));
if (csum_disk != bh_le->csum) {
- DMERR_LIMIT("array_block_check failed: csum %u != wanted %u",
- (unsigned) le32_to_cpu(csum_disk),
- (unsigned) le32_to_cpu(bh_le->csum));
+ DMERR_LIMIT("%s failed: csum %u != wanted %u", __func__,
+ (unsigned int) le32_to_cpu(csum_disk),
+ (unsigned int) le32_to_cpu(bh_le->csum));
return -EILSEQ;
}
@@ -94,7 +95,7 @@ static struct dm_block_validator array_validator = {
* index - The index into _this_ specific block.
*/
static void *element_at(struct dm_array_info *info, struct array_block *ab,
- unsigned index)
+ unsigned int index)
{
unsigned char *entry = (unsigned char *) (ab + 1);
@@ -108,9 +109,10 @@ static void *element_at(struct dm_array_info *info, struct array_block *ab,
* in an array block.
*/
static void on_entries(struct dm_array_info *info, struct array_block *ab,
- void (*fn)(void *, const void *, unsigned))
+ void (*fn)(void *, const void *, unsigned int))
{
- unsigned nr_entries = le32_to_cpu(ab->nr_entries);
+ unsigned int nr_entries = le32_to_cpu(ab->nr_entries);
+
fn(info->value_type.context, element_at(info, ab, 0), nr_entries);
}
@@ -171,7 +173,7 @@ static int alloc_ablock(struct dm_array_info *info, size_t size_of_block,
* the current number of entries.
*/
static void fill_ablock(struct dm_array_info *info, struct array_block *ab,
- const void *value, unsigned new_nr)
+ const void *value, unsigned int new_nr)
{
uint32_t nr_entries, delta, i;
struct dm_btree_value_type *vt = &info->value_type;
@@ -194,7 +196,7 @@ static void fill_ablock(struct dm_array_info *info, struct array_block *ab,
* entries.
*/
static void trim_ablock(struct dm_array_info *info, struct array_block *ab,
- unsigned new_nr)
+ unsigned int new_nr)
{
uint32_t nr_entries, delta;
struct dm_btree_value_type *vt = &info->value_type;
@@ -247,7 +249,7 @@ static void unlock_ablock(struct dm_array_info *info, struct dm_block *block)
* / max_entries).
*/
static int lookup_ablock(struct dm_array_info *info, dm_block_t root,
- unsigned index, struct dm_block **block,
+ unsigned int index, struct dm_block **block,
struct array_block **ab)
{
int r;
@@ -295,7 +297,7 @@ static int __shadow_ablock(struct dm_array_info *info, dm_block_t b,
* The shadow op will often be a noop. Only insert if it really
* copied data.
*/
-static int __reinsert_ablock(struct dm_array_info *info, unsigned index,
+static int __reinsert_ablock(struct dm_array_info *info, unsigned int index,
struct dm_block *block, dm_block_t b,
dm_block_t *root)
{
@@ -321,7 +323,7 @@ static int __reinsert_ablock(struct dm_array_info *info, unsigned index,
* for both the current root block, and the new one.
*/
static int shadow_ablock(struct dm_array_info *info, dm_block_t *root,
- unsigned index, struct dm_block **block,
+ unsigned int index, struct dm_block **block,
struct array_block **ab)
{
int r;
@@ -346,7 +348,7 @@ static int shadow_ablock(struct dm_array_info *info, dm_block_t *root,
*/
static int insert_new_ablock(struct dm_array_info *info, size_t size_of_block,
uint32_t max_entries,
- unsigned block_index, uint32_t nr,
+ unsigned int block_index, uint32_t nr,
const void *value, dm_block_t *root)
{
int r;
@@ -365,8 +367,8 @@ static int insert_new_ablock(struct dm_array_info *info, size_t size_of_block,
}
static int insert_full_ablocks(struct dm_array_info *info, size_t size_of_block,
- unsigned begin_block, unsigned end_block,
- unsigned max_entries, const void *value,
+ unsigned int begin_block, unsigned int end_block,
+ unsigned int max_entries, const void *value,
dm_block_t *root)
{
int r = 0;
@@ -402,20 +404,20 @@ struct resize {
/*
* Maximum nr entries in an array block.
*/
- unsigned max_entries;
+ unsigned int max_entries;
/*
* nr of completely full blocks in the array.
*
* 'old' refers to before the resize, 'new' after.
*/
- unsigned old_nr_full_blocks, new_nr_full_blocks;
+ unsigned int old_nr_full_blocks, new_nr_full_blocks;
/*
* Number of entries in the final block. 0 iff only full blocks in
* the array.
*/
- unsigned old_nr_entries_in_last_block, new_nr_entries_in_last_block;
+ unsigned int old_nr_entries_in_last_block, new_nr_entries_in_last_block;
/*
* The default value used when growing the array.
@@ -430,13 +432,14 @@ struct resize {
* begin_index - the index of the first array block to remove.
* end_index - the one-past-the-end value. ie. this block is not removed.
*/
-static int drop_blocks(struct resize *resize, unsigned begin_index,
- unsigned end_index)
+static int drop_blocks(struct resize *resize, unsigned int begin_index,
+ unsigned int end_index)
{
int r;
while (begin_index != end_index) {
uint64_t key = begin_index++;
+
r = dm_btree_remove(&resize->info->btree_info, resize->root,
&key, &resize->root);
if (r)
@@ -449,8 +452,8 @@ static int drop_blocks(struct resize *resize, unsigned begin_index,
/*
* Calculates how many blocks are needed for the array.
*/
-static unsigned total_nr_blocks_needed(unsigned nr_full_blocks,
- unsigned nr_entries_in_last_block)
+static unsigned int total_nr_blocks_needed(unsigned int nr_full_blocks,
+ unsigned int nr_entries_in_last_block)
{
return nr_full_blocks + (nr_entries_in_last_block ? 1 : 0);
}
@@ -461,7 +464,7 @@ static unsigned total_nr_blocks_needed(unsigned nr_full_blocks,
static int shrink(struct resize *resize)
{
int r;
- unsigned begin, end;
+ unsigned int begin, end;
struct dm_block *block;
struct array_block *ab;
@@ -527,7 +530,7 @@ static int grow_add_tail_block(struct resize *resize)
static int grow_needs_more_blocks(struct resize *resize)
{
int r;
- unsigned old_nr_blocks = resize->old_nr_full_blocks;
+ unsigned int old_nr_blocks = resize->old_nr_full_blocks;
if (resize->old_nr_entries_in_last_block > 0) {
old_nr_blocks++;
@@ -569,11 +572,11 @@ static int grow(struct resize *resize)
* These are the value_type functions for the btree elements, which point
* to array blocks.
*/
-static void block_inc(void *context, const void *value, unsigned count)
+static void block_inc(void *context, const void *value, unsigned int count)
{
const __le64 *block_le = value;
struct dm_array_info *info = context;
- unsigned i;
+ unsigned int i;
for (i = 0; i < count; i++, block_le++)
dm_tm_inc(info->btree_info.tm, le64_to_cpu(*block_le));
@@ -618,9 +621,10 @@ static void __block_dec(void *context, const void *value)
dm_tm_dec(info->btree_info.tm, b);
}
-static void block_dec(void *context, const void *value, unsigned count)
+static void block_dec(void *context, const void *value, unsigned int count)
{
- unsigned i;
+ unsigned int i;
+
for (i = 0; i < count; i++, value += sizeof(__le64))
__block_dec(context, value);
}
@@ -691,19 +695,21 @@ static int array_resize(struct dm_array_info *info, dm_block_t root,
int dm_array_resize(struct dm_array_info *info, dm_block_t root,
uint32_t old_size, uint32_t new_size,
const void *value, dm_block_t *new_root)
- __dm_written_to_disk(value)
+ __dm_written_to_disk(value)
{
int r = array_resize(info, root, old_size, new_size, value, new_root);
+
__dm_unbless_for_disk(value);
return r;
}
EXPORT_SYMBOL_GPL(dm_array_resize);
static int populate_ablock_with_values(struct dm_array_info *info, struct array_block *ab,
- value_fn fn, void *context, unsigned base, unsigned new_nr)
+ value_fn fn, void *context,
+ unsigned int base, unsigned int new_nr)
{
int r;
- unsigned i;
+ unsigned int i;
struct dm_btree_value_type *vt = &info->value_type;
BUG_ON(le32_to_cpu(ab->nr_entries));
@@ -728,7 +734,7 @@ int dm_array_new(struct dm_array_info *info, dm_block_t *root,
int r;
struct dm_block *block;
struct array_block *ab;
- unsigned block_index, end_block, size_of_block, max_entries;
+ unsigned int block_index, end_block, size_of_block, max_entries;
r = dm_array_empty(info, root);
if (r)
@@ -776,7 +782,7 @@ int dm_array_get_value(struct dm_array_info *info, dm_block_t root,
struct dm_block *block;
struct array_block *ab;
size_t size_of_block;
- unsigned entry, max_entries;
+ unsigned int entry, max_entries;
size_of_block = dm_bm_block_size(dm_tm_get_bm(info->btree_info.tm));
max_entries = calc_max_entries(info->value_type.size, size_of_block);
@@ -804,8 +810,8 @@ static int array_set_value(struct dm_array_info *info, dm_block_t root,
struct dm_block *block;
struct array_block *ab;
size_t size_of_block;
- unsigned max_entries;
- unsigned entry;
+ unsigned int max_entries;
+ unsigned int entry;
void *old_value;
struct dm_btree_value_type *vt = &info->value_type;
@@ -840,7 +846,7 @@ out:
int dm_array_set_value(struct dm_array_info *info, dm_block_t root,
uint32_t index, const void *value, dm_block_t *new_root)
- __dm_written_to_disk(value)
+ __dm_written_to_disk(value)
{
int r;
@@ -861,9 +867,9 @@ static int walk_ablock(void *context, uint64_t *keys, void *leaf)
struct walk_info *wi = context;
int r;
- unsigned i;
+ unsigned int i;
__le64 block_le;
- unsigned nr_entries, max_entries;
+ unsigned int nr_entries, max_entries;
struct dm_block *block;
struct array_block *ab;
diff --git a/drivers/md/persistent-data/dm-array.h b/drivers/md/persistent-data/dm-array.h
index d7d2d579c662..91d6165427b3 100644
--- a/drivers/md/persistent-data/dm-array.h
+++ b/drivers/md/persistent-data/dm-array.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2012 Red Hat, Inc.
*
@@ -198,7 +199,7 @@ struct dm_array_cursor {
struct dm_block *block;
struct array_block *ab;
- unsigned index;
+ unsigned int index;
};
int dm_array_cursor_begin(struct dm_array_info *info,
diff --git a/drivers/md/persistent-data/dm-bitset.c b/drivers/md/persistent-data/dm-bitset.c
index b7208d82e748..00c0a3f186b7 100644
--- a/drivers/md/persistent-data/dm-bitset.c
+++ b/drivers/md/persistent-data/dm-bitset.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2012 Red Hat, Inc.
*
@@ -41,7 +42,7 @@ EXPORT_SYMBOL_GPL(dm_bitset_empty);
struct packer_context {
bit_value_fn fn;
- unsigned nr_bits;
+ unsigned int nr_bits;
void *context;
};
@@ -49,7 +50,7 @@ static int pack_bits(uint32_t index, void *value, void *context)
{
int r;
struct packer_context *p = context;
- unsigned bit, nr = min(64u, p->nr_bits - (index * 64));
+ unsigned int bit, nr = min(64u, p->nr_bits - (index * 64));
uint64_t word = 0;
bool bv;
@@ -73,6 +74,7 @@ int dm_bitset_new(struct dm_disk_bitset *info, dm_block_t *root,
uint32_t size, bit_value_fn fn, void *context)
{
struct packer_context p;
+
p.fn = fn;
p.nr_bits = size;
p.context = context;
@@ -147,7 +149,7 @@ static int get_array_entry(struct dm_disk_bitset *info, dm_block_t root,
uint32_t index, dm_block_t *new_root)
{
int r;
- unsigned array_index = index / BITS_PER_ARRAY_ENTRY;
+ unsigned int array_index = index / BITS_PER_ARRAY_ENTRY;
if (info->current_index_set) {
if (info->current_index == array_index)
@@ -165,7 +167,7 @@ int dm_bitset_set_bit(struct dm_disk_bitset *info, dm_block_t root,
uint32_t index, dm_block_t *new_root)
{
int r;
- unsigned b = index % BITS_PER_ARRAY_ENTRY;
+ unsigned int b = index % BITS_PER_ARRAY_ENTRY;
r = get_array_entry(info, root, index, new_root);
if (r)
@@ -182,7 +184,7 @@ int dm_bitset_clear_bit(struct dm_disk_bitset *info, dm_block_t root,
uint32_t index, dm_block_t *new_root)
{
int r;
- unsigned b = index % BITS_PER_ARRAY_ENTRY;
+ unsigned int b = index % BITS_PER_ARRAY_ENTRY;
r = get_array_entry(info, root, index, new_root);
if (r)
@@ -199,7 +201,7 @@ int dm_bitset_test_bit(struct dm_disk_bitset *info, dm_block_t root,
uint32_t index, dm_block_t *new_root, bool *result)
{
int r;
- unsigned b = index % BITS_PER_ARRAY_ENTRY;
+ unsigned int b = index % BITS_PER_ARRAY_ENTRY;
r = get_array_entry(info, root, index, new_root);
if (r)
diff --git a/drivers/md/persistent-data/dm-bitset.h b/drivers/md/persistent-data/dm-bitset.h
index df888da04ee1..a3392ece5fab 100644
--- a/drivers/md/persistent-data/dm-bitset.h
+++ b/drivers/md/persistent-data/dm-bitset.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2012 Red Hat, Inc.
*
diff --git a/drivers/md/persistent-data/dm-block-manager.c b/drivers/md/persistent-data/dm-block-manager.c
index 11935864f50f..7bdfc23f758a 100644
--- a/drivers/md/persistent-data/dm-block-manager.c
+++ b/drivers/md/persistent-data/dm-block-manager.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2011 Red Hat, Inc.
*
@@ -57,10 +58,10 @@ struct waiter {
int wants_write;
};
-static unsigned __find_holder(struct block_lock *lock,
+static unsigned int __find_holder(struct block_lock *lock,
struct task_struct *task)
{
- unsigned i;
+ unsigned int i;
for (i = 0; i < MAX_HOLDERS; i++)
if (lock->holders[i] == task)
@@ -73,7 +74,7 @@ static unsigned __find_holder(struct block_lock *lock,
/* call this *after* you increment lock->count */
static void __add_holder(struct block_lock *lock, struct task_struct *task)
{
- unsigned h = __find_holder(lock, NULL);
+ unsigned int h = __find_holder(lock, NULL);
#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
struct stack_store *t;
#endif
@@ -90,14 +91,15 @@ static void __add_holder(struct block_lock *lock, struct task_struct *task)
/* call this *before* you decrement lock->count */
static void __del_holder(struct block_lock *lock, struct task_struct *task)
{
- unsigned h = __find_holder(lock, task);
+ unsigned int h = __find_holder(lock, task);
+
lock->holders[h] = NULL;
put_task_struct(task);
}
static int __check_holder(struct block_lock *lock)
{
- unsigned i;
+ unsigned int i;
for (i = 0; i < MAX_HOLDERS; i++) {
if (lock->holders[i] == current) {
@@ -354,6 +356,7 @@ struct buffer_aux {
static void dm_block_manager_alloc_callback(struct dm_buffer *buf)
{
struct buffer_aux *aux = dm_bufio_get_aux_data(buf);
+
aux->validator = NULL;
bl_init(&aux->lock);
}
@@ -361,23 +364,26 @@ static void dm_block_manager_alloc_callback(struct dm_buffer *buf)
static void dm_block_manager_write_callback(struct dm_buffer *buf)
{
struct buffer_aux *aux = dm_bufio_get_aux_data(buf);
+
if (aux->validator) {
aux->validator->prepare_for_write(aux->validator, (struct dm_block *) buf,
dm_bufio_get_block_size(dm_bufio_get_client(buf)));
}
}
-/*----------------------------------------------------------------
+/*
+ * -------------------------------------------------------------
* Public interface
- *--------------------------------------------------------------*/
+ *--------------------------------------------------------------
+ */
struct dm_block_manager {
struct dm_bufio_client *bufio;
bool read_only:1;
};
struct dm_block_manager *dm_block_manager_create(struct block_device *bdev,
- unsigned block_size,
- unsigned max_held_per_thread)
+ unsigned int block_size,
+ unsigned int max_held_per_thread)
{
int r;
struct dm_block_manager *bm;
@@ -415,7 +421,7 @@ void dm_block_manager_destroy(struct dm_block_manager *bm)
}
EXPORT_SYMBOL_GPL(dm_block_manager_destroy);
-unsigned dm_bm_block_size(struct dm_block_manager *bm)
+unsigned int dm_bm_block_size(struct dm_block_manager *bm)
{
return dm_bufio_get_block_size(bm->bufio);
}
@@ -433,6 +439,7 @@ static int dm_bm_validate_buffer(struct dm_block_manager *bm,
{
if (unlikely(!aux->validator)) {
int r;
+
if (!v)
return 0;
r = v->check(v, (struct dm_block *) buf, dm_bufio_get_block_size(bm->bufio));
@@ -588,8 +595,7 @@ EXPORT_SYMBOL_GPL(dm_bm_write_lock_zero);
void dm_bm_unlock(struct dm_block *b)
{
- struct buffer_aux *aux;
- aux = dm_bufio_get_aux_data(to_buffer(b));
+ struct buffer_aux *aux = dm_bufio_get_aux_data(to_buffer(b));
if (aux->write_locked) {
dm_bufio_mark_buffer_dirty(to_buffer(b));
@@ -617,7 +623,7 @@ void dm_bm_prefetch(struct dm_block_manager *bm, dm_block_t b)
bool dm_bm_is_read_only(struct dm_block_manager *bm)
{
- return (bm ? bm->read_only : true);
+ return bm ? bm->read_only : true;
}
EXPORT_SYMBOL_GPL(dm_bm_is_read_only);
diff --git a/drivers/md/persistent-data/dm-block-manager.h b/drivers/md/persistent-data/dm-block-manager.h
index e728937f376a..5746b0f82a03 100644
--- a/drivers/md/persistent-data/dm-block-manager.h
+++ b/drivers/md/persistent-data/dm-block-manager.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2011 Red Hat, Inc.
*
@@ -32,11 +33,11 @@ void *dm_block_data(struct dm_block *b);
*/
struct dm_block_manager;
struct dm_block_manager *dm_block_manager_create(
- struct block_device *bdev, unsigned block_size,
- unsigned max_held_per_thread);
+ struct block_device *bdev, unsigned int block_size,
+ unsigned int max_held_per_thread);
void dm_block_manager_destroy(struct dm_block_manager *bm);
-unsigned dm_bm_block_size(struct dm_block_manager *bm);
+unsigned int dm_bm_block_size(struct dm_block_manager *bm);
dm_block_t dm_bm_nr_blocks(struct dm_block_manager *bm);
/*----------------------------------------------------------------*/
diff --git a/drivers/md/persistent-data/dm-btree-internal.h b/drivers/md/persistent-data/dm-btree-internal.h
index 893edb426dba..7ed2ce656fcc 100644
--- a/drivers/md/persistent-data/dm-btree-internal.h
+++ b/drivers/md/persistent-data/dm-btree-internal.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2011 Red Hat, Inc.
*
@@ -34,12 +35,12 @@ struct node_header {
__le32 max_entries;
__le32 value_size;
__le32 padding;
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
struct btree_node {
struct node_header header;
__le64 keys[];
-} __attribute__((packed, aligned(8)));
+} __packed __aligned(8);
/*
@@ -118,6 +119,7 @@ static inline void *value_base(struct btree_node *n)
static inline void *value_ptr(struct btree_node *n, uint32_t index)
{
uint32_t value_size = le32_to_cpu(n->header.value_size);
+
return value_base(n) + (value_size * index);
}
diff --git a/drivers/md/persistent-data/dm-btree-remove.c b/drivers/md/persistent-data/dm-btree-remove.c
index 4ead31e0d8ce..942cd47eb52d 100644
--- a/drivers/md/persistent-data/dm-btree-remove.c
+++ b/drivers/md/persistent-data/dm-btree-remove.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2011 Red Hat, Inc.
*
@@ -86,6 +87,7 @@ static int node_copy(struct btree_node *left, struct btree_node *right, int shif
{
uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
uint32_t value_size = le32_to_cpu(left->header.value_size);
+
if (value_size != le32_to_cpu(right->header.value_size)) {
DMERR("mismatched value size");
return -EILSEQ;
@@ -124,11 +126,12 @@ static int node_copy(struct btree_node *left, struct btree_node *right, int shif
/*
* Delete a specific entry from a leaf node.
*/
-static void delete_at(struct btree_node *n, unsigned index)
+static void delete_at(struct btree_node *n, unsigned int index)
{
- unsigned nr_entries = le32_to_cpu(n->header.nr_entries);
- unsigned nr_to_copy = nr_entries - (index + 1);
+ unsigned int nr_entries = le32_to_cpu(n->header.nr_entries);
+ unsigned int nr_to_copy = nr_entries - (index + 1);
uint32_t value_size = le32_to_cpu(n->header.value_size);
+
BUG_ON(index >= nr_entries);
if (nr_to_copy) {
@@ -144,20 +147,20 @@ static void delete_at(struct btree_node *n, unsigned index)
n->header.nr_entries = cpu_to_le32(nr_entries - 1);
}
-static unsigned merge_threshold(struct btree_node *n)
+static unsigned int merge_threshold(struct btree_node *n)
{
return le32_to_cpu(n->header.max_entries) / 3;
}
struct child {
- unsigned index;
+ unsigned int index;
struct dm_block *block;
struct btree_node *n;
};
static int init_child(struct dm_btree_info *info, struct dm_btree_value_type *vt,
struct btree_node *parent,
- unsigned index, struct child *result)
+ unsigned int index, struct child *result)
{
int r, inc;
dm_block_t root;
@@ -263,7 +266,8 @@ static int __rebalance2(struct dm_btree_info *info, struct btree_node *parent,
/*
* Rebalance.
*/
- unsigned target_left = (nr_left + nr_right) / 2;
+ unsigned int target_left = (nr_left + nr_right) / 2;
+
ret = shift(left, right, nr_left - target_left);
if (ret)
return ret;
@@ -273,7 +277,7 @@ static int __rebalance2(struct dm_btree_info *info, struct btree_node *parent,
}
static int rebalance2(struct shadow_spine *s, struct dm_btree_info *info,
- struct dm_btree_value_type *vt, unsigned left_index)
+ struct dm_btree_value_type *vt, unsigned int left_index)
{
int r;
struct btree_node *parent;
@@ -310,7 +314,7 @@ static int delete_center_node(struct dm_btree_info *info, struct btree_node *par
uint32_t nr_left, uint32_t nr_center, uint32_t nr_right)
{
uint32_t max_entries = le32_to_cpu(left->header.max_entries);
- unsigned shift = min(max_entries - nr_left, nr_center);
+ unsigned int shift = min(max_entries - nr_left, nr_center);
if (nr_left + shift > max_entries) {
DMERR("node shift out of bounds");
@@ -351,10 +355,10 @@ static int redistribute3(struct dm_btree_info *info, struct btree_node *parent,
{
int s, ret;
uint32_t max_entries = le32_to_cpu(left->header.max_entries);
- unsigned total = nr_left + nr_center + nr_right;
- unsigned target_right = total / 3;
- unsigned remainder = (target_right * 3) != total;
- unsigned target_left = target_right + remainder;
+ unsigned int total = nr_left + nr_center + nr_right;
+ unsigned int target_right = total / 3;
+ unsigned int remainder = (target_right * 3) != total;
+ unsigned int target_left = target_right + remainder;
BUG_ON(target_left > max_entries);
BUG_ON(target_right > max_entries);
@@ -422,7 +426,7 @@ static int __rebalance3(struct dm_btree_info *info, struct btree_node *parent,
uint32_t nr_center = le32_to_cpu(center->header.nr_entries);
uint32_t nr_right = le32_to_cpu(right->header.nr_entries);
- unsigned threshold = merge_threshold(left) * 4 + 1;
+ unsigned int threshold = merge_threshold(left) * 4 + 1;
if ((left->header.max_entries != center->header.max_entries) ||
(center->header.max_entries != right->header.max_entries)) {
@@ -440,7 +444,7 @@ static int __rebalance3(struct dm_btree_info *info, struct btree_node *parent,
}
static int rebalance3(struct shadow_spine *s, struct dm_btree_info *info,
- struct dm_btree_value_type *vt, unsigned left_index)
+ struct dm_btree_value_type *vt, unsigned int left_index)
{
int r;
struct btree_node *parent = dm_block_data(shadow_current(s));
@@ -519,7 +523,7 @@ static int rebalance_children(struct shadow_spine *s,
return r;
}
-static int do_leaf(struct btree_node *n, uint64_t key, unsigned *index)
+static int do_leaf(struct btree_node *n, uint64_t key, unsigned int *index)
{
int i = lower_bound(n, key);
@@ -539,7 +543,7 @@ static int do_leaf(struct btree_node *n, uint64_t key, unsigned *index)
*/
static int remove_raw(struct shadow_spine *s, struct dm_btree_info *info,
struct dm_btree_value_type *vt, dm_block_t root,
- uint64_t key, unsigned *index)
+ uint64_t key, unsigned int *index)
{
int i = *index, r;
struct btree_node *n;
@@ -556,6 +560,7 @@ static int remove_raw(struct shadow_spine *s, struct dm_btree_info *info,
*/
if (shadow_has_parent(s)) {
__le64 location = cpu_to_le64(dm_block_location(shadow_current(s)));
+
memcpy(value_ptr(dm_block_data(shadow_parent(s)), i),
&location, sizeof(__le64));
}
@@ -589,7 +594,7 @@ static int remove_raw(struct shadow_spine *s, struct dm_btree_info *info,
int dm_btree_remove(struct dm_btree_info *info, dm_block_t root,
uint64_t *keys, dm_block_t *new_root)
{
- unsigned level, last_level = info->levels - 1;
+ unsigned int level, last_level = info->levels - 1;
int index = 0, r = 0;
struct shadow_spine spine;
struct btree_node *n;
@@ -601,7 +606,7 @@ int dm_btree_remove(struct dm_btree_info *info, dm_block_t root,
r = remove_raw(&spine, info,
(level == last_level ?
&info->value_type : &le64_vt),
- root, keys[level], (unsigned *)&index);
+ root, keys[level], (unsigned int *)&index);
if (r < 0)
break;
@@ -649,6 +654,7 @@ static int remove_nearest(struct shadow_spine *s, struct dm_btree_info *info,
*/
if (shadow_has_parent(s)) {
__le64 location = cpu_to_le64(dm_block_location(shadow_current(s)));
+
memcpy(value_ptr(dm_block_data(shadow_parent(s)), i),
&location, sizeof(__le64));
}
@@ -685,9 +691,9 @@ static int remove_nearest(struct shadow_spine *s, struct dm_btree_info *info,
static int remove_one(struct dm_btree_info *info, dm_block_t root,
uint64_t *keys, uint64_t end_key,
- dm_block_t *new_root, unsigned *nr_removed)
+ dm_block_t *new_root, unsigned int *nr_removed)
{
- unsigned level, last_level = info->levels - 1;
+ unsigned int level, last_level = info->levels - 1;
int index = 0, r = 0;
struct shadow_spine spine;
struct btree_node *n;
@@ -698,7 +704,7 @@ static int remove_one(struct dm_btree_info *info, dm_block_t root,
init_shadow_spine(&spine, info);
for (level = 0; level < last_level; level++) {
r = remove_raw(&spine, info, &le64_vt,
- root, keys[level], (unsigned *) &index);
+ root, keys[level], (unsigned int *) &index);
if (r < 0)
goto out;
@@ -742,7 +748,7 @@ out:
int dm_btree_remove_leaves(struct dm_btree_info *info, dm_block_t root,
uint64_t *first_key, uint64_t end_key,
- dm_block_t *new_root, unsigned *nr_removed)
+ dm_block_t *new_root, unsigned int *nr_removed)
{
int r;
diff --git a/drivers/md/persistent-data/dm-btree-spine.c b/drivers/md/persistent-data/dm-btree-spine.c
index e653458888a7..7540383b7cf3 100644
--- a/drivers/md/persistent-data/dm-btree-spine.c
+++ b/drivers/md/persistent-data/dm-btree-spine.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2011 Red Hat, Inc.
*
@@ -39,7 +40,7 @@ static int node_check(struct dm_block_validator *v,
uint32_t flags, nr_entries, max_entries;
if (dm_block_location(b) != le64_to_cpu(h->blocknr)) {
- DMERR_LIMIT("node_check failed: blocknr %llu != wanted %llu",
+ DMERR_LIMIT("%s failed: blocknr %llu != wanted %llu", __func__,
le64_to_cpu(h->blocknr), dm_block_location(b));
return -ENOTBLK;
}
@@ -48,7 +49,7 @@ static int node_check(struct dm_block_validator *v,
block_size - sizeof(__le32),
BTREE_CSUM_XOR));
if (csum_disk != h->csum) {
- DMERR_LIMIT("node_check failed: csum %u != wanted %u",
+ DMERR_LIMIT("%s failed: csum %u != wanted %u", __func__,
le32_to_cpu(csum_disk), le32_to_cpu(h->csum));
return -EILSEQ;
}
@@ -59,12 +60,12 @@ static int node_check(struct dm_block_validator *v,
if (sizeof(struct node_header) +
(sizeof(__le64) + value_size) * max_entries > block_size) {
- DMERR_LIMIT("node_check failed: max_entries too large");
+ DMERR_LIMIT("%s failed: max_entries too large", __func__);
return -EILSEQ;
}
if (nr_entries > max_entries) {
- DMERR_LIMIT("node_check failed: too many entries");
+ DMERR_LIMIT("%s failed: too many entries", __func__);
return -EILSEQ;
}
@@ -73,7 +74,7 @@ static int node_check(struct dm_block_validator *v,
*/
flags = le32_to_cpu(h->flags);
if (!(flags & INTERNAL_NODE) && !(flags & LEAF_NODE)) {
- DMERR_LIMIT("node_check failed: node is neither INTERNAL or LEAF");
+ DMERR_LIMIT("%s failed: node is neither INTERNAL or LEAF", __func__);
return -EILSEQ;
}
@@ -132,9 +133,8 @@ void exit_ro_spine(struct ro_spine *s)
{
int i;
- for (i = 0; i < s->count; i++) {
+ for (i = 0; i < s->count; i++)
unlock_block(s->info, s->nodes[i]);
- }
}
int ro_step(struct ro_spine *s, dm_block_t new_child)
@@ -183,9 +183,8 @@ void exit_shadow_spine(struct shadow_spine *s)
{
int i;
- for (i = 0; i < s->count; i++) {
+ for (i = 0; i < s->count; i++)
unlock_block(s->info, s->nodes[i]);
- }
}
int shadow_step(struct shadow_spine *s, dm_block_t b,
@@ -234,12 +233,12 @@ dm_block_t shadow_root(struct shadow_spine *s)
return s->root;
}
-static void le64_inc(void *context, const void *value_le, unsigned count)
+static void le64_inc(void *context, const void *value_le, unsigned int count)
{
dm_tm_with_runs(context, value_le, count, dm_tm_inc_range);
}
-static void le64_dec(void *context, const void *value_le, unsigned count)
+static void le64_dec(void *context, const void *value_le, unsigned int count)
{
dm_tm_with_runs(context, value_le, count, dm_tm_dec_range);
}
diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
index 5ce64e93aae7..0c7a2e8d1846 100644
--- a/drivers/md/persistent-data/dm-btree.c
+++ b/drivers/md/persistent-data/dm-btree.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2011 Red Hat, Inc.
*
@@ -13,9 +14,11 @@
#define DM_MSG_PREFIX "btree"
-/*----------------------------------------------------------------
+/*
+ *--------------------------------------------------------------
* Array manipulation
- *--------------------------------------------------------------*/
+ *--------------------------------------------------------------
+ */
static void memcpy_disk(void *dest, const void *src, size_t len)
__dm_written_to_disk(src)
{
@@ -23,8 +26,8 @@ static void memcpy_disk(void *dest, const void *src, size_t len)
__dm_unbless_for_disk(src);
}
-static void array_insert(void *base, size_t elt_size, unsigned nr_elts,
- unsigned index, void *elt)
+static void array_insert(void *base, size_t elt_size, unsigned int nr_elts,
+ unsigned int index, void *elt)
__dm_written_to_disk(elt)
{
if (index < nr_elts)
@@ -80,7 +83,7 @@ void inc_children(struct dm_transaction_manager *tm, struct btree_node *n,
vt->inc(vt->context, value_ptr(n, 0), nr_entries);
}
-static int insert_at(size_t value_size, struct btree_node *node, unsigned index,
+static int insert_at(size_t value_size, struct btree_node *node, unsigned int index,
uint64_t key, void *value)
__dm_written_to_disk(value)
{
@@ -162,9 +165,9 @@ EXPORT_SYMBOL_GPL(dm_btree_empty);
struct frame {
struct dm_block *b;
struct btree_node *n;
- unsigned level;
- unsigned nr_children;
- unsigned current_child;
+ unsigned int level;
+ unsigned int nr_children;
+ unsigned int current_child;
};
struct del_stack {
@@ -193,7 +196,7 @@ static int unprocessed_frames(struct del_stack *s)
static void prefetch_children(struct del_stack *s, struct frame *f)
{
- unsigned i;
+ unsigned int i;
struct dm_block_manager *bm = dm_tm_get_bm(s->tm);
for (i = 0; i < f->nr_children; i++)
@@ -205,7 +208,7 @@ static bool is_internal_level(struct dm_btree_info *info, struct frame *f)
return f->level < (info->levels - 1);
}
-static int push_frame(struct del_stack *s, dm_block_t b, unsigned level)
+static int push_frame(struct del_stack *s, dm_block_t b, unsigned int level)
{
int r;
uint32_t ref_count;
@@ -371,7 +374,7 @@ static int btree_lookup_raw(struct ro_spine *s, dm_block_t block, uint64_t key,
int dm_btree_lookup(struct dm_btree_info *info, dm_block_t root,
uint64_t *keys, void *value_le)
{
- unsigned level, last_level = info->levels - 1;
+ unsigned int level, last_level = info->levels - 1;
int r = -ENODATA;
uint64_t rkey;
__le64 internal_value_le;
@@ -467,7 +470,7 @@ out:
int dm_btree_lookup_next(struct dm_btree_info *info, dm_block_t root,
uint64_t *keys, uint64_t *rkey, void *value_le)
{
- unsigned level;
+ unsigned int level;
int r = -ENODATA;
__le64 internal_value_le;
struct ro_spine spine;
@@ -493,7 +496,6 @@ out:
exit_ro_spine(&spine);
return r;
}
-
EXPORT_SYMBOL_GPL(dm_btree_lookup_next);
/*----------------------------------------------------------------*/
@@ -502,11 +504,12 @@ EXPORT_SYMBOL_GPL(dm_btree_lookup_next);
* Copies entries from one region of a btree node to another. The regions
* must not overlap.
*/
-static void copy_entries(struct btree_node *dest, unsigned dest_offset,
- struct btree_node *src, unsigned src_offset,
- unsigned count)
+static void copy_entries(struct btree_node *dest, unsigned int dest_offset,
+ struct btree_node *src, unsigned int src_offset,
+ unsigned int count)
{
size_t value_size = le32_to_cpu(dest->header.value_size);
+
memcpy(dest->keys + dest_offset, src->keys + src_offset, count * sizeof(uint64_t));
memcpy(value_ptr(dest, dest_offset), value_ptr(src, src_offset), count * value_size);
}
@@ -515,11 +518,12 @@ static void copy_entries(struct btree_node *dest, unsigned dest_offset,
* Moves entries from one region fo a btree node to another. The regions
* may overlap.
*/
-static void move_entries(struct btree_node *dest, unsigned dest_offset,
- struct btree_node *src, unsigned src_offset,
- unsigned count)
+static void move_entries(struct btree_node *dest, unsigned int dest_offset,
+ struct btree_node *src, unsigned int src_offset,
+ unsigned int count)
{
size_t value_size = le32_to_cpu(dest->header.value_size);
+
memmove(dest->keys + dest_offset, src->keys + src_offset, count * sizeof(uint64_t));
memmove(value_ptr(dest, dest_offset), value_ptr(src, src_offset), count * value_size);
}
@@ -528,7 +532,7 @@ static void move_entries(struct btree_node *dest, unsigned dest_offset,
* Erases the first 'count' entries of a btree node, shifting following
* entries down into their place.
*/
-static void shift_down(struct btree_node *n, unsigned count)
+static void shift_down(struct btree_node *n, unsigned int count)
{
move_entries(n, 0, n, count, le32_to_cpu(n->header.nr_entries) - count);
}
@@ -537,7 +541,7 @@ static void shift_down(struct btree_node *n, unsigned count)
* Moves entries in a btree node up 'count' places, making space for
* new entries at the start of the node.
*/
-static void shift_up(struct btree_node *n, unsigned count)
+static void shift_up(struct btree_node *n, unsigned int count)
{
move_entries(n, count, n, 0, le32_to_cpu(n->header.nr_entries));
}
@@ -548,18 +552,20 @@ static void shift_up(struct btree_node *n, unsigned count)
*/
static void redistribute2(struct btree_node *left, struct btree_node *right)
{
- unsigned nr_left = le32_to_cpu(left->header.nr_entries);
- unsigned nr_right = le32_to_cpu(right->header.nr_entries);
- unsigned total = nr_left + nr_right;
- unsigned target_left = total / 2;
- unsigned target_right = total - target_left;
+ unsigned int nr_left = le32_to_cpu(left->header.nr_entries);
+ unsigned int nr_right = le32_to_cpu(right->header.nr_entries);
+ unsigned int total = nr_left + nr_right;
+ unsigned int target_left = total / 2;
+ unsigned int target_right = total - target_left;
if (nr_left < target_left) {
- unsigned delta = target_left - nr_left;
+ unsigned int delta = target_left - nr_left;
+
copy_entries(left, nr_left, right, 0, delta);
shift_down(right, delta);
} else if (nr_left > target_left) {
- unsigned delta = nr_left - target_left;
+ unsigned int delta = nr_left - target_left;
+
if (nr_right)
shift_up(right, delta);
copy_entries(right, 0, left, target_left, delta);
@@ -576,10 +582,10 @@ static void redistribute2(struct btree_node *left, struct btree_node *right)
static void redistribute3(struct btree_node *left, struct btree_node *center,
struct btree_node *right)
{
- unsigned nr_left = le32_to_cpu(left->header.nr_entries);
- unsigned nr_center = le32_to_cpu(center->header.nr_entries);
- unsigned nr_right = le32_to_cpu(right->header.nr_entries);
- unsigned total, target_left, target_center, target_right;
+ unsigned int nr_left = le32_to_cpu(left->header.nr_entries);
+ unsigned int nr_center = le32_to_cpu(center->header.nr_entries);
+ unsigned int nr_right = le32_to_cpu(right->header.nr_entries);
+ unsigned int total, target_left, target_center, target_right;
BUG_ON(nr_center);
@@ -589,19 +595,22 @@ static void redistribute3(struct btree_node *left, struct btree_node *center,
target_right = (total - target_left - target_center);
if (nr_left < target_left) {
- unsigned left_short = target_left - nr_left;
+ unsigned int left_short = target_left - nr_left;
+
copy_entries(left, nr_left, right, 0, left_short);
copy_entries(center, 0, right, left_short, target_center);
shift_down(right, nr_right - target_right);
} else if (nr_left < (target_left + target_center)) {
- unsigned left_to_center = nr_left - target_left;
+ unsigned int left_to_center = nr_left - target_left;
+
copy_entries(center, 0, left, target_left, left_to_center);
copy_entries(center, left_to_center, right, 0, target_center - left_to_center);
shift_down(right, nr_right - target_right);
} else {
- unsigned right_short = target_right - nr_right;
+ unsigned int right_short = target_right - nr_right;
+
shift_up(right, right_short);
copy_entries(right, 0, left, nr_left - right_short, right_short);
copy_entries(center, 0, left, target_left, nr_left - target_left);
@@ -642,7 +651,7 @@ static void redistribute3(struct btree_node *left, struct btree_node *center,
*
* Where A* is a shadow of A.
*/
-static int split_one_into_two(struct shadow_spine *s, unsigned parent_index,
+static int split_one_into_two(struct shadow_spine *s, unsigned int parent_index,
struct dm_btree_value_type *vt, uint64_t key)
{
int r;
@@ -696,7 +705,7 @@ static int split_one_into_two(struct shadow_spine *s, unsigned parent_index,
* to the new shadow.
*/
static int shadow_child(struct dm_btree_info *info, struct dm_btree_value_type *vt,
- struct btree_node *parent, unsigned index,
+ struct btree_node *parent, unsigned int index,
struct dm_block **result)
{
int r, inc;
@@ -725,11 +734,11 @@ static int shadow_child(struct dm_btree_info *info, struct dm_btree_value_type *
* Splits two nodes into three. This is more work, but results in fuller
* nodes, so saves metadata space.
*/
-static int split_two_into_three(struct shadow_spine *s, unsigned parent_index,
- struct dm_btree_value_type *vt, uint64_t key)
+static int split_two_into_three(struct shadow_spine *s, unsigned int parent_index,
+ struct dm_btree_value_type *vt, uint64_t key)
{
int r;
- unsigned middle_index;
+ unsigned int middle_index;
struct dm_block *left, *middle, *right, *parent;
struct btree_node *ln, *rn, *mn, *pn;
__le64 location;
@@ -781,7 +790,7 @@ static int split_two_into_three(struct shadow_spine *s, unsigned parent_index,
if (shadow_current(s) != right)
unlock_block(s->info, right);
- return r;
+ return r;
}
@@ -830,7 +839,7 @@ static int btree_split_beneath(struct shadow_spine *s, uint64_t key)
{
int r;
size_t size;
- unsigned nr_left, nr_right;
+ unsigned int nr_left, nr_right;
struct dm_block *left, *right, *new_parent;
struct btree_node *pn, *ln, *rn;
__le64 val;
@@ -904,7 +913,7 @@ static int btree_split_beneath(struct shadow_spine *s, uint64_t key)
* Redistributes a node's entries with its left sibling.
*/
static int rebalance_left(struct shadow_spine *s, struct dm_btree_value_type *vt,
- unsigned parent_index, uint64_t key)
+ unsigned int parent_index, uint64_t key)
{
int r;
struct dm_block *sib;
@@ -933,7 +942,7 @@ static int rebalance_left(struct shadow_spine *s, struct dm_btree_value_type *vt
* Redistributes a nodes entries with its right sibling.
*/
static int rebalance_right(struct shadow_spine *s, struct dm_btree_value_type *vt,
- unsigned parent_index, uint64_t key)
+ unsigned int parent_index, uint64_t key)
{
int r;
struct dm_block *sib;
@@ -961,10 +970,10 @@ static int rebalance_right(struct shadow_spine *s, struct dm_btree_value_type *v
/*
* Returns the number of spare entries in a node.
*/
-static int get_node_free_space(struct dm_btree_info *info, dm_block_t b, unsigned *space)
+static int get_node_free_space(struct dm_btree_info *info, dm_block_t b, unsigned int *space)
{
int r;
- unsigned nr_entries;
+ unsigned int nr_entries;
struct dm_block *block;
struct btree_node *node;
@@ -990,17 +999,18 @@ static int get_node_free_space(struct dm_btree_info *info, dm_block_t b, unsigne
*/
#define SPACE_THRESHOLD 8
static int rebalance_or_split(struct shadow_spine *s, struct dm_btree_value_type *vt,
- unsigned parent_index, uint64_t key)
+ unsigned int parent_index, uint64_t key)
{
int r;
struct btree_node *parent = dm_block_data(shadow_parent(s));
- unsigned nr_parent = le32_to_cpu(parent->header.nr_entries);
- unsigned free_space;
+ unsigned int nr_parent = le32_to_cpu(parent->header.nr_entries);
+ unsigned int free_space;
int left_shared = 0, right_shared = 0;
/* Should we move entries to the left sibling? */
if (parent_index > 0) {
dm_block_t left_b = value64(parent, parent_index - 1);
+
r = dm_tm_block_is_shared(s->info->tm, left_b, &left_shared);
if (r)
return r;
@@ -1018,6 +1028,7 @@ static int rebalance_or_split(struct shadow_spine *s, struct dm_btree_value_type
/* Should we move entries to the right sibling? */
if (parent_index < (nr_parent - 1)) {
dm_block_t right_b = value64(parent, parent_index + 1);
+
r = dm_tm_block_is_shared(s->info->tm, right_b, &right_shared);
if (r)
return r;
@@ -1080,7 +1091,7 @@ static bool has_space_for_insert(struct btree_node *node, uint64_t key)
static int btree_insert_raw(struct shadow_spine *s, dm_block_t root,
struct dm_btree_value_type *vt,
- uint64_t key, unsigned *index)
+ uint64_t key, unsigned int *index)
{
int r, i = *index, top = 1;
struct btree_node *node;
@@ -1214,9 +1225,9 @@ int btree_get_overwrite_leaf(struct dm_btree_info *info, dm_block_t root,
}
static bool need_insert(struct btree_node *node, uint64_t *keys,
- unsigned level, unsigned index)
+ unsigned int level, unsigned int index)
{
- return ((index >= le32_to_cpu(node->header.nr_entries)) ||
+ return ((index >= le32_to_cpu(node->header.nr_entries)) ||
(le64_to_cpu(node->keys[index]) != keys[level]));
}
@@ -1226,7 +1237,7 @@ static int insert(struct dm_btree_info *info, dm_block_t root,
__dm_written_to_disk(value)
{
int r;
- unsigned level, index = -1, last_level = info->levels - 1;
+ unsigned int level, index = -1, last_level = info->levels - 1;
dm_block_t block = root;
struct shadow_spine spine;
struct btree_node *n;
@@ -1309,7 +1320,7 @@ bad_unblessed:
int dm_btree_insert(struct dm_btree_info *info, dm_block_t root,
uint64_t *keys, void *value, dm_block_t *new_root)
- __dm_written_to_disk(value)
+ __dm_written_to_disk(value)
{
return insert(info, root, keys, value, new_root, NULL);
}
@@ -1318,7 +1329,7 @@ EXPORT_SYMBOL_GPL(dm_btree_insert);
int dm_btree_insert_notify(struct dm_btree_info *info, dm_block_t root,
uint64_t *keys, void *value, dm_block_t *new_root,
int *inserted)
- __dm_written_to_disk(value)
+ __dm_written_to_disk(value)
{
return insert(info, root, keys, value, new_root, inserted);
}
@@ -1341,8 +1352,8 @@ static int find_key(struct ro_spine *s, dm_block_t block, bool find_highest,
i = le32_to_cpu(ro_node(s)->header.nr_entries);
if (!i)
return -ENODATA;
- else
- i--;
+
+ i--;
if (find_highest)
*result_key = le64_to_cpu(ro_node(s)->keys[i]);
@@ -1412,7 +1423,7 @@ static int walk_node(struct dm_btree_info *info, dm_block_t block,
void *context)
{
int r;
- unsigned i, nr;
+ unsigned int i, nr;
struct dm_block *node;
struct btree_node *n;
uint64_t keys;
@@ -1455,7 +1466,7 @@ EXPORT_SYMBOL_GPL(dm_btree_walk);
static void prefetch_values(struct dm_btree_cursor *c)
{
- unsigned i, nr;
+ unsigned int i, nr;
__le64 value_le;
struct cursor_node *n = c->nodes + c->depth - 1;
struct btree_node *bn = dm_block_data(n->b);
@@ -1585,6 +1596,7 @@ EXPORT_SYMBOL_GPL(dm_btree_cursor_end);
int dm_btree_cursor_next(struct dm_btree_cursor *c)
{
int r = inc_or_backtrack(c);
+
if (!r) {
r = find_leaf(c);
if (r)
diff --git a/drivers/md/persistent-data/dm-btree.h b/drivers/md/persistent-data/dm-btree.h
index d2ae5aa4d00b..1b92acd7823d 100644
--- a/drivers/md/persistent-data/dm-btree.h
+++ b/drivers/md/persistent-data/dm-btree.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2011 Red Hat, Inc.
*
@@ -58,14 +59,14 @@ struct dm_btree_value_type {
* somewhere.) This method is _not_ called for insertion of a new
* value: It is assumed the ref count is already 1.
*/
- void (*inc)(void *context, const void *value, unsigned count);
+ void (*inc)(void *context, const void *value, unsigned int count);
/*
* These values are being deleted. The btree takes care of freeing
* the memory pointed to by @value. Often the del function just
* needs to decrement a reference counts somewhere.
*/
- void (*dec)(void *context, const void *value, unsigned count);
+ void (*dec)(void *context, const void *value, unsigned int count);
/*
* A test for equality between two values. When a value is
@@ -84,7 +85,7 @@ struct dm_btree_info {
/*
* Number of nested btrees. (Not the depth of a single tree.)
*/
- unsigned levels;
+ unsigned int levels;
struct dm_btree_value_type value_type;
};
@@ -121,7 +122,7 @@ int dm_btree_lookup_next(struct dm_btree_info *info, dm_block_t root,
*/
int dm_btree_insert(struct dm_btree_info *info, dm_block_t root,
uint64_t *keys, void *value, dm_block_t *new_root)
- __dm_written_to_disk(value);
+ __dm_written_to_disk(value);
/*
* A variant of insert that indicates whether it actually inserted or just
@@ -149,7 +150,7 @@ int dm_btree_remove(struct dm_btree_info *info, dm_block_t root,
*/
int dm_btree_remove_leaves(struct dm_btree_info *info, dm_block_t root,
uint64_t *keys, uint64_t end_key,
- dm_block_t *new_root, unsigned *nr_removed);
+ dm_block_t *new_root, unsigned int *nr_removed);
/*
* Returns < 0 on failure. Otherwise the number of key entries that have
@@ -188,7 +189,7 @@ int dm_btree_walk(struct dm_btree_info *info, dm_block_t root,
struct cursor_node {
struct dm_block *b;
- unsigned index;
+ unsigned int index;
};
struct dm_btree_cursor {
@@ -196,7 +197,7 @@ struct dm_btree_cursor {
dm_block_t root;
bool prefetch_leaves;
- unsigned depth;
+ unsigned int depth;
struct cursor_node nodes[DM_BTREE_CURSOR_MAX_DEPTH];
};
diff --git a/drivers/md/persistent-data/dm-persistent-data-internal.h b/drivers/md/persistent-data/dm-persistent-data-internal.h
index c49e26fff36c..c482434e566b 100644
--- a/drivers/md/persistent-data/dm-persistent-data-internal.h
+++ b/drivers/md/persistent-data/dm-persistent-data-internal.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2011 Red Hat, Inc.
*
@@ -9,11 +10,11 @@
#include "dm-block-manager.h"
-static inline unsigned dm_hash_block(dm_block_t b, unsigned hash_mask)
+static inline unsigned int dm_hash_block(dm_block_t b, unsigned int hash_mask)
{
- const unsigned BIG_PRIME = 4294967291UL;
+ const unsigned int BIG_PRIME = 4294967291UL;
- return (((unsigned) b) * BIG_PRIME) & hash_mask;
+ return (((unsigned int) b) * BIG_PRIME) & hash_mask;
}
#endif /* _PERSISTENT_DATA_INTERNAL_H */
diff --git a/drivers/md/persistent-data/dm-space-map-common.c b/drivers/md/persistent-data/dm-space-map-common.c
index bfbfa750e016..591d1a43d035 100644
--- a/drivers/md/persistent-data/dm-space-map-common.c
+++ b/drivers/md/persistent-data/dm-space-map-common.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2011 Red Hat, Inc.
*
@@ -41,7 +42,7 @@ static int index_check(struct dm_block_validator *v,
__le32 csum_disk;
if (dm_block_location(b) != le64_to_cpu(mi_le->blocknr)) {
- DMERR_LIMIT("index_check failed: blocknr %llu != wanted %llu",
+ DMERR_LIMIT("%s failed: blocknr %llu != wanted %llu", __func__,
le64_to_cpu(mi_le->blocknr), dm_block_location(b));
return -ENOTBLK;
}
@@ -50,7 +51,7 @@ static int index_check(struct dm_block_validator *v,
block_size - sizeof(__le32),
INDEX_CSUM_XOR));
if (csum_disk != mi_le->csum) {
- DMERR_LIMIT("index_check failed: csum %u != wanted %u",
+ DMERR_LIMIT("i%s failed: csum %u != wanted %u", __func__,
le32_to_cpu(csum_disk), le32_to_cpu(mi_le->csum));
return -EILSEQ;
}
@@ -126,7 +127,7 @@ static void *dm_bitmap_data(struct dm_block *b)
#define WORD_MASK_HIGH 0xAAAAAAAAAAAAAAAAULL
-static unsigned dm_bitmap_word_used(void *addr, unsigned b)
+static unsigned int dm_bitmap_word_used(void *addr, unsigned int b)
{
__le64 *words_le = addr;
__le64 *w_le = words_le + (b >> ENTRIES_SHIFT);
@@ -137,11 +138,11 @@ static unsigned dm_bitmap_word_used(void *addr, unsigned b)
return !(~bits & mask);
}
-static unsigned sm_lookup_bitmap(void *addr, unsigned b)
+static unsigned int sm_lookup_bitmap(void *addr, unsigned int b)
{
__le64 *words_le = addr;
__le64 *w_le = words_le + (b >> ENTRIES_SHIFT);
- unsigned hi, lo;
+ unsigned int hi, lo;
b = (b & (ENTRIES_PER_WORD - 1)) << 1;
hi = !!test_bit_le(b, (void *) w_le);
@@ -149,7 +150,7 @@ static unsigned sm_lookup_bitmap(void *addr, unsigned b)
return (hi << 1) | lo;
}
-static void sm_set_bitmap(void *addr, unsigned b, unsigned val)
+static void sm_set_bitmap(void *addr, unsigned int b, unsigned int val)
{
__le64 *words_le = addr;
__le64 *w_le = words_le + (b >> ENTRIES_SHIFT);
@@ -167,8 +168,8 @@ static void sm_set_bitmap(void *addr, unsigned b, unsigned val)
__clear_bit_le(b + 1, (void *) w_le);
}
-static int sm_find_free(void *addr, unsigned begin, unsigned end,
- unsigned *result)
+static int sm_find_free(void *addr, unsigned int begin, unsigned int end,
+ unsigned int *result)
{
while (begin < end) {
if (!(begin & (ENTRIES_PER_WORD - 1)) &&
@@ -237,7 +238,7 @@ int sm_ll_extend(struct ll_disk *ll, dm_block_t extra_blocks)
{
int r;
dm_block_t i, nr_blocks, nr_indexes;
- unsigned old_blocks, blocks;
+ unsigned int old_blocks, blocks;
nr_blocks = ll->nr_blocks + extra_blocks;
old_blocks = dm_sector_div_up(ll->nr_blocks, ll->entries_per_block);
@@ -351,7 +352,7 @@ int sm_ll_find_free_block(struct ll_disk *ll, dm_block_t begin,
for (i = index_begin; i < index_end; i++, begin = 0) {
struct dm_block *blk;
- unsigned position;
+ unsigned int position;
uint32_t bit_end;
r = ll->load_ie(ll, i, &ie_disk);
@@ -369,7 +370,7 @@ int sm_ll_find_free_block(struct ll_disk *ll, dm_block_t begin,
bit_end = (i == index_end - 1) ? end : ll->entries_per_block;
r = sm_find_free(dm_bitmap_data(blk),
- max_t(unsigned, begin, le32_to_cpu(ie_disk.none_free_before)),
+ max_t(unsigned int, begin, le32_to_cpu(ie_disk.none_free_before)),
bit_end, &position);
if (r == -ENOSPC) {
/*
@@ -390,7 +391,7 @@ int sm_ll_find_free_block(struct ll_disk *ll, dm_block_t begin,
}
int sm_ll_find_common_free_block(struct ll_disk *old_ll, struct ll_disk *new_ll,
- dm_block_t begin, dm_block_t end, dm_block_t *b)
+ dm_block_t begin, dm_block_t end, dm_block_t *b)
{
int r;
uint32_t count;
@@ -608,6 +609,7 @@ static int sm_ll_inc_overflow(struct ll_disk *ll, dm_block_t b, struct inc_conte
static inline int shadow_bitmap(struct ll_disk *ll, struct inc_context *ic)
{
int r, inc;
+
r = dm_tm_shadow_block(ll->tm, le64_to_cpu(ic->ie_disk.blocknr),
&dm_sm_bitmap_validator, &ic->bitmap_block, &inc);
if (r < 0) {
@@ -747,6 +749,7 @@ int sm_ll_inc(struct ll_disk *ll, dm_block_t b, dm_block_t e,
*nr_allocations = 0;
while (b != e) {
int r = __sm_ll_inc(ll, b, e, nr_allocations, &b);
+
if (r)
return r;
}
@@ -790,13 +793,12 @@ static int __sm_ll_dec_overflow(struct ll_disk *ll, dm_block_t b,
rc = le32_to_cpu(*v_ptr);
*old_rc = rc;
- if (rc == 3) {
+ if (rc == 3)
return __sm_ll_del_overflow(ll, b, ic);
- } else {
- rc--;
- *v_ptr = cpu_to_le32(rc);
- return 0;
- }
+
+ rc--;
+ *v_ptr = cpu_to_le32(rc);
+ return 0;
}
static int sm_ll_dec_overflow(struct ll_disk *ll, dm_block_t b,
@@ -929,6 +931,7 @@ int sm_ll_dec(struct ll_disk *ll, dm_block_t b, dm_block_t e,
*nr_allocations = 0;
while (b != e) {
int r = __sm_ll_dec(ll, b, e, nr_allocations, &b);
+
if (r)
return r;
}
@@ -1097,7 +1100,7 @@ static inline int ie_cache_writeback(struct ll_disk *ll, struct ie_cache *iec)
&iec->index, &iec->ie, &ll->bitmap_root);
}
-static inline unsigned hash_index(dm_block_t index)
+static inline unsigned int hash_index(dm_block_t index)
{
return dm_hash_block(index, IE_CACHE_MASK);
}
@@ -1106,7 +1109,7 @@ static int disk_ll_load_ie(struct ll_disk *ll, dm_block_t index,
struct disk_index_entry *ie)
{
int r;
- unsigned h = hash_index(index);
+ unsigned int h = hash_index(index);
struct ie_cache *iec = ll->ie_cache + h;
if (iec->valid) {
@@ -1137,7 +1140,7 @@ static int disk_ll_save_ie(struct ll_disk *ll, dm_block_t index,
struct disk_index_entry *ie)
{
int r;
- unsigned h = hash_index(index);
+ unsigned int h = hash_index(index);
struct ie_cache *iec = ll->ie_cache + h;
ll->bitmap_index_changed = true;
@@ -1164,9 +1167,11 @@ static int disk_ll_save_ie(struct ll_disk *ll, dm_block_t index,
static int disk_ll_init_index(struct ll_disk *ll)
{
- unsigned i;
+ unsigned int i;
+
for (i = 0; i < IE_CACHE_SIZE; i++) {
struct ie_cache *iec = ll->ie_cache + i;
+
iec->valid = false;
iec->dirty = false;
}
@@ -1186,10 +1191,11 @@ static dm_block_t disk_ll_max_entries(struct ll_disk *ll)
static int disk_ll_commit(struct ll_disk *ll)
{
int r = 0;
- unsigned i;
+ unsigned int i;
for (i = 0; i < IE_CACHE_SIZE; i++) {
struct ie_cache *iec = ll->ie_cache + i;
+
if (iec->valid && iec->dirty)
r = ie_cache_writeback(ll, iec);
}
diff --git a/drivers/md/persistent-data/dm-space-map-common.h b/drivers/md/persistent-data/dm-space-map-common.h
index 706ceb85d680..e83d1f225078 100644
--- a/drivers/md/persistent-data/dm-space-map-common.h
+++ b/drivers/md/persistent-data/dm-space-map-common.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2011 Red Hat, Inc.
*
@@ -33,7 +34,7 @@ struct disk_index_entry {
__le64 blocknr;
__le32 nr_free;
__le32 none_free_before;
-} __attribute__ ((packed, aligned(8)));
+} __packed __aligned(8);
#define MAX_METADATA_BITMAPS 255
@@ -43,7 +44,7 @@ struct disk_metadata_index {
__le64 blocknr;
struct disk_index_entry index[MAX_METADATA_BITMAPS];
-} __attribute__ ((packed, aligned(8)));
+} __packed __aligned(8);
struct ll_disk;
@@ -102,7 +103,7 @@ struct disk_sm_root {
__le64 nr_allocated;
__le64 bitmap_root;
__le64 ref_count_root;
-} __attribute__ ((packed, aligned(8)));
+} __packed __aligned(8);
#define ENTRIES_PER_BYTE 4
@@ -110,7 +111,7 @@ struct disk_bitmap_header {
__le32 csum;
__le32 not_used;
__le64 blocknr;
-} __attribute__ ((packed, aligned(8)));
+} __packed __aligned(8);
/*----------------------------------------------------------------*/
@@ -120,7 +121,7 @@ int sm_ll_lookup(struct ll_disk *ll, dm_block_t b, uint32_t *result);
int sm_ll_find_free_block(struct ll_disk *ll, dm_block_t begin,
dm_block_t end, dm_block_t *result);
int sm_ll_find_common_free_block(struct ll_disk *old_ll, struct ll_disk *new_ll,
- dm_block_t begin, dm_block_t end, dm_block_t *result);
+ dm_block_t begin, dm_block_t end, dm_block_t *result);
/*
* The next three functions return (via nr_allocations) the net number of
diff --git a/drivers/md/persistent-data/dm-space-map-disk.c b/drivers/md/persistent-data/dm-space-map-disk.c
index d0a8d5e73c28..f4241f54e20e 100644
--- a/drivers/md/persistent-data/dm-space-map-disk.c
+++ b/drivers/md/persistent-data/dm-space-map-disk.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2011 Red Hat, Inc.
*
@@ -48,6 +49,7 @@ static int sm_disk_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
static int sm_disk_get_nr_blocks(struct dm_space_map *sm, dm_block_t *count)
{
struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
+
*count = smd->old_ll.nr_blocks;
return 0;
@@ -56,6 +58,7 @@ static int sm_disk_get_nr_blocks(struct dm_space_map *sm, dm_block_t *count)
static int sm_disk_get_nr_free(struct dm_space_map *sm, dm_block_t *count)
{
struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
+
*count = (smd->old_ll.nr_blocks - smd->old_ll.nr_allocated) - smd->nr_allocated_this_transaction;
return 0;
@@ -65,6 +68,7 @@ static int sm_disk_get_count(struct dm_space_map *sm, dm_block_t b,
uint32_t *result)
{
struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
+
return sm_ll_lookup(&smd->ll, b, result);
}
@@ -91,9 +95,8 @@ static int sm_disk_set_count(struct dm_space_map *sm, dm_block_t b,
struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
r = sm_ll_insert(&smd->ll, b, count, &nr_allocations);
- if (!r) {
+ if (!r)
smd->nr_allocated_this_transaction += nr_allocations;
- }
return r;
}
@@ -134,22 +137,20 @@ static int sm_disk_new_block(struct dm_space_map *sm, dm_block_t *b)
* Any block we allocate has to be free in both the old and current ll.
*/
r = sm_ll_find_common_free_block(&smd->old_ll, &smd->ll, smd->begin, smd->ll.nr_blocks, b);
- if (r == -ENOSPC) {
+ if (r == -ENOSPC)
/*
* There's no free block between smd->begin and the end of the metadata device.
* We search before smd->begin in case something has been freed.
*/
r = sm_ll_find_common_free_block(&smd->old_ll, &smd->ll, 0, smd->begin, b);
- }
if (r)
return r;
smd->begin = *b + 1;
r = sm_ll_inc(&smd->ll, *b, *b + 1, &nr_allocations);
- if (!r) {
+ if (!r)
smd->nr_allocated_this_transaction += nr_allocations;
- }
return r;
}
diff --git a/drivers/md/persistent-data/dm-space-map-disk.h b/drivers/md/persistent-data/dm-space-map-disk.h
index 447a0a9a2d9f..6f8665db94db 100644
--- a/drivers/md/persistent-data/dm-space-map-disk.h
+++ b/drivers/md/persistent-data/dm-space-map-disk.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2011 Red Hat, Inc.
*
diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
index 392ae26134a4..04698fd03e60 100644
--- a/drivers/md/persistent-data/dm-space-map-metadata.c
+++ b/drivers/md/persistent-data/dm-space-map-metadata.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2011 Red Hat, Inc.
*
@@ -94,8 +95,8 @@ struct block_op {
};
struct bop_ring_buffer {
- unsigned begin;
- unsigned end;
+ unsigned int begin;
+ unsigned int end;
struct block_op bops[MAX_RECURSIVE_ALLOCATIONS + 1];
};
@@ -110,9 +111,10 @@ static bool brb_empty(struct bop_ring_buffer *brb)
return brb->begin == brb->end;
}
-static unsigned brb_next(struct bop_ring_buffer *brb, unsigned old)
+static unsigned int brb_next(struct bop_ring_buffer *brb, unsigned int old)
{
- unsigned r = old + 1;
+ unsigned int r = old + 1;
+
return r >= ARRAY_SIZE(brb->bops) ? 0 : r;
}
@@ -120,7 +122,7 @@ static int brb_push(struct bop_ring_buffer *brb,
enum block_op_type type, dm_block_t b, dm_block_t e)
{
struct block_op *bop;
- unsigned next = brb_next(brb, brb->end);
+ unsigned int next = brb_next(brb, brb->end);
/*
* We don't allow the last bop to be filled, this way we can
@@ -171,8 +173,8 @@ struct sm_metadata {
dm_block_t begin;
- unsigned recursion_count;
- unsigned allocated_this_transaction;
+ unsigned int recursion_count;
+ unsigned int allocated_this_transaction;
struct bop_ring_buffer uncommitted;
struct threshold threshold;
@@ -181,6 +183,7 @@ struct sm_metadata {
static int add_bop(struct sm_metadata *smm, enum block_op_type type, dm_block_t b, dm_block_t e)
{
int r = brb_push(&smm->uncommitted, type, b, e);
+
if (r) {
DMERR("too many recursive allocations");
return -ENOMEM;
@@ -300,9 +303,9 @@ static int sm_metadata_get_count(struct dm_space_map *sm, dm_block_t b,
uint32_t *result)
{
int r;
- unsigned i;
+ unsigned int i;
struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
- unsigned adjustment = 0;
+ unsigned int adjustment = 0;
/*
* We may have some uncommitted adjustments to add. This list
@@ -340,7 +343,7 @@ static int sm_metadata_count_is_more_than_one(struct dm_space_map *sm,
dm_block_t b, int *result)
{
int r, adjustment = 0;
- unsigned i;
+ unsigned int i;
struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
uint32_t rc;
@@ -486,6 +489,7 @@ static int sm_metadata_new_block(struct dm_space_map *sm, dm_block_t *b)
struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
int r = sm_metadata_new_block_(sm, b);
+
if (r) {
DMERR_LIMIT("unable to allocate new metadata block");
return r;
diff --git a/drivers/md/persistent-data/dm-space-map-metadata.h b/drivers/md/persistent-data/dm-space-map-metadata.h
index 64df923974d8..ca5c3e532627 100644
--- a/drivers/md/persistent-data/dm-space-map-metadata.h
+++ b/drivers/md/persistent-data/dm-space-map-metadata.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2011 Red Hat, Inc.
*
diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h
index a015cd11f6e9..dab490353781 100644
--- a/drivers/md/persistent-data/dm-space-map.h
+++ b/drivers/md/persistent-data/dm-space-map.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2011 Red Hat, Inc.
*
diff --git a/drivers/md/persistent-data/dm-transaction-manager.c b/drivers/md/persistent-data/dm-transaction-manager.c
index 16643fc974e8..6dc016248baf 100644
--- a/drivers/md/persistent-data/dm-transaction-manager.c
+++ b/drivers/md/persistent-data/dm-transaction-manager.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2011 Red Hat, Inc.
*
@@ -28,14 +29,15 @@ struct prefetch_set {
dm_block_t blocks[PREFETCH_SIZE];
};
-static unsigned prefetch_hash(dm_block_t b)
+static unsigned int prefetch_hash(dm_block_t b)
{
return hash_64(b, PREFETCH_BITS);
}
static void prefetch_wipe(struct prefetch_set *p)
{
- unsigned i;
+ unsigned int i;
+
for (i = 0; i < PREFETCH_SIZE; i++)
p->blocks[i] = PREFETCH_SENTINEL;
}
@@ -48,7 +50,7 @@ static void prefetch_init(struct prefetch_set *p)
static void prefetch_add(struct prefetch_set *p, dm_block_t b)
{
- unsigned h = prefetch_hash(b);
+ unsigned int h = prefetch_hash(b);
mutex_lock(&p->lock);
if (p->blocks[h] == PREFETCH_SENTINEL)
@@ -59,7 +61,7 @@ static void prefetch_add(struct prefetch_set *p, dm_block_t b)
static void prefetch_issue(struct prefetch_set *p, struct dm_block_manager *bm)
{
- unsigned i;
+ unsigned int i;
mutex_lock(&p->lock);
@@ -103,7 +105,7 @@ struct dm_transaction_manager {
static int is_shadow(struct dm_transaction_manager *tm, dm_block_t b)
{
int r = 0;
- unsigned bucket = dm_hash_block(b, DM_HASH_MASK);
+ unsigned int bucket = dm_hash_block(b, DM_HASH_MASK);
struct shadow_info *si;
spin_lock(&tm->lock);
@@ -123,7 +125,7 @@ static int is_shadow(struct dm_transaction_manager *tm, dm_block_t b)
*/
static void insert_shadow(struct dm_transaction_manager *tm, dm_block_t b)
{
- unsigned bucket;
+ unsigned int bucket;
struct shadow_info *si;
si = kmalloc(sizeof(*si), GFP_NOIO);
@@ -393,11 +395,11 @@ void dm_tm_dec_range(struct dm_transaction_manager *tm, dm_block_t b, dm_block_t
EXPORT_SYMBOL_GPL(dm_tm_dec_range);
void dm_tm_with_runs(struct dm_transaction_manager *tm,
- const __le64 *value_le, unsigned count, dm_tm_run_fn fn)
+ const __le64 *value_le, unsigned int count, dm_tm_run_fn fn)
{
uint64_t b, begin, end;
bool in_run = false;
- unsigned i;
+ unsigned int i;
for (i = 0; i < count; i++, value_le++) {
b = le64_to_cpu(*value_le);
diff --git a/drivers/md/persistent-data/dm-transaction-manager.h b/drivers/md/persistent-data/dm-transaction-manager.h
index 906c02ed0365..01f7e650118d 100644
--- a/drivers/md/persistent-data/dm-transaction-manager.h
+++ b/drivers/md/persistent-data/dm-transaction-manager.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2011 Red Hat, Inc.
*
@@ -111,7 +112,7 @@ void dm_tm_dec_range(struct dm_transaction_manager *tm, dm_block_t b, dm_block_t
*/
typedef void (*dm_tm_run_fn)(struct dm_transaction_manager *, dm_block_t, dm_block_t);
void dm_tm_with_runs(struct dm_transaction_manager *tm,
- const __le64 *value_le, unsigned count, dm_tm_run_fn fn);
+ const __le64 *value_le, unsigned int count, dm_tm_run_fn fn);
int dm_tm_ref(struct dm_transaction_manager *tm, dm_block_t b, uint32_t *result);
diff --git a/drivers/media/common/Kconfig b/drivers/media/common/Kconfig
index 852b7d92fbdd..b1bc58da27fc 100644
--- a/drivers/media/common/Kconfig
+++ b/drivers/media/common/Kconfig
@@ -14,6 +14,9 @@ config TTPCI_EEPROM
tristate
depends on I2C
+config UVC_COMMON
+ tristate
+
config VIDEO_CX2341X
tristate
diff --git a/drivers/media/common/Makefile b/drivers/media/common/Makefile
index d78a0df15478..3f17d696feb2 100644
--- a/drivers/media/common/Makefile
+++ b/drivers/media/common/Makefile
@@ -5,5 +5,6 @@ obj-y += b2c2/ siano/ v4l2-tpg/ videobuf2/
# (e. g. LC_ALL=C sort Makefile)
obj-$(CONFIG_CYPRESS_FIRMWARE) += cypress_firmware.o
obj-$(CONFIG_TTPCI_EEPROM) += ttpci-eeprom.o
+obj-$(CONFIG_UVC_COMMON) += uvc.o
obj-$(CONFIG_VIDEO_CX2341X) += cx2341x.o
obj-$(CONFIG_VIDEO_TVEEPROM) += tveeprom.o
diff --git a/drivers/media/common/uvc.c b/drivers/media/common/uvc.c
new file mode 100644
index 000000000000..9c0ba7a6c185
--- /dev/null
+++ b/drivers/media/common/uvc.c
@@ -0,0 +1,183 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/usb/uvc.h>
+#include <linux/videodev2.h>
+
+/* ------------------------------------------------------------------------
+ * Video formats
+ */
+
+static const struct uvc_format_desc uvc_fmts[] = {
+ {
+ .guid = UVC_GUID_FORMAT_YUY2,
+ .fcc = V4L2_PIX_FMT_YUYV,
+ },
+ {
+ .guid = UVC_GUID_FORMAT_YUY2_ISIGHT,
+ .fcc = V4L2_PIX_FMT_YUYV,
+ },
+ {
+ .guid = UVC_GUID_FORMAT_NV12,
+ .fcc = V4L2_PIX_FMT_NV12,
+ },
+ {
+ .guid = UVC_GUID_FORMAT_MJPEG,
+ .fcc = V4L2_PIX_FMT_MJPEG,
+ },
+ {
+ .guid = UVC_GUID_FORMAT_YV12,
+ .fcc = V4L2_PIX_FMT_YVU420,
+ },
+ {
+ .guid = UVC_GUID_FORMAT_I420,
+ .fcc = V4L2_PIX_FMT_YUV420,
+ },
+ {
+ .guid = UVC_GUID_FORMAT_M420,
+ .fcc = V4L2_PIX_FMT_M420,
+ },
+ {
+ .guid = UVC_GUID_FORMAT_UYVY,
+ .fcc = V4L2_PIX_FMT_UYVY,
+ },
+ {
+ .guid = UVC_GUID_FORMAT_Y800,
+ .fcc = V4L2_PIX_FMT_GREY,
+ },
+ {
+ .guid = UVC_GUID_FORMAT_Y8,
+ .fcc = V4L2_PIX_FMT_GREY,
+ },
+ {
+ .guid = UVC_GUID_FORMAT_D3DFMT_L8,
+ .fcc = V4L2_PIX_FMT_GREY,
+ },
+ {
+ .guid = UVC_GUID_FORMAT_KSMEDIA_L8_IR,
+ .fcc = V4L2_PIX_FMT_GREY,
+ },
+ {
+ .guid = UVC_GUID_FORMAT_Y10,
+ .fcc = V4L2_PIX_FMT_Y10,
+ },
+ {
+ .guid = UVC_GUID_FORMAT_Y12,
+ .fcc = V4L2_PIX_FMT_Y12,
+ },
+ {
+ .guid = UVC_GUID_FORMAT_Y16,
+ .fcc = V4L2_PIX_FMT_Y16,
+ },
+ {
+ .guid = UVC_GUID_FORMAT_BY8,
+ .fcc = V4L2_PIX_FMT_SBGGR8,
+ },
+ {
+ .guid = UVC_GUID_FORMAT_BA81,
+ .fcc = V4L2_PIX_FMT_SBGGR8,
+ },
+ {
+ .guid = UVC_GUID_FORMAT_GBRG,
+ .fcc = V4L2_PIX_FMT_SGBRG8,
+ },
+ {
+ .guid = UVC_GUID_FORMAT_GRBG,
+ .fcc = V4L2_PIX_FMT_SGRBG8,
+ },
+ {
+ .guid = UVC_GUID_FORMAT_RGGB,
+ .fcc = V4L2_PIX_FMT_SRGGB8,
+ },
+ {
+ .guid = UVC_GUID_FORMAT_RGBP,
+ .fcc = V4L2_PIX_FMT_RGB565,
+ },
+ {
+ .guid = UVC_GUID_FORMAT_BGR3,
+ .fcc = V4L2_PIX_FMT_BGR24,
+ },
+ {
+ .guid = UVC_GUID_FORMAT_BGR4,
+ .fcc = V4L2_PIX_FMT_XBGR32,
+ },
+ {
+ .guid = UVC_GUID_FORMAT_H264,
+ .fcc = V4L2_PIX_FMT_H264,
+ },
+ {
+ .guid = UVC_GUID_FORMAT_H265,
+ .fcc = V4L2_PIX_FMT_HEVC,
+ },
+ {
+ .guid = UVC_GUID_FORMAT_Y8I,
+ .fcc = V4L2_PIX_FMT_Y8I,
+ },
+ {
+ .guid = UVC_GUID_FORMAT_Y12I,
+ .fcc = V4L2_PIX_FMT_Y12I,
+ },
+ {
+ .guid = UVC_GUID_FORMAT_Z16,
+ .fcc = V4L2_PIX_FMT_Z16,
+ },
+ {
+ .guid = UVC_GUID_FORMAT_RW10,
+ .fcc = V4L2_PIX_FMT_SRGGB10P,
+ },
+ {
+ .guid = UVC_GUID_FORMAT_BG16,
+ .fcc = V4L2_PIX_FMT_SBGGR16,
+ },
+ {
+ .guid = UVC_GUID_FORMAT_GB16,
+ .fcc = V4L2_PIX_FMT_SGBRG16,
+ },
+ {
+ .guid = UVC_GUID_FORMAT_RG16,
+ .fcc = V4L2_PIX_FMT_SRGGB16,
+ },
+ {
+ .guid = UVC_GUID_FORMAT_GR16,
+ .fcc = V4L2_PIX_FMT_SGRBG16,
+ },
+ {
+ .guid = UVC_GUID_FORMAT_INVZ,
+ .fcc = V4L2_PIX_FMT_Z16,
+ },
+ {
+ .guid = UVC_GUID_FORMAT_INVI,
+ .fcc = V4L2_PIX_FMT_Y10,
+ },
+ {
+ .guid = UVC_GUID_FORMAT_INZI,
+ .fcc = V4L2_PIX_FMT_INZI,
+ },
+ {
+ .guid = UVC_GUID_FORMAT_CNF4,
+ .fcc = V4L2_PIX_FMT_CNF4,
+ },
+ {
+ .guid = UVC_GUID_FORMAT_HEVC,
+ .fcc = V4L2_PIX_FMT_HEVC,
+ },
+};
+
+const struct uvc_format_desc *uvc_format_by_guid(const u8 guid[16])
+{
+ unsigned int len = ARRAY_SIZE(uvc_fmts);
+ unsigned int i;
+
+ for (i = 0; i < len; ++i) {
+ if (memcmp(guid, uvc_fmts[i].guid, 16) == 0)
+ return &uvc_fmts[i];
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(uvc_format_by_guid);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/common/videobuf2/videobuf2-core.c b/drivers/media/common/videobuf2/videobuf2-core.c
index fc3758a5bc1c..53e495223ea0 100644
--- a/drivers/media/common/videobuf2/videobuf2-core.c
+++ b/drivers/media/common/videobuf2/videobuf2-core.c
@@ -2149,8 +2149,6 @@ int vb2_core_streamon(struct vb2_queue *q, unsigned int type)
if (ret)
return ret;
- q->streaming = 1;
-
/*
* Tell driver to start streaming provided sufficient buffers
* are available.
@@ -2161,12 +2159,13 @@ int vb2_core_streamon(struct vb2_queue *q, unsigned int type)
goto unprepare;
}
+ q->streaming = 1;
+
dprintk(q, 3, "successful\n");
return 0;
unprepare:
call_void_qop(q, unprepare_streaming, q);
- q->streaming = 0;
return ret;
}
EXPORT_SYMBOL_GPL(vb2_core_streamon);
diff --git a/drivers/media/common/videobuf2/videobuf2-dma-contig.c b/drivers/media/common/videobuf2/videobuf2-dma-contig.c
index 5f1175f8b349..205d3cac425c 100644
--- a/drivers/media/common/videobuf2/videobuf2-dma-contig.c
+++ b/drivers/media/common/videobuf2/videobuf2-dma-contig.c
@@ -293,7 +293,7 @@ static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)
return ret;
}
- vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+ vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
vma->vm_private_data = &buf->handler;
vma->vm_ops = &vb2_common_vm_ops;
diff --git a/drivers/media/common/videobuf2/videobuf2-vmalloc.c b/drivers/media/common/videobuf2/videobuf2-vmalloc.c
index 959b45beb1f3..a6c6d2fcaaa4 100644
--- a/drivers/media/common/videobuf2/videobuf2-vmalloc.c
+++ b/drivers/media/common/videobuf2/videobuf2-vmalloc.c
@@ -185,7 +185,7 @@ static int vb2_vmalloc_mmap(void *buf_priv, struct vm_area_struct *vma)
/*
* Make sure that vm_areas for 2 buffers won't be merged together
*/
- vma->vm_flags |= VM_DONTEXPAND;
+ vm_flags_set(vma, VM_DONTEXPAND);
/*
* Use common vm_area operations to track buffer refcount.
diff --git a/drivers/media/pci/intel/ipu3/cio2-bridge.c b/drivers/media/pci/intel/ipu3/cio2-bridge.c
index df6c94da2f6a..dfefe0d8aa95 100644
--- a/drivers/media/pci/intel/ipu3/cio2-bridge.c
+++ b/drivers/media/pci/intel/ipu3/cio2-bridge.c
@@ -195,6 +195,19 @@ static void cio2_bridge_init_swnode_names(struct cio2_sensor *sensor)
SWNODE_GRAPH_ENDPOINT_NAME_FMT, 0); /* And endpoint 0 */
}
+static void cio2_bridge_init_swnode_group(struct cio2_sensor *sensor)
+{
+ struct software_node *nodes = sensor->swnodes;
+
+ sensor->group[SWNODE_SENSOR_HID] = &nodes[SWNODE_SENSOR_HID];
+ sensor->group[SWNODE_SENSOR_PORT] = &nodes[SWNODE_SENSOR_PORT];
+ sensor->group[SWNODE_SENSOR_ENDPOINT] = &nodes[SWNODE_SENSOR_ENDPOINT];
+ sensor->group[SWNODE_CIO2_PORT] = &nodes[SWNODE_CIO2_PORT];
+ sensor->group[SWNODE_CIO2_ENDPOINT] = &nodes[SWNODE_CIO2_ENDPOINT];
+ if (sensor->ssdb.vcmtype)
+ sensor->group[SWNODE_VCM] = &nodes[SWNODE_VCM];
+}
+
static void cio2_bridge_create_connection_swnodes(struct cio2_bridge *bridge,
struct cio2_sensor *sensor)
{
@@ -219,6 +232,8 @@ static void cio2_bridge_create_connection_swnodes(struct cio2_bridge *bridge,
if (sensor->ssdb.vcmtype)
nodes[SWNODE_VCM] =
NODE_VCM(cio2_vcm_types[sensor->ssdb.vcmtype - 1]);
+
+ cio2_bridge_init_swnode_group(sensor);
}
static void cio2_bridge_instantiate_vcm_i2c_client(struct cio2_sensor *sensor)
@@ -252,7 +267,7 @@ static void cio2_bridge_unregister_sensors(struct cio2_bridge *bridge)
for (i = 0; i < bridge->n_sensors; i++) {
sensor = &bridge->sensors[i];
- software_node_unregister_nodes(sensor->swnodes);
+ software_node_unregister_node_group(sensor->group);
ACPI_FREE(sensor->pld);
acpi_dev_put(sensor->adev);
i2c_unregister_device(sensor->vcm_i2c_client);
@@ -263,7 +278,7 @@ static int cio2_bridge_connect_sensor(const struct cio2_sensor_config *cfg,
struct cio2_bridge *bridge,
struct pci_dev *cio2)
{
- struct fwnode_handle *fwnode;
+ struct fwnode_handle *fwnode, *primary;
struct cio2_sensor *sensor;
struct acpi_device *adev;
acpi_status status;
@@ -310,7 +325,7 @@ static int cio2_bridge_connect_sensor(const struct cio2_sensor_config *cfg,
cio2_bridge_create_fwnode_properties(sensor, bridge, cfg);
cio2_bridge_create_connection_swnodes(bridge, sensor);
- ret = software_node_register_nodes(sensor->swnodes);
+ ret = software_node_register_node_group(sensor->group);
if (ret)
goto err_free_pld;
@@ -322,7 +337,9 @@ static int cio2_bridge_connect_sensor(const struct cio2_sensor_config *cfg,
}
sensor->adev = acpi_dev_get(adev);
- adev->fwnode.secondary = fwnode;
+
+ primary = acpi_fwnode_handle(adev);
+ primary->secondary = fwnode;
cio2_bridge_instantiate_vcm_i2c_client(sensor);
@@ -335,7 +352,7 @@ static int cio2_bridge_connect_sensor(const struct cio2_sensor_config *cfg,
return 0;
err_free_swnodes:
- software_node_unregister_nodes(sensor->swnodes);
+ software_node_unregister_node_group(sensor->group);
err_free_pld:
ACPI_FREE(sensor->pld);
err_put_adev:
diff --git a/drivers/media/pci/intel/ipu3/cio2-bridge.h b/drivers/media/pci/intel/ipu3/cio2-bridge.h
index 4418cbd08208..b93b749c65bd 100644
--- a/drivers/media/pci/intel/ipu3/cio2-bridge.h
+++ b/drivers/media/pci/intel/ipu3/cio2-bridge.h
@@ -117,8 +117,9 @@ struct cio2_sensor {
struct acpi_device *adev;
struct i2c_client *vcm_i2c_client;
- /* SWNODE_COUNT + 1 for terminating empty node */
- struct software_node swnodes[SWNODE_COUNT + 1];
+ /* SWNODE_COUNT + 1 for terminating NULL */
+ const struct software_node *group[SWNODE_COUNT + 1];
+ struct software_node swnodes[SWNODE_COUNT];
struct cio2_node_names node_names;
struct cio2_sensor_ssdb ssdb;
diff --git a/drivers/media/platform/qcom/venus/firmware.c b/drivers/media/platform/qcom/venus/firmware.c
index 142d4c74017c..07d4dceb5e72 100644
--- a/drivers/media/platform/qcom/venus/firmware.c
+++ b/drivers/media/platform/qcom/venus/firmware.c
@@ -158,7 +158,7 @@ static int venus_boot_no_tz(struct venus_core *core, phys_addr_t mem_phys,
core->fw.mapped_mem_size = mem_size;
ret = iommu_map(iommu, VENUS_FW_START_ADDR, mem_phys, mem_size,
- IOMMU_READ | IOMMU_WRITE | IOMMU_PRIV);
+ IOMMU_READ | IOMMU_WRITE | IOMMU_PRIV, GFP_KERNEL);
if (ret) {
dev_err(dev, "could not map video firmware region\n");
return ret;
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_drv.c b/drivers/media/platform/renesas/vsp1/vsp1_drv.c
index c260d318d298..5710152d6511 100644
--- a/drivers/media/platform/renesas/vsp1/vsp1_drv.c
+++ b/drivers/media/platform/renesas/vsp1/vsp1_drv.c
@@ -818,9 +818,9 @@ static const struct vsp1_device_info vsp1_device_infos[] = {
.wpf_count = 2,
.num_bru_inputs = 5,
}, {
- .version = VI6_IP_VERSION_MODEL_VSPD_V3U,
+ .version = VI6_IP_VERSION_MODEL_VSPD_GEN4,
.model = "VSP2-D",
- .gen = 3,
+ .gen = 4,
.features = VSP1_HAS_BRU | VSP1_HAS_EXT_DL,
.lif_count = 1,
.rpf_count = 5,
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_hgo.c b/drivers/media/platform/renesas/vsp1/vsp1_hgo.c
index bf3f981f93a1..e6492deb0a64 100644
--- a/drivers/media/platform/renesas/vsp1/vsp1_hgo.c
+++ b/drivers/media/platform/renesas/vsp1/vsp1_hgo.c
@@ -196,10 +196,10 @@ struct vsp1_hgo *vsp1_hgo_create(struct vsp1_device *vsp1)
/* Initialize the control handler. */
v4l2_ctrl_handler_init(&hgo->ctrls.handler,
- vsp1->info->gen == 3 ? 2 : 1);
+ vsp1->info->gen >= 3 ? 2 : 1);
hgo->ctrls.max_rgb = v4l2_ctrl_new_custom(&hgo->ctrls.handler,
&hgo_max_rgb_control, NULL);
- if (vsp1->info->gen == 3)
+ if (vsp1->info->gen >= 3)
hgo->ctrls.num_bins =
v4l2_ctrl_new_custom(&hgo->ctrls.handler,
&hgo_num_bins_control, NULL);
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_lif.c b/drivers/media/platform/renesas/vsp1/vsp1_lif.c
index 186a5730e1e3..0ab2e0c70474 100644
--- a/drivers/media/platform/renesas/vsp1/vsp1_lif.c
+++ b/drivers/media/platform/renesas/vsp1/vsp1_lif.c
@@ -114,6 +114,7 @@ static void lif_configure_stream(struct vsp1_entity *entity,
break;
case VI6_IP_VERSION_MODEL_VSPD_GEN3:
+ case VI6_IP_VERSION_MODEL_VSPD_GEN4:
default:
hbth = 0;
obth = 3000;
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_pipe.c b/drivers/media/platform/renesas/vsp1/vsp1_pipe.c
index f72ac01c21ea..f8093ba9539e 100644
--- a/drivers/media/platform/renesas/vsp1/vsp1_pipe.c
+++ b/drivers/media/platform/renesas/vsp1/vsp1_pipe.c
@@ -146,6 +146,18 @@ static const struct vsp1_format_info vsp1_video_formats[] = {
VI6_FMT_ARGB_8888, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
1, { 32, 0, 0 }, false, false, 1, 1, false },
+ { V4L2_PIX_FMT_RGBX1010102, MEDIA_BUS_FMT_ARGB8888_1X32,
+ VI6_FMT_RGB10_RGB10A2_A2RGB10,
+ VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS,
+ 1, { 32, 0, 0 }, false, false, 1, 1, false },
+ { V4L2_PIX_FMT_RGBA1010102, MEDIA_BUS_FMT_ARGB8888_1X32,
+ VI6_FMT_RGB10_RGB10A2_A2RGB10,
+ VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS,
+ 1, { 32, 0, 0 }, false, false, 1, 1, false },
+ { V4L2_PIX_FMT_ARGB2101010, MEDIA_BUS_FMT_ARGB8888_1X32,
+ VI6_FMT_RGB10_RGB10A2_A2RGB10,
+ VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS,
+ 1, { 32, 0, 0 }, false, false, 1, 1, false },
{ V4L2_PIX_FMT_UYVY, MEDIA_BUS_FMT_AYUV8_1X32,
VI6_FMT_YUYV_422, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
@@ -202,6 +214,12 @@ static const struct vsp1_format_info vsp1_video_formats[] = {
VI6_FMT_Y_U_V_444, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
3, { 8, 8, 8 }, false, true, 1, 1, false },
+ { V4L2_PIX_FMT_Y210, MEDIA_BUS_FMT_AYUV8_1X32,
+ VI6_FMT_YUYV_422, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS,
+ 1, { 32, 0, 0 }, false, false, 2, 1, false },
+ { V4L2_PIX_FMT_Y212, MEDIA_BUS_FMT_AYUV8_1X32,
+ VI6_FMT_YUYV_422, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS,
+ 1, { 32, 0, 0 }, false, false, 2, 1, false },
};
/**
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_regs.h b/drivers/media/platform/renesas/vsp1/vsp1_regs.h
index 8928f4c6bb55..d94343ae57a1 100644
--- a/drivers/media/platform/renesas/vsp1/vsp1_regs.h
+++ b/drivers/media/platform/renesas/vsp1/vsp1_regs.h
@@ -228,6 +228,28 @@
#define VI6_RPF_MULT_ALPHA_RATIO_MASK (0xff << 0)
#define VI6_RPF_MULT_ALPHA_RATIO_SHIFT 0
+#define VI6_RPF_EXT_INFMT0 0x0370
+#define VI6_RPF_EXT_INFMT0_F2B BIT(12)
+#define VI6_RPF_EXT_INFMT0_IPBD_Y_8 (0 << 8)
+#define VI6_RPF_EXT_INFMT0_IPBD_Y_10 (1 << 8)
+#define VI6_RPF_EXT_INFMT0_IPBD_Y_12 (2 << 8)
+#define VI6_RPF_EXT_INFMT0_IPBD_C_8 (0 << 4)
+#define VI6_RPF_EXT_INFMT0_IPBD_C_10 (1 << 4)
+#define VI6_RPF_EXT_INFMT0_IPBD_C_12 (2 << 4)
+#define VI6_RPF_EXT_INFMT0_BYPP_M1_RGB10 (3 << 0)
+
+#define VI6_RPF_EXT_INFMT1 0x0374
+#define VI6_RPF_EXT_INFMT1_PACK_CPOS(a, b, c, d) \
+ (((a) << 24) | ((b) << 16) | ((c) << 8) | ((d) << 0))
+
+#define VI6_RPF_EXT_INFMT2 0x0378
+#define VI6_RPF_EXT_INFMT2_PACK_CLEN(a, b, c, d) \
+ (((a) << 24) | ((b) << 16) | ((c) << 8) | ((d) << 0))
+
+#define VI6_RPF_BRDITH_CTRL 0x03e0
+#define VI6_RPF_BRDITH_CTRL_ODE BIT(8)
+#define VI6_RPF_BRDITH_CTRL_CBRM BIT(0)
+
/* -----------------------------------------------------------------------------
* WPF Control Registers
*/
@@ -766,7 +788,7 @@
#define VI6_IP_VERSION_MODEL_VSPD_V3 (0x18 << 8)
#define VI6_IP_VERSION_MODEL_VSPDL_GEN3 (0x19 << 8)
#define VI6_IP_VERSION_MODEL_VSPBS_GEN3 (0x1a << 8)
-#define VI6_IP_VERSION_MODEL_VSPD_V3U (0x1c << 8)
+#define VI6_IP_VERSION_MODEL_VSPD_GEN4 (0x1c << 8)
/* RZ/G2L SoCs have no version register, So use 0x80 as the model version */
#define VI6_IP_VERSION_MODEL_VSPD_RZG2L (0x80 << 8)
@@ -782,6 +804,7 @@
#define VI6_IP_VERSION_SOC_M3N (0x04 << 0)
#define VI6_IP_VERSION_SOC_E3 (0x04 << 0)
#define VI6_IP_VERSION_SOC_V3U (0x05 << 0)
+#define VI6_IP_VERSION_SOC_V4H (0x06 << 0)
/* RZ/G2L SoCs have no version register, So use 0x80 for SoC Identification */
#define VI6_IP_VERSION_SOC_RZG2L (0x80 << 0)
@@ -845,6 +868,7 @@
#define VI6_FMT_XBXGXR_262626 0x21
#define VI6_FMT_ABGR_8888 0x22
#define VI6_FMT_XXRGB_88565 0x23
+#define VI6_FMT_RGB10_RGB10A2_A2RGB10 0x30
#define VI6_FMT_Y_UV_444 0x40
#define VI6_FMT_Y_UV_422 0x41
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_rpf.c b/drivers/media/platform/renesas/vsp1/vsp1_rpf.c
index 75083cb234fe..3b17f5fa4067 100644
--- a/drivers/media/platform/renesas/vsp1/vsp1_rpf.c
+++ b/drivers/media/platform/renesas/vsp1/vsp1_rpf.c
@@ -109,6 +109,58 @@ static void rpf_configure_stream(struct vsp1_entity *entity,
vsp1_rpf_write(rpf, dlb, VI6_RPF_INFMT, infmt);
vsp1_rpf_write(rpf, dlb, VI6_RPF_DSWAP, fmtinfo->swap);
+ if (entity->vsp1->info->gen == 4) {
+ u32 ext_infmt0;
+ u32 ext_infmt1;
+ u32 ext_infmt2;
+
+ switch (fmtinfo->fourcc) {
+ case V4L2_PIX_FMT_RGBX1010102:
+ ext_infmt0 = VI6_RPF_EXT_INFMT0_BYPP_M1_RGB10;
+ ext_infmt1 = VI6_RPF_EXT_INFMT1_PACK_CPOS(0, 10, 20, 0);
+ ext_infmt2 = VI6_RPF_EXT_INFMT2_PACK_CLEN(10, 10, 10, 0);
+ break;
+
+ case V4L2_PIX_FMT_RGBA1010102:
+ ext_infmt0 = VI6_RPF_EXT_INFMT0_BYPP_M1_RGB10;
+ ext_infmt1 = VI6_RPF_EXT_INFMT1_PACK_CPOS(0, 10, 20, 30);
+ ext_infmt2 = VI6_RPF_EXT_INFMT2_PACK_CLEN(10, 10, 10, 2);
+ break;
+
+ case V4L2_PIX_FMT_ARGB2101010:
+ ext_infmt0 = VI6_RPF_EXT_INFMT0_BYPP_M1_RGB10;
+ ext_infmt1 = VI6_RPF_EXT_INFMT1_PACK_CPOS(2, 12, 22, 0);
+ ext_infmt2 = VI6_RPF_EXT_INFMT2_PACK_CLEN(10, 10, 10, 2);
+ break;
+
+ case V4L2_PIX_FMT_Y210:
+ ext_infmt0 = VI6_RPF_EXT_INFMT0_F2B |
+ VI6_RPF_EXT_INFMT0_IPBD_Y_10 |
+ VI6_RPF_EXT_INFMT0_IPBD_C_10;
+ ext_infmt1 = 0x0;
+ ext_infmt2 = 0x0;
+ break;
+
+ case V4L2_PIX_FMT_Y212:
+ ext_infmt0 = VI6_RPF_EXT_INFMT0_F2B |
+ VI6_RPF_EXT_INFMT0_IPBD_Y_12 |
+ VI6_RPF_EXT_INFMT0_IPBD_C_12;
+ ext_infmt1 = 0x0;
+ ext_infmt2 = 0x0;
+ break;
+
+ default:
+ ext_infmt0 = 0;
+ ext_infmt1 = 0;
+ ext_infmt2 = 0;
+ break;
+ }
+
+ vsp1_rpf_write(rpf, dlb, VI6_RPF_EXT_INFMT0, ext_infmt0);
+ vsp1_rpf_write(rpf, dlb, VI6_RPF_EXT_INFMT1, ext_infmt1);
+ vsp1_rpf_write(rpf, dlb, VI6_RPF_EXT_INFMT2, ext_infmt2);
+ }
+
/* Output location. */
if (pipe->brx) {
const struct v4l2_rect *compose;
@@ -133,18 +185,18 @@ static void rpf_configure_stream(struct vsp1_entity *entity,
* a fixed alpha value set through the V4L2_CID_ALPHA_COMPONENT control
* otherwise.
*
- * The Gen3 RPF has extended alpha capability and can both multiply the
+ * The Gen3+ RPF has extended alpha capability and can both multiply the
* alpha channel by a fixed global alpha value, and multiply the pixel
* components to convert the input to premultiplied alpha.
*
* As alpha premultiplication is available in the BRx for both Gen2 and
- * Gen3 we handle it there and use the Gen3 alpha multiplier for global
+ * Gen3+ we handle it there and use the Gen3 alpha multiplier for global
* alpha multiplication only. This however prevents conversion to
* premultiplied alpha if no BRx is present in the pipeline. If that use
* case turns out to be useful we will revisit the implementation (for
* Gen3 only).
*
- * We enable alpha multiplication on Gen3 using the fixed alpha value
+ * We enable alpha multiplication on Gen3+ using the fixed alpha value
* set through the V4L2_CID_ALPHA_COMPONENT control when the input
* contains an alpha channel. On Gen2 the global alpha is ignored in
* that case.
@@ -155,7 +207,7 @@ static void rpf_configure_stream(struct vsp1_entity *entity,
(fmtinfo->alpha ? VI6_RPF_ALPH_SEL_ASEL_PACKED
: VI6_RPF_ALPH_SEL_ASEL_FIXED));
- if (entity->vsp1->info->gen == 3) {
+ if (entity->vsp1->info->gen >= 3) {
u32 mult;
if (fmtinfo->alpha) {
@@ -301,10 +353,10 @@ static void rpf_configure_partition(struct vsp1_entity *entity,
}
/*
- * On Gen3 hardware the SPUVS bit has no effect on 3-planar
+ * On Gen3+ hardware the SPUVS bit has no effect on 3-planar
* formats. Swap the U and V planes manually in that case.
*/
- if (vsp1->info->gen == 3 && format->num_planes == 3 &&
+ if (vsp1->info->gen >= 3 && format->num_planes == 3 &&
fmtinfo->swap_uv)
swap(mem.addr[1], mem.addr[2]);
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_video.c b/drivers/media/platform/renesas/vsp1/vsp1_video.c
index 9d24647c8f32..544012fd1fe9 100644
--- a/drivers/media/platform/renesas/vsp1/vsp1_video.c
+++ b/drivers/media/platform/renesas/vsp1/vsp1_video.c
@@ -267,10 +267,10 @@ static int vsp1_video_pipeline_setup_partitions(struct vsp1_pipeline *pipe)
div_size = format->width;
/*
- * Only Gen3 hardware requires image partitioning, Gen2 will operate
+ * Only Gen3+ hardware requires image partitioning, Gen2 will operate
* with a single partition that covers the whole output.
*/
- if (vsp1->info->gen == 3) {
+ if (vsp1->info->gen >= 3) {
list_for_each_entry(entity, &pipe->entities, list_pipe) {
unsigned int entity_max;
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_wpf.c b/drivers/media/platform/renesas/vsp1/vsp1_wpf.c
index 94e91d7bb56c..d0074ca00920 100644
--- a/drivers/media/platform/renesas/vsp1/vsp1_wpf.c
+++ b/drivers/media/platform/renesas/vsp1/vsp1_wpf.c
@@ -512,10 +512,10 @@ static void wpf_configure_partition(struct vsp1_entity *entity,
}
/*
- * On Gen3 hardware the SPUVS bit has no effect on 3-planar
+ * On Gen3+ hardware the SPUVS bit has no effect on 3-planar
* formats. Swap the U and V planes manually in that case.
*/
- if (vsp1->info->gen == 3 && format->num_planes == 3 &&
+ if (vsp1->info->gen >= 3 && format->num_planes == 3 &&
fmtinfo->swap_uv)
swap(mem.addr[1], mem.addr[2]);
diff --git a/drivers/media/platform/samsung/s3c-camif/Kconfig b/drivers/media/platform/samsung/s3c-camif/Kconfig
index 8cb8d1ac3edc..f359f6382fff 100644
--- a/drivers/media/platform/samsung/s3c-camif/Kconfig
+++ b/drivers/media/platform/samsung/s3c-camif/Kconfig
@@ -1,15 +1,15 @@
# SPDX-License-Identifier: GPL-2.0-only
config VIDEO_S3C_CAMIF
- tristate "Samsung S3C24XX/S3C64XX SoC Camera Interface driver"
+ tristate "Samsung 3C64XX SoC Camera Interface driver"
depends on V4L_PLATFORM_DRIVERS
depends on VIDEO_DEV && I2C && PM
- depends on ARCH_S3C64XX || PLAT_S3C24XX || COMPILE_TEST
+ depends on ARCH_S3C64XX || COMPILE_TEST
select MEDIA_CONTROLLER
select VIDEO_V4L2_SUBDEV_API
select VIDEOBUF2_DMA_CONTIG
help
- This is a v4l2 driver for s3c24xx and s3c64xx SoC series camera
- host interface (CAMIF).
+ This is a v4l2 driver for s3c64xx SoC series camera host interface
+ (CAMIF).
To compile this driver as a module, choose M here: the module
will be called s3c-camif.
diff --git a/drivers/media/platform/ti/davinci/Kconfig b/drivers/media/platform/ti/davinci/Kconfig
index 96d4bed7fe9e..542a602e66be 100644
--- a/drivers/media/platform/ti/davinci/Kconfig
+++ b/drivers/media/platform/ti/davinci/Kconfig
@@ -31,19 +31,3 @@ config VIDEO_DAVINCI_VPIF_CAPTURE
To compile this driver as a module, choose M here. There will
be two modules called vpif.ko and vpif_capture.ko
-
-config VIDEO_DAVINCI_VPBE_DISPLAY
- tristate "TI DaVinci VPBE V4L2-Display driver"
- depends on V4L_PLATFORM_DRIVERS
- depends on VIDEO_DEV
- depends on ARCH_DAVINCI || COMPILE_TEST
- depends on I2C
- select VIDEOBUF2_DMA_CONTIG
- help
- Enables Davinci VPBE module used for display devices.
- This module is used for display on TI DM644x/DM365/DM355
- based display devices.
-
- To compile this driver as a module, choose M here. There will
- be five modules created called vpss.ko, vpbe.ko, vpbe_osd.ko,
- vpbe_venc.ko and vpbe_display.ko
diff --git a/drivers/media/platform/ti/davinci/Makefile b/drivers/media/platform/ti/davinci/Makefile
index b20a91653162..512f03369bae 100644
--- a/drivers/media/platform/ti/davinci/Makefile
+++ b/drivers/media/platform/ti/davinci/Makefile
@@ -7,6 +7,3 @@
obj-$(CONFIG_VIDEO_DAVINCI_VPIF_DISPLAY) += vpif.o vpif_display.o
#VPIF Capture driver
obj-$(CONFIG_VIDEO_DAVINCI_VPIF_CAPTURE) += vpif.o vpif_capture.o
-
-obj-$(CONFIG_VIDEO_DAVINCI_VPBE_DISPLAY) += vpss.o vpbe.o vpbe_osd.o \
- vpbe_venc.o vpbe_display.o
diff --git a/drivers/media/platform/ti/davinci/vpbe.c b/drivers/media/platform/ti/davinci/vpbe.c
deleted file mode 100644
index 509ecc84624e..000000000000
--- a/drivers/media/platform/ti/davinci/vpbe.c
+++ /dev/null
@@ -1,840 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2010 Texas Instruments Inc
- */
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/errno.h>
-#include <linux/fs.h>
-#include <linux/string.h>
-#include <linux/wait.h>
-#include <linux/time.h>
-#include <linux/platform_device.h>
-#include <linux/io.h>
-#include <linux/slab.h>
-#include <linux/clk.h>
-#include <linux/err.h>
-
-#include <media/v4l2-device.h>
-#include <media/davinci/vpbe_types.h>
-#include <media/davinci/vpbe.h>
-#include <media/davinci/vpss.h>
-#include <media/davinci/vpbe_venc.h>
-
-#define VPBE_DEFAULT_OUTPUT "Composite"
-#define VPBE_DEFAULT_MODE "ntsc"
-
-static char *def_output = VPBE_DEFAULT_OUTPUT;
-static char *def_mode = VPBE_DEFAULT_MODE;
-static int debug;
-
-module_param(def_output, charp, S_IRUGO);
-module_param(def_mode, charp, S_IRUGO);
-module_param(debug, int, 0644);
-
-MODULE_PARM_DESC(def_output, "vpbe output name (default:Composite)");
-MODULE_PARM_DESC(def_mode, "vpbe output mode name (default:ntsc");
-MODULE_PARM_DESC(debug, "Debug level 0-1");
-
-MODULE_DESCRIPTION("TI DMXXX VPBE Display controller");
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Texas Instruments");
-
-/**
- * vpbe_current_encoder_info - Get config info for current encoder
- * @vpbe_dev: vpbe device ptr
- *
- * Return ptr to current encoder config info
- */
-static struct encoder_config_info*
-vpbe_current_encoder_info(struct vpbe_device *vpbe_dev)
-{
- struct vpbe_config *cfg = vpbe_dev->cfg;
- int index = vpbe_dev->current_sd_index;
-
- return ((index == 0) ? &cfg->venc :
- &cfg->ext_encoders[index-1]);
-}
-
-/**
- * vpbe_find_encoder_sd_index - Given a name find encoder sd index
- *
- * @cfg: ptr to vpbe cfg
- * @index: index used by application
- *
- * Return sd index of the encoder
- */
-static int vpbe_find_encoder_sd_index(struct vpbe_config *cfg,
- int index)
-{
- char *encoder_name = cfg->outputs[index].subdev_name;
- int i;
-
- /* Venc is always first */
- if (!strcmp(encoder_name, cfg->venc.module_name))
- return 0;
-
- for (i = 0; i < cfg->num_ext_encoders; i++) {
- if (!strcmp(encoder_name,
- cfg->ext_encoders[i].module_name))
- return i+1;
- }
-
- return -EINVAL;
-}
-
-/**
- * vpbe_enum_outputs - enumerate outputs
- * @vpbe_dev: vpbe device ptr
- * @output: ptr to v4l2_output structure
- *
- * Enumerates the outputs available at the vpbe display
- * returns the status, -EINVAL if end of output list
- */
-static int vpbe_enum_outputs(struct vpbe_device *vpbe_dev,
- struct v4l2_output *output)
-{
- struct vpbe_config *cfg = vpbe_dev->cfg;
- unsigned int temp_index = output->index;
-
- if (temp_index >= cfg->num_outputs)
- return -EINVAL;
-
- *output = cfg->outputs[temp_index].output;
- output->index = temp_index;
-
- return 0;
-}
-
-static int vpbe_get_mode_info(struct vpbe_device *vpbe_dev, char *mode,
- int output_index)
-{
- struct vpbe_config *cfg = vpbe_dev->cfg;
- struct vpbe_enc_mode_info var;
- int curr_output = output_index;
- int i;
-
- if (!mode)
- return -EINVAL;
-
- for (i = 0; i < cfg->outputs[curr_output].num_modes; i++) {
- var = cfg->outputs[curr_output].modes[i];
- if (!strcmp(mode, var.name)) {
- vpbe_dev->current_timings = var;
- return 0;
- }
- }
-
- return -EINVAL;
-}
-
-static int vpbe_get_current_mode_info(struct vpbe_device *vpbe_dev,
- struct vpbe_enc_mode_info *mode_info)
-{
- if (!mode_info)
- return -EINVAL;
-
- *mode_info = vpbe_dev->current_timings;
-
- return 0;
-}
-
-/* Get std by std id */
-static int vpbe_get_std_info(struct vpbe_device *vpbe_dev,
- v4l2_std_id std_id)
-{
- struct vpbe_config *cfg = vpbe_dev->cfg;
- struct vpbe_enc_mode_info var;
- int curr_output = vpbe_dev->current_out_index;
- int i;
-
- for (i = 0; i < vpbe_dev->cfg->outputs[curr_output].num_modes; i++) {
- var = cfg->outputs[curr_output].modes[i];
- if ((var.timings_type & VPBE_ENC_STD) &&
- (var.std_id & std_id)) {
- vpbe_dev->current_timings = var;
- return 0;
- }
- }
-
- return -EINVAL;
-}
-
-static int vpbe_get_std_info_by_name(struct vpbe_device *vpbe_dev,
- char *std_name)
-{
- struct vpbe_config *cfg = vpbe_dev->cfg;
- struct vpbe_enc_mode_info var;
- int curr_output = vpbe_dev->current_out_index;
- int i;
-
- for (i = 0; i < vpbe_dev->cfg->outputs[curr_output].num_modes; i++) {
- var = cfg->outputs[curr_output].modes[i];
- if (!strcmp(var.name, std_name)) {
- vpbe_dev->current_timings = var;
- return 0;
- }
- }
-
- return -EINVAL;
-}
-
-/**
- * vpbe_set_output - Set output
- * @vpbe_dev: vpbe device ptr
- * @index: index of output
- *
- * Set vpbe output to the output specified by the index
- */
-static int vpbe_set_output(struct vpbe_device *vpbe_dev, int index)
-{
- struct encoder_config_info *curr_enc_info =
- vpbe_current_encoder_info(vpbe_dev);
- struct vpbe_config *cfg = vpbe_dev->cfg;
- struct venc_platform_data *venc_device = vpbe_dev->venc_device;
- int enc_out_index;
- int sd_index;
- int ret;
-
- if (index >= cfg->num_outputs)
- return -EINVAL;
-
- mutex_lock(&vpbe_dev->lock);
-
- sd_index = vpbe_dev->current_sd_index;
- enc_out_index = cfg->outputs[index].output.index;
- /*
- * Currently we switch the encoder based on output selected
- * by the application. If media controller is implemented later
- * there is will be an API added to setup_link between venc
- * and external encoder. So in that case below comparison always
- * match and encoder will not be switched. But if application
- * chose not to use media controller, then this provides current
- * way of switching encoder at the venc output.
- */
- if (strcmp(curr_enc_info->module_name,
- cfg->outputs[index].subdev_name)) {
- /* Need to switch the encoder at the output */
- sd_index = vpbe_find_encoder_sd_index(cfg, index);
- if (sd_index < 0) {
- ret = -EINVAL;
- goto unlock;
- }
-
- ret = venc_device->setup_if_config(cfg->outputs[index].if_params);
- if (ret)
- goto unlock;
- }
-
- /* Set output at the encoder */
- ret = v4l2_subdev_call(vpbe_dev->encoders[sd_index], video,
- s_routing, 0, enc_out_index, 0);
- if (ret)
- goto unlock;
-
- /*
- * It is assumed that venc or external encoder will set a default
- * mode in the sub device. For external encoder or LCD pannel output,
- * we also need to set up the lcd port for the required mode. So setup
- * the lcd port for the default mode that is configured in the board
- * arch/arm/mach-davinci/board-dm355-evm.setup file for the external
- * encoder.
- */
- ret = vpbe_get_mode_info(vpbe_dev,
- cfg->outputs[index].default_mode, index);
- if (!ret) {
- struct osd_state *osd_device = vpbe_dev->osd_device;
-
- osd_device->ops.set_left_margin(osd_device,
- vpbe_dev->current_timings.left_margin);
- osd_device->ops.set_top_margin(osd_device,
- vpbe_dev->current_timings.upper_margin);
- vpbe_dev->current_sd_index = sd_index;
- vpbe_dev->current_out_index = index;
- }
-unlock:
- mutex_unlock(&vpbe_dev->lock);
- return ret;
-}
-
-static int vpbe_set_default_output(struct vpbe_device *vpbe_dev)
-{
- struct vpbe_config *cfg = vpbe_dev->cfg;
- int i;
-
- for (i = 0; i < cfg->num_outputs; i++) {
- if (!strcmp(def_output,
- cfg->outputs[i].output.name)) {
- int ret = vpbe_set_output(vpbe_dev, i);
-
- if (!ret)
- vpbe_dev->current_out_index = i;
- return ret;
- }
- }
- return 0;
-}
-
-/**
- * vpbe_get_output - Get output
- * @vpbe_dev: vpbe device ptr
- *
- * return current vpbe output to the index
- */
-static unsigned int vpbe_get_output(struct vpbe_device *vpbe_dev)
-{
- return vpbe_dev->current_out_index;
-}
-
-/*
- * vpbe_s_dv_timings - Set the given preset timings in the encoder
- *
- * Sets the timings if supported by the current encoder. Return the status.
- * 0 - success & -EINVAL on error
- */
-static int vpbe_s_dv_timings(struct vpbe_device *vpbe_dev,
- struct v4l2_dv_timings *dv_timings)
-{
- struct vpbe_config *cfg = vpbe_dev->cfg;
- int out_index = vpbe_dev->current_out_index;
- struct vpbe_output *output = &cfg->outputs[out_index];
- int sd_index = vpbe_dev->current_sd_index;
- int ret, i;
-
-
- if (!(cfg->outputs[out_index].output.capabilities &
- V4L2_OUT_CAP_DV_TIMINGS))
- return -ENODATA;
-
- for (i = 0; i < output->num_modes; i++) {
- if (output->modes[i].timings_type == VPBE_ENC_DV_TIMINGS &&
- !memcmp(&output->modes[i].dv_timings,
- dv_timings, sizeof(*dv_timings)))
- break;
- }
- if (i >= output->num_modes)
- return -EINVAL;
- vpbe_dev->current_timings = output->modes[i];
- mutex_lock(&vpbe_dev->lock);
-
- ret = v4l2_subdev_call(vpbe_dev->encoders[sd_index], video,
- s_dv_timings, dv_timings);
- if (!ret && vpbe_dev->amp) {
- /* Call amplifier subdevice */
- ret = v4l2_subdev_call(vpbe_dev->amp, video,
- s_dv_timings, dv_timings);
- }
- /* set the lcd controller output for the given mode */
- if (!ret) {
- struct osd_state *osd_device = vpbe_dev->osd_device;
-
- osd_device->ops.set_left_margin(osd_device,
- vpbe_dev->current_timings.left_margin);
- osd_device->ops.set_top_margin(osd_device,
- vpbe_dev->current_timings.upper_margin);
- }
- mutex_unlock(&vpbe_dev->lock);
-
- return ret;
-}
-
-/*
- * vpbe_g_dv_timings - Get the timings in the current encoder
- *
- * Get the timings in the current encoder. Return the status. 0 - success
- * -EINVAL on error
- */
-static int vpbe_g_dv_timings(struct vpbe_device *vpbe_dev,
- struct v4l2_dv_timings *dv_timings)
-{
- struct vpbe_config *cfg = vpbe_dev->cfg;
- int out_index = vpbe_dev->current_out_index;
-
- if (!(cfg->outputs[out_index].output.capabilities &
- V4L2_OUT_CAP_DV_TIMINGS))
- return -ENODATA;
-
- if (vpbe_dev->current_timings.timings_type &
- VPBE_ENC_DV_TIMINGS) {
- *dv_timings = vpbe_dev->current_timings.dv_timings;
- return 0;
- }
-
- return -EINVAL;
-}
-
-/*
- * vpbe_enum_dv_timings - Enumerate the dv timings in the current encoder
- *
- * Get the timings in the current encoder. Return the status. 0 - success
- * -EINVAL on error
- */
-static int vpbe_enum_dv_timings(struct vpbe_device *vpbe_dev,
- struct v4l2_enum_dv_timings *timings)
-{
- struct vpbe_config *cfg = vpbe_dev->cfg;
- int out_index = vpbe_dev->current_out_index;
- struct vpbe_output *output = &cfg->outputs[out_index];
- int j = 0;
- int i;
-
- if (!(output->output.capabilities & V4L2_OUT_CAP_DV_TIMINGS))
- return -ENODATA;
-
- for (i = 0; i < output->num_modes; i++) {
- if (output->modes[i].timings_type == VPBE_ENC_DV_TIMINGS) {
- if (j == timings->index)
- break;
- j++;
- }
- }
-
- if (i == output->num_modes)
- return -EINVAL;
- timings->timings = output->modes[i].dv_timings;
- return 0;
-}
-
-/*
- * vpbe_s_std - Set the given standard in the encoder
- *
- * Sets the standard if supported by the current encoder. Return the status.
- * 0 - success & -EINVAL on error
- */
-static int vpbe_s_std(struct vpbe_device *vpbe_dev, v4l2_std_id std_id)
-{
- struct vpbe_config *cfg = vpbe_dev->cfg;
- int out_index = vpbe_dev->current_out_index;
- int sd_index = vpbe_dev->current_sd_index;
- int ret;
-
- if (!(cfg->outputs[out_index].output.capabilities &
- V4L2_OUT_CAP_STD))
- return -ENODATA;
-
- ret = vpbe_get_std_info(vpbe_dev, std_id);
- if (ret)
- return ret;
-
- mutex_lock(&vpbe_dev->lock);
-
- ret = v4l2_subdev_call(vpbe_dev->encoders[sd_index], video,
- s_std_output, std_id);
- /* set the lcd controller output for the given mode */
- if (!ret) {
- struct osd_state *osd_device = vpbe_dev->osd_device;
-
- osd_device->ops.set_left_margin(osd_device,
- vpbe_dev->current_timings.left_margin);
- osd_device->ops.set_top_margin(osd_device,
- vpbe_dev->current_timings.upper_margin);
- }
- mutex_unlock(&vpbe_dev->lock);
-
- return ret;
-}
-
-/*
- * vpbe_g_std - Get the standard in the current encoder
- *
- * Get the standard in the current encoder. Return the status. 0 - success
- * -EINVAL on error
- */
-static int vpbe_g_std(struct vpbe_device *vpbe_dev, v4l2_std_id *std_id)
-{
- struct vpbe_enc_mode_info *cur_timings = &vpbe_dev->current_timings;
- struct vpbe_config *cfg = vpbe_dev->cfg;
- int out_index = vpbe_dev->current_out_index;
-
- if (!(cfg->outputs[out_index].output.capabilities & V4L2_OUT_CAP_STD))
- return -ENODATA;
-
- if (cur_timings->timings_type & VPBE_ENC_STD) {
- *std_id = cur_timings->std_id;
- return 0;
- }
-
- return -EINVAL;
-}
-
-/*
- * vpbe_set_mode - Set mode in the current encoder using mode info
- *
- * Use the mode string to decide what timings to set in the encoder
- * This is typically useful when fbset command is used to change the current
- * timings by specifying a string to indicate the timings.
- */
-static int vpbe_set_mode(struct vpbe_device *vpbe_dev,
- struct vpbe_enc_mode_info *mode_info)
-{
- struct vpbe_enc_mode_info *preset_mode = NULL;
- struct vpbe_config *cfg = vpbe_dev->cfg;
- struct v4l2_dv_timings dv_timings;
- struct osd_state *osd_device;
- int out_index = vpbe_dev->current_out_index;
- int i;
-
- if (!mode_info || !mode_info->name)
- return -EINVAL;
-
- for (i = 0; i < cfg->outputs[out_index].num_modes; i++) {
- if (!strcmp(mode_info->name,
- cfg->outputs[out_index].modes[i].name)) {
- preset_mode = &cfg->outputs[out_index].modes[i];
- /*
- * it may be one of the 3 timings type. Check and
- * invoke right API
- */
- if (preset_mode->timings_type & VPBE_ENC_STD)
- return vpbe_s_std(vpbe_dev,
- preset_mode->std_id);
- if (preset_mode->timings_type &
- VPBE_ENC_DV_TIMINGS) {
- dv_timings =
- preset_mode->dv_timings;
- return vpbe_s_dv_timings(vpbe_dev, &dv_timings);
- }
- }
- }
-
- /* Only custom timing should reach here */
- if (!preset_mode)
- return -EINVAL;
-
- mutex_lock(&vpbe_dev->lock);
-
- osd_device = vpbe_dev->osd_device;
- vpbe_dev->current_timings = *preset_mode;
- osd_device->ops.set_left_margin(osd_device,
- vpbe_dev->current_timings.left_margin);
- osd_device->ops.set_top_margin(osd_device,
- vpbe_dev->current_timings.upper_margin);
-
- mutex_unlock(&vpbe_dev->lock);
- return 0;
-}
-
-static int vpbe_set_default_mode(struct vpbe_device *vpbe_dev)
-{
- int ret;
-
- ret = vpbe_get_std_info_by_name(vpbe_dev, def_mode);
- if (ret)
- return ret;
-
- /* set the default mode in the encoder */
- return vpbe_set_mode(vpbe_dev, &vpbe_dev->current_timings);
-}
-
-static int platform_device_get(struct device *dev, void *data)
-{
- struct platform_device *pdev = to_platform_device(dev);
- struct vpbe_device *vpbe_dev = data;
-
- if (strstr(pdev->name, "vpbe-osd"))
- vpbe_dev->osd_device = platform_get_drvdata(pdev);
- if (strstr(pdev->name, "vpbe-venc"))
- vpbe_dev->venc_device = dev_get_platdata(&pdev->dev);
-
- return 0;
-}
-
-/**
- * vpbe_initialize() - Initialize the vpbe display controller
- * @dev: Master and slave device ptr
- * @vpbe_dev: vpbe device ptr
- *
- * Master frame buffer device drivers calls this to initialize vpbe
- * display controller. This will then registers v4l2 device and the sub
- * devices and sets a current encoder sub device for display. v4l2 display
- * device driver is the master and frame buffer display device driver is
- * the slave. Frame buffer display driver checks the initialized during
- * probe and exit if not initialized. Returns status.
- */
-static int vpbe_initialize(struct device *dev, struct vpbe_device *vpbe_dev)
-{
- struct encoder_config_info *enc_info;
- struct amp_config_info *amp_info;
- struct v4l2_subdev **enc_subdev;
- struct osd_state *osd_device;
- struct i2c_adapter *i2c_adap;
- int num_encoders;
- int ret = 0;
- int err;
- int i;
-
- /*
- * v4l2 abd FBDev frame buffer devices will get the vpbe_dev pointer
- * from the platform device by iteration of platform drivers and
- * matching with device name
- */
- if (!vpbe_dev || !dev) {
- printk(KERN_ERR "Null device pointers.\n");
- return -ENODEV;
- }
-
- if (vpbe_dev->initialized)
- return 0;
-
- mutex_lock(&vpbe_dev->lock);
-
- if (strcmp(vpbe_dev->cfg->module_name, "dm644x-vpbe-display") != 0) {
- /* We have dac clock available for platform */
- vpbe_dev->dac_clk = clk_get(vpbe_dev->pdev, "vpss_dac");
- if (IS_ERR(vpbe_dev->dac_clk)) {
- ret = PTR_ERR(vpbe_dev->dac_clk);
- goto fail_mutex_unlock;
- }
- if (clk_prepare_enable(vpbe_dev->dac_clk)) {
- ret = -ENODEV;
- clk_put(vpbe_dev->dac_clk);
- goto fail_mutex_unlock;
- }
- }
-
- /* first enable vpss clocks */
- vpss_enable_clock(VPSS_VPBE_CLOCK, 1);
-
- /* First register a v4l2 device */
- ret = v4l2_device_register(dev, &vpbe_dev->v4l2_dev);
- if (ret) {
- v4l2_err(dev->driver,
- "Unable to register v4l2 device.\n");
- goto fail_clk_put;
- }
- v4l2_info(&vpbe_dev->v4l2_dev, "vpbe v4l2 device registered\n");
-
- err = bus_for_each_dev(&platform_bus_type, NULL, vpbe_dev,
- platform_device_get);
- if (err < 0) {
- ret = err;
- goto fail_dev_unregister;
- }
-
- vpbe_dev->venc = venc_sub_dev_init(&vpbe_dev->v4l2_dev,
- vpbe_dev->cfg->venc.module_name);
- /* register venc sub device */
- if (!vpbe_dev->venc) {
- v4l2_err(&vpbe_dev->v4l2_dev,
- "vpbe unable to init venc sub device\n");
- ret = -ENODEV;
- goto fail_dev_unregister;
- }
- /* initialize osd device */
- osd_device = vpbe_dev->osd_device;
- if (osd_device->ops.initialize) {
- err = osd_device->ops.initialize(osd_device);
- if (err) {
- v4l2_err(&vpbe_dev->v4l2_dev,
- "unable to initialize the OSD device");
- ret = -ENOMEM;
- goto fail_dev_unregister;
- }
- }
-
- /*
- * Register any external encoders that are configured. At index 0 we
- * store venc sd index.
- */
- num_encoders = vpbe_dev->cfg->num_ext_encoders + 1;
- vpbe_dev->encoders = kmalloc_array(num_encoders,
- sizeof(*vpbe_dev->encoders),
- GFP_KERNEL);
- if (!vpbe_dev->encoders) {
- ret = -ENOMEM;
- goto fail_dev_unregister;
- }
-
- i2c_adap = i2c_get_adapter(vpbe_dev->cfg->i2c_adapter_id);
- for (i = 0; i < (vpbe_dev->cfg->num_ext_encoders + 1); i++) {
- if (i == 0) {
- /* venc is at index 0 */
- enc_subdev = &vpbe_dev->encoders[i];
- *enc_subdev = vpbe_dev->venc;
- continue;
- }
- enc_info = &vpbe_dev->cfg->ext_encoders[i];
- if (enc_info->is_i2c) {
- enc_subdev = &vpbe_dev->encoders[i];
- *enc_subdev = v4l2_i2c_new_subdev_board(
- &vpbe_dev->v4l2_dev, i2c_adap,
- &enc_info->board_info, NULL);
- if (*enc_subdev)
- v4l2_info(&vpbe_dev->v4l2_dev,
- "v4l2 sub device %s registered\n",
- enc_info->module_name);
- else {
- v4l2_err(&vpbe_dev->v4l2_dev, "encoder %s failed to register",
- enc_info->module_name);
- ret = -ENODEV;
- goto fail_kfree_encoders;
- }
- } else
- v4l2_warn(&vpbe_dev->v4l2_dev, "non-i2c encoders currently not supported");
- }
- /* Add amplifier subdevice for dm365 */
- if ((strcmp(vpbe_dev->cfg->module_name, "dm365-vpbe-display") == 0) &&
- vpbe_dev->cfg->amp) {
- amp_info = vpbe_dev->cfg->amp;
- if (amp_info->is_i2c) {
- vpbe_dev->amp = v4l2_i2c_new_subdev_board(
- &vpbe_dev->v4l2_dev, i2c_adap,
- &amp_info->board_info, NULL);
- if (!vpbe_dev->amp) {
- v4l2_err(&vpbe_dev->v4l2_dev,
- "amplifier %s failed to register",
- amp_info->module_name);
- ret = -ENODEV;
- goto fail_kfree_encoders;
- }
- v4l2_info(&vpbe_dev->v4l2_dev,
- "v4l2 sub device %s registered\n",
- amp_info->module_name);
- } else {
- vpbe_dev->amp = NULL;
- v4l2_warn(&vpbe_dev->v4l2_dev, "non-i2c amplifiers currently not supported");
- }
- } else {
- vpbe_dev->amp = NULL;
- }
-
- /* set the current encoder and output to that of venc by default */
- vpbe_dev->current_sd_index = 0;
- vpbe_dev->current_out_index = 0;
-
- mutex_unlock(&vpbe_dev->lock);
-
- printk(KERN_NOTICE "Setting default output to %s\n", def_output);
- ret = vpbe_set_default_output(vpbe_dev);
- if (ret) {
- v4l2_err(&vpbe_dev->v4l2_dev, "Failed to set default output %s",
- def_output);
- goto fail_kfree_amp;
- }
-
- printk(KERN_NOTICE "Setting default mode to %s\n", def_mode);
- ret = vpbe_set_default_mode(vpbe_dev);
- if (ret) {
- v4l2_err(&vpbe_dev->v4l2_dev, "Failed to set default mode %s",
- def_mode);
- goto fail_kfree_amp;
- }
- vpbe_dev->initialized = 1;
- /* TBD handling of bootargs for default output and mode */
- return 0;
-
-fail_kfree_amp:
- mutex_lock(&vpbe_dev->lock);
- kfree(vpbe_dev->amp);
-fail_kfree_encoders:
- kfree(vpbe_dev->encoders);
-fail_dev_unregister:
- v4l2_device_unregister(&vpbe_dev->v4l2_dev);
-fail_clk_put:
- if (strcmp(vpbe_dev->cfg->module_name, "dm644x-vpbe-display") != 0) {
- clk_disable_unprepare(vpbe_dev->dac_clk);
- clk_put(vpbe_dev->dac_clk);
- }
-fail_mutex_unlock:
- mutex_unlock(&vpbe_dev->lock);
- return ret;
-}
-
-/**
- * vpbe_deinitialize() - de-initialize the vpbe display controller
- * @dev: Master and slave device ptr
- * @vpbe_dev: vpbe device ptr
- *
- * vpbe_master and slave frame buffer devices calls this to de-initialize
- * the display controller. It is called when master and slave device
- * driver modules are removed and no longer requires the display controller.
- */
-static void vpbe_deinitialize(struct device *dev, struct vpbe_device *vpbe_dev)
-{
- v4l2_device_unregister(&vpbe_dev->v4l2_dev);
- if (strcmp(vpbe_dev->cfg->module_name, "dm644x-vpbe-display") != 0) {
- clk_disable_unprepare(vpbe_dev->dac_clk);
- clk_put(vpbe_dev->dac_clk);
- }
-
- kfree(vpbe_dev->amp);
- kfree(vpbe_dev->encoders);
- vpbe_dev->initialized = 0;
- /* disable vpss clocks */
- vpss_enable_clock(VPSS_VPBE_CLOCK, 0);
-}
-
-static const struct vpbe_device_ops vpbe_dev_ops = {
- .enum_outputs = vpbe_enum_outputs,
- .set_output = vpbe_set_output,
- .get_output = vpbe_get_output,
- .s_dv_timings = vpbe_s_dv_timings,
- .g_dv_timings = vpbe_g_dv_timings,
- .enum_dv_timings = vpbe_enum_dv_timings,
- .s_std = vpbe_s_std,
- .g_std = vpbe_g_std,
- .initialize = vpbe_initialize,
- .deinitialize = vpbe_deinitialize,
- .get_mode_info = vpbe_get_current_mode_info,
- .set_mode = vpbe_set_mode,
-};
-
-static int vpbe_probe(struct platform_device *pdev)
-{
- struct vpbe_device *vpbe_dev;
- struct vpbe_config *cfg;
-
- if (!pdev->dev.platform_data) {
- v4l2_err(pdev->dev.driver, "No platform data\n");
- return -ENODEV;
- }
- cfg = pdev->dev.platform_data;
-
- if (!cfg->module_name[0] ||
- !cfg->osd.module_name[0] ||
- !cfg->venc.module_name[0]) {
- v4l2_err(pdev->dev.driver, "vpbe display module names not defined\n");
- return -EINVAL;
- }
-
- vpbe_dev = kzalloc(sizeof(*vpbe_dev), GFP_KERNEL);
- if (!vpbe_dev)
- return -ENOMEM;
-
- vpbe_dev->cfg = cfg;
- vpbe_dev->ops = vpbe_dev_ops;
- vpbe_dev->pdev = &pdev->dev;
-
- if (cfg->outputs->num_modes > 0)
- vpbe_dev->current_timings = vpbe_dev->cfg->outputs[0].modes[0];
- else {
- kfree(vpbe_dev);
- return -ENODEV;
- }
-
- /* set the driver data in platform device */
- platform_set_drvdata(pdev, vpbe_dev);
- mutex_init(&vpbe_dev->lock);
-
- return 0;
-}
-
-static int vpbe_remove(struct platform_device *device)
-{
- struct vpbe_device *vpbe_dev = platform_get_drvdata(device);
-
- kfree(vpbe_dev);
-
- return 0;
-}
-
-static struct platform_driver vpbe_driver = {
- .driver = {
- .name = "vpbe_controller",
- },
- .probe = vpbe_probe,
- .remove = vpbe_remove,
-};
-
-module_platform_driver(vpbe_driver);
diff --git a/drivers/media/platform/ti/davinci/vpbe_display.c b/drivers/media/platform/ti/davinci/vpbe_display.c
deleted file mode 100644
index 9ea70817538e..000000000000
--- a/drivers/media/platform/ti/davinci/vpbe_display.c
+++ /dev/null
@@ -1,1510 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2010 Texas Instruments Incorporated - https://www.ti.com/
- */
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/errno.h>
-#include <linux/interrupt.h>
-#include <linux/string.h>
-#include <linux/wait.h>
-#include <linux/time.h>
-#include <linux/platform_device.h>
-#include <linux/irq.h>
-#include <linux/mm.h>
-#include <linux/mutex.h>
-#include <linux/videodev2.h>
-#include <linux/slab.h>
-
-
-#include <media/v4l2-dev.h>
-#include <media/v4l2-common.h>
-#include <media/v4l2-ioctl.h>
-#include <media/v4l2-device.h>
-#include <media/davinci/vpbe_display.h>
-#include <media/davinci/vpbe_types.h>
-#include <media/davinci/vpbe.h>
-#include <media/davinci/vpbe_venc.h>
-#include <media/davinci/vpbe_osd.h>
-#include "vpbe_venc_regs.h"
-
-#define VPBE_DISPLAY_DRIVER "vpbe-v4l2"
-
-static int debug;
-
-#define VPBE_DEFAULT_NUM_BUFS 3
-
-module_param(debug, int, 0644);
-
-static int vpbe_set_osd_display_params(struct vpbe_display *disp_dev,
- struct vpbe_layer *layer);
-
-static int venc_is_second_field(struct vpbe_display *disp_dev)
-{
- struct vpbe_device *vpbe_dev = disp_dev->vpbe_dev;
- int ret, val;
-
- ret = v4l2_subdev_call(vpbe_dev->venc,
- core,
- command,
- VENC_GET_FLD,
- &val);
- if (ret < 0) {
- v4l2_err(&vpbe_dev->v4l2_dev,
- "Error in getting Field ID 0\n");
- return 1;
- }
- return val;
-}
-
-static void vpbe_isr_even_field(struct vpbe_display *disp_obj,
- struct vpbe_layer *layer)
-{
- if (layer->cur_frm == layer->next_frm)
- return;
-
- layer->cur_frm->vb.vb2_buf.timestamp = ktime_get_ns();
- vb2_buffer_done(&layer->cur_frm->vb.vb2_buf, VB2_BUF_STATE_DONE);
- /* Make cur_frm pointing to next_frm */
- layer->cur_frm = layer->next_frm;
-}
-
-static void vpbe_isr_odd_field(struct vpbe_display *disp_obj,
- struct vpbe_layer *layer)
-{
- struct osd_state *osd_device = disp_obj->osd_device;
- unsigned long addr;
-
- spin_lock(&disp_obj->dma_queue_lock);
- if (list_empty(&layer->dma_queue) ||
- (layer->cur_frm != layer->next_frm)) {
- spin_unlock(&disp_obj->dma_queue_lock);
- return;
- }
- /*
- * one field is displayed configure
- * the next frame if it is available
- * otherwise hold on current frame
- * Get next from the buffer queue
- */
- layer->next_frm = list_entry(layer->dma_queue.next,
- struct vpbe_disp_buffer, list);
- /* Remove that from the buffer queue */
- list_del(&layer->next_frm->list);
- spin_unlock(&disp_obj->dma_queue_lock);
- /* Mark state of the frame to active */
- layer->next_frm->vb.vb2_buf.state = VB2_BUF_STATE_ACTIVE;
- addr = vb2_dma_contig_plane_dma_addr(&layer->next_frm->vb.vb2_buf, 0);
- osd_device->ops.start_layer(osd_device,
- layer->layer_info.id,
- addr,
- disp_obj->cbcr_ofst);
-}
-
-/* interrupt service routine */
-static irqreturn_t venc_isr(int irq, void *arg)
-{
- struct vpbe_display *disp_dev = (struct vpbe_display *)arg;
- struct vpbe_layer *layer;
- static unsigned last_event;
- unsigned event = 0;
- int fid;
- int i;
-
- if (!arg || !disp_dev->dev[0])
- return IRQ_HANDLED;
-
- if (venc_is_second_field(disp_dev))
- event |= VENC_SECOND_FIELD;
- else
- event |= VENC_FIRST_FIELD;
-
- if (event == (last_event & ~VENC_END_OF_FRAME)) {
- /*
- * If the display is non-interlaced, then we need to flag the
- * end-of-frame event at every interrupt regardless of the
- * value of the FIDST bit. We can conclude that the display is
- * non-interlaced if the value of the FIDST bit is unchanged
- * from the previous interrupt.
- */
- event |= VENC_END_OF_FRAME;
- } else if (event == VENC_SECOND_FIELD) {
- /* end-of-frame for interlaced display */
- event |= VENC_END_OF_FRAME;
- }
- last_event = event;
-
- for (i = 0; i < VPBE_DISPLAY_MAX_DEVICES; i++) {
- layer = disp_dev->dev[i];
-
- if (!vb2_start_streaming_called(&layer->buffer_queue))
- continue;
-
- if (layer->layer_first_int) {
- layer->layer_first_int = 0;
- continue;
- }
- /* Check the field format */
- if ((V4L2_FIELD_NONE == layer->pix_fmt.field) &&
- (event & VENC_END_OF_FRAME)) {
- /* Progressive mode */
-
- vpbe_isr_even_field(disp_dev, layer);
- vpbe_isr_odd_field(disp_dev, layer);
- } else {
- /* Interlaced mode */
-
- layer->field_id ^= 1;
- if (event & VENC_FIRST_FIELD)
- fid = 0;
- else
- fid = 1;
-
- /*
- * If field id does not match with store
- * field id
- */
- if (fid != layer->field_id) {
- /* Make them in sync */
- layer->field_id = fid;
- continue;
- }
- /*
- * device field id and local field id are
- * in sync. If this is even field
- */
- if (0 == fid)
- vpbe_isr_even_field(disp_dev, layer);
- else /* odd field */
- vpbe_isr_odd_field(disp_dev, layer);
- }
- }
-
- return IRQ_HANDLED;
-}
-
-/*
- * vpbe_buffer_prepare()
- * This is the callback function called from vb2_qbuf() function
- * the buffer is prepared and user space virtual address is converted into
- * physical address
- */
-static int vpbe_buffer_prepare(struct vb2_buffer *vb)
-{
- struct vb2_queue *q = vb->vb2_queue;
- struct vpbe_layer *layer = vb2_get_drv_priv(q);
- struct vpbe_device *vpbe_dev = layer->disp_dev->vpbe_dev;
- unsigned long addr;
-
- v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
- "vpbe_buffer_prepare\n");
-
- vb2_set_plane_payload(vb, 0, layer->pix_fmt.sizeimage);
- if (vb2_get_plane_payload(vb, 0) > vb2_plane_size(vb, 0))
- return -EINVAL;
-
- addr = vb2_dma_contig_plane_dma_addr(vb, 0);
- if (!IS_ALIGNED(addr, 8)) {
- v4l2_err(&vpbe_dev->v4l2_dev,
- "buffer_prepare:offset is not aligned to 32 bytes\n");
- return -EINVAL;
- }
- return 0;
-}
-
-/*
- * vpbe_buffer_setup()
- * This function allocates memory for the buffers
- */
-static int
-vpbe_buffer_queue_setup(struct vb2_queue *vq,
- unsigned int *nbuffers, unsigned int *nplanes,
- unsigned int sizes[], struct device *alloc_devs[])
-
-{
- /* Get the file handle object and layer object */
- struct vpbe_layer *layer = vb2_get_drv_priv(vq);
- struct vpbe_device *vpbe_dev = layer->disp_dev->vpbe_dev;
-
- v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "vpbe_buffer_setup\n");
-
- /* Store number of buffers allocated in numbuffer member */
- if (vq->num_buffers + *nbuffers < VPBE_DEFAULT_NUM_BUFS)
- *nbuffers = VPBE_DEFAULT_NUM_BUFS - vq->num_buffers;
-
- if (*nplanes)
- return sizes[0] < layer->pix_fmt.sizeimage ? -EINVAL : 0;
-
- *nplanes = 1;
- sizes[0] = layer->pix_fmt.sizeimage;
-
- return 0;
-}
-
-/*
- * vpbe_buffer_queue()
- * This function adds the buffer to DMA queue
- */
-static void vpbe_buffer_queue(struct vb2_buffer *vb)
-{
- struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
- /* Get the file handle object and layer object */
- struct vpbe_disp_buffer *buf = container_of(vbuf,
- struct vpbe_disp_buffer, vb);
- struct vpbe_layer *layer = vb2_get_drv_priv(vb->vb2_queue);
- struct vpbe_display *disp = layer->disp_dev;
- struct vpbe_device *vpbe_dev = layer->disp_dev->vpbe_dev;
- unsigned long flags;
-
- v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
- "vpbe_buffer_queue\n");
-
- /* add the buffer to the DMA queue */
- spin_lock_irqsave(&disp->dma_queue_lock, flags);
- list_add_tail(&buf->list, &layer->dma_queue);
- spin_unlock_irqrestore(&disp->dma_queue_lock, flags);
-}
-
-static int vpbe_start_streaming(struct vb2_queue *vq, unsigned int count)
-{
- struct vpbe_layer *layer = vb2_get_drv_priv(vq);
- struct osd_state *osd_device = layer->disp_dev->osd_device;
- int ret;
-
- osd_device->ops.disable_layer(osd_device, layer->layer_info.id);
-
- /* Get the next frame from the buffer queue */
- layer->next_frm = layer->cur_frm = list_entry(layer->dma_queue.next,
- struct vpbe_disp_buffer, list);
- /* Remove buffer from the buffer queue */
- list_del(&layer->cur_frm->list);
- /* Mark state of the current frame to active */
- layer->cur_frm->vb.vb2_buf.state = VB2_BUF_STATE_ACTIVE;
- /* Initialize field_id and started member */
- layer->field_id = 0;
-
- /* Set parameters in OSD and VENC */
- ret = vpbe_set_osd_display_params(layer->disp_dev, layer);
- if (ret < 0) {
- struct vpbe_disp_buffer *buf, *tmp;
-
- vb2_buffer_done(&layer->cur_frm->vb.vb2_buf,
- VB2_BUF_STATE_QUEUED);
- list_for_each_entry_safe(buf, tmp, &layer->dma_queue, list) {
- list_del(&buf->list);
- vb2_buffer_done(&buf->vb.vb2_buf,
- VB2_BUF_STATE_QUEUED);
- }
-
- return ret;
- }
-
- /*
- * if request format is yuv420 semiplanar, need to
- * enable both video windows
- */
- layer->layer_first_int = 1;
-
- return ret;
-}
-
-static void vpbe_stop_streaming(struct vb2_queue *vq)
-{
- struct vpbe_layer *layer = vb2_get_drv_priv(vq);
- struct osd_state *osd_device = layer->disp_dev->osd_device;
- struct vpbe_display *disp = layer->disp_dev;
- unsigned long flags;
-
- if (!vb2_is_streaming(vq))
- return;
-
- osd_device->ops.disable_layer(osd_device, layer->layer_info.id);
-
- /* release all active buffers */
- spin_lock_irqsave(&disp->dma_queue_lock, flags);
- if (layer->cur_frm == layer->next_frm) {
- vb2_buffer_done(&layer->cur_frm->vb.vb2_buf,
- VB2_BUF_STATE_ERROR);
- } else {
- if (layer->cur_frm)
- vb2_buffer_done(&layer->cur_frm->vb.vb2_buf,
- VB2_BUF_STATE_ERROR);
- if (layer->next_frm)
- vb2_buffer_done(&layer->next_frm->vb.vb2_buf,
- VB2_BUF_STATE_ERROR);
- }
-
- while (!list_empty(&layer->dma_queue)) {
- layer->next_frm = list_entry(layer->dma_queue.next,
- struct vpbe_disp_buffer, list);
- list_del(&layer->next_frm->list);
- vb2_buffer_done(&layer->next_frm->vb.vb2_buf,
- VB2_BUF_STATE_ERROR);
- }
- spin_unlock_irqrestore(&disp->dma_queue_lock, flags);
-}
-
-static const struct vb2_ops video_qops = {
- .queue_setup = vpbe_buffer_queue_setup,
- .wait_prepare = vb2_ops_wait_prepare,
- .wait_finish = vb2_ops_wait_finish,
- .buf_prepare = vpbe_buffer_prepare,
- .start_streaming = vpbe_start_streaming,
- .stop_streaming = vpbe_stop_streaming,
- .buf_queue = vpbe_buffer_queue,
-};
-
-static
-struct vpbe_layer*
-_vpbe_display_get_other_win_layer(struct vpbe_display *disp_dev,
- struct vpbe_layer *layer)
-{
- enum vpbe_display_device_id thiswin, otherwin;
- thiswin = layer->device_id;
-
- otherwin = (thiswin == VPBE_DISPLAY_DEVICE_0) ?
- VPBE_DISPLAY_DEVICE_1 : VPBE_DISPLAY_DEVICE_0;
- return disp_dev->dev[otherwin];
-}
-
-static int vpbe_set_osd_display_params(struct vpbe_display *disp_dev,
- struct vpbe_layer *layer)
-{
- struct osd_layer_config *cfg = &layer->layer_info.config;
- struct osd_state *osd_device = disp_dev->osd_device;
- struct vpbe_device *vpbe_dev = disp_dev->vpbe_dev;
- unsigned long addr;
- int ret;
-
- addr = vb2_dma_contig_plane_dma_addr(&layer->cur_frm->vb.vb2_buf, 0);
- /* Set address in the display registers */
- osd_device->ops.start_layer(osd_device,
- layer->layer_info.id,
- addr,
- disp_dev->cbcr_ofst);
-
- ret = osd_device->ops.enable_layer(osd_device,
- layer->layer_info.id, 0);
- if (ret < 0) {
- v4l2_err(&vpbe_dev->v4l2_dev,
- "Error in enabling osd window layer 0\n");
- return -1;
- }
-
- /* Enable the window */
- layer->layer_info.enable = 1;
- if (cfg->pixfmt == PIXFMT_NV12) {
- struct vpbe_layer *otherlayer =
- _vpbe_display_get_other_win_layer(disp_dev, layer);
-
- ret = osd_device->ops.enable_layer(osd_device,
- otherlayer->layer_info.id, 1);
- if (ret < 0) {
- v4l2_err(&vpbe_dev->v4l2_dev,
- "Error in enabling osd window layer 1\n");
- return -1;
- }
- otherlayer->layer_info.enable = 1;
- }
- return 0;
-}
-
-static void
-vpbe_disp_calculate_scale_factor(struct vpbe_display *disp_dev,
- struct vpbe_layer *layer,
- int expected_xsize, int expected_ysize)
-{
- struct display_layer_info *layer_info = &layer->layer_info;
- struct v4l2_pix_format *pixfmt = &layer->pix_fmt;
- struct osd_layer_config *cfg = &layer->layer_info.config;
- struct vpbe_device *vpbe_dev = disp_dev->vpbe_dev;
- int calculated_xsize;
- int h_exp = 0;
- int v_exp = 0;
- int h_scale;
- int v_scale;
-
- v4l2_std_id standard_id = vpbe_dev->current_timings.std_id;
-
- /*
- * Application initially set the image format. Current display
- * size is obtained from the vpbe display controller. expected_xsize
- * and expected_ysize are set through S_SELECTION ioctl. Based on this,
- * driver will calculate the scale factors for vertical and
- * horizontal direction so that the image is displayed scaled
- * and expanded. Application uses expansion to display the image
- * in a square pixel. Otherwise it is displayed using displays
- * pixel aspect ratio.It is expected that application chooses
- * the crop coordinates for cropped or scaled display. if crop
- * size is less than the image size, it is displayed cropped or
- * it is displayed scaled and/or expanded.
- *
- * to begin with, set the crop window same as expected. Later we
- * will override with scaled window size
- */
-
- cfg->xsize = pixfmt->width;
- cfg->ysize = pixfmt->height;
- layer_info->h_zoom = ZOOM_X1; /* no horizontal zoom */
- layer_info->v_zoom = ZOOM_X1; /* no horizontal zoom */
- layer_info->h_exp = H_EXP_OFF; /* no horizontal zoom */
- layer_info->v_exp = V_EXP_OFF; /* no horizontal zoom */
-
- if (pixfmt->width < expected_xsize) {
- h_scale = vpbe_dev->current_timings.xres / pixfmt->width;
- if (h_scale < 2)
- h_scale = 1;
- else if (h_scale >= 4)
- h_scale = 4;
- else
- h_scale = 2;
- cfg->xsize *= h_scale;
- if (cfg->xsize < expected_xsize) {
- if ((standard_id & V4L2_STD_525_60) ||
- (standard_id & V4L2_STD_625_50)) {
- calculated_xsize = (cfg->xsize *
- VPBE_DISPLAY_H_EXP_RATIO_N) /
- VPBE_DISPLAY_H_EXP_RATIO_D;
- if (calculated_xsize <= expected_xsize) {
- h_exp = 1;
- cfg->xsize = calculated_xsize;
- }
- }
- }
- if (h_scale == 2)
- layer_info->h_zoom = ZOOM_X2;
- else if (h_scale == 4)
- layer_info->h_zoom = ZOOM_X4;
- if (h_exp)
- layer_info->h_exp = H_EXP_9_OVER_8;
- } else {
- /* no scaling, only cropping. Set display area to crop area */
- cfg->xsize = expected_xsize;
- }
-
- if (pixfmt->height < expected_ysize) {
- v_scale = expected_ysize / pixfmt->height;
- if (v_scale < 2)
- v_scale = 1;
- else if (v_scale >= 4)
- v_scale = 4;
- else
- v_scale = 2;
- cfg->ysize *= v_scale;
- if (cfg->ysize < expected_ysize) {
- if ((standard_id & V4L2_STD_625_50)) {
- calculated_xsize = (cfg->ysize *
- VPBE_DISPLAY_V_EXP_RATIO_N) /
- VPBE_DISPLAY_V_EXP_RATIO_D;
- if (calculated_xsize <= expected_ysize) {
- v_exp = 1;
- cfg->ysize = calculated_xsize;
- }
- }
- }
- if (v_scale == 2)
- layer_info->v_zoom = ZOOM_X2;
- else if (v_scale == 4)
- layer_info->v_zoom = ZOOM_X4;
- if (v_exp)
- layer_info->v_exp = V_EXP_6_OVER_5;
- } else {
- /* no scaling, only cropping. Set display area to crop area */
- cfg->ysize = expected_ysize;
- }
- v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
- "crop display xsize = %d, ysize = %d\n",
- cfg->xsize, cfg->ysize);
-}
-
-static void vpbe_disp_adj_position(struct vpbe_display *disp_dev,
- struct vpbe_layer *layer,
- int top, int left)
-{
- struct osd_layer_config *cfg = &layer->layer_info.config;
- struct vpbe_device *vpbe_dev = disp_dev->vpbe_dev;
-
- cfg->xpos = min((unsigned int)left,
- vpbe_dev->current_timings.xres - cfg->xsize);
- cfg->ypos = min((unsigned int)top,
- vpbe_dev->current_timings.yres - cfg->ysize);
-
- v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
- "new xpos = %d, ypos = %d\n",
- cfg->xpos, cfg->ypos);
-}
-
-static void vpbe_disp_check_window_params(struct vpbe_display *disp_dev,
- struct v4l2_rect *c)
-{
- struct vpbe_device *vpbe_dev = disp_dev->vpbe_dev;
-
- if ((c->width == 0) ||
- ((c->width + c->left) > vpbe_dev->current_timings.xres))
- c->width = vpbe_dev->current_timings.xres - c->left;
-
- if ((c->height == 0) || ((c->height + c->top) >
- vpbe_dev->current_timings.yres))
- c->height = vpbe_dev->current_timings.yres - c->top;
-
- /* window height must be even for interlaced display */
- if (vpbe_dev->current_timings.interlaced)
- c->height &= (~0x01);
-
-}
-
-/*
- * vpbe_try_format()
- * If user application provides width and height, and have bytesperline set
- * to zero, driver calculates bytesperline and sizeimage based on hardware
- * limits.
- */
-static int vpbe_try_format(struct vpbe_display *disp_dev,
- struct v4l2_pix_format *pixfmt, int check)
-{
- struct vpbe_device *vpbe_dev = disp_dev->vpbe_dev;
- int min_height = 1;
- int min_width = 32;
- int max_height;
- int max_width;
- int bpp;
-
- if ((pixfmt->pixelformat != V4L2_PIX_FMT_UYVY) &&
- (pixfmt->pixelformat != V4L2_PIX_FMT_NV12))
- /* choose default as V4L2_PIX_FMT_UYVY */
- pixfmt->pixelformat = V4L2_PIX_FMT_UYVY;
-
- /* Check the field format */
- if ((pixfmt->field != V4L2_FIELD_INTERLACED) &&
- (pixfmt->field != V4L2_FIELD_NONE)) {
- if (vpbe_dev->current_timings.interlaced)
- pixfmt->field = V4L2_FIELD_INTERLACED;
- else
- pixfmt->field = V4L2_FIELD_NONE;
- }
-
- if (pixfmt->field == V4L2_FIELD_INTERLACED)
- min_height = 2;
-
- if (pixfmt->pixelformat == V4L2_PIX_FMT_NV12)
- bpp = 1;
- else
- bpp = 2;
-
- max_width = vpbe_dev->current_timings.xres;
- max_height = vpbe_dev->current_timings.yres;
-
- min_width /= bpp;
-
- if (!pixfmt->width || (pixfmt->width < min_width) ||
- (pixfmt->width > max_width)) {
- pixfmt->width = vpbe_dev->current_timings.xres;
- }
-
- if (!pixfmt->height || (pixfmt->height < min_height) ||
- (pixfmt->height > max_height)) {
- pixfmt->height = vpbe_dev->current_timings.yres;
- }
-
- if (pixfmt->bytesperline < (pixfmt->width * bpp))
- pixfmt->bytesperline = pixfmt->width * bpp;
-
- /* Make the bytesperline 32 byte aligned */
- pixfmt->bytesperline = ((pixfmt->width * bpp + 31) & ~31);
-
- if (pixfmt->pixelformat == V4L2_PIX_FMT_NV12)
- pixfmt->sizeimage = pixfmt->bytesperline * pixfmt->height +
- (pixfmt->bytesperline * pixfmt->height >> 1);
- else
- pixfmt->sizeimage = pixfmt->bytesperline * pixfmt->height;
-
- return 0;
-}
-
-static int vpbe_display_querycap(struct file *file, void *priv,
- struct v4l2_capability *cap)
-{
- struct vpbe_layer *layer = video_drvdata(file);
- struct vpbe_device *vpbe_dev = layer->disp_dev->vpbe_dev;
-
- snprintf(cap->driver, sizeof(cap->driver), "%s",
- dev_name(vpbe_dev->pdev));
- strscpy(cap->card, vpbe_dev->cfg->module_name, sizeof(cap->card));
-
- return 0;
-}
-
-static int vpbe_display_s_selection(struct file *file, void *priv,
- struct v4l2_selection *sel)
-{
- struct vpbe_layer *layer = video_drvdata(file);
- struct vpbe_display *disp_dev = layer->disp_dev;
- struct vpbe_device *vpbe_dev = disp_dev->vpbe_dev;
- struct osd_layer_config *cfg = &layer->layer_info.config;
- struct osd_state *osd_device = disp_dev->osd_device;
- struct v4l2_rect rect = sel->r;
- int ret;
-
- v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
- "VIDIOC_S_SELECTION, layer id = %d\n", layer->device_id);
-
- if (sel->type != V4L2_BUF_TYPE_VIDEO_OUTPUT ||
- sel->target != V4L2_SEL_TGT_CROP)
- return -EINVAL;
-
- if (rect.top < 0)
- rect.top = 0;
- if (rect.left < 0)
- rect.left = 0;
-
- vpbe_disp_check_window_params(disp_dev, &rect);
-
- osd_device->ops.get_layer_config(osd_device,
- layer->layer_info.id, cfg);
-
- vpbe_disp_calculate_scale_factor(disp_dev, layer,
- rect.width,
- rect.height);
- vpbe_disp_adj_position(disp_dev, layer, rect.top,
- rect.left);
- ret = osd_device->ops.set_layer_config(osd_device,
- layer->layer_info.id, cfg);
- if (ret < 0) {
- v4l2_err(&vpbe_dev->v4l2_dev,
- "Error in set layer config:\n");
- return -EINVAL;
- }
-
- /* apply zooming and h or v expansion */
- osd_device->ops.set_zoom(osd_device,
- layer->layer_info.id,
- layer->layer_info.h_zoom,
- layer->layer_info.v_zoom);
- ret = osd_device->ops.set_vid_expansion(osd_device,
- layer->layer_info.h_exp,
- layer->layer_info.v_exp);
- if (ret < 0) {
- v4l2_err(&vpbe_dev->v4l2_dev,
- "Error in set vid expansion:\n");
- return -EINVAL;
- }
-
- if ((layer->layer_info.h_zoom != ZOOM_X1) ||
- (layer->layer_info.v_zoom != ZOOM_X1) ||
- (layer->layer_info.h_exp != H_EXP_OFF) ||
- (layer->layer_info.v_exp != V_EXP_OFF))
- /* Enable expansion filter */
- osd_device->ops.set_interpolation_filter(osd_device, 1);
- else
- osd_device->ops.set_interpolation_filter(osd_device, 0);
-
- sel->r = rect;
- return 0;
-}
-
-static int vpbe_display_g_selection(struct file *file, void *priv,
- struct v4l2_selection *sel)
-{
- struct vpbe_layer *layer = video_drvdata(file);
- struct osd_layer_config *cfg = &layer->layer_info.config;
- struct vpbe_device *vpbe_dev = layer->disp_dev->vpbe_dev;
- struct osd_state *osd_device = layer->disp_dev->osd_device;
- struct v4l2_rect *rect = &sel->r;
-
- v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
- "VIDIOC_G_SELECTION, layer id = %d\n",
- layer->device_id);
-
- if (sel->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
- return -EINVAL;
-
- switch (sel->target) {
- case V4L2_SEL_TGT_CROP:
- osd_device->ops.get_layer_config(osd_device,
- layer->layer_info.id, cfg);
- rect->top = cfg->ypos;
- rect->left = cfg->xpos;
- rect->width = cfg->xsize;
- rect->height = cfg->ysize;
- break;
- case V4L2_SEL_TGT_CROP_DEFAULT:
- case V4L2_SEL_TGT_CROP_BOUNDS:
- rect->left = 0;
- rect->top = 0;
- rect->width = vpbe_dev->current_timings.xres;
- rect->height = vpbe_dev->current_timings.yres;
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int vpbe_display_g_pixelaspect(struct file *file, void *priv,
- int type, struct v4l2_fract *f)
-{
- struct vpbe_layer *layer = video_drvdata(file);
- struct vpbe_device *vpbe_dev = layer->disp_dev->vpbe_dev;
-
- v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_CROPCAP ioctl\n");
-
- if (type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
- return -EINVAL;
-
- *f = vpbe_dev->current_timings.aspect;
- return 0;
-}
-
-static int vpbe_display_g_fmt(struct file *file, void *priv,
- struct v4l2_format *fmt)
-{
- struct vpbe_layer *layer = video_drvdata(file);
- struct vpbe_device *vpbe_dev = layer->disp_dev->vpbe_dev;
-
- v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
- "VIDIOC_G_FMT, layer id = %d\n",
- layer->device_id);
-
- /* If buffer type is video output */
- if (V4L2_BUF_TYPE_VIDEO_OUTPUT != fmt->type) {
- v4l2_err(&vpbe_dev->v4l2_dev, "invalid type\n");
- return -EINVAL;
- }
- /* Fill in the information about format */
- fmt->fmt.pix = layer->pix_fmt;
-
- return 0;
-}
-
-static int vpbe_display_enum_fmt(struct file *file, void *priv,
- struct v4l2_fmtdesc *fmt)
-{
- struct vpbe_layer *layer = video_drvdata(file);
- struct vpbe_device *vpbe_dev = layer->disp_dev->vpbe_dev;
-
- v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
- "VIDIOC_ENUM_FMT, layer id = %d\n",
- layer->device_id);
- if (fmt->index > 1) {
- v4l2_err(&vpbe_dev->v4l2_dev, "Invalid format index\n");
- return -EINVAL;
- }
-
- /* Fill in the information about format */
- if (fmt->index == 0)
- fmt->pixelformat = V4L2_PIX_FMT_UYVY;
- else
- fmt->pixelformat = V4L2_PIX_FMT_NV12;
-
- return 0;
-}
-
-static int vpbe_display_s_fmt(struct file *file, void *priv,
- struct v4l2_format *fmt)
-{
- struct vpbe_layer *layer = video_drvdata(file);
- struct vpbe_display *disp_dev = layer->disp_dev;
- struct vpbe_device *vpbe_dev = disp_dev->vpbe_dev;
- struct osd_layer_config *cfg = &layer->layer_info.config;
- struct v4l2_pix_format *pixfmt = &fmt->fmt.pix;
- struct osd_state *osd_device = disp_dev->osd_device;
- int ret;
-
- v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
- "VIDIOC_S_FMT, layer id = %d\n",
- layer->device_id);
-
- if (vb2_is_busy(&layer->buffer_queue))
- return -EBUSY;
-
- if (V4L2_BUF_TYPE_VIDEO_OUTPUT != fmt->type) {
- v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "invalid type\n");
- return -EINVAL;
- }
- /* Check for valid pixel format */
- ret = vpbe_try_format(disp_dev, pixfmt, 1);
- if (ret)
- return ret;
-
- /* YUV420 is requested, check availability of the
- other video window */
-
- layer->pix_fmt = *pixfmt;
- if (pixfmt->pixelformat == V4L2_PIX_FMT_NV12) {
- struct vpbe_layer *otherlayer;
-
- otherlayer = _vpbe_display_get_other_win_layer(disp_dev, layer);
- /* if other layer is available, only
- * claim it, do not configure it
- */
- ret = osd_device->ops.request_layer(osd_device,
- otherlayer->layer_info.id);
- if (ret < 0) {
- v4l2_err(&vpbe_dev->v4l2_dev,
- "Display Manager failed to allocate layer\n");
- return -EBUSY;
- }
- }
-
- /* Get osd layer config */
- osd_device->ops.get_layer_config(osd_device,
- layer->layer_info.id, cfg);
- /* Store the pixel format in the layer object */
- cfg->xsize = pixfmt->width;
- cfg->ysize = pixfmt->height;
- cfg->line_length = pixfmt->bytesperline;
- cfg->ypos = 0;
- cfg->xpos = 0;
- cfg->interlaced = vpbe_dev->current_timings.interlaced;
-
- if (V4L2_PIX_FMT_UYVY == pixfmt->pixelformat)
- cfg->pixfmt = PIXFMT_YCBCRI;
-
- /* Change of the default pixel format for both video windows */
- if (V4L2_PIX_FMT_NV12 == pixfmt->pixelformat) {
- struct vpbe_layer *otherlayer;
- cfg->pixfmt = PIXFMT_NV12;
- otherlayer = _vpbe_display_get_other_win_layer(disp_dev,
- layer);
- otherlayer->layer_info.config.pixfmt = PIXFMT_NV12;
- }
-
- /* Set the layer config in the osd window */
- ret = osd_device->ops.set_layer_config(osd_device,
- layer->layer_info.id, cfg);
- if (ret < 0) {
- v4l2_err(&vpbe_dev->v4l2_dev,
- "Error in S_FMT params:\n");
- return -EINVAL;
- }
-
- /* Readback and fill the local copy of current pix format */
- osd_device->ops.get_layer_config(osd_device,
- layer->layer_info.id, cfg);
-
- return 0;
-}
-
-static int vpbe_display_try_fmt(struct file *file, void *priv,
- struct v4l2_format *fmt)
-{
- struct vpbe_layer *layer = video_drvdata(file);
- struct vpbe_display *disp_dev = layer->disp_dev;
- struct vpbe_device *vpbe_dev = layer->disp_dev->vpbe_dev;
- struct v4l2_pix_format *pixfmt = &fmt->fmt.pix;
-
- v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_TRY_FMT\n");
-
- if (V4L2_BUF_TYPE_VIDEO_OUTPUT != fmt->type) {
- v4l2_err(&vpbe_dev->v4l2_dev, "invalid type\n");
- return -EINVAL;
- }
-
- /* Check for valid field format */
- return vpbe_try_format(disp_dev, pixfmt, 0);
-
-}
-
-/*
- * vpbe_display_s_std - Set the given standard in the encoder
- *
- * Sets the standard if supported by the current encoder. Return the status.
- * 0 - success & -EINVAL on error
- */
-static int vpbe_display_s_std(struct file *file, void *priv,
- v4l2_std_id std_id)
-{
- struct vpbe_layer *layer = video_drvdata(file);
- struct vpbe_device *vpbe_dev = layer->disp_dev->vpbe_dev;
- int ret;
-
- v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_S_STD\n");
-
- if (vb2_is_busy(&layer->buffer_queue))
- return -EBUSY;
-
- if (vpbe_dev->ops.s_std) {
- ret = vpbe_dev->ops.s_std(vpbe_dev, std_id);
- if (ret) {
- v4l2_err(&vpbe_dev->v4l2_dev,
- "Failed to set standard for sub devices\n");
- return -EINVAL;
- }
- } else {
- return -EINVAL;
- }
-
- return 0;
-}
-
-/*
- * vpbe_display_g_std - Get the standard in the current encoder
- *
- * Get the standard in the current encoder. Return the status. 0 - success
- * -EINVAL on error
- */
-static int vpbe_display_g_std(struct file *file, void *priv,
- v4l2_std_id *std_id)
-{
- struct vpbe_layer *layer = video_drvdata(file);
- struct vpbe_device *vpbe_dev = layer->disp_dev->vpbe_dev;
-
- v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_G_STD\n");
-
- /* Get the standard from the current encoder */
- if (vpbe_dev->current_timings.timings_type & VPBE_ENC_STD) {
- *std_id = vpbe_dev->current_timings.std_id;
- return 0;
- }
-
- return -EINVAL;
-}
-
-/*
- * vpbe_display_enum_output - enumerate outputs
- *
- * Enumerates the outputs available at the vpbe display
- * returns the status, -EINVAL if end of output list
- */
-static int vpbe_display_enum_output(struct file *file, void *priv,
- struct v4l2_output *output)
-{
- struct vpbe_layer *layer = video_drvdata(file);
- struct vpbe_device *vpbe_dev = layer->disp_dev->vpbe_dev;
- int ret;
-
- v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_ENUM_OUTPUT\n");
-
- /* Enumerate outputs */
- if (!vpbe_dev->ops.enum_outputs)
- return -EINVAL;
-
- ret = vpbe_dev->ops.enum_outputs(vpbe_dev, output);
- if (ret) {
- v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
- "Failed to enumerate outputs\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-/*
- * vpbe_display_s_output - Set output to
- * the output specified by the index
- */
-static int vpbe_display_s_output(struct file *file, void *priv,
- unsigned int i)
-{
- struct vpbe_layer *layer = video_drvdata(file);
- struct vpbe_device *vpbe_dev = layer->disp_dev->vpbe_dev;
- int ret;
-
- v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_S_OUTPUT\n");
-
- if (vb2_is_busy(&layer->buffer_queue))
- return -EBUSY;
-
- if (!vpbe_dev->ops.set_output)
- return -EINVAL;
-
- ret = vpbe_dev->ops.set_output(vpbe_dev, i);
- if (ret) {
- v4l2_err(&vpbe_dev->v4l2_dev,
- "Failed to set output for sub devices\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-/*
- * vpbe_display_g_output - Get output from subdevice
- * for a given by the index
- */
-static int vpbe_display_g_output(struct file *file, void *priv,
- unsigned int *i)
-{
- struct vpbe_layer *layer = video_drvdata(file);
- struct vpbe_device *vpbe_dev = layer->disp_dev->vpbe_dev;
-
- v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_G_OUTPUT\n");
- /* Get the standard from the current encoder */
- *i = vpbe_dev->current_out_index;
-
- return 0;
-}
-
-/*
- * vpbe_display_enum_dv_timings - Enumerate the dv timings
- *
- * enum the timings in the current encoder. Return the status. 0 - success
- * -EINVAL on error
- */
-static int
-vpbe_display_enum_dv_timings(struct file *file, void *priv,
- struct v4l2_enum_dv_timings *timings)
-{
- struct vpbe_layer *layer = video_drvdata(file);
- struct vpbe_device *vpbe_dev = layer->disp_dev->vpbe_dev;
- int ret;
-
- v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_ENUM_DV_TIMINGS\n");
-
- /* Enumerate outputs */
- if (!vpbe_dev->ops.enum_dv_timings)
- return -EINVAL;
-
- ret = vpbe_dev->ops.enum_dv_timings(vpbe_dev, timings);
- if (ret) {
- v4l2_err(&vpbe_dev->v4l2_dev,
- "Failed to enumerate dv timings info\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-/*
- * vpbe_display_s_dv_timings - Set the dv timings
- *
- * Set the timings in the current encoder. Return the status. 0 - success
- * -EINVAL on error
- */
-static int
-vpbe_display_s_dv_timings(struct file *file, void *priv,
- struct v4l2_dv_timings *timings)
-{
- struct vpbe_layer *layer = video_drvdata(file);
- struct vpbe_device *vpbe_dev = layer->disp_dev->vpbe_dev;
- int ret;
-
- v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_S_DV_TIMINGS\n");
-
- if (vb2_is_busy(&layer->buffer_queue))
- return -EBUSY;
-
- /* Set the given standard in the encoder */
- if (!vpbe_dev->ops.s_dv_timings)
- return -EINVAL;
-
- ret = vpbe_dev->ops.s_dv_timings(vpbe_dev, timings);
- if (ret) {
- v4l2_err(&vpbe_dev->v4l2_dev,
- "Failed to set the dv timings info\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-/*
- * vpbe_display_g_dv_timings - Set the dv timings
- *
- * Get the timings in the current encoder. Return the status. 0 - success
- * -EINVAL on error
- */
-static int
-vpbe_display_g_dv_timings(struct file *file, void *priv,
- struct v4l2_dv_timings *dv_timings)
-{
- struct vpbe_layer *layer = video_drvdata(file);
- struct vpbe_device *vpbe_dev = layer->disp_dev->vpbe_dev;
-
- v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "VIDIOC_G_DV_TIMINGS\n");
-
- /* Get the given standard in the encoder */
-
- if (vpbe_dev->current_timings.timings_type &
- VPBE_ENC_DV_TIMINGS) {
- *dv_timings = vpbe_dev->current_timings.dv_timings;
- } else {
- return -EINVAL;
- }
-
- return 0;
-}
-
-/*
- * vpbe_display_open()
- * It creates object of file handle structure and stores it in private_data
- * member of filepointer
- */
-static int vpbe_display_open(struct file *file)
-{
- struct vpbe_layer *layer = video_drvdata(file);
- struct vpbe_display *disp_dev = layer->disp_dev;
- struct vpbe_device *vpbe_dev = disp_dev->vpbe_dev;
- struct osd_state *osd_device = disp_dev->osd_device;
- int err;
-
- /* creating context for file descriptor */
- err = v4l2_fh_open(file);
- if (err) {
- v4l2_err(&vpbe_dev->v4l2_dev, "v4l2_fh_open failed\n");
- return err;
- }
-
- /* leaving if layer is already initialized */
- if (!v4l2_fh_is_singular_file(file))
- return err;
-
- if (!layer->usrs) {
- if (mutex_lock_interruptible(&layer->opslock))
- return -ERESTARTSYS;
- /* First claim the layer for this device */
- err = osd_device->ops.request_layer(osd_device,
- layer->layer_info.id);
- mutex_unlock(&layer->opslock);
- if (err < 0) {
- /* Couldn't get layer */
- v4l2_err(&vpbe_dev->v4l2_dev,
- "Display Manager failed to allocate layer\n");
- v4l2_fh_release(file);
- return -EINVAL;
- }
- }
- /* Increment layer usrs counter */
- layer->usrs++;
- v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev,
- "vpbe display device opened successfully\n");
- return 0;
-}
-
-/*
- * vpbe_display_release()
- * This function deletes buffer queue, frees the buffers and the davinci
- * display file * handle
- */
-static int vpbe_display_release(struct file *file)
-{
- struct vpbe_layer *layer = video_drvdata(file);
- struct osd_layer_config *cfg = &layer->layer_info.config;
- struct vpbe_display *disp_dev = layer->disp_dev;
- struct vpbe_device *vpbe_dev = disp_dev->vpbe_dev;
- struct osd_state *osd_device = disp_dev->osd_device;
-
- v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "vpbe_display_release\n");
-
- mutex_lock(&layer->opslock);
-
- osd_device->ops.disable_layer(osd_device,
- layer->layer_info.id);
- /* Decrement layer usrs counter */
- layer->usrs--;
- /* If this file handle has initialize encoder device, reset it */
- if (!layer->usrs) {
- if (cfg->pixfmt == PIXFMT_NV12) {
- struct vpbe_layer *otherlayer;
- otherlayer =
- _vpbe_display_get_other_win_layer(disp_dev, layer);
- osd_device->ops.disable_layer(osd_device,
- otherlayer->layer_info.id);
- osd_device->ops.release_layer(osd_device,
- otherlayer->layer_info.id);
- }
- osd_device->ops.disable_layer(osd_device,
- layer->layer_info.id);
- osd_device->ops.release_layer(osd_device,
- layer->layer_info.id);
- }
-
- _vb2_fop_release(file, NULL);
- mutex_unlock(&layer->opslock);
-
- disp_dev->cbcr_ofst = 0;
-
- return 0;
-}
-
-/* vpbe capture ioctl operations */
-static const struct v4l2_ioctl_ops vpbe_ioctl_ops = {
- .vidioc_querycap = vpbe_display_querycap,
- .vidioc_g_fmt_vid_out = vpbe_display_g_fmt,
- .vidioc_enum_fmt_vid_out = vpbe_display_enum_fmt,
- .vidioc_s_fmt_vid_out = vpbe_display_s_fmt,
- .vidioc_try_fmt_vid_out = vpbe_display_try_fmt,
-
- .vidioc_reqbufs = vb2_ioctl_reqbufs,
- .vidioc_create_bufs = vb2_ioctl_create_bufs,
- .vidioc_querybuf = vb2_ioctl_querybuf,
- .vidioc_qbuf = vb2_ioctl_qbuf,
- .vidioc_dqbuf = vb2_ioctl_dqbuf,
- .vidioc_streamon = vb2_ioctl_streamon,
- .vidioc_streamoff = vb2_ioctl_streamoff,
- .vidioc_expbuf = vb2_ioctl_expbuf,
-
- .vidioc_g_pixelaspect = vpbe_display_g_pixelaspect,
- .vidioc_g_selection = vpbe_display_g_selection,
- .vidioc_s_selection = vpbe_display_s_selection,
-
- .vidioc_s_std = vpbe_display_s_std,
- .vidioc_g_std = vpbe_display_g_std,
-
- .vidioc_enum_output = vpbe_display_enum_output,
- .vidioc_s_output = vpbe_display_s_output,
- .vidioc_g_output = vpbe_display_g_output,
-
- .vidioc_s_dv_timings = vpbe_display_s_dv_timings,
- .vidioc_g_dv_timings = vpbe_display_g_dv_timings,
- .vidioc_enum_dv_timings = vpbe_display_enum_dv_timings,
-};
-
-static const struct v4l2_file_operations vpbe_fops = {
- .owner = THIS_MODULE,
- .open = vpbe_display_open,
- .release = vpbe_display_release,
- .unlocked_ioctl = video_ioctl2,
- .mmap = vb2_fop_mmap,
- .poll = vb2_fop_poll,
-};
-
-static int vpbe_device_get(struct device *dev, void *data)
-{
- struct platform_device *pdev = to_platform_device(dev);
- struct vpbe_display *vpbe_disp = data;
-
- if (strcmp("vpbe_controller", pdev->name) == 0)
- vpbe_disp->vpbe_dev = platform_get_drvdata(pdev);
-
- if (strstr(pdev->name, "vpbe-osd"))
- vpbe_disp->osd_device = platform_get_drvdata(pdev);
-
- return 0;
-}
-
-static int init_vpbe_layer(int i, struct vpbe_display *disp_dev,
- struct platform_device *pdev)
-{
- struct vpbe_layer *vpbe_display_layer = NULL;
- struct video_device *vbd = NULL;
-
- /* Allocate memory for four plane display objects */
- disp_dev->dev[i] = kzalloc(sizeof(*disp_dev->dev[i]), GFP_KERNEL);
- if (!disp_dev->dev[i])
- return -ENOMEM;
-
- spin_lock_init(&disp_dev->dev[i]->irqlock);
- mutex_init(&disp_dev->dev[i]->opslock);
-
- /* Get the pointer to the layer object */
- vpbe_display_layer = disp_dev->dev[i];
- vbd = &vpbe_display_layer->video_dev;
- /* Initialize field of video device */
- vbd->release = video_device_release_empty;
- vbd->fops = &vpbe_fops;
- vbd->ioctl_ops = &vpbe_ioctl_ops;
- vbd->minor = -1;
- vbd->v4l2_dev = &disp_dev->vpbe_dev->v4l2_dev;
- vbd->lock = &vpbe_display_layer->opslock;
- vbd->vfl_dir = VFL_DIR_TX;
- vbd->device_caps = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;
-
- if (disp_dev->vpbe_dev->current_timings.timings_type &
- VPBE_ENC_STD)
- vbd->tvnorms = (V4L2_STD_525_60 | V4L2_STD_625_50);
-
- snprintf(vbd->name, sizeof(vbd->name),
- "DaVinci_VPBE Display_DRIVER_V%d.%d.%d",
- (VPBE_DISPLAY_VERSION_CODE >> 16) & 0xff,
- (VPBE_DISPLAY_VERSION_CODE >> 8) & 0xff,
- (VPBE_DISPLAY_VERSION_CODE) & 0xff);
-
- vpbe_display_layer->device_id = i;
-
- vpbe_display_layer->layer_info.id =
- ((i == VPBE_DISPLAY_DEVICE_0) ? WIN_VID0 : WIN_VID1);
-
-
- return 0;
-}
-
-static int register_device(struct vpbe_layer *vpbe_display_layer,
- struct vpbe_display *disp_dev,
- struct platform_device *pdev)
-{
- int err;
-
- v4l2_info(&disp_dev->vpbe_dev->v4l2_dev,
- "Trying to register VPBE display device.\n");
- v4l2_info(&disp_dev->vpbe_dev->v4l2_dev,
- "layer=%p,layer->video_dev=%p\n",
- vpbe_display_layer,
- &vpbe_display_layer->video_dev);
-
- vpbe_display_layer->video_dev.queue = &vpbe_display_layer->buffer_queue;
- err = video_register_device(&vpbe_display_layer->video_dev,
- VFL_TYPE_VIDEO,
- -1);
- if (err)
- return -ENODEV;
-
- vpbe_display_layer->disp_dev = disp_dev;
- /* set the driver data in platform device */
- platform_set_drvdata(pdev, disp_dev);
- video_set_drvdata(&vpbe_display_layer->video_dev,
- vpbe_display_layer);
-
- return 0;
-}
-
-
-
-/*
- * vpbe_display_probe()
- * This function creates device entries by register itself to the V4L2 driver
- * and initializes fields of each layer objects
- */
-static int vpbe_display_probe(struct platform_device *pdev)
-{
- struct vpbe_display *disp_dev;
- struct v4l2_device *v4l2_dev;
- struct resource *res = NULL;
- struct vb2_queue *q;
- int k;
- int i;
- int err;
- int irq;
-
- printk(KERN_DEBUG "vpbe_display_probe\n");
- /* Allocate memory for vpbe_display */
- disp_dev = devm_kzalloc(&pdev->dev, sizeof(*disp_dev), GFP_KERNEL);
- if (!disp_dev)
- return -ENOMEM;
-
- spin_lock_init(&disp_dev->dma_queue_lock);
- /*
- * Scan all the platform devices to find the vpbe
- * controller device and get the vpbe_dev object
- */
- err = bus_for_each_dev(&platform_bus_type, NULL, disp_dev,
- vpbe_device_get);
- if (err < 0)
- return err;
-
- v4l2_dev = &disp_dev->vpbe_dev->v4l2_dev;
- /* Initialize the vpbe display controller */
- if (disp_dev->vpbe_dev->ops.initialize) {
- err = disp_dev->vpbe_dev->ops.initialize(&pdev->dev,
- disp_dev->vpbe_dev);
- if (err) {
- v4l2_err(v4l2_dev, "Error initing vpbe\n");
- err = -ENOMEM;
- goto probe_out;
- }
- }
-
- for (i = 0; i < VPBE_DISPLAY_MAX_DEVICES; i++) {
- if (init_vpbe_layer(i, disp_dev, pdev)) {
- err = -ENODEV;
- goto probe_out;
- }
- }
-
- res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!res) {
- v4l2_err(v4l2_dev, "Unable to get VENC interrupt resource\n");
- err = -ENODEV;
- goto probe_out;
- }
-
- irq = res->start;
- err = devm_request_irq(&pdev->dev, irq, venc_isr, 0,
- VPBE_DISPLAY_DRIVER, disp_dev);
- if (err) {
- v4l2_err(v4l2_dev, "VPBE IRQ request failed\n");
- goto probe_out;
- }
-
- for (i = 0; i < VPBE_DISPLAY_MAX_DEVICES; i++) {
- /* initialize vb2 queue */
- q = &disp_dev->dev[i]->buffer_queue;
- memset(q, 0, sizeof(*q));
- q->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
- q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
- q->drv_priv = disp_dev->dev[i];
- q->ops = &video_qops;
- q->mem_ops = &vb2_dma_contig_memops;
- q->buf_struct_size = sizeof(struct vpbe_disp_buffer);
- q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
- q->min_buffers_needed = 1;
- q->lock = &disp_dev->dev[i]->opslock;
- q->dev = disp_dev->vpbe_dev->pdev;
- err = vb2_queue_init(q);
- if (err) {
- v4l2_err(v4l2_dev, "vb2_queue_init() failed\n");
- goto probe_out;
- }
-
- INIT_LIST_HEAD(&disp_dev->dev[i]->dma_queue);
-
- if (register_device(disp_dev->dev[i], disp_dev, pdev)) {
- err = -ENODEV;
- goto probe_out;
- }
- }
-
- v4l2_dbg(1, debug, v4l2_dev,
- "Successfully completed the probing of vpbe v4l2 device\n");
-
- return 0;
-
-probe_out:
- for (k = 0; k < VPBE_DISPLAY_MAX_DEVICES; k++) {
- /* Unregister video device */
- if (disp_dev->dev[k]) {
- video_unregister_device(&disp_dev->dev[k]->video_dev);
- kfree(disp_dev->dev[k]);
- }
- }
- return err;
-}
-
-/*
- * vpbe_display_remove()
- * It un-register hardware layer from V4L2 driver
- */
-static int vpbe_display_remove(struct platform_device *pdev)
-{
- struct vpbe_layer *vpbe_display_layer;
- struct vpbe_display *disp_dev = platform_get_drvdata(pdev);
- struct vpbe_device *vpbe_dev = disp_dev->vpbe_dev;
- int i;
-
- v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "vpbe_display_remove\n");
-
- /* deinitialize the vpbe display controller */
- if (vpbe_dev->ops.deinitialize)
- vpbe_dev->ops.deinitialize(&pdev->dev, vpbe_dev);
- /* un-register device */
- for (i = 0; i < VPBE_DISPLAY_MAX_DEVICES; i++) {
- /* Get the pointer to the layer object */
- vpbe_display_layer = disp_dev->dev[i];
- /* Unregister video device */
- video_unregister_device(&vpbe_display_layer->video_dev);
-
- }
- for (i = 0; i < VPBE_DISPLAY_MAX_DEVICES; i++) {
- kfree(disp_dev->dev[i]);
- disp_dev->dev[i] = NULL;
- }
-
- return 0;
-}
-
-static struct platform_driver vpbe_display_driver = {
- .driver = {
- .name = VPBE_DISPLAY_DRIVER,
- .bus = &platform_bus_type,
- },
- .probe = vpbe_display_probe,
- .remove = vpbe_display_remove,
-};
-
-module_platform_driver(vpbe_display_driver);
-
-MODULE_DESCRIPTION("TI DM644x/DM355/DM365 VPBE Display controller");
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Texas Instruments");
diff --git a/drivers/media/platform/ti/davinci/vpbe_osd.c b/drivers/media/platform/ti/davinci/vpbe_osd.c
deleted file mode 100644
index 32f7ef547c82..000000000000
--- a/drivers/media/platform/ti/davinci/vpbe_osd.c
+++ /dev/null
@@ -1,1582 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2007-2010 Texas Instruments Inc
- * Copyright (C) 2007 MontaVista Software, Inc.
- *
- * Andy Lowe (alowe@mvista.com), MontaVista Software
- * - Initial version
- * Murali Karicheri (mkaricheri@gmail.com), Texas Instruments Ltd.
- * - ported to sub device interface
- */
-#include <linux/module.h>
-#include <linux/mod_devicetable.h>
-#include <linux/kernel.h>
-#include <linux/interrupt.h>
-#include <linux/platform_device.h>
-#include <linux/clk.h>
-#include <linux/slab.h>
-
-#include <media/davinci/vpss.h>
-#include <media/v4l2-device.h>
-#include <media/davinci/vpbe_types.h>
-#include <media/davinci/vpbe_osd.h>
-
-#include <linux/io.h>
-#include "vpbe_osd_regs.h"
-
-#define MODULE_NAME "davinci-vpbe-osd"
-
-static const struct platform_device_id vpbe_osd_devtype[] = {
- {
- .name = DM644X_VPBE_OSD_SUBDEV_NAME,
- .driver_data = VPBE_VERSION_1,
- }, {
- .name = DM365_VPBE_OSD_SUBDEV_NAME,
- .driver_data = VPBE_VERSION_2,
- }, {
- .name = DM355_VPBE_OSD_SUBDEV_NAME,
- .driver_data = VPBE_VERSION_3,
- },
- {
- /* sentinel */
- }
-};
-
-MODULE_DEVICE_TABLE(platform, vpbe_osd_devtype);
-
-/* register access routines */
-static inline u32 __always_unused osd_read(struct osd_state *sd, u32 offset)
-{
- struct osd_state *osd = sd;
-
- return readl(osd->osd_base + offset);
-}
-
-static inline u32 osd_write(struct osd_state *sd, u32 val, u32 offset)
-{
- struct osd_state *osd = sd;
-
- writel(val, osd->osd_base + offset);
-
- return val;
-}
-
-static inline u32 osd_set(struct osd_state *sd, u32 mask, u32 offset)
-{
- struct osd_state *osd = sd;
-
- void __iomem *addr = osd->osd_base + offset;
- u32 val = readl(addr) | mask;
-
- writel(val, addr);
-
- return val;
-}
-
-static inline u32 osd_clear(struct osd_state *sd, u32 mask, u32 offset)
-{
- struct osd_state *osd = sd;
-
- void __iomem *addr = osd->osd_base + offset;
- u32 val = readl(addr) & ~mask;
-
- writel(val, addr);
-
- return val;
-}
-
-static inline u32 osd_modify(struct osd_state *sd, u32 mask, u32 val,
- u32 offset)
-{
- struct osd_state *osd = sd;
-
- void __iomem *addr = osd->osd_base + offset;
- u32 new_val = (readl(addr) & ~mask) | (val & mask);
-
- writel(new_val, addr);
-
- return new_val;
-}
-
-/* define some macros for layer and pixfmt classification */
-#define is_osd_win(layer) (((layer) == WIN_OSD0) || ((layer) == WIN_OSD1))
-#define is_vid_win(layer) (((layer) == WIN_VID0) || ((layer) == WIN_VID1))
-#define is_rgb_pixfmt(pixfmt) \
- (((pixfmt) == PIXFMT_RGB565) || ((pixfmt) == PIXFMT_RGB888))
-#define is_yc_pixfmt(pixfmt) \
- (((pixfmt) == PIXFMT_YCBCRI) || ((pixfmt) == PIXFMT_YCRCBI) || \
- ((pixfmt) == PIXFMT_NV12))
-#define MAX_WIN_SIZE OSD_VIDWIN0XP_V0X
-#define MAX_LINE_LENGTH (OSD_VIDWIN0OFST_V0LO << 5)
-
-/**
- * _osd_dm6446_vid0_pingpong() - field inversion fix for DM6446
- * @sd: ptr to struct osd_state
- * @field_inversion: inversion flag
- * @fb_base_phys: frame buffer address
- * @lconfig: ptr to layer config
- *
- * This routine implements a workaround for the field signal inversion silicon
- * erratum described in Advisory 1.3.8 for the DM6446. The fb_base_phys and
- * lconfig parameters apply to the vid0 window. This routine should be called
- * whenever the vid0 layer configuration or start address is modified, or when
- * the OSD field inversion setting is modified.
- * Returns: 1 if the ping-pong buffers need to be toggled in the vsync isr, or
- * 0 otherwise
- */
-static int _osd_dm6446_vid0_pingpong(struct osd_state *sd,
- int field_inversion,
- unsigned long fb_base_phys,
- const struct osd_layer_config *lconfig)
-{
- struct osd_platform_data *pdata;
-
- pdata = (struct osd_platform_data *)sd->dev->platform_data;
- if (pdata != NULL && pdata->field_inv_wa_enable) {
-
- if (!field_inversion || !lconfig->interlaced) {
- osd_write(sd, fb_base_phys & ~0x1F, OSD_VIDWIN0ADR);
- osd_write(sd, fb_base_phys & ~0x1F, OSD_PPVWIN0ADR);
- osd_modify(sd, OSD_MISCCTL_PPSW | OSD_MISCCTL_PPRV, 0,
- OSD_MISCCTL);
- return 0;
- } else {
- unsigned miscctl = OSD_MISCCTL_PPRV;
-
- osd_write(sd,
- (fb_base_phys & ~0x1F) - lconfig->line_length,
- OSD_VIDWIN0ADR);
- osd_write(sd,
- (fb_base_phys & ~0x1F) + lconfig->line_length,
- OSD_PPVWIN0ADR);
- osd_modify(sd,
- OSD_MISCCTL_PPSW | OSD_MISCCTL_PPRV, miscctl,
- OSD_MISCCTL);
-
- return 1;
- }
- }
-
- return 0;
-}
-
-static void _osd_set_field_inversion(struct osd_state *sd, int enable)
-{
- unsigned fsinv = 0;
-
- if (enable)
- fsinv = OSD_MODE_FSINV;
-
- osd_modify(sd, OSD_MODE_FSINV, fsinv, OSD_MODE);
-}
-
-static void _osd_set_blink_attribute(struct osd_state *sd, int enable,
- enum osd_blink_interval blink)
-{
- u32 osdatrmd = 0;
-
- if (enable) {
- osdatrmd |= OSD_OSDATRMD_BLNK;
- osdatrmd |= blink << OSD_OSDATRMD_BLNKINT_SHIFT;
- }
- /* caller must ensure that OSD1 is configured in attribute mode */
- osd_modify(sd, OSD_OSDATRMD_BLNKINT | OSD_OSDATRMD_BLNK, osdatrmd,
- OSD_OSDATRMD);
-}
-
-static void _osd_set_rom_clut(struct osd_state *sd,
- enum osd_rom_clut rom_clut)
-{
- if (rom_clut == ROM_CLUT0)
- osd_clear(sd, OSD_MISCCTL_RSEL, OSD_MISCCTL);
- else
- osd_set(sd, OSD_MISCCTL_RSEL, OSD_MISCCTL);
-}
-
-static void _osd_set_palette_map(struct osd_state *sd,
- enum osd_win_layer osdwin,
- unsigned char pixel_value,
- unsigned char clut_index,
- enum osd_pix_format pixfmt)
-{
- static const int map_2bpp[] = { 0, 5, 10, 15 };
- static const int map_1bpp[] = { 0, 15 };
- int bmp_offset;
- int bmp_shift;
- int bmp_mask;
- int bmp_reg;
-
- switch (pixfmt) {
- case PIXFMT_1BPP:
- bmp_reg = map_1bpp[pixel_value & 0x1];
- break;
- case PIXFMT_2BPP:
- bmp_reg = map_2bpp[pixel_value & 0x3];
- break;
- case PIXFMT_4BPP:
- bmp_reg = pixel_value & 0xf;
- break;
- default:
- return;
- }
-
- switch (osdwin) {
- case OSDWIN_OSD0:
- bmp_offset = OSD_W0BMP01 + (bmp_reg >> 1) * sizeof(u32);
- break;
- case OSDWIN_OSD1:
- bmp_offset = OSD_W1BMP01 + (bmp_reg >> 1) * sizeof(u32);
- break;
- default:
- return;
- }
-
- if (bmp_reg & 1) {
- bmp_shift = 8;
- bmp_mask = 0xff << 8;
- } else {
- bmp_shift = 0;
- bmp_mask = 0xff;
- }
-
- osd_modify(sd, bmp_mask, clut_index << bmp_shift, bmp_offset);
-}
-
-static void _osd_set_rec601_attenuation(struct osd_state *sd,
- enum osd_win_layer osdwin, int enable)
-{
- switch (osdwin) {
- case OSDWIN_OSD0:
- osd_modify(sd, OSD_OSDWIN0MD_ATN0E,
- enable ? OSD_OSDWIN0MD_ATN0E : 0,
- OSD_OSDWIN0MD);
- if (sd->vpbe_type == VPBE_VERSION_1)
- osd_modify(sd, OSD_OSDWIN0MD_ATN0E,
- enable ? OSD_OSDWIN0MD_ATN0E : 0,
- OSD_OSDWIN0MD);
- else if ((sd->vpbe_type == VPBE_VERSION_3) ||
- (sd->vpbe_type == VPBE_VERSION_2))
- osd_modify(sd, OSD_EXTMODE_ATNOSD0EN,
- enable ? OSD_EXTMODE_ATNOSD0EN : 0,
- OSD_EXTMODE);
- break;
- case OSDWIN_OSD1:
- osd_modify(sd, OSD_OSDWIN1MD_ATN1E,
- enable ? OSD_OSDWIN1MD_ATN1E : 0,
- OSD_OSDWIN1MD);
- if (sd->vpbe_type == VPBE_VERSION_1)
- osd_modify(sd, OSD_OSDWIN1MD_ATN1E,
- enable ? OSD_OSDWIN1MD_ATN1E : 0,
- OSD_OSDWIN1MD);
- else if ((sd->vpbe_type == VPBE_VERSION_3) ||
- (sd->vpbe_type == VPBE_VERSION_2))
- osd_modify(sd, OSD_EXTMODE_ATNOSD1EN,
- enable ? OSD_EXTMODE_ATNOSD1EN : 0,
- OSD_EXTMODE);
- break;
- }
-}
-
-static void _osd_set_blending_factor(struct osd_state *sd,
- enum osd_win_layer osdwin,
- enum osd_blending_factor blend)
-{
- switch (osdwin) {
- case OSDWIN_OSD0:
- osd_modify(sd, OSD_OSDWIN0MD_BLND0,
- blend << OSD_OSDWIN0MD_BLND0_SHIFT, OSD_OSDWIN0MD);
- break;
- case OSDWIN_OSD1:
- osd_modify(sd, OSD_OSDWIN1MD_BLND1,
- blend << OSD_OSDWIN1MD_BLND1_SHIFT, OSD_OSDWIN1MD);
- break;
- }
-}
-
-static void _osd_enable_rgb888_pixblend(struct osd_state *sd,
- enum osd_win_layer osdwin)
-{
-
- osd_modify(sd, OSD_MISCCTL_BLDSEL, 0, OSD_MISCCTL);
- switch (osdwin) {
- case OSDWIN_OSD0:
- osd_modify(sd, OSD_EXTMODE_OSD0BLDCHR,
- OSD_EXTMODE_OSD0BLDCHR, OSD_EXTMODE);
- break;
- case OSDWIN_OSD1:
- osd_modify(sd, OSD_EXTMODE_OSD1BLDCHR,
- OSD_EXTMODE_OSD1BLDCHR, OSD_EXTMODE);
- break;
- }
-}
-
-static void _osd_enable_color_key(struct osd_state *sd,
- enum osd_win_layer osdwin,
- unsigned colorkey,
- enum osd_pix_format pixfmt)
-{
- switch (pixfmt) {
- case PIXFMT_1BPP:
- case PIXFMT_2BPP:
- case PIXFMT_4BPP:
- case PIXFMT_8BPP:
- if (sd->vpbe_type == VPBE_VERSION_3) {
- switch (osdwin) {
- case OSDWIN_OSD0:
- osd_modify(sd, OSD_TRANSPBMPIDX_BMP0,
- colorkey <<
- OSD_TRANSPBMPIDX_BMP0_SHIFT,
- OSD_TRANSPBMPIDX);
- break;
- case OSDWIN_OSD1:
- osd_modify(sd, OSD_TRANSPBMPIDX_BMP1,
- colorkey <<
- OSD_TRANSPBMPIDX_BMP1_SHIFT,
- OSD_TRANSPBMPIDX);
- break;
- }
- }
- break;
- case PIXFMT_RGB565:
- if (sd->vpbe_type == VPBE_VERSION_1)
- osd_write(sd, colorkey & OSD_TRANSPVAL_RGBTRANS,
- OSD_TRANSPVAL);
- else if (sd->vpbe_type == VPBE_VERSION_3)
- osd_write(sd, colorkey & OSD_TRANSPVALL_RGBL,
- OSD_TRANSPVALL);
- break;
- case PIXFMT_YCBCRI:
- case PIXFMT_YCRCBI:
- if (sd->vpbe_type == VPBE_VERSION_3)
- osd_modify(sd, OSD_TRANSPVALU_Y, colorkey,
- OSD_TRANSPVALU);
- break;
- case PIXFMT_RGB888:
- if (sd->vpbe_type == VPBE_VERSION_3) {
- osd_write(sd, colorkey & OSD_TRANSPVALL_RGBL,
- OSD_TRANSPVALL);
- osd_modify(sd, OSD_TRANSPVALU_RGBU, colorkey >> 16,
- OSD_TRANSPVALU);
- }
- break;
- default:
- break;
- }
-
- switch (osdwin) {
- case OSDWIN_OSD0:
- osd_set(sd, OSD_OSDWIN0MD_TE0, OSD_OSDWIN0MD);
- break;
- case OSDWIN_OSD1:
- osd_set(sd, OSD_OSDWIN1MD_TE1, OSD_OSDWIN1MD);
- break;
- }
-}
-
-static void _osd_disable_color_key(struct osd_state *sd,
- enum osd_win_layer osdwin)
-{
- switch (osdwin) {
- case OSDWIN_OSD0:
- osd_clear(sd, OSD_OSDWIN0MD_TE0, OSD_OSDWIN0MD);
- break;
- case OSDWIN_OSD1:
- osd_clear(sd, OSD_OSDWIN1MD_TE1, OSD_OSDWIN1MD);
- break;
- }
-}
-
-static void _osd_set_osd_clut(struct osd_state *sd,
- enum osd_win_layer osdwin,
- enum osd_clut clut)
-{
- u32 winmd = 0;
-
- switch (osdwin) {
- case OSDWIN_OSD0:
- if (clut == RAM_CLUT)
- winmd |= OSD_OSDWIN0MD_CLUTS0;
- osd_modify(sd, OSD_OSDWIN0MD_CLUTS0, winmd, OSD_OSDWIN0MD);
- break;
- case OSDWIN_OSD1:
- if (clut == RAM_CLUT)
- winmd |= OSD_OSDWIN1MD_CLUTS1;
- osd_modify(sd, OSD_OSDWIN1MD_CLUTS1, winmd, OSD_OSDWIN1MD);
- break;
- }
-}
-
-static void _osd_set_zoom(struct osd_state *sd, enum osd_layer layer,
- enum osd_zoom_factor h_zoom,
- enum osd_zoom_factor v_zoom)
-{
- u32 winmd = 0;
-
- switch (layer) {
- case WIN_OSD0:
- winmd |= (h_zoom << OSD_OSDWIN0MD_OHZ0_SHIFT);
- winmd |= (v_zoom << OSD_OSDWIN0MD_OVZ0_SHIFT);
- osd_modify(sd, OSD_OSDWIN0MD_OHZ0 | OSD_OSDWIN0MD_OVZ0, winmd,
- OSD_OSDWIN0MD);
- break;
- case WIN_VID0:
- winmd |= (h_zoom << OSD_VIDWINMD_VHZ0_SHIFT);
- winmd |= (v_zoom << OSD_VIDWINMD_VVZ0_SHIFT);
- osd_modify(sd, OSD_VIDWINMD_VHZ0 | OSD_VIDWINMD_VVZ0, winmd,
- OSD_VIDWINMD);
- break;
- case WIN_OSD1:
- winmd |= (h_zoom << OSD_OSDWIN1MD_OHZ1_SHIFT);
- winmd |= (v_zoom << OSD_OSDWIN1MD_OVZ1_SHIFT);
- osd_modify(sd, OSD_OSDWIN1MD_OHZ1 | OSD_OSDWIN1MD_OVZ1, winmd,
- OSD_OSDWIN1MD);
- break;
- case WIN_VID1:
- winmd |= (h_zoom << OSD_VIDWINMD_VHZ1_SHIFT);
- winmd |= (v_zoom << OSD_VIDWINMD_VVZ1_SHIFT);
- osd_modify(sd, OSD_VIDWINMD_VHZ1 | OSD_VIDWINMD_VVZ1, winmd,
- OSD_VIDWINMD);
- break;
- }
-}
-
-static void _osd_disable_layer(struct osd_state *sd, enum osd_layer layer)
-{
- switch (layer) {
- case WIN_OSD0:
- osd_clear(sd, OSD_OSDWIN0MD_OACT0, OSD_OSDWIN0MD);
- break;
- case WIN_VID0:
- osd_clear(sd, OSD_VIDWINMD_ACT0, OSD_VIDWINMD);
- break;
- case WIN_OSD1:
- /* disable attribute mode as well as disabling the window */
- osd_clear(sd, OSD_OSDWIN1MD_OASW | OSD_OSDWIN1MD_OACT1,
- OSD_OSDWIN1MD);
- break;
- case WIN_VID1:
- osd_clear(sd, OSD_VIDWINMD_ACT1, OSD_VIDWINMD);
- break;
- }
-}
-
-static void osd_disable_layer(struct osd_state *sd, enum osd_layer layer)
-{
- struct osd_state *osd = sd;
- struct osd_window_state *win = &osd->win[layer];
- unsigned long flags;
-
- spin_lock_irqsave(&osd->lock, flags);
-
- if (!win->is_enabled) {
- spin_unlock_irqrestore(&osd->lock, flags);
- return;
- }
- win->is_enabled = 0;
-
- _osd_disable_layer(sd, layer);
-
- spin_unlock_irqrestore(&osd->lock, flags);
-}
-
-static void _osd_enable_attribute_mode(struct osd_state *sd)
-{
- /* enable attribute mode for OSD1 */
- osd_set(sd, OSD_OSDWIN1MD_OASW, OSD_OSDWIN1MD);
-}
-
-static void _osd_enable_layer(struct osd_state *sd, enum osd_layer layer)
-{
- switch (layer) {
- case WIN_OSD0:
- osd_set(sd, OSD_OSDWIN0MD_OACT0, OSD_OSDWIN0MD);
- break;
- case WIN_VID0:
- osd_set(sd, OSD_VIDWINMD_ACT0, OSD_VIDWINMD);
- break;
- case WIN_OSD1:
- /* enable OSD1 and disable attribute mode */
- osd_modify(sd, OSD_OSDWIN1MD_OASW | OSD_OSDWIN1MD_OACT1,
- OSD_OSDWIN1MD_OACT1, OSD_OSDWIN1MD);
- break;
- case WIN_VID1:
- osd_set(sd, OSD_VIDWINMD_ACT1, OSD_VIDWINMD);
- break;
- }
-}
-
-static int osd_enable_layer(struct osd_state *sd, enum osd_layer layer,
- int otherwin)
-{
- struct osd_state *osd = sd;
- struct osd_window_state *win = &osd->win[layer];
- struct osd_layer_config *cfg = &win->lconfig;
- unsigned long flags;
-
- spin_lock_irqsave(&osd->lock, flags);
-
- /*
- * use otherwin flag to know this is the other vid window
- * in YUV420 mode, if is, skip this check
- */
- if (!otherwin && (!win->is_allocated ||
- !win->fb_base_phys ||
- !cfg->line_length ||
- !cfg->xsize ||
- !cfg->ysize)) {
- spin_unlock_irqrestore(&osd->lock, flags);
- return -1;
- }
-
- if (win->is_enabled) {
- spin_unlock_irqrestore(&osd->lock, flags);
- return 0;
- }
- win->is_enabled = 1;
-
- if (cfg->pixfmt != PIXFMT_OSD_ATTR)
- _osd_enable_layer(sd, layer);
- else {
- _osd_enable_attribute_mode(sd);
- _osd_set_blink_attribute(sd, osd->is_blinking, osd->blink);
- }
-
- spin_unlock_irqrestore(&osd->lock, flags);
-
- return 0;
-}
-
-#define OSD_SRC_ADDR_HIGH4 0x7800000
-#define OSD_SRC_ADDR_HIGH7 0x7F0000
-#define OSD_SRCADD_OFSET_SFT 23
-#define OSD_SRCADD_ADD_SFT 16
-#define OSD_WINADL_MASK 0xFFFF
-#define OSD_WINOFST_MASK 0x1000
-#define VPBE_REG_BASE 0x80000000
-
-static void _osd_start_layer(struct osd_state *sd, enum osd_layer layer,
- unsigned long fb_base_phys,
- unsigned long cbcr_ofst)
-{
-
- if (sd->vpbe_type == VPBE_VERSION_1) {
- switch (layer) {
- case WIN_OSD0:
- osd_write(sd, fb_base_phys & ~0x1F, OSD_OSDWIN0ADR);
- break;
- case WIN_VID0:
- osd_write(sd, fb_base_phys & ~0x1F, OSD_VIDWIN0ADR);
- break;
- case WIN_OSD1:
- osd_write(sd, fb_base_phys & ~0x1F, OSD_OSDWIN1ADR);
- break;
- case WIN_VID1:
- osd_write(sd, fb_base_phys & ~0x1F, OSD_VIDWIN1ADR);
- break;
- }
- } else if (sd->vpbe_type == VPBE_VERSION_3) {
- unsigned long fb_offset_32 =
- (fb_base_phys - VPBE_REG_BASE) >> 5;
-
- switch (layer) {
- case WIN_OSD0:
- osd_modify(sd, OSD_OSDWINADH_O0AH,
- fb_offset_32 >> (OSD_SRCADD_ADD_SFT -
- OSD_OSDWINADH_O0AH_SHIFT),
- OSD_OSDWINADH);
- osd_write(sd, fb_offset_32 & OSD_OSDWIN0ADL_O0AL,
- OSD_OSDWIN0ADL);
- break;
- case WIN_VID0:
- osd_modify(sd, OSD_VIDWINADH_V0AH,
- fb_offset_32 >> (OSD_SRCADD_ADD_SFT -
- OSD_VIDWINADH_V0AH_SHIFT),
- OSD_VIDWINADH);
- osd_write(sd, fb_offset_32 & OSD_VIDWIN0ADL_V0AL,
- OSD_VIDWIN0ADL);
- break;
- case WIN_OSD1:
- osd_modify(sd, OSD_OSDWINADH_O1AH,
- fb_offset_32 >> (OSD_SRCADD_ADD_SFT -
- OSD_OSDWINADH_O1AH_SHIFT),
- OSD_OSDWINADH);
- osd_write(sd, fb_offset_32 & OSD_OSDWIN1ADL_O1AL,
- OSD_OSDWIN1ADL);
- break;
- case WIN_VID1:
- osd_modify(sd, OSD_VIDWINADH_V1AH,
- fb_offset_32 >> (OSD_SRCADD_ADD_SFT -
- OSD_VIDWINADH_V1AH_SHIFT),
- OSD_VIDWINADH);
- osd_write(sd, fb_offset_32 & OSD_VIDWIN1ADL_V1AL,
- OSD_VIDWIN1ADL);
- break;
- }
- } else if (sd->vpbe_type == VPBE_VERSION_2) {
- struct osd_window_state *win = &sd->win[layer];
- unsigned long fb_offset_32, cbcr_offset_32;
-
- fb_offset_32 = fb_base_phys - VPBE_REG_BASE;
- if (cbcr_ofst)
- cbcr_offset_32 = cbcr_ofst;
- else
- cbcr_offset_32 = win->lconfig.line_length *
- win->lconfig.ysize;
- cbcr_offset_32 += fb_offset_32;
- fb_offset_32 = fb_offset_32 >> 5;
- cbcr_offset_32 = cbcr_offset_32 >> 5;
- /*
- * DM365: start address is 27-bit long address b26 - b23 are
- * in offset register b12 - b9, and * bit 26 has to be '1'
- */
- if (win->lconfig.pixfmt == PIXFMT_NV12) {
- switch (layer) {
- case WIN_VID0:
- case WIN_VID1:
- /* Y is in VID0 */
- osd_modify(sd, OSD_VIDWIN0OFST_V0AH,
- ((fb_offset_32 & OSD_SRC_ADDR_HIGH4) >>
- (OSD_SRCADD_OFSET_SFT -
- OSD_WINOFST_AH_SHIFT)) |
- OSD_WINOFST_MASK, OSD_VIDWIN0OFST);
- osd_modify(sd, OSD_VIDWINADH_V0AH,
- (fb_offset_32 & OSD_SRC_ADDR_HIGH7) >>
- (OSD_SRCADD_ADD_SFT -
- OSD_VIDWINADH_V0AH_SHIFT),
- OSD_VIDWINADH);
- osd_write(sd, fb_offset_32 & OSD_WINADL_MASK,
- OSD_VIDWIN0ADL);
- /* CbCr is in VID1 */
- osd_modify(sd, OSD_VIDWIN1OFST_V1AH,
- ((cbcr_offset_32 &
- OSD_SRC_ADDR_HIGH4) >>
- (OSD_SRCADD_OFSET_SFT -
- OSD_WINOFST_AH_SHIFT)) |
- OSD_WINOFST_MASK, OSD_VIDWIN1OFST);
- osd_modify(sd, OSD_VIDWINADH_V1AH,
- (cbcr_offset_32 &
- OSD_SRC_ADDR_HIGH7) >>
- (OSD_SRCADD_ADD_SFT -
- OSD_VIDWINADH_V1AH_SHIFT),
- OSD_VIDWINADH);
- osd_write(sd, cbcr_offset_32 & OSD_WINADL_MASK,
- OSD_VIDWIN1ADL);
- break;
- default:
- break;
- }
- }
-
- switch (layer) {
- case WIN_OSD0:
- osd_modify(sd, OSD_OSDWIN0OFST_O0AH,
- ((fb_offset_32 & OSD_SRC_ADDR_HIGH4) >>
- (OSD_SRCADD_OFSET_SFT -
- OSD_WINOFST_AH_SHIFT)) | OSD_WINOFST_MASK,
- OSD_OSDWIN0OFST);
- osd_modify(sd, OSD_OSDWINADH_O0AH,
- (fb_offset_32 & OSD_SRC_ADDR_HIGH7) >>
- (OSD_SRCADD_ADD_SFT -
- OSD_OSDWINADH_O0AH_SHIFT), OSD_OSDWINADH);
- osd_write(sd, fb_offset_32 & OSD_WINADL_MASK,
- OSD_OSDWIN0ADL);
- break;
- case WIN_VID0:
- if (win->lconfig.pixfmt != PIXFMT_NV12) {
- osd_modify(sd, OSD_VIDWIN0OFST_V0AH,
- ((fb_offset_32 & OSD_SRC_ADDR_HIGH4) >>
- (OSD_SRCADD_OFSET_SFT -
- OSD_WINOFST_AH_SHIFT)) |
- OSD_WINOFST_MASK, OSD_VIDWIN0OFST);
- osd_modify(sd, OSD_VIDWINADH_V0AH,
- (fb_offset_32 & OSD_SRC_ADDR_HIGH7) >>
- (OSD_SRCADD_ADD_SFT -
- OSD_VIDWINADH_V0AH_SHIFT),
- OSD_VIDWINADH);
- osd_write(sd, fb_offset_32 & OSD_WINADL_MASK,
- OSD_VIDWIN0ADL);
- }
- break;
- case WIN_OSD1:
- osd_modify(sd, OSD_OSDWIN1OFST_O1AH,
- ((fb_offset_32 & OSD_SRC_ADDR_HIGH4) >>
- (OSD_SRCADD_OFSET_SFT -
- OSD_WINOFST_AH_SHIFT)) | OSD_WINOFST_MASK,
- OSD_OSDWIN1OFST);
- osd_modify(sd, OSD_OSDWINADH_O1AH,
- (fb_offset_32 & OSD_SRC_ADDR_HIGH7) >>
- (OSD_SRCADD_ADD_SFT -
- OSD_OSDWINADH_O1AH_SHIFT),
- OSD_OSDWINADH);
- osd_write(sd, fb_offset_32 & OSD_WINADL_MASK,
- OSD_OSDWIN1ADL);
- break;
- case WIN_VID1:
- if (win->lconfig.pixfmt != PIXFMT_NV12) {
- osd_modify(sd, OSD_VIDWIN1OFST_V1AH,
- ((fb_offset_32 & OSD_SRC_ADDR_HIGH4) >>
- (OSD_SRCADD_OFSET_SFT -
- OSD_WINOFST_AH_SHIFT)) |
- OSD_WINOFST_MASK, OSD_VIDWIN1OFST);
- osd_modify(sd, OSD_VIDWINADH_V1AH,
- (fb_offset_32 & OSD_SRC_ADDR_HIGH7) >>
- (OSD_SRCADD_ADD_SFT -
- OSD_VIDWINADH_V1AH_SHIFT),
- OSD_VIDWINADH);
- osd_write(sd, fb_offset_32 & OSD_WINADL_MASK,
- OSD_VIDWIN1ADL);
- }
- break;
- }
- }
-}
-
-static void osd_start_layer(struct osd_state *sd, enum osd_layer layer,
- unsigned long fb_base_phys,
- unsigned long cbcr_ofst)
-{
- struct osd_state *osd = sd;
- struct osd_window_state *win = &osd->win[layer];
- struct osd_layer_config *cfg = &win->lconfig;
- unsigned long flags;
-
- spin_lock_irqsave(&osd->lock, flags);
-
- win->fb_base_phys = fb_base_phys & ~0x1F;
- _osd_start_layer(sd, layer, fb_base_phys, cbcr_ofst);
-
- if (layer == WIN_VID0) {
- osd->pingpong =
- _osd_dm6446_vid0_pingpong(sd, osd->field_inversion,
- win->fb_base_phys,
- cfg);
- }
-
- spin_unlock_irqrestore(&osd->lock, flags);
-}
-
-static void osd_get_layer_config(struct osd_state *sd, enum osd_layer layer,
- struct osd_layer_config *lconfig)
-{
- struct osd_state *osd = sd;
- struct osd_window_state *win = &osd->win[layer];
- unsigned long flags;
-
- spin_lock_irqsave(&osd->lock, flags);
-
- *lconfig = win->lconfig;
-
- spin_unlock_irqrestore(&osd->lock, flags);
-}
-
-/**
- * try_layer_config() - Try a specific configuration for the layer
- * @sd: ptr to struct osd_state
- * @layer: layer to configure
- * @lconfig: layer configuration to try
- *
- * If the requested lconfig is completely rejected and the value of lconfig on
- * exit is the current lconfig, then try_layer_config() returns 1. Otherwise,
- * try_layer_config() returns 0. A return value of 0 does not necessarily mean
- * that the value of lconfig on exit is identical to the value of lconfig on
- * entry, but merely that it represents a change from the current lconfig.
- */
-static int try_layer_config(struct osd_state *sd, enum osd_layer layer,
- struct osd_layer_config *lconfig)
-{
- struct osd_state *osd = sd;
- struct osd_window_state *win = &osd->win[layer];
- int bad_config = 0;
-
- /* verify that the pixel format is compatible with the layer */
- switch (lconfig->pixfmt) {
- case PIXFMT_1BPP:
- case PIXFMT_2BPP:
- case PIXFMT_4BPP:
- case PIXFMT_8BPP:
- case PIXFMT_RGB565:
- if (osd->vpbe_type == VPBE_VERSION_1)
- bad_config = !is_vid_win(layer);
- break;
- case PIXFMT_YCBCRI:
- case PIXFMT_YCRCBI:
- bad_config = !is_vid_win(layer);
- break;
- case PIXFMT_RGB888:
- if (osd->vpbe_type == VPBE_VERSION_1)
- bad_config = !is_vid_win(layer);
- else if ((osd->vpbe_type == VPBE_VERSION_3) ||
- (osd->vpbe_type == VPBE_VERSION_2))
- bad_config = !is_osd_win(layer);
- break;
- case PIXFMT_NV12:
- if (osd->vpbe_type != VPBE_VERSION_2)
- bad_config = 1;
- else
- bad_config = is_osd_win(layer);
- break;
- case PIXFMT_OSD_ATTR:
- bad_config = (layer != WIN_OSD1);
- break;
- default:
- bad_config = 1;
- break;
- }
- if (bad_config) {
- /*
- * The requested pixel format is incompatible with the layer,
- * so keep the current layer configuration.
- */
- *lconfig = win->lconfig;
- return bad_config;
- }
-
- /* DM6446: */
- /* only one OSD window at a time can use RGB pixel formats */
- if ((osd->vpbe_type == VPBE_VERSION_1) &&
- is_osd_win(layer) && is_rgb_pixfmt(lconfig->pixfmt)) {
- enum osd_pix_format pixfmt;
-
- if (layer == WIN_OSD0)
- pixfmt = osd->win[WIN_OSD1].lconfig.pixfmt;
- else
- pixfmt = osd->win[WIN_OSD0].lconfig.pixfmt;
-
- if (is_rgb_pixfmt(pixfmt)) {
- /*
- * The other OSD window is already configured for an
- * RGB, so keep the current layer configuration.
- */
- *lconfig = win->lconfig;
- return 1;
- }
- }
-
- /* DM6446: only one video window at a time can use RGB888 */
- if ((osd->vpbe_type == VPBE_VERSION_1) && is_vid_win(layer) &&
- lconfig->pixfmt == PIXFMT_RGB888) {
- enum osd_pix_format pixfmt;
-
- if (layer == WIN_VID0)
- pixfmt = osd->win[WIN_VID1].lconfig.pixfmt;
- else
- pixfmt = osd->win[WIN_VID0].lconfig.pixfmt;
-
- if (pixfmt == PIXFMT_RGB888) {
- /*
- * The other video window is already configured for
- * RGB888, so keep the current layer configuration.
- */
- *lconfig = win->lconfig;
- return 1;
- }
- }
-
- /* window dimensions must be non-zero */
- if (!lconfig->line_length || !lconfig->xsize || !lconfig->ysize) {
- *lconfig = win->lconfig;
- return 1;
- }
-
- /* round line_length up to a multiple of 32 */
- lconfig->line_length = ((lconfig->line_length + 31) / 32) * 32;
- lconfig->line_length =
- min(lconfig->line_length, (unsigned)MAX_LINE_LENGTH);
- lconfig->xsize = min(lconfig->xsize, (unsigned)MAX_WIN_SIZE);
- lconfig->ysize = min(lconfig->ysize, (unsigned)MAX_WIN_SIZE);
- lconfig->xpos = min(lconfig->xpos, (unsigned)MAX_WIN_SIZE);
- lconfig->ypos = min(lconfig->ypos, (unsigned)MAX_WIN_SIZE);
- lconfig->interlaced = (lconfig->interlaced != 0);
- if (lconfig->interlaced) {
- /* ysize and ypos must be even for interlaced displays */
- lconfig->ysize &= ~1;
- lconfig->ypos &= ~1;
- }
-
- return 0;
-}
-
-static void _osd_disable_vid_rgb888(struct osd_state *sd)
-{
- /*
- * The DM6446 supports RGB888 pixel format in a single video window.
- * This routine disables RGB888 pixel format for both video windows.
- * The caller must ensure that neither video window is currently
- * configured for RGB888 pixel format.
- */
- if (sd->vpbe_type == VPBE_VERSION_1)
- osd_clear(sd, OSD_MISCCTL_RGBEN, OSD_MISCCTL);
-}
-
-static void _osd_enable_vid_rgb888(struct osd_state *sd,
- enum osd_layer layer)
-{
- /*
- * The DM6446 supports RGB888 pixel format in a single video window.
- * This routine enables RGB888 pixel format for the specified video
- * window. The caller must ensure that the other video window is not
- * currently configured for RGB888 pixel format, as this routine will
- * disable RGB888 pixel format for the other window.
- */
- if (sd->vpbe_type == VPBE_VERSION_1) {
- if (layer == WIN_VID0)
- osd_modify(sd, OSD_MISCCTL_RGBEN | OSD_MISCCTL_RGBWIN,
- OSD_MISCCTL_RGBEN, OSD_MISCCTL);
- else if (layer == WIN_VID1)
- osd_modify(sd, OSD_MISCCTL_RGBEN | OSD_MISCCTL_RGBWIN,
- OSD_MISCCTL_RGBEN | OSD_MISCCTL_RGBWIN,
- OSD_MISCCTL);
- }
-}
-
-static void _osd_set_cbcr_order(struct osd_state *sd,
- enum osd_pix_format pixfmt)
-{
- /*
- * The caller must ensure that all windows using YC pixfmt use the same
- * Cb/Cr order.
- */
- if (pixfmt == PIXFMT_YCBCRI)
- osd_clear(sd, OSD_MODE_CS, OSD_MODE);
- else if (pixfmt == PIXFMT_YCRCBI)
- osd_set(sd, OSD_MODE_CS, OSD_MODE);
-}
-
-static void _osd_set_layer_config(struct osd_state *sd, enum osd_layer layer,
- const struct osd_layer_config *lconfig)
-{
- u32 winmd = 0, winmd_mask = 0, bmw = 0;
-
- _osd_set_cbcr_order(sd, lconfig->pixfmt);
-
- switch (layer) {
- case WIN_OSD0:
- if (sd->vpbe_type == VPBE_VERSION_1) {
- winmd_mask |= OSD_OSDWIN0MD_RGB0E;
- if (lconfig->pixfmt == PIXFMT_RGB565)
- winmd |= OSD_OSDWIN0MD_RGB0E;
- } else if ((sd->vpbe_type == VPBE_VERSION_3) ||
- (sd->vpbe_type == VPBE_VERSION_2)) {
- winmd_mask |= OSD_OSDWIN0MD_BMP0MD;
- switch (lconfig->pixfmt) {
- case PIXFMT_RGB565:
- winmd |= (1 <<
- OSD_OSDWIN0MD_BMP0MD_SHIFT);
- break;
- case PIXFMT_RGB888:
- winmd |= (2 << OSD_OSDWIN0MD_BMP0MD_SHIFT);
- _osd_enable_rgb888_pixblend(sd, OSDWIN_OSD0);
- break;
- case PIXFMT_YCBCRI:
- case PIXFMT_YCRCBI:
- winmd |= (3 << OSD_OSDWIN0MD_BMP0MD_SHIFT);
- break;
- default:
- break;
- }
- }
-
- winmd_mask |= OSD_OSDWIN0MD_BMW0 | OSD_OSDWIN0MD_OFF0;
-
- switch (lconfig->pixfmt) {
- case PIXFMT_1BPP:
- bmw = 0;
- break;
- case PIXFMT_2BPP:
- bmw = 1;
- break;
- case PIXFMT_4BPP:
- bmw = 2;
- break;
- case PIXFMT_8BPP:
- bmw = 3;
- break;
- default:
- break;
- }
- winmd |= (bmw << OSD_OSDWIN0MD_BMW0_SHIFT);
-
- if (lconfig->interlaced)
- winmd |= OSD_OSDWIN0MD_OFF0;
-
- osd_modify(sd, winmd_mask, winmd, OSD_OSDWIN0MD);
- osd_write(sd, lconfig->line_length >> 5, OSD_OSDWIN0OFST);
- osd_write(sd, lconfig->xpos, OSD_OSDWIN0XP);
- osd_write(sd, lconfig->xsize, OSD_OSDWIN0XL);
- if (lconfig->interlaced) {
- osd_write(sd, lconfig->ypos >> 1, OSD_OSDWIN0YP);
- osd_write(sd, lconfig->ysize >> 1, OSD_OSDWIN0YL);
- } else {
- osd_write(sd, lconfig->ypos, OSD_OSDWIN0YP);
- osd_write(sd, lconfig->ysize, OSD_OSDWIN0YL);
- }
- break;
- case WIN_VID0:
- winmd_mask |= OSD_VIDWINMD_VFF0;
- if (lconfig->interlaced)
- winmd |= OSD_VIDWINMD_VFF0;
-
- osd_modify(sd, winmd_mask, winmd, OSD_VIDWINMD);
- osd_write(sd, lconfig->line_length >> 5, OSD_VIDWIN0OFST);
- osd_write(sd, lconfig->xpos, OSD_VIDWIN0XP);
- osd_write(sd, lconfig->xsize, OSD_VIDWIN0XL);
- /*
- * For YUV420P format the register contents are
- * duplicated in both VID registers
- */
- if ((sd->vpbe_type == VPBE_VERSION_2) &&
- (lconfig->pixfmt == PIXFMT_NV12)) {
- /* other window also */
- if (lconfig->interlaced) {
- winmd_mask |= OSD_VIDWINMD_VFF1;
- winmd |= OSD_VIDWINMD_VFF1;
- osd_modify(sd, winmd_mask, winmd,
- OSD_VIDWINMD);
- }
-
- osd_modify(sd, OSD_MISCCTL_S420D,
- OSD_MISCCTL_S420D, OSD_MISCCTL);
- osd_write(sd, lconfig->line_length >> 5,
- OSD_VIDWIN1OFST);
- osd_write(sd, lconfig->xpos, OSD_VIDWIN1XP);
- osd_write(sd, lconfig->xsize, OSD_VIDWIN1XL);
- /*
- * if NV21 pixfmt and line length not 32B
- * aligned (e.g. NTSC), Need to set window
- * X pixel size to be 32B aligned as well
- */
- if (lconfig->xsize % 32) {
- osd_write(sd,
- ((lconfig->xsize + 31) & ~31),
- OSD_VIDWIN1XL);
- osd_write(sd,
- ((lconfig->xsize + 31) & ~31),
- OSD_VIDWIN0XL);
- }
- } else if ((sd->vpbe_type == VPBE_VERSION_2) &&
- (lconfig->pixfmt != PIXFMT_NV12)) {
- osd_modify(sd, OSD_MISCCTL_S420D, ~OSD_MISCCTL_S420D,
- OSD_MISCCTL);
- }
-
- if (lconfig->interlaced) {
- osd_write(sd, lconfig->ypos >> 1, OSD_VIDWIN0YP);
- osd_write(sd, lconfig->ysize >> 1, OSD_VIDWIN0YL);
- if ((sd->vpbe_type == VPBE_VERSION_2) &&
- lconfig->pixfmt == PIXFMT_NV12) {
- osd_write(sd, lconfig->ypos >> 1,
- OSD_VIDWIN1YP);
- osd_write(sd, lconfig->ysize >> 1,
- OSD_VIDWIN1YL);
- }
- } else {
- osd_write(sd, lconfig->ypos, OSD_VIDWIN0YP);
- osd_write(sd, lconfig->ysize, OSD_VIDWIN0YL);
- if ((sd->vpbe_type == VPBE_VERSION_2) &&
- lconfig->pixfmt == PIXFMT_NV12) {
- osd_write(sd, lconfig->ypos, OSD_VIDWIN1YP);
- osd_write(sd, lconfig->ysize, OSD_VIDWIN1YL);
- }
- }
- break;
- case WIN_OSD1:
- /*
- * The caller must ensure that OSD1 is disabled prior to
- * switching from a normal mode to attribute mode or from
- * attribute mode to a normal mode.
- */
- if (lconfig->pixfmt == PIXFMT_OSD_ATTR) {
- if (sd->vpbe_type == VPBE_VERSION_1) {
- winmd_mask |= OSD_OSDWIN1MD_ATN1E |
- OSD_OSDWIN1MD_RGB1E | OSD_OSDWIN1MD_CLUTS1 |
- OSD_OSDWIN1MD_BLND1 | OSD_OSDWIN1MD_TE1;
- } else {
- winmd_mask |= OSD_OSDWIN1MD_BMP1MD |
- OSD_OSDWIN1MD_CLUTS1 | OSD_OSDWIN1MD_BLND1 |
- OSD_OSDWIN1MD_TE1;
- }
- } else {
- if (sd->vpbe_type == VPBE_VERSION_1) {
- winmd_mask |= OSD_OSDWIN1MD_RGB1E;
- if (lconfig->pixfmt == PIXFMT_RGB565)
- winmd |= OSD_OSDWIN1MD_RGB1E;
- } else if ((sd->vpbe_type == VPBE_VERSION_3)
- || (sd->vpbe_type == VPBE_VERSION_2)) {
- winmd_mask |= OSD_OSDWIN1MD_BMP1MD;
- switch (lconfig->pixfmt) {
- case PIXFMT_RGB565:
- winmd |=
- (1 << OSD_OSDWIN1MD_BMP1MD_SHIFT);
- break;
- case PIXFMT_RGB888:
- winmd |=
- (2 << OSD_OSDWIN1MD_BMP1MD_SHIFT);
- _osd_enable_rgb888_pixblend(sd,
- OSDWIN_OSD1);
- break;
- case PIXFMT_YCBCRI:
- case PIXFMT_YCRCBI:
- winmd |=
- (3 << OSD_OSDWIN1MD_BMP1MD_SHIFT);
- break;
- default:
- break;
- }
- }
-
- winmd_mask |= OSD_OSDWIN1MD_BMW1;
- switch (lconfig->pixfmt) {
- case PIXFMT_1BPP:
- bmw = 0;
- break;
- case PIXFMT_2BPP:
- bmw = 1;
- break;
- case PIXFMT_4BPP:
- bmw = 2;
- break;
- case PIXFMT_8BPP:
- bmw = 3;
- break;
- default:
- break;
- }
- winmd |= (bmw << OSD_OSDWIN1MD_BMW1_SHIFT);
- }
-
- winmd_mask |= OSD_OSDWIN1MD_OFF1;
- if (lconfig->interlaced)
- winmd |= OSD_OSDWIN1MD_OFF1;
-
- osd_modify(sd, winmd_mask, winmd, OSD_OSDWIN1MD);
- osd_write(sd, lconfig->line_length >> 5, OSD_OSDWIN1OFST);
- osd_write(sd, lconfig->xpos, OSD_OSDWIN1XP);
- osd_write(sd, lconfig->xsize, OSD_OSDWIN1XL);
- if (lconfig->interlaced) {
- osd_write(sd, lconfig->ypos >> 1, OSD_OSDWIN1YP);
- osd_write(sd, lconfig->ysize >> 1, OSD_OSDWIN1YL);
- } else {
- osd_write(sd, lconfig->ypos, OSD_OSDWIN1YP);
- osd_write(sd, lconfig->ysize, OSD_OSDWIN1YL);
- }
- break;
- case WIN_VID1:
- winmd_mask |= OSD_VIDWINMD_VFF1;
- if (lconfig->interlaced)
- winmd |= OSD_VIDWINMD_VFF1;
-
- osd_modify(sd, winmd_mask, winmd, OSD_VIDWINMD);
- osd_write(sd, lconfig->line_length >> 5, OSD_VIDWIN1OFST);
- osd_write(sd, lconfig->xpos, OSD_VIDWIN1XP);
- osd_write(sd, lconfig->xsize, OSD_VIDWIN1XL);
- /*
- * For YUV420P format the register contents are
- * duplicated in both VID registers
- */
- if (sd->vpbe_type == VPBE_VERSION_2) {
- if (lconfig->pixfmt == PIXFMT_NV12) {
- /* other window also */
- if (lconfig->interlaced) {
- winmd_mask |= OSD_VIDWINMD_VFF0;
- winmd |= OSD_VIDWINMD_VFF0;
- osd_modify(sd, winmd_mask, winmd,
- OSD_VIDWINMD);
- }
- osd_modify(sd, OSD_MISCCTL_S420D,
- OSD_MISCCTL_S420D, OSD_MISCCTL);
- osd_write(sd, lconfig->line_length >> 5,
- OSD_VIDWIN0OFST);
- osd_write(sd, lconfig->xpos, OSD_VIDWIN0XP);
- osd_write(sd, lconfig->xsize, OSD_VIDWIN0XL);
- } else {
- osd_modify(sd, OSD_MISCCTL_S420D,
- ~OSD_MISCCTL_S420D, OSD_MISCCTL);
- }
- }
-
- if (lconfig->interlaced) {
- osd_write(sd, lconfig->ypos >> 1, OSD_VIDWIN1YP);
- osd_write(sd, lconfig->ysize >> 1, OSD_VIDWIN1YL);
- if ((sd->vpbe_type == VPBE_VERSION_2) &&
- lconfig->pixfmt == PIXFMT_NV12) {
- osd_write(sd, lconfig->ypos >> 1,
- OSD_VIDWIN0YP);
- osd_write(sd, lconfig->ysize >> 1,
- OSD_VIDWIN0YL);
- }
- } else {
- osd_write(sd, lconfig->ypos, OSD_VIDWIN1YP);
- osd_write(sd, lconfig->ysize, OSD_VIDWIN1YL);
- if ((sd->vpbe_type == VPBE_VERSION_2) &&
- lconfig->pixfmt == PIXFMT_NV12) {
- osd_write(sd, lconfig->ypos, OSD_VIDWIN0YP);
- osd_write(sd, lconfig->ysize, OSD_VIDWIN0YL);
- }
- }
- break;
- }
-}
-
-static int osd_set_layer_config(struct osd_state *sd, enum osd_layer layer,
- struct osd_layer_config *lconfig)
-{
- struct osd_state *osd = sd;
- struct osd_window_state *win = &osd->win[layer];
- struct osd_layer_config *cfg = &win->lconfig;
- unsigned long flags;
- int reject_config;
-
- spin_lock_irqsave(&osd->lock, flags);
-
- reject_config = try_layer_config(sd, layer, lconfig);
- if (reject_config) {
- spin_unlock_irqrestore(&osd->lock, flags);
- return reject_config;
- }
-
- /* update the current Cb/Cr order */
- if (is_yc_pixfmt(lconfig->pixfmt))
- osd->yc_pixfmt = lconfig->pixfmt;
-
- /*
- * If we are switching OSD1 from normal mode to attribute mode or from
- * attribute mode to normal mode, then we must disable the window.
- */
- if (layer == WIN_OSD1) {
- if (((lconfig->pixfmt == PIXFMT_OSD_ATTR) &&
- (cfg->pixfmt != PIXFMT_OSD_ATTR)) ||
- ((lconfig->pixfmt != PIXFMT_OSD_ATTR) &&
- (cfg->pixfmt == PIXFMT_OSD_ATTR))) {
- win->is_enabled = 0;
- _osd_disable_layer(sd, layer);
- }
- }
-
- _osd_set_layer_config(sd, layer, lconfig);
-
- if (layer == WIN_OSD1) {
- struct osd_osdwin_state *osdwin_state =
- &osd->osdwin[OSDWIN_OSD1];
-
- if ((lconfig->pixfmt != PIXFMT_OSD_ATTR) &&
- (cfg->pixfmt == PIXFMT_OSD_ATTR)) {
- /*
- * We just switched OSD1 from attribute mode to normal
- * mode, so we must initialize the CLUT select, the
- * blend factor, transparency colorkey enable, and
- * attenuation enable (DM6446 only) bits in the
- * OSDWIN1MD register.
- */
- _osd_set_osd_clut(sd, OSDWIN_OSD1,
- osdwin_state->clut);
- _osd_set_blending_factor(sd, OSDWIN_OSD1,
- osdwin_state->blend);
- if (osdwin_state->colorkey_blending) {
- _osd_enable_color_key(sd, OSDWIN_OSD1,
- osdwin_state->
- colorkey,
- lconfig->pixfmt);
- } else
- _osd_disable_color_key(sd, OSDWIN_OSD1);
- _osd_set_rec601_attenuation(sd, OSDWIN_OSD1,
- osdwin_state->
- rec601_attenuation);
- } else if ((lconfig->pixfmt == PIXFMT_OSD_ATTR) &&
- (cfg->pixfmt != PIXFMT_OSD_ATTR)) {
- /*
- * We just switched OSD1 from normal mode to attribute
- * mode, so we must initialize the blink enable and
- * blink interval bits in the OSDATRMD register.
- */
- _osd_set_blink_attribute(sd, osd->is_blinking,
- osd->blink);
- }
- }
-
- /*
- * If we just switched to a 1-, 2-, or 4-bits-per-pixel bitmap format
- * then configure a default palette map.
- */
- if ((lconfig->pixfmt != cfg->pixfmt) &&
- ((lconfig->pixfmt == PIXFMT_1BPP) ||
- (lconfig->pixfmt == PIXFMT_2BPP) ||
- (lconfig->pixfmt == PIXFMT_4BPP))) {
- enum osd_win_layer osdwin =
- ((layer == WIN_OSD0) ? OSDWIN_OSD0 : OSDWIN_OSD1);
- struct osd_osdwin_state *osdwin_state =
- &osd->osdwin[osdwin];
- unsigned char clut_index;
- unsigned char clut_entries = 0;
-
- switch (lconfig->pixfmt) {
- case PIXFMT_1BPP:
- clut_entries = 2;
- break;
- case PIXFMT_2BPP:
- clut_entries = 4;
- break;
- case PIXFMT_4BPP:
- clut_entries = 16;
- break;
- default:
- break;
- }
- /*
- * The default palette map maps the pixel value to the clut
- * index, i.e. pixel value 0 maps to clut entry 0, pixel value
- * 1 maps to clut entry 1, etc.
- */
- for (clut_index = 0; clut_index < 16; clut_index++) {
- osdwin_state->palette_map[clut_index] = clut_index;
- if (clut_index < clut_entries) {
- _osd_set_palette_map(sd, osdwin, clut_index,
- clut_index,
- lconfig->pixfmt);
- }
- }
- }
-
- *cfg = *lconfig;
- /* DM6446: configure the RGB888 enable and window selection */
- if (osd->win[WIN_VID0].lconfig.pixfmt == PIXFMT_RGB888)
- _osd_enable_vid_rgb888(sd, WIN_VID0);
- else if (osd->win[WIN_VID1].lconfig.pixfmt == PIXFMT_RGB888)
- _osd_enable_vid_rgb888(sd, WIN_VID1);
- else
- _osd_disable_vid_rgb888(sd);
-
- if (layer == WIN_VID0) {
- osd->pingpong =
- _osd_dm6446_vid0_pingpong(sd, osd->field_inversion,
- win->fb_base_phys,
- cfg);
- }
-
- spin_unlock_irqrestore(&osd->lock, flags);
-
- return 0;
-}
-
-static void osd_init_layer(struct osd_state *sd, enum osd_layer layer)
-{
- struct osd_state *osd = sd;
- struct osd_window_state *win = &osd->win[layer];
- enum osd_win_layer osdwin;
- struct osd_osdwin_state *osdwin_state;
- struct osd_layer_config *cfg = &win->lconfig;
- unsigned long flags;
-
- spin_lock_irqsave(&osd->lock, flags);
-
- win->is_enabled = 0;
- _osd_disable_layer(sd, layer);
-
- win->h_zoom = ZOOM_X1;
- win->v_zoom = ZOOM_X1;
- _osd_set_zoom(sd, layer, win->h_zoom, win->v_zoom);
-
- win->fb_base_phys = 0;
- _osd_start_layer(sd, layer, win->fb_base_phys, 0);
-
- cfg->line_length = 0;
- cfg->xsize = 0;
- cfg->ysize = 0;
- cfg->xpos = 0;
- cfg->ypos = 0;
- cfg->interlaced = 0;
- switch (layer) {
- case WIN_OSD0:
- case WIN_OSD1:
- osdwin = (layer == WIN_OSD0) ? OSDWIN_OSD0 : OSDWIN_OSD1;
- osdwin_state = &osd->osdwin[osdwin];
- /*
- * Other code relies on the fact that OSD windows default to a
- * bitmap pixel format when they are deallocated, so don't
- * change this default pixel format.
- */
- cfg->pixfmt = PIXFMT_8BPP;
- _osd_set_layer_config(sd, layer, cfg);
- osdwin_state->clut = RAM_CLUT;
- _osd_set_osd_clut(sd, osdwin, osdwin_state->clut);
- osdwin_state->colorkey_blending = 0;
- _osd_disable_color_key(sd, osdwin);
- osdwin_state->blend = OSD_8_VID_0;
- _osd_set_blending_factor(sd, osdwin, osdwin_state->blend);
- osdwin_state->rec601_attenuation = 0;
- _osd_set_rec601_attenuation(sd, osdwin,
- osdwin_state->
- rec601_attenuation);
- if (osdwin == OSDWIN_OSD1) {
- osd->is_blinking = 0;
- osd->blink = BLINK_X1;
- }
- break;
- case WIN_VID0:
- case WIN_VID1:
- cfg->pixfmt = osd->yc_pixfmt;
- _osd_set_layer_config(sd, layer, cfg);
- break;
- }
-
- spin_unlock_irqrestore(&osd->lock, flags);
-}
-
-static void osd_release_layer(struct osd_state *sd, enum osd_layer layer)
-{
- struct osd_state *osd = sd;
- struct osd_window_state *win = &osd->win[layer];
- unsigned long flags;
-
- spin_lock_irqsave(&osd->lock, flags);
-
- if (!win->is_allocated) {
- spin_unlock_irqrestore(&osd->lock, flags);
- return;
- }
-
- spin_unlock_irqrestore(&osd->lock, flags);
- osd_init_layer(sd, layer);
- spin_lock_irqsave(&osd->lock, flags);
-
- win->is_allocated = 0;
-
- spin_unlock_irqrestore(&osd->lock, flags);
-}
-
-static int osd_request_layer(struct osd_state *sd, enum osd_layer layer)
-{
- struct osd_state *osd = sd;
- struct osd_window_state *win = &osd->win[layer];
- unsigned long flags;
-
- spin_lock_irqsave(&osd->lock, flags);
-
- if (win->is_allocated) {
- spin_unlock_irqrestore(&osd->lock, flags);
- return -1;
- }
- win->is_allocated = 1;
-
- spin_unlock_irqrestore(&osd->lock, flags);
-
- return 0;
-}
-
-static void _osd_init(struct osd_state *sd)
-{
- osd_write(sd, 0, OSD_MODE);
- osd_write(sd, 0, OSD_VIDWINMD);
- osd_write(sd, 0, OSD_OSDWIN0MD);
- osd_write(sd, 0, OSD_OSDWIN1MD);
- osd_write(sd, 0, OSD_RECTCUR);
- osd_write(sd, 0, OSD_MISCCTL);
- if (sd->vpbe_type == VPBE_VERSION_3) {
- osd_write(sd, 0, OSD_VBNDRY);
- osd_write(sd, 0, OSD_EXTMODE);
- osd_write(sd, OSD_MISCCTL_DMANG, OSD_MISCCTL);
- }
-}
-
-static void osd_set_left_margin(struct osd_state *sd, u32 val)
-{
- osd_write(sd, val, OSD_BASEPX);
-}
-
-static void osd_set_top_margin(struct osd_state *sd, u32 val)
-{
- osd_write(sd, val, OSD_BASEPY);
-}
-
-static int osd_initialize(struct osd_state *osd)
-{
- if (osd == NULL)
- return -ENODEV;
- _osd_init(osd);
-
- /* set default Cb/Cr order */
- osd->yc_pixfmt = PIXFMT_YCBCRI;
-
- if (osd->vpbe_type == VPBE_VERSION_3) {
- /*
- * ROM CLUT1 on the DM355 is similar (identical?) to ROM CLUT0
- * on the DM6446, so make ROM_CLUT1 the default on the DM355.
- */
- osd->rom_clut = ROM_CLUT1;
- }
-
- _osd_set_field_inversion(osd, osd->field_inversion);
- _osd_set_rom_clut(osd, osd->rom_clut);
-
- osd_init_layer(osd, WIN_OSD0);
- osd_init_layer(osd, WIN_VID0);
- osd_init_layer(osd, WIN_OSD1);
- osd_init_layer(osd, WIN_VID1);
-
- return 0;
-}
-
-static const struct vpbe_osd_ops osd_ops = {
- .initialize = osd_initialize,
- .request_layer = osd_request_layer,
- .release_layer = osd_release_layer,
- .enable_layer = osd_enable_layer,
- .disable_layer = osd_disable_layer,
- .set_layer_config = osd_set_layer_config,
- .get_layer_config = osd_get_layer_config,
- .start_layer = osd_start_layer,
- .set_left_margin = osd_set_left_margin,
- .set_top_margin = osd_set_top_margin,
-};
-
-static int osd_probe(struct platform_device *pdev)
-{
- const struct platform_device_id *pdev_id;
- struct osd_state *osd;
- struct resource *res;
-
- pdev_id = platform_get_device_id(pdev);
- if (!pdev_id)
- return -EINVAL;
-
- osd = devm_kzalloc(&pdev->dev, sizeof(struct osd_state), GFP_KERNEL);
- if (osd == NULL)
- return -ENOMEM;
-
-
- osd->dev = &pdev->dev;
- osd->vpbe_type = pdev_id->driver_data;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- osd->osd_base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(osd->osd_base))
- return PTR_ERR(osd->osd_base);
-
- osd->osd_base_phys = res->start;
- osd->osd_size = resource_size(res);
- spin_lock_init(&osd->lock);
- osd->ops = osd_ops;
- platform_set_drvdata(pdev, osd);
- dev_notice(osd->dev, "OSD sub device probe success\n");
-
- return 0;
-}
-
-static int osd_remove(struct platform_device *pdev)
-{
- return 0;
-}
-
-static struct platform_driver osd_driver = {
- .probe = osd_probe,
- .remove = osd_remove,
- .driver = {
- .name = MODULE_NAME,
- },
- .id_table = vpbe_osd_devtype
-};
-
-module_platform_driver(osd_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("DaVinci OSD Manager Driver");
-MODULE_AUTHOR("Texas Instruments");
diff --git a/drivers/media/platform/ti/davinci/vpbe_osd_regs.h b/drivers/media/platform/ti/davinci/vpbe_osd_regs.h
deleted file mode 100644
index cecd5991d4c5..000000000000
--- a/drivers/media/platform/ti/davinci/vpbe_osd_regs.h
+++ /dev/null
@@ -1,352 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2006-2010 Texas Instruments Inc
- */
-#ifndef _VPBE_OSD_REGS_H
-#define _VPBE_OSD_REGS_H
-
-/* VPBE Global Registers */
-#define VPBE_PID 0x0
-#define VPBE_PCR 0x4
-
-/* VPSS CLock Registers */
-#define VPSSCLK_PID 0x00
-#define VPSSCLK_CLKCTRL 0x04
-
-/* VPSS Buffer Logic Registers */
-#define VPSSBL_PID 0x00
-#define VPSSBL_PCR 0x04
-#define VPSSBL_BCR 0x08
-#define VPSSBL_INTSTAT 0x0C
-#define VPSSBL_INTSEL 0x10
-#define VPSSBL_EVTSEL 0x14
-#define VPSSBL_MEMCTRL 0x18
-#define VPSSBL_CCDCMUX 0x1C
-
-/* DM365 ISP5 system configuration */
-#define ISP5_PID 0x0
-#define ISP5_PCCR 0x4
-#define ISP5_BCR 0x8
-#define ISP5_INTSTAT 0xC
-#define ISP5_INTSEL1 0x10
-#define ISP5_INTSEL2 0x14
-#define ISP5_INTSEL3 0x18
-#define ISP5_EVTSEL 0x1c
-#define ISP5_CCDCMUX 0x20
-
-/* VPBE On-Screen Display Subsystem Registers (OSD) */
-#define OSD_MODE 0x00
-#define OSD_VIDWINMD 0x04
-#define OSD_OSDWIN0MD 0x08
-#define OSD_OSDWIN1MD 0x0C
-#define OSD_OSDATRMD 0x0C
-#define OSD_RECTCUR 0x10
-#define OSD_VIDWIN0OFST 0x18
-#define OSD_VIDWIN1OFST 0x1C
-#define OSD_OSDWIN0OFST 0x20
-#define OSD_OSDWIN1OFST 0x24
-#define OSD_VIDWINADH 0x28
-#define OSD_VIDWIN0ADL 0x2C
-#define OSD_VIDWIN0ADR 0x2C
-#define OSD_VIDWIN1ADL 0x30
-#define OSD_VIDWIN1ADR 0x30
-#define OSD_OSDWINADH 0x34
-#define OSD_OSDWIN0ADL 0x38
-#define OSD_OSDWIN0ADR 0x38
-#define OSD_OSDWIN1ADL 0x3C
-#define OSD_OSDWIN1ADR 0x3C
-#define OSD_BASEPX 0x40
-#define OSD_BASEPY 0x44
-#define OSD_VIDWIN0XP 0x48
-#define OSD_VIDWIN0YP 0x4C
-#define OSD_VIDWIN0XL 0x50
-#define OSD_VIDWIN0YL 0x54
-#define OSD_VIDWIN1XP 0x58
-#define OSD_VIDWIN1YP 0x5C
-#define OSD_VIDWIN1XL 0x60
-#define OSD_VIDWIN1YL 0x64
-#define OSD_OSDWIN0XP 0x68
-#define OSD_OSDWIN0YP 0x6C
-#define OSD_OSDWIN0XL 0x70
-#define OSD_OSDWIN0YL 0x74
-#define OSD_OSDWIN1XP 0x78
-#define OSD_OSDWIN1YP 0x7C
-#define OSD_OSDWIN1XL 0x80
-#define OSD_OSDWIN1YL 0x84
-#define OSD_CURXP 0x88
-#define OSD_CURYP 0x8C
-#define OSD_CURXL 0x90
-#define OSD_CURYL 0x94
-#define OSD_W0BMP01 0xA0
-#define OSD_W0BMP23 0xA4
-#define OSD_W0BMP45 0xA8
-#define OSD_W0BMP67 0xAC
-#define OSD_W0BMP89 0xB0
-#define OSD_W0BMPAB 0xB4
-#define OSD_W0BMPCD 0xB8
-#define OSD_W0BMPEF 0xBC
-#define OSD_W1BMP01 0xC0
-#define OSD_W1BMP23 0xC4
-#define OSD_W1BMP45 0xC8
-#define OSD_W1BMP67 0xCC
-#define OSD_W1BMP89 0xD0
-#define OSD_W1BMPAB 0xD4
-#define OSD_W1BMPCD 0xD8
-#define OSD_W1BMPEF 0xDC
-#define OSD_VBNDRY 0xE0
-#define OSD_EXTMODE 0xE4
-#define OSD_MISCCTL 0xE8
-#define OSD_CLUTRAMYCB 0xEC
-#define OSD_CLUTRAMCR 0xF0
-#define OSD_TRANSPVAL 0xF4
-#define OSD_TRANSPVALL 0xF4
-#define OSD_TRANSPVALU 0xF8
-#define OSD_TRANSPBMPIDX 0xFC
-#define OSD_PPVWIN0ADR 0xFC
-
-/* bit definitions */
-#define VPBE_PCR_VENC_DIV (1 << 1)
-#define VPBE_PCR_CLK_OFF (1 << 0)
-
-#define VPSSBL_INTSTAT_HSSIINT (1 << 14)
-#define VPSSBL_INTSTAT_CFALDINT (1 << 13)
-#define VPSSBL_INTSTAT_IPIPE_INT5 (1 << 12)
-#define VPSSBL_INTSTAT_IPIPE_INT4 (1 << 11)
-#define VPSSBL_INTSTAT_IPIPE_INT3 (1 << 10)
-#define VPSSBL_INTSTAT_IPIPE_INT2 (1 << 9)
-#define VPSSBL_INTSTAT_IPIPE_INT1 (1 << 8)
-#define VPSSBL_INTSTAT_IPIPE_INT0 (1 << 7)
-#define VPSSBL_INTSTAT_IPIPEIFINT (1 << 6)
-#define VPSSBL_INTSTAT_OSDINT (1 << 5)
-#define VPSSBL_INTSTAT_VENCINT (1 << 4)
-#define VPSSBL_INTSTAT_H3AINT (1 << 3)
-#define VPSSBL_INTSTAT_CCDC_VDINT2 (1 << 2)
-#define VPSSBL_INTSTAT_CCDC_VDINT1 (1 << 1)
-#define VPSSBL_INTSTAT_CCDC_VDINT0 (1 << 0)
-
-/* DM365 ISP5 bit definitions */
-#define ISP5_INTSTAT_VENCINT (1 << 21)
-#define ISP5_INTSTAT_OSDINT (1 << 20)
-
-/* VMOD TVTYP options for HDMD=0 */
-#define SDTV_NTSC 0
-#define SDTV_PAL 1
-/* VMOD TVTYP options for HDMD=1 */
-#define HDTV_525P 0
-#define HDTV_625P 1
-#define HDTV_1080I 2
-#define HDTV_720P 3
-
-#define OSD_MODE_CS (1 << 15)
-#define OSD_MODE_OVRSZ (1 << 14)
-#define OSD_MODE_OHRSZ (1 << 13)
-#define OSD_MODE_EF (1 << 12)
-#define OSD_MODE_VVRSZ (1 << 11)
-#define OSD_MODE_VHRSZ (1 << 10)
-#define OSD_MODE_FSINV (1 << 9)
-#define OSD_MODE_BCLUT (1 << 8)
-#define OSD_MODE_CABG_SHIFT 0
-#define OSD_MODE_CABG (0xff << 0)
-
-#define OSD_VIDWINMD_VFINV (1 << 15)
-#define OSD_VIDWINMD_V1EFC (1 << 14)
-#define OSD_VIDWINMD_VHZ1_SHIFT 12
-#define OSD_VIDWINMD_VHZ1 (3 << 12)
-#define OSD_VIDWINMD_VVZ1_SHIFT 10
-#define OSD_VIDWINMD_VVZ1 (3 << 10)
-#define OSD_VIDWINMD_VFF1 (1 << 9)
-#define OSD_VIDWINMD_ACT1 (1 << 8)
-#define OSD_VIDWINMD_V0EFC (1 << 6)
-#define OSD_VIDWINMD_VHZ0_SHIFT 4
-#define OSD_VIDWINMD_VHZ0 (3 << 4)
-#define OSD_VIDWINMD_VVZ0_SHIFT 2
-#define OSD_VIDWINMD_VVZ0 (3 << 2)
-#define OSD_VIDWINMD_VFF0 (1 << 1)
-#define OSD_VIDWINMD_ACT0 (1 << 0)
-
-#define OSD_OSDWIN0MD_ATN0E (1 << 14)
-#define OSD_OSDWIN0MD_RGB0E (1 << 13)
-#define OSD_OSDWIN0MD_BMP0MD_SHIFT 13
-#define OSD_OSDWIN0MD_BMP0MD (3 << 13)
-#define OSD_OSDWIN0MD_CLUTS0 (1 << 12)
-#define OSD_OSDWIN0MD_OHZ0_SHIFT 10
-#define OSD_OSDWIN0MD_OHZ0 (3 << 10)
-#define OSD_OSDWIN0MD_OVZ0_SHIFT 8
-#define OSD_OSDWIN0MD_OVZ0 (3 << 8)
-#define OSD_OSDWIN0MD_BMW0_SHIFT 6
-#define OSD_OSDWIN0MD_BMW0 (3 << 6)
-#define OSD_OSDWIN0MD_BLND0_SHIFT 3
-#define OSD_OSDWIN0MD_BLND0 (7 << 3)
-#define OSD_OSDWIN0MD_TE0 (1 << 2)
-#define OSD_OSDWIN0MD_OFF0 (1 << 1)
-#define OSD_OSDWIN0MD_OACT0 (1 << 0)
-
-#define OSD_OSDWIN1MD_OASW (1 << 15)
-#define OSD_OSDWIN1MD_ATN1E (1 << 14)
-#define OSD_OSDWIN1MD_RGB1E (1 << 13)
-#define OSD_OSDWIN1MD_BMP1MD_SHIFT 13
-#define OSD_OSDWIN1MD_BMP1MD (3 << 13)
-#define OSD_OSDWIN1MD_CLUTS1 (1 << 12)
-#define OSD_OSDWIN1MD_OHZ1_SHIFT 10
-#define OSD_OSDWIN1MD_OHZ1 (3 << 10)
-#define OSD_OSDWIN1MD_OVZ1_SHIFT 8
-#define OSD_OSDWIN1MD_OVZ1 (3 << 8)
-#define OSD_OSDWIN1MD_BMW1_SHIFT 6
-#define OSD_OSDWIN1MD_BMW1 (3 << 6)
-#define OSD_OSDWIN1MD_BLND1_SHIFT 3
-#define OSD_OSDWIN1MD_BLND1 (7 << 3)
-#define OSD_OSDWIN1MD_TE1 (1 << 2)
-#define OSD_OSDWIN1MD_OFF1 (1 << 1)
-#define OSD_OSDWIN1MD_OACT1 (1 << 0)
-
-#define OSD_OSDATRMD_OASW (1 << 15)
-#define OSD_OSDATRMD_OHZA_SHIFT 10
-#define OSD_OSDATRMD_OHZA (3 << 10)
-#define OSD_OSDATRMD_OVZA_SHIFT 8
-#define OSD_OSDATRMD_OVZA (3 << 8)
-#define OSD_OSDATRMD_BLNKINT_SHIFT 6
-#define OSD_OSDATRMD_BLNKINT (3 << 6)
-#define OSD_OSDATRMD_OFFA (1 << 1)
-#define OSD_OSDATRMD_BLNK (1 << 0)
-
-#define OSD_RECTCUR_RCAD_SHIFT 8
-#define OSD_RECTCUR_RCAD (0xff << 8)
-#define OSD_RECTCUR_CLUTSR (1 << 7)
-#define OSD_RECTCUR_RCHW_SHIFT 4
-#define OSD_RECTCUR_RCHW (7 << 4)
-#define OSD_RECTCUR_RCVW_SHIFT 1
-#define OSD_RECTCUR_RCVW (7 << 1)
-#define OSD_RECTCUR_RCACT (1 << 0)
-
-#define OSD_VIDWIN0OFST_V0LO (0x1ff << 0)
-
-#define OSD_VIDWIN1OFST_V1LO (0x1ff << 0)
-
-#define OSD_OSDWIN0OFST_O0LO (0x1ff << 0)
-
-#define OSD_OSDWIN1OFST_O1LO (0x1ff << 0)
-
-#define OSD_WINOFST_AH_SHIFT 9
-
-#define OSD_VIDWIN0OFST_V0AH (0xf << 9)
-#define OSD_VIDWIN1OFST_V1AH (0xf << 9)
-#define OSD_OSDWIN0OFST_O0AH (0xf << 9)
-#define OSD_OSDWIN1OFST_O1AH (0xf << 9)
-
-#define OSD_VIDWINADH_V1AH_SHIFT 8
-#define OSD_VIDWINADH_V1AH (0x7f << 8)
-#define OSD_VIDWINADH_V0AH_SHIFT 0
-#define OSD_VIDWINADH_V0AH (0x7f << 0)
-
-#define OSD_VIDWIN0ADL_V0AL (0xffff << 0)
-
-#define OSD_VIDWIN1ADL_V1AL (0xffff << 0)
-
-#define OSD_OSDWINADH_O1AH_SHIFT 8
-#define OSD_OSDWINADH_O1AH (0x7f << 8)
-#define OSD_OSDWINADH_O0AH_SHIFT 0
-#define OSD_OSDWINADH_O0AH (0x7f << 0)
-
-#define OSD_OSDWIN0ADL_O0AL (0xffff << 0)
-
-#define OSD_OSDWIN1ADL_O1AL (0xffff << 0)
-
-#define OSD_BASEPX_BPX (0x3ff << 0)
-
-#define OSD_BASEPY_BPY (0x1ff << 0)
-
-#define OSD_VIDWIN0XP_V0X (0x7ff << 0)
-
-#define OSD_VIDWIN0YP_V0Y (0x7ff << 0)
-
-#define OSD_VIDWIN0XL_V0W (0x7ff << 0)
-
-#define OSD_VIDWIN0YL_V0H (0x7ff << 0)
-
-#define OSD_VIDWIN1XP_V1X (0x7ff << 0)
-
-#define OSD_VIDWIN1YP_V1Y (0x7ff << 0)
-
-#define OSD_VIDWIN1XL_V1W (0x7ff << 0)
-
-#define OSD_VIDWIN1YL_V1H (0x7ff << 0)
-
-#define OSD_OSDWIN0XP_W0X (0x7ff << 0)
-
-#define OSD_OSDWIN0YP_W0Y (0x7ff << 0)
-
-#define OSD_OSDWIN0XL_W0W (0x7ff << 0)
-
-#define OSD_OSDWIN0YL_W0H (0x7ff << 0)
-
-#define OSD_OSDWIN1XP_W1X (0x7ff << 0)
-
-#define OSD_OSDWIN1YP_W1Y (0x7ff << 0)
-
-#define OSD_OSDWIN1XL_W1W (0x7ff << 0)
-
-#define OSD_OSDWIN1YL_W1H (0x7ff << 0)
-
-#define OSD_CURXP_RCSX (0x7ff << 0)
-
-#define OSD_CURYP_RCSY (0x7ff << 0)
-
-#define OSD_CURXL_RCSW (0x7ff << 0)
-
-#define OSD_CURYL_RCSH (0x7ff << 0)
-
-#define OSD_EXTMODE_EXPMDSEL (1 << 15)
-#define OSD_EXTMODE_SCRNHEXP_SHIFT 13
-#define OSD_EXTMODE_SCRNHEXP (3 << 13)
-#define OSD_EXTMODE_SCRNVEXP (1 << 12)
-#define OSD_EXTMODE_OSD1BLDCHR (1 << 11)
-#define OSD_EXTMODE_OSD0BLDCHR (1 << 10)
-#define OSD_EXTMODE_ATNOSD1EN (1 << 9)
-#define OSD_EXTMODE_ATNOSD0EN (1 << 8)
-#define OSD_EXTMODE_OSDHRSZ15 (1 << 7)
-#define OSD_EXTMODE_VIDHRSZ15 (1 << 6)
-#define OSD_EXTMODE_ZMFILV1HEN (1 << 5)
-#define OSD_EXTMODE_ZMFILV1VEN (1 << 4)
-#define OSD_EXTMODE_ZMFILV0HEN (1 << 3)
-#define OSD_EXTMODE_ZMFILV0VEN (1 << 2)
-#define OSD_EXTMODE_EXPFILHEN (1 << 1)
-#define OSD_EXTMODE_EXPFILVEN (1 << 0)
-
-#define OSD_MISCCTL_BLDSEL (1 << 15)
-#define OSD_MISCCTL_S420D (1 << 14)
-#define OSD_MISCCTL_BMAPT (1 << 13)
-#define OSD_MISCCTL_DM365M (1 << 12)
-#define OSD_MISCCTL_RGBEN (1 << 7)
-#define OSD_MISCCTL_RGBWIN (1 << 6)
-#define OSD_MISCCTL_DMANG (1 << 6)
-#define OSD_MISCCTL_TMON (1 << 5)
-#define OSD_MISCCTL_RSEL (1 << 4)
-#define OSD_MISCCTL_CPBSY (1 << 3)
-#define OSD_MISCCTL_PPSW (1 << 2)
-#define OSD_MISCCTL_PPRV (1 << 1)
-
-#define OSD_CLUTRAMYCB_Y_SHIFT 8
-#define OSD_CLUTRAMYCB_Y (0xff << 8)
-#define OSD_CLUTRAMYCB_CB_SHIFT 0
-#define OSD_CLUTRAMYCB_CB (0xff << 0)
-
-#define OSD_CLUTRAMCR_CR_SHIFT 8
-#define OSD_CLUTRAMCR_CR (0xff << 8)
-#define OSD_CLUTRAMCR_CADDR_SHIFT 0
-#define OSD_CLUTRAMCR_CADDR (0xff << 0)
-
-#define OSD_TRANSPVAL_RGBTRANS (0xffff << 0)
-
-#define OSD_TRANSPVALL_RGBL (0xffff << 0)
-
-#define OSD_TRANSPVALU_Y_SHIFT 8
-#define OSD_TRANSPVALU_Y (0xff << 8)
-#define OSD_TRANSPVALU_RGBU_SHIFT 0
-#define OSD_TRANSPVALU_RGBU (0xff << 0)
-
-#define OSD_TRANSPBMPIDX_BMP1_SHIFT 8
-#define OSD_TRANSPBMPIDX_BMP1 (0xff << 8)
-#define OSD_TRANSPBMPIDX_BMP0_SHIFT 0
-#define OSD_TRANSPBMPIDX_BMP0 0xff
-
-#endif /* _DAVINCI_VPBE_H_ */
diff --git a/drivers/media/platform/ti/davinci/vpbe_venc.c b/drivers/media/platform/ti/davinci/vpbe_venc.c
deleted file mode 100644
index 4c8e31de12b1..000000000000
--- a/drivers/media/platform/ti/davinci/vpbe_venc.c
+++ /dev/null
@@ -1,676 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2010 Texas Instruments Inc
- */
-#include <linux/module.h>
-#include <linux/mod_devicetable.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/ctype.h>
-#include <linux/delay.h>
-#include <linux/device.h>
-#include <linux/interrupt.h>
-#include <linux/platform_device.h>
-#include <linux/videodev2.h>
-#include <linux/slab.h>
-
-#include <linux/platform_data/i2c-davinci.h>
-
-#include <linux/io.h>
-
-#include <media/davinci/vpbe_types.h>
-#include <media/davinci/vpbe_venc.h>
-#include <media/davinci/vpss.h>
-#include <media/v4l2-device.h>
-
-#include "vpbe_venc_regs.h"
-
-#define MODULE_NAME "davinci-vpbe-venc"
-
-static const struct platform_device_id vpbe_venc_devtype[] = {
- {
- .name = DM644X_VPBE_VENC_SUBDEV_NAME,
- .driver_data = VPBE_VERSION_1,
- }, {
- .name = DM365_VPBE_VENC_SUBDEV_NAME,
- .driver_data = VPBE_VERSION_2,
- }, {
- .name = DM355_VPBE_VENC_SUBDEV_NAME,
- .driver_data = VPBE_VERSION_3,
- },
- {
- /* sentinel */
- }
-};
-
-MODULE_DEVICE_TABLE(platform, vpbe_venc_devtype);
-
-static int debug = 2;
-module_param(debug, int, 0644);
-MODULE_PARM_DESC(debug, "Debug level 0-2");
-
-struct venc_state {
- struct v4l2_subdev sd;
- struct venc_callback *callback;
- struct venc_platform_data *pdata;
- struct device *pdev;
- u32 output;
- v4l2_std_id std;
- spinlock_t lock;
- void __iomem *venc_base;
- void __iomem *vdaccfg_reg;
- enum vpbe_version venc_type;
-};
-
-static inline struct venc_state *to_state(struct v4l2_subdev *sd)
-{
- return container_of(sd, struct venc_state, sd);
-}
-
-static inline u32 venc_read(struct v4l2_subdev *sd, u32 offset)
-{
- struct venc_state *venc = to_state(sd);
-
- return readl(venc->venc_base + offset);
-}
-
-static inline u32 venc_write(struct v4l2_subdev *sd, u32 offset, u32 val)
-{
- struct venc_state *venc = to_state(sd);
-
- writel(val, (venc->venc_base + offset));
-
- return val;
-}
-
-static inline u32 venc_modify(struct v4l2_subdev *sd, u32 offset,
- u32 val, u32 mask)
-{
- u32 new_val = (venc_read(sd, offset) & ~mask) | (val & mask);
-
- venc_write(sd, offset, new_val);
-
- return new_val;
-}
-
-static inline u32 vdaccfg_write(struct v4l2_subdev *sd, u32 val)
-{
- struct venc_state *venc = to_state(sd);
-
- writel(val, venc->vdaccfg_reg);
-
- val = readl(venc->vdaccfg_reg);
-
- return val;
-}
-
-#define VDAC_COMPONENT 0x543
-#define VDAC_S_VIDEO 0x210
-/* This function sets the dac of the VPBE for various outputs
- */
-static int venc_set_dac(struct v4l2_subdev *sd, u32 out_index)
-{
- switch (out_index) {
- case 0:
- v4l2_dbg(debug, 1, sd, "Setting output to Composite\n");
- venc_write(sd, VENC_DACSEL, 0);
- break;
- case 1:
- v4l2_dbg(debug, 1, sd, "Setting output to Component\n");
- venc_write(sd, VENC_DACSEL, VDAC_COMPONENT);
- break;
- case 2:
- v4l2_dbg(debug, 1, sd, "Setting output to S-video\n");
- venc_write(sd, VENC_DACSEL, VDAC_S_VIDEO);
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-static void venc_enabledigitaloutput(struct v4l2_subdev *sd, int benable)
-{
- struct venc_state *venc = to_state(sd);
-
- v4l2_dbg(debug, 2, sd, "venc_enabledigitaloutput\n");
-
- if (benable) {
- venc_write(sd, VENC_VMOD, 0);
- venc_write(sd, VENC_CVBS, 0);
- venc_write(sd, VENC_LCDOUT, 0);
- venc_write(sd, VENC_HSPLS, 0);
- venc_write(sd, VENC_HSTART, 0);
- venc_write(sd, VENC_HVALID, 0);
- venc_write(sd, VENC_HINT, 0);
- venc_write(sd, VENC_VSPLS, 0);
- venc_write(sd, VENC_VSTART, 0);
- venc_write(sd, VENC_VVALID, 0);
- venc_write(sd, VENC_VINT, 0);
- venc_write(sd, VENC_YCCCTL, 0);
- venc_write(sd, VENC_DACSEL, 0);
-
- } else {
- venc_write(sd, VENC_VMOD, 0);
- /* disable VCLK output pin enable */
- venc_write(sd, VENC_VIDCTL, 0x141);
-
- /* Disable output sync pins */
- venc_write(sd, VENC_SYNCCTL, 0);
-
- /* Disable DCLOCK */
- venc_write(sd, VENC_DCLKCTL, 0);
- venc_write(sd, VENC_DRGBX1, 0x0000057C);
-
- /* Disable LCD output control (accepting default polarity) */
- venc_write(sd, VENC_LCDOUT, 0);
- if (venc->venc_type != VPBE_VERSION_3)
- venc_write(sd, VENC_CMPNT, 0x100);
- venc_write(sd, VENC_HSPLS, 0);
- venc_write(sd, VENC_HINT, 0);
- venc_write(sd, VENC_HSTART, 0);
- venc_write(sd, VENC_HVALID, 0);
-
- venc_write(sd, VENC_VSPLS, 0);
- venc_write(sd, VENC_VINT, 0);
- venc_write(sd, VENC_VSTART, 0);
- venc_write(sd, VENC_VVALID, 0);
-
- venc_write(sd, VENC_HSDLY, 0);
- venc_write(sd, VENC_VSDLY, 0);
-
- venc_write(sd, VENC_YCCCTL, 0);
- venc_write(sd, VENC_VSTARTA, 0);
-
- /* Set OSD clock and OSD Sync Adavance registers */
- venc_write(sd, VENC_OSDCLK0, 1);
- venc_write(sd, VENC_OSDCLK1, 2);
- }
-}
-
-static void
-venc_enable_vpss_clock(int venc_type,
- enum vpbe_enc_timings_type type,
- unsigned int pclock)
-{
- if (venc_type == VPBE_VERSION_1)
- return;
-
- if (venc_type == VPBE_VERSION_2 && (type == VPBE_ENC_STD || (type ==
- VPBE_ENC_DV_TIMINGS && pclock <= 27000000))) {
- vpss_enable_clock(VPSS_VENC_CLOCK_SEL, 1);
- vpss_enable_clock(VPSS_VPBE_CLOCK, 1);
- return;
- }
-
- if (venc_type == VPBE_VERSION_3 && type == VPBE_ENC_STD)
- vpss_enable_clock(VPSS_VENC_CLOCK_SEL, 0);
-}
-
-#define VDAC_CONFIG_SD_V3 0x0E21A6B6
-#define VDAC_CONFIG_SD_V2 0x081141CF
-/*
- * setting NTSC mode
- */
-static int venc_set_ntsc(struct v4l2_subdev *sd)
-{
- struct venc_state *venc = to_state(sd);
- struct venc_platform_data *pdata = venc->pdata;
-
- v4l2_dbg(debug, 2, sd, "venc_set_ntsc\n");
-
- /* Setup clock at VPSS & VENC for SD */
- vpss_enable_clock(VPSS_VENC_CLOCK_SEL, 1);
- if (pdata->setup_clock(VPBE_ENC_STD, V4L2_STD_525_60) < 0)
- return -EINVAL;
-
- venc_enable_vpss_clock(venc->venc_type, VPBE_ENC_STD, V4L2_STD_525_60);
- venc_enabledigitaloutput(sd, 0);
-
- if (venc->venc_type == VPBE_VERSION_3) {
- venc_write(sd, VENC_CLKCTL, 0x01);
- venc_write(sd, VENC_VIDCTL, 0);
- vdaccfg_write(sd, VDAC_CONFIG_SD_V3);
- } else if (venc->venc_type == VPBE_VERSION_2) {
- venc_write(sd, VENC_CLKCTL, 0x01);
- venc_write(sd, VENC_VIDCTL, 0);
- vdaccfg_write(sd, VDAC_CONFIG_SD_V2);
- } else {
- /* to set VENC CLK DIV to 1 - final clock is 54 MHz */
- venc_modify(sd, VENC_VIDCTL, 0, 1 << 1);
- /* Set REC656 Mode */
- venc_write(sd, VENC_YCCCTL, 0x1);
- venc_modify(sd, VENC_VDPRO, 0, VENC_VDPRO_DAFRQ);
- venc_modify(sd, VENC_VDPRO, 0, VENC_VDPRO_DAUPS);
- }
-
- venc_write(sd, VENC_VMOD, 0);
- venc_modify(sd, VENC_VMOD, (1 << VENC_VMOD_VIE_SHIFT),
- VENC_VMOD_VIE);
- venc_modify(sd, VENC_VMOD, (0 << VENC_VMOD_VMD), VENC_VMOD_VMD);
- venc_modify(sd, VENC_VMOD, (0 << VENC_VMOD_TVTYP_SHIFT),
- VENC_VMOD_TVTYP);
- venc_write(sd, VENC_DACTST, 0x0);
- venc_modify(sd, VENC_VMOD, VENC_VMOD_VENC, VENC_VMOD_VENC);
-
- return 0;
-}
-
-/*
- * setting PAL mode
- */
-static int venc_set_pal(struct v4l2_subdev *sd)
-{
- struct venc_state *venc = to_state(sd);
-
- v4l2_dbg(debug, 2, sd, "venc_set_pal\n");
-
- /* Setup clock at VPSS & VENC for SD */
- vpss_enable_clock(VPSS_VENC_CLOCK_SEL, 1);
- if (venc->pdata->setup_clock(VPBE_ENC_STD, V4L2_STD_625_50) < 0)
- return -EINVAL;
-
- venc_enable_vpss_clock(venc->venc_type, VPBE_ENC_STD, V4L2_STD_625_50);
- venc_enabledigitaloutput(sd, 0);
-
- if (venc->venc_type == VPBE_VERSION_3) {
- venc_write(sd, VENC_CLKCTL, 0x1);
- venc_write(sd, VENC_VIDCTL, 0);
- vdaccfg_write(sd, VDAC_CONFIG_SD_V3);
- } else if (venc->venc_type == VPBE_VERSION_2) {
- venc_write(sd, VENC_CLKCTL, 0x1);
- venc_write(sd, VENC_VIDCTL, 0);
- vdaccfg_write(sd, VDAC_CONFIG_SD_V2);
- } else {
- /* to set VENC CLK DIV to 1 - final clock is 54 MHz */
- venc_modify(sd, VENC_VIDCTL, 0, 1 << 1);
- /* Set REC656 Mode */
- venc_write(sd, VENC_YCCCTL, 0x1);
- }
-
- venc_modify(sd, VENC_SYNCCTL, 1 << VENC_SYNCCTL_OVD_SHIFT,
- VENC_SYNCCTL_OVD);
- venc_write(sd, VENC_VMOD, 0);
- venc_modify(sd, VENC_VMOD,
- (1 << VENC_VMOD_VIE_SHIFT),
- VENC_VMOD_VIE);
- venc_modify(sd, VENC_VMOD,
- (0 << VENC_VMOD_VMD), VENC_VMOD_VMD);
- venc_modify(sd, VENC_VMOD,
- (1 << VENC_VMOD_TVTYP_SHIFT),
- VENC_VMOD_TVTYP);
- venc_write(sd, VENC_DACTST, 0x0);
- venc_modify(sd, VENC_VMOD, VENC_VMOD_VENC, VENC_VMOD_VENC);
-
- return 0;
-}
-
-#define VDAC_CONFIG_HD_V2 0x081141EF
-/*
- * venc_set_480p59_94
- *
- * This function configures the video encoder to EDTV(525p) component setting.
- */
-static int venc_set_480p59_94(struct v4l2_subdev *sd)
-{
- struct venc_state *venc = to_state(sd);
- struct venc_platform_data *pdata = venc->pdata;
-
- v4l2_dbg(debug, 2, sd, "venc_set_480p59_94\n");
- if (venc->venc_type != VPBE_VERSION_1 &&
- venc->venc_type != VPBE_VERSION_2)
- return -EINVAL;
-
- /* Setup clock at VPSS & VENC for SD */
- if (pdata->setup_clock(VPBE_ENC_DV_TIMINGS, 27000000) < 0)
- return -EINVAL;
-
- venc_enable_vpss_clock(venc->venc_type, VPBE_ENC_DV_TIMINGS, 27000000);
- venc_enabledigitaloutput(sd, 0);
-
- if (venc->venc_type == VPBE_VERSION_2)
- vdaccfg_write(sd, VDAC_CONFIG_HD_V2);
- venc_write(sd, VENC_OSDCLK0, 0);
- venc_write(sd, VENC_OSDCLK1, 1);
-
- if (venc->venc_type == VPBE_VERSION_1) {
- venc_modify(sd, VENC_VDPRO, VENC_VDPRO_DAFRQ,
- VENC_VDPRO_DAFRQ);
- venc_modify(sd, VENC_VDPRO, VENC_VDPRO_DAUPS,
- VENC_VDPRO_DAUPS);
- }
-
- venc_write(sd, VENC_VMOD, 0);
- venc_modify(sd, VENC_VMOD, (1 << VENC_VMOD_VIE_SHIFT),
- VENC_VMOD_VIE);
- venc_modify(sd, VENC_VMOD, VENC_VMOD_HDMD, VENC_VMOD_HDMD);
- venc_modify(sd, VENC_VMOD, (HDTV_525P << VENC_VMOD_TVTYP_SHIFT),
- VENC_VMOD_TVTYP);
- venc_modify(sd, VENC_VMOD, VENC_VMOD_VDMD_YCBCR8 <<
- VENC_VMOD_VDMD_SHIFT, VENC_VMOD_VDMD);
-
- venc_modify(sd, VENC_VMOD, VENC_VMOD_VENC, VENC_VMOD_VENC);
-
- return 0;
-}
-
-/*
- * venc_set_625p
- *
- * This function configures the video encoder to HDTV(625p) component setting
- */
-static int venc_set_576p50(struct v4l2_subdev *sd)
-{
- struct venc_state *venc = to_state(sd);
- struct venc_platform_data *pdata = venc->pdata;
-
- v4l2_dbg(debug, 2, sd, "venc_set_576p50\n");
-
- if (venc->venc_type != VPBE_VERSION_1 &&
- venc->venc_type != VPBE_VERSION_2)
- return -EINVAL;
- /* Setup clock at VPSS & VENC for SD */
- if (pdata->setup_clock(VPBE_ENC_DV_TIMINGS, 27000000) < 0)
- return -EINVAL;
-
- venc_enable_vpss_clock(venc->venc_type, VPBE_ENC_DV_TIMINGS, 27000000);
- venc_enabledigitaloutput(sd, 0);
-
- if (venc->venc_type == VPBE_VERSION_2)
- vdaccfg_write(sd, VDAC_CONFIG_HD_V2);
-
- venc_write(sd, VENC_OSDCLK0, 0);
- venc_write(sd, VENC_OSDCLK1, 1);
-
- if (venc->venc_type == VPBE_VERSION_1) {
- venc_modify(sd, VENC_VDPRO, VENC_VDPRO_DAFRQ,
- VENC_VDPRO_DAFRQ);
- venc_modify(sd, VENC_VDPRO, VENC_VDPRO_DAUPS,
- VENC_VDPRO_DAUPS);
- }
-
- venc_write(sd, VENC_VMOD, 0);
- venc_modify(sd, VENC_VMOD, (1 << VENC_VMOD_VIE_SHIFT),
- VENC_VMOD_VIE);
- venc_modify(sd, VENC_VMOD, VENC_VMOD_HDMD, VENC_VMOD_HDMD);
- venc_modify(sd, VENC_VMOD, (HDTV_625P << VENC_VMOD_TVTYP_SHIFT),
- VENC_VMOD_TVTYP);
-
- venc_modify(sd, VENC_VMOD, VENC_VMOD_VDMD_YCBCR8 <<
- VENC_VMOD_VDMD_SHIFT, VENC_VMOD_VDMD);
- venc_modify(sd, VENC_VMOD, VENC_VMOD_VENC, VENC_VMOD_VENC);
-
- return 0;
-}
-
-/*
- * venc_set_720p60_internal - Setup 720p60 in venc for dm365 only
- */
-static int venc_set_720p60_internal(struct v4l2_subdev *sd)
-{
- struct venc_state *venc = to_state(sd);
- struct venc_platform_data *pdata = venc->pdata;
-
- if (pdata->setup_clock(VPBE_ENC_DV_TIMINGS, 74250000) < 0)
- return -EINVAL;
-
- venc_enable_vpss_clock(venc->venc_type, VPBE_ENC_DV_TIMINGS, 74250000);
- venc_enabledigitaloutput(sd, 0);
-
- venc_write(sd, VENC_OSDCLK0, 0);
- venc_write(sd, VENC_OSDCLK1, 1);
-
- venc_write(sd, VENC_VMOD, 0);
- /* DM365 component HD mode */
- venc_modify(sd, VENC_VMOD, (1 << VENC_VMOD_VIE_SHIFT),
- VENC_VMOD_VIE);
- venc_modify(sd, VENC_VMOD, VENC_VMOD_HDMD, VENC_VMOD_HDMD);
- venc_modify(sd, VENC_VMOD, (HDTV_720P << VENC_VMOD_TVTYP_SHIFT),
- VENC_VMOD_TVTYP);
- venc_modify(sd, VENC_VMOD, VENC_VMOD_VENC, VENC_VMOD_VENC);
- venc_write(sd, VENC_XHINTVL, 0);
- return 0;
-}
-
-/*
- * venc_set_1080i30_internal - Setup 1080i30 in venc for dm365 only
- */
-static int venc_set_1080i30_internal(struct v4l2_subdev *sd)
-{
- struct venc_state *venc = to_state(sd);
- struct venc_platform_data *pdata = venc->pdata;
-
- if (pdata->setup_clock(VPBE_ENC_DV_TIMINGS, 74250000) < 0)
- return -EINVAL;
-
- venc_enable_vpss_clock(venc->venc_type, VPBE_ENC_DV_TIMINGS, 74250000);
- venc_enabledigitaloutput(sd, 0);
-
- venc_write(sd, VENC_OSDCLK0, 0);
- venc_write(sd, VENC_OSDCLK1, 1);
-
-
- venc_write(sd, VENC_VMOD, 0);
- /* DM365 component HD mode */
- venc_modify(sd, VENC_VMOD, (1 << VENC_VMOD_VIE_SHIFT),
- VENC_VMOD_VIE);
- venc_modify(sd, VENC_VMOD, VENC_VMOD_HDMD, VENC_VMOD_HDMD);
- venc_modify(sd, VENC_VMOD, (HDTV_1080I << VENC_VMOD_TVTYP_SHIFT),
- VENC_VMOD_TVTYP);
- venc_modify(sd, VENC_VMOD, VENC_VMOD_VENC, VENC_VMOD_VENC);
- venc_write(sd, VENC_XHINTVL, 0);
- return 0;
-}
-
-static int venc_s_std_output(struct v4l2_subdev *sd, v4l2_std_id norm)
-{
- v4l2_dbg(debug, 1, sd, "venc_s_std_output\n");
-
- if (norm & V4L2_STD_525_60)
- return venc_set_ntsc(sd);
- else if (norm & V4L2_STD_625_50)
- return venc_set_pal(sd);
-
- return -EINVAL;
-}
-
-static int venc_s_dv_timings(struct v4l2_subdev *sd,
- struct v4l2_dv_timings *dv_timings)
-{
- struct venc_state *venc = to_state(sd);
- u32 height = dv_timings->bt.height;
- int ret;
-
- v4l2_dbg(debug, 1, sd, "venc_s_dv_timings\n");
-
- if (height == 576)
- return venc_set_576p50(sd);
- else if (height == 480)
- return venc_set_480p59_94(sd);
- else if ((height == 720) &&
- (venc->venc_type == VPBE_VERSION_2)) {
- /* TBD setup internal 720p mode here */
- ret = venc_set_720p60_internal(sd);
- /* for DM365 VPBE, there is DAC inside */
- vdaccfg_write(sd, VDAC_CONFIG_HD_V2);
- return ret;
- } else if ((height == 1080) &&
- (venc->venc_type == VPBE_VERSION_2)) {
- /* TBD setup internal 1080i mode here */
- ret = venc_set_1080i30_internal(sd);
- /* for DM365 VPBE, there is DAC inside */
- vdaccfg_write(sd, VDAC_CONFIG_HD_V2);
- return ret;
- }
- return -EINVAL;
-}
-
-static int venc_s_routing(struct v4l2_subdev *sd, u32 input, u32 output,
- u32 config)
-{
- struct venc_state *venc = to_state(sd);
- int ret;
-
- v4l2_dbg(debug, 1, sd, "venc_s_routing\n");
-
- ret = venc_set_dac(sd, output);
- if (!ret)
- venc->output = output;
-
- return ret;
-}
-
-static long venc_command(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
-{
- u32 val;
-
- switch (cmd) {
- case VENC_GET_FLD:
- val = venc_read(sd, VENC_VSTAT);
- *((int *)arg) = ((val & VENC_VSTAT_FIDST) ==
- VENC_VSTAT_FIDST);
- break;
- default:
- v4l2_err(sd, "Wrong IOCTL cmd\n");
- break;
- }
-
- return 0;
-}
-
-static const struct v4l2_subdev_core_ops venc_core_ops = {
- .command = venc_command,
-};
-
-static const struct v4l2_subdev_video_ops venc_video_ops = {
- .s_routing = venc_s_routing,
- .s_std_output = venc_s_std_output,
- .s_dv_timings = venc_s_dv_timings,
-};
-
-static const struct v4l2_subdev_ops venc_ops = {
- .core = &venc_core_ops,
- .video = &venc_video_ops,
-};
-
-static int venc_initialize(struct v4l2_subdev *sd)
-{
- struct venc_state *venc = to_state(sd);
- int ret;
-
- /* Set default to output to composite and std to NTSC */
- venc->output = 0;
- venc->std = V4L2_STD_525_60;
-
- ret = venc_s_routing(sd, 0, venc->output, 0);
- if (ret < 0) {
- v4l2_err(sd, "Error setting output during init\n");
- return -EINVAL;
- }
-
- ret = venc_s_std_output(sd, venc->std);
- if (ret < 0) {
- v4l2_err(sd, "Error setting std during init\n");
- return -EINVAL;
- }
-
- return ret;
-}
-
-static int venc_device_get(struct device *dev, void *data)
-{
- struct platform_device *pdev = to_platform_device(dev);
- struct venc_state **venc = data;
-
- if (strstr(pdev->name, "vpbe-venc") != NULL)
- *venc = platform_get_drvdata(pdev);
-
- return 0;
-}
-
-struct v4l2_subdev *venc_sub_dev_init(struct v4l2_device *v4l2_dev,
- const char *venc_name)
-{
- struct venc_state *venc = NULL;
-
- bus_for_each_dev(&platform_bus_type, NULL, &venc,
- venc_device_get);
- if (venc == NULL)
- return NULL;
-
- v4l2_subdev_init(&venc->sd, &venc_ops);
-
- strscpy(venc->sd.name, venc_name, sizeof(venc->sd.name));
- if (v4l2_device_register_subdev(v4l2_dev, &venc->sd) < 0) {
- v4l2_err(v4l2_dev,
- "vpbe unable to register venc sub device\n");
- return NULL;
- }
- if (venc_initialize(&venc->sd)) {
- v4l2_err(v4l2_dev,
- "vpbe venc initialization failed\n");
- return NULL;
- }
-
- return &venc->sd;
-}
-EXPORT_SYMBOL(venc_sub_dev_init);
-
-static int venc_probe(struct platform_device *pdev)
-{
- const struct platform_device_id *pdev_id;
- struct venc_state *venc;
-
- if (!pdev->dev.platform_data) {
- dev_err(&pdev->dev, "No platform data for VENC sub device");
- return -EINVAL;
- }
-
- pdev_id = platform_get_device_id(pdev);
- if (!pdev_id)
- return -EINVAL;
-
- venc = devm_kzalloc(&pdev->dev, sizeof(struct venc_state), GFP_KERNEL);
- if (venc == NULL)
- return -ENOMEM;
-
- venc->venc_type = pdev_id->driver_data;
- venc->pdev = &pdev->dev;
- venc->pdata = pdev->dev.platform_data;
-
- venc->venc_base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(venc->venc_base))
- return PTR_ERR(venc->venc_base);
-
- if (venc->venc_type != VPBE_VERSION_1) {
- venc->vdaccfg_reg = devm_platform_ioremap_resource(pdev, 1);
- if (IS_ERR(venc->vdaccfg_reg))
- return PTR_ERR(venc->vdaccfg_reg);
- }
- spin_lock_init(&venc->lock);
- platform_set_drvdata(pdev, venc);
- dev_notice(venc->pdev, "VENC sub device probe success\n");
-
- return 0;
-}
-
-static int venc_remove(struct platform_device *pdev)
-{
- return 0;
-}
-
-static struct platform_driver venc_driver = {
- .probe = venc_probe,
- .remove = venc_remove,
- .driver = {
- .name = MODULE_NAME,
- },
- .id_table = vpbe_venc_devtype
-};
-
-module_platform_driver(venc_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("VPBE VENC Driver");
-MODULE_AUTHOR("Texas Instruments");
diff --git a/drivers/media/platform/ti/davinci/vpbe_venc_regs.h b/drivers/media/platform/ti/davinci/vpbe_venc_regs.h
deleted file mode 100644
index 29d8fc3af662..000000000000
--- a/drivers/media/platform/ti/davinci/vpbe_venc_regs.h
+++ /dev/null
@@ -1,165 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2006-2010 Texas Instruments Inc
- */
-#ifndef _VPBE_VENC_REGS_H
-#define _VPBE_VENC_REGS_H
-
-/* VPBE Video Encoder / Digital LCD Subsystem Registers (VENC) */
-#define VENC_VMOD 0x00
-#define VENC_VIDCTL 0x04
-#define VENC_VDPRO 0x08
-#define VENC_SYNCCTL 0x0C
-#define VENC_HSPLS 0x10
-#define VENC_VSPLS 0x14
-#define VENC_HINT 0x18
-#define VENC_HSTART 0x1C
-#define VENC_HVALID 0x20
-#define VENC_VINT 0x24
-#define VENC_VSTART 0x28
-#define VENC_VVALID 0x2C
-#define VENC_HSDLY 0x30
-#define VENC_VSDLY 0x34
-#define VENC_YCCCTL 0x38
-#define VENC_RGBCTL 0x3C
-#define VENC_RGBCLP 0x40
-#define VENC_LINECTL 0x44
-#define VENC_CULLLINE 0x48
-#define VENC_LCDOUT 0x4C
-#define VENC_BRTS 0x50
-#define VENC_BRTW 0x54
-#define VENC_ACCTL 0x58
-#define VENC_PWMP 0x5C
-#define VENC_PWMW 0x60
-#define VENC_DCLKCTL 0x64
-#define VENC_DCLKPTN0 0x68
-#define VENC_DCLKPTN1 0x6C
-#define VENC_DCLKPTN2 0x70
-#define VENC_DCLKPTN3 0x74
-#define VENC_DCLKPTN0A 0x78
-#define VENC_DCLKPTN1A 0x7C
-#define VENC_DCLKPTN2A 0x80
-#define VENC_DCLKPTN3A 0x84
-#define VENC_DCLKHS 0x88
-#define VENC_DCLKHSA 0x8C
-#define VENC_DCLKHR 0x90
-#define VENC_DCLKVS 0x94
-#define VENC_DCLKVR 0x98
-#define VENC_CAPCTL 0x9C
-#define VENC_CAPDO 0xA0
-#define VENC_CAPDE 0xA4
-#define VENC_ATR0 0xA8
-#define VENC_ATR1 0xAC
-#define VENC_ATR2 0xB0
-#define VENC_VSTAT 0xB8
-#define VENC_RAMADR 0xBC
-#define VENC_RAMPORT 0xC0
-#define VENC_DACTST 0xC4
-#define VENC_YCOLVL 0xC8
-#define VENC_SCPROG 0xCC
-#define VENC_CVBS 0xDC
-#define VENC_CMPNT 0xE0
-#define VENC_ETMG0 0xE4
-#define VENC_ETMG1 0xE8
-#define VENC_ETMG2 0xEC
-#define VENC_ETMG3 0xF0
-#define VENC_DACSEL 0xF4
-#define VENC_ARGBX0 0x100
-#define VENC_ARGBX1 0x104
-#define VENC_ARGBX2 0x108
-#define VENC_ARGBX3 0x10C
-#define VENC_ARGBX4 0x110
-#define VENC_DRGBX0 0x114
-#define VENC_DRGBX1 0x118
-#define VENC_DRGBX2 0x11C
-#define VENC_DRGBX3 0x120
-#define VENC_DRGBX4 0x124
-#define VENC_VSTARTA 0x128
-#define VENC_OSDCLK0 0x12C
-#define VENC_OSDCLK1 0x130
-#define VENC_HVLDCL0 0x134
-#define VENC_HVLDCL1 0x138
-#define VENC_OSDHADV 0x13C
-#define VENC_CLKCTL 0x140
-#define VENC_GAMCTL 0x144
-#define VENC_XHINTVL 0x174
-
-/* bit definitions */
-#define VPBE_PCR_VENC_DIV (1 << 1)
-#define VPBE_PCR_CLK_OFF (1 << 0)
-
-#define VENC_VMOD_VDMD_SHIFT 12
-#define VENC_VMOD_VDMD_YCBCR16 0
-#define VENC_VMOD_VDMD_YCBCR8 1
-#define VENC_VMOD_VDMD_RGB666 2
-#define VENC_VMOD_VDMD_RGB8 3
-#define VENC_VMOD_VDMD_EPSON 4
-#define VENC_VMOD_VDMD_CASIO 5
-#define VENC_VMOD_VDMD_UDISPQVGA 6
-#define VENC_VMOD_VDMD_STNLCD 7
-#define VENC_VMOD_VIE_SHIFT 1
-#define VENC_VMOD_VDMD (7 << 12)
-#define VENC_VMOD_ITLCL (1 << 11)
-#define VENC_VMOD_ITLC (1 << 10)
-#define VENC_VMOD_NSIT (1 << 9)
-#define VENC_VMOD_HDMD (1 << 8)
-#define VENC_VMOD_TVTYP_SHIFT 6
-#define VENC_VMOD_TVTYP (3 << 6)
-#define VENC_VMOD_SLAVE (1 << 5)
-#define VENC_VMOD_VMD (1 << 4)
-#define VENC_VMOD_BLNK (1 << 3)
-#define VENC_VMOD_VIE (1 << 1)
-#define VENC_VMOD_VENC (1 << 0)
-
-/* VMOD TVTYP options for HDMD=0 */
-#define SDTV_NTSC 0
-#define SDTV_PAL 1
-/* VMOD TVTYP options for HDMD=1 */
-#define HDTV_525P 0
-#define HDTV_625P 1
-#define HDTV_1080I 2
-#define HDTV_720P 3
-
-#define VENC_VIDCTL_VCLKP (1 << 14)
-#define VENC_VIDCTL_VCLKE_SHIFT 13
-#define VENC_VIDCTL_VCLKE (1 << 13)
-#define VENC_VIDCTL_VCLKZ_SHIFT 12
-#define VENC_VIDCTL_VCLKZ (1 << 12)
-#define VENC_VIDCTL_SYDIR_SHIFT 8
-#define VENC_VIDCTL_SYDIR (1 << 8)
-#define VENC_VIDCTL_DOMD_SHIFT 4
-#define VENC_VIDCTL_DOMD (3 << 4)
-#define VENC_VIDCTL_YCDIR_SHIFT 0
-#define VENC_VIDCTL_YCDIR (1 << 0)
-
-#define VENC_VDPRO_ATYCC_SHIFT 5
-#define VENC_VDPRO_ATYCC (1 << 5)
-#define VENC_VDPRO_ATCOM_SHIFT 4
-#define VENC_VDPRO_ATCOM (1 << 4)
-#define VENC_VDPRO_DAFRQ (1 << 3)
-#define VENC_VDPRO_DAUPS (1 << 2)
-#define VENC_VDPRO_CUPS (1 << 1)
-#define VENC_VDPRO_YUPS (1 << 0)
-
-#define VENC_SYNCCTL_VPL_SHIFT 3
-#define VENC_SYNCCTL_VPL (1 << 3)
-#define VENC_SYNCCTL_HPL_SHIFT 2
-#define VENC_SYNCCTL_HPL (1 << 2)
-#define VENC_SYNCCTL_SYEV_SHIFT 1
-#define VENC_SYNCCTL_SYEV (1 << 1)
-#define VENC_SYNCCTL_SYEH_SHIFT 0
-#define VENC_SYNCCTL_SYEH (1 << 0)
-#define VENC_SYNCCTL_OVD_SHIFT 14
-#define VENC_SYNCCTL_OVD (1 << 14)
-
-#define VENC_DCLKCTL_DCKEC_SHIFT 11
-#define VENC_DCLKCTL_DCKEC (1 << 11)
-#define VENC_DCLKCTL_DCKPW_SHIFT 0
-#define VENC_DCLKCTL_DCKPW (0x3f << 0)
-
-#define VENC_VSTAT_FIDST (1 << 4)
-
-#define VENC_CMPNT_MRGB_SHIFT 14
-#define VENC_CMPNT_MRGB (1 << 14)
-
-#endif /* _VPBE_VENC_REGS_H */
diff --git a/drivers/media/platform/ti/davinci/vpss.c b/drivers/media/platform/ti/davinci/vpss.c
deleted file mode 100644
index d15b991ab17c..000000000000
--- a/drivers/media/platform/ti/davinci/vpss.c
+++ /dev/null
@@ -1,529 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (C) 2009 Texas Instruments.
- *
- * common vpss system module platform driver for all video drivers.
- */
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/io.h>
-#include <linux/pm_runtime.h>
-#include <linux/err.h>
-
-#include <media/davinci/vpss.h>
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("VPSS Driver");
-MODULE_AUTHOR("Texas Instruments");
-
-/* DM644x defines */
-#define DM644X_SBL_PCR_VPSS (4)
-
-#define DM355_VPSSBL_INTSEL 0x10
-#define DM355_VPSSBL_EVTSEL 0x14
-/* vpss BL register offsets */
-#define DM355_VPSSBL_CCDCMUX 0x1c
-/* vpss CLK register offsets */
-#define DM355_VPSSCLK_CLKCTRL 0x04
-/* masks and shifts */
-#define VPSS_HSSISEL_SHIFT 4
-/*
- * VDINT0 - vpss_int0, VDINT1 - vpss_int1, H3A - vpss_int4,
- * IPIPE_INT1_SDR - vpss_int5
- */
-#define DM355_VPSSBL_INTSEL_DEFAULT 0xff83ff10
-/* VENCINT - vpss_int8 */
-#define DM355_VPSSBL_EVTSEL_DEFAULT 0x4
-
-#define DM365_ISP5_PCCR 0x04
-#define DM365_ISP5_PCCR_BL_CLK_ENABLE BIT(0)
-#define DM365_ISP5_PCCR_ISIF_CLK_ENABLE BIT(1)
-#define DM365_ISP5_PCCR_H3A_CLK_ENABLE BIT(2)
-#define DM365_ISP5_PCCR_RSZ_CLK_ENABLE BIT(3)
-#define DM365_ISP5_PCCR_IPIPE_CLK_ENABLE BIT(4)
-#define DM365_ISP5_PCCR_IPIPEIF_CLK_ENABLE BIT(5)
-#define DM365_ISP5_PCCR_RSV BIT(6)
-
-#define DM365_ISP5_BCR 0x08
-#define DM365_ISP5_BCR_ISIF_OUT_ENABLE BIT(1)
-
-#define DM365_ISP5_INTSEL1 0x10
-#define DM365_ISP5_INTSEL2 0x14
-#define DM365_ISP5_INTSEL3 0x18
-#define DM365_ISP5_CCDCMUX 0x20
-#define DM365_ISP5_PG_FRAME_SIZE 0x28
-#define DM365_VPBE_CLK_CTRL 0x00
-
-#define VPSS_CLK_CTRL 0x01c40044
-#define VPSS_CLK_CTRL_VENCCLKEN BIT(3)
-#define VPSS_CLK_CTRL_DACCLKEN BIT(4)
-
-/*
- * vpss interrupts. VDINT0 - vpss_int0, VDINT1 - vpss_int1,
- * AF - vpss_int3
- */
-#define DM365_ISP5_INTSEL1_DEFAULT 0x0b1f0100
-/* AEW - vpss_int6, RSZ_INT_DMA - vpss_int5 */
-#define DM365_ISP5_INTSEL2_DEFAULT 0x1f0a0f1f
-/* VENC - vpss_int8 */
-#define DM365_ISP5_INTSEL3_DEFAULT 0x00000015
-
-/* masks and shifts for DM365*/
-#define DM365_CCDC_PG_VD_POL_SHIFT 0
-#define DM365_CCDC_PG_HD_POL_SHIFT 1
-
-#define CCD_SRC_SEL_MASK (BIT_MASK(5) | BIT_MASK(4))
-#define CCD_SRC_SEL_SHIFT 4
-
-/* Different SoC platforms supported by this driver */
-enum vpss_platform_type {
- DM644X,
- DM355,
- DM365,
-};
-
-/*
- * vpss operations. Depends on platform. Not all functions are available
- * on all platforms. The api, first check if a function is available before
- * invoking it. In the probe, the function ptrs are initialized based on
- * vpss name. vpss name can be "dm355_vpss", "dm644x_vpss" etc.
- */
-struct vpss_hw_ops {
- /* enable clock */
- int (*enable_clock)(enum vpss_clock_sel clock_sel, int en);
- /* select input to ccdc */
- void (*select_ccdc_source)(enum vpss_ccdc_source_sel src_sel);
- /* clear wbl overflow bit */
- int (*clear_wbl_overflow)(enum vpss_wbl_sel wbl_sel);
- /* set sync polarity */
- void (*set_sync_pol)(struct vpss_sync_pol);
- /* set the PG_FRAME_SIZE register*/
- void (*set_pg_frame_size)(struct vpss_pg_frame_size);
- /* check and clear interrupt if occurred */
- int (*dma_complete_interrupt)(void);
-};
-
-/* vpss configuration */
-struct vpss_oper_config {
- __iomem void *vpss_regs_base0;
- __iomem void *vpss_regs_base1;
- __iomem void *vpss_regs_base2;
- enum vpss_platform_type platform;
- spinlock_t vpss_lock;
- struct vpss_hw_ops hw_ops;
-};
-
-static struct vpss_oper_config oper_cfg;
-
-/* register access routines */
-static inline u32 bl_regr(u32 offset)
-{
- return __raw_readl(oper_cfg.vpss_regs_base0 + offset);
-}
-
-static inline void bl_regw(u32 val, u32 offset)
-{
- __raw_writel(val, oper_cfg.vpss_regs_base0 + offset);
-}
-
-static inline u32 vpss_regr(u32 offset)
-{
- return __raw_readl(oper_cfg.vpss_regs_base1 + offset);
-}
-
-static inline void vpss_regw(u32 val, u32 offset)
-{
- __raw_writel(val, oper_cfg.vpss_regs_base1 + offset);
-}
-
-/* For DM365 only */
-static inline u32 isp5_read(u32 offset)
-{
- return __raw_readl(oper_cfg.vpss_regs_base0 + offset);
-}
-
-/* For DM365 only */
-static inline void isp5_write(u32 val, u32 offset)
-{
- __raw_writel(val, oper_cfg.vpss_regs_base0 + offset);
-}
-
-static void dm365_select_ccdc_source(enum vpss_ccdc_source_sel src_sel)
-{
- u32 temp = isp5_read(DM365_ISP5_CCDCMUX) & ~CCD_SRC_SEL_MASK;
-
- /* if we are using pattern generator, enable it */
- if (src_sel == VPSS_PGLPBK || src_sel == VPSS_CCDCPG)
- temp |= 0x08;
-
- temp |= (src_sel << CCD_SRC_SEL_SHIFT);
- isp5_write(temp, DM365_ISP5_CCDCMUX);
-}
-
-static void dm355_select_ccdc_source(enum vpss_ccdc_source_sel src_sel)
-{
- bl_regw(src_sel << VPSS_HSSISEL_SHIFT, DM355_VPSSBL_CCDCMUX);
-}
-
-int vpss_dma_complete_interrupt(void)
-{
- if (!oper_cfg.hw_ops.dma_complete_interrupt)
- return 2;
- return oper_cfg.hw_ops.dma_complete_interrupt();
-}
-EXPORT_SYMBOL(vpss_dma_complete_interrupt);
-
-int vpss_select_ccdc_source(enum vpss_ccdc_source_sel src_sel)
-{
- if (!oper_cfg.hw_ops.select_ccdc_source)
- return -EINVAL;
-
- oper_cfg.hw_ops.select_ccdc_source(src_sel);
- return 0;
-}
-EXPORT_SYMBOL(vpss_select_ccdc_source);
-
-static int dm644x_clear_wbl_overflow(enum vpss_wbl_sel wbl_sel)
-{
- u32 mask = 1, val;
-
- if (wbl_sel < VPSS_PCR_AEW_WBL_0 ||
- wbl_sel > VPSS_PCR_CCDC_WBL_O)
- return -EINVAL;
-
- /* writing a 0 clear the overflow */
- mask = ~(mask << wbl_sel);
- val = bl_regr(DM644X_SBL_PCR_VPSS) & mask;
- bl_regw(val, DM644X_SBL_PCR_VPSS);
- return 0;
-}
-
-void vpss_set_sync_pol(struct vpss_sync_pol sync)
-{
- if (!oper_cfg.hw_ops.set_sync_pol)
- return;
-
- oper_cfg.hw_ops.set_sync_pol(sync);
-}
-EXPORT_SYMBOL(vpss_set_sync_pol);
-
-int vpss_clear_wbl_overflow(enum vpss_wbl_sel wbl_sel)
-{
- if (!oper_cfg.hw_ops.clear_wbl_overflow)
- return -EINVAL;
-
- return oper_cfg.hw_ops.clear_wbl_overflow(wbl_sel);
-}
-EXPORT_SYMBOL(vpss_clear_wbl_overflow);
-
-/*
- * dm355_enable_clock - Enable VPSS Clock
- * @clock_sel: Clock to be enabled/disabled
- * @en: enable/disable flag
- *
- * This is called to enable or disable a vpss clock
- */
-static int dm355_enable_clock(enum vpss_clock_sel clock_sel, int en)
-{
- unsigned long flags;
- u32 utemp, mask = 0x1, shift = 0;
-
- switch (clock_sel) {
- case VPSS_VPBE_CLOCK:
- /* nothing since lsb */
- break;
- case VPSS_VENC_CLOCK_SEL:
- shift = 2;
- break;
- case VPSS_CFALD_CLOCK:
- shift = 3;
- break;
- case VPSS_H3A_CLOCK:
- shift = 4;
- break;
- case VPSS_IPIPE_CLOCK:
- shift = 5;
- break;
- case VPSS_CCDC_CLOCK:
- shift = 6;
- break;
- default:
- printk(KERN_ERR "dm355_enable_clock: Invalid selector: %d\n",
- clock_sel);
- return -EINVAL;
- }
-
- spin_lock_irqsave(&oper_cfg.vpss_lock, flags);
- utemp = vpss_regr(DM355_VPSSCLK_CLKCTRL);
- if (!en)
- utemp &= ~(mask << shift);
- else
- utemp |= (mask << shift);
-
- vpss_regw(utemp, DM355_VPSSCLK_CLKCTRL);
- spin_unlock_irqrestore(&oper_cfg.vpss_lock, flags);
- return 0;
-}
-
-static int dm365_enable_clock(enum vpss_clock_sel clock_sel, int en)
-{
- unsigned long flags;
- u32 utemp, mask = 0x1, shift = 0, offset = DM365_ISP5_PCCR;
- u32 (*read)(u32 offset) = isp5_read;
- void(*write)(u32 val, u32 offset) = isp5_write;
-
- switch (clock_sel) {
- case VPSS_BL_CLOCK:
- break;
- case VPSS_CCDC_CLOCK:
- shift = 1;
- break;
- case VPSS_H3A_CLOCK:
- shift = 2;
- break;
- case VPSS_RSZ_CLOCK:
- shift = 3;
- break;
- case VPSS_IPIPE_CLOCK:
- shift = 4;
- break;
- case VPSS_IPIPEIF_CLOCK:
- shift = 5;
- break;
- case VPSS_PCLK_INTERNAL:
- shift = 6;
- break;
- case VPSS_PSYNC_CLOCK_SEL:
- shift = 7;
- break;
- case VPSS_VPBE_CLOCK:
- read = vpss_regr;
- write = vpss_regw;
- offset = DM365_VPBE_CLK_CTRL;
- break;
- case VPSS_VENC_CLOCK_SEL:
- shift = 2;
- read = vpss_regr;
- write = vpss_regw;
- offset = DM365_VPBE_CLK_CTRL;
- break;
- case VPSS_LDC_CLOCK:
- shift = 3;
- read = vpss_regr;
- write = vpss_regw;
- offset = DM365_VPBE_CLK_CTRL;
- break;
- case VPSS_FDIF_CLOCK:
- shift = 4;
- read = vpss_regr;
- write = vpss_regw;
- offset = DM365_VPBE_CLK_CTRL;
- break;
- case VPSS_OSD_CLOCK_SEL:
- shift = 6;
- read = vpss_regr;
- write = vpss_regw;
- offset = DM365_VPBE_CLK_CTRL;
- break;
- case VPSS_LDC_CLOCK_SEL:
- shift = 7;
- read = vpss_regr;
- write = vpss_regw;
- offset = DM365_VPBE_CLK_CTRL;
- break;
- default:
- printk(KERN_ERR "dm365_enable_clock: Invalid selector: %d\n",
- clock_sel);
- return -1;
- }
-
- spin_lock_irqsave(&oper_cfg.vpss_lock, flags);
- utemp = read(offset);
- if (!en) {
- mask = ~mask;
- utemp &= (mask << shift);
- } else
- utemp |= (mask << shift);
-
- write(utemp, offset);
- spin_unlock_irqrestore(&oper_cfg.vpss_lock, flags);
-
- return 0;
-}
-
-int vpss_enable_clock(enum vpss_clock_sel clock_sel, int en)
-{
- if (!oper_cfg.hw_ops.enable_clock)
- return -EINVAL;
-
- return oper_cfg.hw_ops.enable_clock(clock_sel, en);
-}
-EXPORT_SYMBOL(vpss_enable_clock);
-
-void dm365_vpss_set_sync_pol(struct vpss_sync_pol sync)
-{
- int val = 0;
- val = isp5_read(DM365_ISP5_CCDCMUX);
-
- val |= (sync.ccdpg_hdpol << DM365_CCDC_PG_HD_POL_SHIFT);
- val |= (sync.ccdpg_vdpol << DM365_CCDC_PG_VD_POL_SHIFT);
-
- isp5_write(val, DM365_ISP5_CCDCMUX);
-}
-EXPORT_SYMBOL(dm365_vpss_set_sync_pol);
-
-void vpss_set_pg_frame_size(struct vpss_pg_frame_size frame_size)
-{
- if (!oper_cfg.hw_ops.set_pg_frame_size)
- return;
-
- oper_cfg.hw_ops.set_pg_frame_size(frame_size);
-}
-EXPORT_SYMBOL(vpss_set_pg_frame_size);
-
-void dm365_vpss_set_pg_frame_size(struct vpss_pg_frame_size frame_size)
-{
- int current_reg = ((frame_size.hlpfr >> 1) - 1) << 16;
-
- current_reg |= (frame_size.pplen - 1);
- isp5_write(current_reg, DM365_ISP5_PG_FRAME_SIZE);
-}
-EXPORT_SYMBOL(dm365_vpss_set_pg_frame_size);
-
-static int vpss_probe(struct platform_device *pdev)
-{
- char *platform_name;
-
- if (!pdev->dev.platform_data) {
- dev_err(&pdev->dev, "no platform data\n");
- return -ENOENT;
- }
-
- platform_name = pdev->dev.platform_data;
- if (!strcmp(platform_name, "dm355_vpss"))
- oper_cfg.platform = DM355;
- else if (!strcmp(platform_name, "dm365_vpss"))
- oper_cfg.platform = DM365;
- else if (!strcmp(platform_name, "dm644x_vpss"))
- oper_cfg.platform = DM644X;
- else {
- dev_err(&pdev->dev, "vpss driver not supported on this platform\n");
- return -ENODEV;
- }
-
- dev_info(&pdev->dev, "%s vpss probed\n", platform_name);
- oper_cfg.vpss_regs_base0 = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(oper_cfg.vpss_regs_base0))
- return PTR_ERR(oper_cfg.vpss_regs_base0);
-
- if (oper_cfg.platform == DM355 || oper_cfg.platform == DM365) {
- oper_cfg.vpss_regs_base1 = devm_platform_ioremap_resource(pdev, 1);
- if (IS_ERR(oper_cfg.vpss_regs_base1))
- return PTR_ERR(oper_cfg.vpss_regs_base1);
- }
-
- if (oper_cfg.platform == DM355) {
- oper_cfg.hw_ops.enable_clock = dm355_enable_clock;
- oper_cfg.hw_ops.select_ccdc_source = dm355_select_ccdc_source;
- /* Setup vpss interrupts */
- bl_regw(DM355_VPSSBL_INTSEL_DEFAULT, DM355_VPSSBL_INTSEL);
- bl_regw(DM355_VPSSBL_EVTSEL_DEFAULT, DM355_VPSSBL_EVTSEL);
- } else if (oper_cfg.platform == DM365) {
- oper_cfg.hw_ops.enable_clock = dm365_enable_clock;
- oper_cfg.hw_ops.select_ccdc_source = dm365_select_ccdc_source;
- /* Setup vpss interrupts */
- isp5_write((isp5_read(DM365_ISP5_PCCR) |
- DM365_ISP5_PCCR_BL_CLK_ENABLE |
- DM365_ISP5_PCCR_ISIF_CLK_ENABLE |
- DM365_ISP5_PCCR_H3A_CLK_ENABLE |
- DM365_ISP5_PCCR_RSZ_CLK_ENABLE |
- DM365_ISP5_PCCR_IPIPE_CLK_ENABLE |
- DM365_ISP5_PCCR_IPIPEIF_CLK_ENABLE |
- DM365_ISP5_PCCR_RSV), DM365_ISP5_PCCR);
- isp5_write((isp5_read(DM365_ISP5_BCR) |
- DM365_ISP5_BCR_ISIF_OUT_ENABLE), DM365_ISP5_BCR);
- isp5_write(DM365_ISP5_INTSEL1_DEFAULT, DM365_ISP5_INTSEL1);
- isp5_write(DM365_ISP5_INTSEL2_DEFAULT, DM365_ISP5_INTSEL2);
- isp5_write(DM365_ISP5_INTSEL3_DEFAULT, DM365_ISP5_INTSEL3);
- } else
- oper_cfg.hw_ops.clear_wbl_overflow = dm644x_clear_wbl_overflow;
-
- pm_runtime_enable(&pdev->dev);
-
- pm_runtime_get(&pdev->dev);
-
- spin_lock_init(&oper_cfg.vpss_lock);
- dev_info(&pdev->dev, "%s vpss probe success\n", platform_name);
-
- return 0;
-}
-
-static int vpss_remove(struct platform_device *pdev)
-{
- pm_runtime_disable(&pdev->dev);
- return 0;
-}
-
-static int vpss_suspend(struct device *dev)
-{
- pm_runtime_put(dev);
- return 0;
-}
-
-static int vpss_resume(struct device *dev)
-{
- pm_runtime_get(dev);
- return 0;
-}
-
-static const struct dev_pm_ops vpss_pm_ops = {
- .suspend = vpss_suspend,
- .resume = vpss_resume,
-};
-
-static struct platform_driver vpss_driver = {
- .driver = {
- .name = "vpss",
- .pm = &vpss_pm_ops,
- },
- .remove = vpss_remove,
- .probe = vpss_probe,
-};
-
-static void vpss_exit(void)
-{
- platform_driver_unregister(&vpss_driver);
- iounmap(oper_cfg.vpss_regs_base2);
- release_mem_region(VPSS_CLK_CTRL, 4);
-}
-
-static int __init vpss_init(void)
-{
- int ret;
-
- if (!request_mem_region(VPSS_CLK_CTRL, 4, "vpss_clock_control"))
- return -EBUSY;
-
- oper_cfg.vpss_regs_base2 = ioremap(VPSS_CLK_CTRL, 4);
- if (unlikely(!oper_cfg.vpss_regs_base2)) {
- ret = -ENOMEM;
- goto err_ioremap;
- }
-
- writel(VPSS_CLK_CTRL_VENCCLKEN |
- VPSS_CLK_CTRL_DACCLKEN, oper_cfg.vpss_regs_base2);
-
- ret = platform_driver_register(&vpss_driver);
- if (ret)
- goto err_pd_register;
-
- return 0;
-
-err_pd_register:
- iounmap(oper_cfg.vpss_regs_base2);
-err_ioremap:
- release_mem_region(VPSS_CLK_CTRL, 4);
- return ret;
-}
-subsys_initcall(vpss_init);
-module_exit(vpss_exit);
diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
index 527d9324742b..6bdad6341844 100644
--- a/drivers/media/rc/rc-main.c
+++ b/drivers/media/rc/rc-main.c
@@ -1614,7 +1614,7 @@ static void rc_dev_release(struct device *device)
kfree(dev);
}
-static int rc_dev_uevent(struct device *device, struct kobj_uevent_env *env)
+static int rc_dev_uevent(const struct device *device, struct kobj_uevent_env *env)
{
struct rc_dev *dev = to_rc_dev(device);
int ret = 0;
diff --git a/drivers/media/usb/uvc/Kconfig b/drivers/media/usb/uvc/Kconfig
index ca51ee8e45f3..579532272fd6 100644
--- a/drivers/media/usb/uvc/Kconfig
+++ b/drivers/media/usb/uvc/Kconfig
@@ -3,6 +3,7 @@ config USB_VIDEO_CLASS
tristate "USB Video Class (UVC)"
depends on VIDEO_DEV
select VIDEOBUF2_VMALLOC
+ select UVC_COMMON
help
Support for the USB Video Class (UVC). Currently only video
input devices, such as webcams, are supported.
diff --git a/drivers/media/usb/uvc/uvc_ctrl.c b/drivers/media/usb/uvc/uvc_ctrl.c
index c95a2229f4fa..5e9d3da862dd 100644
--- a/drivers/media/usb/uvc/uvc_ctrl.c
+++ b/drivers/media/usb/uvc/uvc_ctrl.c
@@ -6,19 +6,21 @@
* Laurent Pinchart (laurent.pinchart@ideasonboard.com)
*/
+#include <asm/barrier.h>
+#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/usb.h>
+#include <linux/usb/uvc.h>
#include <linux/videodev2.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <linux/atomic.h>
#include <media/v4l2-ctrls.h>
-#include <media/v4l2-uvc.h>
#include "uvcvideo.h"
@@ -363,19 +365,45 @@ static const u32 uvc_control_classes[] = {
V4L2_CID_USER_CLASS,
};
-static const struct uvc_menu_info power_line_frequency_controls[] = {
- { 0, "Disabled" },
- { 1, "50 Hz" },
- { 2, "60 Hz" },
- { 3, "Auto" },
-};
+static const int exposure_auto_mapping[] = { 2, 1, 4, 8 };
-static const struct uvc_menu_info exposure_auto_controls[] = {
- { 2, "Auto Mode" },
- { 1, "Manual Mode" },
- { 4, "Shutter Priority Mode" },
- { 8, "Aperture Priority Mode" },
-};
+/*
+ * This function translates the V4L2 menu index @idx, as exposed to userspace as
+ * the V4L2 control value, to the corresponding UVC control value used by the
+ * device. The custom menu_mapping in the control @mapping is used when
+ * available, otherwise the function assumes that the V4L2 and UVC values are
+ * identical.
+ *
+ * For controls of type UVC_CTRL_DATA_TYPE_BITMASK, the UVC control value is
+ * expressed as a bitmask and is thus guaranteed to have a single bit set.
+ *
+ * The function returns -EINVAL if the V4L2 menu index @idx isn't valid for the
+ * control, which includes all controls whose type isn't UVC_CTRL_DATA_TYPE_ENUM
+ * or UVC_CTRL_DATA_TYPE_BITMASK.
+ */
+static int uvc_mapping_get_menu_value(const struct uvc_control_mapping *mapping,
+ u32 idx)
+{
+ if (!test_bit(idx, &mapping->menu_mask))
+ return -EINVAL;
+
+ if (mapping->menu_mapping)
+ return mapping->menu_mapping[idx];
+
+ return idx;
+}
+
+static const char *
+uvc_mapping_get_menu_name(const struct uvc_control_mapping *mapping, u32 idx)
+{
+ if (!test_bit(idx, &mapping->menu_mask))
+ return NULL;
+
+ if (mapping->menu_names)
+ return mapping->menu_names[idx];
+
+ return v4l2_ctrl_get_menu(mapping->id)[idx];
+}
static s32 uvc_ctrl_get_zoom(struct uvc_control_mapping *mapping,
u8 query, const u8 *data)
@@ -524,8 +552,9 @@ static const struct uvc_control_mapping uvc_ctrl_mappings[] = {
.offset = 0,
.v4l2_type = V4L2_CTRL_TYPE_MENU,
.data_type = UVC_CTRL_DATA_TYPE_BITMASK,
- .menu_info = exposure_auto_controls,
- .menu_count = ARRAY_SIZE(exposure_auto_controls),
+ .menu_mapping = exposure_auto_mapping,
+ .menu_mask = GENMASK(V4L2_EXPOSURE_APERTURE_PRIORITY,
+ V4L2_EXPOSURE_AUTO),
.slave_ids = { V4L2_CID_EXPOSURE_ABSOLUTE, },
},
{
@@ -721,32 +750,50 @@ static const struct uvc_control_mapping uvc_ctrl_mappings[] = {
},
};
-static const struct uvc_control_mapping uvc_ctrl_mappings_uvc11[] = {
- {
- .id = V4L2_CID_POWER_LINE_FREQUENCY,
- .entity = UVC_GUID_UVC_PROCESSING,
- .selector = UVC_PU_POWER_LINE_FREQUENCY_CONTROL,
- .size = 2,
- .offset = 0,
- .v4l2_type = V4L2_CTRL_TYPE_MENU,
- .data_type = UVC_CTRL_DATA_TYPE_ENUM,
- .menu_info = power_line_frequency_controls,
- .menu_count = ARRAY_SIZE(power_line_frequency_controls) - 1,
- },
+const struct uvc_control_mapping uvc_ctrl_power_line_mapping_limited = {
+ .id = V4L2_CID_POWER_LINE_FREQUENCY,
+ .entity = UVC_GUID_UVC_PROCESSING,
+ .selector = UVC_PU_POWER_LINE_FREQUENCY_CONTROL,
+ .size = 2,
+ .offset = 0,
+ .v4l2_type = V4L2_CTRL_TYPE_MENU,
+ .data_type = UVC_CTRL_DATA_TYPE_ENUM,
+ .menu_mask = GENMASK(V4L2_CID_POWER_LINE_FREQUENCY_60HZ,
+ V4L2_CID_POWER_LINE_FREQUENCY_50HZ),
};
-static const struct uvc_control_mapping uvc_ctrl_mappings_uvc15[] = {
- {
- .id = V4L2_CID_POWER_LINE_FREQUENCY,
- .entity = UVC_GUID_UVC_PROCESSING,
- .selector = UVC_PU_POWER_LINE_FREQUENCY_CONTROL,
- .size = 2,
- .offset = 0,
- .v4l2_type = V4L2_CTRL_TYPE_MENU,
- .data_type = UVC_CTRL_DATA_TYPE_ENUM,
- .menu_info = power_line_frequency_controls,
- .menu_count = ARRAY_SIZE(power_line_frequency_controls),
- },
+const struct uvc_control_mapping uvc_ctrl_power_line_mapping_uvc11 = {
+ .id = V4L2_CID_POWER_LINE_FREQUENCY,
+ .entity = UVC_GUID_UVC_PROCESSING,
+ .selector = UVC_PU_POWER_LINE_FREQUENCY_CONTROL,
+ .size = 2,
+ .offset = 0,
+ .v4l2_type = V4L2_CTRL_TYPE_MENU,
+ .data_type = UVC_CTRL_DATA_TYPE_ENUM,
+ .menu_mask = GENMASK(V4L2_CID_POWER_LINE_FREQUENCY_60HZ,
+ V4L2_CID_POWER_LINE_FREQUENCY_DISABLED),
+};
+
+static const struct uvc_control_mapping *uvc_ctrl_mappings_uvc11[] = {
+ &uvc_ctrl_power_line_mapping_uvc11,
+ NULL, /* Sentinel */
+};
+
+static const struct uvc_control_mapping uvc_ctrl_power_line_mapping_uvc15 = {
+ .id = V4L2_CID_POWER_LINE_FREQUENCY,
+ .entity = UVC_GUID_UVC_PROCESSING,
+ .selector = UVC_PU_POWER_LINE_FREQUENCY_CONTROL,
+ .size = 2,
+ .offset = 0,
+ .v4l2_type = V4L2_CTRL_TYPE_MENU,
+ .data_type = UVC_CTRL_DATA_TYPE_ENUM,
+ .menu_mask = GENMASK(V4L2_CID_POWER_LINE_FREQUENCY_AUTO,
+ V4L2_CID_POWER_LINE_FREQUENCY_DISABLED),
+};
+
+static const struct uvc_control_mapping *uvc_ctrl_mappings_uvc15[] = {
+ &uvc_ctrl_power_line_mapping_uvc15,
+ NULL, /* Sentinel */
};
/* ------------------------------------------------------------------------
@@ -972,11 +1019,17 @@ static s32 __uvc_ctrl_get_value(struct uvc_control_mapping *mapping,
s32 value = mapping->get(mapping, UVC_GET_CUR, data);
if (mapping->v4l2_type == V4L2_CTRL_TYPE_MENU) {
- const struct uvc_menu_info *menu = mapping->menu_info;
unsigned int i;
- for (i = 0; i < mapping->menu_count; ++i, ++menu) {
- if (menu->value == value) {
+ for (i = 0; BIT(i) <= mapping->menu_mask; ++i) {
+ u32 menu_value;
+
+ if (!test_bit(i, &mapping->menu_mask))
+ continue;
+
+ menu_value = uvc_mapping_get_menu_value(mapping, i);
+
+ if (menu_value == value) {
value = i;
break;
}
@@ -1085,11 +1138,28 @@ static int uvc_query_v4l2_class(struct uvc_video_chain *chain, u32 req_id,
return 0;
}
+/*
+ * Check if control @v4l2_id can be accessed by the given control @ioctl
+ * (VIDIOC_G_EXT_CTRLS, VIDIOC_TRY_EXT_CTRLS or VIDIOC_S_EXT_CTRLS).
+ *
+ * For set operations on slave controls, check if the master's value is set to
+ * manual, either in the others controls set in the same ioctl call, or from
+ * the master's current value. This catches VIDIOC_S_EXT_CTRLS calls that set
+ * both the master and slave control, such as for instance setting
+ * auto_exposure=1, exposure_time_absolute=251.
+ */
int uvc_ctrl_is_accessible(struct uvc_video_chain *chain, u32 v4l2_id,
- bool read)
+ const struct v4l2_ext_controls *ctrls,
+ unsigned long ioctl)
{
+ struct uvc_control_mapping *master_map = NULL;
+ struct uvc_control *master_ctrl = NULL;
struct uvc_control_mapping *mapping;
struct uvc_control *ctrl;
+ bool read = ioctl == VIDIOC_G_EXT_CTRLS;
+ s32 val;
+ int ret;
+ int i;
if (__uvc_query_v4l2_class(chain, v4l2_id, 0) >= 0)
return -EACCES;
@@ -1104,6 +1174,29 @@ int uvc_ctrl_is_accessible(struct uvc_video_chain *chain, u32 v4l2_id,
if (!(ctrl->info.flags & UVC_CTRL_FLAG_SET_CUR) && !read)
return -EACCES;
+ if (ioctl != VIDIOC_S_EXT_CTRLS || !mapping->master_id)
+ return 0;
+
+ /*
+ * Iterate backwards in cases where the master control is accessed
+ * multiple times in the same ioctl. We want the last value.
+ */
+ for (i = ctrls->count - 1; i >= 0; i--) {
+ if (ctrls->controls[i].id == mapping->master_id)
+ return ctrls->controls[i].value ==
+ mapping->master_manual ? 0 : -EACCES;
+ }
+
+ __uvc_find_control(ctrl->entity, mapping->master_id, &master_map,
+ &master_ctrl, 0);
+
+ if (!master_ctrl || !(master_ctrl->info.flags & UVC_CTRL_FLAG_GET_CUR))
+ return 0;
+
+ ret = __uvc_ctrl_get(chain, master_ctrl, master_map, &val);
+ if (ret >= 0 && val != mapping->master_manual)
+ return -EACCES;
+
return 0;
}
@@ -1121,6 +1214,25 @@ static const char *uvc_map_get_name(const struct uvc_control_mapping *map)
return "Unknown Control";
}
+static u32 uvc_get_ctrl_bitmap(struct uvc_control *ctrl,
+ struct uvc_control_mapping *mapping)
+{
+ /*
+ * Some controls, like CT_AE_MODE_CONTROL, use GET_RES to represent
+ * the number of bits supported. Those controls do not list GET_MAX
+ * as supported.
+ */
+ if (ctrl->info.flags & UVC_CTRL_FLAG_GET_RES)
+ return mapping->get(mapping, UVC_GET_RES,
+ uvc_ctrl_data(ctrl, UVC_CTRL_DATA_RES));
+
+ if (ctrl->info.flags & UVC_CTRL_FLAG_GET_MAX)
+ return mapping->get(mapping, UVC_GET_MAX,
+ uvc_ctrl_data(ctrl, UVC_CTRL_DATA_MAX));
+
+ return ~0;
+}
+
static int __uvc_query_v4l2_ctrl(struct uvc_video_chain *chain,
struct uvc_control *ctrl,
struct uvc_control_mapping *mapping,
@@ -1128,7 +1240,6 @@ static int __uvc_query_v4l2_ctrl(struct uvc_video_chain *chain,
{
struct uvc_control_mapping *master_map = NULL;
struct uvc_control *master_ctrl = NULL;
- const struct uvc_menu_info *menu;
unsigned int i;
memset(v4l2_ctrl, 0, sizeof(*v4l2_ctrl));
@@ -1169,13 +1280,19 @@ static int __uvc_query_v4l2_ctrl(struct uvc_video_chain *chain,
switch (mapping->v4l2_type) {
case V4L2_CTRL_TYPE_MENU:
- v4l2_ctrl->minimum = 0;
- v4l2_ctrl->maximum = mapping->menu_count - 1;
+ v4l2_ctrl->minimum = ffs(mapping->menu_mask) - 1;
+ v4l2_ctrl->maximum = fls(mapping->menu_mask) - 1;
v4l2_ctrl->step = 1;
- menu = mapping->menu_info;
- for (i = 0; i < mapping->menu_count; ++i, ++menu) {
- if (menu->value == v4l2_ctrl->default_value) {
+ for (i = 0; BIT(i) <= mapping->menu_mask; ++i) {
+ u32 menu_value;
+
+ if (!test_bit(i, &mapping->menu_mask))
+ continue;
+
+ menu_value = uvc_mapping_get_menu_value(mapping, i);
+
+ if (menu_value == v4l2_ctrl->default_value) {
v4l2_ctrl->default_value = i;
break;
}
@@ -1195,6 +1312,12 @@ static int __uvc_query_v4l2_ctrl(struct uvc_video_chain *chain,
v4l2_ctrl->step = 0;
return 0;
+ case V4L2_CTRL_TYPE_BITMASK:
+ v4l2_ctrl->minimum = 0;
+ v4l2_ctrl->maximum = uvc_get_ctrl_bitmap(ctrl, mapping);
+ v4l2_ctrl->step = 0;
+ return 0;
+
default:
break;
}
@@ -1268,11 +1391,11 @@ done:
int uvc_query_v4l2_menu(struct uvc_video_chain *chain,
struct v4l2_querymenu *query_menu)
{
- const struct uvc_menu_info *menu_info;
struct uvc_control_mapping *mapping;
struct uvc_control *ctrl;
u32 index = query_menu->index;
u32 id = query_menu->id;
+ const char *name;
int ret;
memset(query_menu, 0, sizeof(*query_menu));
@@ -1289,16 +1412,13 @@ int uvc_query_v4l2_menu(struct uvc_video_chain *chain,
goto done;
}
- if (query_menu->index >= mapping->menu_count) {
+ if (!test_bit(query_menu->index, &mapping->menu_mask)) {
ret = -EINVAL;
goto done;
}
- menu_info = &mapping->menu_info[query_menu->index];
-
- if (mapping->data_type == UVC_CTRL_DATA_TYPE_BITMASK &&
- (ctrl->info.flags & UVC_CTRL_FLAG_GET_RES)) {
- s32 bitmap;
+ if (mapping->data_type == UVC_CTRL_DATA_TYPE_BITMASK) {
+ int mask;
if (!ctrl->cached) {
ret = uvc_ctrl_populate_cache(chain, ctrl);
@@ -1306,15 +1426,25 @@ int uvc_query_v4l2_menu(struct uvc_video_chain *chain,
goto done;
}
- bitmap = mapping->get(mapping, UVC_GET_RES,
- uvc_ctrl_data(ctrl, UVC_CTRL_DATA_RES));
- if (!(bitmap & menu_info->value)) {
+ mask = uvc_mapping_get_menu_value(mapping, query_menu->index);
+ if (mask < 0) {
+ ret = mask;
+ goto done;
+ }
+
+ if (!(uvc_get_ctrl_bitmap(ctrl, mapping) & mask)) {
ret = -EINVAL;
goto done;
}
}
- strscpy(query_menu->name, menu_info->name, sizeof(query_menu->name));
+ name = uvc_mapping_get_menu_name(mapping, query_menu->index);
+ if (!name) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ strscpy(query_menu->name, name, sizeof(query_menu->name));
done:
mutex_unlock(&chain->ctrl_mutex);
@@ -1442,6 +1572,10 @@ static void uvc_ctrl_status_event_work(struct work_struct *work)
uvc_ctrl_status_event(w->chain, w->ctrl, w->data);
+ /* The barrier is needed to synchronize with uvc_status_stop(). */
+ if (smp_load_acquire(&dev->flush_status))
+ return;
+
/* Resubmit the URB. */
w->urb->interval = dev->int_ep->desc.bInterval;
ret = usb_submit_urb(w->urb, GFP_KERNEL);
@@ -1791,31 +1925,44 @@ int uvc_ctrl_set(struct uvc_fh *handle,
value = xctrl->value;
break;
+ case V4L2_CTRL_TYPE_BITMASK:
+ if (!ctrl->cached) {
+ ret = uvc_ctrl_populate_cache(chain, ctrl);
+ if (ret < 0)
+ return ret;
+ }
+
+ xctrl->value &= uvc_get_ctrl_bitmap(ctrl, mapping);
+ value = xctrl->value;
+ break;
+
case V4L2_CTRL_TYPE_BOOLEAN:
xctrl->value = clamp(xctrl->value, 0, 1);
value = xctrl->value;
break;
case V4L2_CTRL_TYPE_MENU:
- if (xctrl->value < 0 || xctrl->value >= mapping->menu_count)
+ if (xctrl->value < (ffs(mapping->menu_mask) - 1) ||
+ xctrl->value > (fls(mapping->menu_mask) - 1))
return -ERANGE;
- value = mapping->menu_info[xctrl->value].value;
+
+ if (!test_bit(xctrl->value, &mapping->menu_mask))
+ return -EINVAL;
+
+ value = uvc_mapping_get_menu_value(mapping, xctrl->value);
/*
* Valid menu indices are reported by the GET_RES request for
* UVC controls that support it.
*/
- if (mapping->data_type == UVC_CTRL_DATA_TYPE_BITMASK &&
- (ctrl->info.flags & UVC_CTRL_FLAG_GET_RES)) {
+ if (mapping->data_type == UVC_CTRL_DATA_TYPE_BITMASK) {
if (!ctrl->cached) {
ret = uvc_ctrl_populate_cache(chain, ctrl);
if (ret < 0)
return ret;
}
- step = mapping->get(mapping, UVC_GET_RES,
- uvc_ctrl_data(ctrl, UVC_CTRL_DATA_RES));
- if (!(step & value))
+ if (!(uvc_get_ctrl_bitmap(ctrl, mapping) & value))
return -EINVAL;
}
@@ -2218,31 +2365,42 @@ static int __uvc_ctrl_add_mapping(struct uvc_video_chain *chain,
unsigned int i;
/*
- * Most mappings come from static kernel data and need to be duplicated.
+ * Most mappings come from static kernel data, and need to be duplicated.
* Mappings that come from userspace will be unnecessarily duplicated,
* this could be optimized.
*/
map = kmemdup(mapping, sizeof(*mapping), GFP_KERNEL);
- if (map == NULL)
+ if (!map)
return -ENOMEM;
+ map->name = NULL;
+ map->menu_names = NULL;
+ map->menu_mapping = NULL;
+
/* For UVCIOC_CTRL_MAP custom control */
if (mapping->name) {
map->name = kstrdup(mapping->name, GFP_KERNEL);
- if (!map->name) {
- kfree(map);
- return -ENOMEM;
- }
+ if (!map->name)
+ goto err_nomem;
}
INIT_LIST_HEAD(&map->ev_subs);
- size = sizeof(*mapping->menu_info) * mapping->menu_count;
- map->menu_info = kmemdup(mapping->menu_info, size, GFP_KERNEL);
- if (map->menu_info == NULL) {
- kfree(map->name);
- kfree(map);
- return -ENOMEM;
+ if (mapping->menu_mapping && mapping->menu_mask) {
+ size = sizeof(mapping->menu_mapping[0])
+ * fls(mapping->menu_mask);
+ map->menu_mapping = kmemdup(mapping->menu_mapping, size,
+ GFP_KERNEL);
+ if (!map->menu_mapping)
+ goto err_nomem;
+ }
+ if (mapping->menu_names && mapping->menu_mask) {
+ size = sizeof(mapping->menu_names[0])
+ * fls(mapping->menu_mask);
+ map->menu_names = kmemdup(mapping->menu_names, size,
+ GFP_KERNEL);
+ if (!map->menu_names)
+ goto err_nomem;
}
if (map->get == NULL)
@@ -2264,6 +2422,13 @@ static int __uvc_ctrl_add_mapping(struct uvc_video_chain *chain,
ctrl->info.selector);
return 0;
+
+err_nomem:
+ kfree(map->menu_names);
+ kfree(map->menu_mapping);
+ kfree(map->name);
+ kfree(map);
+ return -ENOMEM;
}
int uvc_ctrl_add_mapping(struct uvc_video_chain *chain,
@@ -2421,8 +2586,7 @@ static void uvc_ctrl_prune_entity(struct uvc_device *dev,
static void uvc_ctrl_init_ctrl(struct uvc_video_chain *chain,
struct uvc_control *ctrl)
{
- const struct uvc_control_mapping *mappings;
- unsigned int num_mappings;
+ const struct uvc_control_mapping **mappings;
unsigned int i;
/*
@@ -2489,16 +2653,11 @@ static void uvc_ctrl_init_ctrl(struct uvc_video_chain *chain,
}
/* Finally process version-specific mappings. */
- if (chain->dev->uvc_version < 0x0150) {
- mappings = uvc_ctrl_mappings_uvc11;
- num_mappings = ARRAY_SIZE(uvc_ctrl_mappings_uvc11);
- } else {
- mappings = uvc_ctrl_mappings_uvc15;
- num_mappings = ARRAY_SIZE(uvc_ctrl_mappings_uvc15);
- }
+ mappings = chain->dev->uvc_version < 0x0150
+ ? uvc_ctrl_mappings_uvc11 : uvc_ctrl_mappings_uvc15;
- for (i = 0; i < num_mappings; ++i) {
- const struct uvc_control_mapping *mapping = &mappings[i];
+ for (i = 0; mappings[i]; ++i) {
+ const struct uvc_control_mapping *mapping = mappings[i];
if (uvc_entity_match_guid(ctrl->entity, mapping->entity) &&
ctrl->info.selector == mapping->selector)
@@ -2591,7 +2750,8 @@ static void uvc_ctrl_cleanup_mappings(struct uvc_device *dev,
list_for_each_entry_safe(mapping, nm, &ctrl->info.mappings, list) {
list_del(&mapping->list);
- kfree(mapping->menu_info);
+ kfree(mapping->menu_names);
+ kfree(mapping->menu_mapping);
kfree(mapping->name);
kfree(mapping);
}
diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
index e4bcb5011360..7aefa76a42b3 100644
--- a/drivers/media/usb/uvc/uvc_driver.c
+++ b/drivers/media/usb/uvc/uvc_driver.c
@@ -7,12 +7,14 @@
*/
#include <linux/atomic.h>
+#include <linux/bits.h>
#include <linux/gpio/consumer.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/usb.h>
+#include <linux/usb/uvc.h>
#include <linux/videodev2.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
@@ -20,7 +22,6 @@
#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>
-#include <media/v4l2-uvc.h>
#include "uvcvideo.h"
@@ -224,7 +225,7 @@ static int uvc_parse_format(struct uvc_device *dev,
{
struct usb_interface *intf = streaming->intf;
struct usb_host_interface *alts = intf->cur_altsetting;
- struct uvc_format_desc *fmtdesc;
+ const struct uvc_format_desc *fmtdesc;
struct uvc_frame *frame;
const unsigned char *start = buffer;
unsigned int width_multiplier = 1;
@@ -251,14 +252,10 @@ static int uvc_parse_format(struct uvc_device *dev,
fmtdesc = uvc_format_by_guid(&buffer[5]);
if (fmtdesc != NULL) {
- strscpy(format->name, fmtdesc->name,
- sizeof(format->name));
format->fcc = fmtdesc->fcc;
} else {
dev_info(&streaming->intf->dev,
"Unknown video format %pUl\n", &buffer[5]);
- snprintf(format->name, sizeof(format->name), "%pUl\n",
- &buffer[5]);
format->fcc = 0;
}
@@ -270,8 +267,6 @@ static int uvc_parse_format(struct uvc_device *dev,
*/
if (dev->quirks & UVC_QUIRK_FORCE_Y8) {
if (format->fcc == V4L2_PIX_FMT_YUYV) {
- strscpy(format->name, "Greyscale 8-bit (Y8 )",
- sizeof(format->name));
format->fcc = V4L2_PIX_FMT_GREY;
format->bpp = 8;
width_multiplier = 2;
@@ -312,7 +307,6 @@ static int uvc_parse_format(struct uvc_device *dev,
return -EINVAL;
}
- strscpy(format->name, "MJPEG", sizeof(format->name));
format->fcc = V4L2_PIX_FMT_MJPEG;
format->flags = UVC_FMT_FLAG_COMPRESSED;
format->bpp = 0;
@@ -328,17 +322,7 @@ static int uvc_parse_format(struct uvc_device *dev,
return -EINVAL;
}
- switch (buffer[8] & 0x7f) {
- case 0:
- strscpy(format->name, "SD-DV", sizeof(format->name));
- break;
- case 1:
- strscpy(format->name, "SDL-DV", sizeof(format->name));
- break;
- case 2:
- strscpy(format->name, "HD-DV", sizeof(format->name));
- break;
- default:
+ if ((buffer[8] & 0x7f) > 2) {
uvc_dbg(dev, DESCR,
"device %d videostreaming interface %d: unknown DV format %u\n",
dev->udev->devnum,
@@ -346,9 +330,6 @@ static int uvc_parse_format(struct uvc_device *dev,
return -EINVAL;
}
- strlcat(format->name, buffer[8] & (1 << 7) ? " 60Hz" : " 50Hz",
- sizeof(format->name));
-
format->fcc = V4L2_PIX_FMT_DV;
format->flags = UVC_FMT_FLAG_COMPRESSED | UVC_FMT_FLAG_STREAM;
format->bpp = 0;
@@ -375,7 +356,7 @@ static int uvc_parse_format(struct uvc_device *dev,
return -EINVAL;
}
- uvc_dbg(dev, DESCR, "Found format %s\n", format->name);
+ uvc_dbg(dev, DESCR, "Found format %p4cc", &format->fcc);
buflen -= buffer[0];
buffer += buffer[0];
@@ -732,6 +713,7 @@ static int uvc_parse_streaming(struct uvc_device *dev,
/* Parse the alternate settings to find the maximum bandwidth. */
for (i = 0; i < intf->num_altsetting; ++i) {
struct usb_host_endpoint *ep;
+
alts = &intf->altsetting[i];
ep = uvc_find_endpoint(alts,
streaming->header.bEndpointAddress);
@@ -813,6 +795,27 @@ static struct uvc_entity *uvc_alloc_entity(u16 type, u16 id,
return entity;
}
+static void uvc_entity_set_name(struct uvc_device *dev, struct uvc_entity *entity,
+ const char *type_name, u8 string_id)
+{
+ int ret;
+
+ /*
+ * First attempt to read the entity name from the device. If the entity
+ * has no associated string, or if reading the string fails (most
+ * likely due to a buggy firmware), fall back to default names based on
+ * the entity type.
+ */
+ if (string_id) {
+ ret = usb_string(dev->udev, string_id, entity->name,
+ sizeof(entity->name));
+ if (!ret)
+ return;
+ }
+
+ sprintf(entity->name, "%s %u", type_name, entity->id);
+}
+
/* Parse vendor-specific extensions. */
static int uvc_parse_vendor_control(struct uvc_device *dev,
const unsigned char *buffer, int buflen)
@@ -879,11 +882,7 @@ static int uvc_parse_vendor_control(struct uvc_device *dev,
+ n;
memcpy(unit->extension.bmControls, &buffer[23+p], 2*n);
- if (buffer[24+p+2*n] != 0)
- usb_string(udev, buffer[24+p+2*n], unit->name,
- sizeof(unit->name));
- else
- sprintf(unit->name, "Extension %u", buffer[3]);
+ uvc_entity_set_name(dev, unit, "Extension", buffer[24+p+2*n]);
list_add_tail(&unit->list, &dev->entities);
handled = 1;
@@ -901,6 +900,7 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
struct usb_interface *intf;
struct usb_host_interface *alts = dev->intf->cur_altsetting;
unsigned int i, n, p, len;
+ const char *type_name;
u16 type;
switch (buffer[2]) {
@@ -1006,15 +1006,14 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
memcpy(term->media.bmTransportModes, &buffer[10+n], p);
}
- if (buffer[7] != 0)
- usb_string(udev, buffer[7], term->name,
- sizeof(term->name));
- else if (UVC_ENTITY_TYPE(term) == UVC_ITT_CAMERA)
- sprintf(term->name, "Camera %u", buffer[3]);
+ if (UVC_ENTITY_TYPE(term) == UVC_ITT_CAMERA)
+ type_name = "Camera";
else if (UVC_ENTITY_TYPE(term) == UVC_ITT_MEDIA_TRANSPORT_INPUT)
- sprintf(term->name, "Media %u", buffer[3]);
+ type_name = "Media";
else
- sprintf(term->name, "Input %u", buffer[3]);
+ type_name = "Input";
+
+ uvc_entity_set_name(dev, term, type_name, buffer[7]);
list_add_tail(&term->list, &dev->entities);
break;
@@ -1047,11 +1046,7 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
memcpy(term->baSourceID, &buffer[7], 1);
- if (buffer[8] != 0)
- usb_string(udev, buffer[8], term->name,
- sizeof(term->name));
- else
- sprintf(term->name, "Output %u", buffer[3]);
+ uvc_entity_set_name(dev, term, "Output", buffer[8]);
list_add_tail(&term->list, &dev->entities);
break;
@@ -1072,11 +1067,7 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
memcpy(unit->baSourceID, &buffer[5], p);
- if (buffer[5+p] != 0)
- usb_string(udev, buffer[5+p], unit->name,
- sizeof(unit->name));
- else
- sprintf(unit->name, "Selector %u", buffer[3]);
+ uvc_entity_set_name(dev, unit, "Selector", buffer[5+p]);
list_add_tail(&unit->list, &dev->entities);
break;
@@ -1105,11 +1096,7 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
if (dev->uvc_version >= 0x0110)
unit->processing.bmVideoStandards = buffer[9+n];
- if (buffer[8+n] != 0)
- usb_string(udev, buffer[8+n], unit->name,
- sizeof(unit->name));
- else
- sprintf(unit->name, "Processing %u", buffer[3]);
+ uvc_entity_set_name(dev, unit, "Processing", buffer[8+n]);
list_add_tail(&unit->list, &dev->entities);
break;
@@ -1136,11 +1123,7 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
unit->extension.bmControls = (u8 *)unit + sizeof(*unit);
memcpy(unit->extension.bmControls, &buffer[23+p], n);
- if (buffer[23+p+n] != 0)
- usb_string(udev, buffer[23+p+n], unit->name,
- sizeof(unit->name));
- else
- sprintf(unit->name, "Extension %u", buffer[3]);
+ uvc_entity_set_name(dev, unit, "Extension", buffer[23+p+n]);
list_add_tail(&unit->list, &dev->entities);
break;
@@ -1173,7 +1156,8 @@ static int uvc_parse_control(struct uvc_device *dev)
buffer[1] != USB_DT_CS_INTERFACE)
goto next_descriptor;
- if ((ret = uvc_parse_standard_control(dev, buffer, buflen)) < 0)
+ ret = uvc_parse_standard_control(dev, buffer, buflen);
+ if (ret < 0)
return ret;
next_descriptor:
@@ -1856,12 +1840,14 @@ static void uvc_delete(struct kref *kref)
list_for_each_safe(p, n, &dev->chains) {
struct uvc_video_chain *chain;
+
chain = list_entry(p, struct uvc_video_chain, list);
kfree(chain);
}
list_for_each_safe(p, n, &dev->entities) {
struct uvc_entity *entity;
+
entity = list_entry(p, struct uvc_entity, list);
#ifdef CONFIG_MEDIA_CONTROLLER
uvc_mc_cleanup_entity(entity);
@@ -1871,6 +1857,7 @@ static void uvc_delete(struct kref *kref)
list_for_each_safe(p, n, &dev->streams) {
struct uvc_streaming *streaming;
+
streaming = list_entry(p, struct uvc_streaming, list);
usb_driver_release_interface(&uvc_driver.driver,
streaming->intf);
@@ -2206,7 +2193,8 @@ static int uvc_probe(struct usb_interface *intf,
usb_set_intfdata(intf, dev);
/* Initialize the interrupt URB. */
- if ((ret = uvc_status_init(dev)) < 0) {
+ ret = uvc_status_init(dev);
+ if (ret < 0) {
dev_info(&dev->udev->dev,
"Unable to initialize the status endpoint (%d), status interrupt will not be supported.\n",
ret);
@@ -2353,40 +2341,23 @@ static int uvc_clock_param_set(const char *val, const struct kernel_param *kp)
}
module_param_call(clock, uvc_clock_param_set, uvc_clock_param_get,
- &uvc_clock_param, S_IRUGO|S_IWUSR);
+ &uvc_clock_param, 0644);
MODULE_PARM_DESC(clock, "Video buffers timestamp clock");
-module_param_named(hwtimestamps, uvc_hw_timestamps_param, uint, S_IRUGO|S_IWUSR);
+module_param_named(hwtimestamps, uvc_hw_timestamps_param, uint, 0644);
MODULE_PARM_DESC(hwtimestamps, "Use hardware timestamps");
-module_param_named(nodrop, uvc_no_drop_param, uint, S_IRUGO|S_IWUSR);
+module_param_named(nodrop, uvc_no_drop_param, uint, 0644);
MODULE_PARM_DESC(nodrop, "Don't drop incomplete frames");
-module_param_named(quirks, uvc_quirks_param, uint, S_IRUGO|S_IWUSR);
+module_param_named(quirks, uvc_quirks_param, uint, 0644);
MODULE_PARM_DESC(quirks, "Forced device quirks");
-module_param_named(trace, uvc_dbg_param, uint, S_IRUGO|S_IWUSR);
+module_param_named(trace, uvc_dbg_param, uint, 0644);
MODULE_PARM_DESC(trace, "Trace level bitmask");
-module_param_named(timeout, uvc_timeout_param, uint, S_IRUGO|S_IWUSR);
+module_param_named(timeout, uvc_timeout_param, uint, 0644);
MODULE_PARM_DESC(timeout, "Streaming control requests timeout");
/* ------------------------------------------------------------------------
* Driver initialization and cleanup
*/
-static const struct uvc_menu_info power_line_frequency_controls_limited[] = {
- { 1, "50 Hz" },
- { 2, "60 Hz" },
-};
-
-static const struct uvc_control_mapping uvc_ctrl_power_line_mapping_limited = {
- .id = V4L2_CID_POWER_LINE_FREQUENCY,
- .entity = UVC_GUID_UVC_PROCESSING,
- .selector = UVC_PU_POWER_LINE_FREQUENCY_CONTROL,
- .size = 2,
- .offset = 0,
- .v4l2_type = V4L2_CTRL_TYPE_MENU,
- .data_type = UVC_CTRL_DATA_TYPE_ENUM,
- .menu_info = power_line_frequency_controls_limited,
- .menu_count = ARRAY_SIZE(power_line_frequency_controls_limited),
-};
-
static const struct uvc_device_info uvc_ctrl_power_line_limited = {
.mappings = (const struct uvc_control_mapping *[]) {
&uvc_ctrl_power_line_mapping_limited,
@@ -2394,6 +2365,13 @@ static const struct uvc_device_info uvc_ctrl_power_line_limited = {
},
};
+static const struct uvc_device_info uvc_ctrl_power_line_uvc11 = {
+ .mappings = (const struct uvc_control_mapping *[]) {
+ &uvc_ctrl_power_line_mapping_uvc11,
+ NULL, /* Sentinel */
+ },
+};
+
static const struct uvc_device_info uvc_quirk_probe_minmax = {
.quirks = UVC_QUIRK_PROBE_MINMAX,
};
@@ -2496,6 +2474,24 @@ static const struct usb_device_id uvc_ids[] = {
.bInterfaceSubClass = 1,
.bInterfaceProtocol = 0,
.driver_info = (kernel_ulong_t)&uvc_quirk_probe_minmax },
+ /* Logitech, Webcam C910 */
+ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
+ | USB_DEVICE_ID_MATCH_INT_INFO,
+ .idVendor = 0x046d,
+ .idProduct = 0x0821,
+ .bInterfaceClass = USB_CLASS_VIDEO,
+ .bInterfaceSubClass = 1,
+ .bInterfaceProtocol = 0,
+ .driver_info = UVC_INFO_QUIRK(UVC_QUIRK_WAKE_AUTOSUSPEND)},
+ /* Logitech, Webcam B910 */
+ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
+ | USB_DEVICE_ID_MATCH_INT_INFO,
+ .idVendor = 0x046d,
+ .idProduct = 0x0823,
+ .bInterfaceClass = USB_CLASS_VIDEO,
+ .bInterfaceSubClass = 1,
+ .bInterfaceProtocol = 0,
+ .driver_info = UVC_INFO_QUIRK(UVC_QUIRK_WAKE_AUTOSUSPEND)},
/* Logitech Quickcam Fusion */
{ .match_flags = USB_DEVICE_ID_MATCH_DEVICE
| USB_DEVICE_ID_MATCH_INT_INFO,
@@ -2973,6 +2969,15 @@ static const struct usb_device_id uvc_ids[] = {
.bInterfaceSubClass = 1,
.bInterfaceProtocol = 0,
.driver_info = UVC_INFO_QUIRK(UVC_QUIRK_FORCE_BPP) },
+ /* Lenovo Integrated Camera */
+ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
+ | USB_DEVICE_ID_MATCH_INT_INFO,
+ .idVendor = 0x30c9,
+ .idProduct = 0x0093,
+ .bInterfaceClass = USB_CLASS_VIDEO,
+ .bInterfaceSubClass = 1,
+ .bInterfaceProtocol = UVC_PC_PROTOCOL_15,
+ .driver_info = (kernel_ulong_t)&uvc_ctrl_power_line_uvc11 },
/* Sonix Technology USB 2.0 Camera */
{ .match_flags = USB_DEVICE_ID_MATCH_DEVICE
| USB_DEVICE_ID_MATCH_INT_INFO,
@@ -2991,6 +2996,24 @@ static const struct usb_device_id uvc_ids[] = {
.bInterfaceSubClass = 1,
.bInterfaceProtocol = 0,
.driver_info = (kernel_ulong_t)&uvc_ctrl_power_line_limited },
+ /* Acer EasyCamera */
+ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
+ | USB_DEVICE_ID_MATCH_INT_INFO,
+ .idVendor = 0x5986,
+ .idProduct = 0x1180,
+ .bInterfaceClass = USB_CLASS_VIDEO,
+ .bInterfaceSubClass = 1,
+ .bInterfaceProtocol = 0,
+ .driver_info = (kernel_ulong_t)&uvc_ctrl_power_line_limited },
+ /* Acer EasyCamera */
+ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
+ | USB_DEVICE_ID_MATCH_INT_INFO,
+ .idVendor = 0x5986,
+ .idProduct = 0x1180,
+ .bInterfaceClass = USB_CLASS_VIDEO,
+ .bInterfaceSubClass = 1,
+ .bInterfaceProtocol = 0,
+ .driver_info = (kernel_ulong_t)&uvc_ctrl_power_line_limited },
/* Intel RealSense D4M */
{ .match_flags = USB_DEVICE_ID_MATCH_DEVICE
| USB_DEVICE_ID_MATCH_INT_INFO,
diff --git a/drivers/media/usb/uvc/uvc_entity.c b/drivers/media/usb/uvc/uvc_entity.c
index 7c4d2f93d351..cc68dd24eb42 100644
--- a/drivers/media/usb/uvc/uvc_entity.c
+++ b/drivers/media/usb/uvc/uvc_entity.c
@@ -37,7 +37,7 @@ static int uvc_mc_create_links(struct uvc_video_chain *chain,
continue;
remote = uvc_entity_by_id(chain->dev, entity->baSourceID[i]);
- if (remote == NULL)
+ if (remote == NULL || remote->num_pads == 0)
return -EINVAL;
source = (UVC_ENTITY_TYPE(remote) == UVC_TT_STREAMING)
diff --git a/drivers/media/usb/uvc/uvc_status.c b/drivers/media/usb/uvc/uvc_status.c
index 7518ffce22ed..a78a88c710e2 100644
--- a/drivers/media/usb/uvc/uvc_status.c
+++ b/drivers/media/usb/uvc/uvc_status.c
@@ -6,6 +6,7 @@
* Laurent Pinchart (laurent.pinchart@ideasonboard.com)
*/
+#include <asm/barrier.h>
#include <linux/kernel.h>
#include <linux/input.h>
#include <linux/slab.h>
@@ -18,11 +19,34 @@
* Input device
*/
#ifdef CONFIG_USB_VIDEO_CLASS_INPUT_EVDEV
+
+static bool uvc_input_has_button(struct uvc_device *dev)
+{
+ struct uvc_streaming *stream;
+
+ /*
+ * The device has button events if both bTriggerSupport and
+ * bTriggerUsage are one. Otherwise the camera button does not
+ * exist or is handled automatically by the camera without host
+ * driver or client application intervention.
+ */
+ list_for_each_entry(stream, &dev->streams, list) {
+ if (stream->header.bTriggerSupport == 1 &&
+ stream->header.bTriggerUsage == 1)
+ return true;
+ }
+
+ return false;
+}
+
static int uvc_input_init(struct uvc_device *dev)
{
struct input_dev *input;
int ret;
+ if (!uvc_input_has_button(dev))
+ return 0;
+
input = input_allocate_device();
if (input == NULL)
return -ENOMEM;
@@ -73,38 +97,23 @@ static void uvc_input_report_key(struct uvc_device *dev, unsigned int code,
/* --------------------------------------------------------------------------
* Status interrupt endpoint
*/
-struct uvc_streaming_status {
- u8 bStatusType;
- u8 bOriginator;
- u8 bEvent;
- u8 bValue[];
-} __packed;
-
-struct uvc_control_status {
- u8 bStatusType;
- u8 bOriginator;
- u8 bEvent;
- u8 bSelector;
- u8 bAttribute;
- u8 bValue[];
-} __packed;
-
static void uvc_event_streaming(struct uvc_device *dev,
- struct uvc_streaming_status *status, int len)
+ struct uvc_status *status, int len)
{
- if (len < 3) {
+ if (len <= offsetof(struct uvc_status, bEvent)) {
uvc_dbg(dev, STATUS,
"Invalid streaming status event received\n");
return;
}
if (status->bEvent == 0) {
- if (len < 4)
+ if (len <= offsetof(struct uvc_status, streaming))
return;
+
uvc_dbg(dev, STATUS, "Button (intf %u) %s len %d\n",
status->bOriginator,
- status->bValue[0] ? "pressed" : "released", len);
- uvc_input_report_key(dev, KEY_CAMERA, status->bValue[0]);
+ status->streaming.button ? "pressed" : "released", len);
+ uvc_input_report_key(dev, KEY_CAMERA, status->streaming.button);
} else {
uvc_dbg(dev, STATUS, "Stream %u error event %02x len %d\n",
status->bOriginator, status->bEvent, len);
@@ -131,7 +140,7 @@ static struct uvc_control *uvc_event_entity_find_ctrl(struct uvc_entity *entity,
}
static struct uvc_control *uvc_event_find_ctrl(struct uvc_device *dev,
- const struct uvc_control_status *status,
+ const struct uvc_status *status,
struct uvc_video_chain **chain)
{
list_for_each_entry((*chain), &dev->chains, list) {
@@ -143,7 +152,7 @@ static struct uvc_control *uvc_event_find_ctrl(struct uvc_device *dev,
continue;
ctrl = uvc_event_entity_find_ctrl(entity,
- status->bSelector);
+ status->control.bSelector);
if (ctrl)
return ctrl;
}
@@ -153,7 +162,7 @@ static struct uvc_control *uvc_event_find_ctrl(struct uvc_device *dev,
}
static bool uvc_event_control(struct urb *urb,
- const struct uvc_control_status *status, int len)
+ const struct uvc_status *status, int len)
{
static const char *attrs[] = { "value", "info", "failure", "min", "max" };
struct uvc_device *dev = urb->context;
@@ -161,24 +170,24 @@ static bool uvc_event_control(struct urb *urb,
struct uvc_control *ctrl;
if (len < 6 || status->bEvent != 0 ||
- status->bAttribute >= ARRAY_SIZE(attrs)) {
+ status->control.bAttribute >= ARRAY_SIZE(attrs)) {
uvc_dbg(dev, STATUS, "Invalid control status event received\n");
return false;
}
uvc_dbg(dev, STATUS, "Control %u/%u %s change len %d\n",
- status->bOriginator, status->bSelector,
- attrs[status->bAttribute], len);
+ status->bOriginator, status->control.bSelector,
+ attrs[status->control.bAttribute], len);
/* Find the control. */
ctrl = uvc_event_find_ctrl(dev, status, &chain);
if (!ctrl)
return false;
- switch (status->bAttribute) {
+ switch (status->control.bAttribute) {
case UVC_CTRL_VALUE_CHANGE:
return uvc_ctrl_status_event_async(urb, chain, ctrl,
- status->bValue);
+ status->control.bValue);
case UVC_CTRL_INFO_CHANGE:
case UVC_CTRL_FAILURE_CHANGE:
@@ -214,28 +223,22 @@ static void uvc_status_complete(struct urb *urb)
len = urb->actual_length;
if (len > 0) {
- switch (dev->status[0] & 0x0f) {
+ switch (dev->status->bStatusType & 0x0f) {
case UVC_STATUS_TYPE_CONTROL: {
- struct uvc_control_status *status =
- (struct uvc_control_status *)dev->status;
-
- if (uvc_event_control(urb, status, len))
+ if (uvc_event_control(urb, dev->status, len))
/* The URB will be resubmitted in work context. */
return;
break;
}
case UVC_STATUS_TYPE_STREAMING: {
- struct uvc_streaming_status *status =
- (struct uvc_streaming_status *)dev->status;
-
- uvc_event_streaming(dev, status, len);
+ uvc_event_streaming(dev, dev->status, len);
break;
}
default:
uvc_dbg(dev, STATUS, "Unknown status event type %u\n",
- dev->status[0]);
+ dev->status->bStatusType);
break;
}
}
@@ -259,12 +262,12 @@ int uvc_status_init(struct uvc_device *dev)
uvc_input_init(dev);
- dev->status = kzalloc(UVC_MAX_STATUS_SIZE, GFP_KERNEL);
- if (dev->status == NULL)
+ dev->status = kzalloc(sizeof(*dev->status), GFP_KERNEL);
+ if (!dev->status)
return -ENOMEM;
dev->int_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (dev->int_urb == NULL) {
+ if (!dev->int_urb) {
kfree(dev->status);
return -ENOMEM;
}
@@ -281,7 +284,7 @@ int uvc_status_init(struct uvc_device *dev)
interval = fls(interval) - 1;
usb_fill_int_urb(dev->int_urb, dev->udev, pipe,
- dev->status, UVC_MAX_STATUS_SIZE, uvc_status_complete,
+ dev->status, sizeof(*dev->status), uvc_status_complete,
dev, interval);
return 0;
@@ -309,5 +312,41 @@ int uvc_status_start(struct uvc_device *dev, gfp_t flags)
void uvc_status_stop(struct uvc_device *dev)
{
+ struct uvc_ctrl_work *w = &dev->async_ctrl;
+
+ /*
+ * Prevent the asynchronous control handler from requeing the URB. The
+ * barrier is needed so the flush_status change is visible to other
+ * CPUs running the asynchronous handler before usb_kill_urb() is
+ * called below.
+ */
+ smp_store_release(&dev->flush_status, true);
+
+ /*
+ * Cancel any pending asynchronous work. If any status event was queued,
+ * process it synchronously.
+ */
+ if (cancel_work_sync(&w->work))
+ uvc_ctrl_status_event(w->chain, w->ctrl, w->data);
+
+ /* Kill the urb. */
usb_kill_urb(dev->int_urb);
+
+ /*
+ * The URB completion handler may have queued asynchronous work. This
+ * won't resubmit the URB as flush_status is set, but it needs to be
+ * cancelled before returning or it could then race with a future
+ * uvc_status_start() call.
+ */
+ if (cancel_work_sync(&w->work))
+ uvc_ctrl_status_event(w->chain, w->ctrl, w->data);
+
+ /*
+ * From this point, there are no events on the queue and the status URB
+ * is dead. No events will be queued until uvc_status_start() is called.
+ * The barrier is needed to make sure that flush_status is visible to
+ * uvc_ctrl_status_event_work() when uvc_status_start() will be called
+ * again.
+ */
+ smp_store_release(&dev->flush_status, false);
}
diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c
index f4d4c33b6dfb..35453f81c1d9 100644
--- a/drivers/media/usb/uvc/uvc_v4l2.c
+++ b/drivers/media/usb/uvc/uvc_v4l2.c
@@ -6,6 +6,7 @@
* Laurent Pinchart (laurent.pinchart@ideasonboard.com)
*/
+#include <linux/bits.h>
#include <linux/compat.h>
#include <linux/kernel.h>
#include <linux/list.h>
@@ -25,14 +26,84 @@
#include "uvcvideo.h"
+static int uvc_control_add_xu_mapping(struct uvc_video_chain *chain,
+ struct uvc_control_mapping *map,
+ const struct uvc_xu_control_mapping *xmap)
+{
+ unsigned int i;
+ size_t size;
+ int ret;
+
+ /*
+ * Prevent excessive memory consumption, as well as integer
+ * overflows.
+ */
+ if (xmap->menu_count == 0 ||
+ xmap->menu_count > UVC_MAX_CONTROL_MENU_ENTRIES)
+ return -EINVAL;
+
+ map->menu_names = NULL;
+ map->menu_mapping = NULL;
+
+ map->menu_mask = BIT_MASK(xmap->menu_count);
+
+ size = xmap->menu_count * sizeof(*map->menu_mapping);
+ map->menu_mapping = kzalloc(size, GFP_KERNEL);
+ if (!map->menu_mapping) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ for (i = 0; i < xmap->menu_count ; i++) {
+ if (copy_from_user((u32 *)&map->menu_mapping[i],
+ &xmap->menu_info[i].value,
+ sizeof(map->menu_mapping[i]))) {
+ ret = -EACCES;
+ goto done;
+ }
+ }
+
+ /*
+ * Always use the standard naming if available, otherwise copy the
+ * names supplied by userspace.
+ */
+ if (!v4l2_ctrl_get_menu(map->id)) {
+ size = xmap->menu_count * sizeof(map->menu_names[0]);
+ map->menu_names = kzalloc(size, GFP_KERNEL);
+ if (!map->menu_names) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ for (i = 0; i < xmap->menu_count ; i++) {
+ /* sizeof(names[i]) - 1: to take care of \0 */
+ if (copy_from_user((char *)map->menu_names[i],
+ xmap->menu_info[i].name,
+ sizeof(map->menu_names[i]) - 1)) {
+ ret = -EACCES;
+ goto done;
+ }
+ }
+ }
+
+ ret = uvc_ctrl_add_mapping(chain, map);
+
+done:
+ kfree(map->menu_names);
+ map->menu_names = NULL;
+ kfree(map->menu_mapping);
+ map->menu_mapping = NULL;
+
+ return ret;
+}
+
/* ------------------------------------------------------------------------
* UVC ioctls
*/
-static int uvc_ioctl_ctrl_map(struct uvc_video_chain *chain,
- struct uvc_xu_control_mapping *xmap)
+static int uvc_ioctl_xu_ctrl_map(struct uvc_video_chain *chain,
+ struct uvc_xu_control_mapping *xmap)
{
struct uvc_control_mapping *map;
- unsigned int size;
int ret;
map = kzalloc(sizeof(*map), GFP_KERNEL);
@@ -60,39 +131,20 @@ static int uvc_ioctl_ctrl_map(struct uvc_video_chain *chain,
case V4L2_CTRL_TYPE_INTEGER:
case V4L2_CTRL_TYPE_BOOLEAN:
case V4L2_CTRL_TYPE_BUTTON:
+ ret = uvc_ctrl_add_mapping(chain, map);
break;
case V4L2_CTRL_TYPE_MENU:
- /*
- * Prevent excessive memory consumption, as well as integer
- * overflows.
- */
- if (xmap->menu_count == 0 ||
- xmap->menu_count > UVC_MAX_CONTROL_MENU_ENTRIES) {
- ret = -EINVAL;
- goto free_map;
- }
-
- size = xmap->menu_count * sizeof(*map->menu_info);
- map->menu_info = memdup_user(xmap->menu_info, size);
- if (IS_ERR(map->menu_info)) {
- ret = PTR_ERR(map->menu_info);
- goto free_map;
- }
-
- map->menu_count = xmap->menu_count;
+ ret = uvc_control_add_xu_mapping(chain, map, xmap);
break;
default:
uvc_dbg(chain->dev, CONTROL,
"Unsupported V4L2 control type %u\n", xmap->v4l2_type);
ret = -ENOTTY;
- goto free_map;
+ break;
}
- ret = uvc_ctrl_add_mapping(chain, map);
-
- kfree(map->menu_info);
free_map:
kfree(map);
@@ -660,8 +712,6 @@ static int uvc_ioctl_enum_fmt(struct uvc_streaming *stream,
fmt->flags = 0;
if (format->flags & UVC_FMT_FLAG_COMPRESSED)
fmt->flags |= V4L2_FMT_FLAG_COMPRESSED;
- strscpy(fmt->description, format->name, sizeof(fmt->description));
- fmt->description[sizeof(fmt->description) - 1] = 0;
fmt->pixelformat = format->fcc;
return 0;
}
@@ -1020,8 +1070,7 @@ static int uvc_ctrl_check_access(struct uvc_video_chain *chain,
int ret = 0;
for (i = 0; i < ctrls->count; ++ctrl, ++i) {
- ret = uvc_ctrl_is_accessible(chain, ctrl->id,
- ioctl == VIDIOC_G_EXT_CTRLS);
+ ret = uvc_ctrl_is_accessible(chain, ctrl->id, ctrls, ioctl);
if (ret)
break;
}
@@ -1316,7 +1365,7 @@ static long uvc_ioctl_default(struct file *file, void *fh, bool valid_prio,
switch (cmd) {
/* Dynamic controls. */
case UVCIOC_CTRL_MAP:
- return uvc_ioctl_ctrl_map(chain, arg);
+ return uvc_ioctl_xu_ctrl_map(chain, arg);
case UVCIOC_CTRL_QUERY:
return uvc_xu_ctrl_query(chain, arg);
@@ -1429,7 +1478,7 @@ static long uvc_v4l2_compat_ioctl32(struct file *file,
ret = uvc_v4l2_get_xu_mapping(&karg.xmap, up);
if (ret)
return ret;
- ret = uvc_ioctl_ctrl_map(handle->chain, &karg.xmap);
+ ret = uvc_ioctl_xu_ctrl_map(handle->chain, &karg.xmap);
if (ret)
return ret;
ret = uvc_v4l2_put_xu_mapping(&karg.xmap, up);
diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c
index d2eb9066e4dc..d4b023d4de7c 100644
--- a/drivers/media/usb/uvc/uvc_video.c
+++ b/drivers/media/usb/uvc/uvc_video.c
@@ -79,13 +79,14 @@ int uvc_query_ctrl(struct uvc_device *dev, u8 query, u8 unit,
if (likely(ret == size))
return 0;
- dev_err(&dev->udev->dev,
- "Failed to query (%s) UVC control %u on unit %u: %d (exp. %u).\n",
- uvc_query_name(query), cs, unit, ret, size);
-
- if (ret != -EPIPE)
- return ret;
+ if (ret != -EPIPE) {
+ dev_err(&dev->udev->dev,
+ "Failed to query (%s) UVC control %u on unit %u: %d (exp. %u).\n",
+ uvc_query_name(query), cs, unit, ret, size);
+ return ret < 0 ? ret : -EPIPE;
+ }
+ /* Reuse data[0] to request the error code. */
tmp = *(u8 *)data;
ret = __uvc_query_ctrl(dev, UVC_GET_CUR, 0, intfnum,
@@ -107,7 +108,7 @@ int uvc_query_ctrl(struct uvc_device *dev, u8 query, u8 unit,
case 1: /* Not ready */
return -EBUSY;
case 2: /* Wrong state */
- return -EILSEQ;
+ return -EACCES;
case 3: /* Power */
return -EREMOTE;
case 4: /* Out of range */
@@ -129,12 +130,13 @@ int uvc_query_ctrl(struct uvc_device *dev, u8 query, u8 unit,
return -EPIPE;
}
+static const struct usb_device_id elgato_cam_link_4k = {
+ USB_DEVICE(0x0fd9, 0x0066)
+};
+
static void uvc_fixup_video_ctrl(struct uvc_streaming *stream,
struct uvc_streaming_control *ctrl)
{
- static const struct usb_device_id elgato_cam_link_4k = {
- USB_DEVICE(0x0fd9, 0x0066)
- };
struct uvc_format *format = NULL;
struct uvc_frame *frame = NULL;
unsigned int i;
@@ -297,7 +299,7 @@ static int uvc_get_video_ctrl(struct uvc_streaming *stream,
dev_err(&stream->intf->dev,
"Failed to query (%u) UVC %s control : %d (exp. %u).\n",
query, probe ? "probe" : "commit", ret, size);
- ret = -EIO;
+ ret = (ret == -EPROTO) ? -EPROTO : -EIO;
goto out;
}
@@ -516,7 +518,9 @@ uvc_video_clock_decode(struct uvc_streaming *stream, struct uvc_buffer *buf,
/*
* To limit the amount of data, drop SCRs with an SOF identical to the
- * previous one.
+ * previous one. This filtering is also needed to support UVC 1.5, where
+ * all the data packets of the same frame contains the same SOF. In that
+ * case only the first one will match the host_sof.
*/
dev_sof = get_unaligned_le16(&data[header_size - 2]);
if (dev_sof == stream->clock.last_sof)
@@ -1352,7 +1356,9 @@ static void uvc_video_decode_meta(struct uvc_streaming *stream,
if (has_scr)
memcpy(stream->clock.last_scr, scr, 6);
- memcpy(&meta->length, mem, length);
+ meta->length = mem[0];
+ meta->flags = mem[1];
+ memcpy(meta->buf, &mem[2], length - 2);
meta_buf->bytesused += length + sizeof(meta->ns) + sizeof(meta->sof);
uvc_dbg(stream->dev, FRAME,
@@ -1965,6 +1971,17 @@ static int uvc_video_start_transfer(struct uvc_streaming *stream,
"Selecting alternate setting %u (%u B/frame bandwidth)\n",
altsetting, best_psize);
+ /*
+ * Some devices, namely the Logitech C910 and B910, are unable
+ * to recover from a USB autosuspend, unless the alternate
+ * setting of the streaming interface is toggled.
+ */
+ if (stream->dev->quirks & UVC_QUIRK_WAKE_AUTOSUSPEND) {
+ usb_set_interface(stream->dev->udev, intfnum,
+ altsetting);
+ usb_set_interface(stream->dev->udev, intfnum, 0);
+ }
+
ret = usb_set_interface(stream->dev->udev, intfnum, altsetting);
if (ret < 0)
return ret;
@@ -2121,6 +2138,21 @@ int uvc_video_init(struct uvc_streaming *stream)
* request on the probe control, as required by the UVC specification.
*/
ret = uvc_get_video_ctrl(stream, probe, 1, UVC_GET_CUR);
+
+ /*
+ * Elgato Cam Link 4k can be in a stalled state if the resolution of
+ * the external source has changed while the firmware initializes.
+ * Once in this state, the device is useless until it receives a
+ * USB reset. It has even been observed that the stalled state will
+ * continue even after unplugging the device.
+ */
+ if (ret == -EPROTO &&
+ usb_match_one_id(stream->dev->intf, &elgato_cam_link_4k)) {
+ dev_err(&stream->intf->dev, "Elgato Cam Link 4K firmware crash detected\n");
+ dev_err(&stream->intf->dev, "Resetting the device, unplug and replug to recover\n");
+ usb_reset_device(stream->dev->udev);
+ }
+
if (ret < 0)
return ret;
diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h
index df93db259312..9a596c8d894a 100644
--- a/drivers/media/usb/uvc/uvcvideo.h
+++ b/drivers/media/usb/uvc/uvcvideo.h
@@ -51,8 +51,6 @@
#define UVC_URBS 5
/* Maximum number of packets per URB. */
#define UVC_MAX_PACKETS 32
-/* Maximum status buffer size in bytes of interrupt URB. */
-#define UVC_MAX_STATUS_SIZE 16
#define UVC_CTRL_CONTROL_TIMEOUT 5000
#define UVC_CTRL_STREAMING_TIMEOUT 5000
@@ -74,6 +72,7 @@
#define UVC_QUIRK_RESTORE_CTRLS_ON_INIT 0x00000400
#define UVC_QUIRK_FORCE_Y8 0x00000800
#define UVC_QUIRK_FORCE_BPP 0x00001000
+#define UVC_QUIRK_WAKE_AUTOSUSPEND 0x00002000
/* Format flags */
#define UVC_FMT_FLAG_COMPRESSED 0x00000001
@@ -116,8 +115,9 @@ struct uvc_control_mapping {
enum v4l2_ctrl_type v4l2_type;
u32 data_type;
- const struct uvc_menu_info *menu_info;
- u32 menu_count;
+ const u32 *menu_mapping;
+ const char (*menu_names)[UVC_MENU_NAME_LEN];
+ unsigned long menu_mask;
u32 master_id;
s32 master_manual;
@@ -264,8 +264,6 @@ struct uvc_format {
u32 fcc;
u32 flags;
- char name[32];
-
unsigned int nframes;
struct uvc_frame *frame;
};
@@ -527,6 +525,26 @@ struct uvc_device_info {
const struct uvc_control_mapping **mappings;
};
+struct uvc_status_streaming {
+ u8 button;
+} __packed;
+
+struct uvc_status_control {
+ u8 bSelector;
+ u8 bAttribute;
+ u8 bValue[11];
+} __packed;
+
+struct uvc_status {
+ u8 bStatusType;
+ u8 bOriginator;
+ u8 bEvent;
+ union {
+ struct uvc_status_control control;
+ struct uvc_status_streaming streaming;
+ };
+} __packed;
+
struct uvc_device {
struct usb_device *udev;
struct usb_interface *intf;
@@ -559,7 +577,9 @@ struct uvc_device {
/* Status Interrupt Endpoint */
struct usb_host_endpoint *int_ep;
struct urb *int_urb;
- u8 *status;
+ struct uvc_status *status;
+ bool flush_status;
+
struct input_dev *input;
char input_phys[64];
@@ -728,6 +748,8 @@ int uvc_status_start(struct uvc_device *dev, gfp_t flags);
void uvc_status_stop(struct uvc_device *dev);
/* Controls */
+extern const struct uvc_control_mapping uvc_ctrl_power_line_mapping_limited;
+extern const struct uvc_control_mapping uvc_ctrl_power_line_mapping_uvc11;
extern const struct v4l2_subscribed_event_ops uvc_ctrl_sub_ev_ops;
int uvc_query_v4l2_ctrl(struct uvc_video_chain *chain,
@@ -761,7 +783,8 @@ static inline int uvc_ctrl_rollback(struct uvc_fh *handle)
int uvc_ctrl_get(struct uvc_video_chain *chain, struct v4l2_ext_control *xctrl);
int uvc_ctrl_set(struct uvc_fh *handle, struct v4l2_ext_control *xctrl);
int uvc_ctrl_is_accessible(struct uvc_video_chain *chain, u32 v4l2_id,
- bool read);
+ const struct v4l2_ext_controls *ctrls,
+ unsigned long ioctl);
int uvc_xu_ctrl_query(struct uvc_video_chain *chain,
struct uvc_xu_control_query *xqry);
diff --git a/drivers/media/v4l2-core/v4l2-async.c b/drivers/media/v4l2-core/v4l2-async.c
index 2f1b718a9189..d7e9ffc7aa23 100644
--- a/drivers/media/v4l2-core/v4l2-async.c
+++ b/drivers/media/v4l2-core/v4l2-async.c
@@ -24,6 +24,8 @@
#include <media/v4l2-fwnode.h>
#include <media/v4l2-subdev.h>
+#include "v4l2-subdev-priv.h"
+
static int v4l2_async_nf_call_bound(struct v4l2_async_notifier *n,
struct v4l2_subdev *subdev,
struct v4l2_async_subdev *asd)
@@ -822,6 +824,8 @@ void v4l2_async_unregister_subdev(struct v4l2_subdev *sd)
if (!sd->async_list.next)
return;
+ v4l2_subdev_put_privacy_led(sd);
+
mutex_lock(&list_lock);
__v4l2_async_nf_unregister(sd->subdev_notifier);
diff --git a/drivers/media/v4l2-core/v4l2-ctrls-api.c b/drivers/media/v4l2-core/v4l2-ctrls-api.c
index 3d3b6dc24ca6..002ea6588edf 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls-api.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls-api.c
@@ -150,8 +150,8 @@ static int user_to_new(struct v4l2_ext_control *c, struct v4l2_ctrl *ctrl)
* then return an error.
*/
if (strlen(ctrl->p_new.p_char) == ctrl->maximum && last)
- ctrl->is_new = 1;
return -ERANGE;
+ ctrl->is_new = 1;
}
return ret;
default:
diff --git a/drivers/media/v4l2-core/v4l2-fwnode.c b/drivers/media/v4l2-core/v4l2-fwnode.c
index 3d9533c1b202..049c2f2001ea 100644
--- a/drivers/media/v4l2-core/v4l2-fwnode.c
+++ b/drivers/media/v4l2-core/v4l2-fwnode.c
@@ -28,6 +28,8 @@
#include <media/v4l2-fwnode.h>
#include <media/v4l2-subdev.h>
+#include "v4l2-subdev-priv.h"
+
static const struct v4l2_fwnode_bus_conv {
enum v4l2_fwnode_bus_type fwnode_bus_type;
enum v4l2_mbus_type mbus_type;
@@ -1302,6 +1304,10 @@ int v4l2_async_register_subdev_sensor(struct v4l2_subdev *sd)
v4l2_async_nf_init(notifier);
+ ret = v4l2_subdev_get_privacy_led(sd);
+ if (ret < 0)
+ goto out_cleanup;
+
ret = v4l2_async_nf_parse_fwnode_sensor(sd->dev, notifier);
if (ret < 0)
goto out_cleanup;
@@ -1322,6 +1328,7 @@ out_unregister:
v4l2_async_nf_unregister(notifier);
out_cleanup:
+ v4l2_subdev_put_privacy_led(sd);
v4l2_async_nf_cleanup(notifier);
kfree(notifier);
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index 8e0a0ff62a70..2a0139c72d29 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -1298,6 +1298,9 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
case V4L2_PIX_FMT_BGRX32: descr = "32-bit XBGR 8-8-8-8"; break;
case V4L2_PIX_FMT_RGBA32: descr = "32-bit RGBA 8-8-8-8"; break;
case V4L2_PIX_FMT_RGBX32: descr = "32-bit RGBX 8-8-8-8"; break;
+ case V4L2_PIX_FMT_RGBX1010102: descr = "32-bit RGBX 10-10-10-2"; break;
+ case V4L2_PIX_FMT_RGBA1010102: descr = "32-bit RGBA 10-10-10-2"; break;
+ case V4L2_PIX_FMT_ARGB2101010: descr = "32-bit ARGB 2-10-10-10"; break;
case V4L2_PIX_FMT_GREY: descr = "8-bit Greyscale"; break;
case V4L2_PIX_FMT_Y4: descr = "4-bit Greyscale"; break;
case V4L2_PIX_FMT_Y6: descr = "6-bit Greyscale"; break;
@@ -1442,6 +1445,9 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
case V4L2_PIX_FMT_NV12M_8L128: descr = "NV12M (8x128 Linear)"; break;
case V4L2_PIX_FMT_NV12_10BE_8L128: descr = "10-bit NV12 (8x128 Linear, BE)"; break;
case V4L2_PIX_FMT_NV12M_10BE_8L128: descr = "10-bit NV12M (8x128 Linear, BE)"; break;
+ case V4L2_PIX_FMT_Y210: descr = "10-bit YUYV Packed"; break;
+ case V4L2_PIX_FMT_Y212: descr = "12-bit YUYV Packed"; break;
+ case V4L2_PIX_FMT_Y216: descr = "16-bit YUYV Packed"; break;
default:
/* Compressed formats */
diff --git a/drivers/media/v4l2-core/v4l2-subdev-priv.h b/drivers/media/v4l2-core/v4l2-subdev-priv.h
new file mode 100644
index 000000000000..52391d6d8ab7
--- /dev/null
+++ b/drivers/media/v4l2-core/v4l2-subdev-priv.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * V4L2 sub-device pivate header.
+ *
+ * Copyright (C) 2023 Hans de Goede <hdegoede@redhat.com>
+ */
+
+#ifndef _V4L2_SUBDEV_PRIV_H_
+#define _V4L2_SUBDEV_PRIV_H_
+
+int v4l2_subdev_get_privacy_led(struct v4l2_subdev *sd);
+void v4l2_subdev_put_privacy_led(struct v4l2_subdev *sd);
+
+#endif
diff --git a/drivers/media/v4l2-core/v4l2-subdev.c b/drivers/media/v4l2-core/v4l2-subdev.c
index 4988a25bd8f4..6630fb30bc7d 100644
--- a/drivers/media/v4l2-core/v4l2-subdev.c
+++ b/drivers/media/v4l2-core/v4l2-subdev.c
@@ -9,6 +9,7 @@
*/
#include <linux/ioctl.h>
+#include <linux/leds.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
@@ -23,6 +24,8 @@
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>
+#include "v4l2-subdev-priv.h"
+
#if defined(CONFIG_VIDEO_V4L2_SUBDEV_API)
static int subdev_fh_init(struct v4l2_subdev_fh *fh, struct v4l2_subdev *sd)
{
@@ -322,6 +325,15 @@ static int call_s_stream(struct v4l2_subdev *sd, int enable)
{
int ret;
+#if IS_REACHABLE(CONFIG_LEDS_CLASS)
+ if (!IS_ERR_OR_NULL(sd->privacy_led)) {
+ if (enable)
+ led_set_brightness(sd->privacy_led,
+ sd->privacy_led->max_brightness);
+ else
+ led_set_brightness(sd->privacy_led, 0);
+ }
+#endif
ret = sd->ops->video->s_stream(sd, enable);
if (!enable && ret < 0) {
@@ -1090,6 +1102,7 @@ void v4l2_subdev_init(struct v4l2_subdev *sd, const struct v4l2_subdev_ops *ops)
sd->grp_id = 0;
sd->dev_priv = NULL;
sd->host_priv = NULL;
+ sd->privacy_led = NULL;
#if defined(CONFIG_MEDIA_CONTROLLER)
sd->entity.name = sd->name;
sd->entity.obj_type = MEDIA_ENTITY_TYPE_V4L2_SUBDEV;
@@ -1105,3 +1118,36 @@ void v4l2_subdev_notify_event(struct v4l2_subdev *sd,
v4l2_subdev_notify(sd, V4L2_DEVICE_NOTIFY_EVENT, (void *)ev);
}
EXPORT_SYMBOL_GPL(v4l2_subdev_notify_event);
+
+int v4l2_subdev_get_privacy_led(struct v4l2_subdev *sd)
+{
+#if IS_REACHABLE(CONFIG_LEDS_CLASS)
+ sd->privacy_led = led_get(sd->dev, "privacy-led");
+ if (IS_ERR(sd->privacy_led) && PTR_ERR(sd->privacy_led) != -ENOENT)
+ return dev_err_probe(sd->dev, PTR_ERR(sd->privacy_led),
+ "getting privacy LED\n");
+
+ if (!IS_ERR_OR_NULL(sd->privacy_led)) {
+ mutex_lock(&sd->privacy_led->led_access);
+ led_sysfs_disable(sd->privacy_led);
+ led_trigger_remove(sd->privacy_led);
+ led_set_brightness(sd->privacy_led, 0);
+ mutex_unlock(&sd->privacy_led->led_access);
+ }
+#endif
+ return 0;
+}
+EXPORT_SYMBOL_GPL(v4l2_subdev_get_privacy_led);
+
+void v4l2_subdev_put_privacy_led(struct v4l2_subdev *sd)
+{
+#if IS_REACHABLE(CONFIG_LEDS_CLASS)
+ if (!IS_ERR_OR_NULL(sd->privacy_led)) {
+ mutex_lock(&sd->privacy_led->led_access);
+ led_sysfs_enable(sd->privacy_led);
+ mutex_unlock(&sd->privacy_led->led_access);
+ led_put(sd->privacy_led);
+ }
+#endif
+}
+EXPORT_SYMBOL_GPL(v4l2_subdev_put_privacy_led);
diff --git a/drivers/media/v4l2-core/videobuf-dma-contig.c b/drivers/media/v4l2-core/videobuf-dma-contig.c
index f2c439359557..4c2ec7a0d804 100644
--- a/drivers/media/v4l2-core/videobuf-dma-contig.c
+++ b/drivers/media/v4l2-core/videobuf-dma-contig.c
@@ -314,7 +314,7 @@ static int __videobuf_mmap_mapper(struct videobuf_queue *q,
}
vma->vm_ops = &videobuf_vm_ops;
- vma->vm_flags |= VM_DONTEXPAND;
+ vm_flags_set(vma, VM_DONTEXPAND);
vma->vm_private_data = map;
dev_dbg(q->dev, "mmap %p: q=%p %08lx-%08lx (%lx) pgoff %08lx buf %d\n",
diff --git a/drivers/media/v4l2-core/videobuf-dma-sg.c b/drivers/media/v4l2-core/videobuf-dma-sg.c
index 234e9f647c96..53001532e8e3 100644
--- a/drivers/media/v4l2-core/videobuf-dma-sg.c
+++ b/drivers/media/v4l2-core/videobuf-dma-sg.c
@@ -630,8 +630,8 @@ static int __videobuf_mmap_mapper(struct videobuf_queue *q,
map->count = 1;
map->q = q;
vma->vm_ops = &videobuf_vm_ops;
- vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
- vma->vm_flags &= ~VM_IO; /* using shared anonymous pages */
+ /* using shared anonymous pages */
+ vm_flags_mod(vma, VM_DONTEXPAND | VM_DONTDUMP, VM_IO);
vma->vm_private_data = map;
dprintk(1, "mmap %p: q=%p %08lx-%08lx pgoff %08lx bufs %d-%d\n",
map, q, vma->vm_start, vma->vm_end, vma->vm_pgoff, first, last);
diff --git a/drivers/media/v4l2-core/videobuf-vmalloc.c b/drivers/media/v4l2-core/videobuf-vmalloc.c
index 9b2443720ab0..85c7090606d6 100644
--- a/drivers/media/v4l2-core/videobuf-vmalloc.c
+++ b/drivers/media/v4l2-core/videobuf-vmalloc.c
@@ -247,7 +247,7 @@ static int __videobuf_mmap_mapper(struct videobuf_queue *q,
}
vma->vm_ops = &videobuf_vm_ops;
- vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+ vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
vma->vm_private_data = map;
dprintk(1, "mmap %p: q=%p %08lx-%08lx (%lx) pgoff %08lx buf %d\n",
diff --git a/drivers/memstick/core/memstick.c b/drivers/memstick/core/memstick.c
index 660df7d269fa..bf7667845459 100644
--- a/drivers/memstick/core/memstick.c
+++ b/drivers/memstick/core/memstick.c
@@ -57,10 +57,10 @@ static int memstick_bus_match(struct device *dev, struct device_driver *drv)
return 0;
}
-static int memstick_uevent(struct device *dev, struct kobj_uevent_env *env)
+static int memstick_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
- struct memstick_dev *card = container_of(dev, struct memstick_dev,
- dev);
+ const struct memstick_dev *card = container_of_const(dev, struct memstick_dev,
+ dev);
if (add_uevent_var(env, "MEMSTICK_TYPE=%02X", card->id.type))
return -ENOMEM;
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 30db49f31866..fcc141e067b9 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -15,6 +15,7 @@ config MFD_CS5535
tristate "AMD CS5535 and CS5536 southbridge core functions"
select MFD_CORE
depends on PCI && (X86_32 || (X86 && COMPILE_TEST))
+ depends on !UML
help
This is the core driver for CS5535/CS5536 MFD functions. This is
necessary for using the board's GPIO and MFGPT functionality.
@@ -300,15 +301,6 @@ config MFD_CS47L92
help
Support for Cirrus Logic CS42L92, CS47L92 and CS47L93 Smart Codecs
-config MFD_ASIC3
- bool "Compaq ASIC3"
- depends on GPIOLIB
- depends on ARM || COMPILE_TEST
- select MFD_CORE
- help
- This driver supports the ASIC3 multifunction chip found on many
- PDAs (mainly iPAQ and HTC based ones)
-
config PMIC_DA903X
bool "Dialog Semiconductor DA9030/DA9034 PMIC Support"
depends on I2C=y
@@ -550,15 +542,6 @@ config MFD_HI655X_PMIC
help
Select this option to enable Hisilicon hi655x series pmic driver.
-config HTC_PASIC3
- tristate "HTC PASIC3 LED/DS1WM chip support"
- select MFD_CORE
- help
- This core driver provides register access for the LED/DS1WM
- chips labeled "AIC2" and "AIC3", found on HTC Blueangel and
- HTC Magician devices, respectively. Actual functionality is
- handled by the leds-pasic3 and ds1wm drivers.
-
config MFD_INTEL_QUARK_I2C_GPIO
tristate "Intel Quark MFD I2C GPIO"
depends on PCI
@@ -702,7 +685,7 @@ config MFD_INTEL_PMC_BXT
config MFD_IPAQ_MICRO
bool "Atmel Micro ASIC (iPAQ h3100/h3600/h3700) Support"
- depends on SA1100_H3100 || SA1100_H3600
+ depends on SA1100_H3600
select MFD_CORE
help
Select this to get support for the Microcontroller found in
@@ -1080,17 +1063,6 @@ config PCF50633_GPIO
Say yes here if you want to include support GPIO for pins on
the PCF50633 chip.
-config UCB1400_CORE
- tristate "Philips UCB1400 Core driver"
- depends on AC97_BUS
- depends on GPIOLIB
- help
- This enables support for the Philips UCB1400 core functions.
- The UCB1400 is an AC97 audio codec.
-
- To compile this driver as a module, choose M here: the
- module will be called ucb1400_core.
-
config MFD_PM8XXX
tristate "Qualcomm PM8xxx PMIC chips driver"
depends on (ARM || HEXAGON || COMPILE_TEST)
@@ -1514,7 +1486,7 @@ config TPS6105X
config TPS65010
tristate "TI TPS6501x Power Management chips"
depends on I2C && GPIOLIB
- default y if MACH_OMAP_H2 || MACH_OMAP_H3 || MACH_OMAP_OSK
+ default MACH_OMAP_OSK
help
If you say yes here you get support for the TPS6501x series of
Power Management chips. These include voltage regulators,
@@ -1795,35 +1767,6 @@ config MFD_TC3589X
additional drivers must be enabled in order to use the
functionality of the device.
-config MFD_TMIO
- bool
- default n
-
-config MFD_T7L66XB
- bool "Toshiba T7L66XB"
- depends on ARM && HAVE_CLK
- select MFD_CORE
- select MFD_TMIO
- help
- Support for Toshiba Mobile IO Controller T7L66XB
-
-config MFD_TC6387XB
- bool "Toshiba TC6387XB"
- depends on ARM && HAVE_CLK
- select MFD_CORE
- select MFD_TMIO
- help
- Support for Toshiba Mobile IO Controller TC6387XB
-
-config MFD_TC6393XB
- bool "Toshiba TC6393XB"
- depends on ARM && HAVE_CLK
- select GPIOLIB
- select MFD_CORE
- select MFD_TMIO
- help
- Support for Toshiba Mobile IO Controller TC6393XB
-
config MFD_TQMX86
tristate "TQ-Systems IO controller TQMX86"
select MFD_CORE
@@ -2224,14 +2167,32 @@ config SGI_MFD_IOC3
If you have an SGI Origin, Octane, or a PCI IOC3 card,
then say Y. Otherwise say N.
-config MFD_INTEL_M10_BMC
- tristate "Intel MAX 10 Board Management Controller"
- depends on SPI_MASTER
- select REGMAP_SPI_AVMM
- select MFD_CORE
+config MFD_INTEL_M10_BMC_CORE
+ tristate
+ select MFD_CORE
+ select REGMAP
+ default n
+
+config MFD_INTEL_M10_BMC_SPI
+ tristate "Intel MAX 10 Board Management Controller with SPI"
+ depends on SPI_MASTER
+ select MFD_INTEL_M10_BMC_CORE
+ select REGMAP_SPI_AVMM
+ help
+ Support for the Intel MAX 10 board management controller using the
+ SPI interface.
+
+ This driver provides common support for accessing the device,
+ additional drivers must be enabled in order to use the functionality
+ of the device.
+
+config MFD_INTEL_M10_BMC_PMCI
+ tristate "Intel MAX 10 Board Management Controller with PMCI"
+ depends on FPGA_DFL
+ select MFD_INTEL_M10_BMC_CORE
+ select REGMAP
help
- Support for the Intel MAX 10 board management controller using the
- SPI interface.
+ Support for the Intel MAX 10 board management controller via PMCI.
This driver provides common support for accessing the device,
additional drivers must be enabled in order to use the functionality
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index 457471478a93..2f6c89d1e277 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -9,7 +9,6 @@ obj-$(CONFIG_MFD_88PM800) += 88pm800.o 88pm80x.o
obj-$(CONFIG_MFD_88PM805) += 88pm805.o 88pm80x.o
obj-$(CONFIG_MFD_ACT8945A) += act8945a.o
obj-$(CONFIG_MFD_SM501) += sm501.o
-obj-$(CONFIG_MFD_ASIC3) += asic3.o tmio_core.o
obj-$(CONFIG_ARCH_BCM2835) += bcm2835-pm.o
obj-$(CONFIG_MFD_BCM590XX) += bcm590xx.o
obj-$(CONFIG_MFD_BD9571MWV) += bd9571mwv.o
@@ -18,8 +17,6 @@ obj-$(CONFIG_MFD_ENE_KB3930) += ene-kb3930.o
obj-$(CONFIG_MFD_EXYNOS_LPASS) += exynos-lpass.o
obj-$(CONFIG_MFD_GATEWORKS_GSC) += gateworks-gsc.o
-obj-$(CONFIG_HTC_PASIC3) += htc-pasic3.o
-
obj-$(CONFIG_MFD_TI_LP873X) += lp873x.o
obj-$(CONFIG_MFD_TI_LP87565) += lp87565.o
obj-$(CONFIG_MFD_TI_AM335X_TSCADC) += ti_am335x_tscadc.o
@@ -30,9 +27,6 @@ obj-$(CONFIG_STMPE_I2C) += stmpe-i2c.o
obj-$(CONFIG_STMPE_SPI) += stmpe-spi.o
obj-$(CONFIG_MFD_SUN6I_PRCM) += sun6i-prcm.o
obj-$(CONFIG_MFD_TC3589X) += tc3589x.o
-obj-$(CONFIG_MFD_T7L66XB) += t7l66xb.o tmio_core.o
-obj-$(CONFIG_MFD_TC6387XB) += tc6387xb.o tmio_core.o
-obj-$(CONFIG_MFD_TC6393XB) += tc6393xb.o tmio_core.o
obj-$(CONFIG_MFD_TQMX86) += tqmx86.o
obj-$(CONFIG_MFD_LOCHNAGAR) += lochnagar-i2c.o
@@ -131,7 +125,6 @@ obj-$(CONFIG_MCP_UCB1200_TS) += ucb1x00-ts.o
ifeq ($(CONFIG_SA1100_ASSABET),y)
obj-$(CONFIG_MCP_UCB1200) += ucb1x00-assabet.o
endif
-obj-$(CONFIG_UCB1400_CORE) += ucb1400_core.o
obj-$(CONFIG_PMIC_DA903X) += da903x.o
@@ -269,7 +262,10 @@ obj-$(CONFIG_MFD_QCOM_PM8008) += qcom-pm8008.o
obj-$(CONFIG_SGI_MFD_IOC3) += ioc3.o
obj-$(CONFIG_MFD_SIMPLE_MFD_I2C) += simple-mfd-i2c.o
obj-$(CONFIG_MFD_SMPRO) += smpro-core.o
-obj-$(CONFIG_MFD_INTEL_M10_BMC) += intel-m10-bmc.o
+
+obj-$(CONFIG_MFD_INTEL_M10_BMC_CORE) += intel-m10-bmc-core.o
+obj-$(CONFIG_MFD_INTEL_M10_BMC_SPI) += intel-m10-bmc-spi.o
+obj-$(CONFIG_MFD_INTEL_M10_BMC_PMCI) += intel-m10-bmc-pmci.o
obj-$(CONFIG_MFD_ATC260X) += atc260x-core.o
obj-$(CONFIG_MFD_ATC260X_I2C) += atc260x-i2c.o
diff --git a/drivers/mfd/arizona-core.c b/drivers/mfd/arizona-core.c
index bd7ee3260d53..c166fcd331f1 100644
--- a/drivers/mfd/arizona-core.c
+++ b/drivers/mfd/arizona-core.c
@@ -45,7 +45,7 @@ int arizona_clk32k_enable(struct arizona *arizona)
if (arizona->clk32k_ref == 1) {
switch (arizona->pdata.clk32k_src) {
case ARIZONA_32KZ_MCLK1:
- ret = pm_runtime_get_sync(arizona->dev);
+ ret = pm_runtime_resume_and_get(arizona->dev);
if (ret != 0)
goto err_ref;
ret = clk_prepare_enable(arizona->mclk[ARIZONA_MCLK1]);
diff --git a/drivers/mfd/asic3.c b/drivers/mfd/asic3.c
deleted file mode 100644
index 4fb7e35eb5ed..000000000000
--- a/drivers/mfd/asic3.c
+++ /dev/null
@@ -1,1071 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * driver/mfd/asic3.c
- *
- * Compaq ASIC3 support.
- *
- * Copyright 2001 Compaq Computer Corporation.
- * Copyright 2004-2005 Phil Blundell
- * Copyright 2007-2008 OpenedHand Ltd.
- *
- * Authors: Phil Blundell <pb@handhelds.org>,
- * Samuel Ortiz <sameo@openedhand.com>
- */
-
-#include <linux/kernel.h>
-#include <linux/delay.h>
-#include <linux/irq.h>
-#include <linux/gpio/driver.h>
-#include <linux/export.h>
-#include <linux/io.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/platform_device.h>
-
-#include <linux/mfd/asic3.h>
-#include <linux/mfd/core.h>
-#include <linux/mfd/ds1wm.h>
-#include <linux/mfd/tmio.h>
-
-#include <linux/mmc/host.h>
-
-enum {
- ASIC3_CLOCK_SPI,
- ASIC3_CLOCK_OWM,
- ASIC3_CLOCK_PWM0,
- ASIC3_CLOCK_PWM1,
- ASIC3_CLOCK_LED0,
- ASIC3_CLOCK_LED1,
- ASIC3_CLOCK_LED2,
- ASIC3_CLOCK_SD_HOST,
- ASIC3_CLOCK_SD_BUS,
- ASIC3_CLOCK_SMBUS,
- ASIC3_CLOCK_EX0,
- ASIC3_CLOCK_EX1,
-};
-
-struct asic3_clk {
- int enabled;
- unsigned int cdex;
- unsigned long rate;
-};
-
-#define INIT_CDEX(_name, _rate) \
- [ASIC3_CLOCK_##_name] = { \
- .cdex = CLOCK_CDEX_##_name, \
- .rate = _rate, \
- }
-
-static struct asic3_clk asic3_clk_init[] __initdata = {
- INIT_CDEX(SPI, 0),
- INIT_CDEX(OWM, 5000000),
- INIT_CDEX(PWM0, 0),
- INIT_CDEX(PWM1, 0),
- INIT_CDEX(LED0, 0),
- INIT_CDEX(LED1, 0),
- INIT_CDEX(LED2, 0),
- INIT_CDEX(SD_HOST, 24576000),
- INIT_CDEX(SD_BUS, 12288000),
- INIT_CDEX(SMBUS, 0),
- INIT_CDEX(EX0, 32768),
- INIT_CDEX(EX1, 24576000),
-};
-
-struct asic3 {
- void __iomem *mapping;
- unsigned int bus_shift;
- unsigned int irq_nr;
- unsigned int irq_base;
- raw_spinlock_t lock;
- u16 irq_bothedge[4];
- struct gpio_chip gpio;
- struct device *dev;
- void __iomem *tmio_cnf;
-
- struct asic3_clk clocks[ARRAY_SIZE(asic3_clk_init)];
-};
-
-static int asic3_gpio_get(struct gpio_chip *chip, unsigned offset);
-
-void asic3_write_register(struct asic3 *asic, unsigned int reg, u32 value)
-{
- iowrite16(value, asic->mapping +
- (reg >> asic->bus_shift));
-}
-EXPORT_SYMBOL_GPL(asic3_write_register);
-
-u32 asic3_read_register(struct asic3 *asic, unsigned int reg)
-{
- return ioread16(asic->mapping +
- (reg >> asic->bus_shift));
-}
-EXPORT_SYMBOL_GPL(asic3_read_register);
-
-static void asic3_set_register(struct asic3 *asic, u32 reg, u32 bits, bool set)
-{
- unsigned long flags;
- u32 val;
-
- raw_spin_lock_irqsave(&asic->lock, flags);
- val = asic3_read_register(asic, reg);
- if (set)
- val |= bits;
- else
- val &= ~bits;
- asic3_write_register(asic, reg, val);
- raw_spin_unlock_irqrestore(&asic->lock, flags);
-}
-
-/* IRQs */
-#define MAX_ASIC_ISR_LOOPS 20
-#define ASIC3_GPIO_BASE_INCR \
- (ASIC3_GPIO_B_BASE - ASIC3_GPIO_A_BASE)
-
-static void asic3_irq_flip_edge(struct asic3 *asic,
- u32 base, int bit)
-{
- u16 edge;
- unsigned long flags;
-
- raw_spin_lock_irqsave(&asic->lock, flags);
- edge = asic3_read_register(asic,
- base + ASIC3_GPIO_EDGE_TRIGGER);
- edge ^= bit;
- asic3_write_register(asic,
- base + ASIC3_GPIO_EDGE_TRIGGER, edge);
- raw_spin_unlock_irqrestore(&asic->lock, flags);
-}
-
-static void asic3_irq_demux(struct irq_desc *desc)
-{
- struct asic3 *asic = irq_desc_get_handler_data(desc);
- struct irq_data *data = irq_desc_get_irq_data(desc);
- int iter, i;
- unsigned long flags;
-
- data->chip->irq_ack(data);
-
- for (iter = 0 ; iter < MAX_ASIC_ISR_LOOPS; iter++) {
- u32 status;
- int bank;
-
- raw_spin_lock_irqsave(&asic->lock, flags);
- status = asic3_read_register(asic,
- ASIC3_OFFSET(INTR, P_INT_STAT));
- raw_spin_unlock_irqrestore(&asic->lock, flags);
-
- /* Check all ten register bits */
- if ((status & 0x3ff) == 0)
- break;
-
- /* Handle GPIO IRQs */
- for (bank = 0; bank < ASIC3_NUM_GPIO_BANKS; bank++) {
- if (status & (1 << bank)) {
- unsigned long base, istat;
-
- base = ASIC3_GPIO_A_BASE
- + bank * ASIC3_GPIO_BASE_INCR;
- raw_spin_lock_irqsave(&asic->lock, flags);
- istat = asic3_read_register(asic,
- base +
- ASIC3_GPIO_INT_STATUS);
- /* Clearing IntStatus */
- asic3_write_register(asic,
- base +
- ASIC3_GPIO_INT_STATUS, 0);
- raw_spin_unlock_irqrestore(&asic->lock, flags);
-
- for (i = 0; i < ASIC3_GPIOS_PER_BANK; i++) {
- int bit = (1 << i);
- unsigned int irqnr;
-
- if (!(istat & bit))
- continue;
-
- irqnr = asic->irq_base +
- (ASIC3_GPIOS_PER_BANK * bank)
- + i;
- generic_handle_irq(irqnr);
- if (asic->irq_bothedge[bank] & bit)
- asic3_irq_flip_edge(asic, base,
- bit);
- }
- }
- }
-
- /* Handle remaining IRQs in the status register */
- for (i = ASIC3_NUM_GPIOS; i < ASIC3_NR_IRQS; i++) {
- /* They start at bit 4 and go up */
- if (status & (1 << (i - ASIC3_NUM_GPIOS + 4)))
- generic_handle_irq(asic->irq_base + i);
- }
- }
-
- if (iter >= MAX_ASIC_ISR_LOOPS)
- dev_err(asic->dev, "interrupt processing overrun\n");
-}
-
-static inline int asic3_irq_to_bank(struct asic3 *asic, int irq)
-{
- int n;
-
- n = (irq - asic->irq_base) >> 4;
-
- return (n * (ASIC3_GPIO_B_BASE - ASIC3_GPIO_A_BASE));
-}
-
-static inline int asic3_irq_to_index(struct asic3 *asic, int irq)
-{
- return (irq - asic->irq_base) & 0xf;
-}
-
-static void asic3_mask_gpio_irq(struct irq_data *data)
-{
- struct asic3 *asic = irq_data_get_irq_chip_data(data);
- u32 val, bank, index;
- unsigned long flags;
-
- bank = asic3_irq_to_bank(asic, data->irq);
- index = asic3_irq_to_index(asic, data->irq);
-
- raw_spin_lock_irqsave(&asic->lock, flags);
- val = asic3_read_register(asic, bank + ASIC3_GPIO_MASK);
- val |= 1 << index;
- asic3_write_register(asic, bank + ASIC3_GPIO_MASK, val);
- raw_spin_unlock_irqrestore(&asic->lock, flags);
-}
-
-static void asic3_mask_irq(struct irq_data *data)
-{
- struct asic3 *asic = irq_data_get_irq_chip_data(data);
- int regval;
- unsigned long flags;
-
- raw_spin_lock_irqsave(&asic->lock, flags);
- regval = asic3_read_register(asic,
- ASIC3_INTR_BASE +
- ASIC3_INTR_INT_MASK);
-
- regval &= ~(ASIC3_INTMASK_MASK0 <<
- (data->irq - (asic->irq_base + ASIC3_NUM_GPIOS)));
-
- asic3_write_register(asic,
- ASIC3_INTR_BASE +
- ASIC3_INTR_INT_MASK,
- regval);
- raw_spin_unlock_irqrestore(&asic->lock, flags);
-}
-
-static void asic3_unmask_gpio_irq(struct irq_data *data)
-{
- struct asic3 *asic = irq_data_get_irq_chip_data(data);
- u32 val, bank, index;
- unsigned long flags;
-
- bank = asic3_irq_to_bank(asic, data->irq);
- index = asic3_irq_to_index(asic, data->irq);
-
- raw_spin_lock_irqsave(&asic->lock, flags);
- val = asic3_read_register(asic, bank + ASIC3_GPIO_MASK);
- val &= ~(1 << index);
- asic3_write_register(asic, bank + ASIC3_GPIO_MASK, val);
- raw_spin_unlock_irqrestore(&asic->lock, flags);
-}
-
-static void asic3_unmask_irq(struct irq_data *data)
-{
- struct asic3 *asic = irq_data_get_irq_chip_data(data);
- int regval;
- unsigned long flags;
-
- raw_spin_lock_irqsave(&asic->lock, flags);
- regval = asic3_read_register(asic,
- ASIC3_INTR_BASE +
- ASIC3_INTR_INT_MASK);
-
- regval |= (ASIC3_INTMASK_MASK0 <<
- (data->irq - (asic->irq_base + ASIC3_NUM_GPIOS)));
-
- asic3_write_register(asic,
- ASIC3_INTR_BASE +
- ASIC3_INTR_INT_MASK,
- regval);
- raw_spin_unlock_irqrestore(&asic->lock, flags);
-}
-
-static int asic3_gpio_irq_type(struct irq_data *data, unsigned int type)
-{
- struct asic3 *asic = irq_data_get_irq_chip_data(data);
- u32 bank, index;
- u16 trigger, level, edge, bit;
- unsigned long flags;
-
- bank = asic3_irq_to_bank(asic, data->irq);
- index = asic3_irq_to_index(asic, data->irq);
- bit = 1<<index;
-
- raw_spin_lock_irqsave(&asic->lock, flags);
- level = asic3_read_register(asic,
- bank + ASIC3_GPIO_LEVEL_TRIGGER);
- edge = asic3_read_register(asic,
- bank + ASIC3_GPIO_EDGE_TRIGGER);
- trigger = asic3_read_register(asic,
- bank + ASIC3_GPIO_TRIGGER_TYPE);
- asic->irq_bothedge[(data->irq - asic->irq_base) >> 4] &= ~bit;
-
- if (type == IRQ_TYPE_EDGE_RISING) {
- trigger |= bit;
- edge |= bit;
- } else if (type == IRQ_TYPE_EDGE_FALLING) {
- trigger |= bit;
- edge &= ~bit;
- } else if (type == IRQ_TYPE_EDGE_BOTH) {
- trigger |= bit;
- if (asic3_gpio_get(&asic->gpio, data->irq - asic->irq_base))
- edge &= ~bit;
- else
- edge |= bit;
- asic->irq_bothedge[(data->irq - asic->irq_base) >> 4] |= bit;
- } else if (type == IRQ_TYPE_LEVEL_LOW) {
- trigger &= ~bit;
- level &= ~bit;
- } else if (type == IRQ_TYPE_LEVEL_HIGH) {
- trigger &= ~bit;
- level |= bit;
- } else {
- /*
- * if type == IRQ_TYPE_NONE, we should mask interrupts, but
- * be careful to not unmask them if mask was also called.
- * Probably need internal state for mask.
- */
- dev_notice(asic->dev, "irq type not changed\n");
- }
- asic3_write_register(asic, bank + ASIC3_GPIO_LEVEL_TRIGGER,
- level);
- asic3_write_register(asic, bank + ASIC3_GPIO_EDGE_TRIGGER,
- edge);
- asic3_write_register(asic, bank + ASIC3_GPIO_TRIGGER_TYPE,
- trigger);
- raw_spin_unlock_irqrestore(&asic->lock, flags);
- return 0;
-}
-
-static int asic3_gpio_irq_set_wake(struct irq_data *data, unsigned int on)
-{
- struct asic3 *asic = irq_data_get_irq_chip_data(data);
- u32 bank, index;
- u16 bit;
-
- bank = asic3_irq_to_bank(asic, data->irq);
- index = asic3_irq_to_index(asic, data->irq);
- bit = 1<<index;
-
- asic3_set_register(asic, bank + ASIC3_GPIO_SLEEP_MASK, bit, !on);
-
- return 0;
-}
-
-static struct irq_chip asic3_gpio_irq_chip = {
- .name = "ASIC3-GPIO",
- .irq_ack = asic3_mask_gpio_irq,
- .irq_mask = asic3_mask_gpio_irq,
- .irq_unmask = asic3_unmask_gpio_irq,
- .irq_set_type = asic3_gpio_irq_type,
- .irq_set_wake = asic3_gpio_irq_set_wake,
-};
-
-static struct irq_chip asic3_irq_chip = {
- .name = "ASIC3",
- .irq_ack = asic3_mask_irq,
- .irq_mask = asic3_mask_irq,
- .irq_unmask = asic3_unmask_irq,
-};
-
-static int __init asic3_irq_probe(struct platform_device *pdev)
-{
- struct asic3 *asic = platform_get_drvdata(pdev);
- unsigned long clksel = 0;
- unsigned int irq, irq_base;
- int ret;
-
- ret = platform_get_irq(pdev, 0);
- if (ret < 0)
- return ret;
- asic->irq_nr = ret;
-
- /* turn on clock to IRQ controller */
- clksel |= CLOCK_SEL_CX;
- asic3_write_register(asic, ASIC3_OFFSET(CLOCK, SEL),
- clksel);
-
- irq_base = asic->irq_base;
-
- for (irq = irq_base; irq < irq_base + ASIC3_NR_IRQS; irq++) {
- if (irq < asic->irq_base + ASIC3_NUM_GPIOS)
- irq_set_chip(irq, &asic3_gpio_irq_chip);
- else
- irq_set_chip(irq, &asic3_irq_chip);
-
- irq_set_chip_data(irq, asic);
- irq_set_handler(irq, handle_level_irq);
- irq_clear_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE);
- }
-
- asic3_write_register(asic, ASIC3_OFFSET(INTR, INT_MASK),
- ASIC3_INTMASK_GINTMASK);
-
- irq_set_chained_handler_and_data(asic->irq_nr, asic3_irq_demux, asic);
- irq_set_irq_type(asic->irq_nr, IRQ_TYPE_EDGE_RISING);
-
- return 0;
-}
-
-static void asic3_irq_remove(struct platform_device *pdev)
-{
- struct asic3 *asic = platform_get_drvdata(pdev);
- unsigned int irq, irq_base;
-
- irq_base = asic->irq_base;
-
- for (irq = irq_base; irq < irq_base + ASIC3_NR_IRQS; irq++) {
- irq_set_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE);
- irq_set_chip_and_handler(irq, NULL, NULL);
- irq_set_chip_data(irq, NULL);
- }
- irq_set_chained_handler(asic->irq_nr, NULL);
-}
-
-/* GPIOs */
-static int asic3_gpio_direction(struct gpio_chip *chip,
- unsigned offset, int out)
-{
- u32 mask = ASIC3_GPIO_TO_MASK(offset), out_reg;
- unsigned int gpio_base;
- unsigned long flags;
- struct asic3 *asic;
-
- asic = gpiochip_get_data(chip);
- gpio_base = ASIC3_GPIO_TO_BASE(offset);
-
- if (gpio_base > ASIC3_GPIO_D_BASE) {
- dev_err(asic->dev, "Invalid base (0x%x) for gpio %d\n",
- gpio_base, offset);
- return -EINVAL;
- }
-
- raw_spin_lock_irqsave(&asic->lock, flags);
-
- out_reg = asic3_read_register(asic, gpio_base + ASIC3_GPIO_DIRECTION);
-
- /* Input is 0, Output is 1 */
- if (out)
- out_reg |= mask;
- else
- out_reg &= ~mask;
-
- asic3_write_register(asic, gpio_base + ASIC3_GPIO_DIRECTION, out_reg);
-
- raw_spin_unlock_irqrestore(&asic->lock, flags);
-
- return 0;
-
-}
-
-static int asic3_gpio_direction_input(struct gpio_chip *chip,
- unsigned offset)
-{
- return asic3_gpio_direction(chip, offset, 0);
-}
-
-static int asic3_gpio_direction_output(struct gpio_chip *chip,
- unsigned offset, int value)
-{
- return asic3_gpio_direction(chip, offset, 1);
-}
-
-static int asic3_gpio_get(struct gpio_chip *chip,
- unsigned offset)
-{
- unsigned int gpio_base;
- u32 mask = ASIC3_GPIO_TO_MASK(offset);
- struct asic3 *asic;
-
- asic = gpiochip_get_data(chip);
- gpio_base = ASIC3_GPIO_TO_BASE(offset);
-
- if (gpio_base > ASIC3_GPIO_D_BASE) {
- dev_err(asic->dev, "Invalid base (0x%x) for gpio %d\n",
- gpio_base, offset);
- return -EINVAL;
- }
-
- return !!(asic3_read_register(asic,
- gpio_base + ASIC3_GPIO_STATUS) & mask);
-}
-
-static void asic3_gpio_set(struct gpio_chip *chip,
- unsigned offset, int value)
-{
- u32 mask, out_reg;
- unsigned int gpio_base;
- unsigned long flags;
- struct asic3 *asic;
-
- asic = gpiochip_get_data(chip);
- gpio_base = ASIC3_GPIO_TO_BASE(offset);
-
- if (gpio_base > ASIC3_GPIO_D_BASE) {
- dev_err(asic->dev, "Invalid base (0x%x) for gpio %d\n",
- gpio_base, offset);
- return;
- }
-
- mask = ASIC3_GPIO_TO_MASK(offset);
-
- raw_spin_lock_irqsave(&asic->lock, flags);
-
- out_reg = asic3_read_register(asic, gpio_base + ASIC3_GPIO_OUT);
-
- if (value)
- out_reg |= mask;
- else
- out_reg &= ~mask;
-
- asic3_write_register(asic, gpio_base + ASIC3_GPIO_OUT, out_reg);
-
- raw_spin_unlock_irqrestore(&asic->lock, flags);
-}
-
-static int asic3_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
-{
- struct asic3 *asic = gpiochip_get_data(chip);
-
- return asic->irq_base + offset;
-}
-
-static __init int asic3_gpio_probe(struct platform_device *pdev,
- u16 *gpio_config, int num)
-{
- struct asic3 *asic = platform_get_drvdata(pdev);
- u16 alt_reg[ASIC3_NUM_GPIO_BANKS];
- u16 out_reg[ASIC3_NUM_GPIO_BANKS];
- u16 dir_reg[ASIC3_NUM_GPIO_BANKS];
- int i;
-
- memset(alt_reg, 0, ASIC3_NUM_GPIO_BANKS * sizeof(u16));
- memset(out_reg, 0, ASIC3_NUM_GPIO_BANKS * sizeof(u16));
- memset(dir_reg, 0, ASIC3_NUM_GPIO_BANKS * sizeof(u16));
-
- /* Enable all GPIOs */
- asic3_write_register(asic, ASIC3_GPIO_OFFSET(A, MASK), 0xffff);
- asic3_write_register(asic, ASIC3_GPIO_OFFSET(B, MASK), 0xffff);
- asic3_write_register(asic, ASIC3_GPIO_OFFSET(C, MASK), 0xffff);
- asic3_write_register(asic, ASIC3_GPIO_OFFSET(D, MASK), 0xffff);
-
- for (i = 0; i < num; i++) {
- u8 alt, pin, dir, init, bank_num, bit_num;
- u16 config = gpio_config[i];
-
- pin = ASIC3_CONFIG_GPIO_PIN(config);
- alt = ASIC3_CONFIG_GPIO_ALT(config);
- dir = ASIC3_CONFIG_GPIO_DIR(config);
- init = ASIC3_CONFIG_GPIO_INIT(config);
-
- bank_num = ASIC3_GPIO_TO_BANK(pin);
- bit_num = ASIC3_GPIO_TO_BIT(pin);
-
- alt_reg[bank_num] |= (alt << bit_num);
- out_reg[bank_num] |= (init << bit_num);
- dir_reg[bank_num] |= (dir << bit_num);
- }
-
- for (i = 0; i < ASIC3_NUM_GPIO_BANKS; i++) {
- asic3_write_register(asic,
- ASIC3_BANK_TO_BASE(i) +
- ASIC3_GPIO_DIRECTION,
- dir_reg[i]);
- asic3_write_register(asic,
- ASIC3_BANK_TO_BASE(i) + ASIC3_GPIO_OUT,
- out_reg[i]);
- asic3_write_register(asic,
- ASIC3_BANK_TO_BASE(i) +
- ASIC3_GPIO_ALT_FUNCTION,
- alt_reg[i]);
- }
-
- return gpiochip_add_data(&asic->gpio, asic);
-}
-
-static void asic3_gpio_remove(struct platform_device *pdev)
-{
- struct asic3 *asic = platform_get_drvdata(pdev);
-
- gpiochip_remove(&asic->gpio);
-}
-
-static void asic3_clk_enable(struct asic3 *asic, struct asic3_clk *clk)
-{
- unsigned long flags;
- u32 cdex;
-
- raw_spin_lock_irqsave(&asic->lock, flags);
- if (clk->enabled++ == 0) {
- cdex = asic3_read_register(asic, ASIC3_OFFSET(CLOCK, CDEX));
- cdex |= clk->cdex;
- asic3_write_register(asic, ASIC3_OFFSET(CLOCK, CDEX), cdex);
- }
- raw_spin_unlock_irqrestore(&asic->lock, flags);
-}
-
-static void asic3_clk_disable(struct asic3 *asic, struct asic3_clk *clk)
-{
- unsigned long flags;
- u32 cdex;
-
- WARN_ON(clk->enabled == 0);
-
- raw_spin_lock_irqsave(&asic->lock, flags);
- if (--clk->enabled == 0) {
- cdex = asic3_read_register(asic, ASIC3_OFFSET(CLOCK, CDEX));
- cdex &= ~clk->cdex;
- asic3_write_register(asic, ASIC3_OFFSET(CLOCK, CDEX), cdex);
- }
- raw_spin_unlock_irqrestore(&asic->lock, flags);
-}
-
-/* MFD cells (SPI, PWM, LED, DS1WM, MMC) */
-static struct ds1wm_driver_data ds1wm_pdata = {
- .active_high = 1,
- .reset_recover_delay = 1,
-};
-
-static struct resource ds1wm_resources[] = {
- {
- .start = ASIC3_OWM_BASE,
- .end = ASIC3_OWM_BASE + 0x13,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = ASIC3_IRQ_OWM,
- .end = ASIC3_IRQ_OWM,
- .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE,
- },
-};
-
-static int ds1wm_enable(struct platform_device *pdev)
-{
- struct asic3 *asic = dev_get_drvdata(pdev->dev.parent);
-
- /* Turn on external clocks and the OWM clock */
- asic3_clk_enable(asic, &asic->clocks[ASIC3_CLOCK_EX0]);
- asic3_clk_enable(asic, &asic->clocks[ASIC3_CLOCK_EX1]);
- asic3_clk_enable(asic, &asic->clocks[ASIC3_CLOCK_OWM]);
- usleep_range(1000, 5000);
-
- /* Reset and enable DS1WM */
- asic3_set_register(asic, ASIC3_OFFSET(EXTCF, RESET),
- ASIC3_EXTCF_OWM_RESET, 1);
- usleep_range(1000, 5000);
- asic3_set_register(asic, ASIC3_OFFSET(EXTCF, RESET),
- ASIC3_EXTCF_OWM_RESET, 0);
- usleep_range(1000, 5000);
- asic3_set_register(asic, ASIC3_OFFSET(EXTCF, SELECT),
- ASIC3_EXTCF_OWM_EN, 1);
- usleep_range(1000, 5000);
-
- return 0;
-}
-
-static int ds1wm_disable(struct platform_device *pdev)
-{
- struct asic3 *asic = dev_get_drvdata(pdev->dev.parent);
-
- asic3_set_register(asic, ASIC3_OFFSET(EXTCF, SELECT),
- ASIC3_EXTCF_OWM_EN, 0);
-
- asic3_clk_disable(asic, &asic->clocks[ASIC3_CLOCK_OWM]);
- asic3_clk_disable(asic, &asic->clocks[ASIC3_CLOCK_EX0]);
- asic3_clk_disable(asic, &asic->clocks[ASIC3_CLOCK_EX1]);
-
- return 0;
-}
-
-static const struct mfd_cell asic3_cell_ds1wm = {
- .name = "ds1wm",
- .enable = ds1wm_enable,
- .disable = ds1wm_disable,
- .platform_data = &ds1wm_pdata,
- .pdata_size = sizeof(ds1wm_pdata),
- .num_resources = ARRAY_SIZE(ds1wm_resources),
- .resources = ds1wm_resources,
-};
-
-static void asic3_mmc_pwr(struct platform_device *pdev, int state)
-{
- struct asic3 *asic = dev_get_drvdata(pdev->dev.parent);
-
- tmio_core_mmc_pwr(asic->tmio_cnf, 1 - asic->bus_shift, state);
-}
-
-static void asic3_mmc_clk_div(struct platform_device *pdev, int state)
-{
- struct asic3 *asic = dev_get_drvdata(pdev->dev.parent);
-
- tmio_core_mmc_clk_div(asic->tmio_cnf, 1 - asic->bus_shift, state);
-}
-
-static struct tmio_mmc_data asic3_mmc_data = {
- .hclk = 24576000,
- .ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34,
- .set_pwr = asic3_mmc_pwr,
- .set_clk_div = asic3_mmc_clk_div,
-};
-
-static struct resource asic3_mmc_resources[] = {
- DEFINE_RES_MEM(ASIC3_SD_CTRL_BASE, 0x400),
- DEFINE_RES_IRQ(0)
-};
-
-static int asic3_mmc_enable(struct platform_device *pdev)
-{
- struct asic3 *asic = dev_get_drvdata(pdev->dev.parent);
-
- /* Not sure if it must be done bit by bit, but leaving as-is */
- asic3_set_register(asic, ASIC3_OFFSET(SDHWCTRL, SDCONF),
- ASIC3_SDHWCTRL_LEVCD, 1);
- asic3_set_register(asic, ASIC3_OFFSET(SDHWCTRL, SDCONF),
- ASIC3_SDHWCTRL_LEVWP, 1);
- asic3_set_register(asic, ASIC3_OFFSET(SDHWCTRL, SDCONF),
- ASIC3_SDHWCTRL_SUSPEND, 0);
- asic3_set_register(asic, ASIC3_OFFSET(SDHWCTRL, SDCONF),
- ASIC3_SDHWCTRL_PCLR, 0);
-
- asic3_clk_enable(asic, &asic->clocks[ASIC3_CLOCK_EX0]);
- /* CLK32 used for card detection and for interruption detection
- * when HCLK is stopped.
- */
- asic3_clk_enable(asic, &asic->clocks[ASIC3_CLOCK_EX1]);
- usleep_range(1000, 5000);
-
- /* HCLK 24.576 MHz, BCLK 12.288 MHz: */
- asic3_write_register(asic, ASIC3_OFFSET(CLOCK, SEL),
- CLOCK_SEL_CX | CLOCK_SEL_SD_HCLK_SEL);
-
- asic3_clk_enable(asic, &asic->clocks[ASIC3_CLOCK_SD_HOST]);
- asic3_clk_enable(asic, &asic->clocks[ASIC3_CLOCK_SD_BUS]);
- usleep_range(1000, 5000);
-
- asic3_set_register(asic, ASIC3_OFFSET(EXTCF, SELECT),
- ASIC3_EXTCF_SD_MEM_ENABLE, 1);
-
- /* Enable SD card slot 3.3V power supply */
- asic3_set_register(asic, ASIC3_OFFSET(SDHWCTRL, SDCONF),
- ASIC3_SDHWCTRL_SDPWR, 1);
-
- /* ASIC3_SD_CTRL_BASE assumes 32-bit addressing, TMIO is 16-bit */
- tmio_core_mmc_enable(asic->tmio_cnf, 1 - asic->bus_shift,
- ASIC3_SD_CTRL_BASE >> 1);
-
- return 0;
-}
-
-static int asic3_mmc_disable(struct platform_device *pdev)
-{
- struct asic3 *asic = dev_get_drvdata(pdev->dev.parent);
-
- /* Put in suspend mode */
- asic3_set_register(asic, ASIC3_OFFSET(SDHWCTRL, SDCONF),
- ASIC3_SDHWCTRL_SUSPEND, 1);
-
- /* Disable clocks */
- asic3_clk_disable(asic, &asic->clocks[ASIC3_CLOCK_SD_HOST]);
- asic3_clk_disable(asic, &asic->clocks[ASIC3_CLOCK_SD_BUS]);
- asic3_clk_disable(asic, &asic->clocks[ASIC3_CLOCK_EX0]);
- asic3_clk_disable(asic, &asic->clocks[ASIC3_CLOCK_EX1]);
- return 0;
-}
-
-static const struct mfd_cell asic3_cell_mmc = {
- .name = "tmio-mmc",
- .enable = asic3_mmc_enable,
- .disable = asic3_mmc_disable,
- .suspend = asic3_mmc_disable,
- .resume = asic3_mmc_enable,
- .platform_data = &asic3_mmc_data,
- .pdata_size = sizeof(asic3_mmc_data),
- .num_resources = ARRAY_SIZE(asic3_mmc_resources),
- .resources = asic3_mmc_resources,
-};
-
-static const int clock_ledn[ASIC3_NUM_LEDS] = {
- [0] = ASIC3_CLOCK_LED0,
- [1] = ASIC3_CLOCK_LED1,
- [2] = ASIC3_CLOCK_LED2,
-};
-
-static int asic3_leds_enable(struct platform_device *pdev)
-{
- const struct mfd_cell *cell = mfd_get_cell(pdev);
- struct asic3 *asic = dev_get_drvdata(pdev->dev.parent);
-
- asic3_clk_enable(asic, &asic->clocks[clock_ledn[cell->id]]);
-
- return 0;
-}
-
-static int asic3_leds_disable(struct platform_device *pdev)
-{
- const struct mfd_cell *cell = mfd_get_cell(pdev);
- struct asic3 *asic = dev_get_drvdata(pdev->dev.parent);
-
- asic3_clk_disable(asic, &asic->clocks[clock_ledn[cell->id]]);
-
- return 0;
-}
-
-static int asic3_leds_suspend(struct platform_device *pdev)
-{
- const struct mfd_cell *cell = mfd_get_cell(pdev);
- struct asic3 *asic = dev_get_drvdata(pdev->dev.parent);
-
- while (asic3_gpio_get(&asic->gpio, ASIC3_GPIO(C, cell->id)) != 0)
- usleep_range(1000, 5000);
-
- asic3_clk_disable(asic, &asic->clocks[clock_ledn[cell->id]]);
-
- return 0;
-}
-
-static struct mfd_cell asic3_cell_leds[ASIC3_NUM_LEDS] = {
- [0] = {
- .name = "leds-asic3",
- .id = 0,
- .enable = asic3_leds_enable,
- .disable = asic3_leds_disable,
- .suspend = asic3_leds_suspend,
- .resume = asic3_leds_enable,
- },
- [1] = {
- .name = "leds-asic3",
- .id = 1,
- .enable = asic3_leds_enable,
- .disable = asic3_leds_disable,
- .suspend = asic3_leds_suspend,
- .resume = asic3_leds_enable,
- },
- [2] = {
- .name = "leds-asic3",
- .id = 2,
- .enable = asic3_leds_enable,
- .disable = asic3_leds_disable,
- .suspend = asic3_leds_suspend,
- .resume = asic3_leds_enable,
- },
-};
-
-static int __init asic3_mfd_probe(struct platform_device *pdev,
- struct asic3_platform_data *pdata,
- struct resource *mem)
-{
- struct asic3 *asic = platform_get_drvdata(pdev);
- struct resource *mem_sdio;
- int irq, ret;
-
- mem_sdio = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (!mem_sdio)
- dev_dbg(asic->dev, "no SDIO MEM resource\n");
-
- irq = platform_get_irq(pdev, 1);
- if (irq < 0)
- dev_dbg(asic->dev, "no SDIO IRQ resource\n");
-
- /* DS1WM */
- asic3_set_register(asic, ASIC3_OFFSET(EXTCF, SELECT),
- ASIC3_EXTCF_OWM_SMB, 0);
-
- ds1wm_resources[0].start >>= asic->bus_shift;
- ds1wm_resources[0].end >>= asic->bus_shift;
-
- /* MMC */
- if (mem_sdio) {
- asic->tmio_cnf = ioremap((ASIC3_SD_CONFIG_BASE >>
- asic->bus_shift) + mem_sdio->start,
- ASIC3_SD_CONFIG_SIZE >> asic->bus_shift);
- if (!asic->tmio_cnf) {
- ret = -ENOMEM;
- dev_dbg(asic->dev, "Couldn't ioremap SD_CONFIG\n");
- goto out;
- }
- }
- asic3_mmc_resources[0].start >>= asic->bus_shift;
- asic3_mmc_resources[0].end >>= asic->bus_shift;
-
- if (pdata->clock_rate) {
- ds1wm_pdata.clock_rate = pdata->clock_rate;
- ret = mfd_add_devices(&pdev->dev, pdev->id,
- &asic3_cell_ds1wm, 1, mem, asic->irq_base, NULL);
- if (ret < 0)
- goto out_unmap;
- }
-
- if (mem_sdio && (irq >= 0)) {
- ret = mfd_add_devices(&pdev->dev, pdev->id,
- &asic3_cell_mmc, 1, mem_sdio, irq, NULL);
- if (ret < 0)
- goto out_unmap;
- }
-
- ret = 0;
- if (pdata->leds) {
- int i;
-
- for (i = 0; i < ASIC3_NUM_LEDS; ++i) {
- asic3_cell_leds[i].platform_data = &pdata->leds[i];
- asic3_cell_leds[i].pdata_size = sizeof(pdata->leds[i]);
- }
- ret = mfd_add_devices(&pdev->dev, 0,
- asic3_cell_leds, ASIC3_NUM_LEDS, NULL, 0, NULL);
- }
- return ret;
-
-out_unmap:
- if (asic->tmio_cnf)
- iounmap(asic->tmio_cnf);
-out:
- return ret;
-}
-
-static void asic3_mfd_remove(struct platform_device *pdev)
-{
- struct asic3 *asic = platform_get_drvdata(pdev);
-
- mfd_remove_devices(&pdev->dev);
- iounmap(asic->tmio_cnf);
-}
-
-/* Core */
-static int __init asic3_probe(struct platform_device *pdev)
-{
- struct asic3_platform_data *pdata = dev_get_platdata(&pdev->dev);
- struct asic3 *asic;
- struct resource *mem;
- unsigned long clksel;
- int ret = 0;
-
- asic = devm_kzalloc(&pdev->dev,
- sizeof(struct asic3), GFP_KERNEL);
- if (!asic)
- return -ENOMEM;
-
- raw_spin_lock_init(&asic->lock);
- platform_set_drvdata(pdev, asic);
- asic->dev = &pdev->dev;
-
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!mem) {
- dev_err(asic->dev, "no MEM resource\n");
- return -ENOMEM;
- }
-
- asic->mapping = ioremap(mem->start, resource_size(mem));
- if (!asic->mapping) {
- dev_err(asic->dev, "Couldn't ioremap\n");
- return -ENOMEM;
- }
-
- asic->irq_base = pdata->irq_base;
-
- /* calculate bus shift from mem resource */
- asic->bus_shift = 2 - (resource_size(mem) >> 12);
-
- clksel = 0;
- asic3_write_register(asic, ASIC3_OFFSET(CLOCK, SEL), clksel);
-
- ret = asic3_irq_probe(pdev);
- if (ret < 0) {
- dev_err(asic->dev, "Couldn't probe IRQs\n");
- goto out_unmap;
- }
-
- asic->gpio.label = "asic3";
- asic->gpio.base = pdata->gpio_base;
- asic->gpio.ngpio = ASIC3_NUM_GPIOS;
- asic->gpio.get = asic3_gpio_get;
- asic->gpio.set = asic3_gpio_set;
- asic->gpio.direction_input = asic3_gpio_direction_input;
- asic->gpio.direction_output = asic3_gpio_direction_output;
- asic->gpio.to_irq = asic3_gpio_to_irq;
-
- ret = asic3_gpio_probe(pdev,
- pdata->gpio_config,
- pdata->gpio_config_num);
- if (ret < 0) {
- dev_err(asic->dev, "GPIO probe failed\n");
- goto out_irq;
- }
-
- /* Making a per-device copy is only needed for the
- * theoretical case of multiple ASIC3s on one board:
- */
- memcpy(asic->clocks, asic3_clk_init, sizeof(asic3_clk_init));
-
- asic3_mfd_probe(pdev, pdata, mem);
-
- asic3_set_register(asic, ASIC3_OFFSET(EXTCF, SELECT),
- (ASIC3_EXTCF_CF0_BUF_EN|ASIC3_EXTCF_CF0_PWAIT_EN), 1);
-
- dev_info(asic->dev, "ASIC3 Core driver\n");
-
- return 0;
-
- out_irq:
- asic3_irq_remove(pdev);
-
- out_unmap:
- iounmap(asic->mapping);
-
- return ret;
-}
-
-static int asic3_remove(struct platform_device *pdev)
-{
- struct asic3 *asic = platform_get_drvdata(pdev);
-
- asic3_set_register(asic, ASIC3_OFFSET(EXTCF, SELECT),
- (ASIC3_EXTCF_CF0_BUF_EN|ASIC3_EXTCF_CF0_PWAIT_EN), 0);
-
- asic3_mfd_remove(pdev);
-
- asic3_gpio_remove(pdev);
-
- asic3_irq_remove(pdev);
-
- asic3_write_register(asic, ASIC3_OFFSET(CLOCK, SEL), 0);
-
- iounmap(asic->mapping);
-
- return 0;
-}
-
-static void asic3_shutdown(struct platform_device *pdev)
-{
-}
-
-static struct platform_driver asic3_device_driver = {
- .driver = {
- .name = "asic3",
- },
- .remove = asic3_remove,
- .shutdown = asic3_shutdown,
-};
-
-static int __init asic3_init(void)
-{
- int retval = 0;
-
- retval = platform_driver_probe(&asic3_device_driver, asic3_probe);
-
- return retval;
-}
-
-subsys_initcall(asic3_init);
diff --git a/drivers/mfd/axp20x.c b/drivers/mfd/axp20x.c
index 47fd700f284f..01a6bbb6d266 100644
--- a/drivers/mfd/axp20x.c
+++ b/drivers/mfd/axp20x.c
@@ -23,7 +23,7 @@
#include <linux/mfd/core.h>
#include <linux/module.h>
#include <linux/of_device.h>
-#include <linux/pm_runtime.h>
+#include <linux/reboot.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
@@ -825,17 +825,16 @@ static const struct mfd_cell axp813_cells[] = {
},
};
-static struct axp20x_dev *axp20x_pm_power_off;
-static void axp20x_power_off(void)
+static int axp20x_power_off(struct sys_off_data *data)
{
- if (axp20x_pm_power_off->variant == AXP288_ID)
- return;
+ struct axp20x_dev *axp20x = data->cb_data;
- regmap_write(axp20x_pm_power_off->regmap, AXP20X_OFF_CTRL,
- AXP20X_OFF);
+ regmap_write(axp20x->regmap, AXP20X_OFF_CTRL, AXP20X_OFF);
/* Give capacitors etc. time to drain to avoid kernel panic msg. */
mdelay(500);
+
+ return NOTIFY_DONE;
}
int axp20x_match_device(struct axp20x_dev *axp20x)
@@ -1002,10 +1001,11 @@ int axp20x_device_probe(struct axp20x_dev *axp20x)
return ret;
}
- if (!pm_power_off) {
- axp20x_pm_power_off = axp20x;
- pm_power_off = axp20x_power_off;
- }
+ if (axp20x->variant != AXP288_ID)
+ devm_register_sys_off_handler(axp20x->dev,
+ SYS_OFF_MODE_POWER_OFF,
+ SYS_OFF_PRIO_DEFAULT,
+ axp20x_power_off, axp20x);
dev_info(axp20x->dev, "AXP20X driver loaded\n");
@@ -1015,11 +1015,6 @@ EXPORT_SYMBOL(axp20x_device_probe);
void axp20x_device_remove(struct axp20x_dev *axp20x)
{
- if (axp20x == axp20x_pm_power_off) {
- axp20x_pm_power_off = NULL;
- pm_power_off = NULL;
- }
-
mfd_remove_devices(axp20x->dev);
regmap_del_irq_chip(axp20x->irq, axp20x->regmap_irqc);
}
diff --git a/drivers/mfd/cros_ec_dev.c b/drivers/mfd/cros_ec_dev.c
index 344ad03bdc42..02d4271dfe06 100644
--- a/drivers/mfd/cros_ec_dev.c
+++ b/drivers/mfd/cros_ec_dev.c
@@ -65,11 +65,6 @@ static const struct cros_feature_to_name cros_mcu_devices[] = {
.desc = "System Control Processor",
},
{
- .id = EC_FEATURE_SCP_C1,
- .name = CROS_EC_DEV_SCP_C1_NAME,
- .desc = "System Control Processor 2nd Core",
- },
- {
.id = EC_FEATURE_TOUCHPAD,
.name = CROS_EC_DEV_TP_NAME,
.desc = "Touchpad",
diff --git a/drivers/mfd/htc-pasic3.c b/drivers/mfd/htc-pasic3.c
deleted file mode 100644
index 0c46b7e64b2d..000000000000
--- a/drivers/mfd/htc-pasic3.c
+++ /dev/null
@@ -1,210 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Core driver for HTC PASIC3 LED/DS1WM chip.
- *
- * Copyright (C) 2006 Philipp Zabel <philipp.zabel@gmail.com>
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-
-#include <linux/gpio.h>
-#include <linux/io.h>
-#include <linux/irq.h>
-#include <linux/interrupt.h>
-#include <linux/mfd/core.h>
-#include <linux/mfd/ds1wm.h>
-#include <linux/mfd/htc-pasic3.h>
-#include <linux/slab.h>
-
-struct pasic3_data {
- void __iomem *mapping;
- unsigned int bus_shift;
-};
-
-#define REG_ADDR 5
-#define REG_DATA 6
-
-#define READ_MODE 0x80
-
-/*
- * write to a secondary register on the PASIC3
- */
-void pasic3_write_register(struct device *dev, u32 reg, u8 val)
-{
- struct pasic3_data *asic = dev_get_drvdata(dev);
- int bus_shift = asic->bus_shift;
- void __iomem *addr = asic->mapping + (REG_ADDR << bus_shift);
- void __iomem *data = asic->mapping + (REG_DATA << bus_shift);
-
- __raw_writeb(~READ_MODE & reg, addr);
- __raw_writeb(val, data);
-}
-EXPORT_SYMBOL(pasic3_write_register); /* for leds-pasic3 */
-
-/*
- * read from a secondary register on the PASIC3
- */
-u8 pasic3_read_register(struct device *dev, u32 reg)
-{
- struct pasic3_data *asic = dev_get_drvdata(dev);
- int bus_shift = asic->bus_shift;
- void __iomem *addr = asic->mapping + (REG_ADDR << bus_shift);
- void __iomem *data = asic->mapping + (REG_DATA << bus_shift);
-
- __raw_writeb(READ_MODE | reg, addr);
- return __raw_readb(data);
-}
-EXPORT_SYMBOL(pasic3_read_register); /* for leds-pasic3 */
-
-/*
- * LEDs
- */
-
-static struct mfd_cell led_cell __initdata = {
- .name = "leds-pasic3",
-};
-
-/*
- * DS1WM
- */
-
-static int ds1wm_enable(struct platform_device *pdev)
-{
- struct device *dev = pdev->dev.parent;
- int c;
-
- c = pasic3_read_register(dev, 0x28);
- pasic3_write_register(dev, 0x28, c & 0x7f);
-
- dev_dbg(dev, "DS1WM OWM_EN low (active) %02x\n", c & 0x7f);
- return 0;
-}
-
-static int ds1wm_disable(struct platform_device *pdev)
-{
- struct device *dev = pdev->dev.parent;
- int c;
-
- c = pasic3_read_register(dev, 0x28);
- pasic3_write_register(dev, 0x28, c | 0x80);
-
- dev_dbg(dev, "DS1WM OWM_EN high (inactive) %02x\n", c | 0x80);
- return 0;
-}
-
-static struct ds1wm_driver_data ds1wm_pdata = {
- .active_high = 0,
- .reset_recover_delay = 1,
-};
-
-static struct resource ds1wm_resources[] __initdata = {
- [0] = {
- .start = 0,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = 0,
- .end = 0,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static const struct mfd_cell ds1wm_cell __initconst = {
- .name = "ds1wm",
- .enable = ds1wm_enable,
- .disable = ds1wm_disable,
- .platform_data = &ds1wm_pdata,
- .pdata_size = sizeof(ds1wm_pdata),
- .num_resources = 2,
- .resources = ds1wm_resources,
-};
-
-static int __init pasic3_probe(struct platform_device *pdev)
-{
- struct pasic3_platform_data *pdata = dev_get_platdata(&pdev->dev);
- struct device *dev = &pdev->dev;
- struct pasic3_data *asic;
- struct resource *r;
- int ret;
- int irq = 0;
-
- r = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (r) {
- ds1wm_resources[1].flags = IORESOURCE_IRQ | (r->flags &
- (IORESOURCE_IRQ_HIGHEDGE | IORESOURCE_IRQ_LOWEDGE));
- irq = r->start;
- }
-
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!r)
- return -ENXIO;
-
- if (!request_mem_region(r->start, resource_size(r), "pasic3"))
- return -EBUSY;
-
- asic = devm_kzalloc(dev, sizeof(struct pasic3_data), GFP_KERNEL);
- if (!asic)
- return -ENOMEM;
-
- platform_set_drvdata(pdev, asic);
-
- asic->mapping = ioremap(r->start, resource_size(r));
- if (!asic->mapping) {
- dev_err(dev, "couldn't ioremap PASIC3\n");
- return -ENOMEM;
- }
-
- /* calculate bus shift from mem resource */
- asic->bus_shift = (resource_size(r) - 5) >> 3;
-
- if (pdata && pdata->clock_rate) {
- ds1wm_pdata.clock_rate = pdata->clock_rate;
- /* the first 5 PASIC3 registers control the DS1WM */
- ds1wm_resources[0].end = (5 << asic->bus_shift) - 1;
- ret = mfd_add_devices(&pdev->dev, pdev->id,
- &ds1wm_cell, 1, r, irq, NULL);
- if (ret < 0)
- dev_warn(dev, "failed to register DS1WM\n");
- }
-
- if (pdata && pdata->led_pdata) {
- led_cell.platform_data = pdata->led_pdata;
- led_cell.pdata_size = sizeof(struct pasic3_leds_machinfo);
- ret = mfd_add_devices(&pdev->dev, pdev->id, &led_cell, 1, r,
- 0, NULL);
- if (ret < 0)
- dev_warn(dev, "failed to register LED device\n");
- }
-
- return 0;
-}
-
-static int pasic3_remove(struct platform_device *pdev)
-{
- struct pasic3_data *asic = platform_get_drvdata(pdev);
- struct resource *r;
-
- mfd_remove_devices(&pdev->dev);
-
- iounmap(asic->mapping);
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(r->start, resource_size(r));
- return 0;
-}
-
-MODULE_ALIAS("platform:pasic3");
-
-static struct platform_driver pasic3_driver = {
- .driver = {
- .name = "pasic3",
- },
- .remove = pasic3_remove,
-};
-
-module_platform_driver_probe(pasic3_driver, pasic3_probe);
-
-MODULE_AUTHOR("Philipp Zabel <philipp.zabel@gmail.com>");
-MODULE_DESCRIPTION("Core driver for HTC PASIC3");
-MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/intel-m10-bmc-core.c b/drivers/mfd/intel-m10-bmc-core.c
new file mode 100644
index 000000000000..dac9cf7bcb4a
--- /dev/null
+++ b/drivers/mfd/intel-m10-bmc-core.c
@@ -0,0 +1,122 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel MAX 10 Board Management Controller chip - common code
+ *
+ * Copyright (C) 2018-2020 Intel Corporation. All rights reserved.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/dev_printk.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/intel-m10-bmc.h>
+#include <linux/module.h>
+
+static ssize_t bmc_version_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct intel_m10bmc *ddata = dev_get_drvdata(dev);
+ unsigned int val;
+ int ret;
+
+ ret = m10bmc_sys_read(ddata, ddata->info->csr_map->build_version, &val);
+ if (ret)
+ return ret;
+
+ return sprintf(buf, "0x%x\n", val);
+}
+static DEVICE_ATTR_RO(bmc_version);
+
+static ssize_t bmcfw_version_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct intel_m10bmc *ddata = dev_get_drvdata(dev);
+ unsigned int val;
+ int ret;
+
+ ret = m10bmc_sys_read(ddata, ddata->info->csr_map->fw_version, &val);
+ if (ret)
+ return ret;
+
+ return sprintf(buf, "0x%x\n", val);
+}
+static DEVICE_ATTR_RO(bmcfw_version);
+
+static ssize_t mac_address_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct intel_m10bmc *ddata = dev_get_drvdata(dev);
+ unsigned int macaddr_low, macaddr_high;
+ int ret;
+
+ ret = m10bmc_sys_read(ddata, ddata->info->csr_map->mac_low, &macaddr_low);
+ if (ret)
+ return ret;
+
+ ret = m10bmc_sys_read(ddata, ddata->info->csr_map->mac_high, &macaddr_high);
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "%02x:%02x:%02x:%02x:%02x:%02x\n",
+ (u8)FIELD_GET(M10BMC_N3000_MAC_BYTE1, macaddr_low),
+ (u8)FIELD_GET(M10BMC_N3000_MAC_BYTE2, macaddr_low),
+ (u8)FIELD_GET(M10BMC_N3000_MAC_BYTE3, macaddr_low),
+ (u8)FIELD_GET(M10BMC_N3000_MAC_BYTE4, macaddr_low),
+ (u8)FIELD_GET(M10BMC_N3000_MAC_BYTE5, macaddr_high),
+ (u8)FIELD_GET(M10BMC_N3000_MAC_BYTE6, macaddr_high));
+}
+static DEVICE_ATTR_RO(mac_address);
+
+static ssize_t mac_count_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct intel_m10bmc *ddata = dev_get_drvdata(dev);
+ unsigned int macaddr_high;
+ int ret;
+
+ ret = m10bmc_sys_read(ddata, ddata->info->csr_map->mac_high, &macaddr_high);
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "%u\n", (u8)FIELD_GET(M10BMC_N3000_MAC_COUNT, macaddr_high));
+}
+static DEVICE_ATTR_RO(mac_count);
+
+static struct attribute *m10bmc_attrs[] = {
+ &dev_attr_bmc_version.attr,
+ &dev_attr_bmcfw_version.attr,
+ &dev_attr_mac_address.attr,
+ &dev_attr_mac_count.attr,
+ NULL,
+};
+
+static const struct attribute_group m10bmc_group = {
+ .attrs = m10bmc_attrs,
+};
+
+const struct attribute_group *m10bmc_dev_groups[] = {
+ &m10bmc_group,
+ NULL,
+};
+EXPORT_SYMBOL_GPL(m10bmc_dev_groups);
+
+int m10bmc_dev_init(struct intel_m10bmc *m10bmc, const struct intel_m10bmc_platform_info *info)
+{
+ int ret;
+
+ m10bmc->info = info;
+ dev_set_drvdata(m10bmc->dev, m10bmc);
+
+ ret = devm_mfd_add_devices(m10bmc->dev, PLATFORM_DEVID_AUTO,
+ info->cells, info->n_cells,
+ NULL, 0, NULL);
+ if (ret)
+ dev_err(m10bmc->dev, "Failed to register sub-devices: %d\n", ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(m10bmc_dev_init);
+
+MODULE_DESCRIPTION("Intel MAX 10 BMC core driver");
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/intel-m10-bmc-pmci.c b/drivers/mfd/intel-m10-bmc-pmci.c
new file mode 100644
index 000000000000..8821f1876dd6
--- /dev/null
+++ b/drivers/mfd/intel-m10-bmc-pmci.c
@@ -0,0 +1,455 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * MAX10 BMC Platform Management Component Interface (PMCI) based
+ * interface.
+ *
+ * Copyright (C) 2020-2023 Intel Corporation.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/dfl.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/intel-m10-bmc.h>
+#include <linux/minmax.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+
+struct m10bmc_pmci_device {
+ void __iomem *base;
+ struct intel_m10bmc m10bmc;
+ struct mutex flash_mutex; /* protects flash_busy and serializes flash read/read */
+ bool flash_busy;
+};
+
+/*
+ * Intel FGPA indirect register access via hardware controller/bridge.
+ */
+#define INDIRECT_CMD_OFF 0
+#define INDIRECT_CMD_CLR 0
+#define INDIRECT_CMD_RD BIT(0)
+#define INDIRECT_CMD_WR BIT(1)
+#define INDIRECT_CMD_ACK BIT(2)
+
+#define INDIRECT_ADDR_OFF 0x4
+#define INDIRECT_RD_OFF 0x8
+#define INDIRECT_WR_OFF 0xc
+
+#define INDIRECT_INT_US 1
+#define INDIRECT_TIMEOUT_US 10000
+
+struct indirect_ctx {
+ void __iomem *base;
+ struct device *dev;
+};
+
+static int indirect_clear_cmd(struct indirect_ctx *ctx)
+{
+ unsigned int cmd;
+ int ret;
+
+ writel(INDIRECT_CMD_CLR, ctx->base + INDIRECT_CMD_OFF);
+
+ ret = readl_poll_timeout(ctx->base + INDIRECT_CMD_OFF, cmd,
+ cmd == INDIRECT_CMD_CLR,
+ INDIRECT_INT_US, INDIRECT_TIMEOUT_US);
+ if (ret)
+ dev_err(ctx->dev, "timed out waiting clear cmd (residual cmd=0x%x)\n", cmd);
+
+ return ret;
+}
+
+static int indirect_reg_read(void *context, unsigned int reg, unsigned int *val)
+{
+ struct indirect_ctx *ctx = context;
+ unsigned int cmd, ack, tmpval;
+ int ret, ret2;
+
+ cmd = readl(ctx->base + INDIRECT_CMD_OFF);
+ if (cmd != INDIRECT_CMD_CLR)
+ dev_warn(ctx->dev, "residual cmd 0x%x on read entry\n", cmd);
+
+ writel(reg, ctx->base + INDIRECT_ADDR_OFF);
+ writel(INDIRECT_CMD_RD, ctx->base + INDIRECT_CMD_OFF);
+
+ ret = readl_poll_timeout(ctx->base + INDIRECT_CMD_OFF, ack,
+ (ack & INDIRECT_CMD_ACK) == INDIRECT_CMD_ACK,
+ INDIRECT_INT_US, INDIRECT_TIMEOUT_US);
+ if (ret)
+ dev_err(ctx->dev, "read timed out on reg 0x%x ack 0x%x\n", reg, ack);
+ else
+ tmpval = readl(ctx->base + INDIRECT_RD_OFF);
+
+ ret2 = indirect_clear_cmd(ctx);
+
+ if (ret)
+ return ret;
+ if (ret2)
+ return ret2;
+
+ *val = tmpval;
+ return 0;
+}
+
+static int indirect_reg_write(void *context, unsigned int reg, unsigned int val)
+{
+ struct indirect_ctx *ctx = context;
+ unsigned int cmd, ack;
+ int ret, ret2;
+
+ cmd = readl(ctx->base + INDIRECT_CMD_OFF);
+ if (cmd != INDIRECT_CMD_CLR)
+ dev_warn(ctx->dev, "residual cmd 0x%x on write entry\n", cmd);
+
+ writel(val, ctx->base + INDIRECT_WR_OFF);
+ writel(reg, ctx->base + INDIRECT_ADDR_OFF);
+ writel(INDIRECT_CMD_WR, ctx->base + INDIRECT_CMD_OFF);
+
+ ret = readl_poll_timeout(ctx->base + INDIRECT_CMD_OFF, ack,
+ (ack & INDIRECT_CMD_ACK) == INDIRECT_CMD_ACK,
+ INDIRECT_INT_US, INDIRECT_TIMEOUT_US);
+ if (ret)
+ dev_err(ctx->dev, "write timed out on reg 0x%x ack 0x%x\n", reg, ack);
+
+ ret2 = indirect_clear_cmd(ctx);
+
+ if (ret)
+ return ret;
+ return ret2;
+}
+
+static void pmci_write_fifo(void __iomem *base, const u32 *buf, size_t count)
+{
+ while (count--)
+ writel(*buf++, base);
+}
+
+static void pmci_read_fifo(void __iomem *base, u32 *buf, size_t count)
+{
+ while (count--)
+ *buf++ = readl(base);
+}
+
+static u32 pmci_get_write_space(struct m10bmc_pmci_device *pmci)
+{
+ u32 val;
+ int ret;
+
+ ret = read_poll_timeout(readl, val,
+ FIELD_GET(M10BMC_N6000_FLASH_FIFO_SPACE, val) ==
+ M10BMC_N6000_FIFO_MAX_WORDS,
+ M10BMC_FLASH_INT_US, M10BMC_FLASH_TIMEOUT_US,
+ false, pmci->base + M10BMC_N6000_FLASH_CTRL);
+ if (ret == -ETIMEDOUT)
+ return 0;
+
+ return FIELD_GET(M10BMC_N6000_FLASH_FIFO_SPACE, val) * M10BMC_N6000_FIFO_WORD_SIZE;
+}
+
+static int pmci_flash_bulk_write(struct intel_m10bmc *m10bmc, const u8 *buf, u32 size)
+{
+ struct m10bmc_pmci_device *pmci = container_of(m10bmc, struct m10bmc_pmci_device, m10bmc);
+ u32 blk_size, offset = 0, write_count;
+
+ while (size) {
+ blk_size = min(pmci_get_write_space(pmci), size);
+ if (blk_size == 0) {
+ dev_err(m10bmc->dev, "get FIFO available size fail\n");
+ return -EIO;
+ }
+
+ if (size < M10BMC_N6000_FIFO_WORD_SIZE)
+ break;
+
+ write_count = blk_size / M10BMC_N6000_FIFO_WORD_SIZE;
+ pmci_write_fifo(pmci->base + M10BMC_N6000_FLASH_FIFO,
+ (u32 *)(buf + offset), write_count);
+
+ size -= blk_size;
+ offset += blk_size;
+ }
+
+ /* Handle remainder (less than M10BMC_N6000_FIFO_WORD_SIZE bytes) */
+ if (size) {
+ u32 tmp = 0;
+
+ memcpy(&tmp, buf + offset, size);
+ pmci_write_fifo(pmci->base + M10BMC_N6000_FLASH_FIFO, &tmp, 1);
+ }
+
+ return 0;
+}
+
+static int pmci_flash_bulk_read(struct intel_m10bmc *m10bmc, u8 *buf, u32 addr, u32 size)
+{
+ struct m10bmc_pmci_device *pmci = container_of(m10bmc, struct m10bmc_pmci_device, m10bmc);
+ u32 blk_size, offset = 0, val, full_read_count, read_count;
+ int ret;
+
+ while (size) {
+ blk_size = min_t(u32, size, M10BMC_N6000_READ_BLOCK_SIZE);
+ full_read_count = blk_size / M10BMC_N6000_FIFO_WORD_SIZE;
+
+ read_count = full_read_count;
+ if (full_read_count * M10BMC_N6000_FIFO_WORD_SIZE < blk_size)
+ read_count++;
+
+ writel(addr + offset, pmci->base + M10BMC_N6000_FLASH_ADDR);
+ writel(FIELD_PREP(M10BMC_N6000_FLASH_READ_COUNT, read_count) |
+ M10BMC_N6000_FLASH_RD_MODE,
+ pmci->base + M10BMC_N6000_FLASH_CTRL);
+
+ ret = readl_poll_timeout((pmci->base + M10BMC_N6000_FLASH_CTRL), val,
+ !(val & M10BMC_N6000_FLASH_BUSY),
+ M10BMC_FLASH_INT_US, M10BMC_FLASH_TIMEOUT_US);
+ if (ret) {
+ dev_err(m10bmc->dev, "read timed out on reading flash 0x%xn", val);
+ return ret;
+ }
+
+ pmci_read_fifo(pmci->base + M10BMC_N6000_FLASH_FIFO,
+ (u32 *)(buf + offset), full_read_count);
+
+ size -= blk_size;
+ offset += blk_size;
+
+ if (full_read_count < read_count)
+ break;
+
+ writel(0, pmci->base + M10BMC_N6000_FLASH_CTRL);
+ }
+
+ /* Handle remainder (less than M10BMC_N6000_FIFO_WORD_SIZE bytes) */
+ if (size) {
+ u32 tmp;
+
+ pmci_read_fifo(pmci->base + M10BMC_N6000_FLASH_FIFO, &tmp, 1);
+ memcpy(buf + offset, &tmp, size);
+
+ writel(0, pmci->base + M10BMC_N6000_FLASH_CTRL);
+ }
+
+ return 0;
+}
+
+static int m10bmc_pmci_set_flash_host_mux(struct intel_m10bmc *m10bmc, bool request)
+{
+ u32 ctrl;
+ int ret;
+
+ ret = regmap_update_bits(m10bmc->regmap, M10BMC_N6000_FLASH_MUX_CTRL,
+ M10BMC_N6000_FLASH_HOST_REQUEST,
+ FIELD_PREP(M10BMC_N6000_FLASH_HOST_REQUEST, request));
+ if (ret)
+ return ret;
+
+ return regmap_read_poll_timeout(m10bmc->regmap,
+ M10BMC_N6000_FLASH_MUX_CTRL, ctrl,
+ request ?
+ (get_flash_mux(ctrl) == M10BMC_N6000_FLASH_MUX_HOST) :
+ (get_flash_mux(ctrl) != M10BMC_N6000_FLASH_MUX_HOST),
+ M10BMC_FLASH_INT_US, M10BMC_FLASH_TIMEOUT_US);
+}
+
+static int m10bmc_pmci_flash_read(struct intel_m10bmc *m10bmc, u8 *buf, u32 addr, u32 size)
+{
+ struct m10bmc_pmci_device *pmci = container_of(m10bmc, struct m10bmc_pmci_device, m10bmc);
+ int ret, ret2;
+
+ mutex_lock(&pmci->flash_mutex);
+ if (pmci->flash_busy) {
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ ret = m10bmc_pmci_set_flash_host_mux(m10bmc, true);
+ if (ret)
+ goto mux_fail;
+
+ ret = pmci_flash_bulk_read(m10bmc, buf, addr, size);
+
+mux_fail:
+ ret2 = m10bmc_pmci_set_flash_host_mux(m10bmc, false);
+
+unlock:
+ mutex_unlock(&pmci->flash_mutex);
+ if (ret)
+ return ret;
+ return ret2;
+}
+
+static int m10bmc_pmci_flash_write(struct intel_m10bmc *m10bmc, const u8 *buf, u32 offset, u32 size)
+{
+ struct m10bmc_pmci_device *pmci = container_of(m10bmc, struct m10bmc_pmci_device, m10bmc);
+ int ret;
+
+ mutex_lock(&pmci->flash_mutex);
+ WARN_ON_ONCE(!pmci->flash_busy);
+ /* On write, firmware manages flash MUX */
+ ret = pmci_flash_bulk_write(m10bmc, buf + offset, size);
+ mutex_unlock(&pmci->flash_mutex);
+
+ return ret;
+}
+
+static int m10bmc_pmci_flash_lock(struct intel_m10bmc *m10bmc)
+{
+ struct m10bmc_pmci_device *pmci = container_of(m10bmc, struct m10bmc_pmci_device, m10bmc);
+ int ret = 0;
+
+ mutex_lock(&pmci->flash_mutex);
+ if (pmci->flash_busy) {
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ pmci->flash_busy = true;
+
+unlock:
+ mutex_unlock(&pmci->flash_mutex);
+ return ret;
+}
+
+static void m10bmc_pmci_flash_unlock(struct intel_m10bmc *m10bmc)
+{
+ struct m10bmc_pmci_device *pmci = container_of(m10bmc, struct m10bmc_pmci_device, m10bmc);
+
+ mutex_lock(&pmci->flash_mutex);
+ WARN_ON_ONCE(!pmci->flash_busy);
+ pmci->flash_busy = false;
+ mutex_unlock(&pmci->flash_mutex);
+}
+
+static const struct intel_m10bmc_flash_bulk_ops m10bmc_pmci_flash_bulk_ops = {
+ .read = m10bmc_pmci_flash_read,
+ .write = m10bmc_pmci_flash_write,
+ .lock_write = m10bmc_pmci_flash_lock,
+ .unlock_write = m10bmc_pmci_flash_unlock,
+};
+
+static const struct regmap_range m10bmc_pmci_regmap_range[] = {
+ regmap_reg_range(M10BMC_N6000_SYS_BASE, M10BMC_N6000_SYS_END),
+};
+
+static const struct regmap_access_table m10bmc_pmci_access_table = {
+ .yes_ranges = m10bmc_pmci_regmap_range,
+ .n_yes_ranges = ARRAY_SIZE(m10bmc_pmci_regmap_range),
+};
+
+static struct regmap_config m10bmc_pmci_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .wr_table = &m10bmc_pmci_access_table,
+ .rd_table = &m10bmc_pmci_access_table,
+ .reg_read = &indirect_reg_read,
+ .reg_write = &indirect_reg_write,
+ .max_register = M10BMC_N6000_SYS_END,
+};
+
+static struct mfd_cell m10bmc_pmci_n6000_bmc_subdevs[] = {
+ { .name = "n6000bmc-hwmon" },
+ { .name = "n6000bmc-sec-update" },
+};
+
+static const struct m10bmc_csr_map m10bmc_n6000_csr_map = {
+ .base = M10BMC_N6000_SYS_BASE,
+ .build_version = M10BMC_N6000_BUILD_VER,
+ .fw_version = NIOS2_N6000_FW_VERSION,
+ .mac_low = M10BMC_N6000_MAC_LOW,
+ .mac_high = M10BMC_N6000_MAC_HIGH,
+ .doorbell = M10BMC_N6000_DOORBELL,
+ .auth_result = M10BMC_N6000_AUTH_RESULT,
+ .bmc_prog_addr = M10BMC_N6000_BMC_PROG_ADDR,
+ .bmc_reh_addr = M10BMC_N6000_BMC_REH_ADDR,
+ .bmc_magic = M10BMC_N6000_BMC_PROG_MAGIC,
+ .sr_prog_addr = M10BMC_N6000_SR_PROG_ADDR,
+ .sr_reh_addr = M10BMC_N6000_SR_REH_ADDR,
+ .sr_magic = M10BMC_N6000_SR_PROG_MAGIC,
+ .pr_prog_addr = M10BMC_N6000_PR_PROG_ADDR,
+ .pr_reh_addr = M10BMC_N6000_PR_REH_ADDR,
+ .pr_magic = M10BMC_N6000_PR_PROG_MAGIC,
+ .rsu_update_counter = M10BMC_N6000_STAGING_FLASH_COUNT,
+};
+
+static const struct intel_m10bmc_platform_info m10bmc_pmci_n6000 = {
+ .cells = m10bmc_pmci_n6000_bmc_subdevs,
+ .n_cells = ARRAY_SIZE(m10bmc_pmci_n6000_bmc_subdevs),
+ .csr_map = &m10bmc_n6000_csr_map,
+};
+
+static int m10bmc_pmci_probe(struct dfl_device *ddev)
+{
+ struct device *dev = &ddev->dev;
+ struct m10bmc_pmci_device *pmci;
+ struct indirect_ctx *ctx;
+ int ret;
+
+ pmci = devm_kzalloc(dev, sizeof(*pmci), GFP_KERNEL);
+ if (!pmci)
+ return -ENOMEM;
+
+ pmci->m10bmc.flash_bulk_ops = &m10bmc_pmci_flash_bulk_ops;
+ pmci->m10bmc.dev = dev;
+
+ pmci->base = devm_ioremap_resource(dev, &ddev->mmio_res);
+ if (IS_ERR(pmci->base))
+ return PTR_ERR(pmci->base);
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ mutex_init(&pmci->flash_mutex);
+
+ ctx->base = pmci->base + M10BMC_N6000_INDIRECT_BASE;
+ ctx->dev = dev;
+ indirect_clear_cmd(ctx);
+ pmci->m10bmc.regmap = devm_regmap_init(dev, NULL, ctx, &m10bmc_pmci_regmap_config);
+
+ if (IS_ERR(pmci->m10bmc.regmap)) {
+ ret = PTR_ERR(pmci->m10bmc.regmap);
+ goto destroy_mutex;
+ }
+
+ ret = m10bmc_dev_init(&pmci->m10bmc, &m10bmc_pmci_n6000);
+ if (ret)
+ goto destroy_mutex;
+ return 0;
+
+destroy_mutex:
+ mutex_destroy(&pmci->flash_mutex);
+ return ret;
+}
+
+static void m10bmc_pmci_remove(struct dfl_device *ddev)
+{
+ struct intel_m10bmc *m10bmc = dev_get_drvdata(&ddev->dev);
+ struct m10bmc_pmci_device *pmci = container_of(m10bmc, struct m10bmc_pmci_device, m10bmc);
+
+ mutex_destroy(&pmci->flash_mutex);
+}
+
+#define FME_FEATURE_ID_M10BMC_PMCI 0x12
+
+static const struct dfl_device_id m10bmc_pmci_ids[] = {
+ { FME_ID, FME_FEATURE_ID_M10BMC_PMCI },
+ { }
+};
+MODULE_DEVICE_TABLE(dfl, m10bmc_pmci_ids);
+
+static struct dfl_driver m10bmc_pmci_driver = {
+ .drv = {
+ .name = "intel-m10-bmc",
+ .dev_groups = m10bmc_dev_groups,
+ },
+ .id_table = m10bmc_pmci_ids,
+ .probe = m10bmc_pmci_probe,
+ .remove = m10bmc_pmci_remove,
+};
+
+module_dfl_driver(m10bmc_pmci_driver);
+
+MODULE_DESCRIPTION("MAX10 BMC PMCI-based interface");
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/intel-m10-bmc-spi.c b/drivers/mfd/intel-m10-bmc-spi.c
new file mode 100644
index 000000000000..957200e17fed
--- /dev/null
+++ b/drivers/mfd/intel-m10-bmc-spi.c
@@ -0,0 +1,168 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel MAX 10 Board Management Controller chip
+ *
+ * Copyright (C) 2018-2020 Intel Corporation. All rights reserved.
+ */
+#include <linux/bitfield.h>
+#include <linux/dev_printk.h>
+#include <linux/init.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/intel-m10-bmc.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/spi/spi.h>
+
+static const struct regmap_range m10bmc_regmap_range[] = {
+ regmap_reg_range(M10BMC_N3000_LEGACY_BUILD_VER, M10BMC_N3000_LEGACY_BUILD_VER),
+ regmap_reg_range(M10BMC_N3000_SYS_BASE, M10BMC_N3000_SYS_END),
+ regmap_reg_range(M10BMC_N3000_FLASH_BASE, M10BMC_N3000_FLASH_END),
+};
+
+static const struct regmap_access_table m10bmc_access_table = {
+ .yes_ranges = m10bmc_regmap_range,
+ .n_yes_ranges = ARRAY_SIZE(m10bmc_regmap_range),
+};
+
+static struct regmap_config intel_m10bmc_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .wr_table = &m10bmc_access_table,
+ .rd_table = &m10bmc_access_table,
+ .max_register = M10BMC_N3000_MEM_END,
+};
+
+static int check_m10bmc_version(struct intel_m10bmc *ddata)
+{
+ unsigned int v;
+ int ret;
+
+ /*
+ * This check is to filter out the very old legacy BMC versions. In the
+ * old BMC chips, the BMC version info is stored in the old version
+ * register (M10BMC_N3000_LEGACY_BUILD_VER), so its read out value would have
+ * not been M10BMC_N3000_VER_LEGACY_INVALID (0xffffffff). But in new BMC
+ * chips that the driver supports, the value of this register should be
+ * M10BMC_N3000_VER_LEGACY_INVALID.
+ */
+ ret = m10bmc_raw_read(ddata, M10BMC_N3000_LEGACY_BUILD_VER, &v);
+ if (ret)
+ return -ENODEV;
+
+ if (v != M10BMC_N3000_VER_LEGACY_INVALID) {
+ dev_err(ddata->dev, "bad version M10BMC detected\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int intel_m10_bmc_spi_probe(struct spi_device *spi)
+{
+ const struct spi_device_id *id = spi_get_device_id(spi);
+ const struct intel_m10bmc_platform_info *info;
+ struct device *dev = &spi->dev;
+ struct intel_m10bmc *ddata;
+ int ret;
+
+ ddata = devm_kzalloc(dev, sizeof(*ddata), GFP_KERNEL);
+ if (!ddata)
+ return -ENOMEM;
+
+ info = (struct intel_m10bmc_platform_info *)id->driver_data;
+ ddata->dev = dev;
+
+ ddata->regmap = devm_regmap_init_spi_avmm(spi, &intel_m10bmc_regmap_config);
+ if (IS_ERR(ddata->regmap)) {
+ ret = PTR_ERR(ddata->regmap);
+ dev_err(dev, "Failed to allocate regmap: %d\n", ret);
+ return ret;
+ }
+
+ spi_set_drvdata(spi, ddata);
+
+ ret = check_m10bmc_version(ddata);
+ if (ret) {
+ dev_err(dev, "Failed to identify m10bmc hardware\n");
+ return ret;
+ }
+
+ return m10bmc_dev_init(ddata, info);
+}
+
+static const struct m10bmc_csr_map m10bmc_n3000_csr_map = {
+ .base = M10BMC_N3000_SYS_BASE,
+ .build_version = M10BMC_N3000_BUILD_VER,
+ .fw_version = NIOS2_N3000_FW_VERSION,
+ .mac_low = M10BMC_N3000_MAC_LOW,
+ .mac_high = M10BMC_N3000_MAC_HIGH,
+ .doorbell = M10BMC_N3000_DOORBELL,
+ .auth_result = M10BMC_N3000_AUTH_RESULT,
+ .bmc_prog_addr = M10BMC_N3000_BMC_PROG_ADDR,
+ .bmc_reh_addr = M10BMC_N3000_BMC_REH_ADDR,
+ .bmc_magic = M10BMC_N3000_BMC_PROG_MAGIC,
+ .sr_prog_addr = M10BMC_N3000_SR_PROG_ADDR,
+ .sr_reh_addr = M10BMC_N3000_SR_REH_ADDR,
+ .sr_magic = M10BMC_N3000_SR_PROG_MAGIC,
+ .pr_prog_addr = M10BMC_N3000_PR_PROG_ADDR,
+ .pr_reh_addr = M10BMC_N3000_PR_REH_ADDR,
+ .pr_magic = M10BMC_N3000_PR_PROG_MAGIC,
+ .rsu_update_counter = M10BMC_N3000_STAGING_FLASH_COUNT,
+};
+
+static struct mfd_cell m10bmc_d5005_subdevs[] = {
+ { .name = "d5005bmc-hwmon" },
+ { .name = "d5005bmc-sec-update" },
+};
+
+static struct mfd_cell m10bmc_pacn3000_subdevs[] = {
+ { .name = "n3000bmc-hwmon" },
+ { .name = "n3000bmc-retimer" },
+ { .name = "n3000bmc-sec-update" },
+};
+
+static struct mfd_cell m10bmc_n5010_subdevs[] = {
+ { .name = "n5010bmc-hwmon" },
+};
+
+static const struct intel_m10bmc_platform_info m10bmc_spi_n3000 = {
+ .cells = m10bmc_pacn3000_subdevs,
+ .n_cells = ARRAY_SIZE(m10bmc_pacn3000_subdevs),
+ .csr_map = &m10bmc_n3000_csr_map,
+};
+
+static const struct intel_m10bmc_platform_info m10bmc_spi_d5005 = {
+ .cells = m10bmc_d5005_subdevs,
+ .n_cells = ARRAY_SIZE(m10bmc_d5005_subdevs),
+ .csr_map = &m10bmc_n3000_csr_map,
+};
+
+static const struct intel_m10bmc_platform_info m10bmc_spi_n5010 = {
+ .cells = m10bmc_n5010_subdevs,
+ .n_cells = ARRAY_SIZE(m10bmc_n5010_subdevs),
+ .csr_map = &m10bmc_n3000_csr_map,
+};
+
+static const struct spi_device_id m10bmc_spi_id[] = {
+ { "m10-n3000", (kernel_ulong_t)&m10bmc_spi_n3000 },
+ { "m10-d5005", (kernel_ulong_t)&m10bmc_spi_d5005 },
+ { "m10-n5010", (kernel_ulong_t)&m10bmc_spi_n5010 },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, m10bmc_spi_id);
+
+static struct spi_driver intel_m10bmc_spi_driver = {
+ .driver = {
+ .name = "intel-m10-bmc",
+ .dev_groups = m10bmc_dev_groups,
+ },
+ .probe = intel_m10_bmc_spi_probe,
+ .id_table = m10bmc_spi_id,
+};
+module_spi_driver(intel_m10bmc_spi_driver);
+
+MODULE_DESCRIPTION("Intel MAX 10 BMC SPI bus interface");
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("spi:intel-m10-bmc");
diff --git a/drivers/mfd/intel-m10-bmc.c b/drivers/mfd/intel-m10-bmc.c
deleted file mode 100644
index 7e3319e5b22f..000000000000
--- a/drivers/mfd/intel-m10-bmc.c
+++ /dev/null
@@ -1,238 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Intel MAX 10 Board Management Controller chip
- *
- * Copyright (C) 2018-2020 Intel Corporation. All rights reserved.
- */
-#include <linux/bitfield.h>
-#include <linux/init.h>
-#include <linux/mfd/core.h>
-#include <linux/mfd/intel-m10-bmc.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/regmap.h>
-#include <linux/spi/spi.h>
-
-enum m10bmc_type {
- M10_N3000,
- M10_D5005,
- M10_N5010,
-};
-
-static struct mfd_cell m10bmc_d5005_subdevs[] = {
- { .name = "d5005bmc-hwmon" },
- { .name = "d5005bmc-sec-update" }
-};
-
-static struct mfd_cell m10bmc_pacn3000_subdevs[] = {
- { .name = "n3000bmc-hwmon" },
- { .name = "n3000bmc-retimer" },
- { .name = "n3000bmc-sec-update" },
-};
-
-static struct mfd_cell m10bmc_n5010_subdevs[] = {
- { .name = "n5010bmc-hwmon" },
-};
-
-static const struct regmap_range m10bmc_regmap_range[] = {
- regmap_reg_range(M10BMC_LEGACY_BUILD_VER, M10BMC_LEGACY_BUILD_VER),
- regmap_reg_range(M10BMC_SYS_BASE, M10BMC_SYS_END),
- regmap_reg_range(M10BMC_FLASH_BASE, M10BMC_FLASH_END),
-};
-
-static const struct regmap_access_table m10bmc_access_table = {
- .yes_ranges = m10bmc_regmap_range,
- .n_yes_ranges = ARRAY_SIZE(m10bmc_regmap_range),
-};
-
-static struct regmap_config intel_m10bmc_regmap_config = {
- .reg_bits = 32,
- .val_bits = 32,
- .reg_stride = 4,
- .wr_table = &m10bmc_access_table,
- .rd_table = &m10bmc_access_table,
- .max_register = M10BMC_MEM_END,
-};
-
-static ssize_t bmc_version_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct intel_m10bmc *ddata = dev_get_drvdata(dev);
- unsigned int val;
- int ret;
-
- ret = m10bmc_sys_read(ddata, M10BMC_BUILD_VER, &val);
- if (ret)
- return ret;
-
- return sprintf(buf, "0x%x\n", val);
-}
-static DEVICE_ATTR_RO(bmc_version);
-
-static ssize_t bmcfw_version_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct intel_m10bmc *ddata = dev_get_drvdata(dev);
- unsigned int val;
- int ret;
-
- ret = m10bmc_sys_read(ddata, NIOS2_FW_VERSION, &val);
- if (ret)
- return ret;
-
- return sprintf(buf, "0x%x\n", val);
-}
-static DEVICE_ATTR_RO(bmcfw_version);
-
-static ssize_t mac_address_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct intel_m10bmc *max10 = dev_get_drvdata(dev);
- unsigned int macaddr_low, macaddr_high;
- int ret;
-
- ret = m10bmc_sys_read(max10, M10BMC_MAC_LOW, &macaddr_low);
- if (ret)
- return ret;
-
- ret = m10bmc_sys_read(max10, M10BMC_MAC_HIGH, &macaddr_high);
- if (ret)
- return ret;
-
- return sysfs_emit(buf, "%02x:%02x:%02x:%02x:%02x:%02x\n",
- (u8)FIELD_GET(M10BMC_MAC_BYTE1, macaddr_low),
- (u8)FIELD_GET(M10BMC_MAC_BYTE2, macaddr_low),
- (u8)FIELD_GET(M10BMC_MAC_BYTE3, macaddr_low),
- (u8)FIELD_GET(M10BMC_MAC_BYTE4, macaddr_low),
- (u8)FIELD_GET(M10BMC_MAC_BYTE5, macaddr_high),
- (u8)FIELD_GET(M10BMC_MAC_BYTE6, macaddr_high));
-}
-static DEVICE_ATTR_RO(mac_address);
-
-static ssize_t mac_count_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct intel_m10bmc *max10 = dev_get_drvdata(dev);
- unsigned int macaddr_high;
- int ret;
-
- ret = m10bmc_sys_read(max10, M10BMC_MAC_HIGH, &macaddr_high);
- if (ret)
- return ret;
-
- return sysfs_emit(buf, "%u\n",
- (u8)FIELD_GET(M10BMC_MAC_COUNT, macaddr_high));
-}
-static DEVICE_ATTR_RO(mac_count);
-
-static struct attribute *m10bmc_attrs[] = {
- &dev_attr_bmc_version.attr,
- &dev_attr_bmcfw_version.attr,
- &dev_attr_mac_address.attr,
- &dev_attr_mac_count.attr,
- NULL,
-};
-ATTRIBUTE_GROUPS(m10bmc);
-
-static int check_m10bmc_version(struct intel_m10bmc *ddata)
-{
- unsigned int v;
- int ret;
-
- /*
- * This check is to filter out the very old legacy BMC versions. In the
- * old BMC chips, the BMC version info is stored in the old version
- * register (M10BMC_LEGACY_BUILD_VER), so its read out value would have
- * not been M10BMC_VER_LEGACY_INVALID (0xffffffff). But in new BMC
- * chips that the driver supports, the value of this register should be
- * M10BMC_VER_LEGACY_INVALID.
- */
- ret = m10bmc_raw_read(ddata, M10BMC_LEGACY_BUILD_VER, &v);
- if (ret)
- return -ENODEV;
-
- if (v != M10BMC_VER_LEGACY_INVALID) {
- dev_err(ddata->dev, "bad version M10BMC detected\n");
- return -ENODEV;
- }
-
- return 0;
-}
-
-static int intel_m10_bmc_spi_probe(struct spi_device *spi)
-{
- const struct spi_device_id *id = spi_get_device_id(spi);
- struct device *dev = &spi->dev;
- struct mfd_cell *cells;
- struct intel_m10bmc *ddata;
- int ret, n_cell;
-
- ddata = devm_kzalloc(dev, sizeof(*ddata), GFP_KERNEL);
- if (!ddata)
- return -ENOMEM;
-
- ddata->dev = dev;
-
- ddata->regmap =
- devm_regmap_init_spi_avmm(spi, &intel_m10bmc_regmap_config);
- if (IS_ERR(ddata->regmap)) {
- ret = PTR_ERR(ddata->regmap);
- dev_err(dev, "Failed to allocate regmap: %d\n", ret);
- return ret;
- }
-
- spi_set_drvdata(spi, ddata);
-
- ret = check_m10bmc_version(ddata);
- if (ret) {
- dev_err(dev, "Failed to identify m10bmc hardware\n");
- return ret;
- }
-
- switch (id->driver_data) {
- case M10_N3000:
- cells = m10bmc_pacn3000_subdevs;
- n_cell = ARRAY_SIZE(m10bmc_pacn3000_subdevs);
- break;
- case M10_D5005:
- cells = m10bmc_d5005_subdevs;
- n_cell = ARRAY_SIZE(m10bmc_d5005_subdevs);
- break;
- case M10_N5010:
- cells = m10bmc_n5010_subdevs;
- n_cell = ARRAY_SIZE(m10bmc_n5010_subdevs);
- break;
- default:
- return -ENODEV;
- }
-
- ret = devm_mfd_add_devices(dev, PLATFORM_DEVID_AUTO, cells, n_cell,
- NULL, 0, NULL);
- if (ret)
- dev_err(dev, "Failed to register sub-devices: %d\n", ret);
-
- return ret;
-}
-
-static const struct spi_device_id m10bmc_spi_id[] = {
- { "m10-n3000", M10_N3000 },
- { "m10-d5005", M10_D5005 },
- { "m10-n5010", M10_N5010 },
- { }
-};
-MODULE_DEVICE_TABLE(spi, m10bmc_spi_id);
-
-static struct spi_driver intel_m10bmc_spi_driver = {
- .driver = {
- .name = "intel-m10-bmc",
- .dev_groups = m10bmc_groups,
- },
- .probe = intel_m10_bmc_spi_probe,
- .id_table = m10bmc_spi_id,
-};
-module_spi_driver(intel_m10bmc_spi_driver);
-
-MODULE_DESCRIPTION("Intel MAX 10 BMC Device Driver");
-MODULE_AUTHOR("Intel Corporation");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("spi:intel-m10-bmc");
diff --git a/drivers/mfd/intel_soc_pmic_chtwc.c b/drivers/mfd/intel_soc_pmic_chtwc.c
index 9216f0d34206..d53dae255490 100644
--- a/drivers/mfd/intel_soc_pmic_chtwc.c
+++ b/drivers/mfd/intel_soc_pmic_chtwc.c
@@ -165,6 +165,14 @@ static const struct dmi_system_id cht_wc_model_dmi_ids[] = {
/* Non exact match to match all versions */
DMI_MATCH(DMI_PRODUCT_NAME, "Lenovo YB1-X9"),
},
+ }, {
+ /* Lenovo Yoga Tab 3 Pro YT3-X90F */
+ .driver_data = (void *)(long)INTEL_CHT_WC_LENOVO_YT3_X90,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Blade3-10A-001"),
+ },
},
{ }
};
diff --git a/drivers/mfd/kempld-core.c b/drivers/mfd/kempld-core.c
index bb26241c73bd..33c6cfe9fe42 100644
--- a/drivers/mfd/kempld-core.c
+++ b/drivers/mfd/kempld-core.c
@@ -349,7 +349,7 @@ static ssize_t pld_version_show(struct device *dev,
{
struct kempld_device_data *pld = dev_get_drvdata(dev);
- return scnprintf(buf, PAGE_SIZE, "%s\n", pld->info.version);
+ return sysfs_emit(buf, "%s\n", pld->info.version);
}
static ssize_t pld_specification_show(struct device *dev,
@@ -357,8 +357,7 @@ static ssize_t pld_specification_show(struct device *dev,
{
struct kempld_device_data *pld = dev_get_drvdata(dev);
- return scnprintf(buf, PAGE_SIZE, "%d.%d\n", pld->info.spec_major,
- pld->info.spec_minor);
+ return sysfs_emit(buf, "%d.%d\n", pld->info.spec_major, pld->info.spec_minor);
}
static ssize_t pld_type_show(struct device *dev,
@@ -366,7 +365,7 @@ static ssize_t pld_type_show(struct device *dev,
{
struct kempld_device_data *pld = dev_get_drvdata(dev);
- return scnprintf(buf, PAGE_SIZE, "%s\n", kempld_get_type_string(pld));
+ return sysfs_emit(buf, "%s\n", kempld_get_type_string(pld));
}
static DEVICE_ATTR_RO(pld_version);
diff --git a/drivers/mfd/lm3533-core.c b/drivers/mfd/lm3533-core.c
index 74a553329416..946f94f3a3c3 100644
--- a/drivers/mfd/lm3533-core.c
+++ b/drivers/mfd/lm3533-core.c
@@ -286,7 +286,7 @@ static ssize_t show_output(struct device *dev,
val = (val & mask) >> shift;
- return scnprintf(buf, PAGE_SIZE, "%u\n", val);
+ return sysfs_emit(buf, "%u\n", val);
}
static ssize_t store_output(struct device *dev,
diff --git a/drivers/mfd/max8925-core.c b/drivers/mfd/max8925-core.c
index eb3f061c8ee6..0246bbe80354 100644
--- a/drivers/mfd/max8925-core.c
+++ b/drivers/mfd/max8925-core.c
@@ -469,12 +469,6 @@ static struct max8925_irq_data max8925_irqs[] = {
},
};
-static inline struct max8925_irq_data *irq_to_max8925(struct max8925_chip *chip,
- int irq)
-{
- return &max8925_irqs[irq - chip->irq_base];
-}
-
static irqreturn_t max8925_irq(int irq, void *data)
{
struct max8925_chip *chip = data;
diff --git a/drivers/mfd/ntxec.c b/drivers/mfd/ntxec.c
index e16a7a82a929..b02785b10d48 100644
--- a/drivers/mfd/ntxec.c
+++ b/drivers/mfd/ntxec.c
@@ -175,6 +175,7 @@ static int ntxec_probe(struct i2c_client *client)
/* Bail out if we encounter an unknown firmware version */
switch (version) {
case NTXEC_VERSION_KOBO_AURA:
+ case NTXEC_VERSION_TOLINO_VISION:
subdevs = ntxec_subdev;
n_subdevs = ARRAY_SIZE(ntxec_subdev);
break;
diff --git a/drivers/mfd/ocelot-core.c b/drivers/mfd/ocelot-core.c
index 1816d52c65c5..b0ff05c1759f 100644
--- a/drivers/mfd/ocelot-core.c
+++ b/drivers/mfd/ocelot-core.c
@@ -34,16 +34,49 @@
#define VSC7512_MIIM0_RES_START 0x7107009c
#define VSC7512_MIIM1_RES_START 0x710700c0
-#define VSC7512_MIIM_RES_SIZE 0x024
+#define VSC7512_MIIM_RES_SIZE 0x00000024
#define VSC7512_PHY_RES_START 0x710700f0
-#define VSC7512_PHY_RES_SIZE 0x004
+#define VSC7512_PHY_RES_SIZE 0x00000004
#define VSC7512_GPIO_RES_START 0x71070034
-#define VSC7512_GPIO_RES_SIZE 0x06c
+#define VSC7512_GPIO_RES_SIZE 0x0000006c
#define VSC7512_SIO_CTRL_RES_START 0x710700f8
-#define VSC7512_SIO_CTRL_RES_SIZE 0x100
+#define VSC7512_SIO_CTRL_RES_SIZE 0x00000100
+
+#define VSC7512_ANA_RES_START 0x71880000
+#define VSC7512_ANA_RES_SIZE 0x00010000
+
+#define VSC7512_QS_RES_START 0x71080000
+#define VSC7512_QS_RES_SIZE 0x00000100
+
+#define VSC7512_QSYS_RES_START 0x71800000
+#define VSC7512_QSYS_RES_SIZE 0x00200000
+
+#define VSC7512_REW_RES_START 0x71030000
+#define VSC7512_REW_RES_SIZE 0x00010000
+
+#define VSC7512_SYS_RES_START 0x71010000
+#define VSC7512_SYS_RES_SIZE 0x00010000
+
+#define VSC7512_S0_RES_START 0x71040000
+#define VSC7512_S1_RES_START 0x71050000
+#define VSC7512_S2_RES_START 0x71060000
+#define VCAP_RES_SIZE 0x00000400
+
+#define VSC7512_PORT_0_RES_START 0x711e0000
+#define VSC7512_PORT_1_RES_START 0x711f0000
+#define VSC7512_PORT_2_RES_START 0x71200000
+#define VSC7512_PORT_3_RES_START 0x71210000
+#define VSC7512_PORT_4_RES_START 0x71220000
+#define VSC7512_PORT_5_RES_START 0x71230000
+#define VSC7512_PORT_6_RES_START 0x71240000
+#define VSC7512_PORT_7_RES_START 0x71250000
+#define VSC7512_PORT_8_RES_START 0x71260000
+#define VSC7512_PORT_9_RES_START 0x71270000
+#define VSC7512_PORT_10_RES_START 0x71280000
+#define VSC7512_PORT_RES_SIZE 0x00010000
#define VSC7512_GCB_RST_SLEEP_US 100
#define VSC7512_GCB_RST_TIMEOUT_US 100000
@@ -96,6 +129,28 @@ static const struct resource vsc7512_sgpio_resources[] = {
DEFINE_RES_REG_NAMED(VSC7512_SIO_CTRL_RES_START, VSC7512_SIO_CTRL_RES_SIZE, "gcb_sio"),
};
+static const struct resource vsc7512_switch_resources[] = {
+ DEFINE_RES_REG_NAMED(VSC7512_ANA_RES_START, VSC7512_ANA_RES_SIZE, "ana"),
+ DEFINE_RES_REG_NAMED(VSC7512_QS_RES_START, VSC7512_QS_RES_SIZE, "qs"),
+ DEFINE_RES_REG_NAMED(VSC7512_QSYS_RES_START, VSC7512_QSYS_RES_SIZE, "qsys"),
+ DEFINE_RES_REG_NAMED(VSC7512_REW_RES_START, VSC7512_REW_RES_SIZE, "rew"),
+ DEFINE_RES_REG_NAMED(VSC7512_SYS_RES_START, VSC7512_SYS_RES_SIZE, "sys"),
+ DEFINE_RES_REG_NAMED(VSC7512_S0_RES_START, VCAP_RES_SIZE, "s0"),
+ DEFINE_RES_REG_NAMED(VSC7512_S1_RES_START, VCAP_RES_SIZE, "s1"),
+ DEFINE_RES_REG_NAMED(VSC7512_S2_RES_START, VCAP_RES_SIZE, "s2"),
+ DEFINE_RES_REG_NAMED(VSC7512_PORT_0_RES_START, VSC7512_PORT_RES_SIZE, "port0"),
+ DEFINE_RES_REG_NAMED(VSC7512_PORT_1_RES_START, VSC7512_PORT_RES_SIZE, "port1"),
+ DEFINE_RES_REG_NAMED(VSC7512_PORT_2_RES_START, VSC7512_PORT_RES_SIZE, "port2"),
+ DEFINE_RES_REG_NAMED(VSC7512_PORT_3_RES_START, VSC7512_PORT_RES_SIZE, "port3"),
+ DEFINE_RES_REG_NAMED(VSC7512_PORT_4_RES_START, VSC7512_PORT_RES_SIZE, "port4"),
+ DEFINE_RES_REG_NAMED(VSC7512_PORT_5_RES_START, VSC7512_PORT_RES_SIZE, "port5"),
+ DEFINE_RES_REG_NAMED(VSC7512_PORT_6_RES_START, VSC7512_PORT_RES_SIZE, "port6"),
+ DEFINE_RES_REG_NAMED(VSC7512_PORT_7_RES_START, VSC7512_PORT_RES_SIZE, "port7"),
+ DEFINE_RES_REG_NAMED(VSC7512_PORT_8_RES_START, VSC7512_PORT_RES_SIZE, "port8"),
+ DEFINE_RES_REG_NAMED(VSC7512_PORT_9_RES_START, VSC7512_PORT_RES_SIZE, "port9"),
+ DEFINE_RES_REG_NAMED(VSC7512_PORT_10_RES_START, VSC7512_PORT_RES_SIZE, "port10")
+};
+
static const struct mfd_cell vsc7512_devs[] = {
{
.name = "ocelot-pinctrl",
@@ -121,6 +176,11 @@ static const struct mfd_cell vsc7512_devs[] = {
.use_of_reg = true,
.num_resources = ARRAY_SIZE(vsc7512_miim1_resources),
.resources = vsc7512_miim1_resources,
+ }, {
+ .name = "ocelot-switch",
+ .of_compatible = "mscc,vsc7512-switch",
+ .num_resources = ARRAY_SIZE(vsc7512_switch_resources),
+ .resources = vsc7512_switch_resources,
},
};
diff --git a/drivers/mfd/pcf50633-adc.c b/drivers/mfd/pcf50633-adc.c
index 5cd653e61512..191b1bc6141c 100644
--- a/drivers/mfd/pcf50633-adc.c
+++ b/drivers/mfd/pcf50633-adc.c
@@ -136,6 +136,7 @@ int pcf50633_adc_async_read(struct pcf50633 *pcf, int mux, int avg,
void *callback_param)
{
struct pcf50633_adc_request *req;
+ int ret;
/* req is freed when the result is ready, in interrupt handler */
req = kmalloc(sizeof(*req), GFP_KERNEL);
@@ -147,7 +148,11 @@ int pcf50633_adc_async_read(struct pcf50633 *pcf, int mux, int avg,
req->callback = callback;
req->callback_param = callback_param;
- return adc_enqueue_request(pcf, req);
+ ret = adc_enqueue_request(pcf, req);
+ if (ret)
+ kfree(req);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(pcf50633_adc_async_read);
diff --git a/drivers/mfd/qcom-pm8xxx.c b/drivers/mfd/qcom-pm8xxx.c
index 601106580e2e..9a948df8c28d 100644
--- a/drivers/mfd/qcom-pm8xxx.c
+++ b/drivers/mfd/qcom-pm8xxx.c
@@ -510,7 +510,6 @@ static int pm8xxx_probe(struct platform_device *pdev)
struct regmap *regmap;
int irq, rc;
unsigned int val;
- u32 rev;
struct pm_irq_chip *chip;
data = of_device_get_match_data(&pdev->dev);
@@ -535,7 +534,6 @@ static int pm8xxx_probe(struct platform_device *pdev)
return rc;
}
pr_info("PMIC revision 1: %02X\n", val);
- rev = val;
/* Read PMIC chip revision 2 */
rc = regmap_read(regmap, REG_HWREV_2, &val);
@@ -545,7 +543,6 @@ static int pm8xxx_probe(struct platform_device *pdev)
return rc;
}
pr_info("PMIC revision 2: %02X\n", val);
- rev |= val << BITS_PER_BYTE;
chip = devm_kzalloc(&pdev->dev,
struct_size(chip, config, data->num_irqs),
diff --git a/drivers/mfd/rk808.c b/drivers/mfd/rk808.c
index f44fc3f080a8..0f22ef61e817 100644
--- a/drivers/mfd/rk808.c
+++ b/drivers/mfd/rk808.c
@@ -189,6 +189,7 @@ static const struct mfd_cell rk817s[] = {
};
static const struct mfd_cell rk818s[] = {
+ { .name = "rk808-clkout", .id = PLATFORM_DEVID_NONE, },
{ .name = "rk808-regulator", .id = PLATFORM_DEVID_NONE, },
{
.name = "rk808-rtc",
diff --git a/drivers/mfd/simple-mfd-i2c.c b/drivers/mfd/simple-mfd-i2c.c
index f4c8fc3ee463..e31f13fd6a79 100644
--- a/drivers/mfd/simple-mfd-i2c.c
+++ b/drivers/mfd/simple-mfd-i2c.c
@@ -48,7 +48,7 @@ static int simple_mfd_i2c_probe(struct i2c_client *i2c)
if (IS_ERR(regmap))
return PTR_ERR(regmap);
- /* If no MFD cells are spedified, use register the DT child nodes instead */
+ /* If no MFD cells are specified, register using the DT child nodes instead */
if (!simple_mfd_data || !simple_mfd_data->mfd_cell)
return devm_of_platform_populate(&i2c->dev);
diff --git a/drivers/mfd/syscon.c b/drivers/mfd/syscon.c
index bdb2ce7ff03b..57b29c325131 100644
--- a/drivers/mfd/syscon.c
+++ b/drivers/mfd/syscon.c
@@ -20,6 +20,7 @@
#include <linux/platform_data/syscon.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
+#include <linux/reset.h>
#include <linux/mfd/syscon.h>
#include <linux/slab.h>
@@ -31,6 +32,7 @@ static LIST_HEAD(syscon_list);
struct syscon {
struct device_node *np;
struct regmap *regmap;
+ struct reset_control *reset;
struct list_head list;
};
@@ -40,7 +42,7 @@ static const struct regmap_config syscon_regmap_config = {
.reg_stride = 4,
};
-static struct syscon *of_syscon_register(struct device_node *np, bool check_clk)
+static struct syscon *of_syscon_register(struct device_node *np, bool check_res)
{
struct clk *clk;
struct syscon *syscon;
@@ -50,6 +52,7 @@ static struct syscon *of_syscon_register(struct device_node *np, bool check_clk)
int ret;
struct regmap_config syscon_config = syscon_regmap_config;
struct resource res;
+ struct reset_control *reset;
syscon = kzalloc(sizeof(*syscon), GFP_KERNEL);
if (!syscon)
@@ -114,7 +117,7 @@ static struct syscon *of_syscon_register(struct device_node *np, bool check_clk)
goto err_regmap;
}
- if (check_clk) {
+ if (check_res) {
clk = of_clk_get(np, 0);
if (IS_ERR(clk)) {
ret = PTR_ERR(clk);
@@ -124,8 +127,18 @@ static struct syscon *of_syscon_register(struct device_node *np, bool check_clk)
} else {
ret = regmap_mmio_attach_clk(regmap, clk);
if (ret)
- goto err_attach;
+ goto err_attach_clk;
}
+
+ reset = of_reset_control_get_optional_exclusive(np, NULL);
+ if (IS_ERR(reset)) {
+ ret = PTR_ERR(reset);
+ goto err_attach_clk;
+ }
+
+ ret = reset_control_deassert(reset);
+ if (ret)
+ goto err_reset;
}
syscon->regmap = regmap;
@@ -137,7 +150,9 @@ static struct syscon *of_syscon_register(struct device_node *np, bool check_clk)
return syscon;
-err_attach:
+err_reset:
+ reset_control_put(reset);
+err_attach_clk:
if (!IS_ERR(clk))
clk_put(clk);
err_clk:
@@ -150,7 +165,7 @@ err_map:
}
static struct regmap *device_node_get_regmap(struct device_node *np,
- bool check_clk)
+ bool check_res)
{
struct syscon *entry, *syscon = NULL;
@@ -165,7 +180,7 @@ static struct regmap *device_node_get_regmap(struct device_node *np,
spin_unlock(&syscon_list_slock);
if (!syscon)
- syscon = of_syscon_register(np, check_clk);
+ syscon = of_syscon_register(np, check_res);
if (IS_ERR(syscon))
return ERR_CAST(syscon);
diff --git a/drivers/mfd/t7l66xb.c b/drivers/mfd/t7l66xb.c
deleted file mode 100644
index 1d9d1d38d068..000000000000
--- a/drivers/mfd/t7l66xb.c
+++ /dev/null
@@ -1,427 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- *
- * Toshiba T7L66XB core mfd support
- *
- * Copyright (c) 2005, 2007, 2008 Ian Molton
- * Copyright (c) 2008 Dmitry Baryshkov
- *
- * T7L66 features:
- *
- * Supported in this driver:
- * SD/MMC
- * SM/NAND flash controller
- *
- * As yet not supported
- * GPIO interface (on NAND pins)
- * Serial interface
- * TFT 'interface converter'
- * PCMCIA interface logic
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/err.h>
-#include <linux/io.h>
-#include <linux/slab.h>
-#include <linux/irq.h>
-#include <linux/clk.h>
-#include <linux/platform_device.h>
-#include <linux/mfd/core.h>
-#include <linux/mfd/tmio.h>
-#include <linux/mfd/t7l66xb.h>
-
-enum {
- T7L66XB_CELL_NAND,
- T7L66XB_CELL_MMC,
-};
-
-static const struct resource t7l66xb_mmc_resources[] = {
- DEFINE_RES_MEM(0x800, 0x200),
- DEFINE_RES_IRQ(IRQ_T7L66XB_MMC)
-};
-
-#define SCR_REVID 0x08 /* b Revision ID */
-#define SCR_IMR 0x42 /* b Interrupt Mask */
-#define SCR_DEV_CTL 0xe0 /* b Device control */
-#define SCR_ISR 0xe1 /* b Interrupt Status */
-#define SCR_GPO_OC 0xf0 /* b GPO output control */
-#define SCR_GPO_OS 0xf1 /* b GPO output enable */
-#define SCR_GPI_S 0xf2 /* w GPI status */
-#define SCR_APDC 0xf8 /* b Active pullup down ctrl */
-
-#define SCR_DEV_CTL_USB BIT(0) /* USB enable */
-#define SCR_DEV_CTL_MMC BIT(1) /* MMC enable */
-
-/*--------------------------------------------------------------------------*/
-
-struct t7l66xb {
- void __iomem *scr;
- /* Lock to protect registers requiring read/modify/write ops. */
- raw_spinlock_t lock;
-
- struct resource rscr;
- struct clk *clk48m;
- struct clk *clk32k;
- int irq;
- int irq_base;
-};
-
-/*--------------------------------------------------------------------------*/
-
-static int t7l66xb_mmc_enable(struct platform_device *mmc)
-{
- struct t7l66xb *t7l66xb = dev_get_drvdata(mmc->dev.parent);
- unsigned long flags;
- u8 dev_ctl;
- int ret;
-
- ret = clk_prepare_enable(t7l66xb->clk32k);
- if (ret)
- return ret;
-
- raw_spin_lock_irqsave(&t7l66xb->lock, flags);
-
- dev_ctl = tmio_ioread8(t7l66xb->scr + SCR_DEV_CTL);
- dev_ctl |= SCR_DEV_CTL_MMC;
- tmio_iowrite8(dev_ctl, t7l66xb->scr + SCR_DEV_CTL);
-
- raw_spin_unlock_irqrestore(&t7l66xb->lock, flags);
-
- tmio_core_mmc_enable(t7l66xb->scr + 0x200, 0,
- t7l66xb_mmc_resources[0].start & 0xfffe);
-
- return 0;
-}
-
-static int t7l66xb_mmc_disable(struct platform_device *mmc)
-{
- struct t7l66xb *t7l66xb = dev_get_drvdata(mmc->dev.parent);
- unsigned long flags;
- u8 dev_ctl;
-
- raw_spin_lock_irqsave(&t7l66xb->lock, flags);
-
- dev_ctl = tmio_ioread8(t7l66xb->scr + SCR_DEV_CTL);
- dev_ctl &= ~SCR_DEV_CTL_MMC;
- tmio_iowrite8(dev_ctl, t7l66xb->scr + SCR_DEV_CTL);
-
- raw_spin_unlock_irqrestore(&t7l66xb->lock, flags);
-
- clk_disable_unprepare(t7l66xb->clk32k);
-
- return 0;
-}
-
-static void t7l66xb_mmc_pwr(struct platform_device *mmc, int state)
-{
- struct t7l66xb *t7l66xb = dev_get_drvdata(mmc->dev.parent);
-
- tmio_core_mmc_pwr(t7l66xb->scr + 0x200, 0, state);
-}
-
-static void t7l66xb_mmc_clk_div(struct platform_device *mmc, int state)
-{
- struct t7l66xb *t7l66xb = dev_get_drvdata(mmc->dev.parent);
-
- tmio_core_mmc_clk_div(t7l66xb->scr + 0x200, 0, state);
-}
-
-/*--------------------------------------------------------------------------*/
-
-static struct tmio_mmc_data t7166xb_mmc_data = {
- .hclk = 24000000,
- .set_pwr = t7l66xb_mmc_pwr,
- .set_clk_div = t7l66xb_mmc_clk_div,
-};
-
-static const struct resource t7l66xb_nand_resources[] = {
- {
- .start = 0xc00,
- .end = 0xc07,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = 0x0100,
- .end = 0x01ff,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = IRQ_T7L66XB_NAND,
- .end = IRQ_T7L66XB_NAND,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct mfd_cell t7l66xb_cells[] = {
- [T7L66XB_CELL_MMC] = {
- .name = "tmio-mmc",
- .enable = t7l66xb_mmc_enable,
- .disable = t7l66xb_mmc_disable,
- .platform_data = &t7166xb_mmc_data,
- .pdata_size = sizeof(t7166xb_mmc_data),
- .num_resources = ARRAY_SIZE(t7l66xb_mmc_resources),
- .resources = t7l66xb_mmc_resources,
- },
- [T7L66XB_CELL_NAND] = {
- .name = "tmio-nand",
- .num_resources = ARRAY_SIZE(t7l66xb_nand_resources),
- .resources = t7l66xb_nand_resources,
- },
-};
-
-/*--------------------------------------------------------------------------*/
-
-/* Handle the T7L66XB interrupt mux */
-static void t7l66xb_irq(struct irq_desc *desc)
-{
- struct t7l66xb *t7l66xb = irq_desc_get_handler_data(desc);
- unsigned int isr;
- unsigned int i, irq_base;
-
- irq_base = t7l66xb->irq_base;
-
- while ((isr = tmio_ioread8(t7l66xb->scr + SCR_ISR) &
- ~tmio_ioread8(t7l66xb->scr + SCR_IMR)))
- for (i = 0; i < T7L66XB_NR_IRQS; i++)
- if (isr & (1 << i))
- generic_handle_irq(irq_base + i);
-}
-
-static void t7l66xb_irq_mask(struct irq_data *data)
-{
- struct t7l66xb *t7l66xb = irq_data_get_irq_chip_data(data);
- unsigned long flags;
- u8 imr;
-
- raw_spin_lock_irqsave(&t7l66xb->lock, flags);
- imr = tmio_ioread8(t7l66xb->scr + SCR_IMR);
- imr |= 1 << (data->irq - t7l66xb->irq_base);
- tmio_iowrite8(imr, t7l66xb->scr + SCR_IMR);
- raw_spin_unlock_irqrestore(&t7l66xb->lock, flags);
-}
-
-static void t7l66xb_irq_unmask(struct irq_data *data)
-{
- struct t7l66xb *t7l66xb = irq_data_get_irq_chip_data(data);
- unsigned long flags;
- u8 imr;
-
- raw_spin_lock_irqsave(&t7l66xb->lock, flags);
- imr = tmio_ioread8(t7l66xb->scr + SCR_IMR);
- imr &= ~(1 << (data->irq - t7l66xb->irq_base));
- tmio_iowrite8(imr, t7l66xb->scr + SCR_IMR);
- raw_spin_unlock_irqrestore(&t7l66xb->lock, flags);
-}
-
-static struct irq_chip t7l66xb_chip = {
- .name = "t7l66xb",
- .irq_ack = t7l66xb_irq_mask,
- .irq_mask = t7l66xb_irq_mask,
- .irq_unmask = t7l66xb_irq_unmask,
-};
-
-/*--------------------------------------------------------------------------*/
-
-/* Install the IRQ handler */
-static void t7l66xb_attach_irq(struct platform_device *dev)
-{
- struct t7l66xb *t7l66xb = platform_get_drvdata(dev);
- unsigned int irq, irq_base;
-
- irq_base = t7l66xb->irq_base;
-
- for (irq = irq_base; irq < irq_base + T7L66XB_NR_IRQS; irq++) {
- irq_set_chip_and_handler(irq, &t7l66xb_chip, handle_level_irq);
- irq_set_chip_data(irq, t7l66xb);
- }
-
- irq_set_irq_type(t7l66xb->irq, IRQ_TYPE_EDGE_FALLING);
- irq_set_chained_handler_and_data(t7l66xb->irq, t7l66xb_irq, t7l66xb);
-}
-
-static void t7l66xb_detach_irq(struct platform_device *dev)
-{
- struct t7l66xb *t7l66xb = platform_get_drvdata(dev);
- unsigned int irq, irq_base;
-
- irq_base = t7l66xb->irq_base;
-
- irq_set_chained_handler_and_data(t7l66xb->irq, NULL, NULL);
-
- for (irq = irq_base; irq < irq_base + T7L66XB_NR_IRQS; irq++) {
- irq_set_chip(irq, NULL);
- irq_set_chip_data(irq, NULL);
- }
-}
-
-/*--------------------------------------------------------------------------*/
-
-static int t7l66xb_suspend(struct platform_device *dev, pm_message_t state)
-{
- struct t7l66xb *t7l66xb = platform_get_drvdata(dev);
- struct t7l66xb_platform_data *pdata = dev_get_platdata(&dev->dev);
-
- if (pdata && pdata->suspend)
- pdata->suspend(dev);
- clk_disable_unprepare(t7l66xb->clk48m);
-
- return 0;
-}
-
-static int t7l66xb_resume(struct platform_device *dev)
-{
- struct t7l66xb *t7l66xb = platform_get_drvdata(dev);
- struct t7l66xb_platform_data *pdata = dev_get_platdata(&dev->dev);
- int ret;
-
- ret = clk_prepare_enable(t7l66xb->clk48m);
- if (ret)
- return ret;
-
- if (pdata && pdata->resume)
- pdata->resume(dev);
-
- tmio_core_mmc_enable(t7l66xb->scr + 0x200, 0,
- t7l66xb_mmc_resources[0].start & 0xfffe);
-
- return 0;
-}
-
-/*--------------------------------------------------------------------------*/
-
-static int t7l66xb_probe(struct platform_device *dev)
-{
- struct t7l66xb_platform_data *pdata = dev_get_platdata(&dev->dev);
- struct t7l66xb *t7l66xb;
- struct resource *iomem, *rscr;
- int ret;
-
- if (!pdata)
- return -EINVAL;
-
- iomem = platform_get_resource(dev, IORESOURCE_MEM, 0);
- if (!iomem)
- return -EINVAL;
-
- t7l66xb = kzalloc(sizeof *t7l66xb, GFP_KERNEL);
- if (!t7l66xb)
- return -ENOMEM;
-
- raw_spin_lock_init(&t7l66xb->lock);
-
- platform_set_drvdata(dev, t7l66xb);
-
- ret = platform_get_irq(dev, 0);
- if (ret >= 0)
- t7l66xb->irq = ret;
- else
- goto err_noirq;
-
- t7l66xb->irq_base = pdata->irq_base;
-
- t7l66xb->clk32k = clk_get(&dev->dev, "CLK_CK32K");
- if (IS_ERR(t7l66xb->clk32k)) {
- ret = PTR_ERR(t7l66xb->clk32k);
- goto err_clk32k_get;
- }
-
- t7l66xb->clk48m = clk_get(&dev->dev, "CLK_CK48M");
- if (IS_ERR(t7l66xb->clk48m)) {
- ret = PTR_ERR(t7l66xb->clk48m);
- goto err_clk48m_get;
- }
-
- rscr = &t7l66xb->rscr;
- rscr->name = "t7l66xb-core";
- rscr->start = iomem->start;
- rscr->end = iomem->start + 0xff;
- rscr->flags = IORESOURCE_MEM;
-
- ret = request_resource(iomem, rscr);
- if (ret)
- goto err_request_scr;
-
- t7l66xb->scr = ioremap(rscr->start, resource_size(rscr));
- if (!t7l66xb->scr) {
- ret = -ENOMEM;
- goto err_ioremap;
- }
-
- ret = clk_prepare_enable(t7l66xb->clk48m);
- if (ret)
- goto err_clk_enable;
-
- if (pdata->enable)
- pdata->enable(dev);
-
- /* Mask all interrupts */
- tmio_iowrite8(0xbf, t7l66xb->scr + SCR_IMR);
-
- printk(KERN_INFO "%s rev %d @ 0x%08lx, irq %d\n",
- dev->name, tmio_ioread8(t7l66xb->scr + SCR_REVID),
- (unsigned long)iomem->start, t7l66xb->irq);
-
- t7l66xb_attach_irq(dev);
-
- t7l66xb_cells[T7L66XB_CELL_NAND].platform_data = pdata->nand_data;
- t7l66xb_cells[T7L66XB_CELL_NAND].pdata_size = sizeof(*pdata->nand_data);
-
- ret = mfd_add_devices(&dev->dev, dev->id,
- t7l66xb_cells, ARRAY_SIZE(t7l66xb_cells),
- iomem, t7l66xb->irq_base, NULL);
-
- if (!ret)
- return 0;
-
- t7l66xb_detach_irq(dev);
- clk_disable_unprepare(t7l66xb->clk48m);
-err_clk_enable:
- iounmap(t7l66xb->scr);
-err_ioremap:
- release_resource(&t7l66xb->rscr);
-err_request_scr:
- clk_put(t7l66xb->clk48m);
-err_clk48m_get:
- clk_put(t7l66xb->clk32k);
-err_clk32k_get:
-err_noirq:
- kfree(t7l66xb);
- return ret;
-}
-
-static int t7l66xb_remove(struct platform_device *dev)
-{
- struct t7l66xb *t7l66xb = platform_get_drvdata(dev);
-
- clk_disable_unprepare(t7l66xb->clk48m);
- clk_put(t7l66xb->clk48m);
- clk_disable_unprepare(t7l66xb->clk32k);
- clk_put(t7l66xb->clk32k);
- t7l66xb_detach_irq(dev);
- iounmap(t7l66xb->scr);
- release_resource(&t7l66xb->rscr);
- mfd_remove_devices(&dev->dev);
- kfree(t7l66xb);
-
- return 0;
-}
-
-static struct platform_driver t7l66xb_platform_driver = {
- .driver = {
- .name = "t7l66xb",
- },
- .suspend = pm_sleep_ptr(t7l66xb_suspend),
- .resume = pm_sleep_ptr(t7l66xb_resume),
- .probe = t7l66xb_probe,
- .remove = t7l66xb_remove,
-};
-
-/*--------------------------------------------------------------------------*/
-
-module_platform_driver(t7l66xb_platform_driver);
-
-MODULE_DESCRIPTION("Toshiba T7L66XB core driver");
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Ian Molton");
-MODULE_ALIAS("platform:t7l66xb");
diff --git a/drivers/mfd/tc6387xb.c b/drivers/mfd/tc6387xb.c
deleted file mode 100644
index 5392da6ba7b0..000000000000
--- a/drivers/mfd/tc6387xb.c
+++ /dev/null
@@ -1,228 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Toshiba TC6387XB support
- * Copyright (c) 2005 Ian Molton
- *
- * This file contains TC6387XB base support.
- */
-
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/clk.h>
-#include <linux/err.h>
-#include <linux/mfd/core.h>
-#include <linux/mfd/tmio.h>
-#include <linux/mfd/tc6387xb.h>
-#include <linux/slab.h>
-
-enum {
- TC6387XB_CELL_MMC,
-};
-
-struct tc6387xb {
- void __iomem *scr;
- struct clk *clk32k;
- struct resource rscr;
-};
-
-static const struct resource tc6387xb_mmc_resources[] = {
- {
- .start = 0x800,
- .end = 0x9ff,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = 0,
- .end = 0,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-/*--------------------------------------------------------------------------*/
-
-static int tc6387xb_suspend(struct platform_device *dev, pm_message_t state)
-{
- struct tc6387xb *tc6387xb = platform_get_drvdata(dev);
- struct tc6387xb_platform_data *pdata = dev_get_platdata(&dev->dev);
-
- if (pdata && pdata->suspend)
- pdata->suspend(dev);
- clk_disable_unprepare(tc6387xb->clk32k);
-
- return 0;
-}
-
-static int tc6387xb_resume(struct platform_device *dev)
-{
- struct tc6387xb *tc6387xb = platform_get_drvdata(dev);
- struct tc6387xb_platform_data *pdata = dev_get_platdata(&dev->dev);
-
- clk_prepare_enable(tc6387xb->clk32k);
- if (pdata && pdata->resume)
- pdata->resume(dev);
-
- tmio_core_mmc_resume(tc6387xb->scr + 0x200, 0,
- tc6387xb_mmc_resources[0].start & 0xfffe);
-
- return 0;
-}
-
-/*--------------------------------------------------------------------------*/
-
-static void tc6387xb_mmc_pwr(struct platform_device *mmc, int state)
-{
- struct tc6387xb *tc6387xb = dev_get_drvdata(mmc->dev.parent);
-
- tmio_core_mmc_pwr(tc6387xb->scr + 0x200, 0, state);
-}
-
-static void tc6387xb_mmc_clk_div(struct platform_device *mmc, int state)
-{
- struct tc6387xb *tc6387xb = dev_get_drvdata(mmc->dev.parent);
-
- tmio_core_mmc_clk_div(tc6387xb->scr + 0x200, 0, state);
-}
-
-
-static int tc6387xb_mmc_enable(struct platform_device *mmc)
-{
- struct tc6387xb *tc6387xb = dev_get_drvdata(mmc->dev.parent);
-
- clk_prepare_enable(tc6387xb->clk32k);
-
- tmio_core_mmc_enable(tc6387xb->scr + 0x200, 0,
- tc6387xb_mmc_resources[0].start & 0xfffe);
-
- return 0;
-}
-
-static int tc6387xb_mmc_disable(struct platform_device *mmc)
-{
- struct tc6387xb *tc6387xb = dev_get_drvdata(mmc->dev.parent);
-
- clk_disable_unprepare(tc6387xb->clk32k);
-
- return 0;
-}
-
-static struct tmio_mmc_data tc6387xb_mmc_data = {
- .hclk = 24000000,
- .set_pwr = tc6387xb_mmc_pwr,
- .set_clk_div = tc6387xb_mmc_clk_div,
-};
-
-/*--------------------------------------------------------------------------*/
-
-static const struct mfd_cell tc6387xb_cells[] = {
- [TC6387XB_CELL_MMC] = {
- .name = "tmio-mmc",
- .enable = tc6387xb_mmc_enable,
- .disable = tc6387xb_mmc_disable,
- .platform_data = &tc6387xb_mmc_data,
- .pdata_size = sizeof(tc6387xb_mmc_data),
- .num_resources = ARRAY_SIZE(tc6387xb_mmc_resources),
- .resources = tc6387xb_mmc_resources,
- },
-};
-
-static int tc6387xb_probe(struct platform_device *dev)
-{
- struct tc6387xb_platform_data *pdata = dev_get_platdata(&dev->dev);
- struct resource *iomem, *rscr;
- struct clk *clk32k;
- struct tc6387xb *tc6387xb;
- int irq, ret;
-
- iomem = platform_get_resource(dev, IORESOURCE_MEM, 0);
- if (!iomem)
- return -EINVAL;
-
- tc6387xb = kzalloc(sizeof(*tc6387xb), GFP_KERNEL);
- if (!tc6387xb)
- return -ENOMEM;
-
- ret = platform_get_irq(dev, 0);
- if (ret >= 0)
- irq = ret;
- else
- goto err_no_irq;
-
- clk32k = clk_get(&dev->dev, "CLK_CK32K");
- if (IS_ERR(clk32k)) {
- ret = PTR_ERR(clk32k);
- goto err_no_clk;
- }
-
- rscr = &tc6387xb->rscr;
- rscr->name = "tc6387xb-core";
- rscr->start = iomem->start;
- rscr->end = iomem->start + 0xff;
- rscr->flags = IORESOURCE_MEM;
-
- ret = request_resource(iomem, rscr);
- if (ret)
- goto err_resource;
-
- tc6387xb->scr = ioremap(rscr->start, resource_size(rscr));
- if (!tc6387xb->scr) {
- ret = -ENOMEM;
- goto err_ioremap;
- }
-
- tc6387xb->clk32k = clk32k;
- platform_set_drvdata(dev, tc6387xb);
-
- if (pdata && pdata->enable)
- pdata->enable(dev);
-
- dev_info(&dev->dev, "Toshiba tc6387xb initialised\n");
-
- ret = mfd_add_devices(&dev->dev, dev->id, tc6387xb_cells,
- ARRAY_SIZE(tc6387xb_cells), iomem, irq, NULL);
-
- if (!ret)
- return 0;
-
- iounmap(tc6387xb->scr);
-err_ioremap:
- release_resource(&tc6387xb->rscr);
-err_resource:
- clk_put(clk32k);
-err_no_clk:
-err_no_irq:
- kfree(tc6387xb);
- return ret;
-}
-
-static int tc6387xb_remove(struct platform_device *dev)
-{
- struct tc6387xb *tc6387xb = platform_get_drvdata(dev);
-
- mfd_remove_devices(&dev->dev);
- iounmap(tc6387xb->scr);
- release_resource(&tc6387xb->rscr);
- clk_disable_unprepare(tc6387xb->clk32k);
- clk_put(tc6387xb->clk32k);
- kfree(tc6387xb);
-
- return 0;
-}
-
-
-static struct platform_driver tc6387xb_platform_driver = {
- .driver = {
- .name = "tc6387xb",
- },
- .probe = tc6387xb_probe,
- .remove = tc6387xb_remove,
- .suspend = pm_sleep_ptr(tc6387xb_suspend),
- .resume = pm_sleep_ptr(tc6387xb_resume),
-};
-
-module_platform_driver(tc6387xb_platform_driver);
-
-MODULE_DESCRIPTION("Toshiba TC6387XB core driver");
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Ian Molton");
-MODULE_ALIAS("platform:tc6387xb");
-
diff --git a/drivers/mfd/tc6393xb.c b/drivers/mfd/tc6393xb.c
deleted file mode 100644
index 997bb8b5881d..000000000000
--- a/drivers/mfd/tc6393xb.c
+++ /dev/null
@@ -1,907 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Toshiba TC6393XB SoC support
- *
- * Copyright(c) 2005-2006 Chris Humbert
- * Copyright(c) 2005 Dirk Opfer
- * Copyright(c) 2005 Ian Molton <spyro@f2s.com>
- * Copyright(c) 2007 Dmitry Baryshkov
- *
- * Based on code written by Sharp/Lineo for 2.4 kernels
- * Based on locomo.c
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/io.h>
-#include <linux/irq.h>
-#include <linux/platform_device.h>
-#include <linux/clk.h>
-#include <linux/err.h>
-#include <linux/mfd/core.h>
-#include <linux/mfd/tmio.h>
-#include <linux/mfd/tc6393xb.h>
-#include <linux/gpio/driver.h>
-#include <linux/gpio/machine.h>
-#include <linux/gpio/consumer.h>
-#include <linux/slab.h>
-
-#define SCR_REVID 0x08 /* b Revision ID */
-#define SCR_ISR 0x50 /* b Interrupt Status */
-#define SCR_IMR 0x52 /* b Interrupt Mask */
-#define SCR_IRR 0x54 /* b Interrupt Routing */
-#define SCR_GPER 0x60 /* w GP Enable */
-#define SCR_GPI_SR(i) (0x64 + (i)) /* b3 GPI Status */
-#define SCR_GPI_IMR(i) (0x68 + (i)) /* b3 GPI INT Mask */
-#define SCR_GPI_EDER(i) (0x6c + (i)) /* b3 GPI Edge Detect Enable */
-#define SCR_GPI_LIR(i) (0x70 + (i)) /* b3 GPI Level Invert */
-#define SCR_GPO_DSR(i) (0x78 + (i)) /* b3 GPO Data Set */
-#define SCR_GPO_DOECR(i) (0x7c + (i)) /* b3 GPO Data OE Control */
-#define SCR_GP_IARCR(i) (0x80 + (i)) /* b3 GP Internal Active Register Control */
-#define SCR_GP_IARLCR(i) (0x84 + (i)) /* b3 GP INTERNAL Active Register Level Control */
-#define SCR_GPI_BCR(i) (0x88 + (i)) /* b3 GPI Buffer Control */
-#define SCR_GPA_IARCR 0x8c /* w GPa Internal Active Register Control */
-#define SCR_GPA_IARLCR 0x90 /* w GPa Internal Active Register Level Control */
-#define SCR_GPA_BCR 0x94 /* w GPa Buffer Control */
-#define SCR_CCR 0x98 /* w Clock Control */
-#define SCR_PLL2CR 0x9a /* w PLL2 Control */
-#define SCR_PLL1CR 0x9c /* l PLL1 Control */
-#define SCR_DIARCR 0xa0 /* b Device Internal Active Register Control */
-#define SCR_DBOCR 0xa1 /* b Device Buffer Off Control */
-#define SCR_FER 0xe0 /* b Function Enable */
-#define SCR_MCR 0xe4 /* w Mode Control */
-#define SCR_CONFIG 0xfc /* b Configuration Control */
-#define SCR_DEBUG 0xff /* b Debug */
-
-#define SCR_CCR_CK32K BIT(0)
-#define SCR_CCR_USBCK BIT(1)
-#define SCR_CCR_UNK1 BIT(4)
-#define SCR_CCR_MCLK_MASK (7 << 8)
-#define SCR_CCR_MCLK_OFF (0 << 8)
-#define SCR_CCR_MCLK_12 (1 << 8)
-#define SCR_CCR_MCLK_24 (2 << 8)
-#define SCR_CCR_MCLK_48 (3 << 8)
-#define SCR_CCR_HCLK_MASK (3 << 12)
-#define SCR_CCR_HCLK_24 (0 << 12)
-#define SCR_CCR_HCLK_48 (1 << 12)
-
-#define SCR_FER_USBEN BIT(0) /* USB host enable */
-#define SCR_FER_LCDCVEN BIT(1) /* polysilicon TFT enable */
-#define SCR_FER_SLCDEN BIT(2) /* SLCD enable */
-
-#define SCR_MCR_RDY_MASK (3 << 0)
-#define SCR_MCR_RDY_OPENDRAIN (0 << 0)
-#define SCR_MCR_RDY_TRISTATE (1 << 0)
-#define SCR_MCR_RDY_PUSHPULL (2 << 0)
-#define SCR_MCR_RDY_UNK BIT(2)
-#define SCR_MCR_RDY_EN BIT(3)
-#define SCR_MCR_INT_MASK (3 << 4)
-#define SCR_MCR_INT_OPENDRAIN (0 << 4)
-#define SCR_MCR_INT_TRISTATE (1 << 4)
-#define SCR_MCR_INT_PUSHPULL (2 << 4)
-#define SCR_MCR_INT_UNK BIT(6)
-#define SCR_MCR_INT_EN BIT(7)
-/* bits 8 - 16 are unknown */
-
-#define TC_GPIO_BIT(i) (1 << (i & 0x7))
-
-/*--------------------------------------------------------------------------*/
-
-struct tc6393xb {
- void __iomem *scr;
- struct device *dev;
-
- struct gpio_chip gpio;
- struct gpio_desc *vcc_on;
-
- struct clk *clk; /* 3,6 Mhz */
-
- raw_spinlock_t lock; /* protects RMW cycles */
-
- struct {
- u8 fer;
- u16 ccr;
- u8 gpi_bcr[3];
- u8 gpo_dsr[3];
- u8 gpo_doecr[3];
- } suspend_state;
-
- struct resource rscr;
- struct resource *iomem;
- int irq;
- int irq_base;
-};
-
-enum {
- TC6393XB_CELL_NAND,
- TC6393XB_CELL_MMC,
- TC6393XB_CELL_OHCI,
- TC6393XB_CELL_FB,
-};
-
-/*--------------------------------------------------------------------------*/
-
-static int tc6393xb_nand_enable(struct platform_device *nand)
-{
- struct tc6393xb *tc6393xb = dev_get_drvdata(nand->dev.parent);
- unsigned long flags;
-
- raw_spin_lock_irqsave(&tc6393xb->lock, flags);
-
- /* SMD buffer on */
- dev_dbg(nand->dev.parent, "SMD buffer on\n");
- tmio_iowrite8(0xff, tc6393xb->scr + SCR_GPI_BCR(1));
-
- raw_spin_unlock_irqrestore(&tc6393xb->lock, flags);
-
- return 0;
-}
-
-static const struct resource tc6393xb_nand_resources[] = {
- {
- .start = 0x1000,
- .end = 0x1007,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = 0x0100,
- .end = 0x01ff,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = IRQ_TC6393_NAND,
- .end = IRQ_TC6393_NAND,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static const struct resource tc6393xb_mmc_resources[] = {
- {
- .start = 0x800,
- .end = 0x9ff,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = IRQ_TC6393_MMC,
- .end = IRQ_TC6393_MMC,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static const struct resource tc6393xb_ohci_resources[] = {
- {
- .start = 0x3000,
- .end = 0x31ff,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = 0x0300,
- .end = 0x03ff,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = 0x010000,
- .end = 0x017fff,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = 0x018000,
- .end = 0x01ffff,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = IRQ_TC6393_OHCI,
- .end = IRQ_TC6393_OHCI,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static const struct resource tc6393xb_fb_resources[] = {
- {
- .start = 0x5000,
- .end = 0x51ff,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = 0x0500,
- .end = 0x05ff,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = 0x100000,
- .end = 0x1fffff,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = IRQ_TC6393_FB,
- .end = IRQ_TC6393_FB,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static int tc6393xb_ohci_enable(struct platform_device *dev)
-{
- struct tc6393xb *tc6393xb = dev_get_drvdata(dev->dev.parent);
- unsigned long flags;
- u16 ccr;
- u8 fer;
-
- raw_spin_lock_irqsave(&tc6393xb->lock, flags);
-
- ccr = tmio_ioread16(tc6393xb->scr + SCR_CCR);
- ccr |= SCR_CCR_USBCK;
- tmio_iowrite16(ccr, tc6393xb->scr + SCR_CCR);
-
- fer = tmio_ioread8(tc6393xb->scr + SCR_FER);
- fer |= SCR_FER_USBEN;
- tmio_iowrite8(fer, tc6393xb->scr + SCR_FER);
-
- raw_spin_unlock_irqrestore(&tc6393xb->lock, flags);
-
- return 0;
-}
-
-static int tc6393xb_ohci_disable(struct platform_device *dev)
-{
- struct tc6393xb *tc6393xb = dev_get_drvdata(dev->dev.parent);
- unsigned long flags;
- u16 ccr;
- u8 fer;
-
- raw_spin_lock_irqsave(&tc6393xb->lock, flags);
-
- fer = tmio_ioread8(tc6393xb->scr + SCR_FER);
- fer &= ~SCR_FER_USBEN;
- tmio_iowrite8(fer, tc6393xb->scr + SCR_FER);
-
- ccr = tmio_ioread16(tc6393xb->scr + SCR_CCR);
- ccr &= ~SCR_CCR_USBCK;
- tmio_iowrite16(ccr, tc6393xb->scr + SCR_CCR);
-
- raw_spin_unlock_irqrestore(&tc6393xb->lock, flags);
-
- return 0;
-}
-
-static int tc6393xb_ohci_suspend(struct platform_device *dev)
-{
- struct tc6393xb_platform_data *tcpd = dev_get_platdata(dev->dev.parent);
-
- /* We can't properly store/restore OHCI state, so fail here */
- if (tcpd->resume_restore)
- return -EBUSY;
-
- return tc6393xb_ohci_disable(dev);
-}
-
-static int tc6393xb_fb_enable(struct platform_device *dev)
-{
- struct tc6393xb *tc6393xb = dev_get_drvdata(dev->dev.parent);
- unsigned long flags;
- u16 ccr;
-
- raw_spin_lock_irqsave(&tc6393xb->lock, flags);
-
- ccr = tmio_ioread16(tc6393xb->scr + SCR_CCR);
- ccr &= ~SCR_CCR_MCLK_MASK;
- ccr |= SCR_CCR_MCLK_48;
- tmio_iowrite16(ccr, tc6393xb->scr + SCR_CCR);
-
- raw_spin_unlock_irqrestore(&tc6393xb->lock, flags);
-
- return 0;
-}
-
-static int tc6393xb_fb_disable(struct platform_device *dev)
-{
- struct tc6393xb *tc6393xb = dev_get_drvdata(dev->dev.parent);
- unsigned long flags;
- u16 ccr;
-
- raw_spin_lock_irqsave(&tc6393xb->lock, flags);
-
- ccr = tmio_ioread16(tc6393xb->scr + SCR_CCR);
- ccr &= ~SCR_CCR_MCLK_MASK;
- ccr |= SCR_CCR_MCLK_OFF;
- tmio_iowrite16(ccr, tc6393xb->scr + SCR_CCR);
-
- raw_spin_unlock_irqrestore(&tc6393xb->lock, flags);
-
- return 0;
-}
-
-int tc6393xb_lcd_set_power(struct platform_device *fb, bool on)
-{
- struct tc6393xb *tc6393xb = dev_get_drvdata(fb->dev.parent);
- u8 fer;
- unsigned long flags;
-
- raw_spin_lock_irqsave(&tc6393xb->lock, flags);
-
- fer = ioread8(tc6393xb->scr + SCR_FER);
- if (on)
- fer |= SCR_FER_SLCDEN;
- else
- fer &= ~SCR_FER_SLCDEN;
- iowrite8(fer, tc6393xb->scr + SCR_FER);
-
- raw_spin_unlock_irqrestore(&tc6393xb->lock, flags);
-
- return 0;
-}
-EXPORT_SYMBOL(tc6393xb_lcd_set_power);
-
-int tc6393xb_lcd_mode(struct platform_device *fb,
- const struct fb_videomode *mode) {
- struct tc6393xb *tc6393xb = dev_get_drvdata(fb->dev.parent);
- unsigned long flags;
-
- raw_spin_lock_irqsave(&tc6393xb->lock, flags);
-
- iowrite16(mode->pixclock, tc6393xb->scr + SCR_PLL1CR + 0);
- iowrite16(mode->pixclock >> 16, tc6393xb->scr + SCR_PLL1CR + 2);
-
- raw_spin_unlock_irqrestore(&tc6393xb->lock, flags);
-
- return 0;
-}
-EXPORT_SYMBOL(tc6393xb_lcd_mode);
-
-static int tc6393xb_mmc_enable(struct platform_device *mmc)
-{
- struct tc6393xb *tc6393xb = dev_get_drvdata(mmc->dev.parent);
-
- tmio_core_mmc_enable(tc6393xb->scr + 0x200, 0,
- tc6393xb_mmc_resources[0].start & 0xfffe);
-
- return 0;
-}
-
-static int tc6393xb_mmc_resume(struct platform_device *mmc)
-{
- struct tc6393xb *tc6393xb = dev_get_drvdata(mmc->dev.parent);
-
- tmio_core_mmc_resume(tc6393xb->scr + 0x200, 0,
- tc6393xb_mmc_resources[0].start & 0xfffe);
-
- return 0;
-}
-
-static void tc6393xb_mmc_pwr(struct platform_device *mmc, int state)
-{
- struct tc6393xb *tc6393xb = dev_get_drvdata(mmc->dev.parent);
-
- tmio_core_mmc_pwr(tc6393xb->scr + 0x200, 0, state);
-}
-
-static void tc6393xb_mmc_clk_div(struct platform_device *mmc, int state)
-{
- struct tc6393xb *tc6393xb = dev_get_drvdata(mmc->dev.parent);
-
- tmio_core_mmc_clk_div(tc6393xb->scr + 0x200, 0, state);
-}
-
-static struct tmio_mmc_data tc6393xb_mmc_data = {
- .hclk = 24000000,
- .set_pwr = tc6393xb_mmc_pwr,
- .set_clk_div = tc6393xb_mmc_clk_div,
-};
-
-static struct mfd_cell tc6393xb_cells[] = {
- [TC6393XB_CELL_NAND] = {
- .name = "tmio-nand",
- .enable = tc6393xb_nand_enable,
- .num_resources = ARRAY_SIZE(tc6393xb_nand_resources),
- .resources = tc6393xb_nand_resources,
- },
- [TC6393XB_CELL_MMC] = {
- .name = "tmio-mmc",
- .enable = tc6393xb_mmc_enable,
- .resume = tc6393xb_mmc_resume,
- .platform_data = &tc6393xb_mmc_data,
- .pdata_size = sizeof(tc6393xb_mmc_data),
- .num_resources = ARRAY_SIZE(tc6393xb_mmc_resources),
- .resources = tc6393xb_mmc_resources,
- },
- [TC6393XB_CELL_OHCI] = {
- .name = "tmio-ohci",
- .num_resources = ARRAY_SIZE(tc6393xb_ohci_resources),
- .resources = tc6393xb_ohci_resources,
- .enable = tc6393xb_ohci_enable,
- .suspend = tc6393xb_ohci_suspend,
- .resume = tc6393xb_ohci_enable,
- .disable = tc6393xb_ohci_disable,
- },
- [TC6393XB_CELL_FB] = {
- .name = "tmio-fb",
- .num_resources = ARRAY_SIZE(tc6393xb_fb_resources),
- .resources = tc6393xb_fb_resources,
- .enable = tc6393xb_fb_enable,
- .suspend = tc6393xb_fb_disable,
- .resume = tc6393xb_fb_enable,
- .disable = tc6393xb_fb_disable,
- },
-};
-
-/*--------------------------------------------------------------------------*/
-
-static int tc6393xb_gpio_get(struct gpio_chip *chip,
- unsigned offset)
-{
- struct tc6393xb *tc6393xb = gpiochip_get_data(chip);
-
- /* XXX: does dsr also represent inputs? */
- return !!(tmio_ioread8(tc6393xb->scr + SCR_GPO_DSR(offset / 8))
- & TC_GPIO_BIT(offset));
-}
-
-static void __tc6393xb_gpio_set(struct gpio_chip *chip,
- unsigned offset, int value)
-{
- struct tc6393xb *tc6393xb = gpiochip_get_data(chip);
- u8 dsr;
-
- dsr = tmio_ioread8(tc6393xb->scr + SCR_GPO_DSR(offset / 8));
- if (value)
- dsr |= TC_GPIO_BIT(offset);
- else
- dsr &= ~TC_GPIO_BIT(offset);
-
- tmio_iowrite8(dsr, tc6393xb->scr + SCR_GPO_DSR(offset / 8));
-}
-
-static void tc6393xb_gpio_set(struct gpio_chip *chip,
- unsigned offset, int value)
-{
- struct tc6393xb *tc6393xb = gpiochip_get_data(chip);
- unsigned long flags;
-
- raw_spin_lock_irqsave(&tc6393xb->lock, flags);
-
- __tc6393xb_gpio_set(chip, offset, value);
-
- raw_spin_unlock_irqrestore(&tc6393xb->lock, flags);
-}
-
-static int tc6393xb_gpio_direction_input(struct gpio_chip *chip,
- unsigned offset)
-{
- struct tc6393xb *tc6393xb = gpiochip_get_data(chip);
- unsigned long flags;
- u8 doecr;
-
- raw_spin_lock_irqsave(&tc6393xb->lock, flags);
-
- doecr = tmio_ioread8(tc6393xb->scr + SCR_GPO_DOECR(offset / 8));
- doecr &= ~TC_GPIO_BIT(offset);
- tmio_iowrite8(doecr, tc6393xb->scr + SCR_GPO_DOECR(offset / 8));
-
- raw_spin_unlock_irqrestore(&tc6393xb->lock, flags);
-
- return 0;
-}
-
-static int tc6393xb_gpio_direction_output(struct gpio_chip *chip,
- unsigned offset, int value)
-{
- struct tc6393xb *tc6393xb = gpiochip_get_data(chip);
- unsigned long flags;
- u8 doecr;
-
- raw_spin_lock_irqsave(&tc6393xb->lock, flags);
-
- __tc6393xb_gpio_set(chip, offset, value);
-
- doecr = tmio_ioread8(tc6393xb->scr + SCR_GPO_DOECR(offset / 8));
- doecr |= TC_GPIO_BIT(offset);
- tmio_iowrite8(doecr, tc6393xb->scr + SCR_GPO_DOECR(offset / 8));
-
- raw_spin_unlock_irqrestore(&tc6393xb->lock, flags);
-
- return 0;
-}
-
-/*
- * TC6393XB GPIOs as used on TOSA, are the only user of this chip.
- * GPIOs 2, 5, 8 and 13 are not connected.
- */
-#define TOSA_GPIO_TG_ON 0
-#define TOSA_GPIO_L_MUTE 1
-#define TOSA_GPIO_BL_C20MA 3
-#define TOSA_GPIO_CARD_VCC_ON 4
-#define TOSA_GPIO_CHARGE_OFF 6
-#define TOSA_GPIO_CHARGE_OFF_JC 7
-#define TOSA_GPIO_BAT0_V_ON 9
-#define TOSA_GPIO_BAT1_V_ON 10
-#define TOSA_GPIO_BU_CHRG_ON 11
-#define TOSA_GPIO_BAT_SW_ON 12
-#define TOSA_GPIO_BAT0_TH_ON 14
-#define TOSA_GPIO_BAT1_TH_ON 15
-
-
-GPIO_LOOKUP_SINGLE(tosa_lcd_gpio_lookup, "spi2.0", "tc6393xb",
- TOSA_GPIO_TG_ON, "tg #pwr", GPIO_ACTIVE_HIGH);
-
-GPIO_LOOKUP_SINGLE(tosa_lcd_bl_gpio_lookup, "i2c-tos-bl", "tc6393xb",
- TOSA_GPIO_BL_C20MA, "backlight", GPIO_ACTIVE_HIGH);
-
-GPIO_LOOKUP_SINGLE(tosa_audio_gpio_lookup, "tosa-audio", "tc6393xb",
- TOSA_GPIO_L_MUTE, NULL, GPIO_ACTIVE_HIGH);
-
-static struct gpiod_lookup_table tosa_battery_gpio_lookup = {
- .dev_id = "wm97xx-battery",
- .table = {
- GPIO_LOOKUP("tc6393xb", TOSA_GPIO_CHARGE_OFF,
- "main charge off", GPIO_ACTIVE_HIGH),
- GPIO_LOOKUP("tc6393xb", TOSA_GPIO_CHARGE_OFF_JC,
- "jacket charge off", GPIO_ACTIVE_HIGH),
- GPIO_LOOKUP("tc6393xb", TOSA_GPIO_BAT0_V_ON,
- "main battery", GPIO_ACTIVE_HIGH),
- GPIO_LOOKUP("tc6393xb", TOSA_GPIO_BAT1_V_ON,
- "jacket battery", GPIO_ACTIVE_HIGH),
- GPIO_LOOKUP("tc6393xb", TOSA_GPIO_BU_CHRG_ON,
- "backup battery", GPIO_ACTIVE_HIGH),
- /* BAT1 and BAT0 thermistors appear to be swapped */
- GPIO_LOOKUP("tc6393xb", TOSA_GPIO_BAT1_TH_ON,
- "main battery temp", GPIO_ACTIVE_HIGH),
- GPIO_LOOKUP("tc6393xb", TOSA_GPIO_BAT0_TH_ON,
- "jacket battery temp", GPIO_ACTIVE_HIGH),
- GPIO_LOOKUP("tc6393xb", TOSA_GPIO_BAT_SW_ON,
- "battery switch", GPIO_ACTIVE_HIGH),
- { },
- },
-};
-
-static struct gpiod_lookup_table *tc6393xb_gpio_lookups[] = {
- &tosa_lcd_gpio_lookup,
- &tosa_lcd_bl_gpio_lookup,
- &tosa_audio_gpio_lookup,
- &tosa_battery_gpio_lookup,
-};
-
-static int tc6393xb_register_gpio(struct tc6393xb *tc6393xb)
-{
- struct gpio_chip *gc = &tc6393xb->gpio;
- struct device *dev = tc6393xb->dev;
- int ret;
-
- gc->label = "tc6393xb";
- gc->base = -1; /* Dynamic allocation */
- gc->ngpio = 16;
- gc->set = tc6393xb_gpio_set;
- gc->get = tc6393xb_gpio_get;
- gc->direction_input = tc6393xb_gpio_direction_input;
- gc->direction_output = tc6393xb_gpio_direction_output;
-
- ret = devm_gpiochip_add_data(dev, gc, tc6393xb);
- if (ret)
- return dev_err_probe(dev, ret, "failed to add GPIO chip\n");
-
- /* Register descriptor look-ups for consumers */
- gpiod_add_lookup_tables(tc6393xb_gpio_lookups, ARRAY_SIZE(tc6393xb_gpio_lookups));
-
- /* Request some of our own GPIOs */
- tc6393xb->vcc_on = gpiochip_request_own_desc(gc, TOSA_GPIO_CARD_VCC_ON, "VCC ON",
- GPIO_ACTIVE_HIGH, GPIOD_OUT_HIGH);
- if (IS_ERR(tc6393xb->vcc_on))
- return dev_err_probe(dev, PTR_ERR(tc6393xb->vcc_on),
- "failed to request VCC ON GPIO\n");
-
- return 0;
-}
-
-/*--------------------------------------------------------------------------*/
-
-static void tc6393xb_irq(struct irq_desc *desc)
-{
- struct tc6393xb *tc6393xb = irq_desc_get_handler_data(desc);
- unsigned int isr;
- unsigned int i, irq_base;
-
- irq_base = tc6393xb->irq_base;
-
- while ((isr = tmio_ioread8(tc6393xb->scr + SCR_ISR) &
- ~tmio_ioread8(tc6393xb->scr + SCR_IMR)))
- for (i = 0; i < TC6393XB_NR_IRQS; i++) {
- if (isr & (1 << i))
- generic_handle_irq(irq_base + i);
- }
-}
-
-static void tc6393xb_irq_ack(struct irq_data *data)
-{
-}
-
-static void tc6393xb_irq_mask(struct irq_data *data)
-{
- struct tc6393xb *tc6393xb = irq_data_get_irq_chip_data(data);
- unsigned long flags;
- u8 imr;
-
- raw_spin_lock_irqsave(&tc6393xb->lock, flags);
- imr = tmio_ioread8(tc6393xb->scr + SCR_IMR);
- imr |= 1 << (data->irq - tc6393xb->irq_base);
- tmio_iowrite8(imr, tc6393xb->scr + SCR_IMR);
- raw_spin_unlock_irqrestore(&tc6393xb->lock, flags);
-}
-
-static void tc6393xb_irq_unmask(struct irq_data *data)
-{
- struct tc6393xb *tc6393xb = irq_data_get_irq_chip_data(data);
- unsigned long flags;
- u8 imr;
-
- raw_spin_lock_irqsave(&tc6393xb->lock, flags);
- imr = tmio_ioread8(tc6393xb->scr + SCR_IMR);
- imr &= ~(1 << (data->irq - tc6393xb->irq_base));
- tmio_iowrite8(imr, tc6393xb->scr + SCR_IMR);
- raw_spin_unlock_irqrestore(&tc6393xb->lock, flags);
-}
-
-static struct irq_chip tc6393xb_chip = {
- .name = "tc6393xb",
- .irq_ack = tc6393xb_irq_ack,
- .irq_mask = tc6393xb_irq_mask,
- .irq_unmask = tc6393xb_irq_unmask,
-};
-
-static void tc6393xb_attach_irq(struct platform_device *dev)
-{
- struct tc6393xb *tc6393xb = platform_get_drvdata(dev);
- unsigned int irq, irq_base;
-
- irq_base = tc6393xb->irq_base;
-
- for (irq = irq_base; irq < irq_base + TC6393XB_NR_IRQS; irq++) {
- irq_set_chip_and_handler(irq, &tc6393xb_chip, handle_edge_irq);
- irq_set_chip_data(irq, tc6393xb);
- irq_clear_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE);
- }
-
- irq_set_irq_type(tc6393xb->irq, IRQ_TYPE_EDGE_FALLING);
- irq_set_chained_handler_and_data(tc6393xb->irq, tc6393xb_irq,
- tc6393xb);
-}
-
-static void tc6393xb_detach_irq(struct platform_device *dev)
-{
- struct tc6393xb *tc6393xb = platform_get_drvdata(dev);
- unsigned int irq, irq_base;
-
- irq_set_chained_handler_and_data(tc6393xb->irq, NULL, NULL);
-
- irq_base = tc6393xb->irq_base;
-
- for (irq = irq_base; irq < irq_base + TC6393XB_NR_IRQS; irq++) {
- irq_set_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE);
- irq_set_chip(irq, NULL);
- irq_set_chip_data(irq, NULL);
- }
-}
-
-/*--------------------------------------------------------------------------*/
-
-static int tc6393xb_probe(struct platform_device *dev)
-{
- struct tc6393xb_platform_data *tcpd = dev_get_platdata(&dev->dev);
- struct tc6393xb *tc6393xb;
- struct resource *iomem, *rscr;
- int ret;
-
- iomem = platform_get_resource(dev, IORESOURCE_MEM, 0);
- if (!iomem)
- return -EINVAL;
-
- tc6393xb = kzalloc(sizeof *tc6393xb, GFP_KERNEL);
- if (!tc6393xb) {
- ret = -ENOMEM;
- goto err_kzalloc;
- }
- tc6393xb->dev = &dev->dev;
-
- raw_spin_lock_init(&tc6393xb->lock);
-
- platform_set_drvdata(dev, tc6393xb);
-
- ret = platform_get_irq(dev, 0);
- if (ret >= 0)
- tc6393xb->irq = ret;
- else
- goto err_noirq;
-
- tc6393xb->iomem = iomem;
- tc6393xb->irq_base = tcpd->irq_base;
-
- tc6393xb->clk = clk_get(&dev->dev, "CLK_CK3P6MI");
- if (IS_ERR(tc6393xb->clk)) {
- ret = PTR_ERR(tc6393xb->clk);
- goto err_clk_get;
- }
-
- rscr = &tc6393xb->rscr;
- rscr->name = "tc6393xb-core";
- rscr->start = iomem->start;
- rscr->end = iomem->start + 0xff;
- rscr->flags = IORESOURCE_MEM;
-
- ret = request_resource(iomem, rscr);
- if (ret)
- goto err_request_scr;
-
- tc6393xb->scr = ioremap(rscr->start, resource_size(rscr));
- if (!tc6393xb->scr) {
- ret = -ENOMEM;
- goto err_ioremap;
- }
-
- ret = clk_prepare_enable(tc6393xb->clk);
- if (ret)
- goto err_clk_enable;
-
- ret = tcpd->enable(dev);
- if (ret)
- goto err_enable;
-
- iowrite8(0, tc6393xb->scr + SCR_FER);
- iowrite16(tcpd->scr_pll2cr, tc6393xb->scr + SCR_PLL2CR);
- iowrite16(SCR_CCR_UNK1 | SCR_CCR_HCLK_48,
- tc6393xb->scr + SCR_CCR);
- iowrite16(SCR_MCR_RDY_OPENDRAIN | SCR_MCR_RDY_UNK | SCR_MCR_RDY_EN |
- SCR_MCR_INT_OPENDRAIN | SCR_MCR_INT_UNK | SCR_MCR_INT_EN |
- BIT(15), tc6393xb->scr + SCR_MCR);
- iowrite16(tcpd->scr_gper, tc6393xb->scr + SCR_GPER);
- iowrite8(0, tc6393xb->scr + SCR_IRR);
- iowrite8(0xbf, tc6393xb->scr + SCR_IMR);
-
- printk(KERN_INFO "Toshiba tc6393xb revision %d at 0x%08lx, irq %d\n",
- tmio_ioread8(tc6393xb->scr + SCR_REVID),
- (unsigned long) iomem->start, tc6393xb->irq);
-
- ret = tc6393xb_register_gpio(tc6393xb);
- if (ret)
- goto err_gpio_add;
-
- tc6393xb_attach_irq(dev);
-
- tc6393xb_cells[TC6393XB_CELL_NAND].platform_data = tcpd->nand_data;
- tc6393xb_cells[TC6393XB_CELL_NAND].pdata_size =
- sizeof(*tcpd->nand_data);
- tc6393xb_cells[TC6393XB_CELL_FB].platform_data = tcpd->fb_data;
- tc6393xb_cells[TC6393XB_CELL_FB].pdata_size = sizeof(*tcpd->fb_data);
-
- ret = mfd_add_devices(&dev->dev, dev->id,
- tc6393xb_cells, ARRAY_SIZE(tc6393xb_cells),
- iomem, tcpd->irq_base, NULL);
-
- if (!ret)
- return 0;
-
- tc6393xb_detach_irq(dev);
-err_gpio_add:
- tcpd->disable(dev);
-err_enable:
- clk_disable_unprepare(tc6393xb->clk);
-err_clk_enable:
- iounmap(tc6393xb->scr);
-err_ioremap:
- release_resource(&tc6393xb->rscr);
-err_request_scr:
- clk_put(tc6393xb->clk);
-err_noirq:
-err_clk_get:
- kfree(tc6393xb);
-err_kzalloc:
- return ret;
-}
-
-static int tc6393xb_remove(struct platform_device *dev)
-{
- struct tc6393xb_platform_data *tcpd = dev_get_platdata(&dev->dev);
- struct tc6393xb *tc6393xb = platform_get_drvdata(dev);
-
- mfd_remove_devices(&dev->dev);
-
- tc6393xb_detach_irq(dev);
-
- tcpd->disable(dev);
- clk_disable_unprepare(tc6393xb->clk);
- iounmap(tc6393xb->scr);
- release_resource(&tc6393xb->rscr);
- clk_put(tc6393xb->clk);
- kfree(tc6393xb);
-
- return 0;
-}
-
-static int tc6393xb_suspend(struct platform_device *dev, pm_message_t state)
-{
- struct tc6393xb_platform_data *tcpd = dev_get_platdata(&dev->dev);
- struct tc6393xb *tc6393xb = platform_get_drvdata(dev);
- int i, ret;
-
- tc6393xb->suspend_state.ccr = ioread16(tc6393xb->scr + SCR_CCR);
- tc6393xb->suspend_state.fer = ioread8(tc6393xb->scr + SCR_FER);
-
- for (i = 0; i < 3; i++) {
- tc6393xb->suspend_state.gpo_dsr[i] =
- ioread8(tc6393xb->scr + SCR_GPO_DSR(i));
- tc6393xb->suspend_state.gpo_doecr[i] =
- ioread8(tc6393xb->scr + SCR_GPO_DOECR(i));
- tc6393xb->suspend_state.gpi_bcr[i] =
- ioread8(tc6393xb->scr + SCR_GPI_BCR(i));
- }
- ret = tcpd->suspend(dev);
- clk_disable_unprepare(tc6393xb->clk);
-
- return ret;
-}
-
-static int tc6393xb_resume(struct platform_device *dev)
-{
- struct tc6393xb_platform_data *tcpd = dev_get_platdata(&dev->dev);
- struct tc6393xb *tc6393xb = platform_get_drvdata(dev);
- int ret;
- int i;
-
- ret = clk_prepare_enable(tc6393xb->clk);
- if (ret)
- return ret;
-
- ret = tcpd->resume(dev);
- if (ret)
- return ret;
-
- if (!tcpd->resume_restore)
- return 0;
-
- iowrite8(tc6393xb->suspend_state.fer, tc6393xb->scr + SCR_FER);
- iowrite16(tcpd->scr_pll2cr, tc6393xb->scr + SCR_PLL2CR);
- iowrite16(tc6393xb->suspend_state.ccr, tc6393xb->scr + SCR_CCR);
- iowrite16(SCR_MCR_RDY_OPENDRAIN | SCR_MCR_RDY_UNK | SCR_MCR_RDY_EN |
- SCR_MCR_INT_OPENDRAIN | SCR_MCR_INT_UNK | SCR_MCR_INT_EN |
- BIT(15), tc6393xb->scr + SCR_MCR);
- iowrite16(tcpd->scr_gper, tc6393xb->scr + SCR_GPER);
- iowrite8(0, tc6393xb->scr + SCR_IRR);
- iowrite8(0xbf, tc6393xb->scr + SCR_IMR);
-
- for (i = 0; i < 3; i++) {
- iowrite8(tc6393xb->suspend_state.gpo_dsr[i],
- tc6393xb->scr + SCR_GPO_DSR(i));
- iowrite8(tc6393xb->suspend_state.gpo_doecr[i],
- tc6393xb->scr + SCR_GPO_DOECR(i));
- iowrite8(tc6393xb->suspend_state.gpi_bcr[i],
- tc6393xb->scr + SCR_GPI_BCR(i));
- }
-
- return 0;
-}
-
-static struct platform_driver tc6393xb_driver = {
- .probe = tc6393xb_probe,
- .remove = tc6393xb_remove,
- .suspend = pm_sleep_ptr(tc6393xb_suspend),
- .resume = pm_sleep_ptr(tc6393xb_resume),
-
- .driver = {
- .name = "tc6393xb",
- },
-};
-
-static int __init tc6393xb_init(void)
-{
- return platform_driver_register(&tc6393xb_driver);
-}
-
-static void __exit tc6393xb_exit(void)
-{
- platform_driver_unregister(&tc6393xb_driver);
-}
-
-subsys_initcall(tc6393xb_init);
-module_exit(tc6393xb_exit);
-
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Ian Molton, Dmitry Baryshkov and Dirk Opfer");
-MODULE_DESCRIPTION("tc6393xb Toshiba Mobile IO Controller");
-MODULE_ALIAS("platform:tc6393xb");
-
diff --git a/drivers/mfd/tmio_core.c b/drivers/mfd/tmio_core.c
deleted file mode 100644
index 7ee873551482..000000000000
--- a/drivers/mfd/tmio_core.c
+++ /dev/null
@@ -1,70 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright(c) 2009 Ian Molton <spyro@f2s.com>
- */
-
-#include <linux/export.h>
-#include <linux/mfd/tmio.h>
-
-#define CNF_CMD 0x04
-#define CNF_CTL_BASE 0x10
-#define CNF_INT_PIN 0x3d
-#define CNF_STOP_CLK_CTL 0x40
-#define CNF_GCLK_CTL 0x41
-#define CNF_SD_CLK_MODE 0x42
-#define CNF_PIN_STATUS 0x44
-#define CNF_PWR_CTL_1 0x48
-#define CNF_PWR_CTL_2 0x49
-#define CNF_PWR_CTL_3 0x4a
-#define CNF_CARD_DETECT_MODE 0x4c
-#define CNF_SD_SLOT 0x50
-#define CNF_EXT_GCLK_CTL_1 0xf0
-#define CNF_EXT_GCLK_CTL_2 0xf1
-#define CNF_EXT_GCLK_CTL_3 0xf9
-#define CNF_SD_LED_EN_1 0xfa
-#define CNF_SD_LED_EN_2 0xfe
-
-#define SDCREN 0x2 /* Enable access to MMC CTL regs. (flag in COMMAND_REG)*/
-
-int tmio_core_mmc_enable(void __iomem *cnf, int shift, unsigned long base)
-{
- /* Enable the MMC/SD Control registers */
- sd_config_write16(cnf, shift, CNF_CMD, SDCREN);
- sd_config_write32(cnf, shift, CNF_CTL_BASE, base & 0xfffe);
-
- /* Disable SD power during suspend */
- sd_config_write8(cnf, shift, CNF_PWR_CTL_3, 0x01);
-
- /* The below is required but why? FIXME */
- sd_config_write8(cnf, shift, CNF_STOP_CLK_CTL, 0x1f);
-
- /* Power down SD bus */
- sd_config_write8(cnf, shift, CNF_PWR_CTL_2, 0x00);
-
- return 0;
-}
-EXPORT_SYMBOL(tmio_core_mmc_enable);
-
-int tmio_core_mmc_resume(void __iomem *cnf, int shift, unsigned long base)
-{
-
- /* Enable the MMC/SD Control registers */
- sd_config_write16(cnf, shift, CNF_CMD, SDCREN);
- sd_config_write32(cnf, shift, CNF_CTL_BASE, base & 0xfffe);
-
- return 0;
-}
-EXPORT_SYMBOL(tmio_core_mmc_resume);
-
-void tmio_core_mmc_pwr(void __iomem *cnf, int shift, int state)
-{
- sd_config_write8(cnf, shift, CNF_PWR_CTL_2, state ? 0x02 : 0x00);
-}
-EXPORT_SYMBOL(tmio_core_mmc_pwr);
-
-void tmio_core_mmc_clk_div(void __iomem *cnf, int shift, int state)
-{
- sd_config_write8(cnf, shift, CNF_SD_CLK_MODE, state ? 1 : 0);
-}
-EXPORT_SYMBOL(tmio_core_mmc_clk_div);
-
diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c
index 62be2326c9b2..e2d9a93be43b 100644
--- a/drivers/mfd/twl-core.c
+++ b/drivers/mfd/twl-core.c
@@ -110,8 +110,8 @@
#define TWL6030_BASEADD_PWM 0x00BA
#define TWL6030_BASEADD_GASGAUGE 0x00C0
#define TWL6030_BASEADD_PIH 0x00D0
-#define TWL6030_BASEADD_CHARGER 0x00E0
#define TWL6032_BASEADD_CHARGER 0x00DA
+#define TWL6030_BASEADD_CHARGER 0x00E0
#define TWL6030_BASEADD_LED 0x00F4
/* subchip/slave 2 0x4A - DFT */
@@ -353,6 +353,9 @@ static struct twl_mapping twl6030_map[] = {
{ 2, TWL6030_BASEADD_ZERO },
{ 1, TWL6030_BASEADD_GPADC_CTRL },
{ 1, TWL6030_BASEADD_GASGAUGE },
+
+ /* TWL6032 specific charger registers */
+ { 1, TWL6032_BASEADD_CHARGER },
};
static const struct regmap_config twl6030_regmap_config[3] = {
@@ -803,10 +806,6 @@ twl_probe(struct i2c_client *client)
if ((id->driver_data) & TWL6030_CLASS) {
twl_priv->twl_id = TWL6030_CLASS_ID;
twl_priv->twl_map = &twl6030_map[0];
- /* The charger base address is different in twl6032 */
- if ((id->driver_data) & TWL6032_SUBCLASS)
- twl_priv->twl_map[TWL_MODULE_MAIN_CHARGE].base =
- TWL6032_BASEADD_CHARGER;
twl_regmap_config = twl6030_regmap_config;
} else {
twl_priv->twl_id = TWL4030_CLASS_ID;
diff --git a/drivers/mfd/twl4030-power.c b/drivers/mfd/twl4030-power.c
index 6b36932263ba..e35b0f788c50 100644
--- a/drivers/mfd/twl4030-power.c
+++ b/drivers/mfd/twl4030-power.c
@@ -953,18 +953,12 @@ relock:
return err;
}
-static int twl4030_power_remove(struct platform_device *pdev)
-{
- return 0;
-}
-
static struct platform_driver twl4030_power_driver = {
.driver = {
.name = "twl4030_power",
.of_match_table = of_match_ptr(twl4030_power_of_match),
},
.probe = twl4030_power_probe,
- .remove = twl4030_power_remove,
};
module_platform_driver(twl4030_power_driver);
diff --git a/drivers/mfd/ucb1400_core.c b/drivers/mfd/ucb1400_core.c
deleted file mode 100644
index ac1d18039568..000000000000
--- a/drivers/mfd/ucb1400_core.c
+++ /dev/null
@@ -1,158 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Core functions for:
- * Philips UCB1400 multifunction chip
- *
- * Based on ucb1400_ts.c:
- * Author: Nicolas Pitre
- * Created: September 25, 2006
- * Copyright: MontaVista Software, Inc.
- *
- * Spliting done by: Marek Vasut <marek.vasut@gmail.com>
- * If something doesn't work and it worked before spliting, e-mail me,
- * dont bother Nicolas please ;-)
- *
- * This code is heavily based on ucb1x00-*.c copyrighted by Russell King
- * covering the UCB1100, UCB1200 and UCB1300.. Support for the UCB1400 has
- * been made separate from ucb1x00-core/ucb1x00-ts on Russell's request.
- */
-
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/ucb1400.h>
-
-unsigned int ucb1400_adc_read(struct snd_ac97 *ac97, u16 adc_channel,
- int adcsync)
-{
- unsigned int val;
-
- if (adcsync)
- adc_channel |= UCB_ADC_SYNC_ENA;
-
- ucb1400_reg_write(ac97, UCB_ADC_CR, UCB_ADC_ENA | adc_channel);
- ucb1400_reg_write(ac97, UCB_ADC_CR, UCB_ADC_ENA | adc_channel |
- UCB_ADC_START);
-
- while (!((val = ucb1400_reg_read(ac97, UCB_ADC_DATA))
- & UCB_ADC_DAT_VALID))
- schedule_timeout_uninterruptible(1);
-
- return val & UCB_ADC_DAT_MASK;
-}
-EXPORT_SYMBOL_GPL(ucb1400_adc_read);
-
-static int ucb1400_core_probe(struct device *dev)
-{
- int err;
- struct ucb1400 *ucb;
- struct ucb1400_ts ucb_ts;
- struct ucb1400_gpio ucb_gpio;
- struct snd_ac97 *ac97;
- struct ucb1400_pdata *pdata = dev_get_platdata(dev);
-
- memset(&ucb_ts, 0, sizeof(ucb_ts));
- memset(&ucb_gpio, 0, sizeof(ucb_gpio));
-
- ucb = kzalloc(sizeof(struct ucb1400), GFP_KERNEL);
- if (!ucb) {
- err = -ENOMEM;
- goto err;
- }
-
- dev_set_drvdata(dev, ucb);
-
- ac97 = to_ac97_t(dev);
-
- ucb_ts.id = ucb1400_reg_read(ac97, UCB_ID);
- if (ucb_ts.id != UCB_ID_1400) {
- err = -ENODEV;
- goto err0;
- }
-
- /* GPIO */
- ucb_gpio.ac97 = ac97;
- if (pdata)
- ucb_gpio.gpio_offset = pdata->gpio_offset;
-
- ucb->ucb1400_gpio = platform_device_alloc("ucb1400_gpio", -1);
- if (!ucb->ucb1400_gpio) {
- err = -ENOMEM;
- goto err0;
- }
- err = platform_device_add_data(ucb->ucb1400_gpio, &ucb_gpio,
- sizeof(ucb_gpio));
- if (err)
- goto err1;
- err = platform_device_add(ucb->ucb1400_gpio);
- if (err)
- goto err1;
-
- /* TOUCHSCREEN */
- ucb_ts.ac97 = ac97;
-
- if (pdata != NULL && pdata->irq >= 0)
- ucb_ts.irq = pdata->irq;
- else
- ucb_ts.irq = -1;
-
- ucb->ucb1400_ts = platform_device_alloc("ucb1400_ts", -1);
- if (!ucb->ucb1400_ts) {
- err = -ENOMEM;
- goto err2;
- }
- err = platform_device_add_data(ucb->ucb1400_ts, &ucb_ts,
- sizeof(ucb_ts));
- if (err)
- goto err3;
- err = platform_device_add(ucb->ucb1400_ts);
- if (err)
- goto err3;
-
- return 0;
-
-err3:
- platform_device_put(ucb->ucb1400_ts);
-err2:
- platform_device_del(ucb->ucb1400_gpio);
-err1:
- platform_device_put(ucb->ucb1400_gpio);
-err0:
- kfree(ucb);
-err:
- return err;
-}
-
-static int ucb1400_core_remove(struct device *dev)
-{
- struct ucb1400 *ucb = dev_get_drvdata(dev);
-
- platform_device_unregister(ucb->ucb1400_ts);
- platform_device_unregister(ucb->ucb1400_gpio);
-
- kfree(ucb);
- return 0;
-}
-
-static struct device_driver ucb1400_core_driver = {
- .name = "ucb1400_core",
- .bus = &ac97_bus_type,
- .probe = ucb1400_core_probe,
- .remove = ucb1400_core_remove,
-};
-
-static int __init ucb1400_core_init(void)
-{
- return driver_register(&ucb1400_core_driver);
-}
-
-static void __exit ucb1400_core_exit(void)
-{
- driver_unregister(&ucb1400_core_driver);
-}
-
-module_init(ucb1400_core_init);
-module_exit(ucb1400_core_exit);
-
-MODULE_DESCRIPTION("Philips UCB1400 driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/wm97xx-core.c b/drivers/mfd/wm97xx-core.c
index 9a2331eb1bfa..663acbb1854c 100644
--- a/drivers/mfd/wm97xx-core.c
+++ b/drivers/mfd/wm97xx-core.c
@@ -319,13 +319,11 @@ err_free_compat:
return ret;
}
-static int wm97xx_ac97_remove(struct ac97_codec_device *adev)
+static void wm97xx_ac97_remove(struct ac97_codec_device *adev)
{
struct wm97xx_priv *wm97xx = ac97_get_drvdata(adev);
snd_ac97_compat_release(wm97xx->ac97);
-
- return 0;
}
static const struct ac97_id wm97xx_ac97_ids[] = {
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 9947b7892bd5..433aa4197785 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -518,6 +518,26 @@ config VCPU_STALL_DETECTOR
If you do not intend to run this kernel as a guest, say N.
+config TMR_MANAGER
+ tristate "Select TMR Manager"
+ depends on MICROBLAZE && MB_MANAGER
+ help
+ This option enables the driver developed for TMR Manager.
+ The Triple Modular Redundancy(TMR) manager provides support for
+ fault detection.
+
+ Say N here unless you know what you are doing.
+
+config TMR_INJECT
+ tristate "Select TMR Inject"
+ depends on TMR_MANAGER && FAULT_INJECTION_DEBUG_FS
+ help
+ This option enables the driver developed for TMR Inject.
+ The Triple Modular Redundancy(TMR) Inject provides
+ fault injection.
+
+ Say N here unless you know what you are doing.
+
source "drivers/misc/c2port/Kconfig"
source "drivers/misc/eeprom/Kconfig"
source "drivers/misc/cb710/Kconfig"
@@ -532,7 +552,6 @@ source "drivers/misc/cxl/Kconfig"
source "drivers/misc/ocxl/Kconfig"
source "drivers/misc/bcm-vk/Kconfig"
source "drivers/misc/cardreader/Kconfig"
-source "drivers/misc/habanalabs/Kconfig"
source "drivers/misc/uacce/Kconfig"
source "drivers/misc/pvpanic/Kconfig"
source "drivers/misc/mchp_pci1xxxx/Kconfig"
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 87b54a4a4422..56de43943cd5 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -56,7 +56,6 @@ obj-$(CONFIG_OCXL) += ocxl/
obj-$(CONFIG_BCM_VK) += bcm-vk/
obj-y += cardreader/
obj-$(CONFIG_PVPANIC) += pvpanic/
-obj-$(CONFIG_HABANA_AI) += habanalabs/
obj-$(CONFIG_UACCE) += uacce/
obj-$(CONFIG_XILINX_SDFEC) += xilinx_sdfec.o
obj-$(CONFIG_HISI_HIKEY_USB) += hisi_hikey_usb.o
@@ -64,3 +63,5 @@ obj-$(CONFIG_HI6421V600_IRQ) += hi6421v600-irq.o
obj-$(CONFIG_OPEN_DICE) += open-dice.o
obj-$(CONFIG_GP_PCI1XXXX) += mchp_pci1xxxx/
obj-$(CONFIG_VCPU_STALL_DETECTOR) += vcpu_stall_detector.o
+obj-$(CONFIG_TMR_MANAGER) += xilinx_tmr_manager.o
+obj-$(CONFIG_TMR_INJECT) += xilinx_tmr_inject.o
diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c
index acaa44809c58..76b5ea66dfa1 100644
--- a/drivers/misc/cxl/context.c
+++ b/drivers/misc/cxl/context.c
@@ -220,7 +220,7 @@ int cxl_context_iomap(struct cxl_context *ctx, struct vm_area_struct *vma)
pr_devel("%s: mmio physical: %llx pe: %i master:%i\n", __func__,
ctx->psn_phys, ctx->pe , ctx->master);
- vma->vm_flags |= VM_IO | VM_PFNMAP;
+ vm_flags_set(vma, VM_IO | VM_PFNMAP);
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma->vm_ops = &cxl_mmap_vmops;
return 0;
diff --git a/drivers/misc/eeprom/at25.c b/drivers/misc/eeprom/at25.c
index bdffc6543f6f..65d49a6de1a7 100644
--- a/drivers/misc/eeprom/at25.c
+++ b/drivers/misc/eeprom/at25.c
@@ -437,12 +437,6 @@ static int at25_probe(struct spi_device *spi)
struct spi_eeprom *pdata;
bool is_fram;
- err = device_property_match_string(&spi->dev, "compatible", "cypress,fm25");
- if (err >= 0)
- is_fram = true;
- else
- is_fram = false;
-
/*
* Ping the chip ... the status register is pretty portable,
* unlike probing manufacturer IDs. We do expect that system
@@ -462,6 +456,8 @@ static int at25_probe(struct spi_device *spi)
at25->spi = spi;
spi_set_drvdata(spi, at25);
+ is_fram = fwnode_device_is_compatible(dev_fwnode(&spi->dev), "cypress,fm25");
+
/* Chip description */
pdata = dev_get_platdata(&spi->dev);
if (pdata) {
diff --git a/drivers/misc/eeprom/idt_89hpesx.c b/drivers/misc/eeprom/idt_89hpesx.c
index 4e07ee9cb500..7075d0b37881 100644
--- a/drivers/misc/eeprom/idt_89hpesx.c
+++ b/drivers/misc/eeprom/idt_89hpesx.c
@@ -1566,12 +1566,20 @@ static struct i2c_driver idt_driver = {
*/
static int __init idt_init(void)
{
+ int ret;
+
/* Create Debugfs directory first */
if (debugfs_initialized())
csr_dbgdir = debugfs_create_dir("idt_csr", NULL);
/* Add new i2c-device driver */
- return i2c_add_driver(&idt_driver);
+ ret = i2c_add_driver(&idt_driver);
+ if (ret) {
+ debugfs_remove_recursive(csr_dbgdir);
+ return ret;
+ }
+
+ return 0;
}
module_init(idt_init);
diff --git a/drivers/misc/enclosure.c b/drivers/misc/enclosure.c
index 1b010d9267c9..4ba966529458 100644
--- a/drivers/misc/enclosure.c
+++ b/drivers/misc/enclosure.c
@@ -32,7 +32,7 @@ static struct class enclosure_class;
* found. @start can be used as a starting point to obtain multiple
* enclosures per parent (should begin with NULL and then be set to
* each returned enclosure device). Obtains a reference to the
- * enclosure class device which must be released with device_put().
+ * enclosure class device which must be released with put_device().
* If @start is not NULL, a reference must be taken on it which is
* released before returning (this allows a loop through all
* enclosures to exit with only the reference on the enclosure of
diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
index 5310606113fe..7ccaca1b7cb8 100644
--- a/drivers/misc/fastrpc.c
+++ b/drivers/misc/fastrpc.c
@@ -2315,7 +2315,18 @@ static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev)
data->domain_id = domain_id;
data->rpdev = rpdev;
- return of_platform_populate(rdev->of_node, NULL, NULL, rdev);
+ err = of_platform_populate(rdev->of_node, NULL, NULL, rdev);
+ if (err)
+ goto populate_error;
+
+ return 0;
+
+populate_error:
+ if (data->fdevice)
+ misc_deregister(&data->fdevice->miscdev);
+ if (data->secure_fdevice)
+ misc_deregister(&data->secure_fdevice->miscdev);
+
fdev_error:
kfree(data);
return err;
diff --git a/drivers/misc/genwqe/card_utils.c b/drivers/misc/genwqe/card_utils.c
index 1167463f26fb..f778e11237a6 100644
--- a/drivers/misc/genwqe/card_utils.c
+++ b/drivers/misc/genwqe/card_utils.c
@@ -151,6 +151,9 @@ int genwqe_read_app_id(struct genwqe_dev *cd, char *app_name, int len)
return i;
}
+#define CRC32_POLYNOMIAL 0x20044009
+static u32 crc32_tab[256]; /* crc32 lookup table */
+
/**
* genwqe_init_crc32() - Prepare a lookup table for fast crc32 calculations
*
@@ -159,9 +162,6 @@ int genwqe_read_app_id(struct genwqe_dev *cd, char *app_name, int len)
*
* Genwqe's Polynomial = 0x20044009
*/
-#define CRC32_POLYNOMIAL 0x20044009
-static u32 crc32_tab[256]; /* crc32 lookup table */
-
void genwqe_init_crc32(void)
{
int i, j;
diff --git a/drivers/misc/habanalabs/include/gaudi2/arc/gaudi2_arc_common_packets.h b/drivers/misc/habanalabs/include/gaudi2/arc/gaudi2_arc_common_packets.h
deleted file mode 100644
index 2cf30c206ac6..000000000000
--- a/drivers/misc/habanalabs/include/gaudi2/arc/gaudi2_arc_common_packets.h
+++ /dev/null
@@ -1,213 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0
- *
- * Copyright (C) 2020 HabanaLabs Ltd.
- * All Rights Reserved.
- */
-
-#ifndef __GAUDI2_ARC_COMMON_PACKETS_H__
-#define __GAUDI2_ARC_COMMON_PACKETS_H__
-
-/*
- * CPU IDs for each ARC CPUs
- */
-
-#define CPU_ID_SCHED_ARC0 0 /* FARM_ARC0 */
-#define CPU_ID_SCHED_ARC1 1 /* FARM_ARC1 */
-#define CPU_ID_SCHED_ARC2 2 /* FARM_ARC2 */
-#define CPU_ID_SCHED_ARC3 3 /* FARM_ARC3 */
-/* Dcore1 MME Engine ARC instance used as scheduler */
-#define CPU_ID_SCHED_ARC4 4 /* DCORE1_MME0 */
-/* Dcore3 MME Engine ARC instance used as scheduler */
-#define CPU_ID_SCHED_ARC5 5 /* DCORE3_MME0 */
-
-#define CPU_ID_TPC_QMAN_ARC0 6 /* DCORE0_TPC0 */
-#define CPU_ID_TPC_QMAN_ARC1 7 /* DCORE0_TPC1 */
-#define CPU_ID_TPC_QMAN_ARC2 8 /* DCORE0_TPC2 */
-#define CPU_ID_TPC_QMAN_ARC3 9 /* DCORE0_TPC3 */
-#define CPU_ID_TPC_QMAN_ARC4 10 /* DCORE0_TPC4 */
-#define CPU_ID_TPC_QMAN_ARC5 11 /* DCORE0_TPC5 */
-#define CPU_ID_TPC_QMAN_ARC6 12 /* DCORE1_TPC0 */
-#define CPU_ID_TPC_QMAN_ARC7 13 /* DCORE1_TPC1 */
-#define CPU_ID_TPC_QMAN_ARC8 14 /* DCORE1_TPC2 */
-#define CPU_ID_TPC_QMAN_ARC9 15 /* DCORE1_TPC3 */
-#define CPU_ID_TPC_QMAN_ARC10 16 /* DCORE1_TPC4 */
-#define CPU_ID_TPC_QMAN_ARC11 17 /* DCORE1_TPC5 */
-#define CPU_ID_TPC_QMAN_ARC12 18 /* DCORE2_TPC0 */
-#define CPU_ID_TPC_QMAN_ARC13 19 /* DCORE2_TPC1 */
-#define CPU_ID_TPC_QMAN_ARC14 20 /* DCORE2_TPC2 */
-#define CPU_ID_TPC_QMAN_ARC15 21 /* DCORE2_TPC3 */
-#define CPU_ID_TPC_QMAN_ARC16 22 /* DCORE2_TPC4 */
-#define CPU_ID_TPC_QMAN_ARC17 23 /* DCORE2_TPC5 */
-#define CPU_ID_TPC_QMAN_ARC18 24 /* DCORE3_TPC0 */
-#define CPU_ID_TPC_QMAN_ARC19 25 /* DCORE3_TPC1 */
-#define CPU_ID_TPC_QMAN_ARC20 26 /* DCORE3_TPC2 */
-#define CPU_ID_TPC_QMAN_ARC21 27 /* DCORE3_TPC3 */
-#define CPU_ID_TPC_QMAN_ARC22 28 /* DCORE3_TPC4 */
-#define CPU_ID_TPC_QMAN_ARC23 29 /* DCORE3_TPC5 */
-#define CPU_ID_TPC_QMAN_ARC24 30 /* DCORE0_TPC6 - Never present */
-
-#define CPU_ID_MME_QMAN_ARC0 31 /* DCORE0_MME0 */
-#define CPU_ID_MME_QMAN_ARC1 32 /* DCORE2_MME0 */
-
-#define CPU_ID_EDMA_QMAN_ARC0 33 /* DCORE0_EDMA0 */
-#define CPU_ID_EDMA_QMAN_ARC1 34 /* DCORE0_EDMA1 */
-#define CPU_ID_EDMA_QMAN_ARC2 35 /* DCORE1_EDMA0 */
-#define CPU_ID_EDMA_QMAN_ARC3 36 /* DCORE1_EDMA1 */
-#define CPU_ID_EDMA_QMAN_ARC4 37 /* DCORE2_EDMA0 */
-#define CPU_ID_EDMA_QMAN_ARC5 38 /* DCORE2_EDMA1 */
-#define CPU_ID_EDMA_QMAN_ARC6 39 /* DCORE3_EDMA0 */
-#define CPU_ID_EDMA_QMAN_ARC7 40 /* DCORE3_EDMA1 */
-
-#define CPU_ID_PDMA_QMAN_ARC0 41 /* DCORE0_PDMA0 */
-#define CPU_ID_PDMA_QMAN_ARC1 42 /* DCORE0_PDMA1 */
-
-#define CPU_ID_ROT_QMAN_ARC0 43 /* ROT0 */
-#define CPU_ID_ROT_QMAN_ARC1 44 /* ROT1 */
-
-#define CPU_ID_NIC_QMAN_ARC0 45 /* NIC0_0 */
-#define CPU_ID_NIC_QMAN_ARC1 46 /* NIC0_1 */
-#define CPU_ID_NIC_QMAN_ARC2 47 /* NIC1_0 */
-#define CPU_ID_NIC_QMAN_ARC3 48 /* NIC1_1 */
-#define CPU_ID_NIC_QMAN_ARC4 49 /* NIC2_0 */
-#define CPU_ID_NIC_QMAN_ARC5 50 /* NIC2_1 */
-#define CPU_ID_NIC_QMAN_ARC6 51 /* NIC3_0 */
-#define CPU_ID_NIC_QMAN_ARC7 52 /* NIC3_1 */
-#define CPU_ID_NIC_QMAN_ARC8 53 /* NIC4_0 */
-#define CPU_ID_NIC_QMAN_ARC9 54 /* NIC4_1 */
-#define CPU_ID_NIC_QMAN_ARC10 55 /* NIC5_0 */
-#define CPU_ID_NIC_QMAN_ARC11 56 /* NIC5_1 */
-#define CPU_ID_NIC_QMAN_ARC12 57 /* NIC6_0 */
-#define CPU_ID_NIC_QMAN_ARC13 58 /* NIC6_1 */
-#define CPU_ID_NIC_QMAN_ARC14 59 /* NIC7_0 */
-#define CPU_ID_NIC_QMAN_ARC15 60 /* NIC7_1 */
-#define CPU_ID_NIC_QMAN_ARC16 61 /* NIC8_0 */
-#define CPU_ID_NIC_QMAN_ARC17 62 /* NIC8_1 */
-#define CPU_ID_NIC_QMAN_ARC18 63 /* NIC9_0 */
-#define CPU_ID_NIC_QMAN_ARC19 64 /* NIC9_1 */
-#define CPU_ID_NIC_QMAN_ARC20 65 /* NIC10_0 */
-#define CPU_ID_NIC_QMAN_ARC21 66 /* NIC10_1 */
-#define CPU_ID_NIC_QMAN_ARC22 67 /* NIC11_0 */
-#define CPU_ID_NIC_QMAN_ARC23 68 /* NIC11_1 */
-
-#define CPU_ID_MAX 69
-#define CPU_ID_SCHED_MAX 6
-
-#define CPU_ID_ALL 0xFE
-#define CPU_ID_INVALID 0xFF
-
-enum arc_regions_t {
- ARC_REGION0_UNSED = 0,
- /*
- * Extension registers
- * None
- */
- ARC_REGION1_SRAM = 1,
- /*
- * Extension registers
- * AUX_SRAM_LSB_ADDR
- * AUX_SRAM_MSB_ADDR
- * ARC Address: 0x1000_0000
- */
- ARC_REGION2_CFG = 2,
- /*
- * Extension registers
- * AUX_CFG_LSB_ADDR
- * AUX_CFG_MSB_ADDR
- * ARC Address: 0x2000_0000
- */
- ARC_REGION3_GENERAL = 3,
- /*
- * Extension registers
- * AUX_GENERAL_PURPOSE_LSB_ADDR_0
- * AUX_GENERAL_PURPOSE_MSB_ADDR_0
- * ARC Address: 0x3000_0000
- */
- ARC_REGION4_HBM0_FW = 4,
- /*
- * Extension registers
- * AUX_HBM0_LSB_ADDR
- * AUX_HBM0_MSB_ADDR
- * AUX_HBM0_OFFSET
- * ARC Address: 0x4000_0000
- */
- ARC_REGION5_HBM1_GC_DATA = 5,
- /*
- * Extension registers
- * AUX_HBM1_LSB_ADDR
- * AUX_HBM1_MSB_ADDR
- * AUX_HBM1_OFFSET
- * ARC Address: 0x5000_0000
- */
- ARC_REGION6_HBM2_GC_DATA = 6,
- /*
- * Extension registers
- * AUX_HBM2_LSB_ADDR
- * AUX_HBM2_MSB_ADDR
- * AUX_HBM2_OFFSET
- * ARC Address: 0x6000_0000
- */
- ARC_REGION7_HBM3_GC_DATA = 7,
- /*
- * Extension registers
- * AUX_HBM3_LSB_ADDR
- * AUX_HBM3_MSB_ADDR
- * AUX_HBM3_OFFSET
- * ARC Address: 0x7000_0000
- */
- ARC_REGION8_DCCM = 8,
- /*
- * Extension registers
- * None
- * ARC Address: 0x8000_0000
- */
- ARC_REGION9_PCIE = 9,
- /*
- * Extension registers
- * AUX_PCIE_LSB_ADDR
- * AUX_PCIE_MSB_ADDR
- * ARC Address: 0x9000_0000
- */
- ARC_REGION10_GENERAL = 10,
- /*
- * Extension registers
- * AUX_GENERAL_PURPOSE_LSB_ADDR_1
- * AUX_GENERAL_PURPOSE_MSB_ADDR_1
- * ARC Address: 0xA000_0000
- */
- ARC_REGION11_GENERAL = 11,
- /*
- * Extension registers
- * AUX_GENERAL_PURPOSE_LSB_ADDR_2
- * AUX_GENERAL_PURPOSE_MSB_ADDR_2
- * ARC Address: 0xB000_0000
- */
- ARC_REGION12_GENERAL = 12,
- /*
- * Extension registers
- * AUX_GENERAL_PURPOSE_LSB_ADDR_3
- * AUX_GENERAL_PURPOSE_MSB_ADDR_3
- * ARC Address: 0xC000_0000
- */
- ARC_REGION13_GENERAL = 13,
- /*
- * Extension registers
- * AUX_GENERAL_PURPOSE_LSB_ADDR_4
- * AUX_GENERAL_PURPOSE_MSB_ADDR_4
- * ARC Address: 0xD000_0000
- */
- ARC_REGION14_GENERAL = 14,
- /*
- * Extension registers
- * AUX_GENERAL_PURPOSE_LSB_ADDR_5
- * AUX_GENERAL_PURPOSE_MSB_ADDR_5
- * ARC Address: 0xE000_0000
- */
- ARC_REGION15_LBU = 15
- /*
- * Extension registers
- * None
- * ARC Address: 0xF000_0000
- */
-};
-
-#endif /* __GAUDI2_ARC_COMMON_PACKETS_H__ */
diff --git a/drivers/misc/isl29003.c b/drivers/misc/isl29003.c
index aeda2fa89e61..147b58f7968d 100644
--- a/drivers/misc/isl29003.c
+++ b/drivers/misc/isl29003.c
@@ -186,7 +186,7 @@ static ssize_t isl29003_show_range(struct device *dev,
{
struct i2c_client *client = to_i2c_client(dev);
- return sprintf(buf, "%i\n", isl29003_get_range(client));
+ return sysfs_emit(buf, "%i\n", isl29003_get_range(client));
}
static ssize_t isl29003_store_range(struct device *dev,
@@ -222,7 +222,7 @@ static ssize_t isl29003_show_resolution(struct device *dev,
{
struct i2c_client *client = to_i2c_client(dev);
- return sprintf(buf, "%d\n", isl29003_get_resolution(client));
+ return sysfs_emit(buf, "%d\n", isl29003_get_resolution(client));
}
static ssize_t isl29003_store_resolution(struct device *dev,
@@ -256,7 +256,7 @@ static ssize_t isl29003_show_mode(struct device *dev,
{
struct i2c_client *client = to_i2c_client(dev);
- return sprintf(buf, "%d\n", isl29003_get_mode(client));
+ return sysfs_emit(buf, "%d\n", isl29003_get_mode(client));
}
static ssize_t isl29003_store_mode(struct device *dev,
@@ -291,7 +291,7 @@ static ssize_t isl29003_show_power_state(struct device *dev,
{
struct i2c_client *client = to_i2c_client(dev);
- return sprintf(buf, "%d\n", isl29003_get_power_state(client));
+ return sysfs_emit(buf, "%d\n", isl29003_get_power_state(client));
}
static ssize_t isl29003_store_power_state(struct device *dev,
@@ -327,7 +327,7 @@ static ssize_t isl29003_show_lux(struct device *dev,
if (!isl29003_get_power_state(client))
return -EBUSY;
- return sprintf(buf, "%d\n", isl29003_get_adc_value(client));
+ return sysfs_emit(buf, "%d\n", isl29003_get_adc_value(client));
}
static DEVICE_ATTR(lux, S_IRUGO, isl29003_show_lux, NULL);
diff --git a/drivers/misc/lkdtm/heap.c b/drivers/misc/lkdtm/heap.c
index 62516078a619..0ce4cbf6abda 100644
--- a/drivers/misc/lkdtm/heap.c
+++ b/drivers/misc/lkdtm/heap.c
@@ -31,6 +31,7 @@ static void lkdtm_VMALLOC_LINEAR_OVERFLOW(void)
char *one, *two;
one = vzalloc(PAGE_SIZE);
+ OPTIMIZER_HIDE_VAR(one);
two = vzalloc(PAGE_SIZE);
pr_info("Attempting vmalloc linear overflow ...\n");
diff --git a/drivers/misc/mei/bus-fixup.c b/drivers/misc/mei/bus-fixup.c
index 6df7679d9739..211536109308 100644
--- a/drivers/misc/mei/bus-fixup.c
+++ b/drivers/misc/mei/bus-fixup.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2013-2022, Intel Corporation. All rights reserved.
+ * Copyright (c) 2013-2023, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
*/
@@ -151,7 +151,7 @@ static int mei_fwver(struct mei_cl_device *cldev)
ret = __mei_cl_send(cldev->cl, (u8 *)&req, sizeof(req), 0,
MEI_CL_IO_TX_BLOCKING);
if (ret < 0) {
- dev_err(&cldev->dev, "Could not send ReqFWVersion cmd\n");
+ dev_info(&cldev->dev, "Could not send ReqFWVersion cmd ret = %d\n", ret);
return ret;
}
@@ -163,7 +163,7 @@ static int mei_fwver(struct mei_cl_device *cldev)
* Should be at least one version block,
* error out if nothing found
*/
- dev_err(&cldev->dev, "Could not read FW version\n");
+ dev_info(&cldev->dev, "Could not read FW version ret = %d\n", bytes_recv);
return -EIO;
}
@@ -220,15 +220,15 @@ static void mei_mkhi_fix(struct mei_cl_device *cldev)
if (cldev->bus->fw_f_fw_ver_supported) {
ret = mei_fwver(cldev);
if (ret < 0)
- dev_err(&cldev->dev, "FW version command failed %d\n",
- ret);
+ dev_info(&cldev->dev, "FW version command failed %d\n",
+ ret);
}
if (cldev->bus->hbm_f_os_supported) {
ret = mei_osver(cldev);
if (ret < 0)
- dev_err(&cldev->dev, "OS version command failed %d\n",
- ret);
+ dev_info(&cldev->dev, "OS version command failed %d\n",
+ ret);
}
mei_cldev_disable(cldev);
}
@@ -247,7 +247,7 @@ static void mei_gsc_mkhi_ver(struct mei_cl_device *cldev)
ret = mei_fwver(cldev);
if (ret < 0)
- dev_err(&cldev->dev, "FW version command failed %d\n", ret);
+ dev_info(&cldev->dev, "FW version command failed %d\n", ret);
mei_cldev_disable(cldev);
}
@@ -278,8 +278,8 @@ static void mei_gsc_mkhi_fix_ver(struct mei_cl_device *cldev)
ret = mei_fwver(cldev);
if (ret < 0)
- dev_err(&cldev->dev, "FW version command failed %d\n",
- ret);
+ dev_info(&cldev->dev, "FW version command failed %d\n",
+ ret);
out:
mei_cldev_disable(cldev);
}
@@ -380,7 +380,7 @@ static int mei_nfc_if_version(struct mei_cl *cl,
ret = __mei_cl_send(cl, (u8 *)&cmd, sizeof(cmd), 0,
MEI_CL_IO_TX_BLOCKING);
if (ret < 0) {
- dev_err(bus->dev, "Could not send IF version cmd\n");
+ dev_err(bus->dev, "Could not send IF version cmd ret = %d\n", ret);
return ret;
}
@@ -395,7 +395,7 @@ static int mei_nfc_if_version(struct mei_cl *cl,
bytes_recv = __mei_cl_recv(cl, (u8 *)reply, if_version_length, &vtag,
0, 0);
if (bytes_recv < 0 || (size_t)bytes_recv < if_version_length) {
- dev_err(bus->dev, "Could not read IF version\n");
+ dev_err(bus->dev, "Could not read IF version ret = %d\n", bytes_recv);
ret = -EIO;
goto err;
}
@@ -403,7 +403,7 @@ static int mei_nfc_if_version(struct mei_cl *cl,
memcpy(ver, reply->data, sizeof(*ver));
dev_info(bus->dev, "NFC MEI VERSION: IVN 0x%x Vendor ID 0x%x Type 0x%x\n",
- ver->fw_ivn, ver->vendor_id, ver->radio_type);
+ ver->fw_ivn, ver->vendor_id, ver->radio_type);
err:
kfree(reply);
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
index a81b890c7ee6..5d7a68674d9b 100644
--- a/drivers/misc/mei/bus.c
+++ b/drivers/misc/mei/bus.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2012-2019, Intel Corporation. All rights reserved.
+ * Copyright (c) 2012-2023, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
*/
@@ -1227,9 +1227,9 @@ ATTRIBUTE_GROUPS(mei_cldev);
*
* Return: 0 on success -ENOMEM on when add_uevent_var fails
*/
-static int mei_cl_device_uevent(struct device *dev, struct kobj_uevent_env *env)
+static int mei_cl_device_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
- struct mei_cl_device *cldev = to_mei_cl_device(dev);
+ const struct mei_cl_device *cldev = to_mei_cl_device(dev);
const uuid_le *uuid = mei_me_cl_uuid(cldev->me_cl);
u8 version = mei_me_cl_ver(cldev->me_cl);
@@ -1392,6 +1392,7 @@ static int mei_cl_bus_dev_add(struct mei_cl_device *cldev)
*/
static void mei_cl_bus_dev_stop(struct mei_cl_device *cldev)
{
+ cldev->do_match = 0;
if (cldev->is_added)
device_release_driver(&cldev->dev);
}
diff --git a/drivers/misc/mei/hdcp/mei_hdcp.c b/drivers/misc/mei/hdcp/mei_hdcp.c
index e889a8bd7ac8..e0dcd5c114db 100644
--- a/drivers/misc/mei/hdcp/mei_hdcp.c
+++ b/drivers/misc/mei/hdcp/mei_hdcp.c
@@ -859,8 +859,8 @@ static void mei_hdcp_remove(struct mei_cl_device *cldev)
dev_warn(&cldev->dev, "mei_cldev_disable() failed\n");
}
-#define MEI_UUID_HDCP GUID_INIT(0xB638AB7E, 0x94E2, 0x4EA2, 0xA5, \
- 0x52, 0xD1, 0xC5, 0x4B, 0x62, 0x7F, 0x04)
+#define MEI_UUID_HDCP UUID_LE(0xB638AB7E, 0x94E2, 0x4EA2, 0xA5, \
+ 0x52, 0xD1, 0xC5, 0x4B, 0x62, 0x7F, 0x04)
static const struct mei_cl_device_id mei_hdcp_tbl[] = {
{ .uuid = MEI_UUID_HDCP, .version = MEI_CL_VERSION_ANY },
diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h
index 996b70a988be..895011b7a0bf 100644
--- a/drivers/misc/mei/mei_dev.h
+++ b/drivers/misc/mei/mei_dev.h
@@ -13,6 +13,11 @@
#include <linux/mei.h>
#include <linux/mei_cl_bus.h>
+static inline int uuid_le_cmp(const uuid_le u1, const uuid_le u2)
+{
+ return memcmp(&u1, &u2, sizeof(uuid_le));
+}
+
#include "hw.h"
#include "hbm.h"
diff --git a/drivers/misc/mei/pxp/mei_pxp.c b/drivers/misc/mei/pxp/mei_pxp.c
index 8dd09b1722eb..7ee1fa7b1cb3 100644
--- a/drivers/misc/mei/pxp/mei_pxp.c
+++ b/drivers/misc/mei/pxp/mei_pxp.c
@@ -238,8 +238,8 @@ static void mei_pxp_remove(struct mei_cl_device *cldev)
}
/* fbf6fcf1-96cf-4e2e-a6a6-1bab8cbe36b1 : PAVP GUID*/
-#define MEI_GUID_PXP GUID_INIT(0xfbf6fcf1, 0x96cf, 0x4e2e, 0xA6, \
- 0xa6, 0x1b, 0xab, 0x8c, 0xbe, 0x36, 0xb1)
+#define MEI_GUID_PXP UUID_LE(0xfbf6fcf1, 0x96cf, 0x4e2e, 0xA6, \
+ 0xa6, 0x1b, 0xab, 0x8c, 0xbe, 0x36, 0xb1)
static struct mei_cl_device_id mei_pxp_tbl[] = {
{ .uuid = MEI_GUID_PXP, .version = MEI_CL_VERSION_ANY },
diff --git a/drivers/misc/ocxl/context.c b/drivers/misc/ocxl/context.c
index 9eb0d93b01c6..7f83116ae11a 100644
--- a/drivers/misc/ocxl/context.c
+++ b/drivers/misc/ocxl/context.c
@@ -180,7 +180,7 @@ static int check_mmap_afu_irq(struct ocxl_context *ctx,
if ((vma->vm_flags & VM_READ) || (vma->vm_flags & VM_EXEC) ||
!(vma->vm_flags & VM_WRITE))
return -EINVAL;
- vma->vm_flags &= ~(VM_MAYREAD | VM_MAYEXEC);
+ vm_flags_clear(vma, VM_MAYREAD | VM_MAYEXEC);
return 0;
}
@@ -204,7 +204,7 @@ int ocxl_context_mmap(struct ocxl_context *ctx, struct vm_area_struct *vma)
if (rc)
return rc;
- vma->vm_flags |= VM_IO | VM_PFNMAP;
+ vm_flags_set(vma, VM_IO | VM_PFNMAP);
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma->vm_ops = &ocxl_vmops;
return 0;
diff --git a/drivers/misc/ocxl/sysfs.c b/drivers/misc/ocxl/sysfs.c
index 25c78df8055d..405180d47d9b 100644
--- a/drivers/misc/ocxl/sysfs.c
+++ b/drivers/misc/ocxl/sysfs.c
@@ -134,7 +134,7 @@ static int global_mmio_mmap(struct file *filp, struct kobject *kobj,
(afu->config.global_mmio_size >> PAGE_SHIFT))
return -EINVAL;
- vma->vm_flags |= VM_IO | VM_PFNMAP;
+ vm_flags_set(vma, VM_IO | VM_PFNMAP);
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma->vm_ops = &global_mmio_vmops;
vma->vm_private_data = afu;
diff --git a/drivers/misc/open-dice.c b/drivers/misc/open-dice.c
index c61be3404c6f..8aea2d070a40 100644
--- a/drivers/misc/open-dice.c
+++ b/drivers/misc/open-dice.c
@@ -90,19 +90,17 @@ static int open_dice_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct open_dice_drvdata *drvdata = to_open_dice_drvdata(filp);
- /* Do not allow userspace to modify the underlying data. */
- if ((vma->vm_flags & VM_WRITE) && (vma->vm_flags & VM_SHARED))
- return -EPERM;
-
- /* Ensure userspace cannot acquire VM_WRITE + VM_SHARED later. */
- if (vma->vm_flags & VM_WRITE)
- vma->vm_flags &= ~VM_MAYSHARE;
- else if (vma->vm_flags & VM_SHARED)
- vma->vm_flags &= ~VM_MAYWRITE;
+ if (vma->vm_flags & VM_MAYSHARE) {
+ /* Do not allow userspace to modify the underlying data. */
+ if (vma->vm_flags & VM_WRITE)
+ return -EPERM;
+ /* Ensure userspace cannot acquire VM_WRITE later. */
+ vm_flags_clear(vma, VM_MAYWRITE);
+ }
/* Create write-combine mapping so all clients observe a wipe. */
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
- vma->vm_flags |= VM_DONTCOPY | VM_DONTDUMP;
+ vm_flags_set(vma, VM_DONTCOPY | VM_DONTDUMP);
return vm_iomap_memory(vma, drvdata->rmem->base, drvdata->rmem->size);
}
diff --git a/drivers/misc/pci_endpoint_test.c b/drivers/misc/pci_endpoint_test.c
index 11530b4ec389..a7244de081ec 100644
--- a/drivers/misc/pci_endpoint_test.c
+++ b/drivers/misc/pci_endpoint_test.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
* Host side test driver to test endpoint functionality
*
* Copyright (C) 2017 Texas Instruments
@@ -72,6 +72,7 @@
#define PCI_DEVICE_ID_TI_J7200 0xb00f
#define PCI_DEVICE_ID_TI_AM64 0xb010
#define PCI_DEVICE_ID_LS1088A 0x80c0
+#define PCI_DEVICE_ID_IMX8 0x0808
#define is_am654_pci_dev(pdev) \
((pdev)->device == PCI_DEVICE_ID_TI_AM654)
@@ -980,6 +981,7 @@ static const struct pci_device_id pci_endpoint_test_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, 0x81c0),
.driver_data = (kernel_ulong_t)&default_data,
},
+ { PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, PCI_DEVICE_ID_IMX8),},
{ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, PCI_DEVICE_ID_LS1088A),
.driver_data = (kernel_ulong_t)&default_data,
},
diff --git a/drivers/misc/sgi-gru/grufile.c b/drivers/misc/sgi-gru/grufile.c
index 7ffcfc0bb587..a3d659c11cc4 100644
--- a/drivers/misc/sgi-gru/grufile.c
+++ b/drivers/misc/sgi-gru/grufile.c
@@ -101,8 +101,8 @@ static int gru_file_mmap(struct file *file, struct vm_area_struct *vma)
vma->vm_end & (GRU_GSEG_PAGESIZE - 1))
return -EINVAL;
- vma->vm_flags |= VM_IO | VM_PFNMAP | VM_LOCKED |
- VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP;
+ vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_LOCKED |
+ VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP);
vma->vm_page_prot = PAGE_SHARED;
vma->vm_ops = &gru_vm_ops;
diff --git a/drivers/misc/sgi-gru/grukservices.c b/drivers/misc/sgi-gru/grukservices.c
index fa1f5a632e7f..37e804bbb1f2 100644
--- a/drivers/misc/sgi-gru/grukservices.c
+++ b/drivers/misc/sgi-gru/grukservices.c
@@ -425,7 +425,7 @@ int gru_get_cb_exception_detail(void *cb,
static char *gru_get_cb_exception_detail_str(int ret, void *cb,
char *buf, int size)
{
- struct gru_control_block_status *gen = (void *)cb;
+ struct gru_control_block_status *gen = cb;
struct control_block_extended_exc_detail excdet;
if (ret > 0 && gen->istatus == CBS_EXCEPTION) {
@@ -452,7 +452,7 @@ static int gru_wait_idle_or_exception(struct gru_control_block_status *gen)
static int gru_retry_exception(void *cb)
{
- struct gru_control_block_status *gen = (void *)cb;
+ struct gru_control_block_status *gen = cb;
struct control_block_extended_exc_detail excdet;
int retry = EXCEPTION_RETRY_LIMIT;
@@ -475,7 +475,7 @@ static int gru_retry_exception(void *cb)
int gru_check_status_proc(void *cb)
{
- struct gru_control_block_status *gen = (void *)cb;
+ struct gru_control_block_status *gen = cb;
int ret;
ret = gen->istatus;
@@ -488,7 +488,7 @@ int gru_check_status_proc(void *cb)
int gru_wait_proc(void *cb)
{
- struct gru_control_block_status *gen = (void *)cb;
+ struct gru_control_block_status *gen = cb;
int ret;
ret = gru_wait_idle_or_exception(gen);
diff --git a/drivers/misc/ti-st/st_core.c b/drivers/misc/ti-st/st_core.c
index 7f6976a9f508..01d2257deea4 100644
--- a/drivers/misc/ti-st/st_core.c
+++ b/drivers/misc/ti-st/st_core.c
@@ -338,7 +338,7 @@ void st_int_recv(void *disc_data,
ptr++;
count--;
continue;
- /* Unknow packet? */
+ /* Unknown packet? */
default:
type = *ptr;
diff --git a/drivers/misc/tifm_core.c b/drivers/misc/tifm_core.c
index a3098fea3bf7..eee9b6581604 100644
--- a/drivers/misc/tifm_core.c
+++ b/drivers/misc/tifm_core.c
@@ -55,9 +55,9 @@ static int tifm_bus_match(struct device *dev, struct device_driver *drv)
return 0;
}
-static int tifm_uevent(struct device *dev, struct kobj_uevent_env *env)
+static int tifm_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
- struct tifm_dev *sock = container_of(dev, struct tifm_dev, dev);
+ const struct tifm_dev *sock = container_of_const(dev, struct tifm_dev, dev);
if (add_uevent_var(env, "TIFM_CARD_TYPE=%s", tifm_media_type_name(sock->type, 1)))
return -ENOMEM;
diff --git a/drivers/misc/uacce/uacce.c b/drivers/misc/uacce/uacce.c
index 905eff1f840e..07023397afc7 100644
--- a/drivers/misc/uacce/uacce.c
+++ b/drivers/misc/uacce/uacce.c
@@ -229,7 +229,7 @@ static int uacce_fops_mmap(struct file *filep, struct vm_area_struct *vma)
if (!qfr)
return -ENOMEM;
- vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_WIPEONFORK;
+ vm_flags_set(vma, VM_DONTCOPY | VM_DONTEXPAND | VM_WIPEONFORK);
vma->vm_ops = &uacce_vm_ops;
vma->vm_private_data = q;
qfr->type = type;
@@ -363,12 +363,52 @@ static ssize_t region_dus_size_show(struct device *dev,
uacce->qf_pg_num[UACCE_QFRT_DUS] << PAGE_SHIFT);
}
+static ssize_t isolate_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct uacce_device *uacce = to_uacce_device(dev);
+
+ return sysfs_emit(buf, "%d\n", uacce->ops->get_isolate_state(uacce));
+}
+
+static ssize_t isolate_strategy_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct uacce_device *uacce = to_uacce_device(dev);
+ u32 val;
+
+ val = uacce->ops->isolate_err_threshold_read(uacce);
+
+ return sysfs_emit(buf, "%u\n", val);
+}
+
+static ssize_t isolate_strategy_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct uacce_device *uacce = to_uacce_device(dev);
+ unsigned long val;
+ int ret;
+
+ if (kstrtoul(buf, 0, &val) < 0)
+ return -EINVAL;
+
+ if (val > UACCE_MAX_ERR_THRESHOLD)
+ return -EINVAL;
+
+ ret = uacce->ops->isolate_err_threshold_write(uacce, val);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
static DEVICE_ATTR_RO(api);
static DEVICE_ATTR_RO(flags);
static DEVICE_ATTR_RO(available_instances);
static DEVICE_ATTR_RO(algorithms);
static DEVICE_ATTR_RO(region_mmio_size);
static DEVICE_ATTR_RO(region_dus_size);
+static DEVICE_ATTR_RO(isolate);
+static DEVICE_ATTR_RW(isolate_strategy);
static struct attribute *uacce_dev_attrs[] = {
&dev_attr_api.attr,
@@ -377,6 +417,8 @@ static struct attribute *uacce_dev_attrs[] = {
&dev_attr_algorithms.attr,
&dev_attr_region_mmio_size.attr,
&dev_attr_region_dus_size.attr,
+ &dev_attr_isolate.attr,
+ &dev_attr_isolate_strategy.attr,
NULL,
};
@@ -392,6 +434,14 @@ static umode_t uacce_dev_is_visible(struct kobject *kobj,
(!uacce->qf_pg_num[UACCE_QFRT_DUS])))
return 0;
+ if (attr == &dev_attr_isolate_strategy.attr &&
+ (!uacce->ops->isolate_err_threshold_read &&
+ !uacce->ops->isolate_err_threshold_write))
+ return 0;
+
+ if (attr == &dev_attr_isolate.attr && !uacce->ops->get_isolate_state)
+ return 0;
+
return attr->mode;
}
diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
index 61a2be712bf7..9ce9b9e0e9b6 100644
--- a/drivers/misc/vmw_balloon.c
+++ b/drivers/misc/vmw_balloon.c
@@ -1709,7 +1709,7 @@ static void __init vmballoon_debugfs_init(struct vmballoon *b)
static void __exit vmballoon_debugfs_exit(struct vmballoon *b)
{
static_key_disable(&balloon_stat_enabled.key);
- debugfs_remove(debugfs_lookup("vmmemctl", NULL));
+ debugfs_lookup_and_remove("vmmemctl", NULL);
kfree(b->stats);
b->stats = NULL;
}
diff --git a/drivers/misc/vmw_vmci/vmci_host.c b/drivers/misc/vmw_vmci/vmci_host.c
index da1e2a773823..857b9851402a 100644
--- a/drivers/misc/vmw_vmci/vmci_host.c
+++ b/drivers/misc/vmw_vmci/vmci_host.c
@@ -242,6 +242,8 @@ static int vmci_host_setup_notify(struct vmci_ctx *context,
context->notify_page = NULL;
return VMCI_ERROR_GENERIC;
}
+ if (context->notify_page == NULL)
+ return VMCI_ERROR_UNAVAILABLE;
/*
* Map the locked page and set up notify pointer.
diff --git a/drivers/misc/xilinx_tmr_inject.c b/drivers/misc/xilinx_tmr_inject.c
new file mode 100644
index 000000000000..d96f6d7cd109
--- /dev/null
+++ b/drivers/misc/xilinx_tmr_inject.c
@@ -0,0 +1,171 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for Xilinx TMR Inject IP.
+ *
+ * Copyright (C) 2022 Advanced Micro Devices, Inc.
+ *
+ * Description:
+ * This driver is developed for TMR Inject IP,The Triple Modular Redundancy(TMR)
+ * Inject provides fault injection.
+ */
+
+#include <asm/xilinx_mb_manager.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/fault-inject.h>
+
+/* TMR Inject Register offsets */
+#define XTMR_INJECT_CR_OFFSET 0x0
+#define XTMR_INJECT_AIR_OFFSET 0x4
+#define XTMR_INJECT_IIR_OFFSET 0xC
+#define XTMR_INJECT_EAIR_OFFSET 0x10
+#define XTMR_INJECT_ERR_OFFSET 0x204
+
+/* Register Bitmasks/shifts */
+#define XTMR_INJECT_CR_CPUID_SHIFT 8
+#define XTMR_INJECT_CR_IE_SHIFT 10
+#define XTMR_INJECT_IIR_ADDR_MASK GENMASK(31, 16)
+
+#define XTMR_INJECT_MAGIC_MAX_VAL 255
+
+/**
+ * struct xtmr_inject_dev - Driver data for TMR Inject
+ * @regs: device physical base address
+ * @magic: Magic hardware configuration value
+ */
+struct xtmr_inject_dev {
+ void __iomem *regs;
+ u32 magic;
+};
+
+static DECLARE_FAULT_ATTR(inject_fault);
+static char *inject_request;
+module_param(inject_request, charp, 0);
+MODULE_PARM_DESC(inject_request, "default fault injection attributes");
+static struct dentry *dbgfs_root;
+
+/* IO accessors */
+static inline void xtmr_inject_write(struct xtmr_inject_dev *xtmr_inject,
+ u32 addr, u32 value)
+{
+ iowrite32(value, xtmr_inject->regs + addr);
+}
+
+static inline u32 xtmr_inject_read(struct xtmr_inject_dev *xtmr_inject,
+ u32 addr)
+{
+ return ioread32(xtmr_inject->regs + addr);
+}
+
+static int xtmr_inject_set(void *data, u64 val)
+{
+ if (val != 1)
+ return -EINVAL;
+
+ xmb_inject_err();
+ return 0;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(xtmr_inject_fops, NULL, xtmr_inject_set, "%llu\n");
+
+static void xtmr_init_debugfs(struct xtmr_inject_dev *xtmr_inject)
+{
+ struct dentry *dir;
+
+ dbgfs_root = debugfs_create_dir("xtmr_inject", NULL);
+ dir = fault_create_debugfs_attr("inject_fault", dbgfs_root,
+ &inject_fault);
+ debugfs_create_file("inject_fault", 0200, dir, NULL,
+ &xtmr_inject_fops);
+}
+
+static void xtmr_inject_init(struct xtmr_inject_dev *xtmr_inject)
+{
+ u32 cr_val;
+
+ if (inject_request)
+ setup_fault_attr(&inject_fault, inject_request);
+ /* Allow fault injection */
+ cr_val = xtmr_inject->magic |
+ (1 << XTMR_INJECT_CR_IE_SHIFT) |
+ (1 << XTMR_INJECT_CR_CPUID_SHIFT);
+ xtmr_inject_write(xtmr_inject, XTMR_INJECT_CR_OFFSET,
+ cr_val);
+ /* Initialize the address inject and instruction inject registers */
+ xtmr_inject_write(xtmr_inject, XTMR_INJECT_AIR_OFFSET,
+ XMB_INJECT_ERR_OFFSET);
+ xtmr_inject_write(xtmr_inject, XTMR_INJECT_IIR_OFFSET,
+ XMB_INJECT_ERR_OFFSET & XTMR_INJECT_IIR_ADDR_MASK);
+}
+
+/**
+ * xtmr_inject_probe - Driver probe function
+ * @pdev: Pointer to the platform_device structure
+ *
+ * This is the driver probe routine. It does all the memory
+ * allocation for the device.
+ *
+ * Return: 0 on success and failure value on error
+ */
+static int xtmr_inject_probe(struct platform_device *pdev)
+{
+ struct xtmr_inject_dev *xtmr_inject;
+ int err;
+
+ xtmr_inject = devm_kzalloc(&pdev->dev, sizeof(*xtmr_inject),
+ GFP_KERNEL);
+ if (!xtmr_inject)
+ return -ENOMEM;
+
+ xtmr_inject->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(xtmr_inject->regs))
+ return PTR_ERR(xtmr_inject->regs);
+
+ err = of_property_read_u32(pdev->dev.of_node, "xlnx,magic",
+ &xtmr_inject->magic);
+ if (err < 0) {
+ dev_err(&pdev->dev, "unable to read xlnx,magic property");
+ return err;
+ }
+
+ if (xtmr_inject->magic > XTMR_INJECT_MAGIC_MAX_VAL) {
+ dev_err(&pdev->dev, "invalid xlnx,magic property value");
+ return -EINVAL;
+ }
+
+ /* Initialize TMR Inject */
+ xtmr_inject_init(xtmr_inject);
+
+ xtmr_init_debugfs(xtmr_inject);
+
+ platform_set_drvdata(pdev, xtmr_inject);
+
+ return 0;
+}
+
+static int xtmr_inject_remove(struct platform_device *pdev)
+{
+ debugfs_remove_recursive(dbgfs_root);
+ dbgfs_root = NULL;
+ return 0;
+}
+
+static const struct of_device_id xtmr_inject_of_match[] = {
+ {
+ .compatible = "xlnx,tmr-inject-1.0",
+ },
+ { /* end of table */ }
+};
+MODULE_DEVICE_TABLE(of, xtmr_inject_of_match);
+
+static struct platform_driver xtmr_inject_driver = {
+ .driver = {
+ .name = "xilinx-tmr_inject",
+ .of_match_table = xtmr_inject_of_match,
+ },
+ .probe = xtmr_inject_probe,
+ .remove = xtmr_inject_remove,
+};
+module_platform_driver(xtmr_inject_driver);
+MODULE_AUTHOR("Advanced Micro Devices, Inc");
+MODULE_DESCRIPTION("Xilinx TMR Inject Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/misc/xilinx_tmr_manager.c b/drivers/misc/xilinx_tmr_manager.c
new file mode 100644
index 000000000000..0ef55e06d3a0
--- /dev/null
+++ b/drivers/misc/xilinx_tmr_manager.c
@@ -0,0 +1,220 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for Xilinx TMR Manager IP.
+ *
+ * Copyright (C) 2022 Advanced Micro Devices, Inc.
+ *
+ * Description:
+ * This driver is developed for TMR Manager,The Triple Modular Redundancy(TMR)
+ * Manager is responsible for handling the TMR subsystem state, including
+ * fault detection and error recovery. The core is triplicated in each of
+ * the sub-blocks in the TMR subsystem, and provides majority voting of
+ * its internal state provides soft error detection, correction and
+ * recovery.
+ */
+
+#include <asm/xilinx_mb_manager.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+
+/* TMR Manager Register offsets */
+#define XTMR_MANAGER_CR_OFFSET 0x0
+#define XTMR_MANAGER_FFR_OFFSET 0x4
+#define XTMR_MANAGER_CMR0_OFFSET 0x8
+#define XTMR_MANAGER_CMR1_OFFSET 0xC
+#define XTMR_MANAGER_BDIR_OFFSET 0x10
+#define XTMR_MANAGER_SEMIMR_OFFSET 0x1C
+
+/* Register Bitmasks/shifts */
+#define XTMR_MANAGER_CR_MAGIC1_MASK GENMASK(7, 0)
+#define XTMR_MANAGER_CR_MAGIC2_MASK GENMASK(15, 8)
+#define XTMR_MANAGER_CR_RIR_MASK BIT(16)
+#define XTMR_MANAGER_FFR_LM12_MASK BIT(0)
+#define XTMR_MANAGER_FFR_LM13_MASK BIT(1)
+#define XTMR_MANAGER_FFR_LM23_MASK BIT(2)
+
+#define XTMR_MANAGER_CR_MAGIC2_SHIFT 4
+#define XTMR_MANAGER_CR_RIR_SHIFT 16
+#define XTMR_MANAGER_CR_BB_SHIFT 18
+
+#define XTMR_MANAGER_MAGIC1_MAX_VAL 255
+
+/**
+ * struct xtmr_manager_dev - Driver data for TMR Manager
+ * @regs: device physical base address
+ * @cr_val: control register value
+ * @magic1: Magic 1 hardware configuration value
+ * @err_cnt: error statistics count
+ * @phys_baseaddr: Physical base address
+ */
+struct xtmr_manager_dev {
+ void __iomem *regs;
+ u32 cr_val;
+ u32 magic1;
+ u32 err_cnt;
+ resource_size_t phys_baseaddr;
+};
+
+/* IO accessors */
+static inline void xtmr_manager_write(struct xtmr_manager_dev *xtmr_manager,
+ u32 addr, u32 value)
+{
+ iowrite32(value, xtmr_manager->regs + addr);
+}
+
+static inline u32 xtmr_manager_read(struct xtmr_manager_dev *xtmr_manager,
+ u32 addr)
+{
+ return ioread32(xtmr_manager->regs + addr);
+}
+
+static void xmb_manager_reset_handler(struct xtmr_manager_dev *xtmr_manager)
+{
+ /* Clear the FFR Register contents as a part of recovery process. */
+ xtmr_manager_write(xtmr_manager, XTMR_MANAGER_FFR_OFFSET, 0);
+}
+
+static void xmb_manager_update_errcnt(struct xtmr_manager_dev *xtmr_manager)
+{
+ xtmr_manager->err_cnt++;
+}
+
+static ssize_t errcnt_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct xtmr_manager_dev *xtmr_manager = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%x\n", xtmr_manager->err_cnt);
+}
+static DEVICE_ATTR_RO(errcnt);
+
+static ssize_t dis_block_break_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct xtmr_manager_dev *xtmr_manager = dev_get_drvdata(dev);
+ int ret;
+ long value;
+
+ ret = kstrtoul(buf, 16, &value);
+ if (ret)
+ return ret;
+
+ /* unblock the break signal*/
+ xtmr_manager->cr_val &= ~(1 << XTMR_MANAGER_CR_BB_SHIFT);
+ xtmr_manager_write(xtmr_manager, XTMR_MANAGER_CR_OFFSET,
+ xtmr_manager->cr_val);
+ return size;
+}
+static DEVICE_ATTR_WO(dis_block_break);
+
+static struct attribute *xtmr_manager_dev_attrs[] = {
+ &dev_attr_dis_block_break.attr,
+ &dev_attr_errcnt.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(xtmr_manager_dev);
+
+static void xtmr_manager_init(struct xtmr_manager_dev *xtmr_manager)
+{
+ /* Clear the SEM interrupt mask register to disable the interrupt */
+ xtmr_manager_write(xtmr_manager, XTMR_MANAGER_SEMIMR_OFFSET, 0);
+
+ /* Allow recovery reset by default */
+ xtmr_manager->cr_val = (1 << XTMR_MANAGER_CR_RIR_SHIFT) |
+ xtmr_manager->magic1;
+ xtmr_manager_write(xtmr_manager, XTMR_MANAGER_CR_OFFSET,
+ xtmr_manager->cr_val);
+ /*
+ * Configure Break Delay Initialization Register to zero so that
+ * break occurs immediately
+ */
+ xtmr_manager_write(xtmr_manager, XTMR_MANAGER_BDIR_OFFSET, 0);
+
+ /*
+ * To come out of break handler need to block the break signal
+ * in the tmr manager, update the xtmr_manager cr_val for the same
+ */
+ xtmr_manager->cr_val |= (1 << XTMR_MANAGER_CR_BB_SHIFT);
+
+ /*
+ * When the break vector gets asserted because of error injection,
+ * the break signal must be blocked before exiting from the
+ * break handler, Below api updates the TMR manager address and
+ * control register and error counter callback arguments,
+ * which will be used by the break handler to block the
+ * break and call the callback function.
+ */
+ xmb_manager_register(xtmr_manager->phys_baseaddr, xtmr_manager->cr_val,
+ (void *)xmb_manager_update_errcnt,
+ xtmr_manager, (void *)xmb_manager_reset_handler);
+}
+
+/**
+ * xtmr_manager_probe - Driver probe function
+ * @pdev: Pointer to the platform_device structure
+ *
+ * This is the driver probe routine. It does all the memory
+ * allocation for the device.
+ *
+ * Return: 0 on success and failure value on error
+ */
+static int xtmr_manager_probe(struct platform_device *pdev)
+{
+ struct xtmr_manager_dev *xtmr_manager;
+ struct resource *res;
+ int err;
+
+ xtmr_manager = devm_kzalloc(&pdev->dev, sizeof(*xtmr_manager),
+ GFP_KERNEL);
+ if (!xtmr_manager)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ xtmr_manager->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(xtmr_manager->regs))
+ return PTR_ERR(xtmr_manager->regs);
+
+ xtmr_manager->phys_baseaddr = res->start;
+
+ err = of_property_read_u32(pdev->dev.of_node, "xlnx,magic1",
+ &xtmr_manager->magic1);
+ if (err < 0) {
+ dev_err(&pdev->dev, "unable to read xlnx,magic1 property");
+ return err;
+ }
+
+ if (xtmr_manager->magic1 > XTMR_MANAGER_MAGIC1_MAX_VAL) {
+ dev_err(&pdev->dev, "invalid xlnx,magic1 property value");
+ return -EINVAL;
+ }
+
+ /* Initialize TMR Manager */
+ xtmr_manager_init(xtmr_manager);
+
+ platform_set_drvdata(pdev, xtmr_manager);
+
+ return 0;
+}
+
+static const struct of_device_id xtmr_manager_of_match[] = {
+ {
+ .compatible = "xlnx,tmr-manager-1.0",
+ },
+ { /* end of table */ }
+};
+MODULE_DEVICE_TABLE(of, xtmr_manager_of_match);
+
+static struct platform_driver xtmr_manager_driver = {
+ .driver = {
+ .name = "xilinx-tmr_manager",
+ .of_match_table = xtmr_manager_of_match,
+ .dev_groups = xtmr_manager_dev_groups,
+ },
+ .probe = xtmr_manager_probe,
+};
+module_platform_driver(xtmr_manager_driver);
+
+MODULE_AUTHOR("Advanced Micro Devices, Inc");
+MODULE_DESCRIPTION("Xilinx TMR Manager Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
index 36679f4e9acc..2c3074a605fc 100644
--- a/drivers/mmc/core/bus.c
+++ b/drivers/mmc/core/bus.c
@@ -55,9 +55,9 @@ static struct attribute *mmc_dev_attrs[] = {
ATTRIBUTE_GROUPS(mmc_dev);
static int
-mmc_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
+mmc_bus_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
- struct mmc_card *card = mmc_dev_to_card(dev);
+ const struct mmc_card *card = mmc_dev_to_card(dev);
const char *type;
unsigned int i;
int retval = 0;
diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c
index babf21a0adeb..47a48e902a24 100644
--- a/drivers/mmc/core/sdio_bus.c
+++ b/drivers/mmc/core/sdio_bus.c
@@ -120,9 +120,9 @@ static int sdio_bus_match(struct device *dev, struct device_driver *drv)
}
static int
-sdio_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
+sdio_bus_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
- struct sdio_func *func = dev_to_sdio_func(dev);
+ const struct sdio_func *func = dev_to_sdio_func(dev);
unsigned int i;
if (add_uevent_var(env,
@@ -294,6 +294,12 @@ static void sdio_release_func(struct device *dev)
if (!(func->card->quirks & MMC_QUIRK_NONSTD_SDIO))
sdio_free_func_cis(func);
+ /*
+ * We have now removed the link to the tuples in the
+ * card structure, so remove the reference.
+ */
+ put_device(&func->card->dev);
+
kfree(func->info);
kfree(func->tmpbuf);
kfree(func);
@@ -324,6 +330,12 @@ struct sdio_func *sdio_alloc_func(struct mmc_card *card)
device_initialize(&func->dev);
+ /*
+ * We may link to tuples in the card structure,
+ * we need make sure we have a reference to it.
+ */
+ get_device(&func->card->dev);
+
func->dev.parent = &card->dev;
func->dev.bus = &sdio_bus_type;
func->dev.release = sdio_release_func;
@@ -377,10 +389,9 @@ int sdio_add_func(struct sdio_func *func)
*/
void sdio_remove_func(struct sdio_func *func)
{
- if (!sdio_func_present(func))
- return;
+ if (sdio_func_present(func))
+ device_del(&func->dev);
- device_del(&func->dev);
of_node_put(func->dev.of_node);
put_device(&func->dev);
}
diff --git a/drivers/mmc/core/sdio_cis.c b/drivers/mmc/core/sdio_cis.c
index a705ba6eff5b..afaa6cab1adc 100644
--- a/drivers/mmc/core/sdio_cis.c
+++ b/drivers/mmc/core/sdio_cis.c
@@ -404,12 +404,6 @@ int sdio_read_func_cis(struct sdio_func *func)
return ret;
/*
- * Since we've linked to tuples in the card structure,
- * we must make sure we have a reference to it.
- */
- get_device(&func->card->dev);
-
- /*
* Vendor/device id is optional for function CIS, so
* copy it from the card structure as needed.
*/
@@ -434,11 +428,5 @@ void sdio_free_func_cis(struct sdio_func *func)
}
func->tuples = NULL;
-
- /*
- * We have now removed the link to the tuples in the
- * card structure, so remove the reference.
- */
- put_device(&func->card->dev);
}
diff --git a/drivers/mmc/core/sdio_uart.c b/drivers/mmc/core/sdio_uart.c
index ae7ef2e038be..50536fe59f1a 100644
--- a/drivers/mmc/core/sdio_uart.c
+++ b/drivers/mmc/core/sdio_uart.c
@@ -526,7 +526,7 @@ static void sdio_uart_irq(struct sdio_func *func)
port->in_sdio_uart_irq = NULL;
}
-static int uart_carrier_raised(struct tty_port *tport)
+static bool uart_carrier_raised(struct tty_port *tport)
{
struct sdio_uart_port *port =
container_of(tport, struct sdio_uart_port, port);
@@ -535,28 +535,27 @@ static int uart_carrier_raised(struct tty_port *tport)
return 1;
ret = sdio_uart_get_mctrl(port);
sdio_uart_release_func(port);
- if (ret & TIOCM_CAR)
- return 1;
- return 0;
+
+ return ret & TIOCM_CAR;
}
/**
* uart_dtr_rts - port helper to set uart signals
* @tport: tty port to be updated
- * @onoff: set to turn on DTR/RTS
+ * @active: set to turn on DTR/RTS
*
* Called by the tty port helpers when the modem signals need to be
* adjusted during an open, close and hangup.
*/
-static void uart_dtr_rts(struct tty_port *tport, int onoff)
+static void uart_dtr_rts(struct tty_port *tport, bool active)
{
struct sdio_uart_port *port =
container_of(tport, struct sdio_uart_port, port);
int ret = sdio_uart_claim_func(port);
if (ret)
return;
- if (onoff == 0)
+ if (!active)
sdio_uart_clear_mctrl(port, TIOCM_DTR | TIOCM_RTS);
else
sdio_uart_set_mctrl(port, TIOCM_DTR | TIOCM_RTS);
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 5e19a961c34d..e13b0b0b8ebb 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -262,17 +262,6 @@ config MMC_SDHCI_CADENCE
If unsure, say N.
-config MMC_SDHCI_CNS3XXX
- tristate "SDHCI support on the Cavium Networks CNS3xxx SoC"
- depends on ARCH_CNS3XXX || COMPILE_TEST
- depends on MMC_SDHCI_PLTFM
- help
- This selects the SDHCI support for CNS3xxx System-on-Chip devices.
-
- If you have a controller with this interface, say Y or M here.
-
- If unsure, say N.
-
config MMC_SDHCI_ESDHC_MCF
tristate "SDHCI support for the Freescale eSDHC ColdFire controller"
depends on M5441x
@@ -332,9 +321,8 @@ config MMC_SDHCI_S3C
depends on PLAT_SAMSUNG || ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST
help
This selects the Secure Digital Host Controller Interface (SDHCI)
- often referrered to as the HSMMC block in some of the Samsung S3C
- (S3C2416, S3C2443, S3C6410), S5Pv210 and Exynos (Exynso4210,
- Exynos4412) SoCs.
+ often referrered to as the HSMMC block in some of the Samsung
+ S3C6410, S5Pv210 and Exynos (Exynso4210, Exynos4412) SoCs.
If you have a controller with this interface (thereforeyou build for
such Samsung SoC), say Y or M here.
@@ -500,7 +488,6 @@ config MMC_SDHCI_ST
config MMC_OMAP
tristate "TI OMAP Multimedia Card Interface support"
depends on ARCH_OMAP
- depends on TPS65010 || !MACH_OMAP_H2
help
This selects the TI OMAP Multimedia card Interface.
If you have an OMAP board with a Multimedia Card slot,
@@ -640,49 +627,6 @@ config MMC_SPI
If unsure, or if your system has no SPI master driver, say N.
-config MMC_S3C
- tristate "Samsung S3C SD/MMC Card Interface support"
- depends on ARCH_S3C24XX || COMPILE_TEST
- depends on S3C24XX_DMAC || COMPILE_TEST
- help
- This selects a driver for the MCI interface found in
- Samsung's S3C2410, S3C2412, S3C2440, S3C2442 CPUs.
- If you have a board based on one of those and a MMC/SD
- slot, say Y or M here.
-
- If unsure, say N.
-
-config MMC_S3C_HW_SDIO_IRQ
- bool "Hardware support for SDIO IRQ"
- depends on MMC_S3C
- help
- Enable the hardware support for SDIO interrupts instead of using
- the generic polling code.
-
-choice
- prompt "Samsung S3C SD/MMC transfer code"
- depends on MMC_S3C
-
-config MMC_S3C_PIO
- bool "Use PIO transfers only"
- help
- Use PIO to transfer data between memory and the hardware.
-
- PIO is slower than DMA as it requires CPU instructions to
- move the data. This has been the traditional default for
- the S3C MCI driver.
-
-config MMC_S3C_DMA
- bool "Use DMA transfers only"
- help
- Use DMA to transfer data between memory and the hardware.
-
- Currently, the DMA support in this driver seems to not be
- working properly and needs to be debugged before this
- option is useful.
-
-endchoice
-
config MMC_SDRICOH_CS
tristate "MMC/SD driver for Ricoh Bay1Controllers"
depends on PCI && PCMCIA
@@ -710,14 +654,6 @@ config MMC_SDHCI_SPRD
config MMC_TMIO_CORE
tristate
-config MMC_TMIO
- tristate "Toshiba Mobile IO Controller (TMIO) MMC/SD function support"
- depends on MFD_TMIO || MFD_ASIC3 || COMPILE_TEST
- select MMC_TMIO_CORE
- help
- This provides support for the SD/MMC cell found in TC6393XB,
- T7L66XB and also HTC ASIC3
-
config MMC_SDHI
tristate "Renesas SDHI SD/SDIO controller support"
depends on SUPERH || ARCH_RENESAS || COMPILE_TEST
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index ba0c6d0cd85d..b498c17cd124 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -34,9 +34,7 @@ obj-$(CONFIG_MMC_MVSDIO) += mvsdio.o
obj-$(CONFIG_MMC_DAVINCI) += davinci_mmc.o
obj-$(CONFIG_MMC_SPI) += mmc_spi.o
obj-$(CONFIG_MMC_SPI) += of_mmc_spi.o
-obj-$(CONFIG_MMC_S3C) += s3cmci.o
obj-$(CONFIG_MMC_SDRICOH_CS) += sdricoh_cs.o
-obj-$(CONFIG_MMC_TMIO) += tmio_mmc.o
obj-$(CONFIG_MMC_TMIO_CORE) += tmio_mmc_core.o
obj-$(CONFIG_MMC_SDHI) += renesas_sdhi_core.o
obj-$(CONFIG_MMC_SDHI_SYS_DMAC) += renesas_sdhi_sys_dmac.o
@@ -77,7 +75,6 @@ obj-$(CONFIG_MMC_REALTEK_USB) += rtsx_usb_sdmmc.o
obj-$(CONFIG_MMC_SDHCI_PLTFM) += sdhci-pltfm.o
obj-$(CONFIG_MMC_SDHCI_CADENCE) += sdhci-cadence.o
-obj-$(CONFIG_MMC_SDHCI_CNS3XXX) += sdhci-cns3xxx.o
obj-$(CONFIG_MMC_SDHCI_ESDHC_MCF) += sdhci-esdhc-mcf.o
obj-$(CONFIG_MMC_SDHCI_ESDHC_IMX) += sdhci-esdhc-imx.o
obj-$(CONFIG_MMC_SDHCI_DOVE) += sdhci-dove.o
diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c
index dc2db9c185ea..eda1e2ddcaca 100644
--- a/drivers/mmc/host/jz4740_mmc.c
+++ b/drivers/mmc/host/jz4740_mmc.c
@@ -1053,6 +1053,16 @@ static int jz4740_mmc_probe(struct platform_device* pdev)
mmc->ops = &jz4740_mmc_ops;
if (!mmc->f_max)
mmc->f_max = JZ_MMC_CLK_RATE;
+
+ /*
+ * There seems to be a problem with this driver on the JZ4760 and
+ * JZ4760B SoCs. There, when using the maximum rate supported (50 MHz),
+ * the communication fails with many SD cards.
+ * Until this bug is sorted out, limit the maximum rate to 24 MHz.
+ */
+ if (host->version == JZ_MMC_JZ4760 && mmc->f_max > JZ_MMC_CLK_RATE)
+ mmc->f_max = JZ_MMC_CLK_RATE;
+
mmc->f_min = mmc->f_max / 128;
mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c
index 6e5ea0213b47..5c94ad4661ce 100644
--- a/drivers/mmc/host/meson-gx-mmc.c
+++ b/drivers/mmc/host/meson-gx-mmc.c
@@ -435,7 +435,8 @@ static int meson_mmc_clk_init(struct meson_host *host)
clk_reg |= FIELD_PREP(CLK_CORE_PHASE_MASK, CLK_PHASE_180);
clk_reg |= FIELD_PREP(CLK_TX_PHASE_MASK, CLK_PHASE_0);
clk_reg |= FIELD_PREP(CLK_RX_PHASE_MASK, CLK_PHASE_0);
- clk_reg |= CLK_IRQ_SDIO_SLEEP(host);
+ if (host->mmc->caps & MMC_CAP_SDIO_IRQ)
+ clk_reg |= CLK_IRQ_SDIO_SLEEP(host);
writel(clk_reg, host->regs + SD_EMMC_CLOCK);
/* get the mux parents */
@@ -948,16 +949,18 @@ static irqreturn_t meson_mmc_irq(int irq, void *dev_id)
{
struct meson_host *host = dev_id;
struct mmc_command *cmd;
- u32 status, raw_status;
+ u32 status, raw_status, irq_mask = IRQ_EN_MASK;
irqreturn_t ret = IRQ_NONE;
+ if (host->mmc->caps & MMC_CAP_SDIO_IRQ)
+ irq_mask |= IRQ_SDIO;
raw_status = readl(host->regs + SD_EMMC_STATUS);
- status = raw_status & (IRQ_EN_MASK | IRQ_SDIO);
+ status = raw_status & irq_mask;
if (!status) {
dev_dbg(host->dev,
- "Unexpected IRQ! irq_en 0x%08lx - status 0x%08x\n",
- IRQ_EN_MASK | IRQ_SDIO, raw_status);
+ "Unexpected IRQ! irq_en 0x%08x - status 0x%08x\n",
+ irq_mask, raw_status);
return IRQ_NONE;
}
@@ -1204,6 +1207,11 @@ static int meson_mmc_probe(struct platform_device *pdev)
goto free_host;
}
+ mmc->caps |= MMC_CAP_CMD23;
+
+ if (mmc->caps & MMC_CAP_SDIO_IRQ)
+ mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
+
host->data = (struct meson_mmc_data *)
of_device_get_match_data(&pdev->dev);
if (!host->data) {
@@ -1277,11 +1285,6 @@ static int meson_mmc_probe(struct platform_device *pdev)
spin_lock_init(&host->lock);
- mmc->caps |= MMC_CAP_CMD23;
-
- if (mmc->caps & MMC_CAP_SDIO_IRQ)
- mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
-
if (host->dram_access_quirk) {
/* Limit segments to 1 due to low available sram memory */
mmc->max_segs = 1;
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
index 106dd204b1a7..cc333ad67cac 100644
--- a/drivers/mmc/host/mmc_spi.c
+++ b/drivers/mmc/host/mmc_spi.c
@@ -1437,7 +1437,7 @@ static int mmc_spi_probe(struct spi_device *spi)
status = mmc_add_host(mmc);
if (status != 0)
- goto fail_add_host;
+ goto fail_glue_init;
/*
* Index 0 is card detect
@@ -1445,7 +1445,7 @@ static int mmc_spi_probe(struct spi_device *spi)
*/
status = mmc_gpiod_request_cd(mmc, NULL, 0, false, 1000);
if (status == -EPROBE_DEFER)
- goto fail_add_host;
+ goto fail_gpiod_request;
if (!status) {
/*
* The platform has a CD GPIO signal that may support
@@ -1460,7 +1460,7 @@ static int mmc_spi_probe(struct spi_device *spi)
/* Index 1 is write protect/read only */
status = mmc_gpiod_request_ro(mmc, NULL, 1, 0);
if (status == -EPROBE_DEFER)
- goto fail_add_host;
+ goto fail_gpiod_request;
if (!status)
has_ro = true;
@@ -1474,7 +1474,7 @@ static int mmc_spi_probe(struct spi_device *spi)
? ", cd polling" : "");
return 0;
-fail_add_host:
+fail_gpiod_request:
mmc_remove_host(mmc);
fail_glue_init:
mmc_spi_dma_free(host);
diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c
deleted file mode 100644
index 8d5929a32d34..000000000000
--- a/drivers/mmc/host/s3cmci.c
+++ /dev/null
@@ -1,1777 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * linux/drivers/mmc/s3cmci.h - Samsung S3C MCI driver
- *
- * Copyright (C) 2004-2006 maintech GmbH, Thomas Kleffel <tk@maintech.de>
- *
- * Current driver maintained by Ben Dooks and Simtec Electronics
- * Copyright (C) 2008 Simtec Electronics <ben-linux@fluff.org>
- */
-
-#include <linux/module.h>
-#include <linux/dmaengine.h>
-#include <linux/dma-mapping.h>
-#include <linux/clk.h>
-#include <linux/mmc/host.h>
-#include <linux/platform_device.h>
-#include <linux/cpufreq.h>
-#include <linux/debugfs.h>
-#include <linux/seq_file.h>
-#include <linux/gpio/consumer.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/io.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/mmc/slot-gpio.h>
-#include <linux/platform_data/mmc-s3cmci.h>
-
-#include "s3cmci.h"
-
-#define DRIVER_NAME "s3c-mci"
-
-#define S3C2410_SDICON (0x00)
-#define S3C2410_SDIPRE (0x04)
-#define S3C2410_SDICMDARG (0x08)
-#define S3C2410_SDICMDCON (0x0C)
-#define S3C2410_SDICMDSTAT (0x10)
-#define S3C2410_SDIRSP0 (0x14)
-#define S3C2410_SDIRSP1 (0x18)
-#define S3C2410_SDIRSP2 (0x1C)
-#define S3C2410_SDIRSP3 (0x20)
-#define S3C2410_SDITIMER (0x24)
-#define S3C2410_SDIBSIZE (0x28)
-#define S3C2410_SDIDCON (0x2C)
-#define S3C2410_SDIDCNT (0x30)
-#define S3C2410_SDIDSTA (0x34)
-#define S3C2410_SDIFSTA (0x38)
-
-#define S3C2410_SDIDATA (0x3C)
-#define S3C2410_SDIIMSK (0x40)
-
-#define S3C2440_SDIDATA (0x40)
-#define S3C2440_SDIIMSK (0x3C)
-
-#define S3C2440_SDICON_SDRESET (1 << 8)
-#define S3C2410_SDICON_SDIOIRQ (1 << 3)
-#define S3C2410_SDICON_FIFORESET (1 << 1)
-#define S3C2410_SDICON_CLOCKTYPE (1 << 0)
-
-#define S3C2410_SDICMDCON_LONGRSP (1 << 10)
-#define S3C2410_SDICMDCON_WAITRSP (1 << 9)
-#define S3C2410_SDICMDCON_CMDSTART (1 << 8)
-#define S3C2410_SDICMDCON_SENDERHOST (1 << 6)
-#define S3C2410_SDICMDCON_INDEX (0x3f)
-
-#define S3C2410_SDICMDSTAT_CRCFAIL (1 << 12)
-#define S3C2410_SDICMDSTAT_CMDSENT (1 << 11)
-#define S3C2410_SDICMDSTAT_CMDTIMEOUT (1 << 10)
-#define S3C2410_SDICMDSTAT_RSPFIN (1 << 9)
-
-#define S3C2440_SDIDCON_DS_WORD (2 << 22)
-#define S3C2410_SDIDCON_TXAFTERRESP (1 << 20)
-#define S3C2410_SDIDCON_RXAFTERCMD (1 << 19)
-#define S3C2410_SDIDCON_BLOCKMODE (1 << 17)
-#define S3C2410_SDIDCON_WIDEBUS (1 << 16)
-#define S3C2410_SDIDCON_DMAEN (1 << 15)
-#define S3C2410_SDIDCON_STOP (1 << 14)
-#define S3C2440_SDIDCON_DATSTART (1 << 14)
-
-#define S3C2410_SDIDCON_XFER_RXSTART (2 << 12)
-#define S3C2410_SDIDCON_XFER_TXSTART (3 << 12)
-
-#define S3C2410_SDIDCON_BLKNUM_MASK (0xFFF)
-
-#define S3C2410_SDIDSTA_SDIOIRQDETECT (1 << 9)
-#define S3C2410_SDIDSTA_FIFOFAIL (1 << 8)
-#define S3C2410_SDIDSTA_CRCFAIL (1 << 7)
-#define S3C2410_SDIDSTA_RXCRCFAIL (1 << 6)
-#define S3C2410_SDIDSTA_DATATIMEOUT (1 << 5)
-#define S3C2410_SDIDSTA_XFERFINISH (1 << 4)
-#define S3C2410_SDIDSTA_TXDATAON (1 << 1)
-#define S3C2410_SDIDSTA_RXDATAON (1 << 0)
-
-#define S3C2440_SDIFSTA_FIFORESET (1 << 16)
-#define S3C2440_SDIFSTA_FIFOFAIL (3 << 14)
-#define S3C2410_SDIFSTA_TFDET (1 << 13)
-#define S3C2410_SDIFSTA_RFDET (1 << 12)
-#define S3C2410_SDIFSTA_COUNTMASK (0x7f)
-
-#define S3C2410_SDIIMSK_RESPONSECRC (1 << 17)
-#define S3C2410_SDIIMSK_CMDSENT (1 << 16)
-#define S3C2410_SDIIMSK_CMDTIMEOUT (1 << 15)
-#define S3C2410_SDIIMSK_RESPONSEND (1 << 14)
-#define S3C2410_SDIIMSK_SDIOIRQ (1 << 12)
-#define S3C2410_SDIIMSK_FIFOFAIL (1 << 11)
-#define S3C2410_SDIIMSK_CRCSTATUS (1 << 10)
-#define S3C2410_SDIIMSK_DATACRC (1 << 9)
-#define S3C2410_SDIIMSK_DATATIMEOUT (1 << 8)
-#define S3C2410_SDIIMSK_DATAFINISH (1 << 7)
-#define S3C2410_SDIIMSK_TXFIFOHALF (1 << 4)
-#define S3C2410_SDIIMSK_RXFIFOLAST (1 << 2)
-#define S3C2410_SDIIMSK_RXFIFOHALF (1 << 0)
-
-enum dbg_channels {
- dbg_err = (1 << 0),
- dbg_debug = (1 << 1),
- dbg_info = (1 << 2),
- dbg_irq = (1 << 3),
- dbg_sg = (1 << 4),
- dbg_dma = (1 << 5),
- dbg_pio = (1 << 6),
- dbg_fail = (1 << 7),
- dbg_conf = (1 << 8),
-};
-
-static const int dbgmap_err = dbg_fail;
-static const int dbgmap_info = dbg_info | dbg_conf;
-static const int dbgmap_debug = dbg_err | dbg_debug;
-
-#define dbg(host, channels, args...) \
- do { \
- if (dbgmap_err & channels) \
- dev_err(&host->pdev->dev, args); \
- else if (dbgmap_info & channels) \
- dev_info(&host->pdev->dev, args); \
- else if (dbgmap_debug & channels) \
- dev_dbg(&host->pdev->dev, args); \
- } while (0)
-
-static void finalize_request(struct s3cmci_host *host);
-static void s3cmci_send_request(struct mmc_host *mmc);
-static void s3cmci_reset(struct s3cmci_host *host);
-
-#ifdef CONFIG_MMC_DEBUG
-
-static void dbg_dumpregs(struct s3cmci_host *host, char *prefix)
-{
- u32 con, pre, cmdarg, cmdcon, cmdsta, r0, r1, r2, r3, timer;
- u32 datcon, datcnt, datsta, fsta;
-
- con = readl(host->base + S3C2410_SDICON);
- pre = readl(host->base + S3C2410_SDIPRE);
- cmdarg = readl(host->base + S3C2410_SDICMDARG);
- cmdcon = readl(host->base + S3C2410_SDICMDCON);
- cmdsta = readl(host->base + S3C2410_SDICMDSTAT);
- r0 = readl(host->base + S3C2410_SDIRSP0);
- r1 = readl(host->base + S3C2410_SDIRSP1);
- r2 = readl(host->base + S3C2410_SDIRSP2);
- r3 = readl(host->base + S3C2410_SDIRSP3);
- timer = readl(host->base + S3C2410_SDITIMER);
- datcon = readl(host->base + S3C2410_SDIDCON);
- datcnt = readl(host->base + S3C2410_SDIDCNT);
- datsta = readl(host->base + S3C2410_SDIDSTA);
- fsta = readl(host->base + S3C2410_SDIFSTA);
-
- dbg(host, dbg_debug, "%s CON:[%08x] PRE:[%08x] TMR:[%08x]\n",
- prefix, con, pre, timer);
-
- dbg(host, dbg_debug, "%s CCON:[%08x] CARG:[%08x] CSTA:[%08x]\n",
- prefix, cmdcon, cmdarg, cmdsta);
-
- dbg(host, dbg_debug, "%s DCON:[%08x] FSTA:[%08x]"
- " DSTA:[%08x] DCNT:[%08x]\n",
- prefix, datcon, fsta, datsta, datcnt);
-
- dbg(host, dbg_debug, "%s R0:[%08x] R1:[%08x]"
- " R2:[%08x] R3:[%08x]\n",
- prefix, r0, r1, r2, r3);
-}
-
-static void prepare_dbgmsg(struct s3cmci_host *host, struct mmc_command *cmd,
- int stop)
-{
- snprintf(host->dbgmsg_cmd, 300,
- "#%u%s op:%i arg:0x%08x flags:0x08%x retries:%u",
- host->ccnt, (stop ? " (STOP)" : ""),
- cmd->opcode, cmd->arg, cmd->flags, cmd->retries);
-
- if (cmd->data) {
- snprintf(host->dbgmsg_dat, 300,
- "#%u bsize:%u blocks:%u bytes:%u",
- host->dcnt, cmd->data->blksz,
- cmd->data->blocks,
- cmd->data->blocks * cmd->data->blksz);
- } else {
- host->dbgmsg_dat[0] = '\0';
- }
-}
-
-static void dbg_dumpcmd(struct s3cmci_host *host, struct mmc_command *cmd,
- int fail)
-{
- unsigned int dbglvl = fail ? dbg_fail : dbg_debug;
-
- if (!cmd)
- return;
-
- if (cmd->error == 0) {
- dbg(host, dbglvl, "CMD[OK] %s R0:0x%08x\n",
- host->dbgmsg_cmd, cmd->resp[0]);
- } else {
- dbg(host, dbglvl, "CMD[ERR %i] %s Status:%s\n",
- cmd->error, host->dbgmsg_cmd, host->status);
- }
-
- if (!cmd->data)
- return;
-
- if (cmd->data->error == 0) {
- dbg(host, dbglvl, "DAT[OK] %s\n", host->dbgmsg_dat);
- } else {
- dbg(host, dbglvl, "DAT[ERR %i] %s DCNT:0x%08x\n",
- cmd->data->error, host->dbgmsg_dat,
- readl(host->base + S3C2410_SDIDCNT));
- }
-}
-#else
-static void dbg_dumpcmd(struct s3cmci_host *host,
- struct mmc_command *cmd, int fail) { }
-
-static void prepare_dbgmsg(struct s3cmci_host *host, struct mmc_command *cmd,
- int stop) { }
-
-static void dbg_dumpregs(struct s3cmci_host *host, char *prefix) { }
-
-#endif /* CONFIG_MMC_DEBUG */
-
-/**
- * s3cmci_host_usedma - return whether the host is using dma or pio
- * @host: The host state
- *
- * Return true if the host is using DMA to transfer data, else false
- * to use PIO mode. Will return static data depending on the driver
- * configuration.
- */
-static inline bool s3cmci_host_usedma(struct s3cmci_host *host)
-{
-#ifdef CONFIG_MMC_S3C_PIO
- return false;
-#else /* CONFIG_MMC_S3C_DMA */
- return true;
-#endif
-}
-
-static inline u32 enable_imask(struct s3cmci_host *host, u32 imask)
-{
- u32 newmask;
-
- newmask = readl(host->base + host->sdiimsk);
- newmask |= imask;
-
- writel(newmask, host->base + host->sdiimsk);
-
- return newmask;
-}
-
-static inline u32 disable_imask(struct s3cmci_host *host, u32 imask)
-{
- u32 newmask;
-
- newmask = readl(host->base + host->sdiimsk);
- newmask &= ~imask;
-
- writel(newmask, host->base + host->sdiimsk);
-
- return newmask;
-}
-
-static inline void clear_imask(struct s3cmci_host *host)
-{
- u32 mask = readl(host->base + host->sdiimsk);
-
- /* preserve the SDIO IRQ mask state */
- mask &= S3C2410_SDIIMSK_SDIOIRQ;
- writel(mask, host->base + host->sdiimsk);
-}
-
-/**
- * s3cmci_check_sdio_irq - test whether the SDIO IRQ is being signalled
- * @host: The host to check.
- *
- * Test to see if the SDIO interrupt is being signalled in case the
- * controller has failed to re-detect a card interrupt. Read GPE8 and
- * see if it is low and if so, signal a SDIO interrupt.
- *
- * This is currently called if a request is finished (we assume that the
- * bus is now idle) and when the SDIO IRQ is enabled in case the IRQ is
- * already being indicated.
-*/
-static void s3cmci_check_sdio_irq(struct s3cmci_host *host)
-{
- if (host->sdio_irqen) {
- if (host->pdata->bus[3] &&
- gpiod_get_value(host->pdata->bus[3]) == 0) {
- pr_debug("%s: signalling irq\n", __func__);
- mmc_signal_sdio_irq(host->mmc);
- }
- }
-}
-
-static inline int get_data_buffer(struct s3cmci_host *host,
- u32 *bytes, u32 **pointer)
-{
- struct scatterlist *sg;
-
- if (host->pio_active == XFER_NONE)
- return -EINVAL;
-
- if ((!host->mrq) || (!host->mrq->data))
- return -EINVAL;
-
- if (host->pio_sgptr >= host->mrq->data->sg_len) {
- dbg(host, dbg_debug, "no more buffers (%i/%i)\n",
- host->pio_sgptr, host->mrq->data->sg_len);
- return -EBUSY;
- }
- sg = &host->mrq->data->sg[host->pio_sgptr];
-
- *bytes = sg->length;
- *pointer = sg_virt(sg);
-
- host->pio_sgptr++;
-
- dbg(host, dbg_sg, "new buffer (%i/%i)\n",
- host->pio_sgptr, host->mrq->data->sg_len);
-
- return 0;
-}
-
-static inline u32 fifo_count(struct s3cmci_host *host)
-{
- u32 fifostat = readl(host->base + S3C2410_SDIFSTA);
-
- fifostat &= S3C2410_SDIFSTA_COUNTMASK;
- return fifostat;
-}
-
-static inline u32 fifo_free(struct s3cmci_host *host)
-{
- u32 fifostat = readl(host->base + S3C2410_SDIFSTA);
-
- fifostat &= S3C2410_SDIFSTA_COUNTMASK;
- return 63 - fifostat;
-}
-
-/**
- * s3cmci_enable_irq - enable IRQ, after having disabled it.
- * @host: The device state.
- * @more: True if more IRQs are expected from transfer.
- *
- * Enable the main IRQ if needed after it has been disabled.
- *
- * The IRQ can be one of the following states:
- * - disabled during IDLE
- * - disabled whilst processing data
- * - enabled during transfer
- * - enabled whilst awaiting SDIO interrupt detection
- */
-static void s3cmci_enable_irq(struct s3cmci_host *host, bool more)
-{
- unsigned long flags;
- bool enable = false;
-
- local_irq_save(flags);
-
- host->irq_enabled = more;
- host->irq_disabled = false;
-
- enable = more | host->sdio_irqen;
-
- if (host->irq_state != enable) {
- host->irq_state = enable;
-
- if (enable)
- enable_irq(host->irq);
- else
- disable_irq(host->irq);
- }
-
- local_irq_restore(flags);
-}
-
-static void s3cmci_disable_irq(struct s3cmci_host *host, bool transfer)
-{
- unsigned long flags;
-
- local_irq_save(flags);
-
- /* pr_debug("%s: transfer %d\n", __func__, transfer); */
-
- host->irq_disabled = transfer;
-
- if (transfer && host->irq_state) {
- host->irq_state = false;
- disable_irq(host->irq);
- }
-
- local_irq_restore(flags);
-}
-
-static void do_pio_read(struct s3cmci_host *host)
-{
- int res;
- u32 fifo;
- u32 *ptr;
- u32 fifo_words;
- void __iomem *from_ptr;
-
- /* write real prescaler to host, it might be set slow to fix */
- writel(host->prescaler, host->base + S3C2410_SDIPRE);
-
- from_ptr = host->base + host->sdidata;
-
- while ((fifo = fifo_count(host))) {
- if (!host->pio_bytes) {
- res = get_data_buffer(host, &host->pio_bytes,
- &host->pio_ptr);
- if (res) {
- host->pio_active = XFER_NONE;
- host->complete_what = COMPLETION_FINALIZE;
-
- dbg(host, dbg_pio, "pio_read(): "
- "complete (no more data).\n");
- return;
- }
-
- dbg(host, dbg_pio,
- "pio_read(): new target: [%i]@[%p]\n",
- host->pio_bytes, host->pio_ptr);
- }
-
- dbg(host, dbg_pio,
- "pio_read(): fifo:[%02i] buffer:[%03i] dcnt:[%08X]\n",
- fifo, host->pio_bytes,
- readl(host->base + S3C2410_SDIDCNT));
-
- /* If we have reached the end of the block, we can
- * read a word and get 1 to 3 bytes. If we in the
- * middle of the block, we have to read full words,
- * otherwise we will write garbage, so round down to
- * an even multiple of 4. */
- if (fifo >= host->pio_bytes)
- fifo = host->pio_bytes;
- else
- fifo -= fifo & 3;
-
- host->pio_bytes -= fifo;
- host->pio_count += fifo;
-
- fifo_words = fifo >> 2;
- ptr = host->pio_ptr;
- while (fifo_words--)
- *ptr++ = readl(from_ptr);
- host->pio_ptr = ptr;
-
- if (fifo & 3) {
- u32 n = fifo & 3;
- u32 data = readl(from_ptr);
- u8 *p = (u8 *)host->pio_ptr;
-
- while (n--) {
- *p++ = data;
- data >>= 8;
- }
- }
- }
-
- if (!host->pio_bytes) {
- res = get_data_buffer(host, &host->pio_bytes, &host->pio_ptr);
- if (res) {
- dbg(host, dbg_pio,
- "pio_read(): complete (no more buffers).\n");
- host->pio_active = XFER_NONE;
- host->complete_what = COMPLETION_FINALIZE;
-
- return;
- }
- }
-
- enable_imask(host,
- S3C2410_SDIIMSK_RXFIFOHALF | S3C2410_SDIIMSK_RXFIFOLAST);
-}
-
-static void do_pio_write(struct s3cmci_host *host)
-{
- void __iomem *to_ptr;
- int res;
- u32 fifo;
- u32 *ptr;
-
- to_ptr = host->base + host->sdidata;
-
- while ((fifo = fifo_free(host)) > 3) {
- if (!host->pio_bytes) {
- res = get_data_buffer(host, &host->pio_bytes,
- &host->pio_ptr);
- if (res) {
- dbg(host, dbg_pio,
- "pio_write(): complete (no more data).\n");
- host->pio_active = XFER_NONE;
-
- return;
- }
-
- dbg(host, dbg_pio,
- "pio_write(): new source: [%i]@[%p]\n",
- host->pio_bytes, host->pio_ptr);
-
- }
-
- /* If we have reached the end of the block, we have to
- * write exactly the remaining number of bytes. If we
- * in the middle of the block, we have to write full
- * words, so round down to an even multiple of 4. */
- if (fifo >= host->pio_bytes)
- fifo = host->pio_bytes;
- else
- fifo -= fifo & 3;
-
- host->pio_bytes -= fifo;
- host->pio_count += fifo;
-
- fifo = (fifo + 3) >> 2;
- ptr = host->pio_ptr;
- while (fifo--)
- writel(*ptr++, to_ptr);
- host->pio_ptr = ptr;
- }
-
- enable_imask(host, S3C2410_SDIIMSK_TXFIFOHALF);
-}
-
-static void pio_tasklet(struct tasklet_struct *t)
-{
- struct s3cmci_host *host = from_tasklet(host, t, pio_tasklet);
-
- s3cmci_disable_irq(host, true);
-
- if (host->pio_active == XFER_WRITE)
- do_pio_write(host);
-
- if (host->pio_active == XFER_READ)
- do_pio_read(host);
-
- if (host->complete_what == COMPLETION_FINALIZE) {
- clear_imask(host);
- if (host->pio_active != XFER_NONE) {
- dbg(host, dbg_err, "unfinished %s "
- "- pio_count:[%u] pio_bytes:[%u]\n",
- (host->pio_active == XFER_READ) ? "read" : "write",
- host->pio_count, host->pio_bytes);
-
- if (host->mrq->data)
- host->mrq->data->error = -EINVAL;
- }
-
- s3cmci_enable_irq(host, false);
- finalize_request(host);
- } else
- s3cmci_enable_irq(host, true);
-}
-
-/*
- * ISR for SDI Interface IRQ
- * Communication between driver and ISR works as follows:
- * host->mrq points to current request
- * host->complete_what Indicates when the request is considered done
- * COMPLETION_CMDSENT when the command was sent
- * COMPLETION_RSPFIN when a response was received
- * COMPLETION_XFERFINISH when the data transfer is finished
- * COMPLETION_XFERFINISH_RSPFIN both of the above.
- * host->complete_request is the completion-object the driver waits for
- *
- * 1) Driver sets up host->mrq and host->complete_what
- * 2) Driver prepares the transfer
- * 3) Driver enables interrupts
- * 4) Driver starts transfer
- * 5) Driver waits for host->complete_rquest
- * 6) ISR checks for request status (errors and success)
- * 6) ISR sets host->mrq->cmd->error and host->mrq->data->error
- * 7) ISR completes host->complete_request
- * 8) ISR disables interrupts
- * 9) Driver wakes up and takes care of the request
- *
- * Note: "->error"-fields are expected to be set to 0 before the request
- * was issued by mmc.c - therefore they are only set, when an error
- * contition comes up
- */
-
-static irqreturn_t s3cmci_irq(int irq, void *dev_id)
-{
- struct s3cmci_host *host = dev_id;
- struct mmc_command *cmd;
- u32 mci_csta, mci_dsta, mci_fsta, mci_dcnt, mci_imsk;
- u32 mci_cclear = 0, mci_dclear;
- unsigned long iflags;
-
- mci_dsta = readl(host->base + S3C2410_SDIDSTA);
- mci_imsk = readl(host->base + host->sdiimsk);
-
- if (mci_dsta & S3C2410_SDIDSTA_SDIOIRQDETECT) {
- if (mci_imsk & S3C2410_SDIIMSK_SDIOIRQ) {
- mci_dclear = S3C2410_SDIDSTA_SDIOIRQDETECT;
- writel(mci_dclear, host->base + S3C2410_SDIDSTA);
-
- mmc_signal_sdio_irq(host->mmc);
- return IRQ_HANDLED;
- }
- }
-
- spin_lock_irqsave(&host->complete_lock, iflags);
-
- mci_csta = readl(host->base + S3C2410_SDICMDSTAT);
- mci_dcnt = readl(host->base + S3C2410_SDIDCNT);
- mci_fsta = readl(host->base + S3C2410_SDIFSTA);
- mci_dclear = 0;
-
- if ((host->complete_what == COMPLETION_NONE) ||
- (host->complete_what == COMPLETION_FINALIZE)) {
- host->status = "nothing to complete";
- clear_imask(host);
- goto irq_out;
- }
-
- if (!host->mrq) {
- host->status = "no active mrq";
- clear_imask(host);
- goto irq_out;
- }
-
- cmd = host->cmd_is_stop ? host->mrq->stop : host->mrq->cmd;
-
- if (!cmd) {
- host->status = "no active cmd";
- clear_imask(host);
- goto irq_out;
- }
-
- if (!s3cmci_host_usedma(host)) {
- if ((host->pio_active == XFER_WRITE) &&
- (mci_fsta & S3C2410_SDIFSTA_TFDET)) {
-
- disable_imask(host, S3C2410_SDIIMSK_TXFIFOHALF);
- tasklet_schedule(&host->pio_tasklet);
- host->status = "pio tx";
- }
-
- if ((host->pio_active == XFER_READ) &&
- (mci_fsta & S3C2410_SDIFSTA_RFDET)) {
-
- disable_imask(host,
- S3C2410_SDIIMSK_RXFIFOHALF |
- S3C2410_SDIIMSK_RXFIFOLAST);
-
- tasklet_schedule(&host->pio_tasklet);
- host->status = "pio rx";
- }
- }
-
- if (mci_csta & S3C2410_SDICMDSTAT_CMDTIMEOUT) {
- dbg(host, dbg_err, "CMDSTAT: error CMDTIMEOUT\n");
- cmd->error = -ETIMEDOUT;
- host->status = "error: command timeout";
- goto fail_transfer;
- }
-
- if (mci_csta & S3C2410_SDICMDSTAT_CMDSENT) {
- if (host->complete_what == COMPLETION_CMDSENT) {
- host->status = "ok: command sent";
- goto close_transfer;
- }
-
- mci_cclear |= S3C2410_SDICMDSTAT_CMDSENT;
- }
-
- if (mci_csta & S3C2410_SDICMDSTAT_CRCFAIL) {
- if (cmd->flags & MMC_RSP_CRC) {
- if (host->mrq->cmd->flags & MMC_RSP_136) {
- dbg(host, dbg_irq,
- "fixup: ignore CRC fail with long rsp\n");
- } else {
- /* note, we used to fail the transfer
- * here, but it seems that this is just
- * the hardware getting it wrong.
- *
- * cmd->error = -EILSEQ;
- * host->status = "error: bad command crc";
- * goto fail_transfer;
- */
- }
- }
-
- mci_cclear |= S3C2410_SDICMDSTAT_CRCFAIL;
- }
-
- if (mci_csta & S3C2410_SDICMDSTAT_RSPFIN) {
- if (host->complete_what == COMPLETION_RSPFIN) {
- host->status = "ok: command response received";
- goto close_transfer;
- }
-
- if (host->complete_what == COMPLETION_XFERFINISH_RSPFIN)
- host->complete_what = COMPLETION_XFERFINISH;
-
- mci_cclear |= S3C2410_SDICMDSTAT_RSPFIN;
- }
-
- /* errors handled after this point are only relevant
- when a data transfer is in progress */
-
- if (!cmd->data)
- goto clear_status_bits;
-
- /* Check for FIFO failure */
- if (host->is2440) {
- if (mci_fsta & S3C2440_SDIFSTA_FIFOFAIL) {
- dbg(host, dbg_err, "FIFO failure\n");
- host->mrq->data->error = -EILSEQ;
- host->status = "error: 2440 fifo failure";
- goto fail_transfer;
- }
- } else {
- if (mci_dsta & S3C2410_SDIDSTA_FIFOFAIL) {
- dbg(host, dbg_err, "FIFO failure\n");
- cmd->data->error = -EILSEQ;
- host->status = "error: fifo failure";
- goto fail_transfer;
- }
- }
-
- if (mci_dsta & S3C2410_SDIDSTA_RXCRCFAIL) {
- dbg(host, dbg_err, "bad data crc (outgoing)\n");
- cmd->data->error = -EILSEQ;
- host->status = "error: bad data crc (outgoing)";
- goto fail_transfer;
- }
-
- if (mci_dsta & S3C2410_SDIDSTA_CRCFAIL) {
- dbg(host, dbg_err, "bad data crc (incoming)\n");
- cmd->data->error = -EILSEQ;
- host->status = "error: bad data crc (incoming)";
- goto fail_transfer;
- }
-
- if (mci_dsta & S3C2410_SDIDSTA_DATATIMEOUT) {
- dbg(host, dbg_err, "data timeout\n");
- cmd->data->error = -ETIMEDOUT;
- host->status = "error: data timeout";
- goto fail_transfer;
- }
-
- if (mci_dsta & S3C2410_SDIDSTA_XFERFINISH) {
- if (host->complete_what == COMPLETION_XFERFINISH) {
- host->status = "ok: data transfer completed";
- goto close_transfer;
- }
-
- if (host->complete_what == COMPLETION_XFERFINISH_RSPFIN)
- host->complete_what = COMPLETION_RSPFIN;
-
- mci_dclear |= S3C2410_SDIDSTA_XFERFINISH;
- }
-
-clear_status_bits:
- writel(mci_cclear, host->base + S3C2410_SDICMDSTAT);
- writel(mci_dclear, host->base + S3C2410_SDIDSTA);
-
- goto irq_out;
-
-fail_transfer:
- host->pio_active = XFER_NONE;
-
-close_transfer:
- host->complete_what = COMPLETION_FINALIZE;
-
- clear_imask(host);
- tasklet_schedule(&host->pio_tasklet);
-
- goto irq_out;
-
-irq_out:
- dbg(host, dbg_irq,
- "csta:0x%08x dsta:0x%08x fsta:0x%08x dcnt:0x%08x status:%s.\n",
- mci_csta, mci_dsta, mci_fsta, mci_dcnt, host->status);
-
- spin_unlock_irqrestore(&host->complete_lock, iflags);
- return IRQ_HANDLED;
-
-}
-
-static void s3cmci_dma_done_callback(void *arg)
-{
- struct s3cmci_host *host = arg;
- unsigned long iflags;
-
- BUG_ON(!host->mrq);
- BUG_ON(!host->mrq->data);
-
- spin_lock_irqsave(&host->complete_lock, iflags);
-
- dbg(host, dbg_dma, "DMA FINISHED\n");
-
- host->dma_complete = 1;
- host->complete_what = COMPLETION_FINALIZE;
-
- tasklet_schedule(&host->pio_tasklet);
- spin_unlock_irqrestore(&host->complete_lock, iflags);
-
-}
-
-static void finalize_request(struct s3cmci_host *host)
-{
- struct mmc_request *mrq = host->mrq;
- struct mmc_command *cmd;
- int debug_as_failure = 0;
-
- if (host->complete_what != COMPLETION_FINALIZE)
- return;
-
- if (!mrq)
- return;
- cmd = host->cmd_is_stop ? mrq->stop : mrq->cmd;
-
- if (cmd->data && (cmd->error == 0) &&
- (cmd->data->error == 0)) {
- if (s3cmci_host_usedma(host) && (!host->dma_complete)) {
- dbg(host, dbg_dma, "DMA Missing (%d)!\n",
- host->dma_complete);
- return;
- }
- }
-
- /* Read response from controller. */
- cmd->resp[0] = readl(host->base + S3C2410_SDIRSP0);
- cmd->resp[1] = readl(host->base + S3C2410_SDIRSP1);
- cmd->resp[2] = readl(host->base + S3C2410_SDIRSP2);
- cmd->resp[3] = readl(host->base + S3C2410_SDIRSP3);
-
- writel(host->prescaler, host->base + S3C2410_SDIPRE);
-
- if (cmd->error)
- debug_as_failure = 1;
-
- if (cmd->data && cmd->data->error)
- debug_as_failure = 1;
-
- dbg_dumpcmd(host, cmd, debug_as_failure);
-
- /* Cleanup controller */
- writel(0, host->base + S3C2410_SDICMDARG);
- writel(S3C2410_SDIDCON_STOP, host->base + S3C2410_SDIDCON);
- writel(0, host->base + S3C2410_SDICMDCON);
- clear_imask(host);
-
- if (cmd->data && cmd->error)
- cmd->data->error = cmd->error;
-
- if (cmd->data && cmd->data->stop && (!host->cmd_is_stop)) {
- host->cmd_is_stop = 1;
- s3cmci_send_request(host->mmc);
- return;
- }
-
- /* If we have no data transfer we are finished here */
- if (!mrq->data)
- goto request_done;
-
- /* Calculate the amout of bytes transfer if there was no error */
- if (mrq->data->error == 0) {
- mrq->data->bytes_xfered =
- (mrq->data->blocks * mrq->data->blksz);
- } else {
- mrq->data->bytes_xfered = 0;
- }
-
- /* If we had an error while transferring data we flush the
- * DMA channel and the fifo to clear out any garbage. */
- if (mrq->data->error != 0) {
- if (s3cmci_host_usedma(host))
- dmaengine_terminate_all(host->dma);
-
- if (host->is2440) {
- /* Clear failure register and reset fifo. */
- writel(S3C2440_SDIFSTA_FIFORESET |
- S3C2440_SDIFSTA_FIFOFAIL,
- host->base + S3C2410_SDIFSTA);
- } else {
- u32 mci_con;
-
- /* reset fifo */
- mci_con = readl(host->base + S3C2410_SDICON);
- mci_con |= S3C2410_SDICON_FIFORESET;
-
- writel(mci_con, host->base + S3C2410_SDICON);
- }
- }
-
-request_done:
- host->complete_what = COMPLETION_NONE;
- host->mrq = NULL;
-
- s3cmci_check_sdio_irq(host);
- mmc_request_done(host->mmc, mrq);
-}
-
-static void s3cmci_send_command(struct s3cmci_host *host,
- struct mmc_command *cmd)
-{
- u32 ccon, imsk;
-
- imsk = S3C2410_SDIIMSK_CRCSTATUS | S3C2410_SDIIMSK_CMDTIMEOUT |
- S3C2410_SDIIMSK_RESPONSEND | S3C2410_SDIIMSK_CMDSENT |
- S3C2410_SDIIMSK_RESPONSECRC;
-
- enable_imask(host, imsk);
-
- if (cmd->data)
- host->complete_what = COMPLETION_XFERFINISH_RSPFIN;
- else if (cmd->flags & MMC_RSP_PRESENT)
- host->complete_what = COMPLETION_RSPFIN;
- else
- host->complete_what = COMPLETION_CMDSENT;
-
- writel(cmd->arg, host->base + S3C2410_SDICMDARG);
-
- ccon = cmd->opcode & S3C2410_SDICMDCON_INDEX;
- ccon |= S3C2410_SDICMDCON_SENDERHOST | S3C2410_SDICMDCON_CMDSTART;
-
- if (cmd->flags & MMC_RSP_PRESENT)
- ccon |= S3C2410_SDICMDCON_WAITRSP;
-
- if (cmd->flags & MMC_RSP_136)
- ccon |= S3C2410_SDICMDCON_LONGRSP;
-
- writel(ccon, host->base + S3C2410_SDICMDCON);
-}
-
-static int s3cmci_setup_data(struct s3cmci_host *host, struct mmc_data *data)
-{
- u32 dcon, imsk, stoptries = 3;
-
- if ((data->blksz & 3) != 0) {
- /* We cannot deal with unaligned blocks with more than
- * one block being transferred. */
-
- if (data->blocks > 1) {
- pr_warn("%s: can't do non-word sized block transfers (blksz %d)\n",
- __func__, data->blksz);
- return -EINVAL;
- }
- }
-
- while (readl(host->base + S3C2410_SDIDSTA) &
- (S3C2410_SDIDSTA_TXDATAON | S3C2410_SDIDSTA_RXDATAON)) {
-
- dbg(host, dbg_err,
- "mci_setup_data() transfer stillin progress.\n");
-
- writel(S3C2410_SDIDCON_STOP, host->base + S3C2410_SDIDCON);
- s3cmci_reset(host);
-
- if ((stoptries--) == 0) {
- dbg_dumpregs(host, "DRF");
- return -EINVAL;
- }
- }
-
- dcon = data->blocks & S3C2410_SDIDCON_BLKNUM_MASK;
-
- if (s3cmci_host_usedma(host))
- dcon |= S3C2410_SDIDCON_DMAEN;
-
- if (host->bus_width == MMC_BUS_WIDTH_4)
- dcon |= S3C2410_SDIDCON_WIDEBUS;
-
- dcon |= S3C2410_SDIDCON_BLOCKMODE;
-
- if (data->flags & MMC_DATA_WRITE) {
- dcon |= S3C2410_SDIDCON_TXAFTERRESP;
- dcon |= S3C2410_SDIDCON_XFER_TXSTART;
- }
-
- if (data->flags & MMC_DATA_READ) {
- dcon |= S3C2410_SDIDCON_RXAFTERCMD;
- dcon |= S3C2410_SDIDCON_XFER_RXSTART;
- }
-
- if (host->is2440) {
- dcon |= S3C2440_SDIDCON_DS_WORD;
- dcon |= S3C2440_SDIDCON_DATSTART;
- }
-
- writel(dcon, host->base + S3C2410_SDIDCON);
-
- /* write BSIZE register */
-
- writel(data->blksz, host->base + S3C2410_SDIBSIZE);
-
- /* add to IMASK register */
- imsk = S3C2410_SDIIMSK_FIFOFAIL | S3C2410_SDIIMSK_DATACRC |
- S3C2410_SDIIMSK_DATATIMEOUT | S3C2410_SDIIMSK_DATAFINISH;
-
- enable_imask(host, imsk);
-
- /* write TIMER register */
-
- if (host->is2440) {
- writel(0x007FFFFF, host->base + S3C2410_SDITIMER);
- } else {
- writel(0x0000FFFF, host->base + S3C2410_SDITIMER);
-
- /* FIX: set slow clock to prevent timeouts on read */
- if (data->flags & MMC_DATA_READ)
- writel(0xFF, host->base + S3C2410_SDIPRE);
- }
-
- return 0;
-}
-
-#define BOTH_DIR (MMC_DATA_WRITE | MMC_DATA_READ)
-
-static int s3cmci_prepare_pio(struct s3cmci_host *host, struct mmc_data *data)
-{
- int rw = (data->flags & MMC_DATA_WRITE) ? 1 : 0;
-
- BUG_ON((data->flags & BOTH_DIR) == BOTH_DIR);
-
- host->pio_sgptr = 0;
- host->pio_bytes = 0;
- host->pio_count = 0;
- host->pio_active = rw ? XFER_WRITE : XFER_READ;
-
- if (rw) {
- do_pio_write(host);
- enable_imask(host, S3C2410_SDIIMSK_TXFIFOHALF);
- } else {
- enable_imask(host, S3C2410_SDIIMSK_RXFIFOHALF
- | S3C2410_SDIIMSK_RXFIFOLAST);
- }
-
- return 0;
-}
-
-static int s3cmci_prepare_dma(struct s3cmci_host *host, struct mmc_data *data)
-{
- int rw = data->flags & MMC_DATA_WRITE;
- struct dma_async_tx_descriptor *desc;
- struct dma_slave_config conf = {
- .src_addr = host->mem->start + host->sdidata,
- .dst_addr = host->mem->start + host->sdidata,
- .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
- .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
- };
-
- BUG_ON((data->flags & BOTH_DIR) == BOTH_DIR);
-
- /* Restore prescaler value */
- writel(host->prescaler, host->base + S3C2410_SDIPRE);
-
- if (!rw)
- conf.direction = DMA_DEV_TO_MEM;
- else
- conf.direction = DMA_MEM_TO_DEV;
-
- dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
- mmc_get_dma_dir(data));
-
- dmaengine_slave_config(host->dma, &conf);
- desc = dmaengine_prep_slave_sg(host->dma, data->sg, data->sg_len,
- conf.direction,
- DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
- if (!desc)
- goto unmap_exit;
- desc->callback = s3cmci_dma_done_callback;
- desc->callback_param = host;
- dmaengine_submit(desc);
- dma_async_issue_pending(host->dma);
-
- return 0;
-
-unmap_exit:
- dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
- mmc_get_dma_dir(data));
- return -ENOMEM;
-}
-
-static void s3cmci_send_request(struct mmc_host *mmc)
-{
- struct s3cmci_host *host = mmc_priv(mmc);
- struct mmc_request *mrq = host->mrq;
- struct mmc_command *cmd = host->cmd_is_stop ? mrq->stop : mrq->cmd;
-
- host->ccnt++;
- prepare_dbgmsg(host, cmd, host->cmd_is_stop);
-
- /* Clear command, data and fifo status registers
- Fifo clear only necessary on 2440, but doesn't hurt on 2410
- */
- writel(0xFFFFFFFF, host->base + S3C2410_SDICMDSTAT);
- writel(0xFFFFFFFF, host->base + S3C2410_SDIDSTA);
- writel(0xFFFFFFFF, host->base + S3C2410_SDIFSTA);
-
- if (cmd->data) {
- int res = s3cmci_setup_data(host, cmd->data);
-
- host->dcnt++;
-
- if (res) {
- dbg(host, dbg_err, "setup data error %d\n", res);
- cmd->error = res;
- cmd->data->error = res;
-
- mmc_request_done(mmc, mrq);
- return;
- }
-
- if (s3cmci_host_usedma(host))
- res = s3cmci_prepare_dma(host, cmd->data);
- else
- res = s3cmci_prepare_pio(host, cmd->data);
-
- if (res) {
- dbg(host, dbg_err, "data prepare error %d\n", res);
- cmd->error = res;
- cmd->data->error = res;
-
- mmc_request_done(mmc, mrq);
- return;
- }
- }
-
- /* Send command */
- s3cmci_send_command(host, cmd);
-
- /* Enable Interrupt */
- s3cmci_enable_irq(host, true);
-}
-
-static void s3cmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
-{
- struct s3cmci_host *host = mmc_priv(mmc);
-
- host->status = "mmc request";
- host->cmd_is_stop = 0;
- host->mrq = mrq;
-
- if (mmc_gpio_get_cd(mmc) == 0) {
- dbg(host, dbg_err, "%s: no medium present\n", __func__);
- host->mrq->cmd->error = -ENOMEDIUM;
- mmc_request_done(mmc, mrq);
- } else
- s3cmci_send_request(mmc);
-}
-
-static void s3cmci_set_clk(struct s3cmci_host *host, struct mmc_ios *ios)
-{
- u32 mci_psc;
-
- /* Set clock */
- for (mci_psc = 0; mci_psc < 255; mci_psc++) {
- host->real_rate = host->clk_rate / (host->clk_div*(mci_psc+1));
-
- if (host->real_rate <= ios->clock)
- break;
- }
-
- if (mci_psc > 255)
- mci_psc = 255;
-
- host->prescaler = mci_psc;
- writel(host->prescaler, host->base + S3C2410_SDIPRE);
-
- /* If requested clock is 0, real_rate will be 0, too */
- if (ios->clock == 0)
- host->real_rate = 0;
-}
-
-static void s3cmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
-{
- struct s3cmci_host *host = mmc_priv(mmc);
- u32 mci_con;
-
- /* Set the power state */
-
- mci_con = readl(host->base + S3C2410_SDICON);
-
- switch (ios->power_mode) {
- case MMC_POWER_ON:
- case MMC_POWER_UP:
- if (!host->is2440)
- mci_con |= S3C2410_SDICON_FIFORESET;
- break;
-
- case MMC_POWER_OFF:
- default:
- if (host->is2440)
- mci_con |= S3C2440_SDICON_SDRESET;
- break;
- }
-
- if (host->pdata->set_power)
- host->pdata->set_power(ios->power_mode, ios->vdd);
-
- s3cmci_set_clk(host, ios);
-
- /* Set CLOCK_ENABLE */
- if (ios->clock)
- mci_con |= S3C2410_SDICON_CLOCKTYPE;
- else
- mci_con &= ~S3C2410_SDICON_CLOCKTYPE;
-
- writel(mci_con, host->base + S3C2410_SDICON);
-
- if ((ios->power_mode == MMC_POWER_ON) ||
- (ios->power_mode == MMC_POWER_UP)) {
- dbg(host, dbg_conf, "running at %lukHz (requested: %ukHz).\n",
- host->real_rate/1000, ios->clock/1000);
- } else {
- dbg(host, dbg_conf, "powered down.\n");
- }
-
- host->bus_width = ios->bus_width;
-}
-
-static void s3cmci_reset(struct s3cmci_host *host)
-{
- u32 con = readl(host->base + S3C2410_SDICON);
-
- con |= S3C2440_SDICON_SDRESET;
- writel(con, host->base + S3C2410_SDICON);
-}
-
-static void s3cmci_enable_sdio_irq(struct mmc_host *mmc, int enable)
-{
- struct s3cmci_host *host = mmc_priv(mmc);
- unsigned long flags;
- u32 con;
-
- local_irq_save(flags);
-
- con = readl(host->base + S3C2410_SDICON);
- host->sdio_irqen = enable;
-
- if (enable == host->sdio_irqen)
- goto same_state;
-
- if (enable) {
- con |= S3C2410_SDICON_SDIOIRQ;
- enable_imask(host, S3C2410_SDIIMSK_SDIOIRQ);
-
- if (!host->irq_state && !host->irq_disabled) {
- host->irq_state = true;
- enable_irq(host->irq);
- }
- } else {
- disable_imask(host, S3C2410_SDIIMSK_SDIOIRQ);
- con &= ~S3C2410_SDICON_SDIOIRQ;
-
- if (!host->irq_enabled && host->irq_state) {
- disable_irq_nosync(host->irq);
- host->irq_state = false;
- }
- }
-
- writel(con, host->base + S3C2410_SDICON);
-
- same_state:
- local_irq_restore(flags);
-
- s3cmci_check_sdio_irq(host);
-}
-
-static const struct mmc_host_ops s3cmci_ops = {
- .request = s3cmci_request,
- .set_ios = s3cmci_set_ios,
- .get_ro = mmc_gpio_get_ro,
- .get_cd = mmc_gpio_get_cd,
- .enable_sdio_irq = s3cmci_enable_sdio_irq,
-};
-
-#ifdef CONFIG_ARM_S3C24XX_CPUFREQ
-
-static int s3cmci_cpufreq_transition(struct notifier_block *nb,
- unsigned long val, void *data)
-{
- struct s3cmci_host *host;
- struct mmc_host *mmc;
- unsigned long newclk;
- unsigned long flags;
-
- host = container_of(nb, struct s3cmci_host, freq_transition);
- newclk = clk_get_rate(host->clk);
- mmc = host->mmc;
-
- if ((val == CPUFREQ_PRECHANGE && newclk > host->clk_rate) ||
- (val == CPUFREQ_POSTCHANGE && newclk < host->clk_rate)) {
- spin_lock_irqsave(&mmc->lock, flags);
-
- host->clk_rate = newclk;
-
- if (mmc->ios.power_mode != MMC_POWER_OFF &&
- mmc->ios.clock != 0)
- s3cmci_set_clk(host, &mmc->ios);
-
- spin_unlock_irqrestore(&mmc->lock, flags);
- }
-
- return 0;
-}
-
-static inline int s3cmci_cpufreq_register(struct s3cmci_host *host)
-{
- host->freq_transition.notifier_call = s3cmci_cpufreq_transition;
-
- return cpufreq_register_notifier(&host->freq_transition,
- CPUFREQ_TRANSITION_NOTIFIER);
-}
-
-static inline void s3cmci_cpufreq_deregister(struct s3cmci_host *host)
-{
- cpufreq_unregister_notifier(&host->freq_transition,
- CPUFREQ_TRANSITION_NOTIFIER);
-}
-
-#else
-static inline int s3cmci_cpufreq_register(struct s3cmci_host *host)
-{
- return 0;
-}
-
-static inline void s3cmci_cpufreq_deregister(struct s3cmci_host *host)
-{
-}
-#endif
-
-
-#ifdef CONFIG_DEBUG_FS
-
-static int s3cmci_state_show(struct seq_file *seq, void *v)
-{
- struct s3cmci_host *host = seq->private;
-
- seq_printf(seq, "Register base = 0x%p\n", host->base);
- seq_printf(seq, "Clock rate = %ld\n", host->clk_rate);
- seq_printf(seq, "Prescale = %d\n", host->prescaler);
- seq_printf(seq, "is2440 = %d\n", host->is2440);
- seq_printf(seq, "IRQ = %d\n", host->irq);
- seq_printf(seq, "IRQ enabled = %d\n", host->irq_enabled);
- seq_printf(seq, "IRQ disabled = %d\n", host->irq_disabled);
- seq_printf(seq, "IRQ state = %d\n", host->irq_state);
- seq_printf(seq, "CD IRQ = %d\n", host->irq_cd);
- seq_printf(seq, "Do DMA = %d\n", s3cmci_host_usedma(host));
- seq_printf(seq, "SDIIMSK at %d\n", host->sdiimsk);
- seq_printf(seq, "SDIDATA at %d\n", host->sdidata);
-
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(s3cmci_state);
-
-#define DBG_REG(_r) { .addr = S3C2410_SDI##_r, .name = #_r }
-
-struct s3cmci_reg {
- unsigned short addr;
- unsigned char *name;
-};
-
-static const struct s3cmci_reg debug_regs[] = {
- DBG_REG(CON),
- DBG_REG(PRE),
- DBG_REG(CMDARG),
- DBG_REG(CMDCON),
- DBG_REG(CMDSTAT),
- DBG_REG(RSP0),
- DBG_REG(RSP1),
- DBG_REG(RSP2),
- DBG_REG(RSP3),
- DBG_REG(TIMER),
- DBG_REG(BSIZE),
- DBG_REG(DCON),
- DBG_REG(DCNT),
- DBG_REG(DSTA),
- DBG_REG(FSTA),
- {}
-};
-
-static int s3cmci_regs_show(struct seq_file *seq, void *v)
-{
- struct s3cmci_host *host = seq->private;
- const struct s3cmci_reg *rptr = debug_regs;
-
- for (; rptr->name; rptr++)
- seq_printf(seq, "SDI%s\t=0x%08x\n", rptr->name,
- readl(host->base + rptr->addr));
-
- seq_printf(seq, "SDIIMSK\t=0x%08x\n", readl(host->base + host->sdiimsk));
-
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(s3cmci_regs);
-
-static void s3cmci_debugfs_attach(struct s3cmci_host *host)
-{
- struct device *dev = &host->pdev->dev;
- struct dentry *root;
-
- root = debugfs_create_dir(dev_name(dev), NULL);
- host->debug_root = root;
-
- debugfs_create_file("state", 0444, root, host, &s3cmci_state_fops);
- debugfs_create_file("regs", 0444, root, host, &s3cmci_regs_fops);
-}
-
-static void s3cmci_debugfs_remove(struct s3cmci_host *host)
-{
- debugfs_remove_recursive(host->debug_root);
-}
-
-#else
-static inline void s3cmci_debugfs_attach(struct s3cmci_host *host) { }
-static inline void s3cmci_debugfs_remove(struct s3cmci_host *host) { }
-
-#endif /* CONFIG_DEBUG_FS */
-
-static int s3cmci_probe_pdata(struct s3cmci_host *host)
-{
- struct platform_device *pdev = host->pdev;
- struct mmc_host *mmc = host->mmc;
- struct s3c24xx_mci_pdata *pdata;
- int i, ret;
-
- host->is2440 = platform_get_device_id(pdev)->driver_data;
- pdata = pdev->dev.platform_data;
- if (!pdata) {
- dev_err(&pdev->dev, "need platform data");
- return -ENXIO;
- }
-
- for (i = 0; i < 6; i++) {
- pdata->bus[i] = devm_gpiod_get_index(&pdev->dev, "bus", i,
- GPIOD_OUT_LOW);
- if (IS_ERR(pdata->bus[i])) {
- dev_err(&pdev->dev, "failed to get gpio %d\n", i);
- return PTR_ERR(pdata->bus[i]);
- }
- }
-
- if (pdata->no_wprotect)
- mmc->caps2 |= MMC_CAP2_NO_WRITE_PROTECT;
-
- if (pdata->no_detect)
- mmc->caps |= MMC_CAP_NEEDS_POLL;
-
- if (pdata->wprotect_invert)
- mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
-
- /* If we get -ENOENT we have no card detect GPIO line */
- ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0);
- if (ret != -ENOENT) {
- dev_err(&pdev->dev, "error requesting GPIO for CD %d\n",
- ret);
- return ret;
- }
-
- ret = mmc_gpiod_request_ro(host->mmc, "wp", 0, 0);
- if (ret != -ENOENT) {
- dev_err(&pdev->dev, "error requesting GPIO for WP %d\n",
- ret);
- return ret;
- }
-
- return 0;
-}
-
-static int s3cmci_probe_dt(struct s3cmci_host *host)
-{
- struct platform_device *pdev = host->pdev;
- struct s3c24xx_mci_pdata *pdata;
- struct mmc_host *mmc = host->mmc;
- int ret;
-
- host->is2440 = (long) of_device_get_match_data(&pdev->dev);
-
- ret = mmc_of_parse(mmc);
- if (ret)
- return ret;
-
- pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata)
- return -ENOMEM;
-
- pdev->dev.platform_data = pdata;
-
- return 0;
-}
-
-static int s3cmci_probe(struct platform_device *pdev)
-{
- struct s3cmci_host *host;
- struct mmc_host *mmc;
- int ret;
-
- mmc = mmc_alloc_host(sizeof(struct s3cmci_host), &pdev->dev);
- if (!mmc) {
- ret = -ENOMEM;
- goto probe_out;
- }
-
- host = mmc_priv(mmc);
- host->mmc = mmc;
- host->pdev = pdev;
-
- if (pdev->dev.of_node)
- ret = s3cmci_probe_dt(host);
- else
- ret = s3cmci_probe_pdata(host);
-
- if (ret)
- goto probe_free_host;
-
- host->pdata = pdev->dev.platform_data;
-
- spin_lock_init(&host->complete_lock);
- tasklet_setup(&host->pio_tasklet, pio_tasklet);
-
- if (host->is2440) {
- host->sdiimsk = S3C2440_SDIIMSK;
- host->sdidata = S3C2440_SDIDATA;
- host->clk_div = 1;
- } else {
- host->sdiimsk = S3C2410_SDIIMSK;
- host->sdidata = S3C2410_SDIDATA;
- host->clk_div = 2;
- }
-
- host->complete_what = COMPLETION_NONE;
- host->pio_active = XFER_NONE;
-
- host->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!host->mem) {
- dev_err(&pdev->dev,
- "failed to get io memory region resource.\n");
-
- ret = -ENOENT;
- goto probe_free_host;
- }
-
- host->mem = request_mem_region(host->mem->start,
- resource_size(host->mem), pdev->name);
-
- if (!host->mem) {
- dev_err(&pdev->dev, "failed to request io memory region.\n");
- ret = -ENOENT;
- goto probe_free_host;
- }
-
- host->base = ioremap(host->mem->start, resource_size(host->mem));
- if (!host->base) {
- dev_err(&pdev->dev, "failed to ioremap() io memory region.\n");
- ret = -EINVAL;
- goto probe_free_mem_region;
- }
-
- host->irq = platform_get_irq(pdev, 0);
- if (host->irq <= 0) {
- ret = -EINVAL;
- goto probe_iounmap;
- }
-
- if (request_irq(host->irq, s3cmci_irq, IRQF_NO_AUTOEN, DRIVER_NAME, host)) {
- dev_err(&pdev->dev, "failed to request mci interrupt.\n");
- ret = -ENOENT;
- goto probe_iounmap;
- }
-
- host->irq_state = false;
-
- /* Depending on the dma state, get a DMA channel to use. */
-
- if (s3cmci_host_usedma(host)) {
- host->dma = dma_request_chan(&pdev->dev, "rx-tx");
- ret = PTR_ERR_OR_ZERO(host->dma);
- if (ret) {
- dev_err(&pdev->dev, "cannot get DMA channel.\n");
- goto probe_free_irq;
- }
- }
-
- host->clk = clk_get(&pdev->dev, "sdi");
- if (IS_ERR(host->clk)) {
- dev_err(&pdev->dev, "failed to find clock source.\n");
- ret = PTR_ERR(host->clk);
- host->clk = NULL;
- goto probe_free_dma;
- }
-
- ret = clk_prepare_enable(host->clk);
- if (ret) {
- dev_err(&pdev->dev, "failed to enable clock source.\n");
- goto clk_free;
- }
-
- host->clk_rate = clk_get_rate(host->clk);
-
- mmc->ops = &s3cmci_ops;
- mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
-#ifdef CONFIG_MMC_S3C_HW_SDIO_IRQ
- mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ;
-#else
- mmc->caps = MMC_CAP_4_BIT_DATA;
-#endif
- mmc->f_min = host->clk_rate / (host->clk_div * 256);
- mmc->f_max = host->clk_rate / host->clk_div;
-
- if (host->pdata->ocr_avail)
- mmc->ocr_avail = host->pdata->ocr_avail;
-
- mmc->max_blk_count = 4095;
- mmc->max_blk_size = 4095;
- mmc->max_req_size = 4095 * 512;
- mmc->max_seg_size = mmc->max_req_size;
-
- mmc->max_segs = 128;
-
- dbg(host, dbg_debug,
- "probe: mode:%s mapped mci_base:%p irq:%u irq_cd:%u dma:%p.\n",
- (host->is2440?"2440":""),
- host->base, host->irq, host->irq_cd, host->dma);
-
- ret = s3cmci_cpufreq_register(host);
- if (ret) {
- dev_err(&pdev->dev, "failed to register cpufreq\n");
- goto free_dmabuf;
- }
-
- ret = mmc_add_host(mmc);
- if (ret) {
- dev_err(&pdev->dev, "failed to add mmc host.\n");
- goto free_cpufreq;
- }
-
- s3cmci_debugfs_attach(host);
-
- platform_set_drvdata(pdev, mmc);
- dev_info(&pdev->dev, "%s - using %s, %s SDIO IRQ\n", mmc_hostname(mmc),
- s3cmci_host_usedma(host) ? "dma" : "pio",
- mmc->caps & MMC_CAP_SDIO_IRQ ? "hw" : "sw");
-
- return 0;
-
- free_cpufreq:
- s3cmci_cpufreq_deregister(host);
-
- free_dmabuf:
- clk_disable_unprepare(host->clk);
-
- clk_free:
- clk_put(host->clk);
-
- probe_free_dma:
- if (s3cmci_host_usedma(host))
- dma_release_channel(host->dma);
-
- probe_free_irq:
- free_irq(host->irq, host);
-
- probe_iounmap:
- iounmap(host->base);
-
- probe_free_mem_region:
- release_mem_region(host->mem->start, resource_size(host->mem));
-
- probe_free_host:
- mmc_free_host(mmc);
-
- probe_out:
- return ret;
-}
-
-static void s3cmci_shutdown(struct platform_device *pdev)
-{
- struct mmc_host *mmc = platform_get_drvdata(pdev);
- struct s3cmci_host *host = mmc_priv(mmc);
-
- if (host->irq_cd >= 0)
- free_irq(host->irq_cd, host);
-
- s3cmci_debugfs_remove(host);
- s3cmci_cpufreq_deregister(host);
- mmc_remove_host(mmc);
- clk_disable_unprepare(host->clk);
-}
-
-static int s3cmci_remove(struct platform_device *pdev)
-{
- struct mmc_host *mmc = platform_get_drvdata(pdev);
- struct s3cmci_host *host = mmc_priv(mmc);
-
- s3cmci_shutdown(pdev);
-
- clk_put(host->clk);
-
- tasklet_disable(&host->pio_tasklet);
-
- if (s3cmci_host_usedma(host))
- dma_release_channel(host->dma);
-
- free_irq(host->irq, host);
-
- iounmap(host->base);
- release_mem_region(host->mem->start, resource_size(host->mem));
-
- mmc_free_host(mmc);
- return 0;
-}
-
-static const struct of_device_id s3cmci_dt_match[] = {
- {
- .compatible = "samsung,s3c2410-sdi",
- .data = (void *)0,
- },
- {
- .compatible = "samsung,s3c2412-sdi",
- .data = (void *)1,
- },
- {
- .compatible = "samsung,s3c2440-sdi",
- .data = (void *)1,
- },
- { /* sentinel */ },
-};
-MODULE_DEVICE_TABLE(of, s3cmci_dt_match);
-
-static const struct platform_device_id s3cmci_driver_ids[] = {
- {
- .name = "s3c2410-sdi",
- .driver_data = 0,
- }, {
- .name = "s3c2412-sdi",
- .driver_data = 1,
- }, {
- .name = "s3c2440-sdi",
- .driver_data = 1,
- },
- { }
-};
-
-MODULE_DEVICE_TABLE(platform, s3cmci_driver_ids);
-
-static struct platform_driver s3cmci_driver = {
- .driver = {
- .name = "s3c-sdi",
- .probe_type = PROBE_PREFER_ASYNCHRONOUS,
- .of_match_table = s3cmci_dt_match,
- },
- .id_table = s3cmci_driver_ids,
- .probe = s3cmci_probe,
- .remove = s3cmci_remove,
- .shutdown = s3cmci_shutdown,
-};
-
-module_platform_driver(s3cmci_driver);
-
-MODULE_DESCRIPTION("Samsung S3C MMC/SD Card Interface driver");
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Thomas Kleffel <tk@maintech.de>, Ben Dooks <ben-linux@fluff.org>");
diff --git a/drivers/mmc/host/s3cmci.h b/drivers/mmc/host/s3cmci.h
deleted file mode 100644
index 8b65d7ad9f97..000000000000
--- a/drivers/mmc/host/s3cmci.h
+++ /dev/null
@@ -1,75 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * linux/drivers/mmc/s3cmci.h - Samsung S3C MCI driver
- *
- * Copyright (C) 2004-2006 Thomas Kleffel, All Rights Reserved.
- */
-
-enum s3cmci_waitfor {
- COMPLETION_NONE,
- COMPLETION_FINALIZE,
- COMPLETION_CMDSENT,
- COMPLETION_RSPFIN,
- COMPLETION_XFERFINISH,
- COMPLETION_XFERFINISH_RSPFIN,
-};
-
-struct s3cmci_host {
- struct platform_device *pdev;
- struct s3c24xx_mci_pdata *pdata;
- struct mmc_host *mmc;
- struct resource *mem;
- struct clk *clk;
- void __iomem *base;
- int irq;
- int irq_cd;
- struct dma_chan *dma;
-
- unsigned long clk_rate;
- unsigned long clk_div;
- unsigned long real_rate;
- u8 prescaler;
-
- int is2440;
- unsigned sdiimsk;
- unsigned sdidata;
-
- bool irq_disabled;
- bool irq_enabled;
- bool irq_state;
- int sdio_irqen;
-
- struct mmc_request *mrq;
- int cmd_is_stop;
-
- spinlock_t complete_lock;
- enum s3cmci_waitfor complete_what;
-
- int dma_complete;
-
- u32 pio_sgptr;
- u32 pio_bytes;
- u32 pio_count;
- u32 *pio_ptr;
-#define XFER_NONE 0
-#define XFER_READ 1
-#define XFER_WRITE 2
- u32 pio_active;
-
- int bus_width;
-
- char dbgmsg_cmd[301];
- char dbgmsg_dat[301];
- char *status;
-
- unsigned int ccnt, dcnt;
- struct tasklet_struct pio_tasklet;
-
-#ifdef CONFIG_DEBUG_FS
- struct dentry *debug_root;
-#endif
-
-#ifdef CONFIG_ARM_S3C24XX_CPUFREQ
- struct notifier_block freq_transition;
-#endif
-};
diff --git a/drivers/mmc/host/sdhci-cns3xxx.c b/drivers/mmc/host/sdhci-cns3xxx.c
deleted file mode 100644
index 2a29c7a4f308..000000000000
--- a/drivers/mmc/host/sdhci-cns3xxx.c
+++ /dev/null
@@ -1,113 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * SDHCI support for CNS3xxx SoC
- *
- * Copyright 2008 Cavium Networks
- * Copyright 2010 MontaVista Software, LLC.
- *
- * Authors: Scott Shu
- * Anton Vorontsov <avorontsov@mvista.com>
- */
-
-#include <linux/delay.h>
-#include <linux/device.h>
-#include <linux/mmc/host.h>
-#include <linux/module.h>
-#include "sdhci-pltfm.h"
-
-static unsigned int sdhci_cns3xxx_get_max_clk(struct sdhci_host *host)
-{
- return 150000000;
-}
-
-static void sdhci_cns3xxx_set_clock(struct sdhci_host *host, unsigned int clock)
-{
- struct device *dev = mmc_dev(host->mmc);
- int div = 1;
- u16 clk;
- unsigned long timeout;
-
- host->mmc->actual_clock = 0;
-
- sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
-
- if (clock == 0)
- return;
-
- while (host->max_clk / div > clock) {
- /*
- * On CNS3xxx divider grows linearly up to 4, and then
- * exponentially up to 256.
- */
- if (div < 4)
- div += 1;
- else if (div < 256)
- div *= 2;
- else
- break;
- }
-
- dev_dbg(dev, "desired SD clock: %d, actual: %d\n",
- clock, host->max_clk / div);
-
- /* Divide by 3 is special. */
- if (div != 3)
- div >>= 1;
-
- clk = div << SDHCI_DIVIDER_SHIFT;
- clk |= SDHCI_CLOCK_INT_EN;
- sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
-
- timeout = 20;
- while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
- & SDHCI_CLOCK_INT_STABLE)) {
- if (timeout == 0) {
- dev_warn(dev, "clock is unstable");
- break;
- }
- timeout--;
- mdelay(1);
- }
-
- clk |= SDHCI_CLOCK_CARD_EN;
- sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
-}
-
-static const struct sdhci_ops sdhci_cns3xxx_ops = {
- .get_max_clock = sdhci_cns3xxx_get_max_clk,
- .set_clock = sdhci_cns3xxx_set_clock,
- .set_bus_width = sdhci_set_bus_width,
- .reset = sdhci_reset,
- .set_uhs_signaling = sdhci_set_uhs_signaling,
-};
-
-static const struct sdhci_pltfm_data sdhci_cns3xxx_pdata = {
- .ops = &sdhci_cns3xxx_ops,
- .quirks = SDHCI_QUIRK_BROKEN_DMA |
- SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
- SDHCI_QUIRK_INVERTED_WRITE_PROTECT |
- SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN |
- SDHCI_QUIRK_BROKEN_TIMEOUT_VAL,
-};
-
-static int sdhci_cns3xxx_probe(struct platform_device *pdev)
-{
- return sdhci_pltfm_register(pdev, &sdhci_cns3xxx_pdata, 0);
-}
-
-static struct platform_driver sdhci_cns3xxx_driver = {
- .driver = {
- .name = "sdhci-cns3xxx",
- .probe_type = PROBE_PREFER_ASYNCHRONOUS,
- .pm = &sdhci_pltfm_pmops,
- },
- .probe = sdhci_cns3xxx_probe,
- .remove = sdhci_pltfm_unregister,
-};
-
-module_platform_driver(sdhci_cns3xxx_driver);
-
-MODULE_DESCRIPTION("SDHCI driver for CNS3xxx");
-MODULE_AUTHOR("Scott Shu, "
- "Anton Vorontsov <avorontsov@mvista.com>");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c
deleted file mode 100644
index 53a2ad9a24b8..000000000000
--- a/drivers/mmc/host/tmio_mmc.c
+++ /dev/null
@@ -1,227 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Driver for the MMC / SD / SDIO cell found in:
- *
- * TC6393XB TC6391XB TC6387XB T7L66XB ASIC3
- *
- * Copyright (C) 2017 Renesas Electronics Corporation
- * Copyright (C) 2017 Horms Solutions, Simon Horman
- * Copyright (C) 2007 Ian Molton
- * Copyright (C) 2004 Ian Molton
- */
-
-#include <linux/delay.h>
-#include <linux/device.h>
-#include <linux/mfd/core.h>
-#include <linux/mfd/tmio.h>
-#include <linux/mmc/host.h>
-#include <linux/module.h>
-#include <linux/pagemap.h>
-#include <linux/scatterlist.h>
-
-#include "tmio_mmc.h"
-
-/* Registers specific to this variant */
-#define CTL_SDIO_REGS 0x100
-#define CTL_CLK_AND_WAIT_CTL 0x138
-#define CTL_RESET_SDIO 0x1e0
-
-static void tmio_mmc_clk_start(struct tmio_mmc_host *host)
-{
- sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, CLK_CTL_SCLKEN |
- sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
-
- usleep_range(10000, 11000);
- sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0100);
- usleep_range(10000, 11000);
-}
-
-static void tmio_mmc_clk_stop(struct tmio_mmc_host *host)
-{
- sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0000);
- usleep_range(10000, 11000);
-
- sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~CLK_CTL_SCLKEN &
- sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
-
- usleep_range(10000, 11000);
-}
-
-static void tmio_mmc_set_clock(struct tmio_mmc_host *host,
- unsigned int new_clock)
-{
- unsigned int divisor;
- u32 clk = 0;
- int clk_sel;
-
- if (new_clock == 0) {
- tmio_mmc_clk_stop(host);
- return;
- }
-
- divisor = host->pdata->hclk / new_clock;
-
- /* bit7 set: 1/512, ... bit0 set: 1/4, all bits clear: 1/2 */
- clk_sel = (divisor <= 1);
- clk = clk_sel ? 0 : (roundup_pow_of_two(divisor) >> 2);
-
- host->pdata->set_clk_div(host->pdev, clk_sel);
-
- sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~CLK_CTL_SCLKEN &
- sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
- sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & CLK_CTL_DIV_MASK);
- usleep_range(10000, 11000);
-
- tmio_mmc_clk_start(host);
-}
-
-static void tmio_mmc_reset(struct tmio_mmc_host *host, bool preserve)
-{
- sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0000);
- usleep_range(10000, 11000);
- sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0001);
- usleep_range(10000, 11000);
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int tmio_mmc_suspend(struct device *dev)
-{
- struct platform_device *pdev = to_platform_device(dev);
- const struct mfd_cell *cell = mfd_get_cell(pdev);
- int ret;
-
- ret = pm_runtime_force_suspend(dev);
-
- /* Tell MFD core it can disable us now.*/
- if (!ret && cell->disable)
- cell->disable(pdev);
-
- return ret;
-}
-
-static int tmio_mmc_resume(struct device *dev)
-{
- struct platform_device *pdev = to_platform_device(dev);
- const struct mfd_cell *cell = mfd_get_cell(pdev);
- int ret = 0;
-
- /* Tell the MFD core we are ready to be enabled */
- if (cell->resume)
- ret = cell->resume(pdev);
-
- if (!ret)
- ret = pm_runtime_force_resume(dev);
-
- return ret;
-}
-#endif
-
-static int tmio_mmc_probe(struct platform_device *pdev)
-{
- const struct mfd_cell *cell = mfd_get_cell(pdev);
- struct tmio_mmc_data *pdata;
- struct tmio_mmc_host *host;
- struct resource *res;
- int ret = -EINVAL, irq;
-
- if (pdev->num_resources != 2)
- goto out;
-
- pdata = pdev->dev.platform_data;
- if (!pdata || !pdata->hclk)
- goto out;
-
- irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- ret = irq;
- goto out;
- }
-
- /* Tell the MFD core we are ready to be enabled */
- if (cell->enable) {
- ret = cell->enable(pdev);
- if (ret)
- goto out;
- }
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- ret = -EINVAL;
- goto cell_disable;
- }
-
- host = tmio_mmc_host_alloc(pdev, pdata);
- if (IS_ERR(host)) {
- ret = PTR_ERR(host);
- goto cell_disable;
- }
-
- /* SD control register space size is 0x200, 0x400 for bus_shift=1 */
- host->bus_shift = resource_size(res) >> 10;
- host->set_clock = tmio_mmc_set_clock;
- host->reset = tmio_mmc_reset;
-
- host->mmc->f_max = pdata->hclk;
- host->mmc->f_min = pdata->hclk / 512;
-
- ret = tmio_mmc_host_probe(host);
- if (ret)
- goto host_free;
-
- ret = devm_request_irq(&pdev->dev, irq, tmio_mmc_irq,
- IRQF_TRIGGER_FALLING,
- dev_name(&pdev->dev), host);
- if (ret)
- goto host_remove;
-
- pr_info("%s at 0x%p irq %d\n", mmc_hostname(host->mmc), host->ctl, irq);
-
- return 0;
-
-host_remove:
- tmio_mmc_host_remove(host);
-host_free:
- tmio_mmc_host_free(host);
-cell_disable:
- if (cell->disable)
- cell->disable(pdev);
-out:
- return ret;
-}
-
-static int tmio_mmc_remove(struct platform_device *pdev)
-{
- const struct mfd_cell *cell = mfd_get_cell(pdev);
- struct tmio_mmc_host *host = platform_get_drvdata(pdev);
-
- tmio_mmc_host_remove(host);
- if (cell->disable)
- cell->disable(pdev);
-
- return 0;
-}
-
-/* ------------------- device registration ----------------------- */
-
-static const struct dev_pm_ops tmio_mmc_dev_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(tmio_mmc_suspend, tmio_mmc_resume)
- SET_RUNTIME_PM_OPS(tmio_mmc_host_runtime_suspend,
- tmio_mmc_host_runtime_resume, NULL)
-};
-
-static struct platform_driver tmio_mmc_driver = {
- .driver = {
- .name = "tmio-mmc",
- .probe_type = PROBE_PREFER_ASYNCHRONOUS,
- .pm = &tmio_mmc_dev_pm_ops,
- },
- .probe = tmio_mmc_probe,
- .remove = tmio_mmc_remove,
-};
-
-module_platform_driver(tmio_mmc_driver);
-
-MODULE_DESCRIPTION("Toshiba TMIO SD/MMC driver");
-MODULE_AUTHOR("Ian Molton <spyro@f2s.com>");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:tmio-mmc");
diff --git a/drivers/most/Kconfig b/drivers/most/Kconfig
index 4b8145b9e7ad..69c9e728b7e6 100644
--- a/drivers/most/Kconfig
+++ b/drivers/most/Kconfig
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
menuconfig MOST
- tristate "MOST support"
+ tristate "MOST (Media Oriented Systems Transport) support"
depends on HAS_DMA && CONFIGFS_FS
default n
help
diff --git a/drivers/most/most_cdev.c b/drivers/most/most_cdev.c
index 3722f9abd7b9..4ee536980f71 100644
--- a/drivers/most/most_cdev.c
+++ b/drivers/most/most_cdev.c
@@ -297,7 +297,7 @@ static __poll_t comp_poll(struct file *filp, poll_table *wait)
return mask;
}
-/**
+/*
* Initialization of struct file_operations
*/
static const struct file_operations channel_fops = {
@@ -404,8 +404,9 @@ static int comp_tx_completion(struct most_interface *iface, int channel_id)
* @channel_id: channel index/ID
* @cfg: pointer to actual channel configuration
* @name: name of the device to be created
+ * @args: pointer to array of component parameters (from configfs)
*
- * This allocates achannel object and creates the device node in /dev
+ * This allocates a channel object and creates the device node in /dev
*
* Returns 0 on success or error code otherwise.
*/
diff --git a/drivers/most/most_snd.c b/drivers/most/most_snd.c
index c87f6a037874..45d762804c5e 100644
--- a/drivers/most/most_snd.c
+++ b/drivers/most/most_snd.c
@@ -27,6 +27,7 @@ static struct most_component comp;
/**
* struct channel - private structure to keep channel specific data
* @substream: stores the substream structure
+ * @pcm_hardware: low-level hardware description
* @iface: interface for which the channel belongs to
* @cfg: channel configuration
* @card: registered sound card
@@ -38,6 +39,7 @@ static struct most_component comp;
* @opened: set when the stream is opened
* @playback_task: playback thread
* @playback_waitq: waitq used by playback thread
+ * @copy_fn: copy function for PCM-specific format and width
*/
struct channel {
struct snd_pcm_substream *substream;
@@ -400,7 +402,7 @@ static snd_pcm_uframes_t pcm_pointer(struct snd_pcm_substream *substream)
return channel->buffer_pos;
}
-/**
+/*
* Initialization of struct snd_pcm_ops
*/
static const struct snd_pcm_ops pcm_ops = {
@@ -501,8 +503,8 @@ static void release_adapter(struct sound_adapter *adpt)
* @iface: pointer to interface instance
* @channel_id: channel index/ID
* @cfg: pointer to actual channel configuration
- * @arg_list: string that provides the name of the device to be created in /dev
- * plus the desired audio resolution
+ * @device_name: name of the device to be created in /dev
+ * @arg_list: string that provides the desired audio resolution
*
* Creates sound card, pcm device, sets pcm ops and registers sound card.
*
@@ -699,7 +701,7 @@ static int audio_tx_completion(struct most_interface *iface, int channel_id)
return 0;
}
-/**
+/*
* Initialization of the struct most_component
*/
static struct most_component comp = {
diff --git a/drivers/most/most_usb.c b/drivers/most/most_usb.c
index 73258b24fea7..485d5ca39951 100644
--- a/drivers/most/most_usb.c
+++ b/drivers/most/most_usb.c
@@ -660,7 +660,7 @@ static void hdm_request_netinfo(struct most_interface *iface, int channel,
/**
* link_stat_timer_handler - schedule work obtaining mac address and link status
- * @data: pointer to USB device instance
+ * @t: pointer to timer_list which holds a pointer to the USB device instance
*
* The handler runs in interrupt context. That's why we need to defer the
* tasks to a work queue.
@@ -763,14 +763,14 @@ static void wq_clear_halt(struct work_struct *wq_obj)
mutex_unlock(&mdev->io_mutex);
}
-/**
+/*
* hdm_usb_fops - file operation table for USB driver
*/
static const struct file_operations hdm_usb_fops = {
.owner = THIS_MODULE,
};
-/**
+/*
* usb_device_id - ID table for HCD device probing
*/
static const struct usb_device_id usbid[] = {
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index d442fa94c872..85f5ee6f06fc 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -577,6 +577,7 @@ static int mtd_part_of_parse(struct mtd_info *master,
{
struct mtd_part_parser *parser;
struct device_node *np;
+ struct device_node *child;
struct property *prop;
struct device *dev;
const char *compat;
@@ -594,6 +595,15 @@ static int mtd_part_of_parse(struct mtd_info *master,
else
np = of_get_child_by_name(np, "partitions");
+ /*
+ * Don't create devices that are added to a bus but will never get
+ * probed. That'll cause fw_devlink to block probing of consumers of
+ * this partition until the partition device is probed.
+ */
+ for_each_child_of_node(np, child)
+ if (of_device_is_compatible(child, "nvmem-cells"))
+ of_node_set_flag(child, OF_POPULATED);
+
of_property_for_each_string(np, "compatible", prop, compat) {
parser = mtd_part_get_compatible_parser(compat);
if (!parser)
diff --git a/drivers/mtd/nand/raw/Kconfig b/drivers/mtd/nand/raw/Kconfig
index 98ea1c9e65c8..048b1c8f08ee 100644
--- a/drivers/mtd/nand/raw/Kconfig
+++ b/drivers/mtd/nand/raw/Kconfig
@@ -79,7 +79,7 @@ config MTD_NAND_NDFC
config MTD_NAND_S3C2410
tristate "Samsung S3C NAND controller"
- depends on ARCH_S3C24XX || ARCH_S3C64XX
+ depends on ARCH_S3C64XX
help
This enables the NAND flash controller on the S3C24xx and S3C64xx
SoCs
diff --git a/drivers/mtd/nand/raw/s3c2410.c b/drivers/mtd/nand/raw/s3c2410.c
index f0a4535c812a..80d96f94d6cb 100644
--- a/drivers/mtd/nand/raw/s3c2410.c
+++ b/drivers/mtd/nand/raw/s3c2410.c
@@ -166,10 +166,6 @@ struct s3c2410_nand_info {
enum s3c_nand_clk_state clk_state;
enum s3c_cpu_type cpu_type;
-
-#ifdef CONFIG_ARM_S3C24XX_CPUFREQ
- struct notifier_block freq_transition;
-#endif
};
struct s3c24XX_nand_devtype_data {
@@ -711,54 +707,6 @@ static void s3c2440_nand_write_buf(struct nand_chip *this, const u_char *buf,
}
}
-/* cpufreq driver support */
-
-#ifdef CONFIG_ARM_S3C24XX_CPUFREQ
-
-static int s3c2410_nand_cpufreq_transition(struct notifier_block *nb,
- unsigned long val, void *data)
-{
- struct s3c2410_nand_info *info;
- unsigned long newclk;
-
- info = container_of(nb, struct s3c2410_nand_info, freq_transition);
- newclk = clk_get_rate(info->clk);
-
- if ((val == CPUFREQ_POSTCHANGE && newclk < info->clk_rate) ||
- (val == CPUFREQ_PRECHANGE && newclk > info->clk_rate)) {
- s3c2410_nand_setrate(info);
- }
-
- return 0;
-}
-
-static inline int s3c2410_nand_cpufreq_register(struct s3c2410_nand_info *info)
-{
- info->freq_transition.notifier_call = s3c2410_nand_cpufreq_transition;
-
- return cpufreq_register_notifier(&info->freq_transition,
- CPUFREQ_TRANSITION_NOTIFIER);
-}
-
-static inline void
-s3c2410_nand_cpufreq_deregister(struct s3c2410_nand_info *info)
-{
- cpufreq_unregister_notifier(&info->freq_transition,
- CPUFREQ_TRANSITION_NOTIFIER);
-}
-
-#else
-static inline int s3c2410_nand_cpufreq_register(struct s3c2410_nand_info *info)
-{
- return 0;
-}
-
-static inline void
-s3c2410_nand_cpufreq_deregister(struct s3c2410_nand_info *info)
-{
-}
-#endif
-
/* device management functions */
static int s3c24xx_nand_remove(struct platform_device *pdev)
@@ -768,8 +716,6 @@ static int s3c24xx_nand_remove(struct platform_device *pdev)
if (info == NULL)
return 0;
- s3c2410_nand_cpufreq_deregister(info);
-
/* Release all our mtds and their partitions, then go through
* freeing the resources used
*/
@@ -1184,12 +1130,6 @@ static int s3c24xx_nand_probe(struct platform_device *pdev)
if (err != 0)
goto exit_error;
- err = s3c2410_nand_cpufreq_register(info);
- if (err < 0) {
- dev_err(&pdev->dev, "failed to init cpufreq support\n");
- goto exit_error;
- }
-
if (allow_clk_suspend(info)) {
dev_info(&pdev->dev, "clock idle support enabled\n");
s3c2410_nand_clk_set_state(info, CLOCK_SUSPEND);
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 9e63b8c43f3e..c34bd432da27 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -334,7 +334,6 @@ config NETCONSOLE_DYNAMIC
config NETPOLL
def_bool NETCONSOLE
- select SRCU
config NET_POLL_CONTROLLER
def_bool NETPOLL
@@ -583,18 +582,7 @@ config FUJITSU_ES
This driver provides support for Extended Socket network device
on Extended Partitioning of FUJITSU PRIMEQUEST 2000 E2 series.
-config USB4_NET
- tristate "Networking over USB4 and Thunderbolt cables"
- depends on USB4 && INET
- help
- Select this if you want to create network between two computers
- over a USB4 and Thunderbolt cables. The driver supports Apple
- ThunderboltIP protocol and allows communication with any host
- supporting the same protocol including Windows and macOS.
-
- To compile this driver a module, choose M here. The module will be
- called thunderbolt-net.
-
+source "drivers/net/thunderbolt/Kconfig"
source "drivers/net/hyperv/Kconfig"
config NETDEVSIM
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 6ce076462dbf..e26f98f897c5 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -84,8 +84,6 @@ obj-$(CONFIG_HYPERV_NET) += hyperv/
obj-$(CONFIG_NTB_NETDEV) += ntb_netdev.o
obj-$(CONFIG_FUJITSU_ES) += fjes/
-
-thunderbolt-net-y += thunderbolt.o
-obj-$(CONFIG_USB4_NET) += thunderbolt-net.o
+obj-$(CONFIG_USB4_NET) += thunderbolt/
obj-$(CONFIG_NETDEVSIM) += netdevsim/
obj-$(CONFIG_NET_FAILOVER) += net_failover.o
diff --git a/drivers/net/bonding/bond_debugfs.c b/drivers/net/bonding/bond_debugfs.c
index 4f9b4a18c74c..594094526648 100644
--- a/drivers/net/bonding/bond_debugfs.c
+++ b/drivers/net/bonding/bond_debugfs.c
@@ -76,7 +76,7 @@ void bond_debug_reregister(struct bonding *bond)
d = debugfs_rename(bonding_debug_root, bond->debug_dir,
bonding_debug_root, bond->dev->name);
- if (d) {
+ if (!IS_ERR(d)) {
bond->debug_dir = d;
} else {
netdev_warn(bond->dev, "failed to reregister, so just unregister old one\n");
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 0363ce597661..00646aa315c3 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -419,8 +419,10 @@ static int bond_vlan_rx_kill_vid(struct net_device *bond_dev,
/**
* bond_ipsec_add_sa - program device with a security association
* @xs: pointer to transformer state struct
+ * @extack: extack point to fill failure reason
**/
-static int bond_ipsec_add_sa(struct xfrm_state *xs)
+static int bond_ipsec_add_sa(struct xfrm_state *xs,
+ struct netlink_ext_ack *extack)
{
struct net_device *bond_dev = xs->xso.dev;
struct bond_ipsec *ipsec;
@@ -442,7 +444,7 @@ static int bond_ipsec_add_sa(struct xfrm_state *xs)
if (!slave->dev->xfrmdev_ops ||
!slave->dev->xfrmdev_ops->xdo_dev_state_add ||
netif_is_bond_master(slave->dev)) {
- slave_warn(bond_dev, slave->dev, "Slave does not support ipsec offload\n");
+ NL_SET_ERR_MSG_MOD(extack, "Slave does not support ipsec offload");
rcu_read_unlock();
return -EINVAL;
}
@@ -454,7 +456,7 @@ static int bond_ipsec_add_sa(struct xfrm_state *xs)
}
xs->xso.real_dev = slave->dev;
- err = slave->dev->xfrmdev_ops->xdo_dev_state_add(xs);
+ err = slave->dev->xfrmdev_ops->xdo_dev_state_add(xs, extack);
if (!err) {
ipsec->xs = xs;
INIT_LIST_HEAD(&ipsec->list);
@@ -494,7 +496,7 @@ static void bond_ipsec_add_sa_all(struct bonding *bond)
spin_lock_bh(&bond->ipsec_lock);
list_for_each_entry(ipsec, &bond->ipsec_list, list) {
ipsec->xs->xso.real_dev = slave->dev;
- if (slave->dev->xfrmdev_ops->xdo_dev_state_add(ipsec->xs)) {
+ if (slave->dev->xfrmdev_ops->xdo_dev_state_add(ipsec->xs, NULL)) {
slave_warn(bond_dev, slave->dev, "%s: failed to add SA\n", __func__);
ipsec->xs->xso.real_dev = NULL;
}
diff --git a/drivers/net/can/ctucanfd/ctucanfd_platform.c b/drivers/net/can/ctucanfd/ctucanfd_platform.c
index f83684f006ea..a17561d97192 100644
--- a/drivers/net/can/ctucanfd/ctucanfd_platform.c
+++ b/drivers/net/can/ctucanfd/ctucanfd_platform.c
@@ -47,7 +47,6 @@ static void ctucan_platform_set_drvdata(struct device *dev,
*/
static int ctucan_platform_probe(struct platform_device *pdev)
{
- struct resource *res; /* IO mem resources */
struct device *dev = &pdev->dev;
void __iomem *addr;
int ret;
@@ -55,8 +54,7 @@ static int ctucan_platform_probe(struct platform_device *pdev)
int irq;
/* Get the virtual base address for the device */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- addr = devm_ioremap_resource(dev, res);
+ addr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(addr)) {
ret = PTR_ERR(addr);
goto err;
diff --git a/drivers/net/can/dev/bittiming.c b/drivers/net/can/dev/bittiming.c
index 7ae80763c960..0b93900b1dfa 100644
--- a/drivers/net/can/dev/bittiming.c
+++ b/drivers/net/can/dev/bittiming.c
@@ -6,25 +6,81 @@
#include <linux/can/dev.h>
+void can_sjw_set_default(struct can_bittiming *bt)
+{
+ if (bt->sjw)
+ return;
+
+ /* If user space provides no sjw, use sane default of phase_seg2 / 2 */
+ bt->sjw = max(1U, min(bt->phase_seg1, bt->phase_seg2 / 2));
+}
+
+int can_sjw_check(const struct net_device *dev, const struct can_bittiming *bt,
+ const struct can_bittiming_const *btc, struct netlink_ext_ack *extack)
+{
+ if (bt->sjw > btc->sjw_max) {
+ NL_SET_ERR_MSG_FMT(extack, "sjw: %u greater than max sjw: %u",
+ bt->sjw, btc->sjw_max);
+ return -EINVAL;
+ }
+
+ if (bt->sjw > bt->phase_seg1) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "sjw: %u greater than phase-seg1: %u",
+ bt->sjw, bt->phase_seg1);
+ return -EINVAL;
+ }
+
+ if (bt->sjw > bt->phase_seg2) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "sjw: %u greater than phase-seg2: %u",
+ bt->sjw, bt->phase_seg2);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
/* Checks the validity of the specified bit-timing parameters prop_seg,
* phase_seg1, phase_seg2 and sjw and tries to determine the bitrate
* prescaler value brp. You can find more information in the header
* file linux/can/netlink.h.
*/
static int can_fixup_bittiming(const struct net_device *dev, struct can_bittiming *bt,
- const struct can_bittiming_const *btc)
+ const struct can_bittiming_const *btc,
+ struct netlink_ext_ack *extack)
{
+ const unsigned int tseg1 = bt->prop_seg + bt->phase_seg1;
const struct can_priv *priv = netdev_priv(dev);
- unsigned int tseg1, alltseg;
u64 brp64;
+ int err;
- tseg1 = bt->prop_seg + bt->phase_seg1;
- if (!bt->sjw)
- bt->sjw = 1;
- if (bt->sjw > btc->sjw_max ||
- tseg1 < btc->tseg1_min || tseg1 > btc->tseg1_max ||
- bt->phase_seg2 < btc->tseg2_min || bt->phase_seg2 > btc->tseg2_max)
- return -ERANGE;
+ if (tseg1 < btc->tseg1_min) {
+ NL_SET_ERR_MSG_FMT(extack, "prop-seg + phase-seg1: %u less than tseg1-min: %u",
+ tseg1, btc->tseg1_min);
+ return -EINVAL;
+ }
+ if (tseg1 > btc->tseg1_max) {
+ NL_SET_ERR_MSG_FMT(extack, "prop-seg + phase-seg1: %u greater than tseg1-max: %u",
+ tseg1, btc->tseg1_max);
+ return -EINVAL;
+ }
+ if (bt->phase_seg2 < btc->tseg2_min) {
+ NL_SET_ERR_MSG_FMT(extack, "phase-seg2: %u less than tseg2-min: %u",
+ bt->phase_seg2, btc->tseg2_min);
+ return -EINVAL;
+ }
+ if (bt->phase_seg2 > btc->tseg2_max) {
+ NL_SET_ERR_MSG_FMT(extack, "phase-seg2: %u greater than tseg2-max: %u",
+ bt->phase_seg2, btc->tseg2_max);
+ return -EINVAL;
+ }
+
+ can_sjw_set_default(bt);
+
+ err = can_sjw_check(dev, bt, btc, extack);
+ if (err)
+ return err;
brp64 = (u64)priv->clock.freq * (u64)bt->tq;
if (btc->brp_inc > 1)
@@ -35,12 +91,21 @@ static int can_fixup_bittiming(const struct net_device *dev, struct can_bittimin
brp64 *= btc->brp_inc;
bt->brp = (u32)brp64;
- if (bt->brp < btc->brp_min || bt->brp > btc->brp_max)
+ if (bt->brp < btc->brp_min) {
+ NL_SET_ERR_MSG_FMT(extack, "resulting brp: %u less than brp-min: %u",
+ bt->brp, btc->brp_min);
+ return -EINVAL;
+ }
+ if (bt->brp > btc->brp_max) {
+ NL_SET_ERR_MSG_FMT(extack, "resulting brp: %u greater than brp-max: %u",
+ bt->brp, btc->brp_max);
return -EINVAL;
+ }
- alltseg = bt->prop_seg + bt->phase_seg1 + bt->phase_seg2 + 1;
- bt->bitrate = priv->clock.freq / (bt->brp * alltseg);
- bt->sample_point = ((tseg1 + 1) * 1000) / alltseg;
+ bt->bitrate = priv->clock.freq / (bt->brp * can_bit_time(bt));
+ bt->sample_point = ((CAN_SYNC_SEG + tseg1) * 1000) / can_bit_time(bt);
+ bt->tq = DIV_U64_ROUND_CLOSEST(mul_u32_u32(bt->brp, NSEC_PER_SEC),
+ priv->clock.freq);
return 0;
}
@@ -49,7 +114,8 @@ static int can_fixup_bittiming(const struct net_device *dev, struct can_bittimin
static int
can_validate_bitrate(const struct net_device *dev, const struct can_bittiming *bt,
const u32 *bitrate_const,
- const unsigned int bitrate_const_cnt)
+ const unsigned int bitrate_const_cnt,
+ struct netlink_ext_ack *extack)
{
unsigned int i;
@@ -58,30 +124,30 @@ can_validate_bitrate(const struct net_device *dev, const struct can_bittiming *b
return 0;
}
+ NL_SET_ERR_MSG_FMT(extack, "bitrate %u bps not supported",
+ bt->brp);
+
return -EINVAL;
}
int can_get_bittiming(const struct net_device *dev, struct can_bittiming *bt,
const struct can_bittiming_const *btc,
const u32 *bitrate_const,
- const unsigned int bitrate_const_cnt)
+ const unsigned int bitrate_const_cnt,
+ struct netlink_ext_ack *extack)
{
- int err;
-
/* Depending on the given can_bittiming parameter structure the CAN
* timing parameters are calculated based on the provided bitrate OR
* alternatively the CAN timing parameters (tq, prop_seg, etc.) are
* provided directly which are then checked and fixed up.
*/
if (!bt->tq && bt->bitrate && btc)
- err = can_calc_bittiming(dev, bt, btc);
- else if (bt->tq && !bt->bitrate && btc)
- err = can_fixup_bittiming(dev, bt, btc);
- else if (!bt->tq && bt->bitrate && bitrate_const)
- err = can_validate_bitrate(dev, bt, bitrate_const,
- bitrate_const_cnt);
- else
- err = -EINVAL;
-
- return err;
+ return can_calc_bittiming(dev, bt, btc, extack);
+ if (bt->tq && !bt->bitrate && btc)
+ return can_fixup_bittiming(dev, bt, btc, extack);
+ if (!bt->tq && bt->bitrate && bitrate_const)
+ return can_validate_bitrate(dev, bt, bitrate_const,
+ bitrate_const_cnt, extack);
+
+ return -EINVAL;
}
diff --git a/drivers/net/can/dev/calc_bittiming.c b/drivers/net/can/dev/calc_bittiming.c
index d3caa040614d..3809c148fb88 100644
--- a/drivers/net/can/dev/calc_bittiming.c
+++ b/drivers/net/can/dev/calc_bittiming.c
@@ -63,7 +63,7 @@ can_update_sample_point(const struct can_bittiming_const *btc,
}
int can_calc_bittiming(const struct net_device *dev, struct can_bittiming *bt,
- const struct can_bittiming_const *btc)
+ const struct can_bittiming_const *btc, struct netlink_ext_ack *extack)
{
struct can_priv *priv = netdev_priv(dev);
unsigned int bitrate; /* current bitrate */
@@ -76,6 +76,7 @@ int can_calc_bittiming(const struct net_device *dev, struct can_bittiming *bt,
unsigned int best_brp = 0; /* current best value for brp */
unsigned int brp, tsegall, tseg, tseg1 = 0, tseg2 = 0;
u64 v64;
+ int err;
/* Use CiA recommended sample points */
if (bt->sample_point) {
@@ -133,13 +134,14 @@ int can_calc_bittiming(const struct net_device *dev, struct can_bittiming *bt,
do_div(v64, bt->bitrate);
bitrate_error = (u32)v64;
if (bitrate_error > CAN_CALC_MAX_ERROR) {
- netdev_err(dev,
- "bitrate error %d.%d%% too high\n",
- bitrate_error / 10, bitrate_error % 10);
- return -EDOM;
+ NL_SET_ERR_MSG_FMT(extack,
+ "bitrate error: %u.%u%% too high",
+ bitrate_error / 10, bitrate_error % 10);
+ return -EINVAL;
}
- netdev_warn(dev, "bitrate error %d.%d%%\n",
- bitrate_error / 10, bitrate_error % 10);
+ NL_SET_ERR_MSG_FMT(extack,
+ "bitrate error: %u.%u%%",
+ bitrate_error / 10, bitrate_error % 10);
}
/* real sample point */
@@ -154,23 +156,17 @@ int can_calc_bittiming(const struct net_device *dev, struct can_bittiming *bt,
bt->phase_seg1 = tseg1 - bt->prop_seg;
bt->phase_seg2 = tseg2;
- /* check for sjw user settings */
- if (!bt->sjw || !btc->sjw_max) {
- bt->sjw = 1;
- } else {
- /* bt->sjw is at least 1 -> sanitize upper bound to sjw_max */
- if (bt->sjw > btc->sjw_max)
- bt->sjw = btc->sjw_max;
- /* bt->sjw must not be higher than tseg2 */
- if (tseg2 < bt->sjw)
- bt->sjw = tseg2;
- }
+ can_sjw_set_default(bt);
+
+ err = can_sjw_check(dev, bt, btc, extack);
+ if (err)
+ return err;
bt->brp = best_brp;
/* real bitrate */
bt->bitrate = priv->clock.freq /
- (bt->brp * (CAN_SYNC_SEG + tseg1 + tseg2));
+ (bt->brp * can_bit_time(bt));
return 0;
}
diff --git a/drivers/net/can/dev/dev.c b/drivers/net/can/dev/dev.c
index c1956b1e9faf..7f9334a8af50 100644
--- a/drivers/net/can/dev/dev.c
+++ b/drivers/net/can/dev/dev.c
@@ -498,6 +498,18 @@ static int can_get_termination(struct net_device *ndev)
return 0;
}
+static bool
+can_bittiming_const_valid(const struct can_bittiming_const *btc)
+{
+ if (!btc)
+ return true;
+
+ if (!btc->sjw_max)
+ return false;
+
+ return true;
+}
+
/* Register the CAN network device */
int register_candev(struct net_device *dev)
{
@@ -518,6 +530,15 @@ int register_candev(struct net_device *dev)
if (!priv->data_bitrate_const != !priv->data_bitrate_const_cnt)
return -EINVAL;
+ /* We only support either fixed bit rates or bit timing const. */
+ if ((priv->bitrate_const || priv->data_bitrate_const) &&
+ (priv->bittiming_const || priv->data_bittiming_const))
+ return -EINVAL;
+
+ if (!can_bittiming_const_valid(priv->bittiming_const) ||
+ !can_bittiming_const_valid(priv->data_bittiming_const))
+ return -EINVAL;
+
if (!priv->termination_const) {
err = can_get_termination(dev);
if (err)
diff --git a/drivers/net/can/dev/netlink.c b/drivers/net/can/dev/netlink.c
index 8efa22d9f214..036d85ef07f5 100644
--- a/drivers/net/can/dev/netlink.c
+++ b/drivers/net/can/dev/netlink.c
@@ -36,10 +36,24 @@ static const struct nla_policy can_tdc_policy[IFLA_CAN_TDC_MAX + 1] = {
[IFLA_CAN_TDC_TDCF] = { .type = NLA_U32 },
};
+static int can_validate_bittiming(const struct can_bittiming *bt,
+ struct netlink_ext_ack *extack)
+{
+ /* sample point is in one-tenth of a percent */
+ if (bt->sample_point >= 1000) {
+ NL_SET_ERR_MSG(extack, "sample point must be between 0 and 100%");
+
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int can_validate(struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
{
bool is_can_fd = false;
+ int err;
/* Make sure that valid CAN FD configurations always consist of
* - nominal/arbitration bittiming
@@ -51,6 +65,15 @@ static int can_validate(struct nlattr *tb[], struct nlattr *data[],
if (!data)
return 0;
+ if (data[IFLA_CAN_BITTIMING]) {
+ struct can_bittiming bt;
+
+ memcpy(&bt, nla_data(data[IFLA_CAN_BITTIMING]), sizeof(bt));
+ err = can_validate_bittiming(&bt, extack);
+ if (err)
+ return err;
+ }
+
if (data[IFLA_CAN_CTRLMODE]) {
struct can_ctrlmode *cm = nla_data(data[IFLA_CAN_CTRLMODE]);
u32 tdc_flags = cm->flags & CAN_CTRLMODE_TDC_MASK;
@@ -71,7 +94,6 @@ static int can_validate(struct nlattr *tb[], struct nlattr *data[],
*/
if (data[IFLA_CAN_TDC]) {
struct nlattr *tb_tdc[IFLA_CAN_TDC_MAX + 1];
- int err;
err = nla_parse_nested(tb_tdc, IFLA_CAN_TDC_MAX,
data[IFLA_CAN_TDC],
@@ -102,6 +124,15 @@ static int can_validate(struct nlattr *tb[], struct nlattr *data[],
return -EOPNOTSUPP;
}
+ if (data[IFLA_CAN_DATA_BITTIMING]) {
+ struct can_bittiming bt;
+
+ memcpy(&bt, nla_data(data[IFLA_CAN_DATA_BITTIMING]), sizeof(bt));
+ err = can_validate_bittiming(&bt, extack);
+ if (err)
+ return err;
+ }
+
return 0;
}
@@ -184,13 +215,15 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[],
err = can_get_bittiming(dev, &bt,
priv->bittiming_const,
priv->bitrate_const,
- priv->bitrate_const_cnt);
+ priv->bitrate_const_cnt,
+ extack);
if (err)
return err;
if (priv->bitrate_max && bt.bitrate > priv->bitrate_max) {
- netdev_err(dev, "arbitration bitrate surpasses transceiver capabilities of %d bps\n",
- priv->bitrate_max);
+ NL_SET_ERR_MSG_FMT(extack,
+ "arbitration bitrate %u bps surpasses transceiver capabilities of %u bps",
+ bt.bitrate, priv->bitrate_max);
return -EINVAL;
}
@@ -288,13 +321,15 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[],
err = can_get_bittiming(dev, &dbt,
priv->data_bittiming_const,
priv->data_bitrate_const,
- priv->data_bitrate_const_cnt);
+ priv->data_bitrate_const_cnt,
+ extack);
if (err)
return err;
if (priv->bitrate_max && dbt.bitrate > priv->bitrate_max) {
- netdev_err(dev, "canfd data bitrate surpasses transceiver capabilities of %d bps\n",
- priv->bitrate_max);
+ NL_SET_ERR_MSG_FMT(extack,
+ "CANFD data bitrate %u bps surpasses transceiver capabilities of %u bps",
+ dbt.bitrate, priv->bitrate_max);
return -EINVAL;
}
diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c
index f6fa7157b99b..ef4e1b9a9e1e 100644
--- a/drivers/net/can/rcar/rcar_canfd.c
+++ b/drivers/net/can/rcar/rcar_canfd.c
@@ -21,23 +21,23 @@
* wherever it is modified to a readable name.
*/
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/interrupt.h>
+#include <linux/bitmap.h>
+#include <linux/bitops.h>
+#include <linux/can/dev.h>
+#include <linux/clk.h>
#include <linux/errno.h>
#include <linux/ethtool.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
#include <linux/netdevice.h>
-#include <linux/platform_device.h>
-#include <linux/can/dev.h>
-#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_device.h>
-#include <linux/bitmap.h>
-#include <linux/bitops.h>
-#include <linux/iopoll.h>
+#include <linux/platform_device.h>
#include <linux/reset.h>
+#include <linux/types.h>
#define RCANFD_DRV_NAME "rcar_canfd"
@@ -82,8 +82,8 @@
#define RCANFD_GERFL_DEF BIT(0)
#define RCANFD_GERFL_ERR(gpriv, x) \
- ((x) & (reg_v3u(gpriv, RCANFD_GERFL_EEF0_7, \
- RCANFD_GERFL_EEF(0) | RCANFD_GERFL_EEF(1)) | \
+ ((x) & (reg_gen4(gpriv, RCANFD_GERFL_EEF0_7, \
+ RCANFD_GERFL_EEF(0) | RCANFD_GERFL_EEF(1)) | \
RCANFD_GERFL_MES | \
((gpriv)->fdmode ? RCANFD_GERFL_CMPOF : 0)))
@@ -91,16 +91,16 @@
/* RSCFDnCFDGAFLCFG0 / RSCFDnGAFLCFG0 */
#define RCANFD_GAFLCFG_SETRNC(gpriv, n, x) \
- (((x) & reg_v3u(gpriv, 0x1ff, 0xff)) << \
- (reg_v3u(gpriv, 16, 24) - (n) * reg_v3u(gpriv, 16, 8)))
+ (((x) & reg_gen4(gpriv, 0x1ff, 0xff)) << \
+ (reg_gen4(gpriv, 16, 24) - ((n) & 1) * reg_gen4(gpriv, 16, 8)))
#define RCANFD_GAFLCFG_GETRNC(gpriv, n, x) \
- (((x) >> (reg_v3u(gpriv, 16, 24) - (n) * reg_v3u(gpriv, 16, 8))) & \
- reg_v3u(gpriv, 0x1ff, 0xff))
+ (((x) >> (reg_gen4(gpriv, 16, 24) - ((n) & 1) * reg_gen4(gpriv, 16, 8))) & \
+ reg_gen4(gpriv, 0x1ff, 0xff))
/* RSCFDnCFDGAFLECTR / RSCFDnGAFLECTR */
#define RCANFD_GAFLECTR_AFLDAE BIT(8)
-#define RCANFD_GAFLECTR_AFLPN(gpriv, x) ((x) & reg_v3u(gpriv, 0x7f, 0x1f))
+#define RCANFD_GAFLECTR_AFLPN(gpriv, x) ((x) & reg_gen4(gpriv, 0x7f, 0x1f))
/* RSCFDnCFDGAFLIDj / RSCFDnGAFLIDj */
#define RCANFD_GAFLID_GAFLLB BIT(29)
@@ -118,13 +118,13 @@
/* RSCFDnCFDCmNCFG - CAN FD only */
#define RCANFD_NCFG_NTSEG2(gpriv, x) \
- (((x) & reg_v3u(gpriv, 0x7f, 0x1f)) << reg_v3u(gpriv, 25, 24))
+ (((x) & reg_gen4(gpriv, 0x7f, 0x1f)) << reg_gen4(gpriv, 25, 24))
#define RCANFD_NCFG_NTSEG1(gpriv, x) \
- (((x) & reg_v3u(gpriv, 0xff, 0x7f)) << reg_v3u(gpriv, 17, 16))
+ (((x) & reg_gen4(gpriv, 0xff, 0x7f)) << reg_gen4(gpriv, 17, 16))
#define RCANFD_NCFG_NSJW(gpriv, x) \
- (((x) & reg_v3u(gpriv, 0x7f, 0x1f)) << reg_v3u(gpriv, 10, 11))
+ (((x) & reg_gen4(gpriv, 0x7f, 0x1f)) << reg_gen4(gpriv, 10, 11))
#define RCANFD_NCFG_NBRP(x) (((x) & 0x3ff) << 0)
@@ -186,19 +186,19 @@
#define RCANFD_CERFL_ERR(x) ((x) & (0x7fff)) /* above bits 14:0 */
/* RSCFDnCFDCmDCFG */
-#define RCANFD_DCFG_DSJW(x) (((x) & 0x7) << 24)
+#define RCANFD_DCFG_DSJW(gpriv, x) (((x) & reg_gen4(gpriv, 0xf, 0x7)) << 24)
#define RCANFD_DCFG_DTSEG2(gpriv, x) \
- (((x) & reg_v3u(gpriv, 0x0f, 0x7)) << reg_v3u(gpriv, 16, 20))
+ (((x) & reg_gen4(gpriv, 0x0f, 0x7)) << reg_gen4(gpriv, 16, 20))
#define RCANFD_DCFG_DTSEG1(gpriv, x) \
- (((x) & reg_v3u(gpriv, 0x1f, 0xf)) << reg_v3u(gpriv, 8, 16))
+ (((x) & reg_gen4(gpriv, 0x1f, 0xf)) << reg_gen4(gpriv, 8, 16))
#define RCANFD_DCFG_DBRP(x) (((x) & 0xff) << 0)
/* RSCFDnCFDCmFDCFG */
-#define RCANFD_FDCFG_CLOE BIT(30)
-#define RCANFD_FDCFG_FDOE BIT(28)
+#define RCANFD_GEN4_FDCFG_CLOE BIT(30)
+#define RCANFD_GEN4_FDCFG_FDOE BIT(28)
#define RCANFD_FDCFG_TDCE BIT(9)
#define RCANFD_FDCFG_TDCOC BIT(8)
#define RCANFD_FDCFG_TDCO(x) (((x) & 0x7f) >> 16)
@@ -233,10 +233,11 @@
/* Common FIFO bits */
/* RSCFDnCFDCFCCk */
-#define RCANFD_CFCC_CFTML(gpriv, x) (((x) & 0xf) << reg_v3u(gpriv, 16, 20))
-#define RCANFD_CFCC_CFM(gpriv, x) (((x) & 0x3) << reg_v3u(gpriv, 8, 16))
+#define RCANFD_CFCC_CFTML(gpriv, x) \
+ (((x) & reg_gen4(gpriv, 0x1f, 0xf)) << reg_gen4(gpriv, 16, 20))
+#define RCANFD_CFCC_CFM(gpriv, x) (((x) & 0x3) << reg_gen4(gpriv, 8, 16))
#define RCANFD_CFCC_CFIM BIT(12)
-#define RCANFD_CFCC_CFDC(gpriv, x) (((x) & 0x7) << reg_v3u(gpriv, 21, 8))
+#define RCANFD_CFCC_CFDC(gpriv, x) (((x) & 0x7) << reg_gen4(gpriv, 21, 8))
#define RCANFD_CFCC_CFPLS(x) (((x) & 0x7) << 4)
#define RCANFD_CFCC_CFTXIE BIT(2)
#define RCANFD_CFCC_CFE BIT(0)
@@ -304,7 +305,7 @@
#define RCANFD_RMND(y) (0x00a8 + (0x04 * (y)))
/* RSCFDnCFDRFCCx / RSCFDnRFCCx */
-#define RCANFD_RFCC(gpriv, x) (reg_v3u(gpriv, 0x00c0, 0x00b8) + (0x04 * (x)))
+#define RCANFD_RFCC(gpriv, x) (reg_gen4(gpriv, 0x00c0, 0x00b8) + (0x04 * (x)))
/* RSCFDnCFDRFSTSx / RSCFDnRFSTSx */
#define RCANFD_RFSTS(gpriv, x) (RCANFD_RFCC(gpriv, x) + 0x20)
/* RSCFDnCFDRFPCTRx / RSCFDnRFPCTRx */
@@ -314,13 +315,13 @@
/* RSCFDnCFDCFCCx / RSCFDnCFCCx */
#define RCANFD_CFCC(gpriv, ch, idx) \
- (reg_v3u(gpriv, 0x0120, 0x0118) + (0x0c * (ch)) + (0x04 * (idx)))
+ (reg_gen4(gpriv, 0x0120, 0x0118) + (0x0c * (ch)) + (0x04 * (idx)))
/* RSCFDnCFDCFSTSx / RSCFDnCFSTSx */
#define RCANFD_CFSTS(gpriv, ch, idx) \
- (reg_v3u(gpriv, 0x01e0, 0x0178) + (0x0c * (ch)) + (0x04 * (idx)))
+ (reg_gen4(gpriv, 0x01e0, 0x0178) + (0x0c * (ch)) + (0x04 * (idx)))
/* RSCFDnCFDCFPCTRx / RSCFDnCFPCTRx */
#define RCANFD_CFPCTR(gpriv, ch, idx) \
- (reg_v3u(gpriv, 0x0240, 0x01d8) + (0x0c * (ch)) + (0x04 * (idx)))
+ (reg_gen4(gpriv, 0x0240, 0x01d8) + (0x0c * (ch)) + (0x04 * (idx)))
/* RSCFDnCFDFESTS / RSCFDnFESTS */
#define RCANFD_FESTS (0x0238)
@@ -428,16 +429,15 @@
/* RSCFDnRPGACCr */
#define RCANFD_C_RPGACC(r) (0x1900 + (0x04 * (r)))
-/* R-Car V3U Classical and CAN FD mode specific register map */
-#define RCANFD_V3U_CFDCFG (0x1314)
-#define RCANFD_V3U_DCFG(m) (0x1400 + (0x20 * (m)))
+/* R-Car Gen4 Classical and CAN FD mode specific register map */
+#define RCANFD_GEN4_FDCFG(m) (0x1404 + (0x20 * (m)))
-#define RCANFD_V3U_GAFL_OFFSET (0x1800)
+#define RCANFD_GEN4_GAFL_OFFSET (0x1800)
/* CAN FD mode specific register map */
/* RSCFDnCFDCmXXX -> RCANFD_F_XXX(m) */
-#define RCANFD_F_DCFG(m) (0x0500 + (0x20 * (m)))
+#define RCANFD_F_DCFG(gpriv, m) (reg_gen4(gpriv, 0x1400, 0x0500) + (0x20 * (m)))
#define RCANFD_F_CFDCFG(m) (0x0504 + (0x20 * (m)))
#define RCANFD_F_CFDCTR(m) (0x0508 + (0x20 * (m)))
#define RCANFD_F_CFDSTS(m) (0x050c + (0x20 * (m)))
@@ -453,7 +453,7 @@
#define RCANFD_F_RMDF(q, b) (0x200c + (0x04 * (b)) + (0x20 * (q)))
/* RSCFDnCFDRFXXx -> RCANFD_F_RFXX(x) */
-#define RCANFD_F_RFOFFSET(gpriv) reg_v3u(gpriv, 0x6000, 0x3000)
+#define RCANFD_F_RFOFFSET(gpriv) reg_gen4(gpriv, 0x6000, 0x3000)
#define RCANFD_F_RFID(gpriv, x) (RCANFD_F_RFOFFSET(gpriv) + (0x80 * (x)))
#define RCANFD_F_RFPTR(gpriv, x) (RCANFD_F_RFOFFSET(gpriv) + 0x04 + (0x80 * (x)))
#define RCANFD_F_RFFDSTS(gpriv, x) (RCANFD_F_RFOFFSET(gpriv) + 0x08 + (0x80 * (x)))
@@ -461,7 +461,7 @@
(RCANFD_F_RFOFFSET(gpriv) + 0x0c + (0x80 * (x)) + (0x04 * (df)))
/* RSCFDnCFDCFXXk -> RCANFD_F_CFXX(ch, k) */
-#define RCANFD_F_CFOFFSET(gpriv) reg_v3u(gpriv, 0x6400, 0x3400)
+#define RCANFD_F_CFOFFSET(gpriv) reg_gen4(gpriv, 0x6400, 0x3400)
#define RCANFD_F_CFID(gpriv, ch, idx) \
(RCANFD_F_CFOFFSET(gpriv) + (0x180 * (ch)) + (0x80 * (idx)))
@@ -597,28 +597,28 @@ static const struct rcar_canfd_hw_info rcar_gen3_hw_info = {
.shared_global_irqs = 1,
};
+static const struct rcar_canfd_hw_info rcar_gen4_hw_info = {
+ .max_channels = 8,
+ .postdiv = 2,
+ .shared_global_irqs = 1,
+};
+
static const struct rcar_canfd_hw_info rzg2l_hw_info = {
.max_channels = 2,
.postdiv = 1,
.multi_channel_irqs = 1,
};
-static const struct rcar_canfd_hw_info r8a779a0_hw_info = {
- .max_channels = 8,
- .postdiv = 2,
- .shared_global_irqs = 1,
-};
-
/* Helper functions */
-static inline bool is_v3u(struct rcar_canfd_global *gpriv)
+static inline bool is_gen4(struct rcar_canfd_global *gpriv)
{
- return gpriv->info == &r8a779a0_hw_info;
+ return gpriv->info == &rcar_gen4_hw_info;
}
-static inline u32 reg_v3u(struct rcar_canfd_global *gpriv,
- u32 v3u, u32 not_v3u)
+static inline u32 reg_gen4(struct rcar_canfd_global *gpriv,
+ u32 gen4, u32 not_gen4)
{
- return is_v3u(gpriv) ? v3u : not_v3u;
+ return is_gen4(gpriv) ? gen4 : not_gen4;
}
static inline void rcar_canfd_update(u32 mask, u32 val, u32 __iomem *reg)
@@ -688,13 +688,14 @@ static void rcar_canfd_tx_failure_cleanup(struct net_device *ndev)
static void rcar_canfd_set_mode(struct rcar_canfd_global *gpriv)
{
- if (is_v3u(gpriv)) {
- if (gpriv->fdmode)
- rcar_canfd_set_bit(gpriv->base, RCANFD_V3U_CFDCFG,
- RCANFD_FDCFG_FDOE);
- else
- rcar_canfd_set_bit(gpriv->base, RCANFD_V3U_CFDCFG,
- RCANFD_FDCFG_CLOE);
+ if (is_gen4(gpriv)) {
+ u32 ch, val = gpriv->fdmode ? RCANFD_GEN4_FDCFG_FDOE
+ : RCANFD_GEN4_FDCFG_CLOE;
+
+ for_each_set_bit(ch, &gpriv->channels_mask,
+ gpriv->info->max_channels)
+ rcar_canfd_set_bit(gpriv->base, RCANFD_GEN4_FDCFG(ch),
+ val);
} else {
if (gpriv->fdmode)
rcar_canfd_set_bit(gpriv->base, RCANFD_GRMCFG,
@@ -814,8 +815,8 @@ static void rcar_canfd_configure_afl_rules(struct rcar_canfd_global *gpriv,
/* Write number of rules for channel */
rcar_canfd_set_bit(gpriv->base, RCANFD_GAFLCFG(ch),
RCANFD_GAFLCFG_SETRNC(gpriv, ch, num_rules));
- if (is_v3u(gpriv))
- offset = RCANFD_V3U_GAFL_OFFSET;
+ if (is_gen4(gpriv))
+ offset = RCANFD_GEN4_GAFL_OFFSET;
else if (gpriv->fdmode)
offset = RCANFD_F_GAFL_OFFSET;
else
@@ -1343,17 +1344,14 @@ static void rcar_canfd_set_bittiming(struct net_device *dev)
tseg2 = dbt->phase_seg2 - 1;
cfg = (RCANFD_DCFG_DTSEG1(gpriv, tseg1) | RCANFD_DCFG_DBRP(brp) |
- RCANFD_DCFG_DSJW(sjw) | RCANFD_DCFG_DTSEG2(gpriv, tseg2));
+ RCANFD_DCFG_DSJW(gpriv, sjw) | RCANFD_DCFG_DTSEG2(gpriv, tseg2));
- if (is_v3u(gpriv))
- rcar_canfd_write(priv->base, RCANFD_V3U_DCFG(ch), cfg);
- else
- rcar_canfd_write(priv->base, RCANFD_F_DCFG(ch), cfg);
+ rcar_canfd_write(priv->base, RCANFD_F_DCFG(gpriv, ch), cfg);
netdev_dbg(priv->ndev, "drate: brp %u, sjw %u, tseg1 %u, tseg2 %u\n",
brp, sjw, tseg1, tseg2);
} else {
/* Classical CAN only mode */
- if (is_v3u(gpriv)) {
+ if (is_gen4(gpriv)) {
cfg = (RCANFD_NCFG_NTSEG1(gpriv, tseg1) |
RCANFD_NCFG_NBRP(brp) |
RCANFD_NCFG_NSJW(gpriv, sjw) |
@@ -1510,7 +1508,7 @@ static netdev_tx_t rcar_canfd_start_xmit(struct sk_buff *skb,
dlc = RCANFD_CFPTR_CFDLC(can_fd_len2dlc(cf->len));
- if ((priv->can.ctrlmode & CAN_CTRLMODE_FD) || is_v3u(gpriv)) {
+ if ((priv->can.ctrlmode & CAN_CTRLMODE_FD) || is_gen4(gpriv)) {
rcar_canfd_write(priv->base,
RCANFD_F_CFID(gpriv, ch, RCANFD_CFFIFO_IDX), id);
rcar_canfd_write(priv->base,
@@ -1569,7 +1567,7 @@ static void rcar_canfd_rx_pkt(struct rcar_canfd_channel *priv)
u32 ch = priv->channel;
u32 ridx = ch + RCANFD_RFFIFO_IDX;
- if ((priv->can.ctrlmode & CAN_CTRLMODE_FD) || is_v3u(gpriv)) {
+ if ((priv->can.ctrlmode & CAN_CTRLMODE_FD) || is_gen4(gpriv)) {
id = rcar_canfd_read(priv->base, RCANFD_F_RFID(gpriv, ridx));
dlc = rcar_canfd_read(priv->base, RCANFD_F_RFPTR(gpriv, ridx));
@@ -1620,7 +1618,7 @@ static void rcar_canfd_rx_pkt(struct rcar_canfd_channel *priv)
cf->len = can_cc_dlc2len(RCANFD_RFPTR_RFDLC(dlc));
if (id & RCANFD_RFID_RFRTR)
cf->can_id |= CAN_RTR_FLAG;
- else if (is_v3u(gpriv))
+ else if (is_gen4(gpriv))
rcar_canfd_get_data(priv, cf, RCANFD_F_RFDF(gpriv, ridx, 0));
else
rcar_canfd_get_data(priv, cf, RCANFD_C_RFDF(ridx, 0));
@@ -1717,13 +1715,14 @@ static int rcar_canfd_channel_probe(struct rcar_canfd_global *gpriv, u32 ch,
{
const struct rcar_canfd_hw_info *info = gpriv->info;
struct platform_device *pdev = gpriv->pdev;
+ struct device *dev = &pdev->dev;
struct rcar_canfd_channel *priv;
struct net_device *ndev;
int err = -ENODEV;
ndev = alloc_candev(sizeof(*priv), RCANFD_FIFO_DEPTH);
if (!ndev) {
- dev_err(&pdev->dev, "alloc_candev() failed\n");
+ dev_err(dev, "alloc_candev() failed\n");
return -ENOMEM;
}
priv = netdev_priv(ndev);
@@ -1736,7 +1735,7 @@ static int rcar_canfd_channel_probe(struct rcar_canfd_global *gpriv, u32 ch,
priv->channel = ch;
priv->gpriv = gpriv;
priv->can.clock.freq = fcan_freq;
- dev_info(&pdev->dev, "can_clk rate is %u\n", priv->can.clock.freq);
+ dev_info(dev, "can_clk rate is %u\n", priv->can.clock.freq);
if (info->multi_channel_irqs) {
char *irq_name;
@@ -1755,31 +1754,31 @@ static int rcar_canfd_channel_probe(struct rcar_canfd_global *gpriv, u32 ch,
goto fail;
}
- irq_name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
- "canfd.ch%d_err", ch);
+ irq_name = devm_kasprintf(dev, GFP_KERNEL, "canfd.ch%d_err",
+ ch);
if (!irq_name) {
err = -ENOMEM;
goto fail;
}
- err = devm_request_irq(&pdev->dev, err_irq,
+ err = devm_request_irq(dev, err_irq,
rcar_canfd_channel_err_interrupt, 0,
irq_name, priv);
if (err) {
- dev_err(&pdev->dev, "devm_request_irq CH Err(%d) failed, error %d\n",
+ dev_err(dev, "devm_request_irq CH Err(%d) failed, error %d\n",
err_irq, err);
goto fail;
}
- irq_name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
- "canfd.ch%d_trx", ch);
+ irq_name = devm_kasprintf(dev, GFP_KERNEL, "canfd.ch%d_trx",
+ ch);
if (!irq_name) {
err = -ENOMEM;
goto fail;
}
- err = devm_request_irq(&pdev->dev, tx_irq,
+ err = devm_request_irq(dev, tx_irq,
rcar_canfd_channel_tx_interrupt, 0,
irq_name, priv);
if (err) {
- dev_err(&pdev->dev, "devm_request_irq Tx (%d) failed, error %d\n",
+ dev_err(dev, "devm_request_irq Tx (%d) failed, error %d\n",
tx_irq, err);
goto fail;
}
@@ -1803,7 +1802,7 @@ static int rcar_canfd_channel_probe(struct rcar_canfd_global *gpriv, u32 ch,
priv->can.do_set_mode = rcar_canfd_do_set_mode;
priv->can.do_get_berr_counter = rcar_canfd_get_berr_counter;
- SET_NETDEV_DEV(ndev, &pdev->dev);
+ SET_NETDEV_DEV(ndev, dev);
netif_napi_add_weight(ndev, &priv->napi, rcar_canfd_rx_poll,
RCANFD_NAPI_WEIGHT);
@@ -1811,11 +1810,10 @@ static int rcar_canfd_channel_probe(struct rcar_canfd_global *gpriv, u32 ch,
gpriv->ch[priv->channel] = priv;
err = register_candev(ndev);
if (err) {
- dev_err(&pdev->dev,
- "register_candev() failed, error %d\n", err);
+ dev_err(dev, "register_candev() failed, error %d\n", err);
goto fail_candev;
}
- dev_info(&pdev->dev, "device registered (channel %u)\n", priv->channel);
+ dev_info(dev, "device registered (channel %u)\n", priv->channel);
return 0;
fail_candev:
@@ -1839,6 +1837,7 @@ static void rcar_canfd_channel_remove(struct rcar_canfd_global *gpriv, u32 ch)
static int rcar_canfd_probe(struct platform_device *pdev)
{
const struct rcar_canfd_hw_info *info;
+ struct device *dev = &pdev->dev;
void __iomem *addr;
u32 sts, ch, fcan_freq;
struct rcar_canfd_global *gpriv;
@@ -1850,14 +1849,14 @@ static int rcar_canfd_probe(struct platform_device *pdev)
char name[9] = "channelX";
int i;
- info = of_device_get_match_data(&pdev->dev);
+ info = of_device_get_match_data(dev);
- if (of_property_read_bool(pdev->dev.of_node, "renesas,no-can-fd"))
+ if (of_property_read_bool(dev->of_node, "renesas,no-can-fd"))
fdmode = false; /* Classical CAN only mode */
for (i = 0; i < info->max_channels; ++i) {
name[7] = '0' + i;
- of_child = of_get_child_by_name(pdev->dev.of_node, name);
+ of_child = of_get_child_by_name(dev->of_node, name);
if (of_child && of_device_is_available(of_child))
channels_mask |= BIT(i);
of_node_put(of_child);
@@ -1890,7 +1889,7 @@ static int rcar_canfd_probe(struct platform_device *pdev)
}
/* Global controller context */
- gpriv = devm_kzalloc(&pdev->dev, sizeof(*gpriv), GFP_KERNEL);
+ gpriv = devm_kzalloc(dev, sizeof(*gpriv), GFP_KERNEL);
if (!gpriv)
return -ENOMEM;
@@ -1899,32 +1898,30 @@ static int rcar_canfd_probe(struct platform_device *pdev)
gpriv->fdmode = fdmode;
gpriv->info = info;
- gpriv->rstc1 = devm_reset_control_get_optional_exclusive(&pdev->dev,
- "rstp_n");
+ gpriv->rstc1 = devm_reset_control_get_optional_exclusive(dev, "rstp_n");
if (IS_ERR(gpriv->rstc1))
- return dev_err_probe(&pdev->dev, PTR_ERR(gpriv->rstc1),
+ return dev_err_probe(dev, PTR_ERR(gpriv->rstc1),
"failed to get rstp_n\n");
- gpriv->rstc2 = devm_reset_control_get_optional_exclusive(&pdev->dev,
- "rstc_n");
+ gpriv->rstc2 = devm_reset_control_get_optional_exclusive(dev, "rstc_n");
if (IS_ERR(gpriv->rstc2))
- return dev_err_probe(&pdev->dev, PTR_ERR(gpriv->rstc2),
+ return dev_err_probe(dev, PTR_ERR(gpriv->rstc2),
"failed to get rstc_n\n");
/* Peripheral clock */
- gpriv->clkp = devm_clk_get(&pdev->dev, "fck");
+ gpriv->clkp = devm_clk_get(dev, "fck");
if (IS_ERR(gpriv->clkp))
- return dev_err_probe(&pdev->dev, PTR_ERR(gpriv->clkp),
+ return dev_err_probe(dev, PTR_ERR(gpriv->clkp),
"cannot get peripheral clock\n");
/* fCAN clock: Pick External clock. If not available fallback to
* CANFD clock
*/
- gpriv->can_clk = devm_clk_get(&pdev->dev, "can_clk");
+ gpriv->can_clk = devm_clk_get(dev, "can_clk");
if (IS_ERR(gpriv->can_clk) || (clk_get_rate(gpriv->can_clk) == 0)) {
- gpriv->can_clk = devm_clk_get(&pdev->dev, "canfd");
+ gpriv->can_clk = devm_clk_get(dev, "canfd");
if (IS_ERR(gpriv->can_clk))
- return dev_err_probe(&pdev->dev, PTR_ERR(gpriv->can_clk),
+ return dev_err_probe(dev, PTR_ERR(gpriv->can_clk),
"cannot get canfd clock\n");
gpriv->fcan = RCANFD_CANFDCLK;
@@ -1947,39 +1944,38 @@ static int rcar_canfd_probe(struct platform_device *pdev)
/* Request IRQ that's common for both channels */
if (info->shared_global_irqs) {
- err = devm_request_irq(&pdev->dev, ch_irq,
+ err = devm_request_irq(dev, ch_irq,
rcar_canfd_channel_interrupt, 0,
"canfd.ch_int", gpriv);
if (err) {
- dev_err(&pdev->dev, "devm_request_irq(%d) failed, error %d\n",
+ dev_err(dev, "devm_request_irq(%d) failed, error %d\n",
ch_irq, err);
goto fail_dev;
}
- err = devm_request_irq(&pdev->dev, g_irq,
- rcar_canfd_global_interrupt, 0,
- "canfd.g_int", gpriv);
+ err = devm_request_irq(dev, g_irq, rcar_canfd_global_interrupt,
+ 0, "canfd.g_int", gpriv);
if (err) {
- dev_err(&pdev->dev, "devm_request_irq(%d) failed, error %d\n",
+ dev_err(dev, "devm_request_irq(%d) failed, error %d\n",
g_irq, err);
goto fail_dev;
}
} else {
- err = devm_request_irq(&pdev->dev, g_recc_irq,
+ err = devm_request_irq(dev, g_recc_irq,
rcar_canfd_global_receive_fifo_interrupt, 0,
"canfd.g_recc", gpriv);
if (err) {
- dev_err(&pdev->dev, "devm_request_irq(%d) failed, error %d\n",
+ dev_err(dev, "devm_request_irq(%d) failed, error %d\n",
g_recc_irq, err);
goto fail_dev;
}
- err = devm_request_irq(&pdev->dev, g_err_irq,
+ err = devm_request_irq(dev, g_err_irq,
rcar_canfd_global_err_interrupt, 0,
"canfd.g_err", gpriv);
if (err) {
- dev_err(&pdev->dev, "devm_request_irq(%d) failed, error %d\n",
+ dev_err(dev, "devm_request_irq(%d) failed, error %d\n",
g_err_irq, err);
goto fail_dev;
}
@@ -1997,14 +1993,14 @@ static int rcar_canfd_probe(struct platform_device *pdev)
/* Enable peripheral clock for register access */
err = clk_prepare_enable(gpriv->clkp);
if (err) {
- dev_err(&pdev->dev,
- "failed to enable peripheral clock, error %d\n", err);
+ dev_err(dev, "failed to enable peripheral clock, error %d\n",
+ err);
goto fail_reset;
}
err = rcar_canfd_reset_controller(gpriv);
if (err) {
- dev_err(&pdev->dev, "reset controller failed\n");
+ dev_err(dev, "reset controller failed\n");
goto fail_clk;
}
@@ -2034,7 +2030,7 @@ static int rcar_canfd_probe(struct platform_device *pdev)
err = readl_poll_timeout((gpriv->base + RCANFD_GSTS), sts,
!(sts & RCANFD_GSTS_GNOPM), 2, 500000);
if (err) {
- dev_err(&pdev->dev, "global operational mode failed\n");
+ dev_err(dev, "global operational mode failed\n");
goto fail_mode;
}
@@ -2045,7 +2041,7 @@ static int rcar_canfd_probe(struct platform_device *pdev)
}
platform_set_drvdata(pdev, gpriv);
- dev_info(&pdev->dev, "global operational state (clk %d, fdmode %d)\n",
+ dev_info(dev, "global operational state (clk %d, fdmode %d)\n",
gpriv->fcan, gpriv->fdmode);
return 0;
@@ -2099,9 +2095,10 @@ static SIMPLE_DEV_PM_OPS(rcar_canfd_pm_ops, rcar_canfd_suspend,
rcar_canfd_resume);
static const __maybe_unused struct of_device_id rcar_canfd_of_table[] = {
+ { .compatible = "renesas,r8a779a0-canfd", .data = &rcar_gen4_hw_info },
{ .compatible = "renesas,rcar-gen3-canfd", .data = &rcar_gen3_hw_info },
+ { .compatible = "renesas,rcar-gen4-canfd", .data = &rcar_gen4_hw_info },
{ .compatible = "renesas,rzg2l-canfd", .data = &rzg2l_hw_info },
- { .compatible = "renesas,r8a779a0-canfd", .data = &r8a779a0_hw_info },
{ }
};
diff --git a/drivers/net/can/sja1000/ems_pci.c b/drivers/net/can/sja1000/ems_pci.c
index 4ab91759a5c6..c56e27223e5f 100644
--- a/drivers/net/can/sja1000/ems_pci.c
+++ b/drivers/net/can/sja1000/ems_pci.c
@@ -3,6 +3,7 @@
* Copyright (C) 2007 Wolfgang Grandegger <wg@grandegger.com>
* Copyright (C) 2008 Markus Plessing <plessing@ems-wuensche.com>
* Copyright (C) 2008 Sebastian Haas <haas@ems-wuensche.com>
+ * Copyright (C) 2023 EMS Dr. Thomas Wuensche
*/
#include <linux/kernel.h>
@@ -19,12 +20,14 @@
#define DRV_NAME "ems_pci"
-MODULE_AUTHOR("Sebastian Haas <haas@ems-wuenche.com>");
+MODULE_AUTHOR("Sebastian Haas <support@ems-wuensche.com>");
+MODULE_AUTHOR("Gerhard Uttenthaler <uttenthaler@ems-wuensche.com>");
MODULE_DESCRIPTION("Socket-CAN driver for EMS CPC-PCI/PCIe/104P CAN cards");
MODULE_LICENSE("GPL v2");
#define EMS_PCI_V1_MAX_CHAN 2
#define EMS_PCI_V2_MAX_CHAN 4
+#define EMS_PCI_V3_MAX_CHAN 4
#define EMS_PCI_MAX_CHAN EMS_PCI_V2_MAX_CHAN
struct ems_pci_card {
@@ -40,8 +43,7 @@ struct ems_pci_card {
#define EMS_PCI_CAN_CLOCK (16000000 / 2)
-/*
- * Register definitions and descriptions are from LinCAN 0.3.3.
+/* Register definitions and descriptions are from LinCAN 0.3.3.
*
* PSB4610 PITA-2 bridge control registers
*/
@@ -52,8 +54,7 @@ struct ems_pci_card {
#define PITA2_MISC 0x1c /* Miscellaneous Register */
#define PITA2_MISC_CONFIG 0x04000000 /* Multiplexed parallel interface */
-/*
- * Register definitions for the PLX 9030
+/* Register definitions for the PLX 9030
*/
#define PLX_ICSR 0x4c /* Interrupt Control/Status register */
#define PLX_ICSR_LINTI1_ENA 0x0001 /* LINTi1 Enable */
@@ -62,8 +63,16 @@ struct ems_pci_card {
#define PLX_ICSR_ENA_CLR (PLX_ICSR_LINTI1_ENA | PLX_ICSR_PCIINT_ENA | \
PLX_ICSR_LINTI1_CLR)
-/*
- * The board configuration is probably following:
+/* Register definitions for the ASIX99100
+ */
+#define ASIX_LINTSR 0x28 /* Interrupt Control/Status register */
+#define ASIX_LINTSR_INT0AC BIT(0) /* Writing 1 enables or clears interrupt */
+
+#define ASIX_LIEMR 0x24 /* Local Interrupt Enable / Miscellaneous Register */
+#define ASIX_LIEMR_L0EINTEN BIT(16) /* Local INT0 input assertion enable */
+#define ASIX_LIEMR_LRST BIT(14) /* Local Reset assert */
+
+/* The board configuration is probably following:
* RX1 is connected to ground.
* TX1 is not connected.
* CLKO is not connected.
@@ -72,23 +81,40 @@ struct ems_pci_card {
*/
#define EMS_PCI_OCR (OCR_TX0_PUSHPULL | OCR_TX1_PUSHPULL)
-/*
- * In the CDR register, you should set CBP to 1.
+/* In the CDR register, you should set CBP to 1.
* You will probably also want to set the clock divider value to 7
* (meaning direct oscillator output) because the second SJA1000 chip
* is driven by the first one CLKOUT output.
*/
#define EMS_PCI_CDR (CDR_CBP | CDR_CLKOUT_MASK)
-#define EMS_PCI_V1_BASE_BAR 1
-#define EMS_PCI_V1_CONF_SIZE 4096 /* size of PITA control area */
-#define EMS_PCI_V2_BASE_BAR 2
-#define EMS_PCI_V2_CONF_SIZE 128 /* size of PLX control area */
-#define EMS_PCI_CAN_BASE_OFFSET 0x400 /* offset where the controllers starts */
-#define EMS_PCI_CAN_CTRL_SIZE 0x200 /* memory size for each controller */
+#define EMS_PCI_V1_BASE_BAR 1
+#define EMS_PCI_V1_CONF_BAR 0
+#define EMS_PCI_V1_CONF_SIZE 4096 /* size of PITA control area */
+#define EMS_PCI_V1_CAN_BASE_OFFSET 0x400 /* offset where the controllers start */
+#define EMS_PCI_V1_CAN_CTRL_SIZE 0x200 /* memory size for each controller */
+
+#define EMS_PCI_V2_BASE_BAR 2
+#define EMS_PCI_V2_CONF_BAR 0
+#define EMS_PCI_V2_CONF_SIZE 128 /* size of PLX control area */
+#define EMS_PCI_V2_CAN_BASE_OFFSET 0x400 /* offset where the controllers start */
+#define EMS_PCI_V2_CAN_CTRL_SIZE 0x200 /* memory size for each controller */
+
+#define EMS_PCI_V3_BASE_BAR 0
+#define EMS_PCI_V3_CONF_BAR 5
+#define EMS_PCI_V3_CONF_SIZE 128 /* size of ASIX control area */
+#define EMS_PCI_V3_CAN_BASE_OFFSET 0x00 /* offset where the controllers starts */
+#define EMS_PCI_V3_CAN_CTRL_SIZE 0x100 /* memory size for each controller */
#define EMS_PCI_BASE_SIZE 4096 /* size of controller area */
+#ifndef PCI_VENDOR_ID_ASIX
+#define PCI_VENDOR_ID_ASIX 0x125b
+#define PCI_DEVICE_ID_ASIX_9110 0x9110
+#define PCI_SUBVENDOR_ID_ASIX 0xa000
+#endif
+#define PCI_SUBDEVICE_ID_EMS 0x4010
+
static const struct pci_device_id ems_pci_tbl[] = {
/* CPC-PCI v1 */
{PCI_VENDOR_ID_SIEMENS, 0x2104, PCI_ANY_ID, PCI_ANY_ID,},
@@ -96,12 +122,13 @@ static const struct pci_device_id ems_pci_tbl[] = {
{PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030, PCI_VENDOR_ID_PLX, 0x4000},
/* CPC-104P v2 */
{PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030, PCI_VENDOR_ID_PLX, 0x4002},
+ /* CPC-PCIe v3 */
+ {PCI_VENDOR_ID_ASIX, PCI_DEVICE_ID_ASIX_9110, PCI_SUBVENDOR_ID_ASIX, PCI_SUBDEVICE_ID_EMS},
{0,}
};
MODULE_DEVICE_TABLE(pci, ems_pci_tbl);
-/*
- * Helper to read internal registers from card logic (not CAN)
+/* Helper to read internal registers from card logic (not CAN)
*/
static u8 ems_pci_v1_readb(struct ems_pci_card *card, unsigned int port)
{
@@ -146,8 +173,25 @@ static void ems_pci_v2_post_irq(const struct sja1000_priv *priv)
writel(PLX_ICSR_ENA_CLR, card->conf_addr + PLX_ICSR);
}
-/*
- * Check if a CAN controller is present at the specified location
+static u8 ems_pci_v3_read_reg(const struct sja1000_priv *priv, int port)
+{
+ return readb(priv->reg_base + port);
+}
+
+static void ems_pci_v3_write_reg(const struct sja1000_priv *priv,
+ int port, u8 val)
+{
+ writeb(val, priv->reg_base + port);
+}
+
+static void ems_pci_v3_post_irq(const struct sja1000_priv *priv)
+{
+ struct ems_pci_card *card = (struct ems_pci_card *)priv->priv;
+
+ writel(ASIX_LINTSR_INT0AC, card->conf_addr + ASIX_LINTSR);
+}
+
+/* Check if a CAN controller is present at the specified location
* by trying to set 'em into the PeliCAN mode
*/
static inline int ems_pci_check_chan(const struct sja1000_priv *priv)
@@ -185,10 +229,10 @@ static void ems_pci_del_card(struct pci_dev *pdev)
free_sja1000dev(dev);
}
- if (card->base_addr != NULL)
+ if (card->base_addr)
pci_iounmap(card->pci_dev, card->base_addr);
- if (card->conf_addr != NULL)
+ if (card->conf_addr)
pci_iounmap(card->pci_dev, card->conf_addr);
kfree(card);
@@ -202,8 +246,7 @@ static void ems_pci_card_reset(struct ems_pci_card *card)
writeb(0, card->base_addr);
}
-/*
- * Probe PCI device for EMS CAN signature and register each available
+/* Probe PCI device for EMS CAN signature and register each available
* CAN channel to SJA1000 Socket-CAN subsystem.
*/
static int ems_pci_add_card(struct pci_dev *pdev,
@@ -212,7 +255,7 @@ static int ems_pci_add_card(struct pci_dev *pdev,
struct sja1000_priv *priv;
struct net_device *dev;
struct ems_pci_card *card;
- int max_chan, conf_size, base_bar;
+ int max_chan, conf_size, base_bar, conf_bar;
int err, i;
/* Enabling PCI device */
@@ -222,8 +265,8 @@ static int ems_pci_add_card(struct pci_dev *pdev,
}
/* Allocating card structures to hold addresses, ... */
- card = kzalloc(sizeof(struct ems_pci_card), GFP_KERNEL);
- if (card == NULL) {
+ card = kzalloc(sizeof(*card), GFP_KERNEL);
+ if (!card) {
pci_disable_device(pdev);
return -ENOMEM;
}
@@ -234,27 +277,35 @@ static int ems_pci_add_card(struct pci_dev *pdev,
card->channels = 0;
- if (pdev->vendor == PCI_VENDOR_ID_PLX) {
+ if (pdev->vendor == PCI_VENDOR_ID_ASIX) {
+ card->version = 3; /* CPC-PCI v3 */
+ max_chan = EMS_PCI_V3_MAX_CHAN;
+ base_bar = EMS_PCI_V3_BASE_BAR;
+ conf_bar = EMS_PCI_V3_CONF_BAR;
+ conf_size = EMS_PCI_V3_CONF_SIZE;
+ } else if (pdev->vendor == PCI_VENDOR_ID_PLX) {
card->version = 2; /* CPC-PCI v2 */
max_chan = EMS_PCI_V2_MAX_CHAN;
base_bar = EMS_PCI_V2_BASE_BAR;
+ conf_bar = EMS_PCI_V2_CONF_BAR;
conf_size = EMS_PCI_V2_CONF_SIZE;
} else {
card->version = 1; /* CPC-PCI v1 */
max_chan = EMS_PCI_V1_MAX_CHAN;
base_bar = EMS_PCI_V1_BASE_BAR;
+ conf_bar = EMS_PCI_V1_CONF_BAR;
conf_size = EMS_PCI_V1_CONF_SIZE;
}
/* Remap configuration space and controller memory area */
- card->conf_addr = pci_iomap(pdev, 0, conf_size);
- if (card->conf_addr == NULL) {
+ card->conf_addr = pci_iomap(pdev, conf_bar, conf_size);
+ if (!card->conf_addr) {
err = -ENOMEM;
goto failure_cleanup;
}
card->base_addr = pci_iomap(pdev, base_bar, EMS_PCI_BASE_SIZE);
- if (card->base_addr == NULL) {
+ if (!card->base_addr) {
err = -ENOMEM;
goto failure_cleanup;
}
@@ -276,12 +327,20 @@ static int ems_pci_add_card(struct pci_dev *pdev,
}
}
+ if (card->version == 3) {
+ /* ASIX chip asserts local reset to CAN controllers
+ * after bootup until it is deasserted
+ */
+ writel(readl(card->conf_addr + ASIX_LIEMR) & ~ASIX_LIEMR_LRST,
+ card->conf_addr + ASIX_LIEMR);
+ }
+
ems_pci_card_reset(card);
/* Detect available channels */
for (i = 0; i < max_chan; i++) {
dev = alloc_sja1000dev(0);
- if (dev == NULL) {
+ if (!dev) {
err = -ENOMEM;
goto failure_cleanup;
}
@@ -292,16 +351,25 @@ static int ems_pci_add_card(struct pci_dev *pdev,
priv->irq_flags = IRQF_SHARED;
dev->irq = pdev->irq;
- priv->reg_base = card->base_addr + EMS_PCI_CAN_BASE_OFFSET
- + (i * EMS_PCI_CAN_CTRL_SIZE);
+
if (card->version == 1) {
priv->read_reg = ems_pci_v1_read_reg;
priv->write_reg = ems_pci_v1_write_reg;
priv->post_irq = ems_pci_v1_post_irq;
- } else {
+ priv->reg_base = card->base_addr + EMS_PCI_V1_CAN_BASE_OFFSET
+ + (i * EMS_PCI_V1_CAN_CTRL_SIZE);
+ } else if (card->version == 2) {
priv->read_reg = ems_pci_v2_read_reg;
priv->write_reg = ems_pci_v2_write_reg;
priv->post_irq = ems_pci_v2_post_irq;
+ priv->reg_base = card->base_addr + EMS_PCI_V2_CAN_BASE_OFFSET
+ + (i * EMS_PCI_V2_CAN_CTRL_SIZE);
+ } else {
+ priv->read_reg = ems_pci_v3_read_reg;
+ priv->write_reg = ems_pci_v3_write_reg;
+ priv->post_irq = ems_pci_v3_post_irq;
+ priv->reg_base = card->base_addr + EMS_PCI_V3_CAN_BASE_OFFSET
+ + (i * EMS_PCI_V3_CAN_CTRL_SIZE);
}
/* Check if channel is present */
@@ -313,20 +381,28 @@ static int ems_pci_add_card(struct pci_dev *pdev,
SET_NETDEV_DEV(dev, &pdev->dev);
dev->dev_id = i;
- if (card->version == 1)
+ if (card->version == 1) {
/* reset int flag of pita */
writel(PITA2_ICR_INT0_EN | PITA2_ICR_INT0,
card->conf_addr + PITA2_ICR);
- else
+ } else if (card->version == 2) {
/* enable IRQ in PLX 9030 */
writel(PLX_ICSR_ENA_CLR,
card->conf_addr + PLX_ICSR);
+ } else {
+ /* Enable IRQ in AX99100 */
+ writel(ASIX_LINTSR_INT0AC, card->conf_addr + ASIX_LINTSR);
+ /* Enable local INT0 input enable */
+ writel(readl(card->conf_addr + ASIX_LIEMR) | ASIX_LIEMR_L0EINTEN,
+ card->conf_addr + ASIX_LIEMR);
+ }
/* Register SJA1000 device */
err = register_sja1000dev(dev);
if (err) {
- dev_err(&pdev->dev, "Registering device failed "
- "(err=%d)\n", err);
+ dev_err(&pdev->dev,
+ "Registering device failed: %pe\n",
+ ERR_PTR(err));
free_sja1000dev(dev);
goto failure_cleanup;
}
@@ -334,7 +410,7 @@ static int ems_pci_add_card(struct pci_dev *pdev,
card->channels++;
dev_info(&pdev->dev, "Channel #%d at 0x%p, irq %d\n",
- i + 1, priv->reg_base, dev->irq);
+ i + 1, priv->reg_base, dev->irq);
} else {
free_sja1000dev(dev);
}
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ethtool.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ethtool.c
index 3585f02575df..57eeb066a945 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ethtool.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ethtool.c
@@ -48,6 +48,7 @@ mcp251xfd_ring_set_ringparam(struct net_device *ndev,
priv->rx_obj_num = layout.cur_rx;
priv->rx_obj_num_coalesce_irq = layout.rx_coalesce;
priv->tx->obj_num = layout.cur_tx;
+ priv->tx_obj_num_coalesce_irq = layout.tx_coalesce;
return 0;
}
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c
index bf3f0f150199..bfe4caa0c99d 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c
@@ -30,11 +30,23 @@ mcp251xfd_cmd_prepare_write_reg(const struct mcp251xfd_priv *priv,
last_byte = mcp251xfd_last_byte_set(mask);
len = last_byte - first_byte + 1;
- data = mcp251xfd_spi_cmd_write(priv, write_reg_buf, reg + first_byte);
+ data = mcp251xfd_spi_cmd_write(priv, write_reg_buf, reg + first_byte, len);
val_le32 = cpu_to_le32(val >> BITS_PER_BYTE * first_byte);
memcpy(data, &val_le32, len);
- if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_REG) {
+ if (!(priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_REG)) {
+ len += sizeof(write_reg_buf->nocrc.cmd);
+ } else if (len == 1) {
+ u16 crc;
+
+ /* CRC */
+ len += sizeof(write_reg_buf->safe.cmd);
+ crc = mcp251xfd_crc16_compute(&write_reg_buf->safe, len);
+ put_unaligned_be16(crc, (void *)write_reg_buf + len);
+
+ /* Total length */
+ len += sizeof(write_reg_buf->safe.crc);
+ } else {
u16 crc;
mcp251xfd_spi_cmd_crc_set_len_in_reg(&write_reg_buf->crc.cmd,
@@ -46,8 +58,6 @@ mcp251xfd_cmd_prepare_write_reg(const struct mcp251xfd_priv *priv,
/* Total length */
len += sizeof(write_reg_buf->crc.crc);
- } else {
- len += sizeof(write_reg_buf->nocrc.cmd);
}
return len;
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h
index 2b0309fedfac..7024ff0cc2c0 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h
@@ -504,6 +504,11 @@ union mcp251xfd_write_reg_buf {
u8 data[4];
__be16 crc;
} crc;
+ struct __packed {
+ struct mcp251xfd_buf_cmd cmd;
+ u8 data[1];
+ __be16 crc;
+ } safe;
} ____cacheline_aligned;
struct mcp251xfd_tx_obj {
@@ -759,6 +764,13 @@ mcp251xfd_spi_cmd_write_crc_set_addr(struct mcp251xfd_buf_cmd_crc *cmd,
}
static inline void
+mcp251xfd_spi_cmd_write_safe_set_addr(struct mcp251xfd_buf_cmd *cmd,
+ u16 addr)
+{
+ cmd->cmd = cpu_to_be16(MCP251XFD_SPI_INSTRUCTION_WRITE_CRC_SAFE | addr);
+}
+
+static inline void
mcp251xfd_spi_cmd_write_crc(struct mcp251xfd_buf_cmd_crc *cmd,
u16 addr, u16 len)
{
@@ -769,14 +781,20 @@ mcp251xfd_spi_cmd_write_crc(struct mcp251xfd_buf_cmd_crc *cmd,
static inline u8 *
mcp251xfd_spi_cmd_write(const struct mcp251xfd_priv *priv,
union mcp251xfd_write_reg_buf *write_reg_buf,
- u16 addr)
+ u16 addr, u8 len)
{
u8 *data;
if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_REG) {
- mcp251xfd_spi_cmd_write_crc_set_addr(&write_reg_buf->crc.cmd,
- addr);
- data = write_reg_buf->crc.data;
+ if (len == 1) {
+ mcp251xfd_spi_cmd_write_safe_set_addr(&write_reg_buf->safe.cmd,
+ addr);
+ data = write_reg_buf->safe.data;
+ } else {
+ mcp251xfd_spi_cmd_write_crc_set_addr(&write_reg_buf->crc.cmd,
+ addr);
+ data = write_reg_buf->crc.data;
+ }
} else {
mcp251xfd_spi_cmd_write_nocrc(&write_reg_buf->nocrc.cmd,
addr);
diff --git a/drivers/net/can/usb/esd_usb.c b/drivers/net/can/usb/esd_usb.c
index 42323f5e6f3a..55b36973952d 100644
--- a/drivers/net/can/usb/esd_usb.c
+++ b/drivers/net/can/usb/esd_usb.c
@@ -127,7 +127,15 @@ struct rx_msg {
u8 dlc;
__le32 ts;
__le32 id; /* upper 3 bits contain flags */
- u8 data[8];
+ union {
+ u8 data[8];
+ struct {
+ u8 status; /* CAN Controller Status */
+ u8 ecc; /* Error Capture Register */
+ u8 rec; /* RX Error Counter */
+ u8 tec; /* TX Error Counter */
+ } ev_can_err_ext; /* For ESD_EV_CAN_ERROR_EXT */
+ };
};
struct tx_msg {
@@ -229,51 +237,52 @@ static void esd_usb_rx_event(struct esd_usb_net_priv *priv,
u32 id = le32_to_cpu(msg->msg.rx.id) & ESD_IDMASK;
if (id == ESD_EV_CAN_ERROR_EXT) {
- u8 state = msg->msg.rx.data[0];
- u8 ecc = msg->msg.rx.data[1];
- u8 rxerr = msg->msg.rx.data[2];
- u8 txerr = msg->msg.rx.data[3];
+ u8 state = msg->msg.rx.ev_can_err_ext.status;
+ u8 ecc = msg->msg.rx.ev_can_err_ext.ecc;
+ u8 rxerr = msg->msg.rx.ev_can_err_ext.rec;
+ u8 txerr = msg->msg.rx.ev_can_err_ext.tec;
netdev_dbg(priv->netdev,
"CAN_ERR_EV_EXT: dlc=%#02x state=%02x ecc=%02x rec=%02x tec=%02x\n",
msg->msg.rx.dlc, state, ecc, rxerr, txerr);
skb = alloc_can_err_skb(priv->netdev, &cf);
- if (skb == NULL) {
- stats->rx_dropped++;
- return;
- }
if (state != priv->old_state) {
+ enum can_state tx_state, rx_state;
+ enum can_state new_state = CAN_STATE_ERROR_ACTIVE;
+
priv->old_state = state;
switch (state & ESD_BUSSTATE_MASK) {
case ESD_BUSSTATE_BUSOFF:
- priv->can.state = CAN_STATE_BUS_OFF;
- cf->can_id |= CAN_ERR_BUSOFF;
- priv->can.can_stats.bus_off++;
+ new_state = CAN_STATE_BUS_OFF;
can_bus_off(priv->netdev);
break;
case ESD_BUSSTATE_WARN:
- priv->can.state = CAN_STATE_ERROR_WARNING;
- priv->can.can_stats.error_warning++;
+ new_state = CAN_STATE_ERROR_WARNING;
break;
case ESD_BUSSTATE_ERRPASSIVE:
- priv->can.state = CAN_STATE_ERROR_PASSIVE;
- priv->can.can_stats.error_passive++;
+ new_state = CAN_STATE_ERROR_PASSIVE;
break;
default:
- priv->can.state = CAN_STATE_ERROR_ACTIVE;
+ new_state = CAN_STATE_ERROR_ACTIVE;
txerr = 0;
rxerr = 0;
break;
}
- } else {
+
+ if (new_state != priv->can.state) {
+ tx_state = (txerr >= rxerr) ? new_state : 0;
+ rx_state = (txerr <= rxerr) ? new_state : 0;
+ can_change_state(priv->netdev, cf,
+ tx_state, rx_state);
+ }
+ } else if (skb) {
priv->can.can_stats.bus_error++;
stats->rx_errors++;
- cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR |
- CAN_ERR_CNT;
+ cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
switch (ecc & SJA1000_ECC_MASK) {
case SJA1000_ECC_BIT:
@@ -286,7 +295,6 @@ static void esd_usb_rx_event(struct esd_usb_net_priv *priv,
cf->data[2] |= CAN_ERR_PROT_STUFF;
break;
default:
- cf->data[3] = ecc & SJA1000_ECC_SEG;
break;
}
@@ -294,20 +302,22 @@ static void esd_usb_rx_event(struct esd_usb_net_priv *priv,
if (!(ecc & SJA1000_ECC_DIR))
cf->data[2] |= CAN_ERR_PROT_TX;
- if (priv->can.state == CAN_STATE_ERROR_WARNING ||
- priv->can.state == CAN_STATE_ERROR_PASSIVE) {
- cf->data[1] = (txerr > rxerr) ?
- CAN_ERR_CRTL_TX_PASSIVE :
- CAN_ERR_CRTL_RX_PASSIVE;
- }
- cf->data[6] = txerr;
- cf->data[7] = rxerr;
+ /* Bit stream position in CAN frame as the error was detected */
+ cf->data[3] = ecc & SJA1000_ECC_SEG;
}
priv->bec.txerr = txerr;
priv->bec.rxerr = rxerr;
- netif_rx(skb);
+ if (skb) {
+ cf->can_id |= CAN_ERR_CNT;
+ cf->data[6] = txerr;
+ cf->data[7] = rxerr;
+
+ netif_rx(skb);
+ } else {
+ stats->rx_dropped++;
+ }
}
}
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c
index 687dd542f7f6..b211b6e283a2 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb.c
@@ -9,10 +9,11 @@
* Many thanks to Klaus Hitschler <klaus.hitschler@gmx.de>
*/
#include <asm/unaligned.h>
+
+#include <linux/ethtool.h>
+#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/usb.h>
-#include <linux/module.h>
-#include <linux/ethtool.h>
#include <linux/can.h>
#include <linux/can/dev.h>
@@ -381,23 +382,42 @@ static int pcan_usb_get_serial(struct peak_usb_device *dev, u32 *serial_number)
}
/*
- * read device id from device
+ * read can channel id from device
*/
-static int pcan_usb_get_device_id(struct peak_usb_device *dev, u32 *device_id)
+static int pcan_usb_get_can_channel_id(struct peak_usb_device *dev, u32 *can_ch_id)
{
u8 args[PCAN_USB_CMD_ARGS_LEN];
int err;
err = pcan_usb_wait_rsp(dev, PCAN_USB_CMD_DEVID, PCAN_USB_GET, args);
if (err)
- netdev_err(dev->netdev, "getting device id failure: %d\n", err);
+ netdev_err(dev->netdev, "getting can channel id failure: %d\n", err);
else
- *device_id = args[0];
+ *can_ch_id = args[0];
return err;
}
+/* set a new CAN channel id in the flash memory of the device */
+static int pcan_usb_set_can_channel_id(struct peak_usb_device *dev, u32 can_ch_id)
+{
+ u8 args[PCAN_USB_CMD_ARGS_LEN];
+
+ /* this kind of device supports 8-bit values only */
+ if (can_ch_id > U8_MAX)
+ return -EINVAL;
+
+ /* during the flash process the device disconnects during ~1.25 s.:
+ * prohibit access when interface is UP
+ */
+ if (dev->netdev->flags & IFF_UP)
+ return -EBUSY;
+
+ args[0] = can_ch_id;
+ return pcan_usb_send_cmd(dev, PCAN_USB_CMD_DEVID, PCAN_USB_SET, args);
+}
+
/*
* update current time ref with received timestamp
*/
@@ -963,9 +983,18 @@ static int pcan_usb_set_phys_id(struct net_device *netdev,
return err;
}
+/* This device only handles 8-bit CAN channel id. */
+static int pcan_usb_get_eeprom_len(struct net_device *netdev)
+{
+ return sizeof(u8);
+}
+
static const struct ethtool_ops pcan_usb_ethtool_ops = {
.set_phys_id = pcan_usb_set_phys_id,
.get_ts_info = pcan_get_ts_info,
+ .get_eeprom_len = pcan_usb_get_eeprom_len,
+ .get_eeprom = peak_usb_get_eeprom,
+ .set_eeprom = peak_usb_set_eeprom,
};
/*
@@ -1017,7 +1046,8 @@ const struct peak_usb_adapter pcan_usb = {
.dev_init = pcan_usb_init,
.dev_set_bus = pcan_usb_write_mode,
.dev_set_bittiming = pcan_usb_set_bittiming,
- .dev_get_device_id = pcan_usb_get_device_id,
+ .dev_get_can_channel_id = pcan_usb_get_can_channel_id,
+ .dev_set_can_channel_id = pcan_usb_set_can_channel_id,
.dev_decode_buf = pcan_usb_decode_buf,
.dev_encode_msg = pcan_usb_encode_msg,
.dev_start = pcan_usb_start,
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
index 1d996d3320fe..d881e1d30183 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
@@ -8,13 +8,15 @@
*
* Many thanks to Klaus Hitschler <klaus.hitschler@gmx.de>
*/
+#include <linux/device.h>
+#include <linux/ethtool.h>
#include <linux/init.h>
-#include <linux/signal.h>
-#include <linux/slab.h>
#include <linux/module.h>
#include <linux/netdevice.h>
+#include <linux/signal.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
#include <linux/usb.h>
-#include <linux/ethtool.h>
#include <linux/can.h>
#include <linux/can/dev.h>
@@ -53,6 +55,26 @@ static const struct usb_device_id peak_usb_table[] = {
MODULE_DEVICE_TABLE(usb, peak_usb_table);
+static ssize_t can_channel_id_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct net_device *netdev = to_net_dev(dev);
+ struct peak_usb_device *peak_dev = netdev_priv(netdev);
+
+ return sysfs_emit(buf, "%08X\n", peak_dev->can_channel_id);
+}
+static DEVICE_ATTR_RO(can_channel_id);
+
+/* mutable to avoid cast in attribute_group */
+static struct attribute *peak_usb_sysfs_attrs[] = {
+ &dev_attr_can_channel_id.attr,
+ NULL,
+};
+
+static const struct attribute_group peak_usb_sysfs_group = {
+ .name = "peak_usb",
+ .attrs = peak_usb_sysfs_attrs,
+};
+
/*
* dump memory
*/
@@ -808,6 +830,86 @@ static const struct net_device_ops peak_usb_netdev_ops = {
.ndo_change_mtu = can_change_mtu,
};
+/* CAN-USB devices generally handle 32-bit CAN channel IDs.
+ * In case one doesn't, then it have to overload this function.
+ */
+int peak_usb_get_eeprom_len(struct net_device *netdev)
+{
+ return sizeof(u32);
+}
+
+/* Every CAN-USB device exports the dev_get_can_channel_id() operation. It is used
+ * here to fill the data buffer with the user defined CAN channel ID.
+ */
+int peak_usb_get_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *eeprom, u8 *data)
+{
+ struct peak_usb_device *dev = netdev_priv(netdev);
+ u32 ch_id;
+ __le32 ch_id_le;
+ int err;
+
+ err = dev->adapter->dev_get_can_channel_id(dev, &ch_id);
+ if (err)
+ return err;
+
+ /* ethtool operates on individual bytes. The byte order of the CAN
+ * channel id in memory depends on the kernel architecture. We
+ * convert the CAN channel id back to the native byte order of the PEAK
+ * device itself to ensure that the order is consistent for all
+ * host architectures.
+ */
+ ch_id_le = cpu_to_le32(ch_id);
+ memcpy(data, (u8 *)&ch_id_le + eeprom->offset, eeprom->len);
+
+ /* update cached value */
+ dev->can_channel_id = ch_id;
+ return err;
+}
+
+/* Every CAN-USB device exports the dev_get_can_channel_id()/dev_set_can_channel_id()
+ * operations. They are used here to set the new user defined CAN channel ID.
+ */
+int peak_usb_set_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *eeprom, u8 *data)
+{
+ struct peak_usb_device *dev = netdev_priv(netdev);
+ u32 ch_id;
+ __le32 ch_id_le;
+ int err;
+
+ /* first, read the current user defined CAN channel ID */
+ err = dev->adapter->dev_get_can_channel_id(dev, &ch_id);
+ if (err) {
+ netdev_err(netdev, "Failed to init CAN channel id (err %d)\n", err);
+ return err;
+ }
+
+ /* do update the value with user given bytes.
+ * ethtool operates on individual bytes. The byte order of the CAN
+ * channel ID in memory depends on the kernel architecture. We
+ * convert the CAN channel ID back to the native byte order of the PEAK
+ * device itself to ensure that the order is consistent for all
+ * host architectures.
+ */
+ ch_id_le = cpu_to_le32(ch_id);
+ memcpy((u8 *)&ch_id_le + eeprom->offset, data, eeprom->len);
+ ch_id = le32_to_cpu(ch_id_le);
+
+ /* flash the new value now */
+ err = dev->adapter->dev_set_can_channel_id(dev, ch_id);
+ if (err) {
+ netdev_err(netdev, "Failed to write new CAN channel id (err %d)\n",
+ err);
+ return err;
+ }
+
+ /* update cached value with the new one */
+ dev->can_channel_id = ch_id;
+
+ return 0;
+}
+
int pcan_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
{
info->so_timestamping =
@@ -881,6 +983,9 @@ static int peak_usb_create_dev(const struct peak_usb_adapter *peak_usb_adapter,
/* add ethtool support */
netdev->ethtool_ops = peak_usb_adapter->ethtool_ops;
+ /* register peak_usb sysfs files */
+ netdev->sysfs_groups[0] = &peak_usb_sysfs_group;
+
init_usb_anchor(&dev->rx_submitted);
init_usb_anchor(&dev->tx_submitted);
@@ -921,12 +1026,11 @@ static int peak_usb_create_dev(const struct peak_usb_adapter *peak_usb_adapter,
goto adap_dev_free;
}
- /* get device number early */
- if (dev->adapter->dev_get_device_id)
- dev->adapter->dev_get_device_id(dev, &dev->device_number);
+ /* get CAN channel id early */
+ dev->adapter->dev_get_can_channel_id(dev, &dev->can_channel_id);
- netdev_info(netdev, "attached to %s channel %u (device %u)\n",
- peak_usb_adapter->name, ctrl_idx, dev->device_number);
+ netdev_info(netdev, "attached to %s channel %u (device 0x%08X)\n",
+ peak_usb_adapter->name, ctrl_idx, dev->can_channel_id);
return 0;
@@ -964,7 +1068,7 @@ static void peak_usb_disconnect(struct usb_interface *intf)
dev->state &= ~PCAN_USB_STATE_CONNECTED;
strscpy(name, netdev->name, IFNAMSIZ);
- unregister_netdev(netdev);
+ unregister_candev(netdev);
kfree(dev->cmd_buf);
dev->next_siblings = NULL;
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.h b/drivers/net/can/usb/peak_usb/pcan_usb_core.h
index f6bdd8b3f290..980e315186cf 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.h
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.h
@@ -60,7 +60,8 @@ struct peak_usb_adapter {
int (*dev_set_data_bittiming)(struct peak_usb_device *dev,
struct can_bittiming *bt);
int (*dev_set_bus)(struct peak_usb_device *dev, u8 onoff);
- int (*dev_get_device_id)(struct peak_usb_device *dev, u32 *device_id);
+ int (*dev_get_can_channel_id)(struct peak_usb_device *dev, u32 *can_ch_id);
+ int (*dev_set_can_channel_id)(struct peak_usb_device *dev, u32 can_ch_id);
int (*dev_decode_buf)(struct peak_usb_device *dev, struct urb *urb);
int (*dev_encode_msg)(struct peak_usb_device *dev, struct sk_buff *skb,
u8 *obuf, size_t *size);
@@ -122,7 +123,8 @@ struct peak_usb_device {
u8 *cmd_buf;
struct usb_anchor rx_submitted;
- u32 device_number;
+ /* equivalent to the device ID in the Windows API */
+ u32 can_channel_id;
u8 device_rev;
u8 ep_msg_in;
@@ -147,4 +149,10 @@ void peak_usb_async_complete(struct urb *urb);
void peak_usb_restart_complete(struct peak_usb_device *dev);
int pcan_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info);
+/* common 32-bit CAN channel ID ethtool management */
+int peak_usb_get_eeprom_len(struct net_device *netdev);
+int peak_usb_get_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *eeprom, u8 *data);
+int peak_usb_set_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *eeprom, u8 *data);
#endif
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
index 2ea1500df393..4d85b29a17b7 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
@@ -4,10 +4,10 @@
*
* Copyright (C) 2013-2014 Stephane Grosjean <s.grosjean@peak-system.com>
*/
+#include <linux/ethtool.h>
+#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/usb.h>
-#include <linux/module.h>
-#include <linux/ethtool.h>
#include <linux/can.h>
#include <linux/can/dev.h>
@@ -147,6 +147,15 @@ struct __packed pcan_ufd_ovr_msg {
u8 unused[3];
};
+#define PCAN_UFD_CMD_DEVID_SET 0x81
+
+struct __packed pcan_ufd_device_id {
+ __le16 opcode_channel;
+
+ u16 unused;
+ __le32 device_id;
+};
+
static inline int pufd_omsg_get_channel(struct pcan_ufd_ovr_msg *om)
{
return om->channel & 0xf;
@@ -234,6 +243,15 @@ static int pcan_usb_fd_send_cmd(struct peak_usb_device *dev, void *cmd_tail)
return err;
}
+static int pcan_usb_fd_read_fwinfo(struct peak_usb_device *dev,
+ struct pcan_ufd_fw_info *fw_info)
+{
+ return pcan_usb_pro_send_req(dev, PCAN_USBPRO_REQ_INFO,
+ PCAN_USBPRO_INFO_FW,
+ fw_info,
+ sizeof(*fw_info));
+}
+
/* build the commands list in the given buffer, to enter operational mode */
static int pcan_usb_fd_build_restart_cmd(struct peak_usb_device *dev, u8 *buf)
{
@@ -434,6 +452,34 @@ static int pcan_usb_fd_set_bittiming_fast(struct peak_usb_device *dev,
return pcan_usb_fd_send_cmd(dev, ++cmd);
}
+/* read user CAN channel id from device */
+static int pcan_usb_fd_get_can_channel_id(struct peak_usb_device *dev,
+ u32 *can_ch_id)
+{
+ int err;
+ struct pcan_usb_fd_if *usb_if = pcan_usb_fd_dev_if(dev);
+
+ err = pcan_usb_fd_read_fwinfo(dev, &usb_if->fw_info);
+ if (err)
+ return err;
+
+ *can_ch_id = le32_to_cpu(usb_if->fw_info.dev_id[dev->ctrl_idx]);
+ return err;
+}
+
+/* set a new CAN channel id in the flash memory of the device */
+static int pcan_usb_fd_set_can_channel_id(struct peak_usb_device *dev, u32 can_ch_id)
+{
+ struct pcan_ufd_device_id *cmd = pcan_usb_fd_cmd_buffer(dev);
+
+ cmd->opcode_channel = pucan_cmd_opcode_channel(dev->ctrl_idx,
+ PCAN_UFD_CMD_DEVID_SET);
+ cmd->device_id = cpu_to_le32(can_ch_id);
+
+ /* send the command */
+ return pcan_usb_fd_send_cmd(dev, ++cmd);
+}
+
/* handle restart but in asynchronously way
* (uses PCAN-USB Pro code to complete asynchronous request)
*/
@@ -907,10 +953,7 @@ static int pcan_usb_fd_init(struct peak_usb_device *dev)
fw_info = &pdev->usb_if->fw_info;
- err = pcan_usb_pro_send_req(dev, PCAN_USBPRO_REQ_INFO,
- PCAN_USBPRO_INFO_FW,
- fw_info,
- sizeof(*fw_info));
+ err = pcan_usb_fd_read_fwinfo(dev, fw_info);
if (err) {
dev_err(dev->netdev->dev.parent,
"unable to read %s firmware info (err %d)\n",
@@ -972,7 +1015,7 @@ static int pcan_usb_fd_init(struct peak_usb_device *dev)
}
pdev->usb_if->dev[dev->ctrl_idx] = dev;
- dev->device_number =
+ dev->can_channel_id =
le32_to_cpu(pdev->usb_if->fw_info.dev_id[dev->ctrl_idx]);
/* if vendor rsp is of type 2, then it contains EP numbers to
@@ -1081,6 +1124,9 @@ static int pcan_usb_fd_set_phys_id(struct net_device *netdev,
static const struct ethtool_ops pcan_usb_fd_ethtool_ops = {
.set_phys_id = pcan_usb_fd_set_phys_id,
.get_ts_info = pcan_get_ts_info,
+ .get_eeprom_len = peak_usb_get_eeprom_len,
+ .get_eeprom = peak_usb_get_eeprom,
+ .set_eeprom = peak_usb_set_eeprom,
};
/* describes the PCAN-USB FD adapter */
@@ -1148,6 +1194,8 @@ const struct peak_usb_adapter pcan_usb_fd = {
.dev_set_bus = pcan_usb_fd_set_bus,
.dev_set_bittiming = pcan_usb_fd_set_bittiming_slow,
.dev_set_data_bittiming = pcan_usb_fd_set_bittiming_fast,
+ .dev_get_can_channel_id = pcan_usb_fd_get_can_channel_id,
+ .dev_set_can_channel_id = pcan_usb_fd_set_can_channel_id,
.dev_decode_buf = pcan_usb_fd_decode_buf,
.dev_start = pcan_usb_fd_start,
.dev_stop = pcan_usb_fd_stop,
@@ -1222,6 +1270,8 @@ const struct peak_usb_adapter pcan_usb_chip = {
.dev_set_bus = pcan_usb_fd_set_bus,
.dev_set_bittiming = pcan_usb_fd_set_bittiming_slow,
.dev_set_data_bittiming = pcan_usb_fd_set_bittiming_fast,
+ .dev_get_can_channel_id = pcan_usb_fd_get_can_channel_id,
+ .dev_set_can_channel_id = pcan_usb_fd_set_can_channel_id,
.dev_decode_buf = pcan_usb_fd_decode_buf,
.dev_start = pcan_usb_fd_start,
.dev_stop = pcan_usb_fd_stop,
@@ -1296,6 +1346,8 @@ const struct peak_usb_adapter pcan_usb_pro_fd = {
.dev_set_bus = pcan_usb_fd_set_bus,
.dev_set_bittiming = pcan_usb_fd_set_bittiming_slow,
.dev_set_data_bittiming = pcan_usb_fd_set_bittiming_fast,
+ .dev_get_can_channel_id = pcan_usb_fd_get_can_channel_id,
+ .dev_set_can_channel_id = pcan_usb_fd_set_can_channel_id,
.dev_decode_buf = pcan_usb_fd_decode_buf,
.dev_start = pcan_usb_fd_start,
.dev_stop = pcan_usb_fd_stop,
@@ -1370,6 +1422,8 @@ const struct peak_usb_adapter pcan_usb_x6 = {
.dev_set_bus = pcan_usb_fd_set_bus,
.dev_set_bittiming = pcan_usb_fd_set_bittiming_slow,
.dev_set_data_bittiming = pcan_usb_fd_set_bittiming_fast,
+ .dev_get_can_channel_id = pcan_usb_fd_get_can_channel_id,
+ .dev_set_can_channel_id = pcan_usb_fd_set_can_channel_id,
.dev_decode_buf = pcan_usb_fd_decode_buf,
.dev_start = pcan_usb_fd_start,
.dev_stop = pcan_usb_fd_stop,
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
index 5d8f6a40bb2c..f736196383ac 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
@@ -6,10 +6,10 @@
* Copyright (C) 2003-2011 PEAK System-Technik GmbH
* Copyright (C) 2011-2012 Stephane Grosjean <s.grosjean@peak-system.com>
*/
+#include <linux/ethtool.h>
+#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/usb.h>
-#include <linux/module.h>
-#include <linux/ethtool.h>
#include <linux/can.h>
#include <linux/can/dev.h>
@@ -76,6 +76,7 @@ static u16 pcan_usb_pro_sizeof_rec[256] = {
[PCAN_USBPRO_SETFILTR] = sizeof(struct pcan_usb_pro_filter),
[PCAN_USBPRO_SETTS] = sizeof(struct pcan_usb_pro_setts),
[PCAN_USBPRO_GETDEVID] = sizeof(struct pcan_usb_pro_devid),
+ [PCAN_USBPRO_SETDEVID] = sizeof(struct pcan_usb_pro_devid),
[PCAN_USBPRO_SETLED] = sizeof(struct pcan_usb_pro_setled),
[PCAN_USBPRO_RXMSG8] = sizeof(struct pcan_usb_pro_rxmsg),
[PCAN_USBPRO_RXMSG4] = sizeof(struct pcan_usb_pro_rxmsg) - 4,
@@ -149,6 +150,7 @@ static int pcan_msg_add_rec(struct pcan_usb_pro_msg *pm, int id, ...)
case PCAN_USBPRO_SETBTR:
case PCAN_USBPRO_GETDEVID:
+ case PCAN_USBPRO_SETDEVID:
*pc++ = va_arg(ap, int);
pc += 2;
*(__le32 *)pc = cpu_to_le32(va_arg(ap, u32));
@@ -419,8 +421,8 @@ static int pcan_usb_pro_set_led(struct peak_usb_device *dev, u8 mode,
return pcan_usb_pro_send_cmd(dev, &um);
}
-static int pcan_usb_pro_get_device_id(struct peak_usb_device *dev,
- u32 *device_id)
+static int pcan_usb_pro_get_can_channel_id(struct peak_usb_device *dev,
+ u32 *can_ch_id)
{
struct pcan_usb_pro_devid *pdn;
struct pcan_usb_pro_msg um;
@@ -439,11 +441,23 @@ static int pcan_usb_pro_get_device_id(struct peak_usb_device *dev,
return err;
pdn = (struct pcan_usb_pro_devid *)pc;
- *device_id = le32_to_cpu(pdn->dev_num);
+ *can_ch_id = le32_to_cpu(pdn->dev_num);
return err;
}
+static int pcan_usb_pro_set_can_channel_id(struct peak_usb_device *dev,
+ u32 can_ch_id)
+{
+ struct pcan_usb_pro_msg um;
+
+ pcan_msg_init_empty(&um, dev->cmd_buf, PCAN_USB_MAX_CMD_LEN);
+ pcan_msg_add_rec(&um, PCAN_USBPRO_SETDEVID, dev->ctrl_idx,
+ can_ch_id);
+
+ return pcan_usb_pro_send_cmd(dev, &um);
+}
+
static int pcan_usb_pro_set_bittiming(struct peak_usb_device *dev,
struct can_bittiming *bt)
{
@@ -1023,6 +1037,9 @@ static int pcan_usb_pro_set_phys_id(struct net_device *netdev,
static const struct ethtool_ops pcan_usb_pro_ethtool_ops = {
.set_phys_id = pcan_usb_pro_set_phys_id,
.get_ts_info = pcan_get_ts_info,
+ .get_eeprom_len = peak_usb_get_eeprom_len,
+ .get_eeprom = peak_usb_get_eeprom,
+ .set_eeprom = peak_usb_set_eeprom,
};
/*
@@ -1076,7 +1093,8 @@ const struct peak_usb_adapter pcan_usb_pro = {
.dev_free = pcan_usb_pro_free,
.dev_set_bus = pcan_usb_pro_set_bus,
.dev_set_bittiming = pcan_usb_pro_set_bittiming,
- .dev_get_device_id = pcan_usb_pro_get_device_id,
+ .dev_get_can_channel_id = pcan_usb_pro_get_can_channel_id,
+ .dev_set_can_channel_id = pcan_usb_pro_set_can_channel_id,
.dev_decode_buf = pcan_usb_pro_decode_buf,
.dev_encode_msg = pcan_usb_pro_encode_msg,
.dev_start = pcan_usb_pro_start,
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.h b/drivers/net/can/usb/peak_usb/pcan_usb_pro.h
index a34e0fc021c9..28e740af905d 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.h
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.h
@@ -62,6 +62,7 @@ struct __packed pcan_usb_pro_fwinfo {
#define PCAN_USBPRO_SETBTR 0x02
#define PCAN_USBPRO_SETBUSACT 0x04
#define PCAN_USBPRO_SETSILENT 0x05
+#define PCAN_USBPRO_SETDEVID 0x06
#define PCAN_USBPRO_SETFILTR 0x0a
#define PCAN_USBPRO_SETTS 0x10
#define PCAN_USBPRO_GETDEVID 0x12
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
index c26755f662c1..f6f3b43dfb06 100644
--- a/drivers/net/dsa/Kconfig
+++ b/drivers/net/dsa/Kconfig
@@ -35,12 +35,13 @@ config NET_DSA_LANTIQ_GSWIP
the xrx200 / VR9 SoC.
config NET_DSA_MT7530
- tristate "MediaTek MT753x and MT7621 Ethernet switch support"
+ tristate "MediaTek MT7530 and MT7531 Ethernet switch support"
select NET_DSA_TAG_MTK
select MEDIATEK_GE_PHY
help
- This enables support for the MediaTek MT7530, MT7531, and MT7621
- Ethernet switch chips.
+ This enables support for the MediaTek MT7530 and MT7531 Ethernet
+ switch chips. Multi-chip module MT7530 in MT7621AT, MT7621DAT,
+ MT7621ST and MT7623AI SoCs is supported.
config NET_DSA_MV88E6060
tristate "Marvell 88E6060 ethernet switch chip support"
diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c
index 2e270b479143..cbe831875347 100644
--- a/drivers/net/dsa/lan9303-core.c
+++ b/drivers/net/dsa/lan9303-core.c
@@ -15,6 +15,9 @@
#include "lan9303.h"
+/* For the LAN9303 and LAN9354, only port 0 is an XMII port. */
+#define IS_PORT_XMII(port) ((port) == 0)
+
#define LAN9303_NUM_PORTS 3
/* 13.2 System Control and Status Registers
@@ -50,6 +53,9 @@
#define LAN9303_MANUAL_FC_1 0x68
#define LAN9303_MANUAL_FC_2 0x69
#define LAN9303_MANUAL_FC_0 0x6a
+# define LAN9303_BP_EN BIT(6)
+# define LAN9303_RX_FC_EN BIT(2)
+# define LAN9303_TX_FC_EN BIT(1)
#define LAN9303_SWITCH_CSR_DATA 0x6b
#define LAN9303_SWITCH_CSR_CMD 0x6c
#define LAN9303_SWITCH_CSR_CMD_BUSY BIT(31)
@@ -225,6 +231,13 @@ const struct regmap_access_table lan9303_register_set = {
};
EXPORT_SYMBOL(lan9303_register_set);
+/* Flow Control registers indexed by port number */
+static unsigned int flow_ctl_reg[] = {
+ LAN9303_MANUAL_FC_0,
+ LAN9303_MANUAL_FC_1,
+ LAN9303_MANUAL_FC_2
+};
+
static int lan9303_read(struct regmap *regmap, unsigned int offset, u32 *reg)
{
int ret, i;
@@ -902,6 +915,7 @@ static int lan9303_setup(struct dsa_switch *ds)
{
struct lan9303 *chip = ds->priv;
int ret;
+ u32 reg;
/* Make sure that port 0 is the cpu port */
if (!dsa_is_cpu_port(ds, 0)) {
@@ -909,6 +923,17 @@ static int lan9303_setup(struct dsa_switch *ds)
return -EINVAL;
}
+ /* Virtual Phy: Remove Turbo 200Mbit mode */
+ ret = lan9303_read(chip->regmap, LAN9303_VIRT_SPECIAL_CTRL, &reg);
+ if (ret)
+ return (ret);
+
+ /* Clear the TURBO Mode bit if it was set. */
+ if (reg & LAN9303_VIRT_SPECIAL_TURBO) {
+ reg &= ~LAN9303_VIRT_SPECIAL_TURBO;
+ regmap_write(chip->regmap, LAN9303_VIRT_SPECIAL_CTRL, reg);
+ }
+
ret = lan9303_setup_tagging(chip);
if (ret)
dev_err(chip->dev, "failed to setup port tagging %d\n", ret);
@@ -1049,42 +1074,6 @@ static int lan9303_phy_write(struct dsa_switch *ds, int phy, int regnum,
return chip->ops->phy_write(chip, phy, regnum, val);
}
-static void lan9303_adjust_link(struct dsa_switch *ds, int port,
- struct phy_device *phydev)
-{
- struct lan9303 *chip = ds->priv;
- int ctl;
-
- if (!phy_is_pseudo_fixed_link(phydev))
- return;
-
- ctl = lan9303_phy_read(ds, port, MII_BMCR);
-
- ctl &= ~BMCR_ANENABLE;
-
- if (phydev->speed == SPEED_100)
- ctl |= BMCR_SPEED100;
- else if (phydev->speed == SPEED_10)
- ctl &= ~BMCR_SPEED100;
- else
- dev_err(ds->dev, "unsupported speed: %d\n", phydev->speed);
-
- if (phydev->duplex == DUPLEX_FULL)
- ctl |= BMCR_FULLDPLX;
- else
- ctl &= ~BMCR_FULLDPLX;
-
- lan9303_phy_write(ds, port, MII_BMCR, ctl);
-
- if (port == chip->phy_addr_base) {
- /* Virtual Phy: Remove Turbo 200Mbit mode */
- lan9303_read(chip->regmap, LAN9303_VIRT_SPECIAL_CTRL, &ctl);
-
- ctl &= ~LAN9303_VIRT_SPECIAL_TURBO;
- regmap_write(chip->regmap, LAN9303_VIRT_SPECIAL_CTRL, ctl);
- }
-}
-
static int lan9303_port_enable(struct dsa_switch *ds, int port,
struct phy_device *phy)
{
@@ -1281,26 +1270,96 @@ static int lan9303_port_mdb_del(struct dsa_switch *ds, int port,
return 0;
}
+static void lan9303_phylink_get_caps(struct dsa_switch *ds, int port,
+ struct phylink_config *config)
+{
+ struct lan9303 *chip = ds->priv;
+
+ dev_dbg(chip->dev, "%s(%d) entered.", __func__, port);
+
+ config->mac_capabilities = MAC_10 | MAC_100 | MAC_ASYM_PAUSE |
+ MAC_SYM_PAUSE;
+
+ if (port == 0) {
+ __set_bit(PHY_INTERFACE_MODE_RMII,
+ config->supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_MII,
+ config->supported_interfaces);
+ } else {
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL,
+ config->supported_interfaces);
+ /* Compatibility for phylib's default interface type when the
+ * phy-mode property is absent
+ */
+ __set_bit(PHY_INTERFACE_MODE_GMII,
+ config->supported_interfaces);
+ }
+
+ /* This driver does not make use of the speed, duplex, pause or the
+ * advertisement in its mac_config, so it is safe to mark this driver
+ * as non-legacy.
+ */
+ config->legacy_pre_march2020 = false;
+}
+
+static void lan9303_phylink_mac_link_up(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ phy_interface_t interface,
+ struct phy_device *phydev, int speed,
+ int duplex, bool tx_pause,
+ bool rx_pause)
+{
+ struct lan9303 *chip = ds->priv;
+ u32 ctl;
+ u32 reg;
+
+ /* On this device, we are only interested in doing something here if
+ * this is the xMII port. All other ports are 10/100 phys using MDIO
+ * to control there link settings.
+ */
+ if (!IS_PORT_XMII(port))
+ return;
+
+ /* Disable auto-negotiation and force the speed/duplex settings. */
+ ctl = lan9303_phy_read(ds, port, MII_BMCR);
+ ctl &= ~(BMCR_ANENABLE | BMCR_SPEED100 | BMCR_FULLDPLX);
+ if (speed == SPEED_100)
+ ctl |= BMCR_SPEED100;
+ if (duplex == DUPLEX_FULL)
+ ctl |= BMCR_FULLDPLX;
+ lan9303_phy_write(ds, port, MII_BMCR, ctl);
+
+ /* Force the flow control settings. */
+ lan9303_read(chip->regmap, flow_ctl_reg[port], &reg);
+ reg &= ~(LAN9303_BP_EN | LAN9303_RX_FC_EN | LAN9303_TX_FC_EN);
+ if (rx_pause)
+ reg |= (LAN9303_RX_FC_EN | LAN9303_BP_EN);
+ if (tx_pause)
+ reg |= LAN9303_TX_FC_EN;
+ regmap_write(chip->regmap, flow_ctl_reg[port], reg);
+}
+
static const struct dsa_switch_ops lan9303_switch_ops = {
- .get_tag_protocol = lan9303_get_tag_protocol,
- .setup = lan9303_setup,
- .get_strings = lan9303_get_strings,
- .phy_read = lan9303_phy_read,
- .phy_write = lan9303_phy_write,
- .adjust_link = lan9303_adjust_link,
- .get_ethtool_stats = lan9303_get_ethtool_stats,
- .get_sset_count = lan9303_get_sset_count,
- .port_enable = lan9303_port_enable,
- .port_disable = lan9303_port_disable,
- .port_bridge_join = lan9303_port_bridge_join,
- .port_bridge_leave = lan9303_port_bridge_leave,
- .port_stp_state_set = lan9303_port_stp_state_set,
- .port_fast_age = lan9303_port_fast_age,
- .port_fdb_add = lan9303_port_fdb_add,
- .port_fdb_del = lan9303_port_fdb_del,
- .port_fdb_dump = lan9303_port_fdb_dump,
- .port_mdb_add = lan9303_port_mdb_add,
- .port_mdb_del = lan9303_port_mdb_del,
+ .get_tag_protocol = lan9303_get_tag_protocol,
+ .setup = lan9303_setup,
+ .get_strings = lan9303_get_strings,
+ .phy_read = lan9303_phy_read,
+ .phy_write = lan9303_phy_write,
+ .phylink_get_caps = lan9303_phylink_get_caps,
+ .phylink_mac_link_up = lan9303_phylink_mac_link_up,
+ .get_ethtool_stats = lan9303_get_ethtool_stats,
+ .get_sset_count = lan9303_get_sset_count,
+ .port_enable = lan9303_port_enable,
+ .port_disable = lan9303_port_disable,
+ .port_bridge_join = lan9303_port_bridge_join,
+ .port_bridge_leave = lan9303_port_bridge_leave,
+ .port_stp_state_set = lan9303_port_stp_state_set,
+ .port_fast_age = lan9303_port_fast_age,
+ .port_fdb_add = lan9303_port_fdb_add,
+ .port_fdb_del = lan9303_port_fdb_del,
+ .port_fdb_dump = lan9303_port_fdb_dump,
+ .port_mdb_add = lan9303_port_mdb_add,
+ .port_mdb_del = lan9303_port_mdb_del,
};
static int lan9303_register_switch(struct lan9303 *chip)
diff --git a/drivers/net/dsa/microchip/Kconfig b/drivers/net/dsa/microchip/Kconfig
index 913f83ef013c..394ca8678d2b 100644
--- a/drivers/net/dsa/microchip/Kconfig
+++ b/drivers/net/dsa/microchip/Kconfig
@@ -22,6 +22,16 @@ config NET_DSA_MICROCHIP_KSZ_SPI
help
Select to enable support for registering switches configured through SPI.
+config NET_DSA_MICROCHIP_KSZ_PTP
+ bool "Support for the PTP clock on the KSZ9563/LAN937x Ethernet Switch"
+ depends on NET_DSA_MICROCHIP_KSZ_COMMON && PTP_1588_CLOCK
+ depends on NET_DSA_MICROCHIP_KSZ_COMMON=m || PTP_1588_CLOCK=y
+ help
+ Select to enable support for timestamping & PTP clock manipulation in
+ KSZ8563/KSZ9563/LAN937x series of switches. KSZ9563/KSZ8563 supports
+ only one step timestamping. LAN937x switch supports both one step and
+ two step timestamping.
+
config NET_DSA_MICROCHIP_KSZ8863_SMI
tristate "KSZ series SMI connected switch driver"
depends on NET_DSA_MICROCHIP_KSZ_COMMON
diff --git a/drivers/net/dsa/microchip/Makefile b/drivers/net/dsa/microchip/Makefile
index 28873559efc2..48360cc9fc68 100644
--- a/drivers/net/dsa/microchip/Makefile
+++ b/drivers/net/dsa/microchip/Makefile
@@ -4,6 +4,11 @@ ksz_switch-objs := ksz_common.o
ksz_switch-objs += ksz9477.o
ksz_switch-objs += ksz8795.o
ksz_switch-objs += lan937x_main.o
+
+ifdef CONFIG_NET_DSA_MICROCHIP_KSZ_PTP
+ksz_switch-objs += ksz_ptp.o
+endif
+
obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ9477_I2C) += ksz9477_i2c.o
obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ_SPI) += ksz_spi.o
obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ8863_SMI) += ksz8863_smi.o
diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c
index 6178a96e389f..bf13d47c26cf 100644
--- a/drivers/net/dsa/microchip/ksz9477.c
+++ b/drivers/net/dsa/microchip/ksz9477.c
@@ -980,6 +980,22 @@ int ksz9477_set_ageing_time(struct ksz_device *dev, unsigned int msecs)
return ksz_write8(dev, REG_SW_LUE_CTRL_0, value);
}
+void ksz9477_port_queue_split(struct ksz_device *dev, int port)
+{
+ u8 data;
+
+ if (dev->info->num_tx_queues == 8)
+ data = PORT_EIGHT_QUEUE;
+ else if (dev->info->num_tx_queues == 4)
+ data = PORT_FOUR_QUEUE;
+ else if (dev->info->num_tx_queues == 2)
+ data = PORT_TWO_QUEUE;
+ else
+ data = PORT_SINGLE_QUEUE;
+
+ ksz_prmw8(dev, port, REG_PORT_CTRL_0, PORT_QUEUE_SPLIT_MASK, data);
+}
+
void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port)
{
struct dsa_switch *ds = dev->ds;
@@ -991,6 +1007,8 @@ void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port)
ksz_port_cfg(dev, port, REG_PORT_CTRL_0, PORT_TAIL_TAG_ENABLE,
true);
+ ksz9477_port_queue_split(dev, port);
+
ksz_port_cfg(dev, port, REG_PORT_CTRL_0, PORT_MAC_LOOPBACK, false);
/* set back pressure */
@@ -1166,6 +1184,13 @@ u32 ksz9477_get_port_addr(int port, int offset)
return PORT_CTRL_ADDR(port, offset);
}
+int ksz9477_tc_cbs_set_cinc(struct ksz_device *dev, int port, u32 val)
+{
+ val = val >> 8;
+
+ return ksz_pwrite16(dev, port, REG_PORT_MTI_CREDIT_INCREMENT, val);
+}
+
int ksz9477_switch_init(struct ksz_device *dev)
{
u8 data8;
diff --git a/drivers/net/dsa/microchip/ksz9477.h b/drivers/net/dsa/microchip/ksz9477.h
index 7c5bb3032772..b6f7e3c46e3f 100644
--- a/drivers/net/dsa/microchip/ksz9477.h
+++ b/drivers/net/dsa/microchip/ksz9477.h
@@ -51,10 +51,12 @@ int ksz9477_mdb_del(struct ksz_device *dev, int port,
const struct switchdev_obj_port_mdb *mdb, struct dsa_db db);
int ksz9477_change_mtu(struct ksz_device *dev, int port, int mtu);
void ksz9477_config_cpu_port(struct dsa_switch *ds);
+int ksz9477_tc_cbs_set_cinc(struct ksz_device *dev, int port, u32 val);
int ksz9477_enable_stp_addr(struct ksz_device *dev);
int ksz9477_reset_switch(struct ksz_device *dev);
int ksz9477_dsa_init(struct ksz_device *dev);
int ksz9477_switch_init(struct ksz_device *dev);
void ksz9477_switch_exit(struct ksz_device *dev);
+void ksz9477_port_queue_split(struct ksz_device *dev, int port);
#endif
diff --git a/drivers/net/dsa/microchip/ksz9477_reg.h b/drivers/net/dsa/microchip/ksz9477_reg.h
index cc457fa64939..cba3dba58bc3 100644
--- a/drivers/net/dsa/microchip/ksz9477_reg.h
+++ b/drivers/net/dsa/microchip/ksz9477_reg.h
@@ -850,7 +850,11 @@
#define PORT_FORCE_TX_FLOW_CTRL BIT(4)
#define PORT_FORCE_RX_FLOW_CTRL BIT(3)
#define PORT_TAIL_TAG_ENABLE BIT(2)
-#define PORT_QUEUE_SPLIT_ENABLE 0x3
+#define PORT_QUEUE_SPLIT_MASK GENMASK(1, 0)
+#define PORT_EIGHT_QUEUE 0x3
+#define PORT_FOUR_QUEUE 0x2
+#define PORT_TWO_QUEUE 0x1
+#define PORT_SINGLE_QUEUE 0x0
#define REG_PORT_CTRL_1 0x0021
@@ -1480,33 +1484,10 @@
/* 9 - Shaping */
-#define REG_PORT_MTI_QUEUE_INDEX__4 0x0900
+#define REG_PORT_MTI_QUEUE_CTRL_0__4 0x0904
-#define REG_PORT_MTI_QUEUE_CTRL_0__4 0x0904
+#define MTI_PVID_REPLACE BIT(0)
-#define MTI_PVID_REPLACE BIT(0)
-
-#define REG_PORT_MTI_QUEUE_CTRL_0 0x0914
-
-#define MTI_SCHEDULE_MODE_M 0x3
-#define MTI_SCHEDULE_MODE_S 6
-#define MTI_SCHEDULE_STRICT_PRIO 0
-#define MTI_SCHEDULE_WRR 2
-#define MTI_SHAPING_M 0x3
-#define MTI_SHAPING_S 4
-#define MTI_SHAPING_OFF 0
-#define MTI_SHAPING_SRP 1
-#define MTI_SHAPING_TIME_AWARE 2
-
-#define REG_PORT_MTI_QUEUE_CTRL_1 0x0915
-
-#define MTI_TX_RATIO_M (BIT(7) - 1)
-
-#define REG_PORT_MTI_QUEUE_CTRL_2__2 0x0916
-#define REG_PORT_MTI_HI_WATER_MARK 0x0916
-#define REG_PORT_MTI_QUEUE_CTRL_3__2 0x0918
-#define REG_PORT_MTI_LO_WATER_MARK 0x0918
-#define REG_PORT_MTI_QUEUE_CTRL_4__2 0x091A
#define REG_PORT_MTI_CREDIT_INCREMENT 0x091A
/* A - QM */
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index 9b20c2ee6d62..729b36eeb2c4 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -6,6 +6,7 @@
*/
#include <linux/delay.h>
+#include <linux/dsa/ksz_common.h>
#include <linux/export.h>
#include <linux/gpio/consumer.h>
#include <linux/kernel.h>
@@ -22,13 +23,19 @@
#include <linux/of_net.h>
#include <linux/micrel_phy.h>
#include <net/dsa.h>
+#include <net/pkt_cls.h>
#include <net/switchdev.h>
#include "ksz_common.h"
+#include "ksz_ptp.h"
#include "ksz8.h"
#include "ksz9477.h"
#include "lan937x.h"
+#define KSZ_CBS_ENABLE ((MTI_SCHEDULE_STRICT_PRIO << MTI_SCHEDULE_MODE_S) | \
+ (MTI_SHAPING_SRP << MTI_SHAPING_S))
+#define KSZ_CBS_DISABLE ((MTI_SCHEDULE_WRR << MTI_SCHEDULE_MODE_S) |\
+ (MTI_SHAPING_OFF << MTI_SHAPING_S))
#define MIB_COUNTER_NUM 0x20
struct ksz_stats_raw {
@@ -248,6 +255,7 @@ static const struct ksz_dev_ops ksz9477_dev_ops = {
.change_mtu = ksz9477_change_mtu,
.phylink_mac_link_up = ksz9477_phylink_mac_link_up,
.config_cpu_port = ksz9477_config_cpu_port,
+ .tc_cbs_set_cinc = ksz9477_tc_cbs_set_cinc,
.enable_stp_addr = ksz9477_enable_stp_addr,
.reset = ksz9477_reset_switch,
.init = ksz9477_switch_init,
@@ -284,6 +292,7 @@ static const struct ksz_dev_ops lan937x_dev_ops = {
.change_mtu = lan937x_change_mtu,
.phylink_mac_link_up = ksz9477_phylink_mac_link_up,
.config_cpu_port = lan937x_config_cpu_port,
+ .tc_cbs_set_cinc = lan937x_tc_cbs_set_cinc,
.enable_stp_addr = ksz9477_enable_stp_addr,
.reset = lan937x_reset_switch,
.init = lan937x_switch_init,
@@ -1078,6 +1087,8 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.cpu_ports = 0x07, /* can be configured as cpu port */
.port_cnt = 3, /* total port count */
.port_nirqs = 3,
+ .num_tx_queues = 4,
+ .tc_cbs_supported = true,
.ops = &ksz9477_dev_ops,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
@@ -1104,6 +1115,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.num_statics = 8,
.cpu_ports = 0x10, /* can be configured as cpu port */
.port_cnt = 5, /* total cpu and user ports */
+ .num_tx_queues = 4,
.ops = &ksz8_dev_ops,
.ksz87xx_eee_link_erratum = true,
.mib_names = ksz9477_mib_names,
@@ -1142,6 +1154,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.num_statics = 8,
.cpu_ports = 0x10, /* can be configured as cpu port */
.port_cnt = 5, /* total cpu and user ports */
+ .num_tx_queues = 4,
.ops = &ksz8_dev_ops,
.ksz87xx_eee_link_erratum = true,
.mib_names = ksz9477_mib_names,
@@ -1166,6 +1179,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.num_statics = 8,
.cpu_ports = 0x10, /* can be configured as cpu port */
.port_cnt = 5, /* total cpu and user ports */
+ .num_tx_queues = 4,
.ops = &ksz8_dev_ops,
.ksz87xx_eee_link_erratum = true,
.mib_names = ksz9477_mib_names,
@@ -1190,6 +1204,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.num_statics = 8,
.cpu_ports = 0x4, /* can be configured as cpu port */
.port_cnt = 3,
+ .num_tx_queues = 4,
.ops = &ksz8_dev_ops,
.mib_names = ksz88xx_mib_names,
.mib_cnt = ARRAY_SIZE(ksz88xx_mib_names),
@@ -1211,6 +1226,8 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.cpu_ports = 0x7F, /* can be configured as cpu port */
.port_cnt = 7, /* total physical port count */
.port_nirqs = 4,
+ .num_tx_queues = 4,
+ .tc_cbs_supported = true,
.ops = &ksz9477_dev_ops,
.phy_errata_9477 = true,
.mib_names = ksz9477_mib_names,
@@ -1243,6 +1260,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.cpu_ports = 0x3F, /* can be configured as cpu port */
.port_cnt = 6, /* total physical port count */
.port_nirqs = 2,
+ .num_tx_queues = 4,
.ops = &ksz9477_dev_ops,
.phy_errata_9477 = true,
.mib_names = ksz9477_mib_names,
@@ -1275,6 +1293,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.cpu_ports = 0x7F, /* can be configured as cpu port */
.port_cnt = 7, /* total physical port count */
.port_nirqs = 2,
+ .num_tx_queues = 4,
.ops = &ksz9477_dev_ops,
.phy_errata_9477 = true,
.mib_names = ksz9477_mib_names,
@@ -1305,6 +1324,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.cpu_ports = 0x07, /* can be configured as cpu port */
.port_cnt = 3, /* total port count */
.port_nirqs = 2,
+ .num_tx_queues = 4,
.ops = &ksz9477_dev_ops,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
@@ -1330,6 +1350,8 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.cpu_ports = 0x07, /* can be configured as cpu port */
.port_cnt = 3, /* total port count */
.port_nirqs = 3,
+ .num_tx_queues = 4,
+ .tc_cbs_supported = true,
.ops = &ksz9477_dev_ops,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
@@ -1355,6 +1377,8 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.cpu_ports = 0x7F, /* can be configured as cpu port */
.port_cnt = 7, /* total physical port count */
.port_nirqs = 3,
+ .num_tx_queues = 4,
+ .tc_cbs_supported = true,
.ops = &ksz9477_dev_ops,
.phy_errata_9477 = true,
.mib_names = ksz9477_mib_names,
@@ -1385,6 +1409,8 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.cpu_ports = 0x10, /* can be configured as cpu port */
.port_cnt = 5, /* total physical port count */
.port_nirqs = 6,
+ .num_tx_queues = 8,
+ .tc_cbs_supported = true,
.ops = &lan937x_dev_ops,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
@@ -1409,6 +1435,8 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.cpu_ports = 0x30, /* can be configured as cpu port */
.port_cnt = 6, /* total physical port count */
.port_nirqs = 6,
+ .num_tx_queues = 8,
+ .tc_cbs_supported = true,
.ops = &lan937x_dev_ops,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
@@ -1433,6 +1461,8 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.cpu_ports = 0x30, /* can be configured as cpu port */
.port_cnt = 8, /* total physical port count */
.port_nirqs = 6,
+ .num_tx_queues = 8,
+ .tc_cbs_supported = true,
.ops = &lan937x_dev_ops,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
@@ -1461,6 +1491,8 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.cpu_ports = 0x38, /* can be configured as cpu port */
.port_cnt = 5, /* total physical port count */
.port_nirqs = 6,
+ .num_tx_queues = 8,
+ .tc_cbs_supported = true,
.ops = &lan937x_dev_ops,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
@@ -1489,6 +1521,8 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.cpu_ports = 0x30, /* can be configured as cpu port */
.port_cnt = 8, /* total physical port count */
.port_nirqs = 6,
+ .num_tx_queues = 8,
+ .tc_cbs_supported = true,
.ops = &lan937x_dev_ops,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
@@ -1775,9 +1809,6 @@ static int ksz_sw_mdio_read(struct mii_bus *bus, int addr, int regnum)
u16 val;
int ret;
- if (regnum & MII_ADDR_C45)
- return -EOPNOTSUPP;
-
ret = dev->dev_ops->r_phy(dev, addr, regnum, &val);
if (ret < 0)
return ret;
@@ -1790,9 +1821,6 @@ static int ksz_sw_mdio_write(struct mii_bus *bus, int addr, int regnum,
{
struct ksz_device *dev = bus->priv;
- if (regnum & MII_ADDR_C45)
- return -EOPNOTSUPP;
-
return dev->dev_ops->w_phy(dev, addr, regnum, val);
}
@@ -2069,6 +2097,8 @@ static int ksz_setup(struct dsa_switch *ds)
dev->dev_ops->enable_stp_addr(dev);
+ ds->num_tx_queues = dev->info->num_tx_queues;
+
regmap_update_bits(dev->regmap[0], regs[S_MULTICAST_CTRL],
MULTICAST_STORM_DISABLE, MULTICAST_STORM_DISABLE);
@@ -2099,13 +2129,23 @@ static int ksz_setup(struct dsa_switch *ds)
ret = ksz_pirq_setup(dev, dp->index);
if (ret)
goto out_girq;
+
+ ret = ksz_ptp_irq_setup(ds, dp->index);
+ if (ret)
+ goto out_pirq;
}
}
+ ret = ksz_ptp_clock_register(ds);
+ if (ret) {
+ dev_err(dev->dev, "Failed to register PTP clock: %d\n", ret);
+ goto out_ptpirq;
+ }
+
ret = ksz_mdio_register(dev);
if (ret < 0) {
dev_err(dev->dev, "failed to register the mdio");
- goto out_pirq;
+ goto out_ptp_clock_unregister;
}
/* start switch */
@@ -2114,6 +2154,12 @@ static int ksz_setup(struct dsa_switch *ds)
return 0;
+out_ptp_clock_unregister:
+ ksz_ptp_clock_unregister(ds);
+out_ptpirq:
+ if (dev->irq > 0)
+ dsa_switch_for_each_user_port(dp, dev->ds)
+ ksz_ptp_irq_free(ds, dp->index);
out_pirq:
if (dev->irq > 0)
dsa_switch_for_each_user_port(dp, dev->ds)
@@ -2130,9 +2176,14 @@ static void ksz_teardown(struct dsa_switch *ds)
struct ksz_device *dev = ds->priv;
struct dsa_port *dp;
+ ksz_ptp_clock_unregister(ds);
+
if (dev->irq > 0) {
- dsa_switch_for_each_user_port(dp, dev->ds)
+ dsa_switch_for_each_user_port(dp, dev->ds) {
+ ksz_ptp_irq_free(ds, dp->index);
+
ksz_irq_free(&dev->ports[dp->index].pirq);
+ }
ksz_irq_free(&dev->girq);
}
@@ -2517,6 +2568,17 @@ static enum dsa_tag_protocol ksz_get_tag_protocol(struct dsa_switch *ds,
return proto;
}
+static int ksz_connect_tag_protocol(struct dsa_switch *ds,
+ enum dsa_tag_protocol proto)
+{
+ struct ksz_tagger_data *tagger_data;
+
+ tagger_data = ksz_tagger_data(ds);
+ tagger_data->xmit_work_fn = ksz_port_deferred_xmit;
+
+ return 0;
+}
+
static int ksz_port_vlan_filtering(struct dsa_switch *ds, int port,
bool flag, struct netlink_ext_ack *extack)
{
@@ -2611,6 +2673,70 @@ static int ksz_max_mtu(struct dsa_switch *ds, int port)
return -EOPNOTSUPP;
}
+static int ksz_validate_eee(struct dsa_switch *ds, int port)
+{
+ struct ksz_device *dev = ds->priv;
+
+ if (!dev->info->internal_phy[port])
+ return -EOPNOTSUPP;
+
+ switch (dev->chip_id) {
+ case KSZ8563_CHIP_ID:
+ case KSZ9477_CHIP_ID:
+ case KSZ9563_CHIP_ID:
+ case KSZ9567_CHIP_ID:
+ case KSZ9893_CHIP_ID:
+ case KSZ9896_CHIP_ID:
+ case KSZ9897_CHIP_ID:
+ return 0;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int ksz_get_mac_eee(struct dsa_switch *ds, int port,
+ struct ethtool_eee *e)
+{
+ int ret;
+
+ ret = ksz_validate_eee(ds, port);
+ if (ret)
+ return ret;
+
+ /* There is no documented control of Tx LPI configuration. */
+ e->tx_lpi_enabled = true;
+
+ /* There is no documented control of Tx LPI timer. According to tests
+ * Tx LPI timer seems to be set by default to minimal value.
+ */
+ e->tx_lpi_timer = 0;
+
+ return 0;
+}
+
+static int ksz_set_mac_eee(struct dsa_switch *ds, int port,
+ struct ethtool_eee *e)
+{
+ struct ksz_device *dev = ds->priv;
+ int ret;
+
+ ret = ksz_validate_eee(ds, port);
+ if (ret)
+ return ret;
+
+ if (!e->tx_lpi_enabled) {
+ dev_err(dev->dev, "Disabling EEE Tx LPI is not supported\n");
+ return -EINVAL;
+ }
+
+ if (e->tx_lpi_timer) {
+ dev_err(dev->dev, "Setting EEE Tx LPI timer is not supported\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static void ksz_set_xmii(struct ksz_device *dev, int port,
phy_interface_t interface)
{
@@ -2930,8 +3056,104 @@ static int ksz_switch_detect(struct ksz_device *dev)
return 0;
}
+/* Bandwidth is calculated by idle slope/transmission speed. Then the Bandwidth
+ * is converted to Hex-decimal using the successive multiplication method. On
+ * every step, integer part is taken and decimal part is carry forwarded.
+ */
+static int cinc_cal(s32 idle_slope, s32 send_slope, u32 *bw)
+{
+ u32 cinc = 0;
+ u32 txrate;
+ u32 rate;
+ u8 temp;
+ u8 i;
+
+ txrate = idle_slope - send_slope;
+
+ if (!txrate)
+ return -EINVAL;
+
+ rate = idle_slope;
+
+ /* 24 bit register */
+ for (i = 0; i < 6; i++) {
+ rate = rate * 16;
+
+ temp = rate / txrate;
+
+ rate %= txrate;
+
+ cinc = ((cinc << 4) | temp);
+ }
+
+ *bw = cinc;
+
+ return 0;
+}
+
+static int ksz_setup_tc_cbs(struct dsa_switch *ds, int port,
+ struct tc_cbs_qopt_offload *qopt)
+{
+ struct ksz_device *dev = ds->priv;
+ int ret;
+ u32 bw;
+
+ if (!dev->info->tc_cbs_supported)
+ return -EOPNOTSUPP;
+
+ if (qopt->queue > dev->info->num_tx_queues)
+ return -EINVAL;
+
+ /* Queue Selection */
+ ret = ksz_pwrite32(dev, port, REG_PORT_MTI_QUEUE_INDEX__4, qopt->queue);
+ if (ret)
+ return ret;
+
+ if (!qopt->enable)
+ return ksz_pwrite8(dev, port, REG_PORT_MTI_QUEUE_CTRL_0,
+ KSZ_CBS_DISABLE);
+
+ /* High Credit */
+ ret = ksz_pwrite16(dev, port, REG_PORT_MTI_HI_WATER_MARK,
+ qopt->hicredit);
+ if (ret)
+ return ret;
+
+ /* Low Credit */
+ ret = ksz_pwrite16(dev, port, REG_PORT_MTI_LO_WATER_MARK,
+ qopt->locredit);
+ if (ret)
+ return ret;
+
+ /* Credit Increment Register */
+ ret = cinc_cal(qopt->idleslope, qopt->sendslope, &bw);
+ if (ret)
+ return ret;
+
+ if (dev->dev_ops->tc_cbs_set_cinc) {
+ ret = dev->dev_ops->tc_cbs_set_cinc(dev, port, bw);
+ if (ret)
+ return ret;
+ }
+
+ return ksz_pwrite8(dev, port, REG_PORT_MTI_QUEUE_CTRL_0,
+ KSZ_CBS_ENABLE);
+}
+
+static int ksz_setup_tc(struct dsa_switch *ds, int port,
+ enum tc_setup_type type, void *type_data)
+{
+ switch (type) {
+ case TC_SETUP_QDISC_CBS:
+ return ksz_setup_tc_cbs(ds, port, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static const struct dsa_switch_ops ksz_switch_ops = {
.get_tag_protocol = ksz_get_tag_protocol,
+ .connect_tag_protocol = ksz_connect_tag_protocol,
.get_phy_flags = ksz_get_phy_flags,
.setup = ksz_setup,
.teardown = ksz_teardown,
@@ -2966,6 +3188,14 @@ static const struct dsa_switch_ops ksz_switch_ops = {
.get_pause_stats = ksz_get_pause_stats,
.port_change_mtu = ksz_change_mtu,
.port_max_mtu = ksz_max_mtu,
+ .get_ts_info = ksz_get_ts_info,
+ .port_hwtstamp_get = ksz_hwtstamp_get,
+ .port_hwtstamp_set = ksz_hwtstamp_set,
+ .port_txtstamp = ksz_port_txtstamp,
+ .port_rxtstamp = ksz_port_rxtstamp,
+ .port_setup_tc = ksz_setup_tc,
+ .get_mac_eee = ksz_get_mac_eee,
+ .set_mac_eee = ksz_set_mac_eee,
};
struct ksz_device *ksz_switch_alloc(struct device *base, void *priv)
diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h
index 055d61ff3fb8..d2d5761d58e9 100644
--- a/drivers/net/dsa/microchip/ksz_common.h
+++ b/drivers/net/dsa/microchip/ksz_common.h
@@ -15,9 +15,12 @@
#include <net/dsa.h>
#include <linux/irq.h>
+#include "ksz_ptp.h"
+
#define KSZ_MAX_NUM_PORTS 8
struct ksz_device;
+struct ksz_port;
struct vlan_table {
u32 table[3];
@@ -46,6 +49,8 @@ struct ksz_chip_data {
int cpu_ports;
int port_cnt;
u8 port_nirqs;
+ u8 num_tx_queues;
+ bool tc_cbs_supported;
const struct ksz_dev_ops *ops;
bool phy_errata_9477;
bool ksz87xx_eee_link_erratum;
@@ -81,6 +86,14 @@ struct ksz_irq {
struct ksz_device *dev;
};
+struct ksz_ptp_irq {
+ struct ksz_port *port;
+ u16 ts_reg;
+ bool ts_en;
+ char name[16];
+ int num;
+};
+
struct ksz_port {
bool remove_tag; /* Remove Tag flag set, for ksz8795 only */
bool learning;
@@ -100,6 +113,15 @@ struct ksz_port {
struct ksz_device *ksz_dev;
struct ksz_irq pirq;
u8 num;
+#if IS_ENABLED(CONFIG_NET_DSA_MICROCHIP_KSZ_PTP)
+ struct hwtstamp_config tstamp_config;
+ bool hwts_tx_en;
+ bool hwts_rx_en;
+ struct ksz_irq ptpirq;
+ struct ksz_ptp_irq ptpmsg_irq[3];
+ ktime_t tstamp_msg;
+ struct completion tstamp_msg_comp;
+#endif
};
struct ksz_device {
@@ -140,6 +162,7 @@ struct ksz_device {
u16 port_mask;
struct mutex lock_irq; /* IRQ Access */
struct ksz_irq girq;
+ struct ksz_ptp_data ptp_data;
};
/* List of supported models */
@@ -332,6 +355,7 @@ struct ksz_dev_ops {
struct phy_device *phydev, int speed,
int duplex, bool tx_pause, bool rx_pause);
void (*setup_rgmii_delay)(struct ksz_device *dev, int port);
+ int (*tc_cbs_set_cinc)(struct ksz_device *dev, int port, u32 val);
void (*config_cpu_port)(struct dsa_switch *ds);
int (*enable_stp_addr)(struct ksz_device *dev);
int (*reset)(struct ksz_device *dev);
@@ -443,6 +467,32 @@ static inline int ksz_write32(struct ksz_device *dev, u32 reg, u32 value)
return ret;
}
+static inline int ksz_rmw16(struct ksz_device *dev, u32 reg, u16 mask,
+ u16 value)
+{
+ int ret;
+
+ ret = regmap_update_bits(dev->regmap[1], reg, mask, value);
+ if (ret)
+ dev_err(dev->dev, "can't rmw 16bit reg 0x%x: %pe\n", reg,
+ ERR_PTR(ret));
+
+ return ret;
+}
+
+static inline int ksz_rmw32(struct ksz_device *dev, u32 reg, u32 mask,
+ u32 value)
+{
+ int ret;
+
+ ret = regmap_update_bits(dev->regmap[2], reg, mask, value);
+ if (ret)
+ dev_err(dev->dev, "can't rmw 32bit reg 0x%x: %pe\n", reg,
+ ERR_PTR(ret));
+
+ return ret;
+}
+
static inline int ksz_write64(struct ksz_device *dev, u32 reg, u64 value)
{
u32 val[2];
@@ -591,6 +641,7 @@ static inline int is_lan937x(struct ksz_device *dev)
#define REG_PORT_INT_MASK 0x001F
#define PORT_SRC_PHY_INT 1
+#define PORT_SRC_PTP_INT 2
#define KSZ8795_HUGE_PACKET_SIZE 2000
#define KSZ8863_HUGE_PACKET_SIZE 1916
@@ -598,6 +649,24 @@ static inline int is_lan937x(struct ksz_device *dev)
#define KSZ8_LEGAL_PACKET_SIZE 1518
#define KSZ9477_MAX_FRAME_SIZE 9000
+/* CBS related registers */
+#define REG_PORT_MTI_QUEUE_INDEX__4 0x0900
+
+#define REG_PORT_MTI_QUEUE_CTRL_0 0x0914
+
+#define MTI_SCHEDULE_MODE_M 0x3
+#define MTI_SCHEDULE_MODE_S 6
+#define MTI_SCHEDULE_STRICT_PRIO 0
+#define MTI_SCHEDULE_WRR 2
+#define MTI_SHAPING_M 0x3
+#define MTI_SHAPING_S 4
+#define MTI_SHAPING_OFF 0
+#define MTI_SHAPING_SRP 1
+#define MTI_SHAPING_TIME_AWARE 2
+
+#define REG_PORT_MTI_HI_WATER_MARK 0x0916
+#define REG_PORT_MTI_LO_WATER_MARK 0x0918
+
/* Regmap tables generation */
#define KSZ_SPI_OP_RD 3
#define KSZ_SPI_OP_WR 2
diff --git a/drivers/net/dsa/microchip/ksz_ptp.c b/drivers/net/dsa/microchip/ksz_ptp.c
new file mode 100644
index 000000000000..4e22a695a64c
--- /dev/null
+++ b/drivers/net/dsa/microchip/ksz_ptp.c
@@ -0,0 +1,1201 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Microchip KSZ PTP Implementation
+ *
+ * Copyright (C) 2020 ARRI Lighting
+ * Copyright (C) 2022 Microchip Technology Inc.
+ */
+
+#include <linux/dsa/ksz_common.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/ptp_classify.h>
+#include <linux/ptp_clock_kernel.h>
+
+#include "ksz_common.h"
+#include "ksz_ptp.h"
+#include "ksz_ptp_reg.h"
+
+#define ptp_caps_to_data(d) container_of((d), struct ksz_ptp_data, caps)
+#define ptp_data_to_ksz_dev(d) container_of((d), struct ksz_device, ptp_data)
+#define work_to_xmit_work(w) \
+ container_of((w), struct ksz_deferred_xmit_work, work)
+
+/* Sub-nanoseconds-adj,max * sub-nanoseconds / 40ns * 1ns
+ * = (2^30-1) * (2 ^ 32) / 40 ns * 1 ns = 6249999
+ */
+#define KSZ_MAX_DRIFT_CORR 6249999
+#define KSZ_MAX_PULSE_WIDTH 125000000LL
+
+#define KSZ_PTP_INC_NS 40ULL /* HW clock is incremented every 40 ns (by 40) */
+#define KSZ_PTP_SUBNS_BITS 32
+
+#define KSZ_PTP_INT_START 13
+
+static int ksz_ptp_tou_gpio(struct ksz_device *dev)
+{
+ int ret;
+
+ if (!is_lan937x(dev))
+ return 0;
+
+ ret = ksz_rmw32(dev, REG_PTP_CTRL_STAT__4, GPIO_OUT,
+ GPIO_OUT);
+ if (ret)
+ return ret;
+
+ ret = ksz_rmw32(dev, REG_SW_GLOBAL_LED_OVR__4, LED_OVR_1 | LED_OVR_2,
+ LED_OVR_1 | LED_OVR_2);
+ if (ret)
+ return ret;
+
+ return ksz_rmw32(dev, REG_SW_GLOBAL_LED_SRC__4,
+ LED_SRC_PTP_GPIO_1 | LED_SRC_PTP_GPIO_2,
+ LED_SRC_PTP_GPIO_1 | LED_SRC_PTP_GPIO_2);
+}
+
+static int ksz_ptp_tou_reset(struct ksz_device *dev, u8 unit)
+{
+ u32 data;
+ int ret;
+
+ /* Reset trigger unit (clears TRIGGER_EN, but not GPIOSTATx) */
+ ret = ksz_rmw32(dev, REG_PTP_CTRL_STAT__4, TRIG_RESET, TRIG_RESET);
+
+ data = FIELD_PREP(TRIG_DONE_M, BIT(unit));
+ ret = ksz_write32(dev, REG_PTP_TRIG_STATUS__4, data);
+ if (ret)
+ return ret;
+
+ data = FIELD_PREP(TRIG_INT_M, BIT(unit));
+ ret = ksz_write32(dev, REG_PTP_INT_STATUS__4, data);
+ if (ret)
+ return ret;
+
+ /* Clear reset and set GPIO direction */
+ return ksz_rmw32(dev, REG_PTP_CTRL_STAT__4, (TRIG_RESET | TRIG_ENABLE),
+ 0);
+}
+
+static int ksz_ptp_tou_pulse_verify(u64 pulse_ns)
+{
+ u32 data;
+
+ if (pulse_ns & 0x3)
+ return -EINVAL;
+
+ data = (pulse_ns / 8);
+ if (!FIELD_FIT(TRIG_PULSE_WIDTH_M, data))
+ return -ERANGE;
+
+ return 0;
+}
+
+static int ksz_ptp_tou_target_time_set(struct ksz_device *dev,
+ struct timespec64 const *ts)
+{
+ int ret;
+
+ /* Hardware has only 32 bit */
+ if ((ts->tv_sec & 0xffffffff) != ts->tv_sec)
+ return -EINVAL;
+
+ ret = ksz_write32(dev, REG_TRIG_TARGET_NANOSEC, ts->tv_nsec);
+ if (ret)
+ return ret;
+
+ ret = ksz_write32(dev, REG_TRIG_TARGET_SEC, ts->tv_sec);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int ksz_ptp_tou_start(struct ksz_device *dev, u8 unit)
+{
+ u32 data;
+ int ret;
+
+ ret = ksz_rmw32(dev, REG_PTP_CTRL_STAT__4, TRIG_ENABLE, TRIG_ENABLE);
+ if (ret)
+ return ret;
+
+ /* Check error flag:
+ * - the ACTIVE flag is NOT cleared an error!
+ */
+ ret = ksz_read32(dev, REG_PTP_TRIG_STATUS__4, &data);
+ if (ret)
+ return ret;
+
+ if (FIELD_GET(TRIG_ERROR_M, data) & (1 << unit)) {
+ dev_err(dev->dev, "%s: Trigger unit%d error!\n", __func__,
+ unit);
+ ret = -EIO;
+ /* Unit will be reset on next access */
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ksz_ptp_configure_perout(struct ksz_device *dev,
+ u32 cycle_width_ns, u32 pulse_width_ns,
+ struct timespec64 const *target_time,
+ u8 index)
+{
+ u32 data;
+ int ret;
+
+ data = FIELD_PREP(TRIG_NOTIFY, 1) |
+ FIELD_PREP(TRIG_GPO_M, index) |
+ FIELD_PREP(TRIG_PATTERN_M, TRIG_POS_PERIOD);
+ ret = ksz_write32(dev, REG_TRIG_CTRL__4, data);
+ if (ret)
+ return ret;
+
+ ret = ksz_write32(dev, REG_TRIG_CYCLE_WIDTH, cycle_width_ns);
+ if (ret)
+ return ret;
+
+ /* Set cycle count 0 - Infinite */
+ ret = ksz_rmw32(dev, REG_TRIG_CYCLE_CNT, TRIG_CYCLE_CNT_M, 0);
+ if (ret)
+ return ret;
+
+ data = (pulse_width_ns / 8);
+ ret = ksz_write32(dev, REG_TRIG_PULSE_WIDTH__4, data);
+ if (ret)
+ return ret;
+
+ ret = ksz_ptp_tou_target_time_set(dev, target_time);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int ksz_ptp_enable_perout(struct ksz_device *dev,
+ struct ptp_perout_request const *request,
+ int on)
+{
+ struct ksz_ptp_data *ptp_data = &dev->ptp_data;
+ u64 req_pulse_width_ns;
+ u64 cycle_width_ns;
+ u64 pulse_width_ns;
+ int pin = 0;
+ u32 data32;
+ int ret;
+
+ if (request->flags & ~PTP_PEROUT_DUTY_CYCLE)
+ return -EOPNOTSUPP;
+
+ if (ptp_data->tou_mode != KSZ_PTP_TOU_PEROUT &&
+ ptp_data->tou_mode != KSZ_PTP_TOU_IDLE)
+ return -EBUSY;
+
+ pin = ptp_find_pin(ptp_data->clock, PTP_PF_PEROUT, request->index);
+ if (pin < 0)
+ return -EINVAL;
+
+ data32 = FIELD_PREP(PTP_GPIO_INDEX, pin) |
+ FIELD_PREP(PTP_TOU_INDEX, request->index);
+ ret = ksz_rmw32(dev, REG_PTP_UNIT_INDEX__4,
+ PTP_GPIO_INDEX | PTP_TOU_INDEX, data32);
+ if (ret)
+ return ret;
+
+ ret = ksz_ptp_tou_reset(dev, request->index);
+ if (ret)
+ return ret;
+
+ if (!on) {
+ ptp_data->tou_mode = KSZ_PTP_TOU_IDLE;
+ return 0;
+ }
+
+ ptp_data->perout_target_time_first.tv_sec = request->start.sec;
+ ptp_data->perout_target_time_first.tv_nsec = request->start.nsec;
+
+ ptp_data->perout_period.tv_sec = request->period.sec;
+ ptp_data->perout_period.tv_nsec = request->period.nsec;
+
+ cycle_width_ns = timespec64_to_ns(&ptp_data->perout_period);
+ if ((cycle_width_ns & TRIG_CYCLE_WIDTH_M) != cycle_width_ns)
+ return -EINVAL;
+
+ if (request->flags & PTP_PEROUT_DUTY_CYCLE) {
+ pulse_width_ns = request->on.sec * NSEC_PER_SEC +
+ request->on.nsec;
+ } else {
+ /* Use a duty cycle of 50%. Maximum pulse width supported by the
+ * hardware is a little bit more than 125 ms.
+ */
+ req_pulse_width_ns = (request->period.sec * NSEC_PER_SEC +
+ request->period.nsec) / 2;
+ pulse_width_ns = min_t(u64, req_pulse_width_ns,
+ KSZ_MAX_PULSE_WIDTH);
+ }
+
+ ret = ksz_ptp_tou_pulse_verify(pulse_width_ns);
+ if (ret)
+ return ret;
+
+ ret = ksz_ptp_configure_perout(dev, cycle_width_ns, pulse_width_ns,
+ &ptp_data->perout_target_time_first,
+ pin);
+ if (ret)
+ return ret;
+
+ ret = ksz_ptp_tou_gpio(dev);
+ if (ret)
+ return ret;
+
+ ret = ksz_ptp_tou_start(dev, request->index);
+ if (ret)
+ return ret;
+
+ ptp_data->tou_mode = KSZ_PTP_TOU_PEROUT;
+
+ return 0;
+}
+
+static int ksz_ptp_enable_mode(struct ksz_device *dev)
+{
+ struct ksz_tagger_data *tagger_data = ksz_tagger_data(dev->ds);
+ struct ksz_ptp_data *ptp_data = &dev->ptp_data;
+ struct ksz_port *prt;
+ struct dsa_port *dp;
+ bool tag_en = false;
+ int ret;
+
+ dsa_switch_for_each_user_port(dp, dev->ds) {
+ prt = &dev->ports[dp->index];
+ if (prt->hwts_tx_en || prt->hwts_rx_en) {
+ tag_en = true;
+ break;
+ }
+ }
+
+ if (tag_en) {
+ ret = ptp_schedule_worker(ptp_data->clock, 0);
+ if (ret)
+ return ret;
+ } else {
+ ptp_cancel_worker_sync(ptp_data->clock);
+ }
+
+ tagger_data->hwtstamp_set_state(dev->ds, tag_en);
+
+ return ksz_rmw16(dev, REG_PTP_MSG_CONF1, PTP_ENABLE,
+ tag_en ? PTP_ENABLE : 0);
+}
+
+/* The function is return back the capability of timestamping feature when
+ * requested through ethtool -T <interface> utility
+ */
+int ksz_get_ts_info(struct dsa_switch *ds, int port, struct ethtool_ts_info *ts)
+{
+ struct ksz_device *dev = ds->priv;
+ struct ksz_ptp_data *ptp_data;
+
+ ptp_data = &dev->ptp_data;
+
+ if (!ptp_data->clock)
+ return -ENODEV;
+
+ ts->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ ts->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ONESTEP_P2P);
+
+ if (is_lan937x(dev))
+ ts->tx_types |= BIT(HWTSTAMP_TX_ON);
+
+ ts->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);
+
+ ts->phc_index = ptp_clock_index(ptp_data->clock);
+
+ return 0;
+}
+
+int ksz_hwtstamp_get(struct dsa_switch *ds, int port, struct ifreq *ifr)
+{
+ struct ksz_device *dev = ds->priv;
+ struct hwtstamp_config *config;
+ struct ksz_port *prt;
+
+ prt = &dev->ports[port];
+ config = &prt->tstamp_config;
+
+ return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
+ -EFAULT : 0;
+}
+
+static int ksz_set_hwtstamp_config(struct ksz_device *dev,
+ struct ksz_port *prt,
+ struct hwtstamp_config *config)
+{
+ int ret;
+
+ if (config->flags)
+ return -EINVAL;
+
+ switch (config->tx_type) {
+ case HWTSTAMP_TX_OFF:
+ prt->ptpmsg_irq[KSZ_SYNC_MSG].ts_en = false;
+ prt->ptpmsg_irq[KSZ_XDREQ_MSG].ts_en = false;
+ prt->ptpmsg_irq[KSZ_PDRES_MSG].ts_en = false;
+ prt->hwts_tx_en = false;
+ break;
+ case HWTSTAMP_TX_ONESTEP_P2P:
+ prt->ptpmsg_irq[KSZ_SYNC_MSG].ts_en = false;
+ prt->ptpmsg_irq[KSZ_XDREQ_MSG].ts_en = true;
+ prt->ptpmsg_irq[KSZ_PDRES_MSG].ts_en = false;
+ prt->hwts_tx_en = true;
+
+ ret = ksz_rmw16(dev, REG_PTP_MSG_CONF1, PTP_1STEP, PTP_1STEP);
+ if (ret)
+ return ret;
+
+ break;
+ case HWTSTAMP_TX_ON:
+ if (!is_lan937x(dev))
+ return -ERANGE;
+
+ prt->ptpmsg_irq[KSZ_SYNC_MSG].ts_en = true;
+ prt->ptpmsg_irq[KSZ_XDREQ_MSG].ts_en = true;
+ prt->ptpmsg_irq[KSZ_PDRES_MSG].ts_en = true;
+ prt->hwts_tx_en = true;
+
+ ret = ksz_rmw16(dev, REG_PTP_MSG_CONF1, PTP_1STEP, 0);
+ if (ret)
+ return ret;
+
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (config->rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ prt->hwts_rx_en = false;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
+ prt->hwts_rx_en = true;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
+ prt->hwts_rx_en = true;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ prt->hwts_rx_en = true;
+ break;
+ default:
+ config->rx_filter = HWTSTAMP_FILTER_NONE;
+ return -ERANGE;
+ }
+
+ return ksz_ptp_enable_mode(dev);
+}
+
+int ksz_hwtstamp_set(struct dsa_switch *ds, int port, struct ifreq *ifr)
+{
+ struct ksz_device *dev = ds->priv;
+ struct hwtstamp_config config;
+ struct ksz_port *prt;
+ int ret;
+
+ prt = &dev->ports[port];
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ ret = ksz_set_hwtstamp_config(dev, prt, &config);
+ if (ret)
+ return ret;
+
+ memcpy(&prt->tstamp_config, &config, sizeof(config));
+
+ if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static ktime_t ksz_tstamp_reconstruct(struct ksz_device *dev, ktime_t tstamp)
+{
+ struct timespec64 ptp_clock_time;
+ struct ksz_ptp_data *ptp_data;
+ struct timespec64 diff;
+ struct timespec64 ts;
+
+ ptp_data = &dev->ptp_data;
+ ts = ktime_to_timespec64(tstamp);
+
+ spin_lock_bh(&ptp_data->clock_lock);
+ ptp_clock_time = ptp_data->clock_time;
+ spin_unlock_bh(&ptp_data->clock_lock);
+
+ /* calculate full time from partial time stamp */
+ ts.tv_sec = (ptp_clock_time.tv_sec & ~3) | ts.tv_sec;
+
+ /* find nearest possible point in time */
+ diff = timespec64_sub(ts, ptp_clock_time);
+ if (diff.tv_sec > 2)
+ ts.tv_sec -= 4;
+ else if (diff.tv_sec < -2)
+ ts.tv_sec += 4;
+
+ return timespec64_to_ktime(ts);
+}
+
+bool ksz_port_rxtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb,
+ unsigned int type)
+{
+ struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb);
+ struct ksz_device *dev = ds->priv;
+ struct ptp_header *ptp_hdr;
+ struct ksz_port *prt;
+ u8 ptp_msg_type;
+ ktime_t tstamp;
+ s64 correction;
+
+ prt = &dev->ports[port];
+
+ tstamp = KSZ_SKB_CB(skb)->tstamp;
+ memset(hwtstamps, 0, sizeof(*hwtstamps));
+ hwtstamps->hwtstamp = ksz_tstamp_reconstruct(dev, tstamp);
+
+ if (prt->tstamp_config.tx_type != HWTSTAMP_TX_ONESTEP_P2P)
+ goto out;
+
+ ptp_hdr = ptp_parse_header(skb, type);
+ if (!ptp_hdr)
+ goto out;
+
+ ptp_msg_type = ptp_get_msgtype(ptp_hdr, type);
+ if (ptp_msg_type != PTP_MSGTYPE_PDELAY_REQ)
+ goto out;
+
+ /* Only subtract the partial time stamp from the correction field. When
+ * the hardware adds the egress time stamp to the correction field of
+ * the PDelay_Resp message on tx, also only the partial time stamp will
+ * be added.
+ */
+ correction = (s64)get_unaligned_be64(&ptp_hdr->correction);
+ correction -= ktime_to_ns(tstamp) << 16;
+
+ ptp_header_update_correction(skb, type, ptp_hdr, correction);
+
+out:
+ return false;
+}
+
+void ksz_port_txtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb)
+{
+ struct ksz_device *dev = ds->priv;
+ struct ptp_header *hdr;
+ struct sk_buff *clone;
+ struct ksz_port *prt;
+ unsigned int type;
+ u8 ptp_msg_type;
+
+ prt = &dev->ports[port];
+
+ if (!prt->hwts_tx_en)
+ return;
+
+ type = ptp_classify_raw(skb);
+ if (type == PTP_CLASS_NONE)
+ return;
+
+ hdr = ptp_parse_header(skb, type);
+ if (!hdr)
+ return;
+
+ ptp_msg_type = ptp_get_msgtype(hdr, type);
+
+ switch (ptp_msg_type) {
+ case PTP_MSGTYPE_SYNC:
+ if (prt->tstamp_config.tx_type == HWTSTAMP_TX_ONESTEP_P2P)
+ return;
+ break;
+ case PTP_MSGTYPE_PDELAY_REQ:
+ break;
+ case PTP_MSGTYPE_PDELAY_RESP:
+ if (prt->tstamp_config.tx_type == HWTSTAMP_TX_ONESTEP_P2P) {
+ KSZ_SKB_CB(skb)->ptp_type = type;
+ KSZ_SKB_CB(skb)->update_correction = true;
+ return;
+ }
+ break;
+
+ default:
+ return;
+ }
+
+ clone = skb_clone_sk(skb);
+ if (!clone)
+ return;
+
+ /* caching the value to be used in tag_ksz.c */
+ KSZ_SKB_CB(skb)->clone = clone;
+}
+
+static void ksz_ptp_txtstamp_skb(struct ksz_device *dev,
+ struct ksz_port *prt, struct sk_buff *skb)
+{
+ struct skb_shared_hwtstamps hwtstamps = {};
+ int ret;
+
+ /* timeout must include DSA master to transmit data, tstamp latency,
+ * IRQ latency and time for reading the time stamp.
+ */
+ ret = wait_for_completion_timeout(&prt->tstamp_msg_comp,
+ msecs_to_jiffies(100));
+ if (!ret)
+ return;
+
+ hwtstamps.hwtstamp = prt->tstamp_msg;
+ skb_complete_tx_timestamp(skb, &hwtstamps);
+}
+
+void ksz_port_deferred_xmit(struct kthread_work *work)
+{
+ struct ksz_deferred_xmit_work *xmit_work = work_to_xmit_work(work);
+ struct sk_buff *clone, *skb = xmit_work->skb;
+ struct dsa_switch *ds = xmit_work->dp->ds;
+ struct ksz_device *dev = ds->priv;
+ struct ksz_port *prt;
+
+ prt = &dev->ports[xmit_work->dp->index];
+
+ clone = KSZ_SKB_CB(skb)->clone;
+
+ skb_shinfo(clone)->tx_flags |= SKBTX_IN_PROGRESS;
+
+ reinit_completion(&prt->tstamp_msg_comp);
+
+ dsa_enqueue_skb(skb, skb->dev);
+
+ ksz_ptp_txtstamp_skb(dev, prt, clone);
+
+ kfree(xmit_work);
+}
+
+static int _ksz_ptp_gettime(struct ksz_device *dev, struct timespec64 *ts)
+{
+ u32 nanoseconds;
+ u32 seconds;
+ u8 phase;
+ int ret;
+
+ /* Copy current PTP clock into shadow registers and read */
+ ret = ksz_rmw16(dev, REG_PTP_CLK_CTRL, PTP_READ_TIME, PTP_READ_TIME);
+ if (ret)
+ return ret;
+
+ ret = ksz_read8(dev, REG_PTP_RTC_SUB_NANOSEC__2, &phase);
+ if (ret)
+ return ret;
+
+ ret = ksz_read32(dev, REG_PTP_RTC_NANOSEC, &nanoseconds);
+ if (ret)
+ return ret;
+
+ ret = ksz_read32(dev, REG_PTP_RTC_SEC, &seconds);
+ if (ret)
+ return ret;
+
+ ts->tv_sec = seconds;
+ ts->tv_nsec = nanoseconds + phase * 8;
+
+ return 0;
+}
+
+static int ksz_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
+{
+ struct ksz_ptp_data *ptp_data = ptp_caps_to_data(ptp);
+ struct ksz_device *dev = ptp_data_to_ksz_dev(ptp_data);
+ int ret;
+
+ mutex_lock(&ptp_data->lock);
+ ret = _ksz_ptp_gettime(dev, ts);
+ mutex_unlock(&ptp_data->lock);
+
+ return ret;
+}
+
+static int ksz_ptp_restart_perout(struct ksz_device *dev)
+{
+ struct ksz_ptp_data *ptp_data = &dev->ptp_data;
+ s64 now_ns, first_ns, period_ns, next_ns;
+ struct ptp_perout_request request;
+ struct timespec64 next;
+ struct timespec64 now;
+ unsigned int count;
+ int ret;
+
+ dev_info(dev->dev, "Restarting periodic output signal\n");
+
+ ret = _ksz_ptp_gettime(dev, &now);
+ if (ret)
+ return ret;
+
+ now_ns = timespec64_to_ns(&now);
+ first_ns = timespec64_to_ns(&ptp_data->perout_target_time_first);
+
+ /* Calculate next perout event based on start time and period */
+ period_ns = timespec64_to_ns(&ptp_data->perout_period);
+
+ if (first_ns < now_ns) {
+ count = div_u64(now_ns - first_ns, period_ns);
+ next_ns = first_ns + count * period_ns;
+ } else {
+ next_ns = first_ns;
+ }
+
+ /* Ensure 100 ms guard time prior next event */
+ while (next_ns < now_ns + 100000000)
+ next_ns += period_ns;
+
+ /* Restart periodic output signal */
+ next = ns_to_timespec64(next_ns);
+ request.start.sec = next.tv_sec;
+ request.start.nsec = next.tv_nsec;
+ request.period.sec = ptp_data->perout_period.tv_sec;
+ request.period.nsec = ptp_data->perout_period.tv_nsec;
+ request.index = 0;
+ request.flags = 0;
+
+ return ksz_ptp_enable_perout(dev, &request, 1);
+}
+
+static int ksz_ptp_settime(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct ksz_ptp_data *ptp_data = ptp_caps_to_data(ptp);
+ struct ksz_device *dev = ptp_data_to_ksz_dev(ptp_data);
+ int ret;
+
+ mutex_lock(&ptp_data->lock);
+
+ /* Write to shadow registers and Load PTP clock */
+ ret = ksz_write16(dev, REG_PTP_RTC_SUB_NANOSEC__2, PTP_RTC_0NS);
+ if (ret)
+ goto unlock;
+
+ ret = ksz_write32(dev, REG_PTP_RTC_NANOSEC, ts->tv_nsec);
+ if (ret)
+ goto unlock;
+
+ ret = ksz_write32(dev, REG_PTP_RTC_SEC, ts->tv_sec);
+ if (ret)
+ goto unlock;
+
+ ret = ksz_rmw16(dev, REG_PTP_CLK_CTRL, PTP_LOAD_TIME, PTP_LOAD_TIME);
+ if (ret)
+ goto unlock;
+
+ switch (ptp_data->tou_mode) {
+ case KSZ_PTP_TOU_IDLE:
+ break;
+
+ case KSZ_PTP_TOU_PEROUT:
+ ret = ksz_ptp_restart_perout(dev);
+ if (ret)
+ goto unlock;
+
+ break;
+ }
+
+ spin_lock_bh(&ptp_data->clock_lock);
+ ptp_data->clock_time = *ts;
+ spin_unlock_bh(&ptp_data->clock_lock);
+
+unlock:
+ mutex_unlock(&ptp_data->lock);
+
+ return ret;
+}
+
+static int ksz_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
+{
+ struct ksz_ptp_data *ptp_data = ptp_caps_to_data(ptp);
+ struct ksz_device *dev = ptp_data_to_ksz_dev(ptp_data);
+ u64 base, adj;
+ bool negative;
+ u32 data32;
+ int ret;
+
+ mutex_lock(&ptp_data->lock);
+
+ if (scaled_ppm) {
+ base = KSZ_PTP_INC_NS << KSZ_PTP_SUBNS_BITS;
+ negative = diff_by_scaled_ppm(base, scaled_ppm, &adj);
+
+ data32 = (u32)adj;
+ data32 &= PTP_SUBNANOSEC_M;
+ if (!negative)
+ data32 |= PTP_RATE_DIR;
+
+ ret = ksz_write32(dev, REG_PTP_SUBNANOSEC_RATE, data32);
+ if (ret)
+ goto unlock;
+
+ ret = ksz_rmw16(dev, REG_PTP_CLK_CTRL, PTP_CLK_ADJ_ENABLE,
+ PTP_CLK_ADJ_ENABLE);
+ if (ret)
+ goto unlock;
+ } else {
+ ret = ksz_rmw16(dev, REG_PTP_CLK_CTRL, PTP_CLK_ADJ_ENABLE, 0);
+ if (ret)
+ goto unlock;
+ }
+
+unlock:
+ mutex_unlock(&ptp_data->lock);
+ return ret;
+}
+
+static int ksz_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct ksz_ptp_data *ptp_data = ptp_caps_to_data(ptp);
+ struct ksz_device *dev = ptp_data_to_ksz_dev(ptp_data);
+ struct timespec64 delta64 = ns_to_timespec64(delta);
+ s32 sec, nsec;
+ u16 data16;
+ int ret;
+
+ mutex_lock(&ptp_data->lock);
+
+ /* do not use ns_to_timespec64(),
+ * both sec and nsec are subtracted by hw
+ */
+ sec = div_s64_rem(delta, NSEC_PER_SEC, &nsec);
+
+ ret = ksz_write32(dev, REG_PTP_RTC_NANOSEC, abs(nsec));
+ if (ret)
+ goto unlock;
+
+ ret = ksz_write32(dev, REG_PTP_RTC_SEC, abs(sec));
+ if (ret)
+ goto unlock;
+
+ ret = ksz_read16(dev, REG_PTP_CLK_CTRL, &data16);
+ if (ret)
+ goto unlock;
+
+ data16 |= PTP_STEP_ADJ;
+
+ /* PTP_STEP_DIR -- 0: subtract, 1: add */
+ if (delta < 0)
+ data16 &= ~PTP_STEP_DIR;
+ else
+ data16 |= PTP_STEP_DIR;
+
+ ret = ksz_write16(dev, REG_PTP_CLK_CTRL, data16);
+ if (ret)
+ goto unlock;
+
+ switch (ptp_data->tou_mode) {
+ case KSZ_PTP_TOU_IDLE:
+ break;
+
+ case KSZ_PTP_TOU_PEROUT:
+ ret = ksz_ptp_restart_perout(dev);
+ if (ret)
+ goto unlock;
+
+ break;
+ }
+
+ spin_lock_bh(&ptp_data->clock_lock);
+ ptp_data->clock_time = timespec64_add(ptp_data->clock_time, delta64);
+ spin_unlock_bh(&ptp_data->clock_lock);
+
+unlock:
+ mutex_unlock(&ptp_data->lock);
+ return ret;
+}
+
+static int ksz_ptp_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *req, int on)
+{
+ struct ksz_ptp_data *ptp_data = ptp_caps_to_data(ptp);
+ struct ksz_device *dev = ptp_data_to_ksz_dev(ptp_data);
+ int ret;
+
+ switch (req->type) {
+ case PTP_CLK_REQ_PEROUT:
+ mutex_lock(&ptp_data->lock);
+ ret = ksz_ptp_enable_perout(dev, &req->perout, on);
+ mutex_unlock(&ptp_data->lock);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
+static int ksz_ptp_verify_pin(struct ptp_clock_info *ptp, unsigned int pin,
+ enum ptp_pin_function func, unsigned int chan)
+{
+ int ret = 0;
+
+ switch (func) {
+ case PTP_PF_NONE:
+ case PTP_PF_PEROUT:
+ break;
+ default:
+ ret = -1;
+ break;
+ }
+
+ return ret;
+}
+
+/* Function is pointer to the do_aux_work in the ptp_clock capability */
+static long ksz_ptp_do_aux_work(struct ptp_clock_info *ptp)
+{
+ struct ksz_ptp_data *ptp_data = ptp_caps_to_data(ptp);
+ struct ksz_device *dev = ptp_data_to_ksz_dev(ptp_data);
+ struct timespec64 ts;
+ int ret;
+
+ mutex_lock(&ptp_data->lock);
+ ret = _ksz_ptp_gettime(dev, &ts);
+ if (ret)
+ goto out;
+
+ spin_lock_bh(&ptp_data->clock_lock);
+ ptp_data->clock_time = ts;
+ spin_unlock_bh(&ptp_data->clock_lock);
+
+out:
+ mutex_unlock(&ptp_data->lock);
+
+ return HZ; /* reschedule in 1 second */
+}
+
+static int ksz_ptp_start_clock(struct ksz_device *dev)
+{
+ struct ksz_ptp_data *ptp_data = &dev->ptp_data;
+ int ret;
+
+ ret = ksz_rmw16(dev, REG_PTP_CLK_CTRL, PTP_CLK_ENABLE, PTP_CLK_ENABLE);
+ if (ret)
+ return ret;
+
+ ptp_data->clock_time.tv_sec = 0;
+ ptp_data->clock_time.tv_nsec = 0;
+
+ return 0;
+}
+
+int ksz_ptp_clock_register(struct dsa_switch *ds)
+{
+ struct ksz_device *dev = ds->priv;
+ struct ksz_ptp_data *ptp_data;
+ int ret;
+ u8 i;
+
+ ptp_data = &dev->ptp_data;
+ mutex_init(&ptp_data->lock);
+ spin_lock_init(&ptp_data->clock_lock);
+
+ ptp_data->caps.owner = THIS_MODULE;
+ snprintf(ptp_data->caps.name, 16, "Microchip Clock");
+ ptp_data->caps.max_adj = KSZ_MAX_DRIFT_CORR;
+ ptp_data->caps.gettime64 = ksz_ptp_gettime;
+ ptp_data->caps.settime64 = ksz_ptp_settime;
+ ptp_data->caps.adjfine = ksz_ptp_adjfine;
+ ptp_data->caps.adjtime = ksz_ptp_adjtime;
+ ptp_data->caps.do_aux_work = ksz_ptp_do_aux_work;
+ ptp_data->caps.enable = ksz_ptp_enable;
+ ptp_data->caps.verify = ksz_ptp_verify_pin;
+ ptp_data->caps.n_pins = KSZ_PTP_N_GPIO;
+ ptp_data->caps.n_per_out = 3;
+
+ ret = ksz_ptp_start_clock(dev);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < KSZ_PTP_N_GPIO; i++) {
+ struct ptp_pin_desc *ptp_pin = &ptp_data->pin_config[i];
+
+ snprintf(ptp_pin->name,
+ sizeof(ptp_pin->name), "ksz_ptp_pin_%02d", i);
+ ptp_pin->index = i;
+ ptp_pin->func = PTP_PF_NONE;
+ }
+
+ ptp_data->caps.pin_config = ptp_data->pin_config;
+
+ /* Currently only P2P mode is supported. When 802_1AS bit is set, it
+ * forwards all PTP packets to host port and none to other ports.
+ */
+ ret = ksz_rmw16(dev, REG_PTP_MSG_CONF1, PTP_TC_P2P | PTP_802_1AS,
+ PTP_TC_P2P | PTP_802_1AS);
+ if (ret)
+ return ret;
+
+ ptp_data->clock = ptp_clock_register(&ptp_data->caps, dev->dev);
+ if (IS_ERR_OR_NULL(ptp_data->clock))
+ return PTR_ERR(ptp_data->clock);
+
+ return 0;
+}
+
+void ksz_ptp_clock_unregister(struct dsa_switch *ds)
+{
+ struct ksz_device *dev = ds->priv;
+ struct ksz_ptp_data *ptp_data;
+
+ ptp_data = &dev->ptp_data;
+
+ if (ptp_data->clock)
+ ptp_clock_unregister(ptp_data->clock);
+}
+
+static irqreturn_t ksz_ptp_msg_thread_fn(int irq, void *dev_id)
+{
+ struct ksz_ptp_irq *ptpmsg_irq = dev_id;
+ struct ksz_device *dev;
+ struct ksz_port *port;
+ u32 tstamp_raw;
+ ktime_t tstamp;
+ int ret;
+
+ port = ptpmsg_irq->port;
+ dev = port->ksz_dev;
+
+ if (ptpmsg_irq->ts_en) {
+ ret = ksz_read32(dev, ptpmsg_irq->ts_reg, &tstamp_raw);
+ if (ret)
+ return IRQ_NONE;
+
+ tstamp = ksz_decode_tstamp(tstamp_raw);
+
+ port->tstamp_msg = ksz_tstamp_reconstruct(dev, tstamp);
+
+ complete(&port->tstamp_msg_comp);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t ksz_ptp_irq_thread_fn(int irq, void *dev_id)
+{
+ struct ksz_irq *ptpirq = dev_id;
+ unsigned int nhandled = 0;
+ struct ksz_device *dev;
+ unsigned int sub_irq;
+ u16 data;
+ int ret;
+ u8 n;
+
+ dev = ptpirq->dev;
+
+ ret = ksz_read16(dev, ptpirq->reg_status, &data);
+ if (ret)
+ goto out;
+
+ /* Clear the interrupts W1C */
+ ret = ksz_write16(dev, ptpirq->reg_status, data);
+ if (ret)
+ return IRQ_NONE;
+
+ for (n = 0; n < ptpirq->nirqs; ++n) {
+ if (data & BIT(n + KSZ_PTP_INT_START)) {
+ sub_irq = irq_find_mapping(ptpirq->domain, n);
+ handle_nested_irq(sub_irq);
+ ++nhandled;
+ }
+ }
+
+out:
+ return (nhandled > 0 ? IRQ_HANDLED : IRQ_NONE);
+}
+
+static void ksz_ptp_irq_mask(struct irq_data *d)
+{
+ struct ksz_irq *kirq = irq_data_get_irq_chip_data(d);
+
+ kirq->masked &= ~BIT(d->hwirq + KSZ_PTP_INT_START);
+}
+
+static void ksz_ptp_irq_unmask(struct irq_data *d)
+{
+ struct ksz_irq *kirq = irq_data_get_irq_chip_data(d);
+
+ kirq->masked |= BIT(d->hwirq + KSZ_PTP_INT_START);
+}
+
+static void ksz_ptp_irq_bus_lock(struct irq_data *d)
+{
+ struct ksz_irq *kirq = irq_data_get_irq_chip_data(d);
+
+ mutex_lock(&kirq->dev->lock_irq);
+}
+
+static void ksz_ptp_irq_bus_sync_unlock(struct irq_data *d)
+{
+ struct ksz_irq *kirq = irq_data_get_irq_chip_data(d);
+ struct ksz_device *dev = kirq->dev;
+ int ret;
+
+ ret = ksz_write16(dev, kirq->reg_mask, kirq->masked);
+ if (ret)
+ dev_err(dev->dev, "failed to change IRQ mask\n");
+
+ mutex_unlock(&dev->lock_irq);
+}
+
+static const struct irq_chip ksz_ptp_irq_chip = {
+ .name = "ksz-irq",
+ .irq_mask = ksz_ptp_irq_mask,
+ .irq_unmask = ksz_ptp_irq_unmask,
+ .irq_bus_lock = ksz_ptp_irq_bus_lock,
+ .irq_bus_sync_unlock = ksz_ptp_irq_bus_sync_unlock,
+};
+
+static int ksz_ptp_irq_domain_map(struct irq_domain *d,
+ unsigned int irq, irq_hw_number_t hwirq)
+{
+ irq_set_chip_data(irq, d->host_data);
+ irq_set_chip_and_handler(irq, &ksz_ptp_irq_chip, handle_level_irq);
+ irq_set_noprobe(irq);
+
+ return 0;
+}
+
+static const struct irq_domain_ops ksz_ptp_irq_domain_ops = {
+ .map = ksz_ptp_irq_domain_map,
+ .xlate = irq_domain_xlate_twocell,
+};
+
+static void ksz_ptp_msg_irq_free(struct ksz_port *port, u8 n)
+{
+ struct ksz_ptp_irq *ptpmsg_irq;
+
+ ptpmsg_irq = &port->ptpmsg_irq[n];
+
+ free_irq(ptpmsg_irq->num, ptpmsg_irq);
+ irq_dispose_mapping(ptpmsg_irq->num);
+}
+
+static int ksz_ptp_msg_irq_setup(struct ksz_port *port, u8 n)
+{
+ u16 ts_reg[] = {REG_PTP_PORT_PDRESP_TS, REG_PTP_PORT_XDELAY_TS,
+ REG_PTP_PORT_SYNC_TS};
+ static const char * const name[] = {"pdresp-msg", "xdreq-msg",
+ "sync-msg"};
+ const struct ksz_dev_ops *ops = port->ksz_dev->dev_ops;
+ struct ksz_ptp_irq *ptpmsg_irq;
+
+ ptpmsg_irq = &port->ptpmsg_irq[n];
+
+ ptpmsg_irq->port = port;
+ ptpmsg_irq->ts_reg = ops->get_port_addr(port->num, ts_reg[n]);
+
+ snprintf(ptpmsg_irq->name, sizeof(ptpmsg_irq->name), name[n]);
+
+ ptpmsg_irq->num = irq_find_mapping(port->ptpirq.domain, n);
+ if (ptpmsg_irq->num < 0)
+ return ptpmsg_irq->num;
+
+ return request_threaded_irq(ptpmsg_irq->num, NULL,
+ ksz_ptp_msg_thread_fn, IRQF_ONESHOT,
+ ptpmsg_irq->name, ptpmsg_irq);
+}
+
+int ksz_ptp_irq_setup(struct dsa_switch *ds, u8 p)
+{
+ struct ksz_device *dev = ds->priv;
+ const struct ksz_dev_ops *ops = dev->dev_ops;
+ struct ksz_port *port = &dev->ports[p];
+ struct ksz_irq *ptpirq = &port->ptpirq;
+ int irq;
+ int ret;
+
+ ptpirq->dev = dev;
+ ptpirq->masked = 0;
+ ptpirq->nirqs = 3;
+ ptpirq->reg_mask = ops->get_port_addr(p, REG_PTP_PORT_TX_INT_ENABLE__2);
+ ptpirq->reg_status = ops->get_port_addr(p,
+ REG_PTP_PORT_TX_INT_STATUS__2);
+ snprintf(ptpirq->name, sizeof(ptpirq->name), "ptp-irq-%d", p);
+
+ init_completion(&port->tstamp_msg_comp);
+
+ ptpirq->domain = irq_domain_add_linear(dev->dev->of_node, ptpirq->nirqs,
+ &ksz_ptp_irq_domain_ops, ptpirq);
+ if (!ptpirq->domain)
+ return -ENOMEM;
+
+ for (irq = 0; irq < ptpirq->nirqs; irq++)
+ irq_create_mapping(ptpirq->domain, irq);
+
+ ptpirq->irq_num = irq_find_mapping(port->pirq.domain, PORT_SRC_PTP_INT);
+ if (ptpirq->irq_num < 0) {
+ ret = ptpirq->irq_num;
+ goto out;
+ }
+
+ ret = request_threaded_irq(ptpirq->irq_num, NULL, ksz_ptp_irq_thread_fn,
+ IRQF_ONESHOT, ptpirq->name, ptpirq);
+ if (ret)
+ goto out;
+
+ for (irq = 0; irq < ptpirq->nirqs; irq++) {
+ ret = ksz_ptp_msg_irq_setup(port, irq);
+ if (ret)
+ goto out_ptp_msg;
+ }
+
+ return 0;
+
+out_ptp_msg:
+ free_irq(ptpirq->irq_num, ptpirq);
+ while (irq--)
+ free_irq(port->ptpmsg_irq[irq].num, &port->ptpmsg_irq[irq]);
+out:
+ for (irq = 0; irq < ptpirq->nirqs; irq++)
+ irq_dispose_mapping(port->ptpmsg_irq[irq].num);
+
+ irq_domain_remove(ptpirq->domain);
+
+ return ret;
+}
+
+void ksz_ptp_irq_free(struct dsa_switch *ds, u8 p)
+{
+ struct ksz_device *dev = ds->priv;
+ struct ksz_port *port = &dev->ports[p];
+ struct ksz_irq *ptpirq = &port->ptpirq;
+ u8 n;
+
+ for (n = 0; n < ptpirq->nirqs; n++)
+ ksz_ptp_msg_irq_free(port, n);
+
+ free_irq(ptpirq->irq_num, ptpirq);
+ irq_dispose_mapping(ptpirq->irq_num);
+
+ irq_domain_remove(ptpirq->domain);
+}
+
+MODULE_AUTHOR("Christian Eggers <ceggers@arri.de>");
+MODULE_AUTHOR("Arun Ramadoss <arun.ramadoss@microchip.com>");
+MODULE_DESCRIPTION("PTP support for KSZ switch");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/microchip/ksz_ptp.h b/drivers/net/dsa/microchip/ksz_ptp.h
new file mode 100644
index 000000000000..0ca8ca4f804e
--- /dev/null
+++ b/drivers/net/dsa/microchip/ksz_ptp.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Microchip KSZ PTP Implementation
+ *
+ * Copyright (C) 2020 ARRI Lighting
+ * Copyright (C) 2022 Microchip Technology Inc.
+ */
+
+#ifndef _NET_DSA_DRIVERS_KSZ_PTP_H
+#define _NET_DSA_DRIVERS_KSZ_PTP_H
+
+#if IS_ENABLED(CONFIG_NET_DSA_MICROCHIP_KSZ_PTP)
+
+#include <linux/ptp_clock_kernel.h>
+
+#define KSZ_PTP_N_GPIO 2
+
+enum ksz_ptp_tou_mode {
+ KSZ_PTP_TOU_IDLE,
+ KSZ_PTP_TOU_PEROUT,
+};
+
+struct ksz_ptp_data {
+ struct ptp_clock_info caps;
+ struct ptp_clock *clock;
+ struct ptp_pin_desc pin_config[KSZ_PTP_N_GPIO];
+ /* Serializes all operations on the PTP hardware clock */
+ struct mutex lock;
+ /* lock for accessing the clock_time */
+ spinlock_t clock_lock;
+ struct timespec64 clock_time;
+ enum ksz_ptp_tou_mode tou_mode;
+ struct timespec64 perout_target_time_first; /* start of first pulse */
+ struct timespec64 perout_period;
+};
+
+int ksz_ptp_clock_register(struct dsa_switch *ds);
+
+void ksz_ptp_clock_unregister(struct dsa_switch *ds);
+
+int ksz_get_ts_info(struct dsa_switch *ds, int port,
+ struct ethtool_ts_info *ts);
+int ksz_hwtstamp_get(struct dsa_switch *ds, int port, struct ifreq *ifr);
+int ksz_hwtstamp_set(struct dsa_switch *ds, int port, struct ifreq *ifr);
+void ksz_port_txtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb);
+void ksz_port_deferred_xmit(struct kthread_work *work);
+bool ksz_port_rxtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb,
+ unsigned int type);
+int ksz_ptp_irq_setup(struct dsa_switch *ds, u8 p);
+void ksz_ptp_irq_free(struct dsa_switch *ds, u8 p);
+
+#else
+
+struct ksz_ptp_data {
+ /* Serializes all operations on the PTP hardware clock */
+ struct mutex lock;
+};
+
+static inline int ksz_ptp_clock_register(struct dsa_switch *ds)
+{
+ return 0;
+}
+
+static inline void ksz_ptp_clock_unregister(struct dsa_switch *ds) { }
+
+static inline int ksz_ptp_irq_setup(struct dsa_switch *ds, u8 p)
+{
+ return 0;
+}
+
+static inline void ksz_ptp_irq_free(struct dsa_switch *ds, u8 p) {}
+
+#define ksz_get_ts_info NULL
+
+#define ksz_hwtstamp_get NULL
+
+#define ksz_hwtstamp_set NULL
+
+#define ksz_port_rxtstamp NULL
+
+#define ksz_port_txtstamp NULL
+
+#define ksz_port_deferred_xmit NULL
+
+#endif /* End of CONFIG_NET_DSA_MICROCHIP_KSZ_PTP */
+
+#endif
diff --git a/drivers/net/dsa/microchip/ksz_ptp_reg.h b/drivers/net/dsa/microchip/ksz_ptp_reg.h
new file mode 100644
index 000000000000..d71e85510cda
--- /dev/null
+++ b/drivers/net/dsa/microchip/ksz_ptp_reg.h
@@ -0,0 +1,142 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Microchip KSZ PTP register definitions
+ * Copyright (C) 2022 Microchip Technology Inc.
+ */
+
+#ifndef __KSZ_PTP_REGS_H
+#define __KSZ_PTP_REGS_H
+
+#define REG_SW_GLOBAL_LED_OVR__4 0x0120
+#define LED_OVR_2 BIT(1)
+#define LED_OVR_1 BIT(0)
+
+#define REG_SW_GLOBAL_LED_SRC__4 0x0128
+#define LED_SRC_PTP_GPIO_1 BIT(3)
+#define LED_SRC_PTP_GPIO_2 BIT(2)
+
+/* 5 - PTP Clock */
+#define REG_PTP_CLK_CTRL 0x0500
+
+#define PTP_STEP_ADJ BIT(6)
+#define PTP_STEP_DIR BIT(5)
+#define PTP_READ_TIME BIT(4)
+#define PTP_LOAD_TIME BIT(3)
+#define PTP_CLK_ADJ_ENABLE BIT(2)
+#define PTP_CLK_ENABLE BIT(1)
+#define PTP_CLK_RESET BIT(0)
+
+#define REG_PTP_RTC_SUB_NANOSEC__2 0x0502
+
+#define PTP_RTC_SUB_NANOSEC_M 0x0007
+#define PTP_RTC_0NS 0x00
+
+#define REG_PTP_RTC_NANOSEC 0x0504
+
+#define REG_PTP_RTC_SEC 0x0508
+
+#define REG_PTP_SUBNANOSEC_RATE 0x050C
+
+#define PTP_SUBNANOSEC_M 0x3FFFFFFF
+#define PTP_RATE_DIR BIT(31)
+#define PTP_TMP_RATE_ENABLE BIT(30)
+
+#define REG_PTP_SUBNANOSEC_RATE_L 0x050E
+
+#define REG_PTP_RATE_DURATION 0x0510
+#define REG_PTP_RATE_DURATION_H 0x0510
+#define REG_PTP_RATE_DURATION_L 0x0512
+
+#define REG_PTP_MSG_CONF1 0x0514
+
+#define PTP_802_1AS BIT(7)
+#define PTP_ENABLE BIT(6)
+#define PTP_ETH_ENABLE BIT(5)
+#define PTP_IPV4_UDP_ENABLE BIT(4)
+#define PTP_IPV6_UDP_ENABLE BIT(3)
+#define PTP_TC_P2P BIT(2)
+#define PTP_MASTER BIT(1)
+#define PTP_1STEP BIT(0)
+
+#define REG_PTP_UNIT_INDEX__4 0x0520
+
+#define PTP_GPIO_INDEX GENMASK(19, 16)
+#define PTP_TSI_INDEX BIT(8)
+#define PTP_TOU_INDEX GENMASK(1, 0)
+
+#define REG_PTP_TRIG_STATUS__4 0x0524
+
+#define TRIG_ERROR_M GENMASK(18, 16)
+#define TRIG_DONE_M GENMASK(2, 0)
+
+#define REG_PTP_INT_STATUS__4 0x0528
+
+#define TRIG_INT_M GENMASK(18, 16)
+#define TS_INT_M GENMASK(1, 0)
+
+#define REG_PTP_CTRL_STAT__4 0x052C
+
+#define GPIO_IN BIT(7)
+#define GPIO_OUT BIT(6)
+#define TS_INT_ENABLE BIT(5)
+#define TRIG_ACTIVE BIT(4)
+#define TRIG_ENABLE BIT(3)
+#define TRIG_RESET BIT(2)
+#define TS_ENABLE BIT(1)
+#define TS_RESET BIT(0)
+
+#define REG_TRIG_TARGET_NANOSEC 0x0530
+#define REG_TRIG_TARGET_SEC 0x0534
+
+#define REG_TRIG_CTRL__4 0x0538
+
+#define TRIG_CASCADE_ENABLE BIT(31)
+#define TRIG_CASCADE_TAIL BIT(30)
+#define TRIG_CASCADE_UPS_M GENMASK(29, 26)
+#define TRIG_NOW BIT(25)
+#define TRIG_NOTIFY BIT(24)
+#define TRIG_EDGE BIT(23)
+#define TRIG_PATTERN_M GENMASK(22, 20)
+#define TRIG_NEG_EDGE 0
+#define TRIG_POS_EDGE 1
+#define TRIG_NEG_PULSE 2
+#define TRIG_POS_PULSE 3
+#define TRIG_NEG_PERIOD 4
+#define TRIG_POS_PERIOD 5
+#define TRIG_REG_OUTPUT 6
+#define TRIG_GPO_M GENMASK(19, 16)
+#define TRIG_CASCADE_ITERATE_CNT_M GENMASK(15, 0)
+
+#define REG_TRIG_CYCLE_WIDTH 0x053C
+#define TRIG_CYCLE_WIDTH_M GENMASK(31, 0)
+
+#define REG_TRIG_CYCLE_CNT 0x0540
+
+#define TRIG_CYCLE_CNT_M GENMASK(31, 16)
+#define TRIG_BIT_PATTERN_M GENMASK(15, 0)
+
+#define REG_TRIG_ITERATE_TIME 0x0544
+
+#define REG_TRIG_PULSE_WIDTH__4 0x0548
+
+#define TRIG_PULSE_WIDTH_M GENMASK(23, 0)
+
+/* Port PTP Register */
+#define REG_PTP_PORT_RX_DELAY__2 0x0C00
+#define REG_PTP_PORT_TX_DELAY__2 0x0C02
+#define REG_PTP_PORT_ASYM_DELAY__2 0x0C04
+
+#define REG_PTP_PORT_XDELAY_TS 0x0C08
+#define REG_PTP_PORT_SYNC_TS 0x0C0C
+#define REG_PTP_PORT_PDRESP_TS 0x0C10
+
+#define REG_PTP_PORT_TX_INT_STATUS__2 0x0C14
+#define REG_PTP_PORT_TX_INT_ENABLE__2 0x0C16
+
+#define PTP_PORT_SYNC_INT BIT(15)
+#define PTP_PORT_XDELAY_REQ_INT BIT(14)
+#define PTP_PORT_PDELAY_RESP_INT BIT(13)
+#define KSZ_SYNC_MSG 2
+#define KSZ_XDREQ_MSG 1
+#define KSZ_PDRES_MSG 0
+
+#endif
diff --git a/drivers/net/dsa/microchip/lan937x.h b/drivers/net/dsa/microchip/lan937x.h
index 8e9e66d6728d..3388d91dbc44 100644
--- a/drivers/net/dsa/microchip/lan937x.h
+++ b/drivers/net/dsa/microchip/lan937x.h
@@ -20,4 +20,5 @@ void lan937x_phylink_get_caps(struct ksz_device *dev, int port,
struct phylink_config *config);
void lan937x_setup_rgmii_delay(struct ksz_device *dev, int port);
int lan937x_set_ageing_time(struct ksz_device *dev, unsigned int msecs);
+int lan937x_tc_cbs_set_cinc(struct ksz_device *dev, int port, u32 val);
#endif
diff --git a/drivers/net/dsa/microchip/lan937x_main.c b/drivers/net/dsa/microchip/lan937x_main.c
index 06d3d0308cba..399a3905e6ca 100644
--- a/drivers/net/dsa/microchip/lan937x_main.c
+++ b/drivers/net/dsa/microchip/lan937x_main.c
@@ -15,6 +15,7 @@
#include "lan937x_reg.h"
#include "ksz_common.h"
+#include "ksz9477.h"
#include "lan937x.h"
static int lan937x_cfg(struct ksz_device *dev, u32 addr, u8 bits, bool set)
@@ -180,6 +181,9 @@ void lan937x_port_setup(struct ksz_device *dev, int port, bool cpu_port)
lan937x_port_cfg(dev, port, REG_PORT_CTRL_0,
PORT_TAIL_TAG_ENABLE, true);
+ /* Enable the Port Queue split */
+ ksz9477_port_queue_split(dev, port);
+
/* set back pressure for half duplex */
lan937x_port_cfg(dev, port, REG_PORT_MAC_CTRL_1, PORT_BACK_PRESSURE,
true);
@@ -336,6 +340,11 @@ void lan937x_setup_rgmii_delay(struct ksz_device *dev, int port)
}
}
+int lan937x_tc_cbs_set_cinc(struct ksz_device *dev, int port, u32 val)
+{
+ return ksz_pwrite32(dev, port, REG_PORT_MTI_CREDIT_INCREMENT, val);
+}
+
int lan937x_switch_init(struct ksz_device *dev)
{
dev->port_mask = (1 << dev->info->port_cnt) - 1;
diff --git a/drivers/net/dsa/microchip/lan937x_reg.h b/drivers/net/dsa/microchip/lan937x_reg.h
index 5bc16a4c4441..45b606b6429f 100644
--- a/drivers/net/dsa/microchip/lan937x_reg.h
+++ b/drivers/net/dsa/microchip/lan937x_reg.h
@@ -185,6 +185,9 @@
#define P_PRIO_CTRL REG_PORT_MRI_PRIO_CTRL
+/* 9 - Shaping */
+#define REG_PORT_MTI_CREDIT_INCREMENT 0x091C
+
/* The port number as per the datasheet */
#define RGMII_2_PORT_NUM 5
#define RGMII_1_PORT_NUM 6
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index 908fa89444c9..3a15015bc409 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -608,17 +608,29 @@ mt7530_mib_reset(struct dsa_switch *ds)
mt7530_write(priv, MT7530_MIB_CCR, CCR_MIB_ACTIVATE);
}
-static int mt7530_phy_read(struct mt7530_priv *priv, int port, int regnum)
+static int mt7530_phy_read_c22(struct mt7530_priv *priv, int port, int regnum)
{
return mdiobus_read_nested(priv->bus, port, regnum);
}
-static int mt7530_phy_write(struct mt7530_priv *priv, int port, int regnum,
- u16 val)
+static int mt7530_phy_write_c22(struct mt7530_priv *priv, int port, int regnum,
+ u16 val)
{
return mdiobus_write_nested(priv->bus, port, regnum, val);
}
+static int mt7530_phy_read_c45(struct mt7530_priv *priv, int port,
+ int devad, int regnum)
+{
+ return mdiobus_c45_read_nested(priv->bus, port, devad, regnum);
+}
+
+static int mt7530_phy_write_c45(struct mt7530_priv *priv, int port, int devad,
+ int regnum, u16 val)
+{
+ return mdiobus_c45_write_nested(priv->bus, port, devad, regnum, val);
+}
+
static int
mt7531_ind_c45_phy_read(struct mt7530_priv *priv, int port, int devad,
int regnum)
@@ -670,7 +682,7 @@ out:
static int
mt7531_ind_c45_phy_write(struct mt7530_priv *priv, int port, int devad,
- int regnum, u32 data)
+ int regnum, u16 data)
{
struct mii_bus *bus = priv->bus;
struct mt7530_dummy_poll p;
@@ -793,55 +805,36 @@ out:
}
static int
-mt7531_ind_phy_read(struct mt7530_priv *priv, int port, int regnum)
+mt753x_phy_read_c22(struct mii_bus *bus, int port, int regnum)
{
- int devad;
- int ret;
-
- if (regnum & MII_ADDR_C45) {
- devad = (regnum >> MII_DEVADDR_C45_SHIFT) & 0x1f;
- ret = mt7531_ind_c45_phy_read(priv, port, devad,
- regnum & MII_REGADDR_C45_MASK);
- } else {
- ret = mt7531_ind_c22_phy_read(priv, port, regnum);
- }
+ struct mt7530_priv *priv = bus->priv;
- return ret;
+ return priv->info->phy_read_c22(priv, port, regnum);
}
static int
-mt7531_ind_phy_write(struct mt7530_priv *priv, int port, int regnum,
- u16 data)
+mt753x_phy_read_c45(struct mii_bus *bus, int port, int devad, int regnum)
{
- int devad;
- int ret;
-
- if (regnum & MII_ADDR_C45) {
- devad = (regnum >> MII_DEVADDR_C45_SHIFT) & 0x1f;
- ret = mt7531_ind_c45_phy_write(priv, port, devad,
- regnum & MII_REGADDR_C45_MASK,
- data);
- } else {
- ret = mt7531_ind_c22_phy_write(priv, port, regnum, data);
- }
+ struct mt7530_priv *priv = bus->priv;
- return ret;
+ return priv->info->phy_read_c45(priv, port, devad, regnum);
}
static int
-mt753x_phy_read(struct mii_bus *bus, int port, int regnum)
+mt753x_phy_write_c22(struct mii_bus *bus, int port, int regnum, u16 val)
{
struct mt7530_priv *priv = bus->priv;
- return priv->info->phy_read(priv, port, regnum);
+ return priv->info->phy_write_c22(priv, port, regnum, val);
}
static int
-mt753x_phy_write(struct mii_bus *bus, int port, int regnum, u16 val)
+mt753x_phy_write_c45(struct mii_bus *bus, int port, int devad, int regnum,
+ u16 val)
{
struct mt7530_priv *priv = bus->priv;
- return priv->info->phy_write(priv, port, regnum, val);
+ return priv->info->phy_write_c45(priv, port, devad, regnum, val);
}
static void
@@ -1309,14 +1302,26 @@ mt7530_port_set_vlan_aware(struct dsa_switch *ds, int port)
if (!priv->ports[port].pvid)
mt7530_rmw(priv, MT7530_PVC_P(port), ACC_FRM_MASK,
MT7530_VLAN_ACC_TAGGED);
- }
- /* Set the port as a user port which is to be able to recognize VID
- * from incoming packets before fetching entry within the VLAN table.
- */
- mt7530_rmw(priv, MT7530_PVC_P(port), VLAN_ATTR_MASK | PVC_EG_TAG_MASK,
- VLAN_ATTR(MT7530_VLAN_USER) |
- PVC_EG_TAG(MT7530_VLAN_EG_DISABLED));
+ /* Set the port as a user port which is to be able to recognize
+ * VID from incoming packets before fetching entry within the
+ * VLAN table.
+ */
+ mt7530_rmw(priv, MT7530_PVC_P(port),
+ VLAN_ATTR_MASK | PVC_EG_TAG_MASK,
+ VLAN_ATTR(MT7530_VLAN_USER) |
+ PVC_EG_TAG(MT7530_VLAN_EG_DISABLED));
+ } else {
+ /* Also set CPU ports to the "user" VLAN port attribute, to
+ * allow VLAN classification, but keep the EG_TAG attribute as
+ * "consistent" (i.o.w. don't change its value) for packets
+ * received by the switch from the CPU, so that tagged packets
+ * are forwarded to user ports as tagged, and untagged as
+ * untagged.
+ */
+ mt7530_rmw(priv, MT7530_PVC_P(port), VLAN_ATTR_MASK,
+ VLAN_ATTR(MT7530_VLAN_USER));
+ }
}
static void
@@ -2086,8 +2091,10 @@ mt7530_setup_mdio(struct mt7530_priv *priv)
bus->priv = priv;
bus->name = KBUILD_MODNAME "-mii";
snprintf(bus->id, MII_BUS_ID_SIZE, KBUILD_MODNAME "-%d", idx++);
- bus->read = mt753x_phy_read;
- bus->write = mt753x_phy_write;
+ bus->read = mt753x_phy_read_c22;
+ bus->write = mt753x_phy_write_c22;
+ bus->read_c45 = mt753x_phy_read_c45;
+ bus->write_c45 = mt753x_phy_write_c45;
bus->parent = dev;
bus->phy_mask = ~ds->phys_mii_mask;
@@ -3182,8 +3189,10 @@ static const struct mt753x_info mt753x_table[] = {
.id = ID_MT7621,
.pcs_ops = &mt7530_pcs_ops,
.sw_setup = mt7530_setup,
- .phy_read = mt7530_phy_read,
- .phy_write = mt7530_phy_write,
+ .phy_read_c22 = mt7530_phy_read_c22,
+ .phy_write_c22 = mt7530_phy_write_c22,
+ .phy_read_c45 = mt7530_phy_read_c45,
+ .phy_write_c45 = mt7530_phy_write_c45,
.pad_setup = mt7530_pad_clk_setup,
.mac_port_get_caps = mt7530_mac_port_get_caps,
.mac_port_config = mt7530_mac_config,
@@ -3192,8 +3201,10 @@ static const struct mt753x_info mt753x_table[] = {
.id = ID_MT7530,
.pcs_ops = &mt7530_pcs_ops,
.sw_setup = mt7530_setup,
- .phy_read = mt7530_phy_read,
- .phy_write = mt7530_phy_write,
+ .phy_read_c22 = mt7530_phy_read_c22,
+ .phy_write_c22 = mt7530_phy_write_c22,
+ .phy_read_c45 = mt7530_phy_read_c45,
+ .phy_write_c45 = mt7530_phy_write_c45,
.pad_setup = mt7530_pad_clk_setup,
.mac_port_get_caps = mt7530_mac_port_get_caps,
.mac_port_config = mt7530_mac_config,
@@ -3202,8 +3213,10 @@ static const struct mt753x_info mt753x_table[] = {
.id = ID_MT7531,
.pcs_ops = &mt7531_pcs_ops,
.sw_setup = mt7531_setup,
- .phy_read = mt7531_ind_phy_read,
- .phy_write = mt7531_ind_phy_write,
+ .phy_read_c22 = mt7531_ind_c22_phy_read,
+ .phy_write_c22 = mt7531_ind_c22_phy_write,
+ .phy_read_c45 = mt7531_ind_c45_phy_read,
+ .phy_write_c45 = mt7531_ind_c45_phy_write,
.pad_setup = mt7531_pad_setup,
.cpu_port_config = mt7531_cpu_port_config,
.mac_port_get_caps = mt7531_mac_port_get_caps,
@@ -3263,7 +3276,7 @@ mt7530_probe(struct mdio_device *mdiodev)
* properly.
*/
if (!priv->info->sw_setup || !priv->info->pad_setup ||
- !priv->info->phy_read || !priv->info->phy_write ||
+ !priv->info->phy_read_c22 || !priv->info->phy_write_c22 ||
!priv->info->mac_port_get_caps ||
!priv->info->mac_port_config)
return -EINVAL;
diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h
index e8d966435350..6b2fc6290ea8 100644
--- a/drivers/net/dsa/mt7530.h
+++ b/drivers/net/dsa/mt7530.h
@@ -750,8 +750,10 @@ struct mt753x_pcs {
/* struct mt753x_info - This is the main data structure for holding the specific
* part for each supported device
* @sw_setup: Holding the handler to a device initialization
- * @phy_read: Holding the way reading PHY port
- * @phy_write: Holding the way writing PHY port
+ * @phy_read_c22: Holding the way reading PHY port using C22
+ * @phy_write_c22: Holding the way writing PHY port using C22
+ * @phy_read_c45: Holding the way reading PHY port using C45
+ * @phy_write_c45: Holding the way writing PHY port using C45
* @pad_setup: Holding the way setting up the bus pad for a certain
* MAC port
* @phy_mode_supported: Check if the PHY type is being supported on a certain
@@ -767,8 +769,13 @@ struct mt753x_info {
const struct phylink_pcs_ops *pcs_ops;
int (*sw_setup)(struct dsa_switch *ds);
- int (*phy_read)(struct mt7530_priv *priv, int port, int regnum);
- int (*phy_write)(struct mt7530_priv *priv, int port, int regnum, u16 val);
+ int (*phy_read_c22)(struct mt7530_priv *priv, int port, int regnum);
+ int (*phy_write_c22)(struct mt7530_priv *priv, int port, int regnum,
+ u16 val);
+ int (*phy_read_c45)(struct mt7530_priv *priv, int port, int devad,
+ int regnum);
+ int (*phy_write_c45)(struct mt7530_priv *priv, int port, int devad,
+ int regnum, u16 val);
int (*pad_setup)(struct dsa_switch *ds, phy_interface_t interface);
int (*cpu_port_config)(struct dsa_switch *ds, int port);
void (*mac_port_get_caps)(struct dsa_switch *ds, int port,
diff --git a/drivers/net/dsa/mv88e6xxx/Makefile b/drivers/net/dsa/mv88e6xxx/Makefile
index 49bf358b9c4f..1409e691ab77 100644
--- a/drivers/net/dsa/mv88e6xxx/Makefile
+++ b/drivers/net/dsa/mv88e6xxx/Makefile
@@ -15,6 +15,7 @@ mv88e6xxx-objs += port_hidden.o
mv88e6xxx-$(CONFIG_NET_DSA_MV88E6XXX_PTP) += ptp.o
mv88e6xxx-objs += serdes.o
mv88e6xxx-objs += smi.o
+mv88e6xxx-objs += switchdev.o
mv88e6xxx-objs += trace.o
# for tracing framework to find trace.h
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 242b8b325504..0a5d6c7bb128 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -1728,11 +1728,11 @@ static int mv88e6xxx_vtu_get(struct mv88e6xxx_chip *chip, u16 vid,
return err;
}
-static int mv88e6xxx_vtu_walk(struct mv88e6xxx_chip *chip,
- int (*cb)(struct mv88e6xxx_chip *chip,
- const struct mv88e6xxx_vtu_entry *entry,
- void *priv),
- void *priv)
+int mv88e6xxx_vtu_walk(struct mv88e6xxx_chip *chip,
+ int (*cb)(struct mv88e6xxx_chip *chip,
+ const struct mv88e6xxx_vtu_entry *entry,
+ void *priv),
+ void *priv)
{
struct mv88e6xxx_vtu_entry entry = {
.vid = mv88e6xxx_max_vid(chip),
@@ -3884,6 +3884,24 @@ static int mv88e6xxx_mdio_read(struct mii_bus *bus, int phy, int reg)
return err ? err : val;
}
+static int mv88e6xxx_mdio_read_c45(struct mii_bus *bus, int phy, int devad,
+ int reg)
+{
+ struct mv88e6xxx_mdio_bus *mdio_bus = bus->priv;
+ struct mv88e6xxx_chip *chip = mdio_bus->chip;
+ u16 val;
+ int err;
+
+ if (!chip->info->ops->phy_read_c45)
+ return -EOPNOTSUPP;
+
+ mv88e6xxx_reg_lock(chip);
+ err = chip->info->ops->phy_read_c45(chip, bus, phy, devad, reg, &val);
+ mv88e6xxx_reg_unlock(chip);
+
+ return err ? err : val;
+}
+
static int mv88e6xxx_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)
{
struct mv88e6xxx_mdio_bus *mdio_bus = bus->priv;
@@ -3900,6 +3918,23 @@ static int mv88e6xxx_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)
return err;
}
+static int mv88e6xxx_mdio_write_c45(struct mii_bus *bus, int phy, int devad,
+ int reg, u16 val)
+{
+ struct mv88e6xxx_mdio_bus *mdio_bus = bus->priv;
+ struct mv88e6xxx_chip *chip = mdio_bus->chip;
+ int err;
+
+ if (!chip->info->ops->phy_write_c45)
+ return -EOPNOTSUPP;
+
+ mv88e6xxx_reg_lock(chip);
+ err = chip->info->ops->phy_write_c45(chip, bus, phy, devad, reg, val);
+ mv88e6xxx_reg_unlock(chip);
+
+ return err;
+}
+
static int mv88e6xxx_mdio_register(struct mv88e6xxx_chip *chip,
struct device_node *np,
bool external)
@@ -3938,6 +3973,8 @@ static int mv88e6xxx_mdio_register(struct mv88e6xxx_chip *chip,
bus->read = mv88e6xxx_mdio_read;
bus->write = mv88e6xxx_mdio_write;
+ bus->read_c45 = mv88e6xxx_mdio_read_c45;
+ bus->write_c45 = mv88e6xxx_mdio_write_c45;
bus->parent = chip->dev;
if (!external) {
@@ -4149,8 +4186,10 @@ static const struct mv88e6xxx_ops mv88e6097_ops = {
.ip_pri_map = mv88e6085_g1_ip_pri_map,
.irl_init_all = mv88e6352_g2_irl_init_all,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
- .phy_read = mv88e6xxx_g2_smi_phy_read,
- .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .phy_read = mv88e6xxx_g2_smi_phy_read_c22,
+ .phy_write = mv88e6xxx_g2_smi_phy_write_c22,
+ .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
+ .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6185_port_sync_link,
.port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
@@ -4198,8 +4237,10 @@ static const struct mv88e6xxx_ops mv88e6123_ops = {
.ip_pri_map = mv88e6085_g1_ip_pri_map,
.irl_init_all = mv88e6352_g2_irl_init_all,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
- .phy_read = mv88e6xxx_g2_smi_phy_read,
- .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .phy_read = mv88e6xxx_g2_smi_phy_read_c22,
+ .phy_write = mv88e6xxx_g2_smi_phy_write_c22,
+ .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
+ .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
@@ -4279,8 +4320,10 @@ static const struct mv88e6xxx_ops mv88e6141_ops = {
.get_eeprom = mv88e6xxx_g2_get_eeprom8,
.set_eeprom = mv88e6xxx_g2_set_eeprom8,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
- .phy_read = mv88e6xxx_g2_smi_phy_read,
- .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .phy_read = mv88e6xxx_g2_smi_phy_read_c22,
+ .phy_write = mv88e6xxx_g2_smi_phy_write_c22,
+ .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
+ .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
@@ -4343,8 +4386,10 @@ static const struct mv88e6xxx_ops mv88e6161_ops = {
.ip_pri_map = mv88e6085_g1_ip_pri_map,
.irl_init_all = mv88e6352_g2_irl_init_all,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
- .phy_read = mv88e6xxx_g2_smi_phy_read,
- .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .phy_read = mv88e6xxx_g2_smi_phy_read_c22,
+ .phy_write = mv88e6xxx_g2_smi_phy_write_c22,
+ .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
+ .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_speed_duplex = mv88e6185_port_set_speed_duplex,
@@ -4426,8 +4471,10 @@ static const struct mv88e6xxx_ops mv88e6171_ops = {
.ip_pri_map = mv88e6085_g1_ip_pri_map,
.irl_init_all = mv88e6352_g2_irl_init_all,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
- .phy_read = mv88e6xxx_g2_smi_phy_read,
- .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .phy_read = mv88e6xxx_g2_smi_phy_read_c22,
+ .phy_write = mv88e6xxx_g2_smi_phy_write_c22,
+ .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
+ .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay,
@@ -4472,8 +4519,10 @@ static const struct mv88e6xxx_ops mv88e6172_ops = {
.get_eeprom = mv88e6xxx_g2_get_eeprom16,
.set_eeprom = mv88e6xxx_g2_set_eeprom16,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
- .phy_read = mv88e6xxx_g2_smi_phy_read,
- .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .phy_read = mv88e6xxx_g2_smi_phy_read_c22,
+ .phy_write = mv88e6xxx_g2_smi_phy_write_c22,
+ .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
+ .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay,
@@ -4527,8 +4576,10 @@ static const struct mv88e6xxx_ops mv88e6175_ops = {
.ip_pri_map = mv88e6085_g1_ip_pri_map,
.irl_init_all = mv88e6352_g2_irl_init_all,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
- .phy_read = mv88e6xxx_g2_smi_phy_read,
- .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .phy_read = mv88e6xxx_g2_smi_phy_read_c22,
+ .phy_write = mv88e6xxx_g2_smi_phy_write_c22,
+ .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
+ .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay,
@@ -4573,8 +4624,10 @@ static const struct mv88e6xxx_ops mv88e6176_ops = {
.get_eeprom = mv88e6xxx_g2_get_eeprom16,
.set_eeprom = mv88e6xxx_g2_set_eeprom16,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
- .phy_read = mv88e6xxx_g2_smi_phy_read,
- .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .phy_read = mv88e6xxx_g2_smi_phy_read_c22,
+ .phy_write = mv88e6xxx_g2_smi_phy_write_c22,
+ .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
+ .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay,
@@ -4673,8 +4726,10 @@ static const struct mv88e6xxx_ops mv88e6190_ops = {
.get_eeprom = mv88e6xxx_g2_get_eeprom8,
.set_eeprom = mv88e6xxx_g2_set_eeprom8,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
- .phy_read = mv88e6xxx_g2_smi_phy_read,
- .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .phy_read = mv88e6xxx_g2_smi_phy_read_c22,
+ .phy_write = mv88e6xxx_g2_smi_phy_write_c22,
+ .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
+ .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
@@ -4736,8 +4791,10 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = {
.get_eeprom = mv88e6xxx_g2_get_eeprom8,
.set_eeprom = mv88e6xxx_g2_set_eeprom8,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
- .phy_read = mv88e6xxx_g2_smi_phy_read,
- .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .phy_read = mv88e6xxx_g2_smi_phy_read_c22,
+ .phy_write = mv88e6xxx_g2_smi_phy_write_c22,
+ .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
+ .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
@@ -4799,8 +4856,10 @@ static const struct mv88e6xxx_ops mv88e6191_ops = {
.get_eeprom = mv88e6xxx_g2_get_eeprom8,
.set_eeprom = mv88e6xxx_g2_set_eeprom8,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
- .phy_read = mv88e6xxx_g2_smi_phy_read,
- .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .phy_read = mv88e6xxx_g2_smi_phy_read_c22,
+ .phy_write = mv88e6xxx_g2_smi_phy_write_c22,
+ .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
+ .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
@@ -4862,8 +4921,10 @@ static const struct mv88e6xxx_ops mv88e6240_ops = {
.get_eeprom = mv88e6xxx_g2_get_eeprom16,
.set_eeprom = mv88e6xxx_g2_set_eeprom16,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
- .phy_read = mv88e6xxx_g2_smi_phy_read,
- .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .phy_read = mv88e6xxx_g2_smi_phy_read_c22,
+ .phy_write = mv88e6xxx_g2_smi_phy_write_c22,
+ .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
+ .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay,
@@ -4925,8 +4986,10 @@ static const struct mv88e6xxx_ops mv88e6250_ops = {
.get_eeprom = mv88e6xxx_g2_get_eeprom16,
.set_eeprom = mv88e6xxx_g2_set_eeprom16,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
- .phy_read = mv88e6xxx_g2_smi_phy_read,
- .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .phy_read = mv88e6xxx_g2_smi_phy_read_c22,
+ .phy_write = mv88e6xxx_g2_smi_phy_write_c22,
+ .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
+ .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay,
@@ -4964,8 +5027,10 @@ static const struct mv88e6xxx_ops mv88e6290_ops = {
.get_eeprom = mv88e6xxx_g2_get_eeprom8,
.set_eeprom = mv88e6xxx_g2_set_eeprom8,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
- .phy_read = mv88e6xxx_g2_smi_phy_read,
- .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .phy_read = mv88e6xxx_g2_smi_phy_read_c22,
+ .phy_write = mv88e6xxx_g2_smi_phy_write_c22,
+ .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
+ .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
@@ -5017,7 +5082,7 @@ static const struct mv88e6xxx_ops mv88e6290_ops = {
.serdes_get_regs = mv88e6390_serdes_get_regs,
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6390_avb_ops,
- .ptp_ops = &mv88e6352_ptp_ops,
+ .ptp_ops = &mv88e6390_ptp_ops,
.phylink_get_caps = mv88e6390_phylink_get_caps,
};
@@ -5029,8 +5094,10 @@ static const struct mv88e6xxx_ops mv88e6320_ops = {
.get_eeprom = mv88e6xxx_g2_get_eeprom16,
.set_eeprom = mv88e6xxx_g2_set_eeprom16,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
- .phy_read = mv88e6xxx_g2_smi_phy_read,
- .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .phy_read = mv88e6xxx_g2_smi_phy_read_c22,
+ .phy_write = mv88e6xxx_g2_smi_phy_write_c22,
+ .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
+ .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6320_port_set_rgmii_delay,
@@ -5074,8 +5141,10 @@ static const struct mv88e6xxx_ops mv88e6321_ops = {
.get_eeprom = mv88e6xxx_g2_get_eeprom16,
.set_eeprom = mv88e6xxx_g2_set_eeprom16,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
- .phy_read = mv88e6xxx_g2_smi_phy_read,
- .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .phy_read = mv88e6xxx_g2_smi_phy_read_c22,
+ .phy_write = mv88e6xxx_g2_smi_phy_write_c22,
+ .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
+ .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6320_port_set_rgmii_delay,
@@ -5117,8 +5186,10 @@ static const struct mv88e6xxx_ops mv88e6341_ops = {
.get_eeprom = mv88e6xxx_g2_get_eeprom8,
.set_eeprom = mv88e6xxx_g2_set_eeprom8,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
- .phy_read = mv88e6xxx_g2_smi_phy_read,
- .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .phy_read = mv88e6xxx_g2_smi_phy_read_c22,
+ .phy_write = mv88e6xxx_g2_smi_phy_write_c22,
+ .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
+ .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
@@ -5183,8 +5254,10 @@ static const struct mv88e6xxx_ops mv88e6350_ops = {
.ip_pri_map = mv88e6085_g1_ip_pri_map,
.irl_init_all = mv88e6352_g2_irl_init_all,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
- .phy_read = mv88e6xxx_g2_smi_phy_read,
- .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .phy_read = mv88e6xxx_g2_smi_phy_read_c22,
+ .phy_write = mv88e6xxx_g2_smi_phy_write_c22,
+ .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
+ .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay,
@@ -5227,8 +5300,10 @@ static const struct mv88e6xxx_ops mv88e6351_ops = {
.ip_pri_map = mv88e6085_g1_ip_pri_map,
.irl_init_all = mv88e6352_g2_irl_init_all,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
- .phy_read = mv88e6xxx_g2_smi_phy_read,
- .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .phy_read = mv88e6xxx_g2_smi_phy_read_c22,
+ .phy_write = mv88e6xxx_g2_smi_phy_write_c22,
+ .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
+ .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay,
@@ -5275,8 +5350,10 @@ static const struct mv88e6xxx_ops mv88e6352_ops = {
.get_eeprom = mv88e6xxx_g2_get_eeprom16,
.set_eeprom = mv88e6xxx_g2_set_eeprom16,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
- .phy_read = mv88e6xxx_g2_smi_phy_read,
- .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .phy_read = mv88e6xxx_g2_smi_phy_read_c22,
+ .phy_write = mv88e6xxx_g2_smi_phy_write_c22,
+ .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
+ .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6352_port_set_rgmii_delay,
@@ -5340,8 +5417,10 @@ static const struct mv88e6xxx_ops mv88e6390_ops = {
.get_eeprom = mv88e6xxx_g2_get_eeprom8,
.set_eeprom = mv88e6xxx_g2_set_eeprom8,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
- .phy_read = mv88e6xxx_g2_smi_phy_read,
- .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .phy_read = mv88e6xxx_g2_smi_phy_read_c22,
+ .phy_write = mv88e6xxx_g2_smi_phy_write_c22,
+ .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
+ .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
@@ -5391,7 +5470,7 @@ static const struct mv88e6xxx_ops mv88e6390_ops = {
.serdes_irq_status = mv88e6390_serdes_irq_status,
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6390_avb_ops,
- .ptp_ops = &mv88e6352_ptp_ops,
+ .ptp_ops = &mv88e6390_ptp_ops,
.serdes_get_sset_count = mv88e6390_serdes_get_sset_count,
.serdes_get_strings = mv88e6390_serdes_get_strings,
.serdes_get_stats = mv88e6390_serdes_get_stats,
@@ -5407,8 +5486,10 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = {
.get_eeprom = mv88e6xxx_g2_get_eeprom8,
.set_eeprom = mv88e6xxx_g2_set_eeprom8,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
- .phy_read = mv88e6xxx_g2_smi_phy_read,
- .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .phy_read = mv88e6xxx_g2_smi_phy_read_c22,
+ .phy_write = mv88e6xxx_g2_smi_phy_write_c22,
+ .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
+ .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
@@ -5462,7 +5543,7 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = {
.serdes_get_regs = mv88e6390_serdes_get_regs,
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6390_avb_ops,
- .ptp_ops = &mv88e6352_ptp_ops,
+ .ptp_ops = &mv88e6390_ptp_ops,
.phylink_get_caps = mv88e6390x_phylink_get_caps,
};
@@ -5473,8 +5554,10 @@ static const struct mv88e6xxx_ops mv88e6393x_ops = {
.get_eeprom = mv88e6xxx_g2_get_eeprom8,
.set_eeprom = mv88e6xxx_g2_set_eeprom8,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
- .phy_read = mv88e6xxx_g2_smi_phy_read,
- .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .phy_read = mv88e6xxx_g2_smi_phy_read_c22,
+ .phy_write = mv88e6xxx_g2_smi_phy_write_c22,
+ .phy_read_c45 = mv88e6xxx_g2_smi_phy_read_c45,
+ .phy_write_c45 = mv88e6xxx_g2_smi_phy_write_c45,
.port_set_link = mv88e6xxx_port_set_link,
.port_sync_link = mv88e6xxx_port_sync_link,
.port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
@@ -6526,7 +6609,7 @@ static int mv88e6xxx_port_pre_bridge_flags(struct dsa_switch *ds, int port,
const struct mv88e6xxx_ops *ops;
if (flags.mask & ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD |
- BR_BCAST_FLOOD | BR_PORT_LOCKED))
+ BR_BCAST_FLOOD | BR_PORT_LOCKED | BR_PORT_MAB))
return -EINVAL;
ops = chip->info->ops;
@@ -6545,7 +6628,7 @@ static int mv88e6xxx_port_bridge_flags(struct dsa_switch *ds, int port,
struct netlink_ext_ack *extack)
{
struct mv88e6xxx_chip *chip = ds->priv;
- int err = -EOPNOTSUPP;
+ int err = 0;
mv88e6xxx_reg_lock(chip);
@@ -6584,6 +6667,12 @@ static int mv88e6xxx_port_bridge_flags(struct dsa_switch *ds, int port,
goto out;
}
+ if (flags.mask & BR_PORT_MAB) {
+ bool mab = !!(flags.val & BR_PORT_MAB);
+
+ mv88e6xxx_port_set_mab(chip, port, mab);
+ }
+
if (flags.mask & BR_PORT_LOCKED) {
bool locked = !!(flags.val & BR_PORT_LOCKED);
diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h
index e693154cf803..da6e1339f809 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.h
+++ b/drivers/net/dsa/mv88e6xxx/chip.h
@@ -280,6 +280,9 @@ struct mv88e6xxx_port {
unsigned int serdes_irq;
char serdes_irq_name[64];
struct devlink_region *region;
+
+ /* MacAuth Bypass control flag */
+ bool mab;
};
enum mv88e6xxx_region_id {
@@ -451,6 +454,13 @@ struct mv88e6xxx_ops {
struct mii_bus *bus,
int addr, int reg, u16 val);
+ int (*phy_read_c45)(struct mv88e6xxx_chip *chip,
+ struct mii_bus *bus,
+ int addr, int devad, int reg, u16 *val);
+ int (*phy_write_c45)(struct mv88e6xxx_chip *chip,
+ struct mii_bus *bus,
+ int addr, int devad, int reg, u16 val);
+
/* Priority Override Table operations */
int (*pot_clear)(struct mv88e6xxx_chip *chip);
@@ -705,6 +715,7 @@ struct mv88e6xxx_ptp_ops {
int (*port_disable)(struct mv88e6xxx_chip *chip, int port);
int (*global_enable)(struct mv88e6xxx_chip *chip);
int (*global_disable)(struct mv88e6xxx_chip *chip);
+ int (*set_ptp_cpu_port)(struct mv88e6xxx_chip *chip, int port);
int n_ext_ts;
int arr0_sts_reg;
int arr1_sts_reg;
@@ -784,6 +795,12 @@ static inline bool mv88e6xxx_is_invalid_port(struct mv88e6xxx_chip *chip, int po
return (chip->info->invalid_port_mask & BIT(port)) != 0;
}
+static inline void mv88e6xxx_port_set_mab(struct mv88e6xxx_chip *chip,
+ int port, bool mab)
+{
+ chip->ports[port].mab = mab;
+}
+
int mv88e6xxx_read(struct mv88e6xxx_chip *chip, int addr, int reg, u16 *val);
int mv88e6xxx_write(struct mv88e6xxx_chip *chip, int addr, int reg, u16 val);
int mv88e6xxx_wait_mask(struct mv88e6xxx_chip *chip, int addr, int reg,
@@ -802,6 +819,12 @@ static inline void mv88e6xxx_reg_unlock(struct mv88e6xxx_chip *chip)
mutex_unlock(&chip->reg_lock);
}
+int mv88e6xxx_vtu_walk(struct mv88e6xxx_chip *chip,
+ int (*cb)(struct mv88e6xxx_chip *chip,
+ const struct mv88e6xxx_vtu_entry *entry,
+ void *priv),
+ void *priv);
+
int mv88e6xxx_fid_map(struct mv88e6xxx_chip *chip, unsigned long *bitmap);
#endif /* _MV88E6XXX_CHIP_H */
diff --git a/drivers/net/dsa/mv88e6xxx/global1.c b/drivers/net/dsa/mv88e6xxx/global1.c
index 5848112036b0..2fa55a643591 100644
--- a/drivers/net/dsa/mv88e6xxx/global1.c
+++ b/drivers/net/dsa/mv88e6xxx/global1.c
@@ -403,6 +403,18 @@ int mv88e6390_g1_set_cpu_port(struct mv88e6xxx_chip *chip, int port)
return mv88e6390_g1_monitor_write(chip, ptr, port);
}
+int mv88e6390_g1_set_ptp_cpu_port(struct mv88e6xxx_chip *chip, int port)
+{
+ u16 ptr = MV88E6390_G1_MONITOR_MGMT_CTL_PTR_PTP_CPU_DEST;
+
+ /* Use the default high priority for PTP frames sent to
+ * the CPU.
+ */
+ port |= MV88E6390_G1_MONITOR_MGMT_CTL_PTR_CPU_DEST_MGMTPRI;
+
+ return mv88e6390_g1_monitor_write(chip, ptr, port);
+}
+
int mv88e6390_g1_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip)
{
u16 ptr;
diff --git a/drivers/net/dsa/mv88e6xxx/global1.h b/drivers/net/dsa/mv88e6xxx/global1.h
index 65958b2a0d3a..c99ddd117fe6 100644
--- a/drivers/net/dsa/mv88e6xxx/global1.h
+++ b/drivers/net/dsa/mv88e6xxx/global1.h
@@ -214,6 +214,7 @@
#define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_INGRESS_DEST 0x2000
#define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_EGRESS_DEST 0x2100
#define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_CPU_DEST 0x3000
+#define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_PTP_CPU_DEST 0x3200
#define MV88E6390_G1_MONITOR_MGMT_CTL_PTR_CPU_DEST_MGMTPRI 0x00e0
#define MV88E6390_G1_MONITOR_MGMT_CTL_DATA_MASK 0x00ff
@@ -303,6 +304,7 @@ int mv88e6390_g1_set_egress_port(struct mv88e6xxx_chip *chip,
int port);
int mv88e6095_g1_set_cpu_port(struct mv88e6xxx_chip *chip, int port);
int mv88e6390_g1_set_cpu_port(struct mv88e6xxx_chip *chip, int port);
+int mv88e6390_g1_set_ptp_cpu_port(struct mv88e6xxx_chip *chip, int port);
int mv88e6390_g1_mgmt_rsvd2cpu(struct mv88e6xxx_chip *chip);
int mv88e6085_g1_ip_pri_map(struct mv88e6xxx_chip *chip);
diff --git a/drivers/net/dsa/mv88e6xxx/global1_atu.c b/drivers/net/dsa/mv88e6xxx/global1_atu.c
index 61ae2d61e25c..ce3b3690c3c0 100644
--- a/drivers/net/dsa/mv88e6xxx/global1_atu.c
+++ b/drivers/net/dsa/mv88e6xxx/global1_atu.c
@@ -12,6 +12,7 @@
#include "chip.h"
#include "global1.h"
+#include "switchdev.h"
#include "trace.h"
/* Offset 0x01: ATU FID Register */
@@ -409,23 +410,25 @@ static irqreturn_t mv88e6xxx_g1_atu_prob_irq_thread_fn(int irq, void *dev_id)
err = mv88e6xxx_g1_read_atu_violation(chip);
if (err)
- goto out;
+ goto out_unlock;
err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_ATU_OP, &val);
if (err)
- goto out;
+ goto out_unlock;
err = mv88e6xxx_g1_atu_fid_read(chip, &fid);
if (err)
- goto out;
+ goto out_unlock;
err = mv88e6xxx_g1_atu_data_read(chip, &entry);
if (err)
- goto out;
+ goto out_unlock;
err = mv88e6xxx_g1_atu_mac_read(chip, &entry);
if (err)
- goto out;
+ goto out_unlock;
+
+ mv88e6xxx_reg_unlock(chip);
spid = entry.state;
@@ -441,6 +444,13 @@ static irqreturn_t mv88e6xxx_g1_atu_prob_irq_thread_fn(int irq, void *dev_id)
entry.portvec, entry.mac,
fid);
chip->ports[spid].atu_miss_violation++;
+
+ if (fid != MV88E6XXX_FID_STANDALONE && chip->ports[spid].mab) {
+ err = mv88e6xxx_handle_miss_violation(chip, spid,
+ &entry, fid);
+ if (err)
+ goto out;
+ }
}
if (val & MV88E6XXX_G1_ATU_OP_FULL_VIOLATION) {
@@ -449,13 +459,13 @@ static irqreturn_t mv88e6xxx_g1_atu_prob_irq_thread_fn(int irq, void *dev_id)
fid);
chip->ports[spid].atu_full_violation++;
}
- mv88e6xxx_reg_unlock(chip);
return IRQ_HANDLED;
-out:
+out_unlock:
mv88e6xxx_reg_unlock(chip);
+out:
dev_err(chip->dev, "ATU problem: error %d while handling interrupt\n",
err);
return IRQ_HANDLED;
diff --git a/drivers/net/dsa/mv88e6xxx/global2.c b/drivers/net/dsa/mv88e6xxx/global2.c
index fa65ecd9cb85..ed3b2f88e783 100644
--- a/drivers/net/dsa/mv88e6xxx/global2.c
+++ b/drivers/net/dsa/mv88e6xxx/global2.c
@@ -739,20 +739,18 @@ static int mv88e6xxx_g2_smi_phy_read_data_c45(struct mv88e6xxx_chip *chip,
return mv88e6xxx_g2_read(chip, MV88E6XXX_G2_SMI_PHY_DATA, data);
}
-static int mv88e6xxx_g2_smi_phy_read_c45(struct mv88e6xxx_chip *chip,
- bool external, int port, int reg,
- u16 *data)
+static int _mv88e6xxx_g2_smi_phy_read_c45(struct mv88e6xxx_chip *chip,
+ bool external, int port, int devad,
+ int reg, u16 *data)
{
- int dev = (reg >> 16) & 0x1f;
- int addr = reg & 0xffff;
int err;
- err = mv88e6xxx_g2_smi_phy_write_addr_c45(chip, external, port, dev,
- addr);
+ err = mv88e6xxx_g2_smi_phy_write_addr_c45(chip, external, port, devad,
+ reg);
if (err)
return err;
- return mv88e6xxx_g2_smi_phy_read_data_c45(chip, external, port, dev,
+ return mv88e6xxx_g2_smi_phy_read_data_c45(chip, external, port, devad,
data);
}
@@ -771,51 +769,65 @@ static int mv88e6xxx_g2_smi_phy_write_data_c45(struct mv88e6xxx_chip *chip,
return mv88e6xxx_g2_smi_phy_access_c45(chip, external, op, port, dev);
}
-static int mv88e6xxx_g2_smi_phy_write_c45(struct mv88e6xxx_chip *chip,
- bool external, int port, int reg,
- u16 data)
+static int _mv88e6xxx_g2_smi_phy_write_c45(struct mv88e6xxx_chip *chip,
+ bool external, int port, int devad,
+ int reg, u16 data)
{
- int dev = (reg >> 16) & 0x1f;
- int addr = reg & 0xffff;
int err;
- err = mv88e6xxx_g2_smi_phy_write_addr_c45(chip, external, port, dev,
- addr);
+ err = mv88e6xxx_g2_smi_phy_write_addr_c45(chip, external, port, devad,
+ reg);
if (err)
return err;
- return mv88e6xxx_g2_smi_phy_write_data_c45(chip, external, port, dev,
+ return mv88e6xxx_g2_smi_phy_write_data_c45(chip, external, port, devad,
data);
}
-int mv88e6xxx_g2_smi_phy_read(struct mv88e6xxx_chip *chip, struct mii_bus *bus,
- int addr, int reg, u16 *val)
+int mv88e6xxx_g2_smi_phy_read_c22(struct mv88e6xxx_chip *chip,
+ struct mii_bus *bus,
+ int addr, int reg, u16 *val)
{
struct mv88e6xxx_mdio_bus *mdio_bus = bus->priv;
bool external = mdio_bus->external;
- if (reg & MII_ADDR_C45)
- return mv88e6xxx_g2_smi_phy_read_c45(chip, external, addr, reg,
- val);
-
return mv88e6xxx_g2_smi_phy_read_data_c22(chip, external, addr, reg,
val);
}
-int mv88e6xxx_g2_smi_phy_write(struct mv88e6xxx_chip *chip, struct mii_bus *bus,
- int addr, int reg, u16 val)
+int mv88e6xxx_g2_smi_phy_read_c45(struct mv88e6xxx_chip *chip,
+ struct mii_bus *bus, int addr, int devad,
+ int reg, u16 *val)
{
struct mv88e6xxx_mdio_bus *mdio_bus = bus->priv;
bool external = mdio_bus->external;
- if (reg & MII_ADDR_C45)
- return mv88e6xxx_g2_smi_phy_write_c45(chip, external, addr, reg,
- val);
+ return _mv88e6xxx_g2_smi_phy_read_c45(chip, external, addr, devad, reg,
+ val);
+}
+
+int mv88e6xxx_g2_smi_phy_write_c22(struct mv88e6xxx_chip *chip,
+ struct mii_bus *bus, int addr, int reg,
+ u16 val)
+{
+ struct mv88e6xxx_mdio_bus *mdio_bus = bus->priv;
+ bool external = mdio_bus->external;
return mv88e6xxx_g2_smi_phy_write_data_c22(chip, external, addr, reg,
val);
}
+int mv88e6xxx_g2_smi_phy_write_c45(struct mv88e6xxx_chip *chip,
+ struct mii_bus *bus, int addr, int devad,
+ int reg, u16 val)
+{
+ struct mv88e6xxx_mdio_bus *mdio_bus = bus->priv;
+ bool external = mdio_bus->external;
+
+ return _mv88e6xxx_g2_smi_phy_write_c45(chip, external, addr, devad, reg,
+ val);
+}
+
/* Offset 0x1B: Watchdog Control */
static int mv88e6097_watchdog_action(struct mv88e6xxx_chip *chip, int irq)
{
diff --git a/drivers/net/dsa/mv88e6xxx/global2.h b/drivers/net/dsa/mv88e6xxx/global2.h
index 7536b8b0ad01..e973114d6890 100644
--- a/drivers/net/dsa/mv88e6xxx/global2.h
+++ b/drivers/net/dsa/mv88e6xxx/global2.h
@@ -314,12 +314,18 @@ int mv88e6xxx_g2_wait_bit(struct mv88e6xxx_chip *chip, int reg,
int mv88e6352_g2_irl_init_all(struct mv88e6xxx_chip *chip, int port);
int mv88e6390_g2_irl_init_all(struct mv88e6xxx_chip *chip, int port);
-int mv88e6xxx_g2_smi_phy_read(struct mv88e6xxx_chip *chip,
- struct mii_bus *bus,
- int addr, int reg, u16 *val);
-int mv88e6xxx_g2_smi_phy_write(struct mv88e6xxx_chip *chip,
- struct mii_bus *bus,
- int addr, int reg, u16 val);
+int mv88e6xxx_g2_smi_phy_read_c22(struct mv88e6xxx_chip *chip,
+ struct mii_bus *bus,
+ int addr, int reg, u16 *val);
+int mv88e6xxx_g2_smi_phy_write_c22(struct mv88e6xxx_chip *chip,
+ struct mii_bus *bus,
+ int addr, int reg, u16 val);
+int mv88e6xxx_g2_smi_phy_read_c45(struct mv88e6xxx_chip *chip,
+ struct mii_bus *bus,
+ int addr, int devad, int reg, u16 *val);
+int mv88e6xxx_g2_smi_phy_write_c45(struct mv88e6xxx_chip *chip,
+ struct mii_bus *bus,
+ int addr, int devad, int reg, u16 val);
int mv88e6xxx_g2_set_switch_mac(struct mv88e6xxx_chip *chip, u8 *addr);
int mv88e6xxx_g2_get_eeprom8(struct mv88e6xxx_chip *chip,
diff --git a/drivers/net/dsa/mv88e6xxx/phy.c b/drivers/net/dsa/mv88e6xxx/phy.c
index 252b5b3a3efe..8bb88b3d900d 100644
--- a/drivers/net/dsa/mv88e6xxx/phy.c
+++ b/drivers/net/dsa/mv88e6xxx/phy.c
@@ -55,6 +55,38 @@ int mv88e6xxx_phy_write(struct mv88e6xxx_chip *chip, int phy, int reg, u16 val)
return chip->info->ops->phy_write(chip, bus, addr, reg, val);
}
+int mv88e6xxx_phy_read_c45(struct mv88e6xxx_chip *chip, int phy, int devad,
+ int reg, u16 *val)
+{
+ int addr = phy; /* PHY devices addresses start at 0x0 */
+ struct mii_bus *bus;
+
+ bus = mv88e6xxx_default_mdio_bus(chip);
+ if (!bus)
+ return -EOPNOTSUPP;
+
+ if (!chip->info->ops->phy_read_c45)
+ return -EOPNOTSUPP;
+
+ return chip->info->ops->phy_read_c45(chip, bus, addr, devad, reg, val);
+}
+
+int mv88e6xxx_phy_write_c45(struct mv88e6xxx_chip *chip, int phy, int devad,
+ int reg, u16 val)
+{
+ int addr = phy; /* PHY devices addresses start at 0x0 */
+ struct mii_bus *bus;
+
+ bus = mv88e6xxx_default_mdio_bus(chip);
+ if (!bus)
+ return -EOPNOTSUPP;
+
+ if (!chip->info->ops->phy_write_c45)
+ return -EOPNOTSUPP;
+
+ return chip->info->ops->phy_write_c45(chip, bus, addr, devad, reg, val);
+}
+
static int mv88e6xxx_phy_page_get(struct mv88e6xxx_chip *chip, int phy, u8 page)
{
return mv88e6xxx_phy_write(chip, phy, MV88E6XXX_PHY_PAGE, page);
diff --git a/drivers/net/dsa/mv88e6xxx/phy.h b/drivers/net/dsa/mv88e6xxx/phy.h
index 05ea0d546969..5f47722364cc 100644
--- a/drivers/net/dsa/mv88e6xxx/phy.h
+++ b/drivers/net/dsa/mv88e6xxx/phy.h
@@ -28,6 +28,10 @@ int mv88e6xxx_phy_read(struct mv88e6xxx_chip *chip, int phy,
int reg, u16 *val);
int mv88e6xxx_phy_write(struct mv88e6xxx_chip *chip, int phy,
int reg, u16 val);
+int mv88e6xxx_phy_read_c45(struct mv88e6xxx_chip *chip, int phy, int devad,
+ int reg, u16 *val);
+int mv88e6xxx_phy_write_c45(struct mv88e6xxx_chip *chip, int phy, int devad,
+ int reg, u16 val);
int mv88e6xxx_phy_page_read(struct mv88e6xxx_chip *chip, int phy,
u8 page, int reg, u16 *val);
int mv88e6xxx_phy_page_write(struct mv88e6xxx_chip *chip, int phy,
diff --git a/drivers/net/dsa/mv88e6xxx/ptp.c b/drivers/net/dsa/mv88e6xxx/ptp.c
index d838c174dc0d..ea17231dc34e 100644
--- a/drivers/net/dsa/mv88e6xxx/ptp.c
+++ b/drivers/net/dsa/mv88e6xxx/ptp.c
@@ -11,6 +11,7 @@
*/
#include "chip.h"
+#include "global1.h"
#include "global2.h"
#include "hwtstamp.h"
#include "ptp.h"
@@ -419,6 +420,34 @@ const struct mv88e6xxx_ptp_ops mv88e6352_ptp_ops = {
.cc_mult_dem = MV88E6XXX_CC_MULT_DEM,
};
+const struct mv88e6xxx_ptp_ops mv88e6390_ptp_ops = {
+ .clock_read = mv88e6352_ptp_clock_read,
+ .ptp_enable = mv88e6352_ptp_enable,
+ .ptp_verify = mv88e6352_ptp_verify,
+ .event_work = mv88e6352_tai_event_work,
+ .port_enable = mv88e6352_hwtstamp_port_enable,
+ .port_disable = mv88e6352_hwtstamp_port_disable,
+ .set_ptp_cpu_port = mv88e6390_g1_set_ptp_cpu_port,
+ .n_ext_ts = 1,
+ .arr0_sts_reg = MV88E6XXX_PORT_PTP_ARR0_STS,
+ .arr1_sts_reg = MV88E6XXX_PORT_PTP_ARR1_STS,
+ .dep_sts_reg = MV88E6XXX_PORT_PTP_DEP_STS,
+ .rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ),
+ .cc_shift = MV88E6XXX_CC_SHIFT,
+ .cc_mult = MV88E6XXX_CC_MULT,
+ .cc_mult_num = MV88E6XXX_CC_MULT_NUM,
+ .cc_mult_dem = MV88E6XXX_CC_MULT_DEM,
+};
+
static u64 mv88e6xxx_ptp_clock_read(const struct cyclecounter *cc)
{
struct mv88e6xxx_chip *chip = cc_to_chip(cc);
@@ -491,6 +520,23 @@ int mv88e6xxx_ptp_setup(struct mv88e6xxx_chip *chip)
chip->ptp_clock_info.verify = ptp_ops->ptp_verify;
chip->ptp_clock_info.do_aux_work = mv88e6xxx_hwtstamp_work;
+ if (ptp_ops->set_ptp_cpu_port) {
+ struct dsa_port *dp;
+ int upstream = 0;
+ int err;
+
+ dsa_switch_for_each_user_port(dp, chip->ds) {
+ upstream = dsa_upstream_port(chip->ds, dp->index);
+ break;
+ }
+
+ err = ptp_ops->set_ptp_cpu_port(chip, upstream);
+ if (err) {
+ dev_err(chip->dev, "Failed to set PTP CPU destination port!\n");
+ return err;
+ }
+ }
+
chip->ptp_clock = ptp_clock_register(&chip->ptp_clock_info, chip->dev);
if (IS_ERR(chip->ptp_clock))
return PTR_ERR(chip->ptp_clock);
diff --git a/drivers/net/dsa/mv88e6xxx/ptp.h b/drivers/net/dsa/mv88e6xxx/ptp.h
index 269d5d16a466..6c4d09adc93c 100644
--- a/drivers/net/dsa/mv88e6xxx/ptp.h
+++ b/drivers/net/dsa/mv88e6xxx/ptp.h
@@ -151,6 +151,7 @@ void mv88e6xxx_ptp_free(struct mv88e6xxx_chip *chip);
extern const struct mv88e6xxx_ptp_ops mv88e6165_ptp_ops;
extern const struct mv88e6xxx_ptp_ops mv88e6250_ptp_ops;
extern const struct mv88e6xxx_ptp_ops mv88e6352_ptp_ops;
+extern const struct mv88e6xxx_ptp_ops mv88e6390_ptp_ops;
#else /* !CONFIG_NET_DSA_MV88E6XXX_PTP */
@@ -171,6 +172,7 @@ static inline void mv88e6xxx_ptp_free(struct mv88e6xxx_chip *chip)
static const struct mv88e6xxx_ptp_ops mv88e6165_ptp_ops = {};
static const struct mv88e6xxx_ptp_ops mv88e6250_ptp_ops = {};
static const struct mv88e6xxx_ptp_ops mv88e6352_ptp_ops = {};
+static const struct mv88e6xxx_ptp_ops mv88e6390_ptp_ops = {};
#endif /* CONFIG_NET_DSA_MV88E6XXX_PTP */
diff --git a/drivers/net/dsa/mv88e6xxx/serdes.c b/drivers/net/dsa/mv88e6xxx/serdes.c
index d94150d8f3f4..72faec8f44dc 100644
--- a/drivers/net/dsa/mv88e6xxx/serdes.c
+++ b/drivers/net/dsa/mv88e6xxx/serdes.c
@@ -36,17 +36,13 @@ static int mv88e6352_serdes_write(struct mv88e6xxx_chip *chip, int reg,
static int mv88e6390_serdes_read(struct mv88e6xxx_chip *chip,
int lane, int device, int reg, u16 *val)
{
- int reg_c45 = MII_ADDR_C45 | device << 16 | reg;
-
- return mv88e6xxx_phy_read(chip, lane, reg_c45, val);
+ return mv88e6xxx_phy_read_c45(chip, lane, device, reg, val);
}
static int mv88e6390_serdes_write(struct mv88e6xxx_chip *chip,
int lane, int device, int reg, u16 val)
{
- int reg_c45 = MII_ADDR_C45 | device << 16 | reg;
-
- return mv88e6xxx_phy_write(chip, lane, reg_c45, val);
+ return mv88e6xxx_phy_write_c45(chip, lane, device, reg, val);
}
static int mv88e6xxx_serdes_pcs_get_state(struct mv88e6xxx_chip *chip,
diff --git a/drivers/net/dsa/mv88e6xxx/switchdev.c b/drivers/net/dsa/mv88e6xxx/switchdev.c
new file mode 100644
index 000000000000..4c346a884fb2
--- /dev/null
+++ b/drivers/net/dsa/mv88e6xxx/switchdev.c
@@ -0,0 +1,83 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * switchdev.c
+ *
+ * Authors:
+ * Hans J. Schultz <netdev@kapio-technology.com>
+ *
+ */
+
+#include <net/switchdev.h>
+#include "chip.h"
+#include "global1.h"
+#include "switchdev.h"
+
+struct mv88e6xxx_fid_search_ctx {
+ u16 fid_search;
+ u16 vid_found;
+};
+
+static int __mv88e6xxx_find_vid(struct mv88e6xxx_chip *chip,
+ const struct mv88e6xxx_vtu_entry *entry,
+ void *priv)
+{
+ struct mv88e6xxx_fid_search_ctx *ctx = priv;
+
+ if (ctx->fid_search == entry->fid) {
+ ctx->vid_found = entry->vid;
+ return 1;
+ }
+
+ return 0;
+}
+
+static int mv88e6xxx_find_vid(struct mv88e6xxx_chip *chip, u16 fid, u16 *vid)
+{
+ struct mv88e6xxx_fid_search_ctx ctx;
+ int err;
+
+ ctx.fid_search = fid;
+ mv88e6xxx_reg_lock(chip);
+ err = mv88e6xxx_vtu_walk(chip, __mv88e6xxx_find_vid, &ctx);
+ mv88e6xxx_reg_unlock(chip);
+ if (err < 0)
+ return err;
+ if (err == 1)
+ *vid = ctx.vid_found;
+ else
+ return -ENOENT;
+
+ return 0;
+}
+
+int mv88e6xxx_handle_miss_violation(struct mv88e6xxx_chip *chip, int port,
+ struct mv88e6xxx_atu_entry *entry, u16 fid)
+{
+ struct switchdev_notifier_fdb_info info = {
+ .addr = entry->mac,
+ .locked = true,
+ };
+ struct net_device *brport;
+ struct dsa_port *dp;
+ u16 vid;
+ int err;
+
+ err = mv88e6xxx_find_vid(chip, fid, &vid);
+ if (err)
+ return err;
+
+ info.vid = vid;
+ dp = dsa_to_port(chip->ds, port);
+
+ rtnl_lock();
+ brport = dsa_port_to_bridge_port(dp);
+ if (!brport) {
+ rtnl_unlock();
+ return -ENODEV;
+ }
+ err = call_switchdev_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE,
+ brport, &info.info, NULL);
+ rtnl_unlock();
+
+ return err;
+}
diff --git a/drivers/net/dsa/mv88e6xxx/switchdev.h b/drivers/net/dsa/mv88e6xxx/switchdev.h
new file mode 100644
index 000000000000..62214f9d62b0
--- /dev/null
+++ b/drivers/net/dsa/mv88e6xxx/switchdev.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * switchdev.h
+ *
+ * Authors:
+ * Hans J. Schultz <netdev@kapio-technology.com>
+ *
+ */
+
+#ifndef _MV88E6XXX_SWITCHDEV_H_
+#define _MV88E6XXX_SWITCHDEV_H_
+
+#include "chip.h"
+
+int mv88e6xxx_handle_miss_violation(struct mv88e6xxx_chip *chip, int port,
+ struct mv88e6xxx_atu_entry *entry,
+ u16 fid);
+
+#endif /* _MV88E6XXX_SWITCHDEV_H_ */
diff --git a/drivers/net/dsa/ocelot/Kconfig b/drivers/net/dsa/ocelot/Kconfig
index 08db9cf76818..081e7a88ea02 100644
--- a/drivers/net/dsa/ocelot/Kconfig
+++ b/drivers/net/dsa/ocelot/Kconfig
@@ -1,4 +1,34 @@
# SPDX-License-Identifier: GPL-2.0-only
+config NET_DSA_MSCC_FELIX_DSA_LIB
+ tristate
+ help
+ This is an umbrella module for all network switches that are
+ register-compatible with Ocelot and that perform I/O to their host
+ CPU through an NPI (Node Processor Interface) Ethernet port.
+ Its name comes from the first hardware chip to make use of it
+ (VSC9959), code named Felix.
+
+config NET_DSA_MSCC_OCELOT_EXT
+ tristate "Ocelot External Ethernet switch support"
+ depends on NET_DSA && SPI
+ depends on NET_VENDOR_MICROSEMI
+ depends on PTP_1588_CLOCK_OPTIONAL
+ select MDIO_MSCC_MIIM
+ select MFD_OCELOT
+ select MSCC_OCELOT_SWITCH_LIB
+ select NET_DSA_MSCC_FELIX_DSA_LIB
+ select NET_DSA_TAG_OCELOT_8021Q
+ select NET_DSA_TAG_OCELOT
+ help
+ This driver supports the VSC7511, VSC7512, VSC7513 and VSC7514 chips
+ when controlled through SPI.
+
+ The Ocelot switch family is a set of multi-port networking chips. All
+ of these chips have the ability to be controlled externally through
+ SPI or PCIe interfaces.
+
+ Say "Y" here to enable external control to these chips.
+
config NET_DSA_MSCC_FELIX
tristate "Ocelot / Felix Ethernet switch support"
depends on NET_DSA && PCI
@@ -8,6 +38,7 @@ config NET_DSA_MSCC_FELIX
depends on PTP_1588_CLOCK_OPTIONAL
depends on NET_SCH_TAPRIO || NET_SCH_TAPRIO=n
select MSCC_OCELOT_SWITCH_LIB
+ select NET_DSA_MSCC_FELIX_DSA_LIB
select NET_DSA_TAG_OCELOT_8021Q
select NET_DSA_TAG_OCELOT
select FSL_ENETC_MDIO
@@ -24,6 +55,7 @@ config NET_DSA_MSCC_SEVILLE
depends on PTP_1588_CLOCK_OPTIONAL
select MDIO_MSCC_MIIM
select MSCC_OCELOT_SWITCH_LIB
+ select NET_DSA_MSCC_FELIX_DSA_LIB
select NET_DSA_TAG_OCELOT_8021Q
select NET_DSA_TAG_OCELOT
select PCS_LYNX
diff --git a/drivers/net/dsa/ocelot/Makefile b/drivers/net/dsa/ocelot/Makefile
index f6dd131e7491..ead868a293e3 100644
--- a/drivers/net/dsa/ocelot/Makefile
+++ b/drivers/net/dsa/ocelot/Makefile
@@ -1,11 +1,10 @@
# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_NET_DSA_MSCC_FELIX_DSA_LIB) += mscc_felix_dsa_lib.o
obj-$(CONFIG_NET_DSA_MSCC_FELIX) += mscc_felix.o
+obj-$(CONFIG_NET_DSA_MSCC_OCELOT_EXT) += mscc_ocelot_ext.o
obj-$(CONFIG_NET_DSA_MSCC_SEVILLE) += mscc_seville.o
-mscc_felix-objs := \
- felix.o \
- felix_vsc9959.o
-
-mscc_seville-objs := \
- felix.o \
- seville_vsc9953.o
+mscc_felix_dsa_lib-objs := felix.o
+mscc_felix-objs := felix_vsc9959.o
+mscc_ocelot_ext-objs := ocelot_ext.o
+mscc_seville-objs := seville_vsc9953.o
diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c
index 3b738cb2ae6e..d4cc9e60f369 100644
--- a/drivers/net/dsa/ocelot/felix.c
+++ b/drivers/net/dsa/ocelot/felix.c
@@ -1075,9 +1075,12 @@ static void felix_phylink_mac_link_down(struct dsa_switch *ds, int port,
phy_interface_t interface)
{
struct ocelot *ocelot = ds->priv;
+ struct felix *felix;
+
+ felix = ocelot_to_felix(ocelot);
ocelot_phylink_mac_link_down(ocelot, port, link_an_mode, interface,
- FELIX_MAC_QUIRKS);
+ felix->info->quirks);
}
static void felix_phylink_mac_link_up(struct dsa_switch *ds, int port,
@@ -1092,7 +1095,7 @@ static void felix_phylink_mac_link_up(struct dsa_switch *ds, int port,
ocelot_phylink_mac_link_up(ocelot, port, phydev, link_an_mode,
interface, speed, duplex, tx_pause, rx_pause,
- FELIX_MAC_QUIRKS);
+ felix->info->quirks);
if (felix->info->port_sched_speed_set)
felix->info->port_sched_speed_set(ocelot, port, speed);
@@ -1270,10 +1273,15 @@ static int felix_parse_ports_node(struct felix *felix,
err = felix_validate_phy_mode(felix, port, phy_mode);
if (err < 0) {
- dev_err(dev, "Unsupported PHY mode %s on port %d\n",
- phy_modes(phy_mode), port);
+ dev_info(dev, "Unsupported PHY mode %s on port %d\n",
+ phy_modes(phy_mode), port);
of_node_put(child);
- return err;
+
+ /* Leave port_phy_modes[port] = 0, which is also
+ * PHY_INTERFACE_MODE_NA. This will perform a
+ * best-effort to bring up as many ports as possible.
+ */
+ continue;
}
port_phy_modes[port] = phy_mode;
@@ -1312,6 +1320,13 @@ static struct regmap *felix_request_regmap_by_name(struct felix *felix,
struct resource res;
int i;
+ /* In an MFD configuration, regmaps are registered directly to the
+ * parent device before the child devices are probed, so there is no
+ * need to initialize a new one.
+ */
+ if (!felix->info->resources)
+ return dev_get_regmap(ocelot->dev->parent, resource_name);
+
for (i = 0; i < felix->info->num_resources; i++) {
if (strcmp(resource_name, felix->info->resources[i].name))
continue;
@@ -2024,6 +2039,31 @@ static int felix_port_del_dscp_prio(struct dsa_switch *ds, int port, u8 dscp,
return ocelot_port_del_dscp_prio(ocelot, port, dscp, prio);
}
+static int felix_get_mm(struct dsa_switch *ds, int port,
+ struct ethtool_mm_state *state)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_port_get_mm(ocelot, port, state);
+}
+
+static int felix_set_mm(struct dsa_switch *ds, int port,
+ struct ethtool_mm_cfg *cfg,
+ struct netlink_ext_ack *extack)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ return ocelot_port_set_mm(ocelot, port, cfg, extack);
+}
+
+static void felix_get_mm_stats(struct dsa_switch *ds, int port,
+ struct ethtool_mm_stats *stats)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ ocelot_port_get_mm_stats(ocelot, port, stats);
+}
+
const struct dsa_switch_ops felix_switch_ops = {
.get_tag_protocol = felix_get_tag_protocol,
.change_tag_protocol = felix_change_tag_protocol,
@@ -2031,6 +2071,9 @@ const struct dsa_switch_ops felix_switch_ops = {
.setup = felix_setup,
.teardown = felix_teardown,
.set_ageing_time = felix_set_ageing_time,
+ .get_mm = felix_get_mm,
+ .set_mm = felix_set_mm,
+ .get_mm_stats = felix_get_mm_stats,
.get_stats64 = felix_get_stats64,
.get_pause_stats = felix_get_pause_stats,
.get_rmon_stats = felix_get_rmon_stats,
@@ -2103,6 +2146,7 @@ const struct dsa_switch_ops felix_switch_ops = {
.port_set_host_flood = felix_port_set_host_flood,
.port_change_master = felix_port_change_master,
};
+EXPORT_SYMBOL_GPL(felix_switch_ops);
struct net_device *felix_port_to_netdev(struct ocelot *ocelot, int port)
{
@@ -2114,6 +2158,7 @@ struct net_device *felix_port_to_netdev(struct ocelot *ocelot, int port)
return dsa_to_port(ds, port)->slave;
}
+EXPORT_SYMBOL_GPL(felix_port_to_netdev);
int felix_netdev_to_port(struct net_device *dev)
{
@@ -2125,3 +2170,7 @@ int felix_netdev_to_port(struct net_device *dev)
return dp->index;
}
+EXPORT_SYMBOL_GPL(felix_netdev_to_port);
+
+MODULE_DESCRIPTION("Felix DSA library");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/ocelot/felix.h b/drivers/net/dsa/ocelot/felix.h
index be22d6ccd7c8..d5d0b30c0b75 100644
--- a/drivers/net/dsa/ocelot/felix.h
+++ b/drivers/net/dsa/ocelot/felix.h
@@ -7,6 +7,7 @@
#define ocelot_to_felix(o) container_of((o), struct felix, ocelot)
#define FELIX_MAC_QUIRKS OCELOT_QUIRK_PCS_PERFORMS_RATE_ADAPTATION
+#define OCELOT_PORT_MODE_NONE 0
#define OCELOT_PORT_MODE_INTERNAL BIT(0)
#define OCELOT_PORT_MODE_SGMII BIT(1)
#define OCELOT_PORT_MODE_QSGMII BIT(2)
@@ -36,6 +37,7 @@ struct felix_info {
u16 vcap_pol_base2;
u16 vcap_pol_max2;
const struct ptp_clock_info *ptp_caps;
+ unsigned long quirks;
/* Some Ocelot switches are integrated into the SoC without the
* extraction IRQ line connected to the ARM GIC. By enabling this
diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c
index 01ac70fd7ddf..354aa3dbfde7 100644
--- a/drivers/net/dsa/ocelot/felix_vsc9959.c
+++ b/drivers/net/dsa/ocelot/felix_vsc9959.c
@@ -6,6 +6,7 @@
#include <soc/mscc/ocelot_qsys.h>
#include <soc/mscc/ocelot_vcap.h>
#include <soc/mscc/ocelot_ana.h>
+#include <soc/mscc/ocelot_dev.h>
#include <soc/mscc/ocelot_ptp.h>
#include <soc/mscc/ocelot_sys.h>
#include <net/tc_act/tc_gate.h>
@@ -318,6 +319,29 @@ static const u32 vsc9959_sys_regmap[] = {
REG(SYS_COUNT_RX_GREEN_PRIO_5, 0x0000a4),
REG(SYS_COUNT_RX_GREEN_PRIO_6, 0x0000a8),
REG(SYS_COUNT_RX_GREEN_PRIO_7, 0x0000ac),
+ REG(SYS_COUNT_RX_ASSEMBLY_ERRS, 0x0000b0),
+ REG(SYS_COUNT_RX_SMD_ERRS, 0x0000b4),
+ REG(SYS_COUNT_RX_ASSEMBLY_OK, 0x0000b8),
+ REG(SYS_COUNT_RX_MERGE_FRAGMENTS, 0x0000bc),
+ REG(SYS_COUNT_RX_PMAC_OCTETS, 0x0000c0),
+ REG(SYS_COUNT_RX_PMAC_UNICAST, 0x0000c4),
+ REG(SYS_COUNT_RX_PMAC_MULTICAST, 0x0000c8),
+ REG(SYS_COUNT_RX_PMAC_BROADCAST, 0x0000cc),
+ REG(SYS_COUNT_RX_PMAC_SHORTS, 0x0000d0),
+ REG(SYS_COUNT_RX_PMAC_FRAGMENTS, 0x0000d4),
+ REG(SYS_COUNT_RX_PMAC_JABBERS, 0x0000d8),
+ REG(SYS_COUNT_RX_PMAC_CRC_ALIGN_ERRS, 0x0000dc),
+ REG(SYS_COUNT_RX_PMAC_SYM_ERRS, 0x0000e0),
+ REG(SYS_COUNT_RX_PMAC_64, 0x0000e4),
+ REG(SYS_COUNT_RX_PMAC_65_127, 0x0000e8),
+ REG(SYS_COUNT_RX_PMAC_128_255, 0x0000ec),
+ REG(SYS_COUNT_RX_PMAC_256_511, 0x0000f0),
+ REG(SYS_COUNT_RX_PMAC_512_1023, 0x0000f4),
+ REG(SYS_COUNT_RX_PMAC_1024_1526, 0x0000f8),
+ REG(SYS_COUNT_RX_PMAC_1527_MAX, 0x0000fc),
+ REG(SYS_COUNT_RX_PMAC_PAUSE, 0x000100),
+ REG(SYS_COUNT_RX_PMAC_CONTROL, 0x000104),
+ REG(SYS_COUNT_RX_PMAC_LONGS, 0x000108),
REG(SYS_COUNT_TX_OCTETS, 0x000200),
REG(SYS_COUNT_TX_UNICAST, 0x000204),
REG(SYS_COUNT_TX_MULTICAST, 0x000208),
@@ -349,6 +373,20 @@ static const u32 vsc9959_sys_regmap[] = {
REG(SYS_COUNT_TX_GREEN_PRIO_6, 0x000270),
REG(SYS_COUNT_TX_GREEN_PRIO_7, 0x000274),
REG(SYS_COUNT_TX_AGED, 0x000278),
+ REG(SYS_COUNT_TX_MM_HOLD, 0x00027c),
+ REG(SYS_COUNT_TX_MERGE_FRAGMENTS, 0x000280),
+ REG(SYS_COUNT_TX_PMAC_OCTETS, 0x000284),
+ REG(SYS_COUNT_TX_PMAC_UNICAST, 0x000288),
+ REG(SYS_COUNT_TX_PMAC_MULTICAST, 0x00028c),
+ REG(SYS_COUNT_TX_PMAC_BROADCAST, 0x000290),
+ REG(SYS_COUNT_TX_PMAC_PAUSE, 0x000294),
+ REG(SYS_COUNT_TX_PMAC_64, 0x000298),
+ REG(SYS_COUNT_TX_PMAC_65_127, 0x00029c),
+ REG(SYS_COUNT_TX_PMAC_128_255, 0x0002a0),
+ REG(SYS_COUNT_TX_PMAC_256_511, 0x0002a4),
+ REG(SYS_COUNT_TX_PMAC_512_1023, 0x0002a8),
+ REG(SYS_COUNT_TX_PMAC_1024_1526, 0x0002ac),
+ REG(SYS_COUNT_TX_PMAC_1527_MAX, 0x0002b0),
REG(SYS_COUNT_DROP_LOCAL, 0x000400),
REG(SYS_COUNT_DROP_TAIL, 0x000404),
REG(SYS_COUNT_DROP_YELLOW_PRIO_0, 0x000408),
@@ -439,6 +477,9 @@ static const u32 vsc9959_dev_gmii_regmap[] = {
REG(DEV_MAC_FC_MAC_LOW_CFG, 0x3c),
REG(DEV_MAC_FC_MAC_HIGH_CFG, 0x40),
REG(DEV_MAC_STICKY, 0x44),
+ REG(DEV_MM_ENABLE_CONFIG, 0x48),
+ REG(DEV_MM_VERIF_CONFIG, 0x4C),
+ REG(DEV_MM_STATUS, 0x50),
REG_RESERVED(PCS1G_CFG),
REG_RESERVED(PCS1G_MODE_CFG),
REG_RESERVED(PCS1G_SD_CFG),
@@ -954,8 +995,10 @@ static int vsc9959_mdio_bus_alloc(struct ocelot *ocelot)
return -ENOMEM;
bus->name = "VSC9959 internal MDIO bus";
- bus->read = enetc_mdio_read;
- bus->write = enetc_mdio_write;
+ bus->read = enetc_mdio_read_c22;
+ bus->write = enetc_mdio_write_c22;
+ bus->read_c45 = enetc_mdio_read_c45;
+ bus->write_c45 = enetc_mdio_write_c45;
bus->parent = dev;
mdio_priv = bus->priv;
mdio_priv->hw = hw;
@@ -2550,6 +2593,7 @@ static const struct felix_info felix_info_vsc9959 = {
.num_mact_rows = 2048,
.num_ports = VSC9959_NUM_PORTS,
.num_tx_queues = OCELOT_NUM_TC,
+ .quirks = FELIX_MAC_QUIRKS,
.quirk_no_xtr_irq = true,
.ptp_caps = &vsc9959_ptp_caps,
.mdio_bus_alloc = vsc9959_mdio_bus_alloc,
@@ -2560,20 +2604,19 @@ static const struct felix_info felix_info_vsc9959 = {
.tas_guard_bands_update = vsc9959_tas_guard_bands_update,
};
+/* The INTB interrupt is shared between for PTP TX timestamp availability
+ * notification and MAC Merge status change on each port.
+ */
static irqreturn_t felix_irq_handler(int irq, void *data)
{
struct ocelot *ocelot = (struct ocelot *)data;
-
- /* The INTB interrupt is used for both PTP TX timestamp interrupt
- * and preemption status change interrupt on each port.
- *
- * - Get txtstamp if have
- * - TODO: handle preemption. Without handling it, driver may get
- * interrupt storm.
- */
+ int port;
ocelot_get_txtstamp(ocelot);
+ for (port = 0; port < ocelot->num_phys_ports; port++)
+ ocelot_port_mm_irq(ocelot, port);
+
return IRQ_HANDLED;
}
@@ -2621,6 +2664,7 @@ static int felix_pci_probe(struct pci_dev *pdev,
}
ocelot->ptp = 1;
+ ocelot->mm_supported = true;
ds = kzalloc(sizeof(struct dsa_switch), GFP_KERNEL);
if (!ds) {
diff --git a/drivers/net/dsa/ocelot/ocelot_ext.c b/drivers/net/dsa/ocelot/ocelot_ext.c
new file mode 100644
index 000000000000..14efa6387bd7
--- /dev/null
+++ b/drivers/net/dsa/ocelot/ocelot_ext.c
@@ -0,0 +1,163 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright 2021-2022 Innovative Advantage Inc.
+ */
+
+#include <linux/mfd/ocelot.h>
+#include <linux/phylink.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <soc/mscc/ocelot.h>
+#include <soc/mscc/vsc7514_regs.h>
+#include "felix.h"
+
+#define VSC7514_NUM_PORTS 11
+
+#define OCELOT_PORT_MODE_SERDES (OCELOT_PORT_MODE_SGMII | \
+ OCELOT_PORT_MODE_QSGMII)
+
+static const u32 vsc7512_port_modes[VSC7514_NUM_PORTS] = {
+ OCELOT_PORT_MODE_INTERNAL,
+ OCELOT_PORT_MODE_INTERNAL,
+ OCELOT_PORT_MODE_INTERNAL,
+ OCELOT_PORT_MODE_INTERNAL,
+ OCELOT_PORT_MODE_NONE,
+ OCELOT_PORT_MODE_NONE,
+ OCELOT_PORT_MODE_NONE,
+ OCELOT_PORT_MODE_NONE,
+ OCELOT_PORT_MODE_NONE,
+ OCELOT_PORT_MODE_NONE,
+ OCELOT_PORT_MODE_NONE,
+};
+
+static const struct ocelot_ops ocelot_ext_ops = {
+ .reset = ocelot_reset,
+ .wm_enc = ocelot_wm_enc,
+ .wm_dec = ocelot_wm_dec,
+ .wm_stat = ocelot_wm_stat,
+ .port_to_netdev = felix_port_to_netdev,
+ .netdev_to_port = felix_netdev_to_port,
+};
+
+static const char * const vsc7512_resource_names[TARGET_MAX] = {
+ [SYS] = "sys",
+ [REW] = "rew",
+ [S0] = "s0",
+ [S1] = "s1",
+ [S2] = "s2",
+ [QS] = "qs",
+ [QSYS] = "qsys",
+ [ANA] = "ana",
+};
+
+static const struct felix_info vsc7512_info = {
+ .resource_names = vsc7512_resource_names,
+ .regfields = vsc7514_regfields,
+ .map = vsc7514_regmap,
+ .ops = &ocelot_ext_ops,
+ .vcap = vsc7514_vcap_props,
+ .num_mact_rows = 1024,
+ .num_ports = VSC7514_NUM_PORTS,
+ .num_tx_queues = OCELOT_NUM_TC,
+ .port_modes = vsc7512_port_modes,
+};
+
+static int ocelot_ext_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct dsa_switch *ds;
+ struct ocelot *ocelot;
+ struct felix *felix;
+ int err;
+
+ felix = kzalloc(sizeof(*felix), GFP_KERNEL);
+ if (!felix)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, felix);
+
+ ocelot = &felix->ocelot;
+ ocelot->dev = dev;
+
+ ocelot->num_flooding_pgids = 1;
+
+ felix->info = &vsc7512_info;
+
+ ds = kzalloc(sizeof(*ds), GFP_KERNEL);
+ if (!ds) {
+ err = -ENOMEM;
+ dev_err_probe(dev, err, "Failed to allocate DSA switch\n");
+ goto err_free_felix;
+ }
+
+ ds->dev = dev;
+ ds->num_ports = felix->info->num_ports;
+ ds->num_tx_queues = felix->info->num_tx_queues;
+
+ ds->ops = &felix_switch_ops;
+ ds->priv = ocelot;
+ felix->ds = ds;
+ felix->tag_proto = DSA_TAG_PROTO_OCELOT;
+
+ err = dsa_register_switch(ds);
+ if (err) {
+ dev_err_probe(dev, err, "Failed to register DSA switch\n");
+ goto err_free_ds;
+ }
+
+ return 0;
+
+err_free_ds:
+ kfree(ds);
+err_free_felix:
+ kfree(felix);
+ return err;
+}
+
+static int ocelot_ext_remove(struct platform_device *pdev)
+{
+ struct felix *felix = dev_get_drvdata(&pdev->dev);
+
+ if (!felix)
+ return 0;
+
+ dsa_unregister_switch(felix->ds);
+
+ kfree(felix->ds);
+ kfree(felix);
+
+ return 0;
+}
+
+static void ocelot_ext_shutdown(struct platform_device *pdev)
+{
+ struct felix *felix = dev_get_drvdata(&pdev->dev);
+
+ if (!felix)
+ return;
+
+ dsa_switch_shutdown(felix->ds);
+
+ dev_set_drvdata(&pdev->dev, NULL);
+}
+
+static const struct of_device_id ocelot_ext_switch_of_match[] = {
+ { .compatible = "mscc,vsc7512-switch" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, ocelot_ext_switch_of_match);
+
+static struct platform_driver ocelot_ext_switch_driver = {
+ .driver = {
+ .name = "ocelot-switch",
+ .of_match_table = of_match_ptr(ocelot_ext_switch_of_match),
+ },
+ .probe = ocelot_ext_probe,
+ .remove = ocelot_ext_remove,
+ .shutdown = ocelot_ext_shutdown,
+};
+module_platform_driver(ocelot_ext_switch_driver);
+
+MODULE_DESCRIPTION("External Ocelot Switch driver");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(MFD_OCELOT);
diff --git a/drivers/net/dsa/ocelot/seville_vsc9953.c b/drivers/net/dsa/ocelot/seville_vsc9953.c
index 88ed3a2e487a..287b64b788db 100644
--- a/drivers/net/dsa/ocelot/seville_vsc9953.c
+++ b/drivers/net/dsa/ocelot/seville_vsc9953.c
@@ -971,6 +971,7 @@ static const struct felix_info seville_info_vsc9953 = {
.vcap_pol_max = VSC9953_VCAP_POLICER_MAX,
.vcap_pol_base2 = VSC9953_VCAP_POLICER_BASE2,
.vcap_pol_max2 = VSC9953_VCAP_POLICER_MAX2,
+ .quirks = FELIX_MAC_QUIRKS,
.num_mact_rows = 2048,
.num_ports = VSC9953_NUM_PORTS,
.num_tx_queues = OCELOT_NUM_TC,
diff --git a/drivers/net/dsa/qca/qca8k-8xxx.c b/drivers/net/dsa/qca/qca8k-8xxx.c
index 2f224b166bbb..55df4479ea30 100644
--- a/drivers/net/dsa/qca/qca8k-8xxx.c
+++ b/drivers/net/dsa/qca/qca8k-8xxx.c
@@ -425,16 +425,12 @@ qca8k_regmap_update_bits_eth(struct qca8k_priv *priv, u32 reg, u32 mask, u32 wri
}
static int
-qca8k_regmap_read(void *ctx, uint32_t reg, uint32_t *val)
+qca8k_read_mii(struct qca8k_priv *priv, uint32_t reg, uint32_t *val)
{
- struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
struct mii_bus *bus = priv->bus;
u16 r1, r2, page;
int ret;
- if (!qca8k_read_eth(priv, reg, val, sizeof(*val)))
- return 0;
-
qca8k_split_addr(reg, &r1, &r2, &page);
mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
@@ -451,16 +447,12 @@ exit:
}
static int
-qca8k_regmap_write(void *ctx, uint32_t reg, uint32_t val)
+qca8k_write_mii(struct qca8k_priv *priv, uint32_t reg, uint32_t val)
{
- struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
struct mii_bus *bus = priv->bus;
u16 r1, r2, page;
int ret;
- if (!qca8k_write_eth(priv, reg, &val, sizeof(val)))
- return 0;
-
qca8k_split_addr(reg, &r1, &r2, &page);
mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
@@ -477,17 +469,14 @@ exit:
}
static int
-qca8k_regmap_update_bits(void *ctx, uint32_t reg, uint32_t mask, uint32_t write_val)
+qca8k_regmap_update_bits_mii(struct qca8k_priv *priv, uint32_t reg,
+ uint32_t mask, uint32_t write_val)
{
- struct qca8k_priv *priv = (struct qca8k_priv *)ctx;
struct mii_bus *bus = priv->bus;
u16 r1, r2, page;
u32 val;
int ret;
- if (!qca8k_regmap_update_bits_eth(priv, reg, mask, write_val))
- return 0;
-
qca8k_split_addr(reg, &r1, &r2, &page);
mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
@@ -510,17 +499,84 @@ exit:
return ret;
}
+static int
+qca8k_bulk_read(void *ctx, const void *reg_buf, size_t reg_len,
+ void *val_buf, size_t val_len)
+{
+ int i, count = val_len / sizeof(u32), ret;
+ u32 reg = *(u32 *)reg_buf & U16_MAX;
+ struct qca8k_priv *priv = ctx;
+
+ if (priv->mgmt_master &&
+ !qca8k_read_eth(priv, reg, val_buf, val_len))
+ return 0;
+
+ /* loop count times and increment reg of 4 */
+ for (i = 0; i < count; i++, reg += sizeof(u32)) {
+ ret = qca8k_read_mii(priv, reg, val_buf + i);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+qca8k_bulk_gather_write(void *ctx, const void *reg_buf, size_t reg_len,
+ const void *val_buf, size_t val_len)
+{
+ int i, count = val_len / sizeof(u32), ret;
+ u32 reg = *(u32 *)reg_buf & U16_MAX;
+ struct qca8k_priv *priv = ctx;
+ u32 *val = (u32 *)val_buf;
+
+ if (priv->mgmt_master &&
+ !qca8k_write_eth(priv, reg, val, val_len))
+ return 0;
+
+ /* loop count times, increment reg of 4 and increment val ptr to
+ * the next value
+ */
+ for (i = 0; i < count; i++, reg += sizeof(u32), val++) {
+ ret = qca8k_write_mii(priv, reg, *val);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+qca8k_bulk_write(void *ctx, const void *data, size_t bytes)
+{
+ return qca8k_bulk_gather_write(ctx, data, sizeof(u16), data + sizeof(u16),
+ bytes - sizeof(u16));
+}
+
+static int
+qca8k_regmap_update_bits(void *ctx, uint32_t reg, uint32_t mask, uint32_t write_val)
+{
+ struct qca8k_priv *priv = ctx;
+
+ if (!qca8k_regmap_update_bits_eth(priv, reg, mask, write_val))
+ return 0;
+
+ return qca8k_regmap_update_bits_mii(priv, reg, mask, write_val);
+}
+
static struct regmap_config qca8k_regmap_config = {
.reg_bits = 16,
.val_bits = 32,
.reg_stride = 4,
.max_register = 0x16ac, /* end MIB - Port6 range */
- .reg_read = qca8k_regmap_read,
- .reg_write = qca8k_regmap_write,
+ .read = qca8k_bulk_read,
+ .write = qca8k_bulk_write,
.reg_update_bits = qca8k_regmap_update_bits,
.rd_table = &qca8k_readable_table,
.disable_locking = true, /* Locking is handled by qca8k read/write */
.cache_type = REGCACHE_NONE, /* Explicitly disable CACHE */
+ .max_raw_read = 32, /* mgmt eth can read/write up to 8 registers at time */
+ .max_raw_write = 32,
};
static int
@@ -2089,8 +2145,6 @@ static SIMPLE_DEV_PM_OPS(qca8k_pm_ops,
static const struct qca8k_info_ops qca8xxx_ops = {
.autocast_mib = qca8k_get_ethtool_stats_eth,
- .read_eth = qca8k_read_eth,
- .write_eth = qca8k_write_eth,
};
static const struct qca8k_match_data qca8327 = {
diff --git a/drivers/net/dsa/qca/qca8k-common.c b/drivers/net/dsa/qca/qca8k-common.c
index fb45b598847b..96773e432558 100644
--- a/drivers/net/dsa/qca/qca8k-common.c
+++ b/drivers/net/dsa/qca/qca8k-common.c
@@ -101,45 +101,6 @@ const struct regmap_access_table qca8k_readable_table = {
.n_yes_ranges = ARRAY_SIZE(qca8k_readable_ranges),
};
-/* TODO: remove these extra ops when we can support regmap bulk read/write */
-static int qca8k_bulk_read(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
-{
- int i, count = len / sizeof(u32), ret;
-
- if (priv->mgmt_master && priv->info->ops->read_eth &&
- !priv->info->ops->read_eth(priv, reg, val, len))
- return 0;
-
- for (i = 0; i < count; i++) {
- ret = regmap_read(priv->regmap, reg + (i * 4), val + i);
- if (ret < 0)
- return ret;
- }
-
- return 0;
-}
-
-/* TODO: remove these extra ops when we can support regmap bulk read/write */
-static int qca8k_bulk_write(struct qca8k_priv *priv, u32 reg, u32 *val, int len)
-{
- int i, count = len / sizeof(u32), ret;
- u32 tmp;
-
- if (priv->mgmt_master && priv->info->ops->write_eth &&
- !priv->info->ops->write_eth(priv, reg, val, len))
- return 0;
-
- for (i = 0; i < count; i++) {
- tmp = val[i];
-
- ret = regmap_write(priv->regmap, reg + (i * 4), tmp);
- if (ret < 0)
- return ret;
- }
-
- return 0;
-}
-
static int qca8k_busy_wait(struct qca8k_priv *priv, u32 reg, u32 mask)
{
u32 val;
@@ -150,11 +111,12 @@ static int qca8k_busy_wait(struct qca8k_priv *priv, u32 reg, u32 mask)
static int qca8k_fdb_read(struct qca8k_priv *priv, struct qca8k_fdb *fdb)
{
- u32 reg[3];
+ u32 reg[QCA8K_ATU_TABLE_SIZE];
int ret;
/* load the ARL table into an array */
- ret = qca8k_bulk_read(priv, QCA8K_REG_ATU_DATA0, reg, sizeof(reg));
+ ret = regmap_bulk_read(priv->regmap, QCA8K_REG_ATU_DATA0, reg,
+ QCA8K_ATU_TABLE_SIZE);
if (ret)
return ret;
@@ -178,7 +140,7 @@ static int qca8k_fdb_read(struct qca8k_priv *priv, struct qca8k_fdb *fdb)
static void qca8k_fdb_write(struct qca8k_priv *priv, u16 vid, u8 port_mask,
const u8 *mac, u8 aging)
{
- u32 reg[3] = { 0 };
+ u32 reg[QCA8K_ATU_TABLE_SIZE] = { 0 };
/* vid - 83:72 */
reg[2] = FIELD_PREP(QCA8K_ATU_VID_MASK, vid);
@@ -195,7 +157,8 @@ static void qca8k_fdb_write(struct qca8k_priv *priv, u16 vid, u8 port_mask,
reg[0] |= FIELD_PREP(QCA8K_ATU_ADDR5_MASK, mac[5]);
/* load the array into the ARL table */
- qca8k_bulk_write(priv, QCA8K_REG_ATU_DATA0, reg, sizeof(reg));
+ regmap_bulk_write(priv->regmap, QCA8K_REG_ATU_DATA0, reg,
+ QCA8K_ATU_TABLE_SIZE);
}
static int qca8k_fdb_access(struct qca8k_priv *priv, enum qca8k_fdb_cmd cmd,
diff --git a/drivers/net/dsa/qca/qca8k.h b/drivers/net/dsa/qca/qca8k.h
index 03514f7a20be..7996975d29d3 100644
--- a/drivers/net/dsa/qca/qca8k.h
+++ b/drivers/net/dsa/qca/qca8k.h
@@ -148,6 +148,8 @@
#define QCA8K_REG_IPV4_PRI_ADDR_MASK 0x474
/* Lookup registers */
+#define QCA8K_ATU_TABLE_SIZE 3 /* 12 bytes wide table / sizeof(u32) */
+
#define QCA8K_REG_ATU_DATA0 0x600
#define QCA8K_ATU_ADDR2_MASK GENMASK(31, 24)
#define QCA8K_ATU_ADDR3_MASK GENMASK(23, 16)
@@ -328,9 +330,6 @@ struct qca8k_priv;
struct qca8k_info_ops {
int (*autocast_mib)(struct dsa_switch *ds, int port, u64 *data);
- /* TODO: remove these extra ops when we can support regmap bulk read/write */
- int (*read_eth)(struct qca8k_priv *priv, u32 reg, u32 *val, int len);
- int (*write_eth)(struct qca8k_priv *priv, u32 reg, u32 *val, int len);
};
struct qca8k_match_data {
diff --git a/drivers/net/dsa/rzn1_a5psw.c b/drivers/net/dsa/rzn1_a5psw.c
index ed413d555bec..919027cf2012 100644
--- a/drivers/net/dsa/rzn1_a5psw.c
+++ b/drivers/net/dsa/rzn1_a5psw.c
@@ -781,9 +781,6 @@ static int a5psw_mdio_read(struct mii_bus *bus, int phy_id, int phy_reg)
u32 cmd, status;
int ret;
- if (phy_reg & MII_ADDR_C45)
- return -EOPNOTSUPP;
-
cmd = A5PSW_MDIO_COMMAND_READ;
cmd |= FIELD_PREP(A5PSW_MDIO_COMMAND_REG_ADDR, phy_reg);
cmd |= FIELD_PREP(A5PSW_MDIO_COMMAND_PHY_ADDR, phy_id);
@@ -809,9 +806,6 @@ static int a5psw_mdio_write(struct mii_bus *bus, int phy_id, int phy_reg,
struct a5psw *a5psw = bus->priv;
u32 cmd;
- if (phy_reg & MII_ADDR_C45)
- return -EOPNOTSUPP;
-
cmd = FIELD_PREP(A5PSW_MDIO_COMMAND_REG_ADDR, phy_reg);
cmd |= FIELD_PREP(A5PSW_MDIO_COMMAND_PHY_ADDR, phy_id);
diff --git a/drivers/net/dsa/sja1105/sja1105.h b/drivers/net/dsa/sja1105/sja1105.h
index 9ba2ec2b966d..fb1549a5fe32 100644
--- a/drivers/net/dsa/sja1105/sja1105.h
+++ b/drivers/net/dsa/sja1105/sja1105.h
@@ -149,8 +149,10 @@ struct sja1105_info {
bool (*rxtstamp)(struct dsa_switch *ds, int port, struct sk_buff *skb);
void (*txtstamp)(struct dsa_switch *ds, int port, struct sk_buff *skb);
int (*clocking_setup)(struct sja1105_private *priv);
- int (*pcs_mdio_read)(struct mii_bus *bus, int phy, int reg);
- int (*pcs_mdio_write)(struct mii_bus *bus, int phy, int reg, u16 val);
+ int (*pcs_mdio_read_c45)(struct mii_bus *bus, int phy, int mmd,
+ int reg);
+ int (*pcs_mdio_write_c45)(struct mii_bus *bus, int phy, int mmd,
+ int reg, u16 val);
int (*disable_microcontroller)(struct sja1105_private *priv);
const char *name;
bool supports_mii[SJA1105_MAX_NUM_PORTS];
@@ -303,10 +305,12 @@ void sja1105_frame_memory_partitioning(struct sja1105_private *priv);
/* From sja1105_mdio.c */
int sja1105_mdiobus_register(struct dsa_switch *ds);
void sja1105_mdiobus_unregister(struct dsa_switch *ds);
-int sja1105_pcs_mdio_read(struct mii_bus *bus, int phy, int reg);
-int sja1105_pcs_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val);
-int sja1110_pcs_mdio_read(struct mii_bus *bus, int phy, int reg);
-int sja1110_pcs_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val);
+int sja1105_pcs_mdio_read_c45(struct mii_bus *bus, int phy, int mmd, int reg);
+int sja1105_pcs_mdio_write_c45(struct mii_bus *bus, int phy, int mmd, int reg,
+ u16 val);
+int sja1110_pcs_mdio_read_c45(struct mii_bus *bus, int phy, int mmd, int reg);
+int sja1110_pcs_mdio_write_c45(struct mii_bus *bus, int phy, int mmd, int reg,
+ u16 val);
/* From sja1105_devlink.c */
int sja1105_devlink_setup(struct dsa_switch *ds);
diff --git a/drivers/net/dsa/sja1105/sja1105_mdio.c b/drivers/net/dsa/sja1105/sja1105_mdio.c
index 4059fcc8c832..01f1cb719042 100644
--- a/drivers/net/dsa/sja1105/sja1105_mdio.c
+++ b/drivers/net/dsa/sja1105/sja1105_mdio.c
@@ -7,20 +7,15 @@
#define SJA1110_PCS_BANK_REG SJA1110_SPI_ADDR(0x3fc)
-int sja1105_pcs_mdio_read(struct mii_bus *bus, int phy, int reg)
+int sja1105_pcs_mdio_read_c45(struct mii_bus *bus, int phy, int mmd, int reg)
{
struct sja1105_mdio_private *mdio_priv = bus->priv;
struct sja1105_private *priv = mdio_priv->priv;
u64 addr;
u32 tmp;
- u16 mmd;
int rc;
- if (!(reg & MII_ADDR_C45))
- return -EINVAL;
-
- mmd = (reg >> MII_DEVADDR_C45_SHIFT) & 0x1f;
- addr = (mmd << 16) | (reg & GENMASK(15, 0));
+ addr = (mmd << 16) | reg;
if (mmd != MDIO_MMD_VEND1 && mmd != MDIO_MMD_VEND2)
return 0xffff;
@@ -37,19 +32,15 @@ int sja1105_pcs_mdio_read(struct mii_bus *bus, int phy, int reg)
return tmp & 0xffff;
}
-int sja1105_pcs_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)
+int sja1105_pcs_mdio_write_c45(struct mii_bus *bus, int phy, int mmd,
+ int reg, u16 val)
{
struct sja1105_mdio_private *mdio_priv = bus->priv;
struct sja1105_private *priv = mdio_priv->priv;
u64 addr;
u32 tmp;
- u16 mmd;
-
- if (!(reg & MII_ADDR_C45))
- return -EINVAL;
- mmd = (reg >> MII_DEVADDR_C45_SHIFT) & 0x1f;
- addr = (mmd << 16) | (reg & GENMASK(15, 0));
+ addr = (mmd << 16) | reg;
tmp = val;
if (mmd != MDIO_MMD_VEND1 && mmd != MDIO_MMD_VEND2)
@@ -58,7 +49,7 @@ int sja1105_pcs_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)
return sja1105_xfer_u32(priv, SPI_WRITE, addr, &tmp, NULL);
}
-int sja1110_pcs_mdio_read(struct mii_bus *bus, int phy, int reg)
+int sja1110_pcs_mdio_read_c45(struct mii_bus *bus, int phy, int mmd, int reg)
{
struct sja1105_mdio_private *mdio_priv = bus->priv;
struct sja1105_private *priv = mdio_priv->priv;
@@ -66,17 +57,12 @@ int sja1110_pcs_mdio_read(struct mii_bus *bus, int phy, int reg)
int offset, bank;
u64 addr;
u32 tmp;
- u16 mmd;
int rc;
- if (!(reg & MII_ADDR_C45))
- return -EINVAL;
-
if (regs->pcs_base[phy] == SJA1105_RSV_ADDR)
return -ENODEV;
- mmd = (reg >> MII_DEVADDR_C45_SHIFT) & 0x1f;
- addr = (mmd << 16) | (reg & GENMASK(15, 0));
+ addr = (mmd << 16) | reg;
if (mmd == MDIO_MMD_VEND2 && (reg & GENMASK(15, 0)) == MII_PHYSID1)
return NXP_SJA1110_XPCS_ID >> 16;
@@ -108,7 +94,8 @@ int sja1110_pcs_mdio_read(struct mii_bus *bus, int phy, int reg)
return tmp & 0xffff;
}
-int sja1110_pcs_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)
+int sja1110_pcs_mdio_write_c45(struct mii_bus *bus, int phy, int reg, int mmd,
+ u16 val)
{
struct sja1105_mdio_private *mdio_priv = bus->priv;
struct sja1105_private *priv = mdio_priv->priv;
@@ -116,17 +103,12 @@ int sja1110_pcs_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)
int offset, bank;
u64 addr;
u32 tmp;
- u16 mmd;
int rc;
- if (!(reg & MII_ADDR_C45))
- return -EINVAL;
-
if (regs->pcs_base[phy] == SJA1105_RSV_ADDR)
return -ENODEV;
- mmd = (reg >> MII_DEVADDR_C45_SHIFT) & 0x1f;
- addr = (mmd << 16) | (reg & GENMASK(15, 0));
+ addr = (mmd << 16) | reg;
bank = addr >> 8;
offset = addr & GENMASK(7, 0);
@@ -167,7 +149,7 @@ static u64 sja1105_base_t1_encode_addr(struct sja1105_private *priv,
return regs->mdio_100base_t1 | (phy << 7) | (op << 5) | (xad << 0);
}
-static int sja1105_base_t1_mdio_read(struct mii_bus *bus, int phy, int reg)
+static int sja1105_base_t1_mdio_read_c22(struct mii_bus *bus, int phy, int reg)
{
struct sja1105_mdio_private *mdio_priv = bus->priv;
struct sja1105_private *priv = mdio_priv->priv;
@@ -175,30 +157,31 @@ static int sja1105_base_t1_mdio_read(struct mii_bus *bus, int phy, int reg)
u32 tmp;
int rc;
- if (reg & MII_ADDR_C45) {
- u16 mmd = (reg >> MII_DEVADDR_C45_SHIFT) & 0x1f;
-
- addr = sja1105_base_t1_encode_addr(priv, phy, SJA1105_C45_ADDR,
- mmd);
+ addr = sja1105_base_t1_encode_addr(priv, phy, SJA1105_C22, reg & 0x1f);
- tmp = reg & MII_REGADDR_C45_MASK;
+ rc = sja1105_xfer_u32(priv, SPI_READ, addr, &tmp, NULL);
+ if (rc < 0)
+ return rc;
- rc = sja1105_xfer_u32(priv, SPI_WRITE, addr, &tmp, NULL);
- if (rc < 0)
- return rc;
+ return tmp & 0xffff;
+}
- addr = sja1105_base_t1_encode_addr(priv, phy, SJA1105_C45_DATA,
- mmd);
+static int sja1105_base_t1_mdio_read_c45(struct mii_bus *bus, int phy,
+ int mmd, int reg)
+{
+ struct sja1105_mdio_private *mdio_priv = bus->priv;
+ struct sja1105_private *priv = mdio_priv->priv;
+ u64 addr;
+ u32 tmp;
+ int rc;
- rc = sja1105_xfer_u32(priv, SPI_READ, addr, &tmp, NULL);
- if (rc < 0)
- return rc;
+ addr = sja1105_base_t1_encode_addr(priv, phy, SJA1105_C45_ADDR, mmd);
- return tmp & 0xffff;
- }
+ rc = sja1105_xfer_u32(priv, SPI_WRITE, addr, &reg, NULL);
+ if (rc < 0)
+ return rc;
- /* Clause 22 read */
- addr = sja1105_base_t1_encode_addr(priv, phy, SJA1105_C22, reg & 0x1f);
+ addr = sja1105_base_t1_encode_addr(priv, phy, SJA1105_C45_DATA, mmd);
rc = sja1105_xfer_u32(priv, SPI_READ, addr, &tmp, NULL);
if (rc < 0)
@@ -207,41 +190,37 @@ static int sja1105_base_t1_mdio_read(struct mii_bus *bus, int phy, int reg)
return tmp & 0xffff;
}
-static int sja1105_base_t1_mdio_write(struct mii_bus *bus, int phy, int reg,
- u16 val)
+static int sja1105_base_t1_mdio_write_c22(struct mii_bus *bus, int phy, int reg,
+ u16 val)
{
struct sja1105_mdio_private *mdio_priv = bus->priv;
struct sja1105_private *priv = mdio_priv->priv;
u64 addr;
u32 tmp;
- int rc;
-
- if (reg & MII_ADDR_C45) {
- u16 mmd = (reg >> MII_DEVADDR_C45_SHIFT) & 0x1f;
-
- addr = sja1105_base_t1_encode_addr(priv, phy, SJA1105_C45_ADDR,
- mmd);
- tmp = reg & MII_REGADDR_C45_MASK;
+ addr = sja1105_base_t1_encode_addr(priv, phy, SJA1105_C22, reg & 0x1f);
- rc = sja1105_xfer_u32(priv, SPI_WRITE, addr, &tmp, NULL);
- if (rc < 0)
- return rc;
+ tmp = val & 0xffff;
- addr = sja1105_base_t1_encode_addr(priv, phy, SJA1105_C45_DATA,
- mmd);
+ return sja1105_xfer_u32(priv, SPI_WRITE, addr, &tmp, NULL);
+}
- tmp = val & 0xffff;
+static int sja1105_base_t1_mdio_write_c45(struct mii_bus *bus, int phy,
+ int mmd, int reg, u16 val)
+{
+ struct sja1105_mdio_private *mdio_priv = bus->priv;
+ struct sja1105_private *priv = mdio_priv->priv;
+ u64 addr;
+ u32 tmp;
+ int rc;
- rc = sja1105_xfer_u32(priv, SPI_WRITE, addr, &tmp, NULL);
- if (rc < 0)
- return rc;
+ addr = sja1105_base_t1_encode_addr(priv, phy, SJA1105_C45_ADDR, mmd);
- return 0;
- }
+ rc = sja1105_xfer_u32(priv, SPI_WRITE, addr, &reg, NULL);
+ if (rc < 0)
+ return rc;
- /* Clause 22 write */
- addr = sja1105_base_t1_encode_addr(priv, phy, SJA1105_C22, reg & 0x1f);
+ addr = sja1105_base_t1_encode_addr(priv, phy, SJA1105_C45_DATA, mmd);
tmp = val & 0xffff;
@@ -256,9 +235,6 @@ static int sja1105_base_tx_mdio_read(struct mii_bus *bus, int phy, int reg)
u32 tmp;
int rc;
- if (reg & MII_ADDR_C45)
- return -EOPNOTSUPP;
-
rc = sja1105_xfer_u32(priv, SPI_READ, regs->mdio_100base_tx + reg,
&tmp, NULL);
if (rc < 0)
@@ -275,9 +251,6 @@ static int sja1105_base_tx_mdio_write(struct mii_bus *bus, int phy, int reg,
const struct sja1105_regs *regs = priv->info->regs;
u32 tmp = val;
- if (reg & MII_ADDR_C45)
- return -EOPNOTSUPP;
-
return sja1105_xfer_u32(priv, SPI_WRITE, regs->mdio_100base_tx + reg,
&tmp, NULL);
}
@@ -360,8 +333,10 @@ static int sja1105_mdiobus_base_t1_register(struct sja1105_private *priv,
bus->name = "SJA1110 100base-T1 MDIO bus";
snprintf(bus->id, MII_BUS_ID_SIZE, "%s-base-t1",
dev_name(priv->ds->dev));
- bus->read = sja1105_base_t1_mdio_read;
- bus->write = sja1105_base_t1_mdio_write;
+ bus->read = sja1105_base_t1_mdio_read_c22;
+ bus->write = sja1105_base_t1_mdio_write_c22;
+ bus->read_c45 = sja1105_base_t1_mdio_read_c45;
+ bus->write_c45 = sja1105_base_t1_mdio_write_c45;
bus->parent = priv->ds->dev;
mdio_priv = bus->priv;
mdio_priv->priv = priv;
@@ -398,7 +373,7 @@ static int sja1105_mdiobus_pcs_register(struct sja1105_private *priv)
int rc = 0;
int port;
- if (!priv->info->pcs_mdio_read || !priv->info->pcs_mdio_write)
+ if (!priv->info->pcs_mdio_read_c45 || !priv->info->pcs_mdio_write_c45)
return 0;
bus = mdiobus_alloc_size(sizeof(*mdio_priv));
@@ -408,8 +383,8 @@ static int sja1105_mdiobus_pcs_register(struct sja1105_private *priv)
bus->name = "SJA1105 PCS MDIO bus";
snprintf(bus->id, MII_BUS_ID_SIZE, "%s-pcs",
dev_name(ds->dev));
- bus->read = priv->info->pcs_mdio_read;
- bus->write = priv->info->pcs_mdio_write;
+ bus->read_c45 = priv->info->pcs_mdio_read_c45;
+ bus->write_c45 = priv->info->pcs_mdio_write_c45;
bus->parent = ds->dev;
/* There is no PHY on this MDIO bus => mask out all PHY addresses
* from auto probing.
diff --git a/drivers/net/dsa/sja1105/sja1105_spi.c b/drivers/net/dsa/sja1105/sja1105_spi.c
index d3c9ad6d39d4..5ce29c8057a4 100644
--- a/drivers/net/dsa/sja1105/sja1105_spi.c
+++ b/drivers/net/dsa/sja1105/sja1105_spi.c
@@ -719,8 +719,8 @@ const struct sja1105_info sja1105r_info = {
.ptp_cmd_packing = sja1105pqrs_ptp_cmd_packing,
.rxtstamp = sja1105_rxtstamp,
.clocking_setup = sja1105_clocking_setup,
- .pcs_mdio_read = sja1105_pcs_mdio_read,
- .pcs_mdio_write = sja1105_pcs_mdio_write,
+ .pcs_mdio_read_c45 = sja1105_pcs_mdio_read_c45,
+ .pcs_mdio_write_c45 = sja1105_pcs_mdio_write_c45,
.regs = &sja1105pqrs_regs,
.port_speed = {
[SJA1105_SPEED_AUTO] = 0,
@@ -756,8 +756,8 @@ const struct sja1105_info sja1105s_info = {
.ptp_cmd_packing = sja1105pqrs_ptp_cmd_packing,
.rxtstamp = sja1105_rxtstamp,
.clocking_setup = sja1105_clocking_setup,
- .pcs_mdio_read = sja1105_pcs_mdio_read,
- .pcs_mdio_write = sja1105_pcs_mdio_write,
+ .pcs_mdio_read_c45 = sja1105_pcs_mdio_read_c45,
+ .pcs_mdio_write_c45 = sja1105_pcs_mdio_write_c45,
.port_speed = {
[SJA1105_SPEED_AUTO] = 0,
[SJA1105_SPEED_10MBPS] = 3,
@@ -794,8 +794,8 @@ const struct sja1105_info sja1110a_info = {
.rxtstamp = sja1110_rxtstamp,
.txtstamp = sja1110_txtstamp,
.disable_microcontroller = sja1110_disable_microcontroller,
- .pcs_mdio_read = sja1110_pcs_mdio_read,
- .pcs_mdio_write = sja1110_pcs_mdio_write,
+ .pcs_mdio_read_c45 = sja1110_pcs_mdio_read_c45,
+ .pcs_mdio_write_c45 = sja1110_pcs_mdio_write_c45,
.port_speed = {
[SJA1105_SPEED_AUTO] = 0,
[SJA1105_SPEED_10MBPS] = 4,
@@ -844,8 +844,8 @@ const struct sja1105_info sja1110b_info = {
.rxtstamp = sja1110_rxtstamp,
.txtstamp = sja1110_txtstamp,
.disable_microcontroller = sja1110_disable_microcontroller,
- .pcs_mdio_read = sja1110_pcs_mdio_read,
- .pcs_mdio_write = sja1110_pcs_mdio_write,
+ .pcs_mdio_read_c45 = sja1110_pcs_mdio_read_c45,
+ .pcs_mdio_write_c45 = sja1110_pcs_mdio_write_c45,
.port_speed = {
[SJA1105_SPEED_AUTO] = 0,
[SJA1105_SPEED_10MBPS] = 4,
@@ -894,8 +894,8 @@ const struct sja1105_info sja1110c_info = {
.rxtstamp = sja1110_rxtstamp,
.txtstamp = sja1110_txtstamp,
.disable_microcontroller = sja1110_disable_microcontroller,
- .pcs_mdio_read = sja1110_pcs_mdio_read,
- .pcs_mdio_write = sja1110_pcs_mdio_write,
+ .pcs_mdio_read_c45 = sja1110_pcs_mdio_read_c45,
+ .pcs_mdio_write_c45 = sja1110_pcs_mdio_write_c45,
.port_speed = {
[SJA1105_SPEED_AUTO] = 0,
[SJA1105_SPEED_10MBPS] = 4,
@@ -944,8 +944,8 @@ const struct sja1105_info sja1110d_info = {
.rxtstamp = sja1110_rxtstamp,
.txtstamp = sja1110_txtstamp,
.disable_microcontroller = sja1110_disable_microcontroller,
- .pcs_mdio_read = sja1110_pcs_mdio_read,
- .pcs_mdio_write = sja1110_pcs_mdio_write,
+ .pcs_mdio_read_c45 = sja1110_pcs_mdio_read_c45,
+ .pcs_mdio_write_c45 = sja1110_pcs_mdio_write_c45,
.port_speed = {
[SJA1105_SPEED_AUTO] = 0,
[SJA1105_SPEED_10MBPS] = 4,
diff --git a/drivers/net/ethernet/actions/owl-emac.c b/drivers/net/ethernet/actions/owl-emac.c
index cd4d71b83c33..c6f8f852bff1 100644
--- a/drivers/net/ethernet/actions/owl-emac.c
+++ b/drivers/net/ethernet/actions/owl-emac.c
@@ -1275,9 +1275,6 @@ static int owl_emac_mdio_read(struct mii_bus *bus, int addr, int regnum)
u32 data, tmp;
int ret;
- if (regnum & MII_ADDR_C45)
- return -EOPNOTSUPP;
-
data = OWL_EMAC_BIT_MAC_CSR10_SB;
data |= OWL_EMAC_VAL_MAC_CSR10_OPCODE_RD << OWL_EMAC_OFF_MAC_CSR10_OPCODE;
@@ -1305,9 +1302,6 @@ owl_emac_mdio_write(struct mii_bus *bus, int addr, int regnum, u16 val)
struct owl_emac_priv *priv = bus->priv;
u32 data, tmp;
- if (regnum & MII_ADDR_C45)
- return -EOPNOTSUPP;
-
data = OWL_EMAC_BIT_MAC_CSR10_SB;
data |= OWL_EMAC_VAL_MAC_CSR10_OPCODE_WR << OWL_EMAC_OFF_MAC_CSR10_OPCODE;
diff --git a/drivers/net/ethernet/adi/adin1110.c b/drivers/net/ethernet/adi/adin1110.c
index c26b8597945b..3f316a0f4158 100644
--- a/drivers/net/ethernet/adi/adin1110.c
+++ b/drivers/net/ethernet/adi/adin1110.c
@@ -523,7 +523,6 @@ static int adin1110_register_mdiobus(struct adin1110_priv *priv,
mii_bus->priv = priv;
mii_bus->parent = dev;
mii_bus->phy_mask = ~((u32)GENMASK(2, 0));
- mii_bus->probe_capabilities = MDIOBUS_C22;
snprintf(mii_bus->id, MII_BUS_ID_SIZE, "%s", dev_name(dev));
ret = devm_mdiobus_register(dev, mii_bus);
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index e8ad5ea31aff..d3999db7c6a2 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -597,7 +597,9 @@ static int ena_xdp_set(struct net_device *netdev, struct netdev_bpf *bpf)
if (rc)
return rc;
}
+ xdp_features_set_redirect_target(netdev, false);
} else if (old_bpf_prog) {
+ xdp_features_clear_redirect_target(netdev);
rc = ena_destroy_and_free_all_xdp_queues(adapter);
if (rc)
return rc;
@@ -4103,6 +4105,8 @@ static void ena_set_conf_feat_params(struct ena_adapter *adapter,
/* Set offload features */
ena_set_dev_offloads(feat, netdev);
+ netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT;
+
adapter->max_mtu = feat->dev_attr.max_mtu;
netdev->max_mtu = adapter->max_mtu;
netdev->min_mtu = ENA_MIN_MTU;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
index 466273b22f0a..3b70f6737633 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
@@ -1285,6 +1285,22 @@
#define MDIO_PMA_RX_CTRL1 0x8051
#endif
+#ifndef MDIO_PMA_RX_LSTS
+#define MDIO_PMA_RX_LSTS 0x018020
+#endif
+
+#ifndef MDIO_PMA_RX_EQ_CTRL4
+#define MDIO_PMA_RX_EQ_CTRL4 0x0001805C
+#endif
+
+#ifndef MDIO_PMA_MP_MISC_STS
+#define MDIO_PMA_MP_MISC_STS 0x0078
+#endif
+
+#ifndef MDIO_PMA_PHY_RX_EQ_CEU
+#define MDIO_PMA_PHY_RX_EQ_CEU 0x1800E
+#endif
+
#ifndef MDIO_PCS_DIG_CTRL
#define MDIO_PCS_DIG_CTRL 0x8000
#endif
@@ -1395,6 +1411,28 @@
#define XGBE_PMA_RX_RST_0_RESET_ON 0x10
#define XGBE_PMA_RX_RST_0_RESET_OFF 0x00
+#define XGBE_PMA_RX_SIG_DET_0_MASK BIT(4)
+#define XGBE_PMA_RX_SIG_DET_0_ENABLE BIT(4)
+#define XGBE_PMA_RX_SIG_DET_0_DISABLE 0x0000
+
+#define XGBE_PMA_RX_VALID_0_MASK BIT(12)
+#define XGBE_PMA_RX_VALID_0_ENABLE BIT(12)
+#define XGBE_PMA_RX_VALID_0_DISABLE 0x0000
+
+#define XGBE_PMA_RX_AD_REQ_MASK BIT(12)
+#define XGBE_PMA_RX_AD_REQ_ENABLE BIT(12)
+#define XGBE_PMA_RX_AD_REQ_DISABLE 0x0000
+
+#define XGBE_PMA_RX_ADPT_ACK_MASK BIT(12)
+#define XGBE_PMA_RX_ADPT_ACK BIT(12)
+
+#define XGBE_PMA_CFF_UPDTM1_VLD BIT(8)
+#define XGBE_PMA_CFF_UPDT0_VLD BIT(9)
+#define XGBE_PMA_CFF_UPDT1_VLD BIT(10)
+#define XGBE_PMA_CFF_UPDT_MASK (XGBE_PMA_CFF_UPDTM1_VLD |\
+ XGBE_PMA_CFF_UPDT0_VLD | \
+ XGBE_PMA_CFF_UPDT1_VLD)
+
#define XGBE_PMA_PLL_CTRL_MASK BIT(15)
#define XGBE_PMA_PLL_CTRL_ENABLE BIT(15)
#define XGBE_PMA_PLL_CTRL_DISABLE 0x0000
@@ -1699,20 +1737,21 @@ do { \
} while (0)
/* Macros for building, reading or writing register values or bits
- * using MDIO. Different from above because of the use of standardized
- * Linux include values. No shifting is performed with the bit
- * operations, everything works on mask values.
+ * using MDIO.
*/
+
+#define XGBE_ADDR_C45 BIT(30)
+
#define XMDIO_READ(_pdata, _mmd, _reg) \
((_pdata)->hw_if.read_mmd_regs((_pdata), 0, \
- MII_ADDR_C45 | (_mmd << 16) | ((_reg) & 0xffff)))
+ XGBE_ADDR_C45 | (_mmd << 16) | ((_reg) & 0xffff)))
#define XMDIO_READ_BITS(_pdata, _mmd, _reg, _mask) \
(XMDIO_READ((_pdata), _mmd, _reg) & _mask)
#define XMDIO_WRITE(_pdata, _mmd, _reg, _val) \
((_pdata)->hw_if.write_mmd_regs((_pdata), 0, \
- MII_ADDR_C45 | (_mmd << 16) | ((_reg) & 0xffff), (_val)))
+ XGBE_ADDR_C45 | (_mmd << 16) | ((_reg) & 0xffff), (_val)))
#define XMDIO_WRITE_BITS(_pdata, _mmd, _reg, _mask, _val) \
do { \
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index 4030d619e84f..f393228d41c7 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -814,6 +814,9 @@ static int xgbe_set_speed(struct xgbe_prv_data *pdata, int speed)
unsigned int ss;
switch (speed) {
+ case SPEED_10:
+ ss = 0x07;
+ break;
case SPEED_1000:
ss = 0x03;
break;
@@ -1154,8 +1157,8 @@ static int xgbe_read_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad,
unsigned int mmd_address, index, offset;
int mmd_data;
- if (mmd_reg & MII_ADDR_C45)
- mmd_address = mmd_reg & ~MII_ADDR_C45;
+ if (mmd_reg & XGBE_ADDR_C45)
+ mmd_address = mmd_reg & ~XGBE_ADDR_C45;
else
mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
@@ -1186,8 +1189,8 @@ static void xgbe_write_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad,
unsigned long flags;
unsigned int mmd_address, index, offset;
- if (mmd_reg & MII_ADDR_C45)
- mmd_address = mmd_reg & ~MII_ADDR_C45;
+ if (mmd_reg & XGBE_ADDR_C45)
+ mmd_address = mmd_reg & ~XGBE_ADDR_C45;
else
mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
@@ -1217,8 +1220,8 @@ static int xgbe_read_mmd_regs_v1(struct xgbe_prv_data *pdata, int prtad,
unsigned int mmd_address;
int mmd_data;
- if (mmd_reg & MII_ADDR_C45)
- mmd_address = mmd_reg & ~MII_ADDR_C45;
+ if (mmd_reg & XGBE_ADDR_C45)
+ mmd_address = mmd_reg & ~XGBE_ADDR_C45;
else
mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
@@ -1245,8 +1248,8 @@ static void xgbe_write_mmd_regs_v1(struct xgbe_prv_data *pdata, int prtad,
unsigned int mmd_address;
unsigned long flags;
- if (mmd_reg & MII_ADDR_C45)
- mmd_address = mmd_reg & ~MII_ADDR_C45;
+ if (mmd_reg & XGBE_ADDR_C45)
+ mmd_address = mmd_reg & ~XGBE_ADDR_C45;
else
mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
@@ -1291,11 +1294,20 @@ static void xgbe_write_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
}
}
-static unsigned int xgbe_create_mdio_sca(int port, int reg)
+static unsigned int xgbe_create_mdio_sca_c22(int port, int reg)
{
- unsigned int mdio_sca, da;
+ unsigned int mdio_sca;
+
+ mdio_sca = 0;
+ XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, RA, reg);
+ XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, PA, port);
- da = (reg & MII_ADDR_C45) ? reg >> 16 : 0;
+ return mdio_sca;
+}
+
+static unsigned int xgbe_create_mdio_sca_c45(int port, unsigned int da, int reg)
+{
+ unsigned int mdio_sca;
mdio_sca = 0;
XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, RA, reg);
@@ -1305,14 +1317,13 @@ static unsigned int xgbe_create_mdio_sca(int port, int reg)
return mdio_sca;
}
-static int xgbe_write_ext_mii_regs(struct xgbe_prv_data *pdata, int addr,
- int reg, u16 val)
+static int xgbe_write_ext_mii_regs(struct xgbe_prv_data *pdata,
+ unsigned int mdio_sca, u16 val)
{
- unsigned int mdio_sca, mdio_sccd;
+ unsigned int mdio_sccd;
reinit_completion(&pdata->mdio_complete);
- mdio_sca = xgbe_create_mdio_sca(addr, reg);
XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca);
mdio_sccd = 0;
@@ -1329,14 +1340,33 @@ static int xgbe_write_ext_mii_regs(struct xgbe_prv_data *pdata, int addr,
return 0;
}
-static int xgbe_read_ext_mii_regs(struct xgbe_prv_data *pdata, int addr,
- int reg)
+static int xgbe_write_ext_mii_regs_c22(struct xgbe_prv_data *pdata, int addr,
+ int reg, u16 val)
{
- unsigned int mdio_sca, mdio_sccd;
+ unsigned int mdio_sca;
+
+ mdio_sca = xgbe_create_mdio_sca_c22(addr, reg);
+
+ return xgbe_write_ext_mii_regs(pdata, mdio_sca, val);
+}
+
+static int xgbe_write_ext_mii_regs_c45(struct xgbe_prv_data *pdata, int addr,
+ int devad, int reg, u16 val)
+{
+ unsigned int mdio_sca;
+
+ mdio_sca = xgbe_create_mdio_sca_c45(addr, devad, reg);
+
+ return xgbe_write_ext_mii_regs(pdata, mdio_sca, val);
+}
+
+static int xgbe_read_ext_mii_regs(struct xgbe_prv_data *pdata,
+ unsigned int mdio_sca)
+{
+ unsigned int mdio_sccd;
reinit_completion(&pdata->mdio_complete);
- mdio_sca = xgbe_create_mdio_sca(addr, reg);
XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca);
mdio_sccd = 0;
@@ -1352,6 +1382,26 @@ static int xgbe_read_ext_mii_regs(struct xgbe_prv_data *pdata, int addr,
return XGMAC_IOREAD_BITS(pdata, MAC_MDIOSCCDR, DATA);
}
+static int xgbe_read_ext_mii_regs_c22(struct xgbe_prv_data *pdata, int addr,
+ int reg)
+{
+ unsigned int mdio_sca;
+
+ mdio_sca = xgbe_create_mdio_sca_c22(addr, reg);
+
+ return xgbe_read_ext_mii_regs(pdata, mdio_sca);
+}
+
+static int xgbe_read_ext_mii_regs_c45(struct xgbe_prv_data *pdata, int addr,
+ int devad, int reg)
+{
+ unsigned int mdio_sca;
+
+ mdio_sca = xgbe_create_mdio_sca_c45(addr, devad, reg);
+
+ return xgbe_read_ext_mii_regs(pdata, mdio_sca);
+}
+
static int xgbe_set_ext_mii_mode(struct xgbe_prv_data *pdata, unsigned int port,
enum xgbe_mdio_mode mode)
{
@@ -3565,8 +3615,10 @@ void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
hw_if->set_speed = xgbe_set_speed;
hw_if->set_ext_mii_mode = xgbe_set_ext_mii_mode;
- hw_if->read_ext_mii_regs = xgbe_read_ext_mii_regs;
- hw_if->write_ext_mii_regs = xgbe_write_ext_mii_regs;
+ hw_if->read_ext_mii_regs_c22 = xgbe_read_ext_mii_regs_c22;
+ hw_if->write_ext_mii_regs_c22 = xgbe_write_ext_mii_regs_c22;
+ hw_if->read_ext_mii_regs_c45 = xgbe_read_ext_mii_regs_c45;
+ hw_if->write_ext_mii_regs_c45 = xgbe_write_ext_mii_regs_c45;
hw_if->set_gpio = xgbe_set_gpio;
hw_if->clr_gpio = xgbe_clr_gpio;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
index 43fdd111235a..33a9574e9e04 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
@@ -274,6 +274,15 @@ static void xgbe_sgmii_1000_mode(struct xgbe_prv_data *pdata)
pdata->phy_if.phy_impl.set_mode(pdata, XGBE_MODE_SGMII_1000);
}
+static void xgbe_sgmii_10_mode(struct xgbe_prv_data *pdata)
+{
+ /* Set MAC to 10M speed */
+ pdata->hw_if.set_speed(pdata, SPEED_10);
+
+ /* Call PHY implementation support to complete rate change */
+ pdata->phy_if.phy_impl.set_mode(pdata, XGBE_MODE_SGMII_10);
+}
+
static void xgbe_sgmii_100_mode(struct xgbe_prv_data *pdata)
{
/* Set MAC to 1G speed */
@@ -306,6 +315,9 @@ static void xgbe_change_mode(struct xgbe_prv_data *pdata,
case XGBE_MODE_KR:
xgbe_kr_mode(pdata);
break;
+ case XGBE_MODE_SGMII_10:
+ xgbe_sgmii_10_mode(pdata);
+ break;
case XGBE_MODE_SGMII_100:
xgbe_sgmii_100_mode(pdata);
break;
@@ -1077,6 +1089,8 @@ static const char *xgbe_phy_fc_string(struct xgbe_prv_data *pdata)
static const char *xgbe_phy_speed_string(int speed)
{
switch (speed) {
+ case SPEED_10:
+ return "10Mbps";
case SPEED_100:
return "100Mbps";
case SPEED_1000:
@@ -1164,6 +1178,7 @@ static int xgbe_phy_config_fixed(struct xgbe_prv_data *pdata)
case XGBE_MODE_KX_1000:
case XGBE_MODE_KX_2500:
case XGBE_MODE_KR:
+ case XGBE_MODE_SGMII_10:
case XGBE_MODE_SGMII_100:
case XGBE_MODE_SGMII_1000:
case XGBE_MODE_X:
@@ -1225,6 +1240,8 @@ static int __xgbe_phy_config_aneg(struct xgbe_prv_data *pdata, bool set_mode)
xgbe_set_mode(pdata, XGBE_MODE_SGMII_1000);
} else if (xgbe_use_mode(pdata, XGBE_MODE_SGMII_100)) {
xgbe_set_mode(pdata, XGBE_MODE_SGMII_100);
+ } else if (xgbe_use_mode(pdata, XGBE_MODE_SGMII_10)) {
+ xgbe_set_mode(pdata, XGBE_MODE_SGMII_10);
} else {
enable_irq(pdata->an_irq);
ret = -EINVAL;
@@ -1325,6 +1342,9 @@ static void xgbe_phy_status_result(struct xgbe_prv_data *pdata)
mode = xgbe_phy_status_aneg(pdata);
switch (mode) {
+ case XGBE_MODE_SGMII_10:
+ pdata->phy.speed = SPEED_10;
+ break;
case XGBE_MODE_SGMII_100:
pdata->phy.speed = SPEED_100;
break;
@@ -1467,6 +1487,8 @@ static int xgbe_phy_start(struct xgbe_prv_data *pdata)
xgbe_sgmii_1000_mode(pdata);
} else if (xgbe_use_mode(pdata, XGBE_MODE_SGMII_100)) {
xgbe_sgmii_100_mode(pdata);
+ } else if (xgbe_use_mode(pdata, XGBE_MODE_SGMII_10)) {
+ xgbe_sgmii_10_mode(pdata);
} else {
ret = -EINVAL;
goto err_irq;
@@ -1564,6 +1586,8 @@ static int xgbe_phy_best_advertised_speed(struct xgbe_prv_data *pdata)
return SPEED_1000;
else if (XGBE_ADV(lks, 100baseT_Full))
return SPEED_100;
+ else if (XGBE_ADV(lks, 10baseT_Full))
+ return SPEED_10;
return SPEED_UNKNOWN;
}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
index c731a04731f8..16e7fb2c0dae 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
@@ -124,6 +124,7 @@
#include "xgbe.h"
#include "xgbe-common.h"
+#define XGBE_PHY_PORT_SPEED_10 BIT(0)
#define XGBE_PHY_PORT_SPEED_100 BIT(1)
#define XGBE_PHY_PORT_SPEED_1000 BIT(2)
#define XGBE_PHY_PORT_SPEED_2500 BIT(3)
@@ -387,6 +388,10 @@ struct xgbe_phy_data {
static DEFINE_MUTEX(xgbe_phy_comm_lock);
static enum xgbe_an_mode xgbe_phy_an_mode(struct xgbe_prv_data *pdata);
+static void xgbe_phy_rrc(struct xgbe_prv_data *pdata);
+static void xgbe_phy_perform_ratechange(struct xgbe_prv_data *pdata,
+ enum xgbe_mb_cmd cmd,
+ enum xgbe_mb_subcmd sub_cmd);
static int xgbe_phy_i2c_xfer(struct xgbe_prv_data *pdata,
struct xgbe_i2c_op *i2c_op)
@@ -599,20 +604,27 @@ static int xgbe_phy_get_comm_ownership(struct xgbe_prv_data *pdata)
return -ETIMEDOUT;
}
-static int xgbe_phy_mdio_mii_write(struct xgbe_prv_data *pdata, int addr,
- int reg, u16 val)
+static int xgbe_phy_mdio_mii_write_c22(struct xgbe_prv_data *pdata, int addr,
+ int reg, u16 val)
{
struct xgbe_phy_data *phy_data = pdata->phy_data;
- if (reg & MII_ADDR_C45) {
- if (phy_data->phydev_mode != XGBE_MDIO_MODE_CL45)
- return -ENOTSUPP;
- } else {
- if (phy_data->phydev_mode != XGBE_MDIO_MODE_CL22)
- return -ENOTSUPP;
- }
+ if (phy_data->phydev_mode != XGBE_MDIO_MODE_CL22)
+ return -EOPNOTSUPP;
+
+ return pdata->hw_if.write_ext_mii_regs_c22(pdata, addr, reg, val);
+}
- return pdata->hw_if.write_ext_mii_regs(pdata, addr, reg, val);
+static int xgbe_phy_mdio_mii_write_c45(struct xgbe_prv_data *pdata, int addr,
+ int devad, int reg, u16 val)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+ if (phy_data->phydev_mode != XGBE_MDIO_MODE_CL45)
+ return -EOPNOTSUPP;
+
+ return pdata->hw_if.write_ext_mii_regs_c45(pdata, addr, devad,
+ reg, val);
}
static int xgbe_phy_i2c_mii_write(struct xgbe_prv_data *pdata, int reg, u16 val)
@@ -637,7 +649,8 @@ static int xgbe_phy_i2c_mii_write(struct xgbe_prv_data *pdata, int reg, u16 val)
return ret;
}
-static int xgbe_phy_mii_write(struct mii_bus *mii, int addr, int reg, u16 val)
+static int xgbe_phy_mii_write_c22(struct mii_bus *mii, int addr, int reg,
+ u16 val)
{
struct xgbe_prv_data *pdata = mii->priv;
struct xgbe_phy_data *phy_data = pdata->phy_data;
@@ -650,29 +663,58 @@ static int xgbe_phy_mii_write(struct mii_bus *mii, int addr, int reg, u16 val)
if (phy_data->conn_type == XGBE_CONN_TYPE_SFP)
ret = xgbe_phy_i2c_mii_write(pdata, reg, val);
else if (phy_data->conn_type & XGBE_CONN_TYPE_MDIO)
- ret = xgbe_phy_mdio_mii_write(pdata, addr, reg, val);
+ ret = xgbe_phy_mdio_mii_write_c22(pdata, addr, reg, val);
else
- ret = -ENOTSUPP;
+ ret = -EOPNOTSUPP;
xgbe_phy_put_comm_ownership(pdata);
return ret;
}
-static int xgbe_phy_mdio_mii_read(struct xgbe_prv_data *pdata, int addr,
- int reg)
+static int xgbe_phy_mii_write_c45(struct mii_bus *mii, int addr, int devad,
+ int reg, u16 val)
{
+ struct xgbe_prv_data *pdata = mii->priv;
struct xgbe_phy_data *phy_data = pdata->phy_data;
+ int ret;
- if (reg & MII_ADDR_C45) {
- if (phy_data->phydev_mode != XGBE_MDIO_MODE_CL45)
- return -ENOTSUPP;
- } else {
- if (phy_data->phydev_mode != XGBE_MDIO_MODE_CL22)
- return -ENOTSUPP;
- }
+ ret = xgbe_phy_get_comm_ownership(pdata);
+ if (ret)
+ return ret;
- return pdata->hw_if.read_ext_mii_regs(pdata, addr, reg);
+ if (phy_data->conn_type == XGBE_CONN_TYPE_SFP)
+ ret = -EOPNOTSUPP;
+ else if (phy_data->conn_type & XGBE_CONN_TYPE_MDIO)
+ ret = xgbe_phy_mdio_mii_write_c45(pdata, addr, devad, reg, val);
+ else
+ ret = -EOPNOTSUPP;
+
+ xgbe_phy_put_comm_ownership(pdata);
+
+ return ret;
+}
+
+static int xgbe_phy_mdio_mii_read_c22(struct xgbe_prv_data *pdata, int addr,
+ int reg)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+ if (phy_data->phydev_mode != XGBE_MDIO_MODE_CL22)
+ return -EOPNOTSUPP;
+
+ return pdata->hw_if.read_ext_mii_regs_c22(pdata, addr, reg);
+}
+
+static int xgbe_phy_mdio_mii_read_c45(struct xgbe_prv_data *pdata, int addr,
+ int devad, int reg)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+ if (phy_data->phydev_mode != XGBE_MDIO_MODE_CL45)
+ return -EOPNOTSUPP;
+
+ return pdata->hw_if.read_ext_mii_regs_c45(pdata, addr, devad, reg);
}
static int xgbe_phy_i2c_mii_read(struct xgbe_prv_data *pdata, int reg)
@@ -697,7 +739,7 @@ static int xgbe_phy_i2c_mii_read(struct xgbe_prv_data *pdata, int reg)
return ret;
}
-static int xgbe_phy_mii_read(struct mii_bus *mii, int addr, int reg)
+static int xgbe_phy_mii_read_c22(struct mii_bus *mii, int addr, int reg)
{
struct xgbe_prv_data *pdata = mii->priv;
struct xgbe_phy_data *phy_data = pdata->phy_data;
@@ -710,7 +752,30 @@ static int xgbe_phy_mii_read(struct mii_bus *mii, int addr, int reg)
if (phy_data->conn_type == XGBE_CONN_TYPE_SFP)
ret = xgbe_phy_i2c_mii_read(pdata, reg);
else if (phy_data->conn_type & XGBE_CONN_TYPE_MDIO)
- ret = xgbe_phy_mdio_mii_read(pdata, addr, reg);
+ ret = xgbe_phy_mdio_mii_read_c22(pdata, addr, reg);
+ else
+ ret = -EOPNOTSUPP;
+
+ xgbe_phy_put_comm_ownership(pdata);
+
+ return ret;
+}
+
+static int xgbe_phy_mii_read_c45(struct mii_bus *mii, int addr, int devad,
+ int reg)
+{
+ struct xgbe_prv_data *pdata = mii->priv;
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ int ret;
+
+ ret = xgbe_phy_get_comm_ownership(pdata);
+ if (ret)
+ return ret;
+
+ if (phy_data->conn_type == XGBE_CONN_TYPE_SFP)
+ ret = -EOPNOTSUPP;
+ else if (phy_data->conn_type & XGBE_CONN_TYPE_MDIO)
+ ret = xgbe_phy_mdio_mii_read_c45(pdata, addr, devad, reg);
else
ret = -ENOTSUPP;
@@ -759,6 +824,8 @@ static void xgbe_phy_sfp_phy_settings(struct xgbe_prv_data *pdata)
XGBE_SET_SUP(lks, Pause);
XGBE_SET_SUP(lks, Asym_Pause);
if (phy_data->sfp_base == XGBE_SFP_BASE_1000_T) {
+ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10)
+ XGBE_SET_SUP(lks, 10baseT_Full);
if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100)
XGBE_SET_SUP(lks, 100baseT_Full);
if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000)
@@ -1542,6 +1609,16 @@ static enum xgbe_mode xgbe_phy_an37_sgmii_outcome(struct xgbe_prv_data *pdata)
xgbe_phy_phydev_flowctrl(pdata);
switch (pdata->an_status & XGBE_SGMII_AN_LINK_SPEED) {
+ case XGBE_SGMII_AN_LINK_SPEED_10:
+ if (pdata->an_status & XGBE_SGMII_AN_LINK_DUPLEX) {
+ XGBE_SET_LP_ADV(lks, 10baseT_Full);
+ mode = XGBE_MODE_SGMII_10;
+ } else {
+ /* Half-duplex not supported */
+ XGBE_SET_LP_ADV(lks, 10baseT_Half);
+ mode = XGBE_MODE_UNKNOWN;
+ }
+ break;
case XGBE_SGMII_AN_LINK_SPEED_100:
if (pdata->an_status & XGBE_SGMII_AN_LINK_DUPLEX) {
XGBE_SET_LP_ADV(lks, 100baseT_Full);
@@ -1658,7 +1735,10 @@ static enum xgbe_mode xgbe_phy_an73_redrv_outcome(struct xgbe_prv_data *pdata)
switch (phy_data->sfp_base) {
case XGBE_SFP_BASE_1000_T:
if (phy_data->phydev &&
- (phy_data->phydev->speed == SPEED_100))
+ (phy_data->phydev->speed == SPEED_10))
+ mode = XGBE_MODE_SGMII_10;
+ else if (phy_data->phydev &&
+ (phy_data->phydev->speed == SPEED_100))
mode = XGBE_MODE_SGMII_100;
else
mode = XGBE_MODE_SGMII_1000;
@@ -1673,7 +1753,10 @@ static enum xgbe_mode xgbe_phy_an73_redrv_outcome(struct xgbe_prv_data *pdata)
break;
default:
if (phy_data->phydev &&
- (phy_data->phydev->speed == SPEED_100))
+ (phy_data->phydev->speed == SPEED_10))
+ mode = XGBE_MODE_SGMII_10;
+ else if (phy_data->phydev &&
+ (phy_data->phydev->speed == SPEED_100))
mode = XGBE_MODE_SGMII_100;
else
mode = XGBE_MODE_SGMII_1000;
@@ -1803,6 +1886,9 @@ static void xgbe_phy_an_advertising(struct xgbe_prv_data *pdata,
if (phy_data->phydev &&
(phy_data->phydev->speed == SPEED_10000))
XGBE_SET_ADV(dlks, 10000baseKR_Full);
+ else if (phy_data->phydev &&
+ (phy_data->phydev->speed == SPEED_2500))
+ XGBE_SET_ADV(dlks, 2500baseX_Full);
else
XGBE_SET_ADV(dlks, 1000baseKX_Full);
break;
@@ -1910,8 +1996,8 @@ static int xgbe_phy_set_redrv_mode_mdio(struct xgbe_prv_data *pdata,
redrv_reg = XGBE_PHY_REDRV_MODE_REG + (phy_data->redrv_lane * 0x1000);
redrv_val = (u16)mode;
- return pdata->hw_if.write_ext_mii_regs(pdata, phy_data->redrv_addr,
- redrv_reg, redrv_val);
+ return pdata->hw_if.write_ext_mii_regs_c22(pdata, phy_data->redrv_addr,
+ redrv_reg, redrv_val);
}
static int xgbe_phy_set_redrv_mode_i2c(struct xgbe_prv_data *pdata,
@@ -1956,6 +2042,93 @@ static void xgbe_phy_set_redrv_mode(struct xgbe_prv_data *pdata)
xgbe_phy_put_comm_ownership(pdata);
}
+#define MAX_RX_ADAPT_RETRIES 1
+#define XGBE_PMA_RX_VAL_SIG_MASK (XGBE_PMA_RX_SIG_DET_0_MASK | \
+ XGBE_PMA_RX_VALID_0_MASK)
+
+static void xgbe_set_rx_adap_mode(struct xgbe_prv_data *pdata,
+ enum xgbe_mode mode)
+{
+ if (pdata->rx_adapt_retries++ >= MAX_RX_ADAPT_RETRIES) {
+ pdata->rx_adapt_retries = 0;
+ return;
+ }
+
+ xgbe_phy_perform_ratechange(pdata,
+ mode == XGBE_MODE_KR ?
+ XGBE_MB_CMD_SET_10G_KR :
+ XGBE_MB_CMD_SET_10G_SFI,
+ XGBE_MB_SUBCMD_RX_ADAP);
+}
+
+static void xgbe_rx_adaptation(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ unsigned int reg;
+
+ /* step 2: force PCS to send RX_ADAPT Req to PHY */
+ XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_RX_EQ_CTRL4,
+ XGBE_PMA_RX_AD_REQ_MASK, XGBE_PMA_RX_AD_REQ_ENABLE);
+
+ /* Step 3: Wait for RX_ADAPT ACK from the PHY */
+ msleep(200);
+
+ /* Software polls for coefficient update command (given by local PHY) */
+ reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_PHY_RX_EQ_CEU);
+
+ /* Clear the RX_AD_REQ bit */
+ XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_RX_EQ_CTRL4,
+ XGBE_PMA_RX_AD_REQ_MASK, XGBE_PMA_RX_AD_REQ_DISABLE);
+
+ /* Check if coefficient update command is set */
+ if ((reg & XGBE_PMA_CFF_UPDT_MASK) != XGBE_PMA_CFF_UPDT_MASK)
+ goto set_mode;
+
+ /* Step 4: Check for Block lock */
+
+ /* Link status is latched low, so read once to clear
+ * and then read again to get current state
+ */
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
+ if (reg & MDIO_STAT1_LSTATUS) {
+ /* If the block lock is found, update the helpers
+ * and declare the link up
+ */
+ netif_dbg(pdata, link, pdata->netdev, "Block_lock done");
+ pdata->rx_adapt_done = true;
+ pdata->mode_set = false;
+ return;
+ }
+
+set_mode:
+ xgbe_set_rx_adap_mode(pdata, phy_data->cur_mode);
+}
+
+static void xgbe_phy_rx_adaptation(struct xgbe_prv_data *pdata)
+{
+ unsigned int reg;
+
+rx_adapt_reinit:
+ reg = XMDIO_READ_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_RX_LSTS,
+ XGBE_PMA_RX_VAL_SIG_MASK);
+
+ /* step 1: Check for RX_VALID && LF_SIGDET */
+ if ((reg & XGBE_PMA_RX_VAL_SIG_MASK) != XGBE_PMA_RX_VAL_SIG_MASK) {
+ netif_dbg(pdata, link, pdata->netdev,
+ "RX_VALID or LF_SIGDET is unset, issue rrc");
+ xgbe_phy_rrc(pdata);
+ if (pdata->rx_adapt_retries++ >= MAX_RX_ADAPT_RETRIES) {
+ pdata->rx_adapt_retries = 0;
+ return;
+ }
+ goto rx_adapt_reinit;
+ }
+
+ /* perform rx adaptation */
+ xgbe_rx_adaptation(pdata);
+}
+
static void xgbe_phy_rx_reset(struct xgbe_prv_data *pdata)
{
int reg;
@@ -2021,7 +2194,7 @@ static void xgbe_phy_perform_ratechange(struct xgbe_prv_data *pdata,
wait = XGBE_RATECHANGE_COUNT;
while (wait--) {
if (!XP_IOREAD_BITS(pdata, XP_DRIVER_INT_RO, STATUS))
- goto reenable_pll;
+ goto do_rx_adaptation;
usleep_range(1000, 2000);
}
@@ -2031,6 +2204,20 @@ static void xgbe_phy_perform_ratechange(struct xgbe_prv_data *pdata,
/* Reset on error */
xgbe_phy_rx_reset(pdata);
+ goto reenable_pll;
+
+do_rx_adaptation:
+ if (pdata->en_rx_adap && sub_cmd == XGBE_MB_SUBCMD_RX_ADAP &&
+ (cmd == XGBE_MB_CMD_SET_10G_KR || cmd == XGBE_MB_CMD_SET_10G_SFI)) {
+ netif_dbg(pdata, link, pdata->netdev,
+ "Enabling RX adaptation\n");
+ pdata->mode_set = true;
+ xgbe_phy_rx_adaptation(pdata);
+ /* return from here to avoid enabling PLL ctrl
+ * during adaptation phase
+ */
+ return;
+ }
reenable_pll:
/* Enable PLL re-initialization, not needed for PHY Power Off and RRC cmds */
@@ -2059,6 +2246,31 @@ static void xgbe_phy_power_off(struct xgbe_prv_data *pdata)
netif_dbg(pdata, link, pdata->netdev, "phy powered off\n");
}
+static bool enable_rx_adap(struct xgbe_prv_data *pdata, enum xgbe_mode mode)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ unsigned int ver;
+
+ /* Rx-Adaptation is not supported on older platforms(< 0x30H) */
+ ver = XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER);
+ if (ver < 0x30)
+ return false;
+
+ /* Re-driver models 4223 && 4227 do not support Rx-Adaptation */
+ if (phy_data->redrv &&
+ (phy_data->redrv_model == XGBE_PHY_REDRV_MODEL_4223 ||
+ phy_data->redrv_model == XGBE_PHY_REDRV_MODEL_4227))
+ return false;
+
+ /* 10G KR mode with AN does not support Rx-Adaptation */
+ if (mode == XGBE_MODE_KR &&
+ phy_data->port_mode != XGBE_PORT_MODE_BACKPLANE_NO_AUTONEG)
+ return false;
+
+ pdata->en_rx_adap = 1;
+ return true;
+}
+
static void xgbe_phy_sfi_mode(struct xgbe_prv_data *pdata)
{
struct xgbe_phy_data *phy_data = pdata->phy_data;
@@ -2067,7 +2279,12 @@ static void xgbe_phy_sfi_mode(struct xgbe_prv_data *pdata)
/* 10G/SFI */
if (phy_data->sfp_cable != XGBE_SFP_CABLE_PASSIVE) {
+ pdata->en_rx_adap = 0;
xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_SET_10G_SFI, XGBE_MB_SUBCMD_ACTIVE);
+ } else if ((phy_data->sfp_cable == XGBE_SFP_CABLE_PASSIVE) &&
+ (enable_rx_adap(pdata, XGBE_MODE_SFI))) {
+ xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_SET_10G_SFI,
+ XGBE_MB_SUBCMD_RX_ADAP);
} else {
if (phy_data->sfp_cable_len <= 1)
xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_SET_10G_SFI,
@@ -2127,6 +2344,20 @@ static void xgbe_phy_sgmii_100_mode(struct xgbe_prv_data *pdata)
netif_dbg(pdata, link, pdata->netdev, "100MbE SGMII mode set\n");
}
+static void xgbe_phy_sgmii_10_mode(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+ xgbe_phy_set_redrv_mode(pdata);
+
+ /* 10M/SGMII */
+ xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_SET_1G, XGBE_MB_SUBCMD_10MBITS);
+
+ phy_data->cur_mode = XGBE_MODE_SGMII_10;
+
+ netif_dbg(pdata, link, pdata->netdev, "10MbE SGMII mode set\n");
+}
+
static void xgbe_phy_kr_mode(struct xgbe_prv_data *pdata)
{
struct xgbe_phy_data *phy_data = pdata->phy_data;
@@ -2134,7 +2365,12 @@ static void xgbe_phy_kr_mode(struct xgbe_prv_data *pdata)
xgbe_phy_set_redrv_mode(pdata);
/* 10G/KR */
- xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_SET_10G_KR, XGBE_MB_SUBCMD_NONE);
+ if (enable_rx_adap(pdata, XGBE_MODE_KR))
+ xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_SET_10G_KR,
+ XGBE_MB_SUBCMD_RX_ADAP);
+ else
+ xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_SET_10G_KR,
+ XGBE_MB_SUBCMD_NONE);
phy_data->cur_mode = XGBE_MODE_KR;
@@ -2185,12 +2421,15 @@ static enum xgbe_mode xgbe_phy_switch_baset_mode(struct xgbe_prv_data *pdata)
return xgbe_phy_cur_mode(pdata);
switch (xgbe_phy_cur_mode(pdata)) {
+ case XGBE_MODE_SGMII_10:
case XGBE_MODE_SGMII_100:
case XGBE_MODE_SGMII_1000:
return XGBE_MODE_KR;
+ case XGBE_MODE_KX_2500:
+ return XGBE_MODE_SGMII_1000;
case XGBE_MODE_KR:
default:
- return XGBE_MODE_SGMII_1000;
+ return XGBE_MODE_KX_2500;
}
}
@@ -2252,6 +2491,8 @@ static enum xgbe_mode xgbe_phy_get_baset_mode(struct xgbe_phy_data *phy_data,
int speed)
{
switch (speed) {
+ case SPEED_10:
+ return XGBE_MODE_SGMII_10;
case SPEED_100:
return XGBE_MODE_SGMII_100;
case SPEED_1000:
@@ -2269,6 +2510,8 @@ static enum xgbe_mode xgbe_phy_get_sfp_mode(struct xgbe_phy_data *phy_data,
int speed)
{
switch (speed) {
+ case SPEED_10:
+ return XGBE_MODE_SGMII_10;
case SPEED_100:
return XGBE_MODE_SGMII_100;
case SPEED_1000:
@@ -2343,6 +2586,9 @@ static void xgbe_phy_set_mode(struct xgbe_prv_data *pdata, enum xgbe_mode mode)
case XGBE_MODE_KR:
xgbe_phy_kr_mode(pdata);
break;
+ case XGBE_MODE_SGMII_10:
+ xgbe_phy_sgmii_10_mode(pdata);
+ break;
case XGBE_MODE_SGMII_100:
xgbe_phy_sgmii_100_mode(pdata);
break;
@@ -2399,6 +2645,9 @@ static bool xgbe_phy_use_baset_mode(struct xgbe_prv_data *pdata,
struct ethtool_link_ksettings *lks = &pdata->phy.lks;
switch (mode) {
+ case XGBE_MODE_SGMII_10:
+ return xgbe_phy_check_mode(pdata, mode,
+ XGBE_ADV(lks, 10baseT_Full));
case XGBE_MODE_SGMII_100:
return xgbe_phy_check_mode(pdata, mode,
XGBE_ADV(lks, 100baseT_Full));
@@ -2428,6 +2677,11 @@ static bool xgbe_phy_use_sfp_mode(struct xgbe_prv_data *pdata,
return false;
return xgbe_phy_check_mode(pdata, mode,
XGBE_ADV(lks, 1000baseX_Full));
+ case XGBE_MODE_SGMII_10:
+ if (phy_data->sfp_base != XGBE_SFP_BASE_1000_T)
+ return false;
+ return xgbe_phy_check_mode(pdata, mode,
+ XGBE_ADV(lks, 10baseT_Full));
case XGBE_MODE_SGMII_100:
if (phy_data->sfp_base != XGBE_SFP_BASE_1000_T)
return false;
@@ -2520,15 +2774,23 @@ static bool xgbe_phy_valid_speed_basex_mode(struct xgbe_phy_data *phy_data,
}
}
-static bool xgbe_phy_valid_speed_baset_mode(struct xgbe_phy_data *phy_data,
+static bool xgbe_phy_valid_speed_baset_mode(struct xgbe_prv_data *pdata,
int speed)
{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ unsigned int ver;
+
switch (speed) {
+ case SPEED_10:
+ /* Supported in ver >= 30H */
+ ver = XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER);
+ return (ver >= 0x30) ? true : false;
case SPEED_100:
case SPEED_1000:
return true;
case SPEED_2500:
- return (phy_data->port_mode == XGBE_PORT_MODE_NBASE_T);
+ return ((phy_data->port_mode == XGBE_PORT_MODE_10GBASE_T) ||
+ (phy_data->port_mode == XGBE_PORT_MODE_NBASE_T));
case SPEED_10000:
return (phy_data->port_mode == XGBE_PORT_MODE_10GBASE_T);
default:
@@ -2536,10 +2798,17 @@ static bool xgbe_phy_valid_speed_baset_mode(struct xgbe_phy_data *phy_data,
}
}
-static bool xgbe_phy_valid_speed_sfp_mode(struct xgbe_phy_data *phy_data,
+static bool xgbe_phy_valid_speed_sfp_mode(struct xgbe_prv_data *pdata,
int speed)
{
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ unsigned int ver;
+
switch (speed) {
+ case SPEED_10:
+ /* Supported in ver >= 30H */
+ ver = XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER);
+ return (ver >= 0x30) && (phy_data->sfp_speed == XGBE_SFP_SPEED_100_1000);
case SPEED_100:
return (phy_data->sfp_speed == XGBE_SFP_SPEED_100_1000);
case SPEED_1000:
@@ -2586,12 +2855,12 @@ static bool xgbe_phy_valid_speed(struct xgbe_prv_data *pdata, int speed)
case XGBE_PORT_MODE_1000BASE_T:
case XGBE_PORT_MODE_NBASE_T:
case XGBE_PORT_MODE_10GBASE_T:
- return xgbe_phy_valid_speed_baset_mode(phy_data, speed);
+ return xgbe_phy_valid_speed_baset_mode(pdata, speed);
case XGBE_PORT_MODE_1000BASE_X:
case XGBE_PORT_MODE_10GBASE_R:
return xgbe_phy_valid_speed_basex_mode(phy_data, speed);
case XGBE_PORT_MODE_SFP:
- return xgbe_phy_valid_speed_sfp_mode(phy_data, speed);
+ return xgbe_phy_valid_speed_sfp_mode(pdata, speed);
default:
return false;
}
@@ -2614,8 +2883,11 @@ static int xgbe_phy_link_status(struct xgbe_prv_data *pdata, int *an_restart)
return 0;
}
- if (phy_data->sfp_mod_absent || phy_data->sfp_rx_los)
+ if (phy_data->sfp_mod_absent || phy_data->sfp_rx_los) {
+ if (pdata->en_rx_adap)
+ pdata->rx_adapt_done = false;
return 0;
+ }
}
if (phy_data->phydev) {
@@ -2637,7 +2909,29 @@ static int xgbe_phy_link_status(struct xgbe_prv_data *pdata, int *an_restart)
*/
reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
- if (reg & MDIO_STAT1_LSTATUS)
+
+ if (pdata->en_rx_adap) {
+ /* if the link is available and adaptation is done,
+ * declare link up
+ */
+ if ((reg & MDIO_STAT1_LSTATUS) && pdata->rx_adapt_done)
+ return 1;
+ /* If either link is not available or adaptation is not done,
+ * retrigger the adaptation logic. (if the mode is not set,
+ * then issue mailbox command first)
+ */
+ if (pdata->mode_set) {
+ xgbe_phy_rx_adaptation(pdata);
+ } else {
+ pdata->rx_adapt_done = false;
+ xgbe_phy_set_mode(pdata, phy_data->cur_mode);
+ }
+
+ /* check again for the link and adaptation status */
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
+ if ((reg & MDIO_STAT1_LSTATUS) && pdata->rx_adapt_done)
+ return 1;
+ } else if (reg & MDIO_STAT1_LSTATUS)
return 1;
if (pdata->phy.autoneg == AUTONEG_ENABLE &&
@@ -2862,6 +3156,12 @@ static int xgbe_phy_mdio_reset_setup(struct xgbe_prv_data *pdata)
static bool xgbe_phy_port_mode_mismatch(struct xgbe_prv_data *pdata)
{
struct xgbe_phy_data *phy_data = pdata->phy_data;
+ unsigned int ver;
+
+ /* 10 Mbps speed is not supported in ver < 30H */
+ ver = XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER);
+ if (ver < 0x30 && (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10))
+ return true;
switch (phy_data->port_mode) {
case XGBE_PORT_MODE_BACKPLANE:
@@ -2875,7 +3175,8 @@ static bool xgbe_phy_port_mode_mismatch(struct xgbe_prv_data *pdata)
return false;
break;
case XGBE_PORT_MODE_1000BASE_T:
- if ((phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) ||
+ if ((phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10) ||
+ (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) ||
(phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000))
return false;
break;
@@ -2884,14 +3185,17 @@ static bool xgbe_phy_port_mode_mismatch(struct xgbe_prv_data *pdata)
return false;
break;
case XGBE_PORT_MODE_NBASE_T:
- if ((phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) ||
+ if ((phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10) ||
+ (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) ||
(phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) ||
(phy_data->port_speeds & XGBE_PHY_PORT_SPEED_2500))
return false;
break;
case XGBE_PORT_MODE_10GBASE_T:
- if ((phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) ||
+ if ((phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10) ||
+ (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) ||
(phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) ||
+ (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_2500) ||
(phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000))
return false;
break;
@@ -2900,7 +3204,8 @@ static bool xgbe_phy_port_mode_mismatch(struct xgbe_prv_data *pdata)
return false;
break;
case XGBE_PORT_MODE_SFP:
- if ((phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) ||
+ if ((phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10) ||
+ (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) ||
(phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) ||
(phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000))
return false;
@@ -3269,6 +3574,10 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata)
XGBE_SET_SUP(lks, Pause);
XGBE_SET_SUP(lks, Asym_Pause);
XGBE_SET_SUP(lks, TP);
+ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10) {
+ XGBE_SET_SUP(lks, 10baseT_Full);
+ phy_data->start_mode = XGBE_MODE_SGMII_10;
+ }
if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) {
XGBE_SET_SUP(lks, 100baseT_Full);
phy_data->start_mode = XGBE_MODE_SGMII_100;
@@ -3299,6 +3608,10 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata)
XGBE_SET_SUP(lks, Pause);
XGBE_SET_SUP(lks, Asym_Pause);
XGBE_SET_SUP(lks, TP);
+ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10) {
+ XGBE_SET_SUP(lks, 10baseT_Full);
+ phy_data->start_mode = XGBE_MODE_SGMII_10;
+ }
if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) {
XGBE_SET_SUP(lks, 100baseT_Full);
phy_data->start_mode = XGBE_MODE_SGMII_100;
@@ -3321,6 +3634,10 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata)
XGBE_SET_SUP(lks, Pause);
XGBE_SET_SUP(lks, Asym_Pause);
XGBE_SET_SUP(lks, TP);
+ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10) {
+ XGBE_SET_SUP(lks, 10baseT_Full);
+ phy_data->start_mode = XGBE_MODE_SGMII_10;
+ }
if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) {
XGBE_SET_SUP(lks, 100baseT_Full);
phy_data->start_mode = XGBE_MODE_SGMII_100;
@@ -3329,6 +3646,10 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata)
XGBE_SET_SUP(lks, 1000baseT_Full);
phy_data->start_mode = XGBE_MODE_SGMII_1000;
}
+ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_2500) {
+ XGBE_SET_SUP(lks, 2500baseT_Full);
+ phy_data->start_mode = XGBE_MODE_KX_2500;
+ }
if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000) {
XGBE_SET_SUP(lks, 10000baseT_Full);
phy_data->start_mode = XGBE_MODE_KR;
@@ -3361,6 +3682,8 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata)
XGBE_SET_SUP(lks, Asym_Pause);
XGBE_SET_SUP(lks, TP);
XGBE_SET_SUP(lks, FIBRE);
+ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10)
+ phy_data->start_mode = XGBE_MODE_SGMII_10;
if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100)
phy_data->start_mode = XGBE_MODE_SGMII_100;
if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000)
@@ -3415,8 +3738,10 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata)
mii->priv = pdata;
mii->name = "amd-xgbe-mii";
- mii->read = xgbe_phy_mii_read;
- mii->write = xgbe_phy_mii_write;
+ mii->read = xgbe_phy_mii_read_c22;
+ mii->write = xgbe_phy_mii_write_c22;
+ mii->read_c45 = xgbe_phy_mii_read_c45;
+ mii->write_c45 = xgbe_phy_mii_write_c45;
mii->parent = pdata->dev;
mii->phy_mask = ~0;
snprintf(mii->id, sizeof(mii->id), "%s", dev_name(pdata->dev));
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
index 7a41367c437d..ad136ed493ed 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -294,6 +294,7 @@
#define XGBE_SGMII_AN_LINK_STATUS BIT(1)
#define XGBE_SGMII_AN_LINK_SPEED (BIT(2) | BIT(3))
+#define XGBE_SGMII_AN_LINK_SPEED_10 0x00
#define XGBE_SGMII_AN_LINK_SPEED_100 0x04
#define XGBE_SGMII_AN_LINK_SPEED_1000 0x08
#define XGBE_SGMII_AN_LINK_DUPLEX BIT(4)
@@ -595,6 +596,7 @@ enum xgbe_mode {
XGBE_MODE_KX_2500,
XGBE_MODE_KR,
XGBE_MODE_X,
+ XGBE_MODE_SGMII_10,
XGBE_MODE_SGMII_100,
XGBE_MODE_SGMII_1000,
XGBE_MODE_SFI,
@@ -623,6 +625,7 @@ enum xgbe_mb_cmd {
enum xgbe_mb_subcmd {
XGBE_MB_SUBCMD_NONE = 0,
+ XGBE_MB_SUBCMD_RX_ADAP,
/* 10GbE SFP subcommands */
XGBE_MB_SUBCMD_ACTIVE = 0,
@@ -774,8 +777,11 @@ struct xgbe_hw_if {
int (*set_ext_mii_mode)(struct xgbe_prv_data *, unsigned int,
enum xgbe_mdio_mode);
- int (*read_ext_mii_regs)(struct xgbe_prv_data *, int, int);
- int (*write_ext_mii_regs)(struct xgbe_prv_data *, int, int, u16);
+ int (*read_ext_mii_regs_c22)(struct xgbe_prv_data *, int, int);
+ int (*write_ext_mii_regs_c22)(struct xgbe_prv_data *, int, int, u16);
+ int (*read_ext_mii_regs_c45)(struct xgbe_prv_data *, int, int, int);
+ int (*write_ext_mii_regs_c45)(struct xgbe_prv_data *, int, int, int,
+ u16);
int (*set_gpio)(struct xgbe_prv_data *, unsigned int);
int (*clr_gpio)(struct xgbe_prv_data *, unsigned int);
@@ -1311,6 +1317,10 @@ struct xgbe_prv_data {
bool debugfs_an_cdr_workaround;
bool debugfs_an_cdr_track_early;
+ bool en_rx_adap;
+ int rx_adapt_retries;
+ bool rx_adapt_done;
+ bool mode_set;
};
/* Function prototypes*/
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
index 77609dc0a08d..0b2a52199914 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
@@ -21,6 +21,7 @@
#include <linux/ip.h>
#include <linux/udp.h>
#include <net/pkt_cls.h>
+#include <net/pkt_sched.h>
#include <linux/filter.h>
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index 06508eebb585..d6d6d5d37ff3 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -384,6 +384,11 @@ void aq_nic_ndev_init(struct aq_nic_s *self)
self->ndev->mtu = aq_nic_cfg->mtu - ETH_HLEN;
self->ndev->max_mtu = aq_hw_caps->mtu - ETH_FCS_LEN - ETH_HLEN;
+ self->ndev->xdp_features = NETDEV_XDP_ACT_BASIC |
+ NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT |
+ NETDEV_XDP_ACT_RX_SG |
+ NETDEV_XDP_ACT_NDO_XMIT_SG;
}
void aq_nic_set_tx_ring(struct aq_nic_s *self, unsigned int idx,
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index d30d11872719..306393f8eeca 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -1905,7 +1905,6 @@ static void alx_remove(struct pci_dev *pdev)
free_netdev(alx->dev);
}
-#ifdef CONFIG_PM_SLEEP
static int alx_suspend(struct device *dev)
{
struct alx_priv *alx = dev_get_drvdata(dev);
@@ -1951,12 +1950,7 @@ unlock:
return err;
}
-static SIMPLE_DEV_PM_OPS(alx_pm_ops, alx_suspend, alx_resume);
-#define ALX_PM_OPS (&alx_pm_ops)
-#else
-#define ALX_PM_OPS NULL
-#endif
-
+static DEFINE_SIMPLE_DEV_PM_OPS(alx_pm_ops, alx_suspend, alx_resume);
static pci_ers_result_t alx_pci_error_detected(struct pci_dev *pdev,
pci_channel_state_t state)
@@ -2055,7 +2049,7 @@ static struct pci_driver alx_driver = {
.probe = alx_probe,
.remove = alx_remove,
.err_handler = &alx_err_handlers,
- .driver.pm = ALX_PM_OPS,
+ .driver.pm = pm_sleep_ptr(&alx_pm_ops),
};
module_pci_driver(alx_driver);
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index f4ca0c6c0f51..948586bf1b5b 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -213,6 +213,7 @@ config BNXT
select NET_DEVLINK
select PAGE_POOL
select DIMLIB
+ select AUXILIARY_BUS
help
This driver supports Broadcom NetXtreme-C/E 10/25/40/50 gigabit
Ethernet cards. To compile this driver as a module, choose M here:
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index b751dc8486dc..392ec09a1d8a 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -196,28 +196,6 @@ static int b44_wait_bit(struct b44 *bp, unsigned long reg,
return 0;
}
-static inline void __b44_cam_read(struct b44 *bp, unsigned char *data, int index)
-{
- u32 val;
-
- bw32(bp, B44_CAM_CTRL, (CAM_CTRL_READ |
- (index << CAM_CTRL_INDEX_SHIFT)));
-
- b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
-
- val = br32(bp, B44_CAM_DATA_LO);
-
- data[2] = (val >> 24) & 0xFF;
- data[3] = (val >> 16) & 0xFF;
- data[4] = (val >> 8) & 0xFF;
- data[5] = (val >> 0) & 0xFF;
-
- val = br32(bp, B44_CAM_DATA_HI);
-
- data[0] = (val >> 8) & 0xFF;
- data[1] = (val >> 0) & 0xFF;
-}
-
static inline void __b44_cam_write(struct b44 *bp,
const unsigned char *data, int index)
{
diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma.c b/drivers/net/ethernet/broadcom/bgmac-bcma.c
index 02bd3cf9a260..6e4f36aaf5db 100644
--- a/drivers/net/ethernet/broadcom/bgmac-bcma.c
+++ b/drivers/net/ethernet/broadcom/bgmac-bcma.c
@@ -240,12 +240,12 @@ static int bgmac_probe(struct bcma_device *core)
bgmac->feature_flags |= BGMAC_FEAT_CLKCTLST;
bgmac->feature_flags |= BGMAC_FEAT_FLW_CTRL1;
bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_PHY;
- if (ci->pkg == BCMA_PKG_ID_BCM47188 ||
- ci->pkg == BCMA_PKG_ID_BCM47186) {
+ if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == BCMA_PKG_ID_BCM47186) ||
+ (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg == BCMA_PKG_ID_BCM47188)) {
bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_RGMII;
bgmac->feature_flags |= BGMAC_FEAT_IOST_ATTACHED;
}
- if (ci->pkg == BCMA_PKG_ID_BCM5358)
+ if (ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == BCMA_PKG_ID_BCM5358)
bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_EPHYRMII;
break;
case BCMA_CHIP_ID_BCM53573:
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 240a7e8a7652..5d4b1f2ebeac 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -2414,7 +2414,6 @@ static int bnxt_async_event_process(struct bnxt *bp,
}
bnxt_queue_sp_work(bp);
async_event_process_exit:
- bnxt_ulp_async_events(bp, cmpl);
return 0;
}
@@ -5538,7 +5537,7 @@ vnic_mru:
#endif
if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
- if (!vnic_id && bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP))
+ if (!vnic_id && bnxt_ulp_registered(bp->edev))
req->flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp));
return hwrm_req_send(bp, req);
@@ -9274,10 +9273,14 @@ int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc);
return rc;
}
- if (tcs && (bp->tx_nr_rings_per_tc * tcs != bp->tx_nr_rings)) {
+ if (tcs && (bp->tx_nr_rings_per_tc * tcs !=
+ bp->tx_nr_rings - bp->tx_nr_rings_xdp)) {
netdev_err(bp->dev, "tx ring reservation failure\n");
netdev_reset_tc(bp->dev);
- bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
+ if (bp->tx_nr_rings_xdp)
+ bp->tx_nr_rings_per_tc = bp->tx_nr_rings_xdp;
+ else
+ bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
return -ENOMEM;
}
return 0;
@@ -13181,6 +13184,8 @@ static void bnxt_remove_one(struct pci_dev *pdev)
if (BNXT_PF(bp))
bnxt_sriov_disable(bp);
+ bnxt_rdma_aux_device_uninit(bp);
+
bnxt_ptp_clear(bp);
pci_disable_pcie_error_reporting(pdev);
unregister_netdev(dev);
@@ -13686,6 +13691,9 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
netif_set_tso_max_size(dev, GSO_MAX_SIZE);
+ dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_RX_SG;
+
#ifdef CONFIG_BNXT_SRIOV
init_waitqueue_head(&bp->sriov_cfg_wait);
#endif
@@ -13776,11 +13784,13 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
bnxt_dl_fw_reporters_create(bp);
+ bnxt_rdma_aux_device_init(bp);
+
bnxt_print_device_info(bp);
pci_save_state(pdev);
- return 0;
+ return 0;
init_err_cleanup:
bnxt_dl_unregister(bp);
init_err_dl:
@@ -13824,7 +13834,6 @@ static void bnxt_shutdown(struct pci_dev *pdev)
if (netif_running(dev))
dev_close(dev);
- bnxt_ulp_shutdown(bp);
bnxt_clear_int_mode(bp);
pci_disable_device(pdev);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 5163ef4a49ea..dcb09fbe4007 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -24,6 +24,7 @@
#include <linux/interrupt.h>
#include <linux/rhashtable.h>
#include <linux/crash_dump.h>
+#include <linux/auxiliary_bus.h>
#include <net/devlink.h>
#include <net/dst_metadata.h>
#include <net/xdp.h>
@@ -1631,6 +1632,12 @@ struct bnxt_fw_health {
#define BNXT_FW_IF_RETRY 10
#define BNXT_FW_SLOT_RESET_RETRY 4
+struct bnxt_aux_priv {
+ struct auxiliary_device aux_dev;
+ struct bnxt_en_dev *edev;
+ int id;
+};
+
enum board_idx {
BCM57301,
BCM57302,
@@ -1852,6 +1859,7 @@ struct bnxt {
#define BNXT_CHIP_P4_PLUS(bp) \
(BNXT_CHIP_P4(bp) || BNXT_CHIP_P5(bp))
+ struct bnxt_aux_priv *aux_priv;
struct bnxt_en_dev *edev;
struct bnxt_napi **bnapi;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index 26913dc816d3..8b3e7697390f 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -1303,7 +1303,6 @@ int bnxt_dl_register(struct bnxt *bp)
if (rc)
goto err_dl_port_unreg;
- devlink_set_features(dl, DEVLINK_F_RELOAD);
out:
devlink_register(dl);
return 0;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index a4cba7cb2783..3ed3a2b3b3a9 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -749,7 +749,6 @@ int bnxt_cfg_hw_sriov(struct bnxt *bp, int *num_vfs, bool reset)
*num_vfs = rc;
}
- bnxt_ulp_sriov_cfg(bp, *num_vfs);
return 0;
}
@@ -823,10 +822,8 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
goto err_out2;
rc = pci_enable_sriov(bp->pdev, *num_vfs);
- if (rc) {
- bnxt_ulp_sriov_cfg(bp, 0);
+ if (rc)
goto err_out2;
- }
return 0;
@@ -872,8 +869,6 @@ void bnxt_sriov_disable(struct bnxt *bp)
rtnl_lock();
bnxt_restore_pf_fw_resources(bp);
rtnl_unlock();
-
- bnxt_ulp_sriov_cfg(bp, 0);
}
int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
index 2e54bf4fc7a7..d4cc9c371e7b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
@@ -19,89 +19,26 @@
#include <linux/irq.h>
#include <asm/byteorder.h>
#include <linux/bitmap.h>
+#include <linux/auxiliary_bus.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_ulp.h"
-static int bnxt_register_dev(struct bnxt_en_dev *edev, unsigned int ulp_id,
- struct bnxt_ulp_ops *ulp_ops, void *handle)
-{
- struct net_device *dev = edev->net;
- struct bnxt *bp = netdev_priv(dev);
- struct bnxt_ulp *ulp;
-
- ASSERT_RTNL();
- if (ulp_id >= BNXT_MAX_ULP)
- return -EINVAL;
-
- ulp = &edev->ulp_tbl[ulp_id];
- if (rcu_access_pointer(ulp->ulp_ops)) {
- netdev_err(bp->dev, "ulp id %d already registered\n", ulp_id);
- return -EBUSY;
- }
- if (ulp_id == BNXT_ROCE_ULP) {
- unsigned int max_stat_ctxs;
-
- max_stat_ctxs = bnxt_get_max_func_stat_ctxs(bp);
- if (max_stat_ctxs <= BNXT_MIN_ROCE_STAT_CTXS ||
- bp->cp_nr_rings == max_stat_ctxs)
- return -ENOMEM;
- }
-
- atomic_set(&ulp->ref_count, 0);
- ulp->handle = handle;
- rcu_assign_pointer(ulp->ulp_ops, ulp_ops);
-
- if (ulp_id == BNXT_ROCE_ULP) {
- if (test_bit(BNXT_STATE_OPEN, &bp->state))
- bnxt_hwrm_vnic_cfg(bp, 0);
- }
-
- return 0;
-}
-
-static int bnxt_unregister_dev(struct bnxt_en_dev *edev, unsigned int ulp_id)
-{
- struct net_device *dev = edev->net;
- struct bnxt *bp = netdev_priv(dev);
- struct bnxt_ulp *ulp;
- int i = 0;
-
- ASSERT_RTNL();
- if (ulp_id >= BNXT_MAX_ULP)
- return -EINVAL;
-
- ulp = &edev->ulp_tbl[ulp_id];
- if (!rcu_access_pointer(ulp->ulp_ops)) {
- netdev_err(bp->dev, "ulp id %d not registered\n", ulp_id);
- return -EINVAL;
- }
- if (ulp_id == BNXT_ROCE_ULP && ulp->msix_requested)
- edev->en_ops->bnxt_free_msix(edev, ulp_id);
-
- if (ulp->max_async_event_id)
- bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, true);
-
- RCU_INIT_POINTER(ulp->ulp_ops, NULL);
- synchronize_rcu();
- ulp->max_async_event_id = 0;
- ulp->async_events_bmap = NULL;
- while (atomic_read(&ulp->ref_count) != 0 && i < 10) {
- msleep(100);
- i++;
- }
- return 0;
-}
+static DEFINE_IDA(bnxt_aux_dev_ids);
static void bnxt_fill_msix_vecs(struct bnxt *bp, struct bnxt_msix_entry *ent)
{
struct bnxt_en_dev *edev = bp->edev;
int num_msix, idx, i;
- num_msix = edev->ulp_tbl[BNXT_ROCE_ULP].msix_requested;
- idx = edev->ulp_tbl[BNXT_ROCE_ULP].msix_base;
+ if (!edev->ulp_tbl->msix_requested) {
+ netdev_warn(bp->dev, "Requested MSI-X vectors insufficient\n");
+ return;
+ }
+ num_msix = edev->ulp_tbl->msix_requested;
+ idx = edev->ulp_tbl->msix_base;
for (i = 0; i < num_msix; i++) {
ent[i].vector = bp->irq_tbl[idx + i].vector;
ent[i].ring_idx = idx + i;
@@ -115,125 +52,95 @@ static void bnxt_fill_msix_vecs(struct bnxt *bp, struct bnxt_msix_entry *ent)
}
}
-static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, unsigned int ulp_id,
- struct bnxt_msix_entry *ent, int num_msix)
+int bnxt_register_dev(struct bnxt_en_dev *edev,
+ struct bnxt_ulp_ops *ulp_ops,
+ void *handle)
{
struct net_device *dev = edev->net;
struct bnxt *bp = netdev_priv(dev);
- struct bnxt_hw_resc *hw_resc;
- int max_idx, max_cp_rings;
- int avail_msix, idx;
- int total_vecs;
- int rc = 0;
-
- ASSERT_RTNL();
- if (ulp_id != BNXT_ROCE_ULP)
- return -EINVAL;
-
- if (!(bp->flags & BNXT_FLAG_USING_MSIX))
- return -ENODEV;
+ unsigned int max_stat_ctxs;
+ struct bnxt_ulp *ulp;
- if (edev->ulp_tbl[ulp_id].msix_requested)
- return -EAGAIN;
+ max_stat_ctxs = bnxt_get_max_func_stat_ctxs(bp);
+ if (max_stat_ctxs <= BNXT_MIN_ROCE_STAT_CTXS ||
+ bp->cp_nr_rings == max_stat_ctxs)
+ return -ENOMEM;
- max_cp_rings = bnxt_get_max_func_cp_rings(bp);
- avail_msix = bnxt_get_avail_msix(bp, num_msix);
- if (!avail_msix)
+ ulp = edev->ulp_tbl;
+ if (!ulp)
return -ENOMEM;
- if (avail_msix > num_msix)
- avail_msix = num_msix;
-
- if (BNXT_NEW_RM(bp)) {
- idx = bp->cp_nr_rings;
- } else {
- max_idx = min_t(int, bp->total_irqs, max_cp_rings);
- idx = max_idx - avail_msix;
- }
- edev->ulp_tbl[ulp_id].msix_base = idx;
- edev->ulp_tbl[ulp_id].msix_requested = avail_msix;
- hw_resc = &bp->hw_resc;
- total_vecs = idx + avail_msix;
- if (bp->total_irqs < total_vecs ||
- (BNXT_NEW_RM(bp) && hw_resc->resv_irqs < total_vecs)) {
- if (netif_running(dev)) {
- bnxt_close_nic(bp, true, false);
- rc = bnxt_open_nic(bp, true, false);
- } else {
- rc = bnxt_reserve_rings(bp, true);
- }
- }
- if (rc) {
- edev->ulp_tbl[ulp_id].msix_requested = 0;
- return -EAGAIN;
- }
- if (BNXT_NEW_RM(bp)) {
- int resv_msix;
+ ulp->handle = handle;
+ rcu_assign_pointer(ulp->ulp_ops, ulp_ops);
+
+ if (test_bit(BNXT_STATE_OPEN, &bp->state))
+ bnxt_hwrm_vnic_cfg(bp, 0);
- resv_msix = hw_resc->resv_irqs - bp->cp_nr_rings;
- avail_msix = min_t(int, resv_msix, avail_msix);
- edev->ulp_tbl[ulp_id].msix_requested = avail_msix;
- }
- bnxt_fill_msix_vecs(bp, ent);
+ bnxt_fill_msix_vecs(bp, bp->edev->msix_entries);
edev->flags |= BNXT_EN_FLAG_MSIX_REQUESTED;
- return avail_msix;
+ return 0;
}
+EXPORT_SYMBOL(bnxt_register_dev);
-static int bnxt_free_msix_vecs(struct bnxt_en_dev *edev, unsigned int ulp_id)
+void bnxt_unregister_dev(struct bnxt_en_dev *edev)
{
struct net_device *dev = edev->net;
struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_ulp *ulp;
+ int i = 0;
- ASSERT_RTNL();
- if (ulp_id != BNXT_ROCE_ULP)
- return -EINVAL;
+ ulp = edev->ulp_tbl;
+ if (ulp->msix_requested)
+ edev->flags &= ~BNXT_EN_FLAG_MSIX_REQUESTED;
- if (!(edev->flags & BNXT_EN_FLAG_MSIX_REQUESTED))
- return 0;
+ if (ulp->max_async_event_id)
+ bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, true);
- edev->ulp_tbl[ulp_id].msix_requested = 0;
- edev->flags &= ~BNXT_EN_FLAG_MSIX_REQUESTED;
- if (netif_running(dev) && !(edev->flags & BNXT_EN_FLAG_ULP_STOPPED)) {
- bnxt_close_nic(bp, true, false);
- bnxt_open_nic(bp, true, false);
+ RCU_INIT_POINTER(ulp->ulp_ops, NULL);
+ synchronize_rcu();
+ ulp->max_async_event_id = 0;
+ ulp->async_events_bmap = NULL;
+ while (atomic_read(&ulp->ref_count) != 0 && i < 10) {
+ msleep(100);
+ i++;
}
- return 0;
+ return;
}
+EXPORT_SYMBOL(bnxt_unregister_dev);
int bnxt_get_ulp_msix_num(struct bnxt *bp)
{
- if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) {
- struct bnxt_en_dev *edev = bp->edev;
+ u32 roce_msix = BNXT_VF(bp) ?
+ BNXT_MAX_VF_ROCE_MSIX : BNXT_MAX_ROCE_MSIX;
- return edev->ulp_tbl[BNXT_ROCE_ULP].msix_requested;
- }
- return 0;
+ return ((bp->flags & BNXT_FLAG_ROCE_CAP) ?
+ min_t(u32, roce_msix, num_online_cpus()) : 0);
}
int bnxt_get_ulp_msix_base(struct bnxt *bp)
{
- if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) {
+ if (bnxt_ulp_registered(bp->edev)) {
struct bnxt_en_dev *edev = bp->edev;
- if (edev->ulp_tbl[BNXT_ROCE_ULP].msix_requested)
- return edev->ulp_tbl[BNXT_ROCE_ULP].msix_base;
+ if (edev->ulp_tbl->msix_requested)
+ return edev->ulp_tbl->msix_base;
}
return 0;
}
int bnxt_get_ulp_stat_ctxs(struct bnxt *bp)
{
- if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) {
+ if (bnxt_ulp_registered(bp->edev)) {
struct bnxt_en_dev *edev = bp->edev;
- if (edev->ulp_tbl[BNXT_ROCE_ULP].msix_requested)
+ if (edev->ulp_tbl->msix_requested)
return BNXT_MIN_ROCE_STAT_CTXS;
}
return 0;
}
-static int bnxt_send_msg(struct bnxt_en_dev *edev, unsigned int ulp_id,
+int bnxt_send_msg(struct bnxt_en_dev *edev,
struct bnxt_fw_msg *fw_msg)
{
struct net_device *dev = edev->net;
@@ -243,7 +150,7 @@ static int bnxt_send_msg(struct bnxt_en_dev *edev, unsigned int ulp_id,
u32 resp_len;
int rc;
- if (ulp_id != BNXT_ROCE_ULP && bp->fw_reset_state)
+ if (bp->fw_reset_state)
return -EBUSY;
rc = hwrm_req_init(bp, req, 0 /* don't care */);
@@ -267,42 +174,36 @@ static int bnxt_send_msg(struct bnxt_en_dev *edev, unsigned int ulp_id,
hwrm_req_drop(bp, req);
return rc;
}
-
-static void bnxt_ulp_get(struct bnxt_ulp *ulp)
-{
- atomic_inc(&ulp->ref_count);
-}
-
-static void bnxt_ulp_put(struct bnxt_ulp *ulp)
-{
- atomic_dec(&ulp->ref_count);
-}
+EXPORT_SYMBOL(bnxt_send_msg);
void bnxt_ulp_stop(struct bnxt *bp)
{
+ struct bnxt_aux_priv *aux_priv = bp->aux_priv;
struct bnxt_en_dev *edev = bp->edev;
- struct bnxt_ulp_ops *ops;
- int i;
if (!edev)
return;
edev->flags |= BNXT_EN_FLAG_ULP_STOPPED;
- for (i = 0; i < BNXT_MAX_ULP; i++) {
- struct bnxt_ulp *ulp = &edev->ulp_tbl[i];
+ if (aux_priv) {
+ struct auxiliary_device *adev;
- ops = rtnl_dereference(ulp->ulp_ops);
- if (!ops || !ops->ulp_stop)
- continue;
- ops->ulp_stop(ulp->handle);
+ adev = &aux_priv->aux_dev;
+ if (adev->dev.driver) {
+ struct auxiliary_driver *adrv;
+ pm_message_t pm = {};
+
+ adrv = to_auxiliary_drv(adev->dev.driver);
+ edev->en_state = bp->state;
+ adrv->suspend(adev, pm);
+ }
}
}
void bnxt_ulp_start(struct bnxt *bp, int err)
{
+ struct bnxt_aux_priv *aux_priv = bp->aux_priv;
struct bnxt_en_dev *edev = bp->edev;
- struct bnxt_ulp_ops *ops;
- int i;
if (!edev)
return;
@@ -312,58 +213,19 @@ void bnxt_ulp_start(struct bnxt *bp, int err)
if (err)
return;
- for (i = 0; i < BNXT_MAX_ULP; i++) {
- struct bnxt_ulp *ulp = &edev->ulp_tbl[i];
-
- ops = rtnl_dereference(ulp->ulp_ops);
- if (!ops || !ops->ulp_start)
- continue;
- ops->ulp_start(ulp->handle);
- }
-}
-
-void bnxt_ulp_sriov_cfg(struct bnxt *bp, int num_vfs)
-{
- struct bnxt_en_dev *edev = bp->edev;
- struct bnxt_ulp_ops *ops;
- int i;
-
- if (!edev)
- return;
+ if (aux_priv) {
+ struct auxiliary_device *adev;
- for (i = 0; i < BNXT_MAX_ULP; i++) {
- struct bnxt_ulp *ulp = &edev->ulp_tbl[i];
+ adev = &aux_priv->aux_dev;
+ if (adev->dev.driver) {
+ struct auxiliary_driver *adrv;
- rcu_read_lock();
- ops = rcu_dereference(ulp->ulp_ops);
- if (!ops || !ops->ulp_sriov_config) {
- rcu_read_unlock();
- continue;
+ adrv = to_auxiliary_drv(adev->dev.driver);
+ edev->en_state = bp->state;
+ adrv->resume(adev);
}
- bnxt_ulp_get(ulp);
- rcu_read_unlock();
- ops->ulp_sriov_config(ulp->handle, num_vfs);
- bnxt_ulp_put(ulp);
}
-}
-void bnxt_ulp_shutdown(struct bnxt *bp)
-{
- struct bnxt_en_dev *edev = bp->edev;
- struct bnxt_ulp_ops *ops;
- int i;
-
- if (!edev)
- return;
-
- for (i = 0; i < BNXT_MAX_ULP; i++) {
- struct bnxt_ulp *ulp = &edev->ulp_tbl[i];
-
- ops = rtnl_dereference(ulp->ulp_ops);
- if (!ops || !ops->ulp_shutdown)
- continue;
- ops->ulp_shutdown(ulp->handle);
- }
}
void bnxt_ulp_irq_stop(struct bnxt *bp)
@@ -374,8 +236,8 @@ void bnxt_ulp_irq_stop(struct bnxt *bp)
if (!edev || !(edev->flags & BNXT_EN_FLAG_MSIX_REQUESTED))
return;
- if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) {
- struct bnxt_ulp *ulp = &edev->ulp_tbl[BNXT_ROCE_ULP];
+ if (bnxt_ulp_registered(bp->edev)) {
+ struct bnxt_ulp *ulp = edev->ulp_tbl;
if (!ulp->msix_requested)
return;
@@ -395,8 +257,8 @@ void bnxt_ulp_irq_restart(struct bnxt *bp, int err)
if (!edev || !(edev->flags & BNXT_EN_FLAG_MSIX_REQUESTED))
return;
- if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) {
- struct bnxt_ulp *ulp = &edev->ulp_tbl[BNXT_ROCE_ULP];
+ if (bnxt_ulp_registered(bp->edev)) {
+ struct bnxt_ulp *ulp = edev->ulp_tbl;
struct bnxt_msix_entry *ent = NULL;
if (!ulp->msix_requested)
@@ -418,46 +280,15 @@ void bnxt_ulp_irq_restart(struct bnxt *bp, int err)
}
}
-void bnxt_ulp_async_events(struct bnxt *bp, struct hwrm_async_event_cmpl *cmpl)
-{
- u16 event_id = le16_to_cpu(cmpl->event_id);
- struct bnxt_en_dev *edev = bp->edev;
- struct bnxt_ulp_ops *ops;
- int i;
-
- if (!edev)
- return;
-
- rcu_read_lock();
- for (i = 0; i < BNXT_MAX_ULP; i++) {
- struct bnxt_ulp *ulp = &edev->ulp_tbl[i];
-
- ops = rcu_dereference(ulp->ulp_ops);
- if (!ops || !ops->ulp_async_notifier)
- continue;
- if (!ulp->async_events_bmap ||
- event_id > ulp->max_async_event_id)
- continue;
-
- /* Read max_async_event_id first before testing the bitmap. */
- smp_rmb();
- if (test_bit(event_id, ulp->async_events_bmap))
- ops->ulp_async_notifier(ulp->handle, cmpl);
- }
- rcu_read_unlock();
-}
-
-static int bnxt_register_async_events(struct bnxt_en_dev *edev, unsigned int ulp_id,
- unsigned long *events_bmap, u16 max_id)
+int bnxt_register_async_events(struct bnxt_en_dev *edev,
+ unsigned long *events_bmap,
+ u16 max_id)
{
struct net_device *dev = edev->net;
struct bnxt *bp = netdev_priv(dev);
struct bnxt_ulp *ulp;
- if (ulp_id >= BNXT_MAX_ULP)
- return -EINVAL;
-
- ulp = &edev->ulp_tbl[ulp_id];
+ ulp = edev->ulp_tbl;
ulp->async_events_bmap = events_bmap;
/* Make sure bnxt_ulp_async_events() sees this order */
smp_wmb();
@@ -465,38 +296,121 @@ static int bnxt_register_async_events(struct bnxt_en_dev *edev, unsigned int ulp
bnxt_hwrm_func_drv_rgtr(bp, events_bmap, max_id + 1, true);
return 0;
}
+EXPORT_SYMBOL(bnxt_register_async_events);
-static const struct bnxt_en_ops bnxt_en_ops_tbl = {
- .bnxt_register_device = bnxt_register_dev,
- .bnxt_unregister_device = bnxt_unregister_dev,
- .bnxt_request_msix = bnxt_req_msix_vecs,
- .bnxt_free_msix = bnxt_free_msix_vecs,
- .bnxt_send_fw_msg = bnxt_send_msg,
- .bnxt_register_fw_async_events = bnxt_register_async_events,
-};
+void bnxt_rdma_aux_device_uninit(struct bnxt *bp)
+{
+ struct bnxt_aux_priv *aux_priv;
+ struct auxiliary_device *adev;
+
+ /* Skip if no auxiliary device init was done. */
+ if (!(bp->flags & BNXT_FLAG_ROCE_CAP))
+ return;
+
+ aux_priv = bp->aux_priv;
+ adev = &aux_priv->aux_dev;
+ auxiliary_device_delete(adev);
+ auxiliary_device_uninit(adev);
+}
-struct bnxt_en_dev *bnxt_ulp_probe(struct net_device *dev)
+static void bnxt_aux_dev_release(struct device *dev)
{
- struct bnxt *bp = netdev_priv(dev);
- struct bnxt_en_dev *edev;
+ struct bnxt_aux_priv *aux_priv =
+ container_of(dev, struct bnxt_aux_priv, aux_dev.dev);
+
+ ida_free(&bnxt_aux_dev_ids, aux_priv->id);
+ kfree(aux_priv->edev->ulp_tbl);
+ kfree(aux_priv->edev);
+ kfree(aux_priv);
+}
+
+static void bnxt_set_edev_info(struct bnxt_en_dev *edev, struct bnxt *bp)
+{
+ edev->net = bp->dev;
+ edev->pdev = bp->pdev;
+ edev->l2_db_size = bp->db_size;
+ edev->l2_db_size_nc = bp->db_size;
- edev = bp->edev;
- if (!edev) {
- edev = kzalloc(sizeof(*edev), GFP_KERNEL);
- if (!edev)
- return ERR_PTR(-ENOMEM);
- edev->en_ops = &bnxt_en_ops_tbl;
- edev->net = dev;
- edev->pdev = bp->pdev;
- edev->l2_db_size = bp->db_size;
- edev->l2_db_size_nc = bp->db_size;
- bp->edev = edev;
- }
- edev->flags &= ~BNXT_EN_FLAG_ROCE_CAP;
if (bp->flags & BNXT_FLAG_ROCEV1_CAP)
edev->flags |= BNXT_EN_FLAG_ROCEV1_CAP;
if (bp->flags & BNXT_FLAG_ROCEV2_CAP)
edev->flags |= BNXT_EN_FLAG_ROCEV2_CAP;
- return bp->edev;
+ if (bp->flags & BNXT_FLAG_VF)
+ edev->flags |= BNXT_EN_FLAG_VF;
+
+ edev->chip_num = bp->chip_num;
+ edev->hw_ring_stats_size = bp->hw_ring_stats_size;
+ edev->pf_port_id = bp->pf.port_id;
+ edev->en_state = bp->state;
+
+ edev->ulp_tbl->msix_requested = bnxt_get_ulp_msix_num(bp);
+}
+
+void bnxt_rdma_aux_device_init(struct bnxt *bp)
+{
+ struct auxiliary_device *aux_dev;
+ struct bnxt_aux_priv *aux_priv;
+ struct bnxt_en_dev *edev;
+ struct bnxt_ulp *ulp;
+ int rc;
+
+ if (!(bp->flags & BNXT_FLAG_ROCE_CAP))
+ return;
+
+ bp->aux_priv = kzalloc(sizeof(*bp->aux_priv), GFP_KERNEL);
+ if (!bp->aux_priv)
+ goto exit;
+
+ bp->aux_priv->id = ida_alloc(&bnxt_aux_dev_ids, GFP_KERNEL);
+ if (bp->aux_priv->id < 0) {
+ netdev_warn(bp->dev,
+ "ida alloc failed for ROCE auxiliary device\n");
+ kfree(bp->aux_priv);
+ goto exit;
+ }
+
+ aux_priv = bp->aux_priv;
+ aux_dev = &aux_priv->aux_dev;
+ aux_dev->id = aux_priv->id;
+ aux_dev->name = "rdma";
+ aux_dev->dev.parent = &bp->pdev->dev;
+ aux_dev->dev.release = bnxt_aux_dev_release;
+
+ rc = auxiliary_device_init(aux_dev);
+ if (rc) {
+ ida_free(&bnxt_aux_dev_ids, bp->aux_priv->id);
+ kfree(bp->aux_priv);
+ goto exit;
+ }
+
+ /* From this point, all cleanup will happen via the .release callback &
+ * any error unwinding will need to include a call to
+ * auxiliary_device_uninit.
+ */
+ edev = kzalloc(sizeof(*edev), GFP_KERNEL);
+ if (!edev)
+ goto aux_dev_uninit;
+
+ ulp = kzalloc(sizeof(*ulp), GFP_KERNEL);
+ if (!ulp)
+ goto aux_dev_uninit;
+
+ edev->ulp_tbl = ulp;
+ aux_priv->edev = edev;
+ bp->edev = edev;
+ bnxt_set_edev_info(edev, bp);
+
+ rc = auxiliary_device_add(aux_dev);
+ if (rc) {
+ netdev_warn(bp->dev,
+ "Failed to add auxiliary device for ROCE\n");
+ goto aux_dev_uninit;
+ }
+
+ return;
+
+aux_dev_uninit:
+ auxiliary_device_uninit(aux_dev);
+exit:
+ bp->flags &= ~BNXT_FLAG_ROCE_CAP;
}
-EXPORT_SYMBOL(bnxt_ulp_probe);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
index 42b50abc3e91..80cbc4b6130a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
@@ -15,6 +15,8 @@
#define BNXT_MIN_ROCE_CP_RINGS 2
#define BNXT_MIN_ROCE_STAT_CTXS 1
+#define BNXT_MAX_ROCE_MSIX 9
+#define BNXT_MAX_VF_ROCE_MSIX 2
struct hwrm_async_event_cmpl;
struct bnxt;
@@ -26,12 +28,6 @@ struct bnxt_msix_entry {
};
struct bnxt_ulp_ops {
- /* async_notifier() cannot sleep (in BH context) */
- void (*ulp_async_notifier)(void *, struct hwrm_async_event_cmpl *);
- void (*ulp_stop)(void *);
- void (*ulp_start)(void *);
- void (*ulp_sriov_config)(void *, int);
- void (*ulp_shutdown)(void *);
void (*ulp_irq_stop)(void *);
void (*ulp_irq_restart)(void *, struct bnxt_msix_entry *);
};
@@ -57,6 +53,7 @@ struct bnxt_ulp {
struct bnxt_en_dev {
struct net_device *net;
struct pci_dev *pdev;
+ struct bnxt_msix_entry msix_entries[BNXT_MAX_ROCE_MSIX];
u32 flags;
#define BNXT_EN_FLAG_ROCEV1_CAP 0x1
#define BNXT_EN_FLAG_ROCEV2_CAP 0x2
@@ -64,8 +61,10 @@ struct bnxt_en_dev {
BNXT_EN_FLAG_ROCEV2_CAP)
#define BNXT_EN_FLAG_MSIX_REQUESTED 0x4
#define BNXT_EN_FLAG_ULP_STOPPED 0x8
- const struct bnxt_en_ops *en_ops;
- struct bnxt_ulp ulp_tbl[BNXT_MAX_ULP];
+ #define BNXT_EN_FLAG_VF 0x10
+#define BNXT_EN_VF(edev) ((edev)->flags & BNXT_EN_FLAG_VF)
+
+ struct bnxt_ulp *ulp_tbl;
int l2_db_size; /* Doorbell BAR size in
* bytes mapped by L2
* driver.
@@ -74,24 +73,19 @@ struct bnxt_en_dev {
* bytes mapped as non-
* cacheable.
*/
+ u16 chip_num;
+ u16 hw_ring_stats_size;
+ u16 pf_port_id;
+ unsigned long en_state; /* Could be checked in
+ * RoCE driver suspend
+ * mode only. Will be
+ * updated in resume.
+ */
};
-struct bnxt_en_ops {
- int (*bnxt_register_device)(struct bnxt_en_dev *, unsigned int,
- struct bnxt_ulp_ops *, void *);
- int (*bnxt_unregister_device)(struct bnxt_en_dev *, unsigned int);
- int (*bnxt_request_msix)(struct bnxt_en_dev *, unsigned int,
- struct bnxt_msix_entry *, int);
- int (*bnxt_free_msix)(struct bnxt_en_dev *, unsigned int);
- int (*bnxt_send_fw_msg)(struct bnxt_en_dev *, unsigned int,
- struct bnxt_fw_msg *);
- int (*bnxt_register_fw_async_events)(struct bnxt_en_dev *, unsigned int,
- unsigned long *, u16);
-};
-
-static inline bool bnxt_ulp_registered(struct bnxt_en_dev *edev, int ulp_id)
+static inline bool bnxt_ulp_registered(struct bnxt_en_dev *edev)
{
- if (edev && rcu_access_pointer(edev->ulp_tbl[ulp_id].ulp_ops))
+ if (edev && edev->ulp_tbl)
return true;
return false;
}
@@ -102,10 +96,15 @@ int bnxt_get_ulp_stat_ctxs(struct bnxt *bp);
void bnxt_ulp_stop(struct bnxt *bp);
void bnxt_ulp_start(struct bnxt *bp, int err);
void bnxt_ulp_sriov_cfg(struct bnxt *bp, int num_vfs);
-void bnxt_ulp_shutdown(struct bnxt *bp);
void bnxt_ulp_irq_stop(struct bnxt *bp);
void bnxt_ulp_irq_restart(struct bnxt *bp, int err);
void bnxt_ulp_async_events(struct bnxt *bp, struct hwrm_async_event_cmpl *cmpl);
-struct bnxt_en_dev *bnxt_ulp_probe(struct net_device *dev);
-
+void bnxt_rdma_aux_device_uninit(struct bnxt *bp);
+void bnxt_rdma_aux_device_init(struct bnxt *bp);
+int bnxt_register_dev(struct bnxt_en_dev *edev, struct bnxt_ulp_ops *ulp_ops,
+ void *handle);
+void bnxt_unregister_dev(struct bnxt_en_dev *edev);
+int bnxt_send_msg(struct bnxt_en_dev *edev, struct bnxt_fw_msg *fw_msg);
+int bnxt_register_async_events(struct bnxt_en_dev *edev,
+ unsigned long *events_bmap, u16 max_id);
#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
index 36d5202c0aee..5843c93b1711 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -422,9 +422,11 @@ static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog)
if (prog) {
bnxt_set_rx_skb_mode(bp, true);
+ xdp_features_set_redirect_target(dev, true);
} else {
int rx, tx;
+ xdp_features_clear_redirect_target(dev);
bnxt_set_rx_skb_mode(bp, false);
bnxt_get_max_rings(bp, &rx, &tx, true);
if (rx > 1) {
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 21973046b12b..d937daa8ee88 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -2316,6 +2316,14 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
__func__, p_index, ring->c_index,
ring->read_ptr, dma_length_status);
+ if (unlikely(len > RX_BUF_LENGTH)) {
+ netif_err(priv, rx_status, dev, "oversized packet\n");
+ dev->stats.rx_length_errors++;
+ dev->stats.rx_errors++;
+ dev_kfree_skb_any(skb);
+ goto next;
+ }
+
if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) {
netif_err(priv, rx_status, dev,
"dropping fragmented packet!\n");
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
index f55d9d9c01a8..3a4b6cb7b7b9 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
@@ -77,14 +77,18 @@ int bcmgenet_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
if (wol->wolopts) {
device_set_wakeup_enable(kdev, 1);
/* Avoid unbalanced enable_irq_wake calls */
- if (priv->wol_irq_disabled)
+ if (priv->wol_irq_disabled) {
enable_irq_wake(priv->wol_irq);
+ enable_irq_wake(priv->irq0);
+ }
priv->wol_irq_disabled = false;
} else {
device_set_wakeup_enable(kdev, 0);
/* Avoid unbalanced disable_irq_wake calls */
- if (!priv->wol_irq_disabled)
+ if (!priv->wol_irq_disabled) {
disable_irq_wake(priv->wol_irq);
+ disable_irq_wake(priv->irq0);
+ }
priv->wol_irq_disabled = true;
}
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index b615176338b2..be042905ada2 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -176,15 +176,6 @@ void bcmgenet_phy_power_set(struct net_device *dev, bool enable)
static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv)
{
- u32 reg;
-
- if (!GENET_IS_V5(priv)) {
- /* Speed settings are set in bcmgenet_mii_setup() */
- reg = bcmgenet_sys_readl(priv, SYS_PORT_CTRL);
- reg |= LED_ACT_SOURCE_MAC;
- bcmgenet_sys_writel(priv, reg, SYS_PORT_CTRL);
- }
-
if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET)
fixed_phy_set_link_update(priv->dev->phydev,
bcmgenet_fixed_phy_link_update);
@@ -217,6 +208,8 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
if (!phy_name) {
phy_name = "MoCA";
+ if (!GENET_IS_V5(priv))
+ port_ctrl |= LED_ACT_SOURCE_MAC;
bcmgenet_moca_phy_setup(priv);
}
break;
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 9c410f93a103..14dfec4db8f9 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -768,8 +768,6 @@
#define gem_readl_n(port, reg, idx) (port)->macb_reg_readl((port), GEM_##reg + idx * 4)
#define gem_writel_n(port, reg, idx, value) (port)->macb_reg_writel((port), GEM_##reg + idx * 4, (value))
-#define PTP_TS_BUFFER_SIZE 128 /* must be power of 2 */
-
/* Conditional GEM/MACB macros. These perform the operation to the correct
* register dependent on whether the device is a GEM or a MACB. For registers
* and bitfields that are common across both devices, use macb_{read,write}l
@@ -819,11 +817,6 @@ struct macb_dma_desc_ptp {
u32 ts_1;
u32 ts_2;
};
-
-struct gem_tx_ts {
- struct sk_buff *skb;
- struct macb_dma_desc_ptp desc_ptp;
-};
#endif
/* DMA descriptor bitfields */
@@ -1224,12 +1217,6 @@ struct macb_queue {
void *rx_buffers;
struct napi_struct napi_rx;
struct queue_stats stats;
-
-#ifdef CONFIG_MACB_USE_HWSTAMP
- struct work_struct tx_ts_task;
- unsigned int tx_ts_head, tx_ts_tail;
- struct gem_tx_ts tx_timestamps[PTP_TS_BUFFER_SIZE];
-#endif
};
struct ethtool_rx_fs_item {
@@ -1340,14 +1327,14 @@ enum macb_bd_control {
void gem_ptp_init(struct net_device *ndev);
void gem_ptp_remove(struct net_device *ndev);
-int gem_ptp_txstamp(struct macb_queue *queue, struct sk_buff *skb, struct macb_dma_desc *des);
+void gem_ptp_txstamp(struct macb *bp, struct sk_buff *skb, struct macb_dma_desc *desc);
void gem_ptp_rxstamp(struct macb *bp, struct sk_buff *skb, struct macb_dma_desc *desc);
-static inline int gem_ptp_do_txstamp(struct macb_queue *queue, struct sk_buff *skb, struct macb_dma_desc *desc)
+static inline void gem_ptp_do_txstamp(struct macb *bp, struct sk_buff *skb, struct macb_dma_desc *desc)
{
- if (queue->bp->tstamp_config.tx_type == TSTAMP_DISABLED)
- return -ENOTSUPP;
+ if (bp->tstamp_config.tx_type == TSTAMP_DISABLED)
+ return;
- return gem_ptp_txstamp(queue, skb, desc);
+ gem_ptp_txstamp(bp, skb, desc);
}
static inline void gem_ptp_do_rxstamp(struct macb *bp, struct sk_buff *skb, struct macb_dma_desc *desc)
@@ -1363,11 +1350,7 @@ int gem_set_hwtst(struct net_device *dev, struct ifreq *ifr, int cmd);
static inline void gem_ptp_init(struct net_device *ndev) { }
static inline void gem_ptp_remove(struct net_device *ndev) { }
-static inline int gem_ptp_do_txstamp(struct macb_queue *queue, struct sk_buff *skb, struct macb_dma_desc *desc)
-{
- return -1;
-}
-
+static inline void gem_ptp_do_txstamp(struct macb *bp, struct sk_buff *skb, struct macb_dma_desc *desc) { }
static inline void gem_ptp_do_rxstamp(struct macb *bp, struct sk_buff *skb, struct macb_dma_desc *desc) { }
#endif
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 72e42820713d..6e141a8bbf43 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -334,7 +334,7 @@ static int macb_mdio_wait_for_idle(struct macb *bp)
1, MACB_MDIO_TIMEOUT);
}
-static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
+static int macb_mdio_read_c22(struct mii_bus *bus, int mii_id, int regnum)
{
struct macb *bp = bus->priv;
int status;
@@ -347,35 +347,62 @@ static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
if (status < 0)
goto mdio_read_exit;
- if (regnum & MII_ADDR_C45) {
- macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF)
- | MACB_BF(RW, MACB_MAN_C45_ADDR)
- | MACB_BF(PHYA, mii_id)
- | MACB_BF(REGA, (regnum >> 16) & 0x1F)
- | MACB_BF(DATA, regnum & 0xFFFF)
- | MACB_BF(CODE, MACB_MAN_C45_CODE)));
-
- status = macb_mdio_wait_for_idle(bp);
- if (status < 0)
- goto mdio_read_exit;
-
- macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF)
- | MACB_BF(RW, MACB_MAN_C45_READ)
- | MACB_BF(PHYA, mii_id)
- | MACB_BF(REGA, (regnum >> 16) & 0x1F)
- | MACB_BF(CODE, MACB_MAN_C45_CODE)));
- } else {
- macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C22_SOF)
- | MACB_BF(RW, MACB_MAN_C22_READ)
- | MACB_BF(PHYA, mii_id)
- | MACB_BF(REGA, regnum)
- | MACB_BF(CODE, MACB_MAN_C22_CODE)));
+ macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C22_SOF)
+ | MACB_BF(RW, MACB_MAN_C22_READ)
+ | MACB_BF(PHYA, mii_id)
+ | MACB_BF(REGA, regnum)
+ | MACB_BF(CODE, MACB_MAN_C22_CODE)));
+
+ status = macb_mdio_wait_for_idle(bp);
+ if (status < 0)
+ goto mdio_read_exit;
+
+ status = MACB_BFEXT(DATA, macb_readl(bp, MAN));
+
+mdio_read_exit:
+ pm_runtime_mark_last_busy(&bp->pdev->dev);
+ pm_runtime_put_autosuspend(&bp->pdev->dev);
+mdio_pm_exit:
+ return status;
+}
+
+static int macb_mdio_read_c45(struct mii_bus *bus, int mii_id, int devad,
+ int regnum)
+{
+ struct macb *bp = bus->priv;
+ int status;
+
+ status = pm_runtime_get_sync(&bp->pdev->dev);
+ if (status < 0) {
+ pm_runtime_put_noidle(&bp->pdev->dev);
+ goto mdio_pm_exit;
}
status = macb_mdio_wait_for_idle(bp);
if (status < 0)
goto mdio_read_exit;
+ macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF)
+ | MACB_BF(RW, MACB_MAN_C45_ADDR)
+ | MACB_BF(PHYA, mii_id)
+ | MACB_BF(REGA, devad & 0x1F)
+ | MACB_BF(DATA, regnum & 0xFFFF)
+ | MACB_BF(CODE, MACB_MAN_C45_CODE)));
+
+ status = macb_mdio_wait_for_idle(bp);
+ if (status < 0)
+ goto mdio_read_exit;
+
+ macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF)
+ | MACB_BF(RW, MACB_MAN_C45_READ)
+ | MACB_BF(PHYA, mii_id)
+ | MACB_BF(REGA, devad & 0x1F)
+ | MACB_BF(CODE, MACB_MAN_C45_CODE)));
+
+ status = macb_mdio_wait_for_idle(bp);
+ if (status < 0)
+ goto mdio_read_exit;
+
status = MACB_BFEXT(DATA, macb_readl(bp, MAN));
mdio_read_exit:
@@ -385,8 +412,8 @@ mdio_pm_exit:
return status;
}
-static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
- u16 value)
+static int macb_mdio_write_c22(struct mii_bus *bus, int mii_id, int regnum,
+ u16 value)
{
struct macb *bp = bus->priv;
int status;
@@ -399,37 +426,63 @@ static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
if (status < 0)
goto mdio_write_exit;
- if (regnum & MII_ADDR_C45) {
- macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF)
- | MACB_BF(RW, MACB_MAN_C45_ADDR)
- | MACB_BF(PHYA, mii_id)
- | MACB_BF(REGA, (regnum >> 16) & 0x1F)
- | MACB_BF(DATA, regnum & 0xFFFF)
- | MACB_BF(CODE, MACB_MAN_C45_CODE)));
-
- status = macb_mdio_wait_for_idle(bp);
- if (status < 0)
- goto mdio_write_exit;
-
- macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF)
- | MACB_BF(RW, MACB_MAN_C45_WRITE)
- | MACB_BF(PHYA, mii_id)
- | MACB_BF(REGA, (regnum >> 16) & 0x1F)
- | MACB_BF(CODE, MACB_MAN_C45_CODE)
- | MACB_BF(DATA, value)));
- } else {
- macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C22_SOF)
- | MACB_BF(RW, MACB_MAN_C22_WRITE)
- | MACB_BF(PHYA, mii_id)
- | MACB_BF(REGA, regnum)
- | MACB_BF(CODE, MACB_MAN_C22_CODE)
- | MACB_BF(DATA, value)));
+ macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C22_SOF)
+ | MACB_BF(RW, MACB_MAN_C22_WRITE)
+ | MACB_BF(PHYA, mii_id)
+ | MACB_BF(REGA, regnum)
+ | MACB_BF(CODE, MACB_MAN_C22_CODE)
+ | MACB_BF(DATA, value)));
+
+ status = macb_mdio_wait_for_idle(bp);
+ if (status < 0)
+ goto mdio_write_exit;
+
+mdio_write_exit:
+ pm_runtime_mark_last_busy(&bp->pdev->dev);
+ pm_runtime_put_autosuspend(&bp->pdev->dev);
+mdio_pm_exit:
+ return status;
+}
+
+static int macb_mdio_write_c45(struct mii_bus *bus, int mii_id,
+ int devad, int regnum,
+ u16 value)
+{
+ struct macb *bp = bus->priv;
+ int status;
+
+ status = pm_runtime_get_sync(&bp->pdev->dev);
+ if (status < 0) {
+ pm_runtime_put_noidle(&bp->pdev->dev);
+ goto mdio_pm_exit;
}
status = macb_mdio_wait_for_idle(bp);
if (status < 0)
goto mdio_write_exit;
+ macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF)
+ | MACB_BF(RW, MACB_MAN_C45_ADDR)
+ | MACB_BF(PHYA, mii_id)
+ | MACB_BF(REGA, devad & 0x1F)
+ | MACB_BF(DATA, regnum & 0xFFFF)
+ | MACB_BF(CODE, MACB_MAN_C45_CODE)));
+
+ status = macb_mdio_wait_for_idle(bp);
+ if (status < 0)
+ goto mdio_write_exit;
+
+ macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF)
+ | MACB_BF(RW, MACB_MAN_C45_WRITE)
+ | MACB_BF(PHYA, mii_id)
+ | MACB_BF(REGA, devad & 0x1F)
+ | MACB_BF(CODE, MACB_MAN_C45_CODE)
+ | MACB_BF(DATA, value)));
+
+ status = macb_mdio_wait_for_idle(bp);
+ if (status < 0)
+ goto mdio_write_exit;
+
mdio_write_exit:
pm_runtime_mark_last_busy(&bp->pdev->dev);
pm_runtime_put_autosuspend(&bp->pdev->dev);
@@ -902,8 +955,10 @@ static int macb_mii_init(struct macb *bp)
}
bp->mii_bus->name = "MACB_mii_bus";
- bp->mii_bus->read = &macb_mdio_read;
- bp->mii_bus->write = &macb_mdio_write;
+ bp->mii_bus->read = &macb_mdio_read_c22;
+ bp->mii_bus->write = &macb_mdio_write_c22;
+ bp->mii_bus->read_c45 = &macb_mdio_read_c45;
+ bp->mii_bus->write_c45 = &macb_mdio_write_c45;
snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
bp->pdev->name, bp->pdev->id);
bp->mii_bus->priv = bp;
@@ -1191,13 +1246,9 @@ static int macb_tx_complete(struct macb_queue *queue, int budget)
/* First, update TX stats if needed */
if (skb) {
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
- !ptp_one_step_sync(skb) &&
- gem_ptp_do_txstamp(queue, skb, desc) == 0) {
- /* skb now belongs to timestamp buffer
- * and will be removed later
- */
- tx_skb->skb = NULL;
- }
+ !ptp_one_step_sync(skb))
+ gem_ptp_do_txstamp(bp, skb, desc);
+
netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n",
macb_tx_ring_wrap(bp, tail),
skb->data);
@@ -2253,6 +2304,12 @@ static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
return ret;
}
+#ifdef CONFIG_MACB_USE_HWSTAMP
+ if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+ (bp->hw_dma_cap & HW_DMA_CAP_PTP))
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+#endif
+
is_lso = (skb_shinfo(skb)->gso_size != 0);
if (is_lso) {
@@ -4627,25 +4684,26 @@ static int init_reset_optional(struct platform_device *pdev)
if (ret)
return dev_err_probe(&pdev->dev, ret,
"failed to init SGMII PHY\n");
- }
- ret = zynqmp_pm_is_function_supported(PM_IOCTL, IOCTL_SET_GEM_CONFIG);
- if (!ret) {
- u32 pm_info[2];
+ ret = zynqmp_pm_is_function_supported(PM_IOCTL, IOCTL_SET_GEM_CONFIG);
+ if (!ret) {
+ u32 pm_info[2];
- ret = of_property_read_u32_array(pdev->dev.of_node, "power-domains",
- pm_info, ARRAY_SIZE(pm_info));
- if (ret) {
- dev_err(&pdev->dev, "Failed to read power management information\n");
- goto err_out_phy_exit;
+ ret = of_property_read_u32_array(pdev->dev.of_node, "power-domains",
+ pm_info, ARRAY_SIZE(pm_info));
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to read power management information\n");
+ goto err_out_phy_exit;
+ }
+ ret = zynqmp_pm_set_gem_config(pm_info[1], GEM_CONFIG_FIXED, 0);
+ if (ret)
+ goto err_out_phy_exit;
+
+ ret = zynqmp_pm_set_gem_config(pm_info[1], GEM_CONFIG_SGMII_MODE, 1);
+ if (ret)
+ goto err_out_phy_exit;
}
- ret = zynqmp_pm_set_gem_config(pm_info[1], GEM_CONFIG_FIXED, 0);
- if (ret)
- goto err_out_phy_exit;
- ret = zynqmp_pm_set_gem_config(pm_info[1], GEM_CONFIG_SGMII_MODE, 1);
- if (ret)
- goto err_out_phy_exit;
}
/* Fully reset controller at hardware level if mapped in device tree */
diff --git a/drivers/net/ethernet/cadence/macb_ptp.c b/drivers/net/ethernet/cadence/macb_ptp.c
index e6cb20aaa76a..f962a95068a0 100644
--- a/drivers/net/ethernet/cadence/macb_ptp.c
+++ b/drivers/net/ethernet/cadence/macb_ptp.c
@@ -292,79 +292,39 @@ void gem_ptp_rxstamp(struct macb *bp, struct sk_buff *skb,
}
}
-static void gem_tstamp_tx(struct macb *bp, struct sk_buff *skb,
- struct macb_dma_desc_ptp *desc_ptp)
+void gem_ptp_txstamp(struct macb *bp, struct sk_buff *skb,
+ struct macb_dma_desc *desc)
{
struct skb_shared_hwtstamps shhwtstamps;
- struct timespec64 ts;
-
- gem_hw_timestamp(bp, desc_ptp->ts_1, desc_ptp->ts_2, &ts);
- memset(&shhwtstamps, 0, sizeof(shhwtstamps));
- shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
- skb_tstamp_tx(skb, &shhwtstamps);
-}
-
-int gem_ptp_txstamp(struct macb_queue *queue, struct sk_buff *skb,
- struct macb_dma_desc *desc)
-{
- unsigned long tail = READ_ONCE(queue->tx_ts_tail);
- unsigned long head = queue->tx_ts_head;
struct macb_dma_desc_ptp *desc_ptp;
- struct gem_tx_ts *tx_timestamp;
-
- if (!GEM_BFEXT(DMA_TXVALID, desc->ctrl))
- return -EINVAL;
+ struct timespec64 ts;
- if (CIRC_SPACE(head, tail, PTP_TS_BUFFER_SIZE) == 0)
- return -ENOMEM;
+ if (!GEM_BFEXT(DMA_TXVALID, desc->ctrl)) {
+ dev_warn_ratelimited(&bp->pdev->dev,
+ "Timestamp not set in TX BD as expected\n");
+ return;
+ }
- desc_ptp = macb_ptp_desc(queue->bp, desc);
+ desc_ptp = macb_ptp_desc(bp, desc);
/* Unlikely but check */
- if (!desc_ptp)
- return -EINVAL;
- skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
- tx_timestamp = &queue->tx_timestamps[head];
- tx_timestamp->skb = skb;
+ if (!desc_ptp) {
+ dev_warn_ratelimited(&bp->pdev->dev,
+ "Timestamp not supported in BD\n");
+ return;
+ }
+
/* ensure ts_1/ts_2 is loaded after ctrl (TX_USED check) */
dma_rmb();
- tx_timestamp->desc_ptp.ts_1 = desc_ptp->ts_1;
- tx_timestamp->desc_ptp.ts_2 = desc_ptp->ts_2;
- /* move head */
- smp_store_release(&queue->tx_ts_head,
- (head + 1) & (PTP_TS_BUFFER_SIZE - 1));
-
- schedule_work(&queue->tx_ts_task);
- return 0;
-}
+ gem_hw_timestamp(bp, desc_ptp->ts_1, desc_ptp->ts_2, &ts);
-static void gem_tx_timestamp_flush(struct work_struct *work)
-{
- struct macb_queue *queue =
- container_of(work, struct macb_queue, tx_ts_task);
- unsigned long head, tail;
- struct gem_tx_ts *tx_ts;
-
- /* take current head */
- head = smp_load_acquire(&queue->tx_ts_head);
- tail = queue->tx_ts_tail;
-
- while (CIRC_CNT(head, tail, PTP_TS_BUFFER_SIZE)) {
- tx_ts = &queue->tx_timestamps[tail];
- gem_tstamp_tx(queue->bp, tx_ts->skb, &tx_ts->desc_ptp);
- /* cleanup */
- dev_kfree_skb_any(tx_ts->skb);
- /* remove old tail */
- smp_store_release(&queue->tx_ts_tail,
- (tail + 1) & (PTP_TS_BUFFER_SIZE - 1));
- tail = queue->tx_ts_tail;
- }
+ memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+ shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
+ skb_tstamp_tx(skb, &shhwtstamps);
}
void gem_ptp_init(struct net_device *dev)
{
struct macb *bp = netdev_priv(dev);
- struct macb_queue *queue;
- unsigned int q;
bp->ptp_clock_info = gem_ptp_caps_template;
@@ -384,11 +344,6 @@ void gem_ptp_init(struct net_device *dev)
}
spin_lock_init(&bp->tsu_clk_lock);
- for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
- queue->tx_ts_head = 0;
- queue->tx_ts_tail = 0;
- INIT_WORK(&queue->tx_ts_task, gem_tx_timestamp_flush);
- }
gem_ptp_init_tsu(bp);
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index f2f95493ec89..8b25313c7f6b 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -2218,6 +2218,8 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->netdev_ops = &nicvf_netdev_ops;
netdev->watchdog_timeo = NICVF_TX_TIMEOUT;
+ netdev->xdp_features = NETDEV_XDP_ACT_BASIC;
+
/* MTU range: 64 - 9200 */
netdev->min_mtu = NIC_HW_MIN_FRS;
netdev->max_mtu = NIC_HW_MAX_FRS;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 5657ac8cfca0..fca9533bc011 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -1079,8 +1079,6 @@ struct mbox_list {
#if IS_ENABLED(CONFIG_THERMAL)
struct ch_thermal {
struct thermal_zone_device *tzdev;
- int trip_temp;
- int trip_type;
};
#endif
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 9cbce1faab26..7db2403c4c9c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -6490,21 +6490,21 @@ static const struct tlsdev_ops cxgb4_ktls_ops = {
#if IS_ENABLED(CONFIG_CHELSIO_IPSEC_INLINE)
-static int cxgb4_xfrm_add_state(struct xfrm_state *x)
+static int cxgb4_xfrm_add_state(struct xfrm_state *x,
+ struct netlink_ext_ack *extack)
{
struct adapter *adap = netdev2adap(x->xso.dev);
int ret;
if (!mutex_trylock(&uld_mutex)) {
- dev_dbg(adap->pdev_dev,
- "crypto uld critical resource is under use\n");
+ NL_SET_ERR_MSG_MOD(extack, "crypto uld critical resource is under use");
return -EBUSY;
}
ret = chcr_offload_state(adap, CXGB4_XFRMDEV_OPS);
if (ret)
goto out_unlock;
- ret = adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_add(x);
+ ret = adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_add(x, extack);
out_unlock:
mutex_unlock(&uld_mutex);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.h
index be96f1dc0372..d4a862a9fd7d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.h
@@ -4,7 +4,7 @@
#ifndef __CXGB4_TC_MQPRIO_H__
#define __CXGB4_TC_MQPRIO_H__
-#include <net/pkt_cls.h>
+#include <net/pkt_sched.h>
#define CXGB4_EOSW_TXQ_DEFAULT_DESC_NUM 128
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_thermal.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_thermal.c
index 9a6d65243334..95e1b415ba13 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_thermal.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_thermal.c
@@ -29,36 +29,12 @@ static int cxgb4_thermal_get_temp(struct thermal_zone_device *tzdev,
return 0;
}
-static int cxgb4_thermal_get_trip_type(struct thermal_zone_device *tzdev,
- int trip, enum thermal_trip_type *type)
-{
- struct adapter *adap = tzdev->devdata;
-
- if (!adap->ch_thermal.trip_temp)
- return -EINVAL;
-
- *type = adap->ch_thermal.trip_type;
- return 0;
-}
-
-static int cxgb4_thermal_get_trip_temp(struct thermal_zone_device *tzdev,
- int trip, int *temp)
-{
- struct adapter *adap = tzdev->devdata;
-
- if (!adap->ch_thermal.trip_temp)
- return -EINVAL;
-
- *temp = adap->ch_thermal.trip_temp;
- return 0;
-}
-
static struct thermal_zone_device_ops cxgb4_thermal_ops = {
.get_temp = cxgb4_thermal_get_temp,
- .get_trip_type = cxgb4_thermal_get_trip_type,
- .get_trip_temp = cxgb4_thermal_get_trip_temp,
};
+static struct thermal_trip trip = { .type = THERMAL_TRIP_CRITICAL } ;
+
int cxgb4_thermal_init(struct adapter *adap)
{
struct ch_thermal *ch_thermal = &adap->ch_thermal;
@@ -79,15 +55,14 @@ int cxgb4_thermal_init(struct adapter *adap)
if (ret < 0) {
num_trip = 0; /* could not get trip temperature */
} else {
- ch_thermal->trip_temp = val * 1000;
- ch_thermal->trip_type = THERMAL_TRIP_CRITICAL;
+ trip.temperature = val * 1000;
}
snprintf(ch_tz_name, sizeof(ch_tz_name), "cxgb4_%s", adap->name);
- ch_thermal->tzdev = thermal_zone_device_register(ch_tz_name, num_trip,
- 0, adap,
- &cxgb4_thermal_ops,
- NULL, 0, 0);
+ ch_thermal->tzdev = thermal_zone_device_register_with_trips(ch_tz_name, &trip, num_trip,
+ 0, adap,
+ &cxgb4_thermal_ops,
+ NULL, 0, 0);
if (IS_ERR(ch_thermal->tzdev)) {
ret = PTR_ERR(ch_thermal->tzdev);
dev_err(adap->pdev_dev, "Failed to register thermal zone\n");
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c b/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
index ca21794281d6..3731c93f8f95 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
@@ -80,7 +80,8 @@ static void *ch_ipsec_uld_add(const struct cxgb4_lld_info *infop);
static void ch_ipsec_advance_esn_state(struct xfrm_state *x);
static void ch_ipsec_xfrm_free_state(struct xfrm_state *x);
static void ch_ipsec_xfrm_del_state(struct xfrm_state *x);
-static int ch_ipsec_xfrm_add_state(struct xfrm_state *x);
+static int ch_ipsec_xfrm_add_state(struct xfrm_state *x,
+ struct netlink_ext_ack *extack);
static const struct xfrmdev_ops ch_ipsec_xfrmdev_ops = {
.xdo_dev_state_add = ch_ipsec_xfrm_add_state,
@@ -226,65 +227,66 @@ out:
* returns 0 on success, negative error if failed to send message to FPGA
* positive error if FPGA returned a bad response
*/
-static int ch_ipsec_xfrm_add_state(struct xfrm_state *x)
+static int ch_ipsec_xfrm_add_state(struct xfrm_state *x,
+ struct netlink_ext_ack *extack)
{
struct ipsec_sa_entry *sa_entry;
int res = 0;
if (x->props.aalgo != SADB_AALG_NONE) {
- pr_debug("Cannot offload authenticated xfrm states\n");
+ NL_SET_ERR_MSG_MOD(extack, "Cannot offload authenticated xfrm states");
return -EINVAL;
}
if (x->props.calgo != SADB_X_CALG_NONE) {
- pr_debug("Cannot offload compressed xfrm states\n");
+ NL_SET_ERR_MSG_MOD(extack, "Cannot offload compressed xfrm states");
return -EINVAL;
}
if (x->props.family != AF_INET &&
x->props.family != AF_INET6) {
- pr_debug("Only IPv4/6 xfrm state offloaded\n");
+ NL_SET_ERR_MSG_MOD(extack, "Only IPv4/6 xfrm state offloaded");
return -EINVAL;
}
if (x->props.mode != XFRM_MODE_TRANSPORT &&
x->props.mode != XFRM_MODE_TUNNEL) {
- pr_debug("Only transport and tunnel xfrm offload\n");
+ NL_SET_ERR_MSG_MOD(extack, "Only transport and tunnel xfrm offload");
return -EINVAL;
}
if (x->id.proto != IPPROTO_ESP) {
- pr_debug("Only ESP xfrm state offloaded\n");
+ NL_SET_ERR_MSG_MOD(extack, "Only ESP xfrm state offloaded");
return -EINVAL;
}
if (x->encap) {
- pr_debug("Encapsulated xfrm state not offloaded\n");
+ NL_SET_ERR_MSG_MOD(extack, "Encapsulated xfrm state not offloaded");
return -EINVAL;
}
if (!x->aead) {
- pr_debug("Cannot offload xfrm states without aead\n");
+ NL_SET_ERR_MSG_MOD(extack, "Cannot offload xfrm states without aead");
return -EINVAL;
}
if (x->aead->alg_icv_len != 128 &&
x->aead->alg_icv_len != 96) {
- pr_debug("Cannot offload xfrm states with AEAD ICV length other than 96b & 128b\n");
- return -EINVAL;
+ NL_SET_ERR_MSG_MOD(extack, "Cannot offload xfrm states with AEAD ICV length other than 96b & 128b");
+ return -EINVAL;
}
if ((x->aead->alg_key_len != 128 + 32) &&
(x->aead->alg_key_len != 256 + 32)) {
- pr_debug("cannot offload xfrm states with AEAD key length other than 128/256 bit\n");
+ NL_SET_ERR_MSG_MOD(extack, "cannot offload xfrm states with AEAD key length other than 128/256 bit");
return -EINVAL;
}
if (x->tfcpad) {
- pr_debug("Cannot offload xfrm states with tfc padding\n");
+ NL_SET_ERR_MSG_MOD(extack, "Cannot offload xfrm states with tfc padding");
return -EINVAL;
}
if (!x->geniv) {
- pr_debug("Cannot offload xfrm states without geniv\n");
+ NL_SET_ERR_MSG_MOD(extack, "Cannot offload xfrm states without geniv");
return -EINVAL;
}
if (strcmp(x->geniv, "seqiv")) {
- pr_debug("Cannot offload xfrm states with geniv other than seqiv\n");
+ NL_SET_ERR_MSG_MOD(extack, "Cannot offload xfrm states with geniv other than seqiv");
return -EINVAL;
}
if (x->xso.type != XFRM_DEV_OFFLOAD_CRYPTO) {
- pr_debug("Unsupported xfrm offload\n");
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported xfrm offload");
return -EINVAL;
}
diff --git a/drivers/net/ethernet/engleder/Makefile b/drivers/net/ethernet/engleder/Makefile
index b6e3b16623de..b98135f65eb7 100644
--- a/drivers/net/ethernet/engleder/Makefile
+++ b/drivers/net/ethernet/engleder/Makefile
@@ -6,5 +6,5 @@
obj-$(CONFIG_TSNEP) += tsnep.o
tsnep-objs := tsnep_main.o tsnep_ethtool.o tsnep_ptp.o tsnep_tc.o \
- tsnep_rxnfc.o $(tsnep-y)
+ tsnep_rxnfc.o tsnep_xdp.o
tsnep-$(CONFIG_TSNEP_SELFTESTS) += tsnep_selftests.o
diff --git a/drivers/net/ethernet/engleder/tsnep.h b/drivers/net/ethernet/engleder/tsnep.h
index f93ba48bac3f..058c2bcf31a7 100644
--- a/drivers/net/ethernet/engleder/tsnep.h
+++ b/drivers/net/ethernet/engleder/tsnep.h
@@ -65,7 +65,11 @@ struct tsnep_tx_entry {
u32 properties;
- struct sk_buff *skb;
+ u32 type;
+ union {
+ struct sk_buff *skb;
+ struct xdp_frame *xdpf;
+ };
size_t len;
DEFINE_DMA_UNMAP_ADDR(dma);
};
@@ -78,8 +82,6 @@ struct tsnep_tx {
void *page[TSNEP_RING_PAGE_COUNT];
dma_addr_t page_dma[TSNEP_RING_PAGE_COUNT];
- /* TX ring lock */
- spinlock_t lock;
struct tsnep_tx_entry entry[TSNEP_RING_SIZE];
int write;
int read;
@@ -107,6 +109,7 @@ struct tsnep_rx {
struct tsnep_adapter *adapter;
void __iomem *addr;
int queue_index;
+ int tx_queue_index;
void *page[TSNEP_RING_PAGE_COUNT];
dma_addr_t page_dma[TSNEP_RING_PAGE_COUNT];
@@ -123,6 +126,8 @@ struct tsnep_rx {
u32 dropped;
u32 multicast;
u32 alloc_failed;
+
+ struct xdp_rxq_info xdp_rxq;
};
struct tsnep_queue {
@@ -172,6 +177,8 @@ struct tsnep_adapter {
int rxnfc_count;
int rxnfc_max;
+ struct bpf_prog *xdp_prog;
+
int num_tx_queues;
struct tsnep_tx tx[TSNEP_MAX_QUEUES];
int num_rx_queues;
@@ -204,6 +211,9 @@ int tsnep_rxnfc_add_rule(struct tsnep_adapter *adapter,
int tsnep_rxnfc_del_rule(struct tsnep_adapter *adapter,
struct ethtool_rxnfc *cmd);
+int tsnep_xdp_setup_prog(struct tsnep_adapter *adapter, struct bpf_prog *prog,
+ struct netlink_ext_ack *extack);
+
#if IS_ENABLED(CONFIG_TSNEP_SELFTESTS)
int tsnep_ethtool_get_test_count(void);
void tsnep_ethtool_get_test_strings(u8 *data);
diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c
index 00e2108f2ca4..6982aaa928b5 100644
--- a/drivers/net/ethernet/engleder/tsnep_main.c
+++ b/drivers/net/ethernet/engleder/tsnep_main.c
@@ -26,9 +26,11 @@
#include <linux/etherdevice.h>
#include <linux/phy.h>
#include <linux/iopoll.h>
+#include <linux/bpf.h>
+#include <linux/bpf_trace.h>
-#define TSNEP_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
-#define TSNEP_HEADROOM ALIGN(TSNEP_SKB_PAD, 4)
+#define TSNEP_RX_OFFSET (max(NET_SKB_PAD, XDP_PACKET_HEADROOM) + NET_IP_ALIGN)
+#define TSNEP_HEADROOM ALIGN(TSNEP_RX_OFFSET, 4)
#define TSNEP_MAX_RX_BUF_SIZE (PAGE_SIZE - TSNEP_HEADROOM - \
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
@@ -43,6 +45,14 @@
#define TSNEP_COALESCE_USECS_MAX ((ECM_INT_DELAY_MASK >> ECM_INT_DELAY_SHIFT) * \
ECM_INT_DELAY_BASE_US + ECM_INT_DELAY_BASE_US - 1)
+#define TSNEP_TX_TYPE_SKB BIT(0)
+#define TSNEP_TX_TYPE_SKB_FRAG BIT(1)
+#define TSNEP_TX_TYPE_XDP_TX BIT(2)
+#define TSNEP_TX_TYPE_XDP_NDO BIT(3)
+
+#define TSNEP_XDP_TX BIT(0)
+#define TSNEP_XDP_REDIRECT BIT(1)
+
static void tsnep_enable_irq(struct tsnep_adapter *adapter, u32 mask)
{
iowrite32(mask, adapter->addr + ECM_INT_ENABLE);
@@ -120,9 +130,6 @@ static int tsnep_mdiobus_read(struct mii_bus *bus, int addr, int regnum)
u32 md;
int retval;
- if (regnum & MII_ADDR_C45)
- return -EOPNOTSUPP;
-
md = ECM_MD_READ;
if (!adapter->suppress_preamble)
md |= ECM_MD_PREAMBLE;
@@ -144,9 +151,6 @@ static int tsnep_mdiobus_write(struct mii_bus *bus, int addr, int regnum,
u32 md;
int retval;
- if (regnum & MII_ADDR_C45)
- return -EOPNOTSUPP;
-
md = ECM_MD_WRITE;
if (!adapter->suppress_preamble)
md |= ECM_MD_PREAMBLE;
@@ -306,10 +310,12 @@ static void tsnep_tx_activate(struct tsnep_tx *tx, int index, int length,
struct tsnep_tx_entry *entry = &tx->entry[index];
entry->properties = 0;
+ /* xdpf is union with skb */
if (entry->skb) {
entry->properties = length & TSNEP_DESC_LENGTH_MASK;
entry->properties |= TSNEP_DESC_INTERRUPT_FLAG;
- if (skb_shinfo(entry->skb)->tx_flags & SKBTX_IN_PROGRESS)
+ if ((entry->type & TSNEP_TX_TYPE_SKB) &&
+ (skb_shinfo(entry->skb)->tx_flags & SKBTX_IN_PROGRESS))
entry->properties |= TSNEP_DESC_EXTENDED_WRITEBACK_FLAG;
/* toggle user flag to prevent false acknowledge
@@ -378,15 +384,19 @@ static int tsnep_tx_map(struct sk_buff *skb, struct tsnep_tx *tx, int count)
for (i = 0; i < count; i++) {
entry = &tx->entry[(tx->write + i) % TSNEP_RING_SIZE];
- if (i == 0) {
+ if (!i) {
len = skb_headlen(skb);
dma = dma_map_single(dmadev, skb->data, len,
DMA_TO_DEVICE);
+
+ entry->type = TSNEP_TX_TYPE_SKB;
} else {
len = skb_frag_size(&skb_shinfo(skb)->frags[i - 1]);
dma = skb_frag_dma_map(dmadev,
&skb_shinfo(skb)->frags[i - 1],
0, len, DMA_TO_DEVICE);
+
+ entry->type = TSNEP_TX_TYPE_SKB_FRAG;
}
if (dma_mapping_error(dmadev, dma))
return -ENOMEM;
@@ -413,12 +423,13 @@ static int tsnep_tx_unmap(struct tsnep_tx *tx, int index, int count)
entry = &tx->entry[(index + i) % TSNEP_RING_SIZE];
if (entry->len) {
- if (i == 0)
+ if (entry->type & TSNEP_TX_TYPE_SKB)
dma_unmap_single(dmadev,
dma_unmap_addr(entry, dma),
dma_unmap_len(entry, len),
DMA_TO_DEVICE);
- else
+ else if (entry->type &
+ (TSNEP_TX_TYPE_SKB_FRAG | TSNEP_TX_TYPE_XDP_NDO))
dma_unmap_page(dmadev,
dma_unmap_addr(entry, dma),
dma_unmap_len(entry, len),
@@ -434,7 +445,6 @@ static int tsnep_tx_unmap(struct tsnep_tx *tx, int index, int count)
static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
struct tsnep_tx *tx)
{
- unsigned long flags;
int count = 1;
struct tsnep_tx_entry *entry;
int length;
@@ -444,16 +454,12 @@ static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
if (skb_shinfo(skb)->nr_frags > 0)
count += skb_shinfo(skb)->nr_frags;
- spin_lock_irqsave(&tx->lock, flags);
-
if (tsnep_tx_desc_available(tx) < count) {
/* ring full, shall not happen because queue is stopped if full
* below
*/
netif_stop_subqueue(tx->adapter->netdev, tx->queue_index);
- spin_unlock_irqrestore(&tx->lock, flags);
-
return NETDEV_TX_BUSY;
}
@@ -468,10 +474,6 @@ static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
tx->dropped++;
- spin_unlock_irqrestore(&tx->lock, flags);
-
- netdev_err(tx->adapter->netdev, "TX DMA map failed\n");
-
return NETDEV_TX_OK;
}
length = retval;
@@ -481,7 +483,7 @@ static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
for (i = 0; i < count; i++)
tsnep_tx_activate(tx, (tx->write + i) % TSNEP_RING_SIZE, length,
- i == (count - 1));
+ i == count - 1);
tx->write = (tx->write + count) % TSNEP_RING_SIZE;
skb_tx_timestamp(skb);
@@ -496,23 +498,146 @@ static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
netif_stop_subqueue(tx->adapter->netdev, tx->queue_index);
}
- spin_unlock_irqrestore(&tx->lock, flags);
-
return NETDEV_TX_OK;
}
+static int tsnep_xdp_tx_map(struct xdp_frame *xdpf, struct tsnep_tx *tx,
+ struct skb_shared_info *shinfo, int count, u32 type)
+{
+ struct device *dmadev = tx->adapter->dmadev;
+ struct tsnep_tx_entry *entry;
+ struct page *page;
+ skb_frag_t *frag;
+ unsigned int len;
+ int map_len = 0;
+ dma_addr_t dma;
+ void *data;
+ int i;
+
+ frag = NULL;
+ len = xdpf->len;
+ for (i = 0; i < count; i++) {
+ entry = &tx->entry[(tx->write + i) % TSNEP_RING_SIZE];
+ if (type & TSNEP_TX_TYPE_XDP_NDO) {
+ data = unlikely(frag) ? skb_frag_address(frag) :
+ xdpf->data;
+ dma = dma_map_single(dmadev, data, len, DMA_TO_DEVICE);
+ if (dma_mapping_error(dmadev, dma))
+ return -ENOMEM;
+
+ entry->type = TSNEP_TX_TYPE_XDP_NDO;
+ } else {
+ page = unlikely(frag) ? skb_frag_page(frag) :
+ virt_to_page(xdpf->data);
+ dma = page_pool_get_dma_addr(page);
+ if (unlikely(frag))
+ dma += skb_frag_off(frag);
+ else
+ dma += sizeof(*xdpf) + xdpf->headroom;
+ dma_sync_single_for_device(dmadev, dma, len,
+ DMA_BIDIRECTIONAL);
+
+ entry->type = TSNEP_TX_TYPE_XDP_TX;
+ }
+
+ entry->len = len;
+ dma_unmap_addr_set(entry, dma, dma);
+
+ entry->desc->tx = __cpu_to_le64(dma);
+
+ map_len += len;
+
+ if (i + 1 < count) {
+ frag = &shinfo->frags[i];
+ len = skb_frag_size(frag);
+ }
+ }
+
+ return map_len;
+}
+
+/* This function requires __netif_tx_lock is held by the caller. */
+static bool tsnep_xdp_xmit_frame_ring(struct xdp_frame *xdpf,
+ struct tsnep_tx *tx, u32 type)
+{
+ struct skb_shared_info *shinfo = xdp_get_shared_info_from_frame(xdpf);
+ struct tsnep_tx_entry *entry;
+ int count, length, retval, i;
+
+ count = 1;
+ if (unlikely(xdp_frame_has_frags(xdpf)))
+ count += shinfo->nr_frags;
+
+ /* ensure that TX ring is not filled up by XDP, always MAX_SKB_FRAGS
+ * will be available for normal TX path and queue is stopped there if
+ * necessary
+ */
+ if (tsnep_tx_desc_available(tx) < (MAX_SKB_FRAGS + 1 + count))
+ return false;
+
+ entry = &tx->entry[tx->write];
+ entry->xdpf = xdpf;
+
+ retval = tsnep_xdp_tx_map(xdpf, tx, shinfo, count, type);
+ if (retval < 0) {
+ tsnep_tx_unmap(tx, tx->write, count);
+ entry->xdpf = NULL;
+
+ tx->dropped++;
+
+ return false;
+ }
+ length = retval;
+
+ for (i = 0; i < count; i++)
+ tsnep_tx_activate(tx, (tx->write + i) % TSNEP_RING_SIZE, length,
+ i == count - 1);
+ tx->write = (tx->write + count) % TSNEP_RING_SIZE;
+
+ /* descriptor properties shall be valid before hardware is notified */
+ dma_wmb();
+
+ return true;
+}
+
+static void tsnep_xdp_xmit_flush(struct tsnep_tx *tx)
+{
+ iowrite32(TSNEP_CONTROL_TX_ENABLE, tx->addr + TSNEP_CONTROL);
+}
+
+static bool tsnep_xdp_xmit_back(struct tsnep_adapter *adapter,
+ struct xdp_buff *xdp,
+ struct netdev_queue *tx_nq, struct tsnep_tx *tx)
+{
+ struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
+ bool xmit;
+
+ if (unlikely(!xdpf))
+ return false;
+
+ __netif_tx_lock(tx_nq, smp_processor_id());
+
+ xmit = tsnep_xdp_xmit_frame_ring(xdpf, tx, TSNEP_TX_TYPE_XDP_TX);
+
+ /* Avoid transmit queue timeout since we share it with the slow path */
+ if (xmit)
+ txq_trans_cond_update(tx_nq);
+
+ __netif_tx_unlock(tx_nq);
+
+ return xmit;
+}
+
static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget)
{
struct tsnep_tx_entry *entry;
struct netdev_queue *nq;
- unsigned long flags;
int budget = 128;
int length;
int count;
nq = netdev_get_tx_queue(tx->adapter->netdev, tx->queue_index);
-
- spin_lock_irqsave(&tx->lock, flags);
+ __netif_tx_lock(nq, smp_processor_id());
do {
if (tx->read == tx->write)
@@ -530,12 +655,17 @@ static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget)
dma_rmb();
count = 1;
- if (skb_shinfo(entry->skb)->nr_frags > 0)
+ if ((entry->type & TSNEP_TX_TYPE_SKB) &&
+ skb_shinfo(entry->skb)->nr_frags > 0)
count += skb_shinfo(entry->skb)->nr_frags;
+ else if (!(entry->type & TSNEP_TX_TYPE_SKB) &&
+ xdp_frame_has_frags(entry->xdpf))
+ count += xdp_get_shared_info_from_frame(entry->xdpf)->nr_frags;
length = tsnep_tx_unmap(tx, tx->read, count);
- if ((skb_shinfo(entry->skb)->tx_flags & SKBTX_IN_PROGRESS) &&
+ if ((entry->type & TSNEP_TX_TYPE_SKB) &&
+ (skb_shinfo(entry->skb)->tx_flags & SKBTX_IN_PROGRESS) &&
(__le32_to_cpu(entry->desc_wb->properties) &
TSNEP_DESC_EXTENDED_WRITEBACK_FLAG)) {
struct skb_shared_hwtstamps hwtstamps;
@@ -555,7 +685,11 @@ static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget)
skb_tstamp_tx(entry->skb, &hwtstamps);
}
- napi_consume_skb(entry->skb, budget);
+ if (entry->type & TSNEP_TX_TYPE_SKB)
+ napi_consume_skb(entry->skb, napi_budget);
+ else
+ xdp_return_frame_rx_napi(entry->xdpf);
+ /* xdpf is union with skb */
entry->skb = NULL;
tx->read = (tx->read + count) % TSNEP_RING_SIZE;
@@ -571,18 +705,19 @@ static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget)
netif_tx_wake_queue(nq);
}
- spin_unlock_irqrestore(&tx->lock, flags);
+ __netif_tx_unlock(nq);
- return (budget != 0);
+ return budget != 0;
}
static bool tsnep_tx_pending(struct tsnep_tx *tx)
{
- unsigned long flags;
struct tsnep_tx_entry *entry;
+ struct netdev_queue *nq;
bool pending = false;
- spin_lock_irqsave(&tx->lock, flags);
+ nq = netdev_get_tx_queue(tx->adapter->netdev, tx->queue_index);
+ __netif_tx_lock(nq, smp_processor_id());
if (tx->read != tx->write) {
entry = &tx->entry[tx->read];
@@ -592,7 +727,7 @@ static bool tsnep_tx_pending(struct tsnep_tx *tx)
pending = true;
}
- spin_unlock_irqrestore(&tx->lock, flags);
+ __netif_tx_unlock(nq);
return pending;
}
@@ -618,8 +753,6 @@ static int tsnep_tx_open(struct tsnep_adapter *adapter, void __iomem *addr,
tx->owner_counter = 1;
tx->increment_owner_counter = TSNEP_RING_SIZE - 1;
- spin_lock_init(&tx->lock);
-
return 0;
}
@@ -695,9 +828,9 @@ static int tsnep_rx_ring_init(struct tsnep_rx *rx)
pp_params.pool_size = TSNEP_RING_SIZE;
pp_params.nid = dev_to_node(dmadev);
pp_params.dev = dmadev;
- pp_params.dma_dir = DMA_FROM_DEVICE;
+ pp_params.dma_dir = DMA_BIDIRECTIONAL;
pp_params.max_len = TSNEP_MAX_RX_BUF_SIZE;
- pp_params.offset = TSNEP_SKB_PAD;
+ pp_params.offset = TSNEP_RX_OFFSET;
rx->page_pool = page_pool_create(&pp_params);
if (IS_ERR(rx->page_pool)) {
retval = PTR_ERR(rx->page_pool);
@@ -732,7 +865,7 @@ static void tsnep_rx_set_page(struct tsnep_rx *rx, struct tsnep_rx_entry *entry,
entry->page = page;
entry->len = TSNEP_MAX_RX_BUF_SIZE;
entry->dma = page_pool_get_dma_addr(entry->page);
- entry->desc->rx = __cpu_to_le64(entry->dma + TSNEP_SKB_PAD);
+ entry->desc->rx = __cpu_to_le64(entry->dma + TSNEP_RX_OFFSET);
}
static int tsnep_rx_alloc_buffer(struct tsnep_rx *rx, int index)
@@ -826,6 +959,62 @@ static int tsnep_rx_refill(struct tsnep_rx *rx, int count, bool reuse)
return i;
}
+static bool tsnep_xdp_run_prog(struct tsnep_rx *rx, struct bpf_prog *prog,
+ struct xdp_buff *xdp, int *status,
+ struct netdev_queue *tx_nq, struct tsnep_tx *tx)
+{
+ unsigned int length;
+ unsigned int sync;
+ u32 act;
+
+ length = xdp->data_end - xdp->data_hard_start - XDP_PACKET_HEADROOM;
+
+ act = bpf_prog_run_xdp(prog, xdp);
+
+ /* Due xdp_adjust_tail: DMA sync for_device cover max len CPU touch */
+ sync = xdp->data_end - xdp->data_hard_start - XDP_PACKET_HEADROOM;
+ sync = max(sync, length);
+
+ switch (act) {
+ case XDP_PASS:
+ return false;
+ case XDP_TX:
+ if (!tsnep_xdp_xmit_back(rx->adapter, xdp, tx_nq, tx))
+ goto out_failure;
+ *status |= TSNEP_XDP_TX;
+ return true;
+ case XDP_REDIRECT:
+ if (xdp_do_redirect(rx->adapter->netdev, xdp, prog) < 0)
+ goto out_failure;
+ *status |= TSNEP_XDP_REDIRECT;
+ return true;
+ default:
+ bpf_warn_invalid_xdp_action(rx->adapter->netdev, prog, act);
+ fallthrough;
+ case XDP_ABORTED:
+out_failure:
+ trace_xdp_exception(rx->adapter->netdev, prog, act);
+ fallthrough;
+ case XDP_DROP:
+ page_pool_put_page(rx->page_pool, virt_to_head_page(xdp->data),
+ sync, true);
+ return true;
+ }
+}
+
+static void tsnep_finalize_xdp(struct tsnep_adapter *adapter, int status,
+ struct netdev_queue *tx_nq, struct tsnep_tx *tx)
+{
+ if (status & TSNEP_XDP_TX) {
+ __netif_tx_lock(tx_nq, smp_processor_id());
+ tsnep_xdp_xmit_flush(tx);
+ __netif_tx_unlock(tx_nq);
+ }
+
+ if (status & TSNEP_XDP_REDIRECT)
+ xdp_do_flush();
+}
+
static struct sk_buff *tsnep_build_skb(struct tsnep_rx *rx, struct page *page,
int length)
{
@@ -836,14 +1025,14 @@ static struct sk_buff *tsnep_build_skb(struct tsnep_rx *rx, struct page *page,
return NULL;
/* update pointers within the skb to store the data */
- skb_reserve(skb, TSNEP_SKB_PAD + TSNEP_RX_INLINE_METADATA_SIZE);
- __skb_put(skb, length - TSNEP_RX_INLINE_METADATA_SIZE - ETH_FCS_LEN);
+ skb_reserve(skb, TSNEP_RX_OFFSET + TSNEP_RX_INLINE_METADATA_SIZE);
+ __skb_put(skb, length - ETH_FCS_LEN);
if (rx->adapter->hwtstamp_config.rx_filter == HWTSTAMP_FILTER_ALL) {
struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb);
struct tsnep_rx_inline *rx_inline =
(struct tsnep_rx_inline *)(page_address(page) +
- TSNEP_SKB_PAD);
+ TSNEP_RX_OFFSET);
skb_shinfo(skb)->tx_flags |=
SKBTX_HW_TSTAMP_NETDEV;
@@ -861,15 +1050,28 @@ static int tsnep_rx_poll(struct tsnep_rx *rx, struct napi_struct *napi,
int budget)
{
struct device *dmadev = rx->adapter->dmadev;
- int desc_available;
- int done = 0;
enum dma_data_direction dma_dir;
struct tsnep_rx_entry *entry;
+ struct netdev_queue *tx_nq;
+ struct bpf_prog *prog;
+ struct xdp_buff xdp;
struct sk_buff *skb;
+ struct tsnep_tx *tx;
+ int desc_available;
+ int xdp_status = 0;
+ int done = 0;
int length;
desc_available = tsnep_rx_desc_available(rx);
dma_dir = page_pool_get_dma_dir(rx->page_pool);
+ prog = READ_ONCE(rx->adapter->xdp_prog);
+ if (prog) {
+ tx_nq = netdev_get_tx_queue(rx->adapter->netdev,
+ rx->tx_queue_index);
+ tx = &rx->adapter->tx[rx->tx_queue_index];
+
+ xdp_init_buff(&xdp, PAGE_SIZE, &rx->xdp_rxq);
+ }
while (likely(done < budget) && (rx->read != rx->write)) {
entry = &rx->entry[rx->read];
@@ -903,21 +1105,47 @@ static int tsnep_rx_poll(struct tsnep_rx *rx, struct napi_struct *napi,
*/
dma_rmb();
- prefetch(page_address(entry->page) + TSNEP_SKB_PAD);
+ prefetch(page_address(entry->page) + TSNEP_RX_OFFSET);
length = __le32_to_cpu(entry->desc_wb->properties) &
TSNEP_DESC_LENGTH_MASK;
- dma_sync_single_range_for_cpu(dmadev, entry->dma, TSNEP_SKB_PAD,
- length, dma_dir);
+ dma_sync_single_range_for_cpu(dmadev, entry->dma,
+ TSNEP_RX_OFFSET, length, dma_dir);
+
+ /* RX metadata with timestamps is in front of actual data,
+ * subtract metadata size to get length of actual data and
+ * consider metadata size as offset of actual data during RX
+ * processing
+ */
+ length -= TSNEP_RX_INLINE_METADATA_SIZE;
rx->read = (rx->read + 1) % TSNEP_RING_SIZE;
desc_available++;
+ if (prog) {
+ bool consume;
+
+ xdp_prepare_buff(&xdp, page_address(entry->page),
+ XDP_PACKET_HEADROOM + TSNEP_RX_INLINE_METADATA_SIZE,
+ length, false);
+
+ consume = tsnep_xdp_run_prog(rx, prog, &xdp,
+ &xdp_status, tx_nq, tx);
+ if (consume) {
+ rx->packets++;
+ rx->bytes += length;
+
+ entry->page = NULL;
+
+ continue;
+ }
+ }
+
skb = tsnep_build_skb(rx, entry->page, length);
if (skb) {
page_pool_release_page(rx->page_pool, entry->page);
rx->packets++;
- rx->bytes += length - TSNEP_RX_INLINE_METADATA_SIZE;
+ rx->bytes += length;
if (skb->pkt_type == PACKET_MULTICAST)
rx->multicast++;
@@ -930,6 +1158,9 @@ static int tsnep_rx_poll(struct tsnep_rx *rx, struct napi_struct *napi,
entry->page = NULL;
}
+ if (xdp_status)
+ tsnep_finalize_xdp(rx->adapter, xdp_status, tx_nq, tx);
+
if (desc_available)
tsnep_rx_refill(rx, desc_available, false);
@@ -1086,17 +1317,73 @@ static void tsnep_free_irq(struct tsnep_queue *queue, bool first)
memset(queue->name, 0, sizeof(queue->name));
}
+static void tsnep_queue_close(struct tsnep_queue *queue, bool first)
+{
+ struct tsnep_rx *rx = queue->rx;
+
+ tsnep_free_irq(queue, first);
+
+ if (rx && xdp_rxq_info_is_reg(&rx->xdp_rxq))
+ xdp_rxq_info_unreg(&rx->xdp_rxq);
+
+ netif_napi_del(&queue->napi);
+}
+
+static int tsnep_queue_open(struct tsnep_adapter *adapter,
+ struct tsnep_queue *queue, bool first)
+{
+ struct tsnep_rx *rx = queue->rx;
+ struct tsnep_tx *tx = queue->tx;
+ int retval;
+
+ queue->adapter = adapter;
+
+ netif_napi_add(adapter->netdev, &queue->napi, tsnep_poll);
+
+ if (rx) {
+ /* choose TX queue for XDP_TX */
+ if (tx)
+ rx->tx_queue_index = tx->queue_index;
+ else if (rx->queue_index < adapter->num_tx_queues)
+ rx->tx_queue_index = rx->queue_index;
+ else
+ rx->tx_queue_index = 0;
+
+ retval = xdp_rxq_info_reg(&rx->xdp_rxq, adapter->netdev,
+ rx->queue_index, queue->napi.napi_id);
+ if (retval)
+ goto failed;
+ retval = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq,
+ MEM_TYPE_PAGE_POOL,
+ rx->page_pool);
+ if (retval)
+ goto failed;
+ }
+
+ retval = tsnep_request_irq(queue, first);
+ if (retval) {
+ netif_err(adapter, drv, adapter->netdev,
+ "can't get assigned irq %d.\n", queue->irq);
+ goto failed;
+ }
+
+ return 0;
+
+failed:
+ tsnep_queue_close(queue, first);
+
+ return retval;
+}
+
static int tsnep_netdev_open(struct net_device *netdev)
{
struct tsnep_adapter *adapter = netdev_priv(netdev);
- int i;
- void __iomem *addr;
int tx_queue_index = 0;
int rx_queue_index = 0;
- int retval;
+ void __iomem *addr;
+ int i, retval;
for (i = 0; i < adapter->num_queues; i++) {
- adapter->queue[i].adapter = adapter;
if (adapter->queue[i].tx) {
addr = adapter->addr + TSNEP_QUEUE(tx_queue_index);
retval = tsnep_tx_open(adapter, addr, tx_queue_index,
@@ -1107,21 +1394,16 @@ static int tsnep_netdev_open(struct net_device *netdev)
}
if (adapter->queue[i].rx) {
addr = adapter->addr + TSNEP_QUEUE(rx_queue_index);
- retval = tsnep_rx_open(adapter, addr,
- rx_queue_index,
+ retval = tsnep_rx_open(adapter, addr, rx_queue_index,
adapter->queue[i].rx);
if (retval)
goto failed;
rx_queue_index++;
}
- retval = tsnep_request_irq(&adapter->queue[i], i == 0);
- if (retval) {
- netif_err(adapter, drv, adapter->netdev,
- "can't get assigned irq %d.\n",
- adapter->queue[i].irq);
+ retval = tsnep_queue_open(adapter, &adapter->queue[i], i == 0);
+ if (retval)
goto failed;
- }
}
retval = netif_set_real_num_tx_queues(adapter->netdev,
@@ -1139,8 +1421,6 @@ static int tsnep_netdev_open(struct net_device *netdev)
goto phy_failed;
for (i = 0; i < adapter->num_queues; i++) {
- netif_napi_add(adapter->netdev, &adapter->queue[i].napi,
- tsnep_poll);
napi_enable(&adapter->queue[i].napi);
tsnep_enable_irq(adapter, adapter->queue[i].irq_mask);
@@ -1150,10 +1430,9 @@ static int tsnep_netdev_open(struct net_device *netdev)
phy_failed:
tsnep_disable_irq(adapter, ECM_INT_LINK);
- tsnep_phy_close(adapter);
failed:
for (i = 0; i < adapter->num_queues; i++) {
- tsnep_free_irq(&adapter->queue[i], i == 0);
+ tsnep_queue_close(&adapter->queue[i], i == 0);
if (adapter->queue[i].rx)
tsnep_rx_close(adapter->queue[i].rx);
@@ -1175,9 +1454,8 @@ static int tsnep_netdev_close(struct net_device *netdev)
tsnep_disable_irq(adapter, adapter->queue[i].irq_mask);
napi_disable(&adapter->queue[i].napi);
- netif_napi_del(&adapter->queue[i].napi);
- tsnep_free_irq(&adapter->queue[i], i == 0);
+ tsnep_queue_close(&adapter->queue[i], i == 0);
if (adapter->queue[i].rx)
tsnep_rx_close(adapter->queue[i].rx);
@@ -1330,6 +1608,67 @@ static ktime_t tsnep_netdev_get_tstamp(struct net_device *netdev,
return ns_to_ktime(timestamp);
}
+static int tsnep_netdev_bpf(struct net_device *dev, struct netdev_bpf *bpf)
+{
+ struct tsnep_adapter *adapter = netdev_priv(dev);
+
+ switch (bpf->command) {
+ case XDP_SETUP_PROG:
+ return tsnep_xdp_setup_prog(adapter, bpf->prog, bpf->extack);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static struct tsnep_tx *tsnep_xdp_get_tx(struct tsnep_adapter *adapter, u32 cpu)
+{
+ if (cpu >= TSNEP_MAX_QUEUES)
+ cpu &= TSNEP_MAX_QUEUES - 1;
+
+ while (cpu >= adapter->num_tx_queues)
+ cpu -= adapter->num_tx_queues;
+
+ return &adapter->tx[cpu];
+}
+
+static int tsnep_netdev_xdp_xmit(struct net_device *dev, int n,
+ struct xdp_frame **xdp, u32 flags)
+{
+ struct tsnep_adapter *adapter = netdev_priv(dev);
+ u32 cpu = smp_processor_id();
+ struct netdev_queue *nq;
+ struct tsnep_tx *tx;
+ int nxmit;
+ bool xmit;
+
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ return -EINVAL;
+
+ tx = tsnep_xdp_get_tx(adapter, cpu);
+ nq = netdev_get_tx_queue(adapter->netdev, tx->queue_index);
+
+ __netif_tx_lock(nq, cpu);
+
+ for (nxmit = 0; nxmit < n; nxmit++) {
+ xmit = tsnep_xdp_xmit_frame_ring(xdp[nxmit], tx,
+ TSNEP_TX_TYPE_XDP_NDO);
+ if (!xmit)
+ break;
+
+ /* avoid transmit queue timeout since we share it with the slow
+ * path
+ */
+ txq_trans_cond_update(nq);
+ }
+
+ if (flags & XDP_XMIT_FLUSH)
+ tsnep_xdp_xmit_flush(tx);
+
+ __netif_tx_unlock(nq);
+
+ return nxmit;
+}
+
static const struct net_device_ops tsnep_netdev_ops = {
.ndo_open = tsnep_netdev_open,
.ndo_stop = tsnep_netdev_close,
@@ -1341,6 +1680,8 @@ static const struct net_device_ops tsnep_netdev_ops = {
.ndo_set_features = tsnep_netdev_set_features,
.ndo_get_tstamp = tsnep_netdev_get_tstamp,
.ndo_setup_tc = tsnep_tc_setup,
+ .ndo_bpf = tsnep_netdev_bpf,
+ .ndo_xdp_xmit = tsnep_netdev_xdp_xmit,
};
static int tsnep_mac_init(struct tsnep_adapter *adapter)
@@ -1585,6 +1926,10 @@ static int tsnep_probe(struct platform_device *pdev)
netdev->features = NETIF_F_SG;
netdev->hw_features = netdev->features | NETIF_F_LOOPBACK;
+ netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT |
+ NETDEV_XDP_ACT_NDO_XMIT_SG;
+
/* carrier off reporting is important to ethtool even BEFORE open */
netif_carrier_off(netdev);
diff --git a/drivers/net/ethernet/engleder/tsnep_tc.c b/drivers/net/ethernet/engleder/tsnep_tc.c
index c4c6e1357317..d083e6684f12 100644
--- a/drivers/net/ethernet/engleder/tsnep_tc.c
+++ b/drivers/net/ethernet/engleder/tsnep_tc.c
@@ -403,12 +403,33 @@ static int tsnep_taprio(struct tsnep_adapter *adapter,
return 0;
}
+static int tsnep_tc_query_caps(struct tsnep_adapter *adapter,
+ struct tc_query_caps_base *base)
+{
+ switch (base->type) {
+ case TC_SETUP_QDISC_TAPRIO: {
+ struct tc_taprio_caps *caps = base->caps;
+
+ if (!adapter->gate_control)
+ return -EOPNOTSUPP;
+
+ caps->gate_mask_per_txq = true;
+
+ return 0;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
int tsnep_tc_setup(struct net_device *netdev, enum tc_setup_type type,
void *type_data)
{
struct tsnep_adapter *adapter = netdev_priv(netdev);
switch (type) {
+ case TC_QUERY_CAPS:
+ return tsnep_tc_query_caps(adapter, type_data);
case TC_SETUP_QDISC_TAPRIO:
return tsnep_taprio(adapter, type_data);
default:
diff --git a/drivers/net/ethernet/engleder/tsnep_xdp.c b/drivers/net/ethernet/engleder/tsnep_xdp.c
new file mode 100644
index 000000000000..4d14cb1fd772
--- /dev/null
+++ b/drivers/net/ethernet/engleder/tsnep_xdp.c
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2022 Gerhard Engleder <gerhard@engleder-embedded.com> */
+
+#include <linux/if_vlan.h>
+#include <net/xdp_sock_drv.h>
+
+#include "tsnep.h"
+
+int tsnep_xdp_setup_prog(struct tsnep_adapter *adapter, struct bpf_prog *prog,
+ struct netlink_ext_ack *extack)
+{
+ struct bpf_prog *old_prog;
+
+ old_prog = xchg(&adapter->xdp_prog, prog);
+ if (old_prog)
+ bpf_prog_put(old_prog);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
index 6c8c78018ce6..139fe66f8bcd 100644
--- a/drivers/net/ethernet/faraday/ftmac100.c
+++ b/drivers/net/ethernet/faraday/ftmac100.c
@@ -182,6 +182,12 @@ static int ftmac100_start_hw(struct ftmac100 *priv)
if (netdev->mtu > ETH_DATA_LEN)
maccr |= FTMAC100_MACCR_RX_FTL;
+ /* Add other bits as needed */
+ if (netdev->flags & IFF_PROMISC)
+ maccr |= FTMAC100_MACCR_RCV_ALL;
+ if (netdev->flags & IFF_ALLMULTI)
+ maccr |= FTMAC100_MACCR_RX_MULTIPKT;
+
iowrite32(maccr, priv->base + FTMAC100_OFFSET_MACCR);
return 0;
}
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index 3f8032947d86..9318a2554056 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -244,6 +244,10 @@ static int dpaa_netdev_init(struct net_device *net_dev,
net_dev->features |= net_dev->hw_features;
net_dev->vlan_features = net_dev->features;
+ net_dev->xdp_features = NETDEV_XDP_ACT_BASIC |
+ NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT;
+
if (is_valid_ether_addr(mac_addr)) {
memcpy(net_dev->perm_addr, mac_addr, net_dev->addr_len);
eth_hw_addr_set(net_dev, mac_addr);
@@ -2410,6 +2414,9 @@ static int dpaa_eth_poll(struct napi_struct *napi, int budget)
cleaned = qman_p_poll_dqrr(np->p, budget);
+ if (np->xdp_act & XDP_REDIRECT)
+ xdp_do_flush();
+
if (cleaned < budget) {
napi_complete_done(napi, cleaned);
qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
@@ -2417,9 +2424,6 @@ static int dpaa_eth_poll(struct napi_struct *napi, int budget)
qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
}
- if (np->xdp_act & XDP_REDIRECT)
- xdp_do_flush();
-
return cleaned;
}
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index 0c35abb7d065..a62cffaf6ff1 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -1993,10 +1993,15 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
if (rx_cleaned >= budget ||
txconf_cleaned >= DPAA2_ETH_TXCONF_PER_NAPI) {
work_done = budget;
+ if (ch->xdp.res & XDP_REDIRECT)
+ xdp_do_flush();
goto out;
}
} while (store_cleaned);
+ if (ch->xdp.res & XDP_REDIRECT)
+ xdp_do_flush();
+
/* Update NET DIM with the values for this CDAN */
dpaa2_io_update_net_dim(ch->dpio, ch->stats.frames_per_cdan,
ch->stats.bytes_per_cdan);
@@ -2032,9 +2037,7 @@ out:
txc_fq->dq_bytes = 0;
}
- if (ch->xdp.res & XDP_REDIRECT)
- xdp_do_flush_map();
- else if (rx_cleaned && ch->xdp.res & XDP_TX)
+ if (rx_cleaned && ch->xdp.res & XDP_TX)
dpaa2_eth_xdp_tx_flush(priv, ch, &priv->fq[flowid]);
return work_done;
@@ -4593,6 +4596,12 @@ static int dpaa2_eth_netdev_init(struct net_device *net_dev)
NETIF_F_LLTX | NETIF_F_HW_TC | NETIF_F_TSO;
net_dev->gso_max_segs = DPAA2_ETH_ENQUEUE_MAX_FDS;
net_dev->hw_features = net_dev->features;
+ net_dev->xdp_features = NETDEV_XDP_ACT_BASIC |
+ NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT;
+ if (priv->dpni_attrs.wriop_version >= DPAA2_WRIOP_VERSION(3, 0, 0) &&
+ priv->dpni_attrs.num_queues <= 8)
+ net_dev->xdp_features |= NETDEV_XDP_ACT_XSK_ZEROCOPY;
if (priv->dpni_attrs.vlan_filter_entries)
net_dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
diff --git a/drivers/net/ethernet/freescale/enetc/Kconfig b/drivers/net/ethernet/freescale/enetc/Kconfig
index cdc0ff89388a..9bc099cf3cb1 100644
--- a/drivers/net/ethernet/freescale/enetc/Kconfig
+++ b/drivers/net/ethernet/freescale/enetc/Kconfig
@@ -1,7 +1,16 @@
# SPDX-License-Identifier: GPL-2.0
+config FSL_ENETC_CORE
+ tristate
+ help
+ This module supports common functionality between the PF and VF
+ drivers for the NXP ENETC controller.
+
+ If compiled as module (M), the module name is fsl-enetc-core.
+
config FSL_ENETC
tristate "ENETC PF driver"
- depends on PCI && PCI_MSI
+ depends on PCI_MSI
+ select FSL_ENETC_CORE
select FSL_ENETC_IERB
select FSL_ENETC_MDIO
select PHYLINK
@@ -16,7 +25,8 @@ config FSL_ENETC
config FSL_ENETC_VF
tristate "ENETC VF driver"
- depends on PCI && PCI_MSI
+ depends on PCI_MSI
+ select FSL_ENETC_CORE
select FSL_ENETC_MDIO
select PHYLINK
select DIMLIB
diff --git a/drivers/net/ethernet/freescale/enetc/Makefile b/drivers/net/ethernet/freescale/enetc/Makefile
index e0e8dfd13793..b13cbbabb2ea 100644
--- a/drivers/net/ethernet/freescale/enetc/Makefile
+++ b/drivers/net/ethernet/freescale/enetc/Makefile
@@ -1,14 +1,15 @@
# SPDX-License-Identifier: GPL-2.0
-common-objs := enetc.o enetc_cbdr.o enetc_ethtool.o
+obj-$(CONFIG_FSL_ENETC_CORE) += fsl-enetc-core.o
+fsl-enetc-core-y := enetc.o enetc_cbdr.o enetc_ethtool.o
obj-$(CONFIG_FSL_ENETC) += fsl-enetc.o
-fsl-enetc-y := enetc_pf.o $(common-objs)
+fsl-enetc-y := enetc_pf.o
fsl-enetc-$(CONFIG_PCI_IOV) += enetc_msg.o
fsl-enetc-$(CONFIG_FSL_ENETC_QOS) += enetc_qos.o
obj-$(CONFIG_FSL_ENETC_VF) += fsl-enetc-vf.o
-fsl-enetc-vf-y := enetc_vf.o $(common-objs)
+fsl-enetc-vf-y := enetc_vf.o
obj-$(CONFIG_FSL_ENETC_IERB) += fsl-enetc-ierb.o
fsl-enetc-ierb-y := enetc_ierb.o
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index e96449eedfb5..2fc712b24d12 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -11,14 +11,26 @@
#include <net/pkt_sched.h>
#include <net/tso.h>
+u32 enetc_port_mac_rd(struct enetc_si *si, u32 reg)
+{
+ return enetc_port_rd(&si->hw, reg);
+}
+EXPORT_SYMBOL_GPL(enetc_port_mac_rd);
+
+void enetc_port_mac_wr(struct enetc_si *si, u32 reg, u32 val)
+{
+ enetc_port_wr(&si->hw, reg, val);
+ if (si->hw_features & ENETC_SI_F_QBU)
+ enetc_port_wr(&si->hw, reg + ENETC_PMAC_OFFSET, val);
+}
+EXPORT_SYMBOL_GPL(enetc_port_mac_wr);
+
static int enetc_num_stack_tx_queues(struct enetc_ndev_priv *priv)
{
int num_tx_rings = priv->num_tx_rings;
- int i;
- for (i = 0; i < priv->num_rx_rings; i++)
- if (priv->rx_ring[i]->xdp.prog)
- return num_tx_rings - num_possible_cpus();
+ if (priv->xdp_prog)
+ return num_tx_rings - num_possible_cpus();
return num_tx_rings;
}
@@ -243,8 +255,8 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
if (udp)
val |= ENETC_PM0_SINGLE_STEP_CH;
- enetc_port_wr(hw, ENETC_PM0_SINGLE_STEP, val);
- enetc_port_wr(hw, ENETC_PM1_SINGLE_STEP, val);
+ enetc_port_mac_wr(priv->si, ENETC_PM0_SINGLE_STEP,
+ val);
} else if (do_twostep_tstamp) {
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
e_flags |= ENETC_TXBD_E_FLAGS_TWO_STEP_PTP;
@@ -651,6 +663,7 @@ netdev_tx_t enetc_xmit(struct sk_buff *skb, struct net_device *ndev)
return enetc_start_xmit(skb, ndev);
}
+EXPORT_SYMBOL_GPL(enetc_xmit);
static irqreturn_t enetc_msix(int irq, void *data)
{
@@ -1305,6 +1318,10 @@ static int enetc_xdp_frame_to_xdp_tx_swbd(struct enetc_bdr *tx_ring,
xdp_tx_swbd->xdp_frame = NULL;
n++;
+
+ if (!xdp_frame_has_frags(xdp_frame))
+ goto out;
+
xdp_tx_swbd = &xdp_tx_arr[n];
shinfo = xdp_get_shared_info_from_frame(xdp_frame);
@@ -1334,7 +1351,7 @@ static int enetc_xdp_frame_to_xdp_tx_swbd(struct enetc_bdr *tx_ring,
n++;
xdp_tx_swbd = &xdp_tx_arr[n];
}
-
+out:
xdp_tx_arr[n - 1].is_eof = true;
xdp_tx_arr[n - 1].xdp_frame = xdp_frame;
@@ -1384,22 +1401,19 @@ int enetc_xdp_xmit(struct net_device *ndev, int num_frames,
return xdp_tx_frm_cnt;
}
+EXPORT_SYMBOL_GPL(enetc_xdp_xmit);
static void enetc_map_rx_buff_to_xdp(struct enetc_bdr *rx_ring, int i,
struct xdp_buff *xdp_buff, u16 size)
{
struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size);
void *hard_start = page_address(rx_swbd->page) + rx_swbd->page_offset;
- struct skb_shared_info *shinfo;
/* To be used for XDP_TX */
rx_swbd->len = size;
xdp_prepare_buff(xdp_buff, hard_start - rx_ring->buffer_offset,
rx_ring->buffer_offset, size, false);
-
- shinfo = xdp_get_shared_info_from_buff(xdp_buff);
- shinfo->nr_frags = 0;
}
static void enetc_add_rx_buff_to_xdp(struct enetc_bdr *rx_ring, int i,
@@ -1407,11 +1421,23 @@ static void enetc_add_rx_buff_to_xdp(struct enetc_bdr *rx_ring, int i,
{
struct skb_shared_info *shinfo = xdp_get_shared_info_from_buff(xdp_buff);
struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size);
- skb_frag_t *frag = &shinfo->frags[shinfo->nr_frags];
+ skb_frag_t *frag;
/* To be used for XDP_TX */
rx_swbd->len = size;
+ if (!xdp_buff_has_frags(xdp_buff)) {
+ xdp_buff_set_frags_flag(xdp_buff);
+ shinfo->xdp_frags_size = size;
+ shinfo->nr_frags = 0;
+ } else {
+ shinfo->xdp_frags_size += size;
+ }
+
+ if (page_is_pfmemalloc(rx_swbd->page))
+ xdp_buff_set_frag_pfmemalloc(xdp_buff);
+
+ frag = &shinfo->frags[shinfo->nr_frags];
skb_frag_off_set(frag, rx_swbd->page_offset);
skb_frag_size_set(frag, size);
__skb_frag_set_page(frag, rx_swbd->page);
@@ -1584,20 +1610,6 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
}
break;
case XDP_REDIRECT:
- /* xdp_return_frame does not support S/G in the sense
- * that it leaks the fragments (__xdp_return should not
- * call page_frag_free only for the initial buffer).
- * Until XDP_REDIRECT gains support for S/G let's keep
- * the code structure in place, but dead. We drop the
- * S/G frames ourselves to avoid memory leaks which
- * would otherwise leave the kernel OOM.
- */
- if (unlikely(cleaned_cnt - orig_cleaned_cnt != 1)) {
- enetc_xdp_drop(rx_ring, orig_i, i);
- rx_ring->stats.xdp_redirect_sg++;
- break;
- }
-
err = xdp_do_redirect(rx_ring->ndev, &xdp_buff, prog);
if (unlikely(err)) {
enetc_xdp_drop(rx_ring, orig_i, i);
@@ -1713,204 +1725,263 @@ void enetc_get_si_caps(struct enetc_si *si)
if (val & ENETC_SIPCAPR0_QBV)
si->hw_features |= ENETC_SI_F_QBV;
+ if (val & ENETC_SIPCAPR0_QBU)
+ si->hw_features |= ENETC_SI_F_QBU;
+
if (val & ENETC_SIPCAPR0_PSFP)
si->hw_features |= ENETC_SI_F_PSFP;
}
+EXPORT_SYMBOL_GPL(enetc_get_si_caps);
-static int enetc_dma_alloc_bdr(struct enetc_bdr *r, size_t bd_size)
+static int enetc_dma_alloc_bdr(struct enetc_bdr_resource *res)
{
- r->bd_base = dma_alloc_coherent(r->dev, r->bd_count * bd_size,
- &r->bd_dma_base, GFP_KERNEL);
- if (!r->bd_base)
+ size_t bd_base_size = res->bd_count * res->bd_size;
+
+ res->bd_base = dma_alloc_coherent(res->dev, bd_base_size,
+ &res->bd_dma_base, GFP_KERNEL);
+ if (!res->bd_base)
return -ENOMEM;
/* h/w requires 128B alignment */
- if (!IS_ALIGNED(r->bd_dma_base, 128)) {
- dma_free_coherent(r->dev, r->bd_count * bd_size, r->bd_base,
- r->bd_dma_base);
+ if (!IS_ALIGNED(res->bd_dma_base, 128)) {
+ dma_free_coherent(res->dev, bd_base_size, res->bd_base,
+ res->bd_dma_base);
return -EINVAL;
}
return 0;
}
-static int enetc_alloc_txbdr(struct enetc_bdr *txr)
+static void enetc_dma_free_bdr(const struct enetc_bdr_resource *res)
+{
+ size_t bd_base_size = res->bd_count * res->bd_size;
+
+ dma_free_coherent(res->dev, bd_base_size, res->bd_base,
+ res->bd_dma_base);
+}
+
+static int enetc_alloc_tx_resource(struct enetc_bdr_resource *res,
+ struct device *dev, size_t bd_count)
{
int err;
- txr->tx_swbd = vzalloc(txr->bd_count * sizeof(struct enetc_tx_swbd));
- if (!txr->tx_swbd)
+ res->dev = dev;
+ res->bd_count = bd_count;
+ res->bd_size = sizeof(union enetc_tx_bd);
+
+ res->tx_swbd = vzalloc(bd_count * sizeof(*res->tx_swbd));
+ if (!res->tx_swbd)
return -ENOMEM;
- err = enetc_dma_alloc_bdr(txr, sizeof(union enetc_tx_bd));
+ err = enetc_dma_alloc_bdr(res);
if (err)
goto err_alloc_bdr;
- txr->tso_headers = dma_alloc_coherent(txr->dev,
- txr->bd_count * TSO_HEADER_SIZE,
- &txr->tso_headers_dma,
+ res->tso_headers = dma_alloc_coherent(dev, bd_count * TSO_HEADER_SIZE,
+ &res->tso_headers_dma,
GFP_KERNEL);
- if (!txr->tso_headers) {
+ if (!res->tso_headers) {
err = -ENOMEM;
goto err_alloc_tso;
}
- txr->next_to_clean = 0;
- txr->next_to_use = 0;
-
return 0;
err_alloc_tso:
- dma_free_coherent(txr->dev, txr->bd_count * sizeof(union enetc_tx_bd),
- txr->bd_base, txr->bd_dma_base);
- txr->bd_base = NULL;
+ enetc_dma_free_bdr(res);
err_alloc_bdr:
- vfree(txr->tx_swbd);
- txr->tx_swbd = NULL;
+ vfree(res->tx_swbd);
+ res->tx_swbd = NULL;
return err;
}
-static void enetc_free_txbdr(struct enetc_bdr *txr)
+static void enetc_free_tx_resource(const struct enetc_bdr_resource *res)
{
- int size, i;
-
- for (i = 0; i < txr->bd_count; i++)
- enetc_free_tx_frame(txr, &txr->tx_swbd[i]);
-
- size = txr->bd_count * sizeof(union enetc_tx_bd);
-
- dma_free_coherent(txr->dev, txr->bd_count * TSO_HEADER_SIZE,
- txr->tso_headers, txr->tso_headers_dma);
- txr->tso_headers = NULL;
-
- dma_free_coherent(txr->dev, size, txr->bd_base, txr->bd_dma_base);
- txr->bd_base = NULL;
-
- vfree(txr->tx_swbd);
- txr->tx_swbd = NULL;
+ dma_free_coherent(res->dev, res->bd_count * TSO_HEADER_SIZE,
+ res->tso_headers, res->tso_headers_dma);
+ enetc_dma_free_bdr(res);
+ vfree(res->tx_swbd);
}
-static int enetc_alloc_tx_resources(struct enetc_ndev_priv *priv)
+static struct enetc_bdr_resource *
+enetc_alloc_tx_resources(struct enetc_ndev_priv *priv)
{
+ struct enetc_bdr_resource *tx_res;
int i, err;
+ tx_res = kcalloc(priv->num_tx_rings, sizeof(*tx_res), GFP_KERNEL);
+ if (!tx_res)
+ return ERR_PTR(-ENOMEM);
+
for (i = 0; i < priv->num_tx_rings; i++) {
- err = enetc_alloc_txbdr(priv->tx_ring[i]);
+ struct enetc_bdr *tx_ring = priv->tx_ring[i];
+ err = enetc_alloc_tx_resource(&tx_res[i], tx_ring->dev,
+ tx_ring->bd_count);
if (err)
goto fail;
}
- return 0;
+ return tx_res;
fail:
while (i-- > 0)
- enetc_free_txbdr(priv->tx_ring[i]);
+ enetc_free_tx_resource(&tx_res[i]);
- return err;
+ kfree(tx_res);
+
+ return ERR_PTR(err);
}
-static void enetc_free_tx_resources(struct enetc_ndev_priv *priv)
+static void enetc_free_tx_resources(const struct enetc_bdr_resource *tx_res,
+ size_t num_resources)
{
- int i;
+ size_t i;
- for (i = 0; i < priv->num_tx_rings; i++)
- enetc_free_txbdr(priv->tx_ring[i]);
+ for (i = 0; i < num_resources; i++)
+ enetc_free_tx_resource(&tx_res[i]);
+
+ kfree(tx_res);
}
-static int enetc_alloc_rxbdr(struct enetc_bdr *rxr, bool extended)
+static int enetc_alloc_rx_resource(struct enetc_bdr_resource *res,
+ struct device *dev, size_t bd_count,
+ bool extended)
{
- size_t size = sizeof(union enetc_rx_bd);
int err;
- rxr->rx_swbd = vzalloc(rxr->bd_count * sizeof(struct enetc_rx_swbd));
- if (!rxr->rx_swbd)
- return -ENOMEM;
-
+ res->dev = dev;
+ res->bd_count = bd_count;
+ res->bd_size = sizeof(union enetc_rx_bd);
if (extended)
- size *= 2;
+ res->bd_size *= 2;
+
+ res->rx_swbd = vzalloc(bd_count * sizeof(struct enetc_rx_swbd));
+ if (!res->rx_swbd)
+ return -ENOMEM;
- err = enetc_dma_alloc_bdr(rxr, size);
+ err = enetc_dma_alloc_bdr(res);
if (err) {
- vfree(rxr->rx_swbd);
+ vfree(res->rx_swbd);
return err;
}
- rxr->next_to_clean = 0;
- rxr->next_to_use = 0;
- rxr->next_to_alloc = 0;
- rxr->ext_en = extended;
-
return 0;
}
-static void enetc_free_rxbdr(struct enetc_bdr *rxr)
+static void enetc_free_rx_resource(const struct enetc_bdr_resource *res)
{
- int size;
-
- size = rxr->bd_count * sizeof(union enetc_rx_bd);
-
- dma_free_coherent(rxr->dev, size, rxr->bd_base, rxr->bd_dma_base);
- rxr->bd_base = NULL;
-
- vfree(rxr->rx_swbd);
- rxr->rx_swbd = NULL;
+ enetc_dma_free_bdr(res);
+ vfree(res->rx_swbd);
}
-static int enetc_alloc_rx_resources(struct enetc_ndev_priv *priv)
+static struct enetc_bdr_resource *
+enetc_alloc_rx_resources(struct enetc_ndev_priv *priv, bool extended)
{
- bool extended = !!(priv->active_offloads & ENETC_F_RX_TSTAMP);
+ struct enetc_bdr_resource *rx_res;
int i, err;
+ rx_res = kcalloc(priv->num_rx_rings, sizeof(*rx_res), GFP_KERNEL);
+ if (!rx_res)
+ return ERR_PTR(-ENOMEM);
+
for (i = 0; i < priv->num_rx_rings; i++) {
- err = enetc_alloc_rxbdr(priv->rx_ring[i], extended);
+ struct enetc_bdr *rx_ring = priv->rx_ring[i];
+ err = enetc_alloc_rx_resource(&rx_res[i], rx_ring->dev,
+ rx_ring->bd_count, extended);
if (err)
goto fail;
}
- return 0;
+ return rx_res;
fail:
while (i-- > 0)
- enetc_free_rxbdr(priv->rx_ring[i]);
+ enetc_free_rx_resource(&rx_res[i]);
- return err;
+ kfree(rx_res);
+
+ return ERR_PTR(err);
}
-static void enetc_free_rx_resources(struct enetc_ndev_priv *priv)
+static void enetc_free_rx_resources(const struct enetc_bdr_resource *rx_res,
+ size_t num_resources)
+{
+ size_t i;
+
+ for (i = 0; i < num_resources; i++)
+ enetc_free_rx_resource(&rx_res[i]);
+
+ kfree(rx_res);
+}
+
+static void enetc_assign_tx_resource(struct enetc_bdr *tx_ring,
+ const struct enetc_bdr_resource *res)
+{
+ tx_ring->bd_base = res ? res->bd_base : NULL;
+ tx_ring->bd_dma_base = res ? res->bd_dma_base : 0;
+ tx_ring->tx_swbd = res ? res->tx_swbd : NULL;
+ tx_ring->tso_headers = res ? res->tso_headers : NULL;
+ tx_ring->tso_headers_dma = res ? res->tso_headers_dma : 0;
+}
+
+static void enetc_assign_rx_resource(struct enetc_bdr *rx_ring,
+ const struct enetc_bdr_resource *res)
+{
+ rx_ring->bd_base = res ? res->bd_base : NULL;
+ rx_ring->bd_dma_base = res ? res->bd_dma_base : 0;
+ rx_ring->rx_swbd = res ? res->rx_swbd : NULL;
+}
+
+static void enetc_assign_tx_resources(struct enetc_ndev_priv *priv,
+ const struct enetc_bdr_resource *res)
{
int i;
- for (i = 0; i < priv->num_rx_rings; i++)
- enetc_free_rxbdr(priv->rx_ring[i]);
+ if (priv->tx_res)
+ enetc_free_tx_resources(priv->tx_res, priv->num_tx_rings);
+
+ for (i = 0; i < priv->num_tx_rings; i++) {
+ enetc_assign_tx_resource(priv->tx_ring[i],
+ res ? &res[i] : NULL);
+ }
+
+ priv->tx_res = res;
}
-static void enetc_free_tx_ring(struct enetc_bdr *tx_ring)
+static void enetc_assign_rx_resources(struct enetc_ndev_priv *priv,
+ const struct enetc_bdr_resource *res)
{
int i;
- if (!tx_ring->tx_swbd)
- return;
+ if (priv->rx_res)
+ enetc_free_rx_resources(priv->rx_res, priv->num_rx_rings);
+
+ for (i = 0; i < priv->num_rx_rings; i++) {
+ enetc_assign_rx_resource(priv->rx_ring[i],
+ res ? &res[i] : NULL);
+ }
+
+ priv->rx_res = res;
+}
+
+static void enetc_free_tx_ring(struct enetc_bdr *tx_ring)
+{
+ int i;
for (i = 0; i < tx_ring->bd_count; i++) {
struct enetc_tx_swbd *tx_swbd = &tx_ring->tx_swbd[i];
enetc_free_tx_frame(tx_ring, tx_swbd);
}
-
- tx_ring->next_to_clean = 0;
- tx_ring->next_to_use = 0;
}
static void enetc_free_rx_ring(struct enetc_bdr *rx_ring)
{
int i;
- if (!rx_ring->rx_swbd)
- return;
-
for (i = 0; i < rx_ring->bd_count; i++) {
struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[i];
@@ -1922,10 +1993,6 @@ static void enetc_free_rx_ring(struct enetc_bdr *rx_ring)
__free_page(rx_swbd->page);
rx_swbd->page = NULL;
}
-
- rx_ring->next_to_clean = 0;
- rx_ring->next_to_use = 0;
- rx_ring->next_to_alloc = 0;
}
static void enetc_free_rxtx_rings(struct enetc_ndev_priv *priv)
@@ -1980,6 +2047,7 @@ int enetc_configure_si(struct enetc_ndev_priv *priv)
return 0;
}
+EXPORT_SYMBOL_GPL(enetc_configure_si);
void enetc_init_si_rings_params(struct enetc_ndev_priv *priv)
{
@@ -1999,6 +2067,7 @@ void enetc_init_si_rings_params(struct enetc_ndev_priv *priv)
priv->ic_mode = ENETC_IC_RX_ADAPTIVE | ENETC_IC_TX_MANUAL;
priv->tx_ictt = ENETC_TXIC_TIMETHR;
}
+EXPORT_SYMBOL_GPL(enetc_init_si_rings_params);
int enetc_alloc_si_resources(struct enetc_ndev_priv *priv)
{
@@ -2011,11 +2080,13 @@ int enetc_alloc_si_resources(struct enetc_ndev_priv *priv)
return 0;
}
+EXPORT_SYMBOL_GPL(enetc_alloc_si_resources);
void enetc_free_si_resources(struct enetc_ndev_priv *priv)
{
kfree(priv->cls_rules);
}
+EXPORT_SYMBOL_GPL(enetc_free_si_resources);
static void enetc_setup_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
{
@@ -2039,7 +2110,7 @@ static void enetc_setup_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
/* enable Tx ints by setting pkt thr to 1 */
enetc_txbdr_wr(hw, idx, ENETC_TBICR0, ENETC_TBICR0_ICEN | 0x1);
- tbmr = ENETC_TBMR_EN | ENETC_TBMR_SET_PRIO(tx_ring->prio);
+ tbmr = ENETC_TBMR_SET_PRIO(tx_ring->prio);
if (tx_ring->ndev->features & NETIF_F_HW_VLAN_CTAG_TX)
tbmr |= ENETC_TBMR_VIH;
@@ -2051,10 +2122,11 @@ static void enetc_setup_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
tx_ring->idr = hw->reg + ENETC_SITXIDR;
}
-static void enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
+static void enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring,
+ bool extended)
{
int idx = rx_ring->index;
- u32 rbmr;
+ u32 rbmr = 0;
enetc_rxbdr_wr(hw, idx, ENETC_RBBAR0,
lower_32_bits(rx_ring->bd_dma_base));
@@ -2081,8 +2153,7 @@ static void enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
/* enable Rx ints by setting pkt thr to 1 */
enetc_rxbdr_wr(hw, idx, ENETC_RBICR0, ENETC_RBICR0_ICEN | 0x1);
- rbmr = ENETC_RBMR_EN;
-
+ rx_ring->ext_en = extended;
if (rx_ring->ext_en)
rbmr |= ENETC_RBMR_BDS;
@@ -2092,15 +2163,18 @@ static void enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
rx_ring->rcir = hw->reg + ENETC_BDR(RX, idx, ENETC_RBCIR);
rx_ring->idr = hw->reg + ENETC_SIRXIDR;
+ rx_ring->next_to_clean = 0;
+ rx_ring->next_to_use = 0;
+ rx_ring->next_to_alloc = 0;
+
enetc_lock_mdio();
enetc_refill_rx_ring(rx_ring, enetc_bd_unused(rx_ring));
enetc_unlock_mdio();
- /* enable ring */
enetc_rxbdr_wr(hw, idx, ENETC_RBMR, rbmr);
}
-static void enetc_setup_bdrs(struct enetc_ndev_priv *priv)
+static void enetc_setup_bdrs(struct enetc_ndev_priv *priv, bool extended)
{
struct enetc_hw *hw = &priv->si->hw;
int i;
@@ -2109,10 +2183,42 @@ static void enetc_setup_bdrs(struct enetc_ndev_priv *priv)
enetc_setup_txbdr(hw, priv->tx_ring[i]);
for (i = 0; i < priv->num_rx_rings; i++)
- enetc_setup_rxbdr(hw, priv->rx_ring[i]);
+ enetc_setup_rxbdr(hw, priv->rx_ring[i], extended);
+}
+
+static void enetc_enable_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
+{
+ int idx = tx_ring->index;
+ u32 tbmr;
+
+ tbmr = enetc_txbdr_rd(hw, idx, ENETC_TBMR);
+ tbmr |= ENETC_TBMR_EN;
+ enetc_txbdr_wr(hw, idx, ENETC_TBMR, tbmr);
+}
+
+static void enetc_enable_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
+{
+ int idx = rx_ring->index;
+ u32 rbmr;
+
+ rbmr = enetc_rxbdr_rd(hw, idx, ENETC_RBMR);
+ rbmr |= ENETC_RBMR_EN;
+ enetc_rxbdr_wr(hw, idx, ENETC_RBMR, rbmr);
+}
+
+static void enetc_enable_bdrs(struct enetc_ndev_priv *priv)
+{
+ struct enetc_hw *hw = &priv->si->hw;
+ int i;
+
+ for (i = 0; i < priv->num_tx_rings; i++)
+ enetc_enable_txbdr(hw, priv->tx_ring[i]);
+
+ for (i = 0; i < priv->num_rx_rings; i++)
+ enetc_enable_rxbdr(hw, priv->rx_ring[i]);
}
-static void enetc_clear_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
+static void enetc_disable_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
{
int idx = rx_ring->index;
@@ -2120,13 +2226,30 @@ static void enetc_clear_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
enetc_rxbdr_wr(hw, idx, ENETC_RBMR, 0);
}
-static void enetc_clear_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
+static void enetc_disable_txbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
{
- int delay = 8, timeout = 100;
- int idx = tx_ring->index;
+ int idx = rx_ring->index;
/* disable EN bit on ring */
enetc_txbdr_wr(hw, idx, ENETC_TBMR, 0);
+}
+
+static void enetc_disable_bdrs(struct enetc_ndev_priv *priv)
+{
+ struct enetc_hw *hw = &priv->si->hw;
+ int i;
+
+ for (i = 0; i < priv->num_tx_rings; i++)
+ enetc_disable_txbdr(hw, priv->tx_ring[i]);
+
+ for (i = 0; i < priv->num_rx_rings; i++)
+ enetc_disable_rxbdr(hw, priv->rx_ring[i]);
+}
+
+static void enetc_wait_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
+{
+ int delay = 8, timeout = 100;
+ int idx = tx_ring->index;
/* wait for busy to clear */
while (delay < timeout &&
@@ -2140,18 +2263,13 @@ static void enetc_clear_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
idx);
}
-static void enetc_clear_bdrs(struct enetc_ndev_priv *priv)
+static void enetc_wait_bdrs(struct enetc_ndev_priv *priv)
{
struct enetc_hw *hw = &priv->si->hw;
int i;
for (i = 0; i < priv->num_tx_rings; i++)
- enetc_clear_txbdr(hw, priv->tx_ring[i]);
-
- for (i = 0; i < priv->num_rx_rings; i++)
- enetc_clear_rxbdr(hw, priv->rx_ring[i]);
-
- udelay(1);
+ enetc_wait_txbdr(hw, priv->tx_ring[i]);
}
static int enetc_setup_irqs(struct enetc_ndev_priv *priv)
@@ -2267,8 +2385,11 @@ static int enetc_phylink_connect(struct net_device *ndev)
struct ethtool_eee edata;
int err;
- if (!priv->phylink)
- return 0; /* phy-less mode */
+ if (!priv->phylink) {
+ /* phy-less mode */
+ netif_carrier_on(ndev);
+ return 0;
+ }
err = phylink_of_phy_connect(priv->phylink, priv->dev->of_node, 0);
if (err) {
@@ -2280,6 +2401,8 @@ static int enetc_phylink_connect(struct net_device *ndev)
memset(&edata, 0, sizeof(struct ethtool_eee));
phylink_ethtool_set_eee(priv->phylink, &edata);
+ phylink_start(priv->phylink);
+
return 0;
}
@@ -2321,20 +2444,21 @@ void enetc_start(struct net_device *ndev)
enable_irq(irq);
}
- if (priv->phylink)
- phylink_start(priv->phylink);
- else
- netif_carrier_on(ndev);
+ enetc_enable_bdrs(priv);
netif_tx_start_all_queues(ndev);
}
+EXPORT_SYMBOL_GPL(enetc_start);
int enetc_open(struct net_device *ndev)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
- int num_stack_tx_queues;
+ struct enetc_bdr_resource *tx_res, *rx_res;
+ bool extended;
int err;
+ extended = !!(priv->active_offloads & ENETC_F_RX_TSTAMP);
+
err = enetc_setup_irqs(priv);
if (err)
return err;
@@ -2343,34 +2467,28 @@ int enetc_open(struct net_device *ndev)
if (err)
goto err_phy_connect;
- err = enetc_alloc_tx_resources(priv);
- if (err)
+ tx_res = enetc_alloc_tx_resources(priv);
+ if (IS_ERR(tx_res)) {
+ err = PTR_ERR(tx_res);
goto err_alloc_tx;
+ }
- err = enetc_alloc_rx_resources(priv);
- if (err)
+ rx_res = enetc_alloc_rx_resources(priv, extended);
+ if (IS_ERR(rx_res)) {
+ err = PTR_ERR(rx_res);
goto err_alloc_rx;
-
- num_stack_tx_queues = enetc_num_stack_tx_queues(priv);
-
- err = netif_set_real_num_tx_queues(ndev, num_stack_tx_queues);
- if (err)
- goto err_set_queues;
-
- err = netif_set_real_num_rx_queues(ndev, priv->num_rx_rings);
- if (err)
- goto err_set_queues;
+ }
enetc_tx_onestep_tstamp_init(priv);
- enetc_setup_bdrs(priv);
+ enetc_assign_tx_resources(priv, tx_res);
+ enetc_assign_rx_resources(priv, rx_res);
+ enetc_setup_bdrs(priv, extended);
enetc_start(ndev);
return 0;
-err_set_queues:
- enetc_free_rx_resources(priv);
err_alloc_rx:
- enetc_free_tx_resources(priv);
+ enetc_free_tx_resources(tx_res, priv->num_tx_rings);
err_alloc_tx:
if (priv->phylink)
phylink_disconnect_phy(priv->phylink);
@@ -2379,6 +2497,7 @@ err_phy_connect:
return err;
}
+EXPORT_SYMBOL_GPL(enetc_open);
void enetc_stop(struct net_device *ndev)
{
@@ -2387,6 +2506,8 @@ void enetc_stop(struct net_device *ndev)
netif_tx_stop_all_queues(ndev);
+ enetc_disable_bdrs(priv);
+
for (i = 0; i < priv->bdr_int_num; i++) {
int irq = pci_irq_vector(priv->si->pdev,
ENETC_BDR_INT_BASE_IDX + i);
@@ -2396,104 +2517,205 @@ void enetc_stop(struct net_device *ndev)
napi_disable(&priv->int_vector[i]->napi);
}
- if (priv->phylink)
- phylink_stop(priv->phylink);
- else
- netif_carrier_off(ndev);
+ enetc_wait_bdrs(priv);
enetc_clear_interrupts(priv);
}
+EXPORT_SYMBOL_GPL(enetc_stop);
int enetc_close(struct net_device *ndev)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
enetc_stop(ndev);
- enetc_clear_bdrs(priv);
- if (priv->phylink)
+ if (priv->phylink) {
+ phylink_stop(priv->phylink);
phylink_disconnect_phy(priv->phylink);
+ } else {
+ netif_carrier_off(ndev);
+ }
+
enetc_free_rxtx_rings(priv);
- enetc_free_rx_resources(priv);
- enetc_free_tx_resources(priv);
+
+ /* Avoids dangling pointers and also frees old resources */
+ enetc_assign_rx_resources(priv, NULL);
+ enetc_assign_tx_resources(priv, NULL);
+
enetc_free_irqs(priv);
return 0;
}
+EXPORT_SYMBOL_GPL(enetc_close);
-int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data)
+static int enetc_reconfigure(struct enetc_ndev_priv *priv, bool extended,
+ int (*cb)(struct enetc_ndev_priv *priv, void *ctx),
+ void *ctx)
+{
+ struct enetc_bdr_resource *tx_res, *rx_res;
+ int err;
+
+ ASSERT_RTNL();
+
+ /* If the interface is down, run the callback right away,
+ * without reconfiguration.
+ */
+ if (!netif_running(priv->ndev)) {
+ if (cb) {
+ err = cb(priv, ctx);
+ if (err)
+ return err;
+ }
+
+ return 0;
+ }
+
+ tx_res = enetc_alloc_tx_resources(priv);
+ if (IS_ERR(tx_res)) {
+ err = PTR_ERR(tx_res);
+ goto out;
+ }
+
+ rx_res = enetc_alloc_rx_resources(priv, extended);
+ if (IS_ERR(rx_res)) {
+ err = PTR_ERR(rx_res);
+ goto out_free_tx_res;
+ }
+
+ enetc_stop(priv->ndev);
+ enetc_free_rxtx_rings(priv);
+
+ /* Interface is down, run optional callback now */
+ if (cb) {
+ err = cb(priv, ctx);
+ if (err)
+ goto out_restart;
+ }
+
+ enetc_assign_tx_resources(priv, tx_res);
+ enetc_assign_rx_resources(priv, rx_res);
+ enetc_setup_bdrs(priv, extended);
+ enetc_start(priv->ndev);
+
+ return 0;
+
+out_restart:
+ enetc_setup_bdrs(priv, extended);
+ enetc_start(priv->ndev);
+ enetc_free_rx_resources(rx_res, priv->num_rx_rings);
+out_free_tx_res:
+ enetc_free_tx_resources(tx_res, priv->num_tx_rings);
+out:
+ return err;
+}
+
+static void enetc_debug_tx_ring_prios(struct enetc_ndev_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->num_tx_rings; i++)
+ netdev_dbg(priv->ndev, "TX ring %d prio %d\n", i,
+ priv->tx_ring[i]->prio);
+}
+
+static void enetc_reset_tc_mqprio(struct net_device *ndev)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
- struct tc_mqprio_qopt *mqprio = type_data;
struct enetc_hw *hw = &priv->si->hw;
struct enetc_bdr *tx_ring;
int num_stack_tx_queues;
- u8 num_tc;
int i;
num_stack_tx_queues = enetc_num_stack_tx_queues(priv);
- mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
- num_tc = mqprio->num_tc;
- if (!num_tc) {
- netdev_reset_tc(ndev);
- netif_set_real_num_tx_queues(ndev, num_stack_tx_queues);
+ netdev_reset_tc(ndev);
+ netif_set_real_num_tx_queues(ndev, num_stack_tx_queues);
+ priv->min_num_stack_tx_queues = num_possible_cpus();
- /* Reset all ring priorities to 0 */
- for (i = 0; i < priv->num_tx_rings; i++) {
- tx_ring = priv->tx_ring[i];
- tx_ring->prio = 0;
- enetc_set_bdr_prio(hw, tx_ring->index, tx_ring->prio);
- }
+ /* Reset all ring priorities to 0 */
+ for (i = 0; i < priv->num_tx_rings; i++) {
+ tx_ring = priv->tx_ring[i];
+ tx_ring->prio = 0;
+ enetc_set_bdr_prio(hw, tx_ring->index, tx_ring->prio);
+ }
+
+ enetc_debug_tx_ring_prios(priv);
+}
+
+int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct tc_mqprio_qopt *mqprio = type_data;
+ struct enetc_hw *hw = &priv->si->hw;
+ int num_stack_tx_queues = 0;
+ u8 num_tc = mqprio->num_tc;
+ struct enetc_bdr *tx_ring;
+ int offset, count;
+ int err, tc, q;
+ if (!num_tc) {
+ enetc_reset_tc_mqprio(ndev);
return 0;
}
- /* Check if we have enough BD rings available to accommodate all TCs */
- if (num_tc > num_stack_tx_queues) {
- netdev_err(ndev, "Max %d traffic classes supported\n",
- priv->num_tx_rings);
- return -EINVAL;
- }
+ err = netdev_set_num_tc(ndev, num_tc);
+ if (err)
+ return err;
- /* For the moment, we use only one BD ring per TC.
- *
- * Configure num_tc BD rings with increasing priorities.
- */
- for (i = 0; i < num_tc; i++) {
- tx_ring = priv->tx_ring[i];
- tx_ring->prio = i;
- enetc_set_bdr_prio(hw, tx_ring->index, tx_ring->prio);
+ for (tc = 0; tc < num_tc; tc++) {
+ offset = mqprio->offset[tc];
+ count = mqprio->count[tc];
+ num_stack_tx_queues += count;
+
+ err = netdev_set_tc_queue(ndev, tc, count, offset);
+ if (err)
+ goto err_reset_tc;
+
+ for (q = offset; q < offset + count; q++) {
+ tx_ring = priv->tx_ring[q];
+ /* The prio_tc_map is skb_tx_hash()'s way of selecting
+ * between TX queues based on skb->priority. As such,
+ * there's nothing to offload based on it.
+ * Make the mqprio "traffic class" be the priority of
+ * this ring group, and leave the Tx IPV to traffic
+ * class mapping as its default mapping value of 1:1.
+ */
+ tx_ring->prio = tc;
+ enetc_set_bdr_prio(hw, tx_ring->index, tx_ring->prio);
+ }
}
- /* Reset the number of netdev queues based on the TC count */
- netif_set_real_num_tx_queues(ndev, num_tc);
+ err = netif_set_real_num_tx_queues(ndev, num_stack_tx_queues);
+ if (err)
+ goto err_reset_tc;
- netdev_set_num_tc(ndev, num_tc);
+ priv->min_num_stack_tx_queues = num_stack_tx_queues;
- /* Each TC is associated with one netdev queue */
- for (i = 0; i < num_tc; i++)
- netdev_set_tc_queue(ndev, i, 1, i);
+ enetc_debug_tx_ring_prios(priv);
return 0;
+
+err_reset_tc:
+ enetc_reset_tc_mqprio(ndev);
+ return err;
}
+EXPORT_SYMBOL_GPL(enetc_setup_tc_mqprio);
-static int enetc_setup_xdp_prog(struct net_device *dev, struct bpf_prog *prog,
- struct netlink_ext_ack *extack)
+static int enetc_reconfigure_xdp_cb(struct enetc_ndev_priv *priv, void *ctx)
{
- struct enetc_ndev_priv *priv = netdev_priv(dev);
- struct bpf_prog *old_prog;
- bool is_up;
- int i;
-
- /* The buffer layout is changing, so we need to drain the old
- * RX buffers and seed new ones.
- */
- is_up = netif_running(dev);
- if (is_up)
- dev_close(dev);
+ struct bpf_prog *old_prog, *prog = ctx;
+ int num_stack_tx_queues;
+ int err, i;
old_prog = xchg(&priv->xdp_prog, prog);
+
+ num_stack_tx_queues = enetc_num_stack_tx_queues(priv);
+ err = netif_set_real_num_tx_queues(priv->ndev, num_stack_tx_queues);
+ if (err) {
+ xchg(&priv->xdp_prog, old_prog);
+ return err;
+ }
+
if (old_prog)
bpf_prog_put(old_prog);
@@ -2508,23 +2730,46 @@ static int enetc_setup_xdp_prog(struct net_device *dev, struct bpf_prog *prog,
rx_ring->buffer_offset = ENETC_RXB_PAD;
}
- if (is_up)
- return dev_open(dev, extack);
-
return 0;
}
-int enetc_setup_bpf(struct net_device *dev, struct netdev_bpf *xdp)
+static int enetc_setup_xdp_prog(struct net_device *ndev, struct bpf_prog *prog,
+ struct netlink_ext_ack *extack)
{
- switch (xdp->command) {
+ int num_xdp_tx_queues = prog ? num_possible_cpus() : 0;
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ bool extended;
+
+ if (priv->min_num_stack_tx_queues + num_xdp_tx_queues >
+ priv->num_tx_rings) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Reserving %d XDP TXQs does not leave a minimum of %d TXQs for network stack (total %d available)",
+ num_xdp_tx_queues,
+ priv->min_num_stack_tx_queues,
+ priv->num_tx_rings);
+ return -EBUSY;
+ }
+
+ extended = !!(priv->active_offloads & ENETC_F_RX_TSTAMP);
+
+ /* The buffer layout is changing, so we need to drain the old
+ * RX buffers and seed new ones.
+ */
+ return enetc_reconfigure(priv, extended, enetc_reconfigure_xdp_cb, prog);
+}
+
+int enetc_setup_bpf(struct net_device *ndev, struct netdev_bpf *bpf)
+{
+ switch (bpf->command) {
case XDP_SETUP_PROG:
- return enetc_setup_xdp_prog(dev, xdp->prog, xdp->extack);
+ return enetc_setup_xdp_prog(ndev, bpf->prog, bpf->extack);
default:
return -EINVAL;
}
return 0;
}
+EXPORT_SYMBOL_GPL(enetc_setup_bpf);
struct net_device_stats *enetc_get_stats(struct net_device *ndev)
{
@@ -2556,6 +2801,7 @@ struct net_device_stats *enetc_get_stats(struct net_device *ndev)
return stats;
}
+EXPORT_SYMBOL_GPL(enetc_get_stats);
static int enetc_set_rss(struct net_device *ndev, int en)
{
@@ -2608,48 +2854,53 @@ void enetc_set_features(struct net_device *ndev, netdev_features_t features)
enetc_enable_txvlan(ndev,
!!(features & NETIF_F_HW_VLAN_CTAG_TX));
}
+EXPORT_SYMBOL_GPL(enetc_set_features);
#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
static int enetc_hwtstamp_set(struct net_device *ndev, struct ifreq *ifr)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ int err, new_offloads = priv->active_offloads;
struct hwtstamp_config config;
- int ao;
if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
return -EFAULT;
switch (config.tx_type) {
case HWTSTAMP_TX_OFF:
- priv->active_offloads &= ~ENETC_F_TX_TSTAMP_MASK;
+ new_offloads &= ~ENETC_F_TX_TSTAMP_MASK;
break;
case HWTSTAMP_TX_ON:
- priv->active_offloads &= ~ENETC_F_TX_TSTAMP_MASK;
- priv->active_offloads |= ENETC_F_TX_TSTAMP;
+ new_offloads &= ~ENETC_F_TX_TSTAMP_MASK;
+ new_offloads |= ENETC_F_TX_TSTAMP;
break;
case HWTSTAMP_TX_ONESTEP_SYNC:
- priv->active_offloads &= ~ENETC_F_TX_TSTAMP_MASK;
- priv->active_offloads |= ENETC_F_TX_ONESTEP_SYNC_TSTAMP;
+ new_offloads &= ~ENETC_F_TX_TSTAMP_MASK;
+ new_offloads |= ENETC_F_TX_ONESTEP_SYNC_TSTAMP;
break;
default:
return -ERANGE;
}
- ao = priv->active_offloads;
switch (config.rx_filter) {
case HWTSTAMP_FILTER_NONE:
- priv->active_offloads &= ~ENETC_F_RX_TSTAMP;
+ new_offloads &= ~ENETC_F_RX_TSTAMP;
break;
default:
- priv->active_offloads |= ENETC_F_RX_TSTAMP;
+ new_offloads |= ENETC_F_RX_TSTAMP;
config.rx_filter = HWTSTAMP_FILTER_ALL;
}
- if (netif_running(ndev) && ao != priv->active_offloads) {
- enetc_close(ndev);
- enetc_open(ndev);
+ if ((new_offloads ^ priv->active_offloads) & ENETC_F_RX_TSTAMP) {
+ bool extended = !!(new_offloads & ENETC_F_RX_TSTAMP);
+
+ err = enetc_reconfigure(priv, extended, NULL, NULL);
+ if (err)
+ return err;
}
+ priv->active_offloads = new_offloads;
+
return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
-EFAULT : 0;
}
@@ -2691,10 +2942,12 @@ int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
return phylink_mii_ioctl(priv->phylink, rq, cmd);
}
+EXPORT_SYMBOL_GPL(enetc_ioctl);
int enetc_alloc_msix(struct enetc_ndev_priv *priv)
{
struct pci_dev *pdev = priv->si->pdev;
+ int num_stack_tx_queues;
int first_xdp_tx_ring;
int i, n, err, nvec;
int v_tx_rings;
@@ -2771,6 +3024,17 @@ int enetc_alloc_msix(struct enetc_ndev_priv *priv)
}
}
+ num_stack_tx_queues = enetc_num_stack_tx_queues(priv);
+
+ err = netif_set_real_num_tx_queues(priv->ndev, num_stack_tx_queues);
+ if (err)
+ goto fail;
+
+ err = netif_set_real_num_rx_queues(priv->ndev, priv->num_rx_rings);
+ if (err)
+ goto fail;
+
+ priv->min_num_stack_tx_queues = num_possible_cpus();
first_xdp_tx_ring = priv->num_tx_rings - num_possible_cpus();
priv->xdp_tx_ring = &priv->tx_ring[first_xdp_tx_ring];
@@ -2792,6 +3056,7 @@ fail:
return err;
}
+EXPORT_SYMBOL_GPL(enetc_alloc_msix);
void enetc_free_msix(struct enetc_ndev_priv *priv)
{
@@ -2821,6 +3086,7 @@ void enetc_free_msix(struct enetc_ndev_priv *priv)
/* disable all MSIX for this device */
pci_free_irq_vectors(priv->si->pdev);
}
+EXPORT_SYMBOL_GPL(enetc_free_msix);
static void enetc_kfree_si(struct enetc_si *si)
{
@@ -2910,6 +3176,7 @@ err_dma:
return err;
}
+EXPORT_SYMBOL_GPL(enetc_pci_probe);
void enetc_pci_remove(struct pci_dev *pdev)
{
@@ -2921,3 +3188,6 @@ void enetc_pci_remove(struct pci_dev *pdev)
pci_release_mem_regions(pdev);
pci_disable_device(pdev);
}
+EXPORT_SYMBOL_GPL(enetc_pci_remove);
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h
index c6d8cc15c270..8010f31cd10d 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc.h
@@ -70,7 +70,6 @@ struct enetc_ring_stats {
unsigned int xdp_tx_drops;
unsigned int xdp_redirect;
unsigned int xdp_redirect_failures;
- unsigned int xdp_redirect_sg;
unsigned int recycles;
unsigned int recycle_failures;
unsigned int win_drop;
@@ -86,6 +85,23 @@ struct enetc_xdp_data {
#define ENETC_TX_RING_DEFAULT_SIZE 2048
#define ENETC_DEFAULT_TX_WORK (ENETC_TX_RING_DEFAULT_SIZE / 2)
+struct enetc_bdr_resource {
+ /* Input arguments saved for teardown */
+ struct device *dev; /* for DMA mapping */
+ size_t bd_count;
+ size_t bd_size;
+
+ /* Resource proper */
+ void *bd_base; /* points to Rx or Tx BD ring */
+ dma_addr_t bd_dma_base;
+ union {
+ struct enetc_tx_swbd *tx_swbd;
+ struct enetc_rx_swbd *rx_swbd;
+ };
+ char *tso_headers;
+ dma_addr_t tso_headers_dma;
+};
+
struct enetc_bdr {
struct device *dev; /* for DMA mapping */
struct net_device *ndev;
@@ -213,8 +229,9 @@ enum enetc_errata {
ENETC_ERR_UCMCSWP = BIT(1),
};
-#define ENETC_SI_F_QBV BIT(0)
-#define ENETC_SI_F_PSFP BIT(1)
+#define ENETC_SI_F_PSFP BIT(0)
+#define ENETC_SI_F_QBV BIT(1)
+#define ENETC_SI_F_QBU BIT(2)
/* PCI IEP device data */
struct enetc_si {
@@ -297,7 +314,6 @@ struct psfp_cap {
};
#define ENETC_F_TX_TSTAMP_MASK 0xff
-/* TODO: more hardware offloads */
enum enetc_active_offloads {
/* 8 bits reserved for TX timestamp types (hwtstamp_tx_types) */
ENETC_F_TX_TSTAMP = BIT(0),
@@ -306,6 +322,7 @@ enum enetc_active_offloads {
ENETC_F_RX_TSTAMP = BIT(8),
ENETC_F_QBV = BIT(9),
ENETC_F_QCI = BIT(10),
+ ENETC_F_QBU = BIT(11),
};
enum enetc_flags_bit {
@@ -345,11 +362,16 @@ struct enetc_ndev_priv {
struct enetc_bdr **xdp_tx_ring;
struct enetc_bdr *tx_ring[16];
struct enetc_bdr *rx_ring[16];
+ const struct enetc_bdr_resource *tx_res;
+ const struct enetc_bdr_resource *rx_res;
struct enetc_cls_rule *cls_rules;
struct psfp_cap psfp_cap;
+ /* Minimum number of TX queues required by the network stack */
+ unsigned int min_num_stack_tx_queues;
+
struct phylink *phylink;
int ic_mode;
u32 tx_ictt;
@@ -360,6 +382,11 @@ struct enetc_ndev_priv {
struct work_struct tx_onestep_tstamp;
struct sk_buff_head tx_skbs;
+
+ /* Serialize access to MAC Merge state between ethtool requests
+ * and link state updates
+ */
+ struct mutex mm_lock;
};
/* Messaging */
@@ -378,6 +405,8 @@ struct enetc_msg_cmd_set_primary_mac {
extern int enetc_phc_index;
/* SI common */
+u32 enetc_port_mac_rd(struct enetc_si *si, u32 reg);
+void enetc_port_mac_wr(struct enetc_si *si, u32 reg, u32 val);
int enetc_pci_probe(struct pci_dev *pdev, const char *name, int sizeof_priv);
void enetc_pci_remove(struct pci_dev *pdev);
int enetc_alloc_msix(struct enetc_ndev_priv *priv);
@@ -397,12 +426,13 @@ struct net_device_stats *enetc_get_stats(struct net_device *ndev);
void enetc_set_features(struct net_device *ndev, netdev_features_t features);
int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd);
int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data);
-int enetc_setup_bpf(struct net_device *dev, struct netdev_bpf *xdp);
+int enetc_setup_bpf(struct net_device *ndev, struct netdev_bpf *bpf);
int enetc_xdp_xmit(struct net_device *ndev, int num_frames,
struct xdp_frame **frames, u32 flags);
/* ethtool */
void enetc_set_ethtool_ops(struct net_device *ndev);
+void enetc_mm_link_state_update(struct enetc_ndev_priv *priv, bool link);
/* control buffer descriptor ring (CBDR) */
int enetc_setup_cbdr(struct device *dev, struct enetc_hw *hw, int bd_count,
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c b/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c
index af68dc46a795..20bfdf7fb4b4 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_cbdr.c
@@ -44,6 +44,7 @@ int enetc_setup_cbdr(struct device *dev, struct enetc_hw *hw, int bd_count,
return 0;
}
+EXPORT_SYMBOL_GPL(enetc_setup_cbdr);
void enetc_teardown_cbdr(struct enetc_cbdr *cbdr)
{
@@ -57,6 +58,7 @@ void enetc_teardown_cbdr(struct enetc_cbdr *cbdr)
cbdr->bd_base = NULL;
cbdr->dma_dev = NULL;
}
+EXPORT_SYMBOL_GPL(enetc_teardown_cbdr);
static void enetc_clean_cbdr(struct enetc_cbdr *ring)
{
@@ -127,6 +129,7 @@ int enetc_send_cmd(struct enetc_si *si, struct enetc_cbd *cbd)
return 0;
}
+EXPORT_SYMBOL_GPL(enetc_send_cmd);
int enetc_clear_mac_flt_entry(struct enetc_si *si, int index)
{
@@ -140,6 +143,7 @@ int enetc_clear_mac_flt_entry(struct enetc_si *si, int index)
return enetc_send_cmd(si, &cbd);
}
+EXPORT_SYMBOL_GPL(enetc_clear_mac_flt_entry);
int enetc_set_mac_flt_entry(struct enetc_si *si, int index,
char *mac_addr, int si_map)
@@ -165,6 +169,7 @@ int enetc_set_mac_flt_entry(struct enetc_si *si, int index,
return enetc_send_cmd(si, &cbd);
}
+EXPORT_SYMBOL_GPL(enetc_set_mac_flt_entry);
/* Set entry in RFS table */
int enetc_set_fs_entry(struct enetc_si *si, struct enetc_cmd_rfse *rfse,
@@ -197,6 +202,7 @@ int enetc_set_fs_entry(struct enetc_si *si, struct enetc_cmd_rfse *rfse,
return err;
}
+EXPORT_SYMBOL_GPL(enetc_set_fs_entry);
static int enetc_cmd_rss_table(struct enetc_si *si, u32 *table, int count,
bool read)
@@ -242,9 +248,11 @@ int enetc_get_rss_table(struct enetc_si *si, u32 *table, int count)
{
return enetc_cmd_rss_table(si, table, count, true);
}
+EXPORT_SYMBOL_GPL(enetc_get_rss_table);
/* Set RSS table */
int enetc_set_rss_table(struct enetc_si *si, const u32 *table, int count)
{
return enetc_cmd_rss_table(si, (u32 *)table, count, false);
}
+EXPORT_SYMBOL_GPL(enetc_set_rss_table);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
index c8369e3752b0..bca68edfbe9c 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/* Copyright 2017-2019 NXP */
+#include <linux/ethtool_netlink.h>
#include <linux/net_tstamp.h>
#include <linux/module.h>
#include "enetc.h"
@@ -197,7 +198,6 @@ static const char rx_ring_stats[][ETH_GSTRING_LEN] = {
"Rx ring %2d recycle failures",
"Rx ring %2d redirects",
"Rx ring %2d redirect failures",
- "Rx ring %2d redirect S/G",
};
static const char tx_ring_stats[][ETH_GSTRING_LEN] = {
@@ -291,7 +291,6 @@ static void enetc_get_ethtool_stats(struct net_device *ndev,
data[o++] = priv->rx_ring[i]->stats.recycle_failures;
data[o++] = priv->rx_ring[i]->stats.xdp_redirect;
data[o++] = priv->rx_ring[i]->stats.xdp_redirect_failures;
- data[o++] = priv->rx_ring[i]->stats.xdp_redirect_sg;
}
if (!enetc_si_is_pf(priv->si))
@@ -301,14 +300,32 @@ static void enetc_get_ethtool_stats(struct net_device *ndev,
data[o++] = enetc_port_rd(hw, enetc_port_counters[i].reg);
}
+static void enetc_pause_stats(struct enetc_hw *hw, int mac,
+ struct ethtool_pause_stats *pause_stats)
+{
+ pause_stats->tx_pause_frames = enetc_port_rd(hw, ENETC_PM_TXPF(mac));
+ pause_stats->rx_pause_frames = enetc_port_rd(hw, ENETC_PM_RXPF(mac));
+}
+
static void enetc_get_pause_stats(struct net_device *ndev,
struct ethtool_pause_stats *pause_stats)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
struct enetc_hw *hw = &priv->si->hw;
+ struct enetc_si *si = priv->si;
- pause_stats->tx_pause_frames = enetc_port_rd(hw, ENETC_PM_TXPF(0));
- pause_stats->rx_pause_frames = enetc_port_rd(hw, ENETC_PM_RXPF(0));
+ switch (pause_stats->src) {
+ case ETHTOOL_MAC_STATS_SRC_EMAC:
+ enetc_pause_stats(hw, 0, pause_stats);
+ break;
+ case ETHTOOL_MAC_STATS_SRC_PMAC:
+ if (si->hw_features & ENETC_SI_F_QBU)
+ enetc_pause_stats(hw, 1, pause_stats);
+ break;
+ case ETHTOOL_MAC_STATS_SRC_AGGREGATE:
+ ethtool_aggregate_pause_stats(ndev, pause_stats);
+ break;
+ }
}
static void enetc_mac_stats(struct enetc_hw *hw, int mac,
@@ -385,8 +402,20 @@ static void enetc_get_eth_mac_stats(struct net_device *ndev,
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
struct enetc_hw *hw = &priv->si->hw;
+ struct enetc_si *si = priv->si;
- enetc_mac_stats(hw, 0, mac_stats);
+ switch (mac_stats->src) {
+ case ETHTOOL_MAC_STATS_SRC_EMAC:
+ enetc_mac_stats(hw, 0, mac_stats);
+ break;
+ case ETHTOOL_MAC_STATS_SRC_PMAC:
+ if (si->hw_features & ENETC_SI_F_QBU)
+ enetc_mac_stats(hw, 1, mac_stats);
+ break;
+ case ETHTOOL_MAC_STATS_SRC_AGGREGATE:
+ ethtool_aggregate_mac_stats(ndev, mac_stats);
+ break;
+ }
}
static void enetc_get_eth_ctrl_stats(struct net_device *ndev,
@@ -394,8 +423,20 @@ static void enetc_get_eth_ctrl_stats(struct net_device *ndev,
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
struct enetc_hw *hw = &priv->si->hw;
+ struct enetc_si *si = priv->si;
- enetc_ctrl_stats(hw, 0, ctrl_stats);
+ switch (ctrl_stats->src) {
+ case ETHTOOL_MAC_STATS_SRC_EMAC:
+ enetc_ctrl_stats(hw, 0, ctrl_stats);
+ break;
+ case ETHTOOL_MAC_STATS_SRC_PMAC:
+ if (si->hw_features & ENETC_SI_F_QBU)
+ enetc_ctrl_stats(hw, 1, ctrl_stats);
+ break;
+ case ETHTOOL_MAC_STATS_SRC_AGGREGATE:
+ ethtool_aggregate_ctrl_stats(ndev, ctrl_stats);
+ break;
+ }
}
static void enetc_get_rmon_stats(struct net_device *ndev,
@@ -404,8 +445,20 @@ static void enetc_get_rmon_stats(struct net_device *ndev,
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
struct enetc_hw *hw = &priv->si->hw;
+ struct enetc_si *si = priv->si;
- enetc_rmon_stats(hw, 0, rmon_stats, ranges);
+ switch (rmon_stats->src) {
+ case ETHTOOL_MAC_STATS_SRC_EMAC:
+ enetc_rmon_stats(hw, 0, rmon_stats, ranges);
+ break;
+ case ETHTOOL_MAC_STATS_SRC_PMAC:
+ if (si->hw_features & ENETC_SI_F_QBU)
+ enetc_rmon_stats(hw, 1, rmon_stats, ranges);
+ break;
+ case ETHTOOL_MAC_STATS_SRC_AGGREGATE:
+ ethtool_aggregate_rmon_stats(ndev, rmon_stats);
+ break;
+ }
}
#define ENETC_RSSHASH_L3 (RXH_L2DA | RXH_VLAN | RXH_L3_PROTO | RXH_IP_SRC | \
@@ -651,6 +704,7 @@ void enetc_set_rss_key(struct enetc_hw *hw, const u8 *bytes)
for (i = 0; i < ENETC_RSSHASH_KEY_SIZE / 4; i++)
enetc_port_wr(hw, ENETC_PRSSK(i), ((u32 *)bytes)[i]);
}
+EXPORT_SYMBOL_GPL(enetc_set_rss_key);
static int enetc_set_rxfh(struct net_device *ndev, const u32 *indir,
const u8 *key, const u8 hfunc)
@@ -864,6 +918,166 @@ static int enetc_set_link_ksettings(struct net_device *dev,
return phylink_ethtool_ksettings_set(priv->phylink, cmd);
}
+static void enetc_get_mm_stats(struct net_device *ndev,
+ struct ethtool_mm_stats *s)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
+ struct enetc_si *si = priv->si;
+
+ if (!(si->hw_features & ENETC_SI_F_QBU))
+ return;
+
+ s->MACMergeFrameAssErrorCount = enetc_port_rd(hw, ENETC_MMFAECR);
+ s->MACMergeFrameSmdErrorCount = enetc_port_rd(hw, ENETC_MMFSECR);
+ s->MACMergeFrameAssOkCount = enetc_port_rd(hw, ENETC_MMFAOCR);
+ s->MACMergeFragCountRx = enetc_port_rd(hw, ENETC_MMFCRXR);
+ s->MACMergeFragCountTx = enetc_port_rd(hw, ENETC_MMFCTXR);
+ s->MACMergeHoldCount = enetc_port_rd(hw, ENETC_MMHCR);
+}
+
+static int enetc_get_mm(struct net_device *ndev, struct ethtool_mm_state *state)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_si *si = priv->si;
+ struct enetc_hw *hw = &si->hw;
+ u32 lafs, rafs, val;
+
+ if (!(si->hw_features & ENETC_SI_F_QBU))
+ return -EOPNOTSUPP;
+
+ mutex_lock(&priv->mm_lock);
+
+ val = enetc_port_rd(hw, ENETC_PFPMR);
+ state->pmac_enabled = !!(val & ENETC_PFPMR_PMACE);
+
+ val = enetc_port_rd(hw, ENETC_MMCSR);
+
+ switch (ENETC_MMCSR_GET_VSTS(val)) {
+ case 0:
+ state->verify_status = ETHTOOL_MM_VERIFY_STATUS_DISABLED;
+ break;
+ case 2:
+ state->verify_status = ETHTOOL_MM_VERIFY_STATUS_VERIFYING;
+ break;
+ case 3:
+ state->verify_status = ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED;
+ break;
+ case 4:
+ state->verify_status = ETHTOOL_MM_VERIFY_STATUS_FAILED;
+ break;
+ case 5:
+ default:
+ state->verify_status = ETHTOOL_MM_VERIFY_STATUS_UNKNOWN;
+ break;
+ }
+
+ rafs = ENETC_MMCSR_GET_RAFS(val);
+ state->tx_min_frag_size = ethtool_mm_frag_size_add_to_min(rafs);
+ lafs = ENETC_MMCSR_GET_LAFS(val);
+ state->rx_min_frag_size = ethtool_mm_frag_size_add_to_min(lafs);
+ state->tx_enabled = !!(val & ENETC_MMCSR_LPE); /* mirror of MMCSR_ME */
+ state->tx_active = !!(val & ENETC_MMCSR_LPA);
+ state->verify_enabled = !(val & ENETC_MMCSR_VDIS);
+ state->verify_time = ENETC_MMCSR_GET_VT(val);
+ /* A verifyTime of 128 ms would exceed the 7 bit width
+ * of the ENETC_MMCSR_VT field
+ */
+ state->max_verify_time = 127;
+
+ mutex_unlock(&priv->mm_lock);
+
+ return 0;
+}
+
+static int enetc_set_mm(struct net_device *ndev, struct ethtool_mm_cfg *cfg,
+ struct netlink_ext_ack *extack)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
+ struct enetc_si *si = priv->si;
+ u32 val, add_frag_size;
+ int err;
+
+ if (!(si->hw_features & ENETC_SI_F_QBU))
+ return -EOPNOTSUPP;
+
+ err = ethtool_mm_frag_size_min_to_add(cfg->tx_min_frag_size,
+ &add_frag_size, extack);
+ if (err)
+ return err;
+
+ mutex_lock(&priv->mm_lock);
+
+ val = enetc_port_rd(hw, ENETC_PFPMR);
+ if (cfg->pmac_enabled)
+ val |= ENETC_PFPMR_PMACE;
+ else
+ val &= ~ENETC_PFPMR_PMACE;
+ enetc_port_wr(hw, ENETC_PFPMR, val);
+
+ val = enetc_port_rd(hw, ENETC_MMCSR);
+
+ if (cfg->verify_enabled)
+ val &= ~ENETC_MMCSR_VDIS;
+ else
+ val |= ENETC_MMCSR_VDIS;
+
+ if (cfg->tx_enabled)
+ priv->active_offloads |= ENETC_F_QBU;
+ else
+ priv->active_offloads &= ~ENETC_F_QBU;
+
+ /* If link is up, enable MAC Merge right away */
+ if (!!(priv->active_offloads & ENETC_F_QBU) &&
+ !(val & ENETC_MMCSR_LINK_FAIL))
+ val |= ENETC_MMCSR_ME;
+
+ val &= ~ENETC_MMCSR_VT_MASK;
+ val |= ENETC_MMCSR_VT(cfg->verify_time);
+
+ val &= ~ENETC_MMCSR_RAFS_MASK;
+ val |= ENETC_MMCSR_RAFS(add_frag_size);
+
+ enetc_port_wr(hw, ENETC_MMCSR, val);
+
+ mutex_unlock(&priv->mm_lock);
+
+ return 0;
+}
+
+/* When the link is lost, the verification state machine goes to the FAILED
+ * state and doesn't restart on its own after a new link up event.
+ * According to 802.3 Figure 99-8 - Verify state diagram, the LINK_FAIL bit
+ * should have been sufficient to re-trigger verification, but for ENETC it
+ * doesn't. As a workaround, we need to toggle the Merge Enable bit to
+ * re-trigger verification when link comes up.
+ */
+void enetc_mm_link_state_update(struct enetc_ndev_priv *priv, bool link)
+{
+ struct enetc_hw *hw = &priv->si->hw;
+ u32 val;
+
+ mutex_lock(&priv->mm_lock);
+
+ val = enetc_port_rd(hw, ENETC_MMCSR);
+
+ if (link) {
+ val &= ~ENETC_MMCSR_LINK_FAIL;
+ if (priv->active_offloads & ENETC_F_QBU)
+ val |= ENETC_MMCSR_ME;
+ } else {
+ val |= ENETC_MMCSR_LINK_FAIL;
+ if (priv->active_offloads & ENETC_F_QBU)
+ val &= ~ENETC_MMCSR_ME;
+ }
+
+ enetc_port_wr(hw, ENETC_MMCSR, val);
+
+ mutex_unlock(&priv->mm_lock);
+}
+EXPORT_SYMBOL_GPL(enetc_mm_link_state_update);
+
static const struct ethtool_ops enetc_pf_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES |
@@ -894,6 +1108,9 @@ static const struct ethtool_ops enetc_pf_ethtool_ops = {
.set_wol = enetc_set_wol,
.get_pauseparam = enetc_get_pauseparam,
.set_pauseparam = enetc_set_pauseparam,
+ .get_mm = enetc_get_mm,
+ .set_mm = enetc_set_mm,
+ .get_mm_stats = enetc_get_mm_stats,
};
static const struct ethtool_ops enetc_vf_ethtool_ops = {
@@ -926,3 +1143,4 @@ void enetc_set_ethtool_ops(struct net_device *ndev)
else
ndev->ethtool_ops = &enetc_vf_ethtool_ops;
}
+EXPORT_SYMBOL_GPL(enetc_set_ethtool_ops);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_hw.h b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
index 18ca1f42b1f7..de2e0ee8cdcb 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_hw.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
@@ -18,9 +18,10 @@
#define ENETC_SICTR0 0x18
#define ENETC_SICTR1 0x1c
#define ENETC_SIPCAPR0 0x20
-#define ENETC_SIPCAPR0_QBV BIT(4)
#define ENETC_SIPCAPR0_PSFP BIT(9)
#define ENETC_SIPCAPR0_RSS BIT(8)
+#define ENETC_SIPCAPR0_QBV BIT(4)
+#define ENETC_SIPCAPR0_QBU BIT(3)
#define ENETC_SIPCAPR1 0x24
#define ENETC_SITGTGR 0x30
#define ENETC_SIRBGCR 0x38
@@ -213,7 +214,6 @@ enum enetc_bdr_type {TX, RX};
#define ENETC_PSIRFSCFGR(n) (0x1814 + (n) * 4) /* n = SI index */
#define ENETC_PFPMR 0x1900
#define ENETC_PFPMR_PMACE BIT(1)
-#define ENETC_PFPMR_MWLM BIT(0)
#define ENETC_EMDIO_BASE 0x1c00
#define ENETC_PSIUMHFR0(n, err) (((err) ? 0x1d08 : 0x1d00) + (n) * 0x10)
#define ENETC_PSIUMHFR1(n) (0x1d04 + (n) * 0x10)
@@ -222,11 +222,35 @@ enum enetc_bdr_type {TX, RX};
#define ENETC_PSIVHFR0(n) (0x1e00 + (n) * 8) /* n = SI index */
#define ENETC_PSIVHFR1(n) (0x1e04 + (n) * 8) /* n = SI index */
#define ENETC_MMCSR 0x1f00
-#define ENETC_MMCSR_ME BIT(16)
+#define ENETC_MMCSR_LINK_FAIL BIT(31)
+#define ENETC_MMCSR_VT_MASK GENMASK(29, 23) /* Verify Time */
+#define ENETC_MMCSR_VT(x) (((x) << 23) & ENETC_MMCSR_VT_MASK)
+#define ENETC_MMCSR_GET_VT(x) (((x) & ENETC_MMCSR_VT_MASK) >> 23)
+#define ENETC_MMCSR_TXSTS_MASK GENMASK(22, 21) /* Merge Status */
+#define ENETC_MMCSR_GET_TXSTS(x) (((x) & ENETC_MMCSR_TXSTS_MASK) >> 21)
+#define ENETC_MMCSR_VSTS_MASK GENMASK(20, 18) /* Verify Status */
+#define ENETC_MMCSR_GET_VSTS(x) (((x) & ENETC_MMCSR_VSTS_MASK) >> 18)
+#define ENETC_MMCSR_VDIS BIT(17) /* Verify Disabled */
+#define ENETC_MMCSR_ME BIT(16) /* Merge Enabled */
+#define ENETC_MMCSR_RAFS_MASK GENMASK(9, 8) /* Remote Additional Fragment Size */
+#define ENETC_MMCSR_RAFS(x) (((x) << 8) & ENETC_MMCSR_RAFS_MASK)
+#define ENETC_MMCSR_GET_RAFS(x) (((x) & ENETC_MMCSR_RAFS_MASK) >> 8)
+#define ENETC_MMCSR_LAFS_MASK GENMASK(4, 3) /* Local Additional Fragment Size */
+#define ENETC_MMCSR_GET_LAFS(x) (((x) & ENETC_MMCSR_LAFS_MASK) >> 3)
+#define ENETC_MMCSR_LPA BIT(2) /* Local Preemption Active */
+#define ENETC_MMCSR_LPE BIT(1) /* Local Preemption Enabled */
+#define ENETC_MMCSR_LPS BIT(0) /* Local Preemption Supported */
+#define ENETC_MMFAECR 0x1f08
+#define ENETC_MMFSECR 0x1f0c
+#define ENETC_MMFAOCR 0x1f10
+#define ENETC_MMFCRXR 0x1f14
+#define ENETC_MMFCTXR 0x1f18
+#define ENETC_MMHCR 0x1f1c
#define ENETC_PTCMSDUR(n) (0x2020 + (n) * 4) /* n = TC index [0..7] */
+#define ENETC_PMAC_OFFSET 0x1000
+
#define ENETC_PM0_CMD_CFG 0x8008
-#define ENETC_PM1_CMD_CFG 0x9008
#define ENETC_PM0_TX_EN BIT(0)
#define ENETC_PM0_RX_EN BIT(1)
#define ENETC_PM0_PROMISC BIT(4)
@@ -245,11 +269,8 @@ enum enetc_bdr_type {TX, RX};
#define ENETC_PM0_PAUSE_QUANTA 0x8054
#define ENETC_PM0_PAUSE_THRESH 0x8064
-#define ENETC_PM1_PAUSE_QUANTA 0x9054
-#define ENETC_PM1_PAUSE_THRESH 0x9064
#define ENETC_PM0_SINGLE_STEP 0x80c0
-#define ENETC_PM1_SINGLE_STEP 0x90c0
#define ENETC_PM0_SINGLE_STEP_CH BIT(7)
#define ENETC_PM0_SINGLE_STEP_EN BIT(31)
#define ENETC_SET_SINGLE_STEP_OFFSET(v) (((v) & 0xff) << 8)
@@ -279,57 +300,57 @@ enum enetc_bdr_type {TX, RX};
/* Port MAC counters: Port MAC 0 corresponds to the eMAC and
* Port MAC 1 to the pMAC.
*/
-#define ENETC_PM_REOCT(mac) (0x8100 + 0x1000 * (mac))
-#define ENETC_PM_RALN(mac) (0x8110 + 0x1000 * (mac))
-#define ENETC_PM_RXPF(mac) (0x8118 + 0x1000 * (mac))
-#define ENETC_PM_RFRM(mac) (0x8120 + 0x1000 * (mac))
-#define ENETC_PM_RFCS(mac) (0x8128 + 0x1000 * (mac))
-#define ENETC_PM_RVLAN(mac) (0x8130 + 0x1000 * (mac))
-#define ENETC_PM_RERR(mac) (0x8138 + 0x1000 * (mac))
-#define ENETC_PM_RUCA(mac) (0x8140 + 0x1000 * (mac))
-#define ENETC_PM_RMCA(mac) (0x8148 + 0x1000 * (mac))
-#define ENETC_PM_RBCA(mac) (0x8150 + 0x1000 * (mac))
-#define ENETC_PM_RDRP(mac) (0x8158 + 0x1000 * (mac))
-#define ENETC_PM_RPKT(mac) (0x8160 + 0x1000 * (mac))
-#define ENETC_PM_RUND(mac) (0x8168 + 0x1000 * (mac))
-#define ENETC_PM_R64(mac) (0x8170 + 0x1000 * (mac))
-#define ENETC_PM_R127(mac) (0x8178 + 0x1000 * (mac))
-#define ENETC_PM_R255(mac) (0x8180 + 0x1000 * (mac))
-#define ENETC_PM_R511(mac) (0x8188 + 0x1000 * (mac))
-#define ENETC_PM_R1023(mac) (0x8190 + 0x1000 * (mac))
-#define ENETC_PM_R1522(mac) (0x8198 + 0x1000 * (mac))
-#define ENETC_PM_R1523X(mac) (0x81A0 + 0x1000 * (mac))
-#define ENETC_PM_ROVR(mac) (0x81A8 + 0x1000 * (mac))
-#define ENETC_PM_RJBR(mac) (0x81B0 + 0x1000 * (mac))
-#define ENETC_PM_RFRG(mac) (0x81B8 + 0x1000 * (mac))
-#define ENETC_PM_RCNP(mac) (0x81C0 + 0x1000 * (mac))
-#define ENETC_PM_RDRNTP(mac) (0x81C8 + 0x1000 * (mac))
-#define ENETC_PM_TEOCT(mac) (0x8200 + 0x1000 * (mac))
-#define ENETC_PM_TOCT(mac) (0x8208 + 0x1000 * (mac))
-#define ENETC_PM_TCRSE(mac) (0x8210 + 0x1000 * (mac))
-#define ENETC_PM_TXPF(mac) (0x8218 + 0x1000 * (mac))
-#define ENETC_PM_TFRM(mac) (0x8220 + 0x1000 * (mac))
-#define ENETC_PM_TFCS(mac) (0x8228 + 0x1000 * (mac))
-#define ENETC_PM_TVLAN(mac) (0x8230 + 0x1000 * (mac))
-#define ENETC_PM_TERR(mac) (0x8238 + 0x1000 * (mac))
-#define ENETC_PM_TUCA(mac) (0x8240 + 0x1000 * (mac))
-#define ENETC_PM_TMCA(mac) (0x8248 + 0x1000 * (mac))
-#define ENETC_PM_TBCA(mac) (0x8250 + 0x1000 * (mac))
-#define ENETC_PM_TPKT(mac) (0x8260 + 0x1000 * (mac))
-#define ENETC_PM_TUND(mac) (0x8268 + 0x1000 * (mac))
-#define ENETC_PM_T64(mac) (0x8270 + 0x1000 * (mac))
-#define ENETC_PM_T127(mac) (0x8278 + 0x1000 * (mac))
-#define ENETC_PM_T255(mac) (0x8280 + 0x1000 * (mac))
-#define ENETC_PM_T511(mac) (0x8288 + 0x1000 * (mac))
-#define ENETC_PM_T1023(mac) (0x8290 + 0x1000 * (mac))
-#define ENETC_PM_T1522(mac) (0x8298 + 0x1000 * (mac))
-#define ENETC_PM_T1523X(mac) (0x82A0 + 0x1000 * (mac))
-#define ENETC_PM_TCNP(mac) (0x82C0 + 0x1000 * (mac))
-#define ENETC_PM_TDFR(mac) (0x82D0 + 0x1000 * (mac))
-#define ENETC_PM_TMCOL(mac) (0x82D8 + 0x1000 * (mac))
-#define ENETC_PM_TSCOL(mac) (0x82E0 + 0x1000 * (mac))
-#define ENETC_PM_TLCOL(mac) (0x82E8 + 0x1000 * (mac))
-#define ENETC_PM_TECOL(mac) (0x82F0 + 0x1000 * (mac))
+#define ENETC_PM_REOCT(mac) (0x8100 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_RALN(mac) (0x8110 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_RXPF(mac) (0x8118 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_RFRM(mac) (0x8120 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_RFCS(mac) (0x8128 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_RVLAN(mac) (0x8130 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_RERR(mac) (0x8138 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_RUCA(mac) (0x8140 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_RMCA(mac) (0x8148 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_RBCA(mac) (0x8150 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_RDRP(mac) (0x8158 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_RPKT(mac) (0x8160 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_RUND(mac) (0x8168 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_R64(mac) (0x8170 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_R127(mac) (0x8178 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_R255(mac) (0x8180 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_R511(mac) (0x8188 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_R1023(mac) (0x8190 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_R1522(mac) (0x8198 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_R1523X(mac) (0x81A0 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_ROVR(mac) (0x81A8 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_RJBR(mac) (0x81B0 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_RFRG(mac) (0x81B8 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_RCNP(mac) (0x81C0 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_RDRNTP(mac) (0x81C8 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_TEOCT(mac) (0x8200 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_TOCT(mac) (0x8208 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_TCRSE(mac) (0x8210 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_TXPF(mac) (0x8218 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_TFRM(mac) (0x8220 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_TFCS(mac) (0x8228 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_TVLAN(mac) (0x8230 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_TERR(mac) (0x8238 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_TUCA(mac) (0x8240 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_TMCA(mac) (0x8248 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_TBCA(mac) (0x8250 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_TPKT(mac) (0x8260 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_TUND(mac) (0x8268 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_T64(mac) (0x8270 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_T127(mac) (0x8278 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_T255(mac) (0x8280 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_T511(mac) (0x8288 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_T1023(mac) (0x8290 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_T1522(mac) (0x8298 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_T1523X(mac) (0x82A0 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_TCNP(mac) (0x82C0 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_TDFR(mac) (0x82D0 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_TMCOL(mac) (0x82D8 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_TSCOL(mac) (0x82E0 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_TLCOL(mac) (0x82E8 + ENETC_PMAC_OFFSET * (mac))
+#define ENETC_PM_TECOL(mac) (0x82F0 + ENETC_PMAC_OFFSET * (mac))
/* Port counters */
#define ENETC_PICDR(n) (0x0700 + (n) * 8) /* n = [0..3] */
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_mdio.c b/drivers/net/ethernet/freescale/enetc/enetc_mdio.c
index 1c8f5cc6dec4..998aaa394e9c 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_mdio.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_mdio.c
@@ -55,7 +55,8 @@ static int enetc_mdio_wait_complete(struct enetc_mdio_priv *mdio_priv)
is_busy, !is_busy, 10, 10 * 1000);
}
-int enetc_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value)
+int enetc_mdio_write_c22(struct mii_bus *bus, int phy_id, int regnum,
+ u16 value)
{
struct enetc_mdio_priv *mdio_priv = bus->priv;
u32 mdio_ctl, mdio_cfg;
@@ -63,14 +64,39 @@ int enetc_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value)
int ret;
mdio_cfg = ENETC_EMDIO_CFG;
- if (regnum & MII_ADDR_C45) {
- dev_addr = (regnum >> 16) & 0x1f;
- mdio_cfg |= MDIO_CFG_ENC45;
- } else {
- /* clause 22 (ie 1G) */
- dev_addr = regnum & 0x1f;
- mdio_cfg &= ~MDIO_CFG_ENC45;
- }
+ dev_addr = regnum & 0x1f;
+ mdio_cfg &= ~MDIO_CFG_ENC45;
+
+ enetc_mdio_wr(mdio_priv, ENETC_MDIO_CFG, mdio_cfg);
+
+ ret = enetc_mdio_wait_complete(mdio_priv);
+ if (ret)
+ return ret;
+
+ /* set port and dev addr */
+ mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr);
+ enetc_mdio_wr(mdio_priv, ENETC_MDIO_CTL, mdio_ctl);
+
+ /* write the value */
+ enetc_mdio_wr(mdio_priv, ENETC_MDIO_DATA, value);
+
+ ret = enetc_mdio_wait_complete(mdio_priv);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(enetc_mdio_write_c22);
+
+int enetc_mdio_write_c45(struct mii_bus *bus, int phy_id, int dev_addr,
+ int regnum, u16 value)
+{
+ struct enetc_mdio_priv *mdio_priv = bus->priv;
+ u32 mdio_ctl, mdio_cfg;
+ int ret;
+
+ mdio_cfg = ENETC_EMDIO_CFG;
+ mdio_cfg |= MDIO_CFG_ENC45;
enetc_mdio_wr(mdio_priv, ENETC_MDIO_CFG, mdio_cfg);
@@ -83,13 +109,11 @@ int enetc_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value)
enetc_mdio_wr(mdio_priv, ENETC_MDIO_CTL, mdio_ctl);
/* set the register address */
- if (regnum & MII_ADDR_C45) {
- enetc_mdio_wr(mdio_priv, ENETC_MDIO_ADDR, regnum & 0xffff);
+ enetc_mdio_wr(mdio_priv, ENETC_MDIO_ADDR, regnum & 0xffff);
- ret = enetc_mdio_wait_complete(mdio_priv);
- if (ret)
- return ret;
- }
+ ret = enetc_mdio_wait_complete(mdio_priv);
+ if (ret)
+ return ret;
/* write the value */
enetc_mdio_wr(mdio_priv, ENETC_MDIO_DATA, value);
@@ -100,9 +124,9 @@ int enetc_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value)
return 0;
}
-EXPORT_SYMBOL_GPL(enetc_mdio_write);
+EXPORT_SYMBOL_GPL(enetc_mdio_write_c45);
-int enetc_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
+int enetc_mdio_read_c22(struct mii_bus *bus, int phy_id, int regnum)
{
struct enetc_mdio_priv *mdio_priv = bus->priv;
u32 mdio_ctl, mdio_cfg;
@@ -110,14 +134,51 @@ int enetc_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
int ret;
mdio_cfg = ENETC_EMDIO_CFG;
- if (regnum & MII_ADDR_C45) {
- dev_addr = (regnum >> 16) & 0x1f;
- mdio_cfg |= MDIO_CFG_ENC45;
- } else {
- dev_addr = regnum & 0x1f;
- mdio_cfg &= ~MDIO_CFG_ENC45;
+ dev_addr = regnum & 0x1f;
+ mdio_cfg &= ~MDIO_CFG_ENC45;
+
+ enetc_mdio_wr(mdio_priv, ENETC_MDIO_CFG, mdio_cfg);
+
+ ret = enetc_mdio_wait_complete(mdio_priv);
+ if (ret)
+ return ret;
+
+ /* set port and device addr */
+ mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr);
+ enetc_mdio_wr(mdio_priv, ENETC_MDIO_CTL, mdio_ctl);
+
+ /* initiate the read */
+ enetc_mdio_wr(mdio_priv, ENETC_MDIO_CTL, mdio_ctl | MDIO_CTL_READ);
+
+ ret = enetc_mdio_wait_complete(mdio_priv);
+ if (ret)
+ return ret;
+
+ /* return all Fs if nothing was there */
+ if (enetc_mdio_rd(mdio_priv, ENETC_MDIO_CFG) & MDIO_CFG_RD_ER) {
+ dev_dbg(&bus->dev,
+ "Error while reading PHY%d reg at %d.%d\n",
+ phy_id, dev_addr, regnum);
+ return 0xffff;
}
+ value = enetc_mdio_rd(mdio_priv, ENETC_MDIO_DATA) & 0xffff;
+
+ return value;
+}
+EXPORT_SYMBOL_GPL(enetc_mdio_read_c22);
+
+int enetc_mdio_read_c45(struct mii_bus *bus, int phy_id, int dev_addr,
+ int regnum)
+{
+ struct enetc_mdio_priv *mdio_priv = bus->priv;
+ u32 mdio_ctl, mdio_cfg;
+ u16 value;
+ int ret;
+
+ mdio_cfg = ENETC_EMDIO_CFG;
+ mdio_cfg |= MDIO_CFG_ENC45;
+
enetc_mdio_wr(mdio_priv, ENETC_MDIO_CFG, mdio_cfg);
ret = enetc_mdio_wait_complete(mdio_priv);
@@ -129,13 +190,11 @@ int enetc_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
enetc_mdio_wr(mdio_priv, ENETC_MDIO_CTL, mdio_ctl);
/* set the register address */
- if (regnum & MII_ADDR_C45) {
- enetc_mdio_wr(mdio_priv, ENETC_MDIO_ADDR, regnum & 0xffff);
+ enetc_mdio_wr(mdio_priv, ENETC_MDIO_ADDR, regnum & 0xffff);
- ret = enetc_mdio_wait_complete(mdio_priv);
- if (ret)
- return ret;
- }
+ ret = enetc_mdio_wait_complete(mdio_priv);
+ if (ret)
+ return ret;
/* initiate the read */
enetc_mdio_wr(mdio_priv, ENETC_MDIO_CTL, mdio_ctl | MDIO_CTL_READ);
@@ -156,7 +215,7 @@ int enetc_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
return value;
}
-EXPORT_SYMBOL_GPL(enetc_mdio_read);
+EXPORT_SYMBOL_GPL(enetc_mdio_read_c45);
struct enetc_hw *enetc_hw_alloc(struct device *dev, void __iomem *port_regs)
{
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c b/drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c
index dafb26f81f95..a1b595bd7993 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pci_mdio.c
@@ -39,8 +39,10 @@ static int enetc_pci_mdio_probe(struct pci_dev *pdev,
}
bus->name = ENETC_MDIO_BUS_NAME;
- bus->read = enetc_mdio_read;
- bus->write = enetc_mdio_write;
+ bus->read = enetc_mdio_read_c22;
+ bus->write = enetc_mdio_write_c22;
+ bus->read_c45 = enetc_mdio_read_c45;
+ bus->write_c45 = enetc_mdio_write_c45;
bus->parent = dev;
mdio_priv = bus->priv;
mdio_priv->hw = hw;
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
index 9f6c4f5c0a6c..7cd22d370caa 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
@@ -319,24 +319,23 @@ static int enetc_vlan_rx_del_vid(struct net_device *ndev, __be16 prot, u16 vid)
static void enetc_set_loopback(struct net_device *ndev, bool en)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
- struct enetc_hw *hw = &priv->si->hw;
+ struct enetc_si *si = priv->si;
u32 reg;
- reg = enetc_port_rd(hw, ENETC_PM0_IF_MODE);
+ reg = enetc_port_mac_rd(si, ENETC_PM0_IF_MODE);
if (reg & ENETC_PM0_IFM_RG) {
/* RGMII mode */
reg = (reg & ~ENETC_PM0_IFM_RLP) |
(en ? ENETC_PM0_IFM_RLP : 0);
- enetc_port_wr(hw, ENETC_PM0_IF_MODE, reg);
+ enetc_port_mac_wr(si, ENETC_PM0_IF_MODE, reg);
} else {
/* assume SGMII mode */
- reg = enetc_port_rd(hw, ENETC_PM0_CMD_CFG);
+ reg = enetc_port_mac_rd(si, ENETC_PM0_CMD_CFG);
reg = (reg & ~ENETC_PM0_CMD_XGLP) |
(en ? ENETC_PM0_CMD_XGLP : 0);
reg = (reg & ~ENETC_PM0_CMD_PHY_TX_EN) |
(en ? ENETC_PM0_CMD_PHY_TX_EN : 0);
- enetc_port_wr(hw, ENETC_PM0_CMD_CFG, reg);
- enetc_port_wr(hw, ENETC_PM1_CMD_CFG, reg);
+ enetc_port_mac_wr(si, ENETC_PM0_CMD_CFG, reg);
}
}
@@ -538,65 +537,50 @@ void enetc_reset_ptcmsdur(struct enetc_hw *hw)
enetc_port_wr(hw, ENETC_PTCMSDUR(tc), ENETC_MAC_MAXFRM_SIZE);
}
-static void enetc_configure_port_mac(struct enetc_hw *hw)
+static void enetc_configure_port_mac(struct enetc_si *si)
{
- enetc_port_wr(hw, ENETC_PM0_MAXFRM,
- ENETC_SET_MAXFRM(ENETC_RX_MAXFRM_SIZE));
+ struct enetc_hw *hw = &si->hw;
- enetc_reset_ptcmsdur(hw);
+ enetc_port_mac_wr(si, ENETC_PM0_MAXFRM,
+ ENETC_SET_MAXFRM(ENETC_RX_MAXFRM_SIZE));
- enetc_port_wr(hw, ENETC_PM0_CMD_CFG, ENETC_PM0_CMD_PHY_TX_EN |
- ENETC_PM0_CMD_TXP | ENETC_PM0_PROMISC);
+ enetc_reset_ptcmsdur(hw);
- enetc_port_wr(hw, ENETC_PM1_CMD_CFG, ENETC_PM0_CMD_PHY_TX_EN |
- ENETC_PM0_CMD_TXP | ENETC_PM0_PROMISC);
+ enetc_port_mac_wr(si, ENETC_PM0_CMD_CFG, ENETC_PM0_CMD_PHY_TX_EN |
+ ENETC_PM0_CMD_TXP | ENETC_PM0_PROMISC);
/* On LS1028A, the MAC RX FIFO defaults to 2, which is too high
* and may lead to RX lock-up under traffic. Set it to 1 instead,
* as recommended by the hardware team.
*/
- enetc_port_wr(hw, ENETC_PM0_RX_FIFO, ENETC_PM0_RX_FIFO_VAL);
+ enetc_port_mac_wr(si, ENETC_PM0_RX_FIFO, ENETC_PM0_RX_FIFO_VAL);
}
-static void enetc_mac_config(struct enetc_hw *hw, phy_interface_t phy_mode)
+static void enetc_mac_config(struct enetc_si *si, phy_interface_t phy_mode)
{
u32 val;
if (phy_interface_mode_is_rgmii(phy_mode)) {
- val = enetc_port_rd(hw, ENETC_PM0_IF_MODE);
+ val = enetc_port_mac_rd(si, ENETC_PM0_IF_MODE);
val &= ~(ENETC_PM0_IFM_EN_AUTO | ENETC_PM0_IFM_IFMODE_MASK);
val |= ENETC_PM0_IFM_IFMODE_GMII | ENETC_PM0_IFM_RG;
- enetc_port_wr(hw, ENETC_PM0_IF_MODE, val);
+ enetc_port_mac_wr(si, ENETC_PM0_IF_MODE, val);
}
if (phy_mode == PHY_INTERFACE_MODE_USXGMII) {
val = ENETC_PM0_IFM_FULL_DPX | ENETC_PM0_IFM_IFMODE_XGMII;
- enetc_port_wr(hw, ENETC_PM0_IF_MODE, val);
+ enetc_port_mac_wr(si, ENETC_PM0_IF_MODE, val);
}
}
-static void enetc_mac_enable(struct enetc_hw *hw, bool en)
+static void enetc_mac_enable(struct enetc_si *si, bool en)
{
- u32 val = enetc_port_rd(hw, ENETC_PM0_CMD_CFG);
+ u32 val = enetc_port_mac_rd(si, ENETC_PM0_CMD_CFG);
val &= ~(ENETC_PM0_TX_EN | ENETC_PM0_RX_EN);
val |= en ? (ENETC_PM0_TX_EN | ENETC_PM0_RX_EN) : 0;
- enetc_port_wr(hw, ENETC_PM0_CMD_CFG, val);
- enetc_port_wr(hw, ENETC_PM1_CMD_CFG, val);
-}
-
-static void enetc_configure_port_pmac(struct enetc_hw *hw)
-{
- u32 temp;
-
- /* Set pMAC step lock */
- temp = enetc_port_rd(hw, ENETC_PFPMR);
- enetc_port_wr(hw, ENETC_PFPMR,
- temp | ENETC_PFPMR_PMACE | ENETC_PFPMR_MWLM);
-
- temp = enetc_port_rd(hw, ENETC_MMCSR);
- enetc_port_wr(hw, ENETC_MMCSR, temp | ENETC_MMCSR_ME);
+ enetc_port_mac_wr(si, ENETC_PM0_CMD_CFG, val);
}
static void enetc_configure_port(struct enetc_pf *pf)
@@ -604,9 +588,7 @@ static void enetc_configure_port(struct enetc_pf *pf)
u8 hash_key[ENETC_RSSHASH_KEY_SIZE];
struct enetc_hw *hw = &pf->si->hw;
- enetc_configure_port_pmac(hw);
-
- enetc_configure_port_mac(hw);
+ enetc_configure_port_mac(pf->si);
enetc_port_si_configure(pf->si);
@@ -825,6 +807,9 @@ static void enetc_pf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
ndev->hw_features |= NETIF_F_RXHASH;
ndev->priv_flags |= IFF_UNICAST_FLT;
+ ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT | NETDEV_XDP_ACT_RX_SG |
+ NETDEV_XDP_ACT_NDO_XMIT_SG;
if (si->hw_features & ENETC_SI_F_PSFP && !enetc_psfp_enable(priv)) {
priv->active_offloads |= ENETC_F_QCI;
@@ -848,8 +833,10 @@ static int enetc_mdio_probe(struct enetc_pf *pf, struct device_node *np)
return -ENOMEM;
bus->name = "Freescale ENETC MDIO Bus";
- bus->read = enetc_mdio_read;
- bus->write = enetc_mdio_write;
+ bus->read = enetc_mdio_read_c22;
+ bus->write = enetc_mdio_write_c22;
+ bus->read_c45 = enetc_mdio_read_c45;
+ bus->write_c45 = enetc_mdio_write_c45;
bus->parent = dev;
mdio_priv = bus->priv;
mdio_priv->hw = &pf->si->hw;
@@ -885,8 +872,10 @@ static int enetc_imdio_create(struct enetc_pf *pf)
return -ENOMEM;
bus->name = "Freescale ENETC internal MDIO Bus";
- bus->read = enetc_mdio_read;
- bus->write = enetc_mdio_write;
+ bus->read = enetc_mdio_read_c22;
+ bus->write = enetc_mdio_write_c22;
+ bus->read_c45 = enetc_mdio_read_c45;
+ bus->write_c45 = enetc_mdio_write_c45;
bus->parent = dev;
bus->phy_mask = ~0;
mdio_priv = bus->priv;
@@ -994,14 +983,14 @@ static void enetc_pl_mac_config(struct phylink_config *config,
{
struct enetc_pf *pf = phylink_to_enetc_pf(config);
- enetc_mac_config(&pf->si->hw, state->interface);
+ enetc_mac_config(pf->si, state->interface);
}
-static void enetc_force_rgmii_mac(struct enetc_hw *hw, int speed, int duplex)
+static void enetc_force_rgmii_mac(struct enetc_si *si, int speed, int duplex)
{
u32 old_val, val;
- old_val = val = enetc_port_rd(hw, ENETC_PM0_IF_MODE);
+ old_val = val = enetc_port_mac_rd(si, ENETC_PM0_IF_MODE);
if (speed == SPEED_1000) {
val &= ~ENETC_PM0_IFM_SSP_MASK;
@@ -1022,7 +1011,7 @@ static void enetc_force_rgmii_mac(struct enetc_hw *hw, int speed, int duplex)
if (val == old_val)
return;
- enetc_port_wr(hw, ENETC_PM0_IF_MODE, val);
+ enetc_port_mac_wr(si, ENETC_PM0_IF_MODE, val);
}
static void enetc_pl_mac_link_up(struct phylink_config *config,
@@ -1034,6 +1023,7 @@ static void enetc_pl_mac_link_up(struct phylink_config *config,
u32 pause_off_thresh = 0, pause_on_thresh = 0;
u32 init_quanta = 0, refresh_quanta = 0;
struct enetc_hw *hw = &pf->si->hw;
+ struct enetc_si *si = pf->si;
struct enetc_ndev_priv *priv;
u32 rbmr, cmd_cfg;
int idx;
@@ -1045,7 +1035,7 @@ static void enetc_pl_mac_link_up(struct phylink_config *config,
if (!phylink_autoneg_inband(mode) &&
phy_interface_mode_is_rgmii(interface))
- enetc_force_rgmii_mac(hw, speed, duplex);
+ enetc_force_rgmii_mac(si, speed, duplex);
/* Flow control */
for (idx = 0; idx < priv->num_rx_rings; idx++) {
@@ -1081,24 +1071,24 @@ static void enetc_pl_mac_link_up(struct phylink_config *config,
pause_off_thresh = 1 * ENETC_MAC_MAXFRM_SIZE;
}
- enetc_port_wr(hw, ENETC_PM0_PAUSE_QUANTA, init_quanta);
- enetc_port_wr(hw, ENETC_PM1_PAUSE_QUANTA, init_quanta);
- enetc_port_wr(hw, ENETC_PM0_PAUSE_THRESH, refresh_quanta);
- enetc_port_wr(hw, ENETC_PM1_PAUSE_THRESH, refresh_quanta);
+ enetc_port_mac_wr(si, ENETC_PM0_PAUSE_QUANTA, init_quanta);
+ enetc_port_mac_wr(si, ENETC_PM0_PAUSE_THRESH, refresh_quanta);
enetc_port_wr(hw, ENETC_PPAUONTR, pause_on_thresh);
enetc_port_wr(hw, ENETC_PPAUOFFTR, pause_off_thresh);
- cmd_cfg = enetc_port_rd(hw, ENETC_PM0_CMD_CFG);
+ cmd_cfg = enetc_port_mac_rd(si, ENETC_PM0_CMD_CFG);
if (rx_pause)
cmd_cfg &= ~ENETC_PM0_PAUSE_IGN;
else
cmd_cfg |= ENETC_PM0_PAUSE_IGN;
- enetc_port_wr(hw, ENETC_PM0_CMD_CFG, cmd_cfg);
- enetc_port_wr(hw, ENETC_PM1_CMD_CFG, cmd_cfg);
+ enetc_port_mac_wr(si, ENETC_PM0_CMD_CFG, cmd_cfg);
+
+ enetc_mac_enable(si, true);
- enetc_mac_enable(hw, true);
+ if (si->hw_features & ENETC_SI_F_QBU)
+ enetc_mm_link_state_update(priv, true);
}
static void enetc_pl_mac_link_down(struct phylink_config *config,
@@ -1106,8 +1096,15 @@ static void enetc_pl_mac_link_down(struct phylink_config *config,
phy_interface_t interface)
{
struct enetc_pf *pf = phylink_to_enetc_pf(config);
+ struct enetc_si *si = pf->si;
+ struct enetc_ndev_priv *priv;
- enetc_mac_enable(&pf->si->hw, false);
+ priv = netdev_priv(si->ndev);
+
+ if (si->hw_features & ENETC_SI_F_QBU)
+ enetc_mm_link_state_update(priv, false);
+
+ enetc_mac_enable(si, false);
}
static const struct phylink_mac_ops enetc_mac_phylink_ops = {
@@ -1300,6 +1297,8 @@ static int enetc_pf_probe(struct pci_dev *pdev,
priv = netdev_priv(ndev);
+ mutex_init(&priv->mm_lock);
+
enetc_init_si_rings_params(priv);
err = enetc_alloc_si_resources(priv);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
index fcebb54224c0..130ebf6853e6 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_qos.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
@@ -136,29 +136,21 @@ int enetc_setup_tc_taprio(struct net_device *ndev, void *type_data)
{
struct tc_taprio_qopt_offload *taprio = type_data;
struct enetc_ndev_priv *priv = netdev_priv(ndev);
- struct enetc_hw *hw = &priv->si->hw;
- struct enetc_bdr *tx_ring;
- int err;
- int i;
+ int err, i;
/* TSD and Qbv are mutually exclusive in hardware */
for (i = 0; i < priv->num_tx_rings; i++)
if (priv->tx_ring[i]->tsd_enable)
return -EBUSY;
- for (i = 0; i < priv->num_tx_rings; i++) {
- tx_ring = priv->tx_ring[i];
- tx_ring->prio = taprio->enable ? i : 0;
- enetc_set_bdr_prio(hw, tx_ring->index, tx_ring->prio);
- }
+ err = enetc_setup_tc_mqprio(ndev, &taprio->mqprio);
+ if (err)
+ return err;
err = enetc_setup_taprio(ndev, taprio);
if (err) {
- for (i = 0; i < priv->num_tx_rings; i++) {
- tx_ring = priv->tx_ring[i];
- tx_ring->prio = taprio->enable ? 0 : i;
- enetc_set_bdr_prio(hw, tx_ring->index, tx_ring->prio);
- }
+ taprio->mqprio.qopt.num_tc = 0;
+ enetc_setup_tc_mqprio(ndev, &taprio->mqprio);
}
return err;
@@ -1611,6 +1603,13 @@ int enetc_qos_query_caps(struct net_device *ndev, void *type_data)
struct enetc_si *si = priv->si;
switch (base->type) {
+ case TC_SETUP_QDISC_MQPRIO: {
+ struct tc_mqprio_caps *caps = base->caps;
+
+ caps->validate_queue_counts = true;
+
+ return 0;
+ }
case TC_SETUP_QDISC_TAPRIO: {
struct tc_taprio_caps *caps = base->caps;
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 2341597408d1..c73e25f8995e 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -56,12 +56,12 @@
#include <linux/fec.h>
#include <linux/of.h>
#include <linux/of_device.h>
-#include <linux/of_gpio.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/regulator/consumer.h>
#include <linux/if_vlan.h>
#include <linux/pinctrl/consumer.h>
+#include <linux/gpio/consumer.h>
#include <linux/prefetch.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
@@ -1987,47 +1987,74 @@ static int fec_enet_mdio_wait(struct fec_enet_private *fep)
return ret;
}
-static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
+static int fec_enet_mdio_read_c22(struct mii_bus *bus, int mii_id, int regnum)
{
struct fec_enet_private *fep = bus->priv;
struct device *dev = &fep->pdev->dev;
int ret = 0, frame_start, frame_addr, frame_op;
- bool is_c45 = !!(regnum & MII_ADDR_C45);
ret = pm_runtime_resume_and_get(dev);
if (ret < 0)
return ret;
- if (is_c45) {
- frame_start = FEC_MMFR_ST_C45;
+ /* C22 read */
+ frame_op = FEC_MMFR_OP_READ;
+ frame_start = FEC_MMFR_ST;
+ frame_addr = regnum;
- /* write address */
- frame_addr = (regnum >> 16);
- writel(frame_start | FEC_MMFR_OP_ADDR_WRITE |
- FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(frame_addr) |
- FEC_MMFR_TA | (regnum & 0xFFFF),
- fep->hwp + FEC_MII_DATA);
+ /* start a read op */
+ writel(frame_start | frame_op |
+ FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(frame_addr) |
+ FEC_MMFR_TA, fep->hwp + FEC_MII_DATA);
- /* wait for end of transfer */
- ret = fec_enet_mdio_wait(fep);
- if (ret) {
- netdev_err(fep->netdev, "MDIO address write timeout\n");
- goto out;
- }
+ /* wait for end of transfer */
+ ret = fec_enet_mdio_wait(fep);
+ if (ret) {
+ netdev_err(fep->netdev, "MDIO read timeout\n");
+ goto out;
+ }
- frame_op = FEC_MMFR_OP_READ_C45;
+ ret = FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA));
- } else {
- /* C22 read */
- frame_op = FEC_MMFR_OP_READ;
- frame_start = FEC_MMFR_ST;
- frame_addr = regnum;
+out:
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
+ return ret;
+}
+
+static int fec_enet_mdio_read_c45(struct mii_bus *bus, int mii_id,
+ int devad, int regnum)
+{
+ struct fec_enet_private *fep = bus->priv;
+ struct device *dev = &fep->pdev->dev;
+ int ret = 0, frame_start, frame_op;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0)
+ return ret;
+
+ frame_start = FEC_MMFR_ST_C45;
+
+ /* write address */
+ writel(frame_start | FEC_MMFR_OP_ADDR_WRITE |
+ FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(devad) |
+ FEC_MMFR_TA | (regnum & 0xFFFF),
+ fep->hwp + FEC_MII_DATA);
+
+ /* wait for end of transfer */
+ ret = fec_enet_mdio_wait(fep);
+ if (ret) {
+ netdev_err(fep->netdev, "MDIO address write timeout\n");
+ goto out;
}
+ frame_op = FEC_MMFR_OP_READ_C45;
+
/* start a read op */
writel(frame_start | frame_op |
- FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(frame_addr) |
- FEC_MMFR_TA, fep->hwp + FEC_MII_DATA);
+ FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(devad) |
+ FEC_MMFR_TA, fep->hwp + FEC_MII_DATA);
/* wait for end of transfer */
ret = fec_enet_mdio_wait(fep);
@@ -2045,45 +2072,69 @@ out:
return ret;
}
-static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
- u16 value)
+static int fec_enet_mdio_write_c22(struct mii_bus *bus, int mii_id, int regnum,
+ u16 value)
{
struct fec_enet_private *fep = bus->priv;
struct device *dev = &fep->pdev->dev;
int ret, frame_start, frame_addr;
- bool is_c45 = !!(regnum & MII_ADDR_C45);
ret = pm_runtime_resume_and_get(dev);
if (ret < 0)
return ret;
- if (is_c45) {
- frame_start = FEC_MMFR_ST_C45;
+ /* C22 write */
+ frame_start = FEC_MMFR_ST;
+ frame_addr = regnum;
+
+ /* start a write op */
+ writel(frame_start | FEC_MMFR_OP_WRITE |
+ FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(frame_addr) |
+ FEC_MMFR_TA | FEC_MMFR_DATA(value),
+ fep->hwp + FEC_MII_DATA);
+
+ /* wait for end of transfer */
+ ret = fec_enet_mdio_wait(fep);
+ if (ret)
+ netdev_err(fep->netdev, "MDIO write timeout\n");
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
+ return ret;
+}
+
+static int fec_enet_mdio_write_c45(struct mii_bus *bus, int mii_id,
+ int devad, int regnum, u16 value)
+{
+ struct fec_enet_private *fep = bus->priv;
+ struct device *dev = &fep->pdev->dev;
+ int ret, frame_start;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0)
+ return ret;
+
+ frame_start = FEC_MMFR_ST_C45;
- /* write address */
- frame_addr = (regnum >> 16);
- writel(frame_start | FEC_MMFR_OP_ADDR_WRITE |
- FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(frame_addr) |
- FEC_MMFR_TA | (regnum & 0xFFFF),
- fep->hwp + FEC_MII_DATA);
+ /* write address */
+ writel(frame_start | FEC_MMFR_OP_ADDR_WRITE |
+ FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(devad) |
+ FEC_MMFR_TA | (regnum & 0xFFFF),
+ fep->hwp + FEC_MII_DATA);
- /* wait for end of transfer */
- ret = fec_enet_mdio_wait(fep);
- if (ret) {
- netdev_err(fep->netdev, "MDIO address write timeout\n");
- goto out;
- }
- } else {
- /* C22 write */
- frame_start = FEC_MMFR_ST;
- frame_addr = regnum;
+ /* wait for end of transfer */
+ ret = fec_enet_mdio_wait(fep);
+ if (ret) {
+ netdev_err(fep->netdev, "MDIO address write timeout\n");
+ goto out;
}
/* start a write op */
writel(frame_start | FEC_MMFR_OP_WRITE |
- FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(frame_addr) |
- FEC_MMFR_TA | FEC_MMFR_DATA(value),
- fep->hwp + FEC_MII_DATA);
+ FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(devad) |
+ FEC_MMFR_TA | FEC_MMFR_DATA(value),
+ fep->hwp + FEC_MII_DATA);
/* wait for end of transfer */
ret = fec_enet_mdio_wait(fep);
@@ -2381,8 +2432,10 @@ static int fec_enet_mii_init(struct platform_device *pdev)
}
fep->mii_bus->name = "fec_enet_mii_bus";
- fep->mii_bus->read = fec_enet_mdio_read;
- fep->mii_bus->write = fec_enet_mdio_write;
+ fep->mii_bus->read = fec_enet_mdio_read_c22;
+ fep->mii_bus->write = fec_enet_mdio_write_c22;
+ fep->mii_bus->read_c45 = fec_enet_mdio_read_c45;
+ fep->mii_bus->write_c45 = fec_enet_mdio_write_c45;
snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
pdev->name, fep->dev_id + 1);
fep->mii_bus->priv = fep;
@@ -3982,10 +4035,10 @@ free_queue_mem:
#ifdef CONFIG_OF
static int fec_reset_phy(struct platform_device *pdev)
{
- int err, phy_reset;
- bool active_high = false;
+ struct gpio_desc *phy_reset;
int msec = 1, phy_post_delay = 0;
struct device_node *np = pdev->dev.of_node;
+ int err;
if (!np)
return 0;
@@ -3995,33 +4048,26 @@ static int fec_reset_phy(struct platform_device *pdev)
if (!err && msec > 1000)
msec = 1;
- phy_reset = of_get_named_gpio(np, "phy-reset-gpios", 0);
- if (phy_reset == -EPROBE_DEFER)
- return phy_reset;
- else if (!gpio_is_valid(phy_reset))
- return 0;
-
err = of_property_read_u32(np, "phy-reset-post-delay", &phy_post_delay);
/* valid reset duration should be less than 1s */
if (!err && phy_post_delay > 1000)
return -EINVAL;
- active_high = of_property_read_bool(np, "phy-reset-active-high");
+ phy_reset = devm_gpiod_get_optional(&pdev->dev, "phy-reset",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(phy_reset))
+ return dev_err_probe(&pdev->dev, PTR_ERR(phy_reset),
+ "failed to get phy-reset-gpios\n");
- err = devm_gpio_request_one(&pdev->dev, phy_reset,
- active_high ? GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW,
- "phy-reset");
- if (err) {
- dev_err(&pdev->dev, "failed to get phy-reset-gpios: %d\n", err);
- return err;
- }
+ if (!phy_reset)
+ return 0;
if (msec > 20)
msleep(msec);
else
usleep_range(msec * 1000, msec * 1000 + 1000);
- gpio_set_value_cansleep(phy_reset, !active_high);
+ gpiod_set_value_cansleep(phy_reset, 0);
if (!phy_post_delay)
return 0;
diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c
index 9349f841bd06..625c79d5636f 100644
--- a/drivers/net/ethernet/freescale/fman/fman_memac.c
+++ b/drivers/net/ethernet/freescale/fman/fman_memac.c
@@ -1055,6 +1055,9 @@ static struct phylink_pcs *memac_pcs_create(struct device_node *mac_node,
return ERR_PTR(-EPROBE_DEFER);
pcs = lynx_pcs_create(mdiodev);
+ if (!pcs)
+ mdio_device_free(mdiodev);
+
return pcs;
}
@@ -1152,13 +1155,12 @@ int memac_initialization(struct mac_device *mac_dev,
else
memac->sgmii_pcs = pcs;
- memac->serdes = devm_of_phy_get(mac_dev->dev, mac_node, "serdes");
- err = PTR_ERR(memac->serdes);
- if (err == -ENODEV || err == -ENOSYS) {
+ memac->serdes = devm_of_phy_optional_get(mac_dev->dev, mac_node,
+ "serdes");
+ if (!memac->serdes) {
dev_dbg(mac_dev->dev, "could not get (optional) serdes\n");
- memac->serdes = NULL;
} else if (IS_ERR(memac->serdes)) {
- dev_err_probe(mac_dev->dev, err, "could not get serdes\n");
+ err = PTR_ERR(memac->serdes);
goto _return_fm_mac_free;
}
diff --git a/drivers/net/ethernet/freescale/xgmac_mdio.c b/drivers/net/ethernet/freescale/xgmac_mdio.c
index d7d39a58cd80..a13b4ba4d6e1 100644
--- a/drivers/net/ethernet/freescale/xgmac_mdio.c
+++ b/drivers/net/ethernet/freescale/xgmac_mdio.c
@@ -128,30 +128,49 @@ static int xgmac_wait_until_done(struct device *dev,
return 0;
}
-/*
- * Write value to the PHY for this device to the register at regnum,waiting
- * until the write is done before it returns. All PHY configuration has to be
- * done through the TSEC1 MIIM regs.
- */
-static int xgmac_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value)
+static int xgmac_mdio_write_c22(struct mii_bus *bus, int phy_id, int regnum,
+ u16 value)
{
struct mdio_fsl_priv *priv = (struct mdio_fsl_priv *)bus->priv;
struct tgec_mdio_controller __iomem *regs = priv->mdio_base;
- uint16_t dev_addr;
+ bool endian = priv->is_little_endian;
+ u16 dev_addr = regnum & 0x1f;
u32 mdio_ctl, mdio_stat;
int ret;
+
+ mdio_stat = xgmac_read32(&regs->mdio_stat, endian);
+ mdio_stat &= ~MDIO_STAT_ENC;
+ xgmac_write32(mdio_stat, &regs->mdio_stat, endian);
+
+ ret = xgmac_wait_until_free(&bus->dev, regs, endian);
+ if (ret)
+ return ret;
+
+ /* Set the port and dev addr */
+ mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr);
+ xgmac_write32(mdio_ctl, &regs->mdio_ctl, endian);
+
+ /* Write the value to the register */
+ xgmac_write32(MDIO_DATA(value), &regs->mdio_data, endian);
+
+ ret = xgmac_wait_until_done(&bus->dev, regs, endian);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int xgmac_mdio_write_c45(struct mii_bus *bus, int phy_id, int dev_addr,
+ int regnum, u16 value)
+{
+ struct mdio_fsl_priv *priv = (struct mdio_fsl_priv *)bus->priv;
+ struct tgec_mdio_controller __iomem *regs = priv->mdio_base;
bool endian = priv->is_little_endian;
+ u32 mdio_ctl, mdio_stat;
+ int ret;
mdio_stat = xgmac_read32(&regs->mdio_stat, endian);
- if (regnum & MII_ADDR_C45) {
- /* Clause 45 (ie 10G) */
- dev_addr = (regnum >> 16) & 0x1f;
- mdio_stat |= MDIO_STAT_ENC;
- } else {
- /* Clause 22 (ie 1G) */
- dev_addr = regnum & 0x1f;
- mdio_stat &= ~MDIO_STAT_ENC;
- }
+ mdio_stat |= MDIO_STAT_ENC;
xgmac_write32(mdio_stat, &regs->mdio_stat, endian);
@@ -164,13 +183,11 @@ static int xgmac_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 val
xgmac_write32(mdio_ctl, &regs->mdio_ctl, endian);
/* Set the register address */
- if (regnum & MII_ADDR_C45) {
- xgmac_write32(regnum & 0xffff, &regs->mdio_addr, endian);
+ xgmac_write32(regnum & 0xffff, &regs->mdio_addr, endian);
- ret = xgmac_wait_until_free(&bus->dev, regs, endian);
- if (ret)
- return ret;
- }
+ ret = xgmac_wait_until_free(&bus->dev, regs, endian);
+ if (ret)
+ return ret;
/* Write the value to the register */
xgmac_write32(MDIO_DATA(value), &regs->mdio_data, endian);
@@ -182,31 +199,82 @@ static int xgmac_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 val
return 0;
}
-/*
- * Reads from register regnum in the PHY for device dev, returning the value.
+/* Reads from register regnum in the PHY for device dev, returning the value.
* Clears miimcom first. All PHY configuration has to be done through the
* TSEC1 MIIM regs.
*/
-static int xgmac_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
+static int xgmac_mdio_read_c22(struct mii_bus *bus, int phy_id, int regnum)
{
struct mdio_fsl_priv *priv = (struct mdio_fsl_priv *)bus->priv;
struct tgec_mdio_controller __iomem *regs = priv->mdio_base;
+ bool endian = priv->is_little_endian;
+ u16 dev_addr = regnum & 0x1f;
unsigned long flags;
- uint16_t dev_addr;
uint32_t mdio_stat;
uint32_t mdio_ctl;
int ret;
- bool endian = priv->is_little_endian;
mdio_stat = xgmac_read32(&regs->mdio_stat, endian);
- if (regnum & MII_ADDR_C45) {
- dev_addr = (regnum >> 16) & 0x1f;
- mdio_stat |= MDIO_STAT_ENC;
+ mdio_stat &= ~MDIO_STAT_ENC;
+ xgmac_write32(mdio_stat, &regs->mdio_stat, endian);
+
+ ret = xgmac_wait_until_free(&bus->dev, regs, endian);
+ if (ret)
+ return ret;
+
+ /* Set the Port and Device Addrs */
+ mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr);
+ xgmac_write32(mdio_ctl, &regs->mdio_ctl, endian);
+
+ if (priv->has_a009885)
+ /* Once the operation completes, i.e. MDIO_STAT_BSY clears, we
+ * must read back the data register within 16 MDC cycles.
+ */
+ local_irq_save(flags);
+
+ /* Initiate the read */
+ xgmac_write32(mdio_ctl | MDIO_CTL_READ, &regs->mdio_ctl, endian);
+
+ ret = xgmac_wait_until_done(&bus->dev, regs, endian);
+ if (ret)
+ goto irq_restore;
+
+ /* Return all Fs if nothing was there */
+ if ((xgmac_read32(&regs->mdio_stat, endian) & MDIO_STAT_RD_ER) &&
+ !priv->has_a011043) {
+ dev_dbg(&bus->dev,
+ "Error while reading PHY%d reg at %d.%d\n",
+ phy_id, dev_addr, regnum);
+ ret = 0xffff;
} else {
- dev_addr = regnum & 0x1f;
- mdio_stat &= ~MDIO_STAT_ENC;
+ ret = xgmac_read32(&regs->mdio_data, endian) & 0xffff;
+ dev_dbg(&bus->dev, "read %04x\n", ret);
}
+irq_restore:
+ if (priv->has_a009885)
+ local_irq_restore(flags);
+
+ return ret;
+}
+
+/* Reads from register regnum in the PHY for device dev, returning the value.
+ * Clears miimcom first. All PHY configuration has to be done through the
+ * TSEC1 MIIM regs.
+ */
+static int xgmac_mdio_read_c45(struct mii_bus *bus, int phy_id, int dev_addr,
+ int regnum)
+{
+ struct mdio_fsl_priv *priv = (struct mdio_fsl_priv *)bus->priv;
+ struct tgec_mdio_controller __iomem *regs = priv->mdio_base;
+ bool endian = priv->is_little_endian;
+ u32 mdio_stat, mdio_ctl;
+ unsigned long flags;
+ int ret;
+
+ mdio_stat = xgmac_read32(&regs->mdio_stat, endian);
+ mdio_stat |= MDIO_STAT_ENC;
+
xgmac_write32(mdio_stat, &regs->mdio_stat, endian);
ret = xgmac_wait_until_free(&bus->dev, regs, endian);
@@ -218,13 +286,11 @@ static int xgmac_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
xgmac_write32(mdio_ctl, &regs->mdio_ctl, endian);
/* Set the register address */
- if (regnum & MII_ADDR_C45) {
- xgmac_write32(regnum & 0xffff, &regs->mdio_addr, endian);
+ xgmac_write32(regnum & 0xffff, &regs->mdio_addr, endian);
- ret = xgmac_wait_until_free(&bus->dev, regs, endian);
- if (ret)
- return ret;
- }
+ ret = xgmac_wait_until_free(&bus->dev, regs, endian);
+ if (ret)
+ return ret;
if (priv->has_a009885)
/* Once the operation completes, i.e. MDIO_STAT_BSY clears, we
@@ -326,10 +392,11 @@ static int xgmac_mdio_probe(struct platform_device *pdev)
return -ENOMEM;
bus->name = "Freescale XGMAC MDIO Bus";
- bus->read = xgmac_mdio_read;
- bus->write = xgmac_mdio_write;
+ bus->read = xgmac_mdio_read_c22;
+ bus->write = xgmac_mdio_write_c22;
+ bus->read_c45 = xgmac_mdio_read_c45;
+ bus->write_c45 = xgmac_mdio_write_c45;
bus->parent = &pdev->dev;
- bus->probe_capabilities = MDIOBUS_C22_C45;
snprintf(bus->id, MII_BUS_ID_SIZE, "%pa", &res->start);
priv = bus->priv;
diff --git a/drivers/net/ethernet/fungible/funeth/Kconfig b/drivers/net/ethernet/fungible/funeth/Kconfig
index c72ad9386400..e742e7663449 100644
--- a/drivers/net/ethernet/fungible/funeth/Kconfig
+++ b/drivers/net/ethernet/fungible/funeth/Kconfig
@@ -5,7 +5,7 @@
config FUN_ETH
tristate "Fungible Ethernet device driver"
- depends on PCI && PCI_MSI
+ depends on PCI_MSI
depends on TLS && TLS_DEVICE || TLS_DEVICE=n
select NET_DEVLINK
select FUN_CORE
diff --git a/drivers/net/ethernet/fungible/funeth/funeth_main.c b/drivers/net/ethernet/fungible/funeth/funeth_main.c
index b4cce30e526a..df86770731ad 100644
--- a/drivers/net/ethernet/fungible/funeth/funeth_main.c
+++ b/drivers/net/ethernet/fungible/funeth/funeth_main.c
@@ -1160,6 +1160,11 @@ static int fun_xdp_setup(struct net_device *dev, struct netdev_bpf *xdp)
WRITE_ONCE(rxqs[i]->xdp_prog, prog);
}
+ if (prog)
+ xdp_features_set_redirect_target(dev, true);
+ else
+ xdp_features_clear_redirect_target(dev);
+
dev->max_mtu = prog ? XDP_MAX_MTU : FUN_MAX_MTU;
old_prog = xchg(&fp->xdp_prog, prog);
if (old_prog)
@@ -1765,6 +1770,7 @@ static int fun_create_netdev(struct fun_ethdev *ed, unsigned int portid)
netdev->vlan_features = netdev->features & VLAN_FEAT;
netdev->mpls_features = netdev->vlan_features;
netdev->hw_enc_features = netdev->hw_features;
+ netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT;
netdev->min_mtu = ETH_MIN_MTU;
netdev->max_mtu = FUN_MAX_MTU;
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index 5b40f9c53196..07111c241e0e 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -327,7 +327,6 @@ static int gve_napi_poll_dqo(struct napi_struct *napi, int budget)
static int gve_alloc_notify_blocks(struct gve_priv *priv)
{
int num_vecs_requested = priv->num_ntfy_blks + 1;
- char *name = priv->dev->name;
unsigned int active_cpus;
int vecs_enabled;
int i, j;
@@ -371,8 +370,8 @@ static int gve_alloc_notify_blocks(struct gve_priv *priv)
active_cpus = min_t(int, priv->num_ntfy_blks / 2, num_online_cpus());
/* Setup Management Vector - the last vector */
- snprintf(priv->mgmt_msix_name, sizeof(priv->mgmt_msix_name), "%s-mgmnt",
- name);
+ snprintf(priv->mgmt_msix_name, sizeof(priv->mgmt_msix_name), "gve-mgmnt@pci:%s",
+ pci_name(priv->pdev));
err = request_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector,
gve_mgmnt_intr, 0, priv->mgmt_msix_name, priv);
if (err) {
@@ -401,8 +400,8 @@ static int gve_alloc_notify_blocks(struct gve_priv *priv)
struct gve_notify_block *block = &priv->ntfy_blocks[i];
int msix_idx = i;
- snprintf(block->name, sizeof(block->name), "%s-ntfy-block.%d",
- name, i);
+ snprintf(block->name, sizeof(block->name), "gve-ntfy-blk%d@pci:%s",
+ i, pci_name(priv->pdev));
block->priv = priv;
err = request_irq(priv->msix_vectors[msix_idx].vector,
gve_is_gqi(priv) ? gve_intr : gve_intr_dqo,
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
index 740850b64aff..5df19c604d09 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
@@ -554,11 +554,11 @@ static phy_interface_t hns_mac_get_phy_if_acpi(struct hns_mac_cb *mac_cb)
argv4.package.count = 1;
argv4.package.elements = &obj_args;
- obj = acpi_evaluate_dsm(ACPI_HANDLE(mac_cb->dev),
- &hns_dsaf_acpi_dsm_guid, 0,
- HNS_OP_GET_PORT_TYPE_FUNC, &argv4);
-
- if (!obj || obj->type != ACPI_TYPE_INTEGER)
+ obj = acpi_evaluate_dsm_typed(ACPI_HANDLE(mac_cb->dev),
+ &hns_dsaf_acpi_dsm_guid, 0,
+ HNS_OP_GET_PORT_TYPE_FUNC, &argv4,
+ ACPI_TYPE_INTEGER);
+ if (!obj)
return phy_if;
phy_if = obj->integer.value ?
@@ -601,11 +601,11 @@ static int hns_mac_get_sfp_prsnt_acpi(struct hns_mac_cb *mac_cb, int *sfp_prsnt)
argv4.package.count = 1;
argv4.package.elements = &obj_args;
- obj = acpi_evaluate_dsm(ACPI_HANDLE(mac_cb->dev),
- &hns_dsaf_acpi_dsm_guid, 0,
- HNS_OP_GET_SFP_STAT_FUNC, &argv4);
-
- if (!obj || obj->type != ACPI_TYPE_INTEGER)
+ obj = acpi_evaluate_dsm_typed(ACPI_HANDLE(mac_cb->dev),
+ &hns_dsaf_acpi_dsm_guid, 0,
+ HNS_OP_GET_SFP_STAT_FUNC, &argv4,
+ ACPI_TYPE_INTEGER);
+ if (!obj)
return -ENODEV;
*sfp_prsnt = obj->integer.value;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index 17137de9338c..40f4306449eb 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -32,6 +32,7 @@
#include <linux/pkt_sched.h>
#include <linux/types.h>
#include <net/pkt_cls.h>
+#include <net/pkt_sched.h>
#define HNAE3_MOD_VERSION "1.0"
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index b4c4fb873568..25be7f8ac7cd 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -20,6 +20,7 @@
#include <net/gro.h>
#include <net/ip6_checksum.h>
#include <net/pkt_cls.h>
+#include <net/pkt_sched.h>
#include <net/tcp.h>
#include <net/vxlan.h>
#include <net/geneve.h>
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
index 142415c84c6b..a0b46e7d863e 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
@@ -2,6 +2,7 @@
/* Copyright (c) 2018-2019 Hisilicon Limited. */
#include <linux/device.h>
+#include <linux/sched/clock.h>
#include "hclge_debugfs.h"
#include "hclge_err.h"
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c
index 3d3b69605423..9a939c0b217f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c
@@ -114,7 +114,6 @@ int hclge_devlink_init(struct hclge_dev *hdev)
priv->hdev = hdev;
hdev->devlink = devlink;
- devlink_set_features(devlink, DEVLINK_F_RELOAD);
devlink_register(devlink);
return 0;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
index 6efd768cc07c..3f35227ef1fa 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0+
/* Copyright (c) 2016-2017 Hisilicon Limited. */
+#include <linux/sched/clock.h>
+
#include "hclge_err.h"
static const struct hclge_hw_error hclge_imp_tcm_ecc_int[] = {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.c
index a6c3c5e8f0ab..1b535142c65a 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.c
@@ -116,7 +116,6 @@ int hclgevf_devlink_init(struct hclgevf_dev *hdev)
priv->hdev = hdev;
hdev->devlink = devlink;
- devlink_set_features(devlink, DEVLINK_F_RELOAD);
devlink_register(devlink);
return 0;
}
diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c
index c2ae1b4f9a5f..9232caaf0bdc 100644
--- a/drivers/net/ethernet/hisilicon/hns_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns_mdio.c
@@ -206,7 +206,7 @@ static void hns_mdio_cmd_write(struct hns_mdio_device *mdio_dev,
}
/**
- * hns_mdio_write - access phy register
+ * hns_mdio_write_c22 - access phy register
* @bus: mdio bus
* @phy_id: phy id
* @regnum: register num
@@ -214,21 +214,19 @@ static void hns_mdio_cmd_write(struct hns_mdio_device *mdio_dev,
*
* Return 0 on success, negative on failure
*/
-static int hns_mdio_write(struct mii_bus *bus,
- int phy_id, int regnum, u16 data)
+static int hns_mdio_write_c22(struct mii_bus *bus,
+ int phy_id, int regnum, u16 data)
{
- int ret;
struct hns_mdio_device *mdio_dev = (struct hns_mdio_device *)bus->priv;
- u8 devad = ((regnum >> 16) & 0x1f);
- u8 is_c45 = !!(regnum & MII_ADDR_C45);
u16 reg = (u16)(regnum & 0xffff);
- u8 op;
u16 cmd_reg_cfg;
+ int ret;
+ u8 op;
dev_dbg(&bus->dev, "mdio write %s,base is %p\n",
bus->id, mdio_dev->vbase);
- dev_dbg(&bus->dev, "phy id=%d, is_c45=%d, devad=%d, reg=%#x, write data=%d\n",
- phy_id, is_c45, devad, reg, data);
+ dev_dbg(&bus->dev, "phy id=%d, reg=%#x, write data=%d\n",
+ phy_id, reg, data);
/* wait for ready */
ret = hns_mdio_wait_ready(bus);
@@ -237,58 +235,91 @@ static int hns_mdio_write(struct mii_bus *bus,
return ret;
}
- if (!is_c45) {
- cmd_reg_cfg = reg;
- op = MDIO_C22_WRITE;
- } else {
- /* config the cmd-reg to write addr*/
- MDIO_SET_REG_FIELD(mdio_dev, MDIO_ADDR_REG, MDIO_ADDR_DATA_M,
- MDIO_ADDR_DATA_S, reg);
+ cmd_reg_cfg = reg;
+ op = MDIO_C22_WRITE;
- hns_mdio_cmd_write(mdio_dev, is_c45,
- MDIO_C45_WRITE_ADDR, phy_id, devad);
+ MDIO_SET_REG_FIELD(mdio_dev, MDIO_WDATA_REG, MDIO_WDATA_DATA_M,
+ MDIO_WDATA_DATA_S, data);
- /* check for read or write opt is finished */
- ret = hns_mdio_wait_ready(bus);
- if (ret) {
- dev_err(&bus->dev, "MDIO bus is busy\n");
- return ret;
- }
+ hns_mdio_cmd_write(mdio_dev, false, op, phy_id, cmd_reg_cfg);
+
+ return 0;
+}
+
+/**
+ * hns_mdio_write_c45 - access phy register
+ * @bus: mdio bus
+ * @phy_id: phy id
+ * @devad: device address to read
+ * @regnum: register num
+ * @data: register value
+ *
+ * Return 0 on success, negative on failure
+ */
+static int hns_mdio_write_c45(struct mii_bus *bus, int phy_id, int devad,
+ int regnum, u16 data)
+{
+ struct hns_mdio_device *mdio_dev = (struct hns_mdio_device *)bus->priv;
+ u16 reg = (u16)(regnum & 0xffff);
+ u16 cmd_reg_cfg;
+ int ret;
+ u8 op;
+
+ dev_dbg(&bus->dev, "mdio write %s,base is %p\n",
+ bus->id, mdio_dev->vbase);
+ dev_dbg(&bus->dev, "phy id=%d, devad=%d, reg=%#x, write data=%d\n",
+ phy_id, devad, reg, data);
+
+ /* wait for ready */
+ ret = hns_mdio_wait_ready(bus);
+ if (ret) {
+ dev_err(&bus->dev, "MDIO bus is busy\n");
+ return ret;
+ }
+
+ /* config the cmd-reg to write addr*/
+ MDIO_SET_REG_FIELD(mdio_dev, MDIO_ADDR_REG, MDIO_ADDR_DATA_M,
+ MDIO_ADDR_DATA_S, reg);
- /* config the data needed writing */
- cmd_reg_cfg = devad;
- op = MDIO_C45_WRITE_DATA;
+ hns_mdio_cmd_write(mdio_dev, true, MDIO_C45_WRITE_ADDR, phy_id, devad);
+
+ /* check for read or write opt is finished */
+ ret = hns_mdio_wait_ready(bus);
+ if (ret) {
+ dev_err(&bus->dev, "MDIO bus is busy\n");
+ return ret;
}
+ /* config the data needed writing */
+ cmd_reg_cfg = devad;
+ op = MDIO_C45_WRITE_DATA;
+
MDIO_SET_REG_FIELD(mdio_dev, MDIO_WDATA_REG, MDIO_WDATA_DATA_M,
MDIO_WDATA_DATA_S, data);
- hns_mdio_cmd_write(mdio_dev, is_c45, op, phy_id, cmd_reg_cfg);
+ hns_mdio_cmd_write(mdio_dev, true, op, phy_id, cmd_reg_cfg);
return 0;
}
/**
- * hns_mdio_read - access phy register
+ * hns_mdio_read_c22 - access phy register
* @bus: mdio bus
* @phy_id: phy id
* @regnum: register num
*
* Return phy register value
*/
-static int hns_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
+static int hns_mdio_read_c22(struct mii_bus *bus, int phy_id, int regnum)
{
- int ret;
- u16 reg_val;
- u8 devad = ((regnum >> 16) & 0x1f);
- u8 is_c45 = !!(regnum & MII_ADDR_C45);
- u16 reg = (u16)(regnum & 0xffff);
struct hns_mdio_device *mdio_dev = (struct hns_mdio_device *)bus->priv;
+ u16 reg = (u16)(regnum & 0xffff);
+ u16 reg_val;
+ int ret;
dev_dbg(&bus->dev, "mdio read %s,base is %p\n",
bus->id, mdio_dev->vbase);
- dev_dbg(&bus->dev, "phy id=%d, is_c45=%d, devad=%d, reg=%#x!\n",
- phy_id, is_c45, devad, reg);
+ dev_dbg(&bus->dev, "phy id=%d, reg=%#x!\n", phy_id, reg);
/* Step 1: wait for ready */
ret = hns_mdio_wait_ready(bus);
@@ -297,29 +328,74 @@ static int hns_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
return ret;
}
- if (!is_c45) {
- hns_mdio_cmd_write(mdio_dev, is_c45,
- MDIO_C22_READ, phy_id, reg);
- } else {
- MDIO_SET_REG_FIELD(mdio_dev, MDIO_ADDR_REG, MDIO_ADDR_DATA_M,
- MDIO_ADDR_DATA_S, reg);
+ hns_mdio_cmd_write(mdio_dev, false, MDIO_C22_READ, phy_id, reg);
- /* Step 2; config the cmd-reg to write addr*/
- hns_mdio_cmd_write(mdio_dev, is_c45,
- MDIO_C45_WRITE_ADDR, phy_id, devad);
+ /* Step 2: waiting for MDIO_COMMAND_REG 's mdio_start==0,*/
+ /* check for read or write opt is finished */
+ ret = hns_mdio_wait_ready(bus);
+ if (ret) {
+ dev_err(&bus->dev, "MDIO bus is busy\n");
+ return ret;
+ }
- /* Step 3: check for read or write opt is finished */
- ret = hns_mdio_wait_ready(bus);
- if (ret) {
- dev_err(&bus->dev, "MDIO bus is busy\n");
- return ret;
- }
+ reg_val = MDIO_GET_REG_BIT(mdio_dev, MDIO_STA_REG, MDIO_STATE_STA_B);
+ if (reg_val) {
+ dev_err(&bus->dev, " ERROR! MDIO Read failed!\n");
+ return -EBUSY;
+ }
- hns_mdio_cmd_write(mdio_dev, is_c45,
- MDIO_C45_READ, phy_id, devad);
+ /* Step 3; get out data*/
+ reg_val = (u16)MDIO_GET_REG_FIELD(mdio_dev, MDIO_RDATA_REG,
+ MDIO_RDATA_DATA_M, MDIO_RDATA_DATA_S);
+
+ return reg_val;
+}
+
+/**
+ * hns_mdio_read_c45 - access phy register
+ * @bus: mdio bus
+ * @phy_id: phy id
+ * @devad: device address to read
+ * @regnum: register num
+ *
+ * Return phy register value
+ */
+static int hns_mdio_read_c45(struct mii_bus *bus, int phy_id, int devad,
+ int regnum)
+{
+ struct hns_mdio_device *mdio_dev = (struct hns_mdio_device *)bus->priv;
+ u16 reg = (u16)(regnum & 0xffff);
+ u16 reg_val;
+ int ret;
+
+ dev_dbg(&bus->dev, "mdio read %s,base is %p\n",
+ bus->id, mdio_dev->vbase);
+ dev_dbg(&bus->dev, "phy id=%d, devad=%d, reg=%#x!\n",
+ phy_id, devad, reg);
+
+ /* Step 1: wait for ready */
+ ret = hns_mdio_wait_ready(bus);
+ if (ret) {
+ dev_err(&bus->dev, "MDIO bus is busy\n");
+ return ret;
+ }
+
+ MDIO_SET_REG_FIELD(mdio_dev, MDIO_ADDR_REG, MDIO_ADDR_DATA_M,
+ MDIO_ADDR_DATA_S, reg);
+
+ /* Step 2; config the cmd-reg to write addr*/
+ hns_mdio_cmd_write(mdio_dev, true, MDIO_C45_WRITE_ADDR, phy_id, devad);
+
+ /* Step 3: check for read or write opt is finished */
+ ret = hns_mdio_wait_ready(bus);
+ if (ret) {
+ dev_err(&bus->dev, "MDIO bus is busy\n");
+ return ret;
}
- /* Step 5: waiting for MDIO_COMMAND_REG's mdio_start==0,*/
+ hns_mdio_cmd_write(mdio_dev, true, MDIO_C45_READ, phy_id, devad);
+
+ /* Step 5: waiting for MDIO_COMMAND_REG 's mdio_start==0,*/
/* check for read or write opt is finished */
ret = hns_mdio_wait_ready(bus);
if (ret) {
@@ -438,8 +514,10 @@ static int hns_mdio_probe(struct platform_device *pdev)
}
new_bus->name = MDIO_BUS_NAME;
- new_bus->read = hns_mdio_read;
- new_bus->write = hns_mdio_write;
+ new_bus->read = hns_mdio_read_c22;
+ new_bus->write = hns_mdio_write_c22;
+ new_bus->read_c45 = hns_mdio_read_c45;
+ new_bus->write_c45 = hns_mdio_write_c45;
new_bus->reset = hns_mdio_reset;
new_bus->priv = mdio_dev;
new_bus->parent = &pdev->dev;
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index e19a6bb3f444..146ca1d8031b 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -250,10 +250,11 @@ static void ibmvnic_set_affinity(struct ibmvnic_adapter *adapter)
struct ibmvnic_sub_crq_queue **rxqs = adapter->rx_scrq;
struct ibmvnic_sub_crq_queue **txqs = adapter->tx_scrq;
struct ibmvnic_sub_crq_queue *queue;
- int num_rxqs = adapter->num_active_rx_scrqs;
- int num_txqs = adapter->num_active_tx_scrqs;
+ int num_rxqs = adapter->num_active_rx_scrqs, i_rxqs = 0;
+ int num_txqs = adapter->num_active_tx_scrqs, i_txqs = 0;
int total_queues, stride, stragglers, i;
unsigned int num_cpu, cpu;
+ bool is_rx_queue;
int rc = 0;
netdev_dbg(adapter->netdev, "%s: Setting irq affinity hints", __func__);
@@ -273,14 +274,24 @@ static void ibmvnic_set_affinity(struct ibmvnic_adapter *adapter)
/* next available cpu to assign irq to */
cpu = cpumask_next(-1, cpu_online_mask);
- for (i = 0; i < num_txqs; i++) {
- queue = txqs[i];
+ for (i = 0; i < total_queues; i++) {
+ is_rx_queue = false;
+ /* balance core load by alternating rx and tx assignments
+ * ex: TX0 -> RX0 -> TX1 -> RX1 etc.
+ */
+ if ((i % 2 == 1 && i_rxqs < num_rxqs) || i_txqs == num_txqs) {
+ queue = rxqs[i_rxqs++];
+ is_rx_queue = true;
+ } else {
+ queue = txqs[i_txqs++];
+ }
+
rc = ibmvnic_set_queue_affinity(queue, &cpu, &stragglers,
stride);
if (rc)
goto out;
- if (!queue)
+ if (!queue || is_rx_queue)
continue;
rc = __netif_set_xps_queue(adapter->netdev,
@@ -291,14 +302,6 @@ static void ibmvnic_set_affinity(struct ibmvnic_adapter *adapter)
__func__, i, rc);
}
- for (i = 0; i < num_rxqs; i++) {
- queue = rxqs[i];
- rc = ibmvnic_set_queue_affinity(queue, &cpu, &stragglers,
- stride);
- if (rc)
- goto out;
- }
-
out:
if (rc) {
netdev_warn(adapter->netdev,
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index 3facb55b7161..a3c84bf05e44 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -337,6 +337,9 @@ config ICE_HWTS
the PTP clock driver precise cross-timestamp ioctl
(PTP_SYS_OFFSET_PRECISE).
+config ICE_GNSS
+ def_bool GNSS = y || GNSS = ICE
+
config FM10K
tristate "Intel(R) FM10000 Ethernet Switch Host Interface Support"
default n
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index 59e82d131d88..721f86fd5802 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -110,9 +110,9 @@ static const char e1000_gstrings_test[][ETH_GSTRING_LEN] = {
static int e1000_get_link_ksettings(struct net_device *netdev,
struct ethtool_link_ksettings *cmd)
{
+ u32 speed, supported, advertising, lp_advertising, lpa_t;
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- u32 speed, supported, advertising;
if (hw->phy.media_type == e1000_media_type_copper) {
supported = (SUPPORTED_10baseT_Half |
@@ -120,7 +120,9 @@ static int e1000_get_link_ksettings(struct net_device *netdev,
SUPPORTED_100baseT_Half |
SUPPORTED_100baseT_Full |
SUPPORTED_1000baseT_Full |
+ SUPPORTED_Asym_Pause |
SUPPORTED_Autoneg |
+ SUPPORTED_Pause |
SUPPORTED_TP);
if (hw->phy.type == e1000_phy_ife)
supported &= ~SUPPORTED_1000baseT_Full;
@@ -192,10 +194,16 @@ static int e1000_get_link_ksettings(struct net_device *netdev,
if (hw->phy.media_type != e1000_media_type_copper)
cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_INVALID;
+ lpa_t = mii_stat1000_to_ethtool_lpa_t(adapter->phy_regs.stat1000);
+ lp_advertising = lpa_t |
+ mii_lpa_to_ethtool_lpa_t(adapter->phy_regs.lpa);
+
ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
supported);
ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
advertising);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising,
+ lp_advertising);
return 0;
}
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 04acd1a992fa..e1eb1de88bf9 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -7418,9 +7418,6 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto err_pci_reg;
- /* AER (Advanced Error Reporting) hooks */
- pci_enable_pcie_error_reporting(pdev);
-
pci_set_master(pdev);
/* PCI config space info */
err = pci_save_state(pdev);
@@ -7708,7 +7705,6 @@ err_flashmap:
err_ioremap:
free_netdev(netdev);
err_alloc_etherdev:
- pci_disable_pcie_error_reporting(pdev);
pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
@@ -7775,9 +7771,6 @@ static void e1000_remove(struct pci_dev *pdev)
free_netdev(netdev);
- /* AER disable */
- pci_disable_pcie_error_reporting(pdev);
-
pci_disable_device(pdev);
}
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
index 060b263348ce..08c3d477dd6f 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.c
+++ b/drivers/net/ethernet/intel/e1000e/phy.c
@@ -2,6 +2,7 @@
/* Copyright(c) 1999 - 2018 Intel Corporation. */
#include "e1000.h"
+#include <linux/ethtool.h>
static s32 e1000_wait_autoneg(struct e1000_hw *hw);
static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset,
@@ -1011,6 +1012,8 @@ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
*/
mii_autoneg_adv_reg &=
~(ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP);
+ phy->autoneg_advertised &=
+ ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
break;
case e1000_fc_rx_pause:
/* Rx Flow control is enabled, and Tx Flow control is
@@ -1024,6 +1027,8 @@ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
*/
mii_autoneg_adv_reg |=
(ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP);
+ phy->autoneg_advertised |=
+ (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
break;
case e1000_fc_tx_pause:
/* Tx Flow control is enabled, and Rx Flow control is
@@ -1031,6 +1036,8 @@ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
*/
mii_autoneg_adv_reg |= ADVERTISE_PAUSE_ASYM;
mii_autoneg_adv_reg &= ~ADVERTISE_PAUSE_CAP;
+ phy->autoneg_advertised |= ADVERTISED_Asym_Pause;
+ phy->autoneg_advertised &= ~ADVERTISED_Pause;
break;
case e1000_fc_full:
/* Flow control (both Rx and Tx) is enabled by a software
@@ -1038,6 +1045,8 @@ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
*/
mii_autoneg_adv_reg |=
(ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP);
+ phy->autoneg_advertised |=
+ (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
break;
default:
e_dbg("Flow control param set incorrectly\n");
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
index b473cb7d7c57..027d721feb18 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
@@ -2127,8 +2127,6 @@ static int fm10k_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_pci_reg;
}
- pci_enable_pcie_error_reporting(pdev);
-
pci_set_master(pdev);
pci_save_state(pdev);
@@ -2227,7 +2225,6 @@ err_sw_init:
err_ioremap:
free_netdev(netdev);
err_alloc_netdev:
- pci_disable_pcie_error_reporting(pdev);
pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
@@ -2281,8 +2278,6 @@ static void fm10k_remove(struct pci_dev *pdev)
pci_release_mem_regions(pdev);
- pci_disable_pcie_error_reporting(pdev);
-
pci_disable_device(pdev);
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 60e351665c70..60ce4d15d82a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -33,6 +33,7 @@
#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>
#include <net/pkt_cls.h>
+#include <net/pkt_sched.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
#include <net/udp_tunnel.h>
@@ -176,7 +177,7 @@ enum i40e_interrupt_policy {
struct i40e_lump_tracking {
u16 num_entries;
- u16 list[0];
+ u16 list[];
#define I40E_PILE_VALID_BIT 0x8000
#define I40E_IWARP_IRQ_PILE_ID (I40E_PILE_VALID_BIT - 2)
};
@@ -1287,9 +1288,9 @@ void i40e_ptp_stop(struct i40e_pf *pf);
int i40e_ptp_alloc_pins(struct i40e_pf *pf);
int i40e_update_adq_vsi_queues(struct i40e_vsi *vsi, int vsi_offset);
int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi);
-i40e_status i40e_get_partition_bw_setting(struct i40e_pf *pf);
-i40e_status i40e_set_partition_bw_setting(struct i40e_pf *pf);
-i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf);
+int i40e_get_partition_bw_setting(struct i40e_pf *pf);
+int i40e_set_partition_bw_setting(struct i40e_pf *pf);
+int i40e_commit_partition_bw_setting(struct i40e_pf *pf);
void i40e_print_link_message(struct i40e_vsi *vsi, bool isup);
void i40e_set_fec_in_flags(u8 fec_cfg, u32 *flags);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index 42439f725aa4..86fac8f959bb 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -47,9 +47,9 @@ static void i40e_adminq_init_regs(struct i40e_hw *hw)
* i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings
* @hw: pointer to the hardware structure
**/
-static i40e_status i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
+static int i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
{
- i40e_status ret_code;
+ int ret_code;
ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
i40e_mem_atq_ring,
@@ -74,9 +74,9 @@ static i40e_status i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
* i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
* @hw: pointer to the hardware structure
**/
-static i40e_status i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
+static int i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
{
- i40e_status ret_code;
+ int ret_code;
ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
i40e_mem_arq_ring,
@@ -115,11 +115,11 @@ static void i40e_free_adminq_arq(struct i40e_hw *hw)
* i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
* @hw: pointer to the hardware structure
**/
-static i40e_status i40e_alloc_arq_bufs(struct i40e_hw *hw)
+static int i40e_alloc_arq_bufs(struct i40e_hw *hw)
{
- i40e_status ret_code;
struct i40e_aq_desc *desc;
struct i40e_dma_mem *bi;
+ int ret_code;
int i;
/* We'll be allocating the buffer info memory first, then we can
@@ -182,10 +182,10 @@ unwind_alloc_arq_bufs:
* i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue
* @hw: pointer to the hardware structure
**/
-static i40e_status i40e_alloc_asq_bufs(struct i40e_hw *hw)
+static int i40e_alloc_asq_bufs(struct i40e_hw *hw)
{
- i40e_status ret_code;
struct i40e_dma_mem *bi;
+ int ret_code;
int i;
/* No mapped memory needed yet, just the buffer info structures */
@@ -266,9 +266,9 @@ static void i40e_free_asq_bufs(struct i40e_hw *hw)
*
* Configure base address and length registers for the transmit queue
**/
-static i40e_status i40e_config_asq_regs(struct i40e_hw *hw)
+static int i40e_config_asq_regs(struct i40e_hw *hw)
{
- i40e_status ret_code = 0;
+ int ret_code = 0;
u32 reg = 0;
/* Clear Head and Tail */
@@ -295,9 +295,9 @@ static i40e_status i40e_config_asq_regs(struct i40e_hw *hw)
*
* Configure base address and length registers for the receive (event queue)
**/
-static i40e_status i40e_config_arq_regs(struct i40e_hw *hw)
+static int i40e_config_arq_regs(struct i40e_hw *hw)
{
- i40e_status ret_code = 0;
+ int ret_code = 0;
u32 reg = 0;
/* Clear Head and Tail */
@@ -334,9 +334,9 @@ static i40e_status i40e_config_arq_regs(struct i40e_hw *hw)
* Do *NOT* hold the lock when calling this as the memory allocation routines
* called are not going to be atomic context safe
**/
-static i40e_status i40e_init_asq(struct i40e_hw *hw)
+static int i40e_init_asq(struct i40e_hw *hw)
{
- i40e_status ret_code = 0;
+ int ret_code = 0;
if (hw->aq.asq.count > 0) {
/* queue already initialized */
@@ -393,9 +393,9 @@ init_adminq_exit:
* Do *NOT* hold the lock when calling this as the memory allocation routines
* called are not going to be atomic context safe
**/
-static i40e_status i40e_init_arq(struct i40e_hw *hw)
+static int i40e_init_arq(struct i40e_hw *hw)
{
- i40e_status ret_code = 0;
+ int ret_code = 0;
if (hw->aq.arq.count > 0) {
/* queue already initialized */
@@ -445,9 +445,9 @@ init_adminq_exit:
*
* The main shutdown routine for the Admin Send Queue
**/
-static i40e_status i40e_shutdown_asq(struct i40e_hw *hw)
+static int i40e_shutdown_asq(struct i40e_hw *hw)
{
- i40e_status ret_code = 0;
+ int ret_code = 0;
mutex_lock(&hw->aq.asq_mutex);
@@ -479,9 +479,9 @@ shutdown_asq_out:
*
* The main shutdown routine for the Admin Receive Queue
**/
-static i40e_status i40e_shutdown_arq(struct i40e_hw *hw)
+static int i40e_shutdown_arq(struct i40e_hw *hw)
{
- i40e_status ret_code = 0;
+ int ret_code = 0;
mutex_lock(&hw->aq.arq_mutex);
@@ -582,12 +582,12 @@ static void i40e_set_hw_flags(struct i40e_hw *hw)
* - hw->aq.arq_buf_size
* - hw->aq.asq_buf_size
**/
-i40e_status i40e_init_adminq(struct i40e_hw *hw)
+int i40e_init_adminq(struct i40e_hw *hw)
{
u16 cfg_ptr, oem_hi, oem_lo;
u16 eetrack_lo, eetrack_hi;
- i40e_status ret_code;
int retry = 0;
+ int ret_code;
/* verify input for valid configuration */
if ((hw->aq.num_arq_entries == 0) ||
@@ -780,7 +780,7 @@ static bool i40e_asq_done(struct i40e_hw *hw)
* This is the main send command driver routine for the Admin Queue send
* queue. It runs the queue, cleans the queue, etc
**/
-static i40e_status
+static int
i40e_asq_send_command_atomic_exec(struct i40e_hw *hw,
struct i40e_aq_desc *desc,
void *buff, /* can be NULL */
@@ -788,12 +788,12 @@ i40e_asq_send_command_atomic_exec(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details,
bool is_atomic_context)
{
- i40e_status status = 0;
struct i40e_dma_mem *dma_buff = NULL;
struct i40e_asq_cmd_details *details;
struct i40e_aq_desc *desc_on_ring;
bool cmd_completed = false;
u16 retval = 0;
+ int status = 0;
u32 val = 0;
if (hw->aq.asq.count == 0) {
@@ -984,7 +984,7 @@ asq_send_command_error:
* Acquires the lock and calls the main send command execution
* routine.
**/
-i40e_status
+int
i40e_asq_send_command_atomic(struct i40e_hw *hw,
struct i40e_aq_desc *desc,
void *buff, /* can be NULL */
@@ -992,7 +992,7 @@ i40e_asq_send_command_atomic(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details,
bool is_atomic_context)
{
- i40e_status status;
+ int status;
mutex_lock(&hw->aq.asq_mutex);
status = i40e_asq_send_command_atomic_exec(hw, desc, buff, buff_size,
@@ -1003,7 +1003,7 @@ i40e_asq_send_command_atomic(struct i40e_hw *hw,
return status;
}
-i40e_status
+int
i40e_asq_send_command(struct i40e_hw *hw, struct i40e_aq_desc *desc,
void *buff, /* can be NULL */ u16 buff_size,
struct i40e_asq_cmd_details *cmd_details)
@@ -1026,7 +1026,7 @@ i40e_asq_send_command(struct i40e_hw *hw, struct i40e_aq_desc *desc,
* routine. Returns the last Admin Queue status in aq_status
* to avoid race conditions in access to hw->aq.asq_last_status.
**/
-i40e_status
+int
i40e_asq_send_command_atomic_v2(struct i40e_hw *hw,
struct i40e_aq_desc *desc,
void *buff, /* can be NULL */
@@ -1035,7 +1035,7 @@ i40e_asq_send_command_atomic_v2(struct i40e_hw *hw,
bool is_atomic_context,
enum i40e_admin_queue_err *aq_status)
{
- i40e_status status;
+ int status;
mutex_lock(&hw->aq.asq_mutex);
status = i40e_asq_send_command_atomic_exec(hw, desc, buff,
@@ -1048,7 +1048,7 @@ i40e_asq_send_command_atomic_v2(struct i40e_hw *hw,
return status;
}
-i40e_status
+int
i40e_asq_send_command_v2(struct i40e_hw *hw, struct i40e_aq_desc *desc,
void *buff, /* can be NULL */ u16 buff_size,
struct i40e_asq_cmd_details *cmd_details,
@@ -1084,14 +1084,14 @@ void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
* the contents through e. It can also return how many events are
* left to process through 'pending'
**/
-i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
- struct i40e_arq_event_info *e,
- u16 *pending)
+int i40e_clean_arq_element(struct i40e_hw *hw,
+ struct i40e_arq_event_info *e,
+ u16 *pending)
{
- i40e_status ret_code = 0;
u16 ntc = hw->aq.arq.next_to_clean;
struct i40e_aq_desc *desc;
struct i40e_dma_mem *bi;
+ int ret_code = 0;
u16 desc_idx;
u16 datalen;
u16 flags;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_alloc.h b/drivers/net/ethernet/intel/i40e/i40e_alloc.h
index cb8689222c8b..a6c9a9e343d1 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_alloc.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_alloc.h
@@ -20,16 +20,16 @@ enum i40e_memory_type {
};
/* prototype for functions used for dynamic memory allocation */
-i40e_status i40e_allocate_dma_mem(struct i40e_hw *hw,
- struct i40e_dma_mem *mem,
- enum i40e_memory_type type,
- u64 size, u32 alignment);
-i40e_status i40e_free_dma_mem(struct i40e_hw *hw,
- struct i40e_dma_mem *mem);
-i40e_status i40e_allocate_virt_mem(struct i40e_hw *hw,
- struct i40e_virt_mem *mem,
- u32 size);
-i40e_status i40e_free_virt_mem(struct i40e_hw *hw,
- struct i40e_virt_mem *mem);
+int i40e_allocate_dma_mem(struct i40e_hw *hw,
+ struct i40e_dma_mem *mem,
+ enum i40e_memory_type type,
+ u64 size, u32 alignment);
+int i40e_free_dma_mem(struct i40e_hw *hw,
+ struct i40e_dma_mem *mem);
+int i40e_allocate_virt_mem(struct i40e_hw *hw,
+ struct i40e_virt_mem *mem,
+ u32 size);
+int i40e_free_virt_mem(struct i40e_hw *hw,
+ struct i40e_virt_mem *mem);
#endif /* _I40E_ALLOC_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c
index 10d7a982a5b9..639c5a1ca853 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_client.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_client.c
@@ -541,9 +541,9 @@ static int i40e_client_virtchnl_send(struct i40e_info *ldev,
{
struct i40e_pf *pf = ldev->pf;
struct i40e_hw *hw = &pf->hw;
- i40e_status err;
+ int err;
- err = i40e_aq_send_msg_to_vf(hw, vf_id, VIRTCHNL_OP_IWARP,
+ err = i40e_aq_send_msg_to_vf(hw, vf_id, VIRTCHNL_OP_RDMA,
0, msg, len, NULL);
if (err)
dev_err(&pf->pdev->dev, "Unable to send iWarp message to VF, error %d, aq status %d\n",
@@ -674,7 +674,7 @@ static int i40e_client_update_vsi_ctxt(struct i40e_info *ldev,
struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
struct i40e_vsi_context ctxt;
bool update = true;
- i40e_status err;
+ int err;
/* TODO: for now do not allow setting VF's VSI setting */
if (is_vf)
@@ -686,8 +686,8 @@ static int i40e_client_update_vsi_ctxt(struct i40e_info *ldev,
ctxt.flags = I40E_AQ_VSI_TYPE_PF;
if (err) {
dev_info(&pf->pdev->dev,
- "couldn't get PF vsi config, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, err),
+ "couldn't get PF vsi config, err %pe aq_err %s\n",
+ ERR_PTR(err),
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
return -ENOENT;
@@ -714,8 +714,8 @@ static int i40e_client_update_vsi_ctxt(struct i40e_info *ldev,
err = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
if (err) {
dev_info(&pf->pdev->dev,
- "update VSI ctxt for PE failed, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, err),
+ "update VSI ctxt for PE failed, err %pe aq_err %s\n",
+ ERR_PTR(err),
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 8f764ff5c990..ed88e38d488b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -14,9 +14,9 @@
* This function sets the mac type of the adapter based on the
* vendor ID and device ID stored in the hw structure.
**/
-i40e_status i40e_set_mac_type(struct i40e_hw *hw)
+int i40e_set_mac_type(struct i40e_hw *hw)
{
- i40e_status status = 0;
+ int status = 0;
if (hw->vendor_id == PCI_VENDOR_ID_INTEL) {
switch (hw->device_id) {
@@ -125,154 +125,6 @@ const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err)
}
/**
- * i40e_stat_str - convert status err code to a string
- * @hw: pointer to the HW structure
- * @stat_err: the status error code to convert
- **/
-const char *i40e_stat_str(struct i40e_hw *hw, i40e_status stat_err)
-{
- switch (stat_err) {
- case 0:
- return "OK";
- case I40E_ERR_NVM:
- return "I40E_ERR_NVM";
- case I40E_ERR_NVM_CHECKSUM:
- return "I40E_ERR_NVM_CHECKSUM";
- case I40E_ERR_PHY:
- return "I40E_ERR_PHY";
- case I40E_ERR_CONFIG:
- return "I40E_ERR_CONFIG";
- case I40E_ERR_PARAM:
- return "I40E_ERR_PARAM";
- case I40E_ERR_MAC_TYPE:
- return "I40E_ERR_MAC_TYPE";
- case I40E_ERR_UNKNOWN_PHY:
- return "I40E_ERR_UNKNOWN_PHY";
- case I40E_ERR_LINK_SETUP:
- return "I40E_ERR_LINK_SETUP";
- case I40E_ERR_ADAPTER_STOPPED:
- return "I40E_ERR_ADAPTER_STOPPED";
- case I40E_ERR_INVALID_MAC_ADDR:
- return "I40E_ERR_INVALID_MAC_ADDR";
- case I40E_ERR_DEVICE_NOT_SUPPORTED:
- return "I40E_ERR_DEVICE_NOT_SUPPORTED";
- case I40E_ERR_PRIMARY_REQUESTS_PENDING:
- return "I40E_ERR_PRIMARY_REQUESTS_PENDING";
- case I40E_ERR_INVALID_LINK_SETTINGS:
- return "I40E_ERR_INVALID_LINK_SETTINGS";
- case I40E_ERR_AUTONEG_NOT_COMPLETE:
- return "I40E_ERR_AUTONEG_NOT_COMPLETE";
- case I40E_ERR_RESET_FAILED:
- return "I40E_ERR_RESET_FAILED";
- case I40E_ERR_SWFW_SYNC:
- return "I40E_ERR_SWFW_SYNC";
- case I40E_ERR_NO_AVAILABLE_VSI:
- return "I40E_ERR_NO_AVAILABLE_VSI";
- case I40E_ERR_NO_MEMORY:
- return "I40E_ERR_NO_MEMORY";
- case I40E_ERR_BAD_PTR:
- return "I40E_ERR_BAD_PTR";
- case I40E_ERR_RING_FULL:
- return "I40E_ERR_RING_FULL";
- case I40E_ERR_INVALID_PD_ID:
- return "I40E_ERR_INVALID_PD_ID";
- case I40E_ERR_INVALID_QP_ID:
- return "I40E_ERR_INVALID_QP_ID";
- case I40E_ERR_INVALID_CQ_ID:
- return "I40E_ERR_INVALID_CQ_ID";
- case I40E_ERR_INVALID_CEQ_ID:
- return "I40E_ERR_INVALID_CEQ_ID";
- case I40E_ERR_INVALID_AEQ_ID:
- return "I40E_ERR_INVALID_AEQ_ID";
- case I40E_ERR_INVALID_SIZE:
- return "I40E_ERR_INVALID_SIZE";
- case I40E_ERR_INVALID_ARP_INDEX:
- return "I40E_ERR_INVALID_ARP_INDEX";
- case I40E_ERR_INVALID_FPM_FUNC_ID:
- return "I40E_ERR_INVALID_FPM_FUNC_ID";
- case I40E_ERR_QP_INVALID_MSG_SIZE:
- return "I40E_ERR_QP_INVALID_MSG_SIZE";
- case I40E_ERR_QP_TOOMANY_WRS_POSTED:
- return "I40E_ERR_QP_TOOMANY_WRS_POSTED";
- case I40E_ERR_INVALID_FRAG_COUNT:
- return "I40E_ERR_INVALID_FRAG_COUNT";
- case I40E_ERR_QUEUE_EMPTY:
- return "I40E_ERR_QUEUE_EMPTY";
- case I40E_ERR_INVALID_ALIGNMENT:
- return "I40E_ERR_INVALID_ALIGNMENT";
- case I40E_ERR_FLUSHED_QUEUE:
- return "I40E_ERR_FLUSHED_QUEUE";
- case I40E_ERR_INVALID_PUSH_PAGE_INDEX:
- return "I40E_ERR_INVALID_PUSH_PAGE_INDEX";
- case I40E_ERR_INVALID_IMM_DATA_SIZE:
- return "I40E_ERR_INVALID_IMM_DATA_SIZE";
- case I40E_ERR_TIMEOUT:
- return "I40E_ERR_TIMEOUT";
- case I40E_ERR_OPCODE_MISMATCH:
- return "I40E_ERR_OPCODE_MISMATCH";
- case I40E_ERR_CQP_COMPL_ERROR:
- return "I40E_ERR_CQP_COMPL_ERROR";
- case I40E_ERR_INVALID_VF_ID:
- return "I40E_ERR_INVALID_VF_ID";
- case I40E_ERR_INVALID_HMCFN_ID:
- return "I40E_ERR_INVALID_HMCFN_ID";
- case I40E_ERR_BACKING_PAGE_ERROR:
- return "I40E_ERR_BACKING_PAGE_ERROR";
- case I40E_ERR_NO_PBLCHUNKS_AVAILABLE:
- return "I40E_ERR_NO_PBLCHUNKS_AVAILABLE";
- case I40E_ERR_INVALID_PBLE_INDEX:
- return "I40E_ERR_INVALID_PBLE_INDEX";
- case I40E_ERR_INVALID_SD_INDEX:
- return "I40E_ERR_INVALID_SD_INDEX";
- case I40E_ERR_INVALID_PAGE_DESC_INDEX:
- return "I40E_ERR_INVALID_PAGE_DESC_INDEX";
- case I40E_ERR_INVALID_SD_TYPE:
- return "I40E_ERR_INVALID_SD_TYPE";
- case I40E_ERR_MEMCPY_FAILED:
- return "I40E_ERR_MEMCPY_FAILED";
- case I40E_ERR_INVALID_HMC_OBJ_INDEX:
- return "I40E_ERR_INVALID_HMC_OBJ_INDEX";
- case I40E_ERR_INVALID_HMC_OBJ_COUNT:
- return "I40E_ERR_INVALID_HMC_OBJ_COUNT";
- case I40E_ERR_INVALID_SRQ_ARM_LIMIT:
- return "I40E_ERR_INVALID_SRQ_ARM_LIMIT";
- case I40E_ERR_SRQ_ENABLED:
- return "I40E_ERR_SRQ_ENABLED";
- case I40E_ERR_ADMIN_QUEUE_ERROR:
- return "I40E_ERR_ADMIN_QUEUE_ERROR";
- case I40E_ERR_ADMIN_QUEUE_TIMEOUT:
- return "I40E_ERR_ADMIN_QUEUE_TIMEOUT";
- case I40E_ERR_BUF_TOO_SHORT:
- return "I40E_ERR_BUF_TOO_SHORT";
- case I40E_ERR_ADMIN_QUEUE_FULL:
- return "I40E_ERR_ADMIN_QUEUE_FULL";
- case I40E_ERR_ADMIN_QUEUE_NO_WORK:
- return "I40E_ERR_ADMIN_QUEUE_NO_WORK";
- case I40E_ERR_BAD_IWARP_CQE:
- return "I40E_ERR_BAD_IWARP_CQE";
- case I40E_ERR_NVM_BLANK_MODE:
- return "I40E_ERR_NVM_BLANK_MODE";
- case I40E_ERR_NOT_IMPLEMENTED:
- return "I40E_ERR_NOT_IMPLEMENTED";
- case I40E_ERR_PE_DOORBELL_NOT_ENABLED:
- return "I40E_ERR_PE_DOORBELL_NOT_ENABLED";
- case I40E_ERR_DIAG_TEST_FAILED:
- return "I40E_ERR_DIAG_TEST_FAILED";
- case I40E_ERR_NOT_READY:
- return "I40E_ERR_NOT_READY";
- case I40E_NOT_SUPPORTED:
- return "I40E_NOT_SUPPORTED";
- case I40E_ERR_FIRMWARE_API_VERSION:
- return "I40E_ERR_FIRMWARE_API_VERSION";
- case I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR:
- return "I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR";
- }
-
- snprintf(hw->err_str, sizeof(hw->err_str), "%d", stat_err);
- return hw->err_str;
-}
-
-/**
* i40e_debug_aq
* @hw: debug mask related to admin queue
* @mask: debug mask
@@ -355,13 +207,13 @@ bool i40e_check_asq_alive(struct i40e_hw *hw)
* Tell the Firmware that we're shutting down the AdminQ and whether
* or not the driver is unloading as well.
**/
-i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw,
- bool unloading)
+int i40e_aq_queue_shutdown(struct i40e_hw *hw,
+ bool unloading)
{
struct i40e_aq_desc desc;
struct i40e_aqc_queue_shutdown *cmd =
(struct i40e_aqc_queue_shutdown *)&desc.params.raw;
- i40e_status status;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_queue_shutdown);
@@ -384,15 +236,15 @@ i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw,
*
* Internal function to get or set RSS look up table
**/
-static i40e_status i40e_aq_get_set_rss_lut(struct i40e_hw *hw,
- u16 vsi_id, bool pf_lut,
- u8 *lut, u16 lut_size,
- bool set)
+static int i40e_aq_get_set_rss_lut(struct i40e_hw *hw,
+ u16 vsi_id, bool pf_lut,
+ u8 *lut, u16 lut_size,
+ bool set)
{
- i40e_status status;
struct i40e_aq_desc desc;
struct i40e_aqc_get_set_rss_lut *cmd_resp =
(struct i40e_aqc_get_set_rss_lut *)&desc.params.raw;
+ int status;
if (set)
i40e_fill_default_direct_cmd_desc(&desc,
@@ -437,8 +289,8 @@ static i40e_status i40e_aq_get_set_rss_lut(struct i40e_hw *hw,
*
* get the RSS lookup table, PF or VSI type
**/
-i40e_status i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 vsi_id,
- bool pf_lut, u8 *lut, u16 lut_size)
+int i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 vsi_id,
+ bool pf_lut, u8 *lut, u16 lut_size)
{
return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size,
false);
@@ -454,8 +306,8 @@ i40e_status i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 vsi_id,
*
* set the RSS lookup table, PF or VSI type
**/
-i40e_status i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 vsi_id,
- bool pf_lut, u8 *lut, u16 lut_size)
+int i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 vsi_id,
+ bool pf_lut, u8 *lut, u16 lut_size)
{
return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, true);
}
@@ -469,16 +321,16 @@ i40e_status i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 vsi_id,
*
* get the RSS key per VSI
**/
-static i40e_status i40e_aq_get_set_rss_key(struct i40e_hw *hw,
- u16 vsi_id,
- struct i40e_aqc_get_set_rss_key_data *key,
- bool set)
+static int i40e_aq_get_set_rss_key(struct i40e_hw *hw,
+ u16 vsi_id,
+ struct i40e_aqc_get_set_rss_key_data *key,
+ bool set)
{
- i40e_status status;
struct i40e_aq_desc desc;
struct i40e_aqc_get_set_rss_key *cmd_resp =
(struct i40e_aqc_get_set_rss_key *)&desc.params.raw;
u16 key_size = sizeof(struct i40e_aqc_get_set_rss_key_data);
+ int status;
if (set)
i40e_fill_default_direct_cmd_desc(&desc,
@@ -509,9 +361,9 @@ static i40e_status i40e_aq_get_set_rss_key(struct i40e_hw *hw,
* @key: pointer to key info struct
*
**/
-i40e_status i40e_aq_get_rss_key(struct i40e_hw *hw,
- u16 vsi_id,
- struct i40e_aqc_get_set_rss_key_data *key)
+int i40e_aq_get_rss_key(struct i40e_hw *hw,
+ u16 vsi_id,
+ struct i40e_aqc_get_set_rss_key_data *key)
{
return i40e_aq_get_set_rss_key(hw, vsi_id, key, false);
}
@@ -524,9 +376,9 @@ i40e_status i40e_aq_get_rss_key(struct i40e_hw *hw,
*
* set the RSS key per VSI
**/
-i40e_status i40e_aq_set_rss_key(struct i40e_hw *hw,
- u16 vsi_id,
- struct i40e_aqc_get_set_rss_key_data *key)
+int i40e_aq_set_rss_key(struct i40e_hw *hw,
+ u16 vsi_id,
+ struct i40e_aqc_get_set_rss_key_data *key)
{
return i40e_aq_get_set_rss_key(hw, vsi_id, key, true);
}
@@ -796,10 +648,10 @@ struct i40e_rx_ptype_decoded i40e_ptype_lookup[BIT(8)] = {
* hw_addr, back, device_id, vendor_id, subsystem_device_id,
* subsystem_vendor_id, and revision_id
**/
-i40e_status i40e_init_shared_code(struct i40e_hw *hw)
+int i40e_init_shared_code(struct i40e_hw *hw)
{
- i40e_status status = 0;
u32 port, ari, func_rid;
+ int status = 0;
i40e_set_mac_type(hw);
@@ -836,15 +688,16 @@ i40e_status i40e_init_shared_code(struct i40e_hw *hw)
* @addrs: the requestor's mac addr store
* @cmd_details: pointer to command details structure or NULL
**/
-static i40e_status i40e_aq_mac_address_read(struct i40e_hw *hw,
- u16 *flags,
- struct i40e_aqc_mac_address_read_data *addrs,
- struct i40e_asq_cmd_details *cmd_details)
+static int
+i40e_aq_mac_address_read(struct i40e_hw *hw,
+ u16 *flags,
+ struct i40e_aqc_mac_address_read_data *addrs,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_mac_address_read *cmd_data =
(struct i40e_aqc_mac_address_read *)&desc.params.raw;
- i40e_status status;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_mac_address_read);
desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF);
@@ -863,14 +716,14 @@ static i40e_status i40e_aq_mac_address_read(struct i40e_hw *hw,
* @mac_addr: address to write
* @cmd_details: pointer to command details structure or NULL
**/
-i40e_status i40e_aq_mac_address_write(struct i40e_hw *hw,
- u16 flags, u8 *mac_addr,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_mac_address_write(struct i40e_hw *hw,
+ u16 flags, u8 *mac_addr,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_mac_address_write *cmd_data =
(struct i40e_aqc_mac_address_write *)&desc.params.raw;
- i40e_status status;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_mac_address_write);
@@ -893,11 +746,11 @@ i40e_status i40e_aq_mac_address_write(struct i40e_hw *hw,
*
* Reads the adapter's MAC address from register
**/
-i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
+int i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
{
struct i40e_aqc_mac_address_read_data addrs;
- i40e_status status;
u16 flags = 0;
+ int status;
status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);
@@ -914,11 +767,11 @@ i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
*
* Reads the adapter's Port MAC address
**/
-i40e_status i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
+int i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
{
struct i40e_aqc_mac_address_read_data addrs;
- i40e_status status;
u16 flags = 0;
+ int status;
status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);
if (status)
@@ -972,13 +825,13 @@ void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable)
*
* Reads the part number string from the EEPROM.
**/
-i40e_status i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num,
- u32 pba_num_size)
+int i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num,
+ u32 pba_num_size)
{
- i40e_status status = 0;
u16 pba_word = 0;
u16 pba_size = 0;
u16 pba_ptr = 0;
+ int status = 0;
u16 i = 0;
status = i40e_read_nvm_word(hw, I40E_SR_PBA_FLAGS, &pba_word);
@@ -1087,8 +940,8 @@ static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
* @hw: pointer to the hardware structure
* @retry_limit: how many times to retry before failure
**/
-static i40e_status i40e_poll_globr(struct i40e_hw *hw,
- u32 retry_limit)
+static int i40e_poll_globr(struct i40e_hw *hw,
+ u32 retry_limit)
{
u32 cnt, reg = 0;
@@ -1114,7 +967,7 @@ static i40e_status i40e_poll_globr(struct i40e_hw *hw,
* Assuming someone else has triggered a global reset,
* assure the global reset is complete and then reset the PF
**/
-i40e_status i40e_pf_reset(struct i40e_hw *hw)
+int i40e_pf_reset(struct i40e_hw *hw)
{
u32 cnt = 0;
u32 cnt1 = 0;
@@ -1453,15 +1306,16 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
*
* Returns the various PHY abilities supported on the Port.
**/
-i40e_status i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
- bool qualified_modules, bool report_init,
- struct i40e_aq_get_phy_abilities_resp *abilities,
- struct i40e_asq_cmd_details *cmd_details)
+int
+i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
+ bool qualified_modules, bool report_init,
+ struct i40e_aq_get_phy_abilities_resp *abilities,
+ struct i40e_asq_cmd_details *cmd_details)
{
- struct i40e_aq_desc desc;
- i40e_status status;
u16 abilities_size = sizeof(struct i40e_aq_get_phy_abilities_resp);
u16 max_delay = I40E_MAX_PHY_TIMEOUT, total_delay = 0;
+ struct i40e_aq_desc desc;
+ int status;
if (!abilities)
return I40E_ERR_PARAM;
@@ -1532,14 +1386,14 @@ i40e_status i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
* of the PHY Config parameters. This status will be indicated by the
* command response.
**/
-enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw,
- struct i40e_aq_set_phy_config *config,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_set_phy_config(struct i40e_hw *hw,
+ struct i40e_aq_set_phy_config *config,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aq_set_phy_config *cmd =
(struct i40e_aq_set_phy_config *)&desc.params.raw;
- enum i40e_status_code status;
+ int status;
if (!config)
return I40E_ERR_PARAM;
@@ -1554,7 +1408,7 @@ enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw,
return status;
}
-static noinline_for_stack enum i40e_status_code
+static noinline_for_stack int
i40e_set_fc_status(struct i40e_hw *hw,
struct i40e_aq_get_phy_abilities_resp *abilities,
bool atomic_restart)
@@ -1612,11 +1466,11 @@ i40e_set_fc_status(struct i40e_hw *hw,
*
* Set the requested flow control mode using set_phy_config.
**/
-enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
- bool atomic_restart)
+int i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
+ bool atomic_restart)
{
struct i40e_aq_get_phy_abilities_resp abilities;
- enum i40e_status_code status;
+ int status;
*aq_failures = 0x0;
@@ -1655,13 +1509,13 @@ enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
*
* Tell the firmware that the driver is taking over from PXE
**/
-i40e_status i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details)
{
- i40e_status status;
struct i40e_aq_desc desc;
struct i40e_aqc_clear_pxe *cmd =
(struct i40e_aqc_clear_pxe *)&desc.params.raw;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_clear_pxe_mode);
@@ -1683,14 +1537,14 @@ i40e_status i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
*
* Sets up the link and restarts the Auto-Negotiation over the link.
**/
-i40e_status i40e_aq_set_link_restart_an(struct i40e_hw *hw,
- bool enable_link,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_set_link_restart_an(struct i40e_hw *hw,
+ bool enable_link,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_set_link_restart_an *cmd =
(struct i40e_aqc_set_link_restart_an *)&desc.params.raw;
- i40e_status status;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_link_restart_an);
@@ -1715,17 +1569,17 @@ i40e_status i40e_aq_set_link_restart_an(struct i40e_hw *hw,
*
* Returns the link status of the adapter.
**/
-i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,
- bool enable_lse, struct i40e_link_status *link,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_get_link_info(struct i40e_hw *hw,
+ bool enable_lse, struct i40e_link_status *link,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_get_link_status *resp =
(struct i40e_aqc_get_link_status *)&desc.params.raw;
struct i40e_link_status *hw_link_info = &hw->phy.link_info;
- i40e_status status;
bool tx_pause, rx_pause;
u16 command_flags;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status);
@@ -1811,14 +1665,14 @@ aq_get_link_info_exit:
*
* Set link interrupt mask.
**/
-i40e_status i40e_aq_set_phy_int_mask(struct i40e_hw *hw,
- u16 mask,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_set_phy_int_mask(struct i40e_hw *hw,
+ u16 mask,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_set_phy_int_mask *cmd =
(struct i40e_aqc_set_phy_int_mask *)&desc.params.raw;
- i40e_status status;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_phy_int_mask);
@@ -1838,8 +1692,8 @@ i40e_status i40e_aq_set_phy_int_mask(struct i40e_hw *hw,
*
* Enable/disable loopback on a given port
*/
-i40e_status i40e_aq_set_mac_loopback(struct i40e_hw *hw, bool ena_lpbk,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_set_mac_loopback(struct i40e_hw *hw, bool ena_lpbk,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_set_lb_mode *cmd =
@@ -1864,13 +1718,13 @@ i40e_status i40e_aq_set_mac_loopback(struct i40e_hw *hw, bool ena_lpbk,
*
* Reset the external PHY.
**/
-i40e_status i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_set_phy_debug *cmd =
(struct i40e_aqc_set_phy_debug *)&desc.params.raw;
- i40e_status status;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_phy_debug);
@@ -1905,9 +1759,9 @@ static bool i40e_is_aq_api_ver_ge(struct i40e_adminq_info *aq, u16 maj,
*
* Add a VSI context to the hardware.
**/
-i40e_status i40e_aq_add_vsi(struct i40e_hw *hw,
- struct i40e_vsi_context *vsi_ctx,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_add_vsi(struct i40e_hw *hw,
+ struct i40e_vsi_context *vsi_ctx,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_add_get_update_vsi *cmd =
@@ -1915,7 +1769,7 @@ i40e_status i40e_aq_add_vsi(struct i40e_hw *hw,
struct i40e_aqc_add_get_update_vsi_completion *resp =
(struct i40e_aqc_add_get_update_vsi_completion *)
&desc.params.raw;
- i40e_status status;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_add_vsi);
@@ -1949,15 +1803,15 @@ aq_add_vsi_exit:
* @seid: vsi number
* @cmd_details: pointer to command details structure or NULL
**/
-i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw,
- u16 seid,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_set_default_vsi(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
(struct i40e_aqc_set_vsi_promiscuous_modes *)
&desc.params.raw;
- i40e_status status;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_vsi_promiscuous_modes);
@@ -1977,15 +1831,15 @@ i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw,
* @seid: vsi number
* @cmd_details: pointer to command details structure or NULL
**/
-i40e_status i40e_aq_clear_default_vsi(struct i40e_hw *hw,
- u16 seid,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_clear_default_vsi(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
(struct i40e_aqc_set_vsi_promiscuous_modes *)
&desc.params.raw;
- i40e_status status;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_vsi_promiscuous_modes);
@@ -2007,16 +1861,16 @@ i40e_status i40e_aq_clear_default_vsi(struct i40e_hw *hw,
* @cmd_details: pointer to command details structure or NULL
* @rx_only_promisc: flag to decide if egress traffic gets mirrored in promisc
**/
-i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
- u16 seid, bool set,
- struct i40e_asq_cmd_details *cmd_details,
- bool rx_only_promisc)
+int i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
+ u16 seid, bool set,
+ struct i40e_asq_cmd_details *cmd_details,
+ bool rx_only_promisc)
{
struct i40e_aq_desc desc;
struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
- i40e_status status;
u16 flags = 0;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_vsi_promiscuous_modes);
@@ -2047,14 +1901,15 @@ i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
* @set: set multicast promiscuous enable/disable
* @cmd_details: pointer to command details structure or NULL
**/
-i40e_status i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
- u16 seid, bool set, struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
+ u16 seid, bool set,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
- i40e_status status;
u16 flags = 0;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_vsi_promiscuous_modes);
@@ -2080,16 +1935,16 @@ i40e_status i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
* @vid: The VLAN tag filter - capture any multicast packet with this VLAN tag
* @cmd_details: pointer to command details structure or NULL
**/
-enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
- u16 seid, bool enable,
- u16 vid,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
+ u16 seid, bool enable,
+ u16 vid,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
- enum i40e_status_code status;
u16 flags = 0;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_vsi_promiscuous_modes);
@@ -2116,16 +1971,16 @@ enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
* @vid: The VLAN tag filter - capture any unicast packet with this VLAN tag
* @cmd_details: pointer to command details structure or NULL
**/
-enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
- u16 seid, bool enable,
- u16 vid,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
+ u16 seid, bool enable,
+ u16 vid,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
- enum i40e_status_code status;
u16 flags = 0;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_vsi_promiscuous_modes);
@@ -2158,15 +2013,15 @@ enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
* @vid: The VLAN tag filter - capture any broadcast packet with this VLAN tag
* @cmd_details: pointer to command details structure or NULL
**/
-i40e_status i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw,
- u16 seid, bool enable, u16 vid,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw,
+ u16 seid, bool enable, u16 vid,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
- i40e_status status;
u16 flags = 0;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_vsi_promiscuous_modes);
@@ -2193,14 +2048,14 @@ i40e_status i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw,
*
* Set or clear the broadcast promiscuous flag (filter) for a given VSI.
**/
-i40e_status i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
- u16 seid, bool set_filter,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
+ u16 seid, bool set_filter,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
- i40e_status status;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_vsi_promiscuous_modes);
@@ -2226,15 +2081,15 @@ i40e_status i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
* @enable: set MAC L2 layer unicast promiscuous enable/disable for a given VLAN
* @cmd_details: pointer to command details structure or NULL
**/
-i40e_status i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw,
- u16 seid, bool enable,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw,
+ u16 seid, bool enable,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
(struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
- i40e_status status;
u16 flags = 0;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_vsi_promiscuous_modes);
@@ -2256,9 +2111,9 @@ i40e_status i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw,
* @vsi_ctx: pointer to a vsi context struct
* @cmd_details: pointer to command details structure or NULL
**/
-i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw,
- struct i40e_vsi_context *vsi_ctx,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_get_vsi_params(struct i40e_hw *hw,
+ struct i40e_vsi_context *vsi_ctx,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_add_get_update_vsi *cmd =
@@ -2266,7 +2121,7 @@ i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw,
struct i40e_aqc_add_get_update_vsi_completion *resp =
(struct i40e_aqc_add_get_update_vsi_completion *)
&desc.params.raw;
- i40e_status status;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_get_vsi_parameters);
@@ -2298,9 +2153,9 @@ aq_get_vsi_params_exit:
*
* Update a VSI context.
**/
-i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw,
- struct i40e_vsi_context *vsi_ctx,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_update_vsi_params(struct i40e_hw *hw,
+ struct i40e_vsi_context *vsi_ctx,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_add_get_update_vsi *cmd =
@@ -2308,7 +2163,7 @@ i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw,
struct i40e_aqc_add_get_update_vsi_completion *resp =
(struct i40e_aqc_add_get_update_vsi_completion *)
&desc.params.raw;
- i40e_status status;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_update_vsi_parameters);
@@ -2336,15 +2191,15 @@ i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw,
*
* Fill the buf with switch configuration returned from AdminQ command
**/
-i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw,
- struct i40e_aqc_get_switch_config_resp *buf,
- u16 buf_size, u16 *start_seid,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_get_switch_config(struct i40e_hw *hw,
+ struct i40e_aqc_get_switch_config_resp *buf,
+ u16 buf_size, u16 *start_seid,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_switch_seid *scfg =
(struct i40e_aqc_switch_seid *)&desc.params.raw;
- i40e_status status;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_get_switch_config);
@@ -2370,15 +2225,15 @@ i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw,
*
* Set switch configuration bits
**/
-enum i40e_status_code i40e_aq_set_switch_config(struct i40e_hw *hw,
- u16 flags,
- u16 valid_flags, u8 mode,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_set_switch_config(struct i40e_hw *hw,
+ u16 flags,
+ u16 valid_flags, u8 mode,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_set_switch_config *scfg =
(struct i40e_aqc_set_switch_config *)&desc.params.raw;
- enum i40e_status_code status;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_switch_config);
@@ -2407,16 +2262,16 @@ enum i40e_status_code i40e_aq_set_switch_config(struct i40e_hw *hw,
*
* Get the firmware version from the admin queue commands
**/
-i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw,
- u16 *fw_major_version, u16 *fw_minor_version,
- u32 *fw_build,
- u16 *api_major_version, u16 *api_minor_version,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_get_firmware_version(struct i40e_hw *hw,
+ u16 *fw_major_version, u16 *fw_minor_version,
+ u32 *fw_build,
+ u16 *api_major_version, u16 *api_minor_version,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_get_version *resp =
(struct i40e_aqc_get_version *)&desc.params.raw;
- i40e_status status;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_version);
@@ -2446,14 +2301,14 @@ i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw,
*
* Send the driver version to the firmware
**/
-i40e_status i40e_aq_send_driver_version(struct i40e_hw *hw,
+int i40e_aq_send_driver_version(struct i40e_hw *hw,
struct i40e_driver_version *dv,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_driver_version *cmd =
(struct i40e_aqc_driver_version *)&desc.params.raw;
- i40e_status status;
+ int status;
u16 len;
if (dv == NULL)
@@ -2488,9 +2343,9 @@ i40e_status i40e_aq_send_driver_version(struct i40e_hw *hw,
*
* Side effect: LinkStatusEvent reporting becomes enabled
**/
-i40e_status i40e_get_link_status(struct i40e_hw *hw, bool *link_up)
+int i40e_get_link_status(struct i40e_hw *hw, bool *link_up)
{
- i40e_status status = 0;
+ int status = 0;
if (hw->phy.get_link_info) {
status = i40e_update_link_info(hw);
@@ -2509,10 +2364,10 @@ i40e_status i40e_get_link_status(struct i40e_hw *hw, bool *link_up)
* i40e_update_link_info - update status of the HW network link
* @hw: pointer to the hw struct
**/
-noinline_for_stack i40e_status i40e_update_link_info(struct i40e_hw *hw)
+noinline_for_stack int i40e_update_link_info(struct i40e_hw *hw)
{
struct i40e_aq_get_phy_abilities_resp abilities;
- i40e_status status = 0;
+ int status = 0;
status = i40e_aq_get_link_info(hw, true, NULL, NULL);
if (status)
@@ -2559,19 +2414,19 @@ noinline_for_stack i40e_status i40e_update_link_info(struct i40e_hw *hw)
* This asks the FW to add a VEB between the uplink and downlink
* elements. If the uplink SEID is 0, this will be a floating VEB.
**/
-i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
- u16 downlink_seid, u8 enabled_tc,
- bool default_port, u16 *veb_seid,
- bool enable_stats,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
+ u16 downlink_seid, u8 enabled_tc,
+ bool default_port, u16 *veb_seid,
+ bool enable_stats,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_add_veb *cmd =
(struct i40e_aqc_add_veb *)&desc.params.raw;
struct i40e_aqc_add_veb_completion *resp =
(struct i40e_aqc_add_veb_completion *)&desc.params.raw;
- i40e_status status;
u16 veb_flags = 0;
+ int status;
/* SEIDs need to either both be set or both be 0 for floating VEB */
if (!!uplink_seid != !!downlink_seid)
@@ -2617,17 +2472,17 @@ i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
* This retrieves the parameters for a particular VEB, specified by
* uplink_seid, and returns them to the caller.
**/
-i40e_status i40e_aq_get_veb_parameters(struct i40e_hw *hw,
- u16 veb_seid, u16 *switch_id,
- bool *floating, u16 *statistic_index,
- u16 *vebs_used, u16 *vebs_free,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_get_veb_parameters(struct i40e_hw *hw,
+ u16 veb_seid, u16 *switch_id,
+ bool *floating, u16 *statistic_index,
+ u16 *vebs_used, u16 *vebs_free,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_get_veb_parameters_completion *cmd_resp =
(struct i40e_aqc_get_veb_parameters_completion *)
&desc.params.raw;
- i40e_status status;
+ int status;
if (veb_seid == 0)
return I40E_ERR_PARAM;
@@ -2711,7 +2566,7 @@ i40e_prepare_add_macvlan(struct i40e_aqc_add_macvlan_element_data *mv_list,
*
* Add MAC/VLAN addresses to the HW filtering
**/
-i40e_status
+int
i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid,
struct i40e_aqc_add_macvlan_element_data *mv_list,
u16 count, struct i40e_asq_cmd_details *cmd_details)
@@ -2743,7 +2598,7 @@ i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid,
* It also calls _v2 versions of asq_send_command functions to
* get the aq_status on the stack.
**/
-i40e_status
+int
i40e_aq_add_macvlan_v2(struct i40e_hw *hw, u16 seid,
struct i40e_aqc_add_macvlan_element_data *mv_list,
u16 count, struct i40e_asq_cmd_details *cmd_details,
@@ -2771,15 +2626,16 @@ i40e_aq_add_macvlan_v2(struct i40e_hw *hw, u16 seid,
*
* Remove MAC/VLAN addresses from the HW filtering
**/
-i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid,
- struct i40e_aqc_remove_macvlan_element_data *mv_list,
- u16 count, struct i40e_asq_cmd_details *cmd_details)
+int
+i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_remove_macvlan_element_data *mv_list,
+ u16 count, struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_macvlan *cmd =
(struct i40e_aqc_macvlan *)&desc.params.raw;
- i40e_status status;
u16 buf_size;
+ int status;
if (count == 0 || !mv_list || !hw)
return I40E_ERR_PARAM;
@@ -2818,7 +2674,7 @@ i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid,
* It also calls _v2 versions of asq_send_command functions to
* get the aq_status on the stack.
**/
-i40e_status
+int
i40e_aq_remove_macvlan_v2(struct i40e_hw *hw, u16 seid,
struct i40e_aqc_remove_macvlan_element_data *mv_list,
u16 count, struct i40e_asq_cmd_details *cmd_details,
@@ -2866,19 +2722,19 @@ i40e_aq_remove_macvlan_v2(struct i40e_hw *hw, u16 seid,
* Add/Delete a mirror rule to a specific switch. Mirror rules are supported for
* VEBs/VEPA elements only
**/
-static i40e_status i40e_mirrorrule_op(struct i40e_hw *hw,
- u16 opcode, u16 sw_seid, u16 rule_type, u16 id,
- u16 count, __le16 *mr_list,
- struct i40e_asq_cmd_details *cmd_details,
- u16 *rule_id, u16 *rules_used, u16 *rules_free)
+static int i40e_mirrorrule_op(struct i40e_hw *hw,
+ u16 opcode, u16 sw_seid, u16 rule_type, u16 id,
+ u16 count, __le16 *mr_list,
+ struct i40e_asq_cmd_details *cmd_details,
+ u16 *rule_id, u16 *rules_used, u16 *rules_free)
{
struct i40e_aq_desc desc;
struct i40e_aqc_add_delete_mirror_rule *cmd =
(struct i40e_aqc_add_delete_mirror_rule *)&desc.params.raw;
struct i40e_aqc_add_delete_mirror_rule_completion *resp =
(struct i40e_aqc_add_delete_mirror_rule_completion *)&desc.params.raw;
- i40e_status status;
u16 buf_size;
+ int status;
buf_size = count * sizeof(*mr_list);
@@ -2926,10 +2782,11 @@ static i40e_status i40e_mirrorrule_op(struct i40e_hw *hw,
*
* Add mirror rule. Mirror rules are supported for VEBs or VEPA elements only
**/
-i40e_status i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
- u16 rule_type, u16 dest_vsi, u16 count, __le16 *mr_list,
- struct i40e_asq_cmd_details *cmd_details,
- u16 *rule_id, u16 *rules_used, u16 *rules_free)
+int i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
+ u16 rule_type, u16 dest_vsi, u16 count,
+ __le16 *mr_list,
+ struct i40e_asq_cmd_details *cmd_details,
+ u16 *rule_id, u16 *rules_used, u16 *rules_free)
{
if (!(rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS ||
rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS)) {
@@ -2957,10 +2814,11 @@ i40e_status i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
*
* Delete a mirror rule. Mirror rules are supported for VEBs/VEPA elements only
**/
-i40e_status i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
- u16 rule_type, u16 rule_id, u16 count, __le16 *mr_list,
- struct i40e_asq_cmd_details *cmd_details,
- u16 *rules_used, u16 *rules_free)
+int i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
+ u16 rule_type, u16 rule_id, u16 count,
+ __le16 *mr_list,
+ struct i40e_asq_cmd_details *cmd_details,
+ u16 *rules_used, u16 *rules_free)
{
/* Rule ID has to be valid except rule_type: INGRESS VLAN mirroring */
if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) {
@@ -2989,14 +2847,14 @@ i40e_status i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
*
* send msg to vf
**/
-i40e_status i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
- u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
+ u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_pf_vf_message *cmd =
(struct i40e_aqc_pf_vf_message *)&desc.params.raw;
- i40e_status status;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_vf);
cmd->id = cpu_to_le32(vfid);
@@ -3024,14 +2882,14 @@ i40e_status i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
*
* Read the register using the admin queue commands
**/
-i40e_status i40e_aq_debug_read_register(struct i40e_hw *hw,
+int i40e_aq_debug_read_register(struct i40e_hw *hw,
u32 reg_addr, u64 *reg_val,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_debug_reg_read_write *cmd_resp =
(struct i40e_aqc_debug_reg_read_write *)&desc.params.raw;
- i40e_status status;
+ int status;
if (reg_val == NULL)
return I40E_ERR_PARAM;
@@ -3059,14 +2917,14 @@ i40e_status i40e_aq_debug_read_register(struct i40e_hw *hw,
*
* Write to a register using the admin queue commands
**/
-i40e_status i40e_aq_debug_write_register(struct i40e_hw *hw,
- u32 reg_addr, u64 reg_val,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_debug_write_register(struct i40e_hw *hw,
+ u32 reg_addr, u64 reg_val,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_debug_reg_read_write *cmd =
(struct i40e_aqc_debug_reg_read_write *)&desc.params.raw;
- i40e_status status;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_write_reg);
@@ -3090,16 +2948,16 @@ i40e_status i40e_aq_debug_write_register(struct i40e_hw *hw,
*
* requests common resource using the admin queue commands
**/
-i40e_status i40e_aq_request_resource(struct i40e_hw *hw,
- enum i40e_aq_resources_ids resource,
- enum i40e_aq_resource_access_type access,
- u8 sdp_number, u64 *timeout,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_request_resource(struct i40e_hw *hw,
+ enum i40e_aq_resources_ids resource,
+ enum i40e_aq_resource_access_type access,
+ u8 sdp_number, u64 *timeout,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_request_resource *cmd_resp =
(struct i40e_aqc_request_resource *)&desc.params.raw;
- i40e_status status;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_request_resource);
@@ -3129,15 +2987,15 @@ i40e_status i40e_aq_request_resource(struct i40e_hw *hw,
*
* release common resource using the admin queue commands
**/
-i40e_status i40e_aq_release_resource(struct i40e_hw *hw,
- enum i40e_aq_resources_ids resource,
- u8 sdp_number,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_release_resource(struct i40e_hw *hw,
+ enum i40e_aq_resources_ids resource,
+ u8 sdp_number,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_request_resource *cmd =
(struct i40e_aqc_request_resource *)&desc.params.raw;
- i40e_status status;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_release_resource);
@@ -3161,15 +3019,15 @@ i40e_status i40e_aq_release_resource(struct i40e_hw *hw,
*
* Read the NVM using the admin queue commands
**/
-i40e_status i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer,
- u32 offset, u16 length, void *data,
- bool last_command,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer,
+ u32 offset, u16 length, void *data,
+ bool last_command,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_nvm_update *cmd =
(struct i40e_aqc_nvm_update *)&desc.params.raw;
- i40e_status status;
+ int status;
/* In offset the highest byte must be zeroed. */
if (offset & 0xFF000000) {
@@ -3207,14 +3065,14 @@ i40e_aq_read_nvm_exit:
*
* Erase the NVM sector using the admin queue commands
**/
-i40e_status i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer,
- u32 offset, u16 length, bool last_command,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer,
+ u32 offset, u16 length, bool last_command,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_nvm_update *cmd =
(struct i40e_aqc_nvm_update *)&desc.params.raw;
- i40e_status status;
+ int status;
/* In offset the highest byte must be zeroed. */
if (offset & 0xFF000000) {
@@ -3255,8 +3113,8 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
u32 number, logical_id, phys_id;
struct i40e_hw_capabilities *p;
u16 id, ocp_cfg_word0;
- i40e_status status;
u8 major_rev;
+ int status;
u32 i = 0;
cap = (struct i40e_aqc_list_capabilities_element_resp *) buff;
@@ -3497,14 +3355,14 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
*
* Get the device capabilities descriptions from the firmware
**/
-i40e_status i40e_aq_discover_capabilities(struct i40e_hw *hw,
- void *buff, u16 buff_size, u16 *data_size,
- enum i40e_admin_queue_opc list_type_opc,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_discover_capabilities(struct i40e_hw *hw,
+ void *buff, u16 buff_size, u16 *data_size,
+ enum i40e_admin_queue_opc list_type_opc,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aqc_list_capabilites *cmd;
struct i40e_aq_desc desc;
- i40e_status status = 0;
+ int status = 0;
cmd = (struct i40e_aqc_list_capabilites *)&desc.params.raw;
@@ -3546,15 +3404,15 @@ exit:
*
* Update the NVM using the admin queue commands
**/
-i40e_status i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
- u32 offset, u16 length, void *data,
- bool last_command, u8 preservation_flags,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
+ u32 offset, u16 length, void *data,
+ bool last_command, u8 preservation_flags,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_nvm_update *cmd =
(struct i40e_aqc_nvm_update *)&desc.params.raw;
- i40e_status status;
+ int status;
/* In offset the highest byte must be zeroed. */
if (offset & 0xFF000000) {
@@ -3599,13 +3457,13 @@ i40e_aq_update_nvm_exit:
*
* Rearrange NVM structure, available only for transition FW
**/
-i40e_status i40e_aq_rearrange_nvm(struct i40e_hw *hw,
- u8 rearrange_nvm,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_rearrange_nvm(struct i40e_hw *hw,
+ u8 rearrange_nvm,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aqc_nvm_update *cmd;
- i40e_status status;
struct i40e_aq_desc desc;
+ int status;
cmd = (struct i40e_aqc_nvm_update *)&desc.params.raw;
@@ -3639,17 +3497,17 @@ i40e_aq_rearrange_nvm_exit:
*
* Requests the complete LLDP MIB (entire packet).
**/
-i40e_status i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
- u8 mib_type, void *buff, u16 buff_size,
- u16 *local_len, u16 *remote_len,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
+ u8 mib_type, void *buff, u16 buff_size,
+ u16 *local_len, u16 *remote_len,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_lldp_get_mib *cmd =
(struct i40e_aqc_lldp_get_mib *)&desc.params.raw;
struct i40e_aqc_lldp_get_mib *resp =
(struct i40e_aqc_lldp_get_mib *)&desc.params.raw;
- i40e_status status;
+ int status;
if (buff_size == 0 || !buff)
return I40E_ERR_PARAM;
@@ -3689,14 +3547,14 @@ i40e_status i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
*
* Set the LLDP MIB.
**/
-enum i40e_status_code
+int
i40e_aq_set_lldp_mib(struct i40e_hw *hw,
u8 mib_type, void *buff, u16 buff_size,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aqc_lldp_set_local_mib *cmd;
- enum i40e_status_code status;
struct i40e_aq_desc desc;
+ int status;
cmd = (struct i40e_aqc_lldp_set_local_mib *)&desc.params.raw;
if (buff_size == 0 || !buff)
@@ -3728,14 +3586,14 @@ i40e_aq_set_lldp_mib(struct i40e_hw *hw,
* Enable or Disable posting of an event on ARQ when LLDP MIB
* associated with the interface changes
**/
-i40e_status i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
- bool enable_update,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
+ bool enable_update,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_lldp_update_mib *cmd =
(struct i40e_aqc_lldp_update_mib *)&desc.params.raw;
- i40e_status status;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_update_mib);
@@ -3757,14 +3615,14 @@ i40e_status i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
* Restore LLDP Agent factory settings if @restore set to True. In other case
* only returns factory setting in AQ response.
**/
-enum i40e_status_code
+int
i40e_aq_restore_lldp(struct i40e_hw *hw, u8 *setting, bool restore,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_lldp_restore *cmd =
(struct i40e_aqc_lldp_restore *)&desc.params.raw;
- i40e_status status;
+ int status;
if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT)) {
i40e_debug(hw, I40E_DEBUG_ALL,
@@ -3794,14 +3652,14 @@ i40e_aq_restore_lldp(struct i40e_hw *hw, u8 *setting, bool restore,
*
* Stop or Shutdown the embedded LLDP Agent
**/
-i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
- bool persist,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
+ bool persist,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_lldp_stop *cmd =
(struct i40e_aqc_lldp_stop *)&desc.params.raw;
- i40e_status status;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_stop);
@@ -3829,13 +3687,13 @@ i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
*
* Start the embedded LLDP Agent on all ports.
**/
-i40e_status i40e_aq_start_lldp(struct i40e_hw *hw, bool persist,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_start_lldp(struct i40e_hw *hw, bool persist,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_lldp_start *cmd =
(struct i40e_aqc_lldp_start *)&desc.params.raw;
- i40e_status status;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_start);
@@ -3861,14 +3719,14 @@ i40e_status i40e_aq_start_lldp(struct i40e_hw *hw, bool persist,
* @dcb_enable: True if DCB configuration needs to be applied
*
**/
-enum i40e_status_code
+int
i40e_aq_set_dcb_parameters(struct i40e_hw *hw, bool dcb_enable,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_set_dcb_parameters *cmd =
(struct i40e_aqc_set_dcb_parameters *)&desc.params.raw;
- i40e_status status;
+ int status;
if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE))
return I40E_ERR_DEVICE_NOT_SUPPORTED;
@@ -3894,12 +3752,12 @@ i40e_aq_set_dcb_parameters(struct i40e_hw *hw, bool dcb_enable,
*
* Get CEE DCBX mode operational configuration from firmware
**/
-i40e_status i40e_aq_get_cee_dcb_config(struct i40e_hw *hw,
- void *buff, u16 buff_size,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_get_cee_dcb_config(struct i40e_hw *hw,
+ void *buff, u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
- i40e_status status;
+ int status;
if (buff_size == 0 || !buff)
return I40E_ERR_PARAM;
@@ -3925,17 +3783,17 @@ i40e_status i40e_aq_get_cee_dcb_config(struct i40e_hw *hw,
* and this function will call cpu_to_le16 to convert from Host byte order to
* Little Endian order.
**/
-i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
- u16 udp_port, u8 protocol_index,
- u8 *filter_index,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
+ u16 udp_port, u8 protocol_index,
+ u8 *filter_index,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_add_udp_tunnel *cmd =
(struct i40e_aqc_add_udp_tunnel *)&desc.params.raw;
struct i40e_aqc_del_udp_tunnel_completion *resp =
(struct i40e_aqc_del_udp_tunnel_completion *)&desc.params.raw;
- i40e_status status;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_udp_tunnel);
@@ -3956,13 +3814,13 @@ i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
* @index: filter index
* @cmd_details: pointer to command details structure or NULL
**/
-i40e_status i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_remove_udp_tunnel *cmd =
(struct i40e_aqc_remove_udp_tunnel *)&desc.params.raw;
- i40e_status status;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_del_udp_tunnel);
@@ -3981,13 +3839,13 @@ i40e_status i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index,
*
* This deletes a switch element from the switch.
**/
-i40e_status i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_switch_seid *cmd =
(struct i40e_aqc_switch_seid *)&desc.params.raw;
- i40e_status status;
+ int status;
if (seid == 0)
return I40E_ERR_PARAM;
@@ -4011,11 +3869,11 @@ i40e_status i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
* recomputed and modified. The retval field in the descriptor
* will be set to 0 when RPB is modified.
**/
-i40e_status i40e_aq_dcb_updated(struct i40e_hw *hw,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_dcb_updated(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
- i40e_status status;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_dcb_updated);
@@ -4035,15 +3893,15 @@ i40e_status i40e_aq_dcb_updated(struct i40e_hw *hw,
*
* Generic command handler for Tx scheduler AQ commands
**/
-static i40e_status i40e_aq_tx_sched_cmd(struct i40e_hw *hw, u16 seid,
+static int i40e_aq_tx_sched_cmd(struct i40e_hw *hw, u16 seid,
void *buff, u16 buff_size,
- enum i40e_admin_queue_opc opcode,
+ enum i40e_admin_queue_opc opcode,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_tx_sched_ind *cmd =
(struct i40e_aqc_tx_sched_ind *)&desc.params.raw;
- i40e_status status;
+ int status;
bool cmd_param_flag = false;
switch (opcode) {
@@ -4093,14 +3951,14 @@ static i40e_status i40e_aq_tx_sched_cmd(struct i40e_hw *hw, u16 seid,
* @max_credit: Max BW limit credits
* @cmd_details: pointer to command details structure or NULL
**/
-i40e_status i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw,
+int i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw,
u16 seid, u16 credit, u8 max_credit,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_configure_vsi_bw_limit *cmd =
(struct i40e_aqc_configure_vsi_bw_limit *)&desc.params.raw;
- i40e_status status;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_configure_vsi_bw_limit);
@@ -4121,10 +3979,10 @@ i40e_status i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw,
* @bw_data: Buffer holding enabled TCs, relative TC BW limit/credits
* @cmd_details: pointer to command details structure or NULL
**/
-i40e_status i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw,
- u16 seid,
- struct i40e_aqc_configure_vsi_tc_bw_data *bw_data,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_configure_vsi_tc_bw_data *bw_data,
+ struct i40e_asq_cmd_details *cmd_details)
{
return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
i40e_aqc_opc_configure_vsi_tc_bw,
@@ -4139,11 +3997,12 @@ i40e_status i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw,
* @opcode: Tx scheduler AQ command opcode
* @cmd_details: pointer to command details structure or NULL
**/
-i40e_status i40e_aq_config_switch_comp_ets(struct i40e_hw *hw,
- u16 seid,
- struct i40e_aqc_configure_switching_comp_ets_data *ets_data,
- enum i40e_admin_queue_opc opcode,
- struct i40e_asq_cmd_details *cmd_details)
+int
+i40e_aq_config_switch_comp_ets(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_configure_switching_comp_ets_data *ets_data,
+ enum i40e_admin_queue_opc opcode,
+ struct i40e_asq_cmd_details *cmd_details)
{
return i40e_aq_tx_sched_cmd(hw, seid, (void *)ets_data,
sizeof(*ets_data), opcode, cmd_details);
@@ -4156,7 +4015,8 @@ i40e_status i40e_aq_config_switch_comp_ets(struct i40e_hw *hw,
* @bw_data: Buffer holding enabled TCs, relative/absolute TC BW limit/credits
* @cmd_details: pointer to command details structure or NULL
**/
-i40e_status i40e_aq_config_switch_comp_bw_config(struct i40e_hw *hw,
+int
+i40e_aq_config_switch_comp_bw_config(struct i40e_hw *hw,
u16 seid,
struct i40e_aqc_configure_switching_comp_bw_config_data *bw_data,
struct i40e_asq_cmd_details *cmd_details)
@@ -4173,10 +4033,11 @@ i40e_status i40e_aq_config_switch_comp_bw_config(struct i40e_hw *hw,
* @bw_data: Buffer to hold VSI BW configuration
* @cmd_details: pointer to command details structure or NULL
**/
-i40e_status i40e_aq_query_vsi_bw_config(struct i40e_hw *hw,
- u16 seid,
- struct i40e_aqc_query_vsi_bw_config_resp *bw_data,
- struct i40e_asq_cmd_details *cmd_details)
+int
+i40e_aq_query_vsi_bw_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_vsi_bw_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details)
{
return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
i40e_aqc_opc_query_vsi_bw_config,
@@ -4190,10 +4051,11 @@ i40e_status i40e_aq_query_vsi_bw_config(struct i40e_hw *hw,
* @bw_data: Buffer to hold VSI BW configuration per TC
* @cmd_details: pointer to command details structure or NULL
**/
-i40e_status i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw,
- u16 seid,
- struct i40e_aqc_query_vsi_ets_sla_config_resp *bw_data,
- struct i40e_asq_cmd_details *cmd_details)
+int
+i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_vsi_ets_sla_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details)
{
return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
i40e_aqc_opc_query_vsi_ets_sla_config,
@@ -4207,10 +4069,11 @@ i40e_status i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw,
* @bw_data: Buffer to hold switching component's per TC BW config
* @cmd_details: pointer to command details structure or NULL
**/
-i40e_status i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw,
- u16 seid,
- struct i40e_aqc_query_switching_comp_ets_config_resp *bw_data,
- struct i40e_asq_cmd_details *cmd_details)
+int
+i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_switching_comp_ets_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details)
{
return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
i40e_aqc_opc_query_switching_comp_ets_config,
@@ -4224,10 +4087,11 @@ i40e_status i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw,
* @bw_data: Buffer to hold current ETS configuration for the Physical Port
* @cmd_details: pointer to command details structure or NULL
**/
-i40e_status i40e_aq_query_port_ets_config(struct i40e_hw *hw,
- u16 seid,
- struct i40e_aqc_query_port_ets_config_resp *bw_data,
- struct i40e_asq_cmd_details *cmd_details)
+int
+i40e_aq_query_port_ets_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_port_ets_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details)
{
return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
i40e_aqc_opc_query_port_ets_config,
@@ -4241,10 +4105,11 @@ i40e_status i40e_aq_query_port_ets_config(struct i40e_hw *hw,
* @bw_data: Buffer to hold switching component's BW configuration
* @cmd_details: pointer to command details structure or NULL
**/
-i40e_status i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw,
- u16 seid,
- struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data,
- struct i40e_asq_cmd_details *cmd_details)
+int
+i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details)
{
return i40e_aq_tx_sched_cmd(hw, seid, (void *)bw_data, sizeof(*bw_data),
i40e_aqc_opc_query_switching_comp_bw_config,
@@ -4263,8 +4128,9 @@ i40e_status i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw,
* Returns 0 if the values passed are valid and within
* range else returns an error.
**/
-static i40e_status i40e_validate_filter_settings(struct i40e_hw *hw,
- struct i40e_filter_control_settings *settings)
+static int
+i40e_validate_filter_settings(struct i40e_hw *hw,
+ struct i40e_filter_control_settings *settings)
{
u32 fcoe_cntx_size, fcoe_filt_size;
u32 fcoe_fmax;
@@ -4350,11 +4216,11 @@ static i40e_status i40e_validate_filter_settings(struct i40e_hw *hw,
* for a single PF. It is expected that these settings are programmed
* at the driver initialization time.
**/
-i40e_status i40e_set_filter_control(struct i40e_hw *hw,
- struct i40e_filter_control_settings *settings)
+int i40e_set_filter_control(struct i40e_hw *hw,
+ struct i40e_filter_control_settings *settings)
{
- i40e_status ret = 0;
u32 hash_lut_size = 0;
+ int ret = 0;
u32 val;
if (!settings)
@@ -4424,11 +4290,11 @@ i40e_status i40e_set_filter_control(struct i40e_hw *hw,
* In return it will update the total number of perfect filter count in
* the stats member.
**/
-i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
- u8 *mac_addr, u16 ethtype, u16 flags,
- u16 vsi_seid, u16 queue, bool is_add,
- struct i40e_control_filter_stats *stats,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
+ u8 *mac_addr, u16 ethtype, u16 flags,
+ u16 vsi_seid, u16 queue, bool is_add,
+ struct i40e_control_filter_stats *stats,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_add_remove_control_packet_filter *cmd =
@@ -4437,7 +4303,7 @@ i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
struct i40e_aqc_add_remove_control_packet_filter_completion *resp =
(struct i40e_aqc_add_remove_control_packet_filter_completion *)
&desc.params.raw;
- i40e_status status;
+ int status;
if (vsi_seid == 0)
return I40E_ERR_PARAM;
@@ -4483,7 +4349,7 @@ void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw,
I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP |
I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX;
u16 ethtype = I40E_FLOW_CONTROL_ETHTYPE;
- i40e_status status;
+ int status;
status = i40e_aq_add_rem_control_packet_filter(hw, NULL, ethtype, flag,
seid, 0, true, NULL,
@@ -4505,14 +4371,14 @@ void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw,
* is not passed then only register at 'reg_addr0' is read.
*
**/
-static i40e_status i40e_aq_alternate_read(struct i40e_hw *hw,
- u32 reg_addr0, u32 *reg_val0,
- u32 reg_addr1, u32 *reg_val1)
+static int i40e_aq_alternate_read(struct i40e_hw *hw,
+ u32 reg_addr0, u32 *reg_val0,
+ u32 reg_addr1, u32 *reg_val1)
{
struct i40e_aq_desc desc;
struct i40e_aqc_alternate_write *cmd_resp =
(struct i40e_aqc_alternate_write *)&desc.params.raw;
- i40e_status status;
+ int status;
if (!reg_val0)
return I40E_ERR_PARAM;
@@ -4541,12 +4407,12 @@ static i40e_status i40e_aq_alternate_read(struct i40e_hw *hw,
*
* Suspend port's Tx traffic
**/
-i40e_status i40e_aq_suspend_port_tx(struct i40e_hw *hw, u16 seid,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_suspend_port_tx(struct i40e_hw *hw, u16 seid,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aqc_tx_sched_ind *cmd;
struct i40e_aq_desc desc;
- i40e_status status;
+ int status;
cmd = (struct i40e_aqc_tx_sched_ind *)&desc.params.raw;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_suspend_port_tx);
@@ -4563,11 +4429,11 @@ i40e_status i40e_aq_suspend_port_tx(struct i40e_hw *hw, u16 seid,
*
* Resume port's Tx traffic
**/
-i40e_status i40e_aq_resume_port_tx(struct i40e_hw *hw,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_resume_port_tx(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
- i40e_status status;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_resume_port_tx);
@@ -4637,18 +4503,18 @@ void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status)
* Dump internal FW/HW data for debug purposes.
*
**/
-i40e_status i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id,
- u8 table_id, u32 start_index, u16 buff_size,
- void *buff, u16 *ret_buff_size,
- u8 *ret_next_table, u32 *ret_next_index,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id,
+ u8 table_id, u32 start_index, u16 buff_size,
+ void *buff, u16 *ret_buff_size,
+ u8 *ret_next_table, u32 *ret_next_index,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_debug_dump_internals *cmd =
(struct i40e_aqc_debug_dump_internals *)&desc.params.raw;
struct i40e_aqc_debug_dump_internals *resp =
(struct i40e_aqc_debug_dump_internals *)&desc.params.raw;
- i40e_status status;
+ int status;
if (buff_size == 0 || !buff)
return I40E_ERR_PARAM;
@@ -4689,12 +4555,12 @@ i40e_status i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id,
*
* Read bw from the alternate ram for the given pf
**/
-i40e_status i40e_read_bw_from_alt_ram(struct i40e_hw *hw,
- u32 *max_bw, u32 *min_bw,
- bool *min_valid, bool *max_valid)
+int i40e_read_bw_from_alt_ram(struct i40e_hw *hw,
+ u32 *max_bw, u32 *min_bw,
+ bool *min_valid, bool *max_valid)
{
- i40e_status status;
u32 max_bw_addr, min_bw_addr;
+ int status;
/* Calculate the address of the min/max bw registers */
max_bw_addr = I40E_ALT_STRUCT_FIRST_PF_OFFSET +
@@ -4729,13 +4595,14 @@ i40e_status i40e_read_bw_from_alt_ram(struct i40e_hw *hw,
*
* Configure partitions guaranteed/max bw
**/
-i40e_status i40e_aq_configure_partition_bw(struct i40e_hw *hw,
- struct i40e_aqc_configure_partition_bw_data *bw_data,
- struct i40e_asq_cmd_details *cmd_details)
+int
+i40e_aq_configure_partition_bw(struct i40e_hw *hw,
+ struct i40e_aqc_configure_partition_bw_data *bw_data,
+ struct i40e_asq_cmd_details *cmd_details)
{
- i40e_status status;
- struct i40e_aq_desc desc;
u16 bwd_size = sizeof(*bw_data);
+ struct i40e_aq_desc desc;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_configure_partition_bw);
@@ -4764,11 +4631,11 @@ i40e_status i40e_aq_configure_partition_bw(struct i40e_hw *hw,
*
* Reads specified PHY register value
**/
-i40e_status i40e_read_phy_register_clause22(struct i40e_hw *hw,
- u16 reg, u8 phy_addr, u16 *value)
+int i40e_read_phy_register_clause22(struct i40e_hw *hw,
+ u16 reg, u8 phy_addr, u16 *value)
{
- i40e_status status = I40E_ERR_TIMEOUT;
u8 port_num = (u8)hw->func_caps.mdio_port_num;
+ int status = I40E_ERR_TIMEOUT;
u32 command = 0;
u16 retry = 1000;
@@ -4809,11 +4676,11 @@ i40e_status i40e_read_phy_register_clause22(struct i40e_hw *hw,
*
* Writes specified PHY register value
**/
-i40e_status i40e_write_phy_register_clause22(struct i40e_hw *hw,
- u16 reg, u8 phy_addr, u16 value)
+int i40e_write_phy_register_clause22(struct i40e_hw *hw,
+ u16 reg, u8 phy_addr, u16 value)
{
- i40e_status status = I40E_ERR_TIMEOUT;
u8 port_num = (u8)hw->func_caps.mdio_port_num;
+ int status = I40E_ERR_TIMEOUT;
u32 command = 0;
u16 retry = 1000;
@@ -4850,13 +4717,13 @@ i40e_status i40e_write_phy_register_clause22(struct i40e_hw *hw,
*
* Reads specified PHY register value
**/
-i40e_status i40e_read_phy_register_clause45(struct i40e_hw *hw,
- u8 page, u16 reg, u8 phy_addr, u16 *value)
+int i40e_read_phy_register_clause45(struct i40e_hw *hw,
+ u8 page, u16 reg, u8 phy_addr, u16 *value)
{
- i40e_status status = I40E_ERR_TIMEOUT;
+ u8 port_num = hw->func_caps.mdio_port_num;
+ int status = I40E_ERR_TIMEOUT;
u32 command = 0;
u16 retry = 1000;
- u8 port_num = hw->func_caps.mdio_port_num;
command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) |
(page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
@@ -4924,13 +4791,13 @@ phy_read_end:
*
* Writes value to specified PHY register
**/
-i40e_status i40e_write_phy_register_clause45(struct i40e_hw *hw,
- u8 page, u16 reg, u8 phy_addr, u16 value)
+int i40e_write_phy_register_clause45(struct i40e_hw *hw,
+ u8 page, u16 reg, u8 phy_addr, u16 value)
{
- i40e_status status = I40E_ERR_TIMEOUT;
- u32 command = 0;
- u16 retry = 1000;
u8 port_num = hw->func_caps.mdio_port_num;
+ int status = I40E_ERR_TIMEOUT;
+ u16 retry = 1000;
+ u32 command = 0;
command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) |
(page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
@@ -4991,10 +4858,10 @@ phy_write_end:
*
* Writes value to specified PHY register
**/
-i40e_status i40e_write_phy_register(struct i40e_hw *hw,
- u8 page, u16 reg, u8 phy_addr, u16 value)
+int i40e_write_phy_register(struct i40e_hw *hw,
+ u8 page, u16 reg, u8 phy_addr, u16 value)
{
- i40e_status status;
+ int status;
switch (hw->device_id) {
case I40E_DEV_ID_1G_BASE_T_X722:
@@ -5030,10 +4897,10 @@ i40e_status i40e_write_phy_register(struct i40e_hw *hw,
*
* Reads specified PHY register value
**/
-i40e_status i40e_read_phy_register(struct i40e_hw *hw,
- u8 page, u16 reg, u8 phy_addr, u16 *value)
+int i40e_read_phy_register(struct i40e_hw *hw,
+ u8 page, u16 reg, u8 phy_addr, u16 *value)
{
- i40e_status status;
+ int status;
switch (hw->device_id) {
case I40E_DEV_ID_1G_BASE_T_X722:
@@ -5082,17 +4949,17 @@ u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num)
*
* Blinks PHY link LED
**/
-i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw,
- u32 time, u32 interval)
+int i40e_blink_phy_link_led(struct i40e_hw *hw,
+ u32 time, u32 interval)
{
- i40e_status status = 0;
- u32 i;
- u16 led_ctl;
- u16 gpio_led_port;
- u16 led_reg;
u16 led_addr = I40E_PHY_LED_PROV_REG_1;
+ u16 gpio_led_port;
u8 phy_addr = 0;
+ int status = 0;
+ u16 led_ctl;
u8 port_num;
+ u16 led_reg;
+ u32 i;
i = rd32(hw, I40E_PFGEN_PORTNUM);
port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
@@ -5154,12 +5021,12 @@ phy_blinking_end:
* @led_addr: LED register address
* @reg_val: read register value
**/
-static enum i40e_status_code i40e_led_get_reg(struct i40e_hw *hw, u16 led_addr,
- u32 *reg_val)
+static int i40e_led_get_reg(struct i40e_hw *hw, u16 led_addr,
+ u32 *reg_val)
{
- enum i40e_status_code status;
u8 phy_addr = 0;
u8 port_num;
+ int status;
u32 i;
*reg_val = 0;
@@ -5188,12 +5055,12 @@ static enum i40e_status_code i40e_led_get_reg(struct i40e_hw *hw, u16 led_addr,
* @led_addr: LED register address
* @reg_val: register value to write
**/
-static enum i40e_status_code i40e_led_set_reg(struct i40e_hw *hw, u16 led_addr,
- u32 reg_val)
+static int i40e_led_set_reg(struct i40e_hw *hw, u16 led_addr,
+ u32 reg_val)
{
- enum i40e_status_code status;
u8 phy_addr = 0;
u8 port_num;
+ int status;
u32 i;
if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
@@ -5223,17 +5090,17 @@ static enum i40e_status_code i40e_led_set_reg(struct i40e_hw *hw, u16 led_addr,
* @val: original value of register to use
*
**/
-i40e_status i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
- u16 *val)
+int i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
+ u16 *val)
{
- i40e_status status = 0;
u16 gpio_led_port;
u8 phy_addr = 0;
- u16 reg_val;
+ u32 reg_val_aq;
+ int status = 0;
u16 temp_addr;
+ u16 reg_val;
u8 port_num;
u32 i;
- u32 reg_val_aq;
if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
status =
@@ -5278,12 +5145,12 @@ i40e_status i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
* Set led's on or off when controlled by the PHY
*
**/
-i40e_status i40e_led_set_phy(struct i40e_hw *hw, bool on,
- u16 led_addr, u32 mode)
+int i40e_led_set_phy(struct i40e_hw *hw, bool on,
+ u16 led_addr, u32 mode)
{
- i40e_status status = 0;
u32 led_ctl = 0;
u32 led_reg = 0;
+ int status = 0;
status = i40e_led_get_reg(hw, led_addr, &led_reg);
if (status)
@@ -5327,14 +5194,14 @@ restore_config:
* Use the firmware to read the Rx control register,
* especially useful if the Rx unit is under heavy pressure
**/
-i40e_status i40e_aq_rx_ctl_read_register(struct i40e_hw *hw,
- u32 reg_addr, u32 *reg_val,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_rx_ctl_read_register(struct i40e_hw *hw,
+ u32 reg_addr, u32 *reg_val,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_rx_ctl_reg_read_write *cmd_resp =
(struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw;
- i40e_status status;
+ int status;
if (!reg_val)
return I40E_ERR_PARAM;
@@ -5358,8 +5225,8 @@ i40e_status i40e_aq_rx_ctl_read_register(struct i40e_hw *hw,
**/
u32 i40e_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr)
{
- i40e_status status = 0;
bool use_register;
+ int status = 0;
int retry = 5;
u32 val = 0;
@@ -5393,14 +5260,14 @@ do_retry:
* Use the firmware to write to an Rx control register,
* especially useful if the Rx unit is under heavy pressure
**/
-i40e_status i40e_aq_rx_ctl_write_register(struct i40e_hw *hw,
- u32 reg_addr, u32 reg_val,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_rx_ctl_write_register(struct i40e_hw *hw,
+ u32 reg_addr, u32 reg_val,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_rx_ctl_reg_read_write *cmd =
(struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw;
- i40e_status status;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_write);
@@ -5420,8 +5287,8 @@ i40e_status i40e_aq_rx_ctl_write_register(struct i40e_hw *hw,
**/
void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val)
{
- i40e_status status = 0;
bool use_register;
+ int status = 0;
int retry = 5;
use_register = (((hw->aq.api_maj_ver == 1) &&
@@ -5483,16 +5350,16 @@ static void i40e_mdio_if_number_selection(struct i40e_hw *hw, bool set_mdio,
* NOTE: In common cases MDIO I/F number should not be changed, thats why you
* may use simple wrapper i40e_aq_set_phy_register.
**/
-enum i40e_status_code i40e_aq_set_phy_register_ext(struct i40e_hw *hw,
- u8 phy_select, u8 dev_addr, bool page_change,
- bool set_mdio, u8 mdio_num,
- u32 reg_addr, u32 reg_val,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_set_phy_register_ext(struct i40e_hw *hw,
+ u8 phy_select, u8 dev_addr, bool page_change,
+ bool set_mdio, u8 mdio_num,
+ u32 reg_addr, u32 reg_val,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_phy_register_access *cmd =
(struct i40e_aqc_phy_register_access *)&desc.params.raw;
- i40e_status status;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_phy_register);
@@ -5528,16 +5395,16 @@ enum i40e_status_code i40e_aq_set_phy_register_ext(struct i40e_hw *hw,
* NOTE: In common cases MDIO I/F number should not be changed, thats why you
* may use simple wrapper i40e_aq_get_phy_register.
**/
-enum i40e_status_code i40e_aq_get_phy_register_ext(struct i40e_hw *hw,
- u8 phy_select, u8 dev_addr, bool page_change,
- bool set_mdio, u8 mdio_num,
- u32 reg_addr, u32 *reg_val,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_get_phy_register_ext(struct i40e_hw *hw,
+ u8 phy_select, u8 dev_addr, bool page_change,
+ bool set_mdio, u8 mdio_num,
+ u32 reg_addr, u32 *reg_val,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_phy_register_access *cmd =
(struct i40e_aqc_phy_register_access *)&desc.params.raw;
- i40e_status status;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_get_phy_register);
@@ -5568,18 +5435,17 @@ enum i40e_status_code i40e_aq_get_phy_register_ext(struct i40e_hw *hw,
* @error_info: returns error information
* @cmd_details: pointer to command details structure or NULL
**/
-enum
-i40e_status_code i40e_aq_write_ddp(struct i40e_hw *hw, void *buff,
- u16 buff_size, u32 track_id,
- u32 *error_offset, u32 *error_info,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_write_ddp(struct i40e_hw *hw, void *buff,
+ u16 buff_size, u32 track_id,
+ u32 *error_offset, u32 *error_info,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_write_personalization_profile *cmd =
(struct i40e_aqc_write_personalization_profile *)
&desc.params.raw;
struct i40e_aqc_write_ddp_resp *resp;
- i40e_status status;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_write_personalization_profile);
@@ -5612,15 +5478,14 @@ i40e_status_code i40e_aq_write_ddp(struct i40e_hw *hw, void *buff,
* @flags: AdminQ command flags
* @cmd_details: pointer to command details structure or NULL
**/
-enum
-i40e_status_code i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
- u16 buff_size, u8 flags,
- struct i40e_asq_cmd_details *cmd_details)
+int i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
+ u16 buff_size, u8 flags,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_get_applied_profiles *cmd =
(struct i40e_aqc_get_applied_profiles *)&desc.params.raw;
- i40e_status status;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_get_personalization_profile_list);
@@ -5719,14 +5584,13 @@ i40e_find_section_in_profile(u32 section_type,
* @hw: pointer to the hw struct
* @aq: command buffer containing all data to execute AQ
**/
-static enum
-i40e_status_code i40e_ddp_exec_aq_section(struct i40e_hw *hw,
- struct i40e_profile_aq_section *aq)
+static int i40e_ddp_exec_aq_section(struct i40e_hw *hw,
+ struct i40e_profile_aq_section *aq)
{
- i40e_status status;
struct i40e_aq_desc desc;
u8 *msg = NULL;
u16 msglen;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc, aq->opcode);
desc.flags |= cpu_to_le16(aq->flags);
@@ -5766,14 +5630,14 @@ i40e_status_code i40e_ddp_exec_aq_section(struct i40e_hw *hw,
*
* Validates supported devices and profile's sections.
*/
-static enum i40e_status_code
+static int
i40e_validate_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
u32 track_id, bool rollback)
{
struct i40e_profile_section_header *sec = NULL;
- i40e_status status = 0;
struct i40e_section_table *sec_tbl;
u32 vendor_dev_id;
+ int status = 0;
u32 dev_cnt;
u32 sec_off;
u32 i;
@@ -5831,16 +5695,16 @@ i40e_validate_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
*
* Handles the download of a complete package.
*/
-enum i40e_status_code
+int
i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
u32 track_id)
{
- i40e_status status = 0;
- struct i40e_section_table *sec_tbl;
struct i40e_profile_section_header *sec = NULL;
struct i40e_profile_aq_section *ddp_aq;
- u32 section_size = 0;
+ struct i40e_section_table *sec_tbl;
u32 offset = 0, info = 0;
+ u32 section_size = 0;
+ int status = 0;
u32 sec_off;
u32 i;
@@ -5894,15 +5758,15 @@ i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
*
* Rolls back previously loaded package.
*/
-enum i40e_status_code
+int
i40e_rollback_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
u32 track_id)
{
struct i40e_profile_section_header *sec = NULL;
- i40e_status status = 0;
struct i40e_section_table *sec_tbl;
u32 offset = 0, info = 0;
u32 section_size = 0;
+ int status = 0;
u32 sec_off;
int i;
@@ -5946,15 +5810,15 @@ i40e_rollback_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile,
*
* Register a profile to the list of loaded profiles.
*/
-enum i40e_status_code
+int
i40e_add_pinfo_to_list(struct i40e_hw *hw,
struct i40e_profile_segment *profile,
u8 *profile_info_sec, u32 track_id)
{
- i40e_status status = 0;
struct i40e_profile_section_header *sec = NULL;
struct i40e_profile_info *pinfo;
u32 offset = 0, info = 0;
+ int status = 0;
sec = (struct i40e_profile_section_header *)profile_info_sec;
sec->tbl_size = 1;
@@ -5988,7 +5852,7 @@ i40e_add_pinfo_to_list(struct i40e_hw *hw,
* of the function.
*
**/
-enum i40e_status_code
+int
i40e_aq_add_cloud_filters(struct i40e_hw *hw, u16 seid,
struct i40e_aqc_cloud_filters_element_data *filters,
u8 filter_count)
@@ -5996,8 +5860,8 @@ i40e_aq_add_cloud_filters(struct i40e_hw *hw, u16 seid,
struct i40e_aq_desc desc;
struct i40e_aqc_add_remove_cloud_filters *cmd =
(struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
- enum i40e_status_code status;
u16 buff_len;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_add_cloud_filters);
@@ -6025,7 +5889,7 @@ i40e_aq_add_cloud_filters(struct i40e_hw *hw, u16 seid,
* function.
*
**/
-enum i40e_status_code
+int
i40e_aq_add_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
struct i40e_aqc_cloud_filters_element_bb *filters,
u8 filter_count)
@@ -6033,8 +5897,8 @@ i40e_aq_add_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
struct i40e_aq_desc desc;
struct i40e_aqc_add_remove_cloud_filters *cmd =
(struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
- i40e_status status;
u16 buff_len;
+ int status;
int i;
i40e_fill_default_direct_cmd_desc(&desc,
@@ -6082,7 +5946,7 @@ i40e_aq_add_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
* of the function.
*
**/
-enum i40e_status_code
+int
i40e_aq_rem_cloud_filters(struct i40e_hw *hw, u16 seid,
struct i40e_aqc_cloud_filters_element_data *filters,
u8 filter_count)
@@ -6090,8 +5954,8 @@ i40e_aq_rem_cloud_filters(struct i40e_hw *hw, u16 seid,
struct i40e_aq_desc desc;
struct i40e_aqc_add_remove_cloud_filters *cmd =
(struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
- enum i40e_status_code status;
u16 buff_len;
+ int status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_remove_cloud_filters);
@@ -6119,7 +5983,7 @@ i40e_aq_rem_cloud_filters(struct i40e_hw *hw, u16 seid,
* function.
*
**/
-enum i40e_status_code
+int
i40e_aq_rem_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
struct i40e_aqc_cloud_filters_element_bb *filters,
u8 filter_count)
@@ -6127,8 +5991,8 @@ i40e_aq_rem_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
struct i40e_aq_desc desc;
struct i40e_aqc_add_remove_cloud_filters *cmd =
(struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
- i40e_status status;
u16 buff_len;
+ int status;
int i;
i40e_fill_default_direct_cmd_desc(&desc,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
index 673f341f4c0c..90638b67f8dc 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.c
@@ -12,7 +12,7 @@
*
* Get the DCBX status from the Firmware
**/
-i40e_status i40e_get_dcbx_status(struct i40e_hw *hw, u16 *status)
+int i40e_get_dcbx_status(struct i40e_hw *hw, u16 *status)
{
u32 reg;
@@ -497,15 +497,15 @@ static void i40e_parse_org_tlv(struct i40e_lldp_org_tlv *tlv,
*
* Parse DCB configuration from the LLDPDU
**/
-i40e_status i40e_lldp_to_dcb_config(u8 *lldpmib,
- struct i40e_dcbx_config *dcbcfg)
+int i40e_lldp_to_dcb_config(u8 *lldpmib,
+ struct i40e_dcbx_config *dcbcfg)
{
- i40e_status ret = 0;
struct i40e_lldp_org_tlv *tlv;
- u16 type;
- u16 length;
u16 typelength;
u16 offset = 0;
+ int ret = 0;
+ u16 length;
+ u16 type;
if (!lldpmib || !dcbcfg)
return I40E_ERR_PARAM;
@@ -551,12 +551,12 @@ i40e_status i40e_lldp_to_dcb_config(u8 *lldpmib,
*
* Query DCB configuration from the Firmware
**/
-i40e_status i40e_aq_get_dcb_config(struct i40e_hw *hw, u8 mib_type,
- u8 bridgetype,
- struct i40e_dcbx_config *dcbcfg)
+int i40e_aq_get_dcb_config(struct i40e_hw *hw, u8 mib_type,
+ u8 bridgetype,
+ struct i40e_dcbx_config *dcbcfg)
{
- i40e_status ret = 0;
struct i40e_virt_mem mem;
+ int ret = 0;
u8 *lldpmib;
/* Allocate the LLDPDU */
@@ -767,9 +767,9 @@ static void i40e_cee_to_dcb_config(
*
* Get IEEE mode DCB configuration from the Firmware
**/
-static i40e_status i40e_get_ieee_dcb_config(struct i40e_hw *hw)
+static int i40e_get_ieee_dcb_config(struct i40e_hw *hw)
{
- i40e_status ret = 0;
+ int ret = 0;
/* IEEE mode */
hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_IEEE;
@@ -797,11 +797,11 @@ out:
*
* Get DCB configuration from the Firmware
**/
-i40e_status i40e_get_dcb_config(struct i40e_hw *hw)
+int i40e_get_dcb_config(struct i40e_hw *hw)
{
- i40e_status ret = 0;
- struct i40e_aqc_get_cee_dcb_cfg_resp cee_cfg;
struct i40e_aqc_get_cee_dcb_cfg_v1_resp cee_v1_cfg;
+ struct i40e_aqc_get_cee_dcb_cfg_resp cee_cfg;
+ int ret = 0;
/* If Firmware version < v4.33 on X710/XL710, IEEE only */
if ((hw->mac.type == I40E_MAC_XL710) &&
@@ -867,11 +867,11 @@ out:
*
* Update DCB configuration from the Firmware
**/
-i40e_status i40e_init_dcb(struct i40e_hw *hw, bool enable_mib_change)
+int i40e_init_dcb(struct i40e_hw *hw, bool enable_mib_change)
{
- i40e_status ret = 0;
struct i40e_lldp_variables lldp_cfg;
u8 adminstatus = 0;
+ int ret = 0;
if (!hw->func_caps.dcb)
return I40E_NOT_SUPPORTED;
@@ -940,13 +940,13 @@ i40e_status i40e_init_dcb(struct i40e_hw *hw, bool enable_mib_change)
* Get status of FW Link Layer Discovery Protocol (LLDP) Agent.
* Status of agent is reported via @lldp_status parameter.
**/
-enum i40e_status_code
+int
i40e_get_fw_lldp_status(struct i40e_hw *hw,
enum i40e_get_fw_lldp_status_resp *lldp_status)
{
struct i40e_virt_mem mem;
- i40e_status ret;
u8 *lldpmib;
+ int ret;
if (!lldp_status)
return I40E_ERR_PARAM;
@@ -1238,13 +1238,13 @@ static void i40e_add_dcb_tlv(struct i40e_lldp_org_tlv *tlv,
*
* Set DCB configuration to the Firmware
**/
-i40e_status i40e_set_dcb_config(struct i40e_hw *hw)
+int i40e_set_dcb_config(struct i40e_hw *hw)
{
struct i40e_dcbx_config *dcbcfg;
struct i40e_virt_mem mem;
u8 mib_type, *lldpmib;
- i40e_status ret;
u16 miblen;
+ int ret;
/* update the hw local config */
dcbcfg = &hw->local_dcbx_config;
@@ -1274,8 +1274,8 @@ i40e_status i40e_set_dcb_config(struct i40e_hw *hw)
*
* send DCB configuration to FW
**/
-i40e_status i40e_dcb_config_to_lldp(u8 *lldpmib, u16 *miblen,
- struct i40e_dcbx_config *dcbcfg)
+int i40e_dcb_config_to_lldp(u8 *lldpmib, u16 *miblen,
+ struct i40e_dcbx_config *dcbcfg)
{
u16 length, offset = 0, tlvid, typelength;
struct i40e_lldp_org_tlv *tlv;
@@ -1888,13 +1888,13 @@ void i40e_dcb_hw_rx_pb_config(struct i40e_hw *hw,
*
* Reads the LLDP configuration data from NVM using passed addresses
**/
-static i40e_status _i40e_read_lldp_cfg(struct i40e_hw *hw,
- struct i40e_lldp_variables *lldp_cfg,
- u8 module, u32 word_offset)
+static int _i40e_read_lldp_cfg(struct i40e_hw *hw,
+ struct i40e_lldp_variables *lldp_cfg,
+ u8 module, u32 word_offset)
{
u32 address, offset = (2 * word_offset);
- i40e_status ret;
__le16 raw_mem;
+ int ret;
u16 mem;
ret = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
@@ -1950,10 +1950,10 @@ err_lldp_cfg:
*
* Reads the LLDP configuration data from NVM
**/
-i40e_status i40e_read_lldp_cfg(struct i40e_hw *hw,
- struct i40e_lldp_variables *lldp_cfg)
+int i40e_read_lldp_cfg(struct i40e_hw *hw,
+ struct i40e_lldp_variables *lldp_cfg)
{
- i40e_status ret = 0;
+ int ret = 0;
u32 mem;
if (!lldp_cfg)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.h b/drivers/net/ethernet/intel/i40e/i40e_dcb.h
index 2370ceecb061..6b60dc9b7736 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.h
@@ -264,20 +264,20 @@ void i40e_dcb_hw_calculate_pool_sizes(struct i40e_hw *hw,
void i40e_dcb_hw_rx_pb_config(struct i40e_hw *hw,
struct i40e_rx_pb_config *old_pb_cfg,
struct i40e_rx_pb_config *new_pb_cfg);
-i40e_status i40e_get_dcbx_status(struct i40e_hw *hw,
- u16 *status);
-i40e_status i40e_lldp_to_dcb_config(u8 *lldpmib,
- struct i40e_dcbx_config *dcbcfg);
-i40e_status i40e_aq_get_dcb_config(struct i40e_hw *hw, u8 mib_type,
- u8 bridgetype,
- struct i40e_dcbx_config *dcbcfg);
-i40e_status i40e_get_dcb_config(struct i40e_hw *hw);
-i40e_status i40e_init_dcb(struct i40e_hw *hw,
- bool enable_mib_change);
-enum i40e_status_code
+int i40e_get_dcbx_status(struct i40e_hw *hw,
+ u16 *status);
+int i40e_lldp_to_dcb_config(u8 *lldpmib,
+ struct i40e_dcbx_config *dcbcfg);
+int i40e_aq_get_dcb_config(struct i40e_hw *hw, u8 mib_type,
+ u8 bridgetype,
+ struct i40e_dcbx_config *dcbcfg);
+int i40e_get_dcb_config(struct i40e_hw *hw);
+int i40e_init_dcb(struct i40e_hw *hw,
+ bool enable_mib_change);
+int
i40e_get_fw_lldp_status(struct i40e_hw *hw,
enum i40e_get_fw_lldp_status_resp *lldp_status);
-i40e_status i40e_set_dcb_config(struct i40e_hw *hw);
-i40e_status i40e_dcb_config_to_lldp(u8 *lldpmib, u16 *miblen,
- struct i40e_dcbx_config *dcbcfg);
+int i40e_set_dcb_config(struct i40e_hw *hw);
+int i40e_dcb_config_to_lldp(u8 *lldpmib, u16 *miblen,
+ struct i40e_dcbx_config *dcbcfg);
#endif /* _I40E_DCB_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
index e32c61909b31..195421d863ab 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
@@ -135,8 +135,8 @@ static int i40e_dcbnl_ieee_setets(struct net_device *netdev,
ret = i40e_hw_dcb_config(pf, &pf->tmp_cfg);
if (ret) {
dev_info(&pf->pdev->dev,
- "Failed setting DCB ETS configuration err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "Failed setting DCB ETS configuration err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
return -EINVAL;
}
@@ -174,8 +174,8 @@ static int i40e_dcbnl_ieee_setpfc(struct net_device *netdev,
ret = i40e_hw_dcb_config(pf, &pf->tmp_cfg);
if (ret) {
dev_info(&pf->pdev->dev,
- "Failed setting DCB PFC configuration err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "Failed setting DCB PFC configuration err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
return -EINVAL;
}
@@ -225,8 +225,8 @@ static int i40e_dcbnl_ieee_setapp(struct net_device *netdev,
ret = i40e_hw_dcb_config(pf, &pf->tmp_cfg);
if (ret) {
dev_info(&pf->pdev->dev,
- "Failed setting DCB configuration err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "Failed setting DCB configuration err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
return -EINVAL;
}
@@ -290,8 +290,8 @@ static int i40e_dcbnl_ieee_delapp(struct net_device *netdev,
ret = i40e_hw_dcb_config(pf, &pf->tmp_cfg);
if (ret) {
dev_info(&pf->pdev->dev,
- "Failed setting DCB configuration err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "Failed setting DCB configuration err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
return -EINVAL;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ddp.c b/drivers/net/ethernet/intel/i40e/i40e_ddp.c
index e1069ae658ad..7e8183762fd9 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ddp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ddp.c
@@ -36,7 +36,7 @@ static int i40e_ddp_does_profile_exist(struct i40e_hw *hw,
{
struct i40e_ddp_profile_list *profile_list;
u8 buff[I40E_PROFILE_LIST_SIZE];
- i40e_status status;
+ int status;
int i;
status = i40e_aq_get_ddp_list(hw, buff, I40E_PROFILE_LIST_SIZE, 0,
@@ -91,7 +91,7 @@ static int i40e_ddp_does_profile_overlap(struct i40e_hw *hw,
{
struct i40e_ddp_profile_list *profile_list;
u8 buff[I40E_PROFILE_LIST_SIZE];
- i40e_status status;
+ int status;
int i;
status = i40e_aq_get_ddp_list(hw, buff, I40E_PROFILE_LIST_SIZE, 0,
@@ -117,14 +117,14 @@ static int i40e_ddp_does_profile_overlap(struct i40e_hw *hw,
*
* Register a profile to the list of loaded profiles.
*/
-static enum i40e_status_code
+static int
i40e_add_pinfo(struct i40e_hw *hw, struct i40e_profile_segment *profile,
u8 *profile_info_sec, u32 track_id)
{
struct i40e_profile_section_header *sec;
struct i40e_profile_info *pinfo;
- i40e_status status;
u32 offset = 0, info = 0;
+ int status;
sec = (struct i40e_profile_section_header *)profile_info_sec;
sec->tbl_size = 1;
@@ -157,14 +157,14 @@ i40e_add_pinfo(struct i40e_hw *hw, struct i40e_profile_segment *profile,
*
* Removes DDP profile from the NIC.
**/
-static enum i40e_status_code
+static int
i40e_del_pinfo(struct i40e_hw *hw, struct i40e_profile_segment *profile,
u8 *profile_info_sec, u32 track_id)
{
struct i40e_profile_section_header *sec;
struct i40e_profile_info *pinfo;
- i40e_status status;
u32 offset = 0, info = 0;
+ int status;
sec = (struct i40e_profile_section_header *)profile_info_sec;
sec->tbl_size = 1;
@@ -270,12 +270,12 @@ int i40e_ddp_load(struct net_device *netdev, const u8 *data, size_t size,
struct i40e_profile_segment *profile_hdr;
struct i40e_profile_info pinfo;
struct i40e_package_header *pkg_hdr;
- i40e_status status;
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
u32 track_id;
int istatus;
+ int status;
pkg_hdr = (struct i40e_package_header *)data;
if (!i40e_ddp_is_pkg_hdr_valid(netdev, pkg_hdr, size))
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index c9dcd6d92c83..9954493cd448 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -918,9 +918,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
dev_info(&pf->pdev->dev, "deleting relay %d\n", veb_seid);
i40e_veb_release(pf->veb[i]);
} else if (strncmp(cmd_buf, "add pvid", 8) == 0) {
- i40e_status ret;
- u16 vid;
unsigned int v;
+ int ret;
+ u16 vid;
cnt = sscanf(&cmd_buf[8], "%i %u", &vsi_seid, &v);
if (cnt != 2) {
@@ -1284,7 +1284,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
}
} else if (strncmp(cmd_buf, "send aq_cmd", 11) == 0) {
struct i40e_aq_desc *desc;
- i40e_status ret;
+ int ret;
desc = kzalloc(sizeof(struct i40e_aq_desc), GFP_KERNEL);
if (!desc)
@@ -1330,9 +1330,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
desc = NULL;
} else if (strncmp(cmd_buf, "send indirect aq_cmd", 20) == 0) {
struct i40e_aq_desc *desc;
- i40e_status ret;
u16 buffer_len;
u8 *buff;
+ int ret;
desc = kzalloc(sizeof(struct i40e_aq_desc), GFP_KERNEL);
if (!desc)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_diag.c b/drivers/net/ethernet/intel/i40e/i40e_diag.c
index ef4d3762bf37..5b3519c6e362 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_diag.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_diag.c
@@ -10,8 +10,8 @@
* @reg: reg to be tested
* @mask: bits to be touched
**/
-static i40e_status i40e_diag_reg_pattern_test(struct i40e_hw *hw,
- u32 reg, u32 mask)
+static int i40e_diag_reg_pattern_test(struct i40e_hw *hw,
+ u32 reg, u32 mask)
{
static const u32 patterns[] = {
0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF
@@ -74,9 +74,9 @@ struct i40e_diag_reg_test_info i40e_reg_list[] = {
*
* Perform registers diagnostic test
**/
-i40e_status i40e_diag_reg_test(struct i40e_hw *hw)
+int i40e_diag_reg_test(struct i40e_hw *hw)
{
- i40e_status ret_code = 0;
+ int ret_code = 0;
u32 reg, mask;
u32 i, j;
@@ -114,9 +114,9 @@ i40e_status i40e_diag_reg_test(struct i40e_hw *hw)
*
* Perform EEPROM diagnostic test
**/
-i40e_status i40e_diag_eeprom_test(struct i40e_hw *hw)
+int i40e_diag_eeprom_test(struct i40e_hw *hw)
{
- i40e_status ret_code;
+ int ret_code;
u16 reg_val;
/* read NVM control word and if NVM valid, validate EEPROM checksum*/
diff --git a/drivers/net/ethernet/intel/i40e/i40e_diag.h b/drivers/net/ethernet/intel/i40e/i40e_diag.h
index c3340f320a18..e641035c7297 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_diag.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_diag.h
@@ -22,7 +22,7 @@ struct i40e_diag_reg_test_info {
extern struct i40e_diag_reg_test_info i40e_reg_list[];
-i40e_status i40e_diag_reg_test(struct i40e_hw *hw);
-i40e_status i40e_diag_eeprom_test(struct i40e_hw *hw);
+int i40e_diag_reg_test(struct i40e_hw *hw);
+int i40e_diag_eeprom_test(struct i40e_hw *hw);
#endif /* _I40E_DIAG_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 887a735fe2a7..4934ff58332c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -1226,8 +1226,8 @@ static int i40e_set_link_ksettings(struct net_device *netdev,
struct i40e_vsi *vsi = np->vsi;
struct i40e_hw *hw = &pf->hw;
bool autoneg_changed = false;
- i40e_status status = 0;
int timeout = 50;
+ int status = 0;
int err = 0;
__u32 speed;
u8 autoneg;
@@ -1455,8 +1455,8 @@ static int i40e_set_link_ksettings(struct net_device *netdev,
status = i40e_aq_set_phy_config(hw, &config, NULL);
if (status) {
netdev_info(netdev,
- "Set phy config failed, err %s aq_err %s\n",
- i40e_stat_str(hw, status),
+ "Set phy config failed, err %pe aq_err %s\n",
+ ERR_PTR(status),
i40e_aq_str(hw, hw->aq.asq_last_status));
err = -EAGAIN;
goto done;
@@ -1465,8 +1465,8 @@ static int i40e_set_link_ksettings(struct net_device *netdev,
status = i40e_update_link_info(hw);
if (status)
netdev_dbg(netdev,
- "Updating link info failed with err %s aq_err %s\n",
- i40e_stat_str(hw, status),
+ "Updating link info failed with err %pe aq_err %s\n",
+ ERR_PTR(status),
i40e_aq_str(hw, hw->aq.asq_last_status));
} else {
@@ -1485,7 +1485,7 @@ static int i40e_set_fec_cfg(struct net_device *netdev, u8 fec_cfg)
struct i40e_aq_get_phy_abilities_resp abilities;
struct i40e_pf *pf = np->vsi->back;
struct i40e_hw *hw = &pf->hw;
- i40e_status status = 0;
+ int status = 0;
u32 flags = 0;
int err = 0;
@@ -1517,8 +1517,8 @@ static int i40e_set_fec_cfg(struct net_device *netdev, u8 fec_cfg)
status = i40e_aq_set_phy_config(hw, &config, NULL);
if (status) {
netdev_info(netdev,
- "Set phy config failed, err %s aq_err %s\n",
- i40e_stat_str(hw, status),
+ "Set phy config failed, err %pe aq_err %s\n",
+ ERR_PTR(status),
i40e_aq_str(hw, hw->aq.asq_last_status));
err = -EAGAIN;
goto done;
@@ -1531,8 +1531,8 @@ static int i40e_set_fec_cfg(struct net_device *netdev, u8 fec_cfg)
* (e.g. no physical connection etc.)
*/
netdev_dbg(netdev,
- "Updating link info failed with err %s aq_err %s\n",
- i40e_stat_str(hw, status),
+ "Updating link info failed with err %pe aq_err %s\n",
+ ERR_PTR(status),
i40e_aq_str(hw, hw->aq.asq_last_status));
}
@@ -1547,7 +1547,7 @@ static int i40e_get_fec_param(struct net_device *netdev,
struct i40e_aq_get_phy_abilities_resp abilities;
struct i40e_pf *pf = np->vsi->back;
struct i40e_hw *hw = &pf->hw;
- i40e_status status = 0;
+ int status = 0;
int err = 0;
u8 fec_cfg;
@@ -1634,12 +1634,12 @@ static int i40e_nway_reset(struct net_device *netdev)
struct i40e_pf *pf = np->vsi->back;
struct i40e_hw *hw = &pf->hw;
bool link_up = hw->phy.link_info.link_info & I40E_AQ_LINK_UP;
- i40e_status ret = 0;
+ int ret = 0;
ret = i40e_aq_set_link_restart_an(hw, link_up, NULL);
if (ret) {
- netdev_info(netdev, "link restart failed, err %s aq_err %s\n",
- i40e_stat_str(hw, ret),
+ netdev_info(netdev, "link restart failed, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(hw, hw->aq.asq_last_status));
return -EIO;
}
@@ -1699,9 +1699,9 @@ static int i40e_set_pauseparam(struct net_device *netdev,
struct i40e_link_status *hw_link_info = &hw->phy.link_info;
struct i40e_dcbx_config *dcbx_cfg = &hw->local_dcbx_config;
bool link_up = hw_link_info->link_info & I40E_AQ_LINK_UP;
- i40e_status status;
u8 aq_failures;
int err = 0;
+ int status;
u32 is_an;
/* Changing the port's flow control is not supported if this isn't the
@@ -1755,20 +1755,20 @@ static int i40e_set_pauseparam(struct net_device *netdev,
status = i40e_set_fc(hw, &aq_failures, link_up);
if (aq_failures & I40E_SET_FC_AQ_FAIL_GET) {
- netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with err %s aq_err %s\n",
- i40e_stat_str(hw, status),
+ netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with err %pe aq_err %s\n",
+ ERR_PTR(status),
i40e_aq_str(hw, hw->aq.asq_last_status));
err = -EAGAIN;
}
if (aq_failures & I40E_SET_FC_AQ_FAIL_SET) {
- netdev_info(netdev, "Set fc failed on the set_phy_config call with err %s aq_err %s\n",
- i40e_stat_str(hw, status),
+ netdev_info(netdev, "Set fc failed on the set_phy_config call with err %pe aq_err %s\n",
+ ERR_PTR(status),
i40e_aq_str(hw, hw->aq.asq_last_status));
err = -EAGAIN;
}
if (aq_failures & I40E_SET_FC_AQ_FAIL_UPDATE) {
- netdev_info(netdev, "Set fc failed on the get_link_info call with err %s aq_err %s\n",
- i40e_stat_str(hw, status),
+ netdev_info(netdev, "Set fc failed on the get_link_info call with err %pe aq_err %s\n",
+ ERR_PTR(status),
i40e_aq_str(hw, hw->aq.asq_last_status));
err = -EAGAIN;
}
@@ -2583,8 +2583,8 @@ static u64 i40e_link_test(struct net_device *netdev, u64 *data)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_pf *pf = np->vsi->back;
- i40e_status status;
bool link_up = false;
+ int status;
netif_info(pf, hw, netdev, "link test\n");
status = i40e_get_link_status(&pf->hw, &link_up);
@@ -2807,11 +2807,11 @@ static int i40e_set_phys_id(struct net_device *netdev,
enum ethtool_phys_id_state state)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
- i40e_status ret = 0;
struct i40e_pf *pf = np->vsi->back;
struct i40e_hw *hw = &pf->hw;
int blink_freq = 2;
u16 temp_status;
+ int ret = 0;
switch (state) {
case ETHTOOL_ID_ACTIVE:
@@ -5247,7 +5247,7 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
u32 reset_needed = 0;
- i40e_status status;
+ int status;
u32 i, j;
orig_flags = READ_ONCE(pf->flags);
@@ -5362,8 +5362,8 @@ flags_complete:
0, NULL);
if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) {
dev_info(&pf->pdev->dev,
- "couldn't set switch config bits, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "couldn't set switch config bits, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
/* not a fatal problem, just keep going */
@@ -5435,9 +5435,8 @@ flags_complete:
return -EBUSY;
default:
dev_warn(&pf->pdev->dev,
- "Starting FW LLDP agent failed: error: %s, %s\n",
- i40e_stat_str(&pf->hw,
- status),
+ "Starting FW LLDP agent failed: error: %pe, %s\n",
+ ERR_PTR(status),
i40e_aq_str(&pf->hw,
adq_err));
return -EINVAL;
@@ -5477,8 +5476,8 @@ static int i40e_get_module_info(struct net_device *netdev,
u32 sff8472_comp = 0;
u32 sff8472_swap = 0;
u32 sff8636_rev = 0;
- i40e_status status;
u32 type = 0;
+ int status;
/* Check if firmware supports reading module EEPROM. */
if (!(hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE)) {
@@ -5582,8 +5581,8 @@ static int i40e_get_module_eeprom(struct net_device *netdev,
struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
bool is_sfp = false;
- i40e_status status;
u32 value = 0;
+ int status;
int i;
if (!ee || !ee->len || !data)
@@ -5624,10 +5623,10 @@ static int i40e_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_aq_get_phy_abilities_resp phy_cfg;
- enum i40e_status_code status = 0;
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
+ int status = 0;
/* Get initial PHY capabilities */
status = i40e_aq_get_phy_capabilities(hw, false, true, &phy_cfg, NULL);
@@ -5689,11 +5688,11 @@ static int i40e_set_eee(struct net_device *netdev, struct ethtool_eee *edata)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_aq_get_phy_abilities_resp abilities;
- enum i40e_status_code status = I40E_SUCCESS;
struct i40e_aq_set_phy_config config;
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
+ int status = I40E_SUCCESS;
__le16 eee_capability;
/* Deny parameters we don't support */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_hmc.c
index 163ee8c6311c..46f7950a0049 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_hmc.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.c
@@ -17,17 +17,17 @@
* @type: what type of segment descriptor we're manipulating
* @direct_mode_sz: size to alloc in direct mode
**/
-i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw,
- struct i40e_hmc_info *hmc_info,
- u32 sd_index,
- enum i40e_sd_entry_type type,
- u64 direct_mode_sz)
+int i40e_add_sd_table_entry(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 sd_index,
+ enum i40e_sd_entry_type type,
+ u64 direct_mode_sz)
{
enum i40e_memory_type mem_type __attribute__((unused));
struct i40e_hmc_sd_entry *sd_entry;
bool dma_mem_alloc_done = false;
+ int ret_code = I40E_SUCCESS;
struct i40e_dma_mem mem;
- i40e_status ret_code = I40E_SUCCESS;
u64 alloc_len;
if (NULL == hmc_info->sd_table.sd_entry) {
@@ -106,19 +106,19 @@ exit:
* aligned on 4K boundary and zeroed memory.
* 2. It should be 4K in size.
**/
-i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw,
- struct i40e_hmc_info *hmc_info,
- u32 pd_index,
- struct i40e_dma_mem *rsrc_pg)
+int i40e_add_pd_table_entry(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 pd_index,
+ struct i40e_dma_mem *rsrc_pg)
{
- i40e_status ret_code = 0;
struct i40e_hmc_pd_table *pd_table;
struct i40e_hmc_pd_entry *pd_entry;
struct i40e_dma_mem mem;
struct i40e_dma_mem *page = &mem;
u32 sd_idx, rel_pd_idx;
- u64 *pd_addr;
+ int ret_code = 0;
u64 page_desc;
+ u64 *pd_addr;
if (pd_index / I40E_HMC_PD_CNT_IN_SD >= hmc_info->sd_table.sd_cnt) {
ret_code = I40E_ERR_INVALID_PAGE_DESC_INDEX;
@@ -185,15 +185,15 @@ exit:
* 1. Caller can deallocate the memory used by backing storage after this
* function returns.
**/
-i40e_status i40e_remove_pd_bp(struct i40e_hw *hw,
- struct i40e_hmc_info *hmc_info,
- u32 idx)
+int i40e_remove_pd_bp(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx)
{
- i40e_status ret_code = 0;
struct i40e_hmc_pd_entry *pd_entry;
struct i40e_hmc_pd_table *pd_table;
struct i40e_hmc_sd_entry *sd_entry;
u32 sd_idx, rel_pd_idx;
+ int ret_code = 0;
u64 *pd_addr;
/* calculate index */
@@ -241,11 +241,11 @@ exit:
* @hmc_info: pointer to the HMC configuration information structure
* @idx: the page index
**/
-i40e_status i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info,
- u32 idx)
+int i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info,
+ u32 idx)
{
- i40e_status ret_code = 0;
struct i40e_hmc_sd_entry *sd_entry;
+ int ret_code = 0;
/* get the entry and decrease its ref counter */
sd_entry = &hmc_info->sd_table.sd_entry[idx];
@@ -269,9 +269,9 @@ exit:
* @idx: the page index
* @is_pf: used to distinguish between VF and PF
**/
-i40e_status i40e_remove_sd_bp_new(struct i40e_hw *hw,
- struct i40e_hmc_info *hmc_info,
- u32 idx, bool is_pf)
+int i40e_remove_sd_bp_new(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx, bool is_pf)
{
struct i40e_hmc_sd_entry *sd_entry;
@@ -290,11 +290,11 @@ i40e_status i40e_remove_sd_bp_new(struct i40e_hw *hw,
* @hmc_info: pointer to the HMC configuration information structure
* @idx: segment descriptor index to find the relevant page descriptor
**/
-i40e_status i40e_prep_remove_pd_page(struct i40e_hmc_info *hmc_info,
- u32 idx)
+int i40e_prep_remove_pd_page(struct i40e_hmc_info *hmc_info,
+ u32 idx)
{
- i40e_status ret_code = 0;
struct i40e_hmc_sd_entry *sd_entry;
+ int ret_code = 0;
sd_entry = &hmc_info->sd_table.sd_entry[idx];
@@ -318,9 +318,9 @@ exit:
* @idx: segment descriptor index to find the relevant page descriptor
* @is_pf: used to distinguish between VF and PF
**/
-i40e_status i40e_remove_pd_page_new(struct i40e_hw *hw,
- struct i40e_hmc_info *hmc_info,
- u32 idx, bool is_pf)
+int i40e_remove_pd_page_new(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx, bool is_pf)
{
struct i40e_hmc_sd_entry *sd_entry;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.h b/drivers/net/ethernet/intel/i40e/i40e_hmc.h
index 3113792afaff..9960da07a573 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_hmc.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.h
@@ -187,28 +187,28 @@ struct i40e_hmc_info {
/* add one more to the limit to correct our range */ \
*(pd_limit) += 1; \
}
-i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw,
- struct i40e_hmc_info *hmc_info,
- u32 sd_index,
- enum i40e_sd_entry_type type,
- u64 direct_mode_sz);
-
-i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw,
- struct i40e_hmc_info *hmc_info,
- u32 pd_index,
- struct i40e_dma_mem *rsrc_pg);
-i40e_status i40e_remove_pd_bp(struct i40e_hw *hw,
- struct i40e_hmc_info *hmc_info,
- u32 idx);
-i40e_status i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info,
- u32 idx);
-i40e_status i40e_remove_sd_bp_new(struct i40e_hw *hw,
- struct i40e_hmc_info *hmc_info,
- u32 idx, bool is_pf);
-i40e_status i40e_prep_remove_pd_page(struct i40e_hmc_info *hmc_info,
- u32 idx);
-i40e_status i40e_remove_pd_page_new(struct i40e_hw *hw,
- struct i40e_hmc_info *hmc_info,
- u32 idx, bool is_pf);
+
+int i40e_add_sd_table_entry(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 sd_index,
+ enum i40e_sd_entry_type type,
+ u64 direct_mode_sz);
+int i40e_add_pd_table_entry(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 pd_index,
+ struct i40e_dma_mem *rsrc_pg);
+int i40e_remove_pd_bp(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx);
+int i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info,
+ u32 idx);
+int i40e_remove_sd_bp_new(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx, bool is_pf);
+int i40e_prep_remove_pd_page(struct i40e_hmc_info *hmc_info,
+ u32 idx);
+int i40e_remove_pd_page_new(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx, bool is_pf);
#endif /* _I40E_HMC_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
index d6e92ecddfbd..40c101f286d1 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
@@ -74,12 +74,12 @@ static u64 i40e_calculate_l2fpm_size(u32 txq_num, u32 rxq_num,
* Assumptions:
* - HMC Resource Profile has been selected before calling this function.
**/
-i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
- u32 rxq_num, u32 fcoe_cntx_num,
- u32 fcoe_filt_num)
+int i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
+ u32 rxq_num, u32 fcoe_cntx_num,
+ u32 fcoe_filt_num)
{
struct i40e_hmc_obj_info *obj, *full_obj;
- i40e_status ret_code = 0;
+ int ret_code = 0;
u64 l2fpm_size;
u32 size_exp;
@@ -229,11 +229,11 @@ init_lan_hmc_out:
* 1. caller can deallocate the memory used by pd after this function
* returns.
**/
-static i40e_status i40e_remove_pd_page(struct i40e_hw *hw,
- struct i40e_hmc_info *hmc_info,
- u32 idx)
+static int i40e_remove_pd_page(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx)
{
- i40e_status ret_code = 0;
+ int ret_code = 0;
if (!i40e_prep_remove_pd_page(hmc_info, idx))
ret_code = i40e_remove_pd_page_new(hw, hmc_info, idx, true);
@@ -256,11 +256,11 @@ static i40e_status i40e_remove_pd_page(struct i40e_hw *hw,
* 1. caller can deallocate the memory used by backing storage after this
* function returns.
**/
-static i40e_status i40e_remove_sd_bp(struct i40e_hw *hw,
- struct i40e_hmc_info *hmc_info,
- u32 idx)
+static int i40e_remove_sd_bp(struct i40e_hw *hw,
+ struct i40e_hmc_info *hmc_info,
+ u32 idx)
{
- i40e_status ret_code = 0;
+ int ret_code = 0;
if (!i40e_prep_remove_sd_bp(hmc_info, idx))
ret_code = i40e_remove_sd_bp_new(hw, hmc_info, idx, true);
@@ -276,15 +276,15 @@ static i40e_status i40e_remove_sd_bp(struct i40e_hw *hw,
* This will allocate memory for PDs and backing pages and populate
* the sd and pd entries.
**/
-static i40e_status i40e_create_lan_hmc_object(struct i40e_hw *hw,
- struct i40e_hmc_lan_create_obj_info *info)
+static int i40e_create_lan_hmc_object(struct i40e_hw *hw,
+ struct i40e_hmc_lan_create_obj_info *info)
{
- i40e_status ret_code = 0;
struct i40e_hmc_sd_entry *sd_entry;
u32 pd_idx1 = 0, pd_lmt1 = 0;
u32 pd_idx = 0, pd_lmt = 0;
bool pd_error = false;
u32 sd_idx, sd_lmt;
+ int ret_code = 0;
u64 sd_size;
u32 i, j;
@@ -435,13 +435,13 @@ exit:
* - This function will be called after i40e_init_lan_hmc() and before
* any LAN/FCoE HMC objects can be created.
**/
-i40e_status i40e_configure_lan_hmc(struct i40e_hw *hw,
- enum i40e_hmc_model model)
+int i40e_configure_lan_hmc(struct i40e_hw *hw,
+ enum i40e_hmc_model model)
{
struct i40e_hmc_lan_create_obj_info info;
- i40e_status ret_code = 0;
u8 hmc_fn_id = hw->hmc.hmc_fn_id;
struct i40e_hmc_obj_info *obj;
+ int ret_code = 0;
/* Initialize part of the create object info struct */
info.hmc_info = &hw->hmc;
@@ -520,13 +520,13 @@ configure_lan_hmc_out:
* caller should deallocate memory allocated previously for
* book-keeping information about PDs and backing storage.
**/
-static i40e_status i40e_delete_lan_hmc_object(struct i40e_hw *hw,
- struct i40e_hmc_lan_delete_obj_info *info)
+static int i40e_delete_lan_hmc_object(struct i40e_hw *hw,
+ struct i40e_hmc_lan_delete_obj_info *info)
{
- i40e_status ret_code = 0;
struct i40e_hmc_pd_table *pd_table;
u32 pd_idx, pd_lmt, rel_pd_idx;
u32 sd_idx, sd_lmt;
+ int ret_code = 0;
u32 i, j;
if (NULL == info) {
@@ -632,10 +632,10 @@ exit:
* This must be called by drivers as they are shutting down and being
* removed from the OS.
**/
-i40e_status i40e_shutdown_lan_hmc(struct i40e_hw *hw)
+int i40e_shutdown_lan_hmc(struct i40e_hw *hw)
{
struct i40e_hmc_lan_delete_obj_info info;
- i40e_status ret_code;
+ int ret_code;
info.hmc_info = &hw->hmc;
info.rsrc_type = I40E_HMC_LAN_FULL;
@@ -915,9 +915,9 @@ static void i40e_write_qword(u8 *hmc_bits,
* @context_bytes: pointer to the context bit array (DMA memory)
* @hmc_type: the type of HMC resource
**/
-static i40e_status i40e_clear_hmc_context(struct i40e_hw *hw,
- u8 *context_bytes,
- enum i40e_hmc_lan_rsrc_type hmc_type)
+static int i40e_clear_hmc_context(struct i40e_hw *hw,
+ u8 *context_bytes,
+ enum i40e_hmc_lan_rsrc_type hmc_type)
{
/* clean the bit array */
memset(context_bytes, 0, (u32)hw->hmc.hmc_obj[hmc_type].size);
@@ -931,9 +931,9 @@ static i40e_status i40e_clear_hmc_context(struct i40e_hw *hw,
* @ce_info: a description of the struct to be filled
* @dest: the struct to be filled
**/
-static i40e_status i40e_set_hmc_context(u8 *context_bytes,
- struct i40e_context_ele *ce_info,
- u8 *dest)
+static int i40e_set_hmc_context(u8 *context_bytes,
+ struct i40e_context_ele *ce_info,
+ u8 *dest)
{
int f;
@@ -973,18 +973,18 @@ static i40e_status i40e_set_hmc_context(u8 *context_bytes,
* base pointer. This function is used for LAN Queue contexts.
**/
static
-i40e_status i40e_hmc_get_object_va(struct i40e_hw *hw, u8 **object_base,
- enum i40e_hmc_lan_rsrc_type rsrc_type,
- u32 obj_idx)
+int i40e_hmc_get_object_va(struct i40e_hw *hw, u8 **object_base,
+ enum i40e_hmc_lan_rsrc_type rsrc_type,
+ u32 obj_idx)
{
struct i40e_hmc_info *hmc_info = &hw->hmc;
u32 obj_offset_in_sd, obj_offset_in_pd;
struct i40e_hmc_sd_entry *sd_entry;
struct i40e_hmc_pd_entry *pd_entry;
u32 pd_idx, pd_lmt, rel_pd_idx;
- i40e_status ret_code = 0;
u64 obj_offset_in_fpm;
u32 sd_idx, sd_lmt;
+ int ret_code = 0;
if (NULL == hmc_info) {
ret_code = I40E_ERR_BAD_PTR;
@@ -1042,11 +1042,11 @@ exit:
* @hw: the hardware struct
* @queue: the queue we care about
**/
-i40e_status i40e_clear_lan_tx_queue_context(struct i40e_hw *hw,
- u16 queue)
+int i40e_clear_lan_tx_queue_context(struct i40e_hw *hw,
+ u16 queue)
{
- i40e_status err;
u8 *context_bytes;
+ int err;
err = i40e_hmc_get_object_va(hw, &context_bytes,
I40E_HMC_LAN_TX, queue);
@@ -1062,12 +1062,12 @@ i40e_status i40e_clear_lan_tx_queue_context(struct i40e_hw *hw,
* @queue: the queue we care about
* @s: the struct to be filled
**/
-i40e_status i40e_set_lan_tx_queue_context(struct i40e_hw *hw,
- u16 queue,
- struct i40e_hmc_obj_txq *s)
+int i40e_set_lan_tx_queue_context(struct i40e_hw *hw,
+ u16 queue,
+ struct i40e_hmc_obj_txq *s)
{
- i40e_status err;
u8 *context_bytes;
+ int err;
err = i40e_hmc_get_object_va(hw, &context_bytes,
I40E_HMC_LAN_TX, queue);
@@ -1083,11 +1083,11 @@ i40e_status i40e_set_lan_tx_queue_context(struct i40e_hw *hw,
* @hw: the hardware struct
* @queue: the queue we care about
**/
-i40e_status i40e_clear_lan_rx_queue_context(struct i40e_hw *hw,
- u16 queue)
+int i40e_clear_lan_rx_queue_context(struct i40e_hw *hw,
+ u16 queue)
{
- i40e_status err;
u8 *context_bytes;
+ int err;
err = i40e_hmc_get_object_va(hw, &context_bytes,
I40E_HMC_LAN_RX, queue);
@@ -1103,12 +1103,12 @@ i40e_status i40e_clear_lan_rx_queue_context(struct i40e_hw *hw,
* @queue: the queue we care about
* @s: the struct to be filled
**/
-i40e_status i40e_set_lan_rx_queue_context(struct i40e_hw *hw,
- u16 queue,
- struct i40e_hmc_obj_rxq *s)
+int i40e_set_lan_rx_queue_context(struct i40e_hw *hw,
+ u16 queue,
+ struct i40e_hmc_obj_rxq *s)
{
- i40e_status err;
u8 *context_bytes;
+ int err;
err = i40e_hmc_get_object_va(hw, &context_bytes,
I40E_HMC_LAN_RX, queue);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h
index c46a2c449e60..9f960404c2b3 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h
@@ -137,22 +137,22 @@ struct i40e_hmc_lan_delete_obj_info {
u32 count;
};
-i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
- u32 rxq_num, u32 fcoe_cntx_num,
- u32 fcoe_filt_num);
-i40e_status i40e_configure_lan_hmc(struct i40e_hw *hw,
- enum i40e_hmc_model model);
-i40e_status i40e_shutdown_lan_hmc(struct i40e_hw *hw);
-
-i40e_status i40e_clear_lan_tx_queue_context(struct i40e_hw *hw,
- u16 queue);
-i40e_status i40e_set_lan_tx_queue_context(struct i40e_hw *hw,
- u16 queue,
- struct i40e_hmc_obj_txq *s);
-i40e_status i40e_clear_lan_rx_queue_context(struct i40e_hw *hw,
- u16 queue);
-i40e_status i40e_set_lan_rx_queue_context(struct i40e_hw *hw,
- u16 queue,
- struct i40e_hmc_obj_rxq *s);
+int i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
+ u32 rxq_num, u32 fcoe_cntx_num,
+ u32 fcoe_filt_num);
+int i40e_configure_lan_hmc(struct i40e_hw *hw,
+ enum i40e_hmc_model model);
+int i40e_shutdown_lan_hmc(struct i40e_hw *hw);
+
+int i40e_clear_lan_tx_queue_context(struct i40e_hw *hw,
+ u16 queue);
+int i40e_set_lan_tx_queue_context(struct i40e_hw *hw,
+ u16 queue,
+ struct i40e_hmc_obj_txq *s);
+int i40e_clear_lan_rx_queue_context(struct i40e_hw *hw,
+ u16 queue);
+int i40e_set_lan_rx_queue_context(struct i40e_hw *hw,
+ u16 queue,
+ struct i40e_hmc_obj_rxq *s);
#endif /* _I40E_LAN_HMC_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 53d0083e35da..467001db5070 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -1817,13 +1817,13 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
spin_unlock_bh(&vsi->mac_filter_hash_lock);
if (vsi->type == I40E_VSI_MAIN) {
- i40e_status ret;
+ int ret;
ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_WOL,
addr->sa_data, NULL);
if (ret)
- netdev_info(netdev, "Ignoring error from firmware on LAA update, status %s, AQ ret %s\n",
- i40e_stat_str(hw, ret),
+ netdev_info(netdev, "Ignoring error from firmware on LAA update, status %pe, AQ ret %s\n",
+ ERR_PTR(ret),
i40e_aq_str(hw, hw->aq.asq_last_status));
}
@@ -1854,8 +1854,8 @@ static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
ret = i40e_aq_set_rss_key(hw, vsi->id, seed_dw);
if (ret) {
dev_info(&pf->pdev->dev,
- "Cannot set RSS key, err %s aq_err %s\n",
- i40e_stat_str(hw, ret),
+ "Cannot set RSS key, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(hw, hw->aq.asq_last_status));
return ret;
}
@@ -1866,8 +1866,8 @@ static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
if (ret) {
dev_info(&pf->pdev->dev,
- "Cannot set RSS lut, err %s aq_err %s\n",
- i40e_stat_str(hw, ret),
+ "Cannot set RSS lut, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(hw, hw->aq.asq_last_status));
return ret;
}
@@ -2349,7 +2349,7 @@ void i40e_aqc_del_filters(struct i40e_vsi *vsi, const char *vsi_name,
{
struct i40e_hw *hw = &vsi->back->hw;
enum i40e_admin_queue_err aq_status;
- i40e_status aq_ret;
+ int aq_ret;
aq_ret = i40e_aq_remove_macvlan_v2(hw, vsi->seid, list, num_del, NULL,
&aq_status);
@@ -2358,8 +2358,8 @@ void i40e_aqc_del_filters(struct i40e_vsi *vsi, const char *vsi_name,
if (aq_ret && !(aq_status == I40E_AQ_RC_ENOENT)) {
*retval = -EIO;
dev_info(&vsi->back->pdev->dev,
- "ignoring delete macvlan error on %s, err %s, aq_err %s\n",
- vsi_name, i40e_stat_str(hw, aq_ret),
+ "ignoring delete macvlan error on %s, err %pe, aq_err %s\n",
+ vsi_name, ERR_PTR(aq_ret),
i40e_aq_str(hw, aq_status));
}
}
@@ -2423,13 +2423,13 @@ void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name,
*
* Returns status indicating success or failure;
**/
-static i40e_status
+static int
i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name,
struct i40e_mac_filter *f)
{
bool enable = f->state == I40E_FILTER_NEW;
struct i40e_hw *hw = &vsi->back->hw;
- i40e_status aq_ret;
+ int aq_ret;
if (f->vlan == I40E_VLAN_ANY) {
aq_ret = i40e_aq_set_vsi_broadcast(hw,
@@ -2468,7 +2468,7 @@ static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc)
{
struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
struct i40e_hw *hw = &pf->hw;
- i40e_status aq_ret;
+ int aq_ret;
if (vsi->type == I40E_VSI_MAIN &&
pf->lan_veb != I40E_NO_VEB &&
@@ -2488,8 +2488,8 @@ static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc)
NULL);
if (aq_ret) {
dev_info(&pf->pdev->dev,
- "Set default VSI failed, err %s, aq_err %s\n",
- i40e_stat_str(hw, aq_ret),
+ "Set default VSI failed, err %pe, aq_err %s\n",
+ ERR_PTR(aq_ret),
i40e_aq_str(hw, hw->aq.asq_last_status));
}
} else {
@@ -2500,8 +2500,8 @@ static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc)
true);
if (aq_ret) {
dev_info(&pf->pdev->dev,
- "set unicast promisc failed, err %s, aq_err %s\n",
- i40e_stat_str(hw, aq_ret),
+ "set unicast promisc failed, err %pe, aq_err %s\n",
+ ERR_PTR(aq_ret),
i40e_aq_str(hw, hw->aq.asq_last_status));
}
aq_ret = i40e_aq_set_vsi_multicast_promiscuous(
@@ -2510,8 +2510,8 @@ static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc)
promisc, NULL);
if (aq_ret) {
dev_info(&pf->pdev->dev,
- "set multicast promisc failed, err %s, aq_err %s\n",
- i40e_stat_str(hw, aq_ret),
+ "set multicast promisc failed, err %pe, aq_err %s\n",
+ ERR_PTR(aq_ret),
i40e_aq_str(hw, hw->aq.asq_last_status));
}
}
@@ -2541,12 +2541,12 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
unsigned int vlan_filters = 0;
char vsi_name[16] = "PF";
int filter_list_len = 0;
- i40e_status aq_ret = 0;
u32 changed_flags = 0;
struct hlist_node *h;
struct i40e_pf *pf;
int num_add = 0;
int num_del = 0;
+ int aq_ret = 0;
int retval = 0;
u16 cmd_flags;
int list_size;
@@ -2814,9 +2814,9 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
retval = i40e_aq_rc_to_posix(aq_ret,
hw->aq.asq_last_status);
dev_info(&pf->pdev->dev,
- "set multi promisc failed on %s, err %s aq_err %s\n",
+ "set multi promisc failed on %s, err %pe aq_err %s\n",
vsi_name,
- i40e_stat_str(hw, aq_ret),
+ ERR_PTR(aq_ret),
i40e_aq_str(hw, hw->aq.asq_last_status));
} else {
dev_info(&pf->pdev->dev, "%s allmulti mode.\n",
@@ -2834,10 +2834,10 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
retval = i40e_aq_rc_to_posix(aq_ret,
hw->aq.asq_last_status);
dev_info(&pf->pdev->dev,
- "Setting promiscuous %s failed on %s, err %s aq_err %s\n",
+ "Setting promiscuous %s failed on %s, err %pe aq_err %s\n",
cur_promisc ? "on" : "off",
vsi_name,
- i40e_stat_str(hw, aq_ret),
+ ERR_PTR(aq_ret),
i40e_aq_str(hw, hw->aq.asq_last_status));
}
}
@@ -2921,7 +2921,7 @@ static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
struct i40e_pf *pf = vsi->back;
if (i40e_enabled_xdp_vsi(vsi)) {
- int frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+ int frame_size = new_mtu + I40E_PACKET_HDR_PAD;
if (frame_size > i40e_max_xdp_frame_size(vsi))
return -EINVAL;
@@ -2965,7 +2965,7 @@ int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
{
struct i40e_vsi_context ctxt;
- i40e_status ret;
+ int ret;
/* Don't modify stripping options if a port VLAN is active */
if (vsi->info.pvid)
@@ -2985,8 +2985,8 @@ void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
if (ret) {
dev_info(&vsi->back->pdev->dev,
- "update vlan stripping failed, err %s aq_err %s\n",
- i40e_stat_str(&vsi->back->hw, ret),
+ "update vlan stripping failed, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&vsi->back->hw,
vsi->back->hw.aq.asq_last_status));
}
@@ -2999,7 +2999,7 @@ void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
{
struct i40e_vsi_context ctxt;
- i40e_status ret;
+ int ret;
/* Don't modify stripping options if a port VLAN is active */
if (vsi->info.pvid)
@@ -3020,8 +3020,8 @@ void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
if (ret) {
dev_info(&vsi->back->pdev->dev,
- "update vlan stripping failed, err %s aq_err %s\n",
- i40e_stat_str(&vsi->back->hw, ret),
+ "update vlan stripping failed, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&vsi->back->hw,
vsi->back->hw.aq.asq_last_status));
}
@@ -3252,7 +3252,7 @@ static void i40e_restore_vlan(struct i40e_vsi *vsi)
int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
{
struct i40e_vsi_context ctxt;
- i40e_status ret;
+ int ret;
vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
vsi->info.pvid = cpu_to_le16(vid);
@@ -3265,8 +3265,8 @@ int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
if (ret) {
dev_info(&vsi->back->pdev->dev,
- "add pvid failed, err %s aq_err %s\n",
- i40e_stat_str(&vsi->back->hw, ret),
+ "add pvid failed, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&vsi->back->hw,
vsi->back->hw.aq.asq_last_status));
return -ENOENT;
@@ -3429,8 +3429,8 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring)
u16 pf_q = vsi->base_queue + ring->queue_index;
struct i40e_hw *hw = &vsi->back->hw;
struct i40e_hmc_obj_txq tx_ctx;
- i40e_status err = 0;
u32 qtx_ctl = 0;
+ int err = 0;
if (ring_is_xdp(ring))
ring->xsk_pool = i40e_xsk_pool(ring);
@@ -3554,7 +3554,7 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
u16 pf_q = vsi->base_queue + ring->queue_index;
struct i40e_hw *hw = &vsi->back->hw;
struct i40e_hmc_obj_rxq rx_ctx;
- i40e_status err = 0;
+ int err = 0;
bool ok;
int ret;
@@ -5525,16 +5525,16 @@ static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
- i40e_status ret;
u32 tc_bw_max;
+ int ret;
int i;
/* Get the VSI level BW configuration */
ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
- "couldn't get PF vsi bw config, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "couldn't get PF vsi bw config, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
return -EINVAL;
}
@@ -5544,8 +5544,8 @@ static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
NULL);
if (ret) {
dev_info(&pf->pdev->dev,
- "couldn't get PF vsi ets bw config, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "couldn't get PF vsi ets bw config, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
return -EINVAL;
}
@@ -5586,7 +5586,7 @@ static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
{
struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
struct i40e_pf *pf = vsi->back;
- i40e_status ret;
+ int ret;
int i;
/* There is no need to reset BW when mqprio mode is on. */
@@ -5734,8 +5734,8 @@ int i40e_update_adq_vsi_queues(struct i40e_vsi *vsi, int vsi_offset)
ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
if (ret) {
- dev_info(&pf->pdev->dev, "Update vsi config failed, err %s aq_err %s\n",
- i40e_stat_str(hw, ret),
+ dev_info(&pf->pdev->dev, "Update vsi config failed, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(hw, hw->aq.asq_last_status));
return ret;
}
@@ -5790,8 +5790,8 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
&bw_config, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
- "Failed querying vsi bw info, err %s aq_err %s\n",
- i40e_stat_str(hw, ret),
+ "Failed querying vsi bw info, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(hw, hw->aq.asq_last_status));
goto out;
}
@@ -5857,8 +5857,8 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
- "Update vsi tc config failed, err %s aq_err %s\n",
- i40e_stat_str(hw, ret),
+ "Update vsi tc config failed, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(hw, hw->aq.asq_last_status));
goto out;
}
@@ -5870,8 +5870,8 @@ static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
ret = i40e_vsi_get_bw_info(vsi);
if (ret) {
dev_info(&pf->pdev->dev,
- "Failed updating vsi bw info, err %s aq_err %s\n",
- i40e_stat_str(hw, ret),
+ "Failed updating vsi bw info, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(hw, hw->aq.asq_last_status));
goto out;
}
@@ -5962,8 +5962,8 @@ int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate)
I40E_MAX_BW_INACTIVE_ACCUM, NULL);
if (ret)
dev_err(&pf->pdev->dev,
- "Failed set tx rate (%llu Mbps) for vsi->seid %u, err %s aq_err %s\n",
- max_tx_rate, seid, i40e_stat_str(&pf->hw, ret),
+ "Failed set tx rate (%llu Mbps) for vsi->seid %u, err %pe aq_err %s\n",
+ max_tx_rate, seid, ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
return ret;
}
@@ -6038,8 +6038,8 @@ static void i40e_remove_queue_channels(struct i40e_vsi *vsi)
last_aq_status = pf->hw.aq.asq_last_status;
if (ret)
dev_info(&pf->pdev->dev,
- "Failed to delete cloud filter, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "Failed to delete cloud filter, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw, last_aq_status));
kfree(cfilter);
}
@@ -6173,8 +6173,8 @@ static int i40e_vsi_reconfig_rss(struct i40e_vsi *vsi, u16 rss_size)
ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size);
if (ret) {
dev_info(&pf->pdev->dev,
- "Cannot set RSS lut, err %s aq_err %s\n",
- i40e_stat_str(hw, ret),
+ "Cannot set RSS lut, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(hw, hw->aq.asq_last_status));
kfree(lut);
return ret;
@@ -6272,8 +6272,8 @@ static int i40e_add_channel(struct i40e_pf *pf, u16 uplink_seid,
ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
- "add new vsi failed, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "add new vsi failed, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
return -ENOENT;
@@ -6304,7 +6304,7 @@ static int i40e_channel_config_bw(struct i40e_vsi *vsi, struct i40e_channel *ch,
u8 *bw_share)
{
struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
- i40e_status ret;
+ int ret;
int i;
memset(&bw_data, 0, sizeof(bw_data));
@@ -6340,9 +6340,9 @@ static int i40e_channel_config_tx_ring(struct i40e_pf *pf,
struct i40e_vsi *vsi,
struct i40e_channel *ch)
{
- i40e_status ret;
- int i;
u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
+ int ret;
+ int i;
/* Enable ETS TCs with equal BW Share for now across all VSIs */
for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
@@ -6518,8 +6518,8 @@ static int i40e_validate_and_set_switch_mode(struct i40e_vsi *vsi)
mode, NULL);
if (ret && hw->aq.asq_last_status != I40E_AQ_RC_ESRCH)
dev_err(&pf->pdev->dev,
- "couldn't set switch config bits, err %s aq_err %s\n",
- i40e_stat_str(hw, ret),
+ "couldn't set switch config bits, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(hw,
hw->aq.asq_last_status));
@@ -6719,8 +6719,8 @@ int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
&bw_data, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
- "VEB bw config failed, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "VEB bw config failed, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
goto out;
}
@@ -6729,8 +6729,8 @@ int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
ret = i40e_veb_get_bw_info(veb);
if (ret) {
dev_info(&pf->pdev->dev,
- "Failed getting veb bw config, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "Failed getting veb bw config, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
}
@@ -6813,8 +6813,8 @@ static int i40e_resume_port_tx(struct i40e_pf *pf)
ret = i40e_aq_resume_port_tx(hw, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
- "Resume Port Tx failed, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "Resume Port Tx failed, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
/* Schedule PF reset to recover */
set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
@@ -6838,8 +6838,8 @@ static int i40e_suspend_port_tx(struct i40e_pf *pf)
ret = i40e_aq_suspend_port_tx(hw, pf->mac_seid, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
- "Suspend Port Tx failed, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "Suspend Port Tx failed, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
/* Schedule PF reset to recover */
set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
@@ -6878,8 +6878,8 @@ static int i40e_hw_set_dcb_config(struct i40e_pf *pf,
ret = i40e_set_dcb_config(&pf->hw);
if (ret) {
dev_info(&pf->pdev->dev,
- "Set DCB Config failed, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "Set DCB Config failed, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
goto out;
}
@@ -6995,8 +6995,8 @@ int i40e_hw_dcb_config(struct i40e_pf *pf, struct i40e_dcbx_config *new_cfg)
i40e_aqc_opc_modify_switching_comp_ets, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
- "Modify Port ETS failed, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "Modify Port ETS failed, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
goto out;
}
@@ -7033,8 +7033,8 @@ int i40e_hw_dcb_config(struct i40e_pf *pf, struct i40e_dcbx_config *new_cfg)
ret = i40e_aq_dcb_updated(&pf->hw, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
- "DCB Updated failed, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "DCB Updated failed, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
goto out;
}
@@ -7117,8 +7117,8 @@ int i40e_dcb_sw_default_config(struct i40e_pf *pf)
i40e_aqc_opc_enable_switching_comp_ets, NULL);
if (err) {
dev_info(&pf->pdev->dev,
- "Enable Port ETS failed, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, err),
+ "Enable Port ETS failed, err %pe aq_err %s\n",
+ ERR_PTR(err),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
err = -ENOENT;
goto out;
@@ -7197,8 +7197,8 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
pf->flags |= I40E_FLAG_DISABLE_FW_LLDP;
} else {
dev_info(&pf->pdev->dev,
- "Query for DCB configuration failed, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, err),
+ "Query for DCB configuration failed, err %pe aq_err %s\n",
+ ERR_PTR(err),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
}
@@ -7416,15 +7416,15 @@ static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi)
* @pf: board private structure
* @is_up: whether the link state should be forced up or down
**/
-static i40e_status i40e_force_link_state(struct i40e_pf *pf, bool is_up)
+static int i40e_force_link_state(struct i40e_pf *pf, bool is_up)
{
struct i40e_aq_get_phy_abilities_resp abilities;
struct i40e_aq_set_phy_config config = {0};
bool non_zero_phy_type = is_up;
struct i40e_hw *hw = &pf->hw;
- i40e_status err;
u64 mask;
u8 speed;
+ int err;
/* Card might've been put in an unstable state by other drivers
* and applications, which causes incorrect speed values being
@@ -7436,8 +7436,8 @@ static i40e_status i40e_force_link_state(struct i40e_pf *pf, bool is_up)
NULL);
if (err) {
dev_err(&pf->pdev->dev,
- "failed to get phy cap., ret = %s last_status = %s\n",
- i40e_stat_str(hw, err),
+ "failed to get phy cap., ret = %pe last_status = %s\n",
+ ERR_PTR(err),
i40e_aq_str(hw, hw->aq.asq_last_status));
return err;
}
@@ -7448,8 +7448,8 @@ static i40e_status i40e_force_link_state(struct i40e_pf *pf, bool is_up)
NULL);
if (err) {
dev_err(&pf->pdev->dev,
- "failed to get phy cap., ret = %s last_status = %s\n",
- i40e_stat_str(hw, err),
+ "failed to get phy cap., ret = %pe last_status = %s\n",
+ ERR_PTR(err),
i40e_aq_str(hw, hw->aq.asq_last_status));
return err;
}
@@ -7493,8 +7493,8 @@ static i40e_status i40e_force_link_state(struct i40e_pf *pf, bool is_up)
if (err) {
dev_err(&pf->pdev->dev,
- "set phy config ret = %s last_status = %s\n",
- i40e_stat_str(&pf->hw, err),
+ "set phy config ret = %pe last_status = %s\n",
+ ERR_PTR(err),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
return err;
}
@@ -7657,11 +7657,11 @@ static void i40e_vsi_set_default_tc_config(struct i40e_vsi *vsi)
* This function deletes a mac filter on the channel VSI which serves as the
* macvlan. Returns 0 on success.
**/
-static i40e_status i40e_del_macvlan_filter(struct i40e_hw *hw, u16 seid,
- const u8 *macaddr, int *aq_err)
+static int i40e_del_macvlan_filter(struct i40e_hw *hw, u16 seid,
+ const u8 *macaddr, int *aq_err)
{
struct i40e_aqc_remove_macvlan_element_data element;
- i40e_status status;
+ int status;
memset(&element, 0, sizeof(element));
ether_addr_copy(element.mac_addr, macaddr);
@@ -7683,12 +7683,12 @@ static i40e_status i40e_del_macvlan_filter(struct i40e_hw *hw, u16 seid,
* This function adds a mac filter on the channel VSI which serves as the
* macvlan. Returns 0 on success.
**/
-static i40e_status i40e_add_macvlan_filter(struct i40e_hw *hw, u16 seid,
- const u8 *macaddr, int *aq_err)
+static int i40e_add_macvlan_filter(struct i40e_hw *hw, u16 seid,
+ const u8 *macaddr, int *aq_err)
{
struct i40e_aqc_add_macvlan_element_data element;
- i40e_status status;
u16 cmd_flags = 0;
+ int status;
ether_addr_copy(element.mac_addr, macaddr);
element.vlan_tag = 0;
@@ -7834,8 +7834,8 @@ static int i40e_fwd_ring_up(struct i40e_vsi *vsi, struct net_device *vdev,
rx_ring->netdev = NULL;
}
dev_info(&pf->pdev->dev,
- "Error adding mac filter on macvlan err %s, aq_err %s\n",
- i40e_stat_str(hw, ret),
+ "Error adding mac filter on macvlan err %pe, aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(hw, aq_err));
netdev_err(vdev, "L2fwd offload disabled to L2 filter error\n");
}
@@ -7907,8 +7907,8 @@ static int i40e_setup_macvlans(struct i40e_vsi *vsi, u16 macvlan_cnt, u16 qcnt,
ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
- "Update vsi tc config failed, err %s aq_err %s\n",
- i40e_stat_str(hw, ret),
+ "Update vsi tc config failed, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(hw, hw->aq.asq_last_status));
return ret;
}
@@ -8123,8 +8123,8 @@ static void i40e_fwd_del(struct net_device *netdev, void *vdev)
ch->fwd = NULL;
} else {
dev_info(&pf->pdev->dev,
- "Error deleting mac filter on macvlan err %s, aq_err %s\n",
- i40e_stat_str(hw, ret),
+ "Error deleting mac filter on macvlan err %pe, aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(hw, aq_err));
}
break;
@@ -8875,8 +8875,8 @@ static int i40e_delete_clsflower(struct i40e_vsi *vsi,
kfree(filter);
if (err) {
dev_err(&pf->pdev->dev,
- "Failed to delete cloud filter, err %s\n",
- i40e_stat_str(&pf->hw, err));
+ "Failed to delete cloud filter, err %pe\n",
+ ERR_PTR(err));
return i40e_aq_rc_to_posix(err, pf->hw.aq.asq_last_status);
}
@@ -9438,8 +9438,8 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
} else {
dev_info(&pf->pdev->dev,
- "Failed querying DCB configuration data from firmware, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "Failed querying DCB configuration data from firmware, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
}
@@ -9887,8 +9887,8 @@ static void i40e_link_event(struct i40e_pf *pf)
{
struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
u8 new_link_speed, old_link_speed;
- i40e_status status;
bool new_link, old_link;
+ int status;
#ifdef CONFIG_I40E_DCB
int err;
#endif /* CONFIG_I40E_DCB */
@@ -10099,9 +10099,9 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
struct i40e_arq_event_info event;
struct i40e_hw *hw = &pf->hw;
u16 pending, i = 0;
- i40e_status ret;
u16 opcode;
u32 oldval;
+ int ret;
u32 val;
/* Do not run clean AQ when PF reset fails */
@@ -10265,8 +10265,8 @@ static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
- "couldn't get PF vsi config, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "couldn't get PF vsi config, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
return;
}
@@ -10277,8 +10277,8 @@ static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
- "update vsi switch failed, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "update vsi switch failed, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
}
}
@@ -10301,8 +10301,8 @@ static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
- "couldn't get PF vsi config, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "couldn't get PF vsi config, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
return;
}
@@ -10313,8 +10313,8 @@ static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
- "update vsi switch failed, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "update vsi switch failed, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
}
}
@@ -10458,8 +10458,8 @@ static int i40e_get_capabilities(struct i40e_pf *pf,
buf_len = data_size;
} else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK || err) {
dev_info(&pf->pdev->dev,
- "capability discovery failed, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, err),
+ "capability discovery failed, err %pe aq_err %s\n",
+ ERR_PTR(err),
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
return -ENODEV;
@@ -10580,7 +10580,7 @@ static int i40e_rebuild_cloud_filters(struct i40e_vsi *vsi, u16 seid)
struct i40e_cloud_filter *cfilter;
struct i40e_pf *pf = vsi->back;
struct hlist_node *node;
- i40e_status ret;
+ int ret;
/* Add cloud filters back if they exist */
hlist_for_each_entry_safe(cfilter, node, &pf->cloud_filter_list,
@@ -10596,8 +10596,8 @@ static int i40e_rebuild_cloud_filters(struct i40e_vsi *vsi, u16 seid)
if (ret) {
dev_dbg(&pf->pdev->dev,
- "Failed to rebuild cloud filter, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "Failed to rebuild cloud filter, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
return ret;
@@ -10615,7 +10615,7 @@ static int i40e_rebuild_cloud_filters(struct i40e_vsi *vsi, u16 seid)
static int i40e_rebuild_channels(struct i40e_vsi *vsi)
{
struct i40e_channel *ch, *ch_tmp;
- i40e_status ret;
+ int ret;
if (list_empty(&vsi->ch_list))
return 0;
@@ -10691,7 +10691,7 @@ static void i40e_clean_xps_state(struct i40e_vsi *vsi)
static void i40e_prep_for_reset(struct i40e_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
- i40e_status ret = 0;
+ int ret = 0;
u32 v;
clear_bit(__I40E_RESET_INTR_RECEIVED, pf->state);
@@ -10796,7 +10796,7 @@ static void i40e_get_oem_version(struct i40e_hw *hw)
static int i40e_reset(struct i40e_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
- i40e_status ret;
+ int ret;
ret = i40e_pf_reset(hw);
if (ret) {
@@ -10821,7 +10821,7 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
const bool is_recovery_mode_reported = i40e_check_recovery_mode(pf);
struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
struct i40e_hw *hw = &pf->hw;
- i40e_status ret;
+ int ret;
u32 val;
int v;
@@ -10837,8 +10837,8 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
/* rebuild the basics for the AdminQ, HMC, and initial HW switch */
ret = i40e_init_adminq(&pf->hw);
if (ret) {
- dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
goto clear_recovery;
}
@@ -10949,8 +10949,8 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
I40E_AQ_EVENT_MEDIA_NA |
I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
if (ret)
- dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ dev_info(&pf->pdev->dev, "set phy mask fail, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
/* Rebuild the VSIs and VEBs that existed before reset.
@@ -11053,8 +11053,8 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
msleep(75);
ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
if (ret)
- dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ dev_info(&pf->pdev->dev, "link restart failed, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
}
@@ -11082,9 +11082,9 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
ret = i40e_set_promiscuous(pf, pf->cur_promisc);
if (ret)
dev_warn(&pf->pdev->dev,
- "Failed to restore promiscuous setting: %s, err %s aq_err %s\n",
+ "Failed to restore promiscuous setting: %s, err %pe aq_err %s\n",
pf->cur_promisc ? "on" : "off",
- i40e_stat_str(&pf->hw, ret),
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
i40e_reset_all_vfs(pf, true);
@@ -12218,8 +12218,8 @@ static int i40e_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
(struct i40e_aqc_get_set_rss_key_data *)seed);
if (ret) {
dev_info(&pf->pdev->dev,
- "Cannot get RSS key, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "Cannot get RSS key, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
return ret;
@@ -12232,8 +12232,8 @@ static int i40e_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
ret = i40e_aq_get_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
if (ret) {
dev_info(&pf->pdev->dev,
- "Cannot get RSS lut, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "Cannot get RSS lut, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
return ret;
@@ -12508,11 +12508,11 @@ int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
* i40e_get_partition_bw_setting - Retrieve BW settings for this PF partition
* @pf: board private structure
**/
-i40e_status i40e_get_partition_bw_setting(struct i40e_pf *pf)
+int i40e_get_partition_bw_setting(struct i40e_pf *pf)
{
- i40e_status status;
bool min_valid, max_valid;
u32 max_bw, min_bw;
+ int status;
status = i40e_read_bw_from_alt_ram(&pf->hw, &max_bw, &min_bw,
&min_valid, &max_valid);
@@ -12531,10 +12531,10 @@ i40e_status i40e_get_partition_bw_setting(struct i40e_pf *pf)
* i40e_set_partition_bw_setting - Set BW settings for this PF partition
* @pf: board private structure
**/
-i40e_status i40e_set_partition_bw_setting(struct i40e_pf *pf)
+int i40e_set_partition_bw_setting(struct i40e_pf *pf)
{
struct i40e_aqc_configure_partition_bw_data bw_data;
- i40e_status status;
+ int status;
memset(&bw_data, 0, sizeof(bw_data));
@@ -12553,12 +12553,12 @@ i40e_status i40e_set_partition_bw_setting(struct i40e_pf *pf)
* i40e_commit_partition_bw_setting - Commit BW settings for this PF partition
* @pf: board private structure
**/
-i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf)
+int i40e_commit_partition_bw_setting(struct i40e_pf *pf)
{
/* Commit temporary BW setting to permanent NVM image */
enum i40e_admin_queue_err last_aq_status;
- i40e_status ret;
u16 nvm_word;
+ int ret;
if (pf->hw.partition_id != 1) {
dev_info(&pf->pdev->dev,
@@ -12573,8 +12573,8 @@ i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf)
last_aq_status = pf->hw.aq.asq_last_status;
if (ret) {
dev_info(&pf->pdev->dev,
- "Cannot acquire NVM for read access, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "Cannot acquire NVM for read access, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw, last_aq_status));
goto bw_commit_out;
}
@@ -12590,8 +12590,8 @@ i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf)
last_aq_status = pf->hw.aq.asq_last_status;
i40e_release_nvm(&pf->hw);
if (ret) {
- dev_info(&pf->pdev->dev, "NVM read error, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ dev_info(&pf->pdev->dev, "NVM read error, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw, last_aq_status));
goto bw_commit_out;
}
@@ -12604,8 +12604,8 @@ i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf)
last_aq_status = pf->hw.aq.asq_last_status;
if (ret) {
dev_info(&pf->pdev->dev,
- "Cannot acquire NVM for write access, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "Cannot acquire NVM for write access, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw, last_aq_status));
goto bw_commit_out;
}
@@ -12624,8 +12624,8 @@ i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf)
i40e_release_nvm(&pf->hw);
if (ret)
dev_info(&pf->pdev->dev,
- "BW settings NOT SAVED, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "BW settings NOT SAVED, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw, last_aq_status));
bw_commit_out:
@@ -12646,7 +12646,7 @@ static bool i40e_is_total_port_shutdown_enabled(struct i40e_pf *pf)
#define I40E_LINK_BEHAVIOR_WORD_LENGTH 0x1
#define I40E_LINK_BEHAVIOR_OS_FORCED_ENABLED BIT(0)
#define I40E_LINK_BEHAVIOR_PORT_BIT_LENGTH 4
- i40e_status read_status = I40E_SUCCESS;
+ int read_status = I40E_SUCCESS;
u16 sr_emp_sr_settings_ptr = 0;
u16 features_enable = 0;
u16 link_behavior = 0;
@@ -12679,8 +12679,8 @@ static bool i40e_is_total_port_shutdown_enabled(struct i40e_pf *pf)
err_nvm:
dev_warn(&pf->pdev->dev,
- "total-port-shutdown feature is off due to read nvm error: %s\n",
- i40e_stat_str(&pf->hw, read_status));
+ "total-port-shutdown feature is off due to read nvm error: %pe\n",
+ ERR_PTR(read_status));
return ret;
}
@@ -13025,7 +13025,7 @@ static int i40e_udp_tunnel_set_port(struct net_device *netdev,
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_hw *hw = &np->vsi->back->hw;
u8 type, filter_index;
- i40e_status ret;
+ int ret;
type = ti->type == UDP_TUNNEL_TYPE_VXLAN ? I40E_AQC_TUNNEL_TYPE_VXLAN :
I40E_AQC_TUNNEL_TYPE_NGE;
@@ -13033,8 +13033,8 @@ static int i40e_udp_tunnel_set_port(struct net_device *netdev,
ret = i40e_aq_add_udp_tunnel(hw, ntohs(ti->port), type, &filter_index,
NULL);
if (ret) {
- netdev_info(netdev, "add UDP port failed, err %s aq_err %s\n",
- i40e_stat_str(hw, ret),
+ netdev_info(netdev, "add UDP port failed, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(hw, hw->aq.asq_last_status));
return -EIO;
}
@@ -13049,12 +13049,12 @@ static int i40e_udp_tunnel_unset_port(struct net_device *netdev,
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_hw *hw = &np->vsi->back->hw;
- i40e_status ret;
+ int ret;
ret = i40e_aq_del_udp_tunnel(hw, ti->hw_priv, NULL);
if (ret) {
- netdev_info(netdev, "delete UDP port failed, err %s aq_err %s\n",
- i40e_stat_str(hw, ret),
+ netdev_info(netdev, "delete UDP port failed, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(hw, hw->aq.asq_last_status));
return -EIO;
}
@@ -13167,6 +13167,8 @@ static int i40e_ndo_bridge_setlink(struct net_device *dev,
}
br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
+ if (!br_spec)
+ return -EINVAL;
nla_for_each_nested(attr, br_spec, rem) {
__u16 mode;
@@ -13339,9 +13341,11 @@ static int i40e_xdp_setup(struct i40e_vsi *vsi, struct bpf_prog *prog,
old_prog = xchg(&vsi->xdp_prog, prog);
if (need_reset) {
- if (!prog)
+ if (!prog) {
+ xdp_features_clear_redirect_target(vsi->netdev);
/* Wait until ndo_xsk_wakeup completes. */
synchronize_rcu();
+ }
i40e_reset_and_rebuild(pf, true, true);
}
@@ -13362,11 +13366,13 @@ static int i40e_xdp_setup(struct i40e_vsi *vsi, struct bpf_prog *prog,
/* Kick start the NAPI context if there is an AF_XDP socket open
* on that queue id. This so that receiving will start.
*/
- if (need_reset && prog)
+ if (need_reset && prog) {
for (i = 0; i < vsi->num_queue_pairs; i++)
if (vsi->xdp_rings[i]->xsk_pool)
(void)i40e_xsk_wakeup(vsi->netdev, i,
XDP_WAKEUP_RX);
+ xdp_features_set_redirect_target(vsi->netdev, true);
+ }
return 0;
}
@@ -13801,6 +13807,10 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
spin_lock_bh(&vsi->mac_filter_hash_lock);
i40e_add_mac_filter(vsi, mac_addr);
spin_unlock_bh(&vsi->mac_filter_hash_lock);
+
+ netdev->xdp_features = NETDEV_XDP_ACT_BASIC |
+ NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_XSK_ZEROCOPY;
} else {
/* Relate the VSI_VMDQ name to the VSI_MAIN name. Note that we
* are still limited by IFNAMSIZ, but we're adding 'v%d\0' to
@@ -13941,8 +13951,8 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
ctxt.flags = I40E_AQ_VSI_TYPE_PF;
if (ret) {
dev_info(&pf->pdev->dev,
- "couldn't get PF vsi config, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "couldn't get PF vsi config, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
return -ENOENT;
@@ -13971,8 +13981,8 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
- "update vsi failed, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "update vsi failed, err %d aq_err %s\n",
+ ret,
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
ret = -ENOENT;
@@ -13991,8 +14001,8 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
- "update vsi failed, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "update vsi failed, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
ret = -ENOENT;
@@ -14014,9 +14024,9 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
* message and continue
*/
dev_info(&pf->pdev->dev,
- "failed to configure TCs for main VSI tc_map 0x%08x, err %s aq_err %s\n",
+ "failed to configure TCs for main VSI tc_map 0x%08x, err %pe aq_err %s\n",
enabled_tc,
- i40e_stat_str(&pf->hw, ret),
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
}
@@ -14110,8 +14120,8 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
if (ret) {
dev_info(&vsi->back->pdev->dev,
- "add vsi failed, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "add vsi failed, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
ret = -ENOENT;
@@ -14142,8 +14152,8 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
ret = i40e_vsi_get_bw_info(vsi);
if (ret) {
dev_info(&pf->pdev->dev,
- "couldn't get vsi bw info, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "couldn't get vsi bw info, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
/* VSI is already added so not tearing that up */
ret = 0;
@@ -14589,8 +14599,8 @@ static int i40e_veb_get_bw_info(struct i40e_veb *veb)
&bw_data, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
- "query veb bw config failed, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "query veb bw config failed, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
goto out;
}
@@ -14599,8 +14609,8 @@ static int i40e_veb_get_bw_info(struct i40e_veb *veb)
&ets_data, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
- "query veb bw ets config failed, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "query veb bw ets config failed, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
goto out;
}
@@ -14796,8 +14806,8 @@ static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
/* get a VEB from the hardware */
if (ret) {
dev_info(&pf->pdev->dev,
- "couldn't add VEB, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "couldn't add VEB, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
return -EPERM;
}
@@ -14807,16 +14817,16 @@ static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
&veb->stats_idx, NULL, NULL, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
- "couldn't get VEB statistics idx, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "couldn't get VEB statistics idx, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
return -EPERM;
}
ret = i40e_veb_get_bw_info(veb);
if (ret) {
dev_info(&pf->pdev->dev,
- "couldn't get VEB bw info, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "couldn't get VEB bw info, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
return -ENOENT;
@@ -15026,8 +15036,8 @@ int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
&next_seid, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
- "get switch config failed err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "get switch config failed err %d aq_err %s\n",
+ ret,
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
kfree(aq_buf);
@@ -15072,8 +15082,8 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acqui
ret = i40e_fetch_switch_configuration(pf, false);
if (ret) {
dev_info(&pf->pdev->dev,
- "couldn't fetch switch config, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "couldn't fetch switch config, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
return ret;
}
@@ -15099,8 +15109,8 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acqui
NULL);
if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) {
dev_info(&pf->pdev->dev,
- "couldn't set switch config bits, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, ret),
+ "couldn't set switch config bits, err %pe aq_err %s\n",
+ ERR_PTR(ret),
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
/* not a fatal problem, just keep going */
@@ -15437,13 +15447,12 @@ static bool i40e_check_recovery_mode(struct i40e_pf *pf)
*
* Return 0 on success, negative on failure.
**/
-static i40e_status i40e_pf_loop_reset(struct i40e_pf *pf)
+static int i40e_pf_loop_reset(struct i40e_pf *pf)
{
/* wait max 10 seconds for PF reset to succeed */
const unsigned long time_end = jiffies + 10 * HZ;
-
struct i40e_hw *hw = &pf->hw;
- i40e_status ret;
+ int ret;
ret = i40e_pf_reset(hw);
while (ret != I40E_SUCCESS && time_before(jiffies, time_end)) {
@@ -15489,9 +15498,9 @@ static bool i40e_check_fw_empr(struct i40e_pf *pf)
* Return 0 if NIC is healthy or negative value when there are issues
* with resets
**/
-static i40e_status i40e_handle_resets(struct i40e_pf *pf)
+static int i40e_handle_resets(struct i40e_pf *pf)
{
- const i40e_status pfr = i40e_pf_loop_reset(pf);
+ const int pfr = i40e_pf_loop_reset(pf);
const bool is_empr = i40e_check_fw_empr(pf);
if (is_empr || pfr != I40E_SUCCESS)
@@ -15589,7 +15598,6 @@ err_switch_setup:
timer_shutdown_sync(&pf->service_timer);
i40e_shutdown_adminq(hw);
iounmap(hw->hw_addr);
- pci_disable_pcie_error_reporting(pf->pdev);
pci_release_mem_regions(pf->pdev);
pci_disable_device(pf->pdev);
kfree(pf);
@@ -15629,13 +15637,15 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct i40e_aq_get_phy_abilities_resp abilities;
#ifdef CONFIG_I40E_DCB
enum i40e_get_fw_lldp_status_resp lldp_status;
- i40e_status status;
#endif /* CONFIG_I40E_DCB */
struct i40e_pf *pf;
struct i40e_hw *hw;
static u16 pfs_found;
u16 wol_nvm_bits;
u16 link_status;
+#ifdef CONFIG_I40E_DCB
+ int status;
+#endif /* CONFIG_I40E_DCB */
int err;
u32 val;
u32 i;
@@ -15660,7 +15670,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_pci_reg;
}
- pci_enable_pcie_error_reporting(pdev);
pci_set_master(pdev);
/* Now that we have a PCI connection, we need to do the
@@ -16004,8 +16013,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
I40E_AQ_EVENT_MEDIA_NA |
I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
if (err)
- dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, err),
+ dev_info(&pf->pdev->dev, "set phy mask fail, err %pe aq_err %s\n",
+ ERR_PTR(err),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
/* Reconfigure hardware for allowing smaller MSS in the case
@@ -16023,8 +16032,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
msleep(75);
err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
if (err)
- dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, err),
+ dev_info(&pf->pdev->dev, "link restart failed, err %pe aq_err %s\n",
+ ERR_PTR(err),
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
}
@@ -16156,8 +16165,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* get the requested speeds from the fw */
err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL);
if (err)
- dev_dbg(&pf->pdev->dev, "get requested speeds ret = %s last_status = %s\n",
- i40e_stat_str(&pf->hw, err),
+ dev_dbg(&pf->pdev->dev, "get requested speeds ret = %pe last_status = %s\n",
+ ERR_PTR(err),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
pf->hw.phy.link_info.requested_speeds = abilities.link_speed;
@@ -16167,8 +16176,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* get the supported phy types from the fw */
err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities, NULL);
if (err)
- dev_dbg(&pf->pdev->dev, "get supported phy types ret = %s last_status = %s\n",
- i40e_stat_str(&pf->hw, err),
+ dev_dbg(&pf->pdev->dev, "get supported phy types ret = %pe last_status = %s\n",
+ ERR_PTR(err),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
/* make sure the MFS hasn't been set lower than the default */
@@ -16218,7 +16227,6 @@ err_pf_reset:
err_ioremap:
kfree(pf);
err_pf_alloc:
- pci_disable_pcie_error_reporting(pdev);
pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
@@ -16239,7 +16247,7 @@ static void i40e_remove(struct pci_dev *pdev)
{
struct i40e_pf *pf = pci_get_drvdata(pdev);
struct i40e_hw *hw = &pf->hw;
- i40e_status ret_code;
+ int ret_code;
int i;
i40e_dbg_pf_exit(pf);
@@ -16366,7 +16374,6 @@ unmap:
kfree(pf);
pci_release_mem_regions(pdev);
- pci_disable_pcie_error_reporting(pdev);
pci_disable_device(pdev);
}
@@ -16487,9 +16494,9 @@ static void i40e_pci_error_resume(struct pci_dev *pdev)
static void i40e_enable_mc_magic_wake(struct i40e_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
- i40e_status ret;
u8 mac_addr[6];
u16 flags = 0;
+ int ret;
/* Get current MAC address in case it's an LAA */
if (pf->vsi[pf->lan_vsi] && pf->vsi[pf->lan_vsi]->netdev) {
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index 3a38bf8bcde7..9da0c87f0328 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -13,10 +13,10 @@
* in this file) as an equivalent of the FLASH part mapped into the SR.
* We are accessing FLASH always thru the Shadow RAM.
**/
-i40e_status i40e_init_nvm(struct i40e_hw *hw)
+int i40e_init_nvm(struct i40e_hw *hw)
{
struct i40e_nvm_info *nvm = &hw->nvm;
- i40e_status ret_code = 0;
+ int ret_code = 0;
u32 fla, gens;
u8 sr_size;
@@ -52,12 +52,12 @@ i40e_status i40e_init_nvm(struct i40e_hw *hw)
* This function will request NVM ownership for reading
* via the proper Admin Command.
**/
-i40e_status i40e_acquire_nvm(struct i40e_hw *hw,
- enum i40e_aq_resource_access_type access)
+int i40e_acquire_nvm(struct i40e_hw *hw,
+ enum i40e_aq_resource_access_type access)
{
- i40e_status ret_code = 0;
u64 gtime, timeout;
u64 time_left = 0;
+ int ret_code = 0;
if (hw->nvm.blank_nvm_mode)
goto i40e_i40e_acquire_nvm_exit;
@@ -111,7 +111,7 @@ i40e_i40e_acquire_nvm_exit:
**/
void i40e_release_nvm(struct i40e_hw *hw)
{
- i40e_status ret_code = I40E_SUCCESS;
+ int ret_code = I40E_SUCCESS;
u32 total_delay = 0;
if (hw->nvm.blank_nvm_mode)
@@ -138,9 +138,9 @@ void i40e_release_nvm(struct i40e_hw *hw)
*
* Polls the SRCTL Shadow RAM register done bit.
**/
-static i40e_status i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
+static int i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
{
- i40e_status ret_code = I40E_ERR_TIMEOUT;
+ int ret_code = I40E_ERR_TIMEOUT;
u32 srctl, wait_cnt;
/* Poll the I40E_GLNVM_SRCTL until the done bit is set */
@@ -165,10 +165,10 @@ static i40e_status i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
*
* Reads one 16 bit word from the Shadow RAM using the GLNVM_SRCTL register.
**/
-static i40e_status i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
- u16 *data)
+static int i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset,
+ u16 *data)
{
- i40e_status ret_code = I40E_ERR_TIMEOUT;
+ int ret_code = I40E_ERR_TIMEOUT;
u32 sr_reg;
if (offset >= hw->nvm.sr_size) {
@@ -216,13 +216,13 @@ read_nvm_exit:
*
* Writes a 16 bit words buffer to the Shadow RAM using the admin command.
**/
-static i40e_status i40e_read_nvm_aq(struct i40e_hw *hw,
- u8 module_pointer, u32 offset,
- u16 words, void *data,
- bool last_command)
+static int i40e_read_nvm_aq(struct i40e_hw *hw,
+ u8 module_pointer, u32 offset,
+ u16 words, void *data,
+ bool last_command)
{
- i40e_status ret_code = I40E_ERR_NVM;
struct i40e_asq_cmd_details cmd_details;
+ int ret_code = I40E_ERR_NVM;
memset(&cmd_details, 0, sizeof(cmd_details));
cmd_details.wb_desc = &hw->nvm_wb_desc;
@@ -264,10 +264,10 @@ static i40e_status i40e_read_nvm_aq(struct i40e_hw *hw,
*
* Reads one 16 bit word from the Shadow RAM using the AdminQ
**/
-static i40e_status i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
- u16 *data)
+static int i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
+ u16 *data)
{
- i40e_status ret_code = I40E_ERR_TIMEOUT;
+ int ret_code = I40E_ERR_TIMEOUT;
ret_code = i40e_read_nvm_aq(hw, 0x0, offset, 1, data, true);
*data = le16_to_cpu(*(__le16 *)data);
@@ -286,8 +286,8 @@ static i40e_status i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset,
* Do not use this function except in cases where the nvm lock is already
* taken via i40e_acquire_nvm().
**/
-static i40e_status __i40e_read_nvm_word(struct i40e_hw *hw,
- u16 offset, u16 *data)
+static int __i40e_read_nvm_word(struct i40e_hw *hw,
+ u16 offset, u16 *data)
{
if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
return i40e_read_nvm_word_aq(hw, offset, data);
@@ -303,10 +303,10 @@ static i40e_status __i40e_read_nvm_word(struct i40e_hw *hw,
*
* Reads one 16 bit word from the Shadow RAM.
**/
-i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
- u16 *data)
+int i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
+ u16 *data)
{
- i40e_status ret_code = 0;
+ int ret_code = 0;
if (hw->flags & I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK)
ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
@@ -330,17 +330,17 @@ i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
* @words_data_size: Words to read from NVM
* @data_ptr: Pointer to memory location where resulting buffer will be stored
**/
-enum i40e_status_code i40e_read_nvm_module_data(struct i40e_hw *hw,
- u8 module_ptr,
- u16 module_offset,
- u16 data_offset,
- u16 words_data_size,
- u16 *data_ptr)
+int i40e_read_nvm_module_data(struct i40e_hw *hw,
+ u8 module_ptr,
+ u16 module_offset,
+ u16 data_offset,
+ u16 words_data_size,
+ u16 *data_ptr)
{
- i40e_status status;
u16 specific_ptr = 0;
u16 ptr_value = 0;
u32 offset = 0;
+ int status;
if (module_ptr != 0) {
status = i40e_read_nvm_word(hw, module_ptr, &ptr_value);
@@ -406,10 +406,10 @@ enum i40e_status_code i40e_read_nvm_module_data(struct i40e_hw *hw,
* method. The buffer read is preceded by the NVM ownership take
* and followed by the release.
**/
-static i40e_status i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
- u16 *words, u16 *data)
+static int i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
+ u16 *words, u16 *data)
{
- i40e_status ret_code = 0;
+ int ret_code = 0;
u16 index, word;
/* Loop thru the selected region */
@@ -437,13 +437,13 @@ static i40e_status i40e_read_nvm_buffer_srctl(struct i40e_hw *hw, u16 offset,
* method. The buffer read is preceded by the NVM ownership take
* and followed by the release.
**/
-static i40e_status i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
- u16 *words, u16 *data)
+static int i40e_read_nvm_buffer_aq(struct i40e_hw *hw, u16 offset,
+ u16 *words, u16 *data)
{
- i40e_status ret_code;
- u16 read_size;
bool last_cmd = false;
u16 words_read = 0;
+ u16 read_size;
+ int ret_code;
u16 i = 0;
do {
@@ -493,9 +493,9 @@ read_nvm_buffer_aq_exit:
* Reads 16 bit words (data buffer) from the SR using the i40e_read_nvm_srrd()
* method.
**/
-static i40e_status __i40e_read_nvm_buffer(struct i40e_hw *hw,
- u16 offset, u16 *words,
- u16 *data)
+static int __i40e_read_nvm_buffer(struct i40e_hw *hw,
+ u16 offset, u16 *words,
+ u16 *data)
{
if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
return i40e_read_nvm_buffer_aq(hw, offset, words, data);
@@ -514,10 +514,10 @@ static i40e_status __i40e_read_nvm_buffer(struct i40e_hw *hw,
* method. The buffer read is preceded by the NVM ownership take
* and followed by the release.
**/
-i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
- u16 *words, u16 *data)
+int i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
+ u16 *words, u16 *data)
{
- i40e_status ret_code = 0;
+ int ret_code = 0;
if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
@@ -544,12 +544,12 @@ i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
*
* Writes a 16 bit words buffer to the Shadow RAM using the admin command.
**/
-static i40e_status i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
- u32 offset, u16 words, void *data,
- bool last_command)
+static int i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
+ u32 offset, u16 words, void *data,
+ bool last_command)
{
- i40e_status ret_code = I40E_ERR_NVM;
struct i40e_asq_cmd_details cmd_details;
+ int ret_code = I40E_ERR_NVM;
memset(&cmd_details, 0, sizeof(cmd_details));
cmd_details.wb_desc = &hw->nvm_wb_desc;
@@ -594,14 +594,14 @@ static i40e_status i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
* is customer specific and unknown. Therefore, this function skips all maximum
* possible size of VPD (1kB).
**/
-static i40e_status i40e_calc_nvm_checksum(struct i40e_hw *hw,
- u16 *checksum)
+static int i40e_calc_nvm_checksum(struct i40e_hw *hw,
+ u16 *checksum)
{
- i40e_status ret_code;
struct i40e_virt_mem vmem;
u16 pcie_alt_module = 0;
u16 checksum_local = 0;
u16 vpd_module = 0;
+ int ret_code;
u16 *data;
u16 i = 0;
@@ -675,11 +675,11 @@ i40e_calc_nvm_checksum_exit:
* on ARQ completion event reception by caller.
* This function will commit SR to NVM.
**/
-i40e_status i40e_update_nvm_checksum(struct i40e_hw *hw)
+int i40e_update_nvm_checksum(struct i40e_hw *hw)
{
- i40e_status ret_code;
- u16 checksum;
__le16 le_sum;
+ int ret_code;
+ u16 checksum;
ret_code = i40e_calc_nvm_checksum(hw, &checksum);
if (!ret_code) {
@@ -699,12 +699,12 @@ i40e_status i40e_update_nvm_checksum(struct i40e_hw *hw)
* Performs checksum calculation and validates the NVM SW checksum. If the
* caller does not need checksum, the value can be NULL.
**/
-i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
- u16 *checksum)
+int i40e_validate_nvm_checksum(struct i40e_hw *hw,
+ u16 *checksum)
{
- i40e_status ret_code = 0;
- u16 checksum_sr = 0;
u16 checksum_local = 0;
+ u16 checksum_sr = 0;
+ int ret_code = 0;
/* We must acquire the NVM lock in order to correctly synchronize the
* NVM accesses across multiple PFs. Without doing so it is possible
@@ -733,36 +733,36 @@ i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
return ret_code;
}
-static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno);
-static i40e_status i40e_nvmupd_state_reading(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno);
-static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *errno);
+static int i40e_nvmupd_state_init(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno);
+static int i40e_nvmupd_state_reading(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno);
+static int i40e_nvmupd_state_writing(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *errno);
static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
struct i40e_nvm_access *cmd,
int *perrno);
-static i40e_status i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- int *perrno);
-static i40e_status i40e_nvmupd_nvm_write(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno);
-static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno);
-static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno);
-static i40e_status i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno);
-static i40e_status i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno);
+static int i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ int *perrno);
+static int i40e_nvmupd_nvm_write(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno);
+static int i40e_nvmupd_nvm_read(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno);
+static int i40e_nvmupd_exec_aq(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno);
+static int i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno);
+static int i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno);
static inline u8 i40e_nvmupd_get_module(u32 val)
{
return (u8)(val & I40E_NVM_MOD_PNT_MASK);
@@ -807,12 +807,12 @@ static const char * const i40e_nvm_update_state_str[] = {
*
* Dispatches command depending on what update state is current
**/
-i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno)
+int i40e_nvmupd_command(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno)
{
- i40e_status status;
enum i40e_nvmupd_cmd upd_cmd;
+ int status;
/* assume success */
*perrno = 0;
@@ -923,12 +923,12 @@ i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
* Process legitimate commands of the Init state and conditionally set next
* state. Reject all other commands.
**/
-static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno)
+static int i40e_nvmupd_state_init(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno)
{
- i40e_status status = 0;
enum i40e_nvmupd_cmd upd_cmd;
+ int status = 0;
upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
@@ -1062,12 +1062,12 @@ static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
* NVM ownership is already held. Process legitimate commands and set any
* change in state; reject all other commands.
**/
-static i40e_status i40e_nvmupd_state_reading(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno)
+static int i40e_nvmupd_state_reading(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno)
{
- i40e_status status = 0;
enum i40e_nvmupd_cmd upd_cmd;
+ int status = 0;
upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
@@ -1104,13 +1104,13 @@ static i40e_status i40e_nvmupd_state_reading(struct i40e_hw *hw,
* NVM ownership is already held. Process legitimate commands and set any
* change in state; reject all other commands
**/
-static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno)
+static int i40e_nvmupd_state_writing(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno)
{
- i40e_status status = 0;
enum i40e_nvmupd_cmd upd_cmd;
bool retry_attempt = false;
+ int status = 0;
upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
@@ -1187,8 +1187,8 @@ retry:
*/
if (status && (hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) &&
!retry_attempt) {
- i40e_status old_status = status;
u32 old_asq_status = hw->aq.asq_last_status;
+ int old_status = status;
u32 gtime;
gtime = rd32(hw, I40E_GLVFGEN_TIMER);
@@ -1370,17 +1370,17 @@ static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
*
* cmd structure contains identifiers and data buffer
**/
-static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno)
+static int i40e_nvmupd_exec_aq(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno)
{
struct i40e_asq_cmd_details cmd_details;
- i40e_status status;
struct i40e_aq_desc *aq_desc;
u32 buff_size = 0;
u8 *buff = NULL;
u32 aq_desc_len;
u32 aq_data_len;
+ int status;
i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
if (cmd->offset == 0xffff)
@@ -1429,8 +1429,8 @@ static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw,
buff_size, &cmd_details);
if (status) {
i40e_debug(hw, I40E_DEBUG_NVM,
- "i40e_nvmupd_exec_aq err %s aq_err %s\n",
- i40e_stat_str(hw, status),
+ "%s err %pe aq_err %s\n",
+ __func__, ERR_PTR(status),
i40e_aq_str(hw, hw->aq.asq_last_status));
*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
return status;
@@ -1454,9 +1454,9 @@ static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw,
*
* cmd structure contains identifiers and data buffer
**/
-static i40e_status i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno)
+static int i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno)
{
u32 aq_total_len;
u32 aq_desc_len;
@@ -1523,9 +1523,9 @@ static i40e_status i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
*
* cmd structure contains identifiers and data buffer
**/
-static i40e_status i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno)
+static int i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno)
{
u32 aq_total_len;
u32 aq_desc_len;
@@ -1557,13 +1557,13 @@ static i40e_status i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
*
* cmd structure contains identifiers and data buffer
**/
-static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno)
+static int i40e_nvmupd_nvm_read(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno)
{
struct i40e_asq_cmd_details cmd_details;
- i40e_status status;
u8 module, transaction;
+ int status;
bool last;
transaction = i40e_nvmupd_get_transaction(cmd->config);
@@ -1596,13 +1596,13 @@ static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw,
*
* module, offset, data_size and data are in cmd structure
**/
-static i40e_status i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- int *perrno)
+static int i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ int *perrno)
{
- i40e_status status = 0;
struct i40e_asq_cmd_details cmd_details;
u8 module, transaction;
+ int status = 0;
bool last;
transaction = i40e_nvmupd_get_transaction(cmd->config);
@@ -1636,14 +1636,14 @@ static i40e_status i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
*
* module, offset, data_size and data are in cmd structure
**/
-static i40e_status i40e_nvmupd_nvm_write(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno)
+static int i40e_nvmupd_nvm_write(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno)
{
- i40e_status status = 0;
struct i40e_asq_cmd_details cmd_details;
u8 module, transaction;
u8 preservation_flags;
+ int status = 0;
bool last;
transaction = i40e_nvmupd_get_transaction(cmd->config);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_osdep.h b/drivers/net/ethernet/intel/i40e/i40e_osdep.h
index 2f6815b2f8df..2bd4de03dafa 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_osdep.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_osdep.h
@@ -56,5 +56,4 @@ do { \
(h)->bus.func, ##__VA_ARGS__); \
} while (0)
-typedef enum i40e_status_code i40e_status;
#endif /* _I40E_OSDEP_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index 9a71121420c3..fe845987d99a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -16,29 +16,29 @@
*/
/* adminq functions */
-i40e_status i40e_init_adminq(struct i40e_hw *hw);
+int i40e_init_adminq(struct i40e_hw *hw);
void i40e_shutdown_adminq(struct i40e_hw *hw);
void i40e_adminq_init_ring_data(struct i40e_hw *hw);
-i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
- struct i40e_arq_event_info *e,
- u16 *events_pending);
-i40e_status
+int i40e_clean_arq_element(struct i40e_hw *hw,
+ struct i40e_arq_event_info *e,
+ u16 *events_pending);
+int
i40e_asq_send_command(struct i40e_hw *hw, struct i40e_aq_desc *desc,
void *buff, /* can be NULL */ u16 buff_size,
struct i40e_asq_cmd_details *cmd_details);
-i40e_status
+int
i40e_asq_send_command_v2(struct i40e_hw *hw,
struct i40e_aq_desc *desc,
void *buff, /* can be NULL */
u16 buff_size,
struct i40e_asq_cmd_details *cmd_details,
enum i40e_admin_queue_err *aq_status);
-i40e_status
+int
i40e_asq_send_command_atomic(struct i40e_hw *hw, struct i40e_aq_desc *desc,
void *buff, /* can be NULL */ u16 buff_size,
struct i40e_asq_cmd_details *cmd_details,
bool is_atomic_context);
-i40e_status
+int
i40e_asq_send_command_atomic_v2(struct i40e_hw *hw,
struct i40e_aq_desc *desc,
void *buff, /* can be NULL */
@@ -53,327 +53,332 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask,
void i40e_idle_aq(struct i40e_hw *hw);
bool i40e_check_asq_alive(struct i40e_hw *hw);
-i40e_status i40e_aq_queue_shutdown(struct i40e_hw *hw, bool unloading);
+int i40e_aq_queue_shutdown(struct i40e_hw *hw, bool unloading);
const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err);
-const char *i40e_stat_str(struct i40e_hw *hw, i40e_status stat_err);
-i40e_status i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 seid,
- bool pf_lut, u8 *lut, u16 lut_size);
-i40e_status i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 seid,
- bool pf_lut, u8 *lut, u16 lut_size);
-i40e_status i40e_aq_get_rss_key(struct i40e_hw *hw,
- u16 seid,
- struct i40e_aqc_get_set_rss_key_data *key);
-i40e_status i40e_aq_set_rss_key(struct i40e_hw *hw,
- u16 seid,
- struct i40e_aqc_get_set_rss_key_data *key);
+int i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 seid,
+ bool pf_lut, u8 *lut, u16 lut_size);
+int i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 seid,
+ bool pf_lut, u8 *lut, u16 lut_size);
+int i40e_aq_get_rss_key(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_get_set_rss_key_data *key);
+int i40e_aq_set_rss_key(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_get_set_rss_key_data *key);
u32 i40e_led_get(struct i40e_hw *hw);
void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink);
-i40e_status i40e_led_set_phy(struct i40e_hw *hw, bool on,
- u16 led_addr, u32 mode);
-i40e_status i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
- u16 *val);
-i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw,
- u32 time, u32 interval);
+int i40e_led_set_phy(struct i40e_hw *hw, bool on,
+ u16 led_addr, u32 mode);
+int i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
+ u16 *val);
+int i40e_blink_phy_link_led(struct i40e_hw *hw,
+ u32 time, u32 interval);
/* admin send queue commands */
-i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw,
- u16 *fw_major_version, u16 *fw_minor_version,
- u32 *fw_build,
- u16 *api_major_version, u16 *api_minor_version,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_debug_write_register(struct i40e_hw *hw,
- u32 reg_addr, u64 reg_val,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_debug_read_register(struct i40e_hw *hw,
+int i40e_aq_get_firmware_version(struct i40e_hw *hw,
+ u16 *fw_major_version, u16 *fw_minor_version,
+ u32 *fw_build,
+ u16 *api_major_version, u16 *api_minor_version,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_debug_write_register(struct i40e_hw *hw,
+ u32 reg_addr, u64 reg_val,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_debug_read_register(struct i40e_hw *hw,
u32 reg_addr, u64 *reg_val,
struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw, u16 vsi_id,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_clear_default_vsi(struct i40e_hw *hw, u16 vsi_id,
- struct i40e_asq_cmd_details *cmd_details);
-enum i40e_status_code i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
- bool qualified_modules, bool report_init,
- struct i40e_aq_get_phy_abilities_resp *abilities,
- struct i40e_asq_cmd_details *cmd_details);
-enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw,
- struct i40e_aq_set_phy_config *config,
- struct i40e_asq_cmd_details *cmd_details);
-enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
- bool atomic_reset);
-i40e_status i40e_aq_set_mac_loopback(struct i40e_hw *hw,
- bool ena_lpbk,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_set_phy_int_mask(struct i40e_hw *hw, u16 mask,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_set_link_restart_an(struct i40e_hw *hw,
- bool enable_link,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,
- bool enable_lse, struct i40e_link_status *link,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_set_local_advt_reg(struct i40e_hw *hw,
- u64 advt_reg,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_send_driver_version(struct i40e_hw *hw,
+int i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_set_default_vsi(struct i40e_hw *hw, u16 vsi_id,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_clear_default_vsi(struct i40e_hw *hw, u16 vsi_id,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
+ bool qualified_modules, bool report_init,
+ struct i40e_aq_get_phy_abilities_resp *abilities,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_set_phy_config(struct i40e_hw *hw,
+ struct i40e_aq_set_phy_config *config,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
+ bool atomic_reset);
+int i40e_aq_set_mac_loopback(struct i40e_hw *hw,
+ bool ena_lpbk,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_set_phy_int_mask(struct i40e_hw *hw, u16 mask,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_set_link_restart_an(struct i40e_hw *hw,
+ bool enable_link,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_get_link_info(struct i40e_hw *hw,
+ bool enable_lse, struct i40e_link_status *link,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_set_local_advt_reg(struct i40e_hw *hw,
+ u64 advt_reg,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_send_driver_version(struct i40e_hw *hw,
struct i40e_driver_version *dv,
struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_add_vsi(struct i40e_hw *hw,
- struct i40e_vsi_context *vsi_ctx,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
- u16 vsi_id, bool set_filter,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
- u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details,
- bool rx_only_promisc);
-i40e_status i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
- u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details);
-enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
- u16 seid, bool enable,
- u16 vid,
- struct i40e_asq_cmd_details *cmd_details);
-enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
- u16 seid, bool enable,
- u16 vid,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw,
- u16 seid, bool enable, u16 vid,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw,
- u16 seid, bool enable,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw,
- struct i40e_vsi_context *vsi_ctx,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw,
- struct i40e_vsi_context *vsi_ctx,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
- u16 downlink_seid, u8 enabled_tc,
- bool default_port, u16 *pveb_seid,
- bool enable_stats,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_get_veb_parameters(struct i40e_hw *hw,
- u16 veb_seid, u16 *switch_id, bool *floating,
- u16 *statistic_index, u16 *vebs_used,
- u16 *vebs_free,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_add_macvlan(struct i40e_hw *hw, u16 vsi_id,
+int i40e_aq_add_vsi(struct i40e_hw *hw,
+ struct i40e_vsi_context *vsi_ctx,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
+ u16 vsi_id, bool set_filter,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw, u16 vsi_id, bool set,
+ struct i40e_asq_cmd_details *cmd_details,
+ bool rx_only_promisc);
+int i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw, u16 vsi_id, bool set,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
+ u16 seid, bool enable,
+ u16 vid,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
+ u16 seid, bool enable,
+ u16 vid,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_set_vsi_bc_promisc_on_vlan(struct i40e_hw *hw,
+ u16 seid, bool enable, u16 vid,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw,
+ u16 seid, bool enable,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_get_vsi_params(struct i40e_hw *hw,
+ struct i40e_vsi_context *vsi_ctx,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_update_vsi_params(struct i40e_hw *hw,
+ struct i40e_vsi_context *vsi_ctx,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
+ u16 downlink_seid, u8 enabled_tc,
+ bool default_port, u16 *pveb_seid,
+ bool enable_stats,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_get_veb_parameters(struct i40e_hw *hw,
+ u16 veb_seid, u16 *switch_id, bool *floating,
+ u16 *statistic_index, u16 *vebs_used,
+ u16 *vebs_free,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_add_macvlan(struct i40e_hw *hw, u16 vsi_id,
struct i40e_aqc_add_macvlan_element_data *mv_list,
u16 count, struct i40e_asq_cmd_details *cmd_details);
-i40e_status
+int
i40e_aq_add_macvlan_v2(struct i40e_hw *hw, u16 seid,
struct i40e_aqc_add_macvlan_element_data *mv_list,
u16 count, struct i40e_asq_cmd_details *cmd_details,
enum i40e_admin_queue_err *aq_status);
-i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 vsi_id,
- struct i40e_aqc_remove_macvlan_element_data *mv_list,
- u16 count, struct i40e_asq_cmd_details *cmd_details);
-i40e_status
+int i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 vsi_id,
+ struct i40e_aqc_remove_macvlan_element_data *mv_list,
+ u16 count, struct i40e_asq_cmd_details *cmd_details);
+int
i40e_aq_remove_macvlan_v2(struct i40e_hw *hw, u16 seid,
struct i40e_aqc_remove_macvlan_element_data *mv_list,
u16 count, struct i40e_asq_cmd_details *cmd_details,
enum i40e_admin_queue_err *aq_status);
-i40e_status i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
- u16 rule_type, u16 dest_vsi, u16 count, __le16 *mr_list,
- struct i40e_asq_cmd_details *cmd_details,
- u16 *rule_id, u16 *rules_used, u16 *rules_free);
-i40e_status i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
- u16 rule_type, u16 rule_id, u16 count, __le16 *mr_list,
- struct i40e_asq_cmd_details *cmd_details,
- u16 *rules_used, u16 *rules_free);
+int i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
+ u16 rule_type, u16 dest_vsi, u16 count, __le16 *mr_list,
+ struct i40e_asq_cmd_details *cmd_details,
+ u16 *rule_id, u16 *rules_used, u16 *rules_free);
+int i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
+ u16 rule_type, u16 rule_id, u16 count, __le16 *mr_list,
+ struct i40e_asq_cmd_details *cmd_details,
+ u16 *rules_used, u16 *rules_free);
-i40e_status i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
- u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw,
- struct i40e_aqc_get_switch_config_resp *buf,
- u16 buf_size, u16 *start_seid,
- struct i40e_asq_cmd_details *cmd_details);
-enum i40e_status_code i40e_aq_set_switch_config(struct i40e_hw *hw,
- u16 flags,
- u16 valid_flags, u8 mode,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_request_resource(struct i40e_hw *hw,
- enum i40e_aq_resources_ids resource,
- enum i40e_aq_resource_access_type access,
- u8 sdp_number, u64 *timeout,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_release_resource(struct i40e_hw *hw,
- enum i40e_aq_resources_ids resource,
- u8 sdp_number,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer,
- u32 offset, u16 length, void *data,
- bool last_command,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer,
- u32 offset, u16 length, bool last_command,
+int i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
+ u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_get_switch_config(struct i40e_hw *hw,
+ struct i40e_aqc_get_switch_config_resp *buf,
+ u16 buf_size, u16 *start_seid,
struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_discover_capabilities(struct i40e_hw *hw,
- void *buff, u16 buff_size, u16 *data_size,
- enum i40e_admin_queue_opc list_type_opc,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
- u32 offset, u16 length, void *data,
- bool last_command, u8 preservation_flags,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_rearrange_nvm(struct i40e_hw *hw,
- u8 rearrange_nvm,
+int i40e_aq_set_switch_config(struct i40e_hw *hw,
+ u16 flags,
+ u16 valid_flags, u8 mode,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_request_resource(struct i40e_hw *hw,
+ enum i40e_aq_resources_ids resource,
+ enum i40e_aq_resource_access_type access,
+ u8 sdp_number, u64 *timeout,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_release_resource(struct i40e_hw *hw,
+ enum i40e_aq_resources_ids resource,
+ u8 sdp_number,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer,
+ u32 offset, u16 length, void *data,
+ bool last_command,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer,
+ u32 offset, u16 length, bool last_command,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_discover_capabilities(struct i40e_hw *hw,
+ void *buff, u16 buff_size, u16 *data_size,
+ enum i40e_admin_queue_opc list_type_opc,
struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
- u8 mib_type, void *buff, u16 buff_size,
- u16 *local_len, u16 *remote_len,
- struct i40e_asq_cmd_details *cmd_details);
-enum i40e_status_code
+int i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
+ u32 offset, u16 length, void *data,
+ bool last_command, u8 preservation_flags,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_rearrange_nvm(struct i40e_hw *hw,
+ u8 rearrange_nvm,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_get_lldp_mib(struct i40e_hw *hw, u8 bridge_type,
+ u8 mib_type, void *buff, u16 buff_size,
+ u16 *local_len, u16 *remote_len,
+ struct i40e_asq_cmd_details *cmd_details);
+int
i40e_aq_set_lldp_mib(struct i40e_hw *hw,
u8 mib_type, void *buff, u16 buff_size,
struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
- bool enable_update,
- struct i40e_asq_cmd_details *cmd_details);
-enum i40e_status_code
+int i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
+ bool enable_update,
+ struct i40e_asq_cmd_details *cmd_details);
+int
i40e_aq_restore_lldp(struct i40e_hw *hw, u8 *setting, bool restore,
struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
- bool persist,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_set_dcb_parameters(struct i40e_hw *hw,
- bool dcb_enable,
- struct i40e_asq_cmd_details
- *cmd_details);
-i40e_status i40e_aq_start_lldp(struct i40e_hw *hw, bool persist,
+int i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
+ bool persist,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_set_dcb_parameters(struct i40e_hw *hw,
+ bool dcb_enable,
+ struct i40e_asq_cmd_details
+ *cmd_details);
+int i40e_aq_start_lldp(struct i40e_hw *hw, bool persist,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_get_cee_dcb_config(struct i40e_hw *hw,
+ void *buff, u16 buff_size,
struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_get_cee_dcb_config(struct i40e_hw *hw,
- void *buff, u16 buff_size,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
- u16 udp_port, u8 protocol_index,
- u8 *filter_index,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_mac_address_write(struct i40e_hw *hw,
- u16 flags, u8 *mac_addr,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw,
+int i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
+ u16 udp_port, u8 protocol_index,
+ u8 *filter_index,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_mac_address_write(struct i40e_hw *hw,
+ u16 flags, u8 *mac_addr,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw,
u16 seid, u16 credit, u8 max_credit,
struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_dcb_updated(struct i40e_hw *hw,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_config_switch_comp_bw_limit(struct i40e_hw *hw,
- u16 seid, u16 credit, u8 max_bw,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw, u16 seid,
- struct i40e_aqc_configure_vsi_tc_bw_data *bw_data,
+int i40e_aq_dcb_updated(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_config_switch_comp_ets(struct i40e_hw *hw,
- u16 seid,
- struct i40e_aqc_configure_switching_comp_ets_data *ets_data,
- enum i40e_admin_queue_opc opcode,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_config_switch_comp_bw_config(struct i40e_hw *hw,
+int i40e_aq_config_switch_comp_bw_limit(struct i40e_hw *hw,
+ u16 seid, u16 credit, u8 max_bw,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_config_vsi_tc_bw(struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_configure_vsi_tc_bw_data *bw_data,
+ struct i40e_asq_cmd_details *cmd_details);
+int
+i40e_aq_config_switch_comp_ets(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_configure_switching_comp_ets_data *ets_data,
+ enum i40e_admin_queue_opc opcode,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_config_switch_comp_bw_config(struct i40e_hw *hw,
u16 seid,
struct i40e_aqc_configure_switching_comp_bw_config_data *bw_data,
struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_query_vsi_bw_config(struct i40e_hw *hw,
- u16 seid,
- struct i40e_aqc_query_vsi_bw_config_resp *bw_data,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw,
- u16 seid,
- struct i40e_aqc_query_vsi_ets_sla_config_resp *bw_data,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw,
- u16 seid,
- struct i40e_aqc_query_switching_comp_ets_config_resp *bw_data,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_query_port_ets_config(struct i40e_hw *hw,
- u16 seid,
- struct i40e_aqc_query_port_ets_config_resp *bw_data,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw,
- u16 seid,
- struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_resume_port_tx(struct i40e_hw *hw,
- struct i40e_asq_cmd_details *cmd_details);
-enum i40e_status_code
+int i40e_aq_query_vsi_bw_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_vsi_bw_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details);
+int
+i40e_aq_query_vsi_ets_sla_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_vsi_ets_sla_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details);
+int
+i40e_aq_query_switch_comp_ets_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_switching_comp_ets_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details);
+int
+i40e_aq_query_port_ets_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_port_ets_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details);
+int
+i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_query_switching_comp_bw_config_resp *bw_data,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_resume_port_tx(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details);
+int
i40e_aq_add_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
struct i40e_aqc_cloud_filters_element_bb *filters,
u8 filter_count);
-enum i40e_status_code
+int
i40e_aq_add_cloud_filters(struct i40e_hw *hw, u16 vsi,
struct i40e_aqc_cloud_filters_element_data *filters,
u8 filter_count);
-enum i40e_status_code
+int
i40e_aq_rem_cloud_filters(struct i40e_hw *hw, u16 vsi,
struct i40e_aqc_cloud_filters_element_data *filters,
u8 filter_count);
-enum i40e_status_code
+int
i40e_aq_rem_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
struct i40e_aqc_cloud_filters_element_bb *filters,
u8 filter_count);
-i40e_status i40e_read_lldp_cfg(struct i40e_hw *hw,
- struct i40e_lldp_variables *lldp_cfg);
-enum i40e_status_code
+int i40e_read_lldp_cfg(struct i40e_hw *hw,
+ struct i40e_lldp_variables *lldp_cfg);
+int
i40e_aq_suspend_port_tx(struct i40e_hw *hw, u16 seid,
struct i40e_asq_cmd_details *cmd_details);
/* i40e_common */
-i40e_status i40e_init_shared_code(struct i40e_hw *hw);
-i40e_status i40e_pf_reset(struct i40e_hw *hw);
+int i40e_init_shared_code(struct i40e_hw *hw);
+int i40e_pf_reset(struct i40e_hw *hw);
void i40e_clear_hw(struct i40e_hw *hw);
void i40e_clear_pxe_mode(struct i40e_hw *hw);
-i40e_status i40e_get_link_status(struct i40e_hw *hw, bool *link_up);
-i40e_status i40e_update_link_info(struct i40e_hw *hw);
-i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
-i40e_status i40e_read_bw_from_alt_ram(struct i40e_hw *hw,
- u32 *max_bw, u32 *min_bw, bool *min_valid,
- bool *max_valid);
-i40e_status i40e_aq_configure_partition_bw(struct i40e_hw *hw,
- struct i40e_aqc_configure_partition_bw_data *bw_data,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
-i40e_status i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num,
- u32 pba_num_size);
-i40e_status i40e_validate_mac_addr(u8 *mac_addr);
+int i40e_get_link_status(struct i40e_hw *hw, bool *link_up);
+int i40e_update_link_info(struct i40e_hw *hw);
+int i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
+int i40e_read_bw_from_alt_ram(struct i40e_hw *hw,
+ u32 *max_bw, u32 *min_bw, bool *min_valid,
+ bool *max_valid);
+int
+i40e_aq_configure_partition_bw(struct i40e_hw *hw,
+ struct i40e_aqc_configure_partition_bw_data *bw_data,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
+int i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num,
+ u32 pba_num_size);
+int i40e_validate_mac_addr(u8 *mac_addr);
void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable);
/* prototype for functions used for NVM access */
-i40e_status i40e_init_nvm(struct i40e_hw *hw);
-i40e_status i40e_acquire_nvm(struct i40e_hw *hw,
- enum i40e_aq_resource_access_type access);
+int i40e_init_nvm(struct i40e_hw *hw);
+int i40e_acquire_nvm(struct i40e_hw *hw,
+ enum i40e_aq_resource_access_type access);
void i40e_release_nvm(struct i40e_hw *hw);
-i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
- u16 *data);
-enum i40e_status_code i40e_read_nvm_module_data(struct i40e_hw *hw,
- u8 module_ptr,
- u16 module_offset,
- u16 data_offset,
- u16 words_data_size,
- u16 *data_ptr);
-i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
- u16 *words, u16 *data);
-i40e_status i40e_update_nvm_checksum(struct i40e_hw *hw);
-i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
- u16 *checksum);
-i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *);
+int i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
+ u16 *data);
+int i40e_read_nvm_module_data(struct i40e_hw *hw,
+ u8 module_ptr,
+ u16 module_offset,
+ u16 data_offset,
+ u16 words_data_size,
+ u16 *data_ptr);
+int i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
+ u16 *words, u16 *data);
+int i40e_update_nvm_checksum(struct i40e_hw *hw);
+int i40e_validate_nvm_checksum(struct i40e_hw *hw,
+ u16 *checksum);
+int i40e_nvmupd_command(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *errno);
void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode,
struct i40e_aq_desc *desc);
void i40e_nvmupd_clear_wait_state(struct i40e_hw *hw);
void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status);
-i40e_status i40e_set_mac_type(struct i40e_hw *hw);
+int i40e_set_mac_type(struct i40e_hw *hw);
extern struct i40e_rx_ptype_decoded i40e_ptype_lookup[];
@@ -422,41 +427,41 @@ i40e_virtchnl_link_speed(enum i40e_aq_link_speed link_speed)
/* i40e_common for VF drivers*/
void i40e_vf_parse_hw_config(struct i40e_hw *hw,
struct virtchnl_vf_resource *msg);
-i40e_status i40e_vf_reset(struct i40e_hw *hw);
-i40e_status i40e_aq_send_msg_to_pf(struct i40e_hw *hw,
- enum virtchnl_ops v_opcode,
- i40e_status v_retval,
- u8 *msg, u16 msglen,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_set_filter_control(struct i40e_hw *hw,
- struct i40e_filter_control_settings *settings);
-i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
- u8 *mac_addr, u16 ethtype, u16 flags,
- u16 vsi_seid, u16 queue, bool is_add,
- struct i40e_control_filter_stats *stats,
- struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id,
- u8 table_id, u32 start_index, u16 buff_size,
- void *buff, u16 *ret_buff_size,
- u8 *ret_next_table, u32 *ret_next_index,
- struct i40e_asq_cmd_details *cmd_details);
+int i40e_vf_reset(struct i40e_hw *hw);
+int i40e_aq_send_msg_to_pf(struct i40e_hw *hw,
+ enum virtchnl_ops v_opcode,
+ int v_retval,
+ u8 *msg, u16 msglen,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_set_filter_control(struct i40e_hw *hw,
+ struct i40e_filter_control_settings *settings);
+int i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
+ u8 *mac_addr, u16 ethtype, u16 flags,
+ u16 vsi_seid, u16 queue, bool is_add,
+ struct i40e_control_filter_stats *stats,
+ struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id,
+ u8 table_id, u32 start_index, u16 buff_size,
+ void *buff, u16 *ret_buff_size,
+ u8 *ret_next_table, u32 *ret_next_index,
+ struct i40e_asq_cmd_details *cmd_details);
void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw,
u16 vsi_seid);
-i40e_status i40e_aq_rx_ctl_read_register(struct i40e_hw *hw,
- u32 reg_addr, u32 *reg_val,
- struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_rx_ctl_read_register(struct i40e_hw *hw,
+ u32 reg_addr, u32 *reg_val,
+ struct i40e_asq_cmd_details *cmd_details);
u32 i40e_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr);
-i40e_status i40e_aq_rx_ctl_write_register(struct i40e_hw *hw,
- u32 reg_addr, u32 reg_val,
- struct i40e_asq_cmd_details *cmd_details);
+int i40e_aq_rx_ctl_write_register(struct i40e_hw *hw,
+ u32 reg_addr, u32 reg_val,
+ struct i40e_asq_cmd_details *cmd_details);
void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val);
-enum i40e_status_code
+int
i40e_aq_set_phy_register_ext(struct i40e_hw *hw,
u8 phy_select, u8 dev_addr, bool page_change,
bool set_mdio, u8 mdio_num,
u32 reg_addr, u32 reg_val,
struct i40e_asq_cmd_details *cmd_details);
-enum i40e_status_code
+int
i40e_aq_get_phy_register_ext(struct i40e_hw *hw,
u8 phy_select, u8 dev_addr, bool page_change,
bool set_mdio, u8 mdio_num,
@@ -469,43 +474,43 @@ i40e_aq_get_phy_register_ext(struct i40e_hw *hw,
#define i40e_aq_get_phy_register(hw, ps, da, pc, ra, rv, cd) \
i40e_aq_get_phy_register_ext(hw, ps, da, pc, false, 0, ra, rv, cd)
-i40e_status i40e_read_phy_register_clause22(struct i40e_hw *hw,
- u16 reg, u8 phy_addr, u16 *value);
-i40e_status i40e_write_phy_register_clause22(struct i40e_hw *hw,
- u16 reg, u8 phy_addr, u16 value);
-i40e_status i40e_read_phy_register_clause45(struct i40e_hw *hw,
- u8 page, u16 reg, u8 phy_addr, u16 *value);
-i40e_status i40e_write_phy_register_clause45(struct i40e_hw *hw,
- u8 page, u16 reg, u8 phy_addr, u16 value);
-i40e_status i40e_read_phy_register(struct i40e_hw *hw, u8 page, u16 reg,
- u8 phy_addr, u16 *value);
-i40e_status i40e_write_phy_register(struct i40e_hw *hw, u8 page, u16 reg,
- u8 phy_addr, u16 value);
+int i40e_read_phy_register_clause22(struct i40e_hw *hw,
+ u16 reg, u8 phy_addr, u16 *value);
+int i40e_write_phy_register_clause22(struct i40e_hw *hw,
+ u16 reg, u8 phy_addr, u16 value);
+int i40e_read_phy_register_clause45(struct i40e_hw *hw,
+ u8 page, u16 reg, u8 phy_addr, u16 *value);
+int i40e_write_phy_register_clause45(struct i40e_hw *hw,
+ u8 page, u16 reg, u8 phy_addr, u16 value);
+int i40e_read_phy_register(struct i40e_hw *hw, u8 page, u16 reg,
+ u8 phy_addr, u16 *value);
+int i40e_write_phy_register(struct i40e_hw *hw, u8 page, u16 reg,
+ u8 phy_addr, u16 value);
u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num);
-i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw,
- u32 time, u32 interval);
-i40e_status i40e_aq_write_ddp(struct i40e_hw *hw, void *buff,
- u16 buff_size, u32 track_id,
- u32 *error_offset, u32 *error_info,
- struct i40e_asq_cmd_details *
- cmd_details);
-i40e_status i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
- u16 buff_size, u8 flags,
- struct i40e_asq_cmd_details *
- cmd_details);
+int i40e_blink_phy_link_led(struct i40e_hw *hw,
+ u32 time, u32 interval);
+int i40e_aq_write_ddp(struct i40e_hw *hw, void *buff,
+ u16 buff_size, u32 track_id,
+ u32 *error_offset, u32 *error_info,
+ struct i40e_asq_cmd_details *
+ cmd_details);
+int i40e_aq_get_ddp_list(struct i40e_hw *hw, void *buff,
+ u16 buff_size, u8 flags,
+ struct i40e_asq_cmd_details *
+ cmd_details);
struct i40e_generic_seg_header *
i40e_find_segment_in_package(u32 segment_type,
struct i40e_package_header *pkg_header);
struct i40e_profile_section_header *
i40e_find_section_in_profile(u32 section_type,
struct i40e_profile_segment *profile);
-enum i40e_status_code
+int
i40e_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *i40e_seg,
u32 track_id);
-enum i40e_status_code
+int
i40e_rollback_profile(struct i40e_hw *hw, struct i40e_profile_segment *i40e_seg,
u32 track_id);
-enum i40e_status_code
+int
i40e_add_pinfo_to_list(struct i40e_hw *hw,
struct i40e_profile_segment *profile,
u8 *profile_info_sec, u32 track_id);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_status.h b/drivers/net/ethernet/intel/i40e/i40e_status.h
index db3714a65dc7..4d2782e76038 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_status.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_status.h
@@ -9,65 +9,30 @@ enum i40e_status_code {
I40E_SUCCESS = 0,
I40E_ERR_NVM = -1,
I40E_ERR_NVM_CHECKSUM = -2,
- I40E_ERR_PHY = -3,
I40E_ERR_CONFIG = -4,
I40E_ERR_PARAM = -5,
- I40E_ERR_MAC_TYPE = -6,
I40E_ERR_UNKNOWN_PHY = -7,
- I40E_ERR_LINK_SETUP = -8,
- I40E_ERR_ADAPTER_STOPPED = -9,
I40E_ERR_INVALID_MAC_ADDR = -10,
I40E_ERR_DEVICE_NOT_SUPPORTED = -11,
- I40E_ERR_PRIMARY_REQUESTS_PENDING = -12,
- I40E_ERR_INVALID_LINK_SETTINGS = -13,
- I40E_ERR_AUTONEG_NOT_COMPLETE = -14,
I40E_ERR_RESET_FAILED = -15,
- I40E_ERR_SWFW_SYNC = -16,
I40E_ERR_NO_AVAILABLE_VSI = -17,
I40E_ERR_NO_MEMORY = -18,
I40E_ERR_BAD_PTR = -19,
- I40E_ERR_RING_FULL = -20,
- I40E_ERR_INVALID_PD_ID = -21,
- I40E_ERR_INVALID_QP_ID = -22,
- I40E_ERR_INVALID_CQ_ID = -23,
- I40E_ERR_INVALID_CEQ_ID = -24,
- I40E_ERR_INVALID_AEQ_ID = -25,
I40E_ERR_INVALID_SIZE = -26,
- I40E_ERR_INVALID_ARP_INDEX = -27,
- I40E_ERR_INVALID_FPM_FUNC_ID = -28,
- I40E_ERR_QP_INVALID_MSG_SIZE = -29,
- I40E_ERR_QP_TOOMANY_WRS_POSTED = -30,
- I40E_ERR_INVALID_FRAG_COUNT = -31,
I40E_ERR_QUEUE_EMPTY = -32,
- I40E_ERR_INVALID_ALIGNMENT = -33,
- I40E_ERR_FLUSHED_QUEUE = -34,
- I40E_ERR_INVALID_PUSH_PAGE_INDEX = -35,
- I40E_ERR_INVALID_IMM_DATA_SIZE = -36,
I40E_ERR_TIMEOUT = -37,
- I40E_ERR_OPCODE_MISMATCH = -38,
- I40E_ERR_CQP_COMPL_ERROR = -39,
- I40E_ERR_INVALID_VF_ID = -40,
- I40E_ERR_INVALID_HMCFN_ID = -41,
- I40E_ERR_BACKING_PAGE_ERROR = -42,
- I40E_ERR_NO_PBLCHUNKS_AVAILABLE = -43,
- I40E_ERR_INVALID_PBLE_INDEX = -44,
I40E_ERR_INVALID_SD_INDEX = -45,
I40E_ERR_INVALID_PAGE_DESC_INDEX = -46,
I40E_ERR_INVALID_SD_TYPE = -47,
- I40E_ERR_MEMCPY_FAILED = -48,
I40E_ERR_INVALID_HMC_OBJ_INDEX = -49,
I40E_ERR_INVALID_HMC_OBJ_COUNT = -50,
- I40E_ERR_INVALID_SRQ_ARM_LIMIT = -51,
- I40E_ERR_SRQ_ENABLED = -52,
I40E_ERR_ADMIN_QUEUE_ERROR = -53,
I40E_ERR_ADMIN_QUEUE_TIMEOUT = -54,
I40E_ERR_BUF_TOO_SHORT = -55,
I40E_ERR_ADMIN_QUEUE_FULL = -56,
I40E_ERR_ADMIN_QUEUE_NO_WORK = -57,
- I40E_ERR_BAD_IWARP_CQE = -58,
I40E_ERR_NVM_BLANK_MODE = -59,
I40E_ERR_NOT_IMPLEMENTED = -60,
- I40E_ERR_PE_DOORBELL_NOT_ENABLED = -61,
I40E_ERR_DIAG_TEST_FAILED = -62,
I40E_ERR_NOT_READY = -63,
I40E_NOT_SUPPORTED = -64,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 635f93d60318..8a4587585acd 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -17,7 +17,7 @@
**/
static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
enum virtchnl_ops v_opcode,
- i40e_status v_retval, u8 *msg,
+ int v_retval, u8 *msg,
u16 msglen)
{
struct i40e_hw *hw = &pf->hw;
@@ -441,14 +441,14 @@ irq_list_done:
}
/**
- * i40e_release_iwarp_qvlist
+ * i40e_release_rdma_qvlist
* @vf: pointer to the VF.
*
**/
-static void i40e_release_iwarp_qvlist(struct i40e_vf *vf)
+static void i40e_release_rdma_qvlist(struct i40e_vf *vf)
{
struct i40e_pf *pf = vf->pf;
- struct virtchnl_iwarp_qvlist_info *qvlist_info = vf->qvlist_info;
+ struct virtchnl_rdma_qvlist_info *qvlist_info = vf->qvlist_info;
u32 msix_vf;
u32 i;
@@ -457,7 +457,7 @@ static void i40e_release_iwarp_qvlist(struct i40e_vf *vf)
msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
for (i = 0; i < qvlist_info->num_vectors; i++) {
- struct virtchnl_iwarp_qv_info *qv_info;
+ struct virtchnl_rdma_qv_info *qv_info;
u32 next_q_index, next_q_type;
struct i40e_hw *hw = &pf->hw;
u32 v_idx, reg_idx, reg;
@@ -491,18 +491,19 @@ static void i40e_release_iwarp_qvlist(struct i40e_vf *vf)
}
/**
- * i40e_config_iwarp_qvlist
+ * i40e_config_rdma_qvlist
* @vf: pointer to the VF info
* @qvlist_info: queue and vector list
*
* Return 0 on success or < 0 on error
**/
-static int i40e_config_iwarp_qvlist(struct i40e_vf *vf,
- struct virtchnl_iwarp_qvlist_info *qvlist_info)
+static int
+i40e_config_rdma_qvlist(struct i40e_vf *vf,
+ struct virtchnl_rdma_qvlist_info *qvlist_info)
{
struct i40e_pf *pf = vf->pf;
struct i40e_hw *hw = &pf->hw;
- struct virtchnl_iwarp_qv_info *qv_info;
+ struct virtchnl_rdma_qv_info *qv_info;
u32 v_idx, i, reg_idx, reg;
u32 next_q_idx, next_q_type;
u32 msix_vf;
@@ -1246,13 +1247,13 @@ err:
* @vl: List of VLANs - apply filter for given VLANs
* @num_vlans: Number of elements in @vl
**/
-static i40e_status
+static int
i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable,
bool unicast_enable, s16 *vl, u16 num_vlans)
{
- i40e_status aq_ret, aq_tmp = 0;
struct i40e_pf *pf = vf->pf;
struct i40e_hw *hw = &pf->hw;
+ int aq_ret, aq_tmp = 0;
int i;
/* No VLAN to set promisc on, set on VSI */
@@ -1264,9 +1265,9 @@ i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable,
int aq_err = pf->hw.aq.asq_last_status;
dev_err(&pf->pdev->dev,
- "VF %d failed to set multicast promiscuous mode err %s aq_err %s\n",
+ "VF %d failed to set multicast promiscuous mode err %pe aq_err %s\n",
vf->vf_id,
- i40e_stat_str(&pf->hw, aq_ret),
+ ERR_PTR(aq_ret),
i40e_aq_str(&pf->hw, aq_err));
return aq_ret;
@@ -1280,9 +1281,9 @@ i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable,
int aq_err = pf->hw.aq.asq_last_status;
dev_err(&pf->pdev->dev,
- "VF %d failed to set unicast promiscuous mode err %s aq_err %s\n",
+ "VF %d failed to set unicast promiscuous mode err %pe aq_err %s\n",
vf->vf_id,
- i40e_stat_str(&pf->hw, aq_ret),
+ ERR_PTR(aq_ret),
i40e_aq_str(&pf->hw, aq_err));
}
@@ -1297,9 +1298,9 @@ i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable,
int aq_err = pf->hw.aq.asq_last_status;
dev_err(&pf->pdev->dev,
- "VF %d failed to set multicast promiscuous mode err %s aq_err %s\n",
+ "VF %d failed to set multicast promiscuous mode err %pe aq_err %s\n",
vf->vf_id,
- i40e_stat_str(&pf->hw, aq_ret),
+ ERR_PTR(aq_ret),
i40e_aq_str(&pf->hw, aq_err));
if (!aq_tmp)
@@ -1313,9 +1314,9 @@ i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable,
int aq_err = pf->hw.aq.asq_last_status;
dev_err(&pf->pdev->dev,
- "VF %d failed to set unicast promiscuous mode err %s aq_err %s\n",
+ "VF %d failed to set unicast promiscuous mode err %pe aq_err %s\n",
vf->vf_id,
- i40e_stat_str(&pf->hw, aq_ret),
+ ERR_PTR(aq_ret),
i40e_aq_str(&pf->hw, aq_err));
if (!aq_tmp)
@@ -1339,13 +1340,13 @@ i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable,
* Called from the VF to configure the promiscuous mode of
* VF vsis and from the VF reset path to reset promiscuous mode.
**/
-static i40e_status i40e_config_vf_promiscuous_mode(struct i40e_vf *vf,
- u16 vsi_id,
- bool allmulti,
- bool alluni)
+static int i40e_config_vf_promiscuous_mode(struct i40e_vf *vf,
+ u16 vsi_id,
+ bool allmulti,
+ bool alluni)
{
- i40e_status aq_ret = I40E_SUCCESS;
struct i40e_pf *pf = vf->pf;
+ int aq_ret = I40E_SUCCESS;
struct i40e_vsi *vsi;
u16 num_vlans;
s16 *vl;
@@ -1955,7 +1956,7 @@ static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
struct i40e_pf *pf;
struct i40e_hw *hw;
int abs_vf_id;
- i40e_status aq_ret;
+ int aq_ret;
/* validate the request */
if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs)
@@ -1987,7 +1988,7 @@ static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
**/
static int i40e_vc_send_resp_to_vf(struct i40e_vf *vf,
enum virtchnl_ops opcode,
- i40e_status retval)
+ int retval)
{
return i40e_vc_send_msg_to_vf(vf, opcode, retval, NULL, 0);
}
@@ -2091,9 +2092,9 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
{
struct virtchnl_vf_resource *vfres = NULL;
struct i40e_pf *pf = vf->pf;
- i40e_status aq_ret = 0;
struct i40e_vsi *vsi;
int num_vsis = 1;
+ int aq_ret = 0;
size_t len = 0;
int ret;
@@ -2123,11 +2124,11 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;
if (i40e_vf_client_capable(pf, vf->vf_id) &&
- (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_IWARP)) {
- vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_IWARP;
- set_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states);
+ (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RDMA)) {
+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RDMA;
+ set_bit(I40E_VF_STATE_RDMAENA, &vf->vf_states);
} else {
- clear_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states);
+ clear_bit(I40E_VF_STATE_RDMAENA, &vf->vf_states);
}
if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
@@ -2221,9 +2222,9 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf, u8 *msg)
struct virtchnl_promisc_info *info =
(struct virtchnl_promisc_info *)msg;
struct i40e_pf *pf = vf->pf;
- i40e_status aq_ret = 0;
bool allmulti = false;
bool alluni = false;
+ int aq_ret = 0;
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
aq_ret = I40E_ERR_PARAM;
@@ -2308,10 +2309,10 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
struct virtchnl_queue_pair_info *qpi;
u16 vsi_id, vsi_queue_id = 0;
struct i40e_pf *pf = vf->pf;
- i40e_status aq_ret = 0;
int i, j = 0, idx = 0;
struct i40e_vsi *vsi;
u16 num_qps_all = 0;
+ int aq_ret = 0;
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
aq_ret = I40E_ERR_PARAM;
@@ -2458,8 +2459,8 @@ static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg)
struct virtchnl_irq_map_info *irqmap_info =
(struct virtchnl_irq_map_info *)msg;
struct virtchnl_vector_map *map;
+ int aq_ret = 0;
u16 vsi_id;
- i40e_status aq_ret = 0;
int i;
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
@@ -2574,7 +2575,7 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg)
struct virtchnl_queue_select *vqs =
(struct virtchnl_queue_select *)msg;
struct i40e_pf *pf = vf->pf;
- i40e_status aq_ret = 0;
+ int aq_ret = 0;
int i;
if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
@@ -2632,7 +2633,7 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg)
struct virtchnl_queue_select *vqs =
(struct virtchnl_queue_select *)msg;
struct i40e_pf *pf = vf->pf;
- i40e_status aq_ret = 0;
+ int aq_ret = 0;
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
aq_ret = I40E_ERR_PARAM;
@@ -2783,7 +2784,7 @@ static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg)
(struct virtchnl_queue_select *)msg;
struct i40e_pf *pf = vf->pf;
struct i40e_eth_stats stats;
- i40e_status aq_ret = 0;
+ int aq_ret = 0;
struct i40e_vsi *vsi;
memset(&stats, 0, sizeof(struct i40e_eth_stats));
@@ -2926,7 +2927,7 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
(struct virtchnl_ether_addr_list *)msg;
struct i40e_pf *pf = vf->pf;
struct i40e_vsi *vsi = NULL;
- i40e_status ret = 0;
+ int ret = 0;
int i;
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
@@ -2998,7 +2999,7 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg)
bool was_unimac_deleted = false;
struct i40e_pf *pf = vf->pf;
struct i40e_vsi *vsi = NULL;
- i40e_status ret = 0;
+ int ret = 0;
int i;
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
@@ -3071,7 +3072,7 @@ static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg)
(struct virtchnl_vlan_filter_list *)msg;
struct i40e_pf *pf = vf->pf;
struct i40e_vsi *vsi = NULL;
- i40e_status aq_ret = 0;
+ int aq_ret = 0;
int i;
if ((vf->num_vlan >= I40E_VC_MAX_VLAN_PER_VF) &&
@@ -3142,7 +3143,7 @@ static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg)
(struct virtchnl_vlan_filter_list *)msg;
struct i40e_pf *pf = vf->pf;
struct i40e_vsi *vsi = NULL;
- i40e_status aq_ret = 0;
+ int aq_ret = 0;
int i;
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
@@ -3187,21 +3188,21 @@ error_param:
}
/**
- * i40e_vc_iwarp_msg
+ * i40e_vc_rdma_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
* @msglen: msg length
*
* called from the VF for the iwarp msgs
**/
-static int i40e_vc_iwarp_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
+static int i40e_vc_rdma_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
struct i40e_pf *pf = vf->pf;
int abs_vf_id = vf->vf_id + pf->hw.func_caps.vf_base_id;
- i40e_status aq_ret = 0;
+ int aq_ret = 0;
if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
- !test_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states)) {
+ !test_bit(I40E_VF_STATE_RDMAENA, &vf->vf_states)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
@@ -3211,42 +3212,42 @@ static int i40e_vc_iwarp_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
error_param:
/* send the response to the VF */
- return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_IWARP,
+ return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_RDMA,
aq_ret);
}
/**
- * i40e_vc_iwarp_qvmap_msg
+ * i40e_vc_rdma_qvmap_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
* @config: config qvmap or release it
*
* called from the VF for the iwarp msgs
**/
-static int i40e_vc_iwarp_qvmap_msg(struct i40e_vf *vf, u8 *msg, bool config)
+static int i40e_vc_rdma_qvmap_msg(struct i40e_vf *vf, u8 *msg, bool config)
{
- struct virtchnl_iwarp_qvlist_info *qvlist_info =
- (struct virtchnl_iwarp_qvlist_info *)msg;
- i40e_status aq_ret = 0;
+ struct virtchnl_rdma_qvlist_info *qvlist_info =
+ (struct virtchnl_rdma_qvlist_info *)msg;
+ int aq_ret = 0;
if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
- !test_bit(I40E_VF_STATE_IWARPENA, &vf->vf_states)) {
+ !test_bit(I40E_VF_STATE_RDMAENA, &vf->vf_states)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
if (config) {
- if (i40e_config_iwarp_qvlist(vf, qvlist_info))
+ if (i40e_config_rdma_qvlist(vf, qvlist_info))
aq_ret = I40E_ERR_PARAM;
} else {
- i40e_release_iwarp_qvlist(vf);
+ i40e_release_rdma_qvlist(vf);
}
error_param:
/* send the response to the VF */
return i40e_vc_send_resp_to_vf(vf,
- config ? VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP :
- VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP,
+ config ? VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP :
+ VIRTCHNL_OP_RELEASE_RDMA_IRQ_MAP,
aq_ret);
}
@@ -3263,7 +3264,7 @@ static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg)
(struct virtchnl_rss_key *)msg;
struct i40e_pf *pf = vf->pf;
struct i40e_vsi *vsi = NULL;
- i40e_status aq_ret = 0;
+ int aq_ret = 0;
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
!i40e_vc_isvalid_vsi_id(vf, vrk->vsi_id) ||
@@ -3293,7 +3294,7 @@ static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg)
(struct virtchnl_rss_lut *)msg;
struct i40e_pf *pf = vf->pf;
struct i40e_vsi *vsi = NULL;
- i40e_status aq_ret = 0;
+ int aq_ret = 0;
u16 i;
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE) ||
@@ -3328,7 +3329,7 @@ static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg)
{
struct virtchnl_rss_hena *vrh = NULL;
struct i40e_pf *pf = vf->pf;
- i40e_status aq_ret = 0;
+ int aq_ret = 0;
int len = 0;
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
@@ -3365,7 +3366,7 @@ static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg)
(struct virtchnl_rss_hena *)msg;
struct i40e_pf *pf = vf->pf;
struct i40e_hw *hw = &pf->hw;
- i40e_status aq_ret = 0;
+ int aq_ret = 0;
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
aq_ret = I40E_ERR_PARAM;
@@ -3389,8 +3390,8 @@ err:
**/
static int i40e_vc_enable_vlan_stripping(struct i40e_vf *vf, u8 *msg)
{
- i40e_status aq_ret = 0;
struct i40e_vsi *vsi;
+ int aq_ret = 0;
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
aq_ret = I40E_ERR_PARAM;
@@ -3415,8 +3416,8 @@ err:
**/
static int i40e_vc_disable_vlan_stripping(struct i40e_vf *vf, u8 *msg)
{
- i40e_status aq_ret = 0;
struct i40e_vsi *vsi;
+ int aq_ret = 0;
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
aq_ret = I40E_ERR_PARAM;
@@ -3615,8 +3616,8 @@ static void i40e_del_all_cloud_filters(struct i40e_vf *vf)
ret = i40e_add_del_cloud_filter(vsi, cfilter, false);
if (ret)
dev_err(&pf->pdev->dev,
- "VF %d: Failed to delete cloud filter, err %s aq_err %s\n",
- vf->vf_id, i40e_stat_str(&pf->hw, ret),
+ "VF %d: Failed to delete cloud filter, err %pe aq_err %s\n",
+ vf->vf_id, ERR_PTR(ret),
i40e_aq_str(&pf->hw,
pf->hw.aq.asq_last_status));
@@ -3642,7 +3643,7 @@ static int i40e_vc_del_cloud_filter(struct i40e_vf *vf, u8 *msg)
struct i40e_pf *pf = vf->pf;
struct i40e_vsi *vsi = NULL;
struct hlist_node *node;
- i40e_status aq_ret = 0;
+ int aq_ret = 0;
int i, ret;
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
@@ -3718,8 +3719,8 @@ static int i40e_vc_del_cloud_filter(struct i40e_vf *vf, u8 *msg)
ret = i40e_add_del_cloud_filter(vsi, &cfilter, false);
if (ret) {
dev_err(&pf->pdev->dev,
- "VF %d: Failed to delete cloud filter, err %s aq_err %s\n",
- vf->vf_id, i40e_stat_str(&pf->hw, ret),
+ "VF %d: Failed to delete cloud filter, err %pe aq_err %s\n",
+ vf->vf_id, ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
goto err;
}
@@ -3773,7 +3774,7 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
struct i40e_cloud_filter *cfilter = NULL;
struct i40e_pf *pf = vf->pf;
struct i40e_vsi *vsi = NULL;
- i40e_status aq_ret = 0;
+ int aq_ret = 0;
int i, ret;
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
@@ -3852,8 +3853,8 @@ static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
ret = i40e_add_del_cloud_filter(vsi, cfilter, true);
if (ret) {
dev_err(&pf->pdev->dev,
- "VF %d: Failed to add cloud filter, err %s aq_err %s\n",
- vf->vf_id, i40e_stat_str(&pf->hw, ret),
+ "VF %d: Failed to add cloud filter, err %pe aq_err %s\n",
+ vf->vf_id, ERR_PTR(ret),
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
goto err_free;
}
@@ -3882,7 +3883,7 @@ static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg)
struct i40e_pf *pf = vf->pf;
struct i40e_link_status *ls = &pf->hw.phy.link_info;
int i, adq_request_qps = 0;
- i40e_status aq_ret = 0;
+ int aq_ret = 0;
u64 speed = 0;
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
@@ -3994,7 +3995,7 @@ err:
static int i40e_vc_del_qch_msg(struct i40e_vf *vf, u8 *msg)
{
struct i40e_pf *pf = vf->pf;
- i40e_status aq_ret = 0;
+ int aq_ret = 0;
if (!i40e_sync_vf_state(vf, I40E_VF_STATE_ACTIVE)) {
aq_ret = I40E_ERR_PARAM;
@@ -4112,14 +4113,14 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
case VIRTCHNL_OP_GET_STATS:
ret = i40e_vc_get_stats_msg(vf, msg);
break;
- case VIRTCHNL_OP_IWARP:
- ret = i40e_vc_iwarp_msg(vf, msg, msglen);
+ case VIRTCHNL_OP_RDMA:
+ ret = i40e_vc_rdma_msg(vf, msg, msglen);
break;
- case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP:
- ret = i40e_vc_iwarp_qvmap_msg(vf, msg, true);
+ case VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP:
+ ret = i40e_vc_rdma_qvmap_msg(vf, msg, true);
break;
- case VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP:
- ret = i40e_vc_iwarp_qvmap_msg(vf, msg, false);
+ case VIRTCHNL_OP_RELEASE_RDMA_IRQ_MAP:
+ ret = i40e_vc_rdma_qvmap_msg(vf, msg, false);
break;
case VIRTCHNL_OP_CONFIG_RSS_KEY:
ret = i40e_vc_config_rss_key(vf, msg);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index 358bbdb58795..895b8feb2567 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -34,7 +34,7 @@ enum i40e_queue_ctrl {
enum i40e_vf_states {
I40E_VF_STATE_INIT = 0,
I40E_VF_STATE_ACTIVE,
- I40E_VF_STATE_IWARPENA,
+ I40E_VF_STATE_RDMAENA,
I40E_VF_STATE_DISABLED,
I40E_VF_STATE_MC_PROMISC,
I40E_VF_STATE_UC_PROMISC,
@@ -46,7 +46,7 @@ enum i40e_vf_states {
enum i40e_vf_capabilities {
I40E_VIRTCHNL_VF_CAP_PRIVILEGE = 0,
I40E_VIRTCHNL_VF_CAP_L2,
- I40E_VIRTCHNL_VF_CAP_IWARP,
+ I40E_VIRTCHNL_VF_CAP_RDMA,
};
/* In ADq, max 4 VSI's can be allocated per VF including primary VF VSI.
@@ -108,7 +108,7 @@ struct i40e_vf {
u16 num_cloud_filters;
/* RDMA Client */
- struct virtchnl_iwarp_qvlist_info *qvlist_info;
+ struct virtchnl_rdma_qvlist_info *qvlist_info;
};
void i40e_free_vfs(struct i40e_pf *pf);
diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
index 2a9f1eeeb701..232bc61d9eee 100644
--- a/drivers/net/ethernet/intel/iavf/iavf.h
+++ b/drivers/net/ethernet/intel/iavf/iavf.h
@@ -30,6 +30,7 @@
#include <linux/jiffies.h>
#include <net/ip6_checksum.h>
#include <net/pkt_cls.h>
+#include <net/pkt_sched.h>
#include <net/udp.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
@@ -276,8 +277,8 @@ struct iavf_adapter {
u64 hw_csum_rx_error;
u32 rx_desc_count;
int num_msix_vectors;
- int num_iwarp_msix;
- int iwarp_base_vector;
+ int num_rdma_msix;
+ int rdma_base_vector;
u32 client_pending;
struct iavf_client_instance *cinst;
struct msix_entry *msix_entries;
@@ -384,7 +385,7 @@ struct iavf_adapter {
enum virtchnl_ops current_op;
#define CLIENT_ALLOWED(_a) ((_a)->vf_res ? \
(_a)->vf_res->vf_cap_flags & \
- VIRTCHNL_VF_OFFLOAD_IWARP : \
+ VIRTCHNL_VF_OFFLOAD_RDMA : \
0)
#define CLIENT_ENABLED(_a) ((_a)->cinst)
/* RSS by the PF should be preferred over RSS via other methods. */
diff --git a/drivers/net/ethernet/intel/iavf/iavf_client.c b/drivers/net/ethernet/intel/iavf/iavf_client.c
index 0c77e4171808..93c903c02c64 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_client.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_client.c
@@ -127,7 +127,7 @@ void iavf_notify_client_open(struct iavf_vsi *vsi)
}
/**
- * iavf_client_release_qvlist - send a message to the PF to release iwarp qv map
+ * iavf_client_release_qvlist - send a message to the PF to release rdma qv map
* @ldev: pointer to L2 context.
*
* Return 0 on success or < 0 on error
@@ -141,12 +141,12 @@ static int iavf_client_release_qvlist(struct iavf_info *ldev)
return -EAGAIN;
err = iavf_aq_send_msg_to_pf(&adapter->hw,
- VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP,
+ VIRTCHNL_OP_RELEASE_RDMA_IRQ_MAP,
IAVF_SUCCESS, NULL, 0, NULL);
if (err)
dev_err(&adapter->pdev->dev,
- "Unable to send iWarp vector release message to PF, error %d, aq status %d\n",
+ "Unable to send RDMA vector release message to PF, error %d, aq status %d\n",
err, adapter->hw.aq.asq_last_status);
return err;
@@ -215,9 +215,9 @@ iavf_client_add_instance(struct iavf_adapter *adapter)
cinst->lan_info.params = params;
set_bit(__IAVF_CLIENT_INSTANCE_NONE, &cinst->state);
- cinst->lan_info.msix_count = adapter->num_iwarp_msix;
+ cinst->lan_info.msix_count = adapter->num_rdma_msix;
cinst->lan_info.msix_entries =
- &adapter->msix_entries[adapter->iwarp_base_vector];
+ &adapter->msix_entries[adapter->rdma_base_vector];
mac = list_first_entry(&cinst->lan_info.netdev->dev_addrs.list,
struct netdev_hw_addr, list);
@@ -425,17 +425,17 @@ static u32 iavf_client_virtchnl_send(struct iavf_info *ldev,
if (adapter->aq_required)
return -EAGAIN;
- err = iavf_aq_send_msg_to_pf(&adapter->hw, VIRTCHNL_OP_IWARP,
+ err = iavf_aq_send_msg_to_pf(&adapter->hw, VIRTCHNL_OP_RDMA,
IAVF_SUCCESS, msg, len, NULL);
if (err)
- dev_err(&adapter->pdev->dev, "Unable to send iWarp message to PF, error %d, aq status %d\n",
+ dev_err(&adapter->pdev->dev, "Unable to send RDMA message to PF, error %d, aq status %d\n",
err, adapter->hw.aq.asq_last_status);
return err;
}
/**
- * iavf_client_setup_qvlist - send a message to the PF to setup iwarp qv map
+ * iavf_client_setup_qvlist - send a message to the PF to setup rdma qv map
* @ldev: pointer to L2 context.
* @client: Client pointer.
* @qvlist_info: queue and vector list
@@ -446,7 +446,7 @@ static int iavf_client_setup_qvlist(struct iavf_info *ldev,
struct iavf_client *client,
struct iavf_qvlist_info *qvlist_info)
{
- struct virtchnl_iwarp_qvlist_info *v_qvlist_info;
+ struct virtchnl_rdma_qvlist_info *v_qvlist_info;
struct iavf_adapter *adapter = ldev->vf;
struct iavf_qv_info *qv_info;
enum iavf_status err;
@@ -463,23 +463,23 @@ static int iavf_client_setup_qvlist(struct iavf_info *ldev,
continue;
v_idx = qv_info->v_idx;
if ((v_idx >=
- (adapter->iwarp_base_vector + adapter->num_iwarp_msix)) ||
- (v_idx < adapter->iwarp_base_vector))
+ (adapter->rdma_base_vector + adapter->num_rdma_msix)) ||
+ (v_idx < adapter->rdma_base_vector))
return -EINVAL;
}
- v_qvlist_info = (struct virtchnl_iwarp_qvlist_info *)qvlist_info;
+ v_qvlist_info = (struct virtchnl_rdma_qvlist_info *)qvlist_info;
msg_size = struct_size(v_qvlist_info, qv_info,
v_qvlist_info->num_vectors - 1);
- adapter->client_pending |= BIT(VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP);
+ adapter->client_pending |= BIT(VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP);
err = iavf_aq_send_msg_to_pf(&adapter->hw,
- VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP, IAVF_SUCCESS,
+ VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP, IAVF_SUCCESS,
(u8 *)v_qvlist_info, msg_size, NULL);
if (err) {
dev_err(&adapter->pdev->dev,
- "Unable to send iWarp vector config message to PF, error %d, aq status %d\n",
+ "Unable to send RDMA vector config message to PF, error %d, aq status %d\n",
err, adapter->hw.aq.asq_last_status);
goto out;
}
@@ -488,7 +488,7 @@ static int iavf_client_setup_qvlist(struct iavf_info *ldev,
for (i = 0; i < 5; i++) {
msleep(100);
if (!(adapter->client_pending &
- BIT(VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP))) {
+ BIT(VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP))) {
err = 0;
break;
}
diff --git a/drivers/net/ethernet/intel/iavf/iavf_client.h b/drivers/net/ethernet/intel/iavf/iavf_client.h
index 9a7cf39ea75a..c5d51d7dc7cc 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_client.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_client.h
@@ -159,7 +159,7 @@ struct iavf_client {
#define IAVF_CLIENT_FLAGS_LAUNCH_ON_PROBE BIT(0)
#define IAVF_TX_FLAGS_NOTIFY_OTHER_EVENTS BIT(2)
u8 type;
-#define IAVF_CLIENT_IWARP 0
+#define IAVF_CLIENT_RDMA 0
struct iavf_client_ops *ops; /* client ops provided by the client */
};
diff --git a/drivers/net/ethernet/intel/iavf/iavf_common.c b/drivers/net/ethernet/intel/iavf/iavf_common.c
index 34e46a23894f..16c490965b61 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_common.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_common.c
@@ -223,8 +223,8 @@ const char *iavf_stat_str(struct iavf_hw *hw, enum iavf_status stat_err)
return "IAVF_ERR_ADMIN_QUEUE_FULL";
case IAVF_ERR_ADMIN_QUEUE_NO_WORK:
return "IAVF_ERR_ADMIN_QUEUE_NO_WORK";
- case IAVF_ERR_BAD_IWARP_CQE:
- return "IAVF_ERR_BAD_IWARP_CQE";
+ case IAVF_ERR_BAD_RDMA_CQE:
+ return "IAVF_ERR_BAD_RDMA_CQE";
case IAVF_ERR_NVM_BLANK_MODE:
return "IAVF_ERR_NVM_BLANK_MODE";
case IAVF_ERR_NOT_IMPLEMENTED:
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index 4b09785d2147..3273aeb8fa67 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -105,7 +105,7 @@ int iavf_status_to_errno(enum iavf_status status)
case IAVF_ERR_SRQ_ENABLED:
case IAVF_ERR_ADMIN_QUEUE_ERROR:
case IAVF_ERR_ADMIN_QUEUE_FULL:
- case IAVF_ERR_BAD_IWARP_CQE:
+ case IAVF_ERR_BAD_RDMA_CQE:
case IAVF_ERR_NVM_BLANK_MODE:
case IAVF_ERR_PE_DOORBELL_NOT_ENABLED:
case IAVF_ERR_DIAG_TEST_FAILED:
@@ -4868,8 +4868,6 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_pci_reg;
}
- pci_enable_pcie_error_reporting(pdev);
-
pci_set_master(pdev);
netdev = alloc_etherdev_mq(sizeof(struct iavf_adapter),
@@ -4957,7 +4955,6 @@ err_ioremap:
err_alloc_wq:
free_netdev(netdev);
err_alloc_etherdev:
- pci_disable_pcie_error_reporting(pdev);
pci_release_regions(pdev);
err_pci_reg:
err_dma:
@@ -5175,8 +5172,6 @@ static void iavf_remove(struct pci_dev *pdev)
free_netdev(netdev);
- pci_disable_pcie_error_reporting(pdev);
-
pci_disable_device(pdev);
}
diff --git a/drivers/net/ethernet/intel/iavf/iavf_status.h b/drivers/net/ethernet/intel/iavf/iavf_status.h
index 2ea5c7c339bc..0e493ee9e9d1 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_status.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_status.h
@@ -64,7 +64,7 @@ enum iavf_status {
IAVF_ERR_BUF_TOO_SHORT = -55,
IAVF_ERR_ADMIN_QUEUE_FULL = -56,
IAVF_ERR_ADMIN_QUEUE_NO_WORK = -57,
- IAVF_ERR_BAD_IWARP_CQE = -58,
+ IAVF_ERR_BAD_RDMA_CQE = -58,
IAVF_ERR_NVM_BLANK_MODE = -59,
IAVF_ERR_NOT_IMPLEMENTED = -60,
IAVF_ERR_PE_DOORBELL_NOT_ENABLED = -61,
diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
index 365ca0c710c4..6d23338604bb 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
@@ -2298,7 +2298,7 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
if (v_opcode != adapter->current_op)
return;
break;
- case VIRTCHNL_OP_IWARP:
+ case VIRTCHNL_OP_RDMA:
/* Gobble zero-length replies from the PF. They indicate that
* a previous message was received OK, and the client doesn't
* care about that.
@@ -2307,9 +2307,9 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
iavf_notify_client_message(&adapter->vsi, msg, msglen);
break;
- case VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP:
+ case VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP:
adapter->client_pending &=
- ~(BIT(VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP));
+ ~(BIT(VIRTCHNL_OP_CONFIG_RDMA_IRQ_MAP));
break;
case VIRTCHNL_OP_GET_RSS_HENA_CAPS: {
struct virtchnl_rss_hena *vrh = (struct virtchnl_rss_hena *)msg;
diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile
index 9183d480b70b..f269952d207d 100644
--- a/drivers/net/ethernet/intel/ice/Makefile
+++ b/drivers/net/ethernet/intel/ice/Makefile
@@ -28,6 +28,7 @@ ice-y := ice_main.o \
ice_flow.o \
ice_idc.o \
ice_devlink.o \
+ ice_ddp.o \
ice_fw_update.o \
ice_lag.o \
ice_ethtool.o \
@@ -42,8 +43,8 @@ ice-$(CONFIG_PCI_IOV) += \
ice_vf_vsi_vlan_ops.o \
ice_vf_lib.o
ice-$(CONFIG_PTP_1588_CLOCK) += ice_ptp.o ice_ptp_hw.o
-ice-$(CONFIG_TTY) += ice_gnss.o
ice-$(CONFIG_DCB) += ice_dcb.o ice_dcb_nl.o ice_dcb_lib.o
ice-$(CONFIG_RFS_ACCEL) += ice_arfs.o
ice-$(CONFIG_XDP_SOCKETS) += ice_xsk.o
ice-$(CONFIG_ICE_SWITCHDEV) += ice_eswitch.o
+ice-$(CONFIG_ICE_GNSS) += ice_gnss.o
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 2f0b604abc5e..b0e29e342401 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -39,7 +39,9 @@
#include <linux/avf/virtchnl.h>
#include <linux/cpu_rmap.h>
#include <linux/dim.h>
+#include <linux/gnss.h>
#include <net/pkt_cls.h>
+#include <net/pkt_sched.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_gact.h>
#include <net/ip.h>
@@ -121,6 +123,8 @@
#define ICE_MAX_MTU (ICE_AQ_SET_MAC_FRAME_SIZE_MAX - ICE_ETH_PKT_HDR_PAD)
+#define ICE_MAX_TSO_SIZE 131072
+
#define ICE_UP_TABLE_TRANSLATE(val, i) \
(((val) << ICE_AQ_VSI_UP_TABLE_UP##i##_S) & \
ICE_AQ_VSI_UP_TABLE_UP##i##_M)
@@ -352,7 +356,6 @@ struct ice_vsi {
struct ice_vf *vf; /* VF associated with this VSI */
- u16 ethtype; /* Ethernet protocol for pause frame */
u16 num_gfltr;
u16 num_bfltr;
@@ -565,9 +568,8 @@ struct ice_pf {
struct mutex adev_mutex; /* lock to protect aux device access */
u32 msg_enable;
struct ice_ptp ptp;
- struct tty_driver *ice_gnss_tty_driver;
- struct tty_port *gnss_tty_port[ICE_GNSS_TTY_MINOR_DEVICES];
- struct gnss_serial *gnss_serial[ICE_GNSS_TTY_MINOR_DEVICES];
+ struct gnss_serial *gnss_serial;
+ struct gnss_device *gnss_dev;
u16 num_rdma_msix; /* Total MSIX vectors for RDMA driver */
u16 rdma_base_vector;
@@ -880,7 +882,7 @@ void ice_set_ethtool_repr_ops(struct net_device *netdev);
void ice_set_ethtool_safe_mode_ops(struct net_device *netdev);
u16 ice_get_avail_txq_count(struct ice_pf *pf);
u16 ice_get_avail_rxq_count(struct ice_pf *pf);
-int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx);
+int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx, bool locked);
void ice_update_vsi_stats(struct ice_vsi *vsi);
void ice_update_pf_stats(struct ice_pf *pf);
void
@@ -889,7 +891,7 @@ ice_fetch_u64_stats_per_ring(struct u64_stats_sync *syncp,
int ice_up(struct ice_vsi *vsi);
int ice_down(struct ice_vsi *vsi);
int ice_down_up(struct ice_vsi *vsi);
-int ice_vsi_cfg(struct ice_vsi *vsi);
+int ice_vsi_cfg_lan(struct ice_vsi *vsi);
struct ice_vsi *ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi);
int ice_vsi_determine_xdp_res(struct ice_vsi *vsi);
int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog);
@@ -907,6 +909,7 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup);
int ice_plug_aux_dev(struct ice_pf *pf);
void ice_unplug_aux_dev(struct ice_pf *pf);
int ice_init_rdma(struct ice_pf *pf);
+void ice_deinit_rdma(struct ice_pf *pf);
const char *ice_aq_str(enum ice_aq_err aq_err);
bool ice_is_wol_supported(struct ice_hw *hw);
void ice_fdir_del_all_fltrs(struct ice_vsi *vsi);
@@ -931,6 +934,8 @@ int ice_open(struct net_device *netdev);
int ice_open_internal(struct net_device *netdev);
int ice_stop(struct net_device *netdev);
void ice_service_task_schedule(struct ice_pf *pf);
+int ice_load(struct ice_pf *pf);
+void ice_unload(struct ice_pf *pf);
/**
* ice_set_rdma_cap - enable RDMA support
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index 958c1e435232..838d9b274d68 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -1659,14 +1659,24 @@ struct ice_aqc_lldp_get_mib {
#define ICE_AQ_LLDP_TX_ACTIVE 0
#define ICE_AQ_LLDP_TX_SUSPENDED 1
#define ICE_AQ_LLDP_TX_FLUSHED 3
+/* DCBX mode */
+#define ICE_AQ_LLDP_DCBX_M GENMASK(7, 6)
+#define ICE_AQ_LLDP_DCBX_NA 0
+#define ICE_AQ_LLDP_DCBX_CEE 1
+#define ICE_AQ_LLDP_DCBX_IEEE 2
+
+ u8 state;
+#define ICE_AQ_LLDP_MIB_CHANGE_STATE_M BIT(0)
+#define ICE_AQ_LLDP_MIB_CHANGE_EXECUTED 0
+#define ICE_AQ_LLDP_MIB_CHANGE_PENDING 1
+
/* The following bytes are reserved for the Get LLDP MIB command (0x0A00)
* and in the LLDP MIB Change Event (0x0A01). They are valid for the
* Get LLDP MIB (0x0A00) response only.
*/
- u8 reserved1;
__le16 local_len;
__le16 remote_len;
- u8 reserved2[2];
+ u8 reserved[2];
__le32 addr_high;
__le32 addr_low;
};
@@ -1677,6 +1687,9 @@ struct ice_aqc_lldp_set_mib_change {
u8 command;
#define ICE_AQ_LLDP_MIB_UPDATE_ENABLE 0x0
#define ICE_AQ_LLDP_MIB_UPDATE_DIS 0x1
+#define ICE_AQ_LLDP_MIB_PENDING_M BIT(1)
+#define ICE_AQ_LLDP_MIB_PENDING_DISABLE 0
+#define ICE_AQ_LLDP_MIB_PENDING_ENABLE 1
u8 reserved[15];
};
@@ -2329,6 +2342,7 @@ enum ice_adminq_opc {
ice_aqc_opc_lldp_set_local_mib = 0x0A08,
ice_aqc_opc_lldp_stop_start_specific_agent = 0x0A09,
ice_aqc_opc_lldp_filter_ctrl = 0x0A0A,
+ ice_aqc_opc_lldp_execute_pending_mib = 0x0A0B,
/* RSS commands */
ice_aqc_opc_set_rss_key = 0x0B02,
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index 554095b25f44..1911d644dfa8 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -355,9 +355,6 @@ static unsigned int ice_rx_offset(struct ice_rx_ring *rx_ring)
{
if (ice_ring_uses_build_skb(rx_ring))
return ICE_SKB_PAD;
- else if (ice_is_xdp_ena_vsi(rx_ring->vsi))
- return XDP_PACKET_HEADROOM;
-
return 0;
}
@@ -495,7 +492,7 @@ static int ice_setup_rx_ctx(struct ice_rx_ring *ring)
int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
{
struct device *dev = ice_pf_to_dev(ring->vsi->back);
- u16 num_bufs = ICE_DESC_UNUSED(ring);
+ u32 num_bufs = ICE_RX_DESC_UNUSED(ring);
int err;
ring->rx_buf_len = ring->vsi->rx_buf_len;
@@ -503,8 +500,10 @@ int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
if (ring->vsi->type == ICE_VSI_PF) {
if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
/* coverity[check_return] */
- xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
- ring->q_index, ring->q_vector->napi.napi_id);
+ __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
+ ring->q_index,
+ ring->q_vector->napi.napi_id,
+ ring->vsi->rx_buf_len);
ring->xsk_pool = ice_xsk_pool(ring);
if (ring->xsk_pool) {
@@ -524,9 +523,11 @@ int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
} else {
if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
/* coverity[check_return] */
- xdp_rxq_info_reg(&ring->xdp_rxq,
- ring->netdev,
- ring->q_index, ring->q_vector->napi.napi_id);
+ __xdp_rxq_info_reg(&ring->xdp_rxq,
+ ring->netdev,
+ ring->q_index,
+ ring->q_vector->napi.napi_id,
+ ring->vsi->rx_buf_len);
err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
MEM_TYPE_PAGE_SHARED,
@@ -536,6 +537,8 @@ int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
}
}
+ xdp_init_buff(&ring->xdp, ice_rx_pg_size(ring) / 2, &ring->xdp_rxq);
+ ring->xdp.data = NULL;
err = ice_setup_rx_ctx(ring);
if (err) {
dev_err(dev, "ice_setup_rx_ctx failed for RxQ %d, err %d\n",
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index d02b55b6aa9c..c2fda4fa4188 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -208,6 +208,31 @@ bool ice_is_e810t(struct ice_hw *hw)
}
/**
+ * ice_is_e823
+ * @hw: pointer to the hardware structure
+ *
+ * returns true if the device is E823-L or E823-C based, false if not.
+ */
+bool ice_is_e823(struct ice_hw *hw)
+{
+ switch (hw->device_id) {
+ case ICE_DEV_ID_E823L_BACKPLANE:
+ case ICE_DEV_ID_E823L_SFP:
+ case ICE_DEV_ID_E823L_10G_BASE_T:
+ case ICE_DEV_ID_E823L_1GBE:
+ case ICE_DEV_ID_E823L_QSFP:
+ case ICE_DEV_ID_E823C_BACKPLANE:
+ case ICE_DEV_ID_E823C_QSFP:
+ case ICE_DEV_ID_E823C_SFP:
+ case ICE_DEV_ID_E823C_10G_BASE_T:
+ case ICE_DEV_ID_E823C_SGMII:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
* ice_clear_pf_cfg - Clear PF configuration
* @hw: pointer to the hardware structure
*
@@ -1088,8 +1113,10 @@ int ice_init_hw(struct ice_hw *hw)
if (status)
goto err_unroll_cqinit;
- hw->port_info = devm_kzalloc(ice_hw_to_dev(hw),
- sizeof(*hw->port_info), GFP_KERNEL);
+ if (!hw->port_info)
+ hw->port_info = devm_kzalloc(ice_hw_to_dev(hw),
+ sizeof(*hw->port_info),
+ GFP_KERNEL);
if (!hw->port_info) {
status = -ENOMEM;
goto err_unroll_cqinit;
@@ -1217,11 +1244,6 @@ void ice_deinit_hw(struct ice_hw *hw)
ice_free_hw_tbls(hw);
mutex_destroy(&hw->tnl_lock);
- if (hw->port_info) {
- devm_kfree(ice_hw_to_dev(hw), hw->port_info);
- hw->port_info = NULL;
- }
-
/* Attempt to disable FW logging before shutting down control queues */
ice_cfg_fw_log(hw, false);
ice_destroy_all_ctrlq(hw);
@@ -5504,6 +5526,19 @@ ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add)
}
/**
+ * ice_lldp_execute_pending_mib - execute LLDP pending MIB request
+ * @hw: pointer to HW struct
+ */
+int ice_lldp_execute_pending_mib(struct ice_hw *hw)
+{
+ struct ice_aq_desc desc;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_execute_pending_mib);
+
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+}
+
+/**
* ice_fw_supports_report_dflt_cfg
* @hw: pointer to the hardware structure
*
@@ -5524,7 +5559,7 @@ bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw)
* returned by the firmware is a 16 bit * value, but is indexed
* by [fls(speed) - 1]
*/
-static const u32 ice_aq_to_link_speed[15] = {
+static const u32 ice_aq_to_link_speed[] = {
SPEED_10, /* BIT(0) */
SPEED_100,
SPEED_1000,
@@ -5536,10 +5571,6 @@ static const u32 ice_aq_to_link_speed[15] = {
SPEED_40000,
SPEED_50000,
SPEED_100000, /* BIT(10) */
- 0,
- 0,
- 0,
- 0 /* BIT(14) */
};
/**
@@ -5550,5 +5581,8 @@ static const u32 ice_aq_to_link_speed[15] = {
*/
u32 ice_get_link_speed(u16 index)
{
+ if (index >= ARRAY_SIZE(ice_aq_to_link_speed))
+ return 0;
+
return ice_aq_to_link_speed[index];
}
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index 4c6a0b5c9304..8ba5f935a092 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -122,7 +122,7 @@ ice_set_fc(struct ice_port_info *pi, u8 *aq_failures,
bool ena_auto_link_update);
int
ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
- enum ice_fc_mode fc);
+ enum ice_fc_mode req_mode);
bool
ice_phy_caps_equals_cfg(struct ice_aqc_get_phy_caps_data *caps,
struct ice_aqc_set_phy_cfg_data *cfg);
@@ -199,6 +199,7 @@ void
ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
u64 *prev_stat, u64 *cur_stat);
bool ice_is_e810t(struct ice_hw *hw);
+bool ice_is_e823(struct ice_hw *hw);
int
ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
struct ice_aqc_txsched_elem_data *buf);
@@ -221,6 +222,7 @@ ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw);
int
ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add);
+int ice_lldp_execute_pending_mib(struct ice_hw *hw);
int
ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
u16 bus_addr, __le16 addr, u8 params, u8 *data,
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.c b/drivers/net/ethernet/intel/ice/ice_dcb.c
index 6be02f9b0b8c..c557dfc50aad 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb.c
@@ -73,6 +73,9 @@ ice_aq_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_update,
if (!ena_update)
cmd->command |= ICE_AQ_LLDP_MIB_UPDATE_DIS;
+ else
+ cmd->command |= FIELD_PREP(ICE_AQ_LLDP_MIB_PENDING_M,
+ ICE_AQ_LLDP_MIB_PENDING_ENABLE);
return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}
@@ -566,7 +569,7 @@ ice_parse_cee_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg)
* @tlv: Organization specific TLV
* @dcbcfg: Local store to update ETS REC data
*
- * Currently only IEEE 802.1Qaz TLV is supported, all others
+ * Currently IEEE 802.1Qaz and CEE DCBX TLV are supported, others
* will be returned
*/
static void
@@ -585,7 +588,7 @@ ice_parse_org_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg)
ice_parse_cee_tlv(tlv, dcbcfg);
break;
default:
- break;
+ break; /* Other OUIs not supported */
}
}
@@ -964,6 +967,42 @@ int ice_get_dcb_cfg(struct ice_port_info *pi)
}
/**
+ * ice_get_dcb_cfg_from_mib_change
+ * @pi: port information structure
+ * @event: pointer to the admin queue receive event
+ *
+ * Set DCB configuration from received MIB Change event
+ */
+void ice_get_dcb_cfg_from_mib_change(struct ice_port_info *pi,
+ struct ice_rq_event_info *event)
+{
+ struct ice_dcbx_cfg *dcbx_cfg = &pi->qos_cfg.local_dcbx_cfg;
+ struct ice_aqc_lldp_get_mib *mib;
+ u8 change_type, dcbx_mode;
+
+ mib = (struct ice_aqc_lldp_get_mib *)&event->desc.params.raw;
+
+ change_type = FIELD_GET(ICE_AQ_LLDP_MIB_TYPE_M, mib->type);
+ if (change_type == ICE_AQ_LLDP_MIB_REMOTE)
+ dcbx_cfg = &pi->qos_cfg.remote_dcbx_cfg;
+
+ dcbx_mode = FIELD_GET(ICE_AQ_LLDP_DCBX_M, mib->type);
+
+ switch (dcbx_mode) {
+ case ICE_AQ_LLDP_DCBX_IEEE:
+ dcbx_cfg->dcbx_mode = ICE_DCBX_MODE_IEEE;
+ ice_lldp_to_dcb_cfg(event->msg_buf, dcbx_cfg);
+ break;
+
+ case ICE_AQ_LLDP_DCBX_CEE:
+ pi->qos_cfg.desired_dcbx_cfg = pi->qos_cfg.local_dcbx_cfg;
+ ice_cee_to_dcb_cfg((struct ice_aqc_get_cee_dcb_cfg_resp *)
+ event->msg_buf, pi);
+ break;
+ }
+}
+
+/**
* ice_init_dcb
* @hw: pointer to the HW struct
* @enable_mib_change: enable MIB change event
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.h b/drivers/net/ethernet/intel/ice/ice_dcb.h
index 6abf28a14291..be34650a77d5 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb.h
+++ b/drivers/net/ethernet/intel/ice/ice_dcb.h
@@ -144,6 +144,8 @@ ice_aq_get_dcb_cfg(struct ice_hw *hw, u8 mib_type, u8 bridgetype,
struct ice_dcbx_cfg *dcbcfg);
int ice_get_dcb_cfg(struct ice_port_info *pi);
int ice_set_dcb_cfg(struct ice_port_info *pi);
+void ice_get_dcb_cfg_from_mib_change(struct ice_port_info *pi,
+ struct ice_rq_event_info *event);
int ice_init_dcb(struct ice_hw *hw, bool enable_mib_change);
int
ice_query_port_ets(struct ice_port_info *pi,
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
index 4f24d441c35e..c6d4926f0fcf 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
@@ -441,7 +441,7 @@ int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked)
goto out;
}
- ice_pf_dcb_recfg(pf);
+ ice_pf_dcb_recfg(pf, false);
out:
/* enable previously downed VSIs */
@@ -731,12 +731,13 @@ static int ice_dcb_noncontig_cfg(struct ice_pf *pf)
/**
* ice_pf_dcb_recfg - Reconfigure all VEBs and VSIs
* @pf: pointer to the PF struct
+ * @locked: is adev device lock held
*
* Assumed caller has already disabled all VSIs before
* calling this function. Reconfiguring DCB based on
* local_dcbx_cfg.
*/
-void ice_pf_dcb_recfg(struct ice_pf *pf)
+void ice_pf_dcb_recfg(struct ice_pf *pf, bool locked)
{
struct ice_dcbx_cfg *dcbcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
struct iidc_event *event;
@@ -783,14 +784,16 @@ void ice_pf_dcb_recfg(struct ice_pf *pf)
if (vsi->type == ICE_VSI_PF)
ice_dcbnl_set_all(vsi);
}
- /* Notify the AUX drivers that TC change is finished */
- event = kzalloc(sizeof(*event), GFP_KERNEL);
- if (!event)
- return;
+ if (!locked) {
+ /* Notify the AUX drivers that TC change is finished */
+ event = kzalloc(sizeof(*event), GFP_KERNEL);
+ if (!event)
+ return;
- set_bit(IIDC_EVENT_AFTER_TC_CHANGE, event->type);
- ice_send_event_to_aux(pf, event);
- kfree(event);
+ set_bit(IIDC_EVENT_AFTER_TC_CHANGE, event->type);
+ ice_send_event_to_aux(pf, event);
+ kfree(event);
+ }
}
/**
@@ -859,7 +862,7 @@ int ice_init_pf_dcb(struct ice_pf *pf, bool locked)
if (err)
goto dcb_init_err;
- return err;
+ return 0;
dcb_init_err:
dev_err(dev, "DCB init failed\n");
@@ -944,6 +947,16 @@ ice_tx_prepare_vlan_flags_dcb(struct ice_tx_ring *tx_ring,
}
/**
+ * ice_dcb_is_mib_change_pending - Check if MIB change is pending
+ * @state: MIB change state
+ */
+static bool ice_dcb_is_mib_change_pending(u8 state)
+{
+ return ICE_AQ_LLDP_MIB_CHANGE_PENDING ==
+ FIELD_GET(ICE_AQ_LLDP_MIB_CHANGE_STATE_M, state);
+}
+
+/**
* ice_dcb_process_lldp_set_mib_change - Process MIB change
* @pf: ptr to ice_pf
* @event: pointer to the admin queue receive event
@@ -956,6 +969,7 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
struct device *dev = ice_pf_to_dev(pf);
struct ice_aqc_lldp_get_mib *mib;
struct ice_dcbx_cfg tmp_dcbx_cfg;
+ bool pending_handled = true;
bool need_reconfig = false;
struct ice_port_info *pi;
u8 mib_type;
@@ -972,41 +986,58 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
pi = pf->hw.port_info;
mib = (struct ice_aqc_lldp_get_mib *)&event->desc.params.raw;
+
/* Ignore if event is not for Nearest Bridge */
- mib_type = ((mib->type >> ICE_AQ_LLDP_BRID_TYPE_S) &
- ICE_AQ_LLDP_BRID_TYPE_M);
+ mib_type = FIELD_GET(ICE_AQ_LLDP_BRID_TYPE_M, mib->type);
dev_dbg(dev, "LLDP event MIB bridge type 0x%x\n", mib_type);
if (mib_type != ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID)
return;
+ /* A pending change event contains accurate config information, and
+ * the FW setting has not been updaed yet, so detect if change is
+ * pending to determine where to pull config information from
+ * (FW vs event)
+ */
+ if (ice_dcb_is_mib_change_pending(mib->state))
+ pending_handled = false;
+
/* Check MIB Type and return if event for Remote MIB update */
- mib_type = mib->type & ICE_AQ_LLDP_MIB_TYPE_M;
+ mib_type = FIELD_GET(ICE_AQ_LLDP_MIB_TYPE_M, mib->type);
dev_dbg(dev, "LLDP event mib type %s\n", mib_type ? "remote" : "local");
if (mib_type == ICE_AQ_LLDP_MIB_REMOTE) {
/* Update the remote cached instance and return */
- ret = ice_aq_get_dcb_cfg(pi->hw, ICE_AQ_LLDP_MIB_REMOTE,
- ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID,
- &pi->qos_cfg.remote_dcbx_cfg);
- if (ret) {
- dev_err(dev, "Failed to get remote DCB config\n");
- return;
+ if (!pending_handled) {
+ ice_get_dcb_cfg_from_mib_change(pi, event);
+ } else {
+ ret =
+ ice_aq_get_dcb_cfg(pi->hw, ICE_AQ_LLDP_MIB_REMOTE,
+ ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID,
+ &pi->qos_cfg.remote_dcbx_cfg);
+ if (ret)
+ dev_dbg(dev, "Failed to get remote DCB config\n");
}
+ return;
}
+ /* That a DCB change has happened is now determined */
mutex_lock(&pf->tc_mutex);
/* store the old configuration */
- tmp_dcbx_cfg = pf->hw.port_info->qos_cfg.local_dcbx_cfg;
+ tmp_dcbx_cfg = pi->qos_cfg.local_dcbx_cfg;
/* Reset the old DCBX configuration data */
memset(&pi->qos_cfg.local_dcbx_cfg, 0,
sizeof(pi->qos_cfg.local_dcbx_cfg));
/* Get updated DCBX data from firmware */
- ret = ice_get_dcb_cfg(pf->hw.port_info);
- if (ret) {
- dev_err(dev, "Failed to get DCB config\n");
- goto out;
+ if (!pending_handled) {
+ ice_get_dcb_cfg_from_mib_change(pi, event);
+ } else {
+ ret = ice_get_dcb_cfg(pi);
+ if (ret) {
+ dev_err(dev, "Failed to get DCB config\n");
+ goto out;
+ }
}
/* No change detected in DCBX configs */
@@ -1033,18 +1064,24 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
}
+ /* Send Execute Pending MIB Change event if it is a Pending event */
+ if (!pending_handled) {
+ ice_lldp_execute_pending_mib(&pf->hw);
+ pending_handled = true;
+ }
+
rtnl_lock();
/* disable VSIs affected by DCB changes */
ice_dcb_ena_dis_vsi(pf, false, true);
- ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL);
+ ret = ice_query_port_ets(pi, &buf, sizeof(buf), NULL);
if (ret) {
dev_err(dev, "Query Port ETS failed\n");
goto unlock_rtnl;
}
/* changes in configuration update VSI */
- ice_pf_dcb_recfg(pf);
+ ice_pf_dcb_recfg(pf, false);
/* enable previously downed VSIs */
ice_dcb_ena_dis_vsi(pf, true, true);
@@ -1052,4 +1089,8 @@ unlock_rtnl:
rtnl_unlock();
out:
mutex_unlock(&pf->tc_mutex);
+
+ /* Send Execute Pending MIB Change event if it is a Pending event */
+ if (!pending_handled)
+ ice_lldp_execute_pending_mib(&pf->hw);
}
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
index 4c421c842a13..800879a88c5e 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
@@ -23,7 +23,7 @@ u8 ice_dcb_get_tc(struct ice_vsi *vsi, int queue_index);
int
ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked);
int ice_dcb_bwchk(struct ice_pf *pf, struct ice_dcbx_cfg *dcbcfg);
-void ice_pf_dcb_recfg(struct ice_pf *pf);
+void ice_pf_dcb_recfg(struct ice_pf *pf, bool locked);
void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi);
int ice_init_pf_dcb(struct ice_pf *pf, bool locked);
void ice_update_dcb_stats(struct ice_pf *pf);
@@ -128,7 +128,7 @@ static inline u8 ice_get_pfc_mode(struct ice_pf *pf)
return 0;
}
-static inline void ice_pf_dcb_recfg(struct ice_pf *pf) { }
+static inline void ice_pf_dcb_recfg(struct ice_pf *pf, bool locked) { }
static inline void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi) { }
static inline void ice_update_dcb_stats(struct ice_pf *pf) { }
static inline void
diff --git a/drivers/net/ethernet/intel/ice/ice_ddp.c b/drivers/net/ethernet/intel/ice/ice_ddp.c
new file mode 100644
index 000000000000..d71ed210f9c4
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_ddp.c
@@ -0,0 +1,1897 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022, Intel Corporation. */
+
+#include "ice_common.h"
+#include "ice.h"
+#include "ice_ddp.h"
+
+/* For supporting double VLAN mode, it is necessary to enable or disable certain
+ * boost tcam entries. The metadata labels names that match the following
+ * prefixes will be saved to allow enabling double VLAN mode.
+ */
+#define ICE_DVM_PRE "BOOST_MAC_VLAN_DVM" /* enable these entries */
+#define ICE_SVM_PRE "BOOST_MAC_VLAN_SVM" /* disable these entries */
+
+/* To support tunneling entries by PF, the package will append the PF number to
+ * the label; for example TNL_VXLAN_PF0, TNL_VXLAN_PF1, TNL_VXLAN_PF2, etc.
+ */
+#define ICE_TNL_PRE "TNL_"
+static const struct ice_tunnel_type_scan tnls[] = {
+ { TNL_VXLAN, "TNL_VXLAN_PF" },
+ { TNL_GENEVE, "TNL_GENEVE_PF" },
+ { TNL_LAST, "" }
+};
+
+/**
+ * ice_verify_pkg - verify package
+ * @pkg: pointer to the package buffer
+ * @len: size of the package buffer
+ *
+ * Verifies various attributes of the package file, including length, format
+ * version, and the requirement of at least one segment.
+ */
+enum ice_ddp_state ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len)
+{
+ u32 seg_count;
+ u32 i;
+
+ if (len < struct_size(pkg, seg_offset, 1))
+ return ICE_DDP_PKG_INVALID_FILE;
+
+ if (pkg->pkg_format_ver.major != ICE_PKG_FMT_VER_MAJ ||
+ pkg->pkg_format_ver.minor != ICE_PKG_FMT_VER_MNR ||
+ pkg->pkg_format_ver.update != ICE_PKG_FMT_VER_UPD ||
+ pkg->pkg_format_ver.draft != ICE_PKG_FMT_VER_DFT)
+ return ICE_DDP_PKG_INVALID_FILE;
+
+ /* pkg must have at least one segment */
+ seg_count = le32_to_cpu(pkg->seg_count);
+ if (seg_count < 1)
+ return ICE_DDP_PKG_INVALID_FILE;
+
+ /* make sure segment array fits in package length */
+ if (len < struct_size(pkg, seg_offset, seg_count))
+ return ICE_DDP_PKG_INVALID_FILE;
+
+ /* all segments must fit within length */
+ for (i = 0; i < seg_count; i++) {
+ u32 off = le32_to_cpu(pkg->seg_offset[i]);
+ struct ice_generic_seg_hdr *seg;
+
+ /* segment header must fit */
+ if (len < off + sizeof(*seg))
+ return ICE_DDP_PKG_INVALID_FILE;
+
+ seg = (struct ice_generic_seg_hdr *)((u8 *)pkg + off);
+
+ /* segment body must fit */
+ if (len < off + le32_to_cpu(seg->seg_size))
+ return ICE_DDP_PKG_INVALID_FILE;
+ }
+
+ return ICE_DDP_PKG_SUCCESS;
+}
+
+/**
+ * ice_free_seg - free package segment pointer
+ * @hw: pointer to the hardware structure
+ *
+ * Frees the package segment pointer in the proper manner, depending on if the
+ * segment was allocated or just the passed in pointer was stored.
+ */
+void ice_free_seg(struct ice_hw *hw)
+{
+ if (hw->pkg_copy) {
+ devm_kfree(ice_hw_to_dev(hw), hw->pkg_copy);
+ hw->pkg_copy = NULL;
+ hw->pkg_size = 0;
+ }
+ hw->seg = NULL;
+}
+
+/**
+ * ice_chk_pkg_version - check package version for compatibility with driver
+ * @pkg_ver: pointer to a version structure to check
+ *
+ * Check to make sure that the package about to be downloaded is compatible with
+ * the driver. To be compatible, the major and minor components of the package
+ * version must match our ICE_PKG_SUPP_VER_MAJ and ICE_PKG_SUPP_VER_MNR
+ * definitions.
+ */
+static enum ice_ddp_state ice_chk_pkg_version(struct ice_pkg_ver *pkg_ver)
+{
+ if (pkg_ver->major > ICE_PKG_SUPP_VER_MAJ ||
+ (pkg_ver->major == ICE_PKG_SUPP_VER_MAJ &&
+ pkg_ver->minor > ICE_PKG_SUPP_VER_MNR))
+ return ICE_DDP_PKG_FILE_VERSION_TOO_HIGH;
+ else if (pkg_ver->major < ICE_PKG_SUPP_VER_MAJ ||
+ (pkg_ver->major == ICE_PKG_SUPP_VER_MAJ &&
+ pkg_ver->minor < ICE_PKG_SUPP_VER_MNR))
+ return ICE_DDP_PKG_FILE_VERSION_TOO_LOW;
+
+ return ICE_DDP_PKG_SUCCESS;
+}
+
+/**
+ * ice_pkg_val_buf
+ * @buf: pointer to the ice buffer
+ *
+ * This helper function validates a buffer's header.
+ */
+struct ice_buf_hdr *ice_pkg_val_buf(struct ice_buf *buf)
+{
+ struct ice_buf_hdr *hdr;
+ u16 section_count;
+ u16 data_end;
+
+ hdr = (struct ice_buf_hdr *)buf->buf;
+ /* verify data */
+ section_count = le16_to_cpu(hdr->section_count);
+ if (section_count < ICE_MIN_S_COUNT || section_count > ICE_MAX_S_COUNT)
+ return NULL;
+
+ data_end = le16_to_cpu(hdr->data_end);
+ if (data_end < ICE_MIN_S_DATA_END || data_end > ICE_MAX_S_DATA_END)
+ return NULL;
+
+ return hdr;
+}
+
+/**
+ * ice_find_buf_table
+ * @ice_seg: pointer to the ice segment
+ *
+ * Returns the address of the buffer table within the ice segment.
+ */
+static struct ice_buf_table *ice_find_buf_table(struct ice_seg *ice_seg)
+{
+ struct ice_nvm_table *nvms = (struct ice_nvm_table *)
+ (ice_seg->device_table + le32_to_cpu(ice_seg->device_table_count));
+
+ return (__force struct ice_buf_table *)(nvms->vers +
+ le32_to_cpu(nvms->table_count));
+}
+
+/**
+ * ice_pkg_enum_buf
+ * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
+ * @state: pointer to the enum state
+ *
+ * This function will enumerate all the buffers in the ice segment. The first
+ * call is made with the ice_seg parameter non-NULL; on subsequent calls,
+ * ice_seg is set to NULL which continues the enumeration. When the function
+ * returns a NULL pointer, then the end of the buffers has been reached, or an
+ * unexpected value has been detected (for example an invalid section count or
+ * an invalid buffer end value).
+ */
+static struct ice_buf_hdr *ice_pkg_enum_buf(struct ice_seg *ice_seg,
+ struct ice_pkg_enum *state)
+{
+ if (ice_seg) {
+ state->buf_table = ice_find_buf_table(ice_seg);
+ if (!state->buf_table)
+ return NULL;
+
+ state->buf_idx = 0;
+ return ice_pkg_val_buf(state->buf_table->buf_array);
+ }
+
+ if (++state->buf_idx < le32_to_cpu(state->buf_table->buf_count))
+ return ice_pkg_val_buf(state->buf_table->buf_array +
+ state->buf_idx);
+ else
+ return NULL;
+}
+
+/**
+ * ice_pkg_advance_sect
+ * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
+ * @state: pointer to the enum state
+ *
+ * This helper function will advance the section within the ice segment,
+ * also advancing the buffer if needed.
+ */
+static bool ice_pkg_advance_sect(struct ice_seg *ice_seg,
+ struct ice_pkg_enum *state)
+{
+ if (!ice_seg && !state->buf)
+ return false;
+
+ if (!ice_seg && state->buf)
+ if (++state->sect_idx < le16_to_cpu(state->buf->section_count))
+ return true;
+
+ state->buf = ice_pkg_enum_buf(ice_seg, state);
+ if (!state->buf)
+ return false;
+
+ /* start of new buffer, reset section index */
+ state->sect_idx = 0;
+ return true;
+}
+
+/**
+ * ice_pkg_enum_section
+ * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
+ * @state: pointer to the enum state
+ * @sect_type: section type to enumerate
+ *
+ * This function will enumerate all the sections of a particular type in the
+ * ice segment. The first call is made with the ice_seg parameter non-NULL;
+ * on subsequent calls, ice_seg is set to NULL which continues the enumeration.
+ * When the function returns a NULL pointer, then the end of the matching
+ * sections has been reached.
+ */
+void *ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
+ u32 sect_type)
+{
+ u16 offset, size;
+
+ if (ice_seg)
+ state->type = sect_type;
+
+ if (!ice_pkg_advance_sect(ice_seg, state))
+ return NULL;
+
+ /* scan for next matching section */
+ while (state->buf->section_entry[state->sect_idx].type !=
+ cpu_to_le32(state->type))
+ if (!ice_pkg_advance_sect(NULL, state))
+ return NULL;
+
+ /* validate section */
+ offset = le16_to_cpu(state->buf->section_entry[state->sect_idx].offset);
+ if (offset < ICE_MIN_S_OFF || offset > ICE_MAX_S_OFF)
+ return NULL;
+
+ size = le16_to_cpu(state->buf->section_entry[state->sect_idx].size);
+ if (size < ICE_MIN_S_SZ || size > ICE_MAX_S_SZ)
+ return NULL;
+
+ /* make sure the section fits in the buffer */
+ if (offset + size > ICE_PKG_BUF_SIZE)
+ return NULL;
+
+ state->sect_type =
+ le32_to_cpu(state->buf->section_entry[state->sect_idx].type);
+
+ /* calc pointer to this section */
+ state->sect =
+ ((u8 *)state->buf) +
+ le16_to_cpu(state->buf->section_entry[state->sect_idx].offset);
+
+ return state->sect;
+}
+
+/**
+ * ice_pkg_enum_entry
+ * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
+ * @state: pointer to the enum state
+ * @sect_type: section type to enumerate
+ * @offset: pointer to variable that receives the offset in the table (optional)
+ * @handler: function that handles access to the entries into the section type
+ *
+ * This function will enumerate all the entries in particular section type in
+ * the ice segment. The first call is made with the ice_seg parameter non-NULL;
+ * on subsequent calls, ice_seg is set to NULL which continues the enumeration.
+ * When the function returns a NULL pointer, then the end of the entries has
+ * been reached.
+ *
+ * Since each section may have a different header and entry size, the handler
+ * function is needed to determine the number and location entries in each
+ * section.
+ *
+ * The offset parameter is optional, but should be used for sections that
+ * contain an offset for each section table. For such cases, the section handler
+ * function must return the appropriate offset + index to give the absolution
+ * offset for each entry. For example, if the base for a section's header
+ * indicates a base offset of 10, and the index for the entry is 2, then
+ * section handler function should set the offset to 10 + 2 = 12.
+ */
+static void *ice_pkg_enum_entry(struct ice_seg *ice_seg,
+ struct ice_pkg_enum *state, u32 sect_type,
+ u32 *offset,
+ void *(*handler)(u32 sect_type, void *section,
+ u32 index, u32 *offset))
+{
+ void *entry;
+
+ if (ice_seg) {
+ if (!handler)
+ return NULL;
+
+ if (!ice_pkg_enum_section(ice_seg, state, sect_type))
+ return NULL;
+
+ state->entry_idx = 0;
+ state->handler = handler;
+ } else {
+ state->entry_idx++;
+ }
+
+ if (!state->handler)
+ return NULL;
+
+ /* get entry */
+ entry = state->handler(state->sect_type, state->sect, state->entry_idx,
+ offset);
+ if (!entry) {
+ /* end of a section, look for another section of this type */
+ if (!ice_pkg_enum_section(NULL, state, 0))
+ return NULL;
+
+ state->entry_idx = 0;
+ entry = state->handler(state->sect_type, state->sect,
+ state->entry_idx, offset);
+ }
+
+ return entry;
+}
+
+/**
+ * ice_sw_fv_handler
+ * @sect_type: section type
+ * @section: pointer to section
+ * @index: index of the field vector entry to be returned
+ * @offset: ptr to variable that receives the offset in the field vector table
+ *
+ * This is a callback function that can be passed to ice_pkg_enum_entry.
+ * This function treats the given section as of type ice_sw_fv_section and
+ * enumerates offset field. "offset" is an index into the field vector table.
+ */
+static void *ice_sw_fv_handler(u32 sect_type, void *section, u32 index,
+ u32 *offset)
+{
+ struct ice_sw_fv_section *fv_section = section;
+
+ if (!section || sect_type != ICE_SID_FLD_VEC_SW)
+ return NULL;
+ if (index >= le16_to_cpu(fv_section->count))
+ return NULL;
+ if (offset)
+ /* "index" passed in to this function is relative to a given
+ * 4k block. To get to the true index into the field vector
+ * table need to add the relative index to the base_offset
+ * field of this section
+ */
+ *offset = le16_to_cpu(fv_section->base_offset) + index;
+ return fv_section->fv + index;
+}
+
+/**
+ * ice_get_prof_index_max - get the max profile index for used profile
+ * @hw: pointer to the HW struct
+ *
+ * Calling this function will get the max profile index for used profile
+ * and store the index number in struct ice_switch_info *switch_info
+ * in HW for following use.
+ */
+static int ice_get_prof_index_max(struct ice_hw *hw)
+{
+ u16 prof_index = 0, j, max_prof_index = 0;
+ struct ice_pkg_enum state;
+ struct ice_seg *ice_seg;
+ bool flag = false;
+ struct ice_fv *fv;
+ u32 offset;
+
+ memset(&state, 0, sizeof(state));
+
+ if (!hw->seg)
+ return -EINVAL;
+
+ ice_seg = hw->seg;
+
+ do {
+ fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
+ &offset, ice_sw_fv_handler);
+ if (!fv)
+ break;
+ ice_seg = NULL;
+
+ /* in the profile that not be used, the prot_id is set to 0xff
+ * and the off is set to 0x1ff for all the field vectors.
+ */
+ for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
+ if (fv->ew[j].prot_id != ICE_PROT_INVALID ||
+ fv->ew[j].off != ICE_FV_OFFSET_INVAL)
+ flag = true;
+ if (flag && prof_index > max_prof_index)
+ max_prof_index = prof_index;
+
+ prof_index++;
+ flag = false;
+ } while (fv);
+
+ hw->switch_info->max_used_prof_index = max_prof_index;
+
+ return 0;
+}
+
+/**
+ * ice_get_ddp_pkg_state - get DDP pkg state after download
+ * @hw: pointer to the HW struct
+ * @already_loaded: indicates if pkg was already loaded onto the device
+ */
+static enum ice_ddp_state ice_get_ddp_pkg_state(struct ice_hw *hw,
+ bool already_loaded)
+{
+ if (hw->pkg_ver.major == hw->active_pkg_ver.major &&
+ hw->pkg_ver.minor == hw->active_pkg_ver.minor &&
+ hw->pkg_ver.update == hw->active_pkg_ver.update &&
+ hw->pkg_ver.draft == hw->active_pkg_ver.draft &&
+ !memcmp(hw->pkg_name, hw->active_pkg_name, sizeof(hw->pkg_name))) {
+ if (already_loaded)
+ return ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED;
+ else
+ return ICE_DDP_PKG_SUCCESS;
+ } else if (hw->active_pkg_ver.major != ICE_PKG_SUPP_VER_MAJ ||
+ hw->active_pkg_ver.minor != ICE_PKG_SUPP_VER_MNR) {
+ return ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED;
+ } else if (hw->active_pkg_ver.major == ICE_PKG_SUPP_VER_MAJ &&
+ hw->active_pkg_ver.minor == ICE_PKG_SUPP_VER_MNR) {
+ return ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED;
+ } else {
+ return ICE_DDP_PKG_ERR;
+ }
+}
+
+/**
+ * ice_init_pkg_regs - initialize additional package registers
+ * @hw: pointer to the hardware structure
+ */
+static void ice_init_pkg_regs(struct ice_hw *hw)
+{
+#define ICE_SW_BLK_INP_MASK_L 0xFFFFFFFF
+#define ICE_SW_BLK_INP_MASK_H 0x0000FFFF
+#define ICE_SW_BLK_IDX 0
+
+ /* setup Switch block input mask, which is 48-bits in two parts */
+ wr32(hw, GL_PREEXT_L2_PMASK0(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_L);
+ wr32(hw, GL_PREEXT_L2_PMASK1(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_H);
+}
+
+/**
+ * ice_marker_ptype_tcam_handler
+ * @sect_type: section type
+ * @section: pointer to section
+ * @index: index of the Marker PType TCAM entry to be returned
+ * @offset: pointer to receive absolute offset, always 0 for ptype TCAM sections
+ *
+ * This is a callback function that can be passed to ice_pkg_enum_entry.
+ * Handles enumeration of individual Marker PType TCAM entries.
+ */
+static void *ice_marker_ptype_tcam_handler(u32 sect_type, void *section,
+ u32 index, u32 *offset)
+{
+ struct ice_marker_ptype_tcam_section *marker_ptype;
+
+ if (sect_type != ICE_SID_RXPARSER_MARKER_PTYPE)
+ return NULL;
+
+ if (index > ICE_MAX_MARKER_PTYPE_TCAMS_IN_BUF)
+ return NULL;
+
+ if (offset)
+ *offset = 0;
+
+ marker_ptype = section;
+ if (index >= le16_to_cpu(marker_ptype->count))
+ return NULL;
+
+ return marker_ptype->tcam + index;
+}
+
+/**
+ * ice_add_dvm_hint
+ * @hw: pointer to the HW structure
+ * @val: value of the boost entry
+ * @enable: true if entry needs to be enabled, or false if needs to be disabled
+ */
+static void ice_add_dvm_hint(struct ice_hw *hw, u16 val, bool enable)
+{
+ if (hw->dvm_upd.count < ICE_DVM_MAX_ENTRIES) {
+ hw->dvm_upd.tbl[hw->dvm_upd.count].boost_addr = val;
+ hw->dvm_upd.tbl[hw->dvm_upd.count].enable = enable;
+ hw->dvm_upd.count++;
+ }
+}
+
+/**
+ * ice_add_tunnel_hint
+ * @hw: pointer to the HW structure
+ * @label_name: label text
+ * @val: value of the tunnel port boost entry
+ */
+static void ice_add_tunnel_hint(struct ice_hw *hw, char *label_name, u16 val)
+{
+ if (hw->tnl.count < ICE_TUNNEL_MAX_ENTRIES) {
+ u16 i;
+
+ for (i = 0; tnls[i].type != TNL_LAST; i++) {
+ size_t len = strlen(tnls[i].label_prefix);
+
+ /* Look for matching label start, before continuing */
+ if (strncmp(label_name, tnls[i].label_prefix, len))
+ continue;
+
+ /* Make sure this label matches our PF. Note that the PF
+ * character ('0' - '7') will be located where our
+ * prefix string's null terminator is located.
+ */
+ if ((label_name[len] - '0') == hw->pf_id) {
+ hw->tnl.tbl[hw->tnl.count].type = tnls[i].type;
+ hw->tnl.tbl[hw->tnl.count].valid = false;
+ hw->tnl.tbl[hw->tnl.count].boost_addr = val;
+ hw->tnl.tbl[hw->tnl.count].port = 0;
+ hw->tnl.count++;
+ break;
+ }
+ }
+ }
+}
+
+/**
+ * ice_label_enum_handler
+ * @sect_type: section type
+ * @section: pointer to section
+ * @index: index of the label entry to be returned
+ * @offset: pointer to receive absolute offset, always zero for label sections
+ *
+ * This is a callback function that can be passed to ice_pkg_enum_entry.
+ * Handles enumeration of individual label entries.
+ */
+static void *ice_label_enum_handler(u32 __always_unused sect_type,
+ void *section, u32 index, u32 *offset)
+{
+ struct ice_label_section *labels;
+
+ if (!section)
+ return NULL;
+
+ if (index > ICE_MAX_LABELS_IN_BUF)
+ return NULL;
+
+ if (offset)
+ *offset = 0;
+
+ labels = section;
+ if (index >= le16_to_cpu(labels->count))
+ return NULL;
+
+ return labels->label + index;
+}
+
+/**
+ * ice_enum_labels
+ * @ice_seg: pointer to the ice segment (NULL on subsequent calls)
+ * @type: the section type that will contain the label (0 on subsequent calls)
+ * @state: ice_pkg_enum structure that will hold the state of the enumeration
+ * @value: pointer to a value that will return the label's value if found
+ *
+ * Enumerates a list of labels in the package. The caller will call
+ * ice_enum_labels(ice_seg, type, ...) to start the enumeration, then call
+ * ice_enum_labels(NULL, 0, ...) to continue. When the function returns a NULL
+ * the end of the list has been reached.
+ */
+static char *ice_enum_labels(struct ice_seg *ice_seg, u32 type,
+ struct ice_pkg_enum *state, u16 *value)
+{
+ struct ice_label *label;
+
+ /* Check for valid label section on first call */
+ if (type && !(type >= ICE_SID_LBL_FIRST && type <= ICE_SID_LBL_LAST))
+ return NULL;
+
+ label = ice_pkg_enum_entry(ice_seg, state, type, NULL,
+ ice_label_enum_handler);
+ if (!label)
+ return NULL;
+
+ *value = le16_to_cpu(label->value);
+ return label->name;
+}
+
+/**
+ * ice_boost_tcam_handler
+ * @sect_type: section type
+ * @section: pointer to section
+ * @index: index of the boost TCAM entry to be returned
+ * @offset: pointer to receive absolute offset, always 0 for boost TCAM sections
+ *
+ * This is a callback function that can be passed to ice_pkg_enum_entry.
+ * Handles enumeration of individual boost TCAM entries.
+ */
+static void *ice_boost_tcam_handler(u32 sect_type, void *section, u32 index,
+ u32 *offset)
+{
+ struct ice_boost_tcam_section *boost;
+
+ if (!section)
+ return NULL;
+
+ if (sect_type != ICE_SID_RXPARSER_BOOST_TCAM)
+ return NULL;
+
+ if (index > ICE_MAX_BST_TCAMS_IN_BUF)
+ return NULL;
+
+ if (offset)
+ *offset = 0;
+
+ boost = section;
+ if (index >= le16_to_cpu(boost->count))
+ return NULL;
+
+ return boost->tcam + index;
+}
+
+/**
+ * ice_find_boost_entry
+ * @ice_seg: pointer to the ice segment (non-NULL)
+ * @addr: Boost TCAM address of entry to search for
+ * @entry: returns pointer to the entry
+ *
+ * Finds a particular Boost TCAM entry and returns a pointer to that entry
+ * if it is found. The ice_seg parameter must not be NULL since the first call
+ * to ice_pkg_enum_entry requires a pointer to an actual ice_segment structure.
+ */
+static int ice_find_boost_entry(struct ice_seg *ice_seg, u16 addr,
+ struct ice_boost_tcam_entry **entry)
+{
+ struct ice_boost_tcam_entry *tcam;
+ struct ice_pkg_enum state;
+
+ memset(&state, 0, sizeof(state));
+
+ if (!ice_seg)
+ return -EINVAL;
+
+ do {
+ tcam = ice_pkg_enum_entry(ice_seg, &state,
+ ICE_SID_RXPARSER_BOOST_TCAM, NULL,
+ ice_boost_tcam_handler);
+ if (tcam && le16_to_cpu(tcam->addr) == addr) {
+ *entry = tcam;
+ return 0;
+ }
+
+ ice_seg = NULL;
+ } while (tcam);
+
+ *entry = NULL;
+ return -EIO;
+}
+
+/**
+ * ice_is_init_pkg_successful - check if DDP init was successful
+ * @state: state of the DDP pkg after download
+ */
+bool ice_is_init_pkg_successful(enum ice_ddp_state state)
+{
+ switch (state) {
+ case ICE_DDP_PKG_SUCCESS:
+ case ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED:
+ case ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
+ * ice_pkg_buf_alloc
+ * @hw: pointer to the HW structure
+ *
+ * Allocates a package buffer and returns a pointer to the buffer header.
+ * Note: all package contents must be in Little Endian form.
+ */
+struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw)
+{
+ struct ice_buf_build *bld;
+ struct ice_buf_hdr *buf;
+
+ bld = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*bld), GFP_KERNEL);
+ if (!bld)
+ return NULL;
+
+ buf = (struct ice_buf_hdr *)bld;
+ buf->data_end =
+ cpu_to_le16(offsetof(struct ice_buf_hdr, section_entry));
+ return bld;
+}
+
+static bool ice_is_gtp_u_profile(u16 prof_idx)
+{
+ return (prof_idx >= ICE_PROFID_IPV6_GTPU_TEID &&
+ prof_idx <= ICE_PROFID_IPV6_GTPU_IPV6_TCP_INNER) ||
+ prof_idx == ICE_PROFID_IPV4_GTPU_TEID;
+}
+
+static bool ice_is_gtp_c_profile(u16 prof_idx)
+{
+ switch (prof_idx) {
+ case ICE_PROFID_IPV4_GTPC_TEID:
+ case ICE_PROFID_IPV4_GTPC_NO_TEID:
+ case ICE_PROFID_IPV6_GTPC_TEID:
+ case ICE_PROFID_IPV6_GTPC_NO_TEID:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
+ * ice_get_sw_prof_type - determine switch profile type
+ * @hw: pointer to the HW structure
+ * @fv: pointer to the switch field vector
+ * @prof_idx: profile index to check
+ */
+static enum ice_prof_type ice_get_sw_prof_type(struct ice_hw *hw,
+ struct ice_fv *fv, u32 prof_idx)
+{
+ u16 i;
+
+ if (ice_is_gtp_c_profile(prof_idx))
+ return ICE_PROF_TUN_GTPC;
+
+ if (ice_is_gtp_u_profile(prof_idx))
+ return ICE_PROF_TUN_GTPU;
+
+ for (i = 0; i < hw->blk[ICE_BLK_SW].es.fvw; i++) {
+ /* UDP tunnel will have UDP_OF protocol ID and VNI offset */
+ if (fv->ew[i].prot_id == (u8)ICE_PROT_UDP_OF &&
+ fv->ew[i].off == ICE_VNI_OFFSET)
+ return ICE_PROF_TUN_UDP;
+
+ /* GRE tunnel will have GRE protocol */
+ if (fv->ew[i].prot_id == (u8)ICE_PROT_GRE_OF)
+ return ICE_PROF_TUN_GRE;
+ }
+
+ return ICE_PROF_NON_TUN;
+}
+
+/**
+ * ice_get_sw_fv_bitmap - Get switch field vector bitmap based on profile type
+ * @hw: pointer to hardware structure
+ * @req_profs: type of profiles requested
+ * @bm: pointer to memory for returning the bitmap of field vectors
+ */
+void ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type req_profs,
+ unsigned long *bm)
+{
+ struct ice_pkg_enum state;
+ struct ice_seg *ice_seg;
+ struct ice_fv *fv;
+
+ if (req_profs == ICE_PROF_ALL) {
+ bitmap_set(bm, 0, ICE_MAX_NUM_PROFILES);
+ return;
+ }
+
+ memset(&state, 0, sizeof(state));
+ bitmap_zero(bm, ICE_MAX_NUM_PROFILES);
+ ice_seg = hw->seg;
+ do {
+ enum ice_prof_type prof_type;
+ u32 offset;
+
+ fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
+ &offset, ice_sw_fv_handler);
+ ice_seg = NULL;
+
+ if (fv) {
+ /* Determine field vector type */
+ prof_type = ice_get_sw_prof_type(hw, fv, offset);
+
+ if (req_profs & prof_type)
+ set_bit((u16)offset, bm);
+ }
+ } while (fv);
+}
+
+/**
+ * ice_get_sw_fv_list
+ * @hw: pointer to the HW structure
+ * @lkups: list of protocol types
+ * @bm: bitmap of field vectors to consider
+ * @fv_list: Head of a list
+ *
+ * Finds all the field vector entries from switch block that contain
+ * a given protocol ID and offset and returns a list of structures of type
+ * "ice_sw_fv_list_entry". Every structure in the list has a field vector
+ * definition and profile ID information
+ * NOTE: The caller of the function is responsible for freeing the memory
+ * allocated for every list entry.
+ */
+int ice_get_sw_fv_list(struct ice_hw *hw, struct ice_prot_lkup_ext *lkups,
+ unsigned long *bm, struct list_head *fv_list)
+{
+ struct ice_sw_fv_list_entry *fvl;
+ struct ice_sw_fv_list_entry *tmp;
+ struct ice_pkg_enum state;
+ struct ice_seg *ice_seg;
+ struct ice_fv *fv;
+ u32 offset;
+
+ memset(&state, 0, sizeof(state));
+
+ if (!lkups->n_val_words || !hw->seg)
+ return -EINVAL;
+
+ ice_seg = hw->seg;
+ do {
+ u16 i;
+
+ fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
+ &offset, ice_sw_fv_handler);
+ if (!fv)
+ break;
+ ice_seg = NULL;
+
+ /* If field vector is not in the bitmap list, then skip this
+ * profile.
+ */
+ if (!test_bit((u16)offset, bm))
+ continue;
+
+ for (i = 0; i < lkups->n_val_words; i++) {
+ int j;
+
+ for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
+ if (fv->ew[j].prot_id ==
+ lkups->fv_words[i].prot_id &&
+ fv->ew[j].off == lkups->fv_words[i].off)
+ break;
+ if (j >= hw->blk[ICE_BLK_SW].es.fvw)
+ break;
+ if (i + 1 == lkups->n_val_words) {
+ fvl = devm_kzalloc(ice_hw_to_dev(hw),
+ sizeof(*fvl), GFP_KERNEL);
+ if (!fvl)
+ goto err;
+ fvl->fv_ptr = fv;
+ fvl->profile_id = offset;
+ list_add(&fvl->list_entry, fv_list);
+ break;
+ }
+ }
+ } while (fv);
+ if (list_empty(fv_list)) {
+ dev_warn(ice_hw_to_dev(hw),
+ "Required profiles not found in currently loaded DDP package");
+ return -EIO;
+ }
+
+ return 0;
+
+err:
+ list_for_each_entry_safe(fvl, tmp, fv_list, list_entry) {
+ list_del(&fvl->list_entry);
+ devm_kfree(ice_hw_to_dev(hw), fvl);
+ }
+
+ return -ENOMEM;
+}
+
+/**
+ * ice_init_prof_result_bm - Initialize the profile result index bitmap
+ * @hw: pointer to hardware structure
+ */
+void ice_init_prof_result_bm(struct ice_hw *hw)
+{
+ struct ice_pkg_enum state;
+ struct ice_seg *ice_seg;
+ struct ice_fv *fv;
+
+ memset(&state, 0, sizeof(state));
+
+ if (!hw->seg)
+ return;
+
+ ice_seg = hw->seg;
+ do {
+ u32 off;
+ u16 i;
+
+ fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
+ &off, ice_sw_fv_handler);
+ ice_seg = NULL;
+ if (!fv)
+ break;
+
+ bitmap_zero(hw->switch_info->prof_res_bm[off],
+ ICE_MAX_FV_WORDS);
+
+ /* Determine empty field vector indices, these can be
+ * used for recipe results. Skip index 0, since it is
+ * always used for Switch ID.
+ */
+ for (i = 1; i < ICE_MAX_FV_WORDS; i++)
+ if (fv->ew[i].prot_id == ICE_PROT_INVALID &&
+ fv->ew[i].off == ICE_FV_OFFSET_INVAL)
+ set_bit(i, hw->switch_info->prof_res_bm[off]);
+ } while (fv);
+}
+
+/**
+ * ice_pkg_buf_free
+ * @hw: pointer to the HW structure
+ * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
+ *
+ * Frees a package buffer
+ */
+void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld)
+{
+ devm_kfree(ice_hw_to_dev(hw), bld);
+}
+
+/**
+ * ice_pkg_buf_reserve_section
+ * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
+ * @count: the number of sections to reserve
+ *
+ * Reserves one or more section table entries in a package buffer. This routine
+ * can be called multiple times as long as they are made before calling
+ * ice_pkg_buf_alloc_section(). Once ice_pkg_buf_alloc_section()
+ * is called once, the number of sections that can be allocated will not be able
+ * to be increased; not using all reserved sections is fine, but this will
+ * result in some wasted space in the buffer.
+ * Note: all package contents must be in Little Endian form.
+ */
+int ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count)
+{
+ struct ice_buf_hdr *buf;
+ u16 section_count;
+ u16 data_end;
+
+ if (!bld)
+ return -EINVAL;
+
+ buf = (struct ice_buf_hdr *)&bld->buf;
+
+ /* already an active section, can't increase table size */
+ section_count = le16_to_cpu(buf->section_count);
+ if (section_count > 0)
+ return -EIO;
+
+ if (bld->reserved_section_table_entries + count > ICE_MAX_S_COUNT)
+ return -EIO;
+ bld->reserved_section_table_entries += count;
+
+ data_end = le16_to_cpu(buf->data_end) +
+ flex_array_size(buf, section_entry, count);
+ buf->data_end = cpu_to_le16(data_end);
+
+ return 0;
+}
+
+/**
+ * ice_pkg_buf_alloc_section
+ * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
+ * @type: the section type value
+ * @size: the size of the section to reserve (in bytes)
+ *
+ * Reserves memory in the buffer for a section's content and updates the
+ * buffers' status accordingly. This routine returns a pointer to the first
+ * byte of the section start within the buffer, which is used to fill in the
+ * section contents.
+ * Note: all package contents must be in Little Endian form.
+ */
+void *ice_pkg_buf_alloc_section(struct ice_buf_build *bld, u32 type, u16 size)
+{
+ struct ice_buf_hdr *buf;
+ u16 sect_count;
+ u16 data_end;
+
+ if (!bld || !type || !size)
+ return NULL;
+
+ buf = (struct ice_buf_hdr *)&bld->buf;
+
+ /* check for enough space left in buffer */
+ data_end = le16_to_cpu(buf->data_end);
+
+ /* section start must align on 4 byte boundary */
+ data_end = ALIGN(data_end, 4);
+
+ if ((data_end + size) > ICE_MAX_S_DATA_END)
+ return NULL;
+
+ /* check for more available section table entries */
+ sect_count = le16_to_cpu(buf->section_count);
+ if (sect_count < bld->reserved_section_table_entries) {
+ void *section_ptr = ((u8 *)buf) + data_end;
+
+ buf->section_entry[sect_count].offset = cpu_to_le16(data_end);
+ buf->section_entry[sect_count].size = cpu_to_le16(size);
+ buf->section_entry[sect_count].type = cpu_to_le32(type);
+
+ data_end += size;
+ buf->data_end = cpu_to_le16(data_end);
+
+ buf->section_count = cpu_to_le16(sect_count + 1);
+ return section_ptr;
+ }
+
+ /* no free section table entries */
+ return NULL;
+}
+
+/**
+ * ice_pkg_buf_alloc_single_section
+ * @hw: pointer to the HW structure
+ * @type: the section type value
+ * @size: the size of the section to reserve (in bytes)
+ * @section: returns pointer to the section
+ *
+ * Allocates a package buffer with a single section.
+ * Note: all package contents must be in Little Endian form.
+ */
+struct ice_buf_build *ice_pkg_buf_alloc_single_section(struct ice_hw *hw,
+ u32 type, u16 size,
+ void **section)
+{
+ struct ice_buf_build *buf;
+
+ if (!section)
+ return NULL;
+
+ buf = ice_pkg_buf_alloc(hw);
+ if (!buf)
+ return NULL;
+
+ if (ice_pkg_buf_reserve_section(buf, 1))
+ goto ice_pkg_buf_alloc_single_section_err;
+
+ *section = ice_pkg_buf_alloc_section(buf, type, size);
+ if (!*section)
+ goto ice_pkg_buf_alloc_single_section_err;
+
+ return buf;
+
+ice_pkg_buf_alloc_single_section_err:
+ ice_pkg_buf_free(hw, buf);
+ return NULL;
+}
+
+/**
+ * ice_pkg_buf_get_active_sections
+ * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
+ *
+ * Returns the number of active sections. Before using the package buffer
+ * in an update package command, the caller should make sure that there is at
+ * least one active section - otherwise, the buffer is not legal and should
+ * not be used.
+ * Note: all package contents must be in Little Endian form.
+ */
+u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld)
+{
+ struct ice_buf_hdr *buf;
+
+ if (!bld)
+ return 0;
+
+ buf = (struct ice_buf_hdr *)&bld->buf;
+ return le16_to_cpu(buf->section_count);
+}
+
+/**
+ * ice_pkg_buf
+ * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
+ *
+ * Return a pointer to the buffer's header
+ */
+struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld)
+{
+ if (!bld)
+ return NULL;
+
+ return &bld->buf;
+}
+
+static enum ice_ddp_state ice_map_aq_err_to_ddp_state(enum ice_aq_err aq_err)
+{
+ switch (aq_err) {
+ case ICE_AQ_RC_ENOSEC:
+ case ICE_AQ_RC_EBADSIG:
+ return ICE_DDP_PKG_FILE_SIGNATURE_INVALID;
+ case ICE_AQ_RC_ESVN:
+ return ICE_DDP_PKG_FILE_REVISION_TOO_LOW;
+ case ICE_AQ_RC_EBADMAN:
+ case ICE_AQ_RC_EBADBUF:
+ return ICE_DDP_PKG_LOAD_ERROR;
+ default:
+ return ICE_DDP_PKG_ERR;
+ }
+}
+
+/**
+ * ice_acquire_global_cfg_lock
+ * @hw: pointer to the HW structure
+ * @access: access type (read or write)
+ *
+ * This function will request ownership of the global config lock for reading
+ * or writing of the package. When attempting to obtain write access, the
+ * caller must check for the following two return values:
+ *
+ * 0 - Means the caller has acquired the global config lock
+ * and can perform writing of the package.
+ * -EALREADY - Indicates another driver has already written the
+ * package or has found that no update was necessary; in
+ * this case, the caller can just skip performing any
+ * update of the package.
+ */
+static int ice_acquire_global_cfg_lock(struct ice_hw *hw,
+ enum ice_aq_res_access_type access)
+{
+ int status;
+
+ status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, access,
+ ICE_GLOBAL_CFG_LOCK_TIMEOUT);
+
+ if (!status)
+ mutex_lock(&ice_global_cfg_lock_sw);
+ else if (status == -EALREADY)
+ ice_debug(hw, ICE_DBG_PKG,
+ "Global config lock: No work to do\n");
+
+ return status;
+}
+
+/**
+ * ice_release_global_cfg_lock
+ * @hw: pointer to the HW structure
+ *
+ * This function will release the global config lock.
+ */
+static void ice_release_global_cfg_lock(struct ice_hw *hw)
+{
+ mutex_unlock(&ice_global_cfg_lock_sw);
+ ice_release_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID);
+}
+
+/**
+ * ice_dwnld_cfg_bufs
+ * @hw: pointer to the hardware structure
+ * @bufs: pointer to an array of buffers
+ * @count: the number of buffers in the array
+ *
+ * Obtains global config lock and downloads the package configuration buffers
+ * to the firmware. Metadata buffers are skipped, and the first metadata buffer
+ * found indicates that the rest of the buffers are all metadata buffers.
+ */
+static enum ice_ddp_state ice_dwnld_cfg_bufs(struct ice_hw *hw,
+ struct ice_buf *bufs, u32 count)
+{
+ enum ice_ddp_state state = ICE_DDP_PKG_SUCCESS;
+ struct ice_buf_hdr *bh;
+ enum ice_aq_err err;
+ u32 offset, info, i;
+ int status;
+
+ if (!bufs || !count)
+ return ICE_DDP_PKG_ERR;
+
+ /* If the first buffer's first section has its metadata bit set
+ * then there are no buffers to be downloaded, and the operation is
+ * considered a success.
+ */
+ bh = (struct ice_buf_hdr *)bufs;
+ if (le32_to_cpu(bh->section_entry[0].type) & ICE_METADATA_BUF)
+ return ICE_DDP_PKG_SUCCESS;
+
+ status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE);
+ if (status) {
+ if (status == -EALREADY)
+ return ICE_DDP_PKG_ALREADY_LOADED;
+ return ice_map_aq_err_to_ddp_state(hw->adminq.sq_last_status);
+ }
+
+ for (i = 0; i < count; i++) {
+ bool last = ((i + 1) == count);
+
+ if (!last) {
+ /* check next buffer for metadata flag */
+ bh = (struct ice_buf_hdr *)(bufs + i + 1);
+
+ /* A set metadata flag in the next buffer will signal
+ * that the current buffer will be the last buffer
+ * downloaded
+ */
+ if (le16_to_cpu(bh->section_count))
+ if (le32_to_cpu(bh->section_entry[0].type) &
+ ICE_METADATA_BUF)
+ last = true;
+ }
+
+ bh = (struct ice_buf_hdr *)(bufs + i);
+
+ status = ice_aq_download_pkg(hw, bh, ICE_PKG_BUF_SIZE, last,
+ &offset, &info, NULL);
+
+ /* Save AQ status from download package */
+ if (status) {
+ ice_debug(hw, ICE_DBG_PKG,
+ "Pkg download failed: err %d off %d inf %d\n",
+ status, offset, info);
+ err = hw->adminq.sq_last_status;
+ state = ice_map_aq_err_to_ddp_state(err);
+ break;
+ }
+
+ if (last)
+ break;
+ }
+
+ if (!status) {
+ status = ice_set_vlan_mode(hw);
+ if (status)
+ ice_debug(hw, ICE_DBG_PKG,
+ "Failed to set VLAN mode: err %d\n", status);
+ }
+
+ ice_release_global_cfg_lock(hw);
+
+ return state;
+}
+
+/**
+ * ice_aq_get_pkg_info_list
+ * @hw: pointer to the hardware structure
+ * @pkg_info: the buffer which will receive the information list
+ * @buf_size: the size of the pkg_info information buffer
+ * @cd: pointer to command details structure or NULL
+ *
+ * Get Package Info List (0x0C43)
+ */
+static int ice_aq_get_pkg_info_list(struct ice_hw *hw,
+ struct ice_aqc_get_pkg_info_resp *pkg_info,
+ u16 buf_size, struct ice_sq_cd *cd)
+{
+ struct ice_aq_desc desc;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_pkg_info_list);
+
+ return ice_aq_send_cmd(hw, &desc, pkg_info, buf_size, cd);
+}
+
+/**
+ * ice_download_pkg
+ * @hw: pointer to the hardware structure
+ * @ice_seg: pointer to the segment of the package to be downloaded
+ *
+ * Handles the download of a complete package.
+ */
+static enum ice_ddp_state ice_download_pkg(struct ice_hw *hw,
+ struct ice_seg *ice_seg)
+{
+ struct ice_buf_table *ice_buf_tbl;
+ int status;
+
+ ice_debug(hw, ICE_DBG_PKG, "Segment format version: %d.%d.%d.%d\n",
+ ice_seg->hdr.seg_format_ver.major,
+ ice_seg->hdr.seg_format_ver.minor,
+ ice_seg->hdr.seg_format_ver.update,
+ ice_seg->hdr.seg_format_ver.draft);
+
+ ice_debug(hw, ICE_DBG_PKG, "Seg: type 0x%X, size %d, name %s\n",
+ le32_to_cpu(ice_seg->hdr.seg_type),
+ le32_to_cpu(ice_seg->hdr.seg_size), ice_seg->hdr.seg_id);
+
+ ice_buf_tbl = ice_find_buf_table(ice_seg);
+
+ ice_debug(hw, ICE_DBG_PKG, "Seg buf count: %d\n",
+ le32_to_cpu(ice_buf_tbl->buf_count));
+
+ status = ice_dwnld_cfg_bufs(hw, ice_buf_tbl->buf_array,
+ le32_to_cpu(ice_buf_tbl->buf_count));
+
+ ice_post_pkg_dwnld_vlan_mode_cfg(hw);
+
+ return status;
+}
+
+/**
+ * ice_aq_download_pkg
+ * @hw: pointer to the hardware structure
+ * @pkg_buf: the package buffer to transfer
+ * @buf_size: the size of the package buffer
+ * @last_buf: last buffer indicator
+ * @error_offset: returns error offset
+ * @error_info: returns error information
+ * @cd: pointer to command details structure or NULL
+ *
+ * Download Package (0x0C40)
+ */
+int ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
+ u16 buf_size, bool last_buf, u32 *error_offset,
+ u32 *error_info, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_download_pkg *cmd;
+ struct ice_aq_desc desc;
+ int status;
+
+ if (error_offset)
+ *error_offset = 0;
+ if (error_info)
+ *error_info = 0;
+
+ cmd = &desc.params.download_pkg;
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_download_pkg);
+ desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+
+ if (last_buf)
+ cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;
+
+ status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
+ if (status == -EIO) {
+ /* Read error from buffer only when the FW returned an error */
+ struct ice_aqc_download_pkg_resp *resp;
+
+ resp = (struct ice_aqc_download_pkg_resp *)pkg_buf;
+ if (error_offset)
+ *error_offset = le32_to_cpu(resp->error_offset);
+ if (error_info)
+ *error_info = le32_to_cpu(resp->error_info);
+ }
+
+ return status;
+}
+
+/**
+ * ice_aq_upload_section
+ * @hw: pointer to the hardware structure
+ * @pkg_buf: the package buffer which will receive the section
+ * @buf_size: the size of the package buffer
+ * @cd: pointer to command details structure or NULL
+ *
+ * Upload Section (0x0C41)
+ */
+int ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
+ u16 buf_size, struct ice_sq_cd *cd)
+{
+ struct ice_aq_desc desc;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_upload_section);
+ desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+
+ return ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
+}
+
+/**
+ * ice_aq_update_pkg
+ * @hw: pointer to the hardware structure
+ * @pkg_buf: the package cmd buffer
+ * @buf_size: the size of the package cmd buffer
+ * @last_buf: last buffer indicator
+ * @error_offset: returns error offset
+ * @error_info: returns error information
+ * @cd: pointer to command details structure or NULL
+ *
+ * Update Package (0x0C42)
+ */
+static int ice_aq_update_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
+ u16 buf_size, bool last_buf, u32 *error_offset,
+ u32 *error_info, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_download_pkg *cmd;
+ struct ice_aq_desc desc;
+ int status;
+
+ if (error_offset)
+ *error_offset = 0;
+ if (error_info)
+ *error_info = 0;
+
+ cmd = &desc.params.download_pkg;
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_pkg);
+ desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+
+ if (last_buf)
+ cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;
+
+ status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
+ if (status == -EIO) {
+ /* Read error from buffer only when the FW returned an error */
+ struct ice_aqc_download_pkg_resp *resp;
+
+ resp = (struct ice_aqc_download_pkg_resp *)pkg_buf;
+ if (error_offset)
+ *error_offset = le32_to_cpu(resp->error_offset);
+ if (error_info)
+ *error_info = le32_to_cpu(resp->error_info);
+ }
+
+ return status;
+}
+
+/**
+ * ice_update_pkg_no_lock
+ * @hw: pointer to the hardware structure
+ * @bufs: pointer to an array of buffers
+ * @count: the number of buffers in the array
+ */
+int ice_update_pkg_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
+{
+ int status = 0;
+ u32 i;
+
+ for (i = 0; i < count; i++) {
+ struct ice_buf_hdr *bh = (struct ice_buf_hdr *)(bufs + i);
+ bool last = ((i + 1) == count);
+ u32 offset, info;
+
+ status = ice_aq_update_pkg(hw, bh, le16_to_cpu(bh->data_end),
+ last, &offset, &info, NULL);
+
+ if (status) {
+ ice_debug(hw, ICE_DBG_PKG,
+ "Update pkg failed: err %d off %d inf %d\n",
+ status, offset, info);
+ break;
+ }
+ }
+
+ return status;
+}
+
+/**
+ * ice_update_pkg
+ * @hw: pointer to the hardware structure
+ * @bufs: pointer to an array of buffers
+ * @count: the number of buffers in the array
+ *
+ * Obtains change lock and updates package.
+ */
+int ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
+{
+ int status;
+
+ status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
+ if (status)
+ return status;
+
+ status = ice_update_pkg_no_lock(hw, bufs, count);
+
+ ice_release_change_lock(hw);
+
+ return status;
+}
+
+/**
+ * ice_find_seg_in_pkg
+ * @hw: pointer to the hardware structure
+ * @seg_type: the segment type to search for (i.e., SEGMENT_TYPE_CPK)
+ * @pkg_hdr: pointer to the package header to be searched
+ *
+ * This function searches a package file for a particular segment type. On
+ * success it returns a pointer to the segment header, otherwise it will
+ * return NULL.
+ */
+struct ice_generic_seg_hdr *ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type,
+ struct ice_pkg_hdr *pkg_hdr)
+{
+ u32 i;
+
+ ice_debug(hw, ICE_DBG_PKG, "Package format version: %d.%d.%d.%d\n",
+ pkg_hdr->pkg_format_ver.major, pkg_hdr->pkg_format_ver.minor,
+ pkg_hdr->pkg_format_ver.update,
+ pkg_hdr->pkg_format_ver.draft);
+
+ /* Search all package segments for the requested segment type */
+ for (i = 0; i < le32_to_cpu(pkg_hdr->seg_count); i++) {
+ struct ice_generic_seg_hdr *seg;
+
+ seg = (struct ice_generic_seg_hdr
+ *)((u8 *)pkg_hdr +
+ le32_to_cpu(pkg_hdr->seg_offset[i]));
+
+ if (le32_to_cpu(seg->seg_type) == seg_type)
+ return seg;
+ }
+
+ return NULL;
+}
+
+/**
+ * ice_init_pkg_info
+ * @hw: pointer to the hardware structure
+ * @pkg_hdr: pointer to the driver's package hdr
+ *
+ * Saves off the package details into the HW structure.
+ */
+static enum ice_ddp_state ice_init_pkg_info(struct ice_hw *hw,
+ struct ice_pkg_hdr *pkg_hdr)
+{
+ struct ice_generic_seg_hdr *seg_hdr;
+
+ if (!pkg_hdr)
+ return ICE_DDP_PKG_ERR;
+
+ seg_hdr = ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, pkg_hdr);
+ if (seg_hdr) {
+ struct ice_meta_sect *meta;
+ struct ice_pkg_enum state;
+
+ memset(&state, 0, sizeof(state));
+
+ /* Get package information from the Metadata Section */
+ meta = ice_pkg_enum_section((struct ice_seg *)seg_hdr, &state,
+ ICE_SID_METADATA);
+ if (!meta) {
+ ice_debug(hw, ICE_DBG_INIT,
+ "Did not find ice metadata section in package\n");
+ return ICE_DDP_PKG_INVALID_FILE;
+ }
+
+ hw->pkg_ver = meta->ver;
+ memcpy(hw->pkg_name, meta->name, sizeof(meta->name));
+
+ ice_debug(hw, ICE_DBG_PKG, "Pkg: %d.%d.%d.%d, %s\n",
+ meta->ver.major, meta->ver.minor, meta->ver.update,
+ meta->ver.draft, meta->name);
+
+ hw->ice_seg_fmt_ver = seg_hdr->seg_format_ver;
+ memcpy(hw->ice_seg_id, seg_hdr->seg_id, sizeof(hw->ice_seg_id));
+
+ ice_debug(hw, ICE_DBG_PKG, "Ice Seg: %d.%d.%d.%d, %s\n",
+ seg_hdr->seg_format_ver.major,
+ seg_hdr->seg_format_ver.minor,
+ seg_hdr->seg_format_ver.update,
+ seg_hdr->seg_format_ver.draft, seg_hdr->seg_id);
+ } else {
+ ice_debug(hw, ICE_DBG_INIT,
+ "Did not find ice segment in driver package\n");
+ return ICE_DDP_PKG_INVALID_FILE;
+ }
+
+ return ICE_DDP_PKG_SUCCESS;
+}
+
+/**
+ * ice_get_pkg_info
+ * @hw: pointer to the hardware structure
+ *
+ * Store details of the package currently loaded in HW into the HW structure.
+ */
+static enum ice_ddp_state ice_get_pkg_info(struct ice_hw *hw)
+{
+ enum ice_ddp_state state = ICE_DDP_PKG_SUCCESS;
+ struct ice_aqc_get_pkg_info_resp *pkg_info;
+ u16 size;
+ u32 i;
+
+ size = struct_size(pkg_info, pkg_info, ICE_PKG_CNT);
+ pkg_info = kzalloc(size, GFP_KERNEL);
+ if (!pkg_info)
+ return ICE_DDP_PKG_ERR;
+
+ if (ice_aq_get_pkg_info_list(hw, pkg_info, size, NULL)) {
+ state = ICE_DDP_PKG_ERR;
+ goto init_pkg_free_alloc;
+ }
+
+ for (i = 0; i < le32_to_cpu(pkg_info->count); i++) {
+#define ICE_PKG_FLAG_COUNT 4
+ char flags[ICE_PKG_FLAG_COUNT + 1] = { 0 };
+ u8 place = 0;
+
+ if (pkg_info->pkg_info[i].is_active) {
+ flags[place++] = 'A';
+ hw->active_pkg_ver = pkg_info->pkg_info[i].ver;
+ hw->active_track_id =
+ le32_to_cpu(pkg_info->pkg_info[i].track_id);
+ memcpy(hw->active_pkg_name, pkg_info->pkg_info[i].name,
+ sizeof(pkg_info->pkg_info[i].name));
+ hw->active_pkg_in_nvm = pkg_info->pkg_info[i].is_in_nvm;
+ }
+ if (pkg_info->pkg_info[i].is_active_at_boot)
+ flags[place++] = 'B';
+ if (pkg_info->pkg_info[i].is_modified)
+ flags[place++] = 'M';
+ if (pkg_info->pkg_info[i].is_in_nvm)
+ flags[place++] = 'N';
+
+ ice_debug(hw, ICE_DBG_PKG, "Pkg[%d]: %d.%d.%d.%d,%s,%s\n", i,
+ pkg_info->pkg_info[i].ver.major,
+ pkg_info->pkg_info[i].ver.minor,
+ pkg_info->pkg_info[i].ver.update,
+ pkg_info->pkg_info[i].ver.draft,
+ pkg_info->pkg_info[i].name, flags);
+ }
+
+init_pkg_free_alloc:
+ kfree(pkg_info);
+
+ return state;
+}
+
+/**
+ * ice_chk_pkg_compat
+ * @hw: pointer to the hardware structure
+ * @ospkg: pointer to the package hdr
+ * @seg: pointer to the package segment hdr
+ *
+ * This function checks the package version compatibility with driver and NVM
+ */
+static enum ice_ddp_state ice_chk_pkg_compat(struct ice_hw *hw,
+ struct ice_pkg_hdr *ospkg,
+ struct ice_seg **seg)
+{
+ struct ice_aqc_get_pkg_info_resp *pkg;
+ enum ice_ddp_state state;
+ u16 size;
+ u32 i;
+
+ /* Check package version compatibility */
+ state = ice_chk_pkg_version(&hw->pkg_ver);
+ if (state) {
+ ice_debug(hw, ICE_DBG_INIT, "Package version check failed.\n");
+ return state;
+ }
+
+ /* find ICE segment in given package */
+ *seg = (struct ice_seg *)ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE,
+ ospkg);
+ if (!*seg) {
+ ice_debug(hw, ICE_DBG_INIT, "no ice segment in package.\n");
+ return ICE_DDP_PKG_INVALID_FILE;
+ }
+
+ /* Check if FW is compatible with the OS package */
+ size = struct_size(pkg, pkg_info, ICE_PKG_CNT);
+ pkg = kzalloc(size, GFP_KERNEL);
+ if (!pkg)
+ return ICE_DDP_PKG_ERR;
+
+ if (ice_aq_get_pkg_info_list(hw, pkg, size, NULL)) {
+ state = ICE_DDP_PKG_LOAD_ERROR;
+ goto fw_ddp_compat_free_alloc;
+ }
+
+ for (i = 0; i < le32_to_cpu(pkg->count); i++) {
+ /* loop till we find the NVM package */
+ if (!pkg->pkg_info[i].is_in_nvm)
+ continue;
+ if ((*seg)->hdr.seg_format_ver.major !=
+ pkg->pkg_info[i].ver.major ||
+ (*seg)->hdr.seg_format_ver.minor >
+ pkg->pkg_info[i].ver.minor) {
+ state = ICE_DDP_PKG_FW_MISMATCH;
+ ice_debug(hw, ICE_DBG_INIT,
+ "OS package is not compatible with NVM.\n");
+ }
+ /* done processing NVM package so break */
+ break;
+ }
+fw_ddp_compat_free_alloc:
+ kfree(pkg);
+ return state;
+}
+
+/**
+ * ice_init_pkg_hints
+ * @hw: pointer to the HW structure
+ * @ice_seg: pointer to the segment of the package scan (non-NULL)
+ *
+ * This function will scan the package and save off relevant information
+ * (hints or metadata) for driver use. The ice_seg parameter must not be NULL
+ * since the first call to ice_enum_labels requires a pointer to an actual
+ * ice_seg structure.
+ */
+static void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg)
+{
+ struct ice_pkg_enum state;
+ char *label_name;
+ u16 val;
+ int i;
+
+ memset(&hw->tnl, 0, sizeof(hw->tnl));
+ memset(&state, 0, sizeof(state));
+
+ if (!ice_seg)
+ return;
+
+ label_name = ice_enum_labels(ice_seg, ICE_SID_LBL_RXPARSER_TMEM, &state,
+ &val);
+
+ while (label_name) {
+ if (!strncmp(label_name, ICE_TNL_PRE, strlen(ICE_TNL_PRE)))
+ /* check for a tunnel entry */
+ ice_add_tunnel_hint(hw, label_name, val);
+
+ /* check for a dvm mode entry */
+ else if (!strncmp(label_name, ICE_DVM_PRE, strlen(ICE_DVM_PRE)))
+ ice_add_dvm_hint(hw, val, true);
+
+ /* check for a svm mode entry */
+ else if (!strncmp(label_name, ICE_SVM_PRE, strlen(ICE_SVM_PRE)))
+ ice_add_dvm_hint(hw, val, false);
+
+ label_name = ice_enum_labels(NULL, 0, &state, &val);
+ }
+
+ /* Cache the appropriate boost TCAM entry pointers for tunnels */
+ for (i = 0; i < hw->tnl.count; i++) {
+ ice_find_boost_entry(ice_seg, hw->tnl.tbl[i].boost_addr,
+ &hw->tnl.tbl[i].boost_entry);
+ if (hw->tnl.tbl[i].boost_entry) {
+ hw->tnl.tbl[i].valid = true;
+ if (hw->tnl.tbl[i].type < __TNL_TYPE_CNT)
+ hw->tnl.valid_count[hw->tnl.tbl[i].type]++;
+ }
+ }
+
+ /* Cache the appropriate boost TCAM entry pointers for DVM and SVM */
+ for (i = 0; i < hw->dvm_upd.count; i++)
+ ice_find_boost_entry(ice_seg, hw->dvm_upd.tbl[i].boost_addr,
+ &hw->dvm_upd.tbl[i].boost_entry);
+}
+
+/**
+ * ice_fill_hw_ptype - fill the enabled PTYPE bit information
+ * @hw: pointer to the HW structure
+ */
+static void ice_fill_hw_ptype(struct ice_hw *hw)
+{
+ struct ice_marker_ptype_tcam_entry *tcam;
+ struct ice_seg *seg = hw->seg;
+ struct ice_pkg_enum state;
+
+ bitmap_zero(hw->hw_ptype, ICE_FLOW_PTYPE_MAX);
+ if (!seg)
+ return;
+
+ memset(&state, 0, sizeof(state));
+
+ do {
+ tcam = ice_pkg_enum_entry(seg, &state,
+ ICE_SID_RXPARSER_MARKER_PTYPE, NULL,
+ ice_marker_ptype_tcam_handler);
+ if (tcam &&
+ le16_to_cpu(tcam->addr) < ICE_MARKER_PTYPE_TCAM_ADDR_MAX &&
+ le16_to_cpu(tcam->ptype) < ICE_FLOW_PTYPE_MAX)
+ set_bit(le16_to_cpu(tcam->ptype), hw->hw_ptype);
+
+ seg = NULL;
+ } while (tcam);
+}
+
+/**
+ * ice_init_pkg - initialize/download package
+ * @hw: pointer to the hardware structure
+ * @buf: pointer to the package buffer
+ * @len: size of the package buffer
+ *
+ * This function initializes a package. The package contains HW tables
+ * required to do packet processing. First, the function extracts package
+ * information such as version. Then it finds the ice configuration segment
+ * within the package; this function then saves a copy of the segment pointer
+ * within the supplied package buffer. Next, the function will cache any hints
+ * from the package, followed by downloading the package itself. Note, that if
+ * a previous PF driver has already downloaded the package successfully, then
+ * the current driver will not have to download the package again.
+ *
+ * The local package contents will be used to query default behavior and to
+ * update specific sections of the HW's version of the package (e.g. to update
+ * the parse graph to understand new protocols).
+ *
+ * This function stores a pointer to the package buffer memory, and it is
+ * expected that the supplied buffer will not be freed immediately. If the
+ * package buffer needs to be freed, such as when read from a file, use
+ * ice_copy_and_init_pkg() instead of directly calling ice_init_pkg() in this
+ * case.
+ */
+enum ice_ddp_state ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len)
+{
+ bool already_loaded = false;
+ enum ice_ddp_state state;
+ struct ice_pkg_hdr *pkg;
+ struct ice_seg *seg;
+
+ if (!buf || !len)
+ return ICE_DDP_PKG_ERR;
+
+ pkg = (struct ice_pkg_hdr *)buf;
+ state = ice_verify_pkg(pkg, len);
+ if (state) {
+ ice_debug(hw, ICE_DBG_INIT, "failed to verify pkg (err: %d)\n",
+ state);
+ return state;
+ }
+
+ /* initialize package info */
+ state = ice_init_pkg_info(hw, pkg);
+ if (state)
+ return state;
+
+ /* before downloading the package, check package version for
+ * compatibility with driver
+ */
+ state = ice_chk_pkg_compat(hw, pkg, &seg);
+ if (state)
+ return state;
+
+ /* initialize package hints and then download package */
+ ice_init_pkg_hints(hw, seg);
+ state = ice_download_pkg(hw, seg);
+ if (state == ICE_DDP_PKG_ALREADY_LOADED) {
+ ice_debug(hw, ICE_DBG_INIT,
+ "package previously loaded - no work.\n");
+ already_loaded = true;
+ }
+
+ /* Get information on the package currently loaded in HW, then make sure
+ * the driver is compatible with this version.
+ */
+ if (!state || state == ICE_DDP_PKG_ALREADY_LOADED) {
+ state = ice_get_pkg_info(hw);
+ if (!state)
+ state = ice_get_ddp_pkg_state(hw, already_loaded);
+ }
+
+ if (ice_is_init_pkg_successful(state)) {
+ hw->seg = seg;
+ /* on successful package download update other required
+ * registers to support the package and fill HW tables
+ * with package content.
+ */
+ ice_init_pkg_regs(hw);
+ ice_fill_blk_tbls(hw);
+ ice_fill_hw_ptype(hw);
+ ice_get_prof_index_max(hw);
+ } else {
+ ice_debug(hw, ICE_DBG_INIT, "package load failed, %d\n", state);
+ }
+
+ return state;
+}
+
+/**
+ * ice_copy_and_init_pkg - initialize/download a copy of the package
+ * @hw: pointer to the hardware structure
+ * @buf: pointer to the package buffer
+ * @len: size of the package buffer
+ *
+ * This function copies the package buffer, and then calls ice_init_pkg() to
+ * initialize the copied package contents.
+ *
+ * The copying is necessary if the package buffer supplied is constant, or if
+ * the memory may disappear shortly after calling this function.
+ *
+ * If the package buffer resides in the data segment and can be modified, the
+ * caller is free to use ice_init_pkg() instead of ice_copy_and_init_pkg().
+ *
+ * However, if the package buffer needs to be copied first, such as when being
+ * read from a file, the caller should use ice_copy_and_init_pkg().
+ *
+ * This function will first copy the package buffer, before calling
+ * ice_init_pkg(). The caller is free to immediately destroy the original
+ * package buffer, as the new copy will be managed by this function and
+ * related routines.
+ */
+enum ice_ddp_state ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf,
+ u32 len)
+{
+ enum ice_ddp_state state;
+ u8 *buf_copy;
+
+ if (!buf || !len)
+ return ICE_DDP_PKG_ERR;
+
+ buf_copy = devm_kmemdup(ice_hw_to_dev(hw), buf, len, GFP_KERNEL);
+
+ state = ice_init_pkg(hw, buf_copy, len);
+ if (!ice_is_init_pkg_successful(state)) {
+ /* Free the copy, since we failed to initialize the package */
+ devm_kfree(ice_hw_to_dev(hw), buf_copy);
+ } else {
+ /* Track the copied pkg so we can free it later */
+ hw->pkg_copy = buf_copy;
+ hw->pkg_size = len;
+ }
+
+ return state;
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_ddp.h b/drivers/net/ethernet/intel/ice/ice_ddp.h
new file mode 100644
index 000000000000..37eadb3d27a8
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_ddp.h
@@ -0,0 +1,445 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2022, Intel Corporation. */
+
+#ifndef _ICE_DDP_H_
+#define _ICE_DDP_H_
+
+#include "ice_type.h"
+
+/* Package minimal version supported */
+#define ICE_PKG_SUPP_VER_MAJ 1
+#define ICE_PKG_SUPP_VER_MNR 3
+
+/* Package format version */
+#define ICE_PKG_FMT_VER_MAJ 1
+#define ICE_PKG_FMT_VER_MNR 0
+#define ICE_PKG_FMT_VER_UPD 0
+#define ICE_PKG_FMT_VER_DFT 0
+
+#define ICE_PKG_CNT 4
+
+#define ICE_FV_OFFSET_INVAL 0x1FF
+
+/* Extraction Sequence (Field Vector) Table */
+struct ice_fv_word {
+ u8 prot_id;
+ u16 off; /* Offset within the protocol header */
+ u8 resvrd;
+} __packed;
+
+#define ICE_MAX_NUM_PROFILES 256
+
+#define ICE_MAX_FV_WORDS 48
+struct ice_fv {
+ struct ice_fv_word ew[ICE_MAX_FV_WORDS];
+};
+
+enum ice_ddp_state {
+ /* Indicates that this call to ice_init_pkg
+ * successfully loaded the requested DDP package
+ */
+ ICE_DDP_PKG_SUCCESS = 0,
+
+ /* Generic error for already loaded errors, it is mapped later to
+ * the more specific one (one of the next 3)
+ */
+ ICE_DDP_PKG_ALREADY_LOADED = -1,
+
+ /* Indicates that a DDP package of the same version has already been
+ * loaded onto the device by a previous call or by another PF
+ */
+ ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED = -2,
+
+ /* The device has a DDP package that is not supported by the driver */
+ ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED = -3,
+
+ /* The device has a compatible package
+ * (but different from the request) already loaded
+ */
+ ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED = -4,
+
+ /* The firmware loaded on the device is not compatible with
+ * the DDP package loaded
+ */
+ ICE_DDP_PKG_FW_MISMATCH = -5,
+
+ /* The DDP package file is invalid */
+ ICE_DDP_PKG_INVALID_FILE = -6,
+
+ /* The version of the DDP package provided is higher than
+ * the driver supports
+ */
+ ICE_DDP_PKG_FILE_VERSION_TOO_HIGH = -7,
+
+ /* The version of the DDP package provided is lower than the
+ * driver supports
+ */
+ ICE_DDP_PKG_FILE_VERSION_TOO_LOW = -8,
+
+ /* The signature of the DDP package file provided is invalid */
+ ICE_DDP_PKG_FILE_SIGNATURE_INVALID = -9,
+
+ /* The DDP package file security revision is too low and not
+ * supported by firmware
+ */
+ ICE_DDP_PKG_FILE_REVISION_TOO_LOW = -10,
+
+ /* An error occurred in firmware while loading the DDP package */
+ ICE_DDP_PKG_LOAD_ERROR = -11,
+
+ /* Other errors */
+ ICE_DDP_PKG_ERR = -12
+};
+
+/* Package and segment headers and tables */
+struct ice_pkg_hdr {
+ struct ice_pkg_ver pkg_format_ver;
+ __le32 seg_count;
+ __le32 seg_offset[];
+};
+
+/* generic segment */
+struct ice_generic_seg_hdr {
+#define SEGMENT_TYPE_METADATA 0x00000001
+#define SEGMENT_TYPE_ICE 0x00000010
+ __le32 seg_type;
+ struct ice_pkg_ver seg_format_ver;
+ __le32 seg_size;
+ char seg_id[ICE_PKG_NAME_SIZE];
+};
+
+/* ice specific segment */
+
+union ice_device_id {
+ struct {
+ __le16 device_id;
+ __le16 vendor_id;
+ } dev_vend_id;
+ __le32 id;
+};
+
+struct ice_device_id_entry {
+ union ice_device_id device;
+ union ice_device_id sub_device;
+};
+
+struct ice_seg {
+ struct ice_generic_seg_hdr hdr;
+ __le32 device_table_count;
+ struct ice_device_id_entry device_table[];
+};
+
+struct ice_nvm_table {
+ __le32 table_count;
+ __le32 vers[];
+};
+
+struct ice_buf {
+#define ICE_PKG_BUF_SIZE 4096
+ u8 buf[ICE_PKG_BUF_SIZE];
+};
+
+struct ice_buf_table {
+ __le32 buf_count;
+ struct ice_buf buf_array[];
+};
+
+struct ice_run_time_cfg_seg {
+ struct ice_generic_seg_hdr hdr;
+ u8 rsvd[8];
+ struct ice_buf_table buf_table;
+};
+
+/* global metadata specific segment */
+struct ice_global_metadata_seg {
+ struct ice_generic_seg_hdr hdr;
+ struct ice_pkg_ver pkg_ver;
+ __le32 rsvd;
+ char pkg_name[ICE_PKG_NAME_SIZE];
+};
+
+#define ICE_MIN_S_OFF 12
+#define ICE_MAX_S_OFF 4095
+#define ICE_MIN_S_SZ 1
+#define ICE_MAX_S_SZ 4084
+
+/* section information */
+struct ice_section_entry {
+ __le32 type;
+ __le16 offset;
+ __le16 size;
+};
+
+#define ICE_MIN_S_COUNT 1
+#define ICE_MAX_S_COUNT 511
+#define ICE_MIN_S_DATA_END 12
+#define ICE_MAX_S_DATA_END 4096
+
+#define ICE_METADATA_BUF 0x80000000
+
+struct ice_buf_hdr {
+ __le16 section_count;
+ __le16 data_end;
+ struct ice_section_entry section_entry[];
+};
+
+#define ICE_MAX_ENTRIES_IN_BUF(hd_sz, ent_sz) \
+ ((ICE_PKG_BUF_SIZE - \
+ struct_size((struct ice_buf_hdr *)0, section_entry, 1) - (hd_sz)) / \
+ (ent_sz))
+
+/* ice package section IDs */
+#define ICE_SID_METADATA 1
+#define ICE_SID_XLT0_SW 10
+#define ICE_SID_XLT_KEY_BUILDER_SW 11
+#define ICE_SID_XLT1_SW 12
+#define ICE_SID_XLT2_SW 13
+#define ICE_SID_PROFID_TCAM_SW 14
+#define ICE_SID_PROFID_REDIR_SW 15
+#define ICE_SID_FLD_VEC_SW 16
+#define ICE_SID_CDID_KEY_BUILDER_SW 17
+
+struct ice_meta_sect {
+ struct ice_pkg_ver ver;
+#define ICE_META_SECT_NAME_SIZE 28
+ char name[ICE_META_SECT_NAME_SIZE];
+ __le32 track_id;
+};
+
+#define ICE_SID_CDID_REDIR_SW 18
+
+#define ICE_SID_XLT0_ACL 20
+#define ICE_SID_XLT_KEY_BUILDER_ACL 21
+#define ICE_SID_XLT1_ACL 22
+#define ICE_SID_XLT2_ACL 23
+#define ICE_SID_PROFID_TCAM_ACL 24
+#define ICE_SID_PROFID_REDIR_ACL 25
+#define ICE_SID_FLD_VEC_ACL 26
+#define ICE_SID_CDID_KEY_BUILDER_ACL 27
+#define ICE_SID_CDID_REDIR_ACL 28
+
+#define ICE_SID_XLT0_FD 30
+#define ICE_SID_XLT_KEY_BUILDER_FD 31
+#define ICE_SID_XLT1_FD 32
+#define ICE_SID_XLT2_FD 33
+#define ICE_SID_PROFID_TCAM_FD 34
+#define ICE_SID_PROFID_REDIR_FD 35
+#define ICE_SID_FLD_VEC_FD 36
+#define ICE_SID_CDID_KEY_BUILDER_FD 37
+#define ICE_SID_CDID_REDIR_FD 38
+
+#define ICE_SID_XLT0_RSS 40
+#define ICE_SID_XLT_KEY_BUILDER_RSS 41
+#define ICE_SID_XLT1_RSS 42
+#define ICE_SID_XLT2_RSS 43
+#define ICE_SID_PROFID_TCAM_RSS 44
+#define ICE_SID_PROFID_REDIR_RSS 45
+#define ICE_SID_FLD_VEC_RSS 46
+#define ICE_SID_CDID_KEY_BUILDER_RSS 47
+#define ICE_SID_CDID_REDIR_RSS 48
+
+#define ICE_SID_RXPARSER_MARKER_PTYPE 55
+#define ICE_SID_RXPARSER_BOOST_TCAM 56
+#define ICE_SID_RXPARSER_METADATA_INIT 58
+#define ICE_SID_TXPARSER_BOOST_TCAM 66
+
+#define ICE_SID_XLT0_PE 80
+#define ICE_SID_XLT_KEY_BUILDER_PE 81
+#define ICE_SID_XLT1_PE 82
+#define ICE_SID_XLT2_PE 83
+#define ICE_SID_PROFID_TCAM_PE 84
+#define ICE_SID_PROFID_REDIR_PE 85
+#define ICE_SID_FLD_VEC_PE 86
+#define ICE_SID_CDID_KEY_BUILDER_PE 87
+#define ICE_SID_CDID_REDIR_PE 88
+
+/* Label Metadata section IDs */
+#define ICE_SID_LBL_FIRST 0x80000010
+#define ICE_SID_LBL_RXPARSER_TMEM 0x80000018
+/* The following define MUST be updated to reflect the last label section ID */
+#define ICE_SID_LBL_LAST 0x80000038
+
+/* Label ICE runtime configuration section IDs */
+#define ICE_SID_TX_5_LAYER_TOPO 0x10
+
+enum ice_block {
+ ICE_BLK_SW = 0,
+ ICE_BLK_ACL,
+ ICE_BLK_FD,
+ ICE_BLK_RSS,
+ ICE_BLK_PE,
+ ICE_BLK_COUNT
+};
+
+enum ice_sect {
+ ICE_XLT0 = 0,
+ ICE_XLT_KB,
+ ICE_XLT1,
+ ICE_XLT2,
+ ICE_PROF_TCAM,
+ ICE_PROF_REDIR,
+ ICE_VEC_TBL,
+ ICE_CDID_KB,
+ ICE_CDID_REDIR,
+ ICE_SECT_COUNT
+};
+
+/* package labels */
+struct ice_label {
+ __le16 value;
+#define ICE_PKG_LABEL_SIZE 64
+ char name[ICE_PKG_LABEL_SIZE];
+};
+
+struct ice_label_section {
+ __le16 count;
+ struct ice_label label[];
+};
+
+#define ICE_MAX_LABELS_IN_BUF \
+ ICE_MAX_ENTRIES_IN_BUF(struct_size((struct ice_label_section *)0, \
+ label, 1) - \
+ sizeof(struct ice_label), \
+ sizeof(struct ice_label))
+
+struct ice_sw_fv_section {
+ __le16 count;
+ __le16 base_offset;
+ struct ice_fv fv[];
+};
+
+struct ice_sw_fv_list_entry {
+ struct list_head list_entry;
+ u32 profile_id;
+ struct ice_fv *fv_ptr;
+};
+
+/* The BOOST TCAM stores the match packet header in reverse order, meaning
+ * the fields are reversed; in addition, this means that the normally big endian
+ * fields of the packet are now little endian.
+ */
+struct ice_boost_key_value {
+#define ICE_BOOST_REMAINING_HV_KEY 15
+ u8 remaining_hv_key[ICE_BOOST_REMAINING_HV_KEY];
+ __le16 hv_dst_port_key;
+ __le16 hv_src_port_key;
+ u8 tcam_search_key;
+} __packed;
+
+struct ice_boost_key {
+ struct ice_boost_key_value key;
+ struct ice_boost_key_value key2;
+};
+
+/* package Boost TCAM entry */
+struct ice_boost_tcam_entry {
+ __le16 addr;
+ __le16 reserved;
+ /* break up the 40 bytes of key into different fields */
+ struct ice_boost_key key;
+ u8 boost_hit_index_group;
+ /* The following contains bitfields which are not on byte boundaries.
+ * These fields are currently unused by driver software.
+ */
+#define ICE_BOOST_BIT_FIELDS 43
+ u8 bit_fields[ICE_BOOST_BIT_FIELDS];
+};
+
+struct ice_boost_tcam_section {
+ __le16 count;
+ __le16 reserved;
+ struct ice_boost_tcam_entry tcam[];
+};
+
+#define ICE_MAX_BST_TCAMS_IN_BUF \
+ ICE_MAX_ENTRIES_IN_BUF(struct_size((struct ice_boost_tcam_section *)0, \
+ tcam, 1) - \
+ sizeof(struct ice_boost_tcam_entry), \
+ sizeof(struct ice_boost_tcam_entry))
+
+/* package Marker Ptype TCAM entry */
+struct ice_marker_ptype_tcam_entry {
+#define ICE_MARKER_PTYPE_TCAM_ADDR_MAX 1024
+ __le16 addr;
+ __le16 ptype;
+ u8 keys[20];
+};
+
+struct ice_marker_ptype_tcam_section {
+ __le16 count;
+ __le16 reserved;
+ struct ice_marker_ptype_tcam_entry tcam[];
+};
+
+#define ICE_MAX_MARKER_PTYPE_TCAMS_IN_BUF \
+ ICE_MAX_ENTRIES_IN_BUF( \
+ struct_size((struct ice_marker_ptype_tcam_section *)0, tcam, \
+ 1) - \
+ sizeof(struct ice_marker_ptype_tcam_entry), \
+ sizeof(struct ice_marker_ptype_tcam_entry))
+
+struct ice_xlt1_section {
+ __le16 count;
+ __le16 offset;
+ u8 value[];
+};
+
+struct ice_xlt2_section {
+ __le16 count;
+ __le16 offset;
+ __le16 value[];
+};
+
+struct ice_prof_redir_section {
+ __le16 count;
+ __le16 offset;
+ u8 redir_value[];
+};
+
+/* package buffer building */
+
+struct ice_buf_build {
+ struct ice_buf buf;
+ u16 reserved_section_table_entries;
+};
+
+struct ice_pkg_enum {
+ struct ice_buf_table *buf_table;
+ u32 buf_idx;
+
+ u32 type;
+ struct ice_buf_hdr *buf;
+ u32 sect_idx;
+ void *sect;
+ u32 sect_type;
+
+ u32 entry_idx;
+ void *(*handler)(u32 sect_type, void *section, u32 index, u32 *offset);
+};
+
+int ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
+ u16 buf_size, bool last_buf, u32 *error_offset,
+ u32 *error_info, struct ice_sq_cd *cd);
+int ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
+ u16 buf_size, struct ice_sq_cd *cd);
+
+void *ice_pkg_buf_alloc_section(struct ice_buf_build *bld, u32 type, u16 size);
+
+enum ice_ddp_state ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len);
+
+struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw);
+
+struct ice_generic_seg_hdr *ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type,
+ struct ice_pkg_hdr *pkg_hdr);
+
+int ice_update_pkg_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 count);
+int ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count);
+
+int ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count);
+u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld);
+void *ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
+ u32 sect_type);
+
+struct ice_buf_hdr *ice_pkg_val_buf(struct ice_buf *buf);
+
+#endif
diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.c b/drivers/net/ethernet/intel/ice/ice_devlink.c
index 8286e47b4bae..05f216af8c81 100644
--- a/drivers/net/ethernet/intel/ice/ice_devlink.c
+++ b/drivers/net/ethernet/intel/ice/ice_devlink.c
@@ -371,10 +371,7 @@ out_free_ctx:
/**
* ice_devlink_reload_empr_start - Start EMP reset to activate new firmware
- * @devlink: pointer to the devlink instance to reload
- * @netns_change: if true, the network namespace is changing
- * @action: the action to perform. Must be DEVLINK_RELOAD_ACTION_FW_ACTIVATE
- * @limit: limits on what reload should do, such as not resetting
+ * @pf: pointer to the pf instance
* @extack: netlink extended ACK structure
*
* Allow user to activate new Embedded Management Processor firmware by
@@ -387,12 +384,9 @@ out_free_ctx:
* any source.
*/
static int
-ice_devlink_reload_empr_start(struct devlink *devlink, bool netns_change,
- enum devlink_reload_action action,
- enum devlink_reload_limit limit,
+ice_devlink_reload_empr_start(struct ice_pf *pf,
struct netlink_ext_ack *extack)
{
- struct ice_pf *pf = devlink_priv(devlink);
struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
u8 pending;
@@ -431,11 +425,51 @@ ice_devlink_reload_empr_start(struct devlink *devlink, bool netns_change,
}
/**
+ * ice_devlink_reload_down - prepare for reload
+ * @devlink: pointer to the devlink instance to reload
+ * @netns_change: if true, the network namespace is changing
+ * @action: the action to perform
+ * @limit: limits on what reload should do, such as not resetting
+ * @extack: netlink extended ACK structure
+ */
+static int
+ice_devlink_reload_down(struct devlink *devlink, bool netns_change,
+ enum devlink_reload_action action,
+ enum devlink_reload_limit limit,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_pf *pf = devlink_priv(devlink);
+
+ switch (action) {
+ case DEVLINK_RELOAD_ACTION_DRIVER_REINIT:
+ if (ice_is_eswitch_mode_switchdev(pf)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Go to legacy mode before doing reinit\n");
+ return -EOPNOTSUPP;
+ }
+ if (ice_is_adq_active(pf)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Turn off ADQ before doing reinit\n");
+ return -EOPNOTSUPP;
+ }
+ if (ice_has_vfs(pf)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Remove all VFs before doing reinit\n");
+ return -EOPNOTSUPP;
+ }
+ ice_unload(pf);
+ return 0;
+ case DEVLINK_RELOAD_ACTION_FW_ACTIVATE:
+ return ice_devlink_reload_empr_start(pf, extack);
+ default:
+ WARN_ON(1);
+ return -EOPNOTSUPP;
+ }
+}
+
+/**
* ice_devlink_reload_empr_finish - Wait for EMP reset to finish
- * @devlink: pointer to the devlink instance reloading
- * @action: the action requested
- * @limit: limits imposed by userspace, such as not resetting
- * @actions_performed: on return, indicate what actions actually performed
+ * @pf: pointer to the pf instance
* @extack: netlink extended ACK structure
*
* Wait for driver to finish rebuilding after EMP reset is completed. This
@@ -443,17 +477,11 @@ ice_devlink_reload_empr_start(struct devlink *devlink, bool netns_change,
* for the driver's rebuild to complete.
*/
static int
-ice_devlink_reload_empr_finish(struct devlink *devlink,
- enum devlink_reload_action action,
- enum devlink_reload_limit limit,
- u32 *actions_performed,
+ice_devlink_reload_empr_finish(struct ice_pf *pf,
struct netlink_ext_ack *extack)
{
- struct ice_pf *pf = devlink_priv(devlink);
int err;
- *actions_performed = BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE);
-
err = ice_wait_for_reset(pf, 60 * HZ);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Device still resetting after 1 minute");
@@ -899,7 +927,7 @@ static int ice_set_object_tx_priority(struct ice_port_info *pi, struct ice_sched
{
int status;
- if (node->tx_priority >= 8) {
+ if (priority >= 8) {
NL_SET_ERR_MSG_MOD(extack, "Priority should be less than 8");
return -EINVAL;
}
@@ -929,7 +957,7 @@ static int ice_set_object_tx_weight(struct ice_port_info *pi, struct ice_sched_n
{
int status;
- if (node->tx_weight > 200 || node->tx_weight < 1) {
+ if (weight > 200 || weight < 1) {
NL_SET_ERR_MSG_MOD(extack, "Weight must be between 1 and 200");
return -EINVAL;
}
@@ -1192,12 +1220,43 @@ static int ice_devlink_set_parent(struct devlink_rate *devlink_rate,
return status;
}
+/**
+ * ice_devlink_reload_up - do reload up after reinit
+ * @devlink: pointer to the devlink instance reloading
+ * @action: the action requested
+ * @limit: limits imposed by userspace, such as not resetting
+ * @actions_performed: on return, indicate what actions actually performed
+ * @extack: netlink extended ACK structure
+ */
+static int
+ice_devlink_reload_up(struct devlink *devlink,
+ enum devlink_reload_action action,
+ enum devlink_reload_limit limit,
+ u32 *actions_performed,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_pf *pf = devlink_priv(devlink);
+
+ switch (action) {
+ case DEVLINK_RELOAD_ACTION_DRIVER_REINIT:
+ *actions_performed = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT);
+ return ice_load(pf);
+ case DEVLINK_RELOAD_ACTION_FW_ACTIVATE:
+ *actions_performed = BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE);
+ return ice_devlink_reload_empr_finish(pf, extack);
+ default:
+ WARN_ON(1);
+ return -EOPNOTSUPP;
+ }
+}
+
static const struct devlink_ops ice_devlink_ops = {
.supported_flash_update_params = DEVLINK_SUPPORT_FLASH_UPDATE_OVERWRITE_MASK,
- .reload_actions = BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE),
+ .reload_actions = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) |
+ BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE),
/* The ice driver currently does not support driver reinit */
- .reload_down = ice_devlink_reload_empr_start,
- .reload_up = ice_devlink_reload_empr_finish,
+ .reload_down = ice_devlink_reload_down,
+ .reload_up = ice_devlink_reload_up,
.port_split = ice_devlink_port_split,
.port_unsplit = ice_devlink_port_unsplit,
.eswitch_mode_get = ice_eswitch_mode_get,
@@ -1376,7 +1435,6 @@ void ice_devlink_register(struct ice_pf *pf)
{
struct devlink *devlink = priv_to_devlink(pf);
- devlink_set_features(devlink, DEVLINK_F_RELOAD);
devlink_register(devlink);
}
@@ -1411,25 +1469,9 @@ ice_devlink_set_switch_id(struct ice_pf *pf, struct netdev_phys_item_id *ppid)
int ice_devlink_register_params(struct ice_pf *pf)
{
struct devlink *devlink = priv_to_devlink(pf);
- union devlink_param_value value;
- int err;
- err = devlink_params_register(devlink, ice_devlink_params,
- ARRAY_SIZE(ice_devlink_params));
- if (err)
- return err;
-
- value.vbool = false;
- devlink_param_driverinit_value_set(devlink,
- DEVLINK_PARAM_GENERIC_ID_ENABLE_IWARP,
- value);
-
- value.vbool = test_bit(ICE_FLAG_RDMA_ENA, pf->flags) ? true : false;
- devlink_param_driverinit_value_set(devlink,
- DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE,
- value);
-
- return 0;
+ return devlink_params_register(devlink, ice_devlink_params,
+ ARRAY_SIZE(ice_devlink_params));
}
void ice_devlink_unregister_params(struct ice_pf *pf)
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.c b/drivers/net/ethernet/intel/ice/ice_eswitch.c
index f9f15acae90a..f6dd3f8fd936 100644
--- a/drivers/net/ethernet/intel/ice/ice_eswitch.c
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch.c
@@ -71,17 +71,17 @@ void ice_eswitch_replay_vf_mac_rule(struct ice_vf *vf)
if (!ice_is_switchdev_running(vf->pf))
return;
- if (is_valid_ether_addr(vf->hw_lan_addr.addr)) {
+ if (is_valid_ether_addr(vf->hw_lan_addr)) {
err = ice_eswitch_add_vf_mac_rule(vf->pf, vf,
- vf->hw_lan_addr.addr);
+ vf->hw_lan_addr);
if (err) {
dev_err(ice_pf_to_dev(vf->pf), "Failed to add MAC %pM for VF %d\n, error %d\n",
- vf->hw_lan_addr.addr, vf->vf_id, err);
+ vf->hw_lan_addr, vf->vf_id, err);
return;
}
vf->num_mac++;
- ether_addr_copy(vf->dev_lan_addr.addr, vf->hw_lan_addr.addr);
+ ether_addr_copy(vf->dev_lan_addr, vf->hw_lan_addr);
}
}
@@ -237,7 +237,7 @@ ice_eswitch_release_reprs(struct ice_pf *pf, struct ice_vsi *ctrl_vsi)
ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
metadata_dst_free(vf->repr->dst);
vf->repr->dst = NULL;
- ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr.addr,
+ ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr,
ICE_FWD_TO_VSI);
netif_napi_del(&vf->repr->q_vector->napi);
@@ -265,14 +265,14 @@ static int ice_eswitch_setup_reprs(struct ice_pf *pf)
GFP_KERNEL);
if (!vf->repr->dst) {
ice_fltr_add_mac_and_broadcast(vsi,
- vf->hw_lan_addr.addr,
+ vf->hw_lan_addr,
ICE_FWD_TO_VSI);
goto err;
}
if (ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof)) {
ice_fltr_add_mac_and_broadcast(vsi,
- vf->hw_lan_addr.addr,
+ vf->hw_lan_addr,
ICE_FWD_TO_VSI);
metadata_dst_free(vf->repr->dst);
vf->repr->dst = NULL;
@@ -281,7 +281,7 @@ static int ice_eswitch_setup_reprs(struct ice_pf *pf)
if (ice_vsi_add_vlan_zero(vsi)) {
ice_fltr_add_mac_and_broadcast(vsi,
- vf->hw_lan_addr.addr,
+ vf->hw_lan_addr,
ICE_FWD_TO_VSI);
metadata_dst_free(vf->repr->dst);
vf->repr->dst = NULL;
@@ -338,7 +338,7 @@ void ice_eswitch_update_repr(struct ice_vsi *vsi)
ret = ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof);
if (ret) {
- ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr.addr, ICE_FWD_TO_VSI);
+ ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr, ICE_FWD_TO_VSI);
dev_err(ice_pf_to_dev(pf), "Failed to update VF %d port representor",
vsi->vf->vf_id);
}
@@ -425,7 +425,13 @@ static void ice_eswitch_release_env(struct ice_pf *pf)
static struct ice_vsi *
ice_eswitch_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
{
- return ice_vsi_setup(pf, pi, ICE_VSI_SWITCHDEV_CTRL, NULL, NULL);
+ struct ice_vsi_cfg_params params = {};
+
+ params.type = ICE_VSI_SWITCHDEV_CTRL;
+ params.pi = pi;
+ params.flags = ICE_VSI_FLAG_INIT;
+
+ return ice_vsi_setup(pf, &params);
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index 4191994d8f3a..b360bd8f1599 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -656,7 +656,7 @@ static int ice_lbtest_prepare_rings(struct ice_vsi *vsi)
if (status)
goto err_setup_rx_ring;
- status = ice_vsi_cfg(vsi);
+ status = ice_vsi_cfg_lan(vsi);
if (status)
goto err_setup_rx_ring;
@@ -664,7 +664,7 @@ static int ice_lbtest_prepare_rings(struct ice_vsi *vsi)
if (status)
goto err_start_rx_ring;
- return status;
+ return 0;
err_start_rx_ring:
ice_vsi_free_rx_rings(vsi);
@@ -1950,8 +1950,7 @@ ice_phy_type_to_ethtool(struct net_device *netdev,
ICE_PHY_TYPE_LOW_100G_CAUI4 |
ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC |
ICE_PHY_TYPE_LOW_100G_AUI4 |
- ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4 |
- ICE_PHY_TYPE_LOW_100GBASE_CP2;
+ ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4;
phy_type_mask_hi = ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC |
ICE_PHY_TYPE_HIGH_100G_CAUI2 |
ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC |
@@ -1964,15 +1963,27 @@ ice_phy_type_to_ethtool(struct net_device *netdev,
100000baseCR4_Full);
}
- phy_type_mask_lo = ICE_PHY_TYPE_LOW_100GBASE_SR4 |
- ICE_PHY_TYPE_LOW_100GBASE_SR2;
- if (phy_types_low & phy_type_mask_lo) {
+ if (phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_CP2) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 100000baseCR2_Full);
+ ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_100GB,
+ 100000baseCR2_Full);
+ }
+
+ if (phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_SR4) {
ethtool_link_ksettings_add_link_mode(ks, supported,
100000baseSR4_Full);
ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_100GB,
100000baseSR4_Full);
}
+ if (phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_SR2) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 100000baseSR2_Full);
+ ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_100GB,
+ 100000baseSR2_Full);
+ }
+
phy_type_mask_lo = ICE_PHY_TYPE_LOW_100GBASE_LR4 |
ICE_PHY_TYPE_LOW_100GBASE_DR;
if (phy_types_low & phy_type_mask_lo) {
@@ -1984,14 +1995,20 @@ ice_phy_type_to_ethtool(struct net_device *netdev,
phy_type_mask_lo = ICE_PHY_TYPE_LOW_100GBASE_KR4 |
ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4;
- phy_type_mask_hi = ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4;
- if (phy_types_low & phy_type_mask_lo ||
- phy_types_high & phy_type_mask_hi) {
+ if (phy_types_low & phy_type_mask_lo) {
ethtool_link_ksettings_add_link_mode(ks, supported,
100000baseKR4_Full);
ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_100GB,
100000baseKR4_Full);
}
+
+ if (phy_types_high & ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4) {
+ ethtool_link_ksettings_add_link_mode(ks, supported,
+ 100000baseKR2_Full);
+ ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_100GB,
+ 100000baseKR2_Full);
+ }
+
}
#define TEST_SET_BITS_TIMEOUT 50
@@ -2242,17 +2259,15 @@ ice_ksettings_find_adv_link_speed(const struct ethtool_link_ksettings *ks)
100baseT_Full))
adv_link_speed |= ICE_AQ_LINK_SPEED_100MB;
if (ethtool_link_ksettings_test_link_mode(ks, advertising,
- 1000baseX_Full))
- adv_link_speed |= ICE_AQ_LINK_SPEED_1000MB;
- if (ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 1000baseX_Full) ||
+ ethtool_link_ksettings_test_link_mode(ks, advertising,
1000baseT_Full) ||
ethtool_link_ksettings_test_link_mode(ks, advertising,
1000baseKX_Full))
adv_link_speed |= ICE_AQ_LINK_SPEED_1000MB;
if (ethtool_link_ksettings_test_link_mode(ks, advertising,
- 2500baseT_Full))
- adv_link_speed |= ICE_AQ_LINK_SPEED_2500MB;
- if (ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 2500baseT_Full) ||
+ ethtool_link_ksettings_test_link_mode(ks, advertising,
2500baseX_Full))
adv_link_speed |= ICE_AQ_LINK_SPEED_2500MB;
if (ethtool_link_ksettings_test_link_mode(ks, advertising,
@@ -2261,9 +2276,8 @@ ice_ksettings_find_adv_link_speed(const struct ethtool_link_ksettings *ks)
if (ethtool_link_ksettings_test_link_mode(ks, advertising,
10000baseT_Full) ||
ethtool_link_ksettings_test_link_mode(ks, advertising,
- 10000baseKR_Full))
- adv_link_speed |= ICE_AQ_LINK_SPEED_10GB;
- if (ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 10000baseKR_Full) ||
+ ethtool_link_ksettings_test_link_mode(ks, advertising,
10000baseSR_Full) ||
ethtool_link_ksettings_test_link_mode(ks, advertising,
10000baseLR_Full))
@@ -2287,9 +2301,8 @@ ice_ksettings_find_adv_link_speed(const struct ethtool_link_ksettings *ks)
if (ethtool_link_ksettings_test_link_mode(ks, advertising,
50000baseCR2_Full) ||
ethtool_link_ksettings_test_link_mode(ks, advertising,
- 50000baseKR2_Full))
- adv_link_speed |= ICE_AQ_LINK_SPEED_50GB;
- if (ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 50000baseKR2_Full) ||
+ ethtool_link_ksettings_test_link_mode(ks, advertising,
50000baseSR2_Full))
adv_link_speed |= ICE_AQ_LINK_SPEED_50GB;
if (ethtool_link_ksettings_test_link_mode(ks, advertising,
@@ -2299,7 +2312,13 @@ ice_ksettings_find_adv_link_speed(const struct ethtool_link_ksettings *ks)
ethtool_link_ksettings_test_link_mode(ks, advertising,
100000baseLR4_ER4_Full) ||
ethtool_link_ksettings_test_link_mode(ks, advertising,
- 100000baseKR4_Full))
+ 100000baseKR4_Full) ||
+ ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 100000baseCR2_Full) ||
+ ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 100000baseSR2_Full) ||
+ ethtool_link_ksettings_test_link_mode(ks, advertising,
+ 100000baseKR2_Full))
adv_link_speed |= ICE_AQ_LINK_SPEED_100GB;
return adv_link_speed;
@@ -3027,8 +3046,6 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring,
/* clone ring and setup updated count */
xdp_rings[i] = *vsi->xdp_rings[i];
xdp_rings[i].count = new_tx_cnt;
- xdp_rings[i].next_dd = ICE_RING_QUARTER(&xdp_rings[i]) - 1;
- xdp_rings[i].next_rs = ICE_RING_QUARTER(&xdp_rings[i]) - 1;
xdp_rings[i].desc = NULL;
xdp_rings[i].tx_buf = NULL;
err = ice_setup_tx_ring(&xdp_rings[i]);
@@ -3073,7 +3090,7 @@ process_rx:
/* allocate Rx buffers */
err = ice_alloc_rx_bufs(&rx_rings[i],
- ICE_DESC_UNUSED(&rx_rings[i]));
+ ICE_RX_DESC_UNUSED(&rx_rings[i]));
rx_unwind:
if (err) {
while (i) {
@@ -3641,7 +3658,9 @@ static int ice_set_channels(struct net_device *dev, struct ethtool_channels *ch)
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
int new_rx = 0, new_tx = 0;
+ bool locked = false;
u32 curr_combined;
+ int ret = 0;
/* do not support changing channels in Safe Mode */
if (ice_is_safe_mode(pf)) {
@@ -3705,15 +3724,33 @@ static int ice_set_channels(struct net_device *dev, struct ethtool_channels *ch)
return -EINVAL;
}
- ice_vsi_recfg_qs(vsi, new_rx, new_tx);
+ if (pf->adev) {
+ mutex_lock(&pf->adev_mutex);
+ device_lock(&pf->adev->dev);
+ locked = true;
+ if (pf->adev->dev.driver) {
+ netdev_err(dev, "Cannot change channels when RDMA is active\n");
+ ret = -EBUSY;
+ goto adev_unlock;
+ }
+ }
+
+ ice_vsi_recfg_qs(vsi, new_rx, new_tx, locked);
- if (!netif_is_rxfh_configured(dev))
- return ice_vsi_set_dflt_rss_lut(vsi, new_rx);
+ if (!netif_is_rxfh_configured(dev)) {
+ ret = ice_vsi_set_dflt_rss_lut(vsi, new_rx);
+ goto adev_unlock;
+ }
/* Update rss_size due to change in Rx queues */
vsi->rss_size = ice_get_valid_rss_size(&pf->hw, new_rx);
- return 0;
+adev_unlock:
+ if (locked) {
+ device_unlock(&pf->adev->dev);
+ mutex_unlock(&pf->adev_mutex);
+ }
+ return ret;
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
index 4b3bb19e1d06..5ce413965930 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
+++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
@@ -6,23 +6,6 @@
#include "ice_flow.h"
#include "ice.h"
-/* For supporting double VLAN mode, it is necessary to enable or disable certain
- * boost tcam entries. The metadata labels names that match the following
- * prefixes will be saved to allow enabling double VLAN mode.
- */
-#define ICE_DVM_PRE "BOOST_MAC_VLAN_DVM" /* enable these entries */
-#define ICE_SVM_PRE "BOOST_MAC_VLAN_SVM" /* disable these entries */
-
-/* To support tunneling entries by PF, the package will append the PF number to
- * the label; for example TNL_VXLAN_PF0, TNL_VXLAN_PF1, TNL_VXLAN_PF2, etc.
- */
-#define ICE_TNL_PRE "TNL_"
-static const struct ice_tunnel_type_scan tnls[] = {
- { TNL_VXLAN, "TNL_VXLAN_PF" },
- { TNL_GENEVE, "TNL_GENEVE_PF" },
- { TNL_LAST, "" }
-};
-
static const u32 ice_sect_lkup[ICE_BLK_COUNT][ICE_SECT_COUNT] = {
/* SWITCH */
{
@@ -104,225 +87,6 @@ static u32 ice_sect_id(enum ice_block blk, enum ice_sect sect)
}
/**
- * ice_pkg_val_buf
- * @buf: pointer to the ice buffer
- *
- * This helper function validates a buffer's header.
- */
-static struct ice_buf_hdr *ice_pkg_val_buf(struct ice_buf *buf)
-{
- struct ice_buf_hdr *hdr;
- u16 section_count;
- u16 data_end;
-
- hdr = (struct ice_buf_hdr *)buf->buf;
- /* verify data */
- section_count = le16_to_cpu(hdr->section_count);
- if (section_count < ICE_MIN_S_COUNT || section_count > ICE_MAX_S_COUNT)
- return NULL;
-
- data_end = le16_to_cpu(hdr->data_end);
- if (data_end < ICE_MIN_S_DATA_END || data_end > ICE_MAX_S_DATA_END)
- return NULL;
-
- return hdr;
-}
-
-/**
- * ice_find_buf_table
- * @ice_seg: pointer to the ice segment
- *
- * Returns the address of the buffer table within the ice segment.
- */
-static struct ice_buf_table *ice_find_buf_table(struct ice_seg *ice_seg)
-{
- struct ice_nvm_table *nvms;
-
- nvms = (struct ice_nvm_table *)
- (ice_seg->device_table +
- le32_to_cpu(ice_seg->device_table_count));
-
- return (__force struct ice_buf_table *)
- (nvms->vers + le32_to_cpu(nvms->table_count));
-}
-
-/**
- * ice_pkg_enum_buf
- * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
- * @state: pointer to the enum state
- *
- * This function will enumerate all the buffers in the ice segment. The first
- * call is made with the ice_seg parameter non-NULL; on subsequent calls,
- * ice_seg is set to NULL which continues the enumeration. When the function
- * returns a NULL pointer, then the end of the buffers has been reached, or an
- * unexpected value has been detected (for example an invalid section count or
- * an invalid buffer end value).
- */
-static struct ice_buf_hdr *
-ice_pkg_enum_buf(struct ice_seg *ice_seg, struct ice_pkg_enum *state)
-{
- if (ice_seg) {
- state->buf_table = ice_find_buf_table(ice_seg);
- if (!state->buf_table)
- return NULL;
-
- state->buf_idx = 0;
- return ice_pkg_val_buf(state->buf_table->buf_array);
- }
-
- if (++state->buf_idx < le32_to_cpu(state->buf_table->buf_count))
- return ice_pkg_val_buf(state->buf_table->buf_array +
- state->buf_idx);
- else
- return NULL;
-}
-
-/**
- * ice_pkg_advance_sect
- * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
- * @state: pointer to the enum state
- *
- * This helper function will advance the section within the ice segment,
- * also advancing the buffer if needed.
- */
-static bool
-ice_pkg_advance_sect(struct ice_seg *ice_seg, struct ice_pkg_enum *state)
-{
- if (!ice_seg && !state->buf)
- return false;
-
- if (!ice_seg && state->buf)
- if (++state->sect_idx < le16_to_cpu(state->buf->section_count))
- return true;
-
- state->buf = ice_pkg_enum_buf(ice_seg, state);
- if (!state->buf)
- return false;
-
- /* start of new buffer, reset section index */
- state->sect_idx = 0;
- return true;
-}
-
-/**
- * ice_pkg_enum_section
- * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
- * @state: pointer to the enum state
- * @sect_type: section type to enumerate
- *
- * This function will enumerate all the sections of a particular type in the
- * ice segment. The first call is made with the ice_seg parameter non-NULL;
- * on subsequent calls, ice_seg is set to NULL which continues the enumeration.
- * When the function returns a NULL pointer, then the end of the matching
- * sections has been reached.
- */
-static void *
-ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
- u32 sect_type)
-{
- u16 offset, size;
-
- if (ice_seg)
- state->type = sect_type;
-
- if (!ice_pkg_advance_sect(ice_seg, state))
- return NULL;
-
- /* scan for next matching section */
- while (state->buf->section_entry[state->sect_idx].type !=
- cpu_to_le32(state->type))
- if (!ice_pkg_advance_sect(NULL, state))
- return NULL;
-
- /* validate section */
- offset = le16_to_cpu(state->buf->section_entry[state->sect_idx].offset);
- if (offset < ICE_MIN_S_OFF || offset > ICE_MAX_S_OFF)
- return NULL;
-
- size = le16_to_cpu(state->buf->section_entry[state->sect_idx].size);
- if (size < ICE_MIN_S_SZ || size > ICE_MAX_S_SZ)
- return NULL;
-
- /* make sure the section fits in the buffer */
- if (offset + size > ICE_PKG_BUF_SIZE)
- return NULL;
-
- state->sect_type =
- le32_to_cpu(state->buf->section_entry[state->sect_idx].type);
-
- /* calc pointer to this section */
- state->sect = ((u8 *)state->buf) +
- le16_to_cpu(state->buf->section_entry[state->sect_idx].offset);
-
- return state->sect;
-}
-
-/**
- * ice_pkg_enum_entry
- * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
- * @state: pointer to the enum state
- * @sect_type: section type to enumerate
- * @offset: pointer to variable that receives the offset in the table (optional)
- * @handler: function that handles access to the entries into the section type
- *
- * This function will enumerate all the entries in particular section type in
- * the ice segment. The first call is made with the ice_seg parameter non-NULL;
- * on subsequent calls, ice_seg is set to NULL which continues the enumeration.
- * When the function returns a NULL pointer, then the end of the entries has
- * been reached.
- *
- * Since each section may have a different header and entry size, the handler
- * function is needed to determine the number and location entries in each
- * section.
- *
- * The offset parameter is optional, but should be used for sections that
- * contain an offset for each section table. For such cases, the section handler
- * function must return the appropriate offset + index to give the absolution
- * offset for each entry. For example, if the base for a section's header
- * indicates a base offset of 10, and the index for the entry is 2, then
- * section handler function should set the offset to 10 + 2 = 12.
- */
-static void *
-ice_pkg_enum_entry(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
- u32 sect_type, u32 *offset,
- void *(*handler)(u32 sect_type, void *section,
- u32 index, u32 *offset))
-{
- void *entry;
-
- if (ice_seg) {
- if (!handler)
- return NULL;
-
- if (!ice_pkg_enum_section(ice_seg, state, sect_type))
- return NULL;
-
- state->entry_idx = 0;
- state->handler = handler;
- } else {
- state->entry_idx++;
- }
-
- if (!state->handler)
- return NULL;
-
- /* get entry */
- entry = state->handler(state->sect_type, state->sect, state->entry_idx,
- offset);
- if (!entry) {
- /* end of a section, look for another section of this type */
- if (!ice_pkg_enum_section(NULL, state, 0))
- return NULL;
-
- state->entry_idx = 0;
- entry = state->handler(state->sect_type, state->sect,
- state->entry_idx, offset);
- }
-
- return entry;
-}
-
-/**
* ice_hw_ptype_ena - check if the PTYPE is enabled or not
* @hw: pointer to the HW structure
* @ptype: the hardware PTYPE
@@ -333,312 +97,6 @@ bool ice_hw_ptype_ena(struct ice_hw *hw, u16 ptype)
test_bit(ptype, hw->hw_ptype);
}
-/**
- * ice_marker_ptype_tcam_handler
- * @sect_type: section type
- * @section: pointer to section
- * @index: index of the Marker PType TCAM entry to be returned
- * @offset: pointer to receive absolute offset, always 0 for ptype TCAM sections
- *
- * This is a callback function that can be passed to ice_pkg_enum_entry.
- * Handles enumeration of individual Marker PType TCAM entries.
- */
-static void *
-ice_marker_ptype_tcam_handler(u32 sect_type, void *section, u32 index,
- u32 *offset)
-{
- struct ice_marker_ptype_tcam_section *marker_ptype;
-
- if (sect_type != ICE_SID_RXPARSER_MARKER_PTYPE)
- return NULL;
-
- if (index > ICE_MAX_MARKER_PTYPE_TCAMS_IN_BUF)
- return NULL;
-
- if (offset)
- *offset = 0;
-
- marker_ptype = section;
- if (index >= le16_to_cpu(marker_ptype->count))
- return NULL;
-
- return marker_ptype->tcam + index;
-}
-
-/**
- * ice_fill_hw_ptype - fill the enabled PTYPE bit information
- * @hw: pointer to the HW structure
- */
-static void ice_fill_hw_ptype(struct ice_hw *hw)
-{
- struct ice_marker_ptype_tcam_entry *tcam;
- struct ice_seg *seg = hw->seg;
- struct ice_pkg_enum state;
-
- bitmap_zero(hw->hw_ptype, ICE_FLOW_PTYPE_MAX);
- if (!seg)
- return;
-
- memset(&state, 0, sizeof(state));
-
- do {
- tcam = ice_pkg_enum_entry(seg, &state,
- ICE_SID_RXPARSER_MARKER_PTYPE, NULL,
- ice_marker_ptype_tcam_handler);
- if (tcam &&
- le16_to_cpu(tcam->addr) < ICE_MARKER_PTYPE_TCAM_ADDR_MAX &&
- le16_to_cpu(tcam->ptype) < ICE_FLOW_PTYPE_MAX)
- set_bit(le16_to_cpu(tcam->ptype), hw->hw_ptype);
-
- seg = NULL;
- } while (tcam);
-}
-
-/**
- * ice_boost_tcam_handler
- * @sect_type: section type
- * @section: pointer to section
- * @index: index of the boost TCAM entry to be returned
- * @offset: pointer to receive absolute offset, always 0 for boost TCAM sections
- *
- * This is a callback function that can be passed to ice_pkg_enum_entry.
- * Handles enumeration of individual boost TCAM entries.
- */
-static void *
-ice_boost_tcam_handler(u32 sect_type, void *section, u32 index, u32 *offset)
-{
- struct ice_boost_tcam_section *boost;
-
- if (!section)
- return NULL;
-
- if (sect_type != ICE_SID_RXPARSER_BOOST_TCAM)
- return NULL;
-
- /* cppcheck-suppress nullPointer */
- if (index > ICE_MAX_BST_TCAMS_IN_BUF)
- return NULL;
-
- if (offset)
- *offset = 0;
-
- boost = section;
- if (index >= le16_to_cpu(boost->count))
- return NULL;
-
- return boost->tcam + index;
-}
-
-/**
- * ice_find_boost_entry
- * @ice_seg: pointer to the ice segment (non-NULL)
- * @addr: Boost TCAM address of entry to search for
- * @entry: returns pointer to the entry
- *
- * Finds a particular Boost TCAM entry and returns a pointer to that entry
- * if it is found. The ice_seg parameter must not be NULL since the first call
- * to ice_pkg_enum_entry requires a pointer to an actual ice_segment structure.
- */
-static int
-ice_find_boost_entry(struct ice_seg *ice_seg, u16 addr,
- struct ice_boost_tcam_entry **entry)
-{
- struct ice_boost_tcam_entry *tcam;
- struct ice_pkg_enum state;
-
- memset(&state, 0, sizeof(state));
-
- if (!ice_seg)
- return -EINVAL;
-
- do {
- tcam = ice_pkg_enum_entry(ice_seg, &state,
- ICE_SID_RXPARSER_BOOST_TCAM, NULL,
- ice_boost_tcam_handler);
- if (tcam && le16_to_cpu(tcam->addr) == addr) {
- *entry = tcam;
- return 0;
- }
-
- ice_seg = NULL;
- } while (tcam);
-
- *entry = NULL;
- return -EIO;
-}
-
-/**
- * ice_label_enum_handler
- * @sect_type: section type
- * @section: pointer to section
- * @index: index of the label entry to be returned
- * @offset: pointer to receive absolute offset, always zero for label sections
- *
- * This is a callback function that can be passed to ice_pkg_enum_entry.
- * Handles enumeration of individual label entries.
- */
-static void *
-ice_label_enum_handler(u32 __always_unused sect_type, void *section, u32 index,
- u32 *offset)
-{
- struct ice_label_section *labels;
-
- if (!section)
- return NULL;
-
- /* cppcheck-suppress nullPointer */
- if (index > ICE_MAX_LABELS_IN_BUF)
- return NULL;
-
- if (offset)
- *offset = 0;
-
- labels = section;
- if (index >= le16_to_cpu(labels->count))
- return NULL;
-
- return labels->label + index;
-}
-
-/**
- * ice_enum_labels
- * @ice_seg: pointer to the ice segment (NULL on subsequent calls)
- * @type: the section type that will contain the label (0 on subsequent calls)
- * @state: ice_pkg_enum structure that will hold the state of the enumeration
- * @value: pointer to a value that will return the label's value if found
- *
- * Enumerates a list of labels in the package. The caller will call
- * ice_enum_labels(ice_seg, type, ...) to start the enumeration, then call
- * ice_enum_labels(NULL, 0, ...) to continue. When the function returns a NULL
- * the end of the list has been reached.
- */
-static char *
-ice_enum_labels(struct ice_seg *ice_seg, u32 type, struct ice_pkg_enum *state,
- u16 *value)
-{
- struct ice_label *label;
-
- /* Check for valid label section on first call */
- if (type && !(type >= ICE_SID_LBL_FIRST && type <= ICE_SID_LBL_LAST))
- return NULL;
-
- label = ice_pkg_enum_entry(ice_seg, state, type, NULL,
- ice_label_enum_handler);
- if (!label)
- return NULL;
-
- *value = le16_to_cpu(label->value);
- return label->name;
-}
-
-/**
- * ice_add_tunnel_hint
- * @hw: pointer to the HW structure
- * @label_name: label text
- * @val: value of the tunnel port boost entry
- */
-static void ice_add_tunnel_hint(struct ice_hw *hw, char *label_name, u16 val)
-{
- if (hw->tnl.count < ICE_TUNNEL_MAX_ENTRIES) {
- u16 i;
-
- for (i = 0; tnls[i].type != TNL_LAST; i++) {
- size_t len = strlen(tnls[i].label_prefix);
-
- /* Look for matching label start, before continuing */
- if (strncmp(label_name, tnls[i].label_prefix, len))
- continue;
-
- /* Make sure this label matches our PF. Note that the PF
- * character ('0' - '7') will be located where our
- * prefix string's null terminator is located.
- */
- if ((label_name[len] - '0') == hw->pf_id) {
- hw->tnl.tbl[hw->tnl.count].type = tnls[i].type;
- hw->tnl.tbl[hw->tnl.count].valid = false;
- hw->tnl.tbl[hw->tnl.count].boost_addr = val;
- hw->tnl.tbl[hw->tnl.count].port = 0;
- hw->tnl.count++;
- break;
- }
- }
- }
-}
-
-/**
- * ice_add_dvm_hint
- * @hw: pointer to the HW structure
- * @val: value of the boost entry
- * @enable: true if entry needs to be enabled, or false if needs to be disabled
- */
-static void ice_add_dvm_hint(struct ice_hw *hw, u16 val, bool enable)
-{
- if (hw->dvm_upd.count < ICE_DVM_MAX_ENTRIES) {
- hw->dvm_upd.tbl[hw->dvm_upd.count].boost_addr = val;
- hw->dvm_upd.tbl[hw->dvm_upd.count].enable = enable;
- hw->dvm_upd.count++;
- }
-}
-
-/**
- * ice_init_pkg_hints
- * @hw: pointer to the HW structure
- * @ice_seg: pointer to the segment of the package scan (non-NULL)
- *
- * This function will scan the package and save off relevant information
- * (hints or metadata) for driver use. The ice_seg parameter must not be NULL
- * since the first call to ice_enum_labels requires a pointer to an actual
- * ice_seg structure.
- */
-static void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg)
-{
- struct ice_pkg_enum state;
- char *label_name;
- u16 val;
- int i;
-
- memset(&hw->tnl, 0, sizeof(hw->tnl));
- memset(&state, 0, sizeof(state));
-
- if (!ice_seg)
- return;
-
- label_name = ice_enum_labels(ice_seg, ICE_SID_LBL_RXPARSER_TMEM, &state,
- &val);
-
- while (label_name) {
- if (!strncmp(label_name, ICE_TNL_PRE, strlen(ICE_TNL_PRE)))
- /* check for a tunnel entry */
- ice_add_tunnel_hint(hw, label_name, val);
-
- /* check for a dvm mode entry */
- else if (!strncmp(label_name, ICE_DVM_PRE, strlen(ICE_DVM_PRE)))
- ice_add_dvm_hint(hw, val, true);
-
- /* check for a svm mode entry */
- else if (!strncmp(label_name, ICE_SVM_PRE, strlen(ICE_SVM_PRE)))
- ice_add_dvm_hint(hw, val, false);
-
- label_name = ice_enum_labels(NULL, 0, &state, &val);
- }
-
- /* Cache the appropriate boost TCAM entry pointers for tunnels */
- for (i = 0; i < hw->tnl.count; i++) {
- ice_find_boost_entry(ice_seg, hw->tnl.tbl[i].boost_addr,
- &hw->tnl.tbl[i].boost_entry);
- if (hw->tnl.tbl[i].boost_entry) {
- hw->tnl.tbl[i].valid = true;
- if (hw->tnl.tbl[i].type < __TNL_TYPE_CNT)
- hw->tnl.valid_count[hw->tnl.tbl[i].type]++;
- }
- }
-
- /* Cache the appropriate boost TCAM entry pointers for DVM and SVM */
- for (i = 0; i < hw->dvm_upd.count; i++)
- ice_find_boost_entry(ice_seg, hw->dvm_upd.tbl[i].boost_addr,
- &hw->dvm_upd.tbl[i].boost_entry);
-}
-
/* Key creation */
#define ICE_DC_KEY 0x1 /* don't care */
@@ -810,51 +268,6 @@ ice_set_key(u8 *key, u16 size, u8 *val, u8 *upd, u8 *dc, u8 *nm, u16 off,
}
/**
- * ice_acquire_global_cfg_lock
- * @hw: pointer to the HW structure
- * @access: access type (read or write)
- *
- * This function will request ownership of the global config lock for reading
- * or writing of the package. When attempting to obtain write access, the
- * caller must check for the following two return values:
- *
- * 0 - Means the caller has acquired the global config lock
- * and can perform writing of the package.
- * -EALREADY - Indicates another driver has already written the
- * package or has found that no update was necessary; in
- * this case, the caller can just skip performing any
- * update of the package.
- */
-static int
-ice_acquire_global_cfg_lock(struct ice_hw *hw,
- enum ice_aq_res_access_type access)
-{
- int status;
-
- status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, access,
- ICE_GLOBAL_CFG_LOCK_TIMEOUT);
-
- if (!status)
- mutex_lock(&ice_global_cfg_lock_sw);
- else if (status == -EALREADY)
- ice_debug(hw, ICE_DBG_PKG, "Global config lock: No work to do\n");
-
- return status;
-}
-
-/**
- * ice_release_global_cfg_lock
- * @hw: pointer to the HW structure
- *
- * This function will release the global config lock.
- */
-static void ice_release_global_cfg_lock(struct ice_hw *hw)
-{
- mutex_unlock(&ice_global_cfg_lock_sw);
- ice_release_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID);
-}
-
-/**
* ice_acquire_change_lock
* @hw: pointer to the HW structure
* @access: access type (read or write)
@@ -880,1325 +293,6 @@ void ice_release_change_lock(struct ice_hw *hw)
}
/**
- * ice_aq_download_pkg
- * @hw: pointer to the hardware structure
- * @pkg_buf: the package buffer to transfer
- * @buf_size: the size of the package buffer
- * @last_buf: last buffer indicator
- * @error_offset: returns error offset
- * @error_info: returns error information
- * @cd: pointer to command details structure or NULL
- *
- * Download Package (0x0C40)
- */
-static int
-ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
- u16 buf_size, bool last_buf, u32 *error_offset,
- u32 *error_info, struct ice_sq_cd *cd)
-{
- struct ice_aqc_download_pkg *cmd;
- struct ice_aq_desc desc;
- int status;
-
- if (error_offset)
- *error_offset = 0;
- if (error_info)
- *error_info = 0;
-
- cmd = &desc.params.download_pkg;
- ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_download_pkg);
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
-
- if (last_buf)
- cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;
-
- status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
- if (status == -EIO) {
- /* Read error from buffer only when the FW returned an error */
- struct ice_aqc_download_pkg_resp *resp;
-
- resp = (struct ice_aqc_download_pkg_resp *)pkg_buf;
- if (error_offset)
- *error_offset = le32_to_cpu(resp->error_offset);
- if (error_info)
- *error_info = le32_to_cpu(resp->error_info);
- }
-
- return status;
-}
-
-/**
- * ice_aq_upload_section
- * @hw: pointer to the hardware structure
- * @pkg_buf: the package buffer which will receive the section
- * @buf_size: the size of the package buffer
- * @cd: pointer to command details structure or NULL
- *
- * Upload Section (0x0C41)
- */
-int
-ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
- u16 buf_size, struct ice_sq_cd *cd)
-{
- struct ice_aq_desc desc;
-
- ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_upload_section);
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
-
- return ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
-}
-
-/**
- * ice_aq_update_pkg
- * @hw: pointer to the hardware structure
- * @pkg_buf: the package cmd buffer
- * @buf_size: the size of the package cmd buffer
- * @last_buf: last buffer indicator
- * @error_offset: returns error offset
- * @error_info: returns error information
- * @cd: pointer to command details structure or NULL
- *
- * Update Package (0x0C42)
- */
-static int
-ice_aq_update_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, u16 buf_size,
- bool last_buf, u32 *error_offset, u32 *error_info,
- struct ice_sq_cd *cd)
-{
- struct ice_aqc_download_pkg *cmd;
- struct ice_aq_desc desc;
- int status;
-
- if (error_offset)
- *error_offset = 0;
- if (error_info)
- *error_info = 0;
-
- cmd = &desc.params.download_pkg;
- ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_pkg);
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
-
- if (last_buf)
- cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;
-
- status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
- if (status == -EIO) {
- /* Read error from buffer only when the FW returned an error */
- struct ice_aqc_download_pkg_resp *resp;
-
- resp = (struct ice_aqc_download_pkg_resp *)pkg_buf;
- if (error_offset)
- *error_offset = le32_to_cpu(resp->error_offset);
- if (error_info)
- *error_info = le32_to_cpu(resp->error_info);
- }
-
- return status;
-}
-
-/**
- * ice_find_seg_in_pkg
- * @hw: pointer to the hardware structure
- * @seg_type: the segment type to search for (i.e., SEGMENT_TYPE_CPK)
- * @pkg_hdr: pointer to the package header to be searched
- *
- * This function searches a package file for a particular segment type. On
- * success it returns a pointer to the segment header, otherwise it will
- * return NULL.
- */
-static struct ice_generic_seg_hdr *
-ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type,
- struct ice_pkg_hdr *pkg_hdr)
-{
- u32 i;
-
- ice_debug(hw, ICE_DBG_PKG, "Package format version: %d.%d.%d.%d\n",
- pkg_hdr->pkg_format_ver.major, pkg_hdr->pkg_format_ver.minor,
- pkg_hdr->pkg_format_ver.update,
- pkg_hdr->pkg_format_ver.draft);
-
- /* Search all package segments for the requested segment type */
- for (i = 0; i < le32_to_cpu(pkg_hdr->seg_count); i++) {
- struct ice_generic_seg_hdr *seg;
-
- seg = (struct ice_generic_seg_hdr *)
- ((u8 *)pkg_hdr + le32_to_cpu(pkg_hdr->seg_offset[i]));
-
- if (le32_to_cpu(seg->seg_type) == seg_type)
- return seg;
- }
-
- return NULL;
-}
-
-/**
- * ice_update_pkg_no_lock
- * @hw: pointer to the hardware structure
- * @bufs: pointer to an array of buffers
- * @count: the number of buffers in the array
- */
-static int
-ice_update_pkg_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
-{
- int status = 0;
- u32 i;
-
- for (i = 0; i < count; i++) {
- struct ice_buf_hdr *bh = (struct ice_buf_hdr *)(bufs + i);
- bool last = ((i + 1) == count);
- u32 offset, info;
-
- status = ice_aq_update_pkg(hw, bh, le16_to_cpu(bh->data_end),
- last, &offset, &info, NULL);
-
- if (status) {
- ice_debug(hw, ICE_DBG_PKG, "Update pkg failed: err %d off %d inf %d\n",
- status, offset, info);
- break;
- }
- }
-
- return status;
-}
-
-/**
- * ice_update_pkg
- * @hw: pointer to the hardware structure
- * @bufs: pointer to an array of buffers
- * @count: the number of buffers in the array
- *
- * Obtains change lock and updates package.
- */
-static int ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
-{
- int status;
-
- status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
- if (status)
- return status;
-
- status = ice_update_pkg_no_lock(hw, bufs, count);
-
- ice_release_change_lock(hw);
-
- return status;
-}
-
-static enum ice_ddp_state ice_map_aq_err_to_ddp_state(enum ice_aq_err aq_err)
-{
- switch (aq_err) {
- case ICE_AQ_RC_ENOSEC:
- case ICE_AQ_RC_EBADSIG:
- return ICE_DDP_PKG_FILE_SIGNATURE_INVALID;
- case ICE_AQ_RC_ESVN:
- return ICE_DDP_PKG_FILE_REVISION_TOO_LOW;
- case ICE_AQ_RC_EBADMAN:
- case ICE_AQ_RC_EBADBUF:
- return ICE_DDP_PKG_LOAD_ERROR;
- default:
- return ICE_DDP_PKG_ERR;
- }
-}
-
-/**
- * ice_dwnld_cfg_bufs
- * @hw: pointer to the hardware structure
- * @bufs: pointer to an array of buffers
- * @count: the number of buffers in the array
- *
- * Obtains global config lock and downloads the package configuration buffers
- * to the firmware. Metadata buffers are skipped, and the first metadata buffer
- * found indicates that the rest of the buffers are all metadata buffers.
- */
-static enum ice_ddp_state
-ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
-{
- enum ice_ddp_state state = ICE_DDP_PKG_SUCCESS;
- struct ice_buf_hdr *bh;
- enum ice_aq_err err;
- u32 offset, info, i;
- int status;
-
- if (!bufs || !count)
- return ICE_DDP_PKG_ERR;
-
- /* If the first buffer's first section has its metadata bit set
- * then there are no buffers to be downloaded, and the operation is
- * considered a success.
- */
- bh = (struct ice_buf_hdr *)bufs;
- if (le32_to_cpu(bh->section_entry[0].type) & ICE_METADATA_BUF)
- return ICE_DDP_PKG_SUCCESS;
-
- status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE);
- if (status) {
- if (status == -EALREADY)
- return ICE_DDP_PKG_ALREADY_LOADED;
- return ice_map_aq_err_to_ddp_state(hw->adminq.sq_last_status);
- }
-
- for (i = 0; i < count; i++) {
- bool last = ((i + 1) == count);
-
- if (!last) {
- /* check next buffer for metadata flag */
- bh = (struct ice_buf_hdr *)(bufs + i + 1);
-
- /* A set metadata flag in the next buffer will signal
- * that the current buffer will be the last buffer
- * downloaded
- */
- if (le16_to_cpu(bh->section_count))
- if (le32_to_cpu(bh->section_entry[0].type) &
- ICE_METADATA_BUF)
- last = true;
- }
-
- bh = (struct ice_buf_hdr *)(bufs + i);
-
- status = ice_aq_download_pkg(hw, bh, ICE_PKG_BUF_SIZE, last,
- &offset, &info, NULL);
-
- /* Save AQ status from download package */
- if (status) {
- ice_debug(hw, ICE_DBG_PKG, "Pkg download failed: err %d off %d inf %d\n",
- status, offset, info);
- err = hw->adminq.sq_last_status;
- state = ice_map_aq_err_to_ddp_state(err);
- break;
- }
-
- if (last)
- break;
- }
-
- if (!status) {
- status = ice_set_vlan_mode(hw);
- if (status)
- ice_debug(hw, ICE_DBG_PKG, "Failed to set VLAN mode: err %d\n",
- status);
- }
-
- ice_release_global_cfg_lock(hw);
-
- return state;
-}
-
-/**
- * ice_aq_get_pkg_info_list
- * @hw: pointer to the hardware structure
- * @pkg_info: the buffer which will receive the information list
- * @buf_size: the size of the pkg_info information buffer
- * @cd: pointer to command details structure or NULL
- *
- * Get Package Info List (0x0C43)
- */
-static int
-ice_aq_get_pkg_info_list(struct ice_hw *hw,
- struct ice_aqc_get_pkg_info_resp *pkg_info,
- u16 buf_size, struct ice_sq_cd *cd)
-{
- struct ice_aq_desc desc;
-
- ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_pkg_info_list);
-
- return ice_aq_send_cmd(hw, &desc, pkg_info, buf_size, cd);
-}
-
-/**
- * ice_download_pkg
- * @hw: pointer to the hardware structure
- * @ice_seg: pointer to the segment of the package to be downloaded
- *
- * Handles the download of a complete package.
- */
-static enum ice_ddp_state
-ice_download_pkg(struct ice_hw *hw, struct ice_seg *ice_seg)
-{
- struct ice_buf_table *ice_buf_tbl;
- int status;
-
- ice_debug(hw, ICE_DBG_PKG, "Segment format version: %d.%d.%d.%d\n",
- ice_seg->hdr.seg_format_ver.major,
- ice_seg->hdr.seg_format_ver.minor,
- ice_seg->hdr.seg_format_ver.update,
- ice_seg->hdr.seg_format_ver.draft);
-
- ice_debug(hw, ICE_DBG_PKG, "Seg: type 0x%X, size %d, name %s\n",
- le32_to_cpu(ice_seg->hdr.seg_type),
- le32_to_cpu(ice_seg->hdr.seg_size), ice_seg->hdr.seg_id);
-
- ice_buf_tbl = ice_find_buf_table(ice_seg);
-
- ice_debug(hw, ICE_DBG_PKG, "Seg buf count: %d\n",
- le32_to_cpu(ice_buf_tbl->buf_count));
-
- status = ice_dwnld_cfg_bufs(hw, ice_buf_tbl->buf_array,
- le32_to_cpu(ice_buf_tbl->buf_count));
-
- ice_post_pkg_dwnld_vlan_mode_cfg(hw);
-
- return status;
-}
-
-/**
- * ice_init_pkg_info
- * @hw: pointer to the hardware structure
- * @pkg_hdr: pointer to the driver's package hdr
- *
- * Saves off the package details into the HW structure.
- */
-static enum ice_ddp_state
-ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
-{
- struct ice_generic_seg_hdr *seg_hdr;
-
- if (!pkg_hdr)
- return ICE_DDP_PKG_ERR;
-
- seg_hdr = ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, pkg_hdr);
- if (seg_hdr) {
- struct ice_meta_sect *meta;
- struct ice_pkg_enum state;
-
- memset(&state, 0, sizeof(state));
-
- /* Get package information from the Metadata Section */
- meta = ice_pkg_enum_section((struct ice_seg *)seg_hdr, &state,
- ICE_SID_METADATA);
- if (!meta) {
- ice_debug(hw, ICE_DBG_INIT, "Did not find ice metadata section in package\n");
- return ICE_DDP_PKG_INVALID_FILE;
- }
-
- hw->pkg_ver = meta->ver;
- memcpy(hw->pkg_name, meta->name, sizeof(meta->name));
-
- ice_debug(hw, ICE_DBG_PKG, "Pkg: %d.%d.%d.%d, %s\n",
- meta->ver.major, meta->ver.minor, meta->ver.update,
- meta->ver.draft, meta->name);
-
- hw->ice_seg_fmt_ver = seg_hdr->seg_format_ver;
- memcpy(hw->ice_seg_id, seg_hdr->seg_id,
- sizeof(hw->ice_seg_id));
-
- ice_debug(hw, ICE_DBG_PKG, "Ice Seg: %d.%d.%d.%d, %s\n",
- seg_hdr->seg_format_ver.major,
- seg_hdr->seg_format_ver.minor,
- seg_hdr->seg_format_ver.update,
- seg_hdr->seg_format_ver.draft,
- seg_hdr->seg_id);
- } else {
- ice_debug(hw, ICE_DBG_INIT, "Did not find ice segment in driver package\n");
- return ICE_DDP_PKG_INVALID_FILE;
- }
-
- return ICE_DDP_PKG_SUCCESS;
-}
-
-/**
- * ice_get_pkg_info
- * @hw: pointer to the hardware structure
- *
- * Store details of the package currently loaded in HW into the HW structure.
- */
-static enum ice_ddp_state ice_get_pkg_info(struct ice_hw *hw)
-{
- enum ice_ddp_state state = ICE_DDP_PKG_SUCCESS;
- struct ice_aqc_get_pkg_info_resp *pkg_info;
- u16 size;
- u32 i;
-
- size = struct_size(pkg_info, pkg_info, ICE_PKG_CNT);
- pkg_info = kzalloc(size, GFP_KERNEL);
- if (!pkg_info)
- return ICE_DDP_PKG_ERR;
-
- if (ice_aq_get_pkg_info_list(hw, pkg_info, size, NULL)) {
- state = ICE_DDP_PKG_ERR;
- goto init_pkg_free_alloc;
- }
-
- for (i = 0; i < le32_to_cpu(pkg_info->count); i++) {
-#define ICE_PKG_FLAG_COUNT 4
- char flags[ICE_PKG_FLAG_COUNT + 1] = { 0 };
- u8 place = 0;
-
- if (pkg_info->pkg_info[i].is_active) {
- flags[place++] = 'A';
- hw->active_pkg_ver = pkg_info->pkg_info[i].ver;
- hw->active_track_id =
- le32_to_cpu(pkg_info->pkg_info[i].track_id);
- memcpy(hw->active_pkg_name,
- pkg_info->pkg_info[i].name,
- sizeof(pkg_info->pkg_info[i].name));
- hw->active_pkg_in_nvm = pkg_info->pkg_info[i].is_in_nvm;
- }
- if (pkg_info->pkg_info[i].is_active_at_boot)
- flags[place++] = 'B';
- if (pkg_info->pkg_info[i].is_modified)
- flags[place++] = 'M';
- if (pkg_info->pkg_info[i].is_in_nvm)
- flags[place++] = 'N';
-
- ice_debug(hw, ICE_DBG_PKG, "Pkg[%d]: %d.%d.%d.%d,%s,%s\n",
- i, pkg_info->pkg_info[i].ver.major,
- pkg_info->pkg_info[i].ver.minor,
- pkg_info->pkg_info[i].ver.update,
- pkg_info->pkg_info[i].ver.draft,
- pkg_info->pkg_info[i].name, flags);
- }
-
-init_pkg_free_alloc:
- kfree(pkg_info);
-
- return state;
-}
-
-/**
- * ice_verify_pkg - verify package
- * @pkg: pointer to the package buffer
- * @len: size of the package buffer
- *
- * Verifies various attributes of the package file, including length, format
- * version, and the requirement of at least one segment.
- */
-static enum ice_ddp_state ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len)
-{
- u32 seg_count;
- u32 i;
-
- if (len < struct_size(pkg, seg_offset, 1))
- return ICE_DDP_PKG_INVALID_FILE;
-
- if (pkg->pkg_format_ver.major != ICE_PKG_FMT_VER_MAJ ||
- pkg->pkg_format_ver.minor != ICE_PKG_FMT_VER_MNR ||
- pkg->pkg_format_ver.update != ICE_PKG_FMT_VER_UPD ||
- pkg->pkg_format_ver.draft != ICE_PKG_FMT_VER_DFT)
- return ICE_DDP_PKG_INVALID_FILE;
-
- /* pkg must have at least one segment */
- seg_count = le32_to_cpu(pkg->seg_count);
- if (seg_count < 1)
- return ICE_DDP_PKG_INVALID_FILE;
-
- /* make sure segment array fits in package length */
- if (len < struct_size(pkg, seg_offset, seg_count))
- return ICE_DDP_PKG_INVALID_FILE;
-
- /* all segments must fit within length */
- for (i = 0; i < seg_count; i++) {
- u32 off = le32_to_cpu(pkg->seg_offset[i]);
- struct ice_generic_seg_hdr *seg;
-
- /* segment header must fit */
- if (len < off + sizeof(*seg))
- return ICE_DDP_PKG_INVALID_FILE;
-
- seg = (struct ice_generic_seg_hdr *)((u8 *)pkg + off);
-
- /* segment body must fit */
- if (len < off + le32_to_cpu(seg->seg_size))
- return ICE_DDP_PKG_INVALID_FILE;
- }
-
- return ICE_DDP_PKG_SUCCESS;
-}
-
-/**
- * ice_free_seg - free package segment pointer
- * @hw: pointer to the hardware structure
- *
- * Frees the package segment pointer in the proper manner, depending on if the
- * segment was allocated or just the passed in pointer was stored.
- */
-void ice_free_seg(struct ice_hw *hw)
-{
- if (hw->pkg_copy) {
- devm_kfree(ice_hw_to_dev(hw), hw->pkg_copy);
- hw->pkg_copy = NULL;
- hw->pkg_size = 0;
- }
- hw->seg = NULL;
-}
-
-/**
- * ice_init_pkg_regs - initialize additional package registers
- * @hw: pointer to the hardware structure
- */
-static void ice_init_pkg_regs(struct ice_hw *hw)
-{
-#define ICE_SW_BLK_INP_MASK_L 0xFFFFFFFF
-#define ICE_SW_BLK_INP_MASK_H 0x0000FFFF
-#define ICE_SW_BLK_IDX 0
-
- /* setup Switch block input mask, which is 48-bits in two parts */
- wr32(hw, GL_PREEXT_L2_PMASK0(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_L);
- wr32(hw, GL_PREEXT_L2_PMASK1(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_H);
-}
-
-/**
- * ice_chk_pkg_version - check package version for compatibility with driver
- * @pkg_ver: pointer to a version structure to check
- *
- * Check to make sure that the package about to be downloaded is compatible with
- * the driver. To be compatible, the major and minor components of the package
- * version must match our ICE_PKG_SUPP_VER_MAJ and ICE_PKG_SUPP_VER_MNR
- * definitions.
- */
-static enum ice_ddp_state ice_chk_pkg_version(struct ice_pkg_ver *pkg_ver)
-{
- if (pkg_ver->major > ICE_PKG_SUPP_VER_MAJ ||
- (pkg_ver->major == ICE_PKG_SUPP_VER_MAJ &&
- pkg_ver->minor > ICE_PKG_SUPP_VER_MNR))
- return ICE_DDP_PKG_FILE_VERSION_TOO_HIGH;
- else if (pkg_ver->major < ICE_PKG_SUPP_VER_MAJ ||
- (pkg_ver->major == ICE_PKG_SUPP_VER_MAJ &&
- pkg_ver->minor < ICE_PKG_SUPP_VER_MNR))
- return ICE_DDP_PKG_FILE_VERSION_TOO_LOW;
-
- return ICE_DDP_PKG_SUCCESS;
-}
-
-/**
- * ice_chk_pkg_compat
- * @hw: pointer to the hardware structure
- * @ospkg: pointer to the package hdr
- * @seg: pointer to the package segment hdr
- *
- * This function checks the package version compatibility with driver and NVM
- */
-static enum ice_ddp_state
-ice_chk_pkg_compat(struct ice_hw *hw, struct ice_pkg_hdr *ospkg,
- struct ice_seg **seg)
-{
- struct ice_aqc_get_pkg_info_resp *pkg;
- enum ice_ddp_state state;
- u16 size;
- u32 i;
-
- /* Check package version compatibility */
- state = ice_chk_pkg_version(&hw->pkg_ver);
- if (state) {
- ice_debug(hw, ICE_DBG_INIT, "Package version check failed.\n");
- return state;
- }
-
- /* find ICE segment in given package */
- *seg = (struct ice_seg *)ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE,
- ospkg);
- if (!*seg) {
- ice_debug(hw, ICE_DBG_INIT, "no ice segment in package.\n");
- return ICE_DDP_PKG_INVALID_FILE;
- }
-
- /* Check if FW is compatible with the OS package */
- size = struct_size(pkg, pkg_info, ICE_PKG_CNT);
- pkg = kzalloc(size, GFP_KERNEL);
- if (!pkg)
- return ICE_DDP_PKG_ERR;
-
- if (ice_aq_get_pkg_info_list(hw, pkg, size, NULL)) {
- state = ICE_DDP_PKG_LOAD_ERROR;
- goto fw_ddp_compat_free_alloc;
- }
-
- for (i = 0; i < le32_to_cpu(pkg->count); i++) {
- /* loop till we find the NVM package */
- if (!pkg->pkg_info[i].is_in_nvm)
- continue;
- if ((*seg)->hdr.seg_format_ver.major !=
- pkg->pkg_info[i].ver.major ||
- (*seg)->hdr.seg_format_ver.minor >
- pkg->pkg_info[i].ver.minor) {
- state = ICE_DDP_PKG_FW_MISMATCH;
- ice_debug(hw, ICE_DBG_INIT, "OS package is not compatible with NVM.\n");
- }
- /* done processing NVM package so break */
- break;
- }
-fw_ddp_compat_free_alloc:
- kfree(pkg);
- return state;
-}
-
-/**
- * ice_sw_fv_handler
- * @sect_type: section type
- * @section: pointer to section
- * @index: index of the field vector entry to be returned
- * @offset: ptr to variable that receives the offset in the field vector table
- *
- * This is a callback function that can be passed to ice_pkg_enum_entry.
- * This function treats the given section as of type ice_sw_fv_section and
- * enumerates offset field. "offset" is an index into the field vector table.
- */
-static void *
-ice_sw_fv_handler(u32 sect_type, void *section, u32 index, u32 *offset)
-{
- struct ice_sw_fv_section *fv_section = section;
-
- if (!section || sect_type != ICE_SID_FLD_VEC_SW)
- return NULL;
- if (index >= le16_to_cpu(fv_section->count))
- return NULL;
- if (offset)
- /* "index" passed in to this function is relative to a given
- * 4k block. To get to the true index into the field vector
- * table need to add the relative index to the base_offset
- * field of this section
- */
- *offset = le16_to_cpu(fv_section->base_offset) + index;
- return fv_section->fv + index;
-}
-
-/**
- * ice_get_prof_index_max - get the max profile index for used profile
- * @hw: pointer to the HW struct
- *
- * Calling this function will get the max profile index for used profile
- * and store the index number in struct ice_switch_info *switch_info
- * in HW for following use.
- */
-static int ice_get_prof_index_max(struct ice_hw *hw)
-{
- u16 prof_index = 0, j, max_prof_index = 0;
- struct ice_pkg_enum state;
- struct ice_seg *ice_seg;
- bool flag = false;
- struct ice_fv *fv;
- u32 offset;
-
- memset(&state, 0, sizeof(state));
-
- if (!hw->seg)
- return -EINVAL;
-
- ice_seg = hw->seg;
-
- do {
- fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
- &offset, ice_sw_fv_handler);
- if (!fv)
- break;
- ice_seg = NULL;
-
- /* in the profile that not be used, the prot_id is set to 0xff
- * and the off is set to 0x1ff for all the field vectors.
- */
- for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
- if (fv->ew[j].prot_id != ICE_PROT_INVALID ||
- fv->ew[j].off != ICE_FV_OFFSET_INVAL)
- flag = true;
- if (flag && prof_index > max_prof_index)
- max_prof_index = prof_index;
-
- prof_index++;
- flag = false;
- } while (fv);
-
- hw->switch_info->max_used_prof_index = max_prof_index;
-
- return 0;
-}
-
-/**
- * ice_get_ddp_pkg_state - get DDP pkg state after download
- * @hw: pointer to the HW struct
- * @already_loaded: indicates if pkg was already loaded onto the device
- */
-static enum ice_ddp_state
-ice_get_ddp_pkg_state(struct ice_hw *hw, bool already_loaded)
-{
- if (hw->pkg_ver.major == hw->active_pkg_ver.major &&
- hw->pkg_ver.minor == hw->active_pkg_ver.minor &&
- hw->pkg_ver.update == hw->active_pkg_ver.update &&
- hw->pkg_ver.draft == hw->active_pkg_ver.draft &&
- !memcmp(hw->pkg_name, hw->active_pkg_name, sizeof(hw->pkg_name))) {
- if (already_loaded)
- return ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED;
- else
- return ICE_DDP_PKG_SUCCESS;
- } else if (hw->active_pkg_ver.major != ICE_PKG_SUPP_VER_MAJ ||
- hw->active_pkg_ver.minor != ICE_PKG_SUPP_VER_MNR) {
- return ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED;
- } else if (hw->active_pkg_ver.major == ICE_PKG_SUPP_VER_MAJ &&
- hw->active_pkg_ver.minor == ICE_PKG_SUPP_VER_MNR) {
- return ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED;
- } else {
- return ICE_DDP_PKG_ERR;
- }
-}
-
-/**
- * ice_init_pkg - initialize/download package
- * @hw: pointer to the hardware structure
- * @buf: pointer to the package buffer
- * @len: size of the package buffer
- *
- * This function initializes a package. The package contains HW tables
- * required to do packet processing. First, the function extracts package
- * information such as version. Then it finds the ice configuration segment
- * within the package; this function then saves a copy of the segment pointer
- * within the supplied package buffer. Next, the function will cache any hints
- * from the package, followed by downloading the package itself. Note, that if
- * a previous PF driver has already downloaded the package successfully, then
- * the current driver will not have to download the package again.
- *
- * The local package contents will be used to query default behavior and to
- * update specific sections of the HW's version of the package (e.g. to update
- * the parse graph to understand new protocols).
- *
- * This function stores a pointer to the package buffer memory, and it is
- * expected that the supplied buffer will not be freed immediately. If the
- * package buffer needs to be freed, such as when read from a file, use
- * ice_copy_and_init_pkg() instead of directly calling ice_init_pkg() in this
- * case.
- */
-enum ice_ddp_state ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len)
-{
- bool already_loaded = false;
- enum ice_ddp_state state;
- struct ice_pkg_hdr *pkg;
- struct ice_seg *seg;
-
- if (!buf || !len)
- return ICE_DDP_PKG_ERR;
-
- pkg = (struct ice_pkg_hdr *)buf;
- state = ice_verify_pkg(pkg, len);
- if (state) {
- ice_debug(hw, ICE_DBG_INIT, "failed to verify pkg (err: %d)\n",
- state);
- return state;
- }
-
- /* initialize package info */
- state = ice_init_pkg_info(hw, pkg);
- if (state)
- return state;
-
- /* before downloading the package, check package version for
- * compatibility with driver
- */
- state = ice_chk_pkg_compat(hw, pkg, &seg);
- if (state)
- return state;
-
- /* initialize package hints and then download package */
- ice_init_pkg_hints(hw, seg);
- state = ice_download_pkg(hw, seg);
- if (state == ICE_DDP_PKG_ALREADY_LOADED) {
- ice_debug(hw, ICE_DBG_INIT, "package previously loaded - no work.\n");
- already_loaded = true;
- }
-
- /* Get information on the package currently loaded in HW, then make sure
- * the driver is compatible with this version.
- */
- if (!state || state == ICE_DDP_PKG_ALREADY_LOADED) {
- state = ice_get_pkg_info(hw);
- if (!state)
- state = ice_get_ddp_pkg_state(hw, already_loaded);
- }
-
- if (ice_is_init_pkg_successful(state)) {
- hw->seg = seg;
- /* on successful package download update other required
- * registers to support the package and fill HW tables
- * with package content.
- */
- ice_init_pkg_regs(hw);
- ice_fill_blk_tbls(hw);
- ice_fill_hw_ptype(hw);
- ice_get_prof_index_max(hw);
- } else {
- ice_debug(hw, ICE_DBG_INIT, "package load failed, %d\n",
- state);
- }
-
- return state;
-}
-
-/**
- * ice_copy_and_init_pkg - initialize/download a copy of the package
- * @hw: pointer to the hardware structure
- * @buf: pointer to the package buffer
- * @len: size of the package buffer
- *
- * This function copies the package buffer, and then calls ice_init_pkg() to
- * initialize the copied package contents.
- *
- * The copying is necessary if the package buffer supplied is constant, or if
- * the memory may disappear shortly after calling this function.
- *
- * If the package buffer resides in the data segment and can be modified, the
- * caller is free to use ice_init_pkg() instead of ice_copy_and_init_pkg().
- *
- * However, if the package buffer needs to be copied first, such as when being
- * read from a file, the caller should use ice_copy_and_init_pkg().
- *
- * This function will first copy the package buffer, before calling
- * ice_init_pkg(). The caller is free to immediately destroy the original
- * package buffer, as the new copy will be managed by this function and
- * related routines.
- */
-enum ice_ddp_state
-ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len)
-{
- enum ice_ddp_state state;
- u8 *buf_copy;
-
- if (!buf || !len)
- return ICE_DDP_PKG_ERR;
-
- buf_copy = devm_kmemdup(ice_hw_to_dev(hw), buf, len, GFP_KERNEL);
-
- state = ice_init_pkg(hw, buf_copy, len);
- if (!ice_is_init_pkg_successful(state)) {
- /* Free the copy, since we failed to initialize the package */
- devm_kfree(ice_hw_to_dev(hw), buf_copy);
- } else {
- /* Track the copied pkg so we can free it later */
- hw->pkg_copy = buf_copy;
- hw->pkg_size = len;
- }
-
- return state;
-}
-
-/**
- * ice_is_init_pkg_successful - check if DDP init was successful
- * @state: state of the DDP pkg after download
- */
-bool ice_is_init_pkg_successful(enum ice_ddp_state state)
-{
- switch (state) {
- case ICE_DDP_PKG_SUCCESS:
- case ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED:
- case ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED:
- return true;
- default:
- return false;
- }
-}
-
-/**
- * ice_pkg_buf_alloc
- * @hw: pointer to the HW structure
- *
- * Allocates a package buffer and returns a pointer to the buffer header.
- * Note: all package contents must be in Little Endian form.
- */
-static struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw)
-{
- struct ice_buf_build *bld;
- struct ice_buf_hdr *buf;
-
- bld = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*bld), GFP_KERNEL);
- if (!bld)
- return NULL;
-
- buf = (struct ice_buf_hdr *)bld;
- buf->data_end = cpu_to_le16(offsetof(struct ice_buf_hdr,
- section_entry));
- return bld;
-}
-
-static bool ice_is_gtp_u_profile(u16 prof_idx)
-{
- return (prof_idx >= ICE_PROFID_IPV6_GTPU_TEID &&
- prof_idx <= ICE_PROFID_IPV6_GTPU_IPV6_TCP_INNER) ||
- prof_idx == ICE_PROFID_IPV4_GTPU_TEID;
-}
-
-static bool ice_is_gtp_c_profile(u16 prof_idx)
-{
- switch (prof_idx) {
- case ICE_PROFID_IPV4_GTPC_TEID:
- case ICE_PROFID_IPV4_GTPC_NO_TEID:
- case ICE_PROFID_IPV6_GTPC_TEID:
- case ICE_PROFID_IPV6_GTPC_NO_TEID:
- return true;
- default:
- return false;
- }
-}
-
-/**
- * ice_get_sw_prof_type - determine switch profile type
- * @hw: pointer to the HW structure
- * @fv: pointer to the switch field vector
- * @prof_idx: profile index to check
- */
-static enum ice_prof_type
-ice_get_sw_prof_type(struct ice_hw *hw, struct ice_fv *fv, u32 prof_idx)
-{
- u16 i;
-
- if (ice_is_gtp_c_profile(prof_idx))
- return ICE_PROF_TUN_GTPC;
-
- if (ice_is_gtp_u_profile(prof_idx))
- return ICE_PROF_TUN_GTPU;
-
- for (i = 0; i < hw->blk[ICE_BLK_SW].es.fvw; i++) {
- /* UDP tunnel will have UDP_OF protocol ID and VNI offset */
- if (fv->ew[i].prot_id == (u8)ICE_PROT_UDP_OF &&
- fv->ew[i].off == ICE_VNI_OFFSET)
- return ICE_PROF_TUN_UDP;
-
- /* GRE tunnel will have GRE protocol */
- if (fv->ew[i].prot_id == (u8)ICE_PROT_GRE_OF)
- return ICE_PROF_TUN_GRE;
- }
-
- return ICE_PROF_NON_TUN;
-}
-
-/**
- * ice_get_sw_fv_bitmap - Get switch field vector bitmap based on profile type
- * @hw: pointer to hardware structure
- * @req_profs: type of profiles requested
- * @bm: pointer to memory for returning the bitmap of field vectors
- */
-void
-ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type req_profs,
- unsigned long *bm)
-{
- struct ice_pkg_enum state;
- struct ice_seg *ice_seg;
- struct ice_fv *fv;
-
- if (req_profs == ICE_PROF_ALL) {
- bitmap_set(bm, 0, ICE_MAX_NUM_PROFILES);
- return;
- }
-
- memset(&state, 0, sizeof(state));
- bitmap_zero(bm, ICE_MAX_NUM_PROFILES);
- ice_seg = hw->seg;
- do {
- enum ice_prof_type prof_type;
- u32 offset;
-
- fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
- &offset, ice_sw_fv_handler);
- ice_seg = NULL;
-
- if (fv) {
- /* Determine field vector type */
- prof_type = ice_get_sw_prof_type(hw, fv, offset);
-
- if (req_profs & prof_type)
- set_bit((u16)offset, bm);
- }
- } while (fv);
-}
-
-/**
- * ice_get_sw_fv_list
- * @hw: pointer to the HW structure
- * @lkups: list of protocol types
- * @bm: bitmap of field vectors to consider
- * @fv_list: Head of a list
- *
- * Finds all the field vector entries from switch block that contain
- * a given protocol ID and offset and returns a list of structures of type
- * "ice_sw_fv_list_entry". Every structure in the list has a field vector
- * definition and profile ID information
- * NOTE: The caller of the function is responsible for freeing the memory
- * allocated for every list entry.
- */
-int
-ice_get_sw_fv_list(struct ice_hw *hw, struct ice_prot_lkup_ext *lkups,
- unsigned long *bm, struct list_head *fv_list)
-{
- struct ice_sw_fv_list_entry *fvl;
- struct ice_sw_fv_list_entry *tmp;
- struct ice_pkg_enum state;
- struct ice_seg *ice_seg;
- struct ice_fv *fv;
- u32 offset;
-
- memset(&state, 0, sizeof(state));
-
- if (!lkups->n_val_words || !hw->seg)
- return -EINVAL;
-
- ice_seg = hw->seg;
- do {
- u16 i;
-
- fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
- &offset, ice_sw_fv_handler);
- if (!fv)
- break;
- ice_seg = NULL;
-
- /* If field vector is not in the bitmap list, then skip this
- * profile.
- */
- if (!test_bit((u16)offset, bm))
- continue;
-
- for (i = 0; i < lkups->n_val_words; i++) {
- int j;
-
- for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
- if (fv->ew[j].prot_id ==
- lkups->fv_words[i].prot_id &&
- fv->ew[j].off == lkups->fv_words[i].off)
- break;
- if (j >= hw->blk[ICE_BLK_SW].es.fvw)
- break;
- if (i + 1 == lkups->n_val_words) {
- fvl = devm_kzalloc(ice_hw_to_dev(hw),
- sizeof(*fvl), GFP_KERNEL);
- if (!fvl)
- goto err;
- fvl->fv_ptr = fv;
- fvl->profile_id = offset;
- list_add(&fvl->list_entry, fv_list);
- break;
- }
- }
- } while (fv);
- if (list_empty(fv_list)) {
- dev_warn(ice_hw_to_dev(hw), "Required profiles not found in currently loaded DDP package");
- return -EIO;
- }
-
- return 0;
-
-err:
- list_for_each_entry_safe(fvl, tmp, fv_list, list_entry) {
- list_del(&fvl->list_entry);
- devm_kfree(ice_hw_to_dev(hw), fvl);
- }
-
- return -ENOMEM;
-}
-
-/**
- * ice_init_prof_result_bm - Initialize the profile result index bitmap
- * @hw: pointer to hardware structure
- */
-void ice_init_prof_result_bm(struct ice_hw *hw)
-{
- struct ice_pkg_enum state;
- struct ice_seg *ice_seg;
- struct ice_fv *fv;
-
- memset(&state, 0, sizeof(state));
-
- if (!hw->seg)
- return;
-
- ice_seg = hw->seg;
- do {
- u32 off;
- u16 i;
-
- fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
- &off, ice_sw_fv_handler);
- ice_seg = NULL;
- if (!fv)
- break;
-
- bitmap_zero(hw->switch_info->prof_res_bm[off],
- ICE_MAX_FV_WORDS);
-
- /* Determine empty field vector indices, these can be
- * used for recipe results. Skip index 0, since it is
- * always used for Switch ID.
- */
- for (i = 1; i < ICE_MAX_FV_WORDS; i++)
- if (fv->ew[i].prot_id == ICE_PROT_INVALID &&
- fv->ew[i].off == ICE_FV_OFFSET_INVAL)
- set_bit(i, hw->switch_info->prof_res_bm[off]);
- } while (fv);
-}
-
-/**
- * ice_pkg_buf_free
- * @hw: pointer to the HW structure
- * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
- *
- * Frees a package buffer
- */
-void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld)
-{
- devm_kfree(ice_hw_to_dev(hw), bld);
-}
-
-/**
- * ice_pkg_buf_reserve_section
- * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
- * @count: the number of sections to reserve
- *
- * Reserves one or more section table entries in a package buffer. This routine
- * can be called multiple times as long as they are made before calling
- * ice_pkg_buf_alloc_section(). Once ice_pkg_buf_alloc_section()
- * is called once, the number of sections that can be allocated will not be able
- * to be increased; not using all reserved sections is fine, but this will
- * result in some wasted space in the buffer.
- * Note: all package contents must be in Little Endian form.
- */
-static int
-ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count)
-{
- struct ice_buf_hdr *buf;
- u16 section_count;
- u16 data_end;
-
- if (!bld)
- return -EINVAL;
-
- buf = (struct ice_buf_hdr *)&bld->buf;
-
- /* already an active section, can't increase table size */
- section_count = le16_to_cpu(buf->section_count);
- if (section_count > 0)
- return -EIO;
-
- if (bld->reserved_section_table_entries + count > ICE_MAX_S_COUNT)
- return -EIO;
- bld->reserved_section_table_entries += count;
-
- data_end = le16_to_cpu(buf->data_end) +
- flex_array_size(buf, section_entry, count);
- buf->data_end = cpu_to_le16(data_end);
-
- return 0;
-}
-
-/**
- * ice_pkg_buf_alloc_section
- * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
- * @type: the section type value
- * @size: the size of the section to reserve (in bytes)
- *
- * Reserves memory in the buffer for a section's content and updates the
- * buffers' status accordingly. This routine returns a pointer to the first
- * byte of the section start within the buffer, which is used to fill in the
- * section contents.
- * Note: all package contents must be in Little Endian form.
- */
-static void *
-ice_pkg_buf_alloc_section(struct ice_buf_build *bld, u32 type, u16 size)
-{
- struct ice_buf_hdr *buf;
- u16 sect_count;
- u16 data_end;
-
- if (!bld || !type || !size)
- return NULL;
-
- buf = (struct ice_buf_hdr *)&bld->buf;
-
- /* check for enough space left in buffer */
- data_end = le16_to_cpu(buf->data_end);
-
- /* section start must align on 4 byte boundary */
- data_end = ALIGN(data_end, 4);
-
- if ((data_end + size) > ICE_MAX_S_DATA_END)
- return NULL;
-
- /* check for more available section table entries */
- sect_count = le16_to_cpu(buf->section_count);
- if (sect_count < bld->reserved_section_table_entries) {
- void *section_ptr = ((u8 *)buf) + data_end;
-
- buf->section_entry[sect_count].offset = cpu_to_le16(data_end);
- buf->section_entry[sect_count].size = cpu_to_le16(size);
- buf->section_entry[sect_count].type = cpu_to_le32(type);
-
- data_end += size;
- buf->data_end = cpu_to_le16(data_end);
-
- buf->section_count = cpu_to_le16(sect_count + 1);
- return section_ptr;
- }
-
- /* no free section table entries */
- return NULL;
-}
-
-/**
- * ice_pkg_buf_alloc_single_section
- * @hw: pointer to the HW structure
- * @type: the section type value
- * @size: the size of the section to reserve (in bytes)
- * @section: returns pointer to the section
- *
- * Allocates a package buffer with a single section.
- * Note: all package contents must be in Little Endian form.
- */
-struct ice_buf_build *
-ice_pkg_buf_alloc_single_section(struct ice_hw *hw, u32 type, u16 size,
- void **section)
-{
- struct ice_buf_build *buf;
-
- if (!section)
- return NULL;
-
- buf = ice_pkg_buf_alloc(hw);
- if (!buf)
- return NULL;
-
- if (ice_pkg_buf_reserve_section(buf, 1))
- goto ice_pkg_buf_alloc_single_section_err;
-
- *section = ice_pkg_buf_alloc_section(buf, type, size);
- if (!*section)
- goto ice_pkg_buf_alloc_single_section_err;
-
- return buf;
-
-ice_pkg_buf_alloc_single_section_err:
- ice_pkg_buf_free(hw, buf);
- return NULL;
-}
-
-/**
- * ice_pkg_buf_get_active_sections
- * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
- *
- * Returns the number of active sections. Before using the package buffer
- * in an update package command, the caller should make sure that there is at
- * least one active section - otherwise, the buffer is not legal and should
- * not be used.
- * Note: all package contents must be in Little Endian form.
- */
-static u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld)
-{
- struct ice_buf_hdr *buf;
-
- if (!bld)
- return 0;
-
- buf = (struct ice_buf_hdr *)&bld->buf;
- return le16_to_cpu(buf->section_count);
-}
-
-/**
- * ice_pkg_buf
- * @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
- *
- * Return a pointer to the buffer's header
- */
-struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld)
-{
- if (!bld)
- return NULL;
-
- return &bld->buf;
-}
-
-/**
* ice_get_open_tunnel_port - retrieve an open tunnel port
* @hw: pointer to the HW structure
* @port: returns open port
@@ -2297,10 +391,11 @@ ice_upd_dvm_boost_entry_err:
*/
int ice_set_dvm_boost_entries(struct ice_hw *hw)
{
- int status;
u16 i;
for (i = 0; i < hw->dvm_upd.count; i++) {
+ int status;
+
status = ice_upd_dvm_boost_entry(hw, &hw->dvm_upd.tbl[i]);
if (status)
return status;
@@ -2757,7 +852,6 @@ ice_match_prop_lst(struct list_head *list1, struct list_head *list2)
count++;
list_for_each_entry(tmp2, list2, list)
chk_count++;
- /* cppcheck-suppress knownConditionTrueFalse */
if (!count || count != chk_count)
return false;
@@ -5102,12 +3196,13 @@ ice_rem_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig,
u16 idx = vsig & ICE_VSIG_IDX_M;
struct ice_vsig_vsi *vsi_cur;
struct ice_vsig_prof *d, *t;
- int status;
/* remove TCAM entries */
list_for_each_entry_safe(d, t,
&hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
list) {
+ int status;
+
status = ice_rem_prof_id(hw, blk, d);
if (status)
return status;
@@ -5158,12 +3253,13 @@ ice_rem_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
{
u16 idx = vsig & ICE_VSIG_IDX_M;
struct ice_vsig_prof *p, *t;
- int status;
list_for_each_entry_safe(p, t,
&hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
list)
if (p->profile_cookie == hdl) {
+ int status;
+
if (ice_vsig_prof_id_count(hw, blk, vsig) == 1)
/* this is the last profile, remove the VSIG */
return ice_rem_vsig(hw, blk, vsig, chg);
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
index 9c530c86703e..7af7c8e9aa4e 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
+++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
@@ -6,75 +6,6 @@
#include "ice_type.h"
-/* Package minimal version supported */
-#define ICE_PKG_SUPP_VER_MAJ 1
-#define ICE_PKG_SUPP_VER_MNR 3
-
-/* Package format version */
-#define ICE_PKG_FMT_VER_MAJ 1
-#define ICE_PKG_FMT_VER_MNR 0
-#define ICE_PKG_FMT_VER_UPD 0
-#define ICE_PKG_FMT_VER_DFT 0
-
-#define ICE_PKG_CNT 4
-
-enum ice_ddp_state {
- /* Indicates that this call to ice_init_pkg
- * successfully loaded the requested DDP package
- */
- ICE_DDP_PKG_SUCCESS = 0,
-
- /* Generic error for already loaded errors, it is mapped later to
- * the more specific one (one of the next 3)
- */
- ICE_DDP_PKG_ALREADY_LOADED = -1,
-
- /* Indicates that a DDP package of the same version has already been
- * loaded onto the device by a previous call or by another PF
- */
- ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED = -2,
-
- /* The device has a DDP package that is not supported by the driver */
- ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED = -3,
-
- /* The device has a compatible package
- * (but different from the request) already loaded
- */
- ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED = -4,
-
- /* The firmware loaded on the device is not compatible with
- * the DDP package loaded
- */
- ICE_DDP_PKG_FW_MISMATCH = -5,
-
- /* The DDP package file is invalid */
- ICE_DDP_PKG_INVALID_FILE = -6,
-
- /* The version of the DDP package provided is higher than
- * the driver supports
- */
- ICE_DDP_PKG_FILE_VERSION_TOO_HIGH = -7,
-
- /* The version of the DDP package provided is lower than the
- * driver supports
- */
- ICE_DDP_PKG_FILE_VERSION_TOO_LOW = -8,
-
- /* The signature of the DDP package file provided is invalid */
- ICE_DDP_PKG_FILE_SIGNATURE_INVALID = -9,
-
- /* The DDP package file security revision is too low and not
- * supported by firmware
- */
- ICE_DDP_PKG_FILE_REVISION_TOO_LOW = -10,
-
- /* An error occurred in firmware while loading the DDP package */
- ICE_DDP_PKG_LOAD_ERROR = -11,
-
- /* Other errors */
- ICE_DDP_PKG_ERR = -12
-};
-
int
ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access);
void ice_release_change_lock(struct ice_hw *hw);
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_type.h b/drivers/net/ethernet/intel/ice/ice_flex_type.h
index 974d14a83b2e..4f42e14ed3ae 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_flex_type.h
@@ -3,205 +3,7 @@
#ifndef _ICE_FLEX_TYPE_H_
#define _ICE_FLEX_TYPE_H_
-
-#define ICE_FV_OFFSET_INVAL 0x1FF
-
-/* Extraction Sequence (Field Vector) Table */
-struct ice_fv_word {
- u8 prot_id;
- u16 off; /* Offset within the protocol header */
- u8 resvrd;
-} __packed;
-
-#define ICE_MAX_NUM_PROFILES 256
-
-#define ICE_MAX_FV_WORDS 48
-struct ice_fv {
- struct ice_fv_word ew[ICE_MAX_FV_WORDS];
-};
-
-/* Package and segment headers and tables */
-struct ice_pkg_hdr {
- struct ice_pkg_ver pkg_format_ver;
- __le32 seg_count;
- __le32 seg_offset[];
-};
-
-/* generic segment */
-struct ice_generic_seg_hdr {
-#define SEGMENT_TYPE_METADATA 0x00000001
-#define SEGMENT_TYPE_ICE 0x00000010
- __le32 seg_type;
- struct ice_pkg_ver seg_format_ver;
- __le32 seg_size;
- char seg_id[ICE_PKG_NAME_SIZE];
-};
-
-/* ice specific segment */
-
-union ice_device_id {
- struct {
- __le16 device_id;
- __le16 vendor_id;
- } dev_vend_id;
- __le32 id;
-};
-
-struct ice_device_id_entry {
- union ice_device_id device;
- union ice_device_id sub_device;
-};
-
-struct ice_seg {
- struct ice_generic_seg_hdr hdr;
- __le32 device_table_count;
- struct ice_device_id_entry device_table[];
-};
-
-struct ice_nvm_table {
- __le32 table_count;
- __le32 vers[];
-};
-
-struct ice_buf {
-#define ICE_PKG_BUF_SIZE 4096
- u8 buf[ICE_PKG_BUF_SIZE];
-};
-
-struct ice_buf_table {
- __le32 buf_count;
- struct ice_buf buf_array[];
-};
-
-/* global metadata specific segment */
-struct ice_global_metadata_seg {
- struct ice_generic_seg_hdr hdr;
- struct ice_pkg_ver pkg_ver;
- __le32 rsvd;
- char pkg_name[ICE_PKG_NAME_SIZE];
-};
-
-#define ICE_MIN_S_OFF 12
-#define ICE_MAX_S_OFF 4095
-#define ICE_MIN_S_SZ 1
-#define ICE_MAX_S_SZ 4084
-
-/* section information */
-struct ice_section_entry {
- __le32 type;
- __le16 offset;
- __le16 size;
-};
-
-#define ICE_MIN_S_COUNT 1
-#define ICE_MAX_S_COUNT 511
-#define ICE_MIN_S_DATA_END 12
-#define ICE_MAX_S_DATA_END 4096
-
-#define ICE_METADATA_BUF 0x80000000
-
-struct ice_buf_hdr {
- __le16 section_count;
- __le16 data_end;
- struct ice_section_entry section_entry[];
-};
-
-#define ICE_MAX_ENTRIES_IN_BUF(hd_sz, ent_sz) ((ICE_PKG_BUF_SIZE - \
- struct_size((struct ice_buf_hdr *)0, section_entry, 1) - (hd_sz)) /\
- (ent_sz))
-
-/* ice package section IDs */
-#define ICE_SID_METADATA 1
-#define ICE_SID_XLT0_SW 10
-#define ICE_SID_XLT_KEY_BUILDER_SW 11
-#define ICE_SID_XLT1_SW 12
-#define ICE_SID_XLT2_SW 13
-#define ICE_SID_PROFID_TCAM_SW 14
-#define ICE_SID_PROFID_REDIR_SW 15
-#define ICE_SID_FLD_VEC_SW 16
-#define ICE_SID_CDID_KEY_BUILDER_SW 17
-
-struct ice_meta_sect {
- struct ice_pkg_ver ver;
-#define ICE_META_SECT_NAME_SIZE 28
- char name[ICE_META_SECT_NAME_SIZE];
- __le32 track_id;
-};
-
-#define ICE_SID_CDID_REDIR_SW 18
-
-#define ICE_SID_XLT0_ACL 20
-#define ICE_SID_XLT_KEY_BUILDER_ACL 21
-#define ICE_SID_XLT1_ACL 22
-#define ICE_SID_XLT2_ACL 23
-#define ICE_SID_PROFID_TCAM_ACL 24
-#define ICE_SID_PROFID_REDIR_ACL 25
-#define ICE_SID_FLD_VEC_ACL 26
-#define ICE_SID_CDID_KEY_BUILDER_ACL 27
-#define ICE_SID_CDID_REDIR_ACL 28
-
-#define ICE_SID_XLT0_FD 30
-#define ICE_SID_XLT_KEY_BUILDER_FD 31
-#define ICE_SID_XLT1_FD 32
-#define ICE_SID_XLT2_FD 33
-#define ICE_SID_PROFID_TCAM_FD 34
-#define ICE_SID_PROFID_REDIR_FD 35
-#define ICE_SID_FLD_VEC_FD 36
-#define ICE_SID_CDID_KEY_BUILDER_FD 37
-#define ICE_SID_CDID_REDIR_FD 38
-
-#define ICE_SID_XLT0_RSS 40
-#define ICE_SID_XLT_KEY_BUILDER_RSS 41
-#define ICE_SID_XLT1_RSS 42
-#define ICE_SID_XLT2_RSS 43
-#define ICE_SID_PROFID_TCAM_RSS 44
-#define ICE_SID_PROFID_REDIR_RSS 45
-#define ICE_SID_FLD_VEC_RSS 46
-#define ICE_SID_CDID_KEY_BUILDER_RSS 47
-#define ICE_SID_CDID_REDIR_RSS 48
-
-#define ICE_SID_RXPARSER_MARKER_PTYPE 55
-#define ICE_SID_RXPARSER_BOOST_TCAM 56
-#define ICE_SID_RXPARSER_METADATA_INIT 58
-#define ICE_SID_TXPARSER_BOOST_TCAM 66
-
-#define ICE_SID_XLT0_PE 80
-#define ICE_SID_XLT_KEY_BUILDER_PE 81
-#define ICE_SID_XLT1_PE 82
-#define ICE_SID_XLT2_PE 83
-#define ICE_SID_PROFID_TCAM_PE 84
-#define ICE_SID_PROFID_REDIR_PE 85
-#define ICE_SID_FLD_VEC_PE 86
-#define ICE_SID_CDID_KEY_BUILDER_PE 87
-#define ICE_SID_CDID_REDIR_PE 88
-
-/* Label Metadata section IDs */
-#define ICE_SID_LBL_FIRST 0x80000010
-#define ICE_SID_LBL_RXPARSER_TMEM 0x80000018
-/* The following define MUST be updated to reflect the last label section ID */
-#define ICE_SID_LBL_LAST 0x80000038
-
-enum ice_block {
- ICE_BLK_SW = 0,
- ICE_BLK_ACL,
- ICE_BLK_FD,
- ICE_BLK_RSS,
- ICE_BLK_PE,
- ICE_BLK_COUNT
-};
-
-enum ice_sect {
- ICE_XLT0 = 0,
- ICE_XLT_KB,
- ICE_XLT1,
- ICE_XLT2,
- ICE_PROF_TCAM,
- ICE_PROF_REDIR,
- ICE_VEC_TBL,
- ICE_CDID_KB,
- ICE_CDID_REDIR,
- ICE_SECT_COUNT
-};
+#include "ice_ddp.h"
/* Packet Type (PTYPE) values */
#define ICE_PTYPE_MAC_PAY 1
@@ -283,134 +85,6 @@ struct ice_ptype_attributes {
enum ice_ptype_attrib_type attrib;
};
-/* package labels */
-struct ice_label {
- __le16 value;
-#define ICE_PKG_LABEL_SIZE 64
- char name[ICE_PKG_LABEL_SIZE];
-};
-
-struct ice_label_section {
- __le16 count;
- struct ice_label label[];
-};
-
-#define ICE_MAX_LABELS_IN_BUF ICE_MAX_ENTRIES_IN_BUF( \
- struct_size((struct ice_label_section *)0, label, 1) - \
- sizeof(struct ice_label), sizeof(struct ice_label))
-
-struct ice_sw_fv_section {
- __le16 count;
- __le16 base_offset;
- struct ice_fv fv[];
-};
-
-struct ice_sw_fv_list_entry {
- struct list_head list_entry;
- u32 profile_id;
- struct ice_fv *fv_ptr;
-};
-
-/* The BOOST TCAM stores the match packet header in reverse order, meaning
- * the fields are reversed; in addition, this means that the normally big endian
- * fields of the packet are now little endian.
- */
-struct ice_boost_key_value {
-#define ICE_BOOST_REMAINING_HV_KEY 15
- u8 remaining_hv_key[ICE_BOOST_REMAINING_HV_KEY];
- __le16 hv_dst_port_key;
- __le16 hv_src_port_key;
- u8 tcam_search_key;
-} __packed;
-
-struct ice_boost_key {
- struct ice_boost_key_value key;
- struct ice_boost_key_value key2;
-};
-
-/* package Boost TCAM entry */
-struct ice_boost_tcam_entry {
- __le16 addr;
- __le16 reserved;
- /* break up the 40 bytes of key into different fields */
- struct ice_boost_key key;
- u8 boost_hit_index_group;
- /* The following contains bitfields which are not on byte boundaries.
- * These fields are currently unused by driver software.
- */
-#define ICE_BOOST_BIT_FIELDS 43
- u8 bit_fields[ICE_BOOST_BIT_FIELDS];
-};
-
-struct ice_boost_tcam_section {
- __le16 count;
- __le16 reserved;
- struct ice_boost_tcam_entry tcam[];
-};
-
-#define ICE_MAX_BST_TCAMS_IN_BUF ICE_MAX_ENTRIES_IN_BUF( \
- struct_size((struct ice_boost_tcam_section *)0, tcam, 1) - \
- sizeof(struct ice_boost_tcam_entry), \
- sizeof(struct ice_boost_tcam_entry))
-
-/* package Marker Ptype TCAM entry */
-struct ice_marker_ptype_tcam_entry {
-#define ICE_MARKER_PTYPE_TCAM_ADDR_MAX 1024
- __le16 addr;
- __le16 ptype;
- u8 keys[20];
-};
-
-struct ice_marker_ptype_tcam_section {
- __le16 count;
- __le16 reserved;
- struct ice_marker_ptype_tcam_entry tcam[];
-};
-
-#define ICE_MAX_MARKER_PTYPE_TCAMS_IN_BUF \
- ICE_MAX_ENTRIES_IN_BUF(struct_size((struct ice_marker_ptype_tcam_section *)0, tcam, 1) - \
- sizeof(struct ice_marker_ptype_tcam_entry), \
- sizeof(struct ice_marker_ptype_tcam_entry))
-
-struct ice_xlt1_section {
- __le16 count;
- __le16 offset;
- u8 value[];
-};
-
-struct ice_xlt2_section {
- __le16 count;
- __le16 offset;
- __le16 value[];
-};
-
-struct ice_prof_redir_section {
- __le16 count;
- __le16 offset;
- u8 redir_value[];
-};
-
-/* package buffer building */
-
-struct ice_buf_build {
- struct ice_buf buf;
- u16 reserved_section_table_entries;
-};
-
-struct ice_pkg_enum {
- struct ice_buf_table *buf_table;
- u32 buf_idx;
-
- u32 type;
- struct ice_buf_hdr *buf;
- u32 sect_idx;
- void *sect;
- u32 sect_type;
-
- u32 entry_idx;
- void *(*handler)(u32 sect_type, void *section, u32 index, u32 *offset);
-};
-
/* Tunnel enabling */
enum ice_tunnel_type {
diff --git a/drivers/net/ethernet/intel/ice/ice_fltr.c b/drivers/net/ethernet/intel/ice/ice_fltr.c
index 40e678cfb507..aff7a141c30d 100644
--- a/drivers/net/ethernet/intel/ice/ice_fltr.c
+++ b/drivers/net/ethernet/intel/ice/ice_fltr.c
@@ -208,6 +208,11 @@ static int ice_fltr_remove_eth_list(struct ice_vsi *vsi, struct list_head *list)
void ice_fltr_remove_all(struct ice_vsi *vsi)
{
ice_remove_vsi_fltr(&vsi->back->hw, vsi->idx);
+ /* sync netdev filters if exist */
+ if (vsi->netdev) {
+ __dev_uc_unsync(vsi->netdev, NULL);
+ __dev_mc_unsync(vsi->netdev, NULL);
+ }
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_gnss.c b/drivers/net/ethernet/intel/ice/ice_gnss.c
index 43e199b5b513..8dec748bb53a 100644
--- a/drivers/net/ethernet/intel/ice/ice_gnss.c
+++ b/drivers/net/ethernet/intel/ice/ice_gnss.c
@@ -3,15 +3,18 @@
#include "ice.h"
#include "ice_lib.h"
-#include <linux/tty_driver.h>
/**
- * ice_gnss_do_write - Write data to internal GNSS
+ * ice_gnss_do_write - Write data to internal GNSS receiver
* @pf: board private structure
* @buf: command buffer
* @size: command buffer size
*
* Write UBX command data to the GNSS receiver
+ *
+ * Return:
+ * * number of bytes written - success
+ * * negative - error code
*/
static unsigned int
ice_gnss_do_write(struct ice_pf *pf, unsigned char *buf, unsigned int size)
@@ -82,6 +85,12 @@ static void ice_gnss_write_pending(struct kthread_work *work)
write_work);
struct ice_pf *pf = gnss->back;
+ if (!pf)
+ return;
+
+ if (!test_bit(ICE_FLAG_GNSS, pf->flags))
+ return;
+
if (!list_empty(&gnss->queue)) {
struct gnss_write_buf *write_buf = NULL;
unsigned int bytes;
@@ -102,16 +111,14 @@ static void ice_gnss_write_pending(struct kthread_work *work)
* ice_gnss_read - Read data from internal GNSS module
* @work: GNSS read work structure
*
- * Read the data from internal GNSS receiver, number of bytes read will be
- * returned in *read_data parameter.
+ * Read the data from internal GNSS receiver, write it to gnss_dev.
*/
static void ice_gnss_read(struct kthread_work *work)
{
struct gnss_serial *gnss = container_of(work, struct gnss_serial,
read_work.work);
+ unsigned int i, bytes_read, data_len, count;
struct ice_aqc_link_topo_addr link_topo;
- unsigned int i, bytes_read, data_len;
- struct tty_port *port;
struct ice_pf *pf;
struct ice_hw *hw;
__be16 data_len_b;
@@ -120,14 +127,15 @@ static void ice_gnss_read(struct kthread_work *work)
int err = 0;
pf = gnss->back;
- if (!pf || !gnss->tty || !gnss->tty->port) {
+ if (!pf) {
err = -EFAULT;
goto exit;
}
- hw = &pf->hw;
- port = gnss->tty->port;
+ if (!test_bit(ICE_FLAG_GNSS, pf->flags))
+ return;
+ hw = &pf->hw;
buf = (char *)get_zeroed_page(GFP_KERNEL);
if (!buf) {
err = -ENOMEM;
@@ -159,7 +167,6 @@ static void ice_gnss_read(struct kthread_work *work)
}
data_len = min_t(typeof(data_len), data_len, PAGE_SIZE);
- data_len = tty_buffer_request_room(port, data_len);
if (!data_len) {
err = -ENOMEM;
goto exit_buf;
@@ -179,12 +186,11 @@ static void ice_gnss_read(struct kthread_work *work)
goto exit_buf;
}
- /* Send the data to the tty layer for users to read. This doesn't
- * actually push the data through unless tty->low_latency is set.
- */
- tty_insert_flip_string(port, buf, i);
- tty_flip_buffer_push(port);
-
+ count = gnss_insert_raw(pf->gnss_dev, buf, i);
+ if (count != i)
+ dev_warn(ice_pf_to_dev(pf),
+ "gnss_insert_raw ret=%d size=%d\n",
+ count, i);
exit_buf:
free_page((unsigned long)buf);
kthread_queue_delayed_work(gnss->kworker, &gnss->read_work,
@@ -195,11 +201,16 @@ exit:
}
/**
- * ice_gnss_struct_init - Initialize GNSS structure for the TTY
+ * ice_gnss_struct_init - Initialize GNSS receiver
* @pf: Board private structure
- * @index: TTY device index
+ *
+ * Initialize GNSS structures and workers.
+ *
+ * Return:
+ * * pointer to initialized gnss_serial struct - success
+ * * NULL - error
*/
-static struct gnss_serial *ice_gnss_struct_init(struct ice_pf *pf, int index)
+static struct gnss_serial *ice_gnss_struct_init(struct ice_pf *pf)
{
struct device *dev = ice_pf_to_dev(pf);
struct kthread_worker *kworker;
@@ -209,17 +220,12 @@ static struct gnss_serial *ice_gnss_struct_init(struct ice_pf *pf, int index)
if (!gnss)
return NULL;
- mutex_init(&gnss->gnss_mutex);
- gnss->open_count = 0;
gnss->back = pf;
- pf->gnss_serial[index] = gnss;
+ pf->gnss_serial = gnss;
kthread_init_delayed_work(&gnss->read_work, ice_gnss_read);
INIT_LIST_HEAD(&gnss->queue);
kthread_init_work(&gnss->write_work, ice_gnss_write_pending);
- /* Allocate a kworker for handling work required for the GNSS TTY
- * writes.
- */
kworker = kthread_create_worker(0, "ice-gnss-%s", dev_name(dev));
if (IS_ERR(kworker)) {
kfree(gnss);
@@ -232,140 +238,100 @@ static struct gnss_serial *ice_gnss_struct_init(struct ice_pf *pf, int index)
}
/**
- * ice_gnss_tty_open - Initialize GNSS structures on TTY device open
- * @tty: pointer to the tty_struct
- * @filp: pointer to the file
+ * ice_gnss_open - Open GNSS device
+ * @gdev: pointer to the gnss device struct
+ *
+ * Open GNSS device and start filling the read buffer for consumer.
*
- * This routine is mandatory. If this routine is not filled in, the attempted
- * open will fail with ENODEV.
+ * Return:
+ * * 0 - success
+ * * negative - error code
*/
-static int ice_gnss_tty_open(struct tty_struct *tty, struct file *filp)
+static int ice_gnss_open(struct gnss_device *gdev)
{
+ struct ice_pf *pf = gnss_get_drvdata(gdev);
struct gnss_serial *gnss;
- struct ice_pf *pf;
- pf = (struct ice_pf *)tty->driver->driver_state;
if (!pf)
return -EFAULT;
- /* Clear the pointer in case something fails */
- tty->driver_data = NULL;
-
- /* Get the serial object associated with this tty pointer */
- gnss = pf->gnss_serial[tty->index];
- if (!gnss) {
- /* Initialize GNSS struct on the first device open */
- gnss = ice_gnss_struct_init(pf, tty->index);
- if (!gnss)
- return -ENOMEM;
- }
+ if (!test_bit(ICE_FLAG_GNSS, pf->flags))
+ return -EFAULT;
- mutex_lock(&gnss->gnss_mutex);
+ gnss = pf->gnss_serial;
+ if (!gnss)
+ return -ENODEV;
- /* Save our structure within the tty structure */
- tty->driver_data = gnss;
- gnss->tty = tty;
- gnss->open_count++;
kthread_queue_delayed_work(gnss->kworker, &gnss->read_work, 0);
- mutex_unlock(&gnss->gnss_mutex);
-
return 0;
}
/**
- * ice_gnss_tty_close - Cleanup GNSS structures on tty device close
- * @tty: pointer to the tty_struct
- * @filp: pointer to the file
+ * ice_gnss_close - Close GNSS device
+ * @gdev: pointer to the gnss device struct
+ *
+ * Close GNSS device, cancel worker, stop filling the read buffer.
*/
-static void ice_gnss_tty_close(struct tty_struct *tty, struct file *filp)
+static void ice_gnss_close(struct gnss_device *gdev)
{
- struct gnss_serial *gnss = tty->driver_data;
- struct ice_pf *pf;
-
- if (!gnss)
- return;
+ struct ice_pf *pf = gnss_get_drvdata(gdev);
+ struct gnss_serial *gnss;
- pf = (struct ice_pf *)tty->driver->driver_state;
if (!pf)
return;
- mutex_lock(&gnss->gnss_mutex);
-
- if (!gnss->open_count) {
- /* Port was never opened */
- dev_err(ice_pf_to_dev(pf), "GNSS port not opened\n");
- goto exit;
- }
+ gnss = pf->gnss_serial;
+ if (!gnss)
+ return;
- gnss->open_count--;
- if (gnss->open_count <= 0) {
- /* Port is in shutdown state */
- kthread_cancel_delayed_work_sync(&gnss->read_work);
- }
-exit:
- mutex_unlock(&gnss->gnss_mutex);
+ kthread_cancel_work_sync(&gnss->write_work);
+ kthread_cancel_delayed_work_sync(&gnss->read_work);
}
/**
- * ice_gnss_tty_write - Write GNSS data
- * @tty: pointer to the tty_struct
+ * ice_gnss_write - Write to GNSS device
+ * @gdev: pointer to the gnss device struct
* @buf: pointer to the user data
- * @count: the number of characters queued to be sent to the HW
+ * @count: size of the buffer to be sent to the GNSS device
*
- * The write function call is called by the user when there is data to be sent
- * to the hardware. First the tty core receives the call, and then it passes the
- * data on to the tty driver's write function. The tty core also tells the tty
- * driver the size of the data being sent.
- * If any errors happen during the write call, a negative error value should be
- * returned instead of the number of characters queued to be written.
+ * Return:
+ * * number of written bytes - success
+ * * negative - error code
*/
static int
-ice_gnss_tty_write(struct tty_struct *tty, const unsigned char *buf, int count)
+ice_gnss_write(struct gnss_device *gdev, const unsigned char *buf,
+ size_t count)
{
+ struct ice_pf *pf = gnss_get_drvdata(gdev);
struct gnss_write_buf *write_buf;
struct gnss_serial *gnss;
unsigned char *cmd_buf;
- struct ice_pf *pf;
int err = count;
/* We cannot write a single byte using our I2C implementation. */
if (count <= 1 || count > ICE_GNSS_TTY_WRITE_BUF)
return -EINVAL;
- gnss = tty->driver_data;
- if (!gnss)
- return -EFAULT;
-
- pf = (struct ice_pf *)tty->driver->driver_state;
if (!pf)
return -EFAULT;
- /* Only allow to write on TTY 0 */
- if (gnss != pf->gnss_serial[0])
- return -EIO;
-
- mutex_lock(&gnss->gnss_mutex);
+ if (!test_bit(ICE_FLAG_GNSS, pf->flags))
+ return -EFAULT;
- if (!gnss->open_count) {
- err = -EINVAL;
- goto exit;
- }
+ gnss = pf->gnss_serial;
+ if (!gnss)
+ return -ENODEV;
cmd_buf = kcalloc(count, sizeof(*buf), GFP_KERNEL);
- if (!cmd_buf) {
- err = -ENOMEM;
- goto exit;
- }
+ if (!cmd_buf)
+ return -ENOMEM;
memcpy(cmd_buf, buf, count);
-
- /* Send the data out to a hardware port */
write_buf = kzalloc(sizeof(*write_buf), GFP_KERNEL);
if (!write_buf) {
kfree(cmd_buf);
- err = -ENOMEM;
- goto exit;
+ return -ENOMEM;
}
write_buf->buf = cmd_buf;
@@ -373,141 +339,89 @@ ice_gnss_tty_write(struct tty_struct *tty, const unsigned char *buf, int count)
INIT_LIST_HEAD(&write_buf->queue);
list_add_tail(&write_buf->queue, &gnss->queue);
kthread_queue_work(gnss->kworker, &gnss->write_work);
-exit:
- mutex_unlock(&gnss->gnss_mutex);
+
return err;
}
+static const struct gnss_operations ice_gnss_ops = {
+ .open = ice_gnss_open,
+ .close = ice_gnss_close,
+ .write_raw = ice_gnss_write,
+};
+
/**
- * ice_gnss_tty_write_room - Returns the numbers of characters to be written.
- * @tty: pointer to the tty_struct
+ * ice_gnss_register - Register GNSS receiver
+ * @pf: Board private structure
+ *
+ * Allocate and register GNSS receiver in the Linux GNSS subsystem.
*
- * This routine returns the numbers of characters the tty driver will accept
- * for queuing to be written or 0 if either the TTY is not open or user
- * tries to write to the TTY other than the first.
+ * Return:
+ * * 0 - success
+ * * negative - error code
*/
-static unsigned int ice_gnss_tty_write_room(struct tty_struct *tty)
+static int ice_gnss_register(struct ice_pf *pf)
{
- struct gnss_serial *gnss = tty->driver_data;
-
- /* Only allow to write on TTY 0 */
- if (!gnss || gnss != gnss->back->gnss_serial[0])
- return 0;
-
- mutex_lock(&gnss->gnss_mutex);
+ struct gnss_device *gdev;
+ int ret;
+
+ gdev = gnss_allocate_device(ice_pf_to_dev(pf));
+ if (!gdev) {
+ dev_err(ice_pf_to_dev(pf),
+ "gnss_allocate_device returns NULL\n");
+ return -ENOMEM;
+ }
- if (!gnss->open_count) {
- mutex_unlock(&gnss->gnss_mutex);
- return 0;
+ gdev->ops = &ice_gnss_ops;
+ gdev->type = GNSS_TYPE_UBX;
+ gnss_set_drvdata(gdev, pf);
+ ret = gnss_register_device(gdev);
+ if (ret) {
+ dev_err(ice_pf_to_dev(pf), "gnss_register_device err=%d\n",
+ ret);
+ gnss_put_device(gdev);
+ } else {
+ pf->gnss_dev = gdev;
}
- mutex_unlock(&gnss->gnss_mutex);
- return ICE_GNSS_TTY_WRITE_BUF;
+ return ret;
}
-static const struct tty_operations tty_gps_ops = {
- .open = ice_gnss_tty_open,
- .close = ice_gnss_tty_close,
- .write = ice_gnss_tty_write,
- .write_room = ice_gnss_tty_write_room,
-};
-
/**
- * ice_gnss_create_tty_driver - Create a TTY driver for GNSS
+ * ice_gnss_deregister - Deregister GNSS receiver
* @pf: Board private structure
+ *
+ * Deregister GNSS receiver from the Linux GNSS subsystem,
+ * release its resources.
*/
-static struct tty_driver *ice_gnss_create_tty_driver(struct ice_pf *pf)
+static void ice_gnss_deregister(struct ice_pf *pf)
{
- struct device *dev = ice_pf_to_dev(pf);
- const int ICE_TTYDRV_NAME_MAX = 14;
- struct tty_driver *tty_driver;
- char *ttydrv_name;
- unsigned int i;
- int err;
-
- tty_driver = tty_alloc_driver(ICE_GNSS_TTY_MINOR_DEVICES,
- TTY_DRIVER_REAL_RAW);
- if (IS_ERR(tty_driver)) {
- dev_err(dev, "Failed to allocate memory for GNSS TTY\n");
- return NULL;
- }
-
- ttydrv_name = kzalloc(ICE_TTYDRV_NAME_MAX, GFP_KERNEL);
- if (!ttydrv_name) {
- tty_driver_kref_put(tty_driver);
- return NULL;
+ if (pf->gnss_dev) {
+ gnss_deregister_device(pf->gnss_dev);
+ gnss_put_device(pf->gnss_dev);
+ pf->gnss_dev = NULL;
}
-
- snprintf(ttydrv_name, ICE_TTYDRV_NAME_MAX, "ttyGNSS_%02x%02x_",
- (u8)pf->pdev->bus->number, (u8)PCI_SLOT(pf->pdev->devfn));
-
- /* Initialize the tty driver*/
- tty_driver->owner = THIS_MODULE;
- tty_driver->driver_name = dev_driver_string(dev);
- tty_driver->name = (const char *)ttydrv_name;
- tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
- tty_driver->subtype = SERIAL_TYPE_NORMAL;
- tty_driver->init_termios = tty_std_termios;
- tty_driver->init_termios.c_iflag &= ~INLCR;
- tty_driver->init_termios.c_iflag |= IGNCR;
- tty_driver->init_termios.c_oflag &= ~OPOST;
- tty_driver->init_termios.c_lflag &= ~ICANON;
- tty_driver->init_termios.c_cflag &= ~(CSIZE | CBAUD | CBAUDEX);
- /* baud rate 9600 */
- tty_termios_encode_baud_rate(&tty_driver->init_termios, 9600, 9600);
- tty_driver->driver_state = pf;
- tty_set_operations(tty_driver, &tty_gps_ops);
-
- for (i = 0; i < ICE_GNSS_TTY_MINOR_DEVICES; i++) {
- pf->gnss_tty_port[i] = kzalloc(sizeof(*pf->gnss_tty_port[i]),
- GFP_KERNEL);
- if (!pf->gnss_tty_port[i])
- goto err_out;
-
- pf->gnss_serial[i] = NULL;
-
- tty_port_init(pf->gnss_tty_port[i]);
- tty_port_link_device(pf->gnss_tty_port[i], tty_driver, i);
- }
-
- err = tty_register_driver(tty_driver);
- if (err) {
- dev_err(dev, "Failed to register TTY driver err=%d\n", err);
- goto err_out;
- }
-
- for (i = 0; i < ICE_GNSS_TTY_MINOR_DEVICES; i++)
- dev_info(dev, "%s%d registered\n", ttydrv_name, i);
-
- return tty_driver;
-
-err_out:
- while (i--) {
- tty_port_destroy(pf->gnss_tty_port[i]);
- kfree(pf->gnss_tty_port[i]);
- }
- kfree(ttydrv_name);
- tty_driver_kref_put(pf->ice_gnss_tty_driver);
-
- return NULL;
}
/**
- * ice_gnss_init - Initialize GNSS TTY support
+ * ice_gnss_init - Initialize GNSS support
* @pf: Board private structure
*/
void ice_gnss_init(struct ice_pf *pf)
{
- struct tty_driver *tty_driver;
+ int ret;
- tty_driver = ice_gnss_create_tty_driver(pf);
- if (!tty_driver)
+ pf->gnss_serial = ice_gnss_struct_init(pf);
+ if (!pf->gnss_serial)
return;
- pf->ice_gnss_tty_driver = tty_driver;
-
- set_bit(ICE_FLAG_GNSS, pf->flags);
- dev_info(ice_pf_to_dev(pf), "GNSS TTY init successful\n");
+ ret = ice_gnss_register(pf);
+ if (!ret) {
+ set_bit(ICE_FLAG_GNSS, pf->flags);
+ dev_info(ice_pf_to_dev(pf), "GNSS init successful\n");
+ } else {
+ ice_gnss_exit(pf);
+ dev_err(ice_pf_to_dev(pf), "GNSS init failure\n");
+ }
}
/**
@@ -516,31 +430,20 @@ void ice_gnss_init(struct ice_pf *pf)
*/
void ice_gnss_exit(struct ice_pf *pf)
{
- unsigned int i;
+ ice_gnss_deregister(pf);
+ clear_bit(ICE_FLAG_GNSS, pf->flags);
- if (!test_bit(ICE_FLAG_GNSS, pf->flags) || !pf->ice_gnss_tty_driver)
- return;
-
- for (i = 0; i < ICE_GNSS_TTY_MINOR_DEVICES; i++) {
- if (pf->gnss_tty_port[i]) {
- tty_port_destroy(pf->gnss_tty_port[i]);
- kfree(pf->gnss_tty_port[i]);
- }
+ if (pf->gnss_serial) {
+ struct gnss_serial *gnss = pf->gnss_serial;
- if (pf->gnss_serial[i]) {
- struct gnss_serial *gnss = pf->gnss_serial[i];
+ kthread_cancel_work_sync(&gnss->write_work);
+ kthread_cancel_delayed_work_sync(&gnss->read_work);
+ kthread_destroy_worker(gnss->kworker);
+ gnss->kworker = NULL;
- kthread_cancel_work_sync(&gnss->write_work);
- kthread_cancel_delayed_work_sync(&gnss->read_work);
- kfree(gnss);
- pf->gnss_serial[i] = NULL;
- }
+ kfree(gnss);
+ pf->gnss_serial = NULL;
}
-
- tty_unregister_driver(pf->ice_gnss_tty_driver);
- kfree(pf->ice_gnss_tty_driver->name);
- tty_driver_kref_put(pf->ice_gnss_tty_driver);
- pf->ice_gnss_tty_driver = NULL;
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_gnss.h b/drivers/net/ethernet/intel/ice/ice_gnss.h
index f454dd1d9285..31db0701d13f 100644
--- a/drivers/net/ethernet/intel/ice/ice_gnss.h
+++ b/drivers/net/ethernet/intel/ice/ice_gnss.h
@@ -4,15 +4,8 @@
#ifndef _ICE_GNSS_H_
#define _ICE_GNSS_H_
-#include <linux/tty.h>
-#include <linux/tty_flip.h>
-
#define ICE_E810T_GNSS_I2C_BUS 0x2
#define ICE_GNSS_TIMER_DELAY_TIME (HZ / 10) /* 0.1 second per message */
-/* Create 2 minor devices, both using the same GNSS module. First one is RW,
- * second one RO.
- */
-#define ICE_GNSS_TTY_MINOR_DEVICES 2
#define ICE_GNSS_TTY_WRITE_BUF 250
#define ICE_MAX_I2C_DATA_SIZE FIELD_MAX(ICE_AQC_I2C_DATA_SIZE_M)
#define ICE_MAX_I2C_WRITE_BYTES 4
@@ -36,13 +29,9 @@ struct gnss_write_buf {
unsigned char *buf;
};
-
/**
* struct gnss_serial - data used to initialize GNSS TTY port
* @back: back pointer to PF
- * @tty: pointer to the tty for this device
- * @open_count: number of times this port has been opened
- * @gnss_mutex: gnss_mutex used to protect GNSS serial operations
* @kworker: kwork thread for handling periodic work
* @read_work: read_work function for handling GNSS reads
* @write_work: write_work function for handling GNSS writes
@@ -50,16 +39,13 @@ struct gnss_write_buf {
*/
struct gnss_serial {
struct ice_pf *back;
- struct tty_struct *tty;
- int open_count;
- struct mutex gnss_mutex; /* protects GNSS serial structure */
struct kthread_worker *kworker;
struct kthread_delayed_work read_work;
struct kthread_work write_work;
struct list_head queue;
};
-#if IS_ENABLED(CONFIG_TTY)
+#if IS_ENABLED(CONFIG_ICE_GNSS)
void ice_gnss_init(struct ice_pf *pf);
void ice_gnss_exit(struct ice_pf *pf);
bool ice_gnss_is_gps_present(struct ice_hw *hw);
@@ -70,5 +56,5 @@ static inline bool ice_gnss_is_gps_present(struct ice_hw *hw)
{
return false;
}
-#endif /* IS_ENABLED(CONFIG_TTY) */
+#endif /* IS_ENABLED(CONFIG_ICE_GNSS) */
#endif /* _ICE_GNSS_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_idc.c b/drivers/net/ethernet/intel/ice/ice_idc.c
index 895c32bcc8b5..e6bc2285071e 100644
--- a/drivers/net/ethernet/intel/ice/ice_idc.c
+++ b/drivers/net/ethernet/intel/ice/ice_idc.c
@@ -6,6 +6,8 @@
#include "ice_lib.h"
#include "ice_dcb_lib.h"
+static DEFINE_XARRAY_ALLOC1(ice_aux_id);
+
/**
* ice_get_auxiliary_drv - retrieve iidc_auxiliary_drv struct
* @pf: pointer to PF struct
@@ -246,6 +248,17 @@ static int ice_reserve_rdma_qvector(struct ice_pf *pf)
}
/**
+ * ice_free_rdma_qvector - free vector resources reserved for RDMA driver
+ * @pf: board private structure to initialize
+ */
+static void ice_free_rdma_qvector(struct ice_pf *pf)
+{
+ pf->num_avail_sw_msix -= pf->num_rdma_msix;
+ ice_free_res(pf->irq_tracker, pf->rdma_base_vector,
+ ICE_RES_RDMA_VEC_ID);
+}
+
+/**
* ice_adev_release - function to be mapped to AUX dev's release op
* @dev: pointer to device to free
*/
@@ -331,12 +344,48 @@ int ice_init_rdma(struct ice_pf *pf)
struct device *dev = &pf->pdev->dev;
int ret;
+ if (!ice_is_rdma_ena(pf)) {
+ dev_warn(dev, "RDMA is not supported on this device\n");
+ return 0;
+ }
+
+ ret = xa_alloc(&ice_aux_id, &pf->aux_idx, NULL, XA_LIMIT(1, INT_MAX),
+ GFP_KERNEL);
+ if (ret) {
+ dev_err(dev, "Failed to allocate device ID for AUX driver\n");
+ return -ENOMEM;
+ }
+
/* Reserve vector resources */
ret = ice_reserve_rdma_qvector(pf);
if (ret < 0) {
dev_err(dev, "failed to reserve vectors for RDMA\n");
- return ret;
+ goto err_reserve_rdma_qvector;
}
pf->rdma_mode |= IIDC_RDMA_PROTOCOL_ROCEV2;
- return ice_plug_aux_dev(pf);
+ ret = ice_plug_aux_dev(pf);
+ if (ret)
+ goto err_plug_aux_dev;
+ return 0;
+
+err_plug_aux_dev:
+ ice_free_rdma_qvector(pf);
+err_reserve_rdma_qvector:
+ pf->adev = NULL;
+ xa_erase(&ice_aux_id, pf->aux_idx);
+ return ret;
+}
+
+/**
+ * ice_deinit_rdma - deinitialize RDMA on PF
+ * @pf: ptr to ice_pf
+ */
+void ice_deinit_rdma(struct ice_pf *pf)
+{
+ if (!ice_is_rdma_ena(pf))
+ return;
+
+ ice_unplug_aux_dev(pf);
+ ice_free_rdma_qvector(pf);
+ xa_erase(&ice_aux_id, pf->aux_idx);
}
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index a596e07b3ce9..781475480ff2 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -166,14 +166,14 @@ static void ice_vsi_set_num_desc(struct ice_vsi *vsi)
/**
* ice_vsi_set_num_qs - Set number of queues, descriptors and vectors for a VSI
* @vsi: the VSI being configured
- * @vf: the VF associated with this VSI, if any
*
* Return 0 on success and a negative value on error
*/
-static void ice_vsi_set_num_qs(struct ice_vsi *vsi, struct ice_vf *vf)
+static void ice_vsi_set_num_qs(struct ice_vsi *vsi)
{
enum ice_vsi_type vsi_type = vsi->type;
struct ice_pf *pf = vsi->back;
+ struct ice_vf *vf = vsi->vf;
if (WARN_ON(vsi_type == ICE_VSI_VF && !vf))
return;
@@ -282,10 +282,10 @@ static int ice_get_free_slot(void *array, int size, int curr)
}
/**
- * ice_vsi_delete - delete a VSI from the switch
+ * ice_vsi_delete_from_hw - delete a VSI from the switch
* @vsi: pointer to VSI being removed
*/
-void ice_vsi_delete(struct ice_vsi *vsi)
+static void ice_vsi_delete_from_hw(struct ice_vsi *vsi)
{
struct ice_pf *pf = vsi->back;
struct ice_vsi_ctx *ctxt;
@@ -348,47 +348,144 @@ static void ice_vsi_free_arrays(struct ice_vsi *vsi)
}
/**
- * ice_vsi_clear - clean up and deallocate the provided VSI
+ * ice_vsi_free_stats - Free the ring statistics structures
+ * @vsi: VSI pointer
+ */
+static void ice_vsi_free_stats(struct ice_vsi *vsi)
+{
+ struct ice_vsi_stats *vsi_stat;
+ struct ice_pf *pf = vsi->back;
+ int i;
+
+ if (vsi->type == ICE_VSI_CHNL)
+ return;
+ if (!pf->vsi_stats)
+ return;
+
+ vsi_stat = pf->vsi_stats[vsi->idx];
+ if (!vsi_stat)
+ return;
+
+ ice_for_each_alloc_txq(vsi, i) {
+ if (vsi_stat->tx_ring_stats[i]) {
+ kfree_rcu(vsi_stat->tx_ring_stats[i], rcu);
+ WRITE_ONCE(vsi_stat->tx_ring_stats[i], NULL);
+ }
+ }
+
+ ice_for_each_alloc_rxq(vsi, i) {
+ if (vsi_stat->rx_ring_stats[i]) {
+ kfree_rcu(vsi_stat->rx_ring_stats[i], rcu);
+ WRITE_ONCE(vsi_stat->rx_ring_stats[i], NULL);
+ }
+ }
+
+ kfree(vsi_stat->tx_ring_stats);
+ kfree(vsi_stat->rx_ring_stats);
+ kfree(vsi_stat);
+ pf->vsi_stats[vsi->idx] = NULL;
+}
+
+/**
+ * ice_vsi_alloc_ring_stats - Allocates Tx and Rx ring stats for the VSI
+ * @vsi: VSI which is having stats allocated
+ */
+static int ice_vsi_alloc_ring_stats(struct ice_vsi *vsi)
+{
+ struct ice_ring_stats **tx_ring_stats;
+ struct ice_ring_stats **rx_ring_stats;
+ struct ice_vsi_stats *vsi_stats;
+ struct ice_pf *pf = vsi->back;
+ u16 i;
+
+ vsi_stats = pf->vsi_stats[vsi->idx];
+ tx_ring_stats = vsi_stats->tx_ring_stats;
+ rx_ring_stats = vsi_stats->rx_ring_stats;
+
+ /* Allocate Tx ring stats */
+ ice_for_each_alloc_txq(vsi, i) {
+ struct ice_ring_stats *ring_stats;
+ struct ice_tx_ring *ring;
+
+ ring = vsi->tx_rings[i];
+ ring_stats = tx_ring_stats[i];
+
+ if (!ring_stats) {
+ ring_stats = kzalloc(sizeof(*ring_stats), GFP_KERNEL);
+ if (!ring_stats)
+ goto err_out;
+
+ WRITE_ONCE(tx_ring_stats[i], ring_stats);
+ }
+
+ ring->ring_stats = ring_stats;
+ }
+
+ /* Allocate Rx ring stats */
+ ice_for_each_alloc_rxq(vsi, i) {
+ struct ice_ring_stats *ring_stats;
+ struct ice_rx_ring *ring;
+
+ ring = vsi->rx_rings[i];
+ ring_stats = rx_ring_stats[i];
+
+ if (!ring_stats) {
+ ring_stats = kzalloc(sizeof(*ring_stats), GFP_KERNEL);
+ if (!ring_stats)
+ goto err_out;
+
+ WRITE_ONCE(rx_ring_stats[i], ring_stats);
+ }
+
+ ring->ring_stats = ring_stats;
+ }
+
+ return 0;
+
+err_out:
+ ice_vsi_free_stats(vsi);
+ return -ENOMEM;
+}
+
+/**
+ * ice_vsi_free - clean up and deallocate the provided VSI
* @vsi: pointer to VSI being cleared
*
* This deallocates the VSI's queue resources, removes it from the PF's
* VSI array if necessary, and deallocates the VSI
- *
- * Returns 0 on success, negative on failure
*/
-int ice_vsi_clear(struct ice_vsi *vsi)
+static void ice_vsi_free(struct ice_vsi *vsi)
{
struct ice_pf *pf = NULL;
struct device *dev;
- if (!vsi)
- return 0;
-
- if (!vsi->back)
- return -EINVAL;
+ if (!vsi || !vsi->back)
+ return;
pf = vsi->back;
dev = ice_pf_to_dev(pf);
if (!pf->vsi[vsi->idx] || pf->vsi[vsi->idx] != vsi) {
dev_dbg(dev, "vsi does not exist at pf->vsi[%d]\n", vsi->idx);
- return -EINVAL;
+ return;
}
mutex_lock(&pf->sw_mutex);
/* updates the PF for this cleared VSI */
pf->vsi[vsi->idx] = NULL;
- if (vsi->idx < pf->next_vsi && vsi->type != ICE_VSI_CTRL)
- pf->next_vsi = vsi->idx;
- if (vsi->idx < pf->next_vsi && vsi->type == ICE_VSI_CTRL && vsi->vf)
- pf->next_vsi = vsi->idx;
+ pf->next_vsi = vsi->idx;
+ ice_vsi_free_stats(vsi);
ice_vsi_free_arrays(vsi);
mutex_unlock(&pf->sw_mutex);
devm_kfree(dev, vsi);
+}
- return 0;
+void ice_vsi_delete(struct ice_vsi *vsi)
+{
+ ice_vsi_delete_from_hw(vsi);
+ ice_vsi_free(vsi);
}
/**
@@ -461,6 +558,10 @@ static int ice_vsi_alloc_stat_arrays(struct ice_vsi *vsi)
if (!pf->vsi_stats)
return -ENOENT;
+ if (pf->vsi_stats[vsi->idx])
+ /* realloc will happen in rebuild path */
+ return 0;
+
vsi_stat = kzalloc(sizeof(*vsi_stat), GFP_KERNEL);
if (!vsi_stat)
return -ENOMEM;
@@ -491,128 +592,93 @@ err_alloc_tx:
}
/**
- * ice_vsi_alloc - Allocates the next available struct VSI in the PF
- * @pf: board private structure
- * @vsi_type: type of VSI
+ * ice_vsi_alloc_def - set default values for already allocated VSI
+ * @vsi: ptr to VSI
* @ch: ptr to channel
- * @vf: VF for ICE_VSI_VF and ICE_VSI_CTRL
- *
- * The VF pointer is used for ICE_VSI_VF and ICE_VSI_CTRL. For ICE_VSI_CTRL,
- * it may be NULL in the case there is no association with a VF. For
- * ICE_VSI_VF the VF pointer *must not* be NULL.
- *
- * returns a pointer to a VSI on success, NULL on failure.
*/
-static struct ice_vsi *
-ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type vsi_type,
- struct ice_channel *ch, struct ice_vf *vf)
+static int
+ice_vsi_alloc_def(struct ice_vsi *vsi, struct ice_channel *ch)
{
- struct device *dev = ice_pf_to_dev(pf);
- struct ice_vsi *vsi = NULL;
-
- if (WARN_ON(vsi_type == ICE_VSI_VF && !vf))
- return NULL;
-
- /* Need to protect the allocation of the VSIs at the PF level */
- mutex_lock(&pf->sw_mutex);
-
- /* If we have already allocated our maximum number of VSIs,
- * pf->next_vsi will be ICE_NO_VSI. If not, pf->next_vsi index
- * is available to be populated
- */
- if (pf->next_vsi == ICE_NO_VSI) {
- dev_dbg(dev, "out of VSI slots!\n");
- goto unlock_pf;
+ if (vsi->type != ICE_VSI_CHNL) {
+ ice_vsi_set_num_qs(vsi);
+ if (ice_vsi_alloc_arrays(vsi))
+ return -ENOMEM;
}
- vsi = devm_kzalloc(dev, sizeof(*vsi), GFP_KERNEL);
- if (!vsi)
- goto unlock_pf;
-
- vsi->type = vsi_type;
- vsi->back = pf;
- set_bit(ICE_VSI_DOWN, vsi->state);
-
- if (vsi_type == ICE_VSI_VF)
- ice_vsi_set_num_qs(vsi, vf);
- else if (vsi_type != ICE_VSI_CHNL)
- ice_vsi_set_num_qs(vsi, NULL);
-
switch (vsi->type) {
case ICE_VSI_SWITCHDEV_CTRL:
- if (ice_vsi_alloc_arrays(vsi))
- goto err_rings;
-
/* Setup eswitch MSIX irq handler for VSI */
vsi->irq_handler = ice_eswitch_msix_clean_rings;
break;
case ICE_VSI_PF:
- if (ice_vsi_alloc_arrays(vsi))
- goto err_rings;
-
/* Setup default MSIX irq handler for VSI */
vsi->irq_handler = ice_msix_clean_rings;
break;
case ICE_VSI_CTRL:
- if (ice_vsi_alloc_arrays(vsi))
- goto err_rings;
-
/* Setup ctrl VSI MSIX irq handler */
vsi->irq_handler = ice_msix_clean_ctrl_vsi;
-
- /* For the PF control VSI this is NULL, for the VF control VSI
- * this will be the first VF to allocate it.
- */
- vsi->vf = vf;
- break;
- case ICE_VSI_VF:
- if (ice_vsi_alloc_arrays(vsi))
- goto err_rings;
- vsi->vf = vf;
break;
case ICE_VSI_CHNL:
if (!ch)
- goto err_rings;
+ return -EINVAL;
+
vsi->num_rxq = ch->num_rxq;
vsi->num_txq = ch->num_txq;
vsi->next_base_q = ch->base_q;
break;
+ case ICE_VSI_VF:
case ICE_VSI_LB:
- if (ice_vsi_alloc_arrays(vsi))
- goto err_rings;
break;
default:
- dev_warn(dev, "Unknown VSI type %d\n", vsi->type);
- goto unlock_pf;
+ ice_vsi_free_arrays(vsi);
+ return -EINVAL;
}
- if (vsi->type == ICE_VSI_CTRL && !vf) {
- /* Use the last VSI slot as the index for PF control VSI */
- vsi->idx = pf->num_alloc_vsi - 1;
- pf->ctrl_vsi_idx = vsi->idx;
- pf->vsi[vsi->idx] = vsi;
- } else {
- /* fill slot and make note of the index */
- vsi->idx = pf->next_vsi;
- pf->vsi[pf->next_vsi] = vsi;
+ return 0;
+}
+
+/**
+ * ice_vsi_alloc - Allocates the next available struct VSI in the PF
+ * @pf: board private structure
+ *
+ * Reserves a VSI index from the PF and allocates an empty VSI structure
+ * without a type. The VSI structure must later be initialized by calling
+ * ice_vsi_cfg().
+ *
+ * returns a pointer to a VSI on success, NULL on failure.
+ */
+static struct ice_vsi *ice_vsi_alloc(struct ice_pf *pf)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_vsi *vsi = NULL;
- /* prepare pf->next_vsi for next use */
- pf->next_vsi = ice_get_free_slot(pf->vsi, pf->num_alloc_vsi,
- pf->next_vsi);
+ /* Need to protect the allocation of the VSIs at the PF level */
+ mutex_lock(&pf->sw_mutex);
+
+ /* If we have already allocated our maximum number of VSIs,
+ * pf->next_vsi will be ICE_NO_VSI. If not, pf->next_vsi index
+ * is available to be populated
+ */
+ if (pf->next_vsi == ICE_NO_VSI) {
+ dev_dbg(dev, "out of VSI slots!\n");
+ goto unlock_pf;
}
- if (vsi->type == ICE_VSI_CTRL && vf)
- vf->ctrl_vsi_idx = vsi->idx;
+ vsi = devm_kzalloc(dev, sizeof(*vsi), GFP_KERNEL);
+ if (!vsi)
+ goto unlock_pf;
- /* allocate memory for Tx/Rx ring stat pointers */
- if (ice_vsi_alloc_stat_arrays(vsi))
- goto err_rings;
+ vsi->back = pf;
+ set_bit(ICE_VSI_DOWN, vsi->state);
- goto unlock_pf;
+ /* fill slot and make note of the index */
+ vsi->idx = pf->next_vsi;
+ pf->vsi[pf->next_vsi] = vsi;
+
+ /* prepare pf->next_vsi for next use */
+ pf->next_vsi = ice_get_free_slot(pf->vsi, pf->num_alloc_vsi,
+ pf->next_vsi);
-err_rings:
- devm_kfree(dev, vsi);
- vsi = NULL;
unlock_pf:
mutex_unlock(&pf->sw_mutex);
return vsi;
@@ -1177,12 +1243,15 @@ ice_chnl_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
/**
* ice_vsi_init - Create and initialize a VSI
* @vsi: the VSI being configured
- * @init_vsi: is this call creating a VSI
+ * @vsi_flags: VSI configuration flags
+ *
+ * Set ICE_FLAG_VSI_INIT to initialize a new VSI context, clear it to
+ * reconfigure an existing context.
*
* This initializes a VSI context depending on the VSI type to be added and
* passes it down to the add_vsi aq command to create a new VSI.
*/
-static int ice_vsi_init(struct ice_vsi *vsi, bool init_vsi)
+static int ice_vsi_init(struct ice_vsi *vsi, u32 vsi_flags)
{
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
@@ -1244,7 +1313,7 @@ static int ice_vsi_init(struct ice_vsi *vsi, bool init_vsi)
/* if updating VSI context, make sure to set valid_section:
* to indicate which section of VSI context being updated
*/
- if (!init_vsi)
+ if (!(vsi_flags & ICE_VSI_FLAG_INIT))
ctxt->info.valid_sections |=
cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID);
}
@@ -1257,7 +1326,8 @@ static int ice_vsi_init(struct ice_vsi *vsi, bool init_vsi)
if (ret)
goto out;
- if (!init_vsi) /* means VSI being updated */
+ if (!(vsi_flags & ICE_VSI_FLAG_INIT))
+ /* means VSI being updated */
/* must to indicate which section of VSI context are
* being modified
*/
@@ -1272,7 +1342,7 @@ static int ice_vsi_init(struct ice_vsi *vsi, bool init_vsi)
cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
}
- if (init_vsi) {
+ if (vsi_flags & ICE_VSI_FLAG_INIT) {
ret = ice_add_vsi(hw, vsi->idx, ctxt, NULL);
if (ret) {
dev_err(dev, "Add VSI failed, err %d\n", ret);
@@ -1436,7 +1506,7 @@ static int ice_get_vf_ctrl_res(struct ice_pf *pf, struct ice_vsi *vsi)
* ice_vsi_setup_vector_base - Set up the base vector for the given VSI
* @vsi: ptr to the VSI
*
- * This should only be called after ice_vsi_alloc() which allocates the
+ * This should only be called after ice_vsi_alloc_def() which allocates the
* corresponding SW VSI structure and initializes num_queue_pairs for the
* newly allocated VSI.
*
@@ -1584,106 +1654,6 @@ err_out:
}
/**
- * ice_vsi_free_stats - Free the ring statistics structures
- * @vsi: VSI pointer
- */
-static void ice_vsi_free_stats(struct ice_vsi *vsi)
-{
- struct ice_vsi_stats *vsi_stat;
- struct ice_pf *pf = vsi->back;
- int i;
-
- if (vsi->type == ICE_VSI_CHNL)
- return;
- if (!pf->vsi_stats)
- return;
-
- vsi_stat = pf->vsi_stats[vsi->idx];
- if (!vsi_stat)
- return;
-
- ice_for_each_alloc_txq(vsi, i) {
- if (vsi_stat->tx_ring_stats[i]) {
- kfree_rcu(vsi_stat->tx_ring_stats[i], rcu);
- WRITE_ONCE(vsi_stat->tx_ring_stats[i], NULL);
- }
- }
-
- ice_for_each_alloc_rxq(vsi, i) {
- if (vsi_stat->rx_ring_stats[i]) {
- kfree_rcu(vsi_stat->rx_ring_stats[i], rcu);
- WRITE_ONCE(vsi_stat->rx_ring_stats[i], NULL);
- }
- }
-
- kfree(vsi_stat->tx_ring_stats);
- kfree(vsi_stat->rx_ring_stats);
- kfree(vsi_stat);
- pf->vsi_stats[vsi->idx] = NULL;
-}
-
-/**
- * ice_vsi_alloc_ring_stats - Allocates Tx and Rx ring stats for the VSI
- * @vsi: VSI which is having stats allocated
- */
-static int ice_vsi_alloc_ring_stats(struct ice_vsi *vsi)
-{
- struct ice_ring_stats **tx_ring_stats;
- struct ice_ring_stats **rx_ring_stats;
- struct ice_vsi_stats *vsi_stats;
- struct ice_pf *pf = vsi->back;
- u16 i;
-
- vsi_stats = pf->vsi_stats[vsi->idx];
- tx_ring_stats = vsi_stats->tx_ring_stats;
- rx_ring_stats = vsi_stats->rx_ring_stats;
-
- /* Allocate Tx ring stats */
- ice_for_each_alloc_txq(vsi, i) {
- struct ice_ring_stats *ring_stats;
- struct ice_tx_ring *ring;
-
- ring = vsi->tx_rings[i];
- ring_stats = tx_ring_stats[i];
-
- if (!ring_stats) {
- ring_stats = kzalloc(sizeof(*ring_stats), GFP_KERNEL);
- if (!ring_stats)
- goto err_out;
-
- WRITE_ONCE(tx_ring_stats[i], ring_stats);
- }
-
- ring->ring_stats = ring_stats;
- }
-
- /* Allocate Rx ring stats */
- ice_for_each_alloc_rxq(vsi, i) {
- struct ice_ring_stats *ring_stats;
- struct ice_rx_ring *ring;
-
- ring = vsi->rx_rings[i];
- ring_stats = rx_ring_stats[i];
-
- if (!ring_stats) {
- ring_stats = kzalloc(sizeof(*ring_stats), GFP_KERNEL);
- if (!ring_stats)
- goto err_out;
-
- WRITE_ONCE(rx_ring_stats[i], ring_stats);
- }
-
- ring->ring_stats = ring_stats;
- }
-
- return 0;
-
-err_out:
- ice_vsi_free_stats(vsi);
- return -ENOMEM;
-}
-
-/**
* ice_vsi_manage_rss_lut - disable/enable RSS
* @vsi: the VSI being changed
* @ena: boolean value indicating if this is an enable or disable request
@@ -1992,8 +1962,8 @@ void ice_update_eth_stats(struct ice_vsi *vsi)
void ice_vsi_cfg_frame_size(struct ice_vsi *vsi)
{
if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) {
- vsi->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX;
- vsi->rx_buf_len = ICE_RXBUF_2048;
+ vsi->max_frame = ICE_MAX_FRAME_LEGACY_RX;
+ vsi->rx_buf_len = ICE_RXBUF_1664;
#if (PAGE_SIZE < 8192)
} else if (!ICE_2K_TOO_SMALL_WITH_PADDING &&
(vsi->netdev->mtu <= ETH_DATA_LEN)) {
@@ -2002,11 +1972,7 @@ void ice_vsi_cfg_frame_size(struct ice_vsi *vsi)
#endif
} else {
vsi->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX;
-#if (PAGE_SIZE < 8192)
vsi->rx_buf_len = ICE_RXBUF_3072;
-#else
- vsi->rx_buf_len = ICE_RXBUF_2048;
-#endif
}
}
@@ -2645,54 +2611,97 @@ static void ice_set_agg_vsi(struct ice_vsi *vsi)
}
/**
- * ice_vsi_setup - Set up a VSI by a given type
- * @pf: board private structure
- * @pi: pointer to the port_info instance
- * @vsi_type: VSI type
- * @vf: pointer to VF to which this VSI connects. This field is used primarily
- * for the ICE_VSI_VF type. Other VSI types should pass NULL.
- * @ch: ptr to channel
- *
- * This allocates the sw VSI structure and its queue resources.
+ * ice_free_vf_ctrl_res - Free the VF control VSI resource
+ * @pf: pointer to PF structure
+ * @vsi: the VSI to free resources for
*
- * Returns pointer to the successfully allocated and configured VSI sw struct on
- * success, NULL on failure.
+ * Check if the VF control VSI resource is still in use. If no VF is using it
+ * any more, release the VSI resource. Otherwise, leave it to be cleaned up
+ * once no other VF uses it.
*/
-struct ice_vsi *
-ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
- enum ice_vsi_type vsi_type, struct ice_vf *vf,
- struct ice_channel *ch)
+static void ice_free_vf_ctrl_res(struct ice_pf *pf, struct ice_vsi *vsi)
+{
+ struct ice_vf *vf;
+ unsigned int bkt;
+
+ rcu_read_lock();
+ ice_for_each_vf_rcu(pf, bkt, vf) {
+ if (vf != vsi->vf && vf->ctrl_vsi_idx != ICE_NO_VSI) {
+ rcu_read_unlock();
+ return;
+ }
+ }
+ rcu_read_unlock();
+
+ /* No other VFs left that have control VSI. It is now safe to reclaim
+ * SW interrupts back to the common pool.
+ */
+ ice_free_res(pf->irq_tracker, vsi->base_vector,
+ ICE_RES_VF_CTRL_VEC_ID);
+ pf->num_avail_sw_msix += vsi->num_q_vectors;
+}
+
+static int ice_vsi_cfg_tc_lan(struct ice_pf *pf, struct ice_vsi *vsi)
{
u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
struct device *dev = ice_pf_to_dev(pf);
- struct ice_vsi *vsi;
int ret, i;
- if (vsi_type == ICE_VSI_CHNL)
- vsi = ice_vsi_alloc(pf, vsi_type, ch, NULL);
- else if (vsi_type == ICE_VSI_VF || vsi_type == ICE_VSI_CTRL)
- vsi = ice_vsi_alloc(pf, vsi_type, NULL, vf);
- else
- vsi = ice_vsi_alloc(pf, vsi_type, NULL, NULL);
+ /* configure VSI nodes based on number of queues and TC's */
+ ice_for_each_traffic_class(i) {
+ if (!(vsi->tc_cfg.ena_tc & BIT(i)))
+ continue;
- if (!vsi) {
- dev_err(dev, "could not allocate VSI\n");
- return NULL;
+ if (vsi->type == ICE_VSI_CHNL) {
+ if (!vsi->alloc_txq && vsi->num_txq)
+ max_txqs[i] = vsi->num_txq;
+ else
+ max_txqs[i] = pf->num_lan_tx;
+ } else {
+ max_txqs[i] = vsi->alloc_txq;
+ }
}
- vsi->port_info = pi;
+ dev_dbg(dev, "vsi->tc_cfg.ena_tc = %d\n", vsi->tc_cfg.ena_tc);
+ ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
+ max_txqs);
+ if (ret) {
+ dev_err(dev, "VSI %d failed lan queue config, error %d\n",
+ vsi->vsi_num, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_vsi_cfg_def - configure default VSI based on the type
+ * @vsi: pointer to VSI
+ * @params: the parameters to configure this VSI with
+ */
+static int
+ice_vsi_cfg_def(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params)
+{
+ struct device *dev = ice_pf_to_dev(vsi->back);
+ struct ice_pf *pf = vsi->back;
+ int ret;
+
vsi->vsw = pf->first_sw;
- if (vsi->type == ICE_VSI_PF)
- vsi->ethtype = ETH_P_PAUSE;
+
+ ret = ice_vsi_alloc_def(vsi, params->ch);
+ if (ret)
+ return ret;
+
+ /* allocate memory for Tx/Rx ring stat pointers */
+ if (ice_vsi_alloc_stat_arrays(vsi))
+ goto unroll_vsi_alloc;
ice_alloc_fd_res(vsi);
- if (vsi_type != ICE_VSI_CHNL) {
- if (ice_vsi_get_qs(vsi)) {
- dev_err(dev, "Failed to allocate queues. vsi->idx = %d\n",
- vsi->idx);
- goto unroll_vsi_alloc;
- }
+ if (ice_vsi_get_qs(vsi)) {
+ dev_err(dev, "Failed to allocate queues. vsi->idx = %d\n",
+ vsi->idx);
+ goto unroll_vsi_alloc_stat;
}
/* set RSS capabilities */
@@ -2702,7 +2711,7 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
ice_vsi_set_tc_cfg(vsi);
/* create the VSI */
- ret = ice_vsi_init(vsi, true);
+ ret = ice_vsi_init(vsi, params->flags);
if (ret)
goto unroll_get_qs;
@@ -2733,6 +2742,14 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
goto unroll_vector_base;
ice_vsi_map_rings_to_vectors(vsi);
+ if (ice_is_xdp_ena_vsi(vsi)) {
+ ret = ice_vsi_determine_xdp_res(vsi);
+ if (ret)
+ goto unroll_vector_base;
+ ret = ice_prepare_xdp_rings(vsi, vsi->xdp_prog);
+ if (ret)
+ goto unroll_vector_base;
+ }
/* ICE_VSI_CTRL does not need RSS so skip RSS processing */
if (vsi->type != ICE_VSI_CTRL)
@@ -2797,30 +2814,156 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
goto unroll_vsi_init;
}
- /* configure VSI nodes based on number of queues and TC's */
- ice_for_each_traffic_class(i) {
- if (!(vsi->tc_cfg.ena_tc & BIT(i)))
- continue;
+ return 0;
- if (vsi->type == ICE_VSI_CHNL) {
- if (!vsi->alloc_txq && vsi->num_txq)
- max_txqs[i] = vsi->num_txq;
- else
- max_txqs[i] = pf->num_lan_tx;
+unroll_vector_base:
+ /* reclaim SW interrupts back to the common pool */
+ ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx);
+ pf->num_avail_sw_msix += vsi->num_q_vectors;
+unroll_alloc_q_vector:
+ ice_vsi_free_q_vectors(vsi);
+unroll_vsi_init:
+ ice_vsi_delete_from_hw(vsi);
+unroll_get_qs:
+ ice_vsi_put_qs(vsi);
+unroll_vsi_alloc_stat:
+ ice_vsi_free_stats(vsi);
+unroll_vsi_alloc:
+ ice_vsi_free_arrays(vsi);
+ return ret;
+}
+
+/**
+ * ice_vsi_cfg - configure a previously allocated VSI
+ * @vsi: pointer to VSI
+ * @params: parameters used to configure this VSI
+ */
+int ice_vsi_cfg(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params)
+{
+ struct ice_pf *pf = vsi->back;
+ int ret;
+
+ if (WARN_ON(params->type == ICE_VSI_VF && !params->vf))
+ return -EINVAL;
+
+ vsi->type = params->type;
+ vsi->port_info = params->pi;
+
+ /* For VSIs which don't have a connected VF, this will be NULL */
+ vsi->vf = params->vf;
+
+ ret = ice_vsi_cfg_def(vsi, params);
+ if (ret)
+ return ret;
+
+ ret = ice_vsi_cfg_tc_lan(vsi->back, vsi);
+ if (ret)
+ ice_vsi_decfg(vsi);
+
+ if (vsi->type == ICE_VSI_CTRL) {
+ if (vsi->vf) {
+ WARN_ON(vsi->vf->ctrl_vsi_idx != ICE_NO_VSI);
+ vsi->vf->ctrl_vsi_idx = vsi->idx;
} else {
- max_txqs[i] = vsi->alloc_txq;
+ WARN_ON(pf->ctrl_vsi_idx != ICE_NO_VSI);
+ pf->ctrl_vsi_idx = vsi->idx;
}
}
- dev_dbg(dev, "vsi->tc_cfg.ena_tc = %d\n", vsi->tc_cfg.ena_tc);
- ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
- max_txqs);
- if (ret) {
- dev_err(dev, "VSI %d failed lan queue config, error %d\n",
- vsi->vsi_num, ret);
- goto unroll_clear_rings;
+ return ret;
+}
+
+/**
+ * ice_vsi_decfg - remove all VSI configuration
+ * @vsi: pointer to VSI
+ */
+void ice_vsi_decfg(struct ice_vsi *vsi)
+{
+ struct ice_pf *pf = vsi->back;
+ int err;
+
+ /* The Rx rule will only exist to remove if the LLDP FW
+ * engine is currently stopped
+ */
+ if (!ice_is_safe_mode(pf) && vsi->type == ICE_VSI_PF &&
+ !test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags))
+ ice_cfg_sw_lldp(vsi, false, false);
+
+ ice_fltr_remove_all(vsi);
+ ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
+ err = ice_rm_vsi_rdma_cfg(vsi->port_info, vsi->idx);
+ if (err)
+ dev_err(ice_pf_to_dev(pf), "Failed to remove RDMA scheduler config for VSI %u, err %d\n",
+ vsi->vsi_num, err);
+
+ if (ice_is_xdp_ena_vsi(vsi))
+ /* return value check can be skipped here, it always returns
+ * 0 if reset is in progress
+ */
+ ice_destroy_xdp_rings(vsi);
+
+ ice_vsi_clear_rings(vsi);
+ ice_vsi_free_q_vectors(vsi);
+ ice_vsi_put_qs(vsi);
+ ice_vsi_free_arrays(vsi);
+
+ /* SR-IOV determines needed MSIX resources all at once instead of per
+ * VSI since when VFs are spawned we know how many VFs there are and how
+ * many interrupts each VF needs. SR-IOV MSIX resources are also
+ * cleared in the same manner.
+ */
+ if (vsi->type == ICE_VSI_CTRL && vsi->vf) {
+ ice_free_vf_ctrl_res(pf, vsi);
+ } else if (vsi->type != ICE_VSI_VF) {
+ /* reclaim SW interrupts back to the common pool */
+ ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx);
+ pf->num_avail_sw_msix += vsi->num_q_vectors;
+ vsi->base_vector = 0;
}
+ if (vsi->type == ICE_VSI_VF &&
+ vsi->agg_node && vsi->agg_node->valid)
+ vsi->agg_node->num_vsis--;
+ if (vsi->agg_node) {
+ vsi->agg_node->valid = false;
+ vsi->agg_node->agg_id = 0;
+ }
+}
+
+/**
+ * ice_vsi_setup - Set up a VSI by a given type
+ * @pf: board private structure
+ * @params: parameters to use when creating the VSI
+ *
+ * This allocates the sw VSI structure and its queue resources.
+ *
+ * Returns pointer to the successfully allocated and configured VSI sw struct on
+ * success, NULL on failure.
+ */
+struct ice_vsi *
+ice_vsi_setup(struct ice_pf *pf, struct ice_vsi_cfg_params *params)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_vsi *vsi;
+ int ret;
+
+ /* ice_vsi_setup can only initialize a new VSI, and we must have
+ * a port_info structure for it.
+ */
+ if (WARN_ON(!(params->flags & ICE_VSI_FLAG_INIT)) ||
+ WARN_ON(!params->pi))
+ return NULL;
+
+ vsi = ice_vsi_alloc(pf);
+ if (!vsi) {
+ dev_err(dev, "could not allocate VSI\n");
+ return NULL;
+ }
+
+ ret = ice_vsi_cfg(vsi, params);
+ if (ret)
+ goto err_vsi_cfg;
+
/* Add switch rule to drop all Tx Flow Control Frames, of look up
* type ETHERTYPE from VSIs, and restrict malicious VF from sending
* out PAUSE or PFC frames. If enabled, FW can still send FC frames.
@@ -2830,34 +2973,21 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
* be dropped so that VFs cannot send LLDP packets to reconfig DCB
* settings in the HW.
*/
- if (!ice_is_safe_mode(pf))
- if (vsi->type == ICE_VSI_PF) {
- ice_fltr_add_eth(vsi, ETH_P_PAUSE, ICE_FLTR_TX,
- ICE_DROP_PACKET);
- ice_cfg_sw_lldp(vsi, true, true);
- }
+ if (!ice_is_safe_mode(pf) && vsi->type == ICE_VSI_PF) {
+ ice_fltr_add_eth(vsi, ETH_P_PAUSE, ICE_FLTR_TX,
+ ICE_DROP_PACKET);
+ ice_cfg_sw_lldp(vsi, true, true);
+ }
if (!vsi->agg_node)
ice_set_agg_vsi(vsi);
+
return vsi;
-unroll_clear_rings:
- ice_vsi_clear_rings(vsi);
-unroll_vector_base:
- /* reclaim SW interrupts back to the common pool */
- ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx);
- pf->num_avail_sw_msix += vsi->num_q_vectors;
-unroll_alloc_q_vector:
- ice_vsi_free_q_vectors(vsi);
-unroll_vsi_init:
- ice_vsi_free_stats(vsi);
- ice_vsi_delete(vsi);
-unroll_get_qs:
- ice_vsi_put_qs(vsi);
-unroll_vsi_alloc:
- if (vsi_type == ICE_VSI_VF)
+err_vsi_cfg:
+ if (params->type == ICE_VSI_VF)
ice_enable_lag(pf->lag);
- ice_vsi_clear(vsi);
+ ice_vsi_free(vsi);
return NULL;
}
@@ -3121,37 +3251,6 @@ void ice_napi_del(struct ice_vsi *vsi)
}
/**
- * ice_free_vf_ctrl_res - Free the VF control VSI resource
- * @pf: pointer to PF structure
- * @vsi: the VSI to free resources for
- *
- * Check if the VF control VSI resource is still in use. If no VF is using it
- * any more, release the VSI resource. Otherwise, leave it to be cleaned up
- * once no other VF uses it.
- */
-static void ice_free_vf_ctrl_res(struct ice_pf *pf, struct ice_vsi *vsi)
-{
- struct ice_vf *vf;
- unsigned int bkt;
-
- rcu_read_lock();
- ice_for_each_vf_rcu(pf, bkt, vf) {
- if (vf != vsi->vf && vf->ctrl_vsi_idx != ICE_NO_VSI) {
- rcu_read_unlock();
- return;
- }
- }
- rcu_read_unlock();
-
- /* No other VFs left that have control VSI. It is now safe to reclaim
- * SW interrupts back to the common pool.
- */
- ice_free_res(pf->irq_tracker, vsi->base_vector,
- ICE_RES_VF_CTRL_VEC_ID);
- pf->num_avail_sw_msix += vsi->num_q_vectors;
-}
-
-/**
* ice_vsi_release - Delete a VSI and free its resources
* @vsi: the VSI being removed
*
@@ -3160,7 +3259,6 @@ static void ice_free_vf_ctrl_res(struct ice_pf *pf, struct ice_vsi *vsi)
int ice_vsi_release(struct ice_vsi *vsi)
{
struct ice_pf *pf;
- int err;
if (!vsi->back)
return -ENODEV;
@@ -3178,50 +3276,14 @@ int ice_vsi_release(struct ice_vsi *vsi)
clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
}
+ if (vsi->type == ICE_VSI_PF)
+ ice_devlink_destroy_pf_port(pf);
+
if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
ice_rss_clean(vsi);
- /* Disable VSI and free resources */
- if (vsi->type != ICE_VSI_LB)
- ice_vsi_dis_irq(vsi);
ice_vsi_close(vsi);
-
- /* SR-IOV determines needed MSIX resources all at once instead of per
- * VSI since when VFs are spawned we know how many VFs there are and how
- * many interrupts each VF needs. SR-IOV MSIX resources are also
- * cleared in the same manner.
- */
- if (vsi->type == ICE_VSI_CTRL && vsi->vf) {
- ice_free_vf_ctrl_res(pf, vsi);
- } else if (vsi->type != ICE_VSI_VF) {
- /* reclaim SW interrupts back to the common pool */
- ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx);
- pf->num_avail_sw_msix += vsi->num_q_vectors;
- }
-
- if (!ice_is_safe_mode(pf)) {
- if (vsi->type == ICE_VSI_PF) {
- ice_fltr_remove_eth(vsi, ETH_P_PAUSE, ICE_FLTR_TX,
- ICE_DROP_PACKET);
- ice_cfg_sw_lldp(vsi, true, false);
- /* The Rx rule will only exist to remove if the LLDP FW
- * engine is currently stopped
- */
- if (!test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags))
- ice_cfg_sw_lldp(vsi, false, false);
- }
- }
-
- if (ice_is_vsi_dflt_vsi(vsi))
- ice_clear_dflt_vsi(vsi);
- ice_fltr_remove_all(vsi);
- ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
- err = ice_rm_vsi_rdma_cfg(vsi->port_info, vsi->idx);
- if (err)
- dev_err(ice_pf_to_dev(vsi->back), "Failed to remove RDMA scheduler config for VSI %u, err %d\n",
- vsi->vsi_num, err);
- ice_vsi_delete(vsi);
- ice_vsi_free_q_vectors(vsi);
+ ice_vsi_decfg(vsi);
if (vsi->netdev) {
if (test_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state)) {
@@ -3235,19 +3297,12 @@ int ice_vsi_release(struct ice_vsi *vsi)
}
}
- if (vsi->type == ICE_VSI_VF &&
- vsi->agg_node && vsi->agg_node->valid)
- vsi->agg_node->num_vsis--;
- ice_vsi_clear_rings(vsi);
- ice_vsi_free_stats(vsi);
- ice_vsi_put_qs(vsi);
-
/* retain SW VSI data structure since it is needed to unregister and
* free VSI netdev when PF is not in reset recovery pending state,\
* for ex: during rmmod.
*/
if (!ice_is_reset_in_progress(pf->state))
- ice_vsi_clear(vsi);
+ ice_vsi_delete(vsi);
return 0;
}
@@ -3372,7 +3427,7 @@ ice_vsi_rebuild_set_coalesce(struct ice_vsi *vsi,
* @prev_txq: Number of Tx rings before ring reallocation
* @prev_rxq: Number of Rx rings before ring reallocation
*/
-static int
+static void
ice_vsi_realloc_stat_arrays(struct ice_vsi *vsi, int prev_txq, int prev_rxq)
{
struct ice_vsi_stats *vsi_stat;
@@ -3380,9 +3435,9 @@ ice_vsi_realloc_stat_arrays(struct ice_vsi *vsi, int prev_txq, int prev_rxq)
int i;
if (!prev_txq || !prev_rxq)
- return 0;
+ return;
if (vsi->type == ICE_VSI_CHNL)
- return 0;
+ return;
vsi_stat = pf->vsi_stats[vsi->idx];
@@ -3403,36 +3458,36 @@ ice_vsi_realloc_stat_arrays(struct ice_vsi *vsi, int prev_txq, int prev_rxq)
}
}
}
-
- return 0;
}
/**
* ice_vsi_rebuild - Rebuild VSI after reset
* @vsi: VSI to be rebuild
- * @init_vsi: is this an initialization or a reconfigure of the VSI
+ * @vsi_flags: flags used for VSI rebuild flow
+ *
+ * Set vsi_flags to ICE_VSI_FLAG_INIT to initialize a new VSI, or
+ * ICE_VSI_FLAG_NO_INIT to rebuild an existing VSI in hardware.
*
* Returns 0 on success and negative value on failure
*/
-int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
+int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags)
{
- u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
+ struct ice_vsi_cfg_params params = {};
struct ice_coalesce_stored *coalesce;
- int ret, i, prev_txq, prev_rxq;
+ int ret, prev_txq, prev_rxq;
int prev_num_q_vectors = 0;
- enum ice_vsi_type vtype;
struct ice_pf *pf;
if (!vsi)
return -EINVAL;
+ params = ice_vsi_to_params(vsi);
+ params.flags = vsi_flags;
+
pf = vsi->back;
- vtype = vsi->type;
- if (WARN_ON(vtype == ICE_VSI_VF && !vsi->vf))
+ if (WARN_ON(vsi->type == ICE_VSI_VF && !vsi->vf))
return -EINVAL;
- ice_vsi_init_vlan_ops(vsi);
-
coalesce = kcalloc(vsi->num_q_vectors,
sizeof(struct ice_coalesce_stored), GFP_KERNEL);
if (!coalesce)
@@ -3443,188 +3498,32 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
prev_txq = vsi->num_txq;
prev_rxq = vsi->num_rxq;
- ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
- ret = ice_rm_vsi_rdma_cfg(vsi->port_info, vsi->idx);
+ ice_vsi_decfg(vsi);
+ ret = ice_vsi_cfg_def(vsi, &params);
if (ret)
- dev_err(ice_pf_to_dev(vsi->back), "Failed to remove RDMA scheduler config for VSI %u, err %d\n",
- vsi->vsi_num, ret);
- ice_vsi_free_q_vectors(vsi);
-
- /* SR-IOV determines needed MSIX resources all at once instead of per
- * VSI since when VFs are spawned we know how many VFs there are and how
- * many interrupts each VF needs. SR-IOV MSIX resources are also
- * cleared in the same manner.
- */
- if (vtype != ICE_VSI_VF) {
- /* reclaim SW interrupts back to the common pool */
- ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx);
- pf->num_avail_sw_msix += vsi->num_q_vectors;
- vsi->base_vector = 0;
- }
-
- if (ice_is_xdp_ena_vsi(vsi))
- /* return value check can be skipped here, it always returns
- * 0 if reset is in progress
- */
- ice_destroy_xdp_rings(vsi);
- ice_vsi_put_qs(vsi);
- ice_vsi_clear_rings(vsi);
- ice_vsi_free_arrays(vsi);
- if (vtype == ICE_VSI_VF)
- ice_vsi_set_num_qs(vsi, vsi->vf);
- else
- ice_vsi_set_num_qs(vsi, NULL);
-
- ret = ice_vsi_alloc_arrays(vsi);
- if (ret < 0)
- goto err_vsi;
-
- ice_vsi_get_qs(vsi);
-
- ice_alloc_fd_res(vsi);
- ice_vsi_set_tc_cfg(vsi);
-
- /* Initialize VSI struct elements and create VSI in FW */
- ret = ice_vsi_init(vsi, init_vsi);
- if (ret < 0)
- goto err_vsi;
-
- switch (vtype) {
- case ICE_VSI_CTRL:
- case ICE_VSI_SWITCHDEV_CTRL:
- case ICE_VSI_PF:
- ret = ice_vsi_alloc_q_vectors(vsi);
- if (ret)
- goto err_rings;
-
- ret = ice_vsi_setup_vector_base(vsi);
- if (ret)
- goto err_vectors;
-
- ret = ice_vsi_set_q_vectors_reg_idx(vsi);
- if (ret)
- goto err_vectors;
-
- ret = ice_vsi_alloc_rings(vsi);
- if (ret)
- goto err_vectors;
-
- ret = ice_vsi_alloc_ring_stats(vsi);
- if (ret)
- goto err_vectors;
-
- ice_vsi_map_rings_to_vectors(vsi);
-
- vsi->stat_offsets_loaded = false;
- if (ice_is_xdp_ena_vsi(vsi)) {
- ret = ice_vsi_determine_xdp_res(vsi);
- if (ret)
- goto err_vectors;
- ret = ice_prepare_xdp_rings(vsi, vsi->xdp_prog);
- if (ret)
- goto err_vectors;
- }
- /* ICE_VSI_CTRL does not need RSS so skip RSS processing */
- if (vtype != ICE_VSI_CTRL)
- /* Do not exit if configuring RSS had an issue, at
- * least receive traffic on first queue. Hence no
- * need to capture return value
- */
- if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
- ice_vsi_cfg_rss_lut_key(vsi);
-
- /* disable or enable CRC stripping */
- if (vsi->netdev)
- ice_vsi_cfg_crc_strip(vsi, !!(vsi->netdev->features &
- NETIF_F_RXFCS));
-
- break;
- case ICE_VSI_VF:
- ret = ice_vsi_alloc_q_vectors(vsi);
- if (ret)
- goto err_rings;
-
- ret = ice_vsi_set_q_vectors_reg_idx(vsi);
- if (ret)
- goto err_vectors;
-
- ret = ice_vsi_alloc_rings(vsi);
- if (ret)
- goto err_vectors;
-
- ret = ice_vsi_alloc_ring_stats(vsi);
- if (ret)
- goto err_vectors;
-
- vsi->stat_offsets_loaded = false;
- break;
- case ICE_VSI_CHNL:
- if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
- ice_vsi_cfg_rss_lut_key(vsi);
- ice_vsi_set_rss_flow_fld(vsi);
- }
- break;
- default:
- break;
- }
-
- /* configure VSI nodes based on number of queues and TC's */
- for (i = 0; i < vsi->tc_cfg.numtc; i++) {
- /* configure VSI nodes based on number of queues and TC's.
- * ADQ creates VSIs for each TC/Channel but doesn't
- * allocate queues instead it reconfigures the PF queues
- * as per the TC command. So max_txqs should point to the
- * PF Tx queues.
- */
- if (vtype == ICE_VSI_CHNL)
- max_txqs[i] = pf->num_lan_tx;
- else
- max_txqs[i] = vsi->alloc_txq;
-
- if (ice_is_xdp_ena_vsi(vsi))
- max_txqs[i] += vsi->num_xdp_txq;
- }
-
- if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
- /* If MQPRIO is set, means channel code path, hence for main
- * VSI's, use TC as 1
- */
- ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, 1, max_txqs);
- else
- ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx,
- vsi->tc_cfg.ena_tc, max_txqs);
+ goto err_vsi_cfg;
+ ret = ice_vsi_cfg_tc_lan(pf, vsi);
if (ret) {
- dev_err(ice_pf_to_dev(pf), "VSI %d failed lan queue config, error %d\n",
- vsi->vsi_num, ret);
- if (init_vsi) {
+ if (vsi_flags & ICE_VSI_FLAG_INIT) {
ret = -EIO;
- goto err_vectors;
+ goto err_vsi_cfg_tc_lan;
} else {
+ kfree(coalesce);
return ice_schedule_reset(pf, ICE_RESET_PFR);
}
}
- if (ice_vsi_realloc_stat_arrays(vsi, prev_txq, prev_rxq))
- goto err_vectors;
+ ice_vsi_realloc_stat_arrays(vsi, prev_txq, prev_rxq);
ice_vsi_rebuild_set_coalesce(vsi, coalesce, prev_num_q_vectors);
kfree(coalesce);
return 0;
-err_vectors:
- ice_vsi_free_q_vectors(vsi);
-err_rings:
- if (vsi->netdev) {
- vsi->current_netdev_flags = 0;
- unregister_netdev(vsi->netdev);
- free_netdev(vsi->netdev);
- vsi->netdev = NULL;
- }
-err_vsi:
- ice_vsi_clear(vsi);
- set_bit(ICE_RESET_FAILED, pf->state);
+err_vsi_cfg_tc_lan:
+ ice_vsi_decfg(vsi);
+err_vsi_cfg:
kfree(coalesce);
return ret;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
index dcdf69a693e9..75221478f2dc 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -7,6 +7,47 @@
#include "ice.h"
#include "ice_vlan.h"
+/* Flags used for VSI configuration and rebuild */
+#define ICE_VSI_FLAG_INIT BIT(0)
+#define ICE_VSI_FLAG_NO_INIT 0
+
+/**
+ * struct ice_vsi_cfg_params - VSI configuration parameters
+ * @pi: pointer to the port_info instance for the VSI
+ * @ch: pointer to the channel structure for the VSI, may be NULL
+ * @vf: pointer to the VF associated with this VSI, may be NULL
+ * @type: the type of VSI to configure
+ * @flags: VSI flags used for rebuild and configuration
+ *
+ * Parameter structure used when configuring a new VSI.
+ */
+struct ice_vsi_cfg_params {
+ struct ice_port_info *pi;
+ struct ice_channel *ch;
+ struct ice_vf *vf;
+ enum ice_vsi_type type;
+ u32 flags;
+};
+
+/**
+ * ice_vsi_to_params - Get parameters for an existing VSI
+ * @vsi: the VSI to get parameters for
+ *
+ * Fill a parameter structure for reconfiguring a VSI with its current
+ * parameters, such as during a rebuild operation.
+ */
+static inline struct ice_vsi_cfg_params ice_vsi_to_params(struct ice_vsi *vsi)
+{
+ struct ice_vsi_cfg_params params = {};
+
+ params.pi = vsi->port_info;
+ params.ch = vsi->ch;
+ params.vf = vsi->vf;
+ params.type = vsi->type;
+
+ return params;
+}
+
const char *ice_vsi_type_str(enum ice_vsi_type vsi_type);
bool ice_pf_state_is_nominal(struct ice_pf *pf);
@@ -42,7 +83,6 @@ void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create);
int ice_set_link(struct ice_vsi *vsi, bool ena);
void ice_vsi_delete(struct ice_vsi *vsi);
-int ice_vsi_clear(struct ice_vsi *vsi);
int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc);
@@ -51,9 +91,7 @@ int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi);
void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc);
struct ice_vsi *
-ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
- enum ice_vsi_type vsi_type, struct ice_vf *vf,
- struct ice_channel *ch);
+ice_vsi_setup(struct ice_pf *pf, struct ice_vsi_cfg_params *params);
void ice_napi_del(struct ice_vsi *vsi);
@@ -63,6 +101,7 @@ void ice_vsi_close(struct ice_vsi *vsi);
int ice_ena_vsi(struct ice_vsi *vsi, bool locked);
+void ice_vsi_decfg(struct ice_vsi *vsi);
void ice_dis_vsi(struct ice_vsi *vsi, bool locked);
int ice_free_res(struct ice_res_tracker *res, u16 index, u16 id);
@@ -70,7 +109,8 @@ int ice_free_res(struct ice_res_tracker *res, u16 index, u16 id);
int
ice_get_res(struct ice_pf *pf, struct ice_res_tracker *res, u16 needed, u16 id);
-int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi);
+int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags);
+int ice_vsi_cfg(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params);
bool ice_is_reset_in_progress(unsigned long *state);
int ice_wait_for_reset(struct ice_pf *pf, unsigned long timeout);
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 237ede2cffb0..567694bf098b 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -22,6 +22,7 @@
#include "ice_eswitch.h"
#include "ice_tc_lib.h"
#include "ice_vsi_vlan_ops.h"
+#include <net/xdp_sock_drv.h>
#define DRV_SUMMARY "Intel(R) Ethernet Connection E800 Series Linux Driver"
static const char ice_driver_string[] = DRV_SUMMARY;
@@ -44,7 +45,6 @@ MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all), hw debug_mask (0x8XXXX
MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)");
#endif /* !CONFIG_DYNAMIC_DEBUG */
-static DEFINE_IDA(ice_aux_ida);
DEFINE_STATIC_KEY_FALSE(ice_xdp_locking_key);
EXPORT_SYMBOL(ice_xdp_locking_key);
@@ -275,6 +275,8 @@ static int ice_set_promisc(struct ice_vsi *vsi, u8 promisc_m)
if (status && status != -EEXIST)
return status;
+ netdev_dbg(vsi->netdev, "set promisc filter bits for VSI %i: 0x%x\n",
+ vsi->vsi_num, promisc_m);
return 0;
}
@@ -300,6 +302,8 @@ static int ice_clear_promisc(struct ice_vsi *vsi, u8 promisc_m)
promisc_m, 0);
}
+ netdev_dbg(vsi->netdev, "clear promisc filter bits for VSI %i: 0x%x\n",
+ vsi->vsi_num, promisc_m);
return status;
}
@@ -414,6 +418,16 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
}
err = 0;
vlan_ops->dis_rx_filtering(vsi);
+
+ /* promiscuous mode implies allmulticast so
+ * that VSIs that are in promiscuous mode are
+ * subscribed to multicast packets coming to
+ * the port
+ */
+ err = ice_set_promisc(vsi,
+ ICE_MCAST_PROMISC_BITS);
+ if (err)
+ goto out_promisc;
}
} else {
/* Clear Rx filter to remove traffic from wire */
@@ -430,6 +444,18 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
NETIF_F_HW_VLAN_CTAG_FILTER)
vlan_ops->ena_rx_filtering(vsi);
}
+
+ /* disable allmulti here, but only if allmulti is not
+ * still enabled for the netdev
+ */
+ if (!(vsi->current_netdev_flags & IFF_ALLMULTI)) {
+ err = ice_clear_promisc(vsi,
+ ICE_MCAST_PROMISC_BITS);
+ if (err) {
+ netdev_err(netdev, "Error %d clearing multicast promiscuous on VSI %i\n",
+ err, vsi->vsi_num);
+ }
+ }
}
}
goto exit;
@@ -538,7 +564,7 @@ ice_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
/* Disable VFs until reset is completed */
mutex_lock(&pf->vfs.table_lock);
ice_for_each_vf(pf, bkt, vf)
- ice_set_vf_state_qs_dis(vf);
+ ice_set_vf_state_dis(vf);
mutex_unlock(&pf->vfs.table_lock);
if (ice_is_eswitch_mode_switchdev(pf)) {
@@ -2570,8 +2596,6 @@ static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
xdp_ring->netdev = NULL;
xdp_ring->dev = dev;
xdp_ring->count = vsi->num_tx_desc;
- xdp_ring->next_dd = ICE_RING_QUARTER(xdp_ring) - 1;
- xdp_ring->next_rs = ICE_RING_QUARTER(xdp_ring) - 1;
WRITE_ONCE(vsi->xdp_rings[i], xdp_ring);
if (ice_setup_tx_ring(xdp_ring))
goto free_xdp_rings;
@@ -2863,6 +2887,18 @@ int ice_vsi_determine_xdp_res(struct ice_vsi *vsi)
}
/**
+ * ice_max_xdp_frame_size - returns the maximum allowed frame size for XDP
+ * @vsi: Pointer to VSI structure
+ */
+static int ice_max_xdp_frame_size(struct ice_vsi *vsi)
+{
+ if (test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags))
+ return ICE_RXBUF_1664;
+ else
+ return ICE_RXBUF_3072;
+}
+
+/**
* ice_xdp_setup_prog - Add or remove XDP eBPF program
* @vsi: VSI to setup XDP for
* @prog: XDP program
@@ -2872,13 +2908,16 @@ static int
ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
struct netlink_ext_ack *extack)
{
- int frame_size = vsi->netdev->mtu + ICE_ETH_PKT_HDR_PAD;
+ unsigned int frame_size = vsi->netdev->mtu + ICE_ETH_PKT_HDR_PAD;
bool if_running = netif_running(vsi->netdev);
int ret = 0, xdp_ring_err = 0;
- if (frame_size > vsi->rx_buf_len) {
- NL_SET_ERR_MSG_MOD(extack, "MTU too large for loading XDP");
- return -EOPNOTSUPP;
+ if (prog && !prog->aux->xdp_has_frags) {
+ if (frame_size > ice_max_xdp_frame_size(vsi)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "MTU is too large for linear frames and XDP prog does not support frags");
+ return -EOPNOTSUPP;
+ }
}
/* need to stop netdev while setting up the program for Rx rings */
@@ -2899,11 +2938,13 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
if (xdp_ring_err)
NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
}
+ xdp_features_set_redirect_target(vsi->netdev, true);
/* reallocate Rx queues that are used for zero-copy */
xdp_ring_err = ice_realloc_zc_buf(vsi, true);
if (xdp_ring_err)
NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Rx resources failed");
} else if (ice_is_xdp_ena_vsi(vsi) && !prog) {
+ xdp_features_clear_redirect_target(vsi->netdev);
xdp_ring_err = ice_destroy_xdp_rings(vsi);
if (xdp_ring_err)
NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed");
@@ -3318,10 +3359,11 @@ static void ice_napi_add(struct ice_vsi *vsi)
/**
* ice_set_ops - set netdev and ethtools ops for the given netdev
- * @netdev: netdev instance
+ * @vsi: the VSI associated with the new netdev
*/
-static void ice_set_ops(struct net_device *netdev)
+static void ice_set_ops(struct ice_vsi *vsi)
{
+ struct net_device *netdev = vsi->netdev;
struct ice_pf *pf = ice_netdev_to_pf(netdev);
if (ice_is_safe_mode(pf)) {
@@ -3333,6 +3375,13 @@ static void ice_set_ops(struct net_device *netdev)
netdev->netdev_ops = &ice_netdev_ops;
netdev->udp_tunnel_nic_info = &pf->hw.udp_tunnel_nic;
ice_set_ethtool_ops(netdev);
+
+ if (vsi->type != ICE_VSI_PF)
+ return;
+
+ netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_XSK_ZEROCOPY |
+ NETDEV_XDP_ACT_RX_SG;
}
/**
@@ -3421,53 +3470,8 @@ static void ice_set_netdev_features(struct net_device *netdev)
* be changed at runtime
*/
netdev->hw_features |= NETIF_F_RXFCS;
-}
-
-/**
- * ice_cfg_netdev - Allocate, configure and register a netdev
- * @vsi: the VSI associated with the new netdev
- *
- * Returns 0 on success, negative value on failure
- */
-static int ice_cfg_netdev(struct ice_vsi *vsi)
-{
- struct ice_netdev_priv *np;
- struct net_device *netdev;
- u8 mac_addr[ETH_ALEN];
-
- netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq,
- vsi->alloc_rxq);
- if (!netdev)
- return -ENOMEM;
- set_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
- vsi->netdev = netdev;
- np = netdev_priv(netdev);
- np->vsi = vsi;
-
- ice_set_netdev_features(netdev);
-
- ice_set_ops(netdev);
-
- if (vsi->type == ICE_VSI_PF) {
- SET_NETDEV_DEV(netdev, ice_pf_to_dev(vsi->back));
- ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
- eth_hw_addr_set(netdev, mac_addr);
- ether_addr_copy(netdev->perm_addr, mac_addr);
- }
-
- netdev->priv_flags |= IFF_UNICAST_FLT;
-
- /* Setup netdev TC information */
- ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc);
-
- /* setup watchdog timeout value to be 5 second */
- netdev->watchdog_timeo = 5 * HZ;
-
- netdev->min_mtu = ETH_MIN_MTU;
- netdev->max_mtu = ICE_MAX_MTU;
-
- return 0;
+ netif_set_tso_max_size(netdev, ICE_MAX_TSO_SIZE);
}
/**
@@ -3495,14 +3499,27 @@ void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size)
static struct ice_vsi *
ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
{
- return ice_vsi_setup(pf, pi, ICE_VSI_PF, NULL, NULL);
+ struct ice_vsi_cfg_params params = {};
+
+ params.type = ICE_VSI_PF;
+ params.pi = pi;
+ params.flags = ICE_VSI_FLAG_INIT;
+
+ return ice_vsi_setup(pf, &params);
}
static struct ice_vsi *
ice_chnl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
struct ice_channel *ch)
{
- return ice_vsi_setup(pf, pi, ICE_VSI_CHNL, NULL, ch);
+ struct ice_vsi_cfg_params params = {};
+
+ params.type = ICE_VSI_CHNL;
+ params.pi = pi;
+ params.ch = ch;
+ params.flags = ICE_VSI_FLAG_INIT;
+
+ return ice_vsi_setup(pf, &params);
}
/**
@@ -3516,7 +3533,13 @@ ice_chnl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
static struct ice_vsi *
ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
{
- return ice_vsi_setup(pf, pi, ICE_VSI_CTRL, NULL, NULL);
+ struct ice_vsi_cfg_params params = {};
+
+ params.type = ICE_VSI_CTRL;
+ params.pi = pi;
+ params.flags = ICE_VSI_FLAG_INIT;
+
+ return ice_vsi_setup(pf, &params);
}
/**
@@ -3530,7 +3553,13 @@ ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
struct ice_vsi *
ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
{
- return ice_vsi_setup(pf, pi, ICE_VSI_LB, NULL, NULL);
+ struct ice_vsi_cfg_params params = {};
+
+ params.type = ICE_VSI_LB;
+ params.pi = pi;
+ params.flags = ICE_VSI_FLAG_INIT;
+
+ return ice_vsi_setup(pf, &params);
}
/**
@@ -3690,20 +3719,6 @@ static void ice_tc_indir_block_unregister(struct ice_vsi *vsi)
}
/**
- * ice_tc_indir_block_remove - clean indirect TC block notifications
- * @pf: PF structure
- */
-static void ice_tc_indir_block_remove(struct ice_pf *pf)
-{
- struct ice_vsi *pf_vsi = ice_get_main_vsi(pf);
-
- if (!pf_vsi)
- return;
-
- ice_tc_indir_block_unregister(pf_vsi);
-}
-
-/**
* ice_tc_indir_block_register - Register TC indirect block notifications
* @vsi: VSI struct which has the netdev
*
@@ -3723,78 +3738,6 @@ static int ice_tc_indir_block_register(struct ice_vsi *vsi)
}
/**
- * ice_setup_pf_sw - Setup the HW switch on startup or after reset
- * @pf: board private structure
- *
- * Returns 0 on success, negative value on failure
- */
-static int ice_setup_pf_sw(struct ice_pf *pf)
-{
- struct device *dev = ice_pf_to_dev(pf);
- bool dvm = ice_is_dvm_ena(&pf->hw);
- struct ice_vsi *vsi;
- int status;
-
- if (ice_is_reset_in_progress(pf->state))
- return -EBUSY;
-
- status = ice_aq_set_port_params(pf->hw.port_info, dvm, NULL);
- if (status)
- return -EIO;
-
- vsi = ice_pf_vsi_setup(pf, pf->hw.port_info);
- if (!vsi)
- return -ENOMEM;
-
- /* init channel list */
- INIT_LIST_HEAD(&vsi->ch_list);
-
- status = ice_cfg_netdev(vsi);
- if (status)
- goto unroll_vsi_setup;
- /* netdev has to be configured before setting frame size */
- ice_vsi_cfg_frame_size(vsi);
-
- /* init indirect block notifications */
- status = ice_tc_indir_block_register(vsi);
- if (status) {
- dev_err(dev, "Failed to register netdev notifier\n");
- goto unroll_cfg_netdev;
- }
-
- /* Setup DCB netlink interface */
- ice_dcbnl_setup(vsi);
-
- /* registering the NAPI handler requires both the queues and
- * netdev to be created, which are done in ice_pf_vsi_setup()
- * and ice_cfg_netdev() respectively
- */
- ice_napi_add(vsi);
-
- status = ice_init_mac_fltr(pf);
- if (status)
- goto unroll_napi_add;
-
- return 0;
-
-unroll_napi_add:
- ice_tc_indir_block_unregister(vsi);
-unroll_cfg_netdev:
- if (vsi) {
- ice_napi_del(vsi);
- if (vsi->netdev) {
- clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
- free_netdev(vsi->netdev);
- vsi->netdev = NULL;
- }
- }
-
-unroll_vsi_setup:
- ice_vsi_release(vsi);
- return status;
-}
-
-/**
* ice_get_avail_q_count - Get count of queues in use
* @pf_qmap: bitmap to get queue use count from
* @lock: pointer to a mutex that protects access to pf_qmap
@@ -4195,12 +4138,13 @@ bool ice_is_wol_supported(struct ice_hw *hw)
* @vsi: VSI being changed
* @new_rx: new number of Rx queues
* @new_tx: new number of Tx queues
+ * @locked: is adev device_lock held
*
* Only change the number of queues if new_tx, or new_rx is non-0.
*
* Returns 0 on success.
*/
-int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx)
+int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx, bool locked)
{
struct ice_pf *pf = vsi->back;
int err = 0, timeout = 50;
@@ -4222,14 +4166,14 @@ int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx)
/* set for the next time the netdev is started */
if (!netif_running(vsi->netdev)) {
- ice_vsi_rebuild(vsi, false);
+ ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
dev_dbg(ice_pf_to_dev(pf), "Link is down, queue count change happens when link is brought up\n");
goto done;
}
ice_vsi_close(vsi);
- ice_vsi_rebuild(vsi, false);
- ice_pf_dcb_recfg(pf);
+ ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
+ ice_pf_dcb_recfg(pf, locked);
ice_vsi_open(vsi);
done:
clear_bit(ICE_CFG_BUSY, pf->state);
@@ -4491,6 +4435,23 @@ err_vsi_open:
return err;
}
+static void ice_deinit_fdir(struct ice_pf *pf)
+{
+ struct ice_vsi *vsi = ice_get_ctrl_vsi(pf);
+
+ if (!vsi)
+ return;
+
+ ice_vsi_manage_fdir(vsi, false);
+ ice_vsi_release(vsi);
+ if (pf->ctrl_vsi_idx != ICE_NO_VSI) {
+ pf->vsi[pf->ctrl_vsi_idx] = NULL;
+ pf->ctrl_vsi_idx = ICE_NO_VSI;
+ }
+
+ mutex_destroy(&(&pf->hw)->fdir_fltr_lock);
+}
+
/**
* ice_get_opt_fw_name - return optional firmware file name or NULL
* @pf: pointer to the PF instance
@@ -4591,116 +4552,171 @@ static void ice_print_wake_reason(struct ice_pf *pf)
/**
* ice_register_netdev - register netdev
- * @pf: pointer to the PF struct
+ * @vsi: pointer to the VSI struct
*/
-static int ice_register_netdev(struct ice_pf *pf)
+static int ice_register_netdev(struct ice_vsi *vsi)
{
- struct ice_vsi *vsi;
- int err = 0;
+ int err;
- vsi = ice_get_main_vsi(pf);
if (!vsi || !vsi->netdev)
return -EIO;
err = register_netdev(vsi->netdev);
if (err)
- goto err_register_netdev;
+ return err;
set_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
netif_carrier_off(vsi->netdev);
netif_tx_stop_all_queues(vsi->netdev);
return 0;
-err_register_netdev:
- free_netdev(vsi->netdev);
- vsi->netdev = NULL;
- clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
- return err;
+}
+
+static void ice_unregister_netdev(struct ice_vsi *vsi)
+{
+ if (!vsi || !vsi->netdev)
+ return;
+
+ unregister_netdev(vsi->netdev);
+ clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
}
/**
- * ice_probe - Device initialization routine
- * @pdev: PCI device information struct
- * @ent: entry in ice_pci_tbl
+ * ice_cfg_netdev - Allocate, configure and register a netdev
+ * @vsi: the VSI associated with the new netdev
*
- * Returns 0 on success, negative on failure
+ * Returns 0 on success, negative value on failure
*/
-static int
-ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
+static int ice_cfg_netdev(struct ice_vsi *vsi)
{
- struct device *dev = &pdev->dev;
- struct ice_vsi *vsi;
- struct ice_pf *pf;
- struct ice_hw *hw;
- int i, err;
+ struct ice_netdev_priv *np;
+ struct net_device *netdev;
+ u8 mac_addr[ETH_ALEN];
- if (pdev->is_virtfn) {
- dev_err(dev, "can't probe a virtual function\n");
- return -EINVAL;
+ netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq,
+ vsi->alloc_rxq);
+ if (!netdev)
+ return -ENOMEM;
+
+ set_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
+ vsi->netdev = netdev;
+ np = netdev_priv(netdev);
+ np->vsi = vsi;
+
+ ice_set_netdev_features(netdev);
+ ice_set_ops(vsi);
+
+ if (vsi->type == ICE_VSI_PF) {
+ SET_NETDEV_DEV(netdev, ice_pf_to_dev(vsi->back));
+ ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
+ eth_hw_addr_set(netdev, mac_addr);
}
- /* this driver uses devres, see
- * Documentation/driver-api/driver-model/devres.rst
- */
- err = pcim_enable_device(pdev);
+ netdev->priv_flags |= IFF_UNICAST_FLT;
+
+ /* Setup netdev TC information */
+ ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc);
+
+ netdev->max_mtu = ICE_MAX_MTU;
+
+ return 0;
+}
+
+static void ice_decfg_netdev(struct ice_vsi *vsi)
+{
+ clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
+ free_netdev(vsi->netdev);
+ vsi->netdev = NULL;
+}
+
+static int ice_start_eth(struct ice_vsi *vsi)
+{
+ int err;
+
+ err = ice_init_mac_fltr(vsi->back);
if (err)
return err;
- err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), dev_driver_string(dev));
- if (err) {
- dev_err(dev, "BAR0 I/O map error %d\n", err);
- return err;
- }
+ rtnl_lock();
+ err = ice_vsi_open(vsi);
+ rtnl_unlock();
- pf = ice_allocate_pf(dev);
- if (!pf)
- return -ENOMEM;
+ return err;
+}
- /* initialize Auxiliary index to invalid value */
- pf->aux_idx = -1;
+static int ice_init_eth(struct ice_pf *pf)
+{
+ struct ice_vsi *vsi = ice_get_main_vsi(pf);
+ int err;
- /* set up for high or low DMA */
- err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
- if (err) {
- dev_err(dev, "DMA configuration failed: 0x%x\n", err);
+ if (!vsi)
+ return -EINVAL;
+
+ /* init channel list */
+ INIT_LIST_HEAD(&vsi->ch_list);
+
+ err = ice_cfg_netdev(vsi);
+ if (err)
return err;
- }
+ /* Setup DCB netlink interface */
+ ice_dcbnl_setup(vsi);
- pci_enable_pcie_error_reporting(pdev);
- pci_set_master(pdev);
+ err = ice_init_mac_fltr(pf);
+ if (err)
+ goto err_init_mac_fltr;
- pf->pdev = pdev;
- pci_set_drvdata(pdev, pf);
- set_bit(ICE_DOWN, pf->state);
- /* Disable service task until DOWN bit is cleared */
- set_bit(ICE_SERVICE_DIS, pf->state);
+ err = ice_devlink_create_pf_port(pf);
+ if (err)
+ goto err_devlink_create_pf_port;
- hw = &pf->hw;
- hw->hw_addr = pcim_iomap_table(pdev)[ICE_BAR0];
- pci_save_state(pdev);
+ SET_NETDEV_DEVLINK_PORT(vsi->netdev, &pf->devlink_port);
- hw->back = pf;
- hw->vendor_id = pdev->vendor;
- hw->device_id = pdev->device;
- pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
- hw->subsystem_vendor_id = pdev->subsystem_vendor;
- hw->subsystem_device_id = pdev->subsystem_device;
- hw->bus.device = PCI_SLOT(pdev->devfn);
- hw->bus.func = PCI_FUNC(pdev->devfn);
- ice_set_ctrlq_len(hw);
+ err = ice_register_netdev(vsi);
+ if (err)
+ goto err_register_netdev;
- pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M);
+ err = ice_tc_indir_block_register(vsi);
+ if (err)
+ goto err_tc_indir_block_register;
-#ifndef CONFIG_DYNAMIC_DEBUG
- if (debug < -1)
- hw->debug_mask = debug;
-#endif
+ ice_napi_add(vsi);
+
+ return 0;
+
+err_tc_indir_block_register:
+ ice_unregister_netdev(vsi);
+err_register_netdev:
+ ice_devlink_destroy_pf_port(pf);
+err_devlink_create_pf_port:
+err_init_mac_fltr:
+ ice_decfg_netdev(vsi);
+ return err;
+}
+
+static void ice_deinit_eth(struct ice_pf *pf)
+{
+ struct ice_vsi *vsi = ice_get_main_vsi(pf);
+
+ if (!vsi)
+ return;
+
+ ice_vsi_close(vsi);
+ ice_unregister_netdev(vsi);
+ ice_devlink_destroy_pf_port(pf);
+ ice_tc_indir_block_unregister(vsi);
+ ice_decfg_netdev(vsi);
+}
+
+static int ice_init_dev(struct ice_pf *pf)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_hw *hw = &pf->hw;
+ int err;
err = ice_init_hw(hw);
if (err) {
dev_err(dev, "ice_init_hw failed: %d\n", err);
- err = -EIO;
- goto err_exit_unroll;
+ return err;
}
ice_init_feature_support(pf);
@@ -4723,62 +4739,31 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
err = ice_init_pf(pf);
if (err) {
dev_err(dev, "ice_init_pf failed: %d\n", err);
- goto err_init_pf_unroll;
+ goto err_init_pf;
}
- ice_devlink_init_regions(pf);
-
pf->hw.udp_tunnel_nic.set_port = ice_udp_tunnel_set_port;
pf->hw.udp_tunnel_nic.unset_port = ice_udp_tunnel_unset_port;
pf->hw.udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP;
pf->hw.udp_tunnel_nic.shared = &pf->hw.udp_tunnel_shared;
- i = 0;
if (pf->hw.tnl.valid_count[TNL_VXLAN]) {
- pf->hw.udp_tunnel_nic.tables[i].n_entries =
+ pf->hw.udp_tunnel_nic.tables[0].n_entries =
pf->hw.tnl.valid_count[TNL_VXLAN];
- pf->hw.udp_tunnel_nic.tables[i].tunnel_types =
+ pf->hw.udp_tunnel_nic.tables[0].tunnel_types =
UDP_TUNNEL_TYPE_VXLAN;
- i++;
}
if (pf->hw.tnl.valid_count[TNL_GENEVE]) {
- pf->hw.udp_tunnel_nic.tables[i].n_entries =
+ pf->hw.udp_tunnel_nic.tables[1].n_entries =
pf->hw.tnl.valid_count[TNL_GENEVE];
- pf->hw.udp_tunnel_nic.tables[i].tunnel_types =
+ pf->hw.udp_tunnel_nic.tables[1].tunnel_types =
UDP_TUNNEL_TYPE_GENEVE;
- i++;
- }
-
- pf->num_alloc_vsi = hw->func_caps.guar_num_vsi;
- if (!pf->num_alloc_vsi) {
- err = -EIO;
- goto err_init_pf_unroll;
- }
- if (pf->num_alloc_vsi > UDP_TUNNEL_NIC_MAX_SHARING_DEVICES) {
- dev_warn(&pf->pdev->dev,
- "limiting the VSI count due to UDP tunnel limitation %d > %d\n",
- pf->num_alloc_vsi, UDP_TUNNEL_NIC_MAX_SHARING_DEVICES);
- pf->num_alloc_vsi = UDP_TUNNEL_NIC_MAX_SHARING_DEVICES;
- }
-
- pf->vsi = devm_kcalloc(dev, pf->num_alloc_vsi, sizeof(*pf->vsi),
- GFP_KERNEL);
- if (!pf->vsi) {
- err = -ENOMEM;
- goto err_init_pf_unroll;
- }
-
- pf->vsi_stats = devm_kcalloc(dev, pf->num_alloc_vsi,
- sizeof(*pf->vsi_stats), GFP_KERNEL);
- if (!pf->vsi_stats) {
- err = -ENOMEM;
- goto err_init_vsi_unroll;
}
err = ice_init_interrupt_scheme(pf);
if (err) {
dev_err(dev, "ice_init_interrupt_scheme failed: %d\n", err);
err = -EIO;
- goto err_init_vsi_stats_unroll;
+ goto err_init_interrupt_scheme;
}
/* In case of MSIX we are going to setup the misc vector right here
@@ -4789,49 +4774,94 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
err = ice_req_irq_msix_misc(pf);
if (err) {
dev_err(dev, "setup of misc vector failed: %d\n", err);
- goto err_init_interrupt_unroll;
+ goto err_req_irq_msix_misc;
}
- /* create switch struct for the switch element created by FW on boot */
- pf->first_sw = devm_kzalloc(dev, sizeof(*pf->first_sw), GFP_KERNEL);
- if (!pf->first_sw) {
- err = -ENOMEM;
- goto err_msix_misc_unroll;
- }
+ return 0;
- if (hw->evb_veb)
- pf->first_sw->bridge_mode = BRIDGE_MODE_VEB;
- else
- pf->first_sw->bridge_mode = BRIDGE_MODE_VEPA;
+err_req_irq_msix_misc:
+ ice_clear_interrupt_scheme(pf);
+err_init_interrupt_scheme:
+ ice_deinit_pf(pf);
+err_init_pf:
+ ice_deinit_hw(hw);
+ return err;
+}
- pf->first_sw->pf = pf;
+static void ice_deinit_dev(struct ice_pf *pf)
+{
+ ice_free_irq_msix_misc(pf);
+ ice_clear_interrupt_scheme(pf);
+ ice_deinit_pf(pf);
+ ice_deinit_hw(&pf->hw);
+}
- /* record the sw_id available for later use */
- pf->first_sw->sw_id = hw->port_info->sw_id;
+static void ice_init_features(struct ice_pf *pf)
+{
+ struct device *dev = ice_pf_to_dev(pf);
- err = ice_setup_pf_sw(pf);
- if (err) {
- dev_err(dev, "probe failed due to setup PF switch: %d\n", err);
- goto err_alloc_sw_unroll;
- }
+ if (ice_is_safe_mode(pf))
+ return;
- clear_bit(ICE_SERVICE_DIS, pf->state);
+ /* initialize DDP driven features */
+ if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
+ ice_ptp_init(pf);
- /* tell the firmware we are up */
- err = ice_send_version(pf);
- if (err) {
- dev_err(dev, "probe failed sending driver version %s. error: %d\n",
- UTS_RELEASE, err);
- goto err_send_version_unroll;
+ if (ice_is_feature_supported(pf, ICE_F_GNSS))
+ ice_gnss_init(pf);
+
+ /* Note: Flow director init failure is non-fatal to load */
+ if (ice_init_fdir(pf))
+ dev_err(dev, "could not initialize flow director\n");
+
+ /* Note: DCB init failure is non-fatal to load */
+ if (ice_init_pf_dcb(pf, false)) {
+ clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
+ clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
+ } else {
+ ice_cfg_lldp_mib_change(&pf->hw, true);
}
- /* since everything is good, start the service timer */
- mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
+ if (ice_init_lag(pf))
+ dev_warn(dev, "Failed to init link aggregation support\n");
+}
+
+static void ice_deinit_features(struct ice_pf *pf)
+{
+ ice_deinit_lag(pf);
+ if (test_bit(ICE_FLAG_DCB_CAPABLE, pf->flags))
+ ice_cfg_lldp_mib_change(&pf->hw, false);
+ ice_deinit_fdir(pf);
+ if (ice_is_feature_supported(pf, ICE_F_GNSS))
+ ice_gnss_exit(pf);
+ if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
+ ice_ptp_release(pf);
+}
+
+static void ice_init_wakeup(struct ice_pf *pf)
+{
+ /* Save wakeup reason register for later use */
+ pf->wakeup_reason = rd32(&pf->hw, PFPM_WUS);
+
+ /* check for a power management event */
+ ice_print_wake_reason(pf);
+
+ /* clear wake status, all bits */
+ wr32(&pf->hw, PFPM_WUS, U32_MAX);
+
+ /* Disable WoL at init, wait for user to enable */
+ device_set_wakeup_enable(ice_pf_to_dev(pf), false);
+}
+
+static int ice_init_link(struct ice_pf *pf)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ int err;
err = ice_init_link_events(pf->hw.port_info);
if (err) {
dev_err(dev, "ice_init_link_events failed: %d\n", err);
- goto err_send_version_unroll;
+ return err;
}
/* not a fatal error if this fails */
@@ -4867,123 +4897,350 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
set_bit(ICE_FLAG_NO_MEDIA, pf->flags);
}
- ice_verify_cacheline_size(pf);
+ return err;
+}
- /* Save wakeup reason register for later use */
- pf->wakeup_reason = rd32(hw, PFPM_WUS);
+static int ice_init_pf_sw(struct ice_pf *pf)
+{
+ bool dvm = ice_is_dvm_ena(&pf->hw);
+ struct ice_vsi *vsi;
+ int err;
- /* check for a power management event */
- ice_print_wake_reason(pf);
+ /* create switch struct for the switch element created by FW on boot */
+ pf->first_sw = kzalloc(sizeof(*pf->first_sw), GFP_KERNEL);
+ if (!pf->first_sw)
+ return -ENOMEM;
- /* clear wake status, all bits */
- wr32(hw, PFPM_WUS, U32_MAX);
+ if (pf->hw.evb_veb)
+ pf->first_sw->bridge_mode = BRIDGE_MODE_VEB;
+ else
+ pf->first_sw->bridge_mode = BRIDGE_MODE_VEPA;
- /* Disable WoL at init, wait for user to enable */
- device_set_wakeup_enable(dev, false);
+ pf->first_sw->pf = pf;
- if (ice_is_safe_mode(pf)) {
- ice_set_safe_mode_vlan_cfg(pf);
- goto probe_done;
+ /* record the sw_id available for later use */
+ pf->first_sw->sw_id = pf->hw.port_info->sw_id;
+
+ err = ice_aq_set_port_params(pf->hw.port_info, dvm, NULL);
+ if (err)
+ goto err_aq_set_port_params;
+
+ vsi = ice_pf_vsi_setup(pf, pf->hw.port_info);
+ if (!vsi) {
+ err = -ENOMEM;
+ goto err_pf_vsi_setup;
}
- /* initialize DDP driven features */
- if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
- ice_ptp_init(pf);
+ return 0;
- if (ice_is_feature_supported(pf, ICE_F_GNSS))
- ice_gnss_init(pf);
+err_pf_vsi_setup:
+err_aq_set_port_params:
+ kfree(pf->first_sw);
+ return err;
+}
- /* Note: Flow director init failure is non-fatal to load */
- if (ice_init_fdir(pf))
- dev_err(dev, "could not initialize flow director\n");
+static void ice_deinit_pf_sw(struct ice_pf *pf)
+{
+ struct ice_vsi *vsi = ice_get_main_vsi(pf);
- /* Note: DCB init failure is non-fatal to load */
- if (ice_init_pf_dcb(pf, false)) {
- clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
- clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
- } else {
- ice_cfg_lldp_mib_change(&pf->hw, true);
+ if (!vsi)
+ return;
+
+ ice_vsi_release(vsi);
+ kfree(pf->first_sw);
+}
+
+static int ice_alloc_vsis(struct ice_pf *pf)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+
+ pf->num_alloc_vsi = pf->hw.func_caps.guar_num_vsi;
+ if (!pf->num_alloc_vsi)
+ return -EIO;
+
+ if (pf->num_alloc_vsi > UDP_TUNNEL_NIC_MAX_SHARING_DEVICES) {
+ dev_warn(dev,
+ "limiting the VSI count due to UDP tunnel limitation %d > %d\n",
+ pf->num_alloc_vsi, UDP_TUNNEL_NIC_MAX_SHARING_DEVICES);
+ pf->num_alloc_vsi = UDP_TUNNEL_NIC_MAX_SHARING_DEVICES;
}
- if (ice_init_lag(pf))
- dev_warn(dev, "Failed to init link aggregation support\n");
+ pf->vsi = devm_kcalloc(dev, pf->num_alloc_vsi, sizeof(*pf->vsi),
+ GFP_KERNEL);
+ if (!pf->vsi)
+ return -ENOMEM;
- /* print PCI link speed and width */
- pcie_print_link_status(pf->pdev);
+ pf->vsi_stats = devm_kcalloc(dev, pf->num_alloc_vsi,
+ sizeof(*pf->vsi_stats), GFP_KERNEL);
+ if (!pf->vsi_stats) {
+ devm_kfree(dev, pf->vsi);
+ return -ENOMEM;
+ }
-probe_done:
- err = ice_devlink_create_pf_port(pf);
+ return 0;
+}
+
+static void ice_dealloc_vsis(struct ice_pf *pf)
+{
+ devm_kfree(ice_pf_to_dev(pf), pf->vsi_stats);
+ pf->vsi_stats = NULL;
+
+ pf->num_alloc_vsi = 0;
+ devm_kfree(ice_pf_to_dev(pf), pf->vsi);
+ pf->vsi = NULL;
+}
+
+static int ice_init_devlink(struct ice_pf *pf)
+{
+ int err;
+
+ err = ice_devlink_register_params(pf);
if (err)
- goto err_create_pf_port;
+ return err;
- vsi = ice_get_main_vsi(pf);
- if (!vsi || !vsi->netdev) {
- err = -EINVAL;
- goto err_netdev_reg;
- }
+ ice_devlink_init_regions(pf);
+ ice_devlink_register(pf);
- SET_NETDEV_DEVLINK_PORT(vsi->netdev, &pf->devlink_port);
+ return 0;
+}
+
+static void ice_deinit_devlink(struct ice_pf *pf)
+{
+ ice_devlink_unregister(pf);
+ ice_devlink_destroy_regions(pf);
+ ice_devlink_unregister_params(pf);
+}
- err = ice_register_netdev(pf);
+static int ice_init(struct ice_pf *pf)
+{
+ int err;
+
+ err = ice_init_dev(pf);
+ if (err)
+ return err;
+
+ err = ice_alloc_vsis(pf);
if (err)
- goto err_netdev_reg;
+ goto err_alloc_vsis;
- err = ice_devlink_register_params(pf);
+ err = ice_init_pf_sw(pf);
if (err)
- goto err_netdev_reg;
+ goto err_init_pf_sw;
+
+ ice_init_wakeup(pf);
+
+ err = ice_init_link(pf);
+ if (err)
+ goto err_init_link;
+
+ err = ice_send_version(pf);
+ if (err)
+ goto err_init_link;
+
+ ice_verify_cacheline_size(pf);
+
+ if (ice_is_safe_mode(pf))
+ ice_set_safe_mode_vlan_cfg(pf);
+ else
+ /* print PCI link speed and width */
+ pcie_print_link_status(pf->pdev);
/* ready to go, so clear down state bit */
clear_bit(ICE_DOWN, pf->state);
- if (ice_is_rdma_ena(pf)) {
- pf->aux_idx = ida_alloc(&ice_aux_ida, GFP_KERNEL);
- if (pf->aux_idx < 0) {
- dev_err(dev, "Failed to allocate device ID for AUX driver\n");
- err = -ENOMEM;
- goto err_devlink_reg_param;
- }
+ clear_bit(ICE_SERVICE_DIS, pf->state);
- err = ice_init_rdma(pf);
- if (err) {
- dev_err(dev, "Failed to initialize RDMA: %d\n", err);
- err = -EIO;
- goto err_init_aux_unroll;
- }
- } else {
- dev_warn(dev, "RDMA is not supported on this device\n");
- }
+ /* since everything is good, start the service timer */
+ mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
- ice_devlink_register(pf);
return 0;
-err_init_aux_unroll:
- pf->adev = NULL;
- ida_free(&ice_aux_ida, pf->aux_idx);
-err_devlink_reg_param:
- ice_devlink_unregister_params(pf);
-err_netdev_reg:
- ice_devlink_destroy_pf_port(pf);
-err_create_pf_port:
-err_send_version_unroll:
- ice_vsi_release_all(pf);
-err_alloc_sw_unroll:
+err_init_link:
+ ice_deinit_pf_sw(pf);
+err_init_pf_sw:
+ ice_dealloc_vsis(pf);
+err_alloc_vsis:
+ ice_deinit_dev(pf);
+ return err;
+}
+
+static void ice_deinit(struct ice_pf *pf)
+{
set_bit(ICE_SERVICE_DIS, pf->state);
set_bit(ICE_DOWN, pf->state);
- devm_kfree(dev, pf->first_sw);
-err_msix_misc_unroll:
- ice_free_irq_msix_misc(pf);
-err_init_interrupt_unroll:
- ice_clear_interrupt_scheme(pf);
-err_init_vsi_stats_unroll:
- devm_kfree(dev, pf->vsi_stats);
- pf->vsi_stats = NULL;
-err_init_vsi_unroll:
- devm_kfree(dev, pf->vsi);
-err_init_pf_unroll:
- ice_deinit_pf(pf);
- ice_devlink_destroy_regions(pf);
- ice_deinit_hw(hw);
-err_exit_unroll:
- pci_disable_pcie_error_reporting(pdev);
+
+ ice_deinit_pf_sw(pf);
+ ice_dealloc_vsis(pf);
+ ice_deinit_dev(pf);
+}
+
+/**
+ * ice_load - load pf by init hw and starting VSI
+ * @pf: pointer to the pf instance
+ */
+int ice_load(struct ice_pf *pf)
+{
+ struct ice_vsi_cfg_params params = {};
+ struct ice_vsi *vsi;
+ int err;
+
+ err = ice_reset(&pf->hw, ICE_RESET_PFR);
+ if (err)
+ return err;
+
+ err = ice_init_dev(pf);
+ if (err)
+ return err;
+
+ vsi = ice_get_main_vsi(pf);
+
+ params = ice_vsi_to_params(vsi);
+ params.flags = ICE_VSI_FLAG_INIT;
+
+ err = ice_vsi_cfg(vsi, &params);
+ if (err)
+ goto err_vsi_cfg;
+
+ err = ice_start_eth(ice_get_main_vsi(pf));
+ if (err)
+ goto err_start_eth;
+
+ err = ice_init_rdma(pf);
+ if (err)
+ goto err_init_rdma;
+
+ ice_init_features(pf);
+ ice_service_task_restart(pf);
+
+ clear_bit(ICE_DOWN, pf->state);
+
+ return 0;
+
+err_init_rdma:
+ ice_vsi_close(ice_get_main_vsi(pf));
+err_start_eth:
+ ice_vsi_decfg(ice_get_main_vsi(pf));
+err_vsi_cfg:
+ ice_deinit_dev(pf);
+ return err;
+}
+
+/**
+ * ice_unload - unload pf by stopping VSI and deinit hw
+ * @pf: pointer to the pf instance
+ */
+void ice_unload(struct ice_pf *pf)
+{
+ ice_deinit_features(pf);
+ ice_deinit_rdma(pf);
+ ice_vsi_close(ice_get_main_vsi(pf));
+ ice_vsi_decfg(ice_get_main_vsi(pf));
+ ice_deinit_dev(pf);
+}
+
+/**
+ * ice_probe - Device initialization routine
+ * @pdev: PCI device information struct
+ * @ent: entry in ice_pci_tbl
+ *
+ * Returns 0 on success, negative on failure
+ */
+static int
+ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
+{
+ struct device *dev = &pdev->dev;
+ struct ice_pf *pf;
+ struct ice_hw *hw;
+ int err;
+
+ if (pdev->is_virtfn) {
+ dev_err(dev, "can't probe a virtual function\n");
+ return -EINVAL;
+ }
+
+ /* this driver uses devres, see
+ * Documentation/driver-api/driver-model/devres.rst
+ */
+ err = pcim_enable_device(pdev);
+ if (err)
+ return err;
+
+ err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), dev_driver_string(dev));
+ if (err) {
+ dev_err(dev, "BAR0 I/O map error %d\n", err);
+ return err;
+ }
+
+ pf = ice_allocate_pf(dev);
+ if (!pf)
+ return -ENOMEM;
+
+ /* initialize Auxiliary index to invalid value */
+ pf->aux_idx = -1;
+
+ /* set up for high or low DMA */
+ err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
+ if (err) {
+ dev_err(dev, "DMA configuration failed: 0x%x\n", err);
+ return err;
+ }
+
+ pci_set_master(pdev);
+
+ pf->pdev = pdev;
+ pci_set_drvdata(pdev, pf);
+ set_bit(ICE_DOWN, pf->state);
+ /* Disable service task until DOWN bit is cleared */
+ set_bit(ICE_SERVICE_DIS, pf->state);
+
+ hw = &pf->hw;
+ hw->hw_addr = pcim_iomap_table(pdev)[ICE_BAR0];
+ pci_save_state(pdev);
+
+ hw->back = pf;
+ hw->port_info = NULL;
+ hw->vendor_id = pdev->vendor;
+ hw->device_id = pdev->device;
+ pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
+ hw->subsystem_vendor_id = pdev->subsystem_vendor;
+ hw->subsystem_device_id = pdev->subsystem_device;
+ hw->bus.device = PCI_SLOT(pdev->devfn);
+ hw->bus.func = PCI_FUNC(pdev->devfn);
+ ice_set_ctrlq_len(hw);
+
+ pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M);
+
+#ifndef CONFIG_DYNAMIC_DEBUG
+ if (debug < -1)
+ hw->debug_mask = debug;
+#endif
+
+ err = ice_init(pf);
+ if (err)
+ goto err_init;
+
+ err = ice_init_eth(pf);
+ if (err)
+ goto err_init_eth;
+
+ err = ice_init_rdma(pf);
+ if (err)
+ goto err_init_rdma;
+
+ err = ice_init_devlink(pf);
+ if (err)
+ goto err_init_devlink;
+
+ ice_init_features(pf);
+
+ return 0;
+
+err_init_devlink:
+ ice_deinit_rdma(pf);
+err_init_rdma:
+ ice_deinit_eth(pf);
+err_init_eth:
+ ice_deinit(pf);
+err_init:
pci_disable_device(pdev);
return err;
}
@@ -5058,52 +5315,33 @@ static void ice_remove(struct pci_dev *pdev)
struct ice_pf *pf = pci_get_drvdata(pdev);
int i;
- ice_devlink_unregister(pf);
for (i = 0; i < ICE_MAX_RESET_WAIT; i++) {
if (!ice_is_reset_in_progress(pf->state))
break;
msleep(100);
}
- ice_tc_indir_block_remove(pf);
-
if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) {
set_bit(ICE_VF_RESETS_DISABLED, pf->state);
ice_free_vfs(pf);
}
ice_service_task_stop(pf);
-
ice_aq_cancel_waiting_tasks(pf);
- ice_unplug_aux_dev(pf);
- if (pf->aux_idx >= 0)
- ida_free(&ice_aux_ida, pf->aux_idx);
- ice_devlink_unregister_params(pf);
set_bit(ICE_DOWN, pf->state);
- ice_deinit_lag(pf);
- if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
- ice_ptp_release(pf);
- if (ice_is_feature_supported(pf, ICE_F_GNSS))
- ice_gnss_exit(pf);
if (!ice_is_safe_mode(pf))
ice_remove_arfs(pf);
- ice_setup_mc_magic_wake(pf);
+ ice_deinit_features(pf);
+ ice_deinit_devlink(pf);
+ ice_deinit_rdma(pf);
+ ice_deinit_eth(pf);
+ ice_deinit(pf);
+
ice_vsi_release_all(pf);
- mutex_destroy(&(&pf->hw)->fdir_fltr_lock);
- ice_devlink_destroy_pf_port(pf);
+
+ ice_setup_mc_magic_wake(pf);
ice_set_wake(pf);
- ice_free_irq_msix_misc(pf);
- ice_for_each_vsi(pf, i) {
- if (!pf->vsi[i])
- continue;
- ice_vsi_free_q_vectors(pf->vsi[i]);
- }
- devm_kfree(&pdev->dev, pf->vsi_stats);
- pf->vsi_stats = NULL;
- ice_deinit_pf(pf);
- ice_devlink_destroy_regions(pf);
- ice_deinit_hw(&pf->hw);
/* Issue a PFR as part of the prescribed driver unload flow. Do not
* do it via ice_schedule_reset() since there is no need to rebuild
@@ -5111,8 +5349,6 @@ static void ice_remove(struct pci_dev *pdev)
*/
ice_reset(&pf->hw, ICE_RESET_PFR);
pci_wait_for_pending_transaction(pdev);
- ice_clear_interrupt_scheme(pf);
- pci_disable_pcie_error_reporting(pdev);
pci_disable_device(pdev);
}
@@ -5540,7 +5776,7 @@ static int __init ice_module_init(void)
pr_info("%s\n", ice_driver_string);
pr_info("%s\n", ice_copyright);
- ice_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, KBUILD_MODNAME);
+ ice_wq = alloc_workqueue("%s", 0, 0, KBUILD_MODNAME);
if (!ice_wq) {
pr_err("Failed to create workqueue\n");
return -ENOMEM;
@@ -6146,24 +6382,21 @@ static int ice_vsi_vlan_setup(struct ice_vsi *vsi)
}
/**
- * ice_vsi_cfg - Setup the VSI
+ * ice_vsi_cfg_lan - Setup the VSI lan related config
* @vsi: the VSI being configured
*
* Return 0 on success and negative value on error
*/
-int ice_vsi_cfg(struct ice_vsi *vsi)
+int ice_vsi_cfg_lan(struct ice_vsi *vsi)
{
int err;
- if (vsi->netdev) {
+ if (vsi->netdev && vsi->type == ICE_VSI_PF) {
ice_set_rx_mode(vsi->netdev);
- if (vsi->type != ICE_VSI_LB) {
- err = ice_vsi_vlan_setup(vsi);
-
- if (err)
- return err;
- }
+ err = ice_vsi_vlan_setup(vsi);
+ if (err)
+ return err;
}
ice_vsi_cfg_dcb_rings(vsi);
@@ -6344,7 +6577,7 @@ static int ice_up_complete(struct ice_vsi *vsi)
if (vsi->port_info &&
(vsi->port_info->phy.link_info.link_info & ICE_AQ_LINK_UP) &&
- vsi->netdev) {
+ vsi->netdev && vsi->type == ICE_VSI_PF) {
ice_print_link_msg(vsi, true);
netif_tx_start_all_queues(vsi->netdev);
netif_carrier_on(vsi->netdev);
@@ -6355,7 +6588,9 @@ static int ice_up_complete(struct ice_vsi *vsi)
* set the baseline so counters are ready when interface is up
*/
ice_update_eth_stats(vsi);
- ice_service_task_schedule(pf);
+
+ if (vsi->type == ICE_VSI_PF)
+ ice_service_task_schedule(pf);
return 0;
}
@@ -6368,7 +6603,7 @@ int ice_up(struct ice_vsi *vsi)
{
int err;
- err = ice_vsi_cfg(vsi);
+ err = ice_vsi_cfg_lan(vsi);
if (!err)
err = ice_up_complete(vsi);
@@ -6936,7 +7171,7 @@ int ice_vsi_open_ctrl(struct ice_vsi *vsi)
if (err)
goto err_setup_rx;
- err = ice_vsi_cfg(vsi);
+ err = ice_vsi_cfg_lan(vsi);
if (err)
goto err_setup_rx;
@@ -6990,7 +7225,7 @@ int ice_vsi_open(struct ice_vsi *vsi)
if (err)
goto err_setup_rx;
- err = ice_vsi_cfg(vsi);
+ err = ice_vsi_cfg_lan(vsi);
if (err)
goto err_setup_rx;
@@ -7075,7 +7310,7 @@ static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type)
continue;
/* rebuild the VSI */
- err = ice_vsi_rebuild(vsi, true);
+ err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_INIT);
if (err) {
dev_err(dev, "rebuild VSI failed, err %d, VSI index %d, type %s\n",
err, vsi->idx, ice_vsi_type_str(type));
@@ -7331,18 +7566,6 @@ clear_recovery:
}
/**
- * ice_max_xdp_frame_size - returns the maximum allowed frame size for XDP
- * @vsi: Pointer to VSI structure
- */
-static int ice_max_xdp_frame_size(struct ice_vsi *vsi)
-{
- if (PAGE_SIZE >= 8192 || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags))
- return ICE_RXBUF_2048 - XDP_PACKET_HEADROOM;
- else
- return ICE_RXBUF_3072;
-}
-
-/**
* ice_change_mtu - NDO callback to change the MTU
* @netdev: network interface device structure
* @new_mtu: new value for maximum frame size
@@ -7354,6 +7577,7 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu)
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
+ struct bpf_prog *prog;
u8 count = 0;
int err = 0;
@@ -7362,7 +7586,8 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu)
return 0;
}
- if (ice_is_xdp_ena_vsi(vsi)) {
+ prog = vsi->xdp_prog;
+ if (prog && !prog->aux->xdp_has_frags) {
int frame_size = ice_max_xdp_frame_size(vsi);
if (new_mtu + ICE_ETH_PKT_HDR_PAD > frame_size) {
@@ -7370,6 +7595,12 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu)
frame_size - ICE_ETH_PKT_HDR_PAD);
return -EINVAL;
}
+ } else if (test_bit(ICE_FLAG_LEGACY_RX, pf->flags)) {
+ if (new_mtu + ICE_ETH_PKT_HDR_PAD > ICE_MAX_FRAME_LEGACY_RX) {
+ netdev_err(netdev, "Too big MTU for legacy-rx; Max is %d\n",
+ ICE_MAX_FRAME_LEGACY_RX - ICE_ETH_PKT_HDR_PAD);
+ return -EINVAL;
+ }
}
/* if a reset is in progress, wait for some time for it to complete */
@@ -8420,12 +8651,9 @@ static void ice_remove_q_channels(struct ice_vsi *vsi, bool rem_fltr)
/* clear the VSI from scheduler tree */
ice_rm_vsi_lan_cfg(ch->ch_vsi->port_info, ch->ch_vsi->idx);
- /* Delete VSI from FW */
+ /* Delete VSI from FW, PF and HW VSI arrays */
ice_vsi_delete(ch->ch_vsi);
- /* Delete VSI from PF and HW VSI arrays */
- ice_vsi_clear(ch->ch_vsi);
-
/* free the channel */
kfree(ch);
}
@@ -8484,7 +8712,7 @@ static int ice_rebuild_channels(struct ice_pf *pf)
type = vsi->type;
/* rebuild ADQ VSI */
- err = ice_vsi_rebuild(vsi, true);
+ err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_INIT);
if (err) {
dev_err(dev, "VSI (type:%s) at index %d rebuild failed, err %d\n",
ice_vsi_type_str(type), vsi->idx, err);
@@ -8716,14 +8944,14 @@ config_tcf:
cur_rxq = vsi->num_rxq;
/* proceed with rebuild main VSI using correct number of queues */
- ret = ice_vsi_rebuild(vsi, false);
+ ret = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
if (ret) {
/* fallback to current number of queues */
dev_info(dev, "Rebuild failed with new queues, try with current number of queues\n");
vsi->req_txq = cur_txq;
vsi->req_rxq = cur_rxq;
clear_bit(ICE_RESET_FAILED, pf->state);
- if (ice_vsi_rebuild(vsi, false)) {
+ if (ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT)) {
dev_err(dev, "Rebuild of main VSI failed again\n");
return ret;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c
index c262dc886e6a..f6f52a248066 100644
--- a/drivers/net/ethernet/intel/ice/ice_nvm.c
+++ b/drivers/net/ethernet/intel/ice/ice_nvm.c
@@ -662,7 +662,6 @@ ice_get_orom_civd_data(struct ice_hw *hw, enum ice_bank_select bank,
/* Verify that the simple checksum is zero */
for (i = 0; i < sizeof(*tmp); i++)
- /* cppcheck-suppress objectIndex */
sum += ((u8 *)tmp)[i];
if (sum) {
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
index d63161d73eb1..ac6f06f9a2ed 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
@@ -680,6 +680,7 @@ static bool ice_ptp_tx_tstamp(struct ice_ptp_tx *tx)
struct ice_pf *pf;
struct ice_hw *hw;
u64 tstamp_ready;
+ bool link_up;
int err;
u8 idx;
@@ -695,11 +696,14 @@ static bool ice_ptp_tx_tstamp(struct ice_ptp_tx *tx)
if (err)
return false;
+ /* Drop packets if the link went down */
+ link_up = ptp_port->link_up;
+
for_each_set_bit(idx, tx->in_use, tx->len) {
struct skb_shared_hwtstamps shhwtstamps = {};
u8 phy_idx = idx + tx->offset;
u64 raw_tstamp = 0, tstamp;
- bool drop_ts = false;
+ bool drop_ts = !link_up;
struct sk_buff *skb;
/* Drop packets which have waited for more than 2 seconds */
@@ -728,7 +732,7 @@ static bool ice_ptp_tx_tstamp(struct ice_ptp_tx *tx)
ice_trace(tx_tstamp_fw_req, tx->tstamps[idx].skb, idx);
err = ice_read_phy_tstamp(hw, tx->block, phy_idx, &raw_tstamp);
- if (err)
+ if (err && !drop_ts)
continue;
ice_trace(tx_tstamp_fw_done, tx->tstamps[idx].skb, idx);
@@ -1770,6 +1774,38 @@ ice_ptp_gpio_enable_e810(struct ptp_clock_info *info,
}
/**
+ * ice_ptp_gpio_enable_e823 - Enable/disable ancillary features of PHC
+ * @info: the driver's PTP info structure
+ * @rq: The requested feature to change
+ * @on: Enable/disable flag
+ */
+static int ice_ptp_gpio_enable_e823(struct ptp_clock_info *info,
+ struct ptp_clock_request *rq, int on)
+{
+ struct ice_pf *pf = ptp_info_to_pf(info);
+ struct ice_perout_channel clk_cfg = {0};
+ int err;
+
+ switch (rq->type) {
+ case PTP_CLK_REQ_PPS:
+ clk_cfg.gpio_pin = PPS_PIN_INDEX;
+ clk_cfg.period = NSEC_PER_SEC;
+ clk_cfg.ena = !!on;
+
+ err = ice_ptp_cfg_clkout(pf, PPS_CLK_GEN_CHAN, &clk_cfg, true);
+ break;
+ case PTP_CLK_REQ_EXTTS:
+ err = ice_ptp_cfg_extts(pf, !!on, rq->extts.index,
+ TIME_SYNC_PIN_INDEX, rq->extts.flags);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return err;
+}
+
+/**
* ice_ptp_gettimex64 - Get the time of the clock
* @info: the driver's PTP info structure
* @ts: timespec64 structure to hold the current time value
@@ -2221,6 +2257,19 @@ ice_ptp_setup_pins_e810(struct ice_pf *pf, struct ptp_clock_info *info)
}
/**
+ * ice_ptp_setup_pins_e823 - Setup PTP pins in sysfs
+ * @pf: pointer to the PF instance
+ * @info: PTP clock capabilities
+ */
+static void
+ice_ptp_setup_pins_e823(struct ice_pf *pf, struct ptp_clock_info *info)
+{
+ info->pps = 1;
+ info->n_per_out = 0;
+ info->n_ext_ts = 1;
+}
+
+/**
* ice_ptp_set_funcs_e822 - Set specialized functions for E822 support
* @pf: Board private structure
* @info: PTP info to fill
@@ -2258,6 +2307,23 @@ ice_ptp_set_funcs_e810(struct ice_pf *pf, struct ptp_clock_info *info)
}
/**
+ * ice_ptp_set_funcs_e823 - Set specialized functions for E823 support
+ * @pf: Board private structure
+ * @info: PTP info to fill
+ *
+ * Assign functions to the PTP capabiltiies structure for E823 devices.
+ * Functions which operate across all device families should be set directly
+ * in ice_ptp_set_caps. Only add functions here which are distinct for e823
+ * devices.
+ */
+static void
+ice_ptp_set_funcs_e823(struct ice_pf *pf, struct ptp_clock_info *info)
+{
+ info->enable = ice_ptp_gpio_enable_e823;
+ ice_ptp_setup_pins_e823(pf, info);
+}
+
+/**
* ice_ptp_set_caps - Set PTP capabilities
* @pf: Board private structure
*/
@@ -2269,7 +2335,7 @@ static void ice_ptp_set_caps(struct ice_pf *pf)
snprintf(info->name, sizeof(info->name) - 1, "%s-%s-clk",
dev_driver_string(dev), dev_name(dev));
info->owner = THIS_MODULE;
- info->max_adj = 999999999;
+ info->max_adj = 100000000;
info->adjtime = ice_ptp_adjtime;
info->adjfine = ice_ptp_adjfine;
info->gettimex64 = ice_ptp_gettimex64;
@@ -2277,6 +2343,8 @@ static void ice_ptp_set_caps(struct ice_pf *pf)
if (ice_is_e810(&pf->hw))
ice_ptp_set_funcs_e810(pf, info);
+ else if (ice_is_e823(&pf->hw))
+ ice_ptp_set_funcs_e823(pf, info);
else
ice_ptp_set_funcs_e822(pf, info);
}
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
index 6d08b397df2a..4eca8d195ef0 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.c
+++ b/drivers/net/ethernet/intel/ice/ice_sched.c
@@ -1063,7 +1063,6 @@ ice_sched_add_nodes_to_layer(struct ice_port_info *pi,
*num_nodes_added = 0;
while (*num_nodes_added < num_nodes) {
u16 max_child_nodes, num_added = 0;
- /* cppcheck-suppress unusedVariable */
u32 temp;
status = ice_sched_add_nodes_to_hw_layer(pi, tc_node, parent,
@@ -1655,12 +1654,13 @@ ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
u32 first_node_teid;
u16 num_added = 0;
u8 i, qgl, vsil;
- int status;
qgl = ice_sched_get_qgrp_layer(hw);
vsil = ice_sched_get_vsi_layer(hw);
parent = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
for (i = vsil + 1; i <= qgl; i++) {
+ int status;
+
if (!parent)
return -EIO;
@@ -1756,13 +1756,14 @@ ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_handle,
u32 first_node_teid;
u16 num_added = 0;
u8 i, vsil;
- int status;
if (!pi)
return -EINVAL;
vsil = ice_sched_get_vsi_layer(pi->hw);
for (i = pi->hw->sw_entry_point_layer; i <= vsil; i++) {
+ int status;
+
status = ice_sched_add_nodes_to_layer(pi, tc_node, parent,
i, num_nodes[i],
&first_node_teid,
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c
index 3ba1408c56a9..96a64c25e2ef 100644
--- a/drivers/net/ethernet/intel/ice/ice_sriov.c
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.c
@@ -41,21 +41,6 @@ static void ice_free_vf_entries(struct ice_pf *pf)
}
/**
- * ice_vf_vsi_release - invalidate the VF's VSI after freeing it
- * @vf: invalidate this VF's VSI after freeing it
- */
-static void ice_vf_vsi_release(struct ice_vf *vf)
-{
- struct ice_vsi *vsi = ice_get_vf_vsi(vf);
-
- if (WARN_ON(!vsi))
- return;
-
- ice_vsi_release(vsi);
- ice_vf_invalidate_vsi(vf);
-}
-
-/**
* ice_free_vf_res - Free a VF's resources
* @vf: pointer to the VF info
*/
@@ -248,11 +233,16 @@ void ice_free_vfs(struct ice_pf *pf)
*/
static struct ice_vsi *ice_vf_vsi_setup(struct ice_vf *vf)
{
- struct ice_port_info *pi = ice_vf_get_port_info(vf);
+ struct ice_vsi_cfg_params params = {};
struct ice_pf *pf = vf->pf;
struct ice_vsi *vsi;
- vsi = ice_vsi_setup(pf, pi, ICE_VSI_VF, vf, NULL);
+ params.type = ICE_VSI_VF;
+ params.pi = ice_vf_get_port_info(vf);
+ params.vf = vf;
+ params.flags = ICE_VSI_FLAG_INIT;
+
+ vsi = ice_vsi_setup(pf, &params);
if (!vsi) {
dev_err(ice_pf_to_dev(pf), "Failed to create VF VSI\n");
@@ -583,51 +573,19 @@ static int ice_set_per_vf_res(struct ice_pf *pf, u16 num_vfs)
*/
static int ice_init_vf_vsi_res(struct ice_vf *vf)
{
- struct ice_vsi_vlan_ops *vlan_ops;
struct ice_pf *pf = vf->pf;
- u8 broadcast[ETH_ALEN];
struct ice_vsi *vsi;
- struct device *dev;
int err;
vf->first_vector_idx = ice_calc_vf_first_vector_idx(pf, vf);
- dev = ice_pf_to_dev(pf);
vsi = ice_vf_vsi_setup(vf);
if (!vsi)
return -ENOMEM;
- err = ice_vsi_add_vlan_zero(vsi);
- if (err) {
- dev_warn(dev, "Failed to add VLAN 0 filter for VF %d\n",
- vf->vf_id);
- goto release_vsi;
- }
-
- vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
- err = vlan_ops->ena_rx_filtering(vsi);
- if (err) {
- dev_warn(dev, "Failed to enable Rx VLAN filtering for VF %d\n",
- vf->vf_id);
- goto release_vsi;
- }
-
- eth_broadcast_addr(broadcast);
- err = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
- if (err) {
- dev_err(dev, "Failed to add broadcast MAC filter for VF %d, error %d\n",
- vf->vf_id, err);
- goto release_vsi;
- }
-
- err = ice_vsi_apply_spoofchk(vsi, vf->spoofchk);
- if (err) {
- dev_warn(dev, "Failed to initialize spoofchk setting for VF %d\n",
- vf->vf_id);
+ err = ice_vf_init_host_cfg(vf, vsi);
+ if (err)
goto release_vsi;
- }
-
- vf->num_mac = 1;
return 0;
@@ -697,6 +655,21 @@ static void ice_sriov_free_vf(struct ice_vf *vf)
}
/**
+ * ice_sriov_clear_reset_state - clears VF Reset status register
+ * @vf: the vf to configure
+ */
+static void ice_sriov_clear_reset_state(struct ice_vf *vf)
+{
+ struct ice_hw *hw = &vf->pf->hw;
+
+ /* Clear the reset status register so that VF immediately sees that
+ * the device is resetting, even if hardware hasn't yet gotten around
+ * to clearing VFGEN_RSTAT for us.
+ */
+ wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_INPROGRESS);
+}
+
+/**
* ice_sriov_clear_mbx_register - clears SRIOV VF's mailbox registers
* @vf: the vf to configure
*/
@@ -799,23 +772,19 @@ static void ice_sriov_clear_reset_trigger(struct ice_vf *vf)
}
/**
- * ice_sriov_vsi_rebuild - release and rebuild VF's VSI
- * @vf: VF to release and setup the VSI for
+ * ice_sriov_create_vsi - Create a new VSI for a VF
+ * @vf: VF to create the VSI for
*
- * This is only called when a single VF is being reset (i.e. VFR, VFLR, host VF
- * configuration change, etc.).
+ * This is called by ice_vf_recreate_vsi to create the new VSI after the old
+ * VSI has been released.
*/
-static int ice_sriov_vsi_rebuild(struct ice_vf *vf)
+static int ice_sriov_create_vsi(struct ice_vf *vf)
{
- struct ice_pf *pf = vf->pf;
+ struct ice_vsi *vsi;
- ice_vf_vsi_release(vf);
- if (!ice_vf_vsi_setup(vf)) {
- dev_err(ice_pf_to_dev(pf),
- "Failed to release and setup the VF%u's VSI\n",
- vf->vf_id);
+ vsi = ice_vf_vsi_setup(vf);
+ if (!vsi)
return -ENOMEM;
- }
return 0;
}
@@ -826,8 +795,6 @@ static int ice_sriov_vsi_rebuild(struct ice_vf *vf)
*/
static void ice_sriov_post_vsi_rebuild(struct ice_vf *vf)
{
- ice_vf_rebuild_host_cfg(vf);
- ice_vf_set_initialized(vf);
ice_ena_vf_mappings(vf);
wr32(&vf->pf->hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
}
@@ -835,11 +802,13 @@ static void ice_sriov_post_vsi_rebuild(struct ice_vf *vf)
static const struct ice_vf_ops ice_sriov_vf_ops = {
.reset_type = ICE_VF_RESET,
.free = ice_sriov_free_vf,
+ .clear_reset_state = ice_sriov_clear_reset_state,
.clear_mbx_register = ice_sriov_clear_mbx_register,
.trigger_reset_register = ice_sriov_trigger_reset_register,
.poll_reset_status = ice_sriov_poll_reset_status,
.clear_reset_trigger = ice_sriov_clear_reset_trigger,
- .vsi_rebuild = ice_sriov_vsi_rebuild,
+ .irq_close = NULL,
+ .create_vsi = ice_sriov_create_vsi,
.post_vsi_rebuild = ice_sriov_post_vsi_rebuild,
};
@@ -879,21 +848,9 @@ static int ice_create_vf_entries(struct ice_pf *pf, u16 num_vfs)
/* set sriov vf ops for VFs created during SRIOV flow */
vf->vf_ops = &ice_sriov_vf_ops;
- vf->vf_sw_id = pf->first_sw;
- /* assign default capabilities */
- vf->spoofchk = true;
- vf->num_vf_qs = pf->vfs.num_qps_per;
- ice_vc_set_default_allowlist(vf);
-
- /* ctrl_vsi_idx will be set to a valid value only when VF
- * creates its first fdir rule.
- */
- ice_vf_ctrl_invalidate_vsi(vf);
- ice_vf_fdir_init(vf);
-
- ice_virtchnl_set_dflt_ops(vf);
+ ice_initialize_vf_entry(vf);
- mutex_init(&vf->cfg_lock);
+ vf->vf_sw_id = pf->first_sw;
hash_add_rcu(vfs->table, &vf->entry, vf_id);
}
@@ -1285,7 +1242,7 @@ ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi)
goto out_put_vf;
ivi->vf = vf_id;
- ether_addr_copy(ivi->mac, vf->hw_lan_addr.addr);
+ ether_addr_copy(ivi->mac, vf->hw_lan_addr);
/* VF configuration for VLAN and applicable QoS */
ivi->vlan = ice_vf_get_port_vlan_id(vf);
@@ -1333,8 +1290,8 @@ int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
return -EINVAL;
/* nothing left to do, unicast MAC already set */
- if (ether_addr_equal(vf->dev_lan_addr.addr, mac) &&
- ether_addr_equal(vf->hw_lan_addr.addr, mac)) {
+ if (ether_addr_equal(vf->dev_lan_addr, mac) &&
+ ether_addr_equal(vf->hw_lan_addr, mac)) {
ret = 0;
goto out_put_vf;
}
@@ -1348,8 +1305,8 @@ int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
/* VF is notified of its new MAC via the PF's response to the
* VIRTCHNL_OP_GET_VF_RESOURCES message after the VF has been reset
*/
- ether_addr_copy(vf->dev_lan_addr.addr, mac);
- ether_addr_copy(vf->hw_lan_addr.addr, mac);
+ ether_addr_copy(vf->dev_lan_addr, mac);
+ ether_addr_copy(vf->hw_lan_addr, mac);
if (is_zero_ether_addr(mac)) {
/* VF will send VIRTCHNL_OP_ADD_ETH_ADDR message with its MAC */
vf->pf_set_mac = false;
@@ -1750,7 +1707,7 @@ void ice_print_vf_rx_mdd_event(struct ice_vf *vf)
dev_info(dev, "%d Rx Malicious Driver Detection events detected on PF %d VF %d MAC %pM. mdd-auto-reset-vfs=%s\n",
vf->mdd_rx_events.count, pf->hw.pf_id, vf->vf_id,
- vf->dev_lan_addr.addr,
+ vf->dev_lan_addr,
test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)
? "on" : "off");
}
@@ -1794,7 +1751,7 @@ void ice_print_vfs_mdd_events(struct ice_pf *pf)
dev_info(dev, "%d Tx Malicious Driver Detection events detected on PF %d VF %d MAC %pM.\n",
vf->mdd_tx_events.count, hw->pf_id, vf->vf_id,
- vf->dev_lan_addr.addr);
+ vf->dev_lan_addr);
}
}
mutex_unlock(&pf->vfs.table_lock);
@@ -1884,7 +1841,7 @@ ice_is_malicious_vf(struct ice_pf *pf, struct ice_rq_event_info *event,
if (pf_vsi)
dev_warn(dev, "VF MAC %pM on PF MAC %pM is generating asynchronous messages and may be overflowing the PF message queue. Please see the Adapter User Guide for more information\n",
- &vf->dev_lan_addr.addr[0],
+ &vf->dev_lan_addr[0],
pf_vsi->netdev->dev_addr);
}
}
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index 9b762f7972ce..61f844d22512 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -5420,7 +5420,7 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
*/
status = ice_add_special_words(rinfo, lkup_exts, ice_is_dvm_ena(hw));
if (status)
- goto err_free_lkup_exts;
+ goto err_unroll;
/* Group match words into recipes using preferred recipe grouping
* criteria.
diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.c b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
index faba0f857cd9..6b48cbc049c6 100644
--- a/drivers/net/ethernet/intel/ice/ice_tc_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
@@ -792,7 +792,7 @@ static struct ice_vsi *
ice_tc_forward_action(struct ice_vsi *vsi, struct ice_tc_flower_fltr *tc_fltr)
{
struct ice_rx_ring *ring = NULL;
- struct ice_vsi *ch_vsi = NULL;
+ struct ice_vsi *dest_vsi = NULL;
struct ice_pf *pf = vsi->back;
struct device *dev;
u32 tc_class;
@@ -810,7 +810,7 @@ ice_tc_forward_action(struct ice_vsi *vsi, struct ice_tc_flower_fltr *tc_fltr)
return ERR_PTR(-EOPNOTSUPP);
}
/* Locate ADQ VSI depending on hw_tc number */
- ch_vsi = vsi->tc_map_vsi[tc_class];
+ dest_vsi = vsi->tc_map_vsi[tc_class];
break;
case ICE_FWD_TO_Q:
/* Locate the Rx queue */
@@ -824,7 +824,7 @@ ice_tc_forward_action(struct ice_vsi *vsi, struct ice_tc_flower_fltr *tc_fltr)
/* Determine destination VSI even though the action is
* FWD_TO_QUEUE, because QUEUE is associated with VSI
*/
- ch_vsi = tc_fltr->dest_vsi;
+ dest_vsi = tc_fltr->dest_vsi;
break;
default:
dev_err(dev,
@@ -832,13 +832,13 @@ ice_tc_forward_action(struct ice_vsi *vsi, struct ice_tc_flower_fltr *tc_fltr)
tc_fltr->action.fltr_act);
return ERR_PTR(-EINVAL);
}
- /* Must have valid ch_vsi (it could be main VSI or ADQ VSI) */
- if (!ch_vsi) {
+ /* Must have valid dest_vsi (it could be main VSI or ADQ VSI) */
+ if (!dest_vsi) {
dev_err(dev,
"Unable to add filter because specified destination VSI doesn't exist\n");
return ERR_PTR(-EINVAL);
}
- return ch_vsi;
+ return dest_vsi;
}
/**
@@ -860,7 +860,7 @@ ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi,
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
u32 flags = tc_fltr->flags;
- struct ice_vsi *ch_vsi;
+ struct ice_vsi *dest_vsi;
struct device *dev;
u16 lkups_cnt = 0;
u16 l4_proto = 0;
@@ -883,9 +883,11 @@ ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi,
}
/* validate forwarding action VSI and queue */
- ch_vsi = ice_tc_forward_action(vsi, tc_fltr);
- if (IS_ERR(ch_vsi))
- return PTR_ERR(ch_vsi);
+ if (ice_is_forward_action(tc_fltr->action.fltr_act)) {
+ dest_vsi = ice_tc_forward_action(vsi, tc_fltr);
+ if (IS_ERR(dest_vsi))
+ return PTR_ERR(dest_vsi);
+ }
lkups_cnt = ice_tc_count_lkups(flags, headers, tc_fltr);
list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC);
@@ -904,7 +906,7 @@ ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi,
switch (tc_fltr->action.fltr_act) {
case ICE_FWD_TO_VSI:
- rule_info.sw_act.vsi_handle = ch_vsi->idx;
+ rule_info.sw_act.vsi_handle = dest_vsi->idx;
rule_info.priority = ICE_SWITCH_FLTR_PRIO_VSI;
rule_info.sw_act.src = hw->pf_id;
rule_info.rx = true;
@@ -915,7 +917,7 @@ ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi,
case ICE_FWD_TO_Q:
/* HW queue number in global space */
rule_info.sw_act.fwd_id.q_id = tc_fltr->action.fwd.q.hw_queue;
- rule_info.sw_act.vsi_handle = ch_vsi->idx;
+ rule_info.sw_act.vsi_handle = dest_vsi->idx;
rule_info.priority = ICE_SWITCH_FLTR_PRIO_QUEUE;
rule_info.sw_act.src = hw->pf_id;
rule_info.rx = true;
@@ -923,14 +925,15 @@ ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi,
tc_fltr->action.fwd.q.queue,
tc_fltr->action.fwd.q.hw_queue, lkups_cnt);
break;
- default:
- rule_info.sw_act.flag |= ICE_FLTR_TX;
- /* In case of Tx (LOOKUP_TX), src needs to be src VSI */
- rule_info.sw_act.src = vsi->idx;
- /* 'Rx' is false, direction of rule(LOOKUPTRX) */
- rule_info.rx = false;
+ case ICE_DROP_PACKET:
+ rule_info.sw_act.flag |= ICE_FLTR_RX;
+ rule_info.sw_act.src = hw->pf_id;
+ rule_info.rx = true;
rule_info.priority = ICE_SWITCH_FLTR_PRIO_VSI;
break;
+ default:
+ ret = -EOPNOTSUPP;
+ goto exit;
}
ret = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, &rule_added);
@@ -953,11 +956,11 @@ ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi,
tc_fltr->dest_vsi_handle = rule_added.vsi_handle;
if (tc_fltr->action.fltr_act == ICE_FWD_TO_VSI ||
tc_fltr->action.fltr_act == ICE_FWD_TO_Q) {
- tc_fltr->dest_vsi = ch_vsi;
+ tc_fltr->dest_vsi = dest_vsi;
/* keep track of advanced switch filter for
* destination VSI
*/
- ch_vsi->num_chnl_fltr++;
+ dest_vsi->num_chnl_fltr++;
/* keeps track of channel filters for PF VSI */
if (vsi->type == ICE_VSI_PF &&
@@ -978,6 +981,10 @@ ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi,
tc_fltr->action.fwd.q.hw_queue, rule_added.rid,
rule_added.rule_id);
break;
+ case ICE_DROP_PACKET:
+ dev_dbg(dev, "added switch rule (lkups_cnt %u, flags 0x%x), action is drop, rid %u, rule_id %u\n",
+ lkups_cnt, flags, rule_added.rid, rule_added.rule_id);
+ break;
default:
break;
}
@@ -1681,7 +1688,7 @@ ice_tc_forward_to_queue(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr,
struct ice_vsi *ch_vsi = NULL;
u16 queue = act->rx_queue;
- if (queue > vsi->num_rxq) {
+ if (queue >= vsi->num_rxq) {
NL_SET_ERR_MSG_MOD(fltr->extack,
"Unable to add filter because specified queue is invalid");
return -EINVAL;
@@ -1712,6 +1719,9 @@ ice_tc_parse_action(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr,
case FLOW_ACTION_RX_QUEUE_MAPPING:
/* forward to queue */
return ice_tc_forward_to_queue(vsi, fltr, act);
+ case FLOW_ACTION_DROP:
+ fltr->action.fltr_act = ICE_DROP_PACKET;
+ return 0;
default:
NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported TC action");
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.h b/drivers/net/ethernet/intel/ice/ice_tc_lib.h
index d916d1e92aa3..8d5e22ac7023 100644
--- a/drivers/net/ethernet/intel/ice/ice_tc_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.h
@@ -211,4 +211,14 @@ ice_del_cls_flower(struct ice_vsi *vsi, struct flow_cls_offload *cls_flower);
void ice_replay_tc_fltrs(struct ice_pf *pf);
bool ice_is_tunnel_supported(struct net_device *dev);
+static inline bool ice_is_forward_action(enum ice_sw_fwd_act_type fltr_act)
+{
+ switch (fltr_act) {
+ case ICE_FWD_TO_VSI:
+ case ICE_FWD_TO_Q:
+ return true;
+ default:
+ return false;
+ }
+}
#endif /* _ICE_TC_LIB_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 086f0b3ab68d..dfd22862e926 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -85,7 +85,7 @@ ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc,
td_cmd = ICE_TXD_LAST_DESC_CMD | ICE_TX_DESC_CMD_DUMMY |
ICE_TX_DESC_CMD_RE;
- tx_buf->tx_flags = ICE_TX_FLAGS_DUMMY_PKT;
+ tx_buf->type = ICE_TX_BUF_DUMMY;
tx_buf->raw_buf = raw_packet;
tx_desc->cmd_type_offset_bsz =
@@ -112,27 +112,29 @@ ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc,
static void
ice_unmap_and_free_tx_buf(struct ice_tx_ring *ring, struct ice_tx_buf *tx_buf)
{
- if (tx_buf->skb) {
- if (tx_buf->tx_flags & ICE_TX_FLAGS_DUMMY_PKT)
- devm_kfree(ring->dev, tx_buf->raw_buf);
- else if (ice_ring_is_xdp(ring))
- page_frag_free(tx_buf->raw_buf);
- else
- dev_kfree_skb_any(tx_buf->skb);
- if (dma_unmap_len(tx_buf, len))
- dma_unmap_single(ring->dev,
- dma_unmap_addr(tx_buf, dma),
- dma_unmap_len(tx_buf, len),
- DMA_TO_DEVICE);
- } else if (dma_unmap_len(tx_buf, len)) {
+ if (dma_unmap_len(tx_buf, len))
dma_unmap_page(ring->dev,
dma_unmap_addr(tx_buf, dma),
dma_unmap_len(tx_buf, len),
DMA_TO_DEVICE);
+
+ switch (tx_buf->type) {
+ case ICE_TX_BUF_DUMMY:
+ devm_kfree(ring->dev, tx_buf->raw_buf);
+ break;
+ case ICE_TX_BUF_SKB:
+ dev_kfree_skb_any(tx_buf->skb);
+ break;
+ case ICE_TX_BUF_XDP_TX:
+ page_frag_free(tx_buf->raw_buf);
+ break;
+ case ICE_TX_BUF_XDP_XMIT:
+ xdp_return_frame(tx_buf->xdpf);
+ break;
}
tx_buf->next_to_watch = NULL;
- tx_buf->skb = NULL;
+ tx_buf->type = ICE_TX_BUF_EMPTY;
dma_unmap_len_set(tx_buf, len, 0);
/* tx_buf must be completely set up in the transmit path */
}
@@ -174,8 +176,6 @@ tx_skip_free:
tx_ring->next_to_use = 0;
tx_ring->next_to_clean = 0;
- tx_ring->next_dd = ICE_RING_QUARTER(tx_ring) - 1;
- tx_ring->next_rs = ICE_RING_QUARTER(tx_ring) - 1;
if (!tx_ring->netdev)
return;
@@ -267,7 +267,7 @@ static bool ice_clean_tx_irq(struct ice_tx_ring *tx_ring, int napi_budget)
DMA_TO_DEVICE);
/* clear tx_buf data */
- tx_buf->skb = NULL;
+ tx_buf->type = ICE_TX_BUF_EMPTY;
dma_unmap_len_set(tx_buf, len, 0);
/* unmap remaining buffers */
@@ -382,6 +382,7 @@ err:
*/
void ice_clean_rx_ring(struct ice_rx_ring *rx_ring)
{
+ struct xdp_buff *xdp = &rx_ring->xdp;
struct device *dev = rx_ring->dev;
u32 size;
u16 i;
@@ -390,16 +391,16 @@ void ice_clean_rx_ring(struct ice_rx_ring *rx_ring)
if (!rx_ring->rx_buf)
return;
- if (rx_ring->skb) {
- dev_kfree_skb(rx_ring->skb);
- rx_ring->skb = NULL;
- }
-
if (rx_ring->xsk_pool) {
ice_xsk_clean_rx_ring(rx_ring);
goto rx_skip_free;
}
+ if (xdp->data) {
+ xdp_return_buff(xdp);
+ xdp->data = NULL;
+ }
+
/* Free all the Rx ring sk_buffs */
for (i = 0; i < rx_ring->count; i++) {
struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i];
@@ -437,6 +438,7 @@ rx_skip_free:
rx_ring->next_to_alloc = 0;
rx_ring->next_to_clean = 0;
+ rx_ring->first_desc = 0;
rx_ring->next_to_use = 0;
}
@@ -506,6 +508,7 @@ int ice_setup_rx_ring(struct ice_rx_ring *rx_ring)
rx_ring->next_to_use = 0;
rx_ring->next_to_clean = 0;
+ rx_ring->first_desc = 0;
if (ice_is_xdp_ena_vsi(rx_ring->vsi))
WRITE_ONCE(rx_ring->xdp_prog, rx_ring->vsi->xdp_prog);
@@ -523,8 +526,16 @@ err:
return -ENOMEM;
}
+/**
+ * ice_rx_frame_truesize
+ * @rx_ring: ptr to Rx ring
+ * @size: size
+ *
+ * calculate the truesize with taking into the account PAGE_SIZE of
+ * underlying arch
+ */
static unsigned int
-ice_rx_frame_truesize(struct ice_rx_ring *rx_ring, unsigned int __maybe_unused size)
+ice_rx_frame_truesize(struct ice_rx_ring *rx_ring, const unsigned int size)
{
unsigned int truesize;
@@ -545,34 +556,39 @@ ice_rx_frame_truesize(struct ice_rx_ring *rx_ring, unsigned int __maybe_unused s
* @xdp: xdp_buff used as input to the XDP program
* @xdp_prog: XDP program to run
* @xdp_ring: ring to be used for XDP_TX action
+ * @rx_buf: Rx buffer to store the XDP action
*
* Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR}
*/
-static int
+static void
ice_run_xdp(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
- struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring)
+ struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring,
+ struct ice_rx_buf *rx_buf)
{
- int err;
+ unsigned int ret = ICE_XDP_PASS;
u32 act;
+ if (!xdp_prog)
+ goto exit;
+
act = bpf_prog_run_xdp(xdp_prog, xdp);
switch (act) {
case XDP_PASS:
- return ICE_XDP_PASS;
+ break;
case XDP_TX:
if (static_branch_unlikely(&ice_xdp_locking_key))
spin_lock(&xdp_ring->tx_lock);
- err = ice_xmit_xdp_ring(xdp->data, xdp->data_end - xdp->data, xdp_ring);
+ ret = __ice_xmit_xdp_ring(xdp, xdp_ring, false);
if (static_branch_unlikely(&ice_xdp_locking_key))
spin_unlock(&xdp_ring->tx_lock);
- if (err == ICE_XDP_CONSUMED)
+ if (ret == ICE_XDP_CONSUMED)
goto out_failure;
- return err;
+ break;
case XDP_REDIRECT:
- err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
- if (err)
+ if (xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog))
goto out_failure;
- return ICE_XDP_REDIR;
+ ret = ICE_XDP_REDIR;
+ break;
default:
bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);
fallthrough;
@@ -581,8 +597,31 @@ out_failure:
trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
fallthrough;
case XDP_DROP:
- return ICE_XDP_CONSUMED;
+ ret = ICE_XDP_CONSUMED;
}
+exit:
+ rx_buf->act = ret;
+ if (unlikely(xdp_buff_has_frags(xdp)))
+ ice_set_rx_bufs_act(xdp, rx_ring, ret);
+}
+
+/**
+ * ice_xmit_xdp_ring - submit frame to XDP ring for transmission
+ * @xdpf: XDP frame that will be converted to XDP buff
+ * @xdp_ring: XDP ring for transmission
+ */
+static int ice_xmit_xdp_ring(const struct xdp_frame *xdpf,
+ struct ice_tx_ring *xdp_ring)
+{
+ struct xdp_buff xdp;
+
+ xdp.data_hard_start = (void *)xdpf;
+ xdp.data = xdpf->data;
+ xdp.data_end = xdp.data + xdpf->len;
+ xdp.frame_sz = xdpf->frame_sz;
+ xdp.flags = xdpf->flags;
+
+ return __ice_xmit_xdp_ring(&xdp, xdp_ring, true);
}
/**
@@ -605,6 +644,7 @@ ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
unsigned int queue_index = smp_processor_id();
struct ice_vsi *vsi = np->vsi;
struct ice_tx_ring *xdp_ring;
+ struct ice_tx_buf *tx_buf;
int nxmit = 0, i;
if (test_bit(ICE_VSI_DOWN, vsi->state))
@@ -627,16 +667,18 @@ ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
xdp_ring = vsi->xdp_rings[queue_index];
}
+ tx_buf = &xdp_ring->tx_buf[xdp_ring->next_to_use];
for (i = 0; i < n; i++) {
- struct xdp_frame *xdpf = frames[i];
+ const struct xdp_frame *xdpf = frames[i];
int err;
- err = ice_xmit_xdp_ring(xdpf->data, xdpf->len, xdp_ring);
+ err = ice_xmit_xdp_ring(xdpf, xdp_ring);
if (err != ICE_XDP_TX)
break;
nxmit++;
}
+ tx_buf->rs_idx = ice_set_rs_bit(xdp_ring);
if (unlikely(flags & XDP_XMIT_FLUSH))
ice_xdp_ring_update_tail(xdp_ring);
@@ -706,7 +748,7 @@ ice_alloc_mapped_page(struct ice_rx_ring *rx_ring, struct ice_rx_buf *bi)
* buffers. Then bump tail at most one time. Grouping like this lets us avoid
* multiple tail writes per call.
*/
-bool ice_alloc_rx_bufs(struct ice_rx_ring *rx_ring, u16 cleaned_count)
+bool ice_alloc_rx_bufs(struct ice_rx_ring *rx_ring, unsigned int cleaned_count)
{
union ice_32b_rx_flex_desc *rx_desc;
u16 ntu = rx_ring->next_to_use;
@@ -783,7 +825,6 @@ ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size)
/**
* ice_can_reuse_rx_page - Determine if page can be reused for another Rx
* @rx_buf: buffer containing the page
- * @rx_buf_pgcnt: rx_buf page refcount pre xdp_do_redirect() call
*
* If page is reusable, we have a green light for calling ice_reuse_rx_page,
* which will assign the current buffer to the buffer that next_to_alloc is
@@ -791,7 +832,7 @@ ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size)
* page freed
*/
static bool
-ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf, int rx_buf_pgcnt)
+ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf)
{
unsigned int pagecnt_bias = rx_buf->pagecnt_bias;
struct page *page = rx_buf->page;
@@ -802,7 +843,7 @@ ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf, int rx_buf_pgcnt)
#if (PAGE_SIZE < 8192)
/* if we are only owner of page we can reuse it */
- if (unlikely((rx_buf_pgcnt - pagecnt_bias) > 1))
+ if (unlikely(rx_buf->pgcnt - pagecnt_bias > 1))
return false;
#else
#define ICE_LAST_OFFSET \
@@ -824,33 +865,44 @@ ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf, int rx_buf_pgcnt)
}
/**
- * ice_add_rx_frag - Add contents of Rx buffer to sk_buff as a frag
+ * ice_add_xdp_frag - Add contents of Rx buffer to xdp buf as a frag
* @rx_ring: Rx descriptor ring to transact packets on
+ * @xdp: xdp buff to place the data into
* @rx_buf: buffer containing page to add
- * @skb: sk_buff to place the data into
* @size: packet length from rx_desc
*
- * This function will add the data contained in rx_buf->page to the skb.
- * It will just attach the page as a frag to the skb.
- * The function will then update the page offset.
+ * This function will add the data contained in rx_buf->page to the xdp buf.
+ * It will just attach the page as a frag.
*/
-static void
-ice_add_rx_frag(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
- struct sk_buff *skb, unsigned int size)
+static int
+ice_add_xdp_frag(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
+ struct ice_rx_buf *rx_buf, const unsigned int size)
{
-#if (PAGE_SIZE >= 8192)
- unsigned int truesize = SKB_DATA_ALIGN(size + rx_ring->rx_offset);
-#else
- unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
-#endif
+ struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
if (!size)
- return;
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page,
- rx_buf->page_offset, size, truesize);
+ return 0;
+
+ if (!xdp_buff_has_frags(xdp)) {
+ sinfo->nr_frags = 0;
+ sinfo->xdp_frags_size = 0;
+ xdp_buff_set_frags_flag(xdp);
+ }
- /* page is being used so we must update the page offset */
- ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
+ if (unlikely(sinfo->nr_frags == MAX_SKB_FRAGS)) {
+ if (unlikely(xdp_buff_has_frags(xdp)))
+ ice_set_rx_bufs_act(xdp, rx_ring, ICE_XDP_CONSUMED);
+ return -ENOMEM;
+ }
+
+ __skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++, rx_buf->page,
+ rx_buf->page_offset, size);
+ sinfo->xdp_frags_size += size;
+
+ if (page_is_pfmemalloc(rx_buf->page))
+ xdp_buff_set_frag_pfmemalloc(xdp);
+
+ return 0;
}
/**
@@ -886,19 +938,18 @@ ice_reuse_rx_page(struct ice_rx_ring *rx_ring, struct ice_rx_buf *old_buf)
* ice_get_rx_buf - Fetch Rx buffer and synchronize data for use
* @rx_ring: Rx descriptor ring to transact packets on
* @size: size of buffer to add to skb
- * @rx_buf_pgcnt: rx_buf page refcount
*
* This function will pull an Rx buffer from the ring and synchronize it
* for use by the CPU.
*/
static struct ice_rx_buf *
ice_get_rx_buf(struct ice_rx_ring *rx_ring, const unsigned int size,
- int *rx_buf_pgcnt)
+ const unsigned int ntc)
{
struct ice_rx_buf *rx_buf;
- rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
- *rx_buf_pgcnt =
+ rx_buf = &rx_ring->rx_buf[ntc];
+ rx_buf->pgcnt =
#if (PAGE_SIZE < 8192)
page_count(rx_buf->page);
#else
@@ -922,26 +973,25 @@ ice_get_rx_buf(struct ice_rx_ring *rx_ring, const unsigned int size,
/**
* ice_build_skb - Build skb around an existing buffer
* @rx_ring: Rx descriptor ring to transact packets on
- * @rx_buf: Rx buffer to pull data from
* @xdp: xdp_buff pointing to the data
*
- * This function builds an skb around an existing Rx buffer, taking care
- * to set up the skb correctly and avoid any memcpy overhead.
+ * This function builds an skb around an existing XDP buffer, taking care
+ * to set up the skb correctly and avoid any memcpy overhead. Driver has
+ * already combined frags (if any) to skb_shared_info.
*/
static struct sk_buff *
-ice_build_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
- struct xdp_buff *xdp)
+ice_build_skb(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp)
{
u8 metasize = xdp->data - xdp->data_meta;
-#if (PAGE_SIZE < 8192)
- unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
-#else
- unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
- SKB_DATA_ALIGN(xdp->data_end -
- xdp->data_hard_start);
-#endif
+ struct skb_shared_info *sinfo = NULL;
+ unsigned int nr_frags;
struct sk_buff *skb;
+ if (unlikely(xdp_buff_has_frags(xdp))) {
+ sinfo = xdp_get_shared_info_from_buff(xdp);
+ nr_frags = sinfo->nr_frags;
+ }
+
/* Prefetch first cache line of first page. If xdp->data_meta
* is unused, this points exactly as xdp->data, otherwise we
* likely have a consumer accessing first few bytes of meta
@@ -949,7 +999,7 @@ ice_build_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
*/
net_prefetch(xdp->data_meta);
/* build an skb around the page buffer */
- skb = napi_build_skb(xdp->data_hard_start, truesize);
+ skb = napi_build_skb(xdp->data_hard_start, xdp->frame_sz);
if (unlikely(!skb))
return NULL;
@@ -964,8 +1014,11 @@ ice_build_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
if (metasize)
skb_metadata_set(skb, metasize);
- /* buffer is used by skb, update page_offset */
- ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
+ if (unlikely(xdp_buff_has_frags(xdp)))
+ xdp_update_skb_shared_info(skb, nr_frags,
+ sinfo->xdp_frags_size,
+ nr_frags * xdp->frame_sz,
+ xdp_buff_is_frag_pfmemalloc(xdp));
return skb;
}
@@ -981,24 +1034,30 @@ ice_build_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
* skb correctly.
*/
static struct sk_buff *
-ice_construct_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
- struct xdp_buff *xdp)
+ice_construct_skb(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp)
{
- unsigned int metasize = xdp->data - xdp->data_meta;
unsigned int size = xdp->data_end - xdp->data;
+ struct skb_shared_info *sinfo = NULL;
+ struct ice_rx_buf *rx_buf;
+ unsigned int nr_frags = 0;
unsigned int headlen;
struct sk_buff *skb;
/* prefetch first cache line of first page */
- net_prefetch(xdp->data_meta);
+ net_prefetch(xdp->data);
+
+ if (unlikely(xdp_buff_has_frags(xdp))) {
+ sinfo = xdp_get_shared_info_from_buff(xdp);
+ nr_frags = sinfo->nr_frags;
+ }
/* allocate a skb to store the frags */
- skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
- ICE_RX_HDR_SIZE + metasize,
+ skb = __napi_alloc_skb(&rx_ring->q_vector->napi, ICE_RX_HDR_SIZE,
GFP_ATOMIC | __GFP_NOWARN);
if (unlikely(!skb))
return NULL;
+ rx_buf = &rx_ring->rx_buf[rx_ring->first_desc];
skb_record_rx_queue(skb, rx_ring->q_index);
/* Determine available headroom for copy */
headlen = size;
@@ -1006,32 +1065,42 @@ ice_construct_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
headlen = eth_get_headlen(skb->dev, xdp->data, ICE_RX_HDR_SIZE);
/* align pull length to size of long to optimize memcpy performance */
- memcpy(__skb_put(skb, headlen + metasize), xdp->data_meta,
- ALIGN(headlen + metasize, sizeof(long)));
-
- if (metasize) {
- skb_metadata_set(skb, metasize);
- __skb_pull(skb, metasize);
- }
+ memcpy(__skb_put(skb, headlen), xdp->data, ALIGN(headlen,
+ sizeof(long)));
/* if we exhaust the linear part then add what is left as a frag */
size -= headlen;
if (size) {
-#if (PAGE_SIZE >= 8192)
- unsigned int truesize = SKB_DATA_ALIGN(size);
-#else
- unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
-#endif
+ /* besides adding here a partial frag, we are going to add
+ * frags from xdp_buff, make sure there is enough space for
+ * them
+ */
+ if (unlikely(nr_frags >= MAX_SKB_FRAGS - 1)) {
+ dev_kfree_skb(skb);
+ return NULL;
+ }
skb_add_rx_frag(skb, 0, rx_buf->page,
- rx_buf->page_offset + headlen, size, truesize);
- /* buffer is used by skb, update page_offset */
- ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
+ rx_buf->page_offset + headlen, size,
+ xdp->frame_sz);
} else {
- /* buffer is unused, reset bias back to rx_buf; data was copied
- * onto skb's linear part so there's no need for adjusting
- * page offset and we can reuse this buffer as-is
+ /* buffer is unused, change the act that should be taken later
+ * on; data was copied onto skb's linear part so there's no
+ * need for adjusting page offset and we can reuse this buffer
+ * as-is
*/
- rx_buf->pagecnt_bias++;
+ rx_buf->act = ICE_SKB_CONSUMED;
+ }
+
+ if (unlikely(xdp_buff_has_frags(xdp))) {
+ struct skb_shared_info *skinfo = skb_shinfo(skb);
+
+ memcpy(&skinfo->frags[skinfo->nr_frags], &sinfo->frags[0],
+ sizeof(skb_frag_t) * nr_frags);
+
+ xdp_update_skb_shared_info(skb, skinfo->nr_frags + nr_frags,
+ sinfo->xdp_frags_size,
+ nr_frags * xdp->frame_sz,
+ xdp_buff_is_frag_pfmemalloc(xdp));
}
return skb;
@@ -1041,26 +1110,17 @@ ice_construct_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
* ice_put_rx_buf - Clean up used buffer and either recycle or free
* @rx_ring: Rx descriptor ring to transact packets on
* @rx_buf: Rx buffer to pull data from
- * @rx_buf_pgcnt: Rx buffer page count pre xdp_do_redirect()
*
- * This function will update next_to_clean and then clean up the contents
- * of the rx_buf. It will either recycle the buffer or unmap it and free
- * the associated resources.
+ * This function will clean up the contents of the rx_buf. It will either
+ * recycle the buffer or unmap it and free the associated resources.
*/
static void
-ice_put_rx_buf(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
- int rx_buf_pgcnt)
+ice_put_rx_buf(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf)
{
- u16 ntc = rx_ring->next_to_clean + 1;
-
- /* fetch, update, and store next to clean */
- ntc = (ntc < rx_ring->count) ? ntc : 0;
- rx_ring->next_to_clean = ntc;
-
if (!rx_buf)
return;
- if (ice_can_reuse_rx_page(rx_buf, rx_buf_pgcnt)) {
+ if (ice_can_reuse_rx_page(rx_buf)) {
/* hand second half of page back to the ring */
ice_reuse_rx_page(rx_ring, rx_buf);
} else {
@@ -1076,27 +1136,6 @@ ice_put_rx_buf(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
}
/**
- * ice_is_non_eop - process handling of non-EOP buffers
- * @rx_ring: Rx ring being processed
- * @rx_desc: Rx descriptor for current buffer
- *
- * If the buffer is an EOP buffer, this function exits returning false,
- * otherwise return true indicating that this is in fact a non-EOP buffer.
- */
-static bool
-ice_is_non_eop(struct ice_rx_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc)
-{
- /* if we are the last buffer then there is nothing else to do */
-#define ICE_RXD_EOF BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S)
- if (likely(ice_test_staterr(rx_desc->wb.status_error0, ICE_RXD_EOF)))
- return false;
-
- rx_ring->ring_stats->rx_stats.non_eop_descs++;
-
- return true;
-}
-
-/**
* ice_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
* @rx_ring: Rx descriptor ring to transact packets on
* @budget: Total limit on number of packets to process
@@ -1110,39 +1149,42 @@ ice_is_non_eop(struct ice_rx_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc)
*/
int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
{
- unsigned int total_rx_bytes = 0, total_rx_pkts = 0, frame_sz = 0;
- u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
+ unsigned int total_rx_bytes = 0, total_rx_pkts = 0;
unsigned int offset = rx_ring->rx_offset;
+ struct xdp_buff *xdp = &rx_ring->xdp;
struct ice_tx_ring *xdp_ring = NULL;
- unsigned int xdp_res, xdp_xmit = 0;
- struct sk_buff *skb = rx_ring->skb;
struct bpf_prog *xdp_prog = NULL;
- struct xdp_buff xdp;
+ u32 ntc = rx_ring->next_to_clean;
+ u32 cnt = rx_ring->count;
+ u32 cached_ntc = ntc;
+ u32 xdp_xmit = 0;
+ u32 cached_ntu;
bool failure;
+ u32 first;
/* Frame size depend on rx_ring setup when PAGE_SIZE=4K */
#if (PAGE_SIZE < 8192)
- frame_sz = ice_rx_frame_truesize(rx_ring, 0);
+ xdp->frame_sz = ice_rx_frame_truesize(rx_ring, 0);
#endif
- xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq);
xdp_prog = READ_ONCE(rx_ring->xdp_prog);
- if (xdp_prog)
+ if (xdp_prog) {
xdp_ring = rx_ring->xdp_ring;
+ cached_ntu = xdp_ring->next_to_use;
+ }
/* start the loop to process Rx packets bounded by 'budget' */
while (likely(total_rx_pkts < (unsigned int)budget)) {
union ice_32b_rx_flex_desc *rx_desc;
struct ice_rx_buf *rx_buf;
- unsigned char *hard_start;
+ struct sk_buff *skb;
unsigned int size;
u16 stat_err_bits;
- int rx_buf_pgcnt;
u16 vlan_tag = 0;
u16 rx_ptype;
/* get the Rx desc from Rx ring based on 'next_to_clean' */
- rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean);
+ rx_desc = ICE_RX_DESC(rx_ring, ntc);
/* status_error_len will always be zero for unused descriptors
* because it's cleared in cleanup, and overlaps with hdr_addr
@@ -1166,8 +1208,8 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
if (rx_desc->wb.rxdid == FDIR_DESC_RXDID &&
ctrl_vsi->vf)
ice_vc_fdir_irq_handler(ctrl_vsi, rx_desc);
- ice_put_rx_buf(rx_ring, NULL, 0);
- cleaned_count++;
+ if (++ntc == cnt)
+ ntc = 0;
continue;
}
@@ -1175,65 +1217,56 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
ICE_RX_FLX_DESC_PKT_LEN_M;
/* retrieve a buffer from the ring */
- rx_buf = ice_get_rx_buf(rx_ring, size, &rx_buf_pgcnt);
+ rx_buf = ice_get_rx_buf(rx_ring, size, ntc);
- if (!size) {
- xdp.data = NULL;
- xdp.data_end = NULL;
- xdp.data_hard_start = NULL;
- xdp.data_meta = NULL;
- goto construct_skb;
- }
+ if (!xdp->data) {
+ void *hard_start;
- hard_start = page_address(rx_buf->page) + rx_buf->page_offset -
- offset;
- xdp_prepare_buff(&xdp, hard_start, offset, size, true);
+ hard_start = page_address(rx_buf->page) + rx_buf->page_offset -
+ offset;
+ xdp_prepare_buff(xdp, hard_start, offset, size, !!offset);
#if (PAGE_SIZE > 4096)
- /* At larger PAGE_SIZE, frame_sz depend on len size */
- xdp.frame_sz = ice_rx_frame_truesize(rx_ring, size);
+ /* At larger PAGE_SIZE, frame_sz depend on len size */
+ xdp->frame_sz = ice_rx_frame_truesize(rx_ring, size);
#endif
+ xdp_buff_clear_frags_flag(xdp);
+ } else if (ice_add_xdp_frag(rx_ring, xdp, rx_buf, size)) {
+ break;
+ }
+ if (++ntc == cnt)
+ ntc = 0;
- if (!xdp_prog)
- goto construct_skb;
+ /* skip if it is NOP desc */
+ if (ice_is_non_eop(rx_ring, rx_desc))
+ continue;
- xdp_res = ice_run_xdp(rx_ring, &xdp, xdp_prog, xdp_ring);
- if (!xdp_res)
+ ice_run_xdp(rx_ring, xdp, xdp_prog, xdp_ring, rx_buf);
+ if (rx_buf->act == ICE_XDP_PASS)
goto construct_skb;
- if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR)) {
- xdp_xmit |= xdp_res;
- ice_rx_buf_adjust_pg_offset(rx_buf, xdp.frame_sz);
- } else {
- rx_buf->pagecnt_bias++;
- }
- total_rx_bytes += size;
+ total_rx_bytes += xdp_get_buff_len(xdp);
total_rx_pkts++;
- cleaned_count++;
- ice_put_rx_buf(rx_ring, rx_buf, rx_buf_pgcnt);
+ xdp->data = NULL;
+ rx_ring->first_desc = ntc;
continue;
construct_skb:
- if (skb) {
- ice_add_rx_frag(rx_ring, rx_buf, skb, size);
- } else if (likely(xdp.data)) {
- if (ice_ring_uses_build_skb(rx_ring))
- skb = ice_build_skb(rx_ring, rx_buf, &xdp);
- else
- skb = ice_construct_skb(rx_ring, rx_buf, &xdp);
- }
+ if (likely(ice_ring_uses_build_skb(rx_ring)))
+ skb = ice_build_skb(rx_ring, xdp);
+ else
+ skb = ice_construct_skb(rx_ring, xdp);
/* exit if we failed to retrieve a buffer */
if (!skb) {
- rx_ring->ring_stats->rx_stats.alloc_buf_failed++;
- if (rx_buf)
- rx_buf->pagecnt_bias++;
+ rx_ring->ring_stats->rx_stats.alloc_page_failed++;
+ rx_buf->act = ICE_XDP_CONSUMED;
+ if (unlikely(xdp_buff_has_frags(xdp)))
+ ice_set_rx_bufs_act(xdp, rx_ring,
+ ICE_XDP_CONSUMED);
+ xdp->data = NULL;
+ rx_ring->first_desc = ntc;
break;
}
-
- ice_put_rx_buf(rx_ring, rx_buf, rx_buf_pgcnt);
- cleaned_count++;
-
- /* skip if it is NOP desc */
- if (ice_is_non_eop(rx_ring, rx_desc))
- continue;
+ xdp->data = NULL;
+ rx_ring->first_desc = ntc;
stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S);
if (unlikely(ice_test_staterr(rx_desc->wb.status_error0,
@@ -1245,10 +1278,8 @@ construct_skb:
vlan_tag = ice_get_vlan_tag_from_rx_desc(rx_desc);
/* pad the skb if needed, to make a valid ethernet frame */
- if (eth_skb_pad(skb)) {
- skb = NULL;
+ if (eth_skb_pad(skb))
continue;
- }
/* probably a little skewed due to removing CRC */
total_rx_bytes += skb->len;
@@ -1262,18 +1293,34 @@ construct_skb:
ice_trace(clean_rx_irq_indicate, rx_ring, rx_desc, skb);
/* send completed skb up the stack */
ice_receive_skb(rx_ring, skb, vlan_tag);
- skb = NULL;
/* update budget accounting */
total_rx_pkts++;
}
+ first = rx_ring->first_desc;
+ while (cached_ntc != first) {
+ struct ice_rx_buf *buf = &rx_ring->rx_buf[cached_ntc];
+
+ if (buf->act & (ICE_XDP_TX | ICE_XDP_REDIR)) {
+ ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz);
+ xdp_xmit |= buf->act;
+ } else if (buf->act & ICE_XDP_CONSUMED) {
+ buf->pagecnt_bias++;
+ } else if (buf->act == ICE_XDP_PASS) {
+ ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz);
+ }
+
+ ice_put_rx_buf(rx_ring, buf);
+ if (++cached_ntc >= cnt)
+ cached_ntc = 0;
+ }
+ rx_ring->next_to_clean = ntc;
/* return up to cleaned_count buffers to hardware */
- failure = ice_alloc_rx_bufs(rx_ring, cleaned_count);
+ failure = ice_alloc_rx_bufs(rx_ring, ICE_RX_DESC_UNUSED(rx_ring));
- if (xdp_prog)
- ice_finalize_xdp_rx(xdp_ring, xdp_xmit);
- rx_ring->skb = skb;
+ if (xdp_xmit)
+ ice_finalize_xdp_rx(xdp_ring, xdp_xmit, cached_ntu);
if (rx_ring->ring_stats)
ice_update_rx_ring_stats(rx_ring, total_rx_pkts,
@@ -1682,6 +1729,7 @@ ice_tx_map(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first,
DMA_TO_DEVICE);
tx_buf = &tx_ring->tx_buf[i];
+ tx_buf->type = ICE_TX_BUF_FRAG;
}
/* record SW timestamp if HW timestamp is not available */
@@ -1996,7 +2044,6 @@ int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
if (err < 0)
return err;
- /* cppcheck-suppress unreadVariable */
protocol = vlan_get_protocol(skb);
if (eth_p_mpls(protocol))
@@ -2033,8 +2080,6 @@ int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
}
/* reset pointers to inner headers */
-
- /* cppcheck-suppress unreadVariable */
ip.hdr = skb_inner_network_header(skb);
l4.hdr = skb_inner_transport_header(skb);
@@ -2300,6 +2345,9 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring)
ice_trace(xmit_frame_ring, tx_ring, skb);
+ if (unlikely(ipv6_hopopt_jumbo_remove(skb)))
+ goto out_drop;
+
count = ice_xmit_desc_count(skb);
if (ice_chk_linearize(skb, count)) {
if (__skb_linearize(skb))
@@ -2328,6 +2376,7 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring)
/* record the location of the first descriptor for this packet */
first = &tx_ring->tx_buf[tx_ring->next_to_use];
first->skb = skb;
+ first->type = ICE_TX_BUF_SKB;
first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN);
first->gso_segs = 1;
first->tx_flags = 0;
@@ -2500,11 +2549,11 @@ void ice_clean_ctrl_tx_irq(struct ice_tx_ring *tx_ring)
dma_unmap_addr(tx_buf, dma),
dma_unmap_len(tx_buf, len),
DMA_TO_DEVICE);
- if (tx_buf->tx_flags & ICE_TX_FLAGS_DUMMY_PKT)
+ if (tx_buf->type == ICE_TX_BUF_DUMMY)
devm_kfree(tx_ring->dev, tx_buf->raw_buf);
/* clear next_to_watch to prevent false hangs */
- tx_buf->raw_buf = NULL;
+ tx_buf->type = ICE_TX_BUF_EMPTY;
tx_buf->tx_flags = 0;
tx_buf->next_to_watch = NULL;
dma_unmap_len_set(tx_buf, len, 0);
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index 4fd0e5d0a313..fff0efe28373 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -9,10 +9,12 @@
#define ICE_DFLT_IRQ_WORK 256
#define ICE_RXBUF_3072 3072
#define ICE_RXBUF_2048 2048
+#define ICE_RXBUF_1664 1664
#define ICE_RXBUF_1536 1536
#define ICE_MAX_CHAINED_RX_BUFS 5
#define ICE_MAX_BUF_TXD 8
#define ICE_MIN_TX_LEN 17
+#define ICE_MAX_FRAME_LEGACY_RX 8320
/* The size limit for a transmit buffer in a descriptor is (16K - 1).
* In order to align with the read requests we will align the value to
@@ -110,15 +112,16 @@ static inline int ice_skb_pad(void)
(u16)((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
(R)->next_to_clean - (R)->next_to_use - 1)
+#define ICE_RX_DESC_UNUSED(R) \
+ ((((R)->first_desc > (R)->next_to_use) ? 0 : (R)->count) + \
+ (R)->first_desc - (R)->next_to_use - 1)
+
#define ICE_RING_QUARTER(R) ((R)->count >> 2)
#define ICE_TX_FLAGS_TSO BIT(0)
#define ICE_TX_FLAGS_HW_VLAN BIT(1)
#define ICE_TX_FLAGS_SW_VLAN BIT(2)
-/* ICE_TX_FLAGS_DUMMY_PKT is used to mark dummy packets that should be
- * freed instead of returned like skb packets.
- */
-#define ICE_TX_FLAGS_DUMMY_PKT BIT(3)
+/* Free, was ICE_TX_FLAGS_DUMMY_PKT */
#define ICE_TX_FLAGS_TSYN BIT(4)
#define ICE_TX_FLAGS_IPV4 BIT(5)
#define ICE_TX_FLAGS_IPV6 BIT(6)
@@ -134,6 +137,7 @@ static inline int ice_skb_pad(void)
#define ICE_XDP_TX BIT(1)
#define ICE_XDP_REDIR BIT(2)
#define ICE_XDP_EXIT BIT(3)
+#define ICE_SKB_CONSUMED ICE_XDP_CONSUMED
#define ICE_RX_DMA_ATTR \
(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
@@ -142,15 +146,44 @@ static inline int ice_skb_pad(void)
#define ICE_TXD_LAST_DESC_CMD (ICE_TX_DESC_CMD_EOP | ICE_TX_DESC_CMD_RS)
+/**
+ * enum ice_tx_buf_type - type of &ice_tx_buf to act on Tx completion
+ * @ICE_TX_BUF_EMPTY: unused OR XSk frame, no action required
+ * @ICE_TX_BUF_DUMMY: dummy Flow Director packet, unmap and kfree()
+ * @ICE_TX_BUF_FRAG: mapped skb OR &xdp_buff frag, only unmap DMA
+ * @ICE_TX_BUF_SKB: &sk_buff, unmap and consume_skb(), update stats
+ * @ICE_TX_BUF_XDP_TX: &xdp_buff, unmap and page_frag_free(), stats
+ * @ICE_TX_BUF_XDP_XMIT: &xdp_frame, unmap and xdp_return_frame(), stats
+ * @ICE_TX_BUF_XSK_TX: &xdp_buff on XSk queue, xsk_buff_free(), stats
+ */
+enum ice_tx_buf_type {
+ ICE_TX_BUF_EMPTY = 0U,
+ ICE_TX_BUF_DUMMY,
+ ICE_TX_BUF_FRAG,
+ ICE_TX_BUF_SKB,
+ ICE_TX_BUF_XDP_TX,
+ ICE_TX_BUF_XDP_XMIT,
+ ICE_TX_BUF_XSK_TX,
+};
+
struct ice_tx_buf {
- struct ice_tx_desc *next_to_watch;
union {
- struct sk_buff *skb;
- void *raw_buf; /* used for XDP */
+ struct ice_tx_desc *next_to_watch;
+ u32 rs_idx;
+ };
+ union {
+ void *raw_buf; /* used for XDP_TX and FDir rules */
+ struct sk_buff *skb; /* used for .ndo_start_xmit() */
+ struct xdp_frame *xdpf; /* used for .ndo_xdp_xmit() */
+ struct xdp_buff *xdp; /* used for XDP_TX ZC */
};
unsigned int bytecount;
- unsigned short gso_segs;
- u32 tx_flags;
+ union {
+ unsigned int gso_segs;
+ unsigned int nr_frags; /* used for mbuf XDP */
+ };
+ u32 type:16; /* &ice_tx_buf_type */
+ u32 tx_flags:16;
DEFINE_DMA_UNMAP_LEN(len);
DEFINE_DMA_UNMAP_ADDR(dma);
};
@@ -170,7 +203,9 @@ struct ice_rx_buf {
dma_addr_t dma;
struct page *page;
unsigned int page_offset;
- u16 pagecnt_bias;
+ unsigned int pgcnt;
+ unsigned int act;
+ unsigned int pagecnt_bias;
};
struct ice_q_stats {
@@ -273,42 +308,44 @@ struct ice_rx_ring {
struct ice_vsi *vsi; /* Backreference to associated VSI */
struct ice_q_vector *q_vector; /* Backreference to associated vector */
u8 __iomem *tail;
+ u16 q_index; /* Queue number of ring */
+
+ u16 count; /* Number of descriptors */
+ u16 reg_idx; /* HW register index of the ring */
+ u16 next_to_alloc;
+ /* CL2 - 2nd cacheline starts here */
union {
struct ice_rx_buf *rx_buf;
struct xdp_buff **xdp_buf;
};
- /* CL2 - 2nd cacheline starts here */
- struct xdp_rxq_info xdp_rxq;
+ struct xdp_buff xdp;
/* CL3 - 3rd cacheline starts here */
- u16 q_index; /* Queue number of ring */
-
- u16 count; /* Number of descriptors */
- u16 reg_idx; /* HW register index of the ring */
+ struct bpf_prog *xdp_prog;
+ u16 rx_offset;
/* used in interrupt processing */
u16 next_to_use;
u16 next_to_clean;
- u16 next_to_alloc;
- u16 rx_offset;
- u16 rx_buf_len;
+ u16 first_desc;
/* stats structs */
struct ice_ring_stats *ring_stats;
struct rcu_head rcu; /* to avoid race on free */
- /* CL4 - 3rd cacheline starts here */
+ /* CL4 - 4th cacheline starts here */
struct ice_channel *ch;
- struct bpf_prog *xdp_prog;
struct ice_tx_ring *xdp_ring;
struct xsk_buff_pool *xsk_pool;
- struct sk_buff *skb;
dma_addr_t dma; /* physical address of ring */
u64 cached_phctime;
+ u16 rx_buf_len;
u8 dcb_tc; /* Traffic class of ring */
u8 ptp_rx;
#define ICE_RX_FLAGS_RING_BUILD_SKB BIT(1)
#define ICE_RX_FLAGS_CRC_STRIP_DIS BIT(2)
u8 flags;
+ /* CL5 - 5th cacheline starts here */
+ struct xdp_rxq_info xdp_rxq;
} ____cacheline_internodealigned_in_smp;
struct ice_tx_ring {
@@ -326,12 +363,11 @@ struct ice_tx_ring {
struct xsk_buff_pool *xsk_pool;
u16 next_to_use;
u16 next_to_clean;
- u16 next_rs;
- u16 next_dd;
u16 q_handle; /* Queue handle per TC */
u16 reg_idx; /* HW register index of the ring */
u16 count; /* Number of descriptors */
u16 q_index; /* Queue number of ring */
+ u16 xdp_tx_active;
/* stats structs */
struct ice_ring_stats *ring_stats;
/* CL3 - 3rd cacheline starts here */
@@ -342,7 +378,6 @@ struct ice_tx_ring {
spinlock_t tx_lock;
u32 txq_teid; /* Added Tx queue TEID */
/* CL4 - 4th cacheline starts here */
- u16 xdp_tx_active;
#define ICE_TX_FLAGS_RING_XDP BIT(0)
#define ICE_TX_FLAGS_RING_VLAN_L2TAG1 BIT(1)
#define ICE_TX_FLAGS_RING_VLAN_L2TAG2 BIT(2)
@@ -431,7 +466,7 @@ static inline unsigned int ice_rx_pg_order(struct ice_rx_ring *ring)
union ice_32b_rx_flex_desc;
-bool ice_alloc_rx_bufs(struct ice_rx_ring *rxr, u16 cleaned_count);
+bool ice_alloc_rx_bufs(struct ice_rx_ring *rxr, unsigned int cleaned_count);
netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev);
u16
ice_select_queue(struct net_device *dev, struct sk_buff *skb,
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
index 25f04266c668..7bc5aa340c7d 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
@@ -221,128 +221,217 @@ ice_receive_skb(struct ice_rx_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag)
}
/**
+ * ice_clean_xdp_tx_buf - Free and unmap XDP Tx buffer
+ * @dev: device for DMA mapping
+ * @tx_buf: Tx buffer to clean
+ * @bq: XDP bulk flush struct
+ */
+static void
+ice_clean_xdp_tx_buf(struct device *dev, struct ice_tx_buf *tx_buf,
+ struct xdp_frame_bulk *bq)
+{
+ dma_unmap_single(dev, dma_unmap_addr(tx_buf, dma),
+ dma_unmap_len(tx_buf, len), DMA_TO_DEVICE);
+ dma_unmap_len_set(tx_buf, len, 0);
+
+ switch (tx_buf->type) {
+ case ICE_TX_BUF_XDP_TX:
+ page_frag_free(tx_buf->raw_buf);
+ break;
+ case ICE_TX_BUF_XDP_XMIT:
+ xdp_return_frame_bulk(tx_buf->xdpf, bq);
+ break;
+ }
+
+ tx_buf->type = ICE_TX_BUF_EMPTY;
+}
+
+/**
* ice_clean_xdp_irq - Reclaim resources after transmit completes on XDP ring
* @xdp_ring: XDP ring to clean
*/
-static void ice_clean_xdp_irq(struct ice_tx_ring *xdp_ring)
+static u32 ice_clean_xdp_irq(struct ice_tx_ring *xdp_ring)
{
- unsigned int total_bytes = 0, total_pkts = 0;
- u16 tx_thresh = ICE_RING_QUARTER(xdp_ring);
- u16 ntc = xdp_ring->next_to_clean;
- struct ice_tx_desc *next_dd_desc;
- u16 next_dd = xdp_ring->next_dd;
- struct ice_tx_buf *tx_buf;
- int i;
+ int total_bytes = 0, total_pkts = 0;
+ struct device *dev = xdp_ring->dev;
+ u32 ntc = xdp_ring->next_to_clean;
+ struct ice_tx_desc *tx_desc;
+ u32 cnt = xdp_ring->count;
+ struct xdp_frame_bulk bq;
+ u32 frags, xdp_tx = 0;
+ u32 ready_frames = 0;
+ u32 idx;
+ u32 ret;
+
+ idx = xdp_ring->tx_buf[ntc].rs_idx;
+ tx_desc = ICE_TX_DESC(xdp_ring, idx);
+ if (tx_desc->cmd_type_offset_bsz &
+ cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)) {
+ if (idx >= ntc)
+ ready_frames = idx - ntc + 1;
+ else
+ ready_frames = idx + cnt - ntc + 1;
+ }
- next_dd_desc = ICE_TX_DESC(xdp_ring, next_dd);
- if (!(next_dd_desc->cmd_type_offset_bsz &
- cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
- return;
+ if (unlikely(!ready_frames))
+ return 0;
+ ret = ready_frames;
+
+ xdp_frame_bulk_init(&bq);
+ rcu_read_lock(); /* xdp_return_frame_bulk() */
- for (i = 0; i < tx_thresh; i++) {
- tx_buf = &xdp_ring->tx_buf[ntc];
+ while (ready_frames) {
+ struct ice_tx_buf *tx_buf = &xdp_ring->tx_buf[ntc];
+ struct ice_tx_buf *head = tx_buf;
+ /* bytecount holds size of head + frags */
total_bytes += tx_buf->bytecount;
- /* normally tx_buf->gso_segs was taken but at this point
- * it's always 1 for us
- */
+ frags = tx_buf->nr_frags;
total_pkts++;
-
- page_frag_free(tx_buf->raw_buf);
- dma_unmap_single(xdp_ring->dev, dma_unmap_addr(tx_buf, dma),
- dma_unmap_len(tx_buf, len), DMA_TO_DEVICE);
- dma_unmap_len_set(tx_buf, len, 0);
- tx_buf->raw_buf = NULL;
+ /* count head + frags */
+ ready_frames -= frags + 1;
+ xdp_tx++;
ntc++;
- if (ntc >= xdp_ring->count)
+ if (ntc == cnt)
ntc = 0;
+
+ for (int i = 0; i < frags; i++) {
+ tx_buf = &xdp_ring->tx_buf[ntc];
+
+ ice_clean_xdp_tx_buf(dev, tx_buf, &bq);
+ ntc++;
+ if (ntc == cnt)
+ ntc = 0;
+ }
+
+ ice_clean_xdp_tx_buf(dev, head, &bq);
}
- next_dd_desc->cmd_type_offset_bsz = 0;
- xdp_ring->next_dd = xdp_ring->next_dd + tx_thresh;
- if (xdp_ring->next_dd > xdp_ring->count)
- xdp_ring->next_dd = tx_thresh - 1;
+ xdp_flush_frame_bulk(&bq);
+ rcu_read_unlock();
+
+ tx_desc->cmd_type_offset_bsz = 0;
xdp_ring->next_to_clean = ntc;
+ xdp_ring->xdp_tx_active -= xdp_tx;
ice_update_tx_ring_stats(xdp_ring, total_pkts, total_bytes);
+
+ return ret;
}
/**
- * ice_xmit_xdp_ring - submit single packet to XDP ring for transmission
- * @data: packet data pointer
- * @size: packet data size
+ * __ice_xmit_xdp_ring - submit frame to XDP ring for transmission
+ * @xdp: XDP buffer to be placed onto Tx descriptors
* @xdp_ring: XDP ring for transmission
+ * @frame: whether this comes from .ndo_xdp_xmit()
*/
-int ice_xmit_xdp_ring(void *data, u16 size, struct ice_tx_ring *xdp_ring)
+int __ice_xmit_xdp_ring(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring,
+ bool frame)
{
- u16 tx_thresh = ICE_RING_QUARTER(xdp_ring);
- u16 i = xdp_ring->next_to_use;
+ struct skb_shared_info *sinfo = NULL;
+ u32 size = xdp->data_end - xdp->data;
+ struct device *dev = xdp_ring->dev;
+ u32 ntu = xdp_ring->next_to_use;
struct ice_tx_desc *tx_desc;
+ struct ice_tx_buf *tx_head;
struct ice_tx_buf *tx_buf;
- dma_addr_t dma;
+ u32 cnt = xdp_ring->count;
+ void *data = xdp->data;
+ u32 nr_frags = 0;
+ u32 free_space;
+ u32 frag = 0;
+
+ free_space = ICE_DESC_UNUSED(xdp_ring);
+ if (free_space < ICE_RING_QUARTER(xdp_ring))
+ free_space += ice_clean_xdp_irq(xdp_ring);
+
+ if (unlikely(!free_space))
+ goto busy;
+
+ if (unlikely(xdp_buff_has_frags(xdp))) {
+ sinfo = xdp_get_shared_info_from_buff(xdp);
+ nr_frags = sinfo->nr_frags;
+ if (free_space < nr_frags + 1)
+ goto busy;
+ }
- if (ICE_DESC_UNUSED(xdp_ring) < tx_thresh)
- ice_clean_xdp_irq(xdp_ring);
+ tx_desc = ICE_TX_DESC(xdp_ring, ntu);
+ tx_head = &xdp_ring->tx_buf[ntu];
+ tx_buf = tx_head;
- if (!unlikely(ICE_DESC_UNUSED(xdp_ring))) {
- xdp_ring->ring_stats->tx_stats.tx_busy++;
- return ICE_XDP_CONSUMED;
- }
+ for (;;) {
+ dma_addr_t dma;
- dma = dma_map_single(xdp_ring->dev, data, size, DMA_TO_DEVICE);
- if (dma_mapping_error(xdp_ring->dev, dma))
- return ICE_XDP_CONSUMED;
+ dma = dma_map_single(dev, data, size, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, dma))
+ goto dma_unmap;
- tx_buf = &xdp_ring->tx_buf[i];
- tx_buf->bytecount = size;
- tx_buf->gso_segs = 1;
- tx_buf->raw_buf = data;
+ /* record length, and DMA address */
+ dma_unmap_len_set(tx_buf, len, size);
+ dma_unmap_addr_set(tx_buf, dma, dma);
- /* record length, and DMA address */
- dma_unmap_len_set(tx_buf, len, size);
- dma_unmap_addr_set(tx_buf, dma, dma);
+ if (frame) {
+ tx_buf->type = ICE_TX_BUF_FRAG;
+ } else {
+ tx_buf->type = ICE_TX_BUF_XDP_TX;
+ tx_buf->raw_buf = data;
+ }
- tx_desc = ICE_TX_DESC(xdp_ring, i);
- tx_desc->buf_addr = cpu_to_le64(dma);
- tx_desc->cmd_type_offset_bsz = ice_build_ctob(ICE_TX_DESC_CMD_EOP, 0,
- size, 0);
+ tx_desc->buf_addr = cpu_to_le64(dma);
+ tx_desc->cmd_type_offset_bsz = ice_build_ctob(0, 0, size, 0);
- xdp_ring->xdp_tx_active++;
- i++;
- if (i == xdp_ring->count) {
- i = 0;
- tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_rs);
- tx_desc->cmd_type_offset_bsz |=
- cpu_to_le64(ICE_TX_DESC_CMD_RS << ICE_TXD_QW1_CMD_S);
- xdp_ring->next_rs = tx_thresh - 1;
+ ntu++;
+ if (ntu == cnt)
+ ntu = 0;
+
+ if (frag == nr_frags)
+ break;
+
+ tx_desc = ICE_TX_DESC(xdp_ring, ntu);
+ tx_buf = &xdp_ring->tx_buf[ntu];
+
+ data = skb_frag_address(&sinfo->frags[frag]);
+ size = skb_frag_size(&sinfo->frags[frag]);
+ frag++;
}
- xdp_ring->next_to_use = i;
- if (i > xdp_ring->next_rs) {
- tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_rs);
- tx_desc->cmd_type_offset_bsz |=
- cpu_to_le64(ICE_TX_DESC_CMD_RS << ICE_TXD_QW1_CMD_S);
- xdp_ring->next_rs += tx_thresh;
+ /* store info about bytecount and frag count in first desc */
+ tx_head->bytecount = xdp_get_buff_len(xdp);
+ tx_head->nr_frags = nr_frags;
+
+ if (frame) {
+ tx_head->type = ICE_TX_BUF_XDP_XMIT;
+ tx_head->xdpf = xdp->data_hard_start;
}
+ /* update last descriptor from a frame with EOP */
+ tx_desc->cmd_type_offset_bsz |=
+ cpu_to_le64(ICE_TX_DESC_CMD_EOP << ICE_TXD_QW1_CMD_S);
+
+ xdp_ring->xdp_tx_active++;
+ xdp_ring->next_to_use = ntu;
+
return ICE_XDP_TX;
-}
-/**
- * ice_xmit_xdp_buff - convert an XDP buffer to an XDP frame and send it
- * @xdp: XDP buffer
- * @xdp_ring: XDP Tx ring
- *
- * Returns negative on failure, 0 on success.
- */
-int ice_xmit_xdp_buff(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring)
-{
- struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
+dma_unmap:
+ for (;;) {
+ tx_buf = &xdp_ring->tx_buf[ntu];
+ dma_unmap_page(dev, dma_unmap_addr(tx_buf, dma),
+ dma_unmap_len(tx_buf, len), DMA_TO_DEVICE);
+ dma_unmap_len_set(tx_buf, len, 0);
+ if (tx_buf == tx_head)
+ break;
+
+ if (!ntu)
+ ntu += cnt;
+ ntu--;
+ }
+ return ICE_XDP_CONSUMED;
- if (unlikely(!xdpf))
- return ICE_XDP_CONSUMED;
+busy:
+ xdp_ring->ring_stats->tx_stats.tx_busy++;
- return ice_xmit_xdp_ring(xdpf->data, xdpf->len, xdp_ring);
+ return ICE_XDP_CONSUMED;
}
/**
@@ -354,14 +443,21 @@ int ice_xmit_xdp_buff(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring)
* should be called when a batch of packets has been processed in the
* napi loop.
*/
-void ice_finalize_xdp_rx(struct ice_tx_ring *xdp_ring, unsigned int xdp_res)
+void ice_finalize_xdp_rx(struct ice_tx_ring *xdp_ring, unsigned int xdp_res,
+ u32 first_idx)
{
+ struct ice_tx_buf *tx_buf = &xdp_ring->tx_buf[first_idx];
+
if (xdp_res & ICE_XDP_REDIR)
xdp_do_flush_map();
if (xdp_res & ICE_XDP_TX) {
if (static_branch_unlikely(&ice_xdp_locking_key))
spin_lock(&xdp_ring->tx_lock);
+ /* store index of descriptor with RS bit set in the first
+ * ice_tx_buf of given NAPI batch
+ */
+ tx_buf->rs_idx = ice_set_rs_bit(xdp_ring);
ice_xdp_ring_update_tail(xdp_ring);
if (static_branch_unlikely(&ice_xdp_locking_key))
spin_unlock(&xdp_ring->tx_lock);
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
index c7d2954dc9ea..115969ecdf7b 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
@@ -6,6 +6,36 @@
#include "ice.h"
/**
+ * ice_set_rx_bufs_act - propagate Rx buffer action to frags
+ * @xdp: XDP buffer representing frame (linear and frags part)
+ * @rx_ring: Rx ring struct
+ * act: action to store onto Rx buffers related to XDP buffer parts
+ *
+ * Set action that should be taken before putting Rx buffer from first frag
+ * to one before last. Last one is handled by caller of this function as it
+ * is the EOP frag that is currently being processed. This function is
+ * supposed to be called only when XDP buffer contains frags.
+ */
+static inline void
+ice_set_rx_bufs_act(struct xdp_buff *xdp, const struct ice_rx_ring *rx_ring,
+ const unsigned int act)
+{
+ const struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
+ u32 first = rx_ring->first_desc;
+ u32 nr_frags = sinfo->nr_frags;
+ u32 cnt = rx_ring->count;
+ struct ice_rx_buf *buf;
+
+ for (int i = 0; i < nr_frags; i++) {
+ buf = &rx_ring->rx_buf[first];
+ buf->act = act;
+
+ if (++first == cnt)
+ first = 0;
+ }
+}
+
+/**
* ice_test_staterr - tests bits in Rx descriptor status and error fields
* @status_err_n: Rx descriptor status_error0 or status_error1 bits
* @stat_err_bits: value to mask
@@ -21,6 +51,28 @@ ice_test_staterr(__le16 status_err_n, const u16 stat_err_bits)
return !!(status_err_n & cpu_to_le16(stat_err_bits));
}
+/**
+ * ice_is_non_eop - process handling of non-EOP buffers
+ * @rx_ring: Rx ring being processed
+ * @rx_desc: Rx descriptor for current buffer
+ *
+ * If the buffer is an EOP buffer, this function exits returning false,
+ * otherwise return true indicating that this is in fact a non-EOP buffer.
+ */
+static inline bool
+ice_is_non_eop(const struct ice_rx_ring *rx_ring,
+ const union ice_32b_rx_flex_desc *rx_desc)
+{
+ /* if we are the last buffer then there is nothing else to do */
+#define ICE_RXD_EOF BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S)
+ if (likely(ice_test_staterr(rx_desc->wb.status_error0, ICE_RXD_EOF)))
+ return false;
+
+ rx_ring->ring_stats->rx_stats.non_eop_descs++;
+
+ return true;
+}
+
static inline __le64
ice_build_ctob(u64 td_cmd, u64 td_offset, unsigned int size, u64 td_tag)
{
@@ -70,9 +122,28 @@ static inline void ice_xdp_ring_update_tail(struct ice_tx_ring *xdp_ring)
writel_relaxed(xdp_ring->next_to_use, xdp_ring->tail);
}
-void ice_finalize_xdp_rx(struct ice_tx_ring *xdp_ring, unsigned int xdp_res);
+/**
+ * ice_set_rs_bit - set RS bit on last produced descriptor (one behind current NTU)
+ * @xdp_ring: XDP ring to produce the HW Tx descriptors on
+ *
+ * returns index of descriptor that had RS bit produced on
+ */
+static inline u32 ice_set_rs_bit(const struct ice_tx_ring *xdp_ring)
+{
+ u32 rs_idx = xdp_ring->next_to_use ? xdp_ring->next_to_use - 1 : xdp_ring->count - 1;
+ struct ice_tx_desc *tx_desc;
+
+ tx_desc = ICE_TX_DESC(xdp_ring, rs_idx);
+ tx_desc->cmd_type_offset_bsz |=
+ cpu_to_le64(ICE_TX_DESC_CMD_RS << ICE_TXD_QW1_CMD_S);
+
+ return rs_idx;
+}
+
+void ice_finalize_xdp_rx(struct ice_tx_ring *xdp_ring, unsigned int xdp_res, u32 first_idx);
int ice_xmit_xdp_buff(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring);
-int ice_xmit_xdp_ring(void *data, u16 size, struct ice_tx_ring *xdp_ring);
+int __ice_xmit_xdp_ring(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring,
+ bool frame);
void ice_release_rx_desc(struct ice_rx_ring *rx_ring, u16 val);
void
ice_process_skb_fields(struct ice_rx_ring *rx_ring,
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
index 375eb6493f0f..0e57bd1b85fd 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
@@ -237,16 +237,49 @@ static void ice_vf_clear_counters(struct ice_vf *vf)
*/
static void ice_vf_pre_vsi_rebuild(struct ice_vf *vf)
{
+ /* Close any IRQ mapping now */
+ if (vf->vf_ops->irq_close)
+ vf->vf_ops->irq_close(vf);
+
ice_vf_clear_counters(vf);
vf->vf_ops->clear_reset_trigger(vf);
}
/**
+ * ice_vf_recreate_vsi - Release and re-create the VF's VSI
+ * @vf: VF to recreate the VSI for
+ *
+ * This is only called when a single VF is being reset (i.e. VVF, VFLR, host
+ * VF configuration change, etc)
+ *
+ * It releases and then re-creates a new VSI.
+ */
+static int ice_vf_recreate_vsi(struct ice_vf *vf)
+{
+ struct ice_pf *pf = vf->pf;
+ int err;
+
+ ice_vf_vsi_release(vf);
+
+ err = vf->vf_ops->create_vsi(vf);
+ if (err) {
+ dev_err(ice_pf_to_dev(pf),
+ "Failed to recreate the VF%u's VSI, error %d\n",
+ vf->vf_id, err);
+ return err;
+ }
+
+ return 0;
+}
+
+/**
* ice_vf_rebuild_vsi - rebuild the VF's VSI
* @vf: VF to rebuild the VSI for
*
* This is only called when all VF(s) are being reset (i.e. PCIe Reset on the
* host, PFR, CORER, etc.).
+ *
+ * It reprograms the VSI configuration back into hardware.
*/
static int ice_vf_rebuild_vsi(struct ice_vf *vf)
{
@@ -256,7 +289,7 @@ static int ice_vf_rebuild_vsi(struct ice_vf *vf)
if (WARN_ON(!vsi))
return -EINVAL;
- if (ice_vsi_rebuild(vsi, true)) {
+ if (ice_vsi_rebuild(vsi, ICE_VSI_FLAG_INIT)) {
dev_err(ice_pf_to_dev(pf), "failed to rebuild VF %d VSI\n",
vf->vf_id);
return -EIO;
@@ -271,6 +304,21 @@ static int ice_vf_rebuild_vsi(struct ice_vf *vf)
}
/**
+ * ice_vf_post_vsi_rebuild - Reset tasks that occur after VSI rebuild
+ * @vf: the VF being reset
+ *
+ * Perform reset tasks which must occur after the VSI has been re-created or
+ * rebuilt during a VF reset.
+ */
+static void ice_vf_post_vsi_rebuild(struct ice_vf *vf)
+{
+ ice_vf_rebuild_host_cfg(vf);
+ ice_vf_set_initialized(vf);
+
+ vf->vf_ops->post_vsi_rebuild(vf);
+}
+
+/**
* ice_is_any_vf_in_unicast_promisc - check if any VF(s)
* are in unicast promiscuous mode
* @pf: PF structure for accessing VF(s)
@@ -495,7 +543,7 @@ void ice_reset_all_vfs(struct ice_pf *pf)
ice_vf_pre_vsi_rebuild(vf);
ice_vf_rebuild_vsi(vf);
- vf->vf_ops->post_vsi_rebuild(vf);
+ ice_vf_post_vsi_rebuild(vf);
mutex_unlock(&vf->cfg_lock);
}
@@ -639,14 +687,14 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags)
ice_vf_pre_vsi_rebuild(vf);
- if (vf->vf_ops->vsi_rebuild(vf)) {
+ if (ice_vf_recreate_vsi(vf)) {
dev_err(dev, "Failed to release and setup the VF%u's VSI\n",
vf->vf_id);
err = -EFAULT;
goto out_unlock;
}
- vf->vf_ops->post_vsi_rebuild(vf);
+ ice_vf_post_vsi_rebuild(vf);
vsi = ice_get_vf_vsi(vf);
if (WARN_ON(!vsi)) {
err = -EINVAL;
@@ -673,7 +721,7 @@ out_unlock:
* ice_set_vf_state_qs_dis - Set VF queues state to disabled
* @vf: pointer to the VF structure
*/
-void ice_set_vf_state_qs_dis(struct ice_vf *vf)
+static void ice_set_vf_state_qs_dis(struct ice_vf *vf)
{
/* Clear Rx/Tx enabled queues flag */
bitmap_zero(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF);
@@ -681,9 +729,45 @@ void ice_set_vf_state_qs_dis(struct ice_vf *vf)
clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
}
+/**
+ * ice_set_vf_state_dis - Set VF state to disabled
+ * @vf: pointer to the VF structure
+ */
+void ice_set_vf_state_dis(struct ice_vf *vf)
+{
+ ice_set_vf_state_qs_dis(vf);
+ vf->vf_ops->clear_reset_state(vf);
+}
+
/* Private functions only accessed from other virtualization files */
/**
+ * ice_initialize_vf_entry - Initialize a VF entry
+ * @vf: pointer to the VF structure
+ */
+void ice_initialize_vf_entry(struct ice_vf *vf)
+{
+ struct ice_pf *pf = vf->pf;
+ struct ice_vfs *vfs;
+
+ vfs = &pf->vfs;
+
+ /* assign default capabilities */
+ vf->spoofchk = true;
+ vf->num_vf_qs = vfs->num_qps_per;
+ ice_vc_set_default_allowlist(vf);
+ ice_virtchnl_set_dflt_ops(vf);
+
+ /* ctrl_vsi_idx will be set to a valid value only when iAVF
+ * creates its first fdir rule.
+ */
+ ice_vf_ctrl_invalidate_vsi(vf);
+ ice_vf_fdir_init(vf);
+
+ mutex_init(&vf->cfg_lock);
+}
+
+/**
* ice_dis_vf_qs - Disable the VF queues
* @vf: pointer to the VF structure
*/
@@ -924,18 +1008,18 @@ static int ice_vf_rebuild_host_mac_cfg(struct ice_vf *vf)
vf->num_mac++;
- if (is_valid_ether_addr(vf->hw_lan_addr.addr)) {
- status = ice_fltr_add_mac(vsi, vf->hw_lan_addr.addr,
+ if (is_valid_ether_addr(vf->hw_lan_addr)) {
+ status = ice_fltr_add_mac(vsi, vf->hw_lan_addr,
ICE_FWD_TO_VSI);
if (status) {
dev_err(dev, "failed to add default unicast MAC filter %pM for VF %u, error %d\n",
- &vf->hw_lan_addr.addr[0], vf->vf_id,
+ &vf->hw_lan_addr[0], vf->vf_id,
status);
return status;
}
vf->num_mac++;
- ether_addr_copy(vf->dev_lan_addr.addr, vf->hw_lan_addr.addr);
+ ether_addr_copy(vf->dev_lan_addr, vf->hw_lan_addr);
}
return 0;
@@ -1115,11 +1199,16 @@ void ice_vf_ctrl_vsi_release(struct ice_vf *vf)
*/
struct ice_vsi *ice_vf_ctrl_vsi_setup(struct ice_vf *vf)
{
- struct ice_port_info *pi = ice_vf_get_port_info(vf);
+ struct ice_vsi_cfg_params params = {};
struct ice_pf *pf = vf->pf;
struct ice_vsi *vsi;
- vsi = ice_vsi_setup(pf, pi, ICE_VSI_CTRL, vf, NULL);
+ params.type = ICE_VSI_CTRL;
+ params.pi = ice_vf_get_port_info(vf);
+ params.vf = vf;
+ params.flags = ICE_VSI_FLAG_INIT;
+
+ vsi = ice_vsi_setup(pf, &params);
if (!vsi) {
dev_err(ice_pf_to_dev(pf), "Failed to create VF control VSI\n");
ice_vf_ctrl_invalidate_vsi(vf);
@@ -1129,6 +1218,60 @@ struct ice_vsi *ice_vf_ctrl_vsi_setup(struct ice_vf *vf)
}
/**
+ * ice_vf_init_host_cfg - Initialize host admin configuration
+ * @vf: VF to initialize
+ * @vsi: the VSI created at initialization
+ *
+ * Initialize the VF host configuration. Called during VF creation to setup
+ * VLAN 0, add the VF VSI broadcast filter, and setup spoof checking. It
+ * should only be called during VF creation.
+ */
+int ice_vf_init_host_cfg(struct ice_vf *vf, struct ice_vsi *vsi)
+{
+ struct ice_vsi_vlan_ops *vlan_ops;
+ struct ice_pf *pf = vf->pf;
+ u8 broadcast[ETH_ALEN];
+ struct device *dev;
+ int err;
+
+ dev = ice_pf_to_dev(pf);
+
+ err = ice_vsi_add_vlan_zero(vsi);
+ if (err) {
+ dev_warn(dev, "Failed to add VLAN 0 filter for VF %d\n",
+ vf->vf_id);
+ return err;
+ }
+
+ vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
+ err = vlan_ops->ena_rx_filtering(vsi);
+ if (err) {
+ dev_warn(dev, "Failed to enable Rx VLAN filtering for VF %d\n",
+ vf->vf_id);
+ return err;
+ }
+
+ eth_broadcast_addr(broadcast);
+ err = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
+ if (err) {
+ dev_err(dev, "Failed to add broadcast MAC filter for VF %d, status %d\n",
+ vf->vf_id, err);
+ return err;
+ }
+
+ vf->num_mac = 1;
+
+ err = ice_vsi_apply_spoofchk(vsi, vf->spoofchk);
+ if (err) {
+ dev_warn(dev, "Failed to initialize spoofchk setting for VF %d\n",
+ vf->vf_id);
+ return err;
+ }
+
+ return 0;
+}
+
+/**
* ice_vf_invalidate_vsi - invalidate vsi_idx/vsi_num to remove VSI access
* @vf: VF to remove access to VSI for
*/
@@ -1139,6 +1282,24 @@ void ice_vf_invalidate_vsi(struct ice_vf *vf)
}
/**
+ * ice_vf_vsi_release - Release the VF VSI and invalidate indexes
+ * @vf: pointer to the VF structure
+ *
+ * Release the VF associated with this VSI and then invalidate the VSI
+ * indexes.
+ */
+void ice_vf_vsi_release(struct ice_vf *vf)
+{
+ struct ice_vsi *vsi = ice_get_vf_vsi(vf);
+
+ if (WARN_ON(!vsi))
+ return;
+
+ ice_vsi_release(vsi);
+ ice_vf_invalidate_vsi(vf);
+}
+
+/**
* ice_vf_set_initialized - VF is ready for VIRTCHNL communication
* @vf: VF to set in initialized state
*
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.h b/drivers/net/ethernet/intel/ice/ice_vf_lib.h
index 52bd9a3816bf..ef30f05b5d02 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.h
@@ -56,11 +56,13 @@ struct ice_mdd_vf_events {
struct ice_vf_ops {
enum ice_disq_rst_src reset_type;
void (*free)(struct ice_vf *vf);
+ void (*clear_reset_state)(struct ice_vf *vf);
void (*clear_mbx_register)(struct ice_vf *vf);
void (*trigger_reset_register)(struct ice_vf *vf, bool is_vflr);
bool (*poll_reset_status)(struct ice_vf *vf);
void (*clear_reset_trigger)(struct ice_vf *vf);
- int (*vsi_rebuild)(struct ice_vf *vf);
+ void (*irq_close)(struct ice_vf *vf);
+ int (*create_vsi)(struct ice_vf *vf);
void (*post_vsi_rebuild)(struct ice_vf *vf);
};
@@ -96,8 +98,8 @@ struct ice_vf {
struct ice_sw *vf_sw_id; /* switch ID the VF VSIs connect to */
struct virtchnl_version_info vf_ver;
u32 driver_caps; /* reported by VF driver */
- struct virtchnl_ether_addr dev_lan_addr;
- struct virtchnl_ether_addr hw_lan_addr;
+ u8 dev_lan_addr[ETH_ALEN];
+ u8 hw_lan_addr[ETH_ALEN];
struct ice_time_mac legacy_last_added_umac;
DECLARE_BITMAP(txq_ena, ICE_MAX_RSS_QS_PER_VF);
DECLARE_BITMAP(rxq_ena, ICE_MAX_RSS_QS_PER_VF);
@@ -213,7 +215,7 @@ u16 ice_get_num_vfs(struct ice_pf *pf);
struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf);
bool ice_is_vf_disabled(struct ice_vf *vf);
int ice_check_vf_ready_for_cfg(struct ice_vf *vf);
-void ice_set_vf_state_qs_dis(struct ice_vf *vf);
+void ice_set_vf_state_dis(struct ice_vf *vf);
bool ice_is_any_vf_in_unicast_promisc(struct ice_pf *pf);
void
ice_vf_get_promisc_masks(struct ice_vf *vf, struct ice_vsi *vsi,
@@ -259,7 +261,7 @@ static inline int ice_check_vf_ready_for_cfg(struct ice_vf *vf)
return -EOPNOTSUPP;
}
-static inline void ice_set_vf_state_qs_dis(struct ice_vf *vf)
+static inline void ice_set_vf_state_dis(struct ice_vf *vf)
{
}
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h b/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h
index 9c8ef2b01f0f..6f3293b793b5 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h
@@ -23,6 +23,7 @@
#warning "Only include ice_vf_lib_private.h in CONFIG_PCI_IOV virtualization files"
#endif
+void ice_initialize_vf_entry(struct ice_vf *vf);
void ice_dis_vf_qs(struct ice_vf *vf);
int ice_check_vf_init(struct ice_vf *vf);
enum virtchnl_status_code ice_err_to_virt_err(int err);
@@ -35,7 +36,9 @@ void ice_vf_rebuild_host_cfg(struct ice_vf *vf);
void ice_vf_ctrl_invalidate_vsi(struct ice_vf *vf);
void ice_vf_ctrl_vsi_release(struct ice_vf *vf);
struct ice_vsi *ice_vf_ctrl_vsi_setup(struct ice_vf *vf);
+int ice_vf_init_host_cfg(struct ice_vf *vf, struct ice_vsi *vsi);
void ice_vf_invalidate_vsi(struct ice_vf *vf);
+void ice_vf_vsi_release(struct ice_vf *vf);
void ice_vf_set_initialized(struct ice_vf *vf);
#endif /* _ICE_VF_LIB_PRIVATE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_mbx.c b/drivers/net/ethernet/intel/ice/ice_vf_mbx.c
index d4a4001b6e5d..f56fa94ff3d0 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_mbx.c
+++ b/drivers/net/ethernet/intel/ice/ice_vf_mbx.c
@@ -39,7 +39,7 @@ ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval,
return ice_sq_send_cmd(hw, &hw->mailboxq, &desc, msg, msglen, cd);
}
-static const u32 ice_legacy_aq_to_vc_speed[15] = {
+static const u32 ice_legacy_aq_to_vc_speed[] = {
VIRTCHNL_LINK_SPEED_100MB, /* BIT(0) */
VIRTCHNL_LINK_SPEED_100MB,
VIRTCHNL_LINK_SPEED_1GB,
@@ -51,10 +51,6 @@ static const u32 ice_legacy_aq_to_vc_speed[15] = {
VIRTCHNL_LINK_SPEED_40GB,
VIRTCHNL_LINK_SPEED_40GB,
VIRTCHNL_LINK_SPEED_40GB,
- VIRTCHNL_LINK_SPEED_UNKNOWN,
- VIRTCHNL_LINK_SPEED_UNKNOWN,
- VIRTCHNL_LINK_SPEED_UNKNOWN,
- VIRTCHNL_LINK_SPEED_UNKNOWN /* BIT(14) */
};
/**
@@ -71,21 +67,20 @@ static const u32 ice_legacy_aq_to_vc_speed[15] = {
*/
u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed)
{
- u32 speed;
+ /* convert a BIT() value into an array index */
+ u32 index = fls(link_speed) - 1;
- if (adv_link_support) {
- /* convert a BIT() value into an array index */
- speed = ice_get_link_speed(fls(link_speed) - 1);
- } else {
+ if (adv_link_support)
+ return ice_get_link_speed(index);
+ else if (index < ARRAY_SIZE(ice_legacy_aq_to_vc_speed))
/* Virtchnl speeds are not defined for every speed supported in
* the hardware. To maintain compatibility with older AVF
* drivers, while reporting the speed the new speed values are
* resolved to the closest known virtchnl speeds
*/
- speed = ice_legacy_aq_to_vc_speed[fls(link_speed) - 1];
- }
+ return ice_legacy_aq_to_vc_speed[index];
- return speed;
+ return VIRTCHNL_LINK_SPEED_UNKNOWN;
}
/* The mailbox overflow detection algorithm helps to check if there
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c b/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c
index 5ecc0ee9a78e..b1ffb81893d4 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c
+++ b/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c
@@ -44,13 +44,17 @@ void ice_vf_vsi_init_vlan_ops(struct ice_vsi *vsi)
/* outer VLAN ops regardless of port VLAN config */
vlan_ops->add_vlan = ice_vsi_add_vlan;
- vlan_ops->dis_rx_filtering = ice_vsi_dis_rx_vlan_filtering;
vlan_ops->ena_tx_filtering = ice_vsi_ena_tx_vlan_filtering;
vlan_ops->dis_tx_filtering = ice_vsi_dis_tx_vlan_filtering;
if (ice_vf_is_port_vlan_ena(vf)) {
/* setup outer VLAN ops */
vlan_ops->set_port_vlan = ice_vsi_set_outer_port_vlan;
+ /* all Rx traffic should be in the domain of the
+ * assigned port VLAN, so prevent disabling Rx VLAN
+ * filtering
+ */
+ vlan_ops->dis_rx_filtering = noop_vlan;
vlan_ops->ena_rx_filtering =
ice_vsi_ena_rx_vlan_filtering;
@@ -63,6 +67,9 @@ void ice_vf_vsi_init_vlan_ops(struct ice_vsi *vsi)
vlan_ops->ena_insertion = ice_vsi_ena_inner_insertion;
vlan_ops->dis_insertion = ice_vsi_dis_inner_insertion;
} else {
+ vlan_ops->dis_rx_filtering =
+ ice_vsi_dis_rx_vlan_filtering;
+
if (!test_bit(ICE_FLAG_VF_VLAN_PRUNING, pf->flags))
vlan_ops->ena_rx_filtering = noop_vlan;
else
@@ -96,7 +103,14 @@ void ice_vf_vsi_init_vlan_ops(struct ice_vsi *vsi)
vlan_ops->set_port_vlan = ice_vsi_set_inner_port_vlan;
vlan_ops->ena_rx_filtering =
ice_vsi_ena_rx_vlan_filtering;
+ /* all Rx traffic should be in the domain of the
+ * assigned port VLAN, so prevent disabling Rx VLAN
+ * filtering
+ */
+ vlan_ops->dis_rx_filtering = noop_vlan;
} else {
+ vlan_ops->dis_rx_filtering =
+ ice_vsi_dis_rx_vlan_filtering;
if (!test_bit(ICE_FLAG_VF_VLAN_PRUNING, pf->flags))
vlan_ops->ena_rx_filtering = noop_vlan;
else
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
index dab3cd5d300e..e24e3f5017ca 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
@@ -507,7 +507,7 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
vfres->vsi_res[0].num_queue_pairs = vsi->num_txq;
ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
- vf->hw_lan_addr.addr);
+ vf->hw_lan_addr);
/* match guest capabilities */
vf->driver_caps = vfres->vf_cap_flags;
@@ -1802,10 +1802,10 @@ ice_vfhw_mac_add(struct ice_vf *vf, struct virtchnl_ether_addr *vc_ether_addr)
* was correctly specified over VIRTCHNL
*/
if ((ice_is_vc_addr_legacy(vc_ether_addr) &&
- is_zero_ether_addr(vf->hw_lan_addr.addr)) ||
+ is_zero_ether_addr(vf->hw_lan_addr)) ||
ice_is_vc_addr_primary(vc_ether_addr)) {
- ether_addr_copy(vf->dev_lan_addr.addr, mac_addr);
- ether_addr_copy(vf->hw_lan_addr.addr, mac_addr);
+ ether_addr_copy(vf->dev_lan_addr, mac_addr);
+ ether_addr_copy(vf->hw_lan_addr, mac_addr);
}
/* hardware and device MACs are already set, but its possible that the
@@ -1836,7 +1836,7 @@ ice_vc_add_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi,
int ret;
/* device MAC already added */
- if (ether_addr_equal(mac_addr, vf->dev_lan_addr.addr))
+ if (ether_addr_equal(mac_addr, vf->dev_lan_addr))
return 0;
if (is_unicast_ether_addr(mac_addr) && !ice_can_vf_change_mac(vf)) {
@@ -1891,8 +1891,8 @@ ice_update_legacy_cached_mac(struct ice_vf *vf,
ice_is_legacy_umac_expired(&vf->legacy_last_added_umac))
return;
- ether_addr_copy(vf->dev_lan_addr.addr, vf->legacy_last_added_umac.addr);
- ether_addr_copy(vf->hw_lan_addr.addr, vf->legacy_last_added_umac.addr);
+ ether_addr_copy(vf->dev_lan_addr, vf->legacy_last_added_umac.addr);
+ ether_addr_copy(vf->hw_lan_addr, vf->legacy_last_added_umac.addr);
}
/**
@@ -1906,15 +1906,15 @@ ice_vfhw_mac_del(struct ice_vf *vf, struct virtchnl_ether_addr *vc_ether_addr)
u8 *mac_addr = vc_ether_addr->addr;
if (!is_valid_ether_addr(mac_addr) ||
- !ether_addr_equal(vf->dev_lan_addr.addr, mac_addr))
+ !ether_addr_equal(vf->dev_lan_addr, mac_addr))
return;
/* allow the device MAC to be repopulated in the add flow and don't
- * clear the hardware MAC (i.e. hw_lan_addr.addr) here as that is meant
+ * clear the hardware MAC (i.e. hw_lan_addr) here as that is meant
* to be persistent on VM reboot and across driver unload/load, which
* won't work if we clear the hardware MAC here
*/
- eth_zero_addr(vf->dev_lan_addr.addr);
+ eth_zero_addr(vf->dev_lan_addr);
ice_update_legacy_cached_mac(vf, vc_ether_addr);
}
@@ -1934,7 +1934,7 @@ ice_vc_del_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi,
int status;
if (!ice_can_vf_change_mac(vf) &&
- ether_addr_equal(vf->dev_lan_addr.addr, mac_addr))
+ ether_addr_equal(vf->dev_lan_addr, mac_addr))
return 0;
status = ice_fltr_remove_mac(vsi, mac_addr, ICE_FWD_TO_VSI);
@@ -3733,7 +3733,7 @@ static int ice_vc_repr_add_mac(struct ice_vf *vf, u8 *msg)
int result;
if (!is_unicast_ether_addr(mac_addr) ||
- ether_addr_equal(mac_addr, vf->hw_lan_addr.addr))
+ ether_addr_equal(mac_addr, vf->hw_lan_addr))
continue;
if (vf->pf_set_mac) {
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
index c6a58343d81d..e6ef6b303222 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
@@ -113,7 +113,7 @@ ice_vc_fdir_param_check(struct ice_vf *vf, u16 vsi_id)
if (!ice_vc_isvalid_vsi_id(vf, vsi_id))
return -EINVAL;
- if (!pf->vsi[vf->lan_vsi_idx])
+ if (!ice_get_vf_vsi(vf))
return -EINVAL;
return 0;
@@ -494,7 +494,7 @@ ice_vc_fdir_rem_prof(struct ice_vf *vf, enum ice_fltr_ptype flow, int tun)
vf_prof = fdir->fdir_prof[flow];
- vf_vsi = pf->vsi[vf->lan_vsi_idx];
+ vf_vsi = ice_get_vf_vsi(vf);
if (!vf_vsi) {
dev_dbg(dev, "NULL vf %d vsi pointer\n", vf->vf_id);
return;
@@ -572,7 +572,7 @@ ice_vc_fdir_write_flow_prof(struct ice_vf *vf, enum ice_fltr_ptype flow,
pf = vf->pf;
dev = ice_pf_to_dev(pf);
hw = &pf->hw;
- vf_vsi = pf->vsi[vf->lan_vsi_idx];
+ vf_vsi = ice_get_vf_vsi(vf);
if (!vf_vsi)
return -EINVAL;
@@ -1205,7 +1205,7 @@ static int ice_vc_fdir_write_fltr(struct ice_vf *vf,
pf = vf->pf;
dev = ice_pf_to_dev(pf);
hw = &pf->hw;
- vsi = pf->vsi[vf->lan_vsi_idx];
+ vsi = ice_get_vf_vsi(vf);
if (!vsi) {
dev_dbg(dev, "Invalid vsi for VF %d\n", vf->vf_id);
return -EINVAL;
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index 7105de6fb344..31565bbafa22 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -598,6 +598,112 @@ ice_construct_skb_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp)
}
/**
+ * ice_clean_xdp_irq_zc - produce AF_XDP descriptors to CQ
+ * @xdp_ring: XDP Tx ring
+ */
+static void ice_clean_xdp_irq_zc(struct ice_tx_ring *xdp_ring)
+{
+ u16 ntc = xdp_ring->next_to_clean;
+ struct ice_tx_desc *tx_desc;
+ u16 cnt = xdp_ring->count;
+ struct ice_tx_buf *tx_buf;
+ u16 completed_frames = 0;
+ u16 xsk_frames = 0;
+ u16 last_rs;
+ int i;
+
+ last_rs = xdp_ring->next_to_use ? xdp_ring->next_to_use - 1 : cnt - 1;
+ tx_desc = ICE_TX_DESC(xdp_ring, last_rs);
+ if (tx_desc->cmd_type_offset_bsz &
+ cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)) {
+ if (last_rs >= ntc)
+ completed_frames = last_rs - ntc + 1;
+ else
+ completed_frames = last_rs + cnt - ntc + 1;
+ }
+
+ if (!completed_frames)
+ return;
+
+ if (likely(!xdp_ring->xdp_tx_active)) {
+ xsk_frames = completed_frames;
+ goto skip;
+ }
+
+ ntc = xdp_ring->next_to_clean;
+ for (i = 0; i < completed_frames; i++) {
+ tx_buf = &xdp_ring->tx_buf[ntc];
+
+ if (tx_buf->type == ICE_TX_BUF_XSK_TX) {
+ tx_buf->type = ICE_TX_BUF_EMPTY;
+ xsk_buff_free(tx_buf->xdp);
+ xdp_ring->xdp_tx_active--;
+ } else {
+ xsk_frames++;
+ }
+
+ ntc++;
+ if (ntc >= xdp_ring->count)
+ ntc = 0;
+ }
+skip:
+ tx_desc->cmd_type_offset_bsz = 0;
+ xdp_ring->next_to_clean += completed_frames;
+ if (xdp_ring->next_to_clean >= cnt)
+ xdp_ring->next_to_clean -= cnt;
+ if (xsk_frames)
+ xsk_tx_completed(xdp_ring->xsk_pool, xsk_frames);
+}
+
+/**
+ * ice_xmit_xdp_tx_zc - AF_XDP ZC handler for XDP_TX
+ * @xdp: XDP buffer to xmit
+ * @xdp_ring: XDP ring to produce descriptor onto
+ *
+ * note that this function works directly on xdp_buff, no need to convert
+ * it to xdp_frame. xdp_buff pointer is stored to ice_tx_buf so that cleaning
+ * side will be able to xsk_buff_free() it.
+ *
+ * Returns ICE_XDP_TX for successfully produced desc, ICE_XDP_CONSUMED if there
+ * was not enough space on XDP ring
+ */
+static int ice_xmit_xdp_tx_zc(struct xdp_buff *xdp,
+ struct ice_tx_ring *xdp_ring)
+{
+ u32 size = xdp->data_end - xdp->data;
+ u32 ntu = xdp_ring->next_to_use;
+ struct ice_tx_desc *tx_desc;
+ struct ice_tx_buf *tx_buf;
+ dma_addr_t dma;
+
+ if (ICE_DESC_UNUSED(xdp_ring) < ICE_RING_QUARTER(xdp_ring)) {
+ ice_clean_xdp_irq_zc(xdp_ring);
+ if (!ICE_DESC_UNUSED(xdp_ring)) {
+ xdp_ring->ring_stats->tx_stats.tx_busy++;
+ return ICE_XDP_CONSUMED;
+ }
+ }
+
+ dma = xsk_buff_xdp_get_dma(xdp);
+ xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, size);
+
+ tx_buf = &xdp_ring->tx_buf[ntu];
+ tx_buf->xdp = xdp;
+ tx_buf->type = ICE_TX_BUF_XSK_TX;
+ tx_desc = ICE_TX_DESC(xdp_ring, ntu);
+ tx_desc->buf_addr = cpu_to_le64(dma);
+ tx_desc->cmd_type_offset_bsz = ice_build_ctob(ICE_TX_DESC_CMD_EOP,
+ 0, size, 0);
+ xdp_ring->xdp_tx_active++;
+
+ if (++ntu == xdp_ring->count)
+ ntu = 0;
+ xdp_ring->next_to_use = ntu;
+
+ return ICE_XDP_TX;
+}
+
+/**
* ice_run_xdp_zc - Executes an XDP program in zero-copy path
* @rx_ring: Rx ring
* @xdp: xdp_buff used as input to the XDP program
@@ -630,7 +736,7 @@ ice_run_xdp_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
case XDP_PASS:
break;
case XDP_TX:
- result = ice_xmit_xdp_buff(xdp, xdp_ring);
+ result = ice_xmit_xdp_tx_zc(xdp, xdp_ring);
if (result == ICE_XDP_CONSUMED)
goto out_failure;
break;
@@ -760,7 +866,7 @@ construct_skb:
if (entries_to_alloc > ICE_RING_QUARTER(rx_ring))
failure |= !ice_alloc_rx_bufs_zc(rx_ring, entries_to_alloc);
- ice_finalize_xdp_rx(xdp_ring, xdp_xmit);
+ ice_finalize_xdp_rx(xdp_ring, xdp_xmit, 0);
ice_update_rx_ring_stats(rx_ring, total_rx_packets, total_rx_bytes);
if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) {
@@ -776,75 +882,6 @@ construct_skb:
}
/**
- * ice_clean_xdp_tx_buf - Free and unmap XDP Tx buffer
- * @xdp_ring: XDP Tx ring
- * @tx_buf: Tx buffer to clean
- */
-static void
-ice_clean_xdp_tx_buf(struct ice_tx_ring *xdp_ring, struct ice_tx_buf *tx_buf)
-{
- page_frag_free(tx_buf->raw_buf);
- xdp_ring->xdp_tx_active--;
- dma_unmap_single(xdp_ring->dev, dma_unmap_addr(tx_buf, dma),
- dma_unmap_len(tx_buf, len), DMA_TO_DEVICE);
- dma_unmap_len_set(tx_buf, len, 0);
-}
-
-/**
- * ice_clean_xdp_irq_zc - produce AF_XDP descriptors to CQ
- * @xdp_ring: XDP Tx ring
- */
-static void ice_clean_xdp_irq_zc(struct ice_tx_ring *xdp_ring)
-{
- u16 ntc = xdp_ring->next_to_clean;
- struct ice_tx_desc *tx_desc;
- u16 cnt = xdp_ring->count;
- struct ice_tx_buf *tx_buf;
- u16 xsk_frames = 0;
- u16 last_rs;
- int i;
-
- last_rs = xdp_ring->next_to_use ? xdp_ring->next_to_use - 1 : cnt - 1;
- tx_desc = ICE_TX_DESC(xdp_ring, last_rs);
- if ((tx_desc->cmd_type_offset_bsz &
- cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE))) {
- if (last_rs >= ntc)
- xsk_frames = last_rs - ntc + 1;
- else
- xsk_frames = last_rs + cnt - ntc + 1;
- }
-
- if (!xsk_frames)
- return;
-
- if (likely(!xdp_ring->xdp_tx_active))
- goto skip;
-
- ntc = xdp_ring->next_to_clean;
- for (i = 0; i < xsk_frames; i++) {
- tx_buf = &xdp_ring->tx_buf[ntc];
-
- if (tx_buf->raw_buf) {
- ice_clean_xdp_tx_buf(xdp_ring, tx_buf);
- tx_buf->raw_buf = NULL;
- } else {
- xsk_frames++;
- }
-
- ntc++;
- if (ntc >= xdp_ring->count)
- ntc = 0;
- }
-skip:
- tx_desc->cmd_type_offset_bsz = 0;
- xdp_ring->next_to_clean += xsk_frames;
- if (xdp_ring->next_to_clean >= cnt)
- xdp_ring->next_to_clean -= cnt;
- if (xsk_frames)
- xsk_tx_completed(xdp_ring->xsk_pool, xsk_frames);
-}
-
-/**
* ice_xmit_pkt - produce a single HW Tx descriptor out of AF_XDP descriptor
* @xdp_ring: XDP ring to produce the HW Tx descriptor on
* @desc: AF_XDP descriptor to pull the DMA address and length from
@@ -918,20 +955,6 @@ static void ice_fill_tx_hw_ring(struct ice_tx_ring *xdp_ring, struct xdp_desc *d
}
/**
- * ice_set_rs_bit - set RS bit on last produced descriptor (one behind current NTU)
- * @xdp_ring: XDP ring to produce the HW Tx descriptors on
- */
-static void ice_set_rs_bit(struct ice_tx_ring *xdp_ring)
-{
- u16 ntu = xdp_ring->next_to_use ? xdp_ring->next_to_use - 1 : xdp_ring->count - 1;
- struct ice_tx_desc *tx_desc;
-
- tx_desc = ICE_TX_DESC(xdp_ring, ntu);
- tx_desc->cmd_type_offset_bsz |=
- cpu_to_le64(ICE_TX_DESC_CMD_RS << ICE_TXD_QW1_CMD_S);
-}
-
-/**
* ice_xmit_zc - take entries from XSK Tx ring and place them onto HW Tx ring
* @xdp_ring: XDP ring to produce the HW Tx descriptors on
*
@@ -1065,12 +1088,12 @@ void ice_xsk_clean_xdp_ring(struct ice_tx_ring *xdp_ring)
while (ntc != ntu) {
struct ice_tx_buf *tx_buf = &xdp_ring->tx_buf[ntc];
- if (tx_buf->raw_buf)
- ice_clean_xdp_tx_buf(xdp_ring, tx_buf);
- else
+ if (tx_buf->type == ICE_TX_BUF_XSK_TX) {
+ tx_buf->type = ICE_TX_BUF_EMPTY;
+ xsk_buff_free(tx_buf->xdp);
+ } else {
xsk_frames++;
-
- tx_buf->raw_buf = NULL;
+ }
ntc++;
if (ntc >= xdp_ring->count)
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 3c0c35ecea10..03bc1e8af575 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -2256,6 +2256,30 @@ static void igb_enable_mas(struct igb_adapter *adapter)
}
}
+#ifdef CONFIG_IGB_HWMON
+/**
+ * igb_set_i2c_bb - Init I2C interface
+ * @hw: pointer to hardware structure
+ **/
+static void igb_set_i2c_bb(struct e1000_hw *hw)
+{
+ u32 ctrl_ext;
+ s32 i2cctl;
+
+ ctrl_ext = rd32(E1000_CTRL_EXT);
+ ctrl_ext |= E1000_CTRL_I2C_ENA;
+ wr32(E1000_CTRL_EXT, ctrl_ext);
+ wrfl();
+
+ i2cctl = rd32(E1000_I2CPARAMS);
+ i2cctl |= E1000_I2CBB_EN
+ | E1000_I2C_CLK_OE_N
+ | E1000_I2C_DATA_OE_N;
+ wr32(E1000_I2CPARAMS, i2cctl);
+ wrfl();
+}
+#endif
+
void igb_reset(struct igb_adapter *adapter)
{
struct pci_dev *pdev = adapter->pdev;
@@ -2400,7 +2424,8 @@ void igb_reset(struct igb_adapter *adapter)
* interface.
*/
if (adapter->ets)
- mac->ops.init_thermal_sensor_thresh(hw);
+ igb_set_i2c_bb(hw);
+ mac->ops.init_thermal_sensor_thresh(hw);
}
}
#endif
@@ -2810,6 +2835,22 @@ static int igb_offload_txtime(struct igb_adapter *adapter,
return 0;
}
+static int igb_tc_query_caps(struct igb_adapter *adapter,
+ struct tc_query_caps_base *base)
+{
+ switch (base->type) {
+ case TC_SETUP_QDISC_TAPRIO: {
+ struct tc_taprio_caps *caps = base->caps;
+
+ caps->broken_mqprio = true;
+
+ return 0;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static LIST_HEAD(igb_block_cb_list);
static int igb_setup_tc(struct net_device *dev, enum tc_setup_type type,
@@ -2818,6 +2859,8 @@ static int igb_setup_tc(struct net_device *dev, enum tc_setup_type type,
struct igb_adapter *adapter = netdev_priv(dev);
switch (type) {
+ case TC_QUERY_CAPS:
+ return igb_tc_query_caps(adapter, type_data);
case TC_SETUP_QDISC_CBS:
return igb_offload_cbs(adapter, type_data);
case TC_SETUP_BLOCK:
@@ -2871,8 +2914,14 @@ static int igb_xdp_setup(struct net_device *dev, struct netdev_bpf *bpf)
bpf_prog_put(old_prog);
/* bpf is just replaced, RXQ and MTU are already setup */
- if (!need_reset)
+ if (!need_reset) {
return 0;
+ } else {
+ if (prog)
+ xdp_features_set_redirect_target(dev, true);
+ else
+ xdp_features_clear_redirect_target(dev);
+ }
if (running)
igb_open(dev);
@@ -3117,21 +3166,12 @@ static void igb_init_mas(struct igb_adapter *adapter)
**/
static s32 igb_init_i2c(struct igb_adapter *adapter)
{
- struct e1000_hw *hw = &adapter->hw;
s32 status = 0;
- s32 i2cctl;
/* I2C interface supported on i350 devices */
if (adapter->hw.mac.type != e1000_i350)
return 0;
- i2cctl = rd32(E1000_I2CPARAMS);
- i2cctl |= E1000_I2CBB_EN
- | E1000_I2C_CLK_OUT | E1000_I2C_CLK_OE_N
- | E1000_I2C_DATA_OUT | E1000_I2C_DATA_OE_N;
- wr32(E1000_I2CPARAMS, i2cctl);
- wrfl();
-
/* Initialize the i2c bus which is controlled by the registers.
* This bus will use the i2c_algo_bit structure that implements
* the protocol through toggling of the 4 bits in the register.
@@ -3194,8 +3234,6 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto err_pci_reg;
- pci_enable_pcie_error_reporting(pdev);
-
pci_set_master(pdev);
pci_save_state(pdev);
@@ -3317,6 +3355,7 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->priv_flags |= IFF_SUPP_NOFCS;
netdev->priv_flags |= IFF_UNICAST_FLT;
+ netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT;
/* MTU range: 68 - 9216 */
netdev->min_mtu = ETH_MIN_MTU;
@@ -3521,6 +3560,12 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter->ets = true;
else
adapter->ets = false;
+ /* Only enable I2C bit banging if an external thermal
+ * sensor is supported.
+ */
+ if (adapter->ets)
+ igb_set_i2c_bb(hw);
+ hw->mac.ops.init_thermal_sensor_thresh(hw);
if (igb_sysfs_init(adapter))
dev_err(&pdev->dev,
"failed to allocate sysfs resources\n");
@@ -3626,7 +3671,6 @@ err_sw_init:
err_ioremap:
free_netdev(netdev);
err_alloc_etherdev:
- pci_disable_pcie_error_reporting(pdev);
pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
@@ -3837,8 +3881,6 @@ static void igb_remove(struct pci_dev *pdev)
kfree(adapter->shadow_vfta);
free_netdev(netdev);
- pci_disable_pcie_error_reporting(pdev);
-
pci_disable_device(pdev);
}
@@ -6794,7 +6836,7 @@ static void igb_perout(struct igb_adapter *adapter, int tsintr_tt)
struct timespec64 ts;
u32 tsauxc;
- if (pin < 0 || pin >= IGB_N_PEROUT)
+ if (pin < 0 || pin >= IGB_N_SDP)
return;
spin_lock(&adapter->tmreg_lock);
@@ -6802,7 +6844,7 @@ static void igb_perout(struct igb_adapter *adapter, int tsintr_tt)
if (hw->mac.type == e1000_82580 ||
hw->mac.type == e1000_i354 ||
hw->mac.type == e1000_i350) {
- s64 ns = timespec64_to_ns(&adapter->perout[pin].period);
+ s64 ns = timespec64_to_ns(&adapter->perout[tsintr_tt].period);
u32 systiml, systimh, level_mask, level, rem;
u64 systim, now;
@@ -6850,8 +6892,8 @@ static void igb_perout(struct igb_adapter *adapter, int tsintr_tt)
ts.tv_nsec = (u32)systim;
ts.tv_sec = ((u32)(systim >> 32)) & 0xFF;
} else {
- ts = timespec64_add(adapter->perout[pin].start,
- adapter->perout[pin].period);
+ ts = timespec64_add(adapter->perout[tsintr_tt].start,
+ adapter->perout[tsintr_tt].period);
}
/* u32 conversion of tv_sec is safe until y2106 */
@@ -6860,7 +6902,7 @@ static void igb_perout(struct igb_adapter *adapter, int tsintr_tt)
tsauxc = rd32(E1000_TSAUXC);
tsauxc |= TSAUXC_EN_TT0;
wr32(E1000_TSAUXC, tsauxc);
- adapter->perout[pin].start = ts;
+ adapter->perout[tsintr_tt].start = ts;
spin_unlock(&adapter->tmreg_lock);
}
@@ -6874,7 +6916,7 @@ static void igb_extts(struct igb_adapter *adapter, int tsintr_tt)
struct ptp_clock_event event;
struct timespec64 ts;
- if (pin < 0 || pin >= IGB_N_EXTTS)
+ if (pin < 0 || pin >= IGB_N_SDP)
return;
if (hw->mac.type == e1000_82580 ||
diff --git a/drivers/net/ethernet/intel/igc/igc_base.c b/drivers/net/ethernet/intel/igc/igc_base.c
index a15927e77272..a1d815af507d 100644
--- a/drivers/net/ethernet/intel/igc/igc_base.c
+++ b/drivers/net/ethernet/intel/igc/igc_base.c
@@ -396,6 +396,35 @@ void igc_rx_fifo_flush_base(struct igc_hw *hw)
rd32(IGC_MPC);
}
+bool igc_is_device_id_i225(struct igc_hw *hw)
+{
+ switch (hw->device_id) {
+ case IGC_DEV_ID_I225_LM:
+ case IGC_DEV_ID_I225_V:
+ case IGC_DEV_ID_I225_I:
+ case IGC_DEV_ID_I225_K:
+ case IGC_DEV_ID_I225_K2:
+ case IGC_DEV_ID_I225_LMVP:
+ case IGC_DEV_ID_I225_IT:
+ return true;
+ default:
+ return false;
+ }
+}
+
+bool igc_is_device_id_i226(struct igc_hw *hw)
+{
+ switch (hw->device_id) {
+ case IGC_DEV_ID_I226_LM:
+ case IGC_DEV_ID_I226_V:
+ case IGC_DEV_ID_I226_K:
+ case IGC_DEV_ID_I226_IT:
+ return true;
+ default:
+ return false;
+ }
+}
+
static struct igc_mac_operations igc_mac_ops_base = {
.init_hw = igc_init_hw_base,
.check_for_link = igc_check_for_copper_link,
diff --git a/drivers/net/ethernet/intel/igc/igc_base.h b/drivers/net/ethernet/intel/igc/igc_base.h
index ce530f5fd7bd..7a992befca24 100644
--- a/drivers/net/ethernet/intel/igc/igc_base.h
+++ b/drivers/net/ethernet/intel/igc/igc_base.h
@@ -7,6 +7,8 @@
/* forward declaration */
void igc_rx_fifo_flush_base(struct igc_hw *hw);
void igc_power_down_phy_copper_base(struct igc_hw *hw);
+bool igc_is_device_id_i225(struct igc_hw *hw);
+bool igc_is_device_id_i226(struct igc_hw *hw);
/* Transmit Descriptor - Advanced */
union igc_adv_tx_desc {
diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h
index e9747ec5ac0b..9dec3563ce3a 100644
--- a/drivers/net/ethernet/intel/igc/igc_defines.h
+++ b/drivers/net/ethernet/intel/igc/igc_defines.h
@@ -524,6 +524,7 @@
/* Transmit Scheduling */
#define IGC_TQAVCTRL_TRANSMIT_MODE_TSN 0x00000001
#define IGC_TQAVCTRL_ENHANCED_QAV 0x00000008
+#define IGC_TQAVCTRL_FUTSCDDIS 0x00000080
#define IGC_TXQCTL_QUEUE_MODE_LAUNCHT 0x00000001
#define IGC_TXQCTL_STRICT_CYCLE 0x00000002
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 44b1740dc098..2928a6c73692 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -2942,7 +2942,9 @@ static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget)
if (tx_buffer->next_to_watch &&
time_after(jiffies, tx_buffer->time_stamp +
(adapter->tx_timeout_factor * HZ)) &&
- !(rd32(IGC_STATUS) & IGC_STATUS_TXOFF)) {
+ !(rd32(IGC_STATUS) & IGC_STATUS_TXOFF) &&
+ (rd32(IGC_TDH(tx_ring->reg_idx)) !=
+ readl(tx_ring->tail))) {
/* detected Tx unit hang */
netdev_err(tx_ring->netdev,
"Detected Tx Unit Hang\n"
@@ -5069,6 +5071,24 @@ static int igc_change_mtu(struct net_device *netdev, int new_mtu)
}
/**
+ * igc_tx_timeout - Respond to a Tx Hang
+ * @netdev: network interface device structure
+ * @txqueue: queue number that timed out
+ **/
+static void igc_tx_timeout(struct net_device *netdev,
+ unsigned int __always_unused txqueue)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+ struct igc_hw *hw = &adapter->hw;
+
+ /* Do the reset outside of interrupt context */
+ adapter->tx_timeout_count++;
+ schedule_work(&adapter->reset_task);
+ wr32(IGC_EICS,
+ (adapter->eims_enable_mask & ~adapter->eims_other));
+}
+
+/**
* igc_get_stats64 - Get System Network Statistics
* @netdev: network interface device structure
* @stats: rtnl_link_stats64 pointer
@@ -5495,7 +5515,7 @@ static void igc_watchdog_task(struct work_struct *work)
case SPEED_100:
case SPEED_1000:
case SPEED_2500:
- adapter->tx_timeout_factor = 7;
+ adapter->tx_timeout_factor = 1;
break;
}
@@ -5958,6 +5978,7 @@ static bool validate_schedule(struct igc_adapter *adapter,
const struct tc_taprio_qopt_offload *qopt)
{
int queue_uses[IGC_MAX_TX_QUEUES] = { };
+ struct igc_hw *hw = &adapter->hw;
struct timespec64 now;
size_t n;
@@ -5970,8 +5991,10 @@ static bool validate_schedule(struct igc_adapter *adapter,
* in the future, it will hold all the packets until that
* time, causing a lot of TX Hangs, so to avoid that, we
* reject schedules that would start in the future.
+ * Note: Limitation above is no longer in i226.
*/
- if (!is_base_time_past(qopt->base_time, &now))
+ if (!is_base_time_past(qopt->base_time, &now) &&
+ igc_is_device_id_i225(hw))
return false;
for (n = 0; n < qopt->num_entries; n++) {
@@ -6041,6 +6064,7 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter,
struct tc_taprio_qopt_offload *qopt)
{
bool queue_configured[IGC_MAX_TX_QUEUES] = { };
+ struct igc_hw *hw = &adapter->hw;
u32 start_time = 0, end_time = 0;
size_t n;
int i;
@@ -6053,7 +6077,7 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter,
if (qopt->base_time < 0)
return -ERANGE;
- if (adapter->base_time)
+ if (igc_is_device_id_i225(hw) && adapter->base_time)
return -EALREADY;
if (!validate_schedule(adapter, qopt))
@@ -6201,12 +6225,35 @@ static int igc_tsn_enable_cbs(struct igc_adapter *adapter,
return igc_tsn_offload_apply(adapter);
}
+static int igc_tc_query_caps(struct igc_adapter *adapter,
+ struct tc_query_caps_base *base)
+{
+ struct igc_hw *hw = &adapter->hw;
+
+ switch (base->type) {
+ case TC_SETUP_QDISC_TAPRIO: {
+ struct tc_taprio_caps *caps = base->caps;
+
+ caps->broken_mqprio = true;
+
+ if (hw->mac.type == igc_i225)
+ caps->gate_mask_per_txq = true;
+
+ return 0;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static int igc_setup_tc(struct net_device *dev, enum tc_setup_type type,
void *type_data)
{
struct igc_adapter *adapter = netdev_priv(dev);
switch (type) {
+ case TC_QUERY_CAPS:
+ return igc_tc_query_caps(adapter, type_data);
case TC_SETUP_QDISC_TAPRIO:
return igc_tsn_enable_qbv_scheduling(adapter, type_data);
@@ -6320,6 +6367,7 @@ static const struct net_device_ops igc_netdev_ops = {
.ndo_set_rx_mode = igc_set_rx_mode,
.ndo_set_mac_address = igc_set_mac,
.ndo_change_mtu = igc_change_mtu,
+ .ndo_tx_timeout = igc_tx_timeout,
.ndo_get_stats64 = igc_get_stats64,
.ndo_fix_features = igc_fix_features,
.ndo_set_features = igc_set_features,
@@ -6430,8 +6478,6 @@ static int igc_probe(struct pci_dev *pdev,
if (err)
goto err_pci_reg;
- pci_enable_pcie_error_reporting(pdev);
-
err = pci_enable_ptm(pdev, NULL);
if (err < 0)
dev_info(&pdev->dev, "PCIe PTM not supported by PCIe bus/controller\n");
@@ -6529,6 +6575,9 @@ static int igc_probe(struct pci_dev *pdev,
netdev->mpls_features |= NETIF_F_HW_CSUM;
netdev->hw_enc_features |= netdev->vlan_features;
+ netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_XSK_ZEROCOPY;
+
/* MTU range: 68 - 9216 */
netdev->min_mtu = ETH_MIN_MTU;
netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE;
@@ -6636,7 +6685,6 @@ err_sw_init:
err_ioremap:
free_netdev(netdev);
err_alloc_etherdev:
- pci_disable_pcie_error_reporting(pdev);
pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
@@ -6684,8 +6732,6 @@ static void igc_remove(struct pci_dev *pdev)
free_netdev(netdev);
- pci_disable_pcie_error_reporting(pdev);
-
pci_disable_device(pdev);
}
diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c
index c34734d432e0..4e10ced736db 100644
--- a/drivers/net/ethernet/intel/igc/igc_ptp.c
+++ b/drivers/net/ethernet/intel/igc/igc_ptp.c
@@ -417,10 +417,12 @@ static int igc_ptp_verify_pin(struct ptp_clock_info *ptp, unsigned int pin,
*
* We need to convert the system time value stored in the RX/TXSTMP registers
* into a hwtstamp which can be used by the upper level timestamping functions.
+ *
+ * Returns 0 on success.
**/
-static void igc_ptp_systim_to_hwtstamp(struct igc_adapter *adapter,
- struct skb_shared_hwtstamps *hwtstamps,
- u64 systim)
+static int igc_ptp_systim_to_hwtstamp(struct igc_adapter *adapter,
+ struct skb_shared_hwtstamps *hwtstamps,
+ u64 systim)
{
switch (adapter->hw.mac.type) {
case igc_i225:
@@ -430,8 +432,9 @@ static void igc_ptp_systim_to_hwtstamp(struct igc_adapter *adapter,
systim & 0xFFFFFFFF);
break;
default:
- break;
+ return -EINVAL;
}
+ return 0;
}
/**
@@ -652,7 +655,8 @@ static void igc_ptp_tx_hwtstamp(struct igc_adapter *adapter)
regval = rd32(IGC_TXSTMPL);
regval |= (u64)rd32(IGC_TXSTMPH) << 32;
- igc_ptp_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
+ if (igc_ptp_systim_to_hwtstamp(adapter, &shhwtstamps, regval))
+ return;
switch (adapter->link_speed) {
case SPEED_10:
diff --git a/drivers/net/ethernet/intel/igc/igc_tsn.c b/drivers/net/ethernet/intel/igc/igc_tsn.c
index bb10d7b65232..a386c8d61dbf 100644
--- a/drivers/net/ethernet/intel/igc/igc_tsn.c
+++ b/drivers/net/ethernet/intel/igc/igc_tsn.c
@@ -2,6 +2,7 @@
/* Copyright (c) 2019 Intel Corporation */
#include "igc.h"
+#include "igc_hw.h"
#include "igc_tsn.h"
static bool is_any_launchtime(struct igc_adapter *adapter)
@@ -92,7 +93,8 @@ static int igc_tsn_disable_offload(struct igc_adapter *adapter)
tqavctrl = rd32(IGC_TQAVCTRL);
tqavctrl &= ~(IGC_TQAVCTRL_TRANSMIT_MODE_TSN |
- IGC_TQAVCTRL_ENHANCED_QAV);
+ IGC_TQAVCTRL_ENHANCED_QAV | IGC_TQAVCTRL_FUTSCDDIS);
+
wr32(IGC_TQAVCTRL, tqavctrl);
for (i = 0; i < adapter->num_tx_queues; i++) {
@@ -117,20 +119,10 @@ static int igc_tsn_enable_offload(struct igc_adapter *adapter)
ktime_t base_time, systim;
int i;
- cycle = adapter->cycle_time;
- base_time = adapter->base_time;
-
wr32(IGC_TSAUXC, 0);
wr32(IGC_DTXMXPKTSZ, IGC_DTXMXPKTSZ_TSN);
wr32(IGC_TXPBS, IGC_TXPBSIZE_TSN);
- tqavctrl = rd32(IGC_TQAVCTRL);
- tqavctrl |= IGC_TQAVCTRL_TRANSMIT_MODE_TSN | IGC_TQAVCTRL_ENHANCED_QAV;
- wr32(IGC_TQAVCTRL, tqavctrl);
-
- wr32(IGC_QBVCYCLET_S, cycle);
- wr32(IGC_QBVCYCLET, cycle);
-
for (i = 0; i < adapter->num_tx_queues; i++) {
struct igc_ring *ring = adapter->tx_ring[i];
u32 txqctl = 0;
@@ -233,21 +225,46 @@ skip_cbs:
wr32(IGC_TXQCTL(i), txqctl);
}
+ tqavctrl = rd32(IGC_TQAVCTRL) & ~IGC_TQAVCTRL_FUTSCDDIS;
+ tqavctrl |= IGC_TQAVCTRL_TRANSMIT_MODE_TSN | IGC_TQAVCTRL_ENHANCED_QAV;
+
+ cycle = adapter->cycle_time;
+ base_time = adapter->base_time;
+
nsec = rd32(IGC_SYSTIML);
sec = rd32(IGC_SYSTIMH);
systim = ktime_set(sec, nsec);
-
if (ktime_compare(systim, base_time) > 0) {
- s64 n;
+ s64 n = div64_s64(ktime_sub_ns(systim, base_time), cycle);
- n = div64_s64(ktime_sub_ns(systim, base_time), cycle);
base_time = ktime_add_ns(base_time, (n + 1) * cycle);
+ } else {
+ /* According to datasheet section 7.5.2.9.3.3, FutScdDis bit
+ * has to be configured before the cycle time and base time.
+ * Tx won't hang if there is a GCL is already running,
+ * so in this case we don't need to set FutScdDis.
+ */
+ if (igc_is_device_id_i226(hw) &&
+ !(rd32(IGC_BASET_H) || rd32(IGC_BASET_L)))
+ tqavctrl |= IGC_TQAVCTRL_FUTSCDDIS;
}
- baset_h = div_s64_rem(base_time, NSEC_PER_SEC, &baset_l);
+ wr32(IGC_TQAVCTRL, tqavctrl);
+
+ wr32(IGC_QBVCYCLET_S, cycle);
+ wr32(IGC_QBVCYCLET, cycle);
+ baset_h = div_s64_rem(base_time, NSEC_PER_SEC, &baset_l);
wr32(IGC_BASET_H, baset_h);
+
+ /* In i226, Future base time is only supported when FutScdDis bit
+ * is enabled and only active for re-configuration.
+ * In this case, initialize the base time with zero to create
+ * "re-configuration" scenario then only set the desired base time.
+ */
+ if (tqavctrl & IGC_TQAVCTRL_FUTSCDDIS)
+ wr32(IGC_BASET_L, 0);
wr32(IGC_BASET_L, baset_l);
return 0;
@@ -274,17 +291,14 @@ int igc_tsn_reset(struct igc_adapter *adapter)
int igc_tsn_offload_apply(struct igc_adapter *adapter)
{
- int err;
+ struct igc_hw *hw = &adapter->hw;
- if (netif_running(adapter->netdev)) {
+ if (netif_running(adapter->netdev) && igc_is_device_id_i225(hw)) {
schedule_work(&adapter->reset_task);
return 0;
}
- err = igc_tsn_enable_offload(adapter);
- if (err < 0)
- return err;
+ igc_tsn_reset(adapter);
- adapter->flags = igc_tsn_new_flags(adapter);
return 0;
}
diff --git a/drivers/net/ethernet/intel/igc/igc_xdp.c b/drivers/net/ethernet/intel/igc/igc_xdp.c
index aeeb34e64610..e27af72aada8 100644
--- a/drivers/net/ethernet/intel/igc/igc_xdp.c
+++ b/drivers/net/ethernet/intel/igc/igc_xdp.c
@@ -29,6 +29,11 @@ int igc_xdp_set_prog(struct igc_adapter *adapter, struct bpf_prog *prog,
if (old_prog)
bpf_prog_put(old_prog);
+ if (prog)
+ xdp_features_set_redirect_target(dev, true);
+ else
+ xdp_features_clear_redirect_target(dev);
+
if (if_running)
igc_open(dev);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index bc68b8f2176d..8736ca4b2628 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -73,6 +73,8 @@
#define IXGBE_RXBUFFER_4K 4096
#define IXGBE_MAX_RXBUFFER 16384 /* largest size for a single descriptor */
+#define IXGBE_PKT_HDR_PAD (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2))
+
/* Attempt to maximize the headroom available for incoming frames. We
* use a 2K buffer for receives and need 1536/1534 to store the data for
* the frame. This leaves us with 512 bytes of room. From that we need
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 38c4609bd429..878dd8dff528 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -3292,13 +3292,14 @@ static bool ixgbe_need_crosstalk_fix(struct ixgbe_hw *hw)
s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
bool *link_up, bool link_up_wait_to_complete)
{
+ bool crosstalk_fix_active = ixgbe_need_crosstalk_fix(hw);
u32 links_reg, links_orig;
u32 i;
/* If Crosstalk fix enabled do the sanity check of making sure
* the SFP+ cage is full.
*/
- if (ixgbe_need_crosstalk_fix(hw)) {
+ if (crosstalk_fix_active) {
u32 sfp_cage_full;
switch (hw->mac.type) {
@@ -3346,10 +3347,24 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
}
} else {
- if (links_reg & IXGBE_LINKS_UP)
+ if (links_reg & IXGBE_LINKS_UP) {
+ if (crosstalk_fix_active) {
+ /* Check the link state again after a delay
+ * to filter out spurious link up
+ * notifications.
+ */
+ mdelay(5);
+ links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
+ if (!(links_reg & IXGBE_LINKS_UP)) {
+ *link_up = false;
+ *speed = IXGBE_LINK_SPEED_UNKNOWN;
+ return 0;
+ }
+ }
*link_up = true;
- else
+ } else {
*link_up = false;
+ }
}
switch (links_reg & IXGBE_LINKS_SPEED_82599) {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
index 53a969e34883..13a6fca31004 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
@@ -557,8 +557,10 @@ static int ixgbe_ipsec_check_mgmt_ip(struct xfrm_state *xs)
/**
* ixgbe_ipsec_add_sa - program device with a security association
* @xs: pointer to transformer state struct
+ * @extack: extack point to fill failure reason
**/
-static int ixgbe_ipsec_add_sa(struct xfrm_state *xs)
+static int ixgbe_ipsec_add_sa(struct xfrm_state *xs,
+ struct netlink_ext_ack *extack)
{
struct net_device *dev = xs->xso.real_dev;
struct ixgbe_adapter *adapter = netdev_priv(dev);
@@ -570,23 +572,22 @@ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs)
int i;
if (xs->id.proto != IPPROTO_ESP && xs->id.proto != IPPROTO_AH) {
- netdev_err(dev, "Unsupported protocol 0x%04x for ipsec offload\n",
- xs->id.proto);
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported protocol for ipsec offload");
return -EINVAL;
}
if (xs->props.mode != XFRM_MODE_TRANSPORT) {
- netdev_err(dev, "Unsupported mode for ipsec offload\n");
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported mode for ipsec offload");
return -EINVAL;
}
if (ixgbe_ipsec_check_mgmt_ip(xs)) {
- netdev_err(dev, "IPsec IP addr clash with mgmt filters\n");
+ NL_SET_ERR_MSG_MOD(extack, "IPsec IP addr clash with mgmt filters");
return -EINVAL;
}
if (xs->xso.type != XFRM_DEV_OFFLOAD_CRYPTO) {
- netdev_err(dev, "Unsupported ipsec offload type\n");
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported ipsec offload type");
return -EINVAL;
}
@@ -594,14 +595,14 @@ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs)
struct rx_sa rsa;
if (xs->calg) {
- netdev_err(dev, "Compression offload not supported\n");
+ NL_SET_ERR_MSG_MOD(extack, "Compression offload not supported");
return -EINVAL;
}
/* find the first unused index */
ret = ixgbe_ipsec_find_empty_idx(ipsec, true);
if (ret < 0) {
- netdev_err(dev, "No space for SA in Rx table!\n");
+ NL_SET_ERR_MSG_MOD(extack, "No space for SA in Rx table!");
return ret;
}
sa_idx = (u16)ret;
@@ -616,7 +617,7 @@ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs)
/* get the key and salt */
ret = ixgbe_ipsec_parse_proto_keys(xs, rsa.key, &rsa.salt);
if (ret) {
- netdev_err(dev, "Failed to get key data for Rx SA table\n");
+ NL_SET_ERR_MSG_MOD(extack, "Failed to get key data for Rx SA table");
return ret;
}
@@ -676,7 +677,7 @@ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs)
} else {
/* no match and no empty slot */
- netdev_err(dev, "No space for SA in Rx IP SA table\n");
+ NL_SET_ERR_MSG_MOD(extack, "No space for SA in Rx IP SA table");
memset(&rsa, 0, sizeof(rsa));
return -ENOSPC;
}
@@ -711,7 +712,7 @@ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs)
/* find the first unused index */
ret = ixgbe_ipsec_find_empty_idx(ipsec, false);
if (ret < 0) {
- netdev_err(dev, "No space for SA in Tx table\n");
+ NL_SET_ERR_MSG_MOD(extack, "No space for SA in Tx table");
return ret;
}
sa_idx = (u16)ret;
@@ -725,7 +726,7 @@ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs)
ret = ixgbe_ipsec_parse_proto_keys(xs, tsa.key, &tsa.salt);
if (ret) {
- netdev_err(dev, "Failed to get key data for Tx SA table\n");
+ NL_SET_ERR_MSG_MOD(extack, "Failed to get key data for Tx SA table");
memset(&tsa, 0, sizeof(tsa));
return ret;
}
@@ -950,7 +951,7 @@ int ixgbe_ipsec_vf_add_sa(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
memcpy(xs->aead->alg_name, aes_gcm_name, sizeof(aes_gcm_name));
/* set up the HW offload */
- err = ixgbe_ipsec_add_sa(xs);
+ err = ixgbe_ipsec_add_sa(xs, NULL);
if (err)
goto err_aead;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index ab8370c413f3..773c35fecace 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -6647,7 +6647,7 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
rx_ring->queue_index, ixgbe_rx_napi_id(rx_ring)) < 0)
goto err;
- rx_ring->xdp_prog = adapter->xdp_prog;
+ WRITE_ONCE(rx_ring->xdp_prog, adapter->xdp_prog);
return 0;
err:
@@ -6778,6 +6778,18 @@ static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
}
/**
+ * ixgbe_max_xdp_frame_size - returns the maximum allowed frame size for XDP
+ * @adapter: device handle, pointer to adapter
+ */
+static int ixgbe_max_xdp_frame_size(struct ixgbe_adapter *adapter)
+{
+ if (PAGE_SIZE >= 8192 || adapter->flags2 & IXGBE_FLAG2_RX_LEGACY)
+ return IXGBE_RXBUFFER_2K;
+ else
+ return IXGBE_RXBUFFER_3K;
+}
+
+/**
* ixgbe_change_mtu - Change the Maximum Transfer Unit
* @netdev: network interface device structure
* @new_mtu: new value for maximum frame size
@@ -6788,18 +6800,12 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- if (adapter->xdp_prog) {
- int new_frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN +
- VLAN_HLEN;
- int i;
-
- for (i = 0; i < adapter->num_rx_queues; i++) {
- struct ixgbe_ring *ring = adapter->rx_ring[i];
+ if (ixgbe_enabled_xdp_adapter(adapter)) {
+ int new_frame_size = new_mtu + IXGBE_PKT_HDR_PAD;
- if (new_frame_size > ixgbe_rx_bufsz(ring)) {
- e_warn(probe, "Requested MTU size is not supported with XDP\n");
- return -EINVAL;
- }
+ if (new_frame_size > ixgbe_max_xdp_frame_size(adapter)) {
+ e_warn(probe, "Requested MTU size is not supported with XDP\n");
+ return -EINVAL;
}
}
@@ -8937,7 +8943,8 @@ ixgbe_mdio_read(struct net_device *netdev, int prtad, int devad, u16 addr)
int regnum = addr;
if (devad != MDIO_DEVAD_NONE)
- regnum |= (devad << 16) | MII_ADDR_C45;
+ return mdiobus_c45_read(adapter->mii_bus, prtad,
+ devad, regnum);
return mdiobus_read(adapter->mii_bus, prtad, regnum);
}
@@ -8960,7 +8967,8 @@ static int ixgbe_mdio_write(struct net_device *netdev, int prtad, int devad,
int regnum = addr;
if (devad != MDIO_DEVAD_NONE)
- regnum |= (devad << 16) | MII_ADDR_C45;
+ return mdiobus_c45_write(adapter->mii_bus, prtad, devad,
+ regnum, value);
return mdiobus_write(adapter->mii_bus, prtad, regnum, value);
}
@@ -10297,14 +10305,15 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
synchronize_rcu();
err = ixgbe_setup_tc(dev, adapter->hw_tcs);
- if (err) {
- rcu_assign_pointer(adapter->xdp_prog, old_prog);
+ if (err)
return -EINVAL;
- }
+ if (!prog)
+ xdp_features_clear_redirect_target(dev);
} else {
- for (i = 0; i < adapter->num_rx_queues; i++)
- (void)xchg(&adapter->rx_ring[i]->xdp_prog,
- adapter->xdp_prog);
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ WRITE_ONCE(adapter->rx_ring[i]->xdp_prog,
+ adapter->xdp_prog);
+ }
}
if (old_prog)
@@ -10320,6 +10329,7 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
if (adapter->xdp_ring[i]->xsk_pool)
(void)ixgbe_xsk_wakeup(adapter->netdev, i,
XDP_WAKEUP_RX);
+ xdp_features_set_redirect_target(dev, true);
}
return 0;
@@ -10808,8 +10818,6 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_pci_reg;
}
- pci_enable_pcie_error_reporting(pdev);
-
pci_set_master(pdev);
pci_save_state(pdev);
@@ -11017,6 +11025,9 @@ skip_sriov:
netdev->priv_flags |= IFF_UNICAST_FLT;
netdev->priv_flags |= IFF_SUPP_NOFCS;
+ netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_XSK_ZEROCOPY;
+
/* MTU range: 68 - 9710 */
netdev->min_mtu = ETH_MIN_MTU;
netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN);
@@ -11237,7 +11248,6 @@ err_ioremap:
disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
free_netdev(netdev);
err_alloc_etherdev:
- pci_disable_pcie_error_reporting(pdev);
pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
@@ -11326,8 +11336,6 @@ static void ixgbe_remove(struct pci_dev *pdev)
disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
free_netdev(netdev);
- pci_disable_pcie_error_reporting(pdev);
-
if (disable_dev)
pci_disable_device(pdev);
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index 123dca9ce468..689470c1e8ad 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -680,14 +680,14 @@ static s32 ixgbe_msca_cmd(struct ixgbe_hw *hw, u32 cmd)
}
/**
- * ixgbe_mii_bus_read_generic - Read a clause 22/45 register with gssr flags
+ * ixgbe_mii_bus_read_generic_c22 - Read a clause 22 register with gssr flags
* @hw: pointer to hardware structure
* @addr: address
* @regnum: register number
* @gssr: semaphore flags to acquire
**/
-static s32 ixgbe_mii_bus_read_generic(struct ixgbe_hw *hw, int addr,
- int regnum, u32 gssr)
+static s32 ixgbe_mii_bus_read_generic_c22(struct ixgbe_hw *hw, int addr,
+ int regnum, u32 gssr)
{
u32 hwaddr, cmd;
s32 data;
@@ -696,31 +696,52 @@ static s32 ixgbe_mii_bus_read_generic(struct ixgbe_hw *hw, int addr,
return -EBUSY;
hwaddr = addr << IXGBE_MSCA_PHY_ADDR_SHIFT;
- if (regnum & MII_ADDR_C45) {
- hwaddr |= regnum & GENMASK(21, 0);
- cmd = hwaddr | IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND;
- } else {
- hwaddr |= (regnum & GENMASK(5, 0)) << IXGBE_MSCA_DEV_TYPE_SHIFT;
- cmd = hwaddr | IXGBE_MSCA_OLD_PROTOCOL |
- IXGBE_MSCA_READ_AUTOINC | IXGBE_MSCA_MDI_COMMAND;
- }
+ hwaddr |= (regnum & GENMASK(5, 0)) << IXGBE_MSCA_DEV_TYPE_SHIFT;
+ cmd = hwaddr | IXGBE_MSCA_OLD_PROTOCOL |
+ IXGBE_MSCA_READ_AUTOINC | IXGBE_MSCA_MDI_COMMAND;
data = ixgbe_msca_cmd(hw, cmd);
if (data < 0)
goto mii_bus_read_done;
- /* For a clause 45 access the address cycle just completed, we still
- * need to do the read command, otherwise just get the data
- */
- if (!(regnum & MII_ADDR_C45))
- goto do_mii_bus_read;
+ data = IXGBE_READ_REG(hw, IXGBE_MSRWD);
+ data = (data >> IXGBE_MSRWD_READ_DATA_SHIFT) & GENMASK(16, 0);
+
+mii_bus_read_done:
+ hw->mac.ops.release_swfw_sync(hw, gssr);
+ return data;
+}
+
+/**
+ * ixgbe_mii_bus_read_generic_c45 - Read a clause 45 register with gssr flags
+ * @hw: pointer to hardware structure
+ * @addr: address
+ * @devad: device address to read
+ * @regnum: register number
+ * @gssr: semaphore flags to acquire
+ **/
+static s32 ixgbe_mii_bus_read_generic_c45(struct ixgbe_hw *hw, int addr,
+ int devad, int regnum, u32 gssr)
+{
+ u32 hwaddr, cmd;
+ s32 data;
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, gssr))
+ return -EBUSY;
+
+ hwaddr = addr << IXGBE_MSCA_PHY_ADDR_SHIFT;
+ hwaddr |= devad << 16 | regnum;
+ cmd = hwaddr | IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND;
+
+ data = ixgbe_msca_cmd(hw, cmd);
+ if (data < 0)
+ goto mii_bus_read_done;
cmd = hwaddr | IXGBE_MSCA_READ | IXGBE_MSCA_MDI_COMMAND;
data = ixgbe_msca_cmd(hw, cmd);
if (data < 0)
goto mii_bus_read_done;
-do_mii_bus_read:
data = IXGBE_READ_REG(hw, IXGBE_MSRWD);
data = (data >> IXGBE_MSRWD_READ_DATA_SHIFT) & GENMASK(16, 0);
@@ -730,15 +751,15 @@ mii_bus_read_done:
}
/**
- * ixgbe_mii_bus_write_generic - Write a clause 22/45 register with gssr flags
+ * ixgbe_mii_bus_write_generic_c22 - Write a clause 22 register with gssr flags
* @hw: pointer to hardware structure
* @addr: address
* @regnum: register number
* @val: value to write
* @gssr: semaphore flags to acquire
**/
-static s32 ixgbe_mii_bus_write_generic(struct ixgbe_hw *hw, int addr,
- int regnum, u16 val, u32 gssr)
+static s32 ixgbe_mii_bus_write_generic_c22(struct ixgbe_hw *hw, int addr,
+ int regnum, u16 val, u32 gssr)
{
u32 hwaddr, cmd;
s32 err;
@@ -749,20 +770,43 @@ static s32 ixgbe_mii_bus_write_generic(struct ixgbe_hw *hw, int addr,
IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)val);
hwaddr = addr << IXGBE_MSCA_PHY_ADDR_SHIFT;
- if (regnum & MII_ADDR_C45) {
- hwaddr |= regnum & GENMASK(21, 0);
- cmd = hwaddr | IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND;
- } else {
- hwaddr |= (regnum & GENMASK(5, 0)) << IXGBE_MSCA_DEV_TYPE_SHIFT;
- cmd = hwaddr | IXGBE_MSCA_OLD_PROTOCOL | IXGBE_MSCA_WRITE |
- IXGBE_MSCA_MDI_COMMAND;
- }
+ hwaddr |= (regnum & GENMASK(5, 0)) << IXGBE_MSCA_DEV_TYPE_SHIFT;
+ cmd = hwaddr | IXGBE_MSCA_OLD_PROTOCOL | IXGBE_MSCA_WRITE |
+ IXGBE_MSCA_MDI_COMMAND;
+
+ err = ixgbe_msca_cmd(hw, cmd);
+
+ hw->mac.ops.release_swfw_sync(hw, gssr);
+ return err;
+}
+
+/**
+ * ixgbe_mii_bus_write_generic_c45 - Write a clause 45 register with gssr flags
+ * @hw: pointer to hardware structure
+ * @addr: address
+ * @devad: device address to read
+ * @regnum: register number
+ * @val: value to write
+ * @gssr: semaphore flags to acquire
+ **/
+static s32 ixgbe_mii_bus_write_generic_c45(struct ixgbe_hw *hw, int addr,
+ int devad, int regnum, u16 val,
+ u32 gssr)
+{
+ u32 hwaddr, cmd;
+ s32 err;
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, gssr))
+ return -EBUSY;
+
+ IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)val);
+
+ hwaddr = addr << IXGBE_MSCA_PHY_ADDR_SHIFT;
+ hwaddr |= devad << 16 | regnum;
+ cmd = hwaddr | IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND;
- /* For clause 45 this is an address cycle, for clause 22 this is the
- * entire transaction
- */
err = ixgbe_msca_cmd(hw, cmd);
- if (err < 0 || !(regnum & MII_ADDR_C45))
+ if (err < 0)
goto mii_bus_write_done;
cmd = hwaddr | IXGBE_MSCA_WRITE | IXGBE_MSCA_MDI_COMMAND;
@@ -774,70 +818,144 @@ mii_bus_write_done:
}
/**
- * ixgbe_mii_bus_read - Read a clause 22/45 register
+ * ixgbe_mii_bus_read_c22 - Read a clause 22 register
+ * @bus: pointer to mii_bus structure which points to our driver private
+ * @addr: address
+ * @regnum: register number
+ **/
+static s32 ixgbe_mii_bus_read_c22(struct mii_bus *bus, int addr, int regnum)
+{
+ struct ixgbe_adapter *adapter = bus->priv;
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 gssr = hw->phy.phy_semaphore_mask;
+
+ return ixgbe_mii_bus_read_generic_c22(hw, addr, regnum, gssr);
+}
+
+/**
+ * ixgbe_mii_bus_read_c45 - Read a clause 45 register
* @bus: pointer to mii_bus structure which points to our driver private
+ * @devad: device address to read
* @addr: address
* @regnum: register number
**/
-static s32 ixgbe_mii_bus_read(struct mii_bus *bus, int addr, int regnum)
+static s32 ixgbe_mii_bus_read_c45(struct mii_bus *bus, int devad, int addr,
+ int regnum)
+{
+ struct ixgbe_adapter *adapter = bus->priv;
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 gssr = hw->phy.phy_semaphore_mask;
+
+ return ixgbe_mii_bus_read_generic_c45(hw, addr, devad, regnum, gssr);
+}
+
+/**
+ * ixgbe_mii_bus_write_c22 - Write a clause 22 register
+ * @bus: pointer to mii_bus structure which points to our driver private
+ * @addr: address
+ * @regnum: register number
+ * @val: value to write
+ **/
+static s32 ixgbe_mii_bus_write_c22(struct mii_bus *bus, int addr, int regnum,
+ u16 val)
{
struct ixgbe_adapter *adapter = bus->priv;
struct ixgbe_hw *hw = &adapter->hw;
u32 gssr = hw->phy.phy_semaphore_mask;
- return ixgbe_mii_bus_read_generic(hw, addr, regnum, gssr);
+ return ixgbe_mii_bus_write_generic_c22(hw, addr, regnum, val, gssr);
}
/**
- * ixgbe_mii_bus_write - Write a clause 22/45 register
+ * ixgbe_mii_bus_write_c45 - Write a clause 45 register
* @bus: pointer to mii_bus structure which points to our driver private
* @addr: address
+ * @devad: device address to read
* @regnum: register number
* @val: value to write
**/
-static s32 ixgbe_mii_bus_write(struct mii_bus *bus, int addr, int regnum,
- u16 val)
+static s32 ixgbe_mii_bus_write_c45(struct mii_bus *bus, int addr, int devad,
+ int regnum, u16 val)
{
struct ixgbe_adapter *adapter = bus->priv;
struct ixgbe_hw *hw = &adapter->hw;
u32 gssr = hw->phy.phy_semaphore_mask;
- return ixgbe_mii_bus_write_generic(hw, addr, regnum, val, gssr);
+ return ixgbe_mii_bus_write_generic_c45(hw, addr, devad, regnum, val,
+ gssr);
}
/**
- * ixgbe_x550em_a_mii_bus_read - Read a clause 22/45 register on x550em_a
+ * ixgbe_x550em_a_mii_bus_read_c22 - Read a clause 22 register on x550em_a
* @bus: pointer to mii_bus structure which points to our driver private
* @addr: address
* @regnum: register number
**/
-static s32 ixgbe_x550em_a_mii_bus_read(struct mii_bus *bus, int addr,
- int regnum)
+static s32 ixgbe_x550em_a_mii_bus_read_c22(struct mii_bus *bus, int addr,
+ int regnum)
+{
+ struct ixgbe_adapter *adapter = bus->priv;
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 gssr = hw->phy.phy_semaphore_mask;
+
+ gssr |= IXGBE_GSSR_TOKEN_SM | IXGBE_GSSR_PHY0_SM;
+ return ixgbe_mii_bus_read_generic_c22(hw, addr, regnum, gssr);
+}
+
+/**
+ * ixgbe_x550em_a_mii_bus_read_c45 - Read a clause 45 register on x550em_a
+ * @bus: pointer to mii_bus structure which points to our driver private
+ * @addr: address
+ * @devad: device address to read
+ * @regnum: register number
+ **/
+static s32 ixgbe_x550em_a_mii_bus_read_c45(struct mii_bus *bus, int addr,
+ int devad, int regnum)
+{
+ struct ixgbe_adapter *adapter = bus->priv;
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 gssr = hw->phy.phy_semaphore_mask;
+
+ gssr |= IXGBE_GSSR_TOKEN_SM | IXGBE_GSSR_PHY0_SM;
+ return ixgbe_mii_bus_read_generic_c45(hw, addr, devad, regnum, gssr);
+}
+
+/**
+ * ixgbe_x550em_a_mii_bus_write_c22 - Write a clause 22 register on x550em_a
+ * @bus: pointer to mii_bus structure which points to our driver private
+ * @addr: address
+ * @regnum: register number
+ * @val: value to write
+ **/
+static s32 ixgbe_x550em_a_mii_bus_write_c22(struct mii_bus *bus, int addr,
+ int regnum, u16 val)
{
struct ixgbe_adapter *adapter = bus->priv;
struct ixgbe_hw *hw = &adapter->hw;
u32 gssr = hw->phy.phy_semaphore_mask;
gssr |= IXGBE_GSSR_TOKEN_SM | IXGBE_GSSR_PHY0_SM;
- return ixgbe_mii_bus_read_generic(hw, addr, regnum, gssr);
+ return ixgbe_mii_bus_write_generic_c22(hw, addr, regnum, val, gssr);
}
/**
- * ixgbe_x550em_a_mii_bus_write - Write a clause 22/45 register on x550em_a
+ * ixgbe_x550em_a_mii_bus_write_c45 - Write a clause 45 register on x550em_a
* @bus: pointer to mii_bus structure which points to our driver private
* @addr: address
+ * @devad: device address to read
* @regnum: register number
* @val: value to write
**/
-static s32 ixgbe_x550em_a_mii_bus_write(struct mii_bus *bus, int addr,
- int regnum, u16 val)
+static s32 ixgbe_x550em_a_mii_bus_write_c45(struct mii_bus *bus, int addr,
+ int devad, int regnum, u16 val)
{
struct ixgbe_adapter *adapter = bus->priv;
struct ixgbe_hw *hw = &adapter->hw;
u32 gssr = hw->phy.phy_semaphore_mask;
gssr |= IXGBE_GSSR_TOKEN_SM | IXGBE_GSSR_PHY0_SM;
- return ixgbe_mii_bus_write_generic(hw, addr, regnum, val, gssr);
+ return ixgbe_mii_bus_write_generic_c45(hw, addr, devad, regnum, val,
+ gssr);
}
/**
@@ -909,8 +1027,11 @@ out:
**/
s32 ixgbe_mii_bus_init(struct ixgbe_hw *hw)
{
- s32 (*write)(struct mii_bus *bus, int addr, int regnum, u16 val);
- s32 (*read)(struct mii_bus *bus, int addr, int regnum);
+ s32 (*write_c22)(struct mii_bus *bus, int addr, int regnum, u16 val);
+ s32 (*read_c22)(struct mii_bus *bus, int addr, int regnum);
+ s32 (*write_c45)(struct mii_bus *bus, int addr, int devad, int regnum,
+ u16 val);
+ s32 (*read_c45)(struct mii_bus *bus, int addr, int devad, int regnum);
struct ixgbe_adapter *adapter = hw->back;
struct pci_dev *pdev = adapter->pdev;
struct device *dev = &adapter->netdev->dev;
@@ -929,12 +1050,16 @@ s32 ixgbe_mii_bus_init(struct ixgbe_hw *hw)
case IXGBE_DEV_ID_X550EM_A_1G_T_L:
if (!ixgbe_x550em_a_has_mii(hw))
return 0;
- read = &ixgbe_x550em_a_mii_bus_read;
- write = &ixgbe_x550em_a_mii_bus_write;
+ read_c22 = ixgbe_x550em_a_mii_bus_read_c22;
+ write_c22 = ixgbe_x550em_a_mii_bus_write_c22;
+ read_c45 = ixgbe_x550em_a_mii_bus_read_c45;
+ write_c45 = ixgbe_x550em_a_mii_bus_write_c45;
break;
default:
- read = &ixgbe_mii_bus_read;
- write = &ixgbe_mii_bus_write;
+ read_c22 = ixgbe_mii_bus_read_c22;
+ write_c22 = ixgbe_mii_bus_write_c22;
+ read_c45 = ixgbe_mii_bus_read_c45;
+ write_c45 = ixgbe_mii_bus_write_c45;
break;
}
@@ -942,8 +1067,10 @@ s32 ixgbe_mii_bus_init(struct ixgbe_hw *hw)
if (!bus)
return -ENOMEM;
- bus->read = read;
- bus->write = write;
+ bus->read = read_c22;
+ bus->write = write_c22;
+ bus->read_c45 = read_c45;
+ bus->write_c45 = write_c45;
/* Use the position of the device in the PCI hierarchy as the id */
snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mdio-%s", ixgbe_driver_name,
diff --git a/drivers/net/ethernet/intel/ixgbevf/ipsec.c b/drivers/net/ethernet/intel/ixgbevf/ipsec.c
index c1cf540d162a..66cf17f19408 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ipsec.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ipsec.c
@@ -257,8 +257,10 @@ static int ixgbevf_ipsec_parse_proto_keys(struct xfrm_state *xs,
/**
* ixgbevf_ipsec_add_sa - program device with a security association
* @xs: pointer to transformer state struct
+ * @extack: extack point to fill failure reason
**/
-static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs)
+static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs,
+ struct netlink_ext_ack *extack)
{
struct net_device *dev = xs->xso.real_dev;
struct ixgbevf_adapter *adapter;
@@ -270,18 +272,17 @@ static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs)
ipsec = adapter->ipsec;
if (xs->id.proto != IPPROTO_ESP && xs->id.proto != IPPROTO_AH) {
- netdev_err(dev, "Unsupported protocol 0x%04x for IPsec offload\n",
- xs->id.proto);
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported protocol for IPsec offload");
return -EINVAL;
}
if (xs->props.mode != XFRM_MODE_TRANSPORT) {
- netdev_err(dev, "Unsupported mode for ipsec offload\n");
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported mode for ipsec offload");
return -EINVAL;
}
if (xs->xso.type != XFRM_DEV_OFFLOAD_CRYPTO) {
- netdev_err(dev, "Unsupported ipsec offload type\n");
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported ipsec offload type");
return -EINVAL;
}
@@ -289,14 +290,14 @@ static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs)
struct rx_sa rsa;
if (xs->calg) {
- netdev_err(dev, "Compression offload not supported\n");
+ NL_SET_ERR_MSG_MOD(extack, "Compression offload not supported");
return -EINVAL;
}
/* find the first unused index */
ret = ixgbevf_ipsec_find_empty_idx(ipsec, true);
if (ret < 0) {
- netdev_err(dev, "No space for SA in Rx table!\n");
+ NL_SET_ERR_MSG_MOD(extack, "No space for SA in Rx table!");
return ret;
}
sa_idx = (u16)ret;
@@ -311,7 +312,7 @@ static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs)
/* get the key and salt */
ret = ixgbevf_ipsec_parse_proto_keys(xs, rsa.key, &rsa.salt);
if (ret) {
- netdev_err(dev, "Failed to get key data for Rx SA table\n");
+ NL_SET_ERR_MSG_MOD(extack, "Failed to get key data for Rx SA table");
return ret;
}
@@ -350,7 +351,7 @@ static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs)
/* find the first unused index */
ret = ixgbevf_ipsec_find_empty_idx(ipsec, false);
if (ret < 0) {
- netdev_err(dev, "No space for SA in Tx table\n");
+ NL_SET_ERR_MSG_MOD(extack, "No space for SA in Tx table");
return ret;
}
sa_idx = (u16)ret;
@@ -364,7 +365,7 @@ static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs)
ret = ixgbevf_ipsec_parse_proto_keys(xs, tsa.key, &tsa.salt);
if (ret) {
- netdev_err(dev, "Failed to get key data for Tx SA table\n");
+ NL_SET_ERR_MSG_MOD(extack, "Failed to get key data for Tx SA table");
memset(&tsa, 0, sizeof(tsa));
return ret;
}
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index ea0a230c1153..a44e4bd56142 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -4634,6 +4634,7 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
NETIF_F_HW_VLAN_CTAG_TX;
netdev->priv_flags |= IFF_UNICAST_FLT;
+ netdev->xdp_features = NETDEV_XDP_ACT_BASIC;
/* MTU range: 68 - 1504 or 9710 */
netdev->min_mtu = ETH_MIN_MTU;
diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c
index ef878973b859..8662543ca5c8 100644
--- a/drivers/net/ethernet/marvell/mvmdio.c
+++ b/drivers/net/ethernet/marvell/mvmdio.c
@@ -146,9 +146,6 @@ static int orion_mdio_smi_read(struct mii_bus *bus, int mii_id,
u32 val;
int ret;
- if (regnum & MII_ADDR_C45)
- return -EOPNOTSUPP;
-
ret = orion_mdio_wait_ready(&orion_mdio_smi_ops, bus);
if (ret < 0)
return ret;
@@ -177,9 +174,6 @@ static int orion_mdio_smi_write(struct mii_bus *bus, int mii_id,
struct orion_mdio_dev *dev = bus->priv;
int ret;
- if (regnum & MII_ADDR_C45)
- return -EOPNOTSUPP;
-
ret = orion_mdio_wait_ready(&orion_mdio_smi_ops, bus);
if (ret < 0)
return ret;
@@ -204,21 +198,17 @@ static const struct orion_mdio_ops orion_mdio_xsmi_ops = {
.poll_interval_max = MVMDIO_XSMI_POLL_INTERVAL_MAX,
};
-static int orion_mdio_xsmi_read(struct mii_bus *bus, int mii_id,
- int regnum)
+static int orion_mdio_xsmi_read_c45(struct mii_bus *bus, int mii_id,
+ int dev_addr, int regnum)
{
struct orion_mdio_dev *dev = bus->priv;
- u16 dev_addr = (regnum >> 16) & GENMASK(4, 0);
int ret;
- if (!(regnum & MII_ADDR_C45))
- return -EOPNOTSUPP;
-
ret = orion_mdio_wait_ready(&orion_mdio_xsmi_ops, bus);
if (ret < 0)
return ret;
- writel(regnum & GENMASK(15, 0), dev->regs + MVMDIO_XSMI_ADDR_REG);
+ writel(regnum, dev->regs + MVMDIO_XSMI_ADDR_REG);
writel((mii_id << MVMDIO_XSMI_PHYADDR_SHIFT) |
(dev_addr << MVMDIO_XSMI_DEVADDR_SHIFT) |
MVMDIO_XSMI_READ_OPERATION,
@@ -237,21 +227,17 @@ static int orion_mdio_xsmi_read(struct mii_bus *bus, int mii_id,
return readl(dev->regs + MVMDIO_XSMI_MGNT_REG) & GENMASK(15, 0);
}
-static int orion_mdio_xsmi_write(struct mii_bus *bus, int mii_id,
- int regnum, u16 value)
+static int orion_mdio_xsmi_write_c45(struct mii_bus *bus, int mii_id,
+ int dev_addr, int regnum, u16 value)
{
struct orion_mdio_dev *dev = bus->priv;
- u16 dev_addr = (regnum >> 16) & GENMASK(4, 0);
int ret;
- if (!(regnum & MII_ADDR_C45))
- return -EOPNOTSUPP;
-
ret = orion_mdio_wait_ready(&orion_mdio_xsmi_ops, bus);
if (ret < 0)
return ret;
- writel(regnum & GENMASK(15, 0), dev->regs + MVMDIO_XSMI_ADDR_REG);
+ writel(regnum, dev->regs + MVMDIO_XSMI_ADDR_REG);
writel((mii_id << MVMDIO_XSMI_PHYADDR_SHIFT) |
(dev_addr << MVMDIO_XSMI_DEVADDR_SHIFT) |
MVMDIO_XSMI_WRITE_OPERATION | value,
@@ -302,8 +288,8 @@ static int orion_mdio_probe(struct platform_device *pdev)
bus->write = orion_mdio_smi_write;
break;
case BUS_TYPE_XSMI:
- bus->read = orion_mdio_xsmi_read;
- bus->write = orion_mdio_xsmi_write;
+ bus->read_c45 = orion_mdio_xsmi_read_c45;
+ bus->write_c45 = orion_mdio_xsmi_write_c45;
break;
}
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index f8925cac61e4..0e39d199ff06 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -38,7 +38,7 @@
#include <net/ipv6.h>
#include <net/tso.h>
#include <net/page_pool.h>
-#include <net/pkt_cls.h>
+#include <net/pkt_sched.h>
#include <linux/bpf_trace.h>
/* Registers */
@@ -5612,6 +5612,12 @@ static int mvneta_probe(struct platform_device *pdev)
NETIF_F_TSO | NETIF_F_RXCSUM;
dev->hw_features |= dev->features;
dev->vlan_features |= dev->features;
+ if (!pp->bm_priv)
+ dev->xdp_features = NETDEV_XDP_ACT_BASIC |
+ NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT |
+ NETDEV_XDP_ACT_RX_SG |
+ NETDEV_XDP_ACT_NDO_XMIT_SG;
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
netif_set_tso_max_segs(dev, MVNETA_MAX_TSO_SEGS);
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 4da45c5abba5..9b4ecbe4f36d 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -6866,6 +6866,10 @@ static int mvpp2_port_probe(struct platform_device *pdev,
dev->vlan_features |= features;
netif_set_tso_max_segs(dev, MVPP2_MAX_TSO_SEGS);
+
+ dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT;
+
dev->priv_flags |= IFF_UNICAST_FLT;
/* MTU range: 68 - 9704 */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index d2584ebb7a70..5727d67e0259 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -195,6 +195,9 @@ M(CPT_STATS, 0xA05, cpt_sts, cpt_sts_req, cpt_sts_rsp) \
M(CPT_RXC_TIME_CFG, 0xA06, cpt_rxc_time_cfg, cpt_rxc_time_cfg_req, \
msg_rsp) \
M(CPT_CTX_CACHE_SYNC, 0xA07, cpt_ctx_cache_sync, msg_req, msg_rsp) \
+M(CPT_LF_RESET, 0xA08, cpt_lf_reset, cpt_lf_rst_req, msg_rsp) \
+M(CPT_FLT_ENG_INFO, 0xA09, cpt_flt_eng_info, cpt_flt_eng_info_req, \
+ cpt_flt_eng_info_rsp) \
/* SDP mbox IDs (range 0x1000 - 0x11FF) */ \
M(SET_SDP_CHAN_INFO, 0x1000, set_sdp_chan_info, sdp_chan_info_msg, msg_rsp) \
M(GET_SDP_CHAN_INFO, 0x1001, get_sdp_chan_info, msg_req, sdp_get_chan_info_msg) \
@@ -297,6 +300,8 @@ M(NIX_BANDPROF_FREE, 0x801e, nix_bandprof_free, nix_bandprof_free_req, \
msg_rsp) \
M(NIX_BANDPROF_GET_HWINFO, 0x801f, nix_bandprof_get_hwinfo, msg_req, \
nix_bandprof_get_hwinfo_rsp) \
+M(NIX_READ_INLINE_IPSEC_CFG, 0x8023, nix_read_inline_ipsec_cfg, \
+ msg_req, nix_inline_ipsec_cfg) \
/* MCS mbox IDs (range 0xA000 - 0xBFFF) */ \
M(MCS_ALLOC_RESOURCES, 0xa000, mcs_alloc_resources, mcs_alloc_rsrc_req, \
mcs_alloc_rsrc_rsp) \
@@ -1196,7 +1201,7 @@ struct nix_inline_ipsec_cfg {
u32 cpt_credit;
struct {
u8 egrp;
- u8 opcode;
+ u16 opcode;
u16 param1;
u16 param2;
} gen_cfg;
@@ -1205,6 +1210,8 @@ struct nix_inline_ipsec_cfg {
u8 cpt_slot;
} inst_qsel;
u8 enable;
+ u16 bpid;
+ u32 credit_th;
};
/* Per NIX LF inline IPSec configuration */
@@ -1609,6 +1616,8 @@ struct cpt_lf_alloc_req_msg {
u16 sso_pf_func;
u16 eng_grpmsk;
int blkaddr;
+ u8 ctx_ilen_valid : 1;
+ u8 ctx_ilen : 7;
};
#define CPT_INLINE_INBOUND 0
@@ -1692,6 +1701,28 @@ struct cpt_inst_lmtst_req {
u64 rsvd;
};
+/* Mailbox message format to request for CPT LF reset */
+struct cpt_lf_rst_req {
+ struct mbox_msghdr hdr;
+ u32 slot;
+ u32 rsvd;
+};
+
+/* Mailbox message format to request for CPT faulted engines */
+struct cpt_flt_eng_info_req {
+ struct mbox_msghdr hdr;
+ int blkaddr;
+ bool reset;
+ u32 rsvd;
+};
+
+struct cpt_flt_eng_info_rsp {
+ struct mbox_msghdr hdr;
+ u64 flt_eng_map[CPT_10K_AF_INT_VEC_RVU];
+ u64 rcvrd_eng_map[CPT_10K_AF_INT_VEC_RVU];
+ u64 rsvd;
+};
+
struct sdp_node_info {
/* Node to which this PF belons to */
u8 node_id;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index 3f5e09b77d4b..8683ce57ed3f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -1164,8 +1164,16 @@ cpt:
goto nix_err;
}
+ err = rvu_cpt_init(rvu);
+ if (err) {
+ dev_err(rvu->dev, "%s: Failed to initialize cpt\n", __func__);
+ goto mcs_err;
+ }
+
return 0;
+mcs_err:
+ rvu_mcs_exit(rvu);
nix_err:
rvu_nix_freemem(rvu);
npa_err:
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index 7f0a64731c67..389663a13d1d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -108,6 +108,8 @@ struct rvu_block {
u64 lfreset_reg;
unsigned char name[NAME_SIZE];
struct rvu *rvu;
+ u64 cpt_flt_eng_map[3];
+ u64 cpt_rcvrd_eng_map[3];
};
struct nix_mcast {
@@ -459,6 +461,7 @@ struct rvu {
struct rvu_pfvf *pf;
struct rvu_pfvf *hwvf;
struct mutex rsrc_lock; /* Serialize resource alloc/free */
+ struct mutex alias_lock; /* Serialize bar2 alias access */
int vfs; /* Number of VFs attached to RVU */
int nix_blkaddr[MAX_NIX_BLKS];
@@ -510,6 +513,7 @@ struct rvu {
struct ptp *ptp;
int mcs_blk_cnt;
+ int cpt_pf_num;
#ifdef CONFIG_DEBUG_FS
struct rvu_debugfs rvu_dbg;
@@ -524,6 +528,8 @@ struct rvu {
struct list_head mcs_intrq_head;
/* mcs interrupt queue lock */
spinlock_t mcs_intrq_lock;
+ /* CPT interrupt lock */
+ spinlock_t cpt_intr_lock;
};
static inline void rvu_write64(struct rvu *rvu, u64 block, u64 offset, u64 val)
@@ -546,6 +552,17 @@ static inline u64 rvupf_read64(struct rvu *rvu, u64 offset)
return readq(rvu->pfreg_base + offset);
}
+static inline void rvu_bar2_sel_write64(struct rvu *rvu, u64 block, u64 offset, u64 val)
+{
+ /* HW requires read back of RVU_AF_BAR2_SEL register to make sure completion of
+ * write operation.
+ */
+ rvu_write64(rvu, block, offset, val);
+ rvu_read64(rvu, block, offset);
+ /* Barrier to ensure read completes before accessing LF registers */
+ mb();
+}
+
/* Silicon revisions */
static inline bool is_rvu_pre_96xx_C0(struct rvu *rvu)
{
@@ -865,11 +882,15 @@ void rvu_cpt_unregister_interrupts(struct rvu *rvu);
int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int lf,
int slot);
int rvu_cpt_ctx_flush(struct rvu *rvu, u16 pcifunc);
+int rvu_cpt_init(struct rvu *rvu);
/* CN10K RVU */
int rvu_set_channels_base(struct rvu *rvu);
void rvu_program_channels(struct rvu *rvu);
+/* CN10K NIX */
+void rvu_nix_block_cn10k_init(struct rvu *rvu, struct nix_hw *nix_hw);
+
/* CN10K RVU - LMT*/
void rvu_reset_lmt_map_tbl(struct rvu *rvu, u16 pcifunc);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
index 7dbbc115cde4..4ad9ff025c96 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
@@ -538,3 +538,21 @@ void rvu_program_channels(struct rvu *rvu)
rvu_lbk_set_channels(rvu);
rvu_rpm_set_channels(rvu);
}
+
+void rvu_nix_block_cn10k_init(struct rvu *rvu, struct nix_hw *nix_hw)
+{
+ int blkaddr = nix_hw->blkaddr;
+ u64 cfg;
+
+ /* Set AF vWQE timer interval to a LF configurable range of
+ * 6.4us to 1.632ms.
+ */
+ rvu_write64(rvu, blkaddr, NIX_AF_VWQE_TIMER, 0x3FULL);
+
+ /* Enable NIX RX stream and global conditional clock to
+ * avoild multiple free of NPA buffers.
+ */
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_CFG);
+ cfg |= BIT_ULL(1) | BIT_ULL(2);
+ rvu_write64(rvu, blkaddr, NIX_AF_CFG, cfg);
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
index 38bbae5d9ae0..f047185f38e0 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
@@ -17,7 +17,7 @@
#define PCI_DEVID_OTX2_CPT10K_PF 0xA0F2
/* Length of initial context fetch in 128 byte words */
-#define CPT_CTX_ILEN 2ULL
+#define CPT_CTX_ILEN 1ULL
#define cpt_get_eng_sts(e_min, e_max, rsp, etype) \
({ \
@@ -37,34 +37,68 @@
(_rsp)->free_sts_##etype = free_sts; \
})
-static irqreturn_t rvu_cpt_af_flt_intr_handler(int irq, void *ptr)
+static irqreturn_t cpt_af_flt_intr_handler(int vec, void *ptr)
{
struct rvu_block *block = ptr;
struct rvu *rvu = block->rvu;
int blkaddr = block->addr;
- u64 reg0, reg1, reg2;
-
- reg0 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(0));
- reg1 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(1));
- if (!is_rvu_otx2(rvu)) {
- reg2 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(2));
- dev_err_ratelimited(rvu->dev,
- "Received CPTAF FLT irq : 0x%llx, 0x%llx, 0x%llx",
- reg0, reg1, reg2);
- } else {
- dev_err_ratelimited(rvu->dev,
- "Received CPTAF FLT irq : 0x%llx, 0x%llx",
- reg0, reg1);
+ u64 reg, val;
+ int i, eng;
+ u8 grp;
+
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(vec));
+ dev_err_ratelimited(rvu->dev, "Received CPTAF FLT%d irq : 0x%llx", vec, reg);
+
+ i = -1;
+ while ((i = find_next_bit((unsigned long *)&reg, 64, i + 1)) < 64) {
+ switch (vec) {
+ case 0:
+ eng = i;
+ break;
+ case 1:
+ eng = i + 64;
+ break;
+ case 2:
+ eng = i + 128;
+ break;
+ }
+ grp = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL2(eng)) & 0xFF;
+ /* Disable and enable the engine which triggers fault */
+ rvu_write64(rvu, blkaddr, CPT_AF_EXEX_CTL2(eng), 0x0);
+ val = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_CTL(eng));
+ rvu_write64(rvu, blkaddr, CPT_AF_EXEX_CTL(eng), val & ~1ULL);
+
+ rvu_write64(rvu, blkaddr, CPT_AF_EXEX_CTL2(eng), grp);
+ rvu_write64(rvu, blkaddr, CPT_AF_EXEX_CTL(eng), val | 1ULL);
+
+ spin_lock(&rvu->cpt_intr_lock);
+ block->cpt_flt_eng_map[vec] |= BIT_ULL(i);
+ val = rvu_read64(rvu, blkaddr, CPT_AF_EXEX_STS(eng));
+ val = val & 0x3;
+ if (val == 0x1 || val == 0x2)
+ block->cpt_rcvrd_eng_map[vec] |= BIT_ULL(i);
+ spin_unlock(&rvu->cpt_intr_lock);
}
-
- rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT(0), reg0);
- rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT(1), reg1);
- if (!is_rvu_otx2(rvu))
- rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT(2), reg2);
+ rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT(vec), reg);
return IRQ_HANDLED;
}
+static irqreturn_t rvu_cpt_af_flt0_intr_handler(int irq, void *ptr)
+{
+ return cpt_af_flt_intr_handler(CPT_AF_INT_VEC_FLT0, ptr);
+}
+
+static irqreturn_t rvu_cpt_af_flt1_intr_handler(int irq, void *ptr)
+{
+ return cpt_af_flt_intr_handler(CPT_AF_INT_VEC_FLT1, ptr);
+}
+
+static irqreturn_t rvu_cpt_af_flt2_intr_handler(int irq, void *ptr)
+{
+ return cpt_af_flt_intr_handler(CPT_10K_AF_INT_VEC_FLT2, ptr);
+}
+
static irqreturn_t rvu_cpt_af_rvu_intr_handler(int irq, void *ptr)
{
struct rvu_block *block = ptr;
@@ -119,8 +153,10 @@ static void cpt_10k_unregister_interrupts(struct rvu_block *block, int off)
int i;
/* Disable all CPT AF interrupts */
- for (i = 0; i < CPT_10K_AF_INT_VEC_RVU; i++)
- rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(i), 0x1);
+ rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(0), ~0ULL);
+ rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(1), ~0ULL);
+ rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(2), 0xFFFF);
+
rvu_write64(rvu, blkaddr, CPT_AF_RVU_INT_ENA_W1C, 0x1);
rvu_write64(rvu, blkaddr, CPT_AF_RAS_INT_ENA_W1C, 0x1);
@@ -151,7 +187,7 @@ static void cpt_unregister_interrupts(struct rvu *rvu, int blkaddr)
/* Disable all CPT AF interrupts */
for (i = 0; i < CPT_AF_INT_VEC_RVU; i++)
- rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(i), 0x1);
+ rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(i), ~0ULL);
rvu_write64(rvu, blkaddr, CPT_AF_RVU_INT_ENA_W1C, 0x1);
rvu_write64(rvu, blkaddr, CPT_AF_RAS_INT_ENA_W1C, 0x1);
@@ -172,16 +208,31 @@ static int cpt_10k_register_interrupts(struct rvu_block *block, int off)
{
struct rvu *rvu = block->rvu;
int blkaddr = block->addr;
+ irq_handler_t flt_fn;
int i, ret;
for (i = CPT_10K_AF_INT_VEC_FLT0; i < CPT_10K_AF_INT_VEC_RVU; i++) {
sprintf(&rvu->irq_name[(off + i) * NAME_SIZE], "CPTAF FLT%d", i);
+
+ switch (i) {
+ case CPT_10K_AF_INT_VEC_FLT0:
+ flt_fn = rvu_cpt_af_flt0_intr_handler;
+ break;
+ case CPT_10K_AF_INT_VEC_FLT1:
+ flt_fn = rvu_cpt_af_flt1_intr_handler;
+ break;
+ case CPT_10K_AF_INT_VEC_FLT2:
+ flt_fn = rvu_cpt_af_flt2_intr_handler;
+ break;
+ }
ret = rvu_cpt_do_register_interrupt(block, off + i,
- rvu_cpt_af_flt_intr_handler,
- &rvu->irq_name[(off + i) * NAME_SIZE]);
+ flt_fn, &rvu->irq_name[(off + i) * NAME_SIZE]);
if (ret)
goto err;
- rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1S(i), 0x1);
+ if (i == CPT_10K_AF_INT_VEC_FLT2)
+ rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1S(i), 0xFFFF);
+ else
+ rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1S(i), ~0ULL);
}
ret = rvu_cpt_do_register_interrupt(block, off + CPT_10K_AF_INT_VEC_RVU,
@@ -208,8 +259,8 @@ static int cpt_register_interrupts(struct rvu *rvu, int blkaddr)
{
struct rvu_hwinfo *hw = rvu->hw;
struct rvu_block *block;
+ irq_handler_t flt_fn;
int i, offs, ret = 0;
- char irq_name[16];
if (!is_block_implemented(rvu->hw, blkaddr))
return 0;
@@ -226,13 +277,20 @@ static int cpt_register_interrupts(struct rvu *rvu, int blkaddr)
return cpt_10k_register_interrupts(block, offs);
for (i = CPT_AF_INT_VEC_FLT0; i < CPT_AF_INT_VEC_RVU; i++) {
- snprintf(irq_name, sizeof(irq_name), "CPTAF FLT%d", i);
+ sprintf(&rvu->irq_name[(offs + i) * NAME_SIZE], "CPTAF FLT%d", i);
+ switch (i) {
+ case CPT_AF_INT_VEC_FLT0:
+ flt_fn = rvu_cpt_af_flt0_intr_handler;
+ break;
+ case CPT_AF_INT_VEC_FLT1:
+ flt_fn = rvu_cpt_af_flt1_intr_handler;
+ break;
+ }
ret = rvu_cpt_do_register_interrupt(block, offs + i,
- rvu_cpt_af_flt_intr_handler,
- irq_name);
+ flt_fn, &rvu->irq_name[(offs + i) * NAME_SIZE]);
if (ret)
goto err;
- rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1S(i), 0x1);
+ rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1S(i), ~0ULL);
}
ret = rvu_cpt_do_register_interrupt(block, offs + CPT_AF_INT_VEC_RVU,
@@ -290,7 +348,7 @@ static int get_cpt_pf_num(struct rvu *rvu)
static bool is_cpt_pf(struct rvu *rvu, u16 pcifunc)
{
- int cpt_pf_num = get_cpt_pf_num(rvu);
+ int cpt_pf_num = rvu->cpt_pf_num;
if (rvu_get_pf(pcifunc) != cpt_pf_num)
return false;
@@ -302,7 +360,7 @@ static bool is_cpt_pf(struct rvu *rvu, u16 pcifunc)
static bool is_cpt_vf(struct rvu *rvu, u16 pcifunc)
{
- int cpt_pf_num = get_cpt_pf_num(rvu);
+ int cpt_pf_num = rvu->cpt_pf_num;
if (rvu_get_pf(pcifunc) != cpt_pf_num)
return false;
@@ -371,8 +429,12 @@ int rvu_mbox_handler_cpt_lf_alloc(struct rvu *rvu,
/* Set CPT LF group and priority */
val = (u64)req->eng_grpmsk << 48 | 1;
- if (!is_rvu_otx2(rvu))
- val |= (CPT_CTX_ILEN << 17);
+ if (!is_rvu_otx2(rvu)) {
+ if (req->ctx_ilen_valid)
+ val |= (req->ctx_ilen << 17);
+ else
+ val |= (CPT_CTX_ILEN << 17);
+ }
rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf), val);
@@ -762,10 +824,21 @@ int rvu_mbox_handler_cpt_sts(struct rvu *rvu, struct cpt_sts_req *req,
#define RXC_ZOMBIE_COUNT GENMASK_ULL(60, 48)
static void cpt_rxc_time_cfg(struct rvu *rvu, struct cpt_rxc_time_cfg_req *req,
- int blkaddr)
+ int blkaddr, struct cpt_rxc_time_cfg_req *save)
{
u64 dfrg_reg;
+ if (save) {
+ /* Save older config */
+ dfrg_reg = rvu_read64(rvu, blkaddr, CPT_AF_RXC_DFRG);
+ save->zombie_thres = FIELD_GET(RXC_ZOMBIE_THRES, dfrg_reg);
+ save->zombie_limit = FIELD_GET(RXC_ZOMBIE_LIMIT, dfrg_reg);
+ save->active_thres = FIELD_GET(RXC_ACTIVE_THRES, dfrg_reg);
+ save->active_limit = FIELD_GET(RXC_ACTIVE_LIMIT, dfrg_reg);
+
+ save->step = rvu_read64(rvu, blkaddr, CPT_AF_RXC_TIME_CFG);
+ }
+
dfrg_reg = FIELD_PREP(RXC_ZOMBIE_THRES, req->zombie_thres);
dfrg_reg |= FIELD_PREP(RXC_ZOMBIE_LIMIT, req->zombie_limit);
dfrg_reg |= FIELD_PREP(RXC_ACTIVE_THRES, req->active_thres);
@@ -790,7 +863,7 @@ int rvu_mbox_handler_cpt_rxc_time_cfg(struct rvu *rvu,
!is_cpt_vf(rvu, req->hdr.pcifunc))
return CPT_AF_ERR_ACCESS_DENIED;
- cpt_rxc_time_cfg(rvu, req, blkaddr);
+ cpt_rxc_time_cfg(rvu, req, blkaddr, NULL);
return 0;
}
@@ -801,9 +874,67 @@ int rvu_mbox_handler_cpt_ctx_cache_sync(struct rvu *rvu, struct msg_req *req,
return rvu_cpt_ctx_flush(rvu, req->hdr.pcifunc);
}
+int rvu_mbox_handler_cpt_lf_reset(struct rvu *rvu, struct cpt_lf_rst_req *req,
+ struct msg_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ struct rvu_block *block;
+ int cptlf, blkaddr, ret;
+ u16 actual_slot;
+ u64 ctl, ctl2;
+
+ blkaddr = rvu_get_blkaddr_from_slot(rvu, BLKTYPE_CPT, pcifunc,
+ req->slot, &actual_slot);
+ if (blkaddr < 0)
+ return CPT_AF_ERR_LF_INVALID;
+
+ block = &rvu->hw->block[blkaddr];
+
+ cptlf = rvu_get_lf(rvu, block, pcifunc, actual_slot);
+ if (cptlf < 0)
+ return CPT_AF_ERR_LF_INVALID;
+ ctl = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf));
+ ctl2 = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf));
+
+ ret = rvu_lf_reset(rvu, block, cptlf);
+ if (ret)
+ dev_err(rvu->dev, "Failed to reset blkaddr %d LF%d\n",
+ block->addr, cptlf);
+
+ rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf), ctl);
+ rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf), ctl2);
+
+ return 0;
+}
+
+int rvu_mbox_handler_cpt_flt_eng_info(struct rvu *rvu, struct cpt_flt_eng_info_req *req,
+ struct cpt_flt_eng_info_rsp *rsp)
+{
+ struct rvu_block *block;
+ unsigned long flags;
+ int blkaddr, vec;
+
+ blkaddr = validate_and_get_cpt_blkaddr(req->blkaddr);
+ if (blkaddr < 0)
+ return blkaddr;
+
+ block = &rvu->hw->block[blkaddr];
+ for (vec = 0; vec < CPT_10K_AF_INT_VEC_RVU; vec++) {
+ spin_lock_irqsave(&rvu->cpt_intr_lock, flags);
+ rsp->flt_eng_map[vec] = block->cpt_flt_eng_map[vec];
+ rsp->rcvrd_eng_map[vec] = block->cpt_rcvrd_eng_map[vec];
+ if (req->reset) {
+ block->cpt_flt_eng_map[vec] = 0x0;
+ block->cpt_rcvrd_eng_map[vec] = 0x0;
+ }
+ spin_unlock_irqrestore(&rvu->cpt_intr_lock, flags);
+ }
+ return 0;
+}
+
static void cpt_rxc_teardown(struct rvu *rvu, int blkaddr)
{
- struct cpt_rxc_time_cfg_req req;
+ struct cpt_rxc_time_cfg_req req, prev;
int timeout = 2000;
u64 reg;
@@ -819,7 +950,7 @@ static void cpt_rxc_teardown(struct rvu *rvu, int blkaddr)
req.active_thres = 1;
req.active_limit = 1;
- cpt_rxc_time_cfg(rvu, &req, blkaddr);
+ cpt_rxc_time_cfg(rvu, &req, blkaddr, &prev);
do {
reg = rvu_read64(rvu, blkaddr, CPT_AF_RXC_ACTIVE_STS);
@@ -845,70 +976,68 @@ static void cpt_rxc_teardown(struct rvu *rvu, int blkaddr)
if (timeout == 0)
dev_warn(rvu->dev, "Poll for RXC zombie count hits hard loop counter\n");
+
+ /* Restore config */
+ cpt_rxc_time_cfg(rvu, &prev, blkaddr, NULL);
}
-#define INPROG_INFLIGHT(reg) ((reg) & 0x1FF)
-#define INPROG_GRB_PARTIAL(reg) ((reg) & BIT_ULL(31))
-#define INPROG_GRB(reg) (((reg) >> 32) & 0xFF)
-#define INPROG_GWB(reg) (((reg) >> 40) & 0xFF)
+#define INFLIGHT GENMASK_ULL(8, 0)
+#define GRB_CNT GENMASK_ULL(39, 32)
+#define GWB_CNT GENMASK_ULL(47, 40)
+#define XQ_XOR GENMASK_ULL(63, 63)
+#define DQPTR GENMASK_ULL(19, 0)
+#define NQPTR GENMASK_ULL(51, 32)
static void cpt_lf_disable_iqueue(struct rvu *rvu, int blkaddr, int slot)
{
- int i = 0, hard_lp_ctr = 100000;
- u64 inprog, grp_ptr;
- u16 nq_ptr, dq_ptr;
+ int timeout = 1000000;
+ u64 inprog, inst_ptr;
+ u64 qsize, pending;
+ int i = 0;
/* Disable instructions enqueuing */
rvu_write64(rvu, blkaddr, CPT_AF_BAR2_ALIASX(slot, CPT_LF_CTL), 0x0);
- /* Disable executions in the LF's queue */
inprog = rvu_read64(rvu, blkaddr,
CPT_AF_BAR2_ALIASX(slot, CPT_LF_INPROG));
- inprog &= ~BIT_ULL(16);
+ inprog |= BIT_ULL(16);
rvu_write64(rvu, blkaddr,
CPT_AF_BAR2_ALIASX(slot, CPT_LF_INPROG), inprog);
- /* Wait for CPT queue to become execution-quiescent */
+ qsize = rvu_read64(rvu, blkaddr,
+ CPT_AF_BAR2_ALIASX(slot, CPT_LF_Q_SIZE)) & 0x7FFF;
do {
- inprog = rvu_read64(rvu, blkaddr,
- CPT_AF_BAR2_ALIASX(slot, CPT_LF_INPROG));
- if (INPROG_GRB_PARTIAL(inprog)) {
- i = 0;
- hard_lp_ctr--;
- } else {
- i++;
- }
-
- grp_ptr = rvu_read64(rvu, blkaddr,
- CPT_AF_BAR2_ALIASX(slot,
- CPT_LF_Q_GRP_PTR));
- nq_ptr = (grp_ptr >> 32) & 0x7FFF;
- dq_ptr = grp_ptr & 0x7FFF;
-
- } while (hard_lp_ctr && (i < 10) && (nq_ptr != dq_ptr));
+ inst_ptr = rvu_read64(rvu, blkaddr,
+ CPT_AF_BAR2_ALIASX(slot, CPT_LF_Q_INST_PTR));
+ pending = (FIELD_GET(XQ_XOR, inst_ptr) * qsize * 40) +
+ FIELD_GET(NQPTR, inst_ptr) -
+ FIELD_GET(DQPTR, inst_ptr);
+ udelay(1);
+ timeout--;
+ } while ((pending != 0) && (timeout != 0));
- if (hard_lp_ctr == 0)
- dev_warn(rvu->dev, "CPT FLR hits hard loop counter\n");
+ if (timeout == 0)
+ dev_warn(rvu->dev, "TIMEOUT: CPT poll on pending instructions\n");
- i = 0;
- hard_lp_ctr = 100000;
+ timeout = 1000000;
+ /* Wait for CPT queue to become execution-quiescent */
do {
inprog = rvu_read64(rvu, blkaddr,
CPT_AF_BAR2_ALIASX(slot, CPT_LF_INPROG));
- if ((INPROG_INFLIGHT(inprog) == 0) &&
- (INPROG_GWB(inprog) < 40) &&
- ((INPROG_GRB(inprog) == 0) ||
- (INPROG_GRB((inprog)) == 40))) {
+ if ((FIELD_GET(INFLIGHT, inprog) == 0) &&
+ (FIELD_GET(GRB_CNT, inprog) == 0)) {
i++;
} else {
i = 0;
- hard_lp_ctr--;
+ timeout--;
}
- } while (hard_lp_ctr && (i < 10));
+ } while ((timeout != 0) && (i < 10));
- if (hard_lp_ctr == 0)
- dev_warn(rvu->dev, "CPT FLR hits hard loop counter\n");
+ if (timeout == 0)
+ dev_warn(rvu->dev, "TIMEOUT: CPT poll on inflight count\n");
+ /* Wait for 2 us to flush all queue writes to memory */
+ udelay(2);
}
int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int lf, int slot)
@@ -918,18 +1047,15 @@ int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int lf, int s
if (is_cpt_pf(rvu, pcifunc) || is_cpt_vf(rvu, pcifunc))
cpt_rxc_teardown(rvu, blkaddr);
+ mutex_lock(&rvu->alias_lock);
/* Enable BAR2 ALIAS for this pcifunc. */
reg = BIT_ULL(16) | pcifunc;
- rvu_write64(rvu, blkaddr, CPT_AF_BAR2_SEL, reg);
+ rvu_bar2_sel_write64(rvu, blkaddr, CPT_AF_BAR2_SEL, reg);
cpt_lf_disable_iqueue(rvu, blkaddr, slot);
- /* Set group drop to help clear out hardware */
- reg = rvu_read64(rvu, blkaddr, CPT_AF_BAR2_ALIASX(slot, CPT_LF_INPROG));
- reg |= BIT_ULL(17);
- rvu_write64(rvu, blkaddr, CPT_AF_BAR2_ALIASX(slot, CPT_LF_INPROG), reg);
-
- rvu_write64(rvu, blkaddr, CPT_AF_BAR2_SEL, 0);
+ rvu_bar2_sel_write64(rvu, blkaddr, CPT_AF_BAR2_SEL, 0);
+ mutex_unlock(&rvu->alias_lock);
return 0;
}
@@ -940,7 +1066,7 @@ int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int lf, int s
static int cpt_inline_inb_lf_cmd_send(struct rvu *rvu, int blkaddr,
int nix_blkaddr)
{
- int cpt_pf_num = get_cpt_pf_num(rvu);
+ int cpt_pf_num = rvu->cpt_pf_num;
struct cpt_inst_lmtst_req *req;
dma_addr_t res_daddr;
int timeout = 3000;
@@ -1064,7 +1190,7 @@ int rvu_cpt_ctx_flush(struct rvu *rvu, u16 pcifunc)
/* Enable BAR2 ALIAS for this pcifunc. */
reg = BIT_ULL(16) | pcifunc;
- rvu_write64(rvu, blkaddr, CPT_AF_BAR2_SEL, reg);
+ rvu_bar2_sel_write64(rvu, blkaddr, CPT_AF_BAR2_SEL, reg);
for (i = 0; i < max_ctx_entries; i++) {
cam_data = rvu_read64(rvu, blkaddr, CPT_AF_CTX_CAM_DATA(i));
@@ -1077,10 +1203,19 @@ int rvu_cpt_ctx_flush(struct rvu *rvu, u16 pcifunc)
reg);
}
}
- rvu_write64(rvu, blkaddr, CPT_AF_BAR2_SEL, 0);
+ rvu_bar2_sel_write64(rvu, blkaddr, CPT_AF_BAR2_SEL, 0);
unlock:
mutex_unlock(&rvu->rsrc_lock);
return 0;
}
+
+int rvu_cpt_init(struct rvu *rvu)
+{
+ /* Retrieve CPT PF number */
+ rvu->cpt_pf_num = get_cpt_pf_num(rvu);
+ spin_lock_init(&rvu->cpt_intr_lock);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
index bda1a6fa2ec4..e4407f09c9d3 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
@@ -1500,6 +1500,9 @@ static const struct devlink_param rvu_af_dl_params[] = {
BIT(DEVLINK_PARAM_CMODE_RUNTIME),
rvu_af_dl_dwrr_mtu_get, rvu_af_dl_dwrr_mtu_set,
rvu_af_dl_dwrr_mtu_validate),
+};
+
+static const struct devlink_param rvu_af_dl_param_exact_match[] = {
DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_NPC_EXACT_FEATURE_DISABLE,
"npc_exact_feature_disable", DEVLINK_PARAM_TYPE_STRING,
BIT(DEVLINK_PARAM_CMODE_RUNTIME),
@@ -1556,7 +1559,6 @@ int rvu_register_dl(struct rvu *rvu)
{
struct rvu_devlink *rvu_dl;
struct devlink *dl;
- size_t size;
int err;
dl = devlink_alloc(&rvu_devlink_ops, sizeof(struct rvu_devlink),
@@ -1578,21 +1580,32 @@ int rvu_register_dl(struct rvu *rvu)
goto err_dl_health;
}
+ err = devlink_params_register(dl, rvu_af_dl_params, ARRAY_SIZE(rvu_af_dl_params));
+ if (err) {
+ dev_err(rvu->dev,
+ "devlink params register failed with error %d", err);
+ goto err_dl_health;
+ }
+
/* Register exact match devlink only for CN10K-B */
- size = ARRAY_SIZE(rvu_af_dl_params);
if (!rvu_npc_exact_has_match_table(rvu))
- size -= 1;
+ goto done;
- err = devlink_params_register(dl, rvu_af_dl_params, size);
+ err = devlink_params_register(dl, rvu_af_dl_param_exact_match,
+ ARRAY_SIZE(rvu_af_dl_param_exact_match));
if (err) {
dev_err(rvu->dev,
- "devlink params register failed with error %d", err);
- goto err_dl_health;
+ "devlink exact match params register failed with error %d", err);
+ goto err_dl_exact_match;
}
+done:
devlink_register(dl);
return 0;
+err_dl_exact_match:
+ devlink_params_unregister(dl, rvu_af_dl_params, ARRAY_SIZE(rvu_af_dl_params));
+
err_dl_health:
rvu_health_reporters_destroy(rvu);
devlink_free(dl);
@@ -1605,8 +1618,14 @@ void rvu_unregister_dl(struct rvu *rvu)
struct devlink *dl = rvu_dl->dl;
devlink_unregister(dl);
- devlink_params_unregister(dl, rvu_af_dl_params,
- ARRAY_SIZE(rvu_af_dl_params));
+
+ devlink_params_unregister(dl, rvu_af_dl_params, ARRAY_SIZE(rvu_af_dl_params));
+
+ /* Unregister exact match devlink only for CN10K-B */
+ if (rvu_npc_exact_has_match_table(rvu))
+ devlink_params_unregister(dl, rvu_af_dl_param_exact_match,
+ ARRAY_SIZE(rvu_af_dl_param_exact_match));
+
rvu_health_reporters_destroy(rvu);
devlink_free(dl);
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index 6b8747ebc08c..26e639e57dae 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -2058,6 +2058,13 @@ static int nix_smq_flush(struct rvu *rvu, int blkaddr,
int err, restore_tx_en = 0;
u64 cfg;
+ if (!is_rvu_otx2(rvu)) {
+ /* Skip SMQ flush if pkt count is zero */
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_MDQX_IN_MD_COUNT(smq));
+ if (!cfg)
+ return 0;
+ }
+
/* enable cgx tx if disabled */
if (is_pf_cgxmapped(rvu, pf)) {
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
@@ -4309,6 +4316,9 @@ static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw)
rvu_write64(rvu, blkaddr, NIX_AF_SEB_CFG, cfg);
+ if (!is_rvu_otx2(rvu))
+ rvu_nix_block_cn10k_init(rvu, nix_hw);
+
if (is_block_implemented(hw, blkaddr)) {
err = nix_setup_txschq(rvu, nix_hw, blkaddr);
if (err)
@@ -4731,6 +4741,10 @@ int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu,
#define CPT_INST_QSEL_PF_FUNC GENMASK_ULL(23, 8)
#define CPT_INST_QSEL_SLOT GENMASK_ULL(7, 0)
+#define CPT_INST_CREDIT_TH GENMASK_ULL(53, 32)
+#define CPT_INST_CREDIT_BPID GENMASK_ULL(30, 22)
+#define CPT_INST_CREDIT_CNT GENMASK_ULL(21, 0)
+
static void nix_inline_ipsec_cfg(struct rvu *rvu, struct nix_inline_ipsec_cfg *req,
int blkaddr)
{
@@ -4767,14 +4781,23 @@ static void nix_inline_ipsec_cfg(struct rvu *rvu, struct nix_inline_ipsec_cfg *r
val);
/* Set CPT credit */
- rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx),
- req->cpt_credit);
+ val = rvu_read64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx));
+ if ((val & 0x3FFFFF) != 0x3FFFFF)
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx),
+ 0x3FFFFF - val);
+
+ val = FIELD_PREP(CPT_INST_CREDIT_CNT, req->cpt_credit);
+ val |= FIELD_PREP(CPT_INST_CREDIT_BPID, req->bpid);
+ val |= FIELD_PREP(CPT_INST_CREDIT_TH, req->credit_th);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx), val);
} else {
rvu_write64(rvu, blkaddr, NIX_AF_RX_IPSEC_GEN_CFG, 0x0);
rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_INST_QSEL(cpt_idx),
0x0);
- rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx),
- 0x3FFFFF);
+ val = rvu_read64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx));
+ if ((val & 0x3FFFFF) != 0x3FFFFF)
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx),
+ 0x3FFFFF - val);
}
}
@@ -4792,6 +4815,30 @@ int rvu_mbox_handler_nix_inline_ipsec_cfg(struct rvu *rvu,
return 0;
}
+int rvu_mbox_handler_nix_read_inline_ipsec_cfg(struct rvu *rvu,
+ struct msg_req *req,
+ struct nix_inline_ipsec_cfg *rsp)
+
+{
+ u64 val;
+
+ if (!is_block_implemented(rvu->hw, BLKADDR_CPT0))
+ return 0;
+
+ val = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_RX_IPSEC_GEN_CFG);
+ rsp->gen_cfg.egrp = FIELD_GET(IPSEC_GEN_CFG_EGRP, val);
+ rsp->gen_cfg.opcode = FIELD_GET(IPSEC_GEN_CFG_OPCODE, val);
+ rsp->gen_cfg.param1 = FIELD_GET(IPSEC_GEN_CFG_PARAM1, val);
+ rsp->gen_cfg.param2 = FIELD_GET(IPSEC_GEN_CFG_PARAM2, val);
+
+ val = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_RX_CPTX_CREDIT(0));
+ rsp->cpt_credit = FIELD_GET(CPT_INST_CREDIT_CNT, val);
+ rsp->credit_th = FIELD_GET(CPT_INST_CREDIT_TH, val);
+ rsp->bpid = FIELD_GET(CPT_INST_CREDIT_BPID, val);
+
+ return 0;
+}
+
int rvu_mbox_handler_nix_inline_ipsec_lf_cfg(struct rvu *rvu,
struct nix_inline_ipsec_lf_cfg *req,
struct msg_rsp *rsp)
@@ -4835,6 +4882,7 @@ int rvu_mbox_handler_nix_inline_ipsec_lf_cfg(struct rvu *rvu,
return 0;
}
+
void rvu_nix_reset_mac(struct rvu_pfvf *pfvf, int pcifunc)
{
bool from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
index f69102d20c90..20ebb9c95c73 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_hash.c
@@ -200,10 +200,8 @@ void npc_config_secret_key(struct rvu *rvu, int blkaddr)
struct rvu_hwinfo *hw = rvu->hw;
u8 intf;
- if (!hwcap->npc_hash_extract) {
- dev_info(rvu->dev, "HW does not support secret key configuration\n");
+ if (!hwcap->npc_hash_extract)
return;
- }
for (intf = 0; intf < hw->npc_intfs; intf++) {
rvu_write64(rvu, blkaddr, NPC_AF_INTFX_SECRET_KEY0(intf),
@@ -221,10 +219,8 @@ void npc_program_mkex_hash(struct rvu *rvu, int blkaddr)
struct rvu_hwinfo *hw = rvu->hw;
u8 intf;
- if (!hwcap->npc_hash_extract) {
- dev_dbg(rvu->dev, "Field hash extract feature is not supported\n");
+ if (!hwcap->npc_hash_extract)
return;
- }
for (intf = 0; intf < hw->npc_intfs; intf++) {
npc_program_mkex_hash_rx(rvu, blkaddr, intf);
@@ -1853,19 +1849,13 @@ int rvu_npc_exact_init(struct rvu *rvu)
/* Check exact match feature is supported */
npc_const3 = rvu_read64(rvu, blkaddr, NPC_AF_CONST3);
- if (!(npc_const3 & BIT_ULL(62))) {
- dev_info(rvu->dev, "%s: No support for exact match support\n",
- __func__);
+ if (!(npc_const3 & BIT_ULL(62)))
return 0;
- }
/* Check if kex profile has enabled EXACT match nibble */
cfg = rvu_read64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_RX));
- if (!(cfg & NPC_EXACT_NIBBLE_HIT)) {
- dev_info(rvu->dev, "%s: NPC exact match nibble not enabled in KEX profile\n",
- __func__);
+ if (!(cfg & NPC_EXACT_NIBBLE_HIT))
return 0;
- }
/* Set capability to true */
rvu->hw->cap.npc_exact_match_enabled = true;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
index 0e0d536645ac..1729b22580ce 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
@@ -189,6 +189,7 @@
#define NIX_AF_RX_CFG (0x00D0)
#define NIX_AF_AVG_DELAY (0x00E0)
#define NIX_AF_CINT_DELAY (0x00F0)
+#define NIX_AF_VWQE_TIMER (0x00F8)
#define NIX_AF_RX_MCAST_BASE (0x0100)
#define NIX_AF_RX_MCAST_CFG (0x0110)
#define NIX_AF_RX_MCAST_BUF_BASE (0x0120)
@@ -426,6 +427,7 @@
#define NIX_AF_RX_NPC_MIRROR_DROP (0x4730)
#define NIX_AF_RX_ACTIVE_CYCLES_PCX(a) (0x4800 | (a) << 16)
#define NIX_AF_LINKX_CFG(a) (0x4010 | (a) << 17)
+#define NIX_AF_MDQX_IN_MD_COUNT(a) (0x14e0 | (a) << 16)
#define NIX_PRIV_AF_INT_CFG (0x8000000)
#define NIX_PRIV_LFX_CFG (0x8000010)
@@ -545,6 +547,8 @@
#define CPT_LF_CTL 0x10
#define CPT_LF_INPROG 0x40
+#define CPT_LF_Q_SIZE 0x100
+#define CPT_LF_Q_INST_PTR 0x110
#define CPT_LF_Q_GRP_PTR 0x120
#define CPT_LF_CTX_FLUSH 0x510
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index c1ea60bc2630..179433d0a54a 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -2512,10 +2512,13 @@ static int otx2_xdp_setup(struct otx2_nic *pf, struct bpf_prog *prog)
/* Network stack and XDP shared same rx queues.
* Use separate tx queues for XDP and network stack.
*/
- if (pf->xdp_prog)
+ if (pf->xdp_prog) {
pf->hw.xdp_queues = pf->hw.rx_queues;
- else
+ xdp_features_set_redirect_target(dev, false);
+ } else {
pf->hw.xdp_queues = 0;
+ xdp_features_clear_redirect_target(dev);
+ }
pf->hw.tot_tx_queues += pf->hw.xdp_queues;
@@ -2878,6 +2881,7 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
netdev->watchdog_timeo = OTX2_TX_TIMEOUT;
netdev->netdev_ops = &otx2_netdev_ops;
+ netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT;
netdev->min_mtu = OTX2_MIN_MTU;
netdev->max_mtu = otx2_get_max_mtu(pf);
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index cf456d62677f..87fff539d39d 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -965,7 +965,7 @@ static int pxa168_init_phy(struct net_device *dev)
if (dev->phydev)
return 0;
- phy = mdiobus_scan(pep->smi_bus, pep->phy_addr);
+ phy = mdiobus_scan_c22(pep->smi_bus, pep->phy_addr);
if (IS_ERR(phy))
return PTR_ERR(phy);
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index e3de9a53b2d9..14be6ea51b88 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -51,6 +51,7 @@ static const struct mtk_reg_map mtk_reg_map = {
.delay_irq = 0x0a0c,
.irq_status = 0x0a20,
.irq_mask = 0x0a28,
+ .adma_rx_dbg0 = 0x0a38,
.int_grp = 0x0a50,
},
.qdma = {
@@ -82,6 +83,8 @@ static const struct mtk_reg_map mtk_reg_map = {
[0] = 0x2800,
[1] = 0x2c00,
},
+ .pse_iq_sta = 0x0110,
+ .pse_oq_sta = 0x0118,
};
static const struct mtk_reg_map mt7628_reg_map = {
@@ -112,6 +115,7 @@ static const struct mtk_reg_map mt7986_reg_map = {
.delay_irq = 0x620c,
.irq_status = 0x6220,
.irq_mask = 0x6228,
+ .adma_rx_dbg0 = 0x6238,
.int_grp = 0x6250,
},
.qdma = {
@@ -143,6 +147,8 @@ static const struct mtk_reg_map mt7986_reg_map = {
[0] = 0x4800,
[1] = 0x4c00,
},
+ .pse_iq_sta = 0x0180,
+ .pse_oq_sta = 0x01a0,
};
/* strings used by ethtool */
@@ -215,8 +221,8 @@ static int mtk_mdio_busy_wait(struct mtk_eth *eth)
return -ETIMEDOUT;
}
-static int _mtk_mdio_write(struct mtk_eth *eth, u32 phy_addr, u32 phy_reg,
- u32 write_data)
+static int _mtk_mdio_write_c22(struct mtk_eth *eth, u32 phy_addr, u32 phy_reg,
+ u32 write_data)
{
int ret;
@@ -224,35 +230,13 @@ static int _mtk_mdio_write(struct mtk_eth *eth, u32 phy_addr, u32 phy_reg,
if (ret < 0)
return ret;
- if (phy_reg & MII_ADDR_C45) {
- mtk_w32(eth, PHY_IAC_ACCESS |
- PHY_IAC_START_C45 |
- PHY_IAC_CMD_C45_ADDR |
- PHY_IAC_REG(mdiobus_c45_devad(phy_reg)) |
- PHY_IAC_ADDR(phy_addr) |
- PHY_IAC_DATA(mdiobus_c45_regad(phy_reg)),
- MTK_PHY_IAC);
-
- ret = mtk_mdio_busy_wait(eth);
- if (ret < 0)
- return ret;
-
- mtk_w32(eth, PHY_IAC_ACCESS |
- PHY_IAC_START_C45 |
- PHY_IAC_CMD_WRITE |
- PHY_IAC_REG(mdiobus_c45_devad(phy_reg)) |
- PHY_IAC_ADDR(phy_addr) |
- PHY_IAC_DATA(write_data),
- MTK_PHY_IAC);
- } else {
- mtk_w32(eth, PHY_IAC_ACCESS |
- PHY_IAC_START_C22 |
- PHY_IAC_CMD_WRITE |
- PHY_IAC_REG(phy_reg) |
- PHY_IAC_ADDR(phy_addr) |
- PHY_IAC_DATA(write_data),
- MTK_PHY_IAC);
- }
+ mtk_w32(eth, PHY_IAC_ACCESS |
+ PHY_IAC_START_C22 |
+ PHY_IAC_CMD_WRITE |
+ PHY_IAC_REG(phy_reg) |
+ PHY_IAC_ADDR(phy_addr) |
+ PHY_IAC_DATA(write_data),
+ MTK_PHY_IAC);
ret = mtk_mdio_busy_wait(eth);
if (ret < 0)
@@ -261,7 +245,8 @@ static int _mtk_mdio_write(struct mtk_eth *eth, u32 phy_addr, u32 phy_reg,
return 0;
}
-static int _mtk_mdio_read(struct mtk_eth *eth, u32 phy_addr, u32 phy_reg)
+static int _mtk_mdio_write_c45(struct mtk_eth *eth, u32 phy_addr,
+ u32 devad, u32 phy_reg, u32 write_data)
{
int ret;
@@ -269,33 +254,47 @@ static int _mtk_mdio_read(struct mtk_eth *eth, u32 phy_addr, u32 phy_reg)
if (ret < 0)
return ret;
- if (phy_reg & MII_ADDR_C45) {
- mtk_w32(eth, PHY_IAC_ACCESS |
- PHY_IAC_START_C45 |
- PHY_IAC_CMD_C45_ADDR |
- PHY_IAC_REG(mdiobus_c45_devad(phy_reg)) |
- PHY_IAC_ADDR(phy_addr) |
- PHY_IAC_DATA(mdiobus_c45_regad(phy_reg)),
- MTK_PHY_IAC);
-
- ret = mtk_mdio_busy_wait(eth);
- if (ret < 0)
- return ret;
-
- mtk_w32(eth, PHY_IAC_ACCESS |
- PHY_IAC_START_C45 |
- PHY_IAC_CMD_C45_READ |
- PHY_IAC_REG(mdiobus_c45_devad(phy_reg)) |
- PHY_IAC_ADDR(phy_addr),
- MTK_PHY_IAC);
- } else {
- mtk_w32(eth, PHY_IAC_ACCESS |
- PHY_IAC_START_C22 |
- PHY_IAC_CMD_C22_READ |
- PHY_IAC_REG(phy_reg) |
- PHY_IAC_ADDR(phy_addr),
- MTK_PHY_IAC);
- }
+ mtk_w32(eth, PHY_IAC_ACCESS |
+ PHY_IAC_START_C45 |
+ PHY_IAC_CMD_C45_ADDR |
+ PHY_IAC_REG(devad) |
+ PHY_IAC_ADDR(phy_addr) |
+ PHY_IAC_DATA(phy_reg),
+ MTK_PHY_IAC);
+
+ ret = mtk_mdio_busy_wait(eth);
+ if (ret < 0)
+ return ret;
+
+ mtk_w32(eth, PHY_IAC_ACCESS |
+ PHY_IAC_START_C45 |
+ PHY_IAC_CMD_WRITE |
+ PHY_IAC_REG(devad) |
+ PHY_IAC_ADDR(phy_addr) |
+ PHY_IAC_DATA(write_data),
+ MTK_PHY_IAC);
+
+ ret = mtk_mdio_busy_wait(eth);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int _mtk_mdio_read_c22(struct mtk_eth *eth, u32 phy_addr, u32 phy_reg)
+{
+ int ret;
+
+ ret = mtk_mdio_busy_wait(eth);
+ if (ret < 0)
+ return ret;
+
+ mtk_w32(eth, PHY_IAC_ACCESS |
+ PHY_IAC_START_C22 |
+ PHY_IAC_CMD_C22_READ |
+ PHY_IAC_REG(phy_reg) |
+ PHY_IAC_ADDR(phy_addr),
+ MTK_PHY_IAC);
ret = mtk_mdio_busy_wait(eth);
if (ret < 0)
@@ -304,19 +303,70 @@ static int _mtk_mdio_read(struct mtk_eth *eth, u32 phy_addr, u32 phy_reg)
return mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_DATA_MASK;
}
-static int mtk_mdio_write(struct mii_bus *bus, int phy_addr,
- int phy_reg, u16 val)
+static int _mtk_mdio_read_c45(struct mtk_eth *eth, u32 phy_addr,
+ u32 devad, u32 phy_reg)
+{
+ int ret;
+
+ ret = mtk_mdio_busy_wait(eth);
+ if (ret < 0)
+ return ret;
+
+ mtk_w32(eth, PHY_IAC_ACCESS |
+ PHY_IAC_START_C45 |
+ PHY_IAC_CMD_C45_ADDR |
+ PHY_IAC_REG(devad) |
+ PHY_IAC_ADDR(phy_addr) |
+ PHY_IAC_DATA(phy_reg),
+ MTK_PHY_IAC);
+
+ ret = mtk_mdio_busy_wait(eth);
+ if (ret < 0)
+ return ret;
+
+ mtk_w32(eth, PHY_IAC_ACCESS |
+ PHY_IAC_START_C45 |
+ PHY_IAC_CMD_C45_READ |
+ PHY_IAC_REG(devad) |
+ PHY_IAC_ADDR(phy_addr),
+ MTK_PHY_IAC);
+
+ ret = mtk_mdio_busy_wait(eth);
+ if (ret < 0)
+ return ret;
+
+ return mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_DATA_MASK;
+}
+
+static int mtk_mdio_write_c22(struct mii_bus *bus, int phy_addr,
+ int phy_reg, u16 val)
{
struct mtk_eth *eth = bus->priv;
- return _mtk_mdio_write(eth, phy_addr, phy_reg, val);
+ return _mtk_mdio_write_c22(eth, phy_addr, phy_reg, val);
}
-static int mtk_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg)
+static int mtk_mdio_write_c45(struct mii_bus *bus, int phy_addr,
+ int devad, int phy_reg, u16 val)
{
struct mtk_eth *eth = bus->priv;
- return _mtk_mdio_read(eth, phy_addr, phy_reg);
+ return _mtk_mdio_write_c45(eth, phy_addr, devad, phy_reg, val);
+}
+
+static int mtk_mdio_read_c22(struct mii_bus *bus, int phy_addr, int phy_reg)
+{
+ struct mtk_eth *eth = bus->priv;
+
+ return _mtk_mdio_read_c22(eth, phy_addr, phy_reg);
+}
+
+static int mtk_mdio_read_c45(struct mii_bus *bus, int phy_addr, int devad,
+ int phy_reg)
+{
+ struct mtk_eth *eth = bus->priv;
+
+ return _mtk_mdio_read_c45(eth, phy_addr, devad, phy_reg);
}
static int mt7621_gmac0_rgmii_adjust(struct mtk_eth *eth,
@@ -760,9 +810,10 @@ static int mtk_mdio_init(struct mtk_eth *eth)
}
eth->mii_bus->name = "mdio";
- eth->mii_bus->read = mtk_mdio_read;
- eth->mii_bus->write = mtk_mdio_write;
- eth->mii_bus->probe_capabilities = MDIOBUS_C22_C45;
+ eth->mii_bus->read = mtk_mdio_read_c22;
+ eth->mii_bus->write = mtk_mdio_write_c22;
+ eth->mii_bus->read_c45 = mtk_mdio_read_c45;
+ eth->mii_bus->write_c45 = mtk_mdio_write_c45;
eth->mii_bus->priv = eth;
eth->mii_bus->parent = eth->dev;
@@ -1570,8 +1621,8 @@ static struct page_pool *mtk_create_page_pool(struct mtk_eth *eth,
if (IS_ERR(pp))
return pp;
- err = __xdp_rxq_info_reg(xdp_q, &eth->dummy_dev, eth->rx_napi.napi_id,
- id, PAGE_SIZE);
+ err = __xdp_rxq_info_reg(xdp_q, &eth->dummy_dev, id,
+ eth->rx_napi.napi_id, PAGE_SIZE);
if (err < 0)
goto err_free_pp;
@@ -1870,7 +1921,9 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
while (done < budget) {
unsigned int pktlen, *rxdcsum;
+ bool has_hwaccel_tag = false;
struct net_device *netdev;
+ u16 vlan_proto, vlan_tci;
dma_addr_t dma_addr;
u32 hash, reason;
int mac = 0;
@@ -2010,27 +2063,29 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
- if (trxd.rxd3 & RX_DMA_VTAG_V2)
- __vlan_hwaccel_put_tag(skb,
- htons(RX_DMA_VPID(trxd.rxd4)),
- RX_DMA_VID(trxd.rxd4));
+ if (trxd.rxd3 & RX_DMA_VTAG_V2) {
+ vlan_proto = RX_DMA_VPID(trxd.rxd4);
+ vlan_tci = RX_DMA_VID(trxd.rxd4);
+ has_hwaccel_tag = true;
+ }
} else if (trxd.rxd2 & RX_DMA_VTAG) {
- __vlan_hwaccel_put_tag(skb, htons(RX_DMA_VPID(trxd.rxd3)),
- RX_DMA_VID(trxd.rxd3));
+ vlan_proto = RX_DMA_VPID(trxd.rxd3);
+ vlan_tci = RX_DMA_VID(trxd.rxd3);
+ has_hwaccel_tag = true;
}
}
/* When using VLAN untagging in combination with DSA, the
* hardware treats the MTK special tag as a VLAN and untags it.
*/
- if (skb_vlan_tag_present(skb) && netdev_uses_dsa(netdev)) {
- unsigned int port = ntohs(skb->vlan_proto) & GENMASK(2, 0);
+ if (has_hwaccel_tag && netdev_uses_dsa(netdev)) {
+ unsigned int port = vlan_proto & GENMASK(2, 0);
if (port < ARRAY_SIZE(eth->dsa_meta) &&
eth->dsa_meta[port])
skb_dst_set_noref(skb, &eth->dsa_meta[port]->dst);
-
- __vlan_hwaccel_clear_tag(skb);
+ } else if (has_hwaccel_tag) {
+ __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vlan_tci);
}
skb_record_rx_queue(skb, 0);
@@ -2984,14 +3039,29 @@ static void mtk_dma_free(struct mtk_eth *eth)
kfree(eth->scratch_head);
}
+static bool mtk_hw_reset_check(struct mtk_eth *eth)
+{
+ u32 val = mtk_r32(eth, MTK_INT_STATUS2);
+
+ return (val & MTK_FE_INT_FQ_EMPTY) || (val & MTK_FE_INT_RFIFO_UF) ||
+ (val & MTK_FE_INT_RFIFO_OV) || (val & MTK_FE_INT_TSO_FAIL) ||
+ (val & MTK_FE_INT_TSO_ALIGN) || (val & MTK_FE_INT_TSO_ILLEGAL);
+}
+
static void mtk_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct mtk_mac *mac = netdev_priv(dev);
struct mtk_eth *eth = mac->hw;
+ if (test_bit(MTK_RESETTING, &eth->state))
+ return;
+
+ if (!mtk_hw_reset_check(eth))
+ return;
+
eth->netdev[mac->id]->stats.tx_errors++;
- netif_err(eth, tx_err, dev,
- "transmit timed out\n");
+ netif_err(eth, tx_err, dev, "transmit timed out\n");
+
schedule_work(&eth->pending_work);
}
@@ -3111,7 +3181,7 @@ static void mtk_gdm_config(struct mtk_eth *eth, u32 config)
val |= config;
- if (!i && eth->netdev[0] && netdev_uses_dsa(eth->netdev[0]))
+ if (eth->netdev[i] && netdev_uses_dsa(eth->netdev[i]))
val |= MTK_GDMA_SPECIAL_TAG;
mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i));
@@ -3471,22 +3541,188 @@ static void mtk_set_mcr_max_rx(struct mtk_mac *mac, u32 val)
mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id));
}
-static int mtk_hw_init(struct mtk_eth *eth)
+static void mtk_hw_reset(struct mtk_eth *eth)
+{
+ u32 val;
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, 0);
+ val = RSTCTRL_PPE0_V2;
+ } else {
+ val = RSTCTRL_PPE0;
+ }
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
+ val |= RSTCTRL_PPE1;
+
+ ethsys_reset(eth, RSTCTRL_ETH | RSTCTRL_FE | val);
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN,
+ 0x3ffffff);
+}
+
+static u32 mtk_hw_reset_read(struct mtk_eth *eth)
+{
+ u32 val;
+
+ regmap_read(eth->ethsys, ETHSYS_RSTCTRL, &val);
+ return val;
+}
+
+static void mtk_hw_warm_reset(struct mtk_eth *eth)
+{
+ u32 rst_mask, val;
+
+ regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, RSTCTRL_FE,
+ RSTCTRL_FE);
+ if (readx_poll_timeout_atomic(mtk_hw_reset_read, eth, val,
+ val & RSTCTRL_FE, 1, 1000)) {
+ dev_err(eth->dev, "warm reset failed\n");
+ mtk_hw_reset(eth);
+ return;
+ }
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ rst_mask = RSTCTRL_ETH | RSTCTRL_PPE0_V2;
+ else
+ rst_mask = RSTCTRL_ETH | RSTCTRL_PPE0;
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
+ rst_mask |= RSTCTRL_PPE1;
+
+ regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, rst_mask, rst_mask);
+
+ udelay(1);
+ val = mtk_hw_reset_read(eth);
+ if (!(val & rst_mask))
+ dev_err(eth->dev, "warm reset stage0 failed %08x (%08x)\n",
+ val, rst_mask);
+
+ rst_mask |= RSTCTRL_FE;
+ regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, rst_mask, ~rst_mask);
+
+ udelay(1);
+ val = mtk_hw_reset_read(eth);
+ if (val & rst_mask)
+ dev_err(eth->dev, "warm reset stage1 failed %08x (%08x)\n",
+ val, rst_mask);
+}
+
+static bool mtk_hw_check_dma_hang(struct mtk_eth *eth)
+{
+ const struct mtk_reg_map *reg_map = eth->soc->reg_map;
+ bool gmac1_tx, gmac2_tx, gdm1_tx, gdm2_tx;
+ bool oq_hang, cdm1_busy, adma_busy;
+ bool wtx_busy, cdm_full, oq_free;
+ u32 wdidx, val, gdm1_fc, gdm2_fc;
+ bool qfsm_hang, qfwd_hang;
+ bool ret = false;
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
+ return false;
+
+ /* WDMA sanity checks */
+ wdidx = mtk_r32(eth, reg_map->wdma_base[0] + 0xc);
+
+ val = mtk_r32(eth, reg_map->wdma_base[0] + 0x204);
+ wtx_busy = FIELD_GET(MTK_TX_DMA_BUSY, val);
+
+ val = mtk_r32(eth, reg_map->wdma_base[0] + 0x230);
+ cdm_full = !FIELD_GET(MTK_CDM_TXFIFO_RDY, val);
+
+ oq_free = (!(mtk_r32(eth, reg_map->pse_oq_sta) & GENMASK(24, 16)) &&
+ !(mtk_r32(eth, reg_map->pse_oq_sta + 0x4) & GENMASK(8, 0)) &&
+ !(mtk_r32(eth, reg_map->pse_oq_sta + 0x10) & GENMASK(24, 16)));
+
+ if (wdidx == eth->reset.wdidx && wtx_busy && cdm_full && oq_free) {
+ if (++eth->reset.wdma_hang_count > 2) {
+ eth->reset.wdma_hang_count = 0;
+ ret = true;
+ }
+ goto out;
+ }
+
+ /* QDMA sanity checks */
+ qfsm_hang = !!mtk_r32(eth, reg_map->qdma.qtx_cfg + 0x234);
+ qfwd_hang = !mtk_r32(eth, reg_map->qdma.qtx_cfg + 0x308);
+
+ gdm1_tx = FIELD_GET(GENMASK(31, 16), mtk_r32(eth, MTK_FE_GDM1_FSM)) > 0;
+ gdm2_tx = FIELD_GET(GENMASK(31, 16), mtk_r32(eth, MTK_FE_GDM2_FSM)) > 0;
+ gmac1_tx = FIELD_GET(GENMASK(31, 24), mtk_r32(eth, MTK_MAC_FSM(0))) != 1;
+ gmac2_tx = FIELD_GET(GENMASK(31, 24), mtk_r32(eth, MTK_MAC_FSM(1))) != 1;
+ gdm1_fc = mtk_r32(eth, reg_map->gdm1_cnt + 0x24);
+ gdm2_fc = mtk_r32(eth, reg_map->gdm1_cnt + 0x64);
+
+ if (qfsm_hang && qfwd_hang &&
+ ((gdm1_tx && gmac1_tx && gdm1_fc < 1) ||
+ (gdm2_tx && gmac2_tx && gdm2_fc < 1))) {
+ if (++eth->reset.qdma_hang_count > 2) {
+ eth->reset.qdma_hang_count = 0;
+ ret = true;
+ }
+ goto out;
+ }
+
+ /* ADMA sanity checks */
+ oq_hang = !!(mtk_r32(eth, reg_map->pse_oq_sta) & GENMASK(8, 0));
+ cdm1_busy = !!(mtk_r32(eth, MTK_FE_CDM1_FSM) & GENMASK(31, 16));
+ adma_busy = !(mtk_r32(eth, reg_map->pdma.adma_rx_dbg0) & GENMASK(4, 0)) &&
+ !(mtk_r32(eth, reg_map->pdma.adma_rx_dbg0) & BIT(6));
+
+ if (oq_hang && cdm1_busy && adma_busy) {
+ if (++eth->reset.adma_hang_count > 2) {
+ eth->reset.adma_hang_count = 0;
+ ret = true;
+ }
+ goto out;
+ }
+
+ eth->reset.wdma_hang_count = 0;
+ eth->reset.qdma_hang_count = 0;
+ eth->reset.adma_hang_count = 0;
+out:
+ eth->reset.wdidx = wdidx;
+
+ return ret;
+}
+
+static void mtk_hw_reset_monitor_work(struct work_struct *work)
+{
+ struct delayed_work *del_work = to_delayed_work(work);
+ struct mtk_eth *eth = container_of(del_work, struct mtk_eth,
+ reset.monitor_work);
+
+ if (test_bit(MTK_RESETTING, &eth->state))
+ goto out;
+
+ /* DMA stuck checks */
+ if (mtk_hw_check_dma_hang(eth))
+ schedule_work(&eth->pending_work);
+
+out:
+ schedule_delayed_work(&eth->reset.monitor_work,
+ MTK_DMA_MONITOR_TIMEOUT);
+}
+
+static int mtk_hw_init(struct mtk_eth *eth, bool reset)
{
u32 dma_mask = ETHSYS_DMA_AG_MAP_PDMA | ETHSYS_DMA_AG_MAP_QDMA |
ETHSYS_DMA_AG_MAP_PPE;
const struct mtk_reg_map *reg_map = eth->soc->reg_map;
int i, val, ret;
- if (test_and_set_bit(MTK_HW_INIT, &eth->state))
+ if (!reset && test_and_set_bit(MTK_HW_INIT, &eth->state))
return 0;
- pm_runtime_enable(eth->dev);
- pm_runtime_get_sync(eth->dev);
+ if (!reset) {
+ pm_runtime_enable(eth->dev);
+ pm_runtime_get_sync(eth->dev);
- ret = mtk_clk_enable(eth);
- if (ret)
- goto err_disable_pm;
+ ret = mtk_clk_enable(eth);
+ if (ret)
+ goto err_disable_pm;
+ }
if (eth->ethsys)
regmap_update_bits(eth->ethsys, ETHSYS_DMA_AG_MAP, dma_mask,
@@ -3510,22 +3746,14 @@ static int mtk_hw_init(struct mtk_eth *eth)
return 0;
}
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
- regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, 0);
- val = RSTCTRL_PPE0_V2;
- } else {
- val = RSTCTRL_PPE0;
- }
-
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
- val |= RSTCTRL_PPE1;
+ msleep(100);
- ethsys_reset(eth, RSTCTRL_ETH | RSTCTRL_FE | val);
+ if (reset)
+ mtk_hw_warm_reset(eth);
+ else
+ mtk_hw_reset(eth);
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
- regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN,
- 0x3ffffff);
-
/* Set FE to PDMAv2 if necessary */
val = mtk_r32(eth, MTK_FE_GLO_MISC);
mtk_w32(eth, val | BIT(4), MTK_FE_GLO_MISC);
@@ -3627,8 +3855,10 @@ static int mtk_hw_init(struct mtk_eth *eth)
return 0;
err_disable_pm:
- pm_runtime_put_sync(eth->dev);
- pm_runtime_disable(eth->dev);
+ if (!reset) {
+ pm_runtime_put_sync(eth->dev);
+ pm_runtime_disable(eth->dev);
+ }
return ret;
}
@@ -3707,52 +3937,86 @@ static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return -EOPNOTSUPP;
}
+static void mtk_prepare_for_reset(struct mtk_eth *eth)
+{
+ u32 val;
+ int i;
+
+ /* disabe FE P3 and P4 */
+ val = mtk_r32(eth, MTK_FE_GLO_CFG) | MTK_FE_LINK_DOWN_P3;
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
+ val |= MTK_FE_LINK_DOWN_P4;
+ mtk_w32(eth, val, MTK_FE_GLO_CFG);
+
+ /* adjust PPE configurations to prepare for reset */
+ for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
+ mtk_ppe_prepare_reset(eth->ppe[i]);
+
+ /* disable NETSYS interrupts */
+ mtk_w32(eth, 0, MTK_FE_INT_ENABLE);
+
+ /* force link down GMAC */
+ for (i = 0; i < 2; i++) {
+ val = mtk_r32(eth, MTK_MAC_MCR(i)) & ~MAC_MCR_FORCE_LINK;
+ mtk_w32(eth, val, MTK_MAC_MCR(i));
+ }
+}
+
static void mtk_pending_work(struct work_struct *work)
{
struct mtk_eth *eth = container_of(work, struct mtk_eth, pending_work);
- int err, i;
unsigned long restart = 0;
+ u32 val;
+ int i;
rtnl_lock();
-
- dev_dbg(eth->dev, "[%s][%d] reset\n", __func__, __LINE__);
set_bit(MTK_RESETTING, &eth->state);
+ mtk_prepare_for_reset(eth);
+ mtk_wed_fe_reset();
+ /* Run again reset preliminary configuration in order to avoid any
+ * possible race during FE reset since it can run releasing RTNL lock.
+ */
+ mtk_prepare_for_reset(eth);
+
/* stop all devices to make sure that dma is properly shut down */
for (i = 0; i < MTK_MAC_COUNT; i++) {
- if (!eth->netdev[i])
+ if (!eth->netdev[i] || !netif_running(eth->netdev[i]))
continue;
+
mtk_stop(eth->netdev[i]);
__set_bit(i, &restart);
}
- dev_dbg(eth->dev, "[%s][%d] mtk_stop ends\n", __func__, __LINE__);
- /* restart underlying hardware such as power, clock, pin mux
- * and the connected phy
- */
- mtk_hw_deinit(eth);
+ usleep_range(15000, 16000);
if (eth->dev->pins)
pinctrl_select_state(eth->dev->pins->p,
eth->dev->pins->default_state);
- mtk_hw_init(eth);
+ mtk_hw_init(eth, true);
/* restart DMA and enable IRQs */
for (i = 0; i < MTK_MAC_COUNT; i++) {
if (!test_bit(i, &restart))
continue;
- err = mtk_open(eth->netdev[i]);
- if (err) {
+
+ if (mtk_open(eth->netdev[i])) {
netif_alert(eth, ifup, eth->netdev[i],
- "Driver up/down cycle failed, closing device.\n");
+ "Driver up/down cycle failed\n");
dev_close(eth->netdev[i]);
}
}
- dev_dbg(eth->dev, "[%s][%d] reset done\n", __func__, __LINE__);
+ /* enabe FE P3 and P4 */
+ val = mtk_r32(eth, MTK_FE_GLO_CFG) & ~MTK_FE_LINK_DOWN_P3;
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
+ val &= ~MTK_FE_LINK_DOWN_P4;
+ mtk_w32(eth, val, MTK_FE_GLO_CFG);
clear_bit(MTK_RESETTING, &eth->state);
+ mtk_wed_fe_reset_complete();
+
rtnl_unlock();
}
@@ -3797,6 +4061,7 @@ static int mtk_cleanup(struct mtk_eth *eth)
mtk_unreg_dev(eth);
mtk_free_dev(eth);
cancel_work_sync(&eth->pending_work);
+ cancel_delayed_work_sync(&eth->reset.monitor_work);
return 0;
}
@@ -4186,6 +4451,12 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
register_netdevice_notifier(&mac->device_notifier);
}
+ if (mtk_page_pool_enabled(eth))
+ eth->netdev[id]->xdp_features = NETDEV_XDP_ACT_BASIC |
+ NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT |
+ NETDEV_XDP_ACT_NDO_XMIT_SG;
+
return 0;
free_netdev:
@@ -4251,6 +4522,7 @@ static int mtk_probe(struct platform_device *pdev)
eth->rx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
INIT_WORK(&eth->rx_dim.work, mtk_dim_rx);
+ INIT_DELAYED_WORK(&eth->reset.monitor_work, mtk_hw_reset_monitor_work);
eth->tx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
INIT_WORK(&eth->tx_dim.work, mtk_dim_tx);
@@ -4364,7 +4636,7 @@ static int mtk_probe(struct platform_device *pdev)
eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);
INIT_WORK(&eth->pending_work, mtk_pending_work);
- err = mtk_hw_init(eth);
+ err = mtk_hw_init(eth, false);
if (err)
goto err_wed_exit;
@@ -4453,6 +4725,8 @@ static int mtk_probe(struct platform_device *pdev)
netif_napi_add(&eth->dummy_dev, &eth->rx_napi, mtk_napi_rx);
platform_set_drvdata(pdev, eth);
+ schedule_delayed_work(&eth->reset.monitor_work,
+ MTK_DMA_MONITOR_TIMEOUT);
return 0;
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index 18a50529ce7b..afc9d52e79bf 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -77,12 +77,24 @@
#define MTK_HW_LRO_REPLACE_DELTA 1000
#define MTK_HW_LRO_SDL_REMAIN_ROOM 1522
+/* Frame Engine Global Configuration */
+#define MTK_FE_GLO_CFG 0x00
+#define MTK_FE_LINK_DOWN_P3 BIT(11)
+#define MTK_FE_LINK_DOWN_P4 BIT(12)
+
/* Frame Engine Global Reset Register */
#define MTK_RST_GL 0x04
#define RST_GL_PSE BIT(0)
/* Frame Engine Interrupt Status Register */
#define MTK_INT_STATUS2 0x08
+#define MTK_FE_INT_ENABLE 0x0c
+#define MTK_FE_INT_FQ_EMPTY BIT(8)
+#define MTK_FE_INT_TSO_FAIL BIT(12)
+#define MTK_FE_INT_TSO_ILLEGAL BIT(13)
+#define MTK_FE_INT_TSO_ALIGN BIT(14)
+#define MTK_FE_INT_RFIFO_OV BIT(18)
+#define MTK_FE_INT_RFIFO_UF BIT(19)
#define MTK_GDM1_AF BIT(28)
#define MTK_GDM2_AF BIT(29)
@@ -272,6 +284,8 @@
#define MTK_RX_DONE_INT_V2 BIT(14)
+#define MTK_CDM_TXFIFO_RDY BIT(7)
+
/* QDMA Interrupt grouping registers */
#define MTK_RLS_DONE_INT BIT(0)
@@ -519,7 +533,7 @@
#define SGMII_SPEED_10 FIELD_PREP(SGMII_SPEED_MASK, 0)
#define SGMII_SPEED_100 FIELD_PREP(SGMII_SPEED_MASK, 1)
#define SGMII_SPEED_1000 FIELD_PREP(SGMII_SPEED_MASK, 2)
-#define SGMII_DUPLEX_FULL BIT(4)
+#define SGMII_DUPLEX_HALF BIT(4)
#define SGMII_IF_MODE_BIT5 BIT(5)
#define SGMII_REMOTE_FAULT_DIS BIT(8)
#define SGMII_CODE_SYNC_SET_VAL BIT(9)
@@ -562,6 +576,17 @@
#define MT7628_SDM_RBCNT (MT7628_SDM_OFFSET + 0x10c)
#define MT7628_SDM_CS_ERR (MT7628_SDM_OFFSET + 0x110)
+#define MTK_FE_CDM1_FSM 0x220
+#define MTK_FE_CDM2_FSM 0x224
+#define MTK_FE_CDM3_FSM 0x238
+#define MTK_FE_CDM4_FSM 0x298
+#define MTK_FE_CDM5_FSM 0x318
+#define MTK_FE_CDM6_FSM 0x328
+#define MTK_FE_GDM1_FSM 0x228
+#define MTK_FE_GDM2_FSM 0x22C
+
+#define MTK_MAC_FSM(x) (0x1010C + ((x) * 0x100))
+
struct mtk_rx_dma {
unsigned int rxd1;
unsigned int rxd2;
@@ -958,6 +983,7 @@ struct mtk_reg_map {
u32 delay_irq; /* delay interrupt */
u32 irq_status; /* interrupt status */
u32 irq_mask; /* interrupt mask */
+ u32 adma_rx_dbg0;
u32 int_grp;
} pdma;
struct {
@@ -986,6 +1012,8 @@ struct mtk_reg_map {
u32 gdma_to_ppe;
u32 ppe_base;
u32 wdma_base[2];
+ u32 pse_iq_sta;
+ u32 pse_oq_sta;
};
/* struct mtk_eth_data - This is the structure holding all differences
@@ -1028,6 +1056,8 @@ struct mtk_soc_data {
} txrx;
};
+#define MTK_DMA_MONITOR_TIMEOUT msecs_to_jiffies(1000)
+
/* currently no SoC has more than 2 macs */
#define MTK_MAX_DEVS 2
@@ -1036,11 +1066,13 @@ struct mtk_soc_data {
* @regmap: The register map pointing at the range used to setup
* SGMII modes
* @ana_rgc3: The offset refers to register ANA_RGC3 related to regmap
+ * @interface: Currently configured interface mode
* @pcs: Phylink PCS structure
*/
struct mtk_pcs {
struct regmap *regmap;
u32 ana_rgc3;
+ phy_interface_t interface;
struct phylink_pcs pcs;
};
@@ -1152,6 +1184,14 @@ struct mtk_eth {
struct rhashtable flow_table;
struct bpf_prog __rcu *prog;
+
+ struct {
+ struct delayed_work monitor_work;
+ u32 wdidx;
+ u8 wdma_hang_count;
+ u8 qdma_hang_count;
+ u8 adma_hang_count;
+ } reset;
};
/* struct mtk_mac - the structure that holds the info about the MACs of the
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.c b/drivers/net/ethernet/mediatek/mtk_ppe.c
index 269208a841c7..6883eb34cd8b 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
@@ -615,8 +615,7 @@ mtk_foe_entry_commit_subflow(struct mtk_ppe *ppe, struct mtk_flow_entry *entry,
u32 ib1_mask = mtk_get_ib1_pkt_type_mask(ppe->eth) | MTK_FOE_IB1_UDP;
int type;
- flow_info = kzalloc(offsetof(struct mtk_flow_entry, l2_data.end),
- GFP_ATOMIC);
+ flow_info = kzalloc(sizeof(*flow_info), GFP_ATOMIC);
if (!flow_info)
return;
@@ -730,6 +729,33 @@ int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
return __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
}
+int mtk_ppe_prepare_reset(struct mtk_ppe *ppe)
+{
+ if (!ppe)
+ return -EINVAL;
+
+ /* disable KA */
+ ppe_clear(ppe, MTK_PPE_TB_CFG, MTK_PPE_TB_CFG_KEEPALIVE);
+ ppe_clear(ppe, MTK_PPE_BIND_LMT1, MTK_PPE_NTU_KEEPALIVE);
+ ppe_w32(ppe, MTK_PPE_KEEPALIVE, 0);
+ usleep_range(10000, 11000);
+
+ /* set KA timer to maximum */
+ ppe_set(ppe, MTK_PPE_BIND_LMT1, MTK_PPE_NTU_KEEPALIVE);
+ ppe_w32(ppe, MTK_PPE_KEEPALIVE, 0xffffffff);
+
+ /* set KA tick select */
+ ppe_set(ppe, MTK_PPE_TB_CFG, MTK_PPE_TB_TICK_SEL);
+ ppe_set(ppe, MTK_PPE_TB_CFG, MTK_PPE_TB_CFG_KEEPALIVE);
+ usleep_range(10000, 11000);
+
+ /* disable scan mode */
+ ppe_clear(ppe, MTK_PPE_TB_CFG, MTK_PPE_TB_CFG_SCAN_MODE);
+ usleep_range(10000, 11000);
+
+ return mtk_ppe_wait_busy(ppe);
+}
+
struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base,
int version, int index)
{
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.h b/drivers/net/ethernet/mediatek/mtk_ppe.h
index ea64fac1d425..5e8bc48252b1 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe.h
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.h
@@ -279,7 +279,6 @@ struct mtk_flow_entry {
struct {
struct mtk_flow_entry *base_flow;
struct hlist_node list;
- struct {} end;
} l2_data;
};
struct rhash_head node;
@@ -309,6 +308,7 @@ struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base,
void mtk_ppe_deinit(struct mtk_eth *eth);
void mtk_ppe_start(struct mtk_ppe *ppe);
int mtk_ppe_stop(struct mtk_ppe *ppe);
+int mtk_ppe_prepare_reset(struct mtk_ppe *ppe);
void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash);
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_regs.h b/drivers/net/ethernet/mediatek/mtk_ppe_regs.h
index 59596d823d8b..0fdb983b0a88 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe_regs.h
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_regs.h
@@ -58,6 +58,12 @@
#define MTK_PPE_TB_CFG_SCAN_MODE GENMASK(17, 16)
#define MTK_PPE_TB_CFG_HASH_DEBUG GENMASK(19, 18)
#define MTK_PPE_TB_CFG_INFO_SEL BIT(20)
+#define MTK_PPE_TB_TICK_SEL BIT(24)
+
+#define MTK_PPE_BIND_LMT1 0x230
+#define MTK_PPE_NTU_KEEPALIVE GENMASK(23, 16)
+
+#define MTK_PPE_KEEPALIVE 0x234
enum {
MTK_PPE_SCAN_MODE_DISABLED,
diff --git a/drivers/net/ethernet/mediatek/mtk_sgmii.c b/drivers/net/ethernet/mediatek/mtk_sgmii.c
index 5c286f2c9418..bb00de1003ac 100644
--- a/drivers/net/ethernet/mediatek/mtk_sgmii.c
+++ b/drivers/net/ethernet/mediatek/mtk_sgmii.c
@@ -43,11 +43,6 @@ static int mtk_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
int advertise, link_timer;
bool changed, use_an;
- if (interface == PHY_INTERFACE_MODE_2500BASEX)
- rgc3 = RG_PHY_SPEED_3_125G;
- else
- rgc3 = 0;
-
advertise = phylink_mii_c22_pcs_encode_advertisement(interface,
advertising);
if (advertise < 0)
@@ -88,9 +83,22 @@ static int mtk_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
bmcr = 0;
}
- /* Configure the underlying interface speed */
- regmap_update_bits(mpcs->regmap, mpcs->ana_rgc3,
- RG_PHY_SPEED_3_125G, rgc3);
+ if (mpcs->interface != interface) {
+ /* PHYA power down */
+ regmap_update_bits(mpcs->regmap, SGMSYS_QPHY_PWR_STATE_CTRL,
+ SGMII_PHYA_PWD, SGMII_PHYA_PWD);
+
+ if (interface == PHY_INTERFACE_MODE_2500BASEX)
+ rgc3 = RG_PHY_SPEED_3_125G;
+ else
+ rgc3 = 0;
+
+ /* Configure the underlying interface speed */
+ regmap_update_bits(mpcs->regmap, mpcs->ana_rgc3,
+ RG_PHY_SPEED_3_125G, rgc3);
+
+ mpcs->interface = interface;
+ }
/* Update the advertisement, noting whether it has changed */
regmap_update_bits_check(mpcs->regmap, SGMSYS_PCS_ADVERTISE,
@@ -108,9 +116,17 @@ static int mtk_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
regmap_update_bits(mpcs->regmap, SGMSYS_PCS_CONTROL_1,
SGMII_AN_RESTART | SGMII_AN_ENABLE, bmcr);
- /* Release PHYA power down state */
- regmap_update_bits(mpcs->regmap, SGMSYS_QPHY_PWR_STATE_CTRL,
- SGMII_PHYA_PWD, 0);
+ /* Release PHYA power down state
+ * Only removing bit SGMII_PHYA_PWD isn't enough.
+ * There are cases when the SGMII_PHYA_PWD register contains 0x9 which
+ * prevents SGMII from working. The SGMII still shows link but no traffic
+ * can flow. Writing 0x0 to the PHYA_PWD register fix the issue. 0x0 was
+ * taken from a good working state of the SGMII interface.
+ * Unknown how much the QPHY needs but it is racy without a sleep.
+ * Tested on mt7622 & mt7986.
+ */
+ usleep_range(50, 100);
+ regmap_write(mpcs->regmap, SGMSYS_QPHY_PWR_STATE_CTRL, 0);
return changed;
}
@@ -138,11 +154,11 @@ static void mtk_pcs_link_up(struct phylink_pcs *pcs, unsigned int mode,
else
sgm_mode = SGMII_SPEED_1000;
- if (duplex == DUPLEX_FULL)
- sgm_mode |= SGMII_DUPLEX_FULL;
+ if (duplex != DUPLEX_FULL)
+ sgm_mode |= SGMII_DUPLEX_HALF;
regmap_update_bits(mpcs->regmap, SGMSYS_SGMII_MODE,
- SGMII_DUPLEX_FULL | SGMII_SPEED_MASK,
+ SGMII_DUPLEX_HALF | SGMII_SPEED_MASK,
sgm_mode);
}
}
@@ -171,6 +187,8 @@ int mtk_sgmii_init(struct mtk_sgmii *ss, struct device_node *r, u32 ana_rgc3)
return PTR_ERR(ss->pcs[i].regmap);
ss->pcs[i].pcs.ops = &mtk_pcs_ops;
+ ss->pcs[i].pcs.poll = true;
+ ss->pcs[i].interface = PHY_INTERFACE_MODE_NA;
}
return 0;
diff --git a/drivers/net/ethernet/mediatek/mtk_star_emac.c b/drivers/net/ethernet/mediatek/mtk_star_emac.c
index 7050351250b7..02c03325911f 100644
--- a/drivers/net/ethernet/mediatek/mtk_star_emac.c
+++ b/drivers/net/ethernet/mediatek/mtk_star_emac.c
@@ -1378,9 +1378,6 @@ static int mtk_star_mdio_read(struct mii_bus *mii, int phy_id, int regnum)
unsigned int val, data;
int ret;
- if (regnum & MII_ADDR_C45)
- return -EOPNOTSUPP;
-
mtk_star_mdio_rwok_clear(priv);
val = (regnum << MTK_STAR_OFF_PHY_CTRL0_PREG);
@@ -1407,9 +1404,6 @@ static int mtk_star_mdio_write(struct mii_bus *mii, int phy_id,
struct mtk_star_priv *priv = mii->priv;
unsigned int val;
- if (regnum & MII_ADDR_C45)
- return -EOPNOTSUPP;
-
mtk_star_mdio_rwok_clear(priv);
val = data;
diff --git a/drivers/net/ethernet/mediatek/mtk_wed.c b/drivers/net/ethernet/mediatek/mtk_wed.c
index a6271449617f..95d890870984 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
@@ -206,6 +206,48 @@ mtk_wed_wo_reset(struct mtk_wed_device *dev)
iounmap(reg);
}
+void mtk_wed_fe_reset(void)
+{
+ int i;
+
+ mutex_lock(&hw_lock);
+
+ for (i = 0; i < ARRAY_SIZE(hw_list); i++) {
+ struct mtk_wed_hw *hw = hw_list[i];
+ struct mtk_wed_device *dev = hw->wed_dev;
+ int err;
+
+ if (!dev || !dev->wlan.reset)
+ continue;
+
+ /* reset callback blocks until WLAN reset is completed */
+ err = dev->wlan.reset(dev);
+ if (err)
+ dev_err(dev->dev, "wlan reset failed: %d\n", err);
+ }
+
+ mutex_unlock(&hw_lock);
+}
+
+void mtk_wed_fe_reset_complete(void)
+{
+ int i;
+
+ mutex_lock(&hw_lock);
+
+ for (i = 0; i < ARRAY_SIZE(hw_list); i++) {
+ struct mtk_wed_hw *hw = hw_list[i];
+ struct mtk_wed_device *dev = hw->wed_dev;
+
+ if (!dev || !dev->wlan.reset_complete)
+ continue;
+
+ dev->wlan.reset_complete(dev);
+ }
+
+ mutex_unlock(&hw_lock);
+}
+
static struct mtk_wed_hw *
mtk_wed_assign(struct mtk_wed_device *dev)
{
@@ -745,7 +787,6 @@ mtk_wed_rro_ring_alloc(struct mtk_wed_device *dev, struct mtk_wed_ring *ring,
ring->desc_size = sizeof(*ring->desc);
ring->size = size;
- memset(ring->desc, 0, size);
return 0;
}
diff --git a/drivers/net/ethernet/mediatek/mtk_wed.h b/drivers/net/ethernet/mediatek/mtk_wed.h
index e012b8a82133..43ab77eaf683 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed.h
+++ b/drivers/net/ethernet/mediatek/mtk_wed.h
@@ -128,6 +128,8 @@ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
void mtk_wed_exit(void);
int mtk_wed_flow_add(int index);
void mtk_wed_flow_remove(int index);
+void mtk_wed_fe_reset(void);
+void mtk_wed_fe_reset_complete(void);
#else
static inline void
mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
@@ -147,6 +149,13 @@ static inline void mtk_wed_flow_remove(int index)
{
}
+static inline void mtk_wed_fe_reset(void)
+{
+}
+
+static inline void mtk_wed_fe_reset_complete(void)
+{
+}
#endif
#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/net/ethernet/mediatek/mtk_wed_wo.c b/drivers/net/ethernet/mediatek/mtk_wed_wo.c
index a0a39643caf7..69fba29055e9 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed_wo.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed_wo.c
@@ -138,7 +138,6 @@ mtk_wed_wo_queue_refill(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
enum dma_data_direction dir = rx ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
int n_buf = 0;
- spin_lock_bh(&q->lock);
while (q->queued < q->n_desc) {
struct mtk_wed_wo_queue_entry *entry;
dma_addr_t addr;
@@ -172,7 +171,6 @@ mtk_wed_wo_queue_refill(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
q->queued++;
n_buf++;
}
- spin_unlock_bh(&q->lock);
return n_buf;
}
@@ -260,7 +258,6 @@ mtk_wed_wo_queue_alloc(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
int n_desc, int buf_size, int index,
struct mtk_wed_wo_queue_regs *regs)
{
- spin_lock_init(&q->lock);
q->regs = *regs;
q->n_desc = n_desc;
q->buf_size = buf_size;
@@ -292,7 +289,6 @@ mtk_wed_wo_queue_tx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
struct page *page;
int i;
- spin_lock_bh(&q->lock);
for (i = 0; i < q->n_desc; i++) {
struct mtk_wed_wo_queue_entry *entry = &q->entry[i];
@@ -301,7 +297,6 @@ mtk_wed_wo_queue_tx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
skb_free_frag(entry->buf);
entry->buf = NULL;
}
- spin_unlock_bh(&q->lock);
if (!q->cache.va)
return;
@@ -316,7 +311,6 @@ mtk_wed_wo_queue_rx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
{
struct page *page;
- spin_lock_bh(&q->lock);
for (;;) {
void *buf = mtk_wed_wo_dequeue(wo, q, NULL, true);
@@ -325,7 +319,6 @@ mtk_wed_wo_queue_rx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
skb_free_frag(buf);
}
- spin_unlock_bh(&q->lock);
if (!q->cache.va)
return;
@@ -351,8 +344,6 @@ int mtk_wed_wo_queue_tx_skb(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
int ret = 0, index;
u32 ctrl;
- spin_lock_bh(&q->lock);
-
q->tail = mtk_wed_mmio_r32(wo, q->regs.dma_idx);
index = (q->head + 1) % q->n_desc;
if (q->tail == index) {
@@ -383,8 +374,6 @@ int mtk_wed_wo_queue_tx_skb(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
mtk_wed_wo_queue_kick(wo, q, q->head);
mtk_wed_wo_kickout(wo);
out:
- spin_unlock_bh(&q->lock);
-
dev_kfree_skb(skb);
return ret;
diff --git a/drivers/net/ethernet/mediatek/mtk_wed_wo.h b/drivers/net/ethernet/mediatek/mtk_wed_wo.h
index c8fb85795864..dbcf42ce9173 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed_wo.h
+++ b/drivers/net/ethernet/mediatek/mtk_wed_wo.h
@@ -211,7 +211,6 @@ struct mtk_wed_wo_queue {
struct mtk_wed_wo_queue_regs regs;
struct page_frag_cache cache;
- spinlock_t lock;
struct mtk_wed_wo_queue_desc *desc;
dma_addr_t desc_dma;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_clock.c b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
index 98b5ffb4d729..9e3b76182088 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_clock.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
@@ -58,9 +58,7 @@ u64 mlx4_en_get_cqe_ts(struct mlx4_cqe *cqe)
return hi | lo;
}
-void mlx4_en_fill_hwtstamps(struct mlx4_en_dev *mdev,
- struct skb_shared_hwtstamps *hwts,
- u64 timestamp)
+u64 mlx4_en_get_hwtstamp(struct mlx4_en_dev *mdev, u64 timestamp)
{
unsigned int seq;
u64 nsec;
@@ -70,8 +68,15 @@ void mlx4_en_fill_hwtstamps(struct mlx4_en_dev *mdev,
nsec = timecounter_cyc2time(&mdev->clock, timestamp);
} while (read_seqretry(&mdev->clock_lock, seq));
+ return ns_to_ktime(nsec);
+}
+
+void mlx4_en_fill_hwtstamps(struct mlx4_en_dev *mdev,
+ struct skb_shared_hwtstamps *hwts,
+ u64 timestamp)
+{
memset(hwts, 0, sizeof(struct skb_shared_hwtstamps));
- hwts->hwtstamp = ns_to_ktime(nsec);
+ hwts->hwtstamp = mlx4_en_get_hwtstamp(mdev, timestamp);
}
/**
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 8800d3f1f55c..e11bc0ac880e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -2889,6 +2889,11 @@ static const struct net_device_ops mlx4_netdev_ops_master = {
.ndo_bpf = mlx4_xdp,
};
+static const struct xdp_metadata_ops mlx4_xdp_metadata_ops = {
+ .xmo_rx_timestamp = mlx4_en_xdp_rx_timestamp,
+ .xmo_rx_hash = mlx4_en_xdp_rx_hash,
+};
+
struct mlx4_en_bond {
struct work_struct work;
struct mlx4_en_priv *priv;
@@ -3310,6 +3315,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
dev->netdev_ops = &mlx4_netdev_ops_master;
else
dev->netdev_ops = &mlx4_netdev_ops;
+ dev->xdp_metadata_ops = &mlx4_xdp_metadata_ops;
dev->watchdog_timeo = MLX4_EN_WATCHDOG_TIMEOUT;
netif_set_real_num_tx_queues(dev, priv->tx_ring_num[TX]);
netif_set_real_num_rx_queues(dev, priv->rx_ring_num);
@@ -3410,6 +3416,8 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
priv->rss_hash_fn = ETH_RSS_HASH_TOP;
}
+ dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT;
+
/* MTU range: 68 - hw-specific max */
dev->min_mtu = ETH_MIN_MTU;
dev->max_mtu = priv->max_mtu;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 8f762fc170b3..0869d4fff17b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -661,9 +661,41 @@ static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va,
#define MLX4_CQE_STATUS_IP_ANY (MLX4_CQE_STATUS_IPV4)
#endif
+struct mlx4_en_xdp_buff {
+ struct xdp_buff xdp;
+ struct mlx4_cqe *cqe;
+ struct mlx4_en_dev *mdev;
+ struct mlx4_en_rx_ring *ring;
+ struct net_device *dev;
+};
+
+int mlx4_en_xdp_rx_timestamp(const struct xdp_md *ctx, u64 *timestamp)
+{
+ struct mlx4_en_xdp_buff *_ctx = (void *)ctx;
+
+ if (unlikely(_ctx->ring->hwtstamp_rx_filter != HWTSTAMP_FILTER_ALL))
+ return -EOPNOTSUPP;
+
+ *timestamp = mlx4_en_get_hwtstamp(_ctx->mdev,
+ mlx4_en_get_cqe_ts(_ctx->cqe));
+ return 0;
+}
+
+int mlx4_en_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash)
+{
+ struct mlx4_en_xdp_buff *_ctx = (void *)ctx;
+
+ if (unlikely(!(_ctx->dev->features & NETIF_F_RXHASH)))
+ return -EOPNOTSUPP;
+
+ *hash = be32_to_cpu(_ctx->cqe->immed_rss_invalid);
+ return 0;
+}
+
int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_xdp_buff mxbuf = {};
int factor = priv->cqe_factor;
struct mlx4_en_rx_ring *ring;
struct bpf_prog *xdp_prog;
@@ -671,7 +703,6 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
bool doorbell_pending;
bool xdp_redir_flush;
struct mlx4_cqe *cqe;
- struct xdp_buff xdp;
int polled = 0;
int index;
@@ -681,7 +712,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
ring = priv->rx_ring[cq_ring];
xdp_prog = rcu_dereference_bh(ring->xdp_prog);
- xdp_init_buff(&xdp, priv->frag_info[0].frag_stride, &ring->xdp_rxq);
+ xdp_init_buff(&mxbuf.xdp, priv->frag_info[0].frag_stride, &ring->xdp_rxq);
doorbell_pending = false;
xdp_redir_flush = false;
@@ -776,24 +807,28 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
priv->frag_info[0].frag_size,
DMA_FROM_DEVICE);
- xdp_prepare_buff(&xdp, va - frags[0].page_offset,
- frags[0].page_offset, length, false);
- orig_data = xdp.data;
-
- act = bpf_prog_run_xdp(xdp_prog, &xdp);
-
- length = xdp.data_end - xdp.data;
- if (xdp.data != orig_data) {
- frags[0].page_offset = xdp.data -
- xdp.data_hard_start;
- va = xdp.data;
+ xdp_prepare_buff(&mxbuf.xdp, va - frags[0].page_offset,
+ frags[0].page_offset, length, true);
+ orig_data = mxbuf.xdp.data;
+ mxbuf.cqe = cqe;
+ mxbuf.mdev = priv->mdev;
+ mxbuf.ring = ring;
+ mxbuf.dev = dev;
+
+ act = bpf_prog_run_xdp(xdp_prog, &mxbuf.xdp);
+
+ length = mxbuf.xdp.data_end - mxbuf.xdp.data;
+ if (mxbuf.xdp.data != orig_data) {
+ frags[0].page_offset = mxbuf.xdp.data -
+ mxbuf.xdp.data_hard_start;
+ va = mxbuf.xdp.data;
}
switch (act) {
case XDP_PASS:
break;
case XDP_REDIRECT:
- if (likely(!xdp_do_redirect(dev, &xdp, xdp_prog))) {
+ if (likely(!xdp_do_redirect(dev, &mxbuf.xdp, xdp_prog))) {
ring->xdp_redirect++;
xdp_redir_flush = true;
frags[0].page = NULL;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index c5758637b7be..2f79378fbf6e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -699,32 +699,32 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc,
inl->byte_count = cpu_to_be32(1 << 31 | skb->len);
} else {
inl->byte_count = cpu_to_be32(1 << 31 | MIN_PKT_LEN);
- memset(((void *)(inl + 1)) + skb->len, 0,
+ memset(inl->data + skb->len, 0,
MIN_PKT_LEN - skb->len);
}
- skb_copy_from_linear_data(skb, inl + 1, hlen);
+ skb_copy_from_linear_data(skb, inl->data, hlen);
if (shinfo->nr_frags)
- memcpy(((void *)(inl + 1)) + hlen, fragptr,
+ memcpy(inl->data + hlen, fragptr,
skb_frag_size(&shinfo->frags[0]));
} else {
inl->byte_count = cpu_to_be32(1 << 31 | spc);
if (hlen <= spc) {
- skb_copy_from_linear_data(skb, inl + 1, hlen);
+ skb_copy_from_linear_data(skb, inl->data, hlen);
if (hlen < spc) {
- memcpy(((void *)(inl + 1)) + hlen,
+ memcpy(inl->data + hlen,
fragptr, spc - hlen);
fragptr += spc - hlen;
}
- inl = (void *) (inl + 1) + spc;
- memcpy(((void *)(inl + 1)), fragptr, skb->len - spc);
+ inl = (void *)inl->data + spc;
+ memcpy(inl->data, fragptr, skb->len - spc);
} else {
- skb_copy_from_linear_data(skb, inl + 1, spc);
- inl = (void *) (inl + 1) + spc;
- skb_copy_from_linear_data_offset(skb, spc, inl + 1,
+ skb_copy_from_linear_data(skb, inl->data, spc);
+ inl = (void *)inl->data + spc;
+ skb_copy_from_linear_data_offset(skb, spc, inl->data,
hlen - spc);
if (shinfo->nr_frags)
- memcpy(((void *)(inl + 1)) + hlen - spc,
+ memcpy(inl->data + hlen - spc,
fragptr,
skb_frag_size(&shinfo->frags[0]));
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 3ae246391549..277738c50c56 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -265,29 +265,29 @@ static void mlx4_devlink_set_params_init_values(struct devlink *devlink)
union devlink_param_value value;
value.vbool = !!mlx4_internal_err_reset;
- devlink_param_driverinit_value_set(devlink,
- DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET,
- value);
+ devl_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET,
+ value);
value.vu32 = 1UL << log_num_mac;
- devlink_param_driverinit_value_set(devlink,
- DEVLINK_PARAM_GENERIC_ID_MAX_MACS,
- value);
+ devl_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_MAX_MACS,
+ value);
value.vbool = enable_64b_cqe_eqe;
- devlink_param_driverinit_value_set(devlink,
- MLX4_DEVLINK_PARAM_ID_ENABLE_64B_CQE_EQE,
- value);
+ devl_param_driverinit_value_set(devlink,
+ MLX4_DEVLINK_PARAM_ID_ENABLE_64B_CQE_EQE,
+ value);
value.vbool = enable_4k_uar;
- devlink_param_driverinit_value_set(devlink,
- MLX4_DEVLINK_PARAM_ID_ENABLE_4K_UAR,
- value);
+ devl_param_driverinit_value_set(devlink,
+ MLX4_DEVLINK_PARAM_ID_ENABLE_4K_UAR,
+ value);
value.vbool = false;
- devlink_param_driverinit_value_set(devlink,
- DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT,
- value);
+ devl_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT,
+ value);
}
static inline void mlx4_set_num_reserved_uars(struct mlx4_dev *dev,
@@ -3910,37 +3910,37 @@ static void mlx4_devlink_param_load_driverinit_values(struct devlink *devlink)
union devlink_param_value saved_value;
int err;
- err = devlink_param_driverinit_value_get(devlink,
- DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET,
- &saved_value);
+ err = devl_param_driverinit_value_get(devlink,
+ DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET,
+ &saved_value);
if (!err && mlx4_internal_err_reset != saved_value.vbool) {
mlx4_internal_err_reset = saved_value.vbool;
/* Notify on value changed on runtime configuration mode */
- devlink_param_value_changed(devlink,
- DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET);
+ devl_param_value_changed(devlink,
+ DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET);
}
- err = devlink_param_driverinit_value_get(devlink,
- DEVLINK_PARAM_GENERIC_ID_MAX_MACS,
- &saved_value);
+ err = devl_param_driverinit_value_get(devlink,
+ DEVLINK_PARAM_GENERIC_ID_MAX_MACS,
+ &saved_value);
if (!err)
log_num_mac = order_base_2(saved_value.vu32);
- err = devlink_param_driverinit_value_get(devlink,
- MLX4_DEVLINK_PARAM_ID_ENABLE_64B_CQE_EQE,
- &saved_value);
+ err = devl_param_driverinit_value_get(devlink,
+ MLX4_DEVLINK_PARAM_ID_ENABLE_64B_CQE_EQE,
+ &saved_value);
if (!err)
enable_64b_cqe_eqe = saved_value.vbool;
- err = devlink_param_driverinit_value_get(devlink,
- MLX4_DEVLINK_PARAM_ID_ENABLE_4K_UAR,
- &saved_value);
+ err = devl_param_driverinit_value_get(devlink,
+ MLX4_DEVLINK_PARAM_ID_ENABLE_4K_UAR,
+ &saved_value);
if (!err)
enable_4k_uar = saved_value.vbool;
- err = devlink_param_driverinit_value_get(devlink,
- DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT,
- &saved_value);
+ err = devl_param_driverinit_value_get(devlink,
+ DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT,
+ &saved_value);
if (!err && crdump->snapshot_enable != saved_value.vbool) {
crdump->snapshot_enable = saved_value.vbool;
- devlink_param_value_changed(devlink,
- DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT);
+ devl_param_value_changed(devlink,
+ DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT);
}
}
@@ -4021,8 +4021,8 @@ static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
mutex_init(&dev->persist->interface_state_mutex);
mutex_init(&dev->persist->pci_status_mutex);
- ret = devlink_params_register(devlink, mlx4_devlink_params,
- ARRAY_SIZE(mlx4_devlink_params));
+ ret = devl_params_register(devlink, mlx4_devlink_params,
+ ARRAY_SIZE(mlx4_devlink_params));
if (ret)
goto err_devlink_unregister;
mlx4_devlink_set_params_init_values(devlink);
@@ -4031,14 +4031,13 @@ static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_params_unregister;
pci_save_state(pdev);
- devlink_set_features(devlink, DEVLINK_F_RELOAD);
devl_unlock(devlink);
devlink_register(devlink);
return 0;
err_params_unregister:
- devlink_params_unregister(devlink, mlx4_devlink_params,
- ARRAY_SIZE(mlx4_devlink_params));
+ devl_params_unregister(devlink, mlx4_devlink_params,
+ ARRAY_SIZE(mlx4_devlink_params));
err_devlink_unregister:
kfree(dev->persist);
err_devlink_free:
@@ -4181,8 +4180,8 @@ static void mlx4_remove_one(struct pci_dev *pdev)
pci_release_regions(pdev);
mlx4_pci_disable_device(dev);
- devlink_params_unregister(devlink, mlx4_devlink_params,
- ARRAY_SIZE(mlx4_devlink_params));
+ devl_params_unregister(devlink, mlx4_devlink_params,
+ ARRAY_SIZE(mlx4_devlink_params));
kfree(dev->persist);
devl_unlock(devlink);
devlink_free(devlink);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 3d4226ddba5e..544e09b97483 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -796,10 +796,15 @@ void mlx4_en_update_pfc_stats_bitmap(struct mlx4_dev *dev,
int mlx4_en_netdev_event(struct notifier_block *this,
unsigned long event, void *ptr);
+struct xdp_md;
+int mlx4_en_xdp_rx_timestamp(const struct xdp_md *ctx, u64 *timestamp);
+int mlx4_en_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash);
+
/*
* Functions for time stamping
*/
u64 mlx4_en_get_cqe_ts(struct mlx4_cqe *cqe);
+u64 mlx4_en_get_hwtstamp(struct mlx4_en_dev *mdev, u64 timestamp);
void mlx4_en_fill_hwtstamps(struct mlx4_en_dev *mdev,
struct skb_shared_hwtstamps *hwts,
u64 timestamp);
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index 48cfaa7eaf50..913ed255990f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -46,6 +46,13 @@
#define MLX4_BF_QP_SKIP_MASK 0xc0
#define MLX4_MAX_BF_QP_RANGE 0x40
+void mlx4_put_qp(struct mlx4_qp *qp)
+{
+ if (refcount_dec_and_test(&qp->refcount))
+ complete(&qp->free);
+}
+EXPORT_SYMBOL_GPL(mlx4_put_qp);
+
void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type)
{
struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
@@ -64,10 +71,8 @@ void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type)
return;
}
+ /* Need to call mlx4_put_qp() in event handler */
qp->event(qp, event_type);
-
- if (refcount_dec_and_test(&qp->refcount))
- complete(&qp->free);
}
/* used for INIT/CLOSE port logic */
@@ -523,8 +528,7 @@ EXPORT_SYMBOL_GPL(mlx4_qp_remove);
void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp)
{
- if (refcount_dec_and_test(&qp->refcount))
- complete(&qp->free);
+ mlx4_put_qp(qp);
wait_for_completion(&qp->free);
mlx4_qp_free_icm(dev, qp->qpn);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index 26685fd0fdaa..bb1d7b039a7e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -85,7 +85,7 @@ config MLX5_BRIDGE
config MLX5_CLS_ACT
bool "MLX5 TC classifier action support"
- depends on MLX5_ESWITCH && NET_CLS_ACT
+ depends on MLX5_ESWITCH && NET_CLS_ACT && NET_TC_SKB_EXT
default y
help
mlx5 ConnectX offloads support for TC classifier action (NET_CLS_ACT),
@@ -100,7 +100,7 @@ config MLX5_CLS_ACT
config MLX5_TC_CT
bool "MLX5 TC connection tracking offload support"
- depends on MLX5_CLS_ACT && NF_FLOW_TABLE && NET_ACT_CT && NET_TC_SKB_EXT
+ depends on MLX5_CLS_ACT && NF_FLOW_TABLE && NET_ACT_CT
default y
help
Say Y here if you want to support offloading connection tracking rules
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index cd4a1ab0ea78..8d4e25cc54ea 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -47,7 +47,7 @@ mlx5_core-$(CONFIG_MLX5_CLS_ACT) += en_tc.o en/rep/tc.o en/rep/neigh.o \
en/tc_tun_vxlan.o en/tc_tun_gre.o en/tc_tun_geneve.o \
en/tc_tun_mplsoudp.o diag/en_tc_tracepoint.o \
en/tc/post_act.o en/tc/int_port.o en/tc/meter.o \
- en/tc/post_meter.o
+ en/tc/post_meter.o en/tc/act_stats.o
mlx5_core-$(CONFIG_MLX5_CLS_ACT) += en/tc/act/act.o en/tc/act/drop.o en/tc/act/trap.o \
en/tc/act/accept.o en/tc/act/mark.o en/tc/act/goto.o \
@@ -97,7 +97,7 @@ mlx5_core-$(CONFIG_MLX5_EN_MACSEC) += en_accel/macsec.o en_accel/macsec_fs.o \
mlx5_core-$(CONFIG_MLX5_EN_IPSEC) += en_accel/ipsec.o en_accel/ipsec_rxtx.o \
en_accel/ipsec_stats.o en_accel/ipsec_fs.o \
- en_accel/ipsec_offload.o
+ en_accel/ipsec_offload.o lib/ipsec_fs_roce.o
mlx5_core-$(CONFIG_MLX5_EN_TLS) += en_accel/ktls_stats.o \
en_accel/fs_tcp.o en_accel/ktls.o en_accel/ktls_txrx.o \
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index c837103a9ee3..b00e33ed05e9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -47,6 +47,25 @@
#define CREATE_TRACE_POINTS
#include "diag/cmd_tracepoint.h"
+struct mlx5_ifc_mbox_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_mbox_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x40];
+};
+
enum {
CMD_IF_REV = 5,
};
@@ -70,6 +89,27 @@ enum {
MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR = 0x10,
};
+static u16 in_to_opcode(void *in)
+{
+ return MLX5_GET(mbox_in, in, opcode);
+}
+
+/* Returns true for opcodes that might be triggered very frequently and throttle
+ * the command interface. Limit their command slots usage.
+ */
+static bool mlx5_cmd_is_throttle_opcode(u16 op)
+{
+ switch (op) {
+ case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
+ case MLX5_CMD_OP_DESTROY_GENERAL_OBJECT:
+ case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT:
+ case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
+ case MLX5_CMD_OP_SYNC_CRYPTO:
+ return true;
+ }
+ return false;
+}
+
static struct mlx5_cmd_work_ent *
cmd_alloc_ent(struct mlx5_cmd *cmd, struct mlx5_cmd_msg *in,
struct mlx5_cmd_msg *out, void *uout, int uout_size,
@@ -91,6 +131,7 @@ cmd_alloc_ent(struct mlx5_cmd *cmd, struct mlx5_cmd_msg *in,
ent->context = context;
ent->cmd = cmd;
ent->page_queue = page_queue;
+ ent->op = in_to_opcode(in->first.data);
refcount_set(&ent->refcnt, 1);
return ent;
@@ -483,6 +524,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_QUERY_VHCA_MIGRATION_STATE:
case MLX5_CMD_OP_SAVE_VHCA_STATE:
case MLX5_CMD_OP_LOAD_VHCA_STATE:
+ case MLX5_CMD_OP_SYNC_CRYPTO:
*status = MLX5_DRIVER_STATUS_ABORTED;
*synd = MLX5_DRIVER_SYND;
return -ENOLINK;
@@ -685,6 +727,7 @@ const char *mlx5_command_str(int command)
MLX5_COMMAND_STR_CASE(QUERY_VHCA_MIGRATION_STATE);
MLX5_COMMAND_STR_CASE(SAVE_VHCA_STATE);
MLX5_COMMAND_STR_CASE(LOAD_VHCA_STATE);
+ MLX5_COMMAND_STR_CASE(SYNC_CRYPTO);
default: return "unknown command opcode";
}
}
@@ -752,25 +795,6 @@ static int cmd_status_to_err(u8 status)
}
}
-struct mlx5_ifc_mbox_out_bits {
- u8 status[0x8];
- u8 reserved_at_8[0x18];
-
- u8 syndrome[0x20];
-
- u8 reserved_at_40[0x40];
-};
-
-struct mlx5_ifc_mbox_in_bits {
- u8 opcode[0x10];
- u8 uid[0x10];
-
- u8 reserved_at_20[0x10];
- u8 op_mod[0x10];
-
- u8 reserved_at_40[0x40];
-};
-
void mlx5_cmd_out_err(struct mlx5_core_dev *dev, u16 opcode, u16 op_mod, void *out)
{
u32 syndrome = MLX5_GET(mbox_out, out, syndrome);
@@ -788,11 +812,12 @@ static void cmd_status_print(struct mlx5_core_dev *dev, void *in, void *out)
u16 opcode, op_mod;
u16 uid;
- opcode = MLX5_GET(mbox_in, in, opcode);
+ opcode = in_to_opcode(in);
op_mod = MLX5_GET(mbox_in, in, op_mod);
uid = MLX5_GET(mbox_in, in, uid);
- if (!uid && opcode != MLX5_CMD_OP_DESTROY_MKEY)
+ if (!uid && opcode != MLX5_CMD_OP_DESTROY_MKEY &&
+ opcode != MLX5_CMD_OP_CREATE_UCTX)
mlx5_cmd_out_err(dev, opcode, op_mod, out);
}
@@ -800,7 +825,7 @@ int mlx5_cmd_check(struct mlx5_core_dev *dev, int err, void *in, void *out)
{
/* aborted due to PCI error or via reset flow mlx5_cmd_trigger_completions() */
if (err == -ENXIO) {
- u16 opcode = MLX5_GET(mbox_in, in, opcode);
+ u16 opcode = in_to_opcode(in);
u32 syndrome;
u8 status;
@@ -829,9 +854,9 @@ static void dump_command(struct mlx5_core_dev *dev,
struct mlx5_cmd_work_ent *ent, int input)
{
struct mlx5_cmd_msg *msg = input ? ent->in : ent->out;
- u16 op = MLX5_GET(mbox_in, ent->lay->in, opcode);
struct mlx5_cmd_mailbox *next = msg->next;
int n = mlx5_calc_cmd_blocks(msg);
+ u16 op = ent->op;
int data_only;
u32 offset = 0;
int dump_len;
@@ -883,11 +908,6 @@ static void dump_command(struct mlx5_core_dev *dev,
mlx5_core_dbg(dev, "cmd[%d]: end dump\n", ent->idx);
}
-static u16 msg_to_opcode(struct mlx5_cmd_msg *in)
-{
- return MLX5_GET(mbox_in, in->first.data, opcode);
-}
-
static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced);
static void cb_timeout_handler(struct work_struct *work)
@@ -905,13 +925,13 @@ static void cb_timeout_handler(struct work_struct *work)
/* Maybe got handled by eq recover ? */
if (!test_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state)) {
mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) Async, recovered after timeout\n", ent->idx,
- mlx5_command_str(msg_to_opcode(ent->in)), msg_to_opcode(ent->in));
+ mlx5_command_str(ent->op), ent->op);
goto out; /* phew, already handled */
}
ent->ret = -ETIMEDOUT;
mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) Async, timeout. Will cause a leak of a command resource\n",
- ent->idx, mlx5_command_str(msg_to_opcode(ent->in)), msg_to_opcode(ent->in));
+ ent->idx, mlx5_command_str(ent->op), ent->op);
mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, true);
out:
@@ -985,7 +1005,6 @@ static void cmd_work_handler(struct work_struct *work)
ent->lay = lay;
memset(lay, 0, sizeof(*lay));
memcpy(lay->in, ent->in->first.data, sizeof(lay->in));
- ent->op = be32_to_cpu(lay->in[0]) >> 16;
if (ent->in->next)
lay->in_ptr = cpu_to_be64(ent->in->next->dma);
lay->inlen = cpu_to_be32(ent->in->len);
@@ -1098,12 +1117,12 @@ static void wait_func_handle_exec_timeout(struct mlx5_core_dev *dev,
*/
if (wait_for_completion_timeout(&ent->done, timeout)) {
mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) recovered after timeout\n", ent->idx,
- mlx5_command_str(msg_to_opcode(ent->in)), msg_to_opcode(ent->in));
+ mlx5_command_str(ent->op), ent->op);
return;
}
mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) No done completion\n", ent->idx,
- mlx5_command_str(msg_to_opcode(ent->in)), msg_to_opcode(ent->in));
+ mlx5_command_str(ent->op), ent->op);
ent->ret = -ETIMEDOUT;
mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, true);
@@ -1130,12 +1149,10 @@ out_err:
if (err == -ETIMEDOUT) {
mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
- mlx5_command_str(msg_to_opcode(ent->in)),
- msg_to_opcode(ent->in));
+ mlx5_command_str(ent->op), ent->op);
} else if (err == -ECANCELED) {
mlx5_core_warn(dev, "%s(0x%x) canceled on out of queue timeout.\n",
- mlx5_command_str(msg_to_opcode(ent->in)),
- msg_to_opcode(ent->in));
+ mlx5_command_str(ent->op), ent->op);
}
mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n",
err, deliv_status_to_str(ent->status), ent->status);
@@ -1169,7 +1186,6 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
u8 status = 0;
int err = 0;
s64 ds;
- u16 op;
if (callback && page_queue)
return -EINVAL;
@@ -1209,9 +1225,8 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
goto out_free;
ds = ent->ts2 - ent->ts1;
- op = MLX5_GET(mbox_in, in->first.data, opcode);
- if (op < MLX5_CMD_OP_MAX) {
- stats = &cmd->stats[op];
+ if (ent->op < MLX5_CMD_OP_MAX) {
+ stats = &cmd->stats[ent->op];
spin_lock_irq(&stats->lock);
stats->sum += ds;
++stats->n;
@@ -1219,7 +1234,7 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
}
mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME,
"fw exec time for %s is %lld nsec\n",
- mlx5_command_str(op), ds);
+ mlx5_command_str(ent->op), ds);
out_free:
status = ent->status;
@@ -1816,7 +1831,7 @@ cache_miss:
static int is_manage_pages(void *in)
{
- return MLX5_GET(mbox_in, in, opcode) == MLX5_CMD_OP_MANAGE_PAGES;
+ return in_to_opcode(in) == MLX5_CMD_OP_MANAGE_PAGES;
}
/* Notes:
@@ -1827,8 +1842,9 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
int out_size, mlx5_cmd_cbk_t callback, void *context,
bool force_polling)
{
- u16 opcode = MLX5_GET(mbox_in, in, opcode);
struct mlx5_cmd_msg *inb, *outb;
+ u16 opcode = in_to_opcode(in);
+ bool throttle_op;
int pages_queue;
gfp_t gfp;
u8 token;
@@ -1837,13 +1853,21 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
if (mlx5_cmd_is_down(dev) || !opcode_allowed(&dev->cmd, opcode))
return -ENXIO;
+ throttle_op = mlx5_cmd_is_throttle_opcode(opcode);
+ if (throttle_op) {
+ /* atomic context may not sleep */
+ if (callback)
+ return -EINVAL;
+ down(&dev->cmd.throttle_sem);
+ }
+
pages_queue = is_manage_pages(in);
gfp = callback ? GFP_ATOMIC : GFP_KERNEL;
inb = alloc_msg(dev, in_size, gfp);
if (IS_ERR(inb)) {
err = PTR_ERR(inb);
- return err;
+ goto out_up;
}
token = alloc_token(&dev->cmd);
@@ -1877,6 +1901,9 @@ out_out:
mlx5_free_cmd_msg(dev, outb);
out_in:
free_msg(dev, inb);
+out_up:
+ if (throttle_op)
+ up(&dev->cmd.throttle_sem);
return err;
}
@@ -1950,8 +1977,8 @@ static int cmd_status_err(struct mlx5_core_dev *dev, int err, u16 opcode, u16 op
int mlx5_cmd_do(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int out_size)
{
int err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, false);
- u16 opcode = MLX5_GET(mbox_in, in, opcode);
u16 op_mod = MLX5_GET(mbox_in, in, op_mod);
+ u16 opcode = in_to_opcode(in);
return cmd_status_err(dev, err, opcode, op_mod, out);
}
@@ -1996,8 +2023,8 @@ int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size,
void *out, int out_size)
{
int err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, true);
- u16 opcode = MLX5_GET(mbox_in, in, opcode);
u16 op_mod = MLX5_GET(mbox_in, in, op_mod);
+ u16 opcode = in_to_opcode(in);
err = cmd_status_err(dev, err, opcode, op_mod, out);
return mlx5_cmd_check(dev, err, in, out);
@@ -2049,7 +2076,7 @@ int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size,
work->ctx = ctx;
work->user_callback = callback;
- work->opcode = MLX5_GET(mbox_in, in, opcode);
+ work->opcode = in_to_opcode(in);
work->op_mod = MLX5_GET(mbox_in, in, op_mod);
work->out = out;
if (WARN_ON(!atomic_inc_not_zero(&ctx->num_inflight)))
@@ -2220,6 +2247,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
sema_init(&cmd->sem, cmd->max_reg_cmds);
sema_init(&cmd->pages_sem, 1);
+ sema_init(&cmd->throttle_sem, DIV_ROUND_UP(cmd->max_reg_cmds, 2));
cmd_h = (u32)((u64)(cmd->dma) >> 32);
cmd_l = (u32)(cmd->dma);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
index 3e232a65a0c3..bb95b40d25eb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
@@ -245,8 +245,9 @@ void mlx5_pages_debugfs_init(struct mlx5_core_dev *dev)
pages = dev->priv.dbg.pages_debugfs;
debugfs_create_u32("fw_pages_total", 0400, pages, &dev->priv.fw_pages);
- debugfs_create_u32("fw_pages_vfs", 0400, pages, &dev->priv.vfs_pages);
- debugfs_create_u32("fw_pages_host_pf", 0400, pages, &dev->priv.host_pf_pages);
+ debugfs_create_u32("fw_pages_vfs", 0400, pages, &dev->priv.page_counters[MLX5_VF]);
+ debugfs_create_u32("fw_pages_sfs", 0400, pages, &dev->priv.page_counters[MLX5_SF]);
+ debugfs_create_u32("fw_pages_host_pf", 0400, pages, &dev->priv.page_counters[MLX5_HOST_PF]);
debugfs_create_u32("fw_pages_alloc_failed", 0400, pages, &dev->priv.fw_pages_alloc_failed);
debugfs_create_u32("fw_pages_give_dropped", 0400, pages, &dev->priv.give_pages_dropped);
debugfs_create_u32("fw_pages_reclaim_discard", 0400, pages,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
index 0571e40c6ee5..445fe30c3d0b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
@@ -59,6 +59,9 @@ bool mlx5_eth_supported(struct mlx5_core_dev *dev)
if (!IS_ENABLED(CONFIG_MLX5_CORE_EN))
return false;
+ if (mlx5_core_is_management_pf(dev))
+ return false;
+
if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
return false;
@@ -111,9 +114,9 @@ static bool is_eth_enabled(struct mlx5_core_dev *dev)
union devlink_param_value val;
int err;
- err = devlink_param_driverinit_value_get(priv_to_devlink(dev),
- DEVLINK_PARAM_GENERIC_ID_ENABLE_ETH,
- &val);
+ err = devl_param_driverinit_value_get(priv_to_devlink(dev),
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_ETH,
+ &val);
return err ? false : val.vbool;
}
@@ -144,9 +147,9 @@ static bool is_vnet_enabled(struct mlx5_core_dev *dev)
union devlink_param_value val;
int err;
- err = devlink_param_driverinit_value_get(priv_to_devlink(dev),
- DEVLINK_PARAM_GENERIC_ID_ENABLE_VNET,
- &val);
+ err = devl_param_driverinit_value_get(priv_to_devlink(dev),
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_VNET,
+ &val);
return err ? false : val.vbool;
}
@@ -198,6 +201,9 @@ bool mlx5_rdma_supported(struct mlx5_core_dev *dev)
if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND))
return false;
+ if (mlx5_core_is_management_pf(dev))
+ return false;
+
if (dev->priv.flags & MLX5_PRIV_FLAGS_DISABLE_IB_ADEV)
return false;
@@ -215,9 +221,9 @@ static bool is_ib_enabled(struct mlx5_core_dev *dev)
union devlink_param_value val;
int err;
- err = devlink_param_driverinit_value_get(priv_to_devlink(dev),
- DEVLINK_PARAM_GENERIC_ID_ENABLE_RDMA,
- &val);
+ err = devl_param_driverinit_value_get(priv_to_devlink(dev),
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_RDMA,
+ &val);
return err ? false : val.vbool;
}
@@ -343,7 +349,6 @@ int mlx5_attach_device(struct mlx5_core_dev *dev)
devl_assert_locked(priv_to_devlink(dev));
mutex_lock(&mlx5_intf_mutex);
priv->flags &= ~MLX5_PRIV_FLAGS_DETACH;
- priv->flags |= MLX5_PRIV_FLAGS_MLX5E_LOCKED_FLOW;
for (i = 0; i < ARRAY_SIZE(mlx5_adev_devices); i++) {
if (!priv->adev[i]) {
bool is_supported = false;
@@ -372,10 +377,6 @@ int mlx5_attach_device(struct mlx5_core_dev *dev)
/* Pay attention that this is not PCI driver that
* mlx5_core_dev is connected, but auxiliary driver.
- *
- * Here we can race of module unload with devlink
- * reload, but we don't need to take extra lock because
- * we are holding global mlx5_intf_mutex.
*/
if (!adev->dev.driver)
continue;
@@ -391,12 +392,11 @@ int mlx5_attach_device(struct mlx5_core_dev *dev)
break;
}
}
- priv->flags &= ~MLX5_PRIV_FLAGS_MLX5E_LOCKED_FLOW;
mutex_unlock(&mlx5_intf_mutex);
return ret;
}
-void mlx5_detach_device(struct mlx5_core_dev *dev)
+void mlx5_detach_device(struct mlx5_core_dev *dev, bool suspend)
{
struct mlx5_priv *priv = &dev->priv;
struct auxiliary_device *adev;
@@ -406,7 +406,6 @@ void mlx5_detach_device(struct mlx5_core_dev *dev)
devl_assert_locked(priv_to_devlink(dev));
mutex_lock(&mlx5_intf_mutex);
- priv->flags |= MLX5_PRIV_FLAGS_MLX5E_LOCKED_FLOW;
for (i = ARRAY_SIZE(mlx5_adev_devices) - 1; i >= 0; i--) {
if (!priv->adev[i])
continue;
@@ -426,7 +425,7 @@ void mlx5_detach_device(struct mlx5_core_dev *dev)
adrv = to_auxiliary_drv(adev->dev.driver);
- if (adrv->suspend) {
+ if (adrv->suspend && suspend) {
adrv->suspend(adev, pm);
continue;
}
@@ -435,7 +434,6 @@ skip_suspend:
del_adev(&priv->adev[i]->adev);
priv->adev[i] = NULL;
}
- priv->flags &= ~MLX5_PRIV_FLAGS_MLX5E_LOCKED_FLOW;
priv->flags |= MLX5_PRIV_FLAGS_DETACH;
mutex_unlock(&mlx5_intf_mutex);
}
@@ -534,22 +532,16 @@ del_adev:
int mlx5_rescan_drivers_locked(struct mlx5_core_dev *dev)
{
struct mlx5_priv *priv = &dev->priv;
- int err = 0;
lockdep_assert_held(&mlx5_intf_mutex);
if (priv->flags & MLX5_PRIV_FLAGS_DETACH)
return 0;
- priv->flags |= MLX5_PRIV_FLAGS_MLX5E_LOCKED_FLOW;
delete_drivers(dev);
if (priv->flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV)
- goto out;
-
- err = add_drivers(dev);
+ return 0;
-out:
- priv->flags &= ~MLX5_PRIV_FLAGS_MLX5E_LOCKED_FLOW;
- return err;
+ return add_drivers(dev);
}
bool mlx5_same_hw_devs(struct mlx5_core_dev *dev, struct mlx5_core_dev *peer_dev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
index 5bd83c0275f8..c5d2fdcabd56 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
@@ -7,6 +7,7 @@
#include "fw_reset.h"
#include "fs_core.h"
#include "eswitch.h"
+#include "lag/lag.h"
#include "esw/qos.h"
#include "sf/dev/dev.h"
#include "sf/sf.h"
@@ -104,7 +105,7 @@ static int mlx5_devlink_reload_fw_activate(struct devlink *devlink, struct netli
if (err)
return err;
- mlx5_unload_one_devl_locked(dev);
+ mlx5_unload_one_devl_locked(dev, true);
err = mlx5_health_wait_pci_up(dev);
if (err)
NL_SET_ERR_MSG_MOD(extack, "FW activate aborted, PCI reads fail after reset");
@@ -156,13 +157,18 @@ static int mlx5_devlink_reload_down(struct devlink *devlink, bool netns_change,
return -EOPNOTSUPP;
}
+ if (mlx5_core_is_mp_slave(dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "reload is unsupported for multi port slave");
+ return -EOPNOTSUPP;
+ }
+
if (pci_num_vf(pdev)) {
NL_SET_ERR_MSG_MOD(extack, "reload while VFs are present is unfavorable");
}
switch (action) {
case DEVLINK_RELOAD_ACTION_DRIVER_REINIT:
- mlx5_unload_one_devl_locked(dev);
+ mlx5_unload_one_devl_locked(dev, false);
break;
case DEVLINK_RELOAD_ACTION_FW_ACTIVATE:
if (limit == DEVLINK_RELOAD_LIMIT_NO_RESET)
@@ -263,9 +269,10 @@ static int mlx5_devlink_trap_action_set(struct devlink *devlink,
struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
+ struct mlx5_devlink_trap_event_ctx trap_event_ctx;
enum devlink_trap_action action_orig;
struct mlx5_devlink_trap *dl_trap;
- int err = 0;
+ int err;
if (is_mdev_switchdev_mode(dev)) {
NL_SET_ERR_MSG_MOD(extack, "Devlink traps can't be set in switchdev mode");
@@ -275,26 +282,25 @@ static int mlx5_devlink_trap_action_set(struct devlink *devlink,
dl_trap = mlx5_find_trap_by_id(dev, trap->id);
if (!dl_trap) {
mlx5_core_err(dev, "Devlink trap: Set action on invalid trap id 0x%x", trap->id);
- err = -EINVAL;
- goto out;
+ return -EINVAL;
}
- if (action != DEVLINK_TRAP_ACTION_DROP && action != DEVLINK_TRAP_ACTION_TRAP) {
- err = -EOPNOTSUPP;
- goto out;
- }
+ if (action != DEVLINK_TRAP_ACTION_DROP && action != DEVLINK_TRAP_ACTION_TRAP)
+ return -EOPNOTSUPP;
if (action == dl_trap->trap.action)
- goto out;
+ return 0;
action_orig = dl_trap->trap.action;
dl_trap->trap.action = action;
+ trap_event_ctx.trap = &dl_trap->trap;
+ trap_event_ctx.err = 0;
err = mlx5_blocking_notifier_call_chain(dev, MLX5_DRIVER_EVENT_TYPE_TRAP,
- &dl_trap->trap);
- if (err)
+ &trap_event_ctx);
+ if (err == NOTIFY_BAD)
dl_trap->trap.action = action_orig;
-out:
- return err;
+
+ return trap_event_ctx.err;
}
static const struct devlink_ops mlx5_devlink_ops = {
@@ -396,70 +402,6 @@ void mlx5_devlink_free(struct devlink *devlink)
devlink_free(devlink);
}
-static int mlx5_devlink_fs_mode_validate(struct devlink *devlink, u32 id,
- union devlink_param_value val,
- struct netlink_ext_ack *extack)
-{
- struct mlx5_core_dev *dev = devlink_priv(devlink);
- char *value = val.vstr;
- int err = 0;
-
- if (!strcmp(value, "dmfs")) {
- return 0;
- } else if (!strcmp(value, "smfs")) {
- u8 eswitch_mode;
- bool smfs_cap;
-
- eswitch_mode = mlx5_eswitch_mode(dev);
- smfs_cap = mlx5_fs_dr_is_supported(dev);
-
- if (!smfs_cap) {
- err = -EOPNOTSUPP;
- NL_SET_ERR_MSG_MOD(extack,
- "Software managed steering is not supported by current device");
- }
-
- else if (eswitch_mode == MLX5_ESWITCH_OFFLOADS) {
- NL_SET_ERR_MSG_MOD(extack,
- "Software managed steering is not supported when eswitch offloads enabled.");
- err = -EOPNOTSUPP;
- }
- } else {
- NL_SET_ERR_MSG_MOD(extack,
- "Bad parameter: supported values are [\"dmfs\", \"smfs\"]");
- err = -EINVAL;
- }
-
- return err;
-}
-
-static int mlx5_devlink_fs_mode_set(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
-{
- struct mlx5_core_dev *dev = devlink_priv(devlink);
- enum mlx5_flow_steering_mode mode;
-
- if (!strcmp(ctx->val.vstr, "smfs"))
- mode = MLX5_FLOW_STEERING_MODE_SMFS;
- else
- mode = MLX5_FLOW_STEERING_MODE_DMFS;
- dev->priv.steering->mode = mode;
-
- return 0;
-}
-
-static int mlx5_devlink_fs_mode_get(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
-{
- struct mlx5_core_dev *dev = devlink_priv(devlink);
-
- if (dev->priv.steering->mode == MLX5_FLOW_STEERING_MODE_SMFS)
- strcpy(ctx->val.vstr, "smfs");
- else
- strcpy(ctx->val.vstr, "dmfs");
- return 0;
-}
-
static int mlx5_devlink_enable_roce_validate(struct devlink *devlink, u32 id,
union devlink_param_value val,
struct netlink_ext_ack *extack)
@@ -496,68 +438,54 @@ static int mlx5_devlink_large_group_num_validate(struct devlink *devlink, u32 id
return 0;
}
-static int mlx5_devlink_esw_port_metadata_set(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+static int mlx5_devlink_esw_multiport_set(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
if (!MLX5_ESWITCH_MANAGER(dev))
return -EOPNOTSUPP;
- return mlx5_esw_offloads_vport_metadata_set(dev->priv.eswitch, ctx->val.vbool);
+ if (ctx->val.vbool)
+ return mlx5_lag_mpesw_enable(dev);
+
+ mlx5_lag_mpesw_disable(dev);
+ return 0;
}
-static int mlx5_devlink_esw_port_metadata_get(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+static int mlx5_devlink_esw_multiport_get(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
if (!MLX5_ESWITCH_MANAGER(dev))
return -EOPNOTSUPP;
- ctx->val.vbool = mlx5_eswitch_vport_match_metadata_enabled(dev->priv.eswitch);
+ ctx->val.vbool = mlx5_lag_is_mpesw(dev);
return 0;
}
-static int mlx5_devlink_esw_port_metadata_validate(struct devlink *devlink, u32 id,
- union devlink_param_value val,
- struct netlink_ext_ack *extack)
+static int mlx5_devlink_esw_multiport_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
- u8 esw_mode;
if (!MLX5_ESWITCH_MANAGER(dev)) {
NL_SET_ERR_MSG_MOD(extack, "E-Switch is unsupported");
return -EOPNOTSUPP;
}
- esw_mode = mlx5_eswitch_mode(dev);
- if (esw_mode == MLX5_ESWITCH_OFFLOADS) {
+
+ if (mlx5_eswitch_mode(dev) != MLX5_ESWITCH_OFFLOADS) {
NL_SET_ERR_MSG_MOD(extack,
- "E-Switch must either disabled or non switchdev mode");
+ "E-Switch must be in switchdev mode");
return -EBUSY;
}
- return 0;
-}
-
-#endif
-
-static int mlx5_devlink_enable_remote_dev_reset_set(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
-{
- struct mlx5_core_dev *dev = devlink_priv(devlink);
- mlx5_fw_reset_enable_remote_dev_reset_set(dev, ctx->val.vbool);
return 0;
}
-static int mlx5_devlink_enable_remote_dev_reset_get(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
-{
- struct mlx5_core_dev *dev = devlink_priv(devlink);
-
- ctx->val.vbool = mlx5_fw_reset_enable_remote_dev_reset_get(dev);
- return 0;
-}
+#endif
static int mlx5_devlink_eq_depth_validate(struct devlink *devlink, u32 id,
union devlink_param_value val,
@@ -567,11 +495,6 @@ static int mlx5_devlink_eq_depth_validate(struct devlink *devlink, u32 id,
}
static const struct devlink_param mlx5_devlink_params[] = {
- DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_ID_FLOW_STEERING_MODE,
- "flow_steering_mode", DEVLINK_PARAM_TYPE_STRING,
- BIT(DEVLINK_PARAM_CMODE_RUNTIME),
- mlx5_devlink_fs_mode_get, mlx5_devlink_fs_mode_set,
- mlx5_devlink_fs_mode_validate),
DEVLINK_PARAM_GENERIC(ENABLE_ROCE, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
NULL, NULL, mlx5_devlink_enable_roce_validate),
#ifdef CONFIG_MLX5_ESWITCH
@@ -580,16 +503,13 @@ static const struct devlink_param mlx5_devlink_params[] = {
BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
NULL, NULL,
mlx5_devlink_large_group_num_validate),
- DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_ID_ESW_PORT_METADATA,
- "esw_port_metadata", DEVLINK_PARAM_TYPE_BOOL,
+ DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_ID_ESW_MULTIPORT,
+ "esw_multiport", DEVLINK_PARAM_TYPE_BOOL,
BIT(DEVLINK_PARAM_CMODE_RUNTIME),
- mlx5_devlink_esw_port_metadata_get,
- mlx5_devlink_esw_port_metadata_set,
- mlx5_devlink_esw_port_metadata_validate),
+ mlx5_devlink_esw_multiport_get,
+ mlx5_devlink_esw_multiport_set,
+ mlx5_devlink_esw_multiport_validate),
#endif
- DEVLINK_PARAM_GENERIC(ENABLE_REMOTE_DEV_RESET, BIT(DEVLINK_PARAM_CMODE_RUNTIME),
- mlx5_devlink_enable_remote_dev_reset_get,
- mlx5_devlink_enable_remote_dev_reset_set, NULL),
DEVLINK_PARAM_GENERIC(IO_EQ_SIZE, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
NULL, NULL, mlx5_devlink_eq_depth_validate),
DEVLINK_PARAM_GENERIC(EVENT_EQ_SIZE, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
@@ -602,33 +522,34 @@ static void mlx5_devlink_set_params_init_values(struct devlink *devlink)
union devlink_param_value value;
value.vbool = MLX5_CAP_GEN(dev, roce);
- devlink_param_driverinit_value_set(devlink,
- DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE,
- value);
+ devl_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE,
+ value);
#ifdef CONFIG_MLX5_ESWITCH
value.vu32 = ESW_OFFLOADS_DEFAULT_NUM_GROUPS;
- devlink_param_driverinit_value_set(devlink,
- MLX5_DEVLINK_PARAM_ID_ESW_LARGE_GROUP_NUM,
- value);
+ devl_param_driverinit_value_set(devlink,
+ MLX5_DEVLINK_PARAM_ID_ESW_LARGE_GROUP_NUM,
+ value);
#endif
value.vu32 = MLX5_COMP_EQ_SIZE;
- devlink_param_driverinit_value_set(devlink,
- DEVLINK_PARAM_GENERIC_ID_IO_EQ_SIZE,
- value);
+ devl_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_IO_EQ_SIZE,
+ value);
value.vu32 = MLX5_NUM_ASYNC_EQE;
- devlink_param_driverinit_value_set(devlink,
- DEVLINK_PARAM_GENERIC_ID_EVENT_EQ_SIZE,
- value);
+ devl_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_EVENT_EQ_SIZE,
+ value);
}
-static const struct devlink_param enable_eth_param =
+static const struct devlink_param mlx5_devlink_eth_params[] = {
DEVLINK_PARAM_GENERIC(ENABLE_ETH, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
- NULL, NULL, NULL);
+ NULL, NULL, NULL),
+};
-static int mlx5_devlink_eth_param_register(struct devlink *devlink)
+static int mlx5_devlink_eth_params_register(struct devlink *devlink)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
union devlink_param_value value;
@@ -637,25 +558,27 @@ static int mlx5_devlink_eth_param_register(struct devlink *devlink)
if (!mlx5_eth_supported(dev))
return 0;
- err = devlink_param_register(devlink, &enable_eth_param);
+ err = devl_params_register(devlink, mlx5_devlink_eth_params,
+ ARRAY_SIZE(mlx5_devlink_eth_params));
if (err)
return err;
value.vbool = true;
- devlink_param_driverinit_value_set(devlink,
- DEVLINK_PARAM_GENERIC_ID_ENABLE_ETH,
- value);
+ devl_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_ETH,
+ value);
return 0;
}
-static void mlx5_devlink_eth_param_unregister(struct devlink *devlink)
+static void mlx5_devlink_eth_params_unregister(struct devlink *devlink)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
if (!mlx5_eth_supported(dev))
return;
- devlink_param_unregister(devlink, &enable_eth_param);
+ devl_params_unregister(devlink, mlx5_devlink_eth_params,
+ ARRAY_SIZE(mlx5_devlink_eth_params));
}
static int mlx5_devlink_enable_rdma_validate(struct devlink *devlink, u32 id,
@@ -670,11 +593,12 @@ static int mlx5_devlink_enable_rdma_validate(struct devlink *devlink, u32 id,
return 0;
}
-static const struct devlink_param enable_rdma_param =
+static const struct devlink_param mlx5_devlink_rdma_params[] = {
DEVLINK_PARAM_GENERIC(ENABLE_RDMA, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
- NULL, NULL, mlx5_devlink_enable_rdma_validate);
+ NULL, NULL, mlx5_devlink_enable_rdma_validate),
+};
-static int mlx5_devlink_rdma_param_register(struct devlink *devlink)
+static int mlx5_devlink_rdma_params_register(struct devlink *devlink)
{
union devlink_param_value value;
int err;
@@ -682,30 +606,33 @@ static int mlx5_devlink_rdma_param_register(struct devlink *devlink)
if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND))
return 0;
- err = devlink_param_register(devlink, &enable_rdma_param);
+ err = devl_params_register(devlink, mlx5_devlink_rdma_params,
+ ARRAY_SIZE(mlx5_devlink_rdma_params));
if (err)
return err;
value.vbool = true;
- devlink_param_driverinit_value_set(devlink,
- DEVLINK_PARAM_GENERIC_ID_ENABLE_RDMA,
- value);
+ devl_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_RDMA,
+ value);
return 0;
}
-static void mlx5_devlink_rdma_param_unregister(struct devlink *devlink)
+static void mlx5_devlink_rdma_params_unregister(struct devlink *devlink)
{
if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND))
return;
- devlink_param_unregister(devlink, &enable_rdma_param);
+ devl_params_unregister(devlink, mlx5_devlink_rdma_params,
+ ARRAY_SIZE(mlx5_devlink_rdma_params));
}
-static const struct devlink_param enable_vnet_param =
+static const struct devlink_param mlx5_devlink_vnet_params[] = {
DEVLINK_PARAM_GENERIC(ENABLE_VNET, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
- NULL, NULL, NULL);
+ NULL, NULL, NULL),
+};
-static int mlx5_devlink_vnet_param_register(struct devlink *devlink)
+static int mlx5_devlink_vnet_params_register(struct devlink *devlink)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
union devlink_param_value value;
@@ -714,56 +641,58 @@ static int mlx5_devlink_vnet_param_register(struct devlink *devlink)
if (!mlx5_vnet_supported(dev))
return 0;
- err = devlink_param_register(devlink, &enable_vnet_param);
+ err = devl_params_register(devlink, mlx5_devlink_vnet_params,
+ ARRAY_SIZE(mlx5_devlink_vnet_params));
if (err)
return err;
value.vbool = true;
- devlink_param_driverinit_value_set(devlink,
- DEVLINK_PARAM_GENERIC_ID_ENABLE_VNET,
- value);
+ devl_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_VNET,
+ value);
return 0;
}
-static void mlx5_devlink_vnet_param_unregister(struct devlink *devlink)
+static void mlx5_devlink_vnet_params_unregister(struct devlink *devlink)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
if (!mlx5_vnet_supported(dev))
return;
- devlink_param_unregister(devlink, &enable_vnet_param);
+ devl_params_unregister(devlink, mlx5_devlink_vnet_params,
+ ARRAY_SIZE(mlx5_devlink_vnet_params));
}
static int mlx5_devlink_auxdev_params_register(struct devlink *devlink)
{
int err;
- err = mlx5_devlink_eth_param_register(devlink);
+ err = mlx5_devlink_eth_params_register(devlink);
if (err)
return err;
- err = mlx5_devlink_rdma_param_register(devlink);
+ err = mlx5_devlink_rdma_params_register(devlink);
if (err)
goto rdma_err;
- err = mlx5_devlink_vnet_param_register(devlink);
+ err = mlx5_devlink_vnet_params_register(devlink);
if (err)
goto vnet_err;
return 0;
vnet_err:
- mlx5_devlink_rdma_param_unregister(devlink);
+ mlx5_devlink_rdma_params_unregister(devlink);
rdma_err:
- mlx5_devlink_eth_param_unregister(devlink);
+ mlx5_devlink_eth_params_unregister(devlink);
return err;
}
static void mlx5_devlink_auxdev_params_unregister(struct devlink *devlink)
{
- mlx5_devlink_vnet_param_unregister(devlink);
- mlx5_devlink_rdma_param_unregister(devlink);
- mlx5_devlink_eth_param_unregister(devlink);
+ mlx5_devlink_vnet_params_unregister(devlink);
+ mlx5_devlink_rdma_params_unregister(devlink);
+ mlx5_devlink_eth_params_unregister(devlink);
}
static int mlx5_devlink_max_uc_list_validate(struct devlink *devlink, u32 id,
@@ -791,11 +720,12 @@ static int mlx5_devlink_max_uc_list_validate(struct devlink *devlink, u32 id,
return 0;
}
-static const struct devlink_param max_uc_list_param =
+static const struct devlink_param mlx5_devlink_max_uc_list_params[] = {
DEVLINK_PARAM_GENERIC(MAX_MACS, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
- NULL, NULL, mlx5_devlink_max_uc_list_validate);
+ NULL, NULL, mlx5_devlink_max_uc_list_validate),
+};
-static int mlx5_devlink_max_uc_list_param_register(struct devlink *devlink)
+static int mlx5_devlink_max_uc_list_params_register(struct devlink *devlink)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
union devlink_param_value value;
@@ -804,26 +734,28 @@ static int mlx5_devlink_max_uc_list_param_register(struct devlink *devlink)
if (!MLX5_CAP_GEN_MAX(dev, log_max_current_uc_list_wr_supported))
return 0;
- err = devlink_param_register(devlink, &max_uc_list_param);
+ err = devl_params_register(devlink, mlx5_devlink_max_uc_list_params,
+ ARRAY_SIZE(mlx5_devlink_max_uc_list_params));
if (err)
return err;
value.vu32 = 1 << MLX5_CAP_GEN(dev, log_max_current_uc_list);
- devlink_param_driverinit_value_set(devlink,
- DEVLINK_PARAM_GENERIC_ID_MAX_MACS,
- value);
+ devl_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_MAX_MACS,
+ value);
return 0;
}
static void
-mlx5_devlink_max_uc_list_param_unregister(struct devlink *devlink)
+mlx5_devlink_max_uc_list_params_unregister(struct devlink *devlink)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
if (!MLX5_CAP_GEN_MAX(dev, log_max_current_uc_list_wr_supported))
return;
- devlink_param_unregister(devlink, &max_uc_list_param);
+ devl_params_unregister(devlink, mlx5_devlink_max_uc_list_params,
+ ARRAY_SIZE(mlx5_devlink_max_uc_list_params));
}
#define MLX5_TRAP_DROP(_id, _group_id) \
@@ -869,13 +801,12 @@ void mlx5_devlink_traps_unregister(struct devlink *devlink)
ARRAY_SIZE(mlx5_trap_groups_arr));
}
-int mlx5_devlink_register(struct devlink *devlink)
+int mlx5_devlink_params_register(struct devlink *devlink)
{
- struct mlx5_core_dev *dev = devlink_priv(devlink);
int err;
- err = devlink_params_register(devlink, mlx5_devlink_params,
- ARRAY_SIZE(mlx5_devlink_params));
+ err = devl_params_register(devlink, mlx5_devlink_params,
+ ARRAY_SIZE(mlx5_devlink_params));
if (err)
return err;
@@ -885,27 +816,24 @@ int mlx5_devlink_register(struct devlink *devlink)
if (err)
goto auxdev_reg_err;
- err = mlx5_devlink_max_uc_list_param_register(devlink);
+ err = mlx5_devlink_max_uc_list_params_register(devlink);
if (err)
goto max_uc_list_err;
- if (!mlx5_core_is_mp_slave(dev))
- devlink_set_features(devlink, DEVLINK_F_RELOAD);
-
return 0;
max_uc_list_err:
mlx5_devlink_auxdev_params_unregister(devlink);
auxdev_reg_err:
- devlink_params_unregister(devlink, mlx5_devlink_params,
- ARRAY_SIZE(mlx5_devlink_params));
+ devl_params_unregister(devlink, mlx5_devlink_params,
+ ARRAY_SIZE(mlx5_devlink_params));
return err;
}
-void mlx5_devlink_unregister(struct devlink *devlink)
+void mlx5_devlink_params_unregister(struct devlink *devlink)
{
- mlx5_devlink_max_uc_list_param_unregister(devlink);
+ mlx5_devlink_max_uc_list_params_unregister(devlink);
mlx5_devlink_auxdev_params_unregister(devlink);
- devlink_params_unregister(devlink, mlx5_devlink_params,
- ARRAY_SIZE(mlx5_devlink_params));
+ devl_params_unregister(devlink, mlx5_devlink_params,
+ ARRAY_SIZE(mlx5_devlink_params));
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.h b/drivers/net/ethernet/mellanox/mlx5/core/devlink.h
index fd033df24856..212b12424146 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.h
@@ -11,6 +11,7 @@ enum mlx5_devlink_param_id {
MLX5_DEVLINK_PARAM_ID_FLOW_STEERING_MODE,
MLX5_DEVLINK_PARAM_ID_ESW_LARGE_GROUP_NUM,
MLX5_DEVLINK_PARAM_ID_ESW_PORT_METADATA,
+ MLX5_DEVLINK_PARAM_ID_ESW_MULTIPORT,
};
struct mlx5_trap_ctx {
@@ -24,6 +25,11 @@ struct mlx5_devlink_trap {
struct list_head list;
};
+struct mlx5_devlink_trap_event_ctx {
+ struct mlx5_trap_ctx *trap;
+ int err;
+};
+
struct mlx5_core_dev;
void mlx5_devlink_trap_report(struct mlx5_core_dev *dev, int trap_id, struct sk_buff *skb,
struct devlink_port *dl_port);
@@ -35,7 +41,7 @@ void mlx5_devlink_traps_unregister(struct devlink *devlink);
struct devlink *mlx5_devlink_alloc(struct device *dev);
void mlx5_devlink_free(struct devlink *devlink);
-int mlx5_devlink_register(struct devlink *devlink);
-void mlx5_devlink_unregister(struct devlink *devlink);
+int mlx5_devlink_params_register(struct devlink *devlink);
+void mlx5_devlink_params_unregister(struct devlink *devlink);
#endif /* __MLX5_DEVLINK_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c
index 2732128e7a6e..6d73127b7217 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c
@@ -275,6 +275,10 @@ const char *parse_fs_dst(struct trace_seq *p,
fs_dest_range_field_to_str(dst->range.field),
dst->range.min, dst->range.max);
break;
+ case MLX5_FLOW_DESTINATION_TYPE_TABLE_TYPE:
+ trace_seq_printf(p, "flow_table_type=%u id:%u\n", dst->ft->type,
+ dst->ft->id);
+ break;
case MLX5_FLOW_DESTINATION_TYPE_NONE:
trace_seq_printf(p, "none\n");
break;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
index 21831386b26e..f40497823e65 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
@@ -64,6 +64,7 @@ static int mlx5_query_mtrc_caps(struct mlx5_fw_tracer *tracer)
MLX5_GET(mtrc_cap, out, num_string_trace);
tracer->str_db.num_string_db = MLX5_GET(mtrc_cap, out, num_string_db);
tracer->owner = !!MLX5_GET(mtrc_cap, out, trace_owner);
+ tracer->str_db.loaded = false;
for (i = 0; i < tracer->str_db.num_string_db; i++) {
mtrc_cap_sp = MLX5_ADDR_OF(mtrc_cap, out, string_db_param[i]);
@@ -233,6 +234,8 @@ static int mlx5_fw_tracer_allocate_strings_db(struct mlx5_fw_tracer *tracer)
int i;
for (i = 0; i < num_string_db; i++) {
+ if (!string_db_size_out[i])
+ continue;
tracer->str_db.buffer[i] = kzalloc(string_db_size_out[i], GFP_KERNEL);
if (!tracer->str_db.buffer[i])
goto free_strings_db;
@@ -278,6 +281,8 @@ static void mlx5_tracer_read_strings_db(struct work_struct *work)
}
for (i = 0; i < num_string_db; i++) {
+ if (!tracer->str_db.size_out[i])
+ continue;
offset = 0;
MLX5_SET(mtrc_stdb, in, string_db_index, i);
num_of_reads = tracer->str_db.size_out[i] /
@@ -384,6 +389,8 @@ static struct tracer_string_format *mlx5_tracer_get_string(struct mlx5_fw_tracer
str_ptr = tracer_event->string_event.string_param;
for (i = 0; i < tracer->str_db.num_string_db; i++) {
+ if (!tracer->str_db.size_out[i])
+ continue;
if (str_ptr > tracer->str_db.base_address_out[i] &&
str_ptr < tracer->str_db.base_address_out[i] +
tracer->str_db.size_out[i]) {
@@ -459,6 +466,7 @@ static void poll_trace(struct mlx5_fw_tracer *tracer,
tracer_event->event_id = MLX5_GET(tracer_event, trace, event_id);
tracer_event->lost_event = MLX5_GET(tracer_event, trace, lost);
+ tracer_event->out = trace;
switch (tracer_event->event_id) {
case TRACER_EVENT_TYPE_TIMESTAMP:
@@ -581,6 +589,26 @@ void mlx5_tracer_print_trace(struct tracer_string_format *str_frmt,
mlx5_tracer_clean_message(str_frmt);
}
+static int mlx5_tracer_handle_raw_string(struct mlx5_fw_tracer *tracer,
+ struct tracer_event *tracer_event)
+{
+ struct tracer_string_format *cur_string;
+
+ cur_string = mlx5_tracer_message_insert(tracer, tracer_event);
+ if (!cur_string)
+ return -1;
+
+ cur_string->event_id = tracer_event->event_id;
+ cur_string->timestamp = tracer_event->string_event.timestamp;
+ cur_string->lost = tracer_event->lost_event;
+ cur_string->string = "0x%08x%08x";
+ cur_string->num_of_params = 2;
+ cur_string->params[0] = upper_32_bits(*tracer_event->out);
+ cur_string->params[1] = lower_32_bits(*tracer_event->out);
+ list_add_tail(&cur_string->list, &tracer->ready_strings_list);
+ return 0;
+}
+
static int mlx5_tracer_handle_string_trace(struct mlx5_fw_tracer *tracer,
struct tracer_event *tracer_event)
{
@@ -589,7 +617,7 @@ static int mlx5_tracer_handle_string_trace(struct mlx5_fw_tracer *tracer,
if (tracer_event->string_event.tdsn == 0) {
cur_string = mlx5_tracer_get_string(tracer, tracer_event);
if (!cur_string)
- return -1;
+ return mlx5_tracer_handle_raw_string(tracer, tracer_event);
cur_string->num_of_params = mlx5_tracer_get_num_of_params(cur_string->string);
cur_string->last_param_num = 0;
@@ -602,9 +630,9 @@ static int mlx5_tracer_handle_string_trace(struct mlx5_fw_tracer *tracer,
} else {
cur_string = mlx5_tracer_message_get(tracer, tracer_event);
if (!cur_string) {
- pr_debug("%s Got string event for unknown string tdsm: %d\n",
+ pr_debug("%s Got string event for unknown string tmsn: %d\n",
__func__, tracer_event->string_event.tmsn);
- return -1;
+ return mlx5_tracer_handle_raw_string(tracer, tracer_event);
}
cur_string->last_param_num += 1;
if (cur_string->last_param_num > TRACER_MAX_PARAMS) {
@@ -756,6 +784,7 @@ static int mlx5_fw_tracer_set_mtrc_conf(struct mlx5_fw_tracer *tracer)
if (err)
mlx5_core_warn(dev, "FWTracer: Failed to set tracer configurations %d\n", err);
+ tracer->buff.consumer_index = 0;
return err;
}
@@ -820,7 +849,6 @@ static void mlx5_fw_tracer_ownership_change(struct work_struct *work)
mlx5_core_dbg(tracer->dev, "FWTracer: ownership changed, current=(%d)\n", tracer->owner);
if (tracer->owner) {
tracer->owner = false;
- tracer->buff.consumer_index = 0;
return;
}
@@ -930,6 +958,14 @@ unlock:
return err;
}
+static void mlx5_fw_tracer_update_db(struct work_struct *work)
+{
+ struct mlx5_fw_tracer *tracer =
+ container_of(work, struct mlx5_fw_tracer, update_db_work);
+
+ mlx5_fw_tracer_reload(tracer);
+}
+
/* Create software resources (Buffers, etc ..) */
struct mlx5_fw_tracer *mlx5_fw_tracer_create(struct mlx5_core_dev *dev)
{
@@ -957,6 +993,8 @@ struct mlx5_fw_tracer *mlx5_fw_tracer_create(struct mlx5_core_dev *dev)
INIT_WORK(&tracer->ownership_change_work, mlx5_fw_tracer_ownership_change);
INIT_WORK(&tracer->read_fw_strings_work, mlx5_tracer_read_strings_db);
INIT_WORK(&tracer->handle_traces_work, mlx5_fw_tracer_handle_traces);
+ INIT_WORK(&tracer->update_db_work, mlx5_fw_tracer_update_db);
+ mutex_init(&tracer->state_lock);
err = mlx5_query_mtrc_caps(tracer);
@@ -1003,11 +1041,15 @@ int mlx5_fw_tracer_init(struct mlx5_fw_tracer *tracer)
if (IS_ERR_OR_NULL(tracer))
return 0;
- dev = tracer->dev;
-
if (!tracer->str_db.loaded)
queue_work(tracer->work_queue, &tracer->read_fw_strings_work);
+ mutex_lock(&tracer->state_lock);
+ if (test_and_set_bit(MLX5_TRACER_STATE_UP, &tracer->state))
+ goto unlock;
+
+ dev = tracer->dev;
+
err = mlx5_core_alloc_pd(dev, &tracer->buff.pdn);
if (err) {
mlx5_core_warn(dev, "FWTracer: Failed to allocate PD %d\n", err);
@@ -1028,6 +1070,8 @@ int mlx5_fw_tracer_init(struct mlx5_fw_tracer *tracer)
mlx5_core_warn(dev, "FWTracer: Failed to start tracer %d\n", err);
goto err_notifier_unregister;
}
+unlock:
+ mutex_unlock(&tracer->state_lock);
return 0;
err_notifier_unregister:
@@ -1037,6 +1081,7 @@ err_dealloc_pd:
mlx5_core_dealloc_pd(dev, tracer->buff.pdn);
err_cancel_work:
cancel_work_sync(&tracer->read_fw_strings_work);
+ mutex_unlock(&tracer->state_lock);
return err;
}
@@ -1046,17 +1091,27 @@ void mlx5_fw_tracer_cleanup(struct mlx5_fw_tracer *tracer)
if (IS_ERR_OR_NULL(tracer))
return;
+ mutex_lock(&tracer->state_lock);
+ if (!test_and_clear_bit(MLX5_TRACER_STATE_UP, &tracer->state))
+ goto unlock;
+
mlx5_core_dbg(tracer->dev, "FWTracer: Cleanup, is owner ? (%d)\n",
tracer->owner);
mlx5_eq_notifier_unregister(tracer->dev, &tracer->nb);
cancel_work_sync(&tracer->ownership_change_work);
cancel_work_sync(&tracer->handle_traces_work);
+ /* It is valid to get here from update_db_work. Hence, don't wait for
+ * update_db_work to finished.
+ */
+ cancel_work(&tracer->update_db_work);
if (tracer->owner)
mlx5_fw_tracer_ownership_release(tracer);
mlx5_core_destroy_mkey(tracer->dev, tracer->buff.mkey);
mlx5_core_dealloc_pd(tracer->dev, tracer->buff.pdn);
+unlock:
+ mutex_unlock(&tracer->state_lock);
}
/* Free software resources (Buffers, etc ..) */
@@ -1073,6 +1128,7 @@ void mlx5_fw_tracer_destroy(struct mlx5_fw_tracer *tracer)
mlx5_fw_tracer_clean_saved_traces_array(tracer);
mlx5_fw_tracer_free_strings_db(tracer);
mlx5_fw_tracer_destroy_log_buf(tracer);
+ mutex_destroy(&tracer->state_lock);
destroy_workqueue(tracer->work_queue);
kvfree(tracer);
}
@@ -1082,6 +1138,8 @@ static int mlx5_fw_tracer_recreate_strings_db(struct mlx5_fw_tracer *tracer)
struct mlx5_core_dev *dev;
int err;
+ if (test_and_set_bit(MLX5_TRACER_RECREATE_DB, &tracer->state))
+ return 0;
cancel_work_sync(&tracer->read_fw_strings_work);
mlx5_fw_tracer_clean_ready_list(tracer);
mlx5_fw_tracer_clean_print_hash(tracer);
@@ -1092,17 +1150,18 @@ static int mlx5_fw_tracer_recreate_strings_db(struct mlx5_fw_tracer *tracer)
err = mlx5_query_mtrc_caps(tracer);
if (err) {
mlx5_core_dbg(dev, "FWTracer: Failed to query capabilities %d\n", err);
- return err;
+ goto out;
}
err = mlx5_fw_tracer_allocate_strings_db(tracer);
if (err) {
mlx5_core_warn(dev, "FWTracer: Allocate strings DB failed %d\n", err);
- return err;
+ goto out;
}
mlx5_fw_tracer_init_saved_traces_array(tracer);
-
- return 0;
+out:
+ clear_bit(MLX5_TRACER_RECREATE_DB, &tracer->state);
+ return err;
}
int mlx5_fw_tracer_reload(struct mlx5_fw_tracer *tracer)
@@ -1142,6 +1201,9 @@ static int fw_tracer_event(struct notifier_block *nb, unsigned long action, void
case MLX5_TRACER_SUBTYPE_TRACES_AVAILABLE:
queue_work(tracer->work_queue, &tracer->handle_traces_work);
break;
+ case MLX5_TRACER_SUBTYPE_STRINGS_DB_UPDATE:
+ queue_work(tracer->work_queue, &tracer->update_db_work);
+ break;
default:
mlx5_core_dbg(dev, "FWTracer: Event with unrecognized subtype: sub_type %d\n",
eqe->sub_type);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h
index 4762b55b0b0e..5c548bb74f07 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h
@@ -63,6 +63,11 @@ struct mlx5_fw_trace_data {
char msg[TRACE_STR_MSG];
};
+enum mlx5_fw_tracer_state {
+ MLX5_TRACER_STATE_UP = BIT(0),
+ MLX5_TRACER_RECREATE_DB = BIT(1),
+};
+
struct mlx5_fw_tracer {
struct mlx5_core_dev *dev;
struct mlx5_nb nb;
@@ -104,6 +109,9 @@ struct mlx5_fw_tracer {
struct work_struct handle_traces_work;
struct hlist_head hash[MESSAGE_HASH_SIZE];
struct list_head ready_strings_list;
+ struct work_struct update_db_work;
+ struct mutex state_lock; /* Synchronize update work with reload flows */
+ unsigned long state;
};
struct tracer_string_format {
@@ -158,6 +166,7 @@ struct tracer_event {
struct tracer_string_event string_event;
struct tracer_timestamp_event timestamp_event;
};
+ u64 *out;
};
struct mlx5_ifc_tracer_event_bits {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c b/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c
index 464eb3a18450..9a3878f9e582 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c
@@ -75,6 +75,10 @@ int mlx5_ec_init(struct mlx5_core_dev *dev)
if (!mlx5_core_is_ecpf(dev))
return 0;
+ /* Management PF don't have a peer PF */
+ if (mlx5_core_is_management_pf(dev))
+ return 0;
+
return mlx5_host_pf_init(dev);
}
@@ -85,9 +89,13 @@ void mlx5_ec_cleanup(struct mlx5_core_dev *dev)
if (!mlx5_core_is_ecpf(dev))
return;
+ /* Management PF don't have a peer PF */
+ if (mlx5_core_is_management_pf(dev))
+ return;
+
mlx5_host_pf_cleanup(dev);
- err = mlx5_wait_for_pages(dev, &dev->priv.host_pf_pages);
+ err = mlx5_wait_for_pages(dev, &dev->priv.page_counters[MLX5_HOST_PF]);
if (err)
mlx5_core_warn(dev, "Timeout reclaiming external host PF pages err(%d)\n", err);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 2d77fb8a8a01..88460b7796e5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -247,7 +247,7 @@ struct mlx5e_rx_wqe_ll {
};
struct mlx5e_rx_wqe_cyc {
- struct mlx5_wqe_data_seg data[0];
+ DECLARE_FLEX_ARRAY(struct mlx5_wqe_data_seg, data);
};
struct mlx5e_umr_wqe {
@@ -454,6 +454,7 @@ struct mlx5e_txqsq {
struct mlx5_clock *clock;
struct net_device *netdev;
struct mlx5_core_dev *mdev;
+ struct mlx5e_channel *channel;
struct mlx5e_priv *priv;
/* control path */
@@ -626,10 +627,11 @@ struct mlx5e_rq;
typedef void (*mlx5e_fp_handle_rx_cqe)(struct mlx5e_rq*, struct mlx5_cqe64*);
typedef struct sk_buff *
(*mlx5e_fp_skb_from_cqe_mpwrq)(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
- u16 cqe_bcnt, u32 head_offset, u32 page_idx);
+ struct mlx5_cqe64 *cqe, u16 cqe_bcnt,
+ u32 head_offset, u32 page_idx);
typedef struct sk_buff *
(*mlx5e_fp_skb_from_cqe)(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi,
- u32 cqe_bcnt);
+ struct mlx5_cqe64 *cqe, u32 cqe_bcnt);
typedef bool (*mlx5e_fp_post_rx_wqes)(struct mlx5e_rq *rq);
typedef void (*mlx5e_fp_dealloc_wqe)(struct mlx5e_rq*, u16);
typedef void (*mlx5e_fp_shampo_dealloc_hd)(struct mlx5e_rq*, u16, u16, bool);
@@ -968,6 +970,12 @@ struct mlx5e_priv {
struct mlx5e_scratchpad scratchpad;
struct mlx5e_htb *htb;
struct mlx5e_mqprio_rl *mqprio_rl;
+ struct dentry *dfs_root;
+};
+
+struct mlx5e_dev {
+ struct mlx5e_priv *priv;
+ struct devlink_port dl_port;
};
struct mlx5e_rx_handlers {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c
index 83adaabf59f5..c6b6e290fd79 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c
@@ -4,6 +4,31 @@
#include "en/devlink.h"
#include "eswitch.h"
+static const struct devlink_ops mlx5e_devlink_ops = {
+};
+
+struct mlx5e_dev *mlx5e_create_devlink(struct device *dev,
+ struct mlx5_core_dev *mdev)
+{
+ struct mlx5e_dev *mlx5e_dev;
+ struct devlink *devlink;
+
+ devlink = devlink_alloc_ns(&mlx5e_devlink_ops, sizeof(*mlx5e_dev),
+ devlink_net(priv_to_devlink(mdev)), dev);
+ if (!devlink)
+ return ERR_PTR(-ENOMEM);
+ devlink_register(devlink);
+ return devlink_priv(devlink);
+}
+
+void mlx5e_destroy_devlink(struct mlx5e_dev *mlx5e_dev)
+{
+ struct devlink *devlink = priv_to_devlink(mlx5e_dev);
+
+ devlink_unregister(devlink);
+ devlink_free(devlink);
+}
+
static void
mlx5e_devlink_get_port_parent_id(struct mlx5_core_dev *dev, struct netdev_phys_item_id *ppid)
{
@@ -14,51 +39,36 @@ mlx5e_devlink_get_port_parent_id(struct mlx5_core_dev *dev, struct netdev_phys_i
memcpy(ppid->id, &parent_id, sizeof(parent_id));
}
-int mlx5e_devlink_port_register(struct mlx5e_priv *priv)
+int mlx5e_devlink_port_register(struct mlx5e_dev *mlx5e_dev,
+ struct mlx5_core_dev *mdev)
{
- struct devlink *devlink = priv_to_devlink(priv->mdev);
+ struct devlink *devlink = priv_to_devlink(mlx5e_dev);
struct devlink_port_attrs attrs = {};
struct netdev_phys_item_id ppid = {};
- struct devlink_port *dl_port;
unsigned int dl_port_index;
- int ret;
- if (mlx5_core_is_pf(priv->mdev)) {
+ if (mlx5_core_is_pf(mdev)) {
attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
- attrs.phys.port_number = mlx5_get_dev_index(priv->mdev);
- if (MLX5_ESWITCH_MANAGER(priv->mdev)) {
- mlx5e_devlink_get_port_parent_id(priv->mdev, &ppid);
+ attrs.phys.port_number = mlx5_get_dev_index(mdev);
+ if (MLX5_ESWITCH_MANAGER(mdev)) {
+ mlx5e_devlink_get_port_parent_id(mdev, &ppid);
memcpy(attrs.switch_id.id, ppid.id, ppid.id_len);
attrs.switch_id.id_len = ppid.id_len;
}
- dl_port_index = mlx5_esw_vport_to_devlink_port_index(priv->mdev,
+ dl_port_index = mlx5_esw_vport_to_devlink_port_index(mdev,
MLX5_VPORT_UPLINK);
} else {
attrs.flavour = DEVLINK_PORT_FLAVOUR_VIRTUAL;
- dl_port_index = mlx5_esw_vport_to_devlink_port_index(priv->mdev, 0);
+ dl_port_index = mlx5_esw_vport_to_devlink_port_index(mdev, 0);
}
- dl_port = mlx5e_devlink_get_dl_port(priv);
- memset(dl_port, 0, sizeof(*dl_port));
- devlink_port_attrs_set(dl_port, &attrs);
+ devlink_port_attrs_set(&mlx5e_dev->dl_port, &attrs);
- if (!(priv->mdev->priv.flags & MLX5_PRIV_FLAGS_MLX5E_LOCKED_FLOW))
- devl_lock(devlink);
- ret = devl_port_register(devlink, dl_port, dl_port_index);
- if (!(priv->mdev->priv.flags & MLX5_PRIV_FLAGS_MLX5E_LOCKED_FLOW))
- devl_unlock(devlink);
-
- return ret;
+ return devlink_port_register(devlink, &mlx5e_dev->dl_port,
+ dl_port_index);
}
-void mlx5e_devlink_port_unregister(struct mlx5e_priv *priv)
+void mlx5e_devlink_port_unregister(struct mlx5e_dev *mlx5e_dev)
{
- struct devlink_port *dl_port = mlx5e_devlink_get_dl_port(priv);
- struct devlink *devlink = priv_to_devlink(priv->mdev);
-
- if (!(priv->mdev->priv.flags & MLX5_PRIV_FLAGS_MLX5E_LOCKED_FLOW))
- devl_lock(devlink);
- devl_port_unregister(dl_port);
- if (!(priv->mdev->priv.flags & MLX5_PRIV_FLAGS_MLX5E_LOCKED_FLOW))
- devl_unlock(devlink);
+ devlink_port_unregister(&mlx5e_dev->dl_port);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.h b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.h
index 4f238d4fff55..d5ec4461f300 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.h
@@ -7,13 +7,11 @@
#include <net/devlink.h>
#include "en.h"
-int mlx5e_devlink_port_register(struct mlx5e_priv *priv);
-void mlx5e_devlink_port_unregister(struct mlx5e_priv *priv);
-
-static inline struct devlink_port *
-mlx5e_devlink_get_dl_port(struct mlx5e_priv *priv)
-{
- return &priv->mdev->mlx5e_res.dl_port;
-}
+struct mlx5e_dev *mlx5e_create_devlink(struct device *dev,
+ struct mlx5_core_dev *mdev);
+void mlx5e_destroy_devlink(struct mlx5e_dev *mlx5e_dev);
+int mlx5e_devlink_port_register(struct mlx5e_dev *mlx5e_dev,
+ struct mlx5_core_dev *mdev);
+void mlx5e_devlink_port_unregister(struct mlx5e_dev *mlx5e_dev);
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
index 379c6dc9a3be..e5a44b0b9616 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
@@ -87,6 +87,7 @@ enum {
MLX5E_ACCEL_FS_POL_FT_LEVEL = MLX5E_INNER_TTC_FT_LEVEL + 1,
MLX5E_ACCEL_FS_ESP_FT_LEVEL,
MLX5E_ACCEL_FS_ESP_FT_ERR_LEVEL,
+ MLX5E_ACCEL_FS_ESP_FT_ROCE_LEVEL,
#endif
};
@@ -145,7 +146,8 @@ void mlx5e_destroy_flow_steering(struct mlx5e_flow_steering *fs, bool ntuple,
struct mlx5e_flow_steering *mlx5e_fs_init(const struct mlx5e_profile *profile,
struct mlx5_core_dev *mdev,
- bool state_destroy);
+ bool state_destroy,
+ struct dentry *dfs_root);
void mlx5e_fs_cleanup(struct mlx5e_flow_steering *fs);
struct mlx5e_vlan_table *mlx5e_fs_get_vlan(struct mlx5e_flow_steering *fs);
void mlx5e_fs_set_tc(struct mlx5e_flow_steering *fs, struct mlx5e_tc_table *tc);
@@ -189,6 +191,8 @@ int mlx5e_fs_vlan_rx_kill_vid(struct mlx5e_flow_steering *fs,
__be16 proto, u16 vid);
void mlx5e_fs_init_l2_addr(struct mlx5e_flow_steering *fs, struct net_device *netdev);
+struct dentry *mlx5e_fs_get_debugfs_root(struct mlx5e_flow_steering *fs);
+
#define fs_err(fs, fmt, ...) \
mlx5_core_err(mlx5e_fs_get_mdev(fs), fmt, ##__VA_ARGS__)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/mod_hdr.c b/drivers/net/ethernet/mellanox/mlx5/core/en/mod_hdr.c
index 17325c5d6516..cf60f0a3ff23 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/mod_hdr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/mod_hdr.c
@@ -47,6 +47,7 @@ void mlx5e_mod_hdr_tbl_init(struct mod_hdr_tbl *tbl)
void mlx5e_mod_hdr_tbl_destroy(struct mod_hdr_tbl *tbl)
{
+ WARN_ON(!hash_empty(tbl->hlist));
mutex_destroy(&tbl->lock);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
index 4ad19c981294..a21bd1179477 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
@@ -411,9 +411,14 @@ u8 mlx5e_mpwqe_get_log_num_strides(struct mlx5_core_dev *mdev,
{
enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
+ u8 log_wqe_size, log_stride_size;
- return mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, umr_mode) -
- mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
+ log_wqe_size = mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, umr_mode);
+ log_stride_size = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
+ WARN(log_wqe_size < log_stride_size,
+ "Log WQE size %u < log stride size %u (page shift %u, umr mode %d, xsk on? %d)\n",
+ log_wqe_size, log_stride_size, page_shift, umr_mode, !!xsk);
+ return log_wqe_size - log_stride_size;
}
u8 mlx5e_mpwqe_get_min_wqe_bulk(unsigned int wq_sz)
@@ -580,11 +585,16 @@ int mlx5e_mpwrq_validate_xsk(struct mlx5_core_dev *mdev, struct mlx5e_params *pa
u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
u16 max_mtu_pkts;
- if (!mlx5e_check_fragmented_striding_rq_cap(mdev, page_shift, umr_mode))
+ if (!mlx5e_check_fragmented_striding_rq_cap(mdev, page_shift, umr_mode)) {
+ mlx5_core_err(mdev, "Striding RQ for XSK can't be activated with page_shift %u and umr_mode %d\n",
+ page_shift, umr_mode);
return -EOPNOTSUPP;
+ }
- if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk))
+ if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk)) {
+ mlx5_core_err(mdev, "Striding RQ linear mode for XSK can't be activated with current params\n");
return -EINVAL;
+ }
/* Current RQ length is too big for the given frame size, the
* needed number of WQEs exceeds the maximum.
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
index 89510cac46c2..505ba41195b9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
@@ -287,6 +287,78 @@ int mlx5e_port_set_pbmc(struct mlx5_core_dev *mdev, void *in)
return err;
}
+int mlx5e_port_query_sbpr(struct mlx5_core_dev *mdev, u32 desc, u8 dir,
+ u8 pool_idx, void *out, int size_out)
+{
+ u32 in[MLX5_ST_SZ_DW(sbpr_reg)] = {};
+
+ MLX5_SET(sbpr_reg, in, desc, desc);
+ MLX5_SET(sbpr_reg, in, dir, dir);
+ MLX5_SET(sbpr_reg, in, pool, pool_idx);
+
+ return mlx5_core_access_reg(mdev, in, sizeof(in), out, size_out, MLX5_REG_SBPR, 0, 0);
+}
+
+int mlx5e_port_set_sbpr(struct mlx5_core_dev *mdev, u32 desc, u8 dir,
+ u8 pool_idx, u32 infi_size, u32 size)
+{
+ u32 out[MLX5_ST_SZ_DW(sbpr_reg)] = {};
+ u32 in[MLX5_ST_SZ_DW(sbpr_reg)] = {};
+
+ MLX5_SET(sbpr_reg, in, desc, desc);
+ MLX5_SET(sbpr_reg, in, dir, dir);
+ MLX5_SET(sbpr_reg, in, pool, pool_idx);
+ MLX5_SET(sbpr_reg, in, infi_size, infi_size);
+ MLX5_SET(sbpr_reg, in, size, size);
+ MLX5_SET(sbpr_reg, in, mode, 1);
+
+ return mlx5_core_access_reg(mdev, in, sizeof(in), out, sizeof(out), MLX5_REG_SBPR, 0, 1);
+}
+
+static int mlx5e_port_query_sbcm(struct mlx5_core_dev *mdev, u32 desc,
+ u8 pg_buff_idx, u8 dir, void *out,
+ int size_out)
+{
+ u32 in[MLX5_ST_SZ_DW(sbcm_reg)] = {};
+
+ MLX5_SET(sbcm_reg, in, desc, desc);
+ MLX5_SET(sbcm_reg, in, local_port, 1);
+ MLX5_SET(sbcm_reg, in, pg_buff, pg_buff_idx);
+ MLX5_SET(sbcm_reg, in, dir, dir);
+
+ return mlx5_core_access_reg(mdev, in, sizeof(in), out, size_out, MLX5_REG_SBCM, 0, 0);
+}
+
+int mlx5e_port_set_sbcm(struct mlx5_core_dev *mdev, u32 desc, u8 pg_buff_idx,
+ u8 dir, u8 infi_size, u32 max_buff, u8 pool_idx)
+{
+ u32 out[MLX5_ST_SZ_DW(sbcm_reg)] = {};
+ u32 in[MLX5_ST_SZ_DW(sbcm_reg)] = {};
+ u32 min_buff;
+ int err;
+ u8 exc;
+
+ err = mlx5e_port_query_sbcm(mdev, desc, pg_buff_idx, dir, out,
+ sizeof(out));
+ if (err)
+ return err;
+
+ exc = MLX5_GET(sbcm_reg, out, exc);
+ min_buff = MLX5_GET(sbcm_reg, out, min_buff);
+
+ MLX5_SET(sbcm_reg, in, desc, desc);
+ MLX5_SET(sbcm_reg, in, local_port, 1);
+ MLX5_SET(sbcm_reg, in, pg_buff, pg_buff_idx);
+ MLX5_SET(sbcm_reg, in, dir, dir);
+ MLX5_SET(sbcm_reg, in, exc, exc);
+ MLX5_SET(sbcm_reg, in, min_buff, min_buff);
+ MLX5_SET(sbcm_reg, in, infi_max, infi_size);
+ MLX5_SET(sbcm_reg, in, max_buff, max_buff);
+ MLX5_SET(sbcm_reg, in, pool, pool_idx);
+
+ return mlx5_core_access_reg(mdev, in, sizeof(in), out, sizeof(out), MLX5_REG_SBCM, 0, 1);
+}
+
/* buffer[i]: buffer that priority i mapped to */
int mlx5e_port_query_priority2buffer(struct mlx5_core_dev *mdev, u8 *buffer)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.h b/drivers/net/ethernet/mellanox/mlx5/core/en/port.h
index 7a7defe60792..3f474e370828 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.h
@@ -57,6 +57,12 @@ u32 mlx5e_port_speed2linkmodes(struct mlx5_core_dev *mdev, u32 speed,
bool mlx5e_ptys_ext_supported(struct mlx5_core_dev *mdev);
int mlx5e_port_query_pbmc(struct mlx5_core_dev *mdev, void *out);
int mlx5e_port_set_pbmc(struct mlx5_core_dev *mdev, void *in);
+int mlx5e_port_query_sbpr(struct mlx5_core_dev *mdev, u32 desc, u8 dir,
+ u8 pool_idx, void *out, int size_out);
+int mlx5e_port_set_sbpr(struct mlx5_core_dev *mdev, u32 desc, u8 dir,
+ u8 pool_idx, u32 infi_size, u32 size);
+int mlx5e_port_set_sbcm(struct mlx5_core_dev *mdev, u32 desc, u8 pg_buff_idx,
+ u8 dir, u8 infi_size, u32 max_buff, u8 pool_idx);
int mlx5e_port_query_priority2buffer(struct mlx5_core_dev *mdev, u8 *buffer);
int mlx5e_port_set_priority2buffer(struct mlx5_core_dev *mdev, u8 *buffer);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
index c9d5d8d93994..7ac1ad9c46de 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
@@ -73,6 +73,7 @@ int mlx5e_port_query_buffer(struct mlx5e_priv *priv,
port_buffer->buffer[i].lossy);
}
+ port_buffer->headroom_size = total_used;
port_buffer->port_buffer_size =
MLX5_GET(pbmc_reg, out, port_buffer_size) * port_buff_cell_sz;
port_buffer->spare_buffer_size =
@@ -86,16 +87,204 @@ out:
return err;
}
+struct mlx5e_buffer_pool {
+ u32 infi_size;
+ u32 size;
+ u32 buff_occupancy;
+};
+
+static int mlx5e_port_query_pool(struct mlx5_core_dev *mdev,
+ struct mlx5e_buffer_pool *buffer_pool,
+ u32 desc, u8 dir, u8 pool_idx)
+{
+ u32 out[MLX5_ST_SZ_DW(sbpr_reg)] = {};
+ int err;
+
+ err = mlx5e_port_query_sbpr(mdev, desc, dir, pool_idx, out,
+ sizeof(out));
+ if (err)
+ return err;
+
+ buffer_pool->size = MLX5_GET(sbpr_reg, out, size);
+ buffer_pool->infi_size = MLX5_GET(sbpr_reg, out, infi_size);
+ buffer_pool->buff_occupancy = MLX5_GET(sbpr_reg, out, buff_occupancy);
+
+ return err;
+}
+
+enum {
+ MLX5_INGRESS_DIR = 0,
+ MLX5_EGRESS_DIR = 1,
+};
+
+enum {
+ MLX5_LOSSY_POOL = 0,
+ MLX5_LOSSLESS_POOL = 1,
+};
+
+/* No limit on usage of shared buffer pool (max_buff=0) */
+#define MLX5_SB_POOL_NO_THRESHOLD 0
+/* Shared buffer pool usage threshold when calculated
+ * dynamically in alpha units. alpha=13 is equivalent to
+ * HW_alpha of [(1/128) * 2 ^ (alpha-1)] = 32, where HW_alpha
+ * equates to the following portion of the shared buffer pool:
+ * [32 / (1 + n * 32)] While *n* is the number of buffers
+ * that are using the shared buffer pool.
+ */
+#define MLX5_SB_POOL_THRESHOLD 13
+
+/* Shared buffer class management parameters */
+struct mlx5_sbcm_params {
+ u8 pool_idx;
+ u8 max_buff;
+ u8 infi_size;
+};
+
+static const struct mlx5_sbcm_params sbcm_default = {
+ .pool_idx = MLX5_LOSSY_POOL,
+ .max_buff = MLX5_SB_POOL_NO_THRESHOLD,
+ .infi_size = 0,
+};
+
+static const struct mlx5_sbcm_params sbcm_lossy = {
+ .pool_idx = MLX5_LOSSY_POOL,
+ .max_buff = MLX5_SB_POOL_NO_THRESHOLD,
+ .infi_size = 1,
+};
+
+static const struct mlx5_sbcm_params sbcm_lossless = {
+ .pool_idx = MLX5_LOSSLESS_POOL,
+ .max_buff = MLX5_SB_POOL_THRESHOLD,
+ .infi_size = 0,
+};
+
+static const struct mlx5_sbcm_params sbcm_lossless_no_threshold = {
+ .pool_idx = MLX5_LOSSLESS_POOL,
+ .max_buff = MLX5_SB_POOL_NO_THRESHOLD,
+ .infi_size = 1,
+};
+
+/**
+ * select_sbcm_params() - selects the shared buffer pool configuration
+ *
+ * @buffer: <input> port buffer to retrieve params of
+ * @lossless_buff_count: <input> number of lossless buffers in total
+ *
+ * The selection is based on the following rules:
+ * 1. If buffer size is 0, no shared buffer pool is used.
+ * 2. If buffer is lossy, use lossy shared buffer pool.
+ * 3. If there are more than 1 lossless buffers, use lossless shared buffer pool
+ * with threshold.
+ * 4. If there is only 1 lossless buffer, use lossless shared buffer pool
+ * without threshold.
+ *
+ * @return const struct mlx5_sbcm_params* selected values
+ */
+static const struct mlx5_sbcm_params *
+select_sbcm_params(struct mlx5e_bufferx_reg *buffer, u8 lossless_buff_count)
+{
+ if (buffer->size == 0)
+ return &sbcm_default;
+
+ if (buffer->lossy)
+ return &sbcm_lossy;
+
+ if (lossless_buff_count > 1)
+ return &sbcm_lossless;
+
+ return &sbcm_lossless_no_threshold;
+}
+
+static int port_update_pool_cfg(struct mlx5_core_dev *mdev,
+ struct mlx5e_port_buffer *port_buffer)
+{
+ const struct mlx5_sbcm_params *p;
+ u8 lossless_buff_count = 0;
+ int err;
+ int i;
+
+ if (!MLX5_CAP_GEN(mdev, sbcam_reg))
+ return 0;
+
+ for (i = 0; i < MLX5E_MAX_BUFFER; i++)
+ lossless_buff_count += ((port_buffer->buffer[i].size) &&
+ (!(port_buffer->buffer[i].lossy)));
+
+ for (i = 0; i < MLX5E_MAX_BUFFER; i++) {
+ p = select_sbcm_params(&port_buffer->buffer[i], lossless_buff_count);
+ err = mlx5e_port_set_sbcm(mdev, 0, i,
+ MLX5_INGRESS_DIR,
+ p->infi_size,
+ p->max_buff,
+ p->pool_idx);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int port_update_shared_buffer(struct mlx5_core_dev *mdev,
+ u32 current_headroom_size,
+ u32 new_headroom_size)
+{
+ struct mlx5e_buffer_pool lossless_ipool;
+ struct mlx5e_buffer_pool lossy_epool;
+ u32 lossless_ipool_size;
+ u32 shared_buffer_size;
+ u32 total_buffer_size;
+ u32 lossy_epool_size;
+ int err;
+
+ if (!MLX5_CAP_GEN(mdev, sbcam_reg))
+ return 0;
+
+ err = mlx5e_port_query_pool(mdev, &lossy_epool, 0, MLX5_EGRESS_DIR,
+ MLX5_LOSSY_POOL);
+ if (err)
+ return err;
+
+ err = mlx5e_port_query_pool(mdev, &lossless_ipool, 0, MLX5_INGRESS_DIR,
+ MLX5_LOSSLESS_POOL);
+ if (err)
+ return err;
+
+ total_buffer_size = current_headroom_size + lossy_epool.size +
+ lossless_ipool.size;
+ shared_buffer_size = total_buffer_size - new_headroom_size;
+
+ if (shared_buffer_size < 4) {
+ pr_err("Requested port buffer is too large, not enough space left for shared buffer\n");
+ return -EINVAL;
+ }
+
+ /* Total shared buffer size is split in a ratio of 3:1 between
+ * lossy and lossless pools respectively.
+ */
+ lossy_epool_size = (shared_buffer_size / 4) * 3;
+ lossless_ipool_size = shared_buffer_size / 4;
+
+ mlx5e_port_set_sbpr(mdev, 0, MLX5_EGRESS_DIR, MLX5_LOSSY_POOL, 0,
+ lossy_epool_size);
+ mlx5e_port_set_sbpr(mdev, 0, MLX5_INGRESS_DIR, MLX5_LOSSLESS_POOL, 0,
+ lossless_ipool_size);
+ return 0;
+}
+
static int port_set_buffer(struct mlx5e_priv *priv,
struct mlx5e_port_buffer *port_buffer)
{
u16 port_buff_cell_sz = priv->dcbx.port_buff_cell_sz;
struct mlx5_core_dev *mdev = priv->mdev;
int sz = MLX5_ST_SZ_BYTES(pbmc_reg);
+ u32 new_headroom_size = 0;
+ u32 current_headroom_size;
void *in;
int err;
int i;
+ current_headroom_size = port_buffer->headroom_size;
+
in = kzalloc(sz, GFP_KERNEL);
if (!in)
return -ENOMEM;
@@ -110,6 +299,7 @@ static int port_set_buffer(struct mlx5e_priv *priv,
u64 xoff = port_buffer->buffer[i].xoff;
u64 xon = port_buffer->buffer[i].xon;
+ new_headroom_size += size;
do_div(size, port_buff_cell_sz);
do_div(xoff, port_buff_cell_sz);
do_div(xon, port_buff_cell_sz);
@@ -119,6 +309,17 @@ static int port_set_buffer(struct mlx5e_priv *priv,
MLX5_SET(bufferx_reg, buffer, xon_threshold, xon);
}
+ new_headroom_size /= port_buff_cell_sz;
+ current_headroom_size /= port_buff_cell_sz;
+ err = port_update_shared_buffer(priv->mdev, current_headroom_size,
+ new_headroom_size);
+ if (err)
+ goto out;
+
+ err = port_update_pool_cfg(priv->mdev, port_buffer);
+ if (err)
+ goto out;
+
err = mlx5e_port_set_pbmc(mdev, in);
out:
kfree(in);
@@ -174,6 +375,7 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
/**
* update_buffer_lossy - Update buffer configuration based on pfc
+ * @mdev: port function core device
* @max_mtu: netdev's max_mtu
* @pfc_en: <input> current pfc configuration
* @buffer: <input> current prio to buffer mapping
@@ -192,7 +394,8 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
* @return: 0 if no error,
* sets change to true if buffer configuration was modified.
*/
-static int update_buffer_lossy(unsigned int max_mtu,
+static int update_buffer_lossy(struct mlx5_core_dev *mdev,
+ unsigned int max_mtu,
u8 pfc_en, u8 *buffer, u32 xoff, u16 port_buff_cell_sz,
struct mlx5e_port_buffer *port_buffer,
bool *change)
@@ -229,6 +432,10 @@ static int update_buffer_lossy(unsigned int max_mtu,
}
if (changed) {
+ err = port_update_pool_cfg(mdev, port_buffer);
+ if (err)
+ return err;
+
err = update_xoff_threshold(port_buffer, xoff, max_mtu, port_buff_cell_sz);
if (err)
return err;
@@ -293,23 +500,30 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
}
if (change & MLX5E_PORT_BUFFER_PFC) {
+ mlx5e_dbg(HW, priv, "%s: requested PFC per priority bitmask: 0x%x\n",
+ __func__, pfc->pfc_en);
err = mlx5e_port_query_priority2buffer(priv->mdev, buffer);
if (err)
return err;
- err = update_buffer_lossy(max_mtu, pfc->pfc_en, buffer, xoff, port_buff_cell_sz,
- &port_buffer, &update_buffer);
+ err = update_buffer_lossy(priv->mdev, max_mtu, pfc->pfc_en, buffer, xoff,
+ port_buff_cell_sz, &port_buffer,
+ &update_buffer);
if (err)
return err;
}
if (change & MLX5E_PORT_BUFFER_PRIO2BUFFER) {
update_prio2buffer = true;
+ for (i = 0; i < MLX5E_MAX_BUFFER; i++)
+ mlx5e_dbg(HW, priv, "%s: requested to map prio[%d] to buffer %d\n",
+ __func__, i, prio2buffer[i]);
+
err = fill_pfc_en(priv->mdev, &curr_pfc_en);
if (err)
return err;
- err = update_buffer_lossy(max_mtu, curr_pfc_en, prio2buffer, xoff,
+ err = update_buffer_lossy(priv->mdev, max_mtu, curr_pfc_en, prio2buffer, xoff,
port_buff_cell_sz, &port_buffer, &update_buffer);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h
index 80af7a5ac604..a6ef118de758 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h
@@ -60,6 +60,7 @@ struct mlx5e_bufferx_reg {
struct mlx5e_port_buffer {
u32 port_buffer_size;
u32 spare_buffer_size;
+ u32 headroom_size;
struct mlx5e_bufferx_reg buffer[MLX5E_MAX_BUFFER];
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
index 8469e9c38670..9a1bc93b7dc6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
@@ -771,8 +771,8 @@ void mlx5e_ptp_activate_channel(struct mlx5e_ptp *c)
if (test_bit(MLX5E_PTP_STATE_RX, c->state)) {
mlx5e_ptp_rx_set_fs(c->priv);
mlx5e_activate_rq(&c->rq);
- mlx5e_trigger_napi_sched(&c->napi);
}
+ mlx5e_trigger_napi_sched(&c->napi);
}
void mlx5e_ptp_deactivate_channel(struct mlx5e_ptp *c)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c
index b6f5c1bcdbcd..016a61c52c45 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c
@@ -120,8 +120,8 @@ int mlx5e_rep_bond_enslave(struct mlx5_eswitch *esw, struct net_device *netdev,
priv = netdev_priv(netdev);
rpriv = priv->ppriv;
- err = mlx5_esw_acl_ingress_vport_bond_update(esw, rpriv->rep->vport,
- mdata->metadata_reg_c_0);
+ err = mlx5_esw_acl_ingress_vport_metadata_update(esw, rpriv->rep->vport,
+ mdata->metadata_reg_c_0);
if (err)
goto ingress_err;
@@ -167,7 +167,7 @@ void mlx5e_rep_bond_unslave(struct mlx5_eswitch *esw,
/* Reset bond_metadata to zero first then reset all ingress/egress
* acls and rx rules of unslave representor's vport
*/
- mlx5_esw_acl_ingress_vport_bond_update(esw, rpriv->rep->vport, 0);
+ mlx5_esw_acl_ingress_vport_metadata_update(esw, rpriv->rep->vport, 0);
mlx5_esw_acl_egress_vport_unbond(esw, rpriv->rep->vport);
mlx5e_rep_bond_update(priv, false);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
index 8099a21e674c..ce85b48d327d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
@@ -438,10 +438,6 @@ static int mlx5_esw_bridge_switchdev_event(struct notifier_block *nb,
switch (event) {
case SWITCHDEV_FDB_ADD_TO_BRIDGE:
- /* only handle the event on native eswtich of representor */
- if (!mlx5_esw_bridge_is_local(dev, rep, esw))
- break;
-
fdb_info = container_of(info,
struct switchdev_notifier_fdb_info,
info);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
index b08339d986d5..e24b46953542 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
@@ -1,7 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2020 Mellanox Technologies. */
-#include <net/dst_metadata.h>
#include <linux/netdevice.h>
#include <linux/if_macvlan.h>
#include <linux/list.h>
@@ -589,7 +588,7 @@ mlx5e_rep_indr_stats_act(struct mlx5e_rep_priv *rpriv,
act = mlx5e_tc_act_get(fl_act->id, ns_type);
if (!act || !act->stats_action)
- return -EOPNOTSUPP;
+ return mlx5e_tc_fill_action_stats(priv, fl_act);
return act->stats_action(priv, fl_act);
}
@@ -665,232 +664,54 @@ void mlx5e_rep_tc_netdevice_event_unregister(struct mlx5e_rep_priv *rpriv)
mlx5e_rep_indr_block_unbind);
}
-static bool mlx5e_restore_tunnel(struct mlx5e_priv *priv, struct sk_buff *skb,
- struct mlx5e_tc_update_priv *tc_priv,
- u32 tunnel_id)
-{
- struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- struct tunnel_match_enc_opts enc_opts = {};
- struct mlx5_rep_uplink_priv *uplink_priv;
- struct mlx5e_rep_priv *uplink_rpriv;
- struct metadata_dst *tun_dst;
- struct tunnel_match_key key;
- u32 tun_id, enc_opts_id;
- struct net_device *dev;
- int err;
-
- enc_opts_id = tunnel_id & ENC_OPTS_BITS_MASK;
- tun_id = tunnel_id >> ENC_OPTS_BITS;
-
- if (!tun_id)
- return true;
-
- uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
- uplink_priv = &uplink_rpriv->uplink_priv;
-
- err = mapping_find(uplink_priv->tunnel_mapping, tun_id, &key);
- if (err) {
- netdev_dbg(priv->netdev,
- "Couldn't find tunnel for tun_id: %d, err: %d\n",
- tun_id, err);
- return false;
- }
-
- if (enc_opts_id) {
- err = mapping_find(uplink_priv->tunnel_enc_opts_mapping,
- enc_opts_id, &enc_opts);
- if (err) {
- netdev_dbg(priv->netdev,
- "Couldn't find tunnel (opts) for tun_id: %d, err: %d\n",
- enc_opts_id, err);
- return false;
- }
- }
-
- if (key.enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
- tun_dst = __ip_tun_set_dst(key.enc_ipv4.src, key.enc_ipv4.dst,
- key.enc_ip.tos, key.enc_ip.ttl,
- key.enc_tp.dst, TUNNEL_KEY,
- key32_to_tunnel_id(key.enc_key_id.keyid),
- enc_opts.key.len);
- } else if (key.enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
- tun_dst = __ipv6_tun_set_dst(&key.enc_ipv6.src, &key.enc_ipv6.dst,
- key.enc_ip.tos, key.enc_ip.ttl,
- key.enc_tp.dst, 0, TUNNEL_KEY,
- key32_to_tunnel_id(key.enc_key_id.keyid),
- enc_opts.key.len);
- } else {
- netdev_dbg(priv->netdev,
- "Couldn't restore tunnel, unsupported addr_type: %d\n",
- key.enc_control.addr_type);
- return false;
- }
-
- if (!tun_dst) {
- netdev_dbg(priv->netdev, "Couldn't restore tunnel, no tun_dst\n");
- return false;
- }
-
- tun_dst->u.tun_info.key.tp_src = key.enc_tp.src;
-
- if (enc_opts.key.len)
- ip_tunnel_info_opts_set(&tun_dst->u.tun_info,
- enc_opts.key.data,
- enc_opts.key.len,
- enc_opts.key.dst_opt_type);
-
- skb_dst_set(skb, (struct dst_entry *)tun_dst);
- dev = dev_get_by_index(&init_net, key.filter_ifindex);
- if (!dev) {
- netdev_dbg(priv->netdev,
- "Couldn't find tunnel device with ifindex: %d\n",
- key.filter_ifindex);
- return false;
- }
-
- /* Set fwd_dev so we do dev_put() after datapath */
- tc_priv->fwd_dev = dev;
-
- skb->dev = dev;
-
- return true;
-}
-
-static bool mlx5e_restore_skb_chain(struct sk_buff *skb, u32 chain, u32 reg_c1,
- struct mlx5e_tc_update_priv *tc_priv)
-{
- struct mlx5e_priv *priv = netdev_priv(skb->dev);
- u32 tunnel_id = (reg_c1 >> ESW_TUN_OFFSET) & TUNNEL_ID_MASK;
-
-#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
- if (chain) {
- struct mlx5_rep_uplink_priv *uplink_priv;
- struct mlx5e_rep_priv *uplink_rpriv;
- struct tc_skb_ext *tc_skb_ext;
- struct mlx5_eswitch *esw;
- u32 zone_restore_id;
-
- tc_skb_ext = tc_skb_ext_alloc(skb);
- if (!tc_skb_ext) {
- WARN_ON(1);
- return false;
- }
- tc_skb_ext->chain = chain;
- zone_restore_id = reg_c1 & ESW_ZONE_ID_MASK;
- esw = priv->mdev->priv.eswitch;
- uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
- uplink_priv = &uplink_rpriv->uplink_priv;
- if (!mlx5e_tc_ct_restore_flow(uplink_priv->ct_priv, skb,
- zone_restore_id))
- return false;
- }
-#endif /* CONFIG_NET_TC_SKB_EXT */
-
- return mlx5e_restore_tunnel(priv, skb, tc_priv, tunnel_id);
-}
-
-static void mlx5_rep_tc_post_napi_receive(struct mlx5e_tc_update_priv *tc_priv)
-{
- if (tc_priv->fwd_dev)
- dev_put(tc_priv->fwd_dev);
-}
-
-static void mlx5e_restore_skb_sample(struct mlx5e_priv *priv, struct sk_buff *skb,
- struct mlx5_mapped_obj *mapped_obj,
- struct mlx5e_tc_update_priv *tc_priv)
-{
- if (!mlx5e_restore_tunnel(priv, skb, tc_priv, mapped_obj->sample.tunnel_id)) {
- netdev_dbg(priv->netdev,
- "Failed to restore tunnel info for sampled packet\n");
- return;
- }
- mlx5e_tc_sample_skb(skb, mapped_obj);
- mlx5_rep_tc_post_napi_receive(tc_priv);
-}
-
-static bool mlx5e_restore_skb_int_port(struct mlx5e_priv *priv, struct sk_buff *skb,
- struct mlx5_mapped_obj *mapped_obj,
- struct mlx5e_tc_update_priv *tc_priv,
- bool *forward_tx,
- u32 reg_c1)
-{
- u32 tunnel_id = (reg_c1 >> ESW_TUN_OFFSET) & TUNNEL_ID_MASK;
- struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- struct mlx5_rep_uplink_priv *uplink_priv;
- struct mlx5e_rep_priv *uplink_rpriv;
-
- /* Tunnel restore takes precedence over int port restore */
- if (tunnel_id)
- return mlx5e_restore_tunnel(priv, skb, tc_priv, tunnel_id);
-
- uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
- uplink_priv = &uplink_rpriv->uplink_priv;
-
- if (mlx5e_tc_int_port_dev_fwd(uplink_priv->int_port_priv, skb,
- mapped_obj->int_port_metadata, forward_tx)) {
- /* Set fwd_dev for future dev_put */
- tc_priv->fwd_dev = skb->dev;
-
- return true;
- }
-
- return false;
-}
-
void mlx5e_rep_tc_receive(struct mlx5_cqe64 *cqe, struct mlx5e_rq *rq,
struct sk_buff *skb)
{
- u32 reg_c1 = be32_to_cpu(cqe->ft_metadata);
+ u32 reg_c0, reg_c1, zone_restore_id, tunnel_id;
struct mlx5e_tc_update_priv tc_priv = {};
- struct mlx5_mapped_obj mapped_obj;
+ struct mlx5_rep_uplink_priv *uplink_priv;
+ struct mlx5e_rep_priv *uplink_rpriv;
+ struct mlx5_tc_ct_priv *ct_priv;
+ struct mapping_ctx *mapping_ctx;
struct mlx5_eswitch *esw;
- bool forward_tx = false;
struct mlx5e_priv *priv;
- u32 reg_c0;
- int err;
reg_c0 = (be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK);
if (!reg_c0 || reg_c0 == MLX5_FS_DEFAULT_FLOW_TAG)
goto forward;
- /* If reg_c0 is not equal to the default flow tag then skb->mark
+ /* If mapped_obj_id is not equal to the default flow tag then skb->mark
* is not supported and must be reset back to 0.
*/
skb->mark = 0;
priv = netdev_priv(skb->dev);
esw = priv->mdev->priv.eswitch;
- err = mapping_find(esw->offloads.reg_c0_obj_pool, reg_c0, &mapped_obj);
- if (err) {
- netdev_dbg(priv->netdev,
- "Couldn't find mapped object for reg_c0: %d, err: %d\n",
- reg_c0, err);
- goto free_skb;
- }
+ mapping_ctx = esw->offloads.reg_c0_obj_pool;
+ reg_c1 = be32_to_cpu(cqe->ft_metadata);
+ zone_restore_id = reg_c1 & ESW_ZONE_ID_MASK;
+ tunnel_id = (reg_c1 >> ESW_TUN_OFFSET) & TUNNEL_ID_MASK;
- if (mapped_obj.type == MLX5_MAPPED_OBJ_CHAIN) {
- if (!mlx5e_restore_skb_chain(skb, mapped_obj.chain, reg_c1, &tc_priv) &&
- !mlx5_ipsec_is_rx_flow(cqe))
- goto free_skb;
- } else if (mapped_obj.type == MLX5_MAPPED_OBJ_SAMPLE) {
- mlx5e_restore_skb_sample(priv, skb, &mapped_obj, &tc_priv);
- goto free_skb;
- } else if (mapped_obj.type == MLX5_MAPPED_OBJ_INT_PORT_METADATA) {
- if (!mlx5e_restore_skb_int_port(priv, skb, &mapped_obj, &tc_priv,
- &forward_tx, reg_c1))
- goto free_skb;
- } else {
- netdev_dbg(priv->netdev, "Invalid mapped object type: %d\n", mapped_obj.type);
+ uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
+ uplink_priv = &uplink_rpriv->uplink_priv;
+ ct_priv = uplink_priv->ct_priv;
+
+ if (!mlx5_ipsec_is_rx_flow(cqe) &&
+ !mlx5e_tc_update_skb(cqe, skb, mapping_ctx, reg_c0, ct_priv, zone_restore_id, tunnel_id,
+ &tc_priv))
goto free_skb;
- }
forward:
- if (forward_tx)
+ if (tc_priv.skb_done)
+ goto free_skb;
+
+ if (tc_priv.forward_tx)
dev_queue_xmit(skb);
else
napi_gro_receive(rq->cq.napi, skb);
- mlx5_rep_tc_post_napi_receive(&tc_priv);
+ if (tc_priv.fwd_dev)
+ dev_put(tc_priv.fwd_dev);
return;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
index 1ae15b8536a8..c462fe76495b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
@@ -736,10 +736,10 @@ static const struct devlink_health_reporter_ops mlx5_rx_reporter_ops = {
void mlx5e_reporter_rx_create(struct mlx5e_priv *priv)
{
- struct devlink_port *dl_port = mlx5e_devlink_get_dl_port(priv);
struct devlink_health_reporter *reporter;
- reporter = devlink_port_health_reporter_create(dl_port, &mlx5_rx_reporter_ops,
+ reporter = devlink_port_health_reporter_create(priv->netdev->devlink_port,
+ &mlx5_rx_reporter_ops,
MLX5E_REPORTER_RX_GRACEFUL_PERIOD, priv);
if (IS_ERR(reporter)) {
netdev_warn(priv->netdev, "Failed to create rx reporter, err = %ld\n",
@@ -754,6 +754,6 @@ void mlx5e_reporter_rx_destroy(struct mlx5e_priv *priv)
if (!priv->rx_reporter)
return;
- devlink_port_health_reporter_destroy(priv->rx_reporter);
+ devlink_health_reporter_destroy(priv->rx_reporter);
priv->rx_reporter = NULL;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
index 60bc5b577ab9..34666e2b3871 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
@@ -81,6 +81,10 @@ static int mlx5e_tx_reporter_err_cqe_recover(void *ctx)
sq->stats->recover++;
clear_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state);
mlx5e_activate_txqsq(sq);
+ if (sq->channel)
+ mlx5e_trigger_napi_icosq(sq->channel);
+ else
+ mlx5e_trigger_napi_sched(sq->cq.napi);
return 0;
out:
@@ -590,10 +594,10 @@ static const struct devlink_health_reporter_ops mlx5_tx_reporter_ops = {
void mlx5e_reporter_tx_create(struct mlx5e_priv *priv)
{
- struct devlink_port *dl_port = mlx5e_devlink_get_dl_port(priv);
struct devlink_health_reporter *reporter;
- reporter = devlink_port_health_reporter_create(dl_port, &mlx5_tx_reporter_ops,
+ reporter = devlink_port_health_reporter_create(priv->netdev->devlink_port,
+ &mlx5_tx_reporter_ops,
MLX5_REPORTER_TX_GRACEFUL_PERIOD, priv);
if (IS_ERR(reporter)) {
netdev_warn(priv->netdev,
@@ -609,6 +613,6 @@ void mlx5e_reporter_tx_destroy(struct mlx5e_priv *priv)
if (!priv->tx_reporter)
return;
- devlink_port_health_reporter_destroy(priv->tx_reporter);
+ devlink_health_reporter_destroy(priv->tx_reporter);
priv->tx_reporter = NULL;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c
index 78c427b38048..07cc65596f89 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c
@@ -216,7 +216,6 @@ parse_mirred(struct mlx5e_tc_act_parse_state *parse_state,
struct net_device *uplink_dev;
struct mlx5e_priv *out_priv;
struct mlx5_eswitch *esw;
- bool is_uplink_rep;
int *ifindexes;
int if_count;
int err;
@@ -231,10 +230,9 @@ parse_mirred(struct mlx5e_tc_act_parse_state *parse_state,
parse_state->ifindexes[if_count] = out_dev->ifindex;
parse_state->if_count++;
- is_uplink_rep = mlx5e_eswitch_uplink_rep(out_dev);
- err = mlx5_lag_do_mirred(priv->mdev, out_dev);
- if (err)
- return err;
+
+ if (mlx5_lag_mpesw_do_mirred(priv->mdev, out_dev, extack))
+ return -EOPNOTSUPP;
out_dev = get_fdb_out_dev(uplink_dev, out_dev);
if (!out_dev)
@@ -275,13 +273,6 @@ parse_mirred(struct mlx5e_tc_act_parse_state *parse_state,
esw_attr->dests[esw_attr->out_count].rep = rpriv->rep;
esw_attr->dests[esw_attr->out_count].mdev = out_priv->mdev;
- /* If output device is bond master then rules are not explicit
- * so we don't attempt to count them.
- */
- if (is_uplink_rep && MLX5_CAP_PORT_SELECTION(priv->mdev, port_select_flow_table) &&
- MLX5_CAP_GEN(priv->mdev, create_lag_when_not_master_up))
- attr->lag.count = true;
-
esw_attr->out_count++;
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.c
index b86ac604d0c2..2e0d88b513aa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.c
@@ -44,19 +44,17 @@ parse_tc_vlan_action(struct mlx5e_priv *priv,
return -EOPNOTSUPP;
}
+ if (!mlx5_eswitch_vlan_actions_supported(priv->mdev, vlan_idx)) {
+ NL_SET_ERR_MSG_MOD(extack, "firmware vlan actions is not supported");
+ return -EOPNOTSUPP;
+ }
+
switch (act->id) {
case FLOW_ACTION_VLAN_POP:
- if (vlan_idx) {
- if (!mlx5_eswitch_vlan_actions_supported(priv->mdev,
- MLX5_FS_VLAN_DEPTH)) {
- NL_SET_ERR_MSG_MOD(extack, "vlan pop action is not supported");
- return -EOPNOTSUPP;
- }
-
+ if (vlan_idx)
*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2;
- } else {
+ else
*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
- }
break;
case FLOW_ACTION_VLAN_PUSH:
attr->vlan_vid[vlan_idx] = act->vlan.vid;
@@ -65,25 +63,10 @@ parse_tc_vlan_action(struct mlx5e_priv *priv,
if (!attr->vlan_proto[vlan_idx])
attr->vlan_proto[vlan_idx] = htons(ETH_P_8021Q);
- if (vlan_idx) {
- if (!mlx5_eswitch_vlan_actions_supported(priv->mdev,
- MLX5_FS_VLAN_DEPTH)) {
- NL_SET_ERR_MSG_MOD(extack,
- "vlan push action is not supported for vlan depth > 1");
- return -EOPNOTSUPP;
- }
-
+ if (vlan_idx)
*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2;
- } else {
- if (!mlx5_eswitch_vlan_actions_supported(priv->mdev, 1) &&
- (act->vlan.proto != htons(ETH_P_8021Q) ||
- act->vlan.prio)) {
- NL_SET_ERR_MSG_MOD(extack, "vlan push action is not supported");
- return -EOPNOTSUPP;
- }
-
+ else
*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
- }
break;
case FLOW_ACTION_VLAN_POP_ETH:
parse_state->eth_pop = true;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act_stats.c
new file mode 100644
index 000000000000..f71766dca660
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act_stats.c
@@ -0,0 +1,197 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include <linux/rhashtable.h>
+#include <net/flow_offload.h>
+#include "en/tc_priv.h"
+#include "act_stats.h"
+#include "en/fs.h"
+
+struct mlx5e_tc_act_stats_handle {
+ struct rhashtable ht;
+ spinlock_t ht_lock; /* protects hashtable */
+};
+
+struct mlx5e_tc_act_stats {
+ unsigned long tc_act_cookie;
+
+ struct mlx5_fc *counter;
+ u64 lastpackets;
+ u64 lastbytes;
+
+ struct rhash_head hash;
+ struct rcu_head rcu_head;
+};
+
+static const struct rhashtable_params act_counters_ht_params = {
+ .head_offset = offsetof(struct mlx5e_tc_act_stats, hash),
+ .key_offset = 0,
+ .key_len = offsetof(struct mlx5e_tc_act_stats, counter),
+ .automatic_shrinking = true,
+};
+
+struct mlx5e_tc_act_stats_handle *
+mlx5e_tc_act_stats_create(void)
+{
+ struct mlx5e_tc_act_stats_handle *handle;
+ int err;
+
+ handle = kvzalloc(sizeof(*handle), GFP_KERNEL);
+ if (IS_ERR(handle))
+ return ERR_PTR(-ENOMEM);
+
+ err = rhashtable_init(&handle->ht, &act_counters_ht_params);
+ if (err)
+ goto err;
+
+ spin_lock_init(&handle->ht_lock);
+ return handle;
+err:
+ kvfree(handle);
+ return ERR_PTR(err);
+}
+
+void mlx5e_tc_act_stats_free(struct mlx5e_tc_act_stats_handle *handle)
+{
+ rhashtable_destroy(&handle->ht);
+ kvfree(handle);
+}
+
+static int
+mlx5e_tc_act_stats_add(struct mlx5e_tc_act_stats_handle *handle,
+ unsigned long act_cookie,
+ struct mlx5_fc *counter)
+{
+ struct mlx5e_tc_act_stats *act_stats, *old_act_stats;
+ struct rhashtable *ht = &handle->ht;
+ int err = 0;
+
+ act_stats = kvzalloc(sizeof(*act_stats), GFP_KERNEL);
+ if (!act_stats)
+ return -ENOMEM;
+
+ act_stats->tc_act_cookie = act_cookie;
+ act_stats->counter = counter;
+
+ rcu_read_lock();
+ old_act_stats = rhashtable_lookup_get_insert_fast(ht,
+ &act_stats->hash,
+ act_counters_ht_params);
+ if (IS_ERR(old_act_stats)) {
+ err = PTR_ERR(old_act_stats);
+ goto err_hash_insert;
+ } else if (old_act_stats) {
+ err = -EEXIST;
+ goto err_hash_insert;
+ }
+ rcu_read_unlock();
+
+ return 0;
+
+err_hash_insert:
+ rcu_read_unlock();
+ kvfree(act_stats);
+ return err;
+}
+
+void
+mlx5e_tc_act_stats_del_flow(struct mlx5e_tc_act_stats_handle *handle,
+ struct mlx5e_tc_flow *flow)
+{
+ struct mlx5_flow_attr *attr;
+ struct mlx5e_tc_act_stats *act_stats;
+ int i;
+
+ if (!flow_flag_test(flow, USE_ACT_STATS))
+ return;
+
+ list_for_each_entry(attr, &flow->attrs, list) {
+ for (i = 0; i < attr->tc_act_cookies_count; i++) {
+ struct rhashtable *ht = &handle->ht;
+
+ spin_lock(&handle->ht_lock);
+ act_stats = rhashtable_lookup_fast(ht,
+ &attr->tc_act_cookies[i],
+ act_counters_ht_params);
+ if (act_stats &&
+ rhashtable_remove_fast(ht, &act_stats->hash,
+ act_counters_ht_params) == 0)
+ kvfree_rcu(act_stats, rcu_head);
+
+ spin_unlock(&handle->ht_lock);
+ }
+ }
+}
+
+int
+mlx5e_tc_act_stats_add_flow(struct mlx5e_tc_act_stats_handle *handle,
+ struct mlx5e_tc_flow *flow)
+{
+ struct mlx5_fc *curr_counter = NULL;
+ unsigned long last_cookie = 0;
+ struct mlx5_flow_attr *attr;
+ int err;
+ int i;
+
+ if (!flow_flag_test(flow, USE_ACT_STATS))
+ return 0;
+
+ list_for_each_entry(attr, &flow->attrs, list) {
+ if (attr->counter)
+ curr_counter = attr->counter;
+
+ for (i = 0; i < attr->tc_act_cookies_count; i++) {
+ /* jump over identical ids (e.g. pedit)*/
+ if (last_cookie == attr->tc_act_cookies[i])
+ continue;
+
+ err = mlx5e_tc_act_stats_add(handle, attr->tc_act_cookies[i], curr_counter);
+ if (err)
+ goto out_err;
+ last_cookie = attr->tc_act_cookies[i];
+ }
+ }
+
+ return 0;
+out_err:
+ mlx5e_tc_act_stats_del_flow(handle, flow);
+ return err;
+}
+
+int
+mlx5e_tc_act_stats_fill_stats(struct mlx5e_tc_act_stats_handle *handle,
+ struct flow_offload_action *fl_act)
+{
+ struct rhashtable *ht = &handle->ht;
+ struct mlx5e_tc_act_stats *item;
+ struct mlx5e_tc_act_stats key;
+ u64 pkts, bytes, lastused;
+ int err = 0;
+
+ key.tc_act_cookie = fl_act->cookie;
+
+ rcu_read_lock();
+ item = rhashtable_lookup(ht, &key, act_counters_ht_params);
+ if (!item) {
+ rcu_read_unlock();
+ err = -ENOENT;
+ goto err_out;
+ }
+
+ mlx5_fc_query_cached_raw(item->counter,
+ &bytes, &pkts, &lastused);
+
+ flow_stats_update(&fl_act->stats,
+ bytes - item->lastbytes,
+ pkts - item->lastpackets,
+ 0, lastused, FLOW_ACTION_HW_STATS_DELAYED);
+
+ item->lastpackets = pkts;
+ item->lastbytes = bytes;
+ rcu_read_unlock();
+
+ return 0;
+
+err_out:
+ return err;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act_stats.h
new file mode 100644
index 000000000000..002292c2567c
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act_stats.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#ifndef __MLX5_EN_ACT_STATS_H__
+#define __MLX5_EN_ACT_STATS_H__
+
+#include <net/flow_offload.h>
+#include "en/tc_priv.h"
+
+struct mlx5e_tc_act_stats_handle;
+
+struct mlx5e_tc_act_stats_handle *mlx5e_tc_act_stats_create(void);
+void mlx5e_tc_act_stats_free(struct mlx5e_tc_act_stats_handle *handle);
+
+int
+mlx5e_tc_act_stats_add_flow(struct mlx5e_tc_act_stats_handle *handle,
+ struct mlx5e_tc_flow *flow);
+
+void
+mlx5e_tc_act_stats_del_flow(struct mlx5e_tc_act_stats_handle *handle,
+ struct mlx5e_tc_flow *flow);
+
+int
+mlx5e_tc_act_stats_fill_stats(struct mlx5e_tc_act_stats_handle *handle,
+ struct flow_offload_action *fl_act);
+
+#endif /* __MLX5_EN_ACT_STATS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c
index 78af8a3175bf..8218c892b161 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c
@@ -28,7 +28,7 @@ struct mlx5e_flow_meter_aso_obj {
int base_id;
int total_meters;
- unsigned long meters_map[0]; /* must be at the end of this struct */
+ unsigned long meters_map[]; /* must be at the end of this struct */
};
struct mlx5e_flow_meters {
@@ -204,13 +204,15 @@ mlx5e_flow_meter_create_aso_obj(struct mlx5e_flow_meters *flow_meters, int *obj_
u32 in[MLX5_ST_SZ_DW(create_flow_meter_aso_obj_in)] = {};
u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
struct mlx5_core_dev *mdev = flow_meters->mdev;
- void *obj;
+ void *obj, *param;
int err;
MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
MLX5_GENERAL_OBJECT_TYPES_FLOW_METER_ASO);
- MLX5_SET(general_obj_in_cmd_hdr, in, log_obj_range, flow_meters->log_granularity);
+ param = MLX5_ADDR_OF(general_obj_in_cmd_hdr, in, op_param);
+ MLX5_SET(general_obj_create_param, param, log_obj_range,
+ flow_meters->log_granularity);
obj = MLX5_ADDR_OF(create_flow_meter_aso_obj_in, in, flow_meter_aso_obj);
MLX5_SET(flow_meter_aso_obj, obj, meter_aso_access_pd, flow_meters->pdn);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c
index f2c2c752bd1c..558a776359af 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c
@@ -237,7 +237,7 @@ sample_modify_hdr_get(struct mlx5_core_dev *mdev, u32 obj_id,
int err;
err = mlx5e_tc_match_to_reg_set(mdev, mod_acts, MLX5_FLOW_NAMESPACE_FDB,
- CHAIN_TO_REG, obj_id);
+ MAPPED_OBJ_TO_REG, obj_id);
if (err)
goto err_set_regc0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
index 313df8232db7..314983bc6f08 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
@@ -35,6 +35,7 @@
#define MLX5_CT_STATE_REPLY_BIT BIT(4)
#define MLX5_CT_STATE_RELATED_BIT BIT(5)
#define MLX5_CT_STATE_INVALID_BIT BIT(6)
+#define MLX5_CT_STATE_NEW_BIT BIT(7)
#define MLX5_CT_LABELS_BITS MLX5_REG_MAPPING_MBITS(LABELS_TO_REG)
#define MLX5_CT_LABELS_MASK MLX5_REG_MAPPING_MASK(LABELS_TO_REG)
@@ -59,6 +60,7 @@ struct mlx5_tc_ct_debugfs {
struct mlx5_tc_ct_priv {
struct mlx5_core_dev *dev;
+ struct mlx5e_priv *priv;
const struct net_device *netdev;
struct mod_hdr_tbl *mod_hdr_tbl;
struct xarray tuple_ids;
@@ -85,7 +87,6 @@ struct mlx5_ct_flow {
struct mlx5_flow_attr *pre_ct_attr;
struct mlx5_flow_handle *pre_ct_rule;
struct mlx5_ct_ft *ft;
- u32 chain_mapping;
};
struct mlx5_ct_zone_rule {
@@ -721,12 +722,14 @@ mlx5_tc_ct_entry_create_mod_hdr(struct mlx5_tc_ct_priv *ct_priv,
DECLARE_MOD_HDR_ACTS_ACTIONS(actions_arr, MLX5_CT_MIN_MOD_ACTS);
DECLARE_MOD_HDR_ACTS(mod_acts, actions_arr);
struct flow_action_entry *meta;
+ enum ip_conntrack_info ctinfo;
u16 ct_state = 0;
int err;
meta = mlx5_tc_ct_get_ct_metadata_action(flow_rule);
if (!meta)
return -EOPNOTSUPP;
+ ctinfo = meta->ct_metadata.cookie & NFCT_INFOMASK;
err = mlx5_get_label_mapping(ct_priv, meta->ct_metadata.labels,
&attr->ct_attr.ct_labels_id);
@@ -742,7 +745,8 @@ mlx5_tc_ct_entry_create_mod_hdr(struct mlx5_tc_ct_priv *ct_priv,
ct_state |= MLX5_CT_STATE_NAT_BIT;
}
- ct_state |= MLX5_CT_STATE_ESTABLISHED_BIT | MLX5_CT_STATE_TRK_BIT;
+ ct_state |= MLX5_CT_STATE_TRK_BIT;
+ ct_state |= ctinfo == IP_CT_NEW ? MLX5_CT_STATE_NEW_BIT : MLX5_CT_STATE_ESTABLISHED_BIT;
ct_state |= meta->ct_metadata.orig_dir ? 0 : MLX5_CT_STATE_REPLY_BIT;
err = mlx5_tc_ct_entry_set_registers(ct_priv, &mod_acts,
ct_state,
@@ -871,6 +875,68 @@ err_attr:
return err;
}
+static int
+mlx5_tc_ct_entry_replace_rule(struct mlx5_tc_ct_priv *ct_priv,
+ struct flow_rule *flow_rule,
+ struct mlx5_ct_entry *entry,
+ bool nat, u8 zone_restore_id)
+{
+ struct mlx5_ct_zone_rule *zone_rule = &entry->zone_rules[nat];
+ struct mlx5_flow_attr *attr = zone_rule->attr, *old_attr;
+ struct mlx5e_mod_hdr_handle *mh;
+ struct mlx5_ct_fs_rule *rule;
+ struct mlx5_flow_spec *spec;
+ int err;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return -ENOMEM;
+
+ old_attr = mlx5_alloc_flow_attr(ct_priv->ns_type);
+ if (!old_attr) {
+ err = -ENOMEM;
+ goto err_attr;
+ }
+ *old_attr = *attr;
+
+ err = mlx5_tc_ct_entry_create_mod_hdr(ct_priv, attr, flow_rule, &mh, zone_restore_id,
+ nat, mlx5_tc_ct_entry_has_nat(entry));
+ if (err) {
+ ct_dbg("Failed to create ct entry mod hdr");
+ goto err_mod_hdr;
+ }
+
+ mlx5_tc_ct_set_tuple_match(ct_priv, spec, flow_rule);
+ mlx5e_tc_match_to_reg_match(spec, ZONE_TO_REG, entry->tuple.zone, MLX5_CT_ZONE_MASK);
+
+ rule = ct_priv->fs_ops->ct_rule_add(ct_priv->fs, spec, attr, flow_rule);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ ct_dbg("Failed to add replacement ct entry rule, nat: %d", nat);
+ goto err_rule;
+ }
+
+ ct_priv->fs_ops->ct_rule_del(ct_priv->fs, zone_rule->rule);
+ zone_rule->rule = rule;
+ mlx5_tc_ct_entry_destroy_mod_hdr(ct_priv, old_attr, zone_rule->mh);
+ zone_rule->mh = mh;
+
+ kfree(old_attr);
+ kvfree(spec);
+ ct_dbg("Replaced ct entry rule in zone %d", entry->tuple.zone);
+
+ return 0;
+
+err_rule:
+ mlx5_tc_ct_entry_destroy_mod_hdr(ct_priv, zone_rule->attr, mh);
+ mlx5_put_label_mapping(ct_priv, attr->ct_attr.ct_labels_id);
+err_mod_hdr:
+ kfree(old_attr);
+err_attr:
+ kvfree(spec);
+ return err;
+}
+
static bool
mlx5_tc_ct_entry_valid(struct mlx5_ct_entry *entry)
{
@@ -1066,6 +1132,52 @@ err_orig:
}
static int
+mlx5_tc_ct_entry_replace_rules(struct mlx5_tc_ct_priv *ct_priv,
+ struct flow_rule *flow_rule,
+ struct mlx5_ct_entry *entry,
+ u8 zone_restore_id)
+{
+ int err;
+
+ err = mlx5_tc_ct_entry_replace_rule(ct_priv, flow_rule, entry, false,
+ zone_restore_id);
+ if (err)
+ return err;
+
+ err = mlx5_tc_ct_entry_replace_rule(ct_priv, flow_rule, entry, true,
+ zone_restore_id);
+ if (err)
+ mlx5_tc_ct_entry_del_rule(ct_priv, entry, false);
+ return err;
+}
+
+static int
+mlx5_tc_ct_block_flow_offload_replace(struct mlx5_ct_ft *ft, struct flow_rule *flow_rule,
+ struct mlx5_ct_entry *entry, unsigned long cookie)
+{
+ struct mlx5_tc_ct_priv *ct_priv = ft->ct_priv;
+ int err;
+
+ err = mlx5_tc_ct_entry_replace_rules(ct_priv, flow_rule, entry, ft->zone_restore_id);
+ if (!err)
+ return 0;
+
+ /* If failed to update the entry, then look it up again under ht_lock
+ * protection and properly delete it.
+ */
+ spin_lock_bh(&ct_priv->ht_lock);
+ entry = rhashtable_lookup_fast(&ft->ct_entries_ht, &cookie, cts_ht_params);
+ if (entry) {
+ rhashtable_remove_fast(&ft->ct_entries_ht, &entry->node, cts_ht_params);
+ spin_unlock_bh(&ct_priv->ht_lock);
+ mlx5_tc_ct_entry_put(entry);
+ } else {
+ spin_unlock_bh(&ct_priv->ht_lock);
+ }
+ return err;
+}
+
+static int
mlx5_tc_ct_block_flow_offload_add(struct mlx5_ct_ft *ft,
struct flow_cls_offload *flow)
{
@@ -1083,9 +1195,17 @@ mlx5_tc_ct_block_flow_offload_add(struct mlx5_ct_ft *ft,
spin_lock_bh(&ct_priv->ht_lock);
entry = rhashtable_lookup_fast(&ft->ct_entries_ht, &cookie, cts_ht_params);
if (entry && refcount_inc_not_zero(&entry->refcnt)) {
+ if (entry->restore_cookie == meta_action->ct_metadata.cookie) {
+ spin_unlock_bh(&ct_priv->ht_lock);
+ mlx5_tc_ct_entry_put(entry);
+ return -EEXIST;
+ }
+ entry->restore_cookie = meta_action->ct_metadata.cookie;
spin_unlock_bh(&ct_priv->ht_lock);
+
+ err = mlx5_tc_ct_block_flow_offload_replace(ft, flow_rule, entry, cookie);
mlx5_tc_ct_entry_put(entry);
- return -EEXIST;
+ return err;
}
spin_unlock_bh(&ct_priv->ht_lock);
@@ -1323,7 +1443,7 @@ mlx5_tc_ct_match_add(struct mlx5_tc_ct_priv *priv,
struct mlx5_ct_attr *ct_attr,
struct netlink_ext_ack *extack)
{
- bool trk, est, untrk, unest, new, rpl, unrpl, rel, unrel, inv, uninv;
+ bool trk, est, untrk, unnew, unest, new, rpl, unrpl, rel, unrel, inv, uninv;
struct flow_rule *rule = flow_cls_offload_flow_rule(f);
struct flow_dissector_key_ct *mask, *key;
u32 ctstate = 0, ctstate_mask = 0;
@@ -1369,15 +1489,18 @@ mlx5_tc_ct_match_add(struct mlx5_tc_ct_priv *priv,
rel = ct_state_on & TCA_FLOWER_KEY_CT_FLAGS_RELATED;
inv = ct_state_on & TCA_FLOWER_KEY_CT_FLAGS_INVALID;
untrk = ct_state_off & TCA_FLOWER_KEY_CT_FLAGS_TRACKED;
+ unnew = ct_state_off & TCA_FLOWER_KEY_CT_FLAGS_NEW;
unest = ct_state_off & TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED;
unrpl = ct_state_off & TCA_FLOWER_KEY_CT_FLAGS_REPLY;
unrel = ct_state_off & TCA_FLOWER_KEY_CT_FLAGS_RELATED;
uninv = ct_state_off & TCA_FLOWER_KEY_CT_FLAGS_INVALID;
ctstate |= trk ? MLX5_CT_STATE_TRK_BIT : 0;
+ ctstate |= new ? MLX5_CT_STATE_NEW_BIT : 0;
ctstate |= est ? MLX5_CT_STATE_ESTABLISHED_BIT : 0;
ctstate |= rpl ? MLX5_CT_STATE_REPLY_BIT : 0;
ctstate_mask |= (untrk || trk) ? MLX5_CT_STATE_TRK_BIT : 0;
+ ctstate_mask |= (unnew || new) ? MLX5_CT_STATE_NEW_BIT : 0;
ctstate_mask |= (unest || est) ? MLX5_CT_STATE_ESTABLISHED_BIT : 0;
ctstate_mask |= (unrpl || rpl) ? MLX5_CT_STATE_REPLY_BIT : 0;
ctstate_mask |= unrel ? MLX5_CT_STATE_RELATED_BIT : 0;
@@ -1395,12 +1518,6 @@ mlx5_tc_ct_match_add(struct mlx5_tc_ct_priv *priv,
return -EOPNOTSUPP;
}
- if (new) {
- NL_SET_ERR_MSG_MOD(extack,
- "matching on ct_state +new isn't supported");
- return -EOPNOTSUPP;
- }
-
if (mask->ct_zone)
mlx5e_tc_match_to_reg_match(spec, ZONE_TO_REG,
key->ct_zone, MLX5_CT_ZONE_MASK);
@@ -1441,6 +1558,7 @@ mlx5_tc_ct_parse_action(struct mlx5_tc_ct_priv *priv,
attr->ct_attr.zone = act->ct.zone;
attr->ct_attr.ct_action = act->ct.action;
attr->ct_attr.nf_ft = act->ct.flow_table;
+ attr->ct_attr.act_miss_cookie = act->miss_cookie;
return 0;
}
@@ -1778,7 +1896,7 @@ mlx5_tc_ct_del_ft_cb(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_ft *ft)
* + ft prio (tc chain) +
* + original match +
* +---------------------+
- * | set chain miss mapping
+ * | set act_miss_cookie mapping
* | set fte_id
* | set tunnel_id
* | do decap
@@ -1823,7 +1941,7 @@ __mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv,
struct mlx5_flow_attr *pre_ct_attr;
struct mlx5_modify_hdr *mod_hdr;
struct mlx5_ct_flow *ct_flow;
- int chain_mapping = 0, err;
+ int act_miss_mapping = 0, err;
struct mlx5_ct_ft *ft;
u16 zone;
@@ -1858,22 +1976,18 @@ __mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv,
pre_ct_attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
- /* Write chain miss tag for miss in ct table as we
- * don't go though all prios of this chain as normal tc rules
- * miss.
- */
- err = mlx5_chains_get_chain_mapping(ct_priv->chains, attr->chain,
- &chain_mapping);
+ err = mlx5e_tc_action_miss_mapping_get(ct_priv->priv, attr, attr->ct_attr.act_miss_cookie,
+ &act_miss_mapping);
if (err) {
- ct_dbg("Failed to get chain register mapping for chain");
- goto err_get_chain;
+ ct_dbg("Failed to get register mapping for act miss");
+ goto err_get_act_miss;
}
- ct_flow->chain_mapping = chain_mapping;
+ attr->ct_attr.act_miss_mapping = act_miss_mapping;
err = mlx5e_tc_match_to_reg_set(priv->mdev, pre_mod_acts, ct_priv->ns_type,
- CHAIN_TO_REG, chain_mapping);
+ MAPPED_OBJ_TO_REG, act_miss_mapping);
if (err) {
- ct_dbg("Failed to set chain register mapping");
+ ct_dbg("Failed to set act miss register mapping");
goto err_mapping;
}
@@ -1937,8 +2051,8 @@ err_insert_orig:
mlx5_modify_header_dealloc(priv->mdev, pre_ct_attr->modify_hdr);
err_mapping:
mlx5e_mod_hdr_dealloc(pre_mod_acts);
- mlx5_chains_put_chain_mapping(ct_priv->chains, ct_flow->chain_mapping);
-err_get_chain:
+ mlx5e_tc_action_miss_mapping_put(ct_priv->priv, attr, act_miss_mapping);
+err_get_act_miss:
kfree(ct_flow->pre_ct_attr);
err_alloc_pre:
mlx5_tc_ct_del_ft_cb(ct_priv, ft);
@@ -1977,7 +2091,7 @@ __mlx5_tc_ct_delete_flow(struct mlx5_tc_ct_priv *ct_priv,
mlx5_tc_rule_delete(priv, ct_flow->pre_ct_rule, pre_ct_attr);
mlx5_modify_header_dealloc(priv->mdev, pre_ct_attr->modify_hdr);
- mlx5_chains_put_chain_mapping(ct_priv->chains, ct_flow->chain_mapping);
+ mlx5e_tc_action_miss_mapping_put(ct_priv->priv, attr, attr->ct_attr.act_miss_mapping);
mlx5_tc_ct_del_ft_cb(ct_priv, ct_flow->ft);
kfree(ct_flow->pre_ct_attr);
@@ -2074,13 +2188,6 @@ mlx5_tc_ct_init_check_support(struct mlx5e_priv *priv,
const char *err_msg = NULL;
int err = 0;
-#if !IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
- /* cannot restore chain ID on HW miss */
-
- err_msg = "tc skb extension missing";
- err = -EOPNOTSUPP;
- goto out_err;
-#endif
if (IS_ERR_OR_NULL(post_act)) {
/* Ignore_flow_level support isn't supported by default for VFs and so post_act
* won't be supported. Skip showing error msg.
@@ -2157,6 +2264,7 @@ mlx5_tc_ct_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains,
}
spin_lock_init(&ct_priv->ht_lock);
+ ct_priv->priv = priv;
ct_priv->ns_type = ns_type;
ct_priv->chains = chains;
ct_priv->netdev = priv->netdev;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h
index 5bbd6b92840f..5c5ddaa83055 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h
@@ -28,6 +28,8 @@ struct mlx5_ct_attr {
struct mlx5_ct_flow *ct_flow;
struct nf_flowtable *nf_ft;
u32 ct_labels_id;
+ u32 act_miss_mapping;
+ u64 act_miss_cookie;
};
#define zone_to_reg_ct {\
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
index 2b7fd1c0e643..451fd4342a5a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
@@ -30,6 +30,7 @@ enum {
MLX5E_TC_FLOW_FLAG_TUN_RX = MLX5E_TC_FLOW_BASE + 9,
MLX5E_TC_FLOW_FLAG_FAILED = MLX5E_TC_FLOW_BASE + 10,
MLX5E_TC_FLOW_FLAG_SAMPLE = MLX5E_TC_FLOW_BASE + 11,
+ MLX5E_TC_FLOW_FLAG_USE_ACT_STATS = MLX5E_TC_FLOW_BASE + 12,
};
struct mlx5e_tc_flow_parse_attr {
@@ -95,8 +96,6 @@ struct mlx5e_tc_flow {
*/
struct encap_flow_item encaps[MLX5_MAX_FLOW_FWD_VPORTS];
struct mlx5e_tc_flow *peer_flow;
- struct mlx5e_mod_hdr_handle *mh; /* attached mod header instance */
- struct mlx5e_mod_hdr_handle *slow_mh; /* attached mod header instance for slow path */
struct mlx5e_hairpin_entry *hpe; /* attached hairpin instance */
struct list_head hairpin; /* flows sharing the same hairpin */
struct list_head peer; /* flows with peer flow */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
index e6f64d890fb3..00a04fdd756f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
@@ -93,11 +93,11 @@ static int get_route_and_out_devs(struct mlx5e_priv *priv,
else
return -EOPNOTSUPP;
- if (!(mlx5e_eswitch_rep(*out_dev) &&
- mlx5e_is_uplink_rep(netdev_priv(*out_dev))))
+ if (!mlx5e_eswitch_uplink_rep(*out_dev))
return -EOPNOTSUPP;
- if (mlx5e_eswitch_uplink_rep(priv->netdev) && *out_dev != priv->netdev)
+ if (mlx5e_eswitch_uplink_rep(priv->netdev) && *out_dev != priv->netdev &&
+ !mlx5_lag_is_mpesw(priv->mdev))
return -EOPNOTSUPP;
return 0;
@@ -745,8 +745,6 @@ int mlx5e_tc_tun_route_lookup(struct mlx5e_priv *priv,
if (err)
goto out;
- esw_attr->rx_tun_attr->vni = MLX5_GET(fte_match_param, spec->match_value,
- misc_parameters.vxlan_vni);
esw_attr->rx_tun_attr->decap_vport = vport_num;
} else if (netif_is_ovs_master(attr.route_dev) && mlx5e_tc_int_port_supported(esw)) {
int_port = mlx5e_tc_int_port_get(mlx5e_get_int_port_priv(priv),
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
index 2aaf8ab857b8..780224fd67a1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
@@ -1349,7 +1349,8 @@ static void mlx5e_invalidate_encap(struct mlx5e_priv *priv,
mlx5e_tc_unoffload_from_slow_path(esw, flow);
else
mlx5e_tc_unoffload_fdb_rules(esw, flow, flow->attr);
- mlx5_modify_header_dealloc(priv->mdev, attr->modify_hdr);
+
+ mlx5e_tc_detach_mod_hdr(priv, flow, attr);
attr->modify_hdr = NULL;
esw_attr->dests[flow->tmp_entry_index].flags &=
@@ -1405,7 +1406,7 @@ static void mlx5e_reoffload_encap(struct mlx5e_priv *priv,
continue;
}
- err = mlx5e_tc_add_flow_mod_hdr(priv, flow, attr);
+ err = mlx5e_tc_attach_mod_hdr(priv, flow, attr);
if (err) {
mlx5_core_warn(priv->mdev, "Failed to update flow mod_hdr err=%d",
err);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
index 853f312cd757..c067d2efab51 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
@@ -73,6 +73,11 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
void mlx5e_free_rx_descs(struct mlx5e_rq *rq);
void mlx5e_free_rx_in_progress_descs(struct mlx5e_rq *rq);
+static inline bool mlx5e_rx_hw_stamp(struct hwtstamp_config *config)
+{
+ return config->rx_filter == HWTSTAMP_FILTER_ALL;
+}
+
/* TX */
netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev);
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
@@ -315,7 +320,6 @@ mlx5e_tx_dma_unmap(struct device *pdev, struct mlx5e_sq_dma *dma)
}
}
-void mlx5e_sq_xmit_simple(struct mlx5e_txqsq *sq, struct sk_buff *skb, bool xmit_more);
void mlx5e_tx_mpwqe_ensure_complete(struct mlx5e_txqsq *sq);
static inline bool mlx5e_tx_mpwqe_is_full(struct mlx5e_tx_mpwqe *session, u8 max_sq_mpw_wqebbs)
@@ -445,7 +449,7 @@ mlx5e_set_eseg_swp(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg,
static inline u16 mlx5e_stop_room_for_wqe(struct mlx5_core_dev *mdev, u16 wqe_size)
{
- WARN_ON_ONCE(PAGE_SIZE / MLX5_SEND_WQE_BB < mlx5e_get_max_sq_wqebbs(mdev));
+ WARN_ON_ONCE(PAGE_SIZE / MLX5_SEND_WQE_BB < (u16)mlx5e_get_max_sq_wqebbs(mdev));
/* A WQE must not cross the page boundary, hence two conditions:
* 1. Its size must not exceed the page size.
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
index 20507ef2f956..bcd6370de440 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
@@ -57,8 +57,9 @@ int mlx5e_xdp_max_mtu(struct mlx5e_params *params, struct mlx5e_xsk_param *xsk)
static inline bool
mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,
- struct page *page, struct xdp_buff *xdp)
+ struct xdp_buff *xdp)
{
+ struct page *page = virt_to_page(xdp->data);
struct skb_shared_info *sinfo = NULL;
struct mlx5e_xmit_data xdptxd;
struct mlx5e_xdp_info xdpi;
@@ -156,10 +157,39 @@ mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,
return true;
}
+static int mlx5e_xdp_rx_timestamp(const struct xdp_md *ctx, u64 *timestamp)
+{
+ const struct mlx5e_xdp_buff *_ctx = (void *)ctx;
+
+ if (unlikely(!mlx5e_rx_hw_stamp(_ctx->rq->tstamp)))
+ return -EOPNOTSUPP;
+
+ *timestamp = mlx5e_cqe_ts_to_ns(_ctx->rq->ptp_cyc2time,
+ _ctx->rq->clock, get_cqe_ts(_ctx->cqe));
+ return 0;
+}
+
+static int mlx5e_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash)
+{
+ const struct mlx5e_xdp_buff *_ctx = (void *)ctx;
+
+ if (unlikely(!(_ctx->xdp.rxq->dev->features & NETIF_F_RXHASH)))
+ return -EOPNOTSUPP;
+
+ *hash = be32_to_cpu(_ctx->cqe->rss_hash_result);
+ return 0;
+}
+
+const struct xdp_metadata_ops mlx5e_xdp_metadata_ops = {
+ .xmo_rx_timestamp = mlx5e_xdp_rx_timestamp,
+ .xmo_rx_hash = mlx5e_xdp_rx_hash,
+};
+
/* returns true if packet was consumed by xdp */
-bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct page *page,
- struct bpf_prog *prog, struct xdp_buff *xdp)
+bool mlx5e_xdp_handle(struct mlx5e_rq *rq,
+ struct bpf_prog *prog, struct mlx5e_xdp_buff *mxbuf)
{
+ struct xdp_buff *xdp = &mxbuf->xdp;
u32 act;
int err;
@@ -168,7 +198,7 @@ bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct page *page,
case XDP_PASS:
return false;
case XDP_TX:
- if (unlikely(!mlx5e_xmit_xdp_buff(rq->xdpsq, rq, page, xdp)))
+ if (unlikely(!mlx5e_xmit_xdp_buff(rq->xdpsq, rq, xdp)))
goto xdp_abort;
__set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags); /* non-atomic */
return true;
@@ -180,7 +210,7 @@ bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct page *page,
__set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags);
__set_bit(MLX5E_RQ_FLAG_XDP_REDIRECT, rq->flags);
if (xdp->rxq->mem.type != MEM_TYPE_XSK_BUFF_POOL)
- mlx5e_page_dma_unmap(rq, page);
+ mlx5e_page_dma_unmap(rq, virt_to_page(xdp->data));
rq->stats->xdp_redirect++;
return true;
default:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
index bc2d9034af5b..10bcfa6f88c1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
@@ -44,10 +44,16 @@
(MLX5E_XDP_INLINE_WQE_MAX_DS_CNT * MLX5_SEND_WQE_DS - \
sizeof(struct mlx5_wqe_inline_seg))
+struct mlx5e_xdp_buff {
+ struct xdp_buff xdp;
+ struct mlx5_cqe64 *cqe;
+ struct mlx5e_rq *rq;
+};
+
struct mlx5e_xsk_param;
int mlx5e_xdp_max_mtu(struct mlx5e_params *params, struct mlx5e_xsk_param *xsk);
-bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct page *page,
- struct bpf_prog *prog, struct xdp_buff *xdp);
+bool mlx5e_xdp_handle(struct mlx5e_rq *rq,
+ struct bpf_prog *prog, struct mlx5e_xdp_buff *mlctx);
void mlx5e_xdp_mpwqe_complete(struct mlx5e_xdpsq *sq);
bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq);
void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq);
@@ -56,6 +62,8 @@ void mlx5e_xdp_rx_poll_complete(struct mlx5e_rq *rq);
int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
u32 flags);
+extern const struct xdp_metadata_ops mlx5e_xdp_metadata_ops;
+
INDIRECT_CALLABLE_DECLARE(bool mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq,
struct mlx5e_xmit_data *xdptxd,
struct skb_shared_info *sinfo,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
index c91b54d9ff27..fab787600459 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
@@ -8,6 +8,14 @@
/* RX data path */
+static struct mlx5e_xdp_buff *xsk_buff_to_mxbuf(struct xdp_buff *xdp)
+{
+ /* mlx5e_xdp_buff shares its layout with xdp_buff_xsk
+ * and private mlx5e_xdp_buff fields fall into xdp_buff_xsk->cb
+ */
+ return (struct mlx5e_xdp_buff *)xdp;
+}
+
int mlx5e_xsk_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
{
struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, ix);
@@ -22,6 +30,7 @@ int mlx5e_xsk_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
goto err;
BUILD_BUG_ON(sizeof(wi->alloc_units[0]) != sizeof(wi->alloc_units[0].xsk));
+ XSK_CHECK_PRIV_TYPE(struct mlx5e_xdp_buff);
batch = xsk_buff_alloc_batch(rq->xsk_pool, (struct xdp_buff **)wi->alloc_units,
rq->mpwqe.pages_per_wqe);
@@ -43,25 +52,30 @@ int mlx5e_xsk_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
if (likely(rq->mpwqe.umr_mode == MLX5E_MPWRQ_UMR_MODE_ALIGNED)) {
for (i = 0; i < batch; i++) {
+ struct mlx5e_xdp_buff *mxbuf = xsk_buff_to_mxbuf(wi->alloc_units[i].xsk);
dma_addr_t addr = xsk_buff_xdp_get_frame_dma(wi->alloc_units[i].xsk);
umr_wqe->inline_mtts[i] = (struct mlx5_mtt) {
.ptag = cpu_to_be64(addr | MLX5_EN_WR),
};
+ mxbuf->rq = rq;
}
} else if (unlikely(rq->mpwqe.umr_mode == MLX5E_MPWRQ_UMR_MODE_UNALIGNED)) {
for (i = 0; i < batch; i++) {
+ struct mlx5e_xdp_buff *mxbuf = xsk_buff_to_mxbuf(wi->alloc_units[i].xsk);
dma_addr_t addr = xsk_buff_xdp_get_frame_dma(wi->alloc_units[i].xsk);
umr_wqe->inline_ksms[i] = (struct mlx5_ksm) {
.key = rq->mkey_be,
.va = cpu_to_be64(addr),
};
+ mxbuf->rq = rq;
}
} else if (likely(rq->mpwqe.umr_mode == MLX5E_MPWRQ_UMR_MODE_TRIPLE)) {
u32 mapping_size = 1 << (rq->mpwqe.page_shift - 2);
for (i = 0; i < batch; i++) {
+ struct mlx5e_xdp_buff *mxbuf = xsk_buff_to_mxbuf(wi->alloc_units[i].xsk);
dma_addr_t addr = xsk_buff_xdp_get_frame_dma(wi->alloc_units[i].xsk);
umr_wqe->inline_ksms[i << 2] = (struct mlx5_ksm) {
@@ -80,6 +94,7 @@ int mlx5e_xsk_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
.key = rq->mkey_be,
.va = cpu_to_be64(rq->wqe_overflow.addr),
};
+ mxbuf->rq = rq;
}
} else {
__be32 pad_size = cpu_to_be32((1 << rq->mpwqe.page_shift) -
@@ -87,6 +102,7 @@ int mlx5e_xsk_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
__be32 frame_size = cpu_to_be32(rq->xsk_pool->chunk_size);
for (i = 0; i < batch; i++) {
+ struct mlx5e_xdp_buff *mxbuf = xsk_buff_to_mxbuf(wi->alloc_units[i].xsk);
dma_addr_t addr = xsk_buff_xdp_get_frame_dma(wi->alloc_units[i].xsk);
umr_wqe->inline_klms[i << 1] = (struct mlx5_klm) {
@@ -99,6 +115,7 @@ int mlx5e_xsk_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
.va = cpu_to_be64(rq->wqe_overflow.addr),
.bcount = pad_size,
};
+ mxbuf->rq = rq;
}
}
@@ -229,11 +246,12 @@ static struct sk_buff *mlx5e_xsk_construct_skb(struct mlx5e_rq *rq, struct xdp_b
struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
struct mlx5e_mpw_info *wi,
+ struct mlx5_cqe64 *cqe,
u16 cqe_bcnt,
u32 head_offset,
u32 page_idx)
{
- struct xdp_buff *xdp = wi->alloc_units[page_idx].xsk;
+ struct mlx5e_xdp_buff *mxbuf = xsk_buff_to_mxbuf(wi->alloc_units[page_idx].xsk);
struct bpf_prog *prog;
/* Check packet size. Note LRO doesn't use linear SKB */
@@ -249,9 +267,11 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
*/
WARN_ON_ONCE(head_offset);
- xsk_buff_set_size(xdp, cqe_bcnt);
- xsk_buff_dma_sync_for_cpu(xdp, rq->xsk_pool);
- net_prefetch(xdp->data);
+ /* mxbuf->rq is set on allocation, but cqe is per-packet so set it here */
+ mxbuf->cqe = cqe;
+ xsk_buff_set_size(&mxbuf->xdp, cqe_bcnt);
+ xsk_buff_dma_sync_for_cpu(&mxbuf->xdp, rq->xsk_pool);
+ net_prefetch(mxbuf->xdp.data);
/* Possible flows:
* - XDP_REDIRECT to XSKMAP:
@@ -269,7 +289,7 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
*/
prog = rcu_dereference(rq->xdp_prog);
- if (likely(prog && mlx5e_xdp_handle(rq, NULL, prog, xdp))) {
+ if (likely(prog && mlx5e_xdp_handle(rq, prog, mxbuf))) {
if (likely(__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)))
__set_bit(page_idx, wi->xdp_xmit_bitmap); /* non-atomic */
return NULL; /* page/packet was consumed by XDP */
@@ -278,14 +298,15 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
/* XDP_PASS: copy the data from the UMEM to a new SKB and reuse the
* frame. On SKB allocation failure, NULL is returned.
*/
- return mlx5e_xsk_construct_skb(rq, xdp);
+ return mlx5e_xsk_construct_skb(rq, &mxbuf->xdp);
}
struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
struct mlx5e_wqe_frag_info *wi,
+ struct mlx5_cqe64 *cqe,
u32 cqe_bcnt)
{
- struct xdp_buff *xdp = wi->au->xsk;
+ struct mlx5e_xdp_buff *mxbuf = xsk_buff_to_mxbuf(wi->au->xsk);
struct bpf_prog *prog;
/* wi->offset is not used in this function, because xdp->data and the
@@ -295,17 +316,19 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
*/
WARN_ON_ONCE(wi->offset);
- xsk_buff_set_size(xdp, cqe_bcnt);
- xsk_buff_dma_sync_for_cpu(xdp, rq->xsk_pool);
- net_prefetch(xdp->data);
+ /* mxbuf->rq is set on allocation, but cqe is per-packet so set it here */
+ mxbuf->cqe = cqe;
+ xsk_buff_set_size(&mxbuf->xdp, cqe_bcnt);
+ xsk_buff_dma_sync_for_cpu(&mxbuf->xdp, rq->xsk_pool);
+ net_prefetch(mxbuf->xdp.data);
prog = rcu_dereference(rq->xdp_prog);
- if (likely(prog && mlx5e_xdp_handle(rq, NULL, prog, xdp)))
+ if (likely(prog && mlx5e_xdp_handle(rq, prog, mxbuf)))
return NULL; /* page/packet was consumed by XDP */
/* XDP_PASS: copy the data from the UMEM to a new SKB. The frame reuse
* will be handled by mlx5e_free_rx_wqe.
* On SKB allocation failure, NULL is returned.
*/
- return mlx5e_xsk_construct_skb(rq, xdp);
+ return mlx5e_xsk_construct_skb(rq, &mxbuf->xdp);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h
index 087c943bd8e9..cefc0ef6105d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h
@@ -13,11 +13,13 @@ int mlx5e_xsk_alloc_rx_wqes_batched(struct mlx5e_rq *rq, u16 ix, int wqe_bulk);
int mlx5e_xsk_alloc_rx_wqes(struct mlx5e_rq *rq, u16 ix, int wqe_bulk);
struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
struct mlx5e_mpw_info *wi,
+ struct mlx5_cqe64 *cqe,
u16 cqe_bcnt,
u32 head_offset,
u32 page_idx);
struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
struct mlx5e_wqe_frag_info *wi,
+ struct mlx5_cqe64 *cqe,
u32 cqe_bcnt);
#endif /* __MLX5_EN_XSK_RX_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
index ff03c43833bb..81a567e17264 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
@@ -7,6 +7,18 @@
#include "en/health.h"
#include <net/xdp_sock_drv.h>
+static int mlx5e_legacy_rq_validate_xsk(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params,
+ struct mlx5e_xsk_param *xsk)
+{
+ if (!mlx5e_rx_is_linear_skb(mdev, params, xsk)) {
+ mlx5_core_err(mdev, "Legacy RQ linear mode for XSK can't be activated with current params\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
/* The limitation of 2048 can be altered, but shouldn't go beyond the minimal
* stride size of striding RQ.
*/
@@ -17,8 +29,11 @@ bool mlx5e_validate_xsk_param(struct mlx5e_params *params,
struct mlx5_core_dev *mdev)
{
/* AF_XDP doesn't support frames larger than PAGE_SIZE. */
- if (xsk->chunk_size > PAGE_SIZE || xsk->chunk_size < MLX5E_MIN_XSK_CHUNK_SIZE)
+ if (xsk->chunk_size > PAGE_SIZE || xsk->chunk_size < MLX5E_MIN_XSK_CHUNK_SIZE) {
+ mlx5_core_err(mdev, "XSK chunk size %u out of bounds [%u, %lu]\n", xsk->chunk_size,
+ MLX5E_MIN_XSK_CHUNK_SIZE, PAGE_SIZE);
return false;
+ }
/* frag_sz is different for regular and XSK RQs, so ensure that linear
* SKB mode is possible.
@@ -27,7 +42,7 @@ bool mlx5e_validate_xsk_param(struct mlx5e_params *params,
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
return !mlx5e_mpwrq_validate_xsk(mdev, params, xsk);
default: /* MLX5_WQ_TYPE_CYCLIC */
- return mlx5e_rx_is_linear_skb(mdev, params, xsk);
+ return !mlx5e_legacy_rq_validate_xsk(mdev, params, xsk);
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
index 07187028f0d3..c964644ee866 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
@@ -124,7 +124,7 @@ static inline bool mlx5e_accel_tx_begin(struct net_device *dev,
mlx5e_udp_gso_handle_tx_skb(skb);
#ifdef CONFIG_MLX5_EN_TLS
- /* May send SKBs and WQEs. */
+ /* May send WQEs. */
if (mlx5e_ktls_skb_offloaded(skb))
if (unlikely(!mlx5e_ktls_handle_tx_skb(dev, sq, skb,
&state->tls)))
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c
index d7c020f72401..88a5aed9d678 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c
@@ -365,7 +365,7 @@ void mlx5e_accel_fs_tcp_destroy(struct mlx5e_flow_steering *fs)
for (i = 0; i < ACCEL_FS_TCP_NUM_TYPES; i++)
accel_fs_tcp_destroy_table(fs, i);
- kvfree(accel_tcp);
+ kfree(accel_tcp);
mlx5e_fs_set_accel_tcp(fs, NULL);
}
@@ -377,7 +377,7 @@ int mlx5e_accel_fs_tcp_create(struct mlx5e_flow_steering *fs)
if (!MLX5_CAP_FLOWTABLE_NIC_RX(mlx5e_fs_get_mdev(fs), ft_field_support.outer_ip_version))
return -EOPNOTSUPP;
- accel_tcp = kvzalloc(sizeof(*accel_tcp), GFP_KERNEL);
+ accel_tcp = kzalloc(sizeof(*accel_tcp), GFP_KERNEL);
if (!accel_tcp)
return -ENOMEM;
mlx5e_fs_set_accel_tcp(fs, accel_tcp);
@@ -397,7 +397,7 @@ int mlx5e_accel_fs_tcp_create(struct mlx5e_flow_steering *fs)
err_destroy_tables:
while (--i >= 0)
accel_fs_tcp_destroy_table(fs, i);
- kvfree(accel_tcp);
+ kfree(accel_tcp);
mlx5e_fs_set_accel_tcp(fs, NULL);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
index bb9023957f74..7b0d3de0ec6c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
@@ -158,95 +158,103 @@ void mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
attrs->family = x->props.family;
attrs->type = x->xso.type;
attrs->reqid = x->props.reqid;
+ attrs->upspec.dport = ntohs(x->sel.dport);
+ attrs->upspec.dport_mask = ntohs(x->sel.dport_mask);
+ attrs->upspec.sport = ntohs(x->sel.sport);
+ attrs->upspec.sport_mask = ntohs(x->sel.sport_mask);
+ attrs->upspec.proto = x->sel.proto;
mlx5e_ipsec_init_limits(sa_entry, attrs);
}
-static inline int mlx5e_xfrm_validate_state(struct xfrm_state *x)
+static int mlx5e_xfrm_validate_state(struct mlx5_core_dev *mdev,
+ struct xfrm_state *x,
+ struct netlink_ext_ack *extack)
{
- struct net_device *netdev = x->xso.real_dev;
- struct mlx5e_priv *priv;
-
- priv = netdev_priv(netdev);
-
if (x->props.aalgo != SADB_AALG_NONE) {
- netdev_info(netdev, "Cannot offload authenticated xfrm states\n");
+ NL_SET_ERR_MSG_MOD(extack, "Cannot offload authenticated xfrm states");
return -EINVAL;
}
if (x->props.ealgo != SADB_X_EALG_AES_GCM_ICV16) {
- netdev_info(netdev, "Only AES-GCM-ICV16 xfrm state may be offloaded\n");
+ NL_SET_ERR_MSG_MOD(extack, "Only AES-GCM-ICV16 xfrm state may be offloaded");
return -EINVAL;
}
if (x->props.calgo != SADB_X_CALG_NONE) {
- netdev_info(netdev, "Cannot offload compressed xfrm states\n");
+ NL_SET_ERR_MSG_MOD(extack, "Cannot offload compressed xfrm states");
return -EINVAL;
}
if (x->props.flags & XFRM_STATE_ESN &&
- !(mlx5_ipsec_device_caps(priv->mdev) & MLX5_IPSEC_CAP_ESN)) {
- netdev_info(netdev, "Cannot offload ESN xfrm states\n");
+ !(mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_ESN)) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot offload ESN xfrm states");
return -EINVAL;
}
if (x->props.family != AF_INET &&
x->props.family != AF_INET6) {
- netdev_info(netdev, "Only IPv4/6 xfrm states may be offloaded\n");
+ NL_SET_ERR_MSG_MOD(extack, "Only IPv4/6 xfrm states may be offloaded");
return -EINVAL;
}
if (x->id.proto != IPPROTO_ESP) {
- netdev_info(netdev, "Only ESP xfrm state may be offloaded\n");
+ NL_SET_ERR_MSG_MOD(extack, "Only ESP xfrm state may be offloaded");
return -EINVAL;
}
if (x->encap) {
- netdev_info(netdev, "Encapsulated xfrm state may not be offloaded\n");
+ NL_SET_ERR_MSG_MOD(extack, "Encapsulated xfrm state may not be offloaded");
return -EINVAL;
}
if (!x->aead) {
- netdev_info(netdev, "Cannot offload xfrm states without aead\n");
+ NL_SET_ERR_MSG_MOD(extack, "Cannot offload xfrm states without aead");
return -EINVAL;
}
if (x->aead->alg_icv_len != 128) {
- netdev_info(netdev, "Cannot offload xfrm states with AEAD ICV length other than 128bit\n");
+ NL_SET_ERR_MSG_MOD(extack, "Cannot offload xfrm states with AEAD ICV length other than 128bit");
return -EINVAL;
}
if ((x->aead->alg_key_len != 128 + 32) &&
(x->aead->alg_key_len != 256 + 32)) {
- netdev_info(netdev, "Cannot offload xfrm states with AEAD key length other than 128/256 bit\n");
+ NL_SET_ERR_MSG_MOD(extack, "Cannot offload xfrm states with AEAD key length other than 128/256 bit");
return -EINVAL;
}
if (x->tfcpad) {
- netdev_info(netdev, "Cannot offload xfrm states with tfc padding\n");
+ NL_SET_ERR_MSG_MOD(extack, "Cannot offload xfrm states with tfc padding");
return -EINVAL;
}
if (!x->geniv) {
- netdev_info(netdev, "Cannot offload xfrm states without geniv\n");
+ NL_SET_ERR_MSG_MOD(extack, "Cannot offload xfrm states without geniv");
return -EINVAL;
}
if (strcmp(x->geniv, "seqiv")) {
- netdev_info(netdev, "Cannot offload xfrm states with geniv other than seqiv\n");
+ NL_SET_ERR_MSG_MOD(extack, "Cannot offload xfrm states with geniv other than seqiv");
return -EINVAL;
}
+
+ if (x->sel.proto != IPPROTO_IP &&
+ (x->sel.proto != IPPROTO_UDP || x->xso.dir != XFRM_DEV_OFFLOAD_OUT)) {
+ NL_SET_ERR_MSG_MOD(extack, "Device does not support upper protocol other than UDP, and only Tx direction");
+ return -EINVAL;
+ }
+
switch (x->xso.type) {
case XFRM_DEV_OFFLOAD_CRYPTO:
- if (!(mlx5_ipsec_device_caps(priv->mdev) &
- MLX5_IPSEC_CAP_CRYPTO)) {
- netdev_info(netdev, "Crypto offload is not supported\n");
+ if (!(mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_CRYPTO)) {
+ NL_SET_ERR_MSG_MOD(extack, "Crypto offload is not supported");
return -EINVAL;
}
if (x->props.mode != XFRM_MODE_TRANSPORT &&
x->props.mode != XFRM_MODE_TUNNEL) {
- netdev_info(netdev, "Only transport and tunnel xfrm states may be offloaded\n");
+ NL_SET_ERR_MSG_MOD(extack, "Only transport and tunnel xfrm states may be offloaded");
return -EINVAL;
}
break;
case XFRM_DEV_OFFLOAD_PACKET:
- if (!(mlx5_ipsec_device_caps(priv->mdev) &
+ if (!(mlx5_ipsec_device_caps(mdev) &
MLX5_IPSEC_CAP_PACKET_OFFLOAD)) {
- netdev_info(netdev, "Packet offload is not supported\n");
+ NL_SET_ERR_MSG_MOD(extack, "Packet offload is not supported");
return -EINVAL;
}
if (x->props.mode != XFRM_MODE_TRANSPORT) {
- netdev_info(netdev, "Only transport xfrm states may be offloaded in packet mode\n");
+ NL_SET_ERR_MSG_MOD(extack, "Only transport xfrm states may be offloaded in packet mode");
return -EINVAL;
}
@@ -254,35 +262,30 @@ static inline int mlx5e_xfrm_validate_state(struct xfrm_state *x)
x->replay_esn->replay_window != 64 &&
x->replay_esn->replay_window != 128 &&
x->replay_esn->replay_window != 256) {
- netdev_info(netdev,
- "Unsupported replay window size %u\n",
- x->replay_esn->replay_window);
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported replay window size");
return -EINVAL;
}
if (!x->props.reqid) {
- netdev_info(netdev, "Cannot offload without reqid\n");
+ NL_SET_ERR_MSG_MOD(extack, "Cannot offload without reqid");
return -EINVAL;
}
if (x->lft.hard_byte_limit != XFRM_INF ||
x->lft.soft_byte_limit != XFRM_INF) {
- netdev_info(netdev,
- "Device doesn't support limits in bytes\n");
+ NL_SET_ERR_MSG_MOD(extack, "Device doesn't support limits in bytes");
return -EINVAL;
}
if (x->lft.soft_packet_limit >= x->lft.hard_packet_limit &&
x->lft.hard_packet_limit != XFRM_INF) {
/* XFRM stack doesn't prevent such configuration :(. */
- netdev_info(netdev,
- "Hard packet limit must be greater than soft one\n");
+ NL_SET_ERR_MSG_MOD(extack, "Hard packet limit must be greater than soft one");
return -EINVAL;
}
break;
default:
- netdev_info(netdev, "Unsupported xfrm offload type %d\n",
- x->xso.type);
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported xfrm offload type");
return -EINVAL;
}
return 0;
@@ -298,7 +301,8 @@ static void _update_xfrm_state(struct work_struct *work)
mlx5_accel_esp_modify_xfrm(sa_entry, &modify_work->attrs);
}
-static int mlx5e_xfrm_add_state(struct xfrm_state *x)
+static int mlx5e_xfrm_add_state(struct xfrm_state *x,
+ struct netlink_ext_ack *extack)
{
struct mlx5e_ipsec_sa_entry *sa_entry = NULL;
struct net_device *netdev = x->xso.real_dev;
@@ -311,15 +315,13 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x)
return -EOPNOTSUPP;
ipsec = priv->ipsec;
- err = mlx5e_xfrm_validate_state(x);
+ err = mlx5e_xfrm_validate_state(priv->mdev, x, extack);
if (err)
return err;
sa_entry = kzalloc(sizeof(*sa_entry), GFP_KERNEL);
- if (!sa_entry) {
- err = -ENOMEM;
- goto out;
- }
+ if (!sa_entry)
+ return -ENOMEM;
sa_entry->x = x;
sa_entry->ipsec = ipsec;
@@ -360,7 +362,7 @@ err_hw_ctx:
mlx5_ipsec_free_sa_ctx(sa_entry);
err_xfrm:
kfree(sa_entry);
-out:
+ NL_SET_ERR_MSG_MOD(extack, "Device failed to offload this policy");
return err;
}
@@ -497,34 +499,39 @@ static void mlx5e_xfrm_update_curlft(struct xfrm_state *x)
mlx5e_ipsec_aso_update_curlft(sa_entry, &x->curlft.packets);
}
-static int mlx5e_xfrm_validate_policy(struct xfrm_policy *x)
+static int mlx5e_xfrm_validate_policy(struct xfrm_policy *x,
+ struct netlink_ext_ack *extack)
{
- struct net_device *netdev = x->xdo.real_dev;
-
if (x->type != XFRM_POLICY_TYPE_MAIN) {
- netdev_info(netdev, "Cannot offload non-main policy types\n");
+ NL_SET_ERR_MSG_MOD(extack, "Cannot offload non-main policy types");
return -EINVAL;
}
/* Please pay attention that we support only one template */
if (x->xfrm_nr > 1) {
- netdev_info(netdev, "Cannot offload more than one template\n");
+ NL_SET_ERR_MSG_MOD(extack, "Cannot offload more than one template");
return -EINVAL;
}
if (x->xdo.dir != XFRM_DEV_OFFLOAD_IN &&
x->xdo.dir != XFRM_DEV_OFFLOAD_OUT) {
- netdev_info(netdev, "Cannot offload forward policy\n");
+ NL_SET_ERR_MSG_MOD(extack, "Cannot offload forward policy");
return -EINVAL;
}
if (!x->xfrm_vec[0].reqid) {
- netdev_info(netdev, "Cannot offload policy without reqid\n");
+ NL_SET_ERR_MSG_MOD(extack, "Cannot offload policy without reqid");
return -EINVAL;
}
if (x->xdo.type != XFRM_DEV_OFFLOAD_PACKET) {
- netdev_info(netdev, "Unsupported xfrm offload type\n");
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported xfrm offload type");
+ return -EINVAL;
+ }
+
+ if (x->selector.proto != IPPROTO_IP &&
+ (x->selector.proto != IPPROTO_UDP || x->xdo.dir != XFRM_DEV_OFFLOAD_OUT)) {
+ NL_SET_ERR_MSG_MOD(extack, "Device does not support upper protocol other than UDP, and only Tx direction");
return -EINVAL;
}
@@ -548,9 +555,15 @@ mlx5e_ipsec_build_accel_pol_attrs(struct mlx5e_ipsec_pol_entry *pol_entry,
attrs->action = x->action;
attrs->type = XFRM_DEV_OFFLOAD_PACKET;
attrs->reqid = x->xfrm_vec[0].reqid;
+ attrs->upspec.dport = ntohs(sel->dport);
+ attrs->upspec.dport_mask = ntohs(sel->dport_mask);
+ attrs->upspec.sport = ntohs(sel->sport);
+ attrs->upspec.sport_mask = ntohs(sel->sport_mask);
+ attrs->upspec.proto = sel->proto;
}
-static int mlx5e_xfrm_add_policy(struct xfrm_policy *x)
+static int mlx5e_xfrm_add_policy(struct xfrm_policy *x,
+ struct netlink_ext_ack *extack)
{
struct net_device *netdev = x->xdo.real_dev;
struct mlx5e_ipsec_pol_entry *pol_entry;
@@ -558,10 +571,12 @@ static int mlx5e_xfrm_add_policy(struct xfrm_policy *x)
int err;
priv = netdev_priv(netdev);
- if (!priv->ipsec)
+ if (!priv->ipsec) {
+ NL_SET_ERR_MSG_MOD(extack, "Device doesn't support IPsec packet offload");
return -EOPNOTSUPP;
+ }
- err = mlx5e_xfrm_validate_policy(x);
+ err = mlx5e_xfrm_validate_policy(x, extack);
if (err)
return err;
@@ -582,6 +597,7 @@ static int mlx5e_xfrm_add_policy(struct xfrm_policy *x)
err_fs:
kfree(pol_entry);
+ NL_SET_ERR_MSG_MOD(extack, "Device failed to offload this policy");
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
index 8bed9c361075..12f044330639 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
@@ -52,6 +52,14 @@ struct aes_gcm_keymat {
u32 aes_key[256 / 32];
};
+struct upspec {
+ u16 dport;
+ u16 dport_mask;
+ u16 sport;
+ u16 sport_mask;
+ u8 proto;
+};
+
struct mlx5_accel_esp_xfrm_attrs {
u32 esn;
u32 spi;
@@ -68,6 +76,7 @@ struct mlx5_accel_esp_xfrm_attrs {
__be32 a6[4];
} daddr;
+ struct upspec upspec;
u8 dir : 2;
u8 esn_overlap : 1;
u8 esn_trigger : 1;
@@ -84,6 +93,7 @@ enum mlx5_ipsec_cap {
MLX5_IPSEC_CAP_CRYPTO = 1 << 0,
MLX5_IPSEC_CAP_ESN = 1 << 1,
MLX5_IPSEC_CAP_PACKET_OFFLOAD = 1 << 2,
+ MLX5_IPSEC_CAP_ROCE = 1 << 3,
};
struct mlx5e_priv;
@@ -119,7 +129,7 @@ struct mlx5e_ipsec_work {
};
struct mlx5e_ipsec_aso {
- u8 ctx[MLX5_ST_SZ_BYTES(ipsec_aso)];
+ u8 __aligned(64) ctx[MLX5_ST_SZ_BYTES(ipsec_aso)];
dma_addr_t dma_addr;
struct mlx5_aso *aso;
/* Protect ASO WQ access, as it is global to whole IPsec */
@@ -138,6 +148,7 @@ struct mlx5e_ipsec {
struct mlx5e_ipsec_tx *tx;
struct mlx5e_ipsec_aso *aso;
struct notifier_block nb;
+ struct mlx5_ipsec_fs *roce;
};
struct mlx5e_ipsec_esn_state {
@@ -181,6 +192,7 @@ struct mlx5_accel_pol_xfrm_attrs {
__be32 a6[4];
} daddr;
+ struct upspec upspec;
u8 family;
u8 action;
u8 type : 2;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
index 9f19f4b59a70..9871ba1b25ff 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
@@ -6,6 +6,7 @@
#include "en/fs.h"
#include "ipsec.h"
#include "fs_core.h"
+#include "lib/ipsec_fs_roce.h"
#define NUM_IPSEC_FTE BIT(15)
@@ -166,7 +167,8 @@ out:
return err;
}
-static void rx_destroy(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_rx *rx)
+static void rx_destroy(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx, u32 family)
{
mlx5_del_flow_rules(rx->pol.rule);
mlx5_destroy_flow_group(rx->pol.group);
@@ -179,6 +181,8 @@ static void rx_destroy(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_rx *rx)
mlx5_del_flow_rules(rx->status.rule);
mlx5_modify_header_dealloc(mdev, rx->status.modify_hdr);
mlx5_destroy_flow_table(rx->ft.status);
+
+ mlx5_ipsec_fs_roce_rx_destroy(ipsec->roce, family);
}
static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
@@ -186,18 +190,35 @@ static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
{
struct mlx5_flow_namespace *ns = mlx5e_fs_get_ns(ipsec->fs, false);
struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(ipsec->fs, false);
+ struct mlx5_flow_destination default_dest;
struct mlx5_flow_destination dest[2];
struct mlx5_flow_table *ft;
int err;
+ default_dest = mlx5_ttc_get_default_dest(ttc, family2tt(family));
+ err = mlx5_ipsec_fs_roce_rx_create(mdev, ipsec->roce, ns, &default_dest,
+ family, MLX5E_ACCEL_FS_ESP_FT_ROCE_LEVEL,
+ MLX5E_NIC_PRIO);
+ if (err)
+ return err;
+
ft = ipsec_ft_create(ns, MLX5E_ACCEL_FS_ESP_FT_ERR_LEVEL,
MLX5E_NIC_PRIO, 1);
- if (IS_ERR(ft))
- return PTR_ERR(ft);
+ if (IS_ERR(ft)) {
+ err = PTR_ERR(ft);
+ goto err_fs_ft_status;
+ }
rx->ft.status = ft;
- dest[0] = mlx5_ttc_get_default_dest(ttc, family2tt(family));
+ ft = mlx5_ipsec_fs_roce_ft_get(ipsec->roce, family);
+ if (ft) {
+ dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest[0].ft = ft;
+ } else {
+ dest[0] = default_dest;
+ }
+
dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
dest[1].counter_id = mlx5_fc_id(rx->fc->cnt);
err = ipsec_status_rule(mdev, rx, dest);
@@ -245,6 +266,8 @@ err_fs_ft:
mlx5_modify_header_dealloc(mdev, rx->status.modify_hdr);
err_add:
mlx5_destroy_flow_table(rx->ft.status);
+err_fs_ft_status:
+ mlx5_ipsec_fs_roce_rx_destroy(ipsec->roce, family);
return err;
}
@@ -304,14 +327,15 @@ static void rx_ft_put(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
mlx5_ttc_fwd_default_dest(ttc, family2tt(family));
/* remove FT */
- rx_destroy(mdev, rx);
+ rx_destroy(mdev, ipsec, rx, family);
out:
mutex_unlock(&rx->ft.mutex);
}
/* IPsec TX flow steering */
-static int tx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_tx *tx)
+static int tx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_tx *tx,
+ struct mlx5_ipsec_fs *roce)
{
struct mlx5_flow_destination dest = {};
struct mlx5_flow_table *ft;
@@ -334,8 +358,15 @@ static int tx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_tx *tx)
err = ipsec_miss_create(mdev, tx->ft.pol, &tx->pol, &dest);
if (err)
goto err_pol_miss;
+
+ err = mlx5_ipsec_fs_roce_tx_create(mdev, roce, tx->ft.pol);
+ if (err)
+ goto err_roce;
return 0;
+err_roce:
+ mlx5_del_flow_rules(tx->pol.rule);
+ mlx5_destroy_flow_group(tx->pol.group);
err_pol_miss:
mlx5_destroy_flow_table(tx->ft.pol);
err_pol_ft:
@@ -353,9 +384,10 @@ static struct mlx5e_ipsec_tx *tx_ft_get(struct mlx5_core_dev *mdev,
if (tx->ft.refcnt)
goto skip;
- err = tx_create(mdev, tx);
+ err = tx_create(mdev, tx, ipsec->roce);
if (err)
goto out;
+
skip:
tx->ft.refcnt++;
out:
@@ -374,6 +406,7 @@ static void tx_ft_put(struct mlx5e_ipsec *ipsec)
if (tx->ft.refcnt)
goto out;
+ mlx5_ipsec_fs_roce_tx_destroy(ipsec->roce);
mlx5_del_flow_rules(tx->pol.rule);
mlx5_destroy_flow_group(tx->pol.group);
mlx5_destroy_flow_table(tx->ft.pol);
@@ -467,6 +500,27 @@ static void setup_fte_reg_c0(struct mlx5_flow_spec *spec, u32 reqid)
misc_parameters_2.metadata_reg_c_0, reqid);
}
+static void setup_fte_upper_proto_match(struct mlx5_flow_spec *spec, struct upspec *upspec)
+{
+ if (upspec->proto != IPPROTO_UDP)
+ return;
+
+ spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, spec->match_criteria, ip_protocol);
+ MLX5_SET(fte_match_set_lyr_2_4, spec->match_value, ip_protocol, upspec->proto);
+ if (upspec->dport) {
+ MLX5_SET(fte_match_set_lyr_2_4, spec->match_criteria, udp_dport,
+ upspec->dport_mask);
+ MLX5_SET(fte_match_set_lyr_2_4, spec->match_value, udp_dport, upspec->dport);
+ }
+
+ if (upspec->sport) {
+ MLX5_SET(fte_match_set_lyr_2_4, spec->match_criteria, udp_dport,
+ upspec->sport_mask);
+ MLX5_SET(fte_match_set_lyr_2_4, spec->match_value, udp_dport, upspec->sport);
+ }
+}
+
static int setup_modify_header(struct mlx5_core_dev *mdev, u32 val, u8 dir,
struct mlx5_flow_act *flow_act)
{
@@ -654,6 +708,7 @@ static int tx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);
setup_fte_no_frags(spec);
+ setup_fte_upper_proto_match(spec, &attrs->upspec);
switch (attrs->type) {
case XFRM_DEV_OFFLOAD_CRYPTO:
@@ -728,6 +783,7 @@ static int tx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);
setup_fte_no_frags(spec);
+ setup_fte_upper_proto_match(spec, &attrs->upspec);
err = setup_modify_header(mdev, attrs->reqid, XFRM_DEV_OFFLOAD_OUT,
&flow_act);
@@ -1008,6 +1064,9 @@ void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_ipsec *ipsec)
if (!ipsec->tx)
return;
+ if (mlx5_ipsec_device_caps(ipsec->mdev) & MLX5_IPSEC_CAP_ROCE)
+ mlx5_ipsec_fs_roce_cleanup(ipsec->roce);
+
ipsec_fs_destroy_counters(ipsec);
mutex_destroy(&ipsec->tx->ft.mutex);
WARN_ON(ipsec->tx->ft.refcnt);
@@ -1024,6 +1083,7 @@ void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_ipsec *ipsec)
int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec)
{
+ struct mlx5_core_dev *mdev = ipsec->mdev;
struct mlx5_flow_namespace *ns;
int err = -ENOMEM;
@@ -1053,6 +1113,9 @@ int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec)
mutex_init(&ipsec->rx_ipv6->ft.mutex);
ipsec->tx->ns = ns;
+ if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_ROCE)
+ ipsec->roce = mlx5_ipsec_fs_roce_init(mdev);
+
return 0;
err_counters:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
index 2461462b7b99..5fa7a4c40429 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
@@ -4,7 +4,7 @@
#include "mlx5_core.h"
#include "en.h"
#include "ipsec.h"
-#include "lib/mlx5.h"
+#include "lib/crypto.h"
enum {
MLX5_IPSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET,
@@ -42,6 +42,11 @@ u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev)
MLX5_CAP_FLOWTABLE_NIC_RX(mdev, decap))
caps |= MLX5_IPSEC_CAP_PACKET_OFFLOAD;
+ if (mlx5_get_roce_state(mdev) &&
+ MLX5_CAP_GEN_2(mdev, flow_table_type_2_type) & MLX5_FT_NIC_RX_2_NIC_RX_RDMA &&
+ MLX5_CAP_GEN_2(mdev, flow_table_type_2_type) & MLX5_FT_NIC_TX_RDMA_2_NIC_TX)
+ caps |= MLX5_IPSEC_CAP_ROCE;
+
if (!caps)
return 0;
@@ -92,7 +97,6 @@ static void mlx5e_ipsec_packet_setup(void *obj, u32 pdn,
MLX5_SET(ipsec_aso, aso_ctx, remove_flow_pkt_cnt,
lower_32_bits(attrs->hard_packet_limit));
MLX5_SET(ipsec_aso, aso_ctx, hard_lft_arm, 1);
- MLX5_SET(ipsec_aso, aso_ctx, remove_flow_enable, 1);
}
if (attrs->soft_packet_limit != XFRM_INF) {
@@ -329,8 +333,7 @@ static void mlx5e_ipsec_handle_event(struct work_struct *_work)
if (attrs->soft_packet_limit != XFRM_INF)
if (!MLX5_GET(ipsec_aso, aso->ctx, soft_lft_arm) ||
- !MLX5_GET(ipsec_aso, aso->ctx, hard_lft_arm) ||
- !MLX5_GET(ipsec_aso, aso->ctx, remove_flow_enable))
+ !MLX5_GET(ipsec_aso, aso->ctx, hard_lft_arm))
xfrm_state_check_expire(sa_entry->x);
unlock:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c
index da2184c94203..cf704f106b7c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c
@@ -1,18 +1,19 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2019 Mellanox Technologies.
+#include <linux/debugfs.h>
#include "en.h"
#include "lib/mlx5.h"
+#include "lib/crypto.h"
#include "en_accel/ktls.h"
#include "en_accel/ktls_utils.h"
#include "en_accel/fs_tcp.h"
-int mlx5_ktls_create_key(struct mlx5_core_dev *mdev,
- struct tls_crypto_info *crypto_info,
- u32 *p_key_id)
+struct mlx5_crypto_dek *mlx5_ktls_create_key(struct mlx5_crypto_dek_pool *dek_pool,
+ struct tls_crypto_info *crypto_info)
{
+ const void *key;
u32 sz_bytes;
- void *key;
switch (crypto_info->cipher_type) {
case TLS_CIPHER_AES_GCM_128: {
@@ -32,17 +33,16 @@ int mlx5_ktls_create_key(struct mlx5_core_dev *mdev,
break;
}
default:
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
}
- return mlx5_create_encryption_key(mdev, key, sz_bytes,
- MLX5_ACCEL_OBJ_TLS_KEY,
- p_key_id);
+ return mlx5_crypto_dek_create(dek_pool, key, sz_bytes);
}
-void mlx5_ktls_destroy_key(struct mlx5_core_dev *mdev, u32 key_id)
+void mlx5_ktls_destroy_key(struct mlx5_crypto_dek_pool *dek_pool,
+ struct mlx5_crypto_dek *dek)
{
- mlx5_destroy_encryption_key(mdev, key_id);
+ mlx5_crypto_dek_destroy(dek_pool, dek);
}
static int mlx5e_ktls_add(struct net_device *netdev, struct sock *sk,
@@ -177,8 +177,18 @@ void mlx5e_ktls_cleanup_rx(struct mlx5e_priv *priv)
destroy_workqueue(priv->tls->rx_wq);
}
+static void mlx5e_tls_debugfs_init(struct mlx5e_tls *tls,
+ struct dentry *dfs_root)
+{
+ if (IS_ERR_OR_NULL(dfs_root))
+ return;
+
+ tls->debugfs.dfs = debugfs_create_dir("tls", dfs_root);
+}
+
int mlx5e_ktls_init(struct mlx5e_priv *priv)
{
+ struct mlx5_crypto_dek_pool *dek_pool;
struct mlx5e_tls *tls;
if (!mlx5e_is_ktls_device(priv->mdev))
@@ -187,13 +197,32 @@ int mlx5e_ktls_init(struct mlx5e_priv *priv)
tls = kzalloc(sizeof(*tls), GFP_KERNEL);
if (!tls)
return -ENOMEM;
+ tls->mdev = priv->mdev;
+ dek_pool = mlx5_crypto_dek_pool_create(priv->mdev, MLX5_ACCEL_OBJ_TLS_KEY);
+ if (IS_ERR(dek_pool)) {
+ kfree(tls);
+ return PTR_ERR(dek_pool);
+ }
+ tls->dek_pool = dek_pool;
priv->tls = tls;
+
+ mlx5e_tls_debugfs_init(tls, priv->dfs_root);
+
return 0;
}
void mlx5e_ktls_cleanup(struct mlx5e_priv *priv)
{
+ struct mlx5e_tls *tls = priv->tls;
+
+ if (!mlx5e_is_ktls_device(priv->mdev))
+ return;
+
+ debugfs_remove_recursive(tls->debugfs.dfs);
+ tls->debugfs.dfs = NULL;
+
+ mlx5_crypto_dek_pool_destroy(tls->dek_pool);
kfree(priv->tls);
priv->tls = NULL;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
index 1c35045e41fb..f11075e67658 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
@@ -4,15 +4,18 @@
#ifndef __MLX5E_KTLS_H__
#define __MLX5E_KTLS_H__
+#include <linux/debugfs.h>
#include <linux/tls.h>
#include <net/tls.h>
#include "en.h"
#ifdef CONFIG_MLX5_EN_TLS
-int mlx5_ktls_create_key(struct mlx5_core_dev *mdev,
- struct tls_crypto_info *crypto_info,
- u32 *p_key_id);
-void mlx5_ktls_destroy_key(struct mlx5_core_dev *mdev, u32 key_id);
+#include "lib/crypto.h"
+
+struct mlx5_crypto_dek *mlx5_ktls_create_key(struct mlx5_crypto_dek_pool *dek_pool,
+ struct tls_crypto_info *crypto_info);
+void mlx5_ktls_destroy_key(struct mlx5_crypto_dek_pool *dek_pool,
+ struct mlx5_crypto_dek *dek);
static inline bool mlx5e_is_ktls_device(struct mlx5_core_dev *mdev)
{
@@ -72,10 +75,18 @@ struct mlx5e_tls_sw_stats {
atomic64_t rx_tls_del;
};
+struct mlx5e_tls_debugfs {
+ struct dentry *dfs;
+ struct dentry *dfs_tx;
+};
+
struct mlx5e_tls {
+ struct mlx5_core_dev *mdev;
struct mlx5e_tls_sw_stats sw_stats;
struct workqueue_struct *rx_wq;
struct mlx5e_tls_tx_pool *tx_pool;
+ struct mlx5_crypto_dek_pool *dek_pool;
+ struct mlx5e_tls_debugfs debugfs;
};
int mlx5e_ktls_init(struct mlx5e_priv *priv);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
index 3e54834747ce..4be770443b0c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
@@ -50,7 +50,7 @@ struct mlx5e_ktls_offload_context_rx {
struct mlx5e_tls_sw_stats *sw_stats;
struct completion add_ctx;
struct mlx5e_tir tir;
- u32 key_id;
+ struct mlx5_crypto_dek *dek;
u32 rxq;
DECLARE_BITMAP(flags, MLX5E_NUM_PRIV_RX_FLAGS);
@@ -148,7 +148,8 @@ post_static_params(struct mlx5e_icosq *sq,
wqe = MLX5E_TLS_FETCH_SET_STATIC_PARAMS_WQE(sq, pi);
mlx5e_ktls_build_static_params(wqe, sq->pc, sq->sqn, &priv_rx->crypto_info,
mlx5e_tir_get_tirn(&priv_rx->tir),
- priv_rx->key_id, priv_rx->resync.seq, false,
+ mlx5_crypto_dek_get_id(priv_rx->dek),
+ priv_rx->resync.seq, false,
TLS_OFFLOAD_CTX_DIR_RX);
wi = (struct mlx5e_icosq_wqe_info) {
.wqe_type = MLX5E_ICOSQ_WQE_UMR_TLS,
@@ -610,20 +611,22 @@ int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk,
struct mlx5e_ktls_offload_context_rx *priv_rx;
struct mlx5e_ktls_rx_resync_ctx *resync;
struct tls_context *tls_ctx;
- struct mlx5_core_dev *mdev;
+ struct mlx5_crypto_dek *dek;
struct mlx5e_priv *priv;
int rxq, err;
tls_ctx = tls_get_ctx(sk);
priv = netdev_priv(netdev);
- mdev = priv->mdev;
priv_rx = kzalloc(sizeof(*priv_rx), GFP_KERNEL);
if (unlikely(!priv_rx))
return -ENOMEM;
- err = mlx5_ktls_create_key(mdev, crypto_info, &priv_rx->key_id);
- if (err)
+ dek = mlx5_ktls_create_key(priv->tls->dek_pool, crypto_info);
+ if (IS_ERR(dek)) {
+ err = PTR_ERR(dek);
goto err_create_key;
+ }
+ priv_rx->dek = dek;
INIT_LIST_HEAD(&priv_rx->list);
spin_lock_init(&priv_rx->lock);
@@ -673,7 +676,7 @@ int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk,
err_post_wqes:
mlx5e_tir_destroy(&priv_rx->tir);
err_create_tir:
- mlx5_ktls_destroy_key(mdev, priv_rx->key_id);
+ mlx5_ktls_destroy_key(priv->tls->dek_pool, priv_rx->dek);
err_create_key:
kfree(priv_rx);
return err;
@@ -683,11 +686,9 @@ void mlx5e_ktls_del_rx(struct net_device *netdev, struct tls_context *tls_ctx)
{
struct mlx5e_ktls_offload_context_rx *priv_rx;
struct mlx5e_ktls_rx_resync_ctx *resync;
- struct mlx5_core_dev *mdev;
struct mlx5e_priv *priv;
priv = netdev_priv(netdev);
- mdev = priv->mdev;
priv_rx = mlx5e_get_ktls_rx_priv_ctx(tls_ctx);
set_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags);
@@ -707,7 +708,7 @@ void mlx5e_ktls_del_rx(struct net_device *netdev, struct tls_context *tls_ctx)
mlx5e_accel_fs_del_sk(priv_rx->rule.rule);
mlx5e_tir_destroy(&priv_rx->tir);
- mlx5_ktls_destroy_key(mdev, priv_rx->key_id);
+ mlx5_ktls_destroy_key(priv->tls->dek_pool, priv_rx->dek);
/* priv_rx should normally be freed here, but if there is an outstanding
* GET_PSV, deallocation will be delayed until the CQE for GET_PSV is
* processed.
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
index 78072bf93f3f..60b3e08a1028 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2019 Mellanox Technologies.
+#include <linux/debugfs.h>
#include "en_accel/ktls.h"
#include "en_accel/ktls_txrx.h"
#include "en_accel/ktls_utils.h"
@@ -97,7 +98,7 @@ struct mlx5e_ktls_offload_context_tx {
struct tls_offload_context_tx *tx_ctx;
struct mlx5_core_dev *mdev;
struct mlx5e_tls_sw_stats *sw_stats;
- u32 key_id;
+ struct mlx5_crypto_dek *dek;
u8 create_err : 1;
};
@@ -456,6 +457,7 @@ int mlx5e_ktls_add_tx(struct net_device *netdev, struct sock *sk,
struct mlx5e_ktls_offload_context_tx *priv_tx;
struct mlx5e_tls_tx_pool *pool;
struct tls_context *tls_ctx;
+ struct mlx5_crypto_dek *dek;
struct mlx5e_priv *priv;
int err;
@@ -467,9 +469,12 @@ int mlx5e_ktls_add_tx(struct net_device *netdev, struct sock *sk,
if (IS_ERR(priv_tx))
return PTR_ERR(priv_tx);
- err = mlx5_ktls_create_key(pool->mdev, crypto_info, &priv_tx->key_id);
- if (err)
+ dek = mlx5_ktls_create_key(priv->tls->dek_pool, crypto_info);
+ if (IS_ERR(dek)) {
+ err = PTR_ERR(dek);
goto err_create_key;
+ }
+ priv_tx->dek = dek;
priv_tx->expected_seq = start_offload_tcp_sn;
switch (crypto_info->cipher_type) {
@@ -511,7 +516,7 @@ void mlx5e_ktls_del_tx(struct net_device *netdev, struct tls_context *tls_ctx)
pool = priv->tls->tx_pool;
atomic64_inc(&priv_tx->sw_stats->tx_tls_del);
- mlx5_ktls_destroy_key(priv_tx->mdev, priv_tx->key_id);
+ mlx5_ktls_destroy_key(priv->tls->dek_pool, priv_tx->dek);
pool_push(pool, priv_tx);
}
@@ -550,8 +555,9 @@ post_static_params(struct mlx5e_txqsq *sq,
pi = mlx5e_txqsq_get_next_pi(sq, num_wqebbs);
wqe = MLX5E_TLS_FETCH_SET_STATIC_PARAMS_WQE(sq, pi);
mlx5e_ktls_build_static_params(wqe, sq->pc, sq->sqn, &priv_tx->crypto_info,
- priv_tx->tisn, priv_tx->key_id, 0, fence,
- TLS_OFFLOAD_CTX_DIR_TX);
+ priv_tx->tisn,
+ mlx5_crypto_dek_get_id(priv_tx->dek),
+ 0, fence, TLS_OFFLOAD_CTX_DIR_TX);
tx_fill_wi(sq, pi, num_wqebbs, 0, NULL);
sq->pc += num_wqebbs;
}
@@ -886,8 +892,22 @@ err_out:
return false;
}
+static void mlx5e_tls_tx_debugfs_init(struct mlx5e_tls *tls,
+ struct dentry *dfs_root)
+{
+ if (IS_ERR_OR_NULL(dfs_root))
+ return;
+
+ tls->debugfs.dfs_tx = debugfs_create_dir("tx", dfs_root);
+
+ debugfs_create_size_t("pool_size", 0400, tls->debugfs.dfs_tx,
+ &tls->tx_pool->size);
+}
+
int mlx5e_ktls_init_tx(struct mlx5e_priv *priv)
{
+ struct mlx5e_tls *tls = priv->tls;
+
if (!mlx5e_is_ktls_tx(priv->mdev))
return 0;
@@ -895,6 +915,8 @@ int mlx5e_ktls_init_tx(struct mlx5e_priv *priv)
if (!priv->tls->tx_pool)
return -ENOMEM;
+ mlx5e_tls_tx_debugfs_init(tls, tls->debugfs.dfs);
+
return 0;
}
@@ -903,6 +925,9 @@ void mlx5e_ktls_cleanup_tx(struct mlx5e_priv *priv)
if (!mlx5e_is_ktls_tx(priv->mdev))
return;
+ debugfs_remove_recursive(priv->tls->debugfs.dfs_tx);
+ priv->tls->debugfs.dfs_tx = NULL;
+
mlx5e_tls_tx_pool_cleanup(priv->tls->tx_pool);
priv->tls->tx_pool = NULL;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
index 7f6b940830b3..08d0929e8260 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
@@ -7,7 +7,7 @@
#include "en.h"
#include "lib/aso.h"
-#include "lib/mlx5.h"
+#include "lib/crypto.h"
#include "en_accel/macsec.h"
#include "en_accel/macsec_fs.h"
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
index 68f19324db93..4c9a3210600c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
@@ -31,6 +31,7 @@
*/
#include "en.h"
+#include "lib/crypto.h"
/* mlx5e global resources should be placed in this file.
* Global resources are common to all the netdevices created on the same nic.
@@ -104,6 +105,13 @@ int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev)
INIT_LIST_HEAD(&res->td.tirs_list);
mutex_init(&res->td.list_lock);
+ mdev->mlx5e_res.dek_priv = mlx5_crypto_dek_init(mdev);
+ if (IS_ERR(mdev->mlx5e_res.dek_priv)) {
+ mlx5_core_err(mdev, "crypto dek init failed, %ld\n",
+ PTR_ERR(mdev->mlx5e_res.dek_priv));
+ mdev->mlx5e_res.dek_priv = NULL;
+ }
+
return 0;
err_destroy_mkey:
@@ -119,6 +127,8 @@ void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev)
{
struct mlx5e_hw_objs *res = &mdev->mlx5e_res.hw_objs;
+ mlx5_crypto_dek_cleanup(mdev->mlx5e_res.dek_priv);
+ mdev->mlx5e_res.dek_priv = NULL;
mlx5_free_bfreg(mdev, &res->bfreg);
mlx5_core_destroy_mkey(mdev, res->mkey);
mlx5_core_dealloc_transport_domain(mdev, res->td.tdn);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index 1892ccb889b3..05796f8b1d7c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -30,6 +30,7 @@
* SOFTWARE.
*/
+#include <linux/debugfs.h>
#include <linux/list.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
@@ -67,6 +68,7 @@ struct mlx5e_flow_steering {
struct mlx5e_fs_udp *udp;
struct mlx5e_fs_any *any;
struct mlx5e_ptp_fs *ptp_fs;
+ struct dentry *dfs_root;
};
static int mlx5e_add_l2_flow_rule(struct mlx5e_flow_steering *fs,
@@ -104,6 +106,11 @@ static inline int mlx5e_hash_l2(const u8 *addr)
return addr[5];
}
+struct dentry *mlx5e_fs_get_debugfs_root(struct mlx5e_flow_steering *fs)
+{
+ return fs->dfs_root;
+}
+
static void mlx5e_add_l2_to_hash(struct hlist_head *hash, const u8 *addr)
{
struct mlx5e_l2_hash_node *hn;
@@ -443,7 +450,7 @@ void mlx5e_enable_cvlan_filter(struct mlx5e_flow_steering *fs, bool promisc)
void mlx5e_disable_cvlan_filter(struct mlx5e_flow_steering *fs, bool promisc)
{
- if (fs->vlan->cvlan_filter_disabled)
+ if (!fs->vlan || fs->vlan->cvlan_filter_disabled)
return;
fs->vlan->cvlan_filter_disabled = true;
@@ -1429,9 +1436,19 @@ static int mlx5e_fs_ethtool_alloc(struct mlx5e_flow_steering *fs)
static void mlx5e_fs_ethtool_free(struct mlx5e_flow_steering *fs) { }
#endif
+static void mlx5e_fs_debugfs_init(struct mlx5e_flow_steering *fs,
+ struct dentry *dfs_root)
+{
+ if (IS_ERR_OR_NULL(dfs_root))
+ return;
+
+ fs->dfs_root = debugfs_create_dir("fs", dfs_root);
+}
+
struct mlx5e_flow_steering *mlx5e_fs_init(const struct mlx5e_profile *profile,
struct mlx5_core_dev *mdev,
- bool state_destroy)
+ bool state_destroy,
+ struct dentry *dfs_root)
{
struct mlx5e_flow_steering *fs;
int err;
@@ -1458,6 +1475,8 @@ struct mlx5e_flow_steering *mlx5e_fs_init(const struct mlx5e_profile *profile,
if (err)
goto err_free_tc;
+ mlx5e_fs_debugfs_init(fs, dfs_root);
+
return fs;
err_free_tc:
mlx5e_fs_tc_free(fs);
@@ -1471,6 +1490,7 @@ err:
void mlx5e_fs_cleanup(struct mlx5e_flow_steering *fs)
{
+ debugfs_remove_recursive(fs->dfs_root);
mlx5e_fs_ethtool_free(fs);
mlx5e_fs_tc_free(fs);
mlx5e_fs_vlan_free(fs);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index abcc614b6191..76a9c5194a70 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -35,9 +35,11 @@
#include <net/vxlan.h>
#include <net/geneve.h>
#include <linux/bpf.h>
+#include <linux/debugfs.h>
#include <linux/if_bridge.h>
#include <linux/filter.h>
#include <net/page_pool.h>
+#include <net/pkt_sched.h>
#include <net/xdp_sock_drv.h>
#include "eswitch.h"
#include "en.h"
@@ -179,17 +181,21 @@ static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
static int blocking_event(struct notifier_block *nb, unsigned long event, void *data)
{
struct mlx5e_priv *priv = container_of(nb, struct mlx5e_priv, blocking_events_nb);
+ struct mlx5_devlink_trap_event_ctx *trap_event_ctx = data;
int err;
switch (event) {
case MLX5_DRIVER_EVENT_TYPE_TRAP:
- err = mlx5e_handle_trap_event(priv, data);
+ err = mlx5e_handle_trap_event(priv, trap_event_ctx->trap);
+ if (err) {
+ trap_event_ctx->err = err;
+ return NOTIFY_BAD;
+ }
break;
default:
- netdev_warn(priv->netdev, "Sync event: Unknown event %ld\n", event);
- err = -EINVAL;
+ return NOTIFY_DONE;
}
- return err;
+ return NOTIFY_OK;
}
static void mlx5e_enable_blocking_events(struct mlx5e_priv *priv)
@@ -591,7 +597,8 @@ static int mlx5e_init_rxq_rq(struct mlx5e_channel *c, struct mlx5e_params *param
rq->ix = c->ix;
rq->channel = c;
rq->mdev = mdev;
- rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
+ rq->hw_mtu =
+ MLX5E_SW2HW_MTU(params, params->sw_mtu) - ETH_FCS_LEN * !params->scatter_fcs_en;
rq->xdpsq = &c->rq_xdpsq;
rq->stats = &c->priv->channel_stats[c->ix]->rq;
rq->ptp_cyc2time = mlx5_rq_ts_translator(mdev);
@@ -661,6 +668,26 @@ static void mlx5e_rq_free_shampo(struct mlx5e_rq *rq)
mlx5e_rq_shampo_hd_free(rq);
}
+static __be32 mlx5e_get_terminate_scatter_list_mkey(struct mlx5_core_dev *dev)
+{
+ u32 out[MLX5_ST_SZ_DW(query_special_contexts_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(query_special_contexts_in)] = {};
+ int res;
+
+ if (!MLX5_CAP_GEN(dev, terminate_scatter_list_mkey))
+ return MLX5_TERMINATE_SCATTER_LIST_LKEY;
+
+ MLX5_SET(query_special_contexts_in, in, opcode,
+ MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS);
+ res = mlx5_cmd_exec_inout(dev, query_special_contexts, in, out);
+ if (res)
+ return MLX5_TERMINATE_SCATTER_LIST_LKEY;
+
+ res = MLX5_GET(query_special_contexts_out, out,
+ terminate_scatter_list_mkey);
+ return cpu_to_be32(res);
+}
+
static int mlx5e_alloc_rq(struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk,
struct mlx5e_rq_param *rqp,
@@ -825,7 +852,7 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
/* check if num_frags is not a pow of two */
if (rq->wqe.info.num_frags < (1 << rq->wqe.info.log_num_frags)) {
wqe->data[f].byte_count = 0;
- wqe->data[f].lkey = cpu_to_be32(MLX5_INVALID_LKEY);
+ wqe->data[f].lkey = mlx5e_get_terminate_scatter_list_mkey(mdev);
wqe->data[f].addr = 0;
}
}
@@ -1014,35 +1041,6 @@ int mlx5e_flush_rq(struct mlx5e_rq *rq, int curr_state)
return mlx5e_rq_to_ready(rq, curr_state);
}
-static int mlx5e_modify_rq_scatter_fcs(struct mlx5e_rq *rq, bool enable)
-{
- struct mlx5_core_dev *mdev = rq->mdev;
-
- void *in;
- void *rqc;
- int inlen;
- int err;
-
- inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
- in = kvzalloc(inlen, GFP_KERNEL);
- if (!in)
- return -ENOMEM;
-
- rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
-
- MLX5_SET(modify_rq_in, in, rq_state, MLX5_RQC_STATE_RDY);
- MLX5_SET64(modify_rq_in, in, modify_bitmask,
- MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_SCATTER_FCS);
- MLX5_SET(rqc, rqc, scatter_fcs, enable);
- MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY);
-
- err = mlx5_core_modify_rq(mdev, rq->rqn, in);
-
- kvfree(in);
-
- return err;
-}
-
static int mlx5e_modify_rq_vsd(struct mlx5e_rq *rq, bool vsd)
{
struct mlx5_core_dev *mdev = rq->mdev;
@@ -1469,6 +1467,7 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
sq->mkey_be = c->mkey_be;
sq->netdev = c->netdev;
sq->mdev = c->mdev;
+ sq->channel = c;
sq->priv = c->priv;
sq->ch_ix = c->ix;
sq->txq_ix = txq_ix;
@@ -2481,8 +2480,6 @@ static void mlx5e_activate_channel(struct mlx5e_channel *c)
mlx5e_activate_xsk(c);
else
mlx5e_activate_rq(&c->rq);
-
- mlx5e_trigger_napi_icosq(c);
}
static void mlx5e_deactivate_channel(struct mlx5e_channel *c)
@@ -2574,13 +2571,19 @@ err_free:
return err;
}
-static void mlx5e_activate_channels(struct mlx5e_channels *chs)
+static void mlx5e_activate_channels(struct mlx5e_priv *priv, struct mlx5e_channels *chs)
{
int i;
for (i = 0; i < chs->num; i++)
mlx5e_activate_channel(chs->c[i]);
+ if (priv->htb)
+ mlx5e_qos_activate_queues(priv);
+
+ for (i = 0; i < chs->num; i++)
+ mlx5e_trigger_napi_icosq(chs->c[i]);
+
if (chs->ptp)
mlx5e_ptp_activate_channel(chs->ptp);
}
@@ -2887,9 +2890,7 @@ out:
void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
{
mlx5e_build_txq_maps(priv);
- mlx5e_activate_channels(&priv->channels);
- if (priv->htb)
- mlx5e_qos_activate_queues(priv);
+ mlx5e_activate_channels(priv, &priv->channels);
mlx5e_xdp_tx_enable(priv);
/* dev_watchdog() wants all TX queues to be started when the carrier is
@@ -2997,32 +2998,37 @@ int mlx5e_safe_switch_params(struct mlx5e_priv *priv,
mlx5e_fp_preactivate preactivate,
void *context, bool reset)
{
- struct mlx5e_channels new_chs = {};
+ struct mlx5e_channels *new_chs;
int err;
reset &= test_bit(MLX5E_STATE_OPENED, &priv->state);
if (!reset)
return mlx5e_switch_priv_params(priv, params, preactivate, context);
- new_chs.params = *params;
+ new_chs = kzalloc(sizeof(*new_chs), GFP_KERNEL);
+ if (!new_chs)
+ return -ENOMEM;
+ new_chs->params = *params;
- mlx5e_selq_prepare_params(&priv->selq, &new_chs.params);
+ mlx5e_selq_prepare_params(&priv->selq, &new_chs->params);
- err = mlx5e_open_channels(priv, &new_chs);
+ err = mlx5e_open_channels(priv, new_chs);
if (err)
goto err_cancel_selq;
- err = mlx5e_switch_priv_channels(priv, &new_chs, preactivate, context);
+ err = mlx5e_switch_priv_channels(priv, new_chs, preactivate, context);
if (err)
goto err_close;
+ kfree(new_chs);
return 0;
err_close:
- mlx5e_close_channels(&new_chs);
+ mlx5e_close_channels(new_chs);
err_cancel_selq:
mlx5e_selq_cancel(&priv->selq);
+ kfree(new_chs);
return err;
}
@@ -3314,20 +3320,6 @@ static void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv)
mlx5e_destroy_tises(priv);
}
-static int mlx5e_modify_channels_scatter_fcs(struct mlx5e_channels *chs, bool enable)
-{
- int err = 0;
- int i;
-
- for (i = 0; i < chs->num; i++) {
- err = mlx5e_modify_rq_scatter_fcs(&chs->c[i]->rq, enable);
- if (err)
- return err;
- }
-
- return 0;
-}
-
static int mlx5e_modify_channels_vsd(struct mlx5e_channels *chs, bool vsd)
{
int err;
@@ -3903,41 +3895,27 @@ static int mlx5e_set_rx_port_ts(struct mlx5_core_dev *mdev, bool enable)
return mlx5_set_ports_check(mdev, in, sizeof(in));
}
+static int mlx5e_set_rx_port_ts_wrap(struct mlx5e_priv *priv, void *ctx)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ bool enable = *(bool *)ctx;
+
+ return mlx5e_set_rx_port_ts(mdev, enable);
+}
+
static int set_feature_rx_fcs(struct net_device *netdev, bool enable)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5e_channels *chs = &priv->channels;
- struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5e_params new_params;
int err;
mutex_lock(&priv->state_lock);
- if (enable) {
- err = mlx5e_set_rx_port_ts(mdev, false);
- if (err)
- goto out;
-
- chs->params.scatter_fcs_en = true;
- err = mlx5e_modify_channels_scatter_fcs(chs, true);
- if (err) {
- chs->params.scatter_fcs_en = false;
- mlx5e_set_rx_port_ts(mdev, true);
- }
- } else {
- chs->params.scatter_fcs_en = false;
- err = mlx5e_modify_channels_scatter_fcs(chs, false);
- if (err) {
- chs->params.scatter_fcs_en = true;
- goto out;
- }
- err = mlx5e_set_rx_port_ts(mdev, true);
- if (err) {
- mlx5_core_warn(mdev, "Failed to set RX port timestamp %d\n", err);
- err = 0;
- }
- }
-
-out:
+ new_params = chs->params;
+ new_params.scatter_fcs_en = enable;
+ err = mlx5e_safe_switch_params(priv, &new_params, mlx5e_set_rx_port_ts_wrap,
+ &new_params.scatter_fcs_en, true);
mutex_unlock(&priv->state_lock);
return err;
}
@@ -4074,6 +4052,10 @@ static netdev_features_t mlx5e_fix_uplink_rep_features(struct net_device *netdev
if (netdev->features & NETIF_F_GRO_HW)
netdev_warn(netdev, "Disabling HW_GRO, not supported in switchdev mode\n");
+ features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
+ if (netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
+ netdev_warn(netdev, "Disabling HW_VLAN CTAG FILTERING, not supported in switchdev mode\n");
+
return features;
}
@@ -4779,6 +4761,13 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
if (old_prog)
bpf_prog_put(old_prog);
+ if (reset) {
+ if (prog)
+ xdp_features_set_redirect_target(netdev, true);
+ else
+ xdp_features_clear_redirect_target(netdev);
+ }
+
if (!test_bit(MLX5E_STATE_OPENED, &priv->state) || reset)
goto unlock;
@@ -5056,6 +5045,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
SET_NETDEV_DEV(netdev, mdev->device);
netdev->netdev_ops = &mlx5e_netdev_ops;
+ netdev->xdp_metadata_ops = &mlx5e_xdp_metadata_ops;
mlx5e_dcbnl_build_netdev(netdev);
@@ -5173,6 +5163,10 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
netdev->features |= NETIF_F_HIGHDMA;
netdev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
+ netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_XSK_ZEROCOPY |
+ NETDEV_XDP_ACT_RX_SG;
+
netdev->priv_flags |= IFF_UNICAST_FLT;
netif_set_tso_max_size(netdev, GSO_MAX_SIZE);
@@ -5233,7 +5227,8 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
mlx5e_timestamp_init(priv);
fs = mlx5e_fs_init(priv->profile, mdev,
- !test_bit(MLX5E_STATE_DESTROYING, &priv->state));
+ !test_bit(MLX5E_STATE_DESTROYING, &priv->state),
+ priv->dfs_root);
if (!fs) {
err = -ENOMEM;
mlx5_core_err(mdev, "FS initialization failed, %d\n", err);
@@ -5874,7 +5869,8 @@ void mlx5e_destroy_netdev(struct mlx5e_priv *priv)
static int mlx5e_resume(struct auxiliary_device *adev)
{
struct mlx5_adev *edev = container_of(adev, struct mlx5_adev, adev);
- struct mlx5e_priv *priv = auxiliary_get_drvdata(adev);
+ struct mlx5e_dev *mlx5e_dev = auxiliary_get_drvdata(adev);
+ struct mlx5e_priv *priv = mlx5e_dev->priv;
struct net_device *netdev = priv->netdev;
struct mlx5_core_dev *mdev = edev->mdev;
int err;
@@ -5897,7 +5893,8 @@ static int mlx5e_resume(struct auxiliary_device *adev)
static int mlx5e_suspend(struct auxiliary_device *adev, pm_message_t state)
{
- struct mlx5e_priv *priv = auxiliary_get_drvdata(adev);
+ struct mlx5e_dev *mlx5e_dev = auxiliary_get_drvdata(adev);
+ struct mlx5e_priv *priv = mlx5e_dev->priv;
struct net_device *netdev = priv->netdev;
struct mlx5_core_dev *mdev = priv->mdev;
@@ -5915,35 +5912,46 @@ static int mlx5e_probe(struct auxiliary_device *adev,
struct mlx5_adev *edev = container_of(adev, struct mlx5_adev, adev);
const struct mlx5e_profile *profile = &mlx5e_nic_profile;
struct mlx5_core_dev *mdev = edev->mdev;
+ struct mlx5e_dev *mlx5e_dev;
struct net_device *netdev;
pm_message_t state = {};
struct mlx5e_priv *priv;
int err;
+ mlx5e_dev = mlx5e_create_devlink(&adev->dev, mdev);
+ if (IS_ERR(mlx5e_dev))
+ return PTR_ERR(mlx5e_dev);
+ auxiliary_set_drvdata(adev, mlx5e_dev);
+
+ err = mlx5e_devlink_port_register(mlx5e_dev, mdev);
+ if (err) {
+ mlx5_core_err(mdev, "mlx5e_devlink_port_register failed, %d\n", err);
+ goto err_devlink_unregister;
+ }
+
netdev = mlx5e_create_netdev(mdev, profile);
if (!netdev) {
mlx5_core_err(mdev, "mlx5e_create_netdev failed\n");
- return -ENOMEM;
+ err = -ENOMEM;
+ goto err_devlink_port_unregister;
}
+ SET_NETDEV_DEVLINK_PORT(netdev, &mlx5e_dev->dl_port);
mlx5e_build_nic_netdev(netdev);
priv = netdev_priv(netdev);
- auxiliary_set_drvdata(adev, priv);
+ mlx5e_dev->priv = priv;
priv->profile = profile;
priv->ppriv = NULL;
- err = mlx5e_devlink_port_register(priv);
- if (err) {
- mlx5_core_err(mdev, "mlx5e_devlink_port_register failed, %d\n", err);
- goto err_destroy_netdev;
- }
+ priv->dfs_root = debugfs_create_dir("nic",
+ mlx5_debugfs_get_dev_root(priv->mdev));
err = profile->init(mdev, netdev);
if (err) {
mlx5_core_err(mdev, "mlx5e_nic_profile init failed, %d\n", err);
- goto err_devlink_cleanup;
+ goto err_destroy_netdev;
}
err = mlx5e_resume(adev);
@@ -5952,7 +5960,6 @@ static int mlx5e_probe(struct auxiliary_device *adev,
goto err_profile_cleanup;
}
- SET_NETDEV_DEVLINK_PORT(netdev, mlx5e_devlink_get_dl_port(priv));
err = register_netdev(netdev);
if (err) {
mlx5_core_err(mdev, "register_netdev failed, %d\n", err);
@@ -5960,7 +5967,7 @@ static int mlx5e_probe(struct auxiliary_device *adev,
}
mlx5e_dcbnl_init_app(priv);
- mlx5_uplink_netdev_set(mdev, netdev);
+ mlx5_core_uplink_netdev_set(mdev, netdev);
mlx5e_params_print_info(mdev, &priv->channels.params);
return 0;
@@ -5968,24 +5975,31 @@ err_resume:
mlx5e_suspend(adev, state);
err_profile_cleanup:
profile->cleanup(priv);
-err_devlink_cleanup:
- mlx5e_devlink_port_unregister(priv);
err_destroy_netdev:
+ debugfs_remove_recursive(priv->dfs_root);
mlx5e_destroy_netdev(priv);
+err_devlink_port_unregister:
+ mlx5e_devlink_port_unregister(mlx5e_dev);
+err_devlink_unregister:
+ mlx5e_destroy_devlink(mlx5e_dev);
return err;
}
static void mlx5e_remove(struct auxiliary_device *adev)
{
- struct mlx5e_priv *priv = auxiliary_get_drvdata(adev);
+ struct mlx5e_dev *mlx5e_dev = auxiliary_get_drvdata(adev);
+ struct mlx5e_priv *priv = mlx5e_dev->priv;
pm_message_t state = {};
+ mlx5_core_uplink_netdev_set(priv->mdev, NULL);
mlx5e_dcbnl_delete_app(priv);
unregister_netdev(priv->netdev);
mlx5e_suspend(adev, state);
priv->profile->cleanup(priv);
- mlx5e_devlink_port_unregister(priv);
+ debugfs_remove_recursive(priv->dfs_root);
mlx5e_destroy_netdev(priv);
+ mlx5e_devlink_port_unregister(mlx5e_dev);
+ mlx5e_destroy_devlink(mlx5e_dev);
}
static const struct auxiliary_device_id mlx5e_id_table[] = {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 7d90e5b72854..9b9203443085 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -789,8 +789,10 @@ static int mlx5e_init_rep(struct mlx5_core_dev *mdev,
{
struct mlx5e_priv *priv = netdev_priv(netdev);
- priv->fs = mlx5e_fs_init(priv->profile, mdev,
- !test_bit(MLX5E_STATE_DESTROYING, &priv->state));
+ priv->fs =
+ mlx5e_fs_init(priv->profile, mdev,
+ !test_bit(MLX5E_STATE_DESTROYING, &priv->state),
+ priv->dfs_root);
if (!priv->fs) {
netdev_err(priv->netdev, "FS allocation failed\n");
return -ENOMEM;
@@ -808,7 +810,8 @@ static int mlx5e_init_ul_rep(struct mlx5_core_dev *mdev,
struct mlx5e_priv *priv = netdev_priv(netdev);
priv->fs = mlx5e_fs_init(priv->profile, mdev,
- !test_bit(MLX5E_STATE_DESTROYING, &priv->state));
+ !test_bit(MLX5E_STATE_DESTROYING, &priv->state),
+ priv->dfs_root);
if (!priv->fs) {
netdev_err(priv->netdev, "FS allocation failed\n");
return -ENOMEM;
@@ -1004,8 +1007,23 @@ static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
priv->rx_res = NULL;
}
+static void mlx5e_rep_mpesw_work(struct work_struct *work)
+{
+ struct mlx5_rep_uplink_priv *uplink_priv =
+ container_of(work, struct mlx5_rep_uplink_priv,
+ mpesw_work);
+ struct mlx5e_rep_priv *rpriv =
+ container_of(uplink_priv, struct mlx5e_rep_priv,
+ uplink_priv);
+ struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
+
+ rep_vport_rx_rule_destroy(priv);
+ mlx5e_create_rep_vport_rx_rule(priv);
+}
+
static int mlx5e_init_ul_rep_rx(struct mlx5e_priv *priv)
{
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
int err;
mlx5e_create_q_counters(priv);
@@ -1015,12 +1033,17 @@ static int mlx5e_init_ul_rep_rx(struct mlx5e_priv *priv)
mlx5e_tc_int_port_init_rep_rx(priv);
+ INIT_WORK(&rpriv->uplink_priv.mpesw_work, mlx5e_rep_mpesw_work);
+
out:
return err;
}
static void mlx5e_cleanup_ul_rep_rx(struct mlx5e_priv *priv)
{
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+
+ cancel_work_sync(&rpriv->uplink_priv.mpesw_work);
mlx5e_tc_int_port_cleanup_rep_rx(priv);
mlx5e_cleanup_rep_rx(priv);
mlx5e_destroy_q_counters(priv);
@@ -1129,6 +1152,19 @@ static int mlx5e_update_rep_rx(struct mlx5e_priv *priv)
return 0;
}
+static int mlx5e_rep_event_mpesw(struct mlx5e_priv *priv)
+{
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ struct mlx5_eswitch_rep *rep = rpriv->rep;
+
+ if (rep->vport != MLX5_VPORT_UPLINK)
+ return NOTIFY_DONE;
+
+ queue_work(priv->wq, &rpriv->uplink_priv.mpesw_work);
+
+ return NOTIFY_OK;
+}
+
static int uplink_rep_async_event(struct notifier_block *nb, unsigned long event, void *data)
{
struct mlx5e_priv *priv = container_of(nb, struct mlx5e_priv, events_nb);
@@ -1150,6 +1186,8 @@ static int uplink_rep_async_event(struct notifier_block *nb, unsigned long event
if (event == MLX5_DEV_EVENT_PORT_AFFINITY)
return mlx5e_rep_tc_event_port_affinity(priv);
+ else if (event == MLX5_DEV_EVENT_MULTIPORT_ESW)
+ return mlx5e_rep_event_mpesw(priv);
return NOTIFY_DONE;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
index b4e691760da9..dcfad0bf0f45 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
@@ -100,6 +100,11 @@ struct mlx5_rep_uplink_priv {
struct mlx5e_tc_int_port_priv *int_port_priv;
struct mlx5e_flow_meters *flow_meters;
+
+ /* tc action stats */
+ struct mlx5e_tc_act_stats_handle *action_stats_handle;
+
+ struct work_struct mpesw_work;
};
struct mlx5e_rep_priv {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 3df455f6b168..3f7b63d6616b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -62,10 +62,12 @@
static struct sk_buff *
mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
- u16 cqe_bcnt, u32 head_offset, u32 page_idx);
+ struct mlx5_cqe64 *cqe, u16 cqe_bcnt, u32 head_offset,
+ u32 page_idx);
static struct sk_buff *
mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
- u16 cqe_bcnt, u32 head_offset, u32 page_idx);
+ struct mlx5_cqe64 *cqe, u16 cqe_bcnt, u32 head_offset,
+ u32 page_idx);
static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
@@ -76,11 +78,6 @@ const struct mlx5e_rx_handlers mlx5e_rx_handlers_nic = {
.handle_rx_cqe_mpwqe_shampo = mlx5e_handle_rx_cqe_mpwrq_shampo,
};
-static inline bool mlx5e_rx_hw_stamp(struct hwtstamp_config *config)
-{
- return config->rx_filter == HWTSTAMP_FILTER_ALL;
-}
-
static inline void mlx5e_read_cqe_slot(struct mlx5_cqwq *wq,
u32 cqcc, void *data)
{
@@ -1559,7 +1556,7 @@ struct sk_buff *mlx5e_build_linear_skb(struct mlx5e_rq *rq, void *va,
u32 frag_size, u16 headroom,
u32 cqe_bcnt, u32 metasize)
{
- struct sk_buff *skb = build_skb(va, frag_size);
+ struct sk_buff *skb = napi_build_skb(va, frag_size);
if (unlikely(!skb)) {
rq->stats->buff_alloc_err++;
@@ -1575,16 +1572,19 @@ struct sk_buff *mlx5e_build_linear_skb(struct mlx5e_rq *rq, void *va,
return skb;
}
-static void mlx5e_fill_xdp_buff(struct mlx5e_rq *rq, void *va, u16 headroom,
- u32 len, struct xdp_buff *xdp)
+static void mlx5e_fill_mxbuf(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
+ void *va, u16 headroom, u32 len,
+ struct mlx5e_xdp_buff *mxbuf)
{
- xdp_init_buff(xdp, rq->buff.frame0_sz, &rq->xdp_rxq);
- xdp_prepare_buff(xdp, va, headroom, len, true);
+ xdp_init_buff(&mxbuf->xdp, rq->buff.frame0_sz, &rq->xdp_rxq);
+ xdp_prepare_buff(&mxbuf->xdp, va, headroom, len, true);
+ mxbuf->cqe = cqe;
+ mxbuf->rq = rq;
}
static struct sk_buff *
mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi,
- u32 cqe_bcnt)
+ struct mlx5_cqe64 *cqe, u32 cqe_bcnt)
{
union mlx5e_alloc_unit *au = wi->au;
u16 rx_headroom = rq->buff.headroom;
@@ -1606,16 +1606,16 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi,
prog = rcu_dereference(rq->xdp_prog);
if (prog) {
- struct xdp_buff xdp;
+ struct mlx5e_xdp_buff mxbuf;
net_prefetchw(va); /* xdp_frame data area */
- mlx5e_fill_xdp_buff(rq, va, rx_headroom, cqe_bcnt, &xdp);
- if (mlx5e_xdp_handle(rq, au->page, prog, &xdp))
+ mlx5e_fill_mxbuf(rq, cqe, va, rx_headroom, cqe_bcnt, &mxbuf);
+ if (mlx5e_xdp_handle(rq, prog, &mxbuf))
return NULL; /* page/packet was consumed by XDP */
- rx_headroom = xdp.data - xdp.data_hard_start;
- metasize = xdp.data - xdp.data_meta;
- cqe_bcnt = xdp.data_end - xdp.data;
+ rx_headroom = mxbuf.xdp.data - mxbuf.xdp.data_hard_start;
+ metasize = mxbuf.xdp.data - mxbuf.xdp.data_meta;
+ cqe_bcnt = mxbuf.xdp.data_end - mxbuf.xdp.data;
}
frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt, metasize);
@@ -1630,16 +1630,16 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi,
static struct sk_buff *
mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi,
- u32 cqe_bcnt)
+ struct mlx5_cqe64 *cqe, u32 cqe_bcnt)
{
struct mlx5e_rq_frag_info *frag_info = &rq->wqe.info.arr[0];
struct mlx5e_wqe_frag_info *head_wi = wi;
union mlx5e_alloc_unit *au = wi->au;
u16 rx_headroom = rq->buff.headroom;
struct skb_shared_info *sinfo;
+ struct mlx5e_xdp_buff mxbuf;
u32 frag_consumed_bytes;
struct bpf_prog *prog;
- struct xdp_buff xdp;
struct sk_buff *skb;
dma_addr_t addr;
u32 truesize;
@@ -1654,8 +1654,8 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
net_prefetchw(va); /* xdp_frame data area */
net_prefetch(va + rx_headroom);
- mlx5e_fill_xdp_buff(rq, va, rx_headroom, frag_consumed_bytes, &xdp);
- sinfo = xdp_get_shared_info_from_buff(&xdp);
+ mlx5e_fill_mxbuf(rq, cqe, va, rx_headroom, frag_consumed_bytes, &mxbuf);
+ sinfo = xdp_get_shared_info_from_buff(&mxbuf.xdp);
truesize = 0;
cqe_bcnt -= frag_consumed_bytes;
@@ -1673,13 +1673,13 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
dma_sync_single_for_cpu(rq->pdev, addr + wi->offset,
frag_consumed_bytes, rq->buff.map_dir);
- if (!xdp_buff_has_frags(&xdp)) {
+ if (!xdp_buff_has_frags(&mxbuf.xdp)) {
/* Init on the first fragment to avoid cold cache access
* when possible.
*/
sinfo->nr_frags = 0;
sinfo->xdp_frags_size = 0;
- xdp_buff_set_frags_flag(&xdp);
+ xdp_buff_set_frags_flag(&mxbuf.xdp);
}
frag = &sinfo->frags[sinfo->nr_frags++];
@@ -1688,7 +1688,7 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
skb_frag_size_set(frag, frag_consumed_bytes);
if (page_is_pfmemalloc(au->page))
- xdp_buff_set_frag_pfmemalloc(&xdp);
+ xdp_buff_set_frag_pfmemalloc(&mxbuf.xdp);
sinfo->xdp_frags_size += frag_consumed_bytes;
truesize += frag_info->frag_stride;
@@ -1698,10 +1698,8 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
wi++;
}
- au = head_wi->au;
-
prog = rcu_dereference(rq->xdp_prog);
- if (prog && mlx5e_xdp_handle(rq, au->page, prog, &xdp)) {
+ if (prog && mlx5e_xdp_handle(rq, prog, &mxbuf)) {
if (test_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
int i;
@@ -1711,22 +1709,22 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
return NULL; /* page/packet was consumed by XDP */
}
- skb = mlx5e_build_linear_skb(rq, xdp.data_hard_start, rq->buff.frame0_sz,
- xdp.data - xdp.data_hard_start,
- xdp.data_end - xdp.data,
- xdp.data - xdp.data_meta);
+ skb = mlx5e_build_linear_skb(rq, mxbuf.xdp.data_hard_start, rq->buff.frame0_sz,
+ mxbuf.xdp.data - mxbuf.xdp.data_hard_start,
+ mxbuf.xdp.data_end - mxbuf.xdp.data,
+ mxbuf.xdp.data - mxbuf.xdp.data_meta);
if (unlikely(!skb))
return NULL;
- page_ref_inc(au->page);
+ page_ref_inc(head_wi->au->page);
- if (unlikely(xdp_buff_has_frags(&xdp))) {
+ if (xdp_buff_has_frags(&mxbuf.xdp)) {
int i;
/* sinfo->nr_frags is reset by build_skb, calculate again. */
xdp_update_skb_shared_info(skb, wi - head_wi - 1,
sinfo->xdp_frags_size, truesize,
- xdp_buff_is_frag_pfmemalloc(&xdp));
+ xdp_buff_is_frag_pfmemalloc(&mxbuf.xdp));
for (i = 0; i < sinfo->nr_frags; i++) {
skb_frag_t *frag = &sinfo->frags[i];
@@ -1777,7 +1775,7 @@ static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
mlx5e_skb_from_cqe_linear,
mlx5e_skb_from_cqe_nonlinear,
mlx5e_xsk_skb_from_cqe_linear,
- rq, wi, cqe_bcnt);
+ rq, wi, cqe, cqe_bcnt);
if (!skb) {
/* probably for XDP */
if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
@@ -1792,7 +1790,7 @@ static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
if (mlx5e_cqe_regb_chain(cqe))
- if (!mlx5e_tc_update_skb(cqe, skb)) {
+ if (!mlx5e_tc_update_skb_nic(cqe, skb)) {
dev_kfree_skb_any(skb);
goto free_wqe;
}
@@ -1830,7 +1828,7 @@ static void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe,
mlx5e_skb_from_cqe_linear,
mlx5e_skb_from_cqe_nonlinear,
- rq, wi, cqe_bcnt);
+ rq, wi, cqe, cqe_bcnt);
if (!skb) {
/* probably for XDP */
if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
@@ -1889,7 +1887,7 @@ static void mlx5e_handle_rx_cqe_mpwrq_rep(struct mlx5e_rq *rq, struct mlx5_cqe64
skb = INDIRECT_CALL_2(rq->mpwqe.skb_from_cqe_mpwrq,
mlx5e_skb_from_cqe_mpwrq_linear,
mlx5e_skb_from_cqe_mpwrq_nonlinear,
- rq, wi, cqe_bcnt, head_offset, page_idx);
+ rq, wi, cqe, cqe_bcnt, head_offset, page_idx);
if (!skb)
goto mpwrq_cqe_out;
@@ -1940,7 +1938,8 @@ mlx5e_fill_skb_data(struct sk_buff *skb, struct mlx5e_rq *rq,
static struct sk_buff *
mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
- u16 cqe_bcnt, u32 head_offset, u32 page_idx)
+ struct mlx5_cqe64 *cqe, u16 cqe_bcnt, u32 head_offset,
+ u32 page_idx)
{
union mlx5e_alloc_unit *au = &wi->alloc_units[page_idx];
u16 headlen = min_t(u16, MLX5E_RX_MAX_HEAD, cqe_bcnt);
@@ -1979,7 +1978,8 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
static struct sk_buff *
mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
- u16 cqe_bcnt, u32 head_offset, u32 page_idx)
+ struct mlx5_cqe64 *cqe, u16 cqe_bcnt, u32 head_offset,
+ u32 page_idx)
{
union mlx5e_alloc_unit *au = &wi->alloc_units[page_idx];
u16 rx_headroom = rq->buff.headroom;
@@ -2007,19 +2007,19 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
prog = rcu_dereference(rq->xdp_prog);
if (prog) {
- struct xdp_buff xdp;
+ struct mlx5e_xdp_buff mxbuf;
net_prefetchw(va); /* xdp_frame data area */
- mlx5e_fill_xdp_buff(rq, va, rx_headroom, cqe_bcnt, &xdp);
- if (mlx5e_xdp_handle(rq, au->page, prog, &xdp)) {
+ mlx5e_fill_mxbuf(rq, cqe, va, rx_headroom, cqe_bcnt, &mxbuf);
+ if (mlx5e_xdp_handle(rq, prog, &mxbuf)) {
if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))
__set_bit(page_idx, wi->xdp_xmit_bitmap); /* non-atomic */
return NULL; /* page/packet was consumed by XDP */
}
- rx_headroom = xdp.data - xdp.data_hard_start;
- metasize = xdp.data - xdp.data_meta;
- cqe_bcnt = xdp.data_end - xdp.data;
+ rx_headroom = mxbuf.xdp.data - mxbuf.xdp.data_hard_start;
+ metasize = mxbuf.xdp.data - mxbuf.xdp.data_meta;
+ cqe_bcnt = mxbuf.xdp.data_end - mxbuf.xdp.data;
}
frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt, metasize);
@@ -2174,8 +2174,8 @@ static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cq
if (likely(head_size))
*skb = mlx5e_skb_from_cqe_shampo(rq, wi, cqe, header_index);
else
- *skb = mlx5e_skb_from_cqe_mpwrq_nonlinear(rq, wi, cqe_bcnt, data_offset,
- page_idx);
+ *skb = mlx5e_skb_from_cqe_mpwrq_nonlinear(rq, wi, cqe, cqe_bcnt,
+ data_offset, page_idx);
if (unlikely(!*skb))
goto free_hd_entry;
@@ -2249,14 +2249,15 @@ static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cq
mlx5e_skb_from_cqe_mpwrq_linear,
mlx5e_skb_from_cqe_mpwrq_nonlinear,
mlx5e_xsk_skb_from_cqe_mpwrq_linear,
- rq, wi, cqe_bcnt, head_offset, page_idx);
+ rq, wi, cqe, cqe_bcnt, head_offset,
+ page_idx);
if (!skb)
goto mpwrq_cqe_out;
mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
if (mlx5e_cqe_regb_chain(cqe))
- if (!mlx5e_tc_update_skb(cqe, skb)) {
+ if (!mlx5e_tc_update_skb_nic(cqe, skb)) {
dev_kfree_skb_any(skb);
goto mpwrq_cqe_out;
}
@@ -2494,7 +2495,7 @@ static void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe,
mlx5e_skb_from_cqe_linear,
mlx5e_skb_from_cqe_nonlinear,
- rq, wi, cqe_bcnt);
+ rq, wi, cqe, cqe_bcnt);
if (!skb)
goto wq_free_wqe;
@@ -2567,10 +2568,8 @@ int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool
static void mlx5e_trap_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
- struct mlx5e_priv *priv = netdev_priv(rq->netdev);
struct mlx5_wq_cyc *wq = &rq->wqe.wq;
struct mlx5e_wqe_frag_info *wi;
- struct devlink_port *dl_port;
struct sk_buff *skb;
u32 cqe_bcnt;
u16 trap_id;
@@ -2586,15 +2585,15 @@ static void mlx5e_trap_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe
goto free_wqe;
}
- skb = mlx5e_skb_from_cqe_nonlinear(rq, wi, cqe_bcnt);
+ skb = mlx5e_skb_from_cqe_nonlinear(rq, wi, cqe, cqe_bcnt);
if (!skb)
goto free_wqe;
mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
skb_push(skb, ETH_HLEN);
- dl_port = mlx5e_devlink_get_dl_port(priv);
- mlx5_devlink_trap_report(rq->mdev, trap_id, skb, dl_port);
+ mlx5_devlink_trap_report(rq->mdev, trap_id, skb,
+ rq->netdev->devlink_port);
dev_kfree_skb_any(skb);
free_wqe:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 243d5d7750be..e34d9b5fb504 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -43,8 +43,10 @@
#include <net/ipv6_stubs.h>
#include <net/bareudp.h>
#include <net/bonding.h>
+#include <net/dst_metadata.h>
#include "en.h"
#include "en/tc/post_act.h"
+#include "en/tc/act_stats.h"
#include "en_rep.h"
#include "en/rep/tc.h"
#include "en/rep/neigh.h"
@@ -71,6 +73,12 @@
#define MLX5E_TC_TABLE_NUM_GROUPS 4
#define MLX5E_TC_TABLE_MAX_GROUP_SIZE BIT(18)
+struct mlx5e_hairpin_params {
+ struct mlx5_core_dev *mdev;
+ u32 num_queues;
+ u32 queue_size;
+};
+
struct mlx5e_tc_table {
/* Protects the dynamic assignment of the t parameter
* which is the nic tc root table.
@@ -93,10 +101,15 @@ struct mlx5e_tc_table {
struct mlx5_tc_ct_priv *ct;
struct mapping_ctx *mapping;
+ struct mlx5e_hairpin_params hairpin_params;
+ struct dentry *dfs_root;
+
+ /* tc action stats */
+ struct mlx5e_tc_act_stats_handle *action_stats_handle;
};
struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[] = {
- [CHAIN_TO_REG] = {
+ [MAPPED_OBJ_TO_REG] = {
.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_0,
.moffset = 0,
.mlen = 16,
@@ -123,7 +136,7 @@ struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[] = {
* into reg_b that is passed to SW since we don't
* jump between steering domains.
*/
- [NIC_CHAIN_TO_REG] = {
+ [NIC_MAPPED_OBJ_TO_REG] = {
.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_B,
.moffset = 0,
.mlen = 16,
@@ -278,6 +291,24 @@ mlx5e_tc_match_to_reg_set_and_get_id(struct mlx5_core_dev *mdev,
return err;
}
+static struct mlx5e_tc_act_stats_handle *
+get_act_stats_handle(struct mlx5e_priv *priv)
+{
+ struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5_rep_uplink_priv *uplink_priv;
+ struct mlx5e_rep_priv *uplink_rpriv;
+
+ if (is_mdev_switchdev_mode(priv->mdev)) {
+ uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
+ uplink_priv = &uplink_rpriv->uplink_priv;
+
+ return uplink_priv->action_stats_handle;
+ }
+
+ return tc->action_stats_handle;
+}
+
struct mlx5e_tc_int_port_priv *
mlx5e_get_int_port_priv(struct mlx5e_priv *priv)
{
@@ -639,36 +670,36 @@ get_mod_hdr_table(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow)
&tc->mod_hdr;
}
-static int mlx5e_attach_mod_hdr(struct mlx5e_priv *priv,
- struct mlx5e_tc_flow *flow,
- struct mlx5e_tc_flow_parse_attr *parse_attr)
+int mlx5e_tc_attach_mod_hdr(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow,
+ struct mlx5_flow_attr *attr)
{
- struct mlx5_modify_hdr *modify_hdr;
struct mlx5e_mod_hdr_handle *mh;
mh = mlx5e_mod_hdr_attach(priv->mdev, get_mod_hdr_table(priv, flow),
mlx5e_get_flow_namespace(flow),
- &parse_attr->mod_hdr_acts);
+ &attr->parse_attr->mod_hdr_acts);
if (IS_ERR(mh))
return PTR_ERR(mh);
- modify_hdr = mlx5e_mod_hdr_get(mh);
- flow->attr->modify_hdr = modify_hdr;
- flow->mh = mh;
+ WARN_ON(attr->modify_hdr);
+ attr->modify_hdr = mlx5e_mod_hdr_get(mh);
+ attr->mh = mh;
return 0;
}
-static void mlx5e_detach_mod_hdr(struct mlx5e_priv *priv,
- struct mlx5e_tc_flow *flow)
+void mlx5e_tc_detach_mod_hdr(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow,
+ struct mlx5_flow_attr *attr)
{
/* flow wasn't fully initialized */
- if (!flow->mh)
+ if (!attr->mh)
return;
mlx5e_mod_hdr_detach(priv->mdev, get_mod_hdr_table(priv, flow),
- flow->mh);
- flow->mh = NULL;
+ attr->mh);
+ attr->mh = NULL;
}
static
@@ -1017,6 +1048,136 @@ static int mlx5e_hairpin_get_prio(struct mlx5e_priv *priv,
return 0;
}
+static int debugfs_hairpin_queues_set(void *data, u64 val)
+{
+ struct mlx5e_hairpin_params *hp = data;
+
+ if (!val) {
+ mlx5_core_err(hp->mdev,
+ "Number of hairpin queues must be > 0\n");
+ return -EINVAL;
+ }
+
+ hp->num_queues = val;
+
+ return 0;
+}
+
+static int debugfs_hairpin_queues_get(void *data, u64 *val)
+{
+ struct mlx5e_hairpin_params *hp = data;
+
+ *val = hp->num_queues;
+
+ return 0;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(fops_hairpin_queues, debugfs_hairpin_queues_get,
+ debugfs_hairpin_queues_set, "%llu\n");
+
+static int debugfs_hairpin_queue_size_set(void *data, u64 val)
+{
+ struct mlx5e_hairpin_params *hp = data;
+
+ if (val > BIT(MLX5_CAP_GEN(hp->mdev, log_max_hairpin_num_packets))) {
+ mlx5_core_err(hp->mdev,
+ "Invalid hairpin queue size, must be <= %lu\n",
+ BIT(MLX5_CAP_GEN(hp->mdev,
+ log_max_hairpin_num_packets)));
+ return -EINVAL;
+ }
+
+ hp->queue_size = roundup_pow_of_two(val);
+
+ return 0;
+}
+
+static int debugfs_hairpin_queue_size_get(void *data, u64 *val)
+{
+ struct mlx5e_hairpin_params *hp = data;
+
+ *val = hp->queue_size;
+
+ return 0;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(fops_hairpin_queue_size,
+ debugfs_hairpin_queue_size_get,
+ debugfs_hairpin_queue_size_set, "%llu\n");
+
+static int debugfs_hairpin_num_active_get(void *data, u64 *val)
+{
+ struct mlx5e_tc_table *tc = data;
+ struct mlx5e_hairpin_entry *hpe;
+ u32 cnt = 0;
+ u32 bkt;
+
+ mutex_lock(&tc->hairpin_tbl_lock);
+ hash_for_each(tc->hairpin_tbl, bkt, hpe, hairpin_hlist)
+ cnt++;
+ mutex_unlock(&tc->hairpin_tbl_lock);
+
+ *val = cnt;
+
+ return 0;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(fops_hairpin_num_active,
+ debugfs_hairpin_num_active_get, NULL, "%llu\n");
+
+static int debugfs_hairpin_table_dump_show(struct seq_file *file, void *priv)
+
+{
+ struct mlx5e_tc_table *tc = file->private;
+ struct mlx5e_hairpin_entry *hpe;
+ u32 bkt;
+
+ mutex_lock(&tc->hairpin_tbl_lock);
+ hash_for_each(tc->hairpin_tbl, bkt, hpe, hairpin_hlist)
+ seq_printf(file, "Hairpin peer_vhca_id %u prio %u refcnt %u\n",
+ hpe->peer_vhca_id, hpe->prio,
+ refcount_read(&hpe->refcnt));
+ mutex_unlock(&tc->hairpin_tbl_lock);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(debugfs_hairpin_table_dump);
+
+static void mlx5e_tc_debugfs_init(struct mlx5e_tc_table *tc,
+ struct dentry *dfs_root)
+{
+ if (IS_ERR_OR_NULL(dfs_root))
+ return;
+
+ tc->dfs_root = debugfs_create_dir("tc", dfs_root);
+
+ debugfs_create_file("hairpin_num_queues", 0644, tc->dfs_root,
+ &tc->hairpin_params, &fops_hairpin_queues);
+ debugfs_create_file("hairpin_queue_size", 0644, tc->dfs_root,
+ &tc->hairpin_params, &fops_hairpin_queue_size);
+ debugfs_create_file("hairpin_num_active", 0444, tc->dfs_root, tc,
+ &fops_hairpin_num_active);
+ debugfs_create_file("hairpin_table_dump", 0444, tc->dfs_root, tc,
+ &debugfs_hairpin_table_dump_fops);
+}
+
+static void
+mlx5e_hairpin_params_init(struct mlx5e_hairpin_params *hairpin_params,
+ struct mlx5_core_dev *mdev)
+{
+ u64 link_speed64;
+ u32 link_speed;
+
+ hairpin_params->mdev = mdev;
+ /* set hairpin pair per each 50Gbs share of the link */
+ mlx5e_port_max_linkspeed(mdev, &link_speed);
+ link_speed = max_t(u32, link_speed, 50000);
+ link_speed64 = link_speed;
+ do_div(link_speed64, 50000);
+ hairpin_params->num_queues = link_speed64;
+
+ hairpin_params->queue_size =
+ BIT(min_t(u32, 16 - MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev),
+ MLX5_CAP_GEN(mdev, log_max_hairpin_num_packets)));
+}
+
static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
struct mlx5e_tc_flow_parse_attr *parse_attr,
@@ -1028,8 +1189,6 @@ static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
struct mlx5_core_dev *peer_mdev;
struct mlx5e_hairpin_entry *hpe;
struct mlx5e_hairpin *hp;
- u64 link_speed64;
- u32 link_speed;
u8 match_prio;
u16 peer_id;
int err;
@@ -1082,21 +1241,16 @@ static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
hash_hairpin_info(peer_id, match_prio));
mutex_unlock(&tc->hairpin_tbl_lock);
- params.log_data_size = clamp_t(u8, 16,
- MLX5_CAP_GEN(priv->mdev, log_min_hairpin_wq_data_sz),
- MLX5_CAP_GEN(priv->mdev, log_max_hairpin_wq_data_sz));
- params.log_num_packets = params.log_data_size -
- MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(priv->mdev);
- params.log_num_packets = min_t(u8, params.log_num_packets,
- MLX5_CAP_GEN(priv->mdev, log_max_hairpin_num_packets));
+ params.log_num_packets = ilog2(tc->hairpin_params.queue_size);
+ params.log_data_size =
+ clamp_t(u32,
+ params.log_num_packets +
+ MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(priv->mdev),
+ MLX5_CAP_GEN(priv->mdev, log_min_hairpin_wq_data_sz),
+ MLX5_CAP_GEN(priv->mdev, log_max_hairpin_wq_data_sz));
params.q_counter = priv->q_counter;
- /* set hairpin pair per each 50Gbs share of the link */
- mlx5e_port_max_linkspeed(priv->mdev, &link_speed);
- link_speed = max_t(u32, link_speed, 50000);
- link_speed64 = link_speed;
- do_div(link_speed64, 50000);
- params.num_channels = link_speed64;
+ params.num_channels = tc->hairpin_params.num_queues;
hp = mlx5e_hairpin_create(priv, &params, peer_ifindex);
hpe->hp = hp;
@@ -1301,7 +1455,7 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
}
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
- err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
+ err = mlx5e_tc_attach_mod_hdr(priv, flow, attr);
if (err)
return err;
}
@@ -1361,7 +1515,7 @@ static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
mlx5e_mod_hdr_dealloc(&attr->parse_attr->mod_hdr_acts);
- mlx5e_detach_mod_hdr(priv, flow);
+ mlx5e_tc_detach_mod_hdr(priv, flow, attr);
}
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
@@ -1451,7 +1605,7 @@ mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,
goto err_get_chain;
err = mlx5e_tc_match_to_reg_set(esw->dev, &mod_acts, MLX5_FLOW_NAMESPACE_FDB,
- CHAIN_TO_REG, chain_mapping);
+ MAPPED_OBJ_TO_REG, chain_mapping);
if (err)
goto err_reg_set;
@@ -1472,7 +1626,7 @@ skip_restore:
goto err_offload;
}
- flow->slow_mh = mh;
+ flow->attr->slow_mh = mh;
flow->chain_mapping = chain_mapping;
flow_flag_set(flow, SLOW);
@@ -1497,6 +1651,7 @@ err_get_chain:
void mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw,
struct mlx5e_tc_flow *flow)
{
+ struct mlx5e_mod_hdr_handle *slow_mh = flow->attr->slow_mh;
struct mlx5_flow_attr *slow_attr;
slow_attr = mlx5_alloc_flow_attr(MLX5_FLOW_NAMESPACE_FDB);
@@ -1509,16 +1664,16 @@ void mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw,
slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
slow_attr->esw_attr->split_count = 0;
slow_attr->flags |= MLX5_ATTR_FLAG_SLOW_PATH;
- if (flow->slow_mh) {
+ if (slow_mh) {
slow_attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
- slow_attr->modify_hdr = mlx5e_mod_hdr_get(flow->slow_mh);
+ slow_attr->modify_hdr = mlx5e_mod_hdr_get(slow_mh);
}
mlx5e_tc_unoffload_fdb_rules(esw, flow, slow_attr);
- if (flow->slow_mh) {
- mlx5e_mod_hdr_detach(esw->dev, get_mod_hdr_table(flow->priv, flow), flow->slow_mh);
+ if (slow_mh) {
+ mlx5e_mod_hdr_detach(esw->dev, get_mod_hdr_table(flow->priv, flow), slow_mh);
mlx5_chains_put_chain_mapping(esw_chains(esw), flow->chain_mapping);
flow->chain_mapping = 0;
- flow->slow_mh = NULL;
+ flow->attr->slow_mh = NULL;
}
flow_flag_clear(flow, SLOW);
kfree(slow_attr);
@@ -1629,26 +1784,6 @@ int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *ro
return err;
}
-int mlx5e_tc_add_flow_mod_hdr(struct mlx5e_priv *priv,
- struct mlx5e_tc_flow *flow,
- struct mlx5_flow_attr *attr)
-{
- struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts = &attr->parse_attr->mod_hdr_acts;
- struct mlx5_modify_hdr *mod_hdr;
-
- mod_hdr = mlx5_modify_header_alloc(priv->mdev,
- mlx5e_get_flow_namespace(flow),
- mod_hdr_acts->num_actions,
- mod_hdr_acts->actions);
- if (IS_ERR(mod_hdr))
- return PTR_ERR(mod_hdr);
-
- WARN_ON(attr->modify_hdr);
- attr->modify_hdr = mod_hdr;
-
- return 0;
-}
-
static int
set_encap_dests(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
@@ -1768,10 +1903,8 @@ verify_attr_actions(u32 actions, struct netlink_ext_ack *extack)
static int
post_process_attr(struct mlx5e_tc_flow *flow,
struct mlx5_flow_attr *attr,
- bool is_post_act_attr,
struct netlink_ext_ack *extack)
{
- struct mlx5_eswitch *esw = flow->priv->mdev->priv.eswitch;
bool vf_tun;
int err = 0;
@@ -1783,34 +1916,22 @@ post_process_attr(struct mlx5e_tc_flow *flow,
if (err)
goto err_out;
- if (mlx5e_is_eswitch_flow(flow)) {
- err = mlx5_eswitch_add_vlan_action(esw, attr);
+ if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
+ err = mlx5e_tc_attach_mod_hdr(flow->priv, flow, attr);
if (err)
goto err_out;
}
- if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
- if (vf_tun || is_post_act_attr) {
- err = mlx5e_tc_add_flow_mod_hdr(flow->priv, flow, attr);
- if (err)
- goto err_out;
- } else {
- err = mlx5e_attach_mod_hdr(flow->priv, flow, attr->parse_attr);
- if (err)
- goto err_out;
- }
- }
-
if (attr->branch_true &&
attr->branch_true->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
- err = mlx5e_tc_add_flow_mod_hdr(flow->priv, flow, attr->branch_true);
+ err = mlx5e_tc_attach_mod_hdr(flow->priv, flow, attr->branch_true);
if (err)
goto err_out;
}
if (attr->branch_false &&
attr->branch_false->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
- err = mlx5e_tc_add_flow_mod_hdr(flow->priv, flow, attr->branch_false);
+ err = mlx5e_tc_attach_mod_hdr(flow->priv, flow, attr->branch_false);
if (err)
goto err_out;
}
@@ -1924,7 +2045,11 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
esw_attr->int_port = int_port;
}
- err = post_process_attr(flow, attr, false, extack);
+ err = post_process_attr(flow, attr, extack);
+ if (err)
+ goto err_out;
+
+ err = mlx5e_tc_act_stats_add_flow(get_act_stats_handle(priv), flow);
if (err)
goto err_out;
@@ -1998,8 +2123,6 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
if (mlx5_flow_has_geneve_opt(flow))
mlx5_geneve_tlv_option_del(priv->mdev->geneve);
- mlx5_eswitch_del_vlan_action(esw, attr);
-
if (flow->decap_route)
mlx5e_detach_decap_route(priv, flow);
@@ -2009,10 +2132,7 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
mlx5e_mod_hdr_dealloc(&attr->parse_attr->mod_hdr_acts);
- if (vf_tun && attr->modify_hdr)
- mlx5_modify_header_dealloc(priv->mdev, attr->modify_hdr);
- else
- mlx5e_detach_mod_hdr(priv, flow);
+ mlx5e_tc_detach_mod_hdr(priv, flow, attr);
}
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
@@ -2027,13 +2147,12 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
if (flow_flag_test(flow, L3_TO_L2_DECAP))
mlx5e_detach_decap(priv, flow);
+ mlx5e_tc_act_stats_del_flow(get_act_stats_handle(priv), flow);
+
free_flow_post_acts(flow);
free_branch_attr(flow, attr->branch_true);
free_branch_attr(flow, attr->branch_false);
- if (flow->attr->lag.count)
- mlx5_lag_del_mpesw_rule(esw->dev);
-
kvfree(attr->esw_attr->rx_tun_attr);
kvfree(attr->parse_attr);
kfree(flow->attr);
@@ -2492,13 +2611,13 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
err = mlx5e_tc_set_attr_rx_tun(flow, spec);
if (err)
return err;
- } else if (tunnel && tunnel->tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN) {
+ } else if (tunnel) {
struct mlx5_flow_spec *tmp_spec;
tmp_spec = kvzalloc(sizeof(*tmp_spec), GFP_KERNEL);
if (!tmp_spec) {
- NL_SET_ERR_MSG_MOD(extack, "Failed to allocate memory for vxlan tmp spec");
- netdev_warn(priv->netdev, "Failed to allocate memory for vxlan tmp spec");
+ NL_SET_ERR_MSG_MOD(extack, "Failed to allocate memory for tunnel tmp spec");
+ netdev_warn(priv->netdev, "Failed to allocate memory for tunnel tmp spec");
return -ENOMEM;
}
memcpy(tmp_spec, spec, sizeof(*tmp_spec));
@@ -3560,7 +3679,6 @@ out_ok:
static bool
actions_match_supported_fdb(struct mlx5e_priv *priv,
- struct mlx5e_tc_flow_parse_attr *parse_attr,
struct mlx5e_tc_flow *flow,
struct netlink_ext_ack *extack)
{
@@ -3609,7 +3727,7 @@ actions_match_supported(struct mlx5e_priv *priv,
return false;
if (mlx5e_is_eswitch_flow(flow) &&
- !actions_match_supported_fdb(priv, parse_attr, flow, extack))
+ !actions_match_supported_fdb(priv, flow, extack))
return false;
return true;
@@ -3692,10 +3810,13 @@ mlx5e_clone_flow_attr_for_post_act(struct mlx5_flow_attr *attr,
INIT_LIST_HEAD(&attr2->list);
parse_attr->filter_dev = attr->parse_attr->filter_dev;
attr2->action = 0;
+ attr2->counter = NULL;
+ attr->tc_act_cookies_count = 0;
attr2->flags = 0;
attr2->parse_attr = parse_attr;
attr2->dest_chain = 0;
attr2->dest_ft = NULL;
+ attr2->act_id_restore_rule = NULL;
if (ns_type == MLX5_FLOW_NAMESPACE_FDB) {
attr2->esw_attr->out_count = 0;
@@ -3831,7 +3952,7 @@ alloc_flow_post_acts(struct mlx5e_tc_flow *flow, struct netlink_ext_ack *extack)
if (err)
goto out_free;
- err = post_process_attr(flow, attr, true, extack);
+ err = post_process_attr(flow, attr, extack);
if (err)
goto out_free;
@@ -3991,6 +4112,11 @@ parse_branch_ctrl(struct flow_action_entry *act, struct mlx5e_tc_act *tc_act,
jump_state->jumping_attr = attr->branch_false;
jump_state->jump_count = jump_count;
+
+ /* branching action requires its own counter */
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ flow_flag_set(flow, USE_ACT_STATS);
+
return 0;
err_branch_false:
@@ -4051,6 +4177,8 @@ parse_tc_actions(struct mlx5e_tc_act_parse_state *parse_state,
goto out_free;
parse_state->actions |= attr->action;
+ if (!tc_act->stats_action)
+ attr->tc_act_cookies[attr->tc_act_cookies_count++] = act->cookie;
/* Split attr for multi table act if not the last act. */
if (jump_state.jump_target ||
@@ -4184,12 +4312,7 @@ static bool is_lag_dev(struct mlx5e_priv *priv,
static bool is_multiport_eligible(struct mlx5e_priv *priv, struct net_device *out_dev)
{
- if (same_hw_reps(priv, out_dev) &&
- MLX5_CAP_PORT_SELECTION(priv->mdev, port_select_flow_table) &&
- MLX5_CAP_GEN(priv->mdev, create_lag_when_not_master_up))
- return true;
-
- return false;
+ return same_hw_reps(priv, out_dev) && mlx5_lag_is_mpesw(priv->mdev);
}
bool mlx5e_is_valid_eswitch_fwd_dev(struct mlx5e_priv *priv,
@@ -4360,6 +4483,9 @@ static bool is_peer_flow_needed(struct mlx5e_tc_flow *flow)
(is_rep_ingress || act_is_encap))
return true;
+ if (mlx5_lag_is_mpesw(esw_attr->in_mdev))
+ return true;
+
return false;
}
@@ -4398,8 +4524,7 @@ mlx5_free_flow_attr(struct mlx5e_tc_flow *flow, struct mlx5_flow_attr *attr)
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
mlx5e_mod_hdr_dealloc(&attr->parse_attr->mod_hdr_acts);
- if (attr->modify_hdr)
- mlx5_modify_header_dealloc(flow->priv->mdev, attr->modify_hdr);
+ mlx5e_tc_detach_mod_hdr(flow->priv, flow, attr);
}
}
@@ -4492,7 +4617,6 @@ __mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
struct mlx5_core_dev *in_mdev)
{
struct flow_rule *rule = flow_cls_offload_flow_rule(f);
- struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct netlink_ext_ack *extack = f->common.extack;
struct mlx5e_tc_flow_parse_attr *parse_attr;
struct mlx5e_tc_flow *flow;
@@ -4521,33 +4645,21 @@ __mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
if (err)
goto err_free;
- /* always set IP version for indirect table handling */
- flow->attr->ip_version = mlx5e_tc_get_ip_version(&parse_attr->spec, true);
-
err = parse_tc_fdb_actions(priv, &rule->action, flow, extack);
if (err)
goto err_free;
- if (flow->attr->lag.count) {
- err = mlx5_lag_add_mpesw_rule(esw->dev);
- if (err)
- goto err_free;
- }
-
err = mlx5e_tc_add_fdb_flow(priv, flow, extack);
complete_all(&flow->init_done);
if (err) {
if (!(err == -ENETUNREACH && mlx5_lag_is_multipath(in_mdev)))
- goto err_lag;
+ goto err_free;
add_unready_flow(flow);
}
return flow;
-err_lag:
- if (flow->attr->lag.count)
- mlx5_lag_del_mpesw_rule(esw->dev);
err_free:
mlx5e_flow_put(priv, flow);
out:
@@ -4579,8 +4691,10 @@ static int mlx5e_tc_add_fdb_peer_flow(struct flow_cls_offload *f,
* So packets redirected to uplink use the same mdev of the
* original flow and packets redirected from uplink use the
* peer mdev.
+ * In multiport eswitch it's a special case that we need to
+ * keep the original mdev.
*/
- if (attr->in_rep->vport == MLX5_VPORT_UPLINK)
+ if (attr->in_rep->vport == MLX5_VPORT_UPLINK && !mlx5_lag_is_mpesw(priv->mdev))
in_mdev = peer_priv->mdev;
else
in_mdev = priv->mdev;
@@ -4842,6 +4956,12 @@ errout:
return err;
}
+int mlx5e_tc_fill_action_stats(struct mlx5e_priv *priv,
+ struct flow_offload_action *fl_act)
+{
+ return mlx5e_tc_act_stats_fill_stats(get_act_stats_handle(priv), fl_act);
+}
+
int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
struct flow_cls_offload *f, unsigned long flags)
{
@@ -4868,11 +4988,15 @@ int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
}
if (mlx5e_is_offloaded_flow(flow) || flow_flag_test(flow, CT)) {
- counter = mlx5e_tc_get_counter(flow);
- if (!counter)
- goto errout;
+ if (flow_flag_test(flow, USE_ACT_STATS)) {
+ f->use_act_stats = true;
+ } else {
+ counter = mlx5e_tc_get_counter(flow);
+ if (!counter)
+ goto errout;
- mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
+ mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
+ }
}
/* Under multipath it's possible for one rule to be currently
@@ -4888,14 +5012,18 @@ int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
u64 packets2;
u64 lastuse2;
- counter = mlx5e_tc_get_counter(flow->peer_flow);
- if (!counter)
- goto no_peer_counter;
- mlx5_fc_query_cached(counter, &bytes2, &packets2, &lastuse2);
-
- bytes += bytes2;
- packets += packets2;
- lastuse = max_t(u64, lastuse, lastuse2);
+ if (flow_flag_test(flow, USE_ACT_STATS)) {
+ f->use_act_stats = true;
+ } else {
+ counter = mlx5e_tc_get_counter(flow->peer_flow);
+ if (!counter)
+ goto no_peer_counter;
+ mlx5_fc_query_cached(counter, &bytes2, &packets2, &lastuse2);
+
+ bytes += bytes2;
+ packets += packets2;
+ lastuse = max_t(u64, lastuse, lastuse2);
+ }
}
no_peer_counter:
@@ -5220,6 +5348,8 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
tc->ct = mlx5_tc_ct_init(priv, tc->chains, &tc->mod_hdr,
MLX5_FLOW_NAMESPACE_KERNEL, tc->post_act);
+ mlx5e_hairpin_params_init(&tc->hairpin_params, dev);
+
tc->netdevice_nb.notifier_call = mlx5e_tc_netdev_event;
err = register_netdevice_notifier_dev_net(priv->netdev,
&tc->netdevice_nb,
@@ -5230,8 +5360,18 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
goto err_reg;
}
+ mlx5e_tc_debugfs_init(tc, mlx5e_fs_get_debugfs_root(priv->fs));
+
+ tc->action_stats_handle = mlx5e_tc_act_stats_create();
+ if (IS_ERR(tc->action_stats_handle))
+ goto err_act_stats;
+
return 0;
+err_act_stats:
+ unregister_netdevice_notifier_dev_net(priv->netdev,
+ &tc->netdevice_nb,
+ &tc->netdevice_nn);
err_reg:
mlx5_tc_ct_clean(tc->ct);
mlx5e_tc_post_act_destroy(tc->post_act);
@@ -5258,6 +5398,8 @@ void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv)
{
struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
+ debugfs_remove_recursive(tc->dfs_root);
+
if (tc->netdevice_nb.notifier_call)
unregister_netdevice_notifier_dev_net(priv->netdev,
&tc->netdevice_nb,
@@ -5279,6 +5421,7 @@ void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv)
mapping_destroy(tc->mapping);
mlx5_chains_destroy(tc->chains);
mlx5e_tc_nic_destroy_miss_table(priv);
+ mlx5e_tc_act_stats_free(tc->action_stats_handle);
}
int mlx5e_tc_ht_init(struct rhashtable *tc_ht)
@@ -5355,8 +5498,14 @@ int mlx5e_tc_esw_init(struct mlx5_rep_uplink_priv *uplink_priv)
goto err_register_fib_notifier;
}
+ uplink_priv->action_stats_handle = mlx5e_tc_act_stats_create();
+ if (IS_ERR(uplink_priv->action_stats_handle))
+ goto err_action_counter;
+
return 0;
+err_action_counter:
+ mlx5e_tc_tun_cleanup(uplink_priv->encap);
err_register_fib_notifier:
mapping_destroy(uplink_priv->tunnel_enc_opts_mapping);
err_enc_opts_mapping:
@@ -5383,6 +5532,7 @@ void mlx5e_tc_esw_cleanup(struct mlx5_rep_uplink_priv *uplink_priv)
mlx5_tc_ct_clean(uplink_priv->ct_priv);
mlx5e_flow_meters_cleanup(uplink_priv->flow_meters);
mlx5e_tc_post_act_destroy(uplink_priv->post_act);
+ mlx5e_tc_act_stats_free(uplink_priv->action_stats_handle);
}
int mlx5e_tc_num_filters(struct mlx5e_priv *priv, unsigned long flags)
@@ -5456,48 +5606,268 @@ int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
}
}
-bool mlx5e_tc_update_skb(struct mlx5_cqe64 *cqe,
- struct sk_buff *skb)
+static bool mlx5e_tc_restore_tunnel(struct mlx5e_priv *priv, struct sk_buff *skb,
+ struct mlx5e_tc_update_priv *tc_priv,
+ u32 tunnel_id)
{
-#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
- u32 chain = 0, chain_tag, reg_b, zone_restore_id;
- struct mlx5e_priv *priv = netdev_priv(skb->dev);
- struct mlx5_mapped_obj mapped_obj;
- struct tc_skb_ext *tc_skb_ext;
- struct mlx5e_tc_table *tc;
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct tunnel_match_enc_opts enc_opts = {};
+ struct mlx5_rep_uplink_priv *uplink_priv;
+ struct mlx5e_rep_priv *uplink_rpriv;
+ struct metadata_dst *tun_dst;
+ struct tunnel_match_key key;
+ u32 tun_id, enc_opts_id;
+ struct net_device *dev;
int err;
- reg_b = be32_to_cpu(cqe->ft_metadata);
- tc = mlx5e_fs_get_tc(priv->fs);
- chain_tag = reg_b & MLX5E_TC_TABLE_CHAIN_TAG_MASK;
+ enc_opts_id = tunnel_id & ENC_OPTS_BITS_MASK;
+ tun_id = tunnel_id >> ENC_OPTS_BITS;
- err = mapping_find(tc->mapping, chain_tag, &mapped_obj);
+ if (!tun_id)
+ return true;
+
+ uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
+ uplink_priv = &uplink_rpriv->uplink_priv;
+
+ err = mapping_find(uplink_priv->tunnel_mapping, tun_id, &key);
if (err) {
netdev_dbg(priv->netdev,
- "Couldn't find chain for chain tag: %d, err: %d\n",
- chain_tag, err);
+ "Couldn't find tunnel for tun_id: %d, err: %d\n",
+ tun_id, err);
return false;
}
- if (mapped_obj.type == MLX5_MAPPED_OBJ_CHAIN) {
- chain = mapped_obj.chain;
- tc_skb_ext = tc_skb_ext_alloc(skb);
- if (WARN_ON(!tc_skb_ext))
+ if (enc_opts_id) {
+ err = mapping_find(uplink_priv->tunnel_enc_opts_mapping,
+ enc_opts_id, &enc_opts);
+ if (err) {
+ netdev_dbg(priv->netdev,
+ "Couldn't find tunnel (opts) for tun_id: %d, err: %d\n",
+ enc_opts_id, err);
return false;
+ }
+ }
+
+ switch (key.enc_control.addr_type) {
+ case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
+ tun_dst = __ip_tun_set_dst(key.enc_ipv4.src, key.enc_ipv4.dst,
+ key.enc_ip.tos, key.enc_ip.ttl,
+ key.enc_tp.dst, TUNNEL_KEY,
+ key32_to_tunnel_id(key.enc_key_id.keyid),
+ enc_opts.key.len);
+ break;
+ case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
+ tun_dst = __ipv6_tun_set_dst(&key.enc_ipv6.src, &key.enc_ipv6.dst,
+ key.enc_ip.tos, key.enc_ip.ttl,
+ key.enc_tp.dst, 0, TUNNEL_KEY,
+ key32_to_tunnel_id(key.enc_key_id.keyid),
+ enc_opts.key.len);
+ break;
+ default:
+ netdev_dbg(priv->netdev,
+ "Couldn't restore tunnel, unsupported addr_type: %d\n",
+ key.enc_control.addr_type);
+ return false;
+ }
+
+ if (!tun_dst) {
+ netdev_dbg(priv->netdev, "Couldn't restore tunnel, no tun_dst\n");
+ return false;
+ }
+
+ tun_dst->u.tun_info.key.tp_src = key.enc_tp.src;
+
+ if (enc_opts.key.len)
+ ip_tunnel_info_opts_set(&tun_dst->u.tun_info,
+ enc_opts.key.data,
+ enc_opts.key.len,
+ enc_opts.key.dst_opt_type);
+
+ skb_dst_set(skb, (struct dst_entry *)tun_dst);
+ dev = dev_get_by_index(&init_net, key.filter_ifindex);
+ if (!dev) {
+ netdev_dbg(priv->netdev,
+ "Couldn't find tunnel device with ifindex: %d\n",
+ key.filter_ifindex);
+ return false;
+ }
- tc_skb_ext->chain = chain;
+ /* Set fwd_dev so we do dev_put() after datapath */
+ tc_priv->fwd_dev = dev;
- zone_restore_id = (reg_b >> MLX5_REG_MAPPING_MOFFSET(NIC_ZONE_RESTORE_TO_REG)) &
- ESW_ZONE_ID_MASK;
+ skb->dev = dev;
- if (!mlx5e_tc_ct_restore_flow(tc->ct, skb,
- zone_restore_id))
+ return true;
+}
+
+static bool mlx5e_tc_restore_skb_tc_meta(struct sk_buff *skb, struct mlx5_tc_ct_priv *ct_priv,
+ struct mlx5_mapped_obj *mapped_obj, u32 zone_restore_id,
+ u32 tunnel_id, struct mlx5e_tc_update_priv *tc_priv)
+{
+ struct mlx5e_priv *priv = netdev_priv(skb->dev);
+ struct tc_skb_ext *tc_skb_ext;
+ u64 act_miss_cookie;
+ u32 chain;
+
+ chain = mapped_obj->type == MLX5_MAPPED_OBJ_CHAIN ? mapped_obj->chain : 0;
+ act_miss_cookie = mapped_obj->type == MLX5_MAPPED_OBJ_ACT_MISS ?
+ mapped_obj->act_miss_cookie : 0;
+ if (chain || act_miss_cookie) {
+ if (!mlx5e_tc_ct_restore_flow(ct_priv, skb, zone_restore_id))
return false;
- } else {
+
+ tc_skb_ext = tc_skb_ext_alloc(skb);
+ if (!tc_skb_ext) {
+ WARN_ON(1);
+ return false;
+ }
+
+ if (act_miss_cookie) {
+ tc_skb_ext->act_miss_cookie = act_miss_cookie;
+ tc_skb_ext->act_miss = 1;
+ } else {
+ tc_skb_ext->chain = chain;
+ }
+ }
+
+ if (tc_priv)
+ return mlx5e_tc_restore_tunnel(priv, skb, tc_priv, tunnel_id);
+
+ return true;
+}
+
+static void mlx5e_tc_restore_skb_sample(struct mlx5e_priv *priv, struct sk_buff *skb,
+ struct mlx5_mapped_obj *mapped_obj,
+ struct mlx5e_tc_update_priv *tc_priv)
+{
+ if (!mlx5e_tc_restore_tunnel(priv, skb, tc_priv, mapped_obj->sample.tunnel_id)) {
+ netdev_dbg(priv->netdev,
+ "Failed to restore tunnel info for sampled packet\n");
+ return;
+ }
+ mlx5e_tc_sample_skb(skb, mapped_obj);
+}
+
+static bool mlx5e_tc_restore_skb_int_port(struct mlx5e_priv *priv, struct sk_buff *skb,
+ struct mlx5_mapped_obj *mapped_obj,
+ struct mlx5e_tc_update_priv *tc_priv,
+ u32 tunnel_id)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5_rep_uplink_priv *uplink_priv;
+ struct mlx5e_rep_priv *uplink_rpriv;
+ bool forward_tx = false;
+
+ /* Tunnel restore takes precedence over int port restore */
+ if (tunnel_id)
+ return mlx5e_tc_restore_tunnel(priv, skb, tc_priv, tunnel_id);
+
+ uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
+ uplink_priv = &uplink_rpriv->uplink_priv;
+
+ if (mlx5e_tc_int_port_dev_fwd(uplink_priv->int_port_priv, skb,
+ mapped_obj->int_port_metadata, &forward_tx)) {
+ /* Set fwd_dev for future dev_put */
+ tc_priv->fwd_dev = skb->dev;
+ tc_priv->forward_tx = forward_tx;
+
+ return true;
+ }
+
+ return false;
+}
+
+bool mlx5e_tc_update_skb(struct mlx5_cqe64 *cqe, struct sk_buff *skb,
+ struct mapping_ctx *mapping_ctx, u32 mapped_obj_id,
+ struct mlx5_tc_ct_priv *ct_priv,
+ u32 zone_restore_id, u32 tunnel_id,
+ struct mlx5e_tc_update_priv *tc_priv)
+{
+ struct mlx5e_priv *priv = netdev_priv(skb->dev);
+ struct mlx5_mapped_obj mapped_obj;
+ int err;
+
+ err = mapping_find(mapping_ctx, mapped_obj_id, &mapped_obj);
+ if (err) {
+ netdev_dbg(skb->dev,
+ "Couldn't find mapped object for mapped_obj_id: %d, err: %d\n",
+ mapped_obj_id, err);
+ return false;
+ }
+
+ switch (mapped_obj.type) {
+ case MLX5_MAPPED_OBJ_CHAIN:
+ case MLX5_MAPPED_OBJ_ACT_MISS:
+ return mlx5e_tc_restore_skb_tc_meta(skb, ct_priv, &mapped_obj, zone_restore_id,
+ tunnel_id, tc_priv);
+ case MLX5_MAPPED_OBJ_SAMPLE:
+ mlx5e_tc_restore_skb_sample(priv, skb, &mapped_obj, tc_priv);
+ tc_priv->skb_done = true;
+ return true;
+ case MLX5_MAPPED_OBJ_INT_PORT_METADATA:
+ return mlx5e_tc_restore_skb_int_port(priv, skb, &mapped_obj, tc_priv, tunnel_id);
+ default:
netdev_dbg(priv->netdev, "Invalid mapped object type: %d\n", mapped_obj.type);
return false;
}
-#endif /* CONFIG_NET_TC_SKB_EXT */
- return true;
+ return false;
+}
+
+bool mlx5e_tc_update_skb_nic(struct mlx5_cqe64 *cqe, struct sk_buff *skb)
+{
+ struct mlx5e_priv *priv = netdev_priv(skb->dev);
+ u32 mapped_obj_id, reg_b, zone_restore_id;
+ struct mlx5_tc_ct_priv *ct_priv;
+ struct mapping_ctx *mapping_ctx;
+ struct mlx5e_tc_table *tc;
+
+ reg_b = be32_to_cpu(cqe->ft_metadata);
+ tc = mlx5e_fs_get_tc(priv->fs);
+ mapped_obj_id = reg_b & MLX5E_TC_TABLE_CHAIN_TAG_MASK;
+ zone_restore_id = (reg_b >> MLX5_REG_MAPPING_MOFFSET(NIC_ZONE_RESTORE_TO_REG)) &
+ ESW_ZONE_ID_MASK;
+ ct_priv = tc->ct;
+ mapping_ctx = tc->mapping;
+
+ return mlx5e_tc_update_skb(cqe, skb, mapping_ctx, mapped_obj_id, ct_priv, zone_restore_id,
+ 0, NULL);
+}
+
+int mlx5e_tc_action_miss_mapping_get(struct mlx5e_priv *priv, struct mlx5_flow_attr *attr,
+ u64 act_miss_cookie, u32 *act_miss_mapping)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5_mapped_obj mapped_obj = {};
+ struct mapping_ctx *ctx;
+ int err;
+
+ ctx = esw->offloads.reg_c0_obj_pool;
+
+ mapped_obj.type = MLX5_MAPPED_OBJ_ACT_MISS;
+ mapped_obj.act_miss_cookie = act_miss_cookie;
+ err = mapping_add(ctx, &mapped_obj, act_miss_mapping);
+ if (err)
+ return err;
+
+ attr->act_id_restore_rule = esw_add_restore_rule(esw, *act_miss_mapping);
+ if (IS_ERR(attr->act_id_restore_rule))
+ goto err_rule;
+
+ return 0;
+
+err_rule:
+ mapping_remove(ctx, *act_miss_mapping);
+ return err;
+}
+
+void mlx5e_tc_action_miss_mapping_put(struct mlx5e_priv *priv, struct mlx5_flow_attr *attr,
+ u32 act_miss_mapping)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mapping_ctx *ctx;
+
+ ctx = esw->offloads.reg_c0_obj_pool;
+ mlx5_del_flow_rules(attr->act_id_restore_rule);
+ mapping_remove(ctx, act_miss_mapping);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
index 50af70ef22f3..adb39e30f90f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
@@ -59,6 +59,8 @@ int mlx5e_tc_num_filters(struct mlx5e_priv *priv, unsigned long flags);
struct mlx5e_tc_update_priv {
struct net_device *fwd_dev;
+ bool skb_done;
+ bool forward_tx;
};
struct mlx5_nic_flow_attr {
@@ -69,35 +71,33 @@ struct mlx5_nic_flow_attr {
struct mlx5_flow_attr {
u32 action;
+ unsigned long tc_act_cookies[TCA_ACT_MAX_PRIO];
struct mlx5_fc *counter;
struct mlx5_modify_hdr *modify_hdr;
+ struct mlx5e_mod_hdr_handle *mh; /* attached mod header instance */
+ struct mlx5e_mod_hdr_handle *slow_mh; /* attached mod header instance for slow path */
struct mlx5_ct_attr ct_attr;
struct mlx5e_sample_attr sample_attr;
struct mlx5e_meter_attr meter_attr;
struct mlx5e_tc_flow_parse_attr *parse_attr;
u32 chain;
u16 prio;
+ u16 tc_act_cookies_count;
u32 dest_chain;
struct mlx5_flow_table *ft;
struct mlx5_flow_table *dest_ft;
u8 inner_match_level;
u8 outer_match_level;
- u8 ip_version;
u8 tun_ip_version;
int tunnel_id; /* mapped tunnel id */
u32 flags;
u32 exe_aso_type;
struct list_head list;
struct mlx5e_post_act_handle *post_act_handle;
- struct {
- /* Indicate whether the parsed flow should be counted for lag mode decision
- * making
- */
- bool count;
- } lag;
struct mlx5_flow_attr *branch_true;
struct mlx5_flow_attr *branch_false;
struct mlx5_flow_attr *jumping_attr;
+ struct mlx5_flow_handle *act_id_restore_rule;
/* keep this union last */
union {
DECLARE_FLEX_ARRAY(struct mlx5_esw_flow_attr, esw_attr);
@@ -134,7 +134,6 @@ struct mlx5_rx_tun_attr {
__be32 v4;
struct in6_addr v6;
} dst_ip; /* Valid if decap_vport is not zero */
- u32 vni;
};
#define MLX5E_TC_TABLE_CHAIN_TAG_BITS 16
@@ -197,6 +196,8 @@ int mlx5e_delete_flower(struct net_device *dev, struct mlx5e_priv *priv,
int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
struct flow_cls_offload *f, unsigned long flags);
+int mlx5e_tc_fill_action_stats(struct mlx5e_priv *priv,
+ struct flow_offload_action *fl_act);
int mlx5e_tc_configure_matchall(struct mlx5e_priv *priv,
struct tc_cls_matchall_offload *f);
@@ -227,7 +228,7 @@ void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe);
void mlx5e_tc_reoffload_flows_work(struct work_struct *work);
enum mlx5e_tc_attr_to_reg {
- CHAIN_TO_REG,
+ MAPPED_OBJ_TO_REG,
VPORT_TO_REG,
TUNNEL_TO_REG,
CTSTATE_TO_REG,
@@ -236,7 +237,7 @@ enum mlx5e_tc_attr_to_reg {
MARK_TO_REG,
LABELS_TO_REG,
FTEID_TO_REG,
- NIC_CHAIN_TO_REG,
+ NIC_MAPPED_OBJ_TO_REG,
NIC_ZONE_RESTORE_TO_REG,
PACKET_COLOR_TO_REG,
};
@@ -285,9 +286,13 @@ int mlx5e_tc_match_to_reg_set_and_get_id(struct mlx5_core_dev *mdev,
enum mlx5e_tc_attr_to_reg type,
u32 data);
-int mlx5e_tc_add_flow_mod_hdr(struct mlx5e_priv *priv,
- struct mlx5e_tc_flow *flow,
- struct mlx5_flow_attr *attr);
+int mlx5e_tc_attach_mod_hdr(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow,
+ struct mlx5_flow_attr *attr);
+
+void mlx5e_tc_detach_mod_hdr(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow,
+ struct mlx5_flow_attr *attr);
void mlx5e_tc_set_ethertype(struct mlx5_core_dev *mdev,
struct flow_match_basic *match, bool outer,
@@ -366,7 +371,6 @@ struct mlx5e_tc_table *mlx5e_tc_table_alloc(void);
void mlx5e_tc_table_free(struct mlx5e_tc_table *tc);
static inline bool mlx5e_cqe_regb_chain(struct mlx5_cqe64 *cqe)
{
-#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
u32 chain, reg_b;
reg_b = be32_to_cpu(cqe->ft_metadata);
@@ -377,20 +381,29 @@ static inline bool mlx5e_cqe_regb_chain(struct mlx5_cqe64 *cqe)
chain = reg_b & MLX5E_TC_TABLE_CHAIN_TAG_MASK;
if (chain)
return true;
-#endif
return false;
}
-bool mlx5e_tc_update_skb(struct mlx5_cqe64 *cqe, struct sk_buff *skb);
+bool mlx5e_tc_update_skb_nic(struct mlx5_cqe64 *cqe, struct sk_buff *skb);
+bool mlx5e_tc_update_skb(struct mlx5_cqe64 *cqe, struct sk_buff *skb,
+ struct mapping_ctx *mapping_ctx, u32 mapped_obj_id,
+ struct mlx5_tc_ct_priv *ct_priv,
+ u32 zone_restore_id, u32 tunnel_id,
+ struct mlx5e_tc_update_priv *tc_priv);
#else /* CONFIG_MLX5_CLS_ACT */
static inline struct mlx5e_tc_table *mlx5e_tc_table_alloc(void) { return NULL; }
static inline void mlx5e_tc_table_free(struct mlx5e_tc_table *tc) {}
static inline bool mlx5e_cqe_regb_chain(struct mlx5_cqe64 *cqe)
{ return false; }
static inline bool
-mlx5e_tc_update_skb(struct mlx5_cqe64 *cqe, struct sk_buff *skb)
+mlx5e_tc_update_skb_nic(struct mlx5_cqe64 *cqe, struct sk_buff *skb)
{ return true; }
#endif
+int mlx5e_tc_action_miss_mapping_get(struct mlx5e_priv *priv, struct mlx5_flow_attr *attr,
+ u64 act_miss_cookie, u32 *act_miss_mapping);
+void mlx5e_tc_action_miss_mapping_put(struct mlx5e_priv *priv, struct mlx5_flow_attr *attr,
+ u32 act_miss_mapping);
+
#endif /* __MLX5_EN_TC_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index f7897ddb29c5..df5e780e8e6a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -720,21 +720,6 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
-void mlx5e_sq_xmit_simple(struct mlx5e_txqsq *sq, struct sk_buff *skb, bool xmit_more)
-{
- struct mlx5e_tx_wqe_attr wqe_attr;
- struct mlx5e_tx_attr attr;
- struct mlx5e_tx_wqe *wqe;
- u16 pi;
-
- mlx5e_sq_xmit_prepare(sq, skb, NULL, &attr);
- mlx5e_sq_calc_wqe_attr(skb, &attr, &wqe_attr);
- pi = mlx5e_txqsq_get_next_pi(sq, wqe_attr.num_wqebbs);
- wqe = MLX5E_TX_FETCH_WQE(sq, pi);
- mlx5e_txwqe_build_eseg_csum(sq, skb, NULL, &wqe->eth);
- mlx5e_sq_xmit_wqe(sq, skb, &attr, &wqe_attr, wqe, pi, xmit_more);
-}
-
static void mlx5e_tx_wi_dma_unmap(struct mlx5e_txqsq *sq, struct mlx5e_tx_wqe_info *wi,
u32 *dma_fifo_cc)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 8f7580fec193..38b32e98f3bd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -629,9 +629,9 @@ static u16 async_eq_depth_devlink_param_get(struct mlx5_core_dev *dev)
union devlink_param_value val;
int err;
- err = devlink_param_driverinit_value_get(devlink,
- DEVLINK_PARAM_GENERIC_ID_EVENT_EQ_SIZE,
- &val);
+ err = devl_param_driverinit_value_get(devlink,
+ DEVLINK_PARAM_GENERIC_ID_EVENT_EQ_SIZE,
+ &val);
if (!err)
return val.vu32;
mlx5_core_dbg(dev, "Failed to get param. using default. err = %d\n", err);
@@ -817,9 +817,12 @@ static void comp_irqs_release(struct mlx5_core_dev *dev)
static int comp_irqs_request(struct mlx5_core_dev *dev)
{
struct mlx5_eq_table *table = dev->priv.eq_table;
+ const struct cpumask *prev = cpu_none_mask;
+ const struct cpumask *mask;
int ncomp_eqs = table->num_comp_eqs;
u16 *cpus;
int ret;
+ int cpu;
int i;
ncomp_eqs = table->num_comp_eqs;
@@ -838,8 +841,19 @@ static int comp_irqs_request(struct mlx5_core_dev *dev)
ret = -ENOMEM;
goto free_irqs;
}
- for (i = 0; i < ncomp_eqs; i++)
- cpus[i] = cpumask_local_spread(i, dev->priv.numa_node);
+
+ i = 0;
+ rcu_read_lock();
+ for_each_numa_hop_mask(mask, dev->priv.numa_node) {
+ for_each_cpu_andnot(cpu, mask, prev) {
+ cpus[i] = cpu;
+ if (++i == ncomp_eqs)
+ goto spread_done;
+ }
+ prev = mask;
+ }
+spread_done:
+ rcu_read_unlock();
ret = mlx5_irqs_request_vectors(dev, cpus, ncomp_eqs, table->comp_irqs);
kfree(cpus);
if (ret < 0)
@@ -874,9 +888,9 @@ static u16 comp_eq_depth_devlink_param_get(struct mlx5_core_dev *dev)
union devlink_param_value val;
int err;
- err = devlink_param_driverinit_value_get(devlink,
- DEVLINK_PARAM_GENERIC_ID_IO_EQ_SIZE,
- &val);
+ err = devl_param_driverinit_value_get(devlink,
+ DEVLINK_PARAM_GENERIC_ID_IO_EQ_SIZE,
+ &val);
if (!err)
return val.vu32;
mlx5_core_dbg(dev, "Failed to get param. using default. err = %d\n", err);
@@ -946,11 +960,11 @@ static int vector2eqnirqn(struct mlx5_core_dev *dev, int vector, int *eqn,
unsigned int *irqn)
{
struct mlx5_eq_table *table = dev->priv.eq_table;
- struct mlx5_eq_comp *eq, *n;
+ struct mlx5_eq_comp *eq;
int err = -ENOENT;
int i = 0;
- list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
+ list_for_each_entry(eq, &table->comp_eqs_list, list) {
if (i++ == vector) {
if (irqn)
*irqn = eq->core.irqn;
@@ -985,10 +999,10 @@ struct cpumask *
mlx5_comp_irq_get_affinity_mask(struct mlx5_core_dev *dev, int vector)
{
struct mlx5_eq_table *table = dev->priv.eq_table;
- struct mlx5_eq_comp *eq, *n;
+ struct mlx5_eq_comp *eq;
int i = 0;
- list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
+ list_for_each_entry(eq, &table->comp_eqs_list, list) {
if (i++ == vector)
break;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c
index a994e71e05c1..d55775627a47 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c
@@ -356,8 +356,8 @@ void esw_acl_ingress_ofld_cleanup(struct mlx5_eswitch *esw,
}
/* Caller must hold rtnl_lock */
-int mlx5_esw_acl_ingress_vport_bond_update(struct mlx5_eswitch *esw, u16 vport_num,
- u32 metadata)
+int mlx5_esw_acl_ingress_vport_metadata_update(struct mlx5_eswitch *esw, u16 vport_num,
+ u32 metadata)
{
struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
int err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ofld.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ofld.h
index 11d3d3978848..c9f8469e9a47 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ofld.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ofld.h
@@ -24,8 +24,8 @@ static inline bool mlx5_esw_acl_egress_fwd2vport_supported(struct mlx5_eswitch *
/* Eswitch acl ingress external APIs */
int esw_acl_ingress_ofld_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
void esw_acl_ingress_ofld_cleanup(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
-int mlx5_esw_acl_ingress_vport_bond_update(struct mlx5_eswitch *esw, u16 vport_num,
- u32 metadata);
+int mlx5_esw_acl_ingress_vport_metadata_update(struct mlx5_eswitch *esw, u16 vport_num,
+ u32 metadata);
void mlx5_esw_acl_ingress_vport_drop_rule_destroy(struct mlx5_eswitch *esw, u16 vport_num);
int mlx5_esw_acl_ingress_vport_drop_rule_create(struct mlx5_eswitch *esw, u16 vport_num);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
index b176648d1343..3cdcb0e0b20f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
@@ -1715,7 +1715,7 @@ void mlx5_esw_bridge_fdb_update_used(struct net_device *dev, u16 vport_num, u16
struct mlx5_esw_bridge *bridge;
port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
- if (!port || port->flags & MLX5_ESW_BRIDGE_PORT_FLAG_PEER)
+ if (!port)
return;
bridge = port->bridge;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c
index c9a91158e99c..9959e9fd15a1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c
@@ -16,18 +16,12 @@
#include "lib/fs_chains.h"
#include "en/mod_hdr.h"
-#define MLX5_ESW_INDIR_TABLE_SIZE 128
-#define MLX5_ESW_INDIR_TABLE_RECIRC_IDX_MAX (MLX5_ESW_INDIR_TABLE_SIZE - 2)
+#define MLX5_ESW_INDIR_TABLE_SIZE 2
+#define MLX5_ESW_INDIR_TABLE_RECIRC_IDX (MLX5_ESW_INDIR_TABLE_SIZE - 2)
#define MLX5_ESW_INDIR_TABLE_FWD_IDX (MLX5_ESW_INDIR_TABLE_SIZE - 1)
struct mlx5_esw_indir_table_rule {
- struct list_head list;
struct mlx5_flow_handle *handle;
- union {
- __be32 v4;
- struct in6_addr v6;
- } dst_ip;
- u32 vni;
struct mlx5_modify_hdr *mh;
refcount_t refcnt;
};
@@ -38,12 +32,10 @@ struct mlx5_esw_indir_table_entry {
struct mlx5_flow_group *recirc_grp;
struct mlx5_flow_group *fwd_grp;
struct mlx5_flow_handle *fwd_rule;
- struct list_head recirc_rules;
- int recirc_cnt;
+ struct mlx5_esw_indir_table_rule *recirc_rule;
int fwd_ref;
u16 vport;
- u8 ip_version;
};
struct mlx5_esw_indir_table {
@@ -89,7 +81,6 @@ mlx5_esw_indir_table_needed(struct mlx5_eswitch *esw,
return esw_attr->in_rep->vport == MLX5_VPORT_UPLINK &&
vf_sf_vport &&
esw->dev == dest_mdev &&
- attr->ip_version &&
attr->flags & MLX5_ATTR_FLAG_SRC_REWRITE;
}
@@ -101,27 +92,8 @@ mlx5_esw_indir_table_decap_vport(struct mlx5_flow_attr *attr)
return esw_attr->rx_tun_attr ? esw_attr->rx_tun_attr->decap_vport : 0;
}
-static struct mlx5_esw_indir_table_rule *
-mlx5_esw_indir_table_rule_lookup(struct mlx5_esw_indir_table_entry *e,
- struct mlx5_esw_flow_attr *attr)
-{
- struct mlx5_esw_indir_table_rule *rule;
-
- list_for_each_entry(rule, &e->recirc_rules, list)
- if (rule->vni == attr->rx_tun_attr->vni &&
- !memcmp(&rule->dst_ip, &attr->rx_tun_attr->dst_ip,
- sizeof(attr->rx_tun_attr->dst_ip)))
- goto found;
- return NULL;
-
-found:
- refcount_inc(&rule->refcnt);
- return rule;
-}
-
static int mlx5_esw_indir_table_rule_get(struct mlx5_eswitch *esw,
struct mlx5_flow_attr *attr,
- struct mlx5_flow_spec *spec,
struct mlx5_esw_indir_table_entry *e)
{
struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
@@ -130,73 +102,18 @@ static int mlx5_esw_indir_table_rule_get(struct mlx5_eswitch *esw,
struct mlx5_flow_destination dest = {};
struct mlx5_esw_indir_table_rule *rule;
struct mlx5_flow_act flow_act = {};
- struct mlx5_flow_spec *rule_spec;
struct mlx5_flow_handle *handle;
int err = 0;
u32 data;
- rule = mlx5_esw_indir_table_rule_lookup(e, esw_attr);
- if (rule)
+ if (e->recirc_rule) {
+ refcount_inc(&e->recirc_rule->refcnt);
return 0;
-
- if (e->recirc_cnt == MLX5_ESW_INDIR_TABLE_RECIRC_IDX_MAX)
- return -EINVAL;
-
- rule_spec = kvzalloc(sizeof(*rule_spec), GFP_KERNEL);
- if (!rule_spec)
- return -ENOMEM;
-
- rule = kzalloc(sizeof(*rule), GFP_KERNEL);
- if (!rule) {
- err = -ENOMEM;
- goto out;
- }
-
- rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS |
- MLX5_MATCH_MISC_PARAMETERS |
- MLX5_MATCH_MISC_PARAMETERS_2;
- if (MLX5_CAP_FLOWTABLE_NIC_RX(esw->dev, ft_field_support.outer_ip_version)) {
- MLX5_SET(fte_match_param, rule_spec->match_criteria,
- outer_headers.ip_version, 0xf);
- MLX5_SET(fte_match_param, rule_spec->match_value, outer_headers.ip_version,
- attr->ip_version);
- } else if (attr->ip_version) {
- MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
- outer_headers.ethertype);
- MLX5_SET(fte_match_param, rule_spec->match_value, outer_headers.ethertype,
- (attr->ip_version == 4 ? ETH_P_IP : ETH_P_IPV6));
- } else {
- err = -EOPNOTSUPP;
- goto err_ethertype;
}
- if (attr->ip_version == 4) {
- MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
- outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
- MLX5_SET(fte_match_param, rule_spec->match_value,
- outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
- ntohl(esw_attr->rx_tun_attr->dst_ip.v4));
- } else if (attr->ip_version == 6) {
- int len = sizeof(struct in6_addr);
-
- memset(MLX5_ADDR_OF(fte_match_param, rule_spec->match_criteria,
- outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
- 0xff, len);
- memcpy(MLX5_ADDR_OF(fte_match_param, rule_spec->match_value,
- outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
- &esw_attr->rx_tun_attr->dst_ip.v6, len);
- }
-
- MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
- misc_parameters.vxlan_vni);
- MLX5_SET(fte_match_param, rule_spec->match_value, misc_parameters.vxlan_vni,
- MLX5_GET(fte_match_param, spec->match_value, misc_parameters.vxlan_vni));
-
- MLX5_SET(fte_match_param, rule_spec->match_criteria,
- misc_parameters_2.metadata_reg_c_0, mlx5_eswitch_get_vport_metadata_mask());
- MLX5_SET(fte_match_param, rule_spec->match_value, misc_parameters_2.metadata_reg_c_0,
- mlx5_eswitch_get_vport_metadata_for_match(esw_attr->in_mdev->priv.eswitch,
- MLX5_VPORT_UPLINK));
+ rule = kzalloc(sizeof(*rule), GFP_KERNEL);
+ if (!rule)
+ return -ENOMEM;
/* Modify flow source to recirculate packet */
data = mlx5_eswitch_get_vport_metadata_for_set(esw, esw_attr->rx_tun_attr->decap_vport);
@@ -219,13 +136,14 @@ static int mlx5_esw_indir_table_rule_get(struct mlx5_eswitch *esw,
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
flow_act.flags = FLOW_ACT_IGNORE_FLOW_LEVEL | FLOW_ACT_NO_APPEND;
+ flow_act.fg = e->recirc_grp;
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest.ft = mlx5_chains_get_table(chains, 0, 1, 0);
if (IS_ERR(dest.ft)) {
err = PTR_ERR(dest.ft);
goto err_table;
}
- handle = mlx5_add_flow_rules(e->ft, rule_spec, &flow_act, &dest, 1);
+ handle = mlx5_add_flow_rules(e->ft, NULL, &flow_act, &dest, 1);
if (IS_ERR(handle)) {
err = PTR_ERR(handle);
goto err_handle;
@@ -233,14 +151,10 @@ static int mlx5_esw_indir_table_rule_get(struct mlx5_eswitch *esw,
mlx5e_mod_hdr_dealloc(&mod_acts);
rule->handle = handle;
- rule->vni = esw_attr->rx_tun_attr->vni;
rule->mh = flow_act.modify_hdr;
- memcpy(&rule->dst_ip, &esw_attr->rx_tun_attr->dst_ip,
- sizeof(esw_attr->rx_tun_attr->dst_ip));
refcount_set(&rule->refcnt, 1);
- list_add(&rule->list, &e->recirc_rules);
- e->recirc_cnt++;
- goto out;
+ e->recirc_rule = rule;
+ return 0;
err_handle:
mlx5_chains_put_table(chains, 0, 1, 0);
@@ -250,89 +164,44 @@ err_mod_hdr_alloc:
err_mod_hdr_regc1:
mlx5e_mod_hdr_dealloc(&mod_acts);
err_mod_hdr_regc0:
-err_ethertype:
kfree(rule);
-out:
- kvfree(rule_spec);
return err;
}
static void mlx5_esw_indir_table_rule_put(struct mlx5_eswitch *esw,
- struct mlx5_flow_attr *attr,
struct mlx5_esw_indir_table_entry *e)
{
- struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
+ struct mlx5_esw_indir_table_rule *rule = e->recirc_rule;
struct mlx5_fs_chains *chains = esw_chains(esw);
- struct mlx5_esw_indir_table_rule *rule;
- list_for_each_entry(rule, &e->recirc_rules, list)
- if (rule->vni == esw_attr->rx_tun_attr->vni &&
- !memcmp(&rule->dst_ip, &esw_attr->rx_tun_attr->dst_ip,
- sizeof(esw_attr->rx_tun_attr->dst_ip)))
- goto found;
-
- return;
+ if (!rule)
+ return;
-found:
if (!refcount_dec_and_test(&rule->refcnt))
return;
mlx5_del_flow_rules(rule->handle);
mlx5_chains_put_table(chains, 0, 1, 0);
mlx5_modify_header_dealloc(esw->dev, rule->mh);
- list_del(&rule->list);
kfree(rule);
- e->recirc_cnt--;
+ e->recirc_rule = NULL;
}
-static int mlx5_create_indir_recirc_group(struct mlx5_eswitch *esw,
- struct mlx5_flow_attr *attr,
- struct mlx5_flow_spec *spec,
- struct mlx5_esw_indir_table_entry *e)
+static int mlx5_create_indir_recirc_group(struct mlx5_esw_indir_table_entry *e)
{
int err = 0, inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
- u32 *in, *match;
+ u32 *in;
in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
- MLX5_SET(create_flow_group_in, in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS |
- MLX5_MATCH_MISC_PARAMETERS | MLX5_MATCH_MISC_PARAMETERS_2);
- match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
-
- if (MLX5_CAP_FLOWTABLE_NIC_RX(esw->dev, ft_field_support.outer_ip_version))
- MLX5_SET(fte_match_param, match, outer_headers.ip_version, 0xf);
- else
- MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.ethertype);
-
- if (attr->ip_version == 4) {
- MLX5_SET_TO_ONES(fte_match_param, match,
- outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
- } else if (attr->ip_version == 6) {
- memset(MLX5_ADDR_OF(fte_match_param, match,
- outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
- 0xff, sizeof(struct in6_addr));
- } else {
- err = -EOPNOTSUPP;
- goto out;
- }
-
- MLX5_SET_TO_ONES(fte_match_param, match, misc_parameters.vxlan_vni);
- MLX5_SET(fte_match_param, match, misc_parameters_2.metadata_reg_c_0,
- mlx5_eswitch_get_vport_metadata_mask());
MLX5_SET(create_flow_group_in, in, start_flow_index, 0);
- MLX5_SET(create_flow_group_in, in, end_flow_index, MLX5_ESW_INDIR_TABLE_RECIRC_IDX_MAX);
+ MLX5_SET(create_flow_group_in, in, end_flow_index, MLX5_ESW_INDIR_TABLE_RECIRC_IDX);
e->recirc_grp = mlx5_create_flow_group(e->ft, in);
- if (IS_ERR(e->recirc_grp)) {
+ if (IS_ERR(e->recirc_grp))
err = PTR_ERR(e->recirc_grp);
- goto out;
- }
- INIT_LIST_HEAD(&e->recirc_rules);
- e->recirc_cnt = 0;
-
-out:
kvfree(in);
return err;
}
@@ -343,19 +212,12 @@ static int mlx5_create_indir_fwd_group(struct mlx5_eswitch *esw,
int err = 0, inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5_flow_destination dest = {};
struct mlx5_flow_act flow_act = {};
- struct mlx5_flow_spec *spec;
u32 *in;
in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
- spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
- if (!spec) {
- kvfree(in);
- return -ENOMEM;
- }
-
/* Hold one entry */
MLX5_SET(create_flow_group_in, in, start_flow_index, MLX5_ESW_INDIR_TABLE_FWD_IDX);
MLX5_SET(create_flow_group_in, in, end_flow_index, MLX5_ESW_INDIR_TABLE_FWD_IDX);
@@ -366,25 +228,25 @@ static int mlx5_create_indir_fwd_group(struct mlx5_eswitch *esw,
}
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ flow_act.fg = e->fwd_grp;
dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
dest.vport.num = e->vport;
dest.vport.vhca_id = MLX5_CAP_GEN(esw->dev, vhca_id);
dest.vport.flags = MLX5_FLOW_DEST_VPORT_VHCA_ID;
- e->fwd_rule = mlx5_add_flow_rules(e->ft, spec, &flow_act, &dest, 1);
+ e->fwd_rule = mlx5_add_flow_rules(e->ft, NULL, &flow_act, &dest, 1);
if (IS_ERR(e->fwd_rule)) {
mlx5_destroy_flow_group(e->fwd_grp);
err = PTR_ERR(e->fwd_rule);
}
err_out:
- kvfree(spec);
kvfree(in);
return err;
}
static struct mlx5_esw_indir_table_entry *
mlx5_esw_indir_table_entry_create(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr,
- struct mlx5_flow_spec *spec, u16 vport, bool decap)
+ u16 vport, bool decap)
{
struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_flow_namespace *root_ns;
@@ -412,15 +274,14 @@ mlx5_esw_indir_table_entry_create(struct mlx5_eswitch *esw, struct mlx5_flow_att
}
e->ft = ft;
e->vport = vport;
- e->ip_version = attr->ip_version;
e->fwd_ref = !decap;
- err = mlx5_create_indir_recirc_group(esw, attr, spec, e);
+ err = mlx5_create_indir_recirc_group(e);
if (err)
goto recirc_grp_err;
if (decap) {
- err = mlx5_esw_indir_table_rule_get(esw, attr, spec, e);
+ err = mlx5_esw_indir_table_rule_get(esw, attr, e);
if (err)
goto recirc_rule_err;
}
@@ -430,13 +291,13 @@ mlx5_esw_indir_table_entry_create(struct mlx5_eswitch *esw, struct mlx5_flow_att
goto fwd_grp_err;
hash_add(esw->fdb_table.offloads.indir->table, &e->hlist,
- vport << 16 | attr->ip_version);
+ vport << 16);
return e;
fwd_grp_err:
if (decap)
- mlx5_esw_indir_table_rule_put(esw, attr, e);
+ mlx5_esw_indir_table_rule_put(esw, e);
recirc_rule_err:
mlx5_destroy_flow_group(e->recirc_grp);
recirc_grp_err:
@@ -447,13 +308,13 @@ tbl_err:
}
static struct mlx5_esw_indir_table_entry *
-mlx5_esw_indir_table_entry_lookup(struct mlx5_eswitch *esw, u16 vport, u8 ip_version)
+mlx5_esw_indir_table_entry_lookup(struct mlx5_eswitch *esw, u16 vport)
{
struct mlx5_esw_indir_table_entry *e;
- u32 key = vport << 16 | ip_version;
+ u32 key = vport << 16;
hash_for_each_possible(esw->fdb_table.offloads.indir->table, e, hlist, key)
- if (e->vport == vport && e->ip_version == ip_version)
+ if (e->vport == vport)
return e;
return NULL;
@@ -461,24 +322,23 @@ mlx5_esw_indir_table_entry_lookup(struct mlx5_eswitch *esw, u16 vport, u8 ip_ver
struct mlx5_flow_table *mlx5_esw_indir_table_get(struct mlx5_eswitch *esw,
struct mlx5_flow_attr *attr,
- struct mlx5_flow_spec *spec,
u16 vport, bool decap)
{
struct mlx5_esw_indir_table_entry *e;
int err;
mutex_lock(&esw->fdb_table.offloads.indir->lock);
- e = mlx5_esw_indir_table_entry_lookup(esw, vport, attr->ip_version);
+ e = mlx5_esw_indir_table_entry_lookup(esw, vport);
if (e) {
if (!decap) {
e->fwd_ref++;
} else {
- err = mlx5_esw_indir_table_rule_get(esw, attr, spec, e);
+ err = mlx5_esw_indir_table_rule_get(esw, attr, e);
if (err)
goto out_err;
}
} else {
- e = mlx5_esw_indir_table_entry_create(esw, attr, spec, vport, decap);
+ e = mlx5_esw_indir_table_entry_create(esw, attr, vport, decap);
if (IS_ERR(e)) {
err = PTR_ERR(e);
esw_warn(esw->dev, "Failed to create indirection table, err %d.\n", err);
@@ -494,22 +354,21 @@ out_err:
}
void mlx5_esw_indir_table_put(struct mlx5_eswitch *esw,
- struct mlx5_flow_attr *attr,
u16 vport, bool decap)
{
struct mlx5_esw_indir_table_entry *e;
mutex_lock(&esw->fdb_table.offloads.indir->lock);
- e = mlx5_esw_indir_table_entry_lookup(esw, vport, attr->ip_version);
+ e = mlx5_esw_indir_table_entry_lookup(esw, vport);
if (!e)
goto out;
if (!decap)
e->fwd_ref--;
else
- mlx5_esw_indir_table_rule_put(esw, attr, e);
+ mlx5_esw_indir_table_rule_put(esw, e);
- if (e->fwd_ref || e->recirc_cnt)
+ if (e->fwd_ref || e->recirc_rule)
goto out;
hash_del(&e->hlist);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.h
index 21d56b49d14b..036f5b3a341b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.h
@@ -13,10 +13,8 @@ mlx5_esw_indir_table_destroy(struct mlx5_esw_indir_table *indir);
struct mlx5_flow_table *mlx5_esw_indir_table_get(struct mlx5_eswitch *esw,
struct mlx5_flow_attr *attr,
- struct mlx5_flow_spec *spec,
u16 vport, bool decap);
void mlx5_esw_indir_table_put(struct mlx5_eswitch *esw,
- struct mlx5_flow_attr *attr,
u16 vport, bool decap);
bool
@@ -44,7 +42,6 @@ mlx5_esw_indir_table_destroy(struct mlx5_esw_indir_table *indir)
static inline struct mlx5_flow_table *
mlx5_esw_indir_table_get(struct mlx5_eswitch *esw,
struct mlx5_flow_attr *attr,
- struct mlx5_flow_spec *spec,
u16 vport, bool decap)
{
return ERR_PTR(-EOPNOTSUPP);
@@ -52,7 +49,6 @@ mlx5_esw_indir_table_get(struct mlx5_eswitch *esw,
static inline void
mlx5_esw_indir_table_put(struct mlx5_eswitch *esw,
- struct mlx5_flow_attr *attr,
u16 vport, bool decap)
{
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 9daf55e90367..0f052513fefa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1190,9 +1190,9 @@ static void mlx5_eswitch_get_devlink_param(struct mlx5_eswitch *esw)
union devlink_param_value val;
int err;
- err = devlink_param_driverinit_value_get(devlink,
- MLX5_DEVLINK_PARAM_ID_ESW_LARGE_GROUP_NUM,
- &val);
+ err = devl_param_driverinit_value_get(devlink,
+ MLX5_DEVLINK_PARAM_ID_ESW_LARGE_GROUP_NUM,
+ &val);
if (!err) {
esw->params.large_group_num = val.vu32;
} else {
@@ -1250,7 +1250,7 @@ static int mlx5_esw_acls_ns_init(struct mlx5_eswitch *esw)
if (err)
return err;
} else {
- esw_warn(dev, "engress ACL is not supported by FW\n");
+ esw_warn(dev, "egress ACL is not supported by FW\n");
}
if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) {
@@ -1406,9 +1406,7 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf)
mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs);
if (clear_vf)
mlx5_eswitch_clear_vf_vports_info(esw);
- /* If disabling sriov in switchdev mode, free meta rules here
- * because it depends on num_vfs.
- */
+
if (esw->mode == MLX5_ESWITCH_OFFLOADS) {
struct devlink *devlink = priv_to_devlink(esw->dev);
@@ -1489,7 +1487,7 @@ int mlx5_esw_sf_max_hpf_functions(struct mlx5_core_dev *dev, u16 *max_sfs, u16 *
void *hca_caps;
int err;
- if (!mlx5_core_is_ecpf(dev)) {
+ if (!mlx5_core_is_ecpf(dev) || mlx5_core_is_management_pf(dev)) {
*max_sfs = 0;
return 0;
}
@@ -1642,7 +1640,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
if (err)
goto abort;
- err = esw_offloads_init_reps(esw);
+ err = esw_offloads_init(esw);
if (err)
goto reps_err;
@@ -1708,7 +1706,7 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
mlx5e_mod_hdr_tbl_destroy(&esw->offloads.mod_hdr);
mutex_destroy(&esw->offloads.encap_tbl_lock);
mutex_destroy(&esw->offloads.decap_tbl_lock);
- esw_offloads_cleanup_reps(esw);
+ esw_offloads_cleanup(esw);
mlx5_esw_vports_cleanup(esw);
kfree(esw);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 92644fbb5081..19e9a77c4633 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -52,12 +52,14 @@ enum mlx5_mapped_obj_type {
MLX5_MAPPED_OBJ_CHAIN,
MLX5_MAPPED_OBJ_SAMPLE,
MLX5_MAPPED_OBJ_INT_PORT_METADATA,
+ MLX5_MAPPED_OBJ_ACT_MISS,
};
struct mlx5_mapped_obj {
enum mlx5_mapped_obj_type type;
union {
u32 chain;
+ u64 act_miss_cookie;
struct {
u32 group_id;
u32 rate;
@@ -222,7 +224,6 @@ struct mlx5_eswitch_fdb {
struct mlx5_flow_handle **send_to_vport_meta_rules;
struct mlx5_flow_handle *miss_rule_uni;
struct mlx5_flow_handle *miss_rule_multi;
- int vlan_push_pop_refcount;
struct mlx5_fs_chains *esw_chains_priv;
struct {
@@ -346,8 +347,8 @@ struct mlx5_eswitch {
void esw_offloads_disable(struct mlx5_eswitch *esw);
int esw_offloads_enable(struct mlx5_eswitch *esw);
-void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw);
-int esw_offloads_init_reps(struct mlx5_eswitch *esw);
+void esw_offloads_cleanup(struct mlx5_eswitch *esw);
+int esw_offloads_init(struct mlx5_eswitch *esw);
struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_meta_rule(struct mlx5_eswitch *esw, u16 vport_num);
@@ -520,10 +521,6 @@ int mlx5_devlink_port_fn_migratable_set(struct devlink_port *port, bool enable,
struct netlink_ext_ack *extack);
void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type);
-int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
- struct mlx5_flow_attr *attr);
-int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
- struct mlx5_flow_attr *attr);
int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
u16 vport, u16 vlan, u8 qos, u8 set_flags);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index c981fa77f439..2a98375a0abf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -179,15 +179,14 @@ mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
static int
esw_setup_decap_indir(struct mlx5_eswitch *esw,
- struct mlx5_flow_attr *attr,
- struct mlx5_flow_spec *spec)
+ struct mlx5_flow_attr *attr)
{
struct mlx5_flow_table *ft;
if (!(attr->flags & MLX5_ATTR_FLAG_SRC_REWRITE))
return -EOPNOTSUPP;
- ft = mlx5_esw_indir_table_get(esw, attr, spec,
+ ft = mlx5_esw_indir_table_get(esw, attr,
mlx5_esw_indir_table_decap_vport(attr), true);
return PTR_ERR_OR_ZERO(ft);
}
@@ -197,7 +196,7 @@ esw_cleanup_decap_indir(struct mlx5_eswitch *esw,
struct mlx5_flow_attr *attr)
{
if (mlx5_esw_indir_table_decap_vport(attr))
- mlx5_esw_indir_table_put(esw, attr,
+ mlx5_esw_indir_table_put(esw,
mlx5_esw_indir_table_decap_vport(attr),
true);
}
@@ -235,7 +234,6 @@ esw_setup_ft_dest(struct mlx5_flow_destination *dest,
struct mlx5_flow_act *flow_act,
struct mlx5_eswitch *esw,
struct mlx5_flow_attr *attr,
- struct mlx5_flow_spec *spec,
int i)
{
flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
@@ -243,7 +241,7 @@ esw_setup_ft_dest(struct mlx5_flow_destination *dest,
dest[i].ft = attr->dest_ft;
if (mlx5_esw_indir_table_decap_vport(attr))
- return esw_setup_decap_indir(esw, attr, spec);
+ return esw_setup_decap_indir(esw, attr);
return 0;
}
@@ -298,7 +296,7 @@ static void esw_put_dest_tables_loop(struct mlx5_eswitch *esw, struct mlx5_flow_
mlx5_chains_put_table(chains, 0, 1, 0);
else if (mlx5_esw_indir_table_needed(esw, attr, esw_attr->dests[i].rep->vport,
esw_attr->dests[i].mdev))
- mlx5_esw_indir_table_put(esw, attr, esw_attr->dests[i].rep->vport,
+ mlx5_esw_indir_table_put(esw, esw_attr->dests[i].rep->vport,
false);
}
@@ -384,7 +382,6 @@ esw_setup_indir_table(struct mlx5_flow_destination *dest,
struct mlx5_flow_act *flow_act,
struct mlx5_eswitch *esw,
struct mlx5_flow_attr *attr,
- struct mlx5_flow_spec *spec,
bool ignore_flow_lvl,
int *i)
{
@@ -399,7 +396,7 @@ esw_setup_indir_table(struct mlx5_flow_destination *dest,
flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
dest[*i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- dest[*i].ft = mlx5_esw_indir_table_get(esw, attr, spec,
+ dest[*i].ft = mlx5_esw_indir_table_get(esw, attr,
esw_attr->dests[j].rep->vport, false);
if (IS_ERR(dest[*i].ft)) {
err = PTR_ERR(dest[*i].ft);
@@ -408,7 +405,7 @@ esw_setup_indir_table(struct mlx5_flow_destination *dest,
}
if (mlx5_esw_indir_table_decap_vport(attr)) {
- err = esw_setup_decap_indir(esw, attr, spec);
+ err = esw_setup_decap_indir(esw, attr);
if (err)
goto err_indir_tbl_get;
}
@@ -446,7 +443,7 @@ esw_setup_vport_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *f
MLX5_CAP_GEN(esw_attr->dests[attr_idx].mdev, vhca_id);
dest[dest_idx].vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
if (dest[dest_idx].vport.num == MLX5_VPORT_UPLINK &&
- mlx5_lag_mpesw_is_activated(esw->dev))
+ mlx5_lag_is_mpesw(esw->dev))
dest[dest_idx].type = MLX5_FLOW_DESTINATION_TYPE_UPLINK;
}
if (esw_attr->dests[attr_idx].flags & MLX5_ESW_DEST_ENCAP_VALID) {
@@ -511,14 +508,14 @@ esw_setup_dests(struct mlx5_flow_destination *dest,
err = esw_setup_mtu_dest(dest, &attr->meter_attr, *i);
(*i)++;
} else if (esw_is_indir_table(esw, attr)) {
- err = esw_setup_indir_table(dest, flow_act, esw, attr, spec, true, i);
+ err = esw_setup_indir_table(dest, flow_act, esw, attr, true, i);
} else if (esw_is_chain_src_port_rewrite(esw, esw_attr)) {
err = esw_setup_chain_src_port_rewrite(dest, flow_act, esw, chains, attr, i);
} else {
*i = esw_setup_vport_dests(dest, flow_act, esw, esw_attr, *i);
if (attr->dest_ft) {
- err = esw_setup_ft_dest(dest, flow_act, esw, attr, spec, *i);
+ err = esw_setup_ft_dest(dest, flow_act, esw, attr, *i);
(*i)++;
} else if (attr->dest_chain) {
err = esw_setup_chain_dest(dest, flow_act, chains, attr->dest_chain,
@@ -582,16 +579,16 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
if (esw->mode != MLX5_ESWITCH_OFFLOADS)
return ERR_PTR(-EOPNOTSUPP);
+ if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
+ return ERR_PTR(-EOPNOTSUPP);
+
dest = kcalloc(MLX5_MAX_FLOW_FWD_VPORTS + 1, sizeof(*dest), GFP_KERNEL);
if (!dest)
return ERR_PTR(-ENOMEM);
flow_act.action = attr->action;
- /* if per flow vlan pop/push is emulated, don't set that into the firmware */
- if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
- flow_act.action &= ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
- MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
- else if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
+
+ if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
flow_act.vlan[0].ethtype = ntohs(esw_attr->vlan_proto[0]);
flow_act.vlan[0].vid = esw_attr->vlan_vid[0];
flow_act.vlan[0].prio = esw_attr->vlan_prio[0];
@@ -727,7 +724,7 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
for (i = 0; i < esw_attr->split_count; i++) {
if (esw_is_indir_table(esw, attr))
- err = esw_setup_indir_table(dest, &flow_act, esw, attr, spec, false, &i);
+ err = esw_setup_indir_table(dest, &flow_act, esw, attr, false, &i);
else if (esw_is_chain_src_port_rewrite(esw, esw_attr))
err = esw_setup_chain_src_port_rewrite(dest, &flow_act, esw, chains, attr,
&i);
@@ -832,204 +829,6 @@ mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
__mlx5_eswitch_del_rule(esw, rule, attr, true);
}
-static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
-{
- struct mlx5_eswitch_rep *rep;
- unsigned long i;
- int err = 0;
-
- esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none");
- mlx5_esw_for_each_host_func_vport(esw, i, rep, esw->esw_funcs.num_vfs) {
- if (atomic_read(&rep->rep_data[REP_ETH].state) != REP_LOADED)
- continue;
-
- err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val);
- if (err)
- goto out;
- }
-
-out:
- return err;
-}
-
-static struct mlx5_eswitch_rep *
-esw_vlan_action_get_vport(struct mlx5_esw_flow_attr *attr, bool push, bool pop)
-{
- struct mlx5_eswitch_rep *in_rep, *out_rep, *vport = NULL;
-
- in_rep = attr->in_rep;
- out_rep = attr->dests[0].rep;
-
- if (push)
- vport = in_rep;
- else if (pop)
- vport = out_rep;
- else
- vport = in_rep;
-
- return vport;
-}
-
-static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
- bool push, bool pop, bool fwd)
-{
- struct mlx5_eswitch_rep *in_rep, *out_rep;
-
- if ((push || pop) && !fwd)
- goto out_notsupp;
-
- in_rep = attr->in_rep;
- out_rep = attr->dests[0].rep;
-
- if (push && in_rep->vport == MLX5_VPORT_UPLINK)
- goto out_notsupp;
-
- if (pop && out_rep->vport == MLX5_VPORT_UPLINK)
- goto out_notsupp;
-
- /* vport has vlan push configured, can't offload VF --> wire rules w.o it */
- if (!push && !pop && fwd)
- if (in_rep->vlan && out_rep->vport == MLX5_VPORT_UPLINK)
- goto out_notsupp;
-
- /* protects against (1) setting rules with different vlans to push and
- * (2) setting rules w.o vlans (attr->vlan = 0) && w. vlans to push (!= 0)
- */
- if (push && in_rep->vlan_refcount && (in_rep->vlan != attr->vlan_vid[0]))
- goto out_notsupp;
-
- return 0;
-
-out_notsupp:
- return -EOPNOTSUPP;
-}
-
-int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
- struct mlx5_flow_attr *attr)
-{
- struct offloads_fdb *offloads = &esw->fdb_table.offloads;
- struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
- struct mlx5_eswitch_rep *vport = NULL;
- bool push, pop, fwd;
- int err = 0;
-
- /* nop if we're on the vlan push/pop non emulation mode */
- if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
- return 0;
-
- push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
- pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
- fwd = !!((attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
- !attr->dest_chain);
-
- mutex_lock(&esw->state_lock);
-
- err = esw_add_vlan_action_check(esw_attr, push, pop, fwd);
- if (err)
- goto unlock;
-
- attr->flags &= ~MLX5_ATTR_FLAG_VLAN_HANDLED;
-
- vport = esw_vlan_action_get_vport(esw_attr, push, pop);
-
- if (!push && !pop && fwd) {
- /* tracks VF --> wire rules without vlan push action */
- if (esw_attr->dests[0].rep->vport == MLX5_VPORT_UPLINK) {
- vport->vlan_refcount++;
- attr->flags |= MLX5_ATTR_FLAG_VLAN_HANDLED;
- }
-
- goto unlock;
- }
-
- if (!push && !pop)
- goto unlock;
-
- if (!(offloads->vlan_push_pop_refcount)) {
- /* it's the 1st vlan rule, apply global vlan pop policy */
- err = esw_set_global_vlan_pop(esw, SET_VLAN_STRIP);
- if (err)
- goto out;
- }
- offloads->vlan_push_pop_refcount++;
-
- if (push) {
- if (vport->vlan_refcount)
- goto skip_set_push;
-
- err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, esw_attr->vlan_vid[0],
- 0, SET_VLAN_INSERT | SET_VLAN_STRIP);
- if (err)
- goto out;
- vport->vlan = esw_attr->vlan_vid[0];
-skip_set_push:
- vport->vlan_refcount++;
- }
-out:
- if (!err)
- attr->flags |= MLX5_ATTR_FLAG_VLAN_HANDLED;
-unlock:
- mutex_unlock(&esw->state_lock);
- return err;
-}
-
-int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
- struct mlx5_flow_attr *attr)
-{
- struct offloads_fdb *offloads = &esw->fdb_table.offloads;
- struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
- struct mlx5_eswitch_rep *vport = NULL;
- bool push, pop, fwd;
- int err = 0;
-
- /* nop if we're on the vlan push/pop non emulation mode */
- if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
- return 0;
-
- if (!(attr->flags & MLX5_ATTR_FLAG_VLAN_HANDLED))
- return 0;
-
- push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
- pop = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
- fwd = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);
-
- mutex_lock(&esw->state_lock);
-
- vport = esw_vlan_action_get_vport(esw_attr, push, pop);
-
- if (!push && !pop && fwd) {
- /* tracks VF --> wire rules without vlan push action */
- if (esw_attr->dests[0].rep->vport == MLX5_VPORT_UPLINK)
- vport->vlan_refcount--;
-
- goto out;
- }
-
- if (push) {
- vport->vlan_refcount--;
- if (vport->vlan_refcount)
- goto skip_unset_push;
-
- vport->vlan = 0;
- err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport,
- 0, 0, SET_VLAN_STRIP);
- if (err)
- goto out;
- }
-
-skip_unset_push:
- offloads->vlan_push_pop_refcount--;
- if (offloads->vlan_push_pop_refcount)
- goto out;
-
- /* no more vlan rules, stop global vlan pop policy */
- err = esw_set_global_vlan_pop(esw, 0);
-
-out:
- mutex_unlock(&esw->state_lock);
- return err;
-}
-
struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *on_esw,
struct mlx5_eswitch *from_esw,
@@ -2406,7 +2205,7 @@ static void mlx5_esw_offloads_rep_cleanup(struct mlx5_eswitch *esw,
kfree(rep);
}
-void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw)
+static void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw)
{
struct mlx5_eswitch_rep *rep;
unsigned long i;
@@ -2416,7 +2215,7 @@ void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw)
xa_destroy(&esw->offloads.vport_reps);
}
-int esw_offloads_init_reps(struct mlx5_eswitch *esw)
+static int esw_offloads_init_reps(struct mlx5_eswitch *esw)
{
struct mlx5_vport *vport;
unsigned long i;
@@ -2436,6 +2235,94 @@ err:
return err;
}
+static int esw_port_metadata_set(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ struct mlx5_eswitch *esw = dev->priv.eswitch;
+ int err = 0;
+
+ down_write(&esw->mode_lock);
+ if (mlx5_esw_is_fdb_created(esw)) {
+ err = -EBUSY;
+ goto done;
+ }
+ if (!mlx5_esw_vport_match_metadata_supported(esw)) {
+ err = -EOPNOTSUPP;
+ goto done;
+ }
+ if (ctx->val.vbool)
+ esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;
+ else
+ esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA;
+done:
+ up_write(&esw->mode_lock);
+ return err;
+}
+
+static int esw_port_metadata_get(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+
+ ctx->val.vbool = mlx5_eswitch_vport_match_metadata_enabled(dev->priv.eswitch);
+ return 0;
+}
+
+static int esw_port_metadata_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ u8 esw_mode;
+
+ esw_mode = mlx5_eswitch_mode(dev);
+ if (esw_mode == MLX5_ESWITCH_OFFLOADS) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "E-Switch must either disabled or non switchdev mode");
+ return -EBUSY;
+ }
+ return 0;
+}
+
+static const struct devlink_param esw_devlink_params[] = {
+ DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_ID_ESW_PORT_METADATA,
+ "esw_port_metadata", DEVLINK_PARAM_TYPE_BOOL,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ esw_port_metadata_get,
+ esw_port_metadata_set,
+ esw_port_metadata_validate),
+};
+
+int esw_offloads_init(struct mlx5_eswitch *esw)
+{
+ int err;
+
+ err = esw_offloads_init_reps(esw);
+ if (err)
+ return err;
+
+ err = devl_params_register(priv_to_devlink(esw->dev),
+ esw_devlink_params,
+ ARRAY_SIZE(esw_devlink_params));
+ if (err)
+ goto err_params;
+
+ return 0;
+
+err_params:
+ esw_offloads_cleanup_reps(esw);
+ return err;
+}
+
+void esw_offloads_cleanup(struct mlx5_eswitch *esw)
+{
+ devl_params_unregister(priv_to_devlink(esw->dev),
+ esw_devlink_params,
+ ARRAY_SIZE(esw_devlink_params));
+ esw_offloads_cleanup_reps(esw);
+}
+
static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw,
struct mlx5_eswitch_rep *rep, u8 rep_type)
{
@@ -3575,9 +3462,9 @@ int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
if (IS_ERR(esw))
return PTR_ERR(esw);
- down_write(&esw->mode_lock);
+ down_read(&esw->mode_lock);
err = esw_mode_to_devlink(esw->mode, mode);
- up_write(&esw->mode_lock);
+ up_read(&esw->mode_lock);
return err;
}
@@ -3675,9 +3562,9 @@ int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
if (IS_ERR(esw))
return PTR_ERR(esw);
- down_write(&esw->mode_lock);
+ down_read(&esw->mode_lock);
err = esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
- up_write(&esw->mode_lock);
+ up_read(&esw->mode_lock);
return err;
}
@@ -3749,9 +3636,9 @@ int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink,
if (IS_ERR(esw))
return PTR_ERR(esw);
- down_write(&esw->mode_lock);
+ down_read(&esw->mode_lock);
*encap = esw->offloads.encap;
- up_write(&esw->mode_lock);
+ up_read(&esw->mode_lock);
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/events.c b/drivers/net/ethernet/mellanox/mlx5/core/events.c
index 9459e56ee90a..718cf09c28ce 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/events.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/events.c
@@ -424,6 +424,7 @@ int mlx5_blocking_notifier_register(struct mlx5_core_dev *dev, struct notifier_b
return blocking_notifier_chain_register(&events->sw_nh, nb);
}
+EXPORT_SYMBOL(mlx5_blocking_notifier_register);
int mlx5_blocking_notifier_unregister(struct mlx5_core_dev *dev, struct notifier_block *nb)
{
@@ -431,6 +432,7 @@ int mlx5_blocking_notifier_unregister(struct mlx5_core_dev *dev, struct notifier
return blocking_notifier_chain_unregister(&events->sw_nh, nb);
}
+EXPORT_SYMBOL(mlx5_blocking_notifier_unregister);
int mlx5_blocking_notifier_call_chain(struct mlx5_core_dev *dev, unsigned int event,
void *data)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index 32d4c967469c..144e59480686 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -272,8 +272,6 @@ static int mlx5_cmd_create_flow_table(struct mlx5_flow_root_namespace *ns,
unsigned int size;
int err;
- if (ft_attr->max_fte != POOL_NEXT_SIZE)
- size = roundup_pow_of_two(ft_attr->max_fte);
size = mlx5_ft_pool_get_avail_sz(dev, ft->type, ft_attr->max_fte);
if (!size)
return -ENOSPC;
@@ -412,11 +410,6 @@ static int mlx5_cmd_create_flow_group(struct mlx5_flow_root_namespace *ns,
MLX5_CMD_OP_CREATE_FLOW_GROUP);
MLX5_SET(create_flow_group_in, in, table_type, ft->type);
MLX5_SET(create_flow_group_in, in, table_id, ft->id);
- if (ft->vport) {
- MLX5_SET(create_flow_group_in, in, vport_number, ft->vport);
- MLX5_SET(create_flow_group_in, in, other_vport, 1);
- }
-
MLX5_SET(create_flow_group_in, in, vport_number, ft->vport);
MLX5_SET(create_flow_group_in, in, other_vport,
!!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
@@ -653,6 +646,12 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
id = dst->dest_attr.sampler_id;
ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_SAMPLER;
break;
+ case MLX5_FLOW_DESTINATION_TYPE_TABLE_TYPE:
+ MLX5_SET(dest_format_struct, in_dests,
+ destination_table_type, dst->dest_attr.ft->type);
+ id = dst->dest_attr.ft->id;
+ ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_TABLE_TYPE;
+ break;
default:
id = dst->dest_attr.tir_num;
ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_TIR;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 5a85d8c1e797..731acbe22dc7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -34,12 +34,14 @@
#include <linux/mlx5/driver.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/eswitch.h>
+#include <net/devlink.h>
#include "mlx5_core.h"
#include "fs_core.h"
#include "fs_cmd.h"
#include "fs_ft_pool.h"
#include "diag/fs_tracepoint.h"
+#include "devlink.h"
#define INIT_TREE_NODE_ARRAY_SIZE(...) (sizeof((struct init_tree_node[]){__VA_ARGS__}) /\
sizeof(struct init_tree_node))
@@ -111,8 +113,10 @@
#define ETHTOOL_PRIO_NUM_LEVELS 1
#define ETHTOOL_NUM_PRIOS 11
#define ETHTOOL_MIN_LEVEL (KERNEL_MIN_LEVEL + ETHTOOL_NUM_PRIOS)
-/* Promiscuous, Vlan, mac, ttc, inner ttc, {UDP/ANY/aRFS/accel/{esp, esp_err}}, IPsec policy */
-#define KERNEL_NIC_PRIO_NUM_LEVELS 8
+/* Promiscuous, Vlan, mac, ttc, inner ttc, {UDP/ANY/aRFS/accel/{esp, esp_err}}, IPsec policy,
+ * IPsec RoCE policy
+ */
+#define KERNEL_NIC_PRIO_NUM_LEVELS 9
#define KERNEL_NIC_NUM_PRIOS 1
/* One more level for tc */
#define KERNEL_MIN_LEVEL (KERNEL_NIC_PRIO_NUM_LEVELS + 1)
@@ -219,19 +223,30 @@ static struct init_tree_node egress_root_fs = {
};
enum {
+ RDMA_RX_IPSEC_PRIO,
RDMA_RX_COUNTERS_PRIO,
RDMA_RX_BYPASS_PRIO,
RDMA_RX_KERNEL_PRIO,
};
+#define RDMA_RX_IPSEC_NUM_PRIOS 1
+#define RDMA_RX_IPSEC_NUM_LEVELS 2
+#define RDMA_RX_IPSEC_MIN_LEVEL (RDMA_RX_IPSEC_NUM_LEVELS)
+
#define RDMA_RX_BYPASS_MIN_LEVEL MLX5_BY_PASS_NUM_REGULAR_PRIOS
#define RDMA_RX_KERNEL_MIN_LEVEL (RDMA_RX_BYPASS_MIN_LEVEL + 1)
#define RDMA_RX_COUNTERS_MIN_LEVEL (RDMA_RX_KERNEL_MIN_LEVEL + 2)
static struct init_tree_node rdma_rx_root_fs = {
.type = FS_TYPE_NAMESPACE,
- .ar_size = 3,
+ .ar_size = 4,
.children = (struct init_tree_node[]) {
+ [RDMA_RX_IPSEC_PRIO] =
+ ADD_PRIO(0, RDMA_RX_IPSEC_MIN_LEVEL, 0,
+ FS_CHAINING_CAPS,
+ ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
+ ADD_MULTIPLE_PRIO(RDMA_RX_IPSEC_NUM_PRIOS,
+ RDMA_RX_IPSEC_NUM_LEVELS))),
[RDMA_RX_COUNTERS_PRIO] =
ADD_PRIO(0, RDMA_RX_COUNTERS_MIN_LEVEL, 0,
FS_CHAINING_CAPS,
@@ -254,15 +269,20 @@ static struct init_tree_node rdma_rx_root_fs = {
enum {
RDMA_TX_COUNTERS_PRIO,
+ RDMA_TX_IPSEC_PRIO,
RDMA_TX_BYPASS_PRIO,
};
#define RDMA_TX_BYPASS_MIN_LEVEL MLX5_BY_PASS_NUM_PRIOS
#define RDMA_TX_COUNTERS_MIN_LEVEL (RDMA_TX_BYPASS_MIN_LEVEL + 1)
+#define RDMA_TX_IPSEC_NUM_PRIOS 1
+#define RDMA_TX_IPSEC_PRIO_NUM_LEVELS 1
+#define RDMA_TX_IPSEC_MIN_LEVEL (RDMA_TX_COUNTERS_MIN_LEVEL + RDMA_TX_IPSEC_NUM_PRIOS)
+
static struct init_tree_node rdma_tx_root_fs = {
.type = FS_TYPE_NAMESPACE,
- .ar_size = 2,
+ .ar_size = 3,
.children = (struct init_tree_node[]) {
[RDMA_TX_COUNTERS_PRIO] =
ADD_PRIO(0, RDMA_TX_COUNTERS_MIN_LEVEL, 0,
@@ -270,6 +290,13 @@ static struct init_tree_node rdma_tx_root_fs = {
ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
ADD_MULTIPLE_PRIO(MLX5_RDMA_TX_NUM_COUNTERS_PRIOS,
RDMA_TX_COUNTERS_PRIO_NUM_LEVELS))),
+ [RDMA_TX_IPSEC_PRIO] =
+ ADD_PRIO(0, RDMA_TX_IPSEC_MIN_LEVEL, 0,
+ FS_CHAINING_CAPS,
+ ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
+ ADD_MULTIPLE_PRIO(RDMA_TX_IPSEC_NUM_PRIOS,
+ RDMA_TX_IPSEC_PRIO_NUM_LEVELS))),
+
[RDMA_TX_BYPASS_PRIO] =
ADD_PRIO(0, RDMA_TX_BYPASS_MIN_LEVEL, 0,
FS_CHAINING_CAPS_RDMA_TX,
@@ -449,7 +476,8 @@ static bool is_fwd_dest_type(enum mlx5_flow_destination_type type)
type == MLX5_FLOW_DESTINATION_TYPE_VPORT ||
type == MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER ||
type == MLX5_FLOW_DESTINATION_TYPE_TIR ||
- type == MLX5_FLOW_DESTINATION_TYPE_RANGE;
+ type == MLX5_FLOW_DESTINATION_TYPE_RANGE ||
+ type == MLX5_FLOW_DESTINATION_TYPE_TABLE_TYPE;
}
static bool check_valid_spec(const struct mlx5_flow_spec *spec)
@@ -1774,7 +1802,6 @@ static int build_match_list(struct match_list *match_head,
{
struct rhlist_head *tmp, *list;
struct mlx5_flow_group *g;
- int err = 0;
rcu_read_lock();
INIT_LIST_HEAD(&match_head->list);
@@ -1800,7 +1827,7 @@ static int build_match_list(struct match_list *match_head,
list_add_tail(&curr_match->list, &match_head->list);
}
rcu_read_unlock();
- return err;
+ return 0;
}
static u64 matched_fgs_get_version(struct list_head *match_head)
@@ -2367,6 +2394,14 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
root_ns = steering->rdma_tx_root_ns;
prio = RDMA_TX_COUNTERS_PRIO;
break;
+ case MLX5_FLOW_NAMESPACE_RDMA_RX_IPSEC:
+ root_ns = steering->rdma_rx_root_ns;
+ prio = RDMA_RX_IPSEC_PRIO;
+ break;
+ case MLX5_FLOW_NAMESPACE_RDMA_TX_IPSEC:
+ root_ns = steering->rdma_tx_root_ns;
+ prio = RDMA_TX_IPSEC_PRIO;
+ break;
default: /* Must be NIC RX */
WARN_ON(!is_nic_rx_ns(type));
root_ns = steering->root_ns;
@@ -3143,6 +3178,78 @@ cleanup:
return err;
}
+static int mlx5_fs_mode_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ char *value = val.vstr;
+ int err = 0;
+
+ if (!strcmp(value, "dmfs")) {
+ return 0;
+ } else if (!strcmp(value, "smfs")) {
+ u8 eswitch_mode;
+ bool smfs_cap;
+
+ eswitch_mode = mlx5_eswitch_mode(dev);
+ smfs_cap = mlx5_fs_dr_is_supported(dev);
+
+ if (!smfs_cap) {
+ err = -EOPNOTSUPP;
+ NL_SET_ERR_MSG_MOD(extack,
+ "Software managed steering is not supported by current device");
+ }
+
+ else if (eswitch_mode == MLX5_ESWITCH_OFFLOADS) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Software managed steering is not supported when eswitch offloads enabled.");
+ err = -EOPNOTSUPP;
+ }
+ } else {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Bad parameter: supported values are [\"dmfs\", \"smfs\"]");
+ err = -EINVAL;
+ }
+
+ return err;
+}
+
+static int mlx5_fs_mode_set(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ enum mlx5_flow_steering_mode mode;
+
+ if (!strcmp(ctx->val.vstr, "smfs"))
+ mode = MLX5_FLOW_STEERING_MODE_SMFS;
+ else
+ mode = MLX5_FLOW_STEERING_MODE_DMFS;
+ dev->priv.steering->mode = mode;
+
+ return 0;
+}
+
+static int mlx5_fs_mode_get(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+
+ if (dev->priv.steering->mode == MLX5_FLOW_STEERING_MODE_SMFS)
+ strcpy(ctx->val.vstr, "smfs");
+ else
+ strcpy(ctx->val.vstr, "dmfs");
+ return 0;
+}
+
+static const struct devlink_param mlx5_fs_params[] = {
+ DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_ID_FLOW_STEERING_MODE,
+ "flow_steering_mode", DEVLINK_PARAM_TYPE_STRING,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ mlx5_fs_mode_get, mlx5_fs_mode_set,
+ mlx5_fs_mode_validate),
+};
+
void mlx5_fs_core_cleanup(struct mlx5_core_dev *dev)
{
struct mlx5_flow_steering *steering = dev->priv.steering;
@@ -3155,12 +3262,20 @@ void mlx5_fs_core_cleanup(struct mlx5_core_dev *dev)
cleanup_root_ns(steering->rdma_rx_root_ns);
cleanup_root_ns(steering->rdma_tx_root_ns);
cleanup_root_ns(steering->egress_root_ns);
+
+ devl_params_unregister(priv_to_devlink(dev), mlx5_fs_params,
+ ARRAY_SIZE(mlx5_fs_params));
}
int mlx5_fs_core_init(struct mlx5_core_dev *dev)
{
struct mlx5_flow_steering *steering = dev->priv.steering;
- int err = 0;
+ int err;
+
+ err = devl_params_register(priv_to_devlink(dev), mlx5_fs_params,
+ ARRAY_SIZE(mlx5_fs_params));
+ if (err)
+ return err;
if ((((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
(MLX5_CAP_GEN(dev, nic_flow_table))) ||
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
index b406e0367af6..17fe30a4c06c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
@@ -504,6 +504,16 @@ void mlx5_fc_query_cached(struct mlx5_fc *counter,
counter->lastpackets = c.packets;
}
+void mlx5_fc_query_cached_raw(struct mlx5_fc *counter,
+ u64 *bytes, u64 *packets, u64 *lastuse)
+{
+ struct mlx5_fc_cache c = counter->cache;
+
+ *bytes = c.bytes;
+ *packets = c.packets;
+ *lastuse = c.lastuse;
+}
+
void mlx5_fc_queue_stats_work(struct mlx5_core_dev *dev,
struct delayed_work *dwork,
unsigned long delay)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index f34e758a2f1f..7bb7be01225a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -267,6 +267,12 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
return err;
}
+ if (MLX5_CAP_GEN(dev, crypto)) {
+ err = mlx5_core_get_caps(dev, MLX5_CAP_CRYPTO);
+ if (err)
+ return err;
+ }
+
if (MLX5_CAP_GEN(dev, shampo)) {
err = mlx5_core_get_caps(dev, MLX5_CAP_DEV_SHAMPO);
if (err)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
index 1e46f9afa40e..4c2dad9d7cfb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */
+#include <devlink.h>
+
#include "fw_reset.h"
#include "diag/fw_tracer.h"
#include "lib/tout.h"
@@ -28,21 +30,32 @@ struct mlx5_fw_reset {
int ret;
};
-void mlx5_fw_reset_enable_remote_dev_reset_set(struct mlx5_core_dev *dev, bool enable)
+static int mlx5_fw_reset_enable_remote_dev_reset_set(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
{
- struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ struct mlx5_fw_reset *fw_reset;
- if (enable)
+ fw_reset = dev->priv.fw_reset;
+
+ if (ctx->val.vbool)
clear_bit(MLX5_FW_RESET_FLAGS_NACK_RESET_REQUEST, &fw_reset->reset_flags);
else
set_bit(MLX5_FW_RESET_FLAGS_NACK_RESET_REQUEST, &fw_reset->reset_flags);
+ return 0;
}
-bool mlx5_fw_reset_enable_remote_dev_reset_get(struct mlx5_core_dev *dev)
+static int mlx5_fw_reset_enable_remote_dev_reset_get(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
{
- struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ struct mlx5_fw_reset *fw_reset;
+
+ fw_reset = dev->priv.fw_reset;
- return !test_bit(MLX5_FW_RESET_FLAGS_NACK_RESET_REQUEST, &fw_reset->reset_flags);
+ ctx->val.vbool = !test_bit(MLX5_FW_RESET_FLAGS_NACK_RESET_REQUEST,
+ &fw_reset->reset_flags);
+ return 0;
}
static int mlx5_reg_mfrl_set(struct mlx5_core_dev *dev, u8 reset_level,
@@ -150,11 +163,11 @@ static void mlx5_fw_reset_complete_reload(struct mlx5_core_dev *dev)
if (test_bit(MLX5_FW_RESET_FLAGS_PENDING_COMP, &fw_reset->reset_flags)) {
complete(&fw_reset->done);
} else {
- mlx5_unload_one(dev);
+ mlx5_unload_one(dev, false);
if (mlx5_health_wait_pci_up(dev))
mlx5_core_err(dev, "reset reload flow aborted, PCI reads still not working\n");
else
- mlx5_load_one(dev, false);
+ mlx5_load_one(dev);
devlink_remote_reload_actions_performed(priv_to_devlink(dev), 0,
BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) |
BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE));
@@ -358,6 +371,7 @@ static int mlx5_pci_link_toggle(struct mlx5_core_dev *dev)
mlx5_core_err(dev, "PCI link not ready (0x%04x) after %llu ms\n",
reg16, mlx5_tout_ms(dev, PCI_TOGGLE));
err = -ETIMEDOUT;
+ goto restore;
}
do {
@@ -484,7 +498,7 @@ int mlx5_fw_reset_wait_reset_done(struct mlx5_core_dev *dev)
}
err = fw_reset->ret;
if (test_and_clear_bit(MLX5_FW_RESET_FLAGS_RELOAD_REQUIRED, &fw_reset->reset_flags)) {
- mlx5_unload_one_devl_locked(dev);
+ mlx5_unload_one_devl_locked(dev, false);
mlx5_load_one_devl_locked(dev, false);
}
out:
@@ -517,9 +531,16 @@ void mlx5_drain_fw_reset(struct mlx5_core_dev *dev)
cancel_work_sync(&fw_reset->reset_abort_work);
}
+static const struct devlink_param mlx5_fw_reset_devlink_params[] = {
+ DEVLINK_PARAM_GENERIC(ENABLE_REMOTE_DEV_RESET, BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ mlx5_fw_reset_enable_remote_dev_reset_get,
+ mlx5_fw_reset_enable_remote_dev_reset_set, NULL),
+};
+
int mlx5_fw_reset_init(struct mlx5_core_dev *dev)
{
struct mlx5_fw_reset *fw_reset = kzalloc(sizeof(*fw_reset), GFP_KERNEL);
+ int err;
if (!fw_reset)
return -ENOMEM;
@@ -532,6 +553,15 @@ int mlx5_fw_reset_init(struct mlx5_core_dev *dev)
fw_reset->dev = dev;
dev->priv.fw_reset = fw_reset;
+ err = devl_params_register(priv_to_devlink(dev),
+ mlx5_fw_reset_devlink_params,
+ ARRAY_SIZE(mlx5_fw_reset_devlink_params));
+ if (err) {
+ destroy_workqueue(fw_reset->wq);
+ kfree(fw_reset);
+ return err;
+ }
+
INIT_WORK(&fw_reset->fw_live_patch_work, mlx5_fw_live_patch_event);
INIT_WORK(&fw_reset->reset_request_work, mlx5_sync_reset_request_event);
INIT_WORK(&fw_reset->reset_reload_work, mlx5_sync_reset_reload_work);
@@ -546,6 +576,9 @@ void mlx5_fw_reset_cleanup(struct mlx5_core_dev *dev)
{
struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
+ devl_params_unregister(priv_to_devlink(dev),
+ mlx5_fw_reset_devlink_params,
+ ARRAY_SIZE(mlx5_fw_reset_devlink_params));
destroy_workqueue(fw_reset->wq);
kfree(dev->priv.fw_reset);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h
index dc141c7e641a..c57465595f7c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h
@@ -6,8 +6,6 @@
#include "mlx5_core.h"
-void mlx5_fw_reset_enable_remote_dev_reset_set(struct mlx5_core_dev *dev, bool enable);
-bool mlx5_fw_reset_enable_remote_dev_reset_get(struct mlx5_core_dev *dev);
int mlx5_fw_reset_query(struct mlx5_core_dev *dev, u8 *reset_level, u8 *reset_type);
int mlx5_fw_reset_set_reset_sync(struct mlx5_core_dev *dev, u8 reset_type_sel,
struct netlink_ext_ack *extack);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 879555ba847d..f9438d4e43ca 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -62,7 +62,7 @@ enum {
};
enum {
- MLX5_DROP_NEW_HEALTH_WORK,
+ MLX5_DROP_HEALTH_WORK,
};
enum {
@@ -675,7 +675,7 @@ static void mlx5_fw_fatal_reporter_err_work(struct work_struct *work)
devlink = priv_to_devlink(dev);
mutex_lock(&dev->intf_state_mutex);
- if (test_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags)) {
+ if (test_bit(MLX5_DROP_HEALTH_WORK, &health->flags)) {
mlx5_core_err(dev, "health works are not permitted at this stage\n");
mutex_unlock(&dev->intf_state_mutex);
return;
@@ -699,7 +699,7 @@ static void mlx5_fw_fatal_reporter_err_work(struct work_struct *work)
* requests from the kernel.
*/
mlx5_core_err(dev, "Driver is in error state. Unloading\n");
- mlx5_unload_one(dev);
+ mlx5_unload_one(dev, false);
}
}
@@ -771,14 +771,8 @@ static unsigned long get_next_poll_jiffies(struct mlx5_core_dev *dev)
void mlx5_trigger_health_work(struct mlx5_core_dev *dev)
{
struct mlx5_core_health *health = &dev->priv.health;
- unsigned long flags;
- spin_lock_irqsave(&health->wq_lock, flags);
- if (!test_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags))
- queue_work(health->wq, &health->fatal_report_work);
- else
- mlx5_core_err(dev, "new health works are not permitted at this stage\n");
- spin_unlock_irqrestore(&health->wq_lock, flags);
+ queue_work(health->wq, &health->fatal_report_work);
}
#define MLX5_MSEC_PER_HOUR (MSEC_PER_SEC * 60 * 60)
@@ -858,7 +852,7 @@ void mlx5_start_health_poll(struct mlx5_core_dev *dev)
timer_setup(&health->timer, poll_health, 0);
health->fatal_error = MLX5_SENSOR_NO_ERR;
- clear_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags);
+ clear_bit(MLX5_DROP_HEALTH_WORK, &health->flags);
health->health = &dev->iseg->health;
health->health_counter = &dev->iseg->health_counter;
@@ -869,13 +863,9 @@ void mlx5_start_health_poll(struct mlx5_core_dev *dev)
void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health)
{
struct mlx5_core_health *health = &dev->priv.health;
- unsigned long flags;
- if (disable_health) {
- spin_lock_irqsave(&health->wq_lock, flags);
- set_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags);
- spin_unlock_irqrestore(&health->wq_lock, flags);
- }
+ if (disable_health)
+ set_bit(MLX5_DROP_HEALTH_WORK, &health->flags);
del_timer_sync(&health->timer);
}
@@ -891,11 +881,8 @@ void mlx5_start_health_fw_log_up(struct mlx5_core_dev *dev)
void mlx5_drain_health_wq(struct mlx5_core_dev *dev)
{
struct mlx5_core_health *health = &dev->priv.health;
- unsigned long flags;
- spin_lock_irqsave(&health->wq_lock, flags);
- set_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags);
- spin_unlock_irqrestore(&health->wq_lock, flags);
+ set_bit(MLX5_DROP_HEALTH_WORK, &health->flags);
cancel_delayed_work_sync(&health->update_fw_log_ts_work);
cancel_work_sync(&health->report_work);
cancel_work_sync(&health->fatal_report_work);
@@ -928,7 +915,6 @@ int mlx5_health_init(struct mlx5_core_dev *dev)
kfree(name);
if (!health->wq)
goto out_err;
- spin_lock_init(&health->wq_lock);
INIT_WORK(&health->fatal_report_work, mlx5_fw_fatal_reporter_err_work);
INIT_WORK(&health->report_work, mlx5_fw_reporter_err_work);
INIT_DELAYED_WORK(&health->update_fw_log_ts_work, mlx5_health_log_ts_update);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
index eff92dc0927c..779d92b762d3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
@@ -172,6 +172,7 @@ enum mlx5_ptys_rate {
MLX5_PTYS_RATE_EDR = 1 << 5,
MLX5_PTYS_RATE_HDR = 1 << 6,
MLX5_PTYS_RATE_NDR = 1 << 7,
+ MLX5_PTYS_RATE_XDR = 1 << 8,
};
static inline int mlx5_ptys_rate_enum_to_int(enum mlx5_ptys_rate rate)
@@ -185,20 +186,21 @@ static inline int mlx5_ptys_rate_enum_to_int(enum mlx5_ptys_rate rate)
case MLX5_PTYS_RATE_EDR: return 25000;
case MLX5_PTYS_RATE_HDR: return 50000;
case MLX5_PTYS_RATE_NDR: return 100000;
+ case MLX5_PTYS_RATE_XDR: return 200000;
default: return -1;
}
}
-static int mlx5i_get_speed_settings(u16 ib_link_width_oper, u16 ib_proto_oper)
+static u32 mlx5i_get_speed_settings(u16 ib_link_width_oper, u16 ib_proto_oper)
{
int rate, width;
rate = mlx5_ptys_rate_enum_to_int(ib_proto_oper);
if (rate < 0)
- return -EINVAL;
+ return SPEED_UNKNOWN;
width = mlx5_ptys_width_enum_to_int(ib_link_width_oper);
if (width < 0)
- return -EINVAL;
+ return SPEED_UNKNOWN;
return rate * width;
}
@@ -221,16 +223,13 @@ static int mlx5i_get_link_ksettings(struct net_device *netdev,
ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising);
speed = mlx5i_get_speed_settings(ib_link_width_oper, ib_proto_oper);
- if (speed < 0)
- return -EINVAL;
+ link_ksettings->base.speed = speed;
+ link_ksettings->base.duplex = speed == SPEED_UNKNOWN ? DUPLEX_UNKNOWN : DUPLEX_FULL;
- link_ksettings->base.duplex = DUPLEX_FULL;
link_ksettings->base.port = PORT_OTHER;
link_ksettings->base.autoneg = AUTONEG_DISABLE;
- link_ksettings->base.speed = speed;
-
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index 911cf4d23964..c2a4f86bc890 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -412,7 +412,8 @@ static int mlx5i_init_rx(struct mlx5e_priv *priv)
int err;
priv->fs = mlx5e_fs_init(priv->profile, mdev,
- !test_bit(MLX5E_STATE_DESTROYING, &priv->state));
+ !test_bit(MLX5E_STATE_DESTROYING, &priv->state),
+ priv->dfs_root);
if (!priv->fs) {
netdev_err(priv->netdev, "FS allocation failed\n");
return -ENOMEM;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/debugfs.c
index b8feaf0f5c4c..f4b777d4e108 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/debugfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/debugfs.c
@@ -22,7 +22,7 @@ static int type_show(struct seq_file *file, void *priv)
struct mlx5_lag *ldev;
char *mode = NULL;
- ldev = dev->priv.lag;
+ ldev = mlx5_lag_dev(dev);
mutex_lock(&ldev->lock);
if (__mlx5_lag_is_active(ldev))
mode = get_str_mode_type(ldev);
@@ -41,7 +41,7 @@ static int port_sel_mode_show(struct seq_file *file, void *priv)
int ret = 0;
char *mode;
- ldev = dev->priv.lag;
+ ldev = mlx5_lag_dev(dev);
mutex_lock(&ldev->lock);
if (__mlx5_lag_is_active(ldev))
mode = mlx5_get_str_port_sel_mode(ldev->mode, ldev->mode_flags);
@@ -61,7 +61,7 @@ static int state_show(struct seq_file *file, void *priv)
struct mlx5_lag *ldev;
bool active;
- ldev = dev->priv.lag;
+ ldev = mlx5_lag_dev(dev);
mutex_lock(&ldev->lock);
active = __mlx5_lag_is_active(ldev);
mutex_unlock(&ldev->lock);
@@ -77,7 +77,7 @@ static int flags_show(struct seq_file *file, void *priv)
bool shared_fdb;
bool lag_active;
- ldev = dev->priv.lag;
+ ldev = mlx5_lag_dev(dev);
mutex_lock(&ldev->lock);
lag_active = __mlx5_lag_is_active(ldev);
if (!lag_active)
@@ -108,7 +108,7 @@ static int mapping_show(struct seq_file *file, void *priv)
int num_ports;
int i;
- ldev = dev->priv.lag;
+ ldev = mlx5_lag_dev(dev);
mutex_lock(&ldev->lock);
lag_active = __mlx5_lag_is_active(ldev);
if (lag_active) {
@@ -142,7 +142,7 @@ static int members_show(struct seq_file *file, void *priv)
struct mlx5_lag *ldev;
int i;
- ldev = dev->priv.lag;
+ ldev = mlx5_lag_dev(dev);
mutex_lock(&ldev->lock);
for (i = 0; i < ldev->ports; i++) {
if (!ldev->pf[i].dev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
index ad32b80e8501..5d331b940f4d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
@@ -230,7 +230,6 @@ static void mlx5_ldev_free(struct kref *ref)
mlx5_lag_mp_cleanup(ldev);
cancel_delayed_work_sync(&ldev->bond_work);
destroy_workqueue(ldev->wq);
- mlx5_lag_mpesw_cleanup(ldev);
mutex_destroy(&ldev->lock);
kfree(ldev);
}
@@ -276,7 +275,6 @@ static struct mlx5_lag *mlx5_lag_dev_alloc(struct mlx5_core_dev *dev)
mlx5_core_err(dev, "Failed to init multipath lag err=%d\n",
err);
- mlx5_lag_mpesw_init(ldev);
ldev->ports = MLX5_CAP_GEN(dev, num_lag_ports);
ldev->buckets = 1;
@@ -646,7 +644,7 @@ int mlx5_activate_lag(struct mlx5_lag *ldev,
return 0;
}
-static int mlx5_deactivate_lag(struct mlx5_lag *ldev)
+int mlx5_deactivate_lag(struct mlx5_lag *ldev)
{
struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev;
@@ -688,7 +686,7 @@ static int mlx5_deactivate_lag(struct mlx5_lag *ldev)
}
#define MLX5_LAG_OFFLOADS_SUPPORTED_PORTS 2
-static bool mlx5_lag_check_prereq(struct mlx5_lag *ldev)
+bool mlx5_lag_check_prereq(struct mlx5_lag *ldev)
{
#ifdef CONFIG_MLX5_ESWITCH
struct mlx5_core_dev *dev;
@@ -723,7 +721,7 @@ static bool mlx5_lag_check_prereq(struct mlx5_lag *ldev)
return true;
}
-static void mlx5_lag_add_devices(struct mlx5_lag *ldev)
+void mlx5_lag_add_devices(struct mlx5_lag *ldev)
{
int i;
@@ -740,7 +738,7 @@ static void mlx5_lag_add_devices(struct mlx5_lag *ldev)
}
}
-static void mlx5_lag_remove_devices(struct mlx5_lag *ldev)
+void mlx5_lag_remove_devices(struct mlx5_lag *ldev)
{
int i;
@@ -1187,7 +1185,7 @@ static int __mlx5_lag_dev_add_mdev(struct mlx5_core_dev *dev)
tmp_dev = mlx5_get_next_phys_dev_lag(dev);
if (tmp_dev)
- ldev = tmp_dev->priv.lag;
+ ldev = mlx5_lag_dev(tmp_dev);
if (!ldev) {
ldev = mlx5_lag_dev_alloc(dev);
@@ -1386,8 +1384,7 @@ bool mlx5_lag_is_shared_fdb(struct mlx5_core_dev *dev)
spin_lock_irqsave(&lag_lock, flags);
ldev = mlx5_lag_dev(dev);
- res = ldev && __mlx5_lag_is_sriov(ldev) &&
- test_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &ldev->mode_flags);
+ res = ldev && test_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &ldev->mode_flags);
spin_unlock_irqrestore(&lag_lock, flags);
return res;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
index f30ac2de639f..bc1f1dd3e283 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
@@ -50,19 +50,6 @@ struct lag_tracker {
enum netdev_lag_hash hash_type;
};
-enum mpesw_op {
- MLX5_MPESW_OP_ENABLE,
- MLX5_MPESW_OP_DISABLE,
-};
-
-struct mlx5_mpesw_work_st {
- struct work_struct work;
- struct mlx5_lag *lag;
- enum mpesw_op op;
- struct completion comp;
- int result;
-};
-
/* LAG data of a ConnectX card.
* It serves both its phys functions.
*/
@@ -115,6 +102,7 @@ mlx5_lag_is_ready(struct mlx5_lag *ldev)
return test_bit(MLX5_LAG_FLAG_NDEVS_READY, &ldev->state_flags);
}
+bool mlx5_lag_check_prereq(struct mlx5_lag *ldev);
void mlx5_modify_lag(struct mlx5_lag *ldev,
struct lag_tracker *tracker);
int mlx5_activate_lag(struct mlx5_lag *ldev,
@@ -124,8 +112,6 @@ int mlx5_activate_lag(struct mlx5_lag *ldev,
int mlx5_lag_dev_get_netdev_idx(struct mlx5_lag *ldev,
struct net_device *ndev);
bool mlx5_shared_fdb_supported(struct mlx5_lag *ldev);
-void mlx5_lag_del_mpesw_rule(struct mlx5_core_dev *dev);
-int mlx5_lag_add_mpesw_rule(struct mlx5_core_dev *dev);
char *mlx5_get_str_port_sel_mode(enum mlx5_lag_mode mode, unsigned long flags);
void mlx5_infer_tx_enabled(struct lag_tracker *tracker, u8 num_ports,
@@ -134,5 +120,8 @@ void mlx5_infer_tx_enabled(struct lag_tracker *tracker, u8 num_ports,
void mlx5_ldev_add_debugfs(struct mlx5_core_dev *dev);
void mlx5_ldev_remove_debugfs(struct dentry *dbg);
void mlx5_disable_lag(struct mlx5_lag *ldev);
+void mlx5_lag_remove_devices(struct mlx5_lag *ldev);
+int mlx5_deactivate_lag(struct mlx5_lag *ldev);
+void mlx5_lag_add_devices(struct mlx5_lag *ldev);
#endif /* __MLX5_LAG_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c
index d9fcb9ed726f..d85a8dfc153d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c
@@ -28,13 +28,9 @@ static bool mlx5_lag_multipath_check_prereq(struct mlx5_lag *ldev)
bool mlx5_lag_is_multipath(struct mlx5_core_dev *dev)
{
- struct mlx5_lag *ldev;
- bool res;
-
- ldev = mlx5_lag_dev(dev);
- res = ldev && __mlx5_lag_is_multipath(ldev);
+ struct mlx5_lag *ldev = mlx5_lag_dev(dev);
- return res;
+ return ldev && __mlx5_lag_is_multipath(ldev);
}
/**
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
index c17e8f1ec914..0c0ef600f643 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
@@ -5,39 +5,121 @@
#include <net/nexthop.h>
#include "lag/lag.h"
#include "eswitch.h"
+#include "esw/acl/ofld.h"
#include "lib/mlx5.h"
-static int add_mpesw_rule(struct mlx5_lag *ldev)
+static void mlx5_mpesw_metadata_cleanup(struct mlx5_lag *ldev)
{
- struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
- int err;
+ struct mlx5_core_dev *dev;
+ struct mlx5_eswitch *esw;
+ u32 pf_metadata;
+ int i;
+
+ for (i = 0; i < ldev->ports; i++) {
+ dev = ldev->pf[i].dev;
+ esw = dev->priv.eswitch;
+ pf_metadata = ldev->lag_mpesw.pf_metadata[i];
+ if (!pf_metadata)
+ continue;
+ mlx5_esw_acl_ingress_vport_metadata_update(esw, MLX5_VPORT_UPLINK, 0);
+ mlx5_notifier_call_chain(dev->priv.events, MLX5_DEV_EVENT_MULTIPORT_ESW,
+ (void *)0);
+ mlx5_esw_match_metadata_free(esw, pf_metadata);
+ ldev->lag_mpesw.pf_metadata[i] = 0;
+ }
+}
- if (atomic_add_return(1, &ldev->lag_mpesw.mpesw_rule_count) != 1)
- return 0;
+static int mlx5_mpesw_metadata_set(struct mlx5_lag *ldev)
+{
+ struct mlx5_core_dev *dev;
+ struct mlx5_eswitch *esw;
+ u32 pf_metadata;
+ int i, err;
+
+ for (i = 0; i < ldev->ports; i++) {
+ dev = ldev->pf[i].dev;
+ esw = dev->priv.eswitch;
+ pf_metadata = mlx5_esw_match_metadata_alloc(esw);
+ if (!pf_metadata) {
+ err = -ENOSPC;
+ goto err_metadata;
+ }
+
+ ldev->lag_mpesw.pf_metadata[i] = pf_metadata;
+ err = mlx5_esw_acl_ingress_vport_metadata_update(esw, MLX5_VPORT_UPLINK,
+ pf_metadata);
+ if (err)
+ goto err_metadata;
+ }
- if (ldev->mode != MLX5_LAG_MODE_NONE) {
- err = -EINVAL;
- goto out_err;
+ for (i = 0; i < ldev->ports; i++) {
+ dev = ldev->pf[i].dev;
+ mlx5_notifier_call_chain(dev->priv.events, MLX5_DEV_EVENT_MULTIPORT_ESW,
+ (void *)0);
}
- err = mlx5_activate_lag(ldev, NULL, MLX5_LAG_MODE_MPESW, false);
+ return 0;
+
+err_metadata:
+ mlx5_mpesw_metadata_cleanup(ldev);
+ return err;
+}
+
+static int enable_mpesw(struct mlx5_lag *ldev)
+{
+ struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
+ struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev;
+ int err;
+
+ if (ldev->mode != MLX5_LAG_MODE_NONE)
+ return -EINVAL;
+
+ if (mlx5_eswitch_mode(dev0) != MLX5_ESWITCH_OFFLOADS ||
+ !MLX5_CAP_PORT_SELECTION(dev0, port_select_flow_table) ||
+ !MLX5_CAP_GEN(dev0, create_lag_when_not_master_up) ||
+ !mlx5_lag_check_prereq(ldev))
+ return -EOPNOTSUPP;
+
+ err = mlx5_mpesw_metadata_set(ldev);
+ if (err)
+ return err;
+
+ mlx5_lag_remove_devices(ldev);
+
+ err = mlx5_activate_lag(ldev, NULL, MLX5_LAG_MODE_MPESW, true);
if (err) {
- mlx5_core_warn(dev, "Failed to create LAG in MPESW mode (%d)\n", err);
- goto out_err;
+ mlx5_core_warn(dev0, "Failed to create LAG in MPESW mode (%d)\n", err);
+ goto err_add_devices;
}
+ dev0->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
+ mlx5_rescan_drivers_locked(dev0);
+ err = mlx5_eswitch_reload_reps(dev0->priv.eswitch);
+ if (!err)
+ err = mlx5_eswitch_reload_reps(dev1->priv.eswitch);
+ if (err)
+ goto err_rescan_drivers;
+
return 0;
-out_err:
- atomic_dec(&ldev->lag_mpesw.mpesw_rule_count);
+err_rescan_drivers:
+ dev0->priv.flags |= MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
+ mlx5_rescan_drivers_locked(dev0);
+ mlx5_deactivate_lag(ldev);
+err_add_devices:
+ mlx5_lag_add_devices(ldev);
+ mlx5_eswitch_reload_reps(dev0->priv.eswitch);
+ mlx5_eswitch_reload_reps(dev1->priv.eswitch);
+ mlx5_mpesw_metadata_cleanup(ldev);
return err;
}
-static void del_mpesw_rule(struct mlx5_lag *ldev)
+static void disable_mpesw(struct mlx5_lag *ldev)
{
- if (!atomic_dec_return(&ldev->lag_mpesw.mpesw_rule_count) &&
- ldev->mode == MLX5_LAG_MODE_MPESW)
+ if (ldev->mode == MLX5_LAG_MODE_MPESW) {
+ mlx5_mpesw_metadata_cleanup(ldev);
mlx5_disable_lag(ldev);
+ }
}
static void mlx5_mpesw_work(struct work_struct *work)
@@ -45,20 +127,27 @@ static void mlx5_mpesw_work(struct work_struct *work)
struct mlx5_mpesw_work_st *mpesww = container_of(work, struct mlx5_mpesw_work_st, work);
struct mlx5_lag *ldev = mpesww->lag;
+ mlx5_dev_list_lock();
mutex_lock(&ldev->lock);
+ if (ldev->mode_changes_in_progress) {
+ mpesww->result = -EAGAIN;
+ goto unlock;
+ }
+
if (mpesww->op == MLX5_MPESW_OP_ENABLE)
- mpesww->result = add_mpesw_rule(ldev);
+ mpesww->result = enable_mpesw(ldev);
else if (mpesww->op == MLX5_MPESW_OP_DISABLE)
- del_mpesw_rule(ldev);
+ disable_mpesw(ldev);
+unlock:
mutex_unlock(&ldev->lock);
-
+ mlx5_dev_list_unlock();
complete(&mpesww->comp);
}
static int mlx5_lag_mpesw_queue_work(struct mlx5_core_dev *dev,
enum mpesw_op op)
{
- struct mlx5_lag *ldev = dev->priv.lag;
+ struct mlx5_lag *ldev = mlx5_lag_dev(dev);
struct mlx5_mpesw_work_st *work;
int err = 0;
@@ -86,43 +175,36 @@ out:
return err;
}
-void mlx5_lag_del_mpesw_rule(struct mlx5_core_dev *dev)
+void mlx5_lag_mpesw_disable(struct mlx5_core_dev *dev)
{
mlx5_lag_mpesw_queue_work(dev, MLX5_MPESW_OP_DISABLE);
}
-int mlx5_lag_add_mpesw_rule(struct mlx5_core_dev *dev)
+int mlx5_lag_mpesw_enable(struct mlx5_core_dev *dev)
{
return mlx5_lag_mpesw_queue_work(dev, MLX5_MPESW_OP_ENABLE);
}
-int mlx5_lag_do_mirred(struct mlx5_core_dev *mdev, struct net_device *out_dev)
+int mlx5_lag_mpesw_do_mirred(struct mlx5_core_dev *mdev,
+ struct net_device *out_dev,
+ struct netlink_ext_ack *extack)
{
- struct mlx5_lag *ldev = mdev->priv.lag;
+ struct mlx5_lag *ldev = mlx5_lag_dev(mdev);
if (!netif_is_bond_master(out_dev) || !ldev)
return 0;
- if (ldev->mode == MLX5_LAG_MODE_MPESW)
- return -EOPNOTSUPP;
-
- return 0;
-}
-
-bool mlx5_lag_mpesw_is_activated(struct mlx5_core_dev *dev)
-{
- bool ret;
+ if (ldev->mode != MLX5_LAG_MODE_MPESW)
+ return 0;
- ret = dev->priv.lag && dev->priv.lag->mode == MLX5_LAG_MODE_MPESW;
- return ret;
+ NL_SET_ERR_MSG_MOD(extack, "can't forward to bond in mpesw mode");
+ return -EOPNOTSUPP;
}
-void mlx5_lag_mpesw_init(struct mlx5_lag *ldev)
+bool mlx5_lag_is_mpesw(struct mlx5_core_dev *dev)
{
- atomic_set(&ldev->lag_mpesw.mpesw_rule_count, 0);
-}
+ struct mlx5_lag *ldev = mlx5_lag_dev(dev);
-void mlx5_lag_mpesw_cleanup(struct mlx5_lag *ldev)
-{
- WARN_ON(atomic_read(&ldev->lag_mpesw.mpesw_rule_count));
+ return ldev && ldev->mode == MLX5_LAG_MODE_MPESW;
}
+EXPORT_SYMBOL(mlx5_lag_is_mpesw);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.h b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.h
index 88e8daffcf92..02520f27a033 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.h
@@ -9,17 +9,27 @@
struct lag_mpesw {
struct work_struct mpesw_work;
- atomic_t mpesw_rule_count;
+ u32 pf_metadata[MLX5_MAX_PORTS];
};
-int mlx5_lag_do_mirred(struct mlx5_core_dev *mdev, struct net_device *out_dev);
-bool mlx5_lag_mpesw_is_activated(struct mlx5_core_dev *dev);
-#if IS_ENABLED(CONFIG_MLX5_ESWITCH)
-void mlx5_lag_mpesw_init(struct mlx5_lag *ldev);
-void mlx5_lag_mpesw_cleanup(struct mlx5_lag *ldev);
-#else
-static inline void mlx5_lag_mpesw_init(struct mlx5_lag *ldev) {}
-static inline void mlx5_lag_mpesw_cleanup(struct mlx5_lag *ldev) {}
-#endif
+enum mpesw_op {
+ MLX5_MPESW_OP_ENABLE,
+ MLX5_MPESW_OP_DISABLE,
+};
+
+struct mlx5_mpesw_work_st {
+ struct work_struct work;
+ struct mlx5_lag *lag;
+ enum mpesw_op op;
+ struct completion comp;
+ int result;
+};
+
+int mlx5_lag_mpesw_do_mirred(struct mlx5_core_dev *mdev,
+ struct net_device *out_dev,
+ struct netlink_ext_ack *extack);
+bool mlx5_lag_is_mpesw(struct mlx5_core_dev *dev);
+void mlx5_lag_mpesw_disable(struct mlx5_core_dev *dev);
+int mlx5_lag_mpesw_enable(struct mlx5_core_dev *dev);
#endif /* __MLX5_LAG_MPESW_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
index 69318b143268..4c9a40211059 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
@@ -69,6 +69,13 @@ enum {
MLX5_MTPPS_FS_OUT_PULSE_DURATION_NS = BIT(0xa),
};
+enum {
+ MLX5_MTUTC_OPERATION_ADJUST_TIME_MIN = S16_MIN,
+ MLX5_MTUTC_OPERATION_ADJUST_TIME_MAX = S16_MAX,
+ MLX5_MTUTC_OPERATION_ADJUST_TIME_EXTENDED_MIN = -200000,
+ MLX5_MTUTC_OPERATION_ADJUST_TIME_EXTENDED_MAX = 200000,
+};
+
static bool mlx5_real_time_mode(struct mlx5_core_dev *mdev)
{
return (mlx5_is_real_time_rq(mdev) || mlx5_is_real_time_sq(mdev));
@@ -86,6 +93,22 @@ static bool mlx5_modify_mtutc_allowed(struct mlx5_core_dev *mdev)
return MLX5_CAP_MCAM_FEATURE(mdev, ptpcyc2realtime_modify);
}
+static bool mlx5_is_mtutc_time_adj_cap(struct mlx5_core_dev *mdev, s64 delta)
+{
+ s64 min = MLX5_MTUTC_OPERATION_ADJUST_TIME_MIN;
+ s64 max = MLX5_MTUTC_OPERATION_ADJUST_TIME_MAX;
+
+ if (MLX5_CAP_MCAM_FEATURE(mdev, mtutc_time_adjustment_extended_range)) {
+ min = MLX5_MTUTC_OPERATION_ADJUST_TIME_EXTENDED_MIN;
+ max = MLX5_MTUTC_OPERATION_ADJUST_TIME_EXTENDED_MAX;
+ }
+
+ if (delta < min || delta > max)
+ return false;
+
+ return true;
+}
+
static int mlx5_set_mtutc(struct mlx5_core_dev *dev, u32 *mtutc, u32 size)
{
u32 out[MLX5_ST_SZ_DW(mtutc_reg)] = {};
@@ -288,8 +311,8 @@ static int mlx5_ptp_adjtime_real_time(struct mlx5_core_dev *mdev, s64 delta)
if (!mlx5_modify_mtutc_allowed(mdev))
return 0;
- /* HW time adjustment range is s16. If out of range, settime instead */
- if (delta < S16_MIN || delta > S16_MAX) {
+ /* HW time adjustment range is checked. If out of range, settime instead */
+ if (!mlx5_is_mtutc_time_adj_cap(mdev, delta)) {
struct timespec64 ts;
s64 ns;
@@ -326,7 +349,20 @@ static int mlx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
return 0;
}
-static int mlx5_ptp_adjfreq_real_time(struct mlx5_core_dev *mdev, s32 freq)
+static int mlx5_ptp_adjphase(struct ptp_clock_info *ptp, s32 delta)
+{
+ struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
+ struct mlx5_core_dev *mdev;
+
+ mdev = container_of(clock, struct mlx5_core_dev, clock);
+
+ if (!mlx5_is_mtutc_time_adj_cap(mdev, delta))
+ return -ERANGE;
+
+ return mlx5_ptp_adjtime(ptp, delta);
+}
+
+static int mlx5_ptp_freq_adj_real_time(struct mlx5_core_dev *mdev, long scaled_ppm)
{
u32 in[MLX5_ST_SZ_DW(mtutc_reg)] = {};
@@ -334,7 +370,15 @@ static int mlx5_ptp_adjfreq_real_time(struct mlx5_core_dev *mdev, s32 freq)
return 0;
MLX5_SET(mtutc_reg, in, operation, MLX5_MTUTC_OPERATION_ADJUST_FREQ_UTC);
- MLX5_SET(mtutc_reg, in, freq_adjustment, freq);
+
+ if (MLX5_CAP_MCAM_FEATURE(mdev, mtutc_freq_adj_units)) {
+ MLX5_SET(mtutc_reg, in, freq_adj_units,
+ MLX5_MTUTC_FREQ_ADJ_UNITS_SCALED_PPM);
+ MLX5_SET(mtutc_reg, in, freq_adjustment, scaled_ppm);
+ } else {
+ MLX5_SET(mtutc_reg, in, freq_adj_units, MLX5_MTUTC_FREQ_ADJ_UNITS_PPB);
+ MLX5_SET(mtutc_reg, in, freq_adjustment, scaled_ppm_to_ppb(scaled_ppm));
+ }
return mlx5_set_mtutc(mdev, in, sizeof(in));
}
@@ -349,7 +393,8 @@ static int mlx5_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
int err;
mdev = container_of(clock, struct mlx5_core_dev, clock);
- err = mlx5_ptp_adjfreq_real_time(mdev, scaled_ppm_to_ppb(scaled_ppm));
+
+ err = mlx5_ptp_freq_adj_real_time(mdev, scaled_ppm);
if (err)
return err;
@@ -688,6 +733,7 @@ static const struct ptp_clock_info mlx5_ptp_clock_info = {
.n_pins = 0,
.pps = 0,
.adjfine = mlx5_ptp_adjfine,
+ .adjphase = mlx5_ptp_adjphase,
.adjtime = mlx5_ptp_adjtime,
.gettimex64 = mlx5_ptp_gettimex,
.settime64 = mlx5_ptp_settime,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c
index e995f8378df7..3a94b8f8031e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c
@@ -2,53 +2,253 @@
// Copyright (c) 2019 Mellanox Technologies.
#include "mlx5_core.h"
-#include "lib/mlx5.h"
+#include "lib/crypto.h"
-int mlx5_create_encryption_key(struct mlx5_core_dev *mdev,
- void *key, u32 sz_bytes,
- u32 key_type, u32 *p_key_id)
-{
- u32 in[MLX5_ST_SZ_DW(create_encryption_key_in)] = {};
- u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
- u32 sz_bits = sz_bytes * BITS_PER_BYTE;
- u8 general_obj_key_size;
- u64 general_obj_types;
- void *obj, *key_p;
- int err;
+#define MLX5_CRYPTO_DEK_POOLS_NUM (MLX5_ACCEL_OBJ_TYPE_KEY_NUM - 1)
+#define type2idx(type) ((type) - 1)
- obj = MLX5_ADDR_OF(create_encryption_key_in, in, encryption_key_object);
- key_p = MLX5_ADDR_OF(encryption_key_obj, obj, key);
+#define MLX5_CRYPTO_DEK_POOL_SYNC_THRESH 128
- general_obj_types = MLX5_CAP_GEN_64(mdev, general_obj_types);
- if (!(general_obj_types &
- MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY))
- return -EINVAL;
+/* calculate the num of DEKs, which are freed by any user
+ * (for example, TLS) after last revalidation in a pool or a bulk.
+ */
+#define MLX5_CRYPTO_DEK_CALC_FREED(a) \
+ ({ typeof(a) _a = (a); \
+ _a->num_deks - _a->avail_deks - _a->in_use_deks; })
+
+#define MLX5_CRYPTO_DEK_POOL_CALC_FREED(pool) MLX5_CRYPTO_DEK_CALC_FREED(pool)
+#define MLX5_CRYPTO_DEK_BULK_CALC_FREED(bulk) MLX5_CRYPTO_DEK_CALC_FREED(bulk)
+
+#define MLX5_CRYPTO_DEK_BULK_IDLE(bulk) \
+ ({ typeof(bulk) _bulk = (bulk); \
+ _bulk->avail_deks == _bulk->num_deks; })
+
+enum {
+ MLX5_CRYPTO_DEK_ALL_TYPE = BIT(0),
+};
+
+struct mlx5_crypto_dek_pool {
+ struct mlx5_core_dev *mdev;
+ u32 key_purpose;
+ int num_deks; /* the total number of keys in this pool */
+ int avail_deks; /* the number of available keys in this pool */
+ int in_use_deks; /* the number of being used keys in this pool */
+ struct mutex lock; /* protect the following lists, and the bulks */
+ struct list_head partial_list; /* some of keys are available */
+ struct list_head full_list; /* no available keys */
+ struct list_head avail_list; /* all keys are available to use */
+
+ /* No in-used keys, and all need to be synced.
+ * These bulks will be put to avail list after sync.
+ */
+ struct list_head sync_list;
+
+ bool syncing;
+ struct list_head wait_for_free;
+ struct work_struct sync_work;
+
+ spinlock_t destroy_lock; /* protect destroy_list */
+ struct list_head destroy_list;
+ struct work_struct destroy_work;
+};
+
+struct mlx5_crypto_dek_bulk {
+ struct mlx5_core_dev *mdev;
+ int base_obj_id;
+ int avail_start; /* the bit to start search */
+ int num_deks; /* the total number of keys in a bulk */
+ int avail_deks; /* the number of keys available, with need_sync bit 0 */
+ int in_use_deks; /* the number of keys being used, with in_use bit 1 */
+ struct list_head entry;
+
+ /* 0: not being used by any user, 1: otherwise */
+ unsigned long *in_use;
+
+ /* The bits are set when they are used, and reset after crypto_sync
+ * is executed. So, the value 0 means the key is newly created, or not
+ * used after sync, and 1 means it is in use, or freed but not synced
+ */
+ unsigned long *need_sync;
+};
+
+struct mlx5_crypto_dek_priv {
+ struct mlx5_core_dev *mdev;
+ int log_dek_obj_range;
+};
+
+struct mlx5_crypto_dek {
+ struct mlx5_crypto_dek_bulk *bulk;
+ struct list_head entry;
+ u32 obj_id;
+};
+
+u32 mlx5_crypto_dek_get_id(struct mlx5_crypto_dek *dek)
+{
+ return dek->obj_id;
+}
+
+static int mlx5_crypto_dek_get_key_sz(struct mlx5_core_dev *mdev,
+ u32 sz_bytes, u8 *key_sz_p)
+{
+ u32 sz_bits = sz_bytes * BITS_PER_BYTE;
switch (sz_bits) {
case 128:
- general_obj_key_size =
- MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_KEY_SIZE_128;
- key_p += sz_bytes;
+ *key_sz_p = MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_KEY_SIZE_128;
break;
case 256:
- general_obj_key_size =
- MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_KEY_SIZE_256;
+ *key_sz_p = MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_KEY_SIZE_256;
break;
default:
+ mlx5_core_err(mdev, "Crypto offload error, invalid key size (%u bits)\n",
+ sz_bits);
return -EINVAL;
}
- memcpy(key_p, key, sz_bytes);
+ return 0;
+}
+
+static int mlx5_crypto_dek_fill_key(struct mlx5_core_dev *mdev, u8 *key_obj,
+ const void *key, u32 sz_bytes)
+{
+ void *dst;
+ u8 key_sz;
+ int err;
+
+ err = mlx5_crypto_dek_get_key_sz(mdev, sz_bytes, &key_sz);
+ if (err)
+ return err;
+
+ MLX5_SET(encryption_key_obj, key_obj, key_size, key_sz);
+
+ if (sz_bytes == 16)
+ /* For key size of 128b the MSBs are reserved. */
+ dst = MLX5_ADDR_OF(encryption_key_obj, key_obj, key[1]);
+ else
+ dst = MLX5_ADDR_OF(encryption_key_obj, key_obj, key);
+
+ memcpy(dst, key, sz_bytes);
+
+ return 0;
+}
+
+static int mlx5_crypto_cmd_sync_crypto(struct mlx5_core_dev *mdev,
+ int crypto_type)
+{
+ u32 in[MLX5_ST_SZ_DW(sync_crypto_in)] = {};
+ int err;
+
+ mlx5_core_dbg(mdev,
+ "Execute SYNC_CRYPTO command with crypto_type(0x%x)\n",
+ crypto_type);
+
+ MLX5_SET(sync_crypto_in, in, opcode, MLX5_CMD_OP_SYNC_CRYPTO);
+ MLX5_SET(sync_crypto_in, in, crypto_type, crypto_type);
+
+ err = mlx5_cmd_exec_in(mdev, sync_crypto, in);
+ if (err)
+ mlx5_core_err(mdev,
+ "Failed to exec sync crypto, type=%d, err=%d\n",
+ crypto_type, err);
+
+ return err;
+}
+
+static int mlx5_crypto_create_dek_bulk(struct mlx5_core_dev *mdev,
+ u32 key_purpose, int log_obj_range,
+ u32 *obj_id)
+{
+ u32 in[MLX5_ST_SZ_DW(create_encryption_key_in)] = {};
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
+ void *obj, *param;
+ int err;
- MLX5_SET(encryption_key_obj, obj, key_size, general_obj_key_size);
- MLX5_SET(encryption_key_obj, obj, key_type, key_type);
MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
MLX5_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY);
+ param = MLX5_ADDR_OF(general_obj_in_cmd_hdr, in, op_param);
+ MLX5_SET(general_obj_create_param, param, log_obj_range, log_obj_range);
+
+ obj = MLX5_ADDR_OF(create_encryption_key_in, in, encryption_key_object);
+ MLX5_SET(encryption_key_obj, obj, key_purpose, key_purpose);
MLX5_SET(encryption_key_obj, obj, pd, mdev->mlx5e_res.hw_objs.pdn);
err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ if (err)
+ return err;
+
+ *obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+ mlx5_core_dbg(mdev, "DEK objects created, bulk=%d, obj_id=%d\n",
+ 1 << log_obj_range, *obj_id);
+
+ return 0;
+}
+
+static int mlx5_crypto_modify_dek_key(struct mlx5_core_dev *mdev,
+ const void *key, u32 sz_bytes, u32 key_purpose,
+ u32 obj_id, u32 obj_offset)
+{
+ u32 in[MLX5_ST_SZ_DW(modify_encryption_key_in)] = {};
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
+ void *obj, *param;
+ int err;
+
+ MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
+ MLX5_CMD_OP_MODIFY_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
+ MLX5_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, obj_id);
+
+ param = MLX5_ADDR_OF(general_obj_in_cmd_hdr, in, op_param);
+ MLX5_SET(general_obj_query_param, param, obj_offset, obj_offset);
+
+ obj = MLX5_ADDR_OF(modify_encryption_key_in, in, encryption_key_object);
+ MLX5_SET64(encryption_key_obj, obj, modify_field_select, 1);
+ MLX5_SET(encryption_key_obj, obj, key_purpose, key_purpose);
+ MLX5_SET(encryption_key_obj, obj, pd, mdev->mlx5e_res.hw_objs.pdn);
+
+ err = mlx5_crypto_dek_fill_key(mdev, obj, key, sz_bytes);
+ if (err)
+ return err;
+
+ err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+
+ /* avoid leaking key on the stack */
+ memzero_explicit(in, sizeof(in));
+
+ return err;
+}
+
+static int mlx5_crypto_create_dek_key(struct mlx5_core_dev *mdev,
+ const void *key, u32 sz_bytes,
+ u32 key_purpose, u32 *p_key_id)
+{
+ u32 in[MLX5_ST_SZ_DW(create_encryption_key_in)] = {};
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
+ u64 general_obj_types;
+ void *obj;
+ int err;
+
+ general_obj_types = MLX5_CAP_GEN_64(mdev, general_obj_types);
+ if (!(general_obj_types &
+ MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY))
+ return -EINVAL;
+
+ MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
+ MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
+ MLX5_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY);
+
+ obj = MLX5_ADDR_OF(create_encryption_key_in, in, encryption_key_object);
+ MLX5_SET(encryption_key_obj, obj, key_purpose, key_purpose);
+ MLX5_SET(encryption_key_obj, obj, pd, mdev->mlx5e_res.hw_objs.pdn);
+
+ err = mlx5_crypto_dek_fill_key(mdev, obj, key, sz_bytes);
+ if (err)
+ return err;
+
+ err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
if (!err)
*p_key_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
@@ -58,7 +258,7 @@ int mlx5_create_encryption_key(struct mlx5_core_dev *mdev,
return err;
}
-void mlx5_destroy_encryption_key(struct mlx5_core_dev *mdev, u32 key_id)
+static void mlx5_crypto_destroy_dek_key(struct mlx5_core_dev *mdev, u32 key_id)
{
u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
@@ -71,3 +271,504 @@ void mlx5_destroy_encryption_key(struct mlx5_core_dev *mdev, u32 key_id)
mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
}
+
+int mlx5_create_encryption_key(struct mlx5_core_dev *mdev,
+ const void *key, u32 sz_bytes,
+ u32 key_type, u32 *p_key_id)
+{
+ return mlx5_crypto_create_dek_key(mdev, key, sz_bytes, key_type, p_key_id);
+}
+
+void mlx5_destroy_encryption_key(struct mlx5_core_dev *mdev, u32 key_id)
+{
+ mlx5_crypto_destroy_dek_key(mdev, key_id);
+}
+
+static struct mlx5_crypto_dek_bulk *
+mlx5_crypto_dek_bulk_create(struct mlx5_crypto_dek_pool *pool)
+{
+ struct mlx5_crypto_dek_priv *dek_priv = pool->mdev->mlx5e_res.dek_priv;
+ struct mlx5_core_dev *mdev = pool->mdev;
+ struct mlx5_crypto_dek_bulk *bulk;
+ int num_deks, base_obj_id;
+ int err;
+
+ bulk = kzalloc(sizeof(*bulk), GFP_KERNEL);
+ if (!bulk)
+ return ERR_PTR(-ENOMEM);
+
+ num_deks = 1 << dek_priv->log_dek_obj_range;
+ bulk->need_sync = bitmap_zalloc(num_deks, GFP_KERNEL);
+ if (!bulk->need_sync) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ bulk->in_use = bitmap_zalloc(num_deks, GFP_KERNEL);
+ if (!bulk->in_use) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ err = mlx5_crypto_create_dek_bulk(mdev, pool->key_purpose,
+ dek_priv->log_dek_obj_range,
+ &base_obj_id);
+ if (err)
+ goto err_out;
+
+ bulk->base_obj_id = base_obj_id;
+ bulk->num_deks = num_deks;
+ bulk->avail_deks = num_deks;
+ bulk->mdev = mdev;
+
+ return bulk;
+
+err_out:
+ bitmap_free(bulk->in_use);
+ bitmap_free(bulk->need_sync);
+ kfree(bulk);
+ return ERR_PTR(err);
+}
+
+static struct mlx5_crypto_dek_bulk *
+mlx5_crypto_dek_pool_add_bulk(struct mlx5_crypto_dek_pool *pool)
+{
+ struct mlx5_crypto_dek_bulk *bulk;
+
+ bulk = mlx5_crypto_dek_bulk_create(pool);
+ if (IS_ERR(bulk))
+ return bulk;
+
+ pool->avail_deks += bulk->num_deks;
+ pool->num_deks += bulk->num_deks;
+ list_add(&bulk->entry, &pool->partial_list);
+
+ return bulk;
+}
+
+static void mlx5_crypto_dek_bulk_free(struct mlx5_crypto_dek_bulk *bulk)
+{
+ mlx5_crypto_destroy_dek_key(bulk->mdev, bulk->base_obj_id);
+ bitmap_free(bulk->need_sync);
+ bitmap_free(bulk->in_use);
+ kfree(bulk);
+}
+
+static void mlx5_crypto_dek_pool_remove_bulk(struct mlx5_crypto_dek_pool *pool,
+ struct mlx5_crypto_dek_bulk *bulk,
+ bool delay)
+{
+ pool->num_deks -= bulk->num_deks;
+ pool->avail_deks -= bulk->avail_deks;
+ pool->in_use_deks -= bulk->in_use_deks;
+ list_del(&bulk->entry);
+ if (!delay)
+ mlx5_crypto_dek_bulk_free(bulk);
+}
+
+static struct mlx5_crypto_dek_bulk *
+mlx5_crypto_dek_pool_pop(struct mlx5_crypto_dek_pool *pool, u32 *obj_offset)
+{
+ struct mlx5_crypto_dek_bulk *bulk;
+ int pos;
+
+ mutex_lock(&pool->lock);
+ bulk = list_first_entry_or_null(&pool->partial_list,
+ struct mlx5_crypto_dek_bulk, entry);
+
+ if (bulk) {
+ pos = find_next_zero_bit(bulk->need_sync, bulk->num_deks,
+ bulk->avail_start);
+ if (pos == bulk->num_deks) {
+ mlx5_core_err(pool->mdev, "Wrong DEK bulk avail_start.\n");
+ pos = find_first_zero_bit(bulk->need_sync, bulk->num_deks);
+ }
+ WARN_ON(pos == bulk->num_deks);
+ } else {
+ bulk = list_first_entry_or_null(&pool->avail_list,
+ struct mlx5_crypto_dek_bulk,
+ entry);
+ if (bulk) {
+ list_move(&bulk->entry, &pool->partial_list);
+ } else {
+ bulk = mlx5_crypto_dek_pool_add_bulk(pool);
+ if (IS_ERR(bulk))
+ goto out;
+ }
+ pos = 0;
+ }
+
+ *obj_offset = pos;
+ bitmap_set(bulk->need_sync, pos, 1);
+ bitmap_set(bulk->in_use, pos, 1);
+ bulk->in_use_deks++;
+ bulk->avail_deks--;
+ if (!bulk->avail_deks) {
+ list_move(&bulk->entry, &pool->full_list);
+ bulk->avail_start = bulk->num_deks;
+ } else {
+ bulk->avail_start = pos + 1;
+ }
+ pool->avail_deks--;
+ pool->in_use_deks++;
+
+out:
+ mutex_unlock(&pool->lock);
+ return bulk;
+}
+
+static bool mlx5_crypto_dek_need_sync(struct mlx5_crypto_dek_pool *pool)
+{
+ return !pool->syncing &&
+ MLX5_CRYPTO_DEK_POOL_CALC_FREED(pool) > MLX5_CRYPTO_DEK_POOL_SYNC_THRESH;
+}
+
+static int mlx5_crypto_dek_free_locked(struct mlx5_crypto_dek_pool *pool,
+ struct mlx5_crypto_dek *dek)
+{
+ struct mlx5_crypto_dek_bulk *bulk = dek->bulk;
+ int obj_offset;
+ bool old_val;
+ int err = 0;
+
+ obj_offset = dek->obj_id - bulk->base_obj_id;
+ old_val = test_and_clear_bit(obj_offset, bulk->in_use);
+ WARN_ON_ONCE(!old_val);
+ if (!old_val) {
+ err = -ENOENT;
+ goto out_free;
+ }
+ pool->in_use_deks--;
+ bulk->in_use_deks--;
+ if (!bulk->avail_deks && !bulk->in_use_deks)
+ list_move(&bulk->entry, &pool->sync_list);
+
+ if (mlx5_crypto_dek_need_sync(pool) && schedule_work(&pool->sync_work))
+ pool->syncing = true;
+
+out_free:
+ kfree(dek);
+ return err;
+}
+
+static int mlx5_crypto_dek_pool_push(struct mlx5_crypto_dek_pool *pool,
+ struct mlx5_crypto_dek *dek)
+{
+ int err = 0;
+
+ mutex_lock(&pool->lock);
+ if (pool->syncing)
+ list_add(&dek->entry, &pool->wait_for_free);
+ else
+ err = mlx5_crypto_dek_free_locked(pool, dek);
+ mutex_unlock(&pool->lock);
+
+ return err;
+}
+
+/* Update the bits for a bulk while sync, and avail_next for search.
+ * As the combinations of (need_sync, in_use) of one DEK are
+ * - (0,0) means the key is ready for use,
+ * - (1,1) means the key is currently being used by a user,
+ * - (1,0) means the key is freed, and waiting for being synced,
+ * - (0,1) is invalid state.
+ * the number of revalidated DEKs can be calculated by
+ * hweight_long(need_sync XOR in_use), and the need_sync bits can be reset
+ * by simply copying from in_use bits.
+ */
+static void mlx5_crypto_dek_bulk_reset_synced(struct mlx5_crypto_dek_pool *pool,
+ struct mlx5_crypto_dek_bulk *bulk)
+{
+ unsigned long *need_sync = bulk->need_sync;
+ unsigned long *in_use = bulk->in_use;
+ int i, freed, reused, avail_next;
+ bool first = true;
+
+ freed = MLX5_CRYPTO_DEK_BULK_CALC_FREED(bulk);
+
+ for (i = 0; freed && i < BITS_TO_LONGS(bulk->num_deks);
+ i++, need_sync++, in_use++) {
+ reused = hweight_long((*need_sync) ^ (*in_use));
+ if (!reused)
+ continue;
+
+ bulk->avail_deks += reused;
+ pool->avail_deks += reused;
+ *need_sync = *in_use;
+ if (first) {
+ avail_next = i * BITS_PER_TYPE(long);
+ if (bulk->avail_start > avail_next)
+ bulk->avail_start = avail_next;
+ first = false;
+ }
+
+ freed -= reused;
+ }
+}
+
+/* Return true if the bulk is reused, false if destroyed with delay */
+static bool mlx5_crypto_dek_bulk_handle_avail(struct mlx5_crypto_dek_pool *pool,
+ struct mlx5_crypto_dek_bulk *bulk,
+ struct list_head *destroy_list)
+{
+ if (list_empty(&pool->avail_list)) {
+ list_move(&bulk->entry, &pool->avail_list);
+ return true;
+ }
+
+ mlx5_crypto_dek_pool_remove_bulk(pool, bulk, true);
+ list_add(&bulk->entry, destroy_list);
+ return false;
+}
+
+static void mlx5_crypto_dek_pool_splice_destroy_list(struct mlx5_crypto_dek_pool *pool,
+ struct list_head *list,
+ struct list_head *head)
+{
+ spin_lock(&pool->destroy_lock);
+ list_splice_init(list, head);
+ spin_unlock(&pool->destroy_lock);
+}
+
+static void mlx5_crypto_dek_pool_free_wait_keys(struct mlx5_crypto_dek_pool *pool)
+{
+ struct mlx5_crypto_dek *dek, *next;
+
+ list_for_each_entry_safe(dek, next, &pool->wait_for_free, entry) {
+ list_del(&dek->entry);
+ mlx5_crypto_dek_free_locked(pool, dek);
+ }
+}
+
+/* For all the bulks in each list, reset the bits while sync.
+ * Move them to different lists according to the number of available DEKs.
+ * Destrory all the idle bulks, except one for quick service.
+ * And free DEKs in the waiting list at the end of this func.
+ */
+static void mlx5_crypto_dek_pool_reset_synced(struct mlx5_crypto_dek_pool *pool)
+{
+ struct mlx5_crypto_dek_bulk *bulk, *tmp;
+ LIST_HEAD(destroy_list);
+
+ list_for_each_entry_safe(bulk, tmp, &pool->partial_list, entry) {
+ mlx5_crypto_dek_bulk_reset_synced(pool, bulk);
+ if (MLX5_CRYPTO_DEK_BULK_IDLE(bulk))
+ mlx5_crypto_dek_bulk_handle_avail(pool, bulk, &destroy_list);
+ }
+
+ list_for_each_entry_safe(bulk, tmp, &pool->full_list, entry) {
+ mlx5_crypto_dek_bulk_reset_synced(pool, bulk);
+
+ if (!bulk->avail_deks)
+ continue;
+
+ if (MLX5_CRYPTO_DEK_BULK_IDLE(bulk))
+ mlx5_crypto_dek_bulk_handle_avail(pool, bulk, &destroy_list);
+ else
+ list_move(&bulk->entry, &pool->partial_list);
+ }
+
+ list_for_each_entry_safe(bulk, tmp, &pool->sync_list, entry) {
+ bulk->avail_deks = bulk->num_deks;
+ pool->avail_deks += bulk->num_deks;
+ if (mlx5_crypto_dek_bulk_handle_avail(pool, bulk, &destroy_list)) {
+ memset(bulk->need_sync, 0, BITS_TO_BYTES(bulk->num_deks));
+ bulk->avail_start = 0;
+ }
+ }
+
+ mlx5_crypto_dek_pool_free_wait_keys(pool);
+
+ if (!list_empty(&destroy_list)) {
+ mlx5_crypto_dek_pool_splice_destroy_list(pool, &destroy_list,
+ &pool->destroy_list);
+ schedule_work(&pool->destroy_work);
+ }
+}
+
+static void mlx5_crypto_dek_sync_work_fn(struct work_struct *work)
+{
+ struct mlx5_crypto_dek_pool *pool =
+ container_of(work, struct mlx5_crypto_dek_pool, sync_work);
+ int err;
+
+ err = mlx5_crypto_cmd_sync_crypto(pool->mdev, BIT(pool->key_purpose));
+ mutex_lock(&pool->lock);
+ if (!err)
+ mlx5_crypto_dek_pool_reset_synced(pool);
+ pool->syncing = false;
+ mutex_unlock(&pool->lock);
+}
+
+struct mlx5_crypto_dek *mlx5_crypto_dek_create(struct mlx5_crypto_dek_pool *dek_pool,
+ const void *key, u32 sz_bytes)
+{
+ struct mlx5_crypto_dek_priv *dek_priv = dek_pool->mdev->mlx5e_res.dek_priv;
+ struct mlx5_core_dev *mdev = dek_pool->mdev;
+ u32 key_purpose = dek_pool->key_purpose;
+ struct mlx5_crypto_dek_bulk *bulk;
+ struct mlx5_crypto_dek *dek;
+ int obj_offset;
+ int err;
+
+ dek = kzalloc(sizeof(*dek), GFP_KERNEL);
+ if (!dek)
+ return ERR_PTR(-ENOMEM);
+
+ if (!dek_priv) {
+ err = mlx5_crypto_create_dek_key(mdev, key, sz_bytes,
+ key_purpose, &dek->obj_id);
+ goto out;
+ }
+
+ bulk = mlx5_crypto_dek_pool_pop(dek_pool, &obj_offset);
+ if (IS_ERR(bulk)) {
+ err = PTR_ERR(bulk);
+ goto out;
+ }
+
+ dek->bulk = bulk;
+ dek->obj_id = bulk->base_obj_id + obj_offset;
+ err = mlx5_crypto_modify_dek_key(mdev, key, sz_bytes, key_purpose,
+ bulk->base_obj_id, obj_offset);
+ if (err) {
+ mlx5_crypto_dek_pool_push(dek_pool, dek);
+ return ERR_PTR(err);
+ }
+
+out:
+ if (err) {
+ kfree(dek);
+ return ERR_PTR(err);
+ }
+
+ return dek;
+}
+
+void mlx5_crypto_dek_destroy(struct mlx5_crypto_dek_pool *dek_pool,
+ struct mlx5_crypto_dek *dek)
+{
+ struct mlx5_crypto_dek_priv *dek_priv = dek_pool->mdev->mlx5e_res.dek_priv;
+ struct mlx5_core_dev *mdev = dek_pool->mdev;
+
+ if (!dek_priv) {
+ mlx5_crypto_destroy_dek_key(mdev, dek->obj_id);
+ kfree(dek);
+ } else {
+ mlx5_crypto_dek_pool_push(dek_pool, dek);
+ }
+}
+
+static void mlx5_crypto_dek_free_destroy_list(struct list_head *destroy_list)
+{
+ struct mlx5_crypto_dek_bulk *bulk, *tmp;
+
+ list_for_each_entry_safe(bulk, tmp, destroy_list, entry)
+ mlx5_crypto_dek_bulk_free(bulk);
+}
+
+static void mlx5_crypto_dek_destroy_work_fn(struct work_struct *work)
+{
+ struct mlx5_crypto_dek_pool *pool =
+ container_of(work, struct mlx5_crypto_dek_pool, destroy_work);
+ LIST_HEAD(destroy_list);
+
+ mlx5_crypto_dek_pool_splice_destroy_list(pool, &pool->destroy_list,
+ &destroy_list);
+ mlx5_crypto_dek_free_destroy_list(&destroy_list);
+}
+
+struct mlx5_crypto_dek_pool *
+mlx5_crypto_dek_pool_create(struct mlx5_core_dev *mdev, int key_purpose)
+{
+ struct mlx5_crypto_dek_pool *pool;
+
+ pool = kzalloc(sizeof(*pool), GFP_KERNEL);
+ if (!pool)
+ return ERR_PTR(-ENOMEM);
+
+ pool->mdev = mdev;
+ pool->key_purpose = key_purpose;
+
+ mutex_init(&pool->lock);
+ INIT_LIST_HEAD(&pool->avail_list);
+ INIT_LIST_HEAD(&pool->partial_list);
+ INIT_LIST_HEAD(&pool->full_list);
+ INIT_LIST_HEAD(&pool->sync_list);
+ INIT_LIST_HEAD(&pool->wait_for_free);
+ INIT_WORK(&pool->sync_work, mlx5_crypto_dek_sync_work_fn);
+ spin_lock_init(&pool->destroy_lock);
+ INIT_LIST_HEAD(&pool->destroy_list);
+ INIT_WORK(&pool->destroy_work, mlx5_crypto_dek_destroy_work_fn);
+
+ return pool;
+}
+
+void mlx5_crypto_dek_pool_destroy(struct mlx5_crypto_dek_pool *pool)
+{
+ struct mlx5_crypto_dek_bulk *bulk, *tmp;
+
+ cancel_work_sync(&pool->sync_work);
+ cancel_work_sync(&pool->destroy_work);
+
+ mlx5_crypto_dek_pool_free_wait_keys(pool);
+
+ list_for_each_entry_safe(bulk, tmp, &pool->avail_list, entry)
+ mlx5_crypto_dek_pool_remove_bulk(pool, bulk, false);
+
+ list_for_each_entry_safe(bulk, tmp, &pool->full_list, entry)
+ mlx5_crypto_dek_pool_remove_bulk(pool, bulk, false);
+
+ list_for_each_entry_safe(bulk, tmp, &pool->sync_list, entry)
+ mlx5_crypto_dek_pool_remove_bulk(pool, bulk, false);
+
+ list_for_each_entry_safe(bulk, tmp, &pool->partial_list, entry)
+ mlx5_crypto_dek_pool_remove_bulk(pool, bulk, false);
+
+ mlx5_crypto_dek_free_destroy_list(&pool->destroy_list);
+
+ mutex_destroy(&pool->lock);
+
+ kfree(pool);
+}
+
+void mlx5_crypto_dek_cleanup(struct mlx5_crypto_dek_priv *dek_priv)
+{
+ if (!dek_priv)
+ return;
+
+ kfree(dek_priv);
+}
+
+struct mlx5_crypto_dek_priv *mlx5_crypto_dek_init(struct mlx5_core_dev *mdev)
+{
+ struct mlx5_crypto_dek_priv *dek_priv;
+ int err;
+
+ if (!MLX5_CAP_CRYPTO(mdev, log_dek_max_alloc))
+ return NULL;
+
+ dek_priv = kzalloc(sizeof(*dek_priv), GFP_KERNEL);
+ if (!dek_priv)
+ return ERR_PTR(-ENOMEM);
+
+ dek_priv->mdev = mdev;
+ dek_priv->log_dek_obj_range = min_t(int, 12,
+ MLX5_CAP_CRYPTO(mdev, log_dek_max_alloc));
+
+ /* sync all types of objects */
+ err = mlx5_crypto_cmd_sync_crypto(mdev, MLX5_CRYPTO_DEK_ALL_TYPE);
+ if (err)
+ goto err_sync_crypto;
+
+ mlx5_core_dbg(mdev, "Crypto DEK enabled, %d deks per alloc (max %d), total %d\n",
+ 1 << dek_priv->log_dek_obj_range,
+ 1 << MLX5_CAP_CRYPTO(mdev, log_dek_max_alloc),
+ 1 << MLX5_CAP_CRYPTO(mdev, log_max_num_deks));
+
+ return dek_priv;
+
+err_sync_crypto:
+ kfree(dek_priv);
+ return ERR_PTR(err);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.h
new file mode 100644
index 000000000000..c819c047bb9c
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#ifndef __MLX5_LIB_CRYPTO_H__
+#define __MLX5_LIB_CRYPTO_H__
+
+enum {
+ MLX5_ACCEL_OBJ_TLS_KEY = MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_PURPOSE_TLS,
+ MLX5_ACCEL_OBJ_IPSEC_KEY = MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_PURPOSE_IPSEC,
+ MLX5_ACCEL_OBJ_MACSEC_KEY = MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_PURPOSE_MACSEC,
+ MLX5_ACCEL_OBJ_TYPE_KEY_NUM,
+};
+
+int mlx5_create_encryption_key(struct mlx5_core_dev *mdev,
+ const void *key, u32 sz_bytes,
+ u32 key_type, u32 *p_key_id);
+
+void mlx5_destroy_encryption_key(struct mlx5_core_dev *mdev, u32 key_id);
+
+struct mlx5_crypto_dek_pool;
+struct mlx5_crypto_dek;
+
+struct mlx5_crypto_dek_pool *mlx5_crypto_dek_pool_create(struct mlx5_core_dev *mdev,
+ int key_purpose);
+void mlx5_crypto_dek_pool_destroy(struct mlx5_crypto_dek_pool *pool);
+struct mlx5_crypto_dek *mlx5_crypto_dek_create(struct mlx5_crypto_dek_pool *dek_pool,
+ const void *key, u32 sz_bytes);
+void mlx5_crypto_dek_destroy(struct mlx5_crypto_dek_pool *dek_pool,
+ struct mlx5_crypto_dek *dek);
+u32 mlx5_crypto_dek_get_id(struct mlx5_crypto_dek *dek);
+
+struct mlx5_crypto_dek_priv *mlx5_crypto_dek_init(struct mlx5_core_dev *mdev);
+void mlx5_crypto_dek_cleanup(struct mlx5_crypto_dek_priv *dek_priv);
+#endif /* __MLX5_LIB_CRYPTO_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
index df58cba37930..81ed91fee59b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
@@ -214,7 +214,7 @@ create_chain_restore(struct fs_chain *chain)
struct mlx5_eswitch *esw = chain->chains->dev->priv.eswitch;
u8 modact[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
struct mlx5_fs_chains *chains = chain->chains;
- enum mlx5e_tc_attr_to_reg chain_to_reg;
+ enum mlx5e_tc_attr_to_reg mapped_obj_to_reg;
struct mlx5_modify_hdr *mod_hdr;
u32 index;
int err;
@@ -242,7 +242,7 @@ create_chain_restore(struct fs_chain *chain)
chain->id = index;
if (chains->ns == MLX5_FLOW_NAMESPACE_FDB) {
- chain_to_reg = CHAIN_TO_REG;
+ mapped_obj_to_reg = MAPPED_OBJ_TO_REG;
chain->restore_rule = esw_add_restore_rule(esw, chain->id);
if (IS_ERR(chain->restore_rule)) {
err = PTR_ERR(chain->restore_rule);
@@ -253,7 +253,7 @@ create_chain_restore(struct fs_chain *chain)
* since we write the metadata to reg_b
* that is passed to SW directly.
*/
- chain_to_reg = NIC_CHAIN_TO_REG;
+ mapped_obj_to_reg = NIC_MAPPED_OBJ_TO_REG;
} else {
err = -EINVAL;
goto err_rule;
@@ -261,12 +261,12 @@ create_chain_restore(struct fs_chain *chain)
MLX5_SET(set_action_in, modact, action_type, MLX5_ACTION_TYPE_SET);
MLX5_SET(set_action_in, modact, field,
- mlx5e_tc_attr_to_reg_mappings[chain_to_reg].mfield);
+ mlx5e_tc_attr_to_reg_mappings[mapped_obj_to_reg].mfield);
MLX5_SET(set_action_in, modact, offset,
- mlx5e_tc_attr_to_reg_mappings[chain_to_reg].moffset);
+ mlx5e_tc_attr_to_reg_mappings[mapped_obj_to_reg].moffset);
MLX5_SET(set_action_in, modact, length,
- mlx5e_tc_attr_to_reg_mappings[chain_to_reg].mlen == 32 ?
- 0 : mlx5e_tc_attr_to_reg_mappings[chain_to_reg].mlen);
+ mlx5e_tc_attr_to_reg_mappings[mapped_obj_to_reg].mlen == 32 ?
+ 0 : mlx5e_tc_attr_to_reg_mappings[mapped_obj_to_reg].mlen);
MLX5_SET(set_action_in, modact, data, chain->id);
mod_hdr = mlx5_modify_header_alloc(chains->dev, chains->ns,
1, modact);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.c
new file mode 100644
index 000000000000..2c53589b765d
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.c
@@ -0,0 +1,368 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#include "fs_core.h"
+#include "lib/ipsec_fs_roce.h"
+#include "mlx5_core.h"
+
+struct mlx5_ipsec_miss {
+ struct mlx5_flow_group *group;
+ struct mlx5_flow_handle *rule;
+};
+
+struct mlx5_ipsec_rx_roce {
+ struct mlx5_flow_group *g;
+ struct mlx5_flow_table *ft;
+ struct mlx5_flow_handle *rule;
+ struct mlx5_ipsec_miss roce_miss;
+
+ struct mlx5_flow_table *ft_rdma;
+ struct mlx5_flow_namespace *ns_rdma;
+};
+
+struct mlx5_ipsec_tx_roce {
+ struct mlx5_flow_group *g;
+ struct mlx5_flow_table *ft;
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_namespace *ns;
+};
+
+struct mlx5_ipsec_fs {
+ struct mlx5_ipsec_rx_roce ipv4_rx;
+ struct mlx5_ipsec_rx_roce ipv6_rx;
+ struct mlx5_ipsec_tx_roce tx;
+};
+
+static void ipsec_fs_roce_setup_udp_dport(struct mlx5_flow_spec *spec,
+ u16 dport)
+{
+ spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_protocol);
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, IPPROTO_UDP);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.udp_dport);
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.udp_dport, dport);
+}
+
+static int
+ipsec_fs_roce_rx_rule_setup(struct mlx5_core_dev *mdev,
+ struct mlx5_flow_destination *default_dst,
+ struct mlx5_ipsec_rx_roce *roce)
+{
+ struct mlx5_flow_destination dst = {};
+ MLX5_DECLARE_FLOW_ACT(flow_act);
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_spec *spec;
+ int err = 0;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return -ENOMEM;
+
+ ipsec_fs_roce_setup_udp_dport(spec, ROCE_V2_UDP_DPORT);
+
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ dst.type = MLX5_FLOW_DESTINATION_TYPE_TABLE_TYPE;
+ dst.ft = roce->ft_rdma;
+ rule = mlx5_add_flow_rules(roce->ft, spec, &flow_act, &dst, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(mdev, "Fail to add RX RoCE IPsec rule err=%d\n",
+ err);
+ goto fail_add_rule;
+ }
+
+ roce->rule = rule;
+
+ memset(spec, 0, sizeof(*spec));
+ rule = mlx5_add_flow_rules(roce->ft, spec, &flow_act, default_dst, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(mdev, "Fail to add RX RoCE IPsec miss rule err=%d\n",
+ err);
+ goto fail_add_default_rule;
+ }
+
+ roce->roce_miss.rule = rule;
+
+ kvfree(spec);
+ return 0;
+
+fail_add_default_rule:
+ mlx5_del_flow_rules(roce->rule);
+fail_add_rule:
+ kvfree(spec);
+ return err;
+}
+
+static int ipsec_fs_roce_tx_rule_setup(struct mlx5_core_dev *mdev,
+ struct mlx5_ipsec_tx_roce *roce,
+ struct mlx5_flow_table *pol_ft)
+{
+ struct mlx5_flow_destination dst = {};
+ MLX5_DECLARE_FLOW_ACT(flow_act);
+ struct mlx5_flow_handle *rule;
+ int err = 0;
+
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ dst.type = MLX5_FLOW_DESTINATION_TYPE_TABLE_TYPE;
+ dst.ft = pol_ft;
+ rule = mlx5_add_flow_rules(roce->ft, NULL, &flow_act, &dst,
+ 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(mdev, "Fail to add TX RoCE IPsec rule err=%d\n",
+ err);
+ goto out;
+ }
+ roce->rule = rule;
+
+out:
+ return err;
+}
+
+void mlx5_ipsec_fs_roce_tx_destroy(struct mlx5_ipsec_fs *ipsec_roce)
+{
+ struct mlx5_ipsec_tx_roce *tx_roce;
+
+ if (!ipsec_roce)
+ return;
+
+ tx_roce = &ipsec_roce->tx;
+
+ mlx5_del_flow_rules(tx_roce->rule);
+ mlx5_destroy_flow_group(tx_roce->g);
+ mlx5_destroy_flow_table(tx_roce->ft);
+}
+
+#define MLX5_TX_ROCE_GROUP_SIZE BIT(0)
+
+int mlx5_ipsec_fs_roce_tx_create(struct mlx5_core_dev *mdev,
+ struct mlx5_ipsec_fs *ipsec_roce,
+ struct mlx5_flow_table *pol_ft)
+{
+ struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_ipsec_tx_roce *roce;
+ struct mlx5_flow_table *ft;
+ struct mlx5_flow_group *g;
+ int ix = 0;
+ int err;
+ u32 *in;
+
+ if (!ipsec_roce)
+ return 0;
+
+ roce = &ipsec_roce->tx;
+
+ in = kvzalloc(MLX5_ST_SZ_BYTES(create_flow_group_in), GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ ft_attr.max_fte = 1;
+ ft = mlx5_create_flow_table(roce->ns, &ft_attr);
+ if (IS_ERR(ft)) {
+ err = PTR_ERR(ft);
+ mlx5_core_err(mdev, "Fail to create RoCE IPsec tx ft err=%d\n", err);
+ return err;
+ }
+
+ roce->ft = ft;
+
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += MLX5_TX_ROCE_GROUP_SIZE;
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ g = mlx5_create_flow_group(ft, in);
+ if (IS_ERR(g)) {
+ err = PTR_ERR(g);
+ mlx5_core_err(mdev, "Fail to create RoCE IPsec tx group err=%d\n", err);
+ goto fail;
+ }
+ roce->g = g;
+
+ err = ipsec_fs_roce_tx_rule_setup(mdev, roce, pol_ft);
+ if (err) {
+ mlx5_core_err(mdev, "Fail to create RoCE IPsec tx rules err=%d\n", err);
+ goto rule_fail;
+ }
+
+ return 0;
+
+rule_fail:
+ mlx5_destroy_flow_group(roce->g);
+fail:
+ mlx5_destroy_flow_table(ft);
+ return err;
+}
+
+struct mlx5_flow_table *mlx5_ipsec_fs_roce_ft_get(struct mlx5_ipsec_fs *ipsec_roce, u32 family)
+{
+ struct mlx5_ipsec_rx_roce *rx_roce;
+
+ if (!ipsec_roce)
+ return NULL;
+
+ rx_roce = (family == AF_INET) ? &ipsec_roce->ipv4_rx :
+ &ipsec_roce->ipv6_rx;
+
+ return rx_roce->ft;
+}
+
+void mlx5_ipsec_fs_roce_rx_destroy(struct mlx5_ipsec_fs *ipsec_roce, u32 family)
+{
+ struct mlx5_ipsec_rx_roce *rx_roce;
+
+ if (!ipsec_roce)
+ return;
+
+ rx_roce = (family == AF_INET) ? &ipsec_roce->ipv4_rx :
+ &ipsec_roce->ipv6_rx;
+
+ mlx5_del_flow_rules(rx_roce->roce_miss.rule);
+ mlx5_del_flow_rules(rx_roce->rule);
+ mlx5_destroy_flow_table(rx_roce->ft_rdma);
+ mlx5_destroy_flow_group(rx_roce->roce_miss.group);
+ mlx5_destroy_flow_group(rx_roce->g);
+ mlx5_destroy_flow_table(rx_roce->ft);
+}
+
+#define MLX5_RX_ROCE_GROUP_SIZE BIT(0)
+
+int mlx5_ipsec_fs_roce_rx_create(struct mlx5_core_dev *mdev,
+ struct mlx5_ipsec_fs *ipsec_roce,
+ struct mlx5_flow_namespace *ns,
+ struct mlx5_flow_destination *default_dst,
+ u32 family, u32 level, u32 prio)
+{
+ struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_ipsec_rx_roce *roce;
+ struct mlx5_flow_table *ft;
+ struct mlx5_flow_group *g;
+ void *outer_headers_c;
+ int ix = 0;
+ u32 *in;
+ int err;
+ u8 *mc;
+
+ if (!ipsec_roce)
+ return 0;
+
+ roce = (family == AF_INET) ? &ipsec_roce->ipv4_rx :
+ &ipsec_roce->ipv6_rx;
+
+ ft_attr.max_fte = 2;
+ ft_attr.level = level;
+ ft_attr.prio = prio;
+ ft = mlx5_create_flow_table(ns, &ft_attr);
+ if (IS_ERR(ft)) {
+ err = PTR_ERR(ft);
+ mlx5_core_err(mdev, "Fail to create RoCE IPsec rx ft at nic err=%d\n", err);
+ return err;
+ }
+
+ roce->ft = ft;
+
+ in = kvzalloc(MLX5_ST_SZ_BYTES(create_flow_group_in), GFP_KERNEL);
+ if (!in) {
+ err = -ENOMEM;
+ goto fail_nomem;
+ }
+
+ mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
+ outer_headers_c = MLX5_ADDR_OF(fte_match_param, mc, outer_headers);
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol);
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, udp_dport);
+
+ MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += MLX5_RX_ROCE_GROUP_SIZE;
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ g = mlx5_create_flow_group(ft, in);
+ if (IS_ERR(g)) {
+ err = PTR_ERR(g);
+ mlx5_core_err(mdev, "Fail to create RoCE IPsec rx group at nic err=%d\n", err);
+ goto fail_group;
+ }
+ roce->g = g;
+
+ memset(in, 0, MLX5_ST_SZ_BYTES(create_flow_group_in));
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += MLX5_RX_ROCE_GROUP_SIZE;
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ g = mlx5_create_flow_group(ft, in);
+ if (IS_ERR(g)) {
+ err = PTR_ERR(g);
+ mlx5_core_err(mdev, "Fail to create RoCE IPsec rx miss group at nic err=%d\n", err);
+ goto fail_mgroup;
+ }
+ roce->roce_miss.group = g;
+
+ memset(&ft_attr, 0, sizeof(ft_attr));
+ if (family == AF_INET)
+ ft_attr.level = 1;
+ ft = mlx5_create_flow_table(roce->ns_rdma, &ft_attr);
+ if (IS_ERR(ft)) {
+ err = PTR_ERR(ft);
+ mlx5_core_err(mdev, "Fail to create RoCE IPsec rx ft at rdma err=%d\n", err);
+ goto fail_rdma_table;
+ }
+
+ roce->ft_rdma = ft;
+
+ err = ipsec_fs_roce_rx_rule_setup(mdev, default_dst, roce);
+ if (err) {
+ mlx5_core_err(mdev, "Fail to create RoCE IPsec rx rules err=%d\n", err);
+ goto fail_setup_rule;
+ }
+
+ kvfree(in);
+ return 0;
+
+fail_setup_rule:
+ mlx5_destroy_flow_table(roce->ft_rdma);
+fail_rdma_table:
+ mlx5_destroy_flow_group(roce->roce_miss.group);
+fail_mgroup:
+ mlx5_destroy_flow_group(roce->g);
+fail_group:
+ kvfree(in);
+fail_nomem:
+ mlx5_destroy_flow_table(roce->ft);
+ return err;
+}
+
+void mlx5_ipsec_fs_roce_cleanup(struct mlx5_ipsec_fs *ipsec_roce)
+{
+ kfree(ipsec_roce);
+}
+
+struct mlx5_ipsec_fs *mlx5_ipsec_fs_roce_init(struct mlx5_core_dev *mdev)
+{
+ struct mlx5_ipsec_fs *roce_ipsec;
+ struct mlx5_flow_namespace *ns;
+
+ ns = mlx5_get_flow_namespace(mdev, MLX5_FLOW_NAMESPACE_RDMA_RX_IPSEC);
+ if (!ns) {
+ mlx5_core_err(mdev, "Failed to get RoCE rx ns\n");
+ return NULL;
+ }
+
+ roce_ipsec = kzalloc(sizeof(*roce_ipsec), GFP_KERNEL);
+ if (!roce_ipsec)
+ return NULL;
+
+ roce_ipsec->ipv4_rx.ns_rdma = ns;
+ roce_ipsec->ipv6_rx.ns_rdma = ns;
+
+ ns = mlx5_get_flow_namespace(mdev, MLX5_FLOW_NAMESPACE_RDMA_TX_IPSEC);
+ if (!ns) {
+ mlx5_core_err(mdev, "Failed to get RoCE tx ns\n");
+ goto err_tx;
+ }
+
+ roce_ipsec->tx.ns = ns;
+
+ return roce_ipsec;
+
+err_tx:
+ kfree(roce_ipsec);
+ return NULL;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.h
new file mode 100644
index 000000000000..9712d705fe48
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#ifndef __MLX5_LIB_IPSEC_H__
+#define __MLX5_LIB_IPSEC_H__
+
+struct mlx5_ipsec_fs;
+
+struct mlx5_flow_table *
+mlx5_ipsec_fs_roce_ft_get(struct mlx5_ipsec_fs *ipsec_roce, u32 family);
+void mlx5_ipsec_fs_roce_rx_destroy(struct mlx5_ipsec_fs *ipsec_roce,
+ u32 family);
+int mlx5_ipsec_fs_roce_rx_create(struct mlx5_core_dev *mdev,
+ struct mlx5_ipsec_fs *ipsec_roce,
+ struct mlx5_flow_namespace *ns,
+ struct mlx5_flow_destination *default_dst,
+ u32 family, u32 level, u32 prio);
+void mlx5_ipsec_fs_roce_tx_destroy(struct mlx5_ipsec_fs *ipsec_roce);
+int mlx5_ipsec_fs_roce_tx_create(struct mlx5_core_dev *mdev,
+ struct mlx5_ipsec_fs *ipsec_roce,
+ struct mlx5_flow_table *pol_ft);
+void mlx5_ipsec_fs_roce_cleanup(struct mlx5_ipsec_fs *ipsec_roce);
+struct mlx5_ipsec_fs *mlx5_ipsec_fs_roce_init(struct mlx5_core_dev *mdev);
+
+#endif /* __MLX5_LIB_IPSEC_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
index 032adb21ad4b..ccf12f7db6f0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
@@ -79,28 +79,11 @@ struct mlx5_pme_stats {
void mlx5_get_pme_stats(struct mlx5_core_dev *dev, struct mlx5_pme_stats *stats);
int mlx5_notifier_call_chain(struct mlx5_events *events, unsigned int event, void *data);
-/* Crypto */
-enum {
- MLX5_ACCEL_OBJ_TLS_KEY = MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_TLS,
- MLX5_ACCEL_OBJ_IPSEC_KEY = MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_IPSEC,
- MLX5_ACCEL_OBJ_MACSEC_KEY = MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_MACSEC,
-};
-
-int mlx5_create_encryption_key(struct mlx5_core_dev *mdev,
- void *key, u32 sz_bytes,
- u32 key_type, u32 *p_key_id);
-void mlx5_destroy_encryption_key(struct mlx5_core_dev *mdev, u32 key_id);
-
static inline struct net *mlx5_core_net(struct mlx5_core_dev *dev)
{
return devlink_net(priv_to_devlink(dev));
}
-static inline void mlx5_uplink_netdev_set(struct mlx5_core_dev *mdev, struct net_device *netdev)
-{
- mdev->mlx5e_res.uplink_netdev = netdev;
-}
-
static inline struct net_device *mlx5_uplink_netdev_get(struct mlx5_core_dev *mdev)
{
return mdev->mlx5e_res.uplink_netdev;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 3d5f2a4b1fed..540840e80493 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -336,6 +336,24 @@ static u16 to_fw_pkey_sz(struct mlx5_core_dev *dev, u32 size)
}
}
+void mlx5_core_uplink_netdev_set(struct mlx5_core_dev *dev, struct net_device *netdev)
+{
+ mutex_lock(&dev->mlx5e_res.uplink_netdev_lock);
+ dev->mlx5e_res.uplink_netdev = netdev;
+ mlx5_blocking_notifier_call_chain(dev, MLX5_DRIVER_EVENT_UPLINK_NETDEV,
+ netdev);
+ mutex_unlock(&dev->mlx5e_res.uplink_netdev_lock);
+}
+
+void mlx5_core_uplink_netdev_event_replay(struct mlx5_core_dev *dev)
+{
+ mutex_lock(&dev->mlx5e_res.uplink_netdev_lock);
+ mlx5_blocking_notifier_call_chain(dev, MLX5_DRIVER_EVENT_UPLINK_NETDEV,
+ dev->mlx5e_res.uplink_netdev);
+ mutex_unlock(&dev->mlx5e_res.uplink_netdev_lock);
+}
+EXPORT_SYMBOL(mlx5_core_uplink_netdev_event_replay);
+
static int mlx5_core_get_caps_mode(struct mlx5_core_dev *dev,
enum mlx5_cap_type cap_type,
enum mlx5_cap_mode cap_mode)
@@ -484,9 +502,9 @@ static int max_uc_list_get_devlink_param(struct mlx5_core_dev *dev)
union devlink_param_value val;
int err;
- err = devlink_param_driverinit_value_get(devlink,
- DEVLINK_PARAM_GENERIC_ID_MAX_MACS,
- &val);
+ err = devl_param_driverinit_value_get(devlink,
+ DEVLINK_PARAM_GENERIC_ID_MAX_MACS,
+ &val);
if (!err)
return val.vu32;
mlx5_core_dbg(dev, "Failed to get param. err = %d\n", err);
@@ -499,9 +517,9 @@ bool mlx5_is_roce_on(struct mlx5_core_dev *dev)
union devlink_param_value val;
int err;
- err = devlink_param_driverinit_value_get(devlink,
- DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE,
- &val);
+ err = devl_param_driverinit_value_get(devlink,
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE,
+ &val);
if (!err)
return val.vbool;
@@ -1390,9 +1408,9 @@ int mlx5_init_one(struct mlx5_core_dev *dev)
set_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
- err = mlx5_devlink_register(priv_to_devlink(dev));
+ err = mlx5_devlink_params_register(priv_to_devlink(dev));
if (err)
- goto err_devlink_reg;
+ goto err_devlink_params_reg;
err = mlx5_register_device(dev);
if (err)
@@ -1403,8 +1421,8 @@ int mlx5_init_one(struct mlx5_core_dev *dev)
return 0;
err_register:
- mlx5_devlink_unregister(priv_to_devlink(dev));
-err_devlink_reg:
+ mlx5_devlink_params_unregister(priv_to_devlink(dev));
+err_devlink_params_reg:
clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
mlx5_unload(dev);
err_load:
@@ -1426,7 +1444,7 @@ void mlx5_uninit_one(struct mlx5_core_dev *dev)
mutex_lock(&dev->intf_state_mutex);
mlx5_unregister_device(dev);
- mlx5_devlink_unregister(priv_to_devlink(dev));
+ mlx5_devlink_params_unregister(priv_to_devlink(dev));
if (!test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
mlx5_core_warn(dev, "%s: interface is down, NOP\n",
@@ -1491,23 +1509,23 @@ out:
return err;
}
-int mlx5_load_one(struct mlx5_core_dev *dev, bool recovery)
+int mlx5_load_one(struct mlx5_core_dev *dev)
{
struct devlink *devlink = priv_to_devlink(dev);
int ret;
devl_lock(devlink);
- ret = mlx5_load_one_devl_locked(dev, recovery);
+ ret = mlx5_load_one_devl_locked(dev, false);
devl_unlock(devlink);
return ret;
}
-void mlx5_unload_one_devl_locked(struct mlx5_core_dev *dev)
+void mlx5_unload_one_devl_locked(struct mlx5_core_dev *dev, bool suspend)
{
devl_assert_locked(priv_to_devlink(dev));
mutex_lock(&dev->intf_state_mutex);
- mlx5_detach_device(dev);
+ mlx5_detach_device(dev, suspend);
if (!test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
mlx5_core_warn(dev, "%s: interface is down, NOP\n",
@@ -1522,12 +1540,12 @@ out:
mutex_unlock(&dev->intf_state_mutex);
}
-void mlx5_unload_one(struct mlx5_core_dev *dev)
+void mlx5_unload_one(struct mlx5_core_dev *dev, bool suspend)
{
struct devlink *devlink = priv_to_devlink(dev);
devl_lock(devlink);
- mlx5_unload_one_devl_locked(dev);
+ mlx5_unload_one_devl_locked(dev, suspend);
devl_unlock(devlink);
}
@@ -1555,6 +1573,7 @@ static const int types[] = {
MLX5_CAP_DEV_SHAMPO,
MLX5_CAP_MACSEC,
MLX5_CAP_ADV_VIRTUALIZATION,
+ MLX5_CAP_CRYPTO,
};
static void mlx5_hca_caps_free(struct mlx5_core_dev *dev)
@@ -1608,6 +1627,7 @@ int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
lockdep_register_key(&dev->lock_key);
mutex_init(&dev->intf_state_mutex);
lockdep_set_class(&dev->intf_state_mutex, &dev->lock_key);
+ mutex_init(&dev->mlx5e_res.uplink_netdev_lock);
mutex_init(&priv->bfregs.reg_head.lock);
mutex_init(&priv->bfregs.wc_head.lock);
@@ -1696,6 +1716,7 @@ void mlx5_mdev_uninit(struct mlx5_core_dev *dev)
mutex_destroy(&priv->alloc_mutex);
mutex_destroy(&priv->bfregs.wc_head.lock);
mutex_destroy(&priv->bfregs.reg_head.lock);
+ mutex_destroy(&dev->mlx5e_res.uplink_netdev_lock);
mutex_destroy(&dev->intf_state_mutex);
lockdep_unregister_key(&dev->lock_key);
}
@@ -1809,7 +1830,7 @@ static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
mlx5_enter_error_state(dev, false);
mlx5_error_sw_reset(dev);
- mlx5_unload_one(dev);
+ mlx5_unload_one(dev, true);
mlx5_drain_health_wq(dev);
mlx5_pci_disable_device(dev);
@@ -1891,8 +1912,7 @@ static void mlx5_pci_resume(struct pci_dev *pdev)
mlx5_pci_trace(dev, "Enter, loading driver..\n");
- err = mlx5_load_one(dev, false);
-
+ err = mlx5_load_one(dev);
if (!err)
devlink_health_reporter_state_update(dev->priv.health.fw_fatal_reporter,
DEVLINK_HEALTH_REPORTER_STATE_HEALTHY);
@@ -1966,7 +1986,7 @@ static void shutdown(struct pci_dev *pdev)
set_bit(MLX5_BREAK_FW_WAIT, &dev->intf_state);
err = mlx5_try_fast_unload(dev);
if (err)
- mlx5_unload_one(dev);
+ mlx5_unload_one(dev, false);
mlx5_pci_disable_device(dev);
}
@@ -1974,7 +1994,7 @@ static int mlx5_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
- mlx5_unload_one(dev);
+ mlx5_unload_one(dev, true);
return 0;
}
@@ -1983,7 +2003,7 @@ static int mlx5_resume(struct pci_dev *pdev)
{
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
- return mlx5_load_one(dev, false);
+ return mlx5_load_one(dev);
}
static const struct pci_device_id mlx5_core_pci_table[] = {
@@ -2017,7 +2037,7 @@ MODULE_DEVICE_TABLE(pci, mlx5_core_pci_table);
void mlx5_disable_device(struct mlx5_core_dev *dev)
{
mlx5_error_sw_reset(dev);
- mlx5_unload_one_devl_locked(dev);
+ mlx5_unload_one_devl_locked(dev, false);
}
int mlx5_recover_device(struct mlx5_core_dev *dev)
@@ -2110,7 +2130,7 @@ static int __init mlx5_init(void)
mlx5_core_verify_params();
mlx5_register_debugfs();
- err = pci_register_driver(&mlx5_core_driver);
+ err = mlx5e_init();
if (err)
goto err_debug;
@@ -2118,16 +2138,16 @@ static int __init mlx5_init(void)
if (err)
goto err_sf;
- err = mlx5e_init();
+ err = pci_register_driver(&mlx5_core_driver);
if (err)
- goto err_en;
+ goto err_pci;
return 0;
-err_en:
+err_pci:
mlx5_sf_driver_unregister();
err_sf:
- pci_unregister_driver(&mlx5_core_driver);
+ mlx5e_cleanup();
err_debug:
mlx5_unregister_debugfs();
return err;
@@ -2135,9 +2155,9 @@ err_debug:
static void __exit mlx5_cleanup(void)
{
- mlx5e_cleanup();
- mlx5_sf_driver_unregister();
pci_unregister_driver(&mlx5_core_driver);
+ mlx5_sf_driver_unregister();
+ mlx5e_cleanup();
mlx5_unregister_debugfs();
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index 029305a8b80a..be0785f83083 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -236,7 +236,7 @@ void mlx5_adev_cleanup(struct mlx5_core_dev *dev);
int mlx5_adev_init(struct mlx5_core_dev *dev);
int mlx5_attach_device(struct mlx5_core_dev *dev);
-void mlx5_detach_device(struct mlx5_core_dev *dev);
+void mlx5_detach_device(struct mlx5_core_dev *dev, bool suspend);
int mlx5_register_device(struct mlx5_core_dev *dev);
void mlx5_unregister_device(struct mlx5_core_dev *dev);
struct mlx5_core_dev *mlx5_get_next_phys_dev_lag(struct mlx5_core_dev *dev);
@@ -319,9 +319,9 @@ int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx);
void mlx5_mdev_uninit(struct mlx5_core_dev *dev);
int mlx5_init_one(struct mlx5_core_dev *dev);
void mlx5_uninit_one(struct mlx5_core_dev *dev);
-void mlx5_unload_one(struct mlx5_core_dev *dev);
-void mlx5_unload_one_devl_locked(struct mlx5_core_dev *dev);
-int mlx5_load_one(struct mlx5_core_dev *dev, bool recovery);
+void mlx5_unload_one(struct mlx5_core_dev *dev, bool suspend);
+void mlx5_unload_one_devl_locked(struct mlx5_core_dev *dev, bool suspend);
+int mlx5_load_one(struct mlx5_core_dev *dev);
int mlx5_load_one_devl_locked(struct mlx5_core_dev *dev, bool recovery);
int mlx5_vport_set_other_func_cap(struct mlx5_core_dev *dev, const void *hca_cap, u16 function_id,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index 60596357bfc7..64d4e7125e9b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -74,6 +74,14 @@ static u32 get_function(u16 func_id, bool ec_function)
return (u32)func_id | (ec_function << 16);
}
+static u16 func_id_to_type(struct mlx5_core_dev *dev, u16 func_id, bool ec_function)
+{
+ if (!func_id)
+ return mlx5_core_is_ecpf(dev) && !ec_function ? MLX5_HOST_PF : MLX5_PF;
+
+ return func_id <= mlx5_core_max_vfs(dev) ? MLX5_VF : MLX5_SF;
+}
+
static struct rb_root *page_root_per_function(struct mlx5_core_dev *dev, u32 function)
{
struct rb_root *root;
@@ -211,7 +219,8 @@ static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr, u32 function)
n = find_first_bit(&fp->bitmask, 8 * sizeof(fp->bitmask));
if (n >= MLX5_NUM_4K_IN_PAGE) {
- mlx5_core_warn(dev, "alloc 4k bug\n");
+ mlx5_core_warn(dev, "alloc 4k bug: fw page = 0x%llx, n = %u, bitmask: %lu, max num of 4K pages: %d\n",
+ fp->addr, n, fp->bitmask, MLX5_NUM_4K_IN_PAGE);
return -ENOENT;
}
clear_bit(n, &fp->bitmask);
@@ -332,6 +341,7 @@ static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
u32 out[MLX5_ST_SZ_DW(manage_pages_out)] = {0};
int inlen = MLX5_ST_SZ_BYTES(manage_pages_in);
int notify_fail = event;
+ u16 func_type;
u64 addr;
int err;
u32 *in;
@@ -383,11 +393,9 @@ retry:
goto out_dropped;
}
+ func_type = func_id_to_type(dev, func_id, ec_function);
+ dev->priv.page_counters[func_type] += npages;
dev->priv.fw_pages += npages;
- if (func_id)
- dev->priv.vfs_pages += npages;
- else if (mlx5_core_is_ecpf(dev) && !ec_function)
- dev->priv.host_pf_pages += npages;
mlx5_core_dbg(dev, "npages %d, ec_function %d, func_id 0x%x, err %d\n",
npages, ec_function, func_id, err);
@@ -414,6 +422,7 @@ static void release_all_pages(struct mlx5_core_dev *dev, u16 func_id,
struct rb_root *root;
struct rb_node *p;
int npages = 0;
+ u16 func_type;
root = xa_load(&dev->priv.page_root_xa, function);
if (WARN_ON_ONCE(!root))
@@ -428,11 +437,9 @@ static void release_all_pages(struct mlx5_core_dev *dev, u16 func_id,
free_fwp(dev, fwp, fwp->free_count);
}
+ func_type = func_id_to_type(dev, func_id, ec_function);
+ dev->priv.page_counters[func_type] -= npages;
dev->priv.fw_pages -= npages;
- if (func_id)
- dev->priv.vfs_pages -= npages;
- else if (mlx5_core_is_ecpf(dev) && !ec_function)
- dev->priv.host_pf_pages -= npages;
mlx5_core_dbg(dev, "npages %d, ec_function %d, func_id 0x%x\n",
npages, ec_function, func_id);
@@ -498,6 +505,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
int outlen = MLX5_ST_SZ_BYTES(manage_pages_out);
u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {};
int num_claimed;
+ u16 func_type;
u32 *out;
int err;
int i;
@@ -549,11 +557,9 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
if (nclaimed)
*nclaimed = num_claimed;
+ func_type = func_id_to_type(dev, func_id, ec_function);
+ dev->priv.page_counters[func_type] -= num_claimed;
dev->priv.fw_pages -= num_claimed;
- if (func_id)
- dev->priv.vfs_pages -= num_claimed;
- else if (mlx5_core_is_ecpf(dev) && !ec_function)
- dev->priv.host_pf_pages -= num_claimed;
out_free:
kvfree(out);
@@ -706,12 +712,12 @@ int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
WARN(dev->priv.fw_pages,
"FW pages counter is %d after reclaiming all pages\n",
dev->priv.fw_pages);
- WARN(dev->priv.vfs_pages,
+ WARN(dev->priv.page_counters[MLX5_VF],
"VFs FW pages counter is %d after reclaiming all pages\n",
- dev->priv.vfs_pages);
- WARN(dev->priv.host_pf_pages,
+ dev->priv.page_counters[MLX5_VF]);
+ WARN(dev->priv.page_counters[MLX5_HOST_PF],
"External host PF FW pages counter is %d after reclaiming all pages\n",
- dev->priv.host_pf_pages);
+ dev->priv.page_counters[MLX5_HOST_PF]);
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
index 7b4783ce213e..a7377619ba6f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
@@ -74,7 +74,7 @@ static void mlx5_sf_dev_shutdown(struct auxiliary_device *adev)
{
struct mlx5_sf_dev *sf_dev = container_of(adev, struct mlx5_sf_dev, adev);
- mlx5_unload_one(sf_dev->mdev);
+ mlx5_unload_one(sf_dev->mdev, false);
}
static const struct auxiliary_device_id mlx5_sf_dev_id_table[] = {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
index c0e6c487c63c..3008e9ce2bbf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
@@ -147,7 +147,7 @@ mlx5_device_disable_sriov(struct mlx5_core_dev *dev, int num_vfs, bool clear_vf)
mlx5_eswitch_disable_sriov(dev->priv.eswitch, clear_vf);
- if (mlx5_wait_for_pages(dev, &dev->priv.vfs_pages))
+ if (mlx5_wait_for_pages(dev, &dev->priv.page_counters[MLX5_VF]))
mlx5_core_warn(dev, "timeout reclaiming VFs pages\n");
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
index b851141e03de..042ca0349124 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
@@ -1138,12 +1138,14 @@ dr_rule_create_rule_nic(struct mlx5dr_rule *rule,
rule->flow_source))
return 0;
+ mlx5dr_domain_nic_lock(nic_dmn);
+
ret = mlx5dr_matcher_select_builders(matcher,
nic_matcher,
dr_rule_get_ipv(&param->outer),
dr_rule_get_ipv(&param->inner));
if (ret)
- return ret;
+ goto err_unlock;
hw_ste_arr_is_opt = nic_matcher->num_of_builders <= DR_RULE_MAX_STES_OPTIMIZED;
if (likely(hw_ste_arr_is_opt)) {
@@ -1152,12 +1154,12 @@ dr_rule_create_rule_nic(struct mlx5dr_rule *rule,
hw_ste_arr = kzalloc((nic_matcher->num_of_builders + DR_ACTION_MAX_STES) *
DR_STE_SIZE, GFP_KERNEL);
- if (!hw_ste_arr)
- return -ENOMEM;
+ if (!hw_ste_arr) {
+ ret = -ENOMEM;
+ goto err_unlock;
+ }
}
- mlx5dr_domain_nic_lock(nic_dmn);
-
ret = mlx5dr_matcher_add_to_tbl_nic(dmn, nic_matcher);
if (ret)
goto free_hw_ste;
@@ -1223,7 +1225,10 @@ dr_rule_create_rule_nic(struct mlx5dr_rule *rule,
mlx5dr_domain_nic_unlock(nic_dmn);
- goto out;
+ if (unlikely(!hw_ste_arr_is_opt))
+ kfree(hw_ste_arr);
+
+ return 0;
free_rule:
dr_rule_clean_rule_members(rule, nic_rule);
@@ -1238,12 +1243,12 @@ remove_from_nic_tbl:
mlx5dr_matcher_remove_from_tbl_nic(dmn, nic_matcher);
free_hw_ste:
- mlx5dr_domain_nic_unlock(nic_dmn);
-
-out:
- if (unlikely(!hw_ste_arr_is_opt))
+ if (!hw_ste_arr_is_opt)
kfree(hw_ste_arr);
+err_unlock:
+ mlx5dr_domain_nic_unlock(nic_dmn);
+
return ret;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
index a4476cb4c3b3..fd2d31cdbcf9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
@@ -724,7 +724,6 @@ int mlx5dr_send_postsend_action(struct mlx5dr_domain *dmn,
struct mlx5dr_action *action)
{
struct postsend_info send_info = {};
- int ret;
send_info.write.addr = (uintptr_t)action->rewrite->data;
send_info.write.length = action->rewrite->num_of_actions *
@@ -734,9 +733,7 @@ int mlx5dr_send_postsend_action(struct mlx5dr_domain *dmn,
mlx5dr_icm_pool_get_chunk_mr_addr(action->rewrite->chunk);
send_info.rkey = mlx5dr_icm_pool_get_chunk_rkey(action->rewrite->chunk);
- ret = dr_postsend_icm_data(dmn, &send_info);
-
- return ret;
+ return dr_postsend_icm_data(dmn, &send_info);
}
static int dr_modify_qp_rst2init(struct mlx5_core_dev *mdev,
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h
index 5a1027b07215..a453b9cd9033 100644
--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h
@@ -14,6 +14,7 @@
#include <linux/irqreturn.h>
#include <linux/netdevice.h>
#include <linux/irq.h>
+#include <linux/phy.h>
/* The silicon design supports a maximum RX ring size of
* 32K entries. Based on current testing this maximum size
@@ -67,6 +68,29 @@ struct mlxbf_gige_stats {
u64 rx_filter_discard_pkts;
};
+struct mlxbf_gige_reg_param {
+ u32 mask;
+ u32 shift;
+};
+
+struct mlxbf_gige_mdio_gw {
+ u32 gw_address;
+ u32 read_data_address;
+ struct mlxbf_gige_reg_param busy;
+ struct mlxbf_gige_reg_param write_data;
+ struct mlxbf_gige_reg_param read_data;
+ struct mlxbf_gige_reg_param devad;
+ struct mlxbf_gige_reg_param partad;
+ struct mlxbf_gige_reg_param opcode;
+ struct mlxbf_gige_reg_param st1;
+};
+
+struct mlxbf_gige_link_cfg {
+ void (*set_phy_link_mode)(struct phy_device *phydev);
+ void (*adjust_link)(struct net_device *netdev);
+ phy_interface_t phy_mode;
+};
+
struct mlxbf_gige {
void __iomem *base;
void __iomem *llu_base;
@@ -102,6 +126,9 @@ struct mlxbf_gige {
u8 valid_polarity;
struct napi_struct napi;
struct mlxbf_gige_stats stats;
+ u8 hw_version;
+ struct mlxbf_gige_mdio_gw *mdio_gw;
+ int prev_speed;
};
/* Rx Work Queue Element definitions */
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_ethtool.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_ethtool.c
index 41ebef25a930..253d7ad9b809 100644
--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_ethtool.c
@@ -135,4 +135,5 @@ const struct ethtool_ops mlxbf_gige_ethtool_ops = {
.nway_reset = phy_ethtool_nway_reset,
.get_pauseparam = mlxbf_gige_get_pauseparam,
.get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
index 2292d63a279c..694de9513b9f 100644
--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
@@ -205,7 +205,7 @@ static int mlxbf_gige_stop(struct net_device *netdev)
}
static int mlxbf_gige_eth_ioctl(struct net_device *netdev,
- struct ifreq *ifr, int cmd)
+ struct ifreq *ifr, int cmd)
{
if (!(netif_running(netdev)))
return -EINVAL;
@@ -263,13 +263,99 @@ static const struct net_device_ops mlxbf_gige_netdev_ops = {
.ndo_get_stats64 = mlxbf_gige_get_stats64,
};
-static void mlxbf_gige_adjust_link(struct net_device *netdev)
+static void mlxbf_gige_bf2_adjust_link(struct net_device *netdev)
{
struct phy_device *phydev = netdev->phydev;
phy_print_status(phydev);
}
+static void mlxbf_gige_bf3_adjust_link(struct net_device *netdev)
+{
+ struct mlxbf_gige *priv = netdev_priv(netdev);
+ struct phy_device *phydev = netdev->phydev;
+ u8 sgmii_mode;
+ u16 ipg_size;
+ u32 val;
+
+ if (phydev->link && phydev->speed != priv->prev_speed) {
+ switch (phydev->speed) {
+ case 1000:
+ ipg_size = MLXBF_GIGE_1G_IPG_SIZE;
+ sgmii_mode = MLXBF_GIGE_1G_SGMII_MODE;
+ break;
+ case 100:
+ ipg_size = MLXBF_GIGE_100M_IPG_SIZE;
+ sgmii_mode = MLXBF_GIGE_100M_SGMII_MODE;
+ break;
+ case 10:
+ ipg_size = MLXBF_GIGE_10M_IPG_SIZE;
+ sgmii_mode = MLXBF_GIGE_10M_SGMII_MODE;
+ break;
+ default:
+ return;
+ }
+
+ val = readl(priv->plu_base + MLXBF_GIGE_PLU_TX_REG0);
+ val &= ~(MLXBF_GIGE_PLU_TX_IPG_SIZE_MASK | MLXBF_GIGE_PLU_TX_SGMII_MODE_MASK);
+ val |= FIELD_PREP(MLXBF_GIGE_PLU_TX_IPG_SIZE_MASK, ipg_size);
+ val |= FIELD_PREP(MLXBF_GIGE_PLU_TX_SGMII_MODE_MASK, sgmii_mode);
+ writel(val, priv->plu_base + MLXBF_GIGE_PLU_TX_REG0);
+
+ val = readl(priv->plu_base + MLXBF_GIGE_PLU_RX_REG0);
+ val &= ~MLXBF_GIGE_PLU_RX_SGMII_MODE_MASK;
+ val |= FIELD_PREP(MLXBF_GIGE_PLU_RX_SGMII_MODE_MASK, sgmii_mode);
+ writel(val, priv->plu_base + MLXBF_GIGE_PLU_RX_REG0);
+
+ priv->prev_speed = phydev->speed;
+ }
+
+ phy_print_status(phydev);
+}
+
+static void mlxbf_gige_bf2_set_phy_link_mode(struct phy_device *phydev)
+{
+ /* MAC only supports 1000T full duplex mode */
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Full_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
+
+ /* Only symmetric pause with flow control enabled is supported so no
+ * need to negotiate pause.
+ */
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydev->advertising);
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, phydev->advertising);
+}
+
+static void mlxbf_gige_bf3_set_phy_link_mode(struct phy_device *phydev)
+{
+ /* MAC only supports full duplex mode */
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
+
+ /* Only symmetric pause with flow control enabled is supported so no
+ * need to negotiate pause.
+ */
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydev->advertising);
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, phydev->advertising);
+}
+
+static struct mlxbf_gige_link_cfg mlxbf_gige_link_cfgs[] = {
+ [MLXBF_GIGE_VERSION_BF2] = {
+ .set_phy_link_mode = mlxbf_gige_bf2_set_phy_link_mode,
+ .adjust_link = mlxbf_gige_bf2_adjust_link,
+ .phy_mode = PHY_INTERFACE_MODE_GMII
+ },
+ [MLXBF_GIGE_VERSION_BF3] = {
+ .set_phy_link_mode = mlxbf_gige_bf3_set_phy_link_mode,
+ .adjust_link = mlxbf_gige_bf3_adjust_link,
+ .phy_mode = PHY_INTERFACE_MODE_SGMII
+ }
+};
+
static int mlxbf_gige_probe(struct platform_device *pdev)
{
struct phy_device *phydev;
@@ -315,6 +401,8 @@ static int mlxbf_gige_probe(struct platform_device *pdev)
spin_lock_init(&priv->lock);
+ priv->hw_version = readq(base + MLXBF_GIGE_VERSION);
+
/* Attach MDIO device */
err = mlxbf_gige_mdio_probe(pdev, priv);
if (err)
@@ -357,25 +445,14 @@ static int mlxbf_gige_probe(struct platform_device *pdev)
phydev->irq = phy_irq;
err = phy_connect_direct(netdev, phydev,
- mlxbf_gige_adjust_link,
- PHY_INTERFACE_MODE_GMII);
+ mlxbf_gige_link_cfgs[priv->hw_version].adjust_link,
+ mlxbf_gige_link_cfgs[priv->hw_version].phy_mode);
if (err) {
dev_err(&pdev->dev, "Could not attach to PHY\n");
goto out;
}
- /* MAC only supports 1000T full duplex mode */
- phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
- phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Full_BIT);
- phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
- phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT);
- phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
-
- /* Only symmetric pause with flow control enabled is supported so no
- * need to negotiate pause.
- */
- linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydev->advertising);
- linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, phydev->advertising);
+ mlxbf_gige_link_cfgs[priv->hw_version].set_phy_link_mode(phydev);
/* Display information about attached PHY device */
phy_attached_info(phydev);
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio.c
index aa780b1614a3..654190263535 100644
--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio.c
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio.c
@@ -23,9 +23,75 @@
#include "mlxbf_gige.h"
#include "mlxbf_gige_regs.h"
+#include "mlxbf_gige_mdio_bf2.h"
+#include "mlxbf_gige_mdio_bf3.h"
-#define MLXBF_GIGE_MDIO_GW_OFFSET 0x0
-#define MLXBF_GIGE_MDIO_CFG_OFFSET 0x4
+static struct mlxbf_gige_mdio_gw mlxbf_gige_mdio_gw_t[] = {
+ [MLXBF_GIGE_VERSION_BF2] = {
+ .gw_address = MLXBF2_GIGE_MDIO_GW_OFFSET,
+ .read_data_address = MLXBF2_GIGE_MDIO_GW_OFFSET,
+ .busy = {
+ .mask = MLXBF2_GIGE_MDIO_GW_BUSY_MASK,
+ .shift = MLXBF2_GIGE_MDIO_GW_BUSY_SHIFT,
+ },
+ .read_data = {
+ .mask = MLXBF2_GIGE_MDIO_GW_AD_MASK,
+ .shift = MLXBF2_GIGE_MDIO_GW_AD_SHIFT,
+ },
+ .write_data = {
+ .mask = MLXBF2_GIGE_MDIO_GW_AD_MASK,
+ .shift = MLXBF2_GIGE_MDIO_GW_AD_SHIFT,
+ },
+ .devad = {
+ .mask = MLXBF2_GIGE_MDIO_GW_DEVAD_MASK,
+ .shift = MLXBF2_GIGE_MDIO_GW_DEVAD_SHIFT,
+ },
+ .partad = {
+ .mask = MLXBF2_GIGE_MDIO_GW_PARTAD_MASK,
+ .shift = MLXBF2_GIGE_MDIO_GW_PARTAD_SHIFT,
+ },
+ .opcode = {
+ .mask = MLXBF2_GIGE_MDIO_GW_OPCODE_MASK,
+ .shift = MLXBF2_GIGE_MDIO_GW_OPCODE_SHIFT,
+ },
+ .st1 = {
+ .mask = MLXBF2_GIGE_MDIO_GW_ST1_MASK,
+ .shift = MLXBF2_GIGE_MDIO_GW_ST1_SHIFT,
+ },
+ },
+ [MLXBF_GIGE_VERSION_BF3] = {
+ .gw_address = MLXBF3_GIGE_MDIO_GW_OFFSET,
+ .read_data_address = MLXBF3_GIGE_MDIO_DATA_READ,
+ .busy = {
+ .mask = MLXBF3_GIGE_MDIO_GW_BUSY_MASK,
+ .shift = MLXBF3_GIGE_MDIO_GW_BUSY_SHIFT,
+ },
+ .read_data = {
+ .mask = MLXBF3_GIGE_MDIO_GW_DATA_READ_MASK,
+ .shift = MLXBF3_GIGE_MDIO_GW_DATA_READ_SHIFT,
+ },
+ .write_data = {
+ .mask = MLXBF3_GIGE_MDIO_GW_DATA_MASK,
+ .shift = MLXBF3_GIGE_MDIO_GW_DATA_SHIFT,
+ },
+ .devad = {
+ .mask = MLXBF3_GIGE_MDIO_GW_DEVAD_MASK,
+ .shift = MLXBF3_GIGE_MDIO_GW_DEVAD_SHIFT,
+ },
+ .partad = {
+ .mask = MLXBF3_GIGE_MDIO_GW_PARTAD_MASK,
+ .shift = MLXBF3_GIGE_MDIO_GW_PARTAD_SHIFT,
+ },
+ .opcode = {
+ .mask = MLXBF3_GIGE_MDIO_GW_OPCODE_MASK,
+ .shift = MLXBF3_GIGE_MDIO_GW_OPCODE_SHIFT,
+ },
+ .st1 = {
+ .mask = MLXBF3_GIGE_MDIO_GW_ST1_MASK,
+ .shift = MLXBF3_GIGE_MDIO_GW_ST1_SHIFT,
+ },
+ },
+};
#define MLXBF_GIGE_MDIO_FREQ_REFERENCE 156250000ULL
#define MLXBF_GIGE_MDIO_COREPLL_CONST 16384ULL
@@ -47,30 +113,10 @@
/* Busy bit is set by software and cleared by hardware */
#define MLXBF_GIGE_MDIO_SET_BUSY 0x1
-/* MDIO GW register bits */
-#define MLXBF_GIGE_MDIO_GW_AD_MASK GENMASK(15, 0)
-#define MLXBF_GIGE_MDIO_GW_DEVAD_MASK GENMASK(20, 16)
-#define MLXBF_GIGE_MDIO_GW_PARTAD_MASK GENMASK(25, 21)
-#define MLXBF_GIGE_MDIO_GW_OPCODE_MASK GENMASK(27, 26)
-#define MLXBF_GIGE_MDIO_GW_ST1_MASK GENMASK(28, 28)
-#define MLXBF_GIGE_MDIO_GW_BUSY_MASK GENMASK(30, 30)
-
-/* MDIO config register bits */
-#define MLXBF_GIGE_MDIO_CFG_MDIO_MODE_MASK GENMASK(1, 0)
-#define MLXBF_GIGE_MDIO_CFG_MDIO3_3_MASK GENMASK(2, 2)
-#define MLXBF_GIGE_MDIO_CFG_MDIO_FULL_DRIVE_MASK GENMASK(4, 4)
-#define MLXBF_GIGE_MDIO_CFG_MDC_PERIOD_MASK GENMASK(15, 8)
-#define MLXBF_GIGE_MDIO_CFG_MDIO_IN_SAMP_MASK GENMASK(23, 16)
-#define MLXBF_GIGE_MDIO_CFG_MDIO_OUT_SAMP_MASK GENMASK(31, 24)
-
-#define MLXBF_GIGE_MDIO_CFG_VAL (FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDIO_MODE_MASK, 1) | \
- FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDIO3_3_MASK, 1) | \
- FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDIO_FULL_DRIVE_MASK, 1) | \
- FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDIO_IN_SAMP_MASK, 6) | \
- FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDIO_OUT_SAMP_MASK, 13))
-
#define MLXBF_GIGE_BF2_COREPLL_ADDR 0x02800c30
#define MLXBF_GIGE_BF2_COREPLL_SIZE 0x0000000c
+#define MLXBF_GIGE_BF3_COREPLL_ADDR 0x13409824
+#define MLXBF_GIGE_BF3_COREPLL_SIZE 0x00000010
static struct resource corepll_params[] = {
[MLXBF_GIGE_VERSION_BF2] = {
@@ -78,6 +124,11 @@ static struct resource corepll_params[] = {
.end = MLXBF_GIGE_BF2_COREPLL_ADDR + MLXBF_GIGE_BF2_COREPLL_SIZE - 1,
.name = "COREPLL_RES"
},
+ [MLXBF_GIGE_VERSION_BF3] = {
+ .start = MLXBF_GIGE_BF3_COREPLL_ADDR,
+ .end = MLXBF_GIGE_BF3_COREPLL_ADDR + MLXBF_GIGE_BF3_COREPLL_SIZE - 1,
+ .name = "COREPLL_RES"
+ }
};
/* Returns core clock i1clk in Hz */
@@ -134,19 +185,23 @@ static u8 mdio_period_map(struct mlxbf_gige *priv)
return mdio_period;
}
-static u32 mlxbf_gige_mdio_create_cmd(u16 data, int phy_add,
+static u32 mlxbf_gige_mdio_create_cmd(struct mlxbf_gige_mdio_gw *mdio_gw, u16 data, int phy_add,
int phy_reg, u32 opcode)
{
u32 gw_reg = 0;
- gw_reg |= FIELD_PREP(MLXBF_GIGE_MDIO_GW_AD_MASK, data);
- gw_reg |= FIELD_PREP(MLXBF_GIGE_MDIO_GW_DEVAD_MASK, phy_reg);
- gw_reg |= FIELD_PREP(MLXBF_GIGE_MDIO_GW_PARTAD_MASK, phy_add);
- gw_reg |= FIELD_PREP(MLXBF_GIGE_MDIO_GW_OPCODE_MASK, opcode);
- gw_reg |= FIELD_PREP(MLXBF_GIGE_MDIO_GW_ST1_MASK,
- MLXBF_GIGE_MDIO_CL22_ST1);
- gw_reg |= FIELD_PREP(MLXBF_GIGE_MDIO_GW_BUSY_MASK,
- MLXBF_GIGE_MDIO_SET_BUSY);
+ gw_reg |= ((data << mdio_gw->write_data.shift) &
+ mdio_gw->write_data.mask);
+ gw_reg |= ((phy_reg << mdio_gw->devad.shift) &
+ mdio_gw->devad.mask);
+ gw_reg |= ((phy_add << mdio_gw->partad.shift) &
+ mdio_gw->partad.mask);
+ gw_reg |= ((opcode << mdio_gw->opcode.shift) &
+ mdio_gw->opcode.mask);
+ gw_reg |= ((MLXBF_GIGE_MDIO_CL22_ST1 << mdio_gw->st1.shift) &
+ mdio_gw->st1.mask);
+ gw_reg |= ((MLXBF_GIGE_MDIO_SET_BUSY << mdio_gw->busy.shift) &
+ mdio_gw->busy.mask);
return gw_reg;
}
@@ -158,29 +213,27 @@ static int mlxbf_gige_mdio_read(struct mii_bus *bus, int phy_add, int phy_reg)
int ret;
u32 val;
- if (phy_reg & MII_ADDR_C45)
- return -EOPNOTSUPP;
-
/* Send mdio read request */
- cmd = mlxbf_gige_mdio_create_cmd(0, phy_add, phy_reg, MLXBF_GIGE_MDIO_CL22_READ);
+ cmd = mlxbf_gige_mdio_create_cmd(priv->mdio_gw, 0, phy_add, phy_reg,
+ MLXBF_GIGE_MDIO_CL22_READ);
- writel(cmd, priv->mdio_io + MLXBF_GIGE_MDIO_GW_OFFSET);
+ writel(cmd, priv->mdio_io + priv->mdio_gw->gw_address);
- ret = readl_poll_timeout_atomic(priv->mdio_io + MLXBF_GIGE_MDIO_GW_OFFSET,
- val, !(val & MLXBF_GIGE_MDIO_GW_BUSY_MASK),
+ ret = readl_poll_timeout_atomic(priv->mdio_io + priv->mdio_gw->gw_address,
+ val, !(val & priv->mdio_gw->busy.mask),
5, 1000000);
if (ret) {
- writel(0, priv->mdio_io + MLXBF_GIGE_MDIO_GW_OFFSET);
+ writel(0, priv->mdio_io + priv->mdio_gw->gw_address);
return ret;
}
- ret = readl(priv->mdio_io + MLXBF_GIGE_MDIO_GW_OFFSET);
+ ret = readl(priv->mdio_io + priv->mdio_gw->read_data_address);
/* Only return ad bits of the gw register */
- ret &= MLXBF_GIGE_MDIO_GW_AD_MASK;
+ ret &= priv->mdio_gw->read_data.mask;
/* The MDIO lock is set on read. To release it, clear gw register */
- writel(0, priv->mdio_io + MLXBF_GIGE_MDIO_GW_OFFSET);
+ writel(0, priv->mdio_io + priv->mdio_gw->gw_address);
return ret;
}
@@ -193,21 +246,18 @@ static int mlxbf_gige_mdio_write(struct mii_bus *bus, int phy_add,
u32 cmd;
int ret;
- if (phy_reg & MII_ADDR_C45)
- return -EOPNOTSUPP;
-
/* Send mdio write request */
- cmd = mlxbf_gige_mdio_create_cmd(val, phy_add, phy_reg,
+ cmd = mlxbf_gige_mdio_create_cmd(priv->mdio_gw, val, phy_add, phy_reg,
MLXBF_GIGE_MDIO_CL22_WRITE);
- writel(cmd, priv->mdio_io + MLXBF_GIGE_MDIO_GW_OFFSET);
+ writel(cmd, priv->mdio_io + priv->mdio_gw->gw_address);
/* If the poll timed out, drop the request */
- ret = readl_poll_timeout_atomic(priv->mdio_io + MLXBF_GIGE_MDIO_GW_OFFSET,
- temp, !(temp & MLXBF_GIGE_MDIO_GW_BUSY_MASK),
+ ret = readl_poll_timeout_atomic(priv->mdio_io + priv->mdio_gw->gw_address,
+ temp, !(temp & priv->mdio_gw->busy.mask),
5, 1000000);
/* The MDIO lock is set on read. To release it, clear gw register */
- writel(0, priv->mdio_io + MLXBF_GIGE_MDIO_GW_OFFSET);
+ writel(0, priv->mdio_io + priv->mdio_gw->gw_address);
return ret;
}
@@ -219,9 +269,20 @@ static void mlxbf_gige_mdio_cfg(struct mlxbf_gige *priv)
mdio_period = mdio_period_map(priv);
- val = MLXBF_GIGE_MDIO_CFG_VAL;
- val |= FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDC_PERIOD_MASK, mdio_period);
- writel(val, priv->mdio_io + MLXBF_GIGE_MDIO_CFG_OFFSET);
+ if (priv->hw_version == MLXBF_GIGE_VERSION_BF2) {
+ val = MLXBF2_GIGE_MDIO_CFG_VAL;
+ val |= FIELD_PREP(MLXBF2_GIGE_MDIO_CFG_MDC_PERIOD_MASK, mdio_period);
+ writel(val, priv->mdio_io + MLXBF2_GIGE_MDIO_CFG_OFFSET);
+ } else {
+ val = FIELD_PREP(MLXBF3_GIGE_MDIO_CFG_MDIO_MODE_MASK, 1) |
+ FIELD_PREP(MLXBF3_GIGE_MDIO_CFG_MDIO_FULL_DRIVE_MASK, 1);
+ writel(val, priv->mdio_io + MLXBF3_GIGE_MDIO_CFG_REG0);
+ val = FIELD_PREP(MLXBF3_GIGE_MDIO_CFG_MDC_PERIOD_MASK, mdio_period);
+ writel(val, priv->mdio_io + MLXBF3_GIGE_MDIO_CFG_REG1);
+ val = FIELD_PREP(MLXBF3_GIGE_MDIO_CFG_MDIO_IN_SAMP_MASK, 6) |
+ FIELD_PREP(MLXBF3_GIGE_MDIO_CFG_MDIO_OUT_SAMP_MASK, 13);
+ writel(val, priv->mdio_io + MLXBF3_GIGE_MDIO_CFG_REG2);
+ }
}
int mlxbf_gige_mdio_probe(struct platform_device *pdev, struct mlxbf_gige *priv)
@@ -230,6 +291,9 @@ int mlxbf_gige_mdio_probe(struct platform_device *pdev, struct mlxbf_gige *priv)
struct resource *res;
int ret;
+ if (priv->hw_version > MLXBF_GIGE_VERSION_BF3)
+ return -ENODEV;
+
priv->mdio_io = devm_platform_ioremap_resource(pdev, MLXBF_GIGE_RES_MDIO9);
if (IS_ERR(priv->mdio_io))
return PTR_ERR(priv->mdio_io);
@@ -242,13 +306,15 @@ int mlxbf_gige_mdio_probe(struct platform_device *pdev, struct mlxbf_gige *priv)
/* For backward compatibility with older ACPI tables, also keep
* CLK resource internal to the driver.
*/
- res = &corepll_params[MLXBF_GIGE_VERSION_BF2];
+ res = &corepll_params[priv->hw_version];
}
priv->clk_io = devm_ioremap(dev, res->start, resource_size(res));
if (!priv->clk_io)
return -ENOMEM;
+ priv->mdio_gw = &mlxbf_gige_mdio_gw_t[priv->hw_version];
+
mlxbf_gige_mdio_cfg(priv);
priv->mdiobus = devm_mdiobus_alloc(dev);
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio_bf2.h b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio_bf2.h
new file mode 100644
index 000000000000..7f1ff0ac7699
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio_bf2.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause */
+
+/* MDIO support for Mellanox Gigabit Ethernet driver
+ *
+ * Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES, ALL RIGHTS RESERVED.
+ *
+ * This software product is a proprietary product of NVIDIA CORPORATION &
+ * AFFILIATES (the "Company") and all right, title, and interest in and to the
+ * software product, including all associated intellectual property rights, are
+ * and shall remain exclusively with the Company.
+ *
+ * This software product is governed by the End User License Agreement
+ * provided with the software product.
+ */
+
+#ifndef __MLXBF_GIGE_MDIO_BF2_H__
+#define __MLXBF_GIGE_MDIO_BF2_H__
+
+#include <linux/bitfield.h>
+
+#define MLXBF2_GIGE_MDIO_GW_OFFSET 0x0
+#define MLXBF2_GIGE_MDIO_CFG_OFFSET 0x4
+
+/* MDIO GW register bits */
+#define MLXBF2_GIGE_MDIO_GW_AD_MASK GENMASK(15, 0)
+#define MLXBF2_GIGE_MDIO_GW_DEVAD_MASK GENMASK(20, 16)
+#define MLXBF2_GIGE_MDIO_GW_PARTAD_MASK GENMASK(25, 21)
+#define MLXBF2_GIGE_MDIO_GW_OPCODE_MASK GENMASK(27, 26)
+#define MLXBF2_GIGE_MDIO_GW_ST1_MASK GENMASK(28, 28)
+#define MLXBF2_GIGE_MDIO_GW_BUSY_MASK GENMASK(30, 30)
+
+#define MLXBF2_GIGE_MDIO_GW_AD_SHIFT 0
+#define MLXBF2_GIGE_MDIO_GW_DEVAD_SHIFT 16
+#define MLXBF2_GIGE_MDIO_GW_PARTAD_SHIFT 21
+#define MLXBF2_GIGE_MDIO_GW_OPCODE_SHIFT 26
+#define MLXBF2_GIGE_MDIO_GW_ST1_SHIFT 28
+#define MLXBF2_GIGE_MDIO_GW_BUSY_SHIFT 30
+
+/* MDIO config register bits */
+#define MLXBF2_GIGE_MDIO_CFG_MDIO_MODE_MASK GENMASK(1, 0)
+#define MLXBF2_GIGE_MDIO_CFG_MDIO3_3_MASK GENMASK(2, 2)
+#define MLXBF2_GIGE_MDIO_CFG_MDIO_FULL_DRIVE_MASK GENMASK(4, 4)
+#define MLXBF2_GIGE_MDIO_CFG_MDC_PERIOD_MASK GENMASK(15, 8)
+#define MLXBF2_GIGE_MDIO_CFG_MDIO_IN_SAMP_MASK GENMASK(23, 16)
+#define MLXBF2_GIGE_MDIO_CFG_MDIO_OUT_SAMP_MASK GENMASK(31, 24)
+
+#define MLXBF2_GIGE_MDIO_CFG_VAL (FIELD_PREP(MLXBF2_GIGE_MDIO_CFG_MDIO_MODE_MASK, 1) | \
+ FIELD_PREP(MLXBF2_GIGE_MDIO_CFG_MDIO3_3_MASK, 1) | \
+ FIELD_PREP(MLXBF2_GIGE_MDIO_CFG_MDIO_FULL_DRIVE_MASK, 1) | \
+ FIELD_PREP(MLXBF2_GIGE_MDIO_CFG_MDIO_IN_SAMP_MASK, 6) | \
+ FIELD_PREP(MLXBF2_GIGE_MDIO_CFG_MDIO_OUT_SAMP_MASK, 13))
+
+#endif /* __MLXBF_GIGE_MDIO_BF2_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio_bf3.h b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio_bf3.h
new file mode 100644
index 000000000000..9dd9144b9173
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio_bf3.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause */
+
+/* MDIO support for Mellanox Gigabit Ethernet driver
+ *
+ * Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES, ALL RIGHTS RESERVED.
+ *
+ * This software product is a proprietary product of NVIDIA CORPORATION &
+ * AFFILIATES (the "Company") and all right, title, and interest in and to the
+ * software product, including all associated intellectual property rights, are
+ * and shall remain exclusively with the Company.
+ *
+ * This software product is governed by the End User License Agreement
+ * provided with the software product.
+ */
+
+#ifndef __MLXBF_GIGE_MDIO_BF3_H__
+#define __MLXBF_GIGE_MDIO_BF3_H__
+
+#include <linux/bitfield.h>
+
+#define MLXBF3_GIGE_MDIO_GW_OFFSET 0x80
+#define MLXBF3_GIGE_MDIO_DATA_READ 0x8c
+#define MLXBF3_GIGE_MDIO_CFG_REG0 0x100
+#define MLXBF3_GIGE_MDIO_CFG_REG1 0x104
+#define MLXBF3_GIGE_MDIO_CFG_REG2 0x108
+
+/* MDIO GW register bits */
+#define MLXBF3_GIGE_MDIO_GW_ST1_MASK GENMASK(1, 1)
+#define MLXBF3_GIGE_MDIO_GW_OPCODE_MASK GENMASK(3, 2)
+#define MLXBF3_GIGE_MDIO_GW_PARTAD_MASK GENMASK(8, 4)
+#define MLXBF3_GIGE_MDIO_GW_DEVAD_MASK GENMASK(13, 9)
+/* For BlueField-3, this field is only used for mdio write */
+#define MLXBF3_GIGE_MDIO_GW_DATA_MASK GENMASK(29, 14)
+#define MLXBF3_GIGE_MDIO_GW_BUSY_MASK GENMASK(30, 30)
+
+#define MLXBF3_GIGE_MDIO_GW_DATA_READ_MASK GENMASK(15, 0)
+
+#define MLXBF3_GIGE_MDIO_GW_ST1_SHIFT 1
+#define MLXBF3_GIGE_MDIO_GW_OPCODE_SHIFT 2
+#define MLXBF3_GIGE_MDIO_GW_PARTAD_SHIFT 4
+#define MLXBF3_GIGE_MDIO_GW_DEVAD_SHIFT 9
+#define MLXBF3_GIGE_MDIO_GW_DATA_SHIFT 14
+#define MLXBF3_GIGE_MDIO_GW_BUSY_SHIFT 30
+
+#define MLXBF3_GIGE_MDIO_GW_DATA_READ_SHIFT 0
+
+/* MDIO config register bits */
+#define MLXBF3_GIGE_MDIO_CFG_MDIO_MODE_MASK GENMASK(1, 0)
+#define MLXBF3_GIGE_MDIO_CFG_MDIO_FULL_DRIVE_MASK GENMASK(2, 2)
+#define MLXBF3_GIGE_MDIO_CFG_MDC_PERIOD_MASK GENMASK(7, 0)
+#define MLXBF3_GIGE_MDIO_CFG_MDIO_IN_SAMP_MASK GENMASK(7, 0)
+#define MLXBF3_GIGE_MDIO_CFG_MDIO_OUT_SAMP_MASK GENMASK(15, 8)
+
+#endif /* __MLXBF_GIGE_MDIO_BF3_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h
index 7be3a793984d..cd0973229c9b 100644
--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h
@@ -8,8 +8,11 @@
#ifndef __MLXBF_GIGE_REGS_H__
#define __MLXBF_GIGE_REGS_H__
+#include <linux/bitfield.h>
+
#define MLXBF_GIGE_VERSION 0x0000
#define MLXBF_GIGE_VERSION_BF2 0x0
+#define MLXBF_GIGE_VERSION_BF3 0x1
#define MLXBF_GIGE_STATUS 0x0010
#define MLXBF_GIGE_STATUS_READY BIT(0)
#define MLXBF_GIGE_INT_STATUS 0x0028
@@ -77,4 +80,23 @@
*/
#define MLXBF_GIGE_MMIO_REG_SZ (MLXBF_GIGE_MAC_CFG + 8)
+#define MLXBF_GIGE_PLU_TX_REG0 0x80
+#define MLXBF_GIGE_PLU_TX_IPG_SIZE_MASK GENMASK(11, 0)
+#define MLXBF_GIGE_PLU_TX_SGMII_MODE_MASK GENMASK(15, 14)
+
+#define MLXBF_GIGE_PLU_RX_REG0 0x10
+#define MLXBF_GIGE_PLU_RX_SGMII_MODE_MASK GENMASK(25, 24)
+
+#define MLXBF_GIGE_1G_SGMII_MODE 0x0
+#define MLXBF_GIGE_10M_SGMII_MODE 0x1
+#define MLXBF_GIGE_100M_SGMII_MODE 0x2
+
+/* ipg_size default value for 1G is fixed by HW to 11 + End = 12.
+ * So for 100M it is 12 * 10 - 1 = 119
+ * For 10M, it is 12 * 100 - 1 = 1199
+ */
+#define MLXBF_GIGE_1G_IPG_SIZE 11
+#define MLXBF_GIGE_100M_IPG_SIZE 119
+#define MLXBF_GIGE_10M_IPG_SIZE 1199
+
#endif /* !defined(__MLXBF_GIGE_REGS_H__) */
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index a0a06e2eff82..22db0bb15c45 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -78,6 +78,7 @@ struct mlxsw_core {
spinlock_t trans_list_lock; /* protects trans_list writes */
bool use_emad;
bool enable_string_tlv;
+ bool enable_latency_tlv;
} emad;
struct {
u16 *mapping; /* lag_id+port_index to local_port mapping */
@@ -378,6 +379,22 @@ MLXSW_ITEM32(emad, string_tlv, len, 0x00, 16, 11);
MLXSW_ITEM_BUF(emad, string_tlv, string, 0x04,
MLXSW_EMAD_STRING_TLV_STRING_LEN);
+/* emad_latency_tlv_type
+ * Type of the TLV.
+ * Must be set to 0x4 (latency TLV).
+ */
+MLXSW_ITEM32(emad, latency_tlv, type, 0x00, 27, 5);
+
+/* emad_latency_tlv_len
+ * Length of the latency TLV in u32.
+ */
+MLXSW_ITEM32(emad, latency_tlv, len, 0x00, 16, 11);
+
+/* emad_latency_tlv_latency_time
+ * EMAD latency time in units of uSec.
+ */
+MLXSW_ITEM32(emad, latency_tlv, latency_time, 0x04, 0, 32);
+
/* emad_reg_tlv_type
* Type of the TLV.
* Must be set to 0x3 (register TLV).
@@ -461,6 +478,12 @@ static void mlxsw_emad_pack_op_tlv(char *op_tlv,
mlxsw_emad_op_tlv_tid_set(op_tlv, tid);
}
+static void mlxsw_emad_pack_latency_tlv(char *latency_tlv)
+{
+ mlxsw_emad_latency_tlv_type_set(latency_tlv, MLXSW_EMAD_TLV_TYPE_LATENCY);
+ mlxsw_emad_latency_tlv_len_set(latency_tlv, MLXSW_EMAD_LATENCY_TLV_LEN);
+}
+
static int mlxsw_emad_construct_eth_hdr(struct sk_buff *skb)
{
char *eth_hdr = skb_push(skb, MLXSW_EMAD_ETH_HDR_LEN);
@@ -476,11 +499,11 @@ static int mlxsw_emad_construct_eth_hdr(struct sk_buff *skb)
return 0;
}
-static void mlxsw_emad_construct(struct sk_buff *skb,
+static void mlxsw_emad_construct(const struct mlxsw_core *mlxsw_core,
+ struct sk_buff *skb,
const struct mlxsw_reg_info *reg,
char *payload,
- enum mlxsw_core_reg_access_type type,
- u64 tid, bool enable_string_tlv)
+ enum mlxsw_core_reg_access_type type, u64 tid)
{
char *buf;
@@ -490,7 +513,12 @@ static void mlxsw_emad_construct(struct sk_buff *skb,
buf = skb_push(skb, reg->len + sizeof(u32));
mlxsw_emad_pack_reg_tlv(buf, reg, payload);
- if (enable_string_tlv) {
+ if (mlxsw_core->emad.enable_latency_tlv) {
+ buf = skb_push(skb, MLXSW_EMAD_LATENCY_TLV_LEN * sizeof(u32));
+ mlxsw_emad_pack_latency_tlv(buf);
+ }
+
+ if (mlxsw_core->emad.enable_string_tlv) {
buf = skb_push(skb, MLXSW_EMAD_STRING_TLV_LEN * sizeof(u32));
mlxsw_emad_pack_string_tlv(buf);
}
@@ -504,6 +532,7 @@ static void mlxsw_emad_construct(struct sk_buff *skb,
struct mlxsw_emad_tlv_offsets {
u16 op_tlv;
u16 string_tlv;
+ u16 latency_tlv;
u16 reg_tlv;
};
@@ -514,6 +543,13 @@ static bool mlxsw_emad_tlv_is_string_tlv(const char *tlv)
return tlv_type == MLXSW_EMAD_TLV_TYPE_STRING;
}
+static bool mlxsw_emad_tlv_is_latency_tlv(const char *tlv)
+{
+ u8 tlv_type = mlxsw_emad_latency_tlv_type_get(tlv);
+
+ return tlv_type == MLXSW_EMAD_TLV_TYPE_LATENCY;
+}
+
static void mlxsw_emad_tlv_parse(struct sk_buff *skb)
{
struct mlxsw_emad_tlv_offsets *offsets =
@@ -521,6 +557,8 @@ static void mlxsw_emad_tlv_parse(struct sk_buff *skb)
offsets->op_tlv = MLXSW_EMAD_ETH_HDR_LEN;
offsets->string_tlv = 0;
+ offsets->latency_tlv = 0;
+
offsets->reg_tlv = MLXSW_EMAD_ETH_HDR_LEN +
MLXSW_EMAD_OP_TLV_LEN * sizeof(u32);
@@ -529,6 +567,11 @@ static void mlxsw_emad_tlv_parse(struct sk_buff *skb)
offsets->string_tlv = offsets->reg_tlv;
offsets->reg_tlv += MLXSW_EMAD_STRING_TLV_LEN * sizeof(u32);
}
+
+ if (mlxsw_emad_tlv_is_latency_tlv(skb->data + offsets->reg_tlv)) {
+ offsets->latency_tlv = offsets->reg_tlv;
+ offsets->reg_tlv += MLXSW_EMAD_LATENCY_TLV_LEN * sizeof(u32);
+ }
}
static char *mlxsw_emad_op_tlv(const struct sk_buff *skb)
@@ -794,6 +837,32 @@ static const struct mlxsw_listener mlxsw_emad_rx_listener =
MLXSW_RXL(mlxsw_emad_rx_listener_func, ETHEMAD, TRAP_TO_CPU, false,
EMAD, DISCARD);
+static int mlxsw_emad_tlv_enable(struct mlxsw_core *mlxsw_core)
+{
+ char mgir_pl[MLXSW_REG_MGIR_LEN];
+ bool string_tlv, latency_tlv;
+ int err;
+
+ mlxsw_reg_mgir_pack(mgir_pl);
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mgir), mgir_pl);
+ if (err)
+ return err;
+
+ string_tlv = mlxsw_reg_mgir_fw_info_string_tlv_get(mgir_pl);
+ mlxsw_core->emad.enable_string_tlv = string_tlv;
+
+ latency_tlv = mlxsw_reg_mgir_fw_info_latency_tlv_get(mgir_pl);
+ mlxsw_core->emad.enable_latency_tlv = latency_tlv;
+
+ return 0;
+}
+
+static void mlxsw_emad_tlv_disable(struct mlxsw_core *mlxsw_core)
+{
+ mlxsw_core->emad.enable_latency_tlv = false;
+ mlxsw_core->emad.enable_string_tlv = false;
+}
+
static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
{
struct workqueue_struct *emad_wq;
@@ -824,10 +893,17 @@ static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
if (err)
goto err_trap_register;
+ err = mlxsw_emad_tlv_enable(mlxsw_core);
+ if (err)
+ goto err_emad_tlv_enable;
+
mlxsw_core->emad.use_emad = true;
return 0;
+err_emad_tlv_enable:
+ mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener,
+ mlxsw_core);
err_trap_register:
destroy_workqueue(mlxsw_core->emad_wq);
return err;
@@ -840,13 +916,14 @@ static void mlxsw_emad_fini(struct mlxsw_core *mlxsw_core)
return;
mlxsw_core->emad.use_emad = false;
+ mlxsw_emad_tlv_disable(mlxsw_core);
mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener,
mlxsw_core);
destroy_workqueue(mlxsw_core->emad_wq);
}
static struct sk_buff *mlxsw_emad_alloc(const struct mlxsw_core *mlxsw_core,
- u16 reg_len, bool enable_string_tlv)
+ u16 reg_len)
{
struct sk_buff *skb;
u16 emad_len;
@@ -854,8 +931,10 @@ static struct sk_buff *mlxsw_emad_alloc(const struct mlxsw_core *mlxsw_core,
emad_len = (reg_len + sizeof(u32) + MLXSW_EMAD_ETH_HDR_LEN +
(MLXSW_EMAD_OP_TLV_LEN + MLXSW_EMAD_END_TLV_LEN) *
sizeof(u32) + mlxsw_core->driver->txhdr_len);
- if (enable_string_tlv)
+ if (mlxsw_core->emad.enable_string_tlv)
emad_len += MLXSW_EMAD_STRING_TLV_LEN * sizeof(u32);
+ if (mlxsw_core->emad.enable_latency_tlv)
+ emad_len += MLXSW_EMAD_LATENCY_TLV_LEN * sizeof(u32);
if (emad_len > MLXSW_EMAD_MAX_FRAME_LEN)
return NULL;
@@ -877,7 +956,6 @@ static int mlxsw_emad_reg_access(struct mlxsw_core *mlxsw_core,
mlxsw_reg_trans_cb_t *cb,
unsigned long cb_priv, u64 tid)
{
- bool enable_string_tlv;
struct sk_buff *skb;
int err;
@@ -885,12 +963,7 @@ static int mlxsw_emad_reg_access(struct mlxsw_core *mlxsw_core,
tid, reg->id, mlxsw_reg_id_str(reg->id),
mlxsw_core_reg_access_type_str(type));
- /* Since this can be changed during emad_reg_access, read it once and
- * use the value all the way.
- */
- enable_string_tlv = mlxsw_core->emad.enable_string_tlv;
-
- skb = mlxsw_emad_alloc(mlxsw_core, reg->len, enable_string_tlv);
+ skb = mlxsw_emad_alloc(mlxsw_core, reg->len);
if (!skb)
return -ENOMEM;
@@ -907,8 +980,7 @@ static int mlxsw_emad_reg_access(struct mlxsw_core *mlxsw_core,
trans->reg = reg;
trans->type = type;
- mlxsw_emad_construct(skb, reg, payload, type, trans->tid,
- enable_string_tlv);
+ mlxsw_emad_construct(mlxsw_core, skb, reg, payload, type, trans->tid);
mlxsw_core->driver->txhdr_construct(skb, &trans->tx_info);
spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
@@ -1171,9 +1243,9 @@ static int mlxsw_core_fw_rev_validate(struct mlxsw_core *mlxsw_core,
return 0;
/* Don't check if devlink 'fw_load_policy' param is 'flash' */
- err = devlink_param_driverinit_value_get(priv_to_devlink(mlxsw_core),
- DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY,
- &value);
+ err = devl_param_driverinit_value_get(priv_to_devlink(mlxsw_core),
+ DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY,
+ &value);
if (err)
return err;
if (value.vu8 == DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH)
@@ -1244,20 +1316,22 @@ static int mlxsw_core_fw_params_register(struct mlxsw_core *mlxsw_core)
union devlink_param_value value;
int err;
- err = devlink_params_register(devlink, mlxsw_core_fw_devlink_params,
- ARRAY_SIZE(mlxsw_core_fw_devlink_params));
+ err = devl_params_register(devlink, mlxsw_core_fw_devlink_params,
+ ARRAY_SIZE(mlxsw_core_fw_devlink_params));
if (err)
return err;
value.vu8 = DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER;
- devlink_param_driverinit_value_set(devlink, DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY, value);
+ devl_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY,
+ value);
return 0;
}
static void mlxsw_core_fw_params_unregister(struct mlxsw_core *mlxsw_core)
{
- devlink_params_unregister(priv_to_devlink(mlxsw_core), mlxsw_core_fw_devlink_params,
- ARRAY_SIZE(mlxsw_core_fw_devlink_params));
+ devl_params_unregister(priv_to_devlink(mlxsw_core), mlxsw_core_fw_devlink_params,
+ ARRAY_SIZE(mlxsw_core_fw_devlink_params));
}
static void *__dl_port(struct devlink_port *devlink_port)
@@ -1676,29 +1750,12 @@ static const struct devlink_ops mlxsw_devlink_ops = {
static int mlxsw_core_params_register(struct mlxsw_core *mlxsw_core)
{
- int err;
-
- err = mlxsw_core_fw_params_register(mlxsw_core);
- if (err)
- return err;
-
- if (mlxsw_core->driver->params_register) {
- err = mlxsw_core->driver->params_register(mlxsw_core);
- if (err)
- goto err_params_register;
- }
- return 0;
-
-err_params_register:
- mlxsw_core_fw_params_unregister(mlxsw_core);
- return err;
+ return mlxsw_core_fw_params_register(mlxsw_core);
}
static void mlxsw_core_params_unregister(struct mlxsw_core *mlxsw_core)
{
mlxsw_core_fw_params_unregister(mlxsw_core);
- if (mlxsw_core->driver->params_register)
- mlxsw_core->driver->params_unregister(mlxsw_core);
}
struct mlxsw_core_health_event {
@@ -2051,8 +2108,8 @@ static int mlxsw_core_health_init(struct mlxsw_core *mlxsw_core)
if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
return 0;
- fw_fatal = devlink_health_reporter_create(devlink, &mlxsw_core_health_fw_fatal_ops,
- 0, mlxsw_core);
+ fw_fatal = devl_health_reporter_create(devlink, &mlxsw_core_health_fw_fatal_ops,
+ 0, mlxsw_core);
if (IS_ERR(fw_fatal)) {
dev_err(mlxsw_core->bus_info->dev, "Failed to create fw fatal reporter");
return PTR_ERR(fw_fatal);
@@ -2072,7 +2129,7 @@ static int mlxsw_core_health_init(struct mlxsw_core *mlxsw_core)
err_fw_fatal_config:
mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_core_health_listener, mlxsw_core);
err_trap_register:
- devlink_health_reporter_destroy(mlxsw_core->health.fw_fatal);
+ devl_health_reporter_destroy(mlxsw_core->health.fw_fatal);
return err;
}
@@ -2085,7 +2142,7 @@ static void mlxsw_core_health_fini(struct mlxsw_core *mlxsw_core)
mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_core_health_listener, mlxsw_core);
/* Make sure there is no more event work scheduled */
mlxsw_core_flush_owq();
- devlink_health_reporter_destroy(mlxsw_core->health.fw_fatal);
+ devl_health_reporter_destroy(mlxsw_core->health.fw_fatal);
}
static void mlxsw_core_irq_event_handler_init(struct mlxsw_core *mlxsw_core)
@@ -2127,6 +2184,7 @@ __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
goto err_devlink_alloc;
}
devl_lock(devlink);
+ devl_register(devlink);
}
mlxsw_core = devlink_priv(devlink);
@@ -2210,11 +2268,8 @@ __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
goto err_driver_init;
}
- if (!reload) {
- devlink_set_features(devlink, DEVLINK_F_RELOAD);
+ if (!reload)
devl_unlock(devlink);
- devlink_register(devlink);
- }
return 0;
err_driver_init:
@@ -2246,6 +2301,7 @@ err_register_resources:
err_bus_init:
mlxsw_core_irq_event_handler_fini(mlxsw_core);
if (!reload) {
+ devl_unregister(devlink);
devl_unlock(devlink);
devlink_free(devlink);
}
@@ -2284,10 +2340,8 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
{
struct devlink *devlink = priv_to_devlink(mlxsw_core);
- if (!reload) {
- devlink_unregister(devlink);
+ if (!reload)
devl_lock(devlink);
- }
if (devlink_is_reload_failed(devlink)) {
if (!reload)
@@ -2316,6 +2370,7 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
mlxsw_core->bus->fini(mlxsw_core->bus_priv);
mlxsw_core_irq_event_handler_fini(mlxsw_core);
if (!reload) {
+ devl_unregister(devlink);
devl_unlock(devlink);
devlink_free(devlink);
}
@@ -2325,6 +2380,7 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
reload_fail_deinit:
mlxsw_core_params_unregister(mlxsw_core);
devl_resources_unregister(devlink);
+ devl_unregister(devlink);
devl_unlock(devlink);
devlink_free(devlink);
}
@@ -3377,12 +3433,6 @@ bool mlxsw_core_sdq_supports_cqe_v2(struct mlxsw_core *mlxsw_core)
}
EXPORT_SYMBOL(mlxsw_core_sdq_supports_cqe_v2);
-void mlxsw_core_emad_string_tlv_enable(struct mlxsw_core *mlxsw_core)
-{
- mlxsw_core->emad.enable_string_tlv = true;
-}
-EXPORT_SYMBOL(mlxsw_core_emad_string_tlv_enable);
-
static int __init mlxsw_core_module_init(void)
{
int err;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index e0a6fcbbcb19..e5474d3e34db 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -421,8 +421,6 @@ struct mlxsw_driver {
const struct mlxsw_config_profile *profile,
u64 *p_single_size, u64 *p_double_size,
u64 *p_linear_size);
- int (*params_register)(struct mlxsw_core *mlxsw_core);
- void (*params_unregister)(struct mlxsw_core *mlxsw_core);
/* Notify a driver that a timestamped packet was transmitted. Driver
* is responsible for freeing the passed-in SKB.
@@ -448,8 +446,6 @@ u32 mlxsw_core_read_utc_nsec(struct mlxsw_core *mlxsw_core);
bool mlxsw_core_sdq_supports_cqe_v2(struct mlxsw_core *mlxsw_core);
-void mlxsw_core_emad_string_tlv_enable(struct mlxsw_core *mlxsw_core);
-
bool mlxsw_core_res_valid(struct mlxsw_core *mlxsw_core,
enum mlxsw_res_id res_id);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c b/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c
index 83d2dc91ba2c..025e0db983fe 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c
@@ -1259,9 +1259,9 @@ static int mlxsw_linecard_init(struct mlxsw_core *mlxsw_core,
linecard->linecards = linecards;
mutex_init(&linecard->lock);
- devlink_linecard = devlink_linecard_create(priv_to_devlink(mlxsw_core),
- slot_index, &mlxsw_linecard_ops,
- linecard);
+ devlink_linecard = devl_linecard_create(priv_to_devlink(mlxsw_core),
+ slot_index, &mlxsw_linecard_ops,
+ linecard);
if (IS_ERR(devlink_linecard))
return PTR_ERR(devlink_linecard);
@@ -1285,7 +1285,7 @@ static void mlxsw_linecard_fini(struct mlxsw_core *mlxsw_core,
if (linecard->active)
mlxsw_linecard_active_clear(linecard);
mlxsw_linecard_bdev_del(linecard);
- devlink_linecard_destroy(linecard->devlink_linecard);
+ devl_linecard_destroy(linecard->devlink_linecard);
mutex_destroy(&linecard->lock);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
index 987fe5c9d5a3..c5240d38c9db 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
@@ -36,33 +36,39 @@ enum mlxsw_thermal_trips {
MLXSW_THERMAL_TEMP_TRIP_HOT,
};
-struct mlxsw_thermal_trip {
- int type;
- int temp;
- int hyst;
+struct mlxsw_cooling_states {
int min_state;
int max_state;
};
-static const struct mlxsw_thermal_trip default_thermal_trips[] = {
+static const struct thermal_trip default_thermal_trips[] = {
{ /* In range - 0-40% PWM */
.type = THERMAL_TRIP_ACTIVE,
- .temp = MLXSW_THERMAL_ASIC_TEMP_NORM,
- .hyst = MLXSW_THERMAL_HYSTERESIS_TEMP,
- .min_state = 0,
- .max_state = (4 * MLXSW_THERMAL_MAX_STATE) / 10,
+ .temperature = MLXSW_THERMAL_ASIC_TEMP_NORM,
+ .hysteresis = MLXSW_THERMAL_HYSTERESIS_TEMP,
},
{
/* In range - 40-100% PWM */
.type = THERMAL_TRIP_ACTIVE,
- .temp = MLXSW_THERMAL_ASIC_TEMP_HIGH,
- .hyst = MLXSW_THERMAL_HYSTERESIS_TEMP,
- .min_state = (4 * MLXSW_THERMAL_MAX_STATE) / 10,
- .max_state = MLXSW_THERMAL_MAX_STATE,
+ .temperature = MLXSW_THERMAL_ASIC_TEMP_HIGH,
+ .hysteresis = MLXSW_THERMAL_HYSTERESIS_TEMP,
},
{ /* Warning */
.type = THERMAL_TRIP_HOT,
- .temp = MLXSW_THERMAL_ASIC_TEMP_HOT,
+ .temperature = MLXSW_THERMAL_ASIC_TEMP_HOT,
+ },
+};
+
+static const struct mlxsw_cooling_states default_cooling_states[] = {
+ {
+ .min_state = 0,
+ .max_state = (4 * MLXSW_THERMAL_MAX_STATE) / 10,
+ },
+ {
+ .min_state = (4 * MLXSW_THERMAL_MAX_STATE) / 10,
+ .max_state = MLXSW_THERMAL_MAX_STATE,
+ },
+ {
.min_state = MLXSW_THERMAL_MAX_STATE,
.max_state = MLXSW_THERMAL_MAX_STATE,
},
@@ -78,7 +84,8 @@ struct mlxsw_thermal;
struct mlxsw_thermal_module {
struct mlxsw_thermal *parent;
struct thermal_zone_device *tzdev;
- struct mlxsw_thermal_trip trips[MLXSW_THERMAL_NUM_TRIPS];
+ struct thermal_trip trips[MLXSW_THERMAL_NUM_TRIPS];
+ struct mlxsw_cooling_states cooling_states[MLXSW_THERMAL_NUM_TRIPS];
int module; /* Module or gearbox number */
u8 slot_index;
};
@@ -99,7 +106,8 @@ struct mlxsw_thermal {
int polling_delay;
struct thermal_cooling_device *cdevs[MLXSW_MFCR_PWMS_MAX];
u8 cooling_levels[MLXSW_THERMAL_MAX_STATE + 1];
- struct mlxsw_thermal_trip trips[MLXSW_THERMAL_NUM_TRIPS];
+ struct thermal_trip trips[MLXSW_THERMAL_NUM_TRIPS];
+ struct mlxsw_cooling_states cooling_states[MLXSW_THERMAL_NUM_TRIPS];
struct mlxsw_thermal_area line_cards[];
};
@@ -136,9 +144,9 @@ static int mlxsw_get_cooling_device_idx(struct mlxsw_thermal *thermal,
static void
mlxsw_thermal_module_trips_reset(struct mlxsw_thermal_module *tz)
{
- tz->trips[MLXSW_THERMAL_TEMP_TRIP_NORM].temp = 0;
- tz->trips[MLXSW_THERMAL_TEMP_TRIP_HIGH].temp = 0;
- tz->trips[MLXSW_THERMAL_TEMP_TRIP_HOT].temp = 0;
+ tz->trips[MLXSW_THERMAL_TEMP_TRIP_NORM].temperature = 0;
+ tz->trips[MLXSW_THERMAL_TEMP_TRIP_HIGH].temperature = 0;
+ tz->trips[MLXSW_THERMAL_TEMP_TRIP_HOT].temperature = 0;
}
static int
@@ -180,12 +188,12 @@ mlxsw_thermal_module_trips_update(struct device *dev, struct mlxsw_core *core,
* by subtracting double hysteresis value.
*/
if (crit_temp >= MLXSW_THERMAL_MODULE_TEMP_SHIFT)
- tz->trips[MLXSW_THERMAL_TEMP_TRIP_NORM].temp = crit_temp -
+ tz->trips[MLXSW_THERMAL_TEMP_TRIP_NORM].temperature = crit_temp -
MLXSW_THERMAL_MODULE_TEMP_SHIFT;
else
- tz->trips[MLXSW_THERMAL_TEMP_TRIP_NORM].temp = crit_temp;
- tz->trips[MLXSW_THERMAL_TEMP_TRIP_HIGH].temp = crit_temp;
- tz->trips[MLXSW_THERMAL_TEMP_TRIP_HOT].temp = emerg_temp;
+ tz->trips[MLXSW_THERMAL_TEMP_TRIP_NORM].temperature = crit_temp;
+ tz->trips[MLXSW_THERMAL_TEMP_TRIP_HIGH].temperature = crit_temp;
+ tz->trips[MLXSW_THERMAL_TEMP_TRIP_HOT].temperature = emerg_temp;
return 0;
}
@@ -202,11 +210,11 @@ static int mlxsw_thermal_bind(struct thermal_zone_device *tzdev,
return 0;
for (i = 0; i < MLXSW_THERMAL_NUM_TRIPS; i++) {
- const struct mlxsw_thermal_trip *trip = &thermal->trips[i];
+ const struct mlxsw_cooling_states *state = &thermal->cooling_states[i];
err = thermal_zone_bind_cooling_device(tzdev, i, cdev,
- trip->max_state,
- trip->min_state,
+ state->max_state,
+ state->min_state,
THERMAL_WEIGHT_DEFAULT);
if (err < 0) {
dev_err(dev, "Failed to bind cooling device to trip %d\n", i);
@@ -260,61 +268,6 @@ static int mlxsw_thermal_get_temp(struct thermal_zone_device *tzdev,
return 0;
}
-static int mlxsw_thermal_get_trip_type(struct thermal_zone_device *tzdev,
- int trip,
- enum thermal_trip_type *p_type)
-{
- struct mlxsw_thermal *thermal = tzdev->devdata;
-
- if (trip < 0 || trip >= MLXSW_THERMAL_NUM_TRIPS)
- return -EINVAL;
-
- *p_type = thermal->trips[trip].type;
- return 0;
-}
-
-static int mlxsw_thermal_get_trip_temp(struct thermal_zone_device *tzdev,
- int trip, int *p_temp)
-{
- struct mlxsw_thermal *thermal = tzdev->devdata;
-
- if (trip < 0 || trip >= MLXSW_THERMAL_NUM_TRIPS)
- return -EINVAL;
-
- *p_temp = thermal->trips[trip].temp;
- return 0;
-}
-
-static int mlxsw_thermal_set_trip_temp(struct thermal_zone_device *tzdev,
- int trip, int temp)
-{
- struct mlxsw_thermal *thermal = tzdev->devdata;
-
- if (trip < 0 || trip >= MLXSW_THERMAL_NUM_TRIPS)
- return -EINVAL;
-
- thermal->trips[trip].temp = temp;
- return 0;
-}
-
-static int mlxsw_thermal_get_trip_hyst(struct thermal_zone_device *tzdev,
- int trip, int *p_hyst)
-{
- struct mlxsw_thermal *thermal = tzdev->devdata;
-
- *p_hyst = thermal->trips[trip].hyst;
- return 0;
-}
-
-static int mlxsw_thermal_set_trip_hyst(struct thermal_zone_device *tzdev,
- int trip, int hyst)
-{
- struct mlxsw_thermal *thermal = tzdev->devdata;
-
- thermal->trips[trip].hyst = hyst;
- return 0;
-}
-
static struct thermal_zone_params mlxsw_thermal_params = {
.no_hwmon = true,
};
@@ -323,11 +276,6 @@ static struct thermal_zone_device_ops mlxsw_thermal_ops = {
.bind = mlxsw_thermal_bind,
.unbind = mlxsw_thermal_unbind,
.get_temp = mlxsw_thermal_get_temp,
- .get_trip_type = mlxsw_thermal_get_trip_type,
- .get_trip_temp = mlxsw_thermal_get_trip_temp,
- .set_trip_temp = mlxsw_thermal_set_trip_temp,
- .get_trip_hyst = mlxsw_thermal_get_trip_hyst,
- .set_trip_hyst = mlxsw_thermal_set_trip_hyst,
};
static int mlxsw_thermal_module_bind(struct thermal_zone_device *tzdev,
@@ -342,11 +290,11 @@ static int mlxsw_thermal_module_bind(struct thermal_zone_device *tzdev,
return 0;
for (i = 0; i < MLXSW_THERMAL_NUM_TRIPS; i++) {
- const struct mlxsw_thermal_trip *trip = &tz->trips[i];
+ const struct mlxsw_cooling_states *state = &tz->cooling_states[i];
err = thermal_zone_bind_cooling_device(tzdev, i, cdev,
- trip->max_state,
- trip->min_state,
+ state->max_state,
+ state->min_state,
THERMAL_WEIGHT_DEFAULT);
if (err < 0)
goto err_thermal_zone_bind_cooling_device;
@@ -434,74 +382,10 @@ static int mlxsw_thermal_module_temp_get(struct thermal_zone_device *tzdev,
return 0;
}
-static int
-mlxsw_thermal_module_trip_type_get(struct thermal_zone_device *tzdev, int trip,
- enum thermal_trip_type *p_type)
-{
- struct mlxsw_thermal_module *tz = tzdev->devdata;
-
- if (trip < 0 || trip >= MLXSW_THERMAL_NUM_TRIPS)
- return -EINVAL;
-
- *p_type = tz->trips[trip].type;
- return 0;
-}
-
-static int
-mlxsw_thermal_module_trip_temp_get(struct thermal_zone_device *tzdev,
- int trip, int *p_temp)
-{
- struct mlxsw_thermal_module *tz = tzdev->devdata;
-
- if (trip < 0 || trip >= MLXSW_THERMAL_NUM_TRIPS)
- return -EINVAL;
-
- *p_temp = tz->trips[trip].temp;
- return 0;
-}
-
-static int
-mlxsw_thermal_module_trip_temp_set(struct thermal_zone_device *tzdev,
- int trip, int temp)
-{
- struct mlxsw_thermal_module *tz = tzdev->devdata;
-
- if (trip < 0 || trip >= MLXSW_THERMAL_NUM_TRIPS)
- return -EINVAL;
-
- tz->trips[trip].temp = temp;
- return 0;
-}
-
-static int
-mlxsw_thermal_module_trip_hyst_get(struct thermal_zone_device *tzdev, int trip,
- int *p_hyst)
-{
- struct mlxsw_thermal_module *tz = tzdev->devdata;
-
- *p_hyst = tz->trips[trip].hyst;
- return 0;
-}
-
-static int
-mlxsw_thermal_module_trip_hyst_set(struct thermal_zone_device *tzdev, int trip,
- int hyst)
-{
- struct mlxsw_thermal_module *tz = tzdev->devdata;
-
- tz->trips[trip].hyst = hyst;
- return 0;
-}
-
static struct thermal_zone_device_ops mlxsw_thermal_module_ops = {
.bind = mlxsw_thermal_module_bind,
.unbind = mlxsw_thermal_module_unbind,
.get_temp = mlxsw_thermal_module_temp_get,
- .get_trip_type = mlxsw_thermal_module_trip_type_get,
- .get_trip_temp = mlxsw_thermal_module_trip_temp_get,
- .set_trip_temp = mlxsw_thermal_module_trip_temp_set,
- .get_trip_hyst = mlxsw_thermal_module_trip_hyst_get,
- .set_trip_hyst = mlxsw_thermal_module_trip_hyst_set,
};
static int mlxsw_thermal_gearbox_temp_get(struct thermal_zone_device *tzdev,
@@ -531,11 +415,6 @@ static struct thermal_zone_device_ops mlxsw_thermal_gearbox_ops = {
.bind = mlxsw_thermal_module_bind,
.unbind = mlxsw_thermal_module_unbind,
.get_temp = mlxsw_thermal_gearbox_temp_get,
- .get_trip_type = mlxsw_thermal_module_trip_type_get,
- .get_trip_temp = mlxsw_thermal_module_trip_temp_get,
- .set_trip_temp = mlxsw_thermal_module_trip_temp_set,
- .get_trip_hyst = mlxsw_thermal_module_trip_hyst_get,
- .set_trip_hyst = mlxsw_thermal_module_trip_hyst_set,
};
static int mlxsw_thermal_get_max_state(struct thermal_cooling_device *cdev,
@@ -617,7 +496,8 @@ mlxsw_thermal_module_tz_init(struct mlxsw_thermal_module *module_tz)
else
snprintf(tz_name, sizeof(tz_name), "mlxsw-module%d",
module_tz->module + 1);
- module_tz->tzdev = thermal_zone_device_register(tz_name,
+ module_tz->tzdev = thermal_zone_device_register_with_trips(tz_name,
+ module_tz->trips,
MLXSW_THERMAL_NUM_TRIPS,
MLXSW_THERMAL_TRIP_MASK,
module_tz,
@@ -661,6 +541,8 @@ mlxsw_thermal_module_init(struct device *dev, struct mlxsw_core *core,
module_tz->parent = thermal;
memcpy(module_tz->trips, default_thermal_trips,
sizeof(thermal->trips));
+ memcpy(module_tz->cooling_states, default_cooling_states,
+ sizeof(thermal->cooling_states));
/* Initialize all trip point. */
mlxsw_thermal_module_trips_reset(module_tz);
/* Read module temperature and thresholds. */
@@ -756,7 +638,8 @@ mlxsw_thermal_gearbox_tz_init(struct mlxsw_thermal_module *gearbox_tz)
else
snprintf(tz_name, sizeof(tz_name), "mlxsw-gearbox%d",
gearbox_tz->module + 1);
- gearbox_tz->tzdev = thermal_zone_device_register(tz_name,
+ gearbox_tz->tzdev = thermal_zone_device_register_with_trips(tz_name,
+ gearbox_tz->trips,
MLXSW_THERMAL_NUM_TRIPS,
MLXSW_THERMAL_TRIP_MASK,
gearbox_tz,
@@ -813,6 +696,8 @@ mlxsw_thermal_gearboxes_init(struct device *dev, struct mlxsw_core *core,
gearbox_tz = &area->tz_gearbox_arr[i];
memcpy(gearbox_tz->trips, default_thermal_trips,
sizeof(thermal->trips));
+ memcpy(gearbox_tz->cooling_states, default_cooling_states,
+ sizeof(thermal->cooling_states));
gearbox_tz->module = i;
gearbox_tz->parent = thermal;
gearbox_tz->slot_index = area->slot_index;
@@ -928,6 +813,7 @@ int mlxsw_thermal_init(struct mlxsw_core *core,
thermal->core = core;
thermal->bus_info = bus_info;
memcpy(thermal->trips, default_thermal_trips, sizeof(thermal->trips));
+ memcpy(thermal->cooling_states, default_cooling_states, sizeof(thermal->cooling_states));
thermal->line_cards[0].slot_index = 0;
err = mlxsw_reg_query(thermal->core, MLXSW_REG(mfcr), mfcr_pl);
@@ -981,7 +867,8 @@ int mlxsw_thermal_init(struct mlxsw_core *core,
MLXSW_THERMAL_SLOW_POLL_INT :
MLXSW_THERMAL_POLL_INT;
- thermal->tzdev = thermal_zone_device_register("mlxsw",
+ thermal->tzdev = thermal_zone_device_register_with_trips("mlxsw",
+ thermal->trips,
MLXSW_THERMAL_NUM_TRIPS,
MLXSW_THERMAL_TRIP_MASK,
thermal,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/emad.h b/drivers/net/ethernet/mellanox/mlxsw/emad.h
index acfbbec52424..c51a61aa19b7 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/emad.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/emad.h
@@ -21,6 +21,7 @@ enum {
MLXSW_EMAD_TLV_TYPE_OP,
MLXSW_EMAD_TLV_TYPE_STRING,
MLXSW_EMAD_TLV_TYPE_REG,
+ MLXSW_EMAD_TLV_TYPE_LATENCY,
};
/* OP TLV */
@@ -90,6 +91,9 @@ enum {
/* STRING TLV */
#define MLXSW_EMAD_STRING_TLV_LEN 33 /* Length in u32 */
+/* LATENCY TLV */
+#define MLXSW_EMAD_LATENCY_TLV_LEN 7 /* Length in u32 */
+
/* END TLV */
#define MLXSW_EMAD_END_TLV_LEN 1 /* Length in u32 */
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index f2d6f8654e04..8165bf31a99a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -10009,6 +10009,18 @@ MLXSW_REG_DEFINE(mgir, MLXSW_REG_MGIR_ID, MLXSW_REG_MGIR_LEN);
*/
MLXSW_ITEM32(reg, mgir, hw_info_device_hw_revision, 0x0, 16, 16);
+/* reg_mgir_fw_info_latency_tlv
+ * When set, latency-TLV is supported.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mgir, fw_info_latency_tlv, 0x20, 29, 1);
+
+/* reg_mgir_fw_info_string_tlv
+ * When set, string-TLV is supported.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mgir, fw_info_string_tlv, 0x20, 28, 1);
+
#define MLXSW_REG_MGIR_FW_INFO_PSID_SIZE 16
/* reg_mgir_fw_info_psid
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index f5b2d965d476..a8f94b7544ee 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -3092,7 +3092,6 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->bus_info = mlxsw_bus_info;
mlxsw_sp_parsing_init(mlxsw_sp);
- mlxsw_core_emad_string_tlv_enable(mlxsw_core);
err = mlxsw_sp_base_mac_get(mlxsw_sp);
if (err) {
@@ -3862,62 +3861,6 @@ static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
return 0;
}
-static int
-mlxsw_sp_params_acl_region_rehash_intrvl_get(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
-{
- struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
- struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
-
- ctx->val.vu32 = mlxsw_sp_acl_region_rehash_intrvl_get(mlxsw_sp);
- return 0;
-}
-
-static int
-mlxsw_sp_params_acl_region_rehash_intrvl_set(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
-{
- struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
- struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
-
- return mlxsw_sp_acl_region_rehash_intrvl_set(mlxsw_sp, ctx->val.vu32);
-}
-
-static const struct devlink_param mlxsw_sp2_devlink_params[] = {
- DEVLINK_PARAM_DRIVER(MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL,
- "acl_region_rehash_interval",
- DEVLINK_PARAM_TYPE_U32,
- BIT(DEVLINK_PARAM_CMODE_RUNTIME),
- mlxsw_sp_params_acl_region_rehash_intrvl_get,
- mlxsw_sp_params_acl_region_rehash_intrvl_set,
- NULL),
-};
-
-static int mlxsw_sp2_params_register(struct mlxsw_core *mlxsw_core)
-{
- struct devlink *devlink = priv_to_devlink(mlxsw_core);
- union devlink_param_value value;
- int err;
-
- err = devlink_params_register(devlink, mlxsw_sp2_devlink_params,
- ARRAY_SIZE(mlxsw_sp2_devlink_params));
- if (err)
- return err;
-
- value.vu32 = 0;
- devlink_param_driverinit_value_set(devlink,
- MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL,
- value);
- return 0;
-}
-
-static void mlxsw_sp2_params_unregister(struct mlxsw_core *mlxsw_core)
-{
- devlink_params_unregister(priv_to_devlink(mlxsw_core),
- mlxsw_sp2_devlink_params,
- ARRAY_SIZE(mlxsw_sp2_devlink_params));
-}
-
static void mlxsw_sp_ptp_transmitted(struct mlxsw_core *mlxsw_core,
struct sk_buff *skb, u16 local_port)
{
@@ -3995,8 +3938,6 @@ static struct mlxsw_driver mlxsw_sp2_driver = {
.trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get,
.txhdr_construct = mlxsw_sp_txhdr_construct,
.resources_register = mlxsw_sp2_resources_register,
- .params_register = mlxsw_sp2_params_register,
- .params_unregister = mlxsw_sp2_params_unregister,
.ptp_transmitted = mlxsw_sp_ptp_transmitted,
.txhdr_len = MLXSW_TXHDR_LEN,
.profile = &mlxsw_sp2_config_profile,
@@ -4034,8 +3975,6 @@ static struct mlxsw_driver mlxsw_sp3_driver = {
.trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get,
.txhdr_construct = mlxsw_sp_txhdr_construct,
.resources_register = mlxsw_sp2_resources_register,
- .params_register = mlxsw_sp2_params_register,
- .params_unregister = mlxsw_sp2_params_unregister,
.ptp_transmitted = mlxsw_sp_ptp_transmitted,
.txhdr_len = MLXSW_TXHDR_LEN,
.profile = &mlxsw_sp2_config_profile,
@@ -4071,8 +4010,6 @@ static struct mlxsw_driver mlxsw_sp4_driver = {
.trap_policer_counter_get = mlxsw_sp_trap_policer_counter_get,
.txhdr_construct = mlxsw_sp_txhdr_construct,
.resources_register = mlxsw_sp2_resources_register,
- .params_register = mlxsw_sp2_params_register,
- .params_unregister = mlxsw_sp2_params_unregister,
.ptp_transmitted = mlxsw_sp_ptp_transmitted,
.txhdr_len = MLXSW_TXHDR_LEN,
.profile = &mlxsw_sp4_config_profile,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index bbc73324451d..4c22f8004514 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -973,6 +973,7 @@ enum mlxsw_sp_acl_profile {
};
struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl);
+struct mlxsw_sp_acl_tcam *mlxsw_sp_acl_to_tcam(struct mlxsw_sp_acl *acl);
int mlxsw_sp_acl_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_flow_block *block,
@@ -1096,8 +1097,6 @@ mlxsw_sp_acl_act_cookie_lookup(struct mlxsw_sp *mlxsw_sp, u32 cookie_index)
int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp);
-u32 mlxsw_sp_acl_region_rehash_intrvl_get(struct mlxsw_sp *mlxsw_sp);
-int mlxsw_sp_acl_region_rehash_intrvl_set(struct mlxsw_sp *mlxsw_sp, u32 val);
struct mlxsw_sp_acl_mangle_action;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
index 6c5af018546f..0423ac262d89 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
@@ -40,6 +40,11 @@ struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl)
return acl->afk;
}
+struct mlxsw_sp_acl_tcam *mlxsw_sp_acl_to_tcam(struct mlxsw_sp_acl *acl)
+{
+ return &acl->tcam;
+}
+
struct mlxsw_sp_acl_ruleset_ht_key {
struct mlxsw_sp_flow_block *block;
u32 chain_index;
@@ -1099,22 +1104,6 @@ void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp)
kfree(acl);
}
-u32 mlxsw_sp_acl_region_rehash_intrvl_get(struct mlxsw_sp *mlxsw_sp)
-{
- struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
-
- return mlxsw_sp_acl_tcam_vregion_rehash_intrvl_get(mlxsw_sp,
- &acl->tcam);
-}
-
-int mlxsw_sp_acl_region_rehash_intrvl_set(struct mlxsw_sp *mlxsw_sp, u32 val)
-{
- struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
-
- return mlxsw_sp_acl_tcam_vregion_rehash_intrvl_set(mlxsw_sp,
- &acl->tcam, val);
-}
-
struct mlxsw_sp_acl_rulei_ops mlxsw_sp1_acl_rulei_ops = {
.act_mangle_field = mlxsw_sp1_acl_rulei_act_mangle_field,
};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
index 3b9ba8fa247a..d50786b0a6ce 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
@@ -9,6 +9,7 @@
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
#include <linux/mutex.h>
+#include <net/devlink.h>
#include <trace/events/mlxsw.h>
#include "reg.h"
@@ -29,67 +30,6 @@ size_t mlxsw_sp_acl_tcam_priv_size(struct mlxsw_sp *mlxsw_sp)
#define MLXSW_SP_ACL_TCAM_VREGION_REHASH_INTRVL_MIN 3000 /* ms */
#define MLXSW_SP_ACL_TCAM_VREGION_REHASH_CREDITS 100 /* number of entries */
-int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_tcam *tcam)
-{
- const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
- u64 max_tcam_regions;
- u64 max_regions;
- u64 max_groups;
- int err;
-
- mutex_init(&tcam->lock);
- tcam->vregion_rehash_intrvl =
- MLXSW_SP_ACL_TCAM_VREGION_REHASH_INTRVL_DFLT;
- INIT_LIST_HEAD(&tcam->vregion_list);
-
- max_tcam_regions = MLXSW_CORE_RES_GET(mlxsw_sp->core,
- ACL_MAX_TCAM_REGIONS);
- max_regions = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_REGIONS);
-
- /* Use 1:1 mapping between ACL region and TCAM region */
- if (max_tcam_regions < max_regions)
- max_regions = max_tcam_regions;
-
- tcam->used_regions = bitmap_zalloc(max_regions, GFP_KERNEL);
- if (!tcam->used_regions)
- return -ENOMEM;
- tcam->max_regions = max_regions;
-
- max_groups = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_GROUPS);
- tcam->used_groups = bitmap_zalloc(max_groups, GFP_KERNEL);
- if (!tcam->used_groups) {
- err = -ENOMEM;
- goto err_alloc_used_groups;
- }
- tcam->max_groups = max_groups;
- tcam->max_group_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
- ACL_MAX_GROUP_SIZE);
-
- err = ops->init(mlxsw_sp, tcam->priv, tcam);
- if (err)
- goto err_tcam_init;
-
- return 0;
-
-err_tcam_init:
- bitmap_free(tcam->used_groups);
-err_alloc_used_groups:
- bitmap_free(tcam->used_regions);
- return err;
-}
-
-void mlxsw_sp_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_tcam *tcam)
-{
- const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
-
- mutex_destroy(&tcam->lock);
- ops->fini(mlxsw_sp, tcam->priv);
- bitmap_free(tcam->used_groups);
- bitmap_free(tcam->used_regions);
-}
-
int mlxsw_sp_acl_tcam_priority_get(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule_info *rulei,
u32 *priority, bool fillup_priority)
@@ -893,41 +833,6 @@ mlxsw_sp_acl_tcam_vregion_destroy(struct mlxsw_sp *mlxsw_sp,
kfree(vregion);
}
-u32 mlxsw_sp_acl_tcam_vregion_rehash_intrvl_get(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_tcam *tcam)
-{
- const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
- u32 vregion_rehash_intrvl;
-
- if (WARN_ON(!ops->region_rehash_hints_get))
- return 0;
- vregion_rehash_intrvl = tcam->vregion_rehash_intrvl;
- return vregion_rehash_intrvl;
-}
-
-int mlxsw_sp_acl_tcam_vregion_rehash_intrvl_set(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_tcam *tcam,
- u32 val)
-{
- const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
- struct mlxsw_sp_acl_tcam_vregion *vregion;
-
- if (val < MLXSW_SP_ACL_TCAM_VREGION_REHASH_INTRVL_MIN && val)
- return -EINVAL;
- if (WARN_ON(!ops->region_rehash_hints_get))
- return -EOPNOTSUPP;
- tcam->vregion_rehash_intrvl = val;
- mutex_lock(&tcam->lock);
- list_for_each_entry(vregion, &tcam->vregion_list, tlist) {
- if (val)
- mlxsw_core_schedule_dw(&vregion->rehash.dw, 0);
- else
- cancel_delayed_work_sync(&vregion->rehash.dw);
- }
- mutex_unlock(&tcam->lock);
- return 0;
-}
-
static struct mlxsw_sp_acl_tcam_vregion *
mlxsw_sp_acl_tcam_vregion_get(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_vgroup *vgroup,
@@ -1542,6 +1447,153 @@ mlxsw_sp_acl_tcam_vregion_rehash(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_acl_tcam_vregion_rehash_end(mlxsw_sp, vregion, ctx);
}
+static int
+mlxsw_sp_acl_tcam_region_rehash_intrvl_get(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+ struct mlxsw_sp_acl_tcam *tcam;
+ struct mlxsw_sp *mlxsw_sp;
+
+ mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ tcam = mlxsw_sp_acl_to_tcam(mlxsw_sp->acl);
+ ctx->val.vu32 = tcam->vregion_rehash_intrvl;
+
+ return 0;
+}
+
+static int
+mlxsw_sp_acl_tcam_region_rehash_intrvl_set(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+ struct mlxsw_sp_acl_tcam_vregion *vregion;
+ struct mlxsw_sp_acl_tcam *tcam;
+ struct mlxsw_sp *mlxsw_sp;
+ u32 val = ctx->val.vu32;
+
+ if (val < MLXSW_SP_ACL_TCAM_VREGION_REHASH_INTRVL_MIN && val)
+ return -EINVAL;
+
+ mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ tcam = mlxsw_sp_acl_to_tcam(mlxsw_sp->acl);
+ tcam->vregion_rehash_intrvl = val;
+ mutex_lock(&tcam->lock);
+ list_for_each_entry(vregion, &tcam->vregion_list, tlist) {
+ if (val)
+ mlxsw_core_schedule_dw(&vregion->rehash.dw, 0);
+ else
+ cancel_delayed_work_sync(&vregion->rehash.dw);
+ }
+ mutex_unlock(&tcam->lock);
+ return 0;
+}
+
+static const struct devlink_param mlxsw_sp_acl_tcam_rehash_params[] = {
+ DEVLINK_PARAM_DRIVER(MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL,
+ "acl_region_rehash_interval",
+ DEVLINK_PARAM_TYPE_U32,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ mlxsw_sp_acl_tcam_region_rehash_intrvl_get,
+ mlxsw_sp_acl_tcam_region_rehash_intrvl_set,
+ NULL),
+};
+
+static int mlxsw_sp_acl_tcam_rehash_params_register(struct mlxsw_sp *mlxsw_sp)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+
+ if (!mlxsw_sp->acl_tcam_ops->region_rehash_hints_get)
+ return 0;
+
+ return devl_params_register(devlink, mlxsw_sp_acl_tcam_rehash_params,
+ ARRAY_SIZE(mlxsw_sp_acl_tcam_rehash_params));
+}
+
+static void
+mlxsw_sp_acl_tcam_rehash_params_unregister(struct mlxsw_sp *mlxsw_sp)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+
+ if (!mlxsw_sp->acl_tcam_ops->region_rehash_hints_get)
+ return;
+
+ devl_params_unregister(devlink, mlxsw_sp_acl_tcam_rehash_params,
+ ARRAY_SIZE(mlxsw_sp_acl_tcam_rehash_params));
+}
+
+int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam *tcam)
+{
+ const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
+ u64 max_tcam_regions;
+ u64 max_regions;
+ u64 max_groups;
+ int err;
+
+ mutex_init(&tcam->lock);
+ tcam->vregion_rehash_intrvl =
+ MLXSW_SP_ACL_TCAM_VREGION_REHASH_INTRVL_DFLT;
+ INIT_LIST_HEAD(&tcam->vregion_list);
+
+ err = mlxsw_sp_acl_tcam_rehash_params_register(mlxsw_sp);
+ if (err)
+ goto err_rehash_params_register;
+
+ max_tcam_regions = MLXSW_CORE_RES_GET(mlxsw_sp->core,
+ ACL_MAX_TCAM_REGIONS);
+ max_regions = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_REGIONS);
+
+ /* Use 1:1 mapping between ACL region and TCAM region */
+ if (max_tcam_regions < max_regions)
+ max_regions = max_tcam_regions;
+
+ tcam->used_regions = bitmap_zalloc(max_regions, GFP_KERNEL);
+ if (!tcam->used_regions) {
+ err = -ENOMEM;
+ goto err_alloc_used_regions;
+ }
+ tcam->max_regions = max_regions;
+
+ max_groups = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_GROUPS);
+ tcam->used_groups = bitmap_zalloc(max_groups, GFP_KERNEL);
+ if (!tcam->used_groups) {
+ err = -ENOMEM;
+ goto err_alloc_used_groups;
+ }
+ tcam->max_groups = max_groups;
+ tcam->max_group_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
+ ACL_MAX_GROUP_SIZE);
+
+ err = ops->init(mlxsw_sp, tcam->priv, tcam);
+ if (err)
+ goto err_tcam_init;
+
+ return 0;
+
+err_tcam_init:
+ bitmap_free(tcam->used_groups);
+err_alloc_used_groups:
+ bitmap_free(tcam->used_regions);
+err_alloc_used_regions:
+ mlxsw_sp_acl_tcam_rehash_params_unregister(mlxsw_sp);
+err_rehash_params_register:
+ mutex_destroy(&tcam->lock);
+ return err;
+}
+
+void mlxsw_sp_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_acl_tcam *tcam)
+{
+ const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
+
+ ops->fini(mlxsw_sp, tcam->priv);
+ bitmap_free(tcam->used_groups);
+ bitmap_free(tcam->used_regions);
+ mlxsw_sp_acl_tcam_rehash_params_unregister(mlxsw_sp);
+ mutex_destroy(&tcam->lock);
+}
+
static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv4[] = {
MLXSW_AFK_ELEMENT_SRC_SYS_PORT,
MLXSW_AFK_ELEMENT_DMAC_32_47,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h
index edbbc89e7a71..462bf448497d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h
@@ -29,11 +29,6 @@ int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam *tcam);
void mlxsw_sp_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam *tcam);
-u32 mlxsw_sp_acl_tcam_vregion_rehash_intrvl_get(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_tcam *tcam);
-int mlxsw_sp_acl_tcam_vregion_rehash_intrvl_set(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_acl_tcam *tcam,
- u32 val);
int mlxsw_sp_acl_tcam_priority_get(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule_info *rulei,
u32 *priority, bool fillup_priority);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
index e91fb205e0b4..594cdcb90b3d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
@@ -103,7 +103,7 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
}
ingress = mlxsw_sp_flow_block_is_ingress_bound(block);
err = mlxsw_sp_acl_rulei_act_drop(rulei, ingress,
- act->cookie, extack);
+ act->user_cookie, extack);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Cannot append drop action");
return err;
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index 534840f9a7ca..7e0871b631e4 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -792,7 +792,7 @@ static int lan743x_mac_mii_wait_till_not_busy(struct lan743x_adapter *adapter)
!(data & MAC_MII_ACC_MII_BUSY_), 0, 1000000);
}
-static int lan743x_mdiobus_read(struct mii_bus *bus, int phy_id, int index)
+static int lan743x_mdiobus_read_c22(struct mii_bus *bus, int phy_id, int index)
{
struct lan743x_adapter *adapter = bus->priv;
u32 val, mii_access;
@@ -814,8 +814,8 @@ static int lan743x_mdiobus_read(struct mii_bus *bus, int phy_id, int index)
return (int)(val & 0xFFFF);
}
-static int lan743x_mdiobus_write(struct mii_bus *bus,
- int phy_id, int index, u16 regval)
+static int lan743x_mdiobus_write_c22(struct mii_bus *bus,
+ int phy_id, int index, u16 regval)
{
struct lan743x_adapter *adapter = bus->priv;
u32 val, mii_access;
@@ -835,12 +835,10 @@ static int lan743x_mdiobus_write(struct mii_bus *bus,
return ret;
}
-static u32 lan743x_mac_mmd_access(int id, int index, int op)
+static u32 lan743x_mac_mmd_access(int id, int dev_addr, int op)
{
- u16 dev_addr;
u32 ret;
- dev_addr = (index >> 16) & 0x1f;
ret = (id << MAC_MII_ACC_PHY_ADDR_SHIFT_) &
MAC_MII_ACC_PHY_ADDR_MASK_;
ret |= (dev_addr << MAC_MII_ACC_MIIMMD_SHIFT_) &
@@ -858,7 +856,8 @@ static u32 lan743x_mac_mmd_access(int id, int index, int op)
return ret;
}
-static int lan743x_mdiobus_c45_read(struct mii_bus *bus, int phy_id, int index)
+static int lan743x_mdiobus_read_c45(struct mii_bus *bus, int phy_id,
+ int dev_addr, int index)
{
struct lan743x_adapter *adapter = bus->priv;
u32 mmd_access;
@@ -868,32 +867,30 @@ static int lan743x_mdiobus_c45_read(struct mii_bus *bus, int phy_id, int index)
ret = lan743x_mac_mii_wait_till_not_busy(adapter);
if (ret < 0)
return ret;
- if (index & MII_ADDR_C45) {
- /* Load Register Address */
- lan743x_csr_write(adapter, MAC_MII_DATA, (u32)(index & 0xffff));
- mmd_access = lan743x_mac_mmd_access(phy_id, index,
- MMD_ACCESS_ADDRESS);
- lan743x_csr_write(adapter, MAC_MII_ACC, mmd_access);
- ret = lan743x_mac_mii_wait_till_not_busy(adapter);
- if (ret < 0)
- return ret;
- /* Read Data */
- mmd_access = lan743x_mac_mmd_access(phy_id, index,
- MMD_ACCESS_READ);
- lan743x_csr_write(adapter, MAC_MII_ACC, mmd_access);
- ret = lan743x_mac_mii_wait_till_not_busy(adapter);
- if (ret < 0)
- return ret;
- ret = lan743x_csr_read(adapter, MAC_MII_DATA);
- return (int)(ret & 0xFFFF);
- }
- ret = lan743x_mdiobus_read(bus, phy_id, index);
- return ret;
+ /* Load Register Address */
+ lan743x_csr_write(adapter, MAC_MII_DATA, index);
+ mmd_access = lan743x_mac_mmd_access(phy_id, dev_addr,
+ MMD_ACCESS_ADDRESS);
+ lan743x_csr_write(adapter, MAC_MII_ACC, mmd_access);
+ ret = lan743x_mac_mii_wait_till_not_busy(adapter);
+ if (ret < 0)
+ return ret;
+
+ /* Read Data */
+ mmd_access = lan743x_mac_mmd_access(phy_id, dev_addr,
+ MMD_ACCESS_READ);
+ lan743x_csr_write(adapter, MAC_MII_ACC, mmd_access);
+ ret = lan743x_mac_mii_wait_till_not_busy(adapter);
+ if (ret < 0)
+ return ret;
+
+ ret = lan743x_csr_read(adapter, MAC_MII_DATA);
+ return (int)(ret & 0xFFFF);
}
-static int lan743x_mdiobus_c45_write(struct mii_bus *bus,
- int phy_id, int index, u16 regval)
+static int lan743x_mdiobus_write_c45(struct mii_bus *bus, int phy_id,
+ int dev_addr, int index, u16 regval)
{
struct lan743x_adapter *adapter = bus->priv;
u32 mmd_access;
@@ -903,26 +900,23 @@ static int lan743x_mdiobus_c45_write(struct mii_bus *bus,
ret = lan743x_mac_mii_wait_till_not_busy(adapter);
if (ret < 0)
return ret;
- if (index & MII_ADDR_C45) {
- /* Load Register Address */
- lan743x_csr_write(adapter, MAC_MII_DATA, (u32)(index & 0xffff));
- mmd_access = lan743x_mac_mmd_access(phy_id, index,
- MMD_ACCESS_ADDRESS);
- lan743x_csr_write(adapter, MAC_MII_ACC, mmd_access);
- ret = lan743x_mac_mii_wait_till_not_busy(adapter);
- if (ret < 0)
- return ret;
- /* Write Data */
- lan743x_csr_write(adapter, MAC_MII_DATA, (u32)regval);
- mmd_access = lan743x_mac_mmd_access(phy_id, index,
- MMD_ACCESS_WRITE);
- lan743x_csr_write(adapter, MAC_MII_ACC, mmd_access);
- ret = lan743x_mac_mii_wait_till_not_busy(adapter);
- } else {
- ret = lan743x_mdiobus_write(bus, phy_id, index, regval);
- }
- return ret;
+ /* Load Register Address */
+ lan743x_csr_write(adapter, MAC_MII_DATA, (u32)index);
+ mmd_access = lan743x_mac_mmd_access(phy_id, dev_addr,
+ MMD_ACCESS_ADDRESS);
+ lan743x_csr_write(adapter, MAC_MII_ACC, mmd_access);
+ ret = lan743x_mac_mii_wait_till_not_busy(adapter);
+ if (ret < 0)
+ return ret;
+
+ /* Write Data */
+ lan743x_csr_write(adapter, MAC_MII_DATA, (u32)regval);
+ mmd_access = lan743x_mac_mmd_access(phy_id, dev_addr,
+ MMD_ACCESS_WRITE);
+ lan743x_csr_write(adapter, MAC_MII_ACC, mmd_access);
+
+ return lan743x_mac_mii_wait_till_not_busy(adapter);
}
static int lan743x_sgmii_wait_till_not_busy(struct lan743x_adapter *adapter)
@@ -1424,14 +1418,6 @@ static void lan743x_phy_link_status_change(struct net_device *netdev)
data = lan743x_csr_read(adapter, MAC_CR);
- /* set interface mode */
- if (phy_interface_is_rgmii(phydev))
- /* RGMII */
- data &= ~MAC_CR_MII_EN_;
- else
- /* GMII */
- data |= MAC_CR_MII_EN_;
-
/* set duplex mode */
if (phydev->duplex)
data |= MAC_CR_DPX_;
@@ -1483,10 +1469,33 @@ static void lan743x_phy_close(struct lan743x_adapter *adapter)
netdev->phydev = NULL;
}
+static void lan743x_phy_interface_select(struct lan743x_adapter *adapter)
+{
+ u32 id_rev;
+ u32 data;
+
+ data = lan743x_csr_read(adapter, MAC_CR);
+ id_rev = adapter->csr.id_rev & ID_REV_ID_MASK_;
+
+ if (adapter->is_pci11x1x && adapter->is_sgmii_en)
+ adapter->phy_interface = PHY_INTERFACE_MODE_SGMII;
+ else if (id_rev == ID_REV_ID_LAN7430_)
+ adapter->phy_interface = PHY_INTERFACE_MODE_GMII;
+ else if ((id_rev == ID_REV_ID_LAN7431_) && (data & MAC_CR_MII_EN_))
+ adapter->phy_interface = PHY_INTERFACE_MODE_MII;
+ else
+ adapter->phy_interface = PHY_INTERFACE_MODE_RGMII;
+}
+
static int lan743x_phy_open(struct lan743x_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
struct lan743x_phy *phy = &adapter->phy;
+ struct fixed_phy_status fphy_status = {
+ .link = 1,
+ .speed = SPEED_1000,
+ .duplex = DUPLEX_FULL,
+ };
struct phy_device *phydev;
int ret = -EIO;
@@ -1497,17 +1506,25 @@ static int lan743x_phy_open(struct lan743x_adapter *adapter)
if (!phydev) {
/* try internal phy */
phydev = phy_find_first(adapter->mdiobus);
- if (!phydev)
- goto return_error;
+ if (!phydev) {
+ if ((adapter->csr.id_rev & ID_REV_ID_MASK_) ==
+ ID_REV_ID_LAN7431_) {
+ phydev = fixed_phy_register(PHY_POLL,
+ &fphy_status, NULL);
+ if (IS_ERR(phydev)) {
+ netdev_err(netdev, "No PHY/fixed_PHY found\n");
+ return -EIO;
+ }
+ } else {
+ goto return_error;
+ }
+ }
- if (adapter->is_pci11x1x)
- ret = phy_connect_direct(netdev, phydev,
- lan743x_phy_link_status_change,
- PHY_INTERFACE_MODE_RGMII);
- else
- ret = phy_connect_direct(netdev, phydev,
- lan743x_phy_link_status_change,
- PHY_INTERFACE_MODE_GMII);
+ lan743x_phy_interface_select(adapter);
+
+ ret = phy_connect_direct(netdev, phydev,
+ lan743x_phy_link_status_change,
+ adapter->phy_interface);
if (ret)
goto return_error;
}
@@ -3285,9 +3302,10 @@ static int lan743x_mdiobus_init(struct lan743x_adapter *adapter)
lan743x_csr_write(adapter, SGMII_CTL, sgmii_ctl);
netif_dbg(adapter, drv, adapter->netdev,
"SGMII operation\n");
- adapter->mdiobus->probe_capabilities = MDIOBUS_C22_C45;
- adapter->mdiobus->read = lan743x_mdiobus_c45_read;
- adapter->mdiobus->write = lan743x_mdiobus_c45_write;
+ adapter->mdiobus->read = lan743x_mdiobus_read_c22;
+ adapter->mdiobus->write = lan743x_mdiobus_write_c22;
+ adapter->mdiobus->read_c45 = lan743x_mdiobus_read_c45;
+ adapter->mdiobus->write_c45 = lan743x_mdiobus_write_c45;
adapter->mdiobus->name = "lan743x-mdiobus-c45";
netif_dbg(adapter, drv, adapter->netdev,
"lan743x-mdiobus-c45\n");
@@ -3299,16 +3317,15 @@ static int lan743x_mdiobus_init(struct lan743x_adapter *adapter)
netif_dbg(adapter, drv, adapter->netdev,
"RGMII operation\n");
// Only C22 support when RGMII I/F
- adapter->mdiobus->probe_capabilities = MDIOBUS_C22;
- adapter->mdiobus->read = lan743x_mdiobus_read;
- adapter->mdiobus->write = lan743x_mdiobus_write;
+ adapter->mdiobus->read = lan743x_mdiobus_read_c22;
+ adapter->mdiobus->write = lan743x_mdiobus_write_c22;
adapter->mdiobus->name = "lan743x-mdiobus";
netif_dbg(adapter, drv, adapter->netdev,
"lan743x-mdiobus\n");
}
} else {
- adapter->mdiobus->read = lan743x_mdiobus_read;
- adapter->mdiobus->write = lan743x_mdiobus_write;
+ adapter->mdiobus->read = lan743x_mdiobus_read_c22;
+ adapter->mdiobus->write = lan743x_mdiobus_write_c22;
adapter->mdiobus->name = "lan743x-mdiobus";
netif_dbg(adapter, drv, adapter->netdev, "lan743x-mdiobus\n");
}
diff --git a/drivers/net/ethernet/microchip/lan743x_main.h b/drivers/net/ethernet/microchip/lan743x_main.h
index 8438c3dbcf36..52609fc13ad9 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.h
+++ b/drivers/net/ethernet/microchip/lan743x_main.h
@@ -1042,6 +1042,7 @@ struct lan743x_adapter {
#define LAN743X_ADAPTER_FLAG_OTP BIT(0)
u32 flags;
u32 hw_cfg;
+ phy_interface_t phy_interface;
};
#define LAN743X_COMPONENT_FLAG_RX(channel) BIT(20 + (channel))
diff --git a/drivers/net/ethernet/microchip/lan966x/Makefile b/drivers/net/ethernet/microchip/lan966x/Makefile
index 56afd694f3c7..7b0cda4ffa6b 100644
--- a/drivers/net/ethernet/microchip/lan966x/Makefile
+++ b/drivers/net/ethernet/microchip/lan966x/Makefile
@@ -15,5 +15,7 @@ lan966x-switch-objs := lan966x_main.o lan966x_phylink.o lan966x_port.o \
lan966x_xdp.o lan966x_vcap_impl.o lan966x_vcap_ag_api.o \
lan966x_tc_flower.o lan966x_goto.o
+lan966x-switch-$(CONFIG_DEBUG_FS) += lan966x_vcap_debugfs.o
+
# Provide include files
ccflags-y += -I$(srctree)/drivers/net/ethernet/microchip/vcap
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c b/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c
index 5314c064ceae..55b484b10562 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c
@@ -608,12 +608,12 @@ allocate_new:
lan966x_fdma_rx_reload(rx);
}
- if (counter < weight && napi_complete_done(napi, counter))
- lan_wr(0xff, lan966x, FDMA_INTR_DB_ENA);
-
if (redirect)
xdp_do_flush();
+ if (counter < weight && napi_complete_done(napi, counter))
+ lan_wr(0xff, lan966x, FDMA_INTR_DB_ENA);
+
return counter;
}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_goto.c b/drivers/net/ethernet/microchip/lan966x/lan966x_goto.c
index bf0cfe24a8fc..9b18156eea1a 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_goto.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_goto.c
@@ -4,7 +4,7 @@
#include "vcap_api_client.h"
int lan966x_goto_port_add(struct lan966x_port *port,
- struct flow_action_entry *act,
+ int from_cid, int to_cid,
unsigned long goto_id,
struct netlink_ext_ack *extack)
{
@@ -12,7 +12,7 @@ int lan966x_goto_port_add(struct lan966x_port *port,
int err;
err = vcap_enable_lookups(lan966x->vcap_ctrl, port->dev,
- act->chain_index, goto_id,
+ from_cid, to_cid, goto_id,
true);
if (err == -EFAULT) {
NL_SET_ERR_MSG_MOD(extack, "Unsupported goto chain");
@@ -29,8 +29,6 @@ int lan966x_goto_port_add(struct lan966x_port *port,
return err;
}
- port->tc.goto_id = goto_id;
-
return 0;
}
@@ -41,14 +39,12 @@ int lan966x_goto_port_del(struct lan966x_port *port,
struct lan966x *lan966x = port->lan966x;
int err;
- err = vcap_enable_lookups(lan966x->vcap_ctrl, port->dev, 0,
+ err = vcap_enable_lookups(lan966x->vcap_ctrl, port->dev, 0, 0,
goto_id, false);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Could not disable VCAP lookups");
return err;
}
- port->tc.goto_id = 0;
-
return 0;
}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
index 580c91d24a52..685e8cd7658c 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
@@ -823,6 +823,11 @@ static int lan966x_probe_port(struct lan966x *lan966x, u32 p,
port->phylink = phylink;
+ if (lan966x->fdma)
+ dev->xdp_features = NETDEV_XDP_ACT_BASIC |
+ NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT;
+
err = register_netdev(dev);
if (err) {
dev_err(lan966x->dev, "register_netdev failed\n");
@@ -1035,6 +1040,8 @@ static int lan966x_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, lan966x);
lan966x->dev = &pdev->dev;
+ lan966x->debugfs_root = debugfs_create_dir("lan966x", NULL);
+
if (!device_get_mac_address(&pdev->dev, mac_addr)) {
ether_addr_copy(lan966x->base_mac, mac_addr);
} else {
@@ -1147,9 +1154,8 @@ static int lan966x_probe(struct platform_device *pdev)
lan966x->ports[p]->config.portmode = phy_mode;
lan966x->ports[p]->fwnode = fwnode_handle_get(portnp);
- serdes = devm_of_phy_get(lan966x->dev, to_of_node(portnp), NULL);
- if (PTR_ERR(serdes) == -ENODEV)
- serdes = NULL;
+ serdes = devm_of_phy_optional_get(lan966x->dev,
+ to_of_node(portnp), NULL);
if (IS_ERR(serdes)) {
err = PTR_ERR(serdes);
goto cleanup_ports;
@@ -1223,6 +1229,8 @@ static int lan966x_remove(struct platform_device *pdev)
lan966x_fdb_deinit(lan966x);
lan966x_ptp_deinit(lan966x);
+ debugfs_remove_recursive(lan966x->debugfs_root);
+
return 0;
}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
index 3491f1961835..49f5159afbf3 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
@@ -3,6 +3,7 @@
#ifndef __LAN966X_MAIN_H__
#define __LAN966X_MAIN_H__
+#include <linux/debugfs.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/jiffies.h>
@@ -14,6 +15,9 @@
#include <net/pkt_sched.h>
#include <net/switchdev.h>
+#include <vcap_api.h>
+#include <vcap_api_client.h>
+
#include "lan966x_regs.h"
#include "lan966x_ifh.h"
@@ -128,6 +132,13 @@ enum LAN966X_PORT_MASK_MODE {
LAN966X_PMM_REDIRECT,
};
+enum vcap_is2_port_sel_ipv6 {
+ VCAP_IS2_PS_IPV6_TCPUDP_OTHER,
+ VCAP_IS2_PS_IPV6_STD,
+ VCAP_IS2_PS_IPV6_IP4_TCPUDP_IP4_OTHER,
+ VCAP_IS2_PS_IPV6_MAC_ETYPE,
+};
+
struct lan966x_port;
struct lan966x_db {
@@ -315,6 +326,9 @@ struct lan966x {
/* vcap */
struct vcap_control *vcap_ctrl;
+
+ /* debugfs */
+ struct dentry *debugfs_root;
};
struct lan966x_port_config {
@@ -332,7 +346,6 @@ struct lan966x_port_tc {
unsigned long police_id;
unsigned long ingress_mirror_id;
unsigned long egress_mirror_id;
- unsigned long goto_id;
struct flow_stats police_stat;
struct flow_stats mirror_stat;
};
@@ -602,12 +615,25 @@ static inline bool lan966x_xdp_port_present(struct lan966x_port *port)
int lan966x_vcap_init(struct lan966x *lan966x);
void lan966x_vcap_deinit(struct lan966x *lan966x);
+#if defined(CONFIG_DEBUG_FS)
+int lan966x_vcap_port_info(struct net_device *dev,
+ struct vcap_admin *admin,
+ struct vcap_output_print *out);
+#else
+static inline int lan966x_vcap_port_info(struct net_device *dev,
+ struct vcap_admin *admin,
+ struct vcap_output_print *out)
+{
+ return 0;
+}
+#endif
int lan966x_tc_flower(struct lan966x_port *port,
- struct flow_cls_offload *f);
+ struct flow_cls_offload *f,
+ bool ingress);
int lan966x_goto_port_add(struct lan966x_port *port,
- struct flow_action_entry *act,
+ int from_cid, int to_cid,
unsigned long goto_id,
struct netlink_ext_ack *extack);
int lan966x_goto_port_del(struct lan966x_port *port,
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c b/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
index a8348437dd87..931e37b9a0ad 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
@@ -83,8 +83,7 @@ static int lan966x_ptp_add_trap(struct lan966x_port *port,
if (err)
goto free_rule;
- err = vcap_set_rule_set_actionset(vrule, VCAP_AFS_BASE_TYPE);
- err |= vcap_rule_add_action_bit(vrule, VCAP_AF_CPU_COPY_ENA, VCAP_BIT_1);
+ err = vcap_rule_add_action_bit(vrule, VCAP_AF_CPU_COPY_ENA, VCAP_BIT_1);
err |= vcap_rule_add_action_u32(vrule, VCAP_AF_MASK_MODE, LAN966X_PMM_REPLACE);
err |= vcap_val_rule(vrule, proto);
if (err)
@@ -524,9 +523,9 @@ irqreturn_t lan966x_ptp_irq_handler(int irq, void *args)
if (WARN_ON(!skb_match))
continue;
- spin_lock(&lan966x->ptp_ts_id_lock);
+ spin_lock_irqsave(&lan966x->ptp_ts_id_lock, flags);
lan966x->ptp_skbs--;
- spin_unlock(&lan966x->ptp_ts_id_lock);
+ spin_unlock_irqrestore(&lan966x->ptp_ts_id_lock, flags);
/* Get the h/w timestamp */
lan966x_get_hwtimestamp(lan966x, &ts, delay);
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_tc.c b/drivers/net/ethernet/microchip/lan966x/lan966x_tc.c
index 01072121c999..cf0cc7562d04 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_tc.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_tc.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0+
#include <net/pkt_cls.h>
+#include <net/pkt_sched.h>
#include "lan966x_main.h"
@@ -70,7 +71,7 @@ static int lan966x_tc_block_cb(enum tc_setup_type type, void *type_data,
case TC_SETUP_CLSMATCHALL:
return lan966x_tc_matchall(port, type_data, ingress);
case TC_SETUP_CLSFLOWER:
- return lan966x_tc_flower(port, type_data);
+ return lan966x_tc_flower(port, type_data, ingress);
default:
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_tc_flower.c b/drivers/net/ethernet/microchip/lan966x/lan966x_tc_flower.c
index ba3fa917d6b7..f960727ecaee 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_tc_flower.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_tc_flower.c
@@ -3,53 +3,135 @@
#include "lan966x_main.h"
#include "vcap_api.h"
#include "vcap_api_client.h"
+#include "vcap_tc.h"
-struct lan966x_tc_flower_parse_usage {
- struct flow_cls_offload *f;
- struct flow_rule *frule;
- struct vcap_rule *vrule;
- unsigned int used_keys;
- u16 l3_proto;
-};
+static bool lan966x_tc_is_known_etype(u16 etype)
+{
+ switch (etype) {
+ case ETH_P_ALL:
+ case ETH_P_ARP:
+ case ETH_P_IP:
+ case ETH_P_IPV6:
+ return true;
+ }
+
+ return false;
+}
-static int lan966x_tc_flower_handler_ethaddr_usage(struct lan966x_tc_flower_parse_usage *st)
+static int
+lan966x_tc_flower_handler_control_usage(struct vcap_tc_flower_parse_usage *st)
{
- enum vcap_key_field smac_key = VCAP_KF_L2_SMAC;
- enum vcap_key_field dmac_key = VCAP_KF_L2_DMAC;
- struct flow_match_eth_addrs match;
- struct vcap_u48_key smac, dmac;
+ struct flow_match_control match;
int err = 0;
- flow_rule_match_eth_addrs(st->frule, &match);
-
- if (!is_zero_ether_addr(match.mask->src)) {
- vcap_netbytes_copy(smac.value, match.key->src, ETH_ALEN);
- vcap_netbytes_copy(smac.mask, match.mask->src, ETH_ALEN);
- err = vcap_rule_add_key_u48(st->vrule, smac_key, &smac);
+ flow_rule_match_control(st->frule, &match);
+ if (match.mask->flags & FLOW_DIS_IS_FRAGMENT) {
+ if (match.key->flags & FLOW_DIS_IS_FRAGMENT)
+ err = vcap_rule_add_key_bit(st->vrule,
+ VCAP_KF_L3_FRAGMENT,
+ VCAP_BIT_1);
+ else
+ err = vcap_rule_add_key_bit(st->vrule,
+ VCAP_KF_L3_FRAGMENT,
+ VCAP_BIT_0);
if (err)
goto out;
}
- if (!is_zero_ether_addr(match.mask->dst)) {
- vcap_netbytes_copy(dmac.value, match.key->dst, ETH_ALEN);
- vcap_netbytes_copy(dmac.mask, match.mask->dst, ETH_ALEN);
- err = vcap_rule_add_key_u48(st->vrule, dmac_key, &dmac);
+ if (match.mask->flags & FLOW_DIS_FIRST_FRAG) {
+ if (match.key->flags & FLOW_DIS_FIRST_FRAG)
+ err = vcap_rule_add_key_bit(st->vrule,
+ VCAP_KF_L3_FRAG_OFS_GT0,
+ VCAP_BIT_0);
+ else
+ err = vcap_rule_add_key_bit(st->vrule,
+ VCAP_KF_L3_FRAG_OFS_GT0,
+ VCAP_BIT_1);
if (err)
goto out;
}
- st->used_keys |= BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS);
+ st->used_keys |= BIT(FLOW_DISSECTOR_KEY_CONTROL);
return err;
out:
- NL_SET_ERR_MSG_MOD(st->f->common.extack, "eth_addr parse error");
+ NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ip_frag parse error");
return err;
}
static int
-(*lan966x_tc_flower_handlers_usage[])(struct lan966x_tc_flower_parse_usage *st) = {
- [FLOW_DISSECTOR_KEY_ETH_ADDRS] = lan966x_tc_flower_handler_ethaddr_usage,
+lan966x_tc_flower_handler_basic_usage(struct vcap_tc_flower_parse_usage *st)
+{
+ struct flow_match_basic match;
+ int err = 0;
+
+ flow_rule_match_basic(st->frule, &match);
+ if (match.mask->n_proto) {
+ st->l3_proto = be16_to_cpu(match.key->n_proto);
+ if (!lan966x_tc_is_known_etype(st->l3_proto)) {
+ err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_ETYPE,
+ st->l3_proto, ~0);
+ if (err)
+ goto out;
+ } else if (st->l3_proto == ETH_P_IP) {
+ err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_IP4_IS,
+ VCAP_BIT_1);
+ if (err)
+ goto out;
+ }
+ }
+ if (match.mask->ip_proto) {
+ st->l4_proto = match.key->ip_proto;
+
+ if (st->l4_proto == IPPROTO_TCP) {
+ err = vcap_rule_add_key_bit(st->vrule,
+ VCAP_KF_TCP_IS,
+ VCAP_BIT_1);
+ if (err)
+ goto out;
+ } else if (st->l4_proto == IPPROTO_UDP) {
+ err = vcap_rule_add_key_bit(st->vrule,
+ VCAP_KF_TCP_IS,
+ VCAP_BIT_0);
+ if (err)
+ goto out;
+ } else {
+ err = vcap_rule_add_key_u32(st->vrule,
+ VCAP_KF_L3_IP_PROTO,
+ st->l4_proto, ~0);
+ if (err)
+ goto out;
+ }
+ }
+
+ st->used_keys |= BIT(FLOW_DISSECTOR_KEY_BASIC);
+ return err;
+out:
+ NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ip_proto parse error");
+ return err;
+}
+
+static int
+lan966x_tc_flower_handler_vlan_usage(struct vcap_tc_flower_parse_usage *st)
+{
+ return vcap_tc_flower_handler_vlan_usage(st,
+ VCAP_KF_8021Q_VID_CLS,
+ VCAP_KF_8021Q_PCP_CLS);
+}
+
+static int
+(*lan966x_tc_flower_handlers_usage[])(struct vcap_tc_flower_parse_usage *st) = {
+ [FLOW_DISSECTOR_KEY_ETH_ADDRS] = vcap_tc_flower_handler_ethaddr_usage,
+ [FLOW_DISSECTOR_KEY_IPV4_ADDRS] = vcap_tc_flower_handler_ipv4_usage,
+ [FLOW_DISSECTOR_KEY_IPV6_ADDRS] = vcap_tc_flower_handler_ipv6_usage,
+ [FLOW_DISSECTOR_KEY_CONTROL] = lan966x_tc_flower_handler_control_usage,
+ [FLOW_DISSECTOR_KEY_PORTS] = vcap_tc_flower_handler_portnum_usage,
+ [FLOW_DISSECTOR_KEY_BASIC] = lan966x_tc_flower_handler_basic_usage,
+ [FLOW_DISSECTOR_KEY_VLAN] = lan966x_tc_flower_handler_vlan_usage,
+ [FLOW_DISSECTOR_KEY_TCP] = vcap_tc_flower_handler_tcp_usage,
+ [FLOW_DISSECTOR_KEY_ARP] = vcap_tc_flower_handler_arp_usage,
+ [FLOW_DISSECTOR_KEY_IP] = vcap_tc_flower_handler_ip_usage,
};
static int lan966x_tc_flower_use_dissectors(struct flow_cls_offload *f,
@@ -57,8 +139,8 @@ static int lan966x_tc_flower_use_dissectors(struct flow_cls_offload *f,
struct vcap_rule *vrule,
u16 *l3_proto)
{
- struct lan966x_tc_flower_parse_usage state = {
- .f = f,
+ struct vcap_tc_flower_parse_usage state = {
+ .fco = f,
.vrule = vrule,
.l3_proto = ETH_P_ALL,
};
@@ -82,8 +164,9 @@ static int lan966x_tc_flower_use_dissectors(struct flow_cls_offload *f,
}
static int lan966x_tc_flower_action_check(struct vcap_control *vctrl,
+ struct net_device *dev,
struct flow_cls_offload *fco,
- struct vcap_admin *admin)
+ bool ingress)
{
struct flow_rule *rule = flow_cls_offload_flow_rule(fco);
struct flow_action_entry *actent, *last_actent = NULL;
@@ -109,21 +192,24 @@ static int lan966x_tc_flower_action_check(struct vcap_control *vctrl,
last_actent = actent; /* Save last action for later check */
}
- /* Check that last action is a goto */
- if (last_actent->id != FLOW_ACTION_GOTO) {
+ /* Check that last action is a goto
+ * The last chain/lookup does not need to have goto action
+ */
+ if (last_actent->id == FLOW_ACTION_GOTO) {
+ /* Check if the destination chain is in one of the VCAPs */
+ if (!vcap_is_next_lookup(vctrl, fco->common.chain_index,
+ last_actent->chain_index)) {
+ NL_SET_ERR_MSG_MOD(fco->common.extack,
+ "Invalid goto chain");
+ return -EINVAL;
+ }
+ } else if (!vcap_is_last_chain(vctrl, fco->common.chain_index,
+ ingress)) {
NL_SET_ERR_MSG_MOD(fco->common.extack,
"Last action must be 'goto'");
return -EINVAL;
}
- /* Check if the goto chain is in the next lookup */
- if (!vcap_is_next_lookup(vctrl, fco->common.chain_index,
- last_actent->chain_index)) {
- NL_SET_ERR_MSG_MOD(fco->common.extack,
- "Invalid goto chain");
- return -EINVAL;
- }
-
/* Catch unsupported combinations of actions */
if (action_mask & BIT(FLOW_ACTION_TRAP) &&
action_mask & BIT(FLOW_ACTION_ACCEPT)) {
@@ -137,7 +223,8 @@ static int lan966x_tc_flower_action_check(struct vcap_control *vctrl,
static int lan966x_tc_flower_add(struct lan966x_port *port,
struct flow_cls_offload *f,
- struct vcap_admin *admin)
+ struct vcap_admin *admin,
+ bool ingress)
{
struct flow_action_entry *act;
u16 l3_proto = ETH_P_ALL;
@@ -145,8 +232,8 @@ static int lan966x_tc_flower_add(struct lan966x_port *port,
struct vcap_rule *vrule;
int err, idx;
- err = lan966x_tc_flower_action_check(port->lan966x->vcap_ctrl, f,
- admin);
+ err = lan966x_tc_flower_action_check(port->lan966x->vcap_ctrl,
+ port->dev, f, ingress);
if (err)
return err;
@@ -174,8 +261,6 @@ static int lan966x_tc_flower_add(struct lan966x_port *port,
0);
err |= vcap_rule_add_action_u32(vrule, VCAP_AF_MASK_MODE,
LAN966X_PMM_REPLACE);
- err |= vcap_set_rule_set_actionset(vrule,
- VCAP_AFS_BASE_TYPE);
if (err)
goto out;
@@ -229,8 +314,27 @@ static int lan966x_tc_flower_del(struct lan966x_port *port,
return err;
}
+static int lan966x_tc_flower_stats(struct lan966x_port *port,
+ struct flow_cls_offload *f,
+ struct vcap_admin *admin)
+{
+ struct vcap_counter count = {};
+ int err;
+
+ err = vcap_get_rule_count_by_cookie(port->lan966x->vcap_ctrl,
+ &count, f->cookie);
+ if (err)
+ return err;
+
+ flow_stats_update(&f->stats, 0x0, count.value, 0, 0,
+ FLOW_ACTION_HW_STATS_IMMEDIATE);
+
+ return err;
+}
+
int lan966x_tc_flower(struct lan966x_port *port,
- struct flow_cls_offload *f)
+ struct flow_cls_offload *f,
+ bool ingress)
{
struct vcap_admin *admin;
@@ -243,9 +347,11 @@ int lan966x_tc_flower(struct lan966x_port *port,
switch (f->command) {
case FLOW_CLS_REPLACE:
- return lan966x_tc_flower_add(port, f, admin);
+ return lan966x_tc_flower_add(port, f, admin, ingress);
case FLOW_CLS_DESTROY:
return lan966x_tc_flower_del(port, f, admin);
+ case FLOW_CLS_STATS:
+ return lan966x_tc_flower_stats(port, f, admin);
default:
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_tc_matchall.c b/drivers/net/ethernet/microchip/lan966x/lan966x_tc_matchall.c
index a539abaad9b6..20627323d656 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_tc_matchall.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_tc_matchall.c
@@ -24,7 +24,8 @@ static int lan966x_tc_matchall_add(struct lan966x_port *port,
return lan966x_mirror_port_add(port, act, f->cookie,
ingress, f->common.extack);
case FLOW_ACTION_GOTO:
- return lan966x_goto_port_add(port, act, f->cookie,
+ return lan966x_goto_port_add(port, f->common.chain_index,
+ act->chain_index, f->cookie,
f->common.extack);
default:
NL_SET_ERR_MSG_MOD(f->common.extack,
@@ -46,13 +47,8 @@ static int lan966x_tc_matchall_del(struct lan966x_port *port,
f->cookie == port->tc.egress_mirror_id) {
return lan966x_mirror_port_del(port, ingress,
f->common.extack);
- } else if (f->cookie == port->tc.goto_id) {
- return lan966x_goto_port_del(port, f->cookie,
- f->common.extack);
} else {
- NL_SET_ERR_MSG_MOD(f->common.extack,
- "Unsupported action");
- return -EOPNOTSUPP;
+ return lan966x_goto_port_del(port, f->cookie, f->common.extack);
}
return 0;
@@ -80,12 +76,6 @@ int lan966x_tc_matchall(struct lan966x_port *port,
struct tc_cls_matchall_offload *f,
bool ingress)
{
- if (!tc_cls_can_offload_and_chain0(port->dev, &f->common)) {
- NL_SET_ERR_MSG_MOD(f->common.extack,
- "Only chain zero is supported");
- return -EOPNOTSUPP;
- }
-
switch (f->command) {
case TC_CLSMATCHALL_REPLACE:
return lan966x_tc_matchall_add(port, f, ingress);
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_debugfs.c b/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_debugfs.c
new file mode 100644
index 000000000000..7a0db58f5513
--- /dev/null
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_debugfs.c
@@ -0,0 +1,94 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include "lan966x_main.h"
+#include "lan966x_vcap_ag_api.h"
+#include "vcap_api.h"
+#include "vcap_api_client.h"
+
+static void lan966x_vcap_port_keys(struct lan966x_port *port,
+ struct vcap_admin *admin,
+ struct vcap_output_print *out)
+{
+ struct lan966x *lan966x = port->lan966x;
+ u32 val;
+
+ out->prf(out->dst, " port[%d] (%s): ", port->chip_port,
+ netdev_name(port->dev));
+
+ val = lan_rd(lan966x, ANA_VCAP_S2_CFG(port->chip_port));
+ out->prf(out->dst, "\n state: ");
+ if (ANA_VCAP_S2_CFG_ENA_GET(val))
+ out->prf(out->dst, "on");
+ else
+ out->prf(out->dst, "off");
+
+ for (int l = 0; l < admin->lookups; ++l) {
+ out->prf(out->dst, "\n Lookup %d: ", l);
+
+ out->prf(out->dst, "\n snap: ");
+ if (ANA_VCAP_S2_CFG_SNAP_DIS_GET(val) & (BIT(0) << l))
+ out->prf(out->dst, "mac_llc");
+ else
+ out->prf(out->dst, "mac_snap");
+
+ out->prf(out->dst, "\n oam: ");
+ if (ANA_VCAP_S2_CFG_OAM_DIS_GET(val) & (BIT(0) << l))
+ out->prf(out->dst, "mac_etype");
+ else
+ out->prf(out->dst, "mac_oam");
+
+ out->prf(out->dst, "\n arp: ");
+ if (ANA_VCAP_S2_CFG_ARP_DIS_GET(val) & (BIT(0) << l))
+ out->prf(out->dst, "mac_etype");
+ else
+ out->prf(out->dst, "mac_arp");
+
+ out->prf(out->dst, "\n ipv4_other: ");
+ if (ANA_VCAP_S2_CFG_IP_OTHER_DIS_GET(val) & (BIT(0) << l))
+ out->prf(out->dst, "mac_etype");
+ else
+ out->prf(out->dst, "ip4_other");
+
+ out->prf(out->dst, "\n ipv4_tcp_udp: ");
+ if (ANA_VCAP_S2_CFG_IP_TCPUDP_DIS_GET(val) & (BIT(0) << l))
+ out->prf(out->dst, "mac_etype");
+ else
+ out->prf(out->dst, "ipv4_tcp_udp");
+
+ out->prf(out->dst, "\n ipv6: ");
+ switch (ANA_VCAP_S2_CFG_IP6_CFG_GET(val) & (0x3 << l)) {
+ case VCAP_IS2_PS_IPV6_TCPUDP_OTHER:
+ out->prf(out->dst, "ipv6_tcp_udp ipv6_tcp_udp");
+ break;
+ case VCAP_IS2_PS_IPV6_STD:
+ out->prf(out->dst, "ipv6_std");
+ break;
+ case VCAP_IS2_PS_IPV6_IP4_TCPUDP_IP4_OTHER:
+ out->prf(out->dst, "ipv4_tcp_udp ipv4_tcp_udp");
+ break;
+ case VCAP_IS2_PS_IPV6_MAC_ETYPE:
+ out->prf(out->dst, "mac_etype");
+ break;
+ }
+ }
+
+ out->prf(out->dst, "\n");
+}
+
+int lan966x_vcap_port_info(struct net_device *dev,
+ struct vcap_admin *admin,
+ struct vcap_output_print *out)
+{
+ struct lan966x_port *port = netdev_priv(dev);
+ struct lan966x *lan966x = port->lan966x;
+ const struct vcap_info *vcap;
+ struct vcap_control *vctrl;
+
+ vctrl = lan966x->vcap_ctrl;
+ vcap = &vctrl->vcaps[admin->vtype];
+
+ out->prf(out->dst, "%s:\n", vcap->name);
+ lan966x_vcap_port_keys(port, admin, out);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_impl.c b/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_impl.c
index a54c0426a35f..68f9d69fd37b 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_impl.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_impl.c
@@ -4,18 +4,12 @@
#include "lan966x_vcap_ag_api.h"
#include "vcap_api.h"
#include "vcap_api_client.h"
+#include "vcap_api_debugfs.h"
#define STREAMSIZE (64 * 4)
#define LAN966X_IS2_LOOKUPS 2
-enum vcap_is2_port_sel_ipv6 {
- VCAP_IS2_PS_IPV6_TCPUDP_OTHER,
- VCAP_IS2_PS_IPV6_STD,
- VCAP_IS2_PS_IPV6_IP4_TCPUDP_IP4_OTHER,
- VCAP_IS2_PS_IPV6_MAC_ETYPE,
-};
-
static struct lan966x_vcap_inst {
enum vcap_type vtype; /* type of vcap */
int tgt_inst; /* hardware instance number */
@@ -23,6 +17,7 @@ static struct lan966x_vcap_inst {
int first_cid; /* first chain id in this vcap */
int last_cid; /* last chain id in this vcap */
int count; /* number of available addresses */
+ bool ingress; /* is vcap in the ingress path */
} lan966x_vcap_inst_cfg[] = {
{
.vtype = VCAP_TYPE_IS2, /* IS2-0 */
@@ -31,6 +26,7 @@ static struct lan966x_vcap_inst {
.first_cid = LAN966X_VCAP_CID_IS2_L0,
.last_cid = LAN966X_VCAP_CID_IS2_MAX,
.count = 256,
+ .ingress = true,
},
};
@@ -383,27 +379,6 @@ static void lan966x_vcap_move(struct net_device *dev,
lan966x_vcap_wait_update(lan966x, admin->tgt_inst);
}
-static int lan966x_vcap_port_info(struct net_device *dev,
- struct vcap_admin *admin,
- struct vcap_output_print *out)
-{
- return 0;
-}
-
-static int lan966x_vcap_enable(struct net_device *dev,
- struct vcap_admin *admin,
- bool enable)
-{
- struct lan966x_port *port = netdev_priv(dev);
- struct lan966x *lan966x = port->lan966x;
-
- lan_rmw(ANA_VCAP_S2_CFG_ENA_SET(enable),
- ANA_VCAP_S2_CFG_ENA,
- lan966x, ANA_VCAP_S2_CFG(port->chip_port));
-
- return 0;
-}
-
static struct vcap_operations lan966x_vcap_ops = {
.validate_keyset = lan966x_vcap_validate_keyset,
.add_default_fields = lan966x_vcap_add_default_fields,
@@ -414,7 +389,6 @@ static struct vcap_operations lan966x_vcap_ops = {
.update = lan966x_vcap_update,
.move = lan966x_vcap_move,
.port_info = lan966x_vcap_port_info,
- .enable = lan966x_vcap_enable,
};
static void lan966x_vcap_admin_free(struct vcap_admin *admin)
@@ -446,6 +420,7 @@ lan966x_vcap_admin_alloc(struct lan966x *lan966x, struct vcap_control *ctrl,
admin->vtype = cfg->vtype;
admin->vinst = 0;
+ admin->ingress = cfg->ingress;
admin->w32be = true;
admin->tgt_inst = cfg->tgt_inst;
@@ -498,6 +473,7 @@ int lan966x_vcap_init(struct lan966x *lan966x)
struct lan966x_vcap_inst *cfg;
struct vcap_control *ctrl;
struct vcap_admin *admin;
+ struct dentry *dir;
ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
if (!ctrl)
@@ -521,6 +497,18 @@ int lan966x_vcap_init(struct lan966x *lan966x)
list_add_tail(&admin->list, &ctrl->list);
}
+ dir = vcap_debugfs(lan966x->dev, lan966x->debugfs_root, ctrl);
+ for (int p = 0; p < lan966x->num_phys_ports; ++p) {
+ if (lan966x->ports[p]) {
+ vcap_port_debugfs(lan966x->dev, dir, ctrl,
+ lan966x->ports[p]->dev);
+
+ lan_rmw(ANA_VCAP_S2_CFG_ENA_SET(true),
+ ANA_VCAP_S2_CFG_ENA, lan966x,
+ ANA_VCAP_S2_CFG(lan966x->ports[p]->chip_port));
+ }
+ }
+
lan966x->vcap_ctrl = ctrl;
return 0;
diff --git a/drivers/net/ethernet/microchip/sparx5/Makefile b/drivers/net/ethernet/microchip/sparx5/Makefile
index d0ed7090aa54..1cb1cc3f1a85 100644
--- a/drivers/net/ethernet/microchip/sparx5/Makefile
+++ b/drivers/net/ethernet/microchip/sparx5/Makefile
@@ -9,7 +9,8 @@ sparx5-switch-y := sparx5_main.o sparx5_packet.o \
sparx5_netdev.o sparx5_phylink.o sparx5_port.o sparx5_mactable.o sparx5_vlan.o \
sparx5_switchdev.o sparx5_calendar.o sparx5_ethtool.o sparx5_fdma.o \
sparx5_ptp.o sparx5_pgid.o sparx5_tc.o sparx5_qos.o \
- sparx5_vcap_impl.o sparx5_vcap_ag_api.o sparx5_tc_flower.o sparx5_tc_matchall.o
+ sparx5_vcap_impl.o sparx5_vcap_ag_api.o sparx5_tc_flower.o \
+ sparx5_tc_matchall.o sparx5_pool.o sparx5_sdlb.o sparx5_police.o sparx5_psfp.o
sparx5-switch-$(CONFIG_SPARX5_DCB) += sparx5_dcb.o
sparx5-switch-$(CONFIG_DEBUG_FS) += sparx5_vcap_debugfs.o
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_dcb.c b/drivers/net/ethernet/microchip/sparx5/sparx5_dcb.c
index 74abb946b2a3..871a3e62f852 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_dcb.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_dcb.c
@@ -133,12 +133,17 @@ static bool sparx5_dcb_apptrust_contains(int portno, u8 selector)
static int sparx5_dcb_app_update(struct net_device *dev)
{
+ struct dcb_ieee_app_prio_map dscp_rewr_map = {0};
+ struct dcb_rewr_prio_pcp_map pcp_rewr_map = {0};
struct sparx5_port *port = netdev_priv(dev);
struct sparx5_port_qos_dscp_map *dscp_map;
struct sparx5_port_qos_pcp_map *pcp_map;
struct sparx5_port_qos qos = {0};
struct dcb_app app_itr = {0};
int portno = port->portno;
+ bool dscp_rewr = false;
+ bool pcp_rewr = false;
+ u16 dscp;
int i;
dscp_map = &qos.dscp.map;
@@ -163,31 +168,72 @@ static int sparx5_dcb_app_update(struct net_device *dev)
pcp_map->map[i] = dcb_getapp(dev, &app_itr);
}
+ /* Get pcp rewrite mapping */
+ dcb_getrewr_prio_pcp_mask_map(dev, &pcp_rewr_map);
+ for (i = 0; i < ARRAY_SIZE(pcp_rewr_map.map); i++) {
+ if (!pcp_rewr_map.map[i])
+ continue;
+ pcp_rewr = true;
+ qos.pcp_rewr.map.map[i] = fls(pcp_rewr_map.map[i]) - 1;
+ }
+
+ /* Get dscp rewrite mapping */
+ dcb_getrewr_prio_dscp_mask_map(dev, &dscp_rewr_map);
+ for (i = 0; i < ARRAY_SIZE(dscp_rewr_map.map); i++) {
+ if (!dscp_rewr_map.map[i])
+ continue;
+
+ /* The rewrite table of the switch has 32 entries; one for each
+ * priority for each DP level. Currently, the rewrite map does
+ * not indicate DP level, so we map classified QoS class to
+ * classified DSCP, for each classified DP level. Rewrite of
+ * DSCP is only enabled, if we have active mappings.
+ */
+ dscp_rewr = true;
+ dscp = fls64(dscp_rewr_map.map[i]) - 1;
+ qos.dscp_rewr.map.map[i] = dscp; /* DP 0 */
+ qos.dscp_rewr.map.map[i + 8] = dscp; /* DP 1 */
+ qos.dscp_rewr.map.map[i + 16] = dscp; /* DP 2 */
+ qos.dscp_rewr.map.map[i + 24] = dscp; /* DP 3 */
+ }
+
/* Enable use of pcp for queue classification ? */
if (sparx5_dcb_apptrust_contains(portno, DCB_APP_SEL_PCP)) {
qos.pcp.qos_enable = true;
qos.pcp.dp_enable = qos.pcp.qos_enable;
+ /* Enable rewrite of PCP and DEI if PCP is trusted *and* rewrite
+ * table is not empty.
+ */
+ if (pcp_rewr)
+ qos.pcp_rewr.enable = true;
}
/* Enable use of dscp for queue classification ? */
if (sparx5_dcb_apptrust_contains(portno, IEEE_8021QAZ_APP_SEL_DSCP)) {
qos.dscp.qos_enable = true;
qos.dscp.dp_enable = qos.dscp.qos_enable;
+ if (dscp_rewr)
+ /* Do not enable rewrite if no mappings are active, as
+ * classified DSCP will then be zero for all classified
+ * QoS class and DP combinations.
+ */
+ qos.dscp_rewr.enable = true;
}
return sparx5_port_qos_set(port, &qos);
}
-/* Set or delete dscp app entry.
+/* Set or delete DSCP app entry.
*
- * Dscp mapping is global for all ports, so set and delete app entries are
+ * DSCP mapping is global for all ports, so set and delete app entries are
* replicated for each port.
*/
-static int sparx5_dcb_ieee_dscp_setdel_app(struct net_device *dev,
- struct dcb_app *app, bool del)
+static int sparx5_dcb_ieee_dscp_setdel(struct net_device *dev,
+ struct dcb_app *app,
+ int (*setdel)(struct net_device *,
+ struct dcb_app *))
{
struct sparx5_port *port = netdev_priv(dev);
- struct dcb_app apps[SPX5_PORTS];
struct sparx5_port *port_itr;
int err, i;
@@ -195,11 +241,7 @@ static int sparx5_dcb_ieee_dscp_setdel_app(struct net_device *dev,
port_itr = port->sparx5->ports[i];
if (!port_itr)
continue;
- memcpy(&apps[i], app, sizeof(struct dcb_app));
- if (del)
- err = dcb_ieee_delapp(port_itr->ndev, &apps[i]);
- else
- err = dcb_ieee_setapp(port_itr->ndev, &apps[i]);
+ err = setdel(port_itr->ndev, app);
if (err)
return err;
}
@@ -226,7 +268,7 @@ static int sparx5_dcb_ieee_setapp(struct net_device *dev, struct dcb_app *app)
}
if (app->selector == IEEE_8021QAZ_APP_SEL_DSCP)
- err = sparx5_dcb_ieee_dscp_setdel_app(dev, app, false);
+ err = sparx5_dcb_ieee_dscp_setdel(dev, app, dcb_ieee_setapp);
else
err = dcb_ieee_setapp(dev, app);
@@ -244,7 +286,7 @@ static int sparx5_dcb_ieee_delapp(struct net_device *dev, struct dcb_app *app)
int err;
if (app->selector == IEEE_8021QAZ_APP_SEL_DSCP)
- err = sparx5_dcb_ieee_dscp_setdel_app(dev, app, true);
+ err = sparx5_dcb_ieee_dscp_setdel(dev, app, dcb_ieee_delapp);
else
err = dcb_ieee_delapp(dev, app);
@@ -283,11 +325,60 @@ static int sparx5_dcb_getapptrust(struct net_device *dev, u8 *selectors,
return 0;
}
+static int sparx5_dcb_delrewr(struct net_device *dev, struct dcb_app *app)
+{
+ int err;
+
+ if (app->selector == IEEE_8021QAZ_APP_SEL_DSCP)
+ err = sparx5_dcb_ieee_dscp_setdel(dev, app, dcb_delrewr);
+ else
+ err = dcb_delrewr(dev, app);
+
+ if (err < 0)
+ return err;
+
+ return sparx5_dcb_app_update(dev);
+}
+
+static int sparx5_dcb_setrewr(struct net_device *dev, struct dcb_app *app)
+{
+ struct dcb_app app_itr;
+ int err = 0;
+ u16 proto;
+
+ err = sparx5_dcb_app_validate(dev, app);
+ if (err)
+ goto out;
+
+ /* Delete current mapping, if it exists. */
+ proto = dcb_getrewr(dev, app);
+ if (proto) {
+ app_itr = *app;
+ app_itr.protocol = proto;
+ sparx5_dcb_delrewr(dev, &app_itr);
+ }
+
+ if (app->selector == IEEE_8021QAZ_APP_SEL_DSCP)
+ err = sparx5_dcb_ieee_dscp_setdel(dev, app, dcb_setrewr);
+ else
+ err = dcb_setrewr(dev, app);
+
+ if (err)
+ goto out;
+
+ sparx5_dcb_app_update(dev);
+
+out:
+ return err;
+}
+
const struct dcbnl_rtnl_ops sparx5_dcbnl_ops = {
.ieee_setapp = sparx5_dcb_ieee_setapp,
.ieee_delapp = sparx5_dcb_ieee_delapp,
.dcbnl_setapptrust = sparx5_dcb_setapptrust,
.dcbnl_getapptrust = sparx5_dcb_getapptrust,
+ .dcbnl_setrewr = sparx5_dcb_setrewr,
+ .dcbnl_delrewr = sparx5_dcb_delrewr,
};
int sparx5_dcb_init(struct sparx5 *sparx5)
@@ -304,6 +395,12 @@ int sparx5_dcb_init(struct sparx5 *sparx5)
sparx5_port_apptrust[port->portno] =
&sparx5_dcb_apptrust_policies
[SPARX5_DCB_APPTRUST_DSCP_PCP];
+
+ /* Enable DSCP classification based on classified QoS class and
+ * DP, for all DSCP values, for all ports.
+ */
+ sparx5_port_qos_dscp_rewr_mode_set(port,
+ SPARX5_PORT_REW_DSCP_ALL);
}
return 0;
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
index 3c5d4fe99373..42b77ba9b572 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
@@ -198,12 +198,15 @@ static const struct sparx5_main_io_resource sparx5_main_iomap[] = {
{ TARGET_QSYS, 0x110a0000, 2 }, /* 0x6110a0000 */
{ TARGET_QFWD, 0x110b0000, 2 }, /* 0x6110b0000 */
{ TARGET_XQS, 0x110c0000, 2 }, /* 0x6110c0000 */
+ { TARGET_VCAP_ES2, 0x110d0000, 2 }, /* 0x6110d0000 */
+ { TARGET_VCAP_ES0, 0x110e0000, 2 }, /* 0x6110e0000 */
{ TARGET_CLKGEN, 0x11100000, 2 }, /* 0x611100000 */
{ TARGET_ANA_AC_POL, 0x11200000, 2 }, /* 0x611200000 */
{ TARGET_QRES, 0x11280000, 2 }, /* 0x611280000 */
{ TARGET_EACL, 0x112c0000, 2 }, /* 0x6112c0000 */
{ TARGET_ANA_CL, 0x11400000, 2 }, /* 0x611400000 */
{ TARGET_ANA_L3, 0x11480000, 2 }, /* 0x611480000 */
+ { TARGET_ANA_AC_SDLB, 0x11500000, 2 }, /* 0x611500000 */
{ TARGET_HSCH, 0x11580000, 2 }, /* 0x611580000 */
{ TARGET_REW, 0x11600000, 2 }, /* 0x611600000 */
{ TARGET_ANA_L2, 0x11800000, 2 }, /* 0x611800000 */
@@ -500,8 +503,8 @@ static int sparx5_init_coreclock(struct sparx5 *sparx5)
clk_period = sparx5_clk_period(freq);
- spx5_rmw(HSCH_SYS_CLK_PER_SYS_CLK_PER_100PS_SET(clk_period / 100),
- HSCH_SYS_CLK_PER_SYS_CLK_PER_100PS,
+ spx5_rmw(HSCH_SYS_CLK_PER_100PS_SET(clk_period / 100),
+ HSCH_SYS_CLK_PER_100PS,
sparx5,
HSCH_SYS_CLK_PER);
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
index 4a574cdcb584..72e7928912eb 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
@@ -396,6 +396,7 @@ int sparx5_ptp_txtstamp_request(struct sparx5_port *port,
void sparx5_ptp_txtstamp_release(struct sparx5_port *port,
struct sk_buff *skb);
irqreturn_t sparx5_ptp_irq_handler(int irq, void *args);
+int sparx5_ptp_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts);
/* sparx5_vcap_impl.c */
int sparx5_vcap_init(struct sparx5 *sparx5);
@@ -413,6 +414,129 @@ int sparx5_pgid_alloc_glag(struct sparx5 *spx5, u16 *idx);
int sparx5_pgid_alloc_mcast(struct sparx5 *spx5, u16 *idx);
int sparx5_pgid_free(struct sparx5 *spx5, u16 idx);
+/* sparx5_pool.c */
+struct sparx5_pool_entry {
+ u16 ref_cnt;
+ u32 idx; /* tc index */
+};
+
+u32 sparx5_pool_idx_to_id(u32 idx);
+int sparx5_pool_put(struct sparx5_pool_entry *pool, int size, u32 id);
+int sparx5_pool_get(struct sparx5_pool_entry *pool, int size, u32 *id);
+int sparx5_pool_get_with_idx(struct sparx5_pool_entry *pool, int size, u32 idx,
+ u32 *id);
+
+/* sparx5_sdlb.c */
+#define SPX5_SDLB_PUP_TOKEN_DISABLE 0x1FFF
+#define SPX5_SDLB_PUP_TOKEN_MAX (SPX5_SDLB_PUP_TOKEN_DISABLE - 1)
+#define SPX5_SDLB_GROUP_RATE_MAX 25000000000ULL
+#define SPX5_SDLB_2CYCLES_TYPE2_THRES_OFFSET 13
+#define SPX5_SDLB_CNT 4096
+#define SPX5_SDLB_GROUP_CNT 10
+#define SPX5_CLK_PER_100PS_DEFAULT 16
+
+struct sparx5_sdlb_group {
+ u64 max_rate;
+ u32 min_burst;
+ u32 frame_size;
+ u32 pup_interval;
+ u32 nsets;
+};
+
+extern struct sparx5_sdlb_group sdlb_groups[SPX5_SDLB_GROUP_CNT];
+int sparx5_sdlb_pup_token_get(struct sparx5 *sparx5, u32 pup_interval,
+ u64 rate);
+
+int sparx5_sdlb_clk_hz_get(struct sparx5 *sparx5);
+int sparx5_sdlb_group_get_by_rate(struct sparx5 *sparx5, u32 rate, u32 burst);
+int sparx5_sdlb_group_get_by_index(struct sparx5 *sparx5, u32 idx, u32 *group);
+
+int sparx5_sdlb_group_add(struct sparx5 *sparx5, u32 group, u32 idx);
+int sparx5_sdlb_group_del(struct sparx5 *sparx5, u32 group, u32 idx);
+
+void sparx5_sdlb_group_init(struct sparx5 *sparx5, u64 max_rate, u32 min_burst,
+ u32 frame_size, u32 idx);
+
+/* sparx5_police.c */
+enum {
+ /* More policer types will be added later */
+ SPX5_POL_SERVICE
+};
+
+struct sparx5_policer {
+ u32 type;
+ u32 idx;
+ u64 rate;
+ u32 burst;
+ u32 group;
+ u8 event_mask;
+};
+
+int sparx5_policer_conf_set(struct sparx5 *sparx5, struct sparx5_policer *pol);
+
+/* sparx5_psfp.c */
+#define SPX5_PSFP_GCE_CNT 4
+#define SPX5_PSFP_SG_CNT 1024
+#define SPX5_PSFP_SG_MIN_CYCLE_TIME_NS (1 * NSEC_PER_USEC)
+#define SPX5_PSFP_SG_MAX_CYCLE_TIME_NS ((1 * NSEC_PER_SEC) - 1)
+#define SPX5_PSFP_SG_MAX_IPV (SPX5_PRIOS - 1)
+#define SPX5_PSFP_SG_OPEN (SPX5_PSFP_SG_CNT - 1)
+#define SPX5_PSFP_SG_CYCLE_TIME_DEFAULT 1000000
+#define SPX5_PSFP_SF_MAX_SDU 16383
+
+struct sparx5_psfp_fm {
+ struct sparx5_policer pol;
+};
+
+struct sparx5_psfp_gce {
+ bool gate_state; /* StreamGateState */
+ u32 interval; /* TimeInterval */
+ u32 ipv; /* InternalPriorityValue */
+ u32 maxoctets; /* IntervalOctetMax */
+};
+
+struct sparx5_psfp_sg {
+ bool gate_state; /* PSFPAdminGateStates */
+ bool gate_enabled; /* PSFPGateEnabled */
+ u32 ipv; /* PSFPAdminIPV */
+ struct timespec64 basetime; /* PSFPAdminBaseTime */
+ u32 cycletime; /* PSFPAdminCycleTime */
+ u32 cycletimeext; /* PSFPAdminCycleTimeExtension */
+ u32 num_entries; /* PSFPAdminControlListLength */
+ struct sparx5_psfp_gce gce[SPX5_PSFP_GCE_CNT];
+};
+
+struct sparx5_psfp_sf {
+ bool sblock_osize_ena;
+ bool sblock_osize;
+ u32 max_sdu;
+ u32 sgid; /* Gate id */
+ u32 fmid; /* Flow meter id */
+};
+
+int sparx5_psfp_fm_add(struct sparx5 *sparx5, u32 uidx,
+ struct sparx5_psfp_fm *fm, u32 *id);
+int sparx5_psfp_fm_del(struct sparx5 *sparx5, u32 id);
+
+int sparx5_psfp_sg_add(struct sparx5 *sparx5, u32 uidx,
+ struct sparx5_psfp_sg *sg, u32 *id);
+int sparx5_psfp_sg_del(struct sparx5 *sparx5, u32 id);
+
+int sparx5_psfp_sf_add(struct sparx5 *sparx5, const struct sparx5_psfp_sf *sf,
+ u32 *id);
+int sparx5_psfp_sf_del(struct sparx5 *sparx5, u32 id);
+
+u32 sparx5_psfp_isdx_get_sf(struct sparx5 *sparx5, u32 isdx);
+u32 sparx5_psfp_isdx_get_fm(struct sparx5 *sparx5, u32 isdx);
+u32 sparx5_psfp_sf_get_sg(struct sparx5 *sparx5, u32 sfid);
+void sparx5_isdx_conf_set(struct sparx5 *sparx5, u32 isdx, u32 sfid, u32 fmid);
+
+void sparx5_psfp_init(struct sparx5 *sparx5);
+
+/* sparx5_qos.c */
+void sparx5_new_base_time(struct sparx5 *sparx5, const u32 cycle_time,
+ const ktime_t org_base_time, ktime_t *new_base_time);
+
/* Clock period in picoseconds */
static inline u32 sparx5_clk_period(enum sparx5_core_clockfreq cclock)
{
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h b/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h
index 6c93dd6b01b0..bd03a0a3c1da 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h
@@ -4,8 +4,8 @@
* Copyright (c) 2021 Microchip Technology Inc.
*/
-/* This file is autogenerated by cml-utils 2022-09-28 11:17:02 +0200.
- * Commit ID: 385c8a11d71a9f6a60368d3a3cb648fa257b479a
+/* This file is autogenerated by cml-utils 2023-02-10 11:18:53 +0100.
+ * Commit ID: c30fb4bf0281cd4a7133bdab6682f9e43c872ada
*/
#ifndef _SPARX5_MAIN_REGS_H_
@@ -19,6 +19,7 @@ enum sparx5_target {
TARGET_ANA_AC = 1,
TARGET_ANA_ACL = 2,
TARGET_ANA_AC_POL = 4,
+ TARGET_ANA_AC_SDLB = 5,
TARGET_ANA_CL = 6,
TARGET_ANA_L2 = 7,
TARGET_ANA_L3 = 8,
@@ -46,6 +47,8 @@ enum sparx5_target {
TARGET_QS = 177,
TARGET_QSYS = 178,
TARGET_REW = 179,
+ TARGET_VCAP_ES0 = 323,
+ TARGET_VCAP_ES2 = 324,
TARGET_VCAP_SUPER = 326,
TARGET_VOP = 327,
TARGET_XQS = 331,
@@ -55,7 +58,8 @@ enum sparx5_target {
#define __REG(...) __VA_ARGS__
/* ANA_AC:RAM_CTRL:RAM_INIT */
-#define ANA_AC_RAM_INIT __REG(TARGET_ANA_AC, 0, 1, 839108, 0, 1, 4, 0, 0, 1, 4)
+#define ANA_AC_RAM_INIT __REG(TARGET_ANA_AC,\
+ 0, 1, 839108, 0, 1, 4, 0, 0, 1, 4)
#define ANA_AC_RAM_INIT_RAM_INIT BIT(1)
#define ANA_AC_RAM_INIT_RAM_INIT_SET(x)\
@@ -70,7 +74,8 @@ enum sparx5_target {
FIELD_GET(ANA_AC_RAM_INIT_RAM_CFG_HOOK, x)
/* ANA_AC:PS_COMMON:OWN_UPSID */
-#define ANA_AC_OWN_UPSID(r) __REG(TARGET_ANA_AC, 0, 1, 894472, 0, 1, 352, 52, r, 3, 4)
+#define ANA_AC_OWN_UPSID(r) __REG(TARGET_ANA_AC,\
+ 0, 1, 894472, 0, 1, 352, 52, r, 3, 4)
#define ANA_AC_OWN_UPSID_OWN_UPSID GENMASK(4, 0)
#define ANA_AC_OWN_UPSID_OWN_UPSID_SET(x)\
@@ -79,13 +84,16 @@ enum sparx5_target {
FIELD_GET(ANA_AC_OWN_UPSID_OWN_UPSID, x)
/* ANA_AC:SRC:SRC_CFG */
-#define ANA_AC_SRC_CFG(g) __REG(TARGET_ANA_AC, 0, 1, 849920, g, 102, 16, 0, 0, 1, 4)
+#define ANA_AC_SRC_CFG(g) __REG(TARGET_ANA_AC,\
+ 0, 1, 849920, g, 102, 16, 0, 0, 1, 4)
/* ANA_AC:SRC:SRC_CFG1 */
-#define ANA_AC_SRC_CFG1(g) __REG(TARGET_ANA_AC, 0, 1, 849920, g, 102, 16, 4, 0, 1, 4)
+#define ANA_AC_SRC_CFG1(g) __REG(TARGET_ANA_AC,\
+ 0, 1, 849920, g, 102, 16, 4, 0, 1, 4)
/* ANA_AC:SRC:SRC_CFG2 */
-#define ANA_AC_SRC_CFG2(g) __REG(TARGET_ANA_AC, 0, 1, 849920, g, 102, 16, 8, 0, 1, 4)
+#define ANA_AC_SRC_CFG2(g) __REG(TARGET_ANA_AC,\
+ 0, 1, 849920, g, 102, 16, 8, 0, 1, 4)
#define ANA_AC_SRC_CFG2_PORT_MASK2 BIT(0)
#define ANA_AC_SRC_CFG2_PORT_MASK2_SET(x)\
@@ -94,13 +102,16 @@ enum sparx5_target {
FIELD_GET(ANA_AC_SRC_CFG2_PORT_MASK2, x)
/* ANA_AC:PGID:PGID_CFG */
-#define ANA_AC_PGID_CFG(g) __REG(TARGET_ANA_AC, 0, 1, 786432, g, 3290, 16, 0, 0, 1, 4)
+#define ANA_AC_PGID_CFG(g) __REG(TARGET_ANA_AC,\
+ 0, 1, 786432, g, 3290, 16, 0, 0, 1, 4)
/* ANA_AC:PGID:PGID_CFG1 */
-#define ANA_AC_PGID_CFG1(g) __REG(TARGET_ANA_AC, 0, 1, 786432, g, 3290, 16, 4, 0, 1, 4)
+#define ANA_AC_PGID_CFG1(g) __REG(TARGET_ANA_AC,\
+ 0, 1, 786432, g, 3290, 16, 4, 0, 1, 4)
/* ANA_AC:PGID:PGID_CFG2 */
-#define ANA_AC_PGID_CFG2(g) __REG(TARGET_ANA_AC, 0, 1, 786432, g, 3290, 16, 8, 0, 1, 4)
+#define ANA_AC_PGID_CFG2(g) __REG(TARGET_ANA_AC,\
+ 0, 1, 786432, g, 3290, 16, 8, 0, 1, 4)
#define ANA_AC_PGID_CFG2_PORT_MASK2 BIT(0)
#define ANA_AC_PGID_CFG2_PORT_MASK2_SET(x)\
@@ -109,7 +120,8 @@ enum sparx5_target {
FIELD_GET(ANA_AC_PGID_CFG2_PORT_MASK2, x)
/* ANA_AC:PGID:PGID_MISC_CFG */
-#define ANA_AC_PGID_MISC_CFG(g) __REG(TARGET_ANA_AC, 0, 1, 786432, g, 3290, 16, 12, 0, 1, 4)
+#define ANA_AC_PGID_MISC_CFG(g) __REG(TARGET_ANA_AC,\
+ 0, 1, 786432, g, 3290, 16, 12, 0, 1, 4)
#define ANA_AC_PGID_MISC_CFG_PGID_CPU_QU GENMASK(6, 4)
#define ANA_AC_PGID_MISC_CFG_PGID_CPU_QU_SET(x)\
@@ -129,8 +141,257 @@ enum sparx5_target {
#define ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA_GET(x)\
FIELD_GET(ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA, x)
+/* ANA_AC:TSN_SF:TSN_SF */
+#define ANA_AC_TSN_SF __REG(TARGET_ANA_AC,\
+ 0, 1, 839136, 0, 1, 4, 0, 0, 1, 4)
+
+#define ANA_AC_TSN_SF_TSN_STREAM_BLOCK_OVERSIZE_STICKY BIT(9)
+#define ANA_AC_TSN_SF_TSN_STREAM_BLOCK_OVERSIZE_STICKY_SET(x)\
+ FIELD_PREP(ANA_AC_TSN_SF_TSN_STREAM_BLOCK_OVERSIZE_STICKY, x)
+#define ANA_AC_TSN_SF_TSN_STREAM_BLOCK_OVERSIZE_STICKY_GET(x)\
+ FIELD_GET(ANA_AC_TSN_SF_TSN_STREAM_BLOCK_OVERSIZE_STICKY, x)
+
+#define ANA_AC_TSN_SF_PORT_NUM GENMASK(8, 0)
+#define ANA_AC_TSN_SF_PORT_NUM_SET(x)\
+ FIELD_PREP(ANA_AC_TSN_SF_PORT_NUM, x)
+#define ANA_AC_TSN_SF_PORT_NUM_GET(x)\
+ FIELD_GET(ANA_AC_TSN_SF_PORT_NUM, x)
+
+/* ANA_AC:TSN_SF_CFG:TSN_SF_CFG */
+#define ANA_AC_TSN_SF_CFG(g) __REG(TARGET_ANA_AC,\
+ 0, 1, 839680, g, 1024, 4, 0, 0, 1, 4)
+
+#define ANA_AC_TSN_SF_CFG_TSN_SGID GENMASK(25, 16)
+#define ANA_AC_TSN_SF_CFG_TSN_SGID_SET(x)\
+ FIELD_PREP(ANA_AC_TSN_SF_CFG_TSN_SGID, x)
+#define ANA_AC_TSN_SF_CFG_TSN_SGID_GET(x)\
+ FIELD_GET(ANA_AC_TSN_SF_CFG_TSN_SGID, x)
+
+#define ANA_AC_TSN_SF_CFG_TSN_MAX_SDU GENMASK(15, 2)
+#define ANA_AC_TSN_SF_CFG_TSN_MAX_SDU_SET(x)\
+ FIELD_PREP(ANA_AC_TSN_SF_CFG_TSN_MAX_SDU, x)
+#define ANA_AC_TSN_SF_CFG_TSN_MAX_SDU_GET(x)\
+ FIELD_GET(ANA_AC_TSN_SF_CFG_TSN_MAX_SDU, x)
+
+#define ANA_AC_TSN_SF_CFG_BLOCK_OVERSIZE_ENA BIT(1)
+#define ANA_AC_TSN_SF_CFG_BLOCK_OVERSIZE_ENA_SET(x)\
+ FIELD_PREP(ANA_AC_TSN_SF_CFG_BLOCK_OVERSIZE_ENA, x)
+#define ANA_AC_TSN_SF_CFG_BLOCK_OVERSIZE_ENA_GET(x)\
+ FIELD_GET(ANA_AC_TSN_SF_CFG_BLOCK_OVERSIZE_ENA, x)
+
+#define ANA_AC_TSN_SF_CFG_BLOCK_OVERSIZE_STATE BIT(0)
+#define ANA_AC_TSN_SF_CFG_BLOCK_OVERSIZE_STATE_SET(x)\
+ FIELD_PREP(ANA_AC_TSN_SF_CFG_BLOCK_OVERSIZE_STATE, x)
+#define ANA_AC_TSN_SF_CFG_BLOCK_OVERSIZE_STATE_GET(x)\
+ FIELD_GET(ANA_AC_TSN_SF_CFG_BLOCK_OVERSIZE_STATE, x)
+
+/* ANA_AC:TSN_SF_STATUS:TSN_SF_STATUS */
+#define ANA_AC_TSN_SF_STATUS __REG(TARGET_ANA_AC,\
+ 0, 1, 839072, 0, 1, 16, 0, 0, 1, 4)
+
+#define ANA_AC_TSN_SF_STATUS_FRM_LEN GENMASK(25, 12)
+#define ANA_AC_TSN_SF_STATUS_FRM_LEN_SET(x)\
+ FIELD_PREP(ANA_AC_TSN_SF_STATUS_FRM_LEN, x)
+#define ANA_AC_TSN_SF_STATUS_FRM_LEN_GET(x)\
+ FIELD_GET(ANA_AC_TSN_SF_STATUS_FRM_LEN, x)
+
+#define ANA_AC_TSN_SF_STATUS_DLB_DROP BIT(11)
+#define ANA_AC_TSN_SF_STATUS_DLB_DROP_SET(x)\
+ FIELD_PREP(ANA_AC_TSN_SF_STATUS_DLB_DROP, x)
+#define ANA_AC_TSN_SF_STATUS_DLB_DROP_GET(x)\
+ FIELD_GET(ANA_AC_TSN_SF_STATUS_DLB_DROP, x)
+
+#define ANA_AC_TSN_SF_STATUS_TSN_SFID GENMASK(10, 1)
+#define ANA_AC_TSN_SF_STATUS_TSN_SFID_SET(x)\
+ FIELD_PREP(ANA_AC_TSN_SF_STATUS_TSN_SFID, x)
+#define ANA_AC_TSN_SF_STATUS_TSN_SFID_GET(x)\
+ FIELD_GET(ANA_AC_TSN_SF_STATUS_TSN_SFID, x)
+
+#define ANA_AC_TSN_SF_STATUS_TSTAMP_VLD BIT(0)
+#define ANA_AC_TSN_SF_STATUS_TSTAMP_VLD_SET(x)\
+ FIELD_PREP(ANA_AC_TSN_SF_STATUS_TSTAMP_VLD, x)
+#define ANA_AC_TSN_SF_STATUS_TSTAMP_VLD_GET(x)\
+ FIELD_GET(ANA_AC_TSN_SF_STATUS_TSTAMP_VLD, x)
+
+/* ANA_AC:SG_ACCESS:SG_ACCESS_CTRL */
+#define ANA_AC_SG_ACCESS_CTRL __REG(TARGET_ANA_AC,\
+ 0, 1, 839140, 0, 1, 12, 0, 0, 1, 4)
+
+#define ANA_AC_SG_ACCESS_CTRL_SGID GENMASK(9, 0)
+#define ANA_AC_SG_ACCESS_CTRL_SGID_SET(x)\
+ FIELD_PREP(ANA_AC_SG_ACCESS_CTRL_SGID, x)
+#define ANA_AC_SG_ACCESS_CTRL_SGID_GET(x)\
+ FIELD_GET(ANA_AC_SG_ACCESS_CTRL_SGID, x)
+
+#define ANA_AC_SG_ACCESS_CTRL_CONFIG_CHANGE BIT(28)
+#define ANA_AC_SG_ACCESS_CTRL_CONFIG_CHANGE_SET(x)\
+ FIELD_PREP(ANA_AC_SG_ACCESS_CTRL_CONFIG_CHANGE, x)
+#define ANA_AC_SG_ACCESS_CTRL_CONFIG_CHANGE_GET(x)\
+ FIELD_GET(ANA_AC_SG_ACCESS_CTRL_CONFIG_CHANGE, x)
+
+/* ANA_AC:SG_ACCESS:SG_CYCLETIME_UPDATE_PERIOD */
+#define ANA_AC_SG_CYCLETIME_UPDATE_PERIOD __REG(TARGET_ANA_AC,\
+ 0, 1, 839140, 0, 1, 12, 8, 0, 1, 4)
+
+#define ANA_AC_SG_CYCLETIME_UPDATE_PERIOD_SG_CT_CLKS GENMASK(15, 0)
+#define ANA_AC_SG_CYCLETIME_UPDATE_PERIOD_SG_CT_CLKS_SET(x)\
+ FIELD_PREP(ANA_AC_SG_CYCLETIME_UPDATE_PERIOD_SG_CT_CLKS, x)
+#define ANA_AC_SG_CYCLETIME_UPDATE_PERIOD_SG_CT_CLKS_GET(x)\
+ FIELD_GET(ANA_AC_SG_CYCLETIME_UPDATE_PERIOD_SG_CT_CLKS, x)
+
+#define ANA_AC_SG_CYCLETIME_UPDATE_PERIOD_SG_CT_UPDATE_ENA BIT(31)
+#define ANA_AC_SG_CYCLETIME_UPDATE_PERIOD_SG_CT_UPDATE_ENA_SET(x)\
+ FIELD_PREP(ANA_AC_SG_CYCLETIME_UPDATE_PERIOD_SG_CT_UPDATE_ENA, x)
+#define ANA_AC_SG_CYCLETIME_UPDATE_PERIOD_SG_CT_UPDATE_ENA_GET(x)\
+ FIELD_GET(ANA_AC_SG_CYCLETIME_UPDATE_PERIOD_SG_CT_UPDATE_ENA, x)
+
+/* ANA_AC:SG_CONFIG:SG_CONFIG_REG_1 */
+#define ANA_AC_SG_CONFIG_REG_1 __REG(TARGET_ANA_AC,\
+ 0, 1, 851584, 0, 1, 128, 48, 0, 1, 4)
+
+/* ANA_AC:SG_CONFIG:SG_CONFIG_REG_2 */
+#define ANA_AC_SG_CONFIG_REG_2 __REG(TARGET_ANA_AC,\
+ 0, 1, 851584, 0, 1, 128, 52, 0, 1, 4)
+
+/* ANA_AC:SG_CONFIG:SG_CONFIG_REG_3 */
+#define ANA_AC_SG_CONFIG_REG_3 __REG(TARGET_ANA_AC,\
+ 0, 1, 851584, 0, 1, 128, 56, 0, 1, 4)
+
+#define ANA_AC_SG_CONFIG_REG_3_BASE_TIME_SEC_MSB GENMASK(15, 0)
+#define ANA_AC_SG_CONFIG_REG_3_BASE_TIME_SEC_MSB_SET(x)\
+ FIELD_PREP(ANA_AC_SG_CONFIG_REG_3_BASE_TIME_SEC_MSB, x)
+#define ANA_AC_SG_CONFIG_REG_3_BASE_TIME_SEC_MSB_GET(x)\
+ FIELD_GET(ANA_AC_SG_CONFIG_REG_3_BASE_TIME_SEC_MSB, x)
+
+#define ANA_AC_SG_CONFIG_REG_3_LIST_LENGTH GENMASK(18, 16)
+#define ANA_AC_SG_CONFIG_REG_3_LIST_LENGTH_SET(x)\
+ FIELD_PREP(ANA_AC_SG_CONFIG_REG_3_LIST_LENGTH, x)
+#define ANA_AC_SG_CONFIG_REG_3_LIST_LENGTH_GET(x)\
+ FIELD_GET(ANA_AC_SG_CONFIG_REG_3_LIST_LENGTH, x)
+
+#define ANA_AC_SG_CONFIG_REG_3_GATE_ENABLE BIT(20)
+#define ANA_AC_SG_CONFIG_REG_3_GATE_ENABLE_SET(x)\
+ FIELD_PREP(ANA_AC_SG_CONFIG_REG_3_GATE_ENABLE, x)
+#define ANA_AC_SG_CONFIG_REG_3_GATE_ENABLE_GET(x)\
+ FIELD_GET(ANA_AC_SG_CONFIG_REG_3_GATE_ENABLE, x)
+
+#define ANA_AC_SG_CONFIG_REG_3_INIT_IPS GENMASK(24, 21)
+#define ANA_AC_SG_CONFIG_REG_3_INIT_IPS_SET(x)\
+ FIELD_PREP(ANA_AC_SG_CONFIG_REG_3_INIT_IPS, x)
+#define ANA_AC_SG_CONFIG_REG_3_INIT_IPS_GET(x)\
+ FIELD_GET(ANA_AC_SG_CONFIG_REG_3_INIT_IPS, x)
+
+#define ANA_AC_SG_CONFIG_REG_3_INIT_GATE_STATE BIT(25)
+#define ANA_AC_SG_CONFIG_REG_3_INIT_GATE_STATE_SET(x)\
+ FIELD_PREP(ANA_AC_SG_CONFIG_REG_3_INIT_GATE_STATE, x)
+#define ANA_AC_SG_CONFIG_REG_3_INIT_GATE_STATE_GET(x)\
+ FIELD_GET(ANA_AC_SG_CONFIG_REG_3_INIT_GATE_STATE, x)
+
+#define ANA_AC_SG_CONFIG_REG_3_INVALID_RX_ENA BIT(26)
+#define ANA_AC_SG_CONFIG_REG_3_INVALID_RX_ENA_SET(x)\
+ FIELD_PREP(ANA_AC_SG_CONFIG_REG_3_INVALID_RX_ENA, x)
+#define ANA_AC_SG_CONFIG_REG_3_INVALID_RX_ENA_GET(x)\
+ FIELD_GET(ANA_AC_SG_CONFIG_REG_3_INVALID_RX_ENA, x)
+
+#define ANA_AC_SG_CONFIG_REG_3_INVALID_RX BIT(27)
+#define ANA_AC_SG_CONFIG_REG_3_INVALID_RX_SET(x)\
+ FIELD_PREP(ANA_AC_SG_CONFIG_REG_3_INVALID_RX, x)
+#define ANA_AC_SG_CONFIG_REG_3_INVALID_RX_GET(x)\
+ FIELD_GET(ANA_AC_SG_CONFIG_REG_3_INVALID_RX, x)
+
+#define ANA_AC_SG_CONFIG_REG_3_OCTETS_EXCEEDED_ENA BIT(28)
+#define ANA_AC_SG_CONFIG_REG_3_OCTETS_EXCEEDED_ENA_SET(x)\
+ FIELD_PREP(ANA_AC_SG_CONFIG_REG_3_OCTETS_EXCEEDED_ENA, x)
+#define ANA_AC_SG_CONFIG_REG_3_OCTETS_EXCEEDED_ENA_GET(x)\
+ FIELD_GET(ANA_AC_SG_CONFIG_REG_3_OCTETS_EXCEEDED_ENA, x)
+
+#define ANA_AC_SG_CONFIG_REG_3_OCTETS_EXCEEDED BIT(29)
+#define ANA_AC_SG_CONFIG_REG_3_OCTETS_EXCEEDED_SET(x)\
+ FIELD_PREP(ANA_AC_SG_CONFIG_REG_3_OCTETS_EXCEEDED, x)
+#define ANA_AC_SG_CONFIG_REG_3_OCTETS_EXCEEDED_GET(x)\
+ FIELD_GET(ANA_AC_SG_CONFIG_REG_3_OCTETS_EXCEEDED, x)
+
+/* ANA_AC:SG_CONFIG:SG_CONFIG_REG_4 */
+#define ANA_AC_SG_CONFIG_REG_4 __REG(TARGET_ANA_AC,\
+ 0, 1, 851584, 0, 1, 128, 60, 0, 1, 4)
+
+/* ANA_AC:SG_CONFIG:SG_CONFIG_REG_5 */
+#define ANA_AC_SG_CONFIG_REG_5 __REG(TARGET_ANA_AC,\
+ 0, 1, 851584, 0, 1, 128, 64, 0, 1, 4)
+
+/* ANA_AC:SG_CONFIG:SG_GCL_GS_CONFIG */
+#define ANA_AC_SG_GCL_GS_CONFIG(r) __REG(TARGET_ANA_AC,\
+ 0, 1, 851584, 0, 1, 128, 0, r, 4, 4)
+
+#define ANA_AC_SG_GCL_GS_CONFIG_IPS GENMASK(3, 0)
+#define ANA_AC_SG_GCL_GS_CONFIG_IPS_SET(x)\
+ FIELD_PREP(ANA_AC_SG_GCL_GS_CONFIG_IPS, x)
+#define ANA_AC_SG_GCL_GS_CONFIG_IPS_GET(x)\
+ FIELD_GET(ANA_AC_SG_GCL_GS_CONFIG_IPS, x)
+
+#define ANA_AC_SG_GCL_GS_CONFIG_GATE_STATE BIT(4)
+#define ANA_AC_SG_GCL_GS_CONFIG_GATE_STATE_SET(x)\
+ FIELD_PREP(ANA_AC_SG_GCL_GS_CONFIG_GATE_STATE, x)
+#define ANA_AC_SG_GCL_GS_CONFIG_GATE_STATE_GET(x)\
+ FIELD_GET(ANA_AC_SG_GCL_GS_CONFIG_GATE_STATE, x)
+
+/* ANA_AC:SG_CONFIG:SG_GCL_TI_CONFIG */
+#define ANA_AC_SG_GCL_TI_CONFIG(r) __REG(TARGET_ANA_AC,\
+ 0, 1, 851584, 0, 1, 128, 16, r, 4, 4)
+
+/* ANA_AC:SG_CONFIG:SG_GCL_OCT_CONFIG */
+#define ANA_AC_SG_GCL_OCT_CONFIG(r) __REG(TARGET_ANA_AC,\
+ 0, 1, 851584, 0, 1, 128, 32, r, 4, 4)
+
+/* ANA_AC:SG_STATUS:SG_STATUS_REG_1 */
+#define ANA_AC_SG_STATUS_REG_1 __REG(TARGET_ANA_AC,\
+ 0, 1, 839088, 0, 1, 16, 0, 0, 1, 4)
+
+/* ANA_AC:SG_STATUS:SG_STATUS_REG_2 */
+#define ANA_AC_SG_STATUS_REG_2 __REG(TARGET_ANA_AC,\
+ 0, 1, 839088, 0, 1, 16, 4, 0, 1, 4)
+
+/* ANA_AC:SG_STATUS:SG_STATUS_REG_3 */
+#define ANA_AC_SG_STATUS_REG_3 __REG(TARGET_ANA_AC,\
+ 0, 1, 839088, 0, 1, 16, 8, 0, 1, 4)
+
+#define ANA_AC_SG_STATUS_REG_3_CFG_CHG_TIME_SEC_MSB GENMASK(15, 0)
+#define ANA_AC_SG_STATUS_REG_3_CFG_CHG_TIME_SEC_MSB_SET(x)\
+ FIELD_PREP(ANA_AC_SG_STATUS_REG_3_CFG_CHG_TIME_SEC_MSB, x)
+#define ANA_AC_SG_STATUS_REG_3_CFG_CHG_TIME_SEC_MSB_GET(x)\
+ FIELD_GET(ANA_AC_SG_STATUS_REG_3_CFG_CHG_TIME_SEC_MSB, x)
+
+#define ANA_AC_SG_STATUS_REG_3_GATE_STATE BIT(16)
+#define ANA_AC_SG_STATUS_REG_3_GATE_STATE_SET(x)\
+ FIELD_PREP(ANA_AC_SG_STATUS_REG_3_GATE_STATE, x)
+#define ANA_AC_SG_STATUS_REG_3_GATE_STATE_GET(x)\
+ FIELD_GET(ANA_AC_SG_STATUS_REG_3_GATE_STATE, x)
+
+#define ANA_AC_SG_STATUS_REG_3_IPS GENMASK(23, 20)
+#define ANA_AC_SG_STATUS_REG_3_IPS_SET(x)\
+ FIELD_PREP(ANA_AC_SG_STATUS_REG_3_IPS, x)
+#define ANA_AC_SG_STATUS_REG_3_IPS_GET(x)\
+ FIELD_GET(ANA_AC_SG_STATUS_REG_3_IPS, x)
+
+#define ANA_AC_SG_STATUS_REG_3_CONFIG_PENDING BIT(24)
+#define ANA_AC_SG_STATUS_REG_3_CONFIG_PENDING_SET(x)\
+ FIELD_PREP(ANA_AC_SG_STATUS_REG_3_CONFIG_PENDING, x)
+#define ANA_AC_SG_STATUS_REG_3_CONFIG_PENDING_GET(x)\
+ FIELD_GET(ANA_AC_SG_STATUS_REG_3_CONFIG_PENDING, x)
+
+#define ANA_AC_SG_STATUS_REG_3_GCL_OCTET_INDEX GENMASK(27, 25)
+#define ANA_AC_SG_STATUS_REG_3_GCL_OCTET_INDEX_SET(x)\
+ FIELD_PREP(ANA_AC_SG_STATUS_REG_3_GCL_OCTET_INDEX, x)
+#define ANA_AC_SG_STATUS_REG_3_GCL_OCTET_INDEX_GET(x)\
+ FIELD_GET(ANA_AC_SG_STATUS_REG_3_GCL_OCTET_INDEX, x)
+
+/* ANA_AC:SG_STATUS:SG_STATUS_REG_4 */
+#define ANA_AC_SG_STATUS_REG_4 __REG(TARGET_ANA_AC,\
+ 0, 1, 839088, 0, 1, 16, 12, 0, 1, 4)
+
/* ANA_AC:STAT_GLOBAL_CFG_PORT:STAT_GLOBAL_EVENT_MASK */
-#define ANA_AC_PORT_SGE_CFG(r) __REG(TARGET_ANA_AC, 0, 1, 851552, 0, 1, 20, 0, r, 4, 4)
+#define ANA_AC_PORT_SGE_CFG(r) __REG(TARGET_ANA_AC,\
+ 0, 1, 851552, 0, 1, 20, 0, r, 4, 4)
#define ANA_AC_PORT_SGE_CFG_MASK GENMASK(15, 0)
#define ANA_AC_PORT_SGE_CFG_MASK_SET(x)\
@@ -139,7 +400,8 @@ enum sparx5_target {
FIELD_GET(ANA_AC_PORT_SGE_CFG_MASK, x)
/* ANA_AC:STAT_GLOBAL_CFG_PORT:STAT_RESET */
-#define ANA_AC_STAT_RESET __REG(TARGET_ANA_AC, 0, 1, 851552, 0, 1, 20, 16, 0, 1, 4)
+#define ANA_AC_STAT_RESET __REG(TARGET_ANA_AC,\
+ 0, 1, 851552, 0, 1, 20, 16, 0, 1, 4)
#define ANA_AC_STAT_RESET_RESET BIT(0)
#define ANA_AC_STAT_RESET_RESET_SET(x)\
@@ -148,7 +410,8 @@ enum sparx5_target {
FIELD_GET(ANA_AC_STAT_RESET_RESET, x)
/* ANA_AC:STAT_CNT_CFG_PORT:STAT_CFG */
-#define ANA_AC_PORT_STAT_CFG(g, r) __REG(TARGET_ANA_AC, 0, 1, 843776, g, 70, 64, 4, r, 4, 4)
+#define ANA_AC_PORT_STAT_CFG(g, r) __REG(TARGET_ANA_AC,\
+ 0, 1, 843776, g, 70, 64, 4, r, 4, 4)
#define ANA_AC_PORT_STAT_CFG_CFG_PRIO_MASK GENMASK(11, 4)
#define ANA_AC_PORT_STAT_CFG_CFG_PRIO_MASK_SET(x)\
@@ -169,10 +432,42 @@ enum sparx5_target {
FIELD_GET(ANA_AC_PORT_STAT_CFG_CFG_CNT_BYTE, x)
/* ANA_AC:STAT_CNT_CFG_PORT:STAT_LSB_CNT */
-#define ANA_AC_PORT_STAT_LSB_CNT(g, r) __REG(TARGET_ANA_AC, 0, 1, 843776, g, 70, 64, 20, r, 4, 4)
+#define ANA_AC_PORT_STAT_LSB_CNT(g, r) __REG(TARGET_ANA_AC,\
+ 0, 1, 843776, g, 70, 64, 20, r, 4, 4)
+
+/* ANA_AC:STAT_GLOBAL_CFG_ACL:GLOBAL_CNT_FRM_TYPE_CFG */
+#define ANA_AC_ACL_GLOBAL_CNT_FRM_TYPE_CFG(r) __REG(TARGET_ANA_AC,\
+ 0, 1, 893792, 0, 1, 24, 0, r, 2, 4)
+
+#define ANA_AC_ACL_GLOBAL_CNT_FRM_TYPE_CFG_GLOBAL_CFG_CNT_FRM_TYPE GENMASK(2, 0)
+#define ANA_AC_ACL_GLOBAL_CNT_FRM_TYPE_CFG_GLOBAL_CFG_CNT_FRM_TYPE_SET(x)\
+ FIELD_PREP(ANA_AC_ACL_GLOBAL_CNT_FRM_TYPE_CFG_GLOBAL_CFG_CNT_FRM_TYPE, x)
+#define ANA_AC_ACL_GLOBAL_CNT_FRM_TYPE_CFG_GLOBAL_CFG_CNT_FRM_TYPE_GET(x)\
+ FIELD_GET(ANA_AC_ACL_GLOBAL_CNT_FRM_TYPE_CFG_GLOBAL_CFG_CNT_FRM_TYPE, x)
+
+/* ANA_AC:STAT_GLOBAL_CFG_ACL:STAT_GLOBAL_CFG */
+#define ANA_AC_ACL_STAT_GLOBAL_CFG(r) __REG(TARGET_ANA_AC,\
+ 0, 1, 893792, 0, 1, 24, 8, r, 2, 4)
+
+#define ANA_AC_ACL_STAT_GLOBAL_CFG_GLOBAL_CFG_CNT_BYTE BIT(0)
+#define ANA_AC_ACL_STAT_GLOBAL_CFG_GLOBAL_CFG_CNT_BYTE_SET(x)\
+ FIELD_PREP(ANA_AC_ACL_STAT_GLOBAL_CFG_GLOBAL_CFG_CNT_BYTE, x)
+#define ANA_AC_ACL_STAT_GLOBAL_CFG_GLOBAL_CFG_CNT_BYTE_GET(x)\
+ FIELD_GET(ANA_AC_ACL_STAT_GLOBAL_CFG_GLOBAL_CFG_CNT_BYTE, x)
+
+/* ANA_AC:STAT_GLOBAL_CFG_ACL:STAT_GLOBAL_EVENT_MASK */
+#define ANA_AC_ACL_STAT_GLOBAL_EVENT_MASK(r) __REG(TARGET_ANA_AC,\
+ 0, 1, 893792, 0, 1, 24, 16, r, 2, 4)
+
+#define ANA_AC_ACL_STAT_GLOBAL_EVENT_MASK_GLOBAL_EVENT_MASK GENMASK(3, 0)
+#define ANA_AC_ACL_STAT_GLOBAL_EVENT_MASK_GLOBAL_EVENT_MASK_SET(x)\
+ FIELD_PREP(ANA_AC_ACL_STAT_GLOBAL_EVENT_MASK_GLOBAL_EVENT_MASK, x)
+#define ANA_AC_ACL_STAT_GLOBAL_EVENT_MASK_GLOBAL_EVENT_MASK_GET(x)\
+ FIELD_GET(ANA_AC_ACL_STAT_GLOBAL_EVENT_MASK_GLOBAL_EVENT_MASK, x)
/* ANA_ACL:COMMON:VCAP_S2_CFG */
-#define ANA_ACL_VCAP_S2_CFG(r) __REG(TARGET_ANA_ACL, 0, 1, 32768, 0, 1, 592, 0, r, 70, 4)
+#define ANA_ACL_VCAP_S2_CFG(r) __REG(TARGET_ANA_ACL,\
+ 0, 1, 32768, 0, 1, 592, 0, r, 70, 4)
#define ANA_ACL_VCAP_S2_CFG_SEC_ROUTE_HANDLING_ENA BIT(28)
#define ANA_ACL_VCAP_S2_CFG_SEC_ROUTE_HANDLING_ENA_SET(x)\
@@ -259,7 +554,8 @@ enum sparx5_target {
FIELD_GET(ANA_ACL_VCAP_S2_CFG_SEC_ENA, x)
/* ANA_ACL:COMMON:SWAP_IP_CTRL */
-#define ANA_ACL_SWAP_IP_CTRL __REG(TARGET_ANA_ACL, 0, 1, 32768, 0, 1, 592, 412, 0, 1, 4)
+#define ANA_ACL_SWAP_IP_CTRL __REG(TARGET_ANA_ACL,\
+ 0, 1, 32768, 0, 1, 592, 412, 0, 1, 4)
#define ANA_ACL_SWAP_IP_CTRL_DMAC_REPL_OFFSET_VAL GENMASK(23, 18)
#define ANA_ACL_SWAP_IP_CTRL_DMAC_REPL_OFFSET_VAL_SET(x)\
@@ -292,7 +588,8 @@ enum sparx5_target {
FIELD_GET(ANA_ACL_SWAP_IP_CTRL_IP_SWAP_IP4_TTL_ENA, x)
/* ANA_ACL:COMMON:VCAP_S2_RLEG_STAT */
-#define ANA_ACL_VCAP_S2_RLEG_STAT(r) __REG(TARGET_ANA_ACL, 0, 1, 32768, 0, 1, 592, 424, r, 4, 4)
+#define ANA_ACL_VCAP_S2_RLEG_STAT(r) __REG(TARGET_ANA_ACL,\
+ 0, 1, 32768, 0, 1, 592, 424, r, 4, 4)
#define ANA_ACL_VCAP_S2_RLEG_STAT_IRLEG_STAT_MASK GENMASK(12, 6)
#define ANA_ACL_VCAP_S2_RLEG_STAT_IRLEG_STAT_MASK_SET(x)\
@@ -307,7 +604,8 @@ enum sparx5_target {
FIELD_GET(ANA_ACL_VCAP_S2_RLEG_STAT_ERLEG_STAT_MASK, x)
/* ANA_ACL:COMMON:VCAP_S2_FRAGMENT_CFG */
-#define ANA_ACL_VCAP_S2_FRAGMENT_CFG __REG(TARGET_ANA_ACL, 0, 1, 32768, 0, 1, 592, 440, 0, 1, 4)
+#define ANA_ACL_VCAP_S2_FRAGMENT_CFG __REG(TARGET_ANA_ACL,\
+ 0, 1, 32768, 0, 1, 592, 440, 0, 1, 4)
#define ANA_ACL_VCAP_S2_FRAGMENT_CFG_L4_MIN_LEN GENMASK(9, 5)
#define ANA_ACL_VCAP_S2_FRAGMENT_CFG_L4_MIN_LEN_SET(x)\
@@ -328,7 +626,8 @@ enum sparx5_target {
FIELD_GET(ANA_ACL_VCAP_S2_FRAGMENT_CFG_FRAGMENT_OFFSET_THRES, x)
/* ANA_ACL:COMMON:OWN_UPSID */
-#define ANA_ACL_OWN_UPSID(r) __REG(TARGET_ANA_ACL, 0, 1, 32768, 0, 1, 592, 580, r, 3, 4)
+#define ANA_ACL_OWN_UPSID(r) __REG(TARGET_ANA_ACL,\
+ 0, 1, 32768, 0, 1, 592, 580, r, 3, 4)
#define ANA_ACL_OWN_UPSID_OWN_UPSID GENMASK(4, 0)
#define ANA_ACL_OWN_UPSID_OWN_UPSID_SET(x)\
@@ -337,7 +636,8 @@ enum sparx5_target {
FIELD_GET(ANA_ACL_OWN_UPSID_OWN_UPSID, x)
/* ANA_ACL:KEY_SEL:VCAP_S2_KEY_SEL */
-#define ANA_ACL_VCAP_S2_KEY_SEL(g, r) __REG(TARGET_ANA_ACL, 0, 1, 34200, g, 134, 16, 0, r, 4, 4)
+#define ANA_ACL_VCAP_S2_KEY_SEL(g, r) __REG(TARGET_ANA_ACL,\
+ 0, 1, 34200, g, 134, 16, 0, r, 4, 4)
#define ANA_ACL_VCAP_S2_KEY_SEL_KEY_SEL_ENA BIT(13)
#define ANA_ACL_VCAP_S2_KEY_SEL_KEY_SEL_ENA_SET(x)\
@@ -388,13 +688,16 @@ enum sparx5_target {
FIELD_GET(ANA_ACL_VCAP_S2_KEY_SEL_ARP_KEY_SEL, x)
/* ANA_ACL:CNT_A:CNT_A */
-#define ANA_ACL_CNT_A(g) __REG(TARGET_ANA_ACL, 0, 1, 0, g, 4096, 4, 0, 0, 1, 4)
+#define ANA_ACL_CNT_A(g) __REG(TARGET_ANA_ACL,\
+ 0, 1, 0, g, 4096, 4, 0, 0, 1, 4)
/* ANA_ACL:CNT_B:CNT_B */
-#define ANA_ACL_CNT_B(g) __REG(TARGET_ANA_ACL, 0, 1, 16384, g, 4096, 4, 0, 0, 1, 4)
+#define ANA_ACL_CNT_B(g) __REG(TARGET_ANA_ACL,\
+ 0, 1, 16384, g, 4096, 4, 0, 0, 1, 4)
/* ANA_ACL:STICKY:SEC_LOOKUP_STICKY */
-#define ANA_ACL_SEC_LOOKUP_STICKY(r) __REG(TARGET_ANA_ACL, 0, 1, 36408, 0, 1, 16, 0, r, 4, 4)
+#define ANA_ACL_SEC_LOOKUP_STICKY(r) __REG(TARGET_ANA_ACL,\
+ 0, 1, 36408, 0, 1, 16, 0, r, 4, 4)
#define ANA_ACL_SEC_LOOKUP_STICKY_KEY_SEL_CLM_STICKY BIT(17)
#define ANA_ACL_SEC_LOOKUP_STICKY_KEY_SEL_CLM_STICKY_SET(x)\
@@ -505,7 +808,8 @@ enum sparx5_target {
FIELD_GET(ANA_ACL_SEC_LOOKUP_STICKY_SEC_TYPE_MAC_ETYPE_STICKY, x)
/* ANA_AC_POL:POL_ALL_CFG:POL_UPD_INT_CFG */
-#define ANA_AC_POL_POL_UPD_INT_CFG __REG(TARGET_ANA_AC_POL, 0, 1, 75968, 0, 1, 1160, 1148, 0, 1, 4)
+#define ANA_AC_POL_POL_UPD_INT_CFG __REG(TARGET_ANA_AC_POL,\
+ 0, 1, 75968, 0, 1, 1160, 1148, 0, 1, 4)
#define ANA_AC_POL_POL_UPD_INT_CFG_POL_UPD_INT GENMASK(9, 0)
#define ANA_AC_POL_POL_UPD_INT_CFG_POL_UPD_INT_SET(x)\
@@ -514,7 +818,8 @@ enum sparx5_target {
FIELD_GET(ANA_AC_POL_POL_UPD_INT_CFG_POL_UPD_INT, x)
/* ANA_AC_POL:COMMON_BDLB:DLB_CTRL */
-#define ANA_AC_POL_BDLB_DLB_CTRL __REG(TARGET_ANA_AC_POL, 0, 1, 79048, 0, 1, 8, 0, 0, 1, 4)
+#define ANA_AC_POL_BDLB_DLB_CTRL __REG(TARGET_ANA_AC_POL,\
+ 0, 1, 79048, 0, 1, 8, 0, 0, 1, 4)
#define ANA_AC_POL_BDLB_DLB_CTRL_CLK_PERIOD_01NS GENMASK(26, 19)
#define ANA_AC_POL_BDLB_DLB_CTRL_CLK_PERIOD_01NS_SET(x)\
@@ -541,7 +846,8 @@ enum sparx5_target {
FIELD_GET(ANA_AC_POL_BDLB_DLB_CTRL_DLB_ADD_ENA, x)
/* ANA_AC_POL:COMMON_BUM_SLB:DLB_CTRL */
-#define ANA_AC_POL_SLB_DLB_CTRL __REG(TARGET_ANA_AC_POL, 0, 1, 79056, 0, 1, 20, 0, 0, 1, 4)
+#define ANA_AC_POL_SLB_DLB_CTRL __REG(TARGET_ANA_AC_POL,\
+ 0, 1, 79056, 0, 1, 20, 0, 0, 1, 4)
#define ANA_AC_POL_SLB_DLB_CTRL_CLK_PERIOD_01NS GENMASK(26, 19)
#define ANA_AC_POL_SLB_DLB_CTRL_CLK_PERIOD_01NS_SET(x)\
@@ -567,8 +873,235 @@ enum sparx5_target {
#define ANA_AC_POL_SLB_DLB_CTRL_DLB_ADD_ENA_GET(x)\
FIELD_GET(ANA_AC_POL_SLB_DLB_CTRL_DLB_ADD_ENA, x)
+/* ANA_AC_SDLB:LBGRP_TBL:XLB_START */
+#define ANA_AC_SDLB_XLB_START(g) __REG(TARGET_ANA_AC_SDLB,\
+ 0, 1, 295468, g, 10, 24, 0, 0, 1, 4)
+
+#define ANA_AC_SDLB_XLB_START_LBSET_START GENMASK(12, 0)
+#define ANA_AC_SDLB_XLB_START_LBSET_START_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_XLB_START_LBSET_START, x)
+#define ANA_AC_SDLB_XLB_START_LBSET_START_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_XLB_START_LBSET_START, x)
+
+/* ANA_AC_SDLB:LBGRP_TBL:PUP_INTERVAL */
+#define ANA_AC_SDLB_PUP_INTERVAL(g) __REG(TARGET_ANA_AC_SDLB,\
+ 0, 1, 295468, g, 10, 24, 4, 0, 1, 4)
+
+#define ANA_AC_SDLB_PUP_INTERVAL_PUP_INTERVAL GENMASK(19, 0)
+#define ANA_AC_SDLB_PUP_INTERVAL_PUP_INTERVAL_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_PUP_INTERVAL_PUP_INTERVAL, x)
+#define ANA_AC_SDLB_PUP_INTERVAL_PUP_INTERVAL_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_PUP_INTERVAL_PUP_INTERVAL, x)
+
+/* ANA_AC_SDLB:LBGRP_TBL:PUP_CTRL */
+#define ANA_AC_SDLB_PUP_CTRL(g) __REG(TARGET_ANA_AC_SDLB,\
+ 0, 1, 295468, g, 10, 24, 8, 0, 1, 4)
+
+#define ANA_AC_SDLB_PUP_CTRL_PUP_LB_DT GENMASK(18, 0)
+#define ANA_AC_SDLB_PUP_CTRL_PUP_LB_DT_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_PUP_CTRL_PUP_LB_DT, x)
+#define ANA_AC_SDLB_PUP_CTRL_PUP_LB_DT_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_PUP_CTRL_PUP_LB_DT, x)
+
+#define ANA_AC_SDLB_PUP_CTRL_PUP_ENA BIT(24)
+#define ANA_AC_SDLB_PUP_CTRL_PUP_ENA_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_PUP_CTRL_PUP_ENA, x)
+#define ANA_AC_SDLB_PUP_CTRL_PUP_ENA_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_PUP_CTRL_PUP_ENA, x)
+
+/* ANA_AC_SDLB:LBGRP_TBL:LBGRP_MISC */
+#define ANA_AC_SDLB_LBGRP_MISC(g) __REG(TARGET_ANA_AC_SDLB,\
+ 0, 1, 295468, g, 10, 24, 12, 0, 1, 4)
+
+#define ANA_AC_SDLB_LBGRP_MISC_THRES_SHIFT GENMASK(12, 8)
+#define ANA_AC_SDLB_LBGRP_MISC_THRES_SHIFT_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_LBGRP_MISC_THRES_SHIFT, x)
+#define ANA_AC_SDLB_LBGRP_MISC_THRES_SHIFT_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_LBGRP_MISC_THRES_SHIFT, x)
+
+/* ANA_AC_SDLB:LBGRP_TBL:FRM_RATE_TOKENS */
+#define ANA_AC_SDLB_FRM_RATE_TOKENS(g) __REG(TARGET_ANA_AC_SDLB,\
+ 0, 1, 295468, g, 10, 24, 16, 0, 1, 4)
+
+#define ANA_AC_SDLB_FRM_RATE_TOKENS_FRM_RATE_TOKENS GENMASK(12, 0)
+#define ANA_AC_SDLB_FRM_RATE_TOKENS_FRM_RATE_TOKENS_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_FRM_RATE_TOKENS_FRM_RATE_TOKENS, x)
+#define ANA_AC_SDLB_FRM_RATE_TOKENS_FRM_RATE_TOKENS_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_FRM_RATE_TOKENS_FRM_RATE_TOKENS, x)
+
+/* ANA_AC_SDLB:LBGRP_TBL:LBGRP_STATE_TBL */
+#define ANA_AC_SDLB_LBGRP_STATE_TBL(g) __REG(TARGET_ANA_AC_SDLB,\
+ 0, 1, 295468, g, 10, 24, 20, 0, 1, 4)
+
+#define ANA_AC_SDLB_LBGRP_STATE_TBL_PUP_ONGOING BIT(0)
+#define ANA_AC_SDLB_LBGRP_STATE_TBL_PUP_ONGOING_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_LBGRP_STATE_TBL_PUP_ONGOING, x)
+#define ANA_AC_SDLB_LBGRP_STATE_TBL_PUP_ONGOING_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_LBGRP_STATE_TBL_PUP_ONGOING, x)
+
+#define ANA_AC_SDLB_LBGRP_STATE_TBL_PUP_WAIT_ACK BIT(1)
+#define ANA_AC_SDLB_LBGRP_STATE_TBL_PUP_WAIT_ACK_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_LBGRP_STATE_TBL_PUP_WAIT_ACK, x)
+#define ANA_AC_SDLB_LBGRP_STATE_TBL_PUP_WAIT_ACK_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_LBGRP_STATE_TBL_PUP_WAIT_ACK, x)
+
+#define ANA_AC_SDLB_LBGRP_STATE_TBL_PUP_LBSET_NEXT GENMASK(28, 16)
+#define ANA_AC_SDLB_LBGRP_STATE_TBL_PUP_LBSET_NEXT_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_LBGRP_STATE_TBL_PUP_LBSET_NEXT, x)
+#define ANA_AC_SDLB_LBGRP_STATE_TBL_PUP_LBSET_NEXT_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_LBGRP_STATE_TBL_PUP_LBSET_NEXT, x)
+
+/* ANA_AC_SDLB:LBSET_TBL:PUP_TOKENS */
+#define ANA_AC_SDLB_PUP_TOKENS(g, r) __REG(TARGET_ANA_AC_SDLB,\
+ 0, 1, 0, g, 4616, 64, 0, r, 2, 4)
+
+#define ANA_AC_SDLB_PUP_TOKENS_PUP_TOKENS GENMASK(12, 0)
+#define ANA_AC_SDLB_PUP_TOKENS_PUP_TOKENS_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_PUP_TOKENS_PUP_TOKENS, x)
+#define ANA_AC_SDLB_PUP_TOKENS_PUP_TOKENS_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_PUP_TOKENS_PUP_TOKENS, x)
+
+/* ANA_AC_SDLB:LBSET_TBL:THRES */
+#define ANA_AC_SDLB_THRES(g, r) __REG(TARGET_ANA_AC_SDLB,\
+ 0, 1, 0, g, 4616, 64, 8, r, 2, 4)
+
+#define ANA_AC_SDLB_THRES_THRES GENMASK(9, 0)
+#define ANA_AC_SDLB_THRES_THRES_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_THRES_THRES, x)
+#define ANA_AC_SDLB_THRES_THRES_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_THRES_THRES, x)
+
+#define ANA_AC_SDLB_THRES_THRES_HYS GENMASK(25, 16)
+#define ANA_AC_SDLB_THRES_THRES_HYS_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_THRES_THRES_HYS, x)
+#define ANA_AC_SDLB_THRES_THRES_HYS_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_THRES_THRES_HYS, x)
+
+/* ANA_AC_SDLB:LBSET_TBL:XLB_NEXT */
+#define ANA_AC_SDLB_XLB_NEXT(g) __REG(TARGET_ANA_AC_SDLB,\
+ 0, 1, 0, g, 4616, 64, 16, 0, 1, 4)
+
+#define ANA_AC_SDLB_XLB_NEXT_LBSET_NEXT GENMASK(12, 0)
+#define ANA_AC_SDLB_XLB_NEXT_LBSET_NEXT_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_XLB_NEXT_LBSET_NEXT, x)
+#define ANA_AC_SDLB_XLB_NEXT_LBSET_NEXT_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_XLB_NEXT_LBSET_NEXT, x)
+
+#define ANA_AC_SDLB_XLB_NEXT_LBGRP GENMASK(27, 24)
+#define ANA_AC_SDLB_XLB_NEXT_LBGRP_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_XLB_NEXT_LBGRP, x)
+#define ANA_AC_SDLB_XLB_NEXT_LBGRP_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_XLB_NEXT_LBGRP, x)
+
+/* ANA_AC_SDLB:LBSET_TBL:INH_CTRL */
+#define ANA_AC_SDLB_INH_CTRL(g, r) __REG(TARGET_ANA_AC_SDLB,\
+ 0, 1, 0, g, 4616, 64, 20, r, 2, 4)
+
+#define ANA_AC_SDLB_INH_CTRL_PUP_TOKENS_MAX GENMASK(12, 0)
+#define ANA_AC_SDLB_INH_CTRL_PUP_TOKENS_MAX_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_INH_CTRL_PUP_TOKENS_MAX, x)
+#define ANA_AC_SDLB_INH_CTRL_PUP_TOKENS_MAX_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_INH_CTRL_PUP_TOKENS_MAX, x)
+
+#define ANA_AC_SDLB_INH_CTRL_INH_MODE GENMASK(21, 20)
+#define ANA_AC_SDLB_INH_CTRL_INH_MODE_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_INH_CTRL_INH_MODE, x)
+#define ANA_AC_SDLB_INH_CTRL_INH_MODE_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_INH_CTRL_INH_MODE, x)
+
+#define ANA_AC_SDLB_INH_CTRL_INH_LB BIT(24)
+#define ANA_AC_SDLB_INH_CTRL_INH_LB_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_INH_CTRL_INH_LB, x)
+#define ANA_AC_SDLB_INH_CTRL_INH_LB_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_INH_CTRL_INH_LB, x)
+
+/* ANA_AC_SDLB:LBSET_TBL:INH_LBSET_ADDR */
+#define ANA_AC_SDLB_INH_LBSET_ADDR(g) __REG(TARGET_ANA_AC_SDLB,\
+ 0, 1, 0, g, 4616, 64, 28, 0, 1, 4)
+
+#define ANA_AC_SDLB_INH_LBSET_ADDR_INH_LBSET_ADDR GENMASK(12, 0)
+#define ANA_AC_SDLB_INH_LBSET_ADDR_INH_LBSET_ADDR_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_INH_LBSET_ADDR_INH_LBSET_ADDR, x)
+#define ANA_AC_SDLB_INH_LBSET_ADDR_INH_LBSET_ADDR_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_INH_LBSET_ADDR_INH_LBSET_ADDR, x)
+
+/* ANA_AC_SDLB:LBSET_TBL:DLB_MISC */
+#define ANA_AC_SDLB_DLB_MISC(g) __REG(TARGET_ANA_AC_SDLB,\
+ 0, 1, 0, g, 4616, 64, 32, 0, 1, 4)
+
+#define ANA_AC_SDLB_DLB_MISC_DLB_FRM_RATE_ENA BIT(0)
+#define ANA_AC_SDLB_DLB_MISC_DLB_FRM_RATE_ENA_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_DLB_MISC_DLB_FRM_RATE_ENA, x)
+#define ANA_AC_SDLB_DLB_MISC_DLB_FRM_RATE_ENA_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_DLB_MISC_DLB_FRM_RATE_ENA, x)
+
+#define ANA_AC_SDLB_DLB_MISC_MARK_ALL_FRMS_RED_ENA BIT(6)
+#define ANA_AC_SDLB_DLB_MISC_MARK_ALL_FRMS_RED_ENA_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_DLB_MISC_MARK_ALL_FRMS_RED_ENA, x)
+#define ANA_AC_SDLB_DLB_MISC_MARK_ALL_FRMS_RED_ENA_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_DLB_MISC_MARK_ALL_FRMS_RED_ENA, x)
+
+#define ANA_AC_SDLB_DLB_MISC_DLB_FRM_ADJ GENMASK(14, 8)
+#define ANA_AC_SDLB_DLB_MISC_DLB_FRM_ADJ_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_DLB_MISC_DLB_FRM_ADJ, x)
+#define ANA_AC_SDLB_DLB_MISC_DLB_FRM_ADJ_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_DLB_MISC_DLB_FRM_ADJ, x)
+
+/* ANA_AC_SDLB:LBSET_TBL:DLB_CFG */
+#define ANA_AC_SDLB_DLB_CFG(g) __REG(TARGET_ANA_AC_SDLB,\
+ 0, 1, 0, g, 4616, 64, 36, 0, 1, 4)
+
+#define ANA_AC_SDLB_DLB_CFG_DROP_ON_YELLOW_ENA BIT(11)
+#define ANA_AC_SDLB_DLB_CFG_DROP_ON_YELLOW_ENA_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_DLB_CFG_DROP_ON_YELLOW_ENA, x)
+#define ANA_AC_SDLB_DLB_CFG_DROP_ON_YELLOW_ENA_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_DLB_CFG_DROP_ON_YELLOW_ENA, x)
+
+#define ANA_AC_SDLB_DLB_CFG_DP_BYPASS_LVL GENMASK(10, 9)
+#define ANA_AC_SDLB_DLB_CFG_DP_BYPASS_LVL_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_DLB_CFG_DP_BYPASS_LVL, x)
+#define ANA_AC_SDLB_DLB_CFG_DP_BYPASS_LVL_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_DLB_CFG_DP_BYPASS_LVL, x)
+
+#define ANA_AC_SDLB_DLB_CFG_HIER_DLB_DIS BIT(8)
+#define ANA_AC_SDLB_DLB_CFG_HIER_DLB_DIS_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_DLB_CFG_HIER_DLB_DIS, x)
+#define ANA_AC_SDLB_DLB_CFG_HIER_DLB_DIS_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_DLB_CFG_HIER_DLB_DIS, x)
+
+#define ANA_AC_SDLB_DLB_CFG_ENCAP_DATA_DIS BIT(7)
+#define ANA_AC_SDLB_DLB_CFG_ENCAP_DATA_DIS_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_DLB_CFG_ENCAP_DATA_DIS, x)
+#define ANA_AC_SDLB_DLB_CFG_ENCAP_DATA_DIS_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_DLB_CFG_ENCAP_DATA_DIS, x)
+
+#define ANA_AC_SDLB_DLB_CFG_COLOR_AWARE_LVL GENMASK(6, 5)
+#define ANA_AC_SDLB_DLB_CFG_COLOR_AWARE_LVL_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_DLB_CFG_COLOR_AWARE_LVL, x)
+#define ANA_AC_SDLB_DLB_CFG_COLOR_AWARE_LVL_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_DLB_CFG_COLOR_AWARE_LVL, x)
+
+#define ANA_AC_SDLB_DLB_CFG_CIR_INC_DP_VAL GENMASK(4, 3)
+#define ANA_AC_SDLB_DLB_CFG_CIR_INC_DP_VAL_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_DLB_CFG_CIR_INC_DP_VAL, x)
+#define ANA_AC_SDLB_DLB_CFG_CIR_INC_DP_VAL_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_DLB_CFG_CIR_INC_DP_VAL, x)
+
+#define ANA_AC_SDLB_DLB_CFG_DLB_MODE BIT(2)
+#define ANA_AC_SDLB_DLB_CFG_DLB_MODE_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_DLB_CFG_DLB_MODE, x)
+#define ANA_AC_SDLB_DLB_CFG_DLB_MODE_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_DLB_CFG_DLB_MODE, x)
+
+#define ANA_AC_SDLB_DLB_CFG_TRAFFIC_TYPE_MASK GENMASK(1, 0)
+#define ANA_AC_SDLB_DLB_CFG_TRAFFIC_TYPE_MASK_SET(x)\
+ FIELD_PREP(ANA_AC_SDLB_DLB_CFG_TRAFFIC_TYPE_MASK, x)
+#define ANA_AC_SDLB_DLB_CFG_TRAFFIC_TYPE_MASK_GET(x)\
+ FIELD_GET(ANA_AC_SDLB_DLB_CFG_TRAFFIC_TYPE_MASK, x)
+
/* ANA_CL:PORT:FILTER_CTRL */
-#define ANA_CL_FILTER_CTRL(g) __REG(TARGET_ANA_CL, 0, 1, 131072, g, 70, 512, 4, 0, 1, 4)
+#define ANA_CL_FILTER_CTRL(g) __REG(TARGET_ANA_CL,\
+ 0, 1, 131072, g, 70, 512, 4, 0, 1, 4)
#define ANA_CL_FILTER_CTRL_FILTER_SMAC_MC_DIS BIT(2)
#define ANA_CL_FILTER_CTRL_FILTER_SMAC_MC_DIS_SET(x)\
@@ -589,7 +1122,8 @@ enum sparx5_target {
FIELD_GET(ANA_CL_FILTER_CTRL_FORCE_FCS_UPDATE_ENA, x)
/* ANA_CL:PORT:VLAN_FILTER_CTRL */
-#define ANA_CL_VLAN_FILTER_CTRL(g, r) __REG(TARGET_ANA_CL, 0, 1, 131072, g, 70, 512, 8, r, 3, 4)
+#define ANA_CL_VLAN_FILTER_CTRL(g, r) __REG(TARGET_ANA_CL,\
+ 0, 1, 131072, g, 70, 512, 8, r, 3, 4)
#define ANA_CL_VLAN_FILTER_CTRL_TAG_REQUIRED_ENA BIT(10)
#define ANA_CL_VLAN_FILTER_CTRL_TAG_REQUIRED_ENA_SET(x)\
@@ -658,7 +1192,8 @@ enum sparx5_target {
FIELD_GET(ANA_CL_VLAN_FILTER_CTRL_CUST3_STAG_DIS, x)
/* ANA_CL:PORT:ETAG_FILTER_CTRL */
-#define ANA_CL_ETAG_FILTER_CTRL(g) __REG(TARGET_ANA_CL, 0, 1, 131072, g, 70, 512, 20, 0, 1, 4)
+#define ANA_CL_ETAG_FILTER_CTRL(g) __REG(TARGET_ANA_CL,\
+ 0, 1, 131072, g, 70, 512, 20, 0, 1, 4)
#define ANA_CL_ETAG_FILTER_CTRL_ETAG_REQUIRED_ENA BIT(1)
#define ANA_CL_ETAG_FILTER_CTRL_ETAG_REQUIRED_ENA_SET(x)\
@@ -673,7 +1208,8 @@ enum sparx5_target {
FIELD_GET(ANA_CL_ETAG_FILTER_CTRL_ETAG_DIS, x)
/* ANA_CL:PORT:VLAN_CTRL */
-#define ANA_CL_VLAN_CTRL(g) __REG(TARGET_ANA_CL, 0, 1, 131072, g, 70, 512, 32, 0, 1, 4)
+#define ANA_CL_VLAN_CTRL(g) __REG(TARGET_ANA_CL,\
+ 0, 1, 131072, g, 70, 512, 32, 0, 1, 4)
#define ANA_CL_VLAN_CTRL_PORT_VOE_TPID_AWARE_DIS GENMASK(30, 26)
#define ANA_CL_VLAN_CTRL_PORT_VOE_TPID_AWARE_DIS_SET(x)\
@@ -742,7 +1278,8 @@ enum sparx5_target {
FIELD_GET(ANA_CL_VLAN_CTRL_PORT_VID, x)
/* ANA_CL:PORT:VLAN_CTRL_2 */
-#define ANA_CL_VLAN_CTRL_2(g) __REG(TARGET_ANA_CL, 0, 1, 131072, g, 70, 512, 36, 0, 1, 4)
+#define ANA_CL_VLAN_CTRL_2(g) __REG(TARGET_ANA_CL,\
+ 0, 1, 131072, g, 70, 512, 36, 0, 1, 4)
#define ANA_CL_VLAN_CTRL_2_VLAN_PUSH_CNT GENMASK(1, 0)
#define ANA_CL_VLAN_CTRL_2_VLAN_PUSH_CNT_SET(x)\
@@ -751,7 +1288,8 @@ enum sparx5_target {
FIELD_GET(ANA_CL_VLAN_CTRL_2_VLAN_PUSH_CNT, x)
/* ANA_CL:PORT:PCP_DEI_MAP_CFG */
-#define ANA_CL_PCP_DEI_MAP_CFG(g, r) __REG(TARGET_ANA_CL, 0, 1, 131072, g, 70, 512, 108, r, 16, 4)
+#define ANA_CL_PCP_DEI_MAP_CFG(g, r) __REG(TARGET_ANA_CL,\
+ 0, 1, 131072, g, 70, 512, 108, r, 16, 4)
#define ANA_CL_PCP_DEI_MAP_CFG_PCP_DEI_DP_VAL GENMASK(4, 3)
#define ANA_CL_PCP_DEI_MAP_CFG_PCP_DEI_DP_VAL_SET(x)\
@@ -766,7 +1304,8 @@ enum sparx5_target {
FIELD_GET(ANA_CL_PCP_DEI_MAP_CFG_PCP_DEI_QOS_VAL, x)
/* ANA_CL:PORT:QOS_CFG */
-#define ANA_CL_QOS_CFG(g) __REG(TARGET_ANA_CL, 0, 1, 131072, g, 70, 512, 172, 0, 1, 4)
+#define ANA_CL_QOS_CFG(g) __REG(TARGET_ANA_CL,\
+ 0, 1, 131072, g, 70, 512, 172, 0, 1, 4)
#define ANA_CL_QOS_CFG_DEFAULT_COSID_ENA BIT(17)
#define ANA_CL_QOS_CFG_DEFAULT_COSID_ENA_SET(x)\
@@ -841,10 +1380,74 @@ enum sparx5_target {
FIELD_GET(ANA_CL_QOS_CFG_DEFAULT_QOS_VAL, x)
/* ANA_CL:PORT:CAPTURE_BPDU_CFG */
-#define ANA_CL_CAPTURE_BPDU_CFG(g) __REG(TARGET_ANA_CL, 0, 1, 131072, g, 70, 512, 196, 0, 1, 4)
+#define ANA_CL_CAPTURE_BPDU_CFG(g) __REG(TARGET_ANA_CL,\
+ 0, 1, 131072, g, 70, 512, 196, 0, 1, 4)
+
+/* ANA_CL:PORT:ADV_CL_CFG_2 */
+#define ANA_CL_ADV_CL_CFG_2(g, r) __REG(TARGET_ANA_CL,\
+ 0, 1, 131072, g, 70, 512, 200, r, 6, 4)
+
+#define ANA_CL_ADV_CL_CFG_2_USE_CL_TCI0_ENA BIT(1)
+#define ANA_CL_ADV_CL_CFG_2_USE_CL_TCI0_ENA_SET(x)\
+ FIELD_PREP(ANA_CL_ADV_CL_CFG_2_USE_CL_TCI0_ENA, x)
+#define ANA_CL_ADV_CL_CFG_2_USE_CL_TCI0_ENA_GET(x)\
+ FIELD_GET(ANA_CL_ADV_CL_CFG_2_USE_CL_TCI0_ENA, x)
+
+#define ANA_CL_ADV_CL_CFG_2_USE_CL_DSCP_ENA BIT(0)
+#define ANA_CL_ADV_CL_CFG_2_USE_CL_DSCP_ENA_SET(x)\
+ FIELD_PREP(ANA_CL_ADV_CL_CFG_2_USE_CL_DSCP_ENA, x)
+#define ANA_CL_ADV_CL_CFG_2_USE_CL_DSCP_ENA_GET(x)\
+ FIELD_GET(ANA_CL_ADV_CL_CFG_2_USE_CL_DSCP_ENA, x)
+
+/* ANA_CL:PORT:ADV_CL_CFG */
+#define ANA_CL_ADV_CL_CFG(g, r) __REG(TARGET_ANA_CL,\
+ 0, 1, 131072, g, 70, 512, 224, r, 6, 4)
+
+#define ANA_CL_ADV_CL_CFG_IP4_CLM_KEY_SEL GENMASK(30, 26)
+#define ANA_CL_ADV_CL_CFG_IP4_CLM_KEY_SEL_SET(x)\
+ FIELD_PREP(ANA_CL_ADV_CL_CFG_IP4_CLM_KEY_SEL, x)
+#define ANA_CL_ADV_CL_CFG_IP4_CLM_KEY_SEL_GET(x)\
+ FIELD_GET(ANA_CL_ADV_CL_CFG_IP4_CLM_KEY_SEL, x)
+
+#define ANA_CL_ADV_CL_CFG_IP6_CLM_KEY_SEL GENMASK(25, 21)
+#define ANA_CL_ADV_CL_CFG_IP6_CLM_KEY_SEL_SET(x)\
+ FIELD_PREP(ANA_CL_ADV_CL_CFG_IP6_CLM_KEY_SEL, x)
+#define ANA_CL_ADV_CL_CFG_IP6_CLM_KEY_SEL_GET(x)\
+ FIELD_GET(ANA_CL_ADV_CL_CFG_IP6_CLM_KEY_SEL, x)
+
+#define ANA_CL_ADV_CL_CFG_MPLS_UC_CLM_KEY_SEL GENMASK(20, 16)
+#define ANA_CL_ADV_CL_CFG_MPLS_UC_CLM_KEY_SEL_SET(x)\
+ FIELD_PREP(ANA_CL_ADV_CL_CFG_MPLS_UC_CLM_KEY_SEL, x)
+#define ANA_CL_ADV_CL_CFG_MPLS_UC_CLM_KEY_SEL_GET(x)\
+ FIELD_GET(ANA_CL_ADV_CL_CFG_MPLS_UC_CLM_KEY_SEL, x)
+
+#define ANA_CL_ADV_CL_CFG_MPLS_MC_CLM_KEY_SEL GENMASK(15, 11)
+#define ANA_CL_ADV_CL_CFG_MPLS_MC_CLM_KEY_SEL_SET(x)\
+ FIELD_PREP(ANA_CL_ADV_CL_CFG_MPLS_MC_CLM_KEY_SEL, x)
+#define ANA_CL_ADV_CL_CFG_MPLS_MC_CLM_KEY_SEL_GET(x)\
+ FIELD_GET(ANA_CL_ADV_CL_CFG_MPLS_MC_CLM_KEY_SEL, x)
+
+#define ANA_CL_ADV_CL_CFG_MLBS_CLM_KEY_SEL GENMASK(10, 6)
+#define ANA_CL_ADV_CL_CFG_MLBS_CLM_KEY_SEL_SET(x)\
+ FIELD_PREP(ANA_CL_ADV_CL_CFG_MLBS_CLM_KEY_SEL, x)
+#define ANA_CL_ADV_CL_CFG_MLBS_CLM_KEY_SEL_GET(x)\
+ FIELD_GET(ANA_CL_ADV_CL_CFG_MLBS_CLM_KEY_SEL, x)
+
+#define ANA_CL_ADV_CL_CFG_ETYPE_CLM_KEY_SEL GENMASK(5, 1)
+#define ANA_CL_ADV_CL_CFG_ETYPE_CLM_KEY_SEL_SET(x)\
+ FIELD_PREP(ANA_CL_ADV_CL_CFG_ETYPE_CLM_KEY_SEL, x)
+#define ANA_CL_ADV_CL_CFG_ETYPE_CLM_KEY_SEL_GET(x)\
+ FIELD_GET(ANA_CL_ADV_CL_CFG_ETYPE_CLM_KEY_SEL, x)
+
+#define ANA_CL_ADV_CL_CFG_LOOKUP_ENA BIT(0)
+#define ANA_CL_ADV_CL_CFG_LOOKUP_ENA_SET(x)\
+ FIELD_PREP(ANA_CL_ADV_CL_CFG_LOOKUP_ENA, x)
+#define ANA_CL_ADV_CL_CFG_LOOKUP_ENA_GET(x)\
+ FIELD_GET(ANA_CL_ADV_CL_CFG_LOOKUP_ENA, x)
/* ANA_CL:COMMON:OWN_UPSID */
-#define ANA_CL_OWN_UPSID(r) __REG(TARGET_ANA_CL, 0, 1, 166912, 0, 1, 756, 0, r, 3, 4)
+#define ANA_CL_OWN_UPSID(r) __REG(TARGET_ANA_CL,\
+ 0, 1, 166912, 0, 1, 756, 0, r, 3, 4)
#define ANA_CL_OWN_UPSID_OWN_UPSID GENMASK(4, 0)
#define ANA_CL_OWN_UPSID_OWN_UPSID_SET(x)\
@@ -853,7 +1456,8 @@ enum sparx5_target {
FIELD_GET(ANA_CL_OWN_UPSID_OWN_UPSID, x)
/* ANA_CL:COMMON:DSCP_CFG */
-#define ANA_CL_DSCP_CFG(r) __REG(TARGET_ANA_CL, 0, 1, 166912, 0, 1, 756, 256, r, 64, 4)
+#define ANA_CL_DSCP_CFG(r) __REG(TARGET_ANA_CL,\
+ 0, 1, 166912, 0, 1, 756, 256, r, 64, 4)
#define ANA_CL_DSCP_CFG_DSCP_TRANSLATE_VAL GENMASK(12, 7)
#define ANA_CL_DSCP_CFG_DSCP_TRANSLATE_VAL_SET(x)\
@@ -885,14 +1489,103 @@ enum sparx5_target {
#define ANA_CL_DSCP_CFG_DSCP_TRUST_ENA_GET(x)\
FIELD_GET(ANA_CL_DSCP_CFG_DSCP_TRUST_ENA, x)
+/* ANA_CL:COMMON:QOS_MAP_CFG */
+#define ANA_CL_QOS_MAP_CFG(r) __REG(TARGET_ANA_CL,\
+ 0, 1, 166912, 0, 1, 756, 512, r, 32, 4)
+
+#define ANA_CL_QOS_MAP_CFG_DSCP_REWR_VAL GENMASK(9, 4)
+#define ANA_CL_QOS_MAP_CFG_DSCP_REWR_VAL_SET(x)\
+ FIELD_PREP(ANA_CL_QOS_MAP_CFG_DSCP_REWR_VAL, x)
+#define ANA_CL_QOS_MAP_CFG_DSCP_REWR_VAL_GET(x)\
+ FIELD_GET(ANA_CL_QOS_MAP_CFG_DSCP_REWR_VAL, x)
+
+/* ANA_L2:COMMON:FWD_CFG */
+#define ANA_L2_FWD_CFG __REG(TARGET_ANA_L2,\
+ 0, 1, 566024, 0, 1, 700, 0, 0, 1, 4)
+
+#define ANA_L2_FWD_CFG_MAC_TBL_SPLIT_SEL GENMASK(21, 20)
+#define ANA_L2_FWD_CFG_MAC_TBL_SPLIT_SEL_SET(x)\
+ FIELD_PREP(ANA_L2_FWD_CFG_MAC_TBL_SPLIT_SEL, x)
+#define ANA_L2_FWD_CFG_MAC_TBL_SPLIT_SEL_GET(x)\
+ FIELD_GET(ANA_L2_FWD_CFG_MAC_TBL_SPLIT_SEL, x)
+
+#define ANA_L2_FWD_CFG_PORT_DEFAULT_BDLB_ENA BIT(18)
+#define ANA_L2_FWD_CFG_PORT_DEFAULT_BDLB_ENA_SET(x)\
+ FIELD_PREP(ANA_L2_FWD_CFG_PORT_DEFAULT_BDLB_ENA, x)
+#define ANA_L2_FWD_CFG_PORT_DEFAULT_BDLB_ENA_GET(x)\
+ FIELD_GET(ANA_L2_FWD_CFG_PORT_DEFAULT_BDLB_ENA, x)
+
+#define ANA_L2_FWD_CFG_QUEUE_DEFAULT_SDLB_ENA BIT(17)
+#define ANA_L2_FWD_CFG_QUEUE_DEFAULT_SDLB_ENA_SET(x)\
+ FIELD_PREP(ANA_L2_FWD_CFG_QUEUE_DEFAULT_SDLB_ENA, x)
+#define ANA_L2_FWD_CFG_QUEUE_DEFAULT_SDLB_ENA_GET(x)\
+ FIELD_GET(ANA_L2_FWD_CFG_QUEUE_DEFAULT_SDLB_ENA, x)
+
+#define ANA_L2_FWD_CFG_ISDX_LOOKUP_ENA BIT(16)
+#define ANA_L2_FWD_CFG_ISDX_LOOKUP_ENA_SET(x)\
+ FIELD_PREP(ANA_L2_FWD_CFG_ISDX_LOOKUP_ENA, x)
+#define ANA_L2_FWD_CFG_ISDX_LOOKUP_ENA_GET(x)\
+ FIELD_GET(ANA_L2_FWD_CFG_ISDX_LOOKUP_ENA, x)
+
+#define ANA_L2_FWD_CFG_CPU_DMAC_QU GENMASK(10, 8)
+#define ANA_L2_FWD_CFG_CPU_DMAC_QU_SET(x)\
+ FIELD_PREP(ANA_L2_FWD_CFG_CPU_DMAC_QU, x)
+#define ANA_L2_FWD_CFG_CPU_DMAC_QU_GET(x)\
+ FIELD_GET(ANA_L2_FWD_CFG_CPU_DMAC_QU, x)
+
+#define ANA_L2_FWD_CFG_LOOPBACK_ENA BIT(7)
+#define ANA_L2_FWD_CFG_LOOPBACK_ENA_SET(x)\
+ FIELD_PREP(ANA_L2_FWD_CFG_LOOPBACK_ENA, x)
+#define ANA_L2_FWD_CFG_LOOPBACK_ENA_GET(x)\
+ FIELD_GET(ANA_L2_FWD_CFG_LOOPBACK_ENA, x)
+
+#define ANA_L2_FWD_CFG_CPU_DMAC_COPY_ENA BIT(6)
+#define ANA_L2_FWD_CFG_CPU_DMAC_COPY_ENA_SET(x)\
+ FIELD_PREP(ANA_L2_FWD_CFG_CPU_DMAC_COPY_ENA, x)
+#define ANA_L2_FWD_CFG_CPU_DMAC_COPY_ENA_GET(x)\
+ FIELD_GET(ANA_L2_FWD_CFG_CPU_DMAC_COPY_ENA, x)
+
+#define ANA_L2_FWD_CFG_FILTER_MODE_SEL BIT(4)
+#define ANA_L2_FWD_CFG_FILTER_MODE_SEL_SET(x)\
+ FIELD_PREP(ANA_L2_FWD_CFG_FILTER_MODE_SEL, x)
+#define ANA_L2_FWD_CFG_FILTER_MODE_SEL_GET(x)\
+ FIELD_GET(ANA_L2_FWD_CFG_FILTER_MODE_SEL, x)
+
+#define ANA_L2_FWD_CFG_FLOOD_MIRROR_ENA BIT(3)
+#define ANA_L2_FWD_CFG_FLOOD_MIRROR_ENA_SET(x)\
+ FIELD_PREP(ANA_L2_FWD_CFG_FLOOD_MIRROR_ENA, x)
+#define ANA_L2_FWD_CFG_FLOOD_MIRROR_ENA_GET(x)\
+ FIELD_GET(ANA_L2_FWD_CFG_FLOOD_MIRROR_ENA, x)
+
+#define ANA_L2_FWD_CFG_FLOOD_IGNORE_VLAN_ENA BIT(2)
+#define ANA_L2_FWD_CFG_FLOOD_IGNORE_VLAN_ENA_SET(x)\
+ FIELD_PREP(ANA_L2_FWD_CFG_FLOOD_IGNORE_VLAN_ENA, x)
+#define ANA_L2_FWD_CFG_FLOOD_IGNORE_VLAN_ENA_GET(x)\
+ FIELD_GET(ANA_L2_FWD_CFG_FLOOD_IGNORE_VLAN_ENA, x)
+
+#define ANA_L2_FWD_CFG_FLOOD_CPU_COPY_ENA BIT(1)
+#define ANA_L2_FWD_CFG_FLOOD_CPU_COPY_ENA_SET(x)\
+ FIELD_PREP(ANA_L2_FWD_CFG_FLOOD_CPU_COPY_ENA, x)
+#define ANA_L2_FWD_CFG_FLOOD_CPU_COPY_ENA_GET(x)\
+ FIELD_GET(ANA_L2_FWD_CFG_FLOOD_CPU_COPY_ENA, x)
+
+#define ANA_L2_FWD_CFG_FWD_ENA BIT(0)
+#define ANA_L2_FWD_CFG_FWD_ENA_SET(x)\
+ FIELD_PREP(ANA_L2_FWD_CFG_FWD_ENA, x)
+#define ANA_L2_FWD_CFG_FWD_ENA_GET(x)\
+ FIELD_GET(ANA_L2_FWD_CFG_FWD_ENA, x)
+
/* ANA_L2:COMMON:AUTO_LRN_CFG */
-#define ANA_L2_AUTO_LRN_CFG __REG(TARGET_ANA_L2, 0, 1, 566024, 0, 1, 700, 24, 0, 1, 4)
+#define ANA_L2_AUTO_LRN_CFG __REG(TARGET_ANA_L2,\
+ 0, 1, 566024, 0, 1, 700, 24, 0, 1, 4)
/* ANA_L2:COMMON:AUTO_LRN_CFG1 */
-#define ANA_L2_AUTO_LRN_CFG1 __REG(TARGET_ANA_L2, 0, 1, 566024, 0, 1, 700, 28, 0, 1, 4)
+#define ANA_L2_AUTO_LRN_CFG1 __REG(TARGET_ANA_L2,\
+ 0, 1, 566024, 0, 1, 700, 28, 0, 1, 4)
/* ANA_L2:COMMON:AUTO_LRN_CFG2 */
-#define ANA_L2_AUTO_LRN_CFG2 __REG(TARGET_ANA_L2, 0, 1, 566024, 0, 1, 700, 32, 0, 1, 4)
+#define ANA_L2_AUTO_LRN_CFG2 __REG(TARGET_ANA_L2,\
+ 0, 1, 566024, 0, 1, 700, 32, 0, 1, 4)
#define ANA_L2_AUTO_LRN_CFG2_AUTO_LRN_ENA2 BIT(0)
#define ANA_L2_AUTO_LRN_CFG2_AUTO_LRN_ENA2_SET(x)\
@@ -901,7 +1594,8 @@ enum sparx5_target {
FIELD_GET(ANA_L2_AUTO_LRN_CFG2_AUTO_LRN_ENA2, x)
/* ANA_L2:COMMON:OWN_UPSID */
-#define ANA_L2_OWN_UPSID(r) __REG(TARGET_ANA_L2, 0, 1, 566024, 0, 1, 700, 672, r, 3, 4)
+#define ANA_L2_OWN_UPSID(r) __REG(TARGET_ANA_L2,\
+ 0, 1, 566024, 0, 1, 700, 672, r, 3, 4)
#define ANA_L2_OWN_UPSID_OWN_UPSID GENMASK(4, 0)
#define ANA_L2_OWN_UPSID_OWN_UPSID_SET(x)\
@@ -909,8 +1603,29 @@ enum sparx5_target {
#define ANA_L2_OWN_UPSID_OWN_UPSID_GET(x)\
FIELD_GET(ANA_L2_OWN_UPSID_OWN_UPSID, x)
+/* ANA_L2:ISDX:DLB_CFG */
+#define ANA_L2_DLB_CFG(g) __REG(TARGET_ANA_L2,\
+ 0, 1, 0, g, 4096, 128, 56, 0, 1, 4)
+
+#define ANA_L2_DLB_CFG_DLB_IDX GENMASK(12, 0)
+#define ANA_L2_DLB_CFG_DLB_IDX_SET(x)\
+ FIELD_PREP(ANA_L2_DLB_CFG_DLB_IDX, x)
+#define ANA_L2_DLB_CFG_DLB_IDX_GET(x)\
+ FIELD_GET(ANA_L2_DLB_CFG_DLB_IDX, x)
+
+/* ANA_L2:ISDX:TSN_CFG */
+#define ANA_L2_TSN_CFG(g) __REG(TARGET_ANA_L2,\
+ 0, 1, 0, g, 4096, 128, 100, 0, 1, 4)
+
+#define ANA_L2_TSN_CFG_TSN_SFID GENMASK(9, 0)
+#define ANA_L2_TSN_CFG_TSN_SFID_SET(x)\
+ FIELD_PREP(ANA_L2_TSN_CFG_TSN_SFID, x)
+#define ANA_L2_TSN_CFG_TSN_SFID_GET(x)\
+ FIELD_GET(ANA_L2_TSN_CFG_TSN_SFID, x)
+
/* ANA_L3:COMMON:VLAN_CTRL */
-#define ANA_L3_VLAN_CTRL __REG(TARGET_ANA_L3, 0, 1, 493632, 0, 1, 184, 4, 0, 1, 4)
+#define ANA_L3_VLAN_CTRL __REG(TARGET_ANA_L3,\
+ 0, 1, 493632, 0, 1, 184, 4, 0, 1, 4)
#define ANA_L3_VLAN_CTRL_VLAN_ENA BIT(0)
#define ANA_L3_VLAN_CTRL_VLAN_ENA_SET(x)\
@@ -919,7 +1634,8 @@ enum sparx5_target {
FIELD_GET(ANA_L3_VLAN_CTRL_VLAN_ENA, x)
/* ANA_L3:VLAN:VLAN_CFG */
-#define ANA_L3_VLAN_CFG(g) __REG(TARGET_ANA_L3, 0, 1, 0, g, 5120, 64, 8, 0, 1, 4)
+#define ANA_L3_VLAN_CFG(g) __REG(TARGET_ANA_L3,\
+ 0, 1, 0, g, 5120, 64, 8, 0, 1, 4)
#define ANA_L3_VLAN_CFG_VLAN_MSTP_PTR GENMASK(30, 24)
#define ANA_L3_VLAN_CFG_VLAN_MSTP_PTR_SET(x)\
@@ -976,13 +1692,16 @@ enum sparx5_target {
FIELD_GET(ANA_L3_VLAN_CFG_VLAN_MIRROR_ENA, x)
/* ANA_L3:VLAN:VLAN_MASK_CFG */
-#define ANA_L3_VLAN_MASK_CFG(g) __REG(TARGET_ANA_L3, 0, 1, 0, g, 5120, 64, 16, 0, 1, 4)
+#define ANA_L3_VLAN_MASK_CFG(g) __REG(TARGET_ANA_L3,\
+ 0, 1, 0, g, 5120, 64, 16, 0, 1, 4)
/* ANA_L3:VLAN:VLAN_MASK_CFG1 */
-#define ANA_L3_VLAN_MASK_CFG1(g) __REG(TARGET_ANA_L3, 0, 1, 0, g, 5120, 64, 20, 0, 1, 4)
+#define ANA_L3_VLAN_MASK_CFG1(g) __REG(TARGET_ANA_L3,\
+ 0, 1, 0, g, 5120, 64, 20, 0, 1, 4)
/* ANA_L3:VLAN:VLAN_MASK_CFG2 */
-#define ANA_L3_VLAN_MASK_CFG2(g) __REG(TARGET_ANA_L3, 0, 1, 0, g, 5120, 64, 24, 0, 1, 4)
+#define ANA_L3_VLAN_MASK_CFG2(g) __REG(TARGET_ANA_L3,\
+ 0, 1, 0, g, 5120, 64, 24, 0, 1, 4)
#define ANA_L3_VLAN_MASK_CFG2_VLAN_PORT_MASK2 BIT(0)
#define ANA_L3_VLAN_MASK_CFG2_VLAN_PORT_MASK2_SET(x)\
@@ -991,274 +1710,364 @@ enum sparx5_target {
FIELD_GET(ANA_L3_VLAN_MASK_CFG2_VLAN_PORT_MASK2, x)
/* ASM:DEV_STATISTICS:RX_IN_BYTES_CNT */
-#define ASM_RX_IN_BYTES_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 0, 0, 1, 4)
+#define ASM_RX_IN_BYTES_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 0, 0, 1, 4)
/* ASM:DEV_STATISTICS:RX_SYMBOL_ERR_CNT */
-#define ASM_RX_SYMBOL_ERR_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 4, 0, 1, 4)
+#define ASM_RX_SYMBOL_ERR_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 4, 0, 1, 4)
/* ASM:DEV_STATISTICS:RX_PAUSE_CNT */
-#define ASM_RX_PAUSE_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 8, 0, 1, 4)
+#define ASM_RX_PAUSE_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 8, 0, 1, 4)
/* ASM:DEV_STATISTICS:RX_UNSUP_OPCODE_CNT */
-#define ASM_RX_UNSUP_OPCODE_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 12, 0, 1, 4)
+#define ASM_RX_UNSUP_OPCODE_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 12, 0, 1, 4)
/* ASM:DEV_STATISTICS:RX_OK_BYTES_CNT */
-#define ASM_RX_OK_BYTES_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 16, 0, 1, 4)
+#define ASM_RX_OK_BYTES_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 16, 0, 1, 4)
/* ASM:DEV_STATISTICS:RX_BAD_BYTES_CNT */
-#define ASM_RX_BAD_BYTES_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 20, 0, 1, 4)
+#define ASM_RX_BAD_BYTES_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 20, 0, 1, 4)
/* ASM:DEV_STATISTICS:RX_UC_CNT */
-#define ASM_RX_UC_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 24, 0, 1, 4)
+#define ASM_RX_UC_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 24, 0, 1, 4)
/* ASM:DEV_STATISTICS:RX_MC_CNT */
-#define ASM_RX_MC_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 28, 0, 1, 4)
+#define ASM_RX_MC_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 28, 0, 1, 4)
/* ASM:DEV_STATISTICS:RX_BC_CNT */
-#define ASM_RX_BC_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 32, 0, 1, 4)
+#define ASM_RX_BC_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 32, 0, 1, 4)
/* ASM:DEV_STATISTICS:RX_CRC_ERR_CNT */
-#define ASM_RX_CRC_ERR_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 36, 0, 1, 4)
+#define ASM_RX_CRC_ERR_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 36, 0, 1, 4)
/* ASM:DEV_STATISTICS:RX_UNDERSIZE_CNT */
-#define ASM_RX_UNDERSIZE_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 40, 0, 1, 4)
+#define ASM_RX_UNDERSIZE_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 40, 0, 1, 4)
/* ASM:DEV_STATISTICS:RX_FRAGMENTS_CNT */
-#define ASM_RX_FRAGMENTS_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 44, 0, 1, 4)
+#define ASM_RX_FRAGMENTS_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 44, 0, 1, 4)
/* ASM:DEV_STATISTICS:RX_IN_RANGE_LEN_ERR_CNT */
-#define ASM_RX_IN_RANGE_LEN_ERR_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 48, 0, 1, 4)
+#define ASM_RX_IN_RANGE_LEN_ERR_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 48, 0, 1, 4)
/* ASM:DEV_STATISTICS:RX_OUT_OF_RANGE_LEN_ERR_CNT */
-#define ASM_RX_OUT_OF_RANGE_LEN_ERR_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 52, 0, 1, 4)
+#define ASM_RX_OUT_OF_RANGE_LEN_ERR_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 52, 0, 1, 4)
/* ASM:DEV_STATISTICS:RX_OVERSIZE_CNT */
-#define ASM_RX_OVERSIZE_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 56, 0, 1, 4)
+#define ASM_RX_OVERSIZE_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 56, 0, 1, 4)
/* ASM:DEV_STATISTICS:RX_JABBERS_CNT */
-#define ASM_RX_JABBERS_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 60, 0, 1, 4)
+#define ASM_RX_JABBERS_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 60, 0, 1, 4)
/* ASM:DEV_STATISTICS:RX_SIZE64_CNT */
-#define ASM_RX_SIZE64_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 64, 0, 1, 4)
+#define ASM_RX_SIZE64_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 64, 0, 1, 4)
/* ASM:DEV_STATISTICS:RX_SIZE65TO127_CNT */
-#define ASM_RX_SIZE65TO127_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 68, 0, 1, 4)
+#define ASM_RX_SIZE65TO127_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 68, 0, 1, 4)
/* ASM:DEV_STATISTICS:RX_SIZE128TO255_CNT */
-#define ASM_RX_SIZE128TO255_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 72, 0, 1, 4)
+#define ASM_RX_SIZE128TO255_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 72, 0, 1, 4)
/* ASM:DEV_STATISTICS:RX_SIZE256TO511_CNT */
-#define ASM_RX_SIZE256TO511_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 76, 0, 1, 4)
+#define ASM_RX_SIZE256TO511_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 76, 0, 1, 4)
/* ASM:DEV_STATISTICS:RX_SIZE512TO1023_CNT */
-#define ASM_RX_SIZE512TO1023_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 80, 0, 1, 4)
+#define ASM_RX_SIZE512TO1023_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 80, 0, 1, 4)
/* ASM:DEV_STATISTICS:RX_SIZE1024TO1518_CNT */
-#define ASM_RX_SIZE1024TO1518_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 84, 0, 1, 4)
+#define ASM_RX_SIZE1024TO1518_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 84, 0, 1, 4)
/* ASM:DEV_STATISTICS:RX_SIZE1519TOMAX_CNT */
-#define ASM_RX_SIZE1519TOMAX_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 88, 0, 1, 4)
+#define ASM_RX_SIZE1519TOMAX_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 88, 0, 1, 4)
/* ASM:DEV_STATISTICS:RX_IPG_SHRINK_CNT */
-#define ASM_RX_IPG_SHRINK_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 92, 0, 1, 4)
+#define ASM_RX_IPG_SHRINK_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 92, 0, 1, 4)
/* ASM:DEV_STATISTICS:TX_OUT_BYTES_CNT */
-#define ASM_TX_OUT_BYTES_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 96, 0, 1, 4)
+#define ASM_TX_OUT_BYTES_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 96, 0, 1, 4)
/* ASM:DEV_STATISTICS:TX_PAUSE_CNT */
-#define ASM_TX_PAUSE_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 100, 0, 1, 4)
+#define ASM_TX_PAUSE_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 100, 0, 1, 4)
/* ASM:DEV_STATISTICS:TX_OK_BYTES_CNT */
-#define ASM_TX_OK_BYTES_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 104, 0, 1, 4)
+#define ASM_TX_OK_BYTES_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 104, 0, 1, 4)
/* ASM:DEV_STATISTICS:TX_UC_CNT */
-#define ASM_TX_UC_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 108, 0, 1, 4)
+#define ASM_TX_UC_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 108, 0, 1, 4)
/* ASM:DEV_STATISTICS:TX_MC_CNT */
-#define ASM_TX_MC_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 112, 0, 1, 4)
+#define ASM_TX_MC_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 112, 0, 1, 4)
/* ASM:DEV_STATISTICS:TX_BC_CNT */
-#define ASM_TX_BC_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 116, 0, 1, 4)
+#define ASM_TX_BC_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 116, 0, 1, 4)
/* ASM:DEV_STATISTICS:TX_SIZE64_CNT */
-#define ASM_TX_SIZE64_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 120, 0, 1, 4)
+#define ASM_TX_SIZE64_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 120, 0, 1, 4)
/* ASM:DEV_STATISTICS:TX_SIZE65TO127_CNT */
-#define ASM_TX_SIZE65TO127_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 124, 0, 1, 4)
+#define ASM_TX_SIZE65TO127_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 124, 0, 1, 4)
/* ASM:DEV_STATISTICS:TX_SIZE128TO255_CNT */
-#define ASM_TX_SIZE128TO255_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 128, 0, 1, 4)
+#define ASM_TX_SIZE128TO255_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 128, 0, 1, 4)
/* ASM:DEV_STATISTICS:TX_SIZE256TO511_CNT */
-#define ASM_TX_SIZE256TO511_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 132, 0, 1, 4)
+#define ASM_TX_SIZE256TO511_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 132, 0, 1, 4)
/* ASM:DEV_STATISTICS:TX_SIZE512TO1023_CNT */
-#define ASM_TX_SIZE512TO1023_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 136, 0, 1, 4)
+#define ASM_TX_SIZE512TO1023_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 136, 0, 1, 4)
/* ASM:DEV_STATISTICS:TX_SIZE1024TO1518_CNT */
-#define ASM_TX_SIZE1024TO1518_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 140, 0, 1, 4)
+#define ASM_TX_SIZE1024TO1518_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 140, 0, 1, 4)
/* ASM:DEV_STATISTICS:TX_SIZE1519TOMAX_CNT */
-#define ASM_TX_SIZE1519TOMAX_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 144, 0, 1, 4)
+#define ASM_TX_SIZE1519TOMAX_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 144, 0, 1, 4)
/* ASM:DEV_STATISTICS:RX_ALIGNMENT_LOST_CNT */
-#define ASM_RX_ALIGNMENT_LOST_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 148, 0, 1, 4)
+#define ASM_RX_ALIGNMENT_LOST_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 148, 0, 1, 4)
/* ASM:DEV_STATISTICS:RX_TAGGED_FRMS_CNT */
-#define ASM_RX_TAGGED_FRMS_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 152, 0, 1, 4)
+#define ASM_RX_TAGGED_FRMS_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 152, 0, 1, 4)
/* ASM:DEV_STATISTICS:RX_UNTAGGED_FRMS_CNT */
-#define ASM_RX_UNTAGGED_FRMS_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 156, 0, 1, 4)
+#define ASM_RX_UNTAGGED_FRMS_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 156, 0, 1, 4)
/* ASM:DEV_STATISTICS:TX_TAGGED_FRMS_CNT */
-#define ASM_TX_TAGGED_FRMS_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 160, 0, 1, 4)
+#define ASM_TX_TAGGED_FRMS_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 160, 0, 1, 4)
/* ASM:DEV_STATISTICS:TX_UNTAGGED_FRMS_CNT */
-#define ASM_TX_UNTAGGED_FRMS_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 164, 0, 1, 4)
+#define ASM_TX_UNTAGGED_FRMS_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 164, 0, 1, 4)
/* ASM:DEV_STATISTICS:PMAC_RX_SYMBOL_ERR_CNT */
-#define ASM_PMAC_RX_SYMBOL_ERR_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 168, 0, 1, 4)
+#define ASM_PMAC_RX_SYMBOL_ERR_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 168, 0, 1, 4)
/* ASM:DEV_STATISTICS:PMAC_RX_PAUSE_CNT */
-#define ASM_PMAC_RX_PAUSE_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 172, 0, 1, 4)
+#define ASM_PMAC_RX_PAUSE_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 172, 0, 1, 4)
/* ASM:DEV_STATISTICS:PMAC_RX_UNSUP_OPCODE_CNT */
-#define ASM_PMAC_RX_UNSUP_OPCODE_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 176, 0, 1, 4)
+#define ASM_PMAC_RX_UNSUP_OPCODE_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 176, 0, 1, 4)
/* ASM:DEV_STATISTICS:PMAC_RX_OK_BYTES_CNT */
-#define ASM_PMAC_RX_OK_BYTES_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 180, 0, 1, 4)
+#define ASM_PMAC_RX_OK_BYTES_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 180, 0, 1, 4)
/* ASM:DEV_STATISTICS:PMAC_RX_BAD_BYTES_CNT */
-#define ASM_PMAC_RX_BAD_BYTES_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 184, 0, 1, 4)
+#define ASM_PMAC_RX_BAD_BYTES_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 184, 0, 1, 4)
/* ASM:DEV_STATISTICS:PMAC_RX_UC_CNT */
-#define ASM_PMAC_RX_UC_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 188, 0, 1, 4)
+#define ASM_PMAC_RX_UC_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 188, 0, 1, 4)
/* ASM:DEV_STATISTICS:PMAC_RX_MC_CNT */
-#define ASM_PMAC_RX_MC_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 192, 0, 1, 4)
+#define ASM_PMAC_RX_MC_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 192, 0, 1, 4)
/* ASM:DEV_STATISTICS:PMAC_RX_BC_CNT */
-#define ASM_PMAC_RX_BC_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 196, 0, 1, 4)
+#define ASM_PMAC_RX_BC_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 196, 0, 1, 4)
/* ASM:DEV_STATISTICS:PMAC_RX_CRC_ERR_CNT */
-#define ASM_PMAC_RX_CRC_ERR_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 200, 0, 1, 4)
+#define ASM_PMAC_RX_CRC_ERR_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 200, 0, 1, 4)
/* ASM:DEV_STATISTICS:PMAC_RX_UNDERSIZE_CNT */
-#define ASM_PMAC_RX_UNDERSIZE_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 204, 0, 1, 4)
+#define ASM_PMAC_RX_UNDERSIZE_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 204, 0, 1, 4)
/* ASM:DEV_STATISTICS:PMAC_RX_FRAGMENTS_CNT */
-#define ASM_PMAC_RX_FRAGMENTS_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 208, 0, 1, 4)
+#define ASM_PMAC_RX_FRAGMENTS_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 208, 0, 1, 4)
/* ASM:DEV_STATISTICS:PMAC_RX_IN_RANGE_LEN_ERR_CNT */
-#define ASM_PMAC_RX_IN_RANGE_LEN_ERR_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 212, 0, 1, 4)
+#define ASM_PMAC_RX_IN_RANGE_LEN_ERR_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 212, 0, 1, 4)
/* ASM:DEV_STATISTICS:PMAC_RX_OUT_OF_RANGE_LEN_ERR_CNT */
-#define ASM_PMAC_RX_OUT_OF_RANGE_LEN_ERR_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 216, 0, 1, 4)
+#define ASM_PMAC_RX_OUT_OF_RANGE_LEN_ERR_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 216, 0, 1, 4)
/* ASM:DEV_STATISTICS:PMAC_RX_OVERSIZE_CNT */
-#define ASM_PMAC_RX_OVERSIZE_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 220, 0, 1, 4)
+#define ASM_PMAC_RX_OVERSIZE_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 220, 0, 1, 4)
/* ASM:DEV_STATISTICS:PMAC_RX_JABBERS_CNT */
-#define ASM_PMAC_RX_JABBERS_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 224, 0, 1, 4)
+#define ASM_PMAC_RX_JABBERS_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 224, 0, 1, 4)
/* ASM:DEV_STATISTICS:PMAC_RX_SIZE64_CNT */
-#define ASM_PMAC_RX_SIZE64_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 228, 0, 1, 4)
+#define ASM_PMAC_RX_SIZE64_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 228, 0, 1, 4)
/* ASM:DEV_STATISTICS:PMAC_RX_SIZE65TO127_CNT */
-#define ASM_PMAC_RX_SIZE65TO127_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 232, 0, 1, 4)
+#define ASM_PMAC_RX_SIZE65TO127_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 232, 0, 1, 4)
/* ASM:DEV_STATISTICS:PMAC_RX_SIZE128TO255_CNT */
-#define ASM_PMAC_RX_SIZE128TO255_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 236, 0, 1, 4)
+#define ASM_PMAC_RX_SIZE128TO255_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 236, 0, 1, 4)
/* ASM:DEV_STATISTICS:PMAC_RX_SIZE256TO511_CNT */
-#define ASM_PMAC_RX_SIZE256TO511_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 240, 0, 1, 4)
+#define ASM_PMAC_RX_SIZE256TO511_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 240, 0, 1, 4)
/* ASM:DEV_STATISTICS:PMAC_RX_SIZE512TO1023_CNT */
-#define ASM_PMAC_RX_SIZE512TO1023_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 244, 0, 1, 4)
+#define ASM_PMAC_RX_SIZE512TO1023_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 244, 0, 1, 4)
/* ASM:DEV_STATISTICS:PMAC_RX_SIZE1024TO1518_CNT */
-#define ASM_PMAC_RX_SIZE1024TO1518_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 248, 0, 1, 4)
+#define ASM_PMAC_RX_SIZE1024TO1518_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 248, 0, 1, 4)
/* ASM:DEV_STATISTICS:PMAC_RX_SIZE1519TOMAX_CNT */
-#define ASM_PMAC_RX_SIZE1519TOMAX_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 252, 0, 1, 4)
+#define ASM_PMAC_RX_SIZE1519TOMAX_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 252, 0, 1, 4)
/* ASM:DEV_STATISTICS:PMAC_TX_PAUSE_CNT */
-#define ASM_PMAC_TX_PAUSE_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 256, 0, 1, 4)
+#define ASM_PMAC_TX_PAUSE_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 256, 0, 1, 4)
/* ASM:DEV_STATISTICS:PMAC_TX_OK_BYTES_CNT */
-#define ASM_PMAC_TX_OK_BYTES_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 260, 0, 1, 4)
+#define ASM_PMAC_TX_OK_BYTES_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 260, 0, 1, 4)
/* ASM:DEV_STATISTICS:PMAC_TX_UC_CNT */
-#define ASM_PMAC_TX_UC_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 264, 0, 1, 4)
+#define ASM_PMAC_TX_UC_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 264, 0, 1, 4)
/* ASM:DEV_STATISTICS:PMAC_TX_MC_CNT */
-#define ASM_PMAC_TX_MC_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 268, 0, 1, 4)
+#define ASM_PMAC_TX_MC_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 268, 0, 1, 4)
/* ASM:DEV_STATISTICS:PMAC_TX_BC_CNT */
-#define ASM_PMAC_TX_BC_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 272, 0, 1, 4)
+#define ASM_PMAC_TX_BC_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 272, 0, 1, 4)
/* ASM:DEV_STATISTICS:PMAC_TX_SIZE64_CNT */
-#define ASM_PMAC_TX_SIZE64_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 276, 0, 1, 4)
+#define ASM_PMAC_TX_SIZE64_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 276, 0, 1, 4)
/* ASM:DEV_STATISTICS:PMAC_TX_SIZE65TO127_CNT */
-#define ASM_PMAC_TX_SIZE65TO127_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 280, 0, 1, 4)
+#define ASM_PMAC_TX_SIZE65TO127_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 280, 0, 1, 4)
/* ASM:DEV_STATISTICS:PMAC_TX_SIZE128TO255_CNT */
-#define ASM_PMAC_TX_SIZE128TO255_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 284, 0, 1, 4)
+#define ASM_PMAC_TX_SIZE128TO255_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 284, 0, 1, 4)
/* ASM:DEV_STATISTICS:PMAC_TX_SIZE256TO511_CNT */
-#define ASM_PMAC_TX_SIZE256TO511_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 288, 0, 1, 4)
+#define ASM_PMAC_TX_SIZE256TO511_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 288, 0, 1, 4)
/* ASM:DEV_STATISTICS:PMAC_TX_SIZE512TO1023_CNT */
-#define ASM_PMAC_TX_SIZE512TO1023_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 292, 0, 1, 4)
+#define ASM_PMAC_TX_SIZE512TO1023_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 292, 0, 1, 4)
/* ASM:DEV_STATISTICS:PMAC_TX_SIZE1024TO1518_CNT */
-#define ASM_PMAC_TX_SIZE1024TO1518_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 296, 0, 1, 4)
+#define ASM_PMAC_TX_SIZE1024TO1518_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 296, 0, 1, 4)
/* ASM:DEV_STATISTICS:PMAC_TX_SIZE1519TOMAX_CNT */
-#define ASM_PMAC_TX_SIZE1519TOMAX_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 300, 0, 1, 4)
+#define ASM_PMAC_TX_SIZE1519TOMAX_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 300, 0, 1, 4)
/* ASM:DEV_STATISTICS:PMAC_RX_ALIGNMENT_LOST_CNT */
-#define ASM_PMAC_RX_ALIGNMENT_LOST_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 304, 0, 1, 4)
+#define ASM_PMAC_RX_ALIGNMENT_LOST_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 304, 0, 1, 4)
/* ASM:DEV_STATISTICS:MM_RX_ASSEMBLY_ERR_CNT */
-#define ASM_MM_RX_ASSEMBLY_ERR_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 308, 0, 1, 4)
+#define ASM_MM_RX_ASSEMBLY_ERR_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 308, 0, 1, 4)
/* ASM:DEV_STATISTICS:MM_RX_SMD_ERR_CNT */
-#define ASM_MM_RX_SMD_ERR_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 312, 0, 1, 4)
+#define ASM_MM_RX_SMD_ERR_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 312, 0, 1, 4)
/* ASM:DEV_STATISTICS:MM_RX_ASSEMBLY_OK_CNT */
-#define ASM_MM_RX_ASSEMBLY_OK_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 316, 0, 1, 4)
+#define ASM_MM_RX_ASSEMBLY_OK_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 316, 0, 1, 4)
/* ASM:DEV_STATISTICS:MM_RX_MERGE_FRAG_CNT */
-#define ASM_MM_RX_MERGE_FRAG_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 320, 0, 1, 4)
+#define ASM_MM_RX_MERGE_FRAG_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 320, 0, 1, 4)
/* ASM:DEV_STATISTICS:MM_TX_PFRAGMENT_CNT */
-#define ASM_MM_TX_PFRAGMENT_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 324, 0, 1, 4)
+#define ASM_MM_TX_PFRAGMENT_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 324, 0, 1, 4)
/* ASM:DEV_STATISTICS:TX_MULTI_COLL_CNT */
-#define ASM_TX_MULTI_COLL_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 328, 0, 1, 4)
+#define ASM_TX_MULTI_COLL_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 328, 0, 1, 4)
/* ASM:DEV_STATISTICS:TX_LATE_COLL_CNT */
-#define ASM_TX_LATE_COLL_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 332, 0, 1, 4)
+#define ASM_TX_LATE_COLL_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 332, 0, 1, 4)
/* ASM:DEV_STATISTICS:TX_XCOLL_CNT */
-#define ASM_TX_XCOLL_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 336, 0, 1, 4)
+#define ASM_TX_XCOLL_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 336, 0, 1, 4)
/* ASM:DEV_STATISTICS:TX_DEFER_CNT */
-#define ASM_TX_DEFER_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 340, 0, 1, 4)
+#define ASM_TX_DEFER_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 340, 0, 1, 4)
/* ASM:DEV_STATISTICS:TX_XDEFER_CNT */
-#define ASM_TX_XDEFER_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 344, 0, 1, 4)
+#define ASM_TX_XDEFER_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 344, 0, 1, 4)
/* ASM:DEV_STATISTICS:TX_BACKOFF1_CNT */
-#define ASM_TX_BACKOFF1_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 348, 0, 1, 4)
+#define ASM_TX_BACKOFF1_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 348, 0, 1, 4)
/* ASM:DEV_STATISTICS:TX_CSENSE_CNT */
-#define ASM_TX_CSENSE_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 352, 0, 1, 4)
+#define ASM_TX_CSENSE_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 352, 0, 1, 4)
/* ASM:DEV_STATISTICS:RX_IN_BYTES_MSB_CNT */
-#define ASM_RX_IN_BYTES_MSB_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 356, 0, 1, 4)
+#define ASM_RX_IN_BYTES_MSB_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 356, 0, 1, 4)
#define ASM_RX_IN_BYTES_MSB_CNT_RX_IN_BYTES_MSB_CNT GENMASK(3, 0)
#define ASM_RX_IN_BYTES_MSB_CNT_RX_IN_BYTES_MSB_CNT_SET(x)\
@@ -1267,7 +2076,8 @@ enum sparx5_target {
FIELD_GET(ASM_RX_IN_BYTES_MSB_CNT_RX_IN_BYTES_MSB_CNT, x)
/* ASM:DEV_STATISTICS:RX_OK_BYTES_MSB_CNT */
-#define ASM_RX_OK_BYTES_MSB_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 360, 0, 1, 4)
+#define ASM_RX_OK_BYTES_MSB_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 360, 0, 1, 4)
#define ASM_RX_OK_BYTES_MSB_CNT_RX_OK_BYTES_MSB_CNT GENMASK(3, 0)
#define ASM_RX_OK_BYTES_MSB_CNT_RX_OK_BYTES_MSB_CNT_SET(x)\
@@ -1276,7 +2086,8 @@ enum sparx5_target {
FIELD_GET(ASM_RX_OK_BYTES_MSB_CNT_RX_OK_BYTES_MSB_CNT, x)
/* ASM:DEV_STATISTICS:PMAC_RX_OK_BYTES_MSB_CNT */
-#define ASM_PMAC_RX_OK_BYTES_MSB_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 364, 0, 1, 4)
+#define ASM_PMAC_RX_OK_BYTES_MSB_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 364, 0, 1, 4)
#define ASM_PMAC_RX_OK_BYTES_MSB_CNT_PMAC_RX_OK_BYTES_MSB_CNT GENMASK(3, 0)
#define ASM_PMAC_RX_OK_BYTES_MSB_CNT_PMAC_RX_OK_BYTES_MSB_CNT_SET(x)\
@@ -1285,7 +2096,8 @@ enum sparx5_target {
FIELD_GET(ASM_PMAC_RX_OK_BYTES_MSB_CNT_PMAC_RX_OK_BYTES_MSB_CNT, x)
/* ASM:DEV_STATISTICS:RX_BAD_BYTES_MSB_CNT */
-#define ASM_RX_BAD_BYTES_MSB_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 368, 0, 1, 4)
+#define ASM_RX_BAD_BYTES_MSB_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 368, 0, 1, 4)
#define ASM_RX_BAD_BYTES_MSB_CNT_RX_BAD_BYTES_MSB_CNT GENMASK(3, 0)
#define ASM_RX_BAD_BYTES_MSB_CNT_RX_BAD_BYTES_MSB_CNT_SET(x)\
@@ -1294,7 +2106,8 @@ enum sparx5_target {
FIELD_GET(ASM_RX_BAD_BYTES_MSB_CNT_RX_BAD_BYTES_MSB_CNT, x)
/* ASM:DEV_STATISTICS:PMAC_RX_BAD_BYTES_MSB_CNT */
-#define ASM_PMAC_RX_BAD_BYTES_MSB_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 372, 0, 1, 4)
+#define ASM_PMAC_RX_BAD_BYTES_MSB_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 372, 0, 1, 4)
#define ASM_PMAC_RX_BAD_BYTES_MSB_CNT_PMAC_RX_BAD_BYTES_MSB_CNT GENMASK(3, 0)
#define ASM_PMAC_RX_BAD_BYTES_MSB_CNT_PMAC_RX_BAD_BYTES_MSB_CNT_SET(x)\
@@ -1303,7 +2116,8 @@ enum sparx5_target {
FIELD_GET(ASM_PMAC_RX_BAD_BYTES_MSB_CNT_PMAC_RX_BAD_BYTES_MSB_CNT, x)
/* ASM:DEV_STATISTICS:TX_OUT_BYTES_MSB_CNT */
-#define ASM_TX_OUT_BYTES_MSB_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 376, 0, 1, 4)
+#define ASM_TX_OUT_BYTES_MSB_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 376, 0, 1, 4)
#define ASM_TX_OUT_BYTES_MSB_CNT_TX_OUT_BYTES_MSB_CNT GENMASK(3, 0)
#define ASM_TX_OUT_BYTES_MSB_CNT_TX_OUT_BYTES_MSB_CNT_SET(x)\
@@ -1312,7 +2126,8 @@ enum sparx5_target {
FIELD_GET(ASM_TX_OUT_BYTES_MSB_CNT_TX_OUT_BYTES_MSB_CNT, x)
/* ASM:DEV_STATISTICS:TX_OK_BYTES_MSB_CNT */
-#define ASM_TX_OK_BYTES_MSB_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 380, 0, 1, 4)
+#define ASM_TX_OK_BYTES_MSB_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 380, 0, 1, 4)
#define ASM_TX_OK_BYTES_MSB_CNT_TX_OK_BYTES_MSB_CNT GENMASK(3, 0)
#define ASM_TX_OK_BYTES_MSB_CNT_TX_OK_BYTES_MSB_CNT_SET(x)\
@@ -1321,7 +2136,8 @@ enum sparx5_target {
FIELD_GET(ASM_TX_OK_BYTES_MSB_CNT_TX_OK_BYTES_MSB_CNT, x)
/* ASM:DEV_STATISTICS:PMAC_TX_OK_BYTES_MSB_CNT */
-#define ASM_PMAC_TX_OK_BYTES_MSB_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 384, 0, 1, 4)
+#define ASM_PMAC_TX_OK_BYTES_MSB_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 384, 0, 1, 4)
#define ASM_PMAC_TX_OK_BYTES_MSB_CNT_PMAC_TX_OK_BYTES_MSB_CNT GENMASK(3, 0)
#define ASM_PMAC_TX_OK_BYTES_MSB_CNT_PMAC_TX_OK_BYTES_MSB_CNT_SET(x)\
@@ -1330,10 +2146,12 @@ enum sparx5_target {
FIELD_GET(ASM_PMAC_TX_OK_BYTES_MSB_CNT_PMAC_TX_OK_BYTES_MSB_CNT, x)
/* ASM:DEV_STATISTICS:RX_SYNC_LOST_ERR_CNT */
-#define ASM_RX_SYNC_LOST_ERR_CNT(g) __REG(TARGET_ASM, 0, 1, 0, g, 65, 512, 388, 0, 1, 4)
+#define ASM_RX_SYNC_LOST_ERR_CNT(g) __REG(TARGET_ASM,\
+ 0, 1, 0, g, 65, 512, 388, 0, 1, 4)
/* ASM:CFG:STAT_CFG */
-#define ASM_STAT_CFG __REG(TARGET_ASM, 0, 1, 33280, 0, 1, 1088, 0, 0, 1, 4)
+#define ASM_STAT_CFG __REG(TARGET_ASM,\
+ 0, 1, 33280, 0, 1, 1088, 0, 0, 1, 4)
#define ASM_STAT_CFG_STAT_CNT_CLR_SHOT BIT(0)
#define ASM_STAT_CFG_STAT_CNT_CLR_SHOT_SET(x)\
@@ -1342,7 +2160,8 @@ enum sparx5_target {
FIELD_GET(ASM_STAT_CFG_STAT_CNT_CLR_SHOT, x)
/* ASM:CFG:PORT_CFG */
-#define ASM_PORT_CFG(r) __REG(TARGET_ASM, 0, 1, 33280, 0, 1, 1088, 540, r, 67, 4)
+#define ASM_PORT_CFG(r) __REG(TARGET_ASM,\
+ 0, 1, 33280, 0, 1, 1088, 540, r, 67, 4)
#define ASM_PORT_CFG_CSC_STAT_DIS BIT(12)
#define ASM_PORT_CFG_CSC_STAT_DIS_SET(x)\
@@ -1411,7 +2230,8 @@ enum sparx5_target {
FIELD_GET(ASM_PORT_CFG_PFRM_FLUSH, x)
/* ASM:RAM_CTRL:RAM_INIT */
-#define ASM_RAM_INIT __REG(TARGET_ASM, 0, 1, 34832, 0, 1, 4, 0, 0, 1, 4)
+#define ASM_RAM_INIT __REG(TARGET_ASM,\
+ 0, 1, 34832, 0, 1, 4, 0, 0, 1, 4)
#define ASM_RAM_INIT_RAM_INIT BIT(1)
#define ASM_RAM_INIT_RAM_INIT_SET(x)\
@@ -1426,7 +2246,8 @@ enum sparx5_target {
FIELD_GET(ASM_RAM_INIT_RAM_CFG_HOOK, x)
/* CLKGEN:LCPLL1:LCPLL1_CORE_CLK_CFG */
-#define CLKGEN_LCPLL1_CORE_CLK_CFG __REG(TARGET_CLKGEN, 0, 1, 12, 0, 1, 36, 0, 0, 1, 4)
+#define CLKGEN_LCPLL1_CORE_CLK_CFG __REG(TARGET_CLKGEN,\
+ 0, 1, 12, 0, 1, 36, 0, 0, 1, 4)
#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_DIV GENMASK(7, 0)
#define CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_DIV_SET(x)\
@@ -1465,7 +2286,8 @@ enum sparx5_target {
FIELD_GET(CLKGEN_LCPLL1_CORE_CLK_CFG_CORE_CLK_ENA, x)
/* CPU:CPU_REGS:PROC_CTRL */
-#define CPU_PROC_CTRL __REG(TARGET_CPU, 0, 1, 0, 0, 1, 204, 176, 0, 1, 4)
+#define CPU_PROC_CTRL __REG(TARGET_CPU,\
+ 0, 1, 0, 0, 1, 204, 176, 0, 1, 4)
#define CPU_PROC_CTRL_AARCH64_MODE_ENA BIT(12)
#define CPU_PROC_CTRL_AARCH64_MODE_ENA_SET(x)\
@@ -1546,7 +2368,8 @@ enum sparx5_target {
FIELD_GET(CPU_PROC_CTRL_ACP_DISABLE, x)
/* DEV10G:MAC_CFG_STATUS:MAC_ENA_CFG */
-#define DEV10G_MAC_ENA_CFG(t) __REG(TARGET_DEV10G, t, 12, 0, 0, 1, 60, 0, 0, 1, 4)
+#define DEV10G_MAC_ENA_CFG(t) __REG(TARGET_DEV10G,\
+ t, 12, 0, 0, 1, 60, 0, 0, 1, 4)
#define DEV10G_MAC_ENA_CFG_RX_ENA BIT(4)
#define DEV10G_MAC_ENA_CFG_RX_ENA_SET(x)\
@@ -1561,7 +2384,8 @@ enum sparx5_target {
FIELD_GET(DEV10G_MAC_ENA_CFG_TX_ENA, x)
/* DEV10G:MAC_CFG_STATUS:MAC_MAXLEN_CFG */
-#define DEV10G_MAC_MAXLEN_CFG(t) __REG(TARGET_DEV10G, t, 12, 0, 0, 1, 60, 8, 0, 1, 4)
+#define DEV10G_MAC_MAXLEN_CFG(t) __REG(TARGET_DEV10G,\
+ t, 12, 0, 0, 1, 60, 8, 0, 1, 4)
#define DEV10G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK BIT(16)
#define DEV10G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK_SET(x)\
@@ -1576,7 +2400,8 @@ enum sparx5_target {
FIELD_GET(DEV10G_MAC_MAXLEN_CFG_MAX_LEN, x)
/* DEV10G:MAC_CFG_STATUS:MAC_NUM_TAGS_CFG */
-#define DEV10G_MAC_NUM_TAGS_CFG(t) __REG(TARGET_DEV10G, t, 12, 0, 0, 1, 60, 12, 0, 1, 4)
+#define DEV10G_MAC_NUM_TAGS_CFG(t) __REG(TARGET_DEV10G,\
+ t, 12, 0, 0, 1, 60, 12, 0, 1, 4)
#define DEV10G_MAC_NUM_TAGS_CFG_NUM_TAGS GENMASK(1, 0)
#define DEV10G_MAC_NUM_TAGS_CFG_NUM_TAGS_SET(x)\
@@ -1585,7 +2410,8 @@ enum sparx5_target {
FIELD_GET(DEV10G_MAC_NUM_TAGS_CFG_NUM_TAGS, x)
/* DEV10G:MAC_CFG_STATUS:MAC_TAGS_CFG */
-#define DEV10G_MAC_TAGS_CFG(t, r) __REG(TARGET_DEV10G, t, 12, 0, 0, 1, 60, 16, r, 3, 4)
+#define DEV10G_MAC_TAGS_CFG(t, r) __REG(TARGET_DEV10G,\
+ t, 12, 0, 0, 1, 60, 16, r, 3, 4)
#define DEV10G_MAC_TAGS_CFG_TAG_ID GENMASK(31, 16)
#define DEV10G_MAC_TAGS_CFG_TAG_ID_SET(x)\
@@ -1600,7 +2426,8 @@ enum sparx5_target {
FIELD_GET(DEV10G_MAC_TAGS_CFG_TAG_ENA, x)
/* DEV10G:MAC_CFG_STATUS:MAC_ADV_CHK_CFG */
-#define DEV10G_MAC_ADV_CHK_CFG(t) __REG(TARGET_DEV10G, t, 12, 0, 0, 1, 60, 28, 0, 1, 4)
+#define DEV10G_MAC_ADV_CHK_CFG(t) __REG(TARGET_DEV10G,\
+ t, 12, 0, 0, 1, 60, 28, 0, 1, 4)
#define DEV10G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA BIT(24)
#define DEV10G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA_SET(x)\
@@ -1645,7 +2472,8 @@ enum sparx5_target {
FIELD_GET(DEV10G_MAC_ADV_CHK_CFG_INR_ERR_ENA, x)
/* DEV10G:MAC_CFG_STATUS:MAC_TX_MONITOR_STICKY */
-#define DEV10G_MAC_TX_MONITOR_STICKY(t) __REG(TARGET_DEV10G, t, 12, 0, 0, 1, 60, 48, 0, 1, 4)
+#define DEV10G_MAC_TX_MONITOR_STICKY(t) __REG(TARGET_DEV10G,\
+ t, 12, 0, 0, 1, 60, 48, 0, 1, 4)
#define DEV10G_MAC_TX_MONITOR_STICKY_LOCAL_ERR_STATE_STICKY BIT(4)
#define DEV10G_MAC_TX_MONITOR_STICKY_LOCAL_ERR_STATE_STICKY_SET(x)\
@@ -1678,7 +2506,8 @@ enum sparx5_target {
FIELD_GET(DEV10G_MAC_TX_MONITOR_STICKY_DIS_STATE_STICKY, x)
/* DEV10G:DEV_CFG_STATUS:DEV_RST_CTRL */
-#define DEV10G_DEV_RST_CTRL(t) __REG(TARGET_DEV10G, t, 12, 436, 0, 1, 52, 0, 0, 1, 4)
+#define DEV10G_DEV_RST_CTRL(t) __REG(TARGET_DEV10G,\
+ t, 12, 436, 0, 1, 52, 0, 0, 1, 4)
#define DEV10G_DEV_RST_CTRL_PARDET_MODE_ENA BIT(28)
#define DEV10G_DEV_RST_CTRL_PARDET_MODE_ENA_SET(x)\
@@ -1735,7 +2564,8 @@ enum sparx5_target {
FIELD_GET(DEV10G_DEV_RST_CTRL_MAC_RX_RST, x)
/* DEV10G:PCS25G_CFG_STATUS:PCS25G_CFG */
-#define DEV10G_PCS25G_CFG(t) __REG(TARGET_DEV10G, t, 12, 488, 0, 1, 32, 0, 0, 1, 4)
+#define DEV10G_PCS25G_CFG(t) __REG(TARGET_DEV10G,\
+ t, 12, 488, 0, 1, 32, 0, 0, 1, 4)
#define DEV10G_PCS25G_CFG_PCS25G_ENA BIT(0)
#define DEV10G_PCS25G_CFG_PCS25G_ENA_SET(x)\
@@ -1744,7 +2574,8 @@ enum sparx5_target {
FIELD_GET(DEV10G_PCS25G_CFG_PCS25G_ENA, x)
/* DEV10G:MAC_CFG_STATUS:MAC_ENA_CFG */
-#define DEV25G_MAC_ENA_CFG(t) __REG(TARGET_DEV25G, t, 8, 0, 0, 1, 60, 0, 0, 1, 4)
+#define DEV25G_MAC_ENA_CFG(t) __REG(TARGET_DEV25G,\
+ t, 8, 0, 0, 1, 60, 0, 0, 1, 4)
#define DEV25G_MAC_ENA_CFG_RX_ENA BIT(4)
#define DEV25G_MAC_ENA_CFG_RX_ENA_SET(x)\
@@ -1759,7 +2590,8 @@ enum sparx5_target {
FIELD_GET(DEV25G_MAC_ENA_CFG_TX_ENA, x)
/* DEV10G:MAC_CFG_STATUS:MAC_MAXLEN_CFG */
-#define DEV25G_MAC_MAXLEN_CFG(t) __REG(TARGET_DEV25G, t, 8, 0, 0, 1, 60, 8, 0, 1, 4)
+#define DEV25G_MAC_MAXLEN_CFG(t) __REG(TARGET_DEV25G,\
+ t, 8, 0, 0, 1, 60, 8, 0, 1, 4)
#define DEV25G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK BIT(16)
#define DEV25G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK_SET(x)\
@@ -1774,7 +2606,8 @@ enum sparx5_target {
FIELD_GET(DEV25G_MAC_MAXLEN_CFG_MAX_LEN, x)
/* DEV10G:MAC_CFG_STATUS:MAC_ADV_CHK_CFG */
-#define DEV25G_MAC_ADV_CHK_CFG(t) __REG(TARGET_DEV25G, t, 8, 0, 0, 1, 60, 28, 0, 1, 4)
+#define DEV25G_MAC_ADV_CHK_CFG(t) __REG(TARGET_DEV25G,\
+ t, 8, 0, 0, 1, 60, 28, 0, 1, 4)
#define DEV25G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA BIT(24)
#define DEV25G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA_SET(x)\
@@ -1819,7 +2652,8 @@ enum sparx5_target {
FIELD_GET(DEV25G_MAC_ADV_CHK_CFG_INR_ERR_ENA, x)
/* DEV10G:DEV_CFG_STATUS:DEV_RST_CTRL */
-#define DEV25G_DEV_RST_CTRL(t) __REG(TARGET_DEV25G, t, 8, 436, 0, 1, 52, 0, 0, 1, 4)
+#define DEV25G_DEV_RST_CTRL(t) __REG(TARGET_DEV25G,\
+ t, 8, 436, 0, 1, 52, 0, 0, 1, 4)
#define DEV25G_DEV_RST_CTRL_PARDET_MODE_ENA BIT(28)
#define DEV25G_DEV_RST_CTRL_PARDET_MODE_ENA_SET(x)\
@@ -1876,7 +2710,8 @@ enum sparx5_target {
FIELD_GET(DEV25G_DEV_RST_CTRL_MAC_RX_RST, x)
/* DEV10G:PCS25G_CFG_STATUS:PCS25G_CFG */
-#define DEV25G_PCS25G_CFG(t) __REG(TARGET_DEV25G, t, 8, 488, 0, 1, 32, 0, 0, 1, 4)
+#define DEV25G_PCS25G_CFG(t) __REG(TARGET_DEV25G,\
+ t, 8, 488, 0, 1, 32, 0, 0, 1, 4)
#define DEV25G_PCS25G_CFG_PCS25G_ENA BIT(0)
#define DEV25G_PCS25G_CFG_PCS25G_ENA_SET(x)\
@@ -1885,7 +2720,8 @@ enum sparx5_target {
FIELD_GET(DEV25G_PCS25G_CFG_PCS25G_ENA, x)
/* DEV10G:PCS25G_CFG_STATUS:PCS25G_SD_CFG */
-#define DEV25G_PCS25G_SD_CFG(t) __REG(TARGET_DEV25G, t, 8, 488, 0, 1, 32, 4, 0, 1, 4)
+#define DEV25G_PCS25G_SD_CFG(t) __REG(TARGET_DEV25G,\
+ t, 8, 488, 0, 1, 32, 4, 0, 1, 4)
#define DEV25G_PCS25G_SD_CFG_SD_SEL BIT(8)
#define DEV25G_PCS25G_SD_CFG_SD_SEL_SET(x)\
@@ -1906,7 +2742,8 @@ enum sparx5_target {
FIELD_GET(DEV25G_PCS25G_SD_CFG_SD_ENA, x)
/* DEV1G:DEV_CFG_STATUS:DEV_RST_CTRL */
-#define DEV2G5_DEV_RST_CTRL(t) __REG(TARGET_DEV2G5, t, 65, 0, 0, 1, 36, 0, 0, 1, 4)
+#define DEV2G5_DEV_RST_CTRL(t) __REG(TARGET_DEV2G5,\
+ t, 65, 0, 0, 1, 36, 0, 0, 1, 4)
#define DEV2G5_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS BIT(23)
#define DEV2G5_DEV_RST_CTRL_USXGMII_OSET_FILTER_DIS_SET(x)\
@@ -1957,7 +2794,8 @@ enum sparx5_target {
FIELD_GET(DEV2G5_DEV_RST_CTRL_MAC_RX_RST, x)
/* DEV1G:MAC_CFG_STATUS:MAC_ENA_CFG */
-#define DEV2G5_MAC_ENA_CFG(t) __REG(TARGET_DEV2G5, t, 65, 52, 0, 1, 36, 0, 0, 1, 4)
+#define DEV2G5_MAC_ENA_CFG(t) __REG(TARGET_DEV2G5,\
+ t, 65, 52, 0, 1, 36, 0, 0, 1, 4)
#define DEV2G5_MAC_ENA_CFG_RX_ENA BIT(4)
#define DEV2G5_MAC_ENA_CFG_RX_ENA_SET(x)\
@@ -1972,7 +2810,8 @@ enum sparx5_target {
FIELD_GET(DEV2G5_MAC_ENA_CFG_TX_ENA, x)
/* DEV1G:MAC_CFG_STATUS:MAC_MODE_CFG */
-#define DEV2G5_MAC_MODE_CFG(t) __REG(TARGET_DEV2G5, t, 65, 52, 0, 1, 36, 4, 0, 1, 4)
+#define DEV2G5_MAC_MODE_CFG(t) __REG(TARGET_DEV2G5,\
+ t, 65, 52, 0, 1, 36, 4, 0, 1, 4)
#define DEV2G5_MAC_MODE_CFG_FC_WORD_SYNC_ENA BIT(8)
#define DEV2G5_MAC_MODE_CFG_FC_WORD_SYNC_ENA_SET(x)\
@@ -1993,7 +2832,8 @@ enum sparx5_target {
FIELD_GET(DEV2G5_MAC_MODE_CFG_FDX_ENA, x)
/* DEV1G:MAC_CFG_STATUS:MAC_MAXLEN_CFG */
-#define DEV2G5_MAC_MAXLEN_CFG(t) __REG(TARGET_DEV2G5, t, 65, 52, 0, 1, 36, 8, 0, 1, 4)
+#define DEV2G5_MAC_MAXLEN_CFG(t) __REG(TARGET_DEV2G5,\
+ t, 65, 52, 0, 1, 36, 8, 0, 1, 4)
#define DEV2G5_MAC_MAXLEN_CFG_MAX_LEN GENMASK(15, 0)
#define DEV2G5_MAC_MAXLEN_CFG_MAX_LEN_SET(x)\
@@ -2002,7 +2842,8 @@ enum sparx5_target {
FIELD_GET(DEV2G5_MAC_MAXLEN_CFG_MAX_LEN, x)
/* DEV1G:MAC_CFG_STATUS:MAC_TAGS_CFG */
-#define DEV2G5_MAC_TAGS_CFG(t) __REG(TARGET_DEV2G5, t, 65, 52, 0, 1, 36, 12, 0, 1, 4)
+#define DEV2G5_MAC_TAGS_CFG(t) __REG(TARGET_DEV2G5,\
+ t, 65, 52, 0, 1, 36, 12, 0, 1, 4)
#define DEV2G5_MAC_TAGS_CFG_TAG_ID GENMASK(31, 16)
#define DEV2G5_MAC_TAGS_CFG_TAG_ID_SET(x)\
@@ -2029,7 +2870,8 @@ enum sparx5_target {
FIELD_GET(DEV2G5_MAC_TAGS_CFG_VLAN_AWR_ENA, x)
/* DEV1G:MAC_CFG_STATUS:MAC_TAGS_CFG2 */
-#define DEV2G5_MAC_TAGS_CFG2(t) __REG(TARGET_DEV2G5, t, 65, 52, 0, 1, 36, 16, 0, 1, 4)
+#define DEV2G5_MAC_TAGS_CFG2(t) __REG(TARGET_DEV2G5,\
+ t, 65, 52, 0, 1, 36, 16, 0, 1, 4)
#define DEV2G5_MAC_TAGS_CFG2_TAG_ID3 GENMASK(31, 16)
#define DEV2G5_MAC_TAGS_CFG2_TAG_ID3_SET(x)\
@@ -2044,7 +2886,8 @@ enum sparx5_target {
FIELD_GET(DEV2G5_MAC_TAGS_CFG2_TAG_ID2, x)
/* DEV1G:MAC_CFG_STATUS:MAC_ADV_CHK_CFG */
-#define DEV2G5_MAC_ADV_CHK_CFG(t) __REG(TARGET_DEV2G5, t, 65, 52, 0, 1, 36, 20, 0, 1, 4)
+#define DEV2G5_MAC_ADV_CHK_CFG(t) __REG(TARGET_DEV2G5,\
+ t, 65, 52, 0, 1, 36, 20, 0, 1, 4)
#define DEV2G5_MAC_ADV_CHK_CFG_LEN_DROP_ENA BIT(0)
#define DEV2G5_MAC_ADV_CHK_CFG_LEN_DROP_ENA_SET(x)\
@@ -2053,7 +2896,8 @@ enum sparx5_target {
FIELD_GET(DEV2G5_MAC_ADV_CHK_CFG_LEN_DROP_ENA, x)
/* DEV1G:MAC_CFG_STATUS:MAC_IFG_CFG */
-#define DEV2G5_MAC_IFG_CFG(t) __REG(TARGET_DEV2G5, t, 65, 52, 0, 1, 36, 24, 0, 1, 4)
+#define DEV2G5_MAC_IFG_CFG(t) __REG(TARGET_DEV2G5,\
+ t, 65, 52, 0, 1, 36, 24, 0, 1, 4)
#define DEV2G5_MAC_IFG_CFG_RESTORE_OLD_IPG_CHECK BIT(17)
#define DEV2G5_MAC_IFG_CFG_RESTORE_OLD_IPG_CHECK_SET(x)\
@@ -2080,7 +2924,8 @@ enum sparx5_target {
FIELD_GET(DEV2G5_MAC_IFG_CFG_RX_IFG1, x)
/* DEV1G:MAC_CFG_STATUS:MAC_HDX_CFG */
-#define DEV2G5_MAC_HDX_CFG(t) __REG(TARGET_DEV2G5, t, 65, 52, 0, 1, 36, 28, 0, 1, 4)
+#define DEV2G5_MAC_HDX_CFG(t) __REG(TARGET_DEV2G5,\
+ t, 65, 52, 0, 1, 36, 28, 0, 1, 4)
#define DEV2G5_MAC_HDX_CFG_BYPASS_COL_SYNC BIT(26)
#define DEV2G5_MAC_HDX_CFG_BYPASS_COL_SYNC_SET(x)\
@@ -2113,7 +2958,8 @@ enum sparx5_target {
FIELD_GET(DEV2G5_MAC_HDX_CFG_LATE_COL_POS, x)
/* DEV1G:PCS1G_CFG_STATUS:PCS1G_CFG */
-#define DEV2G5_PCS1G_CFG(t) __REG(TARGET_DEV2G5, t, 65, 88, 0, 1, 68, 0, 0, 1, 4)
+#define DEV2G5_PCS1G_CFG(t) __REG(TARGET_DEV2G5,\
+ t, 65, 88, 0, 1, 68, 0, 0, 1, 4)
#define DEV2G5_PCS1G_CFG_LINK_STATUS_TYPE BIT(4)
#define DEV2G5_PCS1G_CFG_LINK_STATUS_TYPE_SET(x)\
@@ -2134,7 +2980,8 @@ enum sparx5_target {
FIELD_GET(DEV2G5_PCS1G_CFG_PCS_ENA, x)
/* DEV1G:PCS1G_CFG_STATUS:PCS1G_MODE_CFG */
-#define DEV2G5_PCS1G_MODE_CFG(t) __REG(TARGET_DEV2G5, t, 65, 88, 0, 1, 68, 4, 0, 1, 4)
+#define DEV2G5_PCS1G_MODE_CFG(t) __REG(TARGET_DEV2G5,\
+ t, 65, 88, 0, 1, 68, 4, 0, 1, 4)
#define DEV2G5_PCS1G_MODE_CFG_UNIDIR_MODE_ENA BIT(4)
#define DEV2G5_PCS1G_MODE_CFG_UNIDIR_MODE_ENA_SET(x)\
@@ -2155,7 +3002,8 @@ enum sparx5_target {
FIELD_GET(DEV2G5_PCS1G_MODE_CFG_SGMII_MODE_ENA, x)
/* DEV1G:PCS1G_CFG_STATUS:PCS1G_SD_CFG */
-#define DEV2G5_PCS1G_SD_CFG(t) __REG(TARGET_DEV2G5, t, 65, 88, 0, 1, 68, 8, 0, 1, 4)
+#define DEV2G5_PCS1G_SD_CFG(t) __REG(TARGET_DEV2G5,\
+ t, 65, 88, 0, 1, 68, 8, 0, 1, 4)
#define DEV2G5_PCS1G_SD_CFG_SD_SEL BIT(8)
#define DEV2G5_PCS1G_SD_CFG_SD_SEL_SET(x)\
@@ -2176,7 +3024,8 @@ enum sparx5_target {
FIELD_GET(DEV2G5_PCS1G_SD_CFG_SD_ENA, x)
/* DEV1G:PCS1G_CFG_STATUS:PCS1G_ANEG_CFG */
-#define DEV2G5_PCS1G_ANEG_CFG(t) __REG(TARGET_DEV2G5, t, 65, 88, 0, 1, 68, 12, 0, 1, 4)
+#define DEV2G5_PCS1G_ANEG_CFG(t) __REG(TARGET_DEV2G5,\
+ t, 65, 88, 0, 1, 68, 12, 0, 1, 4)
#define DEV2G5_PCS1G_ANEG_CFG_ADV_ABILITY GENMASK(31, 16)
#define DEV2G5_PCS1G_ANEG_CFG_ADV_ABILITY_SET(x)\
@@ -2203,7 +3052,8 @@ enum sparx5_target {
FIELD_GET(DEV2G5_PCS1G_ANEG_CFG_ANEG_ENA, x)
/* DEV1G:PCS1G_CFG_STATUS:PCS1G_LB_CFG */
-#define DEV2G5_PCS1G_LB_CFG(t) __REG(TARGET_DEV2G5, t, 65, 88, 0, 1, 68, 20, 0, 1, 4)
+#define DEV2G5_PCS1G_LB_CFG(t) __REG(TARGET_DEV2G5,\
+ t, 65, 88, 0, 1, 68, 20, 0, 1, 4)
#define DEV2G5_PCS1G_LB_CFG_RA_ENA BIT(4)
#define DEV2G5_PCS1G_LB_CFG_RA_ENA_SET(x)\
@@ -2224,7 +3074,8 @@ enum sparx5_target {
FIELD_GET(DEV2G5_PCS1G_LB_CFG_TBI_HOST_LB_ENA, x)
/* DEV1G:PCS1G_CFG_STATUS:PCS1G_ANEG_STATUS */
-#define DEV2G5_PCS1G_ANEG_STATUS(t) __REG(TARGET_DEV2G5, t, 65, 88, 0, 1, 68, 32, 0, 1, 4)
+#define DEV2G5_PCS1G_ANEG_STATUS(t) __REG(TARGET_DEV2G5,\
+ t, 65, 88, 0, 1, 68, 32, 0, 1, 4)
#define DEV2G5_PCS1G_ANEG_STATUS_LP_ADV_ABILITY GENMASK(31, 16)
#define DEV2G5_PCS1G_ANEG_STATUS_LP_ADV_ABILITY_SET(x)\
@@ -2251,7 +3102,8 @@ enum sparx5_target {
FIELD_GET(DEV2G5_PCS1G_ANEG_STATUS_ANEG_COMPLETE, x)
/* DEV1G:PCS1G_CFG_STATUS:PCS1G_LINK_STATUS */
-#define DEV2G5_PCS1G_LINK_STATUS(t) __REG(TARGET_DEV2G5, t, 65, 88, 0, 1, 68, 40, 0, 1, 4)
+#define DEV2G5_PCS1G_LINK_STATUS(t) __REG(TARGET_DEV2G5,\
+ t, 65, 88, 0, 1, 68, 40, 0, 1, 4)
#define DEV2G5_PCS1G_LINK_STATUS_DELAY_VAR GENMASK(15, 12)
#define DEV2G5_PCS1G_LINK_STATUS_DELAY_VAR_SET(x)\
@@ -2278,7 +3130,8 @@ enum sparx5_target {
FIELD_GET(DEV2G5_PCS1G_LINK_STATUS_SYNC_STATUS, x)
/* DEV1G:PCS1G_CFG_STATUS:PCS1G_STICKY */
-#define DEV2G5_PCS1G_STICKY(t) __REG(TARGET_DEV2G5, t, 65, 88, 0, 1, 68, 48, 0, 1, 4)
+#define DEV2G5_PCS1G_STICKY(t) __REG(TARGET_DEV2G5,\
+ t, 65, 88, 0, 1, 68, 48, 0, 1, 4)
#define DEV2G5_PCS1G_STICKY_LINK_DOWN_STICKY BIT(4)
#define DEV2G5_PCS1G_STICKY_LINK_DOWN_STICKY_SET(x)\
@@ -2293,7 +3146,8 @@ enum sparx5_target {
FIELD_GET(DEV2G5_PCS1G_STICKY_OUT_OF_SYNC_STICKY, x)
/* DEV1G:PCS_FX100_CONFIGURATION:PCS_FX100_CFG */
-#define DEV2G5_PCS_FX100_CFG(t) __REG(TARGET_DEV2G5, t, 65, 164, 0, 1, 4, 0, 0, 1, 4)
+#define DEV2G5_PCS_FX100_CFG(t) __REG(TARGET_DEV2G5,\
+ t, 65, 164, 0, 1, 4, 0, 0, 1, 4)
#define DEV2G5_PCS_FX100_CFG_SD_SEL BIT(26)
#define DEV2G5_PCS_FX100_CFG_SD_SEL_SET(x)\
@@ -2374,7 +3228,8 @@ enum sparx5_target {
FIELD_GET(DEV2G5_PCS_FX100_CFG_PCS_ENA, x)
/* DEV1G:PCS_FX100_STATUS:PCS_FX100_STATUS */
-#define DEV2G5_PCS_FX100_STATUS(t) __REG(TARGET_DEV2G5, t, 65, 168, 0, 1, 4, 0, 0, 1, 4)
+#define DEV2G5_PCS_FX100_STATUS(t) __REG(TARGET_DEV2G5,\
+ t, 65, 168, 0, 1, 4, 0, 0, 1, 4)
#define DEV2G5_PCS_FX100_STATUS_EDGE_POS_PTP GENMASK(11, 8)
#define DEV2G5_PCS_FX100_STATUS_EDGE_POS_PTP_SET(x)\
@@ -2425,7 +3280,8 @@ enum sparx5_target {
FIELD_GET(DEV2G5_PCS_FX100_STATUS_SYNC_STATUS, x)
/* DEV10G:MAC_CFG_STATUS:MAC_ENA_CFG */
-#define DEV5G_MAC_ENA_CFG(t) __REG(TARGET_DEV5G, t, 13, 0, 0, 1, 60, 0, 0, 1, 4)
+#define DEV5G_MAC_ENA_CFG(t) __REG(TARGET_DEV5G,\
+ t, 13, 0, 0, 1, 60, 0, 0, 1, 4)
#define DEV5G_MAC_ENA_CFG_RX_ENA BIT(4)
#define DEV5G_MAC_ENA_CFG_RX_ENA_SET(x)\
@@ -2440,7 +3296,8 @@ enum sparx5_target {
FIELD_GET(DEV5G_MAC_ENA_CFG_TX_ENA, x)
/* DEV10G:MAC_CFG_STATUS:MAC_MAXLEN_CFG */
-#define DEV5G_MAC_MAXLEN_CFG(t) __REG(TARGET_DEV5G, t, 13, 0, 0, 1, 60, 8, 0, 1, 4)
+#define DEV5G_MAC_MAXLEN_CFG(t) __REG(TARGET_DEV5G,\
+ t, 13, 0, 0, 1, 60, 8, 0, 1, 4)
#define DEV5G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK BIT(16)
#define DEV5G_MAC_MAXLEN_CFG_MAX_LEN_TAG_CHK_SET(x)\
@@ -2455,7 +3312,8 @@ enum sparx5_target {
FIELD_GET(DEV5G_MAC_MAXLEN_CFG_MAX_LEN, x)
/* DEV10G:MAC_CFG_STATUS:MAC_ADV_CHK_CFG */
-#define DEV5G_MAC_ADV_CHK_CFG(t) __REG(TARGET_DEV5G, t, 13, 0, 0, 1, 60, 28, 0, 1, 4)
+#define DEV5G_MAC_ADV_CHK_CFG(t) __REG(TARGET_DEV5G,\
+ t, 13, 0, 0, 1, 60, 28, 0, 1, 4)
#define DEV5G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA BIT(24)
#define DEV5G_MAC_ADV_CHK_CFG_EXT_EOP_CHK_ENA_SET(x)\
@@ -2500,142 +3358,188 @@ enum sparx5_target {
FIELD_GET(DEV5G_MAC_ADV_CHK_CFG_INR_ERR_ENA, x)
/* DEV10G:DEV_STATISTICS_32BIT:RX_SYMBOL_ERR_CNT */
-#define DEV5G_RX_SYMBOL_ERR_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 0, 0, 1, 4)
+#define DEV5G_RX_SYMBOL_ERR_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 0, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:RX_PAUSE_CNT */
-#define DEV5G_RX_PAUSE_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 4, 0, 1, 4)
+#define DEV5G_RX_PAUSE_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 4, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:RX_UNSUP_OPCODE_CNT */
-#define DEV5G_RX_UNSUP_OPCODE_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 8, 0, 1, 4)
+#define DEV5G_RX_UNSUP_OPCODE_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 8, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:RX_UC_CNT */
-#define DEV5G_RX_UC_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 12, 0, 1, 4)
+#define DEV5G_RX_UC_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 12, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:RX_MC_CNT */
-#define DEV5G_RX_MC_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 16, 0, 1, 4)
+#define DEV5G_RX_MC_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 16, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:RX_BC_CNT */
-#define DEV5G_RX_BC_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 20, 0, 1, 4)
+#define DEV5G_RX_BC_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 20, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:RX_CRC_ERR_CNT */
-#define DEV5G_RX_CRC_ERR_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 24, 0, 1, 4)
+#define DEV5G_RX_CRC_ERR_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 24, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:RX_UNDERSIZE_CNT */
-#define DEV5G_RX_UNDERSIZE_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 28, 0, 1, 4)
+#define DEV5G_RX_UNDERSIZE_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 28, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:RX_FRAGMENTS_CNT */
-#define DEV5G_RX_FRAGMENTS_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 32, 0, 1, 4)
+#define DEV5G_RX_FRAGMENTS_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 32, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:RX_IN_RANGE_LEN_ERR_CNT */
-#define DEV5G_RX_IN_RANGE_LEN_ERR_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 36, 0, 1, 4)
+#define DEV5G_RX_IN_RANGE_LEN_ERR_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 36, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:RX_OUT_OF_RANGE_LEN_ERR_CNT */
-#define DEV5G_RX_OUT_OF_RANGE_LEN_ERR_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 40, 0, 1, 4)
+#define DEV5G_RX_OUT_OF_RANGE_LEN_ERR_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 40, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:RX_OVERSIZE_CNT */
-#define DEV5G_RX_OVERSIZE_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 44, 0, 1, 4)
+#define DEV5G_RX_OVERSIZE_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 44, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:RX_JABBERS_CNT */
-#define DEV5G_RX_JABBERS_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 48, 0, 1, 4)
+#define DEV5G_RX_JABBERS_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 48, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:RX_SIZE64_CNT */
-#define DEV5G_RX_SIZE64_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 52, 0, 1, 4)
+#define DEV5G_RX_SIZE64_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 52, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:RX_SIZE65TO127_CNT */
-#define DEV5G_RX_SIZE65TO127_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 56, 0, 1, 4)
+#define DEV5G_RX_SIZE65TO127_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 56, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:RX_SIZE128TO255_CNT */
-#define DEV5G_RX_SIZE128TO255_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 60, 0, 1, 4)
+#define DEV5G_RX_SIZE128TO255_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 60, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:RX_SIZE256TO511_CNT */
-#define DEV5G_RX_SIZE256TO511_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 64, 0, 1, 4)
+#define DEV5G_RX_SIZE256TO511_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 64, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:RX_SIZE512TO1023_CNT */
-#define DEV5G_RX_SIZE512TO1023_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 68, 0, 1, 4)
+#define DEV5G_RX_SIZE512TO1023_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 68, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:RX_SIZE1024TO1518_CNT */
-#define DEV5G_RX_SIZE1024TO1518_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 72, 0, 1, 4)
+#define DEV5G_RX_SIZE1024TO1518_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 72, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:RX_SIZE1519TOMAX_CNT */
-#define DEV5G_RX_SIZE1519TOMAX_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 76, 0, 1, 4)
+#define DEV5G_RX_SIZE1519TOMAX_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 76, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:RX_IPG_SHRINK_CNT */
-#define DEV5G_RX_IPG_SHRINK_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 80, 0, 1, 4)
+#define DEV5G_RX_IPG_SHRINK_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 80, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:TX_PAUSE_CNT */
-#define DEV5G_TX_PAUSE_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 84, 0, 1, 4)
+#define DEV5G_TX_PAUSE_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 84, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:TX_UC_CNT */
-#define DEV5G_TX_UC_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 88, 0, 1, 4)
+#define DEV5G_TX_UC_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 88, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:TX_MC_CNT */
-#define DEV5G_TX_MC_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 92, 0, 1, 4)
+#define DEV5G_TX_MC_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 92, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:TX_BC_CNT */
-#define DEV5G_TX_BC_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 96, 0, 1, 4)
+#define DEV5G_TX_BC_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 96, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:TX_SIZE64_CNT */
-#define DEV5G_TX_SIZE64_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 100, 0, 1, 4)
+#define DEV5G_TX_SIZE64_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 100, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:TX_SIZE65TO127_CNT */
-#define DEV5G_TX_SIZE65TO127_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 104, 0, 1, 4)
+#define DEV5G_TX_SIZE65TO127_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 104, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:TX_SIZE128TO255_CNT */
-#define DEV5G_TX_SIZE128TO255_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 108, 0, 1, 4)
+#define DEV5G_TX_SIZE128TO255_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 108, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:TX_SIZE256TO511_CNT */
-#define DEV5G_TX_SIZE256TO511_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 112, 0, 1, 4)
+#define DEV5G_TX_SIZE256TO511_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 112, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:TX_SIZE512TO1023_CNT */
-#define DEV5G_TX_SIZE512TO1023_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 116, 0, 1, 4)
+#define DEV5G_TX_SIZE512TO1023_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 116, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:TX_SIZE1024TO1518_CNT */
-#define DEV5G_TX_SIZE1024TO1518_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 120, 0, 1, 4)
+#define DEV5G_TX_SIZE1024TO1518_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 120, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:TX_SIZE1519TOMAX_CNT */
-#define DEV5G_TX_SIZE1519TOMAX_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 124, 0, 1, 4)
+#define DEV5G_TX_SIZE1519TOMAX_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 124, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:RX_ALIGNMENT_LOST_CNT */
-#define DEV5G_RX_ALIGNMENT_LOST_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 128, 0, 1, 4)
+#define DEV5G_RX_ALIGNMENT_LOST_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 128, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:RX_TAGGED_FRMS_CNT */
-#define DEV5G_RX_TAGGED_FRMS_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 132, 0, 1, 4)
+#define DEV5G_RX_TAGGED_FRMS_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 132, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:RX_UNTAGGED_FRMS_CNT */
-#define DEV5G_RX_UNTAGGED_FRMS_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 136, 0, 1, 4)
+#define DEV5G_RX_UNTAGGED_FRMS_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 136, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:TX_TAGGED_FRMS_CNT */
-#define DEV5G_TX_TAGGED_FRMS_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 140, 0, 1, 4)
+#define DEV5G_TX_TAGGED_FRMS_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 140, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:TX_UNTAGGED_FRMS_CNT */
-#define DEV5G_TX_UNTAGGED_FRMS_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 144, 0, 1, 4)
+#define DEV5G_TX_UNTAGGED_FRMS_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 144, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SYMBOL_ERR_CNT */
-#define DEV5G_PMAC_RX_SYMBOL_ERR_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 148, 0, 1, 4)
+#define DEV5G_PMAC_RX_SYMBOL_ERR_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 148, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_PAUSE_CNT */
-#define DEV5G_PMAC_RX_PAUSE_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 152, 0, 1, 4)
+#define DEV5G_PMAC_RX_PAUSE_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 152, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_UNSUP_OPCODE_CNT */
-#define DEV5G_PMAC_RX_UNSUP_OPCODE_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 156, 0, 1, 4)
+#define DEV5G_PMAC_RX_UNSUP_OPCODE_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 156, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_UC_CNT */
-#define DEV5G_PMAC_RX_UC_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 160, 0, 1, 4)
+#define DEV5G_PMAC_RX_UC_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 160, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_MC_CNT */
-#define DEV5G_PMAC_RX_MC_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 164, 0, 1, 4)
+#define DEV5G_PMAC_RX_MC_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 164, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_BC_CNT */
-#define DEV5G_PMAC_RX_BC_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 168, 0, 1, 4)
+#define DEV5G_PMAC_RX_BC_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 168, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_CRC_ERR_CNT */
-#define DEV5G_PMAC_RX_CRC_ERR_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 172, 0, 1, 4)
+#define DEV5G_PMAC_RX_CRC_ERR_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 172, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_UNDERSIZE_CNT */
-#define DEV5G_PMAC_RX_UNDERSIZE_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 176, 0, 1, 4)
+#define DEV5G_PMAC_RX_UNDERSIZE_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 176, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_FRAGMENTS_CNT */
-#define DEV5G_PMAC_RX_FRAGMENTS_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 180, 0, 1, 4)
+#define DEV5G_PMAC_RX_FRAGMENTS_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 180, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_IN_RANGE_LEN_ERR_CNT */
#define DEV5G_PMAC_RX_IN_RANGE_LEN_ERR_CNT(t) __REG(TARGET_DEV5G,\
@@ -2646,100 +3550,132 @@ enum sparx5_target {
t, 13, 60, 0, 1, 312, 188, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_OVERSIZE_CNT */
-#define DEV5G_PMAC_RX_OVERSIZE_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 192, 0, 1, 4)
+#define DEV5G_PMAC_RX_OVERSIZE_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 192, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_JABBERS_CNT */
-#define DEV5G_PMAC_RX_JABBERS_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 196, 0, 1, 4)
+#define DEV5G_PMAC_RX_JABBERS_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 196, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SIZE64_CNT */
-#define DEV5G_PMAC_RX_SIZE64_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 200, 0, 1, 4)
+#define DEV5G_PMAC_RX_SIZE64_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 200, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SIZE65TO127_CNT */
-#define DEV5G_PMAC_RX_SIZE65TO127_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 204, 0, 1, 4)
+#define DEV5G_PMAC_RX_SIZE65TO127_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 204, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SIZE128TO255_CNT */
-#define DEV5G_PMAC_RX_SIZE128TO255_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 208, 0, 1, 4)
+#define DEV5G_PMAC_RX_SIZE128TO255_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 208, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SIZE256TO511_CNT */
-#define DEV5G_PMAC_RX_SIZE256TO511_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 212, 0, 1, 4)
+#define DEV5G_PMAC_RX_SIZE256TO511_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 212, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SIZE512TO1023_CNT */
-#define DEV5G_PMAC_RX_SIZE512TO1023_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 216, 0, 1, 4)
+#define DEV5G_PMAC_RX_SIZE512TO1023_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 216, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SIZE1024TO1518_CNT */
-#define DEV5G_PMAC_RX_SIZE1024TO1518_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 220, 0, 1, 4)
+#define DEV5G_PMAC_RX_SIZE1024TO1518_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 220, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_SIZE1519TOMAX_CNT */
-#define DEV5G_PMAC_RX_SIZE1519TOMAX_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 224, 0, 1, 4)
+#define DEV5G_PMAC_RX_SIZE1519TOMAX_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 224, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_PAUSE_CNT */
-#define DEV5G_PMAC_TX_PAUSE_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 228, 0, 1, 4)
+#define DEV5G_PMAC_TX_PAUSE_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 228, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_UC_CNT */
-#define DEV5G_PMAC_TX_UC_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 232, 0, 1, 4)
+#define DEV5G_PMAC_TX_UC_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 232, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_MC_CNT */
-#define DEV5G_PMAC_TX_MC_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 236, 0, 1, 4)
+#define DEV5G_PMAC_TX_MC_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 236, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_BC_CNT */
-#define DEV5G_PMAC_TX_BC_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 240, 0, 1, 4)
+#define DEV5G_PMAC_TX_BC_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 240, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_SIZE64_CNT */
-#define DEV5G_PMAC_TX_SIZE64_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 244, 0, 1, 4)
+#define DEV5G_PMAC_TX_SIZE64_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 244, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_SIZE65TO127_CNT */
-#define DEV5G_PMAC_TX_SIZE65TO127_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 248, 0, 1, 4)
+#define DEV5G_PMAC_TX_SIZE65TO127_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 248, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_SIZE128TO255_CNT */
-#define DEV5G_PMAC_TX_SIZE128TO255_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 252, 0, 1, 4)
+#define DEV5G_PMAC_TX_SIZE128TO255_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 252, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_SIZE256TO511_CNT */
-#define DEV5G_PMAC_TX_SIZE256TO511_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 256, 0, 1, 4)
+#define DEV5G_PMAC_TX_SIZE256TO511_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 256, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_SIZE512TO1023_CNT */
-#define DEV5G_PMAC_TX_SIZE512TO1023_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 260, 0, 1, 4)
+#define DEV5G_PMAC_TX_SIZE512TO1023_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 260, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_SIZE1024TO1518_CNT */
-#define DEV5G_PMAC_TX_SIZE1024TO1518_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 264, 0, 1, 4)
+#define DEV5G_PMAC_TX_SIZE1024TO1518_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 264, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:PMAC_TX_SIZE1519TOMAX_CNT */
-#define DEV5G_PMAC_TX_SIZE1519TOMAX_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 268, 0, 1, 4)
+#define DEV5G_PMAC_TX_SIZE1519TOMAX_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 268, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_ALIGNMENT_LOST_CNT */
-#define DEV5G_PMAC_RX_ALIGNMENT_LOST_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 272, 0, 1, 4)
+#define DEV5G_PMAC_RX_ALIGNMENT_LOST_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 272, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:MM_RX_ASSEMBLY_ERR_CNT */
-#define DEV5G_MM_RX_ASSEMBLY_ERR_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 276, 0, 1, 4)
+#define DEV5G_MM_RX_ASSEMBLY_ERR_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 276, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:MM_RX_SMD_ERR_CNT */
-#define DEV5G_MM_RX_SMD_ERR_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 280, 0, 1, 4)
+#define DEV5G_MM_RX_SMD_ERR_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 280, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:MM_RX_ASSEMBLY_OK_CNT */
-#define DEV5G_MM_RX_ASSEMBLY_OK_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 284, 0, 1, 4)
+#define DEV5G_MM_RX_ASSEMBLY_OK_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 284, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:MM_RX_MERGE_FRAG_CNT */
-#define DEV5G_MM_RX_MERGE_FRAG_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 288, 0, 1, 4)
+#define DEV5G_MM_RX_MERGE_FRAG_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 288, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:MM_TX_PFRAGMENT_CNT */
-#define DEV5G_MM_TX_PFRAGMENT_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 292, 0, 1, 4)
+#define DEV5G_MM_TX_PFRAGMENT_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 292, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:RX_HIH_CKSM_ERR_CNT */
-#define DEV5G_RX_HIH_CKSM_ERR_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 296, 0, 1, 4)
+#define DEV5G_RX_HIH_CKSM_ERR_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 296, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:RX_XGMII_PROT_ERR_CNT */
-#define DEV5G_RX_XGMII_PROT_ERR_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 300, 0, 1, 4)
+#define DEV5G_RX_XGMII_PROT_ERR_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 300, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_HIH_CKSM_ERR_CNT */
-#define DEV5G_PMAC_RX_HIH_CKSM_ERR_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 304, 0, 1, 4)
+#define DEV5G_PMAC_RX_HIH_CKSM_ERR_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 304, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_32BIT:PMAC_RX_XGMII_PROT_ERR_CNT */
-#define DEV5G_PMAC_RX_XGMII_PROT_ERR_CNT(t) __REG(TARGET_DEV5G, t, 13, 60, 0, 1, 312, 308, 0, 1, 4)
+#define DEV5G_PMAC_RX_XGMII_PROT_ERR_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 60, 0, 1, 312, 308, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_40BIT:RX_IN_BYTES_CNT */
-#define DEV5G_RX_IN_BYTES_CNT(t) __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 0, 0, 1, 4)
+#define DEV5G_RX_IN_BYTES_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 372, 0, 1, 64, 0, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_40BIT:RX_IN_BYTES_MSB_CNT */
-#define DEV5G_RX_IN_BYTES_MSB_CNT(t) __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 4, 0, 1, 4)
+#define DEV5G_RX_IN_BYTES_MSB_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 372, 0, 1, 64, 4, 0, 1, 4)
#define DEV5G_RX_IN_BYTES_MSB_CNT_RX_IN_BYTES_MSB_CNT GENMASK(7, 0)
#define DEV5G_RX_IN_BYTES_MSB_CNT_RX_IN_BYTES_MSB_CNT_SET(x)\
@@ -2748,10 +3684,12 @@ enum sparx5_target {
FIELD_GET(DEV5G_RX_IN_BYTES_MSB_CNT_RX_IN_BYTES_MSB_CNT, x)
/* DEV10G:DEV_STATISTICS_40BIT:RX_OK_BYTES_CNT */
-#define DEV5G_RX_OK_BYTES_CNT(t) __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 8, 0, 1, 4)
+#define DEV5G_RX_OK_BYTES_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 372, 0, 1, 64, 8, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_40BIT:RX_OK_BYTES_MSB_CNT */
-#define DEV5G_RX_OK_BYTES_MSB_CNT(t) __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 12, 0, 1, 4)
+#define DEV5G_RX_OK_BYTES_MSB_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 372, 0, 1, 64, 12, 0, 1, 4)
#define DEV5G_RX_OK_BYTES_MSB_CNT_RX_OK_BYTES_MSB_CNT GENMASK(7, 0)
#define DEV5G_RX_OK_BYTES_MSB_CNT_RX_OK_BYTES_MSB_CNT_SET(x)\
@@ -2760,10 +3698,12 @@ enum sparx5_target {
FIELD_GET(DEV5G_RX_OK_BYTES_MSB_CNT_RX_OK_BYTES_MSB_CNT, x)
/* DEV10G:DEV_STATISTICS_40BIT:RX_BAD_BYTES_CNT */
-#define DEV5G_RX_BAD_BYTES_CNT(t) __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 16, 0, 1, 4)
+#define DEV5G_RX_BAD_BYTES_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 372, 0, 1, 64, 16, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_40BIT:RX_BAD_BYTES_MSB_CNT */
-#define DEV5G_RX_BAD_BYTES_MSB_CNT(t) __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 20, 0, 1, 4)
+#define DEV5G_RX_BAD_BYTES_MSB_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 372, 0, 1, 64, 20, 0, 1, 4)
#define DEV5G_RX_BAD_BYTES_MSB_CNT_RX_BAD_BYTES_MSB_CNT GENMASK(7, 0)
#define DEV5G_RX_BAD_BYTES_MSB_CNT_RX_BAD_BYTES_MSB_CNT_SET(x)\
@@ -2772,10 +3712,12 @@ enum sparx5_target {
FIELD_GET(DEV5G_RX_BAD_BYTES_MSB_CNT_RX_BAD_BYTES_MSB_CNT, x)
/* DEV10G:DEV_STATISTICS_40BIT:TX_OUT_BYTES_CNT */
-#define DEV5G_TX_OUT_BYTES_CNT(t) __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 24, 0, 1, 4)
+#define DEV5G_TX_OUT_BYTES_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 372, 0, 1, 64, 24, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_40BIT:TX_OUT_BYTES_MSB_CNT */
-#define DEV5G_TX_OUT_BYTES_MSB_CNT(t) __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 28, 0, 1, 4)
+#define DEV5G_TX_OUT_BYTES_MSB_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 372, 0, 1, 64, 28, 0, 1, 4)
#define DEV5G_TX_OUT_BYTES_MSB_CNT_TX_OUT_BYTES_MSB_CNT GENMASK(7, 0)
#define DEV5G_TX_OUT_BYTES_MSB_CNT_TX_OUT_BYTES_MSB_CNT_SET(x)\
@@ -2784,10 +3726,12 @@ enum sparx5_target {
FIELD_GET(DEV5G_TX_OUT_BYTES_MSB_CNT_TX_OUT_BYTES_MSB_CNT, x)
/* DEV10G:DEV_STATISTICS_40BIT:TX_OK_BYTES_CNT */
-#define DEV5G_TX_OK_BYTES_CNT(t) __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 32, 0, 1, 4)
+#define DEV5G_TX_OK_BYTES_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 372, 0, 1, 64, 32, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_40BIT:TX_OK_BYTES_MSB_CNT */
-#define DEV5G_TX_OK_BYTES_MSB_CNT(t) __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 36, 0, 1, 4)
+#define DEV5G_TX_OK_BYTES_MSB_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 372, 0, 1, 64, 36, 0, 1, 4)
#define DEV5G_TX_OK_BYTES_MSB_CNT_TX_OK_BYTES_MSB_CNT GENMASK(7, 0)
#define DEV5G_TX_OK_BYTES_MSB_CNT_TX_OK_BYTES_MSB_CNT_SET(x)\
@@ -2796,10 +3740,12 @@ enum sparx5_target {
FIELD_GET(DEV5G_TX_OK_BYTES_MSB_CNT_TX_OK_BYTES_MSB_CNT, x)
/* DEV10G:DEV_STATISTICS_40BIT:PMAC_RX_OK_BYTES_CNT */
-#define DEV5G_PMAC_RX_OK_BYTES_CNT(t) __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 40, 0, 1, 4)
+#define DEV5G_PMAC_RX_OK_BYTES_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 372, 0, 1, 64, 40, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_40BIT:PMAC_RX_OK_BYTES_MSB_CNT */
-#define DEV5G_PMAC_RX_OK_BYTES_MSB_CNT(t) __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 44, 0, 1, 4)
+#define DEV5G_PMAC_RX_OK_BYTES_MSB_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 372, 0, 1, 64, 44, 0, 1, 4)
#define DEV5G_PMAC_RX_OK_BYTES_MSB_CNT_PMAC_RX_OK_BYTES_MSB_CNT GENMASK(7, 0)
#define DEV5G_PMAC_RX_OK_BYTES_MSB_CNT_PMAC_RX_OK_BYTES_MSB_CNT_SET(x)\
@@ -2808,10 +3754,12 @@ enum sparx5_target {
FIELD_GET(DEV5G_PMAC_RX_OK_BYTES_MSB_CNT_PMAC_RX_OK_BYTES_MSB_CNT, x)
/* DEV10G:DEV_STATISTICS_40BIT:PMAC_RX_BAD_BYTES_CNT */
-#define DEV5G_PMAC_RX_BAD_BYTES_CNT(t) __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 48, 0, 1, 4)
+#define DEV5G_PMAC_RX_BAD_BYTES_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 372, 0, 1, 64, 48, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_40BIT:PMAC_RX_BAD_BYTES_MSB_CNT */
-#define DEV5G_PMAC_RX_BAD_BYTES_MSB_CNT(t) __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 52, 0, 1, 4)
+#define DEV5G_PMAC_RX_BAD_BYTES_MSB_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 372, 0, 1, 64, 52, 0, 1, 4)
#define DEV5G_PMAC_RX_BAD_BYTES_MSB_CNT_PMAC_RX_BAD_BYTES_MSB_CNT GENMASK(7, 0)
#define DEV5G_PMAC_RX_BAD_BYTES_MSB_CNT_PMAC_RX_BAD_BYTES_MSB_CNT_SET(x)\
@@ -2820,10 +3768,12 @@ enum sparx5_target {
FIELD_GET(DEV5G_PMAC_RX_BAD_BYTES_MSB_CNT_PMAC_RX_BAD_BYTES_MSB_CNT, x)
/* DEV10G:DEV_STATISTICS_40BIT:PMAC_TX_OK_BYTES_CNT */
-#define DEV5G_PMAC_TX_OK_BYTES_CNT(t) __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 56, 0, 1, 4)
+#define DEV5G_PMAC_TX_OK_BYTES_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 372, 0, 1, 64, 56, 0, 1, 4)
/* DEV10G:DEV_STATISTICS_40BIT:PMAC_TX_OK_BYTES_MSB_CNT */
-#define DEV5G_PMAC_TX_OK_BYTES_MSB_CNT(t) __REG(TARGET_DEV5G, t, 13, 372, 0, 1, 64, 60, 0, 1, 4)
+#define DEV5G_PMAC_TX_OK_BYTES_MSB_CNT(t) __REG(TARGET_DEV5G,\
+ t, 13, 372, 0, 1, 64, 60, 0, 1, 4)
#define DEV5G_PMAC_TX_OK_BYTES_MSB_CNT_PMAC_TX_OK_BYTES_MSB_CNT GENMASK(7, 0)
#define DEV5G_PMAC_TX_OK_BYTES_MSB_CNT_PMAC_TX_OK_BYTES_MSB_CNT_SET(x)\
@@ -2832,7 +3782,8 @@ enum sparx5_target {
FIELD_GET(DEV5G_PMAC_TX_OK_BYTES_MSB_CNT_PMAC_TX_OK_BYTES_MSB_CNT, x)
/* DEV10G:DEV_CFG_STATUS:DEV_RST_CTRL */
-#define DEV5G_DEV_RST_CTRL(t) __REG(TARGET_DEV5G, t, 13, 436, 0, 1, 52, 0, 0, 1, 4)
+#define DEV5G_DEV_RST_CTRL(t) __REG(TARGET_DEV5G,\
+ t, 13, 436, 0, 1, 52, 0, 0, 1, 4)
#define DEV5G_DEV_RST_CTRL_PARDET_MODE_ENA BIT(28)
#define DEV5G_DEV_RST_CTRL_PARDET_MODE_ENA_SET(x)\
@@ -2889,7 +3840,8 @@ enum sparx5_target {
FIELD_GET(DEV5G_DEV_RST_CTRL_MAC_RX_RST, x)
/* DSM:RAM_CTRL:RAM_INIT */
-#define DSM_RAM_INIT __REG(TARGET_DSM, 0, 1, 0, 0, 1, 4, 0, 0, 1, 4)
+#define DSM_RAM_INIT __REG(TARGET_DSM,\
+ 0, 1, 0, 0, 1, 4, 0, 0, 1, 4)
#define DSM_RAM_INIT_RAM_INIT BIT(1)
#define DSM_RAM_INIT_RAM_INIT_SET(x)\
@@ -2904,7 +3856,8 @@ enum sparx5_target {
FIELD_GET(DSM_RAM_INIT_RAM_CFG_HOOK, x)
/* DSM:CFG:BUF_CFG */
-#define DSM_BUF_CFG(r) __REG(TARGET_DSM, 0, 1, 20, 0, 1, 3528, 0, r, 67, 4)
+#define DSM_BUF_CFG(r) __REG(TARGET_DSM,\
+ 0, 1, 20, 0, 1, 3528, 0, r, 67, 4)
#define DSM_BUF_CFG_CSC_STAT_DIS BIT(13)
#define DSM_BUF_CFG_CSC_STAT_DIS_SET(x)\
@@ -2931,7 +3884,8 @@ enum sparx5_target {
FIELD_GET(DSM_BUF_CFG_UNDERFLOW_WATCHDOG_TIMEOUT, x)
/* DSM:CFG:DEV_TX_STOP_WM_CFG */
-#define DSM_DEV_TX_STOP_WM_CFG(r) __REG(TARGET_DSM, 0, 1, 20, 0, 1, 3528, 1360, r, 67, 4)
+#define DSM_DEV_TX_STOP_WM_CFG(r) __REG(TARGET_DSM,\
+ 0, 1, 20, 0, 1, 3528, 1360, r, 67, 4)
#define DSM_DEV_TX_STOP_WM_CFG_FAST_STARTUP_ENA BIT(9)
#define DSM_DEV_TX_STOP_WM_CFG_FAST_STARTUP_ENA_SET(x)\
@@ -2958,7 +3912,8 @@ enum sparx5_target {
FIELD_GET(DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR, x)
/* DSM:CFG:RX_PAUSE_CFG */
-#define DSM_RX_PAUSE_CFG(r) __REG(TARGET_DSM, 0, 1, 20, 0, 1, 3528, 1628, r, 67, 4)
+#define DSM_RX_PAUSE_CFG(r) __REG(TARGET_DSM,\
+ 0, 1, 20, 0, 1, 3528, 1628, r, 67, 4)
#define DSM_RX_PAUSE_CFG_RX_PAUSE_EN BIT(1)
#define DSM_RX_PAUSE_CFG_RX_PAUSE_EN_SET(x)\
@@ -2973,7 +3928,8 @@ enum sparx5_target {
FIELD_GET(DSM_RX_PAUSE_CFG_FC_OBEY_LOCAL, x)
/* DSM:CFG:MAC_CFG */
-#define DSM_MAC_CFG(r) __REG(TARGET_DSM, 0, 1, 20, 0, 1, 3528, 2432, r, 67, 4)
+#define DSM_MAC_CFG(r) __REG(TARGET_DSM,\
+ 0, 1, 20, 0, 1, 3528, 2432, r, 67, 4)
#define DSM_MAC_CFG_TX_PAUSE_VAL GENMASK(31, 16)
#define DSM_MAC_CFG_TX_PAUSE_VAL_SET(x)\
@@ -3000,7 +3956,8 @@ enum sparx5_target {
FIELD_GET(DSM_MAC_CFG_TX_PAUSE_XON_XOFF, x)
/* DSM:CFG:MAC_ADDR_BASE_HIGH_CFG */
-#define DSM_MAC_ADDR_BASE_HIGH_CFG(r) __REG(TARGET_DSM, 0, 1, 20, 0, 1, 3528, 2700, r, 65, 4)
+#define DSM_MAC_ADDR_BASE_HIGH_CFG(r) __REG(TARGET_DSM,\
+ 0, 1, 20, 0, 1, 3528, 2700, r, 65, 4)
#define DSM_MAC_ADDR_BASE_HIGH_CFG_MAC_ADDR_HIGH GENMASK(23, 0)
#define DSM_MAC_ADDR_BASE_HIGH_CFG_MAC_ADDR_HIGH_SET(x)\
@@ -3009,7 +3966,8 @@ enum sparx5_target {
FIELD_GET(DSM_MAC_ADDR_BASE_HIGH_CFG_MAC_ADDR_HIGH, x)
/* DSM:CFG:MAC_ADDR_BASE_LOW_CFG */
-#define DSM_MAC_ADDR_BASE_LOW_CFG(r) __REG(TARGET_DSM, 0, 1, 20, 0, 1, 3528, 2960, r, 65, 4)
+#define DSM_MAC_ADDR_BASE_LOW_CFG(r) __REG(TARGET_DSM,\
+ 0, 1, 20, 0, 1, 3528, 2960, r, 65, 4)
#define DSM_MAC_ADDR_BASE_LOW_CFG_MAC_ADDR_LOW GENMASK(23, 0)
#define DSM_MAC_ADDR_BASE_LOW_CFG_MAC_ADDR_LOW_SET(x)\
@@ -3018,7 +3976,8 @@ enum sparx5_target {
FIELD_GET(DSM_MAC_ADDR_BASE_LOW_CFG_MAC_ADDR_LOW, x)
/* DSM:CFG:TAXI_CAL_CFG */
-#define DSM_TAXI_CAL_CFG(r) __REG(TARGET_DSM, 0, 1, 20, 0, 1, 3528, 3224, r, 9, 4)
+#define DSM_TAXI_CAL_CFG(r) __REG(TARGET_DSM,\
+ 0, 1, 20, 0, 1, 3528, 3224, r, 9, 4)
#define DSM_TAXI_CAL_CFG_CAL_IDX GENMASK(20, 15)
#define DSM_TAXI_CAL_CFG_CAL_IDX_SET(x)\
@@ -3050,8 +4009,41 @@ enum sparx5_target {
#define DSM_TAXI_CAL_CFG_CAL_PGM_ENA_GET(x)\
FIELD_GET(DSM_TAXI_CAL_CFG_CAL_PGM_ENA, x)
+/* EACL:ES2_KEY_SELECT_PROFILE:VCAP_ES2_KEY_SEL */
+#define EACL_VCAP_ES2_KEY_SEL(g, r) __REG(TARGET_EACL,\
+ 0, 1, 149504, g, 138, 8, 0, r, 2, 4)
+
+#define EACL_VCAP_ES2_KEY_SEL_IP6_KEY_SEL GENMASK(7, 5)
+#define EACL_VCAP_ES2_KEY_SEL_IP6_KEY_SEL_SET(x)\
+ FIELD_PREP(EACL_VCAP_ES2_KEY_SEL_IP6_KEY_SEL, x)
+#define EACL_VCAP_ES2_KEY_SEL_IP6_KEY_SEL_GET(x)\
+ FIELD_GET(EACL_VCAP_ES2_KEY_SEL_IP6_KEY_SEL, x)
+
+#define EACL_VCAP_ES2_KEY_SEL_IP4_KEY_SEL GENMASK(4, 2)
+#define EACL_VCAP_ES2_KEY_SEL_IP4_KEY_SEL_SET(x)\
+ FIELD_PREP(EACL_VCAP_ES2_KEY_SEL_IP4_KEY_SEL, x)
+#define EACL_VCAP_ES2_KEY_SEL_IP4_KEY_SEL_GET(x)\
+ FIELD_GET(EACL_VCAP_ES2_KEY_SEL_IP4_KEY_SEL, x)
+
+#define EACL_VCAP_ES2_KEY_SEL_ARP_KEY_SEL BIT(1)
+#define EACL_VCAP_ES2_KEY_SEL_ARP_KEY_SEL_SET(x)\
+ FIELD_PREP(EACL_VCAP_ES2_KEY_SEL_ARP_KEY_SEL, x)
+#define EACL_VCAP_ES2_KEY_SEL_ARP_KEY_SEL_GET(x)\
+ FIELD_GET(EACL_VCAP_ES2_KEY_SEL_ARP_KEY_SEL, x)
+
+#define EACL_VCAP_ES2_KEY_SEL_KEY_ENA BIT(0)
+#define EACL_VCAP_ES2_KEY_SEL_KEY_ENA_SET(x)\
+ FIELD_PREP(EACL_VCAP_ES2_KEY_SEL_KEY_ENA, x)
+#define EACL_VCAP_ES2_KEY_SEL_KEY_ENA_GET(x)\
+ FIELD_GET(EACL_VCAP_ES2_KEY_SEL_KEY_ENA, x)
+
+/* EACL:CNT_TBL:ES2_CNT */
+#define EACL_ES2_CNT(g) __REG(TARGET_EACL,\
+ 0, 1, 122880, g, 2048, 4, 0, 0, 1, 4)
+
/* EACL:POL_CFG:POL_EACL_CFG */
-#define EACL_POL_EACL_CFG __REG(TARGET_EACL, 0, 1, 150608, 0, 1, 780, 768, 0, 1, 4)
+#define EACL_POL_EACL_CFG __REG(TARGET_EACL,\
+ 0, 1, 150608, 0, 1, 780, 768, 0, 1, 4)
#define EACL_POL_EACL_CFG_EACL_CNT_MARKED_AS_DROPPED BIT(5)
#define EACL_POL_EACL_CFG_EACL_CNT_MARKED_AS_DROPPED_SET(x)\
@@ -3089,8 +4081,61 @@ enum sparx5_target {
#define EACL_POL_EACL_CFG_EACL_FORCE_INIT_GET(x)\
FIELD_GET(EACL_POL_EACL_CFG_EACL_FORCE_INIT, x)
+/* EACL:ES2_STICKY:SEC_LOOKUP_STICKY */
+#define EACL_SEC_LOOKUP_STICKY(r) __REG(TARGET_EACL,\
+ 0, 1, 118696, 0, 1, 8, 0, r, 2, 4)
+
+#define EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP_7TUPLE_STICKY BIT(7)
+#define EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP_7TUPLE_STICKY_SET(x)\
+ FIELD_PREP(EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP_7TUPLE_STICKY, x)
+#define EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP_7TUPLE_STICKY_GET(x)\
+ FIELD_GET(EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP_7TUPLE_STICKY, x)
+
+#define EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP6_VID_STICKY BIT(6)
+#define EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP6_VID_STICKY_SET(x)\
+ FIELD_PREP(EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP6_VID_STICKY, x)
+#define EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP6_VID_STICKY_GET(x)\
+ FIELD_GET(EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP6_VID_STICKY, x)
+
+#define EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP6_STD_STICKY BIT(5)
+#define EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP6_STD_STICKY_SET(x)\
+ FIELD_PREP(EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP6_STD_STICKY, x)
+#define EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP6_STD_STICKY_GET(x)\
+ FIELD_GET(EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP6_STD_STICKY, x)
+
+#define EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_TCPUDP_STICKY BIT(4)
+#define EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_TCPUDP_STICKY_SET(x)\
+ FIELD_PREP(EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_TCPUDP_STICKY, x)
+#define EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_TCPUDP_STICKY_GET(x)\
+ FIELD_GET(EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_TCPUDP_STICKY, x)
+
+#define EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_VID_STICKY BIT(3)
+#define EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_VID_STICKY_SET(x)\
+ FIELD_PREP(EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_VID_STICKY, x)
+#define EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_VID_STICKY_GET(x)\
+ FIELD_GET(EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_VID_STICKY, x)
+
+#define EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_OTHER_STICKY BIT(2)
+#define EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_OTHER_STICKY_SET(x)\
+ FIELD_PREP(EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_OTHER_STICKY, x)
+#define EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_OTHER_STICKY_GET(x)\
+ FIELD_GET(EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_OTHER_STICKY, x)
+
+#define EACL_SEC_LOOKUP_STICKY_SEC_TYPE_ARP_STICKY BIT(1)
+#define EACL_SEC_LOOKUP_STICKY_SEC_TYPE_ARP_STICKY_SET(x)\
+ FIELD_PREP(EACL_SEC_LOOKUP_STICKY_SEC_TYPE_ARP_STICKY, x)
+#define EACL_SEC_LOOKUP_STICKY_SEC_TYPE_ARP_STICKY_GET(x)\
+ FIELD_GET(EACL_SEC_LOOKUP_STICKY_SEC_TYPE_ARP_STICKY, x)
+
+#define EACL_SEC_LOOKUP_STICKY_SEC_TYPE_MAC_ETYPE_STICKY BIT(0)
+#define EACL_SEC_LOOKUP_STICKY_SEC_TYPE_MAC_ETYPE_STICKY_SET(x)\
+ FIELD_PREP(EACL_SEC_LOOKUP_STICKY_SEC_TYPE_MAC_ETYPE_STICKY, x)
+#define EACL_SEC_LOOKUP_STICKY_SEC_TYPE_MAC_ETYPE_STICKY_GET(x)\
+ FIELD_GET(EACL_SEC_LOOKUP_STICKY_SEC_TYPE_MAC_ETYPE_STICKY, x)
+
/* EACL:RAM_CTRL:RAM_INIT */
-#define EACL_RAM_INIT __REG(TARGET_EACL, 0, 1, 118736, 0, 1, 4, 0, 0, 1, 4)
+#define EACL_RAM_INIT __REG(TARGET_EACL,\
+ 0, 1, 118736, 0, 1, 4, 0, 0, 1, 4)
#define EACL_RAM_INIT_RAM_INIT BIT(1)
#define EACL_RAM_INIT_RAM_INIT_SET(x)\
@@ -3105,7 +4150,8 @@ enum sparx5_target {
FIELD_GET(EACL_RAM_INIT_RAM_CFG_HOOK, x)
/* FDMA:FDMA:FDMA_CH_ACTIVATE */
-#define FDMA_CH_ACTIVATE __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 0, 0, 1, 4)
+#define FDMA_CH_ACTIVATE __REG(TARGET_FDMA,\
+ 0, 1, 8, 0, 1, 428, 0, 0, 1, 4)
#define FDMA_CH_ACTIVATE_CH_ACTIVATE GENMASK(7, 0)
#define FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(x)\
@@ -3114,7 +4160,8 @@ enum sparx5_target {
FIELD_GET(FDMA_CH_ACTIVATE_CH_ACTIVATE, x)
/* FDMA:FDMA:FDMA_CH_RELOAD */
-#define FDMA_CH_RELOAD __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 4, 0, 1, 4)
+#define FDMA_CH_RELOAD __REG(TARGET_FDMA,\
+ 0, 1, 8, 0, 1, 428, 4, 0, 1, 4)
#define FDMA_CH_RELOAD_CH_RELOAD GENMASK(7, 0)
#define FDMA_CH_RELOAD_CH_RELOAD_SET(x)\
@@ -3123,7 +4170,8 @@ enum sparx5_target {
FIELD_GET(FDMA_CH_RELOAD_CH_RELOAD, x)
/* FDMA:FDMA:FDMA_CH_DISABLE */
-#define FDMA_CH_DISABLE __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 8, 0, 1, 4)
+#define FDMA_CH_DISABLE __REG(TARGET_FDMA,\
+ 0, 1, 8, 0, 1, 428, 8, 0, 1, 4)
#define FDMA_CH_DISABLE_CH_DISABLE GENMASK(7, 0)
#define FDMA_CH_DISABLE_CH_DISABLE_SET(x)\
@@ -3132,19 +4180,24 @@ enum sparx5_target {
FIELD_GET(FDMA_CH_DISABLE_CH_DISABLE, x)
/* FDMA:FDMA:FDMA_DCB_LLP */
-#define FDMA_DCB_LLP(r) __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 52, r, 8, 4)
+#define FDMA_DCB_LLP(r) __REG(TARGET_FDMA,\
+ 0, 1, 8, 0, 1, 428, 52, r, 8, 4)
/* FDMA:FDMA:FDMA_DCB_LLP1 */
-#define FDMA_DCB_LLP1(r) __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 84, r, 8, 4)
+#define FDMA_DCB_LLP1(r) __REG(TARGET_FDMA,\
+ 0, 1, 8, 0, 1, 428, 84, r, 8, 4)
/* FDMA:FDMA:FDMA_DCB_LLP_PREV */
-#define FDMA_DCB_LLP_PREV(r) __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 116, r, 8, 4)
+#define FDMA_DCB_LLP_PREV(r) __REG(TARGET_FDMA,\
+ 0, 1, 8, 0, 1, 428, 116, r, 8, 4)
/* FDMA:FDMA:FDMA_DCB_LLP_PREV1 */
-#define FDMA_DCB_LLP_PREV1(r) __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 148, r, 8, 4)
+#define FDMA_DCB_LLP_PREV1(r) __REG(TARGET_FDMA,\
+ 0, 1, 8, 0, 1, 428, 148, r, 8, 4)
/* FDMA:FDMA:FDMA_CH_CFG */
-#define FDMA_CH_CFG(r) __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 224, r, 8, 4)
+#define FDMA_CH_CFG(r) __REG(TARGET_FDMA,\
+ 0, 1, 8, 0, 1, 428, 224, r, 8, 4)
#define FDMA_CH_CFG_CH_XTR_STATUS_MODE BIT(7)
#define FDMA_CH_CFG_CH_XTR_STATUS_MODE_SET(x)\
@@ -3177,7 +4230,8 @@ enum sparx5_target {
FIELD_GET(FDMA_CH_CFG_CH_MEM, x)
/* FDMA:FDMA:FDMA_CH_TRANSLATE */
-#define FDMA_CH_TRANSLATE(r) __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 256, r, 8, 4)
+#define FDMA_CH_TRANSLATE(r) __REG(TARGET_FDMA,\
+ 0, 1, 8, 0, 1, 428, 256, r, 8, 4)
#define FDMA_CH_TRANSLATE_OFFSET GENMASK(15, 0)
#define FDMA_CH_TRANSLATE_OFFSET_SET(x)\
@@ -3186,7 +4240,8 @@ enum sparx5_target {
FIELD_GET(FDMA_CH_TRANSLATE_OFFSET, x)
/* FDMA:FDMA:FDMA_XTR_CFG */
-#define FDMA_XTR_CFG __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 364, 0, 1, 4)
+#define FDMA_XTR_CFG __REG(TARGET_FDMA,\
+ 0, 1, 8, 0, 1, 428, 364, 0, 1, 4)
#define FDMA_XTR_CFG_XTR_FIFO_WM GENMASK(15, 11)
#define FDMA_XTR_CFG_XTR_FIFO_WM_SET(x)\
@@ -3201,7 +4256,8 @@ enum sparx5_target {
FIELD_GET(FDMA_XTR_CFG_XTR_ARB_SAT, x)
/* FDMA:FDMA:FDMA_PORT_CTRL */
-#define FDMA_PORT_CTRL(r) __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 376, r, 2, 4)
+#define FDMA_PORT_CTRL(r) __REG(TARGET_FDMA,\
+ 0, 1, 8, 0, 1, 428, 376, r, 2, 4)
#define FDMA_PORT_CTRL_INJ_STOP BIT(4)
#define FDMA_PORT_CTRL_INJ_STOP_SET(x)\
@@ -3234,7 +4290,8 @@ enum sparx5_target {
FIELD_GET(FDMA_PORT_CTRL_XTR_BUF_RST, x)
/* FDMA:FDMA:FDMA_INTR_DCB */
-#define FDMA_INTR_DCB __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 384, 0, 1, 4)
+#define FDMA_INTR_DCB __REG(TARGET_FDMA,\
+ 0, 1, 8, 0, 1, 428, 384, 0, 1, 4)
#define FDMA_INTR_DCB_INTR_DCB GENMASK(7, 0)
#define FDMA_INTR_DCB_INTR_DCB_SET(x)\
@@ -3243,7 +4300,8 @@ enum sparx5_target {
FIELD_GET(FDMA_INTR_DCB_INTR_DCB, x)
/* FDMA:FDMA:FDMA_INTR_DCB_ENA */
-#define FDMA_INTR_DCB_ENA __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 388, 0, 1, 4)
+#define FDMA_INTR_DCB_ENA __REG(TARGET_FDMA,\
+ 0, 1, 8, 0, 1, 428, 388, 0, 1, 4)
#define FDMA_INTR_DCB_ENA_INTR_DCB_ENA GENMASK(7, 0)
#define FDMA_INTR_DCB_ENA_INTR_DCB_ENA_SET(x)\
@@ -3252,7 +4310,8 @@ enum sparx5_target {
FIELD_GET(FDMA_INTR_DCB_ENA_INTR_DCB_ENA, x)
/* FDMA:FDMA:FDMA_INTR_DB */
-#define FDMA_INTR_DB __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 392, 0, 1, 4)
+#define FDMA_INTR_DB __REG(TARGET_FDMA,\
+ 0, 1, 8, 0, 1, 428, 392, 0, 1, 4)
#define FDMA_INTR_DB_INTR_DB GENMASK(7, 0)
#define FDMA_INTR_DB_INTR_DB_SET(x)\
@@ -3261,7 +4320,8 @@ enum sparx5_target {
FIELD_GET(FDMA_INTR_DB_INTR_DB, x)
/* FDMA:FDMA:FDMA_INTR_DB_ENA */
-#define FDMA_INTR_DB_ENA __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 396, 0, 1, 4)
+#define FDMA_INTR_DB_ENA __REG(TARGET_FDMA,\
+ 0, 1, 8, 0, 1, 428, 396, 0, 1, 4)
#define FDMA_INTR_DB_ENA_INTR_DB_ENA GENMASK(7, 0)
#define FDMA_INTR_DB_ENA_INTR_DB_ENA_SET(x)\
@@ -3270,7 +4330,8 @@ enum sparx5_target {
FIELD_GET(FDMA_INTR_DB_ENA_INTR_DB_ENA, x)
/* FDMA:FDMA:FDMA_INTR_ERR */
-#define FDMA_INTR_ERR __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 400, 0, 1, 4)
+#define FDMA_INTR_ERR __REG(TARGET_FDMA,\
+ 0, 1, 8, 0, 1, 428, 400, 0, 1, 4)
#define FDMA_INTR_ERR_INTR_PORT_ERR GENMASK(9, 8)
#define FDMA_INTR_ERR_INTR_PORT_ERR_SET(x)\
@@ -3285,7 +4346,8 @@ enum sparx5_target {
FIELD_GET(FDMA_INTR_ERR_INTR_CH_ERR, x)
/* FDMA:FDMA:FDMA_ERRORS */
-#define FDMA_ERRORS __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 412, 0, 1, 4)
+#define FDMA_ERRORS __REG(TARGET_FDMA,\
+ 0, 1, 8, 0, 1, 428, 412, 0, 1, 4)
#define FDMA_ERRORS_ERR_XTR_WR GENMASK(31, 30)
#define FDMA_ERRORS_ERR_XTR_WR_SET(x)\
@@ -3336,7 +4398,8 @@ enum sparx5_target {
FIELD_GET(FDMA_ERRORS_ERR_CH_WR, x)
/* FDMA:FDMA:FDMA_ERRORS_2 */
-#define FDMA_ERRORS_2 __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 416, 0, 1, 4)
+#define FDMA_ERRORS_2 __REG(TARGET_FDMA,\
+ 0, 1, 8, 0, 1, 428, 416, 0, 1, 4)
#define FDMA_ERRORS_2_ERR_XTR_FRAG GENMASK(1, 0)
#define FDMA_ERRORS_2_ERR_XTR_FRAG_SET(x)\
@@ -3345,7 +4408,8 @@ enum sparx5_target {
FIELD_GET(FDMA_ERRORS_2_ERR_XTR_FRAG, x)
/* FDMA:FDMA:FDMA_CTRL */
-#define FDMA_CTRL __REG(TARGET_FDMA, 0, 1, 8, 0, 1, 428, 424, 0, 1, 4)
+#define FDMA_CTRL __REG(TARGET_FDMA,\
+ 0, 1, 8, 0, 1, 428, 424, 0, 1, 4)
#define FDMA_CTRL_NRESET BIT(0)
#define FDMA_CTRL_NRESET_SET(x)\
@@ -3354,7 +4418,8 @@ enum sparx5_target {
FIELD_GET(FDMA_CTRL_NRESET, x)
/* DEVCPU_GCB:CHIP_REGS:CHIP_ID */
-#define GCB_CHIP_ID __REG(TARGET_GCB, 0, 1, 0, 0, 1, 424, 0, 0, 1, 4)
+#define GCB_CHIP_ID __REG(TARGET_GCB,\
+ 0, 1, 0, 0, 1, 424, 0, 0, 1, 4)
#define GCB_CHIP_ID_REV_ID GENMASK(31, 28)
#define GCB_CHIP_ID_REV_ID_SET(x)\
@@ -3381,7 +4446,8 @@ enum sparx5_target {
FIELD_GET(GCB_CHIP_ID_ONE, x)
/* DEVCPU_GCB:CHIP_REGS:SOFT_RST */
-#define GCB_SOFT_RST __REG(TARGET_GCB, 0, 1, 0, 0, 1, 424, 8, 0, 1, 4)
+#define GCB_SOFT_RST __REG(TARGET_GCB,\
+ 0, 1, 0, 0, 1, 424, 8, 0, 1, 4)
#define GCB_SOFT_RST_SOFT_NON_CFG_RST BIT(2)
#define GCB_SOFT_RST_SOFT_NON_CFG_RST_SET(x)\
@@ -3402,7 +4468,8 @@ enum sparx5_target {
FIELD_GET(GCB_SOFT_RST_SOFT_CHIP_RST, x)
/* DEVCPU_GCB:CHIP_REGS:HW_SGPIO_SD_CFG */
-#define GCB_HW_SGPIO_SD_CFG __REG(TARGET_GCB, 0, 1, 0, 0, 1, 424, 20, 0, 1, 4)
+#define GCB_HW_SGPIO_SD_CFG __REG(TARGET_GCB,\
+ 0, 1, 0, 0, 1, 424, 20, 0, 1, 4)
#define GCB_HW_SGPIO_SD_CFG_SD_HIGH_ENA BIT(1)
#define GCB_HW_SGPIO_SD_CFG_SD_HIGH_ENA_SET(x)\
@@ -3417,7 +4484,8 @@ enum sparx5_target {
FIELD_GET(GCB_HW_SGPIO_SD_CFG_SD_MAP_SEL, x)
/* DEVCPU_GCB:CHIP_REGS:HW_SGPIO_TO_SD_MAP_CFG */
-#define GCB_HW_SGPIO_TO_SD_MAP_CFG(r) __REG(TARGET_GCB, 0, 1, 0, 0, 1, 424, 24, r, 65, 4)
+#define GCB_HW_SGPIO_TO_SD_MAP_CFG(r) __REG(TARGET_GCB,\
+ 0, 1, 0, 0, 1, 424, 24, r, 65, 4)
#define GCB_HW_SGPIO_TO_SD_MAP_CFG_SGPIO_TO_SD_SEL GENMASK(8, 0)
#define GCB_HW_SGPIO_TO_SD_MAP_CFG_SGPIO_TO_SD_SEL_SET(x)\
@@ -3426,7 +4494,8 @@ enum sparx5_target {
FIELD_GET(GCB_HW_SGPIO_TO_SD_MAP_CFG_SGPIO_TO_SD_SEL, x)
/* DEVCPU_GCB:SIO_CTRL:SIO_CLOCK */
-#define GCB_SIO_CLOCK(g) __REG(TARGET_GCB, 0, 1, 876, g, 3, 280, 20, 0, 1, 4)
+#define GCB_SIO_CLOCK(g) __REG(TARGET_GCB,\
+ 0, 1, 876, g, 3, 280, 20, 0, 1, 4)
#define GCB_SIO_CLOCK_SIO_CLK_FREQ GENMASK(19, 8)
#define GCB_SIO_CLOCK_SIO_CLK_FREQ_SET(x)\
@@ -3441,7 +4510,8 @@ enum sparx5_target {
FIELD_GET(GCB_SIO_CLOCK_SYS_CLK_PERIOD, x)
/* HSCH:HSCH_CFG:CIR_CFG */
-#define HSCH_CIR_CFG(g) __REG(TARGET_HSCH, 0, 1, 0, g, 5040, 32, 0, 0, 1, 4)
+#define HSCH_CIR_CFG(g) __REG(TARGET_HSCH,\
+ 0, 1, 0, g, 5040, 32, 0, 0, 1, 4)
#define HSCH_CIR_CFG_CIR_RATE GENMASK(22, 6)
#define HSCH_CIR_CFG_CIR_RATE_SET(x)\
@@ -3456,7 +4526,8 @@ enum sparx5_target {
FIELD_GET(HSCH_CIR_CFG_CIR_BURST, x)
/* HSCH:HSCH_CFG:EIR_CFG */
-#define HSCH_EIR_CFG(g) __REG(TARGET_HSCH, 0, 1, 0, g, 5040, 32, 4, 0, 1, 4)
+#define HSCH_EIR_CFG(g) __REG(TARGET_HSCH,\
+ 0, 1, 0, g, 5040, 32, 4, 0, 1, 4)
#define HSCH_EIR_CFG_EIR_RATE GENMASK(22, 6)
#define HSCH_EIR_CFG_EIR_RATE_SET(x)\
@@ -3471,7 +4542,8 @@ enum sparx5_target {
FIELD_GET(HSCH_EIR_CFG_EIR_BURST, x)
/* HSCH:HSCH_CFG:SE_CFG */
-#define HSCH_SE_CFG(g) __REG(TARGET_HSCH, 0, 1, 0, g, 5040, 32, 8, 0, 1, 4)
+#define HSCH_SE_CFG(g) __REG(TARGET_HSCH,\
+ 0, 1, 0, g, 5040, 32, 8, 0, 1, 4)
#define HSCH_SE_CFG_SE_DWRR_CNT GENMASK(12, 6)
#define HSCH_SE_CFG_SE_DWRR_CNT_SET(x)\
@@ -3504,7 +4576,8 @@ enum sparx5_target {
FIELD_GET(HSCH_SE_CFG_SE_STOP, x)
/* HSCH:HSCH_CFG:SE_CONNECT */
-#define HSCH_SE_CONNECT(g) __REG(TARGET_HSCH, 0, 1, 0, g, 5040, 32, 12, 0, 1, 4)
+#define HSCH_SE_CONNECT(g) __REG(TARGET_HSCH,\
+ 0, 1, 0, g, 5040, 32, 12, 0, 1, 4)
#define HSCH_SE_CONNECT_SE_LEAK_LINK GENMASK(15, 0)
#define HSCH_SE_CONNECT_SE_LEAK_LINK_SET(x)\
@@ -3513,7 +4586,8 @@ enum sparx5_target {
FIELD_GET(HSCH_SE_CONNECT_SE_LEAK_LINK, x)
/* HSCH:HSCH_CFG:SE_DLB_SENSE */
-#define HSCH_SE_DLB_SENSE(g) __REG(TARGET_HSCH, 0, 1, 0, g, 5040, 32, 16, 0, 1, 4)
+#define HSCH_SE_DLB_SENSE(g) __REG(TARGET_HSCH,\
+ 0, 1, 0, g, 5040, 32, 16, 0, 1, 4)
#define HSCH_SE_DLB_SENSE_SE_DLB_PRIO GENMASK(12, 10)
#define HSCH_SE_DLB_SENSE_SE_DLB_PRIO_SET(x)\
@@ -3546,7 +4620,8 @@ enum sparx5_target {
FIELD_GET(HSCH_SE_DLB_SENSE_SE_DLB_DPORT_ENA, x)
/* HSCH:HSCH_DWRR:DWRR_ENTRY */
-#define HSCH_DWRR_ENTRY(g) __REG(TARGET_HSCH, 0, 1, 162816, g, 72, 4, 0, 0, 1, 4)
+#define HSCH_DWRR_ENTRY(g) __REG(TARGET_HSCH,\
+ 0, 1, 162816, g, 72, 4, 0, 0, 1, 4)
#define HSCH_DWRR_ENTRY_DWRR_COST GENMASK(24, 20)
#define HSCH_DWRR_ENTRY_DWRR_COST_SET(x)\
@@ -3561,7 +4636,8 @@ enum sparx5_target {
FIELD_GET(HSCH_DWRR_ENTRY_DWRR_BALANCE, x)
/* HSCH:HSCH_MISC:HSCH_CFG_CFG */
-#define HSCH_HSCH_CFG_CFG __REG(TARGET_HSCH, 0, 1, 163104, 0, 1, 648, 284, 0, 1, 4)
+#define HSCH_HSCH_CFG_CFG __REG(TARGET_HSCH,\
+ 0, 1, 163104, 0, 1, 648, 284, 0, 1, 4)
#define HSCH_HSCH_CFG_CFG_CFG_SE_IDX GENMASK(26, 14)
#define HSCH_HSCH_CFG_CFG_CFG_SE_IDX_SET(x)\
@@ -3582,16 +4658,18 @@ enum sparx5_target {
FIELD_GET(HSCH_HSCH_CFG_CFG_CSR_GRANT, x)
/* HSCH:HSCH_MISC:SYS_CLK_PER */
-#define HSCH_SYS_CLK_PER __REG(TARGET_HSCH, 0, 1, 163104, 0, 1, 648, 640, 0, 1, 4)
+#define HSCH_SYS_CLK_PER __REG(TARGET_HSCH,\
+ 0, 1, 163104, 0, 1, 648, 640, 0, 1, 4)
-#define HSCH_SYS_CLK_PER_SYS_CLK_PER_100PS GENMASK(7, 0)
-#define HSCH_SYS_CLK_PER_SYS_CLK_PER_100PS_SET(x)\
- FIELD_PREP(HSCH_SYS_CLK_PER_SYS_CLK_PER_100PS, x)
-#define HSCH_SYS_CLK_PER_SYS_CLK_PER_100PS_GET(x)\
- FIELD_GET(HSCH_SYS_CLK_PER_SYS_CLK_PER_100PS, x)
+#define HSCH_SYS_CLK_PER_100PS GENMASK(7, 0)
+#define HSCH_SYS_CLK_PER_100PS_SET(x)\
+ FIELD_PREP(HSCH_SYS_CLK_PER_100PS, x)
+#define HSCH_SYS_CLK_PER_100PS_GET(x)\
+ FIELD_GET(HSCH_SYS_CLK_PER_100PS, x)
/* HSCH:HSCH_LEAK_LISTS:HSCH_TIMER_CFG */
-#define HSCH_HSCH_TIMER_CFG(g, r) __REG(TARGET_HSCH, 0, 1, 161664, g, 4, 32, 0, r, 4, 4)
+#define HSCH_HSCH_TIMER_CFG(g, r) __REG(TARGET_HSCH,\
+ 0, 1, 161664, g, 4, 32, 0, r, 4, 4)
#define HSCH_HSCH_TIMER_CFG_LEAK_TIME GENMASK(17, 0)
#define HSCH_HSCH_TIMER_CFG_LEAK_TIME_SET(x)\
@@ -3600,7 +4678,8 @@ enum sparx5_target {
FIELD_GET(HSCH_HSCH_TIMER_CFG_LEAK_TIME, x)
/* HSCH:HSCH_LEAK_LISTS:HSCH_LEAK_CFG */
-#define HSCH_HSCH_LEAK_CFG(g, r) __REG(TARGET_HSCH, 0, 1, 161664, g, 4, 32, 16, r, 4, 4)
+#define HSCH_HSCH_LEAK_CFG(g, r) __REG(TARGET_HSCH,\
+ 0, 1, 161664, g, 4, 32, 16, r, 4, 4)
#define HSCH_HSCH_LEAK_CFG_LEAK_FIRST GENMASK(16, 1)
#define HSCH_HSCH_LEAK_CFG_LEAK_FIRST_SET(x)\
@@ -3615,7 +4694,8 @@ enum sparx5_target {
FIELD_GET(HSCH_HSCH_LEAK_CFG_LEAK_ERR, x)
/* HSCH:SYSTEM:FLUSH_CTRL */
-#define HSCH_FLUSH_CTRL __REG(TARGET_HSCH, 0, 1, 184000, 0, 1, 312, 4, 0, 1, 4)
+#define HSCH_FLUSH_CTRL __REG(TARGET_HSCH,\
+ 0, 1, 184000, 0, 1, 312, 4, 0, 1, 4)
#define HSCH_FLUSH_CTRL_FLUSH_ENA BIT(27)
#define HSCH_FLUSH_CTRL_FLUSH_ENA_SET(x)\
@@ -3660,7 +4740,8 @@ enum sparx5_target {
FIELD_GET(HSCH_FLUSH_CTRL_FLUSH_HIER, x)
/* HSCH:SYSTEM:PORT_MODE */
-#define HSCH_PORT_MODE(r) __REG(TARGET_HSCH, 0, 1, 184000, 0, 1, 312, 8, r, 70, 4)
+#define HSCH_PORT_MODE(r) __REG(TARGET_HSCH,\
+ 0, 1, 184000, 0, 1, 312, 8, r, 70, 4)
#define HSCH_PORT_MODE_DEQUEUE_DIS BIT(4)
#define HSCH_PORT_MODE_DEQUEUE_DIS_SET(x)\
@@ -3693,7 +4774,8 @@ enum sparx5_target {
FIELD_GET(HSCH_PORT_MODE_CPU_PRIO_MODE, x)
/* HSCH:SYSTEM:OUTB_SHARE_ENA */
-#define HSCH_OUTB_SHARE_ENA(r) __REG(TARGET_HSCH, 0, 1, 184000, 0, 1, 312, 288, r, 5, 4)
+#define HSCH_OUTB_SHARE_ENA(r) __REG(TARGET_HSCH,\
+ 0, 1, 184000, 0, 1, 312, 288, r, 5, 4)
#define HSCH_OUTB_SHARE_ENA_OUTB_SHARE_ENA GENMASK(7, 0)
#define HSCH_OUTB_SHARE_ENA_OUTB_SHARE_ENA_SET(x)\
@@ -3702,7 +4784,8 @@ enum sparx5_target {
FIELD_GET(HSCH_OUTB_SHARE_ENA_OUTB_SHARE_ENA, x)
/* HSCH:MMGT:RESET_CFG */
-#define HSCH_RESET_CFG __REG(TARGET_HSCH, 0, 1, 162368, 0, 1, 16, 8, 0, 1, 4)
+#define HSCH_RESET_CFG __REG(TARGET_HSCH,\
+ 0, 1, 162368, 0, 1, 16, 8, 0, 1, 4)
#define HSCH_RESET_CFG_CORE_ENA BIT(0)
#define HSCH_RESET_CFG_CORE_ENA_SET(x)\
@@ -3711,7 +4794,8 @@ enum sparx5_target {
FIELD_GET(HSCH_RESET_CFG_CORE_ENA, x)
/* HSCH:TAS_CONFIG:TAS_STATEMACHINE_CFG */
-#define HSCH_TAS_STATEMACHINE_CFG __REG(TARGET_HSCH, 0, 1, 162384, 0, 1, 12, 8, 0, 1, 4)
+#define HSCH_TAS_STATEMACHINE_CFG __REG(TARGET_HSCH,\
+ 0, 1, 162384, 0, 1, 12, 8, 0, 1, 4)
#define HSCH_TAS_STATEMACHINE_CFG_REVISIT_DLY GENMASK(7, 0)
#define HSCH_TAS_STATEMACHINE_CFG_REVISIT_DLY_SET(x)\
@@ -3720,7 +4804,8 @@ enum sparx5_target {
FIELD_GET(HSCH_TAS_STATEMACHINE_CFG_REVISIT_DLY, x)
/* LRN:COMMON:COMMON_ACCESS_CTRL */
-#define LRN_COMMON_ACCESS_CTRL __REG(TARGET_LRN, 0, 1, 0, 0, 1, 72, 0, 0, 1, 4)
+#define LRN_COMMON_ACCESS_CTRL __REG(TARGET_LRN,\
+ 0, 1, 0, 0, 1, 72, 0, 0, 1, 4)
#define LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_COL GENMASK(21, 20)
#define LRN_COMMON_ACCESS_CTRL_CPU_ACCESS_DIRECT_COL_SET(x)\
@@ -3753,7 +4838,8 @@ enum sparx5_target {
FIELD_GET(LRN_COMMON_ACCESS_CTRL_MAC_TABLE_ACCESS_SHOT, x)
/* LRN:COMMON:MAC_ACCESS_CFG_0 */
-#define LRN_MAC_ACCESS_CFG_0 __REG(TARGET_LRN, 0, 1, 0, 0, 1, 72, 4, 0, 1, 4)
+#define LRN_MAC_ACCESS_CFG_0 __REG(TARGET_LRN,\
+ 0, 1, 0, 0, 1, 72, 4, 0, 1, 4)
#define LRN_MAC_ACCESS_CFG_0_MAC_ENTRY_FID GENMASK(28, 16)
#define LRN_MAC_ACCESS_CFG_0_MAC_ENTRY_FID_SET(x)\
@@ -3768,10 +4854,12 @@ enum sparx5_target {
FIELD_GET(LRN_MAC_ACCESS_CFG_0_MAC_ENTRY_MAC_MSB, x)
/* LRN:COMMON:MAC_ACCESS_CFG_1 */
-#define LRN_MAC_ACCESS_CFG_1 __REG(TARGET_LRN, 0, 1, 0, 0, 1, 72, 8, 0, 1, 4)
+#define LRN_MAC_ACCESS_CFG_1 __REG(TARGET_LRN,\
+ 0, 1, 0, 0, 1, 72, 8, 0, 1, 4)
/* LRN:COMMON:MAC_ACCESS_CFG_2 */
-#define LRN_MAC_ACCESS_CFG_2 __REG(TARGET_LRN, 0, 1, 0, 0, 1, 72, 12, 0, 1, 4)
+#define LRN_MAC_ACCESS_CFG_2 __REG(TARGET_LRN,\
+ 0, 1, 0, 0, 1, 72, 12, 0, 1, 4)
#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_SRC_KILL_FWD BIT(28)
#define LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_SRC_KILL_FWD_SET(x)\
@@ -3846,7 +4934,8 @@ enum sparx5_target {
FIELD_GET(LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR, x)
/* LRN:COMMON:MAC_ACCESS_CFG_3 */
-#define LRN_MAC_ACCESS_CFG_3 __REG(TARGET_LRN, 0, 1, 0, 0, 1, 72, 16, 0, 1, 4)
+#define LRN_MAC_ACCESS_CFG_3 __REG(TARGET_LRN,\
+ 0, 1, 0, 0, 1, 72, 16, 0, 1, 4)
#define LRN_MAC_ACCESS_CFG_3_MAC_ENTRY_ISDX_LIMIT_IDX GENMASK(10, 0)
#define LRN_MAC_ACCESS_CFG_3_MAC_ENTRY_ISDX_LIMIT_IDX_SET(x)\
@@ -3855,7 +4944,8 @@ enum sparx5_target {
FIELD_GET(LRN_MAC_ACCESS_CFG_3_MAC_ENTRY_ISDX_LIMIT_IDX, x)
/* LRN:COMMON:SCAN_NEXT_CFG */
-#define LRN_SCAN_NEXT_CFG __REG(TARGET_LRN, 0, 1, 0, 0, 1, 72, 20, 0, 1, 4)
+#define LRN_SCAN_NEXT_CFG __REG(TARGET_LRN,\
+ 0, 1, 0, 0, 1, 72, 20, 0, 1, 4)
#define LRN_SCAN_NEXT_CFG_SCAN_AGE_FLAG_UPDATE_SEL GENMASK(21, 19)
#define LRN_SCAN_NEXT_CFG_SCAN_AGE_FLAG_UPDATE_SEL_SET(x)\
@@ -3948,7 +5038,8 @@ enum sparx5_target {
FIELD_GET(LRN_SCAN_NEXT_CFG_ADDR_FILTER_ENA, x)
/* LRN:COMMON:SCAN_NEXT_CFG_1 */
-#define LRN_SCAN_NEXT_CFG_1 __REG(TARGET_LRN, 0, 1, 0, 0, 1, 72, 24, 0, 1, 4)
+#define LRN_SCAN_NEXT_CFG_1 __REG(TARGET_LRN,\
+ 0, 1, 0, 0, 1, 72, 24, 0, 1, 4)
#define LRN_SCAN_NEXT_CFG_1_PORT_MOVE_NEW_ADDR GENMASK(30, 16)
#define LRN_SCAN_NEXT_CFG_1_PORT_MOVE_NEW_ADDR_SET(x)\
@@ -3963,7 +5054,8 @@ enum sparx5_target {
FIELD_GET(LRN_SCAN_NEXT_CFG_1_SCAN_ENTRY_ADDR_MASK, x)
/* LRN:COMMON:AUTOAGE_CFG */
-#define LRN_AUTOAGE_CFG(r) __REG(TARGET_LRN, 0, 1, 0, 0, 1, 72, 36, r, 4, 4)
+#define LRN_AUTOAGE_CFG(r) __REG(TARGET_LRN,\
+ 0, 1, 0, 0, 1, 72, 36, r, 4, 4)
#define LRN_AUTOAGE_CFG_UNIT_SIZE GENMASK(29, 28)
#define LRN_AUTOAGE_CFG_UNIT_SIZE_SET(x)\
@@ -3978,7 +5070,8 @@ enum sparx5_target {
FIELD_GET(LRN_AUTOAGE_CFG_PERIOD_VAL, x)
/* LRN:COMMON:AUTOAGE_CFG_1 */
-#define LRN_AUTOAGE_CFG_1 __REG(TARGET_LRN, 0, 1, 0, 0, 1, 72, 52, 0, 1, 4)
+#define LRN_AUTOAGE_CFG_1 __REG(TARGET_LRN,\
+ 0, 1, 0, 0, 1, 72, 52, 0, 1, 4)
#define LRN_AUTOAGE_CFG_1_PAUSE_AUTO_AGE_ENA BIT(25)
#define LRN_AUTOAGE_CFG_1_PAUSE_AUTO_AGE_ENA_SET(x)\
@@ -4023,7 +5116,8 @@ enum sparx5_target {
FIELD_GET(LRN_AUTOAGE_CFG_1_FORCE_IDLE_ENA, x)
/* LRN:COMMON:AUTOAGE_CFG_2 */
-#define LRN_AUTOAGE_CFG_2 __REG(TARGET_LRN, 0, 1, 0, 0, 1, 72, 56, 0, 1, 4)
+#define LRN_AUTOAGE_CFG_2 __REG(TARGET_LRN,\
+ 0, 1, 0, 0, 1, 72, 56, 0, 1, 4)
#define LRN_AUTOAGE_CFG_2_NEXT_ROW GENMASK(17, 4)
#define LRN_AUTOAGE_CFG_2_NEXT_ROW_SET(x)\
@@ -4038,7 +5132,8 @@ enum sparx5_target {
FIELD_GET(LRN_AUTOAGE_CFG_2_SCAN_ONGOING_STATUS, x)
/* PCIE_DM_EP:PF0_ATU_CAP:IATU_REGION_CTRL_2_OFF_OUTBOUND_0 */
-#define PCEP_RCTRL_2_OUT_0 __REG(TARGET_PCEP, 0, 1, 3145728, 0, 1, 130852, 4, 0, 1, 4)
+#define PCEP_RCTRL_2_OUT_0 __REG(TARGET_PCEP,\
+ 0, 1, 3145728, 0, 1, 130852, 4, 0, 1, 4)
#define PCEP_RCTRL_2_OUT_0_MSG_CODE GENMASK(7, 0)
#define PCEP_RCTRL_2_OUT_0_MSG_CODE_SET(x)\
@@ -4101,7 +5196,8 @@ enum sparx5_target {
FIELD_GET(PCEP_RCTRL_2_OUT_0_REGION_EN, x)
/* PCIE_DM_EP:PF0_ATU_CAP:IATU_LWR_BASE_ADDR_OFF_OUTBOUND_0 */
-#define PCEP_ADDR_LWR_OUT_0 __REG(TARGET_PCEP, 0, 1, 3145728, 0, 1, 130852, 8, 0, 1, 4)
+#define PCEP_ADDR_LWR_OUT_0 __REG(TARGET_PCEP,\
+ 0, 1, 3145728, 0, 1, 130852, 8, 0, 1, 4)
#define PCEP_ADDR_LWR_OUT_0_LWR_BASE_HW GENMASK(15, 0)
#define PCEP_ADDR_LWR_OUT_0_LWR_BASE_HW_SET(x)\
@@ -4116,10 +5212,12 @@ enum sparx5_target {
FIELD_GET(PCEP_ADDR_LWR_OUT_0_LWR_BASE_RW, x)
/* PCIE_DM_EP:PF0_ATU_CAP:IATU_UPPER_BASE_ADDR_OFF_OUTBOUND_0 */
-#define PCEP_ADDR_UPR_OUT_0 __REG(TARGET_PCEP, 0, 1, 3145728, 0, 1, 130852, 12, 0, 1, 4)
+#define PCEP_ADDR_UPR_OUT_0 __REG(TARGET_PCEP,\
+ 0, 1, 3145728, 0, 1, 130852, 12, 0, 1, 4)
/* PCIE_DM_EP:PF0_ATU_CAP:IATU_LIMIT_ADDR_OFF_OUTBOUND_0 */
-#define PCEP_ADDR_LIM_OUT_0 __REG(TARGET_PCEP, 0, 1, 3145728, 0, 1, 130852, 16, 0, 1, 4)
+#define PCEP_ADDR_LIM_OUT_0 __REG(TARGET_PCEP,\
+ 0, 1, 3145728, 0, 1, 130852, 16, 0, 1, 4)
#define PCEP_ADDR_LIM_OUT_0_LIMIT_ADDR_HW GENMASK(15, 0)
#define PCEP_ADDR_LIM_OUT_0_LIMIT_ADDR_HW_SET(x)\
@@ -4134,13 +5232,16 @@ enum sparx5_target {
FIELD_GET(PCEP_ADDR_LIM_OUT_0_LIMIT_ADDR_RW, x)
/* PCIE_DM_EP:PF0_ATU_CAP:IATU_LWR_TARGET_ADDR_OFF_OUTBOUND_0 */
-#define PCEP_ADDR_LWR_TGT_OUT_0 __REG(TARGET_PCEP, 0, 1, 3145728, 0, 1, 130852, 20, 0, 1, 4)
+#define PCEP_ADDR_LWR_TGT_OUT_0 __REG(TARGET_PCEP,\
+ 0, 1, 3145728, 0, 1, 130852, 20, 0, 1, 4)
/* PCIE_DM_EP:PF0_ATU_CAP:IATU_UPPER_TARGET_ADDR_OFF_OUTBOUND_0 */
-#define PCEP_ADDR_UPR_TGT_OUT_0 __REG(TARGET_PCEP, 0, 1, 3145728, 0, 1, 130852, 24, 0, 1, 4)
+#define PCEP_ADDR_UPR_TGT_OUT_0 __REG(TARGET_PCEP,\
+ 0, 1, 3145728, 0, 1, 130852, 24, 0, 1, 4)
/* PCIE_DM_EP:PF0_ATU_CAP:IATU_UPPR_LIMIT_ADDR_OFF_OUTBOUND_0 */
-#define PCEP_ADDR_UPR_LIM_OUT_0 __REG(TARGET_PCEP, 0, 1, 3145728, 0, 1, 130852, 32, 0, 1, 4)
+#define PCEP_ADDR_UPR_LIM_OUT_0 __REG(TARGET_PCEP,\
+ 0, 1, 3145728, 0, 1, 130852, 32, 0, 1, 4)
#define PCEP_ADDR_UPR_LIM_OUT_0_UPPR_LIMIT_ADDR_RW GENMASK(1, 0)
#define PCEP_ADDR_UPR_LIM_OUT_0_UPPR_LIMIT_ADDR_RW_SET(x)\
@@ -4155,7 +5256,8 @@ enum sparx5_target {
FIELD_GET(PCEP_ADDR_UPR_LIM_OUT_0_UPPR_LIMIT_ADDR_HW, x)
/* PCS_10GBASE_R:PCS_10GBR_CFG:PCS_CFG */
-#define PCS10G_BR_PCS_CFG(t) __REG(TARGET_PCS10G_BR, t, 12, 0, 0, 1, 56, 0, 0, 1, 4)
+#define PCS10G_BR_PCS_CFG(t) __REG(TARGET_PCS10G_BR,\
+ t, 12, 0, 0, 1, 56, 0, 0, 1, 4)
#define PCS10G_BR_PCS_CFG_PCS_ENA BIT(31)
#define PCS10G_BR_PCS_CFG_PCS_ENA_SET(x)\
@@ -4230,7 +5332,8 @@ enum sparx5_target {
FIELD_GET(PCS10G_BR_PCS_CFG_TX_SCR_DISABLE, x)
/* PCS_10GBASE_R:PCS_10GBR_CFG:PCS_SD_CFG */
-#define PCS10G_BR_PCS_SD_CFG(t) __REG(TARGET_PCS10G_BR, t, 12, 0, 0, 1, 56, 4, 0, 1, 4)
+#define PCS10G_BR_PCS_SD_CFG(t) __REG(TARGET_PCS10G_BR,\
+ t, 12, 0, 0, 1, 56, 4, 0, 1, 4)
#define PCS10G_BR_PCS_SD_CFG_SD_SEL BIT(8)
#define PCS10G_BR_PCS_SD_CFG_SD_SEL_SET(x)\
@@ -4251,7 +5354,8 @@ enum sparx5_target {
FIELD_GET(PCS10G_BR_PCS_SD_CFG_SD_ENA, x)
/* PCS_10GBASE_R:PCS_10GBR_CFG:PCS_CFG */
-#define PCS25G_BR_PCS_CFG(t) __REG(TARGET_PCS25G_BR, t, 8, 0, 0, 1, 56, 0, 0, 1, 4)
+#define PCS25G_BR_PCS_CFG(t) __REG(TARGET_PCS25G_BR,\
+ t, 8, 0, 0, 1, 56, 0, 0, 1, 4)
#define PCS25G_BR_PCS_CFG_PCS_ENA BIT(31)
#define PCS25G_BR_PCS_CFG_PCS_ENA_SET(x)\
@@ -4326,7 +5430,8 @@ enum sparx5_target {
FIELD_GET(PCS25G_BR_PCS_CFG_TX_SCR_DISABLE, x)
/* PCS_10GBASE_R:PCS_10GBR_CFG:PCS_SD_CFG */
-#define PCS25G_BR_PCS_SD_CFG(t) __REG(TARGET_PCS25G_BR, t, 8, 0, 0, 1, 56, 4, 0, 1, 4)
+#define PCS25G_BR_PCS_SD_CFG(t) __REG(TARGET_PCS25G_BR,\
+ t, 8, 0, 0, 1, 56, 4, 0, 1, 4)
#define PCS25G_BR_PCS_SD_CFG_SD_SEL BIT(8)
#define PCS25G_BR_PCS_SD_CFG_SD_SEL_SET(x)\
@@ -4347,7 +5452,8 @@ enum sparx5_target {
FIELD_GET(PCS25G_BR_PCS_SD_CFG_SD_ENA, x)
/* PCS_10GBASE_R:PCS_10GBR_CFG:PCS_CFG */
-#define PCS5G_BR_PCS_CFG(t) __REG(TARGET_PCS5G_BR, t, 13, 0, 0, 1, 56, 0, 0, 1, 4)
+#define PCS5G_BR_PCS_CFG(t) __REG(TARGET_PCS5G_BR,\
+ t, 13, 0, 0, 1, 56, 0, 0, 1, 4)
#define PCS5G_BR_PCS_CFG_PCS_ENA BIT(31)
#define PCS5G_BR_PCS_CFG_PCS_ENA_SET(x)\
@@ -4422,7 +5528,8 @@ enum sparx5_target {
FIELD_GET(PCS5G_BR_PCS_CFG_TX_SCR_DISABLE, x)
/* PCS_10GBASE_R:PCS_10GBR_CFG:PCS_SD_CFG */
-#define PCS5G_BR_PCS_SD_CFG(t) __REG(TARGET_PCS5G_BR, t, 13, 0, 0, 1, 56, 4, 0, 1, 4)
+#define PCS5G_BR_PCS_SD_CFG(t) __REG(TARGET_PCS5G_BR,\
+ t, 13, 0, 0, 1, 56, 4, 0, 1, 4)
#define PCS5G_BR_PCS_SD_CFG_SD_SEL BIT(8)
#define PCS5G_BR_PCS_SD_CFG_SD_SEL_SET(x)\
@@ -4443,7 +5550,8 @@ enum sparx5_target {
FIELD_GET(PCS5G_BR_PCS_SD_CFG_SD_ENA, x)
/* PORT_CONF:HW_CFG:DEV5G_MODES */
-#define PORT_CONF_DEV5G_MODES __REG(TARGET_PORT_CONF, 0, 1, 0, 0, 1, 24, 0, 0, 1, 4)
+#define PORT_CONF_DEV5G_MODES __REG(TARGET_PORT_CONF,\
+ 0, 1, 0, 0, 1, 24, 0, 0, 1, 4)
#define PORT_CONF_DEV5G_MODES_DEV5G_D0_MODE BIT(0)
#define PORT_CONF_DEV5G_MODES_DEV5G_D0_MODE_SET(x)\
@@ -4524,7 +5632,8 @@ enum sparx5_target {
FIELD_GET(PORT_CONF_DEV5G_MODES_DEV5G_D64_MODE, x)
/* PORT_CONF:HW_CFG:DEV10G_MODES */
-#define PORT_CONF_DEV10G_MODES __REG(TARGET_PORT_CONF, 0, 1, 0, 0, 1, 24, 4, 0, 1, 4)
+#define PORT_CONF_DEV10G_MODES __REG(TARGET_PORT_CONF,\
+ 0, 1, 0, 0, 1, 24, 4, 0, 1, 4)
#define PORT_CONF_DEV10G_MODES_DEV10G_D12_MODE BIT(0)
#define PORT_CONF_DEV10G_MODES_DEV10G_D12_MODE_SET(x)\
@@ -4599,7 +5708,8 @@ enum sparx5_target {
FIELD_GET(PORT_CONF_DEV10G_MODES_DEV10G_D55_MODE, x)
/* PORT_CONF:HW_CFG:DEV25G_MODES */
-#define PORT_CONF_DEV25G_MODES __REG(TARGET_PORT_CONF, 0, 1, 0, 0, 1, 24, 8, 0, 1, 4)
+#define PORT_CONF_DEV25G_MODES __REG(TARGET_PORT_CONF,\
+ 0, 1, 0, 0, 1, 24, 8, 0, 1, 4)
#define PORT_CONF_DEV25G_MODES_DEV25G_D56_MODE BIT(0)
#define PORT_CONF_DEV25G_MODES_DEV25G_D56_MODE_SET(x)\
@@ -4650,7 +5760,8 @@ enum sparx5_target {
FIELD_GET(PORT_CONF_DEV25G_MODES_DEV25G_D63_MODE, x)
/* PORT_CONF:HW_CFG:QSGMII_ENA */
-#define PORT_CONF_QSGMII_ENA __REG(TARGET_PORT_CONF, 0, 1, 0, 0, 1, 24, 12, 0, 1, 4)
+#define PORT_CONF_QSGMII_ENA __REG(TARGET_PORT_CONF,\
+ 0, 1, 0, 0, 1, 24, 12, 0, 1, 4)
#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_0 BIT(0)
#define PORT_CONF_QSGMII_ENA_QSGMII_ENA_0_SET(x)\
@@ -4725,7 +5836,8 @@ enum sparx5_target {
FIELD_GET(PORT_CONF_QSGMII_ENA_QSGMII_ENA_11, x)
/* PORT_CONF:USGMII_CFG_STAT:USGMII_CFG */
-#define PORT_CONF_USGMII_CFG(g) __REG(TARGET_PORT_CONF, 0, 1, 72, g, 6, 8, 0, 0, 1, 4)
+#define PORT_CONF_USGMII_CFG(g) __REG(TARGET_PORT_CONF,\
+ 0, 1, 72, g, 6, 8, 0, 0, 1, 4)
#define PORT_CONF_USGMII_CFG_BYPASS_SCRAM BIT(9)
#define PORT_CONF_USGMII_CFG_BYPASS_SCRAM_SET(x)\
@@ -4770,7 +5882,8 @@ enum sparx5_target {
FIELD_GET(PORT_CONF_USGMII_CFG_QUAD_MODE, x)
/* DEVCPU_PTP:PTP_CFG:PTP_PIN_INTR */
-#define PTP_PTP_PIN_INTR __REG(TARGET_PTP, 0, 1, 320, 0, 1, 16, 0, 0, 1, 4)
+#define PTP_PTP_PIN_INTR __REG(TARGET_PTP,\
+ 0, 1, 320, 0, 1, 16, 0, 0, 1, 4)
#define PTP_PTP_PIN_INTR_INTR_PTP GENMASK(4, 0)
#define PTP_PTP_PIN_INTR_INTR_PTP_SET(x)\
@@ -4779,7 +5892,8 @@ enum sparx5_target {
FIELD_GET(PTP_PTP_PIN_INTR_INTR_PTP, x)
/* DEVCPU_PTP:PTP_CFG:PTP_PIN_INTR_ENA */
-#define PTP_PTP_PIN_INTR_ENA __REG(TARGET_PTP, 0, 1, 320, 0, 1, 16, 4, 0, 1, 4)
+#define PTP_PTP_PIN_INTR_ENA __REG(TARGET_PTP,\
+ 0, 1, 320, 0, 1, 16, 4, 0, 1, 4)
#define PTP_PTP_PIN_INTR_ENA_INTR_PTP_ENA GENMASK(4, 0)
#define PTP_PTP_PIN_INTR_ENA_INTR_PTP_ENA_SET(x)\
@@ -4788,7 +5902,8 @@ enum sparx5_target {
FIELD_GET(PTP_PTP_PIN_INTR_ENA_INTR_PTP_ENA, x)
/* DEVCPU_PTP:PTP_CFG:PTP_INTR_IDENT */
-#define PTP_PTP_INTR_IDENT __REG(TARGET_PTP, 0, 1, 320, 0, 1, 16, 8, 0, 1, 4)
+#define PTP_PTP_INTR_IDENT __REG(TARGET_PTP,\
+ 0, 1, 320, 0, 1, 16, 8, 0, 1, 4)
#define PTP_PTP_INTR_IDENT_INTR_PTP_IDENT GENMASK(4, 0)
#define PTP_PTP_INTR_IDENT_INTR_PTP_IDENT_SET(x)\
@@ -4797,7 +5912,8 @@ enum sparx5_target {
FIELD_GET(PTP_PTP_INTR_IDENT_INTR_PTP_IDENT, x)
/* DEVCPU_PTP:PTP_CFG:PTP_DOM_CFG */
-#define PTP_PTP_DOM_CFG __REG(TARGET_PTP, 0, 1, 320, 0, 1, 16, 12, 0, 1, 4)
+#define PTP_PTP_DOM_CFG __REG(TARGET_PTP,\
+ 0, 1, 320, 0, 1, 16, 12, 0, 1, 4)
#define PTP_PTP_DOM_CFG_PTP_ENA GENMASK(11, 9)
#define PTP_PTP_DOM_CFG_PTP_ENA_SET(x)\
@@ -4824,10 +5940,12 @@ enum sparx5_target {
FIELD_GET(PTP_PTP_DOM_CFG_PTP_CLKCFG_DIS, x)
/* DEVCPU_PTP:PTP_TOD_DOMAINS:CLK_PER_CFG */
-#define PTP_CLK_PER_CFG(g, r) __REG(TARGET_PTP, 0, 1, 336, g, 3, 28, 0, r, 2, 4)
+#define PTP_CLK_PER_CFG(g, r) __REG(TARGET_PTP,\
+ 0, 1, 336, g, 3, 28, 0, r, 2, 4)
/* DEVCPU_PTP:PTP_TOD_DOMAINS:PTP_CUR_NSEC */
-#define PTP_PTP_CUR_NSEC(g) __REG(TARGET_PTP, 0, 1, 336, g, 3, 28, 8, 0, 1, 4)
+#define PTP_PTP_CUR_NSEC(g) __REG(TARGET_PTP,\
+ 0, 1, 336, g, 3, 28, 8, 0, 1, 4)
#define PTP_PTP_CUR_NSEC_PTP_CUR_NSEC GENMASK(29, 0)
#define PTP_PTP_CUR_NSEC_PTP_CUR_NSEC_SET(x)\
@@ -4836,7 +5954,8 @@ enum sparx5_target {
FIELD_GET(PTP_PTP_CUR_NSEC_PTP_CUR_NSEC, x)
/* DEVCPU_PTP:PTP_TOD_DOMAINS:PTP_CUR_NSEC_FRAC */
-#define PTP_PTP_CUR_NSEC_FRAC(g) __REG(TARGET_PTP, 0, 1, 336, g, 3, 28, 12, 0, 1, 4)
+#define PTP_PTP_CUR_NSEC_FRAC(g) __REG(TARGET_PTP,\
+ 0, 1, 336, g, 3, 28, 12, 0, 1, 4)
#define PTP_PTP_CUR_NSEC_FRAC_PTP_CUR_NSEC_FRAC GENMASK(7, 0)
#define PTP_PTP_CUR_NSEC_FRAC_PTP_CUR_NSEC_FRAC_SET(x)\
@@ -4845,10 +5964,12 @@ enum sparx5_target {
FIELD_GET(PTP_PTP_CUR_NSEC_FRAC_PTP_CUR_NSEC_FRAC, x)
/* DEVCPU_PTP:PTP_TOD_DOMAINS:PTP_CUR_SEC_LSB */
-#define PTP_PTP_CUR_SEC_LSB(g) __REG(TARGET_PTP, 0, 1, 336, g, 3, 28, 16, 0, 1, 4)
+#define PTP_PTP_CUR_SEC_LSB(g) __REG(TARGET_PTP,\
+ 0, 1, 336, g, 3, 28, 16, 0, 1, 4)
/* DEVCPU_PTP:PTP_TOD_DOMAINS:PTP_CUR_SEC_MSB */
-#define PTP_PTP_CUR_SEC_MSB(g) __REG(TARGET_PTP, 0, 1, 336, g, 3, 28, 20, 0, 1, 4)
+#define PTP_PTP_CUR_SEC_MSB(g) __REG(TARGET_PTP,\
+ 0, 1, 336, g, 3, 28, 20, 0, 1, 4)
#define PTP_PTP_CUR_SEC_MSB_PTP_CUR_SEC_MSB GENMASK(15, 0)
#define PTP_PTP_CUR_SEC_MSB_PTP_CUR_SEC_MSB_SET(x)\
@@ -4857,10 +5978,12 @@ enum sparx5_target {
FIELD_GET(PTP_PTP_CUR_SEC_MSB_PTP_CUR_SEC_MSB, x)
/* DEVCPU_PTP:PTP_TOD_DOMAINS:NTP_CUR_NSEC */
-#define PTP_NTP_CUR_NSEC(g) __REG(TARGET_PTP, 0, 1, 336, g, 3, 28, 24, 0, 1, 4)
+#define PTP_NTP_CUR_NSEC(g) __REG(TARGET_PTP,\
+ 0, 1, 336, g, 3, 28, 24, 0, 1, 4)
/* DEVCPU_PTP:PTP_PINS:PTP_PIN_CFG */
-#define PTP_PTP_PIN_CFG(g) __REG(TARGET_PTP, 0, 1, 0, g, 5, 64, 0, 0, 1, 4)
+#define PTP_PTP_PIN_CFG(g) __REG(TARGET_PTP,\
+ 0, 1, 0, g, 5, 64, 0, 0, 1, 4)
#define PTP_PTP_PIN_CFG_PTP_PIN_ACTION GENMASK(28, 26)
#define PTP_PTP_PIN_CFG_PTP_PIN_ACTION_SET(x)\
@@ -4917,7 +6040,8 @@ enum sparx5_target {
FIELD_GET(PTP_PTP_PIN_CFG_PTP_PIN_OUTP_OFS, x)
/* DEVCPU_PTP:PTP_PINS:PTP_TOD_SEC_MSB */
-#define PTP_PTP_TOD_SEC_MSB(g) __REG(TARGET_PTP, 0, 1, 0, g, 5, 64, 4, 0, 1, 4)
+#define PTP_PTP_TOD_SEC_MSB(g) __REG(TARGET_PTP,\
+ 0, 1, 0, g, 5, 64, 4, 0, 1, 4)
#define PTP_PTP_TOD_SEC_MSB_PTP_TOD_SEC_MSB GENMASK(15, 0)
#define PTP_PTP_TOD_SEC_MSB_PTP_TOD_SEC_MSB_SET(x)\
@@ -4926,10 +6050,12 @@ enum sparx5_target {
FIELD_GET(PTP_PTP_TOD_SEC_MSB_PTP_TOD_SEC_MSB, x)
/* DEVCPU_PTP:PTP_PINS:PTP_TOD_SEC_LSB */
-#define PTP_PTP_TOD_SEC_LSB(g) __REG(TARGET_PTP, 0, 1, 0, g, 5, 64, 8, 0, 1, 4)
+#define PTP_PTP_TOD_SEC_LSB(g) __REG(TARGET_PTP,\
+ 0, 1, 0, g, 5, 64, 8, 0, 1, 4)
/* DEVCPU_PTP:PTP_PINS:PTP_TOD_NSEC */
-#define PTP_PTP_TOD_NSEC(g) __REG(TARGET_PTP, 0, 1, 0, g, 5, 64, 12, 0, 1, 4)
+#define PTP_PTP_TOD_NSEC(g) __REG(TARGET_PTP,\
+ 0, 1, 0, g, 5, 64, 12, 0, 1, 4)
#define PTP_PTP_TOD_NSEC_PTP_TOD_NSEC GENMASK(29, 0)
#define PTP_PTP_TOD_NSEC_PTP_TOD_NSEC_SET(x)\
@@ -4938,7 +6064,8 @@ enum sparx5_target {
FIELD_GET(PTP_PTP_TOD_NSEC_PTP_TOD_NSEC, x)
/* DEVCPU_PTP:PTP_PINS:PTP_TOD_NSEC_FRAC */
-#define PTP_PTP_TOD_NSEC_FRAC(g) __REG(TARGET_PTP, 0, 1, 0, g, 5, 64, 16, 0, 1, 4)
+#define PTP_PTP_TOD_NSEC_FRAC(g) __REG(TARGET_PTP,\
+ 0, 1, 0, g, 5, 64, 16, 0, 1, 4)
#define PTP_PTP_TOD_NSEC_FRAC_PTP_TOD_NSEC_FRAC GENMASK(7, 0)
#define PTP_PTP_TOD_NSEC_FRAC_PTP_TOD_NSEC_FRAC_SET(x)\
@@ -4947,10 +6074,12 @@ enum sparx5_target {
FIELD_GET(PTP_PTP_TOD_NSEC_FRAC_PTP_TOD_NSEC_FRAC, x)
/* DEVCPU_PTP:PTP_PINS:NTP_NSEC */
-#define PTP_NTP_NSEC(g) __REG(TARGET_PTP, 0, 1, 0, g, 5, 64, 20, 0, 1, 4)
+#define PTP_NTP_NSEC(g) __REG(TARGET_PTP,\
+ 0, 1, 0, g, 5, 64, 20, 0, 1, 4)
/* DEVCPU_PTP:PTP_PINS:PIN_WF_HIGH_PERIOD */
-#define PTP_PIN_WF_HIGH_PERIOD(g) __REG(TARGET_PTP, 0, 1, 0, g, 5, 64, 24, 0, 1, 4)
+#define PTP_PIN_WF_HIGH_PERIOD(g) __REG(TARGET_PTP,\
+ 0, 1, 0, g, 5, 64, 24, 0, 1, 4)
#define PTP_PIN_WF_HIGH_PERIOD_PIN_WFH GENMASK(29, 0)
#define PTP_PIN_WF_HIGH_PERIOD_PIN_WFH_SET(x)\
@@ -4959,7 +6088,8 @@ enum sparx5_target {
FIELD_GET(PTP_PIN_WF_HIGH_PERIOD_PIN_WFH, x)
/* DEVCPU_PTP:PTP_PINS:PIN_WF_LOW_PERIOD */
-#define PTP_PIN_WF_LOW_PERIOD(g) __REG(TARGET_PTP, 0, 1, 0, g, 5, 64, 28, 0, 1, 4)
+#define PTP_PIN_WF_LOW_PERIOD(g) __REG(TARGET_PTP,\
+ 0, 1, 0, g, 5, 64, 28, 0, 1, 4)
#define PTP_PIN_WF_LOW_PERIOD_PIN_WFL GENMASK(29, 0)
#define PTP_PIN_WF_LOW_PERIOD_PIN_WFL_SET(x)\
@@ -4968,7 +6098,8 @@ enum sparx5_target {
FIELD_GET(PTP_PIN_WF_LOW_PERIOD_PIN_WFL, x)
/* DEVCPU_PTP:PTP_PINS:PIN_IOBOUNCH_DELAY */
-#define PTP_PIN_IOBOUNCH_DELAY(g) __REG(TARGET_PTP, 0, 1, 0, g, 5, 64, 32, 0, 1, 4)
+#define PTP_PIN_IOBOUNCH_DELAY(g) __REG(TARGET_PTP,\
+ 0, 1, 0, g, 5, 64, 32, 0, 1, 4)
#define PTP_PIN_IOBOUNCH_DELAY_PIN_IOBOUNCH_VAL GENMASK(18, 3)
#define PTP_PIN_IOBOUNCH_DELAY_PIN_IOBOUNCH_VAL_SET(x)\
@@ -4983,7 +6114,8 @@ enum sparx5_target {
FIELD_GET(PTP_PIN_IOBOUNCH_DELAY_PIN_IOBOUNCH_CFG, x)
/* DEVCPU_PTP:PHASE_DETECTOR_CTRL:PHAD_CTRL */
-#define PTP_PHAD_CTRL(g) __REG(TARGET_PTP, 0, 1, 420, g, 5, 8, 0, 0, 1, 4)
+#define PTP_PHAD_CTRL(g) __REG(TARGET_PTP,\
+ 0, 1, 420, g, 5, 8, 0, 0, 1, 4)
#define PTP_PHAD_CTRL_PHAD_ENA BIT(7)
#define PTP_PHAD_CTRL_PHAD_ENA_SET(x)\
@@ -5010,10 +6142,12 @@ enum sparx5_target {
FIELD_GET(PTP_PHAD_CTRL_LOCK_ACC, x)
/* DEVCPU_PTP:PHASE_DETECTOR_CTRL:PHAD_CYC_STAT */
-#define PTP_PHAD_CYC_STAT(g) __REG(TARGET_PTP, 0, 1, 420, g, 5, 8, 4, 0, 1, 4)
+#define PTP_PHAD_CYC_STAT(g) __REG(TARGET_PTP,\
+ 0, 1, 420, g, 5, 8, 4, 0, 1, 4)
/* QFWD:SYSTEM:SWITCH_PORT_MODE */
-#define QFWD_SWITCH_PORT_MODE(r) __REG(TARGET_QFWD, 0, 1, 0, 0, 1, 340, 0, r, 70, 4)
+#define QFWD_SWITCH_PORT_MODE(r) __REG(TARGET_QFWD,\
+ 0, 1, 0, 0, 1, 340, 0, r, 70, 4)
#define QFWD_SWITCH_PORT_MODE_PORT_ENA BIT(19)
#define QFWD_SWITCH_PORT_MODE_PORT_ENA_SET(x)\
@@ -5070,7 +6204,8 @@ enum sparx5_target {
FIELD_GET(QFWD_SWITCH_PORT_MODE_LEARNALL_MORE, x)
/* QRES:RES_CTRL:RES_CFG */
-#define QRES_RES_CFG(g) __REG(TARGET_QRES, 0, 1, 0, g, 5120, 16, 0, 0, 1, 4)
+#define QRES_RES_CFG(g) __REG(TARGET_QRES,\
+ 0, 1, 0, g, 5120, 16, 0, 0, 1, 4)
#define QRES_RES_CFG_WM_HIGH GENMASK(11, 0)
#define QRES_RES_CFG_WM_HIGH_SET(x)\
@@ -5079,7 +6214,8 @@ enum sparx5_target {
FIELD_GET(QRES_RES_CFG_WM_HIGH, x)
/* QRES:RES_CTRL:RES_STAT */
-#define QRES_RES_STAT(g) __REG(TARGET_QRES, 0, 1, 0, g, 5120, 16, 4, 0, 1, 4)
+#define QRES_RES_STAT(g) __REG(TARGET_QRES,\
+ 0, 1, 0, g, 5120, 16, 4, 0, 1, 4)
#define QRES_RES_STAT_MAXUSE GENMASK(20, 0)
#define QRES_RES_STAT_MAXUSE_SET(x)\
@@ -5088,7 +6224,8 @@ enum sparx5_target {
FIELD_GET(QRES_RES_STAT_MAXUSE, x)
/* QRES:RES_CTRL:RES_STAT_CUR */
-#define QRES_RES_STAT_CUR(g) __REG(TARGET_QRES, 0, 1, 0, g, 5120, 16, 8, 0, 1, 4)
+#define QRES_RES_STAT_CUR(g) __REG(TARGET_QRES,\
+ 0, 1, 0, g, 5120, 16, 8, 0, 1, 4)
#define QRES_RES_STAT_CUR_INUSE GENMASK(20, 0)
#define QRES_RES_STAT_CUR_INUSE_SET(x)\
@@ -5097,7 +6234,8 @@ enum sparx5_target {
FIELD_GET(QRES_RES_STAT_CUR_INUSE, x)
/* DEVCPU_QS:XTR:XTR_GRP_CFG */
-#define QS_XTR_GRP_CFG(r) __REG(TARGET_QS, 0, 1, 0, 0, 1, 36, 0, r, 2, 4)
+#define QS_XTR_GRP_CFG(r) __REG(TARGET_QS,\
+ 0, 1, 0, 0, 1, 36, 0, r, 2, 4)
#define QS_XTR_GRP_CFG_MODE GENMASK(3, 2)
#define QS_XTR_GRP_CFG_MODE_SET(x)\
@@ -5118,10 +6256,12 @@ enum sparx5_target {
FIELD_GET(QS_XTR_GRP_CFG_BYTE_SWAP, x)
/* DEVCPU_QS:XTR:XTR_RD */
-#define QS_XTR_RD(r) __REG(TARGET_QS, 0, 1, 0, 0, 1, 36, 8, r, 2, 4)
+#define QS_XTR_RD(r) __REG(TARGET_QS,\
+ 0, 1, 0, 0, 1, 36, 8, r, 2, 4)
/* DEVCPU_QS:XTR:XTR_FLUSH */
-#define QS_XTR_FLUSH __REG(TARGET_QS, 0, 1, 0, 0, 1, 36, 24, 0, 1, 4)
+#define QS_XTR_FLUSH __REG(TARGET_QS,\
+ 0, 1, 0, 0, 1, 36, 24, 0, 1, 4)
#define QS_XTR_FLUSH_FLUSH GENMASK(1, 0)
#define QS_XTR_FLUSH_FLUSH_SET(x)\
@@ -5130,7 +6270,8 @@ enum sparx5_target {
FIELD_GET(QS_XTR_FLUSH_FLUSH, x)
/* DEVCPU_QS:XTR:XTR_DATA_PRESENT */
-#define QS_XTR_DATA_PRESENT __REG(TARGET_QS, 0, 1, 0, 0, 1, 36, 28, 0, 1, 4)
+#define QS_XTR_DATA_PRESENT __REG(TARGET_QS,\
+ 0, 1, 0, 0, 1, 36, 28, 0, 1, 4)
#define QS_XTR_DATA_PRESENT_DATA_PRESENT GENMASK(1, 0)
#define QS_XTR_DATA_PRESENT_DATA_PRESENT_SET(x)\
@@ -5139,7 +6280,8 @@ enum sparx5_target {
FIELD_GET(QS_XTR_DATA_PRESENT_DATA_PRESENT, x)
/* DEVCPU_QS:INJ:INJ_GRP_CFG */
-#define QS_INJ_GRP_CFG(r) __REG(TARGET_QS, 0, 1, 36, 0, 1, 40, 0, r, 2, 4)
+#define QS_INJ_GRP_CFG(r) __REG(TARGET_QS,\
+ 0, 1, 36, 0, 1, 40, 0, r, 2, 4)
#define QS_INJ_GRP_CFG_MODE GENMASK(3, 2)
#define QS_INJ_GRP_CFG_MODE_SET(x)\
@@ -5154,10 +6296,12 @@ enum sparx5_target {
FIELD_GET(QS_INJ_GRP_CFG_BYTE_SWAP, x)
/* DEVCPU_QS:INJ:INJ_WR */
-#define QS_INJ_WR(r) __REG(TARGET_QS, 0, 1, 36, 0, 1, 40, 8, r, 2, 4)
+#define QS_INJ_WR(r) __REG(TARGET_QS,\
+ 0, 1, 36, 0, 1, 40, 8, r, 2, 4)
/* DEVCPU_QS:INJ:INJ_CTRL */
-#define QS_INJ_CTRL(r) __REG(TARGET_QS, 0, 1, 36, 0, 1, 40, 16, r, 2, 4)
+#define QS_INJ_CTRL(r) __REG(TARGET_QS,\
+ 0, 1, 36, 0, 1, 40, 16, r, 2, 4)
#define QS_INJ_CTRL_GAP_SIZE GENMASK(24, 21)
#define QS_INJ_CTRL_GAP_SIZE_SET(x)\
@@ -5190,7 +6334,8 @@ enum sparx5_target {
FIELD_GET(QS_INJ_CTRL_VLD_BYTES, x)
/* DEVCPU_QS:INJ:INJ_STATUS */
-#define QS_INJ_STATUS __REG(TARGET_QS, 0, 1, 36, 0, 1, 40, 24, 0, 1, 4)
+#define QS_INJ_STATUS __REG(TARGET_QS,\
+ 0, 1, 36, 0, 1, 40, 24, 0, 1, 4)
#define QS_INJ_STATUS_WMARK_REACHED GENMASK(5, 4)
#define QS_INJ_STATUS_WMARK_REACHED_SET(x)\
@@ -5211,7 +6356,8 @@ enum sparx5_target {
FIELD_GET(QS_INJ_STATUS_INJ_IN_PROGRESS, x)
/* QSYS:PAUSE_CFG:PAUSE_CFG */
-#define QSYS_PAUSE_CFG(r) __REG(TARGET_QSYS, 0, 1, 544, 0, 1, 1128, 0, r, 70, 4)
+#define QSYS_PAUSE_CFG(r) __REG(TARGET_QSYS,\
+ 0, 1, 544, 0, 1, 1128, 0, r, 70, 4)
#define QSYS_PAUSE_CFG_PAUSE_START GENMASK(25, 14)
#define QSYS_PAUSE_CFG_PAUSE_START_SET(x)\
@@ -5238,7 +6384,8 @@ enum sparx5_target {
FIELD_GET(QSYS_PAUSE_CFG_AGGRESSIVE_TAILDROP_ENA, x)
/* QSYS:PAUSE_CFG:ATOP */
-#define QSYS_ATOP(r) __REG(TARGET_QSYS, 0, 1, 544, 0, 1, 1128, 284, r, 70, 4)
+#define QSYS_ATOP(r) __REG(TARGET_QSYS,\
+ 0, 1, 544, 0, 1, 1128, 284, r, 70, 4)
#define QSYS_ATOP_ATOP GENMASK(11, 0)
#define QSYS_ATOP_ATOP_SET(x)\
@@ -5247,7 +6394,8 @@ enum sparx5_target {
FIELD_GET(QSYS_ATOP_ATOP, x)
/* QSYS:PAUSE_CFG:FWD_PRESSURE */
-#define QSYS_FWD_PRESSURE(r) __REG(TARGET_QSYS, 0, 1, 544, 0, 1, 1128, 564, r, 70, 4)
+#define QSYS_FWD_PRESSURE(r) __REG(TARGET_QSYS,\
+ 0, 1, 544, 0, 1, 1128, 564, r, 70, 4)
#define QSYS_FWD_PRESSURE_FWD_PRESSURE GENMASK(11, 1)
#define QSYS_FWD_PRESSURE_FWD_PRESSURE_SET(x)\
@@ -5262,7 +6410,8 @@ enum sparx5_target {
FIELD_GET(QSYS_FWD_PRESSURE_FWD_PRESSURE_DIS, x)
/* QSYS:PAUSE_CFG:ATOP_TOT_CFG */
-#define QSYS_ATOP_TOT_CFG __REG(TARGET_QSYS, 0, 1, 544, 0, 1, 1128, 844, 0, 1, 4)
+#define QSYS_ATOP_TOT_CFG __REG(TARGET_QSYS,\
+ 0, 1, 544, 0, 1, 1128, 844, 0, 1, 4)
#define QSYS_ATOP_TOT_CFG_ATOP_TOT GENMASK(11, 0)
#define QSYS_ATOP_TOT_CFG_ATOP_TOT_SET(x)\
@@ -5271,7 +6420,8 @@ enum sparx5_target {
FIELD_GET(QSYS_ATOP_TOT_CFG_ATOP_TOT, x)
/* QSYS:CALCFG:CAL_AUTO */
-#define QSYS_CAL_AUTO(r) __REG(TARGET_QSYS, 0, 1, 2304, 0, 1, 40, 0, r, 7, 4)
+#define QSYS_CAL_AUTO(r) __REG(TARGET_QSYS,\
+ 0, 1, 2304, 0, 1, 40, 0, r, 7, 4)
#define QSYS_CAL_AUTO_CAL_AUTO GENMASK(29, 0)
#define QSYS_CAL_AUTO_CAL_AUTO_SET(x)\
@@ -5280,7 +6430,8 @@ enum sparx5_target {
FIELD_GET(QSYS_CAL_AUTO_CAL_AUTO, x)
/* QSYS:CALCFG:CAL_CTRL */
-#define QSYS_CAL_CTRL __REG(TARGET_QSYS, 0, 1, 2304, 0, 1, 40, 36, 0, 1, 4)
+#define QSYS_CAL_CTRL __REG(TARGET_QSYS,\
+ 0, 1, 2304, 0, 1, 40, 36, 0, 1, 4)
#define QSYS_CAL_CTRL_CAL_MODE GENMASK(14, 11)
#define QSYS_CAL_CTRL_CAL_MODE_SET(x)\
@@ -5301,7 +6452,8 @@ enum sparx5_target {
FIELD_GET(QSYS_CAL_CTRL_CAL_AUTO_ERROR, x)
/* QSYS:RAM_CTRL:RAM_INIT */
-#define QSYS_RAM_INIT __REG(TARGET_QSYS, 0, 1, 2344, 0, 1, 4, 0, 0, 1, 4)
+#define QSYS_RAM_INIT __REG(TARGET_QSYS,\
+ 0, 1, 2344, 0, 1, 4, 0, 0, 1, 4)
#define QSYS_RAM_INIT_RAM_INIT BIT(1)
#define QSYS_RAM_INIT_RAM_INIT_SET(x)\
@@ -5316,7 +6468,8 @@ enum sparx5_target {
FIELD_GET(QSYS_RAM_INIT_RAM_CFG_HOOK, x)
/* REW:COMMON:OWN_UPSID */
-#define REW_OWN_UPSID(r) __REG(TARGET_REW, 0, 1, 387264, 0, 1, 1232, 0, r, 3, 4)
+#define REW_OWN_UPSID(r) __REG(TARGET_REW,\
+ 0, 1, 387264, 0, 1, 1232, 0, r, 3, 4)
#define REW_OWN_UPSID_OWN_UPSID GENMASK(4, 0)
#define REW_OWN_UPSID_OWN_UPSID_SET(x)\
@@ -5324,8 +6477,71 @@ enum sparx5_target {
#define REW_OWN_UPSID_OWN_UPSID_GET(x)\
FIELD_GET(REW_OWN_UPSID_OWN_UPSID, x)
+/* REW:COMMON:RTAG_ETAG_CTRL */
+#define REW_RTAG_ETAG_CTRL(r) __REG(TARGET_REW,\
+ 0, 1, 387264, 0, 1, 1232, 560, r, 70, 4)
+
+#define REW_RTAG_ETAG_CTRL_IPE_TBL GENMASK(9, 3)
+#define REW_RTAG_ETAG_CTRL_IPE_TBL_SET(x)\
+ FIELD_PREP(REW_RTAG_ETAG_CTRL_IPE_TBL, x)
+#define REW_RTAG_ETAG_CTRL_IPE_TBL_GET(x)\
+ FIELD_GET(REW_RTAG_ETAG_CTRL_IPE_TBL, x)
+
+#define REW_RTAG_ETAG_CTRL_ES0_ISDX_KEY_ENA GENMASK(2, 1)
+#define REW_RTAG_ETAG_CTRL_ES0_ISDX_KEY_ENA_SET(x)\
+ FIELD_PREP(REW_RTAG_ETAG_CTRL_ES0_ISDX_KEY_ENA, x)
+#define REW_RTAG_ETAG_CTRL_ES0_ISDX_KEY_ENA_GET(x)\
+ FIELD_GET(REW_RTAG_ETAG_CTRL_ES0_ISDX_KEY_ENA, x)
+
+#define REW_RTAG_ETAG_CTRL_KEEP_ETAG BIT(0)
+#define REW_RTAG_ETAG_CTRL_KEEP_ETAG_SET(x)\
+ FIELD_PREP(REW_RTAG_ETAG_CTRL_KEEP_ETAG, x)
+#define REW_RTAG_ETAG_CTRL_KEEP_ETAG_GET(x)\
+ FIELD_GET(REW_RTAG_ETAG_CTRL_KEEP_ETAG, x)
+
+/* REW:COMMON:ES0_CTRL */
+#define REW_ES0_CTRL __REG(TARGET_REW,\
+ 0, 1, 387264, 0, 1, 1232, 852, 0, 1, 4)
+
+#define REW_ES0_CTRL_ES0_BY_RT_FWD BIT(5)
+#define REW_ES0_CTRL_ES0_BY_RT_FWD_SET(x)\
+ FIELD_PREP(REW_ES0_CTRL_ES0_BY_RT_FWD, x)
+#define REW_ES0_CTRL_ES0_BY_RT_FWD_GET(x)\
+ FIELD_GET(REW_ES0_CTRL_ES0_BY_RT_FWD, x)
+
+#define REW_ES0_CTRL_ES0_BY_RLEG BIT(4)
+#define REW_ES0_CTRL_ES0_BY_RLEG_SET(x)\
+ FIELD_PREP(REW_ES0_CTRL_ES0_BY_RLEG, x)
+#define REW_ES0_CTRL_ES0_BY_RLEG_GET(x)\
+ FIELD_GET(REW_ES0_CTRL_ES0_BY_RLEG, x)
+
+#define REW_ES0_CTRL_ES0_DPORT_ENA BIT(3)
+#define REW_ES0_CTRL_ES0_DPORT_ENA_SET(x)\
+ FIELD_PREP(REW_ES0_CTRL_ES0_DPORT_ENA, x)
+#define REW_ES0_CTRL_ES0_DPORT_ENA_GET(x)\
+ FIELD_GET(REW_ES0_CTRL_ES0_DPORT_ENA, x)
+
+#define REW_ES0_CTRL_ES0_FRM_LBK_CFG BIT(2)
+#define REW_ES0_CTRL_ES0_FRM_LBK_CFG_SET(x)\
+ FIELD_PREP(REW_ES0_CTRL_ES0_FRM_LBK_CFG, x)
+#define REW_ES0_CTRL_ES0_FRM_LBK_CFG_GET(x)\
+ FIELD_GET(REW_ES0_CTRL_ES0_FRM_LBK_CFG, x)
+
+#define REW_ES0_CTRL_ES0_VD2_ENCAP_ID_ENA BIT(1)
+#define REW_ES0_CTRL_ES0_VD2_ENCAP_ID_ENA_SET(x)\
+ FIELD_PREP(REW_ES0_CTRL_ES0_VD2_ENCAP_ID_ENA, x)
+#define REW_ES0_CTRL_ES0_VD2_ENCAP_ID_ENA_GET(x)\
+ FIELD_GET(REW_ES0_CTRL_ES0_VD2_ENCAP_ID_ENA, x)
+
+#define REW_ES0_CTRL_ES0_LU_ENA BIT(0)
+#define REW_ES0_CTRL_ES0_LU_ENA_SET(x)\
+ FIELD_PREP(REW_ES0_CTRL_ES0_LU_ENA, x)
+#define REW_ES0_CTRL_ES0_LU_ENA_GET(x)\
+ FIELD_GET(REW_ES0_CTRL_ES0_LU_ENA, x)
+
/* REW:PORT:PORT_VLAN_CFG */
-#define REW_PORT_VLAN_CFG(g) __REG(TARGET_REW, 0, 1, 360448, g, 70, 256, 0, 0, 1, 4)
+#define REW_PORT_VLAN_CFG(g) __REG(TARGET_REW,\
+ 0, 1, 360448, g, 70, 256, 0, 0, 1, 4)
#define REW_PORT_VLAN_CFG_PORT_PCP GENMASK(15, 13)
#define REW_PORT_VLAN_CFG_PORT_PCP_SET(x)\
@@ -5345,8 +6561,49 @@ enum sparx5_target {
#define REW_PORT_VLAN_CFG_PORT_VID_GET(x)\
FIELD_GET(REW_PORT_VLAN_CFG_PORT_VID, x)
+/* REW:PORT:PCP_MAP_DE0 */
+#define REW_PCP_MAP_DE0(g, r) __REG(TARGET_REW,\
+ 0, 1, 360448, g, 70, 256, 4, r, 8, 4)
+
+#define REW_PCP_MAP_DE0_PCP_DE0 GENMASK(2, 0)
+#define REW_PCP_MAP_DE0_PCP_DE0_SET(x)\
+ FIELD_PREP(REW_PCP_MAP_DE0_PCP_DE0, x)
+#define REW_PCP_MAP_DE0_PCP_DE0_GET(x)\
+ FIELD_GET(REW_PCP_MAP_DE0_PCP_DE0, x)
+
+/* REW:PORT:PCP_MAP_DE1 */
+#define REW_PCP_MAP_DE1(g, r) __REG(TARGET_REW,\
+ 0, 1, 360448, g, 70, 256, 36, r, 8, 4)
+
+#define REW_PCP_MAP_DE1_PCP_DE1 GENMASK(2, 0)
+#define REW_PCP_MAP_DE1_PCP_DE1_SET(x)\
+ FIELD_PREP(REW_PCP_MAP_DE1_PCP_DE1, x)
+#define REW_PCP_MAP_DE1_PCP_DE1_GET(x)\
+ FIELD_GET(REW_PCP_MAP_DE1_PCP_DE1, x)
+
+/* REW:PORT:DEI_MAP_DE0 */
+#define REW_DEI_MAP_DE0(g, r) __REG(TARGET_REW,\
+ 0, 1, 360448, g, 70, 256, 68, r, 8, 4)
+
+#define REW_DEI_MAP_DE0_DEI_DE0 BIT(0)
+#define REW_DEI_MAP_DE0_DEI_DE0_SET(x)\
+ FIELD_PREP(REW_DEI_MAP_DE0_DEI_DE0, x)
+#define REW_DEI_MAP_DE0_DEI_DE0_GET(x)\
+ FIELD_GET(REW_DEI_MAP_DE0_DEI_DE0, x)
+
+/* REW:PORT:DEI_MAP_DE1 */
+#define REW_DEI_MAP_DE1(g, r) __REG(TARGET_REW,\
+ 0, 1, 360448, g, 70, 256, 100, r, 8, 4)
+
+#define REW_DEI_MAP_DE1_DEI_DE1 BIT(0)
+#define REW_DEI_MAP_DE1_DEI_DE1_SET(x)\
+ FIELD_PREP(REW_DEI_MAP_DE1_DEI_DE1, x)
+#define REW_DEI_MAP_DE1_DEI_DE1_GET(x)\
+ FIELD_GET(REW_DEI_MAP_DE1_DEI_DE1, x)
+
/* REW:PORT:TAG_CTRL */
-#define REW_TAG_CTRL(g) __REG(TARGET_REW, 0, 1, 360448, g, 70, 256, 132, 0, 1, 4)
+#define REW_TAG_CTRL(g) __REG(TARGET_REW,\
+ 0, 1, 360448, g, 70, 256, 132, 0, 1, 4)
#define REW_TAG_CTRL_TAG_CFG_OBEY_WAS_TAGGED BIT(13)
#define REW_TAG_CTRL_TAG_CFG_OBEY_WAS_TAGGED_SET(x)\
@@ -5384,8 +6641,25 @@ enum sparx5_target {
#define REW_TAG_CTRL_TAG_DEI_CFG_GET(x)\
FIELD_GET(REW_TAG_CTRL_TAG_DEI_CFG, x)
+/* REW:PORT:DSCP_MAP */
+#define REW_DSCP_MAP(g) __REG(TARGET_REW,\
+ 0, 1, 360448, g, 70, 256, 136, 0, 1, 4)
+
+#define REW_DSCP_MAP_DSCP_UPDATE_ENA BIT(1)
+#define REW_DSCP_MAP_DSCP_UPDATE_ENA_SET(x)\
+ FIELD_PREP(REW_DSCP_MAP_DSCP_UPDATE_ENA, x)
+#define REW_DSCP_MAP_DSCP_UPDATE_ENA_GET(x)\
+ FIELD_GET(REW_DSCP_MAP_DSCP_UPDATE_ENA, x)
+
+#define REW_DSCP_MAP_DSCP_REMAP_ENA BIT(0)
+#define REW_DSCP_MAP_DSCP_REMAP_ENA_SET(x)\
+ FIELD_PREP(REW_DSCP_MAP_DSCP_REMAP_ENA, x)
+#define REW_DSCP_MAP_DSCP_REMAP_ENA_GET(x)\
+ FIELD_GET(REW_DSCP_MAP_DSCP_REMAP_ENA, x)
+
/* REW:PTP_CTRL:PTP_TWOSTEP_CTRL */
-#define REW_PTP_TWOSTEP_CTRL __REG(TARGET_REW, 0, 1, 378368, 0, 1, 40, 0, 0, 1, 4)
+#define REW_PTP_TWOSTEP_CTRL __REG(TARGET_REW,\
+ 0, 1, 378368, 0, 1, 40, 0, 0, 1, 4)
#define REW_PTP_TWOSTEP_CTRL_PTP_OVWR_ENA BIT(12)
#define REW_PTP_TWOSTEP_CTRL_PTP_OVWR_ENA_SET(x)\
@@ -5424,7 +6698,8 @@ enum sparx5_target {
FIELD_GET(REW_PTP_TWOSTEP_CTRL_PTP_OVFL, x)
/* REW:PTP_CTRL:PTP_TWOSTEP_STAMP */
-#define REW_PTP_TWOSTEP_STAMP __REG(TARGET_REW, 0, 1, 378368, 0, 1, 40, 4, 0, 1, 4)
+#define REW_PTP_TWOSTEP_STAMP __REG(TARGET_REW,\
+ 0, 1, 378368, 0, 1, 40, 4, 0, 1, 4)
#define REW_PTP_TWOSTEP_STAMP_STAMP_NSEC GENMASK(29, 0)
#define REW_PTP_TWOSTEP_STAMP_STAMP_NSEC_SET(x)\
@@ -5433,7 +6708,8 @@ enum sparx5_target {
FIELD_GET(REW_PTP_TWOSTEP_STAMP_STAMP_NSEC, x)
/* REW:PTP_CTRL:PTP_TWOSTEP_STAMP_SUBNS */
-#define REW_PTP_TWOSTEP_STAMP_SUBNS __REG(TARGET_REW, 0, 1, 378368, 0, 1, 40, 8, 0, 1, 4)
+#define REW_PTP_TWOSTEP_STAMP_SUBNS __REG(TARGET_REW,\
+ 0, 1, 378368, 0, 1, 40, 8, 0, 1, 4)
#define REW_PTP_TWOSTEP_STAMP_SUBNS_STAMP_SUB_NSEC GENMASK(7, 0)
#define REW_PTP_TWOSTEP_STAMP_SUBNS_STAMP_SUB_NSEC_SET(x)\
@@ -5442,13 +6718,16 @@ enum sparx5_target {
FIELD_GET(REW_PTP_TWOSTEP_STAMP_SUBNS_STAMP_SUB_NSEC, x)
/* REW:PTP_CTRL:PTP_RSRV_NOT_ZERO */
-#define REW_PTP_RSRV_NOT_ZERO __REG(TARGET_REW, 0, 1, 378368, 0, 1, 40, 12, 0, 1, 4)
+#define REW_PTP_RSRV_NOT_ZERO __REG(TARGET_REW,\
+ 0, 1, 378368, 0, 1, 40, 12, 0, 1, 4)
/* REW:PTP_CTRL:PTP_RSRV_NOT_ZERO1 */
-#define REW_PTP_RSRV_NOT_ZERO1 __REG(TARGET_REW, 0, 1, 378368, 0, 1, 40, 16, 0, 1, 4)
+#define REW_PTP_RSRV_NOT_ZERO1 __REG(TARGET_REW,\
+ 0, 1, 378368, 0, 1, 40, 16, 0, 1, 4)
/* REW:PTP_CTRL:PTP_RSRV_NOT_ZERO2 */
-#define REW_PTP_RSRV_NOT_ZERO2 __REG(TARGET_REW, 0, 1, 378368, 0, 1, 40, 20, 0, 1, 4)
+#define REW_PTP_RSRV_NOT_ZERO2 __REG(TARGET_REW,\
+ 0, 1, 378368, 0, 1, 40, 20, 0, 1, 4)
#define REW_PTP_RSRV_NOT_ZERO2_PTP_RSRV_NOT_ZERO2 GENMASK(5, 0)
#define REW_PTP_RSRV_NOT_ZERO2_PTP_RSRV_NOT_ZERO2_SET(x)\
@@ -5457,7 +6736,8 @@ enum sparx5_target {
FIELD_GET(REW_PTP_RSRV_NOT_ZERO2_PTP_RSRV_NOT_ZERO2, x)
/* REW:PTP_CTRL:PTP_GEN_STAMP_FMT */
-#define REW_PTP_GEN_STAMP_FMT(r) __REG(TARGET_REW, 0, 1, 378368, 0, 1, 40, 24, r, 4, 4)
+#define REW_PTP_GEN_STAMP_FMT(r) __REG(TARGET_REW,\
+ 0, 1, 378368, 0, 1, 40, 24, r, 4, 4)
#define REW_PTP_GEN_STAMP_FMT_RT_OFS GENMASK(6, 2)
#define REW_PTP_GEN_STAMP_FMT_RT_OFS_SET(x)\
@@ -5472,7 +6752,8 @@ enum sparx5_target {
FIELD_GET(REW_PTP_GEN_STAMP_FMT_RT_FMT, x)
/* REW:RAM_CTRL:RAM_INIT */
-#define REW_RAM_INIT __REG(TARGET_REW, 0, 1, 378696, 0, 1, 4, 0, 0, 1, 4)
+#define REW_RAM_INIT __REG(TARGET_REW,\
+ 0, 1, 378696, 0, 1, 4, 0, 0, 1, 4)
#define REW_RAM_INIT_RAM_INIT BIT(1)
#define REW_RAM_INIT_RAM_INIT_SET(x)\
@@ -5486,8 +6767,333 @@ enum sparx5_target {
#define REW_RAM_INIT_RAM_CFG_HOOK_GET(x)\
FIELD_GET(REW_RAM_INIT_RAM_CFG_HOOK, x)
+/* VCAP_ES0:VCAP_CORE_CFG:VCAP_UPDATE_CTRL */
+#define VCAP_ES0_CTRL __REG(TARGET_VCAP_ES0,\
+ 0, 1, 0, 0, 1, 8, 0, 0, 1, 4)
+
+#define VCAP_ES0_CTRL_UPDATE_CMD GENMASK(24, 22)
+#define VCAP_ES0_CTRL_UPDATE_CMD_SET(x)\
+ FIELD_PREP(VCAP_ES0_CTRL_UPDATE_CMD, x)
+#define VCAP_ES0_CTRL_UPDATE_CMD_GET(x)\
+ FIELD_GET(VCAP_ES0_CTRL_UPDATE_CMD, x)
+
+#define VCAP_ES0_CTRL_UPDATE_ENTRY_DIS BIT(21)
+#define VCAP_ES0_CTRL_UPDATE_ENTRY_DIS_SET(x)\
+ FIELD_PREP(VCAP_ES0_CTRL_UPDATE_ENTRY_DIS, x)
+#define VCAP_ES0_CTRL_UPDATE_ENTRY_DIS_GET(x)\
+ FIELD_GET(VCAP_ES0_CTRL_UPDATE_ENTRY_DIS, x)
+
+#define VCAP_ES0_CTRL_UPDATE_ACTION_DIS BIT(20)
+#define VCAP_ES0_CTRL_UPDATE_ACTION_DIS_SET(x)\
+ FIELD_PREP(VCAP_ES0_CTRL_UPDATE_ACTION_DIS, x)
+#define VCAP_ES0_CTRL_UPDATE_ACTION_DIS_GET(x)\
+ FIELD_GET(VCAP_ES0_CTRL_UPDATE_ACTION_DIS, x)
+
+#define VCAP_ES0_CTRL_UPDATE_CNT_DIS BIT(19)
+#define VCAP_ES0_CTRL_UPDATE_CNT_DIS_SET(x)\
+ FIELD_PREP(VCAP_ES0_CTRL_UPDATE_CNT_DIS, x)
+#define VCAP_ES0_CTRL_UPDATE_CNT_DIS_GET(x)\
+ FIELD_GET(VCAP_ES0_CTRL_UPDATE_CNT_DIS, x)
+
+#define VCAP_ES0_CTRL_UPDATE_ADDR GENMASK(18, 3)
+#define VCAP_ES0_CTRL_UPDATE_ADDR_SET(x)\
+ FIELD_PREP(VCAP_ES0_CTRL_UPDATE_ADDR, x)
+#define VCAP_ES0_CTRL_UPDATE_ADDR_GET(x)\
+ FIELD_GET(VCAP_ES0_CTRL_UPDATE_ADDR, x)
+
+#define VCAP_ES0_CTRL_UPDATE_SHOT BIT(2)
+#define VCAP_ES0_CTRL_UPDATE_SHOT_SET(x)\
+ FIELD_PREP(VCAP_ES0_CTRL_UPDATE_SHOT, x)
+#define VCAP_ES0_CTRL_UPDATE_SHOT_GET(x)\
+ FIELD_GET(VCAP_ES0_CTRL_UPDATE_SHOT, x)
+
+#define VCAP_ES0_CTRL_CLEAR_CACHE BIT(1)
+#define VCAP_ES0_CTRL_CLEAR_CACHE_SET(x)\
+ FIELD_PREP(VCAP_ES0_CTRL_CLEAR_CACHE, x)
+#define VCAP_ES0_CTRL_CLEAR_CACHE_GET(x)\
+ FIELD_GET(VCAP_ES0_CTRL_CLEAR_CACHE, x)
+
+#define VCAP_ES0_CTRL_MV_TRAFFIC_IGN BIT(0)
+#define VCAP_ES0_CTRL_MV_TRAFFIC_IGN_SET(x)\
+ FIELD_PREP(VCAP_ES0_CTRL_MV_TRAFFIC_IGN, x)
+#define VCAP_ES0_CTRL_MV_TRAFFIC_IGN_GET(x)\
+ FIELD_GET(VCAP_ES0_CTRL_MV_TRAFFIC_IGN, x)
+
+/* VCAP_ES0:VCAP_CORE_CFG:VCAP_MV_CFG */
+#define VCAP_ES0_CFG __REG(TARGET_VCAP_ES0,\
+ 0, 1, 0, 0, 1, 8, 4, 0, 1, 4)
+
+#define VCAP_ES0_CFG_MV_NUM_POS GENMASK(31, 16)
+#define VCAP_ES0_CFG_MV_NUM_POS_SET(x)\
+ FIELD_PREP(VCAP_ES0_CFG_MV_NUM_POS, x)
+#define VCAP_ES0_CFG_MV_NUM_POS_GET(x)\
+ FIELD_GET(VCAP_ES0_CFG_MV_NUM_POS, x)
+
+#define VCAP_ES0_CFG_MV_SIZE GENMASK(15, 0)
+#define VCAP_ES0_CFG_MV_SIZE_SET(x)\
+ FIELD_PREP(VCAP_ES0_CFG_MV_SIZE, x)
+#define VCAP_ES0_CFG_MV_SIZE_GET(x)\
+ FIELD_GET(VCAP_ES0_CFG_MV_SIZE, x)
+
+/* VCAP_ES0:VCAP_CORE_CACHE:VCAP_ENTRY_DAT */
+#define VCAP_ES0_VCAP_ENTRY_DAT(r) __REG(TARGET_VCAP_ES0,\
+ 0, 1, 8, 0, 1, 904, 0, r, 64, 4)
+
+/* VCAP_ES0:VCAP_CORE_CACHE:VCAP_MASK_DAT */
+#define VCAP_ES0_VCAP_MASK_DAT(r) __REG(TARGET_VCAP_ES0,\
+ 0, 1, 8, 0, 1, 904, 256, r, 64, 4)
+
+/* VCAP_ES0:VCAP_CORE_CACHE:VCAP_ACTION_DAT */
+#define VCAP_ES0_VCAP_ACTION_DAT(r) __REG(TARGET_VCAP_ES0,\
+ 0, 1, 8, 0, 1, 904, 512, r, 64, 4)
+
+/* VCAP_ES0:VCAP_CORE_CACHE:VCAP_CNT_DAT */
+#define VCAP_ES0_VCAP_CNT_DAT(r) __REG(TARGET_VCAP_ES0,\
+ 0, 1, 8, 0, 1, 904, 768, r, 32, 4)
+
+/* VCAP_ES0:VCAP_CORE_CACHE:VCAP_CNT_FW_DAT */
+#define VCAP_ES0_VCAP_CNT_FW_DAT __REG(TARGET_VCAP_ES0,\
+ 0, 1, 8, 0, 1, 904, 896, 0, 1, 4)
+
+/* VCAP_ES0:VCAP_CORE_CACHE:VCAP_TG_DAT */
+#define VCAP_ES0_VCAP_TG_DAT __REG(TARGET_VCAP_ES0,\
+ 0, 1, 8, 0, 1, 904, 900, 0, 1, 4)
+
+/* VCAP_ES0:VCAP_CORE_MAP:VCAP_CORE_IDX */
+#define VCAP_ES0_IDX __REG(TARGET_VCAP_ES0,\
+ 0, 1, 912, 0, 1, 8, 0, 0, 1, 4)
+
+#define VCAP_ES0_IDX_CORE_IDX GENMASK(3, 0)
+#define VCAP_ES0_IDX_CORE_IDX_SET(x)\
+ FIELD_PREP(VCAP_ES0_IDX_CORE_IDX, x)
+#define VCAP_ES0_IDX_CORE_IDX_GET(x)\
+ FIELD_GET(VCAP_ES0_IDX_CORE_IDX, x)
+
+/* VCAP_ES0:VCAP_CORE_MAP:VCAP_CORE_MAP */
+#define VCAP_ES0_MAP __REG(TARGET_VCAP_ES0,\
+ 0, 1, 912, 0, 1, 8, 4, 0, 1, 4)
+
+#define VCAP_ES0_MAP_CORE_MAP GENMASK(2, 0)
+#define VCAP_ES0_MAP_CORE_MAP_SET(x)\
+ FIELD_PREP(VCAP_ES0_MAP_CORE_MAP, x)
+#define VCAP_ES0_MAP_CORE_MAP_GET(x)\
+ FIELD_GET(VCAP_ES0_MAP_CORE_MAP, x)
+
+/* VCAP_ES0:VCAP_CORE_STICKY:VCAP_STICKY */
+#define VCAP_ES0_VCAP_STICKY __REG(TARGET_VCAP_ES0,\
+ 0, 1, 920, 0, 1, 4, 0, 0, 1, 4)
+
+#define VCAP_ES0_VCAP_STICKY_VCAP_ROW_DELETED_STICKY BIT(0)
+#define VCAP_ES0_VCAP_STICKY_VCAP_ROW_DELETED_STICKY_SET(x)\
+ FIELD_PREP(VCAP_ES0_VCAP_STICKY_VCAP_ROW_DELETED_STICKY, x)
+#define VCAP_ES0_VCAP_STICKY_VCAP_ROW_DELETED_STICKY_GET(x)\
+ FIELD_GET(VCAP_ES0_VCAP_STICKY_VCAP_ROW_DELETED_STICKY, x)
+
+/* VCAP_ES0:VCAP_CONST:VCAP_VER */
+#define VCAP_ES0_VCAP_VER __REG(TARGET_VCAP_ES0,\
+ 0, 1, 924, 0, 1, 40, 0, 0, 1, 4)
+
+/* VCAP_ES0:VCAP_CONST:ENTRY_WIDTH */
+#define VCAP_ES0_ENTRY_WIDTH __REG(TARGET_VCAP_ES0,\
+ 0, 1, 924, 0, 1, 40, 4, 0, 1, 4)
+
+/* VCAP_ES0:VCAP_CONST:ENTRY_CNT */
+#define VCAP_ES0_ENTRY_CNT __REG(TARGET_VCAP_ES0,\
+ 0, 1, 924, 0, 1, 40, 8, 0, 1, 4)
+
+/* VCAP_ES0:VCAP_CONST:ENTRY_SWCNT */
+#define VCAP_ES0_ENTRY_SWCNT __REG(TARGET_VCAP_ES0,\
+ 0, 1, 924, 0, 1, 40, 12, 0, 1, 4)
+
+/* VCAP_ES0:VCAP_CONST:ENTRY_TG_WIDTH */
+#define VCAP_ES0_ENTRY_TG_WIDTH __REG(TARGET_VCAP_ES0,\
+ 0, 1, 924, 0, 1, 40, 16, 0, 1, 4)
+
+/* VCAP_ES0:VCAP_CONST:ACTION_DEF_CNT */
+#define VCAP_ES0_ACTION_DEF_CNT __REG(TARGET_VCAP_ES0,\
+ 0, 1, 924, 0, 1, 40, 20, 0, 1, 4)
+
+/* VCAP_ES0:VCAP_CONST:ACTION_WIDTH */
+#define VCAP_ES0_ACTION_WIDTH __REG(TARGET_VCAP_ES0,\
+ 0, 1, 924, 0, 1, 40, 24, 0, 1, 4)
+
+/* VCAP_ES0:VCAP_CONST:CNT_WIDTH */
+#define VCAP_ES0_CNT_WIDTH __REG(TARGET_VCAP_ES0,\
+ 0, 1, 924, 0, 1, 40, 28, 0, 1, 4)
+
+/* VCAP_ES0:VCAP_CONST:CORE_CNT */
+#define VCAP_ES0_CORE_CNT __REG(TARGET_VCAP_ES0,\
+ 0, 1, 924, 0, 1, 40, 32, 0, 1, 4)
+
+/* VCAP_ES0:VCAP_CONST:IF_CNT */
+#define VCAP_ES0_IF_CNT __REG(TARGET_VCAP_ES0,\
+ 0, 1, 924, 0, 1, 40, 36, 0, 1, 4)
+
+/* VCAP_ES2:VCAP_CORE_CFG:VCAP_UPDATE_CTRL */
+#define VCAP_ES2_CTRL __REG(TARGET_VCAP_ES2,\
+ 0, 1, 0, 0, 1, 8, 0, 0, 1, 4)
+
+#define VCAP_ES2_CTRL_UPDATE_CMD GENMASK(24, 22)
+#define VCAP_ES2_CTRL_UPDATE_CMD_SET(x)\
+ FIELD_PREP(VCAP_ES2_CTRL_UPDATE_CMD, x)
+#define VCAP_ES2_CTRL_UPDATE_CMD_GET(x)\
+ FIELD_GET(VCAP_ES2_CTRL_UPDATE_CMD, x)
+
+#define VCAP_ES2_CTRL_UPDATE_ENTRY_DIS BIT(21)
+#define VCAP_ES2_CTRL_UPDATE_ENTRY_DIS_SET(x)\
+ FIELD_PREP(VCAP_ES2_CTRL_UPDATE_ENTRY_DIS, x)
+#define VCAP_ES2_CTRL_UPDATE_ENTRY_DIS_GET(x)\
+ FIELD_GET(VCAP_ES2_CTRL_UPDATE_ENTRY_DIS, x)
+
+#define VCAP_ES2_CTRL_UPDATE_ACTION_DIS BIT(20)
+#define VCAP_ES2_CTRL_UPDATE_ACTION_DIS_SET(x)\
+ FIELD_PREP(VCAP_ES2_CTRL_UPDATE_ACTION_DIS, x)
+#define VCAP_ES2_CTRL_UPDATE_ACTION_DIS_GET(x)\
+ FIELD_GET(VCAP_ES2_CTRL_UPDATE_ACTION_DIS, x)
+
+#define VCAP_ES2_CTRL_UPDATE_CNT_DIS BIT(19)
+#define VCAP_ES2_CTRL_UPDATE_CNT_DIS_SET(x)\
+ FIELD_PREP(VCAP_ES2_CTRL_UPDATE_CNT_DIS, x)
+#define VCAP_ES2_CTRL_UPDATE_CNT_DIS_GET(x)\
+ FIELD_GET(VCAP_ES2_CTRL_UPDATE_CNT_DIS, x)
+
+#define VCAP_ES2_CTRL_UPDATE_ADDR GENMASK(18, 3)
+#define VCAP_ES2_CTRL_UPDATE_ADDR_SET(x)\
+ FIELD_PREP(VCAP_ES2_CTRL_UPDATE_ADDR, x)
+#define VCAP_ES2_CTRL_UPDATE_ADDR_GET(x)\
+ FIELD_GET(VCAP_ES2_CTRL_UPDATE_ADDR, x)
+
+#define VCAP_ES2_CTRL_UPDATE_SHOT BIT(2)
+#define VCAP_ES2_CTRL_UPDATE_SHOT_SET(x)\
+ FIELD_PREP(VCAP_ES2_CTRL_UPDATE_SHOT, x)
+#define VCAP_ES2_CTRL_UPDATE_SHOT_GET(x)\
+ FIELD_GET(VCAP_ES2_CTRL_UPDATE_SHOT, x)
+
+#define VCAP_ES2_CTRL_CLEAR_CACHE BIT(1)
+#define VCAP_ES2_CTRL_CLEAR_CACHE_SET(x)\
+ FIELD_PREP(VCAP_ES2_CTRL_CLEAR_CACHE, x)
+#define VCAP_ES2_CTRL_CLEAR_CACHE_GET(x)\
+ FIELD_GET(VCAP_ES2_CTRL_CLEAR_CACHE, x)
+
+#define VCAP_ES2_CTRL_MV_TRAFFIC_IGN BIT(0)
+#define VCAP_ES2_CTRL_MV_TRAFFIC_IGN_SET(x)\
+ FIELD_PREP(VCAP_ES2_CTRL_MV_TRAFFIC_IGN, x)
+#define VCAP_ES2_CTRL_MV_TRAFFIC_IGN_GET(x)\
+ FIELD_GET(VCAP_ES2_CTRL_MV_TRAFFIC_IGN, x)
+
+/* VCAP_ES2:VCAP_CORE_CFG:VCAP_MV_CFG */
+#define VCAP_ES2_CFG __REG(TARGET_VCAP_ES2,\
+ 0, 1, 0, 0, 1, 8, 4, 0, 1, 4)
+
+#define VCAP_ES2_CFG_MV_NUM_POS GENMASK(31, 16)
+#define VCAP_ES2_CFG_MV_NUM_POS_SET(x)\
+ FIELD_PREP(VCAP_ES2_CFG_MV_NUM_POS, x)
+#define VCAP_ES2_CFG_MV_NUM_POS_GET(x)\
+ FIELD_GET(VCAP_ES2_CFG_MV_NUM_POS, x)
+
+#define VCAP_ES2_CFG_MV_SIZE GENMASK(15, 0)
+#define VCAP_ES2_CFG_MV_SIZE_SET(x)\
+ FIELD_PREP(VCAP_ES2_CFG_MV_SIZE, x)
+#define VCAP_ES2_CFG_MV_SIZE_GET(x)\
+ FIELD_GET(VCAP_ES2_CFG_MV_SIZE, x)
+
+/* VCAP_ES2:VCAP_CORE_CACHE:VCAP_ENTRY_DAT */
+#define VCAP_ES2_VCAP_ENTRY_DAT(r) __REG(TARGET_VCAP_ES2,\
+ 0, 1, 8, 0, 1, 904, 0, r, 64, 4)
+
+/* VCAP_ES2:VCAP_CORE_CACHE:VCAP_MASK_DAT */
+#define VCAP_ES2_VCAP_MASK_DAT(r) __REG(TARGET_VCAP_ES2,\
+ 0, 1, 8, 0, 1, 904, 256, r, 64, 4)
+
+/* VCAP_ES2:VCAP_CORE_CACHE:VCAP_ACTION_DAT */
+#define VCAP_ES2_VCAP_ACTION_DAT(r) __REG(TARGET_VCAP_ES2,\
+ 0, 1, 8, 0, 1, 904, 512, r, 64, 4)
+
+/* VCAP_ES2:VCAP_CORE_CACHE:VCAP_CNT_DAT */
+#define VCAP_ES2_VCAP_CNT_DAT(r) __REG(TARGET_VCAP_ES2,\
+ 0, 1, 8, 0, 1, 904, 768, r, 32, 4)
+
+/* VCAP_ES2:VCAP_CORE_CACHE:VCAP_CNT_FW_DAT */
+#define VCAP_ES2_VCAP_CNT_FW_DAT __REG(TARGET_VCAP_ES2,\
+ 0, 1, 8, 0, 1, 904, 896, 0, 1, 4)
+
+/* VCAP_ES2:VCAP_CORE_CACHE:VCAP_TG_DAT */
+#define VCAP_ES2_VCAP_TG_DAT __REG(TARGET_VCAP_ES2,\
+ 0, 1, 8, 0, 1, 904, 900, 0, 1, 4)
+
+/* VCAP_ES2:VCAP_CORE_MAP:VCAP_CORE_IDX */
+#define VCAP_ES2_IDX __REG(TARGET_VCAP_ES2,\
+ 0, 1, 912, 0, 1, 8, 0, 0, 1, 4)
+
+#define VCAP_ES2_IDX_CORE_IDX GENMASK(3, 0)
+#define VCAP_ES2_IDX_CORE_IDX_SET(x)\
+ FIELD_PREP(VCAP_ES2_IDX_CORE_IDX, x)
+#define VCAP_ES2_IDX_CORE_IDX_GET(x)\
+ FIELD_GET(VCAP_ES2_IDX_CORE_IDX, x)
+
+/* VCAP_ES2:VCAP_CORE_MAP:VCAP_CORE_MAP */
+#define VCAP_ES2_MAP __REG(TARGET_VCAP_ES2,\
+ 0, 1, 912, 0, 1, 8, 4, 0, 1, 4)
+
+#define VCAP_ES2_MAP_CORE_MAP GENMASK(2, 0)
+#define VCAP_ES2_MAP_CORE_MAP_SET(x)\
+ FIELD_PREP(VCAP_ES2_MAP_CORE_MAP, x)
+#define VCAP_ES2_MAP_CORE_MAP_GET(x)\
+ FIELD_GET(VCAP_ES2_MAP_CORE_MAP, x)
+
+/* VCAP_ES2:VCAP_CORE_STICKY:VCAP_STICKY */
+#define VCAP_ES2_VCAP_STICKY __REG(TARGET_VCAP_ES2,\
+ 0, 1, 920, 0, 1, 4, 0, 0, 1, 4)
+
+#define VCAP_ES2_VCAP_STICKY_VCAP_ROW_DELETED_STICKY BIT(0)
+#define VCAP_ES2_VCAP_STICKY_VCAP_ROW_DELETED_STICKY_SET(x)\
+ FIELD_PREP(VCAP_ES2_VCAP_STICKY_VCAP_ROW_DELETED_STICKY, x)
+#define VCAP_ES2_VCAP_STICKY_VCAP_ROW_DELETED_STICKY_GET(x)\
+ FIELD_GET(VCAP_ES2_VCAP_STICKY_VCAP_ROW_DELETED_STICKY, x)
+
+/* VCAP_ES2:VCAP_CONST:VCAP_VER */
+#define VCAP_ES2_VCAP_VER __REG(TARGET_VCAP_ES2,\
+ 0, 1, 924, 0, 1, 40, 0, 0, 1, 4)
+
+/* VCAP_ES2:VCAP_CONST:ENTRY_WIDTH */
+#define VCAP_ES2_ENTRY_WIDTH __REG(TARGET_VCAP_ES2,\
+ 0, 1, 924, 0, 1, 40, 4, 0, 1, 4)
+
+/* VCAP_ES2:VCAP_CONST:ENTRY_CNT */
+#define VCAP_ES2_ENTRY_CNT __REG(TARGET_VCAP_ES2,\
+ 0, 1, 924, 0, 1, 40, 8, 0, 1, 4)
+
+/* VCAP_ES2:VCAP_CONST:ENTRY_SWCNT */
+#define VCAP_ES2_ENTRY_SWCNT __REG(TARGET_VCAP_ES2,\
+ 0, 1, 924, 0, 1, 40, 12, 0, 1, 4)
+
+/* VCAP_ES2:VCAP_CONST:ENTRY_TG_WIDTH */
+#define VCAP_ES2_ENTRY_TG_WIDTH __REG(TARGET_VCAP_ES2,\
+ 0, 1, 924, 0, 1, 40, 16, 0, 1, 4)
+
+/* VCAP_ES2:VCAP_CONST:ACTION_DEF_CNT */
+#define VCAP_ES2_ACTION_DEF_CNT __REG(TARGET_VCAP_ES2,\
+ 0, 1, 924, 0, 1, 40, 20, 0, 1, 4)
+
+/* VCAP_ES2:VCAP_CONST:ACTION_WIDTH */
+#define VCAP_ES2_ACTION_WIDTH __REG(TARGET_VCAP_ES2,\
+ 0, 1, 924, 0, 1, 40, 24, 0, 1, 4)
+
+/* VCAP_ES2:VCAP_CONST:CNT_WIDTH */
+#define VCAP_ES2_CNT_WIDTH __REG(TARGET_VCAP_ES2,\
+ 0, 1, 924, 0, 1, 40, 28, 0, 1, 4)
+
+/* VCAP_ES2:VCAP_CONST:CORE_CNT */
+#define VCAP_ES2_CORE_CNT __REG(TARGET_VCAP_ES2,\
+ 0, 1, 924, 0, 1, 40, 32, 0, 1, 4)
+
+/* VCAP_ES2:VCAP_CONST:IF_CNT */
+#define VCAP_ES2_IF_CNT __REG(TARGET_VCAP_ES2,\
+ 0, 1, 924, 0, 1, 40, 36, 0, 1, 4)
+
/* VCAP_SUPER:VCAP_CORE_CFG:VCAP_UPDATE_CTRL */
-#define VCAP_SUPER_CTRL __REG(TARGET_VCAP_SUPER, 0, 1, 0, 0, 1, 8, 0, 0, 1, 4)
+#define VCAP_SUPER_CTRL __REG(TARGET_VCAP_SUPER,\
+ 0, 1, 0, 0, 1, 8, 0, 0, 1, 4)
#define VCAP_SUPER_CTRL_UPDATE_CMD GENMASK(24, 22)
#define VCAP_SUPER_CTRL_UPDATE_CMD_SET(x)\
@@ -5538,7 +7144,8 @@ enum sparx5_target {
FIELD_GET(VCAP_SUPER_CTRL_MV_TRAFFIC_IGN, x)
/* VCAP_SUPER:VCAP_CORE_CFG:VCAP_MV_CFG */
-#define VCAP_SUPER_CFG __REG(TARGET_VCAP_SUPER, 0, 1, 0, 0, 1, 8, 4, 0, 1, 4)
+#define VCAP_SUPER_CFG __REG(TARGET_VCAP_SUPER,\
+ 0, 1, 0, 0, 1, 8, 4, 0, 1, 4)
#define VCAP_SUPER_CFG_MV_NUM_POS GENMASK(31, 16)
#define VCAP_SUPER_CFG_MV_NUM_POS_SET(x)\
@@ -5553,25 +7160,32 @@ enum sparx5_target {
FIELD_GET(VCAP_SUPER_CFG_MV_SIZE, x)
/* VCAP_SUPER:VCAP_CORE_CACHE:VCAP_ENTRY_DAT */
-#define VCAP_SUPER_VCAP_ENTRY_DAT(r) __REG(TARGET_VCAP_SUPER, 0, 1, 8, 0, 1, 904, 0, r, 64, 4)
+#define VCAP_SUPER_VCAP_ENTRY_DAT(r) __REG(TARGET_VCAP_SUPER,\
+ 0, 1, 8, 0, 1, 904, 0, r, 64, 4)
/* VCAP_SUPER:VCAP_CORE_CACHE:VCAP_MASK_DAT */
-#define VCAP_SUPER_VCAP_MASK_DAT(r) __REG(TARGET_VCAP_SUPER, 0, 1, 8, 0, 1, 904, 256, r, 64, 4)
+#define VCAP_SUPER_VCAP_MASK_DAT(r) __REG(TARGET_VCAP_SUPER,\
+ 0, 1, 8, 0, 1, 904, 256, r, 64, 4)
/* VCAP_SUPER:VCAP_CORE_CACHE:VCAP_ACTION_DAT */
-#define VCAP_SUPER_VCAP_ACTION_DAT(r) __REG(TARGET_VCAP_SUPER, 0, 1, 8, 0, 1, 904, 512, r, 64, 4)
+#define VCAP_SUPER_VCAP_ACTION_DAT(r) __REG(TARGET_VCAP_SUPER,\
+ 0, 1, 8, 0, 1, 904, 512, r, 64, 4)
/* VCAP_SUPER:VCAP_CORE_CACHE:VCAP_CNT_DAT */
-#define VCAP_SUPER_VCAP_CNT_DAT(r) __REG(TARGET_VCAP_SUPER, 0, 1, 8, 0, 1, 904, 768, r, 32, 4)
+#define VCAP_SUPER_VCAP_CNT_DAT(r) __REG(TARGET_VCAP_SUPER,\
+ 0, 1, 8, 0, 1, 904, 768, r, 32, 4)
/* VCAP_SUPER:VCAP_CORE_CACHE:VCAP_CNT_FW_DAT */
-#define VCAP_SUPER_VCAP_CNT_FW_DAT __REG(TARGET_VCAP_SUPER, 0, 1, 8, 0, 1, 904, 896, 0, 1, 4)
+#define VCAP_SUPER_VCAP_CNT_FW_DAT __REG(TARGET_VCAP_SUPER,\
+ 0, 1, 8, 0, 1, 904, 896, 0, 1, 4)
/* VCAP_SUPER:VCAP_CORE_CACHE:VCAP_TG_DAT */
-#define VCAP_SUPER_VCAP_TG_DAT __REG(TARGET_VCAP_SUPER, 0, 1, 8, 0, 1, 904, 900, 0, 1, 4)
+#define VCAP_SUPER_VCAP_TG_DAT __REG(TARGET_VCAP_SUPER,\
+ 0, 1, 8, 0, 1, 904, 900, 0, 1, 4)
/* VCAP_SUPER:VCAP_CORE_MAP:VCAP_CORE_IDX */
-#define VCAP_SUPER_IDX __REG(TARGET_VCAP_SUPER, 0, 1, 912, 0, 1, 8, 0, 0, 1, 4)
+#define VCAP_SUPER_IDX __REG(TARGET_VCAP_SUPER,\
+ 0, 1, 912, 0, 1, 8, 0, 0, 1, 4)
#define VCAP_SUPER_IDX_CORE_IDX GENMASK(3, 0)
#define VCAP_SUPER_IDX_CORE_IDX_SET(x)\
@@ -5580,7 +7194,8 @@ enum sparx5_target {
FIELD_GET(VCAP_SUPER_IDX_CORE_IDX, x)
/* VCAP_SUPER:VCAP_CORE_MAP:VCAP_CORE_MAP */
-#define VCAP_SUPER_MAP __REG(TARGET_VCAP_SUPER, 0, 1, 912, 0, 1, 8, 4, 0, 1, 4)
+#define VCAP_SUPER_MAP __REG(TARGET_VCAP_SUPER,\
+ 0, 1, 912, 0, 1, 8, 4, 0, 1, 4)
#define VCAP_SUPER_MAP_CORE_MAP GENMASK(2, 0)
#define VCAP_SUPER_MAP_CORE_MAP_SET(x)\
@@ -5589,37 +7204,48 @@ enum sparx5_target {
FIELD_GET(VCAP_SUPER_MAP_CORE_MAP, x)
/* VCAP_SUPER:VCAP_CONST:VCAP_VER */
-#define VCAP_SUPER_VCAP_VER __REG(TARGET_VCAP_SUPER, 0, 1, 924, 0, 1, 40, 0, 0, 1, 4)
+#define VCAP_SUPER_VCAP_VER __REG(TARGET_VCAP_SUPER,\
+ 0, 1, 924, 0, 1, 40, 0, 0, 1, 4)
/* VCAP_SUPER:VCAP_CONST:ENTRY_WIDTH */
-#define VCAP_SUPER_ENTRY_WIDTH __REG(TARGET_VCAP_SUPER, 0, 1, 924, 0, 1, 40, 4, 0, 1, 4)
+#define VCAP_SUPER_ENTRY_WIDTH __REG(TARGET_VCAP_SUPER,\
+ 0, 1, 924, 0, 1, 40, 4, 0, 1, 4)
/* VCAP_SUPER:VCAP_CONST:ENTRY_CNT */
-#define VCAP_SUPER_ENTRY_CNT __REG(TARGET_VCAP_SUPER, 0, 1, 924, 0, 1, 40, 8, 0, 1, 4)
+#define VCAP_SUPER_ENTRY_CNT __REG(TARGET_VCAP_SUPER,\
+ 0, 1, 924, 0, 1, 40, 8, 0, 1, 4)
/* VCAP_SUPER:VCAP_CONST:ENTRY_SWCNT */
-#define VCAP_SUPER_ENTRY_SWCNT __REG(TARGET_VCAP_SUPER, 0, 1, 924, 0, 1, 40, 12, 0, 1, 4)
+#define VCAP_SUPER_ENTRY_SWCNT __REG(TARGET_VCAP_SUPER,\
+ 0, 1, 924, 0, 1, 40, 12, 0, 1, 4)
/* VCAP_SUPER:VCAP_CONST:ENTRY_TG_WIDTH */
-#define VCAP_SUPER_ENTRY_TG_WIDTH __REG(TARGET_VCAP_SUPER, 0, 1, 924, 0, 1, 40, 16, 0, 1, 4)
+#define VCAP_SUPER_ENTRY_TG_WIDTH __REG(TARGET_VCAP_SUPER,\
+ 0, 1, 924, 0, 1, 40, 16, 0, 1, 4)
/* VCAP_SUPER:VCAP_CONST:ACTION_DEF_CNT */
-#define VCAP_SUPER_ACTION_DEF_CNT __REG(TARGET_VCAP_SUPER, 0, 1, 924, 0, 1, 40, 20, 0, 1, 4)
+#define VCAP_SUPER_ACTION_DEF_CNT __REG(TARGET_VCAP_SUPER,\
+ 0, 1, 924, 0, 1, 40, 20, 0, 1, 4)
/* VCAP_SUPER:VCAP_CONST:ACTION_WIDTH */
-#define VCAP_SUPER_ACTION_WIDTH __REG(TARGET_VCAP_SUPER, 0, 1, 924, 0, 1, 40, 24, 0, 1, 4)
+#define VCAP_SUPER_ACTION_WIDTH __REG(TARGET_VCAP_SUPER,\
+ 0, 1, 924, 0, 1, 40, 24, 0, 1, 4)
/* VCAP_SUPER:VCAP_CONST:CNT_WIDTH */
-#define VCAP_SUPER_CNT_WIDTH __REG(TARGET_VCAP_SUPER, 0, 1, 924, 0, 1, 40, 28, 0, 1, 4)
+#define VCAP_SUPER_CNT_WIDTH __REG(TARGET_VCAP_SUPER,\
+ 0, 1, 924, 0, 1, 40, 28, 0, 1, 4)
/* VCAP_SUPER:VCAP_CONST:CORE_CNT */
-#define VCAP_SUPER_CORE_CNT __REG(TARGET_VCAP_SUPER, 0, 1, 924, 0, 1, 40, 32, 0, 1, 4)
+#define VCAP_SUPER_CORE_CNT __REG(TARGET_VCAP_SUPER,\
+ 0, 1, 924, 0, 1, 40, 32, 0, 1, 4)
/* VCAP_SUPER:VCAP_CONST:IF_CNT */
-#define VCAP_SUPER_IF_CNT __REG(TARGET_VCAP_SUPER, 0, 1, 924, 0, 1, 40, 36, 0, 1, 4)
+#define VCAP_SUPER_IF_CNT __REG(TARGET_VCAP_SUPER,\
+ 0, 1, 924, 0, 1, 40, 36, 0, 1, 4)
/* VCAP_SUPER:RAM_CTRL:RAM_INIT */
-#define VCAP_SUPER_RAM_INIT __REG(TARGET_VCAP_SUPER, 0, 1, 1120, 0, 1, 4, 0, 0, 1, 4)
+#define VCAP_SUPER_RAM_INIT __REG(TARGET_VCAP_SUPER,\
+ 0, 1, 1120, 0, 1, 4, 0, 0, 1, 4)
#define VCAP_SUPER_RAM_INIT_RAM_INIT BIT(1)
#define VCAP_SUPER_RAM_INIT_RAM_INIT_SET(x)\
@@ -5634,7 +7260,8 @@ enum sparx5_target {
FIELD_GET(VCAP_SUPER_RAM_INIT_RAM_CFG_HOOK, x)
/* VOP:RAM_CTRL:RAM_INIT */
-#define VOP_RAM_INIT __REG(TARGET_VOP, 0, 1, 279176, 0, 1, 4, 0, 0, 1, 4)
+#define VOP_RAM_INIT __REG(TARGET_VOP,\
+ 0, 1, 279176, 0, 1, 4, 0, 0, 1, 4)
#define VOP_RAM_INIT_RAM_INIT BIT(1)
#define VOP_RAM_INIT_RAM_INIT_SET(x)\
@@ -5649,7 +7276,8 @@ enum sparx5_target {
FIELD_GET(VOP_RAM_INIT_RAM_CFG_HOOK, x)
/* XQS:SYSTEM:STAT_CFG */
-#define XQS_STAT_CFG __REG(TARGET_XQS, 0, 1, 6768, 0, 1, 872, 860, 0, 1, 4)
+#define XQS_STAT_CFG __REG(TARGET_XQS,\
+ 0, 1, 6768, 0, 1, 872, 860, 0, 1, 4)
#define XQS_STAT_CFG_STAT_CLEAR_SHOT GENMASK(21, 18)
#define XQS_STAT_CFG_STAT_CLEAR_SHOT_SET(x)\
@@ -5676,7 +7304,8 @@ enum sparx5_target {
FIELD_GET(XQS_STAT_CFG_STAT_WRAP_DIS, x)
/* XQS:QLIMIT_SHR:QLIMIT_SHR_TOP_CFG */
-#define XQS_QLIMIT_SHR_TOP_CFG(g) __REG(TARGET_XQS, 0, 1, 7936, g, 4, 48, 0, 0, 1, 4)
+#define XQS_QLIMIT_SHR_TOP_CFG(g) __REG(TARGET_XQS,\
+ 0, 1, 7936, g, 4, 48, 0, 0, 1, 4)
#define XQS_QLIMIT_SHR_TOP_CFG_QLIMIT_SHR_TOP GENMASK(14, 0)
#define XQS_QLIMIT_SHR_TOP_CFG_QLIMIT_SHR_TOP_SET(x)\
@@ -5685,7 +7314,8 @@ enum sparx5_target {
FIELD_GET(XQS_QLIMIT_SHR_TOP_CFG_QLIMIT_SHR_TOP, x)
/* XQS:QLIMIT_SHR:QLIMIT_SHR_ATOP_CFG */
-#define XQS_QLIMIT_SHR_ATOP_CFG(g) __REG(TARGET_XQS, 0, 1, 7936, g, 4, 48, 4, 0, 1, 4)
+#define XQS_QLIMIT_SHR_ATOP_CFG(g) __REG(TARGET_XQS,\
+ 0, 1, 7936, g, 4, 48, 4, 0, 1, 4)
#define XQS_QLIMIT_SHR_ATOP_CFG_QLIMIT_SHR_ATOP GENMASK(14, 0)
#define XQS_QLIMIT_SHR_ATOP_CFG_QLIMIT_SHR_ATOP_SET(x)\
@@ -5694,7 +7324,8 @@ enum sparx5_target {
FIELD_GET(XQS_QLIMIT_SHR_ATOP_CFG_QLIMIT_SHR_ATOP, x)
/* XQS:QLIMIT_SHR:QLIMIT_SHR_CTOP_CFG */
-#define XQS_QLIMIT_SHR_CTOP_CFG(g) __REG(TARGET_XQS, 0, 1, 7936, g, 4, 48, 8, 0, 1, 4)
+#define XQS_QLIMIT_SHR_CTOP_CFG(g) __REG(TARGET_XQS,\
+ 0, 1, 7936, g, 4, 48, 8, 0, 1, 4)
#define XQS_QLIMIT_SHR_CTOP_CFG_QLIMIT_SHR_CTOP GENMASK(14, 0)
#define XQS_QLIMIT_SHR_CTOP_CFG_QLIMIT_SHR_CTOP_SET(x)\
@@ -5703,7 +7334,8 @@ enum sparx5_target {
FIELD_GET(XQS_QLIMIT_SHR_CTOP_CFG_QLIMIT_SHR_CTOP, x)
/* XQS:QLIMIT_SHR:QLIMIT_SHR_QLIM_CFG */
-#define XQS_QLIMIT_SHR_QLIM_CFG(g) __REG(TARGET_XQS, 0, 1, 7936, g, 4, 48, 12, 0, 1, 4)
+#define XQS_QLIMIT_SHR_QLIM_CFG(g) __REG(TARGET_XQS,\
+ 0, 1, 7936, g, 4, 48, 12, 0, 1, 4)
#define XQS_QLIMIT_SHR_QLIM_CFG_QLIMIT_SHR_QLIM GENMASK(14, 0)
#define XQS_QLIMIT_SHR_QLIM_CFG_QLIMIT_SHR_QLIM_SET(x)\
@@ -5712,6 +7344,7 @@ enum sparx5_target {
FIELD_GET(XQS_QLIMIT_SHR_QLIM_CFG_QLIMIT_SHR_QLIM, x)
/* XQS:STAT:CNT */
-#define XQS_CNT(g) __REG(TARGET_XQS, 0, 1, 0, g, 1024, 4, 0, 0, 1, 4)
+#define XQS_CNT(g) __REG(TARGET_XQS,\
+ 0, 1, 0, g, 1024, 4, 0, 0, 1, 4)
#endif /* _SPARX5_MAIN_REGS_H_ */
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_police.c b/drivers/net/ethernet/microchip/sparx5/sparx5_police.c
new file mode 100644
index 000000000000..8ada5cee1342
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_police.c
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip Sparx5 Switch driver
+ *
+ * Copyright (c) 2023 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#include "sparx5_main_regs.h"
+#include "sparx5_main.h"
+
+static int sparx5_policer_service_conf_set(struct sparx5 *sparx5,
+ struct sparx5_policer *pol)
+{
+ u32 idx, pup_tokens, max_pup_tokens, burst, thres;
+ struct sparx5_sdlb_group *g;
+ u64 rate;
+
+ g = &sdlb_groups[pol->group];
+ idx = pol->idx;
+
+ rate = pol->rate * 1000;
+ burst = pol->burst;
+
+ pup_tokens = sparx5_sdlb_pup_token_get(sparx5, g->pup_interval, rate);
+ max_pup_tokens =
+ sparx5_sdlb_pup_token_get(sparx5, g->pup_interval, g->max_rate);
+
+ thres = DIV_ROUND_UP(burst, g->min_burst);
+
+ spx5_wr(ANA_AC_SDLB_PUP_TOKENS_PUP_TOKENS_SET(pup_tokens), sparx5,
+ ANA_AC_SDLB_PUP_TOKENS(idx, 0));
+
+ spx5_rmw(ANA_AC_SDLB_INH_CTRL_PUP_TOKENS_MAX_SET(max_pup_tokens),
+ ANA_AC_SDLB_INH_CTRL_PUP_TOKENS_MAX, sparx5,
+ ANA_AC_SDLB_INH_CTRL(idx, 0));
+
+ spx5_rmw(ANA_AC_SDLB_THRES_THRES_SET(thres), ANA_AC_SDLB_THRES_THRES,
+ sparx5, ANA_AC_SDLB_THRES(idx, 0));
+
+ return 0;
+}
+
+int sparx5_policer_conf_set(struct sparx5 *sparx5, struct sparx5_policer *pol)
+{
+ /* More policer types will be added later */
+ switch (pol->type) {
+ case SPX5_POL_SERVICE:
+ return sparx5_policer_service_conf_set(sparx5, pol);
+ default:
+ break;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_pool.c b/drivers/net/ethernet/microchip/sparx5/sparx5_pool.c
new file mode 100644
index 000000000000..b4b280c6138b
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_pool.c
@@ -0,0 +1,81 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip Sparx5 Switch driver
+ *
+ * Copyright (c) 2023 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#include "sparx5_main_regs.h"
+#include "sparx5_main.h"
+
+static u32 sparx5_pool_id_to_idx(u32 id)
+{
+ return --id;
+}
+
+u32 sparx5_pool_idx_to_id(u32 idx)
+{
+ return ++idx;
+}
+
+/* Release resource from pool.
+ * Return reference count on success, otherwise return error.
+ */
+int sparx5_pool_put(struct sparx5_pool_entry *pool, int size, u32 id)
+{
+ struct sparx5_pool_entry *e_itr;
+
+ e_itr = (pool + sparx5_pool_id_to_idx(id));
+ if (e_itr->ref_cnt == 0)
+ return -EINVAL;
+
+ return --e_itr->ref_cnt;
+}
+
+/* Get resource from pool.
+ * Return reference count on success, otherwise return error.
+ */
+int sparx5_pool_get(struct sparx5_pool_entry *pool, int size, u32 *id)
+{
+ struct sparx5_pool_entry *e_itr;
+ int i;
+
+ for (i = 0, e_itr = pool; i < size; i++, e_itr++) {
+ if (e_itr->ref_cnt == 0) {
+ *id = sparx5_pool_idx_to_id(i);
+ return ++e_itr->ref_cnt;
+ }
+ }
+
+ return -ENOSPC;
+}
+
+/* Get resource from pool that matches index.
+ * Return reference count on success, otherwise return error.
+ */
+int sparx5_pool_get_with_idx(struct sparx5_pool_entry *pool, int size, u32 idx,
+ u32 *id)
+{
+ struct sparx5_pool_entry *e_itr;
+ int i, ret = -ENOSPC;
+
+ for (i = 0, e_itr = pool; i < size; i++, e_itr++) {
+ /* Pool index of first free entry */
+ if (e_itr->ref_cnt == 0 && ret == -ENOSPC)
+ ret = i;
+ /* Tc index already in use ? */
+ if (e_itr->idx == idx && e_itr->ref_cnt > 0) {
+ ret = i;
+ break;
+ }
+ }
+
+ /* Did we find a free entry? */
+ if (ret >= 0) {
+ *id = sparx5_pool_idx_to_id(ret);
+ e_itr = (pool + ret);
+ e_itr->idx = idx;
+ return ++e_itr->ref_cnt;
+ }
+
+ return ret;
+}
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_port.c b/drivers/net/ethernet/microchip/sparx5/sparx5_port.c
index 107b9cd931c0..3a1b1a1f5a19 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_port.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_port.c
@@ -1071,6 +1071,11 @@ int sparx5_port_init(struct sparx5 *sparx5,
/* Discard pause frame 01-80-C2-00-00-01 */
spx5_wr(PAUSE_DISCARD, sparx5, ANA_CL_CAPTURE_BPDU_CFG(port->portno));
+ /* Discard SMAC multicast */
+ spx5_rmw(ANA_CL_FILTER_CTRL_FILTER_SMAC_MC_DIS_SET(0),
+ ANA_CL_FILTER_CTRL_FILTER_SMAC_MC_DIS,
+ sparx5, ANA_CL_FILTER_CTRL(port->portno));
+
if (conf->portmode == PHY_INTERFACE_MODE_QSGMII ||
conf->portmode == PHY_INTERFACE_MODE_SGMII) {
err = sparx5_serdes_set(sparx5, port, conf);
@@ -1151,11 +1156,69 @@ int sparx5_port_qos_set(struct sparx5_port *port,
{
sparx5_port_qos_dscp_set(port, &qos->dscp);
sparx5_port_qos_pcp_set(port, &qos->pcp);
+ sparx5_port_qos_pcp_rewr_set(port, &qos->pcp_rewr);
+ sparx5_port_qos_dscp_rewr_set(port, &qos->dscp_rewr);
sparx5_port_qos_default_set(port, qos);
return 0;
}
+int sparx5_port_qos_pcp_rewr_set(const struct sparx5_port *port,
+ struct sparx5_port_qos_pcp_rewr *qos)
+{
+ int i, mode = SPARX5_PORT_REW_TAG_CTRL_CLASSIFIED;
+ struct sparx5 *sparx5 = port->sparx5;
+ u8 pcp, dei;
+
+ /* Use mapping table, with classified QoS as index, to map QoS and DP
+ * to tagged PCP and DEI, if PCP is trusted. Otherwise use classified
+ * PCP. Classified PCP equals frame PCP.
+ */
+ if (qos->enable)
+ mode = SPARX5_PORT_REW_TAG_CTRL_MAPPED;
+
+ spx5_rmw(REW_TAG_CTRL_TAG_PCP_CFG_SET(mode) |
+ REW_TAG_CTRL_TAG_DEI_CFG_SET(mode),
+ REW_TAG_CTRL_TAG_PCP_CFG | REW_TAG_CTRL_TAG_DEI_CFG,
+ port->sparx5, REW_TAG_CTRL(port->portno));
+
+ for (i = 0; i < ARRAY_SIZE(qos->map.map); i++) {
+ /* Extract PCP and DEI */
+ pcp = qos->map.map[i];
+ if (pcp > SPARX5_PORT_QOS_PCP_COUNT)
+ dei = 1;
+ else
+ dei = 0;
+
+ /* Rewrite PCP and DEI, for each classified QoS class and DP
+ * level. This table is only used if tag ctrl mode is set to
+ * 'mapped'.
+ *
+ * 0:0nd - prio=0 and dp:0 => pcp=0 and dei=0
+ * 0:0de - prio=0 and dp:1 => pcp=0 and dei=1
+ */
+ if (dei) {
+ spx5_rmw(REW_PCP_MAP_DE1_PCP_DE1_SET(pcp),
+ REW_PCP_MAP_DE1_PCP_DE1, sparx5,
+ REW_PCP_MAP_DE1(port->portno, i));
+
+ spx5_rmw(REW_DEI_MAP_DE1_DEI_DE1_SET(dei),
+ REW_DEI_MAP_DE1_DEI_DE1, port->sparx5,
+ REW_DEI_MAP_DE1(port->portno, i));
+ } else {
+ spx5_rmw(REW_PCP_MAP_DE0_PCP_DE0_SET(pcp),
+ REW_PCP_MAP_DE0_PCP_DE0, sparx5,
+ REW_PCP_MAP_DE0(port->portno, i));
+
+ spx5_rmw(REW_DEI_MAP_DE0_DEI_DE0_SET(dei),
+ REW_DEI_MAP_DE0_DEI_DE0, port->sparx5,
+ REW_DEI_MAP_DE0(port->portno, i));
+ }
+ }
+
+ return 0;
+}
+
int sparx5_port_qos_pcp_set(const struct sparx5_port *port,
struct sparx5_port_qos_pcp *qos)
{
@@ -1184,6 +1247,45 @@ int sparx5_port_qos_pcp_set(const struct sparx5_port *port,
return 0;
}
+void sparx5_port_qos_dscp_rewr_mode_set(const struct sparx5_port *port,
+ int mode)
+{
+ spx5_rmw(ANA_CL_QOS_CFG_DSCP_REWR_MODE_SEL_SET(mode),
+ ANA_CL_QOS_CFG_DSCP_REWR_MODE_SEL, port->sparx5,
+ ANA_CL_QOS_CFG(port->portno));
+}
+
+int sparx5_port_qos_dscp_rewr_set(const struct sparx5_port *port,
+ struct sparx5_port_qos_dscp_rewr *qos)
+{
+ struct sparx5 *sparx5 = port->sparx5;
+ bool rewr = false;
+ u16 dscp;
+ int i;
+
+ /* On egress, rewrite DSCP value to either classified DSCP or frame
+ * DSCP. If enabled; classified DSCP, if disabled; frame DSCP.
+ */
+ if (qos->enable)
+ rewr = true;
+
+ spx5_rmw(REW_DSCP_MAP_DSCP_UPDATE_ENA_SET(rewr),
+ REW_DSCP_MAP_DSCP_UPDATE_ENA, sparx5,
+ REW_DSCP_MAP(port->portno));
+
+ /* On ingress, map each classified QoS class and DP to classified DSCP
+ * value. This mapping table is global for all ports.
+ */
+ for (i = 0; i < ARRAY_SIZE(qos->map.map); i++) {
+ dscp = qos->map.map[i];
+ spx5_rmw(ANA_CL_QOS_MAP_CFG_DSCP_REWR_VAL_SET(dscp),
+ ANA_CL_QOS_MAP_CFG_DSCP_REWR_VAL, sparx5,
+ ANA_CL_QOS_MAP_CFG(i));
+ }
+
+ return 0;
+}
+
int sparx5_port_qos_dscp_set(const struct sparx5_port *port,
struct sparx5_port_qos_dscp *qos)
{
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_port.h b/drivers/net/ethernet/microchip/sparx5/sparx5_port.h
index fbafe22e25cc..607c4ff1df6b 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_port.h
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_port.h
@@ -9,6 +9,17 @@
#include "sparx5_main.h"
+/* Port PCP rewrite mode */
+#define SPARX5_PORT_REW_TAG_CTRL_CLASSIFIED 0
+#define SPARX5_PORT_REW_TAG_CTRL_DEFAULT 1
+#define SPARX5_PORT_REW_TAG_CTRL_MAPPED 2
+
+/* Port DSCP rewrite mode */
+#define SPARX5_PORT_REW_DSCP_NONE 0
+#define SPARX5_PORT_REW_DSCP_IF_ZERO 1
+#define SPARX5_PORT_REW_DSCP_SELECTED 2
+#define SPARX5_PORT_REW_DSCP_ALL 3
+
static inline bool sparx5_port_is_2g5(int portno)
{
return portno >= 16 && portno <= 47;
@@ -99,6 +110,15 @@ struct sparx5_port_qos_pcp_map {
u8 map[SPARX5_PORT_QOS_PCP_DEI_COUNT];
};
+struct sparx5_port_qos_pcp_rewr_map {
+ u16 map[SPX5_PRIOS];
+};
+
+#define SPARX5_PORT_QOS_DP_NUM 4
+struct sparx5_port_qos_dscp_rewr_map {
+ u16 map[SPX5_PRIOS * SPARX5_PORT_QOS_DP_NUM];
+};
+
#define SPARX5_PORT_QOS_DSCP_COUNT 64
struct sparx5_port_qos_dscp_map {
u8 map[SPARX5_PORT_QOS_DSCP_COUNT];
@@ -110,15 +130,27 @@ struct sparx5_port_qos_pcp {
bool dp_enable;
};
+struct sparx5_port_qos_pcp_rewr {
+ struct sparx5_port_qos_pcp_rewr_map map;
+ bool enable;
+};
+
struct sparx5_port_qos_dscp {
struct sparx5_port_qos_dscp_map map;
bool qos_enable;
bool dp_enable;
};
+struct sparx5_port_qos_dscp_rewr {
+ struct sparx5_port_qos_dscp_rewr_map map;
+ bool enable;
+};
+
struct sparx5_port_qos {
struct sparx5_port_qos_pcp pcp;
+ struct sparx5_port_qos_pcp_rewr pcp_rewr;
struct sparx5_port_qos_dscp dscp;
+ struct sparx5_port_qos_dscp_rewr dscp_rewr;
u8 default_prio;
};
@@ -127,9 +159,18 @@ int sparx5_port_qos_set(struct sparx5_port *port, struct sparx5_port_qos *qos);
int sparx5_port_qos_pcp_set(const struct sparx5_port *port,
struct sparx5_port_qos_pcp *qos);
+int sparx5_port_qos_pcp_rewr_set(const struct sparx5_port *port,
+ struct sparx5_port_qos_pcp_rewr *qos);
+
int sparx5_port_qos_dscp_set(const struct sparx5_port *port,
struct sparx5_port_qos_dscp *qos);
+void sparx5_port_qos_dscp_rewr_mode_set(const struct sparx5_port *port,
+ int mode);
+
+int sparx5_port_qos_dscp_rewr_set(const struct sparx5_port *port,
+ struct sparx5_port_qos_dscp_rewr *qos);
+
int sparx5_port_qos_default_set(const struct sparx5_port *port,
const struct sparx5_port_qos *qos);
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_psfp.c b/drivers/net/ethernet/microchip/sparx5/sparx5_psfp.c
new file mode 100644
index 000000000000..8dee1ab1fa75
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_psfp.c
@@ -0,0 +1,332 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip Sparx5 Switch driver
+ *
+ * Copyright (c) 2023 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#include "sparx5_main_regs.h"
+#include "sparx5_main.h"
+
+#define SPX5_PSFP_SF_CNT 1024
+#define SPX5_PSFP_SG_CONFIG_CHANGE_SLEEP 1000
+#define SPX5_PSFP_SG_CONFIG_CHANGE_TIMEO 100000
+
+/* Pool of available service policers */
+static struct sparx5_pool_entry sparx5_psfp_fm_pool[SPX5_SDLB_CNT];
+
+/* Pool of available stream gates */
+static struct sparx5_pool_entry sparx5_psfp_sg_pool[SPX5_PSFP_SG_CNT];
+
+/* Pool of available stream filters */
+static struct sparx5_pool_entry sparx5_psfp_sf_pool[SPX5_PSFP_SF_CNT];
+
+static int sparx5_psfp_sf_get(u32 *id)
+{
+ return sparx5_pool_get(sparx5_psfp_sf_pool, SPX5_PSFP_SF_CNT, id);
+}
+
+static int sparx5_psfp_sf_put(u32 id)
+{
+ return sparx5_pool_put(sparx5_psfp_sf_pool, SPX5_PSFP_SF_CNT, id);
+}
+
+static int sparx5_psfp_sg_get(u32 idx, u32 *id)
+{
+ return sparx5_pool_get_with_idx(sparx5_psfp_sg_pool, SPX5_PSFP_SG_CNT,
+ idx, id);
+}
+
+static int sparx5_psfp_sg_put(u32 id)
+{
+ return sparx5_pool_put(sparx5_psfp_sg_pool, SPX5_PSFP_SG_CNT, id);
+}
+
+static int sparx5_psfp_fm_get(u32 idx, u32 *id)
+{
+ return sparx5_pool_get_with_idx(sparx5_psfp_fm_pool, SPX5_SDLB_CNT, idx,
+ id);
+}
+
+static int sparx5_psfp_fm_put(u32 id)
+{
+ return sparx5_pool_put(sparx5_psfp_fm_pool, SPX5_SDLB_CNT, id);
+}
+
+u32 sparx5_psfp_isdx_get_sf(struct sparx5 *sparx5, u32 isdx)
+{
+ return ANA_L2_TSN_CFG_TSN_SFID_GET(spx5_rd(sparx5,
+ ANA_L2_TSN_CFG(isdx)));
+}
+
+u32 sparx5_psfp_isdx_get_fm(struct sparx5 *sparx5, u32 isdx)
+{
+ return ANA_L2_DLB_CFG_DLB_IDX_GET(spx5_rd(sparx5,
+ ANA_L2_DLB_CFG(isdx)));
+}
+
+u32 sparx5_psfp_sf_get_sg(struct sparx5 *sparx5, u32 sfid)
+{
+ return ANA_AC_TSN_SF_CFG_TSN_SGID_GET(spx5_rd(sparx5,
+ ANA_AC_TSN_SF_CFG(sfid)));
+}
+
+void sparx5_isdx_conf_set(struct sparx5 *sparx5, u32 isdx, u32 sfid, u32 fmid)
+{
+ spx5_rmw(ANA_L2_TSN_CFG_TSN_SFID_SET(sfid), ANA_L2_TSN_CFG_TSN_SFID,
+ sparx5, ANA_L2_TSN_CFG(isdx));
+
+ spx5_rmw(ANA_L2_DLB_CFG_DLB_IDX_SET(fmid), ANA_L2_DLB_CFG_DLB_IDX,
+ sparx5, ANA_L2_DLB_CFG(isdx));
+}
+
+/* Internal priority value to internal priority selector */
+static u32 sparx5_psfp_ipv_to_ips(s32 ipv)
+{
+ return ipv > 0 ? (ipv | BIT(3)) : 0;
+}
+
+static int sparx5_psfp_sgid_get_status(struct sparx5 *sparx5)
+{
+ return spx5_rd(sparx5, ANA_AC_SG_ACCESS_CTRL);
+}
+
+static int sparx5_psfp_sgid_wait_for_completion(struct sparx5 *sparx5)
+{
+ u32 val;
+
+ return readx_poll_timeout(sparx5_psfp_sgid_get_status, sparx5, val,
+ !ANA_AC_SG_ACCESS_CTRL_CONFIG_CHANGE_GET(val),
+ SPX5_PSFP_SG_CONFIG_CHANGE_SLEEP,
+ SPX5_PSFP_SG_CONFIG_CHANGE_TIMEO);
+}
+
+static void sparx5_psfp_sg_config_change(struct sparx5 *sparx5, u32 id)
+{
+ spx5_wr(ANA_AC_SG_ACCESS_CTRL_SGID_SET(id), sparx5,
+ ANA_AC_SG_ACCESS_CTRL);
+
+ spx5_wr(ANA_AC_SG_ACCESS_CTRL_CONFIG_CHANGE_SET(1) |
+ ANA_AC_SG_ACCESS_CTRL_SGID_SET(id),
+ sparx5, ANA_AC_SG_ACCESS_CTRL);
+
+ if (sparx5_psfp_sgid_wait_for_completion(sparx5) < 0)
+ pr_debug("%s:%d timed out waiting for sgid completion",
+ __func__, __LINE__);
+}
+
+static void sparx5_psfp_sf_set(struct sparx5 *sparx5, u32 id,
+ const struct sparx5_psfp_sf *sf)
+{
+ /* Configure stream gate*/
+ spx5_rmw(ANA_AC_TSN_SF_CFG_TSN_SGID_SET(sf->sgid) |
+ ANA_AC_TSN_SF_CFG_TSN_MAX_SDU_SET(sf->max_sdu) |
+ ANA_AC_TSN_SF_CFG_BLOCK_OVERSIZE_STATE_SET(sf->sblock_osize) |
+ ANA_AC_TSN_SF_CFG_BLOCK_OVERSIZE_ENA_SET(sf->sblock_osize_ena),
+ ANA_AC_TSN_SF_CFG_TSN_SGID | ANA_AC_TSN_SF_CFG_TSN_MAX_SDU |
+ ANA_AC_TSN_SF_CFG_BLOCK_OVERSIZE_STATE |
+ ANA_AC_TSN_SF_CFG_BLOCK_OVERSIZE_ENA,
+ sparx5, ANA_AC_TSN_SF_CFG(id));
+}
+
+static int sparx5_psfp_sg_set(struct sparx5 *sparx5, u32 id,
+ const struct sparx5_psfp_sg *sg)
+{
+ u32 ips, base_lsb, base_msb, accum_time_interval = 0;
+ const struct sparx5_psfp_gce *gce;
+ int i;
+
+ ips = sparx5_psfp_ipv_to_ips(sg->ipv);
+ base_lsb = sg->basetime.tv_sec & 0xffffffff;
+ base_msb = sg->basetime.tv_sec >> 32;
+
+ /* Set stream gate id */
+ spx5_wr(ANA_AC_SG_ACCESS_CTRL_SGID_SET(id), sparx5,
+ ANA_AC_SG_ACCESS_CTRL);
+
+ /* Write AdminPSFP values */
+ spx5_wr(sg->basetime.tv_nsec, sparx5, ANA_AC_SG_CONFIG_REG_1);
+ spx5_wr(base_lsb, sparx5, ANA_AC_SG_CONFIG_REG_2);
+
+ spx5_rmw(ANA_AC_SG_CONFIG_REG_3_BASE_TIME_SEC_MSB_SET(base_msb) |
+ ANA_AC_SG_CONFIG_REG_3_INIT_IPS_SET(ips) |
+ ANA_AC_SG_CONFIG_REG_3_LIST_LENGTH_SET(sg->num_entries) |
+ ANA_AC_SG_CONFIG_REG_3_INIT_GATE_STATE_SET(sg->gate_state) |
+ ANA_AC_SG_CONFIG_REG_3_GATE_ENABLE_SET(1),
+ ANA_AC_SG_CONFIG_REG_3_BASE_TIME_SEC_MSB |
+ ANA_AC_SG_CONFIG_REG_3_INIT_IPS |
+ ANA_AC_SG_CONFIG_REG_3_LIST_LENGTH |
+ ANA_AC_SG_CONFIG_REG_3_INIT_GATE_STATE |
+ ANA_AC_SG_CONFIG_REG_3_GATE_ENABLE,
+ sparx5, ANA_AC_SG_CONFIG_REG_3);
+
+ spx5_wr(sg->cycletime, sparx5, ANA_AC_SG_CONFIG_REG_4);
+ spx5_wr(sg->cycletimeext, sparx5, ANA_AC_SG_CONFIG_REG_5);
+
+ /* For each scheduling entry */
+ for (i = 0; i < sg->num_entries; i++) {
+ gce = &sg->gce[i];
+ ips = sparx5_psfp_ipv_to_ips(gce->ipv);
+ /* hardware needs TimeInterval to be cumulative */
+ accum_time_interval += gce->interval;
+ /* Set gate state */
+ spx5_wr(ANA_AC_SG_GCL_GS_CONFIG_IPS_SET(ips) |
+ ANA_AC_SG_GCL_GS_CONFIG_GATE_STATE_SET(gce->gate_state),
+ sparx5, ANA_AC_SG_GCL_GS_CONFIG(i));
+
+ /* Set time interval */
+ spx5_wr(accum_time_interval, sparx5,
+ ANA_AC_SG_GCL_TI_CONFIG(i));
+
+ /* Set maximum octets */
+ spx5_wr(gce->maxoctets, sparx5, ANA_AC_SG_GCL_OCT_CONFIG(i));
+ }
+
+ return 0;
+}
+
+static int sparx5_sdlb_conf_set(struct sparx5 *sparx5,
+ struct sparx5_psfp_fm *fm)
+{
+ int (*sparx5_sdlb_group_action)(struct sparx5 *sparx5, u32 group,
+ u32 idx);
+
+ if (!fm->pol.rate && !fm->pol.burst)
+ sparx5_sdlb_group_action = &sparx5_sdlb_group_del;
+ else
+ sparx5_sdlb_group_action = &sparx5_sdlb_group_add;
+
+ sparx5_policer_conf_set(sparx5, &fm->pol);
+
+ return sparx5_sdlb_group_action(sparx5, fm->pol.group, fm->pol.idx);
+}
+
+int sparx5_psfp_sf_add(struct sparx5 *sparx5, const struct sparx5_psfp_sf *sf,
+ u32 *id)
+{
+ int ret;
+
+ ret = sparx5_psfp_sf_get(id);
+ if (ret < 0)
+ return ret;
+
+ sparx5_psfp_sf_set(sparx5, *id, sf);
+
+ return 0;
+}
+
+int sparx5_psfp_sf_del(struct sparx5 *sparx5, u32 id)
+{
+ const struct sparx5_psfp_sf sf = { 0 };
+
+ sparx5_psfp_sf_set(sparx5, id, &sf);
+
+ return sparx5_psfp_sf_put(id);
+}
+
+int sparx5_psfp_sg_add(struct sparx5 *sparx5, u32 uidx,
+ struct sparx5_psfp_sg *sg, u32 *id)
+{
+ ktime_t basetime;
+ int ret;
+
+ ret = sparx5_psfp_sg_get(uidx, id);
+ if (ret < 0)
+ return ret;
+ /* Was already in use, no need to reconfigure */
+ if (ret > 1)
+ return 0;
+
+ /* Calculate basetime for this stream gate */
+ sparx5_new_base_time(sparx5, sg->cycletime, 0, &basetime);
+ sg->basetime = ktime_to_timespec64(basetime);
+
+ sparx5_psfp_sg_set(sparx5, *id, sg);
+
+ /* Signal hardware to copy AdminPSFP values into OperPSFP values */
+ sparx5_psfp_sg_config_change(sparx5, *id);
+
+ return 0;
+}
+
+int sparx5_psfp_sg_del(struct sparx5 *sparx5, u32 id)
+{
+ const struct sparx5_psfp_sg sg = { 0 };
+ int ret;
+
+ ret = sparx5_psfp_sg_put(id);
+ if (ret < 0)
+ return ret;
+ /* Stream gate still in use ? */
+ if (ret > 0)
+ return 0;
+
+ return sparx5_psfp_sg_set(sparx5, id, &sg);
+}
+
+int sparx5_psfp_fm_add(struct sparx5 *sparx5, u32 uidx,
+ struct sparx5_psfp_fm *fm, u32 *id)
+{
+ struct sparx5_policer *pol = &fm->pol;
+ int ret;
+
+ /* Get flow meter */
+ ret = sparx5_psfp_fm_get(uidx, &fm->pol.idx);
+ if (ret < 0)
+ return ret;
+ /* Was already in use, no need to reconfigure */
+ if (ret > 1)
+ return 0;
+
+ ret = sparx5_sdlb_group_get_by_rate(sparx5, pol->rate, pol->burst);
+ if (ret < 0)
+ return ret;
+
+ fm->pol.group = ret;
+
+ ret = sparx5_sdlb_conf_set(sparx5, fm);
+ if (ret < 0)
+ return ret;
+
+ *id = fm->pol.idx;
+
+ return 0;
+}
+
+int sparx5_psfp_fm_del(struct sparx5 *sparx5, u32 id)
+{
+ struct sparx5_psfp_fm fm = { .pol.idx = id,
+ .pol.type = SPX5_POL_SERVICE };
+ int ret;
+
+ /* Find the group that this lb belongs to */
+ ret = sparx5_sdlb_group_get_by_index(sparx5, id, &fm.pol.group);
+ if (ret < 0)
+ return ret;
+
+ ret = sparx5_psfp_fm_put(id);
+ if (ret < 0)
+ return ret;
+ /* Do not reset flow-meter if still in use. */
+ if (ret > 0)
+ return 0;
+
+ return sparx5_sdlb_conf_set(sparx5, &fm);
+}
+
+void sparx5_psfp_init(struct sparx5 *sparx5)
+{
+ const struct sparx5_sdlb_group *group;
+ int i;
+
+ for (i = 0; i < SPX5_SDLB_GROUP_CNT; i++) {
+ group = &sdlb_groups[i];
+ sparx5_sdlb_group_init(sparx5, group->max_rate,
+ group->min_burst, group->frame_size, i);
+ }
+
+ spx5_wr(ANA_AC_SG_CYCLETIME_UPDATE_PERIOD_SG_CT_UPDATE_ENA_SET(1),
+ sparx5, ANA_AC_SG_CYCLETIME_UPDATE_PERIOD);
+
+ spx5_rmw(ANA_L2_FWD_CFG_ISDX_LOOKUP_ENA_SET(1),
+ ANA_L2_FWD_CFG_ISDX_LOOKUP_ENA, sparx5, ANA_L2_FWD_CFG);
+}
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_ptp.c b/drivers/net/ethernet/microchip/sparx5/sparx5_ptp.c
index 0ed1ea7727c5..0edb98cef7e4 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_ptp.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_ptp.c
@@ -476,8 +476,7 @@ static int sparx5_ptp_settime64(struct ptp_clock_info *ptp,
return 0;
}
-static int sparx5_ptp_gettime64(struct ptp_clock_info *ptp,
- struct timespec64 *ts)
+int sparx5_ptp_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts)
{
struct sparx5_phc *phc = container_of(ptp, struct sparx5_phc, info);
struct sparx5 *sparx5 = phc->sparx5;
@@ -633,7 +632,7 @@ int sparx5_ptp_init(struct sparx5 *sparx5)
/* Enable master counters */
spx5_wr(PTP_PTP_DOM_CFG_PTP_ENA_SET(0x7), sparx5, PTP_PTP_DOM_CFG);
- for (i = 0; i < sparx5->port_count; i++) {
+ for (i = 0; i < SPX5_PORTS; i++) {
port = sparx5->ports[i];
if (!port)
continue;
@@ -649,7 +648,7 @@ void sparx5_ptp_deinit(struct sparx5 *sparx5)
struct sparx5_port *port;
int i;
- for (i = 0; i < sparx5->port_count; i++) {
+ for (i = 0; i < SPX5_PORTS; i++) {
port = sparx5->ports[i];
if (!port)
continue;
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_qos.c b/drivers/net/ethernet/microchip/sparx5/sparx5_qos.c
index 379e540e5e6a..5f34febaee6b 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_qos.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_qos.c
@@ -9,6 +9,63 @@
#include "sparx5_main.h"
#include "sparx5_qos.h"
+/* Calculate new base_time based on cycle_time.
+ *
+ * The hardware requires a base_time that is always in the future.
+ * We define threshold_time as current_time + (2 * cycle_time).
+ * If base_time is below threshold_time this function recalculates it to be in
+ * the interval:
+ * threshold_time <= base_time < (threshold_time + cycle_time)
+ *
+ * A very simple algorithm could be like this:
+ * new_base_time = org_base_time + N * cycle_time
+ * using the lowest N so (new_base_time >= threshold_time
+ */
+void sparx5_new_base_time(struct sparx5 *sparx5, const u32 cycle_time,
+ const ktime_t org_base_time, ktime_t *new_base_time)
+{
+ ktime_t current_time, threshold_time, new_time;
+ struct timespec64 ts;
+ u64 nr_of_cycles_p2;
+ u64 nr_of_cycles;
+ u64 diff_time;
+
+ new_time = org_base_time;
+
+ sparx5_ptp_gettime64(&sparx5->phc[SPARX5_PHC_PORT].info, &ts);
+ current_time = timespec64_to_ktime(ts);
+ threshold_time = current_time + (2 * cycle_time);
+ diff_time = threshold_time - new_time;
+ nr_of_cycles = div_u64(diff_time, cycle_time);
+ nr_of_cycles_p2 = 1; /* Use 2^0 as start value */
+
+ if (new_time >= threshold_time) {
+ *new_base_time = new_time;
+ return;
+ }
+
+ /* Calculate the smallest power of 2 (nr_of_cycles_p2)
+ * that is larger than nr_of_cycles.
+ */
+ while (nr_of_cycles_p2 < nr_of_cycles)
+ nr_of_cycles_p2 <<= 1; /* Next (higher) power of 2 */
+
+ /* Add as big chunks (power of 2 * cycle_time)
+ * as possible for each power of 2
+ */
+ while (nr_of_cycles_p2) {
+ if (new_time < threshold_time) {
+ new_time += cycle_time * nr_of_cycles_p2;
+ while (new_time < threshold_time)
+ new_time += cycle_time * nr_of_cycles_p2;
+ new_time -= cycle_time * nr_of_cycles_p2;
+ }
+ nr_of_cycles_p2 >>= 1; /* Next (lower) power of 2 */
+ }
+ new_time += cycle_time;
+ *new_base_time = new_time;
+}
+
/* Max rates for leak groups */
static const u32 spx5_hsch_max_group_rate[SPX5_HSCH_LEAK_GRP_CNT] = {
1048568, /* 1.049 Gbps */
@@ -393,6 +450,8 @@ int sparx5_qos_init(struct sparx5 *sparx5)
if (ret < 0)
return ret;
+ sparx5_psfp_init(sparx5);
+
return 0;
}
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_sdlb.c b/drivers/net/ethernet/microchip/sparx5/sparx5_sdlb.c
new file mode 100644
index 000000000000..f5267218caeb
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_sdlb.c
@@ -0,0 +1,335 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip Sparx5 Switch driver
+ *
+ * Copyright (c) 2023 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#include "sparx5_main_regs.h"
+#include "sparx5_main.h"
+
+struct sparx5_sdlb_group sdlb_groups[SPX5_SDLB_GROUP_CNT] = {
+ { SPX5_SDLB_GROUP_RATE_MAX, 8192 / 1, 64 }, /* 25 G */
+ { 15000000000ULL, 8192 / 1, 64 }, /* 15 G */
+ { 10000000000ULL, 8192 / 1, 64 }, /* 10 G */
+ { 5000000000ULL, 8192 / 1, 64 }, /* 5 G */
+ { 2500000000ULL, 8192 / 1, 64 }, /* 2.5 G */
+ { 1000000000ULL, 8192 / 2, 64 }, /* 1 G */
+ { 500000000ULL, 8192 / 2, 64 }, /* 500 M */
+ { 100000000ULL, 8192 / 4, 64 }, /* 100 M */
+ { 50000000ULL, 8192 / 4, 64 }, /* 50 M */
+ { 5000000ULL, 8192 / 8, 64 } /* 5 M */
+};
+
+int sparx5_sdlb_clk_hz_get(struct sparx5 *sparx5)
+{
+ u32 clk_per_100ps;
+ u64 clk_hz;
+
+ clk_per_100ps = HSCH_SYS_CLK_PER_100PS_GET(spx5_rd(sparx5,
+ HSCH_SYS_CLK_PER));
+ if (!clk_per_100ps)
+ clk_per_100ps = SPX5_CLK_PER_100PS_DEFAULT;
+
+ clk_hz = (10 * 1000 * 1000) / clk_per_100ps;
+ return clk_hz *= 1000;
+}
+
+static int sparx5_sdlb_pup_interval_get(struct sparx5 *sparx5, u32 max_token,
+ u64 max_rate)
+{
+ u64 clk_hz;
+
+ clk_hz = sparx5_sdlb_clk_hz_get(sparx5);
+
+ return div64_u64((8 * clk_hz * max_token), max_rate);
+}
+
+int sparx5_sdlb_pup_token_get(struct sparx5 *sparx5, u32 pup_interval, u64 rate)
+{
+ u64 clk_hz;
+
+ if (!rate)
+ return SPX5_SDLB_PUP_TOKEN_DISABLE;
+
+ clk_hz = sparx5_sdlb_clk_hz_get(sparx5);
+
+ return DIV64_U64_ROUND_UP((rate * pup_interval), (clk_hz * 8));
+}
+
+static void sparx5_sdlb_group_disable(struct sparx5 *sparx5, u32 group)
+{
+ spx5_rmw(ANA_AC_SDLB_PUP_CTRL_PUP_ENA_SET(0),
+ ANA_AC_SDLB_PUP_CTRL_PUP_ENA, sparx5,
+ ANA_AC_SDLB_PUP_CTRL(group));
+}
+
+static void sparx5_sdlb_group_enable(struct sparx5 *sparx5, u32 group)
+{
+ spx5_rmw(ANA_AC_SDLB_PUP_CTRL_PUP_ENA_SET(1),
+ ANA_AC_SDLB_PUP_CTRL_PUP_ENA, sparx5,
+ ANA_AC_SDLB_PUP_CTRL(group));
+}
+
+static u32 sparx5_sdlb_group_get_first(struct sparx5 *sparx5, u32 group)
+{
+ u32 val;
+
+ val = spx5_rd(sparx5, ANA_AC_SDLB_XLB_START(group));
+
+ return ANA_AC_SDLB_XLB_START_LBSET_START_GET(val);
+}
+
+static u32 sparx5_sdlb_group_get_next(struct sparx5 *sparx5, u32 group,
+ u32 lb)
+{
+ u32 val;
+
+ val = spx5_rd(sparx5, ANA_AC_SDLB_XLB_NEXT(lb));
+
+ return ANA_AC_SDLB_XLB_NEXT_LBSET_NEXT_GET(val);
+}
+
+static bool sparx5_sdlb_group_is_first(struct sparx5 *sparx5, u32 group,
+ u32 lb)
+{
+ return lb == sparx5_sdlb_group_get_first(sparx5, group);
+}
+
+static bool sparx5_sdlb_group_is_last(struct sparx5 *sparx5, u32 group,
+ u32 lb)
+{
+ return lb == sparx5_sdlb_group_get_next(sparx5, group, lb);
+}
+
+static bool sparx5_sdlb_group_is_empty(struct sparx5 *sparx5, u32 group)
+{
+ u32 val;
+
+ val = spx5_rd(sparx5, ANA_AC_SDLB_PUP_CTRL(group));
+
+ return ANA_AC_SDLB_PUP_CTRL_PUP_ENA_GET(val) == 0;
+}
+
+static u32 sparx5_sdlb_group_get_last(struct sparx5 *sparx5, u32 group)
+{
+ u32 itr, next;
+
+ itr = sparx5_sdlb_group_get_first(sparx5, group);
+
+ for (;;) {
+ next = sparx5_sdlb_group_get_next(sparx5, group, itr);
+ if (itr == next)
+ return itr;
+
+ itr = next;
+ }
+}
+
+static bool sparx5_sdlb_group_is_singular(struct sparx5 *sparx5, u32 group)
+{
+ if (sparx5_sdlb_group_is_empty(sparx5, group))
+ return false;
+
+ return sparx5_sdlb_group_get_first(sparx5, group) ==
+ sparx5_sdlb_group_get_last(sparx5, group);
+}
+
+static int sparx5_sdlb_group_get_adjacent(struct sparx5 *sparx5, u32 group,
+ u32 idx, u32 *prev, u32 *next,
+ u32 *first)
+{
+ u32 itr;
+
+ *first = sparx5_sdlb_group_get_first(sparx5, group);
+ *prev = *first;
+ *next = *first;
+ itr = *first;
+
+ for (;;) {
+ *next = sparx5_sdlb_group_get_next(sparx5, group, itr);
+
+ if (itr == idx)
+ return 0; /* Found it */
+
+ if (itr == *next)
+ return -EINVAL; /* Was not found */
+
+ *prev = itr;
+ itr = *next;
+ }
+}
+
+static int sparx5_sdlb_group_get_count(struct sparx5 *sparx5, u32 group)
+{
+ u32 itr, next;
+ int count = 0;
+
+ itr = sparx5_sdlb_group_get_first(sparx5, group);
+
+ for (;;) {
+ next = sparx5_sdlb_group_get_next(sparx5, group, itr);
+ if (itr == next)
+ return count;
+
+ itr = next;
+ count++;
+ }
+}
+
+int sparx5_sdlb_group_get_by_rate(struct sparx5 *sparx5, u32 rate, u32 burst)
+{
+ const struct sparx5_sdlb_group *group;
+ u64 rate_bps;
+ int i, count;
+
+ rate_bps = rate * 1000;
+
+ for (i = SPX5_SDLB_GROUP_CNT - 1; i >= 0; i--) {
+ group = &sdlb_groups[i];
+
+ count = sparx5_sdlb_group_get_count(sparx5, i);
+
+ /* Check that this group is not full.
+ * According to LB group configuration rules: the number of XLBs
+ * in a group must not exceed PUP_INTERVAL/4 - 1.
+ */
+ if (count > ((group->pup_interval / 4) - 1))
+ continue;
+
+ if (rate_bps < group->max_rate)
+ return i;
+ }
+
+ return -ENOSPC;
+}
+
+int sparx5_sdlb_group_get_by_index(struct sparx5 *sparx5, u32 idx, u32 *group)
+{
+ u32 itr, next;
+ int i;
+
+ for (i = 0; i < SPX5_SDLB_GROUP_CNT; i++) {
+ if (sparx5_sdlb_group_is_empty(sparx5, i))
+ continue;
+
+ itr = sparx5_sdlb_group_get_first(sparx5, i);
+
+ for (;;) {
+ next = sparx5_sdlb_group_get_next(sparx5, i, itr);
+
+ if (itr == idx) {
+ *group = i;
+ return 0; /* Found it */
+ }
+ if (itr == next)
+ break; /* Was not found */
+
+ itr = next;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int sparx5_sdlb_group_link(struct sparx5 *sparx5, u32 group, u32 idx,
+ u32 first, u32 next, bool empty)
+{
+ /* Stop leaking */
+ sparx5_sdlb_group_disable(sparx5, group);
+
+ if (empty)
+ return 0;
+
+ /* Link insertion lb to next lb */
+ spx5_wr(ANA_AC_SDLB_XLB_NEXT_LBSET_NEXT_SET(next) |
+ ANA_AC_SDLB_XLB_NEXT_LBGRP_SET(group),
+ sparx5, ANA_AC_SDLB_XLB_NEXT(idx));
+
+ /* Set the first lb */
+ spx5_wr(ANA_AC_SDLB_XLB_START_LBSET_START_SET(first), sparx5,
+ ANA_AC_SDLB_XLB_START(group));
+
+ /* Start leaking */
+ sparx5_sdlb_group_enable(sparx5, group);
+
+ return 0;
+};
+
+int sparx5_sdlb_group_add(struct sparx5 *sparx5, u32 group, u32 idx)
+{
+ u32 first, next;
+
+ /* We always add to head of the list */
+ first = idx;
+
+ if (sparx5_sdlb_group_is_empty(sparx5, group))
+ next = idx;
+ else
+ next = sparx5_sdlb_group_get_first(sparx5, group);
+
+ return sparx5_sdlb_group_link(sparx5, group, idx, first, next, false);
+}
+
+int sparx5_sdlb_group_del(struct sparx5 *sparx5, u32 group, u32 idx)
+{
+ u32 first, next, prev;
+ bool empty = false;
+
+ if (sparx5_sdlb_group_get_adjacent(sparx5, group, idx, &prev, &next,
+ &first) < 0) {
+ pr_err("%s:%d Could not find idx: %d in group: %d", __func__,
+ __LINE__, idx, group);
+ return -EINVAL;
+ }
+
+ if (sparx5_sdlb_group_is_singular(sparx5, group)) {
+ empty = true;
+ } else if (sparx5_sdlb_group_is_last(sparx5, group, idx)) {
+ /* idx is removed, prev is now last */
+ idx = prev;
+ next = prev;
+ } else if (sparx5_sdlb_group_is_first(sparx5, group, idx)) {
+ /* idx is removed and points to itself, first is next */
+ first = next;
+ next = idx;
+ } else {
+ /* Next is not touched */
+ idx = prev;
+ }
+
+ return sparx5_sdlb_group_link(sparx5, group, idx, first, next, empty);
+}
+
+void sparx5_sdlb_group_init(struct sparx5 *sparx5, u64 max_rate, u32 min_burst,
+ u32 frame_size, u32 idx)
+{
+ u32 thres_shift, mask = 0x01, power = 0;
+ struct sparx5_sdlb_group *group;
+ u64 max_token;
+
+ group = &sdlb_groups[idx];
+
+ /* Number of positions to right-shift LB's threshold value. */
+ while ((min_burst & mask) == 0) {
+ power++;
+ mask <<= 1;
+ }
+ thres_shift = SPX5_SDLB_2CYCLES_TYPE2_THRES_OFFSET - power;
+
+ max_token = (min_burst > SPX5_SDLB_PUP_TOKEN_MAX) ?
+ SPX5_SDLB_PUP_TOKEN_MAX :
+ min_burst;
+ group->pup_interval =
+ sparx5_sdlb_pup_interval_get(sparx5, max_token, max_rate);
+
+ group->frame_size = frame_size;
+
+ spx5_wr(ANA_AC_SDLB_PUP_INTERVAL_PUP_INTERVAL_SET(group->pup_interval),
+ sparx5, ANA_AC_SDLB_PUP_INTERVAL(idx));
+
+ spx5_wr(ANA_AC_SDLB_FRM_RATE_TOKENS_FRM_RATE_TOKENS_SET(frame_size),
+ sparx5, ANA_AC_SDLB_FRM_RATE_TOKENS(idx));
+
+ spx5_wr(ANA_AC_SDLB_LBGRP_MISC_THRES_SHIFT_SET(thres_shift), sparx5,
+ ANA_AC_SDLB_LBGRP_MISC(idx));
+}
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_tc.c b/drivers/net/ethernet/microchip/sparx5/sparx5_tc.c
index 205246b5af82..e80f3166db7d 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_tc.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_tc.c
@@ -5,6 +5,7 @@
*/
#include <net/pkt_cls.h>
+#include <net/pkt_sched.h>
#include "sparx5_tc.h"
#include "sparx5_main.h"
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_tc.h b/drivers/net/ethernet/microchip/sparx5/sparx5_tc.h
index adab88e6b21f..7ef470b28566 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_tc.h
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_tc.h
@@ -21,6 +21,80 @@ enum SPX5_PORT_MASK_MODE {
SPX5_PMM_OR_PGID_MASK,
};
+/* Controls ES0 forwarding */
+enum SPX5_FORWARDING_SEL {
+ SPX5_FWSEL_NO_ACTION,
+ SPX5_FWSEL_COPY_TO_LOOPBACK,
+ SPX5_FWSEL_REDIRECT_TO_LOOPBACK,
+ SPX5_FWSEL_DISCARD,
+};
+
+/* Controls tag A (outer tagging) */
+enum SPX5_OUTER_TAG_SEL {
+ SPX5_OTAG_PORT,
+ SPX5_OTAG_TAG_A,
+ SPX5_OTAG_FORCED_PORT,
+ SPX5_OTAG_UNTAG,
+};
+
+/* Selects TPID for ES0 tag A */
+enum SPX5_TPID_A_SEL {
+ SPX5_TPID_A_8100,
+ SPX5_TPID_A_88A8,
+ SPX5_TPID_A_CUST1,
+ SPX5_TPID_A_CUST2,
+ SPX5_TPID_A_CUST3,
+ SPX5_TPID_A_CLASSIFIED,
+};
+
+/* Selects VID for ES0 tag A */
+enum SPX5_VID_A_SEL {
+ SPX5_VID_A_CLASSIFIED,
+ SPX5_VID_A_VAL,
+ SPX5_VID_A_IFH,
+ SPX5_VID_A_RESERVED,
+};
+
+/* Select PCP source for ES0 tag A */
+enum SPX5_PCP_A_SEL {
+ SPX5_PCP_A_CLASSIFIED,
+ SPX5_PCP_A_VAL,
+ SPX5_PCP_A_RESERVED,
+ SPX5_PCP_A_POPPED,
+ SPX5_PCP_A_MAPPED_0,
+ SPX5_PCP_A_MAPPED_1,
+ SPX5_PCP_A_MAPPED_2,
+ SPX5_PCP_A_MAPPED_3,
+};
+
+/* Select DEI source for ES0 tag A */
+enum SPX5_DEI_A_SEL {
+ SPX5_DEI_A_CLASSIFIED,
+ SPX5_DEI_A_VAL,
+ SPX5_DEI_A_REW,
+ SPX5_DEI_A_POPPED,
+ SPX5_DEI_A_MAPPED_0,
+ SPX5_DEI_A_MAPPED_1,
+ SPX5_DEI_A_MAPPED_2,
+ SPX5_DEI_A_MAPPED_3,
+};
+
+/* Controls tag B (inner tagging) */
+enum SPX5_INNER_TAG_SEL {
+ SPX5_ITAG_NO_PUSH,
+ SPX5_ITAG_PUSH_B_TAG,
+};
+
+/* Selects TPID for ES0 tag B. */
+enum SPX5_TPID_B_SEL {
+ SPX5_TPID_B_8100,
+ SPX5_TPID_B_88A8,
+ SPX5_TPID_B_CUST1,
+ SPX5_TPID_B_CUST2,
+ SPX5_TPID_B_CUST3,
+ SPX5_TPID_B_CLASSIFIED,
+};
+
int sparx5_port_setup_tc(struct net_device *ndev, enum tc_setup_type type,
void *type_data);
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c b/drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c
index 1ed304a816cc..b36819aafaca 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c
@@ -4,11 +4,13 @@
* Copyright (c) 2022 Microchip Technology Inc. and its subsidiaries.
*/
+#include <net/tc_act/tc_gate.h>
#include <net/tcp.h>
#include "sparx5_tc.h"
#include "vcap_api.h"
#include "vcap_api_client.h"
+#include "vcap_tc.h"
#include "sparx5_main.h"
#include "sparx5_vcap_impl.h"
@@ -26,249 +28,33 @@ struct sparx5_multiple_rules {
struct sparx5_wildcard_rule rule[SPX5_MAX_RULE_SIZE];
};
-struct sparx5_tc_flower_parse_usage {
- struct flow_cls_offload *fco;
- struct flow_rule *frule;
- struct vcap_rule *vrule;
- u16 l3_proto;
- u8 l4_proto;
- unsigned int used_keys;
-};
-
-struct sparx5_tc_rule_pkt_cnt {
- u64 cookie;
- u32 pkts;
-};
-
-/* These protocols have dedicated keysets in IS2 and a TC dissector
- * ETH_P_ARP does not have a TC dissector
- */
-static u16 sparx5_tc_known_etypes[] = {
- ETH_P_ALL,
- ETH_P_ARP,
- ETH_P_IP,
- ETH_P_IPV6,
-};
-
-enum sparx5_is2_arp_opcode {
- SPX5_IS2_ARP_REQUEST,
- SPX5_IS2_ARP_REPLY,
- SPX5_IS2_RARP_REQUEST,
- SPX5_IS2_RARP_REPLY,
-};
-
-enum tc_arp_opcode {
- TC_ARP_OP_RESERVED,
- TC_ARP_OP_REQUEST,
- TC_ARP_OP_REPLY,
-};
-
-static bool sparx5_tc_is_known_etype(u16 etype)
-{
- int idx;
-
- /* For now this only knows about IS2 traffic classification */
- for (idx = 0; idx < ARRAY_SIZE(sparx5_tc_known_etypes); ++idx)
- if (sparx5_tc_known_etypes[idx] == etype)
- return true;
-
- return false;
-}
-
-static int sparx5_tc_flower_handler_ethaddr_usage(struct sparx5_tc_flower_parse_usage *st)
-{
- enum vcap_key_field smac_key = VCAP_KF_L2_SMAC;
- enum vcap_key_field dmac_key = VCAP_KF_L2_DMAC;
- struct flow_match_eth_addrs match;
- struct vcap_u48_key smac, dmac;
- int err = 0;
-
- flow_rule_match_eth_addrs(st->frule, &match);
-
- if (!is_zero_ether_addr(match.mask->src)) {
- vcap_netbytes_copy(smac.value, match.key->src, ETH_ALEN);
- vcap_netbytes_copy(smac.mask, match.mask->src, ETH_ALEN);
- err = vcap_rule_add_key_u48(st->vrule, smac_key, &smac);
- if (err)
- goto out;
- }
-
- if (!is_zero_ether_addr(match.mask->dst)) {
- vcap_netbytes_copy(dmac.value, match.key->dst, ETH_ALEN);
- vcap_netbytes_copy(dmac.mask, match.mask->dst, ETH_ALEN);
- err = vcap_rule_add_key_u48(st->vrule, dmac_key, &dmac);
- if (err)
- goto out;
- }
-
- st->used_keys |= BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS);
-
- return err;
-
-out:
- NL_SET_ERR_MSG_MOD(st->fco->common.extack, "eth_addr parse error");
- return err;
-}
-
-static int
-sparx5_tc_flower_handler_ipv4_usage(struct sparx5_tc_flower_parse_usage *st)
-{
- int err = 0;
-
- if (st->l3_proto == ETH_P_IP) {
- struct flow_match_ipv4_addrs mt;
-
- flow_rule_match_ipv4_addrs(st->frule, &mt);
- if (mt.mask->src) {
- err = vcap_rule_add_key_u32(st->vrule,
- VCAP_KF_L3_IP4_SIP,
- be32_to_cpu(mt.key->src),
- be32_to_cpu(mt.mask->src));
- if (err)
- goto out;
- }
- if (mt.mask->dst) {
- err = vcap_rule_add_key_u32(st->vrule,
- VCAP_KF_L3_IP4_DIP,
- be32_to_cpu(mt.key->dst),
- be32_to_cpu(mt.mask->dst));
- if (err)
- goto out;
- }
- }
-
- st->used_keys |= BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS);
-
- return err;
-
-out:
- NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ipv4_addr parse error");
- return err;
-}
-
-static int
-sparx5_tc_flower_handler_ipv6_usage(struct sparx5_tc_flower_parse_usage *st)
-{
- int err = 0;
-
- if (st->l3_proto == ETH_P_IPV6) {
- struct flow_match_ipv6_addrs mt;
- struct vcap_u128_key sip;
- struct vcap_u128_key dip;
-
- flow_rule_match_ipv6_addrs(st->frule, &mt);
- /* Check if address masks are non-zero */
- if (!ipv6_addr_any(&mt.mask->src)) {
- vcap_netbytes_copy(sip.value, mt.key->src.s6_addr, 16);
- vcap_netbytes_copy(sip.mask, mt.mask->src.s6_addr, 16);
- err = vcap_rule_add_key_u128(st->vrule,
- VCAP_KF_L3_IP6_SIP, &sip);
- if (err)
- goto out;
- }
- if (!ipv6_addr_any(&mt.mask->dst)) {
- vcap_netbytes_copy(dip.value, mt.key->dst.s6_addr, 16);
- vcap_netbytes_copy(dip.mask, mt.mask->dst.s6_addr, 16);
- err = vcap_rule_add_key_u128(st->vrule,
- VCAP_KF_L3_IP6_DIP, &dip);
- if (err)
- goto out;
- }
- }
- st->used_keys |= BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS);
- return err;
-out:
- NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ipv6_addr parse error");
- return err;
-}
-
static int
-sparx5_tc_flower_handler_control_usage(struct sparx5_tc_flower_parse_usage *st)
+sparx5_tc_flower_es0_tpid(struct vcap_tc_flower_parse_usage *st)
{
- struct flow_match_control mt;
- u32 value, mask;
int err = 0;
- flow_rule_match_control(st->frule, &mt);
-
- if (mt.mask->flags) {
- if (mt.mask->flags & FLOW_DIS_FIRST_FRAG) {
- if (mt.key->flags & FLOW_DIS_FIRST_FRAG) {
- value = 1; /* initial fragment */
- mask = 0x3;
- } else {
- if (mt.mask->flags & FLOW_DIS_IS_FRAGMENT) {
- value = 3; /* follow up fragment */
- mask = 0x3;
- } else {
- value = 0; /* no fragment */
- mask = 0x3;
- }
- }
- } else {
- if (mt.mask->flags & FLOW_DIS_IS_FRAGMENT) {
- value = 3; /* follow up fragment */
- mask = 0x3;
- } else {
- value = 0; /* no fragment */
- mask = 0x3;
- }
- }
-
+ switch (st->tpid) {
+ case ETH_P_8021Q:
err = vcap_rule_add_key_u32(st->vrule,
- VCAP_KF_L3_FRAGMENT_TYPE,
- value, mask);
- if (err)
- goto out;
- }
-
- st->used_keys |= BIT(FLOW_DISSECTOR_KEY_CONTROL);
-
- return err;
-
-out:
- NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ip_frag parse error");
- return err;
-}
-
-static int
-sparx5_tc_flower_handler_portnum_usage(struct sparx5_tc_flower_parse_usage *st)
-{
- struct flow_match_ports mt;
- u16 value, mask;
- int err = 0;
-
- flow_rule_match_ports(st->frule, &mt);
-
- if (mt.mask->src) {
- value = be16_to_cpu(mt.key->src);
- mask = be16_to_cpu(mt.mask->src);
- err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L4_SPORT, value,
- mask);
- if (err)
- goto out;
- }
-
- if (mt.mask->dst) {
- value = be16_to_cpu(mt.key->dst);
- mask = be16_to_cpu(mt.mask->dst);
- err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L4_DPORT, value,
- mask);
- if (err)
- goto out;
+ VCAP_KF_8021Q_TPID,
+ SPX5_TPID_SEL_8100, ~0);
+ break;
+ case ETH_P_8021AD:
+ err = vcap_rule_add_key_u32(st->vrule,
+ VCAP_KF_8021Q_TPID,
+ SPX5_TPID_SEL_88A8, ~0);
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(st->fco->common.extack,
+ "Invalid vlan proto");
+ err = -EINVAL;
+ break;
}
-
- st->used_keys |= BIT(FLOW_DISSECTOR_KEY_PORTS);
-
- return err;
-
-out:
- NL_SET_ERR_MSG_MOD(st->fco->common.extack, "port parse error");
return err;
}
static int
-sparx5_tc_flower_handler_basic_usage(struct sparx5_tc_flower_parse_usage *st)
+sparx5_tc_flower_handler_basic_usage(struct vcap_tc_flower_parse_usage *st)
{
struct flow_match_basic mt;
int err = 0;
@@ -277,7 +63,7 @@ sparx5_tc_flower_handler_basic_usage(struct sparx5_tc_flower_parse_usage *st)
if (mt.mask->n_proto) {
st->l3_proto = be16_to_cpu(mt.key->n_proto);
- if (!sparx5_tc_is_known_etype(st->l3_proto)) {
+ if (!sparx5_vcap_is_known_etype(st->admin, st->l3_proto)) {
err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_ETYPE,
st->l3_proto, ~0);
if (err)
@@ -292,6 +78,13 @@ sparx5_tc_flower_handler_basic_usage(struct sparx5_tc_flower_parse_usage *st)
VCAP_BIT_0);
if (err)
goto out;
+ if (st->admin->vtype == VCAP_TYPE_IS0) {
+ err = vcap_rule_add_key_bit(st->vrule,
+ VCAP_KF_IP_SNAP_IS,
+ VCAP_BIT_1);
+ if (err)
+ goto out;
+ }
}
}
@@ -309,6 +102,13 @@ sparx5_tc_flower_handler_basic_usage(struct sparx5_tc_flower_parse_usage *st)
VCAP_BIT_0);
if (err)
goto out;
+ if (st->admin->vtype == VCAP_TYPE_IS0) {
+ err = vcap_rule_add_key_bit(st->vrule,
+ VCAP_KF_TCP_UDP_IS,
+ VCAP_BIT_1);
+ if (err)
+ goto out;
+ }
} else {
err = vcap_rule_add_key_u32(st->vrule,
VCAP_KF_L3_IP_PROTO,
@@ -328,253 +128,131 @@ out:
}
static int
-sparx5_tc_flower_handler_vlan_usage(struct sparx5_tc_flower_parse_usage *st)
+sparx5_tc_flower_handler_control_usage(struct vcap_tc_flower_parse_usage *st)
{
- enum vcap_key_field vid_key = VCAP_KF_8021Q_VID_CLS;
- enum vcap_key_field pcp_key = VCAP_KF_8021Q_PCP_CLS;
- struct flow_match_vlan mt;
- int err;
-
- flow_rule_match_vlan(st->frule, &mt);
-
- if (mt.mask->vlan_id) {
- err = vcap_rule_add_key_u32(st->vrule, vid_key,
- mt.key->vlan_id,
- mt.mask->vlan_id);
- if (err)
- goto out;
- }
-
- if (mt.mask->vlan_priority) {
- err = vcap_rule_add_key_u32(st->vrule, pcp_key,
- mt.key->vlan_priority,
- mt.mask->vlan_priority);
- if (err)
- goto out;
- }
-
- st->used_keys |= BIT(FLOW_DISSECTOR_KEY_VLAN);
-
- return 0;
-out:
- NL_SET_ERR_MSG_MOD(st->fco->common.extack, "vlan parse error");
- return err;
-}
-
-static int
-sparx5_tc_flower_handler_tcp_usage(struct sparx5_tc_flower_parse_usage *st)
-{
- struct flow_match_tcp mt;
- u16 tcp_flags_mask;
- u16 tcp_flags_key;
- enum vcap_bit val;
+ struct flow_match_control mt;
+ u32 value, mask;
int err = 0;
- flow_rule_match_tcp(st->frule, &mt);
- tcp_flags_key = be16_to_cpu(mt.key->flags);
- tcp_flags_mask = be16_to_cpu(mt.mask->flags);
-
- if (tcp_flags_mask & TCPHDR_FIN) {
- val = VCAP_BIT_0;
- if (tcp_flags_key & TCPHDR_FIN)
- val = VCAP_BIT_1;
- err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_FIN, val);
- if (err)
- goto out;
- }
-
- if (tcp_flags_mask & TCPHDR_SYN) {
- val = VCAP_BIT_0;
- if (tcp_flags_key & TCPHDR_SYN)
- val = VCAP_BIT_1;
- err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_SYN, val);
- if (err)
- goto out;
- }
-
- if (tcp_flags_mask & TCPHDR_RST) {
- val = VCAP_BIT_0;
- if (tcp_flags_key & TCPHDR_RST)
- val = VCAP_BIT_1;
- err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_RST, val);
- if (err)
- goto out;
- }
-
- if (tcp_flags_mask & TCPHDR_PSH) {
- val = VCAP_BIT_0;
- if (tcp_flags_key & TCPHDR_PSH)
- val = VCAP_BIT_1;
- err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_PSH, val);
- if (err)
- goto out;
- }
+ flow_rule_match_control(st->frule, &mt);
- if (tcp_flags_mask & TCPHDR_ACK) {
- val = VCAP_BIT_0;
- if (tcp_flags_key & TCPHDR_ACK)
- val = VCAP_BIT_1;
- err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_ACK, val);
- if (err)
- goto out;
- }
+ if (mt.mask->flags) {
+ if (mt.mask->flags & FLOW_DIS_FIRST_FRAG) {
+ if (mt.key->flags & FLOW_DIS_FIRST_FRAG) {
+ value = 1; /* initial fragment */
+ mask = 0x3;
+ } else {
+ if (mt.mask->flags & FLOW_DIS_IS_FRAGMENT) {
+ value = 3; /* follow up fragment */
+ mask = 0x3;
+ } else {
+ value = 0; /* no fragment */
+ mask = 0x3;
+ }
+ }
+ } else {
+ if (mt.mask->flags & FLOW_DIS_IS_FRAGMENT) {
+ value = 3; /* follow up fragment */
+ mask = 0x3;
+ } else {
+ value = 0; /* no fragment */
+ mask = 0x3;
+ }
+ }
- if (tcp_flags_mask & TCPHDR_URG) {
- val = VCAP_BIT_0;
- if (tcp_flags_key & TCPHDR_URG)
- val = VCAP_BIT_1;
- err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_URG, val);
+ err = vcap_rule_add_key_u32(st->vrule,
+ VCAP_KF_L3_FRAGMENT_TYPE,
+ value, mask);
if (err)
goto out;
}
- st->used_keys |= BIT(FLOW_DISSECTOR_KEY_TCP);
+ st->used_keys |= BIT(FLOW_DISSECTOR_KEY_CONTROL);
return err;
out:
- NL_SET_ERR_MSG_MOD(st->fco->common.extack, "tcp_flags parse error");
+ NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ip_frag parse error");
return err;
}
static int
-sparx5_tc_flower_handler_arp_usage(struct sparx5_tc_flower_parse_usage *st)
+sparx5_tc_flower_handler_cvlan_usage(struct vcap_tc_flower_parse_usage *st)
{
- struct flow_match_arp mt;
- u16 value, mask;
- u32 ipval, ipmsk;
- int err;
-
- flow_rule_match_arp(st->frule, &mt);
-
- if (mt.mask->op) {
- mask = 0x3;
- if (st->l3_proto == ETH_P_ARP) {
- value = mt.key->op == TC_ARP_OP_REQUEST ?
- SPX5_IS2_ARP_REQUEST :
- SPX5_IS2_ARP_REPLY;
- } else { /* RARP */
- value = mt.key->op == TC_ARP_OP_REQUEST ?
- SPX5_IS2_RARP_REQUEST :
- SPX5_IS2_RARP_REPLY;
- }
- err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_ARP_OPCODE,
- value, mask);
- if (err)
- goto out;
- }
-
- /* The IS2 ARP keyset does not support ARP hardware addresses */
- if (!is_zero_ether_addr(mt.mask->sha) ||
- !is_zero_ether_addr(mt.mask->tha)) {
- err = -EINVAL;
- goto out;
- }
-
- if (mt.mask->sip) {
- ipval = be32_to_cpu((__force __be32)mt.key->sip);
- ipmsk = be32_to_cpu((__force __be32)mt.mask->sip);
-
- err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L3_IP4_SIP,
- ipval, ipmsk);
- if (err)
- goto out;
- }
-
- if (mt.mask->tip) {
- ipval = be32_to_cpu((__force __be32)mt.key->tip);
- ipmsk = be32_to_cpu((__force __be32)mt.mask->tip);
-
- err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L3_IP4_DIP,
- ipval, ipmsk);
- if (err)
- goto out;
+ if (st->admin->vtype != VCAP_TYPE_IS0) {
+ NL_SET_ERR_MSG_MOD(st->fco->common.extack,
+ "cvlan not supported in this VCAP");
+ return -EINVAL;
}
- st->used_keys |= BIT(FLOW_DISSECTOR_KEY_ARP);
-
- return 0;
-
-out:
- NL_SET_ERR_MSG_MOD(st->fco->common.extack, "arp parse error");
- return err;
+ return vcap_tc_flower_handler_cvlan_usage(st);
}
static int
-sparx5_tc_flower_handler_ip_usage(struct sparx5_tc_flower_parse_usage *st)
+sparx5_tc_flower_handler_vlan_usage(struct vcap_tc_flower_parse_usage *st)
{
- struct flow_match_ip mt;
- int err = 0;
-
- flow_rule_match_ip(st->frule, &mt);
+ enum vcap_key_field vid_key = VCAP_KF_8021Q_VID_CLS;
+ enum vcap_key_field pcp_key = VCAP_KF_8021Q_PCP_CLS;
+ int err;
- if (mt.mask->tos) {
- err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L3_TOS,
- mt.key->tos,
- mt.mask->tos);
- if (err)
- goto out;
+ if (st->admin->vtype == VCAP_TYPE_IS0) {
+ vid_key = VCAP_KF_8021Q_VID0;
+ pcp_key = VCAP_KF_8021Q_PCP0;
}
- st->used_keys |= BIT(FLOW_DISSECTOR_KEY_IP);
+ err = vcap_tc_flower_handler_vlan_usage(st, vid_key, pcp_key);
+ if (err)
+ return err;
- return err;
+ if (st->admin->vtype == VCAP_TYPE_ES0 && st->tpid)
+ err = sparx5_tc_flower_es0_tpid(st);
-out:
- NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ip_tos parse error");
return err;
}
-static int (*sparx5_tc_flower_usage_handlers[])(struct sparx5_tc_flower_parse_usage *st) = {
- [FLOW_DISSECTOR_KEY_ETH_ADDRS] = sparx5_tc_flower_handler_ethaddr_usage,
- [FLOW_DISSECTOR_KEY_IPV4_ADDRS] = sparx5_tc_flower_handler_ipv4_usage,
- [FLOW_DISSECTOR_KEY_IPV6_ADDRS] = sparx5_tc_flower_handler_ipv6_usage,
+static int (*sparx5_tc_flower_usage_handlers[])(struct vcap_tc_flower_parse_usage *st) = {
+ [FLOW_DISSECTOR_KEY_ETH_ADDRS] = vcap_tc_flower_handler_ethaddr_usage,
+ [FLOW_DISSECTOR_KEY_IPV4_ADDRS] = vcap_tc_flower_handler_ipv4_usage,
+ [FLOW_DISSECTOR_KEY_IPV6_ADDRS] = vcap_tc_flower_handler_ipv6_usage,
[FLOW_DISSECTOR_KEY_CONTROL] = sparx5_tc_flower_handler_control_usage,
- [FLOW_DISSECTOR_KEY_PORTS] = sparx5_tc_flower_handler_portnum_usage,
+ [FLOW_DISSECTOR_KEY_PORTS] = vcap_tc_flower_handler_portnum_usage,
[FLOW_DISSECTOR_KEY_BASIC] = sparx5_tc_flower_handler_basic_usage,
+ [FLOW_DISSECTOR_KEY_CVLAN] = sparx5_tc_flower_handler_cvlan_usage,
[FLOW_DISSECTOR_KEY_VLAN] = sparx5_tc_flower_handler_vlan_usage,
- [FLOW_DISSECTOR_KEY_TCP] = sparx5_tc_flower_handler_tcp_usage,
- [FLOW_DISSECTOR_KEY_ARP] = sparx5_tc_flower_handler_arp_usage,
- [FLOW_DISSECTOR_KEY_IP] = sparx5_tc_flower_handler_ip_usage,
+ [FLOW_DISSECTOR_KEY_TCP] = vcap_tc_flower_handler_tcp_usage,
+ [FLOW_DISSECTOR_KEY_ARP] = vcap_tc_flower_handler_arp_usage,
+ [FLOW_DISSECTOR_KEY_IP] = vcap_tc_flower_handler_ip_usage,
};
-static int sparx5_tc_use_dissectors(struct flow_cls_offload *fco,
+static int sparx5_tc_use_dissectors(struct vcap_tc_flower_parse_usage *st,
struct vcap_admin *admin,
- struct vcap_rule *vrule,
- u16 *l3_proto)
+ struct vcap_rule *vrule)
{
- struct sparx5_tc_flower_parse_usage state = {
- .fco = fco,
- .vrule = vrule,
- .l3_proto = ETH_P_ALL,
- };
int idx, err = 0;
- state.frule = flow_cls_offload_flow_rule(fco);
for (idx = 0; idx < ARRAY_SIZE(sparx5_tc_flower_usage_handlers); ++idx) {
- if (!flow_rule_match_key(state.frule, idx))
+ if (!flow_rule_match_key(st->frule, idx))
continue;
if (!sparx5_tc_flower_usage_handlers[idx])
continue;
- err = sparx5_tc_flower_usage_handlers[idx](&state);
+ err = sparx5_tc_flower_usage_handlers[idx](st);
if (err)
return err;
}
- if (state.frule->match.dissector->used_keys ^ state.used_keys) {
- NL_SET_ERR_MSG_MOD(fco->common.extack,
+ if (st->frule->match.dissector->used_keys ^ st->used_keys) {
+ NL_SET_ERR_MSG_MOD(st->fco->common.extack,
"Unsupported match item");
return -ENOENT;
}
- if (l3_proto)
- *l3_proto = state.l3_proto;
return err;
}
static int sparx5_tc_flower_action_check(struct vcap_control *vctrl,
+ struct net_device *ndev,
struct flow_cls_offload *fco,
- struct vcap_admin *admin)
+ bool ingress)
{
struct flow_rule *rule = flow_cls_offload_flow_rule(fco);
struct flow_action_entry *actent, *last_actent = NULL;
@@ -600,21 +278,24 @@ static int sparx5_tc_flower_action_check(struct vcap_control *vctrl,
last_actent = actent; /* Save last action for later check */
}
- /* Check that last action is a goto */
- if (last_actent->id != FLOW_ACTION_GOTO) {
+ /* Check if last action is a goto
+ * The last chain/lookup does not need to have a goto action
+ */
+ if (last_actent->id == FLOW_ACTION_GOTO) {
+ /* Check if the destination chain is in one of the VCAPs */
+ if (!vcap_is_next_lookup(vctrl, fco->common.chain_index,
+ last_actent->chain_index)) {
+ NL_SET_ERR_MSG_MOD(fco->common.extack,
+ "Invalid goto chain");
+ return -EINVAL;
+ }
+ } else if (!vcap_is_last_chain(vctrl, fco->common.chain_index,
+ ingress)) {
NL_SET_ERR_MSG_MOD(fco->common.extack,
"Last action must be 'goto'");
return -EINVAL;
}
- /* Check if the goto chain is in the next lookup */
- if (!vcap_is_next_lookup(vctrl, fco->common.chain_index,
- last_actent->chain_index)) {
- NL_SET_ERR_MSG_MOD(fco->common.extack,
- "Invalid goto chain");
- return -EINVAL;
- }
-
/* Catch unsupported combinations of actions */
if (action_mask & BIT(FLOW_ACTION_TRAP) &&
action_mask & BIT(FLOW_ACTION_ACCEPT)) {
@@ -623,21 +304,60 @@ static int sparx5_tc_flower_action_check(struct vcap_control *vctrl,
return -EOPNOTSUPP;
}
+ if (action_mask & BIT(FLOW_ACTION_VLAN_PUSH) &&
+ action_mask & BIT(FLOW_ACTION_VLAN_POP)) {
+ NL_SET_ERR_MSG_MOD(fco->common.extack,
+ "Cannot combine vlan push and pop action");
+ return -EOPNOTSUPP;
+ }
+
+ if (action_mask & BIT(FLOW_ACTION_VLAN_PUSH) &&
+ action_mask & BIT(FLOW_ACTION_VLAN_MANGLE)) {
+ NL_SET_ERR_MSG_MOD(fco->common.extack,
+ "Cannot combine vlan push and modify action");
+ return -EOPNOTSUPP;
+ }
+
+ if (action_mask & BIT(FLOW_ACTION_VLAN_POP) &&
+ action_mask & BIT(FLOW_ACTION_VLAN_MANGLE)) {
+ NL_SET_ERR_MSG_MOD(fco->common.extack,
+ "Cannot combine vlan pop and modify action");
+ return -EOPNOTSUPP;
+ }
+
return 0;
}
-/* Add a rule counter action - only IS2 is considered for now */
+/* Add a rule counter action */
static int sparx5_tc_add_rule_counter(struct vcap_admin *admin,
struct vcap_rule *vrule)
{
int err;
- err = vcap_rule_mod_action_u32(vrule, VCAP_AF_CNT_ID, vrule->id);
- if (err)
- return err;
-
- vcap_rule_set_counter_id(vrule, vrule->id);
- return err;
+ switch (admin->vtype) {
+ case VCAP_TYPE_IS0:
+ break;
+ case VCAP_TYPE_ES0:
+ err = vcap_rule_mod_action_u32(vrule, VCAP_AF_ESDX,
+ vrule->id);
+ if (err)
+ return err;
+ vcap_rule_set_counter_id(vrule, vrule->id);
+ break;
+ case VCAP_TYPE_IS2:
+ case VCAP_TYPE_ES2:
+ err = vcap_rule_mod_action_u32(vrule, VCAP_AF_CNT_ID,
+ vrule->id);
+ if (err)
+ return err;
+ vcap_rule_set_counter_id(vrule, vrule->id);
+ break;
+ default:
+ pr_err("%s:%d: vcap type: %d not supported\n",
+ __func__, __LINE__, admin->vtype);
+ break;
+ }
+ return 0;
}
/* Collect all port keysets and apply the first of them, possibly wildcarded */
@@ -818,22 +538,490 @@ static int sparx5_tc_add_remaining_rules(struct vcap_control *vctrl,
return err;
}
+/* Add the actionset that is the default for the VCAP type */
+static int sparx5_tc_set_actionset(struct vcap_admin *admin,
+ struct vcap_rule *vrule)
+{
+ enum vcap_actionfield_set aset;
+ int err = 0;
+
+ switch (admin->vtype) {
+ case VCAP_TYPE_IS0:
+ aset = VCAP_AFS_CLASSIFICATION;
+ break;
+ case VCAP_TYPE_IS2:
+ aset = VCAP_AFS_BASE_TYPE;
+ break;
+ case VCAP_TYPE_ES0:
+ aset = VCAP_AFS_ES0;
+ break;
+ case VCAP_TYPE_ES2:
+ aset = VCAP_AFS_BASE_TYPE;
+ break;
+ default:
+ pr_err("%s:%d: %s\n", __func__, __LINE__, "Invalid VCAP type");
+ return -EINVAL;
+ }
+ /* Do not overwrite any current actionset */
+ if (vrule->actionset == VCAP_AFS_NO_VALUE)
+ err = vcap_set_rule_set_actionset(vrule, aset);
+ return err;
+}
+
+/* Add the VCAP key to match on for a rule target value */
+static int sparx5_tc_add_rule_link_target(struct vcap_admin *admin,
+ struct vcap_rule *vrule,
+ int target_cid)
+{
+ int link_val = target_cid % VCAP_CID_LOOKUP_SIZE;
+ int err;
+
+ if (!link_val)
+ return 0;
+
+ switch (admin->vtype) {
+ case VCAP_TYPE_IS0:
+ /* Add NXT_IDX key for chaining rules between IS0 instances */
+ err = vcap_rule_add_key_u32(vrule, VCAP_KF_LOOKUP_GEN_IDX_SEL,
+ 1, /* enable */
+ ~0);
+ if (err)
+ return err;
+ return vcap_rule_add_key_u32(vrule, VCAP_KF_LOOKUP_GEN_IDX,
+ link_val, /* target */
+ ~0);
+ case VCAP_TYPE_IS2:
+ /* Add PAG key for chaining rules from IS0 */
+ return vcap_rule_add_key_u32(vrule, VCAP_KF_LOOKUP_PAG,
+ link_val, /* target */
+ ~0);
+ case VCAP_TYPE_ES0:
+ case VCAP_TYPE_ES2:
+ /* Add ISDX key for chaining rules from IS0 */
+ return vcap_rule_add_key_u32(vrule, VCAP_KF_ISDX_CLS, link_val,
+ ~0);
+ default:
+ break;
+ }
+ return 0;
+}
+
+/* Add the VCAP action that adds a target value to a rule */
+static int sparx5_tc_add_rule_link(struct vcap_control *vctrl,
+ struct vcap_admin *admin,
+ struct vcap_rule *vrule,
+ int from_cid, int to_cid)
+{
+ struct vcap_admin *to_admin = vcap_find_admin(vctrl, to_cid);
+ int diff, err = 0;
+
+ if (!to_admin) {
+ pr_err("%s:%d: unsupported chain direction: %d\n",
+ __func__, __LINE__, to_cid);
+ return -EINVAL;
+ }
+
+ diff = vcap_chain_offset(vctrl, from_cid, to_cid);
+ if (!diff)
+ return 0;
+
+ if (admin->vtype == VCAP_TYPE_IS0 &&
+ to_admin->vtype == VCAP_TYPE_IS0) {
+ /* Between IS0 instances the G_IDX value is used */
+ err = vcap_rule_add_action_u32(vrule, VCAP_AF_NXT_IDX, diff);
+ if (err)
+ goto out;
+ err = vcap_rule_add_action_u32(vrule, VCAP_AF_NXT_IDX_CTRL,
+ 1); /* Replace */
+ if (err)
+ goto out;
+ } else if (admin->vtype == VCAP_TYPE_IS0 &&
+ to_admin->vtype == VCAP_TYPE_IS2) {
+ /* Between IS0 and IS2 the PAG value is used */
+ err = vcap_rule_add_action_u32(vrule, VCAP_AF_PAG_VAL, diff);
+ if (err)
+ goto out;
+ err = vcap_rule_add_action_u32(vrule,
+ VCAP_AF_PAG_OVERRIDE_MASK,
+ 0xff);
+ if (err)
+ goto out;
+ } else if (admin->vtype == VCAP_TYPE_IS0 &&
+ (to_admin->vtype == VCAP_TYPE_ES0 ||
+ to_admin->vtype == VCAP_TYPE_ES2)) {
+ /* Between IS0 and ES0/ES2 the ISDX value is used */
+ err = vcap_rule_add_action_u32(vrule, VCAP_AF_ISDX_VAL,
+ diff);
+ if (err)
+ goto out;
+ err = vcap_rule_add_action_bit(vrule,
+ VCAP_AF_ISDX_ADD_REPLACE_SEL,
+ VCAP_BIT_1);
+ if (err)
+ goto out;
+ } else {
+ pr_err("%s:%d: unsupported chain destination: %d\n",
+ __func__, __LINE__, to_cid);
+ err = -EOPNOTSUPP;
+ }
+out:
+ return err;
+}
+
+static int sparx5_tc_flower_parse_act_gate(struct sparx5_psfp_sg *sg,
+ struct flow_action_entry *act,
+ struct netlink_ext_ack *extack)
+{
+ int i;
+
+ if (act->gate.prio < -1 || act->gate.prio > SPX5_PSFP_SG_MAX_IPV) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid gate priority");
+ return -EINVAL;
+ }
+
+ if (act->gate.cycletime < SPX5_PSFP_SG_MIN_CYCLE_TIME_NS ||
+ act->gate.cycletime > SPX5_PSFP_SG_MAX_CYCLE_TIME_NS) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid gate cycletime");
+ return -EINVAL;
+ }
+
+ if (act->gate.cycletimeext > SPX5_PSFP_SG_MAX_CYCLE_TIME_NS) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid gate cycletimeext");
+ return -EINVAL;
+ }
+
+ if (act->gate.num_entries >= SPX5_PSFP_GCE_CNT) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid number of gate entries");
+ return -EINVAL;
+ }
+
+ sg->gate_state = true;
+ sg->ipv = act->gate.prio;
+ sg->num_entries = act->gate.num_entries;
+ sg->cycletime = act->gate.cycletime;
+ sg->cycletimeext = act->gate.cycletimeext;
+
+ for (i = 0; i < sg->num_entries; i++) {
+ sg->gce[i].gate_state = !!act->gate.entries[i].gate_state;
+ sg->gce[i].interval = act->gate.entries[i].interval;
+ sg->gce[i].ipv = act->gate.entries[i].ipv;
+ sg->gce[i].maxoctets = act->gate.entries[i].maxoctets;
+ }
+
+ return 0;
+}
+
+static int sparx5_tc_flower_parse_act_police(struct sparx5_policer *pol,
+ struct flow_action_entry *act,
+ struct netlink_ext_ack *extack)
+{
+ pol->type = SPX5_POL_SERVICE;
+ pol->rate = div_u64(act->police.rate_bytes_ps, 1000) * 8;
+ pol->burst = act->police.burst;
+ pol->idx = act->hw_index;
+
+ /* rate is now in kbit */
+ if (pol->rate > DIV_ROUND_UP(SPX5_SDLB_GROUP_RATE_MAX, 1000)) {
+ NL_SET_ERR_MSG_MOD(extack, "Maximum rate exceeded");
+ return -EINVAL;
+ }
+
+ if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
+ NL_SET_ERR_MSG_MOD(extack, "Offload not supported when exceed action is not drop");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
+ act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
+ NL_SET_ERR_MSG_MOD(extack, "Offload not supported when conform action is not pipe or ok");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int sparx5_tc_flower_psfp_setup(struct sparx5 *sparx5,
+ struct vcap_rule *vrule, int sg_idx,
+ int pol_idx, struct sparx5_psfp_sg *sg,
+ struct sparx5_psfp_fm *fm,
+ struct sparx5_psfp_sf *sf)
+{
+ u32 psfp_sfid = 0, psfp_fmid = 0, psfp_sgid = 0;
+ int ret;
+
+ /* Must always have a stream gate - max sdu (filter option) is evaluated
+ * after frames have passed the gate, so in case of only a policer, we
+ * allocate a stream gate that is always open.
+ */
+ if (sg_idx < 0) {
+ sg_idx = sparx5_pool_idx_to_id(SPX5_PSFP_SG_OPEN);
+ sg->ipv = 0; /* Disabled */
+ sg->cycletime = SPX5_PSFP_SG_CYCLE_TIME_DEFAULT;
+ sg->num_entries = 1;
+ sg->gate_state = 1; /* Open */
+ sg->gate_enabled = 1;
+ sg->gce[0].gate_state = 1;
+ sg->gce[0].interval = SPX5_PSFP_SG_CYCLE_TIME_DEFAULT;
+ sg->gce[0].ipv = 0;
+ sg->gce[0].maxoctets = 0; /* Disabled */
+ }
+
+ ret = sparx5_psfp_sg_add(sparx5, sg_idx, sg, &psfp_sgid);
+ if (ret < 0)
+ return ret;
+
+ if (pol_idx >= 0) {
+ /* Add new flow-meter */
+ ret = sparx5_psfp_fm_add(sparx5, pol_idx, fm, &psfp_fmid);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Map stream filter to stream gate */
+ sf->sgid = psfp_sgid;
+
+ /* Add new stream-filter and map it to a steam gate */
+ ret = sparx5_psfp_sf_add(sparx5, sf, &psfp_sfid);
+ if (ret < 0)
+ return ret;
+
+ /* Streams are classified by ISDX - map ISDX 1:1 to sfid for now. */
+ sparx5_isdx_conf_set(sparx5, psfp_sfid, psfp_sfid, psfp_fmid);
+
+ ret = vcap_rule_add_action_bit(vrule, VCAP_AF_ISDX_ADD_REPLACE_SEL,
+ VCAP_BIT_1);
+ if (ret)
+ return ret;
+
+ ret = vcap_rule_add_action_u32(vrule, VCAP_AF_ISDX_VAL, psfp_sfid);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/* Handle the action trap for a VCAP rule */
+static int sparx5_tc_action_trap(struct vcap_admin *admin,
+ struct vcap_rule *vrule,
+ struct flow_cls_offload *fco)
+{
+ int err = 0;
+
+ switch (admin->vtype) {
+ case VCAP_TYPE_IS2:
+ err = vcap_rule_add_action_bit(vrule,
+ VCAP_AF_CPU_COPY_ENA,
+ VCAP_BIT_1);
+ if (err)
+ break;
+ err = vcap_rule_add_action_u32(vrule,
+ VCAP_AF_CPU_QUEUE_NUM, 0);
+ if (err)
+ break;
+ err = vcap_rule_add_action_u32(vrule,
+ VCAP_AF_MASK_MODE,
+ SPX5_PMM_REPLACE_ALL);
+ break;
+ case VCAP_TYPE_ES0:
+ err = vcap_rule_add_action_u32(vrule,
+ VCAP_AF_FWD_SEL,
+ SPX5_FWSEL_REDIRECT_TO_LOOPBACK);
+ break;
+ case VCAP_TYPE_ES2:
+ err = vcap_rule_add_action_bit(vrule,
+ VCAP_AF_CPU_COPY_ENA,
+ VCAP_BIT_1);
+ if (err)
+ break;
+ err = vcap_rule_add_action_u32(vrule,
+ VCAP_AF_CPU_QUEUE_NUM, 0);
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(fco->common.extack,
+ "Trap action not supported in this VCAP");
+ err = -EOPNOTSUPP;
+ break;
+ }
+ return err;
+}
+
+static int sparx5_tc_action_vlan_pop(struct vcap_admin *admin,
+ struct vcap_rule *vrule,
+ struct flow_cls_offload *fco,
+ u16 tpid)
+{
+ int err = 0;
+
+ switch (admin->vtype) {
+ case VCAP_TYPE_ES0:
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(fco->common.extack,
+ "VLAN pop action not supported in this VCAP");
+ return -EOPNOTSUPP;
+ }
+
+ switch (tpid) {
+ case ETH_P_8021Q:
+ case ETH_P_8021AD:
+ err = vcap_rule_add_action_u32(vrule,
+ VCAP_AF_PUSH_OUTER_TAG,
+ SPX5_OTAG_UNTAG);
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(fco->common.extack,
+ "Invalid vlan proto");
+ err = -EINVAL;
+ }
+ return err;
+}
+
+static int sparx5_tc_action_vlan_modify(struct vcap_admin *admin,
+ struct vcap_rule *vrule,
+ struct flow_cls_offload *fco,
+ struct flow_action_entry *act,
+ u16 tpid)
+{
+ int err = 0;
+
+ switch (admin->vtype) {
+ case VCAP_TYPE_ES0:
+ err = vcap_rule_add_action_u32(vrule,
+ VCAP_AF_PUSH_OUTER_TAG,
+ SPX5_OTAG_TAG_A);
+ if (err)
+ return err;
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(fco->common.extack,
+ "VLAN modify action not supported in this VCAP");
+ return -EOPNOTSUPP;
+ }
+
+ switch (tpid) {
+ case ETH_P_8021Q:
+ err = vcap_rule_add_action_u32(vrule,
+ VCAP_AF_TAG_A_TPID_SEL,
+ SPX5_TPID_A_8100);
+ break;
+ case ETH_P_8021AD:
+ err = vcap_rule_add_action_u32(vrule,
+ VCAP_AF_TAG_A_TPID_SEL,
+ SPX5_TPID_A_88A8);
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(fco->common.extack,
+ "Invalid vlan proto");
+ err = -EINVAL;
+ }
+ if (err)
+ return err;
+
+ err = vcap_rule_add_action_u32(vrule,
+ VCAP_AF_TAG_A_VID_SEL,
+ SPX5_VID_A_VAL);
+ if (err)
+ return err;
+
+ err = vcap_rule_add_action_u32(vrule,
+ VCAP_AF_VID_A_VAL,
+ act->vlan.vid);
+ if (err)
+ return err;
+
+ err = vcap_rule_add_action_u32(vrule,
+ VCAP_AF_TAG_A_PCP_SEL,
+ SPX5_PCP_A_VAL);
+ if (err)
+ return err;
+
+ err = vcap_rule_add_action_u32(vrule,
+ VCAP_AF_PCP_A_VAL,
+ act->vlan.prio);
+ if (err)
+ return err;
+
+ return vcap_rule_add_action_u32(vrule,
+ VCAP_AF_TAG_A_DEI_SEL,
+ SPX5_DEI_A_CLASSIFIED);
+}
+
+static int sparx5_tc_action_vlan_push(struct vcap_admin *admin,
+ struct vcap_rule *vrule,
+ struct flow_cls_offload *fco,
+ struct flow_action_entry *act,
+ u16 tpid)
+{
+ u16 act_tpid = be16_to_cpu(act->vlan.proto);
+ int err = 0;
+
+ switch (admin->vtype) {
+ case VCAP_TYPE_ES0:
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(fco->common.extack,
+ "VLAN push action not supported in this VCAP");
+ return -EOPNOTSUPP;
+ }
+
+ if (tpid == ETH_P_8021AD) {
+ NL_SET_ERR_MSG_MOD(fco->common.extack,
+ "Cannot push on double tagged frames");
+ return -EOPNOTSUPP;
+ }
+
+ err = sparx5_tc_action_vlan_modify(admin, vrule, fco, act, act_tpid);
+ if (err)
+ return err;
+
+ switch (act_tpid) {
+ case ETH_P_8021Q:
+ break;
+ case ETH_P_8021AD:
+ /* Push classified tag as inner tag */
+ err = vcap_rule_add_action_u32(vrule,
+ VCAP_AF_PUSH_INNER_TAG,
+ SPX5_ITAG_PUSH_B_TAG);
+ if (err)
+ break;
+ err = vcap_rule_add_action_u32(vrule,
+ VCAP_AF_TAG_B_TPID_SEL,
+ SPX5_TPID_B_CLASSIFIED);
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(fco->common.extack,
+ "Invalid vlan proto");
+ err = -EINVAL;
+ }
+ return err;
+}
+
static int sparx5_tc_flower_replace(struct net_device *ndev,
struct flow_cls_offload *fco,
- struct vcap_admin *admin)
+ struct vcap_admin *admin,
+ bool ingress)
{
+ struct sparx5_psfp_sf sf = { .max_sdu = SPX5_PSFP_SF_MAX_SDU };
+ struct netlink_ext_ack *extack = fco->common.extack;
+ int err, idx, tc_sg_idx = -1, tc_pol_idx = -1;
+ struct vcap_tc_flower_parse_usage state = {
+ .fco = fco,
+ .l3_proto = ETH_P_ALL,
+ .admin = admin,
+ };
struct sparx5_port *port = netdev_priv(ndev);
struct sparx5_multiple_rules multi = {};
+ struct sparx5 *sparx5 = port->sparx5;
+ struct sparx5_psfp_sg sg = { 0 };
+ struct sparx5_psfp_fm fm = { 0 };
struct flow_action_entry *act;
struct vcap_control *vctrl;
struct flow_rule *frule;
struct vcap_rule *vrule;
- u16 l3_proto;
- int err, idx;
vctrl = port->sparx5->vcap_ctrl;
- err = sparx5_tc_flower_action_check(vctrl, fco, admin);
+ err = sparx5_tc_flower_action_check(vctrl, ndev, fco, ingress);
if (err)
return err;
@@ -844,8 +1032,9 @@ static int sparx5_tc_flower_replace(struct net_device *ndev,
vrule->cookie = fco->cookie;
- l3_proto = ETH_P_ALL;
- err = sparx5_tc_use_dissectors(fco, admin, vrule, &l3_proto);
+ state.vrule = vrule;
+ state.frule = flow_cls_offload_flow_rule(fco);
+ err = sparx5_tc_use_dissectors(&state, admin, vrule);
if (err)
goto out;
@@ -853,38 +1042,69 @@ static int sparx5_tc_flower_replace(struct net_device *ndev,
if (err)
goto out;
+ err = sparx5_tc_add_rule_link_target(admin, vrule,
+ fco->common.chain_index);
+ if (err)
+ goto out;
+
frule = flow_cls_offload_flow_rule(fco);
flow_action_for_each(idx, act, &frule->action) {
switch (act->id) {
+ case FLOW_ACTION_GATE: {
+ err = sparx5_tc_flower_parse_act_gate(&sg, act, extack);
+ if (err < 0)
+ goto out;
+
+ tc_sg_idx = act->hw_index;
+
+ break;
+ }
+ case FLOW_ACTION_POLICE: {
+ err = sparx5_tc_flower_parse_act_police(&fm.pol, act,
+ extack);
+ if (err < 0)
+ goto out;
+
+ tc_pol_idx = fm.pol.idx;
+ sf.max_sdu = act->police.mtu;
+
+ break;
+ }
case FLOW_ACTION_TRAP:
- err = vcap_rule_add_action_bit(vrule,
- VCAP_AF_CPU_COPY_ENA,
- VCAP_BIT_1);
+ err = sparx5_tc_action_trap(admin, vrule, fco);
if (err)
goto out;
- err = vcap_rule_add_action_u32(vrule,
- VCAP_AF_CPU_QUEUE_NUM, 0);
+ break;
+ case FLOW_ACTION_ACCEPT:
+ err = sparx5_tc_set_actionset(admin, vrule);
if (err)
goto out;
- err = vcap_rule_add_action_u32(vrule, VCAP_AF_MASK_MODE,
- SPX5_PMM_REPLACE_ALL);
+ break;
+ case FLOW_ACTION_GOTO:
+ err = sparx5_tc_set_actionset(admin, vrule);
if (err)
goto out;
- /* For now the actionset is hardcoded */
- err = vcap_set_rule_set_actionset(vrule,
- VCAP_AFS_BASE_TYPE);
+ sparx5_tc_add_rule_link(vctrl, admin, vrule,
+ fco->common.chain_index,
+ act->chain_index);
+ break;
+ case FLOW_ACTION_VLAN_POP:
+ err = sparx5_tc_action_vlan_pop(admin, vrule, fco,
+ state.tpid);
if (err)
goto out;
break;
- case FLOW_ACTION_ACCEPT:
- /* For now the actionset is hardcoded */
- err = vcap_set_rule_set_actionset(vrule,
- VCAP_AFS_BASE_TYPE);
+ case FLOW_ACTION_VLAN_PUSH:
+ err = sparx5_tc_action_vlan_push(admin, vrule, fco,
+ act, state.tpid);
if (err)
goto out;
break;
- case FLOW_ACTION_GOTO:
- /* Links between VCAPs will be added later */
+ case FLOW_ACTION_VLAN_MANGLE:
+ err = sparx5_tc_action_vlan_modify(admin, vrule, fco,
+ act, state.tpid);
+ if (err)
+ goto out;
break;
default:
NL_SET_ERR_MSG_MOD(fco->common.extack,
@@ -894,8 +1114,16 @@ static int sparx5_tc_flower_replace(struct net_device *ndev,
}
}
- err = sparx5_tc_select_protocol_keyset(ndev, vrule, admin, l3_proto,
- &multi);
+ /* Setup PSFP */
+ if (tc_sg_idx >= 0 || tc_pol_idx >= 0) {
+ err = sparx5_tc_flower_psfp_setup(sparx5, vrule, tc_sg_idx,
+ tc_pol_idx, &sg, &fm, &sf);
+ if (err)
+ goto out;
+ }
+
+ err = sparx5_tc_select_protocol_keyset(ndev, vrule, admin,
+ state.l3_proto, &multi);
if (err) {
NL_SET_ERR_MSG_MOD(fco->common.extack,
"No matching port keyset for filter protocol and keys");
@@ -903,7 +1131,7 @@ static int sparx5_tc_flower_replace(struct net_device *ndev,
}
/* provide the l3 protocol to guide the keyset selection */
- err = vcap_val_rule(vrule, l3_proto);
+ err = vcap_val_rule(vrule, state.l3_proto);
if (err) {
vcap_set_tc_exterr(fco, vrule);
goto out;
@@ -913,7 +1141,7 @@ static int sparx5_tc_flower_replace(struct net_device *ndev,
NL_SET_ERR_MSG_MOD(fco->common.extack,
"Could not add the filter");
- if (l3_proto == ETH_P_ALL)
+ if (state.l3_proto == ETH_P_ALL)
err = sparx5_tc_add_remaining_rules(vctrl, fco, vrule, admin,
&multi);
@@ -922,19 +1150,86 @@ out:
return err;
}
+static void sparx5_tc_free_psfp_resources(struct sparx5 *sparx5,
+ struct vcap_rule *vrule)
+{
+ struct vcap_client_actionfield *afield;
+ u32 isdx, sfid, sgid, fmid;
+
+ /* Check if VCAP_AF_ISDX_VAL action is set for this rule - and if
+ * it is used for stream and/or flow-meter classification.
+ */
+ afield = vcap_find_actionfield(vrule, VCAP_AF_ISDX_VAL);
+ if (!afield)
+ return;
+
+ isdx = afield->data.u32.value;
+ sfid = sparx5_psfp_isdx_get_sf(sparx5, isdx);
+
+ if (!sfid)
+ return;
+
+ fmid = sparx5_psfp_isdx_get_fm(sparx5, isdx);
+ sgid = sparx5_psfp_sf_get_sg(sparx5, sfid);
+
+ if (fmid && sparx5_psfp_fm_del(sparx5, fmid) < 0)
+ pr_err("%s:%d Could not delete invalid fmid: %d", __func__,
+ __LINE__, fmid);
+
+ if (sgid && sparx5_psfp_sg_del(sparx5, sgid) < 0)
+ pr_err("%s:%d Could not delete invalid sgid: %d", __func__,
+ __LINE__, sgid);
+
+ if (sparx5_psfp_sf_del(sparx5, sfid) < 0)
+ pr_err("%s:%d Could not delete invalid sfid: %d", __func__,
+ __LINE__, sfid);
+
+ sparx5_isdx_conf_set(sparx5, isdx, 0, 0);
+}
+
+static int sparx5_tc_free_rule_resources(struct net_device *ndev,
+ struct vcap_control *vctrl,
+ int rule_id)
+{
+ struct sparx5_port *port = netdev_priv(ndev);
+ struct sparx5 *sparx5 = port->sparx5;
+ struct vcap_rule *vrule;
+ int ret = 0;
+
+ vrule = vcap_get_rule(vctrl, rule_id);
+ if (!vrule || IS_ERR(vrule))
+ return -EINVAL;
+
+ sparx5_tc_free_psfp_resources(sparx5, vrule);
+
+ vcap_free_rule(vrule);
+ return ret;
+}
+
static int sparx5_tc_flower_destroy(struct net_device *ndev,
struct flow_cls_offload *fco,
struct vcap_admin *admin)
{
struct sparx5_port *port = netdev_priv(ndev);
+ int err = -ENOENT, count = 0, rule_id;
struct vcap_control *vctrl;
- int err = -ENOENT, rule_id;
vctrl = port->sparx5->vcap_ctrl;
while (true) {
rule_id = vcap_lookup_rule_by_cookie(vctrl, fco->cookie);
if (rule_id <= 0)
break;
+ if (count == 0) {
+ /* Resources are attached to the first rule of
+ * a set of rules. Only works if the rules are
+ * in the correct order.
+ */
+ err = sparx5_tc_free_rule_resources(ndev, vctrl,
+ rule_id);
+ if (err)
+ pr_err("%s:%d: could not free resources %d\n",
+ __func__, __LINE__, rule_id);
+ }
err = vcap_del_rule(vctrl, ndev, rule_id);
if (err) {
pr_err("%s:%d: could not delete rule %d\n",
@@ -945,44 +1240,21 @@ static int sparx5_tc_flower_destroy(struct net_device *ndev,
return err;
}
-/* Collect packet counts from all rules with the same cookie */
-static int sparx5_tc_rule_counter_cb(void *arg, struct vcap_rule *rule)
-{
- struct sparx5_tc_rule_pkt_cnt *rinfo = arg;
- struct vcap_counter counter;
- int err = 0;
-
- if (rule->cookie == rinfo->cookie) {
- err = vcap_rule_get_counter(rule, &counter);
- if (err)
- return err;
- rinfo->pkts += counter.value;
- /* Reset the rule counter */
- counter.value = 0;
- vcap_rule_set_counter(rule, &counter);
- }
- return err;
-}
-
static int sparx5_tc_flower_stats(struct net_device *ndev,
struct flow_cls_offload *fco,
struct vcap_admin *admin)
{
struct sparx5_port *port = netdev_priv(ndev);
- struct sparx5_tc_rule_pkt_cnt rinfo = {};
+ struct vcap_counter ctr = {};
struct vcap_control *vctrl;
ulong lastused = 0;
- u64 drops = 0;
- u32 pkts = 0;
int err;
- rinfo.cookie = fco->cookie;
vctrl = port->sparx5->vcap_ctrl;
- err = vcap_rule_iter(vctrl, sparx5_tc_rule_counter_cb, &rinfo);
+ err = vcap_get_rule_count_by_cookie(vctrl, &ctr, fco->cookie);
if (err)
return err;
- pkts = rinfo.pkts;
- flow_stats_update(&fco->stats, 0x0, pkts, drops, lastused,
+ flow_stats_update(&fco->stats, 0x0, ctr.value, 0, lastused,
FLOW_ACTION_HW_STATS_IMMEDIATE);
return err;
}
@@ -1005,7 +1277,7 @@ int sparx5_tc_flower(struct net_device *ndev, struct flow_cls_offload *fco,
switch (fco->command) {
case FLOW_CLS_REPLACE:
- return sparx5_tc_flower_replace(ndev, fco, admin);
+ return sparx5_tc_flower_replace(ndev, fco, admin, ingress);
case FLOW_CLS_DESTROY:
return sparx5_tc_flower_destroy(ndev, fco, admin);
case FLOW_CLS_STATS:
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_tc_matchall.c b/drivers/net/ethernet/microchip/sparx5/sparx5_tc_matchall.c
index 30dd61e5d150..d88a93f22606 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_tc_matchall.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_tc_matchall.c
@@ -31,6 +31,7 @@ static int sparx5_tc_matchall_replace(struct net_device *ndev,
switch (action->id) {
case FLOW_ACTION_GOTO:
err = vcap_enable_lookups(sparx5->vcap_ctrl, ndev,
+ tmo->common.chain_index,
action->chain_index, tmo->cookie,
true);
if (err == -EFAULT) {
@@ -43,6 +44,11 @@ static int sparx5_tc_matchall_replace(struct net_device *ndev,
"VCAP already enabled");
return -EOPNOTSUPP;
}
+ if (err == -EADDRNOTAVAIL) {
+ NL_SET_ERR_MSG_MOD(tmo->common.extack,
+ "Already matching this chain");
+ return -EOPNOTSUPP;
+ }
if (err) {
NL_SET_ERR_MSG_MOD(tmo->common.extack,
"Could not enable VCAP lookups");
@@ -66,8 +72,8 @@ static int sparx5_tc_matchall_destroy(struct net_device *ndev,
sparx5 = port->sparx5;
if (!tmo->rule && tmo->cookie) {
- err = vcap_enable_lookups(sparx5->vcap_ctrl, ndev, 0,
- tmo->cookie, false);
+ err = vcap_enable_lookups(sparx5->vcap_ctrl, ndev,
+ 0, 0, tmo->cookie, false);
if (err)
return err;
return 0;
@@ -80,12 +86,6 @@ int sparx5_tc_matchall(struct net_device *ndev,
struct tc_cls_matchall_offload *tmo,
bool ingress)
{
- if (!tc_cls_can_offload_and_chain0(ndev, &tmo->common)) {
- NL_SET_ERR_MSG_MOD(tmo->common.extack,
- "Only chain zero is supported");
- return -EOPNOTSUPP;
- }
-
switch (tmo->command) {
case TC_CLSMATCHALL_REPLACE:
return sparx5_tc_matchall_replace(ndev, tmo, ingress);
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_ag_api.c b/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_ag_api.c
index 1bd987c664e8..556d6ea0acd1 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_ag_api.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_ag_api.c
@@ -1,10 +1,10 @@
// SPDX-License-Identifier: BSD-3-Clause
-/* Copyright (C) 2022 Microchip Technology Inc. and its subsidiaries.
+/* Copyright (C) 2023 Microchip Technology Inc. and its subsidiaries.
* Microchip VCAP API
*/
-/* This file is autogenerated by cml-utils 2022-10-13 10:04:41 +0200.
- * Commit ID: fd7cafd175899f0672c73afb3a30fc872500ae86
+/* This file is autogenerated by cml-utils 2023-02-10 11:15:56 +0100.
+ * Commit ID: c30fb4bf0281cd4a7133bdab6682f9e43c872ada
*/
#include <linux/types.h>
@@ -14,6 +14,372 @@
#include "sparx5_vcap_ag_api.h"
/* keyfields */
+static const struct vcap_field is0_normal_7tuple_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 0,
+ .width = 1,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 1,
+ .width = 1,
+ },
+ [VCAP_KF_LOOKUP_GEN_IDX_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 2,
+ .width = 2,
+ },
+ [VCAP_KF_LOOKUP_GEN_IDX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 4,
+ .width = 12,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 16,
+ .width = 2,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U72,
+ .offset = 18,
+ .width = 65,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 83,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 84,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 85,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_TPID0] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 88,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_PCP0] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 91,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 94,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID0] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 95,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_TPID1] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 107,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_PCP1] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 110,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI1] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 113,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID1] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 114,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_TPID2] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 126,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_PCP2] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 129,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI2] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 132,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID2] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 133,
+ .width = 12,
+ },
+ [VCAP_KF_L2_DMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 145,
+ .width = 48,
+ },
+ [VCAP_KF_L2_SMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 193,
+ .width = 48,
+ },
+ [VCAP_KF_IP_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 241,
+ .width = 1,
+ },
+ [VCAP_KF_ETYPE_LEN_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 242,
+ .width = 1,
+ },
+ [VCAP_KF_ETYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 243,
+ .width = 16,
+ },
+ [VCAP_KF_IP_SNAP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 259,
+ .width = 1,
+ },
+ [VCAP_KF_IP4_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 260,
+ .width = 1,
+ },
+ [VCAP_KF_L3_FRAGMENT_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 261,
+ .width = 2,
+ },
+ [VCAP_KF_L3_FRAG_INVLD_L4_LEN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 263,
+ .width = 1,
+ },
+ [VCAP_KF_L3_OPTIONS_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 264,
+ .width = 1,
+ },
+ [VCAP_KF_L3_DSCP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 265,
+ .width = 6,
+ },
+ [VCAP_KF_L3_IP6_DIP] = {
+ .type = VCAP_FIELD_U128,
+ .offset = 271,
+ .width = 128,
+ },
+ [VCAP_KF_L3_IP6_SIP] = {
+ .type = VCAP_FIELD_U128,
+ .offset = 399,
+ .width = 128,
+ },
+ [VCAP_KF_TCP_UDP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 527,
+ .width = 1,
+ },
+ [VCAP_KF_TCP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 528,
+ .width = 1,
+ },
+ [VCAP_KF_L4_SPORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 529,
+ .width = 16,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 545,
+ .width = 8,
+ },
+};
+
+static const struct vcap_field is0_normal_5tuple_ip4_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 2,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 2,
+ .width = 1,
+ },
+ [VCAP_KF_LOOKUP_GEN_IDX_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 3,
+ .width = 2,
+ },
+ [VCAP_KF_LOOKUP_GEN_IDX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 5,
+ .width = 12,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 17,
+ .width = 2,
+ },
+ [VCAP_KF_IF_IGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U72,
+ .offset = 19,
+ .width = 65,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 84,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 85,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 86,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_TPID0] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 89,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_PCP0] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 92,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 95,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID0] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 96,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_TPID1] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 108,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_PCP1] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 111,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI1] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 114,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID1] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 115,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_TPID2] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 127,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_PCP2] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 130,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI2] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 133,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID2] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 134,
+ .width = 12,
+ },
+ [VCAP_KF_IP_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 146,
+ .width = 1,
+ },
+ [VCAP_KF_IP4_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 147,
+ .width = 1,
+ },
+ [VCAP_KF_L3_FRAGMENT_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 148,
+ .width = 2,
+ },
+ [VCAP_KF_L3_FRAG_INVLD_L4_LEN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 150,
+ .width = 1,
+ },
+ [VCAP_KF_L3_OPTIONS_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 151,
+ .width = 1,
+ },
+ [VCAP_KF_L3_DSCP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 152,
+ .width = 6,
+ },
+ [VCAP_KF_L3_IP4_DIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 158,
+ .width = 32,
+ },
+ [VCAP_KF_L3_IP4_SIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 190,
+ .width = 32,
+ },
+ [VCAP_KF_L3_IP_PROTO] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 222,
+ .width = 8,
+ },
+ [VCAP_KF_TCP_UDP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 230,
+ .width = 1,
+ },
+ [VCAP_KF_TCP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 231,
+ .width = 1,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 232,
+ .width = 8,
+ },
+ [VCAP_KF_IP_PAYLOAD_5TUPLE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 240,
+ .width = 32,
+ },
+};
+
static const struct vcap_field is2_mac_etype_keyfield[] = {
[VCAP_KF_TYPE] = {
.type = VCAP_FIELD_U32,
@@ -967,7 +1333,971 @@ static const struct vcap_field is2_ip_7tuple_keyfield[] = {
},
};
+static const struct vcap_field es0_isdx_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 0,
+ .width = 1,
+ },
+ [VCAP_KF_IF_EGR_PORT_NO] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 1,
+ .width = 7,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 8,
+ .width = 13,
+ },
+ [VCAP_KF_COSID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 21,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_TPID] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 24,
+ .width = 3,
+ },
+ [VCAP_KF_L3_DPL_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 27,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 28,
+ .width = 1,
+ },
+ [VCAP_KF_PROT_ACTIVE] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 29,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 39,
+ .width = 12,
+ },
+};
+
+static const struct vcap_field es2_mac_etype_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 3,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 3,
+ .width = 1,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 13,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 14,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 15,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 16,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 28,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 29,
+ .width = 13,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 42,
+ .width = 3,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 45,
+ .width = 32,
+ },
+ [VCAP_KF_IF_IGR_PORT_SEL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 77,
+ .width = 1,
+ },
+ [VCAP_KF_IF_IGR_PORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 78,
+ .width = 9,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 87,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 90,
+ .width = 1,
+ },
+ [VCAP_KF_COSID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 91,
+ .width = 3,
+ },
+ [VCAP_KF_L3_DPL_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 94,
+ .width = 1,
+ },
+ [VCAP_KF_L3_RT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 95,
+ .width = 1,
+ },
+ [VCAP_KF_L2_DMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 99,
+ .width = 48,
+ },
+ [VCAP_KF_L2_SMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 147,
+ .width = 48,
+ },
+ [VCAP_KF_ETYPE_LEN_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 195,
+ .width = 1,
+ },
+ [VCAP_KF_ETYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 196,
+ .width = 16,
+ },
+ [VCAP_KF_L2_PAYLOAD_ETYPE] = {
+ .type = VCAP_FIELD_U64,
+ .offset = 212,
+ .width = 64,
+ },
+ [VCAP_KF_OAM_CCM_CNTS_EQ0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 276,
+ .width = 1,
+ },
+ [VCAP_KF_OAM_Y1731_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 277,
+ .width = 1,
+ },
+};
+
+static const struct vcap_field es2_arp_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 3,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 3,
+ .width = 1,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 13,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 14,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 15,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 16,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 28,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 29,
+ .width = 13,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 42,
+ .width = 3,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 45,
+ .width = 32,
+ },
+ [VCAP_KF_IF_IGR_PORT_SEL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 77,
+ .width = 1,
+ },
+ [VCAP_KF_IF_IGR_PORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 78,
+ .width = 9,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 87,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 90,
+ .width = 1,
+ },
+ [VCAP_KF_COSID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 91,
+ .width = 3,
+ },
+ [VCAP_KF_L3_DPL_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 94,
+ .width = 1,
+ },
+ [VCAP_KF_L2_SMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 98,
+ .width = 48,
+ },
+ [VCAP_KF_ARP_ADDR_SPACE_OK_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 146,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_PROTO_SPACE_OK_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 147,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_LEN_OK_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 148,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_TGT_MATCH_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 149,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_SENDER_MATCH_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 150,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_OPCODE_UNKNOWN_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 151,
+ .width = 1,
+ },
+ [VCAP_KF_ARP_OPCODE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 152,
+ .width = 2,
+ },
+ [VCAP_KF_L3_IP4_DIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 154,
+ .width = 32,
+ },
+ [VCAP_KF_L3_IP4_SIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 186,
+ .width = 32,
+ },
+ [VCAP_KF_L3_DIP_EQ_SIP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 218,
+ .width = 1,
+ },
+};
+
+static const struct vcap_field es2_ip4_tcp_udp_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 3,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 3,
+ .width = 1,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 13,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 14,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 15,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 16,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 28,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 29,
+ .width = 13,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 42,
+ .width = 3,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 45,
+ .width = 32,
+ },
+ [VCAP_KF_IF_IGR_PORT_SEL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 77,
+ .width = 1,
+ },
+ [VCAP_KF_IF_IGR_PORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 78,
+ .width = 9,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 87,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 90,
+ .width = 1,
+ },
+ [VCAP_KF_COSID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 91,
+ .width = 3,
+ },
+ [VCAP_KF_L3_DPL_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 94,
+ .width = 1,
+ },
+ [VCAP_KF_L3_RT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 95,
+ .width = 1,
+ },
+ [VCAP_KF_IP4_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 99,
+ .width = 1,
+ },
+ [VCAP_KF_L3_FRAGMENT_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 100,
+ .width = 2,
+ },
+ [VCAP_KF_L3_OPTIONS_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 102,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TTL_GT0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 103,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TOS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 104,
+ .width = 8,
+ },
+ [VCAP_KF_L3_IP4_DIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 112,
+ .width = 32,
+ },
+ [VCAP_KF_L3_IP4_SIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 144,
+ .width = 32,
+ },
+ [VCAP_KF_L3_DIP_EQ_SIP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 176,
+ .width = 1,
+ },
+ [VCAP_KF_TCP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 177,
+ .width = 1,
+ },
+ [VCAP_KF_L4_DPORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 178,
+ .width = 16,
+ },
+ [VCAP_KF_L4_SPORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 194,
+ .width = 16,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 210,
+ .width = 16,
+ },
+ [VCAP_KF_L4_SPORT_EQ_DPORT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 226,
+ .width = 1,
+ },
+ [VCAP_KF_L4_SEQUENCE_EQ0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 227,
+ .width = 1,
+ },
+ [VCAP_KF_L4_FIN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 228,
+ .width = 1,
+ },
+ [VCAP_KF_L4_SYN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 229,
+ .width = 1,
+ },
+ [VCAP_KF_L4_RST] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 230,
+ .width = 1,
+ },
+ [VCAP_KF_L4_PSH] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 231,
+ .width = 1,
+ },
+ [VCAP_KF_L4_ACK] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 232,
+ .width = 1,
+ },
+ [VCAP_KF_L4_URG] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 233,
+ .width = 1,
+ },
+ [VCAP_KF_L4_PAYLOAD] = {
+ .type = VCAP_FIELD_U64,
+ .offset = 234,
+ .width = 64,
+ },
+};
+
+static const struct vcap_field es2_ip4_other_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 3,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 3,
+ .width = 1,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 13,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 14,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 15,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 16,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 28,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 29,
+ .width = 13,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 42,
+ .width = 3,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 45,
+ .width = 32,
+ },
+ [VCAP_KF_IF_IGR_PORT_SEL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 77,
+ .width = 1,
+ },
+ [VCAP_KF_IF_IGR_PORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 78,
+ .width = 9,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 87,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 90,
+ .width = 1,
+ },
+ [VCAP_KF_COSID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 91,
+ .width = 3,
+ },
+ [VCAP_KF_L3_DPL_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 94,
+ .width = 1,
+ },
+ [VCAP_KF_L3_RT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 95,
+ .width = 1,
+ },
+ [VCAP_KF_IP4_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 99,
+ .width = 1,
+ },
+ [VCAP_KF_L3_FRAGMENT_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 100,
+ .width = 2,
+ },
+ [VCAP_KF_L3_OPTIONS_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 102,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TTL_GT0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 103,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TOS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 104,
+ .width = 8,
+ },
+ [VCAP_KF_L3_IP4_DIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 112,
+ .width = 32,
+ },
+ [VCAP_KF_L3_IP4_SIP] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 144,
+ .width = 32,
+ },
+ [VCAP_KF_L3_DIP_EQ_SIP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 176,
+ .width = 1,
+ },
+ [VCAP_KF_L3_IP_PROTO] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 177,
+ .width = 8,
+ },
+ [VCAP_KF_L3_PAYLOAD] = {
+ .type = VCAP_FIELD_U112,
+ .offset = 185,
+ .width = 96,
+ },
+};
+
+static const struct vcap_field es2_ip_7tuple_keyfield[] = {
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 0,
+ .width = 1,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 10,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 11,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 12,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 13,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 25,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 26,
+ .width = 13,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 39,
+ .width = 3,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 42,
+ .width = 32,
+ },
+ [VCAP_KF_IF_IGR_PORT_SEL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 74,
+ .width = 1,
+ },
+ [VCAP_KF_IF_IGR_PORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 75,
+ .width = 9,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 84,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 87,
+ .width = 1,
+ },
+ [VCAP_KF_COSID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 88,
+ .width = 3,
+ },
+ [VCAP_KF_L3_DPL_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 91,
+ .width = 1,
+ },
+ [VCAP_KF_L3_RT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 92,
+ .width = 1,
+ },
+ [VCAP_KF_L2_DMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 96,
+ .width = 48,
+ },
+ [VCAP_KF_L2_SMAC] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 144,
+ .width = 48,
+ },
+ [VCAP_KF_IP4_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 192,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TTL_GT0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 193,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TOS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 194,
+ .width = 8,
+ },
+ [VCAP_KF_L3_IP6_DIP] = {
+ .type = VCAP_FIELD_U128,
+ .offset = 202,
+ .width = 128,
+ },
+ [VCAP_KF_L3_IP6_SIP] = {
+ .type = VCAP_FIELD_U128,
+ .offset = 330,
+ .width = 128,
+ },
+ [VCAP_KF_L3_DIP_EQ_SIP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 458,
+ .width = 1,
+ },
+ [VCAP_KF_TCP_UDP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 459,
+ .width = 1,
+ },
+ [VCAP_KF_TCP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 460,
+ .width = 1,
+ },
+ [VCAP_KF_L4_DPORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 461,
+ .width = 16,
+ },
+ [VCAP_KF_L4_SPORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 477,
+ .width = 16,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 493,
+ .width = 16,
+ },
+ [VCAP_KF_L4_SPORT_EQ_DPORT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 509,
+ .width = 1,
+ },
+ [VCAP_KF_L4_SEQUENCE_EQ0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 510,
+ .width = 1,
+ },
+ [VCAP_KF_L4_FIN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 511,
+ .width = 1,
+ },
+ [VCAP_KF_L4_SYN] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 512,
+ .width = 1,
+ },
+ [VCAP_KF_L4_RST] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 513,
+ .width = 1,
+ },
+ [VCAP_KF_L4_PSH] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 514,
+ .width = 1,
+ },
+ [VCAP_KF_L4_ACK] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 515,
+ .width = 1,
+ },
+ [VCAP_KF_L4_URG] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 516,
+ .width = 1,
+ },
+ [VCAP_KF_L4_PAYLOAD] = {
+ .type = VCAP_FIELD_U64,
+ .offset = 517,
+ .width = 64,
+ },
+};
+
+static const struct vcap_field es2_ip6_std_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 3,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 3,
+ .width = 1,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 13,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 14,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 15,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 16,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 28,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 29,
+ .width = 13,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 42,
+ .width = 3,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 45,
+ .width = 32,
+ },
+ [VCAP_KF_IF_IGR_PORT_SEL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 77,
+ .width = 1,
+ },
+ [VCAP_KF_IF_IGR_PORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 78,
+ .width = 9,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 87,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 90,
+ .width = 1,
+ },
+ [VCAP_KF_COSID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 91,
+ .width = 3,
+ },
+ [VCAP_KF_L3_DPL_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 94,
+ .width = 1,
+ },
+ [VCAP_KF_L3_RT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 95,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TTL_GT0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 99,
+ .width = 1,
+ },
+ [VCAP_KF_L3_IP6_SIP] = {
+ .type = VCAP_FIELD_U128,
+ .offset = 100,
+ .width = 128,
+ },
+ [VCAP_KF_L3_DIP_EQ_SIP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 228,
+ .width = 1,
+ },
+ [VCAP_KF_L3_IP_PROTO] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 229,
+ .width = 8,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 237,
+ .width = 16,
+ },
+ [VCAP_KF_L3_PAYLOAD] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 253,
+ .width = 40,
+ },
+};
+
/* keyfield_set */
+static const struct vcap_set is0_keyfield_set[] = {
+ [VCAP_KFS_NORMAL_7TUPLE] = {
+ .type_id = 0,
+ .sw_per_item = 12,
+ .sw_cnt = 1,
+ },
+ [VCAP_KFS_NORMAL_5TUPLE_IP4] = {
+ .type_id = 2,
+ .sw_per_item = 6,
+ .sw_cnt = 2,
+ },
+};
+
static const struct vcap_set is2_keyfield_set[] = {
[VCAP_KFS_MAC_ETYPE] = {
.type_id = 0,
@@ -1001,7 +2331,53 @@ static const struct vcap_set is2_keyfield_set[] = {
},
};
+static const struct vcap_set es0_keyfield_set[] = {
+ [VCAP_KFS_ISDX] = {
+ .type_id = 0,
+ .sw_per_item = 1,
+ .sw_cnt = 1,
+ },
+};
+
+static const struct vcap_set es2_keyfield_set[] = {
+ [VCAP_KFS_MAC_ETYPE] = {
+ .type_id = 0,
+ .sw_per_item = 6,
+ .sw_cnt = 2,
+ },
+ [VCAP_KFS_ARP] = {
+ .type_id = 1,
+ .sw_per_item = 6,
+ .sw_cnt = 2,
+ },
+ [VCAP_KFS_IP4_TCP_UDP] = {
+ .type_id = 2,
+ .sw_per_item = 6,
+ .sw_cnt = 2,
+ },
+ [VCAP_KFS_IP4_OTHER] = {
+ .type_id = 3,
+ .sw_per_item = 6,
+ .sw_cnt = 2,
+ },
+ [VCAP_KFS_IP_7TUPLE] = {
+ .type_id = -1,
+ .sw_per_item = 12,
+ .sw_cnt = 1,
+ },
+ [VCAP_KFS_IP6_STD] = {
+ .type_id = 4,
+ .sw_per_item = 6,
+ .sw_cnt = 2,
+ },
+};
+
/* keyfield_set map */
+static const struct vcap_field *is0_keyfield_set_map[] = {
+ [VCAP_KFS_NORMAL_7TUPLE] = is0_normal_7tuple_keyfield,
+ [VCAP_KFS_NORMAL_5TUPLE_IP4] = is0_normal_5tuple_ip4_keyfield,
+};
+
static const struct vcap_field *is2_keyfield_set_map[] = {
[VCAP_KFS_MAC_ETYPE] = is2_mac_etype_keyfield,
[VCAP_KFS_ARP] = is2_arp_keyfield,
@@ -1011,7 +2387,25 @@ static const struct vcap_field *is2_keyfield_set_map[] = {
[VCAP_KFS_IP_7TUPLE] = is2_ip_7tuple_keyfield,
};
+static const struct vcap_field *es0_keyfield_set_map[] = {
+ [VCAP_KFS_ISDX] = es0_isdx_keyfield,
+};
+
+static const struct vcap_field *es2_keyfield_set_map[] = {
+ [VCAP_KFS_MAC_ETYPE] = es2_mac_etype_keyfield,
+ [VCAP_KFS_ARP] = es2_arp_keyfield,
+ [VCAP_KFS_IP4_TCP_UDP] = es2_ip4_tcp_udp_keyfield,
+ [VCAP_KFS_IP4_OTHER] = es2_ip4_other_keyfield,
+ [VCAP_KFS_IP_7TUPLE] = es2_ip_7tuple_keyfield,
+ [VCAP_KFS_IP6_STD] = es2_ip6_std_keyfield,
+};
+
/* keyfield_set map sizes */
+static int is0_keyfield_set_map_size[] = {
+ [VCAP_KFS_NORMAL_7TUPLE] = ARRAY_SIZE(is0_normal_7tuple_keyfield),
+ [VCAP_KFS_NORMAL_5TUPLE_IP4] = ARRAY_SIZE(is0_normal_5tuple_ip4_keyfield),
+};
+
static int is2_keyfield_set_map_size[] = {
[VCAP_KFS_MAC_ETYPE] = ARRAY_SIZE(is2_mac_etype_keyfield),
[VCAP_KFS_ARP] = ARRAY_SIZE(is2_arp_keyfield),
@@ -1021,7 +2415,319 @@ static int is2_keyfield_set_map_size[] = {
[VCAP_KFS_IP_7TUPLE] = ARRAY_SIZE(is2_ip_7tuple_keyfield),
};
+static int es0_keyfield_set_map_size[] = {
+ [VCAP_KFS_ISDX] = ARRAY_SIZE(es0_isdx_keyfield),
+};
+
+static int es2_keyfield_set_map_size[] = {
+ [VCAP_KFS_MAC_ETYPE] = ARRAY_SIZE(es2_mac_etype_keyfield),
+ [VCAP_KFS_ARP] = ARRAY_SIZE(es2_arp_keyfield),
+ [VCAP_KFS_IP4_TCP_UDP] = ARRAY_SIZE(es2_ip4_tcp_udp_keyfield),
+ [VCAP_KFS_IP4_OTHER] = ARRAY_SIZE(es2_ip4_other_keyfield),
+ [VCAP_KFS_IP_7TUPLE] = ARRAY_SIZE(es2_ip_7tuple_keyfield),
+ [VCAP_KFS_IP6_STD] = ARRAY_SIZE(es2_ip6_std_keyfield),
+};
+
/* actionfields */
+static const struct vcap_field is0_classification_actionfield[] = {
+ [VCAP_AF_TYPE] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 0,
+ .width = 1,
+ },
+ [VCAP_AF_DSCP_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 1,
+ .width = 1,
+ },
+ [VCAP_AF_DSCP_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 2,
+ .width = 6,
+ },
+ [VCAP_AF_QOS_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 12,
+ .width = 1,
+ },
+ [VCAP_AF_QOS_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 13,
+ .width = 3,
+ },
+ [VCAP_AF_DP_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 16,
+ .width = 1,
+ },
+ [VCAP_AF_DP_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 17,
+ .width = 2,
+ },
+ [VCAP_AF_DEI_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 19,
+ .width = 1,
+ },
+ [VCAP_AF_DEI_VAL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 20,
+ .width = 1,
+ },
+ [VCAP_AF_PCP_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 21,
+ .width = 1,
+ },
+ [VCAP_AF_PCP_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 22,
+ .width = 3,
+ },
+ [VCAP_AF_MAP_LOOKUP_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 25,
+ .width = 2,
+ },
+ [VCAP_AF_MAP_KEY] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 27,
+ .width = 3,
+ },
+ [VCAP_AF_MAP_IDX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 30,
+ .width = 9,
+ },
+ [VCAP_AF_CLS_VID_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 39,
+ .width = 3,
+ },
+ [VCAP_AF_VID_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 45,
+ .width = 13,
+ },
+ [VCAP_AF_ISDX_ADD_REPLACE_SEL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 68,
+ .width = 1,
+ },
+ [VCAP_AF_ISDX_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 69,
+ .width = 12,
+ },
+ [VCAP_AF_PAG_OVERRIDE_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 109,
+ .width = 8,
+ },
+ [VCAP_AF_PAG_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 117,
+ .width = 8,
+ },
+ [VCAP_AF_NXT_IDX_CTRL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 171,
+ .width = 3,
+ },
+ [VCAP_AF_NXT_IDX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 174,
+ .width = 12,
+ },
+};
+
+static const struct vcap_field is0_full_actionfield[] = {
+ [VCAP_AF_DSCP_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 0,
+ .width = 1,
+ },
+ [VCAP_AF_DSCP_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 1,
+ .width = 6,
+ },
+ [VCAP_AF_QOS_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 11,
+ .width = 1,
+ },
+ [VCAP_AF_QOS_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 12,
+ .width = 3,
+ },
+ [VCAP_AF_DP_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 15,
+ .width = 1,
+ },
+ [VCAP_AF_DP_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 16,
+ .width = 2,
+ },
+ [VCAP_AF_DEI_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 18,
+ .width = 1,
+ },
+ [VCAP_AF_DEI_VAL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 19,
+ .width = 1,
+ },
+ [VCAP_AF_PCP_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 20,
+ .width = 1,
+ },
+ [VCAP_AF_PCP_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 21,
+ .width = 3,
+ },
+ [VCAP_AF_MAP_LOOKUP_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 24,
+ .width = 2,
+ },
+ [VCAP_AF_MAP_KEY] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 26,
+ .width = 3,
+ },
+ [VCAP_AF_MAP_IDX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 29,
+ .width = 9,
+ },
+ [VCAP_AF_CLS_VID_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 38,
+ .width = 3,
+ },
+ [VCAP_AF_VID_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 44,
+ .width = 13,
+ },
+ [VCAP_AF_ISDX_ADD_REPLACE_SEL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 67,
+ .width = 1,
+ },
+ [VCAP_AF_ISDX_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 68,
+ .width = 12,
+ },
+ [VCAP_AF_MASK_MODE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 80,
+ .width = 3,
+ },
+ [VCAP_AF_PORT_MASK] = {
+ .type = VCAP_FIELD_U72,
+ .offset = 83,
+ .width = 65,
+ },
+ [VCAP_AF_PAG_OVERRIDE_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 204,
+ .width = 8,
+ },
+ [VCAP_AF_PAG_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 212,
+ .width = 8,
+ },
+ [VCAP_AF_NXT_IDX_CTRL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 298,
+ .width = 3,
+ },
+ [VCAP_AF_NXT_IDX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 301,
+ .width = 12,
+ },
+};
+
+static const struct vcap_field is0_class_reduced_actionfield[] = {
+ [VCAP_AF_TYPE] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 0,
+ .width = 1,
+ },
+ [VCAP_AF_QOS_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 5,
+ .width = 1,
+ },
+ [VCAP_AF_QOS_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 6,
+ .width = 3,
+ },
+ [VCAP_AF_DP_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 9,
+ .width = 1,
+ },
+ [VCAP_AF_DP_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 10,
+ .width = 2,
+ },
+ [VCAP_AF_MAP_LOOKUP_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 12,
+ .width = 2,
+ },
+ [VCAP_AF_MAP_KEY] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 14,
+ .width = 3,
+ },
+ [VCAP_AF_CLS_VID_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 17,
+ .width = 3,
+ },
+ [VCAP_AF_VID_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 23,
+ .width = 13,
+ },
+ [VCAP_AF_ISDX_ADD_REPLACE_SEL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 46,
+ .width = 1,
+ },
+ [VCAP_AF_ISDX_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 47,
+ .width = 12,
+ },
+ [VCAP_AF_NXT_IDX_CTRL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 90,
+ .width = 3,
+ },
+ [VCAP_AF_NXT_IDX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 93,
+ .width = 12,
+ },
+};
+
static const struct vcap_field is2_base_type_actionfield[] = {
[VCAP_AF_PIPELINE_FORCE_ENA] = {
.type = VCAP_FIELD_BIT,
@@ -1110,7 +2816,276 @@ static const struct vcap_field is2_base_type_actionfield[] = {
},
};
+static const struct vcap_field es0_es0_actionfield[] = {
+ [VCAP_AF_PUSH_OUTER_TAG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 2,
+ },
+ [VCAP_AF_PUSH_INNER_TAG] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 2,
+ .width = 1,
+ },
+ [VCAP_AF_TAG_A_TPID_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 3,
+ .width = 3,
+ },
+ [VCAP_AF_TAG_A_VID_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 6,
+ .width = 2,
+ },
+ [VCAP_AF_TAG_A_PCP_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 8,
+ .width = 3,
+ },
+ [VCAP_AF_TAG_A_DEI_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 11,
+ .width = 3,
+ },
+ [VCAP_AF_TAG_B_TPID_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 14,
+ .width = 3,
+ },
+ [VCAP_AF_TAG_B_VID_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 17,
+ .width = 2,
+ },
+ [VCAP_AF_TAG_B_PCP_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 19,
+ .width = 3,
+ },
+ [VCAP_AF_TAG_B_DEI_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 22,
+ .width = 3,
+ },
+ [VCAP_AF_TAG_C_TPID_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 25,
+ .width = 3,
+ },
+ [VCAP_AF_TAG_C_PCP_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 28,
+ .width = 3,
+ },
+ [VCAP_AF_TAG_C_DEI_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 31,
+ .width = 3,
+ },
+ [VCAP_AF_VID_A_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 34,
+ .width = 12,
+ },
+ [VCAP_AF_PCP_A_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 46,
+ .width = 3,
+ },
+ [VCAP_AF_DEI_A_VAL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 49,
+ .width = 1,
+ },
+ [VCAP_AF_VID_B_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 50,
+ .width = 12,
+ },
+ [VCAP_AF_PCP_B_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 62,
+ .width = 3,
+ },
+ [VCAP_AF_DEI_B_VAL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 65,
+ .width = 1,
+ },
+ [VCAP_AF_VID_C_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 66,
+ .width = 12,
+ },
+ [VCAP_AF_PCP_C_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 78,
+ .width = 3,
+ },
+ [VCAP_AF_DEI_C_VAL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 81,
+ .width = 1,
+ },
+ [VCAP_AF_POP_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 82,
+ .width = 2,
+ },
+ [VCAP_AF_UNTAG_VID_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 84,
+ .width = 1,
+ },
+ [VCAP_AF_PUSH_CUSTOMER_TAG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 85,
+ .width = 2,
+ },
+ [VCAP_AF_TAG_C_VID_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 87,
+ .width = 2,
+ },
+ [VCAP_AF_DSCP_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 127,
+ .width = 3,
+ },
+ [VCAP_AF_DSCP_VAL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 130,
+ .width = 6,
+ },
+ [VCAP_AF_ESDX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 323,
+ .width = 13,
+ },
+ [VCAP_AF_FWD_SEL] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 459,
+ .width = 2,
+ },
+ [VCAP_AF_CPU_QU] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 461,
+ .width = 3,
+ },
+ [VCAP_AF_PIPELINE_PT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 464,
+ .width = 2,
+ },
+ [VCAP_AF_PIPELINE_ACT] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 466,
+ .width = 1,
+ },
+ [VCAP_AF_SWAP_MACS_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 475,
+ .width = 1,
+ },
+ [VCAP_AF_LOOP_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 476,
+ .width = 1,
+ },
+};
+
+static const struct vcap_field es2_base_type_actionfield[] = {
+ [VCAP_AF_HIT_ME_ONCE] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 0,
+ .width = 1,
+ },
+ [VCAP_AF_INTR_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 1,
+ .width = 1,
+ },
+ [VCAP_AF_FWD_MODE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 2,
+ .width = 2,
+ },
+ [VCAP_AF_COPY_QUEUE_NUM] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 4,
+ .width = 16,
+ },
+ [VCAP_AF_COPY_PORT_NUM] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 20,
+ .width = 7,
+ },
+ [VCAP_AF_MIRROR_PROBE_ID] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 27,
+ .width = 2,
+ },
+ [VCAP_AF_CPU_COPY_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 29,
+ .width = 1,
+ },
+ [VCAP_AF_CPU_QUEUE_NUM] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 30,
+ .width = 3,
+ },
+ [VCAP_AF_POLICE_ENA] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 33,
+ .width = 1,
+ },
+ [VCAP_AF_POLICE_REMARK] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 34,
+ .width = 1,
+ },
+ [VCAP_AF_POLICE_IDX] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 35,
+ .width = 6,
+ },
+ [VCAP_AF_ES2_REW_CMD] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 41,
+ .width = 3,
+ },
+ [VCAP_AF_CNT_ID] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 44,
+ .width = 11,
+ },
+ [VCAP_AF_IGNORE_PIPELINE_CTRL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 55,
+ .width = 1,
+ },
+};
+
/* actionfield_set */
+static const struct vcap_set is0_actionfield_set[] = {
+ [VCAP_AFS_CLASSIFICATION] = {
+ .type_id = 1,
+ .sw_per_item = 2,
+ .sw_cnt = 6,
+ },
+ [VCAP_AFS_FULL] = {
+ .type_id = -1,
+ .sw_per_item = 3,
+ .sw_cnt = 4,
+ },
+ [VCAP_AFS_CLASS_REDUCED] = {
+ .type_id = 1,
+ .sw_per_item = 1,
+ .sw_cnt = 12,
+ },
+};
+
static const struct vcap_set is2_actionfield_set[] = {
[VCAP_AFS_BASE_TYPE] = {
.type_id = -1,
@@ -1119,17 +3094,196 @@ static const struct vcap_set is2_actionfield_set[] = {
},
};
+static const struct vcap_set es0_actionfield_set[] = {
+ [VCAP_AFS_ES0] = {
+ .type_id = -1,
+ .sw_per_item = 1,
+ .sw_cnt = 1,
+ },
+};
+
+static const struct vcap_set es2_actionfield_set[] = {
+ [VCAP_AFS_BASE_TYPE] = {
+ .type_id = -1,
+ .sw_per_item = 3,
+ .sw_cnt = 4,
+ },
+};
+
/* actionfield_set map */
+static const struct vcap_field *is0_actionfield_set_map[] = {
+ [VCAP_AFS_CLASSIFICATION] = is0_classification_actionfield,
+ [VCAP_AFS_FULL] = is0_full_actionfield,
+ [VCAP_AFS_CLASS_REDUCED] = is0_class_reduced_actionfield,
+};
+
static const struct vcap_field *is2_actionfield_set_map[] = {
[VCAP_AFS_BASE_TYPE] = is2_base_type_actionfield,
};
+static const struct vcap_field *es0_actionfield_set_map[] = {
+ [VCAP_AFS_ES0] = es0_es0_actionfield,
+};
+
+static const struct vcap_field *es2_actionfield_set_map[] = {
+ [VCAP_AFS_BASE_TYPE] = es2_base_type_actionfield,
+};
+
/* actionfield_set map size */
+static int is0_actionfield_set_map_size[] = {
+ [VCAP_AFS_CLASSIFICATION] = ARRAY_SIZE(is0_classification_actionfield),
+ [VCAP_AFS_FULL] = ARRAY_SIZE(is0_full_actionfield),
+ [VCAP_AFS_CLASS_REDUCED] = ARRAY_SIZE(is0_class_reduced_actionfield),
+};
+
static int is2_actionfield_set_map_size[] = {
[VCAP_AFS_BASE_TYPE] = ARRAY_SIZE(is2_base_type_actionfield),
};
+static int es0_actionfield_set_map_size[] = {
+ [VCAP_AFS_ES0] = ARRAY_SIZE(es0_es0_actionfield),
+};
+
+static int es2_actionfield_set_map_size[] = {
+ [VCAP_AFS_BASE_TYPE] = ARRAY_SIZE(es2_base_type_actionfield),
+};
+
/* Type Groups */
+static const struct vcap_typegroup is0_x12_keyfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 5,
+ .value = 16,
+ },
+ {
+ .offset = 52,
+ .width = 1,
+ .value = 0,
+ },
+ {
+ .offset = 104,
+ .width = 2,
+ .value = 0,
+ },
+ {
+ .offset = 156,
+ .width = 3,
+ .value = 0,
+ },
+ {
+ .offset = 208,
+ .width = 2,
+ .value = 0,
+ },
+ {
+ .offset = 260,
+ .width = 1,
+ .value = 0,
+ },
+ {
+ .offset = 312,
+ .width = 4,
+ .value = 0,
+ },
+ {
+ .offset = 364,
+ .width = 1,
+ .value = 0,
+ },
+ {
+ .offset = 416,
+ .width = 2,
+ .value = 0,
+ },
+ {
+ .offset = 468,
+ .width = 3,
+ .value = 0,
+ },
+ {
+ .offset = 520,
+ .width = 2,
+ .value = 0,
+ },
+ {
+ .offset = 572,
+ .width = 1,
+ .value = 0,
+ },
+ {}
+};
+
+static const struct vcap_typegroup is0_x6_keyfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 4,
+ .value = 8,
+ },
+ {
+ .offset = 52,
+ .width = 1,
+ .value = 0,
+ },
+ {
+ .offset = 104,
+ .width = 2,
+ .value = 0,
+ },
+ {
+ .offset = 156,
+ .width = 3,
+ .value = 0,
+ },
+ {
+ .offset = 208,
+ .width = 2,
+ .value = 0,
+ },
+ {
+ .offset = 260,
+ .width = 1,
+ .value = 0,
+ },
+ {}
+};
+
+static const struct vcap_typegroup is0_x3_keyfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 3,
+ .value = 4,
+ },
+ {
+ .offset = 52,
+ .width = 2,
+ .value = 0,
+ },
+ {
+ .offset = 104,
+ .width = 2,
+ .value = 0,
+ },
+ {}
+};
+
+static const struct vcap_typegroup is0_x2_keyfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 2,
+ .value = 2,
+ },
+ {
+ .offset = 52,
+ .width = 1,
+ .value = 0,
+ },
+ {}
+};
+
+static const struct vcap_typegroup is0_x1_keyfield_set_typegroups[] = {
+ {}
+};
+
static const struct vcap_typegroup is2_x12_keyfield_set_typegroups[] = {
{
.offset = 0,
@@ -1176,6 +3330,70 @@ static const struct vcap_typegroup is2_x1_keyfield_set_typegroups[] = {
{}
};
+static const struct vcap_typegroup es0_x1_keyfield_set_typegroups[] = {
+ {}
+};
+
+static const struct vcap_typegroup es2_x12_keyfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 3,
+ .value = 4,
+ },
+ {
+ .offset = 156,
+ .width = 1,
+ .value = 0,
+ },
+ {
+ .offset = 312,
+ .width = 2,
+ .value = 0,
+ },
+ {
+ .offset = 468,
+ .width = 1,
+ .value = 0,
+ },
+ {}
+};
+
+static const struct vcap_typegroup es2_x6_keyfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 2,
+ .value = 2,
+ },
+ {
+ .offset = 156,
+ .width = 1,
+ .value = 0,
+ },
+ {}
+};
+
+static const struct vcap_typegroup es2_x3_keyfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 1,
+ .value = 1,
+ },
+ {}
+};
+
+static const struct vcap_typegroup es2_x1_keyfield_set_typegroups[] = {
+ {}
+};
+
+static const struct vcap_typegroup *is0_keyfield_set_typegroups[] = {
+ [12] = is0_x12_keyfield_set_typegroups,
+ [6] = is0_x6_keyfield_set_typegroups,
+ [3] = is0_x3_keyfield_set_typegroups,
+ [2] = is0_x2_keyfield_set_typegroups,
+ [1] = is0_x1_keyfield_set_typegroups,
+ [13] = NULL,
+};
+
static const struct vcap_typegroup *is2_keyfield_set_typegroups[] = {
[12] = is2_x12_keyfield_set_typegroups,
[6] = is2_x6_keyfield_set_typegroups,
@@ -1184,6 +3402,61 @@ static const struct vcap_typegroup *is2_keyfield_set_typegroups[] = {
[13] = NULL,
};
+static const struct vcap_typegroup *es0_keyfield_set_typegroups[] = {
+ [1] = es0_x1_keyfield_set_typegroups,
+ [2] = NULL,
+};
+
+static const struct vcap_typegroup *es2_keyfield_set_typegroups[] = {
+ [12] = es2_x12_keyfield_set_typegroups,
+ [6] = es2_x6_keyfield_set_typegroups,
+ [3] = es2_x3_keyfield_set_typegroups,
+ [1] = es2_x1_keyfield_set_typegroups,
+ [13] = NULL,
+};
+
+static const struct vcap_typegroup is0_x3_actionfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 3,
+ .value = 4,
+ },
+ {
+ .offset = 110,
+ .width = 2,
+ .value = 0,
+ },
+ {
+ .offset = 220,
+ .width = 2,
+ .value = 0,
+ },
+ {}
+};
+
+static const struct vcap_typegroup is0_x2_actionfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 2,
+ .value = 2,
+ },
+ {
+ .offset = 110,
+ .width = 1,
+ .value = 0,
+ },
+ {}
+};
+
+static const struct vcap_typegroup is0_x1_actionfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 1,
+ .value = 1,
+ },
+ {}
+};
+
static const struct vcap_typegroup is2_x3_actionfield_set_typegroups[] = {
{
.offset = 0,
@@ -1207,36 +3480,122 @@ static const struct vcap_typegroup is2_x1_actionfield_set_typegroups[] = {
{}
};
+static const struct vcap_typegroup es0_x1_actionfield_set_typegroups[] = {
+ {}
+};
+
+static const struct vcap_typegroup es2_x3_actionfield_set_typegroups[] = {
+ {
+ .offset = 0,
+ .width = 2,
+ .value = 2,
+ },
+ {
+ .offset = 21,
+ .width = 1,
+ .value = 0,
+ },
+ {
+ .offset = 42,
+ .width = 1,
+ .value = 0,
+ },
+ {}
+};
+
+static const struct vcap_typegroup es2_x1_actionfield_set_typegroups[] = {
+ {}
+};
+
+static const struct vcap_typegroup *is0_actionfield_set_typegroups[] = {
+ [3] = is0_x3_actionfield_set_typegroups,
+ [2] = is0_x2_actionfield_set_typegroups,
+ [1] = is0_x1_actionfield_set_typegroups,
+ [13] = NULL,
+};
+
static const struct vcap_typegroup *is2_actionfield_set_typegroups[] = {
[3] = is2_x3_actionfield_set_typegroups,
[1] = is2_x1_actionfield_set_typegroups,
[13] = NULL,
};
+static const struct vcap_typegroup *es0_actionfield_set_typegroups[] = {
+ [1] = es0_x1_actionfield_set_typegroups,
+ [2] = NULL,
+};
+
+static const struct vcap_typegroup *es2_actionfield_set_typegroups[] = {
+ [3] = es2_x3_actionfield_set_typegroups,
+ [1] = es2_x1_actionfield_set_typegroups,
+ [13] = NULL,
+};
+
/* Keyfieldset names */
static const char * const vcap_keyfield_set_names[] = {
[VCAP_KFS_NO_VALUE] = "(None)",
[VCAP_KFS_ARP] = "VCAP_KFS_ARP",
+ [VCAP_KFS_ETAG] = "VCAP_KFS_ETAG",
[VCAP_KFS_IP4_OTHER] = "VCAP_KFS_IP4_OTHER",
[VCAP_KFS_IP4_TCP_UDP] = "VCAP_KFS_IP4_TCP_UDP",
+ [VCAP_KFS_IP4_VID] = "VCAP_KFS_IP4_VID",
+ [VCAP_KFS_IP6_OTHER] = "VCAP_KFS_IP6_OTHER",
[VCAP_KFS_IP6_STD] = "VCAP_KFS_IP6_STD",
+ [VCAP_KFS_IP6_TCP_UDP] = "VCAP_KFS_IP6_TCP_UDP",
+ [VCAP_KFS_IP6_VID] = "VCAP_KFS_IP6_VID",
[VCAP_KFS_IP_7TUPLE] = "VCAP_KFS_IP_7TUPLE",
+ [VCAP_KFS_ISDX] = "VCAP_KFS_ISDX",
+ [VCAP_KFS_LL_FULL] = "VCAP_KFS_LL_FULL",
[VCAP_KFS_MAC_ETYPE] = "VCAP_KFS_MAC_ETYPE",
+ [VCAP_KFS_MAC_LLC] = "VCAP_KFS_MAC_LLC",
+ [VCAP_KFS_MAC_SNAP] = "VCAP_KFS_MAC_SNAP",
+ [VCAP_KFS_NORMAL_5TUPLE_IP4] = "VCAP_KFS_NORMAL_5TUPLE_IP4",
+ [VCAP_KFS_NORMAL_7TUPLE] = "VCAP_KFS_NORMAL_7TUPLE",
+ [VCAP_KFS_OAM] = "VCAP_KFS_OAM",
+ [VCAP_KFS_PURE_5TUPLE_IP4] = "VCAP_KFS_PURE_5TUPLE_IP4",
+ [VCAP_KFS_SMAC_SIP4] = "VCAP_KFS_SMAC_SIP4",
+ [VCAP_KFS_SMAC_SIP6] = "VCAP_KFS_SMAC_SIP6",
};
/* Actionfieldset names */
static const char * const vcap_actionfield_set_names[] = {
[VCAP_AFS_NO_VALUE] = "(None)",
[VCAP_AFS_BASE_TYPE] = "VCAP_AFS_BASE_TYPE",
+ [VCAP_AFS_CLASSIFICATION] = "VCAP_AFS_CLASSIFICATION",
+ [VCAP_AFS_CLASS_REDUCED] = "VCAP_AFS_CLASS_REDUCED",
+ [VCAP_AFS_ES0] = "VCAP_AFS_ES0",
+ [VCAP_AFS_FULL] = "VCAP_AFS_FULL",
+ [VCAP_AFS_SMAC_SIP] = "VCAP_AFS_SMAC_SIP",
};
/* Keyfield names */
static const char * const vcap_keyfield_names[] = {
[VCAP_KF_NO_VALUE] = "(None)",
+ [VCAP_KF_8021BR_ECID_BASE] = "8021BR_ECID_BASE",
+ [VCAP_KF_8021BR_ECID_EXT] = "8021BR_ECID_EXT",
+ [VCAP_KF_8021BR_E_TAGGED] = "8021BR_E_TAGGED",
+ [VCAP_KF_8021BR_GRP] = "8021BR_GRP",
+ [VCAP_KF_8021BR_IGR_ECID_BASE] = "8021BR_IGR_ECID_BASE",
+ [VCAP_KF_8021BR_IGR_ECID_EXT] = "8021BR_IGR_ECID_EXT",
+ [VCAP_KF_8021Q_DEI0] = "8021Q_DEI0",
+ [VCAP_KF_8021Q_DEI1] = "8021Q_DEI1",
+ [VCAP_KF_8021Q_DEI2] = "8021Q_DEI2",
[VCAP_KF_8021Q_DEI_CLS] = "8021Q_DEI_CLS",
+ [VCAP_KF_8021Q_PCP0] = "8021Q_PCP0",
+ [VCAP_KF_8021Q_PCP1] = "8021Q_PCP1",
+ [VCAP_KF_8021Q_PCP2] = "8021Q_PCP2",
[VCAP_KF_8021Q_PCP_CLS] = "8021Q_PCP_CLS",
+ [VCAP_KF_8021Q_TPID] = "8021Q_TPID",
+ [VCAP_KF_8021Q_TPID0] = "8021Q_TPID0",
+ [VCAP_KF_8021Q_TPID1] = "8021Q_TPID1",
+ [VCAP_KF_8021Q_TPID2] = "8021Q_TPID2",
+ [VCAP_KF_8021Q_VID0] = "8021Q_VID0",
+ [VCAP_KF_8021Q_VID1] = "8021Q_VID1",
+ [VCAP_KF_8021Q_VID2] = "8021Q_VID2",
[VCAP_KF_8021Q_VID_CLS] = "8021Q_VID_CLS",
[VCAP_KF_8021Q_VLAN_TAGGED_IS] = "8021Q_VLAN_TAGGED_IS",
+ [VCAP_KF_8021Q_VLAN_TAGS] = "8021Q_VLAN_TAGS",
+ [VCAP_KF_ACL_GRP_ID] = "ACL_GRP_ID",
[VCAP_KF_ARP_ADDR_SPACE_OK_IS] = "ARP_ADDR_SPACE_OK_IS",
[VCAP_KF_ARP_LEN_OK_IS] = "ARP_LEN_OK_IS",
[VCAP_KF_ARP_OPCODE] = "ARP_OPCODE",
@@ -1244,25 +3603,46 @@ static const char * const vcap_keyfield_names[] = {
[VCAP_KF_ARP_PROTO_SPACE_OK_IS] = "ARP_PROTO_SPACE_OK_IS",
[VCAP_KF_ARP_SENDER_MATCH_IS] = "ARP_SENDER_MATCH_IS",
[VCAP_KF_ARP_TGT_MATCH_IS] = "ARP_TGT_MATCH_IS",
+ [VCAP_KF_COSID_CLS] = "COSID_CLS",
+ [VCAP_KF_ES0_ISDX_KEY_ENA] = "ES0_ISDX_KEY_ENA",
[VCAP_KF_ETYPE] = "ETYPE",
[VCAP_KF_ETYPE_LEN_IS] = "ETYPE_LEN_IS",
+ [VCAP_KF_HOST_MATCH] = "HOST_MATCH",
+ [VCAP_KF_IF_EGR_PORT_MASK] = "IF_EGR_PORT_MASK",
+ [VCAP_KF_IF_EGR_PORT_MASK_RNG] = "IF_EGR_PORT_MASK_RNG",
+ [VCAP_KF_IF_EGR_PORT_NO] = "IF_EGR_PORT_NO",
+ [VCAP_KF_IF_IGR_PORT] = "IF_IGR_PORT",
[VCAP_KF_IF_IGR_PORT_MASK] = "IF_IGR_PORT_MASK",
[VCAP_KF_IF_IGR_PORT_MASK_L3] = "IF_IGR_PORT_MASK_L3",
[VCAP_KF_IF_IGR_PORT_MASK_RNG] = "IF_IGR_PORT_MASK_RNG",
[VCAP_KF_IF_IGR_PORT_MASK_SEL] = "IF_IGR_PORT_MASK_SEL",
+ [VCAP_KF_IF_IGR_PORT_SEL] = "IF_IGR_PORT_SEL",
[VCAP_KF_IP4_IS] = "IP4_IS",
+ [VCAP_KF_IP_MC_IS] = "IP_MC_IS",
+ [VCAP_KF_IP_PAYLOAD_5TUPLE] = "IP_PAYLOAD_5TUPLE",
+ [VCAP_KF_IP_SNAP_IS] = "IP_SNAP_IS",
[VCAP_KF_ISDX_CLS] = "ISDX_CLS",
[VCAP_KF_ISDX_GT0_IS] = "ISDX_GT0_IS",
[VCAP_KF_L2_BC_IS] = "L2_BC_IS",
[VCAP_KF_L2_DMAC] = "L2_DMAC",
+ [VCAP_KF_L2_FRM_TYPE] = "L2_FRM_TYPE",
[VCAP_KF_L2_FWD_IS] = "L2_FWD_IS",
+ [VCAP_KF_L2_LLC] = "L2_LLC",
[VCAP_KF_L2_MC_IS] = "L2_MC_IS",
+ [VCAP_KF_L2_PAYLOAD0] = "L2_PAYLOAD0",
+ [VCAP_KF_L2_PAYLOAD1] = "L2_PAYLOAD1",
+ [VCAP_KF_L2_PAYLOAD2] = "L2_PAYLOAD2",
[VCAP_KF_L2_PAYLOAD_ETYPE] = "L2_PAYLOAD_ETYPE",
[VCAP_KF_L2_SMAC] = "L2_SMAC",
+ [VCAP_KF_L2_SNAP] = "L2_SNAP",
[VCAP_KF_L3_DIP_EQ_SIP_IS] = "L3_DIP_EQ_SIP_IS",
+ [VCAP_KF_L3_DPL_CLS] = "L3_DPL_CLS",
+ [VCAP_KF_L3_DSCP] = "L3_DSCP",
[VCAP_KF_L3_DST_IS] = "L3_DST_IS",
+ [VCAP_KF_L3_FRAGMENT] = "L3_FRAGMENT",
[VCAP_KF_L3_FRAGMENT_TYPE] = "L3_FRAGMENT_TYPE",
[VCAP_KF_L3_FRAG_INVLD_L4_LEN] = "L3_FRAG_INVLD_L4_LEN",
+ [VCAP_KF_L3_FRAG_OFS_GT0] = "L3_FRAG_OFS_GT0",
[VCAP_KF_L3_IP4_DIP] = "L3_IP4_DIP",
[VCAP_KF_L3_IP4_SIP] = "L3_IP4_SIP",
[VCAP_KF_L3_IP6_DIP] = "L3_IP6_DIP",
@@ -1273,6 +3653,8 @@ static const char * const vcap_keyfield_names[] = {
[VCAP_KF_L3_RT_IS] = "L3_RT_IS",
[VCAP_KF_L3_TOS] = "L3_TOS",
[VCAP_KF_L3_TTL_GT0] = "L3_TTL_GT0",
+ [VCAP_KF_L4_1588_DOM] = "L4_1588_DOM",
+ [VCAP_KF_L4_1588_VER] = "L4_1588_VER",
[VCAP_KF_L4_ACK] = "L4_ACK",
[VCAP_KF_L4_DPORT] = "L4_DPORT",
[VCAP_KF_L4_FIN] = "L4_FIN",
@@ -1286,9 +3668,19 @@ static const char * const vcap_keyfield_names[] = {
[VCAP_KF_L4_SYN] = "L4_SYN",
[VCAP_KF_L4_URG] = "L4_URG",
[VCAP_KF_LOOKUP_FIRST_IS] = "LOOKUP_FIRST_IS",
+ [VCAP_KF_LOOKUP_GEN_IDX] = "LOOKUP_GEN_IDX",
+ [VCAP_KF_LOOKUP_GEN_IDX_SEL] = "LOOKUP_GEN_IDX_SEL",
[VCAP_KF_LOOKUP_PAG] = "LOOKUP_PAG",
+ [VCAP_KF_MIRROR_PROBE] = "MIRROR_PROBE",
[VCAP_KF_OAM_CCM_CNTS_EQ0] = "OAM_CCM_CNTS_EQ0",
+ [VCAP_KF_OAM_DETECTED] = "OAM_DETECTED",
+ [VCAP_KF_OAM_FLAGS] = "OAM_FLAGS",
+ [VCAP_KF_OAM_MEL_FLAGS] = "OAM_MEL_FLAGS",
+ [VCAP_KF_OAM_MEPID] = "OAM_MEPID",
+ [VCAP_KF_OAM_OPCODE] = "OAM_OPCODE",
+ [VCAP_KF_OAM_VER] = "OAM_VER",
[VCAP_KF_OAM_Y1731_IS] = "OAM_Y1731_IS",
+ [VCAP_KF_PROT_ACTIVE] = "PROT_ACTIVE",
[VCAP_KF_TCP_IS] = "TCP_IS",
[VCAP_KF_TCP_UDP_IS] = "TCP_UDP_IS",
[VCAP_KF_TYPE] = "TYPE",
@@ -1297,27 +3689,116 @@ static const char * const vcap_keyfield_names[] = {
/* Actionfield names */
static const char * const vcap_actionfield_names[] = {
[VCAP_AF_NO_VALUE] = "(None)",
+ [VCAP_AF_ACL_ID] = "ACL_ID",
+ [VCAP_AF_CLS_VID_SEL] = "CLS_VID_SEL",
[VCAP_AF_CNT_ID] = "CNT_ID",
+ [VCAP_AF_COPY_PORT_NUM] = "COPY_PORT_NUM",
+ [VCAP_AF_COPY_QUEUE_NUM] = "COPY_QUEUE_NUM",
[VCAP_AF_CPU_COPY_ENA] = "CPU_COPY_ENA",
+ [VCAP_AF_CPU_QU] = "CPU_QU",
[VCAP_AF_CPU_QUEUE_NUM] = "CPU_QUEUE_NUM",
+ [VCAP_AF_DEI_A_VAL] = "DEI_A_VAL",
+ [VCAP_AF_DEI_B_VAL] = "DEI_B_VAL",
+ [VCAP_AF_DEI_C_VAL] = "DEI_C_VAL",
+ [VCAP_AF_DEI_ENA] = "DEI_ENA",
+ [VCAP_AF_DEI_VAL] = "DEI_VAL",
+ [VCAP_AF_DP_ENA] = "DP_ENA",
+ [VCAP_AF_DP_VAL] = "DP_VAL",
+ [VCAP_AF_DSCP_ENA] = "DSCP_ENA",
+ [VCAP_AF_DSCP_SEL] = "DSCP_SEL",
+ [VCAP_AF_DSCP_VAL] = "DSCP_VAL",
+ [VCAP_AF_ES2_REW_CMD] = "ES2_REW_CMD",
+ [VCAP_AF_ESDX] = "ESDX",
+ [VCAP_AF_FWD_KILL_ENA] = "FWD_KILL_ENA",
+ [VCAP_AF_FWD_MODE] = "FWD_MODE",
+ [VCAP_AF_FWD_SEL] = "FWD_SEL",
[VCAP_AF_HIT_ME_ONCE] = "HIT_ME_ONCE",
+ [VCAP_AF_HOST_MATCH] = "HOST_MATCH",
[VCAP_AF_IGNORE_PIPELINE_CTRL] = "IGNORE_PIPELINE_CTRL",
[VCAP_AF_INTR_ENA] = "INTR_ENA",
+ [VCAP_AF_ISDX_ADD_REPLACE_SEL] = "ISDX_ADD_REPLACE_SEL",
+ [VCAP_AF_ISDX_ENA] = "ISDX_ENA",
+ [VCAP_AF_ISDX_VAL] = "ISDX_VAL",
+ [VCAP_AF_LOOP_ENA] = "LOOP_ENA",
[VCAP_AF_LRN_DIS] = "LRN_DIS",
+ [VCAP_AF_MAP_IDX] = "MAP_IDX",
+ [VCAP_AF_MAP_KEY] = "MAP_KEY",
+ [VCAP_AF_MAP_LOOKUP_SEL] = "MAP_LOOKUP_SEL",
[VCAP_AF_MASK_MODE] = "MASK_MODE",
[VCAP_AF_MATCH_ID] = "MATCH_ID",
[VCAP_AF_MATCH_ID_MASK] = "MATCH_ID_MASK",
+ [VCAP_AF_MIRROR_ENA] = "MIRROR_ENA",
[VCAP_AF_MIRROR_PROBE] = "MIRROR_PROBE",
+ [VCAP_AF_MIRROR_PROBE_ID] = "MIRROR_PROBE_ID",
+ [VCAP_AF_NXT_IDX] = "NXT_IDX",
+ [VCAP_AF_NXT_IDX_CTRL] = "NXT_IDX_CTRL",
+ [VCAP_AF_PAG_OVERRIDE_MASK] = "PAG_OVERRIDE_MASK",
+ [VCAP_AF_PAG_VAL] = "PAG_VAL",
+ [VCAP_AF_PCP_A_VAL] = "PCP_A_VAL",
+ [VCAP_AF_PCP_B_VAL] = "PCP_B_VAL",
+ [VCAP_AF_PCP_C_VAL] = "PCP_C_VAL",
+ [VCAP_AF_PCP_ENA] = "PCP_ENA",
+ [VCAP_AF_PCP_VAL] = "PCP_VAL",
+ [VCAP_AF_PIPELINE_ACT] = "PIPELINE_ACT",
[VCAP_AF_PIPELINE_FORCE_ENA] = "PIPELINE_FORCE_ENA",
[VCAP_AF_PIPELINE_PT] = "PIPELINE_PT",
[VCAP_AF_POLICE_ENA] = "POLICE_ENA",
[VCAP_AF_POLICE_IDX] = "POLICE_IDX",
+ [VCAP_AF_POLICE_REMARK] = "POLICE_REMARK",
+ [VCAP_AF_POLICE_VCAP_ONLY] = "POLICE_VCAP_ONLY",
+ [VCAP_AF_POP_VAL] = "POP_VAL",
[VCAP_AF_PORT_MASK] = "PORT_MASK",
+ [VCAP_AF_PUSH_CUSTOMER_TAG] = "PUSH_CUSTOMER_TAG",
+ [VCAP_AF_PUSH_INNER_TAG] = "PUSH_INNER_TAG",
+ [VCAP_AF_PUSH_OUTER_TAG] = "PUSH_OUTER_TAG",
+ [VCAP_AF_QOS_ENA] = "QOS_ENA",
+ [VCAP_AF_QOS_VAL] = "QOS_VAL",
+ [VCAP_AF_REW_OP] = "REW_OP",
[VCAP_AF_RT_DIS] = "RT_DIS",
+ [VCAP_AF_SWAP_MACS_ENA] = "SWAP_MACS_ENA",
+ [VCAP_AF_TAG_A_DEI_SEL] = "TAG_A_DEI_SEL",
+ [VCAP_AF_TAG_A_PCP_SEL] = "TAG_A_PCP_SEL",
+ [VCAP_AF_TAG_A_TPID_SEL] = "TAG_A_TPID_SEL",
+ [VCAP_AF_TAG_A_VID_SEL] = "TAG_A_VID_SEL",
+ [VCAP_AF_TAG_B_DEI_SEL] = "TAG_B_DEI_SEL",
+ [VCAP_AF_TAG_B_PCP_SEL] = "TAG_B_PCP_SEL",
+ [VCAP_AF_TAG_B_TPID_SEL] = "TAG_B_TPID_SEL",
+ [VCAP_AF_TAG_B_VID_SEL] = "TAG_B_VID_SEL",
+ [VCAP_AF_TAG_C_DEI_SEL] = "TAG_C_DEI_SEL",
+ [VCAP_AF_TAG_C_PCP_SEL] = "TAG_C_PCP_SEL",
+ [VCAP_AF_TAG_C_TPID_SEL] = "TAG_C_TPID_SEL",
+ [VCAP_AF_TAG_C_VID_SEL] = "TAG_C_VID_SEL",
+ [VCAP_AF_TYPE] = "TYPE",
+ [VCAP_AF_UNTAG_VID_ENA] = "UNTAG_VID_ENA",
+ [VCAP_AF_VID_A_VAL] = "VID_A_VAL",
+ [VCAP_AF_VID_B_VAL] = "VID_B_VAL",
+ [VCAP_AF_VID_C_VAL] = "VID_C_VAL",
+ [VCAP_AF_VID_VAL] = "VID_VAL",
};
/* VCAPs */
const struct vcap_info sparx5_vcaps[] = {
+ [VCAP_TYPE_IS0] = {
+ .name = "is0",
+ .rows = 1024,
+ .sw_count = 12,
+ .sw_width = 52,
+ .sticky_width = 1,
+ .act_width = 110,
+ .default_cnt = 140,
+ .require_cnt_dis = 0,
+ .version = 1,
+ .keyfield_set = is0_keyfield_set,
+ .keyfield_set_size = ARRAY_SIZE(is0_keyfield_set),
+ .actionfield_set = is0_actionfield_set,
+ .actionfield_set_size = ARRAY_SIZE(is0_actionfield_set),
+ .keyfield_set_map = is0_keyfield_set_map,
+ .keyfield_set_map_size = is0_keyfield_set_map_size,
+ .actionfield_set_map = is0_actionfield_set_map,
+ .actionfield_set_map_size = is0_actionfield_set_map_size,
+ .keyfield_set_typegroups = is0_keyfield_set_typegroups,
+ .actionfield_set_typegroups = is0_actionfield_set_typegroups,
+ },
[VCAP_TYPE_IS2] = {
.name = "is2",
.rows = 256,
@@ -1339,11 +3820,53 @@ const struct vcap_info sparx5_vcaps[] = {
.keyfield_set_typegroups = is2_keyfield_set_typegroups,
.actionfield_set_typegroups = is2_actionfield_set_typegroups,
},
+ [VCAP_TYPE_ES0] = {
+ .name = "es0",
+ .rows = 4096,
+ .sw_count = 1,
+ .sw_width = 52,
+ .sticky_width = 1,
+ .act_width = 489,
+ .default_cnt = 70,
+ .require_cnt_dis = 0,
+ .version = 1,
+ .keyfield_set = es0_keyfield_set,
+ .keyfield_set_size = ARRAY_SIZE(es0_keyfield_set),
+ .actionfield_set = es0_actionfield_set,
+ .actionfield_set_size = ARRAY_SIZE(es0_actionfield_set),
+ .keyfield_set_map = es0_keyfield_set_map,
+ .keyfield_set_map_size = es0_keyfield_set_map_size,
+ .actionfield_set_map = es0_actionfield_set_map,
+ .actionfield_set_map_size = es0_actionfield_set_map_size,
+ .keyfield_set_typegroups = es0_keyfield_set_typegroups,
+ .actionfield_set_typegroups = es0_actionfield_set_typegroups,
+ },
+ [VCAP_TYPE_ES2] = {
+ .name = "es2",
+ .rows = 1024,
+ .sw_count = 12,
+ .sw_width = 52,
+ .sticky_width = 1,
+ .act_width = 21,
+ .default_cnt = 74,
+ .require_cnt_dis = 0,
+ .version = 1,
+ .keyfield_set = es2_keyfield_set,
+ .keyfield_set_size = ARRAY_SIZE(es2_keyfield_set),
+ .actionfield_set = es2_actionfield_set,
+ .actionfield_set_size = ARRAY_SIZE(es2_actionfield_set),
+ .keyfield_set_map = es2_keyfield_set_map,
+ .keyfield_set_map_size = es2_keyfield_set_map_size,
+ .actionfield_set_map = es2_actionfield_set_map,
+ .actionfield_set_map_size = es2_actionfield_set_map_size,
+ .keyfield_set_typegroups = es2_keyfield_set_typegroups,
+ .actionfield_set_typegroups = es2_actionfield_set_typegroups,
+ },
};
const struct vcap_statistics sparx5_vcap_stats = {
.name = "sparx5",
- .count = 1,
+ .count = 4,
.keyfield_set_names = vcap_keyfield_set_names,
.actionfield_set_names = vcap_actionfield_set_names,
.keyfield_names = vcap_keyfield_names,
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_debugfs.c b/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_debugfs.c
index b91e05ffe2f4..07b472c84a47 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_debugfs.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_debugfs.c
@@ -13,10 +13,113 @@
#include "sparx5_vcap_impl.h"
#include "sparx5_vcap_ag_api.h"
-static void sparx5_vcap_port_keys(struct sparx5 *sparx5,
- struct vcap_admin *admin,
- struct sparx5_port *port,
- struct vcap_output_print *out)
+static const char *sparx5_vcap_is0_etype_str(u32 value)
+{
+ switch (value) {
+ case VCAP_IS0_PS_ETYPE_DEFAULT:
+ return "default";
+ case VCAP_IS0_PS_ETYPE_NORMAL_7TUPLE:
+ return "normal_7tuple";
+ case VCAP_IS0_PS_ETYPE_NORMAL_5TUPLE_IP4:
+ return "normal_5tuple_ip4";
+ case VCAP_IS0_PS_ETYPE_MLL:
+ return "mll";
+ case VCAP_IS0_PS_ETYPE_LL_FULL:
+ return "ll_full";
+ case VCAP_IS0_PS_ETYPE_PURE_5TUPLE_IP4:
+ return "pure_5tuple_ip4";
+ case VCAP_IS0_PS_ETYPE_ETAG:
+ return "etag";
+ case VCAP_IS0_PS_ETYPE_NO_LOOKUP:
+ return "no lookup";
+ default:
+ return "unknown";
+ }
+}
+
+static const char *sparx5_vcap_is0_mpls_str(u32 value)
+{
+ switch (value) {
+ case VCAP_IS0_PS_MPLS_FOLLOW_ETYPE:
+ return "follow_etype";
+ case VCAP_IS0_PS_MPLS_NORMAL_7TUPLE:
+ return "normal_7tuple";
+ case VCAP_IS0_PS_MPLS_NORMAL_5TUPLE_IP4:
+ return "normal_5tuple_ip4";
+ case VCAP_IS0_PS_MPLS_MLL:
+ return "mll";
+ case VCAP_IS0_PS_MPLS_LL_FULL:
+ return "ll_full";
+ case VCAP_IS0_PS_MPLS_PURE_5TUPLE_IP4:
+ return "pure_5tuple_ip4";
+ case VCAP_IS0_PS_MPLS_ETAG:
+ return "etag";
+ case VCAP_IS0_PS_MPLS_NO_LOOKUP:
+ return "no lookup";
+ default:
+ return "unknown";
+ }
+}
+
+static const char *sparx5_vcap_is0_mlbs_str(u32 value)
+{
+ switch (value) {
+ case VCAP_IS0_PS_MLBS_FOLLOW_ETYPE:
+ return "follow_etype";
+ case VCAP_IS0_PS_MLBS_NO_LOOKUP:
+ return "no lookup";
+ default:
+ return "unknown";
+ }
+}
+
+static void sparx5_vcap_is0_port_keys(struct sparx5 *sparx5,
+ struct vcap_admin *admin,
+ struct sparx5_port *port,
+ struct vcap_output_print *out)
+{
+ int lookup;
+ u32 value, val;
+
+ out->prf(out->dst, " port[%02d] (%s): ", port->portno,
+ netdev_name(port->ndev));
+ for (lookup = 0; lookup < admin->lookups; ++lookup) {
+ out->prf(out->dst, "\n Lookup %d: ", lookup);
+
+ /* Get lookup state */
+ value = spx5_rd(sparx5,
+ ANA_CL_ADV_CL_CFG(port->portno, lookup));
+ out->prf(out->dst, "\n state: ");
+ if (ANA_CL_ADV_CL_CFG_LOOKUP_ENA_GET(value))
+ out->prf(out->dst, "on");
+ else
+ out->prf(out->dst, "off");
+ val = ANA_CL_ADV_CL_CFG_ETYPE_CLM_KEY_SEL_GET(value);
+ out->prf(out->dst, "\n etype: %s",
+ sparx5_vcap_is0_etype_str(val));
+ val = ANA_CL_ADV_CL_CFG_IP4_CLM_KEY_SEL_GET(value);
+ out->prf(out->dst, "\n ipv4: %s",
+ sparx5_vcap_is0_etype_str(val));
+ val = ANA_CL_ADV_CL_CFG_IP6_CLM_KEY_SEL_GET(value);
+ out->prf(out->dst, "\n ipv6: %s",
+ sparx5_vcap_is0_etype_str(val));
+ val = ANA_CL_ADV_CL_CFG_MPLS_UC_CLM_KEY_SEL_GET(value);
+ out->prf(out->dst, "\n mpls_uc: %s",
+ sparx5_vcap_is0_mpls_str(val));
+ val = ANA_CL_ADV_CL_CFG_MPLS_MC_CLM_KEY_SEL_GET(value);
+ out->prf(out->dst, "\n mpls_mc: %s",
+ sparx5_vcap_is0_mpls_str(val));
+ val = ANA_CL_ADV_CL_CFG_MLBS_CLM_KEY_SEL_GET(value);
+ out->prf(out->dst, "\n mlbs: %s",
+ sparx5_vcap_is0_mlbs_str(val));
+ }
+ out->prf(out->dst, "\n");
+}
+
+static void sparx5_vcap_is2_port_keys(struct sparx5 *sparx5,
+ struct vcap_admin *admin,
+ struct sparx5_port *port,
+ struct vcap_output_print *out)
{
int lookup;
u32 value;
@@ -29,7 +132,7 @@ static void sparx5_vcap_port_keys(struct sparx5 *sparx5,
/* Get lookup state */
value = spx5_rd(sparx5, ANA_ACL_VCAP_S2_CFG(port->portno));
out->prf(out->dst, "\n state: ");
- if (ANA_ACL_VCAP_S2_CFG_SEC_ENA_GET(value))
+ if (ANA_ACL_VCAP_S2_CFG_SEC_ENA_GET(value) & BIT(lookup))
out->prf(out->dst, "on");
else
out->prf(out->dst, "off");
@@ -126,9 +229,9 @@ static void sparx5_vcap_port_keys(struct sparx5 *sparx5,
out->prf(out->dst, "\n");
}
-static void sparx5_vcap_port_stickies(struct sparx5 *sparx5,
- struct vcap_admin *admin,
- struct vcap_output_print *out)
+static void sparx5_vcap_is2_port_stickies(struct sparx5 *sparx5,
+ struct vcap_admin *admin,
+ struct vcap_output_print *out)
{
int lookup;
u32 value;
@@ -181,6 +284,157 @@ static void sparx5_vcap_port_stickies(struct sparx5 *sparx5,
out->prf(out->dst, "\n");
}
+static void sparx5_vcap_es0_port_keys(struct sparx5 *sparx5,
+ struct vcap_admin *admin,
+ struct sparx5_port *port,
+ struct vcap_output_print *out)
+{
+ u32 value;
+
+ out->prf(out->dst, " port[%02d] (%s): ", port->portno,
+ netdev_name(port->ndev));
+ out->prf(out->dst, "\n Lookup 0: ");
+
+ /* Get lookup state */
+ value = spx5_rd(sparx5, REW_ES0_CTRL);
+ out->prf(out->dst, "\n state: ");
+ if (REW_ES0_CTRL_ES0_LU_ENA_GET(value))
+ out->prf(out->dst, "on");
+ else
+ out->prf(out->dst, "off");
+
+ out->prf(out->dst, "\n keyset: ");
+ value = spx5_rd(sparx5, REW_RTAG_ETAG_CTRL(port->portno));
+ switch (REW_RTAG_ETAG_CTRL_ES0_ISDX_KEY_ENA_GET(value)) {
+ case VCAP_ES0_PS_NORMAL_SELECTION:
+ out->prf(out->dst, "normal");
+ break;
+ case VCAP_ES0_PS_FORCE_ISDX_LOOKUPS:
+ out->prf(out->dst, "isdx");
+ break;
+ case VCAP_ES0_PS_FORCE_VID_LOOKUPS:
+ out->prf(out->dst, "vid");
+ break;
+ case VCAP_ES0_PS_RESERVED:
+ out->prf(out->dst, "reserved");
+ break;
+ }
+ out->prf(out->dst, "\n");
+}
+
+static void sparx5_vcap_es2_port_keys(struct sparx5 *sparx5,
+ struct vcap_admin *admin,
+ struct sparx5_port *port,
+ struct vcap_output_print *out)
+{
+ int lookup;
+ u32 value;
+
+ out->prf(out->dst, " port[%02d] (%s): ", port->portno,
+ netdev_name(port->ndev));
+ for (lookup = 0; lookup < admin->lookups; ++lookup) {
+ out->prf(out->dst, "\n Lookup %d: ", lookup);
+
+ /* Get lookup state */
+ value = spx5_rd(sparx5, EACL_VCAP_ES2_KEY_SEL(port->portno,
+ lookup));
+ out->prf(out->dst, "\n state: ");
+ if (EACL_VCAP_ES2_KEY_SEL_KEY_ENA_GET(value))
+ out->prf(out->dst, "on");
+ else
+ out->prf(out->dst, "off");
+
+ out->prf(out->dst, "\n arp: ");
+ switch (EACL_VCAP_ES2_KEY_SEL_ARP_KEY_SEL_GET(value)) {
+ case VCAP_ES2_PS_ARP_MAC_ETYPE:
+ out->prf(out->dst, "mac_etype");
+ break;
+ case VCAP_ES2_PS_ARP_ARP:
+ out->prf(out->dst, "arp");
+ break;
+ }
+ out->prf(out->dst, "\n ipv4: ");
+ switch (EACL_VCAP_ES2_KEY_SEL_IP4_KEY_SEL_GET(value)) {
+ case VCAP_ES2_PS_IPV4_MAC_ETYPE:
+ out->prf(out->dst, "mac_etype");
+ break;
+ case VCAP_ES2_PS_IPV4_IP_7TUPLE:
+ out->prf(out->dst, "ip_7tuple");
+ break;
+ case VCAP_ES2_PS_IPV4_IP4_TCP_UDP_VID:
+ out->prf(out->dst, "ip4_tcp_udp ip4_vid");
+ break;
+ case VCAP_ES2_PS_IPV4_IP4_TCP_UDP_OTHER:
+ out->prf(out->dst, "ip4_tcp_udp ip4_other");
+ break;
+ case VCAP_ES2_PS_IPV4_IP4_VID:
+ out->prf(out->dst, "ip4_vid");
+ break;
+ case VCAP_ES2_PS_IPV4_IP4_OTHER:
+ out->prf(out->dst, "ip4_other");
+ break;
+ }
+ out->prf(out->dst, "\n ipv6: ");
+ switch (EACL_VCAP_ES2_KEY_SEL_IP6_KEY_SEL_GET(value)) {
+ case VCAP_ES2_PS_IPV6_MAC_ETYPE:
+ out->prf(out->dst, "mac_etype");
+ break;
+ case VCAP_ES2_PS_IPV6_IP_7TUPLE:
+ out->prf(out->dst, "ip_7tuple");
+ break;
+ case VCAP_ES2_PS_IPV6_IP_7TUPLE_VID:
+ out->prf(out->dst, "ip_7tuple ip6_vid");
+ break;
+ case VCAP_ES2_PS_IPV6_IP_7TUPLE_STD:
+ out->prf(out->dst, "ip_7tuple ip6_std");
+ break;
+ case VCAP_ES2_PS_IPV6_IP6_VID:
+ out->prf(out->dst, "ip6_vid");
+ break;
+ case VCAP_ES2_PS_IPV6_IP6_STD:
+ out->prf(out->dst, "ip6_std");
+ break;
+ case VCAP_ES2_PS_IPV6_IP4_DOWNGRADE:
+ out->prf(out->dst, "ip4_downgrade");
+ break;
+ }
+ }
+ out->prf(out->dst, "\n");
+}
+
+static void sparx5_vcap_es2_port_stickies(struct sparx5 *sparx5,
+ struct vcap_admin *admin,
+ struct vcap_output_print *out)
+{
+ int lookup;
+ u32 value;
+
+ out->prf(out->dst, " Sticky bits: ");
+ for (lookup = 0; lookup < admin->lookups; ++lookup) {
+ value = spx5_rd(sparx5, EACL_SEC_LOOKUP_STICKY(lookup));
+ out->prf(out->dst, "\n Lookup %d: ", lookup);
+ if (EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP_7TUPLE_STICKY_GET(value))
+ out->prf(out->dst, " ip_7tuple");
+ if (EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP6_VID_STICKY_GET(value))
+ out->prf(out->dst, " ip6_vid");
+ if (EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP6_STD_STICKY_GET(value))
+ out->prf(out->dst, " ip6_std");
+ if (EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_TCPUDP_STICKY_GET(value))
+ out->prf(out->dst, " ip4_tcp_udp");
+ if (EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_VID_STICKY_GET(value))
+ out->prf(out->dst, " ip4_vid");
+ if (EACL_SEC_LOOKUP_STICKY_SEC_TYPE_IP4_OTHER_STICKY_GET(value))
+ out->prf(out->dst, " ip4_other");
+ if (EACL_SEC_LOOKUP_STICKY_SEC_TYPE_ARP_STICKY_GET(value))
+ out->prf(out->dst, " arp");
+ if (EACL_SEC_LOOKUP_STICKY_SEC_TYPE_MAC_ETYPE_STICKY_GET(value))
+ out->prf(out->dst, " mac_etype");
+ /* Clear stickies */
+ spx5_wr(value, sparx5, EACL_SEC_LOOKUP_STICKY(lookup));
+ }
+ out->prf(out->dst, "\n");
+}
+
/* Provide port information via a callback interface */
int sparx5_port_info(struct net_device *ndev,
struct vcap_admin *admin,
@@ -194,7 +448,24 @@ int sparx5_port_info(struct net_device *ndev,
vctrl = sparx5->vcap_ctrl;
vcap = &vctrl->vcaps[admin->vtype];
out->prf(out->dst, "%s:\n", vcap->name);
- sparx5_vcap_port_keys(sparx5, admin, port, out);
- sparx5_vcap_port_stickies(sparx5, admin, out);
+ switch (admin->vtype) {
+ case VCAP_TYPE_IS0:
+ sparx5_vcap_is0_port_keys(sparx5, admin, port, out);
+ break;
+ case VCAP_TYPE_IS2:
+ sparx5_vcap_is2_port_keys(sparx5, admin, port, out);
+ sparx5_vcap_is2_port_stickies(sparx5, admin, out);
+ break;
+ case VCAP_TYPE_ES0:
+ sparx5_vcap_es0_port_keys(sparx5, admin, port, out);
+ break;
+ case VCAP_TYPE_ES2:
+ sparx5_vcap_es2_port_keys(sparx5, admin, port, out);
+ sparx5_vcap_es2_port_stickies(sparx5, admin, out);
+ break;
+ default:
+ out->prf(out->dst, " no info\n");
+ break;
+ }
return 0;
}
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.c b/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.c
index a0c126ba9a87..d0d4e0385ac7 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.c
@@ -27,6 +27,28 @@
ANA_ACL_VCAP_S2_KEY_SEL_IP6_UC_KEY_SEL_SET(_v6_uc) | \
ANA_ACL_VCAP_S2_KEY_SEL_ARP_KEY_SEL_SET(_arp))
+#define SPARX5_IS0_LOOKUPS 6
+#define VCAP_IS0_KEYSEL(_ena, _etype, _ipv4, _ipv6, _mpls_uc, _mpls_mc, _mlbs) \
+ (ANA_CL_ADV_CL_CFG_LOOKUP_ENA_SET(_ena) | \
+ ANA_CL_ADV_CL_CFG_ETYPE_CLM_KEY_SEL_SET(_etype) | \
+ ANA_CL_ADV_CL_CFG_IP4_CLM_KEY_SEL_SET(_ipv4) | \
+ ANA_CL_ADV_CL_CFG_IP6_CLM_KEY_SEL_SET(_ipv6) | \
+ ANA_CL_ADV_CL_CFG_MPLS_UC_CLM_KEY_SEL_SET(_mpls_uc) | \
+ ANA_CL_ADV_CL_CFG_MPLS_MC_CLM_KEY_SEL_SET(_mpls_mc) | \
+ ANA_CL_ADV_CL_CFG_MLBS_CLM_KEY_SEL_SET(_mlbs))
+
+#define SPARX5_ES0_LOOKUPS 1
+#define VCAP_ES0_KEYSEL(_key) (REW_RTAG_ETAG_CTRL_ES0_ISDX_KEY_ENA_SET(_key))
+#define SPARX5_STAT_ESDX_GRN_PKTS 0x300
+#define SPARX5_STAT_ESDX_YEL_PKTS 0x301
+
+#define SPARX5_ES2_LOOKUPS 2
+#define VCAP_ES2_KEYSEL(_ena, _arp, _ipv4, _ipv6) \
+ (EACL_VCAP_ES2_KEY_SEL_KEY_ENA_SET(_ena) | \
+ EACL_VCAP_ES2_KEY_SEL_ARP_KEY_SEL_SET(_arp) | \
+ EACL_VCAP_ES2_KEY_SEL_IP4_KEY_SEL_SET(_ipv4) | \
+ EACL_VCAP_ES2_KEY_SEL_IP6_KEY_SEL_SET(_ipv6))
+
static struct sparx5_vcap_inst {
enum vcap_type vtype; /* type of vcap */
int vinst; /* instance number within the same type */
@@ -38,8 +60,45 @@ static struct sparx5_vcap_inst {
int map_id; /* id in the super vcap block mapping (if applicable) */
int blockno; /* starting block in super vcap (if applicable) */
int blocks; /* number of blocks in super vcap (if applicable) */
+ bool ingress; /* is vcap in the ingress path */
} sparx5_vcap_inst_cfg[] = {
{
+ .vtype = VCAP_TYPE_IS0, /* CLM-0 */
+ .vinst = 0,
+ .map_id = 1,
+ .lookups = SPARX5_IS0_LOOKUPS,
+ .lookups_per_instance = SPARX5_IS0_LOOKUPS / 3,
+ .first_cid = SPARX5_VCAP_CID_IS0_L0,
+ .last_cid = SPARX5_VCAP_CID_IS0_L2 - 1,
+ .blockno = 8, /* Maps block 8-9 */
+ .blocks = 2,
+ .ingress = true,
+ },
+ {
+ .vtype = VCAP_TYPE_IS0, /* CLM-1 */
+ .vinst = 1,
+ .map_id = 2,
+ .lookups = SPARX5_IS0_LOOKUPS,
+ .lookups_per_instance = SPARX5_IS0_LOOKUPS / 3,
+ .first_cid = SPARX5_VCAP_CID_IS0_L2,
+ .last_cid = SPARX5_VCAP_CID_IS0_L4 - 1,
+ .blockno = 6, /* Maps block 6-7 */
+ .blocks = 2,
+ .ingress = true,
+ },
+ {
+ .vtype = VCAP_TYPE_IS0, /* CLM-2 */
+ .vinst = 2,
+ .map_id = 3,
+ .lookups = SPARX5_IS0_LOOKUPS,
+ .lookups_per_instance = SPARX5_IS0_LOOKUPS / 3,
+ .first_cid = SPARX5_VCAP_CID_IS0_L4,
+ .last_cid = SPARX5_VCAP_CID_IS0_MAX,
+ .blockno = 4, /* Maps block 4-5 */
+ .blocks = 2,
+ .ingress = true,
+ },
+ {
.vtype = VCAP_TYPE_IS2, /* IS2-0 */
.vinst = 0,
.map_id = 4,
@@ -49,6 +108,7 @@ static struct sparx5_vcap_inst {
.last_cid = SPARX5_VCAP_CID_IS2_L2 - 1,
.blockno = 0, /* Maps block 0-1 */
.blocks = 2,
+ .ingress = true,
},
{
.vtype = VCAP_TYPE_IS2, /* IS2-1 */
@@ -60,9 +120,59 @@ static struct sparx5_vcap_inst {
.last_cid = SPARX5_VCAP_CID_IS2_MAX,
.blockno = 2, /* Maps block 2-3 */
.blocks = 2,
+ .ingress = true,
+ },
+ {
+ .vtype = VCAP_TYPE_ES0,
+ .lookups = SPARX5_ES0_LOOKUPS,
+ .lookups_per_instance = SPARX5_ES0_LOOKUPS,
+ .first_cid = SPARX5_VCAP_CID_ES0_L0,
+ .last_cid = SPARX5_VCAP_CID_ES0_MAX,
+ .count = 4096, /* Addresses according to datasheet */
+ .ingress = false,
+ },
+ {
+ .vtype = VCAP_TYPE_ES2,
+ .lookups = SPARX5_ES2_LOOKUPS,
+ .lookups_per_instance = SPARX5_ES2_LOOKUPS,
+ .first_cid = SPARX5_VCAP_CID_ES2_L0,
+ .last_cid = SPARX5_VCAP_CID_ES2_MAX,
+ .count = 12288, /* Addresses according to datasheet */
+ .ingress = false,
},
};
+/* These protocols have dedicated keysets in IS0 and a TC dissector */
+static u16 sparx5_vcap_is0_known_etypes[] = {
+ ETH_P_ALL,
+ ETH_P_IP,
+ ETH_P_IPV6,
+};
+
+/* These protocols have dedicated keysets in IS2 and a TC dissector */
+static u16 sparx5_vcap_is2_known_etypes[] = {
+ ETH_P_ALL,
+ ETH_P_ARP,
+ ETH_P_IP,
+ ETH_P_IPV6,
+};
+
+/* These protocols have dedicated keysets in ES2 and a TC dissector */
+static u16 sparx5_vcap_es2_known_etypes[] = {
+ ETH_P_ALL,
+ ETH_P_ARP,
+ ETH_P_IP,
+ ETH_P_IPV6,
+};
+
+static void sparx5_vcap_type_err(struct sparx5 *sparx5,
+ struct vcap_admin *admin,
+ const char *fname)
+{
+ pr_err("%s: vcap type: %s not supported\n",
+ fname, sparx5_vcaps[admin->vtype].name);
+}
+
/* Await the super VCAP completion of the current operation */
static void sparx5_vcap_wait_super_update(struct sparx5 *sparx5)
{
@@ -73,25 +183,81 @@ static void sparx5_vcap_wait_super_update(struct sparx5 *sparx5)
false, sparx5, VCAP_SUPER_CTRL);
}
-/* Initializing a VCAP address range: only IS2 for now */
+/* Await the ES0 VCAP completion of the current operation */
+static void sparx5_vcap_wait_es0_update(struct sparx5 *sparx5)
+{
+ u32 value;
+
+ read_poll_timeout(spx5_rd, value,
+ !VCAP_ES0_CTRL_UPDATE_SHOT_GET(value), 500, 10000,
+ false, sparx5, VCAP_ES0_CTRL);
+}
+
+/* Await the ES2 VCAP completion of the current operation */
+static void sparx5_vcap_wait_es2_update(struct sparx5 *sparx5)
+{
+ u32 value;
+
+ read_poll_timeout(spx5_rd, value,
+ !VCAP_ES2_CTRL_UPDATE_SHOT_GET(value), 500, 10000,
+ false, sparx5, VCAP_ES2_CTRL);
+}
+
+/* Initializing a VCAP address range */
static void _sparx5_vcap_range_init(struct sparx5 *sparx5,
struct vcap_admin *admin,
u32 addr, u32 count)
{
u32 size = count - 1;
- spx5_wr(VCAP_SUPER_CFG_MV_NUM_POS_SET(0) |
- VCAP_SUPER_CFG_MV_SIZE_SET(size),
- sparx5, VCAP_SUPER_CFG);
- spx5_wr(VCAP_SUPER_CTRL_UPDATE_CMD_SET(VCAP_CMD_INITIALIZE) |
- VCAP_SUPER_CTRL_UPDATE_ENTRY_DIS_SET(0) |
- VCAP_SUPER_CTRL_UPDATE_ACTION_DIS_SET(0) |
- VCAP_SUPER_CTRL_UPDATE_CNT_DIS_SET(0) |
- VCAP_SUPER_CTRL_UPDATE_ADDR_SET(addr) |
- VCAP_SUPER_CTRL_CLEAR_CACHE_SET(true) |
- VCAP_SUPER_CTRL_UPDATE_SHOT_SET(true),
- sparx5, VCAP_SUPER_CTRL);
- sparx5_vcap_wait_super_update(sparx5);
+ switch (admin->vtype) {
+ case VCAP_TYPE_IS0:
+ case VCAP_TYPE_IS2:
+ spx5_wr(VCAP_SUPER_CFG_MV_NUM_POS_SET(0) |
+ VCAP_SUPER_CFG_MV_SIZE_SET(size),
+ sparx5, VCAP_SUPER_CFG);
+ spx5_wr(VCAP_SUPER_CTRL_UPDATE_CMD_SET(VCAP_CMD_INITIALIZE) |
+ VCAP_SUPER_CTRL_UPDATE_ENTRY_DIS_SET(0) |
+ VCAP_SUPER_CTRL_UPDATE_ACTION_DIS_SET(0) |
+ VCAP_SUPER_CTRL_UPDATE_CNT_DIS_SET(0) |
+ VCAP_SUPER_CTRL_UPDATE_ADDR_SET(addr) |
+ VCAP_SUPER_CTRL_CLEAR_CACHE_SET(true) |
+ VCAP_SUPER_CTRL_UPDATE_SHOT_SET(true),
+ sparx5, VCAP_SUPER_CTRL);
+ sparx5_vcap_wait_super_update(sparx5);
+ break;
+ case VCAP_TYPE_ES0:
+ spx5_wr(VCAP_ES0_CFG_MV_NUM_POS_SET(0) |
+ VCAP_ES0_CFG_MV_SIZE_SET(size),
+ sparx5, VCAP_ES0_CFG);
+ spx5_wr(VCAP_ES0_CTRL_UPDATE_CMD_SET(VCAP_CMD_INITIALIZE) |
+ VCAP_ES0_CTRL_UPDATE_ENTRY_DIS_SET(0) |
+ VCAP_ES0_CTRL_UPDATE_ACTION_DIS_SET(0) |
+ VCAP_ES0_CTRL_UPDATE_CNT_DIS_SET(0) |
+ VCAP_ES0_CTRL_UPDATE_ADDR_SET(addr) |
+ VCAP_ES0_CTRL_CLEAR_CACHE_SET(true) |
+ VCAP_ES0_CTRL_UPDATE_SHOT_SET(true),
+ sparx5, VCAP_ES0_CTRL);
+ sparx5_vcap_wait_es0_update(sparx5);
+ break;
+ case VCAP_TYPE_ES2:
+ spx5_wr(VCAP_ES2_CFG_MV_NUM_POS_SET(0) |
+ VCAP_ES2_CFG_MV_SIZE_SET(size),
+ sparx5, VCAP_ES2_CFG);
+ spx5_wr(VCAP_ES2_CTRL_UPDATE_CMD_SET(VCAP_CMD_INITIALIZE) |
+ VCAP_ES2_CTRL_UPDATE_ENTRY_DIS_SET(0) |
+ VCAP_ES2_CTRL_UPDATE_ACTION_DIS_SET(0) |
+ VCAP_ES2_CTRL_UPDATE_CNT_DIS_SET(0) |
+ VCAP_ES2_CTRL_UPDATE_ADDR_SET(addr) |
+ VCAP_ES2_CTRL_CLEAR_CACHE_SET(true) |
+ VCAP_ES2_CTRL_UPDATE_SHOT_SET(true),
+ sparx5, VCAP_ES2_CTRL);
+ sparx5_vcap_wait_es2_update(sparx5);
+ break;
+ default:
+ sparx5_vcap_type_err(sparx5, admin, __func__);
+ break;
+ }
}
/* Initializing VCAP rule data area */
@@ -112,6 +278,17 @@ static const char *sparx5_vcap_keyset_name(struct net_device *ndev,
return vcap_keyset_name(port->sparx5->vcap_ctrl, keyset);
}
+/* Check if this is the first lookup of IS0 */
+static bool sparx5_vcap_is0_is_first_chain(struct vcap_rule *rule)
+{
+ return (rule->vcap_chain_id >= SPARX5_VCAP_CID_IS0_L0 &&
+ rule->vcap_chain_id < SPARX5_VCAP_CID_IS0_L1) ||
+ ((rule->vcap_chain_id >= SPARX5_VCAP_CID_IS0_L2 &&
+ rule->vcap_chain_id < SPARX5_VCAP_CID_IS0_L3)) ||
+ ((rule->vcap_chain_id >= SPARX5_VCAP_CID_IS0_L4 &&
+ rule->vcap_chain_id < SPARX5_VCAP_CID_IS0_L5));
+}
+
/* Check if this is the first lookup of IS2 */
static bool sparx5_vcap_is2_is_first_chain(struct vcap_rule *rule)
{
@@ -121,9 +298,15 @@ static bool sparx5_vcap_is2_is_first_chain(struct vcap_rule *rule)
rule->vcap_chain_id < SPARX5_VCAP_CID_IS2_L3));
}
+static bool sparx5_vcap_es2_is_first_chain(struct vcap_rule *rule)
+{
+ return (rule->vcap_chain_id >= SPARX5_VCAP_CID_ES2_L0 &&
+ rule->vcap_chain_id < SPARX5_VCAP_CID_ES2_L1);
+}
+
/* Set the narrow range ingress port mask on a rule */
-static void sparx5_vcap_add_range_port_mask(struct vcap_rule *rule,
- struct net_device *ndev)
+static void sparx5_vcap_add_ingress_range_port_mask(struct vcap_rule *rule,
+ struct net_device *ndev)
{
struct sparx5_port *port = netdev_priv(ndev);
u32 port_mask;
@@ -153,12 +336,51 @@ static void sparx5_vcap_add_wide_port_mask(struct vcap_rule *rule,
vcap_rule_add_key_u72(rule, VCAP_KF_IF_IGR_PORT_MASK, &port_mask);
}
-/* Convert chain id to vcap lookup id */
-static int sparx5_vcap_cid_to_lookup(int cid)
+static void sparx5_vcap_add_egress_range_port_mask(struct vcap_rule *rule,
+ struct net_device *ndev)
+{
+ struct sparx5_port *port = netdev_priv(ndev);
+ u32 port_mask;
+ u32 range;
+
+ /* Mask range selects:
+ * 0-2: Physical/Logical egress port number 0-31, 32–63, 64.
+ * 3-5: Virtual Interface Number 0-31, 32-63, 64.
+ * 6: CPU queue Number 0-7.
+ *
+ * Use physical/logical port ranges (0-2)
+ */
+ range = port->portno / BITS_PER_TYPE(u32);
+ /* Port bit set to match-any */
+ port_mask = ~BIT(port->portno % BITS_PER_TYPE(u32));
+ vcap_rule_add_key_u32(rule, VCAP_KF_IF_EGR_PORT_MASK_RNG, range, 0xf);
+ vcap_rule_add_key_u32(rule, VCAP_KF_IF_EGR_PORT_MASK, 0, port_mask);
+}
+
+/* Convert IS0 chain id to vcap lookup id */
+static int sparx5_vcap_is0_cid_to_lookup(int cid)
+{
+ int lookup = 0;
+
+ if (cid >= SPARX5_VCAP_CID_IS0_L1 && cid < SPARX5_VCAP_CID_IS0_L2)
+ lookup = 1;
+ else if (cid >= SPARX5_VCAP_CID_IS0_L2 && cid < SPARX5_VCAP_CID_IS0_L3)
+ lookup = 2;
+ else if (cid >= SPARX5_VCAP_CID_IS0_L3 && cid < SPARX5_VCAP_CID_IS0_L4)
+ lookup = 3;
+ else if (cid >= SPARX5_VCAP_CID_IS0_L4 && cid < SPARX5_VCAP_CID_IS0_L5)
+ lookup = 4;
+ else if (cid >= SPARX5_VCAP_CID_IS0_L5 && cid < SPARX5_VCAP_CID_IS0_MAX)
+ lookup = 5;
+
+ return lookup;
+}
+
+/* Convert IS2 chain id to vcap lookup id */
+static int sparx5_vcap_is2_cid_to_lookup(int cid)
{
int lookup = 0;
- /* For now only handle IS2 */
if (cid >= SPARX5_VCAP_CID_IS2_L1 && cid < SPARX5_VCAP_CID_IS2_L2)
lookup = 1;
else if (cid >= SPARX5_VCAP_CID_IS2_L2 && cid < SPARX5_VCAP_CID_IS2_L3)
@@ -169,6 +391,86 @@ static int sparx5_vcap_cid_to_lookup(int cid)
return lookup;
}
+/* Convert ES2 chain id to vcap lookup id */
+static int sparx5_vcap_es2_cid_to_lookup(int cid)
+{
+ int lookup = 0;
+
+ if (cid >= SPARX5_VCAP_CID_ES2_L1)
+ lookup = 1;
+
+ return lookup;
+}
+
+/* Add ethernet type IS0 keyset to a list */
+static void
+sparx5_vcap_is0_get_port_etype_keysets(struct vcap_keyset_list *keysetlist,
+ u32 value)
+{
+ switch (ANA_CL_ADV_CL_CFG_ETYPE_CLM_KEY_SEL_GET(value)) {
+ case VCAP_IS0_PS_ETYPE_NORMAL_7TUPLE:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_NORMAL_7TUPLE);
+ break;
+ case VCAP_IS0_PS_ETYPE_NORMAL_5TUPLE_IP4:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_NORMAL_5TUPLE_IP4);
+ break;
+ }
+}
+
+/* Return the list of keysets for the vcap port configuration */
+static int sparx5_vcap_is0_get_port_keysets(struct net_device *ndev,
+ int lookup,
+ struct vcap_keyset_list *keysetlist,
+ u16 l3_proto)
+{
+ struct sparx5_port *port = netdev_priv(ndev);
+ struct sparx5 *sparx5 = port->sparx5;
+ int portno = port->portno;
+ u32 value;
+
+ value = spx5_rd(sparx5, ANA_CL_ADV_CL_CFG(portno, lookup));
+
+ /* Collect all keysets for the port in a list */
+ if (l3_proto == ETH_P_ALL)
+ sparx5_vcap_is0_get_port_etype_keysets(keysetlist, value);
+
+ if (l3_proto == ETH_P_ALL || l3_proto == ETH_P_IP)
+ switch (ANA_CL_ADV_CL_CFG_IP4_CLM_KEY_SEL_GET(value)) {
+ case VCAP_IS0_PS_ETYPE_DEFAULT:
+ sparx5_vcap_is0_get_port_etype_keysets(keysetlist,
+ value);
+ break;
+ case VCAP_IS0_PS_ETYPE_NORMAL_7TUPLE:
+ vcap_keyset_list_add(keysetlist,
+ VCAP_KFS_NORMAL_7TUPLE);
+ break;
+ case VCAP_IS0_PS_ETYPE_NORMAL_5TUPLE_IP4:
+ vcap_keyset_list_add(keysetlist,
+ VCAP_KFS_NORMAL_5TUPLE_IP4);
+ break;
+ }
+
+ if (l3_proto == ETH_P_ALL || l3_proto == ETH_P_IPV6)
+ switch (ANA_CL_ADV_CL_CFG_IP6_CLM_KEY_SEL_GET(value)) {
+ case VCAP_IS0_PS_ETYPE_DEFAULT:
+ sparx5_vcap_is0_get_port_etype_keysets(keysetlist,
+ value);
+ break;
+ case VCAP_IS0_PS_ETYPE_NORMAL_7TUPLE:
+ vcap_keyset_list_add(keysetlist,
+ VCAP_KFS_NORMAL_7TUPLE);
+ break;
+ case VCAP_IS0_PS_ETYPE_NORMAL_5TUPLE_IP4:
+ vcap_keyset_list_add(keysetlist,
+ VCAP_KFS_NORMAL_5TUPLE_IP4);
+ break;
+ }
+
+ if (l3_proto != ETH_P_IP && l3_proto != ETH_P_IPV6)
+ sparx5_vcap_is0_get_port_etype_keysets(keysetlist, value);
+ return 0;
+}
+
/* Return the list of keysets for the vcap port configuration */
static int sparx5_vcap_is2_get_port_keysets(struct net_device *ndev,
int lookup,
@@ -180,10 +482,7 @@ static int sparx5_vcap_is2_get_port_keysets(struct net_device *ndev,
int portno = port->portno;
u32 value;
- /* Check if the port keyset selection is enabled */
value = spx5_rd(sparx5, ANA_ACL_VCAP_S2_KEY_SEL(portno, lookup));
- if (!ANA_ACL_VCAP_S2_KEY_SEL_KEY_SEL_ENA_GET(value))
- return -ENOENT;
/* Collect all keysets for the port in a list */
if (l3_proto == ETH_P_ALL || l3_proto == ETH_P_ARP) {
@@ -274,6 +573,121 @@ static int sparx5_vcap_is2_get_port_keysets(struct net_device *ndev,
return 0;
}
+/* Return the keysets for the vcap port IP4 traffic class configuration */
+static void
+sparx5_vcap_es2_get_port_ipv4_keysets(struct vcap_keyset_list *keysetlist,
+ u32 value)
+{
+ switch (EACL_VCAP_ES2_KEY_SEL_IP4_KEY_SEL_GET(value)) {
+ case VCAP_ES2_PS_IPV4_MAC_ETYPE:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_MAC_ETYPE);
+ break;
+ case VCAP_ES2_PS_IPV4_IP_7TUPLE:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_IP_7TUPLE);
+ break;
+ case VCAP_ES2_PS_IPV4_IP4_TCP_UDP_VID:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_IP4_TCP_UDP);
+ break;
+ case VCAP_ES2_PS_IPV4_IP4_TCP_UDP_OTHER:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_IP4_TCP_UDP);
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_IP4_OTHER);
+ break;
+ case VCAP_ES2_PS_IPV4_IP4_VID:
+ /* Not used */
+ break;
+ case VCAP_ES2_PS_IPV4_IP4_OTHER:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_IP4_OTHER);
+ break;
+ }
+}
+
+/* Return the list of keysets for the vcap port configuration */
+static int sparx5_vcap_es0_get_port_keysets(struct net_device *ndev,
+ struct vcap_keyset_list *keysetlist,
+ u16 l3_proto)
+{
+ struct sparx5_port *port = netdev_priv(ndev);
+ struct sparx5 *sparx5 = port->sparx5;
+ int portno = port->portno;
+ u32 value;
+
+ value = spx5_rd(sparx5, REW_RTAG_ETAG_CTRL(portno));
+
+ /* Collect all keysets for the port in a list */
+ switch (REW_RTAG_ETAG_CTRL_ES0_ISDX_KEY_ENA_GET(value)) {
+ case VCAP_ES0_PS_NORMAL_SELECTION:
+ case VCAP_ES0_PS_FORCE_ISDX_LOOKUPS:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_ISDX);
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+/* Return the list of keysets for the vcap port configuration */
+static int sparx5_vcap_es2_get_port_keysets(struct net_device *ndev,
+ int lookup,
+ struct vcap_keyset_list *keysetlist,
+ u16 l3_proto)
+{
+ struct sparx5_port *port = netdev_priv(ndev);
+ struct sparx5 *sparx5 = port->sparx5;
+ int portno = port->portno;
+ u32 value;
+
+ value = spx5_rd(sparx5, EACL_VCAP_ES2_KEY_SEL(portno, lookup));
+
+ /* Collect all keysets for the port in a list */
+ if (l3_proto == ETH_P_ALL || l3_proto == ETH_P_ARP) {
+ switch (EACL_VCAP_ES2_KEY_SEL_ARP_KEY_SEL_GET(value)) {
+ case VCAP_ES2_PS_ARP_MAC_ETYPE:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_MAC_ETYPE);
+ break;
+ case VCAP_ES2_PS_ARP_ARP:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_ARP);
+ break;
+ }
+ }
+
+ if (l3_proto == ETH_P_ALL || l3_proto == ETH_P_IP)
+ sparx5_vcap_es2_get_port_ipv4_keysets(keysetlist, value);
+
+ if (l3_proto == ETH_P_ALL || l3_proto == ETH_P_IPV6) {
+ switch (EACL_VCAP_ES2_KEY_SEL_IP6_KEY_SEL_GET(value)) {
+ case VCAP_ES2_PS_IPV6_MAC_ETYPE:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_MAC_ETYPE);
+ break;
+ case VCAP_ES2_PS_IPV6_IP_7TUPLE:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_IP_7TUPLE);
+ break;
+ case VCAP_ES2_PS_IPV6_IP_7TUPLE_VID:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_IP_7TUPLE);
+ break;
+ case VCAP_ES2_PS_IPV6_IP_7TUPLE_STD:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_IP_7TUPLE);
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_IP6_STD);
+ break;
+ case VCAP_ES2_PS_IPV6_IP6_VID:
+ /* Not used */
+ break;
+ case VCAP_ES2_PS_IPV6_IP6_STD:
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_IP6_STD);
+ break;
+ case VCAP_ES2_PS_IPV6_IP4_DOWNGRADE:
+ sparx5_vcap_es2_get_port_ipv4_keysets(keysetlist,
+ value);
+ break;
+ }
+ }
+
+ if (l3_proto != ETH_P_ARP && l3_proto != ETH_P_IP &&
+ l3_proto != ETH_P_IPV6) {
+ vcap_keyset_list_add(keysetlist, VCAP_KFS_MAC_ETYPE);
+ }
+ return 0;
+}
+
/* Get the port keyset for the vcap lookup */
int sparx5_vcap_get_port_keyset(struct net_device *ndev,
struct vcap_admin *admin,
@@ -281,10 +695,64 @@ int sparx5_vcap_get_port_keyset(struct net_device *ndev,
u16 l3_proto,
struct vcap_keyset_list *kslist)
{
- int lookup;
+ int lookup, err = -EINVAL;
+ struct sparx5_port *port;
+
+ switch (admin->vtype) {
+ case VCAP_TYPE_IS0:
+ lookup = sparx5_vcap_is0_cid_to_lookup(cid);
+ err = sparx5_vcap_is0_get_port_keysets(ndev, lookup, kslist,
+ l3_proto);
+ break;
+ case VCAP_TYPE_IS2:
+ lookup = sparx5_vcap_is2_cid_to_lookup(cid);
+ err = sparx5_vcap_is2_get_port_keysets(ndev, lookup, kslist,
+ l3_proto);
+ break;
+ case VCAP_TYPE_ES0:
+ err = sparx5_vcap_es0_get_port_keysets(ndev, kslist, l3_proto);
+ break;
+ case VCAP_TYPE_ES2:
+ lookup = sparx5_vcap_es2_cid_to_lookup(cid);
+ err = sparx5_vcap_es2_get_port_keysets(ndev, lookup, kslist,
+ l3_proto);
+ break;
+ default:
+ port = netdev_priv(ndev);
+ sparx5_vcap_type_err(port->sparx5, admin, __func__);
+ break;
+ }
+ return err;
+}
+
+/* Check if the ethertype is supported by the vcap port classification */
+bool sparx5_vcap_is_known_etype(struct vcap_admin *admin, u16 etype)
+{
+ const u16 *known_etypes;
+ int size, idx;
- lookup = sparx5_vcap_cid_to_lookup(cid);
- return sparx5_vcap_is2_get_port_keysets(ndev, lookup, kslist, l3_proto);
+ switch (admin->vtype) {
+ case VCAP_TYPE_IS0:
+ known_etypes = sparx5_vcap_is0_known_etypes;
+ size = ARRAY_SIZE(sparx5_vcap_is0_known_etypes);
+ break;
+ case VCAP_TYPE_IS2:
+ known_etypes = sparx5_vcap_is2_known_etypes;
+ size = ARRAY_SIZE(sparx5_vcap_is2_known_etypes);
+ break;
+ case VCAP_TYPE_ES0:
+ return true;
+ case VCAP_TYPE_ES2:
+ known_etypes = sparx5_vcap_es2_known_etypes;
+ size = ARRAY_SIZE(sparx5_vcap_es2_known_etypes);
+ break;
+ default:
+ return false;
+ }
+ for (idx = 0; idx < size; ++idx)
+ if (known_etypes[idx] == etype)
+ return true;
+ return false;
}
/* API callback used for validating a field keyset (check the port keysets) */
@@ -297,16 +765,40 @@ sparx5_vcap_validate_keyset(struct net_device *ndev,
{
struct vcap_keyset_list keysetlist = {};
enum vcap_keyfield_set keysets[10] = {};
+ struct sparx5_port *port;
int idx, jdx, lookup;
if (!kslist || kslist->cnt == 0)
return VCAP_KFS_NO_VALUE;
- /* Get a list of currently configured keysets in the lookups */
- lookup = sparx5_vcap_cid_to_lookup(rule->vcap_chain_id);
keysetlist.max = ARRAY_SIZE(keysets);
keysetlist.keysets = keysets;
- sparx5_vcap_is2_get_port_keysets(ndev, lookup, &keysetlist, l3_proto);
+
+ /* Get a list of currently configured keysets in the lookups */
+ switch (admin->vtype) {
+ case VCAP_TYPE_IS0:
+ lookup = sparx5_vcap_is0_cid_to_lookup(rule->vcap_chain_id);
+ sparx5_vcap_is0_get_port_keysets(ndev, lookup, &keysetlist,
+ l3_proto);
+ break;
+ case VCAP_TYPE_IS2:
+ lookup = sparx5_vcap_is2_cid_to_lookup(rule->vcap_chain_id);
+ sparx5_vcap_is2_get_port_keysets(ndev, lookup, &keysetlist,
+ l3_proto);
+ break;
+ case VCAP_TYPE_ES0:
+ sparx5_vcap_es0_get_port_keysets(ndev, &keysetlist, l3_proto);
+ break;
+ case VCAP_TYPE_ES2:
+ lookup = sparx5_vcap_es2_cid_to_lookup(rule->vcap_chain_id);
+ sparx5_vcap_es2_get_port_keysets(ndev, lookup, &keysetlist,
+ l3_proto);
+ break;
+ default:
+ port = netdev_priv(ndev);
+ sparx5_vcap_type_err(port->sparx5, admin, __func__);
+ break;
+ }
/* Check if there is a match and return the match */
for (idx = 0; idx < kslist->cnt; ++idx)
@@ -321,27 +813,97 @@ sparx5_vcap_validate_keyset(struct net_device *ndev,
return -ENOENT;
}
-/* API callback used for adding default fields to a rule */
-static void sparx5_vcap_add_default_fields(struct net_device *ndev,
- struct vcap_admin *admin,
- struct vcap_rule *rule)
+static void sparx5_vcap_ingress_add_default_fields(struct net_device *ndev,
+ struct vcap_admin *admin,
+ struct vcap_rule *rule)
{
const struct vcap_field *field;
+ bool is_first;
+ /* Add ingress port mask matching the net device */
field = vcap_lookup_keyfield(rule, VCAP_KF_IF_IGR_PORT_MASK);
if (field && field->width == SPX5_PORTS)
sparx5_vcap_add_wide_port_mask(rule, ndev);
else if (field && field->width == BITS_PER_TYPE(u32))
- sparx5_vcap_add_range_port_mask(rule, ndev);
+ sparx5_vcap_add_ingress_range_port_mask(rule, ndev);
else
pr_err("%s:%d: %s: could not add an ingress port mask for: %s\n",
__func__, __LINE__, netdev_name(ndev),
sparx5_vcap_keyset_name(ndev, rule->keyset));
- /* add the lookup bit */
- if (sparx5_vcap_is2_is_first_chain(rule))
- vcap_rule_add_key_bit(rule, VCAP_KF_LOOKUP_FIRST_IS, VCAP_BIT_1);
+
+ if (admin->vtype == VCAP_TYPE_IS0)
+ is_first = sparx5_vcap_is0_is_first_chain(rule);
else
- vcap_rule_add_key_bit(rule, VCAP_KF_LOOKUP_FIRST_IS, VCAP_BIT_0);
+ is_first = sparx5_vcap_is2_is_first_chain(rule);
+
+ /* Add key that selects the first/second lookup */
+ if (is_first)
+ vcap_rule_add_key_bit(rule, VCAP_KF_LOOKUP_FIRST_IS,
+ VCAP_BIT_1);
+ else
+ vcap_rule_add_key_bit(rule, VCAP_KF_LOOKUP_FIRST_IS,
+ VCAP_BIT_0);
+}
+
+static void sparx5_vcap_es0_add_default_fields(struct net_device *ndev,
+ struct vcap_admin *admin,
+ struct vcap_rule *rule)
+{
+ struct sparx5_port *port = netdev_priv(ndev);
+
+ vcap_rule_add_key_u32(rule, VCAP_KF_IF_EGR_PORT_NO, port->portno, ~0);
+ /* Match untagged frames if there was no VLAN key */
+ vcap_rule_add_key_u32(rule, VCAP_KF_8021Q_TPID, SPX5_TPID_SEL_UNTAGGED,
+ ~0);
+}
+
+static void sparx5_vcap_es2_add_default_fields(struct net_device *ndev,
+ struct vcap_admin *admin,
+ struct vcap_rule *rule)
+{
+ const struct vcap_field *field;
+ bool is_first;
+
+ /* Add egress port mask matching the net device */
+ field = vcap_lookup_keyfield(rule, VCAP_KF_IF_EGR_PORT_MASK);
+ if (field)
+ sparx5_vcap_add_egress_range_port_mask(rule, ndev);
+
+ /* Add key that selects the first/second lookup */
+ is_first = sparx5_vcap_es2_is_first_chain(rule);
+
+ if (is_first)
+ vcap_rule_add_key_bit(rule, VCAP_KF_LOOKUP_FIRST_IS,
+ VCAP_BIT_1);
+ else
+ vcap_rule_add_key_bit(rule, VCAP_KF_LOOKUP_FIRST_IS,
+ VCAP_BIT_0);
+}
+
+/* API callback used for adding default fields to a rule */
+static void sparx5_vcap_add_default_fields(struct net_device *ndev,
+ struct vcap_admin *admin,
+ struct vcap_rule *rule)
+{
+ struct sparx5_port *port;
+
+ /* add the lookup bit */
+ switch (admin->vtype) {
+ case VCAP_TYPE_IS0:
+ case VCAP_TYPE_IS2:
+ sparx5_vcap_ingress_add_default_fields(ndev, admin, rule);
+ break;
+ case VCAP_TYPE_ES0:
+ sparx5_vcap_es0_add_default_fields(ndev, admin, rule);
+ break;
+ case VCAP_TYPE_ES2:
+ sparx5_vcap_es2_add_default_fields(ndev, admin, rule);
+ break;
+ default:
+ port = netdev_priv(ndev);
+ sparx5_vcap_type_err(port->sparx5, admin, __func__);
+ break;
+ }
}
/* API callback used for erasing the vcap cache area (not the register area) */
@@ -353,21 +915,60 @@ static void sparx5_vcap_cache_erase(struct vcap_admin *admin)
memset(&admin->cache.counter, 0, sizeof(admin->cache.counter));
}
-/* API callback used for writing to the VCAP cache */
-static void sparx5_vcap_cache_write(struct net_device *ndev,
- struct vcap_admin *admin,
- enum vcap_selection sel,
- u32 start,
- u32 count)
+static void sparx5_vcap_is0_cache_write(struct sparx5 *sparx5,
+ struct vcap_admin *admin,
+ enum vcap_selection sel,
+ u32 start,
+ u32 count)
+{
+ u32 *keystr, *mskstr, *actstr;
+ int idx;
+
+ keystr = &admin->cache.keystream[start];
+ mskstr = &admin->cache.maskstream[start];
+ actstr = &admin->cache.actionstream[start];
+
+ switch (sel) {
+ case VCAP_SEL_ENTRY:
+ for (idx = 0; idx < count; ++idx) {
+ /* Avoid 'match-off' by setting value & mask */
+ spx5_wr(keystr[idx] & mskstr[idx], sparx5,
+ VCAP_SUPER_VCAP_ENTRY_DAT(idx));
+ spx5_wr(~mskstr[idx], sparx5,
+ VCAP_SUPER_VCAP_MASK_DAT(idx));
+ }
+ break;
+ case VCAP_SEL_ACTION:
+ for (idx = 0; idx < count; ++idx)
+ spx5_wr(actstr[idx], sparx5,
+ VCAP_SUPER_VCAP_ACTION_DAT(idx));
+ break;
+ case VCAP_SEL_ALL:
+ pr_err("%s:%d: cannot write all streams at once\n",
+ __func__, __LINE__);
+ break;
+ default:
+ break;
+ }
+
+ if (sel & VCAP_SEL_COUNTER)
+ spx5_wr(admin->cache.counter, sparx5,
+ VCAP_SUPER_VCAP_CNT_DAT(0));
+}
+
+static void sparx5_vcap_is2_cache_write(struct sparx5 *sparx5,
+ struct vcap_admin *admin,
+ enum vcap_selection sel,
+ u32 start,
+ u32 count)
{
- struct sparx5_port *port = netdev_priv(ndev);
- struct sparx5 *sparx5 = port->sparx5;
u32 *keystr, *mskstr, *actstr;
int idx;
keystr = &admin->cache.keystream[start];
mskstr = &admin->cache.maskstream[start];
actstr = &admin->cache.actionstream[start];
+
switch (sel) {
case VCAP_SEL_ENTRY:
for (idx = 0; idx < count; ++idx) {
@@ -403,21 +1004,143 @@ static void sparx5_vcap_cache_write(struct net_device *ndev,
}
}
-/* API callback used for reading from the VCAP into the VCAP cache */
-static void sparx5_vcap_cache_read(struct net_device *ndev,
- struct vcap_admin *admin,
- enum vcap_selection sel,
- u32 start,
- u32 count)
+/* Use ESDX counters located in the XQS */
+static void sparx5_es0_write_esdx_counter(struct sparx5 *sparx5,
+ struct vcap_admin *admin, u32 id)
+{
+ mutex_lock(&sparx5->queue_stats_lock);
+ spx5_wr(XQS_STAT_CFG_STAT_VIEW_SET(id), sparx5, XQS_STAT_CFG);
+ spx5_wr(admin->cache.counter, sparx5,
+ XQS_CNT(SPARX5_STAT_ESDX_GRN_PKTS));
+ spx5_wr(0, sparx5, XQS_CNT(SPARX5_STAT_ESDX_YEL_PKTS));
+ mutex_unlock(&sparx5->queue_stats_lock);
+}
+
+static void sparx5_vcap_es0_cache_write(struct sparx5 *sparx5,
+ struct vcap_admin *admin,
+ enum vcap_selection sel,
+ u32 start,
+ u32 count)
+{
+ u32 *keystr, *mskstr, *actstr;
+ int idx;
+
+ keystr = &admin->cache.keystream[start];
+ mskstr = &admin->cache.maskstream[start];
+ actstr = &admin->cache.actionstream[start];
+
+ switch (sel) {
+ case VCAP_SEL_ENTRY:
+ for (idx = 0; idx < count; ++idx) {
+ /* Avoid 'match-off' by setting value & mask */
+ spx5_wr(keystr[idx] & mskstr[idx], sparx5,
+ VCAP_ES0_VCAP_ENTRY_DAT(idx));
+ spx5_wr(~mskstr[idx], sparx5,
+ VCAP_ES0_VCAP_MASK_DAT(idx));
+ }
+ break;
+ case VCAP_SEL_ACTION:
+ for (idx = 0; idx < count; ++idx)
+ spx5_wr(actstr[idx], sparx5,
+ VCAP_ES0_VCAP_ACTION_DAT(idx));
+ break;
+ case VCAP_SEL_ALL:
+ pr_err("%s:%d: cannot write all streams at once\n",
+ __func__, __LINE__);
+ break;
+ default:
+ break;
+ }
+ if (sel & VCAP_SEL_COUNTER) {
+ spx5_wr(admin->cache.counter, sparx5, VCAP_ES0_VCAP_CNT_DAT(0));
+ sparx5_es0_write_esdx_counter(sparx5, admin, start);
+ }
+}
+
+static void sparx5_vcap_es2_cache_write(struct sparx5 *sparx5,
+ struct vcap_admin *admin,
+ enum vcap_selection sel,
+ u32 start,
+ u32 count)
+{
+ u32 *keystr, *mskstr, *actstr;
+ int idx;
+
+ keystr = &admin->cache.keystream[start];
+ mskstr = &admin->cache.maskstream[start];
+ actstr = &admin->cache.actionstream[start];
+
+ switch (sel) {
+ case VCAP_SEL_ENTRY:
+ for (idx = 0; idx < count; ++idx) {
+ /* Avoid 'match-off' by setting value & mask */
+ spx5_wr(keystr[idx] & mskstr[idx], sparx5,
+ VCAP_ES2_VCAP_ENTRY_DAT(idx));
+ spx5_wr(~mskstr[idx], sparx5,
+ VCAP_ES2_VCAP_MASK_DAT(idx));
+ }
+ break;
+ case VCAP_SEL_ACTION:
+ for (idx = 0; idx < count; ++idx)
+ spx5_wr(actstr[idx], sparx5,
+ VCAP_ES2_VCAP_ACTION_DAT(idx));
+ break;
+ case VCAP_SEL_ALL:
+ pr_err("%s:%d: cannot write all streams at once\n",
+ __func__, __LINE__);
+ break;
+ default:
+ break;
+ }
+ if (sel & VCAP_SEL_COUNTER) {
+ start = start & 0x7ff; /* counter limit */
+ spx5_wr(admin->cache.counter, sparx5, EACL_ES2_CNT(start));
+ spx5_wr(admin->cache.sticky, sparx5, VCAP_ES2_VCAP_CNT_DAT(0));
+ }
+}
+
+/* API callback used for writing to the VCAP cache */
+static void sparx5_vcap_cache_write(struct net_device *ndev,
+ struct vcap_admin *admin,
+ enum vcap_selection sel,
+ u32 start,
+ u32 count)
{
struct sparx5_port *port = netdev_priv(ndev);
struct sparx5 *sparx5 = port->sparx5;
+
+ switch (admin->vtype) {
+ case VCAP_TYPE_IS0:
+ sparx5_vcap_is0_cache_write(sparx5, admin, sel, start, count);
+ break;
+ case VCAP_TYPE_IS2:
+ sparx5_vcap_is2_cache_write(sparx5, admin, sel, start, count);
+ break;
+ case VCAP_TYPE_ES0:
+ sparx5_vcap_es0_cache_write(sparx5, admin, sel, start, count);
+ break;
+ case VCAP_TYPE_ES2:
+ sparx5_vcap_es2_cache_write(sparx5, admin, sel, start, count);
+ break;
+ default:
+ sparx5_vcap_type_err(sparx5, admin, __func__);
+ break;
+ }
+}
+
+static void sparx5_vcap_is0_cache_read(struct sparx5 *sparx5,
+ struct vcap_admin *admin,
+ enum vcap_selection sel,
+ u32 start,
+ u32 count)
+{
u32 *keystr, *mskstr, *actstr;
int idx;
keystr = &admin->cache.keystream[start];
mskstr = &admin->cache.maskstream[start];
actstr = &admin->cache.actionstream[start];
+
if (sel & VCAP_SEL_ENTRY) {
for (idx = 0; idx < count; ++idx) {
keystr[idx] = spx5_rd(sparx5,
@@ -426,11 +1149,47 @@ static void sparx5_vcap_cache_read(struct net_device *ndev,
VCAP_SUPER_VCAP_MASK_DAT(idx));
}
}
- if (sel & VCAP_SEL_ACTION) {
+
+ if (sel & VCAP_SEL_ACTION)
for (idx = 0; idx < count; ++idx)
actstr[idx] = spx5_rd(sparx5,
VCAP_SUPER_VCAP_ACTION_DAT(idx));
+
+ if (sel & VCAP_SEL_COUNTER) {
+ admin->cache.counter =
+ spx5_rd(sparx5, VCAP_SUPER_VCAP_CNT_DAT(0));
+ admin->cache.sticky =
+ spx5_rd(sparx5, VCAP_SUPER_VCAP_CNT_DAT(0));
+ }
+}
+
+static void sparx5_vcap_is2_cache_read(struct sparx5 *sparx5,
+ struct vcap_admin *admin,
+ enum vcap_selection sel,
+ u32 start,
+ u32 count)
+{
+ u32 *keystr, *mskstr, *actstr;
+ int idx;
+
+ keystr = &admin->cache.keystream[start];
+ mskstr = &admin->cache.maskstream[start];
+ actstr = &admin->cache.actionstream[start];
+
+ if (sel & VCAP_SEL_ENTRY) {
+ for (idx = 0; idx < count; ++idx) {
+ keystr[idx] = spx5_rd(sparx5,
+ VCAP_SUPER_VCAP_ENTRY_DAT(idx));
+ mskstr[idx] = ~spx5_rd(sparx5,
+ VCAP_SUPER_VCAP_MASK_DAT(idx));
+ }
}
+
+ if (sel & VCAP_SEL_ACTION)
+ for (idx = 0; idx < count; ++idx)
+ actstr[idx] = spx5_rd(sparx5,
+ VCAP_SUPER_VCAP_ACTION_DAT(idx));
+
if (sel & VCAP_SEL_COUNTER) {
start = start & 0xfff; /* counter limit */
if (admin->vinst == 0)
@@ -444,6 +1203,121 @@ static void sparx5_vcap_cache_read(struct net_device *ndev,
}
}
+/* Use ESDX counters located in the XQS */
+static void sparx5_es0_read_esdx_counter(struct sparx5 *sparx5,
+ struct vcap_admin *admin, u32 id)
+{
+ u32 counter;
+
+ mutex_lock(&sparx5->queue_stats_lock);
+ spx5_wr(XQS_STAT_CFG_STAT_VIEW_SET(id), sparx5, XQS_STAT_CFG);
+ counter = spx5_rd(sparx5, XQS_CNT(SPARX5_STAT_ESDX_GRN_PKTS)) +
+ spx5_rd(sparx5, XQS_CNT(SPARX5_STAT_ESDX_YEL_PKTS));
+ mutex_unlock(&sparx5->queue_stats_lock);
+ if (counter)
+ admin->cache.counter = counter;
+}
+
+static void sparx5_vcap_es0_cache_read(struct sparx5 *sparx5,
+ struct vcap_admin *admin,
+ enum vcap_selection sel,
+ u32 start,
+ u32 count)
+{
+ u32 *keystr, *mskstr, *actstr;
+ int idx;
+
+ keystr = &admin->cache.keystream[start];
+ mskstr = &admin->cache.maskstream[start];
+ actstr = &admin->cache.actionstream[start];
+
+ if (sel & VCAP_SEL_ENTRY) {
+ for (idx = 0; idx < count; ++idx) {
+ keystr[idx] =
+ spx5_rd(sparx5, VCAP_ES0_VCAP_ENTRY_DAT(idx));
+ mskstr[idx] =
+ ~spx5_rd(sparx5, VCAP_ES0_VCAP_MASK_DAT(idx));
+ }
+ }
+
+ if (sel & VCAP_SEL_ACTION)
+ for (idx = 0; idx < count; ++idx)
+ actstr[idx] =
+ spx5_rd(sparx5, VCAP_ES0_VCAP_ACTION_DAT(idx));
+
+ if (sel & VCAP_SEL_COUNTER) {
+ admin->cache.counter =
+ spx5_rd(sparx5, VCAP_ES0_VCAP_CNT_DAT(0));
+ admin->cache.sticky = admin->cache.counter;
+ sparx5_es0_read_esdx_counter(sparx5, admin, start);
+ }
+}
+
+static void sparx5_vcap_es2_cache_read(struct sparx5 *sparx5,
+ struct vcap_admin *admin,
+ enum vcap_selection sel,
+ u32 start,
+ u32 count)
+{
+ u32 *keystr, *mskstr, *actstr;
+ int idx;
+
+ keystr = &admin->cache.keystream[start];
+ mskstr = &admin->cache.maskstream[start];
+ actstr = &admin->cache.actionstream[start];
+
+ if (sel & VCAP_SEL_ENTRY) {
+ for (idx = 0; idx < count; ++idx) {
+ keystr[idx] =
+ spx5_rd(sparx5, VCAP_ES2_VCAP_ENTRY_DAT(idx));
+ mskstr[idx] =
+ ~spx5_rd(sparx5, VCAP_ES2_VCAP_MASK_DAT(idx));
+ }
+ }
+
+ if (sel & VCAP_SEL_ACTION)
+ for (idx = 0; idx < count; ++idx)
+ actstr[idx] =
+ spx5_rd(sparx5, VCAP_ES2_VCAP_ACTION_DAT(idx));
+
+ if (sel & VCAP_SEL_COUNTER) {
+ start = start & 0x7ff; /* counter limit */
+ admin->cache.counter =
+ spx5_rd(sparx5, EACL_ES2_CNT(start));
+ admin->cache.sticky =
+ spx5_rd(sparx5, VCAP_ES2_VCAP_CNT_DAT(0));
+ }
+}
+
+/* API callback used for reading from the VCAP into the VCAP cache */
+static void sparx5_vcap_cache_read(struct net_device *ndev,
+ struct vcap_admin *admin,
+ enum vcap_selection sel,
+ u32 start,
+ u32 count)
+{
+ struct sparx5_port *port = netdev_priv(ndev);
+ struct sparx5 *sparx5 = port->sparx5;
+
+ switch (admin->vtype) {
+ case VCAP_TYPE_IS0:
+ sparx5_vcap_is0_cache_read(sparx5, admin, sel, start, count);
+ break;
+ case VCAP_TYPE_IS2:
+ sparx5_vcap_is2_cache_read(sparx5, admin, sel, start, count);
+ break;
+ case VCAP_TYPE_ES0:
+ sparx5_vcap_es0_cache_read(sparx5, admin, sel, start, count);
+ break;
+ case VCAP_TYPE_ES2:
+ sparx5_vcap_es2_cache_read(sparx5, admin, sel, start, count);
+ break;
+ default:
+ sparx5_vcap_type_err(sparx5, admin, __func__);
+ break;
+ }
+}
+
/* API callback used for initializing a VCAP address range */
static void sparx5_vcap_range_init(struct net_device *ndev,
struct vcap_admin *admin, u32 addr,
@@ -455,16 +1329,12 @@ static void sparx5_vcap_range_init(struct net_device *ndev,
_sparx5_vcap_range_init(sparx5, admin, addr, count);
}
-/* API callback used for updating the VCAP cache */
-static void sparx5_vcap_update(struct net_device *ndev,
- struct vcap_admin *admin, enum vcap_command cmd,
- enum vcap_selection sel, u32 addr)
+static void sparx5_vcap_super_update(struct sparx5 *sparx5,
+ enum vcap_command cmd,
+ enum vcap_selection sel, u32 addr)
{
- struct sparx5_port *port = netdev_priv(ndev);
- struct sparx5 *sparx5 = port->sparx5;
- bool clear;
+ bool clear = (cmd == VCAP_CMD_INITIALIZE);
- clear = (cmd == VCAP_CMD_INITIALIZE);
spx5_wr(VCAP_SUPER_CFG_MV_NUM_POS_SET(0) |
VCAP_SUPER_CFG_MV_SIZE_SET(0), sparx5, VCAP_SUPER_CFG);
spx5_wr(VCAP_SUPER_CTRL_UPDATE_CMD_SET(cmd) |
@@ -478,24 +1348,75 @@ static void sparx5_vcap_update(struct net_device *ndev,
sparx5_vcap_wait_super_update(sparx5);
}
-/* API callback used for moving a block of rules in the VCAP */
-static void sparx5_vcap_move(struct net_device *ndev, struct vcap_admin *admin,
- u32 addr, int offset, int count)
+static void sparx5_vcap_es0_update(struct sparx5 *sparx5,
+ enum vcap_command cmd,
+ enum vcap_selection sel, u32 addr)
+{
+ bool clear = (cmd == VCAP_CMD_INITIALIZE);
+
+ spx5_wr(VCAP_ES0_CFG_MV_NUM_POS_SET(0) |
+ VCAP_ES0_CFG_MV_SIZE_SET(0), sparx5, VCAP_ES0_CFG);
+ spx5_wr(VCAP_ES0_CTRL_UPDATE_CMD_SET(cmd) |
+ VCAP_ES0_CTRL_UPDATE_ENTRY_DIS_SET((VCAP_SEL_ENTRY & sel) == 0) |
+ VCAP_ES0_CTRL_UPDATE_ACTION_DIS_SET((VCAP_SEL_ACTION & sel) == 0) |
+ VCAP_ES0_CTRL_UPDATE_CNT_DIS_SET((VCAP_SEL_COUNTER & sel) == 0) |
+ VCAP_ES0_CTRL_UPDATE_ADDR_SET(addr) |
+ VCAP_ES0_CTRL_CLEAR_CACHE_SET(clear) |
+ VCAP_ES0_CTRL_UPDATE_SHOT_SET(true),
+ sparx5, VCAP_ES0_CTRL);
+ sparx5_vcap_wait_es0_update(sparx5);
+}
+
+static void sparx5_vcap_es2_update(struct sparx5 *sparx5,
+ enum vcap_command cmd,
+ enum vcap_selection sel, u32 addr)
+{
+ bool clear = (cmd == VCAP_CMD_INITIALIZE);
+
+ spx5_wr(VCAP_ES2_CFG_MV_NUM_POS_SET(0) |
+ VCAP_ES2_CFG_MV_SIZE_SET(0), sparx5, VCAP_ES2_CFG);
+ spx5_wr(VCAP_ES2_CTRL_UPDATE_CMD_SET(cmd) |
+ VCAP_ES2_CTRL_UPDATE_ENTRY_DIS_SET((VCAP_SEL_ENTRY & sel) == 0) |
+ VCAP_ES2_CTRL_UPDATE_ACTION_DIS_SET((VCAP_SEL_ACTION & sel) == 0) |
+ VCAP_ES2_CTRL_UPDATE_CNT_DIS_SET((VCAP_SEL_COUNTER & sel) == 0) |
+ VCAP_ES2_CTRL_UPDATE_ADDR_SET(addr) |
+ VCAP_ES2_CTRL_CLEAR_CACHE_SET(clear) |
+ VCAP_ES2_CTRL_UPDATE_SHOT_SET(true),
+ sparx5, VCAP_ES2_CTRL);
+ sparx5_vcap_wait_es2_update(sparx5);
+}
+
+/* API callback used for updating the VCAP cache */
+static void sparx5_vcap_update(struct net_device *ndev,
+ struct vcap_admin *admin, enum vcap_command cmd,
+ enum vcap_selection sel, u32 addr)
{
struct sparx5_port *port = netdev_priv(ndev);
struct sparx5 *sparx5 = port->sparx5;
- enum vcap_command cmd;
- u16 mv_num_pos;
- u16 mv_size;
- mv_size = count - 1;
- if (offset > 0) {
- mv_num_pos = offset - 1;
- cmd = VCAP_CMD_MOVE_DOWN;
- } else {
- mv_num_pos = -offset - 1;
- cmd = VCAP_CMD_MOVE_UP;
+ switch (admin->vtype) {
+ case VCAP_TYPE_IS0:
+ case VCAP_TYPE_IS2:
+ sparx5_vcap_super_update(sparx5, cmd, sel, addr);
+ break;
+ case VCAP_TYPE_ES0:
+ sparx5_vcap_es0_update(sparx5, cmd, sel, addr);
+ break;
+ case VCAP_TYPE_ES2:
+ sparx5_vcap_es2_update(sparx5, cmd, sel, addr);
+ break;
+ default:
+ sparx5_vcap_type_err(sparx5, admin, __func__);
+ break;
}
+}
+
+static void sparx5_vcap_super_move(struct sparx5 *sparx5,
+ u32 addr,
+ enum vcap_command cmd,
+ u16 mv_num_pos,
+ u16 mv_size)
+{
spx5_wr(VCAP_SUPER_CFG_MV_NUM_POS_SET(mv_num_pos) |
VCAP_SUPER_CFG_MV_SIZE_SET(mv_size),
sparx5, VCAP_SUPER_CFG);
@@ -510,29 +1431,82 @@ static void sparx5_vcap_move(struct net_device *ndev, struct vcap_admin *admin,
sparx5_vcap_wait_super_update(sparx5);
}
-/* Enable all lookups in the VCAP instance */
-static int sparx5_vcap_enable(struct net_device *ndev,
- struct vcap_admin *admin,
- bool enable)
+static void sparx5_vcap_es0_move(struct sparx5 *sparx5,
+ u32 addr,
+ enum vcap_command cmd,
+ u16 mv_num_pos,
+ u16 mv_size)
+{
+ spx5_wr(VCAP_ES0_CFG_MV_NUM_POS_SET(mv_num_pos) |
+ VCAP_ES0_CFG_MV_SIZE_SET(mv_size),
+ sparx5, VCAP_ES0_CFG);
+ spx5_wr(VCAP_ES0_CTRL_UPDATE_CMD_SET(cmd) |
+ VCAP_ES0_CTRL_UPDATE_ENTRY_DIS_SET(0) |
+ VCAP_ES0_CTRL_UPDATE_ACTION_DIS_SET(0) |
+ VCAP_ES0_CTRL_UPDATE_CNT_DIS_SET(0) |
+ VCAP_ES0_CTRL_UPDATE_ADDR_SET(addr) |
+ VCAP_ES0_CTRL_CLEAR_CACHE_SET(false) |
+ VCAP_ES0_CTRL_UPDATE_SHOT_SET(true),
+ sparx5, VCAP_ES0_CTRL);
+ sparx5_vcap_wait_es0_update(sparx5);
+}
+
+static void sparx5_vcap_es2_move(struct sparx5 *sparx5,
+ u32 addr,
+ enum vcap_command cmd,
+ u16 mv_num_pos,
+ u16 mv_size)
+{
+ spx5_wr(VCAP_ES2_CFG_MV_NUM_POS_SET(mv_num_pos) |
+ VCAP_ES2_CFG_MV_SIZE_SET(mv_size),
+ sparx5, VCAP_ES2_CFG);
+ spx5_wr(VCAP_ES2_CTRL_UPDATE_CMD_SET(cmd) |
+ VCAP_ES2_CTRL_UPDATE_ENTRY_DIS_SET(0) |
+ VCAP_ES2_CTRL_UPDATE_ACTION_DIS_SET(0) |
+ VCAP_ES2_CTRL_UPDATE_CNT_DIS_SET(0) |
+ VCAP_ES2_CTRL_UPDATE_ADDR_SET(addr) |
+ VCAP_ES2_CTRL_CLEAR_CACHE_SET(false) |
+ VCAP_ES2_CTRL_UPDATE_SHOT_SET(true),
+ sparx5, VCAP_ES2_CTRL);
+ sparx5_vcap_wait_es2_update(sparx5);
+}
+
+/* API callback used for moving a block of rules in the VCAP */
+static void sparx5_vcap_move(struct net_device *ndev, struct vcap_admin *admin,
+ u32 addr, int offset, int count)
{
struct sparx5_port *port = netdev_priv(ndev);
- struct sparx5 *sparx5;
- int portno;
+ struct sparx5 *sparx5 = port->sparx5;
+ enum vcap_command cmd;
+ u16 mv_num_pos;
+ u16 mv_size;
- sparx5 = port->sparx5;
- portno = port->portno;
+ mv_size = count - 1;
+ if (offset > 0) {
+ mv_num_pos = offset - 1;
+ cmd = VCAP_CMD_MOVE_DOWN;
+ } else {
+ mv_num_pos = -offset - 1;
+ cmd = VCAP_CMD_MOVE_UP;
+ }
- /* For now we only consider IS2 */
- if (enable)
- spx5_wr(ANA_ACL_VCAP_S2_CFG_SEC_ENA_SET(0xf), sparx5,
- ANA_ACL_VCAP_S2_CFG(portno));
- else
- spx5_wr(ANA_ACL_VCAP_S2_CFG_SEC_ENA_SET(0), sparx5,
- ANA_ACL_VCAP_S2_CFG(portno));
- return 0;
+ switch (admin->vtype) {
+ case VCAP_TYPE_IS0:
+ case VCAP_TYPE_IS2:
+ sparx5_vcap_super_move(sparx5, addr, cmd, mv_num_pos, mv_size);
+ break;
+ case VCAP_TYPE_ES0:
+ sparx5_vcap_es0_move(sparx5, addr, cmd, mv_num_pos, mv_size);
+ break;
+ case VCAP_TYPE_ES2:
+ sparx5_vcap_es2_move(sparx5, addr, cmd, mv_num_pos, mv_size);
+ break;
+ default:
+ sparx5_vcap_type_err(sparx5, admin, __func__);
+ break;
+ }
}
-/* API callback operations: only IS2 is supported for now */
static struct vcap_operations sparx5_vcap_ops = {
.validate_keyset = sparx5_vcap_validate_keyset,
.add_default_fields = sparx5_vcap_add_default_fields,
@@ -543,19 +1517,41 @@ static struct vcap_operations sparx5_vcap_ops = {
.update = sparx5_vcap_update,
.move = sparx5_vcap_move,
.port_info = sparx5_port_info,
- .enable = sparx5_vcap_enable,
};
-/* Enable lookups per port and set the keyset generation: only IS2 for now */
-static void sparx5_vcap_port_key_selection(struct sparx5 *sparx5,
- struct vcap_admin *admin)
+/* Enable IS0 lookups per port and set the keyset generation */
+static void sparx5_vcap_is0_port_key_selection(struct sparx5 *sparx5,
+ struct vcap_admin *admin)
+{
+ int portno, lookup;
+ u32 keysel;
+
+ keysel = VCAP_IS0_KEYSEL(false,
+ VCAP_IS0_PS_ETYPE_NORMAL_7TUPLE,
+ VCAP_IS0_PS_ETYPE_NORMAL_5TUPLE_IP4,
+ VCAP_IS0_PS_ETYPE_NORMAL_7TUPLE,
+ VCAP_IS0_PS_MPLS_FOLLOW_ETYPE,
+ VCAP_IS0_PS_MPLS_FOLLOW_ETYPE,
+ VCAP_IS0_PS_MLBS_FOLLOW_ETYPE);
+ for (lookup = 0; lookup < admin->lookups; ++lookup) {
+ for (portno = 0; portno < SPX5_PORTS; ++portno) {
+ spx5_wr(keysel, sparx5,
+ ANA_CL_ADV_CL_CFG(portno, lookup));
+ spx5_rmw(ANA_CL_ADV_CL_CFG_LOOKUP_ENA,
+ ANA_CL_ADV_CL_CFG_LOOKUP_ENA,
+ sparx5,
+ ANA_CL_ADV_CL_CFG(portno, lookup));
+ }
+ }
+}
+
+/* Enable IS2 lookups per port and set the keyset generation */
+static void sparx5_vcap_is2_port_key_selection(struct sparx5 *sparx5,
+ struct vcap_admin *admin)
{
int portno, lookup;
u32 keysel;
- /* all traffic types generate the MAC_ETYPE keyset for now in all
- * lookups on all ports
- */
keysel = VCAP_IS2_KEYSEL(true, VCAP_IS2_PS_NONETH_MAC_ETYPE,
VCAP_IS2_PS_IPV4_MC_IP4_TCP_UDP_OTHER,
VCAP_IS2_PS_IPV4_UC_IP4_TCP_UDP_OTHER,
@@ -568,19 +1564,107 @@ static void sparx5_vcap_port_key_selection(struct sparx5 *sparx5,
ANA_ACL_VCAP_S2_KEY_SEL(portno, lookup));
}
}
+ /* IS2 lookups are in bit 0:3 */
+ for (portno = 0; portno < SPX5_PORTS; ++portno)
+ spx5_rmw(ANA_ACL_VCAP_S2_CFG_SEC_ENA_SET(0xf),
+ ANA_ACL_VCAP_S2_CFG_SEC_ENA,
+ sparx5,
+ ANA_ACL_VCAP_S2_CFG(portno));
}
-/* Disable lookups per port and set the keyset generation: only IS2 for now */
-static void sparx5_vcap_port_key_deselection(struct sparx5 *sparx5,
- struct vcap_admin *admin)
+/* Enable ES0 lookups per port and set the keyset generation */
+static void sparx5_vcap_es0_port_key_selection(struct sparx5 *sparx5,
+ struct vcap_admin *admin)
{
int portno;
+ u32 keysel;
+ keysel = VCAP_ES0_KEYSEL(VCAP_ES0_PS_FORCE_ISDX_LOOKUPS);
for (portno = 0; portno < SPX5_PORTS; ++portno)
- spx5_rmw(ANA_ACL_VCAP_S2_CFG_SEC_ENA_SET(0),
- ANA_ACL_VCAP_S2_CFG_SEC_ENA,
- sparx5,
- ANA_ACL_VCAP_S2_CFG(portno));
+ spx5_rmw(keysel, REW_RTAG_ETAG_CTRL_ES0_ISDX_KEY_ENA,
+ sparx5, REW_RTAG_ETAG_CTRL(portno));
+
+ spx5_rmw(REW_ES0_CTRL_ES0_LU_ENA_SET(1), REW_ES0_CTRL_ES0_LU_ENA,
+ sparx5, REW_ES0_CTRL);
+}
+
+/* Enable ES2 lookups per port and set the keyset generation */
+static void sparx5_vcap_es2_port_key_selection(struct sparx5 *sparx5,
+ struct vcap_admin *admin)
+{
+ int portno, lookup;
+ u32 keysel;
+
+ keysel = VCAP_ES2_KEYSEL(true, VCAP_ES2_PS_ARP_MAC_ETYPE,
+ VCAP_ES2_PS_IPV4_IP4_TCP_UDP_OTHER,
+ VCAP_ES2_PS_IPV6_IP_7TUPLE);
+ for (lookup = 0; lookup < admin->lookups; ++lookup)
+ for (portno = 0; portno < SPX5_PORTS; ++portno)
+ spx5_wr(keysel, sparx5,
+ EACL_VCAP_ES2_KEY_SEL(portno, lookup));
+}
+
+/* Enable lookups per port and set the keyset generation */
+static void sparx5_vcap_port_key_selection(struct sparx5 *sparx5,
+ struct vcap_admin *admin)
+{
+ switch (admin->vtype) {
+ case VCAP_TYPE_IS0:
+ sparx5_vcap_is0_port_key_selection(sparx5, admin);
+ break;
+ case VCAP_TYPE_IS2:
+ sparx5_vcap_is2_port_key_selection(sparx5, admin);
+ break;
+ case VCAP_TYPE_ES0:
+ sparx5_vcap_es0_port_key_selection(sparx5, admin);
+ break;
+ case VCAP_TYPE_ES2:
+ sparx5_vcap_es2_port_key_selection(sparx5, admin);
+ break;
+ default:
+ sparx5_vcap_type_err(sparx5, admin, __func__);
+ break;
+ }
+}
+
+/* Disable lookups per port */
+static void sparx5_vcap_port_key_deselection(struct sparx5 *sparx5,
+ struct vcap_admin *admin)
+{
+ int portno, lookup;
+
+ switch (admin->vtype) {
+ case VCAP_TYPE_IS0:
+ for (lookup = 0; lookup < admin->lookups; ++lookup)
+ for (portno = 0; portno < SPX5_PORTS; ++portno)
+ spx5_rmw(ANA_CL_ADV_CL_CFG_LOOKUP_ENA_SET(0),
+ ANA_CL_ADV_CL_CFG_LOOKUP_ENA,
+ sparx5,
+ ANA_CL_ADV_CL_CFG(portno, lookup));
+ break;
+ case VCAP_TYPE_IS2:
+ for (portno = 0; portno < SPX5_PORTS; ++portno)
+ spx5_rmw(ANA_ACL_VCAP_S2_CFG_SEC_ENA_SET(0),
+ ANA_ACL_VCAP_S2_CFG_SEC_ENA,
+ sparx5,
+ ANA_ACL_VCAP_S2_CFG(portno));
+ break;
+ case VCAP_TYPE_ES0:
+ spx5_rmw(REW_ES0_CTRL_ES0_LU_ENA_SET(0),
+ REW_ES0_CTRL_ES0_LU_ENA, sparx5, REW_ES0_CTRL);
+ break;
+ case VCAP_TYPE_ES2:
+ for (lookup = 0; lookup < admin->lookups; ++lookup)
+ for (portno = 0; portno < SPX5_PORTS; ++portno)
+ spx5_rmw(EACL_VCAP_ES2_KEY_SEL_KEY_ENA_SET(0),
+ EACL_VCAP_ES2_KEY_SEL_KEY_ENA,
+ sparx5,
+ EACL_VCAP_ES2_KEY_SEL(portno, lookup));
+ break;
+ default:
+ sparx5_vcap_type_err(sparx5, admin, __func__);
+ break;
+ }
}
static void sparx5_vcap_admin_free(struct vcap_admin *admin)
@@ -610,6 +1694,7 @@ sparx5_vcap_admin_alloc(struct sparx5 *sparx5, struct vcap_control *ctrl,
mutex_init(&admin->lock);
admin->vtype = cfg->vtype;
admin->vinst = cfg->vinst;
+ admin->ingress = cfg->ingress;
admin->lookups = cfg->lookups;
admin->lookups_per_instance = cfg->lookups_per_instance;
admin->first_cid = cfg->first_cid;
@@ -633,22 +1718,55 @@ static void sparx5_vcap_block_alloc(struct sparx5 *sparx5,
struct vcap_admin *admin,
const struct sparx5_vcap_inst *cfg)
{
- int idx;
-
- /* Super VCAP block mapping and address configuration. Block 0
- * is assigned addresses 0 through 3071, block 1 is assigned
- * addresses 3072 though 6143, and so on.
- */
- for (idx = cfg->blockno; idx < cfg->blockno + cfg->blocks; ++idx) {
- spx5_wr(VCAP_SUPER_IDX_CORE_IDX_SET(idx), sparx5,
- VCAP_SUPER_IDX);
- spx5_wr(VCAP_SUPER_MAP_CORE_MAP_SET(cfg->map_id), sparx5,
- VCAP_SUPER_MAP);
- }
- admin->first_valid_addr = cfg->blockno * SUPER_VCAP_BLK_SIZE;
- admin->last_used_addr = admin->first_valid_addr +
- cfg->blocks * SUPER_VCAP_BLK_SIZE;
- admin->last_valid_addr = admin->last_used_addr - 1;
+ int idx, cores;
+
+ switch (admin->vtype) {
+ case VCAP_TYPE_IS0:
+ case VCAP_TYPE_IS2:
+ /* Super VCAP block mapping and address configuration. Block 0
+ * is assigned addresses 0 through 3071, block 1 is assigned
+ * addresses 3072 though 6143, and so on.
+ */
+ for (idx = cfg->blockno; idx < cfg->blockno + cfg->blocks;
+ ++idx) {
+ spx5_wr(VCAP_SUPER_IDX_CORE_IDX_SET(idx), sparx5,
+ VCAP_SUPER_IDX);
+ spx5_wr(VCAP_SUPER_MAP_CORE_MAP_SET(cfg->map_id),
+ sparx5, VCAP_SUPER_MAP);
+ }
+ admin->first_valid_addr = cfg->blockno * SUPER_VCAP_BLK_SIZE;
+ admin->last_used_addr = admin->first_valid_addr +
+ cfg->blocks * SUPER_VCAP_BLK_SIZE;
+ admin->last_valid_addr = admin->last_used_addr - 1;
+ break;
+ case VCAP_TYPE_ES0:
+ admin->first_valid_addr = 0;
+ admin->last_used_addr = cfg->count;
+ admin->last_valid_addr = cfg->count - 1;
+ cores = spx5_rd(sparx5, VCAP_ES0_CORE_CNT);
+ for (idx = 0; idx < cores; ++idx) {
+ spx5_wr(VCAP_ES0_IDX_CORE_IDX_SET(idx), sparx5,
+ VCAP_ES0_IDX);
+ spx5_wr(VCAP_ES0_MAP_CORE_MAP_SET(1), sparx5,
+ VCAP_ES0_MAP);
+ }
+ break;
+ case VCAP_TYPE_ES2:
+ admin->first_valid_addr = 0;
+ admin->last_used_addr = cfg->count;
+ admin->last_valid_addr = cfg->count - 1;
+ cores = spx5_rd(sparx5, VCAP_ES2_CORE_CNT);
+ for (idx = 0; idx < cores; ++idx) {
+ spx5_wr(VCAP_ES2_IDX_CORE_IDX_SET(idx), sparx5,
+ VCAP_ES2_IDX);
+ spx5_wr(VCAP_ES2_MAP_CORE_MAP_SET(1), sparx5,
+ VCAP_ES2_MAP);
+ }
+ break;
+ default:
+ sparx5_vcap_type_err(sparx5, admin, __func__);
+ break;
+ }
}
/* Allocate a vcap control and vcap instances and configure the system */
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.h b/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.h
index 0a0f2412c980..3260ab5e3a82 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.h
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.h
@@ -16,6 +16,15 @@
#include "vcap_api.h"
#include "vcap_api_client.h"
+#define SPARX5_VCAP_CID_IS0_L0 VCAP_CID_INGRESS_L0 /* IS0/CLM lookup 0 */
+#define SPARX5_VCAP_CID_IS0_L1 VCAP_CID_INGRESS_L1 /* IS0/CLM lookup 1 */
+#define SPARX5_VCAP_CID_IS0_L2 VCAP_CID_INGRESS_L2 /* IS0/CLM lookup 2 */
+#define SPARX5_VCAP_CID_IS0_L3 VCAP_CID_INGRESS_L3 /* IS0/CLM lookup 3 */
+#define SPARX5_VCAP_CID_IS0_L4 VCAP_CID_INGRESS_L4 /* IS0/CLM lookup 4 */
+#define SPARX5_VCAP_CID_IS0_L5 VCAP_CID_INGRESS_L5 /* IS0/CLM lookup 5 */
+#define SPARX5_VCAP_CID_IS0_MAX \
+ (VCAP_CID_INGRESS_L5 + VCAP_CID_LOOKUP_SIZE - 1) /* IS0/CLM Max */
+
#define SPARX5_VCAP_CID_IS2_L0 VCAP_CID_INGRESS_STAGE2_L0 /* IS2 lookup 0 */
#define SPARX5_VCAP_CID_IS2_L1 VCAP_CID_INGRESS_STAGE2_L1 /* IS2 lookup 1 */
#define SPARX5_VCAP_CID_IS2_L2 VCAP_CID_INGRESS_STAGE2_L2 /* IS2 lookup 2 */
@@ -23,6 +32,63 @@
#define SPARX5_VCAP_CID_IS2_MAX \
(VCAP_CID_INGRESS_STAGE2_L3 + VCAP_CID_LOOKUP_SIZE - 1) /* IS2 Max */
+#define SPARX5_VCAP_CID_ES0_L0 VCAP_CID_EGRESS_L0 /* ES0 lookup 0 */
+#define SPARX5_VCAP_CID_ES0_MAX (VCAP_CID_EGRESS_L1 - 1) /* ES0 Max */
+
+#define SPARX5_VCAP_CID_ES2_L0 VCAP_CID_EGRESS_STAGE2_L0 /* ES2 lookup 0 */
+#define SPARX5_VCAP_CID_ES2_L1 VCAP_CID_EGRESS_STAGE2_L1 /* ES2 lookup 1 */
+#define SPARX5_VCAP_CID_ES2_MAX \
+ (VCAP_CID_EGRESS_STAGE2_L1 + VCAP_CID_LOOKUP_SIZE - 1) /* ES2 Max */
+
+/* IS0 port keyset selection control */
+
+/* IS0 ethernet, IPv4, IPv6 traffic type keyset generation */
+enum vcap_is0_port_sel_etype {
+ VCAP_IS0_PS_ETYPE_DEFAULT, /* None or follow depending on class */
+ VCAP_IS0_PS_ETYPE_MLL,
+ VCAP_IS0_PS_ETYPE_SGL_MLBS,
+ VCAP_IS0_PS_ETYPE_DBL_MLBS,
+ VCAP_IS0_PS_ETYPE_TRI_MLBS,
+ VCAP_IS0_PS_ETYPE_TRI_VID,
+ VCAP_IS0_PS_ETYPE_LL_FULL,
+ VCAP_IS0_PS_ETYPE_NORMAL_SRC,
+ VCAP_IS0_PS_ETYPE_NORMAL_DST,
+ VCAP_IS0_PS_ETYPE_NORMAL_7TUPLE,
+ VCAP_IS0_PS_ETYPE_NORMAL_5TUPLE_IP4,
+ VCAP_IS0_PS_ETYPE_PURE_5TUPLE_IP4,
+ VCAP_IS0_PS_ETYPE_DBL_VID_IDX,
+ VCAP_IS0_PS_ETYPE_ETAG,
+ VCAP_IS0_PS_ETYPE_NO_LOOKUP,
+};
+
+/* IS0 MPLS traffic type keyset generation */
+enum vcap_is0_port_sel_mpls_uc_mc {
+ VCAP_IS0_PS_MPLS_FOLLOW_ETYPE,
+ VCAP_IS0_PS_MPLS_MLL,
+ VCAP_IS0_PS_MPLS_SGL_MLBS,
+ VCAP_IS0_PS_MPLS_DBL_MLBS,
+ VCAP_IS0_PS_MPLS_TRI_MLBS,
+ VCAP_IS0_PS_MPLS_TRI_VID,
+ VCAP_IS0_PS_MPLS_LL_FULL,
+ VCAP_IS0_PS_MPLS_NORMAL_SRC,
+ VCAP_IS0_PS_MPLS_NORMAL_DST,
+ VCAP_IS0_PS_MPLS_NORMAL_7TUPLE,
+ VCAP_IS0_PS_MPLS_NORMAL_5TUPLE_IP4,
+ VCAP_IS0_PS_MPLS_PURE_5TUPLE_IP4,
+ VCAP_IS0_PS_MPLS_DBL_VID_IDX,
+ VCAP_IS0_PS_MPLS_ETAG,
+ VCAP_IS0_PS_MPLS_NO_LOOKUP,
+};
+
+/* IS0 MBLS traffic type keyset generation */
+enum vcap_is0_port_sel_mlbs {
+ VCAP_IS0_PS_MLBS_FOLLOW_ETYPE,
+ VCAP_IS0_PS_MLBS_SGL_MLBS,
+ VCAP_IS0_PS_MLBS_DBL_MLBS,
+ VCAP_IS0_PS_MLBS_TRI_MLBS,
+ VCAP_IS0_PS_MLBS_NO_LOOKUP = 17,
+};
+
/* IS2 port keyset selection control */
/* IS2 non-ethernet traffic type keyset generation */
@@ -71,6 +137,57 @@ enum vcap_is2_port_sel_arp {
VCAP_IS2_PS_ARP_ARP,
};
+/* ES0 port keyset selection control */
+
+/* ES0 Egress port traffic type classification */
+enum vcap_es0_port_sel {
+ VCAP_ES0_PS_NORMAL_SELECTION,
+ VCAP_ES0_PS_FORCE_ISDX_LOOKUPS,
+ VCAP_ES0_PS_FORCE_VID_LOOKUPS,
+ VCAP_ES0_PS_RESERVED,
+};
+
+/* ES2 port keyset selection control */
+
+/* ES2 IPv4 traffic type keyset generation */
+enum vcap_es2_port_sel_ipv4 {
+ VCAP_ES2_PS_IPV4_MAC_ETYPE,
+ VCAP_ES2_PS_IPV4_IP_7TUPLE,
+ VCAP_ES2_PS_IPV4_IP4_TCP_UDP_VID,
+ VCAP_ES2_PS_IPV4_IP4_TCP_UDP_OTHER,
+ VCAP_ES2_PS_IPV4_IP4_VID,
+ VCAP_ES2_PS_IPV4_IP4_OTHER,
+};
+
+/* ES2 IPv6 traffic type keyset generation */
+enum vcap_es2_port_sel_ipv6 {
+ VCAP_ES2_PS_IPV6_MAC_ETYPE,
+ VCAP_ES2_PS_IPV6_IP_7TUPLE,
+ VCAP_ES2_PS_IPV6_IP_7TUPLE_VID,
+ VCAP_ES2_PS_IPV6_IP_7TUPLE_STD,
+ VCAP_ES2_PS_IPV6_IP6_VID,
+ VCAP_ES2_PS_IPV6_IP6_STD,
+ VCAP_ES2_PS_IPV6_IP4_DOWNGRADE,
+};
+
+/* ES2 ARP traffic type keyset generation */
+enum vcap_es2_port_sel_arp {
+ VCAP_ES2_PS_ARP_MAC_ETYPE,
+ VCAP_ES2_PS_ARP_ARP,
+};
+
+/* Selects TPID for ES0 matching */
+enum SPX5_TPID_SEL {
+ SPX5_TPID_SEL_UNTAGGED,
+ SPX5_TPID_SEL_8100,
+ SPX5_TPID_SEL_UNUSED_0,
+ SPX5_TPID_SEL_UNUSED_1,
+ SPX5_TPID_SEL_88A8,
+ SPX5_TPID_SEL_TPIDCFG_1,
+ SPX5_TPID_SEL_TPIDCFG_2,
+ SPX5_TPID_SEL_TPIDCFG_3,
+};
+
/* Get the port keyset for the vcap lookup */
int sparx5_vcap_get_port_keyset(struct net_device *ndev,
struct vcap_admin *admin,
@@ -78,4 +195,7 @@ int sparx5_vcap_get_port_keyset(struct net_device *ndev,
u16 l3_proto,
struct vcap_keyset_list *kslist);
+/* Check if the ethertype is supported by the vcap port classification */
+bool sparx5_vcap_is_known_etype(struct vcap_admin *admin, u16 etype);
+
#endif /* __SPARX5_VCAP_IMPL_H__ */
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c b/drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c
index 34f954bbf815..ac001ae59a38 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_vlan.c
@@ -219,8 +219,8 @@ void sparx5_vlan_port_apply(struct sparx5 *sparx5,
spx5_wr(val, sparx5,
ANA_CL_VLAN_FILTER_CTRL(port->portno, 0));
- /* Egress configuration (REW_TAG_CFG): VLAN tag type to 8021Q */
- val = REW_TAG_CTRL_TAG_TPID_CFG_SET(0);
+ /* Egress configuration (REW_TAG_CFG): VLAN tag selected via IFH */
+ val = REW_TAG_CTRL_TAG_TPID_CFG_SET(5);
if (port->vlan_aware) {
if (port->vid)
/* Tag all frames except when VID == DEFAULT_VLAN */
diff --git a/drivers/net/ethernet/microchip/vcap/Makefile b/drivers/net/ethernet/microchip/vcap/Makefile
index 0adb8f5a8735..c86f20e6491f 100644
--- a/drivers/net/ethernet/microchip/vcap/Makefile
+++ b/drivers/net/ethernet/microchip/vcap/Makefile
@@ -7,4 +7,4 @@ obj-$(CONFIG_VCAP) += vcap.o
obj-$(CONFIG_VCAP_KUNIT_TEST) += vcap_model_kunit.o
vcap-$(CONFIG_DEBUG_FS) += vcap_api_debugfs.o
-vcap-y += vcap_api.o
+vcap-y += vcap_api.o vcap_tc.o
diff --git a/drivers/net/ethernet/microchip/vcap/vcap_ag_api.h b/drivers/net/ethernet/microchip/vcap/vcap_ag_api.h
index 84de2aee4169..0844fcaeee68 100644
--- a/drivers/net/ethernet/microchip/vcap/vcap_ag_api.h
+++ b/drivers/net/ethernet/microchip/vcap/vcap_ag_api.h
@@ -1,16 +1,17 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright (C) 2022 Microchip Technology Inc. and its subsidiaries.
+/* Copyright (C) 2023 Microchip Technology Inc. and its subsidiaries.
* Microchip VCAP API
*/
-/* This file is autogenerated by cml-utils 2022-10-13 10:04:41 +0200.
- * Commit ID: fd7cafd175899f0672c73afb3a30fc872500ae86
+/* This file is autogenerated by cml-utils 2023-02-10 11:15:56 +0100.
+ * Commit ID: c30fb4bf0281cd4a7133bdab6682f9e43c872ada
*/
#ifndef __VCAP_AG_API__
#define __VCAP_AG_API__
enum vcap_type {
+ VCAP_TYPE_ES0,
VCAP_TYPE_ES2,
VCAP_TYPE_IS0,
VCAP_TYPE_IS2,
@@ -20,27 +21,25 @@ enum vcap_type {
/* Keyfieldset names with origin information */
enum vcap_keyfield_set {
VCAP_KFS_NO_VALUE, /* initial value */
- VCAP_KFS_ARP, /* sparx5 is2 X6, sparx5 es2 X6 */
+ VCAP_KFS_ARP, /* sparx5 is2 X6, sparx5 es2 X6, lan966x is2 X2 */
VCAP_KFS_ETAG, /* sparx5 is0 X2 */
- VCAP_KFS_IP4_OTHER, /* sparx5 is2 X6, sparx5 es2 X6 */
- VCAP_KFS_IP4_TCP_UDP, /* sparx5 is2 X6, sparx5 es2 X6 */
+ VCAP_KFS_IP4_OTHER, /* sparx5 is2 X6, sparx5 es2 X6, lan966x is2 X2 */
+ VCAP_KFS_IP4_TCP_UDP, /* sparx5 is2 X6, sparx5 es2 X6, lan966x is2 X2 */
VCAP_KFS_IP4_VID, /* sparx5 es2 X3 */
- VCAP_KFS_IP6_STD, /* sparx5 is2 X6 */
- VCAP_KFS_IP6_VID, /* sparx5 is2 X6, sparx5 es2 X6 */
+ VCAP_KFS_IP6_OTHER, /* lan966x is2 X4 */
+ VCAP_KFS_IP6_STD, /* sparx5 is2 X6, sparx5 es2 X6, lan966x is2 X2 */
+ VCAP_KFS_IP6_TCP_UDP, /* lan966x is2 X4 */
+ VCAP_KFS_IP6_VID, /* sparx5 es2 X6 */
VCAP_KFS_IP_7TUPLE, /* sparx5 is2 X12, sparx5 es2 X12 */
+ VCAP_KFS_ISDX, /* sparx5 es0 X1 */
VCAP_KFS_LL_FULL, /* sparx5 is0 X6 */
- VCAP_KFS_MAC_ETYPE, /* sparx5 is2 X6, sparx5 es2 X6 */
- VCAP_KFS_MLL, /* sparx5 is0 X3 */
- VCAP_KFS_NORMAL, /* sparx5 is0 X6 */
- VCAP_KFS_NORMAL_5TUPLE_IP4, /* sparx5 is0 X6 */
- VCAP_KFS_NORMAL_7TUPLE, /* sparx5 is0 X12 */
- VCAP_KFS_PURE_5TUPLE_IP4, /* sparx5 is0 X3 */
- VCAP_KFS_TRI_VID, /* sparx5 is0 X2 */
+ VCAP_KFS_MAC_ETYPE, /* sparx5 is2 X6, sparx5 es2 X6, lan966x is2 X2 */
VCAP_KFS_MAC_LLC, /* lan966x is2 X2 */
VCAP_KFS_MAC_SNAP, /* lan966x is2 X2 */
+ VCAP_KFS_NORMAL_5TUPLE_IP4, /* sparx5 is0 X6 */
+ VCAP_KFS_NORMAL_7TUPLE, /* sparx5 is0 X12 */
VCAP_KFS_OAM, /* lan966x is2 X2 */
- VCAP_KFS_IP6_TCP_UDP, /* lan966x is2 X4 */
- VCAP_KFS_IP6_OTHER, /* lan966x is2 X4 */
+ VCAP_KFS_PURE_5TUPLE_IP4, /* sparx5 is0 X3 */
VCAP_KFS_SMAC_SIP4, /* lan966x is2 X1 */
VCAP_KFS_SMAC_SIP6, /* lan966x is2 X2 */
};
@@ -78,6 +77,8 @@ enum vcap_keyfield_set {
* Third PCP in multiple vlan tags (not always available)
* VCAP_KF_8021Q_PCP_CLS: W3, sparx5: is2/es2, lan966x: is2
* Classified PCP
+ * VCAP_KF_8021Q_TPID: W3, sparx5: es0
+ * TPID for outer tag: 0: Customer TPID 1: Service TPID (88A8 or programmable)
* VCAP_KF_8021Q_TPID0: W3, sparx5: is0
* First TPIC in multiple vlan tags (outer tag or default port tag)
* VCAP_KF_8021Q_TPID1: W3, sparx5: is0
@@ -90,7 +91,8 @@ enum vcap_keyfield_set {
* Second VID in multiple vlan tags (inner tag)
* VCAP_KF_8021Q_VID2: W12, sparx5: is0
* Third VID in multiple vlan tags (not always available)
- * VCAP_KF_8021Q_VID_CLS: W13, sparx5: is2/es2, lan966x is2 W12
+ * VCAP_KF_8021Q_VID_CLS: sparx5 is2 W13, sparx5 es0 W13, sparx5 es2 W13,
+ * lan966x is2 W12
* Classified VID
* VCAP_KF_8021Q_VLAN_TAGGED_IS: W1, sparx5: is2/es2, lan966x: is2
* Sparx5: Set if frame was received with a VLAN tag, LAN966x: Set if frame has
@@ -104,7 +106,7 @@ enum vcap_keyfield_set {
* Set if hardware address is Ethernet
* VCAP_KF_ARP_LEN_OK_IS: W1, sparx5: is2/es2, lan966x: is2
* Set if hardware address length = 6 (Ethernet) and IP address length = 4 (IP).
- * VCAP_KF_ARP_OPCODE: W2, sparx5: is2/es2, lan966x: i2
+ * VCAP_KF_ARP_OPCODE: W2, sparx5: is2/es2, lan966x: is2
* ARP opcode
* VCAP_KF_ARP_OPCODE_UNKNOWN_IS: W1, sparx5: is2/es2, lan966x: is2
* Set if not one of the codes defined in VCAP_KF_ARP_OPCODE
@@ -114,25 +116,25 @@ enum vcap_keyfield_set {
* Sender Hardware Address = SMAC (ARP)
* VCAP_KF_ARP_TGT_MATCH_IS: W1, sparx5: is2/es2, lan966x: is2
* Target Hardware Address = SMAC (RARP)
- * VCAP_KF_COSID_CLS: W3, sparx5: es2
+ * VCAP_KF_COSID_CLS: W3, sparx5: es0/es2
* Class of service
- * VCAP_KF_DST_ENTRY: W1, sparx5: is0
- * Selects whether the frame’s destination or source information is used for
- * fields L2_SMAC and L3_IP4_SIP
* VCAP_KF_ES0_ISDX_KEY_ENA: W1, sparx5: es2
* The value taken from the IFH .FWD.ES0_ISDX_KEY_ENA
* VCAP_KF_ETYPE: W16, sparx5: is0/is2/es2, lan966x: is2
* Ethernet type
* VCAP_KF_ETYPE_LEN_IS: W1, sparx5: is0/is2/es2
* Set if frame has EtherType >= 0x600
- * VCAP_KF_ETYPE_MPLS: W2, sparx5: is0
- * Type of MPLS Ethertype (or not)
+ * VCAP_KF_HOST_MATCH: W1, lan966x: is2
+ * The action from the SMAC_SIP4 or SMAC_SIP6 lookups. Used for IP source
+ * guarding.
* VCAP_KF_IF_EGR_PORT_MASK: W32, sparx5: es2
* Egress port mask, one bit per port
* VCAP_KF_IF_EGR_PORT_MASK_RNG: W3, sparx5: es2
* Select which 32 port group is available in IF_EGR_PORT (or virtual ports or
* CPU queue)
- * VCAP_KF_IF_IGR_PORT: sparx5 is0 W7, sparx5 es2 W9
+ * VCAP_KF_IF_EGR_PORT_NO: W7, sparx5: es0
+ * Egress port number
+ * VCAP_KF_IF_IGR_PORT: sparx5 is0 W7, sparx5 es2 W9, lan966x is2 W4
* Sparx5: Logical ingress port number retrieved from
* ANA_CL::PORT_ID_CFG.LPORT_NUM or ERLEG, LAN966x: ingress port nunmber
* VCAP_KF_IF_IGR_PORT_MASK: sparx5 is0 W65, sparx5 is2 W32, sparx5 is2 W65,
@@ -152,45 +154,61 @@ enum vcap_keyfield_set {
* VCAP_KF_IP4_IS: W1, sparx5: is0/is2/es2, lan966x: is2
* Set if frame has EtherType = 0x800 and IP version = 4
* VCAP_KF_IP_MC_IS: W1, sparx5: is0
- * Set if frame is IPv4 frame and frame’s destination MAC address is an IPv4
- * multicast address (0x01005E0 /25). Set if frame is IPv6 frame and frame’s
+ * Set if frame is IPv4 frame and frame's destination MAC address is an IPv4
+ * multicast address (0x01005E0 /25). Set if frame is IPv6 frame and frame's
* destination MAC address is an IPv6 multicast address (0x3333/16).
* VCAP_KF_IP_PAYLOAD_5TUPLE: W32, sparx5: is0
* Payload bytes after IP header
* VCAP_KF_IP_SNAP_IS: W1, sparx5: is0
* Set if frame is IPv4, IPv6, or SNAP frame
- * VCAP_KF_ISDX_CLS: W12, sparx5: is2/es2
+ * VCAP_KF_ISDX_CLS: W12, sparx5: is2/es0/es2
* Classified ISDX
- * VCAP_KF_ISDX_GT0_IS: W1, sparx5: is2/es2, lan966x: is2
+ * VCAP_KF_ISDX_GT0_IS: W1, sparx5: is2/es0/es2, lan966x: is2
* Set if classified ISDX > 0
* VCAP_KF_L2_BC_IS: W1, sparx5: is0/is2/es2, lan966x: is2
- * Set if frame’s destination MAC address is the broadcast address
+ * Set if frame's destination MAC address is the broadcast address
* (FF-FF-FF-FF-FF-FF).
* VCAP_KF_L2_DMAC: W48, sparx5: is0/is2/es2, lan966x: is2
* Destination MAC address
+ * VCAP_KF_L2_FRM_TYPE: W4, lan966x: is2
+ * Frame subtype for specific EtherTypes (MRP, DLR)
* VCAP_KF_L2_FWD_IS: W1, sparx5: is2
* Set if the frame is allowed to be forwarded to front ports
- * VCAP_KF_L2_MC_IS: W1, sparx5: is0/is2/es2, lan9966x is2
- * Set if frame’s destination MAC address is a multicast address (bit 40 = 1).
+ * VCAP_KF_L2_LLC: W40, lan966x: is2
+ * LLC header and data after up to two VLAN tags and the type/length field
+ * VCAP_KF_L2_MC_IS: W1, sparx5: is0/is2/es2, lan966x: is2
+ * Set if frame's destination MAC address is a multicast address (bit 40 = 1).
+ * VCAP_KF_L2_PAYLOAD0: W16, lan966x: is2
+ * Payload bytes 0-1 after the frame's EtherType
+ * VCAP_KF_L2_PAYLOAD1: W8, lan966x: is2
+ * Payload byte 4 after the frame's EtherType. This is specifically for PTP
+ * frames.
+ * VCAP_KF_L2_PAYLOAD2: W3, lan966x: is2
+ * Bits 7, 2, and 1 from payload byte 6 after the frame's EtherType. This is
+ * specifically for PTP frames.
* VCAP_KF_L2_PAYLOAD_ETYPE: W64, sparx5: is2/es2
* Byte 0-7 of L2 payload after Type/Len field and overloading for OAM
- * VCAP_KF_L2_SMAC: W48, sparx5: is0/is2/es2, lan966x is2
+ * VCAP_KF_L2_SMAC: W48, sparx5: is0/is2/es2, lan966x: is2
* Source MAC address
+ * VCAP_KF_L2_SNAP: W40, lan966x: is2
+ * SNAP header after LLC header (AA-AA-03)
* VCAP_KF_L3_DIP_EQ_SIP_IS: W1, sparx5: is2/es2, lan966x: is2
* Set if Src IP matches Dst IP address
- * VCAP_KF_L3_DMAC_DIP_MATCH: W1, sparx5: is2
- * Match found in DIP security lookup in ANA_L3
- * VCAP_KF_L3_DPL_CLS: W1, sparx5: es2
+ * VCAP_KF_L3_DPL_CLS: W1, sparx5: es0/es2
* The frames drop precedence level
* VCAP_KF_L3_DSCP: W6, sparx5: is0
- * Frame’s DSCP value
+ * Frame's DSCP value
* VCAP_KF_L3_DST_IS: W1, sparx5: is2
* Set if lookup is done for egress router leg
+ * VCAP_KF_L3_FRAGMENT: W1, lan966x: is2
+ * Set if IPv4 frame is fragmented
* VCAP_KF_L3_FRAGMENT_TYPE: W2, sparx5: is0/is2/es2
* L3 Fragmentation type (none, initial, suspicious, valid follow up)
* VCAP_KF_L3_FRAG_INVLD_L4_LEN: W1, sparx5: is0/is2
* Set if frame's L4 length is less than ANA_CL:COMMON:CLM_FRAGMENT_CFG.L4_MIN_L
* EN
+ * VCAP_KF_L3_FRAG_OFS_GT0: W1, lan966x: is2
+ * Set if IPv4 frame is fragmented and it is not the first fragment
* VCAP_KF_L3_IP4_DIP: W32, sparx5: is0/is2/es2, lan966x: is2
* Destination IPv4 Address
* VCAP_KF_L3_IP4_SIP: W32, sparx5: is0/is2/es2, lan966x: is2
@@ -205,36 +223,38 @@ enum vcap_keyfield_set {
* IPv4 frames: IP protocol. IPv6 frames: Next header, same as for IPV4
* VCAP_KF_L3_OPTIONS_IS: W1, sparx5: is0/is2/es2, lan966x: is2
* Set if IPv4 frame contains options (IP len > 5)
- * VCAP_KF_L3_PAYLOAD: sparx5 is2 W96, sparx5 is2 W40, sparx5 es2 W96,
- * lan966x is2 W56
+ * VCAP_KF_L3_PAYLOAD: sparx5 is2 W96, sparx5 is2 W40, sparx5 es2 W96, sparx5
+ * es2 W40, lan966x is2 W56
* Sparx5: Payload bytes after IP header. IPv4: IPv4 options are not parsed so
* payload is always taken 20 bytes after the start of the IPv4 header, LAN966x:
* Bytes 0-6 after IP header
* VCAP_KF_L3_RT_IS: W1, sparx5: is2/es2
* Set if frame has hit a router leg
- * VCAP_KF_L3_SMAC_SIP_MATCH: W1, sparx5: is2
- * Match found in SIP security lookup in ANA_L3
* VCAP_KF_L3_TOS: W8, sparx5: is2/es2, lan966x: is2
* Sparx5: Frame's IPv4/IPv6 DSCP and ECN fields, LAN966x: IP TOS field
* VCAP_KF_L3_TTL_GT0: W1, sparx5: is2/es2, lan966x: is2
* Set if IPv4 TTL / IPv6 hop limit is greater than 0
+ * VCAP_KF_L4_1588_DOM: W8, lan966x: is2
+ * PTP over UDP: domainNumber
+ * VCAP_KF_L4_1588_VER: W4, lan966x: is2
+ * PTP over UDP: version
* VCAP_KF_L4_ACK: W1, sparx5: is2/es2, lan966x: is2
* Sparx5 and LAN966x: TCP flag ACK, LAN966x only: PTP over UDP: flagField bit 2
* (unicastFlag)
* VCAP_KF_L4_DPORT: W16, sparx5: is2/es2, lan966x: is2
* Sparx5: TCP/UDP destination port. Overloading for IP_7TUPLE: Non-TCP/UDP IP
* frames: L4_DPORT = L3_IP_PROTO, LAN966x: TCP/UDP destination port
- * VCAP_KF_L4_FIN: W1, sparx5: is2/es2
+ * VCAP_KF_L4_FIN: W1, sparx5: is2/es2, lan966x: is2
* TCP flag FIN, LAN966x: TCP flag FIN, and for PTP over UDP: messageType bit 1
* VCAP_KF_L4_PAYLOAD: W64, sparx5: is2/es2
* Payload bytes after TCP/UDP header Overloading for IP_7TUPLE: Non TCP/UDP
- * frames: Payload bytes 0–7 after IP header. IPv4 options are not parsed so
+ * frames: Payload bytes 0-7 after IP header. IPv4 options are not parsed so
* payload is always taken 20 bytes after the start of the IPv4 header for non
* TCP/UDP IPv4 frames
* VCAP_KF_L4_PSH: W1, sparx5: is2/es2, lan966x: is2
* Sparx5: TCP flag PSH, LAN966x: TCP: TCP flag PSH. PTP over UDP: flagField bit
* 1 (twoStepFlag)
- * VCAP_KF_L4_RNG: sparx5 is0 W8, sparx5 is2 W16, sparx5 es2 W16, lan966x: is2
+ * VCAP_KF_L4_RNG: sparx5 is0 W8, sparx5 is2 W16, sparx5 es2 W16, lan966x is2 W8
* Range checker bitmask (one for each range checker). Input into range checkers
* is taken from classified results (VID, DSCP) and frame (SPORT, DPORT, ETYPE,
* outer VID, inner VID)
@@ -263,58 +283,35 @@ enum vcap_keyfield_set {
* Select the mode of the Generic Index
* VCAP_KF_LOOKUP_PAG: W8, sparx5: is2, lan966x: is2
* Classified Policy Association Group: chains rules from IS1/CLM to IS2
+ * VCAP_KF_MIRROR_PROBE: W2, sparx5: es2
+ * Identifies frame copies generated as a result of mirroring
* VCAP_KF_OAM_CCM_CNTS_EQ0: W1, sparx5: is2/es2, lan966x: is2
* Dual-ended loss measurement counters in CCM frames are all zero
- * VCAP_KF_OAM_MEL_FLAGS: W7, sparx5: is0, lan966x: is2
+ * VCAP_KF_OAM_DETECTED: W1, lan966x: is2
+ * This is missing in the datasheet, but present in the OAM keyset in XML
+ * VCAP_KF_OAM_FLAGS: W8, lan966x: is2
+ * Frame's OAM flags
+ * VCAP_KF_OAM_MEL_FLAGS: W7, lan966x: is2
* Encoding of MD level/MEG level (MEL)
- * VCAP_KF_OAM_Y1731_IS: W1, sparx5: is0/is2/es2, lan966x: is2
- * Set if frame’s EtherType = 0x8902
- * VCAP_KF_PROT_ACTIVE: W1, sparx5: es2
+ * VCAP_KF_OAM_MEPID: W16, lan966x: is2
+ * CCM frame's OAM MEP ID
+ * VCAP_KF_OAM_OPCODE: W8, lan966x: is2
+ * Frame's OAM opcode
+ * VCAP_KF_OAM_VER: W5, lan966x: is2
+ * Frame's OAM version
+ * VCAP_KF_OAM_Y1731_IS: W1, sparx5: is2/es2, lan966x: is2
+ * Set if frame's EtherType = 0x8902
+ * VCAP_KF_PROT_ACTIVE: W1, sparx5: es0/es2
* Protection is active
* VCAP_KF_TCP_IS: W1, sparx5: is0/is2/es2, lan966x: is2
* Set if frame is IPv4 TCP frame (IP protocol = 6) or IPv6 TCP frames (Next
* header = 6)
- * VCAP_KF_TCP_UDP_IS: W1, sparx5: is0/is2/es2, lan966x: is2
+ * VCAP_KF_TCP_UDP_IS: W1, sparx5: is0/is2/es2
* Set if frame is IPv4/IPv6 TCP or UDP frame (IP protocol/next header equals 6
* or 17)
* VCAP_KF_TYPE: sparx5 is0 W2, sparx5 is0 W1, sparx5 is2 W4, sparx5 is2 W2,
- * sparx5 es2 W3, lan966x: is2
+ * sparx5 es0 W1, sparx5 es2 W3, lan966x is2 W4, lan966x is2 W2
* Keyset type id - set by the API
- * VCAP_KF_HOST_MATCH: W1, lan966x: is2
- * The action from the SMAC_SIP4 or SMAC_SIP6 lookups. Used for IP source
- * guarding.
- * VCAP_KF_L2_FRM_TYPE: W4, lan966x: is2
- * Frame subtype for specific EtherTypes (MRP, DLR)
- * VCAP_KF_L2_PAYLOAD0: W16, lan966x: is2
- * Payload bytes 0-1 after the frame’s EtherType
- * VCAP_KF_L2_PAYLOAD1: W8, lan966x: is2
- * Payload byte 4 after the frame’s EtherType. This is specifically for PTP
- * frames.
- * VCAP_KF_L2_PAYLOAD2: W3, lan966x: is2
- * Bits 7, 2, and 1 from payload byte 6 after the frame’s EtherType. This is
- * specifically for PTP frames.
- * VCAP_KF_L2_LLC: W40, lan966x: is2
- * LLC header and data after up to two VLAN tags and the type/length field
- * VCAP_KF_L3_FRAGMENT: W1, lan966x: is2
- * Set if IPv4 frame is fragmented
- * VCAP_KF_L3_FRAG_OFS_GT0: W1, lan966x: is2
- * Set if IPv4 frame is fragmented and it is not the first fragment
- * VCAP_KF_L2_SNAP: W40, lan966x: is2
- * SNAP header after LLC header (AA-AA-03)
- * VCAP_KF_L4_1588_DOM: W8, lan966x: is2
- * PTP over UDP: domainNumber
- * VCAP_KF_L4_1588_VER: W4, lan966x: is2
- * PTP over UDP: version
- * VCAP_KF_OAM_MEPID: W16, lan966x: is2
- * CCM frame’s OAM MEP ID
- * VCAP_KF_OAM_OPCODE: W8, lan966x: is2
- * Frame’s OAM opcode
- * VCAP_KF_OAM_VER: W5, lan966x: is2
- * Frame’s OAM version
- * VCAP_KF_OAM_FLAGS: W8, lan966x: is2
- * Frame’s OAM flags
- * VCAP_KF_OAM_DETECTED: W1, lan966x: is2
- * This is missing in the datasheet, but present in the OAM keyset in XML
*/
/* Keyfield names */
@@ -334,6 +331,7 @@ enum vcap_key_field {
VCAP_KF_8021Q_PCP1,
VCAP_KF_8021Q_PCP2,
VCAP_KF_8021Q_PCP_CLS,
+ VCAP_KF_8021Q_TPID,
VCAP_KF_8021Q_TPID0,
VCAP_KF_8021Q_TPID1,
VCAP_KF_8021Q_TPID2,
@@ -352,13 +350,13 @@ enum vcap_key_field {
VCAP_KF_ARP_SENDER_MATCH_IS,
VCAP_KF_ARP_TGT_MATCH_IS,
VCAP_KF_COSID_CLS,
- VCAP_KF_DST_ENTRY,
VCAP_KF_ES0_ISDX_KEY_ENA,
VCAP_KF_ETYPE,
VCAP_KF_ETYPE_LEN_IS,
- VCAP_KF_ETYPE_MPLS,
+ VCAP_KF_HOST_MATCH,
VCAP_KF_IF_EGR_PORT_MASK,
VCAP_KF_IF_EGR_PORT_MASK_RNG,
+ VCAP_KF_IF_EGR_PORT_NO,
VCAP_KF_IF_IGR_PORT,
VCAP_KF_IF_IGR_PORT_MASK,
VCAP_KF_IF_IGR_PORT_MASK_L3,
@@ -373,17 +371,24 @@ enum vcap_key_field {
VCAP_KF_ISDX_GT0_IS,
VCAP_KF_L2_BC_IS,
VCAP_KF_L2_DMAC,
+ VCAP_KF_L2_FRM_TYPE,
VCAP_KF_L2_FWD_IS,
+ VCAP_KF_L2_LLC,
VCAP_KF_L2_MC_IS,
+ VCAP_KF_L2_PAYLOAD0,
+ VCAP_KF_L2_PAYLOAD1,
+ VCAP_KF_L2_PAYLOAD2,
VCAP_KF_L2_PAYLOAD_ETYPE,
VCAP_KF_L2_SMAC,
+ VCAP_KF_L2_SNAP,
VCAP_KF_L3_DIP_EQ_SIP_IS,
- VCAP_KF_L3_DMAC_DIP_MATCH,
VCAP_KF_L3_DPL_CLS,
VCAP_KF_L3_DSCP,
VCAP_KF_L3_DST_IS,
+ VCAP_KF_L3_FRAGMENT,
VCAP_KF_L3_FRAGMENT_TYPE,
VCAP_KF_L3_FRAG_INVLD_L4_LEN,
+ VCAP_KF_L3_FRAG_OFS_GT0,
VCAP_KF_L3_IP4_DIP,
VCAP_KF_L3_IP4_SIP,
VCAP_KF_L3_IP6_DIP,
@@ -392,9 +397,10 @@ enum vcap_key_field {
VCAP_KF_L3_OPTIONS_IS,
VCAP_KF_L3_PAYLOAD,
VCAP_KF_L3_RT_IS,
- VCAP_KF_L3_SMAC_SIP_MATCH,
VCAP_KF_L3_TOS,
VCAP_KF_L3_TTL_GT0,
+ VCAP_KF_L4_1588_DOM,
+ VCAP_KF_L4_1588_VER,
VCAP_KF_L4_ACK,
VCAP_KF_L4_DPORT,
VCAP_KF_L4_FIN,
@@ -411,30 +417,19 @@ enum vcap_key_field {
VCAP_KF_LOOKUP_GEN_IDX,
VCAP_KF_LOOKUP_GEN_IDX_SEL,
VCAP_KF_LOOKUP_PAG,
- VCAP_KF_MIRROR_ENA,
+ VCAP_KF_MIRROR_PROBE,
VCAP_KF_OAM_CCM_CNTS_EQ0,
+ VCAP_KF_OAM_DETECTED,
+ VCAP_KF_OAM_FLAGS,
VCAP_KF_OAM_MEL_FLAGS,
+ VCAP_KF_OAM_MEPID,
+ VCAP_KF_OAM_OPCODE,
+ VCAP_KF_OAM_VER,
VCAP_KF_OAM_Y1731_IS,
VCAP_KF_PROT_ACTIVE,
VCAP_KF_TCP_IS,
VCAP_KF_TCP_UDP_IS,
VCAP_KF_TYPE,
- VCAP_KF_HOST_MATCH,
- VCAP_KF_L2_FRM_TYPE,
- VCAP_KF_L2_PAYLOAD0,
- VCAP_KF_L2_PAYLOAD1,
- VCAP_KF_L2_PAYLOAD2,
- VCAP_KF_L2_LLC,
- VCAP_KF_L3_FRAGMENT,
- VCAP_KF_L3_FRAG_OFS_GT0,
- VCAP_KF_L2_SNAP,
- VCAP_KF_L4_1588_DOM,
- VCAP_KF_L4_1588_VER,
- VCAP_KF_OAM_MEPID,
- VCAP_KF_OAM_OPCODE,
- VCAP_KF_OAM_VER,
- VCAP_KF_OAM_FLAGS,
- VCAP_KF_OAM_DETECTED,
};
/* Actionset names with origin information */
@@ -443,14 +438,17 @@ enum vcap_actionfield_set {
VCAP_AFS_BASE_TYPE, /* sparx5 is2 X3, sparx5 es2 X3, lan966x is2 X2 */
VCAP_AFS_CLASSIFICATION, /* sparx5 is0 X2 */
VCAP_AFS_CLASS_REDUCED, /* sparx5 is0 X1 */
+ VCAP_AFS_ES0, /* sparx5 es0 X1 */
VCAP_AFS_FULL, /* sparx5 is0 X3 */
- VCAP_AFS_MLBS, /* sparx5 is0 X2 */
- VCAP_AFS_MLBS_REDUCED, /* sparx5 is0 X1 */
- VCAP_AFS_SMAC_SIP, /* lan966x is2 x1 */
+ VCAP_AFS_SMAC_SIP, /* lan966x is2 X1 */
};
/* List of actionfields with description
*
+ * VCAP_AF_ACL_ID: W6, lan966x: is2
+ * Logical ID for the entry. This ID is extracted together with the frame in the
+ * CPU extraction header. Only applicable to actions with CPU_COPY_ENA or
+ * HIT_ME_ONCE set.
* VCAP_AF_CLS_VID_SEL: W3, sparx5: is0
* Controls the classified VID: 0: VID_NONE: No action. 1: VID_ADD: New VID =
* old VID + VID_VAL. 2: VID_REPLACE: New VID = VID_VAL. 3: VID_FIRST_TAG: New
@@ -468,8 +466,16 @@ enum vcap_actionfield_set {
* VCAP_AF_CPU_COPY_ENA: W1, sparx5: is2/es2, lan966x: is2
* Setting this bit to 1 causes all frames that hit this action to be copied to
* the CPU extraction queue specified in CPU_QUEUE_NUM.
+ * VCAP_AF_CPU_QU: W3, sparx5: es0
+ * CPU extraction queue. Used when FWD_SEL >0 and PIPELINE_ACT = XTR.
* VCAP_AF_CPU_QUEUE_NUM: W3, sparx5: is2/es2, lan966x: is2
* CPU queue number. Used when CPU_COPY_ENA is set.
+ * VCAP_AF_DEI_A_VAL: W1, sparx5: es0
+ * DEI used in ES0 tag A. See TAG_A_DEI_SEL.
+ * VCAP_AF_DEI_B_VAL: W1, sparx5: es0
+ * DEI used in ES0 tag B. See TAG_B_DEI_SEL.
+ * VCAP_AF_DEI_C_VAL: W1, sparx5: es0
+ * DEI used in ES0 tag C. See TAG_C_DEI_SEL.
* VCAP_AF_DEI_ENA: W1, sparx5: is0
* If set, use DEI_VAL as classified DEI value. Otherwise, DEI from basic
* classification is used
@@ -483,19 +489,38 @@ enum vcap_actionfield_set {
* VCAP_AF_DSCP_ENA: W1, sparx5: is0
* If set, use DSCP_VAL as classified DSCP value. Otherwise, DSCP value from
* basic classification is used.
- * VCAP_AF_DSCP_VAL: W6, sparx5: is0
+ * VCAP_AF_DSCP_SEL: W3, sparx5: es0
+ * Selects source for DSCP. 0: Controlled by port configuration and IFH. 1:
+ * Classified DSCP via IFH. 2: DSCP_VAL. 3: Reserved. 4: Mapped using mapping
+ * table 0, otherwise use DSCP_VAL. 5: Mapped using mapping table 1, otherwise
+ * use mapping table 0. 6: Mapped using mapping table 2, otherwise use DSCP_VAL.
+ * 7: Mapped using mapping table 3, otherwise use mapping table 2
+ * VCAP_AF_DSCP_VAL: W6, sparx5: is0/es0
* See DSCP_ENA.
* VCAP_AF_ES2_REW_CMD: W3, sparx5: es2
* Command forwarded to REW: 0: No action. 1: SWAP MAC addresses. 2: Do L2CP
* DMAC translation when entering or leaving a tunnel.
+ * VCAP_AF_ESDX: W13, sparx5: es0
+ * Egress counter index. Used to index egress counter set as defined in
+ * REW::STAT_CFG.
+ * VCAP_AF_FWD_KILL_ENA: W1, lan966x: is2
+ * Setting this bit to 1 denies forwarding of the frame forwarding to any front
+ * port. The frame can still be copied to the CPU by other actions.
* VCAP_AF_FWD_MODE: W2, sparx5: es2
* Forward selector: 0: Forward. 1: Discard. 2: Redirect. 3: Copy.
+ * VCAP_AF_FWD_SEL: W2, sparx5: es0
+ * ES0 Forward selector. 0: No action. 1: Copy to loopback interface. 2:
+ * Redirect to loopback interface. 3: Discard
* VCAP_AF_HIT_ME_ONCE: W1, sparx5: is2/es2, lan966x: is2
* Setting this bit to 1 causes the first frame that hits this action where the
* HIT_CNT counter is zero to be copied to the CPU extraction queue specified in
* CPU_QUEUE_NUM. The HIT_CNT counter is then incremented and any frames that
* hit this action later are not copied to the CPU. To re-enable the HIT_ME_ONCE
* functionality, the HIT_CNT counter must be cleared.
+ * VCAP_AF_HOST_MATCH: W1, lan966x: is2
+ * Used for IP source guarding. If set, it signals that the host is a valid (for
+ * instance a valid combination of source MAC address and source IP address).
+ * HOST_MATCH is input to the IS2 keys.
* VCAP_AF_IGNORE_PIPELINE_CTRL: W1, sparx5: is2/es2
* Ignore ingress pipeline control. This enforces the use of the VCAP IS2 action
* even when the pipeline control has terminated the frame before VCAP IS2.
@@ -504,8 +529,13 @@ enum vcap_actionfield_set {
* VCAP_AF_ISDX_ADD_REPLACE_SEL: W1, sparx5: is0
* Controls the classified ISDX. 0: New ISDX = old ISDX + ISDX_VAL. 1: New ISDX
* = ISDX_VAL.
+ * VCAP_AF_ISDX_ENA: W1, lan966x: is2
+ * Setting this bit to 1 causes the classified ISDX to be set to the value of
+ * POLICE_IDX[8:0].
* VCAP_AF_ISDX_VAL: W12, sparx5: is0
* See isdx_add_replace_sel
+ * VCAP_AF_LOOP_ENA: W1, sparx5: es0
+ * 0: Forward based on PIPELINE_PT and FWD_SEL
* VCAP_AF_LRN_DIS: W1, sparx5: is2, lan966x: is2
* Setting this bit to 1 disables learning of frames hitting this action.
* VCAP_AF_MAP_IDX: W9, sparx5: is0
@@ -521,136 +551,190 @@ enum vcap_actionfield_set {
* are applied to. 0: No changes to the QoS Mapping Table lookup. 1: Update key
* type and index for QoS Mapping Table lookup #0. 2: Update key type and index
* for QoS Mapping Table lookup #1. 3: Reserved.
- * VCAP_AF_MASK_MODE: W3, sparx5: is0/is2, lan966x is2 W2
+ * VCAP_AF_MASK_MODE: sparx5 is0 W3, sparx5 is2 W3, lan966x is2 W2
* Controls the PORT_MASK use. Sparx5: 0: OR_DSTMASK, 1: AND_VLANMASK, 2:
* REPLACE_PGID, 3: REPLACE_ALL, 4: REDIR_PGID, 5: OR_PGID_MASK, 6: VSTAX, 7:
* Not applicable. LAN966X: 0: No action, 1: Permit/deny (AND), 2: Policy
* forwarding (DMAC lookup), 3: Redirect. The CPU port is untouched by
* MASK_MODE.
- * VCAP_AF_MATCH_ID: W16, sparx5: is0/is2
+ * VCAP_AF_MATCH_ID: W16, sparx5: is2
* Logical ID for the entry. The MATCH_ID is extracted together with the frame
* if the frame is forwarded to the CPU (CPU_COPY_ENA). The result is placed in
* IFH.CL_RSLT.
- * VCAP_AF_MATCH_ID_MASK: W16, sparx5: is0/is2
+ * VCAP_AF_MATCH_ID_MASK: W16, sparx5: is2
* Mask used by MATCH_ID.
+ * VCAP_AF_MIRROR_ENA: W1, lan966x: is2
+ * Setting this bit to 1 causes frames to be mirrored to the mirror target port
+ * (ANA::MIRRPORPORTS).
* VCAP_AF_MIRROR_PROBE: W2, sparx5: is2
* Mirroring performed according to configuration of a mirror probe. 0: No
* mirroring. 1: Mirror probe 0. 2: Mirror probe 1. 3: Mirror probe 2
* VCAP_AF_MIRROR_PROBE_ID: W2, sparx5: es2
* Signals a mirror probe to be placed in the IFH. Only possible when FWD_MODE
- * is copy. 0: No mirroring. 1–3: Use mirror probe 0-2.
+ * is copy. 0: No mirroring. 1-3: Use mirror probe 0-2.
* VCAP_AF_NXT_IDX: W12, sparx5: is0
* Index used as part of key (field G_IDX) in the next lookup.
* VCAP_AF_NXT_IDX_CTRL: W3, sparx5: is0
* Controls the generation of the G_IDX used in the VCAP CLM next lookup
* VCAP_AF_PAG_OVERRIDE_MASK: W8, sparx5: is0
- * Bits set in this mask will override PAG_VAL from port profile.  New PAG =
- * (PAG (input) AND ~PAG_OVERRIDE_MASK) OR (PAG_VAL AND PAG_OVERRIDE_MASK)
+ * Bits set in this mask will override PAG_VAL from port profile. New PAG = (PAG
+ * (input) AND ~PAG_OVERRIDE_MASK) OR (PAG_VAL AND PAG_OVERRIDE_MASK)
* VCAP_AF_PAG_VAL: W8, sparx5: is0
* See PAG_OVERRIDE_MASK.
+ * VCAP_AF_PCP_A_VAL: W3, sparx5: es0
+ * PCP used in ES0 tag A. See TAG_A_PCP_SEL.
+ * VCAP_AF_PCP_B_VAL: W3, sparx5: es0
+ * PCP used in ES0 tag B. See TAG_B_PCP_SEL.
+ * VCAP_AF_PCP_C_VAL: W3, sparx5: es0
+ * PCP used in ES0 tag C. See TAG_C_PCP_SEL.
* VCAP_AF_PCP_ENA: W1, sparx5: is0
* If set, use PCP_VAL as classified PCP value. Otherwise, PCP from basic
* classification is used.
* VCAP_AF_PCP_VAL: W3, sparx5: is0
* See PCP_ENA.
- * VCAP_AF_PIPELINE_FORCE_ENA: sparx5 is0 W2, sparx5 is2 W1
+ * VCAP_AF_PIPELINE_ACT: W1, sparx5: es0
+ * Pipeline action when FWD_SEL > 0. 0: XTR. CPU_QU selects CPU extraction queue
+ * 1: LBK_ASM.
+ * VCAP_AF_PIPELINE_FORCE_ENA: W1, sparx5: is2
* If set, use PIPELINE_PT unconditionally and set PIPELINE_ACT = NONE if
* PIPELINE_PT == NONE. Overrules previous settings of pipeline point.
- * VCAP_AF_PIPELINE_PT: W5, sparx5: is0/is2
+ * VCAP_AF_PIPELINE_PT: sparx5 is2 W5, sparx5 es0 W2
* Pipeline point used if PIPELINE_FORCE_ENA is set
* VCAP_AF_POLICE_ENA: W1, sparx5: is2/es2, lan966x: is2
* Setting this bit to 1 causes frames that hit this action to be policed by the
* ACL policer specified in POLICE_IDX. Only applies to the first lookup.
- * VCAP_AF_POLICE_IDX: W6, sparx5: is2/es2, lan966x: is2 W9
+ * VCAP_AF_POLICE_IDX: sparx5 is2 W6, sparx5 es2 W6, lan966x is2 W9
* Selects VCAP policer used when policing frames (POLICE_ENA)
* VCAP_AF_POLICE_REMARK: W1, sparx5: es2
* If set, frames exceeding policer rates are marked as yellow but not
* discarded.
+ * VCAP_AF_POLICE_VCAP_ONLY: W1, lan966x: is2
+ * Disable policing from QoS, and port policers. Only the VCAP policer selected
+ * by POLICE_IDX is active. Only applies to the second lookup.
+ * VCAP_AF_POP_VAL: W2, sparx5: es0
+ * Controls popping of Q-tags. The final number of Q-tags popped is calculated
+ * as shown in section 4.28.7.2 VLAN Pop Decision.
* VCAP_AF_PORT_MASK: sparx5 is0 W65, sparx5 is2 W68, lan966x is2 W8
* Port mask applied to the forwarding decision based on MASK_MODE.
+ * VCAP_AF_PUSH_CUSTOMER_TAG: W2, sparx5: es0
+ * Selects tag C mode: 0: Do not push tag C. 1: Push tag C if
+ * IFH.VSTAX.TAG.WAS_TAGGED = 1. 2: Push tag C if IFH.VSTAX.TAG.WAS_TAGGED = 0.
+ * 3: Push tag C if UNTAG_VID_ENA = 0 or (C-TAG.VID ! = VID_C_VAL).
+ * VCAP_AF_PUSH_INNER_TAG: W1, sparx5: es0
+ * Controls inner tagging. 0: Do not push ES0 tag B as inner tag. 1: Push ES0
+ * tag B as inner tag.
+ * VCAP_AF_PUSH_OUTER_TAG: W2, sparx5: es0
+ * Controls outer tagging. 0: No ES0 tag A: Port tag is allowed if enabled on
+ * port. 1: ES0 tag A: Push ES0 tag A. No port tag. 2: Force port tag: Always
+ * push port tag. No ES0 tag A. 3: Force untag: Never push port tag or ES0 tag
+ * A.
* VCAP_AF_QOS_ENA: W1, sparx5: is0
* If set, use QOS_VAL as classified QoS class. Otherwise, QoS class from basic
* classification is used.
* VCAP_AF_QOS_VAL: W3, sparx5: is0
* See QOS_ENA.
+ * VCAP_AF_REW_OP: W16, lan966x: is2
+ * Rewriter operation command.
* VCAP_AF_RT_DIS: W1, sparx5: is2
* If set, routing is disallowed. Only applies when IS_INNER_ACL is 0. See also
* IGR_ACL_ENA, EGR_ACL_ENA, and RLEG_STAT_IDX.
+ * VCAP_AF_SWAP_MACS_ENA: W1, sparx5: es0
+ * This setting is only active when FWD_SEL = 1 or FWD_SEL = 2 and PIPELINE_ACT
+ * = LBK_ASM. 0: No action. 1: Swap MACs and clear bit 40 in new SMAC.
+ * VCAP_AF_TAG_A_DEI_SEL: W3, sparx5: es0
+ * Selects PCP for ES0 tag A. 0: Classified DEI. 1: DEI_A_VAL. 2: DP and QoS
+ * mapped to PCP (per port table). 3: DP.
+ * VCAP_AF_TAG_A_PCP_SEL: W3, sparx5: es0
+ * Selects PCP for ES0 tag A. 0: Classified PCP. 1: PCP_A_VAL. 2: DP and QoS
+ * mapped to PCP (per port table). 3: QoS class.
+ * VCAP_AF_TAG_A_TPID_SEL: W3, sparx5: es0
+ * Selects TPID for ES0 tag A: 0: 0x8100. 1: 0x88A8. 2: Custom
+ * (REW:PORT:PORT_VLAN_CFG.PORT_TPID). 3: If IFH.TAG_TYPE = 0 then 0x8100 else
+ * custom.
+ * VCAP_AF_TAG_A_VID_SEL: W2, sparx5: es0
+ * Selects VID for ES0 tag A. 0: Classified VID + VID_A_VAL. 1: VID_A_VAL.
+ * VCAP_AF_TAG_B_DEI_SEL: W3, sparx5: es0
+ * Selects PCP for ES0 tag B. 0: Classified DEI. 1: DEI_B_VAL. 2: DP and QoS
+ * mapped to PCP (per port table). 3: DP.
+ * VCAP_AF_TAG_B_PCP_SEL: W3, sparx5: es0
+ * Selects PCP for ES0 tag B. 0: Classified PCP. 1: PCP_B_VAL. 2: DP and QoS
+ * mapped to PCP (per port table). 3: QoS class.
+ * VCAP_AF_TAG_B_TPID_SEL: W3, sparx5: es0
+ * Selects TPID for ES0 tag B. 0: 0x8100. 1: 0x88A8. 2: Custom
+ * (REW:PORT:PORT_VLAN_CFG.PORT_TPID). 3: If IFH.TAG_TYPE = 0 then 0x8100 else
+ * custom.
+ * VCAP_AF_TAG_B_VID_SEL: W2, sparx5: es0
+ * Selects VID for ES0 tag B. 0: Classified VID + VID_B_VAL. 1: VID_B_VAL.
+ * VCAP_AF_TAG_C_DEI_SEL: W3, sparx5: es0
+ * Selects DEI source for ES0 tag C. 0: Classified DEI. 1: DEI_C_VAL. 2:
+ * REW::DP_MAP.DP [IFH.VSTAX.QOS.DP]. 3: DEI of popped VLAN tag if available
+ * (IFH.VSTAX.TAG.WAS_TAGGED = 1 and tot_pop_cnt>0) else DEI_C_VAL. 4: Mapped
+ * using mapping table 0, otherwise use DEI_C_VAL. 5: Mapped using mapping table
+ * 1, otherwise use mapping table 0. 6: Mapped using mapping table 2, otherwise
+ * use DEI_C_VAL. 7: Mapped using mapping table 3, otherwise use mapping table
+ * 2.
+ * VCAP_AF_TAG_C_PCP_SEL: W3, sparx5: es0
+ * Selects PCP source for ES0 tag C. 0: Classified PCP. 1: PCP_C_VAL. 2:
+ * Reserved. 3: PCP of popped VLAN tag if available (IFH.VSTAX.TAG.WAS_TAGGED=1
+ * and tot_pop_cnt>0) else PCP_C_VAL. 4: Mapped using mapping table 0, otherwise
+ * use PCP_C_VAL. 5: Mapped using mapping table 1, otherwise use mapping table
+ * 0. 6: Mapped using mapping table 2, otherwise use PCP_C_VAL. 7: Mapped using
+ * mapping table 3, otherwise use mapping table 2.
+ * VCAP_AF_TAG_C_TPID_SEL: W3, sparx5: es0
+ * Selects TPID for ES0 tag C. 0: 0x8100. 1: 0x88A8. 2: Custom 1. 3: Custom 2.
+ * 4: Custom 3. 5: See TAG_A_TPID_SEL.
+ * VCAP_AF_TAG_C_VID_SEL: W2, sparx5: es0
+ * Selects VID for ES0 tag C. The resulting VID is termed C-TAG.VID. 0:
+ * Classified VID. 1: VID_C_VAL. 2: IFH.ENCAP.GVID. 3: Reserved.
* VCAP_AF_TYPE: W1, sparx5: is0
* Actionset type id - Set by the API
+ * VCAP_AF_UNTAG_VID_ENA: W1, sparx5: es0
+ * Controls insertion of tag C. Untag or insert mode can be selected. See
+ * PUSH_CUSTOMER_TAG.
+ * VCAP_AF_VID_A_VAL: W12, sparx5: es0
+ * VID used in ES0 tag A. See TAG_A_VID_SEL.
+ * VCAP_AF_VID_B_VAL: W12, sparx5: es0
+ * VID used in ES0 tag B. See TAG_B_VID_SEL.
+ * VCAP_AF_VID_C_VAL: W12, sparx5: es0
+ * VID used in ES0 tag C. See TAG_C_VID_SEL.
* VCAP_AF_VID_VAL: W13, sparx5: is0
* New VID Value
- * VCAP_AF_MIRROR_ENA: W1, lan966x: is2
- * Setting this bit to 1 causes frames to be mirrored to the mirror target
- * port (ANA::MIRRPORPORTS).
- * VCAP_AF_POLICE_VCAP_ONLY: W1, lan966x: is2
- * Disable policing from QoS, and port policers. Only the VCAP policer
- * selected by POLICE_IDX is active. Only applies to the second lookup.
- * VCAP_AF_REW_OP: W16, lan966x: is2
- * Rewriter operation command.
- * VCAP_AF_ISDX_ENA: W1, lan966x: is2
- * Setting this bit to 1 causes the classified ISDX to be set to the value of
- * POLICE_IDX[8:0].
- * VCAP_AF_ACL_ID: W6, lan966x: is2
- * Logical ID for the entry. This ID is extracted together with the frame in
- * the CPU extraction header. Only applicable to actions with CPU_COPY_ENA or
- * HIT_ME_ONCE set.
- * VCAP_AF_FWD_KILL_ENA: W1, lan966x: is2
- * Setting this bit to 1 denies forwarding of the frame forwarding to any
- * front port. The frame can still be copied to the CPU by other actions.
- * VCAP_AF_HOST_MATCH: W1, lan966x: is2
- * Used for IP source guarding. If set, it signals that the host is a valid
- * (for instance a valid combination of source MAC address and source IP
- * address). HOST_MATCH is input to the IS2 keys.
*/
/* Actionfield names */
enum vcap_action_field {
VCAP_AF_NO_VALUE, /* initial value */
- VCAP_AF_ACL_MAC,
- VCAP_AF_ACL_RT_MODE,
+ VCAP_AF_ACL_ID,
VCAP_AF_CLS_VID_SEL,
VCAP_AF_CNT_ID,
VCAP_AF_COPY_PORT_NUM,
VCAP_AF_COPY_QUEUE_NUM,
- VCAP_AF_COSID_ENA,
- VCAP_AF_COSID_VAL,
VCAP_AF_CPU_COPY_ENA,
- VCAP_AF_CPU_DIS,
- VCAP_AF_CPU_ENA,
- VCAP_AF_CPU_Q,
+ VCAP_AF_CPU_QU,
VCAP_AF_CPU_QUEUE_NUM,
- VCAP_AF_CUSTOM_ACE_ENA,
- VCAP_AF_CUSTOM_ACE_OFFSET,
+ VCAP_AF_DEI_A_VAL,
+ VCAP_AF_DEI_B_VAL,
+ VCAP_AF_DEI_C_VAL,
VCAP_AF_DEI_ENA,
VCAP_AF_DEI_VAL,
- VCAP_AF_DLB_OFFSET,
- VCAP_AF_DMAC_OFFSET_ENA,
VCAP_AF_DP_ENA,
VCAP_AF_DP_VAL,
VCAP_AF_DSCP_ENA,
+ VCAP_AF_DSCP_SEL,
VCAP_AF_DSCP_VAL,
- VCAP_AF_EGR_ACL_ENA,
VCAP_AF_ES2_REW_CMD,
- VCAP_AF_FWD_DIS,
+ VCAP_AF_ESDX,
+ VCAP_AF_FWD_KILL_ENA,
VCAP_AF_FWD_MODE,
- VCAP_AF_FWD_TYPE,
- VCAP_AF_GVID_ADD_REPLACE_SEL,
+ VCAP_AF_FWD_SEL,
VCAP_AF_HIT_ME_ONCE,
+ VCAP_AF_HOST_MATCH,
VCAP_AF_IGNORE_PIPELINE_CTRL,
- VCAP_AF_IGR_ACL_ENA,
- VCAP_AF_INJ_MASQ_ENA,
- VCAP_AF_INJ_MASQ_LPORT,
- VCAP_AF_INJ_MASQ_PORT,
VCAP_AF_INTR_ENA,
VCAP_AF_ISDX_ADD_REPLACE_SEL,
+ VCAP_AF_ISDX_ENA,
VCAP_AF_ISDX_VAL,
- VCAP_AF_IS_INNER_ACL,
- VCAP_AF_L3_MAC_UPDATE_DIS,
- VCAP_AF_LOG_MSG_INTERVAL,
- VCAP_AF_LPM_AFFIX_ENA,
- VCAP_AF_LPM_AFFIX_VAL,
- VCAP_AF_LPORT_ENA,
+ VCAP_AF_LOOP_ENA,
VCAP_AF_LRN_DIS,
VCAP_AF_MAP_IDX,
VCAP_AF_MAP_KEY,
@@ -658,78 +742,53 @@ enum vcap_action_field {
VCAP_AF_MASK_MODE,
VCAP_AF_MATCH_ID,
VCAP_AF_MATCH_ID_MASK,
- VCAP_AF_MIP_SEL,
+ VCAP_AF_MIRROR_ENA,
VCAP_AF_MIRROR_PROBE,
VCAP_AF_MIRROR_PROBE_ID,
- VCAP_AF_MPLS_IP_CTRL_ENA,
- VCAP_AF_MPLS_MEP_ENA,
- VCAP_AF_MPLS_MIP_ENA,
- VCAP_AF_MPLS_OAM_FLAVOR,
- VCAP_AF_MPLS_OAM_TYPE,
- VCAP_AF_NUM_VLD_LABELS,
VCAP_AF_NXT_IDX,
VCAP_AF_NXT_IDX_CTRL,
- VCAP_AF_NXT_KEY_TYPE,
- VCAP_AF_NXT_NORMALIZE,
- VCAP_AF_NXT_NORM_W16_OFFSET,
- VCAP_AF_NXT_NORM_W32_OFFSET,
- VCAP_AF_NXT_OFFSET_FROM_TYPE,
- VCAP_AF_NXT_TYPE_AFTER_OFFSET,
- VCAP_AF_OAM_IP_BFD_ENA,
- VCAP_AF_OAM_TWAMP_ENA,
- VCAP_AF_OAM_Y1731_SEL,
VCAP_AF_PAG_OVERRIDE_MASK,
VCAP_AF_PAG_VAL,
+ VCAP_AF_PCP_A_VAL,
+ VCAP_AF_PCP_B_VAL,
+ VCAP_AF_PCP_C_VAL,
VCAP_AF_PCP_ENA,
VCAP_AF_PCP_VAL,
- VCAP_AF_PIPELINE_ACT_SEL,
+ VCAP_AF_PIPELINE_ACT,
VCAP_AF_PIPELINE_FORCE_ENA,
VCAP_AF_PIPELINE_PT,
- VCAP_AF_PIPELINE_PT_REDUCED,
VCAP_AF_POLICE_ENA,
VCAP_AF_POLICE_IDX,
VCAP_AF_POLICE_REMARK,
+ VCAP_AF_POLICE_VCAP_ONLY,
+ VCAP_AF_POP_VAL,
VCAP_AF_PORT_MASK,
- VCAP_AF_PTP_MASTER_SEL,
+ VCAP_AF_PUSH_CUSTOMER_TAG,
+ VCAP_AF_PUSH_INNER_TAG,
+ VCAP_AF_PUSH_OUTER_TAG,
VCAP_AF_QOS_ENA,
VCAP_AF_QOS_VAL,
- VCAP_AF_REW_CMD,
- VCAP_AF_RLEG_DMAC_CHK_DIS,
- VCAP_AF_RLEG_STAT_IDX,
- VCAP_AF_RSDX_ENA,
- VCAP_AF_RSDX_VAL,
- VCAP_AF_RSVD_LBL_VAL,
+ VCAP_AF_REW_OP,
VCAP_AF_RT_DIS,
- VCAP_AF_RT_SEL,
- VCAP_AF_S2_KEY_SEL_ENA,
- VCAP_AF_S2_KEY_SEL_IDX,
- VCAP_AF_SAM_SEQ_ENA,
- VCAP_AF_SIP_IDX,
- VCAP_AF_SWAP_MAC_ENA,
- VCAP_AF_TCP_UDP_DPORT,
- VCAP_AF_TCP_UDP_ENA,
- VCAP_AF_TCP_UDP_SPORT,
- VCAP_AF_TC_ENA,
- VCAP_AF_TC_LABEL,
- VCAP_AF_TPID_SEL,
- VCAP_AF_TTL_DECR_DIS,
- VCAP_AF_TTL_ENA,
- VCAP_AF_TTL_LABEL,
- VCAP_AF_TTL_UPDATE_ENA,
+ VCAP_AF_SWAP_MACS_ENA,
+ VCAP_AF_TAG_A_DEI_SEL,
+ VCAP_AF_TAG_A_PCP_SEL,
+ VCAP_AF_TAG_A_TPID_SEL,
+ VCAP_AF_TAG_A_VID_SEL,
+ VCAP_AF_TAG_B_DEI_SEL,
+ VCAP_AF_TAG_B_PCP_SEL,
+ VCAP_AF_TAG_B_TPID_SEL,
+ VCAP_AF_TAG_B_VID_SEL,
+ VCAP_AF_TAG_C_DEI_SEL,
+ VCAP_AF_TAG_C_PCP_SEL,
+ VCAP_AF_TAG_C_TPID_SEL,
+ VCAP_AF_TAG_C_VID_SEL,
VCAP_AF_TYPE,
+ VCAP_AF_UNTAG_VID_ENA,
+ VCAP_AF_VID_A_VAL,
+ VCAP_AF_VID_B_VAL,
+ VCAP_AF_VID_C_VAL,
VCAP_AF_VID_VAL,
- VCAP_AF_VLAN_POP_CNT,
- VCAP_AF_VLAN_POP_CNT_ENA,
- VCAP_AF_VLAN_PUSH_CNT,
- VCAP_AF_VLAN_PUSH_CNT_ENA,
- VCAP_AF_VLAN_WAS_TAGGED,
- VCAP_AF_MIRROR_ENA,
- VCAP_AF_POLICE_VCAP_ONLY,
- VCAP_AF_REW_OP,
- VCAP_AF_ISDX_ENA,
- VCAP_AF_ACL_ID,
- VCAP_AF_FWD_KILL_ENA,
- VCAP_AF_HOST_MATCH,
};
#endif /* __VCAP_AG_API__ */
diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api.c b/drivers/net/ethernet/microchip/vcap/vcap_api.c
index 664aae3e2acd..4847d0d99ec9 100644
--- a/drivers/net/ethernet/microchip/vcap/vcap_api.c
+++ b/drivers/net/ethernet/microchip/vcap/vcap_api.c
@@ -37,11 +37,13 @@ struct vcap_rule_move {
int count; /* blocksize of addresses to move */
};
-/* Stores the filter cookie that enabled the port */
+/* Stores the filter cookie and chain id that enabled the port */
struct vcap_enabled_port {
struct list_head list; /* for insertion in enabled ports list */
struct net_device *ndev; /* the enabled port */
unsigned long cookie; /* filter that enabled the port */
+ int src_cid; /* source chain id */
+ int dst_cid; /* destination chain id */
};
void vcap_iter_set(struct vcap_stream_iter *itr, int sw_width,
@@ -508,10 +510,133 @@ static void vcap_encode_keyfield_typegroups(struct vcap_control *vctrl,
vcap_encode_typegroups(cache->maskstream, sw_width, tgt, true);
}
+/* Copy data from src to dst but reverse the data in chunks of 32bits.
+ * For example if src is 00:11:22:33:44:55 where 55 is LSB the dst will
+ * have the value 22:33:44:55:00:11.
+ */
+static void vcap_copy_to_w32be(u8 *dst, const u8 *src, int size)
+{
+ for (int idx = 0; idx < size; ++idx) {
+ int first_byte_index = 0;
+ int nidx;
+
+ first_byte_index = size - (((idx >> 2) + 1) << 2);
+ if (first_byte_index < 0)
+ first_byte_index = 0;
+ nidx = idx + first_byte_index - (idx & ~0x3);
+ dst[nidx] = src[idx];
+ }
+}
+
+static void
+vcap_copy_from_client_keyfield(struct vcap_rule *rule,
+ struct vcap_client_keyfield *dst,
+ const struct vcap_client_keyfield *src)
+{
+ struct vcap_rule_internal *ri = to_intrule(rule);
+ const struct vcap_client_keyfield_data *sdata;
+ struct vcap_client_keyfield_data *ddata;
+ int size;
+
+ dst->ctrl.type = src->ctrl.type;
+ dst->ctrl.key = src->ctrl.key;
+ INIT_LIST_HEAD(&dst->ctrl.list);
+ sdata = &src->data;
+ ddata = &dst->data;
+
+ if (!ri->admin->w32be) {
+ memcpy(ddata, sdata, sizeof(dst->data));
+ return;
+ }
+
+ size = keyfield_size_table[dst->ctrl.type] / 2;
+
+ switch (dst->ctrl.type) {
+ case VCAP_FIELD_BIT:
+ case VCAP_FIELD_U32:
+ memcpy(ddata, sdata, sizeof(dst->data));
+ break;
+ case VCAP_FIELD_U48:
+ vcap_copy_to_w32be(ddata->u48.value, src->data.u48.value, size);
+ vcap_copy_to_w32be(ddata->u48.mask, src->data.u48.mask, size);
+ break;
+ case VCAP_FIELD_U56:
+ vcap_copy_to_w32be(ddata->u56.value, sdata->u56.value, size);
+ vcap_copy_to_w32be(ddata->u56.mask, sdata->u56.mask, size);
+ break;
+ case VCAP_FIELD_U64:
+ vcap_copy_to_w32be(ddata->u64.value, sdata->u64.value, size);
+ vcap_copy_to_w32be(ddata->u64.mask, sdata->u64.mask, size);
+ break;
+ case VCAP_FIELD_U72:
+ vcap_copy_to_w32be(ddata->u72.value, sdata->u72.value, size);
+ vcap_copy_to_w32be(ddata->u72.mask, sdata->u72.mask, size);
+ break;
+ case VCAP_FIELD_U112:
+ vcap_copy_to_w32be(ddata->u112.value, sdata->u112.value, size);
+ vcap_copy_to_w32be(ddata->u112.mask, sdata->u112.mask, size);
+ break;
+ case VCAP_FIELD_U128:
+ vcap_copy_to_w32be(ddata->u128.value, sdata->u128.value, size);
+ vcap_copy_to_w32be(ddata->u128.mask, sdata->u128.mask, size);
+ break;
+ }
+}
+
+static void
+vcap_copy_from_client_actionfield(struct vcap_rule *rule,
+ struct vcap_client_actionfield *dst,
+ const struct vcap_client_actionfield *src)
+{
+ struct vcap_rule_internal *ri = to_intrule(rule);
+ const struct vcap_client_actionfield_data *sdata;
+ struct vcap_client_actionfield_data *ddata;
+ int size;
+
+ dst->ctrl.type = src->ctrl.type;
+ dst->ctrl.action = src->ctrl.action;
+ INIT_LIST_HEAD(&dst->ctrl.list);
+ sdata = &src->data;
+ ddata = &dst->data;
+
+ if (!ri->admin->w32be) {
+ memcpy(ddata, sdata, sizeof(dst->data));
+ return;
+ }
+
+ size = actionfield_size_table[dst->ctrl.type];
+
+ switch (dst->ctrl.type) {
+ case VCAP_FIELD_BIT:
+ case VCAP_FIELD_U32:
+ memcpy(ddata, sdata, sizeof(dst->data));
+ break;
+ case VCAP_FIELD_U48:
+ vcap_copy_to_w32be(ddata->u48.value, sdata->u48.value, size);
+ break;
+ case VCAP_FIELD_U56:
+ vcap_copy_to_w32be(ddata->u56.value, sdata->u56.value, size);
+ break;
+ case VCAP_FIELD_U64:
+ vcap_copy_to_w32be(ddata->u64.value, sdata->u64.value, size);
+ break;
+ case VCAP_FIELD_U72:
+ vcap_copy_to_w32be(ddata->u72.value, sdata->u72.value, size);
+ break;
+ case VCAP_FIELD_U112:
+ vcap_copy_to_w32be(ddata->u112.value, sdata->u112.value, size);
+ break;
+ case VCAP_FIELD_U128:
+ vcap_copy_to_w32be(ddata->u128.value, sdata->u128.value, size);
+ break;
+ }
+}
+
static int vcap_encode_rule_keyset(struct vcap_rule_internal *ri)
{
const struct vcap_client_keyfield *ckf;
const struct vcap_typegroup *tg_table;
+ struct vcap_client_keyfield tempkf;
const struct vcap_field *kf_table;
int keyset_size;
@@ -552,7 +677,9 @@ static int vcap_encode_rule_keyset(struct vcap_rule_internal *ri)
__func__, __LINE__, ckf->ctrl.key);
return -EINVAL;
}
- vcap_encode_keyfield(ri, ckf, &kf_table[ckf->ctrl.key], tg_table);
+ vcap_copy_from_client_keyfield(&ri->data, &tempkf, ckf);
+ vcap_encode_keyfield(ri, &tempkf, &kf_table[ckf->ctrl.key],
+ tg_table);
}
/* Add typegroup bits to the key/mask bitstreams */
vcap_encode_keyfield_typegroups(ri->vctrl, ri, tg_table);
@@ -667,6 +794,7 @@ static int vcap_encode_rule_actionset(struct vcap_rule_internal *ri)
{
const struct vcap_client_actionfield *caf;
const struct vcap_typegroup *tg_table;
+ struct vcap_client_actionfield tempaf;
const struct vcap_field *af_table;
int actionset_size;
@@ -707,8 +835,9 @@ static int vcap_encode_rule_actionset(struct vcap_rule_internal *ri)
__func__, __LINE__, caf->ctrl.action);
return -EINVAL;
}
- vcap_encode_actionfield(ri, caf, &af_table[caf->ctrl.action],
- tg_table);
+ vcap_copy_from_client_actionfield(&ri->data, &tempaf, caf);
+ vcap_encode_actionfield(ri, &tempaf,
+ &af_table[caf->ctrl.action], tg_table);
}
/* Add typegroup bits to the entry bitstreams */
vcap_encode_actionfield_typegroups(ri, tg_table);
@@ -738,7 +867,7 @@ int vcap_api_check(struct vcap_control *ctrl)
!ctrl->ops->add_default_fields || !ctrl->ops->cache_erase ||
!ctrl->ops->cache_write || !ctrl->ops->cache_read ||
!ctrl->ops->init || !ctrl->ops->update || !ctrl->ops->move ||
- !ctrl->ops->port_info || !ctrl->ops->enable) {
+ !ctrl->ops->port_info) {
pr_err("%s:%d: client operations are missing\n",
__func__, __LINE__);
return -ENOENT;
@@ -791,9 +920,8 @@ int vcap_set_rule_set_actionset(struct vcap_rule *rule,
}
EXPORT_SYMBOL_GPL(vcap_set_rule_set_actionset);
-/* Find a rule with a provided rule id */
-static struct vcap_rule_internal *vcap_lookup_rule(struct vcap_control *vctrl,
- u32 id)
+/* Check if a rule with this id exists */
+static bool vcap_rule_exists(struct vcap_control *vctrl, u32 id)
{
struct vcap_rule_internal *ri;
struct vcap_admin *admin;
@@ -802,7 +930,25 @@ static struct vcap_rule_internal *vcap_lookup_rule(struct vcap_control *vctrl,
list_for_each_entry(admin, &vctrl->list, list)
list_for_each_entry(ri, &admin->rules, list)
if (ri->data.id == id)
+ return true;
+ return false;
+}
+
+/* Find a rule with a provided rule id return a locked vcap */
+static struct vcap_rule_internal *
+vcap_get_locked_rule(struct vcap_control *vctrl, u32 id)
+{
+ struct vcap_rule_internal *ri;
+ struct vcap_admin *admin;
+
+ /* Look for the rule id in all vcaps */
+ list_for_each_entry(admin, &vctrl->list, list) {
+ mutex_lock(&admin->lock);
+ list_for_each_entry(ri, &admin->rules, list)
+ if (ri->data.id == id)
return ri;
+ mutex_unlock(&admin->lock);
+ }
return NULL;
}
@@ -811,19 +957,31 @@ int vcap_lookup_rule_by_cookie(struct vcap_control *vctrl, u64 cookie)
{
struct vcap_rule_internal *ri;
struct vcap_admin *admin;
+ int id = 0;
/* Look for the rule id in all vcaps */
- list_for_each_entry(admin, &vctrl->list, list)
- list_for_each_entry(ri, &admin->rules, list)
- if (ri->data.cookie == cookie)
- return ri->data.id;
+ list_for_each_entry(admin, &vctrl->list, list) {
+ mutex_lock(&admin->lock);
+ list_for_each_entry(ri, &admin->rules, list) {
+ if (ri->data.cookie == cookie) {
+ id = ri->data.id;
+ break;
+ }
+ }
+ mutex_unlock(&admin->lock);
+ if (id)
+ return id;
+ }
return -ENOENT;
}
EXPORT_SYMBOL_GPL(vcap_lookup_rule_by_cookie);
-/* Make a shallow copy of the rule without the fields */
-struct vcap_rule_internal *vcap_dup_rule(struct vcap_rule_internal *ri)
+/* Make a copy of the rule, shallow or full */
+static struct vcap_rule_internal *vcap_dup_rule(struct vcap_rule_internal *ri,
+ bool full)
{
+ struct vcap_client_actionfield *caf, *newcaf;
+ struct vcap_client_keyfield *ckf, *newckf;
struct vcap_rule_internal *duprule;
/* Allocate the client part */
@@ -836,6 +994,25 @@ struct vcap_rule_internal *vcap_dup_rule(struct vcap_rule_internal *ri)
/* No elements in these lists */
INIT_LIST_HEAD(&duprule->data.keyfields);
INIT_LIST_HEAD(&duprule->data.actionfields);
+
+ /* A full rule copy includes keys and actions */
+ if (!full)
+ return duprule;
+
+ list_for_each_entry(ckf, &ri->data.keyfields, ctrl.list) {
+ newckf = kmemdup(ckf, sizeof(*newckf), GFP_KERNEL);
+ if (!newckf)
+ return ERR_PTR(-ENOMEM);
+ list_add_tail(&newckf->ctrl.list, &duprule->data.keyfields);
+ }
+
+ list_for_each_entry(caf, &ri->data.actionfields, ctrl.list) {
+ newcaf = kmemdup(caf, sizeof(*newcaf), GFP_KERNEL);
+ if (!newcaf)
+ return ERR_PTR(-ENOMEM);
+ list_add_tail(&newcaf->ctrl.list, &duprule->data.actionfields);
+ }
+
return duprule;
}
@@ -1424,39 +1601,65 @@ struct vcap_admin *vcap_find_admin(struct vcap_control *vctrl, int cid)
}
EXPORT_SYMBOL_GPL(vcap_find_admin);
-/* Is the next chain id in the following lookup, possible in another VCAP */
-bool vcap_is_next_lookup(struct vcap_control *vctrl, int cur_cid, int next_cid)
+/* Is this the last admin instance ordered by chain id and direction */
+static bool vcap_admin_is_last(struct vcap_control *vctrl,
+ struct vcap_admin *admin,
+ bool ingress)
{
- struct vcap_admin *admin, *next_admin;
- int lookup, next_lookup;
+ struct vcap_admin *iter, *last = NULL;
+ int max_cid = 0;
- /* The offset must be at least one lookup */
- if (next_cid < cur_cid + VCAP_CID_LOOKUP_SIZE)
+ list_for_each_entry(iter, &vctrl->list, list) {
+ if (iter->first_cid > max_cid &&
+ iter->ingress == ingress) {
+ last = iter;
+ max_cid = iter->first_cid;
+ }
+ }
+ if (!last)
return false;
- if (vcap_api_check(vctrl))
- return false;
+ return admin == last;
+}
- admin = vcap_find_admin(vctrl, cur_cid);
- if (!admin)
+/* Calculate the value used for chaining VCAP rules */
+int vcap_chain_offset(struct vcap_control *vctrl, int from_cid, int to_cid)
+{
+ int diff = to_cid - from_cid;
+
+ if (diff < 0) /* Wrong direction */
+ return diff;
+ to_cid %= VCAP_CID_LOOKUP_SIZE;
+ if (to_cid == 0) /* Destination aligned to a lookup == no chaining */
+ return 0;
+ diff %= VCAP_CID_LOOKUP_SIZE; /* Limit to a value within a lookup */
+ return diff;
+}
+EXPORT_SYMBOL_GPL(vcap_chain_offset);
+
+/* Is the next chain id in one of the following lookups
+ * For now this does not support filters linked to other filters using
+ * keys and actions. That will be added later.
+ */
+bool vcap_is_next_lookup(struct vcap_control *vctrl, int src_cid, int dst_cid)
+{
+ struct vcap_admin *admin;
+ int next_cid;
+
+ if (vcap_api_check(vctrl))
return false;
- /* If no VCAP contains the next chain, the next chain must be beyond
- * the last chain in the current VCAP
- */
- next_admin = vcap_find_admin(vctrl, next_cid);
- if (!next_admin)
- return next_cid > admin->last_cid;
+ /* The offset must be at least one lookup so round up one chain */
+ next_cid = roundup(src_cid + 1, VCAP_CID_LOOKUP_SIZE);
- lookup = vcap_chain_id_to_lookup(admin, cur_cid);
- next_lookup = vcap_chain_id_to_lookup(next_admin, next_cid);
+ if (dst_cid < next_cid)
+ return false;
- /* Next lookup must be the following lookup */
- if (admin == next_admin || admin->vtype == next_admin->vtype)
- return next_lookup == lookup + 1;
+ admin = vcap_find_admin(vctrl, dst_cid);
+ if (!admin)
+ return false;
- /* Must be the first lookup in the next VCAP instance */
- return next_lookup == 0;
+ return true;
}
EXPORT_SYMBOL_GPL(vcap_is_next_lookup);
@@ -1504,6 +1707,39 @@ static int vcap_add_type_keyfield(struct vcap_rule *rule)
return 0;
}
+/* Add the actionset typefield to the list of rule actionfields */
+static int vcap_add_type_actionfield(struct vcap_rule *rule)
+{
+ enum vcap_actionfield_set actionset = rule->actionset;
+ struct vcap_rule_internal *ri = to_intrule(rule);
+ enum vcap_type vt = ri->admin->vtype;
+ const struct vcap_field *fields;
+ const struct vcap_set *aset;
+ int ret = -EINVAL;
+
+ aset = vcap_actionfieldset(ri->vctrl, vt, actionset);
+ if (!aset)
+ return ret;
+ if (aset->type_id == (u8)-1) /* No type field is needed */
+ return 0;
+
+ fields = vcap_actionfields(ri->vctrl, vt, actionset);
+ if (!fields)
+ return -EINVAL;
+ if (fields[VCAP_AF_TYPE].width > 1) {
+ ret = vcap_rule_add_action_u32(rule, VCAP_AF_TYPE,
+ aset->type_id);
+ } else {
+ if (aset->type_id)
+ ret = vcap_rule_add_action_bit(rule, VCAP_AF_TYPE,
+ VCAP_BIT_1);
+ else
+ ret = vcap_rule_add_action_bit(rule, VCAP_AF_TYPE,
+ VCAP_BIT_0);
+ }
+ return ret;
+}
+
/* Add a keyset to a keyset list */
bool vcap_keyset_list_add(struct vcap_keyset_list *keysetlist,
enum vcap_keyfield_set keyset)
@@ -1521,6 +1757,22 @@ bool vcap_keyset_list_add(struct vcap_keyset_list *keysetlist,
}
EXPORT_SYMBOL_GPL(vcap_keyset_list_add);
+/* Add a actionset to a actionset list */
+static bool vcap_actionset_list_add(struct vcap_actionset_list *actionsetlist,
+ enum vcap_actionfield_set actionset)
+{
+ int idx;
+
+ if (actionsetlist->cnt < actionsetlist->max) {
+ /* Avoid duplicates */
+ for (idx = 0; idx < actionsetlist->cnt; ++idx)
+ if (actionsetlist->actionsets[idx] == actionset)
+ return actionsetlist->cnt < actionsetlist->max;
+ actionsetlist->actionsets[actionsetlist->cnt++] = actionset;
+ }
+ return actionsetlist->cnt < actionsetlist->max;
+}
+
/* map keyset id to a string with the keyset name */
const char *vcap_keyset_name(struct vcap_control *vctrl,
enum vcap_keyfield_set keyset)
@@ -1629,6 +1881,75 @@ bool vcap_rule_find_keysets(struct vcap_rule *rule,
}
EXPORT_SYMBOL_GPL(vcap_rule_find_keysets);
+/* Return the actionfield that matches a action in a actionset */
+static const struct vcap_field *
+vcap_find_actionset_actionfield(struct vcap_control *vctrl,
+ enum vcap_type vtype,
+ enum vcap_actionfield_set actionset,
+ enum vcap_action_field action)
+{
+ const struct vcap_field *fields;
+ int idx, count;
+
+ fields = vcap_actionfields(vctrl, vtype, actionset);
+ if (!fields)
+ return NULL;
+
+ /* Iterate the actionfields of the actionset */
+ count = vcap_actionfield_count(vctrl, vtype, actionset);
+ for (idx = 0; idx < count; ++idx) {
+ if (fields[idx].width == 0)
+ continue;
+
+ if (action == idx)
+ return &fields[idx];
+ }
+
+ return NULL;
+}
+
+/* Match a list of actions against the actionsets available in a vcap type */
+static bool vcap_rule_find_actionsets(struct vcap_rule_internal *ri,
+ struct vcap_actionset_list *matches)
+{
+ int actionset, found, actioncount, map_size;
+ const struct vcap_client_actionfield *ckf;
+ const struct vcap_field **map;
+ enum vcap_type vtype;
+
+ vtype = ri->admin->vtype;
+ map = ri->vctrl->vcaps[vtype].actionfield_set_map;
+ map_size = ri->vctrl->vcaps[vtype].actionfield_set_size;
+
+ /* Get a count of the actionfields we want to match */
+ actioncount = 0;
+ list_for_each_entry(ckf, &ri->data.actionfields, ctrl.list)
+ ++actioncount;
+
+ matches->cnt = 0;
+ /* Iterate the actionsets of the VCAP */
+ for (actionset = 0; actionset < map_size; ++actionset) {
+ if (!map[actionset])
+ continue;
+
+ /* Iterate the actions in the rule */
+ found = 0;
+ list_for_each_entry(ckf, &ri->data.actionfields, ctrl.list)
+ if (vcap_find_actionset_actionfield(ri->vctrl, vtype,
+ actionset,
+ ckf->ctrl.action))
+ ++found;
+
+ /* Save the actionset if all actionfields were found */
+ if (found == actioncount)
+ if (!vcap_actionset_list_add(matches, actionset))
+ /* bail out when the quota is filled */
+ break;
+ }
+
+ return matches->cnt > 0;
+}
+
/* Validate a rule with respect to available port keys */
int vcap_val_rule(struct vcap_rule *rule, u16 l3_proto)
{
@@ -1680,13 +2001,26 @@ int vcap_val_rule(struct vcap_rule *rule, u16 l3_proto)
return ret;
}
if (ri->data.actionset == VCAP_AFS_NO_VALUE) {
- /* Later also actionsets will be matched against actions in
- * the rule, and the type will be set accordingly
- */
- ri->data.exterr = VCAP_ERR_NO_ACTIONSET_MATCH;
- return -EINVAL;
+ struct vcap_actionset_list matches = {};
+ enum vcap_actionfield_set actionsets[10];
+
+ matches.actionsets = actionsets;
+ matches.max = ARRAY_SIZE(actionsets);
+
+ /* Find an actionset that fits the rule actions */
+ if (!vcap_rule_find_actionsets(ri, &matches)) {
+ ri->data.exterr = VCAP_ERR_NO_ACTIONSET_MATCH;
+ return -EINVAL;
+ }
+ ret = vcap_set_rule_set_actionset(rule, actionsets[0]);
+ if (ret < 0) {
+ pr_err("%s:%d: actionset was not updated: %d\n",
+ __func__, __LINE__, ret);
+ return ret;
+ }
}
vcap_add_type_keyfield(rule);
+ vcap_add_type_actionfield(rule);
/* Add default fields to this rule */
ri->vctrl->ops->add_default_fields(ri->ndev, ri->admin, rule);
@@ -1721,7 +2055,7 @@ static u32 vcap_set_rule_id(struct vcap_rule_internal *ri)
return ri->data.id;
for (u32 next_id = 1; next_id < ~0; ++next_id) {
- if (!vcap_lookup_rule(ri->vctrl, next_id)) {
+ if (!vcap_rule_exists(ri->vctrl, next_id)) {
ri->data.id = next_id;
break;
}
@@ -1756,8 +2090,8 @@ static int vcap_insert_rule(struct vcap_rule_internal *ri,
ri->addr = vcap_next_rule_addr(admin->last_used_addr, ri);
admin->last_used_addr = ri->addr;
- /* Add a shallow copy of the rule to the VCAP list */
- duprule = vcap_dup_rule(ri);
+ /* Add a copy of the rule to the VCAP list */
+ duprule = vcap_dup_rule(ri, ri->state == VCAP_RS_DISABLED);
if (IS_ERR(duprule))
return PTR_ERR(duprule);
@@ -1770,8 +2104,8 @@ static int vcap_insert_rule(struct vcap_rule_internal *ri,
ri->addr = vcap_next_rule_addr(addr, ri);
addr = ri->addr;
- /* Add a shallow copy of the rule to the VCAP list */
- duprule = vcap_dup_rule(ri);
+ /* Add a copy of the rule to the VCAP list */
+ duprule = vcap_dup_rule(ri, ri->state == VCAP_RS_DISABLED);
if (IS_ERR(duprule))
return PTR_ERR(duprule);
@@ -1803,11 +2137,96 @@ static void vcap_move_rules(struct vcap_rule_internal *ri,
move->offset, move->count);
}
+/* Check if the chain is already used to enable a VCAP lookup for this port */
+static bool vcap_is_chain_used(struct vcap_control *vctrl,
+ struct net_device *ndev, int src_cid)
+{
+ struct vcap_enabled_port *eport;
+ struct vcap_admin *admin;
+
+ list_for_each_entry(admin, &vctrl->list, list)
+ list_for_each_entry(eport, &admin->enabled, list)
+ if (eport->src_cid == src_cid && eport->ndev == ndev)
+ return true;
+
+ return false;
+}
+
+/* Fetch the next chain in the enabled list for the port */
+static int vcap_get_next_chain(struct vcap_control *vctrl,
+ struct net_device *ndev,
+ int dst_cid)
+{
+ struct vcap_enabled_port *eport;
+ struct vcap_admin *admin;
+
+ list_for_each_entry(admin, &vctrl->list, list) {
+ list_for_each_entry(eport, &admin->enabled, list) {
+ if (eport->ndev != ndev)
+ continue;
+ if (eport->src_cid == dst_cid)
+ return eport->dst_cid;
+ }
+ }
+
+ return 0;
+}
+
+static bool vcap_path_exist(struct vcap_control *vctrl, struct net_device *ndev,
+ int dst_cid)
+{
+ int cid = rounddown(dst_cid, VCAP_CID_LOOKUP_SIZE);
+ struct vcap_enabled_port *eport = NULL;
+ struct vcap_enabled_port *elem;
+ struct vcap_admin *admin;
+ int tmp;
+
+ if (cid == 0) /* Chain zero is always available */
+ return true;
+
+ /* Find first entry that starts from chain 0*/
+ list_for_each_entry(admin, &vctrl->list, list) {
+ list_for_each_entry(elem, &admin->enabled, list) {
+ if (elem->src_cid == 0 && elem->ndev == ndev) {
+ eport = elem;
+ break;
+ }
+ }
+ if (eport)
+ break;
+ }
+
+ if (!eport)
+ return false;
+
+ tmp = eport->dst_cid;
+ while (tmp != cid && tmp != 0)
+ tmp = vcap_get_next_chain(vctrl, ndev, tmp);
+
+ return !!tmp;
+}
+
+/* Internal clients can always store their rules in HW
+ * External clients can store their rules if the chain is enabled all
+ * the way from chain 0, otherwise the rule will be cached until
+ * the chain is enabled.
+ */
+static void vcap_rule_set_state(struct vcap_rule_internal *ri)
+{
+ if (ri->data.user <= VCAP_USER_QOS)
+ ri->state = VCAP_RS_PERMANENT;
+ else if (vcap_path_exist(ri->vctrl, ri->ndev, ri->data.vcap_chain_id))
+ ri->state = VCAP_RS_ENABLED;
+ else
+ ri->state = VCAP_RS_DISABLED;
+}
+
/* Encode and write a validated rule to the VCAP */
int vcap_add_rule(struct vcap_rule *rule)
{
struct vcap_rule_internal *ri = to_intrule(rule);
struct vcap_rule_move move = {0};
+ struct vcap_counter ctr = {0};
int ret;
ret = vcap_api_check(ri->vctrl);
@@ -1815,6 +2234,8 @@ int vcap_add_rule(struct vcap_rule *rule)
return ret;
/* Insert the new rule in the list of vcap rules */
mutex_lock(&ri->admin->lock);
+
+ vcap_rule_set_state(ri);
ret = vcap_insert_rule(ri, &move);
if (ret < 0) {
pr_err("%s:%d: could not insert rule in vcap list: %d\n",
@@ -1823,6 +2244,19 @@ int vcap_add_rule(struct vcap_rule *rule)
}
if (move.count > 0)
vcap_move_rules(ri, &move);
+
+ /* Set the counter to zero */
+ ret = vcap_write_counter(ri, &ctr);
+ if (ret)
+ goto out;
+
+ if (ri->state == VCAP_RS_DISABLED) {
+ /* Erase the rule area */
+ ri->vctrl->ops->init(ri->ndev, ri->admin, ri->addr, ri->size);
+ goto out;
+ }
+
+ vcap_erase_cache(ri);
ret = vcap_encode_rule(ri);
if (ret) {
pr_err("%s:%d: rule encoding error: %d\n", __func__, __LINE__, ret);
@@ -1830,8 +2264,10 @@ int vcap_add_rule(struct vcap_rule *rule)
}
ret = vcap_write_rule(ri);
- if (ret)
+ if (ret) {
pr_err("%s:%d: rule write error: %d\n", __func__, __LINE__, ret);
+ goto out;
+ }
out:
mutex_unlock(&ri->admin->lock);
return ret;
@@ -1860,17 +2296,28 @@ struct vcap_rule *vcap_alloc_rule(struct vcap_control *vctrl,
/* Sanity check that this VCAP is supported on this platform */
if (vctrl->vcaps[admin->vtype].rows == 0)
return ERR_PTR(-EINVAL);
+
+ mutex_lock(&admin->lock);
/* Check if a rule with this id already exists */
- if (vcap_lookup_rule(vctrl, id))
- return ERR_PTR(-EEXIST);
+ if (vcap_rule_exists(vctrl, id)) {
+ err = -EINVAL;
+ goto out_unlock;
+ }
+
/* Check if there is room for the rule in the block(s) of the VCAP */
maxsize = vctrl->vcaps[admin->vtype].sw_count; /* worst case rule size */
- if (vcap_rule_space(admin, maxsize))
- return ERR_PTR(-ENOSPC);
+ if (vcap_rule_space(admin, maxsize)) {
+ err = -ENOSPC;
+ goto out_unlock;
+ }
+
/* Create a container for the rule and return it */
ri = kzalloc(sizeof(*ri), GFP_KERNEL);
- if (!ri)
- return ERR_PTR(-ENOMEM);
+ if (!ri) {
+ err = -ENOMEM;
+ goto out_unlock;
+ }
+
ri->data.vcap_chain_id = vcap_chain_id;
ri->data.user = user;
ri->data.priority = priority;
@@ -1883,14 +2330,21 @@ struct vcap_rule *vcap_alloc_rule(struct vcap_control *vctrl,
ri->ndev = ndev;
ri->admin = admin; /* refer to the vcap instance */
ri->vctrl = vctrl; /* refer to the client */
- if (vcap_set_rule_id(ri) == 0)
+
+ if (vcap_set_rule_id(ri) == 0) {
+ err = -EINVAL;
goto out_free;
- vcap_erase_cache(ri);
+ }
+
+ mutex_unlock(&admin->lock);
return (struct vcap_rule *)ri;
out_free:
kfree(ri);
- return ERR_PTR(-EINVAL);
+out_unlock:
+ mutex_unlock(&admin->lock);
+ return ERR_PTR(err);
+
}
EXPORT_SYMBOL_GPL(vcap_alloc_rule);
@@ -1915,43 +2369,52 @@ void vcap_free_rule(struct vcap_rule *rule)
}
EXPORT_SYMBOL_GPL(vcap_free_rule);
-struct vcap_rule *vcap_get_rule(struct vcap_control *vctrl, u32 id)
+/* Decode a rule from the VCAP cache and return a copy */
+struct vcap_rule *vcap_decode_rule(struct vcap_rule_internal *elem)
{
- struct vcap_rule_internal *elem;
struct vcap_rule_internal *ri;
int err;
- ri = NULL;
+ ri = vcap_dup_rule(elem, elem->state == VCAP_RS_DISABLED);
+ if (IS_ERR(ri))
+ return ERR_PTR(PTR_ERR(ri));
+
+ if (ri->state == VCAP_RS_DISABLED)
+ goto out;
+
+ err = vcap_read_rule(ri);
+ if (err)
+ return ERR_PTR(err);
+
+ err = vcap_decode_keyset(ri);
+ if (err)
+ return ERR_PTR(err);
+
+ err = vcap_decode_actionset(ri);
+ if (err)
+ return ERR_PTR(err);
+
+out:
+ return &ri->data;
+}
+
+struct vcap_rule *vcap_get_rule(struct vcap_control *vctrl, u32 id)
+{
+ struct vcap_rule_internal *elem;
+ struct vcap_rule *rule;
+ int err;
err = vcap_api_check(vctrl);
if (err)
return ERR_PTR(err);
- elem = vcap_lookup_rule(vctrl, id);
+
+ elem = vcap_get_locked_rule(vctrl, id);
if (!elem)
return NULL;
- mutex_lock(&elem->admin->lock);
- ri = vcap_dup_rule(elem);
- if (IS_ERR(ri))
- goto unlock;
- err = vcap_read_rule(ri);
- if (err) {
- ri = ERR_PTR(err);
- goto unlock;
- }
- err = vcap_decode_keyset(ri);
- if (err) {
- ri = ERR_PTR(err);
- goto unlock;
- }
- err = vcap_decode_actionset(ri);
- if (err) {
- ri = ERR_PTR(err);
- goto unlock;
- }
-unlock:
+ rule = vcap_decode_rule(elem);
mutex_unlock(&elem->admin->lock);
- return (struct vcap_rule *)ri;
+ return rule;
}
EXPORT_SYMBOL_GPL(vcap_get_rule);
@@ -1966,10 +2429,13 @@ int vcap_mod_rule(struct vcap_rule *rule)
if (err)
return err;
- if (!vcap_lookup_rule(ri->vctrl, ri->data.id))
+ if (!vcap_get_locked_rule(ri->vctrl, ri->data.id))
return -ENOENT;
- mutex_lock(&ri->admin->lock);
+ vcap_rule_set_state(ri);
+ if (ri->state == VCAP_RS_DISABLED)
+ goto out;
+
/* Encode the bitstreams to the VCAP cache */
vcap_erase_cache(ri);
err = vcap_encode_rule(ri);
@@ -1982,8 +2448,6 @@ int vcap_mod_rule(struct vcap_rule *rule)
memset(&ctr, 0, sizeof(ctr));
err = vcap_write_counter(ri, &ctr);
- if (err)
- goto out;
out:
mutex_unlock(&ri->admin->lock);
@@ -2050,20 +2514,19 @@ int vcap_del_rule(struct vcap_control *vctrl, struct net_device *ndev, u32 id)
if (err)
return err;
/* Look for the rule id in all vcaps */
- ri = vcap_lookup_rule(vctrl, id);
+ ri = vcap_get_locked_rule(vctrl, id);
if (!ri)
- return -EINVAL;
+ return -ENOENT;
+
admin = ri->admin;
if (ri->addr > admin->last_used_addr)
gap = vcap_fill_rule_gap(ri);
/* Delete the rule from the list of rules and the cache */
- mutex_lock(&admin->lock);
list_del(&ri->list);
vctrl->ops->init(ndev, admin, admin->last_used_addr, ri->size + gap);
- kfree(ri);
- mutex_unlock(&admin->lock);
+ vcap_free_rule(&ri->data);
/* Update the last used address, set to default when no rules */
if (list_empty(&admin->rules)) {
@@ -2073,7 +2536,9 @@ int vcap_del_rule(struct vcap_control *vctrl, struct net_device *ndev, u32 id)
list);
admin->last_used_addr = elem->addr;
}
- return 0;
+
+ mutex_unlock(&admin->lock);
+ return err;
}
EXPORT_SYMBOL_GPL(vcap_del_rule);
@@ -2091,7 +2556,7 @@ int vcap_del_rules(struct vcap_control *vctrl, struct vcap_admin *admin)
list_for_each_entry_safe(ri, next_ri, &admin->rules, list) {
vctrl->ops->init(ri->ndev, admin, ri->addr, ri->size);
list_del(&ri->list);
- kfree(ri);
+ vcap_free_rule(&ri->data);
}
admin->last_used_addr = admin->last_valid_addr;
@@ -2137,69 +2602,6 @@ const struct vcap_field *vcap_lookup_keyfield(struct vcap_rule *rule,
}
EXPORT_SYMBOL_GPL(vcap_lookup_keyfield);
-/* Copy data from src to dst but reverse the data in chunks of 32bits.
- * For example if src is 00:11:22:33:44:55 where 55 is LSB the dst will
- * have the value 22:33:44:55:00:11.
- */
-static void vcap_copy_to_w32be(u8 *dst, u8 *src, int size)
-{
- for (int idx = 0; idx < size; ++idx) {
- int first_byte_index = 0;
- int nidx;
-
- first_byte_index = size - (((idx >> 2) + 1) << 2);
- if (first_byte_index < 0)
- first_byte_index = 0;
- nidx = idx + first_byte_index - (idx & ~0x3);
- dst[nidx] = src[idx];
- }
-}
-
-static void vcap_copy_from_client_keyfield(struct vcap_rule *rule,
- struct vcap_client_keyfield *field,
- struct vcap_client_keyfield_data *data)
-{
- struct vcap_rule_internal *ri = to_intrule(rule);
- int size;
-
- if (!ri->admin->w32be) {
- memcpy(&field->data, data, sizeof(field->data));
- return;
- }
-
- size = keyfield_size_table[field->ctrl.type] / 2;
- switch (field->ctrl.type) {
- case VCAP_FIELD_BIT:
- case VCAP_FIELD_U32:
- memcpy(&field->data, data, sizeof(field->data));
- break;
- case VCAP_FIELD_U48:
- vcap_copy_to_w32be(field->data.u48.value, data->u48.value, size);
- vcap_copy_to_w32be(field->data.u48.mask, data->u48.mask, size);
- break;
- case VCAP_FIELD_U56:
- vcap_copy_to_w32be(field->data.u56.value, data->u56.value, size);
- vcap_copy_to_w32be(field->data.u56.mask, data->u56.mask, size);
- break;
- case VCAP_FIELD_U64:
- vcap_copy_to_w32be(field->data.u64.value, data->u64.value, size);
- vcap_copy_to_w32be(field->data.u64.mask, data->u64.mask, size);
- break;
- case VCAP_FIELD_U72:
- vcap_copy_to_w32be(field->data.u72.value, data->u72.value, size);
- vcap_copy_to_w32be(field->data.u72.mask, data->u72.mask, size);
- break;
- case VCAP_FIELD_U112:
- vcap_copy_to_w32be(field->data.u112.value, data->u112.value, size);
- vcap_copy_to_w32be(field->data.u112.mask, data->u112.mask, size);
- break;
- case VCAP_FIELD_U128:
- vcap_copy_to_w32be(field->data.u128.value, data->u128.value, size);
- vcap_copy_to_w32be(field->data.u128.mask, data->u128.mask, size);
- break;
- }
-}
-
/* Check if the keyfield is already in the rule */
static bool vcap_keyfield_unique(struct vcap_rule *rule,
enum vcap_key_field key)
@@ -2257,9 +2659,9 @@ static int vcap_rule_add_key(struct vcap_rule *rule,
field = kzalloc(sizeof(*field), GFP_KERNEL);
if (!field)
return -ENOMEM;
+ memcpy(&field->data, data, sizeof(field->data));
field->ctrl.key = key;
field->ctrl.type = ftype;
- vcap_copy_from_client_keyfield(rule, field, data);
list_add_tail(&field->ctrl.list, &rule->keyfields);
return 0;
}
@@ -2355,7 +2757,7 @@ int vcap_rule_get_key_u32(struct vcap_rule *rule, enum vcap_key_field key,
EXPORT_SYMBOL_GPL(vcap_rule_get_key_u32);
/* Find a client action field in a rule */
-static struct vcap_client_actionfield *
+struct vcap_client_actionfield *
vcap_find_actionfield(struct vcap_rule *rule, enum vcap_action_field act)
{
struct vcap_rule_internal *ri = (struct vcap_rule_internal *)rule;
@@ -2366,45 +2768,7 @@ vcap_find_actionfield(struct vcap_rule *rule, enum vcap_action_field act)
return caf;
return NULL;
}
-
-static void vcap_copy_from_client_actionfield(struct vcap_rule *rule,
- struct vcap_client_actionfield *field,
- struct vcap_client_actionfield_data *data)
-{
- struct vcap_rule_internal *ri = to_intrule(rule);
- int size;
-
- if (!ri->admin->w32be) {
- memcpy(&field->data, data, sizeof(field->data));
- return;
- }
-
- size = actionfield_size_table[field->ctrl.type];
- switch (field->ctrl.type) {
- case VCAP_FIELD_BIT:
- case VCAP_FIELD_U32:
- memcpy(&field->data, data, sizeof(field->data));
- break;
- case VCAP_FIELD_U48:
- vcap_copy_to_w32be(field->data.u48.value, data->u48.value, size);
- break;
- case VCAP_FIELD_U56:
- vcap_copy_to_w32be(field->data.u56.value, data->u56.value, size);
- break;
- case VCAP_FIELD_U64:
- vcap_copy_to_w32be(field->data.u64.value, data->u64.value, size);
- break;
- case VCAP_FIELD_U72:
- vcap_copy_to_w32be(field->data.u72.value, data->u72.value, size);
- break;
- case VCAP_FIELD_U112:
- vcap_copy_to_w32be(field->data.u112.value, data->u112.value, size);
- break;
- case VCAP_FIELD_U128:
- vcap_copy_to_w32be(field->data.u128.value, data->u128.value, size);
- break;
- }
-}
+EXPORT_SYMBOL_GPL(vcap_find_actionfield);
/* Check if the actionfield is already in the rule */
static bool vcap_actionfield_unique(struct vcap_rule *rule,
@@ -2463,9 +2827,9 @@ static int vcap_rule_add_action(struct vcap_rule *rule,
field = kzalloc(sizeof(*field), GFP_KERNEL);
if (!field)
return -ENOMEM;
+ memcpy(&field->data, data, sizeof(field->data));
field->ctrl.action = action;
field->ctrl.type = ftype;
- vcap_copy_from_client_actionfield(rule, field, data);
list_add_tail(&field->ctrl.list, &rule->actionfields);
return 0;
}
@@ -2564,24 +2928,157 @@ void vcap_set_tc_exterr(struct flow_cls_offload *fco, struct vcap_rule *vrule)
}
EXPORT_SYMBOL_GPL(vcap_set_tc_exterr);
+/* Write a rule to VCAP HW to enable it */
+static int vcap_enable_rule(struct vcap_rule_internal *ri)
+{
+ struct vcap_client_actionfield *af, *naf;
+ struct vcap_client_keyfield *kf, *nkf;
+ int err;
+
+ vcap_erase_cache(ri);
+ err = vcap_encode_rule(ri);
+ if (err)
+ goto out;
+ err = vcap_write_rule(ri);
+ if (err)
+ goto out;
+
+ /* Deallocate the list of keys and actions */
+ list_for_each_entry_safe(kf, nkf, &ri->data.keyfields, ctrl.list) {
+ list_del(&kf->ctrl.list);
+ kfree(kf);
+ }
+ list_for_each_entry_safe(af, naf, &ri->data.actionfields, ctrl.list) {
+ list_del(&af->ctrl.list);
+ kfree(af);
+ }
+ ri->state = VCAP_RS_ENABLED;
+out:
+ return err;
+}
+
+/* Enable all disabled rules for a specific chain/port in the VCAP HW */
+static int vcap_enable_rules(struct vcap_control *vctrl,
+ struct net_device *ndev, int chain)
+{
+ int next_chain = chain + VCAP_CID_LOOKUP_SIZE;
+ struct vcap_rule_internal *ri;
+ struct vcap_admin *admin;
+ int err = 0;
+
+ list_for_each_entry(admin, &vctrl->list, list) {
+ if (!(chain >= admin->first_cid && chain <= admin->last_cid))
+ continue;
+
+ /* Found the admin, now find the offloadable rules */
+ mutex_lock(&admin->lock);
+ list_for_each_entry(ri, &admin->rules, list) {
+ /* Is the rule in the lookup defined by the chain */
+ if (!(ri->data.vcap_chain_id >= chain &&
+ ri->data.vcap_chain_id < next_chain)) {
+ continue;
+ }
+
+ if (ri->ndev != ndev)
+ continue;
+
+ if (ri->state != VCAP_RS_DISABLED)
+ continue;
+
+ err = vcap_enable_rule(ri);
+ if (err)
+ break;
+ }
+ mutex_unlock(&admin->lock);
+ if (err)
+ break;
+ }
+ return err;
+}
+
+/* Read and erase a rule from VCAP HW to disable it */
+static int vcap_disable_rule(struct vcap_rule_internal *ri)
+{
+ int err;
+
+ err = vcap_read_rule(ri);
+ if (err)
+ return err;
+ err = vcap_decode_keyset(ri);
+ if (err)
+ return err;
+ err = vcap_decode_actionset(ri);
+ if (err)
+ return err;
+
+ ri->state = VCAP_RS_DISABLED;
+ ri->vctrl->ops->init(ri->ndev, ri->admin, ri->addr, ri->size);
+ return 0;
+}
+
+/* Disable all enabled rules for a specific chain/port in the VCAP HW */
+static int vcap_disable_rules(struct vcap_control *vctrl,
+ struct net_device *ndev, int chain)
+{
+ struct vcap_rule_internal *ri;
+ struct vcap_admin *admin;
+ int err = 0;
+
+ list_for_each_entry(admin, &vctrl->list, list) {
+ if (!(chain >= admin->first_cid && chain <= admin->last_cid))
+ continue;
+
+ /* Found the admin, now find the rules on the chain */
+ mutex_lock(&admin->lock);
+ list_for_each_entry(ri, &admin->rules, list) {
+ if (ri->data.vcap_chain_id != chain)
+ continue;
+
+ if (ri->ndev != ndev)
+ continue;
+
+ if (ri->state != VCAP_RS_ENABLED)
+ continue;
+
+ err = vcap_disable_rule(ri);
+ if (err)
+ break;
+ }
+ mutex_unlock(&admin->lock);
+ if (err)
+ break;
+ }
+ return err;
+}
+
/* Check if this port is already enabled for this VCAP instance */
-static bool vcap_is_enabled(struct vcap_admin *admin, struct net_device *ndev,
- unsigned long cookie)
+static bool vcap_is_enabled(struct vcap_control *vctrl, struct net_device *ndev,
+ int dst_cid)
{
struct vcap_enabled_port *eport;
+ struct vcap_admin *admin;
- list_for_each_entry(eport, &admin->enabled, list)
- if (eport->cookie == cookie || eport->ndev == ndev)
- return true;
+ list_for_each_entry(admin, &vctrl->list, list)
+ list_for_each_entry(eport, &admin->enabled, list)
+ if (eport->dst_cid == dst_cid && eport->ndev == ndev)
+ return true;
return false;
}
-/* Enable this port for this VCAP instance */
-static int vcap_enable(struct vcap_admin *admin, struct net_device *ndev,
- unsigned long cookie)
+/* Enable this port and chain id in a VCAP instance */
+static int vcap_enable(struct vcap_control *vctrl, struct net_device *ndev,
+ unsigned long cookie, int src_cid, int dst_cid)
{
struct vcap_enabled_port *eport;
+ struct vcap_admin *admin;
+
+ if (src_cid >= dst_cid)
+ return -EFAULT;
+
+ admin = vcap_find_admin(vctrl, dst_cid);
+ if (!admin)
+ return -ENOENT;
eport = kzalloc(sizeof(*eport), GFP_KERNEL);
if (!eport)
@@ -2589,48 +3086,72 @@ static int vcap_enable(struct vcap_admin *admin, struct net_device *ndev,
eport->ndev = ndev;
eport->cookie = cookie;
+ eport->src_cid = src_cid;
+ eport->dst_cid = dst_cid;
+ mutex_lock(&admin->lock);
list_add_tail(&eport->list, &admin->enabled);
+ mutex_unlock(&admin->lock);
+ if (vcap_path_exist(vctrl, ndev, src_cid)) {
+ /* Enable chained lookups */
+ while (dst_cid) {
+ admin = vcap_find_admin(vctrl, dst_cid);
+ if (!admin)
+ return -ENOENT;
+
+ vcap_enable_rules(vctrl, ndev, dst_cid);
+ dst_cid = vcap_get_next_chain(vctrl, ndev, dst_cid);
+ }
+ }
return 0;
}
-/* Disable this port for this VCAP instance */
-static int vcap_disable(struct vcap_admin *admin, struct net_device *ndev,
+/* Disable this port and chain id for a VCAP instance */
+static int vcap_disable(struct vcap_control *vctrl, struct net_device *ndev,
unsigned long cookie)
{
- struct vcap_enabled_port *eport;
+ struct vcap_enabled_port *elem, *eport = NULL;
+ struct vcap_admin *found = NULL, *admin;
+ int dst_cid;
- list_for_each_entry(eport, &admin->enabled, list) {
- if (eport->cookie == cookie && eport->ndev == ndev) {
- list_del(&eport->list);
- kfree(eport);
- return 0;
+ list_for_each_entry(admin, &vctrl->list, list) {
+ list_for_each_entry(elem, &admin->enabled, list) {
+ if (elem->cookie == cookie && elem->ndev == ndev) {
+ eport = elem;
+ found = admin;
+ break;
+ }
}
+ if (eport)
+ break;
}
- return -ENOENT;
-}
+ if (!eport)
+ return -ENOENT;
-/* Find the VCAP instance that enabled the port using a specific filter */
-static struct vcap_admin *vcap_find_admin_by_cookie(struct vcap_control *vctrl,
- unsigned long cookie)
-{
- struct vcap_enabled_port *eport;
- struct vcap_admin *admin;
+ /* Disable chained lookups */
+ dst_cid = eport->dst_cid;
+ while (dst_cid) {
+ admin = vcap_find_admin(vctrl, dst_cid);
+ if (!admin)
+ return -ENOENT;
- list_for_each_entry(admin, &vctrl->list, list)
- list_for_each_entry(eport, &admin->enabled, list)
- if (eport->cookie == cookie)
- return admin;
+ vcap_disable_rules(vctrl, ndev, dst_cid);
+ dst_cid = vcap_get_next_chain(vctrl, ndev, dst_cid);
+ }
- return NULL;
+ mutex_lock(&found->lock);
+ list_del(&eport->list);
+ mutex_unlock(&found->lock);
+ kfree(eport);
+ return 0;
}
-/* Enable/Disable the VCAP instance lookups. Chain id 0 means disable */
+/* Enable/Disable the VCAP instance lookups */
int vcap_enable_lookups(struct vcap_control *vctrl, struct net_device *ndev,
- int chain_id, unsigned long cookie, bool enable)
+ int src_cid, int dst_cid, unsigned long cookie,
+ bool enable)
{
- struct vcap_admin *admin;
int err;
err = vcap_api_check(vctrl);
@@ -2640,36 +3161,48 @@ int vcap_enable_lookups(struct vcap_control *vctrl, struct net_device *ndev,
if (!ndev)
return -ENODEV;
- if (chain_id)
- admin = vcap_find_admin(vctrl, chain_id);
- else
- admin = vcap_find_admin_by_cookie(vctrl, cookie);
- if (!admin)
- return -ENOENT;
-
- /* first instance and first chain */
- if (admin->vinst || chain_id > admin->first_cid)
+ /* Source and destination must be the first chain in a lookup */
+ if (src_cid % VCAP_CID_LOOKUP_SIZE)
+ return -EFAULT;
+ if (dst_cid % VCAP_CID_LOOKUP_SIZE)
return -EFAULT;
- err = vctrl->ops->enable(ndev, admin, enable);
- if (err)
- return err;
-
- if (chain_id) {
- if (vcap_is_enabled(admin, ndev, cookie))
+ if (enable) {
+ if (vcap_is_enabled(vctrl, ndev, dst_cid))
return -EADDRINUSE;
- mutex_lock(&admin->lock);
- vcap_enable(admin, ndev, cookie);
+ if (vcap_is_chain_used(vctrl, ndev, src_cid))
+ return -EADDRNOTAVAIL;
+ err = vcap_enable(vctrl, ndev, cookie, src_cid, dst_cid);
} else {
- mutex_lock(&admin->lock);
- vcap_disable(admin, ndev, cookie);
+ err = vcap_disable(vctrl, ndev, cookie);
}
- mutex_unlock(&admin->lock);
- return 0;
+ return err;
}
EXPORT_SYMBOL_GPL(vcap_enable_lookups);
+/* Is this chain id the last lookup of all VCAPs */
+bool vcap_is_last_chain(struct vcap_control *vctrl, int cid, bool ingress)
+{
+ struct vcap_admin *admin;
+ int lookup;
+
+ if (vcap_api_check(vctrl))
+ return false;
+
+ admin = vcap_find_admin(vctrl, cid);
+ if (!admin)
+ return false;
+
+ if (!vcap_admin_is_last(vctrl, admin, ingress))
+ return false;
+
+ /* This must be the last lookup in this VCAP type */
+ lookup = vcap_chain_id_to_lookup(admin, cid);
+ return lookup == admin->lookups - 1;
+}
+EXPORT_SYMBOL_GPL(vcap_is_last_chain);
+
/* Set a rule counter id (for certain vcaps only) */
void vcap_rule_set_counter_id(struct vcap_rule *rule, u32 counter_id)
{
@@ -2679,31 +3212,6 @@ void vcap_rule_set_counter_id(struct vcap_rule *rule, u32 counter_id)
}
EXPORT_SYMBOL_GPL(vcap_rule_set_counter_id);
-/* Provide all rules via a callback interface */
-int vcap_rule_iter(struct vcap_control *vctrl,
- int (*callback)(void *, struct vcap_rule *), void *arg)
-{
- struct vcap_rule_internal *ri;
- struct vcap_admin *admin;
- int ret;
-
- ret = vcap_api_check(vctrl);
- if (ret)
- return ret;
-
- /* Iterate all rules in each VCAP instance */
- list_for_each_entry(admin, &vctrl->list, list) {
- list_for_each_entry(ri, &admin->rules, list) {
- ret = callback(arg, &ri->data);
- if (ret)
- return ret;
- }
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(vcap_rule_iter);
-
int vcap_rule_set_counter(struct vcap_rule *rule, struct vcap_counter *ctr)
{
struct vcap_rule_internal *ri = to_intrule(rule);
@@ -2716,7 +3224,12 @@ int vcap_rule_set_counter(struct vcap_rule *rule, struct vcap_counter *ctr)
pr_err("%s:%d: counter is missing\n", __func__, __LINE__);
return -EINVAL;
}
- return vcap_write_counter(ri, ctr);
+
+ mutex_lock(&ri->admin->lock);
+ err = vcap_write_counter(ri, ctr);
+ mutex_unlock(&ri->admin->lock);
+
+ return err;
}
EXPORT_SYMBOL_GPL(vcap_rule_set_counter);
@@ -2732,10 +3245,138 @@ int vcap_rule_get_counter(struct vcap_rule *rule, struct vcap_counter *ctr)
pr_err("%s:%d: counter is missing\n", __func__, __LINE__);
return -EINVAL;
}
- return vcap_read_counter(ri, ctr);
+
+ mutex_lock(&ri->admin->lock);
+ err = vcap_read_counter(ri, ctr);
+ mutex_unlock(&ri->admin->lock);
+
+ return err;
}
EXPORT_SYMBOL_GPL(vcap_rule_get_counter);
+/* Get a copy of a client key field */
+static int vcap_rule_get_key(struct vcap_rule *rule,
+ enum vcap_key_field key,
+ struct vcap_client_keyfield *ckf)
+{
+ struct vcap_client_keyfield *field;
+
+ field = vcap_find_keyfield(rule, key);
+ if (!field)
+ return -EINVAL;
+ memcpy(ckf, field, sizeof(*ckf));
+ INIT_LIST_HEAD(&ckf->ctrl.list);
+ return 0;
+}
+
+/* Find a keyset having the same size as the provided rule, where the keyset
+ * does not have a type id.
+ */
+static int vcap_rule_get_untyped_keyset(struct vcap_rule_internal *ri,
+ struct vcap_keyset_list *matches)
+{
+ struct vcap_control *vctrl = ri->vctrl;
+ enum vcap_type vt = ri->admin->vtype;
+ const struct vcap_set *keyfield_set;
+ int idx;
+
+ keyfield_set = vctrl->vcaps[vt].keyfield_set;
+ for (idx = 0; idx < vctrl->vcaps[vt].keyfield_set_size; ++idx) {
+ if (keyfield_set[idx].sw_per_item == ri->keyset_sw &&
+ keyfield_set[idx].type_id == (u8)-1) {
+ vcap_keyset_list_add(matches, idx);
+ return 0;
+ }
+ }
+ return -EINVAL;
+}
+
+/* Get the keysets that matches the rule key type/mask */
+int vcap_rule_get_keysets(struct vcap_rule_internal *ri,
+ struct vcap_keyset_list *matches)
+{
+ struct vcap_control *vctrl = ri->vctrl;
+ enum vcap_type vt = ri->admin->vtype;
+ const struct vcap_set *keyfield_set;
+ struct vcap_client_keyfield kf = {};
+ u32 value, mask;
+ int err, idx;
+
+ err = vcap_rule_get_key(&ri->data, VCAP_KF_TYPE, &kf);
+ if (err)
+ return vcap_rule_get_untyped_keyset(ri, matches);
+
+ if (kf.ctrl.type == VCAP_FIELD_BIT) {
+ value = kf.data.u1.value;
+ mask = kf.data.u1.mask;
+ } else if (kf.ctrl.type == VCAP_FIELD_U32) {
+ value = kf.data.u32.value;
+ mask = kf.data.u32.mask;
+ } else {
+ return -EINVAL;
+ }
+
+ keyfield_set = vctrl->vcaps[vt].keyfield_set;
+ for (idx = 0; idx < vctrl->vcaps[vt].keyfield_set_size; ++idx) {
+ if (keyfield_set[idx].sw_per_item != ri->keyset_sw)
+ continue;
+
+ if (keyfield_set[idx].type_id == (u8)-1) {
+ vcap_keyset_list_add(matches, idx);
+ continue;
+ }
+
+ if ((keyfield_set[idx].type_id & mask) == value)
+ vcap_keyset_list_add(matches, idx);
+ }
+ if (matches->cnt > 0)
+ return 0;
+
+ return -EINVAL;
+}
+
+/* Collect packet counts from all rules with the same cookie */
+int vcap_get_rule_count_by_cookie(struct vcap_control *vctrl,
+ struct vcap_counter *ctr, u64 cookie)
+{
+ struct vcap_rule_internal *ri;
+ struct vcap_counter temp = {};
+ struct vcap_admin *admin;
+ int err;
+
+ err = vcap_api_check(vctrl);
+ if (err)
+ return err;
+
+ /* Iterate all rules in each VCAP instance */
+ list_for_each_entry(admin, &vctrl->list, list) {
+ mutex_lock(&admin->lock);
+ list_for_each_entry(ri, &admin->rules, list) {
+ if (ri->data.cookie != cookie)
+ continue;
+
+ err = vcap_read_counter(ri, &temp);
+ if (err)
+ goto unlock;
+ ctr->value += temp.value;
+
+ /* Reset the rule counter */
+ temp.value = 0;
+ temp.sticky = 0;
+ err = vcap_write_counter(ri, &temp);
+ if (err)
+ goto unlock;
+ }
+ mutex_unlock(&admin->lock);
+ }
+ return err;
+
+unlock:
+ mutex_unlock(&admin->lock);
+ return err;
+}
+EXPORT_SYMBOL_GPL(vcap_get_rule_count_by_cookie);
+
static int vcap_rule_mod_key(struct vcap_rule *rule,
enum vcap_key_field key,
enum vcap_field_type ftype,
@@ -2746,7 +3387,7 @@ static int vcap_rule_mod_key(struct vcap_rule *rule,
field = vcap_find_keyfield(rule, key);
if (!field)
return vcap_rule_add_key(rule, key, ftype, data);
- vcap_copy_from_client_keyfield(rule, field, data);
+ memcpy(&field->data, data, sizeof(field->data));
return 0;
}
@@ -2772,7 +3413,7 @@ static int vcap_rule_mod_action(struct vcap_rule *rule,
field = vcap_find_actionfield(rule, action);
if (!field)
return vcap_rule_add_action(rule, action, ftype, data);
- vcap_copy_from_client_actionfield(rule, field, data);
+ memcpy(&field->data, data, sizeof(field->data));
return 0;
}
diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api.h b/drivers/net/ethernet/microchip/vcap/vcap_api.h
index 689c7270f2a8..62db270f65af 100644
--- a/drivers/net/ethernet/microchip/vcap/vcap_api.h
+++ b/drivers/net/ethernet/microchip/vcap/vcap_api.h
@@ -176,6 +176,7 @@ struct vcap_admin {
int first_valid_addr; /* bottom of address range to be used */
int last_used_addr; /* address of lowest added rule */
bool w32be; /* vcap uses "32bit-word big-endian" encoding */
+ bool ingress; /* chain traffic direction */
struct vcap_cache_data cache; /* encoded rule data */
};
@@ -201,6 +202,13 @@ struct vcap_keyset_list {
enum vcap_keyfield_set *keysets; /* the list of keysets */
};
+/* List of actionsets */
+struct vcap_actionset_list {
+ int max; /* size of the actionset list */
+ int cnt; /* count of actionsets actually in the list */
+ enum vcap_actionfield_set *actionsets; /* the list of actionsets */
+};
+
/* Client output printf-like function with destination */
struct vcap_output_print {
__printf(2, 3)
@@ -259,11 +267,6 @@ struct vcap_operations {
(struct net_device *ndev,
struct vcap_admin *admin,
struct vcap_output_print *out);
- /* enable/disable the lookups in a vcap instance */
- int (*enable)
- (struct net_device *ndev,
- struct vcap_admin *admin,
- bool enable);
};
/* VCAP API Client control interface */
diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api_client.h b/drivers/net/ethernet/microchip/vcap/vcap_api_client.h
index 0319866f9c94..417af9754bcc 100644
--- a/drivers/net/ethernet/microchip/vcap/vcap_api_client.h
+++ b/drivers/net/ethernet/microchip/vcap/vcap_api_client.h
@@ -148,9 +148,10 @@ struct vcap_counter {
bool sticky;
};
-/* Enable/Disable the VCAP instance lookups. Chain id 0 means disable */
+/* Enable/Disable the VCAP instance lookups */
int vcap_enable_lookups(struct vcap_control *vctrl, struct net_device *ndev,
- int chain_id, unsigned long cookie, bool enable);
+ int from_cid, int to_cid, unsigned long cookie,
+ bool enable);
/* VCAP rule operations */
/* Allocate a rule and fill in the basic information */
@@ -201,6 +202,8 @@ int vcap_rule_add_action_u32(struct vcap_rule *rule,
enum vcap_action_field action, u32 value);
/* VCAP rule counter operations */
+int vcap_get_rule_count_by_cookie(struct vcap_control *vctrl,
+ struct vcap_counter *ctr, u64 cookie);
int vcap_rule_set_counter(struct vcap_rule *rule, struct vcap_counter *ctr);
int vcap_rule_get_counter(struct vcap_rule *rule, struct vcap_counter *ctr);
@@ -214,8 +217,12 @@ const struct vcap_field *vcap_lookup_keyfield(struct vcap_rule *rule,
enum vcap_key_field key);
/* Find a rule id with a provided cookie */
int vcap_lookup_rule_by_cookie(struct vcap_control *vctrl, u64 cookie);
+/* Calculate the value used for chaining VCAP rules */
+int vcap_chain_offset(struct vcap_control *vctrl, int from_cid, int to_cid);
/* Is the next chain id in the following lookup, possible in another VCAP */
bool vcap_is_next_lookup(struct vcap_control *vctrl, int cur_cid, int next_cid);
+/* Is this chain id the last lookup of all VCAPs */
+bool vcap_is_last_chain(struct vcap_control *vctrl, int cid, bool ingress);
/* Provide all rules via a callback interface */
int vcap_rule_iter(struct vcap_control *vctrl,
int (*callback)(void *, struct vcap_rule *), void *arg);
@@ -262,4 +269,6 @@ int vcap_rule_mod_action_u32(struct vcap_rule *rule,
int vcap_rule_get_key_u32(struct vcap_rule *rule, enum vcap_key_field key,
u32 *value, u32 *mask);
+struct vcap_client_actionfield *
+vcap_find_actionfield(struct vcap_rule *rule, enum vcap_action_field act);
#endif /* __VCAP_API_CLIENT__ */
diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api_debugfs.c b/drivers/net/ethernet/microchip/vcap/vcap_api_debugfs.c
index e0b206247f2e..c2c3397c5898 100644
--- a/drivers/net/ethernet/microchip/vcap/vcap_api_debugfs.c
+++ b/drivers/net/ethernet/microchip/vcap/vcap_api_debugfs.c
@@ -44,11 +44,14 @@ static void vcap_debugfs_show_rule_keyfield(struct vcap_control *vctrl,
out->prf(out->dst, "%pI4h/%pI4h", &data->u32.value,
&data->u32.mask);
} else if (key == VCAP_KF_ETYPE ||
- key == VCAP_KF_IF_IGR_PORT_MASK) {
+ key == VCAP_KF_IF_IGR_PORT_MASK ||
+ key == VCAP_KF_IF_EGR_PORT_MASK) {
hex = true;
} else {
u32 fmsk = (1 << keyfield[key].width) - 1;
+ if (keyfield[key].width == 32)
+ fmsk = ~0;
out->prf(out->dst, "%u/%u", data->u32.value & fmsk,
data->u32.mask & fmsk);
}
@@ -152,37 +155,48 @@ vcap_debugfs_show_rule_actionfield(struct vcap_control *vctrl,
out->prf(out->dst, "\n");
}
-static int vcap_debugfs_show_rule_keyset(struct vcap_rule_internal *ri,
- struct vcap_output_print *out)
+static int vcap_debugfs_show_keysets(struct vcap_rule_internal *ri,
+ struct vcap_output_print *out)
{
- struct vcap_control *vctrl = ri->vctrl;
struct vcap_admin *admin = ri->admin;
enum vcap_keyfield_set keysets[10];
- const struct vcap_field *keyfield;
- enum vcap_type vt = admin->vtype;
- struct vcap_client_keyfield *ckf;
struct vcap_keyset_list matches;
- u32 *maskstream;
- u32 *keystream;
- int res;
+ int err;
- keystream = admin->cache.keystream;
- maskstream = admin->cache.maskstream;
matches.keysets = keysets;
matches.cnt = 0;
matches.max = ARRAY_SIZE(keysets);
- res = vcap_find_keystream_keysets(vctrl, vt, keystream, maskstream,
- false, 0, &matches);
- if (res < 0) {
+
+ if (ri->state == VCAP_RS_DISABLED)
+ err = vcap_rule_get_keysets(ri, &matches);
+ else
+ err = vcap_find_keystream_keysets(ri->vctrl, admin->vtype,
+ admin->cache.keystream,
+ admin->cache.maskstream,
+ false, 0, &matches);
+ if (err) {
pr_err("%s:%d: could not find valid keysets: %d\n",
- __func__, __LINE__, res);
- return -EINVAL;
+ __func__, __LINE__, err);
+ return err;
}
+
out->prf(out->dst, " keysets:");
for (int idx = 0; idx < matches.cnt; ++idx)
out->prf(out->dst, " %s",
- vcap_keyset_name(vctrl, matches.keysets[idx]));
+ vcap_keyset_name(ri->vctrl, matches.keysets[idx]));
out->prf(out->dst, "\n");
+ return 0;
+}
+
+static int vcap_debugfs_show_rule_keyset(struct vcap_rule_internal *ri,
+ struct vcap_output_print *out)
+{
+ struct vcap_control *vctrl = ri->vctrl;
+ struct vcap_admin *admin = ri->admin;
+ const struct vcap_field *keyfield;
+ struct vcap_client_keyfield *ckf;
+
+ vcap_debugfs_show_keysets(ri, out);
out->prf(out->dst, " keyset_sw: %d\n", ri->keyset_sw);
out->prf(out->dst, " keyset_sw_regs: %d\n", ri->keyset_sw_regs);
@@ -233,6 +247,18 @@ static void vcap_show_admin_rule(struct vcap_control *vctrl,
out->prf(out->dst, " chain_id: %d\n", ri->data.vcap_chain_id);
out->prf(out->dst, " user: %d\n", ri->data.user);
out->prf(out->dst, " priority: %d\n", ri->data.priority);
+ out->prf(out->dst, " state: ");
+ switch (ri->state) {
+ case VCAP_RS_PERMANENT:
+ out->prf(out->dst, "permanent\n");
+ break;
+ case VCAP_RS_DISABLED:
+ out->prf(out->dst, "disabled\n");
+ break;
+ case VCAP_RS_ENABLED:
+ out->prf(out->dst, "enabled\n");
+ break;
+ }
vcap_debugfs_show_rule_keyset(ri, out);
vcap_debugfs_show_rule_actionset(ri, out);
}
@@ -254,6 +280,7 @@ static void vcap_show_admin_info(struct vcap_control *vctrl,
out->prf(out->dst, "version: %d\n", vcap->version);
out->prf(out->dst, "vtype: %d\n", admin->vtype);
out->prf(out->dst, "vinst: %d\n", admin->vinst);
+ out->prf(out->dst, "ingress: %d\n", admin->ingress);
out->prf(out->dst, "first_cid: %d\n", admin->first_cid);
out->prf(out->dst, "last_cid: %d\n", admin->last_cid);
out->prf(out->dst, "lookups: %d\n", admin->lookups);
@@ -272,7 +299,7 @@ static int vcap_show_admin(struct vcap_control *vctrl,
vcap_show_admin_info(vctrl, admin, out);
list_for_each_entry(elem, &admin->rules, list) {
- vrule = vcap_get_rule(vctrl, elem->data.id);
+ vrule = vcap_decode_rule(elem);
if (IS_ERR_OR_NULL(vrule)) {
ret = PTR_ERR(vrule);
break;
@@ -381,8 +408,12 @@ static int vcap_debugfs_show(struct seq_file *m, void *unused)
.prf = (void *)seq_printf,
.dst = m,
};
+ int ret;
- return vcap_show_admin(info->vctrl, info->admin, &out);
+ mutex_lock(&info->admin->lock);
+ ret = vcap_show_admin(info->vctrl, info->admin, &out);
+ mutex_unlock(&info->admin->lock);
+ return ret;
}
DEFINE_SHOW_ATTRIBUTE(vcap_debugfs);
@@ -394,8 +425,12 @@ static int vcap_raw_debugfs_show(struct seq_file *m, void *unused)
.prf = (void *)seq_printf,
.dst = m,
};
+ int ret;
- return vcap_show_admin_raw(info->vctrl, info->admin, &out);
+ mutex_lock(&info->admin->lock);
+ ret = vcap_show_admin_raw(info->vctrl, info->admin, &out);
+ mutex_unlock(&info->admin->lock);
+ return ret;
}
DEFINE_SHOW_ATTRIBUTE(vcap_raw_debugfs);
diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api_debugfs_kunit.c b/drivers/net/ethernet/microchip/vcap/vcap_api_debugfs_kunit.c
index cf594668d5d9..0de3f677135a 100644
--- a/drivers/net/ethernet/microchip/vcap/vcap_api_debugfs_kunit.c
+++ b/drivers/net/ethernet/microchip/vcap/vcap_api_debugfs_kunit.c
@@ -221,13 +221,6 @@ static int vcap_test_port_info(struct net_device *ndev,
return 0;
}
-static int vcap_test_enable(struct net_device *ndev,
- struct vcap_admin *admin,
- bool enable)
-{
- return 0;
-}
-
static struct vcap_operations test_callbacks = {
.validate_keyset = test_val_keyset,
.add_default_fields = test_add_def_fields,
@@ -238,7 +231,6 @@ static struct vcap_operations test_callbacks = {
.update = test_cache_update,
.move = test_cache_move,
.port_info = vcap_test_port_info,
- .enable = vcap_test_enable,
};
static struct vcap_control test_vctrl = {
@@ -253,6 +245,8 @@ static void vcap_test_api_init(struct vcap_admin *admin)
INIT_LIST_HEAD(&test_vctrl.list);
INIT_LIST_HEAD(&admin->list);
INIT_LIST_HEAD(&admin->rules);
+ INIT_LIST_HEAD(&admin->enabled);
+ mutex_init(&admin->lock);
list_add_tail(&admin->list, &test_vctrl.list);
memset(test_updateaddr, 0, sizeof(test_updateaddr));
test_updateaddridx = 0;
@@ -393,8 +387,9 @@ static const char * const test_admin_info_expect[] = {
"default_cnt: 73\n",
"require_cnt_dis: 0\n",
"version: 1\n",
- "vtype: 2\n",
+ "vtype: 3\n",
"vinst: 0\n",
+ "ingress: 1\n",
"first_cid: 10000\n",
"last_cid: 19999\n",
"lookups: 4\n",
@@ -413,6 +408,7 @@ static void vcap_api_show_admin_test(struct kunit *test)
.last_valid_addr = 3071,
.first_valid_addr = 0,
.last_used_addr = 794,
+ .ingress = true,
};
struct vcap_output_print out = {
.prf = (void *)test_prf,
@@ -439,8 +435,9 @@ static const char * const test_admin_expect[] = {
"default_cnt: 73\n",
"require_cnt_dis: 0\n",
"version: 1\n",
- "vtype: 2\n",
+ "vtype: 3\n",
"vinst: 0\n",
+ "ingress: 1\n",
"first_cid: 8000000\n",
"last_cid: 8199999\n",
"lookups: 4\n",
@@ -452,6 +449,7 @@ static const char * const test_admin_expect[] = {
" chain_id: 0\n",
" user: 0\n",
" priority: 0\n",
+ " state: permanent\n",
" keysets: VCAP_KFS_MAC_ETYPE\n",
" keyset_sw: 6\n",
" keyset_sw_regs: 2\n",
@@ -501,6 +499,7 @@ static void vcap_api_show_admin_rule_test(struct kunit *test)
.last_valid_addr = 3071,
.first_valid_addr = 0,
.last_used_addr = 794,
+ .ingress = true,
.cache = {
.keystream = keydata,
.maskstream = mskdata,
diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c b/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
index 76a31215ebfb..c07f25e791c7 100644
--- a/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
+++ b/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
@@ -211,13 +211,6 @@ static int vcap_test_port_info(struct net_device *ndev,
return 0;
}
-static int vcap_test_enable(struct net_device *ndev,
- struct vcap_admin *admin,
- bool enable)
-{
- return 0;
-}
-
static struct vcap_operations test_callbacks = {
.validate_keyset = test_val_keyset,
.add_default_fields = test_add_def_fields,
@@ -228,7 +221,6 @@ static struct vcap_operations test_callbacks = {
.update = test_cache_update,
.move = test_cache_move,
.port_info = vcap_test_port_info,
- .enable = vcap_test_enable,
};
static struct vcap_control test_vctrl = {
@@ -243,6 +235,8 @@ static void vcap_test_api_init(struct vcap_admin *admin)
INIT_LIST_HEAD(&test_vctrl.list);
INIT_LIST_HEAD(&admin->list);
INIT_LIST_HEAD(&admin->rules);
+ INIT_LIST_HEAD(&admin->enabled);
+ mutex_init(&admin->lock);
list_add_tail(&admin->list, &test_vctrl.list);
memset(test_updateaddr, 0, sizeof(test_updateaddr));
test_updateaddridx = 0;
@@ -302,7 +296,7 @@ test_vcap_xn_rule_creator(struct kunit *test, int cid, enum vcap_user user,
ret = vcap_set_rule_set_keyset(rule, keyset);
/* Add rule actions : there must be at least one action */
- ret = vcap_rule_add_action_u32(rule, VCAP_AF_COSID_VAL, 0);
+ ret = vcap_rule_add_action_u32(rule, VCAP_AF_ISDX_VAL, 0);
/* Override rule actionset */
ret = vcap_set_rule_set_actionset(rule, actionset);
@@ -1312,8 +1306,8 @@ static void vcap_api_encode_rule_test(struct kunit *test)
struct vcap_admin is2_admin = {
.vtype = VCAP_TYPE_IS2,
- .first_cid = 10000,
- .last_cid = 19999,
+ .first_cid = 8000000,
+ .last_cid = 8099999,
.lookups = 4,
.last_valid_addr = 3071,
.first_valid_addr = 0,
@@ -1326,7 +1320,7 @@ static void vcap_api_encode_rule_test(struct kunit *test)
};
struct vcap_rule *rule;
struct vcap_rule_internal *ri;
- int vcap_chain_id = 10005;
+ int vcap_chain_id = 8000000;
enum vcap_user user = VCAP_USER_VCAP_UTIL;
u16 priority = 10;
int id = 100;
@@ -1343,8 +1337,8 @@ static void vcap_api_encode_rule_test(struct kunit *test)
u32 port_mask_rng_mask = 0x0f;
u32 igr_port_mask_value = 0xffabcd01;
u32 igr_port_mask_mask = ~0;
- /* counter is not written yet, so it is not in expwriteaddr */
- u32 expwriteaddr[] = {792, 793, 794, 795, 796, 797, 0};
+ /* counter is written as the first operation */
+ u32 expwriteaddr[] = {792, 792, 793, 794, 795, 796, 797};
int idx;
vcap_test_api_init(&is2_admin);
@@ -1398,6 +1392,11 @@ static void vcap_api_encode_rule_test(struct kunit *test)
KUNIT_EXPECT_EQ(test, 2, ri->keyset_sw_regs);
KUNIT_EXPECT_EQ(test, 4, ri->actionset_sw_regs);
+ /* Enable lookup, so the rule will be written */
+ ret = vcap_enable_lookups(&test_vctrl, &test_netdev, 0,
+ rule->vcap_chain_id, rule->cookie, true);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+
/* Add rule with write callback */
ret = vcap_add_rule(rule);
KUNIT_EXPECT_EQ(test, 0, ret);
@@ -1872,58 +1871,56 @@ static void vcap_api_next_lookup_basic_test(struct kunit *test)
ret = vcap_is_next_lookup(&test_vctrl, 8300000, 8301000);
KUNIT_EXPECT_EQ(test, false, ret);
ret = vcap_is_next_lookup(&test_vctrl, 8300000, 8401000);
- KUNIT_EXPECT_EQ(test, true, ret);
+ KUNIT_EXPECT_EQ(test, false, ret);
}
static void vcap_api_next_lookup_advanced_test(struct kunit *test)
{
- struct vcap_admin admin1 = {
+ struct vcap_admin admin[] = {
+ {
.vtype = VCAP_TYPE_IS0,
.vinst = 0,
.first_cid = 1000000,
.last_cid = 1199999,
.lookups = 6,
.lookups_per_instance = 2,
- };
- struct vcap_admin admin2 = {
+ }, {
.vtype = VCAP_TYPE_IS0,
.vinst = 1,
.first_cid = 1200000,
.last_cid = 1399999,
.lookups = 6,
.lookups_per_instance = 2,
- };
- struct vcap_admin admin3 = {
+ }, {
.vtype = VCAP_TYPE_IS0,
.vinst = 2,
.first_cid = 1400000,
.last_cid = 1599999,
.lookups = 6,
.lookups_per_instance = 2,
- };
- struct vcap_admin admin4 = {
+ }, {
.vtype = VCAP_TYPE_IS2,
.vinst = 0,
.first_cid = 8000000,
.last_cid = 8199999,
.lookups = 4,
.lookups_per_instance = 2,
- };
- struct vcap_admin admin5 = {
+ }, {
.vtype = VCAP_TYPE_IS2,
.vinst = 1,
.first_cid = 8200000,
.last_cid = 8399999,
.lookups = 4,
.lookups_per_instance = 2,
+ }
};
bool ret;
- vcap_test_api_init(&admin1);
- list_add_tail(&admin2.list, &test_vctrl.list);
- list_add_tail(&admin3.list, &test_vctrl.list);
- list_add_tail(&admin4.list, &test_vctrl.list);
- list_add_tail(&admin5.list, &test_vctrl.list);
+ vcap_test_api_init(&admin[0]);
+ list_add_tail(&admin[1].list, &test_vctrl.list);
+ list_add_tail(&admin[2].list, &test_vctrl.list);
+ list_add_tail(&admin[3].list, &test_vctrl.list);
+ list_add_tail(&admin[4].list, &test_vctrl.list);
ret = vcap_is_next_lookup(&test_vctrl, 1000000, 1001000);
KUNIT_EXPECT_EQ(test, false, ret);
@@ -1933,9 +1930,9 @@ static void vcap_api_next_lookup_advanced_test(struct kunit *test)
ret = vcap_is_next_lookup(&test_vctrl, 1100000, 1201000);
KUNIT_EXPECT_EQ(test, true, ret);
ret = vcap_is_next_lookup(&test_vctrl, 1100000, 1301000);
- KUNIT_EXPECT_EQ(test, false, ret);
+ KUNIT_EXPECT_EQ(test, true, ret);
ret = vcap_is_next_lookup(&test_vctrl, 1100000, 8101000);
- KUNIT_EXPECT_EQ(test, false, ret);
+ KUNIT_EXPECT_EQ(test, true, ret);
ret = vcap_is_next_lookup(&test_vctrl, 1300000, 1401000);
KUNIT_EXPECT_EQ(test, true, ret);
ret = vcap_is_next_lookup(&test_vctrl, 1400000, 1501000);
@@ -1951,7 +1948,7 @@ static void vcap_api_next_lookup_advanced_test(struct kunit *test)
ret = vcap_is_next_lookup(&test_vctrl, 8300000, 8301000);
KUNIT_EXPECT_EQ(test, false, ret);
ret = vcap_is_next_lookup(&test_vctrl, 8300000, 8401000);
- KUNIT_EXPECT_EQ(test, true, ret);
+ KUNIT_EXPECT_EQ(test, false, ret);
}
static void vcap_api_filter_unsupported_keys_test(struct kunit *test)
@@ -2146,6 +2143,71 @@ static void vcap_api_filter_keylist_test(struct kunit *test)
KUNIT_EXPECT_EQ(test, 26, idx);
}
+static void vcap_api_rule_chain_path_test(struct kunit *test)
+{
+ struct vcap_admin admin1 = {
+ .vtype = VCAP_TYPE_IS0,
+ .vinst = 0,
+ .first_cid = 1000000,
+ .last_cid = 1199999,
+ .lookups = 6,
+ .lookups_per_instance = 2,
+ };
+ struct vcap_enabled_port eport3 = {
+ .ndev = &test_netdev,
+ .cookie = 0x100,
+ .src_cid = 0,
+ .dst_cid = 1000000,
+ };
+ struct vcap_enabled_port eport2 = {
+ .ndev = &test_netdev,
+ .cookie = 0x200,
+ .src_cid = 1000000,
+ .dst_cid = 1100000,
+ };
+ struct vcap_enabled_port eport1 = {
+ .ndev = &test_netdev,
+ .cookie = 0x300,
+ .src_cid = 1100000,
+ .dst_cid = 8000000,
+ };
+ bool ret;
+ int chain;
+
+ vcap_test_api_init(&admin1);
+ list_add_tail(&eport1.list, &admin1.enabled);
+ list_add_tail(&eport2.list, &admin1.enabled);
+ list_add_tail(&eport3.list, &admin1.enabled);
+
+ ret = vcap_path_exist(&test_vctrl, &test_netdev, 1000000);
+ KUNIT_EXPECT_EQ(test, true, ret);
+
+ ret = vcap_path_exist(&test_vctrl, &test_netdev, 1100000);
+ KUNIT_EXPECT_EQ(test, true, ret);
+
+ ret = vcap_path_exist(&test_vctrl, &test_netdev, 1200000);
+ KUNIT_EXPECT_EQ(test, false, ret);
+
+ chain = vcap_get_next_chain(&test_vctrl, &test_netdev, 0);
+ KUNIT_EXPECT_EQ(test, 1000000, chain);
+
+ chain = vcap_get_next_chain(&test_vctrl, &test_netdev, 1000000);
+ KUNIT_EXPECT_EQ(test, 1100000, chain);
+
+ chain = vcap_get_next_chain(&test_vctrl, &test_netdev, 1100000);
+ KUNIT_EXPECT_EQ(test, 8000000, chain);
+}
+
+static struct kunit_case vcap_api_rule_enable_test_cases[] = {
+ KUNIT_CASE(vcap_api_rule_chain_path_test),
+ {}
+};
+
+static struct kunit_suite vcap_api_rule_enable_test_suite = {
+ .name = "VCAP_API_Rule_Enable_Testsuite",
+ .test_cases = vcap_api_rule_enable_test_cases,
+};
+
static struct kunit_suite vcap_api_rule_remove_test_suite = {
.name = "VCAP_API_Rule_Remove_Testsuite",
.test_cases = vcap_api_rule_remove_test_cases,
@@ -2236,6 +2298,7 @@ static struct kunit_suite vcap_api_encoding_test_suite = {
.test_cases = vcap_api_encoding_test_cases,
};
+kunit_test_suite(vcap_api_rule_enable_test_suite);
kunit_test_suite(vcap_api_rule_remove_test_suite);
kunit_test_suite(vcap_api_rule_insert_test_suite);
kunit_test_suite(vcap_api_rule_counter_test_suite);
diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api_private.h b/drivers/net/ethernet/microchip/vcap/vcap_api_private.h
index 4fd21da97679..df81d9ff502b 100644
--- a/drivers/net/ethernet/microchip/vcap/vcap_api_private.h
+++ b/drivers/net/ethernet/microchip/vcap/vcap_api_private.h
@@ -13,6 +13,12 @@
#define to_intrule(rule) container_of((rule), struct vcap_rule_internal, data)
+enum vcap_rule_state {
+ VCAP_RS_PERMANENT, /* the rule is always stored in HW */
+ VCAP_RS_ENABLED, /* enabled in HW but can be disabled */
+ VCAP_RS_DISABLED, /* disabled (stored in SW) and can be enabled */
+};
+
/* Private VCAP API rule data */
struct vcap_rule_internal {
struct vcap_rule data; /* provided by the client */
@@ -29,6 +35,7 @@ struct vcap_rule_internal {
u32 addr; /* address in the VCAP at insertion */
u32 counter_id; /* counter id (if a dedicated counter is available) */
struct vcap_counter counter; /* last read counter value */
+ enum vcap_rule_state state; /* rule storage state */
};
/* Bit iterator for the VCAP cache streams */
@@ -43,8 +50,6 @@ struct vcap_stream_iter {
/* Check that the control has a valid set of callbacks */
int vcap_api_check(struct vcap_control *ctrl);
-/* Make a shallow copy of the rule without the fields */
-struct vcap_rule_internal *vcap_dup_rule(struct vcap_rule_internal *ri);
/* Erase the VCAP cache area used or encoding and decoding */
void vcap_erase_cache(struct vcap_rule_internal *ri);
@@ -110,4 +115,10 @@ int vcap_find_keystream_keysets(struct vcap_control *vctrl, enum vcap_type vt,
u32 *keystream, u32 *mskstream, bool mask,
int sw_max, struct vcap_keyset_list *kslist);
+/* Get the keysets that matches the rule key type/mask */
+int vcap_rule_get_keysets(struct vcap_rule_internal *ri,
+ struct vcap_keyset_list *matches);
+/* Decode a rule from the VCAP cache and return a copy */
+struct vcap_rule *vcap_decode_rule(struct vcap_rule_internal *elem);
+
#endif /* __VCAP_API_PRIVATE__ */
diff --git a/drivers/net/ethernet/microchip/vcap/vcap_model_kunit.c b/drivers/net/ethernet/microchip/vcap/vcap_model_kunit.c
index 5d681d2697cd..5dbfc0d0c369 100644
--- a/drivers/net/ethernet/microchip/vcap/vcap_model_kunit.c
+++ b/drivers/net/ethernet/microchip/vcap/vcap_model_kunit.c
@@ -1,6 +1,10 @@
// SPDX-License-Identifier: BSD-3-Clause
-/* Copyright (C) 2022 Microchip Technology Inc. and its subsidiaries.
- * Microchip VCAP API Test VCAP Model Data
+/* Copyright (C) 2023 Microchip Technology Inc. and its subsidiaries.
+ * Microchip VCAP test model interface for kunit testing
+ */
+
+/* This file is autogenerated by cml-utils 2023-02-10 11:16:00 +0100.
+ * Commit ID: c30fb4bf0281cd4a7133bdab6682f9e43c872ada
*/
#include <linux/types.h>
@@ -10,177 +14,6 @@
#include "vcap_model_kunit.h"
/* keyfields */
-static const struct vcap_field is0_mll_keyfield[] = {
- [VCAP_KF_TYPE] = {
- .type = VCAP_FIELD_U32,
- .offset = 0,
- .width = 2,
- },
- [VCAP_KF_LOOKUP_FIRST_IS] = {
- .type = VCAP_FIELD_BIT,
- .offset = 2,
- .width = 1,
- },
- [VCAP_KF_IF_IGR_PORT] = {
- .type = VCAP_FIELD_U32,
- .offset = 3,
- .width = 7,
- },
- [VCAP_KF_8021Q_VLAN_TAGS] = {
- .type = VCAP_FIELD_U32,
- .offset = 10,
- .width = 3,
- },
- [VCAP_KF_8021Q_TPID0] = {
- .type = VCAP_FIELD_U32,
- .offset = 13,
- .width = 3,
- },
- [VCAP_KF_8021Q_VID0] = {
- .type = VCAP_FIELD_U32,
- .offset = 16,
- .width = 12,
- },
- [VCAP_KF_8021Q_TPID1] = {
- .type = VCAP_FIELD_U32,
- .offset = 28,
- .width = 3,
- },
- [VCAP_KF_8021Q_VID1] = {
- .type = VCAP_FIELD_U32,
- .offset = 31,
- .width = 12,
- },
- [VCAP_KF_L2_DMAC] = {
- .type = VCAP_FIELD_U48,
- .offset = 43,
- .width = 48,
- },
- [VCAP_KF_L2_SMAC] = {
- .type = VCAP_FIELD_U48,
- .offset = 91,
- .width = 48,
- },
- [VCAP_KF_ETYPE_MPLS] = {
- .type = VCAP_FIELD_U32,
- .offset = 139,
- .width = 2,
- },
- [VCAP_KF_L4_RNG] = {
- .type = VCAP_FIELD_U32,
- .offset = 141,
- .width = 8,
- },
-};
-
-static const struct vcap_field is0_tri_vid_keyfield[] = {
- [VCAP_KF_TYPE] = {
- .type = VCAP_FIELD_U32,
- .offset = 0,
- .width = 2,
- },
- [VCAP_KF_LOOKUP_FIRST_IS] = {
- .type = VCAP_FIELD_BIT,
- .offset = 2,
- .width = 1,
- },
- [VCAP_KF_IF_IGR_PORT] = {
- .type = VCAP_FIELD_U32,
- .offset = 3,
- .width = 7,
- },
- [VCAP_KF_LOOKUP_GEN_IDX_SEL] = {
- .type = VCAP_FIELD_U32,
- .offset = 10,
- .width = 2,
- },
- [VCAP_KF_LOOKUP_GEN_IDX] = {
- .type = VCAP_FIELD_U32,
- .offset = 12,
- .width = 12,
- },
- [VCAP_KF_8021Q_VLAN_TAGS] = {
- .type = VCAP_FIELD_U32,
- .offset = 24,
- .width = 3,
- },
- [VCAP_KF_8021Q_TPID0] = {
- .type = VCAP_FIELD_U32,
- .offset = 27,
- .width = 3,
- },
- [VCAP_KF_8021Q_PCP0] = {
- .type = VCAP_FIELD_U32,
- .offset = 30,
- .width = 3,
- },
- [VCAP_KF_8021Q_DEI0] = {
- .type = VCAP_FIELD_BIT,
- .offset = 33,
- .width = 1,
- },
- [VCAP_KF_8021Q_VID0] = {
- .type = VCAP_FIELD_U32,
- .offset = 34,
- .width = 12,
- },
- [VCAP_KF_8021Q_TPID1] = {
- .type = VCAP_FIELD_U32,
- .offset = 46,
- .width = 3,
- },
- [VCAP_KF_8021Q_PCP1] = {
- .type = VCAP_FIELD_U32,
- .offset = 49,
- .width = 3,
- },
- [VCAP_KF_8021Q_DEI1] = {
- .type = VCAP_FIELD_BIT,
- .offset = 52,
- .width = 1,
- },
- [VCAP_KF_8021Q_VID1] = {
- .type = VCAP_FIELD_U32,
- .offset = 53,
- .width = 12,
- },
- [VCAP_KF_8021Q_TPID2] = {
- .type = VCAP_FIELD_U32,
- .offset = 65,
- .width = 3,
- },
- [VCAP_KF_8021Q_PCP2] = {
- .type = VCAP_FIELD_U32,
- .offset = 68,
- .width = 3,
- },
- [VCAP_KF_8021Q_DEI2] = {
- .type = VCAP_FIELD_BIT,
- .offset = 71,
- .width = 1,
- },
- [VCAP_KF_8021Q_VID2] = {
- .type = VCAP_FIELD_U32,
- .offset = 72,
- .width = 12,
- },
- [VCAP_KF_L4_RNG] = {
- .type = VCAP_FIELD_U32,
- .offset = 84,
- .width = 8,
- },
- [VCAP_KF_OAM_Y1731_IS] = {
- .type = VCAP_FIELD_BIT,
- .offset = 92,
- .width = 1,
- },
- [VCAP_KF_OAM_MEL_FLAGS] = {
- .type = VCAP_FIELD_U32,
- .offset = 93,
- .width = 7,
- },
-};
-
static const struct vcap_field is0_ll_full_keyfield[] = {
[VCAP_KF_TYPE] = {
.type = VCAP_FIELD_U32,
@@ -344,194 +177,6 @@ static const struct vcap_field is0_ll_full_keyfield[] = {
},
};
-static const struct vcap_field is0_normal_keyfield[] = {
- [VCAP_KF_TYPE] = {
- .type = VCAP_FIELD_U32,
- .offset = 0,
- .width = 2,
- },
- [VCAP_KF_LOOKUP_FIRST_IS] = {
- .type = VCAP_FIELD_BIT,
- .offset = 2,
- .width = 1,
- },
- [VCAP_KF_LOOKUP_GEN_IDX_SEL] = {
- .type = VCAP_FIELD_U32,
- .offset = 3,
- .width = 2,
- },
- [VCAP_KF_LOOKUP_GEN_IDX] = {
- .type = VCAP_FIELD_U32,
- .offset = 5,
- .width = 12,
- },
- [VCAP_KF_IF_IGR_PORT_MASK_SEL] = {
- .type = VCAP_FIELD_U32,
- .offset = 17,
- .width = 2,
- },
- [VCAP_KF_IF_IGR_PORT_MASK] = {
- .type = VCAP_FIELD_U72,
- .offset = 19,
- .width = 65,
- },
- [VCAP_KF_L2_MC_IS] = {
- .type = VCAP_FIELD_BIT,
- .offset = 84,
- .width = 1,
- },
- [VCAP_KF_L2_BC_IS] = {
- .type = VCAP_FIELD_BIT,
- .offset = 85,
- .width = 1,
- },
- [VCAP_KF_8021Q_VLAN_TAGS] = {
- .type = VCAP_FIELD_U32,
- .offset = 86,
- .width = 3,
- },
- [VCAP_KF_8021Q_TPID0] = {
- .type = VCAP_FIELD_U32,
- .offset = 89,
- .width = 3,
- },
- [VCAP_KF_8021Q_PCP0] = {
- .type = VCAP_FIELD_U32,
- .offset = 92,
- .width = 3,
- },
- [VCAP_KF_8021Q_DEI0] = {
- .type = VCAP_FIELD_BIT,
- .offset = 95,
- .width = 1,
- },
- [VCAP_KF_8021Q_VID0] = {
- .type = VCAP_FIELD_U32,
- .offset = 96,
- .width = 12,
- },
- [VCAP_KF_8021Q_TPID1] = {
- .type = VCAP_FIELD_U32,
- .offset = 108,
- .width = 3,
- },
- [VCAP_KF_8021Q_PCP1] = {
- .type = VCAP_FIELD_U32,
- .offset = 111,
- .width = 3,
- },
- [VCAP_KF_8021Q_DEI1] = {
- .type = VCAP_FIELD_BIT,
- .offset = 114,
- .width = 1,
- },
- [VCAP_KF_8021Q_VID1] = {
- .type = VCAP_FIELD_U32,
- .offset = 115,
- .width = 12,
- },
- [VCAP_KF_8021Q_TPID2] = {
- .type = VCAP_FIELD_U32,
- .offset = 127,
- .width = 3,
- },
- [VCAP_KF_8021Q_PCP2] = {
- .type = VCAP_FIELD_U32,
- .offset = 130,
- .width = 3,
- },
- [VCAP_KF_8021Q_DEI2] = {
- .type = VCAP_FIELD_BIT,
- .offset = 133,
- .width = 1,
- },
- [VCAP_KF_8021Q_VID2] = {
- .type = VCAP_FIELD_U32,
- .offset = 134,
- .width = 12,
- },
- [VCAP_KF_DST_ENTRY] = {
- .type = VCAP_FIELD_BIT,
- .offset = 146,
- .width = 1,
- },
- [VCAP_KF_L2_SMAC] = {
- .type = VCAP_FIELD_U48,
- .offset = 147,
- .width = 48,
- },
- [VCAP_KF_IP_MC_IS] = {
- .type = VCAP_FIELD_BIT,
- .offset = 195,
- .width = 1,
- },
- [VCAP_KF_ETYPE_LEN_IS] = {
- .type = VCAP_FIELD_BIT,
- .offset = 196,
- .width = 1,
- },
- [VCAP_KF_ETYPE] = {
- .type = VCAP_FIELD_U32,
- .offset = 197,
- .width = 16,
- },
- [VCAP_KF_IP_SNAP_IS] = {
- .type = VCAP_FIELD_BIT,
- .offset = 213,
- .width = 1,
- },
- [VCAP_KF_IP4_IS] = {
- .type = VCAP_FIELD_BIT,
- .offset = 214,
- .width = 1,
- },
- [VCAP_KF_L3_FRAGMENT_TYPE] = {
- .type = VCAP_FIELD_U32,
- .offset = 215,
- .width = 2,
- },
- [VCAP_KF_L3_FRAG_INVLD_L4_LEN] = {
- .type = VCAP_FIELD_BIT,
- .offset = 217,
- .width = 1,
- },
- [VCAP_KF_L3_OPTIONS_IS] = {
- .type = VCAP_FIELD_BIT,
- .offset = 218,
- .width = 1,
- },
- [VCAP_KF_L3_DSCP] = {
- .type = VCAP_FIELD_U32,
- .offset = 219,
- .width = 6,
- },
- [VCAP_KF_L3_IP4_SIP] = {
- .type = VCAP_FIELD_U32,
- .offset = 225,
- .width = 32,
- },
- [VCAP_KF_TCP_UDP_IS] = {
- .type = VCAP_FIELD_BIT,
- .offset = 257,
- .width = 1,
- },
- [VCAP_KF_TCP_IS] = {
- .type = VCAP_FIELD_BIT,
- .offset = 258,
- .width = 1,
- },
- [VCAP_KF_L4_SPORT] = {
- .type = VCAP_FIELD_U32,
- .offset = 259,
- .width = 16,
- },
- [VCAP_KF_L4_RNG] = {
- .type = VCAP_FIELD_U32,
- .offset = 275,
- .width = 8,
- },
-};
-
static const struct vcap_field is0_normal_7tuple_keyfield[] = {
[VCAP_KF_TYPE] = {
.type = VCAP_FIELD_BIT,
@@ -1095,16 +740,6 @@ static const struct vcap_field is2_mac_etype_keyfield[] = {
.offset = 85,
.width = 1,
},
- [VCAP_KF_L3_SMAC_SIP_MATCH] = {
- .type = VCAP_FIELD_BIT,
- .offset = 86,
- .width = 1,
- },
- [VCAP_KF_L3_DMAC_DIP_MATCH] = {
- .type = VCAP_FIELD_BIT,
- .offset = 87,
- .width = 1,
- },
[VCAP_KF_L3_RT_IS] = {
.type = VCAP_FIELD_BIT,
.offset = 88,
@@ -1381,16 +1016,6 @@ static const struct vcap_field is2_ip4_tcp_udp_keyfield[] = {
.offset = 85,
.width = 1,
},
- [VCAP_KF_L3_SMAC_SIP_MATCH] = {
- .type = VCAP_FIELD_BIT,
- .offset = 86,
- .width = 1,
- },
- [VCAP_KF_L3_DMAC_DIP_MATCH] = {
- .type = VCAP_FIELD_BIT,
- .offset = 87,
- .width = 1,
- },
[VCAP_KF_L3_RT_IS] = {
.type = VCAP_FIELD_BIT,
.offset = 88,
@@ -1594,16 +1219,6 @@ static const struct vcap_field is2_ip4_other_keyfield[] = {
.offset = 85,
.width = 1,
},
- [VCAP_KF_L3_SMAC_SIP_MATCH] = {
- .type = VCAP_FIELD_BIT,
- .offset = 86,
- .width = 1,
- },
- [VCAP_KF_L3_DMAC_DIP_MATCH] = {
- .type = VCAP_FIELD_BIT,
- .offset = 87,
- .width = 1,
- },
[VCAP_KF_L3_RT_IS] = {
.type = VCAP_FIELD_BIT,
.offset = 88,
@@ -1757,26 +1372,11 @@ static const struct vcap_field is2_ip6_std_keyfield[] = {
.offset = 85,
.width = 1,
},
- [VCAP_KF_L3_SMAC_SIP_MATCH] = {
- .type = VCAP_FIELD_BIT,
- .offset = 86,
- .width = 1,
- },
- [VCAP_KF_L3_DMAC_DIP_MATCH] = {
- .type = VCAP_FIELD_BIT,
- .offset = 87,
- .width = 1,
- },
[VCAP_KF_L3_RT_IS] = {
.type = VCAP_FIELD_BIT,
.offset = 88,
.width = 1,
},
- [VCAP_KF_L3_DST_IS] = {
- .type = VCAP_FIELD_BIT,
- .offset = 89,
- .width = 1,
- },
[VCAP_KF_L3_TTL_GT0] = {
.type = VCAP_FIELD_BIT,
.offset = 90,
@@ -1890,16 +1490,6 @@ static const struct vcap_field is2_ip_7tuple_keyfield[] = {
.offset = 116,
.width = 1,
},
- [VCAP_KF_L3_SMAC_SIP_MATCH] = {
- .type = VCAP_FIELD_BIT,
- .offset = 117,
- .width = 1,
- },
- [VCAP_KF_L3_DMAC_DIP_MATCH] = {
- .type = VCAP_FIELD_BIT,
- .offset = 118,
- .width = 1,
- },
[VCAP_KF_L3_RT_IS] = {
.type = VCAP_FIELD_BIT,
.offset = 119,
@@ -2022,69 +1612,6 @@ static const struct vcap_field is2_ip_7tuple_keyfield[] = {
},
};
-static const struct vcap_field is2_ip6_vid_keyfield[] = {
- [VCAP_KF_TYPE] = {
- .type = VCAP_FIELD_U32,
- .offset = 0,
- .width = 4,
- },
- [VCAP_KF_LOOKUP_FIRST_IS] = {
- .type = VCAP_FIELD_BIT,
- .offset = 4,
- .width = 1,
- },
- [VCAP_KF_LOOKUP_PAG] = {
- .type = VCAP_FIELD_U32,
- .offset = 5,
- .width = 8,
- },
- [VCAP_KF_ISDX_GT0_IS] = {
- .type = VCAP_FIELD_BIT,
- .offset = 13,
- .width = 1,
- },
- [VCAP_KF_ISDX_CLS] = {
- .type = VCAP_FIELD_U32,
- .offset = 14,
- .width = 12,
- },
- [VCAP_KF_8021Q_VID_CLS] = {
- .type = VCAP_FIELD_U32,
- .offset = 26,
- .width = 13,
- },
- [VCAP_KF_L3_SMAC_SIP_MATCH] = {
- .type = VCAP_FIELD_BIT,
- .offset = 39,
- .width = 1,
- },
- [VCAP_KF_L3_DMAC_DIP_MATCH] = {
- .type = VCAP_FIELD_BIT,
- .offset = 40,
- .width = 1,
- },
- [VCAP_KF_L3_RT_IS] = {
- .type = VCAP_FIELD_BIT,
- .offset = 41,
- .width = 1,
- },
- [VCAP_KF_L3_DST_IS] = {
- .type = VCAP_FIELD_BIT,
- .offset = 42,
- .width = 1,
- },
- [VCAP_KF_L3_IP6_DIP] = {
- .type = VCAP_FIELD_U128,
- .offset = 43,
- .width = 128,
- },
- [VCAP_KF_L3_IP6_SIP] = {
- .type = VCAP_FIELD_U128,
- .offset = 171,
- .width = 128,
- },
-};
-
static const struct vcap_field es2_mac_etype_keyfield[] = {
[VCAP_KF_TYPE] = {
.type = VCAP_FIELD_U32,
@@ -2096,16 +1623,6 @@ static const struct vcap_field es2_mac_etype_keyfield[] = {
.offset = 3,
.width = 1,
},
- [VCAP_KF_ACL_GRP_ID] = {
- .type = VCAP_FIELD_U32,
- .offset = 4,
- .width = 8,
- },
- [VCAP_KF_PROT_ACTIVE] = {
- .type = VCAP_FIELD_BIT,
- .offset = 12,
- .width = 1,
- },
[VCAP_KF_L2_MC_IS] = {
.type = VCAP_FIELD_BIT,
.offset = 13,
@@ -2181,16 +1698,6 @@ static const struct vcap_field es2_mac_etype_keyfield[] = {
.offset = 95,
.width = 1,
},
- [VCAP_KF_ES0_ISDX_KEY_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 96,
- .width = 1,
- },
- [VCAP_KF_MIRROR_ENA] = {
- .type = VCAP_FIELD_U32,
- .offset = 97,
- .width = 2,
- },
[VCAP_KF_L2_DMAC] = {
.type = VCAP_FIELD_U48,
.offset = 99,
@@ -2239,16 +1746,6 @@ static const struct vcap_field es2_arp_keyfield[] = {
.offset = 3,
.width = 1,
},
- [VCAP_KF_ACL_GRP_ID] = {
- .type = VCAP_FIELD_U32,
- .offset = 4,
- .width = 8,
- },
- [VCAP_KF_PROT_ACTIVE] = {
- .type = VCAP_FIELD_BIT,
- .offset = 12,
- .width = 1,
- },
[VCAP_KF_L2_MC_IS] = {
.type = VCAP_FIELD_BIT,
.offset = 13,
@@ -2319,16 +1816,6 @@ static const struct vcap_field es2_arp_keyfield[] = {
.offset = 94,
.width = 1,
},
- [VCAP_KF_ES0_ISDX_KEY_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 95,
- .width = 1,
- },
- [VCAP_KF_MIRROR_ENA] = {
- .type = VCAP_FIELD_U32,
- .offset = 96,
- .width = 2,
- },
[VCAP_KF_L2_SMAC] = {
.type = VCAP_FIELD_U48,
.offset = 98,
@@ -2397,16 +1884,6 @@ static const struct vcap_field es2_ip4_tcp_udp_keyfield[] = {
.offset = 3,
.width = 1,
},
- [VCAP_KF_ACL_GRP_ID] = {
- .type = VCAP_FIELD_U32,
- .offset = 4,
- .width = 8,
- },
- [VCAP_KF_PROT_ACTIVE] = {
- .type = VCAP_FIELD_BIT,
- .offset = 12,
- .width = 1,
- },
[VCAP_KF_L2_MC_IS] = {
.type = VCAP_FIELD_BIT,
.offset = 13,
@@ -2482,16 +1959,6 @@ static const struct vcap_field es2_ip4_tcp_udp_keyfield[] = {
.offset = 95,
.width = 1,
},
- [VCAP_KF_ES0_ISDX_KEY_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 96,
- .width = 1,
- },
- [VCAP_KF_MIRROR_ENA] = {
- .type = VCAP_FIELD_U32,
- .offset = 97,
- .width = 2,
- },
[VCAP_KF_IP4_IS] = {
.type = VCAP_FIELD_BIT,
.offset = 99,
@@ -2610,16 +2077,6 @@ static const struct vcap_field es2_ip4_other_keyfield[] = {
.offset = 3,
.width = 1,
},
- [VCAP_KF_ACL_GRP_ID] = {
- .type = VCAP_FIELD_U32,
- .offset = 4,
- .width = 8,
- },
- [VCAP_KF_PROT_ACTIVE] = {
- .type = VCAP_FIELD_BIT,
- .offset = 12,
- .width = 1,
- },
[VCAP_KF_L2_MC_IS] = {
.type = VCAP_FIELD_BIT,
.offset = 13,
@@ -2695,16 +2152,6 @@ static const struct vcap_field es2_ip4_other_keyfield[] = {
.offset = 95,
.width = 1,
},
- [VCAP_KF_ES0_ISDX_KEY_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 96,
- .width = 1,
- },
- [VCAP_KF_MIRROR_ENA] = {
- .type = VCAP_FIELD_U32,
- .offset = 97,
- .width = 2,
- },
[VCAP_KF_IP4_IS] = {
.type = VCAP_FIELD_BIT,
.offset = 99,
@@ -2763,16 +2210,6 @@ static const struct vcap_field es2_ip_7tuple_keyfield[] = {
.offset = 0,
.width = 1,
},
- [VCAP_KF_ACL_GRP_ID] = {
- .type = VCAP_FIELD_U32,
- .offset = 1,
- .width = 8,
- },
- [VCAP_KF_PROT_ACTIVE] = {
- .type = VCAP_FIELD_BIT,
- .offset = 9,
- .width = 1,
- },
[VCAP_KF_L2_MC_IS] = {
.type = VCAP_FIELD_BIT,
.offset = 10,
@@ -2848,16 +2285,6 @@ static const struct vcap_field es2_ip_7tuple_keyfield[] = {
.offset = 92,
.width = 1,
},
- [VCAP_KF_ES0_ISDX_KEY_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 93,
- .width = 1,
- },
- [VCAP_KF_MIRROR_ENA] = {
- .type = VCAP_FIELD_U32,
- .offset = 94,
- .width = 2,
- },
[VCAP_KF_L2_DMAC] = {
.type = VCAP_FIELD_U48,
.offset = 96,
@@ -2970,6 +2397,124 @@ static const struct vcap_field es2_ip_7tuple_keyfield[] = {
},
};
+static const struct vcap_field es2_ip6_std_keyfield[] = {
+ [VCAP_KF_TYPE] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 0,
+ .width = 3,
+ },
+ [VCAP_KF_LOOKUP_FIRST_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 3,
+ .width = 1,
+ },
+ [VCAP_KF_L2_MC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 13,
+ .width = 1,
+ },
+ [VCAP_KF_L2_BC_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 14,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_GT0_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 15,
+ .width = 1,
+ },
+ [VCAP_KF_ISDX_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 16,
+ .width = 12,
+ },
+ [VCAP_KF_8021Q_VLAN_TAGGED_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 28,
+ .width = 1,
+ },
+ [VCAP_KF_8021Q_VID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 29,
+ .width = 13,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 42,
+ .width = 3,
+ },
+ [VCAP_KF_IF_EGR_PORT_MASK] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 45,
+ .width = 32,
+ },
+ [VCAP_KF_IF_IGR_PORT_SEL] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 77,
+ .width = 1,
+ },
+ [VCAP_KF_IF_IGR_PORT] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 78,
+ .width = 9,
+ },
+ [VCAP_KF_8021Q_PCP_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 87,
+ .width = 3,
+ },
+ [VCAP_KF_8021Q_DEI_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 90,
+ .width = 1,
+ },
+ [VCAP_KF_COSID_CLS] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 91,
+ .width = 3,
+ },
+ [VCAP_KF_L3_DPL_CLS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 94,
+ .width = 1,
+ },
+ [VCAP_KF_L3_RT_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 95,
+ .width = 1,
+ },
+ [VCAP_KF_L3_TTL_GT0] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 99,
+ .width = 1,
+ },
+ [VCAP_KF_L3_IP6_SIP] = {
+ .type = VCAP_FIELD_U128,
+ .offset = 100,
+ .width = 128,
+ },
+ [VCAP_KF_L3_DIP_EQ_SIP_IS] = {
+ .type = VCAP_FIELD_BIT,
+ .offset = 228,
+ .width = 1,
+ },
+ [VCAP_KF_L3_IP_PROTO] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 229,
+ .width = 8,
+ },
+ [VCAP_KF_L4_RNG] = {
+ .type = VCAP_FIELD_U32,
+ .offset = 237,
+ .width = 16,
+ },
+ [VCAP_KF_L3_PAYLOAD] = {
+ .type = VCAP_FIELD_U48,
+ .offset = 253,
+ .width = 40,
+ },
+};
+
static const struct vcap_field es2_ip4_vid_keyfield[] = {
[VCAP_KF_LOOKUP_FIRST_IS] = {
.type = VCAP_FIELD_BIT,
@@ -3046,7 +2591,7 @@ static const struct vcap_field es2_ip4_vid_keyfield[] = {
.offset = 48,
.width = 1,
},
- [VCAP_KF_MIRROR_ENA] = {
+ [VCAP_KF_MIRROR_PROBE] = {
.type = VCAP_FIELD_U32,
.offset = 49,
.width = 2,
@@ -3143,26 +2688,11 @@ static const struct vcap_field es2_ip6_vid_keyfield[] = {
/* keyfield_set */
static const struct vcap_set is0_keyfield_set[] = {
- [VCAP_KFS_MLL] = {
- .type_id = 0,
- .sw_per_item = 3,
- .sw_cnt = 4,
- },
- [VCAP_KFS_TRI_VID] = {
- .type_id = 0,
- .sw_per_item = 2,
- .sw_cnt = 6,
- },
[VCAP_KFS_LL_FULL] = {
.type_id = 0,
.sw_per_item = 6,
.sw_cnt = 2,
},
- [VCAP_KFS_NORMAL] = {
- .type_id = 1,
- .sw_per_item = 6,
- .sw_cnt = 2,
- },
[VCAP_KFS_NORMAL_7TUPLE] = {
.type_id = 0,
.sw_per_item = 12,
@@ -3216,11 +2746,6 @@ static const struct vcap_set is2_keyfield_set[] = {
.sw_per_item = 12,
.sw_cnt = 1,
},
- [VCAP_KFS_IP6_VID] = {
- .type_id = 9,
- .sw_per_item = 6,
- .sw_cnt = 2,
- },
};
static const struct vcap_set es2_keyfield_set[] = {
@@ -3249,6 +2774,11 @@ static const struct vcap_set es2_keyfield_set[] = {
.sw_per_item = 12,
.sw_cnt = 1,
},
+ [VCAP_KFS_IP6_STD] = {
+ .type_id = 4,
+ .sw_per_item = 6,
+ .sw_cnt = 2,
+ },
[VCAP_KFS_IP4_VID] = {
.type_id = -1,
.sw_per_item = 3,
@@ -3263,10 +2793,7 @@ static const struct vcap_set es2_keyfield_set[] = {
/* keyfield_set map */
static const struct vcap_field *is0_keyfield_set_map[] = {
- [VCAP_KFS_MLL] = is0_mll_keyfield,
- [VCAP_KFS_TRI_VID] = is0_tri_vid_keyfield,
[VCAP_KFS_LL_FULL] = is0_ll_full_keyfield,
- [VCAP_KFS_NORMAL] = is0_normal_keyfield,
[VCAP_KFS_NORMAL_7TUPLE] = is0_normal_7tuple_keyfield,
[VCAP_KFS_NORMAL_5TUPLE_IP4] = is0_normal_5tuple_ip4_keyfield,
[VCAP_KFS_PURE_5TUPLE_IP4] = is0_pure_5tuple_ip4_keyfield,
@@ -3280,7 +2807,6 @@ static const struct vcap_field *is2_keyfield_set_map[] = {
[VCAP_KFS_IP4_OTHER] = is2_ip4_other_keyfield,
[VCAP_KFS_IP6_STD] = is2_ip6_std_keyfield,
[VCAP_KFS_IP_7TUPLE] = is2_ip_7tuple_keyfield,
- [VCAP_KFS_IP6_VID] = is2_ip6_vid_keyfield,
};
static const struct vcap_field *es2_keyfield_set_map[] = {
@@ -3289,16 +2815,14 @@ static const struct vcap_field *es2_keyfield_set_map[] = {
[VCAP_KFS_IP4_TCP_UDP] = es2_ip4_tcp_udp_keyfield,
[VCAP_KFS_IP4_OTHER] = es2_ip4_other_keyfield,
[VCAP_KFS_IP_7TUPLE] = es2_ip_7tuple_keyfield,
+ [VCAP_KFS_IP6_STD] = es2_ip6_std_keyfield,
[VCAP_KFS_IP4_VID] = es2_ip4_vid_keyfield,
[VCAP_KFS_IP6_VID] = es2_ip6_vid_keyfield,
};
/* keyfield_set map sizes */
static int is0_keyfield_set_map_size[] = {
- [VCAP_KFS_MLL] = ARRAY_SIZE(is0_mll_keyfield),
- [VCAP_KFS_TRI_VID] = ARRAY_SIZE(is0_tri_vid_keyfield),
[VCAP_KFS_LL_FULL] = ARRAY_SIZE(is0_ll_full_keyfield),
- [VCAP_KFS_NORMAL] = ARRAY_SIZE(is0_normal_keyfield),
[VCAP_KFS_NORMAL_7TUPLE] = ARRAY_SIZE(is0_normal_7tuple_keyfield),
[VCAP_KFS_NORMAL_5TUPLE_IP4] = ARRAY_SIZE(is0_normal_5tuple_ip4_keyfield),
[VCAP_KFS_PURE_5TUPLE_IP4] = ARRAY_SIZE(is0_pure_5tuple_ip4_keyfield),
@@ -3312,7 +2836,6 @@ static int is2_keyfield_set_map_size[] = {
[VCAP_KFS_IP4_OTHER] = ARRAY_SIZE(is2_ip4_other_keyfield),
[VCAP_KFS_IP6_STD] = ARRAY_SIZE(is2_ip6_std_keyfield),
[VCAP_KFS_IP_7TUPLE] = ARRAY_SIZE(is2_ip_7tuple_keyfield),
- [VCAP_KFS_IP6_VID] = ARRAY_SIZE(is2_ip6_vid_keyfield),
};
static int es2_keyfield_set_map_size[] = {
@@ -3321,387 +2844,12 @@ static int es2_keyfield_set_map_size[] = {
[VCAP_KFS_IP4_TCP_UDP] = ARRAY_SIZE(es2_ip4_tcp_udp_keyfield),
[VCAP_KFS_IP4_OTHER] = ARRAY_SIZE(es2_ip4_other_keyfield),
[VCAP_KFS_IP_7TUPLE] = ARRAY_SIZE(es2_ip_7tuple_keyfield),
+ [VCAP_KFS_IP6_STD] = ARRAY_SIZE(es2_ip6_std_keyfield),
[VCAP_KFS_IP4_VID] = ARRAY_SIZE(es2_ip4_vid_keyfield),
[VCAP_KFS_IP6_VID] = ARRAY_SIZE(es2_ip6_vid_keyfield),
};
/* actionfields */
-static const struct vcap_field is0_mlbs_actionfield[] = {
- [VCAP_AF_TYPE] = {
- .type = VCAP_FIELD_BIT,
- .offset = 0,
- .width = 1,
- },
- [VCAP_AF_COSID_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 1,
- .width = 1,
- },
- [VCAP_AF_COSID_VAL] = {
- .type = VCAP_FIELD_U32,
- .offset = 2,
- .width = 3,
- },
- [VCAP_AF_QOS_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 5,
- .width = 1,
- },
- [VCAP_AF_QOS_VAL] = {
- .type = VCAP_FIELD_U32,
- .offset = 6,
- .width = 3,
- },
- [VCAP_AF_DP_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 9,
- .width = 1,
- },
- [VCAP_AF_DP_VAL] = {
- .type = VCAP_FIELD_U32,
- .offset = 10,
- .width = 2,
- },
- [VCAP_AF_MAP_LOOKUP_SEL] = {
- .type = VCAP_FIELD_U32,
- .offset = 12,
- .width = 2,
- },
- [VCAP_AF_MAP_KEY] = {
- .type = VCAP_FIELD_U32,
- .offset = 14,
- .width = 3,
- },
- [VCAP_AF_MAP_IDX] = {
- .type = VCAP_FIELD_U32,
- .offset = 17,
- .width = 9,
- },
- [VCAP_AF_CLS_VID_SEL] = {
- .type = VCAP_FIELD_U32,
- .offset = 26,
- .width = 3,
- },
- [VCAP_AF_GVID_ADD_REPLACE_SEL] = {
- .type = VCAP_FIELD_U32,
- .offset = 29,
- .width = 3,
- },
- [VCAP_AF_VID_VAL] = {
- .type = VCAP_FIELD_U32,
- .offset = 32,
- .width = 13,
- },
- [VCAP_AF_ISDX_ADD_REPLACE_SEL] = {
- .type = VCAP_FIELD_BIT,
- .offset = 45,
- .width = 1,
- },
- [VCAP_AF_ISDX_VAL] = {
- .type = VCAP_FIELD_U32,
- .offset = 46,
- .width = 12,
- },
- [VCAP_AF_FWD_DIS] = {
- .type = VCAP_FIELD_BIT,
- .offset = 58,
- .width = 1,
- },
- [VCAP_AF_CPU_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 59,
- .width = 1,
- },
- [VCAP_AF_CPU_Q] = {
- .type = VCAP_FIELD_U32,
- .offset = 60,
- .width = 3,
- },
- [VCAP_AF_OAM_Y1731_SEL] = {
- .type = VCAP_FIELD_U32,
- .offset = 63,
- .width = 3,
- },
- [VCAP_AF_OAM_TWAMP_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 66,
- .width = 1,
- },
- [VCAP_AF_OAM_IP_BFD_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 67,
- .width = 1,
- },
- [VCAP_AF_TC_LABEL] = {
- .type = VCAP_FIELD_U32,
- .offset = 68,
- .width = 3,
- },
- [VCAP_AF_TTL_LABEL] = {
- .type = VCAP_FIELD_U32,
- .offset = 71,
- .width = 3,
- },
- [VCAP_AF_NUM_VLD_LABELS] = {
- .type = VCAP_FIELD_U32,
- .offset = 74,
- .width = 2,
- },
- [VCAP_AF_FWD_TYPE] = {
- .type = VCAP_FIELD_U32,
- .offset = 76,
- .width = 3,
- },
- [VCAP_AF_MPLS_OAM_TYPE] = {
- .type = VCAP_FIELD_U32,
- .offset = 79,
- .width = 3,
- },
- [VCAP_AF_MPLS_MEP_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 82,
- .width = 1,
- },
- [VCAP_AF_MPLS_MIP_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 83,
- .width = 1,
- },
- [VCAP_AF_MPLS_OAM_FLAVOR] = {
- .type = VCAP_FIELD_BIT,
- .offset = 84,
- .width = 1,
- },
- [VCAP_AF_MPLS_IP_CTRL_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 85,
- .width = 1,
- },
- [VCAP_AF_PAG_OVERRIDE_MASK] = {
- .type = VCAP_FIELD_U32,
- .offset = 86,
- .width = 8,
- },
- [VCAP_AF_PAG_VAL] = {
- .type = VCAP_FIELD_U32,
- .offset = 94,
- .width = 8,
- },
- [VCAP_AF_S2_KEY_SEL_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 102,
- .width = 1,
- },
- [VCAP_AF_S2_KEY_SEL_IDX] = {
- .type = VCAP_FIELD_U32,
- .offset = 103,
- .width = 6,
- },
- [VCAP_AF_PIPELINE_FORCE_ENA] = {
- .type = VCAP_FIELD_U32,
- .offset = 109,
- .width = 2,
- },
- [VCAP_AF_PIPELINE_ACT_SEL] = {
- .type = VCAP_FIELD_BIT,
- .offset = 111,
- .width = 1,
- },
- [VCAP_AF_PIPELINE_PT] = {
- .type = VCAP_FIELD_U32,
- .offset = 112,
- .width = 5,
- },
- [VCAP_AF_NXT_KEY_TYPE] = {
- .type = VCAP_FIELD_U32,
- .offset = 117,
- .width = 5,
- },
- [VCAP_AF_NXT_NORM_W16_OFFSET] = {
- .type = VCAP_FIELD_U32,
- .offset = 122,
- .width = 5,
- },
- [VCAP_AF_NXT_OFFSET_FROM_TYPE] = {
- .type = VCAP_FIELD_U32,
- .offset = 127,
- .width = 2,
- },
- [VCAP_AF_NXT_TYPE_AFTER_OFFSET] = {
- .type = VCAP_FIELD_U32,
- .offset = 129,
- .width = 2,
- },
- [VCAP_AF_NXT_NORMALIZE] = {
- .type = VCAP_FIELD_BIT,
- .offset = 131,
- .width = 1,
- },
- [VCAP_AF_NXT_IDX_CTRL] = {
- .type = VCAP_FIELD_U32,
- .offset = 132,
- .width = 3,
- },
- [VCAP_AF_NXT_IDX] = {
- .type = VCAP_FIELD_U32,
- .offset = 135,
- .width = 12,
- },
-};
-
-static const struct vcap_field is0_mlbs_reduced_actionfield[] = {
- [VCAP_AF_TYPE] = {
- .type = VCAP_FIELD_BIT,
- .offset = 0,
- .width = 1,
- },
- [VCAP_AF_COSID_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 1,
- .width = 1,
- },
- [VCAP_AF_COSID_VAL] = {
- .type = VCAP_FIELD_U32,
- .offset = 2,
- .width = 3,
- },
- [VCAP_AF_QOS_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 5,
- .width = 1,
- },
- [VCAP_AF_QOS_VAL] = {
- .type = VCAP_FIELD_U32,
- .offset = 6,
- .width = 3,
- },
- [VCAP_AF_DP_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 9,
- .width = 1,
- },
- [VCAP_AF_DP_VAL] = {
- .type = VCAP_FIELD_U32,
- .offset = 10,
- .width = 2,
- },
- [VCAP_AF_MAP_LOOKUP_SEL] = {
- .type = VCAP_FIELD_U32,
- .offset = 12,
- .width = 2,
- },
- [VCAP_AF_ISDX_ADD_REPLACE_SEL] = {
- .type = VCAP_FIELD_BIT,
- .offset = 14,
- .width = 1,
- },
- [VCAP_AF_ISDX_VAL] = {
- .type = VCAP_FIELD_U32,
- .offset = 15,
- .width = 12,
- },
- [VCAP_AF_FWD_DIS] = {
- .type = VCAP_FIELD_BIT,
- .offset = 27,
- .width = 1,
- },
- [VCAP_AF_CPU_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 28,
- .width = 1,
- },
- [VCAP_AF_CPU_Q] = {
- .type = VCAP_FIELD_U32,
- .offset = 29,
- .width = 3,
- },
- [VCAP_AF_TC_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 32,
- .width = 1,
- },
- [VCAP_AF_TTL_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 33,
- .width = 1,
- },
- [VCAP_AF_FWD_TYPE] = {
- .type = VCAP_FIELD_U32,
- .offset = 34,
- .width = 3,
- },
- [VCAP_AF_MPLS_OAM_TYPE] = {
- .type = VCAP_FIELD_U32,
- .offset = 37,
- .width = 3,
- },
- [VCAP_AF_MPLS_MEP_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 40,
- .width = 1,
- },
- [VCAP_AF_MPLS_MIP_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 41,
- .width = 1,
- },
- [VCAP_AF_MPLS_OAM_FLAVOR] = {
- .type = VCAP_FIELD_BIT,
- .offset = 42,
- .width = 1,
- },
- [VCAP_AF_MPLS_IP_CTRL_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 43,
- .width = 1,
- },
- [VCAP_AF_PIPELINE_FORCE_ENA] = {
- .type = VCAP_FIELD_U32,
- .offset = 44,
- .width = 2,
- },
- [VCAP_AF_PIPELINE_ACT_SEL] = {
- .type = VCAP_FIELD_BIT,
- .offset = 46,
- .width = 1,
- },
- [VCAP_AF_PIPELINE_PT_REDUCED] = {
- .type = VCAP_FIELD_U32,
- .offset = 47,
- .width = 3,
- },
- [VCAP_AF_NXT_KEY_TYPE] = {
- .type = VCAP_FIELD_U32,
- .offset = 50,
- .width = 5,
- },
- [VCAP_AF_NXT_NORM_W32_OFFSET] = {
- .type = VCAP_FIELD_U32,
- .offset = 55,
- .width = 2,
- },
- [VCAP_AF_NXT_TYPE_AFTER_OFFSET] = {
- .type = VCAP_FIELD_U32,
- .offset = 57,
- .width = 2,
- },
- [VCAP_AF_NXT_NORMALIZE] = {
- .type = VCAP_FIELD_BIT,
- .offset = 59,
- .width = 1,
- },
- [VCAP_AF_NXT_IDX_CTRL] = {
- .type = VCAP_FIELD_U32,
- .offset = 60,
- .width = 3,
- },
- [VCAP_AF_NXT_IDX] = {
- .type = VCAP_FIELD_U32,
- .offset = 63,
- .width = 12,
- },
-};
-
static const struct vcap_field is0_classification_actionfield[] = {
[VCAP_AF_TYPE] = {
.type = VCAP_FIELD_BIT,
@@ -3718,16 +2866,6 @@ static const struct vcap_field is0_classification_actionfield[] = {
.offset = 2,
.width = 6,
},
- [VCAP_AF_COSID_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 8,
- .width = 1,
- },
- [VCAP_AF_COSID_VAL] = {
- .type = VCAP_FIELD_U32,
- .offset = 9,
- .width = 3,
- },
[VCAP_AF_QOS_ENA] = {
.type = VCAP_FIELD_BIT,
.offset = 12,
@@ -3788,46 +2926,11 @@ static const struct vcap_field is0_classification_actionfield[] = {
.offset = 39,
.width = 3,
},
- [VCAP_AF_GVID_ADD_REPLACE_SEL] = {
- .type = VCAP_FIELD_U32,
- .offset = 42,
- .width = 3,
- },
[VCAP_AF_VID_VAL] = {
.type = VCAP_FIELD_U32,
.offset = 45,
.width = 13,
},
- [VCAP_AF_VLAN_POP_CNT_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 58,
- .width = 1,
- },
- [VCAP_AF_VLAN_POP_CNT] = {
- .type = VCAP_FIELD_U32,
- .offset = 59,
- .width = 2,
- },
- [VCAP_AF_VLAN_PUSH_CNT_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 61,
- .width = 1,
- },
- [VCAP_AF_VLAN_PUSH_CNT] = {
- .type = VCAP_FIELD_U32,
- .offset = 62,
- .width = 2,
- },
- [VCAP_AF_TPID_SEL] = {
- .type = VCAP_FIELD_U32,
- .offset = 64,
- .width = 2,
- },
- [VCAP_AF_VLAN_WAS_TAGGED] = {
- .type = VCAP_FIELD_U32,
- .offset = 66,
- .width = 2,
- },
[VCAP_AF_ISDX_ADD_REPLACE_SEL] = {
.type = VCAP_FIELD_BIT,
.offset = 68,
@@ -3838,71 +2941,6 @@ static const struct vcap_field is0_classification_actionfield[] = {
.offset = 69,
.width = 12,
},
- [VCAP_AF_RT_SEL] = {
- .type = VCAP_FIELD_U32,
- .offset = 81,
- .width = 2,
- },
- [VCAP_AF_LPM_AFFIX_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 83,
- .width = 1,
- },
- [VCAP_AF_LPM_AFFIX_VAL] = {
- .type = VCAP_FIELD_U32,
- .offset = 84,
- .width = 10,
- },
- [VCAP_AF_RLEG_DMAC_CHK_DIS] = {
- .type = VCAP_FIELD_BIT,
- .offset = 94,
- .width = 1,
- },
- [VCAP_AF_TTL_DECR_DIS] = {
- .type = VCAP_FIELD_BIT,
- .offset = 95,
- .width = 1,
- },
- [VCAP_AF_L3_MAC_UPDATE_DIS] = {
- .type = VCAP_FIELD_BIT,
- .offset = 96,
- .width = 1,
- },
- [VCAP_AF_FWD_DIS] = {
- .type = VCAP_FIELD_BIT,
- .offset = 97,
- .width = 1,
- },
- [VCAP_AF_CPU_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 98,
- .width = 1,
- },
- [VCAP_AF_CPU_Q] = {
- .type = VCAP_FIELD_U32,
- .offset = 99,
- .width = 3,
- },
- [VCAP_AF_MIP_SEL] = {
- .type = VCAP_FIELD_U32,
- .offset = 102,
- .width = 2,
- },
- [VCAP_AF_OAM_Y1731_SEL] = {
- .type = VCAP_FIELD_U32,
- .offset = 104,
- .width = 3,
- },
- [VCAP_AF_OAM_TWAMP_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 107,
- .width = 1,
- },
- [VCAP_AF_OAM_IP_BFD_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 108,
- .width = 1,
- },
[VCAP_AF_PAG_OVERRIDE_MASK] = {
.type = VCAP_FIELD_U32,
.offset = 109,
@@ -3913,76 +2951,6 @@ static const struct vcap_field is0_classification_actionfield[] = {
.offset = 117,
.width = 8,
},
- [VCAP_AF_S2_KEY_SEL_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 125,
- .width = 1,
- },
- [VCAP_AF_S2_KEY_SEL_IDX] = {
- .type = VCAP_FIELD_U32,
- .offset = 126,
- .width = 6,
- },
- [VCAP_AF_INJ_MASQ_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 132,
- .width = 1,
- },
- [VCAP_AF_INJ_MASQ_PORT] = {
- .type = VCAP_FIELD_U32,
- .offset = 133,
- .width = 7,
- },
- [VCAP_AF_LPORT_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 140,
- .width = 1,
- },
- [VCAP_AF_INJ_MASQ_LPORT] = {
- .type = VCAP_FIELD_U32,
- .offset = 141,
- .width = 7,
- },
- [VCAP_AF_PIPELINE_FORCE_ENA] = {
- .type = VCAP_FIELD_U32,
- .offset = 148,
- .width = 2,
- },
- [VCAP_AF_PIPELINE_ACT_SEL] = {
- .type = VCAP_FIELD_BIT,
- .offset = 150,
- .width = 1,
- },
- [VCAP_AF_PIPELINE_PT] = {
- .type = VCAP_FIELD_U32,
- .offset = 151,
- .width = 5,
- },
- [VCAP_AF_NXT_KEY_TYPE] = {
- .type = VCAP_FIELD_U32,
- .offset = 156,
- .width = 5,
- },
- [VCAP_AF_NXT_NORM_W16_OFFSET] = {
- .type = VCAP_FIELD_U32,
- .offset = 161,
- .width = 5,
- },
- [VCAP_AF_NXT_OFFSET_FROM_TYPE] = {
- .type = VCAP_FIELD_U32,
- .offset = 166,
- .width = 2,
- },
- [VCAP_AF_NXT_TYPE_AFTER_OFFSET] = {
- .type = VCAP_FIELD_U32,
- .offset = 168,
- .width = 2,
- },
- [VCAP_AF_NXT_NORMALIZE] = {
- .type = VCAP_FIELD_BIT,
- .offset = 170,
- .width = 1,
- },
[VCAP_AF_NXT_IDX_CTRL] = {
.type = VCAP_FIELD_U32,
.offset = 171,
@@ -4006,16 +2974,6 @@ static const struct vcap_field is0_full_actionfield[] = {
.offset = 1,
.width = 6,
},
- [VCAP_AF_COSID_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 7,
- .width = 1,
- },
- [VCAP_AF_COSID_VAL] = {
- .type = VCAP_FIELD_U32,
- .offset = 8,
- .width = 3,
- },
[VCAP_AF_QOS_ENA] = {
.type = VCAP_FIELD_BIT,
.offset = 11,
@@ -4076,46 +3034,11 @@ static const struct vcap_field is0_full_actionfield[] = {
.offset = 38,
.width = 3,
},
- [VCAP_AF_GVID_ADD_REPLACE_SEL] = {
- .type = VCAP_FIELD_U32,
- .offset = 41,
- .width = 3,
- },
[VCAP_AF_VID_VAL] = {
.type = VCAP_FIELD_U32,
.offset = 44,
.width = 13,
},
- [VCAP_AF_VLAN_POP_CNT_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 57,
- .width = 1,
- },
- [VCAP_AF_VLAN_POP_CNT] = {
- .type = VCAP_FIELD_U32,
- .offset = 58,
- .width = 2,
- },
- [VCAP_AF_VLAN_PUSH_CNT_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 60,
- .width = 1,
- },
- [VCAP_AF_VLAN_PUSH_CNT] = {
- .type = VCAP_FIELD_U32,
- .offset = 61,
- .width = 2,
- },
- [VCAP_AF_TPID_SEL] = {
- .type = VCAP_FIELD_U32,
- .offset = 63,
- .width = 2,
- },
- [VCAP_AF_VLAN_WAS_TAGGED] = {
- .type = VCAP_FIELD_U32,
- .offset = 65,
- .width = 2,
- },
[VCAP_AF_ISDX_ADD_REPLACE_SEL] = {
.type = VCAP_FIELD_BIT,
.offset = 67,
@@ -4136,126 +3059,6 @@ static const struct vcap_field is0_full_actionfield[] = {
.offset = 83,
.width = 65,
},
- [VCAP_AF_RT_SEL] = {
- .type = VCAP_FIELD_U32,
- .offset = 148,
- .width = 2,
- },
- [VCAP_AF_LPM_AFFIX_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 150,
- .width = 1,
- },
- [VCAP_AF_LPM_AFFIX_VAL] = {
- .type = VCAP_FIELD_U32,
- .offset = 151,
- .width = 10,
- },
- [VCAP_AF_RLEG_DMAC_CHK_DIS] = {
- .type = VCAP_FIELD_BIT,
- .offset = 161,
- .width = 1,
- },
- [VCAP_AF_TTL_DECR_DIS] = {
- .type = VCAP_FIELD_BIT,
- .offset = 162,
- .width = 1,
- },
- [VCAP_AF_L3_MAC_UPDATE_DIS] = {
- .type = VCAP_FIELD_BIT,
- .offset = 163,
- .width = 1,
- },
- [VCAP_AF_CPU_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 164,
- .width = 1,
- },
- [VCAP_AF_CPU_Q] = {
- .type = VCAP_FIELD_U32,
- .offset = 165,
- .width = 3,
- },
- [VCAP_AF_MIP_SEL] = {
- .type = VCAP_FIELD_U32,
- .offset = 168,
- .width = 2,
- },
- [VCAP_AF_OAM_Y1731_SEL] = {
- .type = VCAP_FIELD_U32,
- .offset = 170,
- .width = 3,
- },
- [VCAP_AF_OAM_TWAMP_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 173,
- .width = 1,
- },
- [VCAP_AF_OAM_IP_BFD_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 174,
- .width = 1,
- },
- [VCAP_AF_RSVD_LBL_VAL] = {
- .type = VCAP_FIELD_U32,
- .offset = 175,
- .width = 4,
- },
- [VCAP_AF_TC_LABEL] = {
- .type = VCAP_FIELD_U32,
- .offset = 179,
- .width = 3,
- },
- [VCAP_AF_TTL_LABEL] = {
- .type = VCAP_FIELD_U32,
- .offset = 182,
- .width = 3,
- },
- [VCAP_AF_NUM_VLD_LABELS] = {
- .type = VCAP_FIELD_U32,
- .offset = 185,
- .width = 2,
- },
- [VCAP_AF_FWD_TYPE] = {
- .type = VCAP_FIELD_U32,
- .offset = 187,
- .width = 3,
- },
- [VCAP_AF_MPLS_OAM_TYPE] = {
- .type = VCAP_FIELD_U32,
- .offset = 190,
- .width = 3,
- },
- [VCAP_AF_MPLS_MEP_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 193,
- .width = 1,
- },
- [VCAP_AF_MPLS_MIP_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 194,
- .width = 1,
- },
- [VCAP_AF_MPLS_OAM_FLAVOR] = {
- .type = VCAP_FIELD_BIT,
- .offset = 195,
- .width = 1,
- },
- [VCAP_AF_MPLS_IP_CTRL_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 196,
- .width = 1,
- },
- [VCAP_AF_CUSTOM_ACE_ENA] = {
- .type = VCAP_FIELD_U32,
- .offset = 197,
- .width = 5,
- },
- [VCAP_AF_CUSTOM_ACE_OFFSET] = {
- .type = VCAP_FIELD_U32,
- .offset = 202,
- .width = 2,
- },
[VCAP_AF_PAG_OVERRIDE_MASK] = {
.type = VCAP_FIELD_U32,
.offset = 204,
@@ -4266,86 +3069,6 @@ static const struct vcap_field is0_full_actionfield[] = {
.offset = 212,
.width = 8,
},
- [VCAP_AF_S2_KEY_SEL_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 220,
- .width = 1,
- },
- [VCAP_AF_S2_KEY_SEL_IDX] = {
- .type = VCAP_FIELD_U32,
- .offset = 221,
- .width = 6,
- },
- [VCAP_AF_INJ_MASQ_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 227,
- .width = 1,
- },
- [VCAP_AF_INJ_MASQ_PORT] = {
- .type = VCAP_FIELD_U32,
- .offset = 228,
- .width = 7,
- },
- [VCAP_AF_LPORT_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 235,
- .width = 1,
- },
- [VCAP_AF_INJ_MASQ_LPORT] = {
- .type = VCAP_FIELD_U32,
- .offset = 236,
- .width = 7,
- },
- [VCAP_AF_MATCH_ID] = {
- .type = VCAP_FIELD_U32,
- .offset = 243,
- .width = 16,
- },
- [VCAP_AF_MATCH_ID_MASK] = {
- .type = VCAP_FIELD_U32,
- .offset = 259,
- .width = 16,
- },
- [VCAP_AF_PIPELINE_FORCE_ENA] = {
- .type = VCAP_FIELD_U32,
- .offset = 275,
- .width = 2,
- },
- [VCAP_AF_PIPELINE_ACT_SEL] = {
- .type = VCAP_FIELD_BIT,
- .offset = 277,
- .width = 1,
- },
- [VCAP_AF_PIPELINE_PT] = {
- .type = VCAP_FIELD_U32,
- .offset = 278,
- .width = 5,
- },
- [VCAP_AF_NXT_KEY_TYPE] = {
- .type = VCAP_FIELD_U32,
- .offset = 283,
- .width = 5,
- },
- [VCAP_AF_NXT_NORM_W16_OFFSET] = {
- .type = VCAP_FIELD_U32,
- .offset = 288,
- .width = 5,
- },
- [VCAP_AF_NXT_OFFSET_FROM_TYPE] = {
- .type = VCAP_FIELD_U32,
- .offset = 293,
- .width = 2,
- },
- [VCAP_AF_NXT_TYPE_AFTER_OFFSET] = {
- .type = VCAP_FIELD_U32,
- .offset = 295,
- .width = 2,
- },
- [VCAP_AF_NXT_NORMALIZE] = {
- .type = VCAP_FIELD_BIT,
- .offset = 297,
- .width = 1,
- },
[VCAP_AF_NXT_IDX_CTRL] = {
.type = VCAP_FIELD_U32,
.offset = 298,
@@ -4364,16 +3087,6 @@ static const struct vcap_field is0_class_reduced_actionfield[] = {
.offset = 0,
.width = 1,
},
- [VCAP_AF_COSID_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 1,
- .width = 1,
- },
- [VCAP_AF_COSID_VAL] = {
- .type = VCAP_FIELD_U32,
- .offset = 2,
- .width = 3,
- },
[VCAP_AF_QOS_ENA] = {
.type = VCAP_FIELD_BIT,
.offset = 5,
@@ -4409,46 +3122,11 @@ static const struct vcap_field is0_class_reduced_actionfield[] = {
.offset = 17,
.width = 3,
},
- [VCAP_AF_GVID_ADD_REPLACE_SEL] = {
- .type = VCAP_FIELD_U32,
- .offset = 20,
- .width = 3,
- },
[VCAP_AF_VID_VAL] = {
.type = VCAP_FIELD_U32,
.offset = 23,
.width = 13,
},
- [VCAP_AF_VLAN_POP_CNT_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 36,
- .width = 1,
- },
- [VCAP_AF_VLAN_POP_CNT] = {
- .type = VCAP_FIELD_U32,
- .offset = 37,
- .width = 2,
- },
- [VCAP_AF_VLAN_PUSH_CNT_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 39,
- .width = 1,
- },
- [VCAP_AF_VLAN_PUSH_CNT] = {
- .type = VCAP_FIELD_U32,
- .offset = 40,
- .width = 2,
- },
- [VCAP_AF_TPID_SEL] = {
- .type = VCAP_FIELD_U32,
- .offset = 42,
- .width = 2,
- },
- [VCAP_AF_VLAN_WAS_TAGGED] = {
- .type = VCAP_FIELD_U32,
- .offset = 44,
- .width = 2,
- },
[VCAP_AF_ISDX_ADD_REPLACE_SEL] = {
.type = VCAP_FIELD_BIT,
.offset = 46,
@@ -4459,61 +3137,6 @@ static const struct vcap_field is0_class_reduced_actionfield[] = {
.offset = 47,
.width = 12,
},
- [VCAP_AF_FWD_DIS] = {
- .type = VCAP_FIELD_BIT,
- .offset = 59,
- .width = 1,
- },
- [VCAP_AF_CPU_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 60,
- .width = 1,
- },
- [VCAP_AF_CPU_Q] = {
- .type = VCAP_FIELD_U32,
- .offset = 61,
- .width = 3,
- },
- [VCAP_AF_MIP_SEL] = {
- .type = VCAP_FIELD_U32,
- .offset = 64,
- .width = 2,
- },
- [VCAP_AF_OAM_Y1731_SEL] = {
- .type = VCAP_FIELD_U32,
- .offset = 66,
- .width = 3,
- },
- [VCAP_AF_LPORT_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 69,
- .width = 1,
- },
- [VCAP_AF_INJ_MASQ_LPORT] = {
- .type = VCAP_FIELD_U32,
- .offset = 70,
- .width = 7,
- },
- [VCAP_AF_PIPELINE_FORCE_ENA] = {
- .type = VCAP_FIELD_U32,
- .offset = 77,
- .width = 2,
- },
- [VCAP_AF_PIPELINE_ACT_SEL] = {
- .type = VCAP_FIELD_BIT,
- .offset = 79,
- .width = 1,
- },
- [VCAP_AF_PIPELINE_PT] = {
- .type = VCAP_FIELD_U32,
- .offset = 80,
- .width = 5,
- },
- [VCAP_AF_NXT_KEY_TYPE] = {
- .type = VCAP_FIELD_U32,
- .offset = 85,
- .width = 5,
- },
[VCAP_AF_NXT_IDX_CTRL] = {
.type = VCAP_FIELD_U32,
.offset = 90,
@@ -4527,11 +3150,6 @@ static const struct vcap_field is0_class_reduced_actionfield[] = {
};
static const struct vcap_field is2_base_type_actionfield[] = {
- [VCAP_AF_IS_INNER_ACL] = {
- .type = VCAP_FIELD_BIT,
- .offset = 0,
- .width = 1,
- },
[VCAP_AF_PIPELINE_FORCE_ENA] = {
.type = VCAP_FIELD_BIT,
.offset = 1,
@@ -4562,11 +3180,6 @@ static const struct vcap_field is2_base_type_actionfield[] = {
.offset = 10,
.width = 3,
},
- [VCAP_AF_CPU_DIS] = {
- .type = VCAP_FIELD_BIT,
- .offset = 13,
- .width = 1,
- },
[VCAP_AF_LRN_DIS] = {
.type = VCAP_FIELD_BIT,
.offset = 14,
@@ -4592,11 +3205,6 @@ static const struct vcap_field is2_base_type_actionfield[] = {
.offset = 23,
.width = 1,
},
- [VCAP_AF_DLB_OFFSET] = {
- .type = VCAP_FIELD_U32,
- .offset = 24,
- .width = 3,
- },
[VCAP_AF_MASK_MODE] = {
.type = VCAP_FIELD_U32,
.offset = 27,
@@ -4607,51 +3215,11 @@ static const struct vcap_field is2_base_type_actionfield[] = {
.offset = 30,
.width = 68,
},
- [VCAP_AF_RSDX_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 98,
- .width = 1,
- },
- [VCAP_AF_RSDX_VAL] = {
- .type = VCAP_FIELD_U32,
- .offset = 99,
- .width = 12,
- },
[VCAP_AF_MIRROR_PROBE] = {
.type = VCAP_FIELD_U32,
.offset = 111,
.width = 2,
},
- [VCAP_AF_REW_CMD] = {
- .type = VCAP_FIELD_U32,
- .offset = 113,
- .width = 11,
- },
- [VCAP_AF_TTL_UPDATE_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 124,
- .width = 1,
- },
- [VCAP_AF_SAM_SEQ_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 125,
- .width = 1,
- },
- [VCAP_AF_TCP_UDP_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 126,
- .width = 1,
- },
- [VCAP_AF_TCP_UDP_DPORT] = {
- .type = VCAP_FIELD_U32,
- .offset = 127,
- .width = 16,
- },
- [VCAP_AF_TCP_UDP_SPORT] = {
- .type = VCAP_FIELD_U32,
- .offset = 143,
- .width = 16,
- },
[VCAP_AF_MATCH_ID] = {
.type = VCAP_FIELD_U32,
.offset = 159,
@@ -4667,56 +3235,6 @@ static const struct vcap_field is2_base_type_actionfield[] = {
.offset = 191,
.width = 12,
},
- [VCAP_AF_SWAP_MAC_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 203,
- .width = 1,
- },
- [VCAP_AF_ACL_RT_MODE] = {
- .type = VCAP_FIELD_U32,
- .offset = 204,
- .width = 4,
- },
- [VCAP_AF_ACL_MAC] = {
- .type = VCAP_FIELD_U48,
- .offset = 208,
- .width = 48,
- },
- [VCAP_AF_DMAC_OFFSET_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 256,
- .width = 1,
- },
- [VCAP_AF_PTP_MASTER_SEL] = {
- .type = VCAP_FIELD_U32,
- .offset = 257,
- .width = 2,
- },
- [VCAP_AF_LOG_MSG_INTERVAL] = {
- .type = VCAP_FIELD_U32,
- .offset = 259,
- .width = 4,
- },
- [VCAP_AF_SIP_IDX] = {
- .type = VCAP_FIELD_U32,
- .offset = 263,
- .width = 5,
- },
- [VCAP_AF_RLEG_STAT_IDX] = {
- .type = VCAP_FIELD_U32,
- .offset = 268,
- .width = 3,
- },
- [VCAP_AF_IGR_ACL_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 271,
- .width = 1,
- },
- [VCAP_AF_EGR_ACL_ENA] = {
- .type = VCAP_FIELD_BIT,
- .offset = 272,
- .width = 1,
- },
};
static const struct vcap_field es2_base_type_actionfield[] = {
@@ -4794,16 +3312,6 @@ static const struct vcap_field es2_base_type_actionfield[] = {
/* actionfield_set */
static const struct vcap_set is0_actionfield_set[] = {
- [VCAP_AFS_MLBS] = {
- .type_id = 0,
- .sw_per_item = 2,
- .sw_cnt = 6,
- },
- [VCAP_AFS_MLBS_REDUCED] = {
- .type_id = 0,
- .sw_per_item = 1,
- .sw_cnt = 12,
- },
[VCAP_AFS_CLASSIFICATION] = {
.type_id = 1,
.sw_per_item = 2,
@@ -4839,8 +3347,6 @@ static const struct vcap_set es2_actionfield_set[] = {
/* actionfield_set map */
static const struct vcap_field *is0_actionfield_set_map[] = {
- [VCAP_AFS_MLBS] = is0_mlbs_actionfield,
- [VCAP_AFS_MLBS_REDUCED] = is0_mlbs_reduced_actionfield,
[VCAP_AFS_CLASSIFICATION] = is0_classification_actionfield,
[VCAP_AFS_FULL] = is0_full_actionfield,
[VCAP_AFS_CLASS_REDUCED] = is0_class_reduced_actionfield,
@@ -4856,8 +3362,6 @@ static const struct vcap_field *es2_actionfield_set_map[] = {
/* actionfield_set map size */
static int is0_actionfield_set_map_size[] = {
- [VCAP_AFS_MLBS] = ARRAY_SIZE(is0_mlbs_actionfield),
- [VCAP_AFS_MLBS_REDUCED] = ARRAY_SIZE(is0_mlbs_reduced_actionfield),
[VCAP_AFS_CLASSIFICATION] = ARRAY_SIZE(is0_classification_actionfield),
[VCAP_AFS_FULL] = ARRAY_SIZE(is0_full_actionfield),
[VCAP_AFS_CLASS_REDUCED] = ARRAY_SIZE(is0_class_reduced_actionfield),
@@ -5244,17 +3748,22 @@ static const char * const vcap_keyfield_set_names[] = {
[VCAP_KFS_IP4_OTHER] = "VCAP_KFS_IP4_OTHER",
[VCAP_KFS_IP4_TCP_UDP] = "VCAP_KFS_IP4_TCP_UDP",
[VCAP_KFS_IP4_VID] = "VCAP_KFS_IP4_VID",
+ [VCAP_KFS_IP6_OTHER] = "VCAP_KFS_IP6_OTHER",
[VCAP_KFS_IP6_STD] = "VCAP_KFS_IP6_STD",
+ [VCAP_KFS_IP6_TCP_UDP] = "VCAP_KFS_IP6_TCP_UDP",
[VCAP_KFS_IP6_VID] = "VCAP_KFS_IP6_VID",
[VCAP_KFS_IP_7TUPLE] = "VCAP_KFS_IP_7TUPLE",
+ [VCAP_KFS_ISDX] = "VCAP_KFS_ISDX",
[VCAP_KFS_LL_FULL] = "VCAP_KFS_LL_FULL",
[VCAP_KFS_MAC_ETYPE] = "VCAP_KFS_MAC_ETYPE",
- [VCAP_KFS_MLL] = "VCAP_KFS_MLL",
- [VCAP_KFS_NORMAL] = "VCAP_KFS_NORMAL",
+ [VCAP_KFS_MAC_LLC] = "VCAP_KFS_MAC_LLC",
+ [VCAP_KFS_MAC_SNAP] = "VCAP_KFS_MAC_SNAP",
[VCAP_KFS_NORMAL_5TUPLE_IP4] = "VCAP_KFS_NORMAL_5TUPLE_IP4",
[VCAP_KFS_NORMAL_7TUPLE] = "VCAP_KFS_NORMAL_7TUPLE",
+ [VCAP_KFS_OAM] = "VCAP_KFS_OAM",
[VCAP_KFS_PURE_5TUPLE_IP4] = "VCAP_KFS_PURE_5TUPLE_IP4",
- [VCAP_KFS_TRI_VID] = "VCAP_KFS_TRI_VID",
+ [VCAP_KFS_SMAC_SIP4] = "VCAP_KFS_SMAC_SIP4",
+ [VCAP_KFS_SMAC_SIP6] = "VCAP_KFS_SMAC_SIP6",
};
/* Actionfieldset names */
@@ -5263,9 +3772,9 @@ static const char * const vcap_actionfield_set_names[] = {
[VCAP_AFS_BASE_TYPE] = "VCAP_AFS_BASE_TYPE",
[VCAP_AFS_CLASSIFICATION] = "VCAP_AFS_CLASSIFICATION",
[VCAP_AFS_CLASS_REDUCED] = "VCAP_AFS_CLASS_REDUCED",
+ [VCAP_AFS_ES0] = "VCAP_AFS_ES0",
[VCAP_AFS_FULL] = "VCAP_AFS_FULL",
- [VCAP_AFS_MLBS] = "VCAP_AFS_MLBS",
- [VCAP_AFS_MLBS_REDUCED] = "VCAP_AFS_MLBS_REDUCED",
+ [VCAP_AFS_SMAC_SIP] = "VCAP_AFS_SMAC_SIP",
};
/* Keyfield names */
@@ -5285,6 +3794,7 @@ static const char * const vcap_keyfield_names[] = {
[VCAP_KF_8021Q_PCP1] = "8021Q_PCP1",
[VCAP_KF_8021Q_PCP2] = "8021Q_PCP2",
[VCAP_KF_8021Q_PCP_CLS] = "8021Q_PCP_CLS",
+ [VCAP_KF_8021Q_TPID] = "8021Q_TPID",
[VCAP_KF_8021Q_TPID0] = "8021Q_TPID0",
[VCAP_KF_8021Q_TPID1] = "8021Q_TPID1",
[VCAP_KF_8021Q_TPID2] = "8021Q_TPID2",
@@ -5303,13 +3813,13 @@ static const char * const vcap_keyfield_names[] = {
[VCAP_KF_ARP_SENDER_MATCH_IS] = "ARP_SENDER_MATCH_IS",
[VCAP_KF_ARP_TGT_MATCH_IS] = "ARP_TGT_MATCH_IS",
[VCAP_KF_COSID_CLS] = "COSID_CLS",
- [VCAP_KF_DST_ENTRY] = "DST_ENTRY",
[VCAP_KF_ES0_ISDX_KEY_ENA] = "ES0_ISDX_KEY_ENA",
[VCAP_KF_ETYPE] = "ETYPE",
[VCAP_KF_ETYPE_LEN_IS] = "ETYPE_LEN_IS",
- [VCAP_KF_ETYPE_MPLS] = "ETYPE_MPLS",
+ [VCAP_KF_HOST_MATCH] = "HOST_MATCH",
[VCAP_KF_IF_EGR_PORT_MASK] = "IF_EGR_PORT_MASK",
[VCAP_KF_IF_EGR_PORT_MASK_RNG] = "IF_EGR_PORT_MASK_RNG",
+ [VCAP_KF_IF_EGR_PORT_NO] = "IF_EGR_PORT_NO",
[VCAP_KF_IF_IGR_PORT] = "IF_IGR_PORT",
[VCAP_KF_IF_IGR_PORT_MASK] = "IF_IGR_PORT_MASK",
[VCAP_KF_IF_IGR_PORT_MASK_L3] = "IF_IGR_PORT_MASK_L3",
@@ -5324,17 +3834,24 @@ static const char * const vcap_keyfield_names[] = {
[VCAP_KF_ISDX_GT0_IS] = "ISDX_GT0_IS",
[VCAP_KF_L2_BC_IS] = "L2_BC_IS",
[VCAP_KF_L2_DMAC] = "L2_DMAC",
+ [VCAP_KF_L2_FRM_TYPE] = "L2_FRM_TYPE",
[VCAP_KF_L2_FWD_IS] = "L2_FWD_IS",
+ [VCAP_KF_L2_LLC] = "L2_LLC",
[VCAP_KF_L2_MC_IS] = "L2_MC_IS",
+ [VCAP_KF_L2_PAYLOAD0] = "L2_PAYLOAD0",
+ [VCAP_KF_L2_PAYLOAD1] = "L2_PAYLOAD1",
+ [VCAP_KF_L2_PAYLOAD2] = "L2_PAYLOAD2",
[VCAP_KF_L2_PAYLOAD_ETYPE] = "L2_PAYLOAD_ETYPE",
[VCAP_KF_L2_SMAC] = "L2_SMAC",
+ [VCAP_KF_L2_SNAP] = "L2_SNAP",
[VCAP_KF_L3_DIP_EQ_SIP_IS] = "L3_DIP_EQ_SIP_IS",
- [VCAP_KF_L3_DMAC_DIP_MATCH] = "L3_DMAC_DIP_MATCH",
[VCAP_KF_L3_DPL_CLS] = "L3_DPL_CLS",
[VCAP_KF_L3_DSCP] = "L3_DSCP",
[VCAP_KF_L3_DST_IS] = "L3_DST_IS",
+ [VCAP_KF_L3_FRAGMENT] = "L3_FRAGMENT",
[VCAP_KF_L3_FRAGMENT_TYPE] = "L3_FRAGMENT_TYPE",
[VCAP_KF_L3_FRAG_INVLD_L4_LEN] = "L3_FRAG_INVLD_L4_LEN",
+ [VCAP_KF_L3_FRAG_OFS_GT0] = "L3_FRAG_OFS_GT0",
[VCAP_KF_L3_IP4_DIP] = "L3_IP4_DIP",
[VCAP_KF_L3_IP4_SIP] = "L3_IP4_SIP",
[VCAP_KF_L3_IP6_DIP] = "L3_IP6_DIP",
@@ -5343,9 +3860,10 @@ static const char * const vcap_keyfield_names[] = {
[VCAP_KF_L3_OPTIONS_IS] = "L3_OPTIONS_IS",
[VCAP_KF_L3_PAYLOAD] = "L3_PAYLOAD",
[VCAP_KF_L3_RT_IS] = "L3_RT_IS",
- [VCAP_KF_L3_SMAC_SIP_MATCH] = "L3_SMAC_SIP_MATCH",
[VCAP_KF_L3_TOS] = "L3_TOS",
[VCAP_KF_L3_TTL_GT0] = "L3_TTL_GT0",
+ [VCAP_KF_L4_1588_DOM] = "L4_1588_DOM",
+ [VCAP_KF_L4_1588_VER] = "L4_1588_VER",
[VCAP_KF_L4_ACK] = "L4_ACK",
[VCAP_KF_L4_DPORT] = "L4_DPORT",
[VCAP_KF_L4_FIN] = "L4_FIN",
@@ -5362,9 +3880,14 @@ static const char * const vcap_keyfield_names[] = {
[VCAP_KF_LOOKUP_GEN_IDX] = "LOOKUP_GEN_IDX",
[VCAP_KF_LOOKUP_GEN_IDX_SEL] = "LOOKUP_GEN_IDX_SEL",
[VCAP_KF_LOOKUP_PAG] = "LOOKUP_PAG",
- [VCAP_KF_MIRROR_ENA] = "MIRROR_ENA",
+ [VCAP_KF_MIRROR_PROBE] = "MIRROR_PROBE",
[VCAP_KF_OAM_CCM_CNTS_EQ0] = "OAM_CCM_CNTS_EQ0",
+ [VCAP_KF_OAM_DETECTED] = "OAM_DETECTED",
+ [VCAP_KF_OAM_FLAGS] = "OAM_FLAGS",
[VCAP_KF_OAM_MEL_FLAGS] = "OAM_MEL_FLAGS",
+ [VCAP_KF_OAM_MEPID] = "OAM_MEPID",
+ [VCAP_KF_OAM_OPCODE] = "OAM_OPCODE",
+ [VCAP_KF_OAM_VER] = "OAM_VER",
[VCAP_KF_OAM_Y1731_IS] = "OAM_Y1731_IS",
[VCAP_KF_PROT_ACTIVE] = "PROT_ACTIVE",
[VCAP_KF_TCP_IS] = "TCP_IS",
@@ -5375,50 +3898,37 @@ static const char * const vcap_keyfield_names[] = {
/* Actionfield names */
static const char * const vcap_actionfield_names[] = {
[VCAP_AF_NO_VALUE] = "(None)",
- [VCAP_AF_ACL_MAC] = "ACL_MAC",
- [VCAP_AF_ACL_RT_MODE] = "ACL_RT_MODE",
+ [VCAP_AF_ACL_ID] = "ACL_ID",
[VCAP_AF_CLS_VID_SEL] = "CLS_VID_SEL",
[VCAP_AF_CNT_ID] = "CNT_ID",
[VCAP_AF_COPY_PORT_NUM] = "COPY_PORT_NUM",
[VCAP_AF_COPY_QUEUE_NUM] = "COPY_QUEUE_NUM",
- [VCAP_AF_COSID_ENA] = "COSID_ENA",
- [VCAP_AF_COSID_VAL] = "COSID_VAL",
[VCAP_AF_CPU_COPY_ENA] = "CPU_COPY_ENA",
- [VCAP_AF_CPU_DIS] = "CPU_DIS",
- [VCAP_AF_CPU_ENA] = "CPU_ENA",
- [VCAP_AF_CPU_Q] = "CPU_Q",
+ [VCAP_AF_CPU_QU] = "CPU_QU",
[VCAP_AF_CPU_QUEUE_NUM] = "CPU_QUEUE_NUM",
- [VCAP_AF_CUSTOM_ACE_ENA] = "CUSTOM_ACE_ENA",
- [VCAP_AF_CUSTOM_ACE_OFFSET] = "CUSTOM_ACE_OFFSET",
+ [VCAP_AF_DEI_A_VAL] = "DEI_A_VAL",
+ [VCAP_AF_DEI_B_VAL] = "DEI_B_VAL",
+ [VCAP_AF_DEI_C_VAL] = "DEI_C_VAL",
[VCAP_AF_DEI_ENA] = "DEI_ENA",
[VCAP_AF_DEI_VAL] = "DEI_VAL",
- [VCAP_AF_DLB_OFFSET] = "DLB_OFFSET",
- [VCAP_AF_DMAC_OFFSET_ENA] = "DMAC_OFFSET_ENA",
[VCAP_AF_DP_ENA] = "DP_ENA",
[VCAP_AF_DP_VAL] = "DP_VAL",
[VCAP_AF_DSCP_ENA] = "DSCP_ENA",
+ [VCAP_AF_DSCP_SEL] = "DSCP_SEL",
[VCAP_AF_DSCP_VAL] = "DSCP_VAL",
- [VCAP_AF_EGR_ACL_ENA] = "EGR_ACL_ENA",
[VCAP_AF_ES2_REW_CMD] = "ES2_REW_CMD",
- [VCAP_AF_FWD_DIS] = "FWD_DIS",
+ [VCAP_AF_ESDX] = "ESDX",
+ [VCAP_AF_FWD_KILL_ENA] = "FWD_KILL_ENA",
[VCAP_AF_FWD_MODE] = "FWD_MODE",
- [VCAP_AF_FWD_TYPE] = "FWD_TYPE",
- [VCAP_AF_GVID_ADD_REPLACE_SEL] = "GVID_ADD_REPLACE_SEL",
+ [VCAP_AF_FWD_SEL] = "FWD_SEL",
[VCAP_AF_HIT_ME_ONCE] = "HIT_ME_ONCE",
+ [VCAP_AF_HOST_MATCH] = "HOST_MATCH",
[VCAP_AF_IGNORE_PIPELINE_CTRL] = "IGNORE_PIPELINE_CTRL",
- [VCAP_AF_IGR_ACL_ENA] = "IGR_ACL_ENA",
- [VCAP_AF_INJ_MASQ_ENA] = "INJ_MASQ_ENA",
- [VCAP_AF_INJ_MASQ_LPORT] = "INJ_MASQ_LPORT",
- [VCAP_AF_INJ_MASQ_PORT] = "INJ_MASQ_PORT",
[VCAP_AF_INTR_ENA] = "INTR_ENA",
[VCAP_AF_ISDX_ADD_REPLACE_SEL] = "ISDX_ADD_REPLACE_SEL",
+ [VCAP_AF_ISDX_ENA] = "ISDX_ENA",
[VCAP_AF_ISDX_VAL] = "ISDX_VAL",
- [VCAP_AF_IS_INNER_ACL] = "IS_INNER_ACL",
- [VCAP_AF_L3_MAC_UPDATE_DIS] = "L3_MAC_UPDATE_DIS",
- [VCAP_AF_LOG_MSG_INTERVAL] = "LOG_MSG_INTERVAL",
- [VCAP_AF_LPM_AFFIX_ENA] = "LPM_AFFIX_ENA",
- [VCAP_AF_LPM_AFFIX_VAL] = "LPM_AFFIX_VAL",
- [VCAP_AF_LPORT_ENA] = "LPORT_ENA",
+ [VCAP_AF_LOOP_ENA] = "LOOP_ENA",
[VCAP_AF_LRN_DIS] = "LRN_DIS",
[VCAP_AF_MAP_IDX] = "MAP_IDX",
[VCAP_AF_MAP_KEY] = "MAP_KEY",
@@ -5426,71 +3936,53 @@ static const char * const vcap_actionfield_names[] = {
[VCAP_AF_MASK_MODE] = "MASK_MODE",
[VCAP_AF_MATCH_ID] = "MATCH_ID",
[VCAP_AF_MATCH_ID_MASK] = "MATCH_ID_MASK",
- [VCAP_AF_MIP_SEL] = "MIP_SEL",
+ [VCAP_AF_MIRROR_ENA] = "MIRROR_ENA",
[VCAP_AF_MIRROR_PROBE] = "MIRROR_PROBE",
[VCAP_AF_MIRROR_PROBE_ID] = "MIRROR_PROBE_ID",
- [VCAP_AF_MPLS_IP_CTRL_ENA] = "MPLS_IP_CTRL_ENA",
- [VCAP_AF_MPLS_MEP_ENA] = "MPLS_MEP_ENA",
- [VCAP_AF_MPLS_MIP_ENA] = "MPLS_MIP_ENA",
- [VCAP_AF_MPLS_OAM_FLAVOR] = "MPLS_OAM_FLAVOR",
- [VCAP_AF_MPLS_OAM_TYPE] = "MPLS_OAM_TYPE",
- [VCAP_AF_NUM_VLD_LABELS] = "NUM_VLD_LABELS",
[VCAP_AF_NXT_IDX] = "NXT_IDX",
[VCAP_AF_NXT_IDX_CTRL] = "NXT_IDX_CTRL",
- [VCAP_AF_NXT_KEY_TYPE] = "NXT_KEY_TYPE",
- [VCAP_AF_NXT_NORMALIZE] = "NXT_NORMALIZE",
- [VCAP_AF_NXT_NORM_W16_OFFSET] = "NXT_NORM_W16_OFFSET",
- [VCAP_AF_NXT_NORM_W32_OFFSET] = "NXT_NORM_W32_OFFSET",
- [VCAP_AF_NXT_OFFSET_FROM_TYPE] = "NXT_OFFSET_FROM_TYPE",
- [VCAP_AF_NXT_TYPE_AFTER_OFFSET] = "NXT_TYPE_AFTER_OFFSET",
- [VCAP_AF_OAM_IP_BFD_ENA] = "OAM_IP_BFD_ENA",
- [VCAP_AF_OAM_TWAMP_ENA] = "OAM_TWAMP_ENA",
- [VCAP_AF_OAM_Y1731_SEL] = "OAM_Y1731_SEL",
[VCAP_AF_PAG_OVERRIDE_MASK] = "PAG_OVERRIDE_MASK",
[VCAP_AF_PAG_VAL] = "PAG_VAL",
+ [VCAP_AF_PCP_A_VAL] = "PCP_A_VAL",
+ [VCAP_AF_PCP_B_VAL] = "PCP_B_VAL",
+ [VCAP_AF_PCP_C_VAL] = "PCP_C_VAL",
[VCAP_AF_PCP_ENA] = "PCP_ENA",
[VCAP_AF_PCP_VAL] = "PCP_VAL",
- [VCAP_AF_PIPELINE_ACT_SEL] = "PIPELINE_ACT_SEL",
+ [VCAP_AF_PIPELINE_ACT] = "PIPELINE_ACT",
[VCAP_AF_PIPELINE_FORCE_ENA] = "PIPELINE_FORCE_ENA",
[VCAP_AF_PIPELINE_PT] = "PIPELINE_PT",
- [VCAP_AF_PIPELINE_PT_REDUCED] = "PIPELINE_PT_REDUCED",
[VCAP_AF_POLICE_ENA] = "POLICE_ENA",
[VCAP_AF_POLICE_IDX] = "POLICE_IDX",
[VCAP_AF_POLICE_REMARK] = "POLICE_REMARK",
+ [VCAP_AF_POLICE_VCAP_ONLY] = "POLICE_VCAP_ONLY",
+ [VCAP_AF_POP_VAL] = "POP_VAL",
[VCAP_AF_PORT_MASK] = "PORT_MASK",
- [VCAP_AF_PTP_MASTER_SEL] = "PTP_MASTER_SEL",
+ [VCAP_AF_PUSH_CUSTOMER_TAG] = "PUSH_CUSTOMER_TAG",
+ [VCAP_AF_PUSH_INNER_TAG] = "PUSH_INNER_TAG",
+ [VCAP_AF_PUSH_OUTER_TAG] = "PUSH_OUTER_TAG",
[VCAP_AF_QOS_ENA] = "QOS_ENA",
[VCAP_AF_QOS_VAL] = "QOS_VAL",
- [VCAP_AF_REW_CMD] = "REW_CMD",
- [VCAP_AF_RLEG_DMAC_CHK_DIS] = "RLEG_DMAC_CHK_DIS",
- [VCAP_AF_RLEG_STAT_IDX] = "RLEG_STAT_IDX",
- [VCAP_AF_RSDX_ENA] = "RSDX_ENA",
- [VCAP_AF_RSDX_VAL] = "RSDX_VAL",
- [VCAP_AF_RSVD_LBL_VAL] = "RSVD_LBL_VAL",
+ [VCAP_AF_REW_OP] = "REW_OP",
[VCAP_AF_RT_DIS] = "RT_DIS",
- [VCAP_AF_RT_SEL] = "RT_SEL",
- [VCAP_AF_S2_KEY_SEL_ENA] = "S2_KEY_SEL_ENA",
- [VCAP_AF_S2_KEY_SEL_IDX] = "S2_KEY_SEL_IDX",
- [VCAP_AF_SAM_SEQ_ENA] = "SAM_SEQ_ENA",
- [VCAP_AF_SIP_IDX] = "SIP_IDX",
- [VCAP_AF_SWAP_MAC_ENA] = "SWAP_MAC_ENA",
- [VCAP_AF_TCP_UDP_DPORT] = "TCP_UDP_DPORT",
- [VCAP_AF_TCP_UDP_ENA] = "TCP_UDP_ENA",
- [VCAP_AF_TCP_UDP_SPORT] = "TCP_UDP_SPORT",
- [VCAP_AF_TC_ENA] = "TC_ENA",
- [VCAP_AF_TC_LABEL] = "TC_LABEL",
- [VCAP_AF_TPID_SEL] = "TPID_SEL",
- [VCAP_AF_TTL_DECR_DIS] = "TTL_DECR_DIS",
- [VCAP_AF_TTL_ENA] = "TTL_ENA",
- [VCAP_AF_TTL_LABEL] = "TTL_LABEL",
- [VCAP_AF_TTL_UPDATE_ENA] = "TTL_UPDATE_ENA",
+ [VCAP_AF_SWAP_MACS_ENA] = "SWAP_MACS_ENA",
+ [VCAP_AF_TAG_A_DEI_SEL] = "TAG_A_DEI_SEL",
+ [VCAP_AF_TAG_A_PCP_SEL] = "TAG_A_PCP_SEL",
+ [VCAP_AF_TAG_A_TPID_SEL] = "TAG_A_TPID_SEL",
+ [VCAP_AF_TAG_A_VID_SEL] = "TAG_A_VID_SEL",
+ [VCAP_AF_TAG_B_DEI_SEL] = "TAG_B_DEI_SEL",
+ [VCAP_AF_TAG_B_PCP_SEL] = "TAG_B_PCP_SEL",
+ [VCAP_AF_TAG_B_TPID_SEL] = "TAG_B_TPID_SEL",
+ [VCAP_AF_TAG_B_VID_SEL] = "TAG_B_VID_SEL",
+ [VCAP_AF_TAG_C_DEI_SEL] = "TAG_C_DEI_SEL",
+ [VCAP_AF_TAG_C_PCP_SEL] = "TAG_C_PCP_SEL",
+ [VCAP_AF_TAG_C_TPID_SEL] = "TAG_C_TPID_SEL",
+ [VCAP_AF_TAG_C_VID_SEL] = "TAG_C_VID_SEL",
[VCAP_AF_TYPE] = "TYPE",
+ [VCAP_AF_UNTAG_VID_ENA] = "UNTAG_VID_ENA",
+ [VCAP_AF_VID_A_VAL] = "VID_A_VAL",
+ [VCAP_AF_VID_B_VAL] = "VID_B_VAL",
+ [VCAP_AF_VID_C_VAL] = "VID_C_VAL",
[VCAP_AF_VID_VAL] = "VID_VAL",
- [VCAP_AF_VLAN_POP_CNT] = "VLAN_POP_CNT",
- [VCAP_AF_VLAN_POP_CNT_ENA] = "VLAN_POP_CNT_ENA",
- [VCAP_AF_VLAN_PUSH_CNT] = "VLAN_PUSH_CNT",
- [VCAP_AF_VLAN_PUSH_CNT_ENA] = "VLAN_PUSH_CNT_ENA",
- [VCAP_AF_VLAN_WAS_TAGGED] = "VLAN_WAS_TAGGED",
};
/* VCAPs */
diff --git a/drivers/net/ethernet/microchip/vcap/vcap_model_kunit.h b/drivers/net/ethernet/microchip/vcap/vcap_model_kunit.h
index b5a74f0eef9b..55762f24e196 100644
--- a/drivers/net/ethernet/microchip/vcap/vcap_model_kunit.h
+++ b/drivers/net/ethernet/microchip/vcap/vcap_model_kunit.h
@@ -1,10 +1,18 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright (C) 2022 Microchip Technology Inc. and its subsidiaries.
+/* Copyright (C) 2023 Microchip Technology Inc. and its subsidiaries.
* Microchip VCAP test model interface for kunit testing
*/
+/* This file is autogenerated by cml-utils 2023-02-10 11:16:00 +0100.
+ * Commit ID: c30fb4bf0281cd4a7133bdab6682f9e43c872ada
+ */
+
#ifndef __VCAP_MODEL_KUNIT_H__
#define __VCAP_MODEL_KUNIT_H__
+
+/* VCAPs */
extern const struct vcap_info kunit_test_vcaps[];
extern const struct vcap_statistics kunit_test_vcap_stats;
+
#endif /* __VCAP_MODEL_KUNIT_H__ */
+
diff --git a/drivers/net/ethernet/microchip/vcap/vcap_tc.c b/drivers/net/ethernet/microchip/vcap/vcap_tc.c
new file mode 100644
index 000000000000..09abe7944af6
--- /dev/null
+++ b/drivers/net/ethernet/microchip/vcap/vcap_tc.c
@@ -0,0 +1,412 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip VCAP TC
+ *
+ * Copyright (c) 2023 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#include <net/flow_offload.h>
+#include <net/ipv6.h>
+#include <net/tcp.h>
+
+#include "vcap_api_client.h"
+#include "vcap_tc.h"
+
+enum vcap_is2_arp_opcode {
+ VCAP_IS2_ARP_REQUEST,
+ VCAP_IS2_ARP_REPLY,
+ VCAP_IS2_RARP_REQUEST,
+ VCAP_IS2_RARP_REPLY,
+};
+
+enum vcap_arp_opcode {
+ VCAP_ARP_OP_RESERVED,
+ VCAP_ARP_OP_REQUEST,
+ VCAP_ARP_OP_REPLY,
+};
+
+int vcap_tc_flower_handler_ethaddr_usage(struct vcap_tc_flower_parse_usage *st)
+{
+ enum vcap_key_field smac_key = VCAP_KF_L2_SMAC;
+ enum vcap_key_field dmac_key = VCAP_KF_L2_DMAC;
+ struct flow_match_eth_addrs match;
+ struct vcap_u48_key smac, dmac;
+ int err = 0;
+
+ flow_rule_match_eth_addrs(st->frule, &match);
+
+ if (!is_zero_ether_addr(match.mask->src)) {
+ vcap_netbytes_copy(smac.value, match.key->src, ETH_ALEN);
+ vcap_netbytes_copy(smac.mask, match.mask->src, ETH_ALEN);
+ err = vcap_rule_add_key_u48(st->vrule, smac_key, &smac);
+ if (err)
+ goto out;
+ }
+
+ if (!is_zero_ether_addr(match.mask->dst)) {
+ vcap_netbytes_copy(dmac.value, match.key->dst, ETH_ALEN);
+ vcap_netbytes_copy(dmac.mask, match.mask->dst, ETH_ALEN);
+ err = vcap_rule_add_key_u48(st->vrule, dmac_key, &dmac);
+ if (err)
+ goto out;
+ }
+
+ st->used_keys |= BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS);
+
+ return err;
+
+out:
+ NL_SET_ERR_MSG_MOD(st->fco->common.extack, "eth_addr parse error");
+ return err;
+}
+EXPORT_SYMBOL_GPL(vcap_tc_flower_handler_ethaddr_usage);
+
+int vcap_tc_flower_handler_ipv4_usage(struct vcap_tc_flower_parse_usage *st)
+{
+ int err = 0;
+
+ if (st->l3_proto == ETH_P_IP) {
+ struct flow_match_ipv4_addrs mt;
+
+ flow_rule_match_ipv4_addrs(st->frule, &mt);
+ if (mt.mask->src) {
+ err = vcap_rule_add_key_u32(st->vrule,
+ VCAP_KF_L3_IP4_SIP,
+ be32_to_cpu(mt.key->src),
+ be32_to_cpu(mt.mask->src));
+ if (err)
+ goto out;
+ }
+ if (mt.mask->dst) {
+ err = vcap_rule_add_key_u32(st->vrule,
+ VCAP_KF_L3_IP4_DIP,
+ be32_to_cpu(mt.key->dst),
+ be32_to_cpu(mt.mask->dst));
+ if (err)
+ goto out;
+ }
+ }
+
+ st->used_keys |= BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS);
+
+ return err;
+
+out:
+ NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ipv4_addr parse error");
+ return err;
+}
+EXPORT_SYMBOL_GPL(vcap_tc_flower_handler_ipv4_usage);
+
+int vcap_tc_flower_handler_ipv6_usage(struct vcap_tc_flower_parse_usage *st)
+{
+ int err = 0;
+
+ if (st->l3_proto == ETH_P_IPV6) {
+ struct flow_match_ipv6_addrs mt;
+ struct vcap_u128_key sip;
+ struct vcap_u128_key dip;
+
+ flow_rule_match_ipv6_addrs(st->frule, &mt);
+ /* Check if address masks are non-zero */
+ if (!ipv6_addr_any(&mt.mask->src)) {
+ vcap_netbytes_copy(sip.value, mt.key->src.s6_addr, 16);
+ vcap_netbytes_copy(sip.mask, mt.mask->src.s6_addr, 16);
+ err = vcap_rule_add_key_u128(st->vrule,
+ VCAP_KF_L3_IP6_SIP, &sip);
+ if (err)
+ goto out;
+ }
+ if (!ipv6_addr_any(&mt.mask->dst)) {
+ vcap_netbytes_copy(dip.value, mt.key->dst.s6_addr, 16);
+ vcap_netbytes_copy(dip.mask, mt.mask->dst.s6_addr, 16);
+ err = vcap_rule_add_key_u128(st->vrule,
+ VCAP_KF_L3_IP6_DIP, &dip);
+ if (err)
+ goto out;
+ }
+ }
+ st->used_keys |= BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS);
+ return err;
+out:
+ NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ipv6_addr parse error");
+ return err;
+}
+EXPORT_SYMBOL_GPL(vcap_tc_flower_handler_ipv6_usage);
+
+int vcap_tc_flower_handler_portnum_usage(struct vcap_tc_flower_parse_usage *st)
+{
+ struct flow_match_ports mt;
+ u16 value, mask;
+ int err = 0;
+
+ flow_rule_match_ports(st->frule, &mt);
+
+ if (mt.mask->src) {
+ value = be16_to_cpu(mt.key->src);
+ mask = be16_to_cpu(mt.mask->src);
+ err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L4_SPORT, value,
+ mask);
+ if (err)
+ goto out;
+ }
+
+ if (mt.mask->dst) {
+ value = be16_to_cpu(mt.key->dst);
+ mask = be16_to_cpu(mt.mask->dst);
+ err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L4_DPORT, value,
+ mask);
+ if (err)
+ goto out;
+ }
+
+ st->used_keys |= BIT(FLOW_DISSECTOR_KEY_PORTS);
+
+ return err;
+
+out:
+ NL_SET_ERR_MSG_MOD(st->fco->common.extack, "port parse error");
+ return err;
+}
+EXPORT_SYMBOL_GPL(vcap_tc_flower_handler_portnum_usage);
+
+int vcap_tc_flower_handler_cvlan_usage(struct vcap_tc_flower_parse_usage *st)
+{
+ enum vcap_key_field vid_key = VCAP_KF_8021Q_VID0;
+ enum vcap_key_field pcp_key = VCAP_KF_8021Q_PCP0;
+ struct flow_match_vlan mt;
+ u16 tpid;
+ int err;
+
+ flow_rule_match_cvlan(st->frule, &mt);
+
+ tpid = be16_to_cpu(mt.key->vlan_tpid);
+
+ if (tpid == ETH_P_8021Q) {
+ vid_key = VCAP_KF_8021Q_VID1;
+ pcp_key = VCAP_KF_8021Q_PCP1;
+ }
+
+ if (mt.mask->vlan_id) {
+ err = vcap_rule_add_key_u32(st->vrule, vid_key,
+ mt.key->vlan_id,
+ mt.mask->vlan_id);
+ if (err)
+ goto out;
+ }
+
+ if (mt.mask->vlan_priority) {
+ err = vcap_rule_add_key_u32(st->vrule, pcp_key,
+ mt.key->vlan_priority,
+ mt.mask->vlan_priority);
+ if (err)
+ goto out;
+ }
+
+ st->used_keys |= BIT(FLOW_DISSECTOR_KEY_CVLAN);
+
+ return 0;
+out:
+ NL_SET_ERR_MSG_MOD(st->fco->common.extack, "cvlan parse error");
+ return err;
+}
+EXPORT_SYMBOL_GPL(vcap_tc_flower_handler_cvlan_usage);
+
+int vcap_tc_flower_handler_vlan_usage(struct vcap_tc_flower_parse_usage *st,
+ enum vcap_key_field vid_key,
+ enum vcap_key_field pcp_key)
+{
+ struct flow_match_vlan mt;
+ int err;
+
+ flow_rule_match_vlan(st->frule, &mt);
+
+ if (mt.mask->vlan_id) {
+ err = vcap_rule_add_key_u32(st->vrule, vid_key,
+ mt.key->vlan_id,
+ mt.mask->vlan_id);
+ if (err)
+ goto out;
+ }
+
+ if (mt.mask->vlan_priority) {
+ err = vcap_rule_add_key_u32(st->vrule, pcp_key,
+ mt.key->vlan_priority,
+ mt.mask->vlan_priority);
+ if (err)
+ goto out;
+ }
+
+ if (mt.mask->vlan_tpid)
+ st->tpid = be16_to_cpu(mt.key->vlan_tpid);
+
+ st->used_keys |= BIT(FLOW_DISSECTOR_KEY_VLAN);
+
+ return 0;
+out:
+ NL_SET_ERR_MSG_MOD(st->fco->common.extack, "vlan parse error");
+ return err;
+}
+EXPORT_SYMBOL_GPL(vcap_tc_flower_handler_vlan_usage);
+
+int vcap_tc_flower_handler_tcp_usage(struct vcap_tc_flower_parse_usage *st)
+{
+ struct flow_match_tcp mt;
+ u16 tcp_flags_mask;
+ u16 tcp_flags_key;
+ enum vcap_bit val;
+ int err = 0;
+
+ flow_rule_match_tcp(st->frule, &mt);
+ tcp_flags_key = be16_to_cpu(mt.key->flags);
+ tcp_flags_mask = be16_to_cpu(mt.mask->flags);
+
+ if (tcp_flags_mask & TCPHDR_FIN) {
+ val = VCAP_BIT_0;
+ if (tcp_flags_key & TCPHDR_FIN)
+ val = VCAP_BIT_1;
+ err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_FIN, val);
+ if (err)
+ goto out;
+ }
+
+ if (tcp_flags_mask & TCPHDR_SYN) {
+ val = VCAP_BIT_0;
+ if (tcp_flags_key & TCPHDR_SYN)
+ val = VCAP_BIT_1;
+ err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_SYN, val);
+ if (err)
+ goto out;
+ }
+
+ if (tcp_flags_mask & TCPHDR_RST) {
+ val = VCAP_BIT_0;
+ if (tcp_flags_key & TCPHDR_RST)
+ val = VCAP_BIT_1;
+ err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_RST, val);
+ if (err)
+ goto out;
+ }
+
+ if (tcp_flags_mask & TCPHDR_PSH) {
+ val = VCAP_BIT_0;
+ if (tcp_flags_key & TCPHDR_PSH)
+ val = VCAP_BIT_1;
+ err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_PSH, val);
+ if (err)
+ goto out;
+ }
+
+ if (tcp_flags_mask & TCPHDR_ACK) {
+ val = VCAP_BIT_0;
+ if (tcp_flags_key & TCPHDR_ACK)
+ val = VCAP_BIT_1;
+ err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_ACK, val);
+ if (err)
+ goto out;
+ }
+
+ if (tcp_flags_mask & TCPHDR_URG) {
+ val = VCAP_BIT_0;
+ if (tcp_flags_key & TCPHDR_URG)
+ val = VCAP_BIT_1;
+ err = vcap_rule_add_key_bit(st->vrule, VCAP_KF_L4_URG, val);
+ if (err)
+ goto out;
+ }
+
+ st->used_keys |= BIT(FLOW_DISSECTOR_KEY_TCP);
+
+ return err;
+
+out:
+ NL_SET_ERR_MSG_MOD(st->fco->common.extack, "tcp_flags parse error");
+ return err;
+}
+EXPORT_SYMBOL_GPL(vcap_tc_flower_handler_tcp_usage);
+
+int vcap_tc_flower_handler_arp_usage(struct vcap_tc_flower_parse_usage *st)
+{
+ struct flow_match_arp mt;
+ u16 value, mask;
+ u32 ipval, ipmsk;
+ int err;
+
+ flow_rule_match_arp(st->frule, &mt);
+
+ if (mt.mask->op) {
+ mask = 0x3;
+ if (st->l3_proto == ETH_P_ARP) {
+ value = mt.key->op == VCAP_ARP_OP_REQUEST ?
+ VCAP_IS2_ARP_REQUEST :
+ VCAP_IS2_ARP_REPLY;
+ } else { /* RARP */
+ value = mt.key->op == VCAP_ARP_OP_REQUEST ?
+ VCAP_IS2_RARP_REQUEST :
+ VCAP_IS2_RARP_REPLY;
+ }
+ err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_ARP_OPCODE,
+ value, mask);
+ if (err)
+ goto out;
+ }
+
+ /* The IS2 ARP keyset does not support ARP hardware addresses */
+ if (!is_zero_ether_addr(mt.mask->sha) ||
+ !is_zero_ether_addr(mt.mask->tha)) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (mt.mask->sip) {
+ ipval = be32_to_cpu((__force __be32)mt.key->sip);
+ ipmsk = be32_to_cpu((__force __be32)mt.mask->sip);
+
+ err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L3_IP4_SIP,
+ ipval, ipmsk);
+ if (err)
+ goto out;
+ }
+
+ if (mt.mask->tip) {
+ ipval = be32_to_cpu((__force __be32)mt.key->tip);
+ ipmsk = be32_to_cpu((__force __be32)mt.mask->tip);
+
+ err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L3_IP4_DIP,
+ ipval, ipmsk);
+ if (err)
+ goto out;
+ }
+
+ st->used_keys |= BIT(FLOW_DISSECTOR_KEY_ARP);
+
+ return 0;
+
+out:
+ NL_SET_ERR_MSG_MOD(st->fco->common.extack, "arp parse error");
+ return err;
+}
+EXPORT_SYMBOL_GPL(vcap_tc_flower_handler_arp_usage);
+
+int vcap_tc_flower_handler_ip_usage(struct vcap_tc_flower_parse_usage *st)
+{
+ struct flow_match_ip mt;
+ int err = 0;
+
+ flow_rule_match_ip(st->frule, &mt);
+
+ if (mt.mask->tos) {
+ err = vcap_rule_add_key_u32(st->vrule, VCAP_KF_L3_TOS,
+ mt.key->tos,
+ mt.mask->tos);
+ if (err)
+ goto out;
+ }
+
+ st->used_keys |= BIT(FLOW_DISSECTOR_KEY_IP);
+
+ return err;
+
+out:
+ NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ip_tos parse error");
+ return err;
+}
+EXPORT_SYMBOL_GPL(vcap_tc_flower_handler_ip_usage);
diff --git a/drivers/net/ethernet/microchip/vcap/vcap_tc.h b/drivers/net/ethernet/microchip/vcap/vcap_tc.h
new file mode 100644
index 000000000000..071f892f9aa4
--- /dev/null
+++ b/drivers/net/ethernet/microchip/vcap/vcap_tc.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (C) 2023 Microchip Technology Inc. and its subsidiaries.
+ * Microchip VCAP TC
+ */
+
+#ifndef __VCAP_TC__
+#define __VCAP_TC__
+
+struct vcap_tc_flower_parse_usage {
+ struct flow_cls_offload *fco;
+ struct flow_rule *frule;
+ struct vcap_rule *vrule;
+ struct vcap_admin *admin;
+ u16 l3_proto;
+ u8 l4_proto;
+ u16 tpid;
+ unsigned int used_keys;
+};
+
+int vcap_tc_flower_handler_ethaddr_usage(struct vcap_tc_flower_parse_usage *st);
+int vcap_tc_flower_handler_ipv4_usage(struct vcap_tc_flower_parse_usage *st);
+int vcap_tc_flower_handler_ipv6_usage(struct vcap_tc_flower_parse_usage *st);
+int vcap_tc_flower_handler_portnum_usage(struct vcap_tc_flower_parse_usage *st);
+int vcap_tc_flower_handler_cvlan_usage(struct vcap_tc_flower_parse_usage *st);
+int vcap_tc_flower_handler_vlan_usage(struct vcap_tc_flower_parse_usage *st,
+ enum vcap_key_field vid_key,
+ enum vcap_key_field pcp_key);
+int vcap_tc_flower_handler_tcp_usage(struct vcap_tc_flower_parse_usage *st);
+int vcap_tc_flower_handler_arp_usage(struct vcap_tc_flower_parse_usage *st);
+int vcap_tc_flower_handler_ip_usage(struct vcap_tc_flower_parse_usage *st);
+
+#endif /* __VCAP_TC__ */
diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c
index b144f2237748..f9b8f372ec8a 100644
--- a/drivers/net/ethernet/microsoft/mana/gdma_main.c
+++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c
@@ -1217,9 +1217,7 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev)
unsigned int max_queues_per_port = num_online_cpus();
struct gdma_context *gc = pci_get_drvdata(pdev);
struct gdma_irq_context *gic;
- unsigned int max_irqs;
- u16 *cpus;
- cpumask_var_t req_mask;
+ unsigned int max_irqs, cpu;
int nvec, irq;
int err, i = 0, j;
@@ -1240,21 +1238,7 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev)
goto free_irq_vector;
}
- if (!zalloc_cpumask_var(&req_mask, GFP_KERNEL)) {
- err = -ENOMEM;
- goto free_irq;
- }
-
- cpus = kcalloc(nvec, sizeof(*cpus), GFP_KERNEL);
- if (!cpus) {
- err = -ENOMEM;
- goto free_mask;
- }
- for (i = 0; i < nvec; i++)
- cpus[i] = cpumask_local_spread(i, gc->numa_node);
-
for (i = 0; i < nvec; i++) {
- cpumask_set_cpu(cpus[i], req_mask);
gic = &gc->irq_contexts[i];
gic->handler = NULL;
gic->arg = NULL;
@@ -1269,17 +1253,16 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev)
irq = pci_irq_vector(pdev, i);
if (irq < 0) {
err = irq;
- goto free_mask;
+ goto free_irq;
}
err = request_irq(irq, mana_gd_intr, 0, gic->name, gic);
if (err)
- goto free_mask;
- irq_set_affinity_and_hint(irq, req_mask);
- cpumask_clear(req_mask);
+ goto free_irq;
+
+ cpu = cpumask_local_spread(i, gc->numa_node);
+ irq_set_affinity_and_hint(irq, cpumask_of(cpu));
}
- free_cpumask_var(req_mask);
- kfree(cpus);
err = mana_gd_alloc_res_map(nvec, &gc->msix_resource);
if (err)
@@ -1290,13 +1273,12 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev)
return 0;
-free_mask:
- free_cpumask_var(req_mask);
- kfree(cpus);
free_irq:
for (j = i - 1; j >= 0; j--) {
irq = pci_irq_vector(pdev, j);
gic = &gc->irq_contexts[j];
+
+ irq_update_affinity_hint(irq, NULL);
free_irq(irq, gic);
}
@@ -1324,6 +1306,9 @@ static void mana_gd_remove_irqs(struct pci_dev *pdev)
continue;
gic = &gc->irq_contexts[i];
+
+ /* Need to clear the hint before free_irq */
+ irq_update_affinity_hint(irq, NULL);
free_irq(irq, gic);
}
diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index 2f6a048dee90..6120f2b6684f 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -2160,6 +2160,8 @@ static int mana_probe_port(struct mana_context *ac, int port_idx,
ndev->hw_features |= NETIF_F_RXHASH;
ndev->features = ndev->hw_features;
ndev->vlan_features = 0;
+ ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT;
err = register_netdev(ndev);
if (err) {
diff --git a/drivers/net/ethernet/mscc/Kconfig b/drivers/net/ethernet/mscc/Kconfig
index 8dd8c7f425d2..81e605691bb8 100644
--- a/drivers/net/ethernet/mscc/Kconfig
+++ b/drivers/net/ethernet/mscc/Kconfig
@@ -13,6 +13,7 @@ if NET_VENDOR_MICROSEMI
# Users should depend on NET_SWITCHDEV, HAS_IOMEM, BRIDGE
config MSCC_OCELOT_SWITCH_LIB
+ depends on PTP_1588_CLOCK_OPTIONAL
select NET_DEVLINK
select REGMAP_MMIO
select PACKING
diff --git a/drivers/net/ethernet/mscc/Makefile b/drivers/net/ethernet/mscc/Makefile
index 5d435a565d4c..16987b72dfc0 100644
--- a/drivers/net/ethernet/mscc/Makefile
+++ b/drivers/net/ethernet/mscc/Makefile
@@ -5,6 +5,7 @@ mscc_ocelot_switch_lib-y := \
ocelot_devlink.o \
ocelot_flower.o \
ocelot_io.o \
+ ocelot_mm.o \
ocelot_police.o \
ocelot_ptp.o \
ocelot_stats.o \
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index da56f9bfeaf0..08acb7b89086 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -6,12 +6,16 @@
*/
#include <linux/dsa/ocelot.h>
#include <linux/if_bridge.h>
+#include <linux/iopoll.h>
#include <soc/mscc/ocelot_vcap.h>
#include "ocelot.h"
#include "ocelot_vcap.h"
-#define TABLE_UPDATE_SLEEP_US 10
-#define TABLE_UPDATE_TIMEOUT_US 100000
+#define TABLE_UPDATE_SLEEP_US 10
+#define TABLE_UPDATE_TIMEOUT_US 100000
+#define MEM_INIT_SLEEP_US 1000
+#define MEM_INIT_TIMEOUT_US 100000
+
#define OCELOT_RSV_VLAN_RANGE_START 4000
struct ocelot_mact_entry {
@@ -2713,6 +2717,46 @@ static void ocelot_detect_features(struct ocelot *ocelot)
ocelot->num_frame_refs = QSYS_MMGT_EQ_CTRL_FP_FREE_CNT(eq_ctrl);
}
+static int ocelot_mem_init_status(struct ocelot *ocelot)
+{
+ unsigned int val;
+ int err;
+
+ err = regmap_field_read(ocelot->regfields[SYS_RESET_CFG_MEM_INIT],
+ &val);
+
+ return err ?: val;
+}
+
+int ocelot_reset(struct ocelot *ocelot)
+{
+ int err;
+ u32 val;
+
+ err = regmap_field_write(ocelot->regfields[SYS_RESET_CFG_MEM_INIT], 1);
+ if (err)
+ return err;
+
+ err = regmap_field_write(ocelot->regfields[SYS_RESET_CFG_MEM_ENA], 1);
+ if (err)
+ return err;
+
+ /* MEM_INIT is a self-clearing bit. Wait for it to be cleared (should be
+ * 100us) before enabling the switch core.
+ */
+ err = readx_poll_timeout(ocelot_mem_init_status, ocelot, val, !val,
+ MEM_INIT_SLEEP_US, MEM_INIT_TIMEOUT_US);
+ if (err)
+ return err;
+
+ err = regmap_field_write(ocelot->regfields[SYS_RESET_CFG_MEM_ENA], 1);
+ if (err)
+ return err;
+
+ return regmap_field_write(ocelot->regfields[SYS_RESET_CFG_CORE_ENA], 1);
+}
+EXPORT_SYMBOL(ocelot_reset);
+
int ocelot_init(struct ocelot *ocelot)
{
int i, ret;
@@ -2738,10 +2782,8 @@ int ocelot_init(struct ocelot *ocelot)
return -ENOMEM;
ret = ocelot_stats_init(ocelot);
- if (ret) {
- destroy_workqueue(ocelot->owq);
- return ret;
- }
+ if (ret)
+ goto err_stats_init;
INIT_LIST_HEAD(&ocelot->multicast);
INIT_LIST_HEAD(&ocelot->pgids);
@@ -2756,6 +2798,12 @@ int ocelot_init(struct ocelot *ocelot)
if (ocelot->ops->psfp_init)
ocelot->ops->psfp_init(ocelot);
+ if (ocelot->mm_supported) {
+ ret = ocelot_mm_init(ocelot);
+ if (ret)
+ goto err_mm_init;
+ }
+
for (port = 0; port < ocelot->num_phys_ports; port++) {
/* Clear all counters (5 groups) */
ocelot_write(ocelot, SYS_STAT_CFG_STAT_VIEW(port) |
@@ -2853,6 +2901,12 @@ int ocelot_init(struct ocelot *ocelot)
ANA_CPUQ_8021_CFG, i);
return 0;
+
+err_mm_init:
+ ocelot_stats_deinit(ocelot);
+err_stats_init:
+ destroy_workqueue(ocelot->owq);
+ return ret;
}
EXPORT_SYMBOL(ocelot_init);
diff --git a/drivers/net/ethernet/mscc/ocelot.h b/drivers/net/ethernet/mscc/ocelot.h
index 70dbd9c4e512..e9a0179448bf 100644
--- a/drivers/net/ethernet/mscc/ocelot.h
+++ b/drivers/net/ethernet/mscc/ocelot.h
@@ -109,6 +109,8 @@ void ocelot_mirror_put(struct ocelot *ocelot);
int ocelot_stats_init(struct ocelot *ocelot);
void ocelot_stats_deinit(struct ocelot *ocelot);
+int ocelot_mm_init(struct ocelot *ocelot);
+
extern struct notifier_block ocelot_netdevice_nb;
extern struct notifier_block ocelot_switchdev_nb;
extern struct notifier_block ocelot_switchdev_blocking_nb;
diff --git a/drivers/net/ethernet/mscc/ocelot_devlink.c b/drivers/net/ethernet/mscc/ocelot_devlink.c
index b8737efd2a85..d9ea75a14f2f 100644
--- a/drivers/net/ethernet/mscc/ocelot_devlink.c
+++ b/drivers/net/ethernet/mscc/ocelot_devlink.c
@@ -487,6 +487,37 @@ static void ocelot_watermark_init(struct ocelot *ocelot)
ocelot_setup_sharing_watermarks(ocelot);
}
+/* Watermark encode
+ * Bit 8: Unit; 0:1, 1:16
+ * Bit 7-0: Value to be multiplied with unit
+ */
+u16 ocelot_wm_enc(u16 value)
+{
+ WARN_ON(value >= 16 * BIT(8));
+
+ if (value >= BIT(8))
+ return BIT(8) | (value / 16);
+
+ return value;
+}
+EXPORT_SYMBOL(ocelot_wm_enc);
+
+u16 ocelot_wm_dec(u16 wm)
+{
+ if (wm & BIT(8))
+ return (wm & GENMASK(7, 0)) * 16;
+
+ return wm;
+}
+EXPORT_SYMBOL(ocelot_wm_dec);
+
+void ocelot_wm_stat(u32 val, u32 *inuse, u32 *maxuse)
+{
+ *inuse = (val & GENMASK(23, 12)) >> 12;
+ *maxuse = val & GENMASK(11, 0);
+}
+EXPORT_SYMBOL(ocelot_wm_stat);
+
/* Pool size and type are fixed up at runtime. Keeping this structure to
* look up the cell size multipliers.
*/
diff --git a/drivers/net/ethernet/mscc/ocelot_flower.c b/drivers/net/ethernet/mscc/ocelot_flower.c
index 7c0897e779dc..ee052404eb55 100644
--- a/drivers/net/ethernet/mscc/ocelot_flower.c
+++ b/drivers/net/ethernet/mscc/ocelot_flower.c
@@ -605,6 +605,18 @@ ocelot_flower_parse_key(struct ocelot *ocelot, int port, bool ingress,
flow_rule_match_control(rule, &match);
}
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
+ struct flow_match_vlan match;
+
+ flow_rule_match_vlan(rule, &match);
+ filter->key_type = OCELOT_VCAP_KEY_ANY;
+ filter->vlan.vid.value = match.key->vlan_id;
+ filter->vlan.vid.mask = match.mask->vlan_id;
+ filter->vlan.pcp.value[0] = match.key->vlan_priority;
+ filter->vlan.pcp.mask[0] = match.mask->vlan_priority;
+ match_protocol = false;
+ }
+
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
struct flow_match_eth_addrs match;
@@ -737,18 +749,6 @@ ocelot_flower_parse_key(struct ocelot *ocelot, int port, bool ingress,
match_protocol = false;
}
- if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
- struct flow_match_vlan match;
-
- flow_rule_match_vlan(rule, &match);
- filter->key_type = OCELOT_VCAP_KEY_ANY;
- filter->vlan.vid.value = match.key->vlan_id;
- filter->vlan.vid.mask = match.mask->vlan_id;
- filter->vlan.pcp.value[0] = match.key->vlan_priority;
- filter->vlan.pcp.mask[0] = match.mask->vlan_priority;
- match_protocol = false;
- }
-
finished_key_parsing:
if (match_protocol && proto != ETH_P_ALL) {
if (filter->block_id == VCAP_ES0) {
diff --git a/drivers/net/ethernet/mscc/ocelot_mm.c b/drivers/net/ethernet/mscc/ocelot_mm.c
new file mode 100644
index 000000000000..0a8f21ae23f0
--- /dev/null
+++ b/drivers/net/ethernet/mscc/ocelot_mm.c
@@ -0,0 +1,215 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Hardware library for MAC Merge Layer and Frame Preemption on TSN-capable
+ * switches (VSC9959)
+ *
+ * Copyright 2022-2023 NXP
+ */
+#include <linux/ethtool.h>
+#include <soc/mscc/ocelot.h>
+#include <soc/mscc/ocelot_dev.h>
+#include <soc/mscc/ocelot_qsys.h>
+
+#include "ocelot.h"
+
+static const char *
+mm_verify_state_to_string(enum ethtool_mm_verify_status state)
+{
+ switch (state) {
+ case ETHTOOL_MM_VERIFY_STATUS_INITIAL:
+ return "INITIAL";
+ case ETHTOOL_MM_VERIFY_STATUS_VERIFYING:
+ return "VERIFYING";
+ case ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED:
+ return "SUCCEEDED";
+ case ETHTOOL_MM_VERIFY_STATUS_FAILED:
+ return "FAILED";
+ case ETHTOOL_MM_VERIFY_STATUS_DISABLED:
+ return "DISABLED";
+ default:
+ return "UNKNOWN";
+ }
+}
+
+static enum ethtool_mm_verify_status ocelot_mm_verify_status(u32 val)
+{
+ switch (DEV_MM_STAT_MM_STATUS_PRMPT_VERIFY_STATE_X(val)) {
+ case 0:
+ return ETHTOOL_MM_VERIFY_STATUS_INITIAL;
+ case 1:
+ return ETHTOOL_MM_VERIFY_STATUS_VERIFYING;
+ case 2:
+ return ETHTOOL_MM_VERIFY_STATUS_SUCCEEDED;
+ case 3:
+ return ETHTOOL_MM_VERIFY_STATUS_FAILED;
+ case 4:
+ return ETHTOOL_MM_VERIFY_STATUS_DISABLED;
+ default:
+ return ETHTOOL_MM_VERIFY_STATUS_UNKNOWN;
+ }
+}
+
+void ocelot_port_mm_irq(struct ocelot *ocelot, int port)
+{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ struct ocelot_mm_state *mm = &ocelot->mm[port];
+ enum ethtool_mm_verify_status verify_status;
+ u32 val;
+
+ mutex_lock(&mm->lock);
+
+ val = ocelot_port_readl(ocelot_port, DEV_MM_STATUS);
+
+ verify_status = ocelot_mm_verify_status(val);
+ if (mm->verify_status != verify_status) {
+ dev_dbg(ocelot->dev,
+ "Port %d MAC Merge verification state %s\n",
+ port, mm_verify_state_to_string(verify_status));
+ mm->verify_status = verify_status;
+ }
+
+ if (val & DEV_MM_STAT_MM_STATUS_PRMPT_ACTIVE_STICKY) {
+ mm->tx_active = !!(val & DEV_MM_STAT_MM_STATUS_PRMPT_ACTIVE_STATUS);
+
+ dev_dbg(ocelot->dev, "Port %d TX preemption %s\n",
+ port, mm->tx_active ? "active" : "inactive");
+ }
+
+ if (val & DEV_MM_STAT_MM_STATUS_UNEXP_RX_PFRM_STICKY) {
+ dev_err(ocelot->dev,
+ "Unexpected P-frame received on port %d while verification was unsuccessful or not yet verified\n",
+ port);
+ }
+
+ if (val & DEV_MM_STAT_MM_STATUS_UNEXP_TX_PFRM_STICKY) {
+ dev_err(ocelot->dev,
+ "Unexpected P-frame requested to be transmitted on port %d while verification was unsuccessful or not yet verified, or MM_TX_ENA=0\n",
+ port);
+ }
+
+ ocelot_port_writel(ocelot_port, val, DEV_MM_STATUS);
+
+ mutex_unlock(&mm->lock);
+}
+EXPORT_SYMBOL_GPL(ocelot_port_mm_irq);
+
+int ocelot_port_set_mm(struct ocelot *ocelot, int port,
+ struct ethtool_mm_cfg *cfg,
+ struct netlink_ext_ack *extack)
+{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ u32 mm_enable = 0, verify_disable = 0, add_frag_size;
+ struct ocelot_mm_state *mm;
+ int err;
+
+ if (!ocelot->mm_supported)
+ return -EOPNOTSUPP;
+
+ mm = &ocelot->mm[port];
+
+ err = ethtool_mm_frag_size_min_to_add(cfg->tx_min_frag_size,
+ &add_frag_size, extack);
+ if (err)
+ return err;
+
+ if (cfg->pmac_enabled)
+ mm_enable |= DEV_MM_CONFIG_ENABLE_CONFIG_MM_RX_ENA;
+
+ if (cfg->tx_enabled)
+ mm_enable |= DEV_MM_CONFIG_ENABLE_CONFIG_MM_TX_ENA;
+
+ if (!cfg->verify_enabled)
+ verify_disable = DEV_MM_CONFIG_VERIF_CONFIG_PRM_VERIFY_DIS;
+
+ mutex_lock(&mm->lock);
+
+ ocelot_port_rmwl(ocelot_port, mm_enable,
+ DEV_MM_CONFIG_ENABLE_CONFIG_MM_TX_ENA |
+ DEV_MM_CONFIG_ENABLE_CONFIG_MM_RX_ENA,
+ DEV_MM_ENABLE_CONFIG);
+
+ ocelot_port_rmwl(ocelot_port, verify_disable |
+ DEV_MM_CONFIG_VERIF_CONFIG_PRM_VERIFY_TIME(cfg->verify_time),
+ DEV_MM_CONFIG_VERIF_CONFIG_PRM_VERIFY_DIS |
+ DEV_MM_CONFIG_VERIF_CONFIG_PRM_VERIFY_TIME_M,
+ DEV_MM_VERIF_CONFIG);
+
+ ocelot_rmw_rix(ocelot,
+ QSYS_PREEMPTION_CFG_MM_ADD_FRAG_SIZE(add_frag_size),
+ QSYS_PREEMPTION_CFG_MM_ADD_FRAG_SIZE_M,
+ QSYS_PREEMPTION_CFG,
+ port);
+
+ mutex_unlock(&mm->lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ocelot_port_set_mm);
+
+int ocelot_port_get_mm(struct ocelot *ocelot, int port,
+ struct ethtool_mm_state *state)
+{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ struct ocelot_mm_state *mm;
+ u32 val, add_frag_size;
+
+ if (!ocelot->mm_supported)
+ return -EOPNOTSUPP;
+
+ mm = &ocelot->mm[port];
+
+ mutex_lock(&mm->lock);
+
+ val = ocelot_port_readl(ocelot_port, DEV_MM_ENABLE_CONFIG);
+ state->pmac_enabled = !!(val & DEV_MM_CONFIG_ENABLE_CONFIG_MM_RX_ENA);
+ state->tx_enabled = !!(val & DEV_MM_CONFIG_ENABLE_CONFIG_MM_TX_ENA);
+
+ val = ocelot_port_readl(ocelot_port, DEV_MM_VERIF_CONFIG);
+ state->verify_enabled = !(val & DEV_MM_CONFIG_VERIF_CONFIG_PRM_VERIFY_DIS);
+ state->verify_time = DEV_MM_CONFIG_VERIF_CONFIG_PRM_VERIFY_TIME_X(val);
+ state->max_verify_time = 128;
+
+ val = ocelot_read_rix(ocelot, QSYS_PREEMPTION_CFG, port);
+ add_frag_size = QSYS_PREEMPTION_CFG_MM_ADD_FRAG_SIZE_X(val);
+ state->tx_min_frag_size = ethtool_mm_frag_size_add_to_min(add_frag_size);
+ state->rx_min_frag_size = ETH_ZLEN;
+
+ state->verify_status = mm->verify_status;
+ state->tx_active = mm->tx_active;
+
+ mutex_unlock(&mm->lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ocelot_port_get_mm);
+
+int ocelot_mm_init(struct ocelot *ocelot)
+{
+ struct ocelot_port *ocelot_port;
+ struct ocelot_mm_state *mm;
+ int port;
+
+ if (!ocelot->mm_supported)
+ return 0;
+
+ ocelot->mm = devm_kcalloc(ocelot->dev, ocelot->num_phys_ports,
+ sizeof(*ocelot->mm), GFP_KERNEL);
+ if (!ocelot->mm)
+ return -ENOMEM;
+
+ for (port = 0; port < ocelot->num_phys_ports; port++) {
+ u32 val;
+
+ mm = &ocelot->mm[port];
+ mutex_init(&mm->lock);
+ ocelot_port = ocelot->ports[port];
+
+ /* Update initial status variable for the
+ * verification state machine
+ */
+ val = ocelot_port_readl(ocelot_port, DEV_MM_STATUS);
+ mm->verify_status = ocelot_mm_verify_status(val);
+ }
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/mscc/ocelot_ptp.c b/drivers/net/ethernet/mscc/ocelot_ptp.c
index 1a82f10c8853..2180ae94c744 100644
--- a/drivers/net/ethernet/mscc/ocelot_ptp.c
+++ b/drivers/net/ethernet/mscc/ocelot_ptp.c
@@ -335,8 +335,8 @@ static void
ocelot_populate_ipv6_ptp_event_trap_key(struct ocelot_vcap_filter *trap)
{
trap->key_type = OCELOT_VCAP_KEY_IPV6;
- trap->key.ipv4.proto.value[0] = IPPROTO_UDP;
- trap->key.ipv4.proto.mask[0] = 0xff;
+ trap->key.ipv6.proto.value[0] = IPPROTO_UDP;
+ trap->key.ipv6.proto.mask[0] = 0xff;
trap->key.ipv6.dport.value = PTP_EV_PORT;
trap->key.ipv6.dport.mask = 0xffff;
}
@@ -355,8 +355,8 @@ static void
ocelot_populate_ipv6_ptp_general_trap_key(struct ocelot_vcap_filter *trap)
{
trap->key_type = OCELOT_VCAP_KEY_IPV6;
- trap->key.ipv4.proto.value[0] = IPPROTO_UDP;
- trap->key.ipv4.proto.mask[0] = 0xff;
+ trap->key.ipv6.proto.value[0] = IPPROTO_UDP;
+ trap->key.ipv6.proto.mask[0] = 0xff;
trap->key.ipv6.dport.value = PTP_GEN_PORT;
trap->key.ipv6.dport.mask = 0xffff;
}
diff --git a/drivers/net/ethernet/mscc/ocelot_stats.c b/drivers/net/ethernet/mscc/ocelot_stats.c
index 1478c3b21af1..bdb893476832 100644
--- a/drivers/net/ethernet/mscc/ocelot_stats.c
+++ b/drivers/net/ethernet/mscc/ocelot_stats.c
@@ -4,6 +4,7 @@
* Copyright (c) 2017 Microsemi Corporation
* Copyright 2022 NXP
*/
+#include <linux/ethtool_netlink.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
@@ -54,6 +55,29 @@ enum ocelot_stat {
OCELOT_STAT_RX_GREEN_PRIO_5,
OCELOT_STAT_RX_GREEN_PRIO_6,
OCELOT_STAT_RX_GREEN_PRIO_7,
+ OCELOT_STAT_RX_ASSEMBLY_ERRS,
+ OCELOT_STAT_RX_SMD_ERRS,
+ OCELOT_STAT_RX_ASSEMBLY_OK,
+ OCELOT_STAT_RX_MERGE_FRAGMENTS,
+ OCELOT_STAT_RX_PMAC_OCTETS,
+ OCELOT_STAT_RX_PMAC_UNICAST,
+ OCELOT_STAT_RX_PMAC_MULTICAST,
+ OCELOT_STAT_RX_PMAC_BROADCAST,
+ OCELOT_STAT_RX_PMAC_SHORTS,
+ OCELOT_STAT_RX_PMAC_FRAGMENTS,
+ OCELOT_STAT_RX_PMAC_JABBERS,
+ OCELOT_STAT_RX_PMAC_CRC_ALIGN_ERRS,
+ OCELOT_STAT_RX_PMAC_SYM_ERRS,
+ OCELOT_STAT_RX_PMAC_64,
+ OCELOT_STAT_RX_PMAC_65_127,
+ OCELOT_STAT_RX_PMAC_128_255,
+ OCELOT_STAT_RX_PMAC_256_511,
+ OCELOT_STAT_RX_PMAC_512_1023,
+ OCELOT_STAT_RX_PMAC_1024_1526,
+ OCELOT_STAT_RX_PMAC_1527_MAX,
+ OCELOT_STAT_RX_PMAC_PAUSE,
+ OCELOT_STAT_RX_PMAC_CONTROL,
+ OCELOT_STAT_RX_PMAC_LONGS,
OCELOT_STAT_TX_OCTETS,
OCELOT_STAT_TX_UNICAST,
OCELOT_STAT_TX_MULTICAST,
@@ -85,6 +109,20 @@ enum ocelot_stat {
OCELOT_STAT_TX_GREEN_PRIO_6,
OCELOT_STAT_TX_GREEN_PRIO_7,
OCELOT_STAT_TX_AGED,
+ OCELOT_STAT_TX_MM_HOLD,
+ OCELOT_STAT_TX_MERGE_FRAGMENTS,
+ OCELOT_STAT_TX_PMAC_OCTETS,
+ OCELOT_STAT_TX_PMAC_UNICAST,
+ OCELOT_STAT_TX_PMAC_MULTICAST,
+ OCELOT_STAT_TX_PMAC_BROADCAST,
+ OCELOT_STAT_TX_PMAC_PAUSE,
+ OCELOT_STAT_TX_PMAC_64,
+ OCELOT_STAT_TX_PMAC_65_127,
+ OCELOT_STAT_TX_PMAC_128_255,
+ OCELOT_STAT_TX_PMAC_256_511,
+ OCELOT_STAT_TX_PMAC_512_1023,
+ OCELOT_STAT_TX_PMAC_1024_1526,
+ OCELOT_STAT_TX_PMAC_1527_MAX,
OCELOT_STAT_DROP_LOCAL,
OCELOT_STAT_DROP_TAIL,
OCELOT_STAT_DROP_YELLOW_PRIO_0,
@@ -228,6 +266,55 @@ static const struct ocelot_stat_layout ocelot_stats_layout[OCELOT_NUM_STATS] = {
OCELOT_COMMON_STATS,
};
+static const struct ocelot_stat_layout ocelot_mm_stats_layout[OCELOT_NUM_STATS] = {
+ OCELOT_COMMON_STATS,
+ OCELOT_STAT(RX_ASSEMBLY_ERRS),
+ OCELOT_STAT(RX_SMD_ERRS),
+ OCELOT_STAT(RX_ASSEMBLY_OK),
+ OCELOT_STAT(RX_MERGE_FRAGMENTS),
+ OCELOT_STAT(TX_MERGE_FRAGMENTS),
+ OCELOT_STAT(RX_PMAC_OCTETS),
+ OCELOT_STAT(RX_PMAC_UNICAST),
+ OCELOT_STAT(RX_PMAC_MULTICAST),
+ OCELOT_STAT(RX_PMAC_BROADCAST),
+ OCELOT_STAT(RX_PMAC_SHORTS),
+ OCELOT_STAT(RX_PMAC_FRAGMENTS),
+ OCELOT_STAT(RX_PMAC_JABBERS),
+ OCELOT_STAT(RX_PMAC_CRC_ALIGN_ERRS),
+ OCELOT_STAT(RX_PMAC_SYM_ERRS),
+ OCELOT_STAT(RX_PMAC_64),
+ OCELOT_STAT(RX_PMAC_65_127),
+ OCELOT_STAT(RX_PMAC_128_255),
+ OCELOT_STAT(RX_PMAC_256_511),
+ OCELOT_STAT(RX_PMAC_512_1023),
+ OCELOT_STAT(RX_PMAC_1024_1526),
+ OCELOT_STAT(RX_PMAC_1527_MAX),
+ OCELOT_STAT(RX_PMAC_PAUSE),
+ OCELOT_STAT(RX_PMAC_CONTROL),
+ OCELOT_STAT(RX_PMAC_LONGS),
+ OCELOT_STAT(TX_PMAC_OCTETS),
+ OCELOT_STAT(TX_PMAC_UNICAST),
+ OCELOT_STAT(TX_PMAC_MULTICAST),
+ OCELOT_STAT(TX_PMAC_BROADCAST),
+ OCELOT_STAT(TX_PMAC_PAUSE),
+ OCELOT_STAT(TX_PMAC_64),
+ OCELOT_STAT(TX_PMAC_65_127),
+ OCELOT_STAT(TX_PMAC_128_255),
+ OCELOT_STAT(TX_PMAC_256_511),
+ OCELOT_STAT(TX_PMAC_512_1023),
+ OCELOT_STAT(TX_PMAC_1024_1526),
+ OCELOT_STAT(TX_PMAC_1527_MAX),
+};
+
+static const struct ocelot_stat_layout *
+ocelot_get_stats_layout(struct ocelot *ocelot)
+{
+ if (ocelot->mm_supported)
+ return ocelot_mm_stats_layout;
+
+ return ocelot_stats_layout;
+}
+
/* Read the counters from hardware and keep them in region->buf.
* Caller must hold &ocelot->stat_view_lock.
*/
@@ -306,17 +393,20 @@ static void ocelot_check_stats_work(struct work_struct *work)
void ocelot_get_strings(struct ocelot *ocelot, int port, u32 sset, u8 *data)
{
+ const struct ocelot_stat_layout *layout;
int i;
if (sset != ETH_SS_STATS)
return;
+ layout = ocelot_get_stats_layout(ocelot);
+
for (i = 0; i < OCELOT_NUM_STATS; i++) {
- if (ocelot_stats_layout[i].name[0] == '\0')
+ if (layout[i].name[0] == '\0')
continue;
- memcpy(data + i * ETH_GSTRING_LEN, ocelot_stats_layout[i].name,
- ETH_GSTRING_LEN);
+ memcpy(data, layout[i].name, ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
}
}
EXPORT_SYMBOL(ocelot_get_strings);
@@ -350,13 +440,16 @@ out_unlock:
int ocelot_get_sset_count(struct ocelot *ocelot, int port, int sset)
{
+ const struct ocelot_stat_layout *layout;
int i, num_stats = 0;
if (sset != ETH_SS_STATS)
return -EOPNOTSUPP;
+ layout = ocelot_get_stats_layout(ocelot);
+
for (i = 0; i < OCELOT_NUM_STATS; i++)
- if (ocelot_stats_layout[i].name[0] != '\0')
+ if (layout[i].name[0] != '\0')
num_stats++;
return num_stats;
@@ -366,14 +459,17 @@ EXPORT_SYMBOL(ocelot_get_sset_count);
static void ocelot_port_ethtool_stats_cb(struct ocelot *ocelot, int port,
void *priv)
{
+ const struct ocelot_stat_layout *layout;
u64 *data = priv;
int i;
+ layout = ocelot_get_stats_layout(ocelot);
+
/* Copy all supported counters */
for (i = 0; i < OCELOT_NUM_STATS; i++) {
int index = port * OCELOT_NUM_STATS + i;
- if (ocelot_stats_layout[i].name[0] == '\0')
+ if (layout[i].name[0] == '\0')
continue;
*data++ = ocelot->stats[index];
@@ -395,14 +491,63 @@ static void ocelot_port_pause_stats_cb(struct ocelot *ocelot, int port, void *pr
pause_stats->rx_pause_frames = s[OCELOT_STAT_RX_PAUSE];
}
+static void ocelot_port_pmac_pause_stats_cb(struct ocelot *ocelot, int port,
+ void *priv)
+{
+ u64 *s = &ocelot->stats[port * OCELOT_NUM_STATS];
+ struct ethtool_pause_stats *pause_stats = priv;
+
+ pause_stats->tx_pause_frames = s[OCELOT_STAT_TX_PMAC_PAUSE];
+ pause_stats->rx_pause_frames = s[OCELOT_STAT_RX_PMAC_PAUSE];
+}
+
+static void ocelot_port_mm_stats_cb(struct ocelot *ocelot, int port,
+ void *priv)
+{
+ u64 *s = &ocelot->stats[port * OCELOT_NUM_STATS];
+ struct ethtool_mm_stats *stats = priv;
+
+ stats->MACMergeFrameAssErrorCount = s[OCELOT_STAT_RX_ASSEMBLY_ERRS];
+ stats->MACMergeFrameSmdErrorCount = s[OCELOT_STAT_RX_SMD_ERRS];
+ stats->MACMergeFrameAssOkCount = s[OCELOT_STAT_RX_ASSEMBLY_OK];
+ stats->MACMergeFragCountRx = s[OCELOT_STAT_RX_MERGE_FRAGMENTS];
+ stats->MACMergeFragCountTx = s[OCELOT_STAT_TX_MERGE_FRAGMENTS];
+ stats->MACMergeHoldCount = s[OCELOT_STAT_TX_MM_HOLD];
+}
+
void ocelot_port_get_pause_stats(struct ocelot *ocelot, int port,
struct ethtool_pause_stats *pause_stats)
{
- ocelot_port_stats_run(ocelot, port, pause_stats,
- ocelot_port_pause_stats_cb);
+ struct net_device *dev;
+
+ switch (pause_stats->src) {
+ case ETHTOOL_MAC_STATS_SRC_EMAC:
+ ocelot_port_stats_run(ocelot, port, pause_stats,
+ ocelot_port_pause_stats_cb);
+ break;
+ case ETHTOOL_MAC_STATS_SRC_PMAC:
+ if (ocelot->mm_supported)
+ ocelot_port_stats_run(ocelot, port, pause_stats,
+ ocelot_port_pmac_pause_stats_cb);
+ break;
+ case ETHTOOL_MAC_STATS_SRC_AGGREGATE:
+ dev = ocelot->ops->port_to_netdev(ocelot, port);
+ ethtool_aggregate_pause_stats(dev, pause_stats);
+ break;
+ }
}
EXPORT_SYMBOL_GPL(ocelot_port_get_pause_stats);
+void ocelot_port_get_mm_stats(struct ocelot *ocelot, int port,
+ struct ethtool_mm_stats *stats)
+{
+ if (!ocelot->mm_supported)
+ return;
+
+ ocelot_port_stats_run(ocelot, port, stats, ocelot_port_mm_stats_cb);
+}
+EXPORT_SYMBOL_GPL(ocelot_port_get_mm_stats);
+
static const struct ethtool_rmon_hist_range ocelot_rmon_ranges[] = {
{ 64, 64 },
{ 65, 127 },
@@ -441,14 +586,57 @@ static void ocelot_port_rmon_stats_cb(struct ocelot *ocelot, int port, void *pri
rmon_stats->hist_tx[6] = s[OCELOT_STAT_TX_1024_1526];
}
+static void ocelot_port_pmac_rmon_stats_cb(struct ocelot *ocelot, int port,
+ void *priv)
+{
+ u64 *s = &ocelot->stats[port * OCELOT_NUM_STATS];
+ struct ethtool_rmon_stats *rmon_stats = priv;
+
+ rmon_stats->undersize_pkts = s[OCELOT_STAT_RX_PMAC_SHORTS];
+ rmon_stats->oversize_pkts = s[OCELOT_STAT_RX_PMAC_LONGS];
+ rmon_stats->fragments = s[OCELOT_STAT_RX_PMAC_FRAGMENTS];
+ rmon_stats->jabbers = s[OCELOT_STAT_RX_PMAC_JABBERS];
+
+ rmon_stats->hist[0] = s[OCELOT_STAT_RX_PMAC_64];
+ rmon_stats->hist[1] = s[OCELOT_STAT_RX_PMAC_65_127];
+ rmon_stats->hist[2] = s[OCELOT_STAT_RX_PMAC_128_255];
+ rmon_stats->hist[3] = s[OCELOT_STAT_RX_PMAC_256_511];
+ rmon_stats->hist[4] = s[OCELOT_STAT_RX_PMAC_512_1023];
+ rmon_stats->hist[5] = s[OCELOT_STAT_RX_PMAC_1024_1526];
+ rmon_stats->hist[6] = s[OCELOT_STAT_RX_PMAC_1527_MAX];
+
+ rmon_stats->hist_tx[0] = s[OCELOT_STAT_TX_PMAC_64];
+ rmon_stats->hist_tx[1] = s[OCELOT_STAT_TX_PMAC_65_127];
+ rmon_stats->hist_tx[2] = s[OCELOT_STAT_TX_PMAC_128_255];
+ rmon_stats->hist_tx[3] = s[OCELOT_STAT_TX_PMAC_128_255];
+ rmon_stats->hist_tx[4] = s[OCELOT_STAT_TX_PMAC_256_511];
+ rmon_stats->hist_tx[5] = s[OCELOT_STAT_TX_PMAC_512_1023];
+ rmon_stats->hist_tx[6] = s[OCELOT_STAT_TX_PMAC_1024_1526];
+}
+
void ocelot_port_get_rmon_stats(struct ocelot *ocelot, int port,
struct ethtool_rmon_stats *rmon_stats,
const struct ethtool_rmon_hist_range **ranges)
{
+ struct net_device *dev;
+
*ranges = ocelot_rmon_ranges;
- ocelot_port_stats_run(ocelot, port, rmon_stats,
- ocelot_port_rmon_stats_cb);
+ switch (rmon_stats->src) {
+ case ETHTOOL_MAC_STATS_SRC_EMAC:
+ ocelot_port_stats_run(ocelot, port, rmon_stats,
+ ocelot_port_rmon_stats_cb);
+ break;
+ case ETHTOOL_MAC_STATS_SRC_PMAC:
+ if (ocelot->mm_supported)
+ ocelot_port_stats_run(ocelot, port, rmon_stats,
+ ocelot_port_pmac_rmon_stats_cb);
+ break;
+ case ETHTOOL_MAC_STATS_SRC_AGGREGATE:
+ dev = ocelot->ops->port_to_netdev(ocelot, port);
+ ethtool_aggregate_rmon_stats(dev, rmon_stats);
+ break;
+ }
}
EXPORT_SYMBOL_GPL(ocelot_port_get_rmon_stats);
@@ -460,11 +648,35 @@ static void ocelot_port_ctrl_stats_cb(struct ocelot *ocelot, int port, void *pri
ctrl_stats->MACControlFramesReceived = s[OCELOT_STAT_RX_CONTROL];
}
+static void ocelot_port_pmac_ctrl_stats_cb(struct ocelot *ocelot, int port,
+ void *priv)
+{
+ u64 *s = &ocelot->stats[port * OCELOT_NUM_STATS];
+ struct ethtool_eth_ctrl_stats *ctrl_stats = priv;
+
+ ctrl_stats->MACControlFramesReceived = s[OCELOT_STAT_RX_PMAC_CONTROL];
+}
+
void ocelot_port_get_eth_ctrl_stats(struct ocelot *ocelot, int port,
struct ethtool_eth_ctrl_stats *ctrl_stats)
{
- ocelot_port_stats_run(ocelot, port, ctrl_stats,
- ocelot_port_ctrl_stats_cb);
+ struct net_device *dev;
+
+ switch (ctrl_stats->src) {
+ case ETHTOOL_MAC_STATS_SRC_EMAC:
+ ocelot_port_stats_run(ocelot, port, ctrl_stats,
+ ocelot_port_ctrl_stats_cb);
+ break;
+ case ETHTOOL_MAC_STATS_SRC_PMAC:
+ if (ocelot->mm_supported)
+ ocelot_port_stats_run(ocelot, port, ctrl_stats,
+ ocelot_port_pmac_ctrl_stats_cb);
+ break;
+ case ETHTOOL_MAC_STATS_SRC_AGGREGATE:
+ dev = ocelot->ops->port_to_netdev(ocelot, port);
+ ethtool_aggregate_ctrl_stats(dev, ctrl_stats);
+ break;
+ }
}
EXPORT_SYMBOL_GPL(ocelot_port_get_eth_ctrl_stats);
@@ -510,11 +722,60 @@ static void ocelot_port_mac_stats_cb(struct ocelot *ocelot, int port, void *priv
mac_stats->AlignmentErrors = s[OCELOT_STAT_RX_CRC_ALIGN_ERRS];
}
+static void ocelot_port_pmac_mac_stats_cb(struct ocelot *ocelot, int port,
+ void *priv)
+{
+ u64 *s = &ocelot->stats[port * OCELOT_NUM_STATS];
+ struct ethtool_eth_mac_stats *mac_stats = priv;
+
+ mac_stats->OctetsTransmittedOK = s[OCELOT_STAT_TX_PMAC_OCTETS];
+ mac_stats->FramesTransmittedOK = s[OCELOT_STAT_TX_PMAC_64] +
+ s[OCELOT_STAT_TX_PMAC_65_127] +
+ s[OCELOT_STAT_TX_PMAC_128_255] +
+ s[OCELOT_STAT_TX_PMAC_256_511] +
+ s[OCELOT_STAT_TX_PMAC_512_1023] +
+ s[OCELOT_STAT_TX_PMAC_1024_1526] +
+ s[OCELOT_STAT_TX_PMAC_1527_MAX];
+ mac_stats->OctetsReceivedOK = s[OCELOT_STAT_RX_PMAC_OCTETS];
+ mac_stats->FramesReceivedOK = s[OCELOT_STAT_RX_PMAC_64] +
+ s[OCELOT_STAT_RX_PMAC_65_127] +
+ s[OCELOT_STAT_RX_PMAC_128_255] +
+ s[OCELOT_STAT_RX_PMAC_256_511] +
+ s[OCELOT_STAT_RX_PMAC_512_1023] +
+ s[OCELOT_STAT_RX_PMAC_1024_1526] +
+ s[OCELOT_STAT_RX_PMAC_1527_MAX];
+ mac_stats->MulticastFramesXmittedOK = s[OCELOT_STAT_TX_PMAC_MULTICAST];
+ mac_stats->BroadcastFramesXmittedOK = s[OCELOT_STAT_TX_PMAC_BROADCAST];
+ mac_stats->MulticastFramesReceivedOK = s[OCELOT_STAT_RX_PMAC_MULTICAST];
+ mac_stats->BroadcastFramesReceivedOK = s[OCELOT_STAT_RX_PMAC_BROADCAST];
+ mac_stats->FrameTooLongErrors = s[OCELOT_STAT_RX_PMAC_LONGS];
+ /* Sadly, C_RX_CRC is the sum of FCS and alignment errors, they are not
+ * counted individually.
+ */
+ mac_stats->FrameCheckSequenceErrors = s[OCELOT_STAT_RX_PMAC_CRC_ALIGN_ERRS];
+ mac_stats->AlignmentErrors = s[OCELOT_STAT_RX_PMAC_CRC_ALIGN_ERRS];
+}
+
void ocelot_port_get_eth_mac_stats(struct ocelot *ocelot, int port,
struct ethtool_eth_mac_stats *mac_stats)
{
- ocelot_port_stats_run(ocelot, port, mac_stats,
- ocelot_port_mac_stats_cb);
+ struct net_device *dev;
+
+ switch (mac_stats->src) {
+ case ETHTOOL_MAC_STATS_SRC_EMAC:
+ ocelot_port_stats_run(ocelot, port, mac_stats,
+ ocelot_port_mac_stats_cb);
+ break;
+ case ETHTOOL_MAC_STATS_SRC_PMAC:
+ if (ocelot->mm_supported)
+ ocelot_port_stats_run(ocelot, port, mac_stats,
+ ocelot_port_pmac_mac_stats_cb);
+ break;
+ case ETHTOOL_MAC_STATS_SRC_AGGREGATE:
+ dev = ocelot->ops->port_to_netdev(ocelot, port);
+ ethtool_aggregate_mac_stats(dev, mac_stats);
+ break;
+ }
}
EXPORT_SYMBOL_GPL(ocelot_port_get_eth_mac_stats);
@@ -526,11 +787,35 @@ static void ocelot_port_phy_stats_cb(struct ocelot *ocelot, int port, void *priv
phy_stats->SymbolErrorDuringCarrier = s[OCELOT_STAT_RX_SYM_ERRS];
}
+static void ocelot_port_pmac_phy_stats_cb(struct ocelot *ocelot, int port,
+ void *priv)
+{
+ u64 *s = &ocelot->stats[port * OCELOT_NUM_STATS];
+ struct ethtool_eth_phy_stats *phy_stats = priv;
+
+ phy_stats->SymbolErrorDuringCarrier = s[OCELOT_STAT_RX_PMAC_SYM_ERRS];
+}
+
void ocelot_port_get_eth_phy_stats(struct ocelot *ocelot, int port,
struct ethtool_eth_phy_stats *phy_stats)
{
- ocelot_port_stats_run(ocelot, port, phy_stats,
- ocelot_port_phy_stats_cb);
+ struct net_device *dev;
+
+ switch (phy_stats->src) {
+ case ETHTOOL_MAC_STATS_SRC_EMAC:
+ ocelot_port_stats_run(ocelot, port, phy_stats,
+ ocelot_port_phy_stats_cb);
+ break;
+ case ETHTOOL_MAC_STATS_SRC_PMAC:
+ if (ocelot->mm_supported)
+ ocelot_port_stats_run(ocelot, port, phy_stats,
+ ocelot_port_pmac_phy_stats_cb);
+ break;
+ case ETHTOOL_MAC_STATS_SRC_AGGREGATE:
+ dev = ocelot->ops->port_to_netdev(ocelot, port);
+ ethtool_aggregate_phy_stats(dev, phy_stats);
+ break;
+ }
}
EXPORT_SYMBOL_GPL(ocelot_port_get_eth_phy_stats);
@@ -602,16 +887,19 @@ EXPORT_SYMBOL(ocelot_port_get_stats64);
static int ocelot_prepare_stats_regions(struct ocelot *ocelot)
{
struct ocelot_stats_region *region = NULL;
+ const struct ocelot_stat_layout *layout;
unsigned int last = 0;
int i;
INIT_LIST_HEAD(&ocelot->stats_regions);
+ layout = ocelot_get_stats_layout(ocelot);
+
for (i = 0; i < OCELOT_NUM_STATS; i++) {
- if (!ocelot_stats_layout[i].reg)
+ if (!layout[i].reg)
continue;
- if (region && ocelot_stats_layout[i].reg == last + 4) {
+ if (region && layout[i].reg == last + 4) {
region->count++;
} else {
region = devm_kzalloc(ocelot->dev, sizeof(*region),
@@ -620,17 +908,17 @@ static int ocelot_prepare_stats_regions(struct ocelot *ocelot)
return -ENOMEM;
/* enum ocelot_stat must be kept sorted in the same
- * order as ocelot_stats_layout[i].reg in order to have
- * efficient bulking
+ * order as layout[i].reg in order to have efficient
+ * bulking
*/
- WARN_ON(last >= ocelot_stats_layout[i].reg);
+ WARN_ON(last >= layout[i].reg);
- region->base = ocelot_stats_layout[i].reg;
+ region->base = layout[i].reg;
region->count = 1;
list_add_tail(&region->node, &ocelot->stats_regions);
}
- last = ocelot_stats_layout[i].reg;
+ last = layout[i].reg;
}
list_for_each_entry(region, &ocelot->stats_regions, node) {
diff --git a/drivers/net/ethernet/mscc/ocelot_vsc7514.c b/drivers/net/ethernet/mscc/ocelot_vsc7514.c
index b097fd4a4061..7388c3b0535c 100644
--- a/drivers/net/ethernet/mscc/ocelot_vsc7514.c
+++ b/drivers/net/ethernet/mscc/ocelot_vsc7514.c
@@ -6,7 +6,6 @@
*/
#include <linux/dsa/ocelot.h>
#include <linux/interrupt.h>
-#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of_net.h>
#include <linux/netdevice.h>
@@ -17,6 +16,7 @@
#include <linux/skbuff.h>
#include <net/switchdev.h>
+#include <soc/mscc/ocelot.h>
#include <soc/mscc/ocelot_vcap.h>
#include <soc/mscc/ocelot_hsio.h>
#include <soc/mscc/vsc7514_regs.h>
@@ -26,80 +26,6 @@
#define VSC7514_VCAP_POLICER_BASE 128
#define VSC7514_VCAP_POLICER_MAX 191
-#define MEM_INIT_SLEEP_US 1000
-#define MEM_INIT_TIMEOUT_US 100000
-
-static const u32 *ocelot_regmap[TARGET_MAX] = {
- [ANA] = vsc7514_ana_regmap,
- [QS] = vsc7514_qs_regmap,
- [QSYS] = vsc7514_qsys_regmap,
- [REW] = vsc7514_rew_regmap,
- [SYS] = vsc7514_sys_regmap,
- [S0] = vsc7514_vcap_regmap,
- [S1] = vsc7514_vcap_regmap,
- [S2] = vsc7514_vcap_regmap,
- [PTP] = vsc7514_ptp_regmap,
- [DEV_GMII] = vsc7514_dev_gmii_regmap,
-};
-
-static const struct reg_field ocelot_regfields[REGFIELD_MAX] = {
- [ANA_ADVLEARN_VLAN_CHK] = REG_FIELD(ANA_ADVLEARN, 11, 11),
- [ANA_ADVLEARN_LEARN_MIRROR] = REG_FIELD(ANA_ADVLEARN, 0, 10),
- [ANA_ANEVENTS_MSTI_DROP] = REG_FIELD(ANA_ANEVENTS, 27, 27),
- [ANA_ANEVENTS_ACLKILL] = REG_FIELD(ANA_ANEVENTS, 26, 26),
- [ANA_ANEVENTS_ACLUSED] = REG_FIELD(ANA_ANEVENTS, 25, 25),
- [ANA_ANEVENTS_AUTOAGE] = REG_FIELD(ANA_ANEVENTS, 24, 24),
- [ANA_ANEVENTS_VS2TTL1] = REG_FIELD(ANA_ANEVENTS, 23, 23),
- [ANA_ANEVENTS_STORM_DROP] = REG_FIELD(ANA_ANEVENTS, 22, 22),
- [ANA_ANEVENTS_LEARN_DROP] = REG_FIELD(ANA_ANEVENTS, 21, 21),
- [ANA_ANEVENTS_AGED_ENTRY] = REG_FIELD(ANA_ANEVENTS, 20, 20),
- [ANA_ANEVENTS_CPU_LEARN_FAILED] = REG_FIELD(ANA_ANEVENTS, 19, 19),
- [ANA_ANEVENTS_AUTO_LEARN_FAILED] = REG_FIELD(ANA_ANEVENTS, 18, 18),
- [ANA_ANEVENTS_LEARN_REMOVE] = REG_FIELD(ANA_ANEVENTS, 17, 17),
- [ANA_ANEVENTS_AUTO_LEARNED] = REG_FIELD(ANA_ANEVENTS, 16, 16),
- [ANA_ANEVENTS_AUTO_MOVED] = REG_FIELD(ANA_ANEVENTS, 15, 15),
- [ANA_ANEVENTS_DROPPED] = REG_FIELD(ANA_ANEVENTS, 14, 14),
- [ANA_ANEVENTS_CLASSIFIED_DROP] = REG_FIELD(ANA_ANEVENTS, 13, 13),
- [ANA_ANEVENTS_CLASSIFIED_COPY] = REG_FIELD(ANA_ANEVENTS, 12, 12),
- [ANA_ANEVENTS_VLAN_DISCARD] = REG_FIELD(ANA_ANEVENTS, 11, 11),
- [ANA_ANEVENTS_FWD_DISCARD] = REG_FIELD(ANA_ANEVENTS, 10, 10),
- [ANA_ANEVENTS_MULTICAST_FLOOD] = REG_FIELD(ANA_ANEVENTS, 9, 9),
- [ANA_ANEVENTS_UNICAST_FLOOD] = REG_FIELD(ANA_ANEVENTS, 8, 8),
- [ANA_ANEVENTS_DEST_KNOWN] = REG_FIELD(ANA_ANEVENTS, 7, 7),
- [ANA_ANEVENTS_BUCKET3_MATCH] = REG_FIELD(ANA_ANEVENTS, 6, 6),
- [ANA_ANEVENTS_BUCKET2_MATCH] = REG_FIELD(ANA_ANEVENTS, 5, 5),
- [ANA_ANEVENTS_BUCKET1_MATCH] = REG_FIELD(ANA_ANEVENTS, 4, 4),
- [ANA_ANEVENTS_BUCKET0_MATCH] = REG_FIELD(ANA_ANEVENTS, 3, 3),
- [ANA_ANEVENTS_CPU_OPERATION] = REG_FIELD(ANA_ANEVENTS, 2, 2),
- [ANA_ANEVENTS_DMAC_LOOKUP] = REG_FIELD(ANA_ANEVENTS, 1, 1),
- [ANA_ANEVENTS_SMAC_LOOKUP] = REG_FIELD(ANA_ANEVENTS, 0, 0),
- [ANA_TABLES_MACACCESS_B_DOM] = REG_FIELD(ANA_TABLES_MACACCESS, 18, 18),
- [ANA_TABLES_MACTINDX_BUCKET] = REG_FIELD(ANA_TABLES_MACTINDX, 10, 11),
- [ANA_TABLES_MACTINDX_M_INDEX] = REG_FIELD(ANA_TABLES_MACTINDX, 0, 9),
- [QSYS_TIMED_FRAME_ENTRY_TFRM_VLD] = REG_FIELD(QSYS_TIMED_FRAME_ENTRY, 20, 20),
- [QSYS_TIMED_FRAME_ENTRY_TFRM_FP] = REG_FIELD(QSYS_TIMED_FRAME_ENTRY, 8, 19),
- [QSYS_TIMED_FRAME_ENTRY_TFRM_PORTNO] = REG_FIELD(QSYS_TIMED_FRAME_ENTRY, 4, 7),
- [QSYS_TIMED_FRAME_ENTRY_TFRM_TM_SEL] = REG_FIELD(QSYS_TIMED_FRAME_ENTRY, 1, 3),
- [QSYS_TIMED_FRAME_ENTRY_TFRM_TM_T] = REG_FIELD(QSYS_TIMED_FRAME_ENTRY, 0, 0),
- [SYS_RESET_CFG_CORE_ENA] = REG_FIELD(SYS_RESET_CFG, 2, 2),
- [SYS_RESET_CFG_MEM_ENA] = REG_FIELD(SYS_RESET_CFG, 1, 1),
- [SYS_RESET_CFG_MEM_INIT] = REG_FIELD(SYS_RESET_CFG, 0, 0),
- /* Replicated per number of ports (12), register size 4 per port */
- [QSYS_SWITCH_PORT_MODE_PORT_ENA] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 14, 14, 12, 4),
- [QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 11, 13, 12, 4),
- [QSYS_SWITCH_PORT_MODE_YEL_RSRVD] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 10, 10, 12, 4),
- [QSYS_SWITCH_PORT_MODE_INGRESS_DROP_MODE] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 9, 9, 12, 4),
- [QSYS_SWITCH_PORT_MODE_TX_PFC_ENA] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 1, 8, 12, 4),
- [QSYS_SWITCH_PORT_MODE_TX_PFC_MODE] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 0, 0, 12, 4),
- [SYS_PORT_MODE_DATA_WO_TS] = REG_FIELD_ID(SYS_PORT_MODE, 5, 6, 12, 4),
- [SYS_PORT_MODE_INCL_INJ_HDR] = REG_FIELD_ID(SYS_PORT_MODE, 3, 4, 12, 4),
- [SYS_PORT_MODE_INCL_XTR_HDR] = REG_FIELD_ID(SYS_PORT_MODE, 1, 2, 12, 4),
- [SYS_PORT_MODE_INCL_HDR_ERR] = REG_FIELD_ID(SYS_PORT_MODE, 0, 0, 12, 4),
- [SYS_PAUSE_CFG_PAUSE_START] = REG_FIELD_ID(SYS_PAUSE_CFG, 10, 18, 12, 4),
- [SYS_PAUSE_CFG_PAUSE_STOP] = REG_FIELD_ID(SYS_PAUSE_CFG, 1, 9, 12, 4),
- [SYS_PAUSE_CFG_PAUSE_ENA] = REG_FIELD_ID(SYS_PAUSE_CFG, 0, 1, 12, 4),
-};
-
static void ocelot_pll5_init(struct ocelot *ocelot)
{
/* Configure PLL5. This will need a proper CCF driver
@@ -133,11 +59,11 @@ static int ocelot_chip_init(struct ocelot *ocelot, const struct ocelot_ops *ops)
{
int ret;
- ocelot->map = ocelot_regmap;
+ ocelot->map = vsc7514_regmap;
ocelot->num_mact_rows = 1024;
ocelot->ops = ops;
- ret = ocelot_regfields_init(ocelot, ocelot_regfields);
+ ret = ocelot_regfields_init(ocelot, vsc7514_regfields);
if (ret)
return ret;
@@ -190,73 +116,6 @@ static const struct of_device_id mscc_ocelot_match[] = {
};
MODULE_DEVICE_TABLE(of, mscc_ocelot_match);
-static int ocelot_mem_init_status(struct ocelot *ocelot)
-{
- unsigned int val;
- int err;
-
- err = regmap_field_read(ocelot->regfields[SYS_RESET_CFG_MEM_INIT],
- &val);
-
- return err ?: val;
-}
-
-static int ocelot_reset(struct ocelot *ocelot)
-{
- int err;
- u32 val;
-
- err = regmap_field_write(ocelot->regfields[SYS_RESET_CFG_MEM_INIT], 1);
- if (err)
- return err;
-
- err = regmap_field_write(ocelot->regfields[SYS_RESET_CFG_MEM_ENA], 1);
- if (err)
- return err;
-
- /* MEM_INIT is a self-clearing bit. Wait for it to be cleared (should be
- * 100us) before enabling the switch core.
- */
- err = readx_poll_timeout(ocelot_mem_init_status, ocelot, val, !val,
- MEM_INIT_SLEEP_US, MEM_INIT_TIMEOUT_US);
- if (err)
- return err;
-
- err = regmap_field_write(ocelot->regfields[SYS_RESET_CFG_MEM_ENA], 1);
- if (err)
- return err;
-
- return regmap_field_write(ocelot->regfields[SYS_RESET_CFG_CORE_ENA], 1);
-}
-
-/* Watermark encode
- * Bit 8: Unit; 0:1, 1:16
- * Bit 7-0: Value to be multiplied with unit
- */
-static u16 ocelot_wm_enc(u16 value)
-{
- WARN_ON(value >= 16 * BIT(8));
-
- if (value >= BIT(8))
- return BIT(8) | (value / 16);
-
- return value;
-}
-
-static u16 ocelot_wm_dec(u16 wm)
-{
- if (wm & BIT(8))
- return (wm & GENMASK(7, 0)) * 16;
-
- return wm;
-}
-
-static void ocelot_wm_stat(u32 val, u32 *inuse, u32 *maxuse)
-{
- *inuse = (val & GENMASK(23, 12)) >> 12;
- *maxuse = val & GENMASK(11, 0);
-}
-
static const struct ocelot_ops ocelot_ops = {
.reset = ocelot_reset,
.wm_enc = ocelot_wm_enc,
@@ -266,49 +125,6 @@ static const struct ocelot_ops ocelot_ops = {
.netdev_to_port = ocelot_netdev_to_port,
};
-static struct vcap_props vsc7514_vcap_props[] = {
- [VCAP_ES0] = {
- .action_type_width = 0,
- .action_table = {
- [ES0_ACTION_TYPE_NORMAL] = {
- .width = 73, /* HIT_STICKY not included */
- .count = 1,
- },
- },
- .target = S0,
- .keys = vsc7514_vcap_es0_keys,
- .actions = vsc7514_vcap_es0_actions,
- },
- [VCAP_IS1] = {
- .action_type_width = 0,
- .action_table = {
- [IS1_ACTION_TYPE_NORMAL] = {
- .width = 78, /* HIT_STICKY not included */
- .count = 4,
- },
- },
- .target = S1,
- .keys = vsc7514_vcap_is1_keys,
- .actions = vsc7514_vcap_is1_actions,
- },
- [VCAP_IS2] = {
- .action_type_width = 1,
- .action_table = {
- [IS2_ACTION_TYPE_NORMAL] = {
- .width = 49,
- .count = 2
- },
- [IS2_ACTION_TYPE_SMAC_SIP] = {
- .width = 6,
- .count = 4
- },
- },
- .target = S2,
- .keys = vsc7514_vcap_is2_keys,
- .actions = vsc7514_vcap_is2_actions,
- },
-};
-
static struct ptp_clock_info ocelot_ptp_clock_info = {
.owner = THIS_MODULE,
.name = "ocelot ptp",
diff --git a/drivers/net/ethernet/mscc/vsc7514_regs.c b/drivers/net/ethernet/mscc/vsc7514_regs.c
index 9d2d3e13cacf..ef6fd3f6be30 100644
--- a/drivers/net/ethernet/mscc/vsc7514_regs.c
+++ b/drivers/net/ethernet/mscc/vsc7514_regs.c
@@ -9,7 +9,66 @@
#include <soc/mscc/vsc7514_regs.h>
#include "ocelot.h"
-const u32 vsc7514_ana_regmap[] = {
+const struct reg_field vsc7514_regfields[REGFIELD_MAX] = {
+ [ANA_ADVLEARN_VLAN_CHK] = REG_FIELD(ANA_ADVLEARN, 11, 11),
+ [ANA_ADVLEARN_LEARN_MIRROR] = REG_FIELD(ANA_ADVLEARN, 0, 10),
+ [ANA_ANEVENTS_MSTI_DROP] = REG_FIELD(ANA_ANEVENTS, 27, 27),
+ [ANA_ANEVENTS_ACLKILL] = REG_FIELD(ANA_ANEVENTS, 26, 26),
+ [ANA_ANEVENTS_ACLUSED] = REG_FIELD(ANA_ANEVENTS, 25, 25),
+ [ANA_ANEVENTS_AUTOAGE] = REG_FIELD(ANA_ANEVENTS, 24, 24),
+ [ANA_ANEVENTS_VS2TTL1] = REG_FIELD(ANA_ANEVENTS, 23, 23),
+ [ANA_ANEVENTS_STORM_DROP] = REG_FIELD(ANA_ANEVENTS, 22, 22),
+ [ANA_ANEVENTS_LEARN_DROP] = REG_FIELD(ANA_ANEVENTS, 21, 21),
+ [ANA_ANEVENTS_AGED_ENTRY] = REG_FIELD(ANA_ANEVENTS, 20, 20),
+ [ANA_ANEVENTS_CPU_LEARN_FAILED] = REG_FIELD(ANA_ANEVENTS, 19, 19),
+ [ANA_ANEVENTS_AUTO_LEARN_FAILED] = REG_FIELD(ANA_ANEVENTS, 18, 18),
+ [ANA_ANEVENTS_LEARN_REMOVE] = REG_FIELD(ANA_ANEVENTS, 17, 17),
+ [ANA_ANEVENTS_AUTO_LEARNED] = REG_FIELD(ANA_ANEVENTS, 16, 16),
+ [ANA_ANEVENTS_AUTO_MOVED] = REG_FIELD(ANA_ANEVENTS, 15, 15),
+ [ANA_ANEVENTS_DROPPED] = REG_FIELD(ANA_ANEVENTS, 14, 14),
+ [ANA_ANEVENTS_CLASSIFIED_DROP] = REG_FIELD(ANA_ANEVENTS, 13, 13),
+ [ANA_ANEVENTS_CLASSIFIED_COPY] = REG_FIELD(ANA_ANEVENTS, 12, 12),
+ [ANA_ANEVENTS_VLAN_DISCARD] = REG_FIELD(ANA_ANEVENTS, 11, 11),
+ [ANA_ANEVENTS_FWD_DISCARD] = REG_FIELD(ANA_ANEVENTS, 10, 10),
+ [ANA_ANEVENTS_MULTICAST_FLOOD] = REG_FIELD(ANA_ANEVENTS, 9, 9),
+ [ANA_ANEVENTS_UNICAST_FLOOD] = REG_FIELD(ANA_ANEVENTS, 8, 8),
+ [ANA_ANEVENTS_DEST_KNOWN] = REG_FIELD(ANA_ANEVENTS, 7, 7),
+ [ANA_ANEVENTS_BUCKET3_MATCH] = REG_FIELD(ANA_ANEVENTS, 6, 6),
+ [ANA_ANEVENTS_BUCKET2_MATCH] = REG_FIELD(ANA_ANEVENTS, 5, 5),
+ [ANA_ANEVENTS_BUCKET1_MATCH] = REG_FIELD(ANA_ANEVENTS, 4, 4),
+ [ANA_ANEVENTS_BUCKET0_MATCH] = REG_FIELD(ANA_ANEVENTS, 3, 3),
+ [ANA_ANEVENTS_CPU_OPERATION] = REG_FIELD(ANA_ANEVENTS, 2, 2),
+ [ANA_ANEVENTS_DMAC_LOOKUP] = REG_FIELD(ANA_ANEVENTS, 1, 1),
+ [ANA_ANEVENTS_SMAC_LOOKUP] = REG_FIELD(ANA_ANEVENTS, 0, 0),
+ [ANA_TABLES_MACACCESS_B_DOM] = REG_FIELD(ANA_TABLES_MACACCESS, 18, 18),
+ [ANA_TABLES_MACTINDX_BUCKET] = REG_FIELD(ANA_TABLES_MACTINDX, 10, 11),
+ [ANA_TABLES_MACTINDX_M_INDEX] = REG_FIELD(ANA_TABLES_MACTINDX, 0, 9),
+ [QSYS_TIMED_FRAME_ENTRY_TFRM_VLD] = REG_FIELD(QSYS_TIMED_FRAME_ENTRY, 20, 20),
+ [QSYS_TIMED_FRAME_ENTRY_TFRM_FP] = REG_FIELD(QSYS_TIMED_FRAME_ENTRY, 8, 19),
+ [QSYS_TIMED_FRAME_ENTRY_TFRM_PORTNO] = REG_FIELD(QSYS_TIMED_FRAME_ENTRY, 4, 7),
+ [QSYS_TIMED_FRAME_ENTRY_TFRM_TM_SEL] = REG_FIELD(QSYS_TIMED_FRAME_ENTRY, 1, 3),
+ [QSYS_TIMED_FRAME_ENTRY_TFRM_TM_T] = REG_FIELD(QSYS_TIMED_FRAME_ENTRY, 0, 0),
+ [SYS_RESET_CFG_CORE_ENA] = REG_FIELD(SYS_RESET_CFG, 2, 2),
+ [SYS_RESET_CFG_MEM_ENA] = REG_FIELD(SYS_RESET_CFG, 1, 1),
+ [SYS_RESET_CFG_MEM_INIT] = REG_FIELD(SYS_RESET_CFG, 0, 0),
+ /* Replicated per number of ports (12), register size 4 per port */
+ [QSYS_SWITCH_PORT_MODE_PORT_ENA] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 14, 14, 12, 4),
+ [QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 11, 13, 12, 4),
+ [QSYS_SWITCH_PORT_MODE_YEL_RSRVD] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 10, 10, 12, 4),
+ [QSYS_SWITCH_PORT_MODE_INGRESS_DROP_MODE] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 9, 9, 12, 4),
+ [QSYS_SWITCH_PORT_MODE_TX_PFC_ENA] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 1, 8, 12, 4),
+ [QSYS_SWITCH_PORT_MODE_TX_PFC_MODE] = REG_FIELD_ID(QSYS_SWITCH_PORT_MODE, 0, 0, 12, 4),
+ [SYS_PORT_MODE_DATA_WO_TS] = REG_FIELD_ID(SYS_PORT_MODE, 5, 6, 12, 4),
+ [SYS_PORT_MODE_INCL_INJ_HDR] = REG_FIELD_ID(SYS_PORT_MODE, 3, 4, 12, 4),
+ [SYS_PORT_MODE_INCL_XTR_HDR] = REG_FIELD_ID(SYS_PORT_MODE, 1, 2, 12, 4),
+ [SYS_PORT_MODE_INCL_HDR_ERR] = REG_FIELD_ID(SYS_PORT_MODE, 0, 0, 12, 4),
+ [SYS_PAUSE_CFG_PAUSE_START] = REG_FIELD_ID(SYS_PAUSE_CFG, 10, 18, 12, 4),
+ [SYS_PAUSE_CFG_PAUSE_STOP] = REG_FIELD_ID(SYS_PAUSE_CFG, 1, 9, 12, 4),
+ [SYS_PAUSE_CFG_PAUSE_ENA] = REG_FIELD_ID(SYS_PAUSE_CFG, 0, 1, 12, 4),
+};
+EXPORT_SYMBOL(vsc7514_regfields);
+
+static const u32 vsc7514_ana_regmap[] = {
REG(ANA_ADVLEARN, 0x009000),
REG(ANA_VLANMASK, 0x009004),
REG(ANA_PORT_B_DOMAIN, 0x009008),
@@ -89,9 +148,8 @@ const u32 vsc7514_ana_regmap[] = {
REG(ANA_POL_HYST, 0x008bec),
REG(ANA_POL_MISC_CFG, 0x008bf0),
};
-EXPORT_SYMBOL(vsc7514_ana_regmap);
-const u32 vsc7514_qs_regmap[] = {
+static const u32 vsc7514_qs_regmap[] = {
REG(QS_XTR_GRP_CFG, 0x000000),
REG(QS_XTR_RD, 0x000008),
REG(QS_XTR_FRM_PRUNING, 0x000010),
@@ -105,9 +163,8 @@ const u32 vsc7514_qs_regmap[] = {
REG(QS_INJ_ERR, 0x000040),
REG(QS_INH_DBG, 0x000048),
};
-EXPORT_SYMBOL(vsc7514_qs_regmap);
-const u32 vsc7514_qsys_regmap[] = {
+static const u32 vsc7514_qsys_regmap[] = {
REG(QSYS_PORT_MODE, 0x011200),
REG(QSYS_SWITCH_PORT_MODE, 0x011234),
REG(QSYS_STAT_CNT_CFG, 0x011264),
@@ -150,9 +207,8 @@ const u32 vsc7514_qsys_regmap[] = {
REG(QSYS_SE_STATE, 0x00004c),
REG(QSYS_HSCH_MISC_CFG, 0x011388),
};
-EXPORT_SYMBOL(vsc7514_qsys_regmap);
-const u32 vsc7514_rew_regmap[] = {
+static const u32 vsc7514_rew_regmap[] = {
REG(REW_PORT_VLAN_CFG, 0x000000),
REG(REW_TAG_CFG, 0x000004),
REG(REW_PORT_CFG, 0x000008),
@@ -165,9 +221,8 @@ const u32 vsc7514_rew_regmap[] = {
REG(REW_STAT_CFG, 0x000890),
REG(REW_PPT, 0x000680),
};
-EXPORT_SYMBOL(vsc7514_rew_regmap);
-const u32 vsc7514_sys_regmap[] = {
+static const u32 vsc7514_sys_regmap[] = {
REG(SYS_COUNT_RX_OCTETS, 0x000000),
REG(SYS_COUNT_RX_UNICAST, 0x000004),
REG(SYS_COUNT_RX_MULTICAST, 0x000008),
@@ -288,9 +343,8 @@ const u32 vsc7514_sys_regmap[] = {
REG(SYS_PTP_NXT, 0x0006c0),
REG(SYS_PTP_CFG, 0x0006c4),
};
-EXPORT_SYMBOL(vsc7514_sys_regmap);
-const u32 vsc7514_vcap_regmap[] = {
+static const u32 vsc7514_vcap_regmap[] = {
/* VCAP_CORE_CFG */
REG(VCAP_CORE_UPDATE_CTRL, 0x000000),
REG(VCAP_CORE_MV_CFG, 0x000004),
@@ -312,9 +366,8 @@ const u32 vsc7514_vcap_regmap[] = {
REG(VCAP_CONST_CORE_CNT, 0x0003b8),
REG(VCAP_CONST_IF_CNT, 0x0003bc),
};
-EXPORT_SYMBOL(vsc7514_vcap_regmap);
-const u32 vsc7514_ptp_regmap[] = {
+static const u32 vsc7514_ptp_regmap[] = {
REG(PTP_PIN_CFG, 0x000000),
REG(PTP_PIN_TOD_SEC_MSB, 0x000004),
REG(PTP_PIN_TOD_SEC_LSB, 0x000008),
@@ -325,9 +378,8 @@ const u32 vsc7514_ptp_regmap[] = {
REG(PTP_CLK_CFG_ADJ_CFG, 0x0000a4),
REG(PTP_CLK_CFG_ADJ_FREQ, 0x0000a8),
};
-EXPORT_SYMBOL(vsc7514_ptp_regmap);
-const u32 vsc7514_dev_gmii_regmap[] = {
+static const u32 vsc7514_dev_gmii_regmap[] = {
REG(DEV_CLOCK_CFG, 0x0),
REG(DEV_PORT_MISC, 0x4),
REG(DEV_EVENTS, 0x8),
@@ -368,9 +420,22 @@ const u32 vsc7514_dev_gmii_regmap[] = {
REG(DEV_PCS_FX100_CFG, 0x94),
REG(DEV_PCS_FX100_STATUS, 0x98),
};
-EXPORT_SYMBOL(vsc7514_dev_gmii_regmap);
-const struct vcap_field vsc7514_vcap_es0_keys[] = {
+const u32 *vsc7514_regmap[TARGET_MAX] = {
+ [ANA] = vsc7514_ana_regmap,
+ [QS] = vsc7514_qs_regmap,
+ [QSYS] = vsc7514_qsys_regmap,
+ [REW] = vsc7514_rew_regmap,
+ [SYS] = vsc7514_sys_regmap,
+ [S0] = vsc7514_vcap_regmap,
+ [S1] = vsc7514_vcap_regmap,
+ [S2] = vsc7514_vcap_regmap,
+ [PTP] = vsc7514_ptp_regmap,
+ [DEV_GMII] = vsc7514_dev_gmii_regmap,
+};
+EXPORT_SYMBOL(vsc7514_regmap);
+
+static const struct vcap_field vsc7514_vcap_es0_keys[] = {
[VCAP_ES0_EGR_PORT] = { 0, 4 },
[VCAP_ES0_IGR_PORT] = { 4, 4 },
[VCAP_ES0_RSV] = { 8, 2 },
@@ -380,9 +445,8 @@ const struct vcap_field vsc7514_vcap_es0_keys[] = {
[VCAP_ES0_DP] = { 24, 1 },
[VCAP_ES0_PCP] = { 25, 3 },
};
-EXPORT_SYMBOL(vsc7514_vcap_es0_keys);
-const struct vcap_field vsc7514_vcap_es0_actions[] = {
+static const struct vcap_field vsc7514_vcap_es0_actions[] = {
[VCAP_ES0_ACT_PUSH_OUTER_TAG] = { 0, 2 },
[VCAP_ES0_ACT_PUSH_INNER_TAG] = { 2, 1 },
[VCAP_ES0_ACT_TAG_A_TPID_SEL] = { 3, 2 },
@@ -402,9 +466,8 @@ const struct vcap_field vsc7514_vcap_es0_actions[] = {
[VCAP_ES0_ACT_RSV] = { 49, 24 },
[VCAP_ES0_ACT_HIT_STICKY] = { 73, 1 },
};
-EXPORT_SYMBOL(vsc7514_vcap_es0_actions);
-const struct vcap_field vsc7514_vcap_is1_keys[] = {
+static const struct vcap_field vsc7514_vcap_is1_keys[] = {
[VCAP_IS1_HK_TYPE] = { 0, 1 },
[VCAP_IS1_HK_LOOKUP] = { 1, 2 },
[VCAP_IS1_HK_IGR_PORT_MASK] = { 3, 12 },
@@ -454,9 +517,8 @@ const struct vcap_field vsc7514_vcap_is1_keys[] = {
[VCAP_IS1_HK_IP4_L4_RNG] = { 148, 8 },
[VCAP_IS1_HK_IP4_IP_PAYLOAD_S1_5TUPLE] = { 156, 32 },
};
-EXPORT_SYMBOL(vsc7514_vcap_is1_keys);
-const struct vcap_field vsc7514_vcap_is1_actions[] = {
+static const struct vcap_field vsc7514_vcap_is1_actions[] = {
[VCAP_IS1_ACT_DSCP_ENA] = { 0, 1 },
[VCAP_IS1_ACT_DSCP_VAL] = { 1, 6 },
[VCAP_IS1_ACT_QOS_ENA] = { 7, 1 },
@@ -479,9 +541,8 @@ const struct vcap_field vsc7514_vcap_is1_actions[] = {
[VCAP_IS1_ACT_CUSTOM_ACE_TYPE_ENA] = { 74, 4 },
[VCAP_IS1_ACT_HIT_STICKY] = { 78, 1 },
};
-EXPORT_SYMBOL(vsc7514_vcap_is1_actions);
-const struct vcap_field vsc7514_vcap_is2_keys[] = {
+static const struct vcap_field vsc7514_vcap_is2_keys[] = {
/* Common: 46 bits */
[VCAP_IS2_TYPE] = { 0, 4 },
[VCAP_IS2_HK_FIRST] = { 4, 1 },
@@ -560,9 +621,8 @@ const struct vcap_field vsc7514_vcap_is2_keys[] = {
[VCAP_IS2_HK_OAM_CCM_CNTS_EQ0] = { 186, 1 },
[VCAP_IS2_HK_OAM_IS_Y1731] = { 187, 1 },
};
-EXPORT_SYMBOL(vsc7514_vcap_is2_keys);
-const struct vcap_field vsc7514_vcap_is2_actions[] = {
+static const struct vcap_field vsc7514_vcap_is2_actions[] = {
[VCAP_IS2_ACT_HIT_ME_ONCE] = { 0, 1 },
[VCAP_IS2_ACT_CPU_COPY_ENA] = { 1, 1 },
[VCAP_IS2_ACT_CPU_QU_NUM] = { 2, 3 },
@@ -579,4 +639,47 @@ const struct vcap_field vsc7514_vcap_is2_actions[] = {
[VCAP_IS2_ACT_ACL_ID] = { 43, 6 },
[VCAP_IS2_ACT_HIT_CNT] = { 49, 32 },
};
-EXPORT_SYMBOL(vsc7514_vcap_is2_actions);
+
+struct vcap_props vsc7514_vcap_props[] = {
+ [VCAP_ES0] = {
+ .action_type_width = 0,
+ .action_table = {
+ [ES0_ACTION_TYPE_NORMAL] = {
+ .width = 73, /* HIT_STICKY not included */
+ .count = 1,
+ },
+ },
+ .target = S0,
+ .keys = vsc7514_vcap_es0_keys,
+ .actions = vsc7514_vcap_es0_actions,
+ },
+ [VCAP_IS1] = {
+ .action_type_width = 0,
+ .action_table = {
+ [IS1_ACTION_TYPE_NORMAL] = {
+ .width = 78, /* HIT_STICKY not included */
+ .count = 4,
+ },
+ },
+ .target = S1,
+ .keys = vsc7514_vcap_is1_keys,
+ .actions = vsc7514_vcap_is1_actions,
+ },
+ [VCAP_IS2] = {
+ .action_type_width = 1,
+ .action_table = {
+ [IS2_ACTION_TYPE_NORMAL] = {
+ .width = 49,
+ .count = 2
+ },
+ [IS2_ACTION_TYPE_SMAC_SIP] = {
+ .width = 6,
+ .count = 4
+ },
+ },
+ .target = S2,
+ .keys = vsc7514_vcap_is2_keys,
+ .actions = vsc7514_vcap_is2_actions,
+ },
+};
+EXPORT_SYMBOL(vsc7514_vcap_props);
diff --git a/drivers/net/ethernet/netronome/Kconfig b/drivers/net/ethernet/netronome/Kconfig
index e785c00b5845..d03d6e96f730 100644
--- a/drivers/net/ethernet/netronome/Kconfig
+++ b/drivers/net/ethernet/netronome/Kconfig
@@ -18,7 +18,7 @@ if NET_VENDOR_NETRONOME
config NFP
tristate "Netronome(R) NFP4000/NFP6000 NIC driver"
- depends on PCI && PCI_MSI
+ depends on PCI_MSI
depends on VXLAN || VXLAN=n
depends on TLS && TLS_DEVICE || TLS_DEVICE=n
select NET_DEVLINK
diff --git a/drivers/net/ethernet/netronome/nfp/Makefile b/drivers/net/ethernet/netronome/nfp/Makefile
index 8a250214e289..808599b8066e 100644
--- a/drivers/net/ethernet/netronome/nfp/Makefile
+++ b/drivers/net/ethernet/netronome/nfp/Makefile
@@ -80,6 +80,8 @@ nfp-objs += \
abm/main.o
endif
-nfp-$(CONFIG_NFP_NET_IPSEC) += crypto/ipsec.o nfd3/ipsec.o
+nfp-$(CONFIG_NFP_NET_IPSEC) += crypto/ipsec.o nfd3/ipsec.o nfdk/ipsec.o
nfp-$(CONFIG_NFP_DEBUG) += nfp_net_debugfs.o
+
+nfp-$(CONFIG_DCB) += nic/dcb.o
diff --git a/drivers/net/ethernet/netronome/nfp/crypto/ipsec.c b/drivers/net/ethernet/netronome/nfp/crypto/ipsec.c
index 4632268695cb..c0dcce8ae437 100644
--- a/drivers/net/ethernet/netronome/nfp/crypto/ipsec.c
+++ b/drivers/net/ethernet/netronome/nfp/crypto/ipsec.c
@@ -10,6 +10,7 @@
#include <linux/ktime.h>
#include <net/xfrm.h>
+#include "../nfpcore/nfp_dev.h"
#include "../nfp_net_ctrl.h"
#include "../nfp_net.h"
#include "crypto.h"
@@ -129,26 +130,31 @@ struct nfp_ipsec_cfg_mssg {
};
};
-static int nfp_ipsec_cfg_cmd_issue(struct nfp_net *nn, int type, int saidx,
- struct nfp_ipsec_cfg_mssg *msg)
+static int nfp_net_ipsec_cfg(struct nfp_net *nn, struct nfp_mbox_amsg_entry *entry)
{
+ unsigned int offset = nn->tlv_caps.mbox_off + NFP_NET_CFG_MBOX_SIMPLE_VAL;
+ struct nfp_ipsec_cfg_mssg *msg = (struct nfp_ipsec_cfg_mssg *)entry->msg;
int i, msg_size, ret;
- msg->cmd = type;
- msg->sa_idx = saidx;
- msg->rsp = 0;
- msg_size = ARRAY_SIZE(msg->raw);
+ ret = nfp_net_mbox_lock(nn, sizeof(*msg));
+ if (ret)
+ return ret;
+ msg_size = ARRAY_SIZE(msg->raw);
for (i = 0; i < msg_size; i++)
- nn_writel(nn, NFP_NET_CFG_MBOX_VAL + 4 * i, msg->raw[i]);
+ nn_writel(nn, offset + 4 * i, msg->raw[i]);
- ret = nfp_net_mbox_reconfig(nn, NFP_NET_CFG_MBOX_CMD_IPSEC);
- if (ret < 0)
+ ret = nfp_net_mbox_reconfig(nn, entry->cmd);
+ if (ret < 0) {
+ nn_ctrl_bar_unlock(nn);
return ret;
+ }
/* For now we always read the whole message response back */
for (i = 0; i < msg_size; i++)
- msg->raw[i] = nn_readl(nn, NFP_NET_CFG_MBOX_VAL + 4 * i);
+ msg->raw[i] = nn_readl(nn, offset + 4 * i);
+
+ nn_ctrl_bar_unlock(nn);
switch (msg->rsp) {
case NFP_IPSEC_CFG_MSSG_OK:
@@ -260,7 +266,8 @@ static void set_sha2_512hmac(struct nfp_ipsec_cfg_add_sa *cfg, int *trunc_len)
}
}
-static int nfp_net_xfrm_add_state(struct xfrm_state *x)
+static int nfp_net_xfrm_add_state(struct xfrm_state *x,
+ struct netlink_ext_ack *extack)
{
struct net_device *netdev = x->xso.dev;
struct nfp_ipsec_cfg_mssg msg = {};
@@ -281,7 +288,7 @@ static int nfp_net_xfrm_add_state(struct xfrm_state *x)
cfg->ctrl_word.mode = NFP_IPSEC_PROTMODE_TRANSPORT;
break;
default:
- nn_err(nn, "Unsupported mode for xfrm offload\n");
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported mode for xfrm offload");
return -EINVAL;
}
@@ -293,17 +300,17 @@ static int nfp_net_xfrm_add_state(struct xfrm_state *x)
cfg->ctrl_word.proto = NFP_IPSEC_PROTOCOL_AH;
break;
default:
- nn_err(nn, "Unsupported protocol for xfrm offload\n");
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported protocol for xfrm offload");
return -EINVAL;
}
if (x->props.flags & XFRM_STATE_ESN) {
- nn_err(nn, "Unsupported XFRM_REPLAY_MODE_ESN for xfrm offload\n");
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported XFRM_REPLAY_MODE_ESN for xfrm offload");
return -EINVAL;
}
if (x->xso.type != XFRM_DEV_OFFLOAD_CRYPTO) {
- nn_err(nn, "Unsupported xfrm offload tyoe\n");
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported xfrm offload type");
return -EINVAL;
}
@@ -320,7 +327,7 @@ static int nfp_net_xfrm_add_state(struct xfrm_state *x)
if (x->aead) {
trunc_len = -1;
} else {
- nn_err(nn, "Unsupported authentication algorithm\n");
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported authentication algorithm");
return -EINVAL;
}
break;
@@ -329,6 +336,10 @@ static int nfp_net_xfrm_add_state(struct xfrm_state *x)
trunc_len = -1;
break;
case SADB_AALG_MD5HMAC:
+ if (nn->pdev->device == PCI_DEVICE_ID_NFP3800) {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported authentication algorithm");
+ return -EINVAL;
+ }
set_md5hmac(cfg, &trunc_len);
break;
case SADB_AALG_SHA1HMAC:
@@ -344,19 +355,19 @@ static int nfp_net_xfrm_add_state(struct xfrm_state *x)
set_sha2_512hmac(cfg, &trunc_len);
break;
default:
- nn_err(nn, "Unsupported authentication algorithm\n");
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported authentication algorithm");
return -EINVAL;
}
if (!trunc_len) {
- nn_err(nn, "Unsupported authentication algorithm trunc length\n");
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported authentication algorithm trunc length");
return -EINVAL;
}
if (x->aalg) {
key_len = DIV_ROUND_UP(x->aalg->alg_key_len, BITS_PER_BYTE);
if (key_len > sizeof(cfg->auth_key)) {
- nn_err(nn, "Insufficient space for offloaded auth key\n");
+ NL_SET_ERR_MSG_MOD(extack, "Insufficient space for offloaded auth key");
return -EINVAL;
}
for (i = 0; i < key_len / sizeof(cfg->auth_key[0]) ; i++)
@@ -372,18 +383,22 @@ static int nfp_net_xfrm_add_state(struct xfrm_state *x)
cfg->ctrl_word.cipher = NFP_IPSEC_CIPHER_NULL;
break;
case SADB_EALG_3DESCBC:
+ if (nn->pdev->device == PCI_DEVICE_ID_NFP3800) {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported encryption algorithm for offload");
+ return -EINVAL;
+ }
cfg->ctrl_word.cimode = NFP_IPSEC_CIMODE_CBC;
cfg->ctrl_word.cipher = NFP_IPSEC_CIPHER_3DES;
break;
case SADB_X_EALG_AES_GCM_ICV16:
case SADB_X_EALG_NULL_AES_GMAC:
if (!x->aead) {
- nn_err(nn, "Invalid AES key data\n");
+ NL_SET_ERR_MSG_MOD(extack, "Invalid AES key data");
return -EINVAL;
}
if (x->aead->alg_icv_len != 128) {
- nn_err(nn, "ICV must be 128bit with SADB_X_EALG_AES_GCM_ICV16\n");
+ NL_SET_ERR_MSG_MOD(extack, "ICV must be 128bit with SADB_X_EALG_AES_GCM_ICV16");
return -EINVAL;
}
cfg->ctrl_word.cimode = NFP_IPSEC_CIMODE_CTR;
@@ -391,23 +406,23 @@ static int nfp_net_xfrm_add_state(struct xfrm_state *x)
/* Aead->alg_key_len includes 32-bit salt */
if (set_aes_keylen(cfg, x->props.ealgo, x->aead->alg_key_len - 32)) {
- nn_err(nn, "Unsupported AES key length %d\n", x->aead->alg_key_len);
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported AES key length");
return -EINVAL;
}
break;
case SADB_X_EALG_AESCBC:
cfg->ctrl_word.cimode = NFP_IPSEC_CIMODE_CBC;
if (!x->ealg) {
- nn_err(nn, "Invalid AES key data\n");
+ NL_SET_ERR_MSG_MOD(extack, "Invalid AES key data");
return -EINVAL;
}
if (set_aes_keylen(cfg, x->props.ealgo, x->ealg->alg_key_len) < 0) {
- nn_err(nn, "Unsupported AES key length %d\n", x->ealg->alg_key_len);
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported AES key length");
return -EINVAL;
}
break;
default:
- nn_err(nn, "Unsupported encryption algorithm for offload\n");
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported encryption algorithm for offload");
return -EINVAL;
}
@@ -418,7 +433,7 @@ static int nfp_net_xfrm_add_state(struct xfrm_state *x)
key_len -= salt_len;
if (key_len > sizeof(cfg->ciph_key)) {
- nn_err(nn, "aead: Insufficient space for offloaded key\n");
+ NL_SET_ERR_MSG_MOD(extack, "aead: Insufficient space for offloaded key");
return -EINVAL;
}
@@ -434,7 +449,7 @@ static int nfp_net_xfrm_add_state(struct xfrm_state *x)
key_len = DIV_ROUND_UP(x->ealg->alg_key_len, BITS_PER_BYTE);
if (key_len > sizeof(cfg->ciph_key)) {
- nn_err(nn, "ealg: Insufficient space for offloaded key\n");
+ NL_SET_ERR_MSG_MOD(extack, "ealg: Insufficient space for offloaded key");
return -EINVAL;
}
for (i = 0; i < key_len / sizeof(cfg->ciph_key[0]) ; i++)
@@ -457,7 +472,7 @@ static int nfp_net_xfrm_add_state(struct xfrm_state *x)
}
break;
default:
- nn_err(nn, "Unsupported address family\n");
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported address family");
return -EINVAL;
}
@@ -472,15 +487,18 @@ static int nfp_net_xfrm_add_state(struct xfrm_state *x)
err = xa_alloc(&nn->xa_ipsec, &saidx, x,
XA_LIMIT(0, NFP_NET_IPSEC_MAX_SA_CNT - 1), GFP_KERNEL);
if (err < 0) {
- nn_err(nn, "Unable to get sa_data number for IPsec\n");
+ NL_SET_ERR_MSG_MOD(extack, "Unable to get sa_data number for IPsec");
return err;
}
/* Allocate saidx and commit the SA */
- err = nfp_ipsec_cfg_cmd_issue(nn, NFP_IPSEC_CFG_MSSG_ADD_SA, saidx, &msg);
+ msg.cmd = NFP_IPSEC_CFG_MSSG_ADD_SA;
+ msg.sa_idx = saidx;
+ err = nfp_net_sched_mbox_amsg_work(nn, NFP_NET_CFG_MBOX_CMD_IPSEC, &msg,
+ sizeof(msg), nfp_net_ipsec_cfg);
if (err) {
xa_erase(&nn->xa_ipsec, saidx);
- nn_err(nn, "Failed to issue IPsec command err ret=%d\n", err);
+ NL_SET_ERR_MSG_MOD(extack, "Failed to issue IPsec command");
return err;
}
@@ -491,14 +509,17 @@ static int nfp_net_xfrm_add_state(struct xfrm_state *x)
static void nfp_net_xfrm_del_state(struct xfrm_state *x)
{
+ struct nfp_ipsec_cfg_mssg msg = {
+ .cmd = NFP_IPSEC_CFG_MSSG_INV_SA,
+ .sa_idx = x->xso.offload_handle - 1,
+ };
struct net_device *netdev = x->xso.dev;
- struct nfp_ipsec_cfg_mssg msg;
struct nfp_net *nn;
int err;
nn = netdev_priv(netdev);
- err = nfp_ipsec_cfg_cmd_issue(nn, NFP_IPSEC_CFG_MSSG_INV_SA,
- x->xso.offload_handle - 1, &msg);
+ err = nfp_net_sched_mbox_amsg_work(nn, NFP_NET_CFG_MBOX_CMD_IPSEC, &msg,
+ sizeof(msg), nfp_net_ipsec_cfg);
if (err)
nn_warn(nn, "Failed to invalidate SA in hardware\n");
diff --git a/drivers/net/ethernet/netronome/nfp/devlink_param.c b/drivers/net/ethernet/netronome/nfp/devlink_param.c
index db297ee4d7ad..a655f9e69a7b 100644
--- a/drivers/net/ethernet/netronome/nfp/devlink_param.c
+++ b/drivers/net/ethernet/netronome/nfp/devlink_param.c
@@ -233,8 +233,8 @@ int nfp_devlink_params_register(struct nfp_pf *pf)
if (err <= 0)
return err;
- return devlink_params_register(devlink, nfp_devlink_params,
- ARRAY_SIZE(nfp_devlink_params));
+ return devl_params_register(devlink, nfp_devlink_params,
+ ARRAY_SIZE(nfp_devlink_params));
}
void nfp_devlink_params_unregister(struct nfp_pf *pf)
@@ -245,6 +245,6 @@ void nfp_devlink_params_unregister(struct nfp_pf *pf)
if (err <= 0)
return;
- devlink_params_unregister(priv_to_devlink(pf), nfp_devlink_params,
- ARRAY_SIZE(nfp_devlink_params));
+ devl_params_unregister(priv_to_devlink(pf), nfp_devlink_params,
+ ARRAY_SIZE(nfp_devlink_params));
}
diff --git a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
index f693119541d5..d23830b5bcb8 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
@@ -1964,6 +1964,27 @@ int nfp_fl_ct_stats(struct flow_cls_offload *flow,
return 0;
}
+static bool
+nfp_fl_ct_offload_nft_supported(struct flow_cls_offload *flow)
+{
+ struct flow_rule *flow_rule = flow->rule;
+ struct flow_action *flow_action =
+ &flow_rule->action;
+ struct flow_action_entry *act;
+ int i;
+
+ flow_action_for_each(i, act, flow_action) {
+ if (act->id == FLOW_ACTION_CT_METADATA) {
+ enum ip_conntrack_info ctinfo =
+ act->ct_metadata.cookie & NFCT_INFOMASK;
+
+ return ctinfo != IP_CT_NEW;
+ }
+ }
+
+ return false;
+}
+
static int
nfp_fl_ct_offload_nft_flow(struct nfp_fl_ct_zone_entry *zt, struct flow_cls_offload *flow)
{
@@ -1976,6 +1997,9 @@ nfp_fl_ct_offload_nft_flow(struct nfp_fl_ct_zone_entry *zt, struct flow_cls_offl
extack = flow->common.extack;
switch (flow->command) {
case FLOW_CLS_REPLACE:
+ if (!nfp_fl_ct_offload_nft_supported(flow))
+ return -EOPNOTSUPP;
+
/* Netfilter can request offload multiple times for the same
* flow - protect against adding duplicates.
*/
diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
index a8678d5612ee..060a77f2265d 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
@@ -460,6 +460,7 @@ nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app,
sizeof(struct nfp_tun_neigh_v4);
unsigned long cookie = (unsigned long)neigh;
struct nfp_flower_priv *priv = app->priv;
+ struct nfp_tun_neigh_lag lag_info;
struct nfp_neigh_entry *nn_entry;
u32 port_id;
u8 mtype;
@@ -468,6 +469,11 @@ nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app,
if (!port_id)
return;
+ if ((port_id & NFP_FL_LAG_OUT) == NFP_FL_LAG_OUT) {
+ memset(&lag_info, 0, sizeof(struct nfp_tun_neigh_lag));
+ nfp_flower_lag_get_info_from_netdev(app, netdev, &lag_info);
+ }
+
spin_lock_bh(&priv->predt_lock);
nn_entry = rhashtable_lookup_fast(&priv->neigh_table, &cookie,
neigh_table_params);
@@ -515,7 +521,7 @@ nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app,
neigh_ha_snapshot(common->dst_addr, neigh, netdev);
if ((port_id & NFP_FL_LAG_OUT) == NFP_FL_LAG_OUT)
- nfp_flower_lag_get_info_from_netdev(app, netdev, lag);
+ memcpy(lag, &lag_info, sizeof(struct nfp_tun_neigh_lag));
common->port_id = cpu_to_be32(port_id);
if (rhashtable_insert_fast(&priv->neigh_table,
diff --git a/drivers/net/ethernet/netronome/nfp/nfd3/dp.c b/drivers/net/ethernet/netronome/nfp/nfd3/dp.c
index 861082c5dbff..59fb0583cc08 100644
--- a/drivers/net/ethernet/netronome/nfp/nfd3/dp.c
+++ b/drivers/net/ethernet/netronome/nfp/nfd3/dp.c
@@ -192,10 +192,10 @@ static int nfp_nfd3_prep_tx_meta(struct nfp_net_dp *dp, struct sk_buff *skb,
return 0;
md_bytes = sizeof(meta_id) +
- !!md_dst * NFP_NET_META_PORTID_SIZE +
- !!tls_handle * NFP_NET_META_CONN_HANDLE_SIZE +
- vlan_insert * NFP_NET_META_VLAN_SIZE +
- *ipsec * NFP_NET_META_IPSEC_FIELD_SIZE; /* IPsec has 12 bytes of metadata */
+ (!!md_dst ? NFP_NET_META_PORTID_SIZE : 0) +
+ (!!tls_handle ? NFP_NET_META_CONN_HANDLE_SIZE : 0) +
+ (vlan_insert ? NFP_NET_META_VLAN_SIZE : 0) +
+ (*ipsec ? NFP_NET_META_IPSEC_FIELD_SIZE : 0);
if (unlikely(skb_cow_head(skb, md_bytes)))
return -ENOMEM;
@@ -226,9 +226,6 @@ static int nfp_nfd3_prep_tx_meta(struct nfp_net_dp *dp, struct sk_buff *skb,
meta_id |= NFP_NET_META_VLAN;
}
if (*ipsec) {
- /* IPsec has three consecutive 4-bit IPsec metadata types,
- * so in total IPsec has three 4 bytes of metadata.
- */
data -= NFP_NET_META_IPSEC_SIZE;
put_unaligned_be32(offload_info.seq_hi, data);
data -= NFP_NET_META_IPSEC_SIZE;
diff --git a/drivers/net/ethernet/netronome/nfp/nfdk/dp.c b/drivers/net/ethernet/netronome/nfp/nfdk/dp.c
index ccacb6ab6c39..d60c0e991a91 100644
--- a/drivers/net/ethernet/netronome/nfp/nfdk/dp.c
+++ b/drivers/net/ethernet/netronome/nfp/nfdk/dp.c
@@ -6,6 +6,7 @@
#include <linux/overflow.h>
#include <linux/sizes.h>
#include <linux/bitfield.h>
+#include <net/xfrm.h>
#include "../nfp_app.h"
#include "../nfp_net.h"
@@ -172,25 +173,32 @@ close_block:
static int
nfp_nfdk_prep_tx_meta(struct nfp_net_dp *dp, struct nfp_app *app,
- struct sk_buff *skb)
+ struct sk_buff *skb, bool *ipsec)
{
struct metadata_dst *md_dst = skb_metadata_dst(skb);
+ struct nfp_ipsec_offload offload_info;
unsigned char *data;
bool vlan_insert;
u32 meta_id = 0;
int md_bytes;
+#ifdef CONFIG_NFP_NET_IPSEC
+ if (xfrm_offload(skb))
+ *ipsec = nfp_net_ipsec_tx_prep(dp, skb, &offload_info);
+#endif
+
if (unlikely(md_dst && md_dst->type != METADATA_HW_PORT_MUX))
md_dst = NULL;
vlan_insert = skb_vlan_tag_present(skb) && (dp->ctrl & NFP_NET_CFG_CTRL_TXVLAN_V2);
- if (!(md_dst || vlan_insert))
+ if (!(md_dst || vlan_insert || *ipsec))
return 0;
md_bytes = sizeof(meta_id) +
- !!md_dst * NFP_NET_META_PORTID_SIZE +
- vlan_insert * NFP_NET_META_VLAN_SIZE;
+ (!!md_dst ? NFP_NET_META_PORTID_SIZE : 0) +
+ (vlan_insert ? NFP_NET_META_VLAN_SIZE : 0) +
+ (*ipsec ? NFP_NET_META_IPSEC_FIELD_SIZE : 0);
if (unlikely(skb_cow_head(skb, md_bytes)))
return -ENOMEM;
@@ -212,6 +220,17 @@ nfp_nfdk_prep_tx_meta(struct nfp_net_dp *dp, struct nfp_app *app,
meta_id |= NFP_NET_META_VLAN;
}
+ if (*ipsec) {
+ data -= NFP_NET_META_IPSEC_SIZE;
+ put_unaligned_be32(offload_info.seq_hi, data);
+ data -= NFP_NET_META_IPSEC_SIZE;
+ put_unaligned_be32(offload_info.seq_low, data);
+ data -= NFP_NET_META_IPSEC_SIZE;
+ put_unaligned_be32(offload_info.handle - 1, data);
+ meta_id <<= NFP_NET_META_IPSEC_FIELD_SIZE;
+ meta_id |= NFP_NET_META_IPSEC << 8 | NFP_NET_META_IPSEC << 4 | NFP_NET_META_IPSEC;
+ }
+
meta_id = FIELD_PREP(NFDK_META_LEN, md_bytes) |
FIELD_PREP(NFDK_META_FIELDS, meta_id);
@@ -243,6 +262,7 @@ netdev_tx_t nfp_nfdk_tx(struct sk_buff *skb, struct net_device *netdev)
struct nfp_net_dp *dp;
int nr_frags, wr_idx;
dma_addr_t dma_addr;
+ bool ipsec = false;
u64 metadata;
dp = &nn->dp;
@@ -263,7 +283,7 @@ netdev_tx_t nfp_nfdk_tx(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_BUSY;
}
- metadata = nfp_nfdk_prep_tx_meta(dp, nn->app, skb);
+ metadata = nfp_nfdk_prep_tx_meta(dp, nn->app, skb, &ipsec);
if (unlikely((int)metadata < 0))
goto err_flush;
@@ -361,6 +381,9 @@ netdev_tx_t nfp_nfdk_tx(struct sk_buff *skb, struct net_device *netdev)
(txd - 1)->dma_len_type = cpu_to_le16(dlen_type | NFDK_DESC_TX_EOP);
+ if (ipsec)
+ metadata = nfp_nfdk_ipsec_tx(metadata, skb);
+
if (!skb_is_gso(skb)) {
real_len = skb->len;
/* Metadata desc */
@@ -760,6 +783,15 @@ nfp_nfdk_parse_meta(struct net_device *netdev, struct nfp_meta_parsed *meta,
return false;
data += sizeof(struct nfp_net_tls_resync_req);
break;
+#ifdef CONFIG_NFP_NET_IPSEC
+ case NFP_NET_META_IPSEC:
+ /* Note: IPsec packet could have zero saidx, so need add 1
+ * to indicate packet is IPsec packet within driver.
+ */
+ meta->ipsec_saidx = get_unaligned_be32(data) + 1;
+ data += 4;
+ break;
+#endif
default:
return true;
}
@@ -1186,6 +1218,13 @@ static int nfp_nfdk_rx(struct nfp_net_rx_ring *rx_ring, int budget)
continue;
}
+#ifdef CONFIG_NFP_NET_IPSEC
+ if (meta.ipsec_saidx != 0 && unlikely(nfp_net_ipsec_rx(&meta, skb))) {
+ nfp_nfdk_rx_drop(dp, r_vec, rx_ring, NULL, skb);
+ continue;
+ }
+#endif
+
if (meta_len_xdp)
skb_metadata_set(skb, meta_len_xdp);
diff --git a/drivers/net/ethernet/netronome/nfp/nfdk/ipsec.c b/drivers/net/ethernet/netronome/nfp/nfdk/ipsec.c
new file mode 100644
index 000000000000..58d8f59eb885
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfdk/ipsec.c
@@ -0,0 +1,17 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2023 Corigine, Inc */
+
+#include <net/xfrm.h>
+
+#include "../nfp_net.h"
+#include "nfdk.h"
+
+u64 nfp_nfdk_ipsec_tx(u64 flags, struct sk_buff *skb)
+{
+ struct xfrm_state *x = xfrm_input_state(skb);
+
+ if (x->xso.dev && (x->xso.dev->features & NETIF_F_HW_ESP_TX_CSUM))
+ flags |= NFDK_DESC_TX_L3_CSUM | NFDK_DESC_TX_L4_CSUM;
+
+ return flags;
+}
diff --git a/drivers/net/ethernet/netronome/nfp/nfdk/nfdk.h b/drivers/net/ethernet/netronome/nfp/nfdk/nfdk.h
index 0ea51d9f2325..fe55980348e9 100644
--- a/drivers/net/ethernet/netronome/nfp/nfdk/nfdk.h
+++ b/drivers/net/ethernet/netronome/nfp/nfdk/nfdk.h
@@ -125,4 +125,12 @@ nfp_nfdk_ctrl_tx_one(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
void nfp_nfdk_ctrl_poll(struct tasklet_struct *t);
void nfp_nfdk_rx_ring_fill_freelist(struct nfp_net_dp *dp,
struct nfp_net_rx_ring *rx_ring);
+#ifndef CONFIG_NFP_NET_IPSEC
+static inline u64 nfp_nfdk_ipsec_tx(u64 flags, struct sk_buff *skb)
+{
+ return flags;
+}
+#else
+u64 nfp_nfdk_ipsec_tx(u64 flags, struct sk_buff *skb);
+#endif
#endif
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h
index 432d79d691c2..939cfce15830 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h
@@ -617,9 +617,10 @@ struct nfp_net_dp {
* @vnic_no_name: For non-port PF vNIC make ndo_get_phys_port_name return
* -EOPNOTSUPP to keep backwards compatibility (set by app)
* @port: Pointer to nfp_port structure if vNIC is a port
- * @mc_lock: Protect mc_addrs list
- * @mc_addrs: List of mc addrs to add/del to HW
- * @mc_work: Work to update mc addrs
+ * @mbox_amsg: Asynchronously processed message via mailbox
+ * @mbox_amsg.lock: Protect message list
+ * @mbox_amsg.list: List of message to process
+ * @mbox_amsg.work: Work to process message asynchronously
* @app_priv: APP private data for this vNIC
*/
struct nfp_net {
@@ -721,13 +722,25 @@ struct nfp_net {
struct nfp_port *port;
- spinlock_t mc_lock;
- struct list_head mc_addrs;
- struct work_struct mc_work;
+ struct {
+ spinlock_t lock;
+ struct list_head list;
+ struct work_struct work;
+ } mbox_amsg;
void *app_priv;
};
+struct nfp_mbox_amsg_entry {
+ struct list_head list;
+ int (*cfg)(struct nfp_net *nn, struct nfp_mbox_amsg_entry *entry);
+ u32 cmd;
+ char msg[];
+};
+
+int nfp_net_sched_mbox_amsg_work(struct nfp_net *nn, u32 cmd, const void *data, size_t len,
+ int (*cb)(struct nfp_net *, struct nfp_mbox_amsg_entry *));
+
/* Functions to read/write from/to a BAR
* Performs any endian conversion necessary.
*/
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 18fc9971f1c8..81b7ca0ad222 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -1334,14 +1334,54 @@ err_unlock:
return err;
}
-struct nfp_mc_addr_entry {
- u8 addr[ETH_ALEN];
- u32 cmd;
- struct list_head list;
-};
+int nfp_net_sched_mbox_amsg_work(struct nfp_net *nn, u32 cmd, const void *data, size_t len,
+ int (*cb)(struct nfp_net *, struct nfp_mbox_amsg_entry *))
+{
+ struct nfp_mbox_amsg_entry *entry;
+
+ entry = kmalloc(sizeof(*entry) + len, GFP_ATOMIC);
+ if (!entry)
+ return -ENOMEM;
+
+ memcpy(entry->msg, data, len);
+ entry->cmd = cmd;
+ entry->cfg = cb;
+
+ spin_lock_bh(&nn->mbox_amsg.lock);
+ list_add_tail(&entry->list, &nn->mbox_amsg.list);
+ spin_unlock_bh(&nn->mbox_amsg.lock);
+
+ schedule_work(&nn->mbox_amsg.work);
+
+ return 0;
+}
+
+static void nfp_net_mbox_amsg_work(struct work_struct *work)
+{
+ struct nfp_net *nn = container_of(work, struct nfp_net, mbox_amsg.work);
+ struct nfp_mbox_amsg_entry *entry, *tmp;
+ struct list_head tmp_list;
+
+ INIT_LIST_HEAD(&tmp_list);
+
+ spin_lock_bh(&nn->mbox_amsg.lock);
+ list_splice_init(&nn->mbox_amsg.list, &tmp_list);
+ spin_unlock_bh(&nn->mbox_amsg.lock);
+
+ list_for_each_entry_safe(entry, tmp, &tmp_list, list) {
+ int err = entry->cfg(nn, entry);
+
+ if (err)
+ nn_err(nn, "Config cmd %d to HW failed %d.\n", entry->cmd, err);
+
+ list_del(&entry->list);
+ kfree(entry);
+ }
+}
-static int nfp_net_mc_cfg(struct nfp_net *nn, const unsigned char *addr, const u32 cmd)
+static int nfp_net_mc_cfg(struct nfp_net *nn, struct nfp_mbox_amsg_entry *entry)
{
+ unsigned char *addr = entry->msg;
int ret;
ret = nfp_net_mbox_lock(nn, NFP_NET_CFG_MULTICAST_SZ);
@@ -1353,26 +1393,7 @@ static int nfp_net_mc_cfg(struct nfp_net *nn, const unsigned char *addr, const u
nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_MULTICAST_MAC_LO,
get_unaligned_be16(addr + 4));
- return nfp_net_mbox_reconfig_and_unlock(nn, cmd);
-}
-
-static int nfp_net_mc_prep(struct nfp_net *nn, const unsigned char *addr, const u32 cmd)
-{
- struct nfp_mc_addr_entry *entry;
-
- entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
- if (!entry)
- return -ENOMEM;
-
- ether_addr_copy(entry->addr, addr);
- entry->cmd = cmd;
- spin_lock_bh(&nn->mc_lock);
- list_add_tail(&entry->list, &nn->mc_addrs);
- spin_unlock_bh(&nn->mc_lock);
-
- schedule_work(&nn->mc_work);
-
- return 0;
+ return nfp_net_mbox_reconfig_and_unlock(nn, entry->cmd);
}
static int nfp_net_mc_sync(struct net_device *netdev, const unsigned char *addr)
@@ -1385,35 +1406,16 @@ static int nfp_net_mc_sync(struct net_device *netdev, const unsigned char *addr)
return -EINVAL;
}
- return nfp_net_mc_prep(nn, addr, NFP_NET_CFG_MBOX_CMD_MULTICAST_ADD);
+ return nfp_net_sched_mbox_amsg_work(nn, NFP_NET_CFG_MBOX_CMD_MULTICAST_ADD, addr,
+ NFP_NET_CFG_MULTICAST_SZ, nfp_net_mc_cfg);
}
static int nfp_net_mc_unsync(struct net_device *netdev, const unsigned char *addr)
{
struct nfp_net *nn = netdev_priv(netdev);
- return nfp_net_mc_prep(nn, addr, NFP_NET_CFG_MBOX_CMD_MULTICAST_DEL);
-}
-
-static void nfp_net_mc_addr_config(struct work_struct *work)
-{
- struct nfp_net *nn = container_of(work, struct nfp_net, mc_work);
- struct nfp_mc_addr_entry *entry, *tmp;
- struct list_head tmp_list;
-
- INIT_LIST_HEAD(&tmp_list);
-
- spin_lock_bh(&nn->mc_lock);
- list_splice_init(&nn->mc_addrs, &tmp_list);
- spin_unlock_bh(&nn->mc_lock);
-
- list_for_each_entry_safe(entry, tmp, &tmp_list, list) {
- if (nfp_net_mc_cfg(nn, entry->addr, entry->cmd))
- nn_err(nn, "Config mc address to HW failed.\n");
-
- list_del(&entry->list);
- kfree(entry);
- }
+ return nfp_net_sched_mbox_amsg_work(nn, NFP_NET_CFG_MBOX_CMD_MULTICAST_DEL, addr,
+ NFP_NET_CFG_MULTICAST_SZ, nfp_net_mc_cfg);
}
static void nfp_net_set_rx_mode(struct net_device *netdev)
@@ -2529,10 +2531,15 @@ static void nfp_net_netdev_init(struct nfp_net *nn)
netdev->features &= ~NETIF_F_HW_VLAN_STAG_RX;
nn->dp.ctrl &= ~NFP_NET_CFG_CTRL_RXQINQ;
+ netdev->xdp_features = NETDEV_XDP_ACT_BASIC;
+ if (nn->app && nn->app->type->id == NFP_APP_BPF_NIC)
+ netdev->xdp_features |= NETDEV_XDP_ACT_HW_OFFLOAD;
+
/* Finalise the netdev setup */
switch (nn->dp.ops->version) {
case NFP_NFD_VER_NFD3:
netdev->netdev_ops = &nfp_nfd3_netdev_ops;
+ netdev->xdp_features |= NETDEV_XDP_ACT_XSK_ZEROCOPY;
break;
case NFP_NFD_VER_NFDK:
netdev->netdev_ops = &nfp_nfdk_netdev_ops;
@@ -2681,9 +2688,9 @@ int nfp_net_init(struct nfp_net *nn)
if (!nn->dp.netdev)
return 0;
- spin_lock_init(&nn->mc_lock);
- INIT_LIST_HEAD(&nn->mc_addrs);
- INIT_WORK(&nn->mc_work, nfp_net_mc_addr_config);
+ spin_lock_init(&nn->mbox_amsg.lock);
+ INIT_LIST_HEAD(&nn->mbox_amsg.list);
+ INIT_WORK(&nn->mbox_amsg.work, nfp_net_mbox_amsg_work);
return register_netdev(nn->dp.netdev);
@@ -2704,6 +2711,6 @@ void nfp_net_clean(struct nfp_net *nn)
unregister_netdev(nn->dp.netdev);
nfp_net_ipsec_clean(nn);
nfp_ccm_mbox_clean(nn);
- flush_work(&nn->mc_work);
+ flush_work(&nn->mbox_amsg.work);
nfp_net_reconfig_wait_posted(nn);
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
index 51124309ae1f..669b9dccb6a9 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
@@ -403,7 +403,6 @@
*/
#define NFP_NET_CFG_MBOX_BASE 0x1800
#define NFP_NET_CFG_MBOX_VAL_MAX_SZ 0x1F8
-#define NFP_NET_CFG_MBOX_VAL 0x1808
#define NFP_NET_CFG_MBOX_SIMPLE_CMD 0x0
#define NFP_NET_CFG_MBOX_SIMPLE_RET 0x4
#define NFP_NET_CFG_MBOX_SIMPLE_VAL 0x8
@@ -413,6 +412,7 @@
#define NFP_NET_CFG_MBOX_CMD_IPSEC 3
#define NFP_NET_CFG_MBOX_CMD_PCI_DSCP_PRIOMAP_SET 5
#define NFP_NET_CFG_MBOX_CMD_TLV_CMSG 6
+#define NFP_NET_CFG_MBOX_CMD_DCB_UPDATE 7
#define NFP_NET_CFG_MBOX_CMD_MULTICAST_ADD 8
#define NFP_NET_CFG_MBOX_CMD_MULTICAST_DEL 9
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index a4a89ef3f18b..dfedb52b7e70 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -293,35 +293,143 @@ nfp_net_set_fec_link_mode(struct nfp_eth_table_port *eth_port,
}
}
-static const u16 nfp_eth_media_table[] = {
- [NFP_MEDIA_1000BASE_CX] = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
- [NFP_MEDIA_1000BASE_KX] = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
- [NFP_MEDIA_10GBASE_KX4] = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
- [NFP_MEDIA_10GBASE_KR] = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
- [NFP_MEDIA_10GBASE_CX4] = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
- [NFP_MEDIA_10GBASE_CR] = ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
- [NFP_MEDIA_10GBASE_SR] = ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
- [NFP_MEDIA_10GBASE_ER] = ETHTOOL_LINK_MODE_10000baseER_Full_BIT,
- [NFP_MEDIA_25GBASE_KR] = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
- [NFP_MEDIA_25GBASE_KR_S] = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
- [NFP_MEDIA_25GBASE_CR] = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
- [NFP_MEDIA_25GBASE_CR_S] = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
- [NFP_MEDIA_25GBASE_SR] = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
- [NFP_MEDIA_40GBASE_CR4] = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
- [NFP_MEDIA_40GBASE_KR4] = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
- [NFP_MEDIA_40GBASE_SR4] = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
- [NFP_MEDIA_40GBASE_LR4] = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
- [NFP_MEDIA_50GBASE_KR] = ETHTOOL_LINK_MODE_50000baseKR_Full_BIT,
- [NFP_MEDIA_50GBASE_SR] = ETHTOOL_LINK_MODE_50000baseSR_Full_BIT,
- [NFP_MEDIA_50GBASE_CR] = ETHTOOL_LINK_MODE_50000baseCR_Full_BIT,
- [NFP_MEDIA_50GBASE_LR] = ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
- [NFP_MEDIA_50GBASE_ER] = ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
- [NFP_MEDIA_50GBASE_FR] = ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
- [NFP_MEDIA_100GBASE_KR4] = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
- [NFP_MEDIA_100GBASE_SR4] = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
- [NFP_MEDIA_100GBASE_CR4] = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
- [NFP_MEDIA_100GBASE_KP4] = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
- [NFP_MEDIA_100GBASE_CR10] = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
+static const struct nfp_eth_media_link_mode {
+ u16 ethtool_link_mode;
+ u16 speed;
+} nfp_eth_media_table[NFP_MEDIA_LINK_MODES_NUMBER] = {
+ [NFP_MEDIA_1000BASE_CX] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
+ .speed = NFP_SPEED_1G,
+ },
+ [NFP_MEDIA_1000BASE_KX] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
+ .speed = NFP_SPEED_1G,
+ },
+ [NFP_MEDIA_10GBASE_KX4] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
+ .speed = NFP_SPEED_10G,
+ },
+ [NFP_MEDIA_10GBASE_KR] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
+ .speed = NFP_SPEED_10G,
+ },
+ [NFP_MEDIA_10GBASE_LR] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
+ .speed = NFP_SPEED_10G,
+ },
+ [NFP_MEDIA_10GBASE_CX4] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
+ .speed = NFP_SPEED_10G,
+ },
+ [NFP_MEDIA_10GBASE_CR] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
+ .speed = NFP_SPEED_10G,
+ },
+ [NFP_MEDIA_10GBASE_SR] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
+ .speed = NFP_SPEED_10G,
+ },
+ [NFP_MEDIA_10GBASE_ER] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_10000baseER_Full_BIT,
+ .speed = NFP_SPEED_10G,
+ },
+ [NFP_MEDIA_25GBASE_KR] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
+ .speed = NFP_SPEED_25G,
+ },
+ [NFP_MEDIA_25GBASE_KR_S] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
+ .speed = NFP_SPEED_25G,
+ },
+ [NFP_MEDIA_25GBASE_CR] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
+ .speed = NFP_SPEED_25G,
+ },
+ [NFP_MEDIA_25GBASE_CR_S] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
+ .speed = NFP_SPEED_25G,
+ },
+ [NFP_MEDIA_25GBASE_SR] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
+ .speed = NFP_SPEED_25G,
+ },
+ [NFP_MEDIA_25GBASE_LR] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
+ .speed = NFP_SPEED_25G,
+ },
+ [NFP_MEDIA_25GBASE_ER] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
+ .speed = NFP_SPEED_25G,
+ },
+ [NFP_MEDIA_40GBASE_CR4] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
+ .speed = NFP_SPEED_40G,
+ },
+ [NFP_MEDIA_40GBASE_KR4] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
+ .speed = NFP_SPEED_40G,
+ },
+ [NFP_MEDIA_40GBASE_SR4] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
+ .speed = NFP_SPEED_40G,
+ },
+ [NFP_MEDIA_40GBASE_LR4] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
+ .speed = NFP_SPEED_40G,
+ },
+ [NFP_MEDIA_50GBASE_KR] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_50000baseKR_Full_BIT,
+ .speed = NFP_SPEED_50G,
+ },
+ [NFP_MEDIA_50GBASE_SR] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_50000baseSR_Full_BIT,
+ .speed = NFP_SPEED_50G,
+ },
+ [NFP_MEDIA_50GBASE_CR] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_50000baseCR_Full_BIT,
+ .speed = NFP_SPEED_50G,
+ },
+ [NFP_MEDIA_50GBASE_LR] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
+ .speed = NFP_SPEED_50G,
+ },
+ [NFP_MEDIA_50GBASE_ER] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
+ .speed = NFP_SPEED_50G,
+ },
+ [NFP_MEDIA_50GBASE_FR] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
+ .speed = NFP_SPEED_50G,
+ },
+ [NFP_MEDIA_100GBASE_KR4] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
+ .speed = NFP_SPEED_100G,
+ },
+ [NFP_MEDIA_100GBASE_SR4] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
+ .speed = NFP_SPEED_100G,
+ },
+ [NFP_MEDIA_100GBASE_CR4] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
+ .speed = NFP_SPEED_100G,
+ },
+ [NFP_MEDIA_100GBASE_KP4] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
+ .speed = NFP_SPEED_100G,
+ },
+ [NFP_MEDIA_100GBASE_CR10] = {
+ .ethtool_link_mode = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
+ .speed = NFP_SPEED_100G,
+ },
+};
+
+static const unsigned int nfp_eth_speed_map[NFP_SUP_SPEED_NUMBER] = {
+ [NFP_SPEED_1G] = SPEED_1000,
+ [NFP_SPEED_10G] = SPEED_10000,
+ [NFP_SPEED_25G] = SPEED_25000,
+ [NFP_SPEED_40G] = SPEED_40000,
+ [NFP_SPEED_50G] = SPEED_50000,
+ [NFP_SPEED_100G] = SPEED_100000,
};
static void nfp_add_media_link_mode(struct nfp_port *port,
@@ -334,8 +442,12 @@ static void nfp_add_media_link_mode(struct nfp_port *port,
};
struct nfp_cpp *cpp = port->app->cpp;
- if (nfp_eth_read_media(cpp, &ethm))
+ if (nfp_eth_read_media(cpp, &ethm)) {
+ bitmap_fill(port->speed_bitmap, NFP_SUP_SPEED_NUMBER);
return;
+ }
+
+ bitmap_zero(port->speed_bitmap, NFP_SUP_SPEED_NUMBER);
for (u32 i = 0; i < 2; i++) {
supported_modes[i] = le64_to_cpu(ethm.supported_modes[i]);
@@ -344,20 +456,26 @@ static void nfp_add_media_link_mode(struct nfp_port *port,
for (u32 i = 0; i < NFP_MEDIA_LINK_MODES_NUMBER; i++) {
if (i < 64) {
- if (supported_modes[0] & BIT_ULL(i))
- __set_bit(nfp_eth_media_table[i],
+ if (supported_modes[0] & BIT_ULL(i)) {
+ __set_bit(nfp_eth_media_table[i].ethtool_link_mode,
cmd->link_modes.supported);
+ __set_bit(nfp_eth_media_table[i].speed,
+ port->speed_bitmap);
+ }
if (advertised_modes[0] & BIT_ULL(i))
- __set_bit(nfp_eth_media_table[i],
+ __set_bit(nfp_eth_media_table[i].ethtool_link_mode,
cmd->link_modes.advertising);
} else {
- if (supported_modes[1] & BIT_ULL(i - 64))
- __set_bit(nfp_eth_media_table[i],
+ if (supported_modes[1] & BIT_ULL(i - 64)) {
+ __set_bit(nfp_eth_media_table[i].ethtool_link_mode,
cmd->link_modes.supported);
+ __set_bit(nfp_eth_media_table[i].speed,
+ port->speed_bitmap);
+ }
if (advertised_modes[1] & BIT_ULL(i - 64))
- __set_bit(nfp_eth_media_table[i],
+ __set_bit(nfp_eth_media_table[i].ethtool_link_mode,
cmd->link_modes.advertising);
}
}
@@ -468,6 +586,22 @@ nfp_net_set_link_ksettings(struct net_device *netdev,
if (cmd->base.speed != SPEED_UNKNOWN) {
u32 speed = cmd->base.speed / eth_port->lanes;
+ bool is_supported = false;
+
+ for (u32 i = 0; i < NFP_SUP_SPEED_NUMBER; i++) {
+ if (cmd->base.speed == nfp_eth_speed_map[i] &&
+ test_bit(i, port->speed_bitmap)) {
+ is_supported = true;
+ break;
+ }
+ }
+
+ if (!is_supported) {
+ netdev_err(netdev, "Speed %u is not supported.\n",
+ cmd->base.speed);
+ err = -EINVAL;
+ goto err_bad_set;
+ }
if (req_aneg) {
netdev_err(netdev, "Speed changing is not allowed when working on autoneg mode.\n");
@@ -1905,16 +2039,16 @@ static int
nfp_net_get_eeprom(struct net_device *netdev,
struct ethtool_eeprom *eeprom, u8 *bytes)
{
- struct nfp_net *nn = netdev_priv(netdev);
+ struct nfp_app *app = nfp_app_from_netdev(netdev);
u8 buf[NFP_EEPROM_LEN] = {};
- if (eeprom->len == 0)
- return -EINVAL;
-
if (nfp_net_get_port_mac_by_hwinfo(netdev, buf))
return -EOPNOTSUPP;
- eeprom->magic = nn->pdev->vendor | (nn->pdev->device << 16);
+ if (eeprom->len == 0)
+ return -EINVAL;
+
+ eeprom->magic = app->pdev->vendor | (app->pdev->device << 16);
memcpy(bytes, buf + eeprom->offset, eeprom->len);
return 0;
@@ -1924,18 +2058,18 @@ static int
nfp_net_set_eeprom(struct net_device *netdev,
struct ethtool_eeprom *eeprom, u8 *bytes)
{
- struct nfp_net *nn = netdev_priv(netdev);
+ struct nfp_app *app = nfp_app_from_netdev(netdev);
u8 buf[NFP_EEPROM_LEN] = {};
+ if (nfp_net_get_port_mac_by_hwinfo(netdev, buf))
+ return -EOPNOTSUPP;
+
if (eeprom->len == 0)
return -EINVAL;
- if (eeprom->magic != (nn->pdev->vendor | nn->pdev->device << 16))
+ if (eeprom->magic != (app->pdev->vendor | app->pdev->device << 16))
return -EINVAL;
- if (nfp_net_get_port_mac_by_hwinfo(netdev, buf))
- return -EOPNOTSUPP;
-
memcpy(buf + eeprom->offset, bytes, eeprom->len);
if (nfp_net_set_port_mac_by_hwinfo(netdev, buf))
return -EOPNOTSUPP;
@@ -1995,6 +2129,9 @@ const struct ethtool_ops nfp_port_ethtool_ops = {
.set_dump = nfp_app_set_dump,
.get_dump_flag = nfp_app_get_dump_flag,
.get_dump_data = nfp_app_get_dump_data,
+ .get_eeprom_len = nfp_net_get_eeprom_len,
+ .get_eeprom = nfp_net_get_eeprom,
+ .set_eeprom = nfp_net_set_eeprom,
.get_module_info = nfp_port_get_module_info,
.get_module_eeprom = nfp_port_get_module_eeprom,
.get_link_ksettings = nfp_net_get_link_ksettings,
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
index abfe788d558f..cbe4972ba104 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
@@ -754,11 +754,11 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
if (err)
goto err_devlink_unreg;
+ devl_lock(devlink);
err = nfp_devlink_params_register(pf);
if (err)
goto err_shared_buf_unreg;
- devl_lock(devlink);
pf->ddir = nfp_net_debugfs_device_add(pf->pdev);
/* Allocate the vnics and do basic init */
@@ -791,9 +791,9 @@ err_free_vnics:
nfp_net_pf_free_vnics(pf);
err_clean_ddir:
nfp_net_debugfs_dir_clean(&pf->ddir);
- devl_unlock(devlink);
nfp_devlink_params_unregister(pf);
err_shared_buf_unreg:
+ devl_unlock(devlink);
nfp_shared_buf_unregister(pf);
err_devlink_unreg:
cancel_work_sync(&pf->port_refresh_work);
@@ -821,9 +821,10 @@ void nfp_net_pci_remove(struct nfp_pf *pf)
/* stop app first, to avoid double free of ctrl vNIC's ddir */
nfp_net_debugfs_dir_clean(&pf->ddir);
+ nfp_devlink_params_unregister(pf);
+
devl_unlock(devlink);
- nfp_devlink_params_unregister(pf);
nfp_shared_buf_unregister(pf);
nfp_net_pf_free_irqs(pf);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_port.h b/drivers/net/ethernet/netronome/nfp/nfp_port.h
index f8cd157ca1d7..9c04f9f0e2c9 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_port.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_port.h
@@ -38,6 +38,16 @@ enum nfp_port_flags {
NFP_PORT_CHANGED = 0,
};
+enum {
+ NFP_SPEED_1G,
+ NFP_SPEED_10G,
+ NFP_SPEED_25G,
+ NFP_SPEED_40G,
+ NFP_SPEED_50G,
+ NFP_SPEED_100G,
+ NFP_SUP_SPEED_NUMBER
+};
+
/**
* struct nfp_port - structure representing NFP port
* @netdev: backpointer to associated netdev
@@ -52,6 +62,7 @@ enum nfp_port_flags {
* @eth_forced: for %NFP_PORT_PHYS_PORT port is forced UP or DOWN, don't change
* @eth_port: for %NFP_PORT_PHYS_PORT translated ETH Table port entry
* @eth_stats: for %NFP_PORT_PHYS_PORT MAC stats if available
+ * @speed_bitmap: for %NFP_PORT_PHYS_PORT supported speed bitmap
* @pf_id: for %NFP_PORT_PF_PORT, %NFP_PORT_VF_PORT ID of the PCI PF (0-3)
* @vf_id: for %NFP_PORT_VF_PORT ID of the PCI VF within @pf_id
* @pf_split: for %NFP_PORT_PF_PORT %true if PCI PF has more than one vNIC
@@ -78,6 +89,7 @@ struct nfp_port {
bool eth_forced;
struct nfp_eth_table_port *eth_port;
u8 __iomem *eth_stats;
+ DECLARE_BITMAP(speed_bitmap, NFP_SUP_SPEED_NUMBER);
};
/* NFP_PORT_PF_PORT, NFP_PORT_VF_PORT */
struct {
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
index 8f5cab0032d0..781edc451bd4 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
@@ -140,6 +140,9 @@ enum nfp_ethtool_link_mode_list {
NFP_MEDIA_100GBASE_CR4,
NFP_MEDIA_100GBASE_KP4,
NFP_MEDIA_100GBASE_CR10,
+ NFP_MEDIA_10GBASE_LR,
+ NFP_MEDIA_25GBASE_LR,
+ NFP_MEDIA_25GBASE_ER,
NFP_MEDIA_LINK_MODES_NUMBER
};
diff --git a/drivers/net/ethernet/netronome/nfp/nic/dcb.c b/drivers/net/ethernet/netronome/nfp/nic/dcb.c
new file mode 100644
index 000000000000..bb498ac6bd7d
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nic/dcb.c
@@ -0,0 +1,571 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2023 Corigine, Inc. */
+
+#include <linux/device.h>
+#include <linux/netdevice.h>
+#include <net/dcbnl.h>
+
+#include "../nfp_app.h"
+#include "../nfp_net.h"
+#include "../nfp_main.h"
+#include "../nfpcore/nfp_cpp.h"
+#include "../nfpcore/nfp_nffw.h"
+#include "../nfp_net_sriov.h"
+
+#include "main.h"
+
+#define NFP_DCB_TRUST_PCP 1
+#define NFP_DCB_TRUST_DSCP 2
+#define NFP_DCB_TRUST_INVALID 0xff
+
+#define NFP_DCB_TSA_VENDOR 1
+#define NFP_DCB_TSA_STRICT 2
+#define NFP_DCB_TSA_ETS 3
+
+#define NFP_DCB_GBL_ENABLE BIT(0)
+#define NFP_DCB_QOS_ENABLE BIT(1)
+#define NFP_DCB_DISABLE 0
+#define NFP_DCB_ALL_QOS_ENABLE (NFP_DCB_GBL_ENABLE | NFP_DCB_QOS_ENABLE)
+
+#define NFP_DCB_UPDATE_MSK_SZ 4
+#define NFP_DCB_TC_RATE_MAX 0xffff
+
+#define NFP_DCB_DATA_OFF_DSCP2IDX 0
+#define NFP_DCB_DATA_OFF_PCP2IDX 64
+#define NFP_DCB_DATA_OFF_TSA 80
+#define NFP_DCB_DATA_OFF_IDX_BW_PCT 88
+#define NFP_DCB_DATA_OFF_RATE 96
+#define NFP_DCB_DATA_OFF_CAP 112
+#define NFP_DCB_DATA_OFF_ENABLE 116
+#define NFP_DCB_DATA_OFF_TRUST 120
+
+#define NFP_DCB_MSG_MSK_ENABLE BIT(31)
+#define NFP_DCB_MSG_MSK_TRUST BIT(30)
+#define NFP_DCB_MSG_MSK_TSA BIT(29)
+#define NFP_DCB_MSG_MSK_DSCP BIT(28)
+#define NFP_DCB_MSG_MSK_PCP BIT(27)
+#define NFP_DCB_MSG_MSK_RATE BIT(26)
+#define NFP_DCB_MSG_MSK_PCT BIT(25)
+
+static struct nfp_dcb *get_dcb_priv(struct nfp_net *nn)
+{
+ struct nfp_dcb *dcb = &((struct nfp_app_nic_private *)nn->app_priv)->dcb;
+
+ return dcb;
+}
+
+static u8 nfp_tsa_ieee2nfp(u8 tsa)
+{
+ switch (tsa) {
+ case IEEE_8021QAZ_TSA_STRICT:
+ return NFP_DCB_TSA_STRICT;
+ case IEEE_8021QAZ_TSA_ETS:
+ return NFP_DCB_TSA_ETS;
+ default:
+ return NFP_DCB_TSA_VENDOR;
+ }
+}
+
+static int nfp_nic_dcbnl_ieee_getets(struct net_device *dev,
+ struct ieee_ets *ets)
+{
+ struct nfp_net *nn = netdev_priv(dev);
+ struct nfp_dcb *dcb;
+
+ dcb = get_dcb_priv(nn);
+
+ for (unsigned int i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ ets->prio_tc[i] = dcb->prio2tc[i];
+ ets->tc_tx_bw[i] = dcb->tc_tx_pct[i];
+ ets->tc_tsa[i] = dcb->tc_tsa[i];
+ }
+
+ return 0;
+}
+
+static bool nfp_refresh_tc2idx(struct nfp_net *nn)
+{
+ u8 tc2idx[IEEE_8021QAZ_MAX_TCS];
+ bool change = false;
+ struct nfp_dcb *dcb;
+ int maxstrict = 0;
+
+ dcb = get_dcb_priv(nn);
+
+ for (unsigned int i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ tc2idx[i] = i;
+ if (dcb->tc_tsa[i] == IEEE_8021QAZ_TSA_STRICT)
+ maxstrict = i;
+ }
+
+ if (maxstrict > 0 && dcb->tc_tsa[0] != IEEE_8021QAZ_TSA_STRICT) {
+ tc2idx[0] = maxstrict;
+ tc2idx[maxstrict] = 0;
+ }
+
+ for (unsigned int j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) {
+ if (dcb->tc2idx[j] != tc2idx[j]) {
+ change = true;
+ dcb->tc2idx[j] = tc2idx[j];
+ }
+ }
+
+ return change;
+}
+
+static int nfp_fill_maxrate(struct nfp_net *nn, u64 *max_rate_array)
+{
+ struct nfp_app *app = nn->app;
+ struct nfp_dcb *dcb;
+ u32 ratembps;
+
+ dcb = get_dcb_priv(nn);
+
+ for (unsigned int i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ /* Convert bandwidth from kbps to mbps. */
+ ratembps = max_rate_array[i] / 1024;
+
+ /* Reject input values >= NFP_DCB_TC_RATE_MAX */
+ if (ratembps >= NFP_DCB_TC_RATE_MAX) {
+ nfp_warn(app->cpp, "ratembps(%d) must less than %d.",
+ ratembps, NFP_DCB_TC_RATE_MAX);
+ return -EINVAL;
+ }
+ /* Input value 0 mapped to NFP_DCB_TC_RATE_MAX for firmware. */
+ if (ratembps == 0)
+ ratembps = NFP_DCB_TC_RATE_MAX;
+
+ writew((u16)ratembps, dcb->dcbcfg_tbl +
+ dcb->cfg_offset + NFP_DCB_DATA_OFF_RATE + dcb->tc2idx[i] * 2);
+ /* for rate value from user space, need to sync to dcb structure */
+ if (dcb->tc_maxrate != max_rate_array)
+ dcb->tc_maxrate[i] = max_rate_array[i];
+ }
+
+ return 0;
+}
+
+static int update_dscp_maxrate(struct net_device *dev, u32 *update)
+{
+ struct nfp_net *nn = netdev_priv(dev);
+ struct nfp_dcb *dcb;
+ int err;
+
+ dcb = get_dcb_priv(nn);
+
+ err = nfp_fill_maxrate(nn, dcb->tc_maxrate);
+ if (err)
+ return err;
+
+ *update |= NFP_DCB_MSG_MSK_RATE;
+
+ /* We only refresh dscp in dscp trust mode. */
+ if (dcb->dscp_cnt > 0) {
+ for (unsigned int i = 0; i < NFP_NET_MAX_DSCP; i++) {
+ writeb(dcb->tc2idx[dcb->prio2tc[dcb->dscp2prio[i]]],
+ dcb->dcbcfg_tbl + dcb->cfg_offset +
+ NFP_DCB_DATA_OFF_DSCP2IDX + i);
+ }
+ *update |= NFP_DCB_MSG_MSK_DSCP;
+ }
+
+ return 0;
+}
+
+static void nfp_nic_set_trust(struct nfp_net *nn, u32 *update)
+{
+ struct nfp_dcb *dcb;
+ u8 trust;
+
+ dcb = get_dcb_priv(nn);
+
+ if (dcb->trust_status != NFP_DCB_TRUST_INVALID)
+ return;
+
+ trust = dcb->dscp_cnt > 0 ? NFP_DCB_TRUST_DSCP : NFP_DCB_TRUST_PCP;
+ writeb(trust, dcb->dcbcfg_tbl + dcb->cfg_offset +
+ NFP_DCB_DATA_OFF_TRUST);
+
+ dcb->trust_status = trust;
+ *update |= NFP_DCB_MSG_MSK_TRUST;
+}
+
+static void nfp_nic_set_enable(struct nfp_net *nn, u32 enable, u32 *update)
+{
+ struct nfp_dcb *dcb;
+ u32 value = 0;
+
+ dcb = get_dcb_priv(nn);
+
+ value = readl(dcb->dcbcfg_tbl + dcb->cfg_offset +
+ NFP_DCB_DATA_OFF_ENABLE);
+ if (value != enable) {
+ writel(enable, dcb->dcbcfg_tbl + dcb->cfg_offset +
+ NFP_DCB_DATA_OFF_ENABLE);
+ *update |= NFP_DCB_MSG_MSK_ENABLE;
+ }
+}
+
+static int dcb_ets_check(struct net_device *dev, struct ieee_ets *ets)
+{
+ struct nfp_net *nn = netdev_priv(dev);
+ struct nfp_app *app = nn->app;
+ bool ets_exists = false;
+ int sum = 0;
+
+ for (unsigned int i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ /* For ets mode, check bw percentage sum. */
+ if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) {
+ ets_exists = true;
+ sum += ets->tc_tx_bw[i];
+ } else if (ets->tc_tx_bw[i]) {
+ nfp_warn(app->cpp, "ETS BW for strict/vendor TC must be 0.");
+ return -EINVAL;
+ }
+ }
+
+ if (ets_exists && sum != 100) {
+ nfp_warn(app->cpp, "Failed to validate ETS BW: sum must be 100.");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void nfp_nic_fill_ets(struct nfp_net *nn)
+{
+ struct nfp_dcb *dcb;
+
+ dcb = get_dcb_priv(nn);
+
+ for (unsigned int i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ writeb(dcb->tc2idx[dcb->prio2tc[i]],
+ dcb->dcbcfg_tbl + dcb->cfg_offset + NFP_DCB_DATA_OFF_PCP2IDX + i);
+ writeb(dcb->tc_tx_pct[i], dcb->dcbcfg_tbl +
+ dcb->cfg_offset + NFP_DCB_DATA_OFF_IDX_BW_PCT + dcb->tc2idx[i]);
+ writeb(nfp_tsa_ieee2nfp(dcb->tc_tsa[i]), dcb->dcbcfg_tbl +
+ dcb->cfg_offset + NFP_DCB_DATA_OFF_TSA + dcb->tc2idx[i]);
+ }
+}
+
+static void nfp_nic_ets_init(struct nfp_net *nn, u32 *update)
+{
+ struct nfp_dcb *dcb = get_dcb_priv(nn);
+
+ if (dcb->ets_init)
+ return;
+
+ nfp_nic_fill_ets(nn);
+ dcb->ets_init = true;
+ *update |= NFP_DCB_MSG_MSK_TSA | NFP_DCB_MSG_MSK_PCT | NFP_DCB_MSG_MSK_PCP;
+}
+
+static int nfp_nic_dcbnl_ieee_setets(struct net_device *dev,
+ struct ieee_ets *ets)
+{
+ const u32 cmd = NFP_NET_CFG_MBOX_CMD_DCB_UPDATE;
+ struct nfp_net *nn = netdev_priv(dev);
+ struct nfp_app *app = nn->app;
+ struct nfp_dcb *dcb;
+ u32 update = 0;
+ bool change;
+ int err;
+
+ err = dcb_ets_check(dev, ets);
+ if (err)
+ return err;
+
+ dcb = get_dcb_priv(nn);
+
+ for (unsigned int i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ dcb->prio2tc[i] = ets->prio_tc[i];
+ dcb->tc_tx_pct[i] = ets->tc_tx_bw[i];
+ dcb->tc_tsa[i] = ets->tc_tsa[i];
+ }
+
+ change = nfp_refresh_tc2idx(nn);
+ nfp_nic_fill_ets(nn);
+ dcb->ets_init = true;
+ if (change || !dcb->rate_init) {
+ err = update_dscp_maxrate(dev, &update);
+ if (err) {
+ nfp_warn(app->cpp,
+ "nfp dcbnl ieee setets ERROR:%d.",
+ err);
+ return err;
+ }
+
+ dcb->rate_init = true;
+ }
+ nfp_nic_set_enable(nn, NFP_DCB_ALL_QOS_ENABLE, &update);
+ nfp_nic_set_trust(nn, &update);
+ err = nfp_net_mbox_lock(nn, NFP_DCB_UPDATE_MSK_SZ);
+ if (err)
+ return err;
+
+ nn_writel(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_MBOX_SIMPLE_VAL,
+ update | NFP_DCB_MSG_MSK_TSA | NFP_DCB_MSG_MSK_PCT |
+ NFP_DCB_MSG_MSK_PCP);
+
+ return nfp_net_mbox_reconfig_and_unlock(nn, cmd);
+}
+
+static int nfp_nic_dcbnl_ieee_getmaxrate(struct net_device *dev,
+ struct ieee_maxrate *maxrate)
+{
+ struct nfp_net *nn = netdev_priv(dev);
+ struct nfp_dcb *dcb;
+
+ dcb = get_dcb_priv(nn);
+
+ for (unsigned int i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
+ maxrate->tc_maxrate[i] = dcb->tc_maxrate[i];
+
+ return 0;
+}
+
+static int nfp_nic_dcbnl_ieee_setmaxrate(struct net_device *dev,
+ struct ieee_maxrate *maxrate)
+{
+ const u32 cmd = NFP_NET_CFG_MBOX_CMD_DCB_UPDATE;
+ struct nfp_net *nn = netdev_priv(dev);
+ struct nfp_app *app = nn->app;
+ struct nfp_dcb *dcb;
+ u32 update = 0;
+ int err;
+
+ err = nfp_fill_maxrate(nn, maxrate->tc_maxrate);
+ if (err) {
+ nfp_warn(app->cpp,
+ "nfp dcbnl ieee setmaxrate ERROR:%d.",
+ err);
+ return err;
+ }
+
+ dcb = get_dcb_priv(nn);
+
+ dcb->rate_init = true;
+ nfp_nic_set_enable(nn, NFP_DCB_ALL_QOS_ENABLE, &update);
+ nfp_nic_set_trust(nn, &update);
+ nfp_nic_ets_init(nn, &update);
+
+ err = nfp_net_mbox_lock(nn, NFP_DCB_UPDATE_MSK_SZ);
+ if (err)
+ return err;
+
+ nn_writel(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_MBOX_SIMPLE_VAL,
+ update | NFP_DCB_MSG_MSK_RATE);
+
+ return nfp_net_mbox_reconfig_and_unlock(nn, cmd);
+}
+
+static int nfp_nic_set_trust_status(struct nfp_net *nn, u8 status)
+{
+ const u32 cmd = NFP_NET_CFG_MBOX_CMD_DCB_UPDATE;
+ struct nfp_dcb *dcb;
+ u32 update = 0;
+ int err;
+
+ dcb = get_dcb_priv(nn);
+ if (!dcb->rate_init) {
+ err = nfp_fill_maxrate(nn, dcb->tc_maxrate);
+ if (err)
+ return err;
+
+ update |= NFP_DCB_MSG_MSK_RATE;
+ dcb->rate_init = true;
+ }
+
+ err = nfp_net_mbox_lock(nn, NFP_DCB_UPDATE_MSK_SZ);
+ if (err)
+ return err;
+
+ nfp_nic_ets_init(nn, &update);
+ writeb(status, dcb->dcbcfg_tbl + dcb->cfg_offset +
+ NFP_DCB_DATA_OFF_TRUST);
+ nfp_nic_set_enable(nn, NFP_DCB_ALL_QOS_ENABLE, &update);
+ nn_writel(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_MBOX_SIMPLE_VAL,
+ update | NFP_DCB_MSG_MSK_TRUST);
+
+ err = nfp_net_mbox_reconfig_and_unlock(nn, cmd);
+ if (err)
+ return err;
+
+ dcb->trust_status = status;
+
+ return 0;
+}
+
+static int nfp_nic_set_dscp2prio(struct nfp_net *nn, u8 dscp, u8 prio)
+{
+ const u32 cmd = NFP_NET_CFG_MBOX_CMD_DCB_UPDATE;
+ struct nfp_dcb *dcb;
+ u8 idx, tc;
+ int err;
+
+ err = nfp_net_mbox_lock(nn, NFP_DCB_UPDATE_MSK_SZ);
+ if (err)
+ return err;
+
+ dcb = get_dcb_priv(nn);
+
+ tc = dcb->prio2tc[prio];
+ idx = dcb->tc2idx[tc];
+
+ writeb(idx, dcb->dcbcfg_tbl + dcb->cfg_offset +
+ NFP_DCB_DATA_OFF_DSCP2IDX + dscp);
+
+ nn_writel(nn, nn->tlv_caps.mbox_off +
+ NFP_NET_CFG_MBOX_SIMPLE_VAL, NFP_DCB_MSG_MSK_DSCP);
+
+ err = nfp_net_mbox_reconfig_and_unlock(nn, cmd);
+ if (err)
+ return err;
+
+ dcb->dscp2prio[dscp] = prio;
+
+ return 0;
+}
+
+static int nfp_nic_dcbnl_ieee_setapp(struct net_device *dev,
+ struct dcb_app *app)
+{
+ struct nfp_net *nn = netdev_priv(dev);
+ struct dcb_app old_app;
+ struct nfp_dcb *dcb;
+ bool is_new;
+ int err;
+
+ if (app->selector != IEEE_8021QAZ_APP_SEL_DSCP)
+ return -EINVAL;
+
+ dcb = get_dcb_priv(nn);
+
+ /* Save the old entry info */
+ old_app.selector = IEEE_8021QAZ_APP_SEL_DSCP;
+ old_app.protocol = app->protocol;
+ old_app.priority = dcb->dscp2prio[app->protocol];
+
+ /* Check trust status */
+ if (!dcb->dscp_cnt) {
+ err = nfp_nic_set_trust_status(nn, NFP_DCB_TRUST_DSCP);
+ if (err)
+ return err;
+ }
+
+ /* Check if the new mapping is same as old or in init stage */
+ if (app->priority != old_app.priority || app->priority == 0) {
+ err = nfp_nic_set_dscp2prio(nn, app->protocol, app->priority);
+ if (err)
+ return err;
+ }
+
+ /* Delete the old entry if exists */
+ is_new = !!dcb_ieee_delapp(dev, &old_app);
+
+ /* Add new entry and update counter */
+ err = dcb_ieee_setapp(dev, app);
+ if (err)
+ return err;
+
+ if (is_new)
+ dcb->dscp_cnt++;
+
+ return 0;
+}
+
+static int nfp_nic_dcbnl_ieee_delapp(struct net_device *dev,
+ struct dcb_app *app)
+{
+ struct nfp_net *nn = netdev_priv(dev);
+ struct nfp_dcb *dcb;
+ int err;
+
+ if (app->selector != IEEE_8021QAZ_APP_SEL_DSCP)
+ return -EINVAL;
+
+ dcb = get_dcb_priv(nn);
+
+ /* Check if the dcb_app param match fw */
+ if (app->priority != dcb->dscp2prio[app->protocol])
+ return -ENOENT;
+
+ /* Set fw dscp mapping to 0 */
+ err = nfp_nic_set_dscp2prio(nn, app->protocol, 0);
+ if (err)
+ return err;
+
+ /* Delete app from dcb list */
+ err = dcb_ieee_delapp(dev, app);
+ if (err)
+ return err;
+
+ /* Decrease dscp counter */
+ dcb->dscp_cnt--;
+
+ /* If no dscp mapping is configured, trust pcp */
+ if (dcb->dscp_cnt == 0)
+ return nfp_nic_set_trust_status(nn, NFP_DCB_TRUST_PCP);
+
+ return 0;
+}
+
+static const struct dcbnl_rtnl_ops nfp_nic_dcbnl_ops = {
+ /* ieee 802.1Qaz std */
+ .ieee_getets = nfp_nic_dcbnl_ieee_getets,
+ .ieee_setets = nfp_nic_dcbnl_ieee_setets,
+ .ieee_getmaxrate = nfp_nic_dcbnl_ieee_getmaxrate,
+ .ieee_setmaxrate = nfp_nic_dcbnl_ieee_setmaxrate,
+ .ieee_setapp = nfp_nic_dcbnl_ieee_setapp,
+ .ieee_delapp = nfp_nic_dcbnl_ieee_delapp,
+};
+
+int nfp_nic_dcb_init(struct nfp_net *nn)
+{
+ struct nfp_app *app = nn->app;
+ struct nfp_dcb *dcb;
+ int err;
+
+ dcb = get_dcb_priv(nn);
+ dcb->cfg_offset = NFP_DCB_CFG_STRIDE * nn->id;
+ dcb->dcbcfg_tbl = nfp_pf_map_rtsym(app->pf, "net.dcbcfg_tbl",
+ "_abi_dcb_cfg",
+ dcb->cfg_offset, &dcb->dcbcfg_tbl_area);
+ if (IS_ERR(dcb->dcbcfg_tbl)) {
+ if (PTR_ERR(dcb->dcbcfg_tbl) != -ENOENT) {
+ err = PTR_ERR(dcb->dcbcfg_tbl);
+ dcb->dcbcfg_tbl = NULL;
+ nfp_err(app->cpp,
+ "Failed to map dcbcfg_tbl area, min_size %u.\n",
+ dcb->cfg_offset);
+ return err;
+ }
+ dcb->dcbcfg_tbl = NULL;
+ }
+
+ if (dcb->dcbcfg_tbl) {
+ for (unsigned int i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ dcb->prio2tc[i] = i;
+ dcb->tc2idx[i] = i;
+ dcb->tc_tx_pct[i] = 0;
+ dcb->tc_maxrate[i] = 0;
+ dcb->tc_tsa[i] = IEEE_8021QAZ_TSA_VENDOR;
+ }
+ dcb->trust_status = NFP_DCB_TRUST_INVALID;
+ dcb->rate_init = false;
+ dcb->ets_init = false;
+
+ nn->dp.netdev->dcbnl_ops = &nfp_nic_dcbnl_ops;
+ }
+
+ return 0;
+}
+
+void nfp_nic_dcb_clean(struct nfp_net *nn)
+{
+ struct nfp_dcb *dcb;
+
+ dcb = get_dcb_priv(nn);
+ if (dcb->dcbcfg_tbl_area)
+ nfp_cpp_area_release_free(dcb->dcbcfg_tbl_area);
+}
diff --git a/drivers/net/ethernet/netronome/nfp/nic/main.c b/drivers/net/ethernet/netronome/nfp/nic/main.c
index aea8579206ee..9dd5afe37f6e 100644
--- a/drivers/net/ethernet/netronome/nfp/nic/main.c
+++ b/drivers/net/ethernet/netronome/nfp/nic/main.c
@@ -5,6 +5,8 @@
#include "../nfpcore/nfp_nsp.h"
#include "../nfp_app.h"
#include "../nfp_main.h"
+#include "../nfp_net.h"
+#include "main.h"
static int nfp_nic_init(struct nfp_app *app)
{
@@ -28,13 +30,50 @@ static void nfp_nic_sriov_disable(struct nfp_app *app)
{
}
+static int nfp_nic_vnic_init(struct nfp_app *app, struct nfp_net *nn)
+{
+ return nfp_nic_dcb_init(nn);
+}
+
+static void nfp_nic_vnic_clean(struct nfp_app *app, struct nfp_net *nn)
+{
+ nfp_nic_dcb_clean(nn);
+}
+
+static int nfp_nic_vnic_alloc(struct nfp_app *app, struct nfp_net *nn,
+ unsigned int id)
+{
+ struct nfp_app_nic_private *app_pri = nn->app_priv;
+ int err;
+
+ err = nfp_app_nic_vnic_alloc(app, nn, id);
+ if (err)
+ return err;
+
+ if (sizeof(*app_pri)) {
+ nn->app_priv = kzalloc(sizeof(*app_pri), GFP_KERNEL);
+ if (!nn->app_priv)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void nfp_nic_vnic_free(struct nfp_app *app, struct nfp_net *nn)
+{
+ kfree(nn->app_priv);
+}
+
const struct nfp_app_type app_nic = {
.id = NFP_APP_CORE_NIC,
.name = "nic",
.init = nfp_nic_init,
- .vnic_alloc = nfp_app_nic_vnic_alloc,
-
+ .vnic_alloc = nfp_nic_vnic_alloc,
+ .vnic_free = nfp_nic_vnic_free,
.sriov_enable = nfp_nic_sriov_enable,
.sriov_disable = nfp_nic_sriov_disable,
+
+ .vnic_init = nfp_nic_vnic_init,
+ .vnic_clean = nfp_nic_vnic_clean,
};
diff --git a/drivers/net/ethernet/netronome/nfp/nic/main.h b/drivers/net/ethernet/netronome/nfp/nic/main.h
new file mode 100644
index 000000000000..094374df42b8
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nic/main.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/* Copyright (C) 2023 Corigine, Inc. */
+
+#ifndef __NFP_NIC_H__
+#define __NFP_NIC_H__ 1
+
+#include <linux/netdevice.h>
+
+#ifdef CONFIG_DCB
+/* DCB feature definitions */
+#define NFP_NET_MAX_DSCP 4
+#define NFP_NET_MAX_TC IEEE_8021QAZ_MAX_TCS
+#define NFP_NET_MAX_PRIO 8
+#define NFP_DCB_CFG_STRIDE 256
+
+struct nfp_dcb {
+ u8 dscp2prio[NFP_NET_MAX_DSCP];
+ u8 prio2tc[NFP_NET_MAX_PRIO];
+ u8 tc2idx[IEEE_8021QAZ_MAX_TCS];
+ u64 tc_maxrate[IEEE_8021QAZ_MAX_TCS];
+ u8 tc_tx_pct[IEEE_8021QAZ_MAX_TCS];
+ u8 tc_tsa[IEEE_8021QAZ_MAX_TCS];
+ u8 dscp_cnt;
+ u8 trust_status;
+ bool rate_init;
+ bool ets_init;
+
+ struct nfp_cpp_area *dcbcfg_tbl_area;
+ u8 __iomem *dcbcfg_tbl;
+ u32 cfg_offset;
+};
+
+int nfp_nic_dcb_init(struct nfp_net *nn);
+void nfp_nic_dcb_clean(struct nfp_net *nn);
+#else
+static inline int nfp_nic_dcb_init(struct nfp_net *nn) { return 0; }
+static inline void nfp_nic_dcb_clean(struct nfp_net *nn) {}
+#endif
+
+struct nfp_app_nic_private {
+#ifdef CONFIG_DCB
+ struct nfp_dcb dcb;
+#endif
+};
+
+#endif
diff --git a/drivers/net/ethernet/ni/nixge.c b/drivers/net/ethernet/ni/nixge.c
index 62320be4de5a..56e02cba0b8a 100644
--- a/drivers/net/ethernet/ni/nixge.c
+++ b/drivers/net/ethernet/ni/nixge.c
@@ -1081,40 +1081,59 @@ static const struct ethtool_ops nixge_ethtool_ops = {
.get_link = ethtool_op_get_link,
};
-static int nixge_mdio_read(struct mii_bus *bus, int phy_id, int reg)
+static int nixge_mdio_read_c22(struct mii_bus *bus, int phy_id, int reg)
{
struct nixge_priv *priv = bus->priv;
u32 status, tmp;
int err;
u16 device;
- if (reg & MII_ADDR_C45) {
- device = (reg >> 16) & 0x1f;
+ device = reg & 0x1f;
- nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_ADDR, reg & 0xffff);
+ tmp = NIXGE_MDIO_CLAUSE22 | NIXGE_MDIO_OP(NIXGE_MDIO_C22_READ) |
+ NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);
- tmp = NIXGE_MDIO_CLAUSE45 | NIXGE_MDIO_OP(NIXGE_MDIO_OP_ADDRESS)
- | NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);
+ nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp);
+ nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1);
- nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp);
- nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1);
+ err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status,
+ !status, 10, 1000);
+ if (err) {
+ dev_err(priv->dev, "timeout setting read command");
+ return err;
+ }
- err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status,
- !status, 10, 1000);
- if (err) {
- dev_err(priv->dev, "timeout setting address");
- return err;
- }
+ status = nixge_ctrl_read_reg(priv, NIXGE_REG_MDIO_DATA);
- tmp = NIXGE_MDIO_CLAUSE45 | NIXGE_MDIO_OP(NIXGE_MDIO_C45_READ) |
- NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);
- } else {
- device = reg & 0x1f;
+ return status;
+}
- tmp = NIXGE_MDIO_CLAUSE22 | NIXGE_MDIO_OP(NIXGE_MDIO_C22_READ) |
- NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);
+static int nixge_mdio_read_c45(struct mii_bus *bus, int phy_id, int device,
+ int reg)
+{
+ struct nixge_priv *priv = bus->priv;
+ u32 status, tmp;
+ int err;
+
+ nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_ADDR, reg & 0xffff);
+
+ tmp = NIXGE_MDIO_CLAUSE45 |
+ NIXGE_MDIO_OP(NIXGE_MDIO_OP_ADDRESS) |
+ NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);
+
+ nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp);
+ nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1);
+
+ err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status,
+ !status, 10, 1000);
+ if (err) {
+ dev_err(priv->dev, "timeout setting address");
+ return err;
}
+ tmp = NIXGE_MDIO_CLAUSE45 | NIXGE_MDIO_OP(NIXGE_MDIO_C45_READ) |
+ NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);
+
nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp);
nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1);
@@ -1130,57 +1149,65 @@ static int nixge_mdio_read(struct mii_bus *bus, int phy_id, int reg)
return status;
}
-static int nixge_mdio_write(struct mii_bus *bus, int phy_id, int reg, u16 val)
+static int nixge_mdio_write_c22(struct mii_bus *bus, int phy_id, int reg,
+ u16 val)
{
struct nixge_priv *priv = bus->priv;
u32 status, tmp;
u16 device;
int err;
- if (reg & MII_ADDR_C45) {
- device = (reg >> 16) & 0x1f;
+ device = reg & 0x1f;
- nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_ADDR, reg & 0xffff);
+ tmp = NIXGE_MDIO_CLAUSE22 | NIXGE_MDIO_OP(NIXGE_MDIO_C22_WRITE) |
+ NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);
- tmp = NIXGE_MDIO_CLAUSE45 | NIXGE_MDIO_OP(NIXGE_MDIO_OP_ADDRESS)
- | NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);
+ nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_DATA, val);
+ nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp);
+ nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1);
- nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp);
- nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1);
+ err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status,
+ !status, 10, 1000);
+ if (err)
+ dev_err(priv->dev, "timeout setting write command");
- err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status,
- !status, 10, 1000);
- if (err) {
- dev_err(priv->dev, "timeout setting address");
- return err;
- }
+ return err;
+}
- tmp = NIXGE_MDIO_CLAUSE45 | NIXGE_MDIO_OP(NIXGE_MDIO_C45_WRITE)
- | NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);
+static int nixge_mdio_write_c45(struct mii_bus *bus, int phy_id,
+ int device, int reg, u16 val)
+{
+ struct nixge_priv *priv = bus->priv;
+ u32 status, tmp;
+ int err;
- nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_DATA, val);
- nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp);
- err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status,
- !status, 10, 1000);
- if (err)
- dev_err(priv->dev, "timeout setting write command");
- } else {
- device = reg & 0x1f;
+ nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_ADDR, reg & 0xffff);
- tmp = NIXGE_MDIO_CLAUSE22 |
- NIXGE_MDIO_OP(NIXGE_MDIO_C22_WRITE) |
- NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);
+ tmp = NIXGE_MDIO_CLAUSE45 |
+ NIXGE_MDIO_OP(NIXGE_MDIO_OP_ADDRESS) |
+ NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);
- nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_DATA, val);
- nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp);
- nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1);
+ nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp);
+ nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1);
- err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status,
- !status, 10, 1000);
- if (err)
- dev_err(priv->dev, "timeout setting write command");
+ err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status,
+ !status, 10, 1000);
+ if (err) {
+ dev_err(priv->dev, "timeout setting address");
+ return err;
}
+ tmp = NIXGE_MDIO_CLAUSE45 | NIXGE_MDIO_OP(NIXGE_MDIO_C45_WRITE) |
+ NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);
+
+ nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_DATA, val);
+ nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp);
+
+ err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status,
+ !status, 10, 1000);
+ if (err)
+ dev_err(priv->dev, "timeout setting write command");
+
return err;
}
@@ -1195,8 +1222,10 @@ static int nixge_mdio_setup(struct nixge_priv *priv, struct device_node *np)
snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(priv->dev));
bus->priv = priv;
bus->name = "nixge_mii_bus";
- bus->read = nixge_mdio_read;
- bus->write = nixge_mdio_write;
+ bus->read = nixge_mdio_read_c22;
+ bus->write = nixge_mdio_write_c22;
+ bus->read_c45 = nixge_mdio_read_c45;
+ bus->write_c45 = nixge_mdio_write_c45;
bus->parent = priv->dev;
priv->mii_bus = bus;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
index ce436e97324a..e508f8eb43bf 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
@@ -121,7 +121,7 @@ static void ionic_vf_dealloc_locked(struct ionic *ionic)
if (v->stats_pa) {
vfc.stats_pa = 0;
- (void)ionic_set_vf_config(ionic, i, &vfc);
+ ionic_set_vf_config(ionic, i, &vfc);
dma_unmap_single(ionic->dev, v->stats_pa,
sizeof(v->stats), DMA_FROM_DEVICE);
v->stats_pa = 0;
@@ -169,7 +169,7 @@ static int ionic_vf_alloc(struct ionic *ionic, int num_vfs)
/* ignore failures from older FW, we just won't get stats */
vfc.stats_pa = cpu_to_le64(v->stats_pa);
- (void)ionic_set_vf_config(ionic, i, &vfc);
+ ionic_set_vf_config(ionic, i, &vfc);
}
out:
@@ -352,6 +352,7 @@ err_out_port_reset:
err_out_reset:
ionic_reset(ionic);
err_out_teardown:
+ ionic_dev_teardown(ionic);
pci_clear_master(pdev);
/* Don't fail the probe for these errors, keep
* the hw interface around for inspection
@@ -390,6 +391,7 @@ static void ionic_remove(struct pci_dev *pdev)
ionic_port_reset(ionic);
ionic_reset(ionic);
+ ionic_dev_teardown(ionic);
pci_clear_master(pdev);
ionic_unmap_bars(ionic);
pci_release_regions(pdev);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.c b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
index 626b9113e7c4..c06576f43916 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
@@ -92,6 +92,7 @@ int ionic_dev_setup(struct ionic *ionic)
unsigned int num_bars = ionic->num_bars;
struct ionic_dev *idev = &ionic->idev;
struct device *dev = ionic->dev;
+ int size;
u32 sig;
/* BAR0: dev_cmd and interrupts */
@@ -133,9 +134,36 @@ int ionic_dev_setup(struct ionic *ionic)
idev->db_pages = bar->vaddr;
idev->phy_db_pages = bar->bus_addr;
+ /* BAR2: optional controller memory mapping */
+ bar++;
+ mutex_init(&idev->cmb_inuse_lock);
+ if (num_bars < 3 || !ionic->bars[IONIC_PCI_BAR_CMB].len) {
+ idev->cmb_inuse = NULL;
+ return 0;
+ }
+
+ idev->phy_cmb_pages = bar->bus_addr;
+ idev->cmb_npages = bar->len / PAGE_SIZE;
+ size = BITS_TO_LONGS(idev->cmb_npages) * sizeof(long);
+ idev->cmb_inuse = kzalloc(size, GFP_KERNEL);
+ if (!idev->cmb_inuse)
+ dev_warn(dev, "No memory for CMB, disabling\n");
+
return 0;
}
+void ionic_dev_teardown(struct ionic *ionic)
+{
+ struct ionic_dev *idev = &ionic->idev;
+
+ kfree(idev->cmb_inuse);
+ idev->cmb_inuse = NULL;
+ idev->phy_cmb_pages = 0;
+ idev->cmb_npages = 0;
+
+ mutex_destroy(&idev->cmb_inuse_lock);
+}
+
/* Devcmd Interface */
bool ionic_is_fw_running(struct ionic_dev *idev)
{
@@ -571,6 +599,33 @@ int ionic_db_page_num(struct ionic_lif *lif, int pid)
return (lif->hw_index * lif->dbid_count) + pid;
}
+int ionic_get_cmb(struct ionic_lif *lif, u32 *pgid, phys_addr_t *pgaddr, int order)
+{
+ struct ionic_dev *idev = &lif->ionic->idev;
+ int ret;
+
+ mutex_lock(&idev->cmb_inuse_lock);
+ ret = bitmap_find_free_region(idev->cmb_inuse, idev->cmb_npages, order);
+ mutex_unlock(&idev->cmb_inuse_lock);
+
+ if (ret < 0)
+ return ret;
+
+ *pgid = ret;
+ *pgaddr = idev->phy_cmb_pages + ret * PAGE_SIZE;
+
+ return 0;
+}
+
+void ionic_put_cmb(struct ionic_lif *lif, u32 pgid, int order)
+{
+ struct ionic_dev *idev = &lif->ionic->idev;
+
+ mutex_lock(&idev->cmb_inuse_lock);
+ bitmap_release_region(idev->cmb_inuse, pgid, order);
+ mutex_unlock(&idev->cmb_inuse_lock);
+}
+
int ionic_cq_init(struct ionic_lif *lif, struct ionic_cq *cq,
struct ionic_intr_info *intr,
unsigned int num_descs, size_t desc_size)
@@ -679,6 +734,18 @@ void ionic_q_map(struct ionic_queue *q, void *base, dma_addr_t base_pa)
cur->desc = base + (i * q->desc_size);
}
+void ionic_q_cmb_map(struct ionic_queue *q, void __iomem *base, dma_addr_t base_pa)
+{
+ struct ionic_desc_info *cur;
+ unsigned int i;
+
+ q->cmb_base = base;
+ q->cmb_base_pa = base_pa;
+
+ for (i = 0, cur = q->info; i < q->num_descs; i++, cur++)
+ cur->cmb_desc = base + (i * q->desc_size);
+}
+
void ionic_q_sg_map(struct ionic_queue *q, void *base, dma_addr_t base_pa)
{
struct ionic_desc_info *cur;
@@ -708,9 +775,16 @@ void ionic_q_post(struct ionic_queue *q, bool ring_doorbell, ionic_desc_cb cb,
q->lif->index, q->name, q->hw_type, q->hw_index,
q->head_idx, ring_doorbell);
- if (ring_doorbell)
+ if (ring_doorbell) {
ionic_dbell_ring(lif->kern_dbpage, q->hw_type,
q->dbval | q->head_idx);
+
+ q->dbell_jiffies = jiffies;
+
+ if (q_to_qcq(q)->napi_qcq)
+ mod_timer(&q_to_qcq(q)->napi_qcq->napi_deadline,
+ jiffies + IONIC_NAPI_DEADLINE);
+ }
}
static bool ionic_q_is_posted(struct ionic_queue *q, unsigned int pos)
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.h b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
index 2a1d7b9c07e7..0bea208bfba2 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
@@ -25,6 +25,12 @@
#define IONIC_DEV_INFO_REG_COUNT 32
#define IONIC_DEV_CMD_REG_COUNT 32
+#define IONIC_NAPI_DEADLINE (HZ / 200) /* 5ms */
+#define IONIC_ADMIN_DOORBELL_DEADLINE (HZ / 2) /* 500ms */
+#define IONIC_TX_DOORBELL_DEADLINE (HZ / 100) /* 10ms */
+#define IONIC_RX_MIN_DOORBELL_DEADLINE (HZ / 100) /* 10ms */
+#define IONIC_RX_MAX_DOORBELL_DEADLINE (HZ * 5) /* 5s */
+
struct ionic_dev_bar {
void __iomem *vaddr;
phys_addr_t bus_addr;
@@ -153,6 +159,11 @@ struct ionic_dev {
struct ionic_intr __iomem *intr_ctrl;
u64 __iomem *intr_status;
+ struct mutex cmb_inuse_lock; /* for cmb_inuse */
+ unsigned long *cmb_inuse;
+ dma_addr_t phy_cmb_pages;
+ u32 cmb_npages;
+
u32 port_info_sz;
struct ionic_port_info *port_info;
dma_addr_t port_info_pa;
@@ -197,6 +208,7 @@ struct ionic_desc_info {
struct ionic_rxq_desc *rxq_desc;
struct ionic_admin_cmd *adminq_desc;
};
+ void __iomem *cmb_desc;
union {
void *sg_desc;
struct ionic_txq_sg_desc *txq_sg_desc;
@@ -216,6 +228,8 @@ struct ionic_queue {
struct ionic_lif *lif;
struct ionic_desc_info *info;
u64 dbval;
+ unsigned long dbell_deadline;
+ unsigned long dbell_jiffies;
u16 head_idx;
u16 tail_idx;
unsigned int index;
@@ -233,12 +247,14 @@ struct ionic_queue {
struct ionic_rxq_desc *rxq;
struct ionic_admin_cmd *adminq;
};
+ void __iomem *cmb_base;
union {
void *sg_base;
struct ionic_txq_sg_desc *txq_sgl;
struct ionic_rxq_sg_desc *rxq_sgl;
};
dma_addr_t base_pa;
+ dma_addr_t cmb_base_pa;
dma_addr_t sg_base_pa;
unsigned int desc_size;
unsigned int sg_desc_size;
@@ -301,6 +317,7 @@ static inline bool ionic_q_has_space(struct ionic_queue *q, unsigned int want)
void ionic_init_devinfo(struct ionic *ionic);
int ionic_dev_setup(struct ionic *ionic);
+void ionic_dev_teardown(struct ionic *ionic);
void ionic_dev_cmd_go(struct ionic_dev *idev, union ionic_dev_cmd *cmd);
u8 ionic_dev_cmd_status(struct ionic_dev *idev);
@@ -336,6 +353,9 @@ void ionic_dev_cmd_adminq_init(struct ionic_dev *idev, struct ionic_qcq *qcq,
int ionic_db_page_num(struct ionic_lif *lif, int pid);
+int ionic_get_cmb(struct ionic_lif *lif, u32 *pgid, phys_addr_t *pgaddr, int order);
+void ionic_put_cmb(struct ionic_lif *lif, u32 pgid, int order);
+
int ionic_cq_init(struct ionic_lif *lif, struct ionic_cq *cq,
struct ionic_intr_info *intr,
unsigned int num_descs, size_t desc_size);
@@ -352,6 +372,7 @@ int ionic_q_init(struct ionic_lif *lif, struct ionic_dev *idev,
unsigned int num_descs, size_t desc_size,
size_t sg_desc_size, unsigned int pid);
void ionic_q_map(struct ionic_queue *q, void *base, dma_addr_t base_pa);
+void ionic_q_cmb_map(struct ionic_queue *q, void __iomem *base, dma_addr_t base_pa);
void ionic_q_sg_map(struct ionic_queue *q, void *base, dma_addr_t base_pa);
void ionic_q_post(struct ionic_queue *q, bool ring_doorbell, ionic_desc_cb cb,
void *cb_arg);
@@ -361,4 +382,8 @@ void ionic_q_service(struct ionic_queue *q, struct ionic_cq_info *cq_info,
int ionic_heartbeat_check(struct ionic *ionic);
bool ionic_is_fw_running(struct ionic_dev *idev);
+bool ionic_adminq_poke_doorbell(struct ionic_queue *q);
+bool ionic_txq_poke_doorbell(struct ionic_queue *q);
+bool ionic_rxq_poke_doorbell(struct ionic_queue *q);
+
#endif /* _IONIC_DEV_H_ */
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
index 01c22701482d..cf33503468a3 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
@@ -511,6 +511,87 @@ static int ionic_set_coalesce(struct net_device *netdev,
return 0;
}
+static int ionic_validate_cmb_config(struct ionic_lif *lif,
+ struct ionic_queue_params *qparam)
+{
+ int pages_have, pages_required = 0;
+ unsigned long sz;
+
+ if (!lif->ionic->idev.cmb_inuse &&
+ (qparam->cmb_tx || qparam->cmb_rx)) {
+ netdev_info(lif->netdev, "CMB rings are not supported on this device\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (qparam->cmb_tx) {
+ if (!(lif->qtype_info[IONIC_QTYPE_TXQ].features & IONIC_QIDENT_F_CMB)) {
+ netdev_info(lif->netdev,
+ "CMB rings for tx-push are not supported on this device\n");
+ return -EOPNOTSUPP;
+ }
+
+ sz = sizeof(struct ionic_txq_desc) * qparam->ntxq_descs * qparam->nxqs;
+ pages_required += ALIGN(sz, PAGE_SIZE) / PAGE_SIZE;
+ }
+
+ if (qparam->cmb_rx) {
+ if (!(lif->qtype_info[IONIC_QTYPE_RXQ].features & IONIC_QIDENT_F_CMB)) {
+ netdev_info(lif->netdev,
+ "CMB rings for rx-push are not supported on this device\n");
+ return -EOPNOTSUPP;
+ }
+
+ sz = sizeof(struct ionic_rxq_desc) * qparam->nrxq_descs * qparam->nxqs;
+ pages_required += ALIGN(sz, PAGE_SIZE) / PAGE_SIZE;
+ }
+
+ pages_have = lif->ionic->bars[IONIC_PCI_BAR_CMB].len / PAGE_SIZE;
+ if (pages_required > pages_have) {
+ netdev_info(lif->netdev,
+ "Not enough CMB pages for number of queues and size of descriptor rings, need %d have %d",
+ pages_required, pages_have);
+ return -ENOMEM;
+ }
+
+ return pages_required;
+}
+
+static int ionic_cmb_rings_toggle(struct ionic_lif *lif, bool cmb_tx, bool cmb_rx)
+{
+ struct ionic_queue_params qparam;
+ int pages_used;
+
+ if (netif_running(lif->netdev)) {
+ netdev_info(lif->netdev, "Please stop device to toggle CMB for tx/rx-push\n");
+ return -EBUSY;
+ }
+
+ ionic_init_queue_params(lif, &qparam);
+ qparam.cmb_tx = cmb_tx;
+ qparam.cmb_rx = cmb_rx;
+ pages_used = ionic_validate_cmb_config(lif, &qparam);
+ if (pages_used < 0)
+ return pages_used;
+
+ if (cmb_tx)
+ set_bit(IONIC_LIF_F_CMB_TX_RINGS, lif->state);
+ else
+ clear_bit(IONIC_LIF_F_CMB_TX_RINGS, lif->state);
+
+ if (cmb_rx)
+ set_bit(IONIC_LIF_F_CMB_RX_RINGS, lif->state);
+ else
+ clear_bit(IONIC_LIF_F_CMB_RX_RINGS, lif->state);
+
+ if (cmb_tx || cmb_rx)
+ netdev_info(lif->netdev, "Enabling CMB %s %s rings - %d pages\n",
+ cmb_tx ? "TX" : "", cmb_rx ? "RX" : "", pages_used);
+ else
+ netdev_info(lif->netdev, "Disabling CMB rings\n");
+
+ return 0;
+}
+
static void ionic_get_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring,
struct kernel_ethtool_ringparam *kernel_ring,
@@ -522,6 +603,8 @@ static void ionic_get_ringparam(struct net_device *netdev,
ring->tx_pending = lif->ntxq_descs;
ring->rx_max_pending = IONIC_MAX_RX_DESC;
ring->rx_pending = lif->nrxq_descs;
+ kernel_ring->tx_push = test_bit(IONIC_LIF_F_CMB_TX_RINGS, lif->state);
+ kernel_ring->rx_push = test_bit(IONIC_LIF_F_CMB_RX_RINGS, lif->state);
}
static int ionic_set_ringparam(struct net_device *netdev,
@@ -551,9 +634,28 @@ static int ionic_set_ringparam(struct net_device *netdev,
/* if nothing to do return success */
if (ring->tx_pending == lif->ntxq_descs &&
- ring->rx_pending == lif->nrxq_descs)
+ ring->rx_pending == lif->nrxq_descs &&
+ kernel_ring->tx_push == test_bit(IONIC_LIF_F_CMB_TX_RINGS, lif->state) &&
+ kernel_ring->rx_push == test_bit(IONIC_LIF_F_CMB_RX_RINGS, lif->state))
return 0;
+ qparam.ntxq_descs = ring->tx_pending;
+ qparam.nrxq_descs = ring->rx_pending;
+ qparam.cmb_tx = kernel_ring->tx_push;
+ qparam.cmb_rx = kernel_ring->rx_push;
+
+ err = ionic_validate_cmb_config(lif, &qparam);
+ if (err < 0)
+ return err;
+
+ if (kernel_ring->tx_push != test_bit(IONIC_LIF_F_CMB_TX_RINGS, lif->state) ||
+ kernel_ring->rx_push != test_bit(IONIC_LIF_F_CMB_RX_RINGS, lif->state)) {
+ err = ionic_cmb_rings_toggle(lif, kernel_ring->tx_push,
+ kernel_ring->rx_push);
+ if (err < 0)
+ return err;
+ }
+
if (ring->tx_pending != lif->ntxq_descs)
netdev_info(netdev, "Changing Tx ring size from %d to %d\n",
lif->ntxq_descs, ring->tx_pending);
@@ -569,9 +671,6 @@ static int ionic_set_ringparam(struct net_device *netdev,
return 0;
}
- qparam.ntxq_descs = ring->tx_pending;
- qparam.nrxq_descs = ring->rx_pending;
-
mutex_lock(&lif->queue_lock);
err = ionic_reconfigure_queues(lif, &qparam);
mutex_unlock(&lif->queue_lock);
@@ -638,7 +737,7 @@ static int ionic_set_channels(struct net_device *netdev,
lif->nxqs, ch->combined_count);
qparam.nxqs = ch->combined_count;
- qparam.intr_split = 0;
+ qparam.intr_split = false;
} else {
max_cnt /= 2;
if (ch->rx_count > max_cnt)
@@ -654,9 +753,13 @@ static int ionic_set_channels(struct net_device *netdev,
lif->nxqs, ch->rx_count);
qparam.nxqs = ch->rx_count;
- qparam.intr_split = 1;
+ qparam.intr_split = true;
}
+ err = ionic_validate_cmb_config(lif, &qparam);
+ if (err < 0)
+ return err;
+
/* if we're not running, just set the values and return */
if (!netif_running(lif->netdev)) {
lif->nxqs = qparam.nxqs;
@@ -965,6 +1068,8 @@ static const struct ethtool_ops ionic_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_USE_ADAPTIVE_RX |
ETHTOOL_COALESCE_USE_ADAPTIVE_TX,
+ .supported_ring_params = ETHTOOL_RING_USE_TX_PUSH |
+ ETHTOOL_RING_USE_RX_PUSH,
.get_drvinfo = ionic_get_drvinfo,
.get_regs_len = ionic_get_regs_len,
.get_regs = ionic_get_regs,
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_if.h b/drivers/net/ethernet/pensando/ionic/ionic_if.h
index eac09b2375b8..9a1825edf0d0 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_if.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_if.h
@@ -3073,9 +3073,10 @@ union ionic_adminq_comp {
#define IONIC_BARS_MAX 6
#define IONIC_PCI_BAR_DBELL 1
+#define IONIC_PCI_BAR_CMB 2
-/* BAR0 */
#define IONIC_BAR0_SIZE 0x8000
+#define IONIC_BAR2_SIZE 0x800000
#define IONIC_BAR0_DEV_INFO_REGS_OFFSET 0x0000
#define IONIC_BAR0_DEV_CMD_REGS_OFFSET 0x0800
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
index 4dd16c487f2b..957027e546b3 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -16,6 +16,7 @@
#include "ionic.h"
#include "ionic_bus.h"
+#include "ionic_dev.h"
#include "ionic_lif.h"
#include "ionic_txrx.h"
#include "ionic_ethtool.h"
@@ -25,9 +26,12 @@
static const u8 ionic_qtype_versions[IONIC_QTYPE_MAX] = {
[IONIC_QTYPE_ADMINQ] = 0, /* 0 = Base version with CQ support */
[IONIC_QTYPE_NOTIFYQ] = 0, /* 0 = Base version */
- [IONIC_QTYPE_RXQ] = 0, /* 0 = Base version with CQ+SG support */
- [IONIC_QTYPE_TXQ] = 1, /* 0 = Base version with CQ+SG support
- * 1 = ... with Tx SG version 1
+ [IONIC_QTYPE_RXQ] = 2, /* 0 = Base version with CQ+SG support
+ * 2 = ... with CMB rings
+ */
+ [IONIC_QTYPE_TXQ] = 3, /* 0 = Base version with CQ+SG support
+ * 1 = ... with Tx SG version 1
+ * 3 = ... with CMB rings
*/
};
@@ -148,7 +152,7 @@ static void ionic_link_status_check(struct ionic_lif *lif)
mutex_lock(&lif->queue_lock);
err = ionic_start_queues(lif);
if (err && err != -EBUSY) {
- netdev_err(lif->netdev,
+ netdev_err(netdev,
"Failed to start queues: %d\n", err);
set_bit(IONIC_LIF_F_BROKEN, lif->state);
netif_carrier_off(lif->netdev);
@@ -200,6 +204,13 @@ void ionic_link_status_check_request(struct ionic_lif *lif, bool can_sleep)
}
}
+static void ionic_napi_deadline(struct timer_list *timer)
+{
+ struct ionic_qcq *qcq = container_of(timer, struct ionic_qcq, napi_deadline);
+
+ napi_schedule(&qcq->napi);
+}
+
static irqreturn_t ionic_isr(int irq, void *data)
{
struct napi_struct *napi = data;
@@ -269,6 +280,7 @@ static int ionic_qcq_enable(struct ionic_qcq *qcq)
.oper = IONIC_Q_ENABLE,
},
};
+ int ret;
idev = &lif->ionic->idev;
dev = lif->ionic->dev;
@@ -276,16 +288,24 @@ static int ionic_qcq_enable(struct ionic_qcq *qcq)
dev_dbg(dev, "q_enable.index %d q_enable.qtype %d\n",
ctx.cmd.q_control.index, ctx.cmd.q_control.type);
+ if (qcq->flags & IONIC_QCQ_F_INTR)
+ ionic_intr_clean(idev->intr_ctrl, qcq->intr.index);
+
+ ret = ionic_adminq_post_wait(lif, &ctx);
+ if (ret)
+ return ret;
+
+ if (qcq->napi.poll)
+ napi_enable(&qcq->napi);
+
if (qcq->flags & IONIC_QCQ_F_INTR) {
irq_set_affinity_hint(qcq->intr.vector,
&qcq->intr.affinity_mask);
- napi_enable(&qcq->napi);
- ionic_intr_clean(idev->intr_ctrl, qcq->intr.index);
ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
IONIC_INTR_MASK_CLEAR);
}
- return ionic_adminq_post_wait(lif, &ctx);
+ return 0;
}
static int ionic_qcq_disable(struct ionic_lif *lif, struct ionic_qcq *qcq, int fw_err)
@@ -316,6 +336,7 @@ static int ionic_qcq_disable(struct ionic_lif *lif, struct ionic_qcq *qcq, int f
synchronize_irq(qcq->intr.vector);
irq_set_affinity_hint(qcq->intr.vector, NULL);
napi_disable(&qcq->napi);
+ del_timer_sync(&qcq->napi_deadline);
}
/* If there was a previous fw communcation error, don't bother with
@@ -379,6 +400,15 @@ static void ionic_qcq_free(struct ionic_lif *lif, struct ionic_qcq *qcq)
qcq->q_base_pa = 0;
}
+ if (qcq->cmb_q_base) {
+ iounmap(qcq->cmb_q_base);
+ ionic_put_cmb(lif, qcq->cmb_pgid, qcq->cmb_order);
+ qcq->cmb_pgid = 0;
+ qcq->cmb_order = 0;
+ qcq->cmb_q_base = NULL;
+ qcq->cmb_q_base_pa = 0;
+ }
+
if (qcq->cq_base) {
dma_free_coherent(dev, qcq->cq_size, qcq->cq_base, qcq->cq_base_pa);
qcq->cq_base = NULL;
@@ -451,6 +481,7 @@ static void ionic_link_qcq_interrupts(struct ionic_qcq *src_qcq,
n_qcq->intr.vector = src_qcq->intr.vector;
n_qcq->intr.index = src_qcq->intr.index;
+ n_qcq->napi_qcq = src_qcq->napi_qcq;
}
static int ionic_alloc_qcq_interrupt(struct ionic_lif *lif, struct ionic_qcq *qcq)
@@ -564,13 +595,15 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
}
if (flags & IONIC_QCQ_F_NOTIFYQ) {
- int q_size, cq_size;
+ int q_size;
- /* q & cq need to be contiguous in case of notifyq */
+ /* q & cq need to be contiguous in NotifyQ, so alloc it all in q
+ * and don't alloc qc. We leave new->qc_size and new->qc_base
+ * as 0 to be sure we don't try to free it later.
+ */
q_size = ALIGN(num_descs * desc_size, PAGE_SIZE);
- cq_size = ALIGN(num_descs * cq_desc_size, PAGE_SIZE);
-
- new->q_size = PAGE_SIZE + q_size + cq_size;
+ new->q_size = PAGE_SIZE + q_size +
+ ALIGN(num_descs * cq_desc_size, PAGE_SIZE);
new->q_base = dma_alloc_coherent(dev, new->q_size,
&new->q_base_pa, GFP_KERNEL);
if (!new->q_base) {
@@ -587,6 +620,7 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
ionic_cq_map(&new->cq, cq_base, cq_base_pa);
ionic_cq_bind(&new->cq, &new->q);
} else {
+ /* regular DMA q descriptors */
new->q_size = PAGE_SIZE + (num_descs * desc_size);
new->q_base = dma_alloc_coherent(dev, new->q_size, &new->q_base_pa,
GFP_KERNEL);
@@ -599,6 +633,33 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
q_base_pa = ALIGN(new->q_base_pa, PAGE_SIZE);
ionic_q_map(&new->q, q_base, q_base_pa);
+ if (flags & IONIC_QCQ_F_CMB_RINGS) {
+ /* on-chip CMB q descriptors */
+ new->cmb_q_size = num_descs * desc_size;
+ new->cmb_order = order_base_2(new->cmb_q_size / PAGE_SIZE);
+
+ err = ionic_get_cmb(lif, &new->cmb_pgid, &new->cmb_q_base_pa,
+ new->cmb_order);
+ if (err) {
+ netdev_err(lif->netdev,
+ "Cannot allocate queue order %d from cmb: err %d\n",
+ new->cmb_order, err);
+ goto err_out_free_q;
+ }
+
+ new->cmb_q_base = ioremap_wc(new->cmb_q_base_pa, new->cmb_q_size);
+ if (!new->cmb_q_base) {
+ netdev_err(lif->netdev, "Cannot map queue from cmb\n");
+ ionic_put_cmb(lif, new->cmb_pgid, new->cmb_order);
+ err = -ENOMEM;
+ goto err_out_free_q;
+ }
+
+ new->cmb_q_base_pa -= idev->phy_cmb_pages;
+ ionic_q_cmb_map(&new->q, new->cmb_q_base, new->cmb_q_base_pa);
+ }
+
+ /* cq DMA descriptors */
new->cq_size = PAGE_SIZE + (num_descs * cq_desc_size);
new->cq_base = dma_alloc_coherent(dev, new->cq_size, &new->cq_base_pa,
GFP_KERNEL);
@@ -637,6 +698,10 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
err_out_free_cq:
dma_free_coherent(dev, new->cq_size, new->cq_base, new->cq_base_pa);
err_out_free_q:
+ if (new->cmb_q_base) {
+ iounmap(new->cmb_q_base);
+ ionic_put_cmb(lif, new->cmb_pgid, new->cmb_order);
+ }
dma_free_coherent(dev, new->q_size, new->q_base, new->q_base_pa);
err_out_free_cq_info:
vfree(new->cq.info);
@@ -718,6 +783,8 @@ static void ionic_qcq_sanitize(struct ionic_qcq *qcq)
qcq->cq.tail_idx = 0;
qcq->cq.done_color = 1;
memset(qcq->q_base, 0, qcq->q_size);
+ if (qcq->cmb_q_base)
+ memset_io(qcq->cmb_q_base, 0, qcq->cmb_q_size);
memset(qcq->cq_base, 0, qcq->cq_size);
memset(qcq->sg_base, 0, qcq->sg_size);
}
@@ -737,6 +804,7 @@ static int ionic_lif_txq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
.index = cpu_to_le32(q->index),
.flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
IONIC_QINIT_F_SG),
+ .intr_index = cpu_to_le16(qcq->intr.index),
.pid = cpu_to_le16(q->pid),
.ring_size = ilog2(q->num_descs),
.ring_base = cpu_to_le64(q->base_pa),
@@ -745,17 +813,19 @@ static int ionic_lif_txq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
.features = cpu_to_le64(q->features),
},
};
- unsigned int intr_index;
int err;
- intr_index = qcq->intr.index;
-
- ctx.cmd.q_init.intr_index = cpu_to_le16(intr_index);
+ if (qcq->flags & IONIC_QCQ_F_CMB_RINGS) {
+ ctx.cmd.q_init.flags |= cpu_to_le16(IONIC_QINIT_F_CMB);
+ ctx.cmd.q_init.ring_base = cpu_to_le64(qcq->cmb_q_base_pa);
+ }
dev_dbg(dev, "txq_init.pid %d\n", ctx.cmd.q_init.pid);
dev_dbg(dev, "txq_init.index %d\n", ctx.cmd.q_init.index);
dev_dbg(dev, "txq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
dev_dbg(dev, "txq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);
+ dev_dbg(dev, "txq_init.cq_ring_base 0x%llx\n", ctx.cmd.q_init.cq_ring_base);
+ dev_dbg(dev, "txq_init.sg_ring_base 0x%llx\n", ctx.cmd.q_init.sg_ring_base);
dev_dbg(dev, "txq_init.flags 0x%x\n", ctx.cmd.q_init.flags);
dev_dbg(dev, "txq_init.ver %d\n", ctx.cmd.q_init.ver);
dev_dbg(dev, "txq_init.intr_index %d\n", ctx.cmd.q_init.intr_index);
@@ -773,8 +843,14 @@ static int ionic_lif_txq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
dev_dbg(dev, "txq->hw_type %d\n", q->hw_type);
dev_dbg(dev, "txq->hw_index %d\n", q->hw_index);
- if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
+ q->dbell_deadline = IONIC_TX_DOORBELL_DEADLINE;
+ q->dbell_jiffies = jiffies;
+
+ if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state)) {
netif_napi_add(lif->netdev, &qcq->napi, ionic_tx_napi);
+ qcq->napi_qcq = qcq;
+ timer_setup(&qcq->napi_deadline, ionic_napi_deadline, 0);
+ }
qcq->flags |= IONIC_QCQ_F_INITED;
@@ -807,6 +883,11 @@ static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
};
int err;
+ if (qcq->flags & IONIC_QCQ_F_CMB_RINGS) {
+ ctx.cmd.q_init.flags |= cpu_to_le16(IONIC_QINIT_F_CMB);
+ ctx.cmd.q_init.ring_base = cpu_to_le64(qcq->cmb_q_base_pa);
+ }
+
dev_dbg(dev, "rxq_init.pid %d\n", ctx.cmd.q_init.pid);
dev_dbg(dev, "rxq_init.index %d\n", ctx.cmd.q_init.index);
dev_dbg(dev, "rxq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
@@ -828,11 +909,17 @@ static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
dev_dbg(dev, "rxq->hw_type %d\n", q->hw_type);
dev_dbg(dev, "rxq->hw_index %d\n", q->hw_index);
+ q->dbell_deadline = IONIC_RX_MIN_DOORBELL_DEADLINE;
+ q->dbell_jiffies = jiffies;
+
if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
netif_napi_add(lif->netdev, &qcq->napi, ionic_rx_napi);
else
netif_napi_add(lif->netdev, &qcq->napi, ionic_txrx_napi);
+ qcq->napi_qcq = qcq;
+ timer_setup(&qcq->napi_deadline, ionic_napi_deadline, 0);
+
qcq->flags |= IONIC_QCQ_F_INITED;
return 0;
@@ -1150,6 +1237,7 @@ static int ionic_adminq_napi(struct napi_struct *napi, int budget)
struct ionic_dev *idev = &lif->ionic->idev;
unsigned long irqflags;
unsigned int flags = 0;
+ bool resched = false;
int rx_work = 0;
int tx_work = 0;
int n_work = 0;
@@ -1187,6 +1275,16 @@ static int ionic_adminq_napi(struct napi_struct *napi, int budget)
ionic_intr_credits(idev->intr_ctrl, intr->index, credits, flags);
}
+ if (!a_work && ionic_adminq_poke_doorbell(&lif->adminqcq->q))
+ resched = true;
+ if (lif->hwstamp_rxq && !rx_work && ionic_rxq_poke_doorbell(&lif->hwstamp_rxq->q))
+ resched = true;
+ if (lif->hwstamp_txq && !tx_work && ionic_txq_poke_doorbell(&lif->hwstamp_txq->q))
+ resched = true;
+ if (resched)
+ mod_timer(&lif->adminqcq->napi_deadline,
+ jiffies + IONIC_NAPI_DEADLINE);
+
return work_done;
}
@@ -1966,8 +2064,13 @@ static int ionic_txrx_alloc(struct ionic_lif *lif)
sg_desc_sz = sizeof(struct ionic_txq_sg_desc);
flags = IONIC_QCQ_F_TX_STATS | IONIC_QCQ_F_SG;
+
+ if (test_bit(IONIC_LIF_F_CMB_TX_RINGS, lif->state))
+ flags |= IONIC_QCQ_F_CMB_RINGS;
+
if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
flags |= IONIC_QCQ_F_INTR;
+
for (i = 0; i < lif->nxqs; i++) {
err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags,
num_desc, desc_sz, comp_sz, sg_desc_sz,
@@ -1988,6 +2091,9 @@ static int ionic_txrx_alloc(struct ionic_lif *lif)
flags = IONIC_QCQ_F_RX_STATS | IONIC_QCQ_F_SG | IONIC_QCQ_F_INTR;
+ if (test_bit(IONIC_LIF_F_CMB_RX_RINGS, lif->state))
+ flags |= IONIC_QCQ_F_CMB_RINGS;
+
num_desc = lif->nrxq_descs;
desc_sz = sizeof(struct ionic_rxq_desc);
comp_sz = sizeof(struct ionic_rxq_comp);
@@ -2463,7 +2569,7 @@ static int ionic_set_vf_rate(struct net_device *netdev, int vf,
ret = ionic_set_vf_config(ionic, vf, &vfc);
if (!ret)
- lif->ionic->vfs[vf].maxrate = cpu_to_le32(tx_max);
+ ionic->vfs[vf].maxrate = cpu_to_le32(tx_max);
}
up_write(&ionic->vf_op_lock);
@@ -2663,6 +2769,55 @@ static const struct net_device_ops ionic_netdev_ops = {
.ndo_get_vf_stats = ionic_get_vf_stats,
};
+static int ionic_cmb_reconfig(struct ionic_lif *lif,
+ struct ionic_queue_params *qparam)
+{
+ struct ionic_queue_params start_qparams;
+ int err = 0;
+
+ /* When changing CMB queue parameters, we're using limited
+ * on-device memory and don't have extra memory to use for
+ * duplicate allocations, so we free it all first then
+ * re-allocate with the new parameters.
+ */
+
+ /* Checkpoint for possible unwind */
+ ionic_init_queue_params(lif, &start_qparams);
+
+ /* Stop and free the queues */
+ ionic_stop_queues_reconfig(lif);
+ ionic_txrx_free(lif);
+
+ /* Set up new qparams */
+ ionic_set_queue_params(lif, qparam);
+
+ if (netif_running(lif->netdev)) {
+ /* Alloc and start the new configuration */
+ err = ionic_txrx_alloc(lif);
+ if (err) {
+ dev_warn(lif->ionic->dev,
+ "CMB reconfig failed, restoring values: %d\n", err);
+
+ /* Back out the changes */
+ ionic_set_queue_params(lif, &start_qparams);
+ err = ionic_txrx_alloc(lif);
+ if (err) {
+ dev_err(lif->ionic->dev,
+ "CMB restore failed: %d\n", err);
+ goto errout;
+ }
+ }
+
+ ionic_start_queues_reconfig(lif);
+ } else {
+ /* This was detached in ionic_stop_queues_reconfig() */
+ netif_device_attach(lif->netdev);
+ }
+
+errout:
+ return err;
+}
+
static void ionic_swap_queues(struct ionic_qcq *a, struct ionic_qcq *b)
{
/* only swapping the queues, not the napi, flags, or other stuff */
@@ -2705,6 +2860,11 @@ int ionic_reconfigure_queues(struct ionic_lif *lif,
unsigned int flags, i;
int err = 0;
+ /* Are we changing q params while CMB is on */
+ if ((test_bit(IONIC_LIF_F_CMB_TX_RINGS, lif->state) && qparam->cmb_tx) ||
+ (test_bit(IONIC_LIF_F_CMB_RX_RINGS, lif->state) && qparam->cmb_rx))
+ return ionic_cmb_reconfig(lif, qparam);
+
/* allocate temporary qcq arrays to hold new queue structs */
if (qparam->nxqs != lif->nxqs || qparam->ntxq_descs != lif->ntxq_descs) {
tx_qcqs = devm_kcalloc(lif->ionic->dev, lif->ionic->ntxqs_per_lif,
@@ -2741,6 +2901,16 @@ int ionic_reconfigure_queues(struct ionic_lif *lif,
sg_desc_sz = sizeof(struct ionic_txq_sg_desc);
for (i = 0; i < qparam->nxqs; i++) {
+ /* If missing, short placeholder qcq needed for swap */
+ if (!lif->txqcqs[i]) {
+ flags = IONIC_QCQ_F_TX_STATS | IONIC_QCQ_F_SG;
+ err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags,
+ 4, desc_sz, comp_sz, sg_desc_sz,
+ lif->kern_pid, &lif->txqcqs[i]);
+ if (err)
+ goto err_out;
+ }
+
flags = lif->txqcqs[i]->flags & ~IONIC_QCQ_F_INTR;
err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags,
num_desc, desc_sz, comp_sz, sg_desc_sz,
@@ -2760,6 +2930,16 @@ int ionic_reconfigure_queues(struct ionic_lif *lif,
comp_sz *= 2;
for (i = 0; i < qparam->nxqs; i++) {
+ /* If missing, short placeholder qcq needed for swap */
+ if (!lif->rxqcqs[i]) {
+ flags = IONIC_QCQ_F_RX_STATS | IONIC_QCQ_F_SG;
+ err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags,
+ 4, desc_sz, comp_sz, sg_desc_sz,
+ lif->kern_pid, &lif->rxqcqs[i]);
+ if (err)
+ goto err_out;
+ }
+
flags = lif->rxqcqs[i]->flags & ~IONIC_QCQ_F_INTR;
err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags,
num_desc, desc_sz, comp_sz, sg_desc_sz,
@@ -2809,10 +2989,15 @@ int ionic_reconfigure_queues(struct ionic_lif *lif,
lif->tx_coalesce_hw = lif->rx_coalesce_hw;
}
- /* clear existing interrupt assignments */
+ /* Clear existing interrupt assignments. We check for NULL here
+ * because we're checking the whole array for potential qcqs, not
+ * just those qcqs that have just been set up.
+ */
for (i = 0; i < lif->ionic->ntxqs_per_lif; i++) {
- ionic_qcq_intr_free(lif, lif->txqcqs[i]);
- ionic_qcq_intr_free(lif, lif->rxqcqs[i]);
+ if (lif->txqcqs[i])
+ ionic_qcq_intr_free(lif, lif->txqcqs[i]);
+ if (lif->rxqcqs[i])
+ ionic_qcq_intr_free(lif, lif->rxqcqs[i]);
}
/* re-assign the interrupts */
@@ -3245,8 +3430,14 @@ static int ionic_lif_adminq_init(struct ionic_lif *lif)
dev_dbg(dev, "adminq->hw_type %d\n", q->hw_type);
dev_dbg(dev, "adminq->hw_index %d\n", q->hw_index);
+ q->dbell_deadline = IONIC_ADMIN_DOORBELL_DEADLINE;
+ q->dbell_jiffies = jiffies;
+
netif_napi_add(lif->netdev, &qcq->napi, ionic_adminq_napi);
+ qcq->napi_qcq = qcq;
+ timer_setup(&qcq->napi_deadline, ionic_napi_deadline, 0);
+
napi_enable(&qcq->napi);
if (qcq->flags & IONIC_QCQ_F_INTR)
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.h b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
index a53984bf3544..c9c4c46d5a16 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
@@ -59,6 +59,7 @@ struct ionic_rx_stats {
#define IONIC_QCQ_F_TX_STATS BIT(3)
#define IONIC_QCQ_F_RX_STATS BIT(4)
#define IONIC_QCQ_F_NOTIFYQ BIT(5)
+#define IONIC_QCQ_F_CMB_RINGS BIT(6)
struct ionic_qcq {
void *q_base;
@@ -70,12 +71,19 @@ struct ionic_qcq {
void *sg_base;
dma_addr_t sg_base_pa;
u32 sg_size;
+ void __iomem *cmb_q_base;
+ phys_addr_t cmb_q_base_pa;
+ u32 cmb_q_size;
+ u32 cmb_pgid;
+ u32 cmb_order;
struct dim dim;
struct ionic_queue q;
struct ionic_cq cq;
struct ionic_intr_info intr;
+ struct timer_list napi_deadline;
struct napi_struct napi;
unsigned int flags;
+ struct ionic_qcq *napi_qcq;
struct dentry *dentry;
};
@@ -140,6 +148,8 @@ enum ionic_lif_state_flags {
IONIC_LIF_F_BROKEN,
IONIC_LIF_F_TX_DIM_INTR,
IONIC_LIF_F_RX_DIM_INTR,
+ IONIC_LIF_F_CMB_TX_RINGS,
+ IONIC_LIF_F_CMB_RX_RINGS,
/* leave this as last */
IONIC_LIF_F_STATE_SIZE
@@ -243,8 +253,10 @@ struct ionic_queue_params {
unsigned int nxqs;
unsigned int ntxq_descs;
unsigned int nrxq_descs;
- unsigned int intr_split;
u64 rxq_features;
+ bool intr_split;
+ bool cmb_tx;
+ bool cmb_rx;
};
static inline void ionic_init_queue_params(struct ionic_lif *lif,
@@ -253,8 +265,34 @@ static inline void ionic_init_queue_params(struct ionic_lif *lif,
qparam->nxqs = lif->nxqs;
qparam->ntxq_descs = lif->ntxq_descs;
qparam->nrxq_descs = lif->nrxq_descs;
- qparam->intr_split = test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state);
qparam->rxq_features = lif->rxq_features;
+ qparam->intr_split = test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state);
+ qparam->cmb_tx = test_bit(IONIC_LIF_F_CMB_TX_RINGS, lif->state);
+ qparam->cmb_rx = test_bit(IONIC_LIF_F_CMB_RX_RINGS, lif->state);
+}
+
+static inline void ionic_set_queue_params(struct ionic_lif *lif,
+ struct ionic_queue_params *qparam)
+{
+ lif->nxqs = qparam->nxqs;
+ lif->ntxq_descs = qparam->ntxq_descs;
+ lif->nrxq_descs = qparam->nrxq_descs;
+ lif->rxq_features = qparam->rxq_features;
+
+ if (qparam->intr_split)
+ set_bit(IONIC_LIF_F_SPLIT_INTR, lif->state);
+ else
+ clear_bit(IONIC_LIF_F_SPLIT_INTR, lif->state);
+
+ if (qparam->cmb_tx)
+ set_bit(IONIC_LIF_F_CMB_TX_RINGS, lif->state);
+ else
+ clear_bit(IONIC_LIF_F_CMB_TX_RINGS, lif->state);
+
+ if (qparam->cmb_rx)
+ set_bit(IONIC_LIF_F_CMB_RX_RINGS, lif->state);
+ else
+ clear_bit(IONIC_LIF_F_CMB_RX_RINGS, lif->state);
}
static inline u32 ionic_coal_usec_to_hw(struct ionic *ionic, u32 usecs)
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_main.c b/drivers/net/ethernet/pensando/ionic/ionic_main.c
index a13530ec4dd8..1dc79cecc5cc 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_main.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_main.c
@@ -289,6 +289,35 @@ static void ionic_adminq_cb(struct ionic_queue *q,
complete_all(&ctx->work);
}
+bool ionic_adminq_poke_doorbell(struct ionic_queue *q)
+{
+ struct ionic_lif *lif = q->lif;
+ unsigned long now, then, dif;
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&lif->adminq_lock, irqflags);
+
+ if (q->tail_idx == q->head_idx) {
+ spin_unlock_irqrestore(&lif->adminq_lock, irqflags);
+ return false;
+ }
+
+ now = READ_ONCE(jiffies);
+ then = q->dbell_jiffies;
+ dif = now - then;
+
+ if (dif > q->dbell_deadline) {
+ ionic_dbell_ring(q->lif->kern_dbpage, q->hw_type,
+ q->dbval | q->head_idx);
+
+ q->dbell_jiffies = now;
+ }
+
+ spin_unlock_irqrestore(&lif->adminq_lock, irqflags);
+
+ return true;
+}
+
int ionic_adminq_post(struct ionic_lif *lif, struct ionic_admin_ctx *ctx)
{
struct ionic_desc_info *desc_info;
@@ -359,7 +388,7 @@ int ionic_adminq_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx,
break;
/* force a check of FW status and break out if FW reset */
- (void)ionic_heartbeat_check(lif->ionic);
+ ionic_heartbeat_check(lif->ionic);
if ((test_bit(IONIC_LIF_F_FW_RESET, lif->state) &&
!lif->ionic->idev.fw_status_ready) ||
test_bit(IONIC_LIF_F_FW_STOPPING, lif->state)) {
@@ -647,7 +676,7 @@ int ionic_port_init(struct ionic *ionic)
err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
ionic_dev_cmd_port_state(&ionic->idev, IONIC_PORT_ADMIN_STATE_UP);
- (void)ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
+ ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
mutex_unlock(&ionic->dev_cmd_lock);
if (err) {
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_phc.c b/drivers/net/ethernet/pensando/ionic/ionic_phc.c
index 887046838b3b..eac2f0e3576e 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_phc.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_phc.c
@@ -268,7 +268,7 @@ static u64 ionic_hwstamp_read(struct ionic *ionic,
u32 tick_high_before, tick_high, tick_low;
/* read and discard low part to defeat hw staging of high part */
- (void)ioread32(&ionic->idev.hwstamp_regs->tick_low);
+ ioread32(&ionic->idev.hwstamp_regs->tick_low);
tick_high_before = ioread32(&ionic->idev.hwstamp_regs->tick_high);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c b/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c
index b7363376dfc8..1ee2f285cb42 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c
@@ -604,14 +604,14 @@ loop_out:
* they can clear room for some new filters
*/
list_for_each_entry_safe(sync_item, spos, &sync_del_list, list) {
- (void)ionic_lif_filter_del(lif, &sync_item->f.cmd);
+ ionic_lif_filter_del(lif, &sync_item->f.cmd);
list_del(&sync_item->list);
devm_kfree(dev, sync_item);
}
list_for_each_entry_safe(sync_item, spos, &sync_add_list, list) {
- (void)ionic_lif_filter_add(lif, &sync_item->f.cmd);
+ ionic_lif_filter_add(lif, &sync_item->f.cmd);
list_del(&sync_item->list);
devm_kfree(dev, sync_item);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
index 0c3977416cd1..26798fc635db 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
@@ -22,6 +22,67 @@ static inline void ionic_rxq_post(struct ionic_queue *q, bool ring_dbell,
ionic_q_post(q, ring_dbell, cb_func, cb_arg);
}
+bool ionic_txq_poke_doorbell(struct ionic_queue *q)
+{
+ unsigned long now, then, dif;
+ struct netdev_queue *netdev_txq;
+ struct net_device *netdev;
+
+ netdev = q->lif->netdev;
+ netdev_txq = netdev_get_tx_queue(netdev, q->index);
+
+ HARD_TX_LOCK(netdev, netdev_txq, smp_processor_id());
+
+ if (q->tail_idx == q->head_idx) {
+ HARD_TX_UNLOCK(netdev, netdev_txq);
+ return false;
+ }
+
+ now = READ_ONCE(jiffies);
+ then = q->dbell_jiffies;
+ dif = now - then;
+
+ if (dif > q->dbell_deadline) {
+ ionic_dbell_ring(q->lif->kern_dbpage, q->hw_type,
+ q->dbval | q->head_idx);
+
+ q->dbell_jiffies = now;
+ }
+
+ HARD_TX_UNLOCK(netdev, netdev_txq);
+
+ return true;
+}
+
+bool ionic_rxq_poke_doorbell(struct ionic_queue *q)
+{
+ unsigned long now, then, dif;
+
+ /* no lock, called from rx napi or txrx napi, nothing else can fill */
+
+ if (q->tail_idx == q->head_idx)
+ return false;
+
+ now = READ_ONCE(jiffies);
+ then = q->dbell_jiffies;
+ dif = now - then;
+
+ if (dif > q->dbell_deadline) {
+ ionic_dbell_ring(q->lif->kern_dbpage, q->hw_type,
+ q->dbval | q->head_idx);
+
+ q->dbell_jiffies = now;
+
+ dif = 2 * q->dbell_deadline;
+ if (dif > IONIC_RX_MAX_DOORBELL_DEADLINE)
+ dif = IONIC_RX_MAX_DOORBELL_DEADLINE;
+
+ q->dbell_deadline = dif;
+ }
+
+ return true;
+}
+
static inline struct netdev_queue *q_to_ndq(struct ionic_queue *q)
{
return netdev_get_tx_queue(q->lif->netdev, q->index);
@@ -341,6 +402,14 @@ bool ionic_rx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
return true;
}
+static inline void ionic_write_cmb_desc(struct ionic_queue *q,
+ void __iomem *cmb_desc,
+ void *desc)
+{
+ if (q_to_qcq(q)->flags & IONIC_QCQ_F_CMB_RINGS)
+ memcpy_toio(cmb_desc, desc, q->desc_size);
+}
+
void ionic_rx_fill(struct ionic_queue *q)
{
struct net_device *netdev = q->lif->netdev;
@@ -419,11 +488,19 @@ void ionic_rx_fill(struct ionic_queue *q)
IONIC_RXQ_DESC_OPCODE_SIMPLE;
desc_info->nbufs = nfrags;
+ ionic_write_cmb_desc(q, desc_info->cmb_desc, desc);
+
ionic_rxq_post(q, false, ionic_rx_clean, NULL);
}
ionic_dbell_ring(q->lif->kern_dbpage, q->hw_type,
q->dbval | q->head_idx);
+
+ q->dbell_deadline = IONIC_RX_MIN_DOORBELL_DEADLINE;
+ q->dbell_jiffies = jiffies;
+
+ mod_timer(&q_to_qcq(q)->napi_qcq->napi_deadline,
+ jiffies + IONIC_NAPI_DEADLINE);
}
void ionic_rx_empty(struct ionic_queue *q)
@@ -511,6 +588,9 @@ int ionic_tx_napi(struct napi_struct *napi, int budget)
work_done, flags);
}
+ if (!work_done && ionic_txq_poke_doorbell(&qcq->q))
+ mod_timer(&qcq->napi_deadline, jiffies + IONIC_NAPI_DEADLINE);
+
return work_done;
}
@@ -544,23 +624,29 @@ int ionic_rx_napi(struct napi_struct *napi, int budget)
work_done, flags);
}
+ if (!work_done && ionic_rxq_poke_doorbell(&qcq->q))
+ mod_timer(&qcq->napi_deadline, jiffies + IONIC_NAPI_DEADLINE);
+
return work_done;
}
int ionic_txrx_napi(struct napi_struct *napi, int budget)
{
- struct ionic_qcq *qcq = napi_to_qcq(napi);
+ struct ionic_qcq *rxqcq = napi_to_qcq(napi);
struct ionic_cq *rxcq = napi_to_cq(napi);
unsigned int qi = rxcq->bound_q->index;
+ struct ionic_qcq *txqcq;
struct ionic_dev *idev;
struct ionic_lif *lif;
struct ionic_cq *txcq;
+ bool resched = false;
u32 rx_work_done = 0;
u32 tx_work_done = 0;
u32 flags = 0;
lif = rxcq->bound_q->lif;
idev = &lif->ionic->idev;
+ txqcq = lif->txqcqs[qi];
txcq = &lif->txqcqs[qi]->cq;
tx_work_done = ionic_cq_service(txcq, IONIC_TX_BUDGET_DEFAULT,
@@ -572,7 +658,7 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget)
ionic_rx_fill(rxcq->bound_q);
if (rx_work_done < budget && napi_complete_done(napi, rx_work_done)) {
- ionic_dim_update(qcq, 0);
+ ionic_dim_update(rxqcq, 0);
flags |= IONIC_INTR_CRED_UNMASK;
rxcq->bound_intr->rearm_count++;
}
@@ -583,6 +669,13 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget)
tx_work_done + rx_work_done, flags);
}
+ if (!rx_work_done && ionic_rxq_poke_doorbell(&rxqcq->q))
+ resched = true;
+ if (!tx_work_done && ionic_txq_poke_doorbell(&txqcq->q))
+ resched = true;
+ if (resched)
+ mod_timer(&rxqcq->napi_deadline, jiffies + IONIC_NAPI_DEADLINE);
+
return rx_work_done;
}
@@ -860,7 +953,8 @@ static int ionic_tx_tcp_pseudo_csum(struct sk_buff *skb)
return 0;
}
-static void ionic_tx_tso_post(struct ionic_queue *q, struct ionic_txq_desc *desc,
+static void ionic_tx_tso_post(struct ionic_queue *q,
+ struct ionic_desc_info *desc_info,
struct sk_buff *skb,
dma_addr_t addr, u8 nsge, u16 len,
unsigned int hdrlen, unsigned int mss,
@@ -868,6 +962,7 @@ static void ionic_tx_tso_post(struct ionic_queue *q, struct ionic_txq_desc *desc
u16 vlan_tci, bool has_vlan,
bool start, bool done)
{
+ struct ionic_txq_desc *desc = desc_info->desc;
u8 flags = 0;
u64 cmd;
@@ -883,6 +978,8 @@ static void ionic_tx_tso_post(struct ionic_queue *q, struct ionic_txq_desc *desc
desc->hdr_len = cpu_to_le16(hdrlen);
desc->mss = cpu_to_le16(mss);
+ ionic_write_cmb_desc(q, desc_info->cmb_desc, desc);
+
if (start) {
skb_tx_timestamp(skb);
if (!unlikely(q->features & IONIC_TXQ_F_HWSTAMP))
@@ -1001,7 +1098,7 @@ static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
seg_rem = min(tso_rem, mss);
done = (tso_rem == 0);
/* post descriptor */
- ionic_tx_tso_post(q, desc, skb,
+ ionic_tx_tso_post(q, desc_info, skb,
desc_addr, desc_nsge, desc_len,
hdrlen, mss, outer_csum, vlan_tci, has_vlan,
start, done);
@@ -1050,6 +1147,8 @@ static void ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb,
desc->csum_start = cpu_to_le16(skb_checksum_start_offset(skb));
desc->csum_offset = cpu_to_le16(skb->csum_offset);
+ ionic_write_cmb_desc(q, desc_info->cmb_desc, desc);
+
if (skb_csum_is_sctp(skb))
stats->crc32_csum++;
else
@@ -1087,6 +1186,8 @@ static void ionic_tx_calc_no_csum(struct ionic_queue *q, struct sk_buff *skb,
desc->csum_start = 0;
desc->csum_offset = 0;
+ ionic_write_cmb_desc(q, desc_info->cmb_desc, desc);
+
stats->csum_none++;
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_devlink.c b/drivers/net/ethernet/qlogic/qed/qed_devlink.c
index 922c47797af6..be5cc8b79bd5 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_devlink.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_devlink.c
@@ -198,7 +198,6 @@ static const struct devlink_ops qed_dl_ops = {
struct devlink *qed_devlink_register(struct qed_dev *cdev)
{
- union devlink_param_value value;
struct qed_devlink *qdevlink;
struct devlink *dl;
int rc;
@@ -216,11 +215,6 @@ struct devlink *qed_devlink_register(struct qed_dev *cdev)
if (rc)
goto err_unregister;
- value.vbool = false;
- devlink_param_driverinit_value_set(dl,
- QED_DEVLINK_PARAM_ID_IWARP_CMT,
- value);
-
cdev->iwarp_cmt = false;
qed_fw_reporters_create(dl);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
index 0848b5529d48..2bf18748581d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
@@ -831,7 +831,7 @@ static int qed_iov_enable_vf_access(struct qed_hwfn *p_hwfn,
* @p_hwfn: HW device data.
* @p_ptt: PTT window for writing the registers.
* @vf: VF info data.
- * @enable: The actual permision for this VF.
+ * @enable: The actual permission for this VF.
*
* In E4, queue zone permission table size is 320x9. There
* are 320 VF queues for single engine device (256 for dual
diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c
index 7c2af482192d..cb1746bc0e0c 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_fp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c
@@ -1438,6 +1438,10 @@ int qede_poll(struct napi_struct *napi, int budget)
rx_work_done = (likely(fp->type & QEDE_FASTPATH_RX) &&
qede_has_rx_work(fp->rxq)) ?
qede_rx_int(fp, budget) : 0;
+
+ if (fp->xdp_xmit & QEDE_XDP_REDIRECT)
+ xdp_do_flush();
+
/* Handle case where we are called by netpoll with a budget of 0 */
if (rx_work_done < budget || !budget) {
if (!qede_poll_is_more_work(fp)) {
@@ -1457,9 +1461,6 @@ int qede_poll(struct napi_struct *napi, int budget)
qede_update_tx_producer(fp->xdp_tx);
}
- if (fp->xdp_xmit & QEDE_XDP_REDIRECT)
- xdp_do_flush_map();
-
return rx_work_done;
}
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 953f304b8588..4a3c3b5fb4a1 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -892,6 +892,9 @@ static void qede_init_ndev(struct qede_dev *edev)
ndev->hw_features = hw_features;
+ ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT;
+
/* MTU range: 46 - 9600 */
ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
ndev->max_mtu = QEDE_MAX_JUMBO_PACKET_SIZE;
@@ -970,8 +973,15 @@ static int qede_alloc_fp_array(struct qede_dev *edev)
goto err;
}
- mem = krealloc(edev->coal_entry, QEDE_QUEUE_CNT(edev) *
- sizeof(*edev->coal_entry), GFP_KERNEL);
+ if (!edev->coal_entry) {
+ mem = kcalloc(QEDE_MAX_RSS_CNT(edev),
+ sizeof(*edev->coal_entry), GFP_KERNEL);
+ } else {
+ mem = krealloc(edev->coal_entry,
+ QEDE_QUEUE_CNT(edev) * sizeof(*edev->coal_entry),
+ GFP_KERNEL);
+ }
+
if (!mem) {
DP_ERR(edev, "coalesce entry allocation failed\n");
kfree(edev->coal_entry);
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
index 27b1663c476e..39d24e07f306 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
@@ -12,6 +12,7 @@
#include "rmnet_handlers.h"
#include "rmnet_vnd.h"
#include "rmnet_private.h"
+#include "rmnet_map.h"
/* Local Definitions and Declarations */
@@ -39,6 +40,8 @@ static int rmnet_unregister_real_device(struct net_device *real_dev)
if (port->nr_rmnet_devs)
return -EINVAL;
+ rmnet_map_tx_aggregate_exit(port);
+
netdev_rx_handler_unregister(real_dev);
kfree(port);
@@ -79,6 +82,8 @@ static int rmnet_register_real_device(struct net_device *real_dev,
for (entry = 0; entry < RMNET_MAX_LOGICAL_EP; entry++)
INIT_HLIST_HEAD(&port->muxed_ep[entry]);
+ rmnet_map_tx_aggregate_init(port);
+
netdev_dbg(real_dev, "registered with rmnet\n");
return 0;
}
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h
index 3d3cba56c516..ed112d51ac5a 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h
@@ -6,6 +6,7 @@
*/
#include <linux/skbuff.h>
+#include <linux/time.h>
#include <net/gro_cells.h>
#ifndef _RMNET_CONFIG_H_
@@ -19,6 +20,12 @@ struct rmnet_endpoint {
struct hlist_node hlnode;
};
+struct rmnet_egress_agg_params {
+ u32 bytes;
+ u32 count;
+ u64 time_nsec;
+};
+
/* One instance of this structure is instantiated for each real_dev associated
* with rmnet.
*/
@@ -30,6 +37,19 @@ struct rmnet_port {
struct hlist_head muxed_ep[RMNET_MAX_LOGICAL_EP];
struct net_device *bridge_ep;
struct net_device *rmnet_dev;
+
+ /* Egress aggregation information */
+ struct rmnet_egress_agg_params egress_agg_params;
+ /* Protect aggregation related elements */
+ spinlock_t agg_lock;
+ struct sk_buff *skbagg_head;
+ struct sk_buff *skbagg_tail;
+ int agg_state;
+ u8 agg_count;
+ struct timespec64 agg_time;
+ struct timespec64 agg_last;
+ struct hrtimer hrtimer;
+ struct work_struct agg_wq;
};
extern struct rtnl_link_ops rmnet_link_ops;
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
index a313242a762e..9f3479500f85 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
@@ -164,8 +164,18 @@ static int rmnet_map_egress_handler(struct sk_buff *skb,
map_header->mux_id = mux_id;
- skb->protocol = htons(ETH_P_MAP);
+ if (READ_ONCE(port->egress_agg_params.count) > 1) {
+ unsigned int len;
+
+ len = rmnet_map_tx_aggregate(skb, port, orig_dev);
+ if (likely(len)) {
+ rmnet_vnd_tx_fixup_len(len, orig_dev);
+ return -EINPROGRESS;
+ }
+ return -ENOMEM;
+ }
+ skb->protocol = htons(ETH_P_MAP);
return 0;
}
@@ -235,6 +245,7 @@ void rmnet_egress_handler(struct sk_buff *skb)
struct rmnet_port *port;
struct rmnet_priv *priv;
u8 mux_id;
+ int err;
sk_pacing_shift_update(skb->sk, 8);
@@ -247,8 +258,11 @@ void rmnet_egress_handler(struct sk_buff *skb)
if (!port)
goto drop;
- if (rmnet_map_egress_handler(skb, port, mux_id, orig_dev))
+ err = rmnet_map_egress_handler(skb, port, mux_id, orig_dev);
+ if (err == -ENOMEM)
goto drop;
+ else if (err == -EINPROGRESS)
+ return;
rmnet_vnd_tx_fixup(skb, orig_dev);
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h
index 2b033060fc20..b70284095568 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h
@@ -53,5 +53,11 @@ void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
struct net_device *orig_dev,
int csum_type);
int rmnet_map_process_next_hdr_packet(struct sk_buff *skb, u16 len);
+unsigned int rmnet_map_tx_aggregate(struct sk_buff *skb, struct rmnet_port *port,
+ struct net_device *orig_dev);
+void rmnet_map_tx_aggregate_init(struct rmnet_port *port);
+void rmnet_map_tx_aggregate_exit(struct rmnet_port *port);
+void rmnet_map_update_ul_agg_config(struct rmnet_port *port, u32 size,
+ u32 count, u32 time);
#endif /* _RMNET_MAP_H_ */
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
index ba194698cc14..a5e3d1a88305 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
@@ -12,6 +12,7 @@
#include "rmnet_config.h"
#include "rmnet_map.h"
#include "rmnet_private.h"
+#include "rmnet_vnd.h"
#define RMNET_MAP_DEAGGR_SPACING 64
#define RMNET_MAP_DEAGGR_HEADROOM (RMNET_MAP_DEAGGR_SPACING / 2)
@@ -518,3 +519,193 @@ int rmnet_map_process_next_hdr_packet(struct sk_buff *skb,
return 0;
}
+
+#define RMNET_AGG_BYPASS_TIME_NSEC 10000000L
+
+static void reset_aggr_params(struct rmnet_port *port)
+{
+ port->skbagg_head = NULL;
+ port->agg_count = 0;
+ port->agg_state = 0;
+ memset(&port->agg_time, 0, sizeof(struct timespec64));
+}
+
+static void rmnet_send_skb(struct rmnet_port *port, struct sk_buff *skb)
+{
+ if (skb_needs_linearize(skb, port->dev->features)) {
+ if (unlikely(__skb_linearize(skb))) {
+ struct rmnet_priv *priv;
+
+ priv = netdev_priv(port->rmnet_dev);
+ this_cpu_inc(priv->pcpu_stats->stats.tx_drops);
+ dev_kfree_skb_any(skb);
+ return;
+ }
+ }
+
+ dev_queue_xmit(skb);
+}
+
+static void rmnet_map_flush_tx_packet_work(struct work_struct *work)
+{
+ struct sk_buff *skb = NULL;
+ struct rmnet_port *port;
+
+ port = container_of(work, struct rmnet_port, agg_wq);
+
+ spin_lock_bh(&port->agg_lock);
+ if (likely(port->agg_state == -EINPROGRESS)) {
+ /* Buffer may have already been shipped out */
+ if (likely(port->skbagg_head)) {
+ skb = port->skbagg_head;
+ reset_aggr_params(port);
+ }
+ port->agg_state = 0;
+ }
+
+ spin_unlock_bh(&port->agg_lock);
+ if (skb)
+ rmnet_send_skb(port, skb);
+}
+
+static enum hrtimer_restart rmnet_map_flush_tx_packet_queue(struct hrtimer *t)
+{
+ struct rmnet_port *port;
+
+ port = container_of(t, struct rmnet_port, hrtimer);
+
+ schedule_work(&port->agg_wq);
+
+ return HRTIMER_NORESTART;
+}
+
+unsigned int rmnet_map_tx_aggregate(struct sk_buff *skb, struct rmnet_port *port,
+ struct net_device *orig_dev)
+{
+ struct timespec64 diff, last;
+ unsigned int len = skb->len;
+ struct sk_buff *agg_skb;
+ int size;
+
+ spin_lock_bh(&port->agg_lock);
+ memcpy(&last, &port->agg_last, sizeof(struct timespec64));
+ ktime_get_real_ts64(&port->agg_last);
+
+ if (!port->skbagg_head) {
+ /* Check to see if we should agg first. If the traffic is very
+ * sparse, don't aggregate.
+ */
+new_packet:
+ diff = timespec64_sub(port->agg_last, last);
+ size = port->egress_agg_params.bytes - skb->len;
+
+ if (size < 0) {
+ /* dropped */
+ spin_unlock_bh(&port->agg_lock);
+ return 0;
+ }
+
+ if (diff.tv_sec > 0 || diff.tv_nsec > RMNET_AGG_BYPASS_TIME_NSEC ||
+ size == 0)
+ goto no_aggr;
+
+ port->skbagg_head = skb_copy_expand(skb, 0, size, GFP_ATOMIC);
+ if (!port->skbagg_head)
+ goto no_aggr;
+
+ dev_kfree_skb_any(skb);
+ port->skbagg_head->protocol = htons(ETH_P_MAP);
+ port->agg_count = 1;
+ ktime_get_real_ts64(&port->agg_time);
+ skb_frag_list_init(port->skbagg_head);
+ goto schedule;
+ }
+ diff = timespec64_sub(port->agg_last, port->agg_time);
+ size = port->egress_agg_params.bytes - port->skbagg_head->len;
+
+ if (skb->len > size) {
+ agg_skb = port->skbagg_head;
+ reset_aggr_params(port);
+ spin_unlock_bh(&port->agg_lock);
+ hrtimer_cancel(&port->hrtimer);
+ rmnet_send_skb(port, agg_skb);
+ spin_lock_bh(&port->agg_lock);
+ goto new_packet;
+ }
+
+ if (skb_has_frag_list(port->skbagg_head))
+ port->skbagg_tail->next = skb;
+ else
+ skb_shinfo(port->skbagg_head)->frag_list = skb;
+
+ port->skbagg_head->len += skb->len;
+ port->skbagg_head->data_len += skb->len;
+ port->skbagg_head->truesize += skb->truesize;
+ port->skbagg_tail = skb;
+ port->agg_count++;
+
+ if (diff.tv_sec > 0 || diff.tv_nsec > port->egress_agg_params.time_nsec ||
+ port->agg_count >= port->egress_agg_params.count ||
+ port->skbagg_head->len == port->egress_agg_params.bytes) {
+ agg_skb = port->skbagg_head;
+ reset_aggr_params(port);
+ spin_unlock_bh(&port->agg_lock);
+ hrtimer_cancel(&port->hrtimer);
+ rmnet_send_skb(port, agg_skb);
+ return len;
+ }
+
+schedule:
+ if (!hrtimer_active(&port->hrtimer) && port->agg_state != -EINPROGRESS) {
+ port->agg_state = -EINPROGRESS;
+ hrtimer_start(&port->hrtimer,
+ ns_to_ktime(port->egress_agg_params.time_nsec),
+ HRTIMER_MODE_REL);
+ }
+ spin_unlock_bh(&port->agg_lock);
+
+ return len;
+
+no_aggr:
+ spin_unlock_bh(&port->agg_lock);
+ skb->protocol = htons(ETH_P_MAP);
+ dev_queue_xmit(skb);
+
+ return len;
+}
+
+void rmnet_map_update_ul_agg_config(struct rmnet_port *port, u32 size,
+ u32 count, u32 time)
+{
+ spin_lock_bh(&port->agg_lock);
+ port->egress_agg_params.bytes = size;
+ WRITE_ONCE(port->egress_agg_params.count, count);
+ port->egress_agg_params.time_nsec = time * NSEC_PER_USEC;
+ spin_unlock_bh(&port->agg_lock);
+}
+
+void rmnet_map_tx_aggregate_init(struct rmnet_port *port)
+{
+ hrtimer_init(&port->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ port->hrtimer.function = rmnet_map_flush_tx_packet_queue;
+ spin_lock_init(&port->agg_lock);
+ rmnet_map_update_ul_agg_config(port, 4096, 1, 800);
+ INIT_WORK(&port->agg_wq, rmnet_map_flush_tx_packet_work);
+}
+
+void rmnet_map_tx_aggregate_exit(struct rmnet_port *port)
+{
+ hrtimer_cancel(&port->hrtimer);
+ cancel_work_sync(&port->agg_wq);
+
+ spin_lock_bh(&port->agg_lock);
+ if (port->agg_state == -EINPROGRESS) {
+ if (port->skbagg_head) {
+ dev_kfree_skb_any(port->skbagg_head);
+ reset_aggr_params(port);
+ }
+
+ port->agg_state = 0;
+ }
+ spin_unlock_bh(&port->agg_lock);
+}
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
index 3f5e6572d20e..046b5f7d8e7c 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
@@ -29,7 +29,7 @@ void rmnet_vnd_rx_fixup(struct sk_buff *skb, struct net_device *dev)
u64_stats_update_end(&pcpu_ptr->syncp);
}
-void rmnet_vnd_tx_fixup(struct sk_buff *skb, struct net_device *dev)
+void rmnet_vnd_tx_fixup_len(unsigned int len, struct net_device *dev)
{
struct rmnet_priv *priv = netdev_priv(dev);
struct rmnet_pcpu_stats *pcpu_ptr;
@@ -38,10 +38,15 @@ void rmnet_vnd_tx_fixup(struct sk_buff *skb, struct net_device *dev)
u64_stats_update_begin(&pcpu_ptr->syncp);
pcpu_ptr->stats.tx_pkts++;
- pcpu_ptr->stats.tx_bytes += skb->len;
+ pcpu_ptr->stats.tx_bytes += len;
u64_stats_update_end(&pcpu_ptr->syncp);
}
+void rmnet_vnd_tx_fixup(struct sk_buff *skb, struct net_device *dev)
+{
+ rmnet_vnd_tx_fixup_len(skb->len, dev);
+}
+
/* Network Device Operations */
static netdev_tx_t rmnet_vnd_start_xmit(struct sk_buff *skb,
@@ -210,7 +215,52 @@ static void rmnet_get_ethtool_stats(struct net_device *dev,
memcpy(data, st, ARRAY_SIZE(rmnet_gstrings_stats) * sizeof(u64));
}
+static int rmnet_get_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *coal,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ struct rmnet_priv *priv = netdev_priv(dev);
+ struct rmnet_port *port;
+
+ port = rmnet_get_port_rtnl(priv->real_dev);
+
+ memset(kernel_coal, 0, sizeof(*kernel_coal));
+ kernel_coal->tx_aggr_max_bytes = port->egress_agg_params.bytes;
+ kernel_coal->tx_aggr_max_frames = port->egress_agg_params.count;
+ kernel_coal->tx_aggr_time_usecs = div_u64(port->egress_agg_params.time_nsec,
+ NSEC_PER_USEC);
+
+ return 0;
+}
+
+static int rmnet_set_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *coal,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ struct rmnet_priv *priv = netdev_priv(dev);
+ struct rmnet_port *port;
+
+ port = rmnet_get_port_rtnl(priv->real_dev);
+
+ if (kernel_coal->tx_aggr_max_frames < 1 || kernel_coal->tx_aggr_max_frames > 64)
+ return -EINVAL;
+
+ if (kernel_coal->tx_aggr_max_bytes > 32768)
+ return -EINVAL;
+
+ rmnet_map_update_ul_agg_config(port, kernel_coal->tx_aggr_max_bytes,
+ kernel_coal->tx_aggr_max_frames,
+ kernel_coal->tx_aggr_time_usecs);
+
+ return 0;
+}
+
static const struct ethtool_ops rmnet_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_TX_AGGR,
+ .get_coalesce = rmnet_get_coalesce,
+ .set_coalesce = rmnet_set_coalesce,
.get_ethtool_stats = rmnet_get_ethtool_stats,
.get_strings = rmnet_get_strings,
.get_sset_count = rmnet_get_sset_count,
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h
index dc3a4443ef0a..c2b2baf86894 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h
@@ -16,6 +16,7 @@ int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev,
int rmnet_vnd_dellink(u8 id, struct rmnet_port *port,
struct rmnet_endpoint *ep);
void rmnet_vnd_rx_fixup(struct sk_buff *skb, struct net_device *dev);
+void rmnet_vnd_tx_fixup_len(unsigned int len, struct net_device *dev);
void rmnet_vnd_tx_fixup(struct sk_buff *skb, struct net_device *dev);
void rmnet_vnd_setup(struct net_device *dev);
int rmnet_vnd_validate_real_dev_mtu(struct net_device *real_dev);
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index dadd61bccfe7..45147a1016be 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -576,6 +576,7 @@ struct rtl8169_tc_offsets {
enum rtl_flag {
RTL_FLAG_TASK_ENABLED = 0,
RTL_FLAG_TASK_RESET_PENDING,
+ RTL_FLAG_TASK_TX_TIMEOUT,
RTL_FLAG_MAX
};
@@ -3928,7 +3929,7 @@ static void rtl8169_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct rtl8169_private *tp = netdev_priv(dev);
- rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
+ rtl_schedule_task(tp, RTL_FLAG_TASK_TX_TIMEOUT);
}
static int rtl8169_tx_map(struct rtl8169_private *tp, const u32 *opts, u32 len,
@@ -4522,6 +4523,7 @@ static void rtl_task(struct work_struct *work)
{
struct rtl8169_private *tp =
container_of(work, struct rtl8169_private, wk.work);
+ int ret;
rtnl_lock();
@@ -4529,7 +4531,27 @@ static void rtl_task(struct work_struct *work)
!test_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags))
goto out_unlock;
+ if (test_and_clear_bit(RTL_FLAG_TASK_TX_TIMEOUT, tp->wk.flags)) {
+ /* if chip isn't accessible, reset bus to revive it */
+ if (RTL_R32(tp, TxConfig) == ~0) {
+ ret = pci_reset_bus(tp->pci_dev);
+ if (ret < 0) {
+ netdev_err(tp->dev, "Can't reset secondary PCI bus, detach NIC\n");
+ netif_device_detach(tp->dev);
+ goto out_unlock;
+ }
+ }
+
+ /* ASPM compatibility issues are a typical reason for tx timeouts */
+ ret = pci_disable_link_state(tp->pci_dev, PCIE_LINK_STATE_L1 |
+ PCIE_LINK_STATE_L0S);
+ if (!ret)
+ netdev_warn_once(tp->dev, "ASPM disabled on Tx timeout\n");
+ goto reset;
+ }
+
if (test_and_clear_bit(RTL_FLAG_TASK_RESET_PENDING, tp->wk.flags)) {
+reset:
rtl_reset_work(tp);
netif_wake_queue(tp->dev);
}
diff --git a/drivers/net/ethernet/renesas/rswitch.c b/drivers/net/ethernet/renesas/rswitch.c
index 2370c7797a0a..853394e5bb8b 100644
--- a/drivers/net/ethernet/renesas/rswitch.c
+++ b/drivers/net/ethernet/renesas/rswitch.c
@@ -16,7 +16,6 @@
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
-#include <linux/phylink.h>
#include <linux/phy/phy.h>
#include <linux/pm_runtime.h>
#include <linux/rtnetlink.h>
@@ -124,13 +123,6 @@ static void rswitch_fwd_init(struct rswitch_private *priv)
iowrite32(GENMASK(RSWITCH_NUM_PORTS - 1, 0), priv->addr + FWPBFC(priv->gwca.index));
}
-/* gPTP timer (gPTP) */
-static void rswitch_get_timestamp(struct rswitch_private *priv,
- struct timespec64 *ts)
-{
- priv->ptp_priv->info.gettime64(&priv->ptp_priv->info, ts);
-}
-
/* Gateway CPU agent block (GWCA) */
static int rswitch_gwca_change_mode(struct rswitch_private *priv,
enum rswitch_gwca_mode mode)
@@ -241,7 +233,7 @@ static int rswitch_get_num_cur_queues(struct rswitch_gwca_queue *gq)
static bool rswitch_is_queue_rxed(struct rswitch_gwca_queue *gq)
{
- struct rswitch_ext_ts_desc *desc = &gq->ts_ring[gq->dirty];
+ struct rswitch_ext_ts_desc *desc = &gq->rx_ring[gq->dirty];
if ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY)
return true;
@@ -281,36 +273,43 @@ static void rswitch_gwca_queue_free(struct net_device *ndev,
{
int i;
- if (gq->gptp) {
+ if (!gq->dir_tx) {
dma_free_coherent(ndev->dev.parent,
sizeof(struct rswitch_ext_ts_desc) *
- (gq->ring_size + 1), gq->ts_ring, gq->ring_dma);
- gq->ts_ring = NULL;
- } else {
- dma_free_coherent(ndev->dev.parent,
- sizeof(struct rswitch_ext_desc) *
- (gq->ring_size + 1), gq->ring, gq->ring_dma);
- gq->ring = NULL;
- }
+ (gq->ring_size + 1), gq->rx_ring, gq->ring_dma);
+ gq->rx_ring = NULL;
- if (!gq->dir_tx) {
for (i = 0; i < gq->ring_size; i++)
dev_kfree_skb(gq->skbs[i]);
+ } else {
+ dma_free_coherent(ndev->dev.parent,
+ sizeof(struct rswitch_ext_desc) *
+ (gq->ring_size + 1), gq->tx_ring, gq->ring_dma);
+ gq->tx_ring = NULL;
}
kfree(gq->skbs);
gq->skbs = NULL;
}
+static void rswitch_gwca_ts_queue_free(struct rswitch_private *priv)
+{
+ struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue;
+
+ dma_free_coherent(&priv->pdev->dev,
+ sizeof(struct rswitch_ts_desc) * (gq->ring_size + 1),
+ gq->ts_ring, gq->ring_dma);
+ gq->ts_ring = NULL;
+}
+
static int rswitch_gwca_queue_alloc(struct net_device *ndev,
struct rswitch_private *priv,
struct rswitch_gwca_queue *gq,
- bool dir_tx, bool gptp, int ring_size)
+ bool dir_tx, int ring_size)
{
int i, bit;
gq->dir_tx = dir_tx;
- gq->gptp = gptp;
gq->ring_size = ring_size;
gq->ndev = ndev;
@@ -318,18 +317,19 @@ static int rswitch_gwca_queue_alloc(struct net_device *ndev,
if (!gq->skbs)
return -ENOMEM;
- if (!dir_tx)
+ if (!dir_tx) {
rswitch_gwca_queue_alloc_skb(gq, 0, gq->ring_size);
- if (gptp)
- gq->ts_ring = dma_alloc_coherent(ndev->dev.parent,
+ gq->rx_ring = dma_alloc_coherent(ndev->dev.parent,
sizeof(struct rswitch_ext_ts_desc) *
(gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL);
- else
- gq->ring = dma_alloc_coherent(ndev->dev.parent,
- sizeof(struct rswitch_ext_desc) *
- (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL);
- if (!gq->ts_ring && !gq->ring)
+ } else {
+ gq->tx_ring = dma_alloc_coherent(ndev->dev.parent,
+ sizeof(struct rswitch_ext_desc) *
+ (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL);
+ }
+
+ if (!gq->rx_ring && !gq->tx_ring)
goto out;
i = gq->index / 32;
@@ -347,6 +347,17 @@ out:
return -ENOMEM;
}
+static int rswitch_gwca_ts_queue_alloc(struct rswitch_private *priv)
+{
+ struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue;
+
+ gq->ring_size = TS_RING_SIZE;
+ gq->ts_ring = dma_alloc_coherent(&priv->pdev->dev,
+ sizeof(struct rswitch_ts_desc) *
+ (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL);
+ return !gq->ts_ring ? -ENOMEM : 0;
+}
+
static void rswitch_desc_set_dptr(struct rswitch_desc *desc, dma_addr_t addr)
{
desc->dptrl = cpu_to_le32(lower_32_bits(addr));
@@ -362,14 +373,14 @@ static int rswitch_gwca_queue_format(struct net_device *ndev,
struct rswitch_private *priv,
struct rswitch_gwca_queue *gq)
{
- int tx_ring_size = sizeof(struct rswitch_ext_desc) * gq->ring_size;
+ int ring_size = sizeof(struct rswitch_ext_desc) * gq->ring_size;
struct rswitch_ext_desc *desc;
struct rswitch_desc *linkfix;
dma_addr_t dma_addr;
int i;
- memset(gq->ring, 0, tx_ring_size);
- for (i = 0, desc = gq->ring; i < gq->ring_size; i++, desc++) {
+ memset(gq->tx_ring, 0, ring_size);
+ for (i = 0, desc = gq->tx_ring; i < gq->ring_size; i++, desc++) {
if (!gq->dir_tx) {
dma_addr = dma_map_single(ndev->dev.parent,
gq->skbs[i]->data, PKT_BUF_SZ,
@@ -387,7 +398,7 @@ static int rswitch_gwca_queue_format(struct net_device *ndev,
rswitch_desc_set_dptr(&desc->desc, gq->ring_dma);
desc->desc.die_dt = DT_LINKFIX;
- linkfix = &priv->linkfix_table[gq->index];
+ linkfix = &priv->gwca.linkfix_table[gq->index];
linkfix->die_dt = DT_LINKFIX;
rswitch_desc_set_dptr(linkfix, gq->ring_dma);
@@ -398,7 +409,7 @@ static int rswitch_gwca_queue_format(struct net_device *ndev,
err:
if (!gq->dir_tx) {
- for (i--, desc = gq->ring; i >= 0; i--, desc++) {
+ for (i--, desc = gq->tx_ring; i >= 0; i--, desc++) {
dma_addr = rswitch_desc_get_dptr(&desc->desc);
dma_unmap_single(ndev->dev.parent, dma_addr, PKT_BUF_SZ,
DMA_FROM_DEVICE);
@@ -408,9 +419,23 @@ err:
return -ENOMEM;
}
-static int rswitch_gwca_queue_ts_fill(struct net_device *ndev,
- struct rswitch_gwca_queue *gq,
- int start_index, int num)
+static void rswitch_gwca_ts_queue_fill(struct rswitch_private *priv,
+ int start_index, int num)
+{
+ struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue;
+ struct rswitch_ts_desc *desc;
+ int i, index;
+
+ for (i = 0; i < num; i++) {
+ index = (i + start_index) % gq->ring_size;
+ desc = &gq->ts_ring[index];
+ desc->desc.die_dt = DT_FEMPTY_ND | DIE;
+ }
+}
+
+static int rswitch_gwca_queue_ext_ts_fill(struct net_device *ndev,
+ struct rswitch_gwca_queue *gq,
+ int start_index, int num)
{
struct rswitch_device *rdev = netdev_priv(ndev);
struct rswitch_ext_ts_desc *desc;
@@ -419,7 +444,7 @@ static int rswitch_gwca_queue_ts_fill(struct net_device *ndev,
for (i = 0; i < num; i++) {
index = (i + start_index) % gq->ring_size;
- desc = &gq->ts_ring[index];
+ desc = &gq->rx_ring[index];
if (!gq->dir_tx) {
dma_addr = dma_map_single(ndev->dev.parent,
gq->skbs[index]->data, PKT_BUF_SZ,
@@ -443,7 +468,7 @@ err:
if (!gq->dir_tx) {
for (i--; i >= 0; i--) {
index = (i + start_index) % gq->ring_size;
- desc = &gq->ts_ring[index];
+ desc = &gq->rx_ring[index];
dma_addr = rswitch_desc_get_dptr(&desc->desc);
dma_unmap_single(ndev->dev.parent, dma_addr, PKT_BUF_SZ,
DMA_FROM_DEVICE);
@@ -453,25 +478,25 @@ err:
return -ENOMEM;
}
-static int rswitch_gwca_queue_ts_format(struct net_device *ndev,
- struct rswitch_private *priv,
- struct rswitch_gwca_queue *gq)
+static int rswitch_gwca_queue_ext_ts_format(struct net_device *ndev,
+ struct rswitch_private *priv,
+ struct rswitch_gwca_queue *gq)
{
- int tx_ts_ring_size = sizeof(struct rswitch_ext_ts_desc) * gq->ring_size;
+ int ring_size = sizeof(struct rswitch_ext_ts_desc) * gq->ring_size;
struct rswitch_ext_ts_desc *desc;
struct rswitch_desc *linkfix;
int err;
- memset(gq->ts_ring, 0, tx_ts_ring_size);
- err = rswitch_gwca_queue_ts_fill(ndev, gq, 0, gq->ring_size);
+ memset(gq->rx_ring, 0, ring_size);
+ err = rswitch_gwca_queue_ext_ts_fill(ndev, gq, 0, gq->ring_size);
if (err < 0)
return err;
- desc = &gq->ts_ring[gq->ring_size]; /* Last */
+ desc = &gq->rx_ring[gq->ring_size]; /* Last */
rswitch_desc_set_dptr(&desc->desc, gq->ring_dma);
desc->desc.die_dt = DT_LINKFIX;
- linkfix = &priv->linkfix_table[gq->index];
+ linkfix = &priv->gwca.linkfix_table[gq->index];
linkfix->die_dt = DT_LINKFIX;
rswitch_desc_set_dptr(linkfix, gq->ring_dma);
@@ -481,28 +506,31 @@ static int rswitch_gwca_queue_ts_format(struct net_device *ndev,
return 0;
}
-static int rswitch_gwca_desc_alloc(struct rswitch_private *priv)
+static int rswitch_gwca_linkfix_alloc(struct rswitch_private *priv)
{
int i, num_queues = priv->gwca.num_queues;
+ struct rswitch_gwca *gwca = &priv->gwca;
struct device *dev = &priv->pdev->dev;
- priv->linkfix_table_size = sizeof(struct rswitch_desc) * num_queues;
- priv->linkfix_table = dma_alloc_coherent(dev, priv->linkfix_table_size,
- &priv->linkfix_table_dma, GFP_KERNEL);
- if (!priv->linkfix_table)
+ gwca->linkfix_table_size = sizeof(struct rswitch_desc) * num_queues;
+ gwca->linkfix_table = dma_alloc_coherent(dev, gwca->linkfix_table_size,
+ &gwca->linkfix_table_dma, GFP_KERNEL);
+ if (!gwca->linkfix_table)
return -ENOMEM;
for (i = 0; i < num_queues; i++)
- priv->linkfix_table[i].die_dt = DT_EOS;
+ gwca->linkfix_table[i].die_dt = DT_EOS;
return 0;
}
-static void rswitch_gwca_desc_free(struct rswitch_private *priv)
+static void rswitch_gwca_linkfix_free(struct rswitch_private *priv)
{
- if (priv->linkfix_table)
- dma_free_coherent(&priv->pdev->dev, priv->linkfix_table_size,
- priv->linkfix_table, priv->linkfix_table_dma);
- priv->linkfix_table = NULL;
+ struct rswitch_gwca *gwca = &priv->gwca;
+
+ if (gwca->linkfix_table)
+ dma_free_coherent(&priv->pdev->dev, gwca->linkfix_table_size,
+ gwca->linkfix_table, gwca->linkfix_table_dma);
+ gwca->linkfix_table = NULL;
}
static struct rswitch_gwca_queue *rswitch_gwca_get(struct rswitch_private *priv)
@@ -537,8 +565,7 @@ static int rswitch_txdmac_alloc(struct net_device *ndev)
if (!rdev->tx_queue)
return -EBUSY;
- err = rswitch_gwca_queue_alloc(ndev, priv, rdev->tx_queue, true, false,
- TX_RING_SIZE);
+ err = rswitch_gwca_queue_alloc(ndev, priv, rdev->tx_queue, true, TX_RING_SIZE);
if (err < 0) {
rswitch_gwca_put(priv, rdev->tx_queue);
return err;
@@ -572,8 +599,7 @@ static int rswitch_rxdmac_alloc(struct net_device *ndev)
if (!rdev->rx_queue)
return -EBUSY;
- err = rswitch_gwca_queue_alloc(ndev, priv, rdev->rx_queue, false, true,
- RX_RING_SIZE);
+ err = rswitch_gwca_queue_alloc(ndev, priv, rdev->rx_queue, false, RX_RING_SIZE);
if (err < 0) {
rswitch_gwca_put(priv, rdev->rx_queue);
return err;
@@ -595,7 +621,7 @@ static int rswitch_rxdmac_init(struct rswitch_private *priv, int index)
struct rswitch_device *rdev = priv->rdev[index];
struct net_device *ndev = rdev->ndev;
- return rswitch_gwca_queue_ts_format(ndev, priv, rdev->rx_queue);
+ return rswitch_gwca_queue_ext_ts_format(ndev, priv, rdev->rx_queue);
}
static int rswitch_gwca_hw_init(struct rswitch_private *priv)
@@ -618,8 +644,11 @@ static int rswitch_gwca_hw_init(struct rswitch_private *priv)
iowrite32(GWVCC_VEM_SC_TAG, priv->addr + GWVCC);
iowrite32(0, priv->addr + GWTTFC);
- iowrite32(lower_32_bits(priv->linkfix_table_dma), priv->addr + GWDCBAC1);
- iowrite32(upper_32_bits(priv->linkfix_table_dma), priv->addr + GWDCBAC0);
+ iowrite32(lower_32_bits(priv->gwca.linkfix_table_dma), priv->addr + GWDCBAC1);
+ iowrite32(upper_32_bits(priv->gwca.linkfix_table_dma), priv->addr + GWDCBAC0);
+ iowrite32(lower_32_bits(priv->gwca.ts_queue.ring_dma), priv->addr + GWTDCAC10);
+ iowrite32(upper_32_bits(priv->gwca.ts_queue.ring_dma), priv->addr + GWTDCAC00);
+ iowrite32(GWCA_TS_IRQ_BIT, priv->addr + GWTSDCC0);
rswitch_gwca_set_rate_limit(priv, priv->gwca.speed);
for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
@@ -676,7 +705,7 @@ static bool rswitch_rx(struct net_device *ndev, int *quota)
boguscnt = min_t(int, gq->ring_size, *quota);
limit = boguscnt;
- desc = &gq->ts_ring[gq->cur];
+ desc = &gq->rx_ring[gq->cur];
while ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY) {
if (--boguscnt < 0)
break;
@@ -704,14 +733,14 @@ static bool rswitch_rx(struct net_device *ndev, int *quota)
rdev->ndev->stats.rx_bytes += pkt_len;
gq->cur = rswitch_next_queue_index(gq, true, 1);
- desc = &gq->ts_ring[gq->cur];
+ desc = &gq->rx_ring[gq->cur];
}
num = rswitch_get_num_cur_queues(gq);
ret = rswitch_gwca_queue_alloc_skb(gq, gq->dirty, num);
if (ret < 0)
goto err;
- ret = rswitch_gwca_queue_ts_fill(ndev, gq, gq->dirty, num);
+ ret = rswitch_gwca_queue_ext_ts_fill(ndev, gq, gq->dirty, num);
if (ret < 0)
goto err;
gq->dirty = rswitch_next_queue_index(gq, false, num);
@@ -738,7 +767,7 @@ static int rswitch_tx_free(struct net_device *ndev, bool free_txed_only)
for (; rswitch_get_num_cur_queues(gq) > 0;
gq->dirty = rswitch_next_queue_index(gq, false, 1)) {
- desc = &gq->ring[gq->dirty];
+ desc = &gq->tx_ring[gq->dirty];
if (free_txed_only && (desc->desc.die_dt & DT_MASK) != DT_FEMPTY)
break;
@@ -746,15 +775,6 @@ static int rswitch_tx_free(struct net_device *ndev, bool free_txed_only)
size = le16_to_cpu(desc->desc.info_ds) & TX_DS;
skb = gq->skbs[gq->dirty];
if (skb) {
- if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
- struct skb_shared_hwtstamps shhwtstamps;
- struct timespec64 ts;
-
- rswitch_get_timestamp(rdev->priv, &ts);
- memset(&shhwtstamps, 0, sizeof(shhwtstamps));
- shhwtstamps.hwtstamp = timespec64_to_ktime(ts);
- skb_tstamp_tx(skb, &shhwtstamps);
- }
dma_addr = rswitch_desc_get_dptr(&desc->desc);
dma_unmap_single(ndev->dev.parent, dma_addr,
size, DMA_TO_DEVICE);
@@ -880,6 +900,73 @@ static int rswitch_gwca_request_irqs(struct rswitch_private *priv)
return 0;
}
+static void rswitch_ts(struct rswitch_private *priv)
+{
+ struct rswitch_gwca_queue *gq = &priv->gwca.ts_queue;
+ struct rswitch_gwca_ts_info *ts_info, *ts_info2;
+ struct skb_shared_hwtstamps shhwtstamps;
+ struct rswitch_ts_desc *desc;
+ struct timespec64 ts;
+ u32 tag, port;
+ int num;
+
+ desc = &gq->ts_ring[gq->cur];
+ while ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY_ND) {
+ dma_rmb();
+
+ port = TS_DESC_DPN(__le32_to_cpu(desc->desc.dptrl));
+ tag = TS_DESC_TSUN(__le32_to_cpu(desc->desc.dptrl));
+
+ list_for_each_entry_safe(ts_info, ts_info2, &priv->gwca.ts_info_list, list) {
+ if (!(ts_info->port == port && ts_info->tag == tag))
+ continue;
+
+ memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+ ts.tv_sec = __le32_to_cpu(desc->ts_sec);
+ ts.tv_nsec = __le32_to_cpu(desc->ts_nsec & cpu_to_le32(0x3fffffff));
+ shhwtstamps.hwtstamp = timespec64_to_ktime(ts);
+ skb_tstamp_tx(ts_info->skb, &shhwtstamps);
+ dev_consume_skb_irq(ts_info->skb);
+ list_del(&ts_info->list);
+ kfree(ts_info);
+ break;
+ }
+
+ gq->cur = rswitch_next_queue_index(gq, true, 1);
+ desc = &gq->ts_ring[gq->cur];
+ }
+
+ num = rswitch_get_num_cur_queues(gq);
+ rswitch_gwca_ts_queue_fill(priv, gq->dirty, num);
+ gq->dirty = rswitch_next_queue_index(gq, false, num);
+}
+
+static irqreturn_t rswitch_gwca_ts_irq(int irq, void *dev_id)
+{
+ struct rswitch_private *priv = dev_id;
+
+ if (ioread32(priv->addr + GWTSDIS) & GWCA_TS_IRQ_BIT) {
+ iowrite32(GWCA_TS_IRQ_BIT, priv->addr + GWTSDIS);
+ rswitch_ts(priv);
+
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static int rswitch_gwca_ts_request_irqs(struct rswitch_private *priv)
+{
+ int irq;
+
+ irq = platform_get_irq_byname(priv->pdev, GWCA_TS_IRQ_RESOURCE_NAME);
+ if (irq < 0)
+ return irq;
+
+ return devm_request_irq(&priv->pdev->dev, irq, rswitch_gwca_ts_irq,
+ 0, GWCA_TS_IRQ_NAME, priv);
+}
+
/* Ethernet TSN Agent block (ETHA) and Ethernet MAC IP block (RMAC) */
static int rswitch_etha_change_mode(struct rswitch_etha *etha,
enum rswitch_etha_mode mode)
@@ -1024,34 +1111,18 @@ static int rswitch_etha_set_access(struct rswitch_etha *etha, bool read,
return ret;
}
-static int rswitch_etha_mii_read(struct mii_bus *bus, int addr, int regnum)
+static int rswitch_etha_mii_read_c45(struct mii_bus *bus, int addr, int devad,
+ int regad)
{
struct rswitch_etha *etha = bus->priv;
- int mode, devad, regad;
-
- mode = regnum & MII_ADDR_C45;
- devad = (regnum >> MII_DEVADDR_C45_SHIFT) & 0x1f;
- regad = regnum & MII_REGADDR_C45_MASK;
-
- /* Not support Clause 22 access method */
- if (!mode)
- return -EOPNOTSUPP;
return rswitch_etha_set_access(etha, true, addr, devad, regad, 0);
}
-static int rswitch_etha_mii_write(struct mii_bus *bus, int addr, int regnum, u16 val)
+static int rswitch_etha_mii_write_c45(struct mii_bus *bus, int addr, int devad,
+ int regad, u16 val)
{
struct rswitch_etha *etha = bus->priv;
- int mode, devad, regad;
-
- mode = regnum & MII_ADDR_C45;
- devad = (regnum >> MII_DEVADDR_C45_SHIFT) & 0x1f;
- regad = regnum & MII_REGADDR_C45_MASK;
-
- /* Not support Clause 22 access method */
- if (!mode)
- return -EOPNOTSUPP;
return rswitch_etha_set_access(etha, false, addr, devad, regad, val);
}
@@ -1087,33 +1158,25 @@ out:
return port;
}
-/* Call of_node_put(mdio) after done */
-static struct device_node *rswitch_get_mdio_node(struct rswitch_device *rdev)
-{
- struct device_node *port, *mdio;
-
- port = rswitch_get_port_node(rdev);
- if (!port)
- return NULL;
-
- mdio = of_get_child_by_name(port, "mdio");
- of_node_put(port);
-
- return mdio;
-}
-
static int rswitch_etha_get_params(struct rswitch_device *rdev)
{
- struct device_node *port;
+ u32 max_speed;
int err;
- port = rswitch_get_port_node(rdev);
- if (!port)
+ if (!rdev->np_port)
return 0; /* ignored */
- err = of_get_phy_mode(port, &rdev->etha->phy_interface);
- of_node_put(port);
+ err = of_get_phy_mode(rdev->np_port, &rdev->etha->phy_interface);
+ if (err)
+ return err;
+ err = of_property_read_u32(rdev->np_port, "max-speed", &max_speed);
+ if (!err) {
+ rdev->etha->speed = max_speed;
+ return 0;
+ }
+
+ /* if no "max-speed" property, let's use default speed */
switch (rdev->etha->phy_interface) {
case PHY_INTERFACE_MODE_MII:
rdev->etha->speed = SPEED_100;
@@ -1125,11 +1188,10 @@ static int rswitch_etha_get_params(struct rswitch_device *rdev)
rdev->etha->speed = SPEED_2500;
break;
default:
- err = -EINVAL;
- break;
+ return -EINVAL;
}
- return err;
+ return 0;
}
static int rswitch_mii_register(struct rswitch_device *rdev)
@@ -1145,11 +1207,11 @@ static int rswitch_mii_register(struct rswitch_device *rdev)
mii_bus->name = "rswitch_mii";
sprintf(mii_bus->id, "etha%d", rdev->etha->index);
mii_bus->priv = rdev->etha;
- mii_bus->read = rswitch_etha_mii_read;
- mii_bus->write = rswitch_etha_mii_write;
+ mii_bus->read_c45 = rswitch_etha_mii_read_c45;
+ mii_bus->write_c45 = rswitch_etha_mii_write_c45;
mii_bus->parent = &rdev->priv->pdev->dev;
- mdio_np = rswitch_get_mdio_node(rdev);
+ mdio_np = of_get_child_by_name(rdev->np_port, "mdio");
err = of_mdiobus_register(mii_bus, mdio_np);
if (err < 0) {
mdiobus_free(mii_bus);
@@ -1173,114 +1235,107 @@ static void rswitch_mii_unregister(struct rswitch_device *rdev)
}
}
-static void rswitch_mac_config(struct phylink_config *config,
- unsigned int mode,
- const struct phylink_link_state *state)
+static void rswitch_adjust_link(struct net_device *ndev)
{
-}
+ struct rswitch_device *rdev = netdev_priv(ndev);
+ struct phy_device *phydev = ndev->phydev;
-static void rswitch_mac_link_down(struct phylink_config *config,
- unsigned int mode,
- phy_interface_t interface)
-{
+ /* Current hardware has a restriction not to change speed at runtime */
+ if (phydev->link != rdev->etha->link) {
+ phy_print_status(phydev);
+ if (phydev->link)
+ phy_power_on(rdev->serdes);
+ else
+ phy_power_off(rdev->serdes);
+
+ rdev->etha->link = phydev->link;
+ }
}
-static void rswitch_mac_link_up(struct phylink_config *config,
- struct phy_device *phydev, unsigned int mode,
- phy_interface_t interface, int speed,
- int duplex, bool tx_pause, bool rx_pause)
+static void rswitch_phy_remove_link_mode(struct rswitch_device *rdev,
+ struct phy_device *phydev)
{
- /* Current hardware cannot change speed at runtime */
-}
+ /* Current hardware has a restriction not to change speed at runtime */
+ switch (rdev->etha->speed) {
+ case SPEED_2500:
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Full_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Full_BIT);
+ break;
+ case SPEED_1000:
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_2500baseX_Full_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Full_BIT);
+ break;
+ case SPEED_100:
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_2500baseX_Full_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Full_BIT);
+ break;
+ default:
+ break;
+ }
-static const struct phylink_mac_ops rswitch_phylink_ops = {
- .mac_config = rswitch_mac_config,
- .mac_link_down = rswitch_mac_link_down,
- .mac_link_up = rswitch_mac_link_up,
-};
+ phy_set_max_speed(phydev, rdev->etha->speed);
+}
-static int rswitch_phylink_init(struct rswitch_device *rdev)
+static int rswitch_phy_device_init(struct rswitch_device *rdev)
{
- struct device_node *port;
- struct phylink *phylink;
- int err;
+ struct phy_device *phydev;
+ struct device_node *phy;
+ int err = -ENOENT;
- port = rswitch_get_port_node(rdev);
- if (!port)
+ if (!rdev->np_port)
return -ENODEV;
- rdev->phylink_config.dev = &rdev->ndev->dev;
- rdev->phylink_config.type = PHYLINK_NETDEV;
- __set_bit(PHY_INTERFACE_MODE_SGMII, rdev->phylink_config.supported_interfaces);
- __set_bit(PHY_INTERFACE_MODE_USXGMII, rdev->phylink_config.supported_interfaces);
- rdev->phylink_config.mac_capabilities = MAC_100FD | MAC_1000FD | MAC_2500FD;
+ phy = of_parse_phandle(rdev->np_port, "phy-handle", 0);
+ if (!phy)
+ return -ENODEV;
- phylink = phylink_create(&rdev->phylink_config, &port->fwnode,
- rdev->etha->phy_interface, &rswitch_phylink_ops);
- if (IS_ERR(phylink)) {
- err = PTR_ERR(phylink);
+ /* Set phydev->host_interfaces before calling of_phy_connect() to
+ * configure the PHY with the information of host_interfaces.
+ */
+ phydev = of_phy_find_device(phy);
+ if (!phydev)
goto out;
- }
+ __set_bit(rdev->etha->phy_interface, phydev->host_interfaces);
- rdev->phylink = phylink;
- err = phylink_of_phy_connect(rdev->phylink, port, rdev->etha->phy_interface);
+ phydev = of_phy_connect(rdev->ndev, phy, rswitch_adjust_link, 0,
+ rdev->etha->phy_interface);
+ if (!phydev)
+ goto out;
+
+ phy_set_max_speed(phydev, SPEED_2500);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
+ rswitch_phy_remove_link_mode(rdev, phydev);
+
+ phy_attached_info(phydev);
+
+ err = 0;
out:
- of_node_put(port);
+ of_node_put(phy);
return err;
}
-static void rswitch_phylink_deinit(struct rswitch_device *rdev)
+static void rswitch_phy_device_deinit(struct rswitch_device *rdev)
{
- rtnl_lock();
- phylink_disconnect_phy(rdev->phylink);
- rtnl_unlock();
- phylink_destroy(rdev->phylink);
+ if (rdev->ndev->phydev) {
+ phy_disconnect(rdev->ndev->phydev);
+ rdev->ndev->phydev = NULL;
+ }
}
static int rswitch_serdes_set_params(struct rswitch_device *rdev)
{
- struct device_node *port = rswitch_get_port_node(rdev);
- struct phy *serdes;
int err;
- serdes = devm_of_phy_get(&rdev->priv->pdev->dev, port, NULL);
- of_node_put(port);
- if (IS_ERR(serdes))
- return PTR_ERR(serdes);
-
- err = phy_set_mode_ext(serdes, PHY_MODE_ETHERNET,
+ err = phy_set_mode_ext(rdev->serdes, PHY_MODE_ETHERNET,
rdev->etha->phy_interface);
if (err < 0)
return err;
- return phy_set_speed(serdes, rdev->etha->speed);
-}
-
-static int rswitch_serdes_init(struct rswitch_device *rdev)
-{
- struct device_node *port = rswitch_get_port_node(rdev);
- struct phy *serdes;
-
- serdes = devm_of_phy_get(&rdev->priv->pdev->dev, port, NULL);
- of_node_put(port);
- if (IS_ERR(serdes))
- return PTR_ERR(serdes);
-
- return phy_init(serdes);
-}
-
-static int rswitch_serdes_deinit(struct rswitch_device *rdev)
-{
- struct device_node *port = rswitch_get_port_node(rdev);
- struct phy *serdes;
-
- serdes = devm_of_phy_get(&rdev->priv->pdev->dev, port, NULL);
- of_node_put(port);
- if (IS_ERR(serdes))
- return PTR_ERR(serdes);
-
- return phy_exit(serdes);
+ return phy_set_speed(rdev->serdes, rdev->etha->speed);
}
static int rswitch_ether_port_init_one(struct rswitch_device *rdev)
@@ -1298,9 +1353,15 @@ static int rswitch_ether_port_init_one(struct rswitch_device *rdev)
if (err < 0)
return err;
- err = rswitch_phylink_init(rdev);
+ err = rswitch_phy_device_init(rdev);
if (err < 0)
- goto err_phylink_init;
+ goto err_phy_device_init;
+
+ rdev->serdes = devm_of_phy_get(&rdev->priv->pdev->dev, rdev->np_port, NULL);
+ if (IS_ERR(rdev->serdes)) {
+ err = PTR_ERR(rdev->serdes);
+ goto err_serdes_phy_get;
+ }
err = rswitch_serdes_set_params(rdev);
if (err < 0)
@@ -1309,9 +1370,10 @@ static int rswitch_ether_port_init_one(struct rswitch_device *rdev)
return 0;
err_serdes_set_params:
- rswitch_phylink_deinit(rdev);
+err_serdes_phy_get:
+ rswitch_phy_device_deinit(rdev);
-err_phylink_init:
+err_phy_device_init:
rswitch_mii_unregister(rdev);
return err;
@@ -1319,7 +1381,7 @@ err_phylink_init:
static void rswitch_ether_port_deinit_one(struct rswitch_device *rdev)
{
- rswitch_phylink_deinit(rdev);
+ rswitch_phy_device_deinit(rdev);
rswitch_mii_unregister(rdev);
}
@@ -1334,7 +1396,7 @@ static int rswitch_ether_port_init_all(struct rswitch_private *priv)
}
rswitch_for_each_enabled_port(priv, i) {
- err = rswitch_serdes_init(priv->rdev[i]);
+ err = phy_init(priv->rdev[i]->serdes);
if (err)
goto err_serdes;
}
@@ -1343,7 +1405,7 @@ static int rswitch_ether_port_init_all(struct rswitch_private *priv)
err_serdes:
rswitch_for_each_enabled_port_continue_reverse(priv, i)
- rswitch_serdes_deinit(priv->rdev[i]);
+ phy_exit(priv->rdev[i]->serdes);
i = RSWITCH_NUM_PORTS;
err_init_one:
@@ -1358,7 +1420,7 @@ static void rswitch_ether_port_deinit_all(struct rswitch_private *priv)
int i;
for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
- rswitch_serdes_deinit(priv->rdev[i]);
+ phy_exit(priv->rdev[i]->serdes);
rswitch_ether_port_deinit_one(priv->rdev[i]);
}
}
@@ -1367,7 +1429,7 @@ static int rswitch_open(struct net_device *ndev)
{
struct rswitch_device *rdev = netdev_priv(ndev);
- phylink_start(rdev->phylink);
+ phy_start(ndev->phydev);
napi_enable(&rdev->napi);
netif_start_queue(ndev);
@@ -1375,19 +1437,32 @@ static int rswitch_open(struct net_device *ndev)
rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, true);
rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, true);
+ iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDIE);
+
return 0;
};
static int rswitch_stop(struct net_device *ndev)
{
struct rswitch_device *rdev = netdev_priv(ndev);
+ struct rswitch_gwca_ts_info *ts_info, *ts_info2;
netif_tx_stop_all_queues(ndev);
+ iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDID);
+
+ list_for_each_entry_safe(ts_info, ts_info2, &rdev->priv->gwca.ts_info_list, list) {
+ if (ts_info->port != rdev->port)
+ continue;
+ dev_kfree_skb_irq(ts_info->skb);
+ list_del(&ts_info->list);
+ kfree(ts_info);
+ }
+
rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, false);
rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, false);
- phylink_stop(rdev->phylink);
+ phy_stop(ndev->phydev);
napi_disable(&rdev->napi);
return 0;
@@ -1416,17 +1491,31 @@ static netdev_tx_t rswitch_start_xmit(struct sk_buff *skb, struct net_device *nd
}
gq->skbs[gq->cur] = skb;
- desc = &gq->ring[gq->cur];
+ desc = &gq->tx_ring[gq->cur];
rswitch_desc_set_dptr(&desc->desc, dma_addr);
desc->desc.info_ds = cpu_to_le16(skb->len);
desc->info1 = cpu_to_le64(INFO1_DV(BIT(rdev->etha->index)) | INFO1_FMT);
if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
+ struct rswitch_gwca_ts_info *ts_info;
+
+ ts_info = kzalloc(sizeof(*ts_info), GFP_ATOMIC);
+ if (!ts_info) {
+ dma_unmap_single(ndev->dev.parent, dma_addr, skb->len, DMA_TO_DEVICE);
+ return -ENOMEM;
+ }
+
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
rdev->ts_tag++;
desc->info1 |= cpu_to_le64(INFO1_TSUN(rdev->ts_tag) | INFO1_TXC);
+
+ ts_info->skb = skb_get(skb);
+ ts_info->port = rdev->port;
+ ts_info->tag = rdev->ts_tag;
+ list_add_tail(&ts_info->list, &rdev->priv->gwca.ts_info_list);
+
+ skb_tx_timestamp(skb);
}
- skb_tx_timestamp(skb);
dma_wmb();
@@ -1515,8 +1604,6 @@ static int rswitch_hwstamp_set(struct net_device *ndev, struct ifreq *req)
static int rswitch_eth_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
{
- struct rswitch_device *rdev = netdev_priv(ndev);
-
if (!netif_running(ndev))
return -EINVAL;
@@ -1526,7 +1613,7 @@ static int rswitch_eth_ioctl(struct net_device *ndev, struct ifreq *req, int cmd
case SIOCSHWTSTAMP:
return rswitch_hwstamp_set(ndev, req);
default:
- return phylink_mii_ioctl(rdev->phylink, req, cmd);
+ return phy_mii_ioctl(ndev->phydev, req, cmd);
}
}
@@ -1581,7 +1668,6 @@ static int rswitch_device_alloc(struct rswitch_private *priv, int index)
{
struct platform_device *pdev = priv->pdev;
struct rswitch_device *rdev;
- struct device_node *port;
struct net_device *ndev;
int err;
@@ -1610,10 +1696,10 @@ static int rswitch_device_alloc(struct rswitch_private *priv, int index)
netif_napi_add(ndev, &rdev->napi, rswitch_poll);
- port = rswitch_get_port_node(rdev);
- rdev->disabled = !port;
- err = of_get_ethdev_address(port, ndev);
- of_node_put(port);
+ rdev->np_port = rswitch_get_port_node(rdev);
+ rdev->disabled = !rdev->np_port;
+ err = of_get_ethdev_address(rdev->np_port, ndev);
+ of_node_put(rdev->np_port);
if (err) {
if (is_valid_ether_addr(rdev->etha->mac_addr))
eth_hw_addr_set(ndev, rdev->etha->mac_addr);
@@ -1679,10 +1765,17 @@ static int rswitch_init(struct rswitch_private *priv)
if (err < 0)
return err;
- err = rswitch_gwca_desc_alloc(priv);
+ err = rswitch_gwca_linkfix_alloc(priv);
if (err < 0)
return -ENOMEM;
+ err = rswitch_gwca_ts_queue_alloc(priv);
+ if (err < 0)
+ goto err_ts_queue_alloc;
+
+ rswitch_gwca_ts_queue_fill(priv, 0, TS_RING_SIZE);
+ INIT_LIST_HEAD(&priv->gwca.ts_info_list);
+
for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
err = rswitch_device_alloc(priv, i);
if (err < 0) {
@@ -1703,6 +1796,10 @@ static int rswitch_init(struct rswitch_private *priv)
if (err < 0)
goto err_gwca_request_irq;
+ err = rswitch_gwca_ts_request_irqs(priv);
+ if (err < 0)
+ goto err_gwca_ts_request_irq;
+
err = rswitch_gwca_hw_init(priv);
if (err < 0)
goto err_gwca_hw_init;
@@ -1733,6 +1830,7 @@ err_ether_port_init_all:
rswitch_gwca_hw_deinit(priv);
err_gwca_hw_init:
+err_gwca_ts_request_irq:
err_gwca_request_irq:
rcar_gen4_ptp_unregister(priv->ptp_priv);
@@ -1741,7 +1839,10 @@ err_ptp_register:
rswitch_device_free(priv, i);
err_device_alloc:
- rswitch_gwca_desc_free(priv);
+ rswitch_gwca_ts_queue_free(priv);
+
+err_ts_queue_alloc:
+ rswitch_gwca_linkfix_free(priv);
return err;
}
@@ -1814,13 +1915,14 @@ static void rswitch_deinit(struct rswitch_private *priv)
for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
struct rswitch_device *rdev = priv->rdev[i];
- rswitch_serdes_deinit(rdev);
+ phy_exit(priv->rdev[i]->serdes);
rswitch_ether_port_deinit_one(rdev);
unregister_netdev(rdev->ndev);
rswitch_device_free(priv, i);
}
- rswitch_gwca_desc_free(priv);
+ rswitch_gwca_ts_queue_free(priv);
+ rswitch_gwca_linkfix_free(priv);
rswitch_clock_disable(priv);
}
diff --git a/drivers/net/ethernet/renesas/rswitch.h b/drivers/net/ethernet/renesas/rswitch.h
index 49efb0f31c77..27d3d38c055f 100644
--- a/drivers/net/ethernet/renesas/rswitch.h
+++ b/drivers/net/ethernet/renesas/rswitch.h
@@ -27,6 +27,7 @@
#define TX_RING_SIZE 1024
#define RX_RING_SIZE 1024
+#define TS_RING_SIZE (TX_RING_SIZE * RSWITCH_NUM_PORTS)
#define PKT_BUF_SZ 1584
#define RSWITCH_ALIGN 128
@@ -49,6 +50,10 @@
#define AGENT_INDEX_GWCA 3
#define GWRO RSWITCH_GWCA0_OFFSET
+#define GWCA_TS_IRQ_RESOURCE_NAME "gwca0_rxts0"
+#define GWCA_TS_IRQ_NAME "rswitch: gwca0_rxts0"
+#define GWCA_TS_IRQ_BIT BIT(0)
+
#define FWRO 0
#define TPRO RSWITCH_TOP_OFFSET
#define CARO RSWITCH_COMA_OFFSET
@@ -831,7 +836,7 @@ enum DIE_DT {
DT_FSINGLE = 0x80,
DT_FSTART = 0x90,
DT_FMID = 0xa0,
- DT_FEND = 0xb8,
+ DT_FEND = 0xb0,
/* Chain control */
DT_LEMPTY = 0xc0,
@@ -843,7 +848,7 @@ enum DIE_DT {
DT_FEMPTY = 0x40,
DT_FEMPTY_IS = 0x10,
DT_FEMPTY_IC = 0x20,
- DT_FEMPTY_ND = 0x38,
+ DT_FEMPTY_ND = 0x30,
DT_FEMPTY_START = 0x50,
DT_FEMPTY_MID = 0x60,
DT_FEMPTY_END = 0x70,
@@ -865,6 +870,12 @@ enum DIE_DT {
/* For reception */
#define INFO1_SPN(port) ((u64)(port) << 36ULL)
+/* For timestamp descriptor in dptrl (Byte 4 to 7) */
+#define TS_DESC_TSUN(dptrl) ((dptrl) & GENMASK(7, 0))
+#define TS_DESC_SPN(dptrl) (((dptrl) & GENMASK(10, 8)) >> 8)
+#define TS_DESC_DPN(dptrl) (((dptrl) & GENMASK(17, 16)) >> 16)
+#define TS_DESC_TN(dptrl) ((dptrl) & BIT(24))
+
struct rswitch_desc {
__le16 info_ds; /* Descriptor size */
u8 die_dt; /* Descriptor interrupt enable and type */
@@ -911,27 +922,43 @@ struct rswitch_etha {
* name, this driver calls "queue".
*/
struct rswitch_gwca_queue {
- int index;
- bool dir_tx;
- bool gptp;
union {
- struct rswitch_ext_desc *ring;
- struct rswitch_ext_ts_desc *ts_ring;
+ struct rswitch_ext_desc *tx_ring;
+ struct rswitch_ext_ts_desc *rx_ring;
+ struct rswitch_ts_desc *ts_ring;
};
+
+ /* Common */
dma_addr_t ring_dma;
int ring_size;
int cur;
int dirty;
- struct sk_buff **skbs;
+ /* For [rt]_ring */
+ int index;
+ bool dir_tx;
+ struct sk_buff **skbs;
struct net_device *ndev; /* queue to ndev for irq */
};
+struct rswitch_gwca_ts_info {
+ struct sk_buff *skb;
+ struct list_head list;
+
+ int port;
+ u8 tag;
+};
+
#define RSWITCH_NUM_IRQ_REGS (RSWITCH_MAX_NUM_QUEUES / BITS_PER_TYPE(u32))
struct rswitch_gwca {
int index;
+ struct rswitch_desc *linkfix_table;
+ dma_addr_t linkfix_table_dma;
+ u32 linkfix_table_size;
struct rswitch_gwca_queue *queues;
int num_queues;
+ struct rswitch_gwca_queue ts_queue;
+ struct list_head ts_info_list;
DECLARE_BITMAP(used, RSWITCH_MAX_NUM_QUEUES);
u32 tx_irq_bits[RSWITCH_NUM_IRQ_REGS];
u32 rx_irq_bits[RSWITCH_NUM_IRQ_REGS];
@@ -943,8 +970,6 @@ struct rswitch_device {
struct rswitch_private *priv;
struct net_device *ndev;
struct napi_struct napi;
- struct phylink *phylink;
- struct phylink_config phylink_config;
void __iomem *addr;
struct rswitch_gwca_queue *tx_queue;
struct rswitch_gwca_queue *rx_queue;
@@ -953,6 +978,8 @@ struct rswitch_device {
int port;
struct rswitch_etha *etha;
+ struct device_node *np_port;
+ struct phy *serdes;
};
struct rswitch_mfwd_mac_table_entry {
@@ -969,9 +996,6 @@ struct rswitch_private {
struct platform_device *pdev;
void __iomem *addr;
struct rcar_gen4_ptp_private *ptp_priv;
- struct rswitch_desc *linkfix_table;
- dma_addr_t linkfix_table_dma;
- u32 linkfix_table_size;
struct rswitch_device *rdev[RSWITCH_NUM_PORTS];
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 71a499113308..ed17163d7811 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -3044,23 +3044,46 @@ static int sh_mdio_release(struct sh_eth_private *mdp)
return 0;
}
-static int sh_mdiobb_read(struct mii_bus *bus, int phy, int reg)
+static int sh_mdiobb_read_c22(struct mii_bus *bus, int phy, int reg)
{
int res;
pm_runtime_get_sync(bus->parent);
- res = mdiobb_read(bus, phy, reg);
+ res = mdiobb_read_c22(bus, phy, reg);
pm_runtime_put(bus->parent);
return res;
}
-static int sh_mdiobb_write(struct mii_bus *bus, int phy, int reg, u16 val)
+static int sh_mdiobb_write_c22(struct mii_bus *bus, int phy, int reg, u16 val)
{
int res;
pm_runtime_get_sync(bus->parent);
- res = mdiobb_write(bus, phy, reg, val);
+ res = mdiobb_write_c22(bus, phy, reg, val);
+ pm_runtime_put(bus->parent);
+
+ return res;
+}
+
+static int sh_mdiobb_read_c45(struct mii_bus *bus, int phy, int devad, int reg)
+{
+ int res;
+
+ pm_runtime_get_sync(bus->parent);
+ res = mdiobb_read_c45(bus, phy, devad, reg);
+ pm_runtime_put(bus->parent);
+
+ return res;
+}
+
+static int sh_mdiobb_write_c45(struct mii_bus *bus, int phy, int devad,
+ int reg, u16 val)
+{
+ int res;
+
+ pm_runtime_get_sync(bus->parent);
+ res = mdiobb_write_c45(bus, phy, devad, reg, val);
pm_runtime_put(bus->parent);
return res;
@@ -3091,8 +3114,10 @@ static int sh_mdio_init(struct sh_eth_private *mdp,
return -ENOMEM;
/* Wrap accessors with Runtime PM-aware ops */
- mdp->mii_bus->read = sh_mdiobb_read;
- mdp->mii_bus->write = sh_mdiobb_write;
+ mdp->mii_bus->read = sh_mdiobb_read_c22;
+ mdp->mii_bus->write = sh_mdiobb_write_c22;
+ mdp->mii_bus->read_c45 = sh_mdiobb_read_c45;
+ mdp->mii_bus->write_c45 = sh_mdiobb_write_c45;
/* Hook up MII support for ethtool */
mdp->mii_bus->name = "sh_mii";
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_mdio.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_mdio.c
index fceb6d637235..0227223c06fa 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_mdio.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_mdio.c
@@ -50,12 +50,12 @@ static void sxgbe_mdio_ctrl_data(struct sxgbe_priv_data *sp, u32 cmd,
}
static void sxgbe_mdio_c45(struct sxgbe_priv_data *sp, u32 cmd, int phyaddr,
- int phyreg, u16 phydata)
+ int devad, int phyreg, u16 phydata)
{
u32 reg;
/* set mdio address register */
- reg = ((phyreg >> 16) & 0x1f) << 21;
+ reg = (devad & 0x1f) << 21;
reg |= (phyaddr << 16) | (phyreg & 0xffff);
writel(reg, sp->ioaddr + sp->hw->mii.addr);
@@ -76,8 +76,8 @@ static void sxgbe_mdio_c22(struct sxgbe_priv_data *sp, u32 cmd, int phyaddr,
sxgbe_mdio_ctrl_data(sp, cmd, phydata);
}
-static int sxgbe_mdio_access(struct sxgbe_priv_data *sp, u32 cmd, int phyaddr,
- int phyreg, u16 phydata)
+static int sxgbe_mdio_access_c22(struct sxgbe_priv_data *sp, u32 cmd,
+ int phyaddr, int phyreg, u16 phydata)
{
const struct mii_regs *mii = &sp->hw->mii;
int rc;
@@ -86,33 +86,46 @@ static int sxgbe_mdio_access(struct sxgbe_priv_data *sp, u32 cmd, int phyaddr,
if (rc < 0)
return rc;
- if (phyreg & MII_ADDR_C45) {
- sxgbe_mdio_c45(sp, cmd, phyaddr, phyreg, phydata);
- } else {
- /* Ports 0-3 only support C22. */
- if (phyaddr >= 4)
- return -ENODEV;
+ /* Ports 0-3 only support C22. */
+ if (phyaddr >= 4)
+ return -ENODEV;
- sxgbe_mdio_c22(sp, cmd, phyaddr, phyreg, phydata);
- }
+ sxgbe_mdio_c22(sp, cmd, phyaddr, phyreg, phydata);
+
+ return sxgbe_mdio_busy_wait(sp->ioaddr, mii->data);
+}
+
+static int sxgbe_mdio_access_c45(struct sxgbe_priv_data *sp, u32 cmd,
+ int phyaddr, int devad, int phyreg,
+ u16 phydata)
+{
+ const struct mii_regs *mii = &sp->hw->mii;
+ int rc;
+
+ rc = sxgbe_mdio_busy_wait(sp->ioaddr, mii->data);
+ if (rc < 0)
+ return rc;
+
+ sxgbe_mdio_c45(sp, cmd, phyaddr, devad, phyreg, phydata);
return sxgbe_mdio_busy_wait(sp->ioaddr, mii->data);
}
/**
- * sxgbe_mdio_read
+ * sxgbe_mdio_read_c22
* @bus: points to the mii_bus structure
* @phyaddr: address of phy port
* @phyreg: address of register with in phy register
- * Description: this function used for C45 and C22 MDIO Read
+ * Description: this function used for C22 MDIO Read
*/
-static int sxgbe_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg)
+static int sxgbe_mdio_read_c22(struct mii_bus *bus, int phyaddr, int phyreg)
{
struct net_device *ndev = bus->priv;
struct sxgbe_priv_data *priv = netdev_priv(ndev);
int rc;
- rc = sxgbe_mdio_access(priv, SXGBE_SMA_READ_CMD, phyaddr, phyreg, 0);
+ rc = sxgbe_mdio_access_c22(priv, SXGBE_SMA_READ_CMD, phyaddr,
+ phyreg, 0);
if (rc < 0)
return rc;
@@ -120,21 +133,63 @@ static int sxgbe_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg)
}
/**
- * sxgbe_mdio_write
+ * sxgbe_mdio_read_c45
+ * @bus: points to the mii_bus structure
+ * @phyaddr: address of phy port
+ * @devad: device (MMD) address
+ * @phyreg: address of register with in phy register
+ * Description: this function used for C45 MDIO Read
+ */
+static int sxgbe_mdio_read_c45(struct mii_bus *bus, int phyaddr, int devad,
+ int phyreg)
+{
+ struct net_device *ndev = bus->priv;
+ struct sxgbe_priv_data *priv = netdev_priv(ndev);
+ int rc;
+
+ rc = sxgbe_mdio_access_c45(priv, SXGBE_SMA_READ_CMD, phyaddr,
+ devad, phyreg, 0);
+ if (rc < 0)
+ return rc;
+
+ return readl(priv->ioaddr + priv->hw->mii.data) & 0xffff;
+}
+
+/**
+ * sxgbe_mdio_write_c22
+ * @bus: points to the mii_bus structure
+ * @phyaddr: address of phy port
+ * @phyreg: address of phy registers
+ * @phydata: data to be written into phy register
+ * Description: this function is used for C22 MDIO write
+ */
+static int sxgbe_mdio_write_c22(struct mii_bus *bus, int phyaddr, int phyreg,
+ u16 phydata)
+{
+ struct net_device *ndev = bus->priv;
+ struct sxgbe_priv_data *priv = netdev_priv(ndev);
+
+ return sxgbe_mdio_access_c22(priv, SXGBE_SMA_WRITE_CMD, phyaddr, phyreg,
+ phydata);
+}
+
+/**
+ * sxgbe_mdio_write_c45
* @bus: points to the mii_bus structure
* @phyaddr: address of phy port
* @phyreg: address of phy registers
+ * @devad: device (MMD) address
* @phydata: data to be written into phy register
- * Description: this function is used for C45 and C22 MDIO write
+ * Description: this function is used for C45 MDIO write
*/
-static int sxgbe_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg,
- u16 phydata)
+static int sxgbe_mdio_write_c45(struct mii_bus *bus, int phyaddr, int devad,
+ int phyreg, u16 phydata)
{
struct net_device *ndev = bus->priv;
struct sxgbe_priv_data *priv = netdev_priv(ndev);
- return sxgbe_mdio_access(priv, SXGBE_SMA_WRITE_CMD, phyaddr, phyreg,
- phydata);
+ return sxgbe_mdio_access_c45(priv, SXGBE_SMA_WRITE_CMD, phyaddr,
+ devad, phyreg, phydata);
}
int sxgbe_mdio_register(struct net_device *ndev)
@@ -161,8 +216,10 @@ int sxgbe_mdio_register(struct net_device *ndev)
/* assign mii bus fields */
mdio_bus->name = "sxgbe";
- mdio_bus->read = &sxgbe_mdio_read;
- mdio_bus->write = &sxgbe_mdio_write;
+ mdio_bus->read = sxgbe_mdio_read_c22;
+ mdio_bus->write = sxgbe_mdio_write_c22;
+ mdio_bus->read_c45 = sxgbe_mdio_read_c45;
+ mdio_bus->write_c45 = sxgbe_mdio_write_c45;
snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "%s-%x",
mdio_bus->name, priv->plat->bus_id);
mdio_bus->priv = ndev;
diff --git a/drivers/net/ethernet/sfc/Kconfig b/drivers/net/ethernet/sfc/Kconfig
index 0950e6b0508f..4af36ba8906b 100644
--- a/drivers/net/ethernet/sfc/Kconfig
+++ b/drivers/net/ethernet/sfc/Kconfig
@@ -22,6 +22,7 @@ config SFC
depends on PTP_1588_CLOCK_OPTIONAL
select MDIO
select CRC32
+ select NET_DEVLINK
help
This driver supports 10/40-gigabit Ethernet cards based on
the Solarflare SFC9100-family controllers.
diff --git a/drivers/net/ethernet/sfc/Makefile b/drivers/net/ethernet/sfc/Makefile
index 712a48d00069..55b9c73cd8ef 100644
--- a/drivers/net/ethernet/sfc/Makefile
+++ b/drivers/net/ethernet/sfc/Makefile
@@ -6,7 +6,8 @@ sfc-y += efx.o efx_common.o efx_channels.o nic.o \
mcdi.o mcdi_port.o mcdi_port_common.o \
mcdi_functions.o mcdi_filters.o mcdi_mon.o \
ef100.o ef100_nic.o ef100_netdev.o \
- ef100_ethtool.o ef100_rx.o ef100_tx.o
+ ef100_ethtool.o ef100_rx.o ef100_tx.o \
+ efx_devlink.o
sfc-$(CONFIG_SFC_MTD) += mtd.o
sfc-$(CONFIG_SFC_SRIOV) += sriov.o ef10_sriov.o ef100_sriov.o ef100_rep.o \
mae.o tc.o tc_bindings.o tc_counters.o
diff --git a/drivers/net/ethernet/sfc/ef100_netdev.c b/drivers/net/ethernet/sfc/ef100_netdev.c
index ddcc325ed570..d916877b5a9a 100644
--- a/drivers/net/ethernet/sfc/ef100_netdev.c
+++ b/drivers/net/ethernet/sfc/ef100_netdev.c
@@ -24,6 +24,7 @@
#include "rx_common.h"
#include "ef100_sriov.h"
#include "tc_bindings.h"
+#include "efx_devlink.h"
static void ef100_update_name(struct efx_nic *efx)
{
@@ -332,9 +333,11 @@ void ef100_remove_netdev(struct efx_probe_data *probe_data)
efx_ef100_pci_sriov_disable(efx, true);
#endif
+ efx_fini_devlink_lock(efx);
ef100_unregister_netdev(efx);
#ifdef CONFIG_SFC_SRIOV
+ ef100_pf_unset_devlink_port(efx);
efx_fini_tc(efx);
#endif
@@ -345,6 +348,8 @@ void ef100_remove_netdev(struct efx_probe_data *probe_data)
kfree(efx->phy_data);
efx->phy_data = NULL;
+ efx_fini_devlink_and_unlock(efx);
+
free_netdev(efx->net_dev);
efx->net_dev = NULL;
efx->state = STATE_PROBED;
@@ -354,6 +359,7 @@ int ef100_probe_netdev(struct efx_probe_data *probe_data)
{
struct efx_nic *efx = &probe_data->efx;
struct efx_probe_data **probe_ptr;
+ struct ef100_nic_data *nic_data;
struct net_device *net_dev;
int rc;
@@ -405,6 +411,20 @@ int ef100_probe_netdev(struct efx_probe_data *probe_data)
/* Don't fail init if RSS setup doesn't work. */
efx_mcdi_push_default_indir_table(efx, efx->n_rx_channels);
+ nic_data = efx->nic_data;
+ rc = ef100_get_mac_address(efx, net_dev->perm_addr, CLIENT_HANDLE_SELF,
+ efx->type->is_vf);
+ if (rc)
+ return rc;
+ /* Assign MAC address */
+ eth_hw_addr_set(net_dev, net_dev->perm_addr);
+ ether_addr_copy(nic_data->port_id, net_dev->perm_addr);
+
+ /* devlink creation, registration and lock */
+ rc = efx_probe_devlink_and_lock(efx);
+ if (rc)
+ pci_info(efx->pci_dev, "devlink registration failed");
+
rc = ef100_register_netdev(efx);
if (rc)
goto fail;
@@ -413,6 +433,9 @@ int ef100_probe_netdev(struct efx_probe_data *probe_data)
rc = ef100_probe_netdev_pf(efx);
if (rc)
goto fail;
+#ifdef CONFIG_SFC_SRIOV
+ ef100_pf_set_devlink_port(efx);
+#endif
}
efx->netdev_notifier.notifier_call = ef100_netdev_event;
@@ -423,6 +446,13 @@ int ef100_probe_netdev(struct efx_probe_data *probe_data)
goto fail;
}
+ efx_probe_devlink_unlock(efx);
+ return rc;
fail:
+#ifdef CONFIG_SFC_SRIOV
+ /* remove devlink port if does exist */
+ ef100_pf_unset_devlink_port(efx);
+#endif
+ efx_probe_devlink_unlock(efx);
return rc;
}
diff --git a/drivers/net/ethernet/sfc/ef100_nic.c b/drivers/net/ethernet/sfc/ef100_nic.c
index ad686c671ab8..4dc643b0d2db 100644
--- a/drivers/net/ethernet/sfc/ef100_nic.c
+++ b/drivers/net/ethernet/sfc/ef100_nic.c
@@ -130,23 +130,34 @@ static void ef100_mcdi_reboot_detected(struct efx_nic *efx)
/* MCDI calls
*/
-static int ef100_get_mac_address(struct efx_nic *efx, u8 *mac_address)
+int ef100_get_mac_address(struct efx_nic *efx, u8 *mac_address,
+ int client_handle, bool empty_ok)
{
- MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_MAC_ADDRESSES_OUT_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CLIENT_MAC_ADDRESSES_OUT_LEN(1));
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_CLIENT_MAC_ADDRESSES_IN_LEN);
size_t outlen;
int rc;
BUILD_BUG_ON(MC_CMD_GET_MAC_ADDRESSES_IN_LEN != 0);
+ MCDI_SET_DWORD(inbuf, GET_CLIENT_MAC_ADDRESSES_IN_CLIENT_HANDLE,
+ client_handle);
- rc = efx_mcdi_rpc(efx, MC_CMD_GET_MAC_ADDRESSES, NULL, 0,
- outbuf, sizeof(outbuf), &outlen);
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_CLIENT_MAC_ADDRESSES, inbuf,
+ sizeof(inbuf), outbuf, sizeof(outbuf), &outlen);
if (rc)
return rc;
- if (outlen < MC_CMD_GET_MAC_ADDRESSES_OUT_LEN)
- return -EIO;
- ether_addr_copy(mac_address,
- MCDI_PTR(outbuf, GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE));
+ if (outlen >= MC_CMD_GET_CLIENT_MAC_ADDRESSES_OUT_LEN(1)) {
+ ether_addr_copy(mac_address,
+ MCDI_PTR(outbuf, GET_CLIENT_MAC_ADDRESSES_OUT_MAC_ADDRS));
+ } else if (empty_ok) {
+ pci_warn(efx->pci_dev,
+ "No MAC address provisioned for client ID %#x.\n",
+ client_handle);
+ eth_zero_addr(mac_address);
+ } else {
+ return -ENOENT;
+ }
return 0;
}
@@ -388,14 +399,14 @@ static int ef100_filter_table_up(struct efx_nic *efx)
* filter insertion will need to take the lock for read.
*/
up_write(&efx->filter_sem);
-#ifdef CONFIG_SFC_SRIOV
- rc = efx_tc_insert_rep_filters(efx);
+ if (IS_ENABLED(CONFIG_SFC_SRIOV))
+ rc = efx_tc_insert_rep_filters(efx);
+
/* Rep filter failure is nonfatal */
if (rc)
netif_warn(efx, drv, efx->net_dev,
"Failed to insert representor filters, rc %d\n",
rc);
-#endif
return 0;
fail_vlan0:
@@ -408,9 +419,8 @@ fail_unspec:
static void ef100_filter_table_down(struct efx_nic *efx)
{
-#ifdef CONFIG_SFC_SRIOV
- efx_tc_remove_rep_filters(efx);
-#endif
+ if (IS_ENABLED(CONFIG_SFC_SRIOV))
+ efx_tc_remove_rep_filters(efx);
down_write(&efx->filter_sem);
efx_mcdi_filter_del_vlan(efx, 0);
efx_mcdi_filter_del_vlan(efx, EFX_FILTER_VID_UNSPEC);
@@ -726,7 +736,6 @@ static unsigned int efx_ef100_recycle_ring_size(const struct efx_nic *efx)
return 10 * EFX_RECYCLE_RING_SIZE_10G;
}
-#ifdef CONFIG_SFC_SRIOV
static int efx_ef100_get_base_mport(struct efx_nic *efx)
{
struct ef100_nic_data *nic_data = efx->nic_data;
@@ -736,7 +745,7 @@ static int efx_ef100_get_base_mport(struct efx_nic *efx)
/* Construct mport selector for "physical network port" */
efx_mae_mport_wire(efx, &selector);
/* Look up actual mport ID */
- rc = efx_mae_lookup_mport(efx, selector, &id);
+ rc = efx_mae_fw_lookup_mport(efx, selector, &id);
if (rc)
return rc;
/* The ID should always fit in 16 bits, because that's how wide the
@@ -747,9 +756,21 @@ static int efx_ef100_get_base_mport(struct efx_nic *efx)
id);
nic_data->base_mport = id;
nic_data->have_mport = true;
+
+ /* Construct mport selector for "calling PF" */
+ efx_mae_mport_uplink(efx, &selector);
+ /* Look up actual mport ID */
+ rc = efx_mae_fw_lookup_mport(efx, selector, &id);
+ if (rc)
+ return rc;
+ if (id >> 16)
+ netif_warn(efx, probe, efx->net_dev, "Bad own m-port id %#x\n",
+ id);
+ nic_data->own_mport = id;
+ nic_data->have_own_mport = true;
+
return 0;
}
-#endif
static int compare_versions(const char *a, const char *b)
{
@@ -1098,23 +1119,42 @@ fail:
return rc;
}
+/* MCDI commands are related to the same device issuing them. This function
+ * allows to do an MCDI command on behalf of another device, mainly PFs setting
+ * things for VFs.
+ */
+int efx_ef100_lookup_client_id(struct efx_nic *efx, efx_qword_t pciefn, u32 *id)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CLIENT_HANDLE_OUT_LEN);
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_CLIENT_HANDLE_IN_LEN);
+ u64 pciefn_flat = le64_to_cpu(pciefn.u64[0]);
+ size_t outlen;
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, GET_CLIENT_HANDLE_IN_TYPE,
+ MC_CMD_GET_CLIENT_HANDLE_IN_TYPE_FUNC);
+ MCDI_SET_QWORD(inbuf, GET_CLIENT_HANDLE_IN_FUNC,
+ pciefn_flat);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_CLIENT_HANDLE, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+ if (outlen < sizeof(outbuf))
+ return -EIO;
+ *id = MCDI_DWORD(outbuf, GET_CLIENT_HANDLE_OUT_HANDLE);
+ return 0;
+}
+
int ef100_probe_netdev_pf(struct efx_nic *efx)
{
struct ef100_nic_data *nic_data = efx->nic_data;
struct net_device *net_dev = efx->net_dev;
int rc;
- rc = ef100_get_mac_address(efx, net_dev->perm_addr);
- if (rc)
- goto fail;
- /* Assign MAC address */
- eth_hw_addr_set(net_dev, net_dev->perm_addr);
- memcpy(nic_data->port_id, net_dev->perm_addr, ETH_ALEN);
-
- if (!nic_data->grp_mae)
+ if (!IS_ENABLED(CONFIG_SFC_SRIOV) || !nic_data->grp_mae)
return 0;
-#ifdef CONFIG_SFC_SRIOV
rc = efx_init_struct_tc(efx);
if (rc)
return rc;
@@ -1126,6 +1166,14 @@ int ef100_probe_netdev_pf(struct efx_nic *efx)
rc);
}
+ rc = efx_init_mae(efx);
+ if (rc)
+ netif_warn(efx, probe, net_dev,
+ "Failed to init MAE rc %d; representors will not function\n",
+ rc);
+ else
+ efx_ef100_init_reps(efx);
+
rc = efx_init_tc(efx);
if (rc) {
/* Either we don't have an MAE at all (i.e. legacy v-switching),
@@ -1141,10 +1189,6 @@ int ef100_probe_netdev_pf(struct efx_nic *efx)
net_dev->features |= NETIF_F_HW_TC;
efx->fixed_features |= NETIF_F_HW_TC;
}
-#endif
- return 0;
-
-fail:
return rc;
}
@@ -1157,6 +1201,11 @@ void ef100_remove(struct efx_nic *efx)
{
struct ef100_nic_data *nic_data = efx->nic_data;
+ if (IS_ENABLED(CONFIG_SFC_SRIOV) && efx->mae) {
+ efx_ef100_fini_reps(efx);
+ efx_fini_mae(efx);
+ }
+
efx_mcdi_detach(efx);
efx_mcdi_fini(efx);
if (nic_data)
@@ -1249,9 +1298,8 @@ const struct efx_nic_type ef100_pf_nic_type = {
.update_stats = ef100_update_stats,
.pull_stats = efx_mcdi_mac_pull_stats,
.stop_stats = efx_mcdi_mac_stop_stats,
-#ifdef CONFIG_SFC_SRIOV
- .sriov_configure = efx_ef100_sriov_configure,
-#endif
+ .sriov_configure = IS_ENABLED(CONFIG_SFC_SRIOV) ?
+ efx_ef100_sriov_configure : NULL,
/* Per-type bar/size configuration not used on ef100. Location of
* registers is defined by extended capabilities.
diff --git a/drivers/net/ethernet/sfc/ef100_nic.h b/drivers/net/ethernet/sfc/ef100_nic.h
index 0295933145fa..f1ed481c1260 100644
--- a/drivers/net/ethernet/sfc/ef100_nic.h
+++ b/drivers/net/ethernet/sfc/ef100_nic.h
@@ -74,6 +74,10 @@ struct ef100_nic_data {
u64 stats[EF100_STAT_COUNT];
u32 base_mport;
bool have_mport; /* base_mport was populated successfully */
+ u32 own_mport;
+ u32 local_mae_intf; /* interface_idx that corresponds to us, in mport enumerate */
+ bool have_own_mport; /* own_mport was populated successfully */
+ bool have_local_intf; /* local_mae_intf was populated successfully */
bool grp_mae; /* MAE Privilege */
u16 tso_max_hdr_len;
u16 tso_max_payload_num_segs;
@@ -88,4 +92,7 @@ int efx_ef100_init_datapath_caps(struct efx_nic *efx);
int ef100_phy_probe(struct efx_nic *efx);
int ef100_filter_table_probe(struct efx_nic *efx);
+int ef100_get_mac_address(struct efx_nic *efx, u8 *mac_address,
+ int client_handle, bool empty_ok);
+int efx_ef100_lookup_client_id(struct efx_nic *efx, efx_qword_t pciefn, u32 *id);
#endif /* EFX_EF100_NIC_H */
diff --git a/drivers/net/ethernet/sfc/ef100_rep.c b/drivers/net/ethernet/sfc/ef100_rep.c
index 81ab22c74635..0b3083ef0ead 100644
--- a/drivers/net/ethernet/sfc/ef100_rep.c
+++ b/drivers/net/ethernet/sfc/ef100_rep.c
@@ -9,12 +9,14 @@
* by the Free Software Foundation, incorporated herein by reference.
*/
+#include <linux/rhashtable.h>
#include "ef100_rep.h"
#include "ef100_netdev.h"
#include "ef100_nic.h"
#include "mae.h"
#include "rx_common.h"
#include "tc_bindings.h"
+#include "efx_devlink.h"
#define EFX_EF100_REP_DRIVER "efx_ef100_rep"
@@ -242,14 +244,11 @@ fail1:
static int efx_ef100_configure_rep(struct efx_rep *efv)
{
struct efx_nic *efx = efv->parent;
- u32 selector;
int rc;
efv->rx_pring_size = EFX_REP_DEFAULT_PSEUDO_RING_SIZE;
- /* Construct mport selector for corresponding VF */
- efx_mae_mport_vf(efx, efv->idx, &selector);
/* Look up actual mport ID */
- rc = efx_mae_lookup_mport(efx, selector, &efv->mport);
+ rc = efx_mae_lookup_mport(efx, efv->idx, &efv->mport);
if (rc)
return rc;
pci_dbg(efx->pci_dev, "VF %u has mport ID %#x\n", efv->idx, efv->mport);
@@ -299,6 +298,7 @@ int efx_ef100_vfrep_create(struct efx_nic *efx, unsigned int i)
i, rc);
goto fail1;
}
+ ef100_rep_set_devlink_port(efv);
rc = register_netdev(efv->net_dev);
if (rc) {
pci_err(efx->pci_dev,
@@ -310,6 +310,7 @@ int efx_ef100_vfrep_create(struct efx_nic *efx, unsigned int i)
efv->net_dev->name);
return 0;
fail2:
+ ef100_rep_unset_devlink_port(efv);
efx_ef100_deconfigure_rep(efv);
fail1:
efx_ef100_rep_destroy_netdev(efv);
@@ -325,6 +326,7 @@ void efx_ef100_vfrep_destroy(struct efx_nic *efx, struct efx_rep *efv)
return;
netif_dbg(efx, drv, rep_dev, "Removing VF representor\n");
unregister_netdev(rep_dev);
+ ef100_rep_unset_devlink_port(efv);
efx_ef100_deconfigure_rep(efv);
efx_ef100_rep_destroy_netdev(efv);
}
@@ -341,6 +343,53 @@ void efx_ef100_fini_vfreps(struct efx_nic *efx)
efx_ef100_vfrep_destroy(efx, efv);
}
+static bool ef100_mport_is_pcie_vnic(struct mae_mport_desc *mport_desc)
+{
+ return mport_desc->mport_type == MAE_MPORT_DESC_MPORT_TYPE_VNIC &&
+ mport_desc->vnic_client_type == MAE_MPORT_DESC_VNIC_CLIENT_TYPE_FUNCTION;
+}
+
+bool ef100_mport_on_local_intf(struct efx_nic *efx,
+ struct mae_mport_desc *mport_desc)
+{
+ struct ef100_nic_data *nic_data = efx->nic_data;
+ bool pcie_func;
+
+ pcie_func = ef100_mport_is_pcie_vnic(mport_desc);
+
+ return nic_data->have_local_intf && pcie_func &&
+ mport_desc->interface_idx == nic_data->local_mae_intf;
+}
+
+bool ef100_mport_is_vf(struct mae_mport_desc *mport_desc)
+{
+ bool pcie_func;
+
+ pcie_func = ef100_mport_is_pcie_vnic(mport_desc);
+ return pcie_func && (mport_desc->vf_idx != MAE_MPORT_DESC_VF_IDX_NULL);
+}
+
+void efx_ef100_init_reps(struct efx_nic *efx)
+{
+ struct ef100_nic_data *nic_data = efx->nic_data;
+ int rc;
+
+ nic_data->have_local_intf = false;
+ rc = efx_mae_enumerate_mports(efx);
+ if (rc)
+ pci_warn(efx->pci_dev,
+ "Could not enumerate mports (rc=%d), are we admin?",
+ rc);
+}
+
+void efx_ef100_fini_reps(struct efx_nic *efx)
+{
+ struct efx_mae *mae = efx->mae;
+
+ rhashtable_free_and_destroy(&mae->mports_ht, efx_mae_remove_mport,
+ NULL);
+}
+
static int efx_ef100_rep_poll(struct napi_struct *napi, int weight)
{
struct efx_rep *efv = container_of(napi, struct efx_rep, napi);
diff --git a/drivers/net/ethernet/sfc/ef100_rep.h b/drivers/net/ethernet/sfc/ef100_rep.h
index c21bc716f847..a042525a2240 100644
--- a/drivers/net/ethernet/sfc/ef100_rep.h
+++ b/drivers/net/ethernet/sfc/ef100_rep.h
@@ -22,6 +22,8 @@ struct efx_rep_sw_stats {
atomic64_t rx_dropped, tx_errors;
};
+struct devlink_port;
+
/**
* struct efx_rep - Private data for an Efx representor
*
@@ -39,6 +41,7 @@ struct efx_rep_sw_stats {
* @rx_lock: protects @rx_list
* @napi: NAPI control structure
* @stats: software traffic counters for netdev stats
+ * @dl_port: devlink port associated to this netdev representor
*/
struct efx_rep {
struct efx_nic *parent;
@@ -54,6 +57,7 @@ struct efx_rep {
spinlock_t rx_lock;
struct napi_struct napi;
struct efx_rep_sw_stats stats;
+ struct devlink_port *dl_port;
};
int efx_ef100_vfrep_create(struct efx_nic *efx, unsigned int i);
@@ -67,4 +71,10 @@ void efx_ef100_rep_rx_packet(struct efx_rep *efv, struct efx_rx_buffer *rx_buf);
*/
struct efx_rep *efx_ef100_find_rep_by_mport(struct efx_nic *efx, u16 mport);
extern const struct net_device_ops efx_ef100_rep_netdev_ops;
+void efx_ef100_init_reps(struct efx_nic *efx);
+void efx_ef100_fini_reps(struct efx_nic *efx);
+struct mae_mport_desc;
+bool ef100_mport_on_local_intf(struct efx_nic *efx,
+ struct mae_mport_desc *mport_desc);
+bool ef100_mport_is_vf(struct mae_mport_desc *mport_desc);
#endif /* EF100_REP_H */
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 0556542d7a6b..02c2adeb0a12 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -1003,8 +1003,11 @@ static int efx_pci_probe_post_io(struct efx_nic *efx)
/* Determine netdevice features */
net_dev->features |= (efx->type->offload_features | NETIF_F_SG |
NETIF_F_TSO | NETIF_F_RXCSUM | NETIF_F_RXALL);
- if (efx->type->offload_features & (NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM))
+ if (efx->type->offload_features & (NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM)) {
net_dev->features |= NETIF_F_TSO6;
+ if (efx_has_cap(efx, TX_TSO_V2_ENCAP))
+ net_dev->hw_enc_features |= NETIF_F_TSO6;
+ }
/* Check whether device supports TSO */
if (!efx->type->tso_versions || !efx->type->tso_versions(efx))
net_dev->features &= ~NETIF_F_ALL_TSO;
@@ -1025,6 +1028,10 @@ static int efx_pci_probe_post_io(struct efx_nic *efx)
net_dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
net_dev->features |= efx->fixed_features;
+ net_dev->xdp_features = NETDEV_XDP_ACT_BASIC |
+ NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT;
+
rc = efx_register_netdev(efx);
if (!rc)
return 0;
diff --git a/drivers/net/ethernet/sfc/efx_devlink.c b/drivers/net/ethernet/sfc/efx_devlink.c
new file mode 100644
index 000000000000..381b805659d3
--- /dev/null
+++ b/drivers/net/ethernet/sfc/efx_devlink.c
@@ -0,0 +1,731 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/****************************************************************************
+ * Driver for AMD network controllers and boards
+ * Copyright (C) 2023, Advanced Micro Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include "net_driver.h"
+#include "ef100_nic.h"
+#include "efx_devlink.h"
+#include <linux/rtc.h>
+#include "mcdi.h"
+#include "mcdi_functions.h"
+#include "mcdi_pcol.h"
+#ifdef CONFIG_SFC_SRIOV
+#include "mae.h"
+#include "ef100_rep.h"
+#endif
+
+struct efx_devlink {
+ struct efx_nic *efx;
+};
+
+#ifdef CONFIG_SFC_SRIOV
+static void efx_devlink_del_port(struct devlink_port *dl_port)
+{
+ if (!dl_port)
+ return;
+ devl_port_unregister(dl_port);
+}
+
+static int efx_devlink_add_port(struct efx_nic *efx,
+ struct mae_mport_desc *mport)
+{
+ bool external = false;
+
+ if (!ef100_mport_on_local_intf(efx, mport))
+ external = true;
+
+ switch (mport->mport_type) {
+ case MAE_MPORT_DESC_MPORT_TYPE_VNIC:
+ if (mport->vf_idx != MAE_MPORT_DESC_VF_IDX_NULL)
+ devlink_port_attrs_pci_vf_set(&mport->dl_port, 0, mport->pf_idx,
+ mport->vf_idx,
+ external);
+ else
+ devlink_port_attrs_pci_pf_set(&mport->dl_port, 0, mport->pf_idx,
+ external);
+ break;
+ default:
+ /* MAE_MPORT_DESC_MPORT_ALIAS and UNDEFINED */
+ return 0;
+ }
+
+ mport->dl_port.index = mport->mport_id;
+
+ return devl_port_register(efx->devlink, &mport->dl_port, mport->mport_id);
+}
+
+static int efx_devlink_port_addr_get(struct devlink_port *port, u8 *hw_addr,
+ int *hw_addr_len,
+ struct netlink_ext_ack *extack)
+{
+ struct efx_devlink *devlink = devlink_priv(port->devlink);
+ struct mae_mport_desc *mport_desc;
+ efx_qword_t pciefn;
+ u32 client_id;
+ int rc = 0;
+
+ mport_desc = container_of(port, struct mae_mport_desc, dl_port);
+
+ if (!ef100_mport_on_local_intf(devlink->efx, mport_desc)) {
+ rc = -EINVAL;
+ NL_SET_ERR_MSG_FMT(extack,
+ "Port not on local interface (mport: %u)",
+ mport_desc->mport_id);
+ goto out;
+ }
+
+ if (ef100_mport_is_vf(mport_desc))
+ EFX_POPULATE_QWORD_3(pciefn,
+ PCIE_FUNCTION_PF, PCIE_FUNCTION_PF_NULL,
+ PCIE_FUNCTION_VF, mport_desc->vf_idx,
+ PCIE_FUNCTION_INTF, PCIE_INTERFACE_CALLER);
+ else
+ EFX_POPULATE_QWORD_3(pciefn,
+ PCIE_FUNCTION_PF, mport_desc->pf_idx,
+ PCIE_FUNCTION_VF, PCIE_FUNCTION_VF_NULL,
+ PCIE_FUNCTION_INTF, PCIE_INTERFACE_CALLER);
+
+ rc = efx_ef100_lookup_client_id(devlink->efx, pciefn, &client_id);
+ if (rc) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "No internal client_ID for port (mport: %u)",
+ mport_desc->mport_id);
+ goto out;
+ }
+
+ rc = ef100_get_mac_address(devlink->efx, hw_addr, client_id, true);
+ if (rc != 0)
+ NL_SET_ERR_MSG_FMT(extack,
+ "No available MAC for port (mport: %u)",
+ mport_desc->mport_id);
+out:
+ *hw_addr_len = ETH_ALEN;
+ return rc;
+}
+
+static int efx_devlink_port_addr_set(struct devlink_port *port,
+ const u8 *hw_addr, int hw_addr_len,
+ struct netlink_ext_ack *extack)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_CLIENT_MAC_ADDRESSES_IN_LEN(1));
+ struct efx_devlink *devlink = devlink_priv(port->devlink);
+ struct mae_mport_desc *mport_desc;
+ efx_qword_t pciefn;
+ u32 client_id;
+ int rc;
+
+ mport_desc = container_of(port, struct mae_mport_desc, dl_port);
+
+ if (!ef100_mport_is_vf(mport_desc)) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "port mac change not allowed (mport: %u)",
+ mport_desc->mport_id);
+ return -EPERM;
+ }
+
+ EFX_POPULATE_QWORD_3(pciefn,
+ PCIE_FUNCTION_PF, PCIE_FUNCTION_PF_NULL,
+ PCIE_FUNCTION_VF, mport_desc->vf_idx,
+ PCIE_FUNCTION_INTF, PCIE_INTERFACE_CALLER);
+
+ rc = efx_ef100_lookup_client_id(devlink->efx, pciefn, &client_id);
+ if (rc) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "No internal client_ID for port (mport: %u)",
+ mport_desc->mport_id);
+ return rc;
+ }
+
+ MCDI_SET_DWORD(inbuf, SET_CLIENT_MAC_ADDRESSES_IN_CLIENT_HANDLE,
+ client_id);
+
+ ether_addr_copy(MCDI_PTR(inbuf, SET_CLIENT_MAC_ADDRESSES_IN_MAC_ADDRS),
+ hw_addr);
+
+ rc = efx_mcdi_rpc(devlink->efx, MC_CMD_SET_CLIENT_MAC_ADDRESSES, inbuf,
+ sizeof(inbuf), NULL, 0, NULL);
+ if (rc)
+ NL_SET_ERR_MSG_FMT(extack,
+ "sfc MC_CMD_SET_CLIENT_MAC_ADDRESSES mcdi error (mport: %u)",
+ mport_desc->mport_id);
+
+ return rc;
+}
+
+#endif
+
+static int efx_devlink_info_nvram_partition(struct efx_nic *efx,
+ struct devlink_info_req *req,
+ unsigned int partition_type,
+ const char *version_name)
+{
+ char buf[EFX_MAX_VERSION_INFO_LEN];
+ u16 version[4];
+ int rc;
+
+ rc = efx_mcdi_nvram_metadata(efx, partition_type, NULL, version, NULL,
+ 0);
+ if (rc) {
+ netif_err(efx, drv, efx->net_dev, "mcdi nvram %s: failed\n",
+ version_name);
+ return rc;
+ }
+
+ snprintf(buf, EFX_MAX_VERSION_INFO_LEN, "%u.%u.%u.%u", version[0],
+ version[1], version[2], version[3]);
+ devlink_info_version_stored_put(req, version_name, buf);
+
+ return 0;
+}
+
+static int efx_devlink_info_stored_versions(struct efx_nic *efx,
+ struct devlink_info_req *req)
+{
+ int rc;
+
+ rc = efx_devlink_info_nvram_partition(efx, req,
+ NVRAM_PARTITION_TYPE_BUNDLE,
+ DEVLINK_INFO_VERSION_GENERIC_FW_BUNDLE_ID);
+ if (rc)
+ return rc;
+
+ rc = efx_devlink_info_nvram_partition(efx, req,
+ NVRAM_PARTITION_TYPE_MC_FIRMWARE,
+ DEVLINK_INFO_VERSION_GENERIC_FW_MGMT);
+ if (rc)
+ return rc;
+
+ rc = efx_devlink_info_nvram_partition(efx, req,
+ NVRAM_PARTITION_TYPE_SUC_FIRMWARE,
+ EFX_DEVLINK_INFO_VERSION_FW_MGMT_SUC);
+ if (rc)
+ return rc;
+
+ rc = efx_devlink_info_nvram_partition(efx, req,
+ NVRAM_PARTITION_TYPE_EXPANSION_ROM,
+ EFX_DEVLINK_INFO_VERSION_FW_EXPROM);
+ if (rc)
+ return rc;
+
+ rc = efx_devlink_info_nvram_partition(efx, req,
+ NVRAM_PARTITION_TYPE_EXPANSION_UEFI,
+ EFX_DEVLINK_INFO_VERSION_FW_UEFI);
+ return rc;
+}
+
+#define EFX_VER_FLAG(_f) \
+ (MC_CMD_GET_VERSION_V5_OUT_ ## _f ## _PRESENT_LBN)
+
+static void efx_devlink_info_running_v2(struct efx_nic *efx,
+ struct devlink_info_req *req,
+ unsigned int flags, efx_dword_t *outbuf)
+{
+ char buf[EFX_MAX_VERSION_INFO_LEN];
+ union {
+ const __le32 *dwords;
+ const __le16 *words;
+ const char *str;
+ } ver;
+ struct rtc_time build_date;
+ unsigned int build_id;
+ size_t offset;
+ __maybe_unused u64 tstamp;
+
+ if (flags & BIT(EFX_VER_FLAG(BOARD_EXT_INFO))) {
+ snprintf(buf, EFX_MAX_VERSION_INFO_LEN, "%s",
+ MCDI_PTR(outbuf, GET_VERSION_V2_OUT_BOARD_NAME));
+ devlink_info_version_fixed_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_BOARD_ID,
+ buf);
+
+ /* Favour full board version if present (in V5 or later) */
+ if (~flags & BIT(EFX_VER_FLAG(BOARD_VERSION))) {
+ snprintf(buf, EFX_MAX_VERSION_INFO_LEN, "%u",
+ MCDI_DWORD(outbuf,
+ GET_VERSION_V2_OUT_BOARD_REVISION));
+ devlink_info_version_fixed_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_BOARD_REV,
+ buf);
+ }
+
+ ver.str = MCDI_PTR(outbuf, GET_VERSION_V2_OUT_BOARD_SERIAL);
+ if (ver.str[0])
+ devlink_info_board_serial_number_put(req, ver.str);
+ }
+
+ if (flags & BIT(EFX_VER_FLAG(FPGA_EXT_INFO))) {
+ ver.dwords = (__le32 *)MCDI_PTR(outbuf,
+ GET_VERSION_V2_OUT_FPGA_VERSION);
+ offset = snprintf(buf, EFX_MAX_VERSION_INFO_LEN, "%u_%c%u",
+ le32_to_cpu(ver.dwords[0]),
+ 'A' + le32_to_cpu(ver.dwords[1]),
+ le32_to_cpu(ver.dwords[2]));
+
+ ver.str = MCDI_PTR(outbuf, GET_VERSION_V2_OUT_FPGA_EXTRA);
+ if (ver.str[0])
+ snprintf(&buf[offset], EFX_MAX_VERSION_INFO_LEN - offset,
+ " (%s)", ver.str);
+
+ devlink_info_version_running_put(req,
+ EFX_DEVLINK_INFO_VERSION_FPGA_REV,
+ buf);
+ }
+
+ if (flags & BIT(EFX_VER_FLAG(CMC_EXT_INFO))) {
+ ver.dwords = (__le32 *)MCDI_PTR(outbuf,
+ GET_VERSION_V2_OUT_CMCFW_VERSION);
+ offset = snprintf(buf, EFX_MAX_VERSION_INFO_LEN, "%u.%u.%u.%u",
+ le32_to_cpu(ver.dwords[0]),
+ le32_to_cpu(ver.dwords[1]),
+ le32_to_cpu(ver.dwords[2]),
+ le32_to_cpu(ver.dwords[3]));
+
+#ifdef CONFIG_RTC_LIB
+ tstamp = MCDI_QWORD(outbuf,
+ GET_VERSION_V2_OUT_CMCFW_BUILD_DATE);
+ if (tstamp) {
+ rtc_time64_to_tm(tstamp, &build_date);
+ snprintf(&buf[offset], EFX_MAX_VERSION_INFO_LEN - offset,
+ " (%ptRd)", &build_date);
+ }
+#endif
+
+ devlink_info_version_running_put(req,
+ EFX_DEVLINK_INFO_VERSION_FW_MGMT_CMC,
+ buf);
+ }
+
+ ver.words = (__le16 *)MCDI_PTR(outbuf, GET_VERSION_V2_OUT_VERSION);
+ offset = snprintf(buf, EFX_MAX_VERSION_INFO_LEN, "%u.%u.%u.%u",
+ le16_to_cpu(ver.words[0]), le16_to_cpu(ver.words[1]),
+ le16_to_cpu(ver.words[2]), le16_to_cpu(ver.words[3]));
+ if (flags & BIT(EFX_VER_FLAG(MCFW_EXT_INFO))) {
+ build_id = MCDI_DWORD(outbuf, GET_VERSION_V2_OUT_MCFW_BUILD_ID);
+ snprintf(&buf[offset], EFX_MAX_VERSION_INFO_LEN - offset,
+ " (%x) %s", build_id,
+ MCDI_PTR(outbuf, GET_VERSION_V2_OUT_MCFW_BUILD_NAME));
+ }
+ devlink_info_version_running_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW_MGMT,
+ buf);
+
+ if (flags & BIT(EFX_VER_FLAG(SUCFW_EXT_INFO))) {
+ ver.dwords = (__le32 *)MCDI_PTR(outbuf,
+ GET_VERSION_V2_OUT_SUCFW_VERSION);
+#ifdef CONFIG_RTC_LIB
+ tstamp = MCDI_QWORD(outbuf,
+ GET_VERSION_V2_OUT_SUCFW_BUILD_DATE);
+ rtc_time64_to_tm(tstamp, &build_date);
+#else
+ memset(&build_date, 0, sizeof(build_date));
+#endif
+ build_id = MCDI_DWORD(outbuf, GET_VERSION_V2_OUT_SUCFW_CHIP_ID);
+
+ snprintf(buf, EFX_MAX_VERSION_INFO_LEN,
+ "%u.%u.%u.%u type %x (%ptRd)",
+ le32_to_cpu(ver.dwords[0]), le32_to_cpu(ver.dwords[1]),
+ le32_to_cpu(ver.dwords[2]), le32_to_cpu(ver.dwords[3]),
+ build_id, &build_date);
+
+ devlink_info_version_running_put(req,
+ EFX_DEVLINK_INFO_VERSION_FW_MGMT_SUC,
+ buf);
+ }
+}
+
+static void efx_devlink_info_running_v3(struct efx_nic *efx,
+ struct devlink_info_req *req,
+ unsigned int flags, efx_dword_t *outbuf)
+{
+ char buf[EFX_MAX_VERSION_INFO_LEN];
+ union {
+ const __le32 *dwords;
+ const __le16 *words;
+ const char *str;
+ } ver;
+
+ if (flags & BIT(EFX_VER_FLAG(DATAPATH_HW_VERSION))) {
+ ver.dwords = (__le32 *)MCDI_PTR(outbuf,
+ GET_VERSION_V3_OUT_DATAPATH_HW_VERSION);
+
+ snprintf(buf, EFX_MAX_VERSION_INFO_LEN, "%u.%u.%u",
+ le32_to_cpu(ver.dwords[0]), le32_to_cpu(ver.dwords[1]),
+ le32_to_cpu(ver.dwords[2]));
+
+ devlink_info_version_running_put(req,
+ EFX_DEVLINK_INFO_VERSION_DATAPATH_HW,
+ buf);
+ }
+
+ if (flags & BIT(EFX_VER_FLAG(DATAPATH_FW_VERSION))) {
+ ver.dwords = (__le32 *)MCDI_PTR(outbuf,
+ GET_VERSION_V3_OUT_DATAPATH_FW_VERSION);
+
+ snprintf(buf, EFX_MAX_VERSION_INFO_LEN, "%u.%u.%u",
+ le32_to_cpu(ver.dwords[0]), le32_to_cpu(ver.dwords[1]),
+ le32_to_cpu(ver.dwords[2]));
+
+ devlink_info_version_running_put(req,
+ EFX_DEVLINK_INFO_VERSION_DATAPATH_FW,
+ buf);
+ }
+}
+
+static void efx_devlink_info_running_v4(struct efx_nic *efx,
+ struct devlink_info_req *req,
+ unsigned int flags, efx_dword_t *outbuf)
+{
+ char buf[EFX_MAX_VERSION_INFO_LEN];
+ union {
+ const __le32 *dwords;
+ const __le16 *words;
+ const char *str;
+ } ver;
+
+ if (flags & BIT(EFX_VER_FLAG(SOC_BOOT_VERSION))) {
+ ver.dwords = (__le32 *)MCDI_PTR(outbuf,
+ GET_VERSION_V4_OUT_SOC_BOOT_VERSION);
+
+ snprintf(buf, EFX_MAX_VERSION_INFO_LEN, "%u.%u.%u.%u",
+ le32_to_cpu(ver.dwords[0]), le32_to_cpu(ver.dwords[1]),
+ le32_to_cpu(ver.dwords[2]),
+ le32_to_cpu(ver.dwords[3]));
+
+ devlink_info_version_running_put(req,
+ EFX_DEVLINK_INFO_VERSION_SOC_BOOT,
+ buf);
+ }
+
+ if (flags & BIT(EFX_VER_FLAG(SOC_UBOOT_VERSION))) {
+ ver.dwords = (__le32 *)MCDI_PTR(outbuf,
+ GET_VERSION_V4_OUT_SOC_UBOOT_VERSION);
+
+ snprintf(buf, EFX_MAX_VERSION_INFO_LEN, "%u.%u.%u.%u",
+ le32_to_cpu(ver.dwords[0]), le32_to_cpu(ver.dwords[1]),
+ le32_to_cpu(ver.dwords[2]),
+ le32_to_cpu(ver.dwords[3]));
+
+ devlink_info_version_running_put(req,
+ EFX_DEVLINK_INFO_VERSION_SOC_UBOOT,
+ buf);
+ }
+
+ if (flags & BIT(EFX_VER_FLAG(SOC_MAIN_ROOTFS_VERSION))) {
+ ver.dwords = (__le32 *)MCDI_PTR(outbuf,
+ GET_VERSION_V4_OUT_SOC_MAIN_ROOTFS_VERSION);
+
+ snprintf(buf, EFX_MAX_VERSION_INFO_LEN, "%u.%u.%u.%u",
+ le32_to_cpu(ver.dwords[0]), le32_to_cpu(ver.dwords[1]),
+ le32_to_cpu(ver.dwords[2]),
+ le32_to_cpu(ver.dwords[3]));
+
+ devlink_info_version_running_put(req,
+ EFX_DEVLINK_INFO_VERSION_SOC_MAIN,
+ buf);
+ }
+
+ if (flags & BIT(EFX_VER_FLAG(SOC_RECOVERY_BUILDROOT_VERSION))) {
+ ver.dwords = (__le32 *)MCDI_PTR(outbuf,
+ GET_VERSION_V4_OUT_SOC_RECOVERY_BUILDROOT_VERSION);
+
+ snprintf(buf, EFX_MAX_VERSION_INFO_LEN, "%u.%u.%u.%u",
+ le32_to_cpu(ver.dwords[0]), le32_to_cpu(ver.dwords[1]),
+ le32_to_cpu(ver.dwords[2]),
+ le32_to_cpu(ver.dwords[3]));
+
+ devlink_info_version_running_put(req,
+ EFX_DEVLINK_INFO_VERSION_SOC_RECOVERY,
+ buf);
+ }
+
+ if (flags & BIT(EFX_VER_FLAG(SUCFW_VERSION)) &&
+ ~flags & BIT(EFX_VER_FLAG(SUCFW_EXT_INFO))) {
+ ver.dwords = (__le32 *)MCDI_PTR(outbuf,
+ GET_VERSION_V4_OUT_SUCFW_VERSION);
+
+ snprintf(buf, EFX_MAX_VERSION_INFO_LEN, "%u.%u.%u.%u",
+ le32_to_cpu(ver.dwords[0]), le32_to_cpu(ver.dwords[1]),
+ le32_to_cpu(ver.dwords[2]),
+ le32_to_cpu(ver.dwords[3]));
+
+ devlink_info_version_running_put(req,
+ EFX_DEVLINK_INFO_VERSION_FW_MGMT_SUC,
+ buf);
+ }
+}
+
+static void efx_devlink_info_running_v5(struct efx_nic *efx,
+ struct devlink_info_req *req,
+ unsigned int flags, efx_dword_t *outbuf)
+{
+ char buf[EFX_MAX_VERSION_INFO_LEN];
+ union {
+ const __le32 *dwords;
+ const __le16 *words;
+ const char *str;
+ } ver;
+
+ if (flags & BIT(EFX_VER_FLAG(BOARD_VERSION))) {
+ ver.dwords = (__le32 *)MCDI_PTR(outbuf,
+ GET_VERSION_V5_OUT_BOARD_VERSION);
+
+ snprintf(buf, EFX_MAX_VERSION_INFO_LEN, "%u.%u.%u.%u",
+ le32_to_cpu(ver.dwords[0]), le32_to_cpu(ver.dwords[1]),
+ le32_to_cpu(ver.dwords[2]),
+ le32_to_cpu(ver.dwords[3]));
+
+ devlink_info_version_running_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_BOARD_REV,
+ buf);
+ }
+
+ if (flags & BIT(EFX_VER_FLAG(BUNDLE_VERSION))) {
+ ver.dwords = (__le32 *)MCDI_PTR(outbuf,
+ GET_VERSION_V5_OUT_BUNDLE_VERSION);
+
+ snprintf(buf, EFX_MAX_VERSION_INFO_LEN, "%u.%u.%u.%u",
+ le32_to_cpu(ver.dwords[0]), le32_to_cpu(ver.dwords[1]),
+ le32_to_cpu(ver.dwords[2]),
+ le32_to_cpu(ver.dwords[3]));
+
+ devlink_info_version_running_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW_BUNDLE_ID,
+ buf);
+ }
+}
+
+static int efx_devlink_info_running_versions(struct efx_nic *efx,
+ struct devlink_info_req *req)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_VERSION_V5_OUT_LEN);
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_VERSION_EXT_IN_LEN);
+ char buf[EFX_MAX_VERSION_INFO_LEN];
+ union {
+ const __le32 *dwords;
+ const __le16 *words;
+ const char *str;
+ } ver;
+ size_t outlength;
+ unsigned int flags;
+ int rc;
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_VERSION, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlength);
+ if (rc || outlength < MC_CMD_GET_VERSION_OUT_LEN) {
+ netif_err(efx, drv, efx->net_dev,
+ "mcdi MC_CMD_GET_VERSION failed\n");
+ return rc;
+ }
+
+ /* Handle previous output */
+ if (outlength < MC_CMD_GET_VERSION_V2_OUT_LEN) {
+ ver.words = (__le16 *)MCDI_PTR(outbuf,
+ GET_VERSION_EXT_OUT_VERSION);
+ snprintf(buf, EFX_MAX_VERSION_INFO_LEN, "%u.%u.%u.%u",
+ le16_to_cpu(ver.words[0]),
+ le16_to_cpu(ver.words[1]),
+ le16_to_cpu(ver.words[2]),
+ le16_to_cpu(ver.words[3]));
+
+ devlink_info_version_running_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW_MGMT,
+ buf);
+ return 0;
+ }
+
+ /* Handle V2 additions */
+ flags = MCDI_DWORD(outbuf, GET_VERSION_V2_OUT_FLAGS);
+ efx_devlink_info_running_v2(efx, req, flags, outbuf);
+
+ if (outlength < MC_CMD_GET_VERSION_V3_OUT_LEN)
+ return 0;
+
+ /* Handle V3 additions */
+ efx_devlink_info_running_v3(efx, req, flags, outbuf);
+
+ if (outlength < MC_CMD_GET_VERSION_V4_OUT_LEN)
+ return 0;
+
+ /* Handle V4 additions */
+ efx_devlink_info_running_v4(efx, req, flags, outbuf);
+
+ if (outlength < MC_CMD_GET_VERSION_V5_OUT_LEN)
+ return 0;
+
+ /* Handle V5 additions */
+ efx_devlink_info_running_v5(efx, req, flags, outbuf);
+
+ return 0;
+}
+
+#define EFX_MAX_SERIALNUM_LEN (ETH_ALEN * 2 + 1)
+
+static int efx_devlink_info_board_cfg(struct efx_nic *efx,
+ struct devlink_info_req *req)
+{
+ char sn[EFX_MAX_SERIALNUM_LEN];
+ u8 mac_address[ETH_ALEN];
+ int rc;
+
+ rc = efx_mcdi_get_board_cfg(efx, (u8 *)mac_address, NULL, NULL);
+ if (!rc) {
+ snprintf(sn, EFX_MAX_SERIALNUM_LEN, "%pm", mac_address);
+ devlink_info_serial_number_put(req, sn);
+ }
+ return rc;
+}
+
+static int efx_devlink_info_get(struct devlink *devlink,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+ struct efx_devlink *devlink_private = devlink_priv(devlink);
+ struct efx_nic *efx = devlink_private->efx;
+ int rc;
+
+ /* Several different MCDI commands are used. We report first error
+ * through extack returning at that point. Specific error
+ * information via system messages.
+ */
+ rc = efx_devlink_info_board_cfg(efx, req);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack, "Getting board info failed");
+ return rc;
+ }
+ rc = efx_devlink_info_stored_versions(efx, req);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack, "Getting stored versions failed");
+ return rc;
+ }
+ rc = efx_devlink_info_running_versions(efx, req);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack, "Getting running versions failed");
+ return rc;
+ }
+
+ return 0;
+}
+
+static const struct devlink_ops sfc_devlink_ops = {
+ .info_get = efx_devlink_info_get,
+#ifdef CONFIG_SFC_SRIOV
+ .port_function_hw_addr_get = efx_devlink_port_addr_get,
+ .port_function_hw_addr_set = efx_devlink_port_addr_set,
+#endif
+};
+
+#ifdef CONFIG_SFC_SRIOV
+static struct devlink_port *ef100_set_devlink_port(struct efx_nic *efx, u32 idx)
+{
+ struct mae_mport_desc *mport;
+ u32 id;
+ int rc;
+
+ if (efx_mae_lookup_mport(efx, idx, &id)) {
+ /* This should not happen. */
+ if (idx == MAE_MPORT_DESC_VF_IDX_NULL)
+ pci_warn_once(efx->pci_dev, "No mport ID found for PF.\n");
+ else
+ pci_warn_once(efx->pci_dev, "No mport ID found for VF %u.\n",
+ idx);
+ return NULL;
+ }
+
+ mport = efx_mae_get_mport(efx, id);
+ if (!mport) {
+ /* This should not happen. */
+ if (idx == MAE_MPORT_DESC_VF_IDX_NULL)
+ pci_warn_once(efx->pci_dev, "No mport found for PF.\n");
+ else
+ pci_warn_once(efx->pci_dev, "No mport found for VF %u.\n",
+ idx);
+ return NULL;
+ }
+
+ rc = efx_devlink_add_port(efx, mport);
+ if (rc) {
+ if (idx == MAE_MPORT_DESC_VF_IDX_NULL)
+ pci_warn(efx->pci_dev,
+ "devlink port creation for PF failed.\n");
+ else
+ pci_warn(efx->pci_dev,
+ "devlink_port creation for VF %u failed.\n",
+ idx);
+ return NULL;
+ }
+
+ return &mport->dl_port;
+}
+
+void ef100_rep_set_devlink_port(struct efx_rep *efv)
+{
+ efv->dl_port = ef100_set_devlink_port(efv->parent, efv->idx);
+}
+
+void ef100_pf_set_devlink_port(struct efx_nic *efx)
+{
+ efx->dl_port = ef100_set_devlink_port(efx, MAE_MPORT_DESC_VF_IDX_NULL);
+}
+
+void ef100_rep_unset_devlink_port(struct efx_rep *efv)
+{
+ efx_devlink_del_port(efv->dl_port);
+}
+
+void ef100_pf_unset_devlink_port(struct efx_nic *efx)
+{
+ efx_devlink_del_port(efx->dl_port);
+}
+#endif
+
+void efx_fini_devlink_lock(struct efx_nic *efx)
+{
+ if (efx->devlink)
+ devl_lock(efx->devlink);
+}
+
+void efx_fini_devlink_and_unlock(struct efx_nic *efx)
+{
+ if (efx->devlink) {
+ devl_unregister(efx->devlink);
+ devl_unlock(efx->devlink);
+ devlink_free(efx->devlink);
+ efx->devlink = NULL;
+ }
+}
+
+int efx_probe_devlink_and_lock(struct efx_nic *efx)
+{
+ struct efx_devlink *devlink_private;
+
+ if (efx->type->is_vf)
+ return 0;
+
+ efx->devlink = devlink_alloc(&sfc_devlink_ops,
+ sizeof(struct efx_devlink),
+ &efx->pci_dev->dev);
+ if (!efx->devlink)
+ return -ENOMEM;
+
+ devl_lock(efx->devlink);
+ devlink_private = devlink_priv(efx->devlink);
+ devlink_private->efx = efx;
+
+ devl_register(efx->devlink);
+
+ return 0;
+}
+
+void efx_probe_devlink_unlock(struct efx_nic *efx)
+{
+ if (!efx->devlink)
+ return;
+
+ devl_unlock(efx->devlink);
+}
diff --git a/drivers/net/ethernet/sfc/efx_devlink.h b/drivers/net/ethernet/sfc/efx_devlink.h
new file mode 100644
index 000000000000..e5fd5e1dcc27
--- /dev/null
+++ b/drivers/net/ethernet/sfc/efx_devlink.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/****************************************************************************
+ * Driver for AMD network controllers and boards
+ * Copyright (C) 2023, Advanced Micro Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef _EFX_DEVLINK_H
+#define _EFX_DEVLINK_H
+
+#include "net_driver.h"
+#include <net/devlink.h>
+
+/* Custom devlink-info version object names for details that do not map to the
+ * generic standardized names.
+ */
+#define EFX_DEVLINK_INFO_VERSION_FW_MGMT_SUC "fw.mgmt.suc"
+#define EFX_DEVLINK_INFO_VERSION_FW_MGMT_CMC "fw.mgmt.cmc"
+#define EFX_DEVLINK_INFO_VERSION_FPGA_REV "fpga.rev"
+#define EFX_DEVLINK_INFO_VERSION_DATAPATH_HW "fpga.app"
+#define EFX_DEVLINK_INFO_VERSION_DATAPATH_FW DEVLINK_INFO_VERSION_GENERIC_FW_APP
+#define EFX_DEVLINK_INFO_VERSION_SOC_BOOT "coproc.boot"
+#define EFX_DEVLINK_INFO_VERSION_SOC_UBOOT "coproc.uboot"
+#define EFX_DEVLINK_INFO_VERSION_SOC_MAIN "coproc.main"
+#define EFX_DEVLINK_INFO_VERSION_SOC_RECOVERY "coproc.recovery"
+#define EFX_DEVLINK_INFO_VERSION_FW_EXPROM "fw.exprom"
+#define EFX_DEVLINK_INFO_VERSION_FW_UEFI "fw.uefi"
+
+#define EFX_MAX_VERSION_INFO_LEN 64
+
+int efx_probe_devlink_and_lock(struct efx_nic *efx);
+void efx_probe_devlink_unlock(struct efx_nic *efx);
+void efx_fini_devlink_lock(struct efx_nic *efx);
+void efx_fini_devlink_and_unlock(struct efx_nic *efx);
+
+#ifdef CONFIG_SFC_SRIOV
+struct efx_rep;
+
+void ef100_pf_set_devlink_port(struct efx_nic *efx);
+void ef100_rep_set_devlink_port(struct efx_rep *efv);
+void ef100_pf_unset_devlink_port(struct efx_nic *efx);
+void ef100_rep_unset_devlink_port(struct efx_rep *efv);
+#endif
+#endif /* _EFX_DEVLINK_H */
diff --git a/drivers/net/ethernet/sfc/mae.c b/drivers/net/ethernet/sfc/mae.c
index 583baf69981c..2d32abe5f478 100644
--- a/drivers/net/ethernet/sfc/mae.c
+++ b/drivers/net/ethernet/sfc/mae.c
@@ -9,8 +9,11 @@
* by the Free Software Foundation, incorporated herein by reference.
*/
+#include <linux/rhashtable.h>
+#include "ef100_nic.h"
#include "mae.h"
#include "mcdi.h"
+#include "mcdi_pcol.h"
#include "mcdi_pcol_mae.h"
int efx_mae_allocate_mport(struct efx_nic *efx, u32 *id, u32 *label)
@@ -94,7 +97,7 @@ void efx_mae_mport_mport(struct efx_nic *efx __always_unused, u32 mport_id, u32
}
/* id is really only 24 bits wide */
-int efx_mae_lookup_mport(struct efx_nic *efx, u32 selector, u32 *id)
+int efx_mae_fw_lookup_mport(struct efx_nic *efx, u32 selector, u32 *id)
{
MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_MPORT_LOOKUP_OUT_LEN);
MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_MPORT_LOOKUP_IN_LEN);
@@ -485,11 +488,193 @@ int efx_mae_free_counter(struct efx_nic *efx, struct efx_tc_counter *cnt)
return 0;
}
+int efx_mae_lookup_mport(struct efx_nic *efx, u32 vf_idx, u32 *id)
+{
+ struct ef100_nic_data *nic_data = efx->nic_data;
+ struct efx_mae *mae = efx->mae;
+ struct rhashtable_iter walk;
+ struct mae_mport_desc *m;
+ int rc = -ENOENT;
+
+ rhashtable_walk_enter(&mae->mports_ht, &walk);
+ rhashtable_walk_start(&walk);
+ while ((m = rhashtable_walk_next(&walk)) != NULL) {
+ if (m->mport_type == MAE_MPORT_DESC_MPORT_TYPE_VNIC &&
+ m->interface_idx == nic_data->local_mae_intf &&
+ m->pf_idx == 0 &&
+ m->vf_idx == vf_idx) {
+ *id = m->mport_id;
+ rc = 0;
+ break;
+ }
+ }
+ rhashtable_walk_stop(&walk);
+ rhashtable_walk_exit(&walk);
+ return rc;
+}
+
static bool efx_mae_asl_id(u32 id)
{
return !!(id & BIT(31));
}
+/* mport handling */
+static const struct rhashtable_params efx_mae_mports_ht_params = {
+ .key_len = sizeof(u32),
+ .key_offset = offsetof(struct mae_mport_desc, mport_id),
+ .head_offset = offsetof(struct mae_mport_desc, linkage),
+};
+
+struct mae_mport_desc *efx_mae_get_mport(struct efx_nic *efx, u32 mport_id)
+{
+ return rhashtable_lookup_fast(&efx->mae->mports_ht, &mport_id,
+ efx_mae_mports_ht_params);
+}
+
+static int efx_mae_add_mport(struct efx_nic *efx, struct mae_mport_desc *desc)
+{
+ struct efx_mae *mae = efx->mae;
+ int rc;
+
+ rc = rhashtable_insert_fast(&mae->mports_ht, &desc->linkage,
+ efx_mae_mports_ht_params);
+
+ if (rc) {
+ pci_err(efx->pci_dev, "Failed to insert MPORT %08x, rc %d\n",
+ desc->mport_id, rc);
+ kfree(desc);
+ return rc;
+ }
+
+ return rc;
+}
+
+void efx_mae_remove_mport(void *desc, void *arg)
+{
+ struct mae_mport_desc *mport = desc;
+
+ synchronize_rcu();
+ kfree(mport);
+}
+
+static int efx_mae_process_mport(struct efx_nic *efx,
+ struct mae_mport_desc *desc)
+{
+ struct ef100_nic_data *nic_data = efx->nic_data;
+ struct mae_mport_desc *mport;
+
+ mport = efx_mae_get_mport(efx, desc->mport_id);
+ if (!IS_ERR_OR_NULL(mport)) {
+ netif_err(efx, drv, efx->net_dev,
+ "mport with id %u does exist!!!\n", desc->mport_id);
+ return -EEXIST;
+ }
+
+ if (nic_data->have_own_mport &&
+ desc->mport_id == nic_data->own_mport) {
+ WARN_ON(desc->mport_type != MAE_MPORT_DESC_MPORT_TYPE_VNIC);
+ WARN_ON(desc->vnic_client_type !=
+ MAE_MPORT_DESC_VNIC_CLIENT_TYPE_FUNCTION);
+ nic_data->local_mae_intf = desc->interface_idx;
+ nic_data->have_local_intf = true;
+ pci_dbg(efx->pci_dev, "MAE interface_idx is %u\n",
+ nic_data->local_mae_intf);
+ }
+
+ return efx_mae_add_mport(efx, desc);
+}
+
+#define MCDI_MPORT_JOURNAL_LEN \
+ ALIGN(MC_CMD_MAE_MPORT_READ_JOURNAL_OUT_LENMAX_MCDI2, 4)
+
+int efx_mae_enumerate_mports(struct efx_nic *efx)
+{
+ efx_dword_t *outbuf = kzalloc(MCDI_MPORT_JOURNAL_LEN, GFP_KERNEL);
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_MAE_MPORT_READ_JOURNAL_IN_LEN);
+ MCDI_DECLARE_STRUCT_PTR(desc);
+ size_t outlen, stride, count;
+ int rc = 0, i;
+
+ if (!outbuf)
+ return -ENOMEM;
+ do {
+ rc = efx_mcdi_rpc(efx, MC_CMD_MAE_MPORT_READ_JOURNAL, inbuf,
+ sizeof(inbuf), outbuf,
+ MCDI_MPORT_JOURNAL_LEN, &outlen);
+ if (rc)
+ goto fail;
+ if (outlen < MC_CMD_MAE_MPORT_READ_JOURNAL_OUT_MPORT_DESC_DATA_OFST) {
+ rc = -EIO;
+ goto fail;
+ }
+ count = MCDI_DWORD(outbuf, MAE_MPORT_READ_JOURNAL_OUT_MPORT_DESC_COUNT);
+ if (!count)
+ continue; /* not break; we want to look at MORE flag */
+ stride = MCDI_DWORD(outbuf, MAE_MPORT_READ_JOURNAL_OUT_SIZEOF_MPORT_DESC);
+ if (stride < MAE_MPORT_DESC_LEN) {
+ rc = -EIO;
+ goto fail;
+ }
+ if (outlen < MC_CMD_MAE_MPORT_READ_JOURNAL_OUT_LEN(count * stride)) {
+ rc = -EIO;
+ goto fail;
+ }
+
+ for (i = 0; i < count; i++) {
+ struct mae_mport_desc *d;
+
+ d = kzalloc(sizeof(*d), GFP_KERNEL);
+ if (!d) {
+ rc = -ENOMEM;
+ goto fail;
+ }
+
+ desc = (efx_dword_t *)
+ _MCDI_PTR(outbuf, MC_CMD_MAE_MPORT_READ_JOURNAL_OUT_MPORT_DESC_DATA_OFST +
+ i * stride);
+ d->mport_id = MCDI_STRUCT_DWORD(desc, MAE_MPORT_DESC_MPORT_ID);
+ d->flags = MCDI_STRUCT_DWORD(desc, MAE_MPORT_DESC_FLAGS);
+ d->caller_flags = MCDI_STRUCT_DWORD(desc,
+ MAE_MPORT_DESC_CALLER_FLAGS);
+ d->mport_type = MCDI_STRUCT_DWORD(desc,
+ MAE_MPORT_DESC_MPORT_TYPE);
+ switch (d->mport_type) {
+ case MAE_MPORT_DESC_MPORT_TYPE_NET_PORT:
+ d->port_idx = MCDI_STRUCT_DWORD(desc,
+ MAE_MPORT_DESC_NET_PORT_IDX);
+ break;
+ case MAE_MPORT_DESC_MPORT_TYPE_ALIAS:
+ d->alias_mport_id = MCDI_STRUCT_DWORD(desc,
+ MAE_MPORT_DESC_ALIAS_DELIVER_MPORT_ID);
+ break;
+ case MAE_MPORT_DESC_MPORT_TYPE_VNIC:
+ d->vnic_client_type = MCDI_STRUCT_DWORD(desc,
+ MAE_MPORT_DESC_VNIC_CLIENT_TYPE);
+ d->interface_idx = MCDI_STRUCT_DWORD(desc,
+ MAE_MPORT_DESC_VNIC_FUNCTION_INTERFACE);
+ d->pf_idx = MCDI_STRUCT_WORD(desc,
+ MAE_MPORT_DESC_VNIC_FUNCTION_PF_IDX);
+ d->vf_idx = MCDI_STRUCT_WORD(desc,
+ MAE_MPORT_DESC_VNIC_FUNCTION_VF_IDX);
+ break;
+ default:
+ /* Unknown mport_type, just accept it */
+ break;
+ }
+ rc = efx_mae_process_mport(efx, d);
+ /* Any failure will be due to memory allocation faiure,
+ * so there is no point to try subsequent entries.
+ */
+ if (rc)
+ goto fail;
+ }
+ } while (MCDI_FIELD(outbuf, MAE_MPORT_READ_JOURNAL_OUT, MORE) &&
+ !WARN_ON(!count));
+fail:
+ kfree(outbuf);
+ return rc;
+}
+
int efx_mae_alloc_action_set(struct efx_nic *efx, struct efx_tc_action_set *act)
{
MCDI_DECLARE_BUF(outbuf, MC_CMD_MAE_ACTION_SET_ALLOC_OUT_LEN);
@@ -805,3 +990,34 @@ int efx_mae_delete_rule(struct efx_nic *efx, u32 id)
return -EIO;
return 0;
}
+
+int efx_init_mae(struct efx_nic *efx)
+{
+ struct ef100_nic_data *nic_data = efx->nic_data;
+ struct efx_mae *mae;
+ int rc;
+
+ if (!nic_data->have_mport)
+ return -EINVAL;
+
+ mae = kmalloc(sizeof(*mae), GFP_KERNEL);
+ if (!mae)
+ return -ENOMEM;
+
+ rc = rhashtable_init(&mae->mports_ht, &efx_mae_mports_ht_params);
+ if (rc < 0) {
+ kfree(mae);
+ return rc;
+ }
+ efx->mae = mae;
+ mae->efx = efx;
+ return 0;
+}
+
+void efx_fini_mae(struct efx_nic *efx)
+{
+ struct efx_mae *mae = efx->mae;
+
+ kfree(mae);
+ efx->mae = NULL;
+}
diff --git a/drivers/net/ethernet/sfc/mae.h b/drivers/net/ethernet/sfc/mae.h
index 72343e90e222..bec293a06733 100644
--- a/drivers/net/ethernet/sfc/mae.h
+++ b/drivers/net/ethernet/sfc/mae.h
@@ -13,6 +13,7 @@
#define EF100_MAE_H
/* MCDI interface for the ef100 Match-Action Engine */
+#include <net/devlink.h>
#include "net_driver.h"
#include "tc.h"
#include "mcdi_pcol.h" /* needed for various MC_CMD_MAE_*_NULL defines */
@@ -27,6 +28,40 @@ void efx_mae_mport_mport(struct efx_nic *efx, u32 mport_id, u32 *out);
int efx_mae_lookup_mport(struct efx_nic *efx, u32 selector, u32 *id);
+struct mae_mport_desc {
+ u32 mport_id;
+ u32 flags;
+ u32 caller_flags; /* enum mae_mport_desc_caller_flags */
+ u32 mport_type; /* MAE_MPORT_DESC_MPORT_TYPE_* */
+ union {
+ u32 port_idx; /* for mport_type == NET_PORT */
+ u32 alias_mport_id; /* for mport_type == ALIAS */
+ struct { /* for mport_type == VNIC */
+ u32 vnic_client_type; /* MAE_MPORT_DESC_VNIC_CLIENT_TYPE_* */
+ u32 interface_idx;
+ u16 pf_idx;
+ u16 vf_idx;
+ };
+ };
+ struct rhash_head linkage;
+ struct devlink_port dl_port;
+};
+
+int efx_mae_enumerate_mports(struct efx_nic *efx);
+struct mae_mport_desc *efx_mae_get_mport(struct efx_nic *efx, u32 mport_id);
+void efx_mae_put_mport(struct efx_nic *efx, struct mae_mport_desc *desc);
+
+/**
+ * struct efx_mae - MAE information
+ *
+ * @efx: The associated NIC
+ * @mports_ht: m-port descriptions from MC_CMD_MAE_MPORT_READ_JOURNAL
+ */
+struct efx_mae {
+ struct efx_nic *efx;
+ struct rhashtable mports_ht;
+};
+
int efx_mae_start_counters(struct efx_nic *efx, struct efx_rx_queue *rx_queue);
int efx_mae_stop_counters(struct efx_nic *efx, struct efx_rx_queue *rx_queue);
void efx_mae_counters_grant_credits(struct work_struct *work);
@@ -60,4 +95,9 @@ int efx_mae_insert_rule(struct efx_nic *efx, const struct efx_tc_match *match,
u32 prio, u32 acts_id, u32 *id);
int efx_mae_delete_rule(struct efx_nic *efx, u32 id);
+int efx_init_mae(struct efx_nic *efx);
+void efx_fini_mae(struct efx_nic *efx);
+void efx_mae_remove_mport(void *desc, void *arg);
+int efx_mae_fw_lookup_mport(struct efx_nic *efx, u32 selector, u32 *id);
+int efx_mae_lookup_mport(struct efx_nic *efx, u32 vf, u32 *id);
#endif /* EF100_MAE_H */
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index af338208eae9..a7f2c31071e8 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -2175,6 +2175,78 @@ int efx_mcdi_get_privilege_mask(struct efx_nic *efx, u32 *mask)
return 0;
}
+int efx_mcdi_nvram_metadata(struct efx_nic *efx, unsigned int type,
+ u32 *subtype, u16 version[4], char *desc,
+ size_t descsize)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_METADATA_IN_LEN);
+ efx_dword_t *outbuf;
+ size_t outlen;
+ u32 flags;
+ int rc;
+
+ outbuf = kzalloc(MC_CMD_NVRAM_METADATA_OUT_LENMAX_MCDI2, GFP_KERNEL);
+ if (!outbuf)
+ return -ENOMEM;
+
+ MCDI_SET_DWORD(inbuf, NVRAM_METADATA_IN_TYPE, type);
+
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_NVRAM_METADATA, inbuf,
+ sizeof(inbuf), outbuf,
+ MC_CMD_NVRAM_METADATA_OUT_LENMAX_MCDI2,
+ &outlen);
+ if (rc)
+ goto out_free;
+ if (outlen < MC_CMD_NVRAM_METADATA_OUT_LENMIN) {
+ rc = -EIO;
+ goto out_free;
+ }
+
+ flags = MCDI_DWORD(outbuf, NVRAM_METADATA_OUT_FLAGS);
+
+ if (desc && descsize > 0) {
+ if (flags & BIT(MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_VALID_LBN)) {
+ if (descsize <=
+ MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_NUM(outlen)) {
+ rc = -E2BIG;
+ goto out_free;
+ }
+
+ strncpy(desc,
+ MCDI_PTR(outbuf, NVRAM_METADATA_OUT_DESCRIPTION),
+ MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_NUM(outlen));
+ desc[MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_NUM(outlen)] = '\0';
+ } else {
+ desc[0] = '\0';
+ }
+ }
+
+ if (subtype) {
+ if (flags & BIT(MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_LBN))
+ *subtype = MCDI_DWORD(outbuf, NVRAM_METADATA_OUT_SUBTYPE);
+ else
+ *subtype = 0;
+ }
+
+ if (version) {
+ if (flags & BIT(MC_CMD_NVRAM_METADATA_OUT_VERSION_VALID_LBN)) {
+ version[0] = MCDI_WORD(outbuf, NVRAM_METADATA_OUT_VERSION_W);
+ version[1] = MCDI_WORD(outbuf, NVRAM_METADATA_OUT_VERSION_X);
+ version[2] = MCDI_WORD(outbuf, NVRAM_METADATA_OUT_VERSION_Y);
+ version[3] = MCDI_WORD(outbuf, NVRAM_METADATA_OUT_VERSION_Z);
+ } else {
+ version[0] = 0;
+ version[1] = 0;
+ version[2] = 0;
+ version[3] = 0;
+ }
+ }
+
+out_free:
+ kfree(outbuf);
+ return rc;
+}
+
#ifdef CONFIG_SFC_MTD
#define EFX_MCDI_NVRAM_LEN_MAX 128
diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h
index 7e35fec9da35..b139b76febff 100644
--- a/drivers/net/ethernet/sfc/mcdi.h
+++ b/drivers/net/ethernet/sfc/mcdi.h
@@ -229,6 +229,9 @@ void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev);
#define MCDI_WORD(_buf, _field) \
((u16)BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 2) + \
le16_to_cpu(*(__force const __le16 *)MCDI_PTR(_buf, _field)))
+#define MCDI_STRUCT_WORD(_buf, _field) \
+ ((void)BUILD_BUG_ON_ZERO(_field ## _LEN != 2), \
+ le16_to_cpu(*(__force const __le16 *)MCDI_STRUCT_PTR(_buf, _field)))
/* Write a 16-bit field defined in the protocol as being big-endian. */
#define MCDI_STRUCT_SET_WORD_BE(_buf, _field, _value) do { \
BUILD_BUG_ON(_field ## _LEN != 2); \
@@ -241,6 +244,8 @@ void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev);
EFX_POPULATE_DWORD_1(*_MCDI_STRUCT_DWORD(_buf, _field), EFX_DWORD_0, _value)
#define MCDI_DWORD(_buf, _field) \
EFX_DWORD_FIELD(*_MCDI_DWORD(_buf, _field), EFX_DWORD_0)
+#define MCDI_STRUCT_DWORD(_buf, _field) \
+ EFX_DWORD_FIELD(*_MCDI_STRUCT_DWORD(_buf, _field), EFX_DWORD_0)
/* Write a 32-bit field defined in the protocol as being big-endian. */
#define MCDI_STRUCT_SET_DWORD_BE(_buf, _field, _value) do { \
BUILD_BUG_ON(_field ## _LEN != 4); \
@@ -378,6 +383,9 @@ int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
size_t *size_out, size_t *erase_size_out,
bool *protected_out);
int efx_new_mcdi_nvram_test_all(struct efx_nic *efx);
+int efx_mcdi_nvram_metadata(struct efx_nic *efx, unsigned int type,
+ u32 *subtype, u16 version[4], char *desc,
+ size_t descsize);
int efx_mcdi_nvram_test_all(struct efx_nic *efx);
int efx_mcdi_handle_assertion(struct efx_nic *efx);
int efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode);
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 3b49e216768b..fcd51d3992fa 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -845,6 +845,8 @@ enum efx_xdp_tx_queues_mode {
EFX_XDP_TX_QUEUES_BORROWED /* queues borrowed from net stack */
};
+struct efx_mae;
+
/**
* struct efx_nic - an Efx NIC
* @name: Device name (net device name or bus id before net device registered)
@@ -881,6 +883,7 @@ enum efx_xdp_tx_queues_mode {
* @msi_context: Context for each MSI
* @extra_channel_types: Types of extra (non-traffic) channels that
* should be allocated for this NIC
+ * @mae: Details of the Match Action Engine
* @xdp_tx_queue_count: Number of entries in %xdp_tx_queues.
* @xdp_tx_queues: Array of pointers to tx queues used for XDP transmit.
* @xdp_txq_queues_mode: XDP TX queues sharing strategy.
@@ -994,6 +997,8 @@ enum efx_xdp_tx_queues_mode {
* xdp_rxq_info structures?
* @netdev_notifier: Netdevice notifier.
* @tc: state for TC offload (EF100).
+ * @devlink: reference to devlink structure owned by this device
+ * @dl_port: devlink port associated with the PF
* @mem_bar: The BAR that is mapped into membase.
* @reg_base: Offset from the start of the bar to the function control window.
* @monitor_work: Hardware monitor workitem
@@ -1043,6 +1048,7 @@ struct efx_nic {
struct efx_msi_context msi_context[EFX_MAX_CHANNELS];
const struct efx_channel_type *
extra_channel_type[EFX_MAX_EXTRA_CHANNELS];
+ struct efx_mae *mae;
unsigned int xdp_tx_queue_count;
struct efx_tx_queue **xdp_tx_queues;
@@ -1179,6 +1185,8 @@ struct efx_nic {
struct notifier_block netdev_notifier;
struct efx_tc_state *tc;
+ struct devlink *devlink;
+ struct devlink_port *dl_port;
unsigned int mem_bar;
u32 reg_base;
diff --git a/drivers/net/ethernet/sfc/siena/efx.c b/drivers/net/ethernet/sfc/siena/efx.c
index 60e5b7c8ccf9..ef52ec71d197 100644
--- a/drivers/net/ethernet/sfc/siena/efx.c
+++ b/drivers/net/ethernet/sfc/siena/efx.c
@@ -1007,6 +1007,10 @@ static int efx_pci_probe_post_io(struct efx_nic *efx)
net_dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
net_dev->features |= efx->fixed_features;
+ net_dev->xdp_features = NETDEV_XDP_ACT_BASIC |
+ NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT;
+
rc = efx_register_netdev(efx);
if (!rc)
return 0;
diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
index 9b46579b5a10..2d7347b71c41 100644
--- a/drivers/net/ethernet/socionext/netsec.c
+++ b/drivers/net/ethernet/socionext/netsec.c
@@ -2104,6 +2104,9 @@ static int netsec_probe(struct platform_device *pdev)
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
ndev->hw_features = ndev->features;
+ ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT;
+
priv->rx_cksum_offload_flag = true;
ret = netsec_register_mdio(priv, phy_addr);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
index 80efdeeb0b59..18acf7dd74e5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
@@ -159,15 +159,13 @@ disable:
return err;
}
-static int dwc_qos_remove(struct platform_device *pdev)
+static void dwc_qos_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct stmmac_priv *priv = netdev_priv(ndev);
clk_disable_unprepare(priv->plat->pclk);
clk_disable_unprepare(priv->plat->stmmac_clk);
-
- return 0;
}
#define SDMEMCOMPPADCTRL 0x8800
@@ -384,7 +382,7 @@ error:
return err;
}
-static int tegra_eqos_remove(struct platform_device *pdev)
+static void tegra_eqos_remove(struct platform_device *pdev)
{
struct tegra_eqos *eqos = get_stmmac_bsp_priv(&pdev->dev);
@@ -394,15 +392,13 @@ static int tegra_eqos_remove(struct platform_device *pdev)
clk_disable_unprepare(eqos->clk_rx);
clk_disable_unprepare(eqos->clk_slave);
clk_disable_unprepare(eqos->clk_master);
-
- return 0;
}
struct dwc_eth_dwmac_data {
int (*probe)(struct platform_device *pdev,
struct plat_stmmacenet_data *data,
struct stmmac_resources *res);
- int (*remove)(struct platform_device *pdev);
+ void (*remove)(struct platform_device *pdev);
};
static const struct dwc_eth_dwmac_data dwc_qos_data = {
@@ -473,21 +469,16 @@ static int dwc_eth_dwmac_remove(struct platform_device *pdev)
struct net_device *ndev = platform_get_drvdata(pdev);
struct stmmac_priv *priv = netdev_priv(ndev);
const struct dwc_eth_dwmac_data *data;
- int err;
data = device_get_match_data(&pdev->dev);
- err = stmmac_dvr_remove(&pdev->dev);
- if (err < 0)
- dev_err(&pdev->dev, "failed to remove platform: %d\n", err);
+ stmmac_dvr_remove(&pdev->dev);
- err = data->remove(pdev);
- if (err < 0)
- dev_err(&pdev->dev, "failed to remove subdriver: %d\n", err);
+ data->remove(pdev);
stmmac_remove_config_dt(pdev, priv->plat);
- return err;
+ return 0;
}
static const struct of_device_id dwc_eth_dwmac_match[] = {
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
index bd52fb7cf486..ac8580f501e2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
@@ -31,6 +31,12 @@
#define GPR_ENET_QOS_CLK_TX_CLK_SEL (0x1 << 20)
#define GPR_ENET_QOS_RGMII_EN (0x1 << 21)
+#define MX93_GPR_ENET_QOS_INTF_MODE_MASK GENMASK(3, 0)
+#define MX93_GPR_ENET_QOS_INTF_SEL_MII (0x0 << 1)
+#define MX93_GPR_ENET_QOS_INTF_SEL_RMII (0x4 << 1)
+#define MX93_GPR_ENET_QOS_INTF_SEL_RGMII (0x1 << 1)
+#define MX93_GPR_ENET_QOS_CLK_GEN_EN (0x1 << 0)
+
struct imx_dwmac_ops {
u32 addr_width;
bool mac_rgmii_txclk_auto_adj;
@@ -90,6 +96,35 @@ imx8dxl_set_intf_mode(struct plat_stmmacenet_data *plat_dat)
return ret;
}
+static int imx93_set_intf_mode(struct plat_stmmacenet_data *plat_dat)
+{
+ struct imx_priv_data *dwmac = plat_dat->bsp_priv;
+ int val;
+
+ switch (plat_dat->interface) {
+ case PHY_INTERFACE_MODE_MII:
+ val = MX93_GPR_ENET_QOS_INTF_SEL_MII;
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ val = MX93_GPR_ENET_QOS_INTF_SEL_RMII;
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ val = MX93_GPR_ENET_QOS_INTF_SEL_RGMII;
+ break;
+ default:
+ dev_dbg(dwmac->dev, "imx dwmac doesn't support %d interface\n",
+ plat_dat->interface);
+ return -EINVAL;
+ }
+
+ val |= MX93_GPR_ENET_QOS_CLK_GEN_EN;
+ return regmap_update_bits(dwmac->intf_regmap, dwmac->intf_reg_off,
+ MX93_GPR_ENET_QOS_INTF_MODE_MASK, val);
+};
+
static int imx_dwmac_clks_config(void *priv, bool enabled)
{
struct imx_priv_data *dwmac = priv;
@@ -188,7 +223,9 @@ imx_dwmac_parse_dt(struct imx_priv_data *dwmac, struct device *dev)
}
dwmac->clk_mem = NULL;
- if (of_machine_is_compatible("fsl,imx8dxl")) {
+
+ if (of_machine_is_compatible("fsl,imx8dxl") ||
+ of_machine_is_compatible("fsl,imx93")) {
dwmac->clk_mem = devm_clk_get(dev, "mem");
if (IS_ERR(dwmac->clk_mem)) {
dev_err(dev, "failed to get mem clock\n");
@@ -196,10 +233,11 @@ imx_dwmac_parse_dt(struct imx_priv_data *dwmac, struct device *dev)
}
}
- if (of_machine_is_compatible("fsl,imx8mp")) {
- /* Binding doc describes the property:
- is required by i.MX8MP.
- is optional for i.MX8DXL.
+ if (of_machine_is_compatible("fsl,imx8mp") ||
+ of_machine_is_compatible("fsl,imx93")) {
+ /* Binding doc describes the propety:
+ * is required by i.MX8MP, i.MX93.
+ * is optinoal for i.MX8DXL.
*/
dwmac->intf_regmap = syscon_regmap_lookup_by_phandle(np, "intf_mode");
if (IS_ERR(dwmac->intf_regmap))
@@ -296,9 +334,16 @@ static struct imx_dwmac_ops imx8dxl_dwmac_data = {
.set_intf_mode = imx8dxl_set_intf_mode,
};
+static struct imx_dwmac_ops imx93_dwmac_data = {
+ .addr_width = 32,
+ .mac_rgmii_txclk_auto_adj = true,
+ .set_intf_mode = imx93_set_intf_mode,
+};
+
static const struct of_device_id imx_dwmac_match[] = {
{ .compatible = "nxp,imx8mp-dwmac-eqos", .data = &imx8mp_dwmac_data },
{ .compatible = "nxp,imx8dxl-dwmac-eqos", .data = &imx8dxl_dwmac_data },
+ { .compatible = "nxp,imx93-dwmac-eqos", .data = &imx93_dwmac_data },
{ }
};
MODULE_DEVICE_TABLE(of, imx_dwmac_match);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
index 835caa15d55f..732774645c1a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
@@ -560,6 +560,8 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
plat_dat->has_gmac4 = 1;
plat_dat->pmt = 1;
plat_dat->tso_en = of_property_read_bool(np, "snps,tso");
+ if (of_device_is_compatible(np, "qcom,qcs404-ethqos"))
+ plat_dat->rx_clk_runs_in_lpi = 1;
ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
if (ret)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
index 6656d76b6766..4b8fd11563e4 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -1915,11 +1915,12 @@ err_remove_config_dt:
static int rk_gmac_remove(struct platform_device *pdev)
{
struct rk_priv_data *bsp_priv = get_stmmac_bsp_priv(&pdev->dev);
- int ret = stmmac_dvr_remove(&pdev->dev);
+
+ stmmac_dvr_remove(&pdev->dev);
rk_gmac_powerdown(bsp_priv);
- return ret;
+ return 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
index 710d7435733e..be3b1ebc06ab 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
@@ -371,11 +371,12 @@ err_remove_config_dt:
static int sti_dwmac_remove(struct platform_device *pdev)
{
struct sti_dwmac *dwmac = get_stmmac_bsp_priv(&pdev->dev);
- int ret = stmmac_dvr_remove(&pdev->dev);
+
+ stmmac_dvr_remove(&pdev->dev);
clk_disable_unprepare(dwmac->clk);
- return ret;
+ return 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
index 2b38a499a404..0616b3a04ff3 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
@@ -421,9 +421,10 @@ static int stm32_dwmac_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct stmmac_priv *priv = netdev_priv(ndev);
- int ret = stmmac_dvr_remove(&pdev->dev);
struct stm32_dwmac *dwmac = priv->plat->bsp_priv;
+ stmmac_dvr_remove(&pdev->dev);
+
stm32_dwmac_clk_disable(priv->plat->bsp_priv);
if (dwmac->irq_pwr_wakeup >= 0) {
@@ -431,7 +432,7 @@ static int stm32_dwmac_remove(struct platform_device *pdev)
device_init_wakeup(&pdev->dev, false);
}
- return ret;
+ return 0;
}
static int stm32mp1_suspend(struct stm32_dwmac *dwmac)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
index 413f66017219..e95d35f1e5a0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
@@ -541,9 +541,9 @@ int dwmac5_flex_pps_config(void __iomem *ioaddr, int index,
return 0;
}
- val |= PPSCMDx(index, 0x2);
val |= TRGTMODSELx(index, 0x2);
val |= PPSEN0;
+ writel(val, ioaddr + MAC_PPS_CONTROL);
writel(cfg->start.tv_sec, ioaddr + MAC_PPSx_TARGET_TIME_SEC(index));
@@ -568,6 +568,7 @@ int dwmac5_flex_pps_config(void __iomem *ioaddr, int index,
writel(period - 1, ioaddr + MAC_PPSx_WIDTH(index));
/* Finally, activate it */
+ val |= PPSCMDx(index, 0x2);
writel(val, ioaddr + MAC_PPS_CONTROL);
return 0;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h
index 592b4067f9b8..16a7421715cb 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h
@@ -567,6 +567,7 @@ struct tc_cbs_qopt_offload;
struct flow_cls_offload;
struct tc_taprio_qopt_offload;
struct tc_etf_qopt_offload;
+struct tc_query_caps_base;
struct stmmac_tc_ops {
int (*init)(struct stmmac_priv *priv);
@@ -580,6 +581,8 @@ struct stmmac_tc_ops {
struct tc_taprio_qopt_offload *qopt);
int (*setup_etf)(struct stmmac_priv *priv,
struct tc_etf_qopt_offload *qopt);
+ int (*query_caps)(struct stmmac_priv *priv,
+ struct tc_query_caps_base *base);
};
#define stmmac_tc_init(__priv, __args...) \
@@ -594,6 +597,8 @@ struct stmmac_tc_ops {
stmmac_do_callback(__priv, tc, setup_taprio, __args)
#define stmmac_tc_setup_etf(__priv, __args...) \
stmmac_do_callback(__priv, tc, setup_etf, __args)
+#define stmmac_tc_query_caps(__priv, __args...) \
+ stmmac_do_callback(__priv, tc, query_caps, __args)
struct stmmac_counters;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index bdbf86cb102a..3d15e1e92e18 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -345,7 +345,7 @@ int stmmac_xdp_open(struct net_device *dev);
void stmmac_xdp_release(struct net_device *dev);
int stmmac_resume(struct device *dev);
int stmmac_suspend(struct device *dev);
-int stmmac_dvr_remove(struct device *dev);
+void stmmac_dvr_remove(struct device *dev);
int stmmac_dvr_probe(struct device *device,
struct plat_stmmacenet_data *plat_dat,
struct stmmac_resources *res);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index b7e5af58ab75..e4902a7bb61e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1080,7 +1080,8 @@ static void stmmac_mac_link_up(struct phylink_config *config,
stmmac_mac_set(priv, priv->ioaddr, true);
if (phy && priv->dma_cap.eee) {
- priv->eee_active = phy_init_eee(phy, 1) >= 0;
+ priv->eee_active =
+ phy_init_eee(phy, !priv->plat->rx_clk_runs_in_lpi) >= 0;
priv->eee_enabled = stmmac_eee_init(priv);
priv->tx_lpi_enabled = priv->eee_enabled;
stmmac_set_eee_pls(priv, priv->hw, true);
@@ -5991,6 +5992,8 @@ static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
struct stmmac_priv *priv = netdev_priv(ndev);
switch (type) {
+ case TC_QUERY_CAPS:
+ return stmmac_tc_query_caps(priv, priv, type_data);
case TC_SETUP_BLOCK:
return flow_block_cb_setup_simple(type_data,
&stmmac_block_cb_list,
@@ -7150,6 +7153,9 @@ int stmmac_dvr_probe(struct device *device,
ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_RXCSUM;
+ ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_XSK_ZEROCOPY |
+ NETDEV_XDP_ACT_NDO_XMIT;
ret = stmmac_tc_init(priv, priv);
if (!ret) {
@@ -7346,7 +7352,7 @@ EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
* Description: this function resets the TX/RX processes, disables the MAC RX/TX
* changes the link status, releases the DMA descriptor rings.
*/
-int stmmac_dvr_remove(struct device *dev)
+void stmmac_dvr_remove(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
struct stmmac_priv *priv = netdev_priv(ndev);
@@ -7382,8 +7388,6 @@ int stmmac_dvr_remove(struct device *dev)
pm_runtime_disable(dev);
pm_runtime_put_noidle(dev);
-
- return 0;
}
EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index 5f177ea80725..21aaa2730ac8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -45,8 +45,8 @@
#define MII_XGMAC_PA_SHIFT 16
#define MII_XGMAC_DA_SHIFT 21
-static int stmmac_xgmac2_c45_format(struct stmmac_priv *priv, int phyaddr,
- int phyreg, u32 *hw_addr)
+static void stmmac_xgmac2_c45_format(struct stmmac_priv *priv, int phyaddr,
+ int devad, int phyreg, u32 *hw_addr)
{
u32 tmp;
@@ -56,19 +56,14 @@ static int stmmac_xgmac2_c45_format(struct stmmac_priv *priv, int phyaddr,
writel(tmp, priv->ioaddr + XGMAC_MDIO_C22P);
*hw_addr = (phyaddr << MII_XGMAC_PA_SHIFT) | (phyreg & 0xffff);
- *hw_addr |= (phyreg >> MII_DEVADDR_C45_SHIFT) << MII_XGMAC_DA_SHIFT;
- return 0;
+ *hw_addr |= devad << MII_XGMAC_DA_SHIFT;
}
-static int stmmac_xgmac2_c22_format(struct stmmac_priv *priv, int phyaddr,
- int phyreg, u32 *hw_addr)
+static void stmmac_xgmac2_c22_format(struct stmmac_priv *priv, int phyaddr,
+ int phyreg, u32 *hw_addr)
{
u32 tmp;
- /* HW does not support C22 addr >= 4 */
- if (phyaddr > MII_XGMAC_MAX_C22ADDR)
- return -ENODEV;
-
/* Set port as Clause 22 */
tmp = readl(priv->ioaddr + XGMAC_MDIO_C22P);
tmp &= ~MII_XGMAC_C22P_MASK;
@@ -76,16 +71,14 @@ static int stmmac_xgmac2_c22_format(struct stmmac_priv *priv, int phyaddr,
writel(tmp, priv->ioaddr + XGMAC_MDIO_C22P);
*hw_addr = (phyaddr << MII_XGMAC_PA_SHIFT) | (phyreg & 0x1f);
- return 0;
}
-static int stmmac_xgmac2_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg)
+static int stmmac_xgmac2_mdio_read(struct stmmac_priv *priv, u32 addr,
+ u32 value)
{
- struct net_device *ndev = bus->priv;
- struct stmmac_priv *priv = netdev_priv(ndev);
unsigned int mii_address = priv->hw->mii.addr;
unsigned int mii_data = priv->hw->mii.data;
- u32 tmp, addr, value = MII_XGMAC_BUSY;
+ u32 tmp;
int ret;
ret = pm_runtime_resume_and_get(priv->device);
@@ -99,20 +92,6 @@ static int stmmac_xgmac2_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg)
goto err_disable_clks;
}
- if (phyreg & MII_ADDR_C45) {
- phyreg &= ~MII_ADDR_C45;
-
- ret = stmmac_xgmac2_c45_format(priv, phyaddr, phyreg, &addr);
- if (ret)
- goto err_disable_clks;
- } else {
- ret = stmmac_xgmac2_c22_format(priv, phyaddr, phyreg, &addr);
- if (ret)
- goto err_disable_clks;
-
- value |= MII_XGMAC_SADDR;
- }
-
value |= (priv->clk_csr << priv->hw->mii.clk_csr_shift)
& priv->hw->mii.clk_csr_mask;
value |= MII_XGMAC_READ;
@@ -144,14 +123,44 @@ err_disable_clks:
return ret;
}
-static int stmmac_xgmac2_mdio_write(struct mii_bus *bus, int phyaddr,
- int phyreg, u16 phydata)
+static int stmmac_xgmac2_mdio_read_c22(struct mii_bus *bus, int phyaddr,
+ int phyreg)
{
struct net_device *ndev = bus->priv;
- struct stmmac_priv *priv = netdev_priv(ndev);
+ struct stmmac_priv *priv;
+ u32 addr;
+
+ priv = netdev_priv(ndev);
+
+ /* HW does not support C22 addr >= 4 */
+ if (phyaddr > MII_XGMAC_MAX_C22ADDR)
+ return -ENODEV;
+
+ stmmac_xgmac2_c22_format(priv, phyaddr, phyreg, &addr);
+
+ return stmmac_xgmac2_mdio_read(priv, addr, MII_XGMAC_BUSY);
+}
+
+static int stmmac_xgmac2_mdio_read_c45(struct mii_bus *bus, int phyaddr,
+ int devad, int phyreg)
+{
+ struct net_device *ndev = bus->priv;
+ struct stmmac_priv *priv;
+ u32 addr;
+
+ priv = netdev_priv(ndev);
+
+ stmmac_xgmac2_c45_format(priv, phyaddr, devad, phyreg, &addr);
+
+ return stmmac_xgmac2_mdio_read(priv, addr, MII_XGMAC_BUSY);
+}
+
+static int stmmac_xgmac2_mdio_write(struct stmmac_priv *priv, u32 addr,
+ u32 value, u16 phydata)
+{
unsigned int mii_address = priv->hw->mii.addr;
unsigned int mii_data = priv->hw->mii.data;
- u32 addr, tmp, value = MII_XGMAC_BUSY;
+ u32 tmp;
int ret;
ret = pm_runtime_resume_and_get(priv->device);
@@ -165,20 +174,6 @@ static int stmmac_xgmac2_mdio_write(struct mii_bus *bus, int phyaddr,
goto err_disable_clks;
}
- if (phyreg & MII_ADDR_C45) {
- phyreg &= ~MII_ADDR_C45;
-
- ret = stmmac_xgmac2_c45_format(priv, phyaddr, phyreg, &addr);
- if (ret)
- goto err_disable_clks;
- } else {
- ret = stmmac_xgmac2_c22_format(priv, phyaddr, phyreg, &addr);
- if (ret)
- goto err_disable_clks;
-
- value |= MII_XGMAC_SADDR;
- }
-
value |= (priv->clk_csr << priv->hw->mii.clk_csr_shift)
& priv->hw->mii.clk_csr_mask;
value |= phydata;
@@ -205,8 +200,63 @@ err_disable_clks:
return ret;
}
+static int stmmac_xgmac2_mdio_write_c22(struct mii_bus *bus, int phyaddr,
+ int phyreg, u16 phydata)
+{
+ struct net_device *ndev = bus->priv;
+ struct stmmac_priv *priv;
+ u32 addr;
+
+ priv = netdev_priv(ndev);
+
+ /* HW does not support C22 addr >= 4 */
+ if (phyaddr > MII_XGMAC_MAX_C22ADDR)
+ return -ENODEV;
+
+ stmmac_xgmac2_c22_format(priv, phyaddr, phyreg, &addr);
+
+ return stmmac_xgmac2_mdio_write(priv, addr,
+ MII_XGMAC_BUSY | MII_XGMAC_SADDR, phydata);
+}
+
+static int stmmac_xgmac2_mdio_write_c45(struct mii_bus *bus, int phyaddr,
+ int devad, int phyreg, u16 phydata)
+{
+ struct net_device *ndev = bus->priv;
+ struct stmmac_priv *priv;
+ u32 addr;
+
+ priv = netdev_priv(ndev);
+
+ stmmac_xgmac2_c45_format(priv, phyaddr, devad, phyreg, &addr);
+
+ return stmmac_xgmac2_mdio_write(priv, addr, MII_XGMAC_BUSY,
+ phydata);
+}
+
+static int stmmac_mdio_read(struct stmmac_priv *priv, int data, u32 value)
+{
+ unsigned int mii_address = priv->hw->mii.addr;
+ unsigned int mii_data = priv->hw->mii.data;
+ u32 v;
+
+ if (readl_poll_timeout(priv->ioaddr + mii_address, v, !(v & MII_BUSY),
+ 100, 10000))
+ return -EBUSY;
+
+ writel(data, priv->ioaddr + mii_data);
+ writel(value, priv->ioaddr + mii_address);
+
+ if (readl_poll_timeout(priv->ioaddr + mii_address, v, !(v & MII_BUSY),
+ 100, 10000))
+ return -EBUSY;
+
+ /* Read the data from the MII data register */
+ return readl(priv->ioaddr + mii_data) & MII_DATA_MASK;
+}
+
/**
- * stmmac_mdio_read
+ * stmmac_mdio_read_c22
* @bus: points to the mii_bus structure
* @phyaddr: MII addr
* @phyreg: MII reg
@@ -215,15 +265,12 @@ err_disable_clks:
* accessing the PHY registers.
* Fortunately, it seems this has no drawback for the 7109 MAC.
*/
-static int stmmac_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg)
+static int stmmac_mdio_read_c22(struct mii_bus *bus, int phyaddr, int phyreg)
{
struct net_device *ndev = bus->priv;
struct stmmac_priv *priv = netdev_priv(ndev);
- unsigned int mii_address = priv->hw->mii.addr;
- unsigned int mii_data = priv->hw->mii.data;
u32 value = MII_BUSY;
int data = 0;
- u32 v;
data = pm_runtime_resume_and_get(priv->device);
if (data < 0)
@@ -236,60 +283,94 @@ static int stmmac_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg)
& priv->hw->mii.clk_csr_mask;
if (priv->plat->has_gmac4) {
value |= MII_GMAC4_READ;
- if (phyreg & MII_ADDR_C45) {
- value |= MII_GMAC4_C45E;
- value &= ~priv->hw->mii.reg_mask;
- value |= ((phyreg >> MII_DEVADDR_C45_SHIFT) <<
- priv->hw->mii.reg_shift) &
- priv->hw->mii.reg_mask;
-
- data |= (phyreg & MII_REGADDR_C45_MASK) <<
- MII_GMAC4_REG_ADDR_SHIFT;
- }
}
- if (readl_poll_timeout(priv->ioaddr + mii_address, v, !(v & MII_BUSY),
- 100, 10000)) {
- data = -EBUSY;
- goto err_disable_clks;
- }
+ data = stmmac_mdio_read(priv, data, value);
- writel(data, priv->ioaddr + mii_data);
- writel(value, priv->ioaddr + mii_address);
+ pm_runtime_put(priv->device);
- if (readl_poll_timeout(priv->ioaddr + mii_address, v, !(v & MII_BUSY),
- 100, 10000)) {
- data = -EBUSY;
- goto err_disable_clks;
+ return data;
+}
+
+/**
+ * stmmac_mdio_read_c45
+ * @bus: points to the mii_bus structure
+ * @phyaddr: MII addr
+ * @devad: device address to read
+ * @phyreg: MII reg
+ * Description: it reads data from the MII register from within the phy device.
+ * For the 7111 GMAC, we must set the bit 0 in the MII address register while
+ * accessing the PHY registers.
+ * Fortunately, it seems this has no drawback for the 7109 MAC.
+ */
+static int stmmac_mdio_read_c45(struct mii_bus *bus, int phyaddr, int devad,
+ int phyreg)
+{
+ struct net_device *ndev = bus->priv;
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ u32 value = MII_BUSY;
+ int data = 0;
+
+ data = pm_runtime_get_sync(priv->device);
+ if (data < 0) {
+ pm_runtime_put_noidle(priv->device);
+ return data;
}
- /* Read the data from the MII data register */
- data = (int)readl(priv->ioaddr + mii_data) & MII_DATA_MASK;
+ value |= (phyaddr << priv->hw->mii.addr_shift)
+ & priv->hw->mii.addr_mask;
+ value |= (phyreg << priv->hw->mii.reg_shift) & priv->hw->mii.reg_mask;
+ value |= (priv->clk_csr << priv->hw->mii.clk_csr_shift)
+ & priv->hw->mii.clk_csr_mask;
+ value |= MII_GMAC4_READ;
+ value |= MII_GMAC4_C45E;
+ value &= ~priv->hw->mii.reg_mask;
+ value |= (devad << priv->hw->mii.reg_shift) & priv->hw->mii.reg_mask;
+
+ data |= phyreg << MII_GMAC4_REG_ADDR_SHIFT;
+
+ data = stmmac_mdio_read(priv, data, value);
-err_disable_clks:
pm_runtime_put(priv->device);
return data;
}
+static int stmmac_mdio_write(struct stmmac_priv *priv, int data, u32 value)
+{
+ unsigned int mii_address = priv->hw->mii.addr;
+ unsigned int mii_data = priv->hw->mii.data;
+ u32 v;
+
+ /* Wait until any existing MII operation is complete */
+ if (readl_poll_timeout(priv->ioaddr + mii_address, v, !(v & MII_BUSY),
+ 100, 10000))
+ return -EBUSY;
+
+ /* Set the MII address register to write */
+ writel(data, priv->ioaddr + mii_data);
+ writel(value, priv->ioaddr + mii_address);
+
+ /* Wait until any existing MII operation is complete */
+ return readl_poll_timeout(priv->ioaddr + mii_address, v,
+ !(v & MII_BUSY), 100, 10000);
+}
+
/**
- * stmmac_mdio_write
+ * stmmac_mdio_write_c22
* @bus: points to the mii_bus structure
* @phyaddr: MII addr
* @phyreg: MII reg
* @phydata: phy data
* Description: it writes the data into the MII register from within the device.
*/
-static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg,
- u16 phydata)
+static int stmmac_mdio_write_c22(struct mii_bus *bus, int phyaddr, int phyreg,
+ u16 phydata)
{
struct net_device *ndev = bus->priv;
struct stmmac_priv *priv = netdev_priv(ndev);
- unsigned int mii_address = priv->hw->mii.addr;
- unsigned int mii_data = priv->hw->mii.data;
int ret, data = phydata;
u32 value = MII_BUSY;
- u32 v;
ret = pm_runtime_resume_and_get(priv->device);
if (ret < 0)
@@ -301,38 +382,57 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg,
value |= (priv->clk_csr << priv->hw->mii.clk_csr_shift)
& priv->hw->mii.clk_csr_mask;
- if (priv->plat->has_gmac4) {
+ if (priv->plat->has_gmac4)
value |= MII_GMAC4_WRITE;
- if (phyreg & MII_ADDR_C45) {
- value |= MII_GMAC4_C45E;
- value &= ~priv->hw->mii.reg_mask;
- value |= ((phyreg >> MII_DEVADDR_C45_SHIFT) <<
- priv->hw->mii.reg_shift) &
- priv->hw->mii.reg_mask;
-
- data |= (phyreg & MII_REGADDR_C45_MASK) <<
- MII_GMAC4_REG_ADDR_SHIFT;
- }
- } else {
+ else
value |= MII_WRITE;
- }
- /* Wait until any existing MII operation is complete */
- if (readl_poll_timeout(priv->ioaddr + mii_address, v, !(v & MII_BUSY),
- 100, 10000)) {
- ret = -EBUSY;
- goto err_disable_clks;
+ ret = stmmac_mdio_write(priv, data, value);
+
+ pm_runtime_put(priv->device);
+
+ return ret;
+}
+
+/**
+ * stmmac_mdio_write_c45
+ * @bus: points to the mii_bus structure
+ * @phyaddr: MII addr
+ * @phyreg: MII reg
+ * @devad: device address to read
+ * @phydata: phy data
+ * Description: it writes the data into the MII register from within the device.
+ */
+static int stmmac_mdio_write_c45(struct mii_bus *bus, int phyaddr,
+ int devad, int phyreg, u16 phydata)
+{
+ struct net_device *ndev = bus->priv;
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ int ret, data = phydata;
+ u32 value = MII_BUSY;
+
+ ret = pm_runtime_get_sync(priv->device);
+ if (ret < 0) {
+ pm_runtime_put_noidle(priv->device);
+ return ret;
}
- /* Set the MII address register to write */
- writel(data, priv->ioaddr + mii_data);
- writel(value, priv->ioaddr + mii_address);
+ value |= (phyaddr << priv->hw->mii.addr_shift)
+ & priv->hw->mii.addr_mask;
+ value |= (phyreg << priv->hw->mii.reg_shift) & priv->hw->mii.reg_mask;
- /* Wait until any existing MII operation is complete */
- ret = readl_poll_timeout(priv->ioaddr + mii_address, v, !(v & MII_BUSY),
- 100, 10000);
+ value |= (priv->clk_csr << priv->hw->mii.clk_csr_shift)
+ & priv->hw->mii.clk_csr_mask;
+
+ value |= MII_GMAC4_WRITE;
+ value |= MII_GMAC4_C45E;
+ value &= ~priv->hw->mii.reg_mask;
+ value |= (devad << priv->hw->mii.reg_shift) & priv->hw->mii.reg_mask;
+
+ data |= phyreg << MII_GMAC4_REG_ADDR_SHIFT;
+
+ ret = stmmac_mdio_write(priv, data, value);
-err_disable_clks:
pm_runtime_put(priv->device);
return ret;
@@ -453,12 +553,11 @@ int stmmac_mdio_register(struct net_device *ndev)
new_bus->name = "stmmac";
- if (priv->plat->has_gmac4)
- new_bus->probe_capabilities = MDIOBUS_C22_C45;
-
if (priv->plat->has_xgmac) {
- new_bus->read = &stmmac_xgmac2_mdio_read;
- new_bus->write = &stmmac_xgmac2_mdio_write;
+ new_bus->read = &stmmac_xgmac2_mdio_read_c22;
+ new_bus->write = &stmmac_xgmac2_mdio_write_c22;
+ new_bus->read_c45 = &stmmac_xgmac2_mdio_read_c45;
+ new_bus->write_c45 = &stmmac_xgmac2_mdio_write_c45;
/* Right now only C22 phys are supported */
max_addr = MII_XGMAC_MAX_C22ADDR + 1;
@@ -468,8 +567,13 @@ int stmmac_mdio_register(struct net_device *ndev)
dev_err(dev, "Unsupported phy_addr (max=%d)\n",
MII_XGMAC_MAX_C22ADDR);
} else {
- new_bus->read = &stmmac_mdio_read;
- new_bus->write = &stmmac_mdio_write;
+ new_bus->read = &stmmac_mdio_read_c22;
+ new_bus->write = &stmmac_mdio_write_c22;
+ if (priv->plat->has_gmac4) {
+ new_bus->read_c45 = &stmmac_mdio_read_c45;
+ new_bus->write_c45 = &stmmac_mdio_write_c45;
+ }
+
max_addr = PHY_MAX_ADDR;
}
@@ -490,7 +594,7 @@ int stmmac_mdio_register(struct net_device *ndev)
/* Looks like we need a dummy read for XGMAC only and C45 PHYs */
if (priv->plat->has_xgmac)
- stmmac_xgmac2_mdio_read(new_bus, 0, MII_ADDR_C45);
+ stmmac_xgmac2_mdio_read_c45(new_bus, 0, 0, 0);
/* If fixed-link is set, skip PHY scanning */
if (!fwnode)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index eb6d9cd8e93f..067a40fe0a23 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -559,7 +559,7 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
dma_cfg->mixed_burst = of_property_read_bool(np, "snps,mixed-burst");
plat->force_thresh_dma_mode = of_property_read_bool(np, "snps,force_thresh_dma_mode");
- if (plat->force_thresh_dma_mode) {
+ if (plat->force_thresh_dma_mode && plat->force_sf_dma_mode) {
plat->force_sf_dma_mode = 0;
dev_warn(&pdev->dev,
"force_sf_dma_mode is ignored if force_thresh_dma_mode is set.\n");
@@ -711,14 +711,15 @@ int stmmac_pltfr_remove(struct platform_device *pdev)
struct net_device *ndev = platform_get_drvdata(pdev);
struct stmmac_priv *priv = netdev_priv(ndev);
struct plat_stmmacenet_data *plat = priv->plat;
- int ret = stmmac_dvr_remove(&pdev->dev);
+
+ stmmac_dvr_remove(&pdev->dev);
if (plat->exit)
plat->exit(pdev, plat->bsp_priv);
stmmac_remove_config_dt(pdev, plat);
- return ret;
+ return 0;
}
EXPORT_SYMBOL_GPL(stmmac_pltfr_remove);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
index 2cfb18cef1d4..9d55226479b4 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
@@ -1107,6 +1107,25 @@ static int tc_setup_etf(struct stmmac_priv *priv,
return 0;
}
+static int tc_query_caps(struct stmmac_priv *priv,
+ struct tc_query_caps_base *base)
+{
+ switch (base->type) {
+ case TC_SETUP_QDISC_TAPRIO: {
+ struct tc_taprio_caps *caps = base->caps;
+
+ if (!priv->dma_cap.estsel)
+ return -EOPNOTSUPP;
+
+ caps->gate_mask_per_txq = true;
+
+ return 0;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
const struct stmmac_tc_ops dwmac510_tc_ops = {
.init = tc_init,
.setup_cls_u32 = tc_setup_cls_u32,
@@ -1114,4 +1133,5 @@ const struct stmmac_tc_ops dwmac510_tc_ops = {
.setup_cls = tc_setup_cls,
.setup_taprio = tc_setup_taprio,
.setup_etf = tc_setup_etf,
+ .query_caps = tc_query_caps,
};
diff --git a/drivers/net/ethernet/sunplus/spl2sw_mdio.c b/drivers/net/ethernet/sunplus/spl2sw_mdio.c
index 733ae1704269..c8ef17e34f3c 100644
--- a/drivers/net/ethernet/sunplus/spl2sw_mdio.c
+++ b/drivers/net/ethernet/sunplus/spl2sw_mdio.c
@@ -61,9 +61,6 @@ static int spl2sw_mii_read(struct mii_bus *bus, int addr, int regnum)
{
struct spl2sw_common *comm = bus->priv;
- if (regnum & MII_ADDR_C45)
- return -EOPNOTSUPP;
-
return spl2sw_mdio_access(comm, SPL2SW_MDIO_READ_CMD, addr, regnum, 0);
}
@@ -72,9 +69,6 @@ static int spl2sw_mii_write(struct mii_bus *bus, int addr, int regnum, u16 val)
struct spl2sw_common *comm = bus->priv;
int ret;
- if (regnum & MII_ADDR_C45)
- return -EOPNOTSUPP;
-
ret = spl2sw_mdio_access(comm, SPL2SW_MDIO_WRITE_CMD, addr, regnum, val);
if (ret < 0)
return ret;
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index ecbde83b5243..4e3861c47708 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -501,7 +501,15 @@ static int am65_cpsw_nuss_common_stop(struct am65_cpsw_common *common)
k3_udma_glue_disable_tx_chn(common->tx_chns[i].tx_chn);
}
+ reinit_completion(&common->tdown_complete);
k3_udma_glue_tdown_rx_chn(common->rx_chns.rx_chn, true);
+
+ if (common->pdata.quirks & AM64_CPSW_QUIRK_DMA_RX_TDOWN_IRQ) {
+ i = wait_for_completion_timeout(&common->tdown_complete, msecs_to_jiffies(1000));
+ if (!i)
+ dev_err(common->dev, "rx teardown timeout\n");
+ }
+
napi_disable(&common->napi_rx);
for (i = 0; i < AM65_CPSW_MAX_RX_FLOWS; i++)
@@ -721,6 +729,8 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_common *common,
if (cppi5_desc_is_tdcm(desc_dma)) {
dev_dbg(dev, "%s RX tdown flow: %u\n", __func__, flow_idx);
+ if (common->pdata.quirks & AM64_CPSW_QUIRK_DMA_RX_TDOWN_IRQ)
+ complete(&common->tdown_complete);
return 0;
}
@@ -1416,6 +1426,70 @@ static const struct net_device_ops am65_cpsw_nuss_netdev_ops = {
.ndo_setup_tc = am65_cpsw_qos_ndo_setup_tc,
};
+static void am65_cpsw_disable_phy(struct phy *phy)
+{
+ phy_power_off(phy);
+ phy_exit(phy);
+}
+
+static int am65_cpsw_enable_phy(struct phy *phy)
+{
+ int ret;
+
+ ret = phy_init(phy);
+ if (ret < 0)
+ return ret;
+
+ ret = phy_power_on(phy);
+ if (ret < 0) {
+ phy_exit(phy);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void am65_cpsw_disable_serdes_phy(struct am65_cpsw_common *common)
+{
+ struct am65_cpsw_port *port;
+ struct phy *phy;
+ int i;
+
+ for (i = 0; i < common->port_num; i++) {
+ port = &common->ports[i];
+ phy = port->slave.serdes_phy;
+ if (phy)
+ am65_cpsw_disable_phy(phy);
+ }
+}
+
+static int am65_cpsw_init_serdes_phy(struct device *dev, struct device_node *port_np,
+ struct am65_cpsw_port *port)
+{
+ const char *name = "serdes-phy";
+ struct phy *phy;
+ int ret;
+
+ phy = devm_of_phy_get(dev, port_np, name);
+ if (PTR_ERR(phy) == -ENODEV)
+ return 0;
+ if (IS_ERR(phy))
+ return PTR_ERR(phy);
+
+ /* Serdes PHY exists. Store it. */
+ port->slave.serdes_phy = phy;
+
+ ret = am65_cpsw_enable_phy(phy);
+ if (ret < 0)
+ goto err_phy;
+
+ return 0;
+
+err_phy:
+ devm_phy_put(dev, phy);
+ return ret;
+}
+
static void am65_cpsw_nuss_mac_config(struct phylink_config *config, unsigned int mode,
const struct phylink_link_state *state)
{
@@ -1873,11 +1947,6 @@ static int am65_cpsw_init_cpts(struct am65_cpsw_common *common)
int ret = PTR_ERR(cpts);
of_node_put(node);
- if (ret == -EOPNOTSUPP) {
- dev_info(dev, "cpts disabled\n");
- return 0;
- }
-
dev_err(dev, "cpts create err %d\n", ret);
return ret;
}
@@ -1959,6 +2028,11 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
goto of_node_put;
}
+ /* Initialize the Serdes PHY for the port */
+ ret = am65_cpsw_init_serdes_phy(dev, port_np, port);
+ if (ret)
+ return ret;
+
port->slave.mac_only =
of_property_read_bool(port_np, "ti,mac-only");
@@ -2672,7 +2746,7 @@ static const struct am65_cpsw_pdata j721e_pdata = {
};
static const struct am65_cpsw_pdata am64x_cpswxg_pdata = {
- .quirks = 0,
+ .quirks = AM64_CPSW_QUIRK_DMA_RX_TDOWN_IRQ,
.ale_dev_id = "am64-cpswxg",
.fdqring_mode = K3_RINGACC_RING_MODE_RING,
};
@@ -2684,11 +2758,19 @@ static const struct am65_cpsw_pdata j7200_cpswxg_pdata = {
.extra_modes = BIT(PHY_INTERFACE_MODE_QSGMII),
};
+static const struct am65_cpsw_pdata j721e_cpswxg_pdata = {
+ .quirks = 0,
+ .ale_dev_id = "am64-cpswxg",
+ .fdqring_mode = K3_RINGACC_RING_MODE_MESSAGE,
+ .extra_modes = BIT(PHY_INTERFACE_MODE_QSGMII),
+};
+
static const struct of_device_id am65_cpsw_nuss_of_mtable[] = {
{ .compatible = "ti,am654-cpsw-nuss", .data = &am65x_sr1_0},
{ .compatible = "ti,j721e-cpsw-nuss", .data = &j721e_pdata},
{ .compatible = "ti,am642-cpsw-nuss", .data = &am64x_cpswxg_pdata},
{ .compatible = "ti,j7200-cpswxg-nuss", .data = &j7200_cpswxg_pdata},
+ { .compatible = "ti,j721e-cpswxg-nuss", .data = &j721e_cpswxg_pdata},
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, am65_cpsw_nuss_of_mtable);
@@ -2842,6 +2924,7 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
err_free_phylink:
am65_cpsw_nuss_phylink_cleanup(common);
+ am65_cpts_release(common->cpts);
err_of_clear:
of_platform_device_destroy(common->mdio_dev, NULL);
err_pm_clear:
@@ -2870,6 +2953,8 @@ static int am65_cpsw_nuss_remove(struct platform_device *pdev)
*/
am65_cpsw_nuss_cleanup_ndev(common);
am65_cpsw_nuss_phylink_cleanup(common);
+ am65_cpts_release(common->cpts);
+ am65_cpsw_disable_serdes_phy(common);
of_platform_device_destroy(common->mdio_dev, NULL);
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.h b/drivers/net/ethernet/ti/am65-cpsw-nuss.h
index 4b75620f8d28..cad04662739c 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.h
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.h
@@ -32,6 +32,7 @@ struct am65_cpsw_slave_data {
struct device_node *phy_node;
phy_interface_t phy_if;
struct phy *ifphy;
+ struct phy *serdes_phy;
bool rx_pause;
bool tx_pause;
u8 mac_addr[ETH_ALEN];
@@ -90,6 +91,7 @@ struct am65_cpsw_rx_chn {
};
#define AM65_CPSW_QUIRK_I2027_NO_TX_CSUM BIT(0)
+#define AM64_CPSW_QUIRK_DMA_RX_TDOWN_IRQ BIT(1)
struct am65_cpsw_pdata {
u32 quirks;
diff --git a/drivers/net/ethernet/ti/am65-cpsw-qos.c b/drivers/net/ethernet/ti/am65-cpsw-qos.c
index e162771893af..8dc2c3085dcf 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-qos.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-qos.c
@@ -585,6 +585,26 @@ static int am65_cpsw_setup_taprio(struct net_device *ndev, void *type_data)
return am65_cpsw_set_taprio(ndev, type_data);
}
+static int am65_cpsw_tc_query_caps(struct net_device *ndev, void *type_data)
+{
+ struct tc_query_caps_base *base = type_data;
+
+ switch (base->type) {
+ case TC_SETUP_QDISC_TAPRIO: {
+ struct tc_taprio_caps *caps = base->caps;
+
+ if (!IS_ENABLED(CONFIG_TI_AM65_CPSW_TAS))
+ return -EOPNOTSUPP;
+
+ caps->gate_mask_per_txq = true;
+
+ return 0;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static int am65_cpsw_qos_clsflower_add_policer(struct am65_cpsw_port *port,
struct netlink_ext_ack *extack,
struct flow_cls_offload *cls,
@@ -765,6 +785,8 @@ int am65_cpsw_qos_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type,
void *type_data)
{
switch (type) {
+ case TC_QUERY_CAPS:
+ return am65_cpsw_tc_query_caps(ndev, type_data);
case TC_SETUP_QDISC_TAPRIO:
return am65_cpsw_setup_taprio(ndev, type_data);
case TC_SETUP_BLOCK:
diff --git a/drivers/net/ethernet/ti/am65-cpts.c b/drivers/net/ethernet/ti/am65-cpts.c
index 9535396b28cd..16ee9c29cb35 100644
--- a/drivers/net/ethernet/ti/am65-cpts.c
+++ b/drivers/net/ethernet/ti/am65-cpts.c
@@ -176,6 +176,10 @@ struct am65_cpts {
u32 genf_enable;
u32 hw_ts_enable;
struct sk_buff_head txq;
+ bool pps_enabled;
+ bool pps_present;
+ u32 pps_hw_ts_idx;
+ u32 pps_genf_idx;
/* context save/restore */
u64 sr_cpts_ns;
u64 sr_ktime_ns;
@@ -319,8 +323,15 @@ static int am65_cpts_fifo_read(struct am65_cpts *cpts)
case AM65_CPTS_EV_HW:
pevent.index = am65_cpts_event_get_port(event) - 1;
pevent.timestamp = event->timestamp;
- pevent.type = PTP_CLOCK_EXTTS;
- dev_dbg(cpts->dev, "AM65_CPTS_EV_HW p:%d t:%llu\n",
+ if (cpts->pps_enabled && pevent.index == cpts->pps_hw_ts_idx) {
+ pevent.type = PTP_CLOCK_PPSUSR;
+ pevent.pps_times.ts_real = ns_to_timespec64(pevent.timestamp);
+ } else {
+ pevent.type = PTP_CLOCK_EXTTS;
+ }
+ dev_dbg(cpts->dev, "AM65_CPTS_EV_HW:%s p:%d t:%llu\n",
+ pevent.type == PTP_CLOCK_EXTTS ?
+ "extts" : "pps",
pevent.index, event->timestamp);
ptp_clock_event(cpts->ptp_clock, &pevent);
@@ -394,10 +405,13 @@ static irqreturn_t am65_cpts_interrupt(int irq, void *dev_id)
static int am65_cpts_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
struct am65_cpts *cpts = container_of(ptp, struct am65_cpts, ptp_info);
+ u32 pps_ctrl_val = 0, pps_ppm_hi = 0, pps_ppm_low = 0;
s32 ppb = scaled_ppm_to_ppb(scaled_ppm);
+ int pps_index = cpts->pps_genf_idx;
+ u64 adj_period, pps_adj_period;
+ u32 ctrl_val, ppm_hi, ppm_low;
+ unsigned long flags;
int neg_adj = 0;
- u64 adj_period;
- u32 val;
if (ppb < 0) {
neg_adj = 1;
@@ -417,17 +431,53 @@ static int am65_cpts_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
mutex_lock(&cpts->ptp_clk_lock);
- val = am65_cpts_read32(cpts, control);
+ ctrl_val = am65_cpts_read32(cpts, control);
if (neg_adj)
- val |= AM65_CPTS_CONTROL_TS_PPM_DIR;
+ ctrl_val |= AM65_CPTS_CONTROL_TS_PPM_DIR;
else
- val &= ~AM65_CPTS_CONTROL_TS_PPM_DIR;
- am65_cpts_write32(cpts, val, control);
+ ctrl_val &= ~AM65_CPTS_CONTROL_TS_PPM_DIR;
+
+ ppm_hi = upper_32_bits(adj_period) & 0x3FF;
+ ppm_low = lower_32_bits(adj_period);
+
+ if (cpts->pps_enabled) {
+ pps_ctrl_val = am65_cpts_read32(cpts, genf[pps_index].control);
+ if (neg_adj)
+ pps_ctrl_val &= ~BIT(1);
+ else
+ pps_ctrl_val |= BIT(1);
+
+ /* GenF PPM will do correction using cpts refclk tick which is
+ * (cpts->ts_add_val + 1) ns, so GenF length PPM adj period
+ * need to be corrected.
+ */
+ pps_adj_period = adj_period * (cpts->ts_add_val + 1);
+ pps_ppm_hi = upper_32_bits(pps_adj_period) & 0x3FF;
+ pps_ppm_low = lower_32_bits(pps_adj_period);
+ }
+
+ spin_lock_irqsave(&cpts->lock, flags);
+
+ /* All below writes must be done extremely fast:
+ * - delay between PPM dir and PPM value changes can cause err due old
+ * PPM correction applied in wrong direction
+ * - delay between CPTS-clock PPM cfg and GenF PPM cfg can cause err
+ * due CPTS-clock PPM working with new cfg while GenF PPM cfg still
+ * with old for short period of time
+ */
+
+ am65_cpts_write32(cpts, ctrl_val, control);
+ am65_cpts_write32(cpts, ppm_hi, ts_ppm_hi);
+ am65_cpts_write32(cpts, ppm_low, ts_ppm_low);
+
+ if (cpts->pps_enabled) {
+ am65_cpts_write32(cpts, pps_ctrl_val, genf[pps_index].control);
+ am65_cpts_write32(cpts, pps_ppm_hi, genf[pps_index].ppm_hi);
+ am65_cpts_write32(cpts, pps_ppm_low, genf[pps_index].ppm_low);
+ }
- val = upper_32_bits(adj_period) & 0x3FF;
- am65_cpts_write32(cpts, val, ts_ppm_hi);
- val = lower_32_bits(adj_period);
- am65_cpts_write32(cpts, val, ts_ppm_low);
+ /* All GenF/EstF can be updated here the same way */
+ spin_unlock_irqrestore(&cpts->lock, flags);
mutex_unlock(&cpts->ptp_clk_lock);
@@ -507,7 +557,13 @@ static void am65_cpts_extts_enable_hw(struct am65_cpts *cpts, u32 index, int on)
static int am65_cpts_extts_enable(struct am65_cpts *cpts, u32 index, int on)
{
- if (!!(cpts->hw_ts_enable & BIT(index)) == !!on)
+ if (index >= cpts->ptp_info.n_ext_ts)
+ return -ENXIO;
+
+ if (cpts->pps_present && index == cpts->pps_hw_ts_idx)
+ return -EINVAL;
+
+ if (((cpts->hw_ts_enable & BIT(index)) >> index) == on)
return 0;
mutex_lock(&cpts->ptp_clk_lock);
@@ -591,6 +647,12 @@ static void am65_cpts_perout_enable_hw(struct am65_cpts *cpts,
static int am65_cpts_perout_enable(struct am65_cpts *cpts,
struct ptp_perout_request *req, int on)
{
+ if (req->index >= cpts->ptp_info.n_per_out)
+ return -ENXIO;
+
+ if (cpts->pps_present && req->index == cpts->pps_genf_idx)
+ return -EINVAL;
+
if (!!(cpts->genf_enable & BIT(req->index)) == !!on)
return 0;
@@ -604,6 +666,48 @@ static int am65_cpts_perout_enable(struct am65_cpts *cpts,
return 0;
}
+static int am65_cpts_pps_enable(struct am65_cpts *cpts, int on)
+{
+ int ret = 0;
+ struct timespec64 ts;
+ struct ptp_clock_request rq;
+ u64 ns;
+
+ if (!cpts->pps_present)
+ return -EINVAL;
+
+ if (cpts->pps_enabled == !!on)
+ return 0;
+
+ mutex_lock(&cpts->ptp_clk_lock);
+
+ if (on) {
+ am65_cpts_extts_enable_hw(cpts, cpts->pps_hw_ts_idx, on);
+
+ ns = am65_cpts_gettime(cpts, NULL);
+ ts = ns_to_timespec64(ns);
+ rq.perout.period.sec = 1;
+ rq.perout.period.nsec = 0;
+ rq.perout.start.sec = ts.tv_sec + 2;
+ rq.perout.start.nsec = 0;
+ rq.perout.index = cpts->pps_genf_idx;
+
+ am65_cpts_perout_enable_hw(cpts, &rq.perout, on);
+ cpts->pps_enabled = true;
+ } else {
+ rq.perout.index = cpts->pps_genf_idx;
+ am65_cpts_perout_enable_hw(cpts, &rq.perout, on);
+ am65_cpts_extts_enable_hw(cpts, cpts->pps_hw_ts_idx, on);
+ cpts->pps_enabled = false;
+ }
+
+ mutex_unlock(&cpts->ptp_clk_lock);
+
+ dev_dbg(cpts->dev, "%s: pps: %s\n",
+ __func__, on ? "enabled" : "disabled");
+ return ret;
+}
+
static int am65_cpts_ptp_enable(struct ptp_clock_info *ptp,
struct ptp_clock_request *rq, int on)
{
@@ -614,6 +718,8 @@ static int am65_cpts_ptp_enable(struct ptp_clock_info *ptp,
return am65_cpts_extts_enable(cpts, rq->extts.index, on);
case PTP_CLK_REQ_PEROUT:
return am65_cpts_perout_enable(cpts, &rq->perout, on);
+ case PTP_CLK_REQ_PPS:
+ return am65_cpts_pps_enable(cpts, on);
default:
break;
}
@@ -926,17 +1032,33 @@ static int am65_cpts_of_parse(struct am65_cpts *cpts, struct device_node *node)
if (!of_property_read_u32(node, "ti,cpts-periodic-outputs", &prop[0]))
cpts->genf_num = prop[0];
+ if (!of_property_read_u32_array(node, "ti,pps", prop, 2)) {
+ cpts->pps_present = true;
+
+ if (prop[0] > 7) {
+ dev_err(cpts->dev, "invalid HWx_TS_PUSH index: %u provided\n", prop[0]);
+ cpts->pps_present = false;
+ }
+ if (prop[1] > 1) {
+ dev_err(cpts->dev, "invalid GENFy index: %u provided\n", prop[1]);
+ cpts->pps_present = false;
+ }
+ if (cpts->pps_present) {
+ cpts->pps_hw_ts_idx = prop[0];
+ cpts->pps_genf_idx = prop[1];
+ }
+ }
+
return cpts_of_mux_clk_setup(cpts, node);
}
-static void am65_cpts_release(void *data)
+void am65_cpts_release(struct am65_cpts *cpts)
{
- struct am65_cpts *cpts = data;
-
ptp_clock_unregister(cpts->ptp_clock);
am65_cpts_disable(cpts);
clk_disable_unprepare(cpts->refclk);
}
+EXPORT_SYMBOL_GPL(am65_cpts_release);
struct am65_cpts *am65_cpts_create(struct device *dev, void __iomem *regs,
struct device_node *node)
@@ -993,6 +1115,8 @@ struct am65_cpts *am65_cpts_create(struct device *dev, void __iomem *regs,
cpts->ptp_info.n_ext_ts = cpts->ext_ts_inputs;
if (cpts->genf_num)
cpts->ptp_info.n_per_out = cpts->genf_num;
+ if (cpts->pps_present)
+ cpts->ptp_info.pps = 1;
am65_cpts_set_add_val(cpts);
@@ -1014,26 +1138,22 @@ struct am65_cpts *am65_cpts_create(struct device *dev, void __iomem *regs,
}
cpts->phc_index = ptp_clock_index(cpts->ptp_clock);
- ret = devm_add_action_or_reset(dev, am65_cpts_release, cpts);
- if (ret) {
- dev_err(dev, "failed to add ptpclk reset action %d", ret);
- return ERR_PTR(ret);
- }
-
ret = devm_request_threaded_irq(dev, cpts->irq, NULL,
am65_cpts_interrupt,
IRQF_ONESHOT, dev_name(dev), cpts);
if (ret < 0) {
dev_err(cpts->dev, "error attaching irq %d\n", ret);
- return ERR_PTR(ret);
+ goto reset_ptpclk;
}
- dev_info(dev, "CPTS ver 0x%08x, freq:%u, add_val:%u\n",
+ dev_info(dev, "CPTS ver 0x%08x, freq:%u, add_val:%u pps:%d\n",
am65_cpts_read32(cpts, idver),
- cpts->refclk_freq, cpts->ts_add_val);
+ cpts->refclk_freq, cpts->ts_add_val, cpts->pps_present);
return cpts;
+reset_ptpclk:
+ am65_cpts_release(cpts);
refclk_disable:
clk_disable_unprepare(cpts->refclk);
return ERR_PTR(ret);
diff --git a/drivers/net/ethernet/ti/am65-cpts.h b/drivers/net/ethernet/ti/am65-cpts.h
index bd08f4b2edd2..6e14df0be113 100644
--- a/drivers/net/ethernet/ti/am65-cpts.h
+++ b/drivers/net/ethernet/ti/am65-cpts.h
@@ -18,6 +18,7 @@ struct am65_cpts_estf_cfg {
};
#if IS_ENABLED(CONFIG_TI_K3_AM65_CPTS)
+void am65_cpts_release(struct am65_cpts *cpts);
struct am65_cpts *am65_cpts_create(struct device *dev, void __iomem *regs,
struct device_node *node);
int am65_cpts_phc_index(struct am65_cpts *cpts);
@@ -31,6 +32,10 @@ void am65_cpts_estf_disable(struct am65_cpts *cpts, int idx);
void am65_cpts_suspend(struct am65_cpts *cpts);
void am65_cpts_resume(struct am65_cpts *cpts);
#else
+static inline void am65_cpts_release(struct am65_cpts *cpts)
+{
+}
+
static inline struct am65_cpts *am65_cpts_create(struct device *dev,
void __iomem *regs,
struct device_node *node)
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 13c9c2d6b79b..37f0b62ec5d6 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -1458,6 +1458,8 @@ static int cpsw_probe_dual_emac(struct cpsw_priv *priv)
priv_sl2->emac_port = 1;
cpsw->slaves[1].ndev = ndev;
ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX;
+ ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT;
ndev->netdev_ops = &cpsw_netdev_ops;
ndev->ethtool_ops = &cpsw_ethtool_ops;
@@ -1635,6 +1637,8 @@ static int cpsw_probe(struct platform_device *pdev)
cpsw->slaves[0].ndev = ndev;
ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX;
+ ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT;
ndev->netdev_ops = &cpsw_netdev_ops;
ndev->ethtool_ops = &cpsw_ethtool_ops;
diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c
index 83596ec0c7cb..35128dd45ffc 100644
--- a/drivers/net/ethernet/ti/cpsw_new.c
+++ b/drivers/net/ethernet/ti/cpsw_new.c
@@ -1405,6 +1405,10 @@ static int cpsw_create_ports(struct cpsw_common *cpsw)
ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_NETNS_LOCAL | NETIF_F_HW_TC;
+ ndev->xdp_features = NETDEV_XDP_ACT_BASIC |
+ NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT;
+
ndev->netdev_ops = &cpsw_netdev_ops;
ndev->ethtool_ops = &cpsw_ethtool_ops;
SET_NETDEV_DEV(ndev, dev);
diff --git a/drivers/net/ethernet/ti/cpsw_priv.c b/drivers/net/ethernet/ti/cpsw_priv.c
index 758295c898ac..e966dd47e2db 100644
--- a/drivers/net/ethernet/ti/cpsw_priv.c
+++ b/drivers/net/ethernet/ti/cpsw_priv.c
@@ -20,6 +20,7 @@
#include <linux/skbuff.h>
#include <net/page_pool.h>
#include <net/pkt_cls.h>
+#include <net/pkt_sched.h>
#include "cpsw.h"
#include "cpts.h"
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
index 946b9753ccfb..23169e36a3d4 100644
--- a/drivers/net/ethernet/ti/davinci_mdio.c
+++ b/drivers/net/ethernet/ti/davinci_mdio.c
@@ -225,7 +225,7 @@ static int davinci_get_mdio_data(struct mdiobb_ctrl *ctrl)
return test_bit(MDIO_PIN, &reg);
}
-static int davinci_mdiobb_read(struct mii_bus *bus, int phy, int reg)
+static int davinci_mdiobb_read_c22(struct mii_bus *bus, int phy, int reg)
{
int ret;
@@ -233,7 +233,7 @@ static int davinci_mdiobb_read(struct mii_bus *bus, int phy, int reg)
if (ret < 0)
return ret;
- ret = mdiobb_read(bus, phy, reg);
+ ret = mdiobb_read_c22(bus, phy, reg);
pm_runtime_mark_last_busy(bus->parent);
pm_runtime_put_autosuspend(bus->parent);
@@ -241,8 +241,8 @@ static int davinci_mdiobb_read(struct mii_bus *bus, int phy, int reg)
return ret;
}
-static int davinci_mdiobb_write(struct mii_bus *bus, int phy, int reg,
- u16 val)
+static int davinci_mdiobb_write_c22(struct mii_bus *bus, int phy, int reg,
+ u16 val)
{
int ret;
@@ -250,7 +250,41 @@ static int davinci_mdiobb_write(struct mii_bus *bus, int phy, int reg,
if (ret < 0)
return ret;
- ret = mdiobb_write(bus, phy, reg, val);
+ ret = mdiobb_write_c22(bus, phy, reg, val);
+
+ pm_runtime_mark_last_busy(bus->parent);
+ pm_runtime_put_autosuspend(bus->parent);
+
+ return ret;
+}
+
+static int davinci_mdiobb_read_c45(struct mii_bus *bus, int phy, int devad,
+ int reg)
+{
+ int ret;
+
+ ret = pm_runtime_resume_and_get(bus->parent);
+ if (ret < 0)
+ return ret;
+
+ ret = mdiobb_read_c45(bus, phy, devad, reg);
+
+ pm_runtime_mark_last_busy(bus->parent);
+ pm_runtime_put_autosuspend(bus->parent);
+
+ return ret;
+}
+
+static int davinci_mdiobb_write_c45(struct mii_bus *bus, int phy, int devad,
+ int reg, u16 val)
+{
+ int ret;
+
+ ret = pm_runtime_resume_and_get(bus->parent);
+ if (ret < 0)
+ return ret;
+
+ ret = mdiobb_write_c45(bus, phy, devad, reg, val);
pm_runtime_mark_last_busy(bus->parent);
pm_runtime_put_autosuspend(bus->parent);
@@ -573,8 +607,10 @@ static int davinci_mdio_probe(struct platform_device *pdev)
data->bus->name = dev_name(dev);
if (data->manual_mode) {
- data->bus->read = davinci_mdiobb_read;
- data->bus->write = davinci_mdiobb_write;
+ data->bus->read = davinci_mdiobb_read_c22;
+ data->bus->write = davinci_mdiobb_write_c22;
+ data->bus->read_c45 = davinci_mdiobb_read_c45;
+ data->bus->write_c45 = davinci_mdiobb_write_c45;
data->bus->reset = davinci_mdiobb_reset;
dev_info(dev, "Configuring MDIO in manual mode\n");
diff --git a/drivers/net/ethernet/wangxun/Kconfig b/drivers/net/ethernet/wangxun/Kconfig
index 86310588c6c1..c9d88673d306 100644
--- a/drivers/net/ethernet/wangxun/Kconfig
+++ b/drivers/net/ethernet/wangxun/Kconfig
@@ -18,6 +18,7 @@ if NET_VENDOR_WANGXUN
config LIBWX
tristate
+ select PAGE_POOL
help
Common library for Wangxun(R) Ethernet drivers.
@@ -25,6 +26,7 @@ config NGBE
tristate "Wangxun(R) GbE PCI Express adapters support"
depends on PCI
select LIBWX
+ select PHYLIB
help
This driver supports Wangxun(R) GbE PCI Express family of
adapters.
diff --git a/drivers/net/ethernet/wangxun/libwx/Makefile b/drivers/net/ethernet/wangxun/libwx/Makefile
index 1ed5e23af944..42ccd6e4052e 100644
--- a/drivers/net/ethernet/wangxun/libwx/Makefile
+++ b/drivers/net/ethernet/wangxun/libwx/Makefile
@@ -4,4 +4,4 @@
obj-$(CONFIG_LIBWX) += libwx.o
-libwx-objs := wx_hw.o
+libwx-objs := wx_hw.o wx_lib.o wx_ethtool.o
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c
new file mode 100644
index 000000000000..93cb6f2294e7
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2015 - 2023 Beijing WangXun Technology Co., Ltd. */
+
+#include <linux/pci.h>
+#include <linux/phy.h>
+
+#include "wx_type.h"
+#include "wx_ethtool.h"
+
+void wx_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info)
+{
+ struct wx *wx = netdev_priv(netdev);
+
+ strscpy(info->driver, wx->driver_name, sizeof(info->driver));
+ strscpy(info->fw_version, wx->eeprom_id, sizeof(info->fw_version));
+ strscpy(info->bus_info, pci_name(wx->pdev), sizeof(info->bus_info));
+}
+EXPORT_SYMBOL(wx_get_drvinfo);
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h
new file mode 100644
index 000000000000..e85538c69454
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2015 - 2023 Beijing WangXun Technology Co., Ltd. */
+
+#ifndef _WX_ETHTOOL_H_
+#define _WX_ETHTOOL_H_
+
+void wx_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info);
+#endif /* _WX_ETHTOOL_H_ */
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.c b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
index c57dc3238b3f..7db57f934a91 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_hw.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
@@ -2,59 +2,100 @@
/* Copyright (c) 2015 - 2022 Beijing WangXun Technology Co., Ltd. */
#include <linux/etherdevice.h>
+#include <linux/netdevice.h>
#include <linux/if_ether.h>
#include <linux/iopoll.h>
#include <linux/pci.h>
#include "wx_type.h"
+#include "wx_lib.h"
#include "wx_hw.h"
-static void wx_intr_disable(struct wx_hw *wxhw, u64 qmask)
+static void wx_intr_disable(struct wx *wx, u64 qmask)
{
u32 mask;
- mask = (qmask & 0xFFFFFFFF);
+ mask = (qmask & U32_MAX);
if (mask)
- wr32(wxhw, WX_PX_IMS(0), mask);
+ wr32(wx, WX_PX_IMS(0), mask);
- if (wxhw->mac.type == wx_mac_sp) {
+ if (wx->mac.type == wx_mac_sp) {
mask = (qmask >> 32);
if (mask)
- wr32(wxhw, WX_PX_IMS(1), mask);
+ wr32(wx, WX_PX_IMS(1), mask);
}
}
+void wx_intr_enable(struct wx *wx, u64 qmask)
+{
+ u32 mask;
+
+ mask = (qmask & U32_MAX);
+ if (mask)
+ wr32(wx, WX_PX_IMC(0), mask);
+ if (wx->mac.type == wx_mac_sp) {
+ mask = (qmask >> 32);
+ if (mask)
+ wr32(wx, WX_PX_IMC(1), mask);
+ }
+}
+EXPORT_SYMBOL(wx_intr_enable);
+
+/**
+ * wx_irq_disable - Mask off interrupt generation on the NIC
+ * @wx: board private structure
+ **/
+void wx_irq_disable(struct wx *wx)
+{
+ struct pci_dev *pdev = wx->pdev;
+
+ wr32(wx, WX_PX_MISC_IEN, 0);
+ wx_intr_disable(wx, WX_INTR_ALL);
+
+ if (pdev->msix_enabled) {
+ int vector;
+
+ for (vector = 0; vector < wx->num_q_vectors; vector++)
+ synchronize_irq(wx->msix_entries[vector].vector);
+
+ synchronize_irq(wx->msix_entries[vector].vector);
+ } else {
+ synchronize_irq(pdev->irq);
+ }
+}
+EXPORT_SYMBOL(wx_irq_disable);
+
/* cmd_addr is used for some special command:
* 1. to be sector address, when implemented erase sector command
* 2. to be flash address when implemented read, write flash address
*/
-static int wx_fmgr_cmd_op(struct wx_hw *wxhw, u32 cmd, u32 cmd_addr)
+static int wx_fmgr_cmd_op(struct wx *wx, u32 cmd, u32 cmd_addr)
{
u32 cmd_val = 0, val = 0;
cmd_val = WX_SPI_CMD_CMD(cmd) |
WX_SPI_CMD_CLK(WX_SPI_CLK_DIV) |
cmd_addr;
- wr32(wxhw, WX_SPI_CMD, cmd_val);
+ wr32(wx, WX_SPI_CMD, cmd_val);
return read_poll_timeout(rd32, val, (val & 0x1), 10, 100000,
- false, wxhw, WX_SPI_STATUS);
+ false, wx, WX_SPI_STATUS);
}
-static int wx_flash_read_dword(struct wx_hw *wxhw, u32 addr, u32 *data)
+static int wx_flash_read_dword(struct wx *wx, u32 addr, u32 *data)
{
int ret = 0;
- ret = wx_fmgr_cmd_op(wxhw, WX_SPI_CMD_READ_DWORD, addr);
+ ret = wx_fmgr_cmd_op(wx, WX_SPI_CMD_READ_DWORD, addr);
if (ret < 0)
return ret;
- *data = rd32(wxhw, WX_SPI_DATA);
+ *data = rd32(wx, WX_SPI_DATA);
return ret;
}
-int wx_check_flash_load(struct wx_hw *hw, u32 check_bit)
+int wx_check_flash_load(struct wx *hw, u32 check_bit)
{
u32 reg = 0;
int err = 0;
@@ -73,29 +114,25 @@ int wx_check_flash_load(struct wx_hw *hw, u32 check_bit)
}
EXPORT_SYMBOL(wx_check_flash_load);
-void wx_control_hw(struct wx_hw *wxhw, bool drv)
+void wx_control_hw(struct wx *wx, bool drv)
{
- if (drv) {
- /* Let firmware know the driver has taken over */
- wr32m(wxhw, WX_CFG_PORT_CTL,
- WX_CFG_PORT_CTL_DRV_LOAD, WX_CFG_PORT_CTL_DRV_LOAD);
- } else {
- /* Let firmware take over control of hw */
- wr32m(wxhw, WX_CFG_PORT_CTL,
- WX_CFG_PORT_CTL_DRV_LOAD, 0);
- }
+ /* True : Let firmware know the driver has taken over
+ * False : Let firmware take over control of hw
+ */
+ wr32m(wx, WX_CFG_PORT_CTL, WX_CFG_PORT_CTL_DRV_LOAD,
+ drv ? WX_CFG_PORT_CTL_DRV_LOAD : 0);
}
EXPORT_SYMBOL(wx_control_hw);
/**
* wx_mng_present - returns 0 when management capability is present
- * @wxhw: pointer to hardware structure
+ * @wx: pointer to hardware structure
*/
-int wx_mng_present(struct wx_hw *wxhw)
+int wx_mng_present(struct wx *wx)
{
u32 fwsm;
- fwsm = rd32(wxhw, WX_MIS_ST);
+ fwsm = rd32(wx, WX_MIS_ST);
if (fwsm & WX_MIS_ST_MNG_INIT_DN)
return 0;
else
@@ -108,40 +145,40 @@ static DEFINE_MUTEX(wx_sw_sync_lock);
/**
* wx_release_sw_sync - Release SW semaphore
- * @wxhw: pointer to hardware structure
+ * @wx: pointer to hardware structure
* @mask: Mask to specify which semaphore to release
*
* Releases the SW semaphore for the specified
* function (CSR, PHY0, PHY1, EEPROM, Flash)
**/
-static void wx_release_sw_sync(struct wx_hw *wxhw, u32 mask)
+static void wx_release_sw_sync(struct wx *wx, u32 mask)
{
mutex_lock(&wx_sw_sync_lock);
- wr32m(wxhw, WX_MNG_SWFW_SYNC, mask, 0);
+ wr32m(wx, WX_MNG_SWFW_SYNC, mask, 0);
mutex_unlock(&wx_sw_sync_lock);
}
/**
* wx_acquire_sw_sync - Acquire SW semaphore
- * @wxhw: pointer to hardware structure
+ * @wx: pointer to hardware structure
* @mask: Mask to specify which semaphore to acquire
*
* Acquires the SW semaphore for the specified
* function (CSR, PHY0, PHY1, EEPROM, Flash)
**/
-static int wx_acquire_sw_sync(struct wx_hw *wxhw, u32 mask)
+static int wx_acquire_sw_sync(struct wx *wx, u32 mask)
{
u32 sem = 0;
int ret = 0;
mutex_lock(&wx_sw_sync_lock);
ret = read_poll_timeout(rd32, sem, !(sem & mask),
- 5000, 2000000, false, wxhw, WX_MNG_SWFW_SYNC);
+ 5000, 2000000, false, wx, WX_MNG_SWFW_SYNC);
if (!ret) {
sem |= mask;
- wr32(wxhw, WX_MNG_SWFW_SYNC, sem);
+ wr32(wx, WX_MNG_SWFW_SYNC, sem);
} else {
- wx_err(wxhw, "SW Semaphore not granted: 0x%x.\n", sem);
+ wx_err(wx, "SW Semaphore not granted: 0x%x.\n", sem);
}
mutex_unlock(&wx_sw_sync_lock);
@@ -150,7 +187,7 @@ static int wx_acquire_sw_sync(struct wx_hw *wxhw, u32 mask)
/**
* wx_host_interface_command - Issue command to manageability block
- * @wxhw: pointer to the HW structure
+ * @wx: pointer to the HW structure
* @buffer: contains the command to write and where the return status will
* be placed
* @length: length of buffer, must be multiple of 4 bytes
@@ -162,7 +199,7 @@ static int wx_acquire_sw_sync(struct wx_hw *wxhw, u32 mask)
* So we will leave this up to the caller to read back the data
* in these cases.
**/
-int wx_host_interface_command(struct wx_hw *wxhw, u32 *buffer,
+int wx_host_interface_command(struct wx *wx, u32 *buffer,
u32 length, u32 timeout, bool return_data)
{
u32 hdr_size = sizeof(struct wx_hic_hdr);
@@ -172,17 +209,17 @@ int wx_host_interface_command(struct wx_hw *wxhw, u32 *buffer,
u16 buf_len;
if (length == 0 || length > WX_HI_MAX_BLOCK_BYTE_LENGTH) {
- wx_err(wxhw, "Buffer length failure buffersize=%d.\n", length);
+ wx_err(wx, "Buffer length failure buffersize=%d.\n", length);
return -EINVAL;
}
- status = wx_acquire_sw_sync(wxhw, WX_MNG_SWFW_SYNC_SW_MB);
+ status = wx_acquire_sw_sync(wx, WX_MNG_SWFW_SYNC_SW_MB);
if (status != 0)
return status;
/* Calculate length in DWORDs. We must be DWORD aligned */
if ((length % (sizeof(u32))) != 0) {
- wx_err(wxhw, "Buffer length failure, not aligned to dword");
+ wx_err(wx, "Buffer length failure, not aligned to dword");
status = -EINVAL;
goto rel_out;
}
@@ -193,38 +230,38 @@ int wx_host_interface_command(struct wx_hw *wxhw, u32 *buffer,
* into the ram area.
*/
for (i = 0; i < dword_len; i++) {
- wr32a(wxhw, WX_MNG_MBOX, i, (__force u32)cpu_to_le32(buffer[i]));
+ wr32a(wx, WX_MNG_MBOX, i, (__force u32)cpu_to_le32(buffer[i]));
/* write flush */
- buf[i] = rd32a(wxhw, WX_MNG_MBOX, i);
+ buf[i] = rd32a(wx, WX_MNG_MBOX, i);
}
/* Setting this bit tells the ARC that a new command is pending. */
- wr32m(wxhw, WX_MNG_MBOX_CTL,
+ wr32m(wx, WX_MNG_MBOX_CTL,
WX_MNG_MBOX_CTL_SWRDY, WX_MNG_MBOX_CTL_SWRDY);
status = read_poll_timeout(rd32, hicr, hicr & WX_MNG_MBOX_CTL_FWRDY, 1000,
- timeout * 1000, false, wxhw, WX_MNG_MBOX_CTL);
+ timeout * 1000, false, wx, WX_MNG_MBOX_CTL);
/* Check command completion */
if (status) {
- wx_dbg(wxhw, "Command has failed with no status valid.\n");
+ wx_dbg(wx, "Command has failed with no status valid.\n");
- buf[0] = rd32(wxhw, WX_MNG_MBOX);
+ buf[0] = rd32(wx, WX_MNG_MBOX);
if ((buffer[0] & 0xff) != (~buf[0] >> 24)) {
status = -EINVAL;
goto rel_out;
}
if ((buf[0] & 0xff0000) >> 16 == 0x80) {
- wx_dbg(wxhw, "It's unknown cmd.\n");
+ wx_dbg(wx, "It's unknown cmd.\n");
status = -EINVAL;
goto rel_out;
}
- wx_dbg(wxhw, "write value:\n");
+ wx_dbg(wx, "write value:\n");
for (i = 0; i < dword_len; i++)
- wx_dbg(wxhw, "%x ", buffer[i]);
- wx_dbg(wxhw, "read value:\n");
+ wx_dbg(wx, "%x ", buffer[i]);
+ wx_dbg(wx, "read value:\n");
for (i = 0; i < dword_len; i++)
- wx_dbg(wxhw, "%x ", buf[i]);
+ wx_dbg(wx, "%x ", buf[i]);
}
if (!return_data)
@@ -235,7 +272,7 @@ int wx_host_interface_command(struct wx_hw *wxhw, u32 *buffer,
/* first pull in the header so we know the buffer length */
for (bi = 0; bi < dword_len; bi++) {
- buffer[bi] = rd32a(wxhw, WX_MNG_MBOX, bi);
+ buffer[bi] = rd32a(wx, WX_MNG_MBOX, bi);
le32_to_cpus(&buffer[bi]);
}
@@ -245,7 +282,7 @@ int wx_host_interface_command(struct wx_hw *wxhw, u32 *buffer,
goto rel_out;
if (length < buf_len + hdr_size) {
- wx_err(wxhw, "Buffer not large enough for reply message.\n");
+ wx_err(wx, "Buffer not large enough for reply message.\n");
status = -EFAULT;
goto rel_out;
}
@@ -255,12 +292,12 @@ int wx_host_interface_command(struct wx_hw *wxhw, u32 *buffer,
/* Pull in the rest of the buffer (bi is where we left off) */
for (; bi <= dword_len; bi++) {
- buffer[bi] = rd32a(wxhw, WX_MNG_MBOX, bi);
+ buffer[bi] = rd32a(wx, WX_MNG_MBOX, bi);
le32_to_cpus(&buffer[bi]);
}
rel_out:
- wx_release_sw_sync(wxhw, WX_MNG_SWFW_SYNC_SW_MB);
+ wx_release_sw_sync(wx, WX_MNG_SWFW_SYNC_SW_MB);
return status;
}
EXPORT_SYMBOL(wx_host_interface_command);
@@ -268,13 +305,13 @@ EXPORT_SYMBOL(wx_host_interface_command);
/**
* wx_read_ee_hostif_data - Read EEPROM word using a host interface cmd
* assuming that the semaphore is already obtained.
- * @wxhw: pointer to hardware structure
+ * @wx: pointer to hardware structure
* @offset: offset of word in the EEPROM to read
* @data: word read from the EEPROM
*
* Reads a 16 bit word from the EEPROM using the hostif.
**/
-static int wx_read_ee_hostif_data(struct wx_hw *wxhw, u16 offset, u16 *data)
+static int wx_read_ee_hostif_data(struct wx *wx, u16 offset, u16 *data)
{
struct wx_hic_read_shadow_ram buffer;
int status;
@@ -289,33 +326,33 @@ static int wx_read_ee_hostif_data(struct wx_hw *wxhw, u16 offset, u16 *data)
/* one word */
buffer.length = (__force u16)cpu_to_be16(sizeof(u16));
- status = wx_host_interface_command(wxhw, (u32 *)&buffer, sizeof(buffer),
+ status = wx_host_interface_command(wx, (u32 *)&buffer, sizeof(buffer),
WX_HI_COMMAND_TIMEOUT, false);
if (status != 0)
return status;
- *data = (u16)rd32a(wxhw, WX_MNG_MBOX, FW_NVM_DATA_OFFSET);
+ *data = (u16)rd32a(wx, WX_MNG_MBOX, FW_NVM_DATA_OFFSET);
return status;
}
/**
* wx_read_ee_hostif - Read EEPROM word using a host interface cmd
- * @wxhw: pointer to hardware structure
+ * @wx: pointer to hardware structure
* @offset: offset of word in the EEPROM to read
* @data: word read from the EEPROM
*
* Reads a 16 bit word from the EEPROM using the hostif.
**/
-int wx_read_ee_hostif(struct wx_hw *wxhw, u16 offset, u16 *data)
+int wx_read_ee_hostif(struct wx *wx, u16 offset, u16 *data)
{
int status = 0;
- status = wx_acquire_sw_sync(wxhw, WX_MNG_SWFW_SYNC_SW_FLASH);
+ status = wx_acquire_sw_sync(wx, WX_MNG_SWFW_SYNC_SW_FLASH);
if (status == 0) {
- status = wx_read_ee_hostif_data(wxhw, offset, data);
- wx_release_sw_sync(wxhw, WX_MNG_SWFW_SYNC_SW_FLASH);
+ status = wx_read_ee_hostif_data(wx, offset, data);
+ wx_release_sw_sync(wx, WX_MNG_SWFW_SYNC_SW_FLASH);
}
return status;
@@ -324,14 +361,14 @@ EXPORT_SYMBOL(wx_read_ee_hostif);
/**
* wx_read_ee_hostif_buffer- Read EEPROM word(s) using hostif
- * @wxhw: pointer to hardware structure
+ * @wx: pointer to hardware structure
* @offset: offset of word in the EEPROM to read
* @words: number of words
* @data: word(s) read from the EEPROM
*
* Reads a 16 bit word(s) from the EEPROM using the hostif.
**/
-int wx_read_ee_hostif_buffer(struct wx_hw *wxhw,
+int wx_read_ee_hostif_buffer(struct wx *wx,
u16 offset, u16 words, u16 *data)
{
struct wx_hic_read_shadow_ram buffer;
@@ -342,7 +379,7 @@ int wx_read_ee_hostif_buffer(struct wx_hw *wxhw,
u32 i;
/* Take semaphore for the entire operation. */
- status = wx_acquire_sw_sync(wxhw, WX_MNG_SWFW_SYNC_SW_FLASH);
+ status = wx_acquire_sw_sync(wx, WX_MNG_SWFW_SYNC_SW_FLASH);
if (status != 0)
return status;
@@ -361,20 +398,20 @@ int wx_read_ee_hostif_buffer(struct wx_hw *wxhw,
buffer.address = (__force u32)cpu_to_be32((offset + current_word) * 2);
buffer.length = (__force u16)cpu_to_be16(words_to_read * 2);
- status = wx_host_interface_command(wxhw, (u32 *)&buffer,
+ status = wx_host_interface_command(wx, (u32 *)&buffer,
sizeof(buffer),
WX_HI_COMMAND_TIMEOUT,
false);
if (status != 0) {
- wx_err(wxhw, "Host interface command failed\n");
+ wx_err(wx, "Host interface command failed\n");
goto out;
}
for (i = 0; i < words_to_read; i++) {
u32 reg = WX_MNG_MBOX + (FW_NVM_DATA_OFFSET << 2) + 2 * i;
- value = rd32(wxhw, reg);
+ value = rd32(wx, reg);
data[current_word] = (u16)(value & 0xffff);
current_word++;
i++;
@@ -388,7 +425,7 @@ int wx_read_ee_hostif_buffer(struct wx_hw *wxhw,
}
out:
- wx_release_sw_sync(wxhw, WX_MNG_SWFW_SYNC_SW_FLASH);
+ wx_release_sw_sync(wx, WX_MNG_SWFW_SYNC_SW_FLASH);
return status;
}
EXPORT_SYMBOL(wx_read_ee_hostif_buffer);
@@ -416,12 +453,12 @@ static u8 wx_calculate_checksum(u8 *buffer, u32 length)
/**
* wx_reset_hostif - send reset cmd to fw
- * @wxhw: pointer to hardware structure
+ * @wx: pointer to hardware structure
*
* Sends reset cmd to firmware through the manageability
* block.
**/
-int wx_reset_hostif(struct wx_hw *wxhw)
+int wx_reset_hostif(struct wx *wx)
{
struct wx_hic_reset reset_cmd;
int ret_val = 0;
@@ -430,15 +467,15 @@ int wx_reset_hostif(struct wx_hw *wxhw)
reset_cmd.hdr.cmd = FW_RESET_CMD;
reset_cmd.hdr.buf_len = FW_RESET_LEN;
reset_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
- reset_cmd.lan_id = wxhw->bus.func;
- reset_cmd.reset_type = (u16)wxhw->reset_type;
+ reset_cmd.lan_id = wx->bus.func;
+ reset_cmd.reset_type = (u16)wx->reset_type;
reset_cmd.hdr.checksum = 0;
reset_cmd.hdr.checksum = wx_calculate_checksum((u8 *)&reset_cmd,
(FW_CEM_HDR_LEN +
reset_cmd.hdr.buf_len));
for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
- ret_val = wx_host_interface_command(wxhw, (u32 *)&reset_cmd,
+ ret_val = wx_host_interface_command(wx, (u32 *)&reset_cmd,
sizeof(reset_cmd),
WX_HI_COMMAND_TIMEOUT,
true);
@@ -460,14 +497,14 @@ EXPORT_SYMBOL(wx_reset_hostif);
/**
* wx_init_eeprom_params - Initialize EEPROM params
- * @wxhw: pointer to hardware structure
+ * @wx: pointer to hardware structure
*
* Initializes the EEPROM parameters wx_eeprom_info within the
* wx_hw struct in order to set up EEPROM access.
**/
-void wx_init_eeprom_params(struct wx_hw *wxhw)
+void wx_init_eeprom_params(struct wx *wx)
{
- struct wx_eeprom_info *eeprom = &wxhw->eeprom;
+ struct wx_eeprom_info *eeprom = &wx->eeprom;
u16 eeprom_size;
u16 data = 0x80;
@@ -475,21 +512,21 @@ void wx_init_eeprom_params(struct wx_hw *wxhw)
eeprom->semaphore_delay = 10;
eeprom->type = wx_eeprom_none;
- if (!(rd32(wxhw, WX_SPI_STATUS) &
+ if (!(rd32(wx, WX_SPI_STATUS) &
WX_SPI_STATUS_FLASH_BYPASS)) {
eeprom->type = wx_flash;
eeprom_size = 4096;
eeprom->word_size = eeprom_size >> 1;
- wx_dbg(wxhw, "Eeprom params: type = %d, size = %d\n",
+ wx_dbg(wx, "Eeprom params: type = %d, size = %d\n",
eeprom->type, eeprom->word_size);
}
}
- if (wxhw->mac.type == wx_mac_sp) {
- if (wx_read_ee_hostif(wxhw, WX_SW_REGION_PTR, &data)) {
- wx_err(wxhw, "NVM Read Error\n");
+ if (wx->mac.type == wx_mac_sp) {
+ if (wx_read_ee_hostif(wx, WX_SW_REGION_PTR, &data)) {
+ wx_err(wx, "NVM Read Error\n");
return;
}
data = data >> 1;
@@ -501,22 +538,22 @@ EXPORT_SYMBOL(wx_init_eeprom_params);
/**
* wx_get_mac_addr - Generic get MAC address
- * @wxhw: pointer to hardware structure
+ * @wx: pointer to hardware structure
* @mac_addr: Adapter MAC address
*
* Reads the adapter's MAC address from first Receive Address Register (RAR0)
* A reset of the adapter must be performed prior to calling this function
* in order for the MAC address to have been loaded from the EEPROM into RAR0
**/
-void wx_get_mac_addr(struct wx_hw *wxhw, u8 *mac_addr)
+void wx_get_mac_addr(struct wx *wx, u8 *mac_addr)
{
u32 rar_high;
u32 rar_low;
u16 i;
- wr32(wxhw, WX_PSR_MAC_SWC_IDX, 0);
- rar_high = rd32(wxhw, WX_PSR_MAC_SWC_AD_H);
- rar_low = rd32(wxhw, WX_PSR_MAC_SWC_AD_L);
+ wr32(wx, WX_PSR_MAC_SWC_IDX, 0);
+ rar_high = rd32(wx, WX_PSR_MAC_SWC_AD_H);
+ rar_low = rd32(wx, WX_PSR_MAC_SWC_AD_L);
for (i = 0; i < 2; i++)
mac_addr[i] = (u8)(rar_high >> (1 - i) * 8);
@@ -528,7 +565,7 @@ EXPORT_SYMBOL(wx_get_mac_addr);
/**
* wx_set_rar - Set Rx address register
- * @wxhw: pointer to hardware structure
+ * @wx: pointer to hardware structure
* @index: Receive address register to write
* @addr: Address to put into receive address register
* @pools: VMDq "set" or "pool" index
@@ -536,25 +573,25 @@ EXPORT_SYMBOL(wx_get_mac_addr);
*
* Puts an ethernet address into a receive address register.
**/
-int wx_set_rar(struct wx_hw *wxhw, u32 index, u8 *addr, u64 pools,
- u32 enable_addr)
+static int wx_set_rar(struct wx *wx, u32 index, u8 *addr, u64 pools,
+ u32 enable_addr)
{
- u32 rar_entries = wxhw->mac.num_rar_entries;
+ u32 rar_entries = wx->mac.num_rar_entries;
u32 rar_low, rar_high;
/* Make sure we are using a valid rar index range */
if (index >= rar_entries) {
- wx_err(wxhw, "RAR index %d is out of range.\n", index);
+ wx_err(wx, "RAR index %d is out of range.\n", index);
return -EINVAL;
}
/* select the MAC address */
- wr32(wxhw, WX_PSR_MAC_SWC_IDX, index);
+ wr32(wx, WX_PSR_MAC_SWC_IDX, index);
/* setup VMDq pool mapping */
- wr32(wxhw, WX_PSR_MAC_SWC_VM_L, pools & 0xFFFFFFFF);
- if (wxhw->mac.type == wx_mac_sp)
- wr32(wxhw, WX_PSR_MAC_SWC_VM_H, pools >> 32);
+ wr32(wx, WX_PSR_MAC_SWC_VM_L, pools & 0xFFFFFFFF);
+ if (wx->mac.type == wx_mac_sp)
+ wr32(wx, WX_PSR_MAC_SWC_VM_H, pools >> 32);
/* HW expects these in little endian so we reverse the byte
* order from network order (big endian) to little endian
@@ -572,31 +609,30 @@ int wx_set_rar(struct wx_hw *wxhw, u32 index, u8 *addr, u64 pools,
if (enable_addr != 0)
rar_high |= WX_PSR_MAC_SWC_AD_H_AV;
- wr32(wxhw, WX_PSR_MAC_SWC_AD_L, rar_low);
- wr32m(wxhw, WX_PSR_MAC_SWC_AD_H,
- (WX_PSR_MAC_SWC_AD_H_AD(~0) |
- WX_PSR_MAC_SWC_AD_H_ADTYPE(~0) |
+ wr32(wx, WX_PSR_MAC_SWC_AD_L, rar_low);
+ wr32m(wx, WX_PSR_MAC_SWC_AD_H,
+ (WX_PSR_MAC_SWC_AD_H_AD(U16_MAX) |
+ WX_PSR_MAC_SWC_AD_H_ADTYPE(1) |
WX_PSR_MAC_SWC_AD_H_AV),
rar_high);
return 0;
}
-EXPORT_SYMBOL(wx_set_rar);
/**
* wx_clear_rar - Remove Rx address register
- * @wxhw: pointer to hardware structure
+ * @wx: pointer to hardware structure
* @index: Receive address register to write
*
* Clears an ethernet address from a receive address register.
**/
-int wx_clear_rar(struct wx_hw *wxhw, u32 index)
+static int wx_clear_rar(struct wx *wx, u32 index)
{
- u32 rar_entries = wxhw->mac.num_rar_entries;
+ u32 rar_entries = wx->mac.num_rar_entries;
/* Make sure we are using a valid rar index range */
if (index >= rar_entries) {
- wx_err(wxhw, "RAR index %d is out of range.\n", index);
+ wx_err(wx, "RAR index %d is out of range.\n", index);
return -EINVAL;
}
@@ -604,78 +640,77 @@ int wx_clear_rar(struct wx_hw *wxhw, u32 index)
* so save everything except the lower 16 bits that hold part
* of the address and the address valid bit.
*/
- wr32(wxhw, WX_PSR_MAC_SWC_IDX, index);
+ wr32(wx, WX_PSR_MAC_SWC_IDX, index);
- wr32(wxhw, WX_PSR_MAC_SWC_VM_L, 0);
- wr32(wxhw, WX_PSR_MAC_SWC_VM_H, 0);
+ wr32(wx, WX_PSR_MAC_SWC_VM_L, 0);
+ wr32(wx, WX_PSR_MAC_SWC_VM_H, 0);
- wr32(wxhw, WX_PSR_MAC_SWC_AD_L, 0);
- wr32m(wxhw, WX_PSR_MAC_SWC_AD_H,
- (WX_PSR_MAC_SWC_AD_H_AD(~0) |
- WX_PSR_MAC_SWC_AD_H_ADTYPE(~0) |
+ wr32(wx, WX_PSR_MAC_SWC_AD_L, 0);
+ wr32m(wx, WX_PSR_MAC_SWC_AD_H,
+ (WX_PSR_MAC_SWC_AD_H_AD(U16_MAX) |
+ WX_PSR_MAC_SWC_AD_H_ADTYPE(1) |
WX_PSR_MAC_SWC_AD_H_AV),
0);
return 0;
}
-EXPORT_SYMBOL(wx_clear_rar);
/**
* wx_clear_vmdq - Disassociate a VMDq pool index from a rx address
- * @wxhw: pointer to hardware struct
+ * @wx: pointer to hardware struct
* @rar: receive address register index to disassociate
* @vmdq: VMDq pool index to remove from the rar
**/
-static int wx_clear_vmdq(struct wx_hw *wxhw, u32 rar, u32 __maybe_unused vmdq)
+static int wx_clear_vmdq(struct wx *wx, u32 rar, u32 __maybe_unused vmdq)
{
- u32 rar_entries = wxhw->mac.num_rar_entries;
+ u32 rar_entries = wx->mac.num_rar_entries;
u32 mpsar_lo, mpsar_hi;
/* Make sure we are using a valid rar index range */
if (rar >= rar_entries) {
- wx_err(wxhw, "RAR index %d is out of range.\n", rar);
+ wx_err(wx, "RAR index %d is out of range.\n", rar);
return -EINVAL;
}
- wr32(wxhw, WX_PSR_MAC_SWC_IDX, rar);
- mpsar_lo = rd32(wxhw, WX_PSR_MAC_SWC_VM_L);
- mpsar_hi = rd32(wxhw, WX_PSR_MAC_SWC_VM_H);
+ wr32(wx, WX_PSR_MAC_SWC_IDX, rar);
+ mpsar_lo = rd32(wx, WX_PSR_MAC_SWC_VM_L);
+ mpsar_hi = rd32(wx, WX_PSR_MAC_SWC_VM_H);
if (!mpsar_lo && !mpsar_hi)
return 0;
/* was that the last pool using this rar? */
if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0)
- wx_clear_rar(wxhw, rar);
+ wx_clear_rar(wx, rar);
return 0;
}
/**
* wx_init_uta_tables - Initialize the Unicast Table Array
- * @wxhw: pointer to hardware structure
+ * @wx: pointer to hardware structure
**/
-static void wx_init_uta_tables(struct wx_hw *wxhw)
+static void wx_init_uta_tables(struct wx *wx)
{
int i;
- wx_dbg(wxhw, " Clearing UTA\n");
+ wx_dbg(wx, " Clearing UTA\n");
for (i = 0; i < 128; i++)
- wr32(wxhw, WX_PSR_UC_TBL(i), 0);
+ wr32(wx, WX_PSR_UC_TBL(i), 0);
}
/**
* wx_init_rx_addrs - Initializes receive address filters.
- * @wxhw: pointer to hardware structure
+ * @wx: pointer to hardware structure
*
* Places the MAC address in receive address register 0 and clears the rest
* of the receive address registers. Clears the multicast table. Assumes
* the receiver is in reset when the routine is called.
**/
-void wx_init_rx_addrs(struct wx_hw *wxhw)
+void wx_init_rx_addrs(struct wx *wx)
{
- u32 rar_entries = wxhw->mac.num_rar_entries;
+ u32 rar_entries = wx->mac.num_rar_entries;
u32 psrctl;
int i;
@@ -683,97 +718,829 @@ void wx_init_rx_addrs(struct wx_hw *wxhw)
* to the permanent address.
* Otherwise, use the permanent address from the eeprom.
*/
- if (!is_valid_ether_addr(wxhw->mac.addr)) {
+ if (!is_valid_ether_addr(wx->mac.addr)) {
/* Get the MAC address from the RAR0 for later reference */
- wx_get_mac_addr(wxhw, wxhw->mac.addr);
- wx_dbg(wxhw, "Keeping Current RAR0 Addr = %pM\n", wxhw->mac.addr);
+ wx_get_mac_addr(wx, wx->mac.addr);
+ wx_dbg(wx, "Keeping Current RAR0 Addr = %pM\n", wx->mac.addr);
} else {
/* Setup the receive address. */
- wx_dbg(wxhw, "Overriding MAC Address in RAR[0]\n");
- wx_dbg(wxhw, "New MAC Addr = %pM\n", wxhw->mac.addr);
+ wx_dbg(wx, "Overriding MAC Address in RAR[0]\n");
+ wx_dbg(wx, "New MAC Addr = %pM\n", wx->mac.addr);
- wx_set_rar(wxhw, 0, wxhw->mac.addr, 0, WX_PSR_MAC_SWC_AD_H_AV);
+ wx_set_rar(wx, 0, wx->mac.addr, 0, WX_PSR_MAC_SWC_AD_H_AV);
- if (wxhw->mac.type == wx_mac_sp) {
+ if (wx->mac.type == wx_mac_sp) {
/* clear VMDq pool/queue selection for RAR 0 */
- wx_clear_vmdq(wxhw, 0, WX_CLEAR_VMDQ_ALL);
+ wx_clear_vmdq(wx, 0, WX_CLEAR_VMDQ_ALL);
}
}
/* Zero out the other receive addresses. */
- wx_dbg(wxhw, "Clearing RAR[1-%d]\n", rar_entries - 1);
+ wx_dbg(wx, "Clearing RAR[1-%d]\n", rar_entries - 1);
for (i = 1; i < rar_entries; i++) {
- wr32(wxhw, WX_PSR_MAC_SWC_IDX, i);
- wr32(wxhw, WX_PSR_MAC_SWC_AD_L, 0);
- wr32(wxhw, WX_PSR_MAC_SWC_AD_H, 0);
+ wr32(wx, WX_PSR_MAC_SWC_IDX, i);
+ wr32(wx, WX_PSR_MAC_SWC_AD_L, 0);
+ wr32(wx, WX_PSR_MAC_SWC_AD_H, 0);
}
/* Clear the MTA */
- wxhw->addr_ctrl.mta_in_use = 0;
- psrctl = rd32(wxhw, WX_PSR_CTL);
+ wx->addr_ctrl.mta_in_use = 0;
+ psrctl = rd32(wx, WX_PSR_CTL);
psrctl &= ~(WX_PSR_CTL_MO | WX_PSR_CTL_MFE);
- psrctl |= wxhw->mac.mc_filter_type << WX_PSR_CTL_MO_SHIFT;
- wr32(wxhw, WX_PSR_CTL, psrctl);
- wx_dbg(wxhw, " Clearing MTA\n");
- for (i = 0; i < wxhw->mac.mcft_size; i++)
- wr32(wxhw, WX_PSR_MC_TBL(i), 0);
+ psrctl |= wx->mac.mc_filter_type << WX_PSR_CTL_MO_SHIFT;
+ wr32(wx, WX_PSR_CTL, psrctl);
+ wx_dbg(wx, " Clearing MTA\n");
+ for (i = 0; i < wx->mac.mcft_size; i++)
+ wr32(wx, WX_PSR_MC_TBL(i), 0);
- wx_init_uta_tables(wxhw);
+ wx_init_uta_tables(wx);
}
EXPORT_SYMBOL(wx_init_rx_addrs);
-void wx_disable_rx(struct wx_hw *wxhw)
+static void wx_sync_mac_table(struct wx *wx)
+{
+ int i;
+
+ for (i = 0; i < wx->mac.num_rar_entries; i++) {
+ if (wx->mac_table[i].state & WX_MAC_STATE_MODIFIED) {
+ if (wx->mac_table[i].state & WX_MAC_STATE_IN_USE) {
+ wx_set_rar(wx, i,
+ wx->mac_table[i].addr,
+ wx->mac_table[i].pools,
+ WX_PSR_MAC_SWC_AD_H_AV);
+ } else {
+ wx_clear_rar(wx, i);
+ }
+ wx->mac_table[i].state &= ~(WX_MAC_STATE_MODIFIED);
+ }
+ }
+}
+
+/* this function destroys the first RAR entry */
+void wx_mac_set_default_filter(struct wx *wx, u8 *addr)
+{
+ memcpy(&wx->mac_table[0].addr, addr, ETH_ALEN);
+ wx->mac_table[0].pools = 1ULL;
+ wx->mac_table[0].state = (WX_MAC_STATE_DEFAULT | WX_MAC_STATE_IN_USE);
+ wx_set_rar(wx, 0, wx->mac_table[0].addr,
+ wx->mac_table[0].pools,
+ WX_PSR_MAC_SWC_AD_H_AV);
+}
+EXPORT_SYMBOL(wx_mac_set_default_filter);
+
+void wx_flush_sw_mac_table(struct wx *wx)
+{
+ u32 i;
+
+ for (i = 0; i < wx->mac.num_rar_entries; i++) {
+ if (!(wx->mac_table[i].state & WX_MAC_STATE_IN_USE))
+ continue;
+
+ wx->mac_table[i].state |= WX_MAC_STATE_MODIFIED;
+ wx->mac_table[i].state &= ~WX_MAC_STATE_IN_USE;
+ memset(wx->mac_table[i].addr, 0, ETH_ALEN);
+ wx->mac_table[i].pools = 0;
+ }
+ wx_sync_mac_table(wx);
+}
+EXPORT_SYMBOL(wx_flush_sw_mac_table);
+
+static int wx_add_mac_filter(struct wx *wx, u8 *addr, u16 pool)
+{
+ u32 i;
+
+ if (is_zero_ether_addr(addr))
+ return -EINVAL;
+
+ for (i = 0; i < wx->mac.num_rar_entries; i++) {
+ if (wx->mac_table[i].state & WX_MAC_STATE_IN_USE) {
+ if (ether_addr_equal(addr, wx->mac_table[i].addr)) {
+ if (wx->mac_table[i].pools != (1ULL << pool)) {
+ memcpy(wx->mac_table[i].addr, addr, ETH_ALEN);
+ wx->mac_table[i].pools |= (1ULL << pool);
+ wx_sync_mac_table(wx);
+ return i;
+ }
+ }
+ }
+
+ if (wx->mac_table[i].state & WX_MAC_STATE_IN_USE)
+ continue;
+ wx->mac_table[i].state |= (WX_MAC_STATE_MODIFIED |
+ WX_MAC_STATE_IN_USE);
+ memcpy(wx->mac_table[i].addr, addr, ETH_ALEN);
+ wx->mac_table[i].pools |= (1ULL << pool);
+ wx_sync_mac_table(wx);
+ return i;
+ }
+ return -ENOMEM;
+}
+
+static int wx_del_mac_filter(struct wx *wx, u8 *addr, u16 pool)
+{
+ u32 i;
+
+ if (is_zero_ether_addr(addr))
+ return -EINVAL;
+
+ /* search table for addr, if found, set to 0 and sync */
+ for (i = 0; i < wx->mac.num_rar_entries; i++) {
+ if (!ether_addr_equal(addr, wx->mac_table[i].addr))
+ continue;
+
+ wx->mac_table[i].state |= WX_MAC_STATE_MODIFIED;
+ wx->mac_table[i].pools &= ~(1ULL << pool);
+ if (!wx->mac_table[i].pools) {
+ wx->mac_table[i].state &= ~WX_MAC_STATE_IN_USE;
+ memset(wx->mac_table[i].addr, 0, ETH_ALEN);
+ }
+ wx_sync_mac_table(wx);
+ return 0;
+ }
+ return -ENOMEM;
+}
+
+static int wx_available_rars(struct wx *wx)
+{
+ u32 i, count = 0;
+
+ for (i = 0; i < wx->mac.num_rar_entries; i++) {
+ if (wx->mac_table[i].state == 0)
+ count++;
+ }
+
+ return count;
+}
+
+/**
+ * wx_write_uc_addr_list - write unicast addresses to RAR table
+ * @netdev: network interface device structure
+ * @pool: index for mac table
+ *
+ * Writes unicast address list to the RAR table.
+ * Returns: -ENOMEM on failure/insufficient address space
+ * 0 on no addresses written
+ * X on writing X addresses to the RAR table
+ **/
+static int wx_write_uc_addr_list(struct net_device *netdev, int pool)
+{
+ struct wx *wx = netdev_priv(netdev);
+ int count = 0;
+
+ /* return ENOMEM indicating insufficient memory for addresses */
+ if (netdev_uc_count(netdev) > wx_available_rars(wx))
+ return -ENOMEM;
+
+ if (!netdev_uc_empty(netdev)) {
+ struct netdev_hw_addr *ha;
+
+ netdev_for_each_uc_addr(ha, netdev) {
+ wx_del_mac_filter(wx, ha->addr, pool);
+ wx_add_mac_filter(wx, ha->addr, pool);
+ count++;
+ }
+ }
+ return count;
+}
+
+/**
+ * wx_mta_vector - Determines bit-vector in multicast table to set
+ * @wx: pointer to private structure
+ * @mc_addr: the multicast address
+ *
+ * Extracts the 12 bits, from a multicast address, to determine which
+ * bit-vector to set in the multicast table. The hardware uses 12 bits, from
+ * incoming rx multicast addresses, to determine the bit-vector to check in
+ * the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set
+ * by the MO field of the MCSTCTRL. The MO field is set during initialization
+ * to mc_filter_type.
+ **/
+static u32 wx_mta_vector(struct wx *wx, u8 *mc_addr)
+{
+ u32 vector = 0;
+
+ switch (wx->mac.mc_filter_type) {
+ case 0: /* use bits [47:36] of the address */
+ vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
+ break;
+ case 1: /* use bits [46:35] of the address */
+ vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
+ break;
+ case 2: /* use bits [45:34] of the address */
+ vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
+ break;
+ case 3: /* use bits [43:32] of the address */
+ vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
+ break;
+ default: /* Invalid mc_filter_type */
+ wx_err(wx, "MC filter type param set incorrectly\n");
+ break;
+ }
+
+ /* vector can only be 12-bits or boundary will be exceeded */
+ vector &= 0xFFF;
+ return vector;
+}
+
+/**
+ * wx_set_mta - Set bit-vector in multicast table
+ * @wx: pointer to private structure
+ * @mc_addr: Multicast address
+ *
+ * Sets the bit-vector in the multicast table.
+ **/
+static void wx_set_mta(struct wx *wx, u8 *mc_addr)
+{
+ u32 vector, vector_bit, vector_reg;
+
+ wx->addr_ctrl.mta_in_use++;
+
+ vector = wx_mta_vector(wx, mc_addr);
+ wx_dbg(wx, " bit-vector = 0x%03X\n", vector);
+
+ /* The MTA is a register array of 128 32-bit registers. It is treated
+ * like an array of 4096 bits. We want to set bit
+ * BitArray[vector_value]. So we figure out what register the bit is
+ * in, read it, OR in the new bit, then write back the new value. The
+ * register is determined by the upper 7 bits of the vector value and
+ * the bit within that register are determined by the lower 5 bits of
+ * the value.
+ */
+ vector_reg = (vector >> 5) & 0x7F;
+ vector_bit = vector & 0x1F;
+ wx->mac.mta_shadow[vector_reg] |= (1 << vector_bit);
+}
+
+/**
+ * wx_update_mc_addr_list - Updates MAC list of multicast addresses
+ * @wx: pointer to private structure
+ * @netdev: pointer to net device structure
+ *
+ * The given list replaces any existing list. Clears the MC addrs from receive
+ * address registers and the multicast table. Uses unused receive address
+ * registers for the first multicast addresses, and hashes the rest into the
+ * multicast table.
+ **/
+static void wx_update_mc_addr_list(struct wx *wx, struct net_device *netdev)
+{
+ struct netdev_hw_addr *ha;
+ u32 i, psrctl;
+
+ /* Set the new number of MC addresses that we are being requested to
+ * use.
+ */
+ wx->addr_ctrl.num_mc_addrs = netdev_mc_count(netdev);
+ wx->addr_ctrl.mta_in_use = 0;
+
+ /* Clear mta_shadow */
+ wx_dbg(wx, " Clearing MTA\n");
+ memset(&wx->mac.mta_shadow, 0, sizeof(wx->mac.mta_shadow));
+
+ /* Update mta_shadow */
+ netdev_for_each_mc_addr(ha, netdev) {
+ wx_dbg(wx, " Adding the multicast addresses:\n");
+ wx_set_mta(wx, ha->addr);
+ }
+
+ /* Enable mta */
+ for (i = 0; i < wx->mac.mcft_size; i++)
+ wr32a(wx, WX_PSR_MC_TBL(0), i,
+ wx->mac.mta_shadow[i]);
+
+ if (wx->addr_ctrl.mta_in_use > 0) {
+ psrctl = rd32(wx, WX_PSR_CTL);
+ psrctl &= ~(WX_PSR_CTL_MO | WX_PSR_CTL_MFE);
+ psrctl |= WX_PSR_CTL_MFE |
+ (wx->mac.mc_filter_type << WX_PSR_CTL_MO_SHIFT);
+ wr32(wx, WX_PSR_CTL, psrctl);
+ }
+
+ wx_dbg(wx, "Update mc addr list Complete\n");
+}
+
+/**
+ * wx_write_mc_addr_list - write multicast addresses to MTA
+ * @netdev: network interface device structure
+ *
+ * Writes multicast address list to the MTA hash table.
+ * Returns: 0 on no addresses written
+ * X on writing X addresses to MTA
+ **/
+static int wx_write_mc_addr_list(struct net_device *netdev)
+{
+ struct wx *wx = netdev_priv(netdev);
+
+ if (!netif_running(netdev))
+ return 0;
+
+ wx_update_mc_addr_list(wx, netdev);
+
+ return netdev_mc_count(netdev);
+}
+
+/**
+ * wx_set_mac - Change the Ethernet Address of the NIC
+ * @netdev: network interface device structure
+ * @p: pointer to an address structure
+ *
+ * Returns 0 on success, negative on failure
+ **/
+int wx_set_mac(struct net_device *netdev, void *p)
+{
+ struct wx *wx = netdev_priv(netdev);
+ struct sockaddr *addr = p;
+ int retval;
+
+ retval = eth_prepare_mac_addr_change(netdev, addr);
+ if (retval)
+ return retval;
+
+ wx_del_mac_filter(wx, wx->mac.addr, 0);
+ eth_hw_addr_set(netdev, addr->sa_data);
+ memcpy(wx->mac.addr, addr->sa_data, netdev->addr_len);
+
+ wx_mac_set_default_filter(wx, wx->mac.addr);
+
+ return 0;
+}
+EXPORT_SYMBOL(wx_set_mac);
+
+void wx_disable_rx(struct wx *wx)
{
u32 pfdtxgswc;
u32 rxctrl;
- rxctrl = rd32(wxhw, WX_RDB_PB_CTL);
+ rxctrl = rd32(wx, WX_RDB_PB_CTL);
if (rxctrl & WX_RDB_PB_CTL_RXEN) {
- pfdtxgswc = rd32(wxhw, WX_PSR_CTL);
+ pfdtxgswc = rd32(wx, WX_PSR_CTL);
if (pfdtxgswc & WX_PSR_CTL_SW_EN) {
pfdtxgswc &= ~WX_PSR_CTL_SW_EN;
- wr32(wxhw, WX_PSR_CTL, pfdtxgswc);
- wxhw->mac.set_lben = true;
+ wr32(wx, WX_PSR_CTL, pfdtxgswc);
+ wx->mac.set_lben = true;
} else {
- wxhw->mac.set_lben = false;
+ wx->mac.set_lben = false;
}
rxctrl &= ~WX_RDB_PB_CTL_RXEN;
- wr32(wxhw, WX_RDB_PB_CTL, rxctrl);
+ wr32(wx, WX_RDB_PB_CTL, rxctrl);
- if (!(((wxhw->subsystem_device_id & WX_NCSI_MASK) == WX_NCSI_SUP) ||
- ((wxhw->subsystem_device_id & WX_WOL_MASK) == WX_WOL_SUP))) {
+ if (!(((wx->subsystem_device_id & WX_NCSI_MASK) == WX_NCSI_SUP) ||
+ ((wx->subsystem_device_id & WX_WOL_MASK) == WX_WOL_SUP))) {
/* disable mac receiver */
- wr32m(wxhw, WX_MAC_RX_CFG,
+ wr32m(wx, WX_MAC_RX_CFG,
WX_MAC_RX_CFG_RE, 0);
}
}
}
EXPORT_SYMBOL(wx_disable_rx);
+static void wx_enable_rx(struct wx *wx)
+{
+ u32 psrctl;
+
+ /* enable mac receiver */
+ wr32m(wx, WX_MAC_RX_CFG,
+ WX_MAC_RX_CFG_RE, WX_MAC_RX_CFG_RE);
+
+ wr32m(wx, WX_RDB_PB_CTL,
+ WX_RDB_PB_CTL_RXEN, WX_RDB_PB_CTL_RXEN);
+
+ if (wx->mac.set_lben) {
+ psrctl = rd32(wx, WX_PSR_CTL);
+ psrctl |= WX_PSR_CTL_SW_EN;
+ wr32(wx, WX_PSR_CTL, psrctl);
+ wx->mac.set_lben = false;
+ }
+}
+
+/**
+ * wx_set_rxpba - Initialize Rx packet buffer
+ * @wx: pointer to private structure
+ **/
+static void wx_set_rxpba(struct wx *wx)
+{
+ u32 rxpktsize, txpktsize, txpbthresh;
+
+ rxpktsize = wx->mac.rx_pb_size << WX_RDB_PB_SZ_SHIFT;
+ wr32(wx, WX_RDB_PB_SZ(0), rxpktsize);
+
+ /* Only support an equally distributed Tx packet buffer strategy. */
+ txpktsize = wx->mac.tx_pb_size;
+ txpbthresh = (txpktsize / 1024) - WX_TXPKT_SIZE_MAX;
+ wr32(wx, WX_TDB_PB_SZ(0), txpktsize);
+ wr32(wx, WX_TDM_PB_THRE(0), txpbthresh);
+}
+
+static void wx_configure_port(struct wx *wx)
+{
+ u32 value, i;
+
+ value = WX_CFG_PORT_CTL_D_VLAN | WX_CFG_PORT_CTL_QINQ;
+ wr32m(wx, WX_CFG_PORT_CTL,
+ WX_CFG_PORT_CTL_D_VLAN |
+ WX_CFG_PORT_CTL_QINQ,
+ value);
+
+ wr32(wx, WX_CFG_TAG_TPID(0),
+ ETH_P_8021Q | ETH_P_8021AD << 16);
+ wx->tpid[0] = ETH_P_8021Q;
+ wx->tpid[1] = ETH_P_8021AD;
+ for (i = 1; i < 4; i++)
+ wr32(wx, WX_CFG_TAG_TPID(i),
+ ETH_P_8021Q | ETH_P_8021Q << 16);
+ for (i = 2; i < 8; i++)
+ wx->tpid[i] = ETH_P_8021Q;
+}
+
+/**
+ * wx_disable_sec_rx_path - Stops the receive data path
+ * @wx: pointer to private structure
+ *
+ * Stops the receive data path and waits for the HW to internally empty
+ * the Rx security block
+ **/
+static int wx_disable_sec_rx_path(struct wx *wx)
+{
+ u32 secrx;
+
+ wr32m(wx, WX_RSC_CTL,
+ WX_RSC_CTL_RX_DIS, WX_RSC_CTL_RX_DIS);
+
+ return read_poll_timeout(rd32, secrx, secrx & WX_RSC_ST_RSEC_RDY,
+ 1000, 40000, false, wx, WX_RSC_ST);
+}
+
+/**
+ * wx_enable_sec_rx_path - Enables the receive data path
+ * @wx: pointer to private structure
+ *
+ * Enables the receive data path.
+ **/
+static void wx_enable_sec_rx_path(struct wx *wx)
+{
+ wr32m(wx, WX_RSC_CTL, WX_RSC_CTL_RX_DIS, 0);
+ WX_WRITE_FLUSH(wx);
+}
+
+void wx_set_rx_mode(struct net_device *netdev)
+{
+ struct wx *wx = netdev_priv(netdev);
+ u32 fctrl, vmolr, vlnctrl;
+ int count;
+
+ /* Check for Promiscuous and All Multicast modes */
+ fctrl = rd32(wx, WX_PSR_CTL);
+ fctrl &= ~(WX_PSR_CTL_UPE | WX_PSR_CTL_MPE);
+ vmolr = rd32(wx, WX_PSR_VM_L2CTL(0));
+ vmolr &= ~(WX_PSR_VM_L2CTL_UPE |
+ WX_PSR_VM_L2CTL_MPE |
+ WX_PSR_VM_L2CTL_ROPE |
+ WX_PSR_VM_L2CTL_ROMPE);
+ vlnctrl = rd32(wx, WX_PSR_VLAN_CTL);
+ vlnctrl &= ~(WX_PSR_VLAN_CTL_VFE | WX_PSR_VLAN_CTL_CFIEN);
+
+ /* set all bits that we expect to always be set */
+ fctrl |= WX_PSR_CTL_BAM | WX_PSR_CTL_MFE;
+ vmolr |= WX_PSR_VM_L2CTL_BAM |
+ WX_PSR_VM_L2CTL_AUPE |
+ WX_PSR_VM_L2CTL_VACC;
+ vlnctrl |= WX_PSR_VLAN_CTL_VFE;
+
+ wx->addr_ctrl.user_set_promisc = false;
+ if (netdev->flags & IFF_PROMISC) {
+ wx->addr_ctrl.user_set_promisc = true;
+ fctrl |= WX_PSR_CTL_UPE | WX_PSR_CTL_MPE;
+ /* pf don't want packets routing to vf, so clear UPE */
+ vmolr |= WX_PSR_VM_L2CTL_MPE;
+ vlnctrl &= ~WX_PSR_VLAN_CTL_VFE;
+ }
+
+ if (netdev->flags & IFF_ALLMULTI) {
+ fctrl |= WX_PSR_CTL_MPE;
+ vmolr |= WX_PSR_VM_L2CTL_MPE;
+ }
+
+ if (netdev->features & NETIF_F_RXALL) {
+ vmolr |= (WX_PSR_VM_L2CTL_UPE | WX_PSR_VM_L2CTL_MPE);
+ vlnctrl &= ~WX_PSR_VLAN_CTL_VFE;
+ /* receive bad packets */
+ wr32m(wx, WX_RSC_CTL,
+ WX_RSC_CTL_SAVE_MAC_ERR,
+ WX_RSC_CTL_SAVE_MAC_ERR);
+ } else {
+ vmolr |= WX_PSR_VM_L2CTL_ROPE | WX_PSR_VM_L2CTL_ROMPE;
+ }
+
+ /* Write addresses to available RAR registers, if there is not
+ * sufficient space to store all the addresses then enable
+ * unicast promiscuous mode
+ */
+ count = wx_write_uc_addr_list(netdev, 0);
+ if (count < 0) {
+ vmolr &= ~WX_PSR_VM_L2CTL_ROPE;
+ vmolr |= WX_PSR_VM_L2CTL_UPE;
+ }
+
+ /* Write addresses to the MTA, if the attempt fails
+ * then we should just turn on promiscuous mode so
+ * that we can at least receive multicast traffic
+ */
+ count = wx_write_mc_addr_list(netdev);
+ if (count < 0) {
+ vmolr &= ~WX_PSR_VM_L2CTL_ROMPE;
+ vmolr |= WX_PSR_VM_L2CTL_MPE;
+ }
+
+ wr32(wx, WX_PSR_VLAN_CTL, vlnctrl);
+ wr32(wx, WX_PSR_CTL, fctrl);
+ wr32(wx, WX_PSR_VM_L2CTL(0), vmolr);
+}
+EXPORT_SYMBOL(wx_set_rx_mode);
+
+static void wx_set_rx_buffer_len(struct wx *wx)
+{
+ struct net_device *netdev = wx->netdev;
+ u32 mhadd, max_frame;
+
+ max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
+ /* adjust max frame to be at least the size of a standard frame */
+ if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
+ max_frame = (ETH_FRAME_LEN + ETH_FCS_LEN);
+
+ mhadd = rd32(wx, WX_PSR_MAX_SZ);
+ if (max_frame != mhadd)
+ wr32(wx, WX_PSR_MAX_SZ, max_frame);
+}
+
+/* Disable the specified rx queue */
+void wx_disable_rx_queue(struct wx *wx, struct wx_ring *ring)
+{
+ u8 reg_idx = ring->reg_idx;
+ u32 rxdctl;
+ int ret;
+
+ /* write value back with RRCFG.EN bit cleared */
+ wr32m(wx, WX_PX_RR_CFG(reg_idx),
+ WX_PX_RR_CFG_RR_EN, 0);
+
+ /* the hardware may take up to 100us to really disable the rx queue */
+ ret = read_poll_timeout(rd32, rxdctl, !(rxdctl & WX_PX_RR_CFG_RR_EN),
+ 10, 100, true, wx, WX_PX_RR_CFG(reg_idx));
+
+ if (ret == -ETIMEDOUT) {
+ /* Just for information */
+ wx_err(wx,
+ "RRCFG.EN on Rx queue %d not cleared within the polling period\n",
+ reg_idx);
+ }
+}
+EXPORT_SYMBOL(wx_disable_rx_queue);
+
+static void wx_enable_rx_queue(struct wx *wx, struct wx_ring *ring)
+{
+ u8 reg_idx = ring->reg_idx;
+ u32 rxdctl;
+ int ret;
+
+ ret = read_poll_timeout(rd32, rxdctl, rxdctl & WX_PX_RR_CFG_RR_EN,
+ 1000, 10000, true, wx, WX_PX_RR_CFG(reg_idx));
+
+ if (ret == -ETIMEDOUT) {
+ /* Just for information */
+ wx_err(wx,
+ "RRCFG.EN on Rx queue %d not set within the polling period\n",
+ reg_idx);
+ }
+}
+
+static void wx_configure_srrctl(struct wx *wx,
+ struct wx_ring *rx_ring)
+{
+ u16 reg_idx = rx_ring->reg_idx;
+ u32 srrctl;
+
+ srrctl = rd32(wx, WX_PX_RR_CFG(reg_idx));
+ srrctl &= ~(WX_PX_RR_CFG_RR_HDR_SZ |
+ WX_PX_RR_CFG_RR_BUF_SZ |
+ WX_PX_RR_CFG_SPLIT_MODE);
+ /* configure header buffer length, needed for RSC */
+ srrctl |= WX_RXBUFFER_256 << WX_PX_RR_CFG_BHDRSIZE_SHIFT;
+
+ /* configure the packet buffer length */
+ srrctl |= WX_RX_BUFSZ >> WX_PX_RR_CFG_BSIZEPKT_SHIFT;
+
+ wr32(wx, WX_PX_RR_CFG(reg_idx), srrctl);
+}
+
+static void wx_configure_tx_ring(struct wx *wx,
+ struct wx_ring *ring)
+{
+ u32 txdctl = WX_PX_TR_CFG_ENABLE;
+ u8 reg_idx = ring->reg_idx;
+ u64 tdba = ring->dma;
+ int ret;
+
+ /* disable queue to avoid issues while updating state */
+ wr32(wx, WX_PX_TR_CFG(reg_idx), WX_PX_TR_CFG_SWFLSH);
+ WX_WRITE_FLUSH(wx);
+
+ wr32(wx, WX_PX_TR_BAL(reg_idx), tdba & DMA_BIT_MASK(32));
+ wr32(wx, WX_PX_TR_BAH(reg_idx), upper_32_bits(tdba));
+
+ /* reset head and tail pointers */
+ wr32(wx, WX_PX_TR_RP(reg_idx), 0);
+ wr32(wx, WX_PX_TR_WP(reg_idx), 0);
+ ring->tail = wx->hw_addr + WX_PX_TR_WP(reg_idx);
+
+ if (ring->count < WX_MAX_TXD)
+ txdctl |= ring->count / 128 << WX_PX_TR_CFG_TR_SIZE_SHIFT;
+ txdctl |= 0x20 << WX_PX_TR_CFG_WTHRESH_SHIFT;
+
+ /* reinitialize tx_buffer_info */
+ memset(ring->tx_buffer_info, 0,
+ sizeof(struct wx_tx_buffer) * ring->count);
+
+ /* enable queue */
+ wr32(wx, WX_PX_TR_CFG(reg_idx), txdctl);
+
+ /* poll to verify queue is enabled */
+ ret = read_poll_timeout(rd32, txdctl, txdctl & WX_PX_TR_CFG_ENABLE,
+ 1000, 10000, true, wx, WX_PX_TR_CFG(reg_idx));
+ if (ret == -ETIMEDOUT)
+ wx_err(wx, "Could not enable Tx Queue %d\n", reg_idx);
+}
+
+static void wx_configure_rx_ring(struct wx *wx,
+ struct wx_ring *ring)
+{
+ u16 reg_idx = ring->reg_idx;
+ union wx_rx_desc *rx_desc;
+ u64 rdba = ring->dma;
+ u32 rxdctl;
+
+ /* disable queue to avoid issues while updating state */
+ rxdctl = rd32(wx, WX_PX_RR_CFG(reg_idx));
+ wx_disable_rx_queue(wx, ring);
+
+ wr32(wx, WX_PX_RR_BAL(reg_idx), rdba & DMA_BIT_MASK(32));
+ wr32(wx, WX_PX_RR_BAH(reg_idx), upper_32_bits(rdba));
+
+ if (ring->count == WX_MAX_RXD)
+ rxdctl |= 0 << WX_PX_RR_CFG_RR_SIZE_SHIFT;
+ else
+ rxdctl |= (ring->count / 128) << WX_PX_RR_CFG_RR_SIZE_SHIFT;
+
+ rxdctl |= 0x1 << WX_PX_RR_CFG_RR_THER_SHIFT;
+ wr32(wx, WX_PX_RR_CFG(reg_idx), rxdctl);
+
+ /* reset head and tail pointers */
+ wr32(wx, WX_PX_RR_RP(reg_idx), 0);
+ wr32(wx, WX_PX_RR_WP(reg_idx), 0);
+ ring->tail = wx->hw_addr + WX_PX_RR_WP(reg_idx);
+
+ wx_configure_srrctl(wx, ring);
+
+ /* initialize rx_buffer_info */
+ memset(ring->rx_buffer_info, 0,
+ sizeof(struct wx_rx_buffer) * ring->count);
+
+ /* initialize Rx descriptor 0 */
+ rx_desc = WX_RX_DESC(ring, 0);
+ rx_desc->wb.upper.length = 0;
+
+ /* enable receive descriptor ring */
+ wr32m(wx, WX_PX_RR_CFG(reg_idx),
+ WX_PX_RR_CFG_RR_EN, WX_PX_RR_CFG_RR_EN);
+
+ wx_enable_rx_queue(wx, ring);
+ wx_alloc_rx_buffers(ring, wx_desc_unused(ring));
+}
+
+/**
+ * wx_configure_tx - Configure Transmit Unit after Reset
+ * @wx: pointer to private structure
+ *
+ * Configure the Tx unit of the MAC after a reset.
+ **/
+static void wx_configure_tx(struct wx *wx)
+{
+ u32 i;
+
+ /* TDM_CTL.TE must be before Tx queues are enabled */
+ wr32m(wx, WX_TDM_CTL,
+ WX_TDM_CTL_TE, WX_TDM_CTL_TE);
+
+ /* Setup the HW Tx Head and Tail descriptor pointers */
+ for (i = 0; i < wx->num_tx_queues; i++)
+ wx_configure_tx_ring(wx, wx->tx_ring[i]);
+
+ wr32m(wx, WX_TSC_BUF_AE, WX_TSC_BUF_AE_THR, 0x10);
+
+ if (wx->mac.type == wx_mac_em)
+ wr32m(wx, WX_TSC_CTL, WX_TSC_CTL_TX_DIS | WX_TSC_CTL_TSEC_DIS, 0x1);
+
+ /* enable mac transmitter */
+ wr32m(wx, WX_MAC_TX_CFG,
+ WX_MAC_TX_CFG_TE, WX_MAC_TX_CFG_TE);
+}
+
+/**
+ * wx_configure_rx - Configure Receive Unit after Reset
+ * @wx: pointer to private structure
+ *
+ * Configure the Rx unit of the MAC after a reset.
+ **/
+static void wx_configure_rx(struct wx *wx)
+{
+ u32 psrtype, i;
+ int ret;
+
+ wx_disable_rx(wx);
+
+ psrtype = WX_RDB_PL_CFG_L4HDR |
+ WX_RDB_PL_CFG_L3HDR |
+ WX_RDB_PL_CFG_L2HDR |
+ WX_RDB_PL_CFG_TUN_TUNHDR |
+ WX_RDB_PL_CFG_TUN_TUNHDR;
+ wr32(wx, WX_RDB_PL_CFG(0), psrtype);
+
+ /* enable hw crc stripping */
+ wr32m(wx, WX_RSC_CTL, WX_RSC_CTL_CRC_STRIP, WX_RSC_CTL_CRC_STRIP);
+
+ if (wx->mac.type == wx_mac_sp) {
+ u32 psrctl;
+
+ /* RSC Setup */
+ psrctl = rd32(wx, WX_PSR_CTL);
+ psrctl |= WX_PSR_CTL_RSC_ACK; /* Disable RSC for ACK packets */
+ psrctl |= WX_PSR_CTL_RSC_DIS;
+ wr32(wx, WX_PSR_CTL, psrctl);
+ }
+
+ /* set_rx_buffer_len must be called before ring initialization */
+ wx_set_rx_buffer_len(wx);
+
+ /* Setup the HW Rx Head and Tail Descriptor Pointers and
+ * the Base and Length of the Rx Descriptor Ring
+ */
+ for (i = 0; i < wx->num_rx_queues; i++)
+ wx_configure_rx_ring(wx, wx->rx_ring[i]);
+
+ /* Enable all receives, disable security engine prior to block traffic */
+ ret = wx_disable_sec_rx_path(wx);
+ if (ret < 0)
+ wx_err(wx, "The register status is abnormal, please check device.");
+
+ wx_enable_rx(wx);
+ wx_enable_sec_rx_path(wx);
+}
+
+static void wx_configure_isb(struct wx *wx)
+{
+ /* set ISB Address */
+ wr32(wx, WX_PX_ISB_ADDR_L, wx->isb_dma & DMA_BIT_MASK(32));
+ if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
+ wr32(wx, WX_PX_ISB_ADDR_H, upper_32_bits(wx->isb_dma));
+}
+
+void wx_configure(struct wx *wx)
+{
+ wx_set_rxpba(wx);
+ wx_configure_port(wx);
+
+ wx_set_rx_mode(wx->netdev);
+
+ wx_enable_sec_rx_path(wx);
+
+ wx_configure_tx(wx);
+ wx_configure_rx(wx);
+ wx_configure_isb(wx);
+}
+EXPORT_SYMBOL(wx_configure);
+
/**
* wx_disable_pcie_master - Disable PCI-express master access
- * @wxhw: pointer to hardware structure
+ * @wx: pointer to hardware structure
*
* Disables PCI-Express master access and verifies there are no pending
* requests.
**/
-int wx_disable_pcie_master(struct wx_hw *wxhw)
+int wx_disable_pcie_master(struct wx *wx)
{
int status = 0;
u32 val;
/* Always set this bit to ensure any future transactions are blocked */
- pci_clear_master(wxhw->pdev);
+ pci_clear_master(wx->pdev);
/* Exit if master requests are blocked */
- if (!(rd32(wxhw, WX_PX_TRANSACTION_PENDING)))
+ if (!(rd32(wx, WX_PX_TRANSACTION_PENDING)))
return 0;
/* Poll for master request bit to clear */
status = read_poll_timeout(rd32, val, !val, 100, WX_PCI_MASTER_DISABLE_TIMEOUT,
- false, wxhw, WX_PX_TRANSACTION_PENDING);
+ false, wx, WX_PX_TRANSACTION_PENDING);
if (status < 0)
- wx_err(wxhw, "PCIe transaction pending bit did not clear.\n");
+ wx_err(wx, "PCIe transaction pending bit did not clear.\n");
return status;
}
@@ -781,106 +1548,106 @@ EXPORT_SYMBOL(wx_disable_pcie_master);
/**
* wx_stop_adapter - Generic stop Tx/Rx units
- * @wxhw: pointer to hardware structure
+ * @wx: pointer to hardware structure
*
* Sets the adapter_stopped flag within wx_hw struct. Clears interrupts,
* disables transmit and receive units. The adapter_stopped flag is used by
* the shared code and drivers to determine if the adapter is in a stopped
* state and should not touch the hardware.
**/
-int wx_stop_adapter(struct wx_hw *wxhw)
+int wx_stop_adapter(struct wx *wx)
{
u16 i;
/* Set the adapter_stopped flag so other driver functions stop touching
* the hardware
*/
- wxhw->adapter_stopped = true;
+ wx->adapter_stopped = true;
/* Disable the receive unit */
- wx_disable_rx(wxhw);
+ wx_disable_rx(wx);
/* Set interrupt mask to stop interrupts from being generated */
- wx_intr_disable(wxhw, WX_INTR_ALL);
+ wx_intr_disable(wx, WX_INTR_ALL);
/* Clear any pending interrupts, flush previous writes */
- wr32(wxhw, WX_PX_MISC_IC, 0xffffffff);
- wr32(wxhw, WX_BME_CTL, 0x3);
+ wr32(wx, WX_PX_MISC_IC, 0xffffffff);
+ wr32(wx, WX_BME_CTL, 0x3);
/* Disable the transmit unit. Each queue must be disabled. */
- for (i = 0; i < wxhw->mac.max_tx_queues; i++) {
- wr32m(wxhw, WX_PX_TR_CFG(i),
+ for (i = 0; i < wx->mac.max_tx_queues; i++) {
+ wr32m(wx, WX_PX_TR_CFG(i),
WX_PX_TR_CFG_SWFLSH | WX_PX_TR_CFG_ENABLE,
WX_PX_TR_CFG_SWFLSH);
}
/* Disable the receive unit by stopping each queue */
- for (i = 0; i < wxhw->mac.max_rx_queues; i++) {
- wr32m(wxhw, WX_PX_RR_CFG(i),
+ for (i = 0; i < wx->mac.max_rx_queues; i++) {
+ wr32m(wx, WX_PX_RR_CFG(i),
WX_PX_RR_CFG_RR_EN, 0);
}
/* flush all queues disables */
- WX_WRITE_FLUSH(wxhw);
+ WX_WRITE_FLUSH(wx);
/* Prevent the PCI-E bus from hanging by disabling PCI-E master
* access and verify no pending requests
*/
- return wx_disable_pcie_master(wxhw);
+ return wx_disable_pcie_master(wx);
}
EXPORT_SYMBOL(wx_stop_adapter);
-void wx_reset_misc(struct wx_hw *wxhw)
+void wx_reset_misc(struct wx *wx)
{
int i;
/* receive packets that size > 2048 */
- wr32m(wxhw, WX_MAC_RX_CFG, WX_MAC_RX_CFG_JE, WX_MAC_RX_CFG_JE);
+ wr32m(wx, WX_MAC_RX_CFG, WX_MAC_RX_CFG_JE, WX_MAC_RX_CFG_JE);
/* clear counters on read */
- wr32m(wxhw, WX_MMC_CONTROL,
+ wr32m(wx, WX_MMC_CONTROL,
WX_MMC_CONTROL_RSTONRD, WX_MMC_CONTROL_RSTONRD);
- wr32m(wxhw, WX_MAC_RX_FLOW_CTRL,
+ wr32m(wx, WX_MAC_RX_FLOW_CTRL,
WX_MAC_RX_FLOW_CTRL_RFE, WX_MAC_RX_FLOW_CTRL_RFE);
- wr32(wxhw, WX_MAC_PKT_FLT, WX_MAC_PKT_FLT_PR);
+ wr32(wx, WX_MAC_PKT_FLT, WX_MAC_PKT_FLT_PR);
- wr32m(wxhw, WX_MIS_RST_ST,
+ wr32m(wx, WX_MIS_RST_ST,
WX_MIS_RST_ST_RST_INIT, 0x1E00);
/* errata 4: initialize mng flex tbl and wakeup flex tbl*/
- wr32(wxhw, WX_PSR_MNG_FLEX_SEL, 0);
+ wr32(wx, WX_PSR_MNG_FLEX_SEL, 0);
for (i = 0; i < 16; i++) {
- wr32(wxhw, WX_PSR_MNG_FLEX_DW_L(i), 0);
- wr32(wxhw, WX_PSR_MNG_FLEX_DW_H(i), 0);
- wr32(wxhw, WX_PSR_MNG_FLEX_MSK(i), 0);
+ wr32(wx, WX_PSR_MNG_FLEX_DW_L(i), 0);
+ wr32(wx, WX_PSR_MNG_FLEX_DW_H(i), 0);
+ wr32(wx, WX_PSR_MNG_FLEX_MSK(i), 0);
}
- wr32(wxhw, WX_PSR_LAN_FLEX_SEL, 0);
+ wr32(wx, WX_PSR_LAN_FLEX_SEL, 0);
for (i = 0; i < 16; i++) {
- wr32(wxhw, WX_PSR_LAN_FLEX_DW_L(i), 0);
- wr32(wxhw, WX_PSR_LAN_FLEX_DW_H(i), 0);
- wr32(wxhw, WX_PSR_LAN_FLEX_MSK(i), 0);
+ wr32(wx, WX_PSR_LAN_FLEX_DW_L(i), 0);
+ wr32(wx, WX_PSR_LAN_FLEX_DW_H(i), 0);
+ wr32(wx, WX_PSR_LAN_FLEX_MSK(i), 0);
}
/* set pause frame dst mac addr */
- wr32(wxhw, WX_RDB_PFCMACDAL, 0xC2000001);
- wr32(wxhw, WX_RDB_PFCMACDAH, 0x0180);
+ wr32(wx, WX_RDB_PFCMACDAL, 0xC2000001);
+ wr32(wx, WX_RDB_PFCMACDAH, 0x0180);
}
EXPORT_SYMBOL(wx_reset_misc);
/**
* wx_get_pcie_msix_counts - Gets MSI-X vector count
- * @wxhw: pointer to hardware structure
+ * @wx: pointer to hardware structure
* @msix_count: number of MSI interrupts that can be obtained
* @max_msix_count: number of MSI interrupts that mac need
*
* Read PCIe configuration space, and get the MSI-X vector count from
* the capabilities table.
**/
-int wx_get_pcie_msix_counts(struct wx_hw *wxhw, u16 *msix_count, u16 max_msix_count)
+int wx_get_pcie_msix_counts(struct wx *wx, u16 *msix_count, u16 max_msix_count)
{
- struct pci_dev *pdev = wxhw->pdev;
+ struct pci_dev *pdev = wx->pdev;
struct device *dev = &pdev->dev;
int pos;
@@ -904,31 +1671,39 @@ int wx_get_pcie_msix_counts(struct wx_hw *wxhw, u16 *msix_count, u16 max_msix_co
}
EXPORT_SYMBOL(wx_get_pcie_msix_counts);
-int wx_sw_init(struct wx_hw *wxhw)
+int wx_sw_init(struct wx *wx)
{
- struct pci_dev *pdev = wxhw->pdev;
+ struct pci_dev *pdev = wx->pdev;
u32 ssid = 0;
int err = 0;
- wxhw->vendor_id = pdev->vendor;
- wxhw->device_id = pdev->device;
- wxhw->revision_id = pdev->revision;
- wxhw->oem_svid = pdev->subsystem_vendor;
- wxhw->oem_ssid = pdev->subsystem_device;
- wxhw->bus.device = PCI_SLOT(pdev->devfn);
- wxhw->bus.func = PCI_FUNC(pdev->devfn);
-
- if (wxhw->oem_svid == PCI_VENDOR_ID_WANGXUN) {
- wxhw->subsystem_vendor_id = pdev->subsystem_vendor;
- wxhw->subsystem_device_id = pdev->subsystem_device;
+ wx->vendor_id = pdev->vendor;
+ wx->device_id = pdev->device;
+ wx->revision_id = pdev->revision;
+ wx->oem_svid = pdev->subsystem_vendor;
+ wx->oem_ssid = pdev->subsystem_device;
+ wx->bus.device = PCI_SLOT(pdev->devfn);
+ wx->bus.func = PCI_FUNC(pdev->devfn);
+
+ if (wx->oem_svid == PCI_VENDOR_ID_WANGXUN) {
+ wx->subsystem_vendor_id = pdev->subsystem_vendor;
+ wx->subsystem_device_id = pdev->subsystem_device;
} else {
- err = wx_flash_read_dword(wxhw, 0xfffdc, &ssid);
+ err = wx_flash_read_dword(wx, 0xfffdc, &ssid);
if (!err)
- wxhw->subsystem_device_id = swab16((u16)ssid);
+ wx->subsystem_device_id = swab16((u16)ssid);
return err;
}
+ wx->mac_table = kcalloc(wx->mac.num_rar_entries,
+ sizeof(struct wx_mac_addr),
+ GFP_KERNEL);
+ if (!wx->mac_table) {
+ wx_err(wx, "mac_table allocation failed\n");
+ return -ENOMEM;
+ }
+
return 0;
}
EXPORT_SYMBOL(wx_sw_init);
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.h b/drivers/net/ethernet/wangxun/libwx/wx_hw.h
index a0652f5e9939..44dfd6ea442a 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_hw.h
+++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.h
@@ -4,25 +4,31 @@
#ifndef _WX_HW_H_
#define _WX_HW_H_
-int wx_check_flash_load(struct wx_hw *hw, u32 check_bit);
-void wx_control_hw(struct wx_hw *wxhw, bool drv);
-int wx_mng_present(struct wx_hw *wxhw);
-int wx_host_interface_command(struct wx_hw *wxhw, u32 *buffer,
+void wx_intr_enable(struct wx *wx, u64 qmask);
+void wx_irq_disable(struct wx *wx);
+int wx_check_flash_load(struct wx *wx, u32 check_bit);
+void wx_control_hw(struct wx *wx, bool drv);
+int wx_mng_present(struct wx *wx);
+int wx_host_interface_command(struct wx *wx, u32 *buffer,
u32 length, u32 timeout, bool return_data);
-int wx_read_ee_hostif(struct wx_hw *wxhw, u16 offset, u16 *data);
-int wx_read_ee_hostif_buffer(struct wx_hw *wxhw,
+int wx_read_ee_hostif(struct wx *wx, u16 offset, u16 *data);
+int wx_read_ee_hostif_buffer(struct wx *wx,
u16 offset, u16 words, u16 *data);
-int wx_reset_hostif(struct wx_hw *wxhw);
-void wx_init_eeprom_params(struct wx_hw *wxhw);
-void wx_get_mac_addr(struct wx_hw *wxhw, u8 *mac_addr);
-int wx_set_rar(struct wx_hw *wxhw, u32 index, u8 *addr, u64 pools, u32 enable_addr);
-int wx_clear_rar(struct wx_hw *wxhw, u32 index);
-void wx_init_rx_addrs(struct wx_hw *wxhw);
-void wx_disable_rx(struct wx_hw *wxhw);
-int wx_disable_pcie_master(struct wx_hw *wxhw);
-int wx_stop_adapter(struct wx_hw *wxhw);
-void wx_reset_misc(struct wx_hw *wxhw);
-int wx_get_pcie_msix_counts(struct wx_hw *wxhw, u16 *msix_count, u16 max_msix_count);
-int wx_sw_init(struct wx_hw *wxhw);
+int wx_reset_hostif(struct wx *wx);
+void wx_init_eeprom_params(struct wx *wx);
+void wx_get_mac_addr(struct wx *wx, u8 *mac_addr);
+void wx_init_rx_addrs(struct wx *wx);
+void wx_mac_set_default_filter(struct wx *wx, u8 *addr);
+void wx_flush_sw_mac_table(struct wx *wx);
+int wx_set_mac(struct net_device *netdev, void *p);
+void wx_disable_rx(struct wx *wx);
+void wx_set_rx_mode(struct net_device *netdev);
+void wx_disable_rx_queue(struct wx *wx, struct wx_ring *ring);
+void wx_configure(struct wx *wx);
+int wx_disable_pcie_master(struct wx *wx);
+int wx_stop_adapter(struct wx *wx);
+void wx_reset_misc(struct wx *wx);
+int wx_get_pcie_msix_counts(struct wx *wx, u16 *msix_count, u16 max_msix_count);
+int wx_sw_init(struct wx *wx);
#endif /* _WX_HW_H_ */
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
new file mode 100644
index 000000000000..eb89a274083e
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
@@ -0,0 +1,2004 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019 - 2022 Beijing WangXun Technology Co., Ltd. */
+
+#include <linux/etherdevice.h>
+#include <net/page_pool.h>
+#include <linux/iopoll.h>
+#include <linux/pci.h>
+
+#include "wx_type.h"
+#include "wx_lib.h"
+#include "wx_hw.h"
+
+/* wx_test_staterr - tests bits in Rx descriptor status and error fields */
+static __le32 wx_test_staterr(union wx_rx_desc *rx_desc,
+ const u32 stat_err_bits)
+{
+ return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits);
+}
+
+static bool wx_can_reuse_rx_page(struct wx_rx_buffer *rx_buffer,
+ int rx_buffer_pgcnt)
+{
+ unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
+ struct page *page = rx_buffer->page;
+
+ /* avoid re-using remote and pfmemalloc pages */
+ if (!dev_page_is_reusable(page))
+ return false;
+
+#if (PAGE_SIZE < 8192)
+ /* if we are only owner of page we can reuse it */
+ if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1))
+ return false;
+#endif
+
+ /* If we have drained the page fragment pool we need to update
+ * the pagecnt_bias and page count so that we fully restock the
+ * number of references the driver holds.
+ */
+ if (unlikely(pagecnt_bias == 1)) {
+ page_ref_add(page, USHRT_MAX - 1);
+ rx_buffer->pagecnt_bias = USHRT_MAX;
+ }
+
+ return true;
+}
+
+/**
+ * wx_reuse_rx_page - page flip buffer and store it back on the ring
+ * @rx_ring: rx descriptor ring to store buffers on
+ * @old_buff: donor buffer to have page reused
+ *
+ * Synchronizes page for reuse by the adapter
+ **/
+static void wx_reuse_rx_page(struct wx_ring *rx_ring,
+ struct wx_rx_buffer *old_buff)
+{
+ u16 nta = rx_ring->next_to_alloc;
+ struct wx_rx_buffer *new_buff;
+
+ new_buff = &rx_ring->rx_buffer_info[nta];
+
+ /* update, and store next to alloc */
+ nta++;
+ rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
+
+ /* transfer page from old buffer to new buffer */
+ new_buff->page = old_buff->page;
+ new_buff->page_dma = old_buff->page_dma;
+ new_buff->page_offset = old_buff->page_offset;
+ new_buff->pagecnt_bias = old_buff->pagecnt_bias;
+}
+
+static void wx_dma_sync_frag(struct wx_ring *rx_ring,
+ struct wx_rx_buffer *rx_buffer)
+{
+ struct sk_buff *skb = rx_buffer->skb;
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
+
+ dma_sync_single_range_for_cpu(rx_ring->dev,
+ WX_CB(skb)->dma,
+ skb_frag_off(frag),
+ skb_frag_size(frag),
+ DMA_FROM_DEVICE);
+
+ /* If the page was released, just unmap it. */
+ if (unlikely(WX_CB(skb)->page_released))
+ page_pool_put_full_page(rx_ring->page_pool, rx_buffer->page, false);
+}
+
+static struct wx_rx_buffer *wx_get_rx_buffer(struct wx_ring *rx_ring,
+ union wx_rx_desc *rx_desc,
+ struct sk_buff **skb,
+ int *rx_buffer_pgcnt)
+{
+ struct wx_rx_buffer *rx_buffer;
+ unsigned int size;
+
+ rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
+ size = le16_to_cpu(rx_desc->wb.upper.length);
+
+#if (PAGE_SIZE < 8192)
+ *rx_buffer_pgcnt = page_count(rx_buffer->page);
+#else
+ *rx_buffer_pgcnt = 0;
+#endif
+
+ prefetchw(rx_buffer->page);
+ *skb = rx_buffer->skb;
+
+ /* Delay unmapping of the first packet. It carries the header
+ * information, HW may still access the header after the writeback.
+ * Only unmap it when EOP is reached
+ */
+ if (!wx_test_staterr(rx_desc, WX_RXD_STAT_EOP)) {
+ if (!*skb)
+ goto skip_sync;
+ } else {
+ if (*skb)
+ wx_dma_sync_frag(rx_ring, rx_buffer);
+ }
+
+ /* we are reusing so sync this buffer for CPU use */
+ dma_sync_single_range_for_cpu(rx_ring->dev,
+ rx_buffer->dma,
+ rx_buffer->page_offset,
+ size,
+ DMA_FROM_DEVICE);
+skip_sync:
+ rx_buffer->pagecnt_bias--;
+
+ return rx_buffer;
+}
+
+static void wx_put_rx_buffer(struct wx_ring *rx_ring,
+ struct wx_rx_buffer *rx_buffer,
+ struct sk_buff *skb,
+ int rx_buffer_pgcnt)
+{
+ if (wx_can_reuse_rx_page(rx_buffer, rx_buffer_pgcnt)) {
+ /* hand second half of page back to the ring */
+ wx_reuse_rx_page(rx_ring, rx_buffer);
+ } else {
+ if (!IS_ERR(skb) && WX_CB(skb)->dma == rx_buffer->dma)
+ /* the page has been released from the ring */
+ WX_CB(skb)->page_released = true;
+ else
+ page_pool_put_full_page(rx_ring->page_pool, rx_buffer->page, false);
+
+ __page_frag_cache_drain(rx_buffer->page,
+ rx_buffer->pagecnt_bias);
+ }
+
+ /* clear contents of rx_buffer */
+ rx_buffer->page = NULL;
+ rx_buffer->skb = NULL;
+}
+
+static struct sk_buff *wx_build_skb(struct wx_ring *rx_ring,
+ struct wx_rx_buffer *rx_buffer,
+ union wx_rx_desc *rx_desc)
+{
+ unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
+#if (PAGE_SIZE < 8192)
+ unsigned int truesize = WX_RX_BUFSZ;
+#else
+ unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
+#endif
+ struct sk_buff *skb = rx_buffer->skb;
+
+ if (!skb) {
+ void *page_addr = page_address(rx_buffer->page) +
+ rx_buffer->page_offset;
+
+ /* prefetch first cache line of first page */
+ prefetch(page_addr);
+#if L1_CACHE_BYTES < 128
+ prefetch(page_addr + L1_CACHE_BYTES);
+#endif
+
+ /* allocate a skb to store the frags */
+ skb = napi_alloc_skb(&rx_ring->q_vector->napi, WX_RXBUFFER_256);
+ if (unlikely(!skb))
+ return NULL;
+
+ /* we will be copying header into skb->data in
+ * pskb_may_pull so it is in our interest to prefetch
+ * it now to avoid a possible cache miss
+ */
+ prefetchw(skb->data);
+
+ if (size <= WX_RXBUFFER_256) {
+ memcpy(__skb_put(skb, size), page_addr,
+ ALIGN(size, sizeof(long)));
+ rx_buffer->pagecnt_bias++;
+
+ return skb;
+ }
+
+ if (!wx_test_staterr(rx_desc, WX_RXD_STAT_EOP))
+ WX_CB(skb)->dma = rx_buffer->dma;
+
+ skb_add_rx_frag(skb, 0, rx_buffer->page,
+ rx_buffer->page_offset,
+ size, truesize);
+ goto out;
+
+ } else {
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
+ rx_buffer->page_offset, size, truesize);
+ }
+
+out:
+#if (PAGE_SIZE < 8192)
+ /* flip page offset to other buffer */
+ rx_buffer->page_offset ^= truesize;
+#else
+ /* move offset up to the next cache line */
+ rx_buffer->page_offset += truesize;
+#endif
+
+ return skb;
+}
+
+static bool wx_alloc_mapped_page(struct wx_ring *rx_ring,
+ struct wx_rx_buffer *bi)
+{
+ struct page *page = bi->page;
+ dma_addr_t dma;
+
+ /* since we are recycling buffers we should seldom need to alloc */
+ if (likely(page))
+ return true;
+
+ page = page_pool_dev_alloc_pages(rx_ring->page_pool);
+ WARN_ON(!page);
+ dma = page_pool_get_dma_addr(page);
+
+ bi->page_dma = dma;
+ bi->page = page;
+ bi->page_offset = 0;
+ page_ref_add(page, USHRT_MAX - 1);
+ bi->pagecnt_bias = USHRT_MAX;
+
+ return true;
+}
+
+/**
+ * wx_alloc_rx_buffers - Replace used receive buffers
+ * @rx_ring: ring to place buffers on
+ * @cleaned_count: number of buffers to replace
+ **/
+void wx_alloc_rx_buffers(struct wx_ring *rx_ring, u16 cleaned_count)
+{
+ u16 i = rx_ring->next_to_use;
+ union wx_rx_desc *rx_desc;
+ struct wx_rx_buffer *bi;
+
+ /* nothing to do */
+ if (!cleaned_count)
+ return;
+
+ rx_desc = WX_RX_DESC(rx_ring, i);
+ bi = &rx_ring->rx_buffer_info[i];
+ i -= rx_ring->count;
+
+ do {
+ if (!wx_alloc_mapped_page(rx_ring, bi))
+ break;
+
+ /* sync the buffer for use by the device */
+ dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
+ bi->page_offset,
+ WX_RX_BUFSZ,
+ DMA_FROM_DEVICE);
+
+ rx_desc->read.pkt_addr =
+ cpu_to_le64(bi->page_dma + bi->page_offset);
+
+ rx_desc++;
+ bi++;
+ i++;
+ if (unlikely(!i)) {
+ rx_desc = WX_RX_DESC(rx_ring, 0);
+ bi = rx_ring->rx_buffer_info;
+ i -= rx_ring->count;
+ }
+
+ /* clear the status bits for the next_to_use descriptor */
+ rx_desc->wb.upper.status_error = 0;
+
+ cleaned_count--;
+ } while (cleaned_count);
+
+ i += rx_ring->count;
+
+ if (rx_ring->next_to_use != i) {
+ rx_ring->next_to_use = i;
+ /* update next to alloc since we have filled the ring */
+ rx_ring->next_to_alloc = i;
+
+ /* Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
+ writel(i, rx_ring->tail);
+ }
+}
+
+u16 wx_desc_unused(struct wx_ring *ring)
+{
+ u16 ntc = ring->next_to_clean;
+ u16 ntu = ring->next_to_use;
+
+ return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1;
+}
+
+/**
+ * wx_is_non_eop - process handling of non-EOP buffers
+ * @rx_ring: Rx ring being processed
+ * @rx_desc: Rx descriptor for current buffer
+ * @skb: Current socket buffer containing buffer in progress
+ *
+ * This function updates next to clean. If the buffer is an EOP buffer
+ * this function exits returning false, otherwise it will place the
+ * sk_buff in the next buffer to be chained and return true indicating
+ * that this is in fact a non-EOP buffer.
+ **/
+static bool wx_is_non_eop(struct wx_ring *rx_ring,
+ union wx_rx_desc *rx_desc,
+ struct sk_buff *skb)
+{
+ u32 ntc = rx_ring->next_to_clean + 1;
+
+ /* fetch, update, and store next to clean */
+ ntc = (ntc < rx_ring->count) ? ntc : 0;
+ rx_ring->next_to_clean = ntc;
+
+ prefetch(WX_RX_DESC(rx_ring, ntc));
+
+ /* if we are the last buffer then there is nothing else to do */
+ if (likely(wx_test_staterr(rx_desc, WX_RXD_STAT_EOP)))
+ return false;
+
+ rx_ring->rx_buffer_info[ntc].skb = skb;
+
+ return true;
+}
+
+static void wx_pull_tail(struct sk_buff *skb)
+{
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
+ unsigned int pull_len;
+ unsigned char *va;
+
+ /* it is valid to use page_address instead of kmap since we are
+ * working with pages allocated out of the lomem pool per
+ * alloc_page(GFP_ATOMIC)
+ */
+ va = skb_frag_address(frag);
+
+ /* we need the header to contain the greater of either ETH_HLEN or
+ * 60 bytes if the skb->len is less than 60 for skb_pad.
+ */
+ pull_len = eth_get_headlen(skb->dev, va, WX_RXBUFFER_256);
+
+ /* align pull length to size of long to optimize memcpy performance */
+ skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
+
+ /* update all of the pointers */
+ skb_frag_size_sub(frag, pull_len);
+ skb_frag_off_add(frag, pull_len);
+ skb->data_len -= pull_len;
+ skb->tail += pull_len;
+}
+
+/**
+ * wx_cleanup_headers - Correct corrupted or empty headers
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @rx_desc: pointer to the EOP Rx descriptor
+ * @skb: pointer to current skb being fixed
+ *
+ * Check for corrupted packet headers caused by senders on the local L2
+ * embedded NIC switch not setting up their Tx Descriptors right. These
+ * should be very rare.
+ *
+ * Also address the case where we are pulling data in on pages only
+ * and as such no data is present in the skb header.
+ *
+ * In addition if skb is not at least 60 bytes we need to pad it so that
+ * it is large enough to qualify as a valid Ethernet frame.
+ *
+ * Returns true if an error was encountered and skb was freed.
+ **/
+static bool wx_cleanup_headers(struct wx_ring *rx_ring,
+ union wx_rx_desc *rx_desc,
+ struct sk_buff *skb)
+{
+ struct net_device *netdev = rx_ring->netdev;
+
+ /* verify that the packet does not have any known errors */
+ if (!netdev ||
+ unlikely(wx_test_staterr(rx_desc, WX_RXD_ERR_RXE) &&
+ !(netdev->features & NETIF_F_RXALL))) {
+ dev_kfree_skb_any(skb);
+ return true;
+ }
+
+ /* place header in linear portion of buffer */
+ if (!skb_headlen(skb))
+ wx_pull_tail(skb);
+
+ /* if eth_skb_pad returns an error the skb was freed */
+ if (eth_skb_pad(skb))
+ return true;
+
+ return false;
+}
+
+/**
+ * wx_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
+ * @q_vector: structure containing interrupt and ring information
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @budget: Total limit on number of packets to process
+ *
+ * This function provides a "bounce buffer" approach to Rx interrupt
+ * processing. The advantage to this is that on systems that have
+ * expensive overhead for IOMMU access this provides a means of avoiding
+ * it by maintaining the mapping of the page to the system.
+ *
+ * Returns amount of work completed.
+ **/
+static int wx_clean_rx_irq(struct wx_q_vector *q_vector,
+ struct wx_ring *rx_ring,
+ int budget)
+{
+ unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+ u16 cleaned_count = wx_desc_unused(rx_ring);
+
+ do {
+ struct wx_rx_buffer *rx_buffer;
+ union wx_rx_desc *rx_desc;
+ struct sk_buff *skb;
+ int rx_buffer_pgcnt;
+
+ /* return some buffers to hardware, one at a time is too slow */
+ if (cleaned_count >= WX_RX_BUFFER_WRITE) {
+ wx_alloc_rx_buffers(rx_ring, cleaned_count);
+ cleaned_count = 0;
+ }
+
+ rx_desc = WX_RX_DESC(rx_ring, rx_ring->next_to_clean);
+ if (!wx_test_staterr(rx_desc, WX_RXD_STAT_DD))
+ break;
+
+ /* This memory barrier is needed to keep us from reading
+ * any other fields out of the rx_desc until we know the
+ * descriptor has been written back
+ */
+ dma_rmb();
+
+ rx_buffer = wx_get_rx_buffer(rx_ring, rx_desc, &skb, &rx_buffer_pgcnt);
+
+ /* retrieve a buffer from the ring */
+ skb = wx_build_skb(rx_ring, rx_buffer, rx_desc);
+
+ /* exit if we failed to retrieve a buffer */
+ if (!skb) {
+ rx_buffer->pagecnt_bias++;
+ break;
+ }
+
+ wx_put_rx_buffer(rx_ring, rx_buffer, skb, rx_buffer_pgcnt);
+ cleaned_count++;
+
+ /* place incomplete frames back on ring for completion */
+ if (wx_is_non_eop(rx_ring, rx_desc, skb))
+ continue;
+
+ /* verify the packet layout is correct */
+ if (wx_cleanup_headers(rx_ring, rx_desc, skb))
+ continue;
+
+ /* probably a little skewed due to removing CRC */
+ total_rx_bytes += skb->len;
+
+ skb_record_rx_queue(skb, rx_ring->queue_index);
+ skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+ napi_gro_receive(&q_vector->napi, skb);
+
+ /* update budget accounting */
+ total_rx_packets++;
+ } while (likely(total_rx_packets < budget));
+
+ u64_stats_update_begin(&rx_ring->syncp);
+ rx_ring->stats.packets += total_rx_packets;
+ rx_ring->stats.bytes += total_rx_bytes;
+ u64_stats_update_end(&rx_ring->syncp);
+ q_vector->rx.total_packets += total_rx_packets;
+ q_vector->rx.total_bytes += total_rx_bytes;
+
+ return total_rx_packets;
+}
+
+static struct netdev_queue *wx_txring_txq(const struct wx_ring *ring)
+{
+ return netdev_get_tx_queue(ring->netdev, ring->queue_index);
+}
+
+/**
+ * wx_clean_tx_irq - Reclaim resources after transmit completes
+ * @q_vector: structure containing interrupt and ring information
+ * @tx_ring: tx ring to clean
+ * @napi_budget: Used to determine if we are in netpoll
+ **/
+static bool wx_clean_tx_irq(struct wx_q_vector *q_vector,
+ struct wx_ring *tx_ring, int napi_budget)
+{
+ unsigned int budget = q_vector->wx->tx_work_limit;
+ unsigned int total_bytes = 0, total_packets = 0;
+ unsigned int i = tx_ring->next_to_clean;
+ struct wx_tx_buffer *tx_buffer;
+ union wx_tx_desc *tx_desc;
+
+ if (!netif_carrier_ok(tx_ring->netdev))
+ return true;
+
+ tx_buffer = &tx_ring->tx_buffer_info[i];
+ tx_desc = WX_TX_DESC(tx_ring, i);
+ i -= tx_ring->count;
+
+ do {
+ union wx_tx_desc *eop_desc = tx_buffer->next_to_watch;
+
+ /* if next_to_watch is not set then there is no work pending */
+ if (!eop_desc)
+ break;
+
+ /* prevent any other reads prior to eop_desc */
+ smp_rmb();
+
+ /* if DD is not set pending work has not been completed */
+ if (!(eop_desc->wb.status & cpu_to_le32(WX_TXD_STAT_DD)))
+ break;
+
+ /* clear next_to_watch to prevent false hangs */
+ tx_buffer->next_to_watch = NULL;
+
+ /* update the statistics for this packet */
+ total_bytes += tx_buffer->bytecount;
+ total_packets += tx_buffer->gso_segs;
+
+ /* free the skb */
+ napi_consume_skb(tx_buffer->skb, napi_budget);
+
+ /* unmap skb header data */
+ dma_unmap_single(tx_ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
+
+ /* clear tx_buffer data */
+ dma_unmap_len_set(tx_buffer, len, 0);
+
+ /* unmap remaining buffers */
+ while (tx_desc != eop_desc) {
+ tx_buffer++;
+ tx_desc++;
+ i++;
+ if (unlikely(!i)) {
+ i -= tx_ring->count;
+ tx_buffer = tx_ring->tx_buffer_info;
+ tx_desc = WX_TX_DESC(tx_ring, 0);
+ }
+
+ /* unmap any remaining paged data */
+ if (dma_unmap_len(tx_buffer, len)) {
+ dma_unmap_page(tx_ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
+ dma_unmap_len_set(tx_buffer, len, 0);
+ }
+ }
+
+ /* move us one more past the eop_desc for start of next pkt */
+ tx_buffer++;
+ tx_desc++;
+ i++;
+ if (unlikely(!i)) {
+ i -= tx_ring->count;
+ tx_buffer = tx_ring->tx_buffer_info;
+ tx_desc = WX_TX_DESC(tx_ring, 0);
+ }
+
+ /* issue prefetch for next Tx descriptor */
+ prefetch(tx_desc);
+
+ /* update budget accounting */
+ budget--;
+ } while (likely(budget));
+
+ i += tx_ring->count;
+ tx_ring->next_to_clean = i;
+ u64_stats_update_begin(&tx_ring->syncp);
+ tx_ring->stats.bytes += total_bytes;
+ tx_ring->stats.packets += total_packets;
+ u64_stats_update_end(&tx_ring->syncp);
+ q_vector->tx.total_bytes += total_bytes;
+ q_vector->tx.total_packets += total_packets;
+
+ netdev_tx_completed_queue(wx_txring_txq(tx_ring),
+ total_packets, total_bytes);
+
+#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
+ if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) &&
+ (wx_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
+ /* Make sure that anybody stopping the queue after this
+ * sees the new next_to_clean.
+ */
+ smp_mb();
+
+ if (__netif_subqueue_stopped(tx_ring->netdev,
+ tx_ring->queue_index) &&
+ netif_running(tx_ring->netdev))
+ netif_wake_subqueue(tx_ring->netdev,
+ tx_ring->queue_index);
+ }
+
+ return !!budget;
+}
+
+/**
+ * wx_poll - NAPI polling RX/TX cleanup routine
+ * @napi: napi struct with our devices info in it
+ * @budget: amount of work driver is allowed to do this pass, in packets
+ *
+ * This function will clean all queues associated with a q_vector.
+ **/
+static int wx_poll(struct napi_struct *napi, int budget)
+{
+ struct wx_q_vector *q_vector = container_of(napi, struct wx_q_vector, napi);
+ int per_ring_budget, work_done = 0;
+ struct wx *wx = q_vector->wx;
+ bool clean_complete = true;
+ struct wx_ring *ring;
+
+ wx_for_each_ring(ring, q_vector->tx) {
+ if (!wx_clean_tx_irq(q_vector, ring, budget))
+ clean_complete = false;
+ }
+
+ /* Exit if we are called by netpoll */
+ if (budget <= 0)
+ return budget;
+
+ /* attempt to distribute budget to each queue fairly, but don't allow
+ * the budget to go below 1 because we'll exit polling
+ */
+ if (q_vector->rx.count > 1)
+ per_ring_budget = max(budget / q_vector->rx.count, 1);
+ else
+ per_ring_budget = budget;
+
+ wx_for_each_ring(ring, q_vector->rx) {
+ int cleaned = wx_clean_rx_irq(q_vector, ring, per_ring_budget);
+
+ work_done += cleaned;
+ if (cleaned >= per_ring_budget)
+ clean_complete = false;
+ }
+
+ /* If all work not completed, return budget and keep polling */
+ if (!clean_complete)
+ return budget;
+
+ /* all work done, exit the polling mode */
+ if (likely(napi_complete_done(napi, work_done))) {
+ if (netif_running(wx->netdev))
+ wx_intr_enable(wx, WX_INTR_Q(q_vector->v_idx));
+ }
+
+ return min(work_done, budget - 1);
+}
+
+static int wx_maybe_stop_tx(struct wx_ring *tx_ring, u16 size)
+{
+ if (likely(wx_desc_unused(tx_ring) >= size))
+ return 0;
+
+ netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
+
+ /* For the next check */
+ smp_mb();
+
+ /* We need to check again in a case another CPU has just
+ * made room available.
+ */
+ if (likely(wx_desc_unused(tx_ring) < size))
+ return -EBUSY;
+
+ /* A reprieve! - use start_queue because it doesn't call schedule */
+ netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
+
+ return 0;
+}
+
+static void wx_tx_map(struct wx_ring *tx_ring,
+ struct wx_tx_buffer *first)
+{
+ struct sk_buff *skb = first->skb;
+ struct wx_tx_buffer *tx_buffer;
+ u16 i = tx_ring->next_to_use;
+ unsigned int data_len, size;
+ union wx_tx_desc *tx_desc;
+ skb_frag_t *frag;
+ dma_addr_t dma;
+ u32 cmd_type;
+
+ cmd_type = WX_TXD_DTYP_DATA | WX_TXD_IFCS;
+ tx_desc = WX_TX_DESC(tx_ring, i);
+
+ tx_desc->read.olinfo_status = cpu_to_le32(skb->len << WX_TXD_PAYLEN_SHIFT);
+
+ size = skb_headlen(skb);
+ data_len = skb->data_len;
+ dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
+
+ tx_buffer = first;
+
+ for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
+ if (dma_mapping_error(tx_ring->dev, dma))
+ goto dma_error;
+
+ /* record length, and DMA address */
+ dma_unmap_len_set(tx_buffer, len, size);
+ dma_unmap_addr_set(tx_buffer, dma, dma);
+
+ tx_desc->read.buffer_addr = cpu_to_le64(dma);
+
+ while (unlikely(size > WX_MAX_DATA_PER_TXD)) {
+ tx_desc->read.cmd_type_len =
+ cpu_to_le32(cmd_type ^ WX_MAX_DATA_PER_TXD);
+
+ i++;
+ tx_desc++;
+ if (i == tx_ring->count) {
+ tx_desc = WX_TX_DESC(tx_ring, 0);
+ i = 0;
+ }
+ tx_desc->read.olinfo_status = 0;
+
+ dma += WX_MAX_DATA_PER_TXD;
+ size -= WX_MAX_DATA_PER_TXD;
+
+ tx_desc->read.buffer_addr = cpu_to_le64(dma);
+ }
+
+ if (likely(!data_len))
+ break;
+
+ tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size);
+
+ i++;
+ tx_desc++;
+ if (i == tx_ring->count) {
+ tx_desc = WX_TX_DESC(tx_ring, 0);
+ i = 0;
+ }
+ tx_desc->read.olinfo_status = 0;
+
+ size = skb_frag_size(frag);
+
+ data_len -= size;
+
+ dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
+ DMA_TO_DEVICE);
+
+ tx_buffer = &tx_ring->tx_buffer_info[i];
+ }
+
+ /* write last descriptor with RS and EOP bits */
+ cmd_type |= size | WX_TXD_EOP | WX_TXD_RS;
+ tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
+
+ netdev_tx_sent_queue(wx_txring_txq(tx_ring), first->bytecount);
+
+ skb_tx_timestamp(skb);
+
+ /* Force memory writes to complete before letting h/w know there
+ * are new descriptors to fetch. (Only applicable for weak-ordered
+ * memory model archs, such as IA-64).
+ *
+ * We also need this memory barrier to make certain all of the
+ * status bits have been updated before next_to_watch is written.
+ */
+ wmb();
+
+ /* set next_to_watch value indicating a packet is present */
+ first->next_to_watch = tx_desc;
+
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+
+ tx_ring->next_to_use = i;
+
+ wx_maybe_stop_tx(tx_ring, DESC_NEEDED);
+
+ if (netif_xmit_stopped(wx_txring_txq(tx_ring)) || !netdev_xmit_more())
+ writel(i, tx_ring->tail);
+
+ return;
+dma_error:
+ dev_err(tx_ring->dev, "TX DMA map failed\n");
+
+ /* clear dma mappings for failed tx_buffer_info map */
+ for (;;) {
+ tx_buffer = &tx_ring->tx_buffer_info[i];
+ if (dma_unmap_len(tx_buffer, len))
+ dma_unmap_page(tx_ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
+ dma_unmap_len_set(tx_buffer, len, 0);
+ if (tx_buffer == first)
+ break;
+ if (i == 0)
+ i += tx_ring->count;
+ i--;
+ }
+
+ dev_kfree_skb_any(first->skb);
+ first->skb = NULL;
+
+ tx_ring->next_to_use = i;
+}
+
+static netdev_tx_t wx_xmit_frame_ring(struct sk_buff *skb,
+ struct wx_ring *tx_ring)
+{
+ u16 count = TXD_USE_COUNT(skb_headlen(skb));
+ struct wx_tx_buffer *first;
+ unsigned short f;
+
+ /* need: 1 descriptor per page * PAGE_SIZE/WX_MAX_DATA_PER_TXD,
+ * + 1 desc for skb_headlen/WX_MAX_DATA_PER_TXD,
+ * + 2 desc gap to keep tail from touching head,
+ * + 1 desc for context descriptor,
+ * otherwise try next time
+ */
+ for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
+ count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->
+ frags[f]));
+
+ if (wx_maybe_stop_tx(tx_ring, count + 3))
+ return NETDEV_TX_BUSY;
+
+ /* record the location of the first descriptor for this packet */
+ first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
+ first->skb = skb;
+ first->bytecount = skb->len;
+ first->gso_segs = 1;
+
+ wx_tx_map(tx_ring, first);
+
+ return NETDEV_TX_OK;
+}
+
+netdev_tx_t wx_xmit_frame(struct sk_buff *skb,
+ struct net_device *netdev)
+{
+ unsigned int r_idx = skb->queue_mapping;
+ struct wx *wx = netdev_priv(netdev);
+ struct wx_ring *tx_ring;
+
+ if (!netif_carrier_ok(netdev)) {
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ /* The minimum packet size for olinfo paylen is 17 so pad the skb
+ * in order to meet this minimum size requirement.
+ */
+ if (skb_put_padto(skb, 17))
+ return NETDEV_TX_OK;
+
+ if (r_idx >= wx->num_tx_queues)
+ r_idx = r_idx % wx->num_tx_queues;
+ tx_ring = wx->tx_ring[r_idx];
+
+ return wx_xmit_frame_ring(skb, tx_ring);
+}
+EXPORT_SYMBOL(wx_xmit_frame);
+
+void wx_napi_enable_all(struct wx *wx)
+{
+ struct wx_q_vector *q_vector;
+ int q_idx;
+
+ for (q_idx = 0; q_idx < wx->num_q_vectors; q_idx++) {
+ q_vector = wx->q_vector[q_idx];
+ napi_enable(&q_vector->napi);
+ }
+}
+EXPORT_SYMBOL(wx_napi_enable_all);
+
+void wx_napi_disable_all(struct wx *wx)
+{
+ struct wx_q_vector *q_vector;
+ int q_idx;
+
+ for (q_idx = 0; q_idx < wx->num_q_vectors; q_idx++) {
+ q_vector = wx->q_vector[q_idx];
+ napi_disable(&q_vector->napi);
+ }
+}
+EXPORT_SYMBOL(wx_napi_disable_all);
+
+/**
+ * wx_set_rss_queues: Allocate queues for RSS
+ * @wx: board private structure to initialize
+ *
+ * This is our "base" multiqueue mode. RSS (Receive Side Scaling) will try
+ * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU.
+ *
+ **/
+static void wx_set_rss_queues(struct wx *wx)
+{
+ wx->num_rx_queues = wx->mac.max_rx_queues;
+ wx->num_tx_queues = wx->mac.max_tx_queues;
+}
+
+static void wx_set_num_queues(struct wx *wx)
+{
+ /* Start with base case */
+ wx->num_rx_queues = 1;
+ wx->num_tx_queues = 1;
+ wx->queues_per_pool = 1;
+
+ wx_set_rss_queues(wx);
+}
+
+/**
+ * wx_acquire_msix_vectors - acquire MSI-X vectors
+ * @wx: board private structure
+ *
+ * Attempts to acquire a suitable range of MSI-X vector interrupts. Will
+ * return a negative error code if unable to acquire MSI-X vectors for any
+ * reason.
+ */
+static int wx_acquire_msix_vectors(struct wx *wx)
+{
+ struct irq_affinity affd = {0, };
+ int nvecs, i;
+
+ nvecs = min_t(int, num_online_cpus(), wx->mac.max_msix_vectors);
+
+ wx->msix_entries = kcalloc(nvecs,
+ sizeof(struct msix_entry),
+ GFP_KERNEL);
+ if (!wx->msix_entries)
+ return -ENOMEM;
+
+ nvecs = pci_alloc_irq_vectors_affinity(wx->pdev, nvecs,
+ nvecs,
+ PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
+ &affd);
+ if (nvecs < 0) {
+ wx_err(wx, "Failed to allocate MSI-X interrupts. Err: %d\n", nvecs);
+ kfree(wx->msix_entries);
+ wx->msix_entries = NULL;
+ return nvecs;
+ }
+
+ for (i = 0; i < nvecs; i++) {
+ wx->msix_entries[i].entry = i;
+ wx->msix_entries[i].vector = pci_irq_vector(wx->pdev, i);
+ }
+
+ /* one for msix_other */
+ nvecs -= 1;
+ wx->num_q_vectors = nvecs;
+ wx->num_rx_queues = nvecs;
+ wx->num_tx_queues = nvecs;
+
+ return 0;
+}
+
+/**
+ * wx_set_interrupt_capability - set MSI-X or MSI if supported
+ * @wx: board private structure to initialize
+ *
+ * Attempt to configure the interrupts using the best available
+ * capabilities of the hardware and the kernel.
+ **/
+static int wx_set_interrupt_capability(struct wx *wx)
+{
+ struct pci_dev *pdev = wx->pdev;
+ int nvecs, ret;
+
+ /* We will try to get MSI-X interrupts first */
+ ret = wx_acquire_msix_vectors(wx);
+ if (ret == 0 || (ret == -ENOMEM))
+ return ret;
+
+ wx->num_rx_queues = 1;
+ wx->num_tx_queues = 1;
+ wx->num_q_vectors = 1;
+
+ /* minmum one for queue, one for misc*/
+ nvecs = 1;
+ nvecs = pci_alloc_irq_vectors(pdev, nvecs,
+ nvecs, PCI_IRQ_MSI | PCI_IRQ_LEGACY);
+ if (nvecs == 1) {
+ if (pdev->msi_enabled)
+ wx_err(wx, "Fallback to MSI.\n");
+ else
+ wx_err(wx, "Fallback to LEGACY.\n");
+ } else {
+ wx_err(wx, "Failed to allocate MSI/LEGACY interrupts. Error: %d\n", nvecs);
+ return nvecs;
+ }
+
+ pdev->irq = pci_irq_vector(pdev, 0);
+
+ return 0;
+}
+
+/**
+ * wx_cache_ring_rss - Descriptor ring to register mapping for RSS
+ * @wx: board private structure to initialize
+ *
+ * Cache the descriptor ring offsets for RSS, ATR, FCoE, and SR-IOV.
+ *
+ **/
+static void wx_cache_ring_rss(struct wx *wx)
+{
+ u16 i;
+
+ for (i = 0; i < wx->num_rx_queues; i++)
+ wx->rx_ring[i]->reg_idx = i;
+
+ for (i = 0; i < wx->num_tx_queues; i++)
+ wx->tx_ring[i]->reg_idx = i;
+}
+
+static void wx_add_ring(struct wx_ring *ring, struct wx_ring_container *head)
+{
+ ring->next = head->ring;
+ head->ring = ring;
+ head->count++;
+}
+
+/**
+ * wx_alloc_q_vector - Allocate memory for a single interrupt vector
+ * @wx: board private structure to initialize
+ * @v_count: q_vectors allocated on wx, used for ring interleaving
+ * @v_idx: index of vector in wx struct
+ * @txr_count: total number of Tx rings to allocate
+ * @txr_idx: index of first Tx ring to allocate
+ * @rxr_count: total number of Rx rings to allocate
+ * @rxr_idx: index of first Rx ring to allocate
+ *
+ * We allocate one q_vector. If allocation fails we return -ENOMEM.
+ **/
+static int wx_alloc_q_vector(struct wx *wx,
+ unsigned int v_count, unsigned int v_idx,
+ unsigned int txr_count, unsigned int txr_idx,
+ unsigned int rxr_count, unsigned int rxr_idx)
+{
+ struct wx_q_vector *q_vector;
+ int ring_count, default_itr;
+ struct wx_ring *ring;
+
+ /* note this will allocate space for the ring structure as well! */
+ ring_count = txr_count + rxr_count;
+
+ q_vector = kzalloc(struct_size(q_vector, ring, ring_count),
+ GFP_KERNEL);
+ if (!q_vector)
+ return -ENOMEM;
+
+ /* initialize NAPI */
+ netif_napi_add(wx->netdev, &q_vector->napi,
+ wx_poll);
+
+ /* tie q_vector and wx together */
+ wx->q_vector[v_idx] = q_vector;
+ q_vector->wx = wx;
+ q_vector->v_idx = v_idx;
+ if (cpu_online(v_idx))
+ q_vector->numa_node = cpu_to_node(v_idx);
+
+ /* initialize pointer to rings */
+ ring = q_vector->ring;
+
+ if (wx->mac.type == wx_mac_sp)
+ default_itr = WX_12K_ITR;
+ else
+ default_itr = WX_7K_ITR;
+ /* initialize ITR */
+ if (txr_count && !rxr_count)
+ /* tx only vector */
+ q_vector->itr = wx->tx_itr_setting ?
+ default_itr : wx->tx_itr_setting;
+ else
+ /* rx or rx/tx vector */
+ q_vector->itr = wx->rx_itr_setting ?
+ default_itr : wx->rx_itr_setting;
+
+ while (txr_count) {
+ /* assign generic ring traits */
+ ring->dev = &wx->pdev->dev;
+ ring->netdev = wx->netdev;
+
+ /* configure backlink on ring */
+ ring->q_vector = q_vector;
+
+ /* update q_vector Tx values */
+ wx_add_ring(ring, &q_vector->tx);
+
+ /* apply Tx specific ring traits */
+ ring->count = wx->tx_ring_count;
+
+ ring->queue_index = txr_idx;
+
+ /* assign ring to wx */
+ wx->tx_ring[txr_idx] = ring;
+
+ /* update count and index */
+ txr_count--;
+ txr_idx += v_count;
+
+ /* push pointer to next ring */
+ ring++;
+ }
+
+ while (rxr_count) {
+ /* assign generic ring traits */
+ ring->dev = &wx->pdev->dev;
+ ring->netdev = wx->netdev;
+
+ /* configure backlink on ring */
+ ring->q_vector = q_vector;
+
+ /* update q_vector Rx values */
+ wx_add_ring(ring, &q_vector->rx);
+
+ /* apply Rx specific ring traits */
+ ring->count = wx->rx_ring_count;
+ ring->queue_index = rxr_idx;
+
+ /* assign ring to wx */
+ wx->rx_ring[rxr_idx] = ring;
+
+ /* update count and index */
+ rxr_count--;
+ rxr_idx += v_count;
+
+ /* push pointer to next ring */
+ ring++;
+ }
+
+ return 0;
+}
+
+/**
+ * wx_free_q_vector - Free memory allocated for specific interrupt vector
+ * @wx: board private structure to initialize
+ * @v_idx: Index of vector to be freed
+ *
+ * This function frees the memory allocated to the q_vector. In addition if
+ * NAPI is enabled it will delete any references to the NAPI struct prior
+ * to freeing the q_vector.
+ **/
+static void wx_free_q_vector(struct wx *wx, int v_idx)
+{
+ struct wx_q_vector *q_vector = wx->q_vector[v_idx];
+ struct wx_ring *ring;
+
+ wx_for_each_ring(ring, q_vector->tx)
+ wx->tx_ring[ring->queue_index] = NULL;
+
+ wx_for_each_ring(ring, q_vector->rx)
+ wx->rx_ring[ring->queue_index] = NULL;
+
+ wx->q_vector[v_idx] = NULL;
+ netif_napi_del(&q_vector->napi);
+ kfree_rcu(q_vector, rcu);
+}
+
+/**
+ * wx_alloc_q_vectors - Allocate memory for interrupt vectors
+ * @wx: board private structure to initialize
+ *
+ * We allocate one q_vector per queue interrupt. If allocation fails we
+ * return -ENOMEM.
+ **/
+static int wx_alloc_q_vectors(struct wx *wx)
+{
+ unsigned int rxr_idx = 0, txr_idx = 0, v_idx = 0;
+ unsigned int rxr_remaining = wx->num_rx_queues;
+ unsigned int txr_remaining = wx->num_tx_queues;
+ unsigned int q_vectors = wx->num_q_vectors;
+ int rqpv, tqpv;
+ int err;
+
+ for (; v_idx < q_vectors; v_idx++) {
+ rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
+ tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
+ err = wx_alloc_q_vector(wx, q_vectors, v_idx,
+ tqpv, txr_idx,
+ rqpv, rxr_idx);
+
+ if (err)
+ goto err_out;
+
+ /* update counts and index */
+ rxr_remaining -= rqpv;
+ txr_remaining -= tqpv;
+ rxr_idx++;
+ txr_idx++;
+ }
+
+ return 0;
+
+err_out:
+ wx->num_tx_queues = 0;
+ wx->num_rx_queues = 0;
+ wx->num_q_vectors = 0;
+
+ while (v_idx--)
+ wx_free_q_vector(wx, v_idx);
+
+ return -ENOMEM;
+}
+
+/**
+ * wx_free_q_vectors - Free memory allocated for interrupt vectors
+ * @wx: board private structure to initialize
+ *
+ * This function frees the memory allocated to the q_vectors. In addition if
+ * NAPI is enabled it will delete any references to the NAPI struct prior
+ * to freeing the q_vector.
+ **/
+static void wx_free_q_vectors(struct wx *wx)
+{
+ int v_idx = wx->num_q_vectors;
+
+ wx->num_tx_queues = 0;
+ wx->num_rx_queues = 0;
+ wx->num_q_vectors = 0;
+
+ while (v_idx--)
+ wx_free_q_vector(wx, v_idx);
+}
+
+void wx_reset_interrupt_capability(struct wx *wx)
+{
+ struct pci_dev *pdev = wx->pdev;
+
+ if (!pdev->msi_enabled && !pdev->msix_enabled)
+ return;
+
+ pci_free_irq_vectors(wx->pdev);
+ if (pdev->msix_enabled) {
+ kfree(wx->msix_entries);
+ wx->msix_entries = NULL;
+ }
+}
+EXPORT_SYMBOL(wx_reset_interrupt_capability);
+
+/**
+ * wx_clear_interrupt_scheme - Clear the current interrupt scheme settings
+ * @wx: board private structure to clear interrupt scheme on
+ *
+ * We go through and clear interrupt specific resources and reset the structure
+ * to pre-load conditions
+ **/
+void wx_clear_interrupt_scheme(struct wx *wx)
+{
+ wx_free_q_vectors(wx);
+ wx_reset_interrupt_capability(wx);
+}
+EXPORT_SYMBOL(wx_clear_interrupt_scheme);
+
+int wx_init_interrupt_scheme(struct wx *wx)
+{
+ int ret;
+
+ /* Number of supported queues */
+ wx_set_num_queues(wx);
+
+ /* Set interrupt mode */
+ ret = wx_set_interrupt_capability(wx);
+ if (ret) {
+ wx_err(wx, "Allocate irq vectors for failed.\n");
+ return ret;
+ }
+
+ /* Allocate memory for queues */
+ ret = wx_alloc_q_vectors(wx);
+ if (ret) {
+ wx_err(wx, "Unable to allocate memory for queue vectors.\n");
+ wx_reset_interrupt_capability(wx);
+ return ret;
+ }
+
+ wx_cache_ring_rss(wx);
+
+ return 0;
+}
+EXPORT_SYMBOL(wx_init_interrupt_scheme);
+
+irqreturn_t wx_msix_clean_rings(int __always_unused irq, void *data)
+{
+ struct wx_q_vector *q_vector = data;
+
+ /* EIAM disabled interrupts (on this vector) for us */
+ if (q_vector->rx.ring || q_vector->tx.ring)
+ napi_schedule_irqoff(&q_vector->napi);
+
+ return IRQ_HANDLED;
+}
+EXPORT_SYMBOL(wx_msix_clean_rings);
+
+void wx_free_irq(struct wx *wx)
+{
+ struct pci_dev *pdev = wx->pdev;
+ int vector;
+
+ if (!(pdev->msix_enabled)) {
+ free_irq(pdev->irq, wx);
+ return;
+ }
+
+ for (vector = 0; vector < wx->num_q_vectors; vector++) {
+ struct wx_q_vector *q_vector = wx->q_vector[vector];
+ struct msix_entry *entry = &wx->msix_entries[vector];
+
+ /* free only the irqs that were actually requested */
+ if (!q_vector->rx.ring && !q_vector->tx.ring)
+ continue;
+
+ free_irq(entry->vector, q_vector);
+ }
+
+ free_irq(wx->msix_entries[vector].vector, wx);
+}
+EXPORT_SYMBOL(wx_free_irq);
+
+/**
+ * wx_setup_isb_resources - allocate interrupt status resources
+ * @wx: board private structure
+ *
+ * Return 0 on success, negative on failure
+ **/
+int wx_setup_isb_resources(struct wx *wx)
+{
+ struct pci_dev *pdev = wx->pdev;
+
+ wx->isb_mem = dma_alloc_coherent(&pdev->dev,
+ sizeof(u32) * 4,
+ &wx->isb_dma,
+ GFP_KERNEL);
+ if (!wx->isb_mem) {
+ wx_err(wx, "Alloc isb_mem failed\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(wx_setup_isb_resources);
+
+/**
+ * wx_free_isb_resources - allocate all queues Rx resources
+ * @wx: board private structure
+ *
+ * Return 0 on success, negative on failure
+ **/
+void wx_free_isb_resources(struct wx *wx)
+{
+ struct pci_dev *pdev = wx->pdev;
+
+ dma_free_coherent(&pdev->dev, sizeof(u32) * 4,
+ wx->isb_mem, wx->isb_dma);
+ wx->isb_mem = NULL;
+}
+EXPORT_SYMBOL(wx_free_isb_resources);
+
+u32 wx_misc_isb(struct wx *wx, enum wx_isb_idx idx)
+{
+ u32 cur_tag = 0;
+
+ cur_tag = wx->isb_mem[WX_ISB_HEADER];
+ wx->isb_tag[idx] = cur_tag;
+
+ return (__force u32)cpu_to_le32(wx->isb_mem[idx]);
+}
+EXPORT_SYMBOL(wx_misc_isb);
+
+/**
+ * wx_set_ivar - set the IVAR registers, mapping interrupt causes to vectors
+ * @wx: pointer to wx struct
+ * @direction: 0 for Rx, 1 for Tx, -1 for other causes
+ * @queue: queue to map the corresponding interrupt to
+ * @msix_vector: the vector to map to the corresponding queue
+ *
+ **/
+static void wx_set_ivar(struct wx *wx, s8 direction,
+ u16 queue, u16 msix_vector)
+{
+ u32 ivar, index;
+
+ if (direction == -1) {
+ /* other causes */
+ msix_vector |= WX_PX_IVAR_ALLOC_VAL;
+ index = 0;
+ ivar = rd32(wx, WX_PX_MISC_IVAR);
+ ivar &= ~(0xFF << index);
+ ivar |= (msix_vector << index);
+ wr32(wx, WX_PX_MISC_IVAR, ivar);
+ } else {
+ /* tx or rx causes */
+ msix_vector |= WX_PX_IVAR_ALLOC_VAL;
+ index = ((16 * (queue & 1)) + (8 * direction));
+ ivar = rd32(wx, WX_PX_IVAR(queue >> 1));
+ ivar &= ~(0xFF << index);
+ ivar |= (msix_vector << index);
+ wr32(wx, WX_PX_IVAR(queue >> 1), ivar);
+ }
+}
+
+/**
+ * wx_write_eitr - write EITR register in hardware specific way
+ * @q_vector: structure containing interrupt and ring information
+ *
+ * This function is made to be called by ethtool and by the driver
+ * when it needs to update EITR registers at runtime. Hardware
+ * specific quirks/differences are taken care of here.
+ */
+static void wx_write_eitr(struct wx_q_vector *q_vector)
+{
+ struct wx *wx = q_vector->wx;
+ int v_idx = q_vector->v_idx;
+ u32 itr_reg;
+
+ if (wx->mac.type == wx_mac_sp)
+ itr_reg = q_vector->itr & WX_SP_MAX_EITR;
+ else
+ itr_reg = q_vector->itr & WX_EM_MAX_EITR;
+
+ itr_reg |= WX_PX_ITR_CNT_WDIS;
+
+ wr32(wx, WX_PX_ITR(v_idx), itr_reg);
+}
+
+/**
+ * wx_configure_vectors - Configure vectors for hardware
+ * @wx: board private structure
+ *
+ * wx_configure_vectors sets up the hardware to properly generate MSI-X/MSI/LEGACY
+ * interrupts.
+ **/
+void wx_configure_vectors(struct wx *wx)
+{
+ struct pci_dev *pdev = wx->pdev;
+ u32 eitrsel = 0;
+ u16 v_idx;
+
+ if (pdev->msix_enabled) {
+ /* Populate MSIX to EITR Select */
+ wr32(wx, WX_PX_ITRSEL, eitrsel);
+ /* use EIAM to auto-mask when MSI-X interrupt is asserted
+ * this saves a register write for every interrupt
+ */
+ wr32(wx, WX_PX_GPIE, WX_PX_GPIE_MODEL);
+ } else {
+ /* legacy interrupts, use EIAM to auto-mask when reading EICR,
+ * specifically only auto mask tx and rx interrupts.
+ */
+ wr32(wx, WX_PX_GPIE, 0);
+ }
+
+ /* Populate the IVAR table and set the ITR values to the
+ * corresponding register.
+ */
+ for (v_idx = 0; v_idx < wx->num_q_vectors; v_idx++) {
+ struct wx_q_vector *q_vector = wx->q_vector[v_idx];
+ struct wx_ring *ring;
+
+ wx_for_each_ring(ring, q_vector->rx)
+ wx_set_ivar(wx, 0, ring->reg_idx, v_idx);
+
+ wx_for_each_ring(ring, q_vector->tx)
+ wx_set_ivar(wx, 1, ring->reg_idx, v_idx);
+
+ wx_write_eitr(q_vector);
+ }
+
+ wx_set_ivar(wx, -1, 0, v_idx);
+ if (pdev->msix_enabled)
+ wr32(wx, WX_PX_ITR(v_idx), 1950);
+}
+EXPORT_SYMBOL(wx_configure_vectors);
+
+/**
+ * wx_clean_rx_ring - Free Rx Buffers per Queue
+ * @rx_ring: ring to free buffers from
+ **/
+static void wx_clean_rx_ring(struct wx_ring *rx_ring)
+{
+ struct wx_rx_buffer *rx_buffer;
+ u16 i = rx_ring->next_to_clean;
+
+ rx_buffer = &rx_ring->rx_buffer_info[i];
+
+ /* Free all the Rx ring sk_buffs */
+ while (i != rx_ring->next_to_alloc) {
+ if (rx_buffer->skb) {
+ struct sk_buff *skb = rx_buffer->skb;
+
+ if (WX_CB(skb)->page_released)
+ page_pool_put_full_page(rx_ring->page_pool, rx_buffer->page, false);
+
+ dev_kfree_skb(skb);
+ }
+
+ /* Invalidate cache lines that may have been written to by
+ * device so that we avoid corrupting memory.
+ */
+ dma_sync_single_range_for_cpu(rx_ring->dev,
+ rx_buffer->dma,
+ rx_buffer->page_offset,
+ WX_RX_BUFSZ,
+ DMA_FROM_DEVICE);
+
+ /* free resources associated with mapping */
+ page_pool_put_full_page(rx_ring->page_pool, rx_buffer->page, false);
+ __page_frag_cache_drain(rx_buffer->page,
+ rx_buffer->pagecnt_bias);
+
+ i++;
+ rx_buffer++;
+ if (i == rx_ring->count) {
+ i = 0;
+ rx_buffer = rx_ring->rx_buffer_info;
+ }
+ }
+
+ rx_ring->next_to_alloc = 0;
+ rx_ring->next_to_clean = 0;
+ rx_ring->next_to_use = 0;
+}
+
+/**
+ * wx_clean_all_rx_rings - Free Rx Buffers for all queues
+ * @wx: board private structure
+ **/
+void wx_clean_all_rx_rings(struct wx *wx)
+{
+ int i;
+
+ for (i = 0; i < wx->num_rx_queues; i++)
+ wx_clean_rx_ring(wx->rx_ring[i]);
+}
+EXPORT_SYMBOL(wx_clean_all_rx_rings);
+
+/**
+ * wx_free_rx_resources - Free Rx Resources
+ * @rx_ring: ring to clean the resources from
+ *
+ * Free all receive software resources
+ **/
+static void wx_free_rx_resources(struct wx_ring *rx_ring)
+{
+ wx_clean_rx_ring(rx_ring);
+ kvfree(rx_ring->rx_buffer_info);
+ rx_ring->rx_buffer_info = NULL;
+
+ /* if not set, then don't free */
+ if (!rx_ring->desc)
+ return;
+
+ dma_free_coherent(rx_ring->dev, rx_ring->size,
+ rx_ring->desc, rx_ring->dma);
+
+ rx_ring->desc = NULL;
+
+ if (rx_ring->page_pool) {
+ page_pool_destroy(rx_ring->page_pool);
+ rx_ring->page_pool = NULL;
+ }
+}
+
+/**
+ * wx_free_all_rx_resources - Free Rx Resources for All Queues
+ * @wx: pointer to hardware structure
+ *
+ * Free all receive software resources
+ **/
+static void wx_free_all_rx_resources(struct wx *wx)
+{
+ int i;
+
+ for (i = 0; i < wx->num_rx_queues; i++)
+ wx_free_rx_resources(wx->rx_ring[i]);
+}
+
+/**
+ * wx_clean_tx_ring - Free Tx Buffers
+ * @tx_ring: ring to be cleaned
+ **/
+static void wx_clean_tx_ring(struct wx_ring *tx_ring)
+{
+ struct wx_tx_buffer *tx_buffer;
+ u16 i = tx_ring->next_to_clean;
+
+ tx_buffer = &tx_ring->tx_buffer_info[i];
+
+ while (i != tx_ring->next_to_use) {
+ union wx_tx_desc *eop_desc, *tx_desc;
+
+ /* Free all the Tx ring sk_buffs */
+ dev_kfree_skb_any(tx_buffer->skb);
+
+ /* unmap skb header data */
+ dma_unmap_single(tx_ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
+
+ /* check for eop_desc to determine the end of the packet */
+ eop_desc = tx_buffer->next_to_watch;
+ tx_desc = WX_TX_DESC(tx_ring, i);
+
+ /* unmap remaining buffers */
+ while (tx_desc != eop_desc) {
+ tx_buffer++;
+ tx_desc++;
+ i++;
+ if (unlikely(i == tx_ring->count)) {
+ i = 0;
+ tx_buffer = tx_ring->tx_buffer_info;
+ tx_desc = WX_TX_DESC(tx_ring, 0);
+ }
+
+ /* unmap any remaining paged data */
+ if (dma_unmap_len(tx_buffer, len))
+ dma_unmap_page(tx_ring->dev,
+ dma_unmap_addr(tx_buffer, dma),
+ dma_unmap_len(tx_buffer, len),
+ DMA_TO_DEVICE);
+ }
+
+ /* move us one more past the eop_desc for start of next pkt */
+ tx_buffer++;
+ i++;
+ if (unlikely(i == tx_ring->count)) {
+ i = 0;
+ tx_buffer = tx_ring->tx_buffer_info;
+ }
+ }
+
+ netdev_tx_reset_queue(wx_txring_txq(tx_ring));
+
+ /* reset next_to_use and next_to_clean */
+ tx_ring->next_to_use = 0;
+ tx_ring->next_to_clean = 0;
+}
+
+/**
+ * wx_clean_all_tx_rings - Free Tx Buffers for all queues
+ * @wx: board private structure
+ **/
+void wx_clean_all_tx_rings(struct wx *wx)
+{
+ int i;
+
+ for (i = 0; i < wx->num_tx_queues; i++)
+ wx_clean_tx_ring(wx->tx_ring[i]);
+}
+EXPORT_SYMBOL(wx_clean_all_tx_rings);
+
+/**
+ * wx_free_tx_resources - Free Tx Resources per Queue
+ * @tx_ring: Tx descriptor ring for a specific queue
+ *
+ * Free all transmit software resources
+ **/
+static void wx_free_tx_resources(struct wx_ring *tx_ring)
+{
+ wx_clean_tx_ring(tx_ring);
+ kvfree(tx_ring->tx_buffer_info);
+ tx_ring->tx_buffer_info = NULL;
+
+ /* if not set, then don't free */
+ if (!tx_ring->desc)
+ return;
+
+ dma_free_coherent(tx_ring->dev, tx_ring->size,
+ tx_ring->desc, tx_ring->dma);
+ tx_ring->desc = NULL;
+}
+
+/**
+ * wx_free_all_tx_resources - Free Tx Resources for All Queues
+ * @wx: pointer to hardware structure
+ *
+ * Free all transmit software resources
+ **/
+static void wx_free_all_tx_resources(struct wx *wx)
+{
+ int i;
+
+ for (i = 0; i < wx->num_tx_queues; i++)
+ wx_free_tx_resources(wx->tx_ring[i]);
+}
+
+void wx_free_resources(struct wx *wx)
+{
+ wx_free_isb_resources(wx);
+ wx_free_all_rx_resources(wx);
+ wx_free_all_tx_resources(wx);
+}
+EXPORT_SYMBOL(wx_free_resources);
+
+static int wx_alloc_page_pool(struct wx_ring *rx_ring)
+{
+ int ret = 0;
+
+ struct page_pool_params pp_params = {
+ .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
+ .order = 0,
+ .pool_size = rx_ring->size,
+ .nid = dev_to_node(rx_ring->dev),
+ .dev = rx_ring->dev,
+ .dma_dir = DMA_FROM_DEVICE,
+ .offset = 0,
+ .max_len = PAGE_SIZE,
+ };
+
+ rx_ring->page_pool = page_pool_create(&pp_params);
+ if (IS_ERR(rx_ring->page_pool)) {
+ ret = PTR_ERR(rx_ring->page_pool);
+ rx_ring->page_pool = NULL;
+ }
+
+ return ret;
+}
+
+/**
+ * wx_setup_rx_resources - allocate Rx resources (Descriptors)
+ * @rx_ring: rx descriptor ring (for a specific queue) to setup
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int wx_setup_rx_resources(struct wx_ring *rx_ring)
+{
+ struct device *dev = rx_ring->dev;
+ int orig_node = dev_to_node(dev);
+ int numa_node = NUMA_NO_NODE;
+ int size, ret;
+
+ size = sizeof(struct wx_rx_buffer) * rx_ring->count;
+
+ if (rx_ring->q_vector)
+ numa_node = rx_ring->q_vector->numa_node;
+
+ rx_ring->rx_buffer_info = kvmalloc_node(size, GFP_KERNEL, numa_node);
+ if (!rx_ring->rx_buffer_info)
+ rx_ring->rx_buffer_info = kvmalloc(size, GFP_KERNEL);
+ if (!rx_ring->rx_buffer_info)
+ goto err;
+
+ /* Round up to nearest 4K */
+ rx_ring->size = rx_ring->count * sizeof(union wx_rx_desc);
+ rx_ring->size = ALIGN(rx_ring->size, 4096);
+
+ set_dev_node(dev, numa_node);
+ rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
+ &rx_ring->dma, GFP_KERNEL);
+ if (!rx_ring->desc) {
+ set_dev_node(dev, orig_node);
+ rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
+ &rx_ring->dma, GFP_KERNEL);
+ }
+
+ if (!rx_ring->desc)
+ goto err;
+
+ rx_ring->next_to_clean = 0;
+ rx_ring->next_to_use = 0;
+
+ ret = wx_alloc_page_pool(rx_ring);
+ if (ret < 0) {
+ dev_err(rx_ring->dev, "Page pool creation failed: %d\n", ret);
+ goto err;
+ }
+
+ return 0;
+err:
+ kvfree(rx_ring->rx_buffer_info);
+ rx_ring->rx_buffer_info = NULL;
+ dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
+ return -ENOMEM;
+}
+
+/**
+ * wx_setup_all_rx_resources - allocate all queues Rx resources
+ * @wx: pointer to hardware structure
+ *
+ * If this function returns with an error, then it's possible one or
+ * more of the rings is populated (while the rest are not). It is the
+ * callers duty to clean those orphaned rings.
+ *
+ * Return 0 on success, negative on failure
+ **/
+static int wx_setup_all_rx_resources(struct wx *wx)
+{
+ int i, err = 0;
+
+ for (i = 0; i < wx->num_rx_queues; i++) {
+ err = wx_setup_rx_resources(wx->rx_ring[i]);
+ if (!err)
+ continue;
+
+ wx_err(wx, "Allocation for Rx Queue %u failed\n", i);
+ goto err_setup_rx;
+ }
+
+ return 0;
+err_setup_rx:
+ /* rewind the index freeing the rings as we go */
+ while (i--)
+ wx_free_rx_resources(wx->rx_ring[i]);
+ return err;
+}
+
+/**
+ * wx_setup_tx_resources - allocate Tx resources (Descriptors)
+ * @tx_ring: tx descriptor ring (for a specific queue) to setup
+ *
+ * Return 0 on success, negative on failure
+ **/
+static int wx_setup_tx_resources(struct wx_ring *tx_ring)
+{
+ struct device *dev = tx_ring->dev;
+ int orig_node = dev_to_node(dev);
+ int numa_node = NUMA_NO_NODE;
+ int size;
+
+ size = sizeof(struct wx_tx_buffer) * tx_ring->count;
+
+ if (tx_ring->q_vector)
+ numa_node = tx_ring->q_vector->numa_node;
+
+ tx_ring->tx_buffer_info = kvmalloc_node(size, GFP_KERNEL, numa_node);
+ if (!tx_ring->tx_buffer_info)
+ tx_ring->tx_buffer_info = kvmalloc(size, GFP_KERNEL);
+ if (!tx_ring->tx_buffer_info)
+ goto err;
+
+ /* round up to nearest 4K */
+ tx_ring->size = tx_ring->count * sizeof(union wx_tx_desc);
+ tx_ring->size = ALIGN(tx_ring->size, 4096);
+
+ set_dev_node(dev, numa_node);
+ tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
+ &tx_ring->dma, GFP_KERNEL);
+ if (!tx_ring->desc) {
+ set_dev_node(dev, orig_node);
+ tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
+ &tx_ring->dma, GFP_KERNEL);
+ }
+
+ if (!tx_ring->desc)
+ goto err;
+
+ tx_ring->next_to_use = 0;
+ tx_ring->next_to_clean = 0;
+
+ return 0;
+
+err:
+ kvfree(tx_ring->tx_buffer_info);
+ tx_ring->tx_buffer_info = NULL;
+ dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n");
+ return -ENOMEM;
+}
+
+/**
+ * wx_setup_all_tx_resources - allocate all queues Tx resources
+ * @wx: pointer to private structure
+ *
+ * If this function returns with an error, then it's possible one or
+ * more of the rings is populated (while the rest are not). It is the
+ * callers duty to clean those orphaned rings.
+ *
+ * Return 0 on success, negative on failure
+ **/
+static int wx_setup_all_tx_resources(struct wx *wx)
+{
+ int i, err = 0;
+
+ for (i = 0; i < wx->num_tx_queues; i++) {
+ err = wx_setup_tx_resources(wx->tx_ring[i]);
+ if (!err)
+ continue;
+
+ wx_err(wx, "Allocation for Tx Queue %u failed\n", i);
+ goto err_setup_tx;
+ }
+
+ return 0;
+err_setup_tx:
+ /* rewind the index freeing the rings as we go */
+ while (i--)
+ wx_free_tx_resources(wx->tx_ring[i]);
+ return err;
+}
+
+int wx_setup_resources(struct wx *wx)
+{
+ int err;
+
+ /* allocate transmit descriptors */
+ err = wx_setup_all_tx_resources(wx);
+ if (err)
+ return err;
+
+ /* allocate receive descriptors */
+ err = wx_setup_all_rx_resources(wx);
+ if (err)
+ goto err_free_tx;
+
+ err = wx_setup_isb_resources(wx);
+ if (err)
+ goto err_free_rx;
+
+ return 0;
+
+err_free_rx:
+ wx_free_all_rx_resources(wx);
+err_free_tx:
+ wx_free_all_tx_resources(wx);
+
+ return err;
+}
+EXPORT_SYMBOL(wx_setup_resources);
+
+/**
+ * wx_get_stats64 - Get System Network Statistics
+ * @netdev: network interface device structure
+ * @stats: storage space for 64bit statistics
+ */
+void wx_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct wx *wx = netdev_priv(netdev);
+ int i;
+
+ rcu_read_lock();
+ for (i = 0; i < wx->num_rx_queues; i++) {
+ struct wx_ring *ring = READ_ONCE(wx->rx_ring[i]);
+ u64 bytes, packets;
+ unsigned int start;
+
+ if (ring) {
+ do {
+ start = u64_stats_fetch_begin(&ring->syncp);
+ packets = ring->stats.packets;
+ bytes = ring->stats.bytes;
+ } while (u64_stats_fetch_retry(&ring->syncp, start));
+ stats->rx_packets += packets;
+ stats->rx_bytes += bytes;
+ }
+ }
+
+ for (i = 0; i < wx->num_tx_queues; i++) {
+ struct wx_ring *ring = READ_ONCE(wx->tx_ring[i]);
+ u64 bytes, packets;
+ unsigned int start;
+
+ if (ring) {
+ do {
+ start = u64_stats_fetch_begin(&ring->syncp);
+ packets = ring->stats.packets;
+ bytes = ring->stats.bytes;
+ } while (u64_stats_fetch_retry(&ring->syncp,
+ start));
+ stats->tx_packets += packets;
+ stats->tx_bytes += bytes;
+ }
+ }
+
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL(wx_get_stats64);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.h b/drivers/net/ethernet/wangxun/libwx/wx_lib.h
new file mode 100644
index 000000000000..50ee41f1fa10
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * WangXun Gigabit PCI Express Linux driver
+ * Copyright (c) 2019 - 2022 Beijing WangXun Technology Co., Ltd.
+ */
+
+#ifndef _WX_LIB_H_
+#define _WX_LIB_H_
+
+void wx_alloc_rx_buffers(struct wx_ring *rx_ring, u16 cleaned_count);
+u16 wx_desc_unused(struct wx_ring *ring);
+netdev_tx_t wx_xmit_frame(struct sk_buff *skb,
+ struct net_device *netdev);
+void wx_napi_enable_all(struct wx *wx);
+void wx_napi_disable_all(struct wx *wx);
+void wx_reset_interrupt_capability(struct wx *wx);
+void wx_clear_interrupt_scheme(struct wx *wx);
+int wx_init_interrupt_scheme(struct wx *wx);
+irqreturn_t wx_msix_clean_rings(int __always_unused irq, void *data);
+void wx_free_irq(struct wx *wx);
+int wx_setup_isb_resources(struct wx *wx);
+void wx_free_isb_resources(struct wx *wx);
+u32 wx_misc_isb(struct wx *wx, enum wx_isb_idx idx);
+void wx_configure_vectors(struct wx *wx);
+void wx_clean_all_rx_rings(struct wx *wx);
+void wx_clean_all_tx_rings(struct wx *wx);
+void wx_free_resources(struct wx *wx);
+int wx_setup_resources(struct wx *wx);
+void wx_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats);
+
+#endif /* _NGBE_LIB_H_ */
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_type.h b/drivers/net/ethernet/wangxun/libwx/wx_type.h
index 1cbeef8230bf..77d8d7f1707e 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_type.h
+++ b/drivers/net/ethernet/wangxun/libwx/wx_type.h
@@ -4,6 +4,9 @@
#ifndef _WX_TYPE_H_
#define _WX_TYPE_H_
+#include <linux/bitfield.h>
+#include <linux/netdevice.h>
+
/* Vendor ID */
#ifndef PCI_VENDOR_ID_WANGXUN
#define PCI_VENDOR_ID_WANGXUN 0x8088
@@ -36,12 +39,11 @@
#define WX_SPI_CMD 0x10104
#define WX_SPI_CMD_READ_DWORD 0x1
#define WX_SPI_CLK_DIV 0x3
-#define WX_SPI_CMD_CMD(_v) (((_v) & 0x7) << 28)
-#define WX_SPI_CMD_CLK(_v) (((_v) & 0x7) << 25)
-#define WX_SPI_CMD_ADDR(_v) (((_v) & 0xFFFFFF))
+#define WX_SPI_CMD_CMD(_v) FIELD_PREP(GENMASK(30, 28), _v)
+#define WX_SPI_CMD_CLK(_v) FIELD_PREP(GENMASK(27, 25), _v)
+#define WX_SPI_CMD_ADDR(_v) FIELD_PREP(GENMASK(23, 0), _v)
#define WX_SPI_DATA 0x10108
#define WX_SPI_DATA_BYPASS BIT(31)
-#define WX_SPI_DATA_STATUS(_v) (((_v) & 0xFF) << 16)
#define WX_SPI_DATA_OP_DONE BIT(0)
#define WX_SPI_STATUS 0x1010C
#define WX_SPI_STATUS_OPDONE BIT(0)
@@ -64,21 +66,50 @@
/* port cfg Registers */
#define WX_CFG_PORT_CTL 0x14400
#define WX_CFG_PORT_CTL_DRV_LOAD BIT(3)
+#define WX_CFG_PORT_CTL_QINQ BIT(2)
+#define WX_CFG_PORT_CTL_D_VLAN BIT(0) /* double vlan*/
+#define WX_CFG_TAG_TPID(_i) (0x14430 + ((_i) * 4))
+
+/* GPIO Registers */
+#define WX_GPIO_DR 0x14800
+#define WX_GPIO_DR_0 BIT(0) /* SDP0 Data Value */
+#define WX_GPIO_DR_1 BIT(1) /* SDP1 Data Value */
+#define WX_GPIO_DDR 0x14804
+#define WX_GPIO_DDR_0 BIT(0) /* SDP0 IO direction */
+#define WX_GPIO_DDR_1 BIT(1) /* SDP1 IO direction */
+#define WX_GPIO_CTL 0x14808
+#define WX_GPIO_INTEN 0x14830
+#define WX_GPIO_INTEN_0 BIT(0)
+#define WX_GPIO_INTEN_1 BIT(1)
+#define WX_GPIO_INTMASK 0x14834
+#define WX_GPIO_INTTYPE_LEVEL 0x14838
+#define WX_GPIO_POLARITY 0x1483C
+#define WX_GPIO_EOI 0x1484C
/*********************** Transmit DMA registers **************************/
/* transmit global control */
#define WX_TDM_CTL 0x18000
/* TDM CTL BIT */
#define WX_TDM_CTL_TE BIT(0) /* Transmit Enable */
+#define WX_TDM_PB_THRE(_i) (0x18020 + ((_i) * 4))
/***************************** RDB registers *********************************/
/* receive packet buffer */
#define WX_RDB_PB_CTL 0x19000
#define WX_RDB_PB_CTL_RXEN BIT(31) /* Enable Receiver */
#define WX_RDB_PB_CTL_DISABLED BIT(0)
+#define WX_RDB_PB_SZ(_i) (0x19020 + ((_i) * 4))
+#define WX_RDB_PB_SZ_SHIFT 10
/* statistic */
#define WX_RDB_PFCMACDAL 0x19210
#define WX_RDB_PFCMACDAH 0x19214
+/* ring assignment */
+#define WX_RDB_PL_CFG(_i) (0x19300 + ((_i) * 4))
+#define WX_RDB_PL_CFG_L4HDR BIT(1)
+#define WX_RDB_PL_CFG_L3HDR BIT(2)
+#define WX_RDB_PL_CFG_L2HDR BIT(3)
+#define WX_RDB_PL_CFG_TUN_TUNHDR BIT(4)
+#define WX_RDB_PL_CFG_TUN_OUTL2HDR BIT(5)
/******************************* PSR Registers *******************************/
/* psr control */
@@ -96,10 +127,24 @@
#define WX_PSR_CTL_MO_SHIFT 5
#define WX_PSR_CTL_MO (0x3 << WX_PSR_CTL_MO_SHIFT)
#define WX_PSR_CTL_TPE BIT(4)
+#define WX_PSR_MAX_SZ 0x15020
+#define WX_PSR_VLAN_CTL 0x15088
+#define WX_PSR_VLAN_CTL_CFIEN BIT(29) /* bit 29 */
+#define WX_PSR_VLAN_CTL_VFE BIT(30) /* bit 30 */
/* mcasst/ucast overflow tbl */
#define WX_PSR_MC_TBL(_i) (0x15200 + ((_i) * 4))
#define WX_PSR_UC_TBL(_i) (0x15400 + ((_i) * 4))
+/* VM L2 contorl */
+#define WX_PSR_VM_L2CTL(_i) (0x15600 + ((_i) * 4))
+#define WX_PSR_VM_L2CTL_UPE BIT(4) /* unicast promiscuous */
+#define WX_PSR_VM_L2CTL_VACC BIT(6) /* accept nomatched vlan */
+#define WX_PSR_VM_L2CTL_AUPE BIT(8) /* accept untagged packets */
+#define WX_PSR_VM_L2CTL_ROMPE BIT(9) /* accept packets in MTA tbl */
+#define WX_PSR_VM_L2CTL_ROPE BIT(10) /* accept packets in UC tbl */
+#define WX_PSR_VM_L2CTL_BAM BIT(11) /* accept broadcast packets */
+#define WX_PSR_VM_L2CTL_MPE BIT(12) /* multicast promiscuous */
+
/* Management */
#define WX_PSR_MNG_FLEX_SEL 0x1582C
#define WX_PSR_MNG_FLEX_DW_L(_i) (0x15A00 + ((_i) * 16))
@@ -113,14 +158,35 @@
/* mac switcher */
#define WX_PSR_MAC_SWC_AD_L 0x16200
#define WX_PSR_MAC_SWC_AD_H 0x16204
-#define WX_PSR_MAC_SWC_AD_H_AD(v) (((v) & 0xFFFF))
-#define WX_PSR_MAC_SWC_AD_H_ADTYPE(v) (((v) & 0x1) << 30)
+#define WX_PSR_MAC_SWC_AD_H_AD(v) FIELD_PREP(U16_MAX, v)
+#define WX_PSR_MAC_SWC_AD_H_ADTYPE(v) FIELD_PREP(BIT(30), v)
#define WX_PSR_MAC_SWC_AD_H_AV BIT(31)
#define WX_PSR_MAC_SWC_VM_L 0x16208
#define WX_PSR_MAC_SWC_VM_H 0x1620C
#define WX_PSR_MAC_SWC_IDX 0x16210
#define WX_CLEAR_VMDQ_ALL 0xFFFFFFFFU
+/********************************* RSEC **************************************/
+/* general rsec */
+#define WX_RSC_CTL 0x17000
+#define WX_RSC_CTL_SAVE_MAC_ERR BIT(6)
+#define WX_RSC_CTL_CRC_STRIP BIT(2)
+#define WX_RSC_CTL_RX_DIS BIT(1)
+#define WX_RSC_ST 0x17004
+#define WX_RSC_ST_RSEC_RDY BIT(0)
+
+/****************************** TDB ******************************************/
+#define WX_TDB_PB_SZ(_i) (0x1CC00 + ((_i) * 4))
+#define WX_TXPKT_SIZE_MAX 0xA /* Max Tx Packet size */
+
+/****************************** TSEC *****************************************/
+/* Security Control Registers */
+#define WX_TSC_CTL 0x1D000
+#define WX_TSC_CTL_TX_DIS BIT(1)
+#define WX_TSC_CTL_TSEC_DIS BIT(0)
+#define WX_TSC_BUF_AE 0x1D00C
+#define WX_TSC_BUF_AE_THR GENMASK(9, 0)
+
/************************************** MNG ********************************/
#define WX_MNG_SWFW_SYNC 0x1E008
#define WX_MNG_SWFW_SYNC_SW_MB BIT(2)
@@ -133,11 +199,15 @@
/************************************* ETH MAC *****************************/
#define WX_MAC_TX_CFG 0x11000
#define WX_MAC_TX_CFG_TE BIT(0)
+#define WX_MAC_TX_CFG_SPEED_MASK GENMASK(30, 29)
+#define WX_MAC_TX_CFG_SPEED_10G FIELD_PREP(WX_MAC_TX_CFG_SPEED_MASK, 0)
+#define WX_MAC_TX_CFG_SPEED_1G FIELD_PREP(WX_MAC_TX_CFG_SPEED_MASK, 3)
#define WX_MAC_RX_CFG 0x11004
#define WX_MAC_RX_CFG_RE BIT(0)
#define WX_MAC_RX_CFG_JE BIT(8)
#define WX_MAC_PKT_FLT 0x11008
#define WX_MAC_PKT_FLT_PR BIT(0) /* promiscuous mode */
+#define WX_MAC_WDG_TIMEOUT 0x1100C
#define WX_MAC_RX_FLOW_CTRL 0x11090
#define WX_MAC_RX_FLOW_CTRL_RFE BIT(0) /* receive fc enable */
#define WX_MMC_CONTROL 0x11800
@@ -147,10 +217,34 @@
/* Interrupt Registers */
#define WX_BME_CTL 0x12020
#define WX_PX_MISC_IC 0x100
+#define WX_PX_MISC_ICS 0x104
+#define WX_PX_MISC_IEN 0x108
+#define WX_PX_INTA 0x110
+#define WX_PX_GPIE 0x118
+#define WX_PX_GPIE_MODEL BIT(0)
+#define WX_PX_IC 0x120
#define WX_PX_IMS(_i) (0x140 + (_i) * 4)
+#define WX_PX_IMC(_i) (0x150 + (_i) * 4)
+#define WX_PX_ISB_ADDR_L 0x160
+#define WX_PX_ISB_ADDR_H 0x164
#define WX_PX_TRANSACTION_PENDING 0x168
+#define WX_PX_ITRSEL 0x180
+#define WX_PX_ITR(_i) (0x200 + (_i) * 4)
+#define WX_PX_ITR_CNT_WDIS BIT(31)
+#define WX_PX_MISC_IVAR 0x4FC
+#define WX_PX_IVAR(_i) (0x500 + (_i) * 4)
+
+#define WX_PX_IVAR_ALLOC_VAL 0x80 /* Interrupt Allocation valid */
+#define WX_7K_ITR 595
+#define WX_12K_ITR 336
+#define WX_SP_MAX_EITR 0x00000FF8U
+#define WX_EM_MAX_EITR 0x00007FFCU
/* transmit DMA Registers */
+#define WX_PX_TR_BAL(_i) (0x03000 + ((_i) * 0x40))
+#define WX_PX_TR_BAH(_i) (0x03004 + ((_i) * 0x40))
+#define WX_PX_TR_WP(_i) (0x03008 + ((_i) * 0x40))
+#define WX_PX_TR_RP(_i) (0x0300C + ((_i) * 0x40))
#define WX_PX_TR_CFG(_i) (0x03010 + ((_i) * 0x40))
/* Transmit Config masks */
#define WX_PX_TR_CFG_ENABLE BIT(0) /* Ena specific Tx Queue */
@@ -160,8 +254,22 @@
#define WX_PX_TR_CFG_THRE_SHIFT 8
/* Receive DMA Registers */
+#define WX_PX_RR_BAL(_i) (0x01000 + ((_i) * 0x40))
+#define WX_PX_RR_BAH(_i) (0x01004 + ((_i) * 0x40))
+#define WX_PX_RR_WP(_i) (0x01008 + ((_i) * 0x40))
+#define WX_PX_RR_RP(_i) (0x0100C + ((_i) * 0x40))
#define WX_PX_RR_CFG(_i) (0x01010 + ((_i) * 0x40))
/* PX_RR_CFG bit definitions */
+#define WX_PX_RR_CFG_SPLIT_MODE BIT(26)
+#define WX_PX_RR_CFG_RR_THER_SHIFT 16
+#define WX_PX_RR_CFG_RR_HDR_SZ GENMASK(15, 12)
+#define WX_PX_RR_CFG_RR_BUF_SZ GENMASK(11, 8)
+#define WX_PX_RR_CFG_BHDRSIZE_SHIFT 6 /* 64byte resolution (>> 6)
+ * + at bit 8 offset (<< 12)
+ * = (<< 6)
+ */
+#define WX_PX_RR_CFG_BSIZEPKT_SHIFT 2 /* so many KBs */
+#define WX_PX_RR_CFG_RR_SIZE_SHIFT 1
#define WX_PX_RR_CFG_RR_EN BIT(0)
/* Number of 80 microseconds we wait for PCI Express master disable */
@@ -185,6 +293,50 @@
#define WX_SW_REGION_PTR 0x1C
+#define WX_MAC_STATE_DEFAULT 0x1
+#define WX_MAC_STATE_MODIFIED 0x2
+#define WX_MAC_STATE_IN_USE 0x4
+
+#define WX_MAX_RXD 8192
+#define WX_MAX_TXD 8192
+
+/* Supported Rx Buffer Sizes */
+#define WX_RXBUFFER_256 256 /* Used for skb receive header */
+#define WX_RXBUFFER_2K 2048
+#define WX_MAX_RXBUFFER 16384 /* largest size for single descriptor */
+
+#if MAX_SKB_FRAGS < 8
+#define WX_RX_BUFSZ ALIGN(WX_MAX_RXBUFFER / MAX_SKB_FRAGS, 1024)
+#else
+#define WX_RX_BUFSZ WX_RXBUFFER_2K
+#endif
+
+#define WX_RX_BUFFER_WRITE 16 /* Must be power of 2 */
+
+#define WX_MAX_DATA_PER_TXD BIT(14)
+/* Tx Descriptors needed, worst case */
+#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), WX_MAX_DATA_PER_TXD)
+#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
+
+/* Ether Types */
+#define WX_ETH_P_CNM 0x22E7
+
+#define WX_CFG_PORT_ST 0x14404
+
+/******************* Receive Descriptor bit definitions **********************/
+#define WX_RXD_STAT_DD BIT(0) /* Done */
+#define WX_RXD_STAT_EOP BIT(1) /* End of Packet */
+
+#define WX_RXD_ERR_RXE BIT(29) /* Any MAC Error */
+
+/*********************** Transmit Descriptor Config Masks ****************/
+#define WX_TXD_STAT_DD BIT(0) /* Descriptor Done */
+#define WX_TXD_DTYP_DATA 0 /* Adv Data Descriptor */
+#define WX_TXD_PAYLEN_SHIFT 13 /* Desc PAYLEN shift */
+#define WX_TXD_EOP BIT(24) /* End of Packet */
+#define WX_TXD_IFCS BIT(25) /* Insert FCS */
+#define WX_TXD_RS BIT(27) /* Report Status */
+
/* Host Interface Command Structures */
struct wx_hic_hdr {
u8 cmd;
@@ -249,14 +401,23 @@ enum wx_mac_type {
wx_mac_em
};
+enum em_mac_type {
+ em_mac_type_unknown = 0,
+ em_mac_type_mdi,
+ em_mac_type_rgmii
+};
+
struct wx_mac_info {
enum wx_mac_type type;
bool set_lben;
u8 addr[ETH_ALEN];
u8 perm_addr[ETH_ALEN];
+ u32 mta_shadow[128];
s32 mc_filter_type;
u32 mcft_size;
u32 num_rar_entries;
+ u32 rx_pb_size;
+ u32 tx_pb_size;
u32 max_tx_queues;
u32 max_rx_queues;
@@ -284,19 +445,183 @@ struct wx_addr_filter_info {
bool user_set_promisc;
};
+struct wx_mac_addr {
+ u8 addr[ETH_ALEN];
+ u16 state; /* bitmask */
+ u64 pools;
+};
+
enum wx_reset_type {
WX_LAN_RESET = 0,
WX_SW_RESET,
WX_GLOBAL_RESET
};
-struct wx_hw {
+struct wx_cb {
+ dma_addr_t dma;
+ u16 append_cnt; /* number of skb's appended */
+ bool page_released;
+ bool dma_released;
+};
+
+#define WX_CB(skb) ((struct wx_cb *)(skb)->cb)
+
+/* Transmit Descriptor */
+union wx_tx_desc {
+ struct {
+ __le64 buffer_addr; /* Address of descriptor's data buf */
+ __le32 cmd_type_len;
+ __le32 olinfo_status;
+ } read;
+ struct {
+ __le64 rsvd; /* Reserved */
+ __le32 nxtseq_seed;
+ __le32 status;
+ } wb;
+};
+
+/* Receive Descriptor */
+union wx_rx_desc {
+ struct {
+ __le64 pkt_addr; /* Packet buffer address */
+ __le64 hdr_addr; /* Header buffer address */
+ } read;
+ struct {
+ struct {
+ union {
+ __le32 data;
+ struct {
+ __le16 pkt_info; /* RSS, Pkt type */
+ __le16 hdr_info; /* Splithdr, hdrlen */
+ } hs_rss;
+ } lo_dword;
+ union {
+ __le32 rss; /* RSS Hash */
+ struct {
+ __le16 ip_id; /* IP id */
+ __le16 csum; /* Packet Checksum */
+ } csum_ip;
+ } hi_dword;
+ } lower;
+ struct {
+ __le32 status_error; /* ext status/error */
+ __le16 length; /* Packet length */
+ __le16 vlan; /* VLAN tag */
+ } upper;
+ } wb; /* writeback */
+};
+
+#define WX_RX_DESC(R, i) \
+ (&(((union wx_rx_desc *)((R)->desc))[i]))
+#define WX_TX_DESC(R, i) \
+ (&(((union wx_tx_desc *)((R)->desc))[i]))
+
+/* wrapper around a pointer to a socket buffer,
+ * so a DMA handle can be stored along with the buffer
+ */
+struct wx_tx_buffer {
+ union wx_tx_desc *next_to_watch;
+ struct sk_buff *skb;
+ unsigned int bytecount;
+ unsigned short gso_segs;
+ DEFINE_DMA_UNMAP_ADDR(dma);
+ DEFINE_DMA_UNMAP_LEN(len);
+};
+
+struct wx_rx_buffer {
+ struct sk_buff *skb;
+ dma_addr_t dma;
+ dma_addr_t page_dma;
+ struct page *page;
+ unsigned int page_offset;
+ u16 pagecnt_bias;
+};
+
+struct wx_queue_stats {
+ u64 packets;
+ u64 bytes;
+};
+
+/* iterator for handling rings in ring container */
+#define wx_for_each_ring(posm, headm) \
+ for (posm = (headm).ring; posm; posm = posm->next)
+
+struct wx_ring_container {
+ struct wx_ring *ring; /* pointer to linked list of rings */
+ unsigned int total_bytes; /* total bytes processed this int */
+ unsigned int total_packets; /* total packets processed this int */
+ u8 count; /* total number of rings in vector */
+ u8 itr; /* current ITR setting for ring */
+};
+
+struct wx_ring {
+ struct wx_ring *next; /* pointer to next ring in q_vector */
+ struct wx_q_vector *q_vector; /* backpointer to host q_vector */
+ struct net_device *netdev; /* netdev ring belongs to */
+ struct device *dev; /* device for DMA mapping */
+ struct page_pool *page_pool;
+ void *desc; /* descriptor ring memory */
+ union {
+ struct wx_tx_buffer *tx_buffer_info;
+ struct wx_rx_buffer *rx_buffer_info;
+ };
+ u8 __iomem *tail;
+ dma_addr_t dma; /* phys. address of descriptor ring */
+ unsigned int size; /* length in bytes */
+
+ u16 count; /* amount of descriptors */
+
+ u8 queue_index; /* needed for multiqueue queue management */
+ u8 reg_idx; /* holds the special value that gets
+ * the hardware register offset
+ * associated with this ring, which is
+ * different for DCB and RSS modes
+ */
+ u16 next_to_use;
+ u16 next_to_clean;
+ u16 next_to_alloc;
+
+ struct wx_queue_stats stats;
+ struct u64_stats_sync syncp;
+} ____cacheline_internodealigned_in_smp;
+
+struct wx_q_vector {
+ struct wx *wx;
+ int cpu; /* CPU for DCA */
+ int numa_node;
+ u16 v_idx; /* index of q_vector within array, also used for
+ * finding the bit in EICR and friends that
+ * represents the vector for this ring
+ */
+ u16 itr; /* Interrupt throttle rate written to EITR */
+ struct wx_ring_container rx, tx;
+ struct napi_struct napi;
+ struct rcu_head rcu; /* to avoid race with update stats on free */
+
+ char name[IFNAMSIZ + 17];
+
+ /* for dynamic allocation of rings associated with this q_vector */
+ struct wx_ring ring[0] ____cacheline_internodealigned_in_smp;
+};
+
+enum wx_isb_idx {
+ WX_ISB_HEADER,
+ WX_ISB_MISC,
+ WX_ISB_VEC0,
+ WX_ISB_VEC1,
+ WX_ISB_MAX
+};
+
+struct wx {
u8 __iomem *hw_addr;
struct pci_dev *pdev;
+ struct net_device *netdev;
struct wx_bus_info bus;
struct wx_mac_info mac;
+ enum em_mac_type mac_type;
struct wx_eeprom_info eeprom;
struct wx_addr_filter_info addr_ctrl;
+ struct wx_mac_addr *mac_table;
u16 device_id;
u16 vendor_id;
u16 subsystem_device_id;
@@ -304,11 +629,63 @@ struct wx_hw {
u8 revision_id;
u16 oem_ssid;
u16 oem_svid;
+ u16 msg_enable;
bool adapter_stopped;
+ u16 tpid[8];
+ char eeprom_id[32];
+ char *driver_name;
enum wx_reset_type reset_type;
+
+ /* PHY stuff */
+ unsigned int link;
+ int speed;
+ int duplex;
+ struct phy_device *phydev;
+
+ bool wol_enabled;
+ bool ncsi_enabled;
+ bool gpio_ctrl;
+
+ /* Tx fast path data */
+ int num_tx_queues;
+ u16 tx_itr_setting;
+ u16 tx_work_limit;
+
+ /* Rx fast path data */
+ int num_rx_queues;
+ u16 rx_itr_setting;
+ u16 rx_work_limit;
+
+ int num_q_vectors; /* current number of q_vectors for device */
+ int max_q_vectors; /* upper limit of q_vectors for device */
+
+ u32 tx_ring_count;
+ u32 rx_ring_count;
+
+ struct wx_ring *tx_ring[64] ____cacheline_aligned_in_smp;
+ struct wx_ring *rx_ring[64];
+ struct wx_q_vector *q_vector[64];
+
+ unsigned int queues_per_pool;
+ struct msix_entry *msix_entries;
+
+ /* misc interrupt status block */
+ dma_addr_t isb_dma;
+ u32 *isb_mem;
+ u32 isb_tag[WX_ISB_MAX];
+
+#define WX_MAX_RETA_ENTRIES 128
+ u8 rss_indir_tbl[WX_MAX_RETA_ENTRIES];
+
+#define WX_RSS_KEY_SIZE 40 /* size of RSS Hash Key in bytes */
+ u32 *rss_key;
+ u32 wol;
+
+ u16 bd_number;
};
#define WX_INTR_ALL (~0ULL)
+#define WX_INTR_Q(i) BIT(i)
/* register operations */
#define wr32(a, reg, value) writel((value), ((a)->hw_addr + (reg)))
@@ -319,23 +696,23 @@ struct wx_hw {
wr32((a), (reg) + ((off) << 2), (val))
static inline u32
-rd32m(struct wx_hw *wxhw, u32 reg, u32 mask)
+rd32m(struct wx *wx, u32 reg, u32 mask)
{
u32 val;
- val = rd32(wxhw, reg);
+ val = rd32(wx, reg);
return val & mask;
}
static inline void
-wr32m(struct wx_hw *wxhw, u32 reg, u32 mask, u32 field)
+wr32m(struct wx *wx, u32 reg, u32 mask, u32 field)
{
u32 val;
- val = rd32(wxhw, reg);
+ val = rd32(wx, reg);
val = ((val & ~mask) | (field & mask));
- wr32(wxhw, reg, val);
+ wr32(wx, reg, val);
}
/* On some domestic CPU platforms, sometimes IO is not synchronized with
@@ -343,10 +720,10 @@ wr32m(struct wx_hw *wxhw, u32 reg, u32 mask, u32 field)
*/
#define WX_WRITE_FLUSH(H) rd32(H, WX_MIS_PWR)
-#define wx_err(wxhw, fmt, arg...) \
- dev_err(&(wxhw)->pdev->dev, fmt, ##arg)
+#define wx_err(wx, fmt, arg...) \
+ dev_err(&(wx)->pdev->dev, fmt, ##arg)
-#define wx_dbg(wxhw, fmt, arg...) \
- dev_dbg(&(wxhw)->pdev->dev, fmt, ##arg)
+#define wx_dbg(wx, fmt, arg...) \
+ dev_dbg(&(wx)->pdev->dev, fmt, ##arg)
#endif /* _WX_TYPE_H_ */
diff --git a/drivers/net/ethernet/wangxun/ngbe/Makefile b/drivers/net/ethernet/wangxun/ngbe/Makefile
index 391c2cbc1bb4..61a13d98abe7 100644
--- a/drivers/net/ethernet/wangxun/ngbe/Makefile
+++ b/drivers/net/ethernet/wangxun/ngbe/Makefile
@@ -6,4 +6,4 @@
obj-$(CONFIG_NGBE) += ngbe.o
-ngbe-objs := ngbe_main.o ngbe_hw.o
+ngbe-objs := ngbe_main.o ngbe_hw.o ngbe_mdio.o ngbe_ethtool.o
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe.h b/drivers/net/ethernet/wangxun/ngbe/ngbe.h
deleted file mode 100644
index af147ca8605c..000000000000
--- a/drivers/net/ethernet/wangxun/ngbe/ngbe.h
+++ /dev/null
@@ -1,79 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (c) 2019 - 2022 Beijing WangXun Technology Co., Ltd. */
-
-#ifndef _NGBE_H_
-#define _NGBE_H_
-
-#include "ngbe_type.h"
-
-#define NGBE_MAX_FDIR_INDICES 7
-
-#define NGBE_MAX_RX_QUEUES (NGBE_MAX_FDIR_INDICES + 1)
-#define NGBE_MAX_TX_QUEUES (NGBE_MAX_FDIR_INDICES + 1)
-
-#define NGBE_ETH_LENGTH_OF_ADDRESS 6
-#define NGBE_MAX_MSIX_VECTORS 0x09
-#define NGBE_RAR_ENTRIES 32
-
-/* TX/RX descriptor defines */
-#define NGBE_DEFAULT_TXD 512 /* default ring size */
-#define NGBE_DEFAULT_TX_WORK 256
-#define NGBE_MAX_TXD 8192
-#define NGBE_MIN_TXD 128
-
-#define NGBE_DEFAULT_RXD 512 /* default ring size */
-#define NGBE_DEFAULT_RX_WORK 256
-#define NGBE_MAX_RXD 8192
-#define NGBE_MIN_RXD 128
-
-#define NGBE_MAC_STATE_DEFAULT 0x1
-#define NGBE_MAC_STATE_MODIFIED 0x2
-#define NGBE_MAC_STATE_IN_USE 0x4
-
-struct ngbe_mac_addr {
- u8 addr[ETH_ALEN];
- u16 state; /* bitmask */
- u64 pools;
-};
-
-/* board specific private data structure */
-struct ngbe_adapter {
- u8 __iomem *io_addr; /* Mainly for iounmap use */
- /* OS defined structs */
- struct net_device *netdev;
- struct pci_dev *pdev;
-
- /* structs defined in ngbe_hw.h */
- struct ngbe_hw hw;
- struct ngbe_mac_addr *mac_table;
- u16 msg_enable;
-
- /* Tx fast path data */
- int num_tx_queues;
- u16 tx_itr_setting;
- u16 tx_work_limit;
-
- /* Rx fast path data */
- int num_rx_queues;
- u16 rx_itr_setting;
- u16 rx_work_limit;
-
- int num_q_vectors; /* current number of q_vectors for device */
- int max_q_vectors; /* upper limit of q_vectors for device */
-
- u32 tx_ring_count;
- u32 rx_ring_count;
-
-#define NGBE_MAX_RETA_ENTRIES 128
- u8 rss_indir_tbl[NGBE_MAX_RETA_ENTRIES];
-
-#define NGBE_RSS_KEY_SIZE 40 /* size of RSS Hash Key in bytes */
- u32 *rss_key;
- u32 wol;
-
- u16 bd_number;
-};
-
-extern char ngbe_driver_name[];
-
-#endif /* _NGBE_H_ */
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c
new file mode 100644
index 000000000000..5b25834baf38
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2015 - 2023 Beijing WangXun Technology Co., Ltd. */
+
+#include <linux/pci.h>
+#include <linux/phy.h>
+#include <linux/netdevice.h>
+
+#include "../libwx/wx_ethtool.h"
+#include "ngbe_ethtool.h"
+
+static const struct ethtool_ops ngbe_ethtool_ops = {
+ .get_drvinfo = wx_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
+ .nway_reset = phy_ethtool_nway_reset,
+};
+
+void ngbe_set_ethtool_ops(struct net_device *netdev)
+{
+ netdev->ethtool_ops = &ngbe_ethtool_ops;
+}
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.h b/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.h
new file mode 100644
index 000000000000..487074e0eeec
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2015 - 2023 Beijing WangXun Technology Co., Ltd. */
+
+#ifndef _NGBE_ETHTOOL_H_
+#define _NGBE_ETHTOOL_H_
+
+void ngbe_set_ethtool_ops(struct net_device *netdev);
+
+#endif /* _NGBE_ETHTOOL_H_ */
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_hw.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_hw.c
index 0e3923b3737e..6562a2de9527 100644
--- a/drivers/net/ethernet/wangxun/ngbe/ngbe_hw.c
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_hw.c
@@ -9,12 +9,10 @@
#include "../libwx/wx_hw.h"
#include "ngbe_type.h"
#include "ngbe_hw.h"
-#include "ngbe.h"
-int ngbe_eeprom_chksum_hostif(struct ngbe_hw *hw)
+int ngbe_eeprom_chksum_hostif(struct wx *wx)
{
struct wx_hic_read_shadow_ram buffer;
- struct wx_hw *wxhw = &hw->wxhw;
int status;
int tmp;
@@ -27,61 +25,73 @@ int ngbe_eeprom_chksum_hostif(struct ngbe_hw *hw)
/* one word */
buffer.length = 0;
- status = wx_host_interface_command(wxhw, (u32 *)&buffer, sizeof(buffer),
+ status = wx_host_interface_command(wx, (u32 *)&buffer, sizeof(buffer),
WX_HI_COMMAND_TIMEOUT, false);
if (status < 0)
return status;
- tmp = rd32a(wxhw, WX_MNG_MBOX, 1);
+ tmp = rd32a(wx, WX_MNG_MBOX, 1);
if (tmp == NGBE_FW_CMD_ST_PASS)
return 0;
return -EIO;
}
-static int ngbe_reset_misc(struct ngbe_hw *hw)
+static int ngbe_reset_misc(struct wx *wx)
{
- struct wx_hw *wxhw = &hw->wxhw;
-
- wx_reset_misc(wxhw);
- if (hw->mac_type == ngbe_mac_type_rgmii)
- wr32(wxhw, NGBE_MDIO_CLAUSE_SELECT, 0xF);
- if (hw->gpio_ctrl) {
+ wx_reset_misc(wx);
+ if (wx->gpio_ctrl) {
/* gpio0 is used to power on/off control*/
- wr32(wxhw, NGBE_GPIO_DDR, 0x1);
- wr32(wxhw, NGBE_GPIO_DR, NGBE_GPIO_DR_0);
+ wr32(wx, NGBE_GPIO_DDR, 0x1);
+ ngbe_sfp_modules_txrx_powerctl(wx, false);
}
return 0;
}
+void ngbe_sfp_modules_txrx_powerctl(struct wx *wx, bool swi)
+{
+ /* gpio0 is used to power on control . 0 is on */
+ wr32(wx, NGBE_GPIO_DR, swi ? 0 : NGBE_GPIO_DR_0);
+}
+
/**
* ngbe_reset_hw - Perform hardware reset
- * @hw: pointer to hardware structure
+ * @wx: pointer to hardware structure
*
* Resets the hardware by resetting the transmit and receive units, masks
* and clears all interrupts, perform a PHY reset, and perform a link (MAC)
* reset.
**/
-int ngbe_reset_hw(struct ngbe_hw *hw)
+int ngbe_reset_hw(struct wx *wx)
{
- struct wx_hw *wxhw = &hw->wxhw;
- int status = 0;
- u32 reset = 0;
+ u32 val = 0;
+ int ret = 0;
- /* Call adapter stop to disable tx/rx and clear interrupts */
- status = wx_stop_adapter(wxhw);
- if (status != 0)
- return status;
- reset = WX_MIS_RST_LAN_RST(wxhw->bus.func);
- wr32(wxhw, WX_MIS_RST, reset | rd32(wxhw, WX_MIS_RST));
- ngbe_reset_misc(hw);
+ /* Call wx stop to disable tx/rx and clear interrupts */
+ ret = wx_stop_adapter(wx);
+ if (ret != 0)
+ return ret;
+
+ if (wx->mac_type != em_mac_type_mdi) {
+ val = WX_MIS_RST_LAN_RST(wx->bus.func);
+ wr32(wx, WX_MIS_RST, val | rd32(wx, WX_MIS_RST));
+
+ ret = read_poll_timeout(rd32, val,
+ !(val & (BIT(9) << wx->bus.func)), 1000,
+ 100000, false, wx, 0x10028);
+ if (ret) {
+ wx_err(wx, "Lan reset exceed s maximum times.\n");
+ return ret;
+ }
+ }
+ ngbe_reset_misc(wx);
/* Store the permanent mac address */
- wx_get_mac_addr(wxhw, wxhw->mac.perm_addr);
+ wx_get_mac_addr(wx, wx->mac.perm_addr);
/* reset num_rar_entries to 128 */
- wxhw->mac.num_rar_entries = NGBE_RAR_ENTRIES;
- wx_init_rx_addrs(wxhw);
- pci_set_master(wxhw->pdev);
+ wx->mac.num_rar_entries = NGBE_RAR_ENTRIES;
+ wx_init_rx_addrs(wx);
+ pci_set_master(wx->pdev);
return 0;
}
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_hw.h b/drivers/net/ethernet/wangxun/ngbe/ngbe_hw.h
index 42476a3fe57c..a4693e006816 100644
--- a/drivers/net/ethernet/wangxun/ngbe/ngbe_hw.h
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_hw.h
@@ -7,6 +7,7 @@
#ifndef _NGBE_HW_H_
#define _NGBE_HW_H_
-int ngbe_eeprom_chksum_hostif(struct ngbe_hw *hw);
-int ngbe_reset_hw(struct ngbe_hw *hw);
+int ngbe_eeprom_chksum_hostif(struct wx *wx);
+void ngbe_sfp_modules_txrx_powerctl(struct wx *wx, bool swi);
+int ngbe_reset_hw(struct wx *wx);
#endif /* _NGBE_HW_H_ */
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
index f0b24366da18..5b564d348c09 100644
--- a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
@@ -9,12 +9,16 @@
#include <linux/aer.h>
#include <linux/etherdevice.h>
#include <net/ip.h>
+#include <linux/phy.h>
#include "../libwx/wx_type.h"
#include "../libwx/wx_hw.h"
+#include "../libwx/wx_lib.h"
#include "ngbe_type.h"
+#include "ngbe_mdio.h"
#include "ngbe_hw.h"
-#include "ngbe.h"
+#include "ngbe_ethtool.h"
+
char ngbe_driver_name[] = "ngbe";
/* ngbe_pci_tbl - PCI Device ID Table
@@ -39,70 +43,27 @@ static const struct pci_device_id ngbe_pci_tbl[] = {
{ .device = 0 }
};
-static void ngbe_mac_set_default_filter(struct ngbe_adapter *adapter, u8 *addr)
-{
- struct ngbe_hw *hw = &adapter->hw;
-
- memcpy(&adapter->mac_table[0].addr, addr, ETH_ALEN);
- adapter->mac_table[0].pools = 1ULL;
- adapter->mac_table[0].state = (NGBE_MAC_STATE_DEFAULT |
- NGBE_MAC_STATE_IN_USE);
- wx_set_rar(&hw->wxhw, 0, adapter->mac_table[0].addr,
- adapter->mac_table[0].pools,
- WX_PSR_MAC_SWC_AD_H_AV);
-}
-
/**
* ngbe_init_type_code - Initialize the shared code
- * @hw: pointer to hardware structure
+ * @wx: pointer to hardware structure
**/
-static void ngbe_init_type_code(struct ngbe_hw *hw)
+static void ngbe_init_type_code(struct wx *wx)
{
int wol_mask = 0, ncsi_mask = 0;
- struct wx_hw *wxhw = &hw->wxhw;
- u16 type_mask = 0;
+ u16 type_mask = 0, val;
- wxhw->mac.type = wx_mac_em;
- type_mask = (u16)(wxhw->subsystem_device_id & NGBE_OEM_MASK);
- ncsi_mask = wxhw->subsystem_device_id & NGBE_NCSI_MASK;
- wol_mask = wxhw->subsystem_device_id & NGBE_WOL_MASK;
-
- switch (type_mask) {
- case NGBE_SUBID_M88E1512_SFP:
- case NGBE_SUBID_LY_M88E1512_SFP:
- hw->phy.type = ngbe_phy_m88e1512_sfi;
- break;
- case NGBE_SUBID_M88E1512_RJ45:
- hw->phy.type = ngbe_phy_m88e1512;
- break;
- case NGBE_SUBID_M88E1512_MIX:
- hw->phy.type = ngbe_phy_m88e1512_unknown;
- break;
- case NGBE_SUBID_YT8521S_SFP:
- case NGBE_SUBID_YT8521S_SFP_GPIO:
- case NGBE_SUBID_LY_YT8521S_SFP:
- hw->phy.type = ngbe_phy_yt8521s_sfi;
- break;
- case NGBE_SUBID_INTERNAL_YT8521S_SFP:
- case NGBE_SUBID_INTERNAL_YT8521S_SFP_GPIO:
- hw->phy.type = ngbe_phy_internal_yt8521s_sfi;
- break;
- case NGBE_SUBID_RGMII_FPGA:
- case NGBE_SUBID_OCP_CARD:
- fallthrough;
- default:
- hw->phy.type = ngbe_phy_internal;
- break;
- }
+ wx->mac.type = wx_mac_em;
+ type_mask = (u16)(wx->subsystem_device_id & NGBE_OEM_MASK);
+ ncsi_mask = wx->subsystem_device_id & NGBE_NCSI_MASK;
+ wol_mask = wx->subsystem_device_id & NGBE_WOL_MASK;
- if (hw->phy.type == ngbe_phy_internal ||
- hw->phy.type == ngbe_phy_internal_yt8521s_sfi)
- hw->mac_type = ngbe_mac_type_mdi;
- else
- hw->mac_type = ngbe_mac_type_rgmii;
+ val = rd32(wx, WX_CFG_PORT_ST);
+ wx->mac_type = (val & BIT(7)) >> 7 ?
+ em_mac_type_rgmii :
+ em_mac_type_mdi;
- hw->wol_enabled = (wol_mask == NGBE_WOL_SUP) ? 1 : 0;
- hw->ncsi_enabled = (ncsi_mask == NGBE_NCSI_MASK ||
+ wx->wol_enabled = (wol_mask == NGBE_WOL_SUP) ? 1 : 0;
+ wx->ncsi_enabled = (ncsi_mask == NGBE_NCSI_MASK ||
type_mask == NGBE_SUBID_OCP_CARD) ? 1 : 0;
switch (type_mask) {
@@ -110,31 +71,31 @@ static void ngbe_init_type_code(struct ngbe_hw *hw)
case NGBE_SUBID_LY_M88E1512_SFP:
case NGBE_SUBID_YT8521S_SFP_GPIO:
case NGBE_SUBID_INTERNAL_YT8521S_SFP_GPIO:
- hw->gpio_ctrl = 1;
+ wx->gpio_ctrl = 1;
break;
default:
- hw->gpio_ctrl = 0;
+ wx->gpio_ctrl = 0;
break;
}
}
/**
- * ngbe_init_rss_key - Initialize adapter RSS key
- * @adapter: device handle
+ * ngbe_init_rss_key - Initialize wx RSS key
+ * @wx: device handle
*
* Allocates and initializes the RSS key if it is not allocated.
**/
-static inline int ngbe_init_rss_key(struct ngbe_adapter *adapter)
+static inline int ngbe_init_rss_key(struct wx *wx)
{
u32 *rss_key;
- if (!adapter->rss_key) {
- rss_key = kzalloc(NGBE_RSS_KEY_SIZE, GFP_KERNEL);
+ if (!wx->rss_key) {
+ rss_key = kzalloc(WX_RSS_KEY_SIZE, GFP_KERNEL);
if (unlikely(!rss_key))
return -ENOMEM;
- netdev_rss_key_fill(rss_key, NGBE_RSS_KEY_SIZE);
- adapter->rss_key = rss_key;
+ netdev_rss_key_fill(rss_key, WX_RSS_KEY_SIZE);
+ wx->rss_key = rss_key;
}
return 0;
@@ -142,72 +103,263 @@ static inline int ngbe_init_rss_key(struct ngbe_adapter *adapter)
/**
* ngbe_sw_init - Initialize general software structures
- * @adapter: board private structure to initialize
+ * @wx: board private structure to initialize
**/
-static int ngbe_sw_init(struct ngbe_adapter *adapter)
+static int ngbe_sw_init(struct wx *wx)
{
- struct pci_dev *pdev = adapter->pdev;
- struct ngbe_hw *hw = &adapter->hw;
- struct wx_hw *wxhw = &hw->wxhw;
+ struct pci_dev *pdev = wx->pdev;
u16 msix_count = 0;
int err = 0;
- wxhw->hw_addr = adapter->io_addr;
- wxhw->pdev = pdev;
+ wx->mac.num_rar_entries = NGBE_RAR_ENTRIES;
+ wx->mac.max_rx_queues = NGBE_MAX_RX_QUEUES;
+ wx->mac.max_tx_queues = NGBE_MAX_TX_QUEUES;
+ wx->mac.mcft_size = NGBE_MC_TBL_SIZE;
+ wx->mac.rx_pb_size = NGBE_RX_PB_SIZE;
+ wx->mac.tx_pb_size = NGBE_TDB_PB_SZ;
/* PCI config space info */
- err = wx_sw_init(wxhw);
+ err = wx_sw_init(wx);
if (err < 0) {
- netif_err(adapter, probe, adapter->netdev,
- "Read of internal subsystem device id failed\n");
+ wx_err(wx, "read of internal subsystem device id failed\n");
return err;
}
/* mac type, phy type , oem type */
- ngbe_init_type_code(hw);
+ ngbe_init_type_code(wx);
- wxhw->mac.max_rx_queues = NGBE_MAX_RX_QUEUES;
- wxhw->mac.max_tx_queues = NGBE_MAX_TX_QUEUES;
- wxhw->mac.num_rar_entries = NGBE_RAR_ENTRIES;
/* Set common capability flags and settings */
- adapter->max_q_vectors = NGBE_MAX_MSIX_VECTORS;
-
- err = wx_get_pcie_msix_counts(wxhw, &msix_count, NGBE_MAX_MSIX_VECTORS);
+ wx->max_q_vectors = NGBE_MAX_MSIX_VECTORS;
+ err = wx_get_pcie_msix_counts(wx, &msix_count, NGBE_MAX_MSIX_VECTORS);
if (err)
dev_err(&pdev->dev, "Do not support MSI-X\n");
- wxhw->mac.max_msix_vectors = msix_count;
+ wx->mac.max_msix_vectors = msix_count;
- adapter->mac_table = kcalloc(wxhw->mac.num_rar_entries,
- sizeof(struct ngbe_mac_addr),
- GFP_KERNEL);
- if (!adapter->mac_table) {
- dev_err(&pdev->dev, "mac_table allocation failed: %d\n", err);
- return -ENOMEM;
- }
-
- if (ngbe_init_rss_key(adapter))
+ if (ngbe_init_rss_key(wx))
return -ENOMEM;
/* enable itr by default in dynamic mode */
- adapter->rx_itr_setting = 1;
- adapter->tx_itr_setting = 1;
+ wx->rx_itr_setting = 1;
+ wx->tx_itr_setting = 1;
/* set default ring sizes */
- adapter->tx_ring_count = NGBE_DEFAULT_TXD;
- adapter->rx_ring_count = NGBE_DEFAULT_RXD;
+ wx->tx_ring_count = NGBE_DEFAULT_TXD;
+ wx->rx_ring_count = NGBE_DEFAULT_RXD;
/* set default work limits */
- adapter->tx_work_limit = NGBE_DEFAULT_TX_WORK;
- adapter->rx_work_limit = NGBE_DEFAULT_RX_WORK;
+ wx->tx_work_limit = NGBE_DEFAULT_TX_WORK;
+ wx->rx_work_limit = NGBE_DEFAULT_RX_WORK;
return 0;
}
-static void ngbe_down(struct ngbe_adapter *adapter)
+/**
+ * ngbe_irq_enable - Enable default interrupt generation settings
+ * @wx: board private structure
+ * @queues: enable all queues interrupts
+ **/
+static void ngbe_irq_enable(struct wx *wx, bool queues)
{
- netif_carrier_off(adapter->netdev);
- netif_tx_disable(adapter->netdev);
-};
+ u32 mask;
+
+ /* enable misc interrupt */
+ mask = NGBE_PX_MISC_IEN_MASK;
+
+ wr32(wx, WX_GPIO_DDR, WX_GPIO_DDR_0);
+ wr32(wx, WX_GPIO_INTEN, WX_GPIO_INTEN_0 | WX_GPIO_INTEN_1);
+ wr32(wx, WX_GPIO_INTTYPE_LEVEL, 0x0);
+ wr32(wx, WX_GPIO_POLARITY, wx->gpio_ctrl ? 0 : 0x3);
+
+ wr32(wx, WX_PX_MISC_IEN, mask);
+
+ /* mask interrupt */
+ if (queues)
+ wx_intr_enable(wx, NGBE_INTR_ALL);
+ else
+ wx_intr_enable(wx, NGBE_INTR_MISC(wx));
+}
+
+/**
+ * ngbe_intr - msi/legacy mode Interrupt Handler
+ * @irq: interrupt number
+ * @data: pointer to a network interface device structure
+ **/
+static irqreturn_t ngbe_intr(int __always_unused irq, void *data)
+{
+ struct wx_q_vector *q_vector;
+ struct wx *wx = data;
+ struct pci_dev *pdev;
+ u32 eicr;
+
+ q_vector = wx->q_vector[0];
+ pdev = wx->pdev;
+
+ eicr = wx_misc_isb(wx, WX_ISB_VEC0);
+ if (!eicr) {
+ /* shared interrupt alert!
+ * the interrupt that we masked before the EICR read.
+ */
+ if (netif_running(wx->netdev))
+ ngbe_irq_enable(wx, true);
+ return IRQ_NONE; /* Not our interrupt */
+ }
+ wx->isb_mem[WX_ISB_VEC0] = 0;
+ if (!(pdev->msi_enabled))
+ wr32(wx, WX_PX_INTA, 1);
+
+ wx->isb_mem[WX_ISB_MISC] = 0;
+ /* would disable interrupts here but it is auto disabled */
+ napi_schedule_irqoff(&q_vector->napi);
+
+ if (netif_running(wx->netdev))
+ ngbe_irq_enable(wx, false);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t ngbe_msix_other(int __always_unused irq, void *data)
+{
+ struct wx *wx = data;
+
+ /* re-enable the original interrupt state, no lsc, no queues */
+ if (netif_running(wx->netdev))
+ ngbe_irq_enable(wx, false);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ngbe_request_msix_irqs - Initialize MSI-X interrupts
+ * @wx: board private structure
+ *
+ * ngbe_request_msix_irqs allocates MSI-X vectors and requests
+ * interrupts from the kernel.
+ **/
+static int ngbe_request_msix_irqs(struct wx *wx)
+{
+ struct net_device *netdev = wx->netdev;
+ int vector, err;
+
+ for (vector = 0; vector < wx->num_q_vectors; vector++) {
+ struct wx_q_vector *q_vector = wx->q_vector[vector];
+ struct msix_entry *entry = &wx->msix_entries[vector];
+
+ if (q_vector->tx.ring && q_vector->rx.ring)
+ snprintf(q_vector->name, sizeof(q_vector->name) - 1,
+ "%s-TxRx-%d", netdev->name, entry->entry);
+ else
+ /* skip this unused q_vector */
+ continue;
+
+ err = request_irq(entry->vector, wx_msix_clean_rings, 0,
+ q_vector->name, q_vector);
+ if (err) {
+ wx_err(wx, "request_irq failed for MSIX interrupt %s Error: %d\n",
+ q_vector->name, err);
+ goto free_queue_irqs;
+ }
+ }
+
+ err = request_irq(wx->msix_entries[vector].vector,
+ ngbe_msix_other, 0, netdev->name, wx);
+
+ if (err) {
+ wx_err(wx, "request_irq for msix_other failed: %d\n", err);
+ goto free_queue_irqs;
+ }
+
+ return 0;
+
+free_queue_irqs:
+ while (vector) {
+ vector--;
+ free_irq(wx->msix_entries[vector].vector,
+ wx->q_vector[vector]);
+ }
+ wx_reset_interrupt_capability(wx);
+ return err;
+}
+
+/**
+ * ngbe_request_irq - initialize interrupts
+ * @wx: board private structure
+ *
+ * Attempts to configure interrupts using the best available
+ * capabilities of the hardware and kernel.
+ **/
+static int ngbe_request_irq(struct wx *wx)
+{
+ struct net_device *netdev = wx->netdev;
+ struct pci_dev *pdev = wx->pdev;
+ int err;
+
+ if (pdev->msix_enabled)
+ err = ngbe_request_msix_irqs(wx);
+ else if (pdev->msi_enabled)
+ err = request_irq(pdev->irq, ngbe_intr, 0,
+ netdev->name, wx);
+ else
+ err = request_irq(pdev->irq, ngbe_intr, IRQF_SHARED,
+ netdev->name, wx);
+
+ if (err)
+ wx_err(wx, "request_irq failed, Error %d\n", err);
+
+ return err;
+}
+
+static void ngbe_disable_device(struct wx *wx)
+{
+ struct net_device *netdev = wx->netdev;
+ u32 i;
+
+ /* disable all enabled rx queues */
+ for (i = 0; i < wx->num_rx_queues; i++)
+ /* this call also flushes the previous write */
+ wx_disable_rx_queue(wx, wx->rx_ring[i]);
+ /* disable receives */
+ wx_disable_rx(wx);
+ wx_napi_disable_all(wx);
+ netif_tx_stop_all_queues(netdev);
+ netif_tx_disable(netdev);
+ if (wx->gpio_ctrl)
+ ngbe_sfp_modules_txrx_powerctl(wx, false);
+ wx_irq_disable(wx);
+ /* disable transmits in the hardware now that interrupts are off */
+ for (i = 0; i < wx->num_tx_queues; i++) {
+ u8 reg_idx = wx->tx_ring[i]->reg_idx;
+
+ wr32(wx, WX_PX_TR_CFG(reg_idx), WX_PX_TR_CFG_SWFLSH);
+ }
+}
+
+static void ngbe_down(struct wx *wx)
+{
+ phy_stop(wx->phydev);
+ ngbe_disable_device(wx);
+ wx_clean_all_tx_rings(wx);
+ wx_clean_all_rx_rings(wx);
+}
+
+static void ngbe_up(struct wx *wx)
+{
+ wx_configure_vectors(wx);
+
+ /* make sure to complete pre-operations */
+ smp_mb__before_atomic();
+ wx_napi_enable_all(wx);
+ /* enable transmits */
+ netif_tx_start_all_queues(wx->netdev);
+
+ /* clear any pending interrupts, may auto mask */
+ rd32(wx, WX_PX_IC);
+ rd32(wx, WX_PX_MISC_IC);
+ ngbe_irq_enable(wx, true);
+ if (wx->gpio_ctrl)
+ ngbe_sfp_modules_txrx_powerctl(wx, true);
+
+ phy_start(wx->phydev);
+}
/**
* ngbe_open - Called when a network interface is made active
@@ -220,13 +372,43 @@ static void ngbe_down(struct ngbe_adapter *adapter)
**/
static int ngbe_open(struct net_device *netdev)
{
- struct ngbe_adapter *adapter = netdev_priv(netdev);
- struct ngbe_hw *hw = &adapter->hw;
- struct wx_hw *wxhw = &hw->wxhw;
+ struct wx *wx = netdev_priv(netdev);
+ int err;
+
+ wx_control_hw(wx, true);
+
+ err = wx_setup_resources(wx);
+ if (err)
+ return err;
+
+ wx_configure(wx);
+
+ err = ngbe_request_irq(wx);
+ if (err)
+ goto err_free_resources;
+
+ err = ngbe_phy_connect(wx);
+ if (err)
+ goto err_free_irq;
+
+ err = netif_set_real_num_tx_queues(netdev, wx->num_tx_queues);
+ if (err)
+ goto err_dis_phy;
+
+ err = netif_set_real_num_rx_queues(netdev, wx->num_rx_queues);
+ if (err)
+ goto err_dis_phy;
- wx_control_hw(wxhw, true);
+ ngbe_up(wx);
return 0;
+err_dis_phy:
+ phy_disconnect(wx->phydev);
+err_free_irq:
+ wx_free_irq(wx);
+err_free_resources:
+ wx_free_resources(wx);
+ return err;
}
/**
@@ -242,66 +424,40 @@ static int ngbe_open(struct net_device *netdev)
**/
static int ngbe_close(struct net_device *netdev)
{
- struct ngbe_adapter *adapter = netdev_priv(netdev);
-
- ngbe_down(adapter);
- wx_control_hw(&adapter->hw.wxhw, false);
-
- return 0;
-}
-
-static netdev_tx_t ngbe_xmit_frame(struct sk_buff *skb,
- struct net_device *netdev)
-{
- return NETDEV_TX_OK;
-}
-
-/**
- * ngbe_set_mac - Change the Ethernet Address of the NIC
- * @netdev: network interface device structure
- * @p: pointer to an address structure
- *
- * Returns 0 on success, negative on failure
- **/
-static int ngbe_set_mac(struct net_device *netdev, void *p)
-{
- struct ngbe_adapter *adapter = netdev_priv(netdev);
- struct wx_hw *wxhw = &adapter->hw.wxhw;
- struct sockaddr *addr = p;
+ struct wx *wx = netdev_priv(netdev);
- if (!is_valid_ether_addr(addr->sa_data))
- return -EADDRNOTAVAIL;
-
- eth_hw_addr_set(netdev, addr->sa_data);
- memcpy(wxhw->mac.addr, addr->sa_data, netdev->addr_len);
-
- ngbe_mac_set_default_filter(adapter, wxhw->mac.addr);
+ ngbe_down(wx);
+ wx_free_irq(wx);
+ wx_free_resources(wx);
+ phy_disconnect(wx->phydev);
+ wx_control_hw(wx, false);
return 0;
}
static void ngbe_dev_shutdown(struct pci_dev *pdev, bool *enable_wake)
{
- struct ngbe_adapter *adapter = pci_get_drvdata(pdev);
- struct net_device *netdev = adapter->netdev;
+ struct wx *wx = pci_get_drvdata(pdev);
+ struct net_device *netdev;
+ netdev = wx->netdev;
netif_device_detach(netdev);
rtnl_lock();
if (netif_running(netdev))
- ngbe_down(adapter);
+ ngbe_down(wx);
rtnl_unlock();
- wx_control_hw(&adapter->hw.wxhw, false);
+ wx_control_hw(wx, false);
pci_disable_device(pdev);
}
static void ngbe_shutdown(struct pci_dev *pdev)
{
- struct ngbe_adapter *adapter = pci_get_drvdata(pdev);
+ struct wx *wx = pci_get_drvdata(pdev);
bool wake;
- wake = !!adapter->wol;
+ wake = !!wx->wol;
ngbe_dev_shutdown(pdev, &wake);
@@ -314,9 +470,11 @@ static void ngbe_shutdown(struct pci_dev *pdev)
static const struct net_device_ops ngbe_netdev_ops = {
.ndo_open = ngbe_open,
.ndo_stop = ngbe_close,
- .ndo_start_xmit = ngbe_xmit_frame,
+ .ndo_start_xmit = wx_xmit_frame,
+ .ndo_set_rx_mode = wx_set_rx_mode,
.ndo_validate_addr = eth_validate_addr,
- .ndo_set_mac_address = ngbe_set_mac,
+ .ndo_set_mac_address = wx_set_mac,
+ .ndo_get_stats64 = wx_get_stats64,
};
/**
@@ -326,18 +484,16 @@ static const struct net_device_ops ngbe_netdev_ops = {
*
* Returns 0 on success, negative on failure
*
- * ngbe_probe initializes an adapter identified by a pci_dev structure.
- * The OS initialization, configuring of the adapter private structure,
+ * ngbe_probe initializes an wx identified by a pci_dev structure.
+ * The OS initialization, configuring of the wx private structure,
* and a hardware reset occur.
**/
static int ngbe_probe(struct pci_dev *pdev,
const struct pci_device_id __always_unused *ent)
{
- struct ngbe_adapter *adapter = NULL;
- struct ngbe_hw *hw = NULL;
- struct wx_hw *wxhw = NULL;
struct net_device *netdev;
u32 e2rom_cksum_cap = 0;
+ struct wx *wx = NULL;
static int func_nums;
u16 e2rom_ver = 0;
u32 etrack_id = 0;
@@ -368,7 +524,7 @@ static int ngbe_probe(struct pci_dev *pdev,
pci_set_master(pdev);
netdev = devm_alloc_etherdev_mqs(&pdev->dev,
- sizeof(struct ngbe_adapter),
+ sizeof(struct wx),
NGBE_MAX_TX_QUEUES,
NGBE_MAX_RX_QUEUES);
if (!netdev) {
@@ -378,63 +534,74 @@ static int ngbe_probe(struct pci_dev *pdev,
SET_NETDEV_DEV(netdev, &pdev->dev);
- adapter = netdev_priv(netdev);
- adapter->netdev = netdev;
- adapter->pdev = pdev;
- hw = &adapter->hw;
- wxhw = &hw->wxhw;
- adapter->msg_enable = BIT(3) - 1;
-
- adapter->io_addr = devm_ioremap(&pdev->dev,
- pci_resource_start(pdev, 0),
- pci_resource_len(pdev, 0));
- if (!adapter->io_addr) {
+ wx = netdev_priv(netdev);
+ wx->netdev = netdev;
+ wx->pdev = pdev;
+ wx->msg_enable = BIT(3) - 1;
+
+ wx->hw_addr = devm_ioremap(&pdev->dev,
+ pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
+ if (!wx->hw_addr) {
err = -EIO;
goto err_pci_release_regions;
}
+ wx->driver_name = ngbe_driver_name;
+ ngbe_set_ethtool_ops(netdev);
netdev->netdev_ops = &ngbe_netdev_ops;
netdev->features |= NETIF_F_HIGHDMA;
+ netdev->features = NETIF_F_SG;
- adapter->bd_number = func_nums;
+ /* copy netdev features into list of user selectable features */
+ netdev->hw_features |= netdev->features |
+ NETIF_F_RXALL;
+
+ netdev->priv_flags |= IFF_UNICAST_FLT;
+ netdev->priv_flags |= IFF_SUPP_NOFCS;
+
+ netdev->min_mtu = ETH_MIN_MTU;
+ netdev->max_mtu = NGBE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN);
+
+ wx->bd_number = func_nums;
/* setup the private structure */
- err = ngbe_sw_init(adapter);
+ err = ngbe_sw_init(wx);
if (err)
goto err_free_mac_table;
/* check if flash load is done after hw power up */
- err = wx_check_flash_load(wxhw, NGBE_SPI_ILDR_STATUS_PERST);
+ err = wx_check_flash_load(wx, NGBE_SPI_ILDR_STATUS_PERST);
if (err)
goto err_free_mac_table;
- err = wx_check_flash_load(wxhw, NGBE_SPI_ILDR_STATUS_PWRRST);
+ err = wx_check_flash_load(wx, NGBE_SPI_ILDR_STATUS_PWRRST);
if (err)
goto err_free_mac_table;
- err = wx_mng_present(wxhw);
+ err = wx_mng_present(wx);
if (err) {
dev_err(&pdev->dev, "Management capability is not present\n");
goto err_free_mac_table;
}
- err = ngbe_reset_hw(hw);
+ err = ngbe_reset_hw(wx);
if (err) {
dev_err(&pdev->dev, "HW Init failed: %d\n", err);
goto err_free_mac_table;
}
- if (wxhw->bus.func == 0) {
- wr32(wxhw, NGBE_CALSUM_CAP_STATUS, 0x0);
- wr32(wxhw, NGBE_EEPROM_VERSION_STORE_REG, 0x0);
+ if (wx->bus.func == 0) {
+ wr32(wx, NGBE_CALSUM_CAP_STATUS, 0x0);
+ wr32(wx, NGBE_EEPROM_VERSION_STORE_REG, 0x0);
} else {
- e2rom_cksum_cap = rd32(wxhw, NGBE_CALSUM_CAP_STATUS);
- saved_ver = rd32(wxhw, NGBE_EEPROM_VERSION_STORE_REG);
+ e2rom_cksum_cap = rd32(wx, NGBE_CALSUM_CAP_STATUS);
+ saved_ver = rd32(wx, NGBE_EEPROM_VERSION_STORE_REG);
}
- wx_init_eeprom_params(wxhw);
- if (wxhw->bus.func == 0 || e2rom_cksum_cap == 0) {
+ wx_init_eeprom_params(wx);
+ if (wx->bus.func == 0 || e2rom_cksum_cap == 0) {
/* make sure the EEPROM is ready */
- err = ngbe_eeprom_chksum_hostif(hw);
+ err = ngbe_eeprom_chksum_hostif(wx);
if (err) {
dev_err(&pdev->dev, "The EEPROM Checksum Is Not Valid\n");
err = -EIO;
@@ -442,14 +609,14 @@ static int ngbe_probe(struct pci_dev *pdev,
}
}
- adapter->wol = 0;
- if (hw->wol_enabled)
- adapter->wol = NGBE_PSR_WKUP_CTL_MAG;
+ wx->wol = 0;
+ if (wx->wol_enabled)
+ wx->wol = NGBE_PSR_WKUP_CTL_MAG;
- hw->wol_enabled = !!(adapter->wol);
- wr32(wxhw, NGBE_PSR_WKUP_CTL, adapter->wol);
+ wx->wol_enabled = !!(wx->wol);
+ wr32(wx, NGBE_PSR_WKUP_CTL, wx->wol);
- device_set_wakeup_enable(&pdev->dev, adapter->wol);
+ device_set_wakeup_enable(&pdev->dev, wx->wol);
/* Save off EEPROM version number and Option Rom version which
* together make a unique identify for the eeprom
@@ -457,37 +624,50 @@ static int ngbe_probe(struct pci_dev *pdev,
if (saved_ver) {
etrack_id = saved_ver;
} else {
- wx_read_ee_hostif(wxhw,
- wxhw->eeprom.sw_region_offset + NGBE_EEPROM_VERSION_H,
+ wx_read_ee_hostif(wx,
+ wx->eeprom.sw_region_offset + NGBE_EEPROM_VERSION_H,
&e2rom_ver);
etrack_id = e2rom_ver << 16;
- wx_read_ee_hostif(wxhw,
- wxhw->eeprom.sw_region_offset + NGBE_EEPROM_VERSION_L,
+ wx_read_ee_hostif(wx,
+ wx->eeprom.sw_region_offset + NGBE_EEPROM_VERSION_L,
&e2rom_ver);
etrack_id |= e2rom_ver;
- wr32(wxhw, NGBE_EEPROM_VERSION_STORE_REG, etrack_id);
+ wr32(wx, NGBE_EEPROM_VERSION_STORE_REG, etrack_id);
}
+ snprintf(wx->eeprom_id, sizeof(wx->eeprom_id),
+ "0x%08x", etrack_id);
+
+ eth_hw_addr_set(netdev, wx->mac.perm_addr);
+ wx_mac_set_default_filter(wx, wx->mac.perm_addr);
- eth_hw_addr_set(netdev, wxhw->mac.perm_addr);
- ngbe_mac_set_default_filter(adapter, wxhw->mac.perm_addr);
+ err = wx_init_interrupt_scheme(wx);
+ if (err)
+ goto err_free_mac_table;
+
+ /* phy Interface Configuration */
+ err = ngbe_mdio_init(wx);
+ if (err)
+ goto err_clear_interrupt_scheme;
err = register_netdev(netdev);
if (err)
goto err_register;
- pci_set_drvdata(pdev, adapter);
+ pci_set_drvdata(pdev, wx);
- netif_info(adapter, probe, netdev,
+ netif_info(wx, probe, netdev,
"PHY: %s, PBA No: Wang Xun GbE Family Controller\n",
- hw->phy.type == ngbe_phy_internal ? "Internal" : "External");
- netif_info(adapter, probe, netdev, "%pM\n", netdev->dev_addr);
+ wx->mac_type == em_mac_type_mdi ? "Internal" : "External");
+ netif_info(wx, probe, netdev, "%pM\n", netdev->dev_addr);
return 0;
err_register:
- wx_control_hw(wxhw, false);
+ wx_control_hw(wx, false);
+err_clear_interrupt_scheme:
+ wx_clear_interrupt_scheme(wx);
err_free_mac_table:
- kfree(adapter->mac_table);
+ kfree(wx->mac_table);
err_pci_release_regions:
pci_disable_pcie_error_reporting(pdev);
pci_release_selected_regions(pdev,
@@ -508,15 +688,16 @@ err_pci_disable_dev:
**/
static void ngbe_remove(struct pci_dev *pdev)
{
- struct ngbe_adapter *adapter = pci_get_drvdata(pdev);
+ struct wx *wx = pci_get_drvdata(pdev);
struct net_device *netdev;
- netdev = adapter->netdev;
+ netdev = wx->netdev;
unregister_netdev(netdev);
pci_release_selected_regions(pdev,
pci_select_bars(pdev, IORESOURCE_MEM));
- kfree(adapter->mac_table);
+ kfree(wx->mac_table);
+ wx_clear_interrupt_scheme(wx);
pci_disable_pcie_error_reporting(pdev);
pci_disable_device(pdev);
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c
new file mode 100644
index 000000000000..c9ddbbc3fa4f
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c
@@ -0,0 +1,286 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019 - 2022 Beijing WangXun Technology Co., Ltd. */
+
+#include <linux/ethtool.h>
+#include <linux/iopoll.h>
+#include <linux/pci.h>
+#include <linux/phy.h>
+
+#include "../libwx/wx_type.h"
+#include "../libwx/wx_hw.h"
+#include "ngbe_type.h"
+#include "ngbe_mdio.h"
+
+static int ngbe_phy_read_reg_internal(struct mii_bus *bus, int phy_addr, int regnum)
+{
+ struct wx *wx = bus->priv;
+
+ if (phy_addr != 0)
+ return 0xffff;
+ return (u16)rd32(wx, NGBE_PHY_CONFIG(regnum));
+}
+
+static int ngbe_phy_write_reg_internal(struct mii_bus *bus, int phy_addr, int regnum, u16 value)
+{
+ struct wx *wx = bus->priv;
+
+ if (phy_addr == 0)
+ wr32(wx, NGBE_PHY_CONFIG(regnum), value);
+ return 0;
+}
+
+static int ngbe_phy_read_reg_mdi_c22(struct mii_bus *bus, int phy_addr, int regnum)
+{
+ u32 command, val, device_type = 0;
+ struct wx *wx = bus->priv;
+ int ret;
+
+ wr32(wx, NGBE_MDIO_CLAUSE_SELECT, 0xF);
+ /* setup and write the address cycle command */
+ command = NGBE_MSCA_RA(regnum) |
+ NGBE_MSCA_PA(phy_addr) |
+ NGBE_MSCA_DA(device_type);
+ wr32(wx, NGBE_MSCA, command);
+ command = NGBE_MSCC_CMD(NGBE_MSCA_CMD_READ) |
+ NGBE_MSCC_BUSY |
+ NGBE_MDIO_CLK(6);
+ wr32(wx, NGBE_MSCC, command);
+
+ /* wait to complete */
+ ret = read_poll_timeout(rd32, val, !(val & NGBE_MSCC_BUSY), 1000,
+ 100000, false, wx, NGBE_MSCC);
+ if (ret) {
+ wx_err(wx, "Mdio read c22 command did not complete.\n");
+ return ret;
+ }
+
+ return (u16)rd32(wx, NGBE_MSCC);
+}
+
+static int ngbe_phy_write_reg_mdi_c22(struct mii_bus *bus, int phy_addr, int regnum, u16 value)
+{
+ u32 command, val, device_type = 0;
+ struct wx *wx = bus->priv;
+ int ret;
+
+ wr32(wx, NGBE_MDIO_CLAUSE_SELECT, 0xF);
+ /* setup and write the address cycle command */
+ command = NGBE_MSCA_RA(regnum) |
+ NGBE_MSCA_PA(phy_addr) |
+ NGBE_MSCA_DA(device_type);
+ wr32(wx, NGBE_MSCA, command);
+ command = value |
+ NGBE_MSCC_CMD(NGBE_MSCA_CMD_WRITE) |
+ NGBE_MSCC_BUSY |
+ NGBE_MDIO_CLK(6);
+ wr32(wx, NGBE_MSCC, command);
+
+ /* wait to complete */
+ ret = read_poll_timeout(rd32, val, !(val & NGBE_MSCC_BUSY), 1000,
+ 100000, false, wx, NGBE_MSCC);
+ if (ret)
+ wx_err(wx, "Mdio write c22 command did not complete.\n");
+
+ return ret;
+}
+
+static int ngbe_phy_read_reg_mdi_c45(struct mii_bus *bus, int phy_addr, int devnum, int regnum)
+{
+ struct wx *wx = bus->priv;
+ u32 val, command;
+ int ret;
+
+ wr32(wx, NGBE_MDIO_CLAUSE_SELECT, 0x0);
+ /* setup and write the address cycle command */
+ command = NGBE_MSCA_RA(regnum) |
+ NGBE_MSCA_PA(phy_addr) |
+ NGBE_MSCA_DA(devnum);
+ wr32(wx, NGBE_MSCA, command);
+ command = NGBE_MSCC_CMD(NGBE_MSCA_CMD_READ) |
+ NGBE_MSCC_BUSY |
+ NGBE_MDIO_CLK(6);
+ wr32(wx, NGBE_MSCC, command);
+
+ /* wait to complete */
+ ret = read_poll_timeout(rd32, val, !(val & NGBE_MSCC_BUSY), 1000,
+ 100000, false, wx, NGBE_MSCC);
+ if (ret) {
+ wx_err(wx, "Mdio read c45 command did not complete.\n");
+ return ret;
+ }
+
+ return (u16)rd32(wx, NGBE_MSCC);
+}
+
+static int ngbe_phy_write_reg_mdi_c45(struct mii_bus *bus, int phy_addr,
+ int devnum, int regnum, u16 value)
+{
+ struct wx *wx = bus->priv;
+ int ret, command;
+ u16 val;
+
+ wr32(wx, NGBE_MDIO_CLAUSE_SELECT, 0x0);
+ /* setup and write the address cycle command */
+ command = NGBE_MSCA_RA(regnum) |
+ NGBE_MSCA_PA(phy_addr) |
+ NGBE_MSCA_DA(devnum);
+ wr32(wx, NGBE_MSCA, command);
+ command = value |
+ NGBE_MSCC_CMD(NGBE_MSCA_CMD_WRITE) |
+ NGBE_MSCC_BUSY |
+ NGBE_MDIO_CLK(6);
+ wr32(wx, NGBE_MSCC, command);
+
+ /* wait to complete */
+ ret = read_poll_timeout(rd32, val, !(val & NGBE_MSCC_BUSY), 1000,
+ 100000, false, wx, NGBE_MSCC);
+ if (ret)
+ wx_err(wx, "Mdio write c45 command did not complete.\n");
+
+ return ret;
+}
+
+static int ngbe_phy_read_reg_c22(struct mii_bus *bus, int phy_addr, int regnum)
+{
+ struct wx *wx = bus->priv;
+ u16 phy_data;
+
+ if (wx->mac_type == em_mac_type_mdi)
+ phy_data = ngbe_phy_read_reg_internal(bus, phy_addr, regnum);
+ else
+ phy_data = ngbe_phy_read_reg_mdi_c22(bus, phy_addr, regnum);
+
+ return phy_data;
+}
+
+static int ngbe_phy_write_reg_c22(struct mii_bus *bus, int phy_addr,
+ int regnum, u16 value)
+{
+ struct wx *wx = bus->priv;
+ int ret;
+
+ if (wx->mac_type == em_mac_type_mdi)
+ ret = ngbe_phy_write_reg_internal(bus, phy_addr, regnum, value);
+ else
+ ret = ngbe_phy_write_reg_mdi_c22(bus, phy_addr, regnum, value);
+
+ return ret;
+}
+
+static void ngbe_handle_link_change(struct net_device *dev)
+{
+ struct wx *wx = netdev_priv(dev);
+ struct phy_device *phydev;
+ u32 lan_speed, reg;
+
+ phydev = wx->phydev;
+ if (!(wx->link != phydev->link ||
+ wx->speed != phydev->speed ||
+ wx->duplex != phydev->duplex))
+ return;
+
+ wx->link = phydev->link;
+ wx->speed = phydev->speed;
+ wx->duplex = phydev->duplex;
+ switch (phydev->speed) {
+ case SPEED_10:
+ lan_speed = 0;
+ break;
+ case SPEED_100:
+ lan_speed = 1;
+ break;
+ case SPEED_1000:
+ default:
+ lan_speed = 2;
+ break;
+ }
+ wr32m(wx, NGBE_CFG_LAN_SPEED, 0x3, lan_speed);
+
+ if (phydev->link) {
+ reg = rd32(wx, WX_MAC_TX_CFG);
+ reg &= ~WX_MAC_TX_CFG_SPEED_MASK;
+ reg |= WX_MAC_TX_CFG_SPEED_1G | WX_MAC_TX_CFG_TE;
+ wr32(wx, WX_MAC_TX_CFG, reg);
+ /* Re configure MAC RX */
+ reg = rd32(wx, WX_MAC_RX_CFG);
+ wr32(wx, WX_MAC_RX_CFG, reg);
+ wr32(wx, WX_MAC_PKT_FLT, WX_MAC_PKT_FLT_PR);
+ reg = rd32(wx, WX_MAC_WDG_TIMEOUT);
+ wr32(wx, WX_MAC_WDG_TIMEOUT, reg);
+ }
+ phy_print_status(phydev);
+}
+
+int ngbe_phy_connect(struct wx *wx)
+{
+ int ret;
+
+ ret = phy_connect_direct(wx->netdev,
+ wx->phydev,
+ ngbe_handle_link_change,
+ PHY_INTERFACE_MODE_RGMII_ID);
+ if (ret) {
+ wx_err(wx, "PHY connect failed.\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void ngbe_phy_fixup(struct wx *wx)
+{
+ struct phy_device *phydev = wx->phydev;
+ struct ethtool_eee eee;
+
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
+
+ if (wx->mac_type != em_mac_type_mdi)
+ return;
+ /* disable EEE, internal phy does not support eee */
+ memset(&eee, 0, sizeof(eee));
+ phy_ethtool_set_eee(phydev, &eee);
+}
+
+int ngbe_mdio_init(struct wx *wx)
+{
+ struct pci_dev *pdev = wx->pdev;
+ struct mii_bus *mii_bus;
+ int ret;
+
+ mii_bus = devm_mdiobus_alloc(&pdev->dev);
+ if (!mii_bus)
+ return -ENOMEM;
+
+ mii_bus->name = "ngbe_mii_bus";
+ mii_bus->read = ngbe_phy_read_reg_c22;
+ mii_bus->write = ngbe_phy_write_reg_c22;
+ mii_bus->phy_mask = GENMASK(31, 4);
+ mii_bus->parent = &pdev->dev;
+ mii_bus->priv = wx;
+
+ if (wx->mac_type == em_mac_type_rgmii) {
+ mii_bus->read_c45 = ngbe_phy_read_reg_mdi_c45;
+ mii_bus->write_c45 = ngbe_phy_write_reg_mdi_c45;
+ }
+
+ snprintf(mii_bus->id, MII_BUS_ID_SIZE, "ngbe-%x",
+ (pdev->bus->number << 8) | pdev->devfn);
+ ret = devm_mdiobus_register(&pdev->dev, mii_bus);
+ if (ret)
+ return ret;
+
+ wx->phydev = phy_find_first(mii_bus);
+ if (!wx->phydev)
+ return -ENODEV;
+
+ phy_attached_info(wx->phydev);
+ ngbe_phy_fixup(wx);
+
+ wx->link = 0;
+ wx->speed = 0;
+ wx->duplex = 0;
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.h b/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.h
new file mode 100644
index 000000000000..0a6400dd89c4
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * WangXun Gigabit PCI Express Linux driver
+ * Copyright (c) 2019 - 2022 Beijing WangXun Technology Co., Ltd.
+ */
+
+#ifndef _NGBE_MDIO_H_
+#define _NGBE_MDIO_H_
+
+int ngbe_phy_connect(struct wx *wx);
+int ngbe_mdio_init(struct wx *wx);
+#endif /* _NGBE_MDIO_H_ */
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h b/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h
index 39f6c03f1a54..a2351349785e 100644
--- a/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h
@@ -49,7 +49,6 @@
#define NGBE_SPI_ILDR_STATUS 0x10120
#define NGBE_SPI_ILDR_STATUS_PERST BIT(0) /* PCIE_PERST is done */
#define NGBE_SPI_ILDR_STATUS_PWRRST BIT(1) /* Power on reset is done */
-#define NGBE_SPI_ILDR_STATUS_LAN_SW_RST(_i) BIT((_i) + 9) /* lan soft reset done */
/* Checksum and EEPROM pointers */
#define NGBE_CALSUM_COMMAND 0xE9
@@ -60,6 +59,25 @@
#define NGBE_EEPROM_VERSION_L 0x1D
#define NGBE_EEPROM_VERSION_H 0x1E
+/* mdio access */
+#define NGBE_MSCA 0x11200
+#define NGBE_MSCA_RA(v) FIELD_PREP(U16_MAX, v)
+#define NGBE_MSCA_PA(v) FIELD_PREP(GENMASK(20, 16), v)
+#define NGBE_MSCA_DA(v) FIELD_PREP(GENMASK(25, 21), v)
+#define NGBE_MSCC 0x11204
+#define NGBE_MSCC_CMD(v) FIELD_PREP(GENMASK(17, 16), v)
+
+enum NGBE_MSCA_CMD_value {
+ NGBE_MSCA_CMD_RSV = 0,
+ NGBE_MSCA_CMD_WRITE,
+ NGBE_MSCA_CMD_POST_READ,
+ NGBE_MSCA_CMD_READ,
+};
+
+#define NGBE_MSCC_SADDR BIT(18)
+#define NGBE_MSCC_BUSY BIT(22)
+#define NGBE_MDIO_CLK(v) FIELD_PREP(GENMASK(21, 19), v)
+
/* Media-dependent registers. */
#define NGBE_MDIO_CLAUSE_SELECT 0x11220
@@ -72,6 +90,24 @@
#define NGBE_GPIO_DDR_0 BIT(0) /* SDP0 IO direction */
#define NGBE_GPIO_DDR_1 BIT(1) /* SDP1 IO direction */
+/* Extended Interrupt Enable Set */
+#define NGBE_PX_MISC_IEN_DEV_RST BIT(10)
+#define NGBE_PX_MISC_IEN_ETH_LK BIT(18)
+#define NGBE_PX_MISC_IEN_INT_ERR BIT(20)
+#define NGBE_PX_MISC_IEN_GPIO BIT(26)
+#define NGBE_PX_MISC_IEN_MASK ( \
+ NGBE_PX_MISC_IEN_DEV_RST | \
+ NGBE_PX_MISC_IEN_ETH_LK | \
+ NGBE_PX_MISC_IEN_INT_ERR | \
+ NGBE_PX_MISC_IEN_GPIO)
+
+#define NGBE_INTR_ALL 0x1FF
+#define NGBE_INTR_MISC(A) BIT((A)->num_q_vectors)
+
+#define NGBE_PHY_CONFIG(reg_offset) (0x14000 + ((reg_offset) * 4))
+#define NGBE_CFG_LAN_SPEED 0x14440
+#define NGBE_CFG_PORT_ST 0x14404
+
/* Wake up registers */
#define NGBE_PSR_WKUP_CTL 0x15B80
/* Wake Up Filter Control Bit */
@@ -90,50 +126,30 @@
#define NGBE_FW_CMD_ST_PASS 0x80658383
#define NGBE_FW_CMD_ST_FAIL 0x70657376
-enum ngbe_phy_type {
- ngbe_phy_unknown = 0,
- ngbe_phy_none,
- ngbe_phy_internal,
- ngbe_phy_m88e1512,
- ngbe_phy_m88e1512_sfi,
- ngbe_phy_m88e1512_unknown,
- ngbe_phy_yt8521s,
- ngbe_phy_yt8521s_sfi,
- ngbe_phy_internal_yt8521s_sfi,
- ngbe_phy_generic
-};
+#define NGBE_MAX_FDIR_INDICES 7
-enum ngbe_media_type {
- ngbe_media_type_unknown = 0,
- ngbe_media_type_fiber,
- ngbe_media_type_copper,
- ngbe_media_type_backplane,
-};
-
-enum ngbe_mac_type {
- ngbe_mac_type_unknown = 0,
- ngbe_mac_type_mdi,
- ngbe_mac_type_rgmii
-};
+#define NGBE_MAX_RX_QUEUES (NGBE_MAX_FDIR_INDICES + 1)
+#define NGBE_MAX_TX_QUEUES (NGBE_MAX_FDIR_INDICES + 1)
-struct ngbe_phy_info {
- enum ngbe_phy_type type;
- enum ngbe_media_type media_type;
+#define NGBE_ETH_LENGTH_OF_ADDRESS 6
+#define NGBE_MAX_MSIX_VECTORS 0x09
+#define NGBE_RAR_ENTRIES 32
+#define NGBE_RX_PB_SIZE 42
+#define NGBE_MC_TBL_SIZE 128
+#define NGBE_TDB_PB_SZ (20 * 1024) /* 160KB Packet Buffer */
+#define NGBE_MAX_JUMBO_FRAME_SIZE 9432 /* max payload 9414 */
- u32 addr;
- u32 id;
+/* TX/RX descriptor defines */
+#define NGBE_DEFAULT_TXD 512 /* default ring size */
+#define NGBE_DEFAULT_TX_WORK 256
+#define NGBE_MAX_TXD 8192
+#define NGBE_MIN_TXD 128
- bool reset_if_overtemp;
+#define NGBE_DEFAULT_RXD 512 /* default ring size */
+#define NGBE_DEFAULT_RX_WORK 256
+#define NGBE_MAX_RXD 8192
+#define NGBE_MIN_RXD 128
-};
-
-struct ngbe_hw {
- struct wx_hw wxhw;
- struct ngbe_phy_info phy;
- enum ngbe_mac_type mac_type;
+extern char ngbe_driver_name[];
- bool wol_enabled;
- bool ncsi_enabled;
- bool gpio_ctrl;
-};
#endif /* _NGBE_TYPE_H_ */
diff --git a/drivers/net/ethernet/wangxun/txgbe/Makefile b/drivers/net/ethernet/wangxun/txgbe/Makefile
index 78484c58b78b..6db14a2cb2d0 100644
--- a/drivers/net/ethernet/wangxun/txgbe/Makefile
+++ b/drivers/net/ethernet/wangxun/txgbe/Makefile
@@ -7,4 +7,5 @@
obj-$(CONFIG_TXGBE) += txgbe.o
txgbe-objs := txgbe_main.o \
- txgbe_hw.o
+ txgbe_hw.o \
+ txgbe_ethtool.o
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe.h b/drivers/net/ethernet/wangxun/txgbe/txgbe.h
deleted file mode 100644
index 19e61377bd00..000000000000
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (c) 2015 - 2022 Beijing WangXun Technology Co., Ltd. */
-
-#ifndef _TXGBE_H_
-#define _TXGBE_H_
-
-#define TXGBE_MAX_FDIR_INDICES 63
-
-#define TXGBE_MAX_RX_QUEUES (TXGBE_MAX_FDIR_INDICES + 1)
-#define TXGBE_MAX_TX_QUEUES (TXGBE_MAX_FDIR_INDICES + 1)
-
-#define TXGBE_SP_MAX_TX_QUEUES 128
-#define TXGBE_SP_MAX_RX_QUEUES 128
-#define TXGBE_SP_RAR_ENTRIES 128
-#define TXGBE_SP_MC_TBL_SIZE 128
-
-struct txgbe_mac_addr {
- u8 addr[ETH_ALEN];
- u16 state; /* bitmask */
- u64 pools;
-};
-
-#define TXGBE_MAC_STATE_DEFAULT 0x1
-#define TXGBE_MAC_STATE_MODIFIED 0x2
-#define TXGBE_MAC_STATE_IN_USE 0x4
-
-/* board specific private data structure */
-struct txgbe_adapter {
- u8 __iomem *io_addr; /* Mainly for iounmap use */
- /* OS defined structs */
- struct net_device *netdev;
- struct pci_dev *pdev;
-
- /* structs defined in txgbe_type.h */
- struct txgbe_hw hw;
- u16 msg_enable;
- struct txgbe_mac_addr *mac_table;
- char eeprom_id[32];
-};
-
-extern char txgbe_driver_name[];
-
-#endif /* _TXGBE_H_ */
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c
new file mode 100644
index 000000000000..d914e9a05404
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2015 - 2023 Beijing WangXun Technology Co., Ltd. */
+
+#include <linux/pci.h>
+#include <linux/phylink.h>
+#include <linux/netdevice.h>
+
+#include "../libwx/wx_ethtool.h"
+#include "txgbe_ethtool.h"
+
+static const struct ethtool_ops txgbe_ethtool_ops = {
+ .get_drvinfo = wx_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+};
+
+void txgbe_set_ethtool_ops(struct net_device *netdev)
+{
+ netdev->ethtool_ops = &txgbe_ethtool_ops;
+}
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.h
new file mode 100644
index 000000000000..ace1b3571012
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2015 - 2023 Beijing WangXun Technology Co., Ltd. */
+
+#ifndef _TXGBE_ETHTOOL_H_
+#define _TXGBE_ETHTOOL_H_
+
+void txgbe_set_ethtool_ops(struct net_device *netdev);
+
+#endif /* _TXGBE_ETHTOOL_H_ */
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c
index 167f7ff73192..ebc46f3be056 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c
@@ -12,70 +12,67 @@
#include "../libwx/wx_hw.h"
#include "txgbe_type.h"
#include "txgbe_hw.h"
-#include "txgbe.h"
/**
* txgbe_init_thermal_sensor_thresh - Inits thermal sensor thresholds
- * @hw: pointer to hardware structure
+ * @wx: pointer to hardware structure
*
* Inits the thermal sensor thresholds according to the NVM map
* and save off the threshold and location values into mac.thermal_sensor_data
**/
-static void txgbe_init_thermal_sensor_thresh(struct txgbe_hw *hw)
+static void txgbe_init_thermal_sensor_thresh(struct wx *wx)
{
- struct wx_hw *wxhw = &hw->wxhw;
- struct wx_thermal_sensor_data *data = &wxhw->mac.sensor;
+ struct wx_thermal_sensor_data *data = &wx->mac.sensor;
memset(data, 0, sizeof(struct wx_thermal_sensor_data));
/* Only support thermal sensors attached to SP physical port 0 */
- if (wxhw->bus.func)
+ if (wx->bus.func)
return;
- wr32(wxhw, TXGBE_TS_CTL, TXGBE_TS_CTL_EVAL_MD);
+ wr32(wx, TXGBE_TS_CTL, TXGBE_TS_CTL_EVAL_MD);
- wr32(wxhw, WX_TS_INT_EN,
+ wr32(wx, WX_TS_INT_EN,
WX_TS_INT_EN_ALARM_INT_EN | WX_TS_INT_EN_DALARM_INT_EN);
- wr32(wxhw, WX_TS_EN, WX_TS_EN_ENA);
+ wr32(wx, WX_TS_EN, WX_TS_EN_ENA);
data->alarm_thresh = 100;
- wr32(wxhw, WX_TS_ALARM_THRE, 677);
+ wr32(wx, WX_TS_ALARM_THRE, 677);
data->dalarm_thresh = 90;
- wr32(wxhw, WX_TS_DALARM_THRE, 614);
+ wr32(wx, WX_TS_DALARM_THRE, 614);
}
/**
* txgbe_read_pba_string - Reads part number string from EEPROM
- * @hw: pointer to hardware structure
+ * @wx: pointer to hardware structure
* @pba_num: stores the part number string from the EEPROM
* @pba_num_size: part number string buffer length
*
* Reads the part number string from the EEPROM.
**/
-int txgbe_read_pba_string(struct txgbe_hw *hw, u8 *pba_num, u32 pba_num_size)
+int txgbe_read_pba_string(struct wx *wx, u8 *pba_num, u32 pba_num_size)
{
u16 pba_ptr, offset, length, data;
- struct wx_hw *wxhw = &hw->wxhw;
int ret_val;
if (!pba_num) {
- wx_err(wxhw, "PBA string buffer was null\n");
+ wx_err(wx, "PBA string buffer was null\n");
return -EINVAL;
}
- ret_val = wx_read_ee_hostif(wxhw,
- wxhw->eeprom.sw_region_offset + TXGBE_PBANUM0_PTR,
+ ret_val = wx_read_ee_hostif(wx,
+ wx->eeprom.sw_region_offset + TXGBE_PBANUM0_PTR,
&data);
if (ret_val != 0) {
- wx_err(wxhw, "NVM Read Error\n");
+ wx_err(wx, "NVM Read Error\n");
return ret_val;
}
- ret_val = wx_read_ee_hostif(wxhw,
- wxhw->eeprom.sw_region_offset + TXGBE_PBANUM1_PTR,
+ ret_val = wx_read_ee_hostif(wx,
+ wx->eeprom.sw_region_offset + TXGBE_PBANUM1_PTR,
&pba_ptr);
if (ret_val != 0) {
- wx_err(wxhw, "NVM Read Error\n");
+ wx_err(wx, "NVM Read Error\n");
return ret_val;
}
@@ -84,11 +81,11 @@ int txgbe_read_pba_string(struct txgbe_hw *hw, u8 *pba_num, u32 pba_num_size)
* and we can decode it into an ascii string
*/
if (data != TXGBE_PBANUM_PTR_GUARD) {
- wx_err(wxhw, "NVM PBA number is not stored as string\n");
+ wx_err(wx, "NVM PBA number is not stored as string\n");
/* we will need 11 characters to store the PBA */
if (pba_num_size < 11) {
- wx_err(wxhw, "PBA string buffer too small\n");
+ wx_err(wx, "PBA string buffer too small\n");
return -ENOMEM;
}
@@ -118,20 +115,20 @@ int txgbe_read_pba_string(struct txgbe_hw *hw, u8 *pba_num, u32 pba_num_size)
return 0;
}
- ret_val = wx_read_ee_hostif(wxhw, pba_ptr, &length);
+ ret_val = wx_read_ee_hostif(wx, pba_ptr, &length);
if (ret_val != 0) {
- wx_err(wxhw, "NVM Read Error\n");
+ wx_err(wx, "NVM Read Error\n");
return ret_val;
}
if (length == 0xFFFF || length == 0) {
- wx_err(wxhw, "NVM PBA number section invalid length\n");
+ wx_err(wx, "NVM PBA number section invalid length\n");
return -EINVAL;
}
/* check if pba_num buffer is big enough */
if (pba_num_size < (((u32)length * 2) - 1)) {
- wx_err(wxhw, "PBA string buffer too small\n");
+ wx_err(wx, "PBA string buffer too small\n");
return -ENOMEM;
}
@@ -140,9 +137,9 @@ int txgbe_read_pba_string(struct txgbe_hw *hw, u8 *pba_num, u32 pba_num_size)
length--;
for (offset = 0; offset < length; offset++) {
- ret_val = wx_read_ee_hostif(wxhw, pba_ptr + offset, &data);
+ ret_val = wx_read_ee_hostif(wx, pba_ptr + offset, &data);
if (ret_val != 0) {
- wx_err(wxhw, "NVM Read Error\n");
+ wx_err(wx, "NVM Read Error\n");
return ret_val;
}
pba_num[offset * 2] = (u8)(data >> 8);
@@ -155,14 +152,13 @@ int txgbe_read_pba_string(struct txgbe_hw *hw, u8 *pba_num, u32 pba_num_size)
/**
* txgbe_calc_eeprom_checksum - Calculates and returns the checksum
- * @hw: pointer to hardware structure
+ * @wx: pointer to hardware structure
* @checksum: pointer to cheksum
*
* Returns a negative error code on error
**/
-static int txgbe_calc_eeprom_checksum(struct txgbe_hw *hw, u16 *checksum)
+static int txgbe_calc_eeprom_checksum(struct wx *wx, u16 *checksum)
{
- struct wx_hw *wxhw = &hw->wxhw;
u16 *eeprom_ptrs = NULL;
u32 buffer_size = 0;
u16 *buffer = NULL;
@@ -170,7 +166,7 @@ static int txgbe_calc_eeprom_checksum(struct txgbe_hw *hw, u16 *checksum)
int status;
u16 i;
- wx_init_eeprom_params(wxhw);
+ wx_init_eeprom_params(wx);
if (!buffer) {
eeprom_ptrs = kvmalloc_array(TXGBE_EEPROM_LAST_WORD, sizeof(u16),
@@ -178,11 +174,11 @@ static int txgbe_calc_eeprom_checksum(struct txgbe_hw *hw, u16 *checksum)
if (!eeprom_ptrs)
return -ENOMEM;
/* Read pointer area */
- status = wx_read_ee_hostif_buffer(wxhw, 0,
+ status = wx_read_ee_hostif_buffer(wx, 0,
TXGBE_EEPROM_LAST_WORD,
eeprom_ptrs);
if (status != 0) {
- wx_err(wxhw, "Failed to read EEPROM image\n");
+ wx_err(wx, "Failed to read EEPROM image\n");
kvfree(eeprom_ptrs);
return status;
}
@@ -194,7 +190,7 @@ static int txgbe_calc_eeprom_checksum(struct txgbe_hw *hw, u16 *checksum)
}
for (i = 0; i < TXGBE_EEPROM_LAST_WORD; i++)
- if (i != wxhw->eeprom.sw_region_offset + TXGBE_EEPROM_CHECKSUM)
+ if (i != wx->eeprom.sw_region_offset + TXGBE_EEPROM_CHECKSUM)
*checksum += local_buffer[i];
if (eeprom_ptrs)
@@ -210,15 +206,14 @@ static int txgbe_calc_eeprom_checksum(struct txgbe_hw *hw, u16 *checksum)
/**
* txgbe_validate_eeprom_checksum - Validate EEPROM checksum
- * @hw: pointer to hardware structure
+ * @wx: pointer to hardware structure
* @checksum_val: calculated checksum
*
* Performs checksum calculation and validates the EEPROM checksum. If the
* caller does not need checksum_val, the value can be NULL.
**/
-int txgbe_validate_eeprom_checksum(struct txgbe_hw *hw, u16 *checksum_val)
+int txgbe_validate_eeprom_checksum(struct wx *wx, u16 *checksum_val)
{
- struct wx_hw *wxhw = &hw->wxhw;
u16 read_checksum = 0;
u16 checksum;
int status;
@@ -227,18 +222,18 @@ int txgbe_validate_eeprom_checksum(struct txgbe_hw *hw, u16 *checksum_val)
* not continue or we could be in for a very long wait while every
* EEPROM read fails
*/
- status = wx_read_ee_hostif(wxhw, 0, &checksum);
+ status = wx_read_ee_hostif(wx, 0, &checksum);
if (status) {
- wx_err(wxhw, "EEPROM read failed\n");
+ wx_err(wx, "EEPROM read failed\n");
return status;
}
checksum = 0;
- status = txgbe_calc_eeprom_checksum(hw, &checksum);
+ status = txgbe_calc_eeprom_checksum(wx, &checksum);
if (status != 0)
return status;
- status = wx_read_ee_hostif(wxhw, wxhw->eeprom.sw_region_offset +
+ status = wx_read_ee_hostif(wx, wx->eeprom.sw_region_offset +
TXGBE_EEPROM_CHECKSUM, &read_checksum);
if (status != 0)
return status;
@@ -248,7 +243,7 @@ int txgbe_validate_eeprom_checksum(struct txgbe_hw *hw, u16 *checksum_val)
*/
if (read_checksum != checksum) {
status = -EIO;
- wx_err(wxhw, "Invalid EEPROM checksum\n");
+ wx_err(wx, "Invalid EEPROM checksum\n");
}
/* If the user cares, return the calculated checksum */
@@ -258,55 +253,52 @@ int txgbe_validate_eeprom_checksum(struct txgbe_hw *hw, u16 *checksum_val)
return status;
}
-static void txgbe_reset_misc(struct txgbe_hw *hw)
+static void txgbe_reset_misc(struct wx *wx)
{
- struct wx_hw *wxhw = &hw->wxhw;
-
- wx_reset_misc(wxhw);
- txgbe_init_thermal_sensor_thresh(hw);
+ wx_reset_misc(wx);
+ txgbe_init_thermal_sensor_thresh(wx);
}
/**
* txgbe_reset_hw - Perform hardware reset
- * @hw: pointer to hardware structure
+ * @wx: pointer to wx structure
*
* Resets the hardware by resetting the transmit and receive units, masks
* and clears all interrupts, perform a PHY reset, and perform a link (MAC)
* reset.
**/
-int txgbe_reset_hw(struct txgbe_hw *hw)
+int txgbe_reset_hw(struct wx *wx)
{
- struct wx_hw *wxhw = &hw->wxhw;
int status;
/* Call adapter stop to disable tx/rx and clear interrupts */
- status = wx_stop_adapter(wxhw);
+ status = wx_stop_adapter(wx);
if (status != 0)
return status;
- if (!(((wxhw->subsystem_device_id & WX_NCSI_MASK) == WX_NCSI_SUP) ||
- ((wxhw->subsystem_device_id & WX_WOL_MASK) == WX_WOL_SUP)))
- wx_reset_hostif(wxhw);
+ if (!(((wx->subsystem_device_id & WX_NCSI_MASK) == WX_NCSI_SUP) ||
+ ((wx->subsystem_device_id & WX_WOL_MASK) == WX_WOL_SUP)))
+ wx_reset_hostif(wx);
usleep_range(10, 100);
- status = wx_check_flash_load(wxhw, TXGBE_SPI_ILDR_STATUS_LAN_SW_RST(wxhw->bus.func));
+ status = wx_check_flash_load(wx, TXGBE_SPI_ILDR_STATUS_LAN_SW_RST(wx->bus.func));
if (status != 0)
return status;
- txgbe_reset_misc(hw);
+ txgbe_reset_misc(wx);
/* Store the permanent mac address */
- wx_get_mac_addr(wxhw, wxhw->mac.perm_addr);
+ wx_get_mac_addr(wx, wx->mac.perm_addr);
/* Store MAC address from RAR0, clear receive address registers, and
* clear the multicast table. Also reset num_rar_entries to 128,
* since we modify this value when programming the SAN MAC address.
*/
- wxhw->mac.num_rar_entries = TXGBE_SP_RAR_ENTRIES;
- wx_init_rx_addrs(wxhw);
+ wx->mac.num_rar_entries = TXGBE_SP_RAR_ENTRIES;
+ wx_init_rx_addrs(wx);
- pci_set_master(wxhw->pdev);
+ pci_set_master(wx->pdev);
return 0;
}
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h
index 6a751a69177b..e82f65dff8a6 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h
@@ -4,8 +4,8 @@
#ifndef _TXGBE_HW_H_
#define _TXGBE_HW_H_
-int txgbe_read_pba_string(struct txgbe_hw *hw, u8 *pba_num, u32 pba_num_size);
-int txgbe_validate_eeprom_checksum(struct txgbe_hw *hw, u16 *checksum_val);
-int txgbe_reset_hw(struct txgbe_hw *hw);
+int txgbe_read_pba_string(struct wx *wx, u8 *pba_num, u32 pba_num_size);
+int txgbe_validate_eeprom_checksum(struct wx *wx, u16 *checksum_val);
+int txgbe_reset_hw(struct wx *wx);
#endif /* _TXGBE_HW_H_ */
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
index 36780e7f05b7..6c0a98230557 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
@@ -11,10 +11,11 @@
#include <net/ip.h>
#include "../libwx/wx_type.h"
+#include "../libwx/wx_lib.h"
#include "../libwx/wx_hw.h"
#include "txgbe_type.h"
#include "txgbe_hw.h"
-#include "txgbe.h"
+#include "txgbe_ethtool.h"
char txgbe_driver_name[] = "txgbe";
@@ -35,26 +36,26 @@ static const struct pci_device_id txgbe_pci_tbl[] = {
#define DEFAULT_DEBUG_LEVEL_SHIFT 3
-static void txgbe_check_minimum_link(struct txgbe_adapter *adapter)
+static void txgbe_check_minimum_link(struct wx *wx)
{
struct pci_dev *pdev;
- pdev = adapter->pdev;
+ pdev = wx->pdev;
pcie_print_link_status(pdev);
}
/**
* txgbe_enumerate_functions - Get the number of ports this device has
- * @adapter: adapter structure
+ * @wx: wx structure
*
* This function enumerates the phsyical functions co-located on a single slot,
* in order to determine how many ports a device has. This is most useful in
* determining the required GT/s of PCIe bandwidth necessary for optimal
* performance.
**/
-static int txgbe_enumerate_functions(struct txgbe_adapter *adapter)
+static int txgbe_enumerate_functions(struct wx *wx)
{
- struct pci_dev *entry, *pdev = adapter->pdev;
+ struct pci_dev *entry, *pdev = wx->pdev;
int physfns = 0;
list_for_each_entry(entry, &pdev->bus->devices, bus_list) {
@@ -73,196 +74,299 @@ static int txgbe_enumerate_functions(struct txgbe_adapter *adapter)
return physfns;
}
-static void txgbe_sync_mac_table(struct txgbe_adapter *adapter)
+/**
+ * txgbe_irq_enable - Enable default interrupt generation settings
+ * @wx: pointer to private structure
+ * @queues: enable irqs for queues
+ **/
+static void txgbe_irq_enable(struct wx *wx, bool queues)
{
- struct txgbe_hw *hw = &adapter->hw;
- struct wx_hw *wxhw = &hw->wxhw;
- int i;
-
- for (i = 0; i < wxhw->mac.num_rar_entries; i++) {
- if (adapter->mac_table[i].state & TXGBE_MAC_STATE_MODIFIED) {
- if (adapter->mac_table[i].state & TXGBE_MAC_STATE_IN_USE) {
- wx_set_rar(wxhw, i,
- adapter->mac_table[i].addr,
- adapter->mac_table[i].pools,
- WX_PSR_MAC_SWC_AD_H_AV);
- } else {
- wx_clear_rar(wxhw, i);
- }
- adapter->mac_table[i].state &= ~(TXGBE_MAC_STATE_MODIFIED);
- }
- }
+ /* unmask interrupt */
+ wx_intr_enable(wx, TXGBE_INTR_MISC(wx));
+ if (queues)
+ wx_intr_enable(wx, TXGBE_INTR_QALL(wx));
}
-/* this function destroys the first RAR entry */
-static void txgbe_mac_set_default_filter(struct txgbe_adapter *adapter,
- u8 *addr)
+/**
+ * txgbe_intr - msi/legacy mode Interrupt Handler
+ * @irq: interrupt number
+ * @data: pointer to a network interface device structure
+ **/
+static irqreturn_t txgbe_intr(int __always_unused irq, void *data)
{
- struct wx_hw *wxhw = &adapter->hw.wxhw;
-
- memcpy(&adapter->mac_table[0].addr, addr, ETH_ALEN);
- adapter->mac_table[0].pools = 1ULL;
- adapter->mac_table[0].state = (TXGBE_MAC_STATE_DEFAULT |
- TXGBE_MAC_STATE_IN_USE);
- wx_set_rar(wxhw, 0, adapter->mac_table[0].addr,
- adapter->mac_table[0].pools,
- WX_PSR_MAC_SWC_AD_H_AV);
+ struct wx_q_vector *q_vector;
+ struct wx *wx = data;
+ struct pci_dev *pdev;
+ u32 eicr;
+
+ q_vector = wx->q_vector[0];
+ pdev = wx->pdev;
+
+ eicr = wx_misc_isb(wx, WX_ISB_VEC0);
+ if (!eicr) {
+ /* shared interrupt alert!
+ * the interrupt that we masked before the ICR read.
+ */
+ if (netif_running(wx->netdev))
+ txgbe_irq_enable(wx, true);
+ return IRQ_NONE; /* Not our interrupt */
+ }
+ wx->isb_mem[WX_ISB_VEC0] = 0;
+ if (!(pdev->msi_enabled))
+ wr32(wx, WX_PX_INTA, 1);
+
+ wx->isb_mem[WX_ISB_MISC] = 0;
+ /* would disable interrupts here but it is auto disabled */
+ napi_schedule_irqoff(&q_vector->napi);
+
+ /* re-enable link(maybe) and non-queue interrupts, no flush.
+ * txgbe_poll will re-enable the queue interrupts
+ */
+ if (netif_running(wx->netdev))
+ txgbe_irq_enable(wx, false);
+
+ return IRQ_HANDLED;
}
-static void txgbe_flush_sw_mac_table(struct txgbe_adapter *adapter)
+static irqreturn_t txgbe_msix_other(int __always_unused irq, void *data)
{
- struct wx_hw *wxhw = &adapter->hw.wxhw;
- u32 i;
+ struct wx *wx = data;
- for (i = 0; i < wxhw->mac.num_rar_entries; i++) {
- adapter->mac_table[i].state |= TXGBE_MAC_STATE_MODIFIED;
- adapter->mac_table[i].state &= ~TXGBE_MAC_STATE_IN_USE;
- memset(adapter->mac_table[i].addr, 0, ETH_ALEN);
- adapter->mac_table[i].pools = 0;
- }
- txgbe_sync_mac_table(adapter);
+ /* re-enable the original interrupt state */
+ if (netif_running(wx->netdev))
+ txgbe_irq_enable(wx, false);
+
+ return IRQ_HANDLED;
}
-static int txgbe_del_mac_filter(struct txgbe_adapter *adapter, u8 *addr, u16 pool)
+/**
+ * txgbe_request_msix_irqs - Initialize MSI-X interrupts
+ * @wx: board private structure
+ *
+ * Allocate MSI-X vectors and request interrupts from the kernel.
+ **/
+static int txgbe_request_msix_irqs(struct wx *wx)
{
- struct wx_hw *wxhw = &adapter->hw.wxhw;
- u32 i;
+ struct net_device *netdev = wx->netdev;
+ int vector, err;
+
+ for (vector = 0; vector < wx->num_q_vectors; vector++) {
+ struct wx_q_vector *q_vector = wx->q_vector[vector];
+ struct msix_entry *entry = &wx->msix_entries[vector];
+
+ if (q_vector->tx.ring && q_vector->rx.ring)
+ snprintf(q_vector->name, sizeof(q_vector->name) - 1,
+ "%s-TxRx-%d", netdev->name, entry->entry);
+ else
+ /* skip this unused q_vector */
+ continue;
- if (is_zero_ether_addr(addr))
- return -EINVAL;
-
- /* search table for addr, if found, set to 0 and sync */
- for (i = 0; i < wxhw->mac.num_rar_entries; i++) {
- if (ether_addr_equal(addr, adapter->mac_table[i].addr)) {
- if (adapter->mac_table[i].pools & (1ULL << pool)) {
- adapter->mac_table[i].state |= TXGBE_MAC_STATE_MODIFIED;
- adapter->mac_table[i].state &= ~TXGBE_MAC_STATE_IN_USE;
- adapter->mac_table[i].pools &= ~(1ULL << pool);
- txgbe_sync_mac_table(adapter);
- }
- return 0;
+ err = request_irq(entry->vector, wx_msix_clean_rings, 0,
+ q_vector->name, q_vector);
+ if (err) {
+ wx_err(wx, "request_irq failed for MSIX interrupt %s Error: %d\n",
+ q_vector->name, err);
+ goto free_queue_irqs;
}
+ }
- if (adapter->mac_table[i].pools != (1 << pool))
- continue;
- if (!ether_addr_equal(addr, adapter->mac_table[i].addr))
- continue;
+ err = request_irq(wx->msix_entries[vector].vector,
+ txgbe_msix_other, 0, netdev->name, wx);
+ if (err) {
+ wx_err(wx, "request_irq for msix_other failed: %d\n", err);
+ goto free_queue_irqs;
+ }
+
+ return 0;
- adapter->mac_table[i].state |= TXGBE_MAC_STATE_MODIFIED;
- adapter->mac_table[i].state &= ~TXGBE_MAC_STATE_IN_USE;
- memset(adapter->mac_table[i].addr, 0, ETH_ALEN);
- adapter->mac_table[i].pools = 0;
- txgbe_sync_mac_table(adapter);
- return 0;
+free_queue_irqs:
+ while (vector) {
+ vector--;
+ free_irq(wx->msix_entries[vector].vector,
+ wx->q_vector[vector]);
}
- return -ENOMEM;
+ wx_reset_interrupt_capability(wx);
+ return err;
}
-static void txgbe_up_complete(struct txgbe_adapter *adapter)
+/**
+ * txgbe_request_irq - initialize interrupts
+ * @wx: board private structure
+ *
+ * Attempt to configure interrupts using the best available
+ * capabilities of the hardware and kernel.
+ **/
+static int txgbe_request_irq(struct wx *wx)
{
- struct txgbe_hw *hw = &adapter->hw;
- struct wx_hw *wxhw = &hw->wxhw;
+ struct net_device *netdev = wx->netdev;
+ struct pci_dev *pdev = wx->pdev;
+ int err;
- wx_control_hw(wxhw, true);
+ if (pdev->msix_enabled)
+ err = txgbe_request_msix_irqs(wx);
+ else if (pdev->msi_enabled)
+ err = request_irq(wx->pdev->irq, &txgbe_intr, 0,
+ netdev->name, wx);
+ else
+ err = request_irq(wx->pdev->irq, &txgbe_intr, IRQF_SHARED,
+ netdev->name, wx);
+
+ if (err)
+ wx_err(wx, "request_irq failed, Error %d\n", err);
+
+ return err;
}
-static void txgbe_reset(struct txgbe_adapter *adapter)
+static void txgbe_up_complete(struct wx *wx)
{
- struct net_device *netdev = adapter->netdev;
- struct txgbe_hw *hw = &adapter->hw;
+ u32 reg;
+
+ wx_control_hw(wx, true);
+ wx_configure_vectors(wx);
+
+ /* make sure to complete pre-operations */
+ smp_mb__before_atomic();
+ wx_napi_enable_all(wx);
+
+ /* clear any pending interrupts, may auto mask */
+ rd32(wx, WX_PX_IC);
+ rd32(wx, WX_PX_MISC_IC);
+ txgbe_irq_enable(wx, true);
+
+ /* Configure MAC Rx and Tx when link is up */
+ reg = rd32(wx, WX_MAC_RX_CFG);
+ wr32(wx, WX_MAC_RX_CFG, reg);
+ wr32(wx, WX_MAC_PKT_FLT, WX_MAC_PKT_FLT_PR);
+ reg = rd32(wx, WX_MAC_WDG_TIMEOUT);
+ wr32(wx, WX_MAC_WDG_TIMEOUT, reg);
+ reg = rd32(wx, WX_MAC_TX_CFG);
+ wr32(wx, WX_MAC_TX_CFG, (reg & ~WX_MAC_TX_CFG_SPEED_MASK) | WX_MAC_TX_CFG_SPEED_10G);
+
+ /* enable transmits */
+ netif_tx_start_all_queues(wx->netdev);
+ netif_carrier_on(wx->netdev);
+}
+
+static void txgbe_reset(struct wx *wx)
+{
+ struct net_device *netdev = wx->netdev;
u8 old_addr[ETH_ALEN];
int err;
- err = txgbe_reset_hw(hw);
+ err = txgbe_reset_hw(wx);
if (err != 0)
- dev_err(&adapter->pdev->dev, "Hardware Error: %d\n", err);
+ wx_err(wx, "Hardware Error: %d\n", err);
/* do not flush user set addresses */
- memcpy(old_addr, &adapter->mac_table[0].addr, netdev->addr_len);
- txgbe_flush_sw_mac_table(adapter);
- txgbe_mac_set_default_filter(adapter, old_addr);
+ memcpy(old_addr, &wx->mac_table[0].addr, netdev->addr_len);
+ wx_flush_sw_mac_table(wx);
+ wx_mac_set_default_filter(wx, old_addr);
}
-static void txgbe_disable_device(struct txgbe_adapter *adapter)
+static void txgbe_disable_device(struct wx *wx)
{
- struct net_device *netdev = adapter->netdev;
- struct wx_hw *wxhw = &adapter->hw.wxhw;
+ struct net_device *netdev = wx->netdev;
+ u32 i;
- wx_disable_pcie_master(wxhw);
+ wx_disable_pcie_master(wx);
/* disable receives */
- wx_disable_rx(wxhw);
+ wx_disable_rx(wx);
+
+ /* disable all enabled rx queues */
+ for (i = 0; i < wx->num_rx_queues; i++)
+ /* this call also flushes the previous write */
+ wx_disable_rx_queue(wx, wx->rx_ring[i]);
+ netif_tx_stop_all_queues(netdev);
netif_carrier_off(netdev);
netif_tx_disable(netdev);
- if (wxhw->bus.func < 2)
- wr32m(wxhw, TXGBE_MIS_PRB_CTL, TXGBE_MIS_PRB_CTL_LAN_UP(wxhw->bus.func), 0);
+ wx_irq_disable(wx);
+ wx_napi_disable_all(wx);
+
+ if (wx->bus.func < 2)
+ wr32m(wx, TXGBE_MIS_PRB_CTL, TXGBE_MIS_PRB_CTL_LAN_UP(wx->bus.func), 0);
else
- dev_err(&adapter->pdev->dev,
- "%s: invalid bus lan id %d\n",
- __func__, wxhw->bus.func);
+ wx_err(wx, "%s: invalid bus lan id %d\n",
+ __func__, wx->bus.func);
- if (!(((wxhw->subsystem_device_id & WX_NCSI_MASK) == WX_NCSI_SUP) ||
- ((wxhw->subsystem_device_id & WX_WOL_MASK) == WX_WOL_SUP))) {
+ if (!(((wx->subsystem_device_id & WX_NCSI_MASK) == WX_NCSI_SUP) ||
+ ((wx->subsystem_device_id & WX_WOL_MASK) == WX_WOL_SUP))) {
/* disable mac transmiter */
- wr32m(wxhw, WX_MAC_TX_CFG, WX_MAC_TX_CFG_TE, 0);
+ wr32m(wx, WX_MAC_TX_CFG, WX_MAC_TX_CFG_TE, 0);
+ }
+
+ /* disable transmits in the hardware now that interrupts are off */
+ for (i = 0; i < wx->num_tx_queues; i++) {
+ u8 reg_idx = wx->tx_ring[i]->reg_idx;
+
+ wr32(wx, WX_PX_TR_CFG(reg_idx), WX_PX_TR_CFG_SWFLSH);
}
/* Disable the Tx DMA engine */
- wr32m(wxhw, WX_TDM_CTL, WX_TDM_CTL_TE, 0);
+ wr32m(wx, WX_TDM_CTL, WX_TDM_CTL_TE, 0);
}
-static void txgbe_down(struct txgbe_adapter *adapter)
+static void txgbe_down(struct wx *wx)
{
- txgbe_disable_device(adapter);
- txgbe_reset(adapter);
+ txgbe_disable_device(wx);
+ txgbe_reset(wx);
+
+ wx_clean_all_tx_rings(wx);
+ wx_clean_all_rx_rings(wx);
}
/**
- * txgbe_sw_init - Initialize general software structures (struct txgbe_adapter)
- * @adapter: board private structure to initialize
+ * txgbe_sw_init - Initialize general software structures (struct wx)
+ * @wx: board private structure to initialize
**/
-static int txgbe_sw_init(struct txgbe_adapter *adapter)
+static int txgbe_sw_init(struct wx *wx)
{
- struct pci_dev *pdev = adapter->pdev;
- struct txgbe_hw *hw = &adapter->hw;
- struct wx_hw *wxhw = &hw->wxhw;
+ u16 msix_count = 0;
int err;
- wxhw->hw_addr = adapter->io_addr;
- wxhw->pdev = pdev;
+ wx->mac.num_rar_entries = TXGBE_SP_RAR_ENTRIES;
+ wx->mac.max_tx_queues = TXGBE_SP_MAX_TX_QUEUES;
+ wx->mac.max_rx_queues = TXGBE_SP_MAX_RX_QUEUES;
+ wx->mac.mcft_size = TXGBE_SP_MC_TBL_SIZE;
+ wx->mac.rx_pb_size = TXGBE_SP_RX_PB_SIZE;
+ wx->mac.tx_pb_size = TXGBE_SP_TDB_PB_SZ;
/* PCI config space info */
- err = wx_sw_init(wxhw);
+ err = wx_sw_init(wx);
if (err < 0) {
- netif_err(adapter, probe, adapter->netdev,
- "read of internal subsystem device id failed\n");
+ wx_err(wx, "read of internal subsystem device id failed\n");
return err;
}
- switch (wxhw->device_id) {
+ switch (wx->device_id) {
case TXGBE_DEV_ID_SP1000:
case TXGBE_DEV_ID_WX1820:
- wxhw->mac.type = wx_mac_sp;
+ wx->mac.type = wx_mac_sp;
break;
default:
- wxhw->mac.type = wx_mac_unknown;
+ wx->mac.type = wx_mac_unknown;
break;
}
- wxhw->mac.num_rar_entries = TXGBE_SP_RAR_ENTRIES;
- wxhw->mac.max_tx_queues = TXGBE_SP_MAX_TX_QUEUES;
- wxhw->mac.max_rx_queues = TXGBE_SP_MAX_RX_QUEUES;
- wxhw->mac.mcft_size = TXGBE_SP_MC_TBL_SIZE;
-
- adapter->mac_table = kcalloc(wxhw->mac.num_rar_entries,
- sizeof(struct txgbe_mac_addr),
- GFP_KERNEL);
- if (!adapter->mac_table) {
- netif_err(adapter, probe, adapter->netdev,
- "mac_table allocation failed\n");
- return -ENOMEM;
- }
+ /* Set common capability flags and settings */
+ wx->max_q_vectors = TXGBE_MAX_MSIX_VECTORS;
+ err = wx_get_pcie_msix_counts(wx, &msix_count, TXGBE_MAX_MSIX_VECTORS);
+ if (err)
+ wx_err(wx, "Do not support MSI-X\n");
+ wx->mac.max_msix_vectors = msix_count;
+
+ /* enable itr by default in dynamic mode */
+ wx->rx_itr_setting = 1;
+ wx->tx_itr_setting = 1;
+
+ /* set default ring sizes */
+ wx->tx_ring_count = TXGBE_DEFAULT_TXD;
+ wx->rx_ring_count = TXGBE_DEFAULT_RXD;
+
+ /* set default work limits */
+ wx->tx_work_limit = TXGBE_DEFAULT_TX_WORK;
+ wx->rx_work_limit = TXGBE_DEFAULT_RX_WORK;
return 0;
}
@@ -278,23 +382,53 @@ static int txgbe_sw_init(struct txgbe_adapter *adapter)
**/
static int txgbe_open(struct net_device *netdev)
{
- struct txgbe_adapter *adapter = netdev_priv(netdev);
+ struct wx *wx = netdev_priv(netdev);
+ int err;
+
+ err = wx_setup_resources(wx);
+ if (err)
+ goto err_reset;
+
+ wx_configure(wx);
- txgbe_up_complete(adapter);
+ err = txgbe_request_irq(wx);
+ if (err)
+ goto err_free_isb;
+
+ /* Notify the stack of the actual queue counts. */
+ err = netif_set_real_num_tx_queues(netdev, wx->num_tx_queues);
+ if (err)
+ goto err_free_irq;
+
+ err = netif_set_real_num_rx_queues(netdev, wx->num_rx_queues);
+ if (err)
+ goto err_free_irq;
+
+ txgbe_up_complete(wx);
return 0;
+
+err_free_irq:
+ wx_free_irq(wx);
+err_free_isb:
+ wx_free_isb_resources(wx);
+err_reset:
+ txgbe_reset(wx);
+
+ return err;
}
/**
* txgbe_close_suspend - actions necessary to both suspend and close flows
- * @adapter: the private adapter struct
+ * @wx: the private wx struct
*
* This function should contain the necessary work common to both suspending
* and closing of the device.
*/
-static void txgbe_close_suspend(struct txgbe_adapter *adapter)
+static void txgbe_close_suspend(struct wx *wx)
{
- txgbe_disable_device(adapter);
+ txgbe_disable_device(wx);
+ wx_free_resources(wx);
}
/**
@@ -310,29 +444,30 @@ static void txgbe_close_suspend(struct txgbe_adapter *adapter)
**/
static int txgbe_close(struct net_device *netdev)
{
- struct txgbe_adapter *adapter = netdev_priv(netdev);
+ struct wx *wx = netdev_priv(netdev);
- txgbe_down(adapter);
- wx_control_hw(&adapter->hw.wxhw, false);
+ txgbe_down(wx);
+ wx_free_irq(wx);
+ wx_free_resources(wx);
+ wx_control_hw(wx, false);
return 0;
}
static void txgbe_dev_shutdown(struct pci_dev *pdev, bool *enable_wake)
{
- struct txgbe_adapter *adapter = pci_get_drvdata(pdev);
- struct net_device *netdev = adapter->netdev;
- struct txgbe_hw *hw = &adapter->hw;
- struct wx_hw *wxhw = &hw->wxhw;
+ struct wx *wx = pci_get_drvdata(pdev);
+ struct net_device *netdev;
+ netdev = wx->netdev;
netif_device_detach(netdev);
rtnl_lock();
if (netif_running(netdev))
- txgbe_close_suspend(adapter);
+ txgbe_close_suspend(wx);
rtnl_unlock();
- wx_control_hw(wxhw, false);
+ wx_control_hw(wx, false);
pci_disable_device(pdev);
}
@@ -349,45 +484,14 @@ static void txgbe_shutdown(struct pci_dev *pdev)
}
}
-static netdev_tx_t txgbe_xmit_frame(struct sk_buff *skb,
- struct net_device *netdev)
-{
- return NETDEV_TX_OK;
-}
-
-/**
- * txgbe_set_mac - Change the Ethernet Address of the NIC
- * @netdev: network interface device structure
- * @p: pointer to an address structure
- *
- * Returns 0 on success, negative on failure
- **/
-static int txgbe_set_mac(struct net_device *netdev, void *p)
-{
- struct txgbe_adapter *adapter = netdev_priv(netdev);
- struct wx_hw *wxhw = &adapter->hw.wxhw;
- struct sockaddr *addr = p;
- int retval;
-
- retval = eth_prepare_mac_addr_change(netdev, addr);
- if (retval)
- return retval;
-
- txgbe_del_mac_filter(adapter, wxhw->mac.addr, 0);
- eth_hw_addr_set(netdev, addr->sa_data);
- memcpy(wxhw->mac.addr, addr->sa_data, netdev->addr_len);
-
- txgbe_mac_set_default_filter(adapter, wxhw->mac.addr);
-
- return 0;
-}
-
static const struct net_device_ops txgbe_netdev_ops = {
.ndo_open = txgbe_open,
.ndo_stop = txgbe_close,
- .ndo_start_xmit = txgbe_xmit_frame,
+ .ndo_start_xmit = wx_xmit_frame,
+ .ndo_set_rx_mode = wx_set_rx_mode,
.ndo_validate_addr = eth_validate_addr,
- .ndo_set_mac_address = txgbe_set_mac,
+ .ndo_set_mac_address = wx_set_mac,
+ .ndo_get_stats64 = wx_get_stats64,
};
/**
@@ -398,17 +502,15 @@ static const struct net_device_ops txgbe_netdev_ops = {
* Returns 0 on success, negative on failure
*
* txgbe_probe initializes an adapter identified by a pci_dev structure.
- * The OS initialization, configuring of the adapter private structure,
+ * The OS initialization, configuring of the wx private structure,
* and a hardware reset occur.
**/
static int txgbe_probe(struct pci_dev *pdev,
const struct pci_device_id __always_unused *ent)
{
- struct txgbe_adapter *adapter = NULL;
- struct txgbe_hw *hw = NULL;
- struct wx_hw *wxhw = NULL;
struct net_device *netdev;
int err, expected_gts;
+ struct wx *wx = NULL;
u16 eeprom_verh = 0, eeprom_verl = 0, offset = 0;
u16 eeprom_cfg_blkh = 0, eeprom_cfg_blkl = 0;
@@ -440,7 +542,7 @@ static int txgbe_probe(struct pci_dev *pdev,
pci_set_master(pdev);
netdev = devm_alloc_etherdev_mqs(&pdev->dev,
- sizeof(struct txgbe_adapter),
+ sizeof(struct wx),
TXGBE_MAX_TX_QUEUES,
TXGBE_MAX_RX_QUEUES);
if (!netdev) {
@@ -450,81 +552,96 @@ static int txgbe_probe(struct pci_dev *pdev,
SET_NETDEV_DEV(netdev, &pdev->dev);
- adapter = netdev_priv(netdev);
- adapter->netdev = netdev;
- adapter->pdev = pdev;
- hw = &adapter->hw;
- wxhw = &hw->wxhw;
- adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
-
- adapter->io_addr = devm_ioremap(&pdev->dev,
- pci_resource_start(pdev, 0),
- pci_resource_len(pdev, 0));
- if (!adapter->io_addr) {
+ wx = netdev_priv(netdev);
+ wx->netdev = netdev;
+ wx->pdev = pdev;
+
+ wx->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
+
+ wx->hw_addr = devm_ioremap(&pdev->dev,
+ pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
+ if (!wx->hw_addr) {
err = -EIO;
goto err_pci_release_regions;
}
+ wx->driver_name = txgbe_driver_name;
+ txgbe_set_ethtool_ops(netdev);
netdev->netdev_ops = &txgbe_netdev_ops;
/* setup the private structure */
- err = txgbe_sw_init(adapter);
+ err = txgbe_sw_init(wx);
if (err)
goto err_free_mac_table;
/* check if flash load is done after hw power up */
- err = wx_check_flash_load(wxhw, TXGBE_SPI_ILDR_STATUS_PERST);
+ err = wx_check_flash_load(wx, TXGBE_SPI_ILDR_STATUS_PERST);
if (err)
goto err_free_mac_table;
- err = wx_check_flash_load(wxhw, TXGBE_SPI_ILDR_STATUS_PWRRST);
+ err = wx_check_flash_load(wx, TXGBE_SPI_ILDR_STATUS_PWRRST);
if (err)
goto err_free_mac_table;
- err = wx_mng_present(wxhw);
+ err = wx_mng_present(wx);
if (err) {
dev_err(&pdev->dev, "Management capability is not present\n");
goto err_free_mac_table;
}
- err = txgbe_reset_hw(hw);
+ err = txgbe_reset_hw(wx);
if (err) {
dev_err(&pdev->dev, "HW Init failed: %d\n", err);
goto err_free_mac_table;
}
netdev->features |= NETIF_F_HIGHDMA;
+ netdev->features = NETIF_F_SG;
+
+ /* copy netdev features into list of user selectable features */
+ netdev->hw_features |= netdev->features | NETIF_F_RXALL;
+
+ netdev->priv_flags |= IFF_UNICAST_FLT;
+ netdev->priv_flags |= IFF_SUPP_NOFCS;
+
+ netdev->min_mtu = ETH_MIN_MTU;
+ netdev->max_mtu = TXGBE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN);
/* make sure the EEPROM is good */
- err = txgbe_validate_eeprom_checksum(hw, NULL);
+ err = txgbe_validate_eeprom_checksum(wx, NULL);
if (err != 0) {
dev_err(&pdev->dev, "The EEPROM Checksum Is Not Valid\n");
- wr32(wxhw, WX_MIS_RST, WX_MIS_RST_SW_RST);
+ wr32(wx, WX_MIS_RST, WX_MIS_RST_SW_RST);
err = -EIO;
goto err_free_mac_table;
}
- eth_hw_addr_set(netdev, wxhw->mac.perm_addr);
- txgbe_mac_set_default_filter(adapter, wxhw->mac.perm_addr);
+ eth_hw_addr_set(netdev, wx->mac.perm_addr);
+ wx_mac_set_default_filter(wx, wx->mac.perm_addr);
+
+ err = wx_init_interrupt_scheme(wx);
+ if (err)
+ goto err_free_mac_table;
/* Save off EEPROM version number and Option Rom version which
* together make a unique identify for the eeprom
*/
- wx_read_ee_hostif(wxhw,
- wxhw->eeprom.sw_region_offset + TXGBE_EEPROM_VERSION_H,
+ wx_read_ee_hostif(wx,
+ wx->eeprom.sw_region_offset + TXGBE_EEPROM_VERSION_H,
&eeprom_verh);
- wx_read_ee_hostif(wxhw,
- wxhw->eeprom.sw_region_offset + TXGBE_EEPROM_VERSION_L,
+ wx_read_ee_hostif(wx,
+ wx->eeprom.sw_region_offset + TXGBE_EEPROM_VERSION_L,
&eeprom_verl);
etrack_id = (eeprom_verh << 16) | eeprom_verl;
- wx_read_ee_hostif(wxhw,
- wxhw->eeprom.sw_region_offset + TXGBE_ISCSI_BOOT_CONFIG,
+ wx_read_ee_hostif(wx,
+ wx->eeprom.sw_region_offset + TXGBE_ISCSI_BOOT_CONFIG,
&offset);
/* Make sure offset to SCSI block is valid */
if (!(offset == 0x0) && !(offset == 0xffff)) {
- wx_read_ee_hostif(wxhw, offset + 0x84, &eeprom_cfg_blkh);
- wx_read_ee_hostif(wxhw, offset + 0x83, &eeprom_cfg_blkl);
+ wx_read_ee_hostif(wx, offset + 0x84, &eeprom_cfg_blkh);
+ wx_read_ee_hostif(wx, offset + 0x83, &eeprom_cfg_blkl);
/* Only display Option Rom if exist */
if (eeprom_cfg_blkl && eeprom_cfg_blkh) {
@@ -532,15 +649,15 @@ static int txgbe_probe(struct pci_dev *pdev,
build = (eeprom_cfg_blkl << 8) | (eeprom_cfg_blkh >> 8);
patch = eeprom_cfg_blkh & 0x00ff;
- snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id),
+ snprintf(wx->eeprom_id, sizeof(wx->eeprom_id),
"0x%08x, %d.%d.%d", etrack_id, major, build,
patch);
} else {
- snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id),
+ snprintf(wx->eeprom_id, sizeof(wx->eeprom_id),
"0x%08x", etrack_id);
}
} else {
- snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id),
+ snprintf(wx->eeprom_id, sizeof(wx->eeprom_id),
"0x%08x", etrack_id);
}
@@ -548,7 +665,9 @@ static int txgbe_probe(struct pci_dev *pdev,
if (err)
goto err_release_hw;
- pci_set_drvdata(pdev, adapter);
+ pci_set_drvdata(pdev, wx);
+
+ netif_tx_stop_all_queues(netdev);
/* calculate the expected PCIe bandwidth required for optimal
* performance. Note that some older parts will never have enough
@@ -556,27 +675,28 @@ static int txgbe_probe(struct pci_dev *pdev,
* parts to ensure that no warning is displayed, as this could confuse
* users otherwise.
*/
- expected_gts = txgbe_enumerate_functions(adapter) * 10;
+ expected_gts = txgbe_enumerate_functions(wx) * 10;
/* don't check link if we failed to enumerate functions */
if (expected_gts > 0)
- txgbe_check_minimum_link(adapter);
+ txgbe_check_minimum_link(wx);
else
dev_warn(&pdev->dev, "Failed to enumerate PF devices.\n");
/* First try to read PBA as a string */
- err = txgbe_read_pba_string(hw, part_str, TXGBE_PBANUM_LENGTH);
+ err = txgbe_read_pba_string(wx, part_str, TXGBE_PBANUM_LENGTH);
if (err)
strncpy(part_str, "Unknown", TXGBE_PBANUM_LENGTH);
- netif_info(adapter, probe, netdev, "%pM\n", netdev->dev_addr);
+ netif_info(wx, probe, netdev, "%pM\n", netdev->dev_addr);
return 0;
err_release_hw:
- wx_control_hw(wxhw, false);
+ wx_clear_interrupt_scheme(wx);
+ wx_control_hw(wx, false);
err_free_mac_table:
- kfree(adapter->mac_table);
+ kfree(wx->mac_table);
err_pci_release_regions:
pci_disable_pcie_error_reporting(pdev);
pci_release_selected_regions(pdev,
@@ -597,16 +717,17 @@ err_pci_disable_dev:
**/
static void txgbe_remove(struct pci_dev *pdev)
{
- struct txgbe_adapter *adapter = pci_get_drvdata(pdev);
+ struct wx *wx = pci_get_drvdata(pdev);
struct net_device *netdev;
- netdev = adapter->netdev;
+ netdev = wx->netdev;
unregister_netdev(netdev);
pci_release_selected_regions(pdev,
pci_select_bars(pdev, IORESOURCE_MEM));
- kfree(adapter->mac_table);
+ kfree(wx->mac_table);
+ wx_clear_interrupt_scheme(wx);
pci_disable_pcie_error_reporting(pdev);
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
index 740a1c447e20..563ea51deca6 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
@@ -67,8 +67,37 @@
#define TXGBE_PBANUM1_PTR 0x06
#define TXGBE_PBANUM_PTR_GUARD 0xFAFA
-struct txgbe_hw {
- struct wx_hw wxhw;
-};
+#define TXGBE_MAX_MSIX_VECTORS 64
+#define TXGBE_MAX_FDIR_INDICES 63
+
+#define TXGBE_MAX_RX_QUEUES (TXGBE_MAX_FDIR_INDICES + 1)
+#define TXGBE_MAX_TX_QUEUES (TXGBE_MAX_FDIR_INDICES + 1)
+
+#define TXGBE_SP_MAX_TX_QUEUES 128
+#define TXGBE_SP_MAX_RX_QUEUES 128
+#define TXGBE_SP_RAR_ENTRIES 128
+#define TXGBE_SP_MC_TBL_SIZE 128
+#define TXGBE_SP_RX_PB_SIZE 512
+#define TXGBE_SP_TDB_PB_SZ (160 * 1024) /* 160KB Packet Buffer */
+#define TXGBE_MAX_JUMBO_FRAME_SIZE 9432 /* max payload 9414 */
+
+/* TX/RX descriptor defines */
+#define TXGBE_DEFAULT_TXD 512
+#define TXGBE_DEFAULT_TX_WORK 256
+
+#if (PAGE_SIZE < 8192)
+#define TXGBE_DEFAULT_RXD 512
+#define TXGBE_DEFAULT_RX_WORK 256
+#else
+#define TXGBE_DEFAULT_RXD 256
+#define TXGBE_DEFAULT_RX_WORK 128
+#endif
+
+#define TXGBE_INTR_MISC(A) BIT((A)->num_q_vectors)
+#define TXGBE_INTR_QALL(A) (TXGBE_INTR_MISC(A) - 1)
+
+#define TXGBE_MAX_EITR GENMASK(11, 3)
+
+extern char txgbe_driver_name[];
#endif /* _TXGBE_TYPE_H_ */
diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c
index bd3b0c2655a2..83ff882f5d97 100644
--- a/drivers/net/hamradio/baycom_epp.c
+++ b/drivers/net/hamradio/baycom_epp.c
@@ -623,16 +623,10 @@ static int receive(struct net_device *dev, int cnt)
/* --------------------------------------------------------------------- */
-#if defined(__i386__) && !defined(CONFIG_UML)
-#include <asm/msr.h>
#define GETTICK(x) \
({ \
- if (boot_cpu_has(X86_FEATURE_TSC)) \
- x = (unsigned int)rdtsc(); \
+ x = (unsigned int)get_cycles(); \
})
-#else /* __i386__ && !CONFIG_UML */
-#define GETTICK(x)
-#endif /* __i386__ && !CONFIG_UML */
static void epp_bh(struct work_struct *work)
{
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 9352dad58996..da737d959e81 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -851,6 +851,7 @@ static void netvsc_send_completion(struct net_device *ndev,
u32 msglen = hv_pkt_datalen(desc);
struct nvsp_message *pkt_rqst;
u64 cmd_rqst;
+ u32 status;
/* First check if this is a VMBUS completion without data payload */
if (!msglen) {
@@ -922,6 +923,23 @@ static void netvsc_send_completion(struct net_device *ndev,
break;
case NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE:
+ if (msglen < sizeof(struct nvsp_message_header) +
+ sizeof(struct nvsp_1_message_send_rndis_packet_complete)) {
+ if (net_ratelimit())
+ netdev_err(ndev, "nvsp_rndis_pkt_complete length too small: %u\n",
+ msglen);
+ return;
+ }
+
+ /* If status indicates an error, output a message so we know
+ * there's a problem. But process the completion anyway so the
+ * resources are released.
+ */
+ status = nvsp_packet->msg.v1_msg.send_rndis_pkt_complete.status;
+ if (status != NVSP_STAT_SUCCESS && net_ratelimit())
+ netdev_err(ndev, "nvsp_rndis_pkt_complete error status: %x\n",
+ status);
+
netvsc_send_tx_complete(ndev, net_device, incoming_channel,
desc, budget);
break;
@@ -987,9 +1005,6 @@ static void netvsc_copy_to_send_buf(struct netvsc_device *net_device,
void netvsc_dma_unmap(struct hv_device *hv_dev,
struct hv_netvsc_packet *packet)
{
- u32 page_count = packet->cp_partial ?
- packet->page_buf_cnt - packet->rmsg_pgcnt :
- packet->page_buf_cnt;
int i;
if (!hv_is_isolation_supported())
@@ -998,7 +1013,7 @@ void netvsc_dma_unmap(struct hv_device *hv_dev,
if (!packet->dma_range)
return;
- for (i = 0; i < page_count; i++)
+ for (i = 0; i < packet->page_buf_cnt; i++)
dma_unmap_single(&hv_dev->device, packet->dma_range[i].dma,
packet->dma_range[i].mapping_size,
DMA_TO_DEVICE);
@@ -1028,9 +1043,7 @@ static int netvsc_dma_map(struct hv_device *hv_dev,
struct hv_netvsc_packet *packet,
struct hv_page_buffer *pb)
{
- u32 page_count = packet->cp_partial ?
- packet->page_buf_cnt - packet->rmsg_pgcnt :
- packet->page_buf_cnt;
+ u32 page_count = packet->page_buf_cnt;
dma_addr_t dma;
int i;
@@ -1039,7 +1052,7 @@ static int netvsc_dma_map(struct hv_device *hv_dev,
packet->dma_range = kcalloc(page_count,
sizeof(*packet->dma_range),
- GFP_KERNEL);
+ GFP_ATOMIC);
if (!packet->dma_range)
return -ENOMEM;
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index f9b219e6cd58..0103ff914024 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -2559,6 +2559,9 @@ static int netvsc_probe(struct hv_device *dev,
netdev_lockdep_set_classes(net);
+ net->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT;
+
/* MTU range: 68 - 1500 or 65521 */
net->min_mtu = NETVSC_MTU_MIN;
if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2)
@@ -2594,7 +2597,7 @@ no_net:
return ret;
}
-static int netvsc_remove(struct hv_device *dev)
+static void netvsc_remove(struct hv_device *dev)
{
struct net_device_context *ndev_ctx;
struct net_device *vf_netdev, *net;
@@ -2603,7 +2606,7 @@ static int netvsc_remove(struct hv_device *dev)
net = hv_get_drvdata(dev);
if (net == NULL) {
dev_err(&dev->device, "No net device to remove\n");
- return 0;
+ return;
}
ndev_ctx = netdev_priv(net);
@@ -2637,7 +2640,6 @@ static int netvsc_remove(struct hv_device *dev)
free_percpu(ndev_ctx->vf_stats);
free_netdev(net);
- return 0;
}
static int netvsc_suspend(struct hv_device *dev)
diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
index 15f283b26721..62b984f84d9f 100644
--- a/drivers/net/ieee802154/at86rf230.c
+++ b/drivers/net/ieee802154/at86rf230.c
@@ -17,8 +17,8 @@
#include <linux/irq.h>
#include <linux/gpio.h>
#include <linux/delay.h>
+#include <linux/property.h>
#include <linux/spi/spi.h>
-#include <linux/spi/at86rf230.h>
#include <linux/regmap.h>
#include <linux/skbuff.h>
#include <linux/of_gpio.h>
@@ -82,7 +82,7 @@ struct at86rf230_local {
struct ieee802154_hw *hw;
struct at86rf2xx_chip_data *data;
struct regmap *regmap;
- int slp_tr;
+ struct gpio_desc *slp_tr;
bool sleep;
struct completion state_complete;
@@ -107,8 +107,8 @@ at86rf230_async_state_change(struct at86rf230_local *lp,
static inline void
at86rf230_sleep(struct at86rf230_local *lp)
{
- if (gpio_is_valid(lp->slp_tr)) {
- gpio_set_value(lp->slp_tr, 1);
+ if (lp->slp_tr) {
+ gpiod_set_value(lp->slp_tr, 1);
usleep_range(lp->data->t_off_to_sleep,
lp->data->t_off_to_sleep + 10);
lp->sleep = true;
@@ -118,8 +118,8 @@ at86rf230_sleep(struct at86rf230_local *lp)
static inline void
at86rf230_awake(struct at86rf230_local *lp)
{
- if (gpio_is_valid(lp->slp_tr)) {
- gpio_set_value(lp->slp_tr, 0);
+ if (lp->slp_tr) {
+ gpiod_set_value(lp->slp_tr, 0);
usleep_range(lp->data->t_sleep_to_off,
lp->data->t_sleep_to_off + 100);
lp->sleep = false;
@@ -204,9 +204,9 @@ at86rf230_write_subreg(struct at86rf230_local *lp,
static inline void
at86rf230_slp_tr_rising_edge(struct at86rf230_local *lp)
{
- gpio_set_value(lp->slp_tr, 1);
+ gpiod_set_value(lp->slp_tr, 1);
udelay(1);
- gpio_set_value(lp->slp_tr, 0);
+ gpiod_set_value(lp->slp_tr, 0);
}
static bool
@@ -819,7 +819,7 @@ at86rf230_write_frame_complete(void *context)
ctx->trx.len = 2;
- if (gpio_is_valid(lp->slp_tr))
+ if (lp->slp_tr)
at86rf230_slp_tr_rising_edge(lp);
else
at86rf230_async_write_reg(lp, RG_TRX_STATE, STATE_BUSY_TX, ctx,
@@ -1416,32 +1416,6 @@ static int at86rf230_hw_init(struct at86rf230_local *lp, u8 xtal_trim)
}
static int
-at86rf230_get_pdata(struct spi_device *spi, int *rstn, int *slp_tr,
- u8 *xtal_trim)
-{
- struct at86rf230_platform_data *pdata = spi->dev.platform_data;
- int ret;
-
- if (!IS_ENABLED(CONFIG_OF) || !spi->dev.of_node) {
- if (!pdata)
- return -ENOENT;
-
- *rstn = pdata->rstn;
- *slp_tr = pdata->slp_tr;
- *xtal_trim = pdata->xtal_trim;
- return 0;
- }
-
- *rstn = of_get_named_gpio(spi->dev.of_node, "reset-gpio", 0);
- *slp_tr = of_get_named_gpio(spi->dev.of_node, "sleep-gpio", 0);
- ret = of_property_read_u8(spi->dev.of_node, "xtal-trim", xtal_trim);
- if (ret < 0 && ret != -EINVAL)
- return ret;
-
- return 0;
-}
-
-static int
at86rf230_detect_device(struct at86rf230_local *lp)
{
unsigned int part, version, val;
@@ -1546,41 +1520,47 @@ static int at86rf230_probe(struct spi_device *spi)
{
struct ieee802154_hw *hw;
struct at86rf230_local *lp;
+ struct gpio_desc *slp_tr;
+ struct gpio_desc *rstn;
unsigned int status;
- int rc, irq_type, rstn, slp_tr;
- u8 xtal_trim = 0;
+ int rc, irq_type;
+ u8 xtal_trim;
if (!spi->irq) {
dev_err(&spi->dev, "no IRQ specified\n");
return -EINVAL;
}
- rc = at86rf230_get_pdata(spi, &rstn, &slp_tr, &xtal_trim);
+ rc = device_property_read_u8(&spi->dev, "xtal-trim", &xtal_trim);
if (rc < 0) {
- dev_err(&spi->dev, "failed to parse platform_data: %d\n", rc);
- return rc;
- }
-
- if (gpio_is_valid(rstn)) {
- rc = devm_gpio_request_one(&spi->dev, rstn,
- GPIOF_OUT_INIT_HIGH, "rstn");
- if (rc)
+ if (rc != -EINVAL) {
+ dev_err(&spi->dev,
+ "failed to parse xtal-trim: %d\n", rc);
return rc;
+ }
+ xtal_trim = 0;
}
- if (gpio_is_valid(slp_tr)) {
- rc = devm_gpio_request_one(&spi->dev, slp_tr,
- GPIOF_OUT_INIT_LOW, "slp_tr");
- if (rc)
- return rc;
- }
+ rstn = devm_gpiod_get_optional(&spi->dev, "reset", GPIOD_OUT_LOW);
+ rc = PTR_ERR_OR_ZERO(rstn);
+ if (rc)
+ return rc;
+
+ gpiod_set_consumer_name(rstn, "rstn");
+
+ slp_tr = devm_gpiod_get_optional(&spi->dev, "sleep", GPIOD_OUT_LOW);
+ rc = PTR_ERR_OR_ZERO(slp_tr);
+ if (rc)
+ return rc;
+
+ gpiod_set_consumer_name(slp_tr, "slp_tr");
/* Reset */
- if (gpio_is_valid(rstn)) {
+ if (rstn) {
udelay(1);
- gpio_set_value_cansleep(rstn, 0);
+ gpiod_set_value_cansleep(rstn, 1);
udelay(1);
- gpio_set_value_cansleep(rstn, 1);
+ gpiod_set_value_cansleep(rstn, 0);
usleep_range(120, 240);
}
diff --git a/drivers/net/ieee802154/cc2520.c b/drivers/net/ieee802154/cc2520.c
index edc769daad07..a94d8dd71aad 100644
--- a/drivers/net/ieee802154/cc2520.c
+++ b/drivers/net/ieee802154/cc2520.c
@@ -7,14 +7,13 @@
*/
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/delay.h>
#include <linux/spi/spi.h>
-#include <linux/spi/cc2520.h>
+#include <linux/property.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
-#include <linux/of_gpio.h>
#include <linux/ieee802154.h>
#include <linux/crc-ccitt.h>
#include <asm/unaligned.h>
@@ -206,7 +205,7 @@ struct cc2520_private {
struct mutex buffer_mutex; /* SPI buffer mutex */
bool is_tx; /* Flag for sync b/w Tx and Rx */
bool amplified; /* Flag for CC2591 */
- int fifo_pin; /* FIFO GPIO pin number */
+ struct gpio_desc *fifo_pin; /* FIFO GPIO pin number */
struct work_struct fifop_irqwork;/* Workqueue for FIFOP */
spinlock_t lock; /* Lock for is_tx*/
struct completion tx_complete; /* Work completion for Tx */
@@ -875,7 +874,7 @@ static void cc2520_fifop_irqwork(struct work_struct *work)
dev_dbg(&priv->spi->dev, "fifop interrupt received\n");
- if (gpio_get_value(priv->fifo_pin))
+ if (gpiod_get_value(priv->fifo_pin))
cc2520_rx(priv);
else
dev_dbg(&priv->spi->dev, "rxfifo overflow\n");
@@ -912,49 +911,11 @@ static irqreturn_t cc2520_sfd_isr(int irq, void *data)
return IRQ_HANDLED;
}
-static int cc2520_get_platform_data(struct spi_device *spi,
- struct cc2520_platform_data *pdata)
-{
- struct device_node *np = spi->dev.of_node;
- struct cc2520_private *priv = spi_get_drvdata(spi);
-
- if (!np) {
- struct cc2520_platform_data *spi_pdata = spi->dev.platform_data;
-
- if (!spi_pdata)
- return -ENOENT;
- *pdata = *spi_pdata;
- priv->fifo_pin = pdata->fifo;
- return 0;
- }
-
- pdata->fifo = of_get_named_gpio(np, "fifo-gpio", 0);
- priv->fifo_pin = pdata->fifo;
-
- pdata->fifop = of_get_named_gpio(np, "fifop-gpio", 0);
-
- pdata->sfd = of_get_named_gpio(np, "sfd-gpio", 0);
- pdata->cca = of_get_named_gpio(np, "cca-gpio", 0);
- pdata->vreg = of_get_named_gpio(np, "vreg-gpio", 0);
- pdata->reset = of_get_named_gpio(np, "reset-gpio", 0);
-
- /* CC2591 front end for CC2520 */
- if (of_property_read_bool(np, "amplified"))
- priv->amplified = true;
-
- return 0;
-}
-
static int cc2520_hw_init(struct cc2520_private *priv)
{
u8 status = 0, state = 0xff;
int ret;
int timeout = 100;
- struct cc2520_platform_data pdata;
-
- ret = cc2520_get_platform_data(priv->spi, &pdata);
- if (ret)
- goto err_ret;
ret = cc2520_read_register(priv, CC2520_FSMSTAT1, &state);
if (ret)
@@ -1071,7 +1032,11 @@ err_ret:
static int cc2520_probe(struct spi_device *spi)
{
struct cc2520_private *priv;
- struct cc2520_platform_data pdata;
+ struct gpio_desc *fifop;
+ struct gpio_desc *cca;
+ struct gpio_desc *sfd;
+ struct gpio_desc *reset;
+ struct gpio_desc *vreg;
int ret;
priv = devm_kzalloc(&spi->dev, sizeof(*priv), GFP_KERNEL);
@@ -1080,11 +1045,11 @@ static int cc2520_probe(struct spi_device *spi)
spi_set_drvdata(spi, priv);
- ret = cc2520_get_platform_data(spi, &pdata);
- if (ret < 0) {
- dev_err(&spi->dev, "no platform data\n");
- return -EINVAL;
- }
+ /* CC2591 front end for CC2520 */
+ /* Assumption that CC2591 is not connected */
+ priv->amplified = false;
+ if (device_property_read_bool(&spi->dev, "amplified"))
+ priv->amplified = true;
priv->spi = spi;
@@ -1098,80 +1063,53 @@ static int cc2520_probe(struct spi_device *spi)
spin_lock_init(&priv->lock);
init_completion(&priv->tx_complete);
- /* Assumption that CC2591 is not connected */
- priv->amplified = false;
-
/* Request all the gpio's */
- if (!gpio_is_valid(pdata.fifo)) {
+ priv->fifo_pin = devm_gpiod_get(&spi->dev, "fifo", GPIOD_IN);
+ if (IS_ERR(priv->fifo_pin)) {
dev_err(&spi->dev, "fifo gpio is not valid\n");
- ret = -EINVAL;
+ ret = PTR_ERR(priv->fifo_pin);
goto err_hw_init;
}
- ret = devm_gpio_request_one(&spi->dev, pdata.fifo,
- GPIOF_IN, "fifo");
- if (ret)
- goto err_hw_init;
-
- if (!gpio_is_valid(pdata.cca)) {
+ cca = devm_gpiod_get(&spi->dev, "cca", GPIOD_IN);
+ if (IS_ERR(cca)) {
dev_err(&spi->dev, "cca gpio is not valid\n");
- ret = -EINVAL;
+ ret = PTR_ERR(cca);
goto err_hw_init;
}
- ret = devm_gpio_request_one(&spi->dev, pdata.cca,
- GPIOF_IN, "cca");
- if (ret)
- goto err_hw_init;
-
- if (!gpio_is_valid(pdata.fifop)) {
+ fifop = devm_gpiod_get(&spi->dev, "fifop", GPIOD_IN);
+ if (IS_ERR(fifop)) {
dev_err(&spi->dev, "fifop gpio is not valid\n");
- ret = -EINVAL;
+ ret = PTR_ERR(fifop);
goto err_hw_init;
}
- ret = devm_gpio_request_one(&spi->dev, pdata.fifop,
- GPIOF_IN, "fifop");
- if (ret)
- goto err_hw_init;
-
- if (!gpio_is_valid(pdata.sfd)) {
+ sfd = devm_gpiod_get(&spi->dev, "sfd", GPIOD_IN);
+ if (IS_ERR(sfd)) {
dev_err(&spi->dev, "sfd gpio is not valid\n");
- ret = -EINVAL;
+ ret = PTR_ERR(sfd);
goto err_hw_init;
}
- ret = devm_gpio_request_one(&spi->dev, pdata.sfd,
- GPIOF_IN, "sfd");
- if (ret)
- goto err_hw_init;
-
- if (!gpio_is_valid(pdata.reset)) {
+ reset = devm_gpiod_get(&spi->dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(reset)) {
dev_err(&spi->dev, "reset gpio is not valid\n");
- ret = -EINVAL;
+ ret = PTR_ERR(reset);
goto err_hw_init;
}
- ret = devm_gpio_request_one(&spi->dev, pdata.reset,
- GPIOF_OUT_INIT_LOW, "reset");
- if (ret)
- goto err_hw_init;
-
- if (!gpio_is_valid(pdata.vreg)) {
+ vreg = devm_gpiod_get(&spi->dev, "vreg", GPIOD_OUT_LOW);
+ if (IS_ERR(vreg)) {
dev_err(&spi->dev, "vreg gpio is not valid\n");
- ret = -EINVAL;
+ ret = PTR_ERR(vreg);
goto err_hw_init;
}
- ret = devm_gpio_request_one(&spi->dev, pdata.vreg,
- GPIOF_OUT_INIT_LOW, "vreg");
- if (ret)
- goto err_hw_init;
-
- gpio_set_value(pdata.vreg, HIGH);
+ gpiod_set_value(vreg, HIGH);
usleep_range(100, 150);
- gpio_set_value(pdata.reset, HIGH);
+ gpiod_set_value(reset, HIGH);
usleep_range(200, 250);
ret = cc2520_hw_init(priv);
@@ -1180,7 +1118,7 @@ static int cc2520_probe(struct spi_device *spi)
/* Set up fifop interrupt */
ret = devm_request_irq(&spi->dev,
- gpio_to_irq(pdata.fifop),
+ gpiod_to_irq(fifop),
cc2520_fifop_isr,
IRQF_TRIGGER_RISING,
dev_name(&spi->dev),
@@ -1192,7 +1130,7 @@ static int cc2520_probe(struct spi_device *spi)
/* Set up sfd interrupt */
ret = devm_request_irq(&spi->dev,
- gpio_to_irq(pdata.sfd),
+ gpiod_to_irq(sfd),
cc2520_sfd_isr,
IRQF_TRIGGER_FALLING,
dev_name(&spi->dev),
@@ -1241,7 +1179,7 @@ MODULE_DEVICE_TABLE(of, cc2520_of_ids);
static struct spi_driver cc2520_driver = {
.driver = {
.name = "cc2520",
- .of_match_table = of_match_ptr(cc2520_of_ids),
+ .of_match_table = cc2520_of_ids,
},
.id_table = cc2520_ids,
.probe = cc2520_probe,
diff --git a/drivers/net/ipa/Makefile b/drivers/net/ipa/Makefile
index 8cdcaaf58ae3..cba199422f47 100644
--- a/drivers/net/ipa/Makefile
+++ b/drivers/net/ipa/Makefile
@@ -4,15 +4,20 @@
IPA_VERSIONS := 3.1 3.5.1 4.2 4.5 4.7 4.9 4.11
+# Some IPA versions can reuse another set of GSI register definitions.
+GSI_IPA_VERSIONS := 3.1 3.5.1 4.0 4.5 4.9 4.11
+
obj-$(CONFIG_QCOM_IPA) += ipa.o
ipa-y := ipa_main.o ipa_power.o ipa_reg.o ipa_mem.o \
- ipa_table.o ipa_interrupt.o gsi.o gsi_trans.o \
- ipa_gsi.o ipa_smp2p.o ipa_uc.o \
+ ipa_table.o ipa_interrupt.o gsi.o gsi_reg.o \
+ gsi_trans.o ipa_gsi.o ipa_smp2p.o ipa_uc.o \
ipa_endpoint.o ipa_cmd.o ipa_modem.o \
ipa_resource.o ipa_qmi.o ipa_qmi_msg.o \
ipa_sysfs.o
+ipa-y += $(GSI_IPA_VERSIONS:%=reg/gsi_reg-v%.o)
+
ipa-y += $(IPA_VERSIONS:%=reg/ipa_reg-v%.o)
ipa-y += $(IPA_VERSIONS:%=data/ipa_data-v%.o)
diff --git a/drivers/net/ipa/gsi.c b/drivers/net/ipa/gsi.c
index bea2da1c4c51..9a0b1fe4a93a 100644
--- a/drivers/net/ipa/gsi.c
+++ b/drivers/net/ipa/gsi.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2022 Linaro Ltd.
+ * Copyright (C) 2018-2023 Linaro Ltd.
*/
#include <linux/types.h>
@@ -16,6 +16,7 @@
#include <linux/netdevice.h>
#include "gsi.h"
+#include "reg.h"
#include "gsi_reg.h"
#include "gsi_private.h"
#include "gsi_trans.h"
@@ -162,12 +163,6 @@ static void gsi_validate_build(void)
* ensure the elements themselves meet the requirement.
*/
BUILD_BUG_ON(!is_power_of_2(GSI_RING_ELEMENT_SIZE));
-
- /* The channel element size must fit in this field */
- BUILD_BUG_ON(GSI_RING_ELEMENT_SIZE > field_max(ELEMENT_SIZE_FMASK));
-
- /* The event ring element size must fit in this field */
- BUILD_BUG_ON(GSI_RING_ELEMENT_SIZE > field_max(EV_ELEMENT_SIZE_FMASK));
}
/* Return the channel id associated with a given channel */
@@ -182,21 +177,39 @@ static bool gsi_channel_initialized(struct gsi_channel *channel)
return !!channel->gsi;
}
+/* Encode the channel protocol for the CH_C_CNTXT_0 register */
+static u32 ch_c_cntxt_0_type_encode(enum ipa_version version,
+ const struct reg *reg,
+ enum gsi_channel_type type)
+{
+ u32 val;
+
+ val = reg_encode(reg, CHTYPE_PROTOCOL, type);
+ if (version < IPA_VERSION_4_5 || version >= IPA_VERSION_5_0)
+ return val;
+
+ type >>= hweight32(reg_fmask(reg, CHTYPE_PROTOCOL));
+
+ return val | reg_encode(reg, CHTYPE_PROTOCOL_MSB, type);
+}
+
/* Update the GSI IRQ type register with the cached value */
static void gsi_irq_type_update(struct gsi *gsi, u32 val)
{
+ const struct reg *reg = gsi_reg(gsi, CNTXT_TYPE_IRQ_MSK);
+
gsi->type_enabled_bitmap = val;
- iowrite32(val, gsi->virt + GSI_CNTXT_TYPE_IRQ_MSK_OFFSET);
+ iowrite32(val, gsi->virt + reg_offset(reg));
}
static void gsi_irq_type_enable(struct gsi *gsi, enum gsi_irq_type_id type_id)
{
- gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | BIT(type_id));
+ gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | type_id);
}
static void gsi_irq_type_disable(struct gsi *gsi, enum gsi_irq_type_id type_id)
{
- gsi_irq_type_update(gsi, gsi->type_enabled_bitmap & ~BIT(type_id));
+ gsi_irq_type_update(gsi, gsi->type_enabled_bitmap & ~type_id);
}
/* Event ring commands are performed one at a time. Their completion
@@ -207,22 +220,29 @@ static void gsi_irq_type_disable(struct gsi *gsi, enum gsi_irq_type_id type_id)
static void gsi_irq_ev_ctrl_enable(struct gsi *gsi, u32 evt_ring_id)
{
u32 val = BIT(evt_ring_id);
+ const struct reg *reg;
/* There's a small chance that a previous command completed
* after the interrupt was disabled, so make sure we have no
* pending interrupts before we enable them.
*/
- iowrite32(~0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_CLR_OFFSET);
+ reg = gsi_reg(gsi, CNTXT_SRC_EV_CH_IRQ_CLR);
+ iowrite32(~0, gsi->virt + reg_offset(reg));
- iowrite32(val, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
+ reg = gsi_reg(gsi, CNTXT_SRC_EV_CH_IRQ_MSK);
+ iowrite32(val, gsi->virt + reg_offset(reg));
gsi_irq_type_enable(gsi, GSI_EV_CTRL);
}
/* Disable event ring control interrupts */
static void gsi_irq_ev_ctrl_disable(struct gsi *gsi)
{
+ const struct reg *reg;
+
gsi_irq_type_disable(gsi, GSI_EV_CTRL);
- iowrite32(0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
+
+ reg = gsi_reg(gsi, CNTXT_SRC_EV_CH_IRQ_MSK);
+ iowrite32(0, gsi->virt + reg_offset(reg));
}
/* Channel commands are performed one at a time. Their completion is
@@ -233,32 +253,43 @@ static void gsi_irq_ev_ctrl_disable(struct gsi *gsi)
static void gsi_irq_ch_ctrl_enable(struct gsi *gsi, u32 channel_id)
{
u32 val = BIT(channel_id);
+ const struct reg *reg;
/* There's a small chance that a previous command completed
* after the interrupt was disabled, so make sure we have no
* pending interrupts before we enable them.
*/
- iowrite32(~0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_CLR_OFFSET);
+ reg = gsi_reg(gsi, CNTXT_SRC_CH_IRQ_CLR);
+ iowrite32(~0, gsi->virt + reg_offset(reg));
+
+ reg = gsi_reg(gsi, CNTXT_SRC_CH_IRQ_MSK);
+ iowrite32(val, gsi->virt + reg_offset(reg));
- iowrite32(val, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
gsi_irq_type_enable(gsi, GSI_CH_CTRL);
}
/* Disable channel control interrupts */
static void gsi_irq_ch_ctrl_disable(struct gsi *gsi)
{
+ const struct reg *reg;
+
gsi_irq_type_disable(gsi, GSI_CH_CTRL);
- iowrite32(0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
+
+ reg = gsi_reg(gsi, CNTXT_SRC_CH_IRQ_MSK);
+ iowrite32(0, gsi->virt + reg_offset(reg));
}
static void gsi_irq_ieob_enable_one(struct gsi *gsi, u32 evt_ring_id)
{
bool enable_ieob = !gsi->ieob_enabled_bitmap;
+ const struct reg *reg;
u32 val;
gsi->ieob_enabled_bitmap |= BIT(evt_ring_id);
+
+ reg = gsi_reg(gsi, CNTXT_SRC_IEOB_IRQ_MSK);
val = gsi->ieob_enabled_bitmap;
- iowrite32(val, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
+ iowrite32(val, gsi->virt + reg_offset(reg));
/* Enable the interrupt type if this is the first channel enabled */
if (enable_ieob)
@@ -267,6 +298,7 @@ static void gsi_irq_ieob_enable_one(struct gsi *gsi, u32 evt_ring_id)
static void gsi_irq_ieob_disable(struct gsi *gsi, u32 event_mask)
{
+ const struct reg *reg;
u32 val;
gsi->ieob_enabled_bitmap &= ~event_mask;
@@ -275,8 +307,9 @@ static void gsi_irq_ieob_disable(struct gsi *gsi, u32 event_mask)
if (!gsi->ieob_enabled_bitmap)
gsi_irq_type_disable(gsi, GSI_IEOB);
+ reg = gsi_reg(gsi, CNTXT_SRC_IEOB_IRQ_MSK);
val = gsi->ieob_enabled_bitmap;
- iowrite32(val, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
+ iowrite32(val, gsi->virt + reg_offset(reg));
}
static void gsi_irq_ieob_disable_one(struct gsi *gsi, u32 evt_ring_id)
@@ -287,34 +320,44 @@ static void gsi_irq_ieob_disable_one(struct gsi *gsi, u32 evt_ring_id)
/* Enable all GSI_interrupt types */
static void gsi_irq_enable(struct gsi *gsi)
{
+ const struct reg *reg;
u32 val;
/* Global interrupts include hardware error reports. Enable
* that so we can at least report the error should it occur.
*/
- iowrite32(BIT(ERROR_INT), gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
- gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | BIT(GSI_GLOB_EE));
+ reg = gsi_reg(gsi, CNTXT_GLOB_IRQ_EN);
+ iowrite32(ERROR_INT, gsi->virt + reg_offset(reg));
+
+ gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | GSI_GLOB_EE);
/* General GSI interrupts are reported to all EEs; if they occur
* they are unrecoverable (without reset). A breakpoint interrupt
* also exists, but we don't support that. We want to be notified
* of errors so we can report them, even if they can't be handled.
*/
- val = BIT(BUS_ERROR);
- val |= BIT(CMD_FIFO_OVRFLOW);
- val |= BIT(MCS_STACK_OVRFLOW);
- iowrite32(val, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
- gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | BIT(GSI_GENERAL));
+ reg = gsi_reg(gsi, CNTXT_GSI_IRQ_EN);
+ val = BUS_ERROR;
+ val |= CMD_FIFO_OVRFLOW;
+ val |= MCS_STACK_OVRFLOW;
+ iowrite32(val, gsi->virt + reg_offset(reg));
+
+ gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | GSI_GENERAL);
}
/* Disable all GSI interrupt types */
static void gsi_irq_disable(struct gsi *gsi)
{
+ const struct reg *reg;
+
gsi_irq_type_update(gsi, 0);
/* Clear the type-specific interrupt masks set by gsi_irq_enable() */
- iowrite32(0, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
- iowrite32(0, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
+ reg = gsi_reg(gsi, CNTXT_GSI_IRQ_EN);
+ iowrite32(0, gsi->virt + reg_offset(reg));
+
+ reg = gsi_reg(gsi, CNTXT_GLOB_IRQ_EN);
+ iowrite32(0, gsi->virt + reg_offset(reg));
}
/* Return the virtual address associated with a ring index */
@@ -356,11 +399,12 @@ static bool gsi_command(struct gsi *gsi, u32 reg, u32 val)
static enum gsi_evt_ring_state
gsi_evt_ring_state(struct gsi *gsi, u32 evt_ring_id)
{
+ const struct reg *reg = gsi_reg(gsi, EV_CH_E_CNTXT_0);
u32 val;
- val = ioread32(gsi->virt + GSI_EV_CH_E_CNTXT_0_OFFSET(evt_ring_id));
+ val = ioread32(gsi->virt + reg_n_offset(reg, evt_ring_id));
- return u32_get_bits(val, EV_CHSTATE_FMASK);
+ return reg_decode(reg, EV_CHSTATE, val);
}
/* Issue an event ring command and wait for it to complete */
@@ -368,16 +412,18 @@ static void gsi_evt_ring_command(struct gsi *gsi, u32 evt_ring_id,
enum gsi_evt_cmd_opcode opcode)
{
struct device *dev = gsi->dev;
+ const struct reg *reg;
bool timeout;
u32 val;
/* Enable the completion interrupt for the command */
gsi_irq_ev_ctrl_enable(gsi, evt_ring_id);
- val = u32_encode_bits(evt_ring_id, EV_CHID_FMASK);
- val |= u32_encode_bits(opcode, EV_OPCODE_FMASK);
+ reg = gsi_reg(gsi, EV_CH_CMD);
+ val = reg_encode(reg, EV_CHID, evt_ring_id);
+ val |= reg_encode(reg, EV_OPCODE, opcode);
- timeout = !gsi_command(gsi, GSI_EV_CH_CMD_OFFSET, val);
+ timeout = !gsi_command(gsi, reg_offset(reg), val);
gsi_irq_ev_ctrl_disable(gsi);
@@ -464,13 +510,16 @@ static void gsi_evt_ring_de_alloc_command(struct gsi *gsi, u32 evt_ring_id)
/* Fetch the current state of a channel from hardware */
static enum gsi_channel_state gsi_channel_state(struct gsi_channel *channel)
{
+ const struct reg *reg = gsi_reg(channel->gsi, CH_C_CNTXT_0);
u32 channel_id = gsi_channel_id(channel);
- void __iomem *virt = channel->gsi->virt;
+ struct gsi *gsi = channel->gsi;
+ void __iomem *virt = gsi->virt;
u32 val;
- val = ioread32(virt + GSI_CH_C_CNTXT_0_OFFSET(channel_id));
+ reg = gsi_reg(gsi, CH_C_CNTXT_0);
+ val = ioread32(virt + reg_n_offset(reg, channel_id));
- return u32_get_bits(val, CHSTATE_FMASK);
+ return reg_decode(reg, CHSTATE, val);
}
/* Issue a channel command and wait for it to complete */
@@ -480,15 +529,18 @@ gsi_channel_command(struct gsi_channel *channel, enum gsi_ch_cmd_opcode opcode)
u32 channel_id = gsi_channel_id(channel);
struct gsi *gsi = channel->gsi;
struct device *dev = gsi->dev;
+ const struct reg *reg;
bool timeout;
u32 val;
/* Enable the completion interrupt for the command */
gsi_irq_ch_ctrl_enable(gsi, channel_id);
- val = u32_encode_bits(channel_id, CH_CHID_FMASK);
- val |= u32_encode_bits(opcode, CH_OPCODE_FMASK);
- timeout = !gsi_command(gsi, GSI_CH_CMD_OFFSET, val);
+ reg = gsi_reg(gsi, CH_CMD);
+ val = reg_encode(reg, CH_CHID, channel_id);
+ val |= reg_encode(reg, CH_OPCODE, opcode);
+
+ timeout = !gsi_command(gsi, reg_offset(reg), val);
gsi_irq_ch_ctrl_disable(gsi);
@@ -651,6 +703,7 @@ static void gsi_channel_de_alloc_command(struct gsi *gsi, u32 channel_id)
*/
static void gsi_evt_ring_doorbell(struct gsi *gsi, u32 evt_ring_id, u32 index)
{
+ const struct reg *reg = gsi_reg(gsi, EV_CH_E_DOORBELL_0);
struct gsi_ring *ring = &gsi->evt_ring[evt_ring_id].ring;
u32 val;
@@ -658,7 +711,7 @@ static void gsi_evt_ring_doorbell(struct gsi *gsi, u32 evt_ring_id, u32 index)
/* Note: index *must* be used modulo the ring count here */
val = gsi_ring_addr(ring, (index - 1) % ring->count);
- iowrite32(val, gsi->virt + GSI_EV_CH_E_DOORBELL_0_OFFSET(evt_ring_id));
+ iowrite32(val, gsi->virt + reg_n_offset(reg, evt_ring_id));
}
/* Program an event ring for use */
@@ -666,41 +719,56 @@ static void gsi_evt_ring_program(struct gsi *gsi, u32 evt_ring_id)
{
struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
struct gsi_ring *ring = &evt_ring->ring;
- size_t size;
+ const struct reg *reg;
u32 val;
+ reg = gsi_reg(gsi, EV_CH_E_CNTXT_0);
/* We program all event rings as GPI type/protocol */
- val = u32_encode_bits(GSI_CHANNEL_TYPE_GPI, EV_CHTYPE_FMASK);
- val |= EV_INTYPE_FMASK;
- val |= u32_encode_bits(GSI_RING_ELEMENT_SIZE, EV_ELEMENT_SIZE_FMASK);
- iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_0_OFFSET(evt_ring_id));
+ val = reg_encode(reg, EV_CHTYPE, GSI_CHANNEL_TYPE_GPI);
+ /* EV_EE field is 0 (GSI_EE_AP) */
+ val |= reg_bit(reg, EV_INTYPE);
+ val |= reg_encode(reg, EV_ELEMENT_SIZE, GSI_RING_ELEMENT_SIZE);
+ iowrite32(val, gsi->virt + reg_n_offset(reg, evt_ring_id));
- size = ring->count * GSI_RING_ELEMENT_SIZE;
- val = ev_r_length_encoded(gsi->version, size);
- iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_1_OFFSET(evt_ring_id));
+ reg = gsi_reg(gsi, EV_CH_E_CNTXT_1);
+ val = reg_encode(reg, R_LENGTH, ring->count * GSI_RING_ELEMENT_SIZE);
+ iowrite32(val, gsi->virt + reg_n_offset(reg, evt_ring_id));
/* The context 2 and 3 registers store the low-order and
* high-order 32 bits of the address of the event ring,
* respectively.
*/
+ reg = gsi_reg(gsi, EV_CH_E_CNTXT_2);
val = lower_32_bits(ring->addr);
- iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_2_OFFSET(evt_ring_id));
+ iowrite32(val, gsi->virt + reg_n_offset(reg, evt_ring_id));
+
+ reg = gsi_reg(gsi, EV_CH_E_CNTXT_3);
val = upper_32_bits(ring->addr);
- iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_3_OFFSET(evt_ring_id));
+ iowrite32(val, gsi->virt + reg_n_offset(reg, evt_ring_id));
/* Enable interrupt moderation by setting the moderation delay */
- val = u32_encode_bits(GSI_EVT_RING_INT_MODT, MODT_FMASK);
- val |= u32_encode_bits(1, MODC_FMASK); /* comes from channel */
- iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_8_OFFSET(evt_ring_id));
+ reg = gsi_reg(gsi, EV_CH_E_CNTXT_8);
+ val = reg_encode(reg, EV_MODT, GSI_EVT_RING_INT_MODT);
+ val |= reg_encode(reg, EV_MODC, 1); /* comes from channel */
+ /* EV_MOD_CNT is 0 (no counter-based interrupt coalescing) */
+ iowrite32(val, gsi->virt + reg_n_offset(reg, evt_ring_id));
+
+ /* No MSI write data, and MSI high and low address is 0 */
+ reg = gsi_reg(gsi, EV_CH_E_CNTXT_9);
+ iowrite32(0, gsi->virt + reg_n_offset(reg, evt_ring_id));
- /* No MSI write data, and MSI address high and low address is 0 */
- iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_9_OFFSET(evt_ring_id));
- iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_10_OFFSET(evt_ring_id));
- iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_11_OFFSET(evt_ring_id));
+ reg = gsi_reg(gsi, EV_CH_E_CNTXT_10);
+ iowrite32(0, gsi->virt + reg_n_offset(reg, evt_ring_id));
+
+ reg = gsi_reg(gsi, EV_CH_E_CNTXT_11);
+ iowrite32(0, gsi->virt + reg_n_offset(reg, evt_ring_id));
/* We don't need to get event read pointer updates */
- iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_12_OFFSET(evt_ring_id));
- iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_13_OFFSET(evt_ring_id));
+ reg = gsi_reg(gsi, EV_CH_E_CNTXT_12);
+ iowrite32(0, gsi->virt + reg_n_offset(reg, evt_ring_id));
+
+ reg = gsi_reg(gsi, EV_CH_E_CNTXT_13);
+ iowrite32(0, gsi->virt + reg_n_offset(reg, evt_ring_id));
/* Finally, tell the hardware our "last processed" event (arbitrary) */
gsi_evt_ring_doorbell(gsi, evt_ring_id, ring->index);
@@ -761,39 +829,52 @@ static void gsi_channel_program(struct gsi_channel *channel, bool doorbell)
union gsi_channel_scratch scr = { };
struct gsi_channel_scratch_gpi *gpi;
struct gsi *gsi = channel->gsi;
+ const struct reg *reg;
u32 wrr_weight = 0;
+ u32 offset;
u32 val;
+ reg = gsi_reg(gsi, CH_C_CNTXT_0);
+
/* We program all channels as GPI type/protocol */
- val = chtype_protocol_encoded(gsi->version, GSI_CHANNEL_TYPE_GPI);
+ val = ch_c_cntxt_0_type_encode(gsi->version, reg, GSI_CHANNEL_TYPE_GPI);
if (channel->toward_ipa)
- val |= CHTYPE_DIR_FMASK;
- val |= u32_encode_bits(channel->evt_ring_id, ERINDEX_FMASK);
- val |= u32_encode_bits(GSI_RING_ELEMENT_SIZE, ELEMENT_SIZE_FMASK);
- iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_0_OFFSET(channel_id));
-
- val = r_length_encoded(gsi->version, size);
- iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_1_OFFSET(channel_id));
+ val |= reg_bit(reg, CHTYPE_DIR);
+ if (gsi->version < IPA_VERSION_5_0)
+ val |= reg_encode(reg, ERINDEX, channel->evt_ring_id);
+ val |= reg_encode(reg, ELEMENT_SIZE, GSI_RING_ELEMENT_SIZE);
+ iowrite32(val, gsi->virt + reg_n_offset(reg, channel_id));
+
+ reg = gsi_reg(gsi, CH_C_CNTXT_1);
+ val = reg_encode(reg, CH_R_LENGTH, size);
+ if (gsi->version >= IPA_VERSION_5_0)
+ val |= reg_encode(reg, CH_ERINDEX, channel->evt_ring_id);
+ iowrite32(val, gsi->virt + reg_n_offset(reg, channel_id));
/* The context 2 and 3 registers store the low-order and
* high-order 32 bits of the address of the channel ring,
* respectively.
*/
+ reg = gsi_reg(gsi, CH_C_CNTXT_2);
val = lower_32_bits(channel->tre_ring.addr);
- iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_2_OFFSET(channel_id));
+ iowrite32(val, gsi->virt + reg_n_offset(reg, channel_id));
+
+ reg = gsi_reg(gsi, CH_C_CNTXT_3);
val = upper_32_bits(channel->tre_ring.addr);
- iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_3_OFFSET(channel_id));
+ iowrite32(val, gsi->virt + reg_n_offset(reg, channel_id));
+
+ reg = gsi_reg(gsi, CH_C_QOS);
/* Command channel gets low weighted round-robin priority */
if (channel->command)
- wrr_weight = field_max(WRR_WEIGHT_FMASK);
- val = u32_encode_bits(wrr_weight, WRR_WEIGHT_FMASK);
+ wrr_weight = reg_field_max(reg, WRR_WEIGHT);
+ val = reg_encode(reg, WRR_WEIGHT, wrr_weight);
/* Max prefetch is 1 segment (do not set MAX_PREFETCH_FMASK) */
/* No need to use the doorbell engine starting at IPA v4.0 */
if (gsi->version < IPA_VERSION_4_0 && doorbell)
- val |= USE_DB_ENG_FMASK;
+ val |= reg_bit(reg, USE_DB_ENG);
/* v4.0 introduces an escape buffer for prefetch. We use it
* on all but the AP command channel.
@@ -801,16 +882,15 @@ static void gsi_channel_program(struct gsi_channel *channel, bool doorbell)
if (gsi->version >= IPA_VERSION_4_0 && !channel->command) {
/* If not otherwise set, prefetch buffers are used */
if (gsi->version < IPA_VERSION_4_5)
- val |= USE_ESCAPE_BUF_ONLY_FMASK;
+ val |= reg_bit(reg, USE_ESCAPE_BUF_ONLY);
else
- val |= u32_encode_bits(GSI_ESCAPE_BUF_ONLY,
- PREFETCH_MODE_FMASK);
+ val |= reg_encode(reg, PREFETCH_MODE, ESCAPE_BUF_ONLY);
}
/* All channels set DB_IN_BYTES */
if (gsi->version >= IPA_VERSION_4_9)
- val |= DB_IN_BYTES;
+ val |= reg_bit(reg, DB_IN_BYTES);
- iowrite32(val, gsi->virt + GSI_CH_C_QOS_OFFSET(channel_id));
+ iowrite32(val, gsi->virt + reg_n_offset(reg, channel_id));
/* Now update the scratch registers for GPI protocol */
gpi = &scr.gpi;
@@ -818,22 +898,27 @@ static void gsi_channel_program(struct gsi_channel *channel, bool doorbell)
GSI_RING_ELEMENT_SIZE;
gpi->outstanding_threshold = 2 * GSI_RING_ELEMENT_SIZE;
+ reg = gsi_reg(gsi, CH_C_SCRATCH_0);
val = scr.data.word1;
- iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_0_OFFSET(channel_id));
+ iowrite32(val, gsi->virt + reg_n_offset(reg, channel_id));
+ reg = gsi_reg(gsi, CH_C_SCRATCH_1);
val = scr.data.word2;
- iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_1_OFFSET(channel_id));
+ iowrite32(val, gsi->virt + reg_n_offset(reg, channel_id));
+ reg = gsi_reg(gsi, CH_C_SCRATCH_2);
val = scr.data.word3;
- iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_2_OFFSET(channel_id));
+ iowrite32(val, gsi->virt + reg_n_offset(reg, channel_id));
/* We must preserve the upper 16 bits of the last scratch register.
* The next sequence assumes those bits remain unchanged between the
* read and the write.
*/
- val = ioread32(gsi->virt + GSI_CH_C_SCRATCH_3_OFFSET(channel_id));
+ reg = gsi_reg(gsi, CH_C_SCRATCH_3);
+ offset = reg_n_offset(reg, channel_id);
+ val = ioread32(gsi->virt + offset);
val = (scr.data.word4 & GENMASK(31, 16)) | (val & GENMASK(15, 0));
- iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_3_OFFSET(channel_id));
+ iowrite32(val, gsi->virt + offset);
/* All done! */
}
@@ -1049,10 +1134,14 @@ static void gsi_trans_tx_completed(struct gsi_trans *trans)
/* Channel control interrupt handler */
static void gsi_isr_chan_ctrl(struct gsi *gsi)
{
+ const struct reg *reg;
u32 channel_mask;
- channel_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_CH_IRQ_OFFSET);
- iowrite32(channel_mask, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_CLR_OFFSET);
+ reg = gsi_reg(gsi, CNTXT_SRC_CH_IRQ);
+ channel_mask = ioread32(gsi->virt + reg_offset(reg));
+
+ reg = gsi_reg(gsi, CNTXT_SRC_CH_IRQ_CLR);
+ iowrite32(channel_mask, gsi->virt + reg_offset(reg));
while (channel_mask) {
u32 channel_id = __ffs(channel_mask);
@@ -1066,10 +1155,14 @@ static void gsi_isr_chan_ctrl(struct gsi *gsi)
/* Event ring control interrupt handler */
static void gsi_isr_evt_ctrl(struct gsi *gsi)
{
+ const struct reg *reg;
u32 event_mask;
- event_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_OFFSET);
- iowrite32(event_mask, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_CLR_OFFSET);
+ reg = gsi_reg(gsi, CNTXT_SRC_EV_CH_IRQ);
+ event_mask = ioread32(gsi->virt + reg_offset(reg));
+
+ reg = gsi_reg(gsi, CNTXT_SRC_EV_CH_IRQ_CLR);
+ iowrite32(event_mask, gsi->virt + reg_offset(reg));
while (event_mask) {
u32 evt_ring_id = __ffs(event_mask);
@@ -1117,21 +1210,29 @@ gsi_isr_glob_evt_err(struct gsi *gsi, u32 err_ee, u32 evt_ring_id, u32 code)
/* Global error interrupt handler */
static void gsi_isr_glob_err(struct gsi *gsi)
{
+ const struct reg *log_reg;
+ const struct reg *clr_reg;
enum gsi_err_type type;
enum gsi_err_code code;
+ u32 offset;
u32 which;
u32 val;
u32 ee;
/* Get the logged error, then reinitialize the log */
- val = ioread32(gsi->virt + GSI_ERROR_LOG_OFFSET);
- iowrite32(0, gsi->virt + GSI_ERROR_LOG_OFFSET);
- iowrite32(~0, gsi->virt + GSI_ERROR_LOG_CLR_OFFSET);
+ log_reg = gsi_reg(gsi, ERROR_LOG);
+ offset = reg_offset(log_reg);
+ val = ioread32(gsi->virt + offset);
+ iowrite32(0, gsi->virt + offset);
+
+ clr_reg = gsi_reg(gsi, ERROR_LOG_CLR);
+ iowrite32(~0, gsi->virt + reg_offset(clr_reg));
- ee = u32_get_bits(val, ERR_EE_FMASK);
- type = u32_get_bits(val, ERR_TYPE_FMASK);
- which = u32_get_bits(val, ERR_VIRT_IDX_FMASK);
- code = u32_get_bits(val, ERR_CODE_FMASK);
+ /* Parse the error value */
+ ee = reg_decode(log_reg, ERR_EE, val);
+ type = reg_decode(log_reg, ERR_TYPE, val);
+ which = reg_decode(log_reg, ERR_VIRT_IDX, val);
+ code = reg_decode(log_reg, ERR_CODE, val);
if (type == GSI_ERR_TYPE_CHAN)
gsi_isr_glob_chan_err(gsi, ee, which, code);
@@ -1144,6 +1245,7 @@ static void gsi_isr_glob_err(struct gsi *gsi)
/* Generic EE interrupt handler */
static void gsi_isr_gp_int1(struct gsi *gsi)
{
+ const struct reg *reg;
u32 result;
u32 val;
@@ -1166,8 +1268,9 @@ static void gsi_isr_gp_int1(struct gsi *gsi)
* In either case, we silently ignore a INCORRECT_CHANNEL_STATE
* error if we receive it.
*/
- val = ioread32(gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);
- result = u32_get_bits(val, GENERIC_EE_RESULT_FMASK);
+ reg = gsi_reg(gsi, CNTXT_SCRATCH_0);
+ val = ioread32(gsi->virt + reg_offset(reg));
+ result = reg_decode(reg, GENERIC_EE_RESULT, val);
switch (result) {
case GENERIC_EE_SUCCESS:
@@ -1191,19 +1294,22 @@ static void gsi_isr_gp_int1(struct gsi *gsi)
/* Inter-EE interrupt handler */
static void gsi_isr_glob_ee(struct gsi *gsi)
{
+ const struct reg *reg;
u32 val;
- val = ioread32(gsi->virt + GSI_CNTXT_GLOB_IRQ_STTS_OFFSET);
+ reg = gsi_reg(gsi, CNTXT_GLOB_IRQ_STTS);
+ val = ioread32(gsi->virt + reg_offset(reg));
- if (val & BIT(ERROR_INT))
+ if (val & ERROR_INT)
gsi_isr_glob_err(gsi);
- iowrite32(val, gsi->virt + GSI_CNTXT_GLOB_IRQ_CLR_OFFSET);
+ reg = gsi_reg(gsi, CNTXT_GLOB_IRQ_CLR);
+ iowrite32(val, gsi->virt + reg_offset(reg));
- val &= ~BIT(ERROR_INT);
+ val &= ~ERROR_INT;
- if (val & BIT(GP_INT1)) {
- val ^= BIT(GP_INT1);
+ if (val & GP_INT1) {
+ val ^= GP_INT1;
gsi_isr_gp_int1(gsi);
}
@@ -1214,11 +1320,16 @@ static void gsi_isr_glob_ee(struct gsi *gsi)
/* I/O completion interrupt event */
static void gsi_isr_ieob(struct gsi *gsi)
{
+ const struct reg *reg;
u32 event_mask;
- event_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_OFFSET);
+ reg = gsi_reg(gsi, CNTXT_SRC_IEOB_IRQ);
+ event_mask = ioread32(gsi->virt + reg_offset(reg));
+
gsi_irq_ieob_disable(gsi, event_mask);
- iowrite32(event_mask, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_CLR_OFFSET);
+
+ reg = gsi_reg(gsi, CNTXT_SRC_IEOB_IRQ_CLR);
+ iowrite32(event_mask, gsi->virt + reg_offset(reg));
while (event_mask) {
u32 evt_ring_id = __ffs(event_mask);
@@ -1233,10 +1344,14 @@ static void gsi_isr_ieob(struct gsi *gsi)
static void gsi_isr_general(struct gsi *gsi)
{
struct device *dev = gsi->dev;
+ const struct reg *reg;
u32 val;
- val = ioread32(gsi->virt + GSI_CNTXT_GSI_IRQ_STTS_OFFSET);
- iowrite32(val, gsi->virt + GSI_CNTXT_GSI_IRQ_CLR_OFFSET);
+ reg = gsi_reg(gsi, CNTXT_GSI_IRQ_STTS);
+ val = ioread32(gsi->virt + reg_offset(reg));
+
+ reg = gsi_reg(gsi, CNTXT_GSI_IRQ_CLR);
+ iowrite32(val, gsi->virt + reg_offset(reg));
dev_err(dev, "unexpected general interrupt 0x%08x\n", val);
}
@@ -1252,31 +1367,39 @@ static void gsi_isr_general(struct gsi *gsi)
static irqreturn_t gsi_isr(int irq, void *dev_id)
{
struct gsi *gsi = dev_id;
+ const struct reg *reg;
u32 intr_mask;
u32 cnt = 0;
+ u32 offset;
+
+ reg = gsi_reg(gsi, CNTXT_TYPE_IRQ);
+ offset = reg_offset(reg);
/* enum gsi_irq_type_id defines GSI interrupt types */
- while ((intr_mask = ioread32(gsi->virt + GSI_CNTXT_TYPE_IRQ_OFFSET))) {
+ while ((intr_mask = ioread32(gsi->virt + offset))) {
/* intr_mask contains bitmask of pending GSI interrupts */
do {
u32 gsi_intr = BIT(__ffs(intr_mask));
intr_mask ^= gsi_intr;
+ /* Note: the IRQ condition for each type is cleared
+ * when the type-specific register is updated.
+ */
switch (gsi_intr) {
- case BIT(GSI_CH_CTRL):
+ case GSI_CH_CTRL:
gsi_isr_chan_ctrl(gsi);
break;
- case BIT(GSI_EV_CTRL):
+ case GSI_EV_CTRL:
gsi_isr_evt_ctrl(gsi);
break;
- case BIT(GSI_GLOB_EE):
+ case GSI_GLOB_EE:
gsi_isr_glob_ee(gsi);
break;
- case BIT(GSI_IEOB):
+ case GSI_IEOB:
gsi_isr_ieob(gsi);
break;
- case BIT(GSI_GENERAL):
+ case GSI_GENERAL:
gsi_isr_general(gsi);
break;
default:
@@ -1467,11 +1590,13 @@ void gsi_channel_doorbell(struct gsi_channel *channel)
struct gsi_ring *tre_ring = &channel->tre_ring;
u32 channel_id = gsi_channel_id(channel);
struct gsi *gsi = channel->gsi;
+ const struct reg *reg;
u32 val;
+ reg = gsi_reg(gsi, CH_C_DOORBELL_0);
/* Note: index *must* be used modulo the ring count here */
val = gsi_ring_addr(tre_ring, tre_ring->index % tre_ring->count);
- iowrite32(val, gsi->virt + GSI_CH_C_DOORBELL_0_OFFSET(channel_id));
+ iowrite32(val, gsi->virt + reg_n_offset(reg, channel_id));
}
/* Consult hardware, move newly completed transactions to completed state */
@@ -1482,6 +1607,7 @@ void gsi_channel_update(struct gsi_channel *channel)
struct gsi_evt_ring *evt_ring;
struct gsi_trans *trans;
struct gsi_ring *ring;
+ const struct reg *reg;
u32 offset;
u32 index;
@@ -1491,7 +1617,8 @@ void gsi_channel_update(struct gsi_channel *channel)
/* See if there's anything new to process; if not, we're done. Note
* that index always refers to an entry *within* the event ring.
*/
- offset = GSI_EV_CH_E_CNTXT_4_OFFSET(evt_ring_id);
+ reg = gsi_reg(gsi, EV_CH_E_CNTXT_4);
+ offset = reg_n_offset(reg, evt_ring_id);
index = gsi_ring_index(ring, ioread32(gsi->virt + offset));
if (index == ring->index % ring->count)
return;
@@ -1642,7 +1769,9 @@ static int gsi_generic_command(struct gsi *gsi, u32 channel_id,
enum gsi_generic_cmd_opcode opcode,
u8 params)
{
+ const struct reg *reg;
bool timeout;
+ u32 offset;
u32 val;
/* The error global interrupt type is always enabled (until we tear
@@ -1654,24 +1783,31 @@ static int gsi_generic_command(struct gsi *gsi, u32 channel_id,
* channel), and only from this function. So we enable the GP_INT1
* IRQ type here, and disable it again after the command completes.
*/
- val = BIT(ERROR_INT) | BIT(GP_INT1);
- iowrite32(val, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
+ reg = gsi_reg(gsi, CNTXT_GLOB_IRQ_EN);
+ val = ERROR_INT | GP_INT1;
+ iowrite32(val, gsi->virt + reg_offset(reg));
/* First zero the result code field */
- val = ioread32(gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);
- val &= ~GENERIC_EE_RESULT_FMASK;
- iowrite32(val, gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);
+ reg = gsi_reg(gsi, CNTXT_SCRATCH_0);
+ offset = reg_offset(reg);
+ val = ioread32(gsi->virt + offset);
+
+ val &= ~reg_fmask(reg, GENERIC_EE_RESULT);
+ iowrite32(val, gsi->virt + offset);
/* Now issue the command */
- val = u32_encode_bits(opcode, GENERIC_OPCODE_FMASK);
- val |= u32_encode_bits(channel_id, GENERIC_CHID_FMASK);
- val |= u32_encode_bits(GSI_EE_MODEM, GENERIC_EE_FMASK);
- val |= u32_encode_bits(params, GENERIC_PARAMS_FMASK);
+ reg = gsi_reg(gsi, GENERIC_CMD);
+ val = reg_encode(reg, GENERIC_OPCODE, opcode);
+ val |= reg_encode(reg, GENERIC_CHID, channel_id);
+ val |= reg_encode(reg, GENERIC_EE, GSI_EE_MODEM);
+ if (gsi->version >= IPA_VERSION_4_11)
+ val |= reg_encode(reg, GENERIC_PARAMS, params);
- timeout = !gsi_command(gsi, GSI_GENERIC_CMD_OFFSET, val);
+ timeout = !gsi_command(gsi, reg_offset(reg), val);
/* Disable the GP_INT1 IRQ type again */
- iowrite32(BIT(ERROR_INT), gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
+ reg = gsi_reg(gsi, CNTXT_GLOB_IRQ_EN);
+ iowrite32(ERROR_INT, gsi->virt + reg_offset(reg));
if (!timeout)
return gsi->result;
@@ -1828,32 +1964,40 @@ static void gsi_channel_teardown(struct gsi *gsi)
/* Turn off all GSI interrupts initially */
static int gsi_irq_setup(struct gsi *gsi)
{
+ const struct reg *reg;
int ret;
/* Writing 1 indicates IRQ interrupts; 0 would be MSI */
- iowrite32(1, gsi->virt + GSI_CNTXT_INTSET_OFFSET);
+ reg = gsi_reg(gsi, CNTXT_INTSET);
+ iowrite32(reg_bit(reg, INTYPE), gsi->virt + reg_offset(reg));
/* Disable all interrupt types */
gsi_irq_type_update(gsi, 0);
/* Clear all type-specific interrupt masks */
- iowrite32(0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
- iowrite32(0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
- iowrite32(0, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
- iowrite32(0, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
+ reg = gsi_reg(gsi, CNTXT_SRC_CH_IRQ_MSK);
+ iowrite32(0, gsi->virt + reg_offset(reg));
+
+ reg = gsi_reg(gsi, CNTXT_SRC_EV_CH_IRQ_MSK);
+ iowrite32(0, gsi->virt + reg_offset(reg));
+
+ reg = gsi_reg(gsi, CNTXT_GLOB_IRQ_EN);
+ iowrite32(0, gsi->virt + reg_offset(reg));
+
+ reg = gsi_reg(gsi, CNTXT_SRC_IEOB_IRQ_MSK);
+ iowrite32(0, gsi->virt + reg_offset(reg));
/* The inter-EE interrupts are not supported for IPA v3.0-v3.1 */
if (gsi->version > IPA_VERSION_3_1) {
- u32 offset;
+ reg = gsi_reg(gsi, INTER_EE_SRC_CH_IRQ_MSK);
+ iowrite32(0, gsi->virt + reg_offset(reg));
- /* These registers are in the non-adjusted address range */
- offset = GSI_INTER_EE_SRC_CH_IRQ_MSK_OFFSET;
- iowrite32(0, gsi->virt_raw + offset);
- offset = GSI_INTER_EE_SRC_EV_CH_IRQ_MSK_OFFSET;
- iowrite32(0, gsi->virt_raw + offset);
+ reg = gsi_reg(gsi, INTER_EE_SRC_EV_CH_IRQ_MSK);
+ iowrite32(0, gsi->virt + reg_offset(reg));
}
- iowrite32(0, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
+ reg = gsi_reg(gsi, CNTXT_GSI_IRQ_EN);
+ iowrite32(0, gsi->virt + reg_offset(reg));
ret = request_irq(gsi->irq, gsi_isr, 0, "gsi", gsi);
if (ret)
@@ -1871,6 +2015,7 @@ static void gsi_irq_teardown(struct gsi *gsi)
static int gsi_ring_setup(struct gsi *gsi)
{
struct device *dev = gsi->dev;
+ const struct reg *reg;
u32 count;
u32 val;
@@ -1882,9 +2027,10 @@ static int gsi_ring_setup(struct gsi *gsi)
return 0;
}
- val = ioread32(gsi->virt + GSI_GSI_HW_PARAM_2_OFFSET);
+ reg = gsi_reg(gsi, HW_PARAM_2);
+ val = ioread32(gsi->virt + reg_offset(reg));
- count = u32_get_bits(val, NUM_CH_PER_EE_FMASK);
+ count = reg_decode(reg, NUM_CH_PER_EE, val);
if (!count) {
dev_err(dev, "GSI reports zero channels supported\n");
return -EINVAL;
@@ -1896,7 +2042,12 @@ static int gsi_ring_setup(struct gsi *gsi)
}
gsi->channel_count = count;
- count = u32_get_bits(val, NUM_EV_PER_EE_FMASK);
+ if (gsi->version < IPA_VERSION_5_0) {
+ count = reg_decode(reg, NUM_EV_PER_EE, val);
+ } else {
+ reg = gsi_reg(gsi, HW_PARAM_4);
+ count = reg_decode(reg, EV_PER_EE, val);
+ }
if (!count) {
dev_err(dev, "GSI reports zero event rings supported\n");
return -EINVAL;
@@ -1915,12 +2066,14 @@ static int gsi_ring_setup(struct gsi *gsi)
/* Setup function for GSI. GSI firmware must be loaded and initialized */
int gsi_setup(struct gsi *gsi)
{
+ const struct reg *reg;
u32 val;
int ret;
/* Here is where we first touch the GSI hardware */
- val = ioread32(gsi->virt + GSI_GSI_STATUS_OFFSET);
- if (!(val & ENABLED_FMASK)) {
+ reg = gsi_reg(gsi, GSI_STATUS);
+ val = ioread32(gsi->virt + reg_offset(reg));
+ if (!(val & reg_bit(reg, ENABLED))) {
dev_err(gsi->dev, "GSI has not been enabled\n");
return -EIO;
}
@@ -1934,7 +2087,8 @@ int gsi_setup(struct gsi *gsi)
goto err_irq_teardown;
/* Initialize the error log */
- iowrite32(0, gsi->virt + GSI_ERROR_LOG_OFFSET);
+ reg = gsi_reg(gsi, ERROR_LOG);
+ iowrite32(0, gsi->virt + reg_offset(reg));
ret = gsi_channel_setup(gsi);
if (ret)
@@ -2205,67 +2359,37 @@ int gsi_init(struct gsi *gsi, struct platform_device *pdev,
enum ipa_version version, u32 count,
const struct ipa_gsi_endpoint_data *data)
{
- struct device *dev = &pdev->dev;
- struct resource *res;
- resource_size_t size;
- u32 adjust;
int ret;
gsi_validate_build();
- gsi->dev = dev;
+ gsi->dev = &pdev->dev;
gsi->version = version;
/* GSI uses NAPI on all channels. Create a dummy network device
* for the channel NAPI contexts to be associated with.
*/
init_dummy_netdev(&gsi->dummy_dev);
-
- /* Get GSI memory range and map it */
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gsi");
- if (!res) {
- dev_err(dev, "DT error getting \"gsi\" memory property\n");
- return -ENODEV;
- }
-
- size = resource_size(res);
- if (res->start > U32_MAX || size > U32_MAX - res->start) {
- dev_err(dev, "DT memory resource \"gsi\" out of range\n");
- return -EINVAL;
- }
-
- /* Make sure we can make our pointer adjustment if necessary */
- adjust = gsi->version < IPA_VERSION_4_5 ? 0 : GSI_EE_REG_ADJUST;
- if (res->start < adjust) {
- dev_err(dev, "DT memory resource \"gsi\" too low (< %u)\n",
- adjust);
- return -EINVAL;
- }
-
- gsi->virt_raw = ioremap(res->start, size);
- if (!gsi->virt_raw) {
- dev_err(dev, "unable to remap \"gsi\" memory\n");
- return -ENOMEM;
- }
- /* Most registers are accessed using an adjusted register range */
- gsi->virt = gsi->virt_raw - adjust;
-
init_completion(&gsi->completion);
+ ret = gsi_reg_init(gsi, pdev);
+ if (ret)
+ return ret;
+
ret = gsi_irq_init(gsi, pdev); /* No matching exit required */
if (ret)
- goto err_iounmap;
+ goto err_reg_exit;
ret = gsi_channel_init(gsi, count, data);
if (ret)
- goto err_iounmap;
+ goto err_reg_exit;
mutex_init(&gsi->mutex);
return 0;
-err_iounmap:
- iounmap(gsi->virt_raw);
+err_reg_exit:
+ gsi_reg_exit(gsi);
return ret;
}
@@ -2275,7 +2399,7 @@ void gsi_exit(struct gsi *gsi)
{
mutex_destroy(&gsi->mutex);
gsi_channel_exit(gsi);
- iounmap(gsi->virt_raw);
+ gsi_reg_exit(gsi);
}
/* The maximum number of outstanding TREs on a channel. This limits
diff --git a/drivers/net/ipa/gsi.h b/drivers/net/ipa/gsi.h
index 49dcadba4e0b..50bc80cb167c 100644
--- a/drivers/net/ipa/gsi.h
+++ b/drivers/net/ipa/gsi.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2022 Linaro Ltd.
+ * Copyright (C) 2018-2023 Linaro Ltd.
*/
#ifndef _GSI_H_
#define _GSI_H_
@@ -140,8 +140,9 @@ struct gsi_evt_ring {
struct gsi {
struct device *dev; /* Same as IPA device */
enum ipa_version version;
- void __iomem *virt_raw; /* I/O mapped address range */
- void __iomem *virt; /* Adjusted for most registers */
+ void __iomem *virt; /* I/O mapped registers */
+ const struct regs *regs;
+
u32 irq;
u32 channel_count;
u32 evt_ring_count;
diff --git a/drivers/net/ipa/gsi_reg.c b/drivers/net/ipa/gsi_reg.c
new file mode 100644
index 000000000000..1412b67304c8
--- /dev/null
+++ b/drivers/net/ipa/gsi_reg.c
@@ -0,0 +1,151 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (C) 2023 Linaro Ltd. */
+
+#include <linux/platform_device.h>
+#include <linux/io.h>
+
+#include "gsi.h"
+#include "reg.h"
+#include "gsi_reg.h"
+
+/* Is this register ID valid for the current GSI version? */
+static bool gsi_reg_id_valid(struct gsi *gsi, enum gsi_reg_id reg_id)
+{
+ switch (reg_id) {
+ case INTER_EE_SRC_CH_IRQ_MSK:
+ case INTER_EE_SRC_EV_CH_IRQ_MSK:
+ case CH_C_CNTXT_0:
+ case CH_C_CNTXT_1:
+ case CH_C_CNTXT_2:
+ case CH_C_CNTXT_3:
+ case CH_C_QOS:
+ case CH_C_SCRATCH_0:
+ case CH_C_SCRATCH_1:
+ case CH_C_SCRATCH_2:
+ case CH_C_SCRATCH_3:
+ case EV_CH_E_CNTXT_0:
+ case EV_CH_E_CNTXT_1:
+ case EV_CH_E_CNTXT_2:
+ case EV_CH_E_CNTXT_3:
+ case EV_CH_E_CNTXT_4:
+ case EV_CH_E_CNTXT_8:
+ case EV_CH_E_CNTXT_9:
+ case EV_CH_E_CNTXT_10:
+ case EV_CH_E_CNTXT_11:
+ case EV_CH_E_CNTXT_12:
+ case EV_CH_E_CNTXT_13:
+ case EV_CH_E_SCRATCH_0:
+ case EV_CH_E_SCRATCH_1:
+ case CH_C_DOORBELL_0:
+ case EV_CH_E_DOORBELL_0:
+ case GSI_STATUS:
+ case CH_CMD:
+ case EV_CH_CMD:
+ case GENERIC_CMD:
+ case HW_PARAM_2:
+ case CNTXT_TYPE_IRQ:
+ case CNTXT_TYPE_IRQ_MSK:
+ case CNTXT_SRC_CH_IRQ:
+ case CNTXT_SRC_CH_IRQ_MSK:
+ case CNTXT_SRC_CH_IRQ_CLR:
+ case CNTXT_SRC_EV_CH_IRQ:
+ case CNTXT_SRC_EV_CH_IRQ_MSK:
+ case CNTXT_SRC_EV_CH_IRQ_CLR:
+ case CNTXT_SRC_IEOB_IRQ:
+ case CNTXT_SRC_IEOB_IRQ_MSK:
+ case CNTXT_SRC_IEOB_IRQ_CLR:
+ case CNTXT_GLOB_IRQ_STTS:
+ case CNTXT_GLOB_IRQ_EN:
+ case CNTXT_GLOB_IRQ_CLR:
+ case CNTXT_GSI_IRQ_STTS:
+ case CNTXT_GSI_IRQ_EN:
+ case CNTXT_GSI_IRQ_CLR:
+ case CNTXT_INTSET:
+ case ERROR_LOG:
+ case ERROR_LOG_CLR:
+ case CNTXT_SCRATCH_0:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+const struct reg *gsi_reg(struct gsi *gsi, enum gsi_reg_id reg_id)
+{
+ if (WARN(!gsi_reg_id_valid(gsi, reg_id), "invalid reg %u\n", reg_id))
+ return NULL;
+
+ return reg(gsi->regs, reg_id);
+}
+
+static const struct regs *gsi_regs(struct gsi *gsi)
+{
+ switch (gsi->version) {
+ case IPA_VERSION_3_1:
+ return &gsi_regs_v3_1;
+
+ case IPA_VERSION_3_5_1:
+ return &gsi_regs_v3_5_1;
+
+ case IPA_VERSION_4_2:
+ return &gsi_regs_v4_0;
+
+ case IPA_VERSION_4_5:
+ case IPA_VERSION_4_7:
+ return &gsi_regs_v4_5;
+
+ case IPA_VERSION_4_9:
+ return &gsi_regs_v4_9;
+
+ case IPA_VERSION_4_11:
+ return &gsi_regs_v4_11;
+
+ default:
+ return NULL;
+ }
+}
+
+/* Sets gsi->virt and I/O maps the "gsi" memory range for registers */
+int gsi_reg_init(struct gsi *gsi, struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ resource_size_t size;
+
+ /* Get GSI memory range and map it */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gsi");
+ if (!res) {
+ dev_err(dev, "DT error getting \"gsi\" memory property\n");
+ return -ENODEV;
+ }
+
+ size = resource_size(res);
+ if (res->start > U32_MAX || size > U32_MAX - res->start) {
+ dev_err(dev, "DT memory resource \"gsi\" out of range\n");
+ return -EINVAL;
+ }
+
+ gsi->regs = gsi_regs(gsi);
+ if (!gsi->regs) {
+ dev_err(dev, "unsupported IPA version %u (?)\n", gsi->version);
+ return -EINVAL;
+ }
+
+ gsi->virt = ioremap(res->start, size);
+ if (!gsi->virt) {
+ dev_err(dev, "unable to remap \"gsi\" memory\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/* Inverse of gsi_reg_init() */
+void gsi_reg_exit(struct gsi *gsi)
+{
+ iounmap(gsi->virt);
+ gsi->virt = NULL;
+ gsi->regs = NULL;
+}
diff --git a/drivers/net/ipa/gsi_reg.h b/drivers/net/ipa/gsi_reg.h
index 3763359f208f..f62f0a5c653d 100644
--- a/drivers/net/ipa/gsi_reg.h
+++ b/drivers/net/ipa/gsi_reg.h
@@ -1,12 +1,12 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2022 Linaro Ltd.
+ * Copyright (C) 2018-2023 Linaro Ltd.
*/
#ifndef _GSI_REG_H_
#define _GSI_REG_H_
-/* === Only "gsi.c" should include this file === */
+/* === Only "gsi.c" and "gsi_reg.c" should include this file === */
#include <linux/bits.h>
@@ -38,29 +38,75 @@
* (though the actual limit is hardware-dependent).
*/
-/* GSI EE registers as a group are shifted downward by a fixed constant amount
- * for IPA versions 4.5 and beyond. This applies to all GSI registers we use
- * *except* the ones that disable inter-EE interrupts for channels and event
- * channels.
- *
- * The "raw" (not adjusted) GSI register range is mapped, and a pointer to
- * the mapped range is held in gsi->virt_raw. The inter-EE interrupt
- * registers are accessed using that pointer.
- *
- * Most registers are accessed using gsi->virt, which is a copy of the "raw"
- * pointer, adjusted downward by the fixed amount.
- */
-#define GSI_EE_REG_ADJUST 0x0000d000 /* IPA v4.5+ */
-
-/* The inter-EE IRQ registers are relative to gsi->virt_raw (IPA v3.5+) */
-
-#define GSI_INTER_EE_SRC_CH_IRQ_MSK_OFFSET \
- (0x0000c020 + 0x1000 * GSI_EE_AP)
-
-#define GSI_INTER_EE_SRC_EV_CH_IRQ_MSK_OFFSET \
- (0x0000c024 + 0x1000 * GSI_EE_AP)
+/* enum gsi_reg_id - GSI register IDs */
+enum gsi_reg_id {
+ INTER_EE_SRC_CH_IRQ_MSK, /* IPA v3.5+ */
+ INTER_EE_SRC_EV_CH_IRQ_MSK, /* IPA v3.5+ */
+ CH_C_CNTXT_0,
+ CH_C_CNTXT_1,
+ CH_C_CNTXT_2,
+ CH_C_CNTXT_3,
+ CH_C_QOS,
+ CH_C_SCRATCH_0,
+ CH_C_SCRATCH_1,
+ CH_C_SCRATCH_2,
+ CH_C_SCRATCH_3,
+ EV_CH_E_CNTXT_0,
+ EV_CH_E_CNTXT_1,
+ EV_CH_E_CNTXT_2,
+ EV_CH_E_CNTXT_3,
+ EV_CH_E_CNTXT_4,
+ EV_CH_E_CNTXT_8,
+ EV_CH_E_CNTXT_9,
+ EV_CH_E_CNTXT_10,
+ EV_CH_E_CNTXT_11,
+ EV_CH_E_CNTXT_12,
+ EV_CH_E_CNTXT_13,
+ EV_CH_E_SCRATCH_0,
+ EV_CH_E_SCRATCH_1,
+ CH_C_DOORBELL_0,
+ EV_CH_E_DOORBELL_0,
+ GSI_STATUS,
+ CH_CMD,
+ EV_CH_CMD,
+ GENERIC_CMD,
+ HW_PARAM_2, /* IPA v3.5.1+ */
+ HW_PARAM_4, /* IPA v5.0+ */
+ CNTXT_TYPE_IRQ,
+ CNTXT_TYPE_IRQ_MSK,
+ CNTXT_SRC_CH_IRQ,
+ CNTXT_SRC_CH_IRQ_MSK,
+ CNTXT_SRC_CH_IRQ_CLR,
+ CNTXT_SRC_EV_CH_IRQ,
+ CNTXT_SRC_EV_CH_IRQ_MSK,
+ CNTXT_SRC_EV_CH_IRQ_CLR,
+ CNTXT_SRC_IEOB_IRQ,
+ CNTXT_SRC_IEOB_IRQ_MSK,
+ CNTXT_SRC_IEOB_IRQ_CLR,
+ CNTXT_GLOB_IRQ_STTS,
+ CNTXT_GLOB_IRQ_EN,
+ CNTXT_GLOB_IRQ_CLR,
+ CNTXT_GSI_IRQ_STTS,
+ CNTXT_GSI_IRQ_EN,
+ CNTXT_GSI_IRQ_CLR,
+ CNTXT_INTSET,
+ ERROR_LOG,
+ ERROR_LOG_CLR,
+ CNTXT_SCRATCH_0,
+ GSI_REG_ID_COUNT, /* Last; not an ID */
+};
-/* All other register offsets are relative to gsi->virt */
+/* CH_C_CNTXT_0 register */
+enum gsi_reg_ch_c_cntxt_0_field_id {
+ CHTYPE_PROTOCOL,
+ CHTYPE_DIR,
+ CH_EE,
+ CHID,
+ CHTYPE_PROTOCOL_MSB, /* IPA v4.5-4.11 */
+ ERINDEX, /* Not IPA v5.0+ */
+ CHSTATE,
+ ELEMENT_SIZE,
+};
/** enum gsi_channel_type - CHTYPE_PROTOCOL field values in CH_C_CNTXT_0 */
enum gsi_channel_type {
@@ -76,155 +122,64 @@ enum gsi_channel_type {
GSI_CHANNEL_TYPE_11AD = 0x9,
};
-#define GSI_CH_C_CNTXT_0_OFFSET(ch) \
- (0x0001c000 + 0x4000 * GSI_EE_AP + 0x80 * (ch))
-#define CHTYPE_PROTOCOL_FMASK GENMASK(2, 0)
-#define CHTYPE_DIR_FMASK GENMASK(3, 3)
-#define EE_FMASK GENMASK(7, 4)
-#define CHID_FMASK GENMASK(12, 8)
-/* The next field is present for IPA v4.5 and above */
-#define CHTYPE_PROTOCOL_MSB_FMASK GENMASK(13, 13)
-#define ERINDEX_FMASK GENMASK(18, 14)
-#define CHSTATE_FMASK GENMASK(23, 20)
-#define ELEMENT_SIZE_FMASK GENMASK(31, 24)
-
-/* Encoded value for CH_C_CNTXT_0 register channel protocol fields */
-static inline u32
-chtype_protocol_encoded(enum ipa_version version, enum gsi_channel_type type)
-{
- u32 val;
-
- val = u32_encode_bits(type, CHTYPE_PROTOCOL_FMASK);
- if (version < IPA_VERSION_4_5)
- return val;
-
- /* Encode upper bit(s) as well */
- type >>= hweight32(CHTYPE_PROTOCOL_FMASK);
- val |= u32_encode_bits(type, CHTYPE_PROTOCOL_MSB_FMASK);
-
- return val;
-}
-
-#define GSI_CH_C_CNTXT_1_OFFSET(ch) \
- (0x0001c004 + 0x4000 * GSI_EE_AP + 0x80 * (ch))
-
-/* Encoded value for CH_C_CNTXT_1 register R_LENGTH field */
-static inline u32 r_length_encoded(enum ipa_version version, u32 length)
-{
- if (version < IPA_VERSION_4_9)
- return u32_encode_bits(length, GENMASK(15, 0));
- return u32_encode_bits(length, GENMASK(19, 0));
-}
-
-#define GSI_CH_C_CNTXT_2_OFFSET(ch) \
- (0x0001c008 + 0x4000 * GSI_EE_AP + 0x80 * (ch))
-
-#define GSI_CH_C_CNTXT_3_OFFSET(ch) \
- (0x0001c00c + 0x4000 * GSI_EE_AP + 0x80 * (ch))
-
-#define GSI_CH_C_QOS_OFFSET(ch) \
- (0x0001c05c + 0x4000 * GSI_EE_AP + 0x80 * (ch))
-#define WRR_WEIGHT_FMASK GENMASK(3, 0)
-#define MAX_PREFETCH_FMASK GENMASK(8, 8)
-#define USE_DB_ENG_FMASK GENMASK(9, 9)
-/* The next field is only present for IPA v4.0, v4.1, and v4.2 */
-#define USE_ESCAPE_BUF_ONLY_FMASK GENMASK(10, 10)
-/* The next two fields are present for IPA v4.5 and above */
-#define PREFETCH_MODE_FMASK GENMASK(13, 10)
-#define EMPTY_LVL_THRSHOLD_FMASK GENMASK(23, 16)
-/* The next field is present for IPA v4.9 and above */
-#define DB_IN_BYTES GENMASK(24, 24)
+/* CH_C_CNTXT_1 register */
+enum gsi_reg_ch_c_cntxt_1_field_id {
+ CH_R_LENGTH,
+ CH_ERINDEX, /* IPA v5.0+ */
+};
+
+/* CH_C_QOS register */
+enum gsi_reg_ch_c_qos_field_id {
+ WRR_WEIGHT,
+ MAX_PREFETCH,
+ USE_DB_ENG,
+ USE_ESCAPE_BUF_ONLY, /* IPA v4.0-4.2 */
+ PREFETCH_MODE, /* IPA v4.5+ */
+ EMPTY_LVL_THRSHOLD, /* IPA v4.5+ */
+ DB_IN_BYTES, /* IPA v4.9+ */
+ LOW_LATENCY_EN, /* IPA v5.0+ */
+};
/** enum gsi_prefetch_mode - PREFETCH_MODE field in CH_C_QOS */
enum gsi_prefetch_mode {
- GSI_USE_PREFETCH_BUFS = 0x0,
- GSI_ESCAPE_BUF_ONLY = 0x1,
- GSI_SMART_PREFETCH = 0x2,
- GSI_FREE_PREFETCH = 0x3,
+ USE_PREFETCH_BUFS = 0,
+ ESCAPE_BUF_ONLY = 1,
+ SMART_PREFETCH = 2,
+ FREE_PREFETCH = 3,
};
-#define GSI_CH_C_SCRATCH_0_OFFSET(ch) \
- (0x0001c060 + 0x4000 * GSI_EE_AP + 0x80 * (ch))
-
-#define GSI_CH_C_SCRATCH_1_OFFSET(ch) \
- (0x0001c064 + 0x4000 * GSI_EE_AP + 0x80 * (ch))
-
-#define GSI_CH_C_SCRATCH_2_OFFSET(ch) \
- (0x0001c068 + 0x4000 * GSI_EE_AP + 0x80 * (ch))
-
-#define GSI_CH_C_SCRATCH_3_OFFSET(ch) \
- (0x0001c06c + 0x4000 * GSI_EE_AP + 0x80 * (ch))
-
-#define GSI_EV_CH_E_CNTXT_0_OFFSET(ev) \
- (0x0001d000 + 0x4000 * GSI_EE_AP + 0x80 * (ev))
-/* enum gsi_channel_type defines EV_CHTYPE field values in EV_CH_E_CNTXT_0 */
-#define EV_CHTYPE_FMASK GENMASK(3, 0)
-#define EV_EE_FMASK GENMASK(7, 4)
-#define EV_EVCHID_FMASK GENMASK(15, 8)
-#define EV_INTYPE_FMASK GENMASK(16, 16)
-#define EV_CHSTATE_FMASK GENMASK(23, 20)
-#define EV_ELEMENT_SIZE_FMASK GENMASK(31, 24)
-
-#define GSI_EV_CH_E_CNTXT_1_OFFSET(ev) \
- (0x0001d004 + 0x4000 * GSI_EE_AP + 0x80 * (ev))
-/* Encoded value for EV_CH_C_CNTXT_1 register EV_R_LENGTH field */
-static inline u32 ev_r_length_encoded(enum ipa_version version, u32 length)
-{
- if (version < IPA_VERSION_4_9)
- return u32_encode_bits(length, GENMASK(15, 0));
- return u32_encode_bits(length, GENMASK(19, 0));
-}
-
-#define GSI_EV_CH_E_CNTXT_2_OFFSET(ev) \
- (0x0001d008 + 0x4000 * GSI_EE_AP + 0x80 * (ev))
-
-#define GSI_EV_CH_E_CNTXT_3_OFFSET(ev) \
- (0x0001d00c + 0x4000 * GSI_EE_AP + 0x80 * (ev))
-
-#define GSI_EV_CH_E_CNTXT_4_OFFSET(ev) \
- (0x0001d010 + 0x4000 * GSI_EE_AP + 0x80 * (ev))
-
-#define GSI_EV_CH_E_CNTXT_8_OFFSET(ev) \
- (0x0001d020 + 0x4000 * GSI_EE_AP + 0x80 * (ev))
-#define MODT_FMASK GENMASK(15, 0)
-#define MODC_FMASK GENMASK(23, 16)
-#define MOD_CNT_FMASK GENMASK(31, 24)
-
-#define GSI_EV_CH_E_CNTXT_9_OFFSET(ev) \
- (0x0001d024 + 0x4000 * GSI_EE_AP + 0x80 * (ev))
-
-#define GSI_EV_CH_E_CNTXT_10_OFFSET(ev) \
- (0x0001d028 + 0x4000 * GSI_EE_AP + 0x80 * (ev))
-
-#define GSI_EV_CH_E_CNTXT_11_OFFSET(ev) \
- (0x0001d02c + 0x4000 * GSI_EE_AP + 0x80 * (ev))
-
-#define GSI_EV_CH_E_CNTXT_12_OFFSET(ev) \
- (0x0001d030 + 0x4000 * GSI_EE_AP + 0x80 * (ev))
-
-#define GSI_EV_CH_E_CNTXT_13_OFFSET(ev) \
- (0x0001d034 + 0x4000 * GSI_EE_AP + 0x80 * (ev))
-
-#define GSI_EV_CH_E_SCRATCH_0_OFFSET(ev) \
- (0x0001d048 + 0x4000 * GSI_EE_AP + 0x80 * (ev))
-
-#define GSI_EV_CH_E_SCRATCH_1_OFFSET(ev) \
- (0x0001d04c + 0x4000 * GSI_EE_AP + 0x80 * (ev))
+/* EV_CH_E_CNTXT_0 register */
+enum gsi_reg_ch_c_ev_ch_e_cntxt_0_field_id {
+ EV_CHTYPE, /* enum gsi_channel_type */
+ EV_EE, /* enum gsi_ee_id; always GSI_EE_AP for us */
+ EV_EVCHID,
+ EV_INTYPE,
+ EV_CHSTATE,
+ EV_ELEMENT_SIZE,
+};
-#define GSI_CH_C_DOORBELL_0_OFFSET(ch) \
- (0x0001e000 + 0x4000 * GSI_EE_AP + 0x08 * (ch))
+/* EV_CH_E_CNTXT_1 register */
+enum gsi_reg_ev_ch_c_cntxt_1_field_id {
+ R_LENGTH,
+};
-#define GSI_EV_CH_E_DOORBELL_0_OFFSET(ev) \
- (0x0001e100 + 0x4000 * GSI_EE_AP + 0x08 * (ev))
+/* EV_CH_E_CNTXT_8 register */
+enum gsi_reg_ch_c_ev_ch_e_cntxt_8_field_id {
+ EV_MODT,
+ EV_MODC,
+ EV_MOD_CNT,
+};
-#define GSI_GSI_STATUS_OFFSET \
- (0x0001f000 + 0x4000 * GSI_EE_AP)
-#define ENABLED_FMASK GENMASK(0, 0)
+/* GSI_STATUS register */
+enum gsi_reg_gsi_status_field_id {
+ ENABLED,
+};
-#define GSI_CH_CMD_OFFSET \
- (0x0001f008 + 0x4000 * GSI_EE_AP)
-#define CH_CHID_FMASK GENMASK(7, 0)
-#define CH_OPCODE_FMASK GENMASK(31, 24)
+/* CH_CMD register */
+enum gsi_reg_gsi_ch_cmd_field_id {
+ CH_CHID,
+ CH_OPCODE,
+};
/** enum gsi_ch_cmd_opcode - CH_OPCODE field values in CH_CMD */
enum gsi_ch_cmd_opcode {
@@ -236,10 +191,11 @@ enum gsi_ch_cmd_opcode {
GSI_CH_DB_STOP = 0xb,
};
-#define GSI_EV_CH_CMD_OFFSET \
- (0x0001f010 + 0x4000 * GSI_EE_AP)
-#define EV_CHID_FMASK GENMASK(7, 0)
-#define EV_OPCODE_FMASK GENMASK(31, 24)
+/* EV_CH_CMD register */
+enum gsi_ev_ch_cmd_field_id {
+ EV_CHID,
+ EV_OPCODE,
+};
/** enum gsi_evt_cmd_opcode - EV_OPCODE field values in EV_CH_CMD */
enum gsi_evt_cmd_opcode {
@@ -248,12 +204,13 @@ enum gsi_evt_cmd_opcode {
GSI_EVT_DE_ALLOC = 0xa,
};
-#define GSI_GENERIC_CMD_OFFSET \
- (0x0001f018 + 0x4000 * GSI_EE_AP)
-#define GENERIC_OPCODE_FMASK GENMASK(4, 0)
-#define GENERIC_CHID_FMASK GENMASK(9, 5)
-#define GENERIC_EE_FMASK GENMASK(13, 10)
-#define GENERIC_PARAMS_FMASK GENMASK(31, 24) /* IPA v4.11+ */
+/* GENERIC_CMD register */
+enum gsi_generic_cmd_field_id {
+ GENERIC_OPCODE,
+ GENERIC_CHID,
+ GENERIC_EE,
+ GENERIC_PARAMS, /* IPA v4.11+ */
+};
/** enum gsi_generic_cmd_opcode - GENERIC_OPCODE field values in GENERIC_CMD */
enum gsi_generic_cmd_opcode {
@@ -264,22 +221,20 @@ enum gsi_generic_cmd_opcode {
GSI_GENERIC_QUERY_FLOW_CONTROL = 0x5, /* IPA v4.11+ */
};
-/* The next register is present for IPA v3.5.1 and above */
-#define GSI_GSI_HW_PARAM_2_OFFSET \
- (0x0001f040 + 0x4000 * GSI_EE_AP)
-#define IRAM_SIZE_FMASK GENMASK(2, 0)
-#define NUM_CH_PER_EE_FMASK GENMASK(7, 3)
-#define NUM_EV_PER_EE_FMASK GENMASK(12, 8)
-#define GSI_CH_PEND_TRANSLATE_FMASK GENMASK(13, 13)
-#define GSI_CH_FULL_LOGIC_FMASK GENMASK(14, 14)
-/* Fields below are present for IPA v4.0 and above */
-#define GSI_USE_SDMA_FMASK GENMASK(15, 15)
-#define GSI_SDMA_N_INT_FMASK GENMASK(18, 16)
-#define GSI_SDMA_MAX_BURST_FMASK GENMASK(26, 19)
-#define GSI_SDMA_N_IOVEC_FMASK GENMASK(29, 27)
-/* Fields below are present for IPA v4.2 and above */
-#define GSI_USE_RD_WR_ENG_FMASK GENMASK(30, 30)
-#define GSI_USE_INTER_EE_FMASK GENMASK(31, 31)
+/* HW_PARAM_2 register */ /* IPA v3.5.1+ */
+enum gsi_hw_param_2_field_id {
+ IRAM_SIZE,
+ NUM_CH_PER_EE,
+ NUM_EV_PER_EE, /* Not IPA v5.0+ */
+ GSI_CH_PEND_TRANSLATE,
+ GSI_CH_FULL_LOGIC,
+ GSI_USE_SDMA, /* IPA v4.0+ */
+ GSI_SDMA_N_INT, /* IPA v4.0+ */
+ GSI_SDMA_MAX_BURST, /* IPA v4.0+ */
+ GSI_SDMA_N_IOVEC, /* IPA v4.0+ */
+ GSI_USE_RD_WR_ENG, /* IPA v4.2+ */
+ GSI_USE_INTER_EE, /* IPA v4.2+ */
+};
/** enum gsi_iram_size - IRAM_SIZE field values in HW_PARAM_2 */
enum gsi_iram_size {
@@ -293,93 +248,66 @@ enum gsi_iram_size {
IRAM_SIZE_FOUR_KB = 0x5,
};
-/* IRQ condition for each type is cleared by writing type-specific register */
-#define GSI_CNTXT_TYPE_IRQ_OFFSET \
- (0x0001f080 + 0x4000 * GSI_EE_AP)
-#define GSI_CNTXT_TYPE_IRQ_MSK_OFFSET \
- (0x0001f088 + 0x4000 * GSI_EE_AP)
+/* HW_PARAM_4 register */ /* IPA v5.0+ */
+enum gsi_hw_param_4_field_id {
+ EV_PER_EE,
+ IRAM_PROTOCOL_COUNT,
+};
-/* Values here are bit positions in the TYPE_IRQ and TYPE_IRQ_MSK registers */
+/**
+ * enum gsi_irq_type_id: GSI IRQ types
+ * @GSI_CH_CTRL: Channel allocation, deallocation, etc.
+ * @GSI_EV_CTRL: Event ring allocation, deallocation, etc.
+ * @GSI_GLOB_EE: Global/general event
+ * @GSI_IEOB: Transfer (TRE) completion
+ * @GSI_INTER_EE_CH_CTRL: Remote-issued stop/reset (unused)
+ * @GSI_INTER_EE_EV_CTRL: Remote-issued event reset (unused)
+ * @GSI_GENERAL: General hardware event (bus error, etc.)
+ */
enum gsi_irq_type_id {
- GSI_CH_CTRL = 0x0, /* channel allocation, etc. */
- GSI_EV_CTRL = 0x1, /* event ring allocation, etc. */
- GSI_GLOB_EE = 0x2, /* global/general event */
- GSI_IEOB = 0x3, /* TRE completion */
- GSI_INTER_EE_CH_CTRL = 0x4, /* remote-issued stop/reset (unused) */
- GSI_INTER_EE_EV_CTRL = 0x5, /* remote-issued event reset (unused) */
- GSI_GENERAL = 0x6, /* general-purpose event */
+ GSI_CH_CTRL = BIT(0),
+ GSI_EV_CTRL = BIT(1),
+ GSI_GLOB_EE = BIT(2),
+ GSI_IEOB = BIT(3),
+ GSI_INTER_EE_CH_CTRL = BIT(4),
+ GSI_INTER_EE_EV_CTRL = BIT(5),
+ GSI_GENERAL = BIT(6),
+ /* IRQ types 7-31 (and their bit values) are reserved */
};
-#define GSI_CNTXT_SRC_CH_IRQ_OFFSET \
- (0x0001f090 + 0x4000 * GSI_EE_AP)
-
-#define GSI_CNTXT_SRC_EV_CH_IRQ_OFFSET \
- (0x0001f094 + 0x4000 * GSI_EE_AP)
-
-#define GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET \
- (0x0001f098 + 0x4000 * GSI_EE_AP)
-
-#define GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET \
- (0x0001f09c + 0x4000 * GSI_EE_AP)
-
-#define GSI_CNTXT_SRC_CH_IRQ_CLR_OFFSET \
- (0x0001f0a0 + 0x4000 * GSI_EE_AP)
-
-#define GSI_CNTXT_SRC_EV_CH_IRQ_CLR_OFFSET \
- (0x0001f0a4 + 0x4000 * GSI_EE_AP)
-
-#define GSI_CNTXT_SRC_IEOB_IRQ_OFFSET \
- (0x0001f0b0 + 0x4000 * GSI_EE_AP)
-
-#define GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET \
- (0x0001f0b8 + 0x4000 * GSI_EE_AP)
-
-#define GSI_CNTXT_SRC_IEOB_IRQ_CLR_OFFSET \
- (0x0001f0c0 + 0x4000 * GSI_EE_AP)
-
-#define GSI_CNTXT_GLOB_IRQ_STTS_OFFSET \
- (0x0001f100 + 0x4000 * GSI_EE_AP)
-#define GSI_CNTXT_GLOB_IRQ_EN_OFFSET \
- (0x0001f108 + 0x4000 * GSI_EE_AP)
-#define GSI_CNTXT_GLOB_IRQ_CLR_OFFSET \
- (0x0001f110 + 0x4000 * GSI_EE_AP)
-/* Values here are bit positions in the GLOB_IRQ_* registers */
+/** enum gsi_global_irq_id: Global GSI interrupt events */
enum gsi_global_irq_id {
- ERROR_INT = 0x0,
- GP_INT1 = 0x1,
- GP_INT2 = 0x2,
- GP_INT3 = 0x3,
+ ERROR_INT = BIT(0),
+ GP_INT1 = BIT(1),
+ GP_INT2 = BIT(2),
+ GP_INT3 = BIT(3),
+ /* Global IRQ types 4-31 (and their bit values) are reserved */
};
-#define GSI_CNTXT_GSI_IRQ_STTS_OFFSET \
- (0x0001f118 + 0x4000 * GSI_EE_AP)
-#define GSI_CNTXT_GSI_IRQ_EN_OFFSET \
- (0x0001f120 + 0x4000 * GSI_EE_AP)
-#define GSI_CNTXT_GSI_IRQ_CLR_OFFSET \
- (0x0001f128 + 0x4000 * GSI_EE_AP)
-/* Values here are bit positions in the (general) GSI_IRQ_* registers */
-enum gsi_general_id {
- BREAK_POINT = 0x0,
- BUS_ERROR = 0x1,
- CMD_FIFO_OVRFLOW = 0x2,
- MCS_STACK_OVRFLOW = 0x3,
+/** enum gsi_general_irq_id: GSI general IRQ conditions */
+enum gsi_general_irq_id {
+ BREAK_POINT = BIT(0),
+ BUS_ERROR = BIT(1),
+ CMD_FIFO_OVRFLOW = BIT(2),
+ MCS_STACK_OVRFLOW = BIT(3),
+ /* General IRQ types 4-31 (and their bit values) are reserved */
};
-#define GSI_CNTXT_INTSET_OFFSET \
- (0x0001f180 + 0x4000 * GSI_EE_AP)
-#define INTYPE_FMASK GENMASK(0, 0)
-
-#define GSI_ERROR_LOG_OFFSET \
- (0x0001f200 + 0x4000 * GSI_EE_AP)
+/* CNTXT_INTSET register */
+enum gsi_cntxt_intset_field_id {
+ INTYPE,
+};
-/* Fields below are present for IPA v3.5.1 and above */
-#define ERR_ARG3_FMASK GENMASK(3, 0)
-#define ERR_ARG2_FMASK GENMASK(7, 4)
-#define ERR_ARG1_FMASK GENMASK(11, 8)
-#define ERR_CODE_FMASK GENMASK(15, 12)
-#define ERR_VIRT_IDX_FMASK GENMASK(23, 19)
-#define ERR_TYPE_FMASK GENMASK(27, 24)
-#define ERR_EE_FMASK GENMASK(31, 28)
+/* ERROR_LOG register */
+enum gsi_error_log_field_id {
+ ERR_ARG3,
+ ERR_ARG2,
+ ERR_ARG1,
+ ERR_CODE,
+ ERR_VIRT_IDX,
+ ERR_TYPE,
+ ERR_EE,
+};
/** enum gsi_err_code - ERR_CODE field values in EE_ERR_LOG */
enum gsi_err_code {
@@ -400,13 +328,11 @@ enum gsi_err_type {
GSI_ERR_TYPE_EVT = 0x3,
};
-#define GSI_ERROR_LOG_CLR_OFFSET \
- (0x0001f210 + 0x4000 * GSI_EE_AP)
-
-#define GSI_CNTXT_SCRATCH_0_OFFSET \
- (0x0001f400 + 0x4000 * GSI_EE_AP)
-#define INTER_EE_RESULT_FMASK GENMASK(2, 0)
-#define GENERIC_EE_RESULT_FMASK GENMASK(7, 5)
+/* CNTXT_SCRATCH_0 register */
+enum gsi_cntxt_scratch_0_field_id {
+ INTER_EE_RESULT,
+ GENERIC_EE_RESULT,
+};
/** enum gsi_generic_ee_result - GENERIC_EE_RESULT field values in SCRATCH_0 */
enum gsi_generic_ee_result {
@@ -419,4 +345,34 @@ enum gsi_generic_ee_result {
GENERIC_EE_NO_RESOURCES = 0x7,
};
+extern const struct regs gsi_regs_v3_1;
+extern const struct regs gsi_regs_v3_5_1;
+extern const struct regs gsi_regs_v4_0;
+extern const struct regs gsi_regs_v4_5;
+extern const struct regs gsi_regs_v4_9;
+extern const struct regs gsi_regs_v4_11;
+
+/**
+ * gsi_reg() - Return the structure describing a GSI register
+ * @gsi: GSI pointer
+ * @reg_id: GSI register ID
+ */
+const struct reg *gsi_reg(struct gsi *gsi, enum gsi_reg_id reg_id);
+
+/**
+ * gsi_reg_init() - Perform GSI register initialization
+ * @gsi: GSI pointer
+ * @pdev: GSI (IPA) platform device
+ *
+ * Initialize GSI registers, including looking up and I/O mapping
+ * the "gsi" memory space.
+ */
+int gsi_reg_init(struct gsi *gsi, struct platform_device *pdev);
+
+/**
+ * gsi_reg_exit() - Inverse of gsi_reg_init()
+ * @gsi: GSI pointer
+ */
+void gsi_reg_exit(struct gsi *gsi);
+
#endif /* _GSI_REG_H_ */
diff --git a/drivers/net/ipa/ipa.h b/drivers/net/ipa/ipa.h
index 5372db58b5bd..f3355e040a9e 100644
--- a/drivers/net/ipa/ipa.h
+++ b/drivers/net/ipa/ipa.h
@@ -45,7 +45,6 @@ struct ipa_interrupt;
* @interrupt: IPA Interrupt information
* @uc_powered: true if power is active by proxy for microcontroller
* @uc_loaded: true after microcontroller has reported it's ready
- * @reg_addr: DMA address used for IPA register access
* @reg_virt: Virtual address used for IPA register access
* @regs: IPA register definitions
* @mem_addr: DMA address of IPA-local memory space
@@ -97,9 +96,8 @@ struct ipa {
bool uc_powered;
bool uc_loaded;
- dma_addr_t reg_addr;
void __iomem *reg_virt;
- const struct ipa_regs *regs;
+ const struct regs *regs;
dma_addr_t mem_addr;
void *mem_virt;
diff --git a/drivers/net/ipa/ipa_cmd.c b/drivers/net/ipa/ipa_cmd.c
index bb3dfa9a2bc8..f1419fbd776c 100644
--- a/drivers/net/ipa/ipa_cmd.c
+++ b/drivers/net/ipa/ipa_cmd.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2022 Linaro Ltd.
+ * Copyright (C) 2019-2023 Linaro Ltd.
*/
#include <linux/types.h>
@@ -94,11 +94,11 @@ struct ipa_cmd_register_write {
/* IPA_CMD_IP_PACKET_INIT */
struct ipa_cmd_ip_packet_init {
- u8 dest_endpoint;
+ u8 dest_endpoint; /* Full 8 bits used for IPA v5.0+ */
u8 reserved[7];
};
-/* Field masks for ipa_cmd_ip_packet_init dest_endpoint field */
+/* Field mask for ipa_cmd_ip_packet_init dest_endpoint field (unused v5.0+) */
#define IPA_PACKET_INIT_DEST_ENDPOINT_FMASK GENMASK(4, 0)
/* IPA_CMD_DMA_SHARED_MEM */
@@ -157,9 +157,14 @@ static void ipa_cmd_validate_build(void)
BUILD_BUG_ON(field_max(IP_FLTRT_FLAGS_HASH_ADDR_FMASK) !=
field_max(IP_FLTRT_FLAGS_NHASH_ADDR_FMASK));
- /* Valid endpoint numbers must fit in the IP packet init command */
- BUILD_BUG_ON(field_max(IPA_PACKET_INIT_DEST_ENDPOINT_FMASK) <
- IPA_ENDPOINT_MAX - 1);
+ /* Prior to IPA v5.0, we supported no more than 32 endpoints,
+ * and this was reflected in some 5-bit fields that held
+ * endpoint numbers. Starting with IPA v5.0, the widths of
+ * these fields were extended to 8 bits, meaning up to 256
+ * endpoints. If the driver claims to support more than
+ * that it's an error.
+ */
+ BUILD_BUG_ON(IPA_ENDPOINT_MAX - 1 > U8_MAX);
}
/* Validate a memory region holding a table */
@@ -282,7 +287,7 @@ static bool ipa_cmd_register_write_offset_valid(struct ipa *ipa,
/* Check whether offsets passed to register_write are valid */
static bool ipa_cmd_register_write_valid(struct ipa *ipa)
{
- const struct ipa_reg *reg;
+ const struct reg *reg;
const char *name;
u32 offset;
@@ -290,8 +295,12 @@ static bool ipa_cmd_register_write_valid(struct ipa *ipa)
* offset will fit in a register write IPA immediate command.
*/
if (ipa_table_hash_support(ipa)) {
- reg = ipa_reg(ipa, FILT_ROUT_HASH_FLUSH);
- offset = ipa_reg_offset(reg);
+ if (ipa->version < IPA_VERSION_5_0)
+ reg = ipa_reg(ipa, FILT_ROUT_HASH_FLUSH);
+ else
+ reg = ipa_reg(ipa, FILT_ROUT_CACHE_FLUSH);
+
+ offset = reg_offset(reg);
name = "filter/route hash flush";
if (!ipa_cmd_register_write_offset_valid(ipa, name, offset))
return false;
@@ -305,7 +314,7 @@ static bool ipa_cmd_register_write_valid(struct ipa *ipa)
* fits in the register write command field(s) that must hold it.
*/
reg = ipa_reg(ipa, ENDP_STATUS);
- offset = ipa_reg_n_offset(reg, IPA_ENDPOINT_COUNT - 1);
+ offset = reg_n_offset(reg, IPA_ENDPOINT_COUNT - 1);
name = "maximal endpoint status";
if (!ipa_cmd_register_write_offset_valid(ipa, name, offset))
return false;
@@ -486,8 +495,13 @@ static void ipa_cmd_ip_packet_init_add(struct gsi_trans *trans, u8 endpoint_id)
cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
payload = &cmd_payload->ip_packet_init;
- payload->dest_endpoint = u8_encode_bits(endpoint_id,
- IPA_PACKET_INIT_DEST_ENDPOINT_FMASK);
+ if (ipa->version < IPA_VERSION_5_0) {
+ payload->dest_endpoint =
+ u8_encode_bits(endpoint_id,
+ IPA_PACKET_INIT_DEST_ENDPOINT_FMASK);
+ } else {
+ payload->dest_endpoint = endpoint_id;
+ }
gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
opcode);
diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c
index 136932464261..2ee80ed140b7 100644
--- a/drivers/net/ipa/ipa_endpoint.c
+++ b/drivers/net/ipa/ipa_endpoint.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2022 Linaro Ltd.
+ * Copyright (C) 2019-2023 Linaro Ltd.
*/
#include <linux/types.h>
@@ -34,41 +34,181 @@
#define IPA_ENDPOINT_RESET_AGGR_RETRY_MAX 3
-/** enum ipa_status_opcode - status element opcode hardware values */
-enum ipa_status_opcode {
- IPA_STATUS_OPCODE_PACKET = 0x01,
- IPA_STATUS_OPCODE_DROPPED_PACKET = 0x04,
- IPA_STATUS_OPCODE_SUSPENDED_PACKET = 0x08,
- IPA_STATUS_OPCODE_PACKET_2ND_PASS = 0x40,
+/** enum ipa_status_opcode - IPA status opcode field hardware values */
+enum ipa_status_opcode { /* *Not* a bitmask */
+ IPA_STATUS_OPCODE_PACKET = 1,
+ IPA_STATUS_OPCODE_NEW_RULE_PACKET = 2,
+ IPA_STATUS_OPCODE_DROPPED_PACKET = 4,
+ IPA_STATUS_OPCODE_SUSPENDED_PACKET = 8,
+ IPA_STATUS_OPCODE_LOG = 16,
+ IPA_STATUS_OPCODE_DCMP = 32,
+ IPA_STATUS_OPCODE_PACKET_2ND_PASS = 64,
};
-/** enum ipa_status_exception - status element exception type */
-enum ipa_status_exception {
+/** enum ipa_status_exception - IPA status exception field hardware values */
+enum ipa_status_exception { /* *Not* a bitmask */
/* 0 means no exception */
- IPA_STATUS_EXCEPTION_DEAGGR = 0x01,
+ IPA_STATUS_EXCEPTION_DEAGGR = 1,
+ IPA_STATUS_EXCEPTION_IPTYPE = 4,
+ IPA_STATUS_EXCEPTION_PACKET_LENGTH = 8,
+ IPA_STATUS_EXCEPTION_FRAG_RULE_MISS = 16,
+ IPA_STATUS_EXCEPTION_SW_FILTER = 32,
+ IPA_STATUS_EXCEPTION_NAT = 64, /* IPv4 */
+ IPA_STATUS_EXCEPTION_IPV6_CONN_TRACK = 64, /* IPv6 */
+ IPA_STATUS_EXCEPTION_UC = 128,
+ IPA_STATUS_EXCEPTION_INVALID_ENDPOINT = 129,
+ IPA_STATUS_EXCEPTION_HEADER_INSERT = 136,
+ IPA_STATUS_EXCEPTION_CHEKCSUM = 229,
};
-/* Status element provided by hardware */
-struct ipa_status {
- u8 opcode; /* enum ipa_status_opcode */
- u8 exception; /* enum ipa_status_exception */
- __le16 mask;
- __le16 pkt_len;
- u8 endp_src_idx;
- u8 endp_dst_idx;
- __le32 metadata;
- __le32 flags1;
- __le64 flags2;
- __le32 flags3;
- __le32 flags4;
+/** enum ipa_status_mask - IPA status mask field bitmask hardware values */
+enum ipa_status_mask {
+ IPA_STATUS_MASK_FRAG_PROCESS = BIT(0),
+ IPA_STATUS_MASK_FILT_PROCESS = BIT(1),
+ IPA_STATUS_MASK_NAT_PROCESS = BIT(2),
+ IPA_STATUS_MASK_ROUTE_PROCESS = BIT(3),
+ IPA_STATUS_MASK_TAG_VALID = BIT(4),
+ IPA_STATUS_MASK_FRAGMENT = BIT(5),
+ IPA_STATUS_MASK_FIRST_FRAGMENT = BIT(6),
+ IPA_STATUS_MASK_V4 = BIT(7),
+ IPA_STATUS_MASK_CKSUM_PROCESS = BIT(8),
+ IPA_STATUS_MASK_AGGR_PROCESS = BIT(9),
+ IPA_STATUS_MASK_DEST_EOT = BIT(10),
+ IPA_STATUS_MASK_DEAGGR_PROCESS = BIT(11),
+ IPA_STATUS_MASK_DEAGG_FIRST = BIT(12),
+ IPA_STATUS_MASK_SRC_EOT = BIT(13),
+ IPA_STATUS_MASK_PREV_EOT = BIT(14),
+ IPA_STATUS_MASK_BYTE_LIMIT = BIT(15),
};
-/* Field masks for struct ipa_status structure fields */
-#define IPA_STATUS_MASK_TAG_VALID_FMASK GENMASK(4, 4)
-#define IPA_STATUS_SRC_IDX_FMASK GENMASK(4, 0)
-#define IPA_STATUS_DST_IDX_FMASK GENMASK(4, 0)
-#define IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK GENMASK(31, 22)
-#define IPA_STATUS_FLAGS2_TAG_FMASK GENMASK_ULL(63, 16)
+/* Special IPA filter/router rule field value indicating "rule miss" */
+#define IPA_STATUS_RULE_MISS 0x3ff /* 10-bit filter/router rule fields */
+
+/** The IPA status nat_type field uses enum ipa_nat_type hardware values */
+
+/* enum ipa_status_field_id - IPA packet status structure field identifiers */
+enum ipa_status_field_id {
+ STATUS_OPCODE, /* enum ipa_status_opcode */
+ STATUS_EXCEPTION, /* enum ipa_status_exception */
+ STATUS_MASK, /* enum ipa_status_mask (bitmask) */
+ STATUS_LENGTH,
+ STATUS_SRC_ENDPOINT,
+ STATUS_DST_ENDPOINT,
+ STATUS_METADATA,
+ STATUS_FILTER_LOCAL, /* Boolean */
+ STATUS_FILTER_HASH, /* Boolean */
+ STATUS_FILTER_GLOBAL, /* Boolean */
+ STATUS_FILTER_RETAIN, /* Boolean */
+ STATUS_FILTER_RULE_INDEX,
+ STATUS_ROUTER_LOCAL, /* Boolean */
+ STATUS_ROUTER_HASH, /* Boolean */
+ STATUS_UCP, /* Boolean */
+ STATUS_ROUTER_TABLE,
+ STATUS_ROUTER_RULE_INDEX,
+ STATUS_NAT_HIT, /* Boolean */
+ STATUS_NAT_INDEX,
+ STATUS_NAT_TYPE, /* enum ipa_nat_type */
+ STATUS_TAG_LOW32, /* Low-order 32 bits of 48-bit tag */
+ STATUS_TAG_HIGH16, /* High-order 16 bits of 48-bit tag */
+ STATUS_SEQUENCE,
+ STATUS_TIME_OF_DAY,
+ STATUS_HEADER_LOCAL, /* Boolean */
+ STATUS_HEADER_OFFSET,
+ STATUS_FRAG_HIT, /* Boolean */
+ STATUS_FRAG_RULE_INDEX,
+};
+
+/* Size in bytes of an IPA packet status structure */
+#define IPA_STATUS_SIZE sizeof(__le32[4])
+
+/* IPA status structure decoder; looks up field values for a structure */
+static u32 ipa_status_extract(struct ipa *ipa, const void *data,
+ enum ipa_status_field_id field)
+{
+ enum ipa_version version = ipa->version;
+ const __le32 *word = data;
+
+ switch (field) {
+ case STATUS_OPCODE:
+ return le32_get_bits(word[0], GENMASK(7, 0));
+ case STATUS_EXCEPTION:
+ return le32_get_bits(word[0], GENMASK(15, 8));
+ case STATUS_MASK:
+ return le32_get_bits(word[0], GENMASK(31, 16));
+ case STATUS_LENGTH:
+ return le32_get_bits(word[1], GENMASK(15, 0));
+ case STATUS_SRC_ENDPOINT:
+ if (version < IPA_VERSION_5_0)
+ return le32_get_bits(word[1], GENMASK(20, 16));
+ return le32_get_bits(word[1], GENMASK(23, 16));
+ /* Status word 1, bits 21-23 are reserved (not IPA v5.0+) */
+ /* Status word 1, bits 24-26 are reserved (IPA v5.0+) */
+ case STATUS_DST_ENDPOINT:
+ if (version < IPA_VERSION_5_0)
+ return le32_get_bits(word[1], GENMASK(28, 24));
+ return le32_get_bits(word[7], GENMASK(23, 16));
+ /* Status word 1, bits 29-31 are reserved */
+ case STATUS_METADATA:
+ return le32_to_cpu(word[2]);
+ case STATUS_FILTER_LOCAL:
+ return le32_get_bits(word[3], GENMASK(0, 0));
+ case STATUS_FILTER_HASH:
+ return le32_get_bits(word[3], GENMASK(1, 1));
+ case STATUS_FILTER_GLOBAL:
+ return le32_get_bits(word[3], GENMASK(2, 2));
+ case STATUS_FILTER_RETAIN:
+ return le32_get_bits(word[3], GENMASK(3, 3));
+ case STATUS_FILTER_RULE_INDEX:
+ return le32_get_bits(word[3], GENMASK(13, 4));
+ /* ROUTER_TABLE is in word 3, bits 14-21 (IPA v5.0+) */
+ case STATUS_ROUTER_LOCAL:
+ if (version < IPA_VERSION_5_0)
+ return le32_get_bits(word[3], GENMASK(14, 14));
+ return le32_get_bits(word[1], GENMASK(27, 27));
+ case STATUS_ROUTER_HASH:
+ if (version < IPA_VERSION_5_0)
+ return le32_get_bits(word[3], GENMASK(15, 15));
+ return le32_get_bits(word[1], GENMASK(28, 28));
+ case STATUS_UCP:
+ if (version < IPA_VERSION_5_0)
+ return le32_get_bits(word[3], GENMASK(16, 16));
+ return le32_get_bits(word[7], GENMASK(31, 31));
+ case STATUS_ROUTER_TABLE:
+ if (version < IPA_VERSION_5_0)
+ return le32_get_bits(word[3], GENMASK(21, 17));
+ return le32_get_bits(word[3], GENMASK(21, 14));
+ case STATUS_ROUTER_RULE_INDEX:
+ return le32_get_bits(word[3], GENMASK(31, 22));
+ case STATUS_NAT_HIT:
+ return le32_get_bits(word[4], GENMASK(0, 0));
+ case STATUS_NAT_INDEX:
+ return le32_get_bits(word[4], GENMASK(13, 1));
+ case STATUS_NAT_TYPE:
+ return le32_get_bits(word[4], GENMASK(15, 14));
+ case STATUS_TAG_LOW32:
+ return le32_get_bits(word[4], GENMASK(31, 16)) |
+ (le32_get_bits(word[5], GENMASK(15, 0)) << 16);
+ case STATUS_TAG_HIGH16:
+ return le32_get_bits(word[5], GENMASK(31, 16));
+ case STATUS_SEQUENCE:
+ return le32_get_bits(word[6], GENMASK(7, 0));
+ case STATUS_TIME_OF_DAY:
+ return le32_get_bits(word[6], GENMASK(31, 8));
+ case STATUS_HEADER_LOCAL:
+ return le32_get_bits(word[7], GENMASK(0, 0));
+ case STATUS_HEADER_OFFSET:
+ return le32_get_bits(word[7], GENMASK(10, 1));
+ case STATUS_FRAG_HIT:
+ return le32_get_bits(word[7], GENMASK(11, 11));
+ case STATUS_FRAG_RULE_INDEX:
+ return le32_get_bits(word[7], GENMASK(15, 12));
+ /* Status word 7, bits 16-30 are reserved */
+ /* Status word 7, bit 31 is reserved (not IPA v5.0+) */
+ default:
+ WARN(true, "%s: bad field_id %u\n", __func__, field);
+ return 0;
+ }
+}
/* Compute the aggregation size value to use for a given buffer size */
static u32 ipa_aggr_size_kb(u32 rx_buffer_size, bool aggr_hard_limit)
@@ -101,7 +241,7 @@ static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
if (!data->toward_ipa) {
const struct ipa_endpoint_rx *rx_config;
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 buffer_size;
u32 aggr_size;
u32 limit;
@@ -164,7 +304,7 @@ static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
rx_config->aggr_hard_limit);
reg = ipa_reg(ipa, ENDP_INIT_AGGR);
- limit = ipa_reg_field_max(reg, BYTE_LIMIT);
+ limit = reg_field_max(reg, BYTE_LIMIT);
if (aggr_size > limit) {
dev_err(dev, "aggregated size too large for RX endpoint %u (%u KB > %u KB)\n",
data->endpoint_id, aggr_size, limit);
@@ -307,7 +447,7 @@ static bool
ipa_endpoint_init_ctrl(struct ipa_endpoint *endpoint, bool suspend_delay)
{
struct ipa *ipa = endpoint->ipa;
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 field_id;
u32 offset;
bool state;
@@ -320,11 +460,11 @@ ipa_endpoint_init_ctrl(struct ipa_endpoint *endpoint, bool suspend_delay)
WARN_ON(ipa->version >= IPA_VERSION_4_0);
reg = ipa_reg(ipa, ENDP_INIT_CTRL);
- offset = ipa_reg_n_offset(reg, endpoint->endpoint_id);
+ offset = reg_n_offset(reg, endpoint->endpoint_id);
val = ioread32(ipa->reg_virt + offset);
field_id = endpoint->toward_ipa ? ENDP_DELAY : ENDP_SUSPEND;
- mask = ipa_reg_bit(reg, field_id);
+ mask = reg_bit(reg, field_id);
state = !!(val & mask);
@@ -353,13 +493,13 @@ static bool ipa_endpoint_aggr_active(struct ipa_endpoint *endpoint)
u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
u32 unit = endpoint_id / 32;
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 val;
WARN_ON(!test_bit(endpoint_id, ipa->available));
reg = ipa_reg(ipa, STATE_AGGR_ACTIVE);
- val = ioread32(ipa->reg_virt + ipa_reg_n_offset(reg, unit));
+ val = ioread32(ipa->reg_virt + reg_n_offset(reg, unit));
return !!(val & BIT(endpoint_id % 32));
}
@@ -370,12 +510,12 @@ static void ipa_endpoint_force_close(struct ipa_endpoint *endpoint)
u32 mask = BIT(endpoint_id % 32);
struct ipa *ipa = endpoint->ipa;
u32 unit = endpoint_id / 32;
- const struct ipa_reg *reg;
+ const struct reg *reg;
WARN_ON(!test_bit(endpoint_id, ipa->available));
reg = ipa_reg(ipa, AGGR_FORCE_CLOSE);
- iowrite32(mask, ipa->reg_virt + ipa_reg_n_offset(reg, unit));
+ iowrite32(mask, ipa->reg_virt + reg_n_offset(reg, unit));
}
/**
@@ -473,7 +613,7 @@ int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa)
for_each_set_bit(endpoint_id, ipa->defined, ipa->endpoint_count) {
struct ipa_endpoint *endpoint;
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 offset;
/* We only reset modem TX endpoints */
@@ -482,7 +622,7 @@ int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa)
continue;
reg = ipa_reg(ipa, ENDP_STATUS);
- offset = ipa_reg_n_offset(reg, endpoint_id);
+ offset = reg_n_offset(reg, endpoint_id);
/* Value written is 0, and all bits are updated. That
* means status is disabled on the endpoint, and as a
@@ -505,7 +645,7 @@ static void ipa_endpoint_init_cfg(struct ipa_endpoint *endpoint)
u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
enum ipa_cs_offload_en enabled;
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 val = 0;
reg = ipa_reg(ipa, ENDP_INIT_CFG);
@@ -518,7 +658,7 @@ static void ipa_endpoint_init_cfg(struct ipa_endpoint *endpoint)
/* Checksum header offset is in 4-byte units */
off = sizeof(struct rmnet_map_header) / sizeof(u32);
- val |= ipa_reg_encode(reg, CS_METADATA_HDR_OFFSET, off);
+ val |= reg_encode(reg, CS_METADATA_HDR_OFFSET, off);
enabled = version < IPA_VERSION_4_5
? IPA_CS_OFFLOAD_UL
@@ -531,26 +671,26 @@ static void ipa_endpoint_init_cfg(struct ipa_endpoint *endpoint)
} else {
enabled = IPA_CS_OFFLOAD_NONE;
}
- val |= ipa_reg_encode(reg, CS_OFFLOAD_EN, enabled);
+ val |= reg_encode(reg, CS_OFFLOAD_EN, enabled);
/* CS_GEN_QMB_MASTER_SEL is 0 */
- iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
+ iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
}
static void ipa_endpoint_init_nat(struct ipa_endpoint *endpoint)
{
u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 val;
if (!endpoint->toward_ipa)
return;
reg = ipa_reg(ipa, ENDP_INIT_NAT);
- val = ipa_reg_encode(reg, NAT_EN, IPA_NAT_BYPASS);
+ val = reg_encode(reg, NAT_EN, IPA_NAT_TYPE_BYPASS);
- iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
+ iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
}
static u32
@@ -576,13 +716,13 @@ ipa_qmap_header_size(enum ipa_version version, struct ipa_endpoint *endpoint)
/* Encoded value for ENDP_INIT_HDR register HDR_LEN* field(s) */
static u32 ipa_header_size_encode(enum ipa_version version,
- const struct ipa_reg *reg, u32 header_size)
+ const struct reg *reg, u32 header_size)
{
- u32 field_max = ipa_reg_field_max(reg, HDR_LEN);
+ u32 field_max = reg_field_max(reg, HDR_LEN);
u32 val;
/* We know field_max can be used as a mask (2^n - 1) */
- val = ipa_reg_encode(reg, HDR_LEN, header_size & field_max);
+ val = reg_encode(reg, HDR_LEN, header_size & field_max);
if (version < IPA_VERSION_4_5) {
WARN_ON(header_size > field_max);
return val;
@@ -590,21 +730,21 @@ static u32 ipa_header_size_encode(enum ipa_version version,
/* IPA v4.5 adds a few more most-significant bits */
header_size >>= hweight32(field_max);
- WARN_ON(header_size > ipa_reg_field_max(reg, HDR_LEN_MSB));
- val |= ipa_reg_encode(reg, HDR_LEN_MSB, header_size);
+ WARN_ON(header_size > reg_field_max(reg, HDR_LEN_MSB));
+ val |= reg_encode(reg, HDR_LEN_MSB, header_size);
return val;
}
/* Encoded value for ENDP_INIT_HDR register OFST_METADATA* field(s) */
static u32 ipa_metadata_offset_encode(enum ipa_version version,
- const struct ipa_reg *reg, u32 offset)
+ const struct reg *reg, u32 offset)
{
- u32 field_max = ipa_reg_field_max(reg, HDR_OFST_METADATA);
+ u32 field_max = reg_field_max(reg, HDR_OFST_METADATA);
u32 val;
/* We know field_max can be used as a mask (2^n - 1) */
- val = ipa_reg_encode(reg, HDR_OFST_METADATA, offset);
+ val = reg_encode(reg, HDR_OFST_METADATA, offset);
if (version < IPA_VERSION_4_5) {
WARN_ON(offset > field_max);
return val;
@@ -612,8 +752,8 @@ static u32 ipa_metadata_offset_encode(enum ipa_version version,
/* IPA v4.5 adds a few more most-significant bits */
offset >>= hweight32(field_max);
- WARN_ON(offset > ipa_reg_field_max(reg, HDR_OFST_METADATA_MSB));
- val |= ipa_reg_encode(reg, HDR_OFST_METADATA_MSB, offset);
+ WARN_ON(offset > reg_field_max(reg, HDR_OFST_METADATA_MSB));
+ val |= reg_encode(reg, HDR_OFST_METADATA_MSB, offset);
return val;
}
@@ -643,7 +783,7 @@ static void ipa_endpoint_init_hdr(struct ipa_endpoint *endpoint)
{
u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 val = 0;
reg = ipa_reg(ipa, ENDP_INIT_HDR);
@@ -666,13 +806,13 @@ static void ipa_endpoint_init_hdr(struct ipa_endpoint *endpoint)
off = offsetof(struct rmnet_map_header, pkt_len);
/* Upper bits are stored in HDR_EXT with IPA v4.5 */
if (version >= IPA_VERSION_4_5)
- off &= ipa_reg_field_max(reg, HDR_OFST_PKT_SIZE);
+ off &= reg_field_max(reg, HDR_OFST_PKT_SIZE);
- val |= ipa_reg_bit(reg, HDR_OFST_PKT_SIZE_VALID);
- val |= ipa_reg_encode(reg, HDR_OFST_PKT_SIZE, off);
+ val |= reg_bit(reg, HDR_OFST_PKT_SIZE_VALID);
+ val |= reg_encode(reg, HDR_OFST_PKT_SIZE, off);
}
/* For QMAP TX, metadata offset is 0 (modem assumes this) */
- val |= ipa_reg_bit(reg, HDR_OFST_METADATA_VALID);
+ val |= reg_bit(reg, HDR_OFST_METADATA_VALID);
/* HDR_ADDITIONAL_CONST_LEN is 0; (RX only) */
/* HDR_A5_MUX is 0 */
@@ -680,7 +820,7 @@ static void ipa_endpoint_init_hdr(struct ipa_endpoint *endpoint)
/* HDR_METADATA_REG_VALID is 0 (TX only, version < v4.5) */
}
- iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
+ iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
}
static void ipa_endpoint_init_hdr_ext(struct ipa_endpoint *endpoint)
@@ -688,13 +828,13 @@ static void ipa_endpoint_init_hdr_ext(struct ipa_endpoint *endpoint)
u32 pad_align = endpoint->config.rx.pad_align;
u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 val = 0;
reg = ipa_reg(ipa, ENDP_INIT_HDR_EXT);
if (endpoint->config.qmap) {
/* We have a header, so we must specify its endianness */
- val |= ipa_reg_bit(reg, HDR_ENDIANNESS); /* big endian */
+ val |= reg_bit(reg, HDR_ENDIANNESS); /* big endian */
/* A QMAP header contains a 6 bit pad field at offset 0.
* The RMNet driver assumes this field is meaningful in
@@ -704,16 +844,16 @@ static void ipa_endpoint_init_hdr_ext(struct ipa_endpoint *endpoint)
* (although 0) should be ignored.
*/
if (!endpoint->toward_ipa) {
- val |= ipa_reg_bit(reg, HDR_TOTAL_LEN_OR_PAD_VALID);
+ val |= reg_bit(reg, HDR_TOTAL_LEN_OR_PAD_VALID);
/* HDR_TOTAL_LEN_OR_PAD is 0 (pad, not total_len) */
- val |= ipa_reg_bit(reg, HDR_PAYLOAD_LEN_INC_PADDING);
+ val |= reg_bit(reg, HDR_PAYLOAD_LEN_INC_PADDING);
/* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0 */
}
}
/* HDR_PAYLOAD_LEN_INC_PADDING is 0 */
if (!endpoint->toward_ipa)
- val |= ipa_reg_encode(reg, HDR_PAD_TO_ALIGNMENT, pad_align);
+ val |= reg_encode(reg, HDR_PAD_TO_ALIGNMENT, pad_align);
/* IPA v4.5 adds some most-significant bits to a few fields,
* two of which are defined in the HDR (not HDR_EXT) register.
@@ -721,25 +861,25 @@ static void ipa_endpoint_init_hdr_ext(struct ipa_endpoint *endpoint)
if (ipa->version >= IPA_VERSION_4_5) {
/* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0, so MSB is 0 */
if (endpoint->config.qmap && !endpoint->toward_ipa) {
- u32 mask = ipa_reg_field_max(reg, HDR_OFST_PKT_SIZE);
+ u32 mask = reg_field_max(reg, HDR_OFST_PKT_SIZE);
u32 off; /* Field offset within header */
off = offsetof(struct rmnet_map_header, pkt_len);
/* Low bits are in the ENDP_INIT_HDR register */
off >>= hweight32(mask);
- val |= ipa_reg_encode(reg, HDR_OFST_PKT_SIZE_MSB, off);
+ val |= reg_encode(reg, HDR_OFST_PKT_SIZE_MSB, off);
/* HDR_ADDITIONAL_CONST_LEN is 0 so MSB is 0 */
}
}
- iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
+ iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
}
static void ipa_endpoint_init_hdr_metadata_mask(struct ipa_endpoint *endpoint)
{
u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 val = 0;
u32 offset;
@@ -747,7 +887,7 @@ static void ipa_endpoint_init_hdr_metadata_mask(struct ipa_endpoint *endpoint)
return; /* Register not valid for TX endpoints */
reg = ipa_reg(ipa, ENDP_INIT_HDR_METADATA_MASK);
- offset = ipa_reg_n_offset(reg, endpoint_id);
+ offset = reg_n_offset(reg, endpoint_id);
/* Note that HDR_ENDIANNESS indicates big endian header fields */
if (endpoint->config.qmap)
@@ -759,7 +899,7 @@ static void ipa_endpoint_init_hdr_metadata_mask(struct ipa_endpoint *endpoint)
static void ipa_endpoint_init_mode(struct ipa_endpoint *endpoint)
{
struct ipa *ipa = endpoint->ipa;
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 offset;
u32 val;
@@ -771,82 +911,90 @@ static void ipa_endpoint_init_mode(struct ipa_endpoint *endpoint)
enum ipa_endpoint_name name = endpoint->config.dma_endpoint;
u32 dma_endpoint_id = ipa->name_map[name]->endpoint_id;
- val = ipa_reg_encode(reg, ENDP_MODE, IPA_DMA);
- val |= ipa_reg_encode(reg, DEST_PIPE_INDEX, dma_endpoint_id);
+ val = reg_encode(reg, ENDP_MODE, IPA_DMA);
+ val |= reg_encode(reg, DEST_PIPE_INDEX, dma_endpoint_id);
} else {
- val = ipa_reg_encode(reg, ENDP_MODE, IPA_BASIC);
+ val = reg_encode(reg, ENDP_MODE, IPA_BASIC);
}
/* All other bits unspecified (and 0) */
- offset = ipa_reg_n_offset(reg, endpoint->endpoint_id);
+ offset = reg_n_offset(reg, endpoint->endpoint_id);
iowrite32(val, ipa->reg_virt + offset);
}
-/* For IPA v4.5+, times are expressed using Qtime. The AP uses one of two
- * pulse generators (0 and 1) to measure elapsed time. In ipa_qtime_config()
- * they're configured to have granularity 100 usec and 1 msec, respectively.
- *
- * The return value is the positive or negative Qtime value to use to
- * express the (microsecond) time provided. A positive return value
- * means pulse generator 0 can be used; otherwise use pulse generator 1.
+/* For IPA v4.5+, times are expressed using Qtime. A time is represented
+ * at one of several available granularities, which are configured in
+ * ipa_qtime_config(). Three (or, starting with IPA v5.0, four) pulse
+ * generators are set up with different "tick" periods. A Qtime value
+ * encodes a tick count along with an indication of a pulse generator
+ * (which has a fixed tick period). Two pulse generators are always
+ * available to the AP; a third is available starting with IPA v5.0.
+ * This function determines which pulse generator most accurately
+ * represents the time period provided, and returns the tick count to
+ * use to represent that time.
*/
-static int ipa_qtime_val(u32 microseconds, u32 max)
-{
- u32 val;
-
- /* Use 100 microsecond granularity if possible */
- val = DIV_ROUND_CLOSEST(microseconds, 100);
- if (val <= max)
- return (int)val;
-
- /* Have to use pulse generator 1 (millisecond granularity) */
- val = DIV_ROUND_CLOSEST(microseconds, 1000);
- WARN_ON(val > max);
+static u32
+ipa_qtime_val(struct ipa *ipa, u32 microseconds, u32 max, u32 *select)
+{
+ u32 which = 0;
+ u32 ticks;
+
+ /* Pulse generator 0 has 100 microsecond granularity */
+ ticks = DIV_ROUND_CLOSEST(microseconds, 100);
+ if (ticks <= max)
+ goto out;
+
+ /* Pulse generator 1 has millisecond granularity */
+ which = 1;
+ ticks = DIV_ROUND_CLOSEST(microseconds, 1000);
+ if (ticks <= max)
+ goto out;
+
+ if (ipa->version >= IPA_VERSION_5_0) {
+ /* Pulse generator 2 has 10 millisecond granularity */
+ which = 2;
+ ticks = DIV_ROUND_CLOSEST(microseconds, 100);
+ }
+ WARN_ON(ticks > max);
+out:
+ *select = which;
- return (int)-val;
+ return ticks;
}
/* Encode the aggregation timer limit (microseconds) based on IPA version */
-static u32 aggr_time_limit_encode(struct ipa *ipa, const struct ipa_reg *reg,
+static u32 aggr_time_limit_encode(struct ipa *ipa, const struct reg *reg,
u32 microseconds)
{
+ u32 ticks;
u32 max;
- u32 val;
if (!microseconds)
return 0; /* Nothing to compute if time limit is 0 */
- max = ipa_reg_field_max(reg, TIME_LIMIT);
+ max = reg_field_max(reg, TIME_LIMIT);
if (ipa->version >= IPA_VERSION_4_5) {
- u32 gran_sel;
- int ret;
-
- /* Compute the Qtime limit value to use */
- ret = ipa_qtime_val(microseconds, max);
- if (ret < 0) {
- val = -ret;
- gran_sel = ipa_reg_bit(reg, AGGR_GRAN_SEL);
- } else {
- val = ret;
- gran_sel = 0;
- }
+ u32 select;
- return gran_sel | ipa_reg_encode(reg, TIME_LIMIT, val);
+ ticks = ipa_qtime_val(ipa, microseconds, max, &select);
+
+ return reg_encode(reg, AGGR_GRAN_SEL, select) |
+ reg_encode(reg, TIME_LIMIT, ticks);
}
/* We program aggregation granularity in ipa_hardware_config() */
- val = DIV_ROUND_CLOSEST(microseconds, IPA_AGGR_GRANULARITY);
- WARN(val > max, "aggr_time_limit too large (%u > %u usec)\n",
+ ticks = DIV_ROUND_CLOSEST(microseconds, IPA_AGGR_GRANULARITY);
+ WARN(ticks > max, "aggr_time_limit too large (%u > %u usec)\n",
microseconds, max * IPA_AGGR_GRANULARITY);
- return ipa_reg_encode(reg, TIME_LIMIT, val);
+ return reg_encode(reg, TIME_LIMIT, ticks);
}
static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
{
u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 val = 0;
reg = ipa_reg(ipa, ENDP_INIT_AGGR);
@@ -857,13 +1005,13 @@ static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
u32 limit;
rx_config = &endpoint->config.rx;
- val |= ipa_reg_encode(reg, AGGR_EN, IPA_ENABLE_AGGR);
- val |= ipa_reg_encode(reg, AGGR_TYPE, IPA_GENERIC);
+ val |= reg_encode(reg, AGGR_EN, IPA_ENABLE_AGGR);
+ val |= reg_encode(reg, AGGR_TYPE, IPA_GENERIC);
buffer_size = rx_config->buffer_size;
limit = ipa_aggr_size_kb(buffer_size - NET_SKB_PAD,
rx_config->aggr_hard_limit);
- val |= ipa_reg_encode(reg, BYTE_LIMIT, limit);
+ val |= reg_encode(reg, BYTE_LIMIT, limit);
limit = rx_config->aggr_time_limit;
val |= aggr_time_limit_encode(ipa, reg, limit);
@@ -871,20 +1019,20 @@ static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
/* AGGR_PKT_LIMIT is 0 (unlimited) */
if (rx_config->aggr_close_eof)
- val |= ipa_reg_bit(reg, SW_EOF_ACTIVE);
+ val |= reg_bit(reg, SW_EOF_ACTIVE);
} else {
- val |= ipa_reg_encode(reg, AGGR_EN, IPA_ENABLE_DEAGGR);
- val |= ipa_reg_encode(reg, AGGR_TYPE, IPA_QCMAP);
+ val |= reg_encode(reg, AGGR_EN, IPA_ENABLE_DEAGGR);
+ val |= reg_encode(reg, AGGR_TYPE, IPA_QCMAP);
/* other fields ignored */
}
/* AGGR_FORCE_CLOSE is 0 */
/* AGGR_GRAN_SEL is 0 for IPA v4.5 */
} else {
- val |= ipa_reg_encode(reg, AGGR_EN, IPA_BYPASS_AGGR);
+ val |= reg_encode(reg, AGGR_EN, IPA_BYPASS_AGGR);
/* other fields ignored */
}
- iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
+ iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
}
/* The head-of-line blocking timer is defined as a tick count. For
@@ -895,7 +1043,7 @@ static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
* Return the encoded value representing the timeout period provided
* that should be written to the ENDP_INIT_HOL_BLOCK_TIMER register.
*/
-static u32 hol_block_timer_encode(struct ipa *ipa, const struct ipa_reg *reg,
+static u32 hol_block_timer_encode(struct ipa *ipa, const struct reg *reg,
u32 microseconds)
{
u32 width;
@@ -909,21 +1057,14 @@ static u32 hol_block_timer_encode(struct ipa *ipa, const struct ipa_reg *reg,
return 0; /* Nothing to compute if timer period is 0 */
if (ipa->version >= IPA_VERSION_4_5) {
- u32 max = ipa_reg_field_max(reg, TIMER_LIMIT);
- u32 gran_sel;
- int ret;
-
- /* Compute the Qtime limit value to use */
- ret = ipa_qtime_val(microseconds, max);
- if (ret < 0) {
- val = -ret;
- gran_sel = ipa_reg_bit(reg, TIMER_GRAN_SEL);
- } else {
- val = ret;
- gran_sel = 0;
- }
+ u32 max = reg_field_max(reg, TIMER_LIMIT);
+ u32 select;
+ u32 ticks;
+
+ ticks = ipa_qtime_val(ipa, microseconds, max, &select);
- return gran_sel | ipa_reg_encode(reg, TIMER_LIMIT, val);
+ return reg_encode(reg, TIMER_GRAN_SEL, 1) |
+ reg_encode(reg, TIMER_LIMIT, ticks);
}
/* Use 64 bit arithmetic to avoid overflow */
@@ -931,11 +1072,11 @@ static u32 hol_block_timer_encode(struct ipa *ipa, const struct ipa_reg *reg,
ticks = DIV_ROUND_CLOSEST(microseconds * rate, 128 * USEC_PER_SEC);
/* We still need the result to fit into the field */
- WARN_ON(ticks > ipa_reg_field_max(reg, TIMER_BASE_VALUE));
+ WARN_ON(ticks > reg_field_max(reg, TIMER_BASE_VALUE));
/* IPA v3.5.1 through v4.1 just record the tick count */
if (ipa->version < IPA_VERSION_4_2)
- return ipa_reg_encode(reg, TIMER_BASE_VALUE, (u32)ticks);
+ return reg_encode(reg, TIMER_BASE_VALUE, (u32)ticks);
/* For IPA v4.2, the tick count is represented by base and
* scale fields within the 32-bit timer register, where:
@@ -946,7 +1087,7 @@ static u32 hol_block_timer_encode(struct ipa *ipa, const struct ipa_reg *reg,
* such that high bit is included.
*/
high = fls(ticks); /* 1..32 (or warning above) */
- width = hweight32(ipa_reg_fmask(reg, TIMER_BASE_VALUE));
+ width = hweight32(reg_fmask(reg, TIMER_BASE_VALUE));
scale = high > width ? high - width : 0;
if (scale) {
/* If we're scaling, round up to get a closer result */
@@ -956,8 +1097,8 @@ static u32 hol_block_timer_encode(struct ipa *ipa, const struct ipa_reg *reg,
scale++;
}
- val = ipa_reg_encode(reg, TIMER_SCALE, scale);
- val |= ipa_reg_encode(reg, TIMER_BASE_VALUE, (u32)ticks >> scale);
+ val = reg_encode(reg, TIMER_SCALE, scale);
+ val |= reg_encode(reg, TIMER_BASE_VALUE, (u32)ticks >> scale);
return val;
}
@@ -968,14 +1109,14 @@ static void ipa_endpoint_init_hol_block_timer(struct ipa_endpoint *endpoint,
{
u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 val;
/* This should only be changed when HOL_BLOCK_EN is disabled */
reg = ipa_reg(ipa, ENDP_INIT_HOL_BLOCK_TIMER);
val = hol_block_timer_encode(ipa, reg, microseconds);
- iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
+ iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
}
static void
@@ -983,13 +1124,13 @@ ipa_endpoint_init_hol_block_en(struct ipa_endpoint *endpoint, bool enable)
{
u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 offset;
u32 val;
reg = ipa_reg(ipa, ENDP_INIT_HOL_BLOCK_EN);
- offset = ipa_reg_n_offset(reg, endpoint_id);
- val = enable ? ipa_reg_bit(reg, HOL_BLOCK_EN) : 0;
+ offset = reg_n_offset(reg, endpoint_id);
+ val = enable ? reg_bit(reg, HOL_BLOCK_EN) : 0;
iowrite32(val, ipa->reg_virt + offset);
@@ -1030,7 +1171,7 @@ static void ipa_endpoint_init_deaggr(struct ipa_endpoint *endpoint)
{
u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 val = 0;
if (!endpoint->toward_ipa)
@@ -1042,7 +1183,7 @@ static void ipa_endpoint_init_deaggr(struct ipa_endpoint *endpoint)
/* PACKET_OFFSET_LOCATION is ignored (not valid) */
/* MAX_PACKET_LEN is 0 (not enforced) */
- iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
+ iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
}
static void ipa_endpoint_init_rsrc_grp(struct ipa_endpoint *endpoint)
@@ -1050,20 +1191,20 @@ static void ipa_endpoint_init_rsrc_grp(struct ipa_endpoint *endpoint)
u32 resource_group = endpoint->config.resource_group;
u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 val;
reg = ipa_reg(ipa, ENDP_INIT_RSRC_GRP);
- val = ipa_reg_encode(reg, ENDP_RSRC_GRP, resource_group);
+ val = reg_encode(reg, ENDP_RSRC_GRP, resource_group);
- iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
+ iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
}
static void ipa_endpoint_init_seq(struct ipa_endpoint *endpoint)
{
u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 val;
if (!endpoint->toward_ipa)
@@ -1072,14 +1213,14 @@ static void ipa_endpoint_init_seq(struct ipa_endpoint *endpoint)
reg = ipa_reg(ipa, ENDP_INIT_SEQ);
/* Low-order byte configures primary packet processing */
- val = ipa_reg_encode(reg, SEQ_TYPE, endpoint->config.tx.seq_type);
+ val = reg_encode(reg, SEQ_TYPE, endpoint->config.tx.seq_type);
/* Second byte (if supported) configures replicated packet processing */
if (ipa->version < IPA_VERSION_4_5)
- val |= ipa_reg_encode(reg, SEQ_REP_TYPE,
- endpoint->config.tx.seq_rep_type);
+ val |= reg_encode(reg, SEQ_REP_TYPE,
+ endpoint->config.tx.seq_rep_type);
- iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
+ iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
}
/**
@@ -1129,12 +1270,12 @@ static void ipa_endpoint_status(struct ipa_endpoint *endpoint)
{
u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 val = 0;
reg = ipa_reg(ipa, ENDP_STATUS);
if (endpoint->config.status_enable) {
- val |= ipa_reg_bit(reg, STATUS_EN);
+ val |= reg_bit(reg, STATUS_EN);
if (endpoint->toward_ipa) {
enum ipa_endpoint_name name;
u32 status_endpoint_id;
@@ -1142,16 +1283,15 @@ static void ipa_endpoint_status(struct ipa_endpoint *endpoint)
name = endpoint->config.tx.status_endpoint;
status_endpoint_id = ipa->name_map[name]->endpoint_id;
- val |= ipa_reg_encode(reg, STATUS_ENDP,
- status_endpoint_id);
+ val |= reg_encode(reg, STATUS_ENDP, status_endpoint_id);
}
- /* STATUS_LOCATION is 0, meaning status element precedes
- * packet (not present for IPA v4.5+)
+ /* STATUS_LOCATION is 0, meaning IPA packet status
+ * precedes the packet (not present for IPA v4.5+)
*/
/* STATUS_PKT_SUPPRESS_FMASK is 0 (not present for v4.0+) */
}
- iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
+ iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id));
}
static int ipa_endpoint_replenish_one(struct ipa_endpoint *endpoint,
@@ -1302,8 +1442,8 @@ static bool ipa_endpoint_skb_build(struct ipa_endpoint *endpoint,
return skb != NULL;
}
-/* The format of a packet status element is the same for several status
- * types (opcodes). Other types aren't currently supported.
+ /* The format of an IPA packet status structure is the same for several
+ * status types (opcodes). Other types aren't currently supported.
*/
static bool ipa_status_format_packet(enum ipa_status_opcode opcode)
{
@@ -1318,31 +1458,34 @@ static bool ipa_status_format_packet(enum ipa_status_opcode opcode)
}
}
-static bool ipa_endpoint_status_skip(struct ipa_endpoint *endpoint,
- const struct ipa_status *status)
+static bool
+ipa_endpoint_status_skip(struct ipa_endpoint *endpoint, const void *data)
{
+ struct ipa *ipa = endpoint->ipa;
+ enum ipa_status_opcode opcode;
u32 endpoint_id;
- if (!ipa_status_format_packet(status->opcode))
+ opcode = ipa_status_extract(ipa, data, STATUS_OPCODE);
+ if (!ipa_status_format_packet(opcode))
return true;
- if (!status->pkt_len)
- return true;
- endpoint_id = u8_get_bits(status->endp_dst_idx,
- IPA_STATUS_DST_IDX_FMASK);
+
+ endpoint_id = ipa_status_extract(ipa, data, STATUS_DST_ENDPOINT);
if (endpoint_id != endpoint->endpoint_id)
return true;
return false; /* Don't skip this packet, process it */
}
-static bool ipa_endpoint_status_tag(struct ipa_endpoint *endpoint,
- const struct ipa_status *status)
+static bool
+ipa_endpoint_status_tag_valid(struct ipa_endpoint *endpoint, const void *data)
{
struct ipa_endpoint *command_endpoint;
+ enum ipa_status_mask status_mask;
struct ipa *ipa = endpoint->ipa;
u32 endpoint_id;
- if (!le16_get_bits(status->mask, IPA_STATUS_MASK_TAG_VALID_FMASK))
+ status_mask = ipa_status_extract(ipa, data, STATUS_MASK);
+ if (!status_mask)
return false; /* No valid tag */
/* The status contains a valid tag. We know the packet was sent to
@@ -1350,8 +1493,7 @@ static bool ipa_endpoint_status_tag(struct ipa_endpoint *endpoint,
* If the packet came from the AP->command TX endpoint we know
* this packet was sent as part of the pipeline clear process.
*/
- endpoint_id = u8_get_bits(status->endp_src_idx,
- IPA_STATUS_SRC_IDX_FMASK);
+ endpoint_id = ipa_status_extract(ipa, data, STATUS_SRC_ENDPOINT);
command_endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];
if (endpoint_id == command_endpoint->endpoint_id) {
complete(&ipa->completion);
@@ -1365,23 +1507,26 @@ static bool ipa_endpoint_status_tag(struct ipa_endpoint *endpoint,
}
/* Return whether the status indicates the packet should be dropped */
-static bool ipa_endpoint_status_drop(struct ipa_endpoint *endpoint,
- const struct ipa_status *status)
+static bool
+ipa_endpoint_status_drop(struct ipa_endpoint *endpoint, const void *data)
{
- u32 val;
+ enum ipa_status_exception exception;
+ struct ipa *ipa = endpoint->ipa;
+ u32 rule;
/* If the status indicates a tagged transfer, we'll drop the packet */
- if (ipa_endpoint_status_tag(endpoint, status))
+ if (ipa_endpoint_status_tag_valid(endpoint, data))
return true;
/* Deaggregation exceptions we drop; all other types we consume */
- if (status->exception)
- return status->exception == IPA_STATUS_EXCEPTION_DEAGGR;
+ exception = ipa_status_extract(ipa, data, STATUS_EXCEPTION);
+ if (exception)
+ return exception == IPA_STATUS_EXCEPTION_DEAGGR;
/* Drop the packet if it fails to match a routing rule; otherwise no */
- val = le32_get_bits(status->flags1, IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK);
+ rule = ipa_status_extract(ipa, data, STATUS_ROUTER_RULE_INDEX);
- return val == field_max(IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK);
+ return rule == IPA_STATUS_RULE_MISS;
}
static void ipa_endpoint_status_parse(struct ipa_endpoint *endpoint,
@@ -1390,47 +1535,46 @@ static void ipa_endpoint_status_parse(struct ipa_endpoint *endpoint,
u32 buffer_size = endpoint->config.rx.buffer_size;
void *data = page_address(page) + NET_SKB_PAD;
u32 unused = buffer_size - total_len;
+ struct ipa *ipa = endpoint->ipa;
u32 resid = total_len;
while (resid) {
- const struct ipa_status *status = data;
+ u32 length;
u32 align;
u32 len;
- if (resid < sizeof(*status)) {
+ if (resid < IPA_STATUS_SIZE) {
dev_err(&endpoint->ipa->pdev->dev,
"short message (%u bytes < %zu byte status)\n",
- resid, sizeof(*status));
+ resid, IPA_STATUS_SIZE);
break;
}
/* Skip over status packets that lack packet data */
- if (ipa_endpoint_status_skip(endpoint, status)) {
- data += sizeof(*status);
- resid -= sizeof(*status);
+ length = ipa_status_extract(ipa, data, STATUS_LENGTH);
+ if (!length || ipa_endpoint_status_skip(endpoint, data)) {
+ data += IPA_STATUS_SIZE;
+ resid -= IPA_STATUS_SIZE;
continue;
}
/* Compute the amount of buffer space consumed by the packet,
- * including the status element. If the hardware is configured
- * to pad packet data to an aligned boundary, account for that.
+ * including the status. If the hardware is configured to
+ * pad packet data to an aligned boundary, account for that.
* And if checksum offload is enabled a trailer containing
* computed checksum information will be appended.
*/
align = endpoint->config.rx.pad_align ? : 1;
- len = le16_to_cpu(status->pkt_len);
- len = sizeof(*status) + ALIGN(len, align);
+ len = IPA_STATUS_SIZE + ALIGN(length, align);
if (endpoint->config.checksum)
len += sizeof(struct rmnet_map_dl_csum_trailer);
- if (!ipa_endpoint_status_drop(endpoint, status)) {
+ if (!ipa_endpoint_status_drop(endpoint, data)) {
void *data2;
u32 extra;
- u32 len2;
/* Client receives only packet data (no status) */
- data2 = data + sizeof(*status);
- len2 = le16_to_cpu(status->pkt_len);
+ data2 = data + IPA_STATUS_SIZE;
/* Have the true size reflect the extra unused space in
* the original receive buffer. Distribute the "cost"
@@ -1438,7 +1582,7 @@ static void ipa_endpoint_status_parse(struct ipa_endpoint *endpoint,
* buffer.
*/
extra = DIV_ROUND_CLOSEST(unused * len, total_len);
- ipa_endpoint_skb_copy(endpoint, data2, len2, extra);
+ ipa_endpoint_skb_copy(endpoint, data2, length, extra);
}
/* Consume status and the full packet it describes */
@@ -1491,18 +1635,18 @@ void ipa_endpoint_trans_release(struct ipa_endpoint *endpoint,
void ipa_endpoint_default_route_set(struct ipa *ipa, u32 endpoint_id)
{
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 val;
reg = ipa_reg(ipa, ROUTE);
/* ROUTE_DIS is 0 */
- val = ipa_reg_encode(reg, ROUTE_DEF_PIPE, endpoint_id);
- val |= ipa_reg_bit(reg, ROUTE_DEF_HDR_TABLE);
+ val = reg_encode(reg, ROUTE_DEF_PIPE, endpoint_id);
+ val |= reg_bit(reg, ROUTE_DEF_HDR_TABLE);
/* ROUTE_DEF_HDR_OFST is 0 */
- val |= ipa_reg_encode(reg, ROUTE_FRAG_DEF_PIPE, endpoint_id);
- val |= ipa_reg_bit(reg, ROUTE_DEF_RETAIN_HDR);
+ val |= reg_encode(reg, ROUTE_FRAG_DEF_PIPE, endpoint_id);
+ val |= reg_bit(reg, ROUTE_DEF_RETAIN_HDR);
- iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg));
+ iowrite32(val, ipa->reg_virt + reg_offset(reg));
}
void ipa_endpoint_default_route_clear(struct ipa *ipa)
@@ -1840,8 +1984,9 @@ void ipa_endpoint_deconfig(struct ipa *ipa)
int ipa_endpoint_config(struct ipa *ipa)
{
struct device *dev = &ipa->pdev->dev;
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 endpoint_id;
+ u32 hw_limit;
u32 tx_count;
u32 rx_count;
u32 rx_base;
@@ -1873,12 +2018,12 @@ int ipa_endpoint_config(struct ipa *ipa)
* the highest one doesn't exceed the number supported by software.
*/
reg = ipa_reg(ipa, FLAVOR_0);
- val = ioread32(ipa->reg_virt + ipa_reg_offset(reg));
+ val = ioread32(ipa->reg_virt + reg_offset(reg));
/* Our RX is an IPA producer; our TX is an IPA consumer. */
- tx_count = ipa_reg_decode(reg, MAX_CONS_PIPES, val);
- rx_count = ipa_reg_decode(reg, MAX_PROD_PIPES, val);
- rx_base = ipa_reg_decode(reg, PROD_LOWEST, val);
+ tx_count = reg_decode(reg, MAX_CONS_PIPES, val);
+ rx_count = reg_decode(reg, MAX_PROD_PIPES, val);
+ rx_base = reg_decode(reg, PROD_LOWEST, val);
limit = rx_base + rx_count;
if (limit > IPA_ENDPOINT_MAX) {
@@ -1887,6 +2032,14 @@ int ipa_endpoint_config(struct ipa *ipa)
return -EINVAL;
}
+ /* Until IPA v5.0, the max endpoint ID was 32 */
+ hw_limit = ipa->version < IPA_VERSION_5_0 ? 32 : U8_MAX + 1;
+ if (limit > hw_limit) {
+ dev_err(dev, "unexpected endpoint count, %u > %u\n",
+ limit, hw_limit);
+ return -EINVAL;
+ }
+
/* Allocate and initialize the available endpoint bitmap */
ipa->available = bitmap_zalloc(limit, GFP_KERNEL);
if (!ipa->available)
diff --git a/drivers/net/ipa/ipa_endpoint.h b/drivers/net/ipa/ipa_endpoint.h
index 4a5c3bc549df..3ad2e802040a 100644
--- a/drivers/net/ipa/ipa_endpoint.h
+++ b/drivers/net/ipa/ipa_endpoint.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2022 Linaro Ltd.
+ * Copyright (C) 2019-2023 Linaro Ltd.
*/
#ifndef _IPA_ENDPOINT_H_
#define _IPA_ENDPOINT_H_
@@ -38,7 +38,7 @@ enum ipa_endpoint_name {
IPA_ENDPOINT_COUNT, /* Number of names (not an index) */
};
-#define IPA_ENDPOINT_MAX 32 /* Max supported by driver */
+#define IPA_ENDPOINT_MAX 36 /* Max supported by driver */
/**
* struct ipa_endpoint_tx - Endpoint configuration for TX endpoints
diff --git a/drivers/net/ipa/ipa_interrupt.c b/drivers/net/ipa/ipa_interrupt.c
index c1b3977e1ae4..4bc05948f772 100644
--- a/drivers/net/ipa/ipa_interrupt.c
+++ b/drivers/net/ipa/ipa_interrupt.c
@@ -22,10 +22,13 @@
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/pm_runtime.h>
+#include <linux/pm_wakeirq.h>
#include "ipa.h"
#include "ipa_reg.h"
#include "ipa_endpoint.h"
+#include "ipa_power.h"
+#include "ipa_uc.h"
#include "ipa_interrupt.h"
/**
@@ -33,47 +36,47 @@
* @ipa: IPA pointer
* @irq: Linux IRQ number used for IPA interrupts
* @enabled: Mask indicating which interrupts are enabled
- * @handler: Array of handlers indexed by IPA interrupt ID
*/
struct ipa_interrupt {
struct ipa *ipa;
u32 irq;
u32 enabled;
- ipa_irq_handler_t handler[IPA_IRQ_COUNT];
};
-/* Returns true if the interrupt type is associated with the microcontroller */
-static bool ipa_interrupt_uc(struct ipa_interrupt *interrupt, u32 irq_id)
-{
- return irq_id == IPA_IRQ_UC_0 || irq_id == IPA_IRQ_UC_1;
-}
-
/* Process a particular interrupt type that has been received */
static void ipa_interrupt_process(struct ipa_interrupt *interrupt, u32 irq_id)
{
- bool uc_irq = ipa_interrupt_uc(interrupt, irq_id);
struct ipa *ipa = interrupt->ipa;
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 mask = BIT(irq_id);
u32 offset;
- /* For microcontroller interrupts, clear the interrupt right away,
- * "to avoid clearing unhandled interrupts."
- */
reg = ipa_reg(ipa, IPA_IRQ_CLR);
- offset = ipa_reg_offset(reg);
- if (uc_irq)
+ offset = reg_offset(reg);
+
+ switch (irq_id) {
+ case IPA_IRQ_UC_0:
+ case IPA_IRQ_UC_1:
+ /* For microcontroller interrupts, clear the interrupt right
+ * away, "to avoid clearing unhandled interrupts."
+ */
iowrite32(mask, ipa->reg_virt + offset);
-
- if (irq_id < IPA_IRQ_COUNT && interrupt->handler[irq_id])
- interrupt->handler[irq_id](interrupt->ipa, irq_id);
-
- /* Clearing the SUSPEND_TX interrupt also clears the register
- * that tells us which suspended endpoint(s) caused the interrupt,
- * so defer clearing until after the handler has been called.
- */
- if (!uc_irq)
+ ipa_uc_interrupt_handler(ipa, irq_id);
+ break;
+
+ case IPA_IRQ_TX_SUSPEND:
+ /* Clearing the SUSPEND_TX interrupt also clears the
+ * register that tells us which suspended endpoint(s)
+ * caused the interrupt, so defer clearing until after
+ * the handler has been called.
+ */
+ ipa_power_suspend_handler(ipa, irq_id);
+ fallthrough;
+
+ default: /* Silently ignore (and clear) any other condition */
iowrite32(mask, ipa->reg_virt + offset);
+ break;
+ }
}
/* IPA IRQ handler is threaded */
@@ -82,7 +85,7 @@ static irqreturn_t ipa_isr_thread(int irq, void *dev_id)
struct ipa_interrupt *interrupt = dev_id;
struct ipa *ipa = interrupt->ipa;
u32 enabled = interrupt->enabled;
- const struct ipa_reg *reg;
+ const struct reg *reg;
struct device *dev;
u32 pending;
u32 offset;
@@ -99,7 +102,7 @@ static irqreturn_t ipa_isr_thread(int irq, void *dev_id)
* only the enabled ones.
*/
reg = ipa_reg(ipa, IPA_IRQ_STTS);
- offset = ipa_reg_offset(reg);
+ offset = reg_offset(reg);
pending = ioread32(ipa->reg_virt + offset);
while ((mask = pending & enabled)) {
do {
@@ -117,8 +120,7 @@ static irqreturn_t ipa_isr_thread(int irq, void *dev_id)
dev_dbg(dev, "clearing disabled IPA interrupts 0x%08x\n",
pending);
reg = ipa_reg(ipa, IPA_IRQ_CLR);
- offset = ipa_reg_offset(reg);
- iowrite32(pending, ipa->reg_virt + offset);
+ iowrite32(pending, ipa->reg_virt + reg_offset(reg));
}
out_power_put:
pm_runtime_mark_last_busy(dev);
@@ -127,6 +129,29 @@ out_power_put:
return IRQ_HANDLED;
}
+static void ipa_interrupt_enabled_update(struct ipa *ipa)
+{
+ const struct reg *reg = ipa_reg(ipa, IPA_IRQ_EN);
+
+ iowrite32(ipa->interrupt->enabled, ipa->reg_virt + reg_offset(reg));
+}
+
+/* Enable an IPA interrupt type */
+void ipa_interrupt_enable(struct ipa *ipa, enum ipa_irq_id ipa_irq)
+{
+ /* Update the IPA interrupt mask to enable it */
+ ipa->interrupt->enabled |= BIT(ipa_irq);
+ ipa_interrupt_enabled_update(ipa);
+}
+
+/* Disable an IPA interrupt type */
+void ipa_interrupt_disable(struct ipa *ipa, enum ipa_irq_id ipa_irq)
+{
+ /* Update the IPA interrupt mask to disable it */
+ ipa->interrupt->enabled &= ~BIT(ipa_irq);
+ ipa_interrupt_enabled_update(ipa);
+}
+
void ipa_interrupt_irq_disable(struct ipa *ipa)
{
disable_irq(ipa->interrupt->irq);
@@ -144,7 +169,7 @@ static void ipa_interrupt_suspend_control(struct ipa_interrupt *interrupt,
struct ipa *ipa = interrupt->ipa;
u32 mask = BIT(endpoint_id % 32);
u32 unit = endpoint_id / 32;
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 offset;
u32 val;
@@ -155,7 +180,7 @@ static void ipa_interrupt_suspend_control(struct ipa_interrupt *interrupt,
return;
reg = ipa_reg(ipa, IRQ_SUSPEND_EN);
- offset = ipa_reg_n_offset(reg, unit);
+ offset = reg_n_offset(reg, unit);
val = ioread32(ipa->reg_virt + offset);
if (enable)
@@ -189,18 +214,18 @@ void ipa_interrupt_suspend_clear_all(struct ipa_interrupt *interrupt)
unit_count = roundup(ipa->endpoint_count, 32);
for (unit = 0; unit < unit_count; unit++) {
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 val;
reg = ipa_reg(ipa, IRQ_SUSPEND_INFO);
- val = ioread32(ipa->reg_virt + ipa_reg_n_offset(reg, unit));
+ val = ioread32(ipa->reg_virt + reg_n_offset(reg, unit));
/* SUSPEND interrupt status isn't cleared on IPA version 3.0 */
if (ipa->version == IPA_VERSION_3_0)
continue;
reg = ipa_reg(ipa, IRQ_SUSPEND_CLR);
- iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, unit));
+ iowrite32(val, ipa->reg_virt + reg_n_offset(reg, unit));
}
}
@@ -210,50 +235,12 @@ void ipa_interrupt_simulate_suspend(struct ipa_interrupt *interrupt)
ipa_interrupt_process(interrupt, IPA_IRQ_TX_SUSPEND);
}
-/* Add a handler for an IPA interrupt */
-void ipa_interrupt_add(struct ipa_interrupt *interrupt,
- enum ipa_irq_id ipa_irq, ipa_irq_handler_t handler)
-{
- struct ipa *ipa = interrupt->ipa;
- const struct ipa_reg *reg;
-
- if (WARN_ON(ipa_irq >= IPA_IRQ_COUNT))
- return;
-
- interrupt->handler[ipa_irq] = handler;
-
- /* Update the IPA interrupt mask to enable it */
- interrupt->enabled |= BIT(ipa_irq);
-
- reg = ipa_reg(ipa, IPA_IRQ_EN);
- iowrite32(interrupt->enabled, ipa->reg_virt + ipa_reg_offset(reg));
-}
-
-/* Remove the handler for an IPA interrupt type */
-void
-ipa_interrupt_remove(struct ipa_interrupt *interrupt, enum ipa_irq_id ipa_irq)
-{
- struct ipa *ipa = interrupt->ipa;
- const struct ipa_reg *reg;
-
- if (WARN_ON(ipa_irq >= IPA_IRQ_COUNT))
- return;
-
- /* Update the IPA interrupt mask to disable it */
- interrupt->enabled &= ~BIT(ipa_irq);
-
- reg = ipa_reg(ipa, IPA_IRQ_EN);
- iowrite32(interrupt->enabled, ipa->reg_virt + ipa_reg_offset(reg));
-
- interrupt->handler[ipa_irq] = NULL;
-}
-
/* Configure the IPA interrupt framework */
struct ipa_interrupt *ipa_interrupt_config(struct ipa *ipa)
{
struct device *dev = &ipa->pdev->dev;
struct ipa_interrupt *interrupt;
- const struct ipa_reg *reg;
+ const struct reg *reg;
unsigned int irq;
int ret;
@@ -273,7 +260,7 @@ struct ipa_interrupt *ipa_interrupt_config(struct ipa *ipa)
/* Start with all IPA interrupts disabled */
reg = ipa_reg(ipa, IPA_IRQ_EN);
- iowrite32(0, ipa->reg_virt + ipa_reg_offset(reg));
+ iowrite32(0, ipa->reg_virt + reg_offset(reg));
ret = request_threaded_irq(irq, NULL, ipa_isr_thread, IRQF_ONESHOT,
"ipa", interrupt);
@@ -282,9 +269,9 @@ struct ipa_interrupt *ipa_interrupt_config(struct ipa *ipa)
goto err_kfree;
}
- ret = enable_irq_wake(irq);
+ ret = dev_pm_set_wake_irq(dev, irq);
if (ret) {
- dev_err(dev, "error %d enabling wakeup for \"ipa\" IRQ\n", ret);
+ dev_err(dev, "error %d registering \"ipa\" IRQ as wakeirq\n", ret);
goto err_free_irq;
}
@@ -302,11 +289,8 @@ err_kfree:
void ipa_interrupt_deconfig(struct ipa_interrupt *interrupt)
{
struct device *dev = &interrupt->ipa->pdev->dev;
- int ret;
- ret = disable_irq_wake(interrupt->irq);
- if (ret)
- dev_err(dev, "error %d disabling \"ipa\" IRQ wakeup\n", ret);
+ dev_pm_clear_wake_irq(dev);
free_irq(interrupt->irq, interrupt);
kfree(interrupt);
}
diff --git a/drivers/net/ipa/ipa_interrupt.h b/drivers/net/ipa/ipa_interrupt.h
index 8a1bd5b89393..12e3e798ccb3 100644
--- a/drivers/net/ipa/ipa_interrupt.h
+++ b/drivers/net/ipa/ipa_interrupt.h
@@ -11,39 +11,7 @@
struct ipa;
struct ipa_interrupt;
-
-/**
- * typedef ipa_irq_handler_t - IPA interrupt handler function type
- * @ipa: IPA pointer
- * @irq_id: interrupt type
- *
- * Callback function registered by ipa_interrupt_add() to handle a specific
- * IPA interrupt type
- */
-typedef void (*ipa_irq_handler_t)(struct ipa *ipa, enum ipa_irq_id irq_id);
-
-/**
- * ipa_interrupt_add() - Register a handler for an IPA interrupt type
- * @interrupt: IPA interrupt structure
- * @irq_id: IPA interrupt type
- * @handler: Handler function for the interrupt
- *
- * Add a handler for an IPA interrupt and enable it. IPA interrupt
- * handlers are run in threaded interrupt context, so are allowed to
- * block.
- */
-void ipa_interrupt_add(struct ipa_interrupt *interrupt, enum ipa_irq_id irq_id,
- ipa_irq_handler_t handler);
-
-/**
- * ipa_interrupt_remove() - Remove the handler for an IPA interrupt type
- * @interrupt: IPA interrupt structure
- * @irq_id: IPA interrupt type
- *
- * Remove an IPA interrupt handler and disable it.
- */
-void ipa_interrupt_remove(struct ipa_interrupt *interrupt,
- enum ipa_irq_id irq_id);
+enum ipa_irq_id;
/**
* ipa_interrupt_suspend_enable - Enable TX_SUSPEND for an endpoint
@@ -86,6 +54,20 @@ void ipa_interrupt_suspend_clear_all(struct ipa_interrupt *interrupt);
void ipa_interrupt_simulate_suspend(struct ipa_interrupt *interrupt);
/**
+ * ipa_interrupt_enable() - Enable an IPA interrupt type
+ * @ipa: IPA pointer
+ * @ipa_irq: IPA interrupt ID
+ */
+void ipa_interrupt_enable(struct ipa *ipa, enum ipa_irq_id ipa_irq);
+
+/**
+ * ipa_interrupt_disable() - Disable an IPA interrupt type
+ * @ipa: IPA pointer
+ * @ipa_irq: IPA interrupt ID
+ */
+void ipa_interrupt_disable(struct ipa *ipa, enum ipa_irq_id ipa_irq);
+
+/**
* ipa_interrupt_irq_enable() - Enable IPA interrupts
* @ipa: IPA pointer
*
diff --git a/drivers/net/ipa/ipa_main.c b/drivers/net/ipa/ipa_main.c
index 4fb92f771974..6cb7bf96a626 100644
--- a/drivers/net/ipa/ipa_main.c
+++ b/drivers/net/ipa/ipa_main.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2022 Linaro Ltd.
+ * Copyright (C) 2018-2023 Linaro Ltd.
*/
#include <linux/types.h>
@@ -203,7 +203,7 @@ static void ipa_teardown(struct ipa *ipa)
static void
ipa_hardware_config_bcr(struct ipa *ipa, const struct ipa_data *data)
{
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 val;
/* IPA v4.5+ has no backward compatibility register */
@@ -212,13 +212,13 @@ ipa_hardware_config_bcr(struct ipa *ipa, const struct ipa_data *data)
reg = ipa_reg(ipa, IPA_BCR);
val = data->backward_compat;
- iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg));
+ iowrite32(val, ipa->reg_virt + reg_offset(reg));
}
static void ipa_hardware_config_tx(struct ipa *ipa)
{
enum ipa_version version = ipa->version;
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 offset;
u32 val;
@@ -227,11 +227,11 @@ static void ipa_hardware_config_tx(struct ipa *ipa)
/* Disable PA mask to allow HOLB drop */
reg = ipa_reg(ipa, IPA_TX_CFG);
- offset = ipa_reg_offset(reg);
+ offset = reg_offset(reg);
val = ioread32(ipa->reg_virt + offset);
- val &= ~ipa_reg_bit(reg, PA_MASK_EN);
+ val &= ~reg_bit(reg, PA_MASK_EN);
iowrite32(val, ipa->reg_virt + offset);
}
@@ -239,7 +239,7 @@ static void ipa_hardware_config_tx(struct ipa *ipa)
static void ipa_hardware_config_clkon(struct ipa *ipa)
{
enum ipa_version version = ipa->version;
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 val;
if (version >= IPA_VERSION_4_5)
@@ -252,20 +252,20 @@ static void ipa_hardware_config_clkon(struct ipa *ipa)
reg = ipa_reg(ipa, CLKON_CFG);
if (version == IPA_VERSION_3_1) {
/* Disable MISC clock gating */
- val = ipa_reg_bit(reg, CLKON_MISC);
+ val = reg_bit(reg, CLKON_MISC);
} else { /* IPA v4.0+ */
/* Enable open global clocks in the CLKON configuration */
- val = ipa_reg_bit(reg, CLKON_GLOBAL);
- val |= ipa_reg_bit(reg, GLOBAL_2X_CLK);
+ val = reg_bit(reg, CLKON_GLOBAL);
+ val |= reg_bit(reg, GLOBAL_2X_CLK);
}
- iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg));
+ iowrite32(val, ipa->reg_virt + reg_offset(reg));
}
/* Configure bus access behavior for IPA components */
static void ipa_hardware_config_comp(struct ipa *ipa)
{
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 offset;
u32 val;
@@ -274,21 +274,22 @@ static void ipa_hardware_config_comp(struct ipa *ipa)
return;
reg = ipa_reg(ipa, COMP_CFG);
- offset = ipa_reg_offset(reg);
+ offset = reg_offset(reg);
+
val = ioread32(ipa->reg_virt + offset);
if (ipa->version == IPA_VERSION_4_0) {
- val &= ~ipa_reg_bit(reg, IPA_QMB_SELECT_CONS_EN);
- val &= ~ipa_reg_bit(reg, IPA_QMB_SELECT_PROD_EN);
- val &= ~ipa_reg_bit(reg, IPA_QMB_SELECT_GLOBAL_EN);
+ val &= ~reg_bit(reg, IPA_QMB_SELECT_CONS_EN);
+ val &= ~reg_bit(reg, IPA_QMB_SELECT_PROD_EN);
+ val &= ~reg_bit(reg, IPA_QMB_SELECT_GLOBAL_EN);
} else if (ipa->version < IPA_VERSION_4_5) {
- val |= ipa_reg_bit(reg, GSI_MULTI_AXI_MASTERS_DIS);
+ val |= reg_bit(reg, GSI_MULTI_AXI_MASTERS_DIS);
} else {
/* For IPA v4.5 FULL_FLUSH_WAIT_RS_CLOSURE_EN is 0 */
}
- val |= ipa_reg_bit(reg, GSI_MULTI_INORDER_RD_DIS);
- val |= ipa_reg_bit(reg, GSI_MULTI_INORDER_WR_DIS);
+ val |= reg_bit(reg, GSI_MULTI_INORDER_RD_DIS);
+ val |= reg_bit(reg, GSI_MULTI_INORDER_WR_DIS);
iowrite32(val, ipa->reg_virt + offset);
}
@@ -299,7 +300,7 @@ ipa_hardware_config_qsb(struct ipa *ipa, const struct ipa_data *data)
{
const struct ipa_qsb_data *data0;
const struct ipa_qsb_data *data1;
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 val;
/* QMB 0 represents DDR; QMB 1 (if present) represents PCIe */
@@ -310,29 +311,27 @@ ipa_hardware_config_qsb(struct ipa *ipa, const struct ipa_data *data)
/* Max outstanding write accesses for QSB masters */
reg = ipa_reg(ipa, QSB_MAX_WRITES);
- val = ipa_reg_encode(reg, GEN_QMB_0_MAX_WRITES, data0->max_writes);
+ val = reg_encode(reg, GEN_QMB_0_MAX_WRITES, data0->max_writes);
if (data->qsb_count > 1)
- val |= ipa_reg_encode(reg, GEN_QMB_1_MAX_WRITES,
- data1->max_writes);
+ val |= reg_encode(reg, GEN_QMB_1_MAX_WRITES, data1->max_writes);
- iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg));
+ iowrite32(val, ipa->reg_virt + reg_offset(reg));
/* Max outstanding read accesses for QSB masters */
reg = ipa_reg(ipa, QSB_MAX_READS);
- val = ipa_reg_encode(reg, GEN_QMB_0_MAX_READS, data0->max_reads);
+ val = reg_encode(reg, GEN_QMB_0_MAX_READS, data0->max_reads);
if (ipa->version >= IPA_VERSION_4_0)
- val |= ipa_reg_encode(reg, GEN_QMB_0_MAX_READS_BEATS,
- data0->max_reads_beats);
+ val |= reg_encode(reg, GEN_QMB_0_MAX_READS_BEATS,
+ data0->max_reads_beats);
if (data->qsb_count > 1) {
- val = ipa_reg_encode(reg, GEN_QMB_1_MAX_READS,
- data1->max_reads);
+ val = reg_encode(reg, GEN_QMB_1_MAX_READS, data1->max_reads);
if (ipa->version >= IPA_VERSION_4_0)
- val |= ipa_reg_encode(reg, GEN_QMB_1_MAX_READS_BEATS,
- data1->max_reads_beats);
+ val |= reg_encode(reg, GEN_QMB_1_MAX_READS_BEATS,
+ data1->max_reads_beats);
}
- iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg));
+ iowrite32(val, ipa->reg_virt + reg_offset(reg));
}
/* The internal inactivity timer clock is used for the aggregation timer */
@@ -368,41 +367,47 @@ static __always_inline u32 ipa_aggr_granularity_val(u32 usec)
*/
static void ipa_qtime_config(struct ipa *ipa)
{
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 offset;
u32 val;
/* Timer clock divider must be disabled when we change the rate */
reg = ipa_reg(ipa, TIMERS_XO_CLK_DIV_CFG);
- iowrite32(0, ipa->reg_virt + ipa_reg_offset(reg));
+ iowrite32(0, ipa->reg_virt + reg_offset(reg));
reg = ipa_reg(ipa, QTIME_TIMESTAMP_CFG);
/* Set DPL time stamp resolution to use Qtime (instead of 1 msec) */
- val = ipa_reg_encode(reg, DPL_TIMESTAMP_LSB, DPL_TIMESTAMP_SHIFT);
- val |= ipa_reg_bit(reg, DPL_TIMESTAMP_SEL);
+ val = reg_encode(reg, DPL_TIMESTAMP_LSB, DPL_TIMESTAMP_SHIFT);
+ val |= reg_bit(reg, DPL_TIMESTAMP_SEL);
/* Configure tag and NAT Qtime timestamp resolution as well */
- val = ipa_reg_encode(reg, TAG_TIMESTAMP_LSB, TAG_TIMESTAMP_SHIFT);
- val = ipa_reg_encode(reg, NAT_TIMESTAMP_LSB, NAT_TIMESTAMP_SHIFT);
+ val = reg_encode(reg, TAG_TIMESTAMP_LSB, TAG_TIMESTAMP_SHIFT);
+ val = reg_encode(reg, NAT_TIMESTAMP_LSB, NAT_TIMESTAMP_SHIFT);
- iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg));
+ iowrite32(val, ipa->reg_virt + reg_offset(reg));
/* Set granularity of pulse generators used for other timers */
reg = ipa_reg(ipa, TIMERS_PULSE_GRAN_CFG);
- val = ipa_reg_encode(reg, PULSE_GRAN_0, IPA_GRAN_100_US);
- val |= ipa_reg_encode(reg, PULSE_GRAN_1, IPA_GRAN_1_MS);
- val |= ipa_reg_encode(reg, PULSE_GRAN_2, IPA_GRAN_1_MS);
+ val = reg_encode(reg, PULSE_GRAN_0, IPA_GRAN_100_US);
+ val |= reg_encode(reg, PULSE_GRAN_1, IPA_GRAN_1_MS);
+ if (ipa->version >= IPA_VERSION_5_0) {
+ val |= reg_encode(reg, PULSE_GRAN_2, IPA_GRAN_10_MS);
+ val |= reg_encode(reg, PULSE_GRAN_3, IPA_GRAN_10_MS);
+ } else {
+ val |= reg_encode(reg, PULSE_GRAN_2, IPA_GRAN_1_MS);
+ }
- iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg));
+ iowrite32(val, ipa->reg_virt + reg_offset(reg));
/* Actual divider is 1 more than value supplied here */
reg = ipa_reg(ipa, TIMERS_XO_CLK_DIV_CFG);
- offset = ipa_reg_offset(reg);
- val = ipa_reg_encode(reg, DIV_VALUE, IPA_XO_CLOCK_DIVIDER - 1);
+ offset = reg_offset(reg);
+
+ val = reg_encode(reg, DIV_VALUE, IPA_XO_CLOCK_DIVIDER - 1);
iowrite32(val, ipa->reg_virt + offset);
/* Divider value is set; re-enable the common timer clock divider */
- val |= ipa_reg_bit(reg, DIV_ENABLE);
+ val |= reg_bit(reg, DIV_ENABLE);
iowrite32(val, ipa->reg_virt + offset);
}
@@ -411,13 +416,13 @@ static void ipa_qtime_config(struct ipa *ipa)
static void ipa_hardware_config_counter(struct ipa *ipa)
{
u32 granularity = ipa_aggr_granularity_val(IPA_AGGR_GRANULARITY);
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 val;
reg = ipa_reg(ipa, COUNTER_CFG);
/* If defined, EOT_COAL_GRANULARITY is 0 */
- val = ipa_reg_encode(reg, AGGR_GRANULARITY, granularity);
- iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg));
+ val = reg_encode(reg, AGGR_GRANULARITY, granularity);
+ iowrite32(val, ipa->reg_virt + reg_offset(reg));
}
static void ipa_hardware_config_timing(struct ipa *ipa)
@@ -430,8 +435,13 @@ static void ipa_hardware_config_timing(struct ipa *ipa)
static void ipa_hardware_config_hashing(struct ipa *ipa)
{
- const struct ipa_reg *reg;
+ const struct reg *reg;
+ /* Other than IPA v4.2, all versions enable "hashing". Starting
+ * with IPA v5.0, the filter and router tables are implemented
+ * differently, but the default configuration enables this feature
+ * (now referred to as "cacheing"), so there's nothing to do here.
+ */
if (ipa->version != IPA_VERSION_4_2)
return;
@@ -441,26 +451,26 @@ static void ipa_hardware_config_hashing(struct ipa *ipa)
/* IPV6_ROUTER_HASH, IPV6_FILTER_HASH, IPV4_ROUTER_HASH,
* IPV4_FILTER_HASH are all zero.
*/
- iowrite32(0, ipa->reg_virt + ipa_reg_offset(reg));
+ iowrite32(0, ipa->reg_virt + reg_offset(reg));
}
static void ipa_idle_indication_cfg(struct ipa *ipa,
u32 enter_idle_debounce_thresh,
bool const_non_idle_enable)
{
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 val;
if (ipa->version < IPA_VERSION_3_5_1)
return;
reg = ipa_reg(ipa, IDLE_INDICATION_CFG);
- val = ipa_reg_encode(reg, ENTER_IDLE_DEBOUNCE_THRESH,
- enter_idle_debounce_thresh);
+ val = reg_encode(reg, ENTER_IDLE_DEBOUNCE_THRESH,
+ enter_idle_debounce_thresh);
if (const_non_idle_enable)
- val |= ipa_reg_bit(reg, CONST_NON_IDLE_ENABLE);
+ val |= reg_bit(reg, CONST_NON_IDLE_ENABLE);
- iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg));
+ iowrite32(val, ipa->reg_virt + reg_offset(reg));
}
/**
diff --git a/drivers/net/ipa/ipa_mem.c b/drivers/net/ipa/ipa_mem.c
index 9ec5af323f73..db6ada2343af 100644
--- a/drivers/net/ipa/ipa_mem.c
+++ b/drivers/net/ipa/ipa_mem.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2022 Linaro Ltd.
+ * Copyright (C) 2019-2023 Linaro Ltd.
*/
#include <linux/types.h>
@@ -75,7 +75,7 @@ ipa_mem_zero_region_add(struct gsi_trans *trans, enum ipa_mem_id mem_id)
int ipa_mem_setup(struct ipa *ipa)
{
dma_addr_t addr = ipa->zero_addr;
- const struct ipa_reg *reg;
+ const struct reg *reg;
const struct ipa_mem *mem;
struct gsi_trans *trans;
u32 offset;
@@ -115,8 +115,8 @@ int ipa_mem_setup(struct ipa *ipa)
offset = ipa->mem_offset + mem->offset;
reg = ipa_reg(ipa, LOCAL_PKT_PROC_CNTXT);
- val = ipa_reg_encode(reg, IPA_BASE_ADDR, offset);
- iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg));
+ val = reg_encode(reg, IPA_BASE_ADDR, offset);
+ iowrite32(val, ipa->reg_virt + reg_offset(reg));
return 0;
}
@@ -163,6 +163,12 @@ static bool ipa_mem_id_valid(struct ipa *ipa, enum ipa_mem_id mem_id)
return false;
break;
+ case IPA_MEM_AP_V4_FILTER:
+ case IPA_MEM_AP_V6_FILTER:
+ if (version != IPA_VERSION_5_0)
+ return false;
+ break;
+
case IPA_MEM_NAT_TABLE:
case IPA_MEM_STATS_FILTER_ROUTE:
if (version < IPA_VERSION_4_5)
@@ -312,8 +318,8 @@ static bool ipa_mem_size_valid(struct ipa *ipa)
int ipa_mem_config(struct ipa *ipa)
{
struct device *dev = &ipa->pdev->dev;
- const struct ipa_reg *reg;
const struct ipa_mem *mem;
+ const struct reg *reg;
dma_addr_t addr;
u32 mem_size;
void *virt;
@@ -322,13 +328,13 @@ int ipa_mem_config(struct ipa *ipa)
/* Check the advertised location and size of the shared memory area */
reg = ipa_reg(ipa, SHARED_MEM_SIZE);
- val = ioread32(ipa->reg_virt + ipa_reg_offset(reg));
+ val = ioread32(ipa->reg_virt + reg_offset(reg));
/* The fields in the register are in 8 byte units */
- ipa->mem_offset = 8 * ipa_reg_decode(reg, MEM_BADDR, val);
+ ipa->mem_offset = 8 * reg_decode(reg, MEM_BADDR, val);
/* Make sure the end is within the region's mapped space */
- mem_size = 8 * ipa_reg_decode(reg, MEM_SIZE, val);
+ mem_size = 8 * reg_decode(reg, MEM_SIZE, val);
/* If the sizes don't match, issue a warning */
if (ipa->mem_offset + mem_size < ipa->mem_size) {
@@ -466,7 +472,8 @@ static int ipa_imem_init(struct ipa *ipa, unsigned long addr, size_t size)
size = PAGE_ALIGN(size + addr - phys);
iova = phys; /* We just want a direct mapping */
- ret = iommu_map(domain, iova, phys, size, IOMMU_READ | IOMMU_WRITE);
+ ret = iommu_map(domain, iova, phys, size, IOMMU_READ | IOMMU_WRITE,
+ GFP_KERNEL);
if (ret)
return ret;
@@ -574,7 +581,8 @@ static int ipa_smem_init(struct ipa *ipa, u32 item, size_t size)
size = PAGE_ALIGN(size + addr - phys);
iova = phys; /* We just want a direct mapping */
- ret = iommu_map(domain, iova, phys, size, IOMMU_READ | IOMMU_WRITE);
+ ret = iommu_map(domain, iova, phys, size, IOMMU_READ | IOMMU_WRITE,
+ GFP_KERNEL);
if (ret)
return ret;
diff --git a/drivers/net/ipa/ipa_mem.h b/drivers/net/ipa/ipa_mem.h
index 570bfdd99bff..868e9c20e8c4 100644
--- a/drivers/net/ipa/ipa_mem.h
+++ b/drivers/net/ipa/ipa_mem.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2021 Linaro Ltd.
+ * Copyright (C) 2019-2023 Linaro Ltd.
*/
#ifndef _IPA_MEM_H_
#define _IPA_MEM_H_
@@ -62,13 +62,15 @@ enum ipa_mem_id {
IPA_MEM_PDN_CONFIG, /* 0/2 canaries (IPA v4.0+) */
IPA_MEM_STATS_QUOTA_MODEM, /* 2/4 canaries (IPA v4.0+) */
IPA_MEM_STATS_QUOTA_AP, /* 0 canaries, optional (IPA v4.0+) */
- IPA_MEM_STATS_TETHERING, /* 0 canaries (IPA v4.0+) */
+ IPA_MEM_STATS_TETHERING, /* 0 canaries, optional (IPA v4.0+) */
IPA_MEM_STATS_DROP, /* 0 canaries, optional (IPA v4.0+) */
- /* The next 5 filter and route statistics regions are optional */
+ /* The next 7 filter and route statistics regions are optional */
IPA_MEM_STATS_V4_FILTER, /* 0 canaries (IPA v4.0-v4.2) */
IPA_MEM_STATS_V6_FILTER, /* 0 canaries (IPA v4.0-v4.2) */
IPA_MEM_STATS_V4_ROUTE, /* 0 canaries (IPA v4.0-v4.2) */
IPA_MEM_STATS_V6_ROUTE, /* 0 canaries (IPA v4.0-v4.2) */
+ IPA_MEM_AP_V4_FILTER, /* 2 canaries (IPA v5.0) */
+ IPA_MEM_AP_V6_FILTER, /* 0 canaries (IPA v5.0) */
IPA_MEM_STATS_FILTER_ROUTE, /* 0 canaries (IPA v4.5+) */
IPA_MEM_NAT_TABLE, /* 4 canaries, optional (IPA v4.5+) */
IPA_MEM_END_MARKER, /* 1 canary (not a real region) */
diff --git a/drivers/net/ipa/ipa_power.c b/drivers/net/ipa/ipa_power.c
index 8057be8cda80..921eecf3eff6 100644
--- a/drivers/net/ipa/ipa_power.c
+++ b/drivers/net/ipa/ipa_power.c
@@ -219,17 +219,7 @@ u32 ipa_core_clock_rate(struct ipa *ipa)
return ipa->power ? (u32)clk_get_rate(ipa->power->core) : 0;
}
-/**
- * ipa_suspend_handler() - Handle the suspend IPA interrupt
- * @ipa: IPA pointer
- * @irq_id: IPA interrupt type (unused)
- *
- * If an RX endpoint is suspended, and the IPA has a packet destined for
- * that endpoint, the IPA generates a SUSPEND interrupt to inform the AP
- * that it should resume the endpoint. If we get one of these interrupts
- * we just wake up the system.
- */
-static void ipa_suspend_handler(struct ipa *ipa, enum ipa_irq_id irq_id)
+void ipa_power_suspend_handler(struct ipa *ipa, enum ipa_irq_id irq_id)
{
/* To handle an IPA interrupt we will have resumed the hardware
* just to handle the interrupt, so we're done. If we are in a
@@ -352,12 +342,11 @@ int ipa_power_setup(struct ipa *ipa)
{
int ret;
- ipa_interrupt_add(ipa->interrupt, IPA_IRQ_TX_SUSPEND,
- ipa_suspend_handler);
+ ipa_interrupt_enable(ipa, IPA_IRQ_TX_SUSPEND);
ret = device_init_wakeup(&ipa->pdev->dev, true);
if (ret)
- ipa_interrupt_remove(ipa->interrupt, IPA_IRQ_TX_SUSPEND);
+ ipa_interrupt_disable(ipa, IPA_IRQ_TX_SUSPEND);
return ret;
}
@@ -365,7 +354,7 @@ int ipa_power_setup(struct ipa *ipa)
void ipa_power_teardown(struct ipa *ipa)
{
(void)device_init_wakeup(&ipa->pdev->dev, false);
- ipa_interrupt_remove(ipa->interrupt, IPA_IRQ_TX_SUSPEND);
+ ipa_interrupt_disable(ipa, IPA_IRQ_TX_SUSPEND);
}
/* Initialize IPA power management */
diff --git a/drivers/net/ipa/ipa_power.h b/drivers/net/ipa/ipa_power.h
index 896f052e51a1..3a4c59ea1222 100644
--- a/drivers/net/ipa/ipa_power.h
+++ b/drivers/net/ipa/ipa_power.h
@@ -10,6 +10,7 @@ struct device;
struct ipa;
struct ipa_power_data;
+enum ipa_irq_id;
/* IPA device power management function block */
extern const struct dev_pm_ops ipa_pm_ops;
@@ -48,6 +49,17 @@ void ipa_power_modem_queue_active(struct ipa *ipa);
void ipa_power_retention(struct ipa *ipa, bool enable);
/**
+ * ipa_power_suspend_handler() - Handler for SUSPEND IPA interrupts
+ * @ipa: IPA pointer
+ * @irq_id: IPA interrupt ID (unused)
+ *
+ * If an RX endpoint is suspended, and the IPA has a packet destined for
+ * that endpoint, the IPA generates a SUSPEND interrupt to inform the AP
+ * that it should resume the endpoint.
+ */
+void ipa_power_suspend_handler(struct ipa *ipa, enum ipa_irq_id irq_id);
+
+/**
* ipa_power_setup() - Set up IPA power management
* @ipa: IPA pointer
*
diff --git a/drivers/net/ipa/ipa_reg.c b/drivers/net/ipa/ipa_reg.c
index ddd529153e15..735fa6591609 100644
--- a/drivers/net/ipa/ipa_reg.c
+++ b/drivers/net/ipa/ipa_reg.c
@@ -9,73 +9,96 @@
#include "ipa.h"
#include "ipa_reg.h"
-/* Is this register valid and defined for the current IPA version? */
-static bool ipa_reg_valid(struct ipa *ipa, enum ipa_reg_id reg_id)
+/* Is this register ID valid for the current IPA version? */
+static bool ipa_reg_id_valid(struct ipa *ipa, enum ipa_reg_id reg_id)
{
enum ipa_version version = ipa->version;
- bool valid;
-
- /* Check for bogus (out of range) register IDs */
- if ((u32)reg_id >= ipa->regs->reg_count)
- return false;
switch (reg_id) {
case IPA_BCR:
case COUNTER_CFG:
- valid = version < IPA_VERSION_4_5;
- break;
+ return version < IPA_VERSION_4_5;
case IPA_TX_CFG:
case FLAVOR_0:
case IDLE_INDICATION_CFG:
- valid = version >= IPA_VERSION_3_5;
- break;
+ return version >= IPA_VERSION_3_5;
case QTIME_TIMESTAMP_CFG:
case TIMERS_XO_CLK_DIV_CFG:
case TIMERS_PULSE_GRAN_CFG:
- valid = version >= IPA_VERSION_4_5;
- break;
+ return version >= IPA_VERSION_4_5;
case SRC_RSRC_GRP_45_RSRC_TYPE:
case DST_RSRC_GRP_45_RSRC_TYPE:
- valid = version <= IPA_VERSION_3_1 ||
- version == IPA_VERSION_4_5;
- break;
+ return version <= IPA_VERSION_3_1 ||
+ version == IPA_VERSION_4_5;
case SRC_RSRC_GRP_67_RSRC_TYPE:
case DST_RSRC_GRP_67_RSRC_TYPE:
- valid = version <= IPA_VERSION_3_1;
- break;
+ return version <= IPA_VERSION_3_1;
case ENDP_FILTER_ROUTER_HSH_CFG:
- valid = version != IPA_VERSION_4_2;
- break;
+ return version != IPA_VERSION_4_2;
case IRQ_SUSPEND_EN:
case IRQ_SUSPEND_CLR:
- valid = version >= IPA_VERSION_3_1;
- break;
+ return version >= IPA_VERSION_3_1;
+
+ case COMP_CFG:
+ case CLKON_CFG:
+ case ROUTE:
+ case SHARED_MEM_SIZE:
+ case QSB_MAX_WRITES:
+ case QSB_MAX_READS:
+ case FILT_ROUT_HASH_EN:
+ case FILT_ROUT_CACHE_CFG:
+ case FILT_ROUT_HASH_FLUSH:
+ case FILT_ROUT_CACHE_FLUSH:
+ case STATE_AGGR_ACTIVE:
+ case LOCAL_PKT_PROC_CNTXT:
+ case AGGR_FORCE_CLOSE:
+ case SRC_RSRC_GRP_01_RSRC_TYPE:
+ case SRC_RSRC_GRP_23_RSRC_TYPE:
+ case DST_RSRC_GRP_01_RSRC_TYPE:
+ case DST_RSRC_GRP_23_RSRC_TYPE:
+ case ENDP_INIT_CTRL:
+ case ENDP_INIT_CFG:
+ case ENDP_INIT_NAT:
+ case ENDP_INIT_HDR:
+ case ENDP_INIT_HDR_EXT:
+ case ENDP_INIT_HDR_METADATA_MASK:
+ case ENDP_INIT_MODE:
+ case ENDP_INIT_AGGR:
+ case ENDP_INIT_HOL_BLOCK_EN:
+ case ENDP_INIT_HOL_BLOCK_TIMER:
+ case ENDP_INIT_DEAGGR:
+ case ENDP_INIT_RSRC_GRP:
+ case ENDP_INIT_SEQ:
+ case ENDP_STATUS:
+ case ENDP_FILTER_CACHE_CFG:
+ case ENDP_ROUTER_CACHE_CFG:
+ case IPA_IRQ_STTS:
+ case IPA_IRQ_EN:
+ case IPA_IRQ_CLR:
+ case IPA_IRQ_UC:
+ case IRQ_SUSPEND_INFO:
+ return true; /* These should be defined for all versions */
default:
- valid = true; /* Others should be defined for all versions */
- break;
+ return false;
}
-
- /* To be valid, it must be defined */
-
- return valid && ipa->regs->reg[reg_id];
}
-const struct ipa_reg *ipa_reg(struct ipa *ipa, enum ipa_reg_id reg_id)
+const struct reg *ipa_reg(struct ipa *ipa, enum ipa_reg_id reg_id)
{
- if (WARN_ON(!ipa_reg_valid(ipa, reg_id)))
+ if (WARN(!ipa_reg_id_valid(ipa, reg_id), "invalid reg %u\n", reg_id))
return NULL;
- return ipa->regs->reg[reg_id];
+ return reg(ipa->regs, reg_id);
}
-static const struct ipa_regs *ipa_regs(enum ipa_version version)
+static const struct regs *ipa_regs(enum ipa_version version)
{
switch (version) {
case IPA_VERSION_3_1:
@@ -100,7 +123,7 @@ static const struct ipa_regs *ipa_regs(enum ipa_version version)
int ipa_reg_init(struct ipa *ipa)
{
struct device *dev = &ipa->pdev->dev;
- const struct ipa_regs *regs;
+ const struct regs *regs;
struct resource *res;
regs = ipa_regs(ipa->version);
@@ -123,7 +146,6 @@ int ipa_reg_init(struct ipa *ipa)
dev_err(dev, "unable to remap \"ipa-reg\" memory\n");
return -ENOMEM;
}
- ipa->reg_addr = res->start;
ipa->regs = regs;
return 0;
diff --git a/drivers/net/ipa/ipa_reg.h b/drivers/net/ipa/ipa_reg.h
index ff64b19a4df8..28aa1351dd48 100644
--- a/drivers/net/ipa/ipa_reg.h
+++ b/drivers/net/ipa/ipa_reg.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2022 Linaro Ltd.
+ * Copyright (C) 2018-2023 Linaro Ltd.
*/
#ifndef _IPA_REG_H_
#define _IPA_REG_H_
@@ -10,6 +10,7 @@
#include <linux/bug.h>
#include "ipa_version.h"
+#include "reg.h"
struct ipa;
@@ -35,7 +36,7 @@ struct ipa;
* by register ID. Each entry in the array specifies the base offset and
* (for parameterized registers) a non-zero stride value. Not all versions
* of IPA define all registers. The offset for a register is returned by
- * ipa_reg_offset() when the register's ipa_reg structure is supplied;
+ * reg_offset() when the register's ipa_reg structure is supplied;
* zero is returned for an undefined register (this should never happen).
*
* Some registers encode multiple fields within them. Each field in
@@ -44,9 +45,9 @@ struct ipa;
* an array of field masks, indexed by field ID. Two functions are
* used to access register fields; both take an ipa_reg structure as
* argument. To encode a value to be represented in a register field,
- * the value and field ID are passed to ipa_reg_encode(). To extract
+ * the value and field ID are passed to reg_encode(). To extract
* a value encoded in a register field, the field ID is passed to
- * ipa_reg_decode(). In addition, for single-bit fields, ipa_reg_bit()
+ * reg_decode(). In addition, for single-bit fields, reg_bit()
* can be used to either encode the bit value, or to generate a mask
* used to extract the bit value.
*/
@@ -59,8 +60,10 @@ enum ipa_reg_id {
SHARED_MEM_SIZE,
QSB_MAX_WRITES,
QSB_MAX_READS,
- FILT_ROUT_HASH_EN,
- FILT_ROUT_HASH_FLUSH,
+ FILT_ROUT_HASH_EN, /* Not IPA v5.0+ */
+ FILT_ROUT_CACHE_CFG, /* IPA v5.0+ */
+ FILT_ROUT_HASH_FLUSH, /* Not IPA v5.0+ */
+ FILT_ROUT_CACHE_FLUSH, /* IPA v5.0+ */
STATE_AGGR_ACTIVE,
IPA_BCR, /* Not IPA v4.5+ */
LOCAL_PKT_PROC_CNTXT,
@@ -95,7 +98,9 @@ enum ipa_reg_id {
ENDP_INIT_SEQ, /* TX only */
ENDP_STATUS,
ENDP_FILTER_ROUTER_HSH_CFG, /* Not IPA v4.2 */
- /* The IRQ registers are only used for GSI_EE_AP */
+ ENDP_FILTER_CACHE_CFG, /* IPA v5.0+ */
+ ENDP_ROUTER_CACHE_CFG, /* IPA v5.0+ */
+ /* The IRQ registers that follow are only used for GSI_EE_AP */
IPA_IRQ_STTS,
IPA_IRQ_EN,
IPA_IRQ_CLR,
@@ -106,56 +111,6 @@ enum ipa_reg_id {
IPA_REG_ID_COUNT, /* Last; not an ID */
};
-/**
- * struct ipa_reg - An IPA register descriptor
- * @offset: Register offset relative to base of the "ipa-reg" memory
- * @stride: Distance between two instances, if parameterized
- * @fcount: Number of entries in the @fmask array
- * @fmask: Array of mask values defining position and width of fields
- * @name: Upper-case name of the IPA register
- */
-struct ipa_reg {
- u32 offset;
- u32 stride;
- u32 fcount;
- const u32 *fmask; /* BIT(nr) or GENMASK(h, l) */
- const char *name;
-};
-
-/* Helper macro for defining "simple" (non-parameterized) registers */
-#define IPA_REG(__NAME, __reg_id, __offset) \
- IPA_REG_STRIDE(__NAME, __reg_id, __offset, 0)
-
-/* Helper macro for defining parameterized registers, specifying stride */
-#define IPA_REG_STRIDE(__NAME, __reg_id, __offset, __stride) \
- static const struct ipa_reg ipa_reg_ ## __reg_id = { \
- .name = #__NAME, \
- .offset = __offset, \
- .stride = __stride, \
- }
-
-#define IPA_REG_FIELDS(__NAME, __name, __offset) \
- IPA_REG_STRIDE_FIELDS(__NAME, __name, __offset, 0)
-
-#define IPA_REG_STRIDE_FIELDS(__NAME, __name, __offset, __stride) \
- static const struct ipa_reg ipa_reg_ ## __name = { \
- .name = #__NAME, \
- .offset = __offset, \
- .stride = __stride, \
- .fcount = ARRAY_SIZE(ipa_reg_ ## __name ## _fmask), \
- .fmask = ipa_reg_ ## __name ## _fmask, \
- }
-
-/**
- * struct ipa_regs - Description of registers supported by hardware
- * @reg_count: Number of registers in the @reg[] array
- * @reg: Array of register descriptors
- */
-struct ipa_regs {
- u32 reg_count;
- const struct ipa_reg **reg;
-};
-
/* COMP_CFG register */
enum ipa_reg_comp_cfg_field_id {
COMP_CFG_ENABLE, /* Not IPA v4.0+ */
@@ -251,14 +206,28 @@ enum ipa_reg_qsb_max_reads_field_id {
GEN_QMB_1_MAX_READS_BEATS, /* IPA v4.0+ */
};
+/* FILT_ROUT_CACHE_CFG register */
+enum ipa_reg_filt_rout_cache_cfg_field_id {
+ ROUTER_CACHE_EN,
+ FILTER_CACHE_EN,
+ LOW_PRI_HASH_HIT_DISABLE,
+ LRU_EVICTION_THRESHOLD,
+};
+
/* FILT_ROUT_HASH_EN and FILT_ROUT_HASH_FLUSH registers */
-enum ipa_reg_rout_hash_field_id {
+enum ipa_reg_filt_rout_hash_field_id {
IPV6_ROUTER_HASH,
IPV6_FILTER_HASH,
IPV4_ROUTER_HASH,
IPV4_FILTER_HASH,
};
+/* FILT_ROUT_CACHE_FLUSH register */
+enum ipa_reg_filt_rout_cache_field_id {
+ ROUTER_CACHE,
+ FILTER_CACHE,
+};
+
/* BCR register */
enum ipa_bcr_compat {
BCR_CMDQ_L_LACK_ONE_ENTRY = 0x0, /* Not IPA v4.2+ */
@@ -298,6 +267,7 @@ enum ipa_reg_ipa_tx_cfg_field_id {
DUAL_TX_ENABLE, /* v4.5+ */
SSPND_PA_NO_START_STATE, /* v4,2+, not v4.5 */
SSPND_PA_NO_BQ_STATE, /* v4.2 only */
+ HOLB_STICKY_DROP_EN, /* v5.0+ */
};
/* FLAVOR_0 register */
@@ -333,6 +303,7 @@ enum ipa_reg_timers_pulse_gran_cfg_field_id {
PULSE_GRAN_0,
PULSE_GRAN_1,
PULSE_GRAN_2,
+ PULSE_GRAN_3,
};
/* Values for IPA_GRAN_x fields of TIMERS_PULSE_GRAN_CFG */
@@ -382,11 +353,11 @@ enum ipa_reg_endp_init_nat_field_id {
NAT_EN,
};
-/** enum ipa_nat_en - ENDP_INIT_NAT register NAT_EN field value */
-enum ipa_nat_en {
- IPA_NAT_BYPASS = 0x0,
- IPA_NAT_SRC = 0x1,
- IPA_NAT_DST = 0x2,
+/** enum ipa_nat_type - ENDP_INIT_NAT register NAT_EN field value */
+enum ipa_nat_type {
+ IPA_NAT_TYPE_BYPASS = 0,
+ IPA_NAT_TYPE_SRC = 1,
+ IPA_NAT_TYPE_DST = 2,
};
/* ENDP_INIT_HDR register */
@@ -415,6 +386,8 @@ enum ipa_reg_endp_init_hdr_ext_field_id {
HDR_TOTAL_LEN_OR_PAD_OFFSET_MSB, /* v4.5+ */
HDR_OFST_PKT_SIZE_MSB, /* v4.5+ */
HDR_ADDITIONAL_CONST_LEN_MSB, /* v4.5+ */
+ HDR_BYTES_TO_REMOVE_VALID, /* v5.0+ */
+ HDR_BYTES_TO_REMOVE, /* v5.0+ */
};
/* ENDP_INIT_MODE register */
@@ -573,6 +546,17 @@ enum ipa_reg_endp_filter_router_hsh_cfg_field_id {
ROUTER_HASH_MSK_ALL, /* Bitwise OR of the above 6 fields */
};
+/* ENDP_FILTER_CACHE_CFG and ENDP_ROUTER_CACHE_CFG registers */
+enum ipa_reg_endp_cache_cfg_field_id {
+ CACHE_MSK_SRC_ID,
+ CACHE_MSK_SRC_IP,
+ CACHE_MSK_DST_IP,
+ CACHE_MSK_SRC_PORT,
+ CACHE_MSK_DST_PORT,
+ CACHE_MSK_PROTOCOL,
+ CACHE_MSK_METADATA,
+};
+
/* IPA_IRQ_STTS, IPA_IRQ_EN, and IPA_IRQ_CLR registers */
/**
* enum ipa_irq_id - Bit positions representing type of IPA IRQ
@@ -654,79 +638,15 @@ enum ipa_reg_ipa_irq_uc_field_id {
UC_INTR,
};
-extern const struct ipa_regs ipa_regs_v3_1;
-extern const struct ipa_regs ipa_regs_v3_5_1;
-extern const struct ipa_regs ipa_regs_v4_2;
-extern const struct ipa_regs ipa_regs_v4_5;
-extern const struct ipa_regs ipa_regs_v4_7;
-extern const struct ipa_regs ipa_regs_v4_9;
-extern const struct ipa_regs ipa_regs_v4_11;
-
-/* Return the field mask for a field in a register */
-static inline u32 ipa_reg_fmask(const struct ipa_reg *reg, u32 field_id)
-{
- if (!reg || WARN_ON(field_id >= reg->fcount))
- return 0;
-
- return reg->fmask[field_id];
-}
-
-/* Return the mask for a single-bit field in a register */
-static inline u32 ipa_reg_bit(const struct ipa_reg *reg, u32 field_id)
-{
- u32 fmask = ipa_reg_fmask(reg, field_id);
-
- WARN_ON(!is_power_of_2(fmask));
-
- return fmask;
-}
-
-/* Encode a value into the given field of a register */
-static inline u32
-ipa_reg_encode(const struct ipa_reg *reg, u32 field_id, u32 val)
-{
- u32 fmask = ipa_reg_fmask(reg, field_id);
-
- if (!fmask)
- return 0;
-
- val <<= __ffs(fmask);
- if (WARN_ON(val & ~fmask))
- return 0;
-
- return val;
-}
-
-/* Given a register value, decode (extract) the value in the given field */
-static inline u32
-ipa_reg_decode(const struct ipa_reg *reg, u32 field_id, u32 val)
-{
- u32 fmask = ipa_reg_fmask(reg, field_id);
-
- return fmask ? (val & fmask) >> __ffs(fmask) : 0;
-}
-
-/* Return the maximum value representable by the given field; always 2^n - 1 */
-static inline u32 ipa_reg_field_max(const struct ipa_reg *reg, u32 field_id)
-{
- u32 fmask = ipa_reg_fmask(reg, field_id);
-
- return fmask ? fmask >> __ffs(fmask) : 0;
-}
-
-const struct ipa_reg *ipa_reg(struct ipa *ipa, enum ipa_reg_id reg_id);
-
-/* Returns 0 for NULL reg; warning will have already been issued */
-static inline u32 ipa_reg_offset(const struct ipa_reg *reg)
-{
- return reg ? reg->offset : 0;
-}
+extern const struct regs ipa_regs_v3_1;
+extern const struct regs ipa_regs_v3_5_1;
+extern const struct regs ipa_regs_v4_2;
+extern const struct regs ipa_regs_v4_5;
+extern const struct regs ipa_regs_v4_7;
+extern const struct regs ipa_regs_v4_9;
+extern const struct regs ipa_regs_v4_11;
-/* Returns 0 for NULL reg; warning will have already been issued */
-static inline u32 ipa_reg_n_offset(const struct ipa_reg *reg, u32 n)
-{
- return reg ? reg->offset + n * reg->stride : 0;
-}
+const struct reg *ipa_reg(struct ipa *ipa, enum ipa_reg_id reg_id);
int ipa_reg_init(struct ipa *ipa);
void ipa_reg_exit(struct ipa *ipa);
diff --git a/drivers/net/ipa/ipa_resource.c b/drivers/net/ipa/ipa_resource.c
index a257f0e5e361..82c88a744d10 100644
--- a/drivers/net/ipa/ipa_resource.c
+++ b/drivers/net/ipa/ipa_resource.c
@@ -70,20 +70,20 @@ static bool ipa_resource_limits_valid(struct ipa *ipa,
static void
ipa_resource_config_common(struct ipa *ipa, u32 resource_type,
- const struct ipa_reg *reg,
+ const struct reg *reg,
const struct ipa_resource_limits *xlimits,
const struct ipa_resource_limits *ylimits)
{
u32 val;
- val = ipa_reg_encode(reg, X_MIN_LIM, xlimits->min);
- val |= ipa_reg_encode(reg, X_MAX_LIM, xlimits->max);
+ val = reg_encode(reg, X_MIN_LIM, xlimits->min);
+ val |= reg_encode(reg, X_MAX_LIM, xlimits->max);
if (ylimits) {
- val |= ipa_reg_encode(reg, Y_MIN_LIM, ylimits->min);
- val |= ipa_reg_encode(reg, Y_MAX_LIM, ylimits->max);
+ val |= reg_encode(reg, Y_MIN_LIM, ylimits->min);
+ val |= reg_encode(reg, Y_MAX_LIM, ylimits->max);
}
- iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, resource_type));
+ iowrite32(val, ipa->reg_virt + reg_n_offset(reg, resource_type));
}
static void ipa_resource_config_src(struct ipa *ipa, u32 resource_type,
@@ -92,7 +92,7 @@ static void ipa_resource_config_src(struct ipa *ipa, u32 resource_type,
u32 group_count = data->rsrc_group_src_count;
const struct ipa_resource_limits *ylimits;
const struct ipa_resource *resource;
- const struct ipa_reg *reg;
+ const struct reg *reg;
resource = &data->resource_src[resource_type];
@@ -129,7 +129,7 @@ static void ipa_resource_config_dst(struct ipa *ipa, u32 resource_type,
u32 group_count = data->rsrc_group_dst_count;
const struct ipa_resource_limits *ylimits;
const struct ipa_resource *resource;
- const struct ipa_reg *reg;
+ const struct reg *reg;
resource = &data->resource_dst[resource_type];
diff --git a/drivers/net/ipa/ipa_table.c b/drivers/net/ipa/ipa_table.c
index b81e27b61354..f0529c31d0b6 100644
--- a/drivers/net/ipa/ipa_table.c
+++ b/drivers/net/ipa/ipa_table.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2022 Linaro Ltd.
+ * Copyright (C) 2018-2023 Linaro Ltd.
*/
#include <linux/types.h>
@@ -345,9 +345,8 @@ void ipa_table_reset(struct ipa *ipa, bool modem)
int ipa_table_hash_flush(struct ipa *ipa)
{
- const struct ipa_reg *reg;
struct gsi_trans *trans;
- u32 offset;
+ const struct reg *reg;
u32 val;
if (!ipa_table_hash_support(ipa))
@@ -359,15 +358,22 @@ int ipa_table_hash_flush(struct ipa *ipa)
return -EBUSY;
}
- reg = ipa_reg(ipa, FILT_ROUT_HASH_FLUSH);
- offset = ipa_reg_offset(reg);
+ if (ipa->version < IPA_VERSION_5_0) {
+ reg = ipa_reg(ipa, FILT_ROUT_HASH_FLUSH);
- val = ipa_reg_bit(reg, IPV6_ROUTER_HASH);
- val |= ipa_reg_bit(reg, IPV6_FILTER_HASH);
- val |= ipa_reg_bit(reg, IPV4_ROUTER_HASH);
- val |= ipa_reg_bit(reg, IPV4_FILTER_HASH);
+ val = reg_bit(reg, IPV6_ROUTER_HASH);
+ val |= reg_bit(reg, IPV6_FILTER_HASH);
+ val |= reg_bit(reg, IPV4_ROUTER_HASH);
+ val |= reg_bit(reg, IPV4_FILTER_HASH);
+ } else {
+ reg = ipa_reg(ipa, FILT_ROUT_CACHE_FLUSH);
- ipa_cmd_register_write_add(trans, offset, val, val, false);
+ /* IPA v5.0+ uses a unified cache (both IPv4 and IPv6) */
+ val = reg_bit(reg, ROUTER_CACHE);
+ val |= reg_bit(reg, FILTER_CACHE);
+ }
+
+ ipa_cmd_register_write_add(trans, reg_offset(reg), val, val, false);
gsi_trans_commit_wait(trans);
@@ -486,17 +492,26 @@ static void ipa_filter_tuple_zero(struct ipa_endpoint *endpoint)
{
u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 offset;
u32 val;
- reg = ipa_reg(ipa, ENDP_FILTER_ROUTER_HSH_CFG);
+ if (ipa->version < IPA_VERSION_5_0) {
+ reg = ipa_reg(ipa, ENDP_FILTER_ROUTER_HSH_CFG);
+
+ offset = reg_n_offset(reg, endpoint_id);
+ val = ioread32(endpoint->ipa->reg_virt + offset);
- offset = ipa_reg_n_offset(reg, endpoint_id);
- val = ioread32(endpoint->ipa->reg_virt + offset);
+ /* Zero all filter-related fields, preserving the rest */
+ val &= ~reg_fmask(reg, FILTER_HASH_MSK_ALL);
+ } else {
+ /* IPA v5.0 separates filter and router cache configuration */
+ reg = ipa_reg(ipa, ENDP_FILTER_CACHE_CFG);
+ offset = reg_n_offset(reg, endpoint_id);
- /* Zero all filter-related fields, preserving the rest */
- val &= ~ipa_reg_fmask(reg, FILTER_HASH_MSK_ALL);
+ /* Zero all filter-related fields */
+ val = 0;
+ }
iowrite32(val, endpoint->ipa->reg_virt + offset);
}
@@ -536,17 +551,26 @@ static bool ipa_route_id_modem(struct ipa *ipa, u32 route_id)
*/
static void ipa_route_tuple_zero(struct ipa *ipa, u32 route_id)
{
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 offset;
u32 val;
- reg = ipa_reg(ipa, ENDP_FILTER_ROUTER_HSH_CFG);
- offset = ipa_reg_n_offset(reg, route_id);
+ if (ipa->version < IPA_VERSION_5_0) {
+ reg = ipa_reg(ipa, ENDP_FILTER_ROUTER_HSH_CFG);
+ offset = reg_n_offset(reg, route_id);
- val = ioread32(ipa->reg_virt + offset);
+ val = ioread32(ipa->reg_virt + offset);
- /* Zero all route-related fields, preserving the rest */
- val &= ~ipa_reg_fmask(reg, ROUTER_HASH_MSK_ALL);
+ /* Zero all route-related fields, preserving the rest */
+ val &= ~reg_fmask(reg, ROUTER_HASH_MSK_ALL);
+ } else {
+ /* IPA v5.0 separates filter and router cache configuration */
+ reg = ipa_reg(ipa, ENDP_ROUTER_CACHE_CFG);
+ offset = reg_n_offset(reg, route_id);
+
+ /* Zero all route-related fields */
+ val = 0;
+ }
iowrite32(val, ipa->reg_virt + offset);
}
diff --git a/drivers/net/ipa/ipa_uc.c b/drivers/net/ipa/ipa_uc.c
index f0ee47281015..7eaa0b4ebed9 100644
--- a/drivers/net/ipa/ipa_uc.c
+++ b/drivers/net/ipa/ipa_uc.c
@@ -124,7 +124,7 @@ static struct ipa_uc_mem_area *ipa_uc_shared(struct ipa *ipa)
}
/* Microcontroller event IPA interrupt handler */
-static void ipa_uc_event_handler(struct ipa *ipa, enum ipa_irq_id irq_id)
+static void ipa_uc_event_handler(struct ipa *ipa)
{
struct ipa_uc_mem_area *shared = ipa_uc_shared(ipa);
struct device *dev = &ipa->pdev->dev;
@@ -138,7 +138,7 @@ static void ipa_uc_event_handler(struct ipa *ipa, enum ipa_irq_id irq_id)
}
/* Microcontroller response IPA interrupt handler */
-static void ipa_uc_response_hdlr(struct ipa *ipa, enum ipa_irq_id irq_id)
+static void ipa_uc_response_hdlr(struct ipa *ipa)
{
struct ipa_uc_mem_area *shared = ipa_uc_shared(ipa);
struct device *dev = &ipa->pdev->dev;
@@ -170,13 +170,22 @@ static void ipa_uc_response_hdlr(struct ipa *ipa, enum ipa_irq_id irq_id)
}
}
+void ipa_uc_interrupt_handler(struct ipa *ipa, enum ipa_irq_id irq_id)
+{
+ /* Silently ignore anything unrecognized */
+ if (irq_id == IPA_IRQ_UC_0)
+ ipa_uc_event_handler(ipa);
+ else if (irq_id == IPA_IRQ_UC_1)
+ ipa_uc_response_hdlr(ipa);
+}
+
/* Configure the IPA microcontroller subsystem */
void ipa_uc_config(struct ipa *ipa)
{
ipa->uc_powered = false;
ipa->uc_loaded = false;
- ipa_interrupt_add(ipa->interrupt, IPA_IRQ_UC_0, ipa_uc_event_handler);
- ipa_interrupt_add(ipa->interrupt, IPA_IRQ_UC_1, ipa_uc_response_hdlr);
+ ipa_interrupt_enable(ipa, IPA_IRQ_UC_0);
+ ipa_interrupt_enable(ipa, IPA_IRQ_UC_1);
}
/* Inverse of ipa_uc_config() */
@@ -184,8 +193,8 @@ void ipa_uc_deconfig(struct ipa *ipa)
{
struct device *dev = &ipa->pdev->dev;
- ipa_interrupt_remove(ipa->interrupt, IPA_IRQ_UC_1);
- ipa_interrupt_remove(ipa->interrupt, IPA_IRQ_UC_0);
+ ipa_interrupt_disable(ipa, IPA_IRQ_UC_1);
+ ipa_interrupt_disable(ipa, IPA_IRQ_UC_0);
if (ipa->uc_loaded)
ipa_power_retention(ipa, false);
@@ -222,7 +231,7 @@ void ipa_uc_power(struct ipa *ipa)
static void send_uc_command(struct ipa *ipa, u32 command, u32 command_param)
{
struct ipa_uc_mem_area *shared = ipa_uc_shared(ipa);
- const struct ipa_reg *reg;
+ const struct reg *reg;
u32 val;
/* Fill in the command data */
@@ -234,9 +243,9 @@ static void send_uc_command(struct ipa *ipa, u32 command, u32 command_param)
/* Use an interrupt to tell the microcontroller the command is ready */
reg = ipa_reg(ipa, IPA_IRQ_UC);
- val = ipa_reg_bit(reg, UC_INTR);
+ val = reg_bit(reg, UC_INTR);
- iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg));
+ iowrite32(val, ipa->reg_virt + reg_offset(reg));
}
/* Tell the microcontroller the AP is shutting down */
diff --git a/drivers/net/ipa/ipa_uc.h b/drivers/net/ipa/ipa_uc.h
index 8514096e6f36..85aa0df818c2 100644
--- a/drivers/net/ipa/ipa_uc.h
+++ b/drivers/net/ipa/ipa_uc.h
@@ -7,6 +7,14 @@
#define _IPA_UC_H_
struct ipa;
+enum ipa_irq_id;
+
+/**
+ * ipa_uc_interrupt_handler() - Handler for microcontroller IPA interrupts
+ * @ipa: IPA pointer
+ * @irq_id: IPA interrupt ID
+ */
+void ipa_uc_interrupt_handler(struct ipa *ipa, enum ipa_irq_id irq_id);
/**
* ipa_uc_config() - Configure the IPA microcontroller subsystem
diff --git a/drivers/net/ipa/ipa_version.h b/drivers/net/ipa/ipa_version.h
index d15821467743..06e75b8ece7e 100644
--- a/drivers/net/ipa/ipa_version.h
+++ b/drivers/net/ipa/ipa_version.h
@@ -9,7 +9,7 @@
/**
* enum ipa_version
* @IPA_VERSION_3_0: IPA version 3.0/GSI version 1.0
- * @IPA_VERSION_3_1: IPA version 3.1/GSI version 1.1
+ * @IPA_VERSION_3_1: IPA version 3.1/GSI version 1.0
* @IPA_VERSION_3_5: IPA version 3.5/GSI version 1.2
* @IPA_VERSION_3_5_1: IPA version 3.5.1/GSI version 1.3
* @IPA_VERSION_4_0: IPA version 4.0/GSI version 2.0
@@ -20,6 +20,8 @@
* @IPA_VERSION_4_9: IPA version 4.9/GSI version 2.9
* @IPA_VERSION_4_11: IPA version 4.11/GSI version 2.11 (2.1.1)
* @IPA_VERSION_5_0: IPA version 5.0/GSI version 3.0
+ * @IPA_VERSION_5_1: IPA version 5.1/GSI version 3.0
+ * @IPA_VERSION_5_5: IPA version 5.5/GSI version 5.5
* @IPA_VERSION_COUNT: Number of defined IPA versions
*
* Defines the version of IPA (and GSI) hardware present on the platform.
@@ -38,6 +40,8 @@ enum ipa_version {
IPA_VERSION_4_9,
IPA_VERSION_4_11,
IPA_VERSION_5_0,
+ IPA_VERSION_5_1,
+ IPA_VERSION_5_5,
IPA_VERSION_COUNT, /* Last; not a version */
};
diff --git a/drivers/net/ipa/reg.h b/drivers/net/ipa/reg.h
new file mode 100644
index 000000000000..57b457f39b6e
--- /dev/null
+++ b/drivers/net/ipa/reg.h
@@ -0,0 +1,133 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* *Copyright (C) 2022-2023 Linaro Ltd. */
+
+#ifndef _REG_H_
+#define _REG_H_
+
+#include <linux/types.h>
+#include <linux/bits.h>
+
+/**
+ * struct reg - A register descriptor
+ * @offset: Register offset relative to base of register memory
+ * @stride: Distance between two instances, if parameterized
+ * @fcount: Number of entries in the @fmask array
+ * @fmask: Array of mask values defining position and width of fields
+ * @name: Upper-case name of the register
+ */
+struct reg {
+ u32 offset;
+ u32 stride;
+ u32 fcount;
+ const u32 *fmask; /* BIT(nr) or GENMASK(h, l) */
+ const char *name;
+};
+
+/* Helper macro for defining "simple" (non-parameterized) registers */
+#define REG(__NAME, __reg_id, __offset) \
+ REG_STRIDE(__NAME, __reg_id, __offset, 0)
+
+/* Helper macro for defining parameterized registers, specifying stride */
+#define REG_STRIDE(__NAME, __reg_id, __offset, __stride) \
+ static const struct reg reg_ ## __reg_id = { \
+ .name = #__NAME, \
+ .offset = __offset, \
+ .stride = __stride, \
+ }
+
+#define REG_FIELDS(__NAME, __name, __offset) \
+ REG_STRIDE_FIELDS(__NAME, __name, __offset, 0)
+
+#define REG_STRIDE_FIELDS(__NAME, __name, __offset, __stride) \
+ static const struct reg reg_ ## __name = { \
+ .name = #__NAME, \
+ .offset = __offset, \
+ .stride = __stride, \
+ .fcount = ARRAY_SIZE(reg_ ## __name ## _fmask), \
+ .fmask = reg_ ## __name ## _fmask, \
+ }
+
+/**
+ * struct regs - Description of registers supported by hardware
+ * @reg_count: Number of registers in the @reg[] array
+ * @reg: Array of register descriptors
+ */
+struct regs {
+ u32 reg_count;
+ const struct reg **reg;
+};
+
+static inline const struct reg *reg(const struct regs *regs, u32 reg_id)
+{
+ if (WARN(reg_id >= regs->reg_count,
+ "reg out of range (%u > %u)\n", reg_id, regs->reg_count - 1))
+ return NULL;
+
+ return regs->reg[reg_id];
+}
+
+/* Return the field mask for a field in a register, or 0 on error */
+static inline u32 reg_fmask(const struct reg *reg, u32 field_id)
+{
+ if (!reg || WARN_ON(field_id >= reg->fcount))
+ return 0;
+
+ return reg->fmask[field_id];
+}
+
+/* Return the mask for a single-bit field in a register, or 0 on error */
+static inline u32 reg_bit(const struct reg *reg, u32 field_id)
+{
+ u32 fmask = reg_fmask(reg, field_id);
+
+ if (WARN_ON(!is_power_of_2(fmask)))
+ return 0;
+
+ return fmask;
+}
+
+/* Return the maximum value representable by the given field; always 2^n - 1 */
+static inline u32 reg_field_max(const struct reg *reg, u32 field_id)
+{
+ u32 fmask = reg_fmask(reg, field_id);
+
+ return fmask ? fmask >> __ffs(fmask) : 0;
+}
+
+/* Encode a value into the given field of a register */
+static inline u32 reg_encode(const struct reg *reg, u32 field_id, u32 val)
+{
+ u32 fmask = reg_fmask(reg, field_id);
+
+ if (!fmask)
+ return 0;
+
+ val <<= __ffs(fmask);
+ if (WARN_ON(val & ~fmask))
+ return 0;
+
+ return val;
+}
+
+/* Given a register value, decode (extract) the value in the given field */
+static inline u32 reg_decode(const struct reg *reg, u32 field_id, u32 val)
+{
+ u32 fmask = reg_fmask(reg, field_id);
+
+ return fmask ? (val & fmask) >> __ffs(fmask) : 0;
+}
+
+/* Returns 0 for NULL reg; warning should have already been issued */
+static inline u32 reg_offset(const struct reg *reg)
+{
+ return reg ? reg->offset : 0;
+}
+
+/* Returns 0 for NULL reg; warning should have already been issued */
+static inline u32 reg_n_offset(const struct reg *reg, u32 n)
+{
+ return reg ? reg->offset + n * reg->stride : 0;
+}
+
+#endif /* _REG_H_ */
diff --git a/drivers/net/ipa/reg/gsi_reg-v3.1.c b/drivers/net/ipa/reg/gsi_reg-v3.1.c
new file mode 100644
index 000000000000..e036805a7882
--- /dev/null
+++ b/drivers/net/ipa/reg/gsi_reg-v3.1.c
@@ -0,0 +1,291 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (C) 2023 Linaro Ltd. */
+
+#include <linux/types.h>
+
+#include "../gsi.h"
+#include "../reg.h"
+#include "../gsi_reg.h"
+
+REG(INTER_EE_SRC_CH_IRQ_MSK, inter_ee_src_ch_irq_msk,
+ 0x0000c020 + 0x1000 * GSI_EE_AP);
+
+REG(INTER_EE_SRC_EV_CH_IRQ_MSK, inter_ee_src_ev_ch_irq_msk,
+ 0x0000c024 + 0x1000 * GSI_EE_AP);
+
+static const u32 reg_ch_c_cntxt_0_fmask[] = {
+ [CHTYPE_PROTOCOL] = GENMASK(2, 0),
+ [CHTYPE_DIR] = BIT(3),
+ [CH_EE] = GENMASK(7, 4),
+ [CHID] = GENMASK(12, 8),
+ /* Bit 13 reserved */
+ [ERINDEX] = GENMASK(18, 14),
+ /* Bit 19 reserved */
+ [CHSTATE] = GENMASK(23, 20),
+ [ELEMENT_SIZE] = GENMASK(31, 24),
+};
+
+REG_STRIDE_FIELDS(CH_C_CNTXT_0, ch_c_cntxt_0,
+ 0x0001c000 + 0x4000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_ch_c_cntxt_1_fmask[] = {
+ [CH_R_LENGTH] = GENMASK(15, 0),
+ /* Bits 16-31 reserved */
+};
+
+REG_STRIDE_FIELDS(CH_C_CNTXT_1, ch_c_cntxt_1,
+ 0x0001c004 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_CNTXT_2, ch_c_cntxt_2, 0x0001c008 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_CNTXT_3, ch_c_cntxt_3, 0x0001c00c + 0x4000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_ch_c_qos_fmask[] = {
+ [WRR_WEIGHT] = GENMASK(3, 0),
+ /* Bits 4-7 reserved */
+ [MAX_PREFETCH] = BIT(8),
+ [USE_DB_ENG] = BIT(9),
+ /* Bits 10-31 reserved */
+};
+
+REG_STRIDE_FIELDS(CH_C_QOS, ch_c_qos, 0x0001c05c + 0x4000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_error_log_fmask[] = {
+ [ERR_ARG3] = GENMASK(3, 0),
+ [ERR_ARG2] = GENMASK(7, 4),
+ [ERR_ARG1] = GENMASK(11, 8),
+ [ERR_CODE] = GENMASK(15, 12),
+ /* Bits 16-18 reserved */
+ [ERR_VIRT_IDX] = GENMASK(23, 19),
+ [ERR_TYPE] = GENMASK(27, 24),
+ [ERR_EE] = GENMASK(31, 28),
+};
+
+REG_STRIDE(CH_C_SCRATCH_0, ch_c_scratch_0,
+ 0x0001c060 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_SCRATCH_1, ch_c_scratch_1,
+ 0x0001c064 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_SCRATCH_2, ch_c_scratch_2,
+ 0x0001c068 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_SCRATCH_3, ch_c_scratch_3,
+ 0x0001c06c + 0x4000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_ev_ch_e_cntxt_0_fmask[] = {
+ [EV_CHTYPE] = GENMASK(3, 0),
+ [EV_EE] = GENMASK(7, 4),
+ [EV_EVCHID] = GENMASK(15, 8),
+ [EV_INTYPE] = BIT(16),
+ /* Bits 17-19 reserved */
+ [EV_CHSTATE] = GENMASK(23, 20),
+ [EV_ELEMENT_SIZE] = GENMASK(31, 24),
+};
+
+REG_STRIDE_FIELDS(EV_CH_E_CNTXT_0, ev_ch_e_cntxt_0,
+ 0x0001d000 + 0x4000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_ev_ch_e_cntxt_1_fmask[] = {
+ [R_LENGTH] = GENMASK(15, 0),
+};
+
+REG_STRIDE_FIELDS(EV_CH_E_CNTXT_1, ev_ch_e_cntxt_1,
+ 0x0001d004 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_2, ev_ch_e_cntxt_2,
+ 0x0001d008 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_3, ev_ch_e_cntxt_3,
+ 0x0001d00c + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_4, ev_ch_e_cntxt_4,
+ 0x0001d010 + 0x4000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_ev_ch_e_cntxt_8_fmask[] = {
+ [EV_MODT] = GENMASK(15, 0),
+ [EV_MODC] = GENMASK(23, 16),
+ [EV_MOD_CNT] = GENMASK(31, 24),
+};
+
+REG_STRIDE_FIELDS(EV_CH_E_CNTXT_8, ev_ch_e_cntxt_8,
+ 0x0001d020 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_9, ev_ch_e_cntxt_9,
+ 0x0001d024 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_10, ev_ch_e_cntxt_10,
+ 0x0001d028 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_11, ev_ch_e_cntxt_11,
+ 0x0001d02c + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_12, ev_ch_e_cntxt_12,
+ 0x0001d030 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_13, ev_ch_e_cntxt_13,
+ 0x0001d034 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_SCRATCH_0, ev_ch_e_scratch_0,
+ 0x0001d048 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_SCRATCH_1, ev_ch_e_scratch_1,
+ 0x0001d04c + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_DOORBELL_0, ch_c_doorbell_0,
+ 0x0001e000 + 0x4000 * GSI_EE_AP, 0x08);
+
+REG_STRIDE(EV_CH_E_DOORBELL_0, ev_ch_e_doorbell_0,
+ 0x0001e100 + 0x4000 * GSI_EE_AP, 0x08);
+
+static const u32 reg_gsi_status_fmask[] = {
+ [ENABLED] = BIT(0),
+ /* Bits 1-31 reserved */
+};
+
+REG_FIELDS(GSI_STATUS, gsi_status, 0x0001f000 + 0x4000 * GSI_EE_AP);
+
+static const u32 reg_ch_cmd_fmask[] = {
+ [CH_CHID] = GENMASK(7, 0),
+ /* Bits 8-23 reserved */
+ [CH_OPCODE] = GENMASK(31, 24),
+};
+
+REG_FIELDS(CH_CMD, ch_cmd, 0x0001f008 + 0x4000 * GSI_EE_AP);
+
+static const u32 reg_ev_ch_cmd_fmask[] = {
+ [EV_CHID] = GENMASK(7, 0),
+ /* Bits 8-23 reserved */
+ [EV_OPCODE] = GENMASK(31, 24),
+};
+
+REG_FIELDS(EV_CH_CMD, ev_ch_cmd, 0x0001f010 + 0x4000 * GSI_EE_AP);
+
+static const u32 reg_generic_cmd_fmask[] = {
+ [GENERIC_OPCODE] = GENMASK(4, 0),
+ [GENERIC_CHID] = GENMASK(9, 5),
+ [GENERIC_EE] = GENMASK(13, 10),
+ /* Bits 14-31 reserved */
+};
+
+REG_FIELDS(GENERIC_CMD, generic_cmd, 0x0001f018 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_TYPE_IRQ, cntxt_type_irq, 0x0001f080 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_TYPE_IRQ_MSK, cntxt_type_irq_msk, 0x0001f088 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_CH_IRQ, cntxt_src_ch_irq, 0x0001f090 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_EV_CH_IRQ, cntxt_src_ev_ch_irq, 0x0001f094 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_CH_IRQ_MSK, cntxt_src_ch_irq_msk,
+ 0x0001f098 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_EV_CH_IRQ_MSK, cntxt_src_ev_ch_irq_msk,
+ 0x0001f09c + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_CH_IRQ_CLR, cntxt_src_ch_irq_clr,
+ 0x0001f0a0 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_EV_CH_IRQ_CLR, cntxt_src_ev_ch_irq_clr,
+ 0x0001f0a4 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_IEOB_IRQ, cntxt_src_ieob_irq, 0x0001f0b0 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_IEOB_IRQ_MSK, cntxt_src_ieob_irq_msk,
+ 0x0001f0b8 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_IEOB_IRQ_CLR, cntxt_src_ieob_irq_clr,
+ 0x0001f0c0 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_GLOB_IRQ_STTS, cntxt_glob_irq_stts, 0x0001f100 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_GLOB_IRQ_EN, cntxt_glob_irq_en, 0x0001f108 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_GLOB_IRQ_CLR, cntxt_glob_irq_clr, 0x0001f110 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_GSI_IRQ_STTS, cntxt_gsi_irq_stts, 0x0001f118 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_GSI_IRQ_EN, cntxt_gsi_irq_en, 0x0001f120 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_GSI_IRQ_CLR, cntxt_gsi_irq_clr, 0x0001f128 + 0x4000 * GSI_EE_AP);
+
+static const u32 reg_cntxt_intset_fmask[] = {
+ [INTYPE] = BIT(0)
+ /* Bits 1-31 reserved */
+};
+
+REG_FIELDS(CNTXT_INTSET, cntxt_intset, 0x0001f180 + 0x4000 * GSI_EE_AP);
+
+REG_FIELDS(ERROR_LOG, error_log, 0x0001f200 + 0x4000 * GSI_EE_AP);
+
+REG(ERROR_LOG_CLR, error_log_clr, 0x0001f210 + 0x4000 * GSI_EE_AP);
+
+static const u32 reg_cntxt_scratch_0_fmask[] = {
+ [INTER_EE_RESULT] = GENMASK(2, 0),
+ /* Bits 3-4 reserved */
+ [GENERIC_EE_RESULT] = GENMASK(7, 5),
+ /* Bits 8-31 reserved */
+};
+
+REG_FIELDS(CNTXT_SCRATCH_0, cntxt_scratch_0, 0x0001f400 + 0x4000 * GSI_EE_AP);
+
+static const struct reg *reg_array[] = {
+ [INTER_EE_SRC_CH_IRQ_MSK] = &reg_inter_ee_src_ch_irq_msk,
+ [INTER_EE_SRC_EV_CH_IRQ_MSK] = &reg_inter_ee_src_ev_ch_irq_msk,
+ [CH_C_CNTXT_0] = &reg_ch_c_cntxt_0,
+ [CH_C_CNTXT_1] = &reg_ch_c_cntxt_1,
+ [CH_C_CNTXT_2] = &reg_ch_c_cntxt_2,
+ [CH_C_CNTXT_3] = &reg_ch_c_cntxt_3,
+ [CH_C_QOS] = &reg_ch_c_qos,
+ [CH_C_SCRATCH_0] = &reg_ch_c_scratch_0,
+ [CH_C_SCRATCH_1] = &reg_ch_c_scratch_1,
+ [CH_C_SCRATCH_2] = &reg_ch_c_scratch_2,
+ [CH_C_SCRATCH_3] = &reg_ch_c_scratch_3,
+ [EV_CH_E_CNTXT_0] = &reg_ev_ch_e_cntxt_0,
+ [EV_CH_E_CNTXT_1] = &reg_ev_ch_e_cntxt_1,
+ [EV_CH_E_CNTXT_2] = &reg_ev_ch_e_cntxt_2,
+ [EV_CH_E_CNTXT_3] = &reg_ev_ch_e_cntxt_3,
+ [EV_CH_E_CNTXT_4] = &reg_ev_ch_e_cntxt_4,
+ [EV_CH_E_CNTXT_8] = &reg_ev_ch_e_cntxt_8,
+ [EV_CH_E_CNTXT_9] = &reg_ev_ch_e_cntxt_9,
+ [EV_CH_E_CNTXT_10] = &reg_ev_ch_e_cntxt_10,
+ [EV_CH_E_CNTXT_11] = &reg_ev_ch_e_cntxt_11,
+ [EV_CH_E_CNTXT_12] = &reg_ev_ch_e_cntxt_12,
+ [EV_CH_E_CNTXT_13] = &reg_ev_ch_e_cntxt_13,
+ [EV_CH_E_SCRATCH_0] = &reg_ev_ch_e_scratch_0,
+ [EV_CH_E_SCRATCH_1] = &reg_ev_ch_e_scratch_1,
+ [CH_C_DOORBELL_0] = &reg_ch_c_doorbell_0,
+ [EV_CH_E_DOORBELL_0] = &reg_ev_ch_e_doorbell_0,
+ [GSI_STATUS] = &reg_gsi_status,
+ [CH_CMD] = &reg_ch_cmd,
+ [EV_CH_CMD] = &reg_ev_ch_cmd,
+ [GENERIC_CMD] = &reg_generic_cmd,
+ [CNTXT_TYPE_IRQ] = &reg_cntxt_type_irq,
+ [CNTXT_TYPE_IRQ_MSK] = &reg_cntxt_type_irq_msk,
+ [CNTXT_SRC_CH_IRQ] = &reg_cntxt_src_ch_irq,
+ [CNTXT_SRC_EV_CH_IRQ] = &reg_cntxt_src_ev_ch_irq,
+ [CNTXT_SRC_CH_IRQ_MSK] = &reg_cntxt_src_ch_irq_msk,
+ [CNTXT_SRC_EV_CH_IRQ_MSK] = &reg_cntxt_src_ev_ch_irq_msk,
+ [CNTXT_SRC_CH_IRQ_CLR] = &reg_cntxt_src_ch_irq_clr,
+ [CNTXT_SRC_EV_CH_IRQ_CLR] = &reg_cntxt_src_ev_ch_irq_clr,
+ [CNTXT_SRC_IEOB_IRQ] = &reg_cntxt_src_ieob_irq,
+ [CNTXT_SRC_IEOB_IRQ_MSK] = &reg_cntxt_src_ieob_irq_msk,
+ [CNTXT_SRC_IEOB_IRQ_CLR] = &reg_cntxt_src_ieob_irq_clr,
+ [CNTXT_GLOB_IRQ_STTS] = &reg_cntxt_glob_irq_stts,
+ [CNTXT_GLOB_IRQ_EN] = &reg_cntxt_glob_irq_en,
+ [CNTXT_GLOB_IRQ_CLR] = &reg_cntxt_glob_irq_clr,
+ [CNTXT_GSI_IRQ_STTS] = &reg_cntxt_gsi_irq_stts,
+ [CNTXT_GSI_IRQ_EN] = &reg_cntxt_gsi_irq_en,
+ [CNTXT_GSI_IRQ_CLR] = &reg_cntxt_gsi_irq_clr,
+ [CNTXT_INTSET] = &reg_cntxt_intset,
+ [ERROR_LOG] = &reg_error_log,
+ [ERROR_LOG_CLR] = &reg_error_log_clr,
+ [CNTXT_SCRATCH_0] = &reg_cntxt_scratch_0,
+};
+
+const struct regs gsi_regs_v3_1 = {
+ .reg_count = ARRAY_SIZE(reg_array),
+ .reg = reg_array,
+};
diff --git a/drivers/net/ipa/reg/gsi_reg-v3.5.1.c b/drivers/net/ipa/reg/gsi_reg-v3.5.1.c
new file mode 100644
index 000000000000..8c3ab3a5288e
--- /dev/null
+++ b/drivers/net/ipa/reg/gsi_reg-v3.5.1.c
@@ -0,0 +1,303 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (C) 2023 Linaro Ltd. */
+
+#include <linux/types.h>
+
+#include "../gsi.h"
+#include "../reg.h"
+#include "../gsi_reg.h"
+
+REG(INTER_EE_SRC_CH_IRQ_MSK, inter_ee_src_ch_irq_msk,
+ 0x0000c020 + 0x1000 * GSI_EE_AP);
+
+REG(INTER_EE_SRC_EV_CH_IRQ_MSK, inter_ee_src_ev_ch_irq_msk,
+ 0x0000c024 + 0x1000 * GSI_EE_AP);
+
+static const u32 reg_ch_c_cntxt_0_fmask[] = {
+ [CHTYPE_PROTOCOL] = GENMASK(2, 0),
+ [CHTYPE_DIR] = BIT(3),
+ [CH_EE] = GENMASK(7, 4),
+ [CHID] = GENMASK(12, 8),
+ /* Bit 13 reserved */
+ [ERINDEX] = GENMASK(18, 14),
+ /* Bit 19 reserved */
+ [CHSTATE] = GENMASK(23, 20),
+ [ELEMENT_SIZE] = GENMASK(31, 24),
+};
+
+REG_STRIDE_FIELDS(CH_C_CNTXT_0, ch_c_cntxt_0,
+ 0x0001c000 + 0x4000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_ch_c_cntxt_1_fmask[] = {
+ [CH_R_LENGTH] = GENMASK(15, 0),
+ /* Bits 16-31 reserved */
+};
+
+REG_STRIDE_FIELDS(CH_C_CNTXT_1, ch_c_cntxt_1,
+ 0x0001c004 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_CNTXT_2, ch_c_cntxt_2, 0x0001c008 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_CNTXT_3, ch_c_cntxt_3, 0x0001c00c + 0x4000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_ch_c_qos_fmask[] = {
+ [WRR_WEIGHT] = GENMASK(3, 0),
+ /* Bits 4-7 reserved */
+ [MAX_PREFETCH] = BIT(8),
+ [USE_DB_ENG] = BIT(9),
+ /* Bits 10-31 reserved */
+};
+
+REG_STRIDE_FIELDS(CH_C_QOS, ch_c_qos, 0x0001c05c + 0x4000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_error_log_fmask[] = {
+ [ERR_ARG3] = GENMASK(3, 0),
+ [ERR_ARG2] = GENMASK(7, 4),
+ [ERR_ARG1] = GENMASK(11, 8),
+ [ERR_CODE] = GENMASK(15, 12),
+ /* Bits 16-18 reserved */
+ [ERR_VIRT_IDX] = GENMASK(23, 19),
+ [ERR_TYPE] = GENMASK(27, 24),
+ [ERR_EE] = GENMASK(31, 28),
+};
+
+REG_STRIDE(CH_C_SCRATCH_0, ch_c_scratch_0,
+ 0x0001c060 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_SCRATCH_1, ch_c_scratch_1,
+ 0x0001c064 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_SCRATCH_2, ch_c_scratch_2,
+ 0x0001c068 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_SCRATCH_3, ch_c_scratch_3,
+ 0x0001c06c + 0x4000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_ev_ch_e_cntxt_0_fmask[] = {
+ [EV_CHTYPE] = GENMASK(3, 0),
+ [EV_EE] = GENMASK(7, 4),
+ [EV_EVCHID] = GENMASK(15, 8),
+ [EV_INTYPE] = BIT(16),
+ /* Bits 17-19 reserved */
+ [EV_CHSTATE] = GENMASK(23, 20),
+ [EV_ELEMENT_SIZE] = GENMASK(31, 24),
+};
+
+REG_STRIDE_FIELDS(EV_CH_E_CNTXT_0, ev_ch_e_cntxt_0,
+ 0x0001d000 + 0x4000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_ev_ch_e_cntxt_1_fmask[] = {
+ [R_LENGTH] = GENMASK(15, 0),
+};
+
+REG_STRIDE_FIELDS(EV_CH_E_CNTXT_1, ev_ch_e_cntxt_1,
+ 0x0001d004 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_2, ev_ch_e_cntxt_2,
+ 0x0001d008 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_3, ev_ch_e_cntxt_3,
+ 0x0001d00c + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_4, ev_ch_e_cntxt_4,
+ 0x0001d010 + 0x4000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_ev_ch_e_cntxt_8_fmask[] = {
+ [EV_MODT] = GENMASK(15, 0),
+ [EV_MODC] = GENMASK(23, 16),
+ [EV_MOD_CNT] = GENMASK(31, 24),
+};
+
+REG_STRIDE_FIELDS(EV_CH_E_CNTXT_8, ev_ch_e_cntxt_8,
+ 0x0001d020 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_9, ev_ch_e_cntxt_9,
+ 0x0001d024 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_10, ev_ch_e_cntxt_10,
+ 0x0001d028 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_11, ev_ch_e_cntxt_11,
+ 0x0001d02c + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_12, ev_ch_e_cntxt_12,
+ 0x0001d030 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_13, ev_ch_e_cntxt_13,
+ 0x0001d034 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_SCRATCH_0, ev_ch_e_scratch_0,
+ 0x0001d048 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_SCRATCH_1, ev_ch_e_scratch_1,
+ 0x0001d04c + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_DOORBELL_0, ch_c_doorbell_0,
+ 0x0001e000 + 0x4000 * GSI_EE_AP, 0x08);
+
+REG_STRIDE(EV_CH_E_DOORBELL_0, ev_ch_e_doorbell_0,
+ 0x0001e100 + 0x4000 * GSI_EE_AP, 0x08);
+
+static const u32 reg_gsi_status_fmask[] = {
+ [ENABLED] = BIT(0),
+ /* Bits 1-31 reserved */
+};
+
+REG_FIELDS(GSI_STATUS, gsi_status, 0x0001f000 + 0x4000 * GSI_EE_AP);
+
+static const u32 reg_ch_cmd_fmask[] = {
+ [CH_CHID] = GENMASK(7, 0),
+ /* Bits 8-23 reserved */
+ [CH_OPCODE] = GENMASK(31, 24),
+};
+
+REG_FIELDS(CH_CMD, ch_cmd, 0x0001f008 + 0x4000 * GSI_EE_AP);
+
+static const u32 reg_ev_ch_cmd_fmask[] = {
+ [EV_CHID] = GENMASK(7, 0),
+ /* Bits 8-23 reserved */
+ [EV_OPCODE] = GENMASK(31, 24),
+};
+
+REG_FIELDS(EV_CH_CMD, ev_ch_cmd, 0x0001f010 + 0x4000 * GSI_EE_AP);
+
+static const u32 reg_generic_cmd_fmask[] = {
+ [GENERIC_OPCODE] = GENMASK(4, 0),
+ [GENERIC_CHID] = GENMASK(9, 5),
+ [GENERIC_EE] = GENMASK(13, 10),
+ /* Bits 14-31 reserved */
+};
+
+REG_FIELDS(GENERIC_CMD, generic_cmd, 0x0001f018 + 0x4000 * GSI_EE_AP);
+
+static const u32 reg_hw_param_2_fmask[] = {
+ [IRAM_SIZE] = GENMASK(2, 0),
+ [NUM_CH_PER_EE] = GENMASK(7, 3),
+ [NUM_EV_PER_EE] = GENMASK(12, 8),
+ [GSI_CH_PEND_TRANSLATE] = BIT(13),
+ [GSI_CH_FULL_LOGIC] = BIT(14),
+ /* Bits 15-31 reserved */
+};
+
+REG_FIELDS(HW_PARAM_2, hw_param_2, 0x0001f040 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_TYPE_IRQ, cntxt_type_irq, 0x0001f080 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_TYPE_IRQ_MSK, cntxt_type_irq_msk, 0x0001f088 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_CH_IRQ, cntxt_src_ch_irq, 0x0001f090 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_EV_CH_IRQ, cntxt_src_ev_ch_irq, 0x0001f094 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_CH_IRQ_MSK, cntxt_src_ch_irq_msk,
+ 0x0001f098 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_EV_CH_IRQ_MSK, cntxt_src_ev_ch_irq_msk,
+ 0x0001f09c + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_CH_IRQ_CLR, cntxt_src_ch_irq_clr,
+ 0x0001f0a0 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_EV_CH_IRQ_CLR, cntxt_src_ev_ch_irq_clr,
+ 0x0001f0a4 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_IEOB_IRQ, cntxt_src_ieob_irq, 0x0001f0b0 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_IEOB_IRQ_MSK, cntxt_src_ieob_irq_msk,
+ 0x0001f0b8 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_IEOB_IRQ_CLR, cntxt_src_ieob_irq_clr,
+ 0x0001f0c0 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_GLOB_IRQ_STTS, cntxt_glob_irq_stts, 0x0001f100 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_GLOB_IRQ_EN, cntxt_glob_irq_en, 0x0001f108 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_GLOB_IRQ_CLR, cntxt_glob_irq_clr, 0x0001f110 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_GSI_IRQ_STTS, cntxt_gsi_irq_stts, 0x0001f118 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_GSI_IRQ_EN, cntxt_gsi_irq_en, 0x0001f120 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_GSI_IRQ_CLR, cntxt_gsi_irq_clr, 0x0001f128 + 0x4000 * GSI_EE_AP);
+
+static const u32 reg_cntxt_intset_fmask[] = {
+ [INTYPE] = BIT(0)
+ /* Bits 1-31 reserved */
+};
+
+REG_FIELDS(CNTXT_INTSET, cntxt_intset, 0x0001f180 + 0x4000 * GSI_EE_AP);
+
+REG_FIELDS(ERROR_LOG, error_log, 0x0001f200 + 0x4000 * GSI_EE_AP);
+
+REG(ERROR_LOG_CLR, error_log_clr, 0x0001f210 + 0x4000 * GSI_EE_AP);
+
+static const u32 reg_cntxt_scratch_0_fmask[] = {
+ [INTER_EE_RESULT] = GENMASK(2, 0),
+ /* Bits 3-4 reserved */
+ [GENERIC_EE_RESULT] = GENMASK(7, 5),
+ /* Bits 8-31 reserved */
+};
+
+REG_FIELDS(CNTXT_SCRATCH_0, cntxt_scratch_0, 0x0001f400 + 0x4000 * GSI_EE_AP);
+
+static const struct reg *reg_array[] = {
+ [INTER_EE_SRC_CH_IRQ_MSK] = &reg_inter_ee_src_ch_irq_msk,
+ [INTER_EE_SRC_EV_CH_IRQ_MSK] = &reg_inter_ee_src_ev_ch_irq_msk,
+ [CH_C_CNTXT_0] = &reg_ch_c_cntxt_0,
+ [CH_C_CNTXT_1] = &reg_ch_c_cntxt_1,
+ [CH_C_CNTXT_2] = &reg_ch_c_cntxt_2,
+ [CH_C_CNTXT_3] = &reg_ch_c_cntxt_3,
+ [CH_C_QOS] = &reg_ch_c_qos,
+ [CH_C_SCRATCH_0] = &reg_ch_c_scratch_0,
+ [CH_C_SCRATCH_1] = &reg_ch_c_scratch_1,
+ [CH_C_SCRATCH_2] = &reg_ch_c_scratch_2,
+ [CH_C_SCRATCH_3] = &reg_ch_c_scratch_3,
+ [EV_CH_E_CNTXT_0] = &reg_ev_ch_e_cntxt_0,
+ [EV_CH_E_CNTXT_1] = &reg_ev_ch_e_cntxt_1,
+ [EV_CH_E_CNTXT_2] = &reg_ev_ch_e_cntxt_2,
+ [EV_CH_E_CNTXT_3] = &reg_ev_ch_e_cntxt_3,
+ [EV_CH_E_CNTXT_4] = &reg_ev_ch_e_cntxt_4,
+ [EV_CH_E_CNTXT_8] = &reg_ev_ch_e_cntxt_8,
+ [EV_CH_E_CNTXT_9] = &reg_ev_ch_e_cntxt_9,
+ [EV_CH_E_CNTXT_10] = &reg_ev_ch_e_cntxt_10,
+ [EV_CH_E_CNTXT_11] = &reg_ev_ch_e_cntxt_11,
+ [EV_CH_E_CNTXT_12] = &reg_ev_ch_e_cntxt_12,
+ [EV_CH_E_CNTXT_13] = &reg_ev_ch_e_cntxt_13,
+ [EV_CH_E_SCRATCH_0] = &reg_ev_ch_e_scratch_0,
+ [EV_CH_E_SCRATCH_1] = &reg_ev_ch_e_scratch_1,
+ [CH_C_DOORBELL_0] = &reg_ch_c_doorbell_0,
+ [EV_CH_E_DOORBELL_0] = &reg_ev_ch_e_doorbell_0,
+ [GSI_STATUS] = &reg_gsi_status,
+ [CH_CMD] = &reg_ch_cmd,
+ [EV_CH_CMD] = &reg_ev_ch_cmd,
+ [GENERIC_CMD] = &reg_generic_cmd,
+ [HW_PARAM_2] = &reg_hw_param_2,
+ [CNTXT_TYPE_IRQ] = &reg_cntxt_type_irq,
+ [CNTXT_TYPE_IRQ_MSK] = &reg_cntxt_type_irq_msk,
+ [CNTXT_SRC_CH_IRQ] = &reg_cntxt_src_ch_irq,
+ [CNTXT_SRC_EV_CH_IRQ] = &reg_cntxt_src_ev_ch_irq,
+ [CNTXT_SRC_CH_IRQ_MSK] = &reg_cntxt_src_ch_irq_msk,
+ [CNTXT_SRC_EV_CH_IRQ_MSK] = &reg_cntxt_src_ev_ch_irq_msk,
+ [CNTXT_SRC_CH_IRQ_CLR] = &reg_cntxt_src_ch_irq_clr,
+ [CNTXT_SRC_EV_CH_IRQ_CLR] = &reg_cntxt_src_ev_ch_irq_clr,
+ [CNTXT_SRC_IEOB_IRQ] = &reg_cntxt_src_ieob_irq,
+ [CNTXT_SRC_IEOB_IRQ_MSK] = &reg_cntxt_src_ieob_irq_msk,
+ [CNTXT_SRC_IEOB_IRQ_CLR] = &reg_cntxt_src_ieob_irq_clr,
+ [CNTXT_GLOB_IRQ_STTS] = &reg_cntxt_glob_irq_stts,
+ [CNTXT_GLOB_IRQ_EN] = &reg_cntxt_glob_irq_en,
+ [CNTXT_GLOB_IRQ_CLR] = &reg_cntxt_glob_irq_clr,
+ [CNTXT_GSI_IRQ_STTS] = &reg_cntxt_gsi_irq_stts,
+ [CNTXT_GSI_IRQ_EN] = &reg_cntxt_gsi_irq_en,
+ [CNTXT_GSI_IRQ_CLR] = &reg_cntxt_gsi_irq_clr,
+ [CNTXT_INTSET] = &reg_cntxt_intset,
+ [ERROR_LOG] = &reg_error_log,
+ [ERROR_LOG_CLR] = &reg_error_log_clr,
+ [CNTXT_SCRATCH_0] = &reg_cntxt_scratch_0,
+};
+
+const struct regs gsi_regs_v3_5_1 = {
+ .reg_count = ARRAY_SIZE(reg_array),
+ .reg = reg_array,
+};
diff --git a/drivers/net/ipa/reg/gsi_reg-v4.0.c b/drivers/net/ipa/reg/gsi_reg-v4.0.c
new file mode 100644
index 000000000000..7cc7a21d07f9
--- /dev/null
+++ b/drivers/net/ipa/reg/gsi_reg-v4.0.c
@@ -0,0 +1,308 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (C) 2023 Linaro Ltd. */
+
+#include <linux/types.h>
+
+#include "../gsi.h"
+#include "../reg.h"
+#include "../gsi_reg.h"
+
+REG(INTER_EE_SRC_CH_IRQ_MSK, inter_ee_src_ch_irq_msk,
+ 0x0000c020 + 0x1000 * GSI_EE_AP);
+
+REG(INTER_EE_SRC_EV_CH_IRQ_MSK, inter_ee_src_ev_ch_irq_msk,
+ 0x0000c024 + 0x1000 * GSI_EE_AP);
+
+static const u32 reg_ch_c_cntxt_0_fmask[] = {
+ [CHTYPE_PROTOCOL] = GENMASK(2, 0),
+ [CHTYPE_DIR] = BIT(3),
+ [CH_EE] = GENMASK(7, 4),
+ [CHID] = GENMASK(12, 8),
+ /* Bit 13 reserved */
+ [ERINDEX] = GENMASK(18, 14),
+ /* Bit 19 reserved */
+ [CHSTATE] = GENMASK(23, 20),
+ [ELEMENT_SIZE] = GENMASK(31, 24),
+};
+
+REG_STRIDE_FIELDS(CH_C_CNTXT_0, ch_c_cntxt_0,
+ 0x0001c000 + 0x4000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_ch_c_cntxt_1_fmask[] = {
+ [CH_R_LENGTH] = GENMASK(15, 0),
+ /* Bits 16-31 reserved */
+};
+
+REG_STRIDE_FIELDS(CH_C_CNTXT_1, ch_c_cntxt_1,
+ 0x0001c004 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_CNTXT_2, ch_c_cntxt_2, 0x0001c008 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_CNTXT_3, ch_c_cntxt_3, 0x0001c00c + 0x4000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_ch_c_qos_fmask[] = {
+ [WRR_WEIGHT] = GENMASK(3, 0),
+ /* Bits 4-7 reserved */
+ [MAX_PREFETCH] = BIT(8),
+ [USE_DB_ENG] = BIT(9),
+ [USE_ESCAPE_BUF_ONLY] = BIT(10),
+ /* Bits 11-31 reserved */
+};
+
+REG_STRIDE_FIELDS(CH_C_QOS, ch_c_qos, 0x0001c05c + 0x4000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_error_log_fmask[] = {
+ [ERR_ARG3] = GENMASK(3, 0),
+ [ERR_ARG2] = GENMASK(7, 4),
+ [ERR_ARG1] = GENMASK(11, 8),
+ [ERR_CODE] = GENMASK(15, 12),
+ /* Bits 16-18 reserved */
+ [ERR_VIRT_IDX] = GENMASK(23, 19),
+ [ERR_TYPE] = GENMASK(27, 24),
+ [ERR_EE] = GENMASK(31, 28),
+};
+
+REG_STRIDE(CH_C_SCRATCH_0, ch_c_scratch_0,
+ 0x0001c060 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_SCRATCH_1, ch_c_scratch_1,
+ 0x0001c064 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_SCRATCH_2, ch_c_scratch_2,
+ 0x0001c068 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_SCRATCH_3, ch_c_scratch_3,
+ 0x0001c06c + 0x4000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_ev_ch_e_cntxt_0_fmask[] = {
+ [EV_CHTYPE] = GENMASK(3, 0),
+ [EV_EE] = GENMASK(7, 4),
+ [EV_EVCHID] = GENMASK(15, 8),
+ [EV_INTYPE] = BIT(16),
+ /* Bits 17-19 reserved */
+ [EV_CHSTATE] = GENMASK(23, 20),
+ [EV_ELEMENT_SIZE] = GENMASK(31, 24),
+};
+
+REG_STRIDE_FIELDS(EV_CH_E_CNTXT_0, ev_ch_e_cntxt_0,
+ 0x0001d000 + 0x4000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_ev_ch_e_cntxt_1_fmask[] = {
+ [R_LENGTH] = GENMASK(15, 0),
+};
+
+REG_STRIDE_FIELDS(EV_CH_E_CNTXT_1, ev_ch_e_cntxt_1,
+ 0x0001d004 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_2, ev_ch_e_cntxt_2,
+ 0x0001d008 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_3, ev_ch_e_cntxt_3,
+ 0x0001d00c + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_4, ev_ch_e_cntxt_4,
+ 0x0001d010 + 0x4000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_ev_ch_e_cntxt_8_fmask[] = {
+ [EV_MODT] = GENMASK(15, 0),
+ [EV_MODC] = GENMASK(23, 16),
+ [EV_MOD_CNT] = GENMASK(31, 24),
+};
+
+REG_STRIDE_FIELDS(EV_CH_E_CNTXT_8, ev_ch_e_cntxt_8,
+ 0x0001d020 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_9, ev_ch_e_cntxt_9,
+ 0x0001d024 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_10, ev_ch_e_cntxt_10,
+ 0x0001d028 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_11, ev_ch_e_cntxt_11,
+ 0x0001d02c + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_12, ev_ch_e_cntxt_12,
+ 0x0001d030 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_13, ev_ch_e_cntxt_13,
+ 0x0001d034 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_SCRATCH_0, ev_ch_e_scratch_0,
+ 0x0001d048 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_SCRATCH_1, ev_ch_e_scratch_1,
+ 0x0001d04c + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_DOORBELL_0, ch_c_doorbell_0,
+ 0x0001e000 + 0x4000 * GSI_EE_AP, 0x08);
+
+REG_STRIDE(EV_CH_E_DOORBELL_0, ev_ch_e_doorbell_0,
+ 0x0001e100 + 0x4000 * GSI_EE_AP, 0x08);
+
+static const u32 reg_gsi_status_fmask[] = {
+ [ENABLED] = BIT(0),
+ /* Bits 1-31 reserved */
+};
+
+REG_FIELDS(GSI_STATUS, gsi_status, 0x0001f000 + 0x4000 * GSI_EE_AP);
+
+static const u32 reg_ch_cmd_fmask[] = {
+ [CH_CHID] = GENMASK(7, 0),
+ /* Bits 8-23 reserved */
+ [CH_OPCODE] = GENMASK(31, 24),
+};
+
+REG_FIELDS(CH_CMD, ch_cmd, 0x0001f008 + 0x4000 * GSI_EE_AP);
+
+static const u32 reg_ev_ch_cmd_fmask[] = {
+ [EV_CHID] = GENMASK(7, 0),
+ /* Bits 8-23 reserved */
+ [EV_OPCODE] = GENMASK(31, 24),
+};
+
+REG_FIELDS(EV_CH_CMD, ev_ch_cmd, 0x0001f010 + 0x4000 * GSI_EE_AP);
+
+static const u32 reg_generic_cmd_fmask[] = {
+ [GENERIC_OPCODE] = GENMASK(4, 0),
+ [GENERIC_CHID] = GENMASK(9, 5),
+ [GENERIC_EE] = GENMASK(13, 10),
+ /* Bits 14-31 reserved */
+};
+
+REG_FIELDS(GENERIC_CMD, generic_cmd, 0x0001f018 + 0x4000 * GSI_EE_AP);
+
+static const u32 reg_hw_param_2_fmask[] = {
+ [IRAM_SIZE] = GENMASK(2, 0),
+ [NUM_CH_PER_EE] = GENMASK(7, 3),
+ [NUM_EV_PER_EE] = GENMASK(12, 8),
+ [GSI_CH_PEND_TRANSLATE] = BIT(13),
+ [GSI_CH_FULL_LOGIC] = BIT(14),
+ [GSI_USE_SDMA] = BIT(15),
+ [GSI_SDMA_N_INT] = GENMASK(18, 16),
+ [GSI_SDMA_MAX_BURST] = GENMASK(26, 19),
+ [GSI_SDMA_N_IOVEC] = GENMASK(29, 27),
+ /* Bits 30-31 reserved */
+};
+
+REG_FIELDS(HW_PARAM_2, hw_param_2, 0x0001f040 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_TYPE_IRQ, cntxt_type_irq, 0x0001f080 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_TYPE_IRQ_MSK, cntxt_type_irq_msk, 0x0001f088 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_CH_IRQ, cntxt_src_ch_irq, 0x0001f090 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_EV_CH_IRQ, cntxt_src_ev_ch_irq, 0x0001f094 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_CH_IRQ_MSK, cntxt_src_ch_irq_msk,
+ 0x0001f098 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_EV_CH_IRQ_MSK, cntxt_src_ev_ch_irq_msk,
+ 0x0001f09c + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_CH_IRQ_CLR, cntxt_src_ch_irq_clr,
+ 0x0001f0a0 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_EV_CH_IRQ_CLR, cntxt_src_ev_ch_irq_clr,
+ 0x0001f0a4 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_IEOB_IRQ, cntxt_src_ieob_irq, 0x0001f0b0 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_IEOB_IRQ_MSK, cntxt_src_ieob_irq_msk,
+ 0x0001f0b8 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_IEOB_IRQ_CLR, cntxt_src_ieob_irq_clr,
+ 0x0001f0c0 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_GLOB_IRQ_STTS, cntxt_glob_irq_stts, 0x0001f100 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_GLOB_IRQ_EN, cntxt_glob_irq_en, 0x0001f108 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_GLOB_IRQ_CLR, cntxt_glob_irq_clr, 0x0001f110 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_GSI_IRQ_STTS, cntxt_gsi_irq_stts, 0x0001f118 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_GSI_IRQ_EN, cntxt_gsi_irq_en, 0x0001f120 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_GSI_IRQ_CLR, cntxt_gsi_irq_clr, 0x0001f128 + 0x4000 * GSI_EE_AP);
+
+static const u32 reg_cntxt_intset_fmask[] = {
+ [INTYPE] = BIT(0)
+ /* Bits 1-31 reserved */
+};
+
+REG_FIELDS(CNTXT_INTSET, cntxt_intset, 0x0001f180 + 0x4000 * GSI_EE_AP);
+
+REG_FIELDS(ERROR_LOG, error_log, 0x0001f200 + 0x4000 * GSI_EE_AP);
+
+REG(ERROR_LOG_CLR, error_log_clr, 0x0001f210 + 0x4000 * GSI_EE_AP);
+
+static const u32 reg_cntxt_scratch_0_fmask[] = {
+ [INTER_EE_RESULT] = GENMASK(2, 0),
+ /* Bits 3-4 reserved */
+ [GENERIC_EE_RESULT] = GENMASK(7, 5),
+ /* Bits 8-31 reserved */
+};
+
+REG_FIELDS(CNTXT_SCRATCH_0, cntxt_scratch_0, 0x0001f400 + 0x4000 * GSI_EE_AP);
+
+static const struct reg *reg_array[] = {
+ [INTER_EE_SRC_CH_IRQ_MSK] = &reg_inter_ee_src_ch_irq_msk,
+ [INTER_EE_SRC_EV_CH_IRQ_MSK] = &reg_inter_ee_src_ev_ch_irq_msk,
+ [CH_C_CNTXT_0] = &reg_ch_c_cntxt_0,
+ [CH_C_CNTXT_1] = &reg_ch_c_cntxt_1,
+ [CH_C_CNTXT_2] = &reg_ch_c_cntxt_2,
+ [CH_C_CNTXT_3] = &reg_ch_c_cntxt_3,
+ [CH_C_QOS] = &reg_ch_c_qos,
+ [CH_C_SCRATCH_0] = &reg_ch_c_scratch_0,
+ [CH_C_SCRATCH_1] = &reg_ch_c_scratch_1,
+ [CH_C_SCRATCH_2] = &reg_ch_c_scratch_2,
+ [CH_C_SCRATCH_3] = &reg_ch_c_scratch_3,
+ [EV_CH_E_CNTXT_0] = &reg_ev_ch_e_cntxt_0,
+ [EV_CH_E_CNTXT_1] = &reg_ev_ch_e_cntxt_1,
+ [EV_CH_E_CNTXT_2] = &reg_ev_ch_e_cntxt_2,
+ [EV_CH_E_CNTXT_3] = &reg_ev_ch_e_cntxt_3,
+ [EV_CH_E_CNTXT_4] = &reg_ev_ch_e_cntxt_4,
+ [EV_CH_E_CNTXT_8] = &reg_ev_ch_e_cntxt_8,
+ [EV_CH_E_CNTXT_9] = &reg_ev_ch_e_cntxt_9,
+ [EV_CH_E_CNTXT_10] = &reg_ev_ch_e_cntxt_10,
+ [EV_CH_E_CNTXT_11] = &reg_ev_ch_e_cntxt_11,
+ [EV_CH_E_CNTXT_12] = &reg_ev_ch_e_cntxt_12,
+ [EV_CH_E_CNTXT_13] = &reg_ev_ch_e_cntxt_13,
+ [EV_CH_E_SCRATCH_0] = &reg_ev_ch_e_scratch_0,
+ [EV_CH_E_SCRATCH_1] = &reg_ev_ch_e_scratch_1,
+ [CH_C_DOORBELL_0] = &reg_ch_c_doorbell_0,
+ [EV_CH_E_DOORBELL_0] = &reg_ev_ch_e_doorbell_0,
+ [GSI_STATUS] = &reg_gsi_status,
+ [CH_CMD] = &reg_ch_cmd,
+ [EV_CH_CMD] = &reg_ev_ch_cmd,
+ [GENERIC_CMD] = &reg_generic_cmd,
+ [HW_PARAM_2] = &reg_hw_param_2,
+ [CNTXT_TYPE_IRQ] = &reg_cntxt_type_irq,
+ [CNTXT_TYPE_IRQ_MSK] = &reg_cntxt_type_irq_msk,
+ [CNTXT_SRC_CH_IRQ] = &reg_cntxt_src_ch_irq,
+ [CNTXT_SRC_EV_CH_IRQ] = &reg_cntxt_src_ev_ch_irq,
+ [CNTXT_SRC_CH_IRQ_MSK] = &reg_cntxt_src_ch_irq_msk,
+ [CNTXT_SRC_EV_CH_IRQ_MSK] = &reg_cntxt_src_ev_ch_irq_msk,
+ [CNTXT_SRC_CH_IRQ_CLR] = &reg_cntxt_src_ch_irq_clr,
+ [CNTXT_SRC_EV_CH_IRQ_CLR] = &reg_cntxt_src_ev_ch_irq_clr,
+ [CNTXT_SRC_IEOB_IRQ] = &reg_cntxt_src_ieob_irq,
+ [CNTXT_SRC_IEOB_IRQ_MSK] = &reg_cntxt_src_ieob_irq_msk,
+ [CNTXT_SRC_IEOB_IRQ_CLR] = &reg_cntxt_src_ieob_irq_clr,
+ [CNTXT_GLOB_IRQ_STTS] = &reg_cntxt_glob_irq_stts,
+ [CNTXT_GLOB_IRQ_EN] = &reg_cntxt_glob_irq_en,
+ [CNTXT_GLOB_IRQ_CLR] = &reg_cntxt_glob_irq_clr,
+ [CNTXT_GSI_IRQ_STTS] = &reg_cntxt_gsi_irq_stts,
+ [CNTXT_GSI_IRQ_EN] = &reg_cntxt_gsi_irq_en,
+ [CNTXT_GSI_IRQ_CLR] = &reg_cntxt_gsi_irq_clr,
+ [CNTXT_INTSET] = &reg_cntxt_intset,
+ [ERROR_LOG] = &reg_error_log,
+ [ERROR_LOG_CLR] = &reg_error_log_clr,
+ [CNTXT_SCRATCH_0] = &reg_cntxt_scratch_0,
+};
+
+const struct regs gsi_regs_v4_0 = {
+ .reg_count = ARRAY_SIZE(reg_array),
+ .reg = reg_array,
+};
diff --git a/drivers/net/ipa/reg/gsi_reg-v4.11.c b/drivers/net/ipa/reg/gsi_reg-v4.11.c
new file mode 100644
index 000000000000..01696519032f
--- /dev/null
+++ b/drivers/net/ipa/reg/gsi_reg-v4.11.c
@@ -0,0 +1,313 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (C) 2023 Linaro Ltd. */
+
+#include <linux/types.h>
+
+#include "../gsi.h"
+#include "../reg.h"
+#include "../gsi_reg.h"
+
+REG(INTER_EE_SRC_CH_IRQ_MSK, inter_ee_src_ch_irq_msk,
+ 0x0000c020 + 0x1000 * GSI_EE_AP);
+
+REG(INTER_EE_SRC_EV_CH_IRQ_MSK, inter_ee_src_ev_ch_irq_msk,
+ 0x0000c024 + 0x1000 * GSI_EE_AP);
+
+static const u32 reg_ch_c_cntxt_0_fmask[] = {
+ [CHTYPE_PROTOCOL] = GENMASK(2, 0),
+ [CHTYPE_DIR] = BIT(3),
+ [CH_EE] = GENMASK(7, 4),
+ [CHID] = GENMASK(12, 8),
+ [CHTYPE_PROTOCOL_MSB] = BIT(13),
+ [ERINDEX] = GENMASK(18, 14),
+ /* Bit 19 reserved */
+ [CHSTATE] = GENMASK(23, 20),
+ [ELEMENT_SIZE] = GENMASK(31, 24),
+};
+
+REG_STRIDE_FIELDS(CH_C_CNTXT_0, ch_c_cntxt_0,
+ 0x0000f000 + 0x4000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_ch_c_cntxt_1_fmask[] = {
+ [CH_R_LENGTH] = GENMASK(19, 0),
+ /* Bits 20-31 reserved */
+};
+
+REG_STRIDE_FIELDS(CH_C_CNTXT_1, ch_c_cntxt_1,
+ 0x0000f004 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_CNTXT_2, ch_c_cntxt_2, 0x0000f008 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_CNTXT_3, ch_c_cntxt_3, 0x0000f00c + 0x4000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_ch_c_qos_fmask[] = {
+ [WRR_WEIGHT] = GENMASK(3, 0),
+ /* Bits 4-7 reserved */
+ [MAX_PREFETCH] = BIT(8),
+ [USE_DB_ENG] = BIT(9),
+ [PREFETCH_MODE] = GENMASK(13, 10),
+ /* Bits 14-15 reserved */
+ [EMPTY_LVL_THRSHOLD] = GENMASK(23, 16),
+ [DB_IN_BYTES] = BIT(24),
+ /* Bits 25-31 reserved */
+};
+
+REG_STRIDE_FIELDS(CH_C_QOS, ch_c_qos, 0x0000f05c + 0x4000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_error_log_fmask[] = {
+ [ERR_ARG3] = GENMASK(3, 0),
+ [ERR_ARG2] = GENMASK(7, 4),
+ [ERR_ARG1] = GENMASK(11, 8),
+ [ERR_CODE] = GENMASK(15, 12),
+ /* Bits 16-18 reserved */
+ [ERR_VIRT_IDX] = GENMASK(23, 19),
+ [ERR_TYPE] = GENMASK(27, 24),
+ [ERR_EE] = GENMASK(31, 28),
+};
+
+REG_STRIDE(CH_C_SCRATCH_0, ch_c_scratch_0,
+ 0x0000f060 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_SCRATCH_1, ch_c_scratch_1,
+ 0x0000f064 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_SCRATCH_2, ch_c_scratch_2,
+ 0x0000f068 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_SCRATCH_3, ch_c_scratch_3,
+ 0x0000f06c + 0x4000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_ev_ch_e_cntxt_0_fmask[] = {
+ [EV_CHTYPE] = GENMASK(3, 0),
+ [EV_EE] = GENMASK(7, 4),
+ [EV_EVCHID] = GENMASK(15, 8),
+ [EV_INTYPE] = BIT(16),
+ /* Bits 17-19 reserved */
+ [EV_CHSTATE] = GENMASK(23, 20),
+ [EV_ELEMENT_SIZE] = GENMASK(31, 24),
+};
+
+REG_STRIDE_FIELDS(EV_CH_E_CNTXT_0, ev_ch_e_cntxt_0,
+ 0x00010000 + 0x4000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_ev_ch_e_cntxt_1_fmask[] = {
+ [R_LENGTH] = GENMASK(19, 0),
+};
+
+REG_STRIDE_FIELDS(EV_CH_E_CNTXT_1, ev_ch_e_cntxt_1,
+ 0x00010004 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_2, ev_ch_e_cntxt_2,
+ 0x00010008 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_3, ev_ch_e_cntxt_3,
+ 0x0001000c + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_4, ev_ch_e_cntxt_4,
+ 0x00010010 + 0x4000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_ev_ch_e_cntxt_8_fmask[] = {
+ [EV_MODT] = GENMASK(15, 0),
+ [EV_MODC] = GENMASK(23, 16),
+ [EV_MOD_CNT] = GENMASK(31, 24),
+};
+
+REG_STRIDE_FIELDS(EV_CH_E_CNTXT_8, ev_ch_e_cntxt_8,
+ 0x00010020 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_9, ev_ch_e_cntxt_9,
+ 0x00010024 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_10, ev_ch_e_cntxt_10,
+ 0x00010028 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_11, ev_ch_e_cntxt_11,
+ 0x0001002c + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_12, ev_ch_e_cntxt_12,
+ 0x00010030 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_13, ev_ch_e_cntxt_13,
+ 0x00010034 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_SCRATCH_0, ev_ch_e_scratch_0,
+ 0x00010048 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_SCRATCH_1, ev_ch_e_scratch_1,
+ 0x0001004c + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_DOORBELL_0, ch_c_doorbell_0,
+ 0x00011000 + 0x4000 * GSI_EE_AP, 0x08);
+
+REG_STRIDE(EV_CH_E_DOORBELL_0, ev_ch_e_doorbell_0,
+ 0x00011100 + 0x4000 * GSI_EE_AP, 0x08);
+
+static const u32 reg_gsi_status_fmask[] = {
+ [ENABLED] = BIT(0),
+ /* Bits 1-31 reserved */
+};
+
+REG_FIELDS(GSI_STATUS, gsi_status, 0x00012000 + 0x4000 * GSI_EE_AP);
+
+static const u32 reg_ch_cmd_fmask[] = {
+ [CH_CHID] = GENMASK(7, 0),
+ /* Bits 8-23 reserved */
+ [CH_OPCODE] = GENMASK(31, 24),
+};
+
+REG_FIELDS(CH_CMD, ch_cmd, 0x00012008 + 0x4000 * GSI_EE_AP);
+
+static const u32 reg_ev_ch_cmd_fmask[] = {
+ [EV_CHID] = GENMASK(7, 0),
+ /* Bits 8-23 reserved */
+ [EV_OPCODE] = GENMASK(31, 24),
+};
+
+REG_FIELDS(EV_CH_CMD, ev_ch_cmd, 0x00012010 + 0x4000 * GSI_EE_AP);
+
+static const u32 reg_generic_cmd_fmask[] = {
+ [GENERIC_OPCODE] = GENMASK(4, 0),
+ [GENERIC_CHID] = GENMASK(9, 5),
+ [GENERIC_EE] = GENMASK(13, 10),
+ /* Bits 14-23 reserved */
+ [GENERIC_PARAMS] = GENMASK(31, 24),
+};
+
+REG_FIELDS(GENERIC_CMD, generic_cmd, 0x00012018 + 0x4000 * GSI_EE_AP);
+
+static const u32 reg_hw_param_2_fmask[] = {
+ [IRAM_SIZE] = GENMASK(2, 0),
+ [NUM_CH_PER_EE] = GENMASK(7, 3),
+ [NUM_EV_PER_EE] = GENMASK(12, 8),
+ [GSI_CH_PEND_TRANSLATE] = BIT(13),
+ [GSI_CH_FULL_LOGIC] = BIT(14),
+ [GSI_USE_SDMA] = BIT(15),
+ [GSI_SDMA_N_INT] = GENMASK(18, 16),
+ [GSI_SDMA_MAX_BURST] = GENMASK(26, 19),
+ [GSI_SDMA_N_IOVEC] = GENMASK(29, 27),
+ [GSI_USE_RD_WR_ENG] = BIT(30),
+ [GSI_USE_INTER_EE] = BIT(31),
+};
+
+REG_FIELDS(HW_PARAM_2, hw_param_2, 0x00012040 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_TYPE_IRQ, cntxt_type_irq, 0x00012080 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_TYPE_IRQ_MSK, cntxt_type_irq_msk, 0x00012088 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_CH_IRQ, cntxt_src_ch_irq, 0x00012090 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_EV_CH_IRQ, cntxt_src_ev_ch_irq, 0x00012094 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_CH_IRQ_MSK, cntxt_src_ch_irq_msk,
+ 0x00012098 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_EV_CH_IRQ_MSK, cntxt_src_ev_ch_irq_msk,
+ 0x0001209c + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_CH_IRQ_CLR, cntxt_src_ch_irq_clr,
+ 0x000120a0 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_EV_CH_IRQ_CLR, cntxt_src_ev_ch_irq_clr,
+ 0x000120a4 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_IEOB_IRQ, cntxt_src_ieob_irq, 0x000120b0 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_IEOB_IRQ_MSK, cntxt_src_ieob_irq_msk,
+ 0x000120b8 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_IEOB_IRQ_CLR, cntxt_src_ieob_irq_clr,
+ 0x000120c0 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_GLOB_IRQ_STTS, cntxt_glob_irq_stts, 0x00012100 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_GLOB_IRQ_EN, cntxt_glob_irq_en, 0x00012108 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_GLOB_IRQ_CLR, cntxt_glob_irq_clr, 0x00012110 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_GSI_IRQ_STTS, cntxt_gsi_irq_stts, 0x00012118 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_GSI_IRQ_EN, cntxt_gsi_irq_en, 0x00012120 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_GSI_IRQ_CLR, cntxt_gsi_irq_clr, 0x00012128 + 0x4000 * GSI_EE_AP);
+
+static const u32 reg_cntxt_intset_fmask[] = {
+ [INTYPE] = BIT(0)
+ /* Bits 1-31 reserved */
+};
+
+REG_FIELDS(CNTXT_INTSET, cntxt_intset, 0x00012180 + 0x4000 * GSI_EE_AP);
+
+REG_FIELDS(ERROR_LOG, error_log, 0x00012200 + 0x4000 * GSI_EE_AP);
+
+REG(ERROR_LOG_CLR, error_log_clr, 0x00012210 + 0x4000 * GSI_EE_AP);
+
+static const u32 reg_cntxt_scratch_0_fmask[] = {
+ [INTER_EE_RESULT] = GENMASK(2, 0),
+ /* Bits 3-4 reserved */
+ [GENERIC_EE_RESULT] = GENMASK(7, 5),
+ /* Bits 8-31 reserved */
+};
+
+REG_FIELDS(CNTXT_SCRATCH_0, cntxt_scratch_0, 0x00012400 + 0x4000 * GSI_EE_AP);
+
+static const struct reg *reg_array[] = {
+ [INTER_EE_SRC_CH_IRQ_MSK] = &reg_inter_ee_src_ch_irq_msk,
+ [INTER_EE_SRC_EV_CH_IRQ_MSK] = &reg_inter_ee_src_ev_ch_irq_msk,
+ [CH_C_CNTXT_0] = &reg_ch_c_cntxt_0,
+ [CH_C_CNTXT_1] = &reg_ch_c_cntxt_1,
+ [CH_C_CNTXT_2] = &reg_ch_c_cntxt_2,
+ [CH_C_CNTXT_3] = &reg_ch_c_cntxt_3,
+ [CH_C_QOS] = &reg_ch_c_qos,
+ [CH_C_SCRATCH_0] = &reg_ch_c_scratch_0,
+ [CH_C_SCRATCH_1] = &reg_ch_c_scratch_1,
+ [CH_C_SCRATCH_2] = &reg_ch_c_scratch_2,
+ [CH_C_SCRATCH_3] = &reg_ch_c_scratch_3,
+ [EV_CH_E_CNTXT_0] = &reg_ev_ch_e_cntxt_0,
+ [EV_CH_E_CNTXT_1] = &reg_ev_ch_e_cntxt_1,
+ [EV_CH_E_CNTXT_2] = &reg_ev_ch_e_cntxt_2,
+ [EV_CH_E_CNTXT_3] = &reg_ev_ch_e_cntxt_3,
+ [EV_CH_E_CNTXT_4] = &reg_ev_ch_e_cntxt_4,
+ [EV_CH_E_CNTXT_8] = &reg_ev_ch_e_cntxt_8,
+ [EV_CH_E_CNTXT_9] = &reg_ev_ch_e_cntxt_9,
+ [EV_CH_E_CNTXT_10] = &reg_ev_ch_e_cntxt_10,
+ [EV_CH_E_CNTXT_11] = &reg_ev_ch_e_cntxt_11,
+ [EV_CH_E_CNTXT_12] = &reg_ev_ch_e_cntxt_12,
+ [EV_CH_E_CNTXT_13] = &reg_ev_ch_e_cntxt_13,
+ [EV_CH_E_SCRATCH_0] = &reg_ev_ch_e_scratch_0,
+ [EV_CH_E_SCRATCH_1] = &reg_ev_ch_e_scratch_1,
+ [CH_C_DOORBELL_0] = &reg_ch_c_doorbell_0,
+ [EV_CH_E_DOORBELL_0] = &reg_ev_ch_e_doorbell_0,
+ [GSI_STATUS] = &reg_gsi_status,
+ [CH_CMD] = &reg_ch_cmd,
+ [EV_CH_CMD] = &reg_ev_ch_cmd,
+ [GENERIC_CMD] = &reg_generic_cmd,
+ [HW_PARAM_2] = &reg_hw_param_2,
+ [CNTXT_TYPE_IRQ] = &reg_cntxt_type_irq,
+ [CNTXT_TYPE_IRQ_MSK] = &reg_cntxt_type_irq_msk,
+ [CNTXT_SRC_CH_IRQ] = &reg_cntxt_src_ch_irq,
+ [CNTXT_SRC_EV_CH_IRQ] = &reg_cntxt_src_ev_ch_irq,
+ [CNTXT_SRC_CH_IRQ_MSK] = &reg_cntxt_src_ch_irq_msk,
+ [CNTXT_SRC_EV_CH_IRQ_MSK] = &reg_cntxt_src_ev_ch_irq_msk,
+ [CNTXT_SRC_CH_IRQ_CLR] = &reg_cntxt_src_ch_irq_clr,
+ [CNTXT_SRC_EV_CH_IRQ_CLR] = &reg_cntxt_src_ev_ch_irq_clr,
+ [CNTXT_SRC_IEOB_IRQ] = &reg_cntxt_src_ieob_irq,
+ [CNTXT_SRC_IEOB_IRQ_MSK] = &reg_cntxt_src_ieob_irq_msk,
+ [CNTXT_SRC_IEOB_IRQ_CLR] = &reg_cntxt_src_ieob_irq_clr,
+ [CNTXT_GLOB_IRQ_STTS] = &reg_cntxt_glob_irq_stts,
+ [CNTXT_GLOB_IRQ_EN] = &reg_cntxt_glob_irq_en,
+ [CNTXT_GLOB_IRQ_CLR] = &reg_cntxt_glob_irq_clr,
+ [CNTXT_GSI_IRQ_STTS] = &reg_cntxt_gsi_irq_stts,
+ [CNTXT_GSI_IRQ_EN] = &reg_cntxt_gsi_irq_en,
+ [CNTXT_GSI_IRQ_CLR] = &reg_cntxt_gsi_irq_clr,
+ [CNTXT_INTSET] = &reg_cntxt_intset,
+ [ERROR_LOG] = &reg_error_log,
+ [ERROR_LOG_CLR] = &reg_error_log_clr,
+ [CNTXT_SCRATCH_0] = &reg_cntxt_scratch_0,
+};
+
+const struct regs gsi_regs_v4_11 = {
+ .reg_count = ARRAY_SIZE(reg_array),
+ .reg = reg_array,
+};
diff --git a/drivers/net/ipa/reg/gsi_reg-v4.5.c b/drivers/net/ipa/reg/gsi_reg-v4.5.c
new file mode 100644
index 000000000000..648b51b88d4e
--- /dev/null
+++ b/drivers/net/ipa/reg/gsi_reg-v4.5.c
@@ -0,0 +1,311 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (C) 2023 Linaro Ltd. */
+
+#include <linux/types.h>
+
+#include "../gsi.h"
+#include "../reg.h"
+#include "../gsi_reg.h"
+
+REG(INTER_EE_SRC_CH_IRQ_MSK, inter_ee_src_ch_irq_msk,
+ 0x0000c020 + 0x1000 * GSI_EE_AP);
+
+REG(INTER_EE_SRC_EV_CH_IRQ_MSK, inter_ee_src_ev_ch_irq_msk,
+ 0x0000c024 + 0x1000 * GSI_EE_AP);
+
+static const u32 reg_ch_c_cntxt_0_fmask[] = {
+ [CHTYPE_PROTOCOL] = GENMASK(2, 0),
+ [CHTYPE_DIR] = BIT(3),
+ [CH_EE] = GENMASK(7, 4),
+ [CHID] = GENMASK(12, 8),
+ [CHTYPE_PROTOCOL_MSB] = BIT(13),
+ [ERINDEX] = GENMASK(18, 14),
+ /* Bit 19 reserved */
+ [CHSTATE] = GENMASK(23, 20),
+ [ELEMENT_SIZE] = GENMASK(31, 24),
+};
+
+REG_STRIDE_FIELDS(CH_C_CNTXT_0, ch_c_cntxt_0,
+ 0x0000f000 + 0x4000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_ch_c_cntxt_1_fmask[] = {
+ [CH_R_LENGTH] = GENMASK(15, 0),
+ /* Bits 16-31 reserved */
+};
+
+REG_STRIDE_FIELDS(CH_C_CNTXT_1, ch_c_cntxt_1,
+ 0x0000f004 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_CNTXT_2, ch_c_cntxt_2, 0x0000f008 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_CNTXT_3, ch_c_cntxt_3, 0x0000f00c + 0x4000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_ch_c_qos_fmask[] = {
+ [WRR_WEIGHT] = GENMASK(3, 0),
+ /* Bits 4-7 reserved */
+ [MAX_PREFETCH] = BIT(8),
+ [USE_DB_ENG] = BIT(9),
+ [PREFETCH_MODE] = GENMASK(13, 10),
+ /* Bits 14-15 reserved */
+ [EMPTY_LVL_THRSHOLD] = GENMASK(23, 16),
+ /* Bits 24-31 reserved */
+};
+
+REG_STRIDE_FIELDS(CH_C_QOS, ch_c_qos, 0x0000f05c + 0x4000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_error_log_fmask[] = {
+ [ERR_ARG3] = GENMASK(3, 0),
+ [ERR_ARG2] = GENMASK(7, 4),
+ [ERR_ARG1] = GENMASK(11, 8),
+ [ERR_CODE] = GENMASK(15, 12),
+ /* Bits 16-18 reserved */
+ [ERR_VIRT_IDX] = GENMASK(23, 19),
+ [ERR_TYPE] = GENMASK(27, 24),
+ [ERR_EE] = GENMASK(31, 28),
+};
+
+REG_STRIDE(CH_C_SCRATCH_0, ch_c_scratch_0,
+ 0x0000f060 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_SCRATCH_1, ch_c_scratch_1,
+ 0x0000f064 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_SCRATCH_2, ch_c_scratch_2,
+ 0x0000f068 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_SCRATCH_3, ch_c_scratch_3,
+ 0x0000f06c + 0x4000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_ev_ch_e_cntxt_0_fmask[] = {
+ [EV_CHTYPE] = GENMASK(3, 0),
+ [EV_EE] = GENMASK(7, 4),
+ [EV_EVCHID] = GENMASK(15, 8),
+ [EV_INTYPE] = BIT(16),
+ /* Bits 17-19 reserved */
+ [EV_CHSTATE] = GENMASK(23, 20),
+ [EV_ELEMENT_SIZE] = GENMASK(31, 24),
+};
+
+REG_STRIDE_FIELDS(EV_CH_E_CNTXT_0, ev_ch_e_cntxt_0,
+ 0x00010000 + 0x4000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_ev_ch_e_cntxt_1_fmask[] = {
+ [R_LENGTH] = GENMASK(15, 0),
+};
+
+REG_STRIDE_FIELDS(EV_CH_E_CNTXT_1, ev_ch_e_cntxt_1,
+ 0x00010004 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_2, ev_ch_e_cntxt_2,
+ 0x00010008 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_3, ev_ch_e_cntxt_3,
+ 0x0001000c + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_4, ev_ch_e_cntxt_4,
+ 0x00010010 + 0x4000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_ev_ch_e_cntxt_8_fmask[] = {
+ [EV_MODT] = GENMASK(15, 0),
+ [EV_MODC] = GENMASK(23, 16),
+ [EV_MOD_CNT] = GENMASK(31, 24),
+};
+
+REG_STRIDE_FIELDS(EV_CH_E_CNTXT_8, ev_ch_e_cntxt_8,
+ 0x00010020 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_9, ev_ch_e_cntxt_9,
+ 0x00010024 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_10, ev_ch_e_cntxt_10,
+ 0x00010028 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_11, ev_ch_e_cntxt_11,
+ 0x0001002c + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_12, ev_ch_e_cntxt_12,
+ 0x00010030 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_13, ev_ch_e_cntxt_13,
+ 0x00010034 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_SCRATCH_0, ev_ch_e_scratch_0,
+ 0x00010048 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_SCRATCH_1, ev_ch_e_scratch_1,
+ 0x0001004c + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_DOORBELL_0, ch_c_doorbell_0,
+ 0x0001e000 + 0x4000 * GSI_EE_AP, 0x08);
+
+REG_STRIDE(EV_CH_E_DOORBELL_0, ev_ch_e_doorbell_0,
+ 0x0001e100 + 0x4000 * GSI_EE_AP, 0x08);
+
+static const u32 reg_gsi_status_fmask[] = {
+ [ENABLED] = BIT(0),
+ /* Bits 1-31 reserved */
+};
+
+REG_FIELDS(GSI_STATUS, gsi_status, 0x0001f000 + 0x4000 * GSI_EE_AP);
+
+static const u32 reg_ch_cmd_fmask[] = {
+ [CH_CHID] = GENMASK(7, 0),
+ /* Bits 8-23 reserved */
+ [CH_OPCODE] = GENMASK(31, 24),
+};
+
+REG_FIELDS(CH_CMD, ch_cmd, 0x0001f008 + 0x4000 * GSI_EE_AP);
+
+static const u32 reg_ev_ch_cmd_fmask[] = {
+ [EV_CHID] = GENMASK(7, 0),
+ /* Bits 8-23 reserved */
+ [EV_OPCODE] = GENMASK(31, 24),
+};
+
+REG_FIELDS(EV_CH_CMD, ev_ch_cmd, 0x0001f010 + 0x4000 * GSI_EE_AP);
+
+static const u32 reg_generic_cmd_fmask[] = {
+ [GENERIC_OPCODE] = GENMASK(4, 0),
+ [GENERIC_CHID] = GENMASK(9, 5),
+ [GENERIC_EE] = GENMASK(13, 10),
+ /* Bits 14-31 reserved */
+};
+
+REG_FIELDS(GENERIC_CMD, generic_cmd, 0x0001f018 + 0x4000 * GSI_EE_AP);
+
+static const u32 reg_hw_param_2_fmask[] = {
+ [IRAM_SIZE] = GENMASK(2, 0),
+ [NUM_CH_PER_EE] = GENMASK(7, 3),
+ [NUM_EV_PER_EE] = GENMASK(12, 8),
+ [GSI_CH_PEND_TRANSLATE] = BIT(13),
+ [GSI_CH_FULL_LOGIC] = BIT(14),
+ [GSI_USE_SDMA] = BIT(15),
+ [GSI_SDMA_N_INT] = GENMASK(18, 16),
+ [GSI_SDMA_MAX_BURST] = GENMASK(26, 19),
+ [GSI_SDMA_N_IOVEC] = GENMASK(29, 27),
+ [GSI_USE_RD_WR_ENG] = BIT(30),
+ [GSI_USE_INTER_EE] = BIT(31),
+};
+
+REG_FIELDS(HW_PARAM_2, hw_param_2, 0x0001f040 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_TYPE_IRQ, cntxt_type_irq, 0x0001f080 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_TYPE_IRQ_MSK, cntxt_type_irq_msk, 0x0001f088 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_CH_IRQ, cntxt_src_ch_irq, 0x0001f090 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_EV_CH_IRQ, cntxt_src_ev_ch_irq, 0x0001f094 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_CH_IRQ_MSK, cntxt_src_ch_irq_msk,
+ 0x0001f098 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_EV_CH_IRQ_MSK, cntxt_src_ev_ch_irq_msk,
+ 0x0001f09c + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_CH_IRQ_CLR, cntxt_src_ch_irq_clr,
+ 0x0001f0a0 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_EV_CH_IRQ_CLR, cntxt_src_ev_ch_irq_clr,
+ 0x0001f0a4 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_IEOB_IRQ, cntxt_src_ieob_irq, 0x0001f0b0 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_IEOB_IRQ_MSK, cntxt_src_ieob_irq_msk,
+ 0x0001f0b8 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_IEOB_IRQ_CLR, cntxt_src_ieob_irq_clr,
+ 0x0001f0c0 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_GLOB_IRQ_STTS, cntxt_glob_irq_stts, 0x0001f100 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_GLOB_IRQ_EN, cntxt_glob_irq_en, 0x0001f108 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_GLOB_IRQ_CLR, cntxt_glob_irq_clr, 0x0001f110 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_GSI_IRQ_STTS, cntxt_gsi_irq_stts, 0x0001f118 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_GSI_IRQ_EN, cntxt_gsi_irq_en, 0x0001f120 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_GSI_IRQ_CLR, cntxt_gsi_irq_clr, 0x0001f128 + 0x4000 * GSI_EE_AP);
+
+static const u32 reg_cntxt_intset_fmask[] = {
+ [INTYPE] = BIT(0)
+ /* Bits 1-31 reserved */
+};
+
+REG_FIELDS(CNTXT_INTSET, cntxt_intset, 0x0001f180 + 0x4000 * GSI_EE_AP);
+
+REG_FIELDS(ERROR_LOG, error_log, 0x0001f200 + 0x4000 * GSI_EE_AP);
+
+REG(ERROR_LOG_CLR, error_log_clr, 0x0001f210 + 0x4000 * GSI_EE_AP);
+
+static const u32 reg_cntxt_scratch_0_fmask[] = {
+ [INTER_EE_RESULT] = GENMASK(2, 0),
+ /* Bits 3-4 reserved */
+ [GENERIC_EE_RESULT] = GENMASK(7, 5),
+ /* Bits 8-31 reserved */
+};
+
+REG_FIELDS(CNTXT_SCRATCH_0, cntxt_scratch_0, 0x0001f400 + 0x4000 * GSI_EE_AP);
+
+static const struct reg *reg_array[] = {
+ [INTER_EE_SRC_CH_IRQ_MSK] = &reg_inter_ee_src_ch_irq_msk,
+ [INTER_EE_SRC_EV_CH_IRQ_MSK] = &reg_inter_ee_src_ev_ch_irq_msk,
+ [CH_C_CNTXT_0] = &reg_ch_c_cntxt_0,
+ [CH_C_CNTXT_1] = &reg_ch_c_cntxt_1,
+ [CH_C_CNTXT_2] = &reg_ch_c_cntxt_2,
+ [CH_C_CNTXT_3] = &reg_ch_c_cntxt_3,
+ [CH_C_QOS] = &reg_ch_c_qos,
+ [CH_C_SCRATCH_0] = &reg_ch_c_scratch_0,
+ [CH_C_SCRATCH_1] = &reg_ch_c_scratch_1,
+ [CH_C_SCRATCH_2] = &reg_ch_c_scratch_2,
+ [CH_C_SCRATCH_3] = &reg_ch_c_scratch_3,
+ [EV_CH_E_CNTXT_0] = &reg_ev_ch_e_cntxt_0,
+ [EV_CH_E_CNTXT_1] = &reg_ev_ch_e_cntxt_1,
+ [EV_CH_E_CNTXT_2] = &reg_ev_ch_e_cntxt_2,
+ [EV_CH_E_CNTXT_3] = &reg_ev_ch_e_cntxt_3,
+ [EV_CH_E_CNTXT_4] = &reg_ev_ch_e_cntxt_4,
+ [EV_CH_E_CNTXT_8] = &reg_ev_ch_e_cntxt_8,
+ [EV_CH_E_CNTXT_9] = &reg_ev_ch_e_cntxt_9,
+ [EV_CH_E_CNTXT_10] = &reg_ev_ch_e_cntxt_10,
+ [EV_CH_E_CNTXT_11] = &reg_ev_ch_e_cntxt_11,
+ [EV_CH_E_CNTXT_12] = &reg_ev_ch_e_cntxt_12,
+ [EV_CH_E_CNTXT_13] = &reg_ev_ch_e_cntxt_13,
+ [EV_CH_E_SCRATCH_0] = &reg_ev_ch_e_scratch_0,
+ [EV_CH_E_SCRATCH_1] = &reg_ev_ch_e_scratch_1,
+ [CH_C_DOORBELL_0] = &reg_ch_c_doorbell_0,
+ [EV_CH_E_DOORBELL_0] = &reg_ev_ch_e_doorbell_0,
+ [GSI_STATUS] = &reg_gsi_status,
+ [CH_CMD] = &reg_ch_cmd,
+ [EV_CH_CMD] = &reg_ev_ch_cmd,
+ [GENERIC_CMD] = &reg_generic_cmd,
+ [HW_PARAM_2] = &reg_hw_param_2,
+ [CNTXT_TYPE_IRQ] = &reg_cntxt_type_irq,
+ [CNTXT_TYPE_IRQ_MSK] = &reg_cntxt_type_irq_msk,
+ [CNTXT_SRC_CH_IRQ] = &reg_cntxt_src_ch_irq,
+ [CNTXT_SRC_EV_CH_IRQ] = &reg_cntxt_src_ev_ch_irq,
+ [CNTXT_SRC_CH_IRQ_MSK] = &reg_cntxt_src_ch_irq_msk,
+ [CNTXT_SRC_EV_CH_IRQ_MSK] = &reg_cntxt_src_ev_ch_irq_msk,
+ [CNTXT_SRC_CH_IRQ_CLR] = &reg_cntxt_src_ch_irq_clr,
+ [CNTXT_SRC_EV_CH_IRQ_CLR] = &reg_cntxt_src_ev_ch_irq_clr,
+ [CNTXT_SRC_IEOB_IRQ] = &reg_cntxt_src_ieob_irq,
+ [CNTXT_SRC_IEOB_IRQ_MSK] = &reg_cntxt_src_ieob_irq_msk,
+ [CNTXT_SRC_IEOB_IRQ_CLR] = &reg_cntxt_src_ieob_irq_clr,
+ [CNTXT_GLOB_IRQ_STTS] = &reg_cntxt_glob_irq_stts,
+ [CNTXT_GLOB_IRQ_EN] = &reg_cntxt_glob_irq_en,
+ [CNTXT_GLOB_IRQ_CLR] = &reg_cntxt_glob_irq_clr,
+ [CNTXT_GSI_IRQ_STTS] = &reg_cntxt_gsi_irq_stts,
+ [CNTXT_GSI_IRQ_EN] = &reg_cntxt_gsi_irq_en,
+ [CNTXT_GSI_IRQ_CLR] = &reg_cntxt_gsi_irq_clr,
+ [CNTXT_INTSET] = &reg_cntxt_intset,
+ [ERROR_LOG] = &reg_error_log,
+ [ERROR_LOG_CLR] = &reg_error_log_clr,
+ [CNTXT_SCRATCH_0] = &reg_cntxt_scratch_0,
+};
+
+const struct regs gsi_regs_v4_5 = {
+ .reg_count = ARRAY_SIZE(reg_array),
+ .reg = reg_array,
+};
diff --git a/drivers/net/ipa/reg/gsi_reg-v4.9.c b/drivers/net/ipa/reg/gsi_reg-v4.9.c
new file mode 100644
index 000000000000..4bf45d264d6b
--- /dev/null
+++ b/drivers/net/ipa/reg/gsi_reg-v4.9.c
@@ -0,0 +1,312 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (C) 2023 Linaro Ltd. */
+
+#include <linux/types.h>
+
+#include "../gsi.h"
+#include "../reg.h"
+#include "../gsi_reg.h"
+
+REG(INTER_EE_SRC_CH_IRQ_MSK, inter_ee_src_ch_irq_msk,
+ 0x0000c020 + 0x1000 * GSI_EE_AP);
+
+REG(INTER_EE_SRC_EV_CH_IRQ_MSK, inter_ee_src_ev_ch_irq_msk,
+ 0x0000c024 + 0x1000 * GSI_EE_AP);
+
+static const u32 reg_ch_c_cntxt_0_fmask[] = {
+ [CHTYPE_PROTOCOL] = GENMASK(2, 0),
+ [CHTYPE_DIR] = BIT(3),
+ [CH_EE] = GENMASK(7, 4),
+ [CHID] = GENMASK(12, 8),
+ [CHTYPE_PROTOCOL_MSB] = BIT(13),
+ [ERINDEX] = GENMASK(18, 14),
+ /* Bit 19 reserved */
+ [CHSTATE] = GENMASK(23, 20),
+ [ELEMENT_SIZE] = GENMASK(31, 24),
+};
+
+REG_STRIDE_FIELDS(CH_C_CNTXT_0, ch_c_cntxt_0,
+ 0x0001c000 + 0x4000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_ch_c_cntxt_1_fmask[] = {
+ [CH_R_LENGTH] = GENMASK(19, 0),
+ /* Bits 20-31 reserved */
+};
+
+REG_STRIDE_FIELDS(CH_C_CNTXT_1, ch_c_cntxt_1,
+ 0x0001c004 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_CNTXT_2, ch_c_cntxt_2, 0x0001c008 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_CNTXT_3, ch_c_cntxt_3, 0x0001c00c + 0x4000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_ch_c_qos_fmask[] = {
+ [WRR_WEIGHT] = GENMASK(3, 0),
+ /* Bits 4-7 reserved */
+ [MAX_PREFETCH] = BIT(8),
+ [USE_DB_ENG] = BIT(9),
+ [PREFETCH_MODE] = GENMASK(13, 10),
+ /* Bits 14-15 reserved */
+ [EMPTY_LVL_THRSHOLD] = GENMASK(23, 16),
+ [DB_IN_BYTES] = BIT(24),
+ /* Bits 25-31 reserved */
+};
+
+REG_STRIDE_FIELDS(CH_C_QOS, ch_c_qos, 0x0001c05c + 0x4000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_error_log_fmask[] = {
+ [ERR_ARG3] = GENMASK(3, 0),
+ [ERR_ARG2] = GENMASK(7, 4),
+ [ERR_ARG1] = GENMASK(11, 8),
+ [ERR_CODE] = GENMASK(15, 12),
+ /* Bits 16-18 reserved */
+ [ERR_VIRT_IDX] = GENMASK(23, 19),
+ [ERR_TYPE] = GENMASK(27, 24),
+ [ERR_EE] = GENMASK(31, 28),
+};
+
+REG_STRIDE(CH_C_SCRATCH_0, ch_c_scratch_0,
+ 0x0001c060 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_SCRATCH_1, ch_c_scratch_1,
+ 0x0001c064 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_SCRATCH_2, ch_c_scratch_2,
+ 0x0001c068 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_SCRATCH_3, ch_c_scratch_3,
+ 0x0001c06c + 0x4000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_ev_ch_e_cntxt_0_fmask[] = {
+ [EV_CHTYPE] = GENMASK(3, 0),
+ [EV_EE] = GENMASK(7, 4),
+ [EV_EVCHID] = GENMASK(15, 8),
+ [EV_INTYPE] = BIT(16),
+ /* Bits 17-19 reserved */
+ [EV_CHSTATE] = GENMASK(23, 20),
+ [EV_ELEMENT_SIZE] = GENMASK(31, 24),
+};
+
+REG_STRIDE_FIELDS(EV_CH_E_CNTXT_0, ev_ch_e_cntxt_0,
+ 0x0001d000 + 0x4000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_ev_ch_e_cntxt_1_fmask[] = {
+ [R_LENGTH] = GENMASK(15, 0),
+};
+
+REG_STRIDE_FIELDS(EV_CH_E_CNTXT_1, ev_ch_e_cntxt_1,
+ 0x0001d004 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_2, ev_ch_e_cntxt_2,
+ 0x0001d008 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_3, ev_ch_e_cntxt_3,
+ 0x0001d00c + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_4, ev_ch_e_cntxt_4,
+ 0x0001d010 + 0x4000 * GSI_EE_AP, 0x80);
+
+static const u32 reg_ev_ch_e_cntxt_8_fmask[] = {
+ [EV_MODT] = GENMASK(15, 0),
+ [EV_MODC] = GENMASK(23, 16),
+ [EV_MOD_CNT] = GENMASK(31, 24),
+};
+
+REG_STRIDE_FIELDS(EV_CH_E_CNTXT_8, ev_ch_e_cntxt_8,
+ 0x0001d020 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_9, ev_ch_e_cntxt_9,
+ 0x0001d024 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_10, ev_ch_e_cntxt_10,
+ 0x0001d028 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_11, ev_ch_e_cntxt_11,
+ 0x0001d02c + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_12, ev_ch_e_cntxt_12,
+ 0x0001d030 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_CNTXT_13, ev_ch_e_cntxt_13,
+ 0x0001d034 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_SCRATCH_0, ev_ch_e_scratch_0,
+ 0x0001d048 + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(EV_CH_E_SCRATCH_1, ev_ch_e_scratch_1,
+ 0x0001d04c + 0x4000 * GSI_EE_AP, 0x80);
+
+REG_STRIDE(CH_C_DOORBELL_0, ch_c_doorbell_0,
+ 0x00011000 + 0x4000 * GSI_EE_AP, 0x08);
+
+REG_STRIDE(EV_CH_E_DOORBELL_0, ev_ch_e_doorbell_0,
+ 0x00011100 + 0x4000 * GSI_EE_AP, 0x08);
+
+static const u32 reg_gsi_status_fmask[] = {
+ [ENABLED] = BIT(0),
+ /* Bits 1-31 reserved */
+};
+
+REG_FIELDS(GSI_STATUS, gsi_status, 0x00012000 + 0x4000 * GSI_EE_AP);
+
+static const u32 reg_ch_cmd_fmask[] = {
+ [CH_CHID] = GENMASK(7, 0),
+ /* Bits 8-23 reserved */
+ [CH_OPCODE] = GENMASK(31, 24),
+};
+
+REG_FIELDS(CH_CMD, ch_cmd, 0x00012008 + 0x4000 * GSI_EE_AP);
+
+static const u32 reg_ev_ch_cmd_fmask[] = {
+ [EV_CHID] = GENMASK(7, 0),
+ /* Bits 8-23 reserved */
+ [EV_OPCODE] = GENMASK(31, 24),
+};
+
+REG_FIELDS(EV_CH_CMD, ev_ch_cmd, 0x00012010 + 0x4000 * GSI_EE_AP);
+
+static const u32 reg_generic_cmd_fmask[] = {
+ [GENERIC_OPCODE] = GENMASK(4, 0),
+ [GENERIC_CHID] = GENMASK(9, 5),
+ [GENERIC_EE] = GENMASK(13, 10),
+ /* Bits 14-31 reserved */
+};
+
+REG_FIELDS(GENERIC_CMD, generic_cmd, 0x00012018 + 0x4000 * GSI_EE_AP);
+
+static const u32 reg_hw_param_2_fmask[] = {
+ [IRAM_SIZE] = GENMASK(2, 0),
+ [NUM_CH_PER_EE] = GENMASK(7, 3),
+ [NUM_EV_PER_EE] = GENMASK(12, 8),
+ [GSI_CH_PEND_TRANSLATE] = BIT(13),
+ [GSI_CH_FULL_LOGIC] = BIT(14),
+ [GSI_USE_SDMA] = BIT(15),
+ [GSI_SDMA_N_INT] = GENMASK(18, 16),
+ [GSI_SDMA_MAX_BURST] = GENMASK(26, 19),
+ [GSI_SDMA_N_IOVEC] = GENMASK(29, 27),
+ [GSI_USE_RD_WR_ENG] = BIT(30),
+ [GSI_USE_INTER_EE] = BIT(31),
+};
+
+REG_FIELDS(HW_PARAM_2, hw_param_2, 0x00012040 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_TYPE_IRQ, cntxt_type_irq, 0x00012080 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_TYPE_IRQ_MSK, cntxt_type_irq_msk, 0x00012088 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_CH_IRQ, cntxt_src_ch_irq, 0x00012090 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_EV_CH_IRQ, cntxt_src_ev_ch_irq, 0x00012094 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_CH_IRQ_MSK, cntxt_src_ch_irq_msk,
+ 0x00012098 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_EV_CH_IRQ_MSK, cntxt_src_ev_ch_irq_msk,
+ 0x0001209c + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_CH_IRQ_CLR, cntxt_src_ch_irq_clr,
+ 0x000120a0 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_EV_CH_IRQ_CLR, cntxt_src_ev_ch_irq_clr,
+ 0x000120a4 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_IEOB_IRQ, cntxt_src_ieob_irq, 0x000120b0 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_IEOB_IRQ_MSK, cntxt_src_ieob_irq_msk,
+ 0x000120b8 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_SRC_IEOB_IRQ_CLR, cntxt_src_ieob_irq_clr,
+ 0x000120c0 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_GLOB_IRQ_STTS, cntxt_glob_irq_stts, 0x00012100 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_GLOB_IRQ_EN, cntxt_glob_irq_en, 0x00012108 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_GLOB_IRQ_CLR, cntxt_glob_irq_clr, 0x00012110 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_GSI_IRQ_STTS, cntxt_gsi_irq_stts, 0x00012118 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_GSI_IRQ_EN, cntxt_gsi_irq_en, 0x00012120 + 0x4000 * GSI_EE_AP);
+
+REG(CNTXT_GSI_IRQ_CLR, cntxt_gsi_irq_clr, 0x00012128 + 0x4000 * GSI_EE_AP);
+
+static const u32 reg_cntxt_intset_fmask[] = {
+ [INTYPE] = BIT(0)
+ /* Bits 1-31 reserved */
+};
+
+REG_FIELDS(CNTXT_INTSET, cntxt_intset, 0x00012180 + 0x4000 * GSI_EE_AP);
+
+REG_FIELDS(ERROR_LOG, error_log, 0x00012200 + 0x4000 * GSI_EE_AP);
+
+REG(ERROR_LOG_CLR, error_log_clr, 0x00012210 + 0x4000 * GSI_EE_AP);
+
+static const u32 reg_cntxt_scratch_0_fmask[] = {
+ [INTER_EE_RESULT] = GENMASK(2, 0),
+ /* Bits 3-4 reserved */
+ [GENERIC_EE_RESULT] = GENMASK(7, 5),
+ /* Bits 8-31 reserved */
+};
+
+REG_FIELDS(CNTXT_SCRATCH_0, cntxt_scratch_0, 0x00012400 + 0x4000 * GSI_EE_AP);
+
+static const struct reg *reg_array[] = {
+ [INTER_EE_SRC_CH_IRQ_MSK] = &reg_inter_ee_src_ch_irq_msk,
+ [INTER_EE_SRC_EV_CH_IRQ_MSK] = &reg_inter_ee_src_ev_ch_irq_msk,
+ [CH_C_CNTXT_0] = &reg_ch_c_cntxt_0,
+ [CH_C_CNTXT_1] = &reg_ch_c_cntxt_1,
+ [CH_C_CNTXT_2] = &reg_ch_c_cntxt_2,
+ [CH_C_CNTXT_3] = &reg_ch_c_cntxt_3,
+ [CH_C_QOS] = &reg_ch_c_qos,
+ [CH_C_SCRATCH_0] = &reg_ch_c_scratch_0,
+ [CH_C_SCRATCH_1] = &reg_ch_c_scratch_1,
+ [CH_C_SCRATCH_2] = &reg_ch_c_scratch_2,
+ [CH_C_SCRATCH_3] = &reg_ch_c_scratch_3,
+ [EV_CH_E_CNTXT_0] = &reg_ev_ch_e_cntxt_0,
+ [EV_CH_E_CNTXT_1] = &reg_ev_ch_e_cntxt_1,
+ [EV_CH_E_CNTXT_2] = &reg_ev_ch_e_cntxt_2,
+ [EV_CH_E_CNTXT_3] = &reg_ev_ch_e_cntxt_3,
+ [EV_CH_E_CNTXT_4] = &reg_ev_ch_e_cntxt_4,
+ [EV_CH_E_CNTXT_8] = &reg_ev_ch_e_cntxt_8,
+ [EV_CH_E_CNTXT_9] = &reg_ev_ch_e_cntxt_9,
+ [EV_CH_E_CNTXT_10] = &reg_ev_ch_e_cntxt_10,
+ [EV_CH_E_CNTXT_11] = &reg_ev_ch_e_cntxt_11,
+ [EV_CH_E_CNTXT_12] = &reg_ev_ch_e_cntxt_12,
+ [EV_CH_E_CNTXT_13] = &reg_ev_ch_e_cntxt_13,
+ [EV_CH_E_SCRATCH_0] = &reg_ev_ch_e_scratch_0,
+ [EV_CH_E_SCRATCH_1] = &reg_ev_ch_e_scratch_1,
+ [CH_C_DOORBELL_0] = &reg_ch_c_doorbell_0,
+ [EV_CH_E_DOORBELL_0] = &reg_ev_ch_e_doorbell_0,
+ [GSI_STATUS] = &reg_gsi_status,
+ [CH_CMD] = &reg_ch_cmd,
+ [EV_CH_CMD] = &reg_ev_ch_cmd,
+ [GENERIC_CMD] = &reg_generic_cmd,
+ [HW_PARAM_2] = &reg_hw_param_2,
+ [CNTXT_TYPE_IRQ] = &reg_cntxt_type_irq,
+ [CNTXT_TYPE_IRQ_MSK] = &reg_cntxt_type_irq_msk,
+ [CNTXT_SRC_CH_IRQ] = &reg_cntxt_src_ch_irq,
+ [CNTXT_SRC_EV_CH_IRQ] = &reg_cntxt_src_ev_ch_irq,
+ [CNTXT_SRC_CH_IRQ_MSK] = &reg_cntxt_src_ch_irq_msk,
+ [CNTXT_SRC_EV_CH_IRQ_MSK] = &reg_cntxt_src_ev_ch_irq_msk,
+ [CNTXT_SRC_CH_IRQ_CLR] = &reg_cntxt_src_ch_irq_clr,
+ [CNTXT_SRC_EV_CH_IRQ_CLR] = &reg_cntxt_src_ev_ch_irq_clr,
+ [CNTXT_SRC_IEOB_IRQ] = &reg_cntxt_src_ieob_irq,
+ [CNTXT_SRC_IEOB_IRQ_MSK] = &reg_cntxt_src_ieob_irq_msk,
+ [CNTXT_SRC_IEOB_IRQ_CLR] = &reg_cntxt_src_ieob_irq_clr,
+ [CNTXT_GLOB_IRQ_STTS] = &reg_cntxt_glob_irq_stts,
+ [CNTXT_GLOB_IRQ_EN] = &reg_cntxt_glob_irq_en,
+ [CNTXT_GLOB_IRQ_CLR] = &reg_cntxt_glob_irq_clr,
+ [CNTXT_GSI_IRQ_STTS] = &reg_cntxt_gsi_irq_stts,
+ [CNTXT_GSI_IRQ_EN] = &reg_cntxt_gsi_irq_en,
+ [CNTXT_GSI_IRQ_CLR] = &reg_cntxt_gsi_irq_clr,
+ [CNTXT_INTSET] = &reg_cntxt_intset,
+ [ERROR_LOG] = &reg_error_log,
+ [ERROR_LOG_CLR] = &reg_error_log_clr,
+ [CNTXT_SCRATCH_0] = &reg_cntxt_scratch_0,
+};
+
+const struct regs gsi_regs_v4_9 = {
+ .reg_count = ARRAY_SIZE(reg_array),
+ .reg = reg_array,
+};
diff --git a/drivers/net/ipa/reg/ipa_reg-v3.1.c b/drivers/net/ipa/reg/ipa_reg-v3.1.c
index 677ece3bce9e..648dbfe1fce3 100644
--- a/drivers/net/ipa/reg/ipa_reg-v3.1.c
+++ b/drivers/net/ipa/reg/ipa_reg-v3.1.c
@@ -7,7 +7,7 @@
#include "../ipa.h"
#include "../ipa_reg.h"
-static const u32 ipa_reg_comp_cfg_fmask[] = {
+static const u32 reg_comp_cfg_fmask[] = {
[COMP_CFG_ENABLE] = BIT(0),
[GSI_SNOC_BYPASS_DIS] = BIT(1),
[GEN_QMB_0_SNOC_BYPASS_DIS] = BIT(2),
@@ -16,9 +16,9 @@ static const u32 ipa_reg_comp_cfg_fmask[] = {
/* Bits 5-31 reserved */
};
-IPA_REG_FIELDS(COMP_CFG, comp_cfg, 0x0000003c);
+REG_FIELDS(COMP_CFG, comp_cfg, 0x0000003c);
-static const u32 ipa_reg_clkon_cfg_fmask[] = {
+static const u32 reg_clkon_cfg_fmask[] = {
[CLKON_RX] = BIT(0),
[CLKON_PROC] = BIT(1),
[TX_WRAPPER] = BIT(2),
@@ -39,9 +39,9 @@ static const u32 ipa_reg_clkon_cfg_fmask[] = {
/* Bits 17-31 reserved */
};
-IPA_REG_FIELDS(CLKON_CFG, clkon_cfg, 0x00000044);
+REG_FIELDS(CLKON_CFG, clkon_cfg, 0x00000044);
-static const u32 ipa_reg_route_fmask[] = {
+static const u32 reg_route_fmask[] = {
[ROUTE_DIS] = BIT(0),
[ROUTE_DEF_PIPE] = GENMASK(5, 1),
[ROUTE_DEF_HDR_TABLE] = BIT(6),
@@ -52,31 +52,31 @@ static const u32 ipa_reg_route_fmask[] = {
/* Bits 25-31 reserved */
};
-IPA_REG_FIELDS(ROUTE, route, 0x00000048);
+REG_FIELDS(ROUTE, route, 0x00000048);
-static const u32 ipa_reg_shared_mem_size_fmask[] = {
+static const u32 reg_shared_mem_size_fmask[] = {
[MEM_SIZE] = GENMASK(15, 0),
[MEM_BADDR] = GENMASK(31, 16),
};
-IPA_REG_FIELDS(SHARED_MEM_SIZE, shared_mem_size, 0x00000054);
+REG_FIELDS(SHARED_MEM_SIZE, shared_mem_size, 0x00000054);
-static const u32 ipa_reg_qsb_max_writes_fmask[] = {
+static const u32 reg_qsb_max_writes_fmask[] = {
[GEN_QMB_0_MAX_WRITES] = GENMASK(3, 0),
[GEN_QMB_1_MAX_WRITES] = GENMASK(7, 4),
/* Bits 8-31 reserved */
};
-IPA_REG_FIELDS(QSB_MAX_WRITES, qsb_max_writes, 0x00000074);
+REG_FIELDS(QSB_MAX_WRITES, qsb_max_writes, 0x00000074);
-static const u32 ipa_reg_qsb_max_reads_fmask[] = {
+static const u32 reg_qsb_max_reads_fmask[] = {
[GEN_QMB_0_MAX_READS] = GENMASK(3, 0),
[GEN_QMB_1_MAX_READS] = GENMASK(7, 4),
};
-IPA_REG_FIELDS(QSB_MAX_READS, qsb_max_reads, 0x00000078);
+REG_FIELDS(QSB_MAX_READS, qsb_max_reads, 0x00000078);
-static const u32 ipa_reg_filt_rout_hash_en_fmask[] = {
+static const u32 reg_filt_rout_hash_en_fmask[] = {
[IPV6_ROUTER_HASH] = BIT(0),
/* Bits 1-3 reserved */
[IPV6_FILTER_HASH] = BIT(4),
@@ -87,9 +87,9 @@ static const u32 ipa_reg_filt_rout_hash_en_fmask[] = {
/* Bits 13-31 reserved */
};
-IPA_REG_FIELDS(FILT_ROUT_HASH_EN, filt_rout_hash_en, 0x000008c);
+REG_FIELDS(FILT_ROUT_HASH_EN, filt_rout_hash_en, 0x000008c);
-static const u32 ipa_reg_filt_rout_hash_flush_fmask[] = {
+static const u32 reg_filt_rout_hash_flush_fmask[] = {
[IPV6_ROUTER_HASH] = BIT(0),
/* Bits 1-3 reserved */
[IPV6_FILTER_HASH] = BIT(4),
@@ -100,121 +100,121 @@ static const u32 ipa_reg_filt_rout_hash_flush_fmask[] = {
/* Bits 13-31 reserved */
};
-IPA_REG_FIELDS(FILT_ROUT_HASH_FLUSH, filt_rout_hash_flush, 0x0000090);
+REG_FIELDS(FILT_ROUT_HASH_FLUSH, filt_rout_hash_flush, 0x0000090);
/* Valid bits defined by ipa->available */
-IPA_REG_STRIDE(STATE_AGGR_ACTIVE, state_aggr_active, 0x0000010c, 0x0004);
+REG_STRIDE(STATE_AGGR_ACTIVE, state_aggr_active, 0x0000010c, 0x0004);
-IPA_REG(IPA_BCR, ipa_bcr, 0x000001d0);
+REG(IPA_BCR, ipa_bcr, 0x000001d0);
-static const u32 ipa_reg_local_pkt_proc_cntxt_fmask[] = {
+static const u32 reg_local_pkt_proc_cntxt_fmask[] = {
[IPA_BASE_ADDR] = GENMASK(16, 0),
/* Bits 17-31 reserved */
};
/* Offset must be a multiple of 8 */
-IPA_REG_FIELDS(LOCAL_PKT_PROC_CNTXT, local_pkt_proc_cntxt, 0x000001e8);
+REG_FIELDS(LOCAL_PKT_PROC_CNTXT, local_pkt_proc_cntxt, 0x000001e8);
/* Valid bits defined by ipa->available */
-IPA_REG_STRIDE(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec, 0x0004);
+REG_STRIDE(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec, 0x0004);
-static const u32 ipa_reg_counter_cfg_fmask[] = {
+static const u32 reg_counter_cfg_fmask[] = {
[EOT_COAL_GRANULARITY] = GENMASK(3, 0),
[AGGR_GRANULARITY] = GENMASK(8, 4),
/* Bits 5-31 reserved */
};
-IPA_REG_FIELDS(COUNTER_CFG, counter_cfg, 0x000001f0);
+REG_FIELDS(COUNTER_CFG, counter_cfg, 0x000001f0);
-static const u32 ipa_reg_src_rsrc_grp_01_rsrc_type_fmask[] = {
+static const u32 reg_src_rsrc_grp_01_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(7, 0),
[X_MAX_LIM] = GENMASK(15, 8),
[Y_MIN_LIM] = GENMASK(23, 16),
[Y_MAX_LIM] = GENMASK(31, 24),
};
-IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_01_RSRC_TYPE, src_rsrc_grp_01_rsrc_type,
- 0x00000400, 0x0020);
+REG_STRIDE_FIELDS(SRC_RSRC_GRP_01_RSRC_TYPE, src_rsrc_grp_01_rsrc_type,
+ 0x00000400, 0x0020);
-static const u32 ipa_reg_src_rsrc_grp_23_rsrc_type_fmask[] = {
+static const u32 reg_src_rsrc_grp_23_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(7, 0),
[X_MAX_LIM] = GENMASK(15, 8),
[Y_MIN_LIM] = GENMASK(23, 16),
[Y_MAX_LIM] = GENMASK(31, 24),
};
-IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_23_RSRC_TYPE, src_rsrc_grp_23_rsrc_type,
- 0x00000404, 0x0020);
+REG_STRIDE_FIELDS(SRC_RSRC_GRP_23_RSRC_TYPE, src_rsrc_grp_23_rsrc_type,
+ 0x00000404, 0x0020);
-static const u32 ipa_reg_src_rsrc_grp_45_rsrc_type_fmask[] = {
+static const u32 reg_src_rsrc_grp_45_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(7, 0),
[X_MAX_LIM] = GENMASK(15, 8),
[Y_MIN_LIM] = GENMASK(23, 16),
[Y_MAX_LIM] = GENMASK(31, 24),
};
-IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_45_RSRC_TYPE, src_rsrc_grp_45_rsrc_type,
- 0x00000408, 0x0020);
+REG_STRIDE_FIELDS(SRC_RSRC_GRP_45_RSRC_TYPE, src_rsrc_grp_45_rsrc_type,
+ 0x00000408, 0x0020);
-static const u32 ipa_reg_src_rsrc_grp_67_rsrc_type_fmask[] = {
+static const u32 reg_src_rsrc_grp_67_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(7, 0),
[X_MAX_LIM] = GENMASK(15, 8),
[Y_MIN_LIM] = GENMASK(23, 16),
[Y_MAX_LIM] = GENMASK(31, 24),
};
-IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_67_RSRC_TYPE, src_rsrc_grp_67_rsrc_type,
- 0x0000040c, 0x0020);
+REG_STRIDE_FIELDS(SRC_RSRC_GRP_67_RSRC_TYPE, src_rsrc_grp_67_rsrc_type,
+ 0x0000040c, 0x0020);
-static const u32 ipa_reg_dst_rsrc_grp_01_rsrc_type_fmask[] = {
+static const u32 reg_dst_rsrc_grp_01_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(7, 0),
[X_MAX_LIM] = GENMASK(15, 8),
[Y_MIN_LIM] = GENMASK(23, 16),
[Y_MAX_LIM] = GENMASK(31, 24),
};
-IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_01_RSRC_TYPE, dst_rsrc_grp_01_rsrc_type,
- 0x00000500, 0x0020);
+REG_STRIDE_FIELDS(DST_RSRC_GRP_01_RSRC_TYPE, dst_rsrc_grp_01_rsrc_type,
+ 0x00000500, 0x0020);
-static const u32 ipa_reg_dst_rsrc_grp_23_rsrc_type_fmask[] = {
+static const u32 reg_dst_rsrc_grp_23_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(7, 0),
[X_MAX_LIM] = GENMASK(15, 8),
[Y_MIN_LIM] = GENMASK(23, 16),
[Y_MAX_LIM] = GENMASK(31, 24),
};
-IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_23_RSRC_TYPE, dst_rsrc_grp_23_rsrc_type,
- 0x00000504, 0x0020);
+REG_STRIDE_FIELDS(DST_RSRC_GRP_23_RSRC_TYPE, dst_rsrc_grp_23_rsrc_type,
+ 0x00000504, 0x0020);
-static const u32 ipa_reg_dst_rsrc_grp_45_rsrc_type_fmask[] = {
+static const u32 reg_dst_rsrc_grp_45_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(7, 0),
[X_MAX_LIM] = GENMASK(15, 8),
[Y_MIN_LIM] = GENMASK(23, 16),
[Y_MAX_LIM] = GENMASK(31, 24),
};
-IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_45_RSRC_TYPE, dst_rsrc_grp_45_rsrc_type,
- 0x00000508, 0x0020);
+REG_STRIDE_FIELDS(DST_RSRC_GRP_45_RSRC_TYPE, dst_rsrc_grp_45_rsrc_type,
+ 0x00000508, 0x0020);
-static const u32 ipa_reg_dst_rsrc_grp_67_rsrc_type_fmask[] = {
+static const u32 reg_dst_rsrc_grp_67_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(7, 0),
[X_MAX_LIM] = GENMASK(15, 8),
[Y_MIN_LIM] = GENMASK(23, 16),
[Y_MAX_LIM] = GENMASK(31, 24),
};
-IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_67_RSRC_TYPE, dst_rsrc_grp_67_rsrc_type,
- 0x0000050c, 0x0020);
+REG_STRIDE_FIELDS(DST_RSRC_GRP_67_RSRC_TYPE, dst_rsrc_grp_67_rsrc_type,
+ 0x0000050c, 0x0020);
-static const u32 ipa_reg_endp_init_ctrl_fmask[] = {
+static const u32 reg_endp_init_ctrl_fmask[] = {
[ENDP_SUSPEND] = BIT(0),
[ENDP_DELAY] = BIT(1),
/* Bits 2-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_CTRL, endp_init_ctrl, 0x00000800, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_CTRL, endp_init_ctrl, 0x00000800, 0x0070);
-static const u32 ipa_reg_endp_init_cfg_fmask[] = {
+static const u32 reg_endp_init_cfg_fmask[] = {
[FRAG_OFFLOAD_EN] = BIT(0),
[CS_OFFLOAD_EN] = GENMASK(2, 1),
[CS_METADATA_HDR_OFFSET] = GENMASK(6, 3),
@@ -223,16 +223,16 @@ static const u32 ipa_reg_endp_init_cfg_fmask[] = {
/* Bits 9-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_CFG, endp_init_cfg, 0x00000808, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_CFG, endp_init_cfg, 0x00000808, 0x0070);
-static const u32 ipa_reg_endp_init_nat_fmask[] = {
+static const u32 reg_endp_init_nat_fmask[] = {
[NAT_EN] = GENMASK(1, 0),
/* Bits 2-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_NAT, endp_init_nat, 0x0000080c, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_NAT, endp_init_nat, 0x0000080c, 0x0070);
-static const u32 ipa_reg_endp_init_hdr_fmask[] = {
+static const u32 reg_endp_init_hdr_fmask[] = {
[HDR_LEN] = GENMASK(5, 0),
[HDR_OFST_METADATA_VALID] = BIT(6),
[HDR_OFST_METADATA] = GENMASK(12, 7),
@@ -245,9 +245,9 @@ static const u32 ipa_reg_endp_init_hdr_fmask[] = {
/* Bits 29-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_HDR, endp_init_hdr, 0x00000810, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_HDR, endp_init_hdr, 0x00000810, 0x0070);
-static const u32 ipa_reg_endp_init_hdr_ext_fmask[] = {
+static const u32 reg_endp_init_hdr_ext_fmask[] = {
[HDR_ENDIANNESS] = BIT(0),
[HDR_TOTAL_LEN_OR_PAD_VALID] = BIT(1),
[HDR_TOTAL_LEN_OR_PAD] = BIT(2),
@@ -257,12 +257,12 @@ static const u32 ipa_reg_endp_init_hdr_ext_fmask[] = {
/* Bits 14-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_HDR_EXT, endp_init_hdr_ext, 0x00000814, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_HDR_EXT, endp_init_hdr_ext, 0x00000814, 0x0070);
-IPA_REG_STRIDE(ENDP_INIT_HDR_METADATA_MASK, endp_init_hdr_metadata_mask,
- 0x00000818, 0x0070);
+REG_STRIDE(ENDP_INIT_HDR_METADATA_MASK, endp_init_hdr_metadata_mask,
+ 0x00000818, 0x0070);
-static const u32 ipa_reg_endp_init_mode_fmask[] = {
+static const u32 reg_endp_init_mode_fmask[] = {
[ENDP_MODE] = GENMASK(2, 0),
/* Bit 3 reserved */
[DEST_PIPE_INDEX] = GENMASK(8, 4),
@@ -274,9 +274,9 @@ static const u32 ipa_reg_endp_init_mode_fmask[] = {
/* Bit 31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_MODE, endp_init_mode, 0x00000820, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_MODE, endp_init_mode, 0x00000820, 0x0070);
-static const u32 ipa_reg_endp_init_aggr_fmask[] = {
+static const u32 reg_endp_init_aggr_fmask[] = {
[AGGR_EN] = GENMASK(1, 0),
[AGGR_TYPE] = GENMASK(4, 2),
[BYTE_LIMIT] = GENMASK(9, 5),
@@ -289,25 +289,25 @@ static const u32 ipa_reg_endp_init_aggr_fmask[] = {
/* Bits 25-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_AGGR, endp_init_aggr, 0x00000824, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_AGGR, endp_init_aggr, 0x00000824, 0x0070);
-static const u32 ipa_reg_endp_init_hol_block_en_fmask[] = {
+static const u32 reg_endp_init_hol_block_en_fmask[] = {
[HOL_BLOCK_EN] = BIT(0),
/* Bits 1-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_EN, endp_init_hol_block_en,
- 0x0000082c, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_EN, endp_init_hol_block_en,
+ 0x0000082c, 0x0070);
/* Entire register is a tick count */
-static const u32 ipa_reg_endp_init_hol_block_timer_fmask[] = {
+static const u32 reg_endp_init_hol_block_timer_fmask[] = {
[TIMER_BASE_VALUE] = GENMASK(31, 0),
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_TIMER, endp_init_hol_block_timer,
- 0x00000830, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_TIMER, endp_init_hol_block_timer,
+ 0x00000830, 0x0070);
-static const u32 ipa_reg_endp_init_deaggr_fmask[] = {
+static const u32 reg_endp_init_deaggr_fmask[] = {
[DEAGGR_HDR_LEN] = GENMASK(5, 0),
[SYSPIPE_ERR_DETECTION] = BIT(6),
[PACKET_OFFSET_VALID] = BIT(7),
@@ -317,25 +317,24 @@ static const u32 ipa_reg_endp_init_deaggr_fmask[] = {
[MAX_PACKET_LEN] = GENMASK(31, 16),
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_DEAGGR, endp_init_deaggr, 0x00000834, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_DEAGGR, endp_init_deaggr, 0x00000834, 0x0070);
-static const u32 ipa_reg_endp_init_rsrc_grp_fmask[] = {
+static const u32 reg_endp_init_rsrc_grp_fmask[] = {
[ENDP_RSRC_GRP] = GENMASK(2, 0),
/* Bits 3-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_RSRC_GRP, endp_init_rsrc_grp,
- 0x00000838, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_RSRC_GRP, endp_init_rsrc_grp, 0x00000838, 0x0070);
-static const u32 ipa_reg_endp_init_seq_fmask[] = {
+static const u32 reg_endp_init_seq_fmask[] = {
[SEQ_TYPE] = GENMASK(7, 0),
[SEQ_REP_TYPE] = GENMASK(15, 8),
/* Bits 16-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_SEQ, endp_init_seq, 0x0000083c, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_SEQ, endp_init_seq, 0x0000083c, 0x0070);
-static const u32 ipa_reg_endp_status_fmask[] = {
+static const u32 reg_endp_status_fmask[] = {
[STATUS_EN] = BIT(0),
[STATUS_ENDP] = GENMASK(5, 1),
/* Bits 6-7 reserved */
@@ -343,9 +342,9 @@ static const u32 ipa_reg_endp_status_fmask[] = {
/* Bits 9-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_STATUS, endp_status, 0x00000840, 0x0070);
+REG_STRIDE_FIELDS(ENDP_STATUS, endp_status, 0x00000840, 0x0070);
-static const u32 ipa_reg_endp_filter_router_hsh_cfg_fmask[] = {
+static const u32 reg_endp_filter_router_hsh_cfg_fmask[] = {
[FILTER_HASH_MSK_SRC_ID] = BIT(0),
[FILTER_HASH_MSK_SRC_IP] = BIT(1),
[FILTER_HASH_MSK_DST_IP] = BIT(2),
@@ -366,84 +365,84 @@ static const u32 ipa_reg_endp_filter_router_hsh_cfg_fmask[] = {
/* Bits 23-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_FILTER_ROUTER_HSH_CFG, endp_filter_router_hsh_cfg,
- 0x0000085c, 0x0070);
+REG_STRIDE_FIELDS(ENDP_FILTER_ROUTER_HSH_CFG, endp_filter_router_hsh_cfg,
+ 0x0000085c, 0x0070);
/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
-IPA_REG(IPA_IRQ_STTS, ipa_irq_stts, 0x00003008 + 0x1000 * GSI_EE_AP);
+REG(IPA_IRQ_STTS, ipa_irq_stts, 0x00003008 + 0x1000 * GSI_EE_AP);
/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
-IPA_REG(IPA_IRQ_EN, ipa_irq_en, 0x0000300c + 0x1000 * GSI_EE_AP);
+REG(IPA_IRQ_EN, ipa_irq_en, 0x0000300c + 0x1000 * GSI_EE_AP);
/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
-IPA_REG(IPA_IRQ_CLR, ipa_irq_clr, 0x00003010 + 0x1000 * GSI_EE_AP);
+REG(IPA_IRQ_CLR, ipa_irq_clr, 0x00003010 + 0x1000 * GSI_EE_AP);
-static const u32 ipa_reg_ipa_irq_uc_fmask[] = {
+static const u32 reg_ipa_irq_uc_fmask[] = {
[UC_INTR] = BIT(0),
/* Bits 1-31 reserved */
};
-IPA_REG_FIELDS(IPA_IRQ_UC, ipa_irq_uc, 0x0000301c + 0x1000 * GSI_EE_AP);
+REG_FIELDS(IPA_IRQ_UC, ipa_irq_uc, 0x0000301c + 0x1000 * GSI_EE_AP);
/* Valid bits defined by ipa->available */
-IPA_REG_STRIDE(IRQ_SUSPEND_INFO, irq_suspend_info,
- 0x00003030 + 0x1000 * GSI_EE_AP, 0x0004);
+REG_STRIDE(IRQ_SUSPEND_INFO, irq_suspend_info,
+ 0x00003030 + 0x1000 * GSI_EE_AP, 0x0004);
/* Valid bits defined by ipa->available */
-IPA_REG_STRIDE(IRQ_SUSPEND_EN, irq_suspend_en,
- 0x00003034 + 0x1000 * GSI_EE_AP, 0x0004);
+REG_STRIDE(IRQ_SUSPEND_EN, irq_suspend_en,
+ 0x00003034 + 0x1000 * GSI_EE_AP, 0x0004);
/* Valid bits defined by ipa->available */
-IPA_REG_STRIDE(IRQ_SUSPEND_CLR, irq_suspend_clr,
- 0x00003038 + 0x1000 * GSI_EE_AP, 0x0004);
-
-static const struct ipa_reg *ipa_reg_array[] = {
- [COMP_CFG] = &ipa_reg_comp_cfg,
- [CLKON_CFG] = &ipa_reg_clkon_cfg,
- [ROUTE] = &ipa_reg_route,
- [SHARED_MEM_SIZE] = &ipa_reg_shared_mem_size,
- [QSB_MAX_WRITES] = &ipa_reg_qsb_max_writes,
- [QSB_MAX_READS] = &ipa_reg_qsb_max_reads,
- [FILT_ROUT_HASH_EN] = &ipa_reg_filt_rout_hash_en,
- [FILT_ROUT_HASH_FLUSH] = &ipa_reg_filt_rout_hash_flush,
- [STATE_AGGR_ACTIVE] = &ipa_reg_state_aggr_active,
- [IPA_BCR] = &ipa_reg_ipa_bcr,
- [LOCAL_PKT_PROC_CNTXT] = &ipa_reg_local_pkt_proc_cntxt,
- [AGGR_FORCE_CLOSE] = &ipa_reg_aggr_force_close,
- [COUNTER_CFG] = &ipa_reg_counter_cfg,
- [SRC_RSRC_GRP_01_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_01_rsrc_type,
- [SRC_RSRC_GRP_23_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_23_rsrc_type,
- [SRC_RSRC_GRP_45_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_45_rsrc_type,
- [SRC_RSRC_GRP_67_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_67_rsrc_type,
- [DST_RSRC_GRP_01_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_01_rsrc_type,
- [DST_RSRC_GRP_23_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_23_rsrc_type,
- [DST_RSRC_GRP_45_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_45_rsrc_type,
- [DST_RSRC_GRP_67_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_67_rsrc_type,
- [ENDP_INIT_CTRL] = &ipa_reg_endp_init_ctrl,
- [ENDP_INIT_CFG] = &ipa_reg_endp_init_cfg,
- [ENDP_INIT_NAT] = &ipa_reg_endp_init_nat,
- [ENDP_INIT_HDR] = &ipa_reg_endp_init_hdr,
- [ENDP_INIT_HDR_EXT] = &ipa_reg_endp_init_hdr_ext,
- [ENDP_INIT_HDR_METADATA_MASK] = &ipa_reg_endp_init_hdr_metadata_mask,
- [ENDP_INIT_MODE] = &ipa_reg_endp_init_mode,
- [ENDP_INIT_AGGR] = &ipa_reg_endp_init_aggr,
- [ENDP_INIT_HOL_BLOCK_EN] = &ipa_reg_endp_init_hol_block_en,
- [ENDP_INIT_HOL_BLOCK_TIMER] = &ipa_reg_endp_init_hol_block_timer,
- [ENDP_INIT_DEAGGR] = &ipa_reg_endp_init_deaggr,
- [ENDP_INIT_RSRC_GRP] = &ipa_reg_endp_init_rsrc_grp,
- [ENDP_INIT_SEQ] = &ipa_reg_endp_init_seq,
- [ENDP_STATUS] = &ipa_reg_endp_status,
- [ENDP_FILTER_ROUTER_HSH_CFG] = &ipa_reg_endp_filter_router_hsh_cfg,
- [IPA_IRQ_STTS] = &ipa_reg_ipa_irq_stts,
- [IPA_IRQ_EN] = &ipa_reg_ipa_irq_en,
- [IPA_IRQ_CLR] = &ipa_reg_ipa_irq_clr,
- [IPA_IRQ_UC] = &ipa_reg_ipa_irq_uc,
- [IRQ_SUSPEND_INFO] = &ipa_reg_irq_suspend_info,
- [IRQ_SUSPEND_EN] = &ipa_reg_irq_suspend_en,
- [IRQ_SUSPEND_CLR] = &ipa_reg_irq_suspend_clr,
-};
-
-const struct ipa_regs ipa_regs_v3_1 = {
- .reg_count = ARRAY_SIZE(ipa_reg_array),
- .reg = ipa_reg_array,
+REG_STRIDE(IRQ_SUSPEND_CLR, irq_suspend_clr,
+ 0x00003038 + 0x1000 * GSI_EE_AP, 0x0004);
+
+static const struct reg *reg_array[] = {
+ [COMP_CFG] = &reg_comp_cfg,
+ [CLKON_CFG] = &reg_clkon_cfg,
+ [ROUTE] = &reg_route,
+ [SHARED_MEM_SIZE] = &reg_shared_mem_size,
+ [QSB_MAX_WRITES] = &reg_qsb_max_writes,
+ [QSB_MAX_READS] = &reg_qsb_max_reads,
+ [FILT_ROUT_HASH_EN] = &reg_filt_rout_hash_en,
+ [FILT_ROUT_HASH_FLUSH] = &reg_filt_rout_hash_flush,
+ [STATE_AGGR_ACTIVE] = &reg_state_aggr_active,
+ [IPA_BCR] = &reg_ipa_bcr,
+ [LOCAL_PKT_PROC_CNTXT] = &reg_local_pkt_proc_cntxt,
+ [AGGR_FORCE_CLOSE] = &reg_aggr_force_close,
+ [COUNTER_CFG] = &reg_counter_cfg,
+ [SRC_RSRC_GRP_01_RSRC_TYPE] = &reg_src_rsrc_grp_01_rsrc_type,
+ [SRC_RSRC_GRP_23_RSRC_TYPE] = &reg_src_rsrc_grp_23_rsrc_type,
+ [SRC_RSRC_GRP_45_RSRC_TYPE] = &reg_src_rsrc_grp_45_rsrc_type,
+ [SRC_RSRC_GRP_67_RSRC_TYPE] = &reg_src_rsrc_grp_67_rsrc_type,
+ [DST_RSRC_GRP_01_RSRC_TYPE] = &reg_dst_rsrc_grp_01_rsrc_type,
+ [DST_RSRC_GRP_23_RSRC_TYPE] = &reg_dst_rsrc_grp_23_rsrc_type,
+ [DST_RSRC_GRP_45_RSRC_TYPE] = &reg_dst_rsrc_grp_45_rsrc_type,
+ [DST_RSRC_GRP_67_RSRC_TYPE] = &reg_dst_rsrc_grp_67_rsrc_type,
+ [ENDP_INIT_CTRL] = &reg_endp_init_ctrl,
+ [ENDP_INIT_CFG] = &reg_endp_init_cfg,
+ [ENDP_INIT_NAT] = &reg_endp_init_nat,
+ [ENDP_INIT_HDR] = &reg_endp_init_hdr,
+ [ENDP_INIT_HDR_EXT] = &reg_endp_init_hdr_ext,
+ [ENDP_INIT_HDR_METADATA_MASK] = &reg_endp_init_hdr_metadata_mask,
+ [ENDP_INIT_MODE] = &reg_endp_init_mode,
+ [ENDP_INIT_AGGR] = &reg_endp_init_aggr,
+ [ENDP_INIT_HOL_BLOCK_EN] = &reg_endp_init_hol_block_en,
+ [ENDP_INIT_HOL_BLOCK_TIMER] = &reg_endp_init_hol_block_timer,
+ [ENDP_INIT_DEAGGR] = &reg_endp_init_deaggr,
+ [ENDP_INIT_RSRC_GRP] = &reg_endp_init_rsrc_grp,
+ [ENDP_INIT_SEQ] = &reg_endp_init_seq,
+ [ENDP_STATUS] = &reg_endp_status,
+ [ENDP_FILTER_ROUTER_HSH_CFG] = &reg_endp_filter_router_hsh_cfg,
+ [IPA_IRQ_STTS] = &reg_ipa_irq_stts,
+ [IPA_IRQ_EN] = &reg_ipa_irq_en,
+ [IPA_IRQ_CLR] = &reg_ipa_irq_clr,
+ [IPA_IRQ_UC] = &reg_ipa_irq_uc,
+ [IRQ_SUSPEND_INFO] = &reg_irq_suspend_info,
+ [IRQ_SUSPEND_EN] = &reg_irq_suspend_en,
+ [IRQ_SUSPEND_CLR] = &reg_irq_suspend_clr,
+};
+
+const struct regs ipa_regs_v3_1 = {
+ .reg_count = ARRAY_SIZE(reg_array),
+ .reg = reg_array,
};
diff --git a/drivers/net/ipa/reg/ipa_reg-v3.5.1.c b/drivers/net/ipa/reg/ipa_reg-v3.5.1.c
index b9c6a50de243..78b1bf60cd02 100644
--- a/drivers/net/ipa/reg/ipa_reg-v3.5.1.c
+++ b/drivers/net/ipa/reg/ipa_reg-v3.5.1.c
@@ -7,7 +7,7 @@
#include "../ipa.h"
#include "../ipa_reg.h"
-static const u32 ipa_reg_comp_cfg_fmask[] = {
+static const u32 reg_comp_cfg_fmask[] = {
[COMP_CFG_ENABLE] = BIT(0),
[GSI_SNOC_BYPASS_DIS] = BIT(1),
[GEN_QMB_0_SNOC_BYPASS_DIS] = BIT(2),
@@ -16,9 +16,9 @@ static const u32 ipa_reg_comp_cfg_fmask[] = {
/* Bits 5-31 reserved */
};
-IPA_REG_FIELDS(COMP_CFG, comp_cfg, 0x0000003c);
+REG_FIELDS(COMP_CFG, comp_cfg, 0x0000003c);
-static const u32 ipa_reg_clkon_cfg_fmask[] = {
+static const u32 reg_clkon_cfg_fmask[] = {
[CLKON_RX] = BIT(0),
[CLKON_PROC] = BIT(1),
[TX_WRAPPER] = BIT(2),
@@ -44,9 +44,9 @@ static const u32 ipa_reg_clkon_cfg_fmask[] = {
/* Bits 22-31 reserved */
};
-IPA_REG_FIELDS(CLKON_CFG, clkon_cfg, 0x00000044);
+REG_FIELDS(CLKON_CFG, clkon_cfg, 0x00000044);
-static const u32 ipa_reg_route_fmask[] = {
+static const u32 reg_route_fmask[] = {
[ROUTE_DIS] = BIT(0),
[ROUTE_DEF_PIPE] = GENMASK(5, 1),
[ROUTE_DEF_HDR_TABLE] = BIT(6),
@@ -57,31 +57,31 @@ static const u32 ipa_reg_route_fmask[] = {
/* Bits 25-31 reserved */
};
-IPA_REG_FIELDS(ROUTE, route, 0x00000048);
+REG_FIELDS(ROUTE, route, 0x00000048);
-static const u32 ipa_reg_shared_mem_size_fmask[] = {
+static const u32 reg_shared_mem_size_fmask[] = {
[MEM_SIZE] = GENMASK(15, 0),
[MEM_BADDR] = GENMASK(31, 16),
};
-IPA_REG_FIELDS(SHARED_MEM_SIZE, shared_mem_size, 0x00000054);
+REG_FIELDS(SHARED_MEM_SIZE, shared_mem_size, 0x00000054);
-static const u32 ipa_reg_qsb_max_writes_fmask[] = {
+static const u32 reg_qsb_max_writes_fmask[] = {
[GEN_QMB_0_MAX_WRITES] = GENMASK(3, 0),
[GEN_QMB_1_MAX_WRITES] = GENMASK(7, 4),
/* Bits 8-31 reserved */
};
-IPA_REG_FIELDS(QSB_MAX_WRITES, qsb_max_writes, 0x00000074);
+REG_FIELDS(QSB_MAX_WRITES, qsb_max_writes, 0x00000074);
-static const u32 ipa_reg_qsb_max_reads_fmask[] = {
+static const u32 reg_qsb_max_reads_fmask[] = {
[GEN_QMB_0_MAX_READS] = GENMASK(3, 0),
[GEN_QMB_1_MAX_READS] = GENMASK(7, 4),
};
-IPA_REG_FIELDS(QSB_MAX_READS, qsb_max_reads, 0x00000078);
+REG_FIELDS(QSB_MAX_READS, qsb_max_reads, 0x00000078);
-static const u32 ipa_reg_filt_rout_hash_en_fmask[] = {
+static const u32 reg_filt_rout_hash_en_fmask[] = {
[IPV6_ROUTER_HASH] = BIT(0),
/* Bits 1-3 reserved */
[IPV6_FILTER_HASH] = BIT(4),
@@ -92,9 +92,9 @@ static const u32 ipa_reg_filt_rout_hash_en_fmask[] = {
/* Bits 13-31 reserved */
};
-IPA_REG_FIELDS(FILT_ROUT_HASH_EN, filt_rout_hash_en, 0x000008c);
+REG_FIELDS(FILT_ROUT_HASH_EN, filt_rout_hash_en, 0x000008c);
-static const u32 ipa_reg_filt_rout_hash_flush_fmask[] = {
+static const u32 reg_filt_rout_hash_flush_fmask[] = {
[IPV6_ROUTER_HASH] = BIT(0),
/* Bits 1-3 reserved */
[IPV6_FILTER_HASH] = BIT(4),
@@ -105,42 +105,42 @@ static const u32 ipa_reg_filt_rout_hash_flush_fmask[] = {
/* Bits 13-31 reserved */
};
-IPA_REG_FIELDS(FILT_ROUT_HASH_FLUSH, filt_rout_hash_flush, 0x0000090);
+REG_FIELDS(FILT_ROUT_HASH_FLUSH, filt_rout_hash_flush, 0x0000090);
/* Valid bits defined by ipa->available */
-IPA_REG_STRIDE(STATE_AGGR_ACTIVE, state_aggr_active, 0x0000010c, 0x0004);
+REG_STRIDE(STATE_AGGR_ACTIVE, state_aggr_active, 0x0000010c, 0x0004);
-IPA_REG(IPA_BCR, ipa_bcr, 0x000001d0);
+REG(IPA_BCR, ipa_bcr, 0x000001d0);
-static const u32 ipa_reg_local_pkt_proc_cntxt_fmask[] = {
+static const u32 reg_local_pkt_proc_cntxt_fmask[] = {
[IPA_BASE_ADDR] = GENMASK(16, 0),
/* Bits 17-31 reserved */
};
/* Offset must be a multiple of 8 */
-IPA_REG_FIELDS(LOCAL_PKT_PROC_CNTXT, local_pkt_proc_cntxt, 0x000001e8);
+REG_FIELDS(LOCAL_PKT_PROC_CNTXT, local_pkt_proc_cntxt, 0x000001e8);
/* Valid bits defined by ipa->available */
-IPA_REG_STRIDE(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec, 0x0004);
+REG_STRIDE(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec, 0x0004);
-static const u32 ipa_reg_counter_cfg_fmask[] = {
+static const u32 reg_counter_cfg_fmask[] = {
/* Bits 0-3 reserved */
[AGGR_GRANULARITY] = GENMASK(8, 4),
/* Bits 5-31 reserved */
};
-IPA_REG_FIELDS(COUNTER_CFG, counter_cfg, 0x000001f0);
+REG_FIELDS(COUNTER_CFG, counter_cfg, 0x000001f0);
-static const u32 ipa_reg_ipa_tx_cfg_fmask[] = {
+static const u32 reg_ipa_tx_cfg_fmask[] = {
[TX0_PREFETCH_DISABLE] = BIT(0),
[TX1_PREFETCH_DISABLE] = BIT(1),
[PREFETCH_ALMOST_EMPTY_SIZE] = GENMASK(4, 2),
/* Bits 5-31 reserved */
};
-IPA_REG_FIELDS(IPA_TX_CFG, ipa_tx_cfg, 0x000001fc);
+REG_FIELDS(IPA_TX_CFG, ipa_tx_cfg, 0x000001fc);
-static const u32 ipa_reg_flavor_0_fmask[] = {
+static const u32 reg_flavor_0_fmask[] = {
[MAX_PIPES] = GENMASK(3, 0),
/* Bits 4-7 reserved */
[MAX_CONS_PIPES] = GENMASK(12, 8),
@@ -151,17 +151,17 @@ static const u32 ipa_reg_flavor_0_fmask[] = {
/* Bits 28-31 reserved */
};
-IPA_REG_FIELDS(FLAVOR_0, flavor_0, 0x00000210);
+REG_FIELDS(FLAVOR_0, flavor_0, 0x00000210);
-static const u32 ipa_reg_idle_indication_cfg_fmask[] = {
+static const u32 reg_idle_indication_cfg_fmask[] = {
[ENTER_IDLE_DEBOUNCE_THRESH] = GENMASK(15, 0),
[CONST_NON_IDLE_ENABLE] = BIT(16),
/* Bits 17-31 reserved */
};
-IPA_REG_FIELDS(IDLE_INDICATION_CFG, idle_indication_cfg, 0x00000220);
+REG_FIELDS(IDLE_INDICATION_CFG, idle_indication_cfg, 0x00000220);
-static const u32 ipa_reg_src_rsrc_grp_01_rsrc_type_fmask[] = {
+static const u32 reg_src_rsrc_grp_01_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
@@ -172,10 +172,10 @@ static const u32 ipa_reg_src_rsrc_grp_01_rsrc_type_fmask[] = {
/* Bits 30-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_01_RSRC_TYPE, src_rsrc_grp_01_rsrc_type,
- 0x00000400, 0x0020);
+REG_STRIDE_FIELDS(SRC_RSRC_GRP_01_RSRC_TYPE, src_rsrc_grp_01_rsrc_type,
+ 0x00000400, 0x0020);
-static const u32 ipa_reg_src_rsrc_grp_23_rsrc_type_fmask[] = {
+static const u32 reg_src_rsrc_grp_23_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
@@ -186,10 +186,10 @@ static const u32 ipa_reg_src_rsrc_grp_23_rsrc_type_fmask[] = {
/* Bits 30-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_23_RSRC_TYPE, src_rsrc_grp_23_rsrc_type,
- 0x00000404, 0x0020);
+REG_STRIDE_FIELDS(SRC_RSRC_GRP_23_RSRC_TYPE, src_rsrc_grp_23_rsrc_type,
+ 0x00000404, 0x0020);
-static const u32 ipa_reg_dst_rsrc_grp_01_rsrc_type_fmask[] = {
+static const u32 reg_dst_rsrc_grp_01_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
@@ -200,10 +200,10 @@ static const u32 ipa_reg_dst_rsrc_grp_01_rsrc_type_fmask[] = {
/* Bits 30-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_01_RSRC_TYPE, dst_rsrc_grp_01_rsrc_type,
- 0x00000500, 0x0020);
+REG_STRIDE_FIELDS(DST_RSRC_GRP_01_RSRC_TYPE, dst_rsrc_grp_01_rsrc_type,
+ 0x00000500, 0x0020);
-static const u32 ipa_reg_dst_rsrc_grp_23_rsrc_type_fmask[] = {
+static const u32 reg_dst_rsrc_grp_23_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
@@ -214,18 +214,18 @@ static const u32 ipa_reg_dst_rsrc_grp_23_rsrc_type_fmask[] = {
/* Bits 30-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_23_RSRC_TYPE, dst_rsrc_grp_23_rsrc_type,
- 0x00000504, 0x0020);
+REG_STRIDE_FIELDS(DST_RSRC_GRP_23_RSRC_TYPE, dst_rsrc_grp_23_rsrc_type,
+ 0x00000504, 0x0020);
-static const u32 ipa_reg_endp_init_ctrl_fmask[] = {
+static const u32 reg_endp_init_ctrl_fmask[] = {
[ENDP_SUSPEND] = BIT(0),
[ENDP_DELAY] = BIT(1),
/* Bits 2-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_CTRL, endp_init_ctrl, 0x00000800, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_CTRL, endp_init_ctrl, 0x00000800, 0x0070);
-static const u32 ipa_reg_endp_init_cfg_fmask[] = {
+static const u32 reg_endp_init_cfg_fmask[] = {
[FRAG_OFFLOAD_EN] = BIT(0),
[CS_OFFLOAD_EN] = GENMASK(2, 1),
[CS_METADATA_HDR_OFFSET] = GENMASK(6, 3),
@@ -234,16 +234,16 @@ static const u32 ipa_reg_endp_init_cfg_fmask[] = {
/* Bits 9-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_CFG, endp_init_cfg, 0x00000808, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_CFG, endp_init_cfg, 0x00000808, 0x0070);
-static const u32 ipa_reg_endp_init_nat_fmask[] = {
+static const u32 reg_endp_init_nat_fmask[] = {
[NAT_EN] = GENMASK(1, 0),
/* Bits 2-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_NAT, endp_init_nat, 0x0000080c, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_NAT, endp_init_nat, 0x0000080c, 0x0070);
-static const u32 ipa_reg_endp_init_hdr_fmask[] = {
+static const u32 reg_endp_init_hdr_fmask[] = {
[HDR_LEN] = GENMASK(5, 0),
[HDR_OFST_METADATA_VALID] = BIT(6),
[HDR_OFST_METADATA] = GENMASK(12, 7),
@@ -256,9 +256,9 @@ static const u32 ipa_reg_endp_init_hdr_fmask[] = {
/* Bits 29-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_HDR, endp_init_hdr, 0x00000810, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_HDR, endp_init_hdr, 0x00000810, 0x0070);
-static const u32 ipa_reg_endp_init_hdr_ext_fmask[] = {
+static const u32 reg_endp_init_hdr_ext_fmask[] = {
[HDR_ENDIANNESS] = BIT(0),
[HDR_TOTAL_LEN_OR_PAD_VALID] = BIT(1),
[HDR_TOTAL_LEN_OR_PAD] = BIT(2),
@@ -268,12 +268,12 @@ static const u32 ipa_reg_endp_init_hdr_ext_fmask[] = {
/* Bits 14-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_HDR_EXT, endp_init_hdr_ext, 0x00000814, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_HDR_EXT, endp_init_hdr_ext, 0x00000814, 0x0070);
-IPA_REG_STRIDE(ENDP_INIT_HDR_METADATA_MASK, endp_init_hdr_metadata_mask,
- 0x00000818, 0x0070);
+REG_STRIDE(ENDP_INIT_HDR_METADATA_MASK, endp_init_hdr_metadata_mask,
+ 0x00000818, 0x0070);
-static const u32 ipa_reg_endp_init_mode_fmask[] = {
+static const u32 reg_endp_init_mode_fmask[] = {
[ENDP_MODE] = GENMASK(2, 0),
/* Bit 3 reserved */
[DEST_PIPE_INDEX] = GENMASK(8, 4),
@@ -285,9 +285,9 @@ static const u32 ipa_reg_endp_init_mode_fmask[] = {
/* Bit 31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_MODE, endp_init_mode, 0x00000820, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_MODE, endp_init_mode, 0x00000820, 0x0070);
-static const u32 ipa_reg_endp_init_aggr_fmask[] = {
+static const u32 reg_endp_init_aggr_fmask[] = {
[AGGR_EN] = GENMASK(1, 0),
[AGGR_TYPE] = GENMASK(4, 2),
[BYTE_LIMIT] = GENMASK(9, 5),
@@ -300,25 +300,25 @@ static const u32 ipa_reg_endp_init_aggr_fmask[] = {
/* Bits 25-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_AGGR, endp_init_aggr, 0x00000824, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_AGGR, endp_init_aggr, 0x00000824, 0x0070);
-static const u32 ipa_reg_endp_init_hol_block_en_fmask[] = {
+static const u32 reg_endp_init_hol_block_en_fmask[] = {
[HOL_BLOCK_EN] = BIT(0),
/* Bits 1-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_EN, endp_init_hol_block_en,
- 0x0000082c, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_EN, endp_init_hol_block_en,
+ 0x0000082c, 0x0070);
/* Entire register is a tick count */
-static const u32 ipa_reg_endp_init_hol_block_timer_fmask[] = {
+static const u32 reg_endp_init_hol_block_timer_fmask[] = {
[TIMER_BASE_VALUE] = GENMASK(31, 0),
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_TIMER, endp_init_hol_block_timer,
- 0x00000830, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_TIMER, endp_init_hol_block_timer,
+ 0x00000830, 0x0070);
-static const u32 ipa_reg_endp_init_deaggr_fmask[] = {
+static const u32 reg_endp_init_deaggr_fmask[] = {
[DEAGGR_HDR_LEN] = GENMASK(5, 0),
[SYSPIPE_ERR_DETECTION] = BIT(6),
[PACKET_OFFSET_VALID] = BIT(7),
@@ -328,25 +328,24 @@ static const u32 ipa_reg_endp_init_deaggr_fmask[] = {
[MAX_PACKET_LEN] = GENMASK(31, 16),
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_DEAGGR, endp_init_deaggr, 0x00000834, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_DEAGGR, endp_init_deaggr, 0x00000834, 0x0070);
-static const u32 ipa_reg_endp_init_rsrc_grp_fmask[] = {
+static const u32 reg_endp_init_rsrc_grp_fmask[] = {
[ENDP_RSRC_GRP] = GENMASK(1, 0),
/* Bits 2-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_RSRC_GRP, endp_init_rsrc_grp,
- 0x00000838, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_RSRC_GRP, endp_init_rsrc_grp, 0x00000838, 0x0070);
-static const u32 ipa_reg_endp_init_seq_fmask[] = {
+static const u32 reg_endp_init_seq_fmask[] = {
[SEQ_TYPE] = GENMASK(7, 0),
[SEQ_REP_TYPE] = GENMASK(15, 8),
/* Bits 16-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_SEQ, endp_init_seq, 0x0000083c, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_SEQ, endp_init_seq, 0x0000083c, 0x0070);
-static const u32 ipa_reg_endp_status_fmask[] = {
+static const u32 reg_endp_status_fmask[] = {
[STATUS_EN] = BIT(0),
[STATUS_ENDP] = GENMASK(5, 1),
/* Bits 6-7 reserved */
@@ -354,9 +353,9 @@ static const u32 ipa_reg_endp_status_fmask[] = {
/* Bits 9-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_STATUS, endp_status, 0x00000840, 0x0070);
+REG_STRIDE_FIELDS(ENDP_STATUS, endp_status, 0x00000840, 0x0070);
-static const u32 ipa_reg_endp_filter_router_hsh_cfg_fmask[] = {
+static const u32 reg_endp_filter_router_hsh_cfg_fmask[] = {
[FILTER_HASH_MSK_SRC_ID] = BIT(0),
[FILTER_HASH_MSK_SRC_IP] = BIT(1),
[FILTER_HASH_MSK_DST_IP] = BIT(2),
@@ -377,83 +376,83 @@ static const u32 ipa_reg_endp_filter_router_hsh_cfg_fmask[] = {
/* Bits 23-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_FILTER_ROUTER_HSH_CFG, endp_filter_router_hsh_cfg,
- 0x0000085c, 0x0070);
+REG_STRIDE_FIELDS(ENDP_FILTER_ROUTER_HSH_CFG, endp_filter_router_hsh_cfg,
+ 0x0000085c, 0x0070);
/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
-IPA_REG(IPA_IRQ_STTS, ipa_irq_stts, 0x00003008 + 0x1000 * GSI_EE_AP);
+REG(IPA_IRQ_STTS, ipa_irq_stts, 0x00003008 + 0x1000 * GSI_EE_AP);
/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
-IPA_REG(IPA_IRQ_EN, ipa_irq_en, 0x0000300c + 0x1000 * GSI_EE_AP);
+REG(IPA_IRQ_EN, ipa_irq_en, 0x0000300c + 0x1000 * GSI_EE_AP);
/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
-IPA_REG(IPA_IRQ_CLR, ipa_irq_clr, 0x00003010 + 0x1000 * GSI_EE_AP);
+REG(IPA_IRQ_CLR, ipa_irq_clr, 0x00003010 + 0x1000 * GSI_EE_AP);
-static const u32 ipa_reg_ipa_irq_uc_fmask[] = {
+static const u32 reg_ipa_irq_uc_fmask[] = {
[UC_INTR] = BIT(0),
/* Bits 1-31 reserved */
};
-IPA_REG_FIELDS(IPA_IRQ_UC, ipa_irq_uc, 0x0000301c + 0x1000 * GSI_EE_AP);
+REG_FIELDS(IPA_IRQ_UC, ipa_irq_uc, 0x0000301c + 0x1000 * GSI_EE_AP);
/* Valid bits defined by ipa->available */
-IPA_REG_STRIDE(IRQ_SUSPEND_INFO, irq_suspend_info,
- 0x00003030 + 0x1000 * GSI_EE_AP, 0x0004);
+REG_STRIDE(IRQ_SUSPEND_INFO, irq_suspend_info,
+ 0x00003030 + 0x1000 * GSI_EE_AP, 0x0004);
/* Valid bits defined by ipa->available */
-IPA_REG_STRIDE(IRQ_SUSPEND_EN, irq_suspend_en,
- 0x00003034 + 0x1000 * GSI_EE_AP, 0x0004);
+REG_STRIDE(IRQ_SUSPEND_EN, irq_suspend_en,
+ 0x00003034 + 0x1000 * GSI_EE_AP, 0x0004);
/* Valid bits defined by ipa->available */
-IPA_REG_STRIDE(IRQ_SUSPEND_CLR, irq_suspend_clr,
- 0x00003038 + 0x1000 * GSI_EE_AP, 0x0004);
-
-static const struct ipa_reg *ipa_reg_array[] = {
- [COMP_CFG] = &ipa_reg_comp_cfg,
- [CLKON_CFG] = &ipa_reg_clkon_cfg,
- [ROUTE] = &ipa_reg_route,
- [SHARED_MEM_SIZE] = &ipa_reg_shared_mem_size,
- [QSB_MAX_WRITES] = &ipa_reg_qsb_max_writes,
- [QSB_MAX_READS] = &ipa_reg_qsb_max_reads,
- [FILT_ROUT_HASH_EN] = &ipa_reg_filt_rout_hash_en,
- [FILT_ROUT_HASH_FLUSH] = &ipa_reg_filt_rout_hash_flush,
- [STATE_AGGR_ACTIVE] = &ipa_reg_state_aggr_active,
- [IPA_BCR] = &ipa_reg_ipa_bcr,
- [LOCAL_PKT_PROC_CNTXT] = &ipa_reg_local_pkt_proc_cntxt,
- [AGGR_FORCE_CLOSE] = &ipa_reg_aggr_force_close,
- [COUNTER_CFG] = &ipa_reg_counter_cfg,
- [IPA_TX_CFG] = &ipa_reg_ipa_tx_cfg,
- [FLAVOR_0] = &ipa_reg_flavor_0,
- [IDLE_INDICATION_CFG] = &ipa_reg_idle_indication_cfg,
- [SRC_RSRC_GRP_01_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_01_rsrc_type,
- [SRC_RSRC_GRP_23_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_23_rsrc_type,
- [DST_RSRC_GRP_01_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_01_rsrc_type,
- [DST_RSRC_GRP_23_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_23_rsrc_type,
- [ENDP_INIT_CTRL] = &ipa_reg_endp_init_ctrl,
- [ENDP_INIT_CFG] = &ipa_reg_endp_init_cfg,
- [ENDP_INIT_NAT] = &ipa_reg_endp_init_nat,
- [ENDP_INIT_HDR] = &ipa_reg_endp_init_hdr,
- [ENDP_INIT_HDR_EXT] = &ipa_reg_endp_init_hdr_ext,
- [ENDP_INIT_HDR_METADATA_MASK] = &ipa_reg_endp_init_hdr_metadata_mask,
- [ENDP_INIT_MODE] = &ipa_reg_endp_init_mode,
- [ENDP_INIT_AGGR] = &ipa_reg_endp_init_aggr,
- [ENDP_INIT_HOL_BLOCK_EN] = &ipa_reg_endp_init_hol_block_en,
- [ENDP_INIT_HOL_BLOCK_TIMER] = &ipa_reg_endp_init_hol_block_timer,
- [ENDP_INIT_DEAGGR] = &ipa_reg_endp_init_deaggr,
- [ENDP_INIT_RSRC_GRP] = &ipa_reg_endp_init_rsrc_grp,
- [ENDP_INIT_SEQ] = &ipa_reg_endp_init_seq,
- [ENDP_STATUS] = &ipa_reg_endp_status,
- [ENDP_FILTER_ROUTER_HSH_CFG] = &ipa_reg_endp_filter_router_hsh_cfg,
- [IPA_IRQ_STTS] = &ipa_reg_ipa_irq_stts,
- [IPA_IRQ_EN] = &ipa_reg_ipa_irq_en,
- [IPA_IRQ_CLR] = &ipa_reg_ipa_irq_clr,
- [IPA_IRQ_UC] = &ipa_reg_ipa_irq_uc,
- [IRQ_SUSPEND_INFO] = &ipa_reg_irq_suspend_info,
- [IRQ_SUSPEND_EN] = &ipa_reg_irq_suspend_en,
- [IRQ_SUSPEND_CLR] = &ipa_reg_irq_suspend_clr,
-};
-
-const struct ipa_regs ipa_regs_v3_5_1 = {
- .reg_count = ARRAY_SIZE(ipa_reg_array),
- .reg = ipa_reg_array,
+REG_STRIDE(IRQ_SUSPEND_CLR, irq_suspend_clr,
+ 0x00003038 + 0x1000 * GSI_EE_AP, 0x0004);
+
+static const struct reg *reg_array[] = {
+ [COMP_CFG] = &reg_comp_cfg,
+ [CLKON_CFG] = &reg_clkon_cfg,
+ [ROUTE] = &reg_route,
+ [SHARED_MEM_SIZE] = &reg_shared_mem_size,
+ [QSB_MAX_WRITES] = &reg_qsb_max_writes,
+ [QSB_MAX_READS] = &reg_qsb_max_reads,
+ [FILT_ROUT_HASH_EN] = &reg_filt_rout_hash_en,
+ [FILT_ROUT_HASH_FLUSH] = &reg_filt_rout_hash_flush,
+ [STATE_AGGR_ACTIVE] = &reg_state_aggr_active,
+ [IPA_BCR] = &reg_ipa_bcr,
+ [LOCAL_PKT_PROC_CNTXT] = &reg_local_pkt_proc_cntxt,
+ [AGGR_FORCE_CLOSE] = &reg_aggr_force_close,
+ [COUNTER_CFG] = &reg_counter_cfg,
+ [IPA_TX_CFG] = &reg_ipa_tx_cfg,
+ [FLAVOR_0] = &reg_flavor_0,
+ [IDLE_INDICATION_CFG] = &reg_idle_indication_cfg,
+ [SRC_RSRC_GRP_01_RSRC_TYPE] = &reg_src_rsrc_grp_01_rsrc_type,
+ [SRC_RSRC_GRP_23_RSRC_TYPE] = &reg_src_rsrc_grp_23_rsrc_type,
+ [DST_RSRC_GRP_01_RSRC_TYPE] = &reg_dst_rsrc_grp_01_rsrc_type,
+ [DST_RSRC_GRP_23_RSRC_TYPE] = &reg_dst_rsrc_grp_23_rsrc_type,
+ [ENDP_INIT_CTRL] = &reg_endp_init_ctrl,
+ [ENDP_INIT_CFG] = &reg_endp_init_cfg,
+ [ENDP_INIT_NAT] = &reg_endp_init_nat,
+ [ENDP_INIT_HDR] = &reg_endp_init_hdr,
+ [ENDP_INIT_HDR_EXT] = &reg_endp_init_hdr_ext,
+ [ENDP_INIT_HDR_METADATA_MASK] = &reg_endp_init_hdr_metadata_mask,
+ [ENDP_INIT_MODE] = &reg_endp_init_mode,
+ [ENDP_INIT_AGGR] = &reg_endp_init_aggr,
+ [ENDP_INIT_HOL_BLOCK_EN] = &reg_endp_init_hol_block_en,
+ [ENDP_INIT_HOL_BLOCK_TIMER] = &reg_endp_init_hol_block_timer,
+ [ENDP_INIT_DEAGGR] = &reg_endp_init_deaggr,
+ [ENDP_INIT_RSRC_GRP] = &reg_endp_init_rsrc_grp,
+ [ENDP_INIT_SEQ] = &reg_endp_init_seq,
+ [ENDP_STATUS] = &reg_endp_status,
+ [ENDP_FILTER_ROUTER_HSH_CFG] = &reg_endp_filter_router_hsh_cfg,
+ [IPA_IRQ_STTS] = &reg_ipa_irq_stts,
+ [IPA_IRQ_EN] = &reg_ipa_irq_en,
+ [IPA_IRQ_CLR] = &reg_ipa_irq_clr,
+ [IPA_IRQ_UC] = &reg_ipa_irq_uc,
+ [IRQ_SUSPEND_INFO] = &reg_irq_suspend_info,
+ [IRQ_SUSPEND_EN] = &reg_irq_suspend_en,
+ [IRQ_SUSPEND_CLR] = &reg_irq_suspend_clr,
+};
+
+const struct regs ipa_regs_v3_5_1 = {
+ .reg_count = ARRAY_SIZE(reg_array),
+ .reg = reg_array,
};
diff --git a/drivers/net/ipa/reg/ipa_reg-v4.11.c b/drivers/net/ipa/reg/ipa_reg-v4.11.c
index 9a315130530d..29e71cce4a84 100644
--- a/drivers/net/ipa/reg/ipa_reg-v4.11.c
+++ b/drivers/net/ipa/reg/ipa_reg-v4.11.c
@@ -7,7 +7,7 @@
#include "../ipa.h"
#include "../ipa_reg.h"
-static const u32 ipa_reg_comp_cfg_fmask[] = {
+static const u32 reg_comp_cfg_fmask[] = {
[RAM_ARB_PRI_CLIENT_SAMP_FIX_DIS] = BIT(0),
[GSI_SNOC_BYPASS_DIS] = BIT(1),
[GEN_QMB_0_SNOC_BYPASS_DIS] = BIT(2),
@@ -36,9 +36,9 @@ static const u32 ipa_reg_comp_cfg_fmask[] = {
[GEN_QMB_0_DYNAMIC_ASIZE] = BIT(31),
};
-IPA_REG_FIELDS(COMP_CFG, comp_cfg, 0x0000003c);
+REG_FIELDS(COMP_CFG, comp_cfg, 0x0000003c);
-static const u32 ipa_reg_clkon_cfg_fmask[] = {
+static const u32 reg_clkon_cfg_fmask[] = {
[CLKON_RX] = BIT(0),
[CLKON_PROC] = BIT(1),
[TX_WRAPPER] = BIT(2),
@@ -73,9 +73,9 @@ static const u32 ipa_reg_clkon_cfg_fmask[] = {
[DRBIP] = BIT(31),
};
-IPA_REG_FIELDS(CLKON_CFG, clkon_cfg, 0x00000044);
+REG_FIELDS(CLKON_CFG, clkon_cfg, 0x00000044);
-static const u32 ipa_reg_route_fmask[] = {
+static const u32 reg_route_fmask[] = {
[ROUTE_DIS] = BIT(0),
[ROUTE_DEF_PIPE] = GENMASK(5, 1),
[ROUTE_DEF_HDR_TABLE] = BIT(6),
@@ -86,24 +86,24 @@ static const u32 ipa_reg_route_fmask[] = {
/* Bits 25-31 reserved */
};
-IPA_REG_FIELDS(ROUTE, route, 0x00000048);
+REG_FIELDS(ROUTE, route, 0x00000048);
-static const u32 ipa_reg_shared_mem_size_fmask[] = {
+static const u32 reg_shared_mem_size_fmask[] = {
[MEM_SIZE] = GENMASK(15, 0),
[MEM_BADDR] = GENMASK(31, 16),
};
-IPA_REG_FIELDS(SHARED_MEM_SIZE, shared_mem_size, 0x00000054);
+REG_FIELDS(SHARED_MEM_SIZE, shared_mem_size, 0x00000054);
-static const u32 ipa_reg_qsb_max_writes_fmask[] = {
+static const u32 reg_qsb_max_writes_fmask[] = {
[GEN_QMB_0_MAX_WRITES] = GENMASK(3, 0),
[GEN_QMB_1_MAX_WRITES] = GENMASK(7, 4),
/* Bits 8-31 reserved */
};
-IPA_REG_FIELDS(QSB_MAX_WRITES, qsb_max_writes, 0x00000074);
+REG_FIELDS(QSB_MAX_WRITES, qsb_max_writes, 0x00000074);
-static const u32 ipa_reg_qsb_max_reads_fmask[] = {
+static const u32 reg_qsb_max_reads_fmask[] = {
[GEN_QMB_0_MAX_READS] = GENMASK(3, 0),
[GEN_QMB_1_MAX_READS] = GENMASK(7, 4),
/* Bits 8-15 reserved */
@@ -111,9 +111,9 @@ static const u32 ipa_reg_qsb_max_reads_fmask[] = {
[GEN_QMB_1_MAX_READS_BEATS] = GENMASK(31, 24),
};
-IPA_REG_FIELDS(QSB_MAX_READS, qsb_max_reads, 0x00000078);
+REG_FIELDS(QSB_MAX_READS, qsb_max_reads, 0x00000078);
-static const u32 ipa_reg_filt_rout_hash_en_fmask[] = {
+static const u32 reg_filt_rout_hash_en_fmask[] = {
[IPV6_ROUTER_HASH] = BIT(0),
/* Bits 1-3 reserved */
[IPV6_FILTER_HASH] = BIT(4),
@@ -124,9 +124,9 @@ static const u32 ipa_reg_filt_rout_hash_en_fmask[] = {
/* Bits 13-31 reserved */
};
-IPA_REG_FIELDS(FILT_ROUT_HASH_EN, filt_rout_hash_en, 0x0000148);
+REG_FIELDS(FILT_ROUT_HASH_EN, filt_rout_hash_en, 0x0000148);
-static const u32 ipa_reg_filt_rout_hash_flush_fmask[] = {
+static const u32 reg_filt_rout_hash_flush_fmask[] = {
[IPV6_ROUTER_HASH] = BIT(0),
/* Bits 1-3 reserved */
[IPV6_FILTER_HASH] = BIT(4),
@@ -137,23 +137,23 @@ static const u32 ipa_reg_filt_rout_hash_flush_fmask[] = {
/* Bits 13-31 reserved */
};
-IPA_REG_FIELDS(FILT_ROUT_HASH_FLUSH, filt_rout_hash_flush, 0x000014c);
+REG_FIELDS(FILT_ROUT_HASH_FLUSH, filt_rout_hash_flush, 0x000014c);
/* Valid bits defined by ipa->available */
-IPA_REG_STRIDE(STATE_AGGR_ACTIVE, state_aggr_active, 0x000000b4, 0x0004);
+REG_STRIDE(STATE_AGGR_ACTIVE, state_aggr_active, 0x000000b4, 0x0004);
-static const u32 ipa_reg_local_pkt_proc_cntxt_fmask[] = {
+static const u32 reg_local_pkt_proc_cntxt_fmask[] = {
[IPA_BASE_ADDR] = GENMASK(17, 0),
/* Bits 18-31 reserved */
};
/* Offset must be a multiple of 8 */
-IPA_REG_FIELDS(LOCAL_PKT_PROC_CNTXT, local_pkt_proc_cntxt, 0x000001e8);
+REG_FIELDS(LOCAL_PKT_PROC_CNTXT, local_pkt_proc_cntxt, 0x000001e8);
/* Valid bits defined by ipa->available */
-IPA_REG_STRIDE(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec, 0x0004);
+REG_STRIDE(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec, 0x0004);
-static const u32 ipa_reg_ipa_tx_cfg_fmask[] = {
+static const u32 reg_ipa_tx_cfg_fmask[] = {
/* Bits 0-1 reserved */
[PREFETCH_ALMOST_EMPTY_SIZE_TX0] = GENMASK(5, 2),
[DMAW_SCND_OUTSD_PRED_THRESHOLD] = GENMASK(9, 6),
@@ -166,9 +166,9 @@ static const u32 ipa_reg_ipa_tx_cfg_fmask[] = {
/* Bits 19-31 reserved */
};
-IPA_REG_FIELDS(IPA_TX_CFG, ipa_tx_cfg, 0x000001fc);
+REG_FIELDS(IPA_TX_CFG, ipa_tx_cfg, 0x000001fc);
-static const u32 ipa_reg_flavor_0_fmask[] = {
+static const u32 reg_flavor_0_fmask[] = {
[MAX_PIPES] = GENMASK(4, 0),
/* Bits 5-7 reserved */
[MAX_CONS_PIPES] = GENMASK(12, 8),
@@ -179,17 +179,17 @@ static const u32 ipa_reg_flavor_0_fmask[] = {
/* Bits 28-31 reserved */
};
-IPA_REG_FIELDS(FLAVOR_0, flavor_0, 0x00000210);
+REG_FIELDS(FLAVOR_0, flavor_0, 0x00000210);
-static const u32 ipa_reg_idle_indication_cfg_fmask[] = {
+static const u32 reg_idle_indication_cfg_fmask[] = {
[ENTER_IDLE_DEBOUNCE_THRESH] = GENMASK(15, 0),
[CONST_NON_IDLE_ENABLE] = BIT(16),
/* Bits 17-31 reserved */
};
-IPA_REG_FIELDS(IDLE_INDICATION_CFG, idle_indication_cfg, 0x00000240);
+REG_FIELDS(IDLE_INDICATION_CFG, idle_indication_cfg, 0x00000240);
-static const u32 ipa_reg_qtime_timestamp_cfg_fmask[] = {
+static const u32 reg_qtime_timestamp_cfg_fmask[] = {
[DPL_TIMESTAMP_LSB] = GENMASK(4, 0),
/* Bits 5-6 reserved */
[DPL_TIMESTAMP_SEL] = BIT(7),
@@ -199,26 +199,26 @@ static const u32 ipa_reg_qtime_timestamp_cfg_fmask[] = {
/* Bits 21-31 reserved */
};
-IPA_REG_FIELDS(QTIME_TIMESTAMP_CFG, qtime_timestamp_cfg, 0x0000024c);
+REG_FIELDS(QTIME_TIMESTAMP_CFG, qtime_timestamp_cfg, 0x0000024c);
-static const u32 ipa_reg_timers_xo_clk_div_cfg_fmask[] = {
+static const u32 reg_timers_xo_clk_div_cfg_fmask[] = {
[DIV_VALUE] = GENMASK(8, 0),
/* Bits 9-30 reserved */
[DIV_ENABLE] = BIT(31),
};
-IPA_REG_FIELDS(TIMERS_XO_CLK_DIV_CFG, timers_xo_clk_div_cfg, 0x00000250);
+REG_FIELDS(TIMERS_XO_CLK_DIV_CFG, timers_xo_clk_div_cfg, 0x00000250);
-static const u32 ipa_reg_timers_pulse_gran_cfg_fmask[] = {
+static const u32 reg_timers_pulse_gran_cfg_fmask[] = {
[PULSE_GRAN_0] = GENMASK(2, 0),
[PULSE_GRAN_1] = GENMASK(5, 3),
[PULSE_GRAN_2] = GENMASK(8, 6),
/* Bits 9-31 reserved */
};
-IPA_REG_FIELDS(TIMERS_PULSE_GRAN_CFG, timers_pulse_gran_cfg, 0x00000254);
+REG_FIELDS(TIMERS_PULSE_GRAN_CFG, timers_pulse_gran_cfg, 0x00000254);
-static const u32 ipa_reg_src_rsrc_grp_01_rsrc_type_fmask[] = {
+static const u32 reg_src_rsrc_grp_01_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
@@ -229,10 +229,10 @@ static const u32 ipa_reg_src_rsrc_grp_01_rsrc_type_fmask[] = {
/* Bits 30-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_01_RSRC_TYPE, src_rsrc_grp_01_rsrc_type,
- 0x00000400, 0x0020);
+REG_STRIDE_FIELDS(SRC_RSRC_GRP_01_RSRC_TYPE, src_rsrc_grp_01_rsrc_type,
+ 0x00000400, 0x0020);
-static const u32 ipa_reg_src_rsrc_grp_23_rsrc_type_fmask[] = {
+static const u32 reg_src_rsrc_grp_23_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
@@ -243,10 +243,10 @@ static const u32 ipa_reg_src_rsrc_grp_23_rsrc_type_fmask[] = {
/* Bits 30-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_23_RSRC_TYPE, src_rsrc_grp_23_rsrc_type,
- 0x00000404, 0x0020);
+REG_STRIDE_FIELDS(SRC_RSRC_GRP_23_RSRC_TYPE, src_rsrc_grp_23_rsrc_type,
+ 0x00000404, 0x0020);
-static const u32 ipa_reg_dst_rsrc_grp_01_rsrc_type_fmask[] = {
+static const u32 reg_dst_rsrc_grp_01_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
@@ -257,10 +257,10 @@ static const u32 ipa_reg_dst_rsrc_grp_01_rsrc_type_fmask[] = {
/* Bits 30-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_01_RSRC_TYPE, dst_rsrc_grp_01_rsrc_type,
- 0x00000500, 0x0020);
+REG_STRIDE_FIELDS(DST_RSRC_GRP_01_RSRC_TYPE, dst_rsrc_grp_01_rsrc_type,
+ 0x00000500, 0x0020);
-static const u32 ipa_reg_dst_rsrc_grp_23_rsrc_type_fmask[] = {
+static const u32 reg_dst_rsrc_grp_23_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
@@ -271,10 +271,10 @@ static const u32 ipa_reg_dst_rsrc_grp_23_rsrc_type_fmask[] = {
/* Bits 30-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_23_RSRC_TYPE, dst_rsrc_grp_23_rsrc_type,
- 0x00000504, 0x0020);
+REG_STRIDE_FIELDS(DST_RSRC_GRP_23_RSRC_TYPE, dst_rsrc_grp_23_rsrc_type,
+ 0x00000504, 0x0020);
-static const u32 ipa_reg_endp_init_cfg_fmask[] = {
+static const u32 reg_endp_init_cfg_fmask[] = {
[FRAG_OFFLOAD_EN] = BIT(0),
[CS_OFFLOAD_EN] = GENMASK(2, 1),
[CS_METADATA_HDR_OFFSET] = GENMASK(6, 3),
@@ -283,16 +283,16 @@ static const u32 ipa_reg_endp_init_cfg_fmask[] = {
/* Bits 9-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_CFG, endp_init_cfg, 0x00000808, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_CFG, endp_init_cfg, 0x00000808, 0x0070);
-static const u32 ipa_reg_endp_init_nat_fmask[] = {
+static const u32 reg_endp_init_nat_fmask[] = {
[NAT_EN] = GENMASK(1, 0),
/* Bits 2-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_NAT, endp_init_nat, 0x0000080c, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_NAT, endp_init_nat, 0x0000080c, 0x0070);
-static const u32 ipa_reg_endp_init_hdr_fmask[] = {
+static const u32 reg_endp_init_hdr_fmask[] = {
[HDR_LEN] = GENMASK(5, 0),
[HDR_OFST_METADATA_VALID] = BIT(6),
[HDR_OFST_METADATA] = GENMASK(12, 7),
@@ -305,9 +305,9 @@ static const u32 ipa_reg_endp_init_hdr_fmask[] = {
[HDR_OFST_METADATA_MSB] = GENMASK(31, 30),
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_HDR, endp_init_hdr, 0x00000810, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_HDR, endp_init_hdr, 0x00000810, 0x0070);
-static const u32 ipa_reg_endp_init_hdr_ext_fmask[] = {
+static const u32 reg_endp_init_hdr_ext_fmask[] = {
[HDR_ENDIANNESS] = BIT(0),
[HDR_TOTAL_LEN_OR_PAD_VALID] = BIT(1),
[HDR_TOTAL_LEN_OR_PAD] = BIT(2),
@@ -321,12 +321,12 @@ static const u32 ipa_reg_endp_init_hdr_ext_fmask[] = {
/* Bits 22-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_HDR_EXT, endp_init_hdr_ext, 0x00000814, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_HDR_EXT, endp_init_hdr_ext, 0x00000814, 0x0070);
-IPA_REG_STRIDE(ENDP_INIT_HDR_METADATA_MASK, endp_init_hdr_metadata_mask,
- 0x00000818, 0x0070);
+REG_STRIDE(ENDP_INIT_HDR_METADATA_MASK, endp_init_hdr_metadata_mask,
+ 0x00000818, 0x0070);
-static const u32 ipa_reg_endp_init_mode_fmask[] = {
+static const u32 reg_endp_init_mode_fmask[] = {
[ENDP_MODE] = GENMASK(2, 0),
[DCPH_ENABLE] = BIT(3),
[DEST_PIPE_INDEX] = GENMASK(8, 4),
@@ -338,9 +338,9 @@ static const u32 ipa_reg_endp_init_mode_fmask[] = {
/* Bit 31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_MODE, endp_init_mode, 0x00000820, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_MODE, endp_init_mode, 0x00000820, 0x0070);
-static const u32 ipa_reg_endp_init_aggr_fmask[] = {
+static const u32 reg_endp_init_aggr_fmask[] = {
[AGGR_EN] = GENMASK(1, 0),
[AGGR_TYPE] = GENMASK(4, 2),
[BYTE_LIMIT] = GENMASK(10, 5),
@@ -355,27 +355,27 @@ static const u32 ipa_reg_endp_init_aggr_fmask[] = {
/* Bits 28-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_AGGR, endp_init_aggr, 0x00000824, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_AGGR, endp_init_aggr, 0x00000824, 0x0070);
-static const u32 ipa_reg_endp_init_hol_block_en_fmask[] = {
+static const u32 reg_endp_init_hol_block_en_fmask[] = {
[HOL_BLOCK_EN] = BIT(0),
/* Bits 1-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_EN, endp_init_hol_block_en,
- 0x0000082c, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_EN, endp_init_hol_block_en,
+ 0x0000082c, 0x0070);
-static const u32 ipa_reg_endp_init_hol_block_timer_fmask[] = {
+static const u32 reg_endp_init_hol_block_timer_fmask[] = {
[TIMER_LIMIT] = GENMASK(4, 0),
/* Bits 5-7 reserved */
[TIMER_GRAN_SEL] = BIT(8),
/* Bits 9-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_TIMER, endp_init_hol_block_timer,
- 0x00000830, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_TIMER, endp_init_hol_block_timer,
+ 0x00000830, 0x0070);
-static const u32 ipa_reg_endp_init_deaggr_fmask[] = {
+static const u32 reg_endp_init_deaggr_fmask[] = {
[DEAGGR_HDR_LEN] = GENMASK(5, 0),
[SYSPIPE_ERR_DETECTION] = BIT(6),
[PACKET_OFFSET_VALID] = BIT(7),
@@ -385,24 +385,23 @@ static const u32 ipa_reg_endp_init_deaggr_fmask[] = {
[MAX_PACKET_LEN] = GENMASK(31, 16),
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_DEAGGR, endp_init_deaggr, 0x00000834, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_DEAGGR, endp_init_deaggr, 0x00000834, 0x0070);
-static const u32 ipa_reg_endp_init_rsrc_grp_fmask[] = {
+static const u32 reg_endp_init_rsrc_grp_fmask[] = {
[ENDP_RSRC_GRP] = GENMASK(1, 0),
/* Bits 2-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_RSRC_GRP, endp_init_rsrc_grp,
- 0x00000838, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_RSRC_GRP, endp_init_rsrc_grp, 0x00000838, 0x0070);
-static const u32 ipa_reg_endp_init_seq_fmask[] = {
+static const u32 reg_endp_init_seq_fmask[] = {
[SEQ_TYPE] = GENMASK(7, 0),
/* Bits 8-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_SEQ, endp_init_seq, 0x0000083c, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_SEQ, endp_init_seq, 0x0000083c, 0x0070);
-static const u32 ipa_reg_endp_status_fmask[] = {
+static const u32 reg_endp_status_fmask[] = {
[STATUS_EN] = BIT(0),
[STATUS_ENDP] = GENMASK(5, 1),
/* Bits 6-8 reserved */
@@ -410,9 +409,9 @@ static const u32 ipa_reg_endp_status_fmask[] = {
/* Bits 10-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_STATUS, endp_status, 0x00000840, 0x0070);
+REG_STRIDE_FIELDS(ENDP_STATUS, endp_status, 0x00000840, 0x0070);
-static const u32 ipa_reg_endp_filter_router_hsh_cfg_fmask[] = {
+static const u32 reg_endp_filter_router_hsh_cfg_fmask[] = {
[FILTER_HASH_MSK_SRC_ID] = BIT(0),
[FILTER_HASH_MSK_SRC_IP] = BIT(1),
[FILTER_HASH_MSK_DST_IP] = BIT(2),
@@ -433,83 +432,83 @@ static const u32 ipa_reg_endp_filter_router_hsh_cfg_fmask[] = {
/* Bits 23-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_FILTER_ROUTER_HSH_CFG, endp_filter_router_hsh_cfg,
- 0x0000085c, 0x0070);
+REG_STRIDE_FIELDS(ENDP_FILTER_ROUTER_HSH_CFG, endp_filter_router_hsh_cfg,
+ 0x0000085c, 0x0070);
/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
-IPA_REG(IPA_IRQ_STTS, ipa_irq_stts, 0x00004008 + 0x1000 * GSI_EE_AP);
+REG(IPA_IRQ_STTS, ipa_irq_stts, 0x00004008 + 0x1000 * GSI_EE_AP);
/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
-IPA_REG(IPA_IRQ_EN, ipa_irq_en, 0x0000400c + 0x1000 * GSI_EE_AP);
+REG(IPA_IRQ_EN, ipa_irq_en, 0x0000400c + 0x1000 * GSI_EE_AP);
/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
-IPA_REG(IPA_IRQ_CLR, ipa_irq_clr, 0x00004010 + 0x1000 * GSI_EE_AP);
+REG(IPA_IRQ_CLR, ipa_irq_clr, 0x00004010 + 0x1000 * GSI_EE_AP);
-static const u32 ipa_reg_ipa_irq_uc_fmask[] = {
+static const u32 reg_ipa_irq_uc_fmask[] = {
[UC_INTR] = BIT(0),
/* Bits 1-31 reserved */
};
-IPA_REG_FIELDS(IPA_IRQ_UC, ipa_irq_uc, 0x0000401c + 0x1000 * GSI_EE_AP);
+REG_FIELDS(IPA_IRQ_UC, ipa_irq_uc, 0x0000401c + 0x1000 * GSI_EE_AP);
/* Valid bits defined by ipa->available */
-IPA_REG_STRIDE(IRQ_SUSPEND_INFO, irq_suspend_info,
- 0x00004030 + 0x1000 * GSI_EE_AP, 0x0004);
+REG_STRIDE(IRQ_SUSPEND_INFO, irq_suspend_info,
+ 0x00004030 + 0x1000 * GSI_EE_AP, 0x0004);
/* Valid bits defined by ipa->available */
-IPA_REG_STRIDE(IRQ_SUSPEND_EN, irq_suspend_en,
- 0x00004034 + 0x1000 * GSI_EE_AP, 0x0004);
+REG_STRIDE(IRQ_SUSPEND_EN, irq_suspend_en,
+ 0x00004034 + 0x1000 * GSI_EE_AP, 0x0004);
/* Valid bits defined by ipa->available */
-IPA_REG_STRIDE(IRQ_SUSPEND_CLR, irq_suspend_clr,
- 0x00004038 + 0x1000 * GSI_EE_AP, 0x0004);
-
-static const struct ipa_reg *ipa_reg_array[] = {
- [COMP_CFG] = &ipa_reg_comp_cfg,
- [CLKON_CFG] = &ipa_reg_clkon_cfg,
- [ROUTE] = &ipa_reg_route,
- [SHARED_MEM_SIZE] = &ipa_reg_shared_mem_size,
- [QSB_MAX_WRITES] = &ipa_reg_qsb_max_writes,
- [QSB_MAX_READS] = &ipa_reg_qsb_max_reads,
- [FILT_ROUT_HASH_EN] = &ipa_reg_filt_rout_hash_en,
- [FILT_ROUT_HASH_FLUSH] = &ipa_reg_filt_rout_hash_flush,
- [STATE_AGGR_ACTIVE] = &ipa_reg_state_aggr_active,
- [LOCAL_PKT_PROC_CNTXT] = &ipa_reg_local_pkt_proc_cntxt,
- [AGGR_FORCE_CLOSE] = &ipa_reg_aggr_force_close,
- [IPA_TX_CFG] = &ipa_reg_ipa_tx_cfg,
- [FLAVOR_0] = &ipa_reg_flavor_0,
- [IDLE_INDICATION_CFG] = &ipa_reg_idle_indication_cfg,
- [QTIME_TIMESTAMP_CFG] = &ipa_reg_qtime_timestamp_cfg,
- [TIMERS_XO_CLK_DIV_CFG] = &ipa_reg_timers_xo_clk_div_cfg,
- [TIMERS_PULSE_GRAN_CFG] = &ipa_reg_timers_pulse_gran_cfg,
- [SRC_RSRC_GRP_01_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_01_rsrc_type,
- [SRC_RSRC_GRP_23_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_23_rsrc_type,
- [DST_RSRC_GRP_01_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_01_rsrc_type,
- [DST_RSRC_GRP_23_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_23_rsrc_type,
- [ENDP_INIT_CFG] = &ipa_reg_endp_init_cfg,
- [ENDP_INIT_NAT] = &ipa_reg_endp_init_nat,
- [ENDP_INIT_HDR] = &ipa_reg_endp_init_hdr,
- [ENDP_INIT_HDR_EXT] = &ipa_reg_endp_init_hdr_ext,
- [ENDP_INIT_HDR_METADATA_MASK] = &ipa_reg_endp_init_hdr_metadata_mask,
- [ENDP_INIT_MODE] = &ipa_reg_endp_init_mode,
- [ENDP_INIT_AGGR] = &ipa_reg_endp_init_aggr,
- [ENDP_INIT_HOL_BLOCK_EN] = &ipa_reg_endp_init_hol_block_en,
- [ENDP_INIT_HOL_BLOCK_TIMER] = &ipa_reg_endp_init_hol_block_timer,
- [ENDP_INIT_DEAGGR] = &ipa_reg_endp_init_deaggr,
- [ENDP_INIT_RSRC_GRP] = &ipa_reg_endp_init_rsrc_grp,
- [ENDP_INIT_SEQ] = &ipa_reg_endp_init_seq,
- [ENDP_STATUS] = &ipa_reg_endp_status,
- [ENDP_FILTER_ROUTER_HSH_CFG] = &ipa_reg_endp_filter_router_hsh_cfg,
- [IPA_IRQ_STTS] = &ipa_reg_ipa_irq_stts,
- [IPA_IRQ_EN] = &ipa_reg_ipa_irq_en,
- [IPA_IRQ_CLR] = &ipa_reg_ipa_irq_clr,
- [IPA_IRQ_UC] = &ipa_reg_ipa_irq_uc,
- [IRQ_SUSPEND_INFO] = &ipa_reg_irq_suspend_info,
- [IRQ_SUSPEND_EN] = &ipa_reg_irq_suspend_en,
- [IRQ_SUSPEND_CLR] = &ipa_reg_irq_suspend_clr,
-};
-
-const struct ipa_regs ipa_regs_v4_11 = {
- .reg_count = ARRAY_SIZE(ipa_reg_array),
- .reg = ipa_reg_array,
+REG_STRIDE(IRQ_SUSPEND_CLR, irq_suspend_clr,
+ 0x00004038 + 0x1000 * GSI_EE_AP, 0x0004);
+
+static const struct reg *reg_array[] = {
+ [COMP_CFG] = &reg_comp_cfg,
+ [CLKON_CFG] = &reg_clkon_cfg,
+ [ROUTE] = &reg_route,
+ [SHARED_MEM_SIZE] = &reg_shared_mem_size,
+ [QSB_MAX_WRITES] = &reg_qsb_max_writes,
+ [QSB_MAX_READS] = &reg_qsb_max_reads,
+ [FILT_ROUT_HASH_EN] = &reg_filt_rout_hash_en,
+ [FILT_ROUT_HASH_FLUSH] = &reg_filt_rout_hash_flush,
+ [STATE_AGGR_ACTIVE] = &reg_state_aggr_active,
+ [LOCAL_PKT_PROC_CNTXT] = &reg_local_pkt_proc_cntxt,
+ [AGGR_FORCE_CLOSE] = &reg_aggr_force_close,
+ [IPA_TX_CFG] = &reg_ipa_tx_cfg,
+ [FLAVOR_0] = &reg_flavor_0,
+ [IDLE_INDICATION_CFG] = &reg_idle_indication_cfg,
+ [QTIME_TIMESTAMP_CFG] = &reg_qtime_timestamp_cfg,
+ [TIMERS_XO_CLK_DIV_CFG] = &reg_timers_xo_clk_div_cfg,
+ [TIMERS_PULSE_GRAN_CFG] = &reg_timers_pulse_gran_cfg,
+ [SRC_RSRC_GRP_01_RSRC_TYPE] = &reg_src_rsrc_grp_01_rsrc_type,
+ [SRC_RSRC_GRP_23_RSRC_TYPE] = &reg_src_rsrc_grp_23_rsrc_type,
+ [DST_RSRC_GRP_01_RSRC_TYPE] = &reg_dst_rsrc_grp_01_rsrc_type,
+ [DST_RSRC_GRP_23_RSRC_TYPE] = &reg_dst_rsrc_grp_23_rsrc_type,
+ [ENDP_INIT_CFG] = &reg_endp_init_cfg,
+ [ENDP_INIT_NAT] = &reg_endp_init_nat,
+ [ENDP_INIT_HDR] = &reg_endp_init_hdr,
+ [ENDP_INIT_HDR_EXT] = &reg_endp_init_hdr_ext,
+ [ENDP_INIT_HDR_METADATA_MASK] = &reg_endp_init_hdr_metadata_mask,
+ [ENDP_INIT_MODE] = &reg_endp_init_mode,
+ [ENDP_INIT_AGGR] = &reg_endp_init_aggr,
+ [ENDP_INIT_HOL_BLOCK_EN] = &reg_endp_init_hol_block_en,
+ [ENDP_INIT_HOL_BLOCK_TIMER] = &reg_endp_init_hol_block_timer,
+ [ENDP_INIT_DEAGGR] = &reg_endp_init_deaggr,
+ [ENDP_INIT_RSRC_GRP] = &reg_endp_init_rsrc_grp,
+ [ENDP_INIT_SEQ] = &reg_endp_init_seq,
+ [ENDP_STATUS] = &reg_endp_status,
+ [ENDP_FILTER_ROUTER_HSH_CFG] = &reg_endp_filter_router_hsh_cfg,
+ [IPA_IRQ_STTS] = &reg_ipa_irq_stts,
+ [IPA_IRQ_EN] = &reg_ipa_irq_en,
+ [IPA_IRQ_CLR] = &reg_ipa_irq_clr,
+ [IPA_IRQ_UC] = &reg_ipa_irq_uc,
+ [IRQ_SUSPEND_INFO] = &reg_irq_suspend_info,
+ [IRQ_SUSPEND_EN] = &reg_irq_suspend_en,
+ [IRQ_SUSPEND_CLR] = &reg_irq_suspend_clr,
+};
+
+const struct regs ipa_regs_v4_11 = {
+ .reg_count = ARRAY_SIZE(reg_array),
+ .reg = reg_array,
};
diff --git a/drivers/net/ipa/reg/ipa_reg-v4.2.c b/drivers/net/ipa/reg/ipa_reg-v4.2.c
index 7a95149f8ec7..bb7cf488144d 100644
--- a/drivers/net/ipa/reg/ipa_reg-v4.2.c
+++ b/drivers/net/ipa/reg/ipa_reg-v4.2.c
@@ -7,7 +7,7 @@
#include "../ipa.h"
#include "../ipa_reg.h"
-static const u32 ipa_reg_comp_cfg_fmask[] = {
+static const u32 reg_comp_cfg_fmask[] = {
/* Bit 0 reserved */
[GSI_SNOC_BYPASS_DIS] = BIT(1),
[GEN_QMB_0_SNOC_BYPASS_DIS] = BIT(2),
@@ -29,9 +29,9 @@ static const u32 ipa_reg_comp_cfg_fmask[] = {
/* Bits 21-31 reserved */
};
-IPA_REG_FIELDS(COMP_CFG, comp_cfg, 0x0000003c);
+REG_FIELDS(COMP_CFG, comp_cfg, 0x0000003c);
-static const u32 ipa_reg_clkon_cfg_fmask[] = {
+static const u32 reg_clkon_cfg_fmask[] = {
[CLKON_RX] = BIT(0),
[CLKON_PROC] = BIT(1),
[TX_WRAPPER] = BIT(2),
@@ -65,9 +65,9 @@ static const u32 ipa_reg_clkon_cfg_fmask[] = {
/* Bits 30-31 reserved */
};
-IPA_REG_FIELDS(CLKON_CFG, clkon_cfg, 0x00000044);
+REG_FIELDS(CLKON_CFG, clkon_cfg, 0x00000044);
-static const u32 ipa_reg_route_fmask[] = {
+static const u32 reg_route_fmask[] = {
[ROUTE_DIS] = BIT(0),
[ROUTE_DEF_PIPE] = GENMASK(5, 1),
[ROUTE_DEF_HDR_TABLE] = BIT(6),
@@ -78,24 +78,24 @@ static const u32 ipa_reg_route_fmask[] = {
/* Bits 25-31 reserved */
};
-IPA_REG_FIELDS(ROUTE, route, 0x00000048);
+REG_FIELDS(ROUTE, route, 0x00000048);
-static const u32 ipa_reg_shared_mem_size_fmask[] = {
+static const u32 reg_shared_mem_size_fmask[] = {
[MEM_SIZE] = GENMASK(15, 0),
[MEM_BADDR] = GENMASK(31, 16),
};
-IPA_REG_FIELDS(SHARED_MEM_SIZE, shared_mem_size, 0x00000054);
+REG_FIELDS(SHARED_MEM_SIZE, shared_mem_size, 0x00000054);
-static const u32 ipa_reg_qsb_max_writes_fmask[] = {
+static const u32 reg_qsb_max_writes_fmask[] = {
[GEN_QMB_0_MAX_WRITES] = GENMASK(3, 0),
[GEN_QMB_1_MAX_WRITES] = GENMASK(7, 4),
/* Bits 8-31 reserved */
};
-IPA_REG_FIELDS(QSB_MAX_WRITES, qsb_max_writes, 0x00000074);
+REG_FIELDS(QSB_MAX_WRITES, qsb_max_writes, 0x00000074);
-static const u32 ipa_reg_qsb_max_reads_fmask[] = {
+static const u32 reg_qsb_max_reads_fmask[] = {
[GEN_QMB_0_MAX_READS] = GENMASK(3, 0),
[GEN_QMB_1_MAX_READS] = GENMASK(7, 4),
/* Bits 8-15 reserved */
@@ -103,9 +103,9 @@ static const u32 ipa_reg_qsb_max_reads_fmask[] = {
[GEN_QMB_1_MAX_READS_BEATS] = GENMASK(31, 24),
};
-IPA_REG_FIELDS(QSB_MAX_READS, qsb_max_reads, 0x00000078);
+REG_FIELDS(QSB_MAX_READS, qsb_max_reads, 0x00000078);
-static const u32 ipa_reg_filt_rout_hash_en_fmask[] = {
+static const u32 reg_filt_rout_hash_en_fmask[] = {
[IPV6_ROUTER_HASH] = BIT(0),
/* Bits 1-3 reserved */
[IPV6_FILTER_HASH] = BIT(4),
@@ -116,9 +116,9 @@ static const u32 ipa_reg_filt_rout_hash_en_fmask[] = {
/* Bits 13-31 reserved */
};
-IPA_REG_FIELDS(FILT_ROUT_HASH_EN, filt_rout_hash_en, 0x0000148);
+REG_FIELDS(FILT_ROUT_HASH_EN, filt_rout_hash_en, 0x0000148);
-static const u32 ipa_reg_filt_rout_hash_flush_fmask[] = {
+static const u32 reg_filt_rout_hash_flush_fmask[] = {
[IPV6_ROUTER_HASH] = BIT(0),
/* Bits 1-3 reserved */
[IPV6_FILTER_HASH] = BIT(4),
@@ -129,33 +129,33 @@ static const u32 ipa_reg_filt_rout_hash_flush_fmask[] = {
/* Bits 13-31 reserved */
};
-IPA_REG_FIELDS(FILT_ROUT_HASH_FLUSH, filt_rout_hash_flush, 0x000014c);
+REG_FIELDS(FILT_ROUT_HASH_FLUSH, filt_rout_hash_flush, 0x000014c);
/* Valid bits defined by ipa->available */
-IPA_REG_STRIDE(STATE_AGGR_ACTIVE, state_aggr_active, 0x000000b4, 0x0004);
+REG_STRIDE(STATE_AGGR_ACTIVE, state_aggr_active, 0x000000b4, 0x0004);
-IPA_REG(IPA_BCR, ipa_bcr, 0x000001d0);
+REG(IPA_BCR, ipa_bcr, 0x000001d0);
-static const u32 ipa_reg_local_pkt_proc_cntxt_fmask[] = {
+static const u32 reg_local_pkt_proc_cntxt_fmask[] = {
[IPA_BASE_ADDR] = GENMASK(16, 0),
/* Bits 17-31 reserved */
};
/* Offset must be a multiple of 8 */
-IPA_REG_FIELDS(LOCAL_PKT_PROC_CNTXT, local_pkt_proc_cntxt, 0x000001e8);
+REG_FIELDS(LOCAL_PKT_PROC_CNTXT, local_pkt_proc_cntxt, 0x000001e8);
/* Valid bits defined by ipa->available */
-IPA_REG_STRIDE(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec, 0x0004);
+REG_STRIDE(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec, 0x0004);
-static const u32 ipa_reg_counter_cfg_fmask[] = {
+static const u32 reg_counter_cfg_fmask[] = {
/* Bits 0-3 reserved */
[AGGR_GRANULARITY] = GENMASK(8, 4),
/* Bits 9-31 reserved */
};
-IPA_REG_FIELDS(COUNTER_CFG, counter_cfg, 0x000001f0);
+REG_FIELDS(COUNTER_CFG, counter_cfg, 0x000001f0);
-static const u32 ipa_reg_ipa_tx_cfg_fmask[] = {
+static const u32 reg_ipa_tx_cfg_fmask[] = {
/* Bits 0-1 reserved */
[PREFETCH_ALMOST_EMPTY_SIZE_TX0] = GENMASK(5, 2),
[DMAW_SCND_OUTSD_PRED_THRESHOLD] = GENMASK(9, 6),
@@ -169,9 +169,9 @@ static const u32 ipa_reg_ipa_tx_cfg_fmask[] = {
/* Bits 20-31 reserved */
};
-IPA_REG_FIELDS(IPA_TX_CFG, ipa_tx_cfg, 0x000001fc);
+REG_FIELDS(IPA_TX_CFG, ipa_tx_cfg, 0x000001fc);
-static const u32 ipa_reg_flavor_0_fmask[] = {
+static const u32 reg_flavor_0_fmask[] = {
[MAX_PIPES] = GENMASK(3, 0),
/* Bits 4-7 reserved */
[MAX_CONS_PIPES] = GENMASK(12, 8),
@@ -182,17 +182,17 @@ static const u32 ipa_reg_flavor_0_fmask[] = {
/* Bits 28-31 reserved */
};
-IPA_REG_FIELDS(FLAVOR_0, flavor_0, 0x00000210);
+REG_FIELDS(FLAVOR_0, flavor_0, 0x00000210);
-static const u32 ipa_reg_idle_indication_cfg_fmask[] = {
+static const u32 reg_idle_indication_cfg_fmask[] = {
[ENTER_IDLE_DEBOUNCE_THRESH] = GENMASK(15, 0),
[CONST_NON_IDLE_ENABLE] = BIT(16),
/* Bits 17-31 reserved */
};
-IPA_REG_FIELDS(IDLE_INDICATION_CFG, idle_indication_cfg, 0x00000240);
+REG_FIELDS(IDLE_INDICATION_CFG, idle_indication_cfg, 0x00000240);
-static const u32 ipa_reg_src_rsrc_grp_01_rsrc_type_fmask[] = {
+static const u32 reg_src_rsrc_grp_01_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
@@ -203,10 +203,10 @@ static const u32 ipa_reg_src_rsrc_grp_01_rsrc_type_fmask[] = {
/* Bits 30-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_01_RSRC_TYPE, src_rsrc_grp_01_rsrc_type,
- 0x00000400, 0x0020);
+REG_STRIDE_FIELDS(SRC_RSRC_GRP_01_RSRC_TYPE, src_rsrc_grp_01_rsrc_type,
+ 0x00000400, 0x0020);
-static const u32 ipa_reg_src_rsrc_grp_23_rsrc_type_fmask[] = {
+static const u32 reg_src_rsrc_grp_23_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
@@ -217,10 +217,10 @@ static const u32 ipa_reg_src_rsrc_grp_23_rsrc_type_fmask[] = {
/* Bits 30-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_23_RSRC_TYPE, src_rsrc_grp_23_rsrc_type,
- 0x00000404, 0x0020);
+REG_STRIDE_FIELDS(SRC_RSRC_GRP_23_RSRC_TYPE, src_rsrc_grp_23_rsrc_type,
+ 0x00000404, 0x0020);
-static const u32 ipa_reg_dst_rsrc_grp_01_rsrc_type_fmask[] = {
+static const u32 reg_dst_rsrc_grp_01_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
@@ -231,10 +231,10 @@ static const u32 ipa_reg_dst_rsrc_grp_01_rsrc_type_fmask[] = {
/* Bits 30-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_01_RSRC_TYPE, dst_rsrc_grp_01_rsrc_type,
- 0x00000500, 0x0020);
+REG_STRIDE_FIELDS(DST_RSRC_GRP_01_RSRC_TYPE, dst_rsrc_grp_01_rsrc_type,
+ 0x00000500, 0x0020);
-static const u32 ipa_reg_dst_rsrc_grp_23_rsrc_type_fmask[] = {
+static const u32 reg_dst_rsrc_grp_23_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
@@ -245,10 +245,10 @@ static const u32 ipa_reg_dst_rsrc_grp_23_rsrc_type_fmask[] = {
/* Bits 30-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_23_RSRC_TYPE, dst_rsrc_grp_23_rsrc_type,
- 0x00000504, 0x0020);
+REG_STRIDE_FIELDS(DST_RSRC_GRP_23_RSRC_TYPE, dst_rsrc_grp_23_rsrc_type,
+ 0x00000504, 0x0020);
-static const u32 ipa_reg_endp_init_cfg_fmask[] = {
+static const u32 reg_endp_init_cfg_fmask[] = {
[FRAG_OFFLOAD_EN] = BIT(0),
[CS_OFFLOAD_EN] = GENMASK(2, 1),
[CS_METADATA_HDR_OFFSET] = GENMASK(6, 3),
@@ -257,16 +257,16 @@ static const u32 ipa_reg_endp_init_cfg_fmask[] = {
/* Bits 9-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_CFG, endp_init_cfg, 0x00000808, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_CFG, endp_init_cfg, 0x00000808, 0x0070);
-static const u32 ipa_reg_endp_init_nat_fmask[] = {
+static const u32 reg_endp_init_nat_fmask[] = {
[NAT_EN] = GENMASK(1, 0),
/* Bits 2-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_NAT, endp_init_nat, 0x0000080c, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_NAT, endp_init_nat, 0x0000080c, 0x0070);
-static const u32 ipa_reg_endp_init_hdr_fmask[] = {
+static const u32 reg_endp_init_hdr_fmask[] = {
[HDR_LEN] = GENMASK(5, 0),
[HDR_OFST_METADATA_VALID] = BIT(6),
[HDR_OFST_METADATA] = GENMASK(12, 7),
@@ -279,9 +279,9 @@ static const u32 ipa_reg_endp_init_hdr_fmask[] = {
/* Bits 29-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_HDR, endp_init_hdr, 0x00000810, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_HDR, endp_init_hdr, 0x00000810, 0x0070);
-static const u32 ipa_reg_endp_init_hdr_ext_fmask[] = {
+static const u32 reg_endp_init_hdr_ext_fmask[] = {
[HDR_ENDIANNESS] = BIT(0),
[HDR_TOTAL_LEN_OR_PAD_VALID] = BIT(1),
[HDR_TOTAL_LEN_OR_PAD] = BIT(2),
@@ -291,12 +291,12 @@ static const u32 ipa_reg_endp_init_hdr_ext_fmask[] = {
/* Bits 14-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_HDR_EXT, endp_init_hdr_ext, 0x00000814, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_HDR_EXT, endp_init_hdr_ext, 0x00000814, 0x0070);
-IPA_REG_STRIDE(ENDP_INIT_HDR_METADATA_MASK, endp_init_hdr_metadata_mask,
- 0x00000818, 0x0070);
+REG_STRIDE(ENDP_INIT_HDR_METADATA_MASK, endp_init_hdr_metadata_mask,
+ 0x00000818, 0x0070);
-static const u32 ipa_reg_endp_init_mode_fmask[] = {
+static const u32 reg_endp_init_mode_fmask[] = {
[ENDP_MODE] = GENMASK(2, 0),
/* Bit 3 reserved */
[DEST_PIPE_INDEX] = GENMASK(8, 4),
@@ -308,9 +308,9 @@ static const u32 ipa_reg_endp_init_mode_fmask[] = {
/* Bit 31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_MODE, endp_init_mode, 0x00000820, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_MODE, endp_init_mode, 0x00000820, 0x0070);
-static const u32 ipa_reg_endp_init_aggr_fmask[] = {
+static const u32 reg_endp_init_aggr_fmask[] = {
[AGGR_EN] = GENMASK(1, 0),
[AGGR_TYPE] = GENMASK(4, 2),
[BYTE_LIMIT] = GENMASK(9, 5),
@@ -323,27 +323,27 @@ static const u32 ipa_reg_endp_init_aggr_fmask[] = {
/* Bits 25-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_AGGR, endp_init_aggr, 0x00000824, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_AGGR, endp_init_aggr, 0x00000824, 0x0070);
-static const u32 ipa_reg_endp_init_hol_block_en_fmask[] = {
+static const u32 reg_endp_init_hol_block_en_fmask[] = {
[HOL_BLOCK_EN] = BIT(0),
/* Bits 1-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_EN, endp_init_hol_block_en,
- 0x0000082c, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_EN, endp_init_hol_block_en,
+ 0x0000082c, 0x0070);
-static const u32 ipa_reg_endp_init_hol_block_timer_fmask[] = {
+static const u32 reg_endp_init_hol_block_timer_fmask[] = {
[TIMER_BASE_VALUE] = GENMASK(4, 0),
/* Bits 5-7 reserved */
[TIMER_SCALE] = GENMASK(12, 8),
/* Bits 9-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_TIMER, endp_init_hol_block_timer,
- 0x00000830, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_TIMER, endp_init_hol_block_timer,
+ 0x00000830, 0x0070);
-static const u32 ipa_reg_endp_init_deaggr_fmask[] = {
+static const u32 reg_endp_init_deaggr_fmask[] = {
[DEAGGR_HDR_LEN] = GENMASK(5, 0),
[SYSPIPE_ERR_DETECTION] = BIT(6),
[PACKET_OFFSET_VALID] = BIT(7),
@@ -353,25 +353,24 @@ static const u32 ipa_reg_endp_init_deaggr_fmask[] = {
[MAX_PACKET_LEN] = GENMASK(31, 16),
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_DEAGGR, endp_init_deaggr, 0x00000834, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_DEAGGR, endp_init_deaggr, 0x00000834, 0x0070);
-static const u32 ipa_reg_endp_init_rsrc_grp_fmask[] = {
+static const u32 reg_endp_init_rsrc_grp_fmask[] = {
[ENDP_RSRC_GRP] = BIT(0),
/* Bits 1-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_RSRC_GRP, endp_init_rsrc_grp,
- 0x00000838, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_RSRC_GRP, endp_init_rsrc_grp, 0x00000838, 0x0070);
-static const u32 ipa_reg_endp_init_seq_fmask[] = {
+static const u32 reg_endp_init_seq_fmask[] = {
[SEQ_TYPE] = GENMASK(7, 0),
[SEQ_REP_TYPE] = GENMASK(15, 8),
/* Bits 16-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_SEQ, endp_init_seq, 0x0000083c, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_SEQ, endp_init_seq, 0x0000083c, 0x0070);
-static const u32 ipa_reg_endp_status_fmask[] = {
+static const u32 reg_endp_status_fmask[] = {
[STATUS_EN] = BIT(0),
[STATUS_ENDP] = GENMASK(5, 1),
/* Bits 6-7 reserved */
@@ -380,80 +379,80 @@ static const u32 ipa_reg_endp_status_fmask[] = {
/* Bits 10-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_STATUS, endp_status, 0x00000840, 0x0070);
+REG_STRIDE_FIELDS(ENDP_STATUS, endp_status, 0x00000840, 0x0070);
/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
-IPA_REG(IPA_IRQ_STTS, ipa_irq_stts, 0x00003008 + 0x1000 * GSI_EE_AP);
+REG(IPA_IRQ_STTS, ipa_irq_stts, 0x00003008 + 0x1000 * GSI_EE_AP);
/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
-IPA_REG(IPA_IRQ_EN, ipa_irq_en, 0x0000300c + 0x1000 * GSI_EE_AP);
+REG(IPA_IRQ_EN, ipa_irq_en, 0x0000300c + 0x1000 * GSI_EE_AP);
/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
-IPA_REG(IPA_IRQ_CLR, ipa_irq_clr, 0x00003010 + 0x1000 * GSI_EE_AP);
+REG(IPA_IRQ_CLR, ipa_irq_clr, 0x00003010 + 0x1000 * GSI_EE_AP);
-static const u32 ipa_reg_ipa_irq_uc_fmask[] = {
+static const u32 reg_ipa_irq_uc_fmask[] = {
[UC_INTR] = BIT(0),
/* Bits 1-31 reserved */
};
-IPA_REG_FIELDS(IPA_IRQ_UC, ipa_irq_uc, 0x0000301c + 0x1000 * GSI_EE_AP);
+REG_FIELDS(IPA_IRQ_UC, ipa_irq_uc, 0x0000301c + 0x1000 * GSI_EE_AP);
/* Valid bits defined by ipa->available */
-IPA_REG_STRIDE(IRQ_SUSPEND_INFO, irq_suspend_info,
- 0x00003030 + 0x1000 * GSI_EE_AP, 0x0004);
+REG_STRIDE(IRQ_SUSPEND_INFO, irq_suspend_info,
+ 0x00003030 + 0x1000 * GSI_EE_AP, 0x0004);
/* Valid bits defined by ipa->available */
-IPA_REG_STRIDE(IRQ_SUSPEND_EN, irq_suspend_en,
- 0x00003034 + 0x1000 * GSI_EE_AP, 0x0004);
+REG_STRIDE(IRQ_SUSPEND_EN, irq_suspend_en,
+ 0x00003034 + 0x1000 * GSI_EE_AP, 0x0004);
/* Valid bits defined by ipa->available */
-IPA_REG_STRIDE(IRQ_SUSPEND_CLR, irq_suspend_clr,
- 0x00003038 + 0x1000 * GSI_EE_AP, 0x0004);
-
-static const struct ipa_reg *ipa_reg_array[] = {
- [COMP_CFG] = &ipa_reg_comp_cfg,
- [CLKON_CFG] = &ipa_reg_clkon_cfg,
- [ROUTE] = &ipa_reg_route,
- [SHARED_MEM_SIZE] = &ipa_reg_shared_mem_size,
- [QSB_MAX_WRITES] = &ipa_reg_qsb_max_writes,
- [QSB_MAX_READS] = &ipa_reg_qsb_max_reads,
- [FILT_ROUT_HASH_EN] = &ipa_reg_filt_rout_hash_en,
- [FILT_ROUT_HASH_FLUSH] = &ipa_reg_filt_rout_hash_flush,
- [STATE_AGGR_ACTIVE] = &ipa_reg_state_aggr_active,
- [IPA_BCR] = &ipa_reg_ipa_bcr,
- [LOCAL_PKT_PROC_CNTXT] = &ipa_reg_local_pkt_proc_cntxt,
- [AGGR_FORCE_CLOSE] = &ipa_reg_aggr_force_close,
- [COUNTER_CFG] = &ipa_reg_counter_cfg,
- [IPA_TX_CFG] = &ipa_reg_ipa_tx_cfg,
- [FLAVOR_0] = &ipa_reg_flavor_0,
- [IDLE_INDICATION_CFG] = &ipa_reg_idle_indication_cfg,
- [SRC_RSRC_GRP_01_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_01_rsrc_type,
- [SRC_RSRC_GRP_23_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_23_rsrc_type,
- [DST_RSRC_GRP_01_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_01_rsrc_type,
- [DST_RSRC_GRP_23_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_23_rsrc_type,
- [ENDP_INIT_CFG] = &ipa_reg_endp_init_cfg,
- [ENDP_INIT_NAT] = &ipa_reg_endp_init_nat,
- [ENDP_INIT_HDR] = &ipa_reg_endp_init_hdr,
- [ENDP_INIT_HDR_EXT] = &ipa_reg_endp_init_hdr_ext,
- [ENDP_INIT_HDR_METADATA_MASK] = &ipa_reg_endp_init_hdr_metadata_mask,
- [ENDP_INIT_MODE] = &ipa_reg_endp_init_mode,
- [ENDP_INIT_AGGR] = &ipa_reg_endp_init_aggr,
- [ENDP_INIT_HOL_BLOCK_EN] = &ipa_reg_endp_init_hol_block_en,
- [ENDP_INIT_HOL_BLOCK_TIMER] = &ipa_reg_endp_init_hol_block_timer,
- [ENDP_INIT_DEAGGR] = &ipa_reg_endp_init_deaggr,
- [ENDP_INIT_RSRC_GRP] = &ipa_reg_endp_init_rsrc_grp,
- [ENDP_INIT_SEQ] = &ipa_reg_endp_init_seq,
- [ENDP_STATUS] = &ipa_reg_endp_status,
- [IPA_IRQ_STTS] = &ipa_reg_ipa_irq_stts,
- [IPA_IRQ_EN] = &ipa_reg_ipa_irq_en,
- [IPA_IRQ_CLR] = &ipa_reg_ipa_irq_clr,
- [IPA_IRQ_UC] = &ipa_reg_ipa_irq_uc,
- [IRQ_SUSPEND_INFO] = &ipa_reg_irq_suspend_info,
- [IRQ_SUSPEND_EN] = &ipa_reg_irq_suspend_en,
- [IRQ_SUSPEND_CLR] = &ipa_reg_irq_suspend_clr,
-};
-
-const struct ipa_regs ipa_regs_v4_2 = {
- .reg_count = ARRAY_SIZE(ipa_reg_array),
- .reg = ipa_reg_array,
+REG_STRIDE(IRQ_SUSPEND_CLR, irq_suspend_clr,
+ 0x00003038 + 0x1000 * GSI_EE_AP, 0x0004);
+
+static const struct reg *reg_array[] = {
+ [COMP_CFG] = &reg_comp_cfg,
+ [CLKON_CFG] = &reg_clkon_cfg,
+ [ROUTE] = &reg_route,
+ [SHARED_MEM_SIZE] = &reg_shared_mem_size,
+ [QSB_MAX_WRITES] = &reg_qsb_max_writes,
+ [QSB_MAX_READS] = &reg_qsb_max_reads,
+ [FILT_ROUT_HASH_EN] = &reg_filt_rout_hash_en,
+ [FILT_ROUT_HASH_FLUSH] = &reg_filt_rout_hash_flush,
+ [STATE_AGGR_ACTIVE] = &reg_state_aggr_active,
+ [IPA_BCR] = &reg_ipa_bcr,
+ [LOCAL_PKT_PROC_CNTXT] = &reg_local_pkt_proc_cntxt,
+ [AGGR_FORCE_CLOSE] = &reg_aggr_force_close,
+ [COUNTER_CFG] = &reg_counter_cfg,
+ [IPA_TX_CFG] = &reg_ipa_tx_cfg,
+ [FLAVOR_0] = &reg_flavor_0,
+ [IDLE_INDICATION_CFG] = &reg_idle_indication_cfg,
+ [SRC_RSRC_GRP_01_RSRC_TYPE] = &reg_src_rsrc_grp_01_rsrc_type,
+ [SRC_RSRC_GRP_23_RSRC_TYPE] = &reg_src_rsrc_grp_23_rsrc_type,
+ [DST_RSRC_GRP_01_RSRC_TYPE] = &reg_dst_rsrc_grp_01_rsrc_type,
+ [DST_RSRC_GRP_23_RSRC_TYPE] = &reg_dst_rsrc_grp_23_rsrc_type,
+ [ENDP_INIT_CFG] = &reg_endp_init_cfg,
+ [ENDP_INIT_NAT] = &reg_endp_init_nat,
+ [ENDP_INIT_HDR] = &reg_endp_init_hdr,
+ [ENDP_INIT_HDR_EXT] = &reg_endp_init_hdr_ext,
+ [ENDP_INIT_HDR_METADATA_MASK] = &reg_endp_init_hdr_metadata_mask,
+ [ENDP_INIT_MODE] = &reg_endp_init_mode,
+ [ENDP_INIT_AGGR] = &reg_endp_init_aggr,
+ [ENDP_INIT_HOL_BLOCK_EN] = &reg_endp_init_hol_block_en,
+ [ENDP_INIT_HOL_BLOCK_TIMER] = &reg_endp_init_hol_block_timer,
+ [ENDP_INIT_DEAGGR] = &reg_endp_init_deaggr,
+ [ENDP_INIT_RSRC_GRP] = &reg_endp_init_rsrc_grp,
+ [ENDP_INIT_SEQ] = &reg_endp_init_seq,
+ [ENDP_STATUS] = &reg_endp_status,
+ [IPA_IRQ_STTS] = &reg_ipa_irq_stts,
+ [IPA_IRQ_EN] = &reg_ipa_irq_en,
+ [IPA_IRQ_CLR] = &reg_ipa_irq_clr,
+ [IPA_IRQ_UC] = &reg_ipa_irq_uc,
+ [IRQ_SUSPEND_INFO] = &reg_irq_suspend_info,
+ [IRQ_SUSPEND_EN] = &reg_irq_suspend_en,
+ [IRQ_SUSPEND_CLR] = &reg_irq_suspend_clr,
+};
+
+const struct regs ipa_regs_v4_2 = {
+ .reg_count = ARRAY_SIZE(reg_array),
+ .reg = reg_array,
};
diff --git a/drivers/net/ipa/reg/ipa_reg-v4.5.c b/drivers/net/ipa/reg/ipa_reg-v4.5.c
index 587eb8d4e00f..1c58f78851c2 100644
--- a/drivers/net/ipa/reg/ipa_reg-v4.5.c
+++ b/drivers/net/ipa/reg/ipa_reg-v4.5.c
@@ -7,7 +7,7 @@
#include "../ipa.h"
#include "../ipa_reg.h"
-static const u32 ipa_reg_comp_cfg_fmask[] = {
+static const u32 reg_comp_cfg_fmask[] = {
/* Bit 0 reserved */
[GSI_SNOC_BYPASS_DIS] = BIT(1),
[GEN_QMB_0_SNOC_BYPASS_DIS] = BIT(2),
@@ -30,9 +30,9 @@ static const u32 ipa_reg_comp_cfg_fmask[] = {
/* Bits 22-31 reserved */
};
-IPA_REG_FIELDS(COMP_CFG, comp_cfg, 0x0000003c);
+REG_FIELDS(COMP_CFG, comp_cfg, 0x0000003c);
-static const u32 ipa_reg_clkon_cfg_fmask[] = {
+static const u32 reg_clkon_cfg_fmask[] = {
[CLKON_RX] = BIT(0),
[CLKON_PROC] = BIT(1),
[TX_WRAPPER] = BIT(2),
@@ -67,9 +67,9 @@ static const u32 ipa_reg_clkon_cfg_fmask[] = {
/* Bit 31 reserved */
};
-IPA_REG_FIELDS(CLKON_CFG, clkon_cfg, 0x00000044);
+REG_FIELDS(CLKON_CFG, clkon_cfg, 0x00000044);
-static const u32 ipa_reg_route_fmask[] = {
+static const u32 reg_route_fmask[] = {
[ROUTE_DIS] = BIT(0),
[ROUTE_DEF_PIPE] = GENMASK(5, 1),
[ROUTE_DEF_HDR_TABLE] = BIT(6),
@@ -80,24 +80,24 @@ static const u32 ipa_reg_route_fmask[] = {
/* Bits 25-31 reserved */
};
-IPA_REG_FIELDS(ROUTE, route, 0x00000048);
+REG_FIELDS(ROUTE, route, 0x00000048);
-static const u32 ipa_reg_shared_mem_size_fmask[] = {
+static const u32 reg_shared_mem_size_fmask[] = {
[MEM_SIZE] = GENMASK(15, 0),
[MEM_BADDR] = GENMASK(31, 16),
};
-IPA_REG_FIELDS(SHARED_MEM_SIZE, shared_mem_size, 0x00000054);
+REG_FIELDS(SHARED_MEM_SIZE, shared_mem_size, 0x00000054);
-static const u32 ipa_reg_qsb_max_writes_fmask[] = {
+static const u32 reg_qsb_max_writes_fmask[] = {
[GEN_QMB_0_MAX_WRITES] = GENMASK(3, 0),
[GEN_QMB_1_MAX_WRITES] = GENMASK(7, 4),
/* Bits 8-31 reserved */
};
-IPA_REG_FIELDS(QSB_MAX_WRITES, qsb_max_writes, 0x00000074);
+REG_FIELDS(QSB_MAX_WRITES, qsb_max_writes, 0x00000074);
-static const u32 ipa_reg_qsb_max_reads_fmask[] = {
+static const u32 reg_qsb_max_reads_fmask[] = {
[GEN_QMB_0_MAX_READS] = GENMASK(3, 0),
[GEN_QMB_1_MAX_READS] = GENMASK(7, 4),
/* Bits 8-15 reserved */
@@ -105,9 +105,9 @@ static const u32 ipa_reg_qsb_max_reads_fmask[] = {
[GEN_QMB_1_MAX_READS_BEATS] = GENMASK(31, 24),
};
-IPA_REG_FIELDS(QSB_MAX_READS, qsb_max_reads, 0x00000078);
+REG_FIELDS(QSB_MAX_READS, qsb_max_reads, 0x00000078);
-static const u32 ipa_reg_filt_rout_hash_en_fmask[] = {
+static const u32 reg_filt_rout_hash_en_fmask[] = {
[IPV6_ROUTER_HASH] = BIT(0),
/* Bits 1-3 reserved */
[IPV6_FILTER_HASH] = BIT(4),
@@ -118,9 +118,9 @@ static const u32 ipa_reg_filt_rout_hash_en_fmask[] = {
/* Bits 13-31 reserved */
};
-IPA_REG_FIELDS(FILT_ROUT_HASH_EN, filt_rout_hash_en, 0x0000148);
+REG_FIELDS(FILT_ROUT_HASH_EN, filt_rout_hash_en, 0x0000148);
-static const u32 ipa_reg_filt_rout_hash_flush_fmask[] = {
+static const u32 reg_filt_rout_hash_flush_fmask[] = {
[IPV6_ROUTER_HASH] = BIT(0),
/* Bits 1-3 reserved */
[IPV6_FILTER_HASH] = BIT(4),
@@ -131,23 +131,23 @@ static const u32 ipa_reg_filt_rout_hash_flush_fmask[] = {
/* Bits 13-31 reserved */
};
-IPA_REG_FIELDS(FILT_ROUT_HASH_FLUSH, filt_rout_hash_flush, 0x000014c);
+REG_FIELDS(FILT_ROUT_HASH_FLUSH, filt_rout_hash_flush, 0x000014c);
/* Valid bits defined by ipa->available */
-IPA_REG_STRIDE(STATE_AGGR_ACTIVE, state_aggr_active, 0x000000b4, 0x0004);
+REG_STRIDE(STATE_AGGR_ACTIVE, state_aggr_active, 0x000000b4, 0x0004);
-static const u32 ipa_reg_local_pkt_proc_cntxt_fmask[] = {
+static const u32 reg_local_pkt_proc_cntxt_fmask[] = {
[IPA_BASE_ADDR] = GENMASK(17, 0),
/* Bits 18-31 reserved */
};
/* Offset must be a multiple of 8 */
-IPA_REG_FIELDS(LOCAL_PKT_PROC_CNTXT, local_pkt_proc_cntxt, 0x000001e8);
+REG_FIELDS(LOCAL_PKT_PROC_CNTXT, local_pkt_proc_cntxt, 0x000001e8);
/* Valid bits defined by ipa->available */
-IPA_REG_STRIDE(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec, 0x0004);
+REG_STRIDE(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec, 0x0004);
-static const u32 ipa_reg_ipa_tx_cfg_fmask[] = {
+static const u32 reg_ipa_tx_cfg_fmask[] = {
/* Bits 0-1 reserved */
[PREFETCH_ALMOST_EMPTY_SIZE_TX0] = GENMASK(5, 2),
[DMAW_SCND_OUTSD_PRED_THRESHOLD] = GENMASK(9, 6),
@@ -159,9 +159,9 @@ static const u32 ipa_reg_ipa_tx_cfg_fmask[] = {
/* Bits 18-31 reserved */
};
-IPA_REG_FIELDS(IPA_TX_CFG, ipa_tx_cfg, 0x000001fc);
+REG_FIELDS(IPA_TX_CFG, ipa_tx_cfg, 0x000001fc);
-static const u32 ipa_reg_flavor_0_fmask[] = {
+static const u32 reg_flavor_0_fmask[] = {
[MAX_PIPES] = GENMASK(3, 0),
/* Bits 4-7 reserved */
[MAX_CONS_PIPES] = GENMASK(12, 8),
@@ -172,17 +172,17 @@ static const u32 ipa_reg_flavor_0_fmask[] = {
/* Bits 28-31 reserved */
};
-IPA_REG_FIELDS(FLAVOR_0, flavor_0, 0x00000210);
+REG_FIELDS(FLAVOR_0, flavor_0, 0x00000210);
-static const u32 ipa_reg_idle_indication_cfg_fmask[] = {
+static const u32 reg_idle_indication_cfg_fmask[] = {
[ENTER_IDLE_DEBOUNCE_THRESH] = GENMASK(15, 0),
[CONST_NON_IDLE_ENABLE] = BIT(16),
/* Bits 17-31 reserved */
};
-IPA_REG_FIELDS(IDLE_INDICATION_CFG, idle_indication_cfg, 0x00000240);
+REG_FIELDS(IDLE_INDICATION_CFG, idle_indication_cfg, 0x00000240);
-static const u32 ipa_reg_qtime_timestamp_cfg_fmask[] = {
+static const u32 reg_qtime_timestamp_cfg_fmask[] = {
[DPL_TIMESTAMP_LSB] = GENMASK(4, 0),
/* Bits 5-6 reserved */
[DPL_TIMESTAMP_SEL] = BIT(7),
@@ -192,25 +192,25 @@ static const u32 ipa_reg_qtime_timestamp_cfg_fmask[] = {
/* Bits 21-31 reserved */
};
-IPA_REG_FIELDS(QTIME_TIMESTAMP_CFG, qtime_timestamp_cfg, 0x0000024c);
+REG_FIELDS(QTIME_TIMESTAMP_CFG, qtime_timestamp_cfg, 0x0000024c);
-static const u32 ipa_reg_timers_xo_clk_div_cfg_fmask[] = {
+static const u32 reg_timers_xo_clk_div_cfg_fmask[] = {
[DIV_VALUE] = GENMASK(8, 0),
/* Bits 9-30 reserved */
[DIV_ENABLE] = BIT(31),
};
-IPA_REG_FIELDS(TIMERS_XO_CLK_DIV_CFG, timers_xo_clk_div_cfg, 0x00000250);
+REG_FIELDS(TIMERS_XO_CLK_DIV_CFG, timers_xo_clk_div_cfg, 0x00000250);
-static const u32 ipa_reg_timers_pulse_gran_cfg_fmask[] = {
+static const u32 reg_timers_pulse_gran_cfg_fmask[] = {
[PULSE_GRAN_0] = GENMASK(2, 0),
[PULSE_GRAN_1] = GENMASK(5, 3),
[PULSE_GRAN_2] = GENMASK(8, 6),
};
-IPA_REG_FIELDS(TIMERS_PULSE_GRAN_CFG, timers_pulse_gran_cfg, 0x00000254);
+REG_FIELDS(TIMERS_PULSE_GRAN_CFG, timers_pulse_gran_cfg, 0x00000254);
-static const u32 ipa_reg_src_rsrc_grp_01_rsrc_type_fmask[] = {
+static const u32 reg_src_rsrc_grp_01_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
@@ -221,10 +221,10 @@ static const u32 ipa_reg_src_rsrc_grp_01_rsrc_type_fmask[] = {
/* Bits 30-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_01_RSRC_TYPE, src_rsrc_grp_01_rsrc_type,
- 0x00000400, 0x0020);
+REG_STRIDE_FIELDS(SRC_RSRC_GRP_01_RSRC_TYPE, src_rsrc_grp_01_rsrc_type,
+ 0x00000400, 0x0020);
-static const u32 ipa_reg_src_rsrc_grp_23_rsrc_type_fmask[] = {
+static const u32 reg_src_rsrc_grp_23_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
@@ -235,10 +235,10 @@ static const u32 ipa_reg_src_rsrc_grp_23_rsrc_type_fmask[] = {
/* Bits 30-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_23_RSRC_TYPE, src_rsrc_grp_23_rsrc_type,
- 0x00000404, 0x0020);
+REG_STRIDE_FIELDS(SRC_RSRC_GRP_23_RSRC_TYPE, src_rsrc_grp_23_rsrc_type,
+ 0x00000404, 0x0020);
-static const u32 ipa_reg_src_rsrc_grp_45_rsrc_type_fmask[] = {
+static const u32 reg_src_rsrc_grp_45_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
@@ -249,10 +249,10 @@ static const u32 ipa_reg_src_rsrc_grp_45_rsrc_type_fmask[] = {
/* Bits 30-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_45_RSRC_TYPE, src_rsrc_grp_45_rsrc_type,
- 0x00000408, 0x0020);
+REG_STRIDE_FIELDS(SRC_RSRC_GRP_45_RSRC_TYPE, src_rsrc_grp_45_rsrc_type,
+ 0x00000408, 0x0020);
-static const u32 ipa_reg_dst_rsrc_grp_01_rsrc_type_fmask[] = {
+static const u32 reg_dst_rsrc_grp_01_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
@@ -263,10 +263,10 @@ static const u32 ipa_reg_dst_rsrc_grp_01_rsrc_type_fmask[] = {
/* Bits 30-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_01_RSRC_TYPE, dst_rsrc_grp_01_rsrc_type,
- 0x00000500, 0x0020);
+REG_STRIDE_FIELDS(DST_RSRC_GRP_01_RSRC_TYPE, dst_rsrc_grp_01_rsrc_type,
+ 0x00000500, 0x0020);
-static const u32 ipa_reg_dst_rsrc_grp_23_rsrc_type_fmask[] = {
+static const u32 reg_dst_rsrc_grp_23_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
@@ -277,10 +277,10 @@ static const u32 ipa_reg_dst_rsrc_grp_23_rsrc_type_fmask[] = {
/* Bits 30-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_23_RSRC_TYPE, dst_rsrc_grp_23_rsrc_type,
- 0x00000504, 0x0020);
+REG_STRIDE_FIELDS(DST_RSRC_GRP_23_RSRC_TYPE, dst_rsrc_grp_23_rsrc_type,
+ 0x00000504, 0x0020);
-static const u32 ipa_reg_dst_rsrc_grp_45_rsrc_type_fmask[] = {
+static const u32 reg_dst_rsrc_grp_45_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
@@ -291,10 +291,10 @@ static const u32 ipa_reg_dst_rsrc_grp_45_rsrc_type_fmask[] = {
/* Bits 30-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_45_RSRC_TYPE, dst_rsrc_grp_45_rsrc_type,
- 0x00000508, 0x0020);
+REG_STRIDE_FIELDS(DST_RSRC_GRP_45_RSRC_TYPE, dst_rsrc_grp_45_rsrc_type,
+ 0x00000508, 0x0020);
-static const u32 ipa_reg_endp_init_cfg_fmask[] = {
+static const u32 reg_endp_init_cfg_fmask[] = {
[FRAG_OFFLOAD_EN] = BIT(0),
[CS_OFFLOAD_EN] = GENMASK(2, 1),
[CS_METADATA_HDR_OFFSET] = GENMASK(6, 3),
@@ -303,16 +303,16 @@ static const u32 ipa_reg_endp_init_cfg_fmask[] = {
/* Bits 9-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_CFG, endp_init_cfg, 0x00000808, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_CFG, endp_init_cfg, 0x00000808, 0x0070);
-static const u32 ipa_reg_endp_init_nat_fmask[] = {
+static const u32 reg_endp_init_nat_fmask[] = {
[NAT_EN] = GENMASK(1, 0),
/* Bits 2-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_NAT, endp_init_nat, 0x0000080c, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_NAT, endp_init_nat, 0x0000080c, 0x0070);
-static const u32 ipa_reg_endp_init_hdr_fmask[] = {
+static const u32 reg_endp_init_hdr_fmask[] = {
[HDR_LEN] = GENMASK(5, 0),
[HDR_OFST_METADATA_VALID] = BIT(6),
[HDR_OFST_METADATA] = GENMASK(12, 7),
@@ -325,9 +325,9 @@ static const u32 ipa_reg_endp_init_hdr_fmask[] = {
[HDR_OFST_METADATA_MSB] = GENMASK(31, 30),
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_HDR, endp_init_hdr, 0x00000810, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_HDR, endp_init_hdr, 0x00000810, 0x0070);
-static const u32 ipa_reg_endp_init_hdr_ext_fmask[] = {
+static const u32 reg_endp_init_hdr_ext_fmask[] = {
[HDR_ENDIANNESS] = BIT(0),
[HDR_TOTAL_LEN_OR_PAD_VALID] = BIT(1),
[HDR_TOTAL_LEN_OR_PAD] = BIT(2),
@@ -341,12 +341,12 @@ static const u32 ipa_reg_endp_init_hdr_ext_fmask[] = {
/* Bits 22-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_HDR_EXT, endp_init_hdr_ext, 0x00000814, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_HDR_EXT, endp_init_hdr_ext, 0x00000814, 0x0070);
-IPA_REG_STRIDE(ENDP_INIT_HDR_METADATA_MASK, endp_init_hdr_metadata_mask,
- 0x00000818, 0x0070);
+REG_STRIDE(ENDP_INIT_HDR_METADATA_MASK, endp_init_hdr_metadata_mask,
+ 0x00000818, 0x0070);
-static const u32 ipa_reg_endp_init_mode_fmask[] = {
+static const u32 reg_endp_init_mode_fmask[] = {
[ENDP_MODE] = GENMASK(2, 0),
[DCPH_ENABLE] = BIT(3),
[DEST_PIPE_INDEX] = GENMASK(8, 4),
@@ -357,9 +357,9 @@ static const u32 ipa_reg_endp_init_mode_fmask[] = {
/* Bits 30-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_MODE, endp_init_mode, 0x00000820, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_MODE, endp_init_mode, 0x00000820, 0x0070);
-static const u32 ipa_reg_endp_init_aggr_fmask[] = {
+static const u32 reg_endp_init_aggr_fmask[] = {
[AGGR_EN] = GENMASK(1, 0),
[AGGR_TYPE] = GENMASK(4, 2),
[BYTE_LIMIT] = GENMASK(10, 5),
@@ -374,27 +374,27 @@ static const u32 ipa_reg_endp_init_aggr_fmask[] = {
/* Bits 28-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_AGGR, endp_init_aggr, 0x00000824, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_AGGR, endp_init_aggr, 0x00000824, 0x0070);
-static const u32 ipa_reg_endp_init_hol_block_en_fmask[] = {
+static const u32 reg_endp_init_hol_block_en_fmask[] = {
[HOL_BLOCK_EN] = BIT(0),
/* Bits 1-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_EN, endp_init_hol_block_en,
- 0x0000082c, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_EN, endp_init_hol_block_en,
+ 0x0000082c, 0x0070);
-static const u32 ipa_reg_endp_init_hol_block_timer_fmask[] = {
+static const u32 reg_endp_init_hol_block_timer_fmask[] = {
[TIMER_LIMIT] = GENMASK(4, 0),
/* Bits 5-7 reserved */
[TIMER_GRAN_SEL] = BIT(8),
/* Bits 9-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_TIMER, endp_init_hol_block_timer,
- 0x00000830, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_TIMER, endp_init_hol_block_timer,
+ 0x00000830, 0x0070);
-static const u32 ipa_reg_endp_init_deaggr_fmask[] = {
+static const u32 reg_endp_init_deaggr_fmask[] = {
[DEAGGR_HDR_LEN] = GENMASK(5, 0),
[SYSPIPE_ERR_DETECTION] = BIT(6),
[PACKET_OFFSET_VALID] = BIT(7),
@@ -404,24 +404,23 @@ static const u32 ipa_reg_endp_init_deaggr_fmask[] = {
[MAX_PACKET_LEN] = GENMASK(31, 16),
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_DEAGGR, endp_init_deaggr, 0x00000834, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_DEAGGR, endp_init_deaggr, 0x00000834, 0x0070);
-static const u32 ipa_reg_endp_init_rsrc_grp_fmask[] = {
+static const u32 reg_endp_init_rsrc_grp_fmask[] = {
[ENDP_RSRC_GRP] = GENMASK(2, 0),
/* Bits 3-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_RSRC_GRP, endp_init_rsrc_grp,
- 0x00000838, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_RSRC_GRP, endp_init_rsrc_grp, 0x00000838, 0x0070);
-static const u32 ipa_reg_endp_init_seq_fmask[] = {
+static const u32 reg_endp_init_seq_fmask[] = {
[SEQ_TYPE] = GENMASK(7, 0),
/* Bits 8-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_SEQ, endp_init_seq, 0x0000083c, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_SEQ, endp_init_seq, 0x0000083c, 0x0070);
-static const u32 ipa_reg_endp_status_fmask[] = {
+static const u32 reg_endp_status_fmask[] = {
[STATUS_EN] = BIT(0),
[STATUS_ENDP] = GENMASK(5, 1),
/* Bits 6-8 reserved */
@@ -429,9 +428,9 @@ static const u32 ipa_reg_endp_status_fmask[] = {
/* Bits 10-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_STATUS, endp_status, 0x00000840, 0x0070);
+REG_STRIDE_FIELDS(ENDP_STATUS, endp_status, 0x00000840, 0x0070);
-static const u32 ipa_reg_endp_filter_router_hsh_cfg_fmask[] = {
+static const u32 reg_endp_filter_router_hsh_cfg_fmask[] = {
[FILTER_HASH_MSK_SRC_ID] = BIT(0),
[FILTER_HASH_MSK_SRC_IP] = BIT(1),
[FILTER_HASH_MSK_DST_IP] = BIT(2),
@@ -452,85 +451,85 @@ static const u32 ipa_reg_endp_filter_router_hsh_cfg_fmask[] = {
/* Bits 23-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_FILTER_ROUTER_HSH_CFG, endp_filter_router_hsh_cfg,
- 0x0000085c, 0x0070);
+REG_STRIDE_FIELDS(ENDP_FILTER_ROUTER_HSH_CFG, endp_filter_router_hsh_cfg,
+ 0x0000085c, 0x0070);
/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
-IPA_REG(IPA_IRQ_STTS, ipa_irq_stts, 0x00003008 + 0x1000 * GSI_EE_AP);
+REG(IPA_IRQ_STTS, ipa_irq_stts, 0x00003008 + 0x1000 * GSI_EE_AP);
/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
-IPA_REG(IPA_IRQ_EN, ipa_irq_en, 0x0000300c + 0x1000 * GSI_EE_AP);
+REG(IPA_IRQ_EN, ipa_irq_en, 0x0000300c + 0x1000 * GSI_EE_AP);
/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
-IPA_REG(IPA_IRQ_CLR, ipa_irq_clr, 0x00003010 + 0x1000 * GSI_EE_AP);
+REG(IPA_IRQ_CLR, ipa_irq_clr, 0x00003010 + 0x1000 * GSI_EE_AP);
-static const u32 ipa_reg_ipa_irq_uc_fmask[] = {
+static const u32 reg_ipa_irq_uc_fmask[] = {
[UC_INTR] = BIT(0),
/* Bits 1-31 reserved */
};
-IPA_REG_FIELDS(IPA_IRQ_UC, ipa_irq_uc, 0x0000301c + 0x1000 * GSI_EE_AP);
+REG_FIELDS(IPA_IRQ_UC, ipa_irq_uc, 0x0000301c + 0x1000 * GSI_EE_AP);
/* Valid bits defined by ipa->available */
-IPA_REG_STRIDE(IRQ_SUSPEND_INFO, irq_suspend_info,
- 0x00003030 + 0x1000 * GSI_EE_AP, 0x0004);
+REG_STRIDE(IRQ_SUSPEND_INFO, irq_suspend_info,
+ 0x00003030 + 0x1000 * GSI_EE_AP, 0x0004);
/* Valid bits defined by ipa->available */
-IPA_REG_STRIDE(IRQ_SUSPEND_EN, irq_suspend_en,
- 0x00003034 + 0x1000 * GSI_EE_AP, 0x0004);
+REG_STRIDE(IRQ_SUSPEND_EN, irq_suspend_en,
+ 0x00003034 + 0x1000 * GSI_EE_AP, 0x0004);
/* Valid bits defined by ipa->available */
-IPA_REG_STRIDE(IRQ_SUSPEND_CLR, irq_suspend_clr,
- 0x00003038 + 0x1000 * GSI_EE_AP, 0x0004);
-
-static const struct ipa_reg *ipa_reg_array[] = {
- [COMP_CFG] = &ipa_reg_comp_cfg,
- [CLKON_CFG] = &ipa_reg_clkon_cfg,
- [ROUTE] = &ipa_reg_route,
- [SHARED_MEM_SIZE] = &ipa_reg_shared_mem_size,
- [QSB_MAX_WRITES] = &ipa_reg_qsb_max_writes,
- [QSB_MAX_READS] = &ipa_reg_qsb_max_reads,
- [FILT_ROUT_HASH_EN] = &ipa_reg_filt_rout_hash_en,
- [FILT_ROUT_HASH_FLUSH] = &ipa_reg_filt_rout_hash_flush,
- [STATE_AGGR_ACTIVE] = &ipa_reg_state_aggr_active,
- [LOCAL_PKT_PROC_CNTXT] = &ipa_reg_local_pkt_proc_cntxt,
- [AGGR_FORCE_CLOSE] = &ipa_reg_aggr_force_close,
- [IPA_TX_CFG] = &ipa_reg_ipa_tx_cfg,
- [FLAVOR_0] = &ipa_reg_flavor_0,
- [IDLE_INDICATION_CFG] = &ipa_reg_idle_indication_cfg,
- [QTIME_TIMESTAMP_CFG] = &ipa_reg_qtime_timestamp_cfg,
- [TIMERS_XO_CLK_DIV_CFG] = &ipa_reg_timers_xo_clk_div_cfg,
- [TIMERS_PULSE_GRAN_CFG] = &ipa_reg_timers_pulse_gran_cfg,
- [SRC_RSRC_GRP_01_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_01_rsrc_type,
- [SRC_RSRC_GRP_23_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_23_rsrc_type,
- [SRC_RSRC_GRP_45_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_45_rsrc_type,
- [DST_RSRC_GRP_01_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_01_rsrc_type,
- [DST_RSRC_GRP_23_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_23_rsrc_type,
- [DST_RSRC_GRP_45_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_45_rsrc_type,
- [ENDP_INIT_CFG] = &ipa_reg_endp_init_cfg,
- [ENDP_INIT_NAT] = &ipa_reg_endp_init_nat,
- [ENDP_INIT_HDR] = &ipa_reg_endp_init_hdr,
- [ENDP_INIT_HDR_EXT] = &ipa_reg_endp_init_hdr_ext,
- [ENDP_INIT_HDR_METADATA_MASK] = &ipa_reg_endp_init_hdr_metadata_mask,
- [ENDP_INIT_MODE] = &ipa_reg_endp_init_mode,
- [ENDP_INIT_AGGR] = &ipa_reg_endp_init_aggr,
- [ENDP_INIT_HOL_BLOCK_EN] = &ipa_reg_endp_init_hol_block_en,
- [ENDP_INIT_HOL_BLOCK_TIMER] = &ipa_reg_endp_init_hol_block_timer,
- [ENDP_INIT_DEAGGR] = &ipa_reg_endp_init_deaggr,
- [ENDP_INIT_RSRC_GRP] = &ipa_reg_endp_init_rsrc_grp,
- [ENDP_INIT_SEQ] = &ipa_reg_endp_init_seq,
- [ENDP_STATUS] = &ipa_reg_endp_status,
- [ENDP_FILTER_ROUTER_HSH_CFG] = &ipa_reg_endp_filter_router_hsh_cfg,
- [IPA_IRQ_STTS] = &ipa_reg_ipa_irq_stts,
- [IPA_IRQ_EN] = &ipa_reg_ipa_irq_en,
- [IPA_IRQ_CLR] = &ipa_reg_ipa_irq_clr,
- [IPA_IRQ_UC] = &ipa_reg_ipa_irq_uc,
- [IRQ_SUSPEND_INFO] = &ipa_reg_irq_suspend_info,
- [IRQ_SUSPEND_EN] = &ipa_reg_irq_suspend_en,
- [IRQ_SUSPEND_CLR] = &ipa_reg_irq_suspend_clr,
-};
-
-const struct ipa_regs ipa_regs_v4_5 = {
- .reg_count = ARRAY_SIZE(ipa_reg_array),
- .reg = ipa_reg_array,
+REG_STRIDE(IRQ_SUSPEND_CLR, irq_suspend_clr,
+ 0x00003038 + 0x1000 * GSI_EE_AP, 0x0004);
+
+static const struct reg *reg_array[] = {
+ [COMP_CFG] = &reg_comp_cfg,
+ [CLKON_CFG] = &reg_clkon_cfg,
+ [ROUTE] = &reg_route,
+ [SHARED_MEM_SIZE] = &reg_shared_mem_size,
+ [QSB_MAX_WRITES] = &reg_qsb_max_writes,
+ [QSB_MAX_READS] = &reg_qsb_max_reads,
+ [FILT_ROUT_HASH_EN] = &reg_filt_rout_hash_en,
+ [FILT_ROUT_HASH_FLUSH] = &reg_filt_rout_hash_flush,
+ [STATE_AGGR_ACTIVE] = &reg_state_aggr_active,
+ [LOCAL_PKT_PROC_CNTXT] = &reg_local_pkt_proc_cntxt,
+ [AGGR_FORCE_CLOSE] = &reg_aggr_force_close,
+ [IPA_TX_CFG] = &reg_ipa_tx_cfg,
+ [FLAVOR_0] = &reg_flavor_0,
+ [IDLE_INDICATION_CFG] = &reg_idle_indication_cfg,
+ [QTIME_TIMESTAMP_CFG] = &reg_qtime_timestamp_cfg,
+ [TIMERS_XO_CLK_DIV_CFG] = &reg_timers_xo_clk_div_cfg,
+ [TIMERS_PULSE_GRAN_CFG] = &reg_timers_pulse_gran_cfg,
+ [SRC_RSRC_GRP_01_RSRC_TYPE] = &reg_src_rsrc_grp_01_rsrc_type,
+ [SRC_RSRC_GRP_23_RSRC_TYPE] = &reg_src_rsrc_grp_23_rsrc_type,
+ [SRC_RSRC_GRP_45_RSRC_TYPE] = &reg_src_rsrc_grp_45_rsrc_type,
+ [DST_RSRC_GRP_01_RSRC_TYPE] = &reg_dst_rsrc_grp_01_rsrc_type,
+ [DST_RSRC_GRP_23_RSRC_TYPE] = &reg_dst_rsrc_grp_23_rsrc_type,
+ [DST_RSRC_GRP_45_RSRC_TYPE] = &reg_dst_rsrc_grp_45_rsrc_type,
+ [ENDP_INIT_CFG] = &reg_endp_init_cfg,
+ [ENDP_INIT_NAT] = &reg_endp_init_nat,
+ [ENDP_INIT_HDR] = &reg_endp_init_hdr,
+ [ENDP_INIT_HDR_EXT] = &reg_endp_init_hdr_ext,
+ [ENDP_INIT_HDR_METADATA_MASK] = &reg_endp_init_hdr_metadata_mask,
+ [ENDP_INIT_MODE] = &reg_endp_init_mode,
+ [ENDP_INIT_AGGR] = &reg_endp_init_aggr,
+ [ENDP_INIT_HOL_BLOCK_EN] = &reg_endp_init_hol_block_en,
+ [ENDP_INIT_HOL_BLOCK_TIMER] = &reg_endp_init_hol_block_timer,
+ [ENDP_INIT_DEAGGR] = &reg_endp_init_deaggr,
+ [ENDP_INIT_RSRC_GRP] = &reg_endp_init_rsrc_grp,
+ [ENDP_INIT_SEQ] = &reg_endp_init_seq,
+ [ENDP_STATUS] = &reg_endp_status,
+ [ENDP_FILTER_ROUTER_HSH_CFG] = &reg_endp_filter_router_hsh_cfg,
+ [IPA_IRQ_STTS] = &reg_ipa_irq_stts,
+ [IPA_IRQ_EN] = &reg_ipa_irq_en,
+ [IPA_IRQ_CLR] = &reg_ipa_irq_clr,
+ [IPA_IRQ_UC] = &reg_ipa_irq_uc,
+ [IRQ_SUSPEND_INFO] = &reg_irq_suspend_info,
+ [IRQ_SUSPEND_EN] = &reg_irq_suspend_en,
+ [IRQ_SUSPEND_CLR] = &reg_irq_suspend_clr,
+};
+
+const struct regs ipa_regs_v4_5 = {
+ .reg_count = ARRAY_SIZE(reg_array),
+ .reg = reg_array,
};
diff --git a/drivers/net/ipa/reg/ipa_reg-v4.7.c b/drivers/net/ipa/reg/ipa_reg-v4.7.c
index 21f8a58e59a0..731824fce1d4 100644
--- a/drivers/net/ipa/reg/ipa_reg-v4.7.c
+++ b/drivers/net/ipa/reg/ipa_reg-v4.7.c
@@ -7,7 +7,7 @@
#include "../ipa.h"
#include "../ipa_reg.h"
-static const u32 ipa_reg_comp_cfg_fmask[] = {
+static const u32 reg_comp_cfg_fmask[] = {
[RAM_ARB_PRI_CLIENT_SAMP_FIX_DIS] = BIT(0),
[GSI_SNOC_BYPASS_DIS] = BIT(1),
[GEN_QMB_0_SNOC_BYPASS_DIS] = BIT(2),
@@ -30,9 +30,9 @@ static const u32 ipa_reg_comp_cfg_fmask[] = {
/* Bits 22-31 reserved */
};
-IPA_REG_FIELDS(COMP_CFG, comp_cfg, 0x0000003c);
+REG_FIELDS(COMP_CFG, comp_cfg, 0x0000003c);
-static const u32 ipa_reg_clkon_cfg_fmask[] = {
+static const u32 reg_clkon_cfg_fmask[] = {
[CLKON_RX] = BIT(0),
[CLKON_PROC] = BIT(1),
[TX_WRAPPER] = BIT(2),
@@ -67,9 +67,9 @@ static const u32 ipa_reg_clkon_cfg_fmask[] = {
[DRBIP] = BIT(31),
};
-IPA_REG_FIELDS(CLKON_CFG, clkon_cfg, 0x00000044);
+REG_FIELDS(CLKON_CFG, clkon_cfg, 0x00000044);
-static const u32 ipa_reg_route_fmask[] = {
+static const u32 reg_route_fmask[] = {
[ROUTE_DIS] = BIT(0),
[ROUTE_DEF_PIPE] = GENMASK(5, 1),
[ROUTE_DEF_HDR_TABLE] = BIT(6),
@@ -80,24 +80,24 @@ static const u32 ipa_reg_route_fmask[] = {
/* Bits 25-31 reserved */
};
-IPA_REG_FIELDS(ROUTE, route, 0x00000048);
+REG_FIELDS(ROUTE, route, 0x00000048);
-static const u32 ipa_reg_shared_mem_size_fmask[] = {
+static const u32 reg_shared_mem_size_fmask[] = {
[MEM_SIZE] = GENMASK(15, 0),
[MEM_BADDR] = GENMASK(31, 16),
};
-IPA_REG_FIELDS(SHARED_MEM_SIZE, shared_mem_size, 0x00000054);
+REG_FIELDS(SHARED_MEM_SIZE, shared_mem_size, 0x00000054);
-static const u32 ipa_reg_qsb_max_writes_fmask[] = {
+static const u32 reg_qsb_max_writes_fmask[] = {
[GEN_QMB_0_MAX_WRITES] = GENMASK(3, 0),
[GEN_QMB_1_MAX_WRITES] = GENMASK(7, 4),
/* Bits 8-31 reserved */
};
-IPA_REG_FIELDS(QSB_MAX_WRITES, qsb_max_writes, 0x00000074);
+REG_FIELDS(QSB_MAX_WRITES, qsb_max_writes, 0x00000074);
-static const u32 ipa_reg_qsb_max_reads_fmask[] = {
+static const u32 reg_qsb_max_reads_fmask[] = {
[GEN_QMB_0_MAX_READS] = GENMASK(3, 0),
[GEN_QMB_1_MAX_READS] = GENMASK(7, 4),
/* Bits 8-15 reserved */
@@ -105,9 +105,9 @@ static const u32 ipa_reg_qsb_max_reads_fmask[] = {
[GEN_QMB_1_MAX_READS_BEATS] = GENMASK(31, 24),
};
-IPA_REG_FIELDS(QSB_MAX_READS, qsb_max_reads, 0x00000078);
+REG_FIELDS(QSB_MAX_READS, qsb_max_reads, 0x00000078);
-static const u32 ipa_reg_filt_rout_hash_en_fmask[] = {
+static const u32 reg_filt_rout_hash_en_fmask[] = {
[IPV6_ROUTER_HASH] = BIT(0),
/* Bits 1-3 reserved */
[IPV6_FILTER_HASH] = BIT(4),
@@ -118,9 +118,9 @@ static const u32 ipa_reg_filt_rout_hash_en_fmask[] = {
/* Bits 13-31 reserved */
};
-IPA_REG_FIELDS(FILT_ROUT_HASH_EN, filt_rout_hash_en, 0x0000148);
+REG_FIELDS(FILT_ROUT_HASH_EN, filt_rout_hash_en, 0x0000148);
-static const u32 ipa_reg_filt_rout_hash_flush_fmask[] = {
+static const u32 reg_filt_rout_hash_flush_fmask[] = {
[IPV6_ROUTER_HASH] = BIT(0),
/* Bits 1-3 reserved */
[IPV6_FILTER_HASH] = BIT(4),
@@ -131,23 +131,23 @@ static const u32 ipa_reg_filt_rout_hash_flush_fmask[] = {
/* Bits 13-31 reserved */
};
-IPA_REG_FIELDS(FILT_ROUT_HASH_FLUSH, filt_rout_hash_flush, 0x000014c);
+REG_FIELDS(FILT_ROUT_HASH_FLUSH, filt_rout_hash_flush, 0x000014c);
/* Valid bits defined by ipa->available */
-IPA_REG_STRIDE(STATE_AGGR_ACTIVE, state_aggr_active, 0x000000b4, 0x0004);
+REG_STRIDE(STATE_AGGR_ACTIVE, state_aggr_active, 0x000000b4, 0x0004);
-static const u32 ipa_reg_local_pkt_proc_cntxt_fmask[] = {
+static const u32 reg_local_pkt_proc_cntxt_fmask[] = {
[IPA_BASE_ADDR] = GENMASK(17, 0),
/* Bits 18-31 reserved */
};
/* Offset must be a multiple of 8 */
-IPA_REG_FIELDS(LOCAL_PKT_PROC_CNTXT, local_pkt_proc_cntxt, 0x000001e8);
+REG_FIELDS(LOCAL_PKT_PROC_CNTXT, local_pkt_proc_cntxt, 0x000001e8);
/* Valid bits defined by ipa->available */
-IPA_REG_STRIDE(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec, 0x0004);
+REG_STRIDE(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec, 0x0004);
-static const u32 ipa_reg_ipa_tx_cfg_fmask[] = {
+static const u32 reg_ipa_tx_cfg_fmask[] = {
/* Bits 0-1 reserved */
[PREFETCH_ALMOST_EMPTY_SIZE_TX0] = GENMASK(5, 2),
[DMAW_SCND_OUTSD_PRED_THRESHOLD] = GENMASK(9, 6),
@@ -160,9 +160,9 @@ static const u32 ipa_reg_ipa_tx_cfg_fmask[] = {
/* Bits 19-31 reserved */
};
-IPA_REG_FIELDS(IPA_TX_CFG, ipa_tx_cfg, 0x000001fc);
+REG_FIELDS(IPA_TX_CFG, ipa_tx_cfg, 0x000001fc);
-static const u32 ipa_reg_flavor_0_fmask[] = {
+static const u32 reg_flavor_0_fmask[] = {
[MAX_PIPES] = GENMASK(3, 0),
/* Bits 4-7 reserved */
[MAX_CONS_PIPES] = GENMASK(12, 8),
@@ -173,17 +173,17 @@ static const u32 ipa_reg_flavor_0_fmask[] = {
/* Bits 28-31 reserved */
};
-IPA_REG_FIELDS(FLAVOR_0, flavor_0, 0x00000210);
+REG_FIELDS(FLAVOR_0, flavor_0, 0x00000210);
-static const u32 ipa_reg_idle_indication_cfg_fmask[] = {
+static const u32 reg_idle_indication_cfg_fmask[] = {
[ENTER_IDLE_DEBOUNCE_THRESH] = GENMASK(15, 0),
[CONST_NON_IDLE_ENABLE] = BIT(16),
/* Bits 17-31 reserved */
};
-IPA_REG_FIELDS(IDLE_INDICATION_CFG, idle_indication_cfg, 0x00000240);
+REG_FIELDS(IDLE_INDICATION_CFG, idle_indication_cfg, 0x00000240);
-static const u32 ipa_reg_qtime_timestamp_cfg_fmask[] = {
+static const u32 reg_qtime_timestamp_cfg_fmask[] = {
[DPL_TIMESTAMP_LSB] = GENMASK(4, 0),
/* Bits 5-6 reserved */
[DPL_TIMESTAMP_SEL] = BIT(7),
@@ -193,25 +193,25 @@ static const u32 ipa_reg_qtime_timestamp_cfg_fmask[] = {
/* Bits 21-31 reserved */
};
-IPA_REG_FIELDS(QTIME_TIMESTAMP_CFG, qtime_timestamp_cfg, 0x0000024c);
+REG_FIELDS(QTIME_TIMESTAMP_CFG, qtime_timestamp_cfg, 0x0000024c);
-static const u32 ipa_reg_timers_xo_clk_div_cfg_fmask[] = {
+static const u32 reg_timers_xo_clk_div_cfg_fmask[] = {
[DIV_VALUE] = GENMASK(8, 0),
/* Bits 9-30 reserved */
[DIV_ENABLE] = BIT(31),
};
-IPA_REG_FIELDS(TIMERS_XO_CLK_DIV_CFG, timers_xo_clk_div_cfg, 0x00000250);
+REG_FIELDS(TIMERS_XO_CLK_DIV_CFG, timers_xo_clk_div_cfg, 0x00000250);
-static const u32 ipa_reg_timers_pulse_gran_cfg_fmask[] = {
+static const u32 reg_timers_pulse_gran_cfg_fmask[] = {
[PULSE_GRAN_0] = GENMASK(2, 0),
[PULSE_GRAN_1] = GENMASK(5, 3),
[PULSE_GRAN_2] = GENMASK(8, 6),
};
-IPA_REG_FIELDS(TIMERS_PULSE_GRAN_CFG, timers_pulse_gran_cfg, 0x00000254);
+REG_FIELDS(TIMERS_PULSE_GRAN_CFG, timers_pulse_gran_cfg, 0x00000254);
-static const u32 ipa_reg_src_rsrc_grp_01_rsrc_type_fmask[] = {
+static const u32 reg_src_rsrc_grp_01_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
@@ -222,10 +222,10 @@ static const u32 ipa_reg_src_rsrc_grp_01_rsrc_type_fmask[] = {
/* Bits 30-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_01_RSRC_TYPE, src_rsrc_grp_01_rsrc_type,
- 0x00000400, 0x0020);
+REG_STRIDE_FIELDS(SRC_RSRC_GRP_01_RSRC_TYPE, src_rsrc_grp_01_rsrc_type,
+ 0x00000400, 0x0020);
-static const u32 ipa_reg_src_rsrc_grp_23_rsrc_type_fmask[] = {
+static const u32 reg_src_rsrc_grp_23_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
@@ -236,10 +236,10 @@ static const u32 ipa_reg_src_rsrc_grp_23_rsrc_type_fmask[] = {
/* Bits 30-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_23_RSRC_TYPE, src_rsrc_grp_23_rsrc_type,
- 0x00000404, 0x0020);
+REG_STRIDE_FIELDS(SRC_RSRC_GRP_23_RSRC_TYPE, src_rsrc_grp_23_rsrc_type,
+ 0x00000404, 0x0020);
-static const u32 ipa_reg_dst_rsrc_grp_01_rsrc_type_fmask[] = {
+static const u32 reg_dst_rsrc_grp_01_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
@@ -250,10 +250,10 @@ static const u32 ipa_reg_dst_rsrc_grp_01_rsrc_type_fmask[] = {
/* Bits 30-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_01_RSRC_TYPE, dst_rsrc_grp_01_rsrc_type,
- 0x00000500, 0x0020);
+REG_STRIDE_FIELDS(DST_RSRC_GRP_01_RSRC_TYPE, dst_rsrc_grp_01_rsrc_type,
+ 0x00000500, 0x0020);
-static const u32 ipa_reg_dst_rsrc_grp_23_rsrc_type_fmask[] = {
+static const u32 reg_dst_rsrc_grp_23_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
@@ -264,10 +264,10 @@ static const u32 ipa_reg_dst_rsrc_grp_23_rsrc_type_fmask[] = {
/* Bits 30-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_23_RSRC_TYPE, dst_rsrc_grp_23_rsrc_type,
- 0x00000504, 0x0020);
+REG_STRIDE_FIELDS(DST_RSRC_GRP_23_RSRC_TYPE, dst_rsrc_grp_23_rsrc_type,
+ 0x00000504, 0x0020);
-static const u32 ipa_reg_endp_init_cfg_fmask[] = {
+static const u32 reg_endp_init_cfg_fmask[] = {
[FRAG_OFFLOAD_EN] = BIT(0),
[CS_OFFLOAD_EN] = GENMASK(2, 1),
[CS_METADATA_HDR_OFFSET] = GENMASK(6, 3),
@@ -276,16 +276,16 @@ static const u32 ipa_reg_endp_init_cfg_fmask[] = {
/* Bits 9-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_CFG, endp_init_cfg, 0x00000808, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_CFG, endp_init_cfg, 0x00000808, 0x0070);
-static const u32 ipa_reg_endp_init_nat_fmask[] = {
+static const u32 reg_endp_init_nat_fmask[] = {
[NAT_EN] = GENMASK(1, 0),
/* Bits 2-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_NAT, endp_init_nat, 0x0000080c, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_NAT, endp_init_nat, 0x0000080c, 0x0070);
-static const u32 ipa_reg_endp_init_hdr_fmask[] = {
+static const u32 reg_endp_init_hdr_fmask[] = {
[HDR_LEN] = GENMASK(5, 0),
[HDR_OFST_METADATA_VALID] = BIT(6),
[HDR_OFST_METADATA] = GENMASK(12, 7),
@@ -298,9 +298,9 @@ static const u32 ipa_reg_endp_init_hdr_fmask[] = {
[HDR_OFST_METADATA_MSB] = GENMASK(31, 30),
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_HDR, endp_init_hdr, 0x00000810, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_HDR, endp_init_hdr, 0x00000810, 0x0070);
-static const u32 ipa_reg_endp_init_hdr_ext_fmask[] = {
+static const u32 reg_endp_init_hdr_ext_fmask[] = {
[HDR_ENDIANNESS] = BIT(0),
[HDR_TOTAL_LEN_OR_PAD_VALID] = BIT(1),
[HDR_TOTAL_LEN_OR_PAD] = BIT(2),
@@ -314,12 +314,12 @@ static const u32 ipa_reg_endp_init_hdr_ext_fmask[] = {
/* Bits 22-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_HDR_EXT, endp_init_hdr_ext, 0x00000814, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_HDR_EXT, endp_init_hdr_ext, 0x00000814, 0x0070);
-IPA_REG_STRIDE(ENDP_INIT_HDR_METADATA_MASK, endp_init_hdr_metadata_mask,
- 0x00000818, 0x0070);
+REG_STRIDE(ENDP_INIT_HDR_METADATA_MASK, endp_init_hdr_metadata_mask,
+ 0x00000818, 0x0070);
-static const u32 ipa_reg_endp_init_mode_fmask[] = {
+static const u32 reg_endp_init_mode_fmask[] = {
[ENDP_MODE] = GENMASK(2, 0),
[DCPH_ENABLE] = BIT(3),
[DEST_PIPE_INDEX] = GENMASK(8, 4),
@@ -330,9 +330,9 @@ static const u32 ipa_reg_endp_init_mode_fmask[] = {
/* Bits 30-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_MODE, endp_init_mode, 0x00000820, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_MODE, endp_init_mode, 0x00000820, 0x0070);
-static const u32 ipa_reg_endp_init_aggr_fmask[] = {
+static const u32 reg_endp_init_aggr_fmask[] = {
[AGGR_EN] = GENMASK(1, 0),
[AGGR_TYPE] = GENMASK(4, 2),
[BYTE_LIMIT] = GENMASK(10, 5),
@@ -347,27 +347,27 @@ static const u32 ipa_reg_endp_init_aggr_fmask[] = {
/* Bits 28-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_AGGR, endp_init_aggr, 0x00000824, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_AGGR, endp_init_aggr, 0x00000824, 0x0070);
-static const u32 ipa_reg_endp_init_hol_block_en_fmask[] = {
+static const u32 reg_endp_init_hol_block_en_fmask[] = {
[HOL_BLOCK_EN] = BIT(0),
/* Bits 1-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_EN, endp_init_hol_block_en,
- 0x0000082c, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_EN, endp_init_hol_block_en,
+ 0x0000082c, 0x0070);
-static const u32 ipa_reg_endp_init_hol_block_timer_fmask[] = {
+static const u32 reg_endp_init_hol_block_timer_fmask[] = {
[TIMER_LIMIT] = GENMASK(4, 0),
/* Bits 5-7 reserved */
[TIMER_GRAN_SEL] = BIT(8),
/* Bits 9-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_TIMER, endp_init_hol_block_timer,
- 0x00000830, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_TIMER, endp_init_hol_block_timer,
+ 0x00000830, 0x0070);
-static const u32 ipa_reg_endp_init_deaggr_fmask[] = {
+static const u32 reg_endp_init_deaggr_fmask[] = {
[DEAGGR_HDR_LEN] = GENMASK(5, 0),
[SYSPIPE_ERR_DETECTION] = BIT(6),
[PACKET_OFFSET_VALID] = BIT(7),
@@ -377,24 +377,23 @@ static const u32 ipa_reg_endp_init_deaggr_fmask[] = {
[MAX_PACKET_LEN] = GENMASK(31, 16),
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_DEAGGR, endp_init_deaggr, 0x00000834, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_DEAGGR, endp_init_deaggr, 0x00000834, 0x0070);
-static const u32 ipa_reg_endp_init_rsrc_grp_fmask[] = {
+static const u32 reg_endp_init_rsrc_grp_fmask[] = {
[ENDP_RSRC_GRP] = BIT(0),
/* Bits 1-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_RSRC_GRP, endp_init_rsrc_grp,
- 0x00000838, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_RSRC_GRP, endp_init_rsrc_grp, 0x00000838, 0x0070);
-static const u32 ipa_reg_endp_init_seq_fmask[] = {
+static const u32 reg_endp_init_seq_fmask[] = {
[SEQ_TYPE] = GENMASK(7, 0),
/* Bits 8-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_SEQ, endp_init_seq, 0x0000083c, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_SEQ, endp_init_seq, 0x0000083c, 0x0070);
-static const u32 ipa_reg_endp_status_fmask[] = {
+static const u32 reg_endp_status_fmask[] = {
[STATUS_EN] = BIT(0),
[STATUS_ENDP] = GENMASK(5, 1),
/* Bits 6-8 reserved */
@@ -402,9 +401,9 @@ static const u32 ipa_reg_endp_status_fmask[] = {
/* Bits 10-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_STATUS, endp_status, 0x00000840, 0x0070);
+REG_STRIDE_FIELDS(ENDP_STATUS, endp_status, 0x00000840, 0x0070);
-static const u32 ipa_reg_endp_filter_router_hsh_cfg_fmask[] = {
+static const u32 reg_endp_filter_router_hsh_cfg_fmask[] = {
[FILTER_HASH_MSK_SRC_ID] = BIT(0),
[FILTER_HASH_MSK_SRC_IP] = BIT(1),
[FILTER_HASH_MSK_DST_IP] = BIT(2),
@@ -425,83 +424,83 @@ static const u32 ipa_reg_endp_filter_router_hsh_cfg_fmask[] = {
/* Bits 23-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_FILTER_ROUTER_HSH_CFG, endp_filter_router_hsh_cfg,
- 0x0000085c, 0x0070);
+REG_STRIDE_FIELDS(ENDP_FILTER_ROUTER_HSH_CFG, endp_filter_router_hsh_cfg,
+ 0x0000085c, 0x0070);
/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
-IPA_REG(IPA_IRQ_STTS, ipa_irq_stts, 0x00003008 + 0x1000 * GSI_EE_AP);
+REG(IPA_IRQ_STTS, ipa_irq_stts, 0x00003008 + 0x1000 * GSI_EE_AP);
/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
-IPA_REG(IPA_IRQ_EN, ipa_irq_en, 0x0000300c + 0x1000 * GSI_EE_AP);
+REG(IPA_IRQ_EN, ipa_irq_en, 0x0000300c + 0x1000 * GSI_EE_AP);
/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
-IPA_REG(IPA_IRQ_CLR, ipa_irq_clr, 0x00003010 + 0x1000 * GSI_EE_AP);
+REG(IPA_IRQ_CLR, ipa_irq_clr, 0x00003010 + 0x1000 * GSI_EE_AP);
-static const u32 ipa_reg_ipa_irq_uc_fmask[] = {
+static const u32 reg_ipa_irq_uc_fmask[] = {
[UC_INTR] = BIT(0),
/* Bits 1-31 reserved */
};
-IPA_REG_FIELDS(IPA_IRQ_UC, ipa_irq_uc, 0x0000301c + 0x1000 * GSI_EE_AP);
+REG_FIELDS(IPA_IRQ_UC, ipa_irq_uc, 0x0000301c + 0x1000 * GSI_EE_AP);
/* Valid bits defined by ipa->available */
-IPA_REG_STRIDE(IRQ_SUSPEND_INFO, irq_suspend_info,
- 0x00003030 + 0x1000 * GSI_EE_AP, 0x0004);
+REG_STRIDE(IRQ_SUSPEND_INFO, irq_suspend_info,
+ 0x00003030 + 0x1000 * GSI_EE_AP, 0x0004);
/* Valid bits defined by ipa->available */
-IPA_REG_STRIDE(IRQ_SUSPEND_EN, irq_suspend_en,
- 0x00003034 + 0x1000 * GSI_EE_AP, 0x0004);
+REG_STRIDE(IRQ_SUSPEND_EN, irq_suspend_en,
+ 0x00003034 + 0x1000 * GSI_EE_AP, 0x0004);
/* Valid bits defined by ipa->available */
-IPA_REG_STRIDE(IRQ_SUSPEND_CLR, irq_suspend_clr,
- 0x00003038 + 0x1000 * GSI_EE_AP, 0x0004);
-
-static const struct ipa_reg *ipa_reg_array[] = {
- [COMP_CFG] = &ipa_reg_comp_cfg,
- [CLKON_CFG] = &ipa_reg_clkon_cfg,
- [ROUTE] = &ipa_reg_route,
- [SHARED_MEM_SIZE] = &ipa_reg_shared_mem_size,
- [QSB_MAX_WRITES] = &ipa_reg_qsb_max_writes,
- [QSB_MAX_READS] = &ipa_reg_qsb_max_reads,
- [FILT_ROUT_HASH_EN] = &ipa_reg_filt_rout_hash_en,
- [FILT_ROUT_HASH_FLUSH] = &ipa_reg_filt_rout_hash_flush,
- [STATE_AGGR_ACTIVE] = &ipa_reg_state_aggr_active,
- [LOCAL_PKT_PROC_CNTXT] = &ipa_reg_local_pkt_proc_cntxt,
- [AGGR_FORCE_CLOSE] = &ipa_reg_aggr_force_close,
- [IPA_TX_CFG] = &ipa_reg_ipa_tx_cfg,
- [FLAVOR_0] = &ipa_reg_flavor_0,
- [IDLE_INDICATION_CFG] = &ipa_reg_idle_indication_cfg,
- [QTIME_TIMESTAMP_CFG] = &ipa_reg_qtime_timestamp_cfg,
- [TIMERS_XO_CLK_DIV_CFG] = &ipa_reg_timers_xo_clk_div_cfg,
- [TIMERS_PULSE_GRAN_CFG] = &ipa_reg_timers_pulse_gran_cfg,
- [SRC_RSRC_GRP_01_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_01_rsrc_type,
- [SRC_RSRC_GRP_23_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_23_rsrc_type,
- [DST_RSRC_GRP_01_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_01_rsrc_type,
- [DST_RSRC_GRP_23_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_23_rsrc_type,
- [ENDP_INIT_CFG] = &ipa_reg_endp_init_cfg,
- [ENDP_INIT_NAT] = &ipa_reg_endp_init_nat,
- [ENDP_INIT_HDR] = &ipa_reg_endp_init_hdr,
- [ENDP_INIT_HDR_EXT] = &ipa_reg_endp_init_hdr_ext,
- [ENDP_INIT_HDR_METADATA_MASK] = &ipa_reg_endp_init_hdr_metadata_mask,
- [ENDP_INIT_MODE] = &ipa_reg_endp_init_mode,
- [ENDP_INIT_AGGR] = &ipa_reg_endp_init_aggr,
- [ENDP_INIT_HOL_BLOCK_EN] = &ipa_reg_endp_init_hol_block_en,
- [ENDP_INIT_HOL_BLOCK_TIMER] = &ipa_reg_endp_init_hol_block_timer,
- [ENDP_INIT_DEAGGR] = &ipa_reg_endp_init_deaggr,
- [ENDP_INIT_RSRC_GRP] = &ipa_reg_endp_init_rsrc_grp,
- [ENDP_INIT_SEQ] = &ipa_reg_endp_init_seq,
- [ENDP_STATUS] = &ipa_reg_endp_status,
- [ENDP_FILTER_ROUTER_HSH_CFG] = &ipa_reg_endp_filter_router_hsh_cfg,
- [IPA_IRQ_STTS] = &ipa_reg_ipa_irq_stts,
- [IPA_IRQ_EN] = &ipa_reg_ipa_irq_en,
- [IPA_IRQ_CLR] = &ipa_reg_ipa_irq_clr,
- [IPA_IRQ_UC] = &ipa_reg_ipa_irq_uc,
- [IRQ_SUSPEND_INFO] = &ipa_reg_irq_suspend_info,
- [IRQ_SUSPEND_EN] = &ipa_reg_irq_suspend_en,
- [IRQ_SUSPEND_CLR] = &ipa_reg_irq_suspend_clr,
-};
-
-const struct ipa_regs ipa_regs_v4_7 = {
- .reg_count = ARRAY_SIZE(ipa_reg_array),
- .reg = ipa_reg_array,
+REG_STRIDE(IRQ_SUSPEND_CLR, irq_suspend_clr,
+ 0x00003038 + 0x1000 * GSI_EE_AP, 0x0004);
+
+static const struct reg *reg_array[] = {
+ [COMP_CFG] = &reg_comp_cfg,
+ [CLKON_CFG] = &reg_clkon_cfg,
+ [ROUTE] = &reg_route,
+ [SHARED_MEM_SIZE] = &reg_shared_mem_size,
+ [QSB_MAX_WRITES] = &reg_qsb_max_writes,
+ [QSB_MAX_READS] = &reg_qsb_max_reads,
+ [FILT_ROUT_HASH_EN] = &reg_filt_rout_hash_en,
+ [FILT_ROUT_HASH_FLUSH] = &reg_filt_rout_hash_flush,
+ [STATE_AGGR_ACTIVE] = &reg_state_aggr_active,
+ [LOCAL_PKT_PROC_CNTXT] = &reg_local_pkt_proc_cntxt,
+ [AGGR_FORCE_CLOSE] = &reg_aggr_force_close,
+ [IPA_TX_CFG] = &reg_ipa_tx_cfg,
+ [FLAVOR_0] = &reg_flavor_0,
+ [IDLE_INDICATION_CFG] = &reg_idle_indication_cfg,
+ [QTIME_TIMESTAMP_CFG] = &reg_qtime_timestamp_cfg,
+ [TIMERS_XO_CLK_DIV_CFG] = &reg_timers_xo_clk_div_cfg,
+ [TIMERS_PULSE_GRAN_CFG] = &reg_timers_pulse_gran_cfg,
+ [SRC_RSRC_GRP_01_RSRC_TYPE] = &reg_src_rsrc_grp_01_rsrc_type,
+ [SRC_RSRC_GRP_23_RSRC_TYPE] = &reg_src_rsrc_grp_23_rsrc_type,
+ [DST_RSRC_GRP_01_RSRC_TYPE] = &reg_dst_rsrc_grp_01_rsrc_type,
+ [DST_RSRC_GRP_23_RSRC_TYPE] = &reg_dst_rsrc_grp_23_rsrc_type,
+ [ENDP_INIT_CFG] = &reg_endp_init_cfg,
+ [ENDP_INIT_NAT] = &reg_endp_init_nat,
+ [ENDP_INIT_HDR] = &reg_endp_init_hdr,
+ [ENDP_INIT_HDR_EXT] = &reg_endp_init_hdr_ext,
+ [ENDP_INIT_HDR_METADATA_MASK] = &reg_endp_init_hdr_metadata_mask,
+ [ENDP_INIT_MODE] = &reg_endp_init_mode,
+ [ENDP_INIT_AGGR] = &reg_endp_init_aggr,
+ [ENDP_INIT_HOL_BLOCK_EN] = &reg_endp_init_hol_block_en,
+ [ENDP_INIT_HOL_BLOCK_TIMER] = &reg_endp_init_hol_block_timer,
+ [ENDP_INIT_DEAGGR] = &reg_endp_init_deaggr,
+ [ENDP_INIT_RSRC_GRP] = &reg_endp_init_rsrc_grp,
+ [ENDP_INIT_SEQ] = &reg_endp_init_seq,
+ [ENDP_STATUS] = &reg_endp_status,
+ [ENDP_FILTER_ROUTER_HSH_CFG] = &reg_endp_filter_router_hsh_cfg,
+ [IPA_IRQ_STTS] = &reg_ipa_irq_stts,
+ [IPA_IRQ_EN] = &reg_ipa_irq_en,
+ [IPA_IRQ_CLR] = &reg_ipa_irq_clr,
+ [IPA_IRQ_UC] = &reg_ipa_irq_uc,
+ [IRQ_SUSPEND_INFO] = &reg_irq_suspend_info,
+ [IRQ_SUSPEND_EN] = &reg_irq_suspend_en,
+ [IRQ_SUSPEND_CLR] = &reg_irq_suspend_clr,
+};
+
+const struct regs ipa_regs_v4_7 = {
+ .reg_count = ARRAY_SIZE(reg_array),
+ .reg = reg_array,
};
diff --git a/drivers/net/ipa/reg/ipa_reg-v4.9.c b/drivers/net/ipa/reg/ipa_reg-v4.9.c
index 1f67a03fe599..01f87b5290e0 100644
--- a/drivers/net/ipa/reg/ipa_reg-v4.9.c
+++ b/drivers/net/ipa/reg/ipa_reg-v4.9.c
@@ -7,7 +7,7 @@
#include "../ipa.h"
#include "../ipa_reg.h"
-static const u32 ipa_reg_comp_cfg_fmask[] = {
+static const u32 reg_comp_cfg_fmask[] = {
[RAM_ARB_PRI_CLIENT_SAMP_FIX_DIS] = BIT(0),
[GSI_SNOC_BYPASS_DIS] = BIT(1),
[GEN_QMB_0_SNOC_BYPASS_DIS] = BIT(2),
@@ -35,9 +35,9 @@ static const u32 ipa_reg_comp_cfg_fmask[] = {
[GEN_QMB_0_DYNAMIC_ASIZE] = BIT(31),
};
-IPA_REG_FIELDS(COMP_CFG, comp_cfg, 0x0000003c);
+REG_FIELDS(COMP_CFG, comp_cfg, 0x0000003c);
-static const u32 ipa_reg_clkon_cfg_fmask[] = {
+static const u32 reg_clkon_cfg_fmask[] = {
[CLKON_RX] = BIT(0),
[CLKON_PROC] = BIT(1),
[TX_WRAPPER] = BIT(2),
@@ -72,9 +72,9 @@ static const u32 ipa_reg_clkon_cfg_fmask[] = {
[DRBIP] = BIT(31),
};
-IPA_REG_FIELDS(CLKON_CFG, clkon_cfg, 0x00000044);
+REG_FIELDS(CLKON_CFG, clkon_cfg, 0x00000044);
-static const u32 ipa_reg_route_fmask[] = {
+static const u32 reg_route_fmask[] = {
[ROUTE_DIS] = BIT(0),
[ROUTE_DEF_PIPE] = GENMASK(5, 1),
[ROUTE_DEF_HDR_TABLE] = BIT(6),
@@ -85,24 +85,24 @@ static const u32 ipa_reg_route_fmask[] = {
/* Bits 25-31 reserved */
};
-IPA_REG_FIELDS(ROUTE, route, 0x00000048);
+REG_FIELDS(ROUTE, route, 0x00000048);
-static const u32 ipa_reg_shared_mem_size_fmask[] = {
+static const u32 reg_shared_mem_size_fmask[] = {
[MEM_SIZE] = GENMASK(15, 0),
[MEM_BADDR] = GENMASK(31, 16),
};
-IPA_REG_FIELDS(SHARED_MEM_SIZE, shared_mem_size, 0x00000054);
+REG_FIELDS(SHARED_MEM_SIZE, shared_mem_size, 0x00000054);
-static const u32 ipa_reg_qsb_max_writes_fmask[] = {
+static const u32 reg_qsb_max_writes_fmask[] = {
[GEN_QMB_0_MAX_WRITES] = GENMASK(3, 0),
[GEN_QMB_1_MAX_WRITES] = GENMASK(7, 4),
/* Bits 8-31 reserved */
};
-IPA_REG_FIELDS(QSB_MAX_WRITES, qsb_max_writes, 0x00000074);
+REG_FIELDS(QSB_MAX_WRITES, qsb_max_writes, 0x00000074);
-static const u32 ipa_reg_qsb_max_reads_fmask[] = {
+static const u32 reg_qsb_max_reads_fmask[] = {
[GEN_QMB_0_MAX_READS] = GENMASK(3, 0),
[GEN_QMB_1_MAX_READS] = GENMASK(7, 4),
/* Bits 8-15 reserved */
@@ -110,9 +110,9 @@ static const u32 ipa_reg_qsb_max_reads_fmask[] = {
[GEN_QMB_1_MAX_READS_BEATS] = GENMASK(31, 24),
};
-IPA_REG_FIELDS(QSB_MAX_READS, qsb_max_reads, 0x00000078);
+REG_FIELDS(QSB_MAX_READS, qsb_max_reads, 0x00000078);
-static const u32 ipa_reg_filt_rout_hash_en_fmask[] = {
+static const u32 reg_filt_rout_hash_en_fmask[] = {
[IPV6_ROUTER_HASH] = BIT(0),
/* Bits 1-3 reserved */
[IPV6_FILTER_HASH] = BIT(4),
@@ -123,9 +123,9 @@ static const u32 ipa_reg_filt_rout_hash_en_fmask[] = {
/* Bits 13-31 reserved */
};
-IPA_REG_FIELDS(FILT_ROUT_HASH_EN, filt_rout_hash_en, 0x0000148);
+REG_FIELDS(FILT_ROUT_HASH_EN, filt_rout_hash_en, 0x0000148);
-static const u32 ipa_reg_filt_rout_hash_flush_fmask[] = {
+static const u32 reg_filt_rout_hash_flush_fmask[] = {
[IPV6_ROUTER_HASH] = BIT(0),
/* Bits 1-3 reserved */
[IPV6_FILTER_HASH] = BIT(4),
@@ -136,23 +136,23 @@ static const u32 ipa_reg_filt_rout_hash_flush_fmask[] = {
/* Bits 13-31 reserved */
};
-IPA_REG_FIELDS(FILT_ROUT_HASH_FLUSH, filt_rout_hash_flush, 0x000014c);
+REG_FIELDS(FILT_ROUT_HASH_FLUSH, filt_rout_hash_flush, 0x000014c);
/* Valid bits defined by ipa->available */
-IPA_REG_STRIDE(STATE_AGGR_ACTIVE, state_aggr_active, 0x000000b4, 0x0004);
+REG_STRIDE(STATE_AGGR_ACTIVE, state_aggr_active, 0x000000b4, 0x0004);
-static const u32 ipa_reg_local_pkt_proc_cntxt_fmask[] = {
+static const u32 reg_local_pkt_proc_cntxt_fmask[] = {
[IPA_BASE_ADDR] = GENMASK(17, 0),
/* Bits 18-31 reserved */
};
/* Offset must be a multiple of 8 */
-IPA_REG_FIELDS(LOCAL_PKT_PROC_CNTXT, local_pkt_proc_cntxt, 0x000001e8);
+REG_FIELDS(LOCAL_PKT_PROC_CNTXT, local_pkt_proc_cntxt, 0x000001e8);
/* Valid bits defined by ipa->available */
-IPA_REG_STRIDE(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec, 0x0004);
+REG_STRIDE(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec, 0x0004);
-static const u32 ipa_reg_ipa_tx_cfg_fmask[] = {
+static const u32 reg_ipa_tx_cfg_fmask[] = {
/* Bits 0-1 reserved */
[PREFETCH_ALMOST_EMPTY_SIZE_TX0] = GENMASK(5, 2),
[DMAW_SCND_OUTSD_PRED_THRESHOLD] = GENMASK(9, 6),
@@ -165,9 +165,9 @@ static const u32 ipa_reg_ipa_tx_cfg_fmask[] = {
/* Bits 19-31 reserved */
};
-IPA_REG_FIELDS(IPA_TX_CFG, ipa_tx_cfg, 0x000001fc);
+REG_FIELDS(IPA_TX_CFG, ipa_tx_cfg, 0x000001fc);
-static const u32 ipa_reg_flavor_0_fmask[] = {
+static const u32 reg_flavor_0_fmask[] = {
[MAX_PIPES] = GENMASK(3, 0),
/* Bits 4-7 reserved */
[MAX_CONS_PIPES] = GENMASK(12, 8),
@@ -178,17 +178,17 @@ static const u32 ipa_reg_flavor_0_fmask[] = {
/* Bits 28-31 reserved */
};
-IPA_REG_FIELDS(FLAVOR_0, flavor_0, 0x00000210);
+REG_FIELDS(FLAVOR_0, flavor_0, 0x00000210);
-static const u32 ipa_reg_idle_indication_cfg_fmask[] = {
+static const u32 reg_idle_indication_cfg_fmask[] = {
[ENTER_IDLE_DEBOUNCE_THRESH] = GENMASK(15, 0),
[CONST_NON_IDLE_ENABLE] = BIT(16),
/* Bits 17-31 reserved */
};
-IPA_REG_FIELDS(IDLE_INDICATION_CFG, idle_indication_cfg, 0x00000240);
+REG_FIELDS(IDLE_INDICATION_CFG, idle_indication_cfg, 0x00000240);
-static const u32 ipa_reg_qtime_timestamp_cfg_fmask[] = {
+static const u32 reg_qtime_timestamp_cfg_fmask[] = {
[DPL_TIMESTAMP_LSB] = GENMASK(4, 0),
/* Bits 5-6 reserved */
[DPL_TIMESTAMP_SEL] = BIT(7),
@@ -198,25 +198,25 @@ static const u32 ipa_reg_qtime_timestamp_cfg_fmask[] = {
/* Bits 21-31 reserved */
};
-IPA_REG_FIELDS(QTIME_TIMESTAMP_CFG, qtime_timestamp_cfg, 0x0000024c);
+REG_FIELDS(QTIME_TIMESTAMP_CFG, qtime_timestamp_cfg, 0x0000024c);
-static const u32 ipa_reg_timers_xo_clk_div_cfg_fmask[] = {
+static const u32 reg_timers_xo_clk_div_cfg_fmask[] = {
[DIV_VALUE] = GENMASK(8, 0),
/* Bits 9-30 reserved */
[DIV_ENABLE] = BIT(31),
};
-IPA_REG_FIELDS(TIMERS_XO_CLK_DIV_CFG, timers_xo_clk_div_cfg, 0x00000250);
+REG_FIELDS(TIMERS_XO_CLK_DIV_CFG, timers_xo_clk_div_cfg, 0x00000250);
-static const u32 ipa_reg_timers_pulse_gran_cfg_fmask[] = {
+static const u32 reg_timers_pulse_gran_cfg_fmask[] = {
[PULSE_GRAN_0] = GENMASK(2, 0),
[PULSE_GRAN_1] = GENMASK(5, 3),
[PULSE_GRAN_2] = GENMASK(8, 6),
};
-IPA_REG_FIELDS(TIMERS_PULSE_GRAN_CFG, timers_pulse_gran_cfg, 0x00000254);
+REG_FIELDS(TIMERS_PULSE_GRAN_CFG, timers_pulse_gran_cfg, 0x00000254);
-static const u32 ipa_reg_src_rsrc_grp_01_rsrc_type_fmask[] = {
+static const u32 reg_src_rsrc_grp_01_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
@@ -227,10 +227,10 @@ static const u32 ipa_reg_src_rsrc_grp_01_rsrc_type_fmask[] = {
/* Bits 30-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_01_RSRC_TYPE, src_rsrc_grp_01_rsrc_type,
- 0x00000400, 0x0020);
+REG_STRIDE_FIELDS(SRC_RSRC_GRP_01_RSRC_TYPE, src_rsrc_grp_01_rsrc_type,
+ 0x00000400, 0x0020);
-static const u32 ipa_reg_src_rsrc_grp_23_rsrc_type_fmask[] = {
+static const u32 reg_src_rsrc_grp_23_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
@@ -241,10 +241,10 @@ static const u32 ipa_reg_src_rsrc_grp_23_rsrc_type_fmask[] = {
/* Bits 30-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_23_RSRC_TYPE, src_rsrc_grp_23_rsrc_type,
- 0x00000404, 0x0020);
+REG_STRIDE_FIELDS(SRC_RSRC_GRP_23_RSRC_TYPE, src_rsrc_grp_23_rsrc_type,
+ 0x00000404, 0x0020);
-static const u32 ipa_reg_dst_rsrc_grp_01_rsrc_type_fmask[] = {
+static const u32 reg_dst_rsrc_grp_01_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
@@ -255,10 +255,10 @@ static const u32 ipa_reg_dst_rsrc_grp_01_rsrc_type_fmask[] = {
/* Bits 30-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_01_RSRC_TYPE, dst_rsrc_grp_01_rsrc_type,
- 0x00000500, 0x0020);
+REG_STRIDE_FIELDS(DST_RSRC_GRP_01_RSRC_TYPE, dst_rsrc_grp_01_rsrc_type,
+ 0x00000500, 0x0020);
-static const u32 ipa_reg_dst_rsrc_grp_23_rsrc_type_fmask[] = {
+static const u32 reg_dst_rsrc_grp_23_rsrc_type_fmask[] = {
[X_MIN_LIM] = GENMASK(5, 0),
/* Bits 6-7 reserved */
[X_MAX_LIM] = GENMASK(13, 8),
@@ -269,10 +269,10 @@ static const u32 ipa_reg_dst_rsrc_grp_23_rsrc_type_fmask[] = {
/* Bits 30-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_23_RSRC_TYPE, dst_rsrc_grp_23_rsrc_type,
- 0x00000504, 0x0020);
+REG_STRIDE_FIELDS(DST_RSRC_GRP_23_RSRC_TYPE, dst_rsrc_grp_23_rsrc_type,
+ 0x00000504, 0x0020);
-static const u32 ipa_reg_endp_init_cfg_fmask[] = {
+static const u32 reg_endp_init_cfg_fmask[] = {
[FRAG_OFFLOAD_EN] = BIT(0),
[CS_OFFLOAD_EN] = GENMASK(2, 1),
[CS_METADATA_HDR_OFFSET] = GENMASK(6, 3),
@@ -281,16 +281,16 @@ static const u32 ipa_reg_endp_init_cfg_fmask[] = {
/* Bits 9-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_CFG, endp_init_cfg, 0x00000808, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_CFG, endp_init_cfg, 0x00000808, 0x0070);
-static const u32 ipa_reg_endp_init_nat_fmask[] = {
+static const u32 reg_endp_init_nat_fmask[] = {
[NAT_EN] = GENMASK(1, 0),
/* Bits 2-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_NAT, endp_init_nat, 0x0000080c, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_NAT, endp_init_nat, 0x0000080c, 0x0070);
-static const u32 ipa_reg_endp_init_hdr_fmask[] = {
+static const u32 reg_endp_init_hdr_fmask[] = {
[HDR_LEN] = GENMASK(5, 0),
[HDR_OFST_METADATA_VALID] = BIT(6),
[HDR_OFST_METADATA] = GENMASK(12, 7),
@@ -302,9 +302,9 @@ static const u32 ipa_reg_endp_init_hdr_fmask[] = {
[HDR_OFST_METADATA_MSB] = GENMASK(31, 30),
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_HDR, endp_init_hdr, 0x00000810, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_HDR, endp_init_hdr, 0x00000810, 0x0070);
-static const u32 ipa_reg_endp_init_hdr_ext_fmask[] = {
+static const u32 reg_endp_init_hdr_ext_fmask[] = {
[HDR_ENDIANNESS] = BIT(0),
[HDR_TOTAL_LEN_OR_PAD_VALID] = BIT(1),
[HDR_TOTAL_LEN_OR_PAD] = BIT(2),
@@ -318,12 +318,12 @@ static const u32 ipa_reg_endp_init_hdr_ext_fmask[] = {
/* Bits 22-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_HDR_EXT, endp_init_hdr_ext, 0x00000814, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_HDR_EXT, endp_init_hdr_ext, 0x00000814, 0x0070);
-IPA_REG_STRIDE(ENDP_INIT_HDR_METADATA_MASK, endp_init_hdr_metadata_mask,
- 0x00000818, 0x0070);
+REG_STRIDE(ENDP_INIT_HDR_METADATA_MASK, endp_init_hdr_metadata_mask,
+ 0x00000818, 0x0070);
-static const u32 ipa_reg_endp_init_mode_fmask[] = {
+static const u32 reg_endp_init_mode_fmask[] = {
[ENDP_MODE] = GENMASK(2, 0),
[DCPH_ENABLE] = BIT(3),
[DEST_PIPE_INDEX] = GENMASK(8, 4),
@@ -335,9 +335,9 @@ static const u32 ipa_reg_endp_init_mode_fmask[] = {
/* Bit 31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_MODE, endp_init_mode, 0x00000820, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_MODE, endp_init_mode, 0x00000820, 0x0070);
-static const u32 ipa_reg_endp_init_aggr_fmask[] = {
+static const u32 reg_endp_init_aggr_fmask[] = {
[AGGR_EN] = GENMASK(1, 0),
[AGGR_TYPE] = GENMASK(4, 2),
[BYTE_LIMIT] = GENMASK(10, 5),
@@ -352,27 +352,27 @@ static const u32 ipa_reg_endp_init_aggr_fmask[] = {
/* Bits 28-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_AGGR, endp_init_aggr, 0x00000824, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_AGGR, endp_init_aggr, 0x00000824, 0x0070);
-static const u32 ipa_reg_endp_init_hol_block_en_fmask[] = {
+static const u32 reg_endp_init_hol_block_en_fmask[] = {
[HOL_BLOCK_EN] = BIT(0),
/* Bits 1-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_EN, endp_init_hol_block_en,
- 0x0000082c, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_EN, endp_init_hol_block_en,
+ 0x0000082c, 0x0070);
-static const u32 ipa_reg_endp_init_hol_block_timer_fmask[] = {
+static const u32 reg_endp_init_hol_block_timer_fmask[] = {
[TIMER_LIMIT] = GENMASK(4, 0),
/* Bits 5-7 reserved */
[TIMER_GRAN_SEL] = BIT(8),
/* Bits 9-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_TIMER, endp_init_hol_block_timer,
- 0x00000830, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_TIMER, endp_init_hol_block_timer,
+ 0x00000830, 0x0070);
-static const u32 ipa_reg_endp_init_deaggr_fmask[] = {
+static const u32 reg_endp_init_deaggr_fmask[] = {
[DEAGGR_HDR_LEN] = GENMASK(5, 0),
[SYSPIPE_ERR_DETECTION] = BIT(6),
[PACKET_OFFSET_VALID] = BIT(7),
@@ -382,24 +382,23 @@ static const u32 ipa_reg_endp_init_deaggr_fmask[] = {
[MAX_PACKET_LEN] = GENMASK(31, 16),
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_DEAGGR, endp_init_deaggr, 0x00000834, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_DEAGGR, endp_init_deaggr, 0x00000834, 0x0070);
-static const u32 ipa_reg_endp_init_rsrc_grp_fmask[] = {
+static const u32 reg_endp_init_rsrc_grp_fmask[] = {
[ENDP_RSRC_GRP] = GENMASK(1, 0),
/* Bits 2-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_RSRC_GRP, endp_init_rsrc_grp,
- 0x00000838, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_RSRC_GRP, endp_init_rsrc_grp, 0x00000838, 0x0070);
-static const u32 ipa_reg_endp_init_seq_fmask[] = {
+static const u32 reg_endp_init_seq_fmask[] = {
[SEQ_TYPE] = GENMASK(7, 0),
/* Bits 8-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_INIT_SEQ, endp_init_seq, 0x0000083c, 0x0070);
+REG_STRIDE_FIELDS(ENDP_INIT_SEQ, endp_init_seq, 0x0000083c, 0x0070);
-static const u32 ipa_reg_endp_status_fmask[] = {
+static const u32 reg_endp_status_fmask[] = {
[STATUS_EN] = BIT(0),
[STATUS_ENDP] = GENMASK(5, 1),
/* Bits 6-8 reserved */
@@ -407,9 +406,9 @@ static const u32 ipa_reg_endp_status_fmask[] = {
/* Bits 10-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_STATUS, endp_status, 0x00000840, 0x0070);
+REG_STRIDE_FIELDS(ENDP_STATUS, endp_status, 0x00000840, 0x0070);
-static const u32 ipa_reg_endp_filter_router_hsh_cfg_fmask[] = {
+static const u32 reg_endp_filter_router_hsh_cfg_fmask[] = {
[FILTER_HASH_MSK_SRC_ID] = BIT(0),
[FILTER_HASH_MSK_SRC_IP] = BIT(1),
[FILTER_HASH_MSK_DST_IP] = BIT(2),
@@ -430,83 +429,83 @@ static const u32 ipa_reg_endp_filter_router_hsh_cfg_fmask[] = {
/* Bits 23-31 reserved */
};
-IPA_REG_STRIDE_FIELDS(ENDP_FILTER_ROUTER_HSH_CFG, endp_filter_router_hsh_cfg,
- 0x0000085c, 0x0070);
+REG_STRIDE_FIELDS(ENDP_FILTER_ROUTER_HSH_CFG, endp_filter_router_hsh_cfg,
+ 0x0000085c, 0x0070);
/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
-IPA_REG(IPA_IRQ_STTS, ipa_irq_stts, 0x00004008 + 0x1000 * GSI_EE_AP);
+REG(IPA_IRQ_STTS, ipa_irq_stts, 0x00004008 + 0x1000 * GSI_EE_AP);
/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
-IPA_REG(IPA_IRQ_EN, ipa_irq_en, 0x0000400c + 0x1000 * GSI_EE_AP);
+REG(IPA_IRQ_EN, ipa_irq_en, 0x0000400c + 0x1000 * GSI_EE_AP);
/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
-IPA_REG(IPA_IRQ_CLR, ipa_irq_clr, 0x00004010 + 0x1000 * GSI_EE_AP);
+REG(IPA_IRQ_CLR, ipa_irq_clr, 0x00004010 + 0x1000 * GSI_EE_AP);
-static const u32 ipa_reg_ipa_irq_uc_fmask[] = {
+static const u32 reg_ipa_irq_uc_fmask[] = {
[UC_INTR] = BIT(0),
/* Bits 1-31 reserved */
};
-IPA_REG_FIELDS(IPA_IRQ_UC, ipa_irq_uc, 0x0000401c + 0x1000 * GSI_EE_AP);
+REG_FIELDS(IPA_IRQ_UC, ipa_irq_uc, 0x0000401c + 0x1000 * GSI_EE_AP);
/* Valid bits defined by ipa->available */
-IPA_REG_STRIDE(IRQ_SUSPEND_INFO, irq_suspend_info,
- 0x00004030 + 0x1000 * GSI_EE_AP, 0x0004);
+REG_STRIDE(IRQ_SUSPEND_INFO, irq_suspend_info,
+ 0x00004030 + 0x1000 * GSI_EE_AP, 0x0004);
/* Valid bits defined by ipa->available */
-IPA_REG_STRIDE(IRQ_SUSPEND_EN, irq_suspend_en,
- 0x00004034 + 0x1000 * GSI_EE_AP, 0x0004);
+REG_STRIDE(IRQ_SUSPEND_EN, irq_suspend_en,
+ 0x00004034 + 0x1000 * GSI_EE_AP, 0x0004);
/* Valid bits defined by ipa->available */
-IPA_REG_STRIDE(IRQ_SUSPEND_CLR, irq_suspend_clr,
- 0x00004038 + 0x1000 * GSI_EE_AP, 0x0004);
-
-static const struct ipa_reg *ipa_reg_array[] = {
- [COMP_CFG] = &ipa_reg_comp_cfg,
- [CLKON_CFG] = &ipa_reg_clkon_cfg,
- [ROUTE] = &ipa_reg_route,
- [SHARED_MEM_SIZE] = &ipa_reg_shared_mem_size,
- [QSB_MAX_WRITES] = &ipa_reg_qsb_max_writes,
- [QSB_MAX_READS] = &ipa_reg_qsb_max_reads,
- [FILT_ROUT_HASH_EN] = &ipa_reg_filt_rout_hash_en,
- [FILT_ROUT_HASH_FLUSH] = &ipa_reg_filt_rout_hash_flush,
- [STATE_AGGR_ACTIVE] = &ipa_reg_state_aggr_active,
- [LOCAL_PKT_PROC_CNTXT] = &ipa_reg_local_pkt_proc_cntxt,
- [AGGR_FORCE_CLOSE] = &ipa_reg_aggr_force_close,
- [IPA_TX_CFG] = &ipa_reg_ipa_tx_cfg,
- [FLAVOR_0] = &ipa_reg_flavor_0,
- [IDLE_INDICATION_CFG] = &ipa_reg_idle_indication_cfg,
- [QTIME_TIMESTAMP_CFG] = &ipa_reg_qtime_timestamp_cfg,
- [TIMERS_XO_CLK_DIV_CFG] = &ipa_reg_timers_xo_clk_div_cfg,
- [TIMERS_PULSE_GRAN_CFG] = &ipa_reg_timers_pulse_gran_cfg,
- [SRC_RSRC_GRP_01_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_01_rsrc_type,
- [SRC_RSRC_GRP_23_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_23_rsrc_type,
- [DST_RSRC_GRP_01_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_01_rsrc_type,
- [DST_RSRC_GRP_23_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_23_rsrc_type,
- [ENDP_INIT_CFG] = &ipa_reg_endp_init_cfg,
- [ENDP_INIT_NAT] = &ipa_reg_endp_init_nat,
- [ENDP_INIT_HDR] = &ipa_reg_endp_init_hdr,
- [ENDP_INIT_HDR_EXT] = &ipa_reg_endp_init_hdr_ext,
- [ENDP_INIT_HDR_METADATA_MASK] = &ipa_reg_endp_init_hdr_metadata_mask,
- [ENDP_INIT_MODE] = &ipa_reg_endp_init_mode,
- [ENDP_INIT_AGGR] = &ipa_reg_endp_init_aggr,
- [ENDP_INIT_HOL_BLOCK_EN] = &ipa_reg_endp_init_hol_block_en,
- [ENDP_INIT_HOL_BLOCK_TIMER] = &ipa_reg_endp_init_hol_block_timer,
- [ENDP_INIT_DEAGGR] = &ipa_reg_endp_init_deaggr,
- [ENDP_INIT_RSRC_GRP] = &ipa_reg_endp_init_rsrc_grp,
- [ENDP_INIT_SEQ] = &ipa_reg_endp_init_seq,
- [ENDP_STATUS] = &ipa_reg_endp_status,
- [ENDP_FILTER_ROUTER_HSH_CFG] = &ipa_reg_endp_filter_router_hsh_cfg,
- [IPA_IRQ_STTS] = &ipa_reg_ipa_irq_stts,
- [IPA_IRQ_EN] = &ipa_reg_ipa_irq_en,
- [IPA_IRQ_CLR] = &ipa_reg_ipa_irq_clr,
- [IPA_IRQ_UC] = &ipa_reg_ipa_irq_uc,
- [IRQ_SUSPEND_INFO] = &ipa_reg_irq_suspend_info,
- [IRQ_SUSPEND_EN] = &ipa_reg_irq_suspend_en,
- [IRQ_SUSPEND_CLR] = &ipa_reg_irq_suspend_clr,
-};
-
-const struct ipa_regs ipa_regs_v4_9 = {
- .reg_count = ARRAY_SIZE(ipa_reg_array),
- .reg = ipa_reg_array,
+REG_STRIDE(IRQ_SUSPEND_CLR, irq_suspend_clr,
+ 0x00004038 + 0x1000 * GSI_EE_AP, 0x0004);
+
+static const struct reg *reg_array[] = {
+ [COMP_CFG] = &reg_comp_cfg,
+ [CLKON_CFG] = &reg_clkon_cfg,
+ [ROUTE] = &reg_route,
+ [SHARED_MEM_SIZE] = &reg_shared_mem_size,
+ [QSB_MAX_WRITES] = &reg_qsb_max_writes,
+ [QSB_MAX_READS] = &reg_qsb_max_reads,
+ [FILT_ROUT_HASH_EN] = &reg_filt_rout_hash_en,
+ [FILT_ROUT_HASH_FLUSH] = &reg_filt_rout_hash_flush,
+ [STATE_AGGR_ACTIVE] = &reg_state_aggr_active,
+ [LOCAL_PKT_PROC_CNTXT] = &reg_local_pkt_proc_cntxt,
+ [AGGR_FORCE_CLOSE] = &reg_aggr_force_close,
+ [IPA_TX_CFG] = &reg_ipa_tx_cfg,
+ [FLAVOR_0] = &reg_flavor_0,
+ [IDLE_INDICATION_CFG] = &reg_idle_indication_cfg,
+ [QTIME_TIMESTAMP_CFG] = &reg_qtime_timestamp_cfg,
+ [TIMERS_XO_CLK_DIV_CFG] = &reg_timers_xo_clk_div_cfg,
+ [TIMERS_PULSE_GRAN_CFG] = &reg_timers_pulse_gran_cfg,
+ [SRC_RSRC_GRP_01_RSRC_TYPE] = &reg_src_rsrc_grp_01_rsrc_type,
+ [SRC_RSRC_GRP_23_RSRC_TYPE] = &reg_src_rsrc_grp_23_rsrc_type,
+ [DST_RSRC_GRP_01_RSRC_TYPE] = &reg_dst_rsrc_grp_01_rsrc_type,
+ [DST_RSRC_GRP_23_RSRC_TYPE] = &reg_dst_rsrc_grp_23_rsrc_type,
+ [ENDP_INIT_CFG] = &reg_endp_init_cfg,
+ [ENDP_INIT_NAT] = &reg_endp_init_nat,
+ [ENDP_INIT_HDR] = &reg_endp_init_hdr,
+ [ENDP_INIT_HDR_EXT] = &reg_endp_init_hdr_ext,
+ [ENDP_INIT_HDR_METADATA_MASK] = &reg_endp_init_hdr_metadata_mask,
+ [ENDP_INIT_MODE] = &reg_endp_init_mode,
+ [ENDP_INIT_AGGR] = &reg_endp_init_aggr,
+ [ENDP_INIT_HOL_BLOCK_EN] = &reg_endp_init_hol_block_en,
+ [ENDP_INIT_HOL_BLOCK_TIMER] = &reg_endp_init_hol_block_timer,
+ [ENDP_INIT_DEAGGR] = &reg_endp_init_deaggr,
+ [ENDP_INIT_RSRC_GRP] = &reg_endp_init_rsrc_grp,
+ [ENDP_INIT_SEQ] = &reg_endp_init_seq,
+ [ENDP_STATUS] = &reg_endp_status,
+ [ENDP_FILTER_ROUTER_HSH_CFG] = &reg_endp_filter_router_hsh_cfg,
+ [IPA_IRQ_STTS] = &reg_ipa_irq_stts,
+ [IPA_IRQ_EN] = &reg_ipa_irq_en,
+ [IPA_IRQ_CLR] = &reg_ipa_irq_clr,
+ [IPA_IRQ_UC] = &reg_ipa_irq_uc,
+ [IRQ_SUSPEND_INFO] = &reg_irq_suspend_info,
+ [IRQ_SUSPEND_EN] = &reg_irq_suspend_en,
+ [IRQ_SUSPEND_CLR] = &reg_irq_suspend_clr,
+};
+
+const struct regs ipa_regs_v4_9 = {
+ .reg_count = ARRAY_SIZE(reg_array),
+ .reg = reg_array,
};
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
index bb1c298c1e78..460b3d4f2245 100644
--- a/drivers/net/ipvlan/ipvlan_core.c
+++ b/drivers/net/ipvlan/ipvlan_core.c
@@ -157,7 +157,7 @@ void *ipvlan_get_L3_hdr(struct ipvl_port *port, struct sk_buff *skb, int *type)
return NULL;
ip4h = ip_hdr(skb);
- pktlen = ntohs(ip4h->tot_len);
+ pktlen = skb_ip_totlen(skb);
if (ip4h->ihl < 5 || ip4h->version != 4)
return NULL;
if (skb->len < pktlen || pktlen < (ip4h->ihl * 4))
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index bf8ac7a3ded7..25616247d7a5 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -528,9 +528,9 @@ static void count_tx(struct net_device *dev, int ret, int len)
}
}
-static void macsec_encrypt_done(struct crypto_async_request *base, int err)
+static void macsec_encrypt_done(void *data, int err)
{
- struct sk_buff *skb = base->data;
+ struct sk_buff *skb = data;
struct net_device *dev = skb->dev;
struct macsec_dev *macsec = macsec_priv(dev);
struct macsec_tx_sa *sa = macsec_skb_cb(skb)->tx_sa;
@@ -835,9 +835,9 @@ static void count_rx(struct net_device *dev, int len)
u64_stats_update_end(&stats->syncp);
}
-static void macsec_decrypt_done(struct crypto_async_request *base, int err)
+static void macsec_decrypt_done(void *data, int err)
{
- struct sk_buff *skb = base->data;
+ struct sk_buff *skb = data;
struct net_device *dev = skb->dev;
struct macsec_dev *macsec = macsec_priv(dev);
struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa;
@@ -2583,16 +2583,56 @@ static bool macsec_is_configured(struct macsec_dev *macsec)
return false;
}
+static int macsec_update_offload(struct net_device *dev, enum macsec_offload offload)
+{
+ enum macsec_offload prev_offload;
+ const struct macsec_ops *ops;
+ struct macsec_context ctx;
+ struct macsec_dev *macsec;
+ int ret = 0;
+
+ macsec = macsec_priv(dev);
+
+ /* Check if the offloading mode is supported by the underlying layers */
+ if (offload != MACSEC_OFFLOAD_OFF &&
+ !macsec_check_offload(offload, macsec))
+ return -EOPNOTSUPP;
+
+ /* Check if the net device is busy. */
+ if (netif_running(dev))
+ return -EBUSY;
+
+ /* Check if the device already has rules configured: we do not support
+ * rules migration.
+ */
+ if (macsec_is_configured(macsec))
+ return -EBUSY;
+
+ prev_offload = macsec->offload;
+
+ ops = __macsec_get_ops(offload == MACSEC_OFFLOAD_OFF ? prev_offload : offload,
+ macsec, &ctx);
+ if (!ops)
+ return -EOPNOTSUPP;
+
+ macsec->offload = offload;
+
+ ctx.secy = &macsec->secy;
+ ret = offload == MACSEC_OFFLOAD_OFF ? macsec_offload(ops->mdo_del_secy, &ctx)
+ : macsec_offload(ops->mdo_add_secy, &ctx);
+ if (ret)
+ macsec->offload = prev_offload;
+
+ return ret;
+}
+
static int macsec_upd_offload(struct sk_buff *skb, struct genl_info *info)
{
struct nlattr *tb_offload[MACSEC_OFFLOAD_ATTR_MAX + 1];
- enum macsec_offload offload, prev_offload;
- int (*func)(struct macsec_context *ctx);
struct nlattr **attrs = info->attrs;
- struct net_device *dev;
- const struct macsec_ops *ops;
- struct macsec_context ctx;
+ enum macsec_offload offload;
struct macsec_dev *macsec;
+ struct net_device *dev;
int ret = 0;
if (!attrs[MACSEC_ATTR_IFINDEX])
@@ -2621,55 +2661,9 @@ static int macsec_upd_offload(struct sk_buff *skb, struct genl_info *info)
}
offload = nla_get_u8(tb_offload[MACSEC_OFFLOAD_ATTR_TYPE]);
- if (macsec->offload == offload)
- goto out;
-
- /* Check if the offloading mode is supported by the underlying layers */
- if (offload != MACSEC_OFFLOAD_OFF &&
- !macsec_check_offload(offload, macsec)) {
- ret = -EOPNOTSUPP;
- goto out;
- }
-
- /* Check if the net device is busy. */
- if (netif_running(dev)) {
- ret = -EBUSY;
- goto out;
- }
-
- prev_offload = macsec->offload;
- macsec->offload = offload;
- /* Check if the device already has rules configured: we do not support
- * rules migration.
- */
- if (macsec_is_configured(macsec)) {
- ret = -EBUSY;
- goto rollback;
- }
-
- ops = __macsec_get_ops(offload == MACSEC_OFFLOAD_OFF ? prev_offload : offload,
- macsec, &ctx);
- if (!ops) {
- ret = -EOPNOTSUPP;
- goto rollback;
- }
-
- if (prev_offload == MACSEC_OFFLOAD_OFF)
- func = ops->mdo_add_secy;
- else
- func = ops->mdo_del_secy;
-
- ctx.secy = &macsec->secy;
- ret = macsec_offload(func, &ctx);
- if (ret)
- goto rollback;
-
- rtnl_unlock();
- return 0;
-
-rollback:
- macsec->offload = prev_offload;
+ if (macsec->offload != offload)
+ ret = macsec_update_offload(dev, offload);
out:
rtnl_unlock();
return ret;
@@ -3817,6 +3811,8 @@ static int macsec_changelink(struct net_device *dev, struct nlattr *tb[],
struct netlink_ext_ack *extack)
{
struct macsec_dev *macsec = macsec_priv(dev);
+ bool macsec_offload_state_change = false;
+ enum macsec_offload offload;
struct macsec_tx_sc tx_sc;
struct macsec_secy secy;
int ret;
@@ -3840,8 +3836,18 @@ static int macsec_changelink(struct net_device *dev, struct nlattr *tb[],
if (ret)
goto cleanup;
+ if (data[IFLA_MACSEC_OFFLOAD]) {
+ offload = nla_get_u8(data[IFLA_MACSEC_OFFLOAD]);
+ if (macsec->offload != offload) {
+ macsec_offload_state_change = true;
+ ret = macsec_update_offload(dev, offload);
+ if (ret)
+ goto cleanup;
+ }
+ }
+
/* If h/w offloading is available, propagate to the device */
- if (macsec_is_offloaded(macsec)) {
+ if (!macsec_offload_state_change && macsec_is_offloaded(macsec)) {
const struct macsec_ops *ops;
struct macsec_context ctx;
@@ -4240,16 +4246,22 @@ static size_t macsec_get_size(const struct net_device *dev)
nla_total_size(1) + /* IFLA_MACSEC_SCB */
nla_total_size(1) + /* IFLA_MACSEC_REPLAY_PROTECT */
nla_total_size(1) + /* IFLA_MACSEC_VALIDATION */
+ nla_total_size(1) + /* IFLA_MACSEC_OFFLOAD */
0;
}
static int macsec_fill_info(struct sk_buff *skb,
const struct net_device *dev)
{
- struct macsec_secy *secy = &macsec_priv(dev)->secy;
- struct macsec_tx_sc *tx_sc = &secy->tx_sc;
+ struct macsec_tx_sc *tx_sc;
+ struct macsec_dev *macsec;
+ struct macsec_secy *secy;
u64 csid;
+ macsec = macsec_priv(dev);
+ secy = &macsec->secy;
+ tx_sc = &secy->tx_sc;
+
switch (secy->key_len) {
case MACSEC_GCM_AES_128_SAK_LEN:
csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_128 : MACSEC_DEFAULT_CIPHER_ID;
@@ -4274,6 +4286,7 @@ static int macsec_fill_info(struct sk_buff *skb,
nla_put_u8(skb, IFLA_MACSEC_SCB, tx_sc->scb) ||
nla_put_u8(skb, IFLA_MACSEC_REPLAY_PROTECT, secy->replay_protect) ||
nla_put_u8(skb, IFLA_MACSEC_VALIDATION, secy->validate_frames) ||
+ nla_put_u8(skb, IFLA_MACSEC_OFFLOAD, macsec->offload) ||
0)
goto nla_put_failure;
diff --git a/drivers/net/mdio/Kconfig b/drivers/net/mdio/Kconfig
index bfa16826a6e1..90309980686e 100644
--- a/drivers/net/mdio/Kconfig
+++ b/drivers/net/mdio/Kconfig
@@ -215,6 +215,17 @@ config MDIO_BUS_MUX_MESON_G12A
the amlogic g12a SoC. The multiplexers connects either the external
or the internal MDIO bus to the parent bus.
+config MDIO_BUS_MUX_MESON_GXL
+ tristate "Amlogic GXL based MDIO bus multiplexer"
+ depends on ARCH_MESON || COMPILE_TEST
+ depends on OF_MDIO && HAS_IOMEM && COMMON_CLK
+ select MDIO_BUS_MUX
+ default m if ARCH_MESON
+ help
+ This module provides a driver for the MDIO multiplexer/glue of
+ the amlogic GXL SoC. The multiplexer connects either the external
+ or the internal MDIO bus to the parent bus.
+
config MDIO_BUS_MUX_BCM6368
tristate "Broadcom BCM6368 MDIO bus multiplexers"
depends on OF && OF_MDIO && (BMIPS_GENERIC || COMPILE_TEST)
diff --git a/drivers/net/mdio/Makefile b/drivers/net/mdio/Makefile
index 15f8dc4042ce..7d4cb4c11e4e 100644
--- a/drivers/net/mdio/Makefile
+++ b/drivers/net/mdio/Makefile
@@ -28,5 +28,6 @@ obj-$(CONFIG_MDIO_BUS_MUX_BCM6368) += mdio-mux-bcm6368.o
obj-$(CONFIG_MDIO_BUS_MUX_BCM_IPROC) += mdio-mux-bcm-iproc.o
obj-$(CONFIG_MDIO_BUS_MUX_GPIO) += mdio-mux-gpio.o
obj-$(CONFIG_MDIO_BUS_MUX_MESON_G12A) += mdio-mux-meson-g12a.o
+obj-$(CONFIG_MDIO_BUS_MUX_MESON_GXL) += mdio-mux-meson-gxl.o
obj-$(CONFIG_MDIO_BUS_MUX_MMIOREG) += mdio-mux-mmioreg.o
obj-$(CONFIG_MDIO_BUS_MUX_MULTIPLEXER) += mdio-mux-multiplexer.o
diff --git a/drivers/net/mdio/fwnode_mdio.c b/drivers/net/mdio/fwnode_mdio.c
index b782c35c4ac1..1183ef5e203e 100644
--- a/drivers/net/mdio/fwnode_mdio.c
+++ b/drivers/net/mdio/fwnode_mdio.c
@@ -115,7 +115,7 @@ int fwnode_mdiobus_register_phy(struct mii_bus *bus,
struct mii_timestamper *mii_ts = NULL;
struct pse_control *psec = NULL;
struct phy_device *phy;
- bool is_c45 = false;
+ bool is_c45;
u32 phy_id;
int rc;
@@ -129,11 +129,7 @@ int fwnode_mdiobus_register_phy(struct mii_bus *bus,
goto clean_pse;
}
- rc = fwnode_property_match_string(child, "compatible",
- "ethernet-phy-ieee802.3-c45");
- if (rc >= 0)
- is_c45 = true;
-
+ is_c45 = fwnode_device_is_compatible(child, "ethernet-phy-ieee802.3-c45");
if (is_c45 || fwnode_get_phy_id(child, &phy_id))
phy = get_phy_device(bus, addr, is_c45);
else
diff --git a/drivers/net/mdio/mdio-aspeed.c b/drivers/net/mdio/mdio-aspeed.c
index 944d005d2bd1..c727103c8b05 100644
--- a/drivers/net/mdio/mdio-aspeed.c
+++ b/drivers/net/mdio/mdio-aspeed.c
@@ -104,61 +104,36 @@ static int aspeed_mdio_write_c22(struct mii_bus *bus, int addr, int regnum,
addr, regnum, val);
}
-static int aspeed_mdio_read_c45(struct mii_bus *bus, int addr, int regnum)
+static int aspeed_mdio_read_c45(struct mii_bus *bus, int addr, int devad,
+ int regnum)
{
- u8 c45_dev = (regnum >> 16) & 0x1F;
- u16 c45_addr = regnum & 0xFFFF;
int rc;
rc = aspeed_mdio_op(bus, ASPEED_MDIO_CTRL_ST_C45, MDIO_C45_OP_ADDR,
- addr, c45_dev, c45_addr);
+ addr, devad, regnum);
if (rc < 0)
return rc;
rc = aspeed_mdio_op(bus, ASPEED_MDIO_CTRL_ST_C45, MDIO_C45_OP_READ,
- addr, c45_dev, 0);
+ addr, devad, 0);
if (rc < 0)
return rc;
return aspeed_mdio_get_data(bus);
}
-static int aspeed_mdio_write_c45(struct mii_bus *bus, int addr, int regnum,
- u16 val)
+static int aspeed_mdio_write_c45(struct mii_bus *bus, int addr, int devad,
+ int regnum, u16 val)
{
- u8 c45_dev = (regnum >> 16) & 0x1F;
- u16 c45_addr = regnum & 0xFFFF;
int rc;
rc = aspeed_mdio_op(bus, ASPEED_MDIO_CTRL_ST_C45, MDIO_C45_OP_ADDR,
- addr, c45_dev, c45_addr);
+ addr, devad, regnum);
if (rc < 0)
return rc;
return aspeed_mdio_op(bus, ASPEED_MDIO_CTRL_ST_C45, MDIO_C45_OP_WRITE,
- addr, c45_dev, val);
-}
-
-static int aspeed_mdio_read(struct mii_bus *bus, int addr, int regnum)
-{
- dev_dbg(&bus->dev, "%s: addr: %d, regnum: %d\n", __func__, addr,
- regnum);
-
- if (regnum & MII_ADDR_C45)
- return aspeed_mdio_read_c45(bus, addr, regnum);
-
- return aspeed_mdio_read_c22(bus, addr, regnum);
-}
-
-static int aspeed_mdio_write(struct mii_bus *bus, int addr, int regnum, u16 val)
-{
- dev_dbg(&bus->dev, "%s: addr: %d, regnum: %d, val: 0x%x\n",
- __func__, addr, regnum, val);
-
- if (regnum & MII_ADDR_C45)
- return aspeed_mdio_write_c45(bus, addr, regnum, val);
-
- return aspeed_mdio_write_c22(bus, addr, regnum, val);
+ addr, devad, val);
}
static int aspeed_mdio_probe(struct platform_device *pdev)
@@ -185,9 +160,10 @@ static int aspeed_mdio_probe(struct platform_device *pdev)
bus->name = DRV_NAME;
snprintf(bus->id, MII_BUS_ID_SIZE, "%s%d", pdev->name, pdev->id);
bus->parent = &pdev->dev;
- bus->read = aspeed_mdio_read;
- bus->write = aspeed_mdio_write;
- bus->probe_capabilities = MDIOBUS_C22_C45;
+ bus->read = aspeed_mdio_read_c22;
+ bus->write = aspeed_mdio_write_c22;
+ bus->read_c45 = aspeed_mdio_read_c45;
+ bus->write_c45 = aspeed_mdio_write_c45;
rc = of_mdiobus_register(bus, pdev->dev.of_node);
if (rc) {
diff --git a/drivers/net/mdio/mdio-bitbang.c b/drivers/net/mdio/mdio-bitbang.c
index 07609114a26b..b83932562be2 100644
--- a/drivers/net/mdio/mdio-bitbang.c
+++ b/drivers/net/mdio/mdio-bitbang.c
@@ -127,14 +127,12 @@ static void mdiobb_cmd(struct mdiobb_ctrl *ctrl, int op, u8 phy, u8 reg)
/* In clause 45 mode all commands are prefixed by MDIO_ADDR to specify the
lower 16 bits of the 21 bit address. This transfer is done identically to a
- MDIO_WRITE except for a different code. To enable clause 45 mode or
- MII_ADDR_C45 into the address. Theoretically clause 45 and normal devices
- can exist on the same bus. Normal devices should ignore the MDIO_ADDR
+ MDIO_WRITE except for a different code. Theoretically clause 45 and normal
+ devices can exist on the same bus. Normal devices should ignore the MDIO_ADDR
phase. */
-static int mdiobb_cmd_addr(struct mdiobb_ctrl *ctrl, int phy, u32 addr)
+static void mdiobb_cmd_addr(struct mdiobb_ctrl *ctrl, int phy, int dev_addr,
+ int reg)
{
- unsigned int dev_addr = (addr >> 16) & 0x1F;
- unsigned int reg = addr & 0xFFFF;
mdiobb_cmd(ctrl, MDIO_C45_ADDR, phy, dev_addr);
/* send the turnaround (10) */
@@ -145,21 +143,13 @@ static int mdiobb_cmd_addr(struct mdiobb_ctrl *ctrl, int phy, u32 addr)
ctrl->ops->set_mdio_dir(ctrl, 0);
mdiobb_get_bit(ctrl);
-
- return dev_addr;
}
-int mdiobb_read(struct mii_bus *bus, int phy, int reg)
+static int mdiobb_read_common(struct mii_bus *bus, int phy)
{
struct mdiobb_ctrl *ctrl = bus->priv;
int ret, i;
- if (reg & MII_ADDR_C45) {
- reg = mdiobb_cmd_addr(ctrl, phy, reg);
- mdiobb_cmd(ctrl, MDIO_C45_READ, phy, reg);
- } else
- mdiobb_cmd(ctrl, ctrl->op_c22_read, phy, reg);
-
ctrl->ops->set_mdio_dir(ctrl, 0);
/* check the turnaround bit: the PHY should be driving it to zero, if this
@@ -180,17 +170,31 @@ int mdiobb_read(struct mii_bus *bus, int phy, int reg)
mdiobb_get_bit(ctrl);
return ret;
}
-EXPORT_SYMBOL(mdiobb_read);
-int mdiobb_write(struct mii_bus *bus, int phy, int reg, u16 val)
+int mdiobb_read_c22(struct mii_bus *bus, int phy, int reg)
{
struct mdiobb_ctrl *ctrl = bus->priv;
- if (reg & MII_ADDR_C45) {
- reg = mdiobb_cmd_addr(ctrl, phy, reg);
- mdiobb_cmd(ctrl, MDIO_C45_WRITE, phy, reg);
- } else
- mdiobb_cmd(ctrl, ctrl->op_c22_write, phy, reg);
+ mdiobb_cmd(ctrl, ctrl->op_c22_read, phy, reg);
+
+ return mdiobb_read_common(bus, phy);
+}
+EXPORT_SYMBOL(mdiobb_read_c22);
+
+int mdiobb_read_c45(struct mii_bus *bus, int phy, int devad, int reg)
+{
+ struct mdiobb_ctrl *ctrl = bus->priv;
+
+ mdiobb_cmd_addr(ctrl, phy, devad, reg);
+ mdiobb_cmd(ctrl, MDIO_C45_READ, phy, reg);
+
+ return mdiobb_read_common(bus, phy);
+}
+EXPORT_SYMBOL(mdiobb_read_c45);
+
+static int mdiobb_write_common(struct mii_bus *bus, u16 val)
+{
+ struct mdiobb_ctrl *ctrl = bus->priv;
/* send the turnaround (10) */
mdiobb_send_bit(ctrl, 1);
@@ -202,7 +206,27 @@ int mdiobb_write(struct mii_bus *bus, int phy, int reg, u16 val)
mdiobb_get_bit(ctrl);
return 0;
}
-EXPORT_SYMBOL(mdiobb_write);
+
+int mdiobb_write_c22(struct mii_bus *bus, int phy, int reg, u16 val)
+{
+ struct mdiobb_ctrl *ctrl = bus->priv;
+
+ mdiobb_cmd(ctrl, ctrl->op_c22_write, phy, reg);
+
+ return mdiobb_write_common(bus, val);
+}
+EXPORT_SYMBOL(mdiobb_write_c22);
+
+int mdiobb_write_c45(struct mii_bus *bus, int phy, int devad, int reg, u16 val)
+{
+ struct mdiobb_ctrl *ctrl = bus->priv;
+
+ mdiobb_cmd_addr(ctrl, phy, devad, reg);
+ mdiobb_cmd(ctrl, MDIO_C45_WRITE, phy, reg);
+
+ return mdiobb_write_common(bus, val);
+}
+EXPORT_SYMBOL(mdiobb_write_c45);
struct mii_bus *alloc_mdio_bitbang(struct mdiobb_ctrl *ctrl)
{
@@ -214,8 +238,11 @@ struct mii_bus *alloc_mdio_bitbang(struct mdiobb_ctrl *ctrl)
__module_get(ctrl->ops->owner);
- bus->read = mdiobb_read;
- bus->write = mdiobb_write;
+ bus->read = mdiobb_read_c22;
+ bus->write = mdiobb_write_c22;
+ bus->read_c45 = mdiobb_read_c45;
+ bus->write_c45 = mdiobb_write_c45;
+
bus->priv = ctrl;
if (!ctrl->override_op_c22) {
ctrl->op_c22_read = MDIO_READ;
diff --git a/drivers/net/mdio/mdio-cavium.c b/drivers/net/mdio/mdio-cavium.c
index 95ce274c1be1..100e46a702ee 100644
--- a/drivers/net/mdio/mdio-cavium.c
+++ b/drivers/net/mdio/mdio-cavium.c
@@ -26,7 +26,7 @@ static void cavium_mdiobus_set_mode(struct cavium_mdiobus *p,
}
static int cavium_mdiobus_c45_addr(struct cavium_mdiobus *p,
- int phy_id, int regnum)
+ int phy_id, int devad, int regnum)
{
union cvmx_smix_cmd smi_cmd;
union cvmx_smix_wr_dat smi_wr;
@@ -38,12 +38,10 @@ static int cavium_mdiobus_c45_addr(struct cavium_mdiobus *p,
smi_wr.s.dat = regnum & 0xffff;
oct_mdio_writeq(smi_wr.u64, p->register_base + SMI_WR_DAT);
- regnum = (regnum >> 16) & 0x1f;
-
smi_cmd.u64 = 0;
smi_cmd.s.phy_op = 0; /* MDIO_CLAUSE_45_ADDRESS */
smi_cmd.s.phy_adr = phy_id;
- smi_cmd.s.reg_adr = regnum;
+ smi_cmd.s.reg_adr = devad;
oct_mdio_writeq(smi_cmd.u64, p->register_base + SMI_CMD);
do {
@@ -59,28 +57,51 @@ static int cavium_mdiobus_c45_addr(struct cavium_mdiobus *p,
return 0;
}
-int cavium_mdiobus_read(struct mii_bus *bus, int phy_id, int regnum)
+int cavium_mdiobus_read_c22(struct mii_bus *bus, int phy_id, int regnum)
{
struct cavium_mdiobus *p = bus->priv;
union cvmx_smix_cmd smi_cmd;
union cvmx_smix_rd_dat smi_rd;
- unsigned int op = 1; /* MDIO_CLAUSE_22_READ */
int timeout = 1000;
- if (regnum & MII_ADDR_C45) {
- int r = cavium_mdiobus_c45_addr(p, phy_id, regnum);
+ cavium_mdiobus_set_mode(p, C22);
+
+ smi_cmd.u64 = 0;
+ smi_cmd.s.phy_op = 1; /* MDIO_CLAUSE_22_READ */
+ smi_cmd.s.phy_adr = phy_id;
+ smi_cmd.s.reg_adr = regnum;
+ oct_mdio_writeq(smi_cmd.u64, p->register_base + SMI_CMD);
+
+ do {
+ /* Wait 1000 clocks so we don't saturate the RSL bus
+ * doing reads.
+ */
+ __delay(1000);
+ smi_rd.u64 = oct_mdio_readq(p->register_base + SMI_RD_DAT);
+ } while (smi_rd.s.pending && --timeout);
+
+ if (smi_rd.s.val)
+ return smi_rd.s.dat;
+ else
+ return -EIO;
+}
+EXPORT_SYMBOL(cavium_mdiobus_read_c22);
- if (r < 0)
- return r;
+int cavium_mdiobus_read_c45(struct mii_bus *bus, int phy_id, int devad,
+ int regnum)
+{
+ struct cavium_mdiobus *p = bus->priv;
+ union cvmx_smix_cmd smi_cmd;
+ union cvmx_smix_rd_dat smi_rd;
+ int timeout = 1000;
+ int r;
- regnum = (regnum >> 16) & 0x1f;
- op = 3; /* MDIO_CLAUSE_45_READ */
- } else {
- cavium_mdiobus_set_mode(p, C22);
- }
+ r = cavium_mdiobus_c45_addr(p, phy_id, devad, regnum);
+ if (r < 0)
+ return r;
smi_cmd.u64 = 0;
- smi_cmd.s.phy_op = op;
+ smi_cmd.s.phy_op = 3; /* MDIO_CLAUSE_45_READ */
smi_cmd.s.phy_adr = phy_id;
smi_cmd.s.reg_adr = regnum;
oct_mdio_writeq(smi_cmd.u64, p->register_base + SMI_CMD);
@@ -98,36 +119,64 @@ int cavium_mdiobus_read(struct mii_bus *bus, int phy_id, int regnum)
else
return -EIO;
}
-EXPORT_SYMBOL(cavium_mdiobus_read);
+EXPORT_SYMBOL(cavium_mdiobus_read_c45);
-int cavium_mdiobus_write(struct mii_bus *bus, int phy_id, int regnum, u16 val)
+int cavium_mdiobus_write_c22(struct mii_bus *bus, int phy_id, int regnum,
+ u16 val)
{
struct cavium_mdiobus *p = bus->priv;
union cvmx_smix_cmd smi_cmd;
union cvmx_smix_wr_dat smi_wr;
- unsigned int op = 0; /* MDIO_CLAUSE_22_WRITE */
int timeout = 1000;
- if (regnum & MII_ADDR_C45) {
- int r = cavium_mdiobus_c45_addr(p, phy_id, regnum);
+ cavium_mdiobus_set_mode(p, C22);
- if (r < 0)
- return r;
+ smi_wr.u64 = 0;
+ smi_wr.s.dat = val;
+ oct_mdio_writeq(smi_wr.u64, p->register_base + SMI_WR_DAT);
- regnum = (regnum >> 16) & 0x1f;
- op = 1; /* MDIO_CLAUSE_45_WRITE */
- } else {
- cavium_mdiobus_set_mode(p, C22);
- }
+ smi_cmd.u64 = 0;
+ smi_cmd.s.phy_op = 0; /* MDIO_CLAUSE_22_WRITE */
+ smi_cmd.s.phy_adr = phy_id;
+ smi_cmd.s.reg_adr = regnum;
+ oct_mdio_writeq(smi_cmd.u64, p->register_base + SMI_CMD);
+
+ do {
+ /* Wait 1000 clocks so we don't saturate the RSL bus
+ * doing reads.
+ */
+ __delay(1000);
+ smi_wr.u64 = oct_mdio_readq(p->register_base + SMI_WR_DAT);
+ } while (smi_wr.s.pending && --timeout);
+
+ if (timeout <= 0)
+ return -EIO;
+
+ return 0;
+}
+EXPORT_SYMBOL(cavium_mdiobus_write_c22);
+
+int cavium_mdiobus_write_c45(struct mii_bus *bus, int phy_id, int devad,
+ int regnum, u16 val)
+{
+ struct cavium_mdiobus *p = bus->priv;
+ union cvmx_smix_cmd smi_cmd;
+ union cvmx_smix_wr_dat smi_wr;
+ int timeout = 1000;
+ int r;
+
+ r = cavium_mdiobus_c45_addr(p, phy_id, devad, regnum);
+ if (r < 0)
+ return r;
smi_wr.u64 = 0;
smi_wr.s.dat = val;
oct_mdio_writeq(smi_wr.u64, p->register_base + SMI_WR_DAT);
smi_cmd.u64 = 0;
- smi_cmd.s.phy_op = op;
+ smi_cmd.s.phy_op = 1; /* MDIO_CLAUSE_45_WRITE */
smi_cmd.s.phy_adr = phy_id;
- smi_cmd.s.reg_adr = regnum;
+ smi_cmd.s.reg_adr = devad;
oct_mdio_writeq(smi_cmd.u64, p->register_base + SMI_CMD);
do {
@@ -143,7 +192,7 @@ int cavium_mdiobus_write(struct mii_bus *bus, int phy_id, int regnum, u16 val)
return 0;
}
-EXPORT_SYMBOL(cavium_mdiobus_write);
+EXPORT_SYMBOL(cavium_mdiobus_write_c45);
MODULE_DESCRIPTION("Common code for OCTEON and Thunder MDIO bus drivers");
MODULE_AUTHOR("David Daney");
diff --git a/drivers/net/mdio/mdio-cavium.h b/drivers/net/mdio/mdio-cavium.h
index a2245d436f5d..71b8e20cd664 100644
--- a/drivers/net/mdio/mdio-cavium.h
+++ b/drivers/net/mdio/mdio-cavium.h
@@ -114,5 +114,10 @@ static inline u64 oct_mdio_readq(void __iomem *addr)
#define oct_mdio_readq(addr) readq(addr)
#endif
-int cavium_mdiobus_read(struct mii_bus *bus, int phy_id, int regnum);
-int cavium_mdiobus_write(struct mii_bus *bus, int phy_id, int regnum, u16 val);
+int cavium_mdiobus_read_c22(struct mii_bus *bus, int phy_id, int regnum);
+int cavium_mdiobus_write_c22(struct mii_bus *bus, int phy_id, int regnum,
+ u16 val);
+int cavium_mdiobus_read_c45(struct mii_bus *bus, int phy_id, int devad,
+ int regnum);
+int cavium_mdiobus_write_c45(struct mii_bus *bus, int phy_id, int devad,
+ int regnum, u16 val);
diff --git a/drivers/net/mdio/mdio-i2c.c b/drivers/net/mdio/mdio-i2c.c
index bf8bf5e20faf..1e0c206d0f2e 100644
--- a/drivers/net/mdio/mdio-i2c.c
+++ b/drivers/net/mdio/mdio-i2c.c
@@ -30,7 +30,8 @@ static unsigned int i2c_mii_phy_addr(int phy_id)
return phy_id + 0x40;
}
-static int i2c_mii_read_default(struct mii_bus *bus, int phy_id, int reg)
+static int i2c_mii_read_default_c45(struct mii_bus *bus, int phy_id, int devad,
+ int reg)
{
struct i2c_adapter *i2c = bus->priv;
struct i2c_msg msgs[2];
@@ -41,8 +42,8 @@ static int i2c_mii_read_default(struct mii_bus *bus, int phy_id, int reg)
return 0xffff;
p = addr;
- if (reg & MII_ADDR_C45) {
- *p++ = 0x20 | ((reg >> 16) & 31);
+ if (devad >= 0) {
+ *p++ = 0x20 | devad;
*p++ = reg >> 8;
}
*p++ = reg;
@@ -64,8 +65,8 @@ static int i2c_mii_read_default(struct mii_bus *bus, int phy_id, int reg)
return data[0] << 8 | data[1];
}
-static int i2c_mii_write_default(struct mii_bus *bus, int phy_id, int reg,
- u16 val)
+static int i2c_mii_write_default_c45(struct mii_bus *bus, int phy_id,
+ int devad, int reg, u16 val)
{
struct i2c_adapter *i2c = bus->priv;
struct i2c_msg msg;
@@ -76,8 +77,8 @@ static int i2c_mii_write_default(struct mii_bus *bus, int phy_id, int reg,
return 0;
p = data;
- if (reg & MII_ADDR_C45) {
- *p++ = (reg >> 16) & 31;
+ if (devad >= 0) {
+ *p++ = devad;
*p++ = reg >> 8;
}
*p++ = reg;
@@ -94,6 +95,17 @@ static int i2c_mii_write_default(struct mii_bus *bus, int phy_id, int reg,
return ret < 0 ? ret : 0;
}
+static int i2c_mii_read_default_c22(struct mii_bus *bus, int phy_id, int reg)
+{
+ return i2c_mii_read_default_c45(bus, phy_id, -1, reg);
+}
+
+static int i2c_mii_write_default_c22(struct mii_bus *bus, int phy_id, int reg,
+ u16 val)
+{
+ return i2c_mii_write_default_c45(bus, phy_id, -1, reg, val);
+}
+
/* RollBall SFPs do not access internal PHY via I2C address 0x56, but
* instead via address 0x51, when SFP page is set to 0x03 and password to
* 0xffffffff.
@@ -285,9 +297,6 @@ static int i2c_mii_read_rollball(struct mii_bus *bus, int phy_id, int reg)
int bus_addr, ret;
u16 val;
- if (!(reg & MII_ADDR_C45))
- return -EOPNOTSUPP;
-
bus_addr = i2c_mii_phy_addr(phy_id);
if (bus_addr != ROLLBALL_PHY_I2C_ADDR)
return 0xffff;
@@ -319,9 +328,6 @@ static int i2c_mii_write_rollball(struct mii_bus *bus, int phy_id, int reg,
int bus_addr, ret;
u8 buf[6];
- if (!(reg & MII_ADDR_C45))
- return -EOPNOTSUPP;
-
bus_addr = i2c_mii_phy_addr(phy_id);
if (bus_addr != ROLLBALL_PHY_I2C_ADDR)
return 0;
@@ -403,8 +409,10 @@ struct mii_bus *mdio_i2c_alloc(struct device *parent, struct i2c_adapter *i2c,
mii->write = i2c_mii_write_rollball;
break;
default:
- mii->read = i2c_mii_read_default;
- mii->write = i2c_mii_write_default;
+ mii->read = i2c_mii_read_default_c22;
+ mii->write = i2c_mii_write_default_c22;
+ mii->read_c45 = i2c_mii_read_default_c45;
+ mii->write_c45 = i2c_mii_write_default_c45;
break;
}
diff --git a/drivers/net/mdio/mdio-ipq4019.c b/drivers/net/mdio/mdio-ipq4019.c
index 4eba5a91075c..78b93de636f5 100644
--- a/drivers/net/mdio/mdio-ipq4019.c
+++ b/drivers/net/mdio/mdio-ipq4019.c
@@ -53,7 +53,8 @@ static int ipq4019_mdio_wait_busy(struct mii_bus *bus)
IPQ4019_MDIO_SLEEP, IPQ4019_MDIO_TIMEOUT);
}
-static int ipq4019_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
+static int ipq4019_mdio_read_c45(struct mii_bus *bus, int mii_id, int mmd,
+ int reg)
{
struct ipq4019_mdio_data *priv = bus->priv;
unsigned int data;
@@ -62,61 +63,71 @@ static int ipq4019_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
if (ipq4019_mdio_wait_busy(bus))
return -ETIMEDOUT;
- /* Clause 45 support */
- if (regnum & MII_ADDR_C45) {
- unsigned int mmd = (regnum >> 16) & 0x1F;
- unsigned int reg = regnum & 0xFFFF;
+ data = readl(priv->membase + MDIO_MODE_REG);
- /* Enter Clause 45 mode */
- data = readl(priv->membase + MDIO_MODE_REG);
+ data |= MDIO_MODE_C45;
- data |= MDIO_MODE_C45;
+ writel(data, priv->membase + MDIO_MODE_REG);
- writel(data, priv->membase + MDIO_MODE_REG);
+ /* issue the phy address and mmd */
+ writel((mii_id << 8) | mmd, priv->membase + MDIO_ADDR_REG);
- /* issue the phy address and mmd */
- writel((mii_id << 8) | mmd, priv->membase + MDIO_ADDR_REG);
+ /* issue reg */
+ writel(reg, priv->membase + MDIO_DATA_WRITE_REG);
- /* issue reg */
- writel(reg, priv->membase + MDIO_DATA_WRITE_REG);
+ cmd = MDIO_CMD_ACCESS_START | MDIO_CMD_ACCESS_CODE_C45_ADDR;
- cmd = MDIO_CMD_ACCESS_START | MDIO_CMD_ACCESS_CODE_C45_ADDR;
- } else {
- /* Enter Clause 22 mode */
- data = readl(priv->membase + MDIO_MODE_REG);
+ /* issue read command */
+ writel(cmd, priv->membase + MDIO_CMD_REG);
- data &= ~MDIO_MODE_C45;
+ /* Wait read complete */
+ if (ipq4019_mdio_wait_busy(bus))
+ return -ETIMEDOUT;
- writel(data, priv->membase + MDIO_MODE_REG);
+ cmd = MDIO_CMD_ACCESS_START | MDIO_CMD_ACCESS_CODE_C45_READ;
- /* issue the phy address and reg */
- writel((mii_id << 8) | regnum, priv->membase + MDIO_ADDR_REG);
+ writel(cmd, priv->membase + MDIO_CMD_REG);
- cmd = MDIO_CMD_ACCESS_START | MDIO_CMD_ACCESS_CODE_READ;
- }
+ if (ipq4019_mdio_wait_busy(bus))
+ return -ETIMEDOUT;
- /* issue read command */
- writel(cmd, priv->membase + MDIO_CMD_REG);
+ /* Read and return data */
+ return readl(priv->membase + MDIO_DATA_READ_REG);
+}
+
+static int ipq4019_mdio_read_c22(struct mii_bus *bus, int mii_id, int regnum)
+{
+ struct ipq4019_mdio_data *priv = bus->priv;
+ unsigned int data;
+ unsigned int cmd;
- /* Wait read complete */
if (ipq4019_mdio_wait_busy(bus))
return -ETIMEDOUT;
- if (regnum & MII_ADDR_C45) {
- cmd = MDIO_CMD_ACCESS_START | MDIO_CMD_ACCESS_CODE_C45_READ;
+ data = readl(priv->membase + MDIO_MODE_REG);
- writel(cmd, priv->membase + MDIO_CMD_REG);
+ data &= ~MDIO_MODE_C45;
- if (ipq4019_mdio_wait_busy(bus))
- return -ETIMEDOUT;
- }
+ writel(data, priv->membase + MDIO_MODE_REG);
+
+ /* issue the phy address and reg */
+ writel((mii_id << 8) | regnum, priv->membase + MDIO_ADDR_REG);
+
+ cmd = MDIO_CMD_ACCESS_START | MDIO_CMD_ACCESS_CODE_READ;
+
+ /* issue read command */
+ writel(cmd, priv->membase + MDIO_CMD_REG);
+
+ /* Wait read complete */
+ if (ipq4019_mdio_wait_busy(bus))
+ return -ETIMEDOUT;
/* Read and return data */
return readl(priv->membase + MDIO_DATA_READ_REG);
}
-static int ipq4019_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
- u16 value)
+static int ipq4019_mdio_write_c45(struct mii_bus *bus, int mii_id, int mmd,
+ int reg, u16 value)
{
struct ipq4019_mdio_data *priv = bus->priv;
unsigned int data;
@@ -125,50 +136,63 @@ static int ipq4019_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
if (ipq4019_mdio_wait_busy(bus))
return -ETIMEDOUT;
- /* Clause 45 support */
- if (regnum & MII_ADDR_C45) {
- unsigned int mmd = (regnum >> 16) & 0x1F;
- unsigned int reg = regnum & 0xFFFF;
+ data = readl(priv->membase + MDIO_MODE_REG);
- /* Enter Clause 45 mode */
- data = readl(priv->membase + MDIO_MODE_REG);
+ data |= MDIO_MODE_C45;
- data |= MDIO_MODE_C45;
+ writel(data, priv->membase + MDIO_MODE_REG);
- writel(data, priv->membase + MDIO_MODE_REG);
+ /* issue the phy address and mmd */
+ writel((mii_id << 8) | mmd, priv->membase + MDIO_ADDR_REG);
- /* issue the phy address and mmd */
- writel((mii_id << 8) | mmd, priv->membase + MDIO_ADDR_REG);
+ /* issue reg */
+ writel(reg, priv->membase + MDIO_DATA_WRITE_REG);
- /* issue reg */
- writel(reg, priv->membase + MDIO_DATA_WRITE_REG);
+ cmd = MDIO_CMD_ACCESS_START | MDIO_CMD_ACCESS_CODE_C45_ADDR;
- cmd = MDIO_CMD_ACCESS_START | MDIO_CMD_ACCESS_CODE_C45_ADDR;
+ writel(cmd, priv->membase + MDIO_CMD_REG);
- writel(cmd, priv->membase + MDIO_CMD_REG);
+ if (ipq4019_mdio_wait_busy(bus))
+ return -ETIMEDOUT;
- if (ipq4019_mdio_wait_busy(bus))
- return -ETIMEDOUT;
- } else {
- /* Enter Clause 22 mode */
- data = readl(priv->membase + MDIO_MODE_REG);
+ /* issue write data */
+ writel(value, priv->membase + MDIO_DATA_WRITE_REG);
- data &= ~MDIO_MODE_C45;
+ cmd = MDIO_CMD_ACCESS_START | MDIO_CMD_ACCESS_CODE_C45_WRITE;
+ writel(cmd, priv->membase + MDIO_CMD_REG);
- writel(data, priv->membase + MDIO_MODE_REG);
+ /* Wait write complete */
+ if (ipq4019_mdio_wait_busy(bus))
+ return -ETIMEDOUT;
- /* issue the phy address and reg */
- writel((mii_id << 8) | regnum, priv->membase + MDIO_ADDR_REG);
- }
+ return 0;
+}
+
+static int ipq4019_mdio_write_c22(struct mii_bus *bus, int mii_id, int regnum,
+ u16 value)
+{
+ struct ipq4019_mdio_data *priv = bus->priv;
+ unsigned int data;
+ unsigned int cmd;
+
+ if (ipq4019_mdio_wait_busy(bus))
+ return -ETIMEDOUT;
+
+ /* Enter Clause 22 mode */
+ data = readl(priv->membase + MDIO_MODE_REG);
+
+ data &= ~MDIO_MODE_C45;
+
+ writel(data, priv->membase + MDIO_MODE_REG);
+
+ /* issue the phy address and reg */
+ writel((mii_id << 8) | regnum, priv->membase + MDIO_ADDR_REG);
/* issue write data */
writel(value, priv->membase + MDIO_DATA_WRITE_REG);
/* issue write command */
- if (regnum & MII_ADDR_C45)
- cmd = MDIO_CMD_ACCESS_START | MDIO_CMD_ACCESS_CODE_C45_WRITE;
- else
- cmd = MDIO_CMD_ACCESS_START | MDIO_CMD_ACCESS_CODE_WRITE;
+ cmd = MDIO_CMD_ACCESS_START | MDIO_CMD_ACCESS_CODE_WRITE;
writel(cmd, priv->membase + MDIO_CMD_REG);
@@ -235,8 +259,10 @@ static int ipq4019_mdio_probe(struct platform_device *pdev)
priv->eth_ldo_rdy = devm_ioremap_resource(&pdev->dev, res);
bus->name = "ipq4019_mdio";
- bus->read = ipq4019_mdio_read;
- bus->write = ipq4019_mdio_write;
+ bus->read = ipq4019_mdio_read_c22;
+ bus->write = ipq4019_mdio_write_c22;
+ bus->read_c45 = ipq4019_mdio_read_c45;
+ bus->write_c45 = ipq4019_mdio_write_c45;
bus->reset = ipq_mdio_reset;
bus->parent = &pdev->dev;
snprintf(bus->id, MII_BUS_ID_SIZE, "%s%d", pdev->name, pdev->id);
diff --git a/drivers/net/mdio/mdio-ipq8064.c b/drivers/net/mdio/mdio-ipq8064.c
index 37e0d8b6da07..fd9716960106 100644
--- a/drivers/net/mdio/mdio-ipq8064.c
+++ b/drivers/net/mdio/mdio-ipq8064.c
@@ -57,10 +57,6 @@ ipq8064_mdio_read(struct mii_bus *bus, int phy_addr, int reg_offset)
u32 ret_val;
int err;
- /* Reject clause 45 */
- if (reg_offset & MII_ADDR_C45)
- return -EOPNOTSUPP;
-
miiaddr |= ((phy_addr << MII_ADDR_SHIFT) & MII_ADDR_MASK) |
((reg_offset << MII_REG_SHIFT) & MII_REG_MASK);
@@ -81,10 +77,6 @@ ipq8064_mdio_write(struct mii_bus *bus, int phy_addr, int reg_offset, u16 data)
u32 miiaddr = MII_WRITE | MII_BUSY | MII_CLKRANGE_250_300M;
struct ipq8064_mdio *priv = bus->priv;
- /* Reject clause 45 */
- if (reg_offset & MII_ADDR_C45)
- return -EOPNOTSUPP;
-
regmap_write(priv->base, MII_DATA_REG_ADDR, data);
miiaddr |= ((phy_addr << MII_ADDR_SHIFT) & MII_ADDR_MASK) |
diff --git a/drivers/net/mdio/mdio-mscc-miim.c b/drivers/net/mdio/mdio-mscc-miim.c
index 51f68daac152..c87e991d1a17 100644
--- a/drivers/net/mdio/mdio-mscc-miim.c
+++ b/drivers/net/mdio/mdio-mscc-miim.c
@@ -108,9 +108,6 @@ static int mscc_miim_read(struct mii_bus *bus, int mii_id, int regnum)
u32 val;
int ret;
- if (regnum & MII_ADDR_C45)
- return -EOPNOTSUPP;
-
ret = mscc_miim_wait_pending(bus);
if (ret)
goto out;
@@ -154,9 +151,6 @@ static int mscc_miim_write(struct mii_bus *bus, int mii_id,
struct mscc_miim_dev *miim = bus->priv;
int ret;
- if (regnum & MII_ADDR_C45)
- return -EOPNOTSUPP;
-
ret = mscc_miim_wait_pending(bus);
if (ret < 0)
goto out;
diff --git a/drivers/net/mdio/mdio-mux-bcm-iproc.c b/drivers/net/mdio/mdio-mux-bcm-iproc.c
index 014c0baedbd2..956d54846b62 100644
--- a/drivers/net/mdio/mdio-mux-bcm-iproc.c
+++ b/drivers/net/mdio/mdio-mux-bcm-iproc.c
@@ -98,7 +98,7 @@ static int iproc_mdio_wait_for_idle(void __iomem *base, bool result)
* Return value: Successful Read operation returns read reg values and write
* operation returns 0. Failure operation returns negative error code.
*/
-static int start_miim_ops(void __iomem *base,
+static int start_miim_ops(void __iomem *base, bool c45,
u16 phyid, u32 reg, u16 val, u32 op)
{
u32 param;
@@ -112,7 +112,7 @@ static int start_miim_ops(void __iomem *base,
param = readl(base + MDIO_PARAM_OFFSET);
param |= phyid << MDIO_PARAM_PHY_ID;
param |= val << MDIO_PARAM_PHY_DATA;
- if (reg & MII_ADDR_C45)
+ if (c45)
param |= BIT(MDIO_PARAM_C45_SEL);
writel(param, base + MDIO_PARAM_OFFSET);
@@ -131,28 +131,58 @@ err:
return ret;
}
-static int iproc_mdiomux_read(struct mii_bus *bus, int phyid, int reg)
+static int iproc_mdiomux_read_c22(struct mii_bus *bus, int phyid, int reg)
{
struct iproc_mdiomux_desc *md = bus->priv;
int ret;
- ret = start_miim_ops(md->base, phyid, reg, 0, MDIO_CTRL_READ_OP);
+ ret = start_miim_ops(md->base, false, phyid, reg, 0, MDIO_CTRL_READ_OP);
if (ret < 0)
- dev_err(&bus->dev, "mdiomux read operation failed!!!");
+ dev_err(&bus->dev, "mdiomux c22 read operation failed!!!");
return ret;
}
-static int iproc_mdiomux_write(struct mii_bus *bus,
- int phyid, int reg, u16 val)
+static int iproc_mdiomux_read_c45(struct mii_bus *bus, int phyid, int devad,
+ int reg)
+{
+ struct iproc_mdiomux_desc *md = bus->priv;
+ int ret;
+
+ ret = start_miim_ops(md->base, true, phyid, reg | devad << 16, 0,
+ MDIO_CTRL_READ_OP);
+ if (ret < 0)
+ dev_err(&bus->dev, "mdiomux read c45 operation failed!!!");
+
+ return ret;
+}
+
+static int iproc_mdiomux_write_c22(struct mii_bus *bus,
+ int phyid, int reg, u16 val)
+{
+ struct iproc_mdiomux_desc *md = bus->priv;
+ int ret;
+
+ /* Write val at reg offset */
+ ret = start_miim_ops(md->base, false, phyid, reg, val,
+ MDIO_CTRL_WRITE_OP);
+ if (ret < 0)
+ dev_err(&bus->dev, "mdiomux write c22 operation failed!!!");
+
+ return ret;
+}
+
+static int iproc_mdiomux_write_c45(struct mii_bus *bus,
+ int phyid, int devad, int reg, u16 val)
{
struct iproc_mdiomux_desc *md = bus->priv;
int ret;
/* Write val at reg offset */
- ret = start_miim_ops(md->base, phyid, reg, val, MDIO_CTRL_WRITE_OP);
+ ret = start_miim_ops(md->base, true, phyid, reg | devad << 16, val,
+ MDIO_CTRL_WRITE_OP);
if (ret < 0)
- dev_err(&bus->dev, "mdiomux write operation failed!!!");
+ dev_err(&bus->dev, "mdiomux write c45 operation failed!!!");
return ret;
}
@@ -223,8 +253,10 @@ static int mdio_mux_iproc_probe(struct platform_device *pdev)
bus->name = "iProc MDIO mux bus";
snprintf(bus->id, MII_BUS_ID_SIZE, "%s-%d", pdev->name, pdev->id);
bus->parent = &pdev->dev;
- bus->read = iproc_mdiomux_read;
- bus->write = iproc_mdiomux_write;
+ bus->read = iproc_mdiomux_read_c22;
+ bus->write = iproc_mdiomux_write_c22;
+ bus->read_c45 = iproc_mdiomux_read_c45;
+ bus->write_c45 = iproc_mdiomux_write_c45;
bus->phy_mask = ~0;
bus->dev.of_node = pdev->dev.of_node;
diff --git a/drivers/net/mdio/mdio-mux-meson-g12a.c b/drivers/net/mdio/mdio-mux-meson-g12a.c
index c4542ecf5623..910e5cf74e89 100644
--- a/drivers/net/mdio/mdio-mux-meson-g12a.c
+++ b/drivers/net/mdio/mdio-mux-meson-g12a.c
@@ -53,10 +53,8 @@
#define MESON_G12A_MDIO_INTERNAL_ID 1
struct g12a_mdio_mux {
- bool pll_is_enabled;
void __iomem *regs;
void *mux_handle;
- struct clk *pclk;
struct clk *pll;
};
@@ -155,14 +153,12 @@ static int g12a_enable_internal_mdio(struct g12a_mdio_mux *priv)
int ret;
/* Enable the phy clock */
- if (!priv->pll_is_enabled) {
+ if (!__clk_is_enabled(priv->pll)) {
ret = clk_prepare_enable(priv->pll);
if (ret)
return ret;
}
- priv->pll_is_enabled = true;
-
/* Initialize ephy control */
writel(EPHY_G12A_ID, priv->regs + ETH_PHY_CNTL0);
@@ -193,10 +189,8 @@ static int g12a_enable_external_mdio(struct g12a_mdio_mux *priv)
writel_relaxed(0x0, priv->regs + ETH_PHY_CNTL2);
/* Disable the phy clock if enabled */
- if (priv->pll_is_enabled) {
+ if (__clk_is_enabled(priv->pll))
clk_disable_unprepare(priv->pll);
- priv->pll_is_enabled = false;
- }
return 0;
}
@@ -311,6 +305,7 @@ static int g12a_mdio_mux_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct g12a_mdio_mux *priv;
+ struct clk *pclk;
int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
@@ -323,34 +318,21 @@ static int g12a_mdio_mux_probe(struct platform_device *pdev)
if (IS_ERR(priv->regs))
return PTR_ERR(priv->regs);
- priv->pclk = devm_clk_get(dev, "pclk");
- if (IS_ERR(priv->pclk))
- return dev_err_probe(dev, PTR_ERR(priv->pclk),
+ pclk = devm_clk_get_enabled(dev, "pclk");
+ if (IS_ERR(pclk))
+ return dev_err_probe(dev, PTR_ERR(pclk),
"failed to get peripheral clock\n");
- /* Make sure the device registers are clocked */
- ret = clk_prepare_enable(priv->pclk);
- if (ret) {
- dev_err(dev, "failed to enable peripheral clock");
- return ret;
- }
-
/* Register PLL in CCF */
ret = g12a_ephy_glue_clk_register(dev);
if (ret)
- goto err;
+ return ret;
ret = mdio_mux_init(dev, dev->of_node, g12a_mdio_switch_fn,
&priv->mux_handle, dev, NULL);
- if (ret) {
+ if (ret)
dev_err_probe(dev, ret, "mdio multiplexer init failed\n");
- goto err;
- }
- return 0;
-
-err:
- clk_disable_unprepare(priv->pclk);
return ret;
}
@@ -360,11 +342,9 @@ static int g12a_mdio_mux_remove(struct platform_device *pdev)
mdio_mux_uninit(priv->mux_handle);
- if (priv->pll_is_enabled)
+ if (__clk_is_enabled(priv->pll))
clk_disable_unprepare(priv->pll);
- clk_disable_unprepare(priv->pclk);
-
return 0;
}
diff --git a/drivers/net/mdio/mdio-mux-meson-gxl.c b/drivers/net/mdio/mdio-mux-meson-gxl.c
new file mode 100644
index 000000000000..76188575ca1f
--- /dev/null
+++ b/drivers/net/mdio/mdio-mux-meson-gxl.c
@@ -0,0 +1,164 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Baylibre, SAS.
+ * Author: Jerome Brunet <jbrunet@baylibre.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/mdio-mux.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#define ETH_REG2 0x0
+#define REG2_PHYID GENMASK(21, 0)
+#define EPHY_GXL_ID 0x110181
+#define REG2_LEDACT GENMASK(23, 22)
+#define REG2_LEDLINK GENMASK(25, 24)
+#define REG2_DIV4SEL BIT(27)
+#define REG2_ADCBYPASS BIT(30)
+#define REG2_CLKINSEL BIT(31)
+#define ETH_REG3 0x4
+#define REG3_ENH BIT(3)
+#define REG3_CFGMODE GENMASK(6, 4)
+#define REG3_AUTOMDIX BIT(7)
+#define REG3_PHYADDR GENMASK(12, 8)
+#define REG3_PWRUPRST BIT(21)
+#define REG3_PWRDOWN BIT(22)
+#define REG3_LEDPOL BIT(23)
+#define REG3_PHYMDI BIT(26)
+#define REG3_CLKINEN BIT(29)
+#define REG3_PHYIP BIT(30)
+#define REG3_PHYEN BIT(31)
+#define ETH_REG4 0x8
+#define REG4_PWRUPRSTSIG BIT(0)
+
+#define MESON_GXL_MDIO_EXTERNAL_ID 0
+#define MESON_GXL_MDIO_INTERNAL_ID 1
+
+struct gxl_mdio_mux {
+ void __iomem *regs;
+ void *mux_handle;
+};
+
+static void gxl_enable_internal_mdio(struct gxl_mdio_mux *priv)
+{
+ u32 val;
+
+ /* Setup the internal phy */
+ val = (REG3_ENH |
+ FIELD_PREP(REG3_CFGMODE, 0x7) |
+ REG3_AUTOMDIX |
+ FIELD_PREP(REG3_PHYADDR, 8) |
+ REG3_LEDPOL |
+ REG3_PHYMDI |
+ REG3_CLKINEN |
+ REG3_PHYIP);
+
+ writel(REG4_PWRUPRSTSIG, priv->regs + ETH_REG4);
+ writel(val, priv->regs + ETH_REG3);
+ mdelay(10);
+
+ /* NOTE: The HW kept the phy id configurable at runtime.
+ * The id below is arbitrary. It is the one used in the vendor code.
+ * The only constraint is that it must match the one in
+ * drivers/net/phy/meson-gxl.c to properly match the PHY.
+ */
+ writel(FIELD_PREP(REG2_PHYID, EPHY_GXL_ID),
+ priv->regs + ETH_REG2);
+
+ /* Enable the internal phy */
+ val |= REG3_PHYEN;
+ writel(val, priv->regs + ETH_REG3);
+ writel(0, priv->regs + ETH_REG4);
+
+ /* The phy needs a bit of time to power up */
+ mdelay(10);
+}
+
+static void gxl_enable_external_mdio(struct gxl_mdio_mux *priv)
+{
+ /* Reset the mdio bus mux to the external phy */
+ writel(0, priv->regs + ETH_REG3);
+}
+
+static int gxl_mdio_switch_fn(int current_child, int desired_child,
+ void *data)
+{
+ struct gxl_mdio_mux *priv = dev_get_drvdata(data);
+
+ if (current_child == desired_child)
+ return 0;
+
+ switch (desired_child) {
+ case MESON_GXL_MDIO_EXTERNAL_ID:
+ gxl_enable_external_mdio(priv);
+ break;
+ case MESON_GXL_MDIO_INTERNAL_ID:
+ gxl_enable_internal_mdio(priv);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id gxl_mdio_mux_match[] = {
+ { .compatible = "amlogic,gxl-mdio-mux", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, gxl_mdio_mux_match);
+
+static int gxl_mdio_mux_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct gxl_mdio_mux *priv;
+ struct clk *rclk;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ platform_set_drvdata(pdev, priv);
+
+ priv->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->regs))
+ return PTR_ERR(priv->regs);
+
+ rclk = devm_clk_get_enabled(dev, "ref");
+ if (IS_ERR(rclk))
+ return dev_err_probe(dev, PTR_ERR(rclk),
+ "failed to get reference clock\n");
+
+ ret = mdio_mux_init(dev, dev->of_node, gxl_mdio_switch_fn,
+ &priv->mux_handle, dev, NULL);
+ if (ret)
+ dev_err_probe(dev, ret, "mdio multiplexer init failed\n");
+
+ return ret;
+}
+
+static int gxl_mdio_mux_remove(struct platform_device *pdev)
+{
+ struct gxl_mdio_mux *priv = platform_get_drvdata(pdev);
+
+ mdio_mux_uninit(priv->mux_handle);
+
+ return 0;
+}
+
+static struct platform_driver gxl_mdio_mux_driver = {
+ .probe = gxl_mdio_mux_probe,
+ .remove = gxl_mdio_mux_remove,
+ .driver = {
+ .name = "gxl-mdio-mux",
+ .of_match_table = gxl_mdio_mux_match,
+ },
+};
+module_platform_driver(gxl_mdio_mux_driver);
+
+MODULE_DESCRIPTION("Amlogic GXL MDIO multiplexer driver");
+MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/mdio/mdio-mvusb.c b/drivers/net/mdio/mdio-mvusb.c
index d5eabddfdf51..68fc55906e78 100644
--- a/drivers/net/mdio/mdio-mvusb.c
+++ b/drivers/net/mdio/mdio-mvusb.c
@@ -34,9 +34,6 @@ static int mvusb_mdio_read(struct mii_bus *mdio, int dev, int reg)
struct mvusb_mdio *mvusb = mdio->priv;
int err, alen;
- if (dev & MII_ADDR_C45)
- return -EOPNOTSUPP;
-
mvusb->buf[MVUSB_CMD_ADDR] = cpu_to_le16(0xa400 | (dev << 5) | reg);
err = usb_bulk_msg(mvusb->udev, usb_sndbulkpipe(mvusb->udev, 2),
@@ -57,9 +54,6 @@ static int mvusb_mdio_write(struct mii_bus *mdio, int dev, int reg, u16 val)
struct mvusb_mdio *mvusb = mdio->priv;
int alen;
- if (dev & MII_ADDR_C45)
- return -EOPNOTSUPP;
-
mvusb->buf[MVUSB_CMD_ADDR] = cpu_to_le16(0x8000 | (dev << 5) | reg);
mvusb->buf[MVUSB_CMD_VAL] = cpu_to_le16(val);
diff --git a/drivers/net/mdio/mdio-octeon.c b/drivers/net/mdio/mdio-octeon.c
index e096e68ac667..7c65c547d377 100644
--- a/drivers/net/mdio/mdio-octeon.c
+++ b/drivers/net/mdio/mdio-octeon.c
@@ -58,8 +58,10 @@ static int octeon_mdiobus_probe(struct platform_device *pdev)
snprintf(bus->mii_bus->id, MII_BUS_ID_SIZE, "%px", bus->register_base);
bus->mii_bus->parent = &pdev->dev;
- bus->mii_bus->read = cavium_mdiobus_read;
- bus->mii_bus->write = cavium_mdiobus_write;
+ bus->mii_bus->read = cavium_mdiobus_read_c22;
+ bus->mii_bus->write = cavium_mdiobus_write_c22;
+ bus->mii_bus->read_c45 = cavium_mdiobus_read_c45;
+ bus->mii_bus->write_c45 = cavium_mdiobus_write_c45;
platform_set_drvdata(pdev, bus);
diff --git a/drivers/net/mdio/mdio-thunder.c b/drivers/net/mdio/mdio-thunder.c
index 822d2cdd2f35..3847ee92c109 100644
--- a/drivers/net/mdio/mdio-thunder.c
+++ b/drivers/net/mdio/mdio-thunder.c
@@ -93,8 +93,10 @@ static int thunder_mdiobus_pci_probe(struct pci_dev *pdev,
bus->mii_bus->name = KBUILD_MODNAME;
snprintf(bus->mii_bus->id, MII_BUS_ID_SIZE, "%llx", r.start);
bus->mii_bus->parent = &pdev->dev;
- bus->mii_bus->read = cavium_mdiobus_read;
- bus->mii_bus->write = cavium_mdiobus_write;
+ bus->mii_bus->read = cavium_mdiobus_read_c22;
+ bus->mii_bus->write = cavium_mdiobus_write_c22;
+ bus->mii_bus->read_c45 = cavium_mdiobus_read_c45;
+ bus->mii_bus->write_c45 = cavium_mdiobus_write_c45;
err = of_mdiobus_register(bus->mii_bus, node);
if (err)
diff --git a/drivers/net/netdevsim/bpf.c b/drivers/net/netdevsim/bpf.c
index 50854265864d..f60eb97e3a62 100644
--- a/drivers/net/netdevsim/bpf.c
+++ b/drivers/net/netdevsim/bpf.c
@@ -315,10 +315,6 @@ nsim_setup_prog_hw_checks(struct netdevsim *ns, struct netdev_bpf *bpf)
NSIM_EA(bpf->extack, "xdpoffload of non-bound program");
return -EINVAL;
}
- if (!bpf_offload_dev_match(bpf->prog, ns->netdev)) {
- NSIM_EA(bpf->extack, "program bound to different dev");
- return -EINVAL;
- }
state = bpf->prog->aux->offload->dev_priv;
if (WARN_ON(strcmp(state->state, "xlated"))) {
diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c
index b962fc8e1397..6045bece2654 100644
--- a/drivers/net/netdevsim/dev.c
+++ b/drivers/net/netdevsim/dev.c
@@ -527,13 +527,13 @@ static void nsim_devlink_set_params_init_values(struct nsim_dev *nsim_dev,
union devlink_param_value value;
value.vu32 = nsim_dev->max_macs;
- devlink_param_driverinit_value_set(devlink,
- DEVLINK_PARAM_GENERIC_ID_MAX_MACS,
- value);
+ devl_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_MAX_MACS,
+ value);
value.vbool = nsim_dev->test1;
- devlink_param_driverinit_value_set(devlink,
- NSIM_DEVLINK_PARAM_ID_TEST1,
- value);
+ devl_param_driverinit_value_set(devlink,
+ NSIM_DEVLINK_PARAM_ID_TEST1,
+ value);
}
static void nsim_devlink_param_load_driverinit_values(struct devlink *devlink)
@@ -542,14 +542,14 @@ static void nsim_devlink_param_load_driverinit_values(struct devlink *devlink)
union devlink_param_value saved_value;
int err;
- err = devlink_param_driverinit_value_get(devlink,
- DEVLINK_PARAM_GENERIC_ID_MAX_MACS,
- &saved_value);
+ err = devl_param_driverinit_value_get(devlink,
+ DEVLINK_PARAM_GENERIC_ID_MAX_MACS,
+ &saved_value);
if (!err)
nsim_dev->max_macs = saved_value.vu32;
- err = devlink_param_driverinit_value_get(devlink,
- NSIM_DEVLINK_PARAM_ID_TEST1,
- &saved_value);
+ err = devl_param_driverinit_value_get(devlink,
+ NSIM_DEVLINK_PARAM_ID_TEST1,
+ &saved_value);
if (!err)
nsim_dev->test1 = saved_value.vbool;
}
@@ -1556,14 +1556,18 @@ int nsim_drv_probe(struct nsim_bus_dev *nsim_bus_dev)
goto err_devlink_unlock;
}
- err = nsim_dev_resources_register(devlink);
+ err = devl_register(devlink);
if (err)
goto err_vfc_free;
- err = devlink_params_register(devlink, nsim_devlink_params,
- ARRAY_SIZE(nsim_devlink_params));
+ err = nsim_dev_resources_register(devlink);
if (err)
goto err_dl_unregister;
+
+ err = devl_params_register(devlink, nsim_devlink_params,
+ ARRAY_SIZE(nsim_devlink_params));
+ if (err)
+ goto err_resource_unregister;
nsim_devlink_set_params_init_values(nsim_dev, devlink);
err = nsim_dev_dummy_region_init(nsim_dev, devlink);
@@ -1605,9 +1609,7 @@ int nsim_drv_probe(struct nsim_bus_dev *nsim_bus_dev)
goto err_hwstats_exit;
nsim_dev->esw_mode = DEVLINK_ESWITCH_MODE_LEGACY;
- devlink_set_features(devlink, DEVLINK_F_RELOAD);
devl_unlock(devlink);
- devlink_register(devlink);
return 0;
err_hwstats_exit:
@@ -1627,10 +1629,12 @@ err_traps_exit:
err_dummy_region_exit:
nsim_dev_dummy_region_exit(nsim_dev);
err_params_unregister:
- devlink_params_unregister(devlink, nsim_devlink_params,
- ARRAY_SIZE(nsim_devlink_params));
-err_dl_unregister:
+ devl_params_unregister(devlink, nsim_devlink_params,
+ ARRAY_SIZE(nsim_devlink_params));
+err_resource_unregister:
devl_resources_unregister(devlink);
+err_dl_unregister:
+ devl_unregister(devlink);
err_vfc_free:
kfree(nsim_dev->vfconfigs);
err_devlink_unlock:
@@ -1668,15 +1672,15 @@ void nsim_drv_remove(struct nsim_bus_dev *nsim_bus_dev)
struct nsim_dev *nsim_dev = dev_get_drvdata(&nsim_bus_dev->dev);
struct devlink *devlink = priv_to_devlink(nsim_dev);
- devlink_unregister(devlink);
devl_lock(devlink);
nsim_dev_reload_destroy(nsim_dev);
nsim_bpf_dev_exit(nsim_dev);
nsim_dev_debugfs_exit(nsim_dev);
- devlink_params_unregister(devlink, nsim_devlink_params,
- ARRAY_SIZE(nsim_devlink_params));
+ devl_params_unregister(devlink, nsim_devlink_params,
+ ARRAY_SIZE(nsim_devlink_params));
devl_resources_unregister(devlink);
+ devl_unregister(devlink);
kfree(nsim_dev->vfconfigs);
kfree(nsim_dev->fa_cookie);
devl_unlock(devlink);
diff --git a/drivers/net/netdevsim/health.c b/drivers/net/netdevsim/health.c
index aa77af4a68df..eb04ed715d2d 100644
--- a/drivers/net/netdevsim/health.c
+++ b/drivers/net/netdevsim/health.c
@@ -233,16 +233,16 @@ int nsim_dev_health_init(struct nsim_dev *nsim_dev, struct devlink *devlink)
int err;
health->empty_reporter =
- devlink_health_reporter_create(devlink,
- &nsim_dev_empty_reporter_ops,
- 0, health);
+ devl_health_reporter_create(devlink,
+ &nsim_dev_empty_reporter_ops,
+ 0, health);
if (IS_ERR(health->empty_reporter))
return PTR_ERR(health->empty_reporter);
health->dummy_reporter =
- devlink_health_reporter_create(devlink,
- &nsim_dev_dummy_reporter_ops,
- 0, health);
+ devl_health_reporter_create(devlink,
+ &nsim_dev_dummy_reporter_ops,
+ 0, health);
if (IS_ERR(health->dummy_reporter)) {
err = PTR_ERR(health->dummy_reporter);
goto err_empty_reporter_destroy;
@@ -266,9 +266,9 @@ int nsim_dev_health_init(struct nsim_dev *nsim_dev, struct devlink *devlink)
return 0;
err_dummy_reporter_destroy:
- devlink_health_reporter_destroy(health->dummy_reporter);
+ devl_health_reporter_destroy(health->dummy_reporter);
err_empty_reporter_destroy:
- devlink_health_reporter_destroy(health->empty_reporter);
+ devl_health_reporter_destroy(health->empty_reporter);
return err;
}
@@ -278,6 +278,6 @@ void nsim_dev_health_exit(struct nsim_dev *nsim_dev)
debugfs_remove_recursive(health->ddir);
kfree(health->recovered_break_msg);
- devlink_health_reporter_destroy(health->dummy_reporter);
- devlink_health_reporter_destroy(health->empty_reporter);
+ devl_health_reporter_destroy(health->dummy_reporter);
+ devl_health_reporter_destroy(health->empty_reporter);
}
diff --git a/drivers/net/netdevsim/ipsec.c b/drivers/net/netdevsim/ipsec.c
index b93baf5c8bee..f0d58092e7e9 100644
--- a/drivers/net/netdevsim/ipsec.c
+++ b/drivers/net/netdevsim/ipsec.c
@@ -125,7 +125,8 @@ static int nsim_ipsec_parse_proto_keys(struct xfrm_state *xs,
return 0;
}
-static int nsim_ipsec_add_sa(struct xfrm_state *xs)
+static int nsim_ipsec_add_sa(struct xfrm_state *xs,
+ struct netlink_ext_ack *extack)
{
struct nsim_ipsec *ipsec;
struct net_device *dev;
@@ -139,25 +140,24 @@ static int nsim_ipsec_add_sa(struct xfrm_state *xs)
ipsec = &ns->ipsec;
if (xs->id.proto != IPPROTO_ESP && xs->id.proto != IPPROTO_AH) {
- netdev_err(dev, "Unsupported protocol 0x%04x for ipsec offload\n",
- xs->id.proto);
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported protocol for ipsec offload");
return -EINVAL;
}
if (xs->calg) {
- netdev_err(dev, "Compression offload not supported\n");
+ NL_SET_ERR_MSG_MOD(extack, "Compression offload not supported");
return -EINVAL;
}
if (xs->xso.type != XFRM_DEV_OFFLOAD_CRYPTO) {
- netdev_err(dev, "Unsupported ipsec offload type\n");
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported ipsec offload type");
return -EINVAL;
}
/* find the first unused index */
ret = nsim_ipsec_find_empty_idx(ipsec);
if (ret < 0) {
- netdev_err(dev, "No space for SA in Rx table!\n");
+ NL_SET_ERR_MSG_MOD(extack, "No space for SA in Rx table!");
return ret;
}
sa_idx = (u16)ret;
@@ -172,7 +172,7 @@ static int nsim_ipsec_add_sa(struct xfrm_state *xs)
/* get the key and salt */
ret = nsim_ipsec_parse_proto_keys(xs, sa.key, &sa.salt);
if (ret) {
- netdev_err(dev, "Failed to get key data for SA table\n");
+ NL_SET_ERR_MSG_MOD(extack, "Failed to get key data for SA table");
return ret;
}
diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c
index 6db6a75ff9b9..35fa1ca98671 100644
--- a/drivers/net/netdevsim/netdev.c
+++ b/drivers/net/netdevsim/netdev.c
@@ -286,6 +286,7 @@ static void nsim_setup(struct net_device *dev)
NETIF_F_TSO;
dev->hw_features |= NETIF_F_HW_TC;
dev->max_mtu = ETH_MAX_MTU;
+ dev->xdp_features = NETDEV_XDP_ACT_HW_OFFLOAD;
}
static int nsim_init_netdevsim(struct netdevsim *ns)
diff --git a/drivers/net/pcs/pcs-lynx.c b/drivers/net/pcs/pcs-lynx.c
index 7d5fc7f54b2f..3903f3baba2b 100644
--- a/drivers/net/pcs/pcs-lynx.c
+++ b/drivers/net/pcs/pcs-lynx.c
@@ -10,9 +10,6 @@
#define SGMII_CLOCK_PERIOD_NS 8 /* PCS is clocked at 125 MHz */
#define LINK_TIMER_VAL(ns) ((u32)((ns) / SGMII_CLOCK_PERIOD_NS))
-#define SGMII_AN_LINK_TIMER_NS 1600000 /* defined by SGMII spec */
-#define IEEE8023_LINK_TIMER_NS 10000000
-
#define LINK_TIMER_LO 0x12
#define LINK_TIMER_HI 0x13
#define IF_MODE 0x14
@@ -126,26 +123,25 @@ static int lynx_pcs_config_giga(struct mdio_device *pcs, unsigned int mode,
phy_interface_t interface,
const unsigned long *advertising)
{
+ int link_timer_ns;
u32 link_timer;
u16 if_mode;
int err;
- if (interface == PHY_INTERFACE_MODE_1000BASEX) {
- link_timer = LINK_TIMER_VAL(IEEE8023_LINK_TIMER_NS);
+ link_timer_ns = phylink_get_link_timer_ns(interface);
+ if (link_timer_ns > 0) {
+ link_timer = LINK_TIMER_VAL(link_timer_ns);
+
mdiodev_write(pcs, LINK_TIMER_LO, link_timer & 0xffff);
mdiodev_write(pcs, LINK_TIMER_HI, link_timer >> 16);
+ }
+ if (interface == PHY_INTERFACE_MODE_1000BASEX) {
if_mode = 0;
} else {
if_mode = IF_MODE_SGMII_EN;
- if (mode == MLO_AN_INBAND) {
+ if (mode == MLO_AN_INBAND)
if_mode |= IF_MODE_USE_SGMII_AN;
-
- /* Adjust link timer for SGMII */
- link_timer = LINK_TIMER_VAL(SGMII_AN_LINK_TIMER_NS);
- mdiodev_write(pcs, LINK_TIMER_LO, link_timer & 0xffff);
- mdiodev_write(pcs, LINK_TIMER_HI, link_timer >> 16);
- }
}
err = mdiodev_modify(pcs, IF_MODE,
diff --git a/drivers/net/pcs/pcs-rzn1-miic.c b/drivers/net/pcs/pcs-rzn1-miic.c
index c1424119e821..323bec5e57f8 100644
--- a/drivers/net/pcs/pcs-rzn1-miic.c
+++ b/drivers/net/pcs/pcs-rzn1-miic.c
@@ -121,15 +121,11 @@ static const char *index_to_string[MIIC_MODCTRL_CONF_CONV_NUM] = {
* struct miic - MII converter structure
* @base: base address of the MII converter
* @dev: Device associated to the MII converter
- * @clks: Clocks used for this device
- * @nclk: Number of clocks
* @lock: Lock used for read-modify-write access
*/
struct miic {
void __iomem *base;
struct device *dev;
- struct clk_bulk_data *clks;
- int nclk;
spinlock_t lock;
};
@@ -232,7 +228,7 @@ static int miic_config(struct phylink_pcs *pcs, unsigned int mode,
}
miic_reg_rmw(miic, MIIC_CONVCTRL(port), mask, val);
- miic_converter_enable(miic_port->miic, miic_port->port, 1);
+ miic_converter_enable(miic, miic_port->port, 1);
return 0;
}
diff --git a/drivers/net/pcs/pcs-xpcs.c b/drivers/net/pcs/pcs-xpcs.c
index f6a038a1d51e..bc428a816719 100644
--- a/drivers/net/pcs/pcs-xpcs.c
+++ b/drivers/net/pcs/pcs-xpcs.c
@@ -199,9 +199,7 @@ int xpcs_write(struct dw_xpcs *xpcs, int dev, u32 reg, u16 val)
static int xpcs_modify_changed(struct dw_xpcs *xpcs, int dev, u32 reg,
u16 mask, u16 set)
{
- u32 reg_addr = mdiobus_c45_addr(dev, reg);
-
- return mdiodev_modify_changed(xpcs->mdiodev, reg_addr, mask, set);
+ return mdiodev_c45_modify_changed(xpcs->mdiodev, dev, reg, mask, set);
}
static int xpcs_read_vendor(struct dw_xpcs *xpcs, int dev, u32 reg)
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 1327290decab..54874555c921 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -257,7 +257,7 @@ config MOTORCOMM_PHY
tristate "Motorcomm PHYs"
help
Enables support for Motorcomm network PHYs.
- Currently supports the YT8511, YT8521, YT8531S Gigabit Ethernet PHYs.
+ Currently supports YT85xx Gigabit Ethernet PHYs.
config NATIONAL_PHY
tristate "National Semiconductor PHYs"
@@ -277,6 +277,13 @@ config NXP_TJA11XX_PHY
help
Currently supports the NXP TJA1100 and TJA1101 PHY.
+config NCN26000_PHY
+ tristate "Onsemi 10BASE-T1S Ethernet PHY"
+ help
+ Adds support for the onsemi 10BASE-T1S Ethernet PHY.
+ Currently supports the NCN26000 10BASE-T1S Industrial PHY
+ with MII interface.
+
config AT803X_PHY
tristate "Qualcomm Atheros AR803X PHYs and QCA833x PHYs"
depends on REGULATOR
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index f7138d3c896b..b5138066ba04 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -77,6 +77,7 @@ obj-$(CONFIG_MICROCHIP_T1_PHY) += microchip_t1.o
obj-$(CONFIG_MICROSEMI_PHY) += mscc/
obj-$(CONFIG_MOTORCOMM_PHY) += motorcomm.o
obj-$(CONFIG_NATIONAL_PHY) += national.o
+obj-$(CONFIG_NCN26000_PHY) += ncn26000.o
obj-$(CONFIG_NXP_C45_TJA11XX_PHY) += nxp-c45-tja11xx.o
obj-$(CONFIG_NXP_TJA11XX_PHY) += nxp-tja11xx.o
obj-$(CONFIG_QSEMI_PHY) += qsemi.o
diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c
index a6f05e35d91f..b7cb71817780 100644
--- a/drivers/net/phy/dp83822.c
+++ b/drivers/net/phy/dp83822.c
@@ -233,7 +233,8 @@ static int dp83822_config_intr(struct phy_device *phydev)
DP83822_ENERGY_DET_INT_EN |
DP83822_LINK_QUAL_INT_EN);
- if (!dp83822->fx_enabled)
+ /* Private data pointer is NULL on DP83825/26 */
+ if (!dp83822 || !dp83822->fx_enabled)
misr_status |= DP83822_ANEG_COMPLETE_INT_EN |
DP83822_DUP_MODE_CHANGE_INT_EN |
DP83822_SPEED_CHANGED_INT_EN;
@@ -253,7 +254,8 @@ static int dp83822_config_intr(struct phy_device *phydev)
DP83822_PAGE_RX_INT_EN |
DP83822_EEE_ERROR_CHANGE_INT_EN);
- if (!dp83822->fx_enabled)
+ /* Private data pointer is NULL on DP83825/26 */
+ if (!dp83822 || !dp83822->fx_enabled)
misr_status |= DP83822_ANEG_ERR_INT_EN |
DP83822_WOL_PKT_INT_EN;
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 0d706ee266af..63a3644d86c9 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -1467,7 +1467,7 @@ static int m88e1540_set_fld(struct phy_device *phydev, const u8 *msecs)
/* According to the Marvell data sheet EEE must be disabled for
* Fast Link Down detection to work properly
*/
- ret = phy_ethtool_get_eee(phydev, &eee);
+ ret = genphy_c45_ethtool_get_eee(phydev, &eee);
if (!ret && eee.eee_enabled) {
phydev_warn(phydev, "Fast Link Down detection requires EEE to be disabled!\n");
return -EBUSY;
diff --git a/drivers/net/phy/mdio-open-alliance.h b/drivers/net/phy/mdio-open-alliance.h
new file mode 100644
index 000000000000..931e14660d75
--- /dev/null
+++ b/drivers/net/phy/mdio-open-alliance.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * mdio-open-alliance.h - definition of OPEN Alliance SIG standard registers
+ */
+
+#ifndef __MDIO_OPEN_ALLIANCE__
+#define __MDIO_OPEN_ALLIANCE__
+
+#include <linux/mdio.h>
+
+/* NOTE: all OATC14 registers are located in MDIO_MMD_VEND2 */
+
+/* Open Alliance TC14 (10BASE-T1S) registers */
+#define MDIO_OATC14_PLCA_IDVER 0xca00 /* PLCA ID and version */
+#define MDIO_OATC14_PLCA_CTRL0 0xca01 /* PLCA Control register 0 */
+#define MDIO_OATC14_PLCA_CTRL1 0xca02 /* PLCA Control register 1 */
+#define MDIO_OATC14_PLCA_STATUS 0xca03 /* PLCA Status register */
+#define MDIO_OATC14_PLCA_TOTMR 0xca04 /* PLCA TO Timer register */
+#define MDIO_OATC14_PLCA_BURST 0xca05 /* PLCA BURST mode register */
+
+/* Open Alliance TC14 PLCA IDVER register */
+#define MDIO_OATC14_PLCA_IDM 0xff00 /* PLCA MAP ID */
+#define MDIO_OATC14_PLCA_VER 0x00ff /* PLCA MAP version */
+
+/* Open Alliance TC14 PLCA CTRL0 register */
+#define MDIO_OATC14_PLCA_EN BIT(15) /* PLCA enable */
+#define MDIO_OATC14_PLCA_RST BIT(14) /* PLCA reset */
+
+/* Open Alliance TC14 PLCA CTRL1 register */
+#define MDIO_OATC14_PLCA_NCNT 0xff00 /* PLCA node count */
+#define MDIO_OATC14_PLCA_ID 0x00ff /* PLCA local node ID */
+
+/* Open Alliance TC14 PLCA STATUS register */
+#define MDIO_OATC14_PLCA_PST BIT(15) /* PLCA status indication */
+
+/* Open Alliance TC14 PLCA TOTMR register */
+#define MDIO_OATC14_PLCA_TOT 0x00ff
+
+/* Open Alliance TC14 PLCA BURST register */
+#define MDIO_OATC14_PLCA_MAXBC 0xff00
+#define MDIO_OATC14_PLCA_BTMR 0x00ff
+
+/* Version Identifiers */
+#define OATC14_IDM 0x0a00
+
+#endif /* __MDIO_OPEN_ALLIANCE__ */
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 16e021b477f0..389f33a12534 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -19,6 +19,7 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
+#include <linux/micrel_phy.h>
#include <linux/mii.h>
#include <linux/mm.h>
#include <linux/module.h>
@@ -108,9 +109,10 @@ EXPORT_SYMBOL(mdiobus_unregister_device);
struct phy_device *mdiobus_get_phy(struct mii_bus *bus, int addr)
{
+ bool addr_valid = addr >= 0 && addr < ARRAY_SIZE(bus->mdio_map);
struct mdio_device *mdiodev;
- if (addr < 0 || addr >= ARRAY_SIZE(bus->mdio_map))
+ if (WARN_ONCE(!addr_valid, "addr %d out of range\n", addr))
return NULL;
mdiodev = bus->mdio_map[addr];
@@ -511,6 +513,126 @@ static int mdiobus_create_device(struct mii_bus *bus,
return ret;
}
+static struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr, bool c45)
+{
+ struct phy_device *phydev = ERR_PTR(-ENODEV);
+ int err;
+
+ phydev = get_phy_device(bus, addr, c45);
+ if (IS_ERR(phydev))
+ return phydev;
+
+ /* For DT, see if the auto-probed phy has a corresponding child
+ * in the bus node, and set the of_node pointer in this case.
+ */
+ of_mdiobus_link_mdiodev(bus, &phydev->mdio);
+
+ err = phy_device_register(phydev);
+ if (err) {
+ phy_device_free(phydev);
+ return ERR_PTR(-ENODEV);
+ }
+
+ return phydev;
+}
+
+/**
+ * mdiobus_scan_c22 - scan one address on a bus for C22 MDIO devices.
+ * @bus: mii_bus to scan
+ * @addr: address on bus to scan
+ *
+ * This function scans one address on the MDIO bus, looking for
+ * devices which can be identified using a vendor/product ID in
+ * registers 2 and 3. Not all MDIO devices have such registers, but
+ * PHY devices typically do. Hence this function assumes anything
+ * found is a PHY, or can be treated as a PHY. Other MDIO devices,
+ * such as switches, will probably not be found during the scan.
+ */
+struct phy_device *mdiobus_scan_c22(struct mii_bus *bus, int addr)
+{
+ return mdiobus_scan(bus, addr, false);
+}
+EXPORT_SYMBOL(mdiobus_scan_c22);
+
+/**
+ * mdiobus_scan_c45 - scan one address on a bus for C45 MDIO devices.
+ * @bus: mii_bus to scan
+ * @addr: address on bus to scan
+ *
+ * This function scans one address on the MDIO bus, looking for
+ * devices which can be identified using a vendor/product ID in
+ * registers 2 and 3. Not all MDIO devices have such registers, but
+ * PHY devices typically do. Hence this function assumes anything
+ * found is a PHY, or can be treated as a PHY. Other MDIO devices,
+ * such as switches, will probably not be found during the scan.
+ */
+static struct phy_device *mdiobus_scan_c45(struct mii_bus *bus, int addr)
+{
+ return mdiobus_scan(bus, addr, true);
+}
+
+static int mdiobus_scan_bus_c22(struct mii_bus *bus)
+{
+ int i;
+
+ for (i = 0; i < PHY_MAX_ADDR; i++) {
+ if ((bus->phy_mask & BIT(i)) == 0) {
+ struct phy_device *phydev;
+
+ phydev = mdiobus_scan_c22(bus, i);
+ if (IS_ERR(phydev) && (PTR_ERR(phydev) != -ENODEV))
+ return PTR_ERR(phydev);
+ }
+ }
+ return 0;
+}
+
+static int mdiobus_scan_bus_c45(struct mii_bus *bus)
+{
+ int i;
+
+ for (i = 0; i < PHY_MAX_ADDR; i++) {
+ if ((bus->phy_mask & BIT(i)) == 0) {
+ struct phy_device *phydev;
+
+ /* Don't scan C45 if we already have a C22 device */
+ if (bus->mdio_map[i])
+ continue;
+
+ phydev = mdiobus_scan_c45(bus, i);
+ if (IS_ERR(phydev) && (PTR_ERR(phydev) != -ENODEV))
+ return PTR_ERR(phydev);
+ }
+ }
+ return 0;
+}
+
+/* There are some C22 PHYs which do bad things when where is a C45
+ * transaction on the bus, like accepting a read themselves, and
+ * stomping over the true devices reply, to performing a write to
+ * themselves which was intended for another device. Now that C22
+ * devices have been found, see if any of them are bad for C45, and if we
+ * should skip the C45 scan.
+ */
+static bool mdiobus_prevent_c45_scan(struct mii_bus *bus)
+{
+ int i;
+
+ for (i = 0; i < PHY_MAX_ADDR; i++) {
+ struct phy_device *phydev;
+ u32 oui;
+
+ phydev = mdiobus_get_phy(bus, i);
+ if (!phydev)
+ continue;
+ oui = phydev->phy_id >> 10;
+
+ if (oui == MICREL_OUI)
+ return true;
+ }
+ return false;
+}
+
/**
* __mdiobus_register - bring up all the PHYs on a given bus and attach them to bus
* @bus: target mii_bus
@@ -528,11 +650,19 @@ static int mdiobus_create_device(struct mii_bus *bus,
int __mdiobus_register(struct mii_bus *bus, struct module *owner)
{
struct mdio_device *mdiodev;
- int i, err;
struct gpio_desc *gpiod;
+ bool prevent_c45_scan;
+ int i, err;
+
+ if (!bus || !bus->name)
+ return -EINVAL;
- if (NULL == bus || NULL == bus->name ||
- NULL == bus->read || NULL == bus->write)
+ /* An access method always needs both read and write operations */
+ if (!!bus->read != !!bus->write || !!bus->read_c45 != !!bus->write_c45)
+ return -EINVAL;
+
+ /* At least one method is mandatory */
+ if (!bus->read && !bus->read_c45)
return -EINVAL;
if (bus->parent && bus->parent->of_node)
@@ -587,16 +717,18 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner)
goto error_reset_gpiod;
}
- for (i = 0; i < PHY_MAX_ADDR; i++) {
- if ((bus->phy_mask & BIT(i)) == 0) {
- struct phy_device *phydev;
+ if (bus->read) {
+ err = mdiobus_scan_bus_c22(bus);
+ if (err)
+ goto error;
+ }
- phydev = mdiobus_scan(bus, i);
- if (IS_ERR(phydev) && (PTR_ERR(phydev) != -ENODEV)) {
- err = PTR_ERR(phydev);
- goto error;
- }
- }
+ prevent_c45_scan = mdiobus_prevent_c45_scan(bus);
+
+ if (!prevent_c45_scan && bus->read_c45) {
+ err = mdiobus_scan_bus_c45(bus);
+ if (err)
+ goto error;
}
mdiobus_setup_mdiodev_from_board_info(bus, mdiobus_create_device);
@@ -606,7 +738,7 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner)
return 0;
error:
- while (--i >= 0) {
+ for (i = 0; i < PHY_MAX_ADDR; i++) {
mdiodev = bus->mdio_map[i];
if (!mdiodev)
continue;
@@ -677,57 +809,6 @@ void mdiobus_free(struct mii_bus *bus)
}
EXPORT_SYMBOL(mdiobus_free);
-/**
- * mdiobus_scan - scan a bus for MDIO devices.
- * @bus: mii_bus to scan
- * @addr: address on bus to scan
- *
- * This function scans the MDIO bus, looking for devices which can be
- * identified using a vendor/product ID in registers 2 and 3. Not all
- * MDIO devices have such registers, but PHY devices typically
- * do. Hence this function assumes anything found is a PHY, or can be
- * treated as a PHY. Other MDIO devices, such as switches, will
- * probably not be found during the scan.
- */
-struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr)
-{
- struct phy_device *phydev = ERR_PTR(-ENODEV);
- int err;
-
- switch (bus->probe_capabilities) {
- case MDIOBUS_NO_CAP:
- case MDIOBUS_C22:
- phydev = get_phy_device(bus, addr, false);
- break;
- case MDIOBUS_C45:
- phydev = get_phy_device(bus, addr, true);
- break;
- case MDIOBUS_C22_C45:
- phydev = get_phy_device(bus, addr, false);
- if (IS_ERR(phydev))
- phydev = get_phy_device(bus, addr, true);
- break;
- }
-
- if (IS_ERR(phydev))
- return phydev;
-
- /*
- * For DT, see if the auto-probed phy has a correspoding child
- * in the bus node, and set the of_node pointer in this case.
- */
- of_mdiobus_link_mdiodev(bus, &phydev->mdio);
-
- err = phy_device_register(phydev);
- if (err) {
- phy_device_free(phydev);
- return ERR_PTR(-ENODEV);
- }
-
- return phydev;
-}
-EXPORT_SYMBOL(mdiobus_scan);
-
static void mdiobus_stats_acct(struct mdio_bus_stats *stats, bool op, int ret)
{
preempt_disable();
@@ -764,7 +845,10 @@ int __mdiobus_read(struct mii_bus *bus, int addr, u32 regnum)
lockdep_assert_held_once(&bus->mdio_lock);
- retval = bus->read(bus, addr, regnum);
+ if (bus->read)
+ retval = bus->read(bus, addr, regnum);
+ else
+ retval = -EOPNOTSUPP;
trace_mdio_access(bus, 1, addr, regnum, retval, retval);
mdiobus_stats_acct(&bus->stats[addr], true, retval);
@@ -790,7 +874,10 @@ int __mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val)
lockdep_assert_held_once(&bus->mdio_lock);
- err = bus->write(bus, addr, regnum, val);
+ if (bus->write)
+ err = bus->write(bus, addr, regnum, val);
+ else
+ err = -EOPNOTSUPP;
trace_mdio_access(bus, 0, addr, regnum, val, err);
mdiobus_stats_acct(&bus->stats[addr], false, err);
@@ -832,6 +919,99 @@ int __mdiobus_modify_changed(struct mii_bus *bus, int addr, u32 regnum,
EXPORT_SYMBOL_GPL(__mdiobus_modify_changed);
/**
+ * __mdiobus_c45_read - Unlocked version of the mdiobus_c45_read function
+ * @bus: the mii_bus struct
+ * @addr: the phy address
+ * @devad: device address to read
+ * @regnum: register number to read
+ *
+ * Read a MDIO bus register. Caller must hold the mdio bus lock.
+ *
+ * NOTE: MUST NOT be called from interrupt context.
+ */
+int __mdiobus_c45_read(struct mii_bus *bus, int addr, int devad, u32 regnum)
+{
+ int retval;
+
+ lockdep_assert_held_once(&bus->mdio_lock);
+
+ if (bus->read_c45)
+ retval = bus->read_c45(bus, addr, devad, regnum);
+ else
+ retval = -EOPNOTSUPP;
+
+ trace_mdio_access(bus, 1, addr, regnum, retval, retval);
+ mdiobus_stats_acct(&bus->stats[addr], true, retval);
+
+ return retval;
+}
+EXPORT_SYMBOL(__mdiobus_c45_read);
+
+/**
+ * __mdiobus_c45_write - Unlocked version of the mdiobus_write function
+ * @bus: the mii_bus struct
+ * @addr: the phy address
+ * @devad: device address to read
+ * @regnum: register number to write
+ * @val: value to write to @regnum
+ *
+ * Write a MDIO bus register. Caller must hold the mdio bus lock.
+ *
+ * NOTE: MUST NOT be called from interrupt context.
+ */
+int __mdiobus_c45_write(struct mii_bus *bus, int addr, int devad, u32 regnum,
+ u16 val)
+{
+ int err;
+
+ lockdep_assert_held_once(&bus->mdio_lock);
+
+ if (bus->write_c45)
+ err = bus->write_c45(bus, addr, devad, regnum, val);
+ else
+ err = -EOPNOTSUPP;
+
+ trace_mdio_access(bus, 0, addr, regnum, val, err);
+ mdiobus_stats_acct(&bus->stats[addr], false, err);
+
+ return err;
+}
+EXPORT_SYMBOL(__mdiobus_c45_write);
+
+/**
+ * __mdiobus_c45_modify_changed - Unlocked version of the mdiobus_modify function
+ * @bus: the mii_bus struct
+ * @addr: the phy address
+ * @devad: device address to read
+ * @regnum: register number to modify
+ * @mask: bit mask of bits to clear
+ * @set: bit mask of bits to set
+ *
+ * Read, modify, and if any change, write the register value back to the
+ * device. Any error returns a negative number.
+ *
+ * NOTE: MUST NOT be called from interrupt context.
+ */
+static int __mdiobus_c45_modify_changed(struct mii_bus *bus, int addr,
+ int devad, u32 regnum, u16 mask,
+ u16 set)
+{
+ int new, ret;
+
+ ret = __mdiobus_c45_read(bus, addr, devad, regnum);
+ if (ret < 0)
+ return ret;
+
+ new = (ret & ~mask) | set;
+ if (new == ret)
+ return 0;
+
+ ret = __mdiobus_c45_write(bus, addr, devad, regnum, new);
+
+ return ret < 0 ? ret : 1;
+}
+
+/**
* mdiobus_read_nested - Nested version of the mdiobus_read function
* @bus: the mii_bus struct
* @addr: the phy address
@@ -879,6 +1059,56 @@ int mdiobus_read(struct mii_bus *bus, int addr, u32 regnum)
EXPORT_SYMBOL(mdiobus_read);
/**
+ * mdiobus_c45_read - Convenience function for reading a given MII mgmt register
+ * @bus: the mii_bus struct
+ * @addr: the phy address
+ * @devad: device address to read
+ * @regnum: register number to read
+ *
+ * NOTE: MUST NOT be called from interrupt context,
+ * because the bus read/write functions may wait for an interrupt
+ * to conclude the operation.
+ */
+int mdiobus_c45_read(struct mii_bus *bus, int addr, int devad, u32 regnum)
+{
+ int retval;
+
+ mutex_lock(&bus->mdio_lock);
+ retval = __mdiobus_c45_read(bus, addr, devad, regnum);
+ mutex_unlock(&bus->mdio_lock);
+
+ return retval;
+}
+EXPORT_SYMBOL(mdiobus_c45_read);
+
+/**
+ * mdiobus_c45_read_nested - Nested version of the mdiobus_c45_read function
+ * @bus: the mii_bus struct
+ * @addr: the phy address
+ * @devad: device address to read
+ * @regnum: register number to read
+ *
+ * In case of nested MDIO bus access avoid lockdep false positives by
+ * using mutex_lock_nested().
+ *
+ * NOTE: MUST NOT be called from interrupt context,
+ * because the bus read/write functions may wait for an interrupt
+ * to conclude the operation.
+ */
+int mdiobus_c45_read_nested(struct mii_bus *bus, int addr, int devad,
+ u32 regnum)
+{
+ int retval;
+
+ mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
+ retval = __mdiobus_c45_read(bus, addr, devad, regnum);
+ mutex_unlock(&bus->mdio_lock);
+
+ return retval;
+}
+EXPORT_SYMBOL(mdiobus_c45_read_nested);
+
+/**
* mdiobus_write_nested - Nested version of the mdiobus_write function
* @bus: the mii_bus struct
* @addr: the phy address
@@ -928,6 +1158,59 @@ int mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val)
EXPORT_SYMBOL(mdiobus_write);
/**
+ * mdiobus_c45_write - Convenience function for writing a given MII mgmt register
+ * @bus: the mii_bus struct
+ * @addr: the phy address
+ * @devad: device address to read
+ * @regnum: register number to write
+ * @val: value to write to @regnum
+ *
+ * NOTE: MUST NOT be called from interrupt context,
+ * because the bus read/write functions may wait for an interrupt
+ * to conclude the operation.
+ */
+int mdiobus_c45_write(struct mii_bus *bus, int addr, int devad, u32 regnum,
+ u16 val)
+{
+ int err;
+
+ mutex_lock(&bus->mdio_lock);
+ err = __mdiobus_c45_write(bus, addr, devad, regnum, val);
+ mutex_unlock(&bus->mdio_lock);
+
+ return err;
+}
+EXPORT_SYMBOL(mdiobus_c45_write);
+
+/**
+ * mdiobus_c45_write_nested - Nested version of the mdiobus_c45_write function
+ * @bus: the mii_bus struct
+ * @addr: the phy address
+ * @devad: device address to read
+ * @regnum: register number to write
+ * @val: value to write to @regnum
+ *
+ * In case of nested MDIO bus access avoid lockdep false positives by
+ * using mutex_lock_nested().
+ *
+ * NOTE: MUST NOT be called from interrupt context,
+ * because the bus read/write functions may wait for an interrupt
+ * to conclude the operation.
+ */
+int mdiobus_c45_write_nested(struct mii_bus *bus, int addr, int devad,
+ u32 regnum, u16 val)
+{
+ int err;
+
+ mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
+ err = __mdiobus_c45_write(bus, addr, devad, regnum, val);
+ mutex_unlock(&bus->mdio_lock);
+
+ return err;
+}
+EXPORT_SYMBOL(mdiobus_c45_write_nested);
+
+/**
* mdiobus_modify - Convenience function for modifying a given mdio device
* register
* @bus: the mii_bus struct
@@ -949,6 +1232,30 @@ int mdiobus_modify(struct mii_bus *bus, int addr, u32 regnum, u16 mask, u16 set)
EXPORT_SYMBOL_GPL(mdiobus_modify);
/**
+ * mdiobus_c45_modify - Convenience function for modifying a given mdio device
+ * register
+ * @bus: the mii_bus struct
+ * @addr: the phy address
+ * @devad: device address to read
+ * @regnum: register number to write
+ * @mask: bit mask of bits to clear
+ * @set: bit mask of bits to set
+ */
+int mdiobus_c45_modify(struct mii_bus *bus, int addr, int devad, u32 regnum,
+ u16 mask, u16 set)
+{
+ int err;
+
+ mutex_lock(&bus->mdio_lock);
+ err = __mdiobus_c45_modify_changed(bus, addr, devad, regnum,
+ mask, set);
+ mutex_unlock(&bus->mdio_lock);
+
+ return err < 0 ? err : 0;
+}
+EXPORT_SYMBOL_GPL(mdiobus_c45_modify);
+
+/**
* mdiobus_modify_changed - Convenience function for modifying a given mdio
* device register and returning if it changed
* @bus: the mii_bus struct
@@ -971,6 +1278,29 @@ int mdiobus_modify_changed(struct mii_bus *bus, int addr, u32 regnum,
EXPORT_SYMBOL_GPL(mdiobus_modify_changed);
/**
+ * mdiobus_c45_modify_changed - Convenience function for modifying a given mdio
+ * device register and returning if it changed
+ * @bus: the mii_bus struct
+ * @addr: the phy address
+ * @devad: device address to read
+ * @regnum: register number to write
+ * @mask: bit mask of bits to clear
+ * @set: bit mask of bits to set
+ */
+int mdiobus_c45_modify_changed(struct mii_bus *bus, int devad, int addr,
+ u32 regnum, u16 mask, u16 set)
+{
+ int err;
+
+ mutex_lock(&bus->mdio_lock);
+ err = __mdiobus_c45_modify_changed(bus, addr, devad, regnum, mask, set);
+ mutex_unlock(&bus->mdio_lock);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(mdiobus_c45_modify_changed);
+
+/**
* mdio_bus_match - determine if given MDIO driver supports the given
* MDIO device
* @dev: target MDIO device
@@ -1000,7 +1330,7 @@ static int mdio_bus_match(struct device *dev, struct device_driver *drv)
return 0;
}
-static int mdio_uevent(struct device *dev, struct kobj_uevent_env *env)
+static int mdio_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
int rc;
diff --git a/drivers/net/phy/meson-gxl.c b/drivers/net/phy/meson-gxl.c
index c49062ad72c6..a6015cd03bff 100644
--- a/drivers/net/phy/meson-gxl.c
+++ b/drivers/net/phy/meson-gxl.c
@@ -261,6 +261,8 @@ static struct phy_driver meson_gxl_phy[] = {
.handle_interrupt = meson_gxl_handle_interrupt,
.suspend = genphy_suspend,
.resume = genphy_resume,
+ .read_mmd = genphy_read_mmd_unsupported,
+ .write_mmd = genphy_write_mmd_unsupported,
}, {
PHY_ID_MATCH_EXACT(0x01803301),
.name = "Meson G12A Internal PHY",
@@ -271,6 +273,8 @@ static struct phy_driver meson_gxl_phy[] = {
.handle_interrupt = meson_gxl_handle_interrupt,
.suspend = genphy_suspend,
.resume = genphy_resume,
+ .read_mmd = genphy_read_mmd_unsupported,
+ .write_mmd = genphy_write_mmd_unsupported,
},
};
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 26ce0c5defcd..2c84fccef4f6 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -268,6 +268,9 @@ struct kszphy_type {
u16 interrupt_level_mask;
u16 cable_diag_reg;
unsigned long pair_mask;
+ u16 disable_dll_tx_bit;
+ u16 disable_dll_rx_bit;
+ u16 disable_dll_mask;
bool has_broadcast_disable;
bool has_nand_tree_disable;
bool has_rmii_ref_clk_sel;
@@ -310,6 +313,11 @@ struct kszphy_ptp_priv {
enum hwtstamp_rx_filters rx_filter;
int layer;
int version;
+
+ struct ptp_clock *ptp_clock;
+ struct ptp_clock_info ptp_clock_info;
+ /* Lock for ptp_clock */
+ struct mutex ptp_lock;
};
struct kszphy_priv {
@@ -364,6 +372,21 @@ static const struct kszphy_type ksz9021_type = {
.interrupt_level_mask = BIT(14),
};
+static const struct kszphy_type ksz9131_type = {
+ .interrupt_level_mask = BIT(14),
+ .disable_dll_tx_bit = BIT(12),
+ .disable_dll_rx_bit = BIT(12),
+ .disable_dll_mask = BIT_MASK(12),
+};
+
+static const struct kszphy_type lan8841_type = {
+ .disable_dll_tx_bit = BIT(14),
+ .disable_dll_rx_bit = BIT(14),
+ .disable_dll_mask = BIT_MASK(14),
+ .cable_diag_reg = LAN8814_CABLE_DIAG,
+ .pair_mask = LAN8814_WIRE_PAIR_MASK,
+};
+
static int kszphy_extended_write(struct phy_device *phydev,
u32 regnum, u16 val)
{
@@ -1172,19 +1195,18 @@ static int ksz9131_of_load_skew_values(struct phy_device *phydev,
#define KSZ9131RN_MMD_COMMON_CTRL_REG 2
#define KSZ9131RN_RXC_DLL_CTRL 76
#define KSZ9131RN_TXC_DLL_CTRL 77
-#define KSZ9131RN_DLL_CTRL_BYPASS BIT_MASK(12)
#define KSZ9131RN_DLL_ENABLE_DELAY 0
-#define KSZ9131RN_DLL_DISABLE_DELAY BIT(12)
static int ksz9131_config_rgmii_delay(struct phy_device *phydev)
{
+ const struct kszphy_type *type = phydev->drv->driver_data;
u16 rxcdll_val, txcdll_val;
int ret;
switch (phydev->interface) {
case PHY_INTERFACE_MODE_RGMII:
- rxcdll_val = KSZ9131RN_DLL_DISABLE_DELAY;
- txcdll_val = KSZ9131RN_DLL_DISABLE_DELAY;
+ rxcdll_val = type->disable_dll_rx_bit;
+ txcdll_val = type->disable_dll_tx_bit;
break;
case PHY_INTERFACE_MODE_RGMII_ID:
rxcdll_val = KSZ9131RN_DLL_ENABLE_DELAY;
@@ -1192,10 +1214,10 @@ static int ksz9131_config_rgmii_delay(struct phy_device *phydev)
break;
case PHY_INTERFACE_MODE_RGMII_RXID:
rxcdll_val = KSZ9131RN_DLL_ENABLE_DELAY;
- txcdll_val = KSZ9131RN_DLL_DISABLE_DELAY;
+ txcdll_val = type->disable_dll_tx_bit;
break;
case PHY_INTERFACE_MODE_RGMII_TXID:
- rxcdll_val = KSZ9131RN_DLL_DISABLE_DELAY;
+ rxcdll_val = type->disable_dll_rx_bit;
txcdll_val = KSZ9131RN_DLL_ENABLE_DELAY;
break;
default:
@@ -1203,13 +1225,13 @@ static int ksz9131_config_rgmii_delay(struct phy_device *phydev)
}
ret = phy_modify_mmd(phydev, KSZ9131RN_MMD_COMMON_CTRL_REG,
- KSZ9131RN_RXC_DLL_CTRL, KSZ9131RN_DLL_CTRL_BYPASS,
+ KSZ9131RN_RXC_DLL_CTRL, type->disable_dll_mask,
rxcdll_val);
if (ret < 0)
return ret;
return phy_modify_mmd(phydev, KSZ9131RN_MMD_COMMON_CTRL_REG,
- KSZ9131RN_TXC_DLL_CTRL, KSZ9131RN_DLL_CTRL_BYPASS,
+ KSZ9131RN_TXC_DLL_CTRL, type->disable_dll_mask,
txcdll_val);
}
@@ -1370,6 +1392,26 @@ static int ksz9131_config_aneg(struct phy_device *phydev)
return genphy_config_aneg(phydev);
}
+static int ksz9477_get_features(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = genphy_read_abilities(phydev);
+ if (ret)
+ return ret;
+
+ /* The "EEE control and capability 1" (Register 3.20) seems to be
+ * influenced by the "EEE advertisement 1" (Register 7.60). Changes
+ * on the 7.60 will affect 3.20. So, we need to construct our own list
+ * of caps.
+ * KSZ8563R should have 100BaseTX/Full only.
+ */
+ linkmode_and(phydev->supported_eee, phydev->supported,
+ PHY_EEE_CAP1_FEATURES);
+
+ return 0;
+}
+
#define KSZ8873MLL_GLOBAL_CONTROL_4 0x06
#define KSZ8873MLL_GLOBAL_CONTROL_4_DUPLEX BIT(6)
#define KSZ8873MLL_GLOBAL_CONTROL_4_SPEED BIT(4)
@@ -2088,7 +2130,8 @@ static int ksz886x_cable_test_get_status(struct phy_device *phydev,
const struct kszphy_type *type = phydev->drv->driver_data;
unsigned long pair_mask = type->pair_mask;
int retries = 20;
- int pair, ret;
+ int ret = 0;
+ int pair;
*finished = false;
@@ -2380,8 +2423,8 @@ static void lan8814_get_sig_rx(struct sk_buff *skb, u16 *sig)
*sig = (__force u16)(ntohs(ptp_header->sequence_id));
}
-static bool lan8814_match_rx_ts(struct kszphy_ptp_priv *ptp_priv,
- struct sk_buff *skb)
+static bool lan8814_match_rx_skb(struct kszphy_ptp_priv *ptp_priv,
+ struct sk_buff *skb)
{
struct skb_shared_hwtstamps *shhwtstamps;
struct lan8814_ptp_rx_ts *rx_ts, *tmp;
@@ -2430,7 +2473,7 @@ static bool lan8814_rxtstamp(struct mii_timestamper *mii_ts, struct sk_buff *skb
/* If we failed to match then add it to the queue for when the timestamp
* will come
*/
- if (!lan8814_match_rx_ts(ptp_priv, skb))
+ if (!lan8814_match_rx_skb(ptp_priv, skb))
skb_queue_tail(&ptp_priv->rx_queue, skb);
return true;
@@ -2680,18 +2723,14 @@ static void lan8814_get_sig_tx(struct sk_buff *skb, u16 *sig)
*sig = (__force u16)(ntohs(ptp_header->sequence_id));
}
-static void lan8814_dequeue_tx_skb(struct kszphy_ptp_priv *ptp_priv)
+static void lan8814_match_tx_skb(struct kszphy_ptp_priv *ptp_priv,
+ u32 seconds, u32 nsec, u16 seq_id)
{
- struct phy_device *phydev = ptp_priv->phydev;
struct skb_shared_hwtstamps shhwtstamps;
struct sk_buff *skb, *skb_tmp;
unsigned long flags;
- u32 seconds, nsec;
bool ret = false;
u16 skb_sig;
- u16 seq_id;
-
- lan8814_ptp_tx_ts_get(phydev, &seconds, &nsec, &seq_id);
spin_lock_irqsave(&ptp_priv->tx_queue.lock, flags);
skb_queue_walk_safe(&ptp_priv->tx_queue, skb, skb_tmp) {
@@ -2713,6 +2752,16 @@ static void lan8814_dequeue_tx_skb(struct kszphy_ptp_priv *ptp_priv)
}
}
+static void lan8814_dequeue_tx_skb(struct kszphy_ptp_priv *ptp_priv)
+{
+ struct phy_device *phydev = ptp_priv->phydev;
+ u32 seconds, nsec;
+ u16 seq_id;
+
+ lan8814_ptp_tx_ts_get(phydev, &seconds, &nsec, &seq_id);
+ lan8814_match_tx_skb(ptp_priv, seconds, nsec, seq_id);
+}
+
static void lan8814_get_tx_ts(struct kszphy_ptp_priv *ptp_priv)
{
struct phy_device *phydev = ptp_priv->phydev;
@@ -2761,11 +2810,27 @@ static bool lan8814_match_skb(struct kszphy_ptp_priv *ptp_priv,
return ret;
}
+static void lan8814_match_rx_ts(struct kszphy_ptp_priv *ptp_priv,
+ struct lan8814_ptp_rx_ts *rx_ts)
+{
+ unsigned long flags;
+
+ /* If we failed to match the skb add it to the queue for when
+ * the frame will come
+ */
+ if (!lan8814_match_skb(ptp_priv, rx_ts)) {
+ spin_lock_irqsave(&ptp_priv->rx_ts_lock, flags);
+ list_add(&rx_ts->list, &ptp_priv->rx_ts_list);
+ spin_unlock_irqrestore(&ptp_priv->rx_ts_lock, flags);
+ } else {
+ kfree(rx_ts);
+ }
+}
+
static void lan8814_get_rx_ts(struct kszphy_ptp_priv *ptp_priv)
{
struct phy_device *phydev = ptp_priv->phydev;
struct lan8814_ptp_rx_ts *rx_ts;
- unsigned long flags;
u32 reg;
do {
@@ -2775,17 +2840,7 @@ static void lan8814_get_rx_ts(struct kszphy_ptp_priv *ptp_priv)
lan8814_ptp_rx_ts_get(phydev, &rx_ts->seconds, &rx_ts->nsec,
&rx_ts->seq_id);
-
- /* If we failed to match the skb add it to the queue for when
- * the frame will come
- */
- if (!lan8814_match_skb(ptp_priv, rx_ts)) {
- spin_lock_irqsave(&ptp_priv->rx_ts_lock, flags);
- list_add(&rx_ts->list, &ptp_priv->rx_ts_list);
- spin_unlock_irqrestore(&ptp_priv->rx_ts_lock, flags);
- } else {
- kfree(rx_ts);
- }
+ lan8814_match_rx_ts(ptp_priv, rx_ts);
/* If other timestamps are available in the FIFO,
* process them.
@@ -2794,13 +2849,11 @@ static void lan8814_get_rx_ts(struct kszphy_ptp_priv *ptp_priv)
} while (PTP_CAP_INFO_RX_TS_CNT_GET_(reg) > 0);
}
-static void lan8814_handle_ptp_interrupt(struct phy_device *phydev)
+static void lan8814_handle_ptp_interrupt(struct phy_device *phydev, u16 status)
{
struct kszphy_priv *priv = phydev->priv;
struct kszphy_ptp_priv *ptp_priv = &priv->ptp_priv;
- u16 status;
- status = lanphy_read_page_reg(phydev, 5, PTP_TSU_INT_STS);
if (status & PTP_TSU_INT_STS_PTP_TX_TS_EN_)
lan8814_get_tx_ts(ptp_priv);
@@ -2899,8 +2952,8 @@ static int lan8804_config_intr(struct phy_device *phydev)
static irqreturn_t lan8814_handle_interrupt(struct phy_device *phydev)
{
- int irq_status, tsu_irq_status;
int ret = IRQ_NONE;
+ int irq_status;
irq_status = phy_read(phydev, LAN8814_INTS);
if (irq_status < 0) {
@@ -2913,20 +2966,13 @@ static irqreturn_t lan8814_handle_interrupt(struct phy_device *phydev)
ret = IRQ_HANDLED;
}
- while (1) {
- tsu_irq_status = lanphy_read_page_reg(phydev, 4,
- LAN8814_INTR_STS_REG);
-
- if (tsu_irq_status > 0 &&
- (tsu_irq_status & (LAN8814_INTR_STS_REG_1588_TSU0_ |
- LAN8814_INTR_STS_REG_1588_TSU1_ |
- LAN8814_INTR_STS_REG_1588_TSU2_ |
- LAN8814_INTR_STS_REG_1588_TSU3_))) {
- lan8814_handle_ptp_interrupt(phydev);
- ret = IRQ_HANDLED;
- } else {
+ while (true) {
+ irq_status = lanphy_read_page_reg(phydev, 5, PTP_TSU_INT_STS);
+ if (!irq_status)
break;
- }
+
+ lan8814_handle_ptp_interrupt(phydev, irq_status);
+ ret = IRQ_HANDLED;
}
return ret;
@@ -3016,10 +3062,6 @@ static int lan8814_ptp_probe_once(struct phy_device *phydev)
{
struct lan8814_shared_priv *shared = phydev->shared->priv;
- if (!IS_ENABLED(CONFIG_PTP_1588_CLOCK) ||
- !IS_ENABLED(CONFIG_NETWORK_PHY_TIMESTAMPING))
- return 0;
-
/* Initialise shared lock for clock*/
mutex_init(&shared->shared_lock);
@@ -3039,12 +3081,16 @@ static int lan8814_ptp_probe_once(struct phy_device *phydev)
shared->ptp_clock = ptp_clock_register(&shared->ptp_clock_info,
&phydev->mdio.dev);
- if (IS_ERR_OR_NULL(shared->ptp_clock)) {
+ if (IS_ERR(shared->ptp_clock)) {
phydev_err(phydev, "ptp_clock_register failed %lu\n",
PTR_ERR(shared->ptp_clock));
return -EINVAL;
}
+ /* Check if PHC support is missing at the configuration level */
+ if (!shared->ptp_clock)
+ return 0;
+
phydev_dbg(phydev, "successfully registered ptp clock\n");
shared->phydev = phydev;
@@ -3160,6 +3206,704 @@ static int lan8814_probe(struct phy_device *phydev)
return 0;
}
+#define LAN8841_MMD_TIMER_REG 0
+#define LAN8841_MMD0_REGISTER_17 17
+#define LAN8841_MMD0_REGISTER_17_DROP_OPT(x) ((x) & 0x3)
+#define LAN8841_MMD0_REGISTER_17_XMIT_TOG_TX_DIS BIT(3)
+#define LAN8841_OPERATION_MODE_STRAP_OVERRIDE_LOW_REG 2
+#define LAN8841_OPERATION_MODE_STRAP_OVERRIDE_LOW_REG_MAGJACK BIT(14)
+#define LAN8841_MMD_ANALOG_REG 28
+#define LAN8841_ANALOG_CONTROL_1 1
+#define LAN8841_ANALOG_CONTROL_1_PLL_TRIM(x) (((x) & 0x3) << 5)
+#define LAN8841_ANALOG_CONTROL_10 13
+#define LAN8841_ANALOG_CONTROL_10_PLL_DIV(x) ((x) & 0x3)
+#define LAN8841_ANALOG_CONTROL_11 14
+#define LAN8841_ANALOG_CONTROL_11_LDO_REF(x) (((x) & 0x7) << 12)
+#define LAN8841_TX_LOW_I_CH_C_D_POWER_MANAGMENT 69
+#define LAN8841_TX_LOW_I_CH_C_D_POWER_MANAGMENT_VAL 0xbffc
+#define LAN8841_BTRX_POWER_DOWN 70
+#define LAN8841_BTRX_POWER_DOWN_QBIAS_CH_A BIT(0)
+#define LAN8841_BTRX_POWER_DOWN_BTRX_CH_A BIT(1)
+#define LAN8841_BTRX_POWER_DOWN_QBIAS_CH_B BIT(2)
+#define LAN8841_BTRX_POWER_DOWN_BTRX_CH_B BIT(3)
+#define LAN8841_BTRX_POWER_DOWN_BTRX_CH_C BIT(5)
+#define LAN8841_BTRX_POWER_DOWN_BTRX_CH_D BIT(7)
+#define LAN8841_ADC_CHANNEL_MASK 198
+#define LAN8841_PTP_RX_PARSE_L2_ADDR_EN 370
+#define LAN8841_PTP_RX_PARSE_IP_ADDR_EN 371
+#define LAN8841_PTP_TX_PARSE_L2_ADDR_EN 434
+#define LAN8841_PTP_TX_PARSE_IP_ADDR_EN 435
+#define LAN8841_PTP_CMD_CTL 256
+#define LAN8841_PTP_CMD_CTL_PTP_ENABLE BIT(2)
+#define LAN8841_PTP_CMD_CTL_PTP_DISABLE BIT(1)
+#define LAN8841_PTP_CMD_CTL_PTP_RESET BIT(0)
+#define LAN8841_PTP_RX_PARSE_CONFIG 368
+#define LAN8841_PTP_TX_PARSE_CONFIG 432
+
+static int lan8841_config_init(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = ksz9131_config_init(phydev);
+ if (ret)
+ return ret;
+
+ /* Initialize the HW by resetting everything */
+ phy_modify_mmd(phydev, KSZ9131RN_MMD_COMMON_CTRL_REG,
+ LAN8841_PTP_CMD_CTL,
+ LAN8841_PTP_CMD_CTL_PTP_RESET,
+ LAN8841_PTP_CMD_CTL_PTP_RESET);
+
+ phy_modify_mmd(phydev, KSZ9131RN_MMD_COMMON_CTRL_REG,
+ LAN8841_PTP_CMD_CTL,
+ LAN8841_PTP_CMD_CTL_PTP_ENABLE,
+ LAN8841_PTP_CMD_CTL_PTP_ENABLE);
+
+ /* Don't process any frames */
+ phy_write_mmd(phydev, KSZ9131RN_MMD_COMMON_CTRL_REG,
+ LAN8841_PTP_RX_PARSE_CONFIG, 0);
+ phy_write_mmd(phydev, KSZ9131RN_MMD_COMMON_CTRL_REG,
+ LAN8841_PTP_TX_PARSE_CONFIG, 0);
+ phy_write_mmd(phydev, KSZ9131RN_MMD_COMMON_CTRL_REG,
+ LAN8841_PTP_TX_PARSE_L2_ADDR_EN, 0);
+ phy_write_mmd(phydev, KSZ9131RN_MMD_COMMON_CTRL_REG,
+ LAN8841_PTP_RX_PARSE_L2_ADDR_EN, 0);
+ phy_write_mmd(phydev, KSZ9131RN_MMD_COMMON_CTRL_REG,
+ LAN8841_PTP_TX_PARSE_IP_ADDR_EN, 0);
+ phy_write_mmd(phydev, KSZ9131RN_MMD_COMMON_CTRL_REG,
+ LAN8841_PTP_RX_PARSE_IP_ADDR_EN, 0);
+
+ /* 100BT Clause 40 improvenent errata */
+ phy_write_mmd(phydev, LAN8841_MMD_ANALOG_REG,
+ LAN8841_ANALOG_CONTROL_1,
+ LAN8841_ANALOG_CONTROL_1_PLL_TRIM(0x2));
+ phy_write_mmd(phydev, LAN8841_MMD_ANALOG_REG,
+ LAN8841_ANALOG_CONTROL_10,
+ LAN8841_ANALOG_CONTROL_10_PLL_DIV(0x1));
+
+ /* 10M/100M Ethernet Signal Tuning Errata for Shorted-Center Tap
+ * Magnetics
+ */
+ ret = phy_read_mmd(phydev, KSZ9131RN_MMD_COMMON_CTRL_REG,
+ LAN8841_OPERATION_MODE_STRAP_OVERRIDE_LOW_REG);
+ if (ret & LAN8841_OPERATION_MODE_STRAP_OVERRIDE_LOW_REG_MAGJACK) {
+ phy_write_mmd(phydev, LAN8841_MMD_ANALOG_REG,
+ LAN8841_TX_LOW_I_CH_C_D_POWER_MANAGMENT,
+ LAN8841_TX_LOW_I_CH_C_D_POWER_MANAGMENT_VAL);
+ phy_write_mmd(phydev, LAN8841_MMD_ANALOG_REG,
+ LAN8841_BTRX_POWER_DOWN,
+ LAN8841_BTRX_POWER_DOWN_QBIAS_CH_A |
+ LAN8841_BTRX_POWER_DOWN_BTRX_CH_A |
+ LAN8841_BTRX_POWER_DOWN_QBIAS_CH_B |
+ LAN8841_BTRX_POWER_DOWN_BTRX_CH_B |
+ LAN8841_BTRX_POWER_DOWN_BTRX_CH_C |
+ LAN8841_BTRX_POWER_DOWN_BTRX_CH_D);
+ }
+
+ /* LDO Adjustment errata */
+ phy_write_mmd(phydev, LAN8841_MMD_ANALOG_REG,
+ LAN8841_ANALOG_CONTROL_11,
+ LAN8841_ANALOG_CONTROL_11_LDO_REF(1));
+
+ /* 100BT RGMII latency tuning errata */
+ phy_write_mmd(phydev, MDIO_MMD_PMAPMD,
+ LAN8841_ADC_CHANNEL_MASK, 0x0);
+ phy_write_mmd(phydev, LAN8841_MMD_TIMER_REG,
+ LAN8841_MMD0_REGISTER_17,
+ LAN8841_MMD0_REGISTER_17_DROP_OPT(2) |
+ LAN8841_MMD0_REGISTER_17_XMIT_TOG_TX_DIS);
+
+ return 0;
+}
+
+#define LAN8841_OUTPUT_CTRL 25
+#define LAN8841_OUTPUT_CTRL_INT_BUFFER BIT(14)
+#define LAN8841_INT_PTP BIT(9)
+
+static int lan8841_config_intr(struct phy_device *phydev)
+{
+ int err;
+
+ phy_modify(phydev, LAN8841_OUTPUT_CTRL,
+ LAN8841_OUTPUT_CTRL_INT_BUFFER, 0);
+
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ err = phy_read(phydev, LAN8814_INTS);
+ if (err)
+ return err;
+
+ /* Enable / disable interrupts. It is OK to enable PTP interrupt
+ * even if it PTP is not enabled. Because the underneath blocks
+ * will not enable the PTP so we will never get the PTP
+ * interrupt.
+ */
+ err = phy_write(phydev, LAN8814_INTC,
+ LAN8814_INT_LINK | LAN8841_INT_PTP);
+ } else {
+ err = phy_write(phydev, LAN8814_INTC, 0);
+ if (err)
+ return err;
+
+ err = phy_read(phydev, LAN8814_INTS);
+ }
+
+ return err;
+}
+
+#define LAN8841_PTP_TX_EGRESS_SEC_LO 453
+#define LAN8841_PTP_TX_EGRESS_SEC_HI 452
+#define LAN8841_PTP_TX_EGRESS_NS_LO 451
+#define LAN8841_PTP_TX_EGRESS_NS_HI 450
+#define LAN8841_PTP_TX_EGRESS_NSEC_HI_VALID BIT(15)
+#define LAN8841_PTP_TX_MSG_HEADER2 455
+
+static bool lan8841_ptp_get_tx_ts(struct kszphy_ptp_priv *ptp_priv,
+ u32 *sec, u32 *nsec, u16 *seq)
+{
+ struct phy_device *phydev = ptp_priv->phydev;
+
+ *nsec = phy_read_mmd(phydev, 2, LAN8841_PTP_TX_EGRESS_NS_HI);
+ if (!(*nsec & LAN8841_PTP_TX_EGRESS_NSEC_HI_VALID))
+ return false;
+
+ *nsec = ((*nsec & 0x3fff) << 16);
+ *nsec = *nsec | phy_read_mmd(phydev, 2, LAN8841_PTP_TX_EGRESS_NS_LO);
+
+ *sec = phy_read_mmd(phydev, 2, LAN8841_PTP_TX_EGRESS_SEC_HI);
+ *sec = *sec << 16;
+ *sec = *sec | phy_read_mmd(phydev, 2, LAN8841_PTP_TX_EGRESS_SEC_LO);
+
+ *seq = phy_read_mmd(phydev, 2, LAN8841_PTP_TX_MSG_HEADER2);
+
+ return true;
+}
+
+static void lan8841_ptp_process_tx_ts(struct kszphy_ptp_priv *ptp_priv)
+{
+ u32 sec, nsec;
+ u16 seq;
+
+ while (lan8841_ptp_get_tx_ts(ptp_priv, &sec, &nsec, &seq))
+ lan8814_match_tx_skb(ptp_priv, sec, nsec, seq);
+}
+
+#define LAN8841_PTP_RX_INGRESS_SEC_LO 389
+#define LAN8841_PTP_RX_INGRESS_SEC_HI 388
+#define LAN8841_PTP_RX_INGRESS_NS_LO 387
+#define LAN8841_PTP_RX_INGRESS_NS_HI 386
+#define LAN8841_PTP_RX_INGRESS_NSEC_HI_VALID BIT(15)
+#define LAN8841_PTP_RX_MSG_HEADER2 391
+
+static struct lan8814_ptp_rx_ts *lan8841_ptp_get_rx_ts(struct kszphy_ptp_priv *ptp_priv)
+{
+ struct phy_device *phydev = ptp_priv->phydev;
+ struct lan8814_ptp_rx_ts *rx_ts;
+ u32 sec, nsec;
+ u16 seq;
+
+ nsec = phy_read_mmd(phydev, 2, LAN8841_PTP_RX_INGRESS_NS_HI);
+ if (!(nsec & LAN8841_PTP_RX_INGRESS_NSEC_HI_VALID))
+ return NULL;
+
+ nsec = ((nsec & 0x3fff) << 16);
+ nsec = nsec | phy_read_mmd(phydev, 2, LAN8841_PTP_RX_INGRESS_NS_LO);
+
+ sec = phy_read_mmd(phydev, 2, LAN8841_PTP_RX_INGRESS_SEC_HI);
+ sec = sec << 16;
+ sec = sec | phy_read_mmd(phydev, 2, LAN8841_PTP_RX_INGRESS_SEC_LO);
+
+ seq = phy_read_mmd(phydev, 2, LAN8841_PTP_RX_MSG_HEADER2);
+
+ rx_ts = kzalloc(sizeof(*rx_ts), GFP_KERNEL);
+ if (!rx_ts)
+ return NULL;
+
+ rx_ts->seconds = sec;
+ rx_ts->nsec = nsec;
+ rx_ts->seq_id = seq;
+
+ return rx_ts;
+}
+
+static void lan8841_ptp_process_rx_ts(struct kszphy_ptp_priv *ptp_priv)
+{
+ struct lan8814_ptp_rx_ts *rx_ts;
+
+ while ((rx_ts = lan8841_ptp_get_rx_ts(ptp_priv)) != NULL)
+ lan8814_match_rx_ts(ptp_priv, rx_ts);
+}
+
+#define LAN8841_PTP_INT_STS 259
+#define LAN8841_PTP_INT_STS_PTP_TX_TS_OVRFL_INT BIT(13)
+#define LAN8841_PTP_INT_STS_PTP_TX_TS_INT BIT(12)
+#define LAN8841_PTP_INT_STS_PTP_RX_TS_OVRFL_INT BIT(9)
+#define LAN8841_PTP_INT_STS_PTP_RX_TS_INT BIT(8)
+
+static void lan8841_ptp_flush_fifo(struct kszphy_ptp_priv *ptp_priv, bool egress)
+{
+ struct phy_device *phydev = ptp_priv->phydev;
+ int i;
+
+ for (i = 0; i < FIFO_SIZE; ++i)
+ phy_read_mmd(phydev, 2,
+ egress ? LAN8841_PTP_TX_MSG_HEADER2 :
+ LAN8841_PTP_RX_MSG_HEADER2);
+
+ phy_read_mmd(phydev, 2, LAN8841_PTP_INT_STS);
+}
+
+static void lan8841_handle_ptp_interrupt(struct phy_device *phydev)
+{
+ struct kszphy_priv *priv = phydev->priv;
+ struct kszphy_ptp_priv *ptp_priv = &priv->ptp_priv;
+ u16 status;
+
+ do {
+ status = phy_read_mmd(phydev, 2, LAN8841_PTP_INT_STS);
+ if (status & LAN8841_PTP_INT_STS_PTP_TX_TS_INT)
+ lan8841_ptp_process_tx_ts(ptp_priv);
+
+ if (status & LAN8841_PTP_INT_STS_PTP_RX_TS_INT)
+ lan8841_ptp_process_rx_ts(ptp_priv);
+
+ if (status & LAN8841_PTP_INT_STS_PTP_TX_TS_OVRFL_INT) {
+ lan8841_ptp_flush_fifo(ptp_priv, true);
+ skb_queue_purge(&ptp_priv->tx_queue);
+ }
+
+ if (status & LAN8841_PTP_INT_STS_PTP_RX_TS_OVRFL_INT) {
+ lan8841_ptp_flush_fifo(ptp_priv, false);
+ skb_queue_purge(&ptp_priv->rx_queue);
+ }
+
+ } while (status);
+}
+
+#define LAN8841_INTS_PTP BIT(9)
+
+static irqreturn_t lan8841_handle_interrupt(struct phy_device *phydev)
+{
+ irqreturn_t ret = IRQ_NONE;
+ int irq_status;
+
+ irq_status = phy_read(phydev, LAN8814_INTS);
+ if (irq_status < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
+ if (irq_status & LAN8814_INT_LINK) {
+ phy_trigger_machine(phydev);
+ ret = IRQ_HANDLED;
+ }
+
+ if (irq_status & LAN8841_INTS_PTP) {
+ lan8841_handle_ptp_interrupt(phydev);
+ ret = IRQ_HANDLED;
+ }
+
+ return ret;
+}
+
+static int lan8841_ts_info(struct mii_timestamper *mii_ts,
+ struct ethtool_ts_info *info)
+{
+ struct kszphy_ptp_priv *ptp_priv;
+
+ ptp_priv = container_of(mii_ts, struct kszphy_ptp_priv, mii_ts);
+
+ info->phc_index = ptp_priv->ptp_clock ?
+ ptp_clock_index(ptp_priv->ptp_clock) : -1;
+ if (info->phc_index == -1) {
+ info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE;
+ return 0;
+ }
+
+ info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ info->tx_types = (1 << HWTSTAMP_TX_OFF) |
+ (1 << HWTSTAMP_TX_ON) |
+ (1 << HWTSTAMP_TX_ONESTEP_SYNC);
+
+ info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
+
+ return 0;
+}
+
+#define LAN8841_PTP_INT_EN 260
+#define LAN8841_PTP_INT_EN_PTP_TX_TS_OVRFL_EN BIT(13)
+#define LAN8841_PTP_INT_EN_PTP_TX_TS_EN BIT(12)
+#define LAN8841_PTP_INT_EN_PTP_RX_TS_OVRFL_EN BIT(9)
+#define LAN8841_PTP_INT_EN_PTP_RX_TS_EN BIT(8)
+
+static void lan8841_ptp_enable_int(struct kszphy_ptp_priv *ptp_priv,
+ bool enable)
+{
+ struct phy_device *phydev = ptp_priv->phydev;
+
+ if (enable)
+ /* Enable interrupts */
+ phy_modify_mmd(phydev, 2, LAN8841_PTP_INT_EN,
+ LAN8841_PTP_INT_EN_PTP_TX_TS_OVRFL_EN |
+ LAN8841_PTP_INT_EN_PTP_RX_TS_OVRFL_EN |
+ LAN8841_PTP_INT_EN_PTP_TX_TS_EN |
+ LAN8841_PTP_INT_EN_PTP_RX_TS_EN,
+ LAN8841_PTP_INT_EN_PTP_TX_TS_OVRFL_EN |
+ LAN8841_PTP_INT_EN_PTP_RX_TS_OVRFL_EN |
+ LAN8841_PTP_INT_EN_PTP_TX_TS_EN |
+ LAN8841_PTP_INT_EN_PTP_RX_TS_EN);
+ else
+ /* Disable interrupts */
+ phy_modify_mmd(phydev, 2, LAN8841_PTP_INT_EN,
+ LAN8841_PTP_INT_EN_PTP_TX_TS_OVRFL_EN |
+ LAN8841_PTP_INT_EN_PTP_RX_TS_OVRFL_EN |
+ LAN8841_PTP_INT_EN_PTP_TX_TS_EN |
+ LAN8841_PTP_INT_EN_PTP_RX_TS_EN, 0);
+}
+
+#define LAN8841_PTP_RX_TIMESTAMP_EN 379
+#define LAN8841_PTP_TX_TIMESTAMP_EN 443
+#define LAN8841_PTP_TX_MOD 445
+
+static int lan8841_hwtstamp(struct mii_timestamper *mii_ts, struct ifreq *ifr)
+{
+ struct kszphy_ptp_priv *ptp_priv = container_of(mii_ts, struct kszphy_ptp_priv, mii_ts);
+ struct phy_device *phydev = ptp_priv->phydev;
+ struct lan8814_ptp_rx_ts *rx_ts, *tmp;
+ struct hwtstamp_config config;
+ int txcfg = 0, rxcfg = 0;
+ int pkt_ts_enable;
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ ptp_priv->hwts_tx_type = config.tx_type;
+ ptp_priv->rx_filter = config.rx_filter;
+
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ ptp_priv->layer = 0;
+ ptp_priv->version = 0;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ ptp_priv->layer = PTP_CLASS_L4;
+ ptp_priv->version = PTP_CLASS_V2;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ ptp_priv->layer = PTP_CLASS_L2;
+ ptp_priv->version = PTP_CLASS_V2;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ ptp_priv->layer = PTP_CLASS_L4 | PTP_CLASS_L2;
+ ptp_priv->version = PTP_CLASS_V2;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ /* Setup parsing of the frames and enable the timestamping for ptp
+ * frames
+ */
+ if (ptp_priv->layer & PTP_CLASS_L2) {
+ rxcfg |= PTP_RX_PARSE_CONFIG_LAYER2_EN_;
+ txcfg |= PTP_TX_PARSE_CONFIG_LAYER2_EN_;
+ } else if (ptp_priv->layer & PTP_CLASS_L4) {
+ rxcfg |= PTP_RX_PARSE_CONFIG_IPV4_EN_ | PTP_RX_PARSE_CONFIG_IPV6_EN_;
+ txcfg |= PTP_TX_PARSE_CONFIG_IPV4_EN_ | PTP_TX_PARSE_CONFIG_IPV6_EN_;
+ }
+
+ phy_write_mmd(phydev, 2, LAN8841_PTP_RX_PARSE_CONFIG, rxcfg);
+ phy_write_mmd(phydev, 2, LAN8841_PTP_TX_PARSE_CONFIG, txcfg);
+
+ pkt_ts_enable = PTP_TIMESTAMP_EN_SYNC_ | PTP_TIMESTAMP_EN_DREQ_ |
+ PTP_TIMESTAMP_EN_PDREQ_ | PTP_TIMESTAMP_EN_PDRES_;
+ phy_write_mmd(phydev, 2, LAN8841_PTP_RX_TIMESTAMP_EN, pkt_ts_enable);
+ phy_write_mmd(phydev, 2, LAN8841_PTP_TX_TIMESTAMP_EN, pkt_ts_enable);
+
+ /* Enable / disable of the TX timestamp in the SYNC frames */
+ phy_modify_mmd(phydev, 2, LAN8841_PTP_TX_MOD,
+ PTP_TX_MOD_TX_PTP_SYNC_TS_INSERT_,
+ ptp_priv->hwts_tx_type == HWTSTAMP_TX_ONESTEP_SYNC ?
+ PTP_TX_MOD_TX_PTP_SYNC_TS_INSERT_ : 0);
+
+ /* Now enable/disable the timestamping */
+ lan8841_ptp_enable_int(ptp_priv,
+ config.rx_filter != HWTSTAMP_FILTER_NONE);
+
+ /* In case of multiple starts and stops, these needs to be cleared */
+ list_for_each_entry_safe(rx_ts, tmp, &ptp_priv->rx_ts_list, list) {
+ list_del(&rx_ts->list);
+ kfree(rx_ts);
+ }
+
+ skb_queue_purge(&ptp_priv->rx_queue);
+ skb_queue_purge(&ptp_priv->tx_queue);
+
+ lan8841_ptp_flush_fifo(ptp_priv, false);
+ lan8841_ptp_flush_fifo(ptp_priv, true);
+
+ return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? -EFAULT : 0;
+}
+
+#define LAN8841_PTP_LTC_SET_SEC_HI 262
+#define LAN8841_PTP_LTC_SET_SEC_MID 263
+#define LAN8841_PTP_LTC_SET_SEC_LO 264
+#define LAN8841_PTP_LTC_SET_NS_HI 265
+#define LAN8841_PTP_LTC_SET_NS_LO 266
+#define LAN8841_PTP_CMD_CTL_PTP_LTC_LOAD BIT(4)
+
+static int lan8841_ptp_settime64(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct kszphy_ptp_priv *ptp_priv = container_of(ptp, struct kszphy_ptp_priv,
+ ptp_clock_info);
+ struct phy_device *phydev = ptp_priv->phydev;
+
+ /* Set the value to be stored */
+ mutex_lock(&ptp_priv->ptp_lock);
+ phy_write_mmd(phydev, 2, LAN8841_PTP_LTC_SET_SEC_LO, lower_16_bits(ts->tv_sec));
+ phy_write_mmd(phydev, 2, LAN8841_PTP_LTC_SET_SEC_MID, upper_16_bits(ts->tv_sec));
+ phy_write_mmd(phydev, 2, LAN8841_PTP_LTC_SET_SEC_HI, upper_32_bits(ts->tv_sec) & 0xffff);
+ phy_write_mmd(phydev, 2, LAN8841_PTP_LTC_SET_NS_LO, lower_16_bits(ts->tv_nsec));
+ phy_write_mmd(phydev, 2, LAN8841_PTP_LTC_SET_NS_HI, upper_16_bits(ts->tv_nsec) & 0x3fff);
+
+ /* Set the command to load the LTC */
+ phy_write_mmd(phydev, 2, LAN8841_PTP_CMD_CTL,
+ LAN8841_PTP_CMD_CTL_PTP_LTC_LOAD);
+ mutex_unlock(&ptp_priv->ptp_lock);
+
+ return 0;
+}
+
+#define LAN8841_PTP_LTC_RD_SEC_HI 358
+#define LAN8841_PTP_LTC_RD_SEC_MID 359
+#define LAN8841_PTP_LTC_RD_SEC_LO 360
+#define LAN8841_PTP_LTC_RD_NS_HI 361
+#define LAN8841_PTP_LTC_RD_NS_LO 362
+#define LAN8841_PTP_CMD_CTL_PTP_LTC_READ BIT(3)
+
+static int lan8841_ptp_gettime64(struct ptp_clock_info *ptp,
+ struct timespec64 *ts)
+{
+ struct kszphy_ptp_priv *ptp_priv = container_of(ptp, struct kszphy_ptp_priv,
+ ptp_clock_info);
+ struct phy_device *phydev = ptp_priv->phydev;
+ time64_t s;
+ s64 ns;
+
+ mutex_lock(&ptp_priv->ptp_lock);
+ /* Issue the command to read the LTC */
+ phy_write_mmd(phydev, 2, LAN8841_PTP_CMD_CTL,
+ LAN8841_PTP_CMD_CTL_PTP_LTC_READ);
+
+ /* Read the LTC */
+ s = phy_read_mmd(phydev, 2, LAN8841_PTP_LTC_RD_SEC_HI);
+ s <<= 16;
+ s |= phy_read_mmd(phydev, 2, LAN8841_PTP_LTC_RD_SEC_MID);
+ s <<= 16;
+ s |= phy_read_mmd(phydev, 2, LAN8841_PTP_LTC_RD_SEC_LO);
+
+ ns = phy_read_mmd(phydev, 2, LAN8841_PTP_LTC_RD_NS_HI) & 0x3fff;
+ ns <<= 16;
+ ns |= phy_read_mmd(phydev, 2, LAN8841_PTP_LTC_RD_NS_LO);
+ mutex_unlock(&ptp_priv->ptp_lock);
+
+ set_normalized_timespec64(ts, s, ns);
+ return 0;
+}
+
+#define LAN8841_PTP_LTC_STEP_ADJ_LO 276
+#define LAN8841_PTP_LTC_STEP_ADJ_HI 275
+#define LAN8841_PTP_LTC_STEP_ADJ_DIR BIT(15)
+#define LAN8841_PTP_CMD_CTL_PTP_LTC_STEP_SECONDS BIT(5)
+#define LAN8841_PTP_CMD_CTL_PTP_LTC_STEP_NANOSECONDS BIT(6)
+
+static int lan8841_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct kszphy_ptp_priv *ptp_priv = container_of(ptp, struct kszphy_ptp_priv,
+ ptp_clock_info);
+ struct phy_device *phydev = ptp_priv->phydev;
+ struct timespec64 ts;
+ bool add = true;
+ u32 nsec;
+ s32 sec;
+
+ /* The HW allows up to 15 sec to adjust the time, but here we limit to
+ * 10 sec the adjustment. The reason is, in case the adjustment is 14
+ * sec and 999999999 nsec, then we add 8ns to compansate the actual
+ * increment so the value can be bigger than 15 sec. Therefore limit the
+ * possible adjustments so we will not have these corner cases
+ */
+ if (delta > 10000000000LL || delta < -10000000000LL) {
+ /* The timeadjustment is too big, so fall back using set time */
+ u64 now;
+
+ ptp->gettime64(ptp, &ts);
+
+ now = ktime_to_ns(timespec64_to_ktime(ts));
+ ts = ns_to_timespec64(now + delta);
+
+ ptp->settime64(ptp, &ts);
+ return 0;
+ }
+
+ sec = div_u64_rem(delta < 0 ? -delta : delta, NSEC_PER_SEC, &nsec);
+ if (delta < 0 && nsec != 0) {
+ /* It is not allowed to adjust low the nsec part, therefore
+ * subtract more from second part and add to nanosecond such
+ * that would roll over, so the second part will increase
+ */
+ sec--;
+ nsec = NSEC_PER_SEC - nsec;
+ }
+
+ /* Calculate the adjustments and the direction */
+ if (delta < 0)
+ add = false;
+
+ if (nsec > 0)
+ /* add 8 ns to cover the likely normal increment */
+ nsec += 8;
+
+ if (nsec >= NSEC_PER_SEC) {
+ /* carry into seconds */
+ sec++;
+ nsec -= NSEC_PER_SEC;
+ }
+
+ mutex_lock(&ptp_priv->ptp_lock);
+ if (sec) {
+ phy_write_mmd(phydev, 2, LAN8841_PTP_LTC_STEP_ADJ_LO, sec);
+ phy_write_mmd(phydev, 2, LAN8841_PTP_LTC_STEP_ADJ_HI,
+ add ? LAN8841_PTP_LTC_STEP_ADJ_DIR : 0);
+ phy_write_mmd(phydev, 2, LAN8841_PTP_CMD_CTL,
+ LAN8841_PTP_CMD_CTL_PTP_LTC_STEP_SECONDS);
+ }
+
+ if (nsec) {
+ phy_write_mmd(phydev, 2, LAN8841_PTP_LTC_STEP_ADJ_LO,
+ nsec & 0xffff);
+ phy_write_mmd(phydev, 2, LAN8841_PTP_LTC_STEP_ADJ_HI,
+ (nsec >> 16) & 0x3fff);
+ phy_write_mmd(phydev, 2, LAN8841_PTP_CMD_CTL,
+ LAN8841_PTP_CMD_CTL_PTP_LTC_STEP_NANOSECONDS);
+ }
+ mutex_unlock(&ptp_priv->ptp_lock);
+
+ return 0;
+}
+
+#define LAN8841_PTP_LTC_RATE_ADJ_HI 269
+#define LAN8841_PTP_LTC_RATE_ADJ_HI_DIR BIT(15)
+#define LAN8841_PTP_LTC_RATE_ADJ_LO 270
+
+static int lan8841_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
+{
+ struct kszphy_ptp_priv *ptp_priv = container_of(ptp, struct kszphy_ptp_priv,
+ ptp_clock_info);
+ struct phy_device *phydev = ptp_priv->phydev;
+ bool faster = true;
+ u32 rate;
+
+ if (!scaled_ppm)
+ return 0;
+
+ if (scaled_ppm < 0) {
+ scaled_ppm = -scaled_ppm;
+ faster = false;
+ }
+
+ rate = LAN8814_1PPM_FORMAT * (upper_16_bits(scaled_ppm));
+ rate += (LAN8814_1PPM_FORMAT * (lower_16_bits(scaled_ppm))) >> 16;
+
+ mutex_lock(&ptp_priv->ptp_lock);
+ phy_write_mmd(phydev, 2, LAN8841_PTP_LTC_RATE_ADJ_HI,
+ faster ? LAN8841_PTP_LTC_RATE_ADJ_HI_DIR | (upper_16_bits(rate) & 0x3fff)
+ : upper_16_bits(rate) & 0x3fff);
+ phy_write_mmd(phydev, 2, LAN8841_PTP_LTC_RATE_ADJ_LO, lower_16_bits(rate));
+ mutex_unlock(&ptp_priv->ptp_lock);
+
+ return 0;
+}
+
+static struct ptp_clock_info lan8841_ptp_clock_info = {
+ .owner = THIS_MODULE,
+ .name = "lan8841 ptp",
+ .max_adj = 31249999,
+ .gettime64 = lan8841_ptp_gettime64,
+ .settime64 = lan8841_ptp_settime64,
+ .adjtime = lan8841_ptp_adjtime,
+ .adjfine = lan8841_ptp_adjfine,
+};
+
+#define LAN8841_OPERATION_MODE_STRAP_LOW_REGISTER 3
+#define LAN8841_OPERATION_MODE_STRAP_LOW_REGISTER_STRAP_RGMII_EN BIT(0)
+
+static int lan8841_probe(struct phy_device *phydev)
+{
+ struct kszphy_ptp_priv *ptp_priv;
+ struct kszphy_priv *priv;
+ int err;
+
+ err = kszphy_probe(phydev);
+ if (err)
+ return err;
+
+ if (phy_read_mmd(phydev, KSZ9131RN_MMD_COMMON_CTRL_REG,
+ LAN8841_OPERATION_MODE_STRAP_LOW_REGISTER) &
+ LAN8841_OPERATION_MODE_STRAP_LOW_REGISTER_STRAP_RGMII_EN)
+ phydev->interface = PHY_INTERFACE_MODE_RGMII_RXID;
+
+ /* Register the clock */
+ if (!IS_ENABLED(CONFIG_NETWORK_PHY_TIMESTAMPING))
+ return 0;
+
+ priv = phydev->priv;
+ ptp_priv = &priv->ptp_priv;
+
+ ptp_priv->ptp_clock_info = lan8841_ptp_clock_info;
+ ptp_priv->ptp_clock = ptp_clock_register(&ptp_priv->ptp_clock_info,
+ &phydev->mdio.dev);
+ if (IS_ERR(ptp_priv->ptp_clock)) {
+ phydev_err(phydev, "ptp_clock_register failed: %lu\n",
+ PTR_ERR(ptp_priv->ptp_clock));
+ return -EINVAL;
+ }
+
+ if (!ptp_priv->ptp_clock)
+ return 0;
+
+ /* Initialize the SW */
+ skb_queue_head_init(&ptp_priv->tx_queue);
+ skb_queue_head_init(&ptp_priv->rx_queue);
+ INIT_LIST_HEAD(&ptp_priv->rx_ts_list);
+ spin_lock_init(&ptp_priv->rx_ts_lock);
+ ptp_priv->phydev = phydev;
+ mutex_init(&ptp_priv->ptp_lock);
+
+ ptp_priv->mii_ts.rxtstamp = lan8814_rxtstamp;
+ ptp_priv->mii_ts.txtstamp = lan8814_txtstamp;
+ ptp_priv->mii_ts.hwtstamp = lan8841_hwtstamp;
+ ptp_priv->mii_ts.ts_info = lan8841_ts_info;
+
+ phydev->mii_ts = &ptp_priv->mii_ts;
+
+ return 0;
+}
+
static struct phy_driver ksphy_driver[] = {
{
.phy_id = PHY_ID_KS8737,
@@ -3370,12 +4114,30 @@ static struct phy_driver ksphy_driver[] = {
.config_intr = lan8804_config_intr,
.handle_interrupt = lan8804_handle_interrupt,
}, {
+ .phy_id = PHY_ID_LAN8841,
+ .phy_id_mask = MICREL_PHY_ID_MASK,
+ .name = "Microchip LAN8841 Gigabit PHY",
+ .flags = PHY_POLL_CABLE_TEST,
+ .driver_data = &lan8841_type,
+ .config_init = lan8841_config_init,
+ .probe = lan8841_probe,
+ .soft_reset = genphy_soft_reset,
+ .config_intr = lan8841_config_intr,
+ .handle_interrupt = lan8841_handle_interrupt,
+ .get_sset_count = kszphy_get_sset_count,
+ .get_strings = kszphy_get_strings,
+ .get_stats = kszphy_get_stats,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ .cable_test_start = lan8814_cable_test_start,
+ .cable_test_get_status = ksz886x_cable_test_get_status,
+}, {
.phy_id = PHY_ID_KSZ9131,
.phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Microchip KSZ9131 Gigabit PHY",
/* PHY_GBIT_FEATURES */
.flags = PHY_POLL_CABLE_TEST,
- .driver_data = &ksz9021_type,
+ .driver_data = &ksz9131_type,
.probe = kszphy_probe,
.config_init = ksz9131_config_init,
.config_intr = kszphy_config_intr,
@@ -3430,6 +4192,7 @@ static struct phy_driver ksphy_driver[] = {
.handle_interrupt = kszphy_handle_interrupt,
.suspend = genphy_suspend,
.resume = genphy_resume,
+ .get_features = ksz9477_get_features,
} };
module_phy_driver(ksphy_driver);
@@ -3454,6 +4217,7 @@ static struct mdio_device_id __maybe_unused micrel_tbl[] = {
{ PHY_ID_KSZ886X, MICREL_PHY_ID_MASK },
{ PHY_ID_LAN8814, MICREL_PHY_ID_MASK },
{ PHY_ID_LAN8804, MICREL_PHY_ID_MASK },
+ { PHY_ID_LAN8841, MICREL_PHY_ID_MASK },
{ }
};
diff --git a/drivers/net/phy/microchip_t1.c b/drivers/net/phy/microchip_t1.c
index 8569a545e0a3..a838b61cd844 100644
--- a/drivers/net/phy/microchip_t1.c
+++ b/drivers/net/phy/microchip_t1.c
@@ -245,15 +245,42 @@ static int lan87xx_config_rgmii_delay(struct phy_device *phydev)
PHYACC_ATTR_BANK_MISC, LAN87XX_CTRL_1, rc);
}
+static int lan87xx_phy_init_cmd(struct phy_device *phydev,
+ const struct access_ereg_val *cmd_seq, int cnt)
+{
+ int ret, i;
+
+ for (i = 0; i < cnt; i++) {
+ if (cmd_seq[i].mode == PHYACC_ATTR_MODE_POLL &&
+ cmd_seq[i].bank == PHYACC_ATTR_BANK_SMI) {
+ ret = access_smi_poll_timeout(phydev,
+ cmd_seq[i].offset,
+ cmd_seq[i].val,
+ cmd_seq[i].mask);
+ } else {
+ ret = access_ereg(phydev, cmd_seq[i].mode,
+ cmd_seq[i].bank, cmd_seq[i].offset,
+ cmd_seq[i].val);
+ }
+ if (ret < 0)
+ return ret;
+ }
+
+ return ret;
+}
+
static int lan87xx_phy_init(struct phy_device *phydev)
{
- static const struct access_ereg_val init[] = {
+ static const struct access_ereg_val hw_init[] = {
/* TXPD/TXAMP6 Configs */
{ PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_AFE,
T1_AFE_PORT_CFG1_REG, 0x002D, 0 },
/* HW_Init Hi and Force_ED */
{ PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_SMI,
T1_POWER_DOWN_CONTROL_REG, 0x0308, 0 },
+ };
+
+ static const struct access_ereg_val slave_init[] = {
/* Equalizer Full Duplex Freeze - T1 Slave */
{ PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
T1_EQ_FD_STG1_FRZ_CFG, 0x0002, 0 },
@@ -267,6 +294,9 @@ static int lan87xx_phy_init(struct phy_device *phydev)
T1_EQ_WT_FD_LCK_FRZ_CFG, 0x0002, 0 },
{ PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
T1_PST_EQ_LCK_STG1_FRZ_CFG, 0x0002, 0 },
+ };
+
+ static const struct access_ereg_val phy_init[] = {
/* Slave Full Duplex Multi Configs */
{ PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP,
T1_SLV_FD_MULT_CFG_REG, 0x0D53, 0 },
@@ -397,7 +427,7 @@ static int lan87xx_phy_init(struct phy_device *phydev)
{ PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_SMI,
T1_POWER_DOWN_CONTROL_REG, 0x0300, 0 },
};
- int rc, i;
+ int rc;
/* phy Soft reset */
rc = genphy_soft_reset(phydev);
@@ -405,21 +435,28 @@ static int lan87xx_phy_init(struct phy_device *phydev)
return rc;
/* PHY Initialization */
- for (i = 0; i < ARRAY_SIZE(init); i++) {
- if (init[i].mode == PHYACC_ATTR_MODE_POLL &&
- init[i].bank == PHYACC_ATTR_BANK_SMI) {
- rc = access_smi_poll_timeout(phydev,
- init[i].offset,
- init[i].val,
- init[i].mask);
- } else {
- rc = access_ereg(phydev, init[i].mode, init[i].bank,
- init[i].offset, init[i].val);
- }
+ rc = lan87xx_phy_init_cmd(phydev, hw_init, ARRAY_SIZE(hw_init));
+ if (rc < 0)
+ return rc;
+
+ rc = genphy_read_master_slave(phydev);
+ if (rc)
+ return rc;
+
+ /* The following squence needs to run only if phydev is in
+ * slave mode.
+ */
+ if (phydev->master_slave_state == MASTER_SLAVE_STATE_SLAVE) {
+ rc = lan87xx_phy_init_cmd(phydev, slave_init,
+ ARRAY_SIZE(slave_init));
if (rc < 0)
return rc;
}
+ rc = lan87xx_phy_init_cmd(phydev, phy_init, ARRAY_SIZE(phy_init));
+ if (rc < 0)
+ return rc;
+
return lan87xx_config_rgmii_delay(phydev);
}
@@ -775,6 +812,7 @@ static int lan87xx_read_status(struct phy_device *phydev)
static int lan87xx_config_aneg(struct phy_device *phydev)
{
u16 ctl = 0;
+ int ret;
switch (phydev->master_slave_set) {
case MASTER_SLAVE_CFG_MASTER_FORCE:
@@ -790,7 +828,11 @@ static int lan87xx_config_aneg(struct phy_device *phydev)
return -EOPNOTSUPP;
}
- return phy_modify_changed(phydev, MII_CTRL1000, CTL1000_AS_MASTER, ctl);
+ ret = phy_modify_changed(phydev, MII_CTRL1000, CTL1000_AS_MASTER, ctl);
+ if (ret == 1)
+ return phy_init_hw(phydev);
+
+ return ret;
}
static int lan87xx_get_sqi(struct phy_device *phydev)
diff --git a/drivers/net/phy/motorcomm.c b/drivers/net/phy/motorcomm.c
index 685190db72de..2fa5a90e073b 100644
--- a/drivers/net/phy/motorcomm.c
+++ b/drivers/net/phy/motorcomm.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0+
/*
- * Motorcomm 8511/8521/8531S PHY driver.
+ * Motorcomm 8511/8521/8531/8531S PHY driver.
*
* Author: Peter Geis <pgwipeout@gmail.com>
* Author: Frank <Frank.Sae@motor-comm.com>
@@ -10,10 +10,12 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/phy.h>
+#include <linux/of.h>
#define PHY_ID_YT8511 0x0000010a
-#define PHY_ID_YT8521 0x0000011A
-#define PHY_ID_YT8531S 0x4F51E91A
+#define PHY_ID_YT8521 0x0000011a
+#define PHY_ID_YT8531 0x4f51e91b
+#define PHY_ID_YT8531S 0x4f51e91a
/* YT8521/YT8531S Register Overview
* UTP Register space | FIBER Register space
@@ -161,6 +163,11 @@
#define YT8521_CHIP_CONFIG_REG 0xA001
#define YT8521_CCR_SW_RST BIT(15)
+/* 1b0 disable 1.9ns rxc clock delay *default*
+ * 1b1 enable 1.9ns rxc clock delay
+ */
+#define YT8521_CCR_RXC_DLY_EN BIT(8)
+#define YT8521_CCR_RXC_DLY_1_900_NS 1900
#define YT8521_CCR_MODE_SEL_MASK (BIT(2) | BIT(1) | BIT(0))
#define YT8521_CCR_MODE_UTP_TO_RGMII 0
@@ -178,22 +185,29 @@
#define YT8521_MODE_POLL 0x3
#define YT8521_RGMII_CONFIG1_REG 0xA003
-
-/* TX Gig-E Delay is bits 3:0, default 0x1
- * TX Fast-E Delay is bits 7:4, default 0xf
- * RX Delay is bits 13:10, default 0x0
- * Delay = 150ps * N
- * On = 2250ps, off = 0ps
+/* 1b0 use original tx_clk_rgmii *default*
+ * 1b1 use inverted tx_clk_rgmii.
*/
-#define YT8521_RC1R_RX_DELAY_MASK (0xF << 10)
-#define YT8521_RC1R_RX_DELAY_EN (0xF << 10)
-#define YT8521_RC1R_RX_DELAY_DIS (0x0 << 10)
-#define YT8521_RC1R_FE_TX_DELAY_MASK (0xF << 4)
-#define YT8521_RC1R_FE_TX_DELAY_EN (0xF << 4)
-#define YT8521_RC1R_FE_TX_DELAY_DIS (0x0 << 4)
-#define YT8521_RC1R_GE_TX_DELAY_MASK (0xF << 0)
-#define YT8521_RC1R_GE_TX_DELAY_EN (0xF << 0)
-#define YT8521_RC1R_GE_TX_DELAY_DIS (0x0 << 0)
+#define YT8521_RC1R_TX_CLK_SEL_INVERTED BIT(14)
+#define YT8521_RC1R_RX_DELAY_MASK GENMASK(13, 10)
+#define YT8521_RC1R_FE_TX_DELAY_MASK GENMASK(7, 4)
+#define YT8521_RC1R_GE_TX_DELAY_MASK GENMASK(3, 0)
+#define YT8521_RC1R_RGMII_0_000_NS 0
+#define YT8521_RC1R_RGMII_0_150_NS 1
+#define YT8521_RC1R_RGMII_0_300_NS 2
+#define YT8521_RC1R_RGMII_0_450_NS 3
+#define YT8521_RC1R_RGMII_0_600_NS 4
+#define YT8521_RC1R_RGMII_0_750_NS 5
+#define YT8521_RC1R_RGMII_0_900_NS 6
+#define YT8521_RC1R_RGMII_1_050_NS 7
+#define YT8521_RC1R_RGMII_1_200_NS 8
+#define YT8521_RC1R_RGMII_1_350_NS 9
+#define YT8521_RC1R_RGMII_1_500_NS 10
+#define YT8521_RC1R_RGMII_1_650_NS 11
+#define YT8521_RC1R_RGMII_1_800_NS 12
+#define YT8521_RC1R_RGMII_1_950_NS 13
+#define YT8521_RC1R_RGMII_2_100_NS 14
+#define YT8521_RC1R_RGMII_2_250_NS 15
#define YTPHY_MISC_CONFIG_REG 0xA006
#define YTPHY_MCR_FIBER_SPEED_MASK BIT(0)
@@ -222,11 +236,36 @@
*/
#define YTPHY_WCR_TYPE_PULSE BIT(0)
-#define YT8531S_SYNCE_CFG_REG 0xA012
-#define YT8531S_SCR_SYNCE_ENABLE BIT(6)
+#define YTPHY_SYNCE_CFG_REG 0xA012
+#define YT8521_SCR_SYNCE_ENABLE BIT(5)
+/* 1b0 output 25m clock
+ * 1b1 output 125m clock *default*
+ */
+#define YT8521_SCR_CLK_FRE_SEL_125M BIT(3)
+#define YT8521_SCR_CLK_SRC_MASK GENMASK(2, 1)
+#define YT8521_SCR_CLK_SRC_PLL_125M 0
+#define YT8521_SCR_CLK_SRC_UTP_RX 1
+#define YT8521_SCR_CLK_SRC_SDS_RX 2
+#define YT8521_SCR_CLK_SRC_REF_25M 3
+#define YT8531_SCR_SYNCE_ENABLE BIT(6)
+/* 1b0 output 25m clock *default*
+ * 1b1 output 125m clock
+ */
+#define YT8531_SCR_CLK_FRE_SEL_125M BIT(4)
+#define YT8531_SCR_CLK_SRC_MASK GENMASK(3, 1)
+#define YT8531_SCR_CLK_SRC_PLL_125M 0
+#define YT8531_SCR_CLK_SRC_UTP_RX 1
+#define YT8531_SCR_CLK_SRC_SDS_RX 2
+#define YT8531_SCR_CLK_SRC_CLOCK_FROM_DIGITAL 3
+#define YT8531_SCR_CLK_SRC_REF_25M 4
+#define YT8531_SCR_CLK_SRC_SSC_25M 5
/* Extended Register end */
+#define YTPHY_DTS_OUTPUT_CLK_DIS 0
+#define YTPHY_DTS_OUTPUT_CLK_25M 25000000
+#define YTPHY_DTS_OUTPUT_CLK_125M 125000000
+
struct yt8521_priv {
/* combo_advertising is used for case of YT8521 in combo mode,
* this means that yt8521 may work in utp or fiber mode which depends
@@ -479,6 +518,61 @@ err_restore_page:
return phy_restore_page(phydev, old_page, ret);
}
+static int yt8531_set_wol(struct phy_device *phydev,
+ struct ethtool_wolinfo *wol)
+{
+ const u16 mac_addr_reg[] = {
+ YTPHY_WOL_MACADDR2_REG,
+ YTPHY_WOL_MACADDR1_REG,
+ YTPHY_WOL_MACADDR0_REG,
+ };
+ const u8 *mac_addr;
+ u16 mask, val;
+ int ret;
+ u8 i;
+
+ if (wol->wolopts & WAKE_MAGIC) {
+ mac_addr = phydev->attached_dev->dev_addr;
+
+ /* Store the device address for the magic packet */
+ for (i = 0; i < 3; i++) {
+ ret = ytphy_write_ext_with_lock(phydev, mac_addr_reg[i],
+ ((mac_addr[i * 2] << 8)) |
+ (mac_addr[i * 2 + 1]));
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Enable WOL feature */
+ mask = YTPHY_WCR_PULSE_WIDTH_MASK | YTPHY_WCR_INTR_SEL;
+ val = YTPHY_WCR_ENABLE | YTPHY_WCR_INTR_SEL;
+ val |= YTPHY_WCR_TYPE_PULSE | YTPHY_WCR_PULSE_WIDTH_672MS;
+ ret = ytphy_modify_ext_with_lock(phydev, YTPHY_WOL_CONFIG_REG,
+ mask, val);
+ if (ret < 0)
+ return ret;
+
+ /* Enable WOL interrupt */
+ ret = phy_modify(phydev, YTPHY_INTERRUPT_ENABLE_REG, 0,
+ YTPHY_IER_WOL);
+ if (ret < 0)
+ return ret;
+ } else {
+ /* Disable WOL feature */
+ mask = YTPHY_WCR_ENABLE | YTPHY_WCR_INTR_SEL;
+ ret = ytphy_modify_ext_with_lock(phydev, YTPHY_WOL_CONFIG_REG,
+ mask, 0);
+
+ /* Disable WOL interrupt */
+ ret = phy_modify(phydev, YTPHY_INTERRUPT_ENABLE_REG,
+ YTPHY_IER_WOL, 0);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
static int yt8511_read_page(struct phy_device *phydev)
{
return __phy_read(phydev, YT8511_PAGE_SELECT);
@@ -594,6 +688,153 @@ static int yt8521_write_page(struct phy_device *phydev, int page)
};
/**
+ * struct ytphy_cfg_reg_map - map a config value to a register value
+ * @cfg: value in device configuration
+ * @reg: value in the register
+ */
+struct ytphy_cfg_reg_map {
+ u32 cfg;
+ u32 reg;
+};
+
+static const struct ytphy_cfg_reg_map ytphy_rgmii_delays[] = {
+ /* for tx delay / rx delay with YT8521_CCR_RXC_DLY_EN is not set. */
+ { 0, YT8521_RC1R_RGMII_0_000_NS },
+ { 150, YT8521_RC1R_RGMII_0_150_NS },
+ { 300, YT8521_RC1R_RGMII_0_300_NS },
+ { 450, YT8521_RC1R_RGMII_0_450_NS },
+ { 600, YT8521_RC1R_RGMII_0_600_NS },
+ { 750, YT8521_RC1R_RGMII_0_750_NS },
+ { 900, YT8521_RC1R_RGMII_0_900_NS },
+ { 1050, YT8521_RC1R_RGMII_1_050_NS },
+ { 1200, YT8521_RC1R_RGMII_1_200_NS },
+ { 1350, YT8521_RC1R_RGMII_1_350_NS },
+ { 1500, YT8521_RC1R_RGMII_1_500_NS },
+ { 1650, YT8521_RC1R_RGMII_1_650_NS },
+ { 1800, YT8521_RC1R_RGMII_1_800_NS },
+ { 1950, YT8521_RC1R_RGMII_1_950_NS }, /* default tx/rx delay */
+ { 2100, YT8521_RC1R_RGMII_2_100_NS },
+ { 2250, YT8521_RC1R_RGMII_2_250_NS },
+
+ /* only for rx delay with YT8521_CCR_RXC_DLY_EN is set. */
+ { 0 + YT8521_CCR_RXC_DLY_1_900_NS, YT8521_RC1R_RGMII_0_000_NS },
+ { 150 + YT8521_CCR_RXC_DLY_1_900_NS, YT8521_RC1R_RGMII_0_150_NS },
+ { 300 + YT8521_CCR_RXC_DLY_1_900_NS, YT8521_RC1R_RGMII_0_300_NS },
+ { 450 + YT8521_CCR_RXC_DLY_1_900_NS, YT8521_RC1R_RGMII_0_450_NS },
+ { 600 + YT8521_CCR_RXC_DLY_1_900_NS, YT8521_RC1R_RGMII_0_600_NS },
+ { 750 + YT8521_CCR_RXC_DLY_1_900_NS, YT8521_RC1R_RGMII_0_750_NS },
+ { 900 + YT8521_CCR_RXC_DLY_1_900_NS, YT8521_RC1R_RGMII_0_900_NS },
+ { 1050 + YT8521_CCR_RXC_DLY_1_900_NS, YT8521_RC1R_RGMII_1_050_NS },
+ { 1200 + YT8521_CCR_RXC_DLY_1_900_NS, YT8521_RC1R_RGMII_1_200_NS },
+ { 1350 + YT8521_CCR_RXC_DLY_1_900_NS, YT8521_RC1R_RGMII_1_350_NS },
+ { 1500 + YT8521_CCR_RXC_DLY_1_900_NS, YT8521_RC1R_RGMII_1_500_NS },
+ { 1650 + YT8521_CCR_RXC_DLY_1_900_NS, YT8521_RC1R_RGMII_1_650_NS },
+ { 1800 + YT8521_CCR_RXC_DLY_1_900_NS, YT8521_RC1R_RGMII_1_800_NS },
+ { 1950 + YT8521_CCR_RXC_DLY_1_900_NS, YT8521_RC1R_RGMII_1_950_NS },
+ { 2100 + YT8521_CCR_RXC_DLY_1_900_NS, YT8521_RC1R_RGMII_2_100_NS },
+ { 2250 + YT8521_CCR_RXC_DLY_1_900_NS, YT8521_RC1R_RGMII_2_250_NS }
+};
+
+static u32 ytphy_get_delay_reg_value(struct phy_device *phydev,
+ const char *prop_name,
+ const struct ytphy_cfg_reg_map *tbl,
+ int tb_size,
+ u16 *rxc_dly_en,
+ u32 dflt)
+{
+ struct device_node *node = phydev->mdio.dev.of_node;
+ int tb_size_half = tb_size / 2;
+ u32 val;
+ int i;
+
+ if (of_property_read_u32(node, prop_name, &val))
+ goto err_dts_val;
+
+ /* when rxc_dly_en is NULL, it is get the delay for tx, only half of
+ * tb_size is valid.
+ */
+ if (!rxc_dly_en)
+ tb_size = tb_size_half;
+
+ for (i = 0; i < tb_size; i++) {
+ if (tbl[i].cfg == val) {
+ if (rxc_dly_en && i < tb_size_half)
+ *rxc_dly_en = 0;
+ return tbl[i].reg;
+ }
+ }
+
+ phydev_warn(phydev, "Unsupported value %d for %s using default (%u)\n",
+ val, prop_name, dflt);
+
+err_dts_val:
+ /* when rxc_dly_en is not NULL, it is get the delay for rx.
+ * The rx default in dts and ytphy_rgmii_clk_delay_config is 1950 ps,
+ * so YT8521_CCR_RXC_DLY_EN should not be set.
+ */
+ if (rxc_dly_en)
+ *rxc_dly_en = 0;
+
+ return dflt;
+}
+
+static int ytphy_rgmii_clk_delay_config(struct phy_device *phydev)
+{
+ int tb_size = ARRAY_SIZE(ytphy_rgmii_delays);
+ u16 rxc_dly_en = YT8521_CCR_RXC_DLY_EN;
+ u32 rx_reg, tx_reg;
+ u16 mask, val = 0;
+ int ret;
+
+ rx_reg = ytphy_get_delay_reg_value(phydev, "rx-internal-delay-ps",
+ ytphy_rgmii_delays, tb_size,
+ &rxc_dly_en,
+ YT8521_RC1R_RGMII_1_950_NS);
+ tx_reg = ytphy_get_delay_reg_value(phydev, "tx-internal-delay-ps",
+ ytphy_rgmii_delays, tb_size, NULL,
+ YT8521_RC1R_RGMII_1_950_NS);
+
+ switch (phydev->interface) {
+ case PHY_INTERFACE_MODE_RGMII:
+ rxc_dly_en = 0;
+ break;
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ val |= FIELD_PREP(YT8521_RC1R_RX_DELAY_MASK, rx_reg);
+ break;
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ rxc_dly_en = 0;
+ val |= FIELD_PREP(YT8521_RC1R_GE_TX_DELAY_MASK, tx_reg);
+ break;
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ val |= FIELD_PREP(YT8521_RC1R_RX_DELAY_MASK, rx_reg) |
+ FIELD_PREP(YT8521_RC1R_GE_TX_DELAY_MASK, tx_reg);
+ break;
+ default: /* do not support other modes */
+ return -EOPNOTSUPP;
+ }
+
+ ret = ytphy_modify_ext(phydev, YT8521_CHIP_CONFIG_REG,
+ YT8521_CCR_RXC_DLY_EN, rxc_dly_en);
+ if (ret < 0)
+ return ret;
+
+ /* Generally, it is not necessary to adjust YT8521_RC1R_FE_TX_DELAY */
+ mask = YT8521_RC1R_RX_DELAY_MASK | YT8521_RC1R_GE_TX_DELAY_MASK;
+ return ytphy_modify_ext(phydev, YT8521_RGMII_CONFIG1_REG, mask, val);
+}
+
+static int ytphy_rgmii_clk_delay_config_with_lock(struct phy_device *phydev)
+{
+ int ret;
+
+ phy_lock_mdio_bus(phydev);
+ ret = ytphy_rgmii_clk_delay_config(phydev);
+ phy_unlock_mdio_bus(phydev);
+
+ return ret;
+}
+
+/**
* yt8521_probe() - read chip config then set suitable polling_mode
* @phydev: a pointer to a &struct phy_device
*
@@ -601,9 +842,12 @@ static int yt8521_write_page(struct phy_device *phydev, int page)
*/
static int yt8521_probe(struct phy_device *phydev)
{
+ struct device_node *node = phydev->mdio.dev.of_node;
struct device *dev = &phydev->mdio.dev;
struct yt8521_priv *priv;
int chip_config;
+ u16 mask, val;
+ u32 freq;
int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
@@ -648,27 +892,107 @@ static int yt8521_probe(struct phy_device *phydev)
return ret;
}
- return 0;
+ if (of_property_read_u32(node, "motorcomm,clk-out-frequency-hz", &freq))
+ freq = YTPHY_DTS_OUTPUT_CLK_DIS;
+
+ if (phydev->drv->phy_id == PHY_ID_YT8521) {
+ switch (freq) {
+ case YTPHY_DTS_OUTPUT_CLK_DIS:
+ mask = YT8521_SCR_SYNCE_ENABLE;
+ val = 0;
+ break;
+ case YTPHY_DTS_OUTPUT_CLK_25M:
+ mask = YT8521_SCR_SYNCE_ENABLE |
+ YT8521_SCR_CLK_SRC_MASK |
+ YT8521_SCR_CLK_FRE_SEL_125M;
+ val = YT8521_SCR_SYNCE_ENABLE |
+ FIELD_PREP(YT8521_SCR_CLK_SRC_MASK,
+ YT8521_SCR_CLK_SRC_REF_25M);
+ break;
+ case YTPHY_DTS_OUTPUT_CLK_125M:
+ mask = YT8521_SCR_SYNCE_ENABLE |
+ YT8521_SCR_CLK_SRC_MASK |
+ YT8521_SCR_CLK_FRE_SEL_125M;
+ val = YT8521_SCR_SYNCE_ENABLE |
+ YT8521_SCR_CLK_FRE_SEL_125M |
+ FIELD_PREP(YT8521_SCR_CLK_SRC_MASK,
+ YT8521_SCR_CLK_SRC_PLL_125M);
+ break;
+ default:
+ phydev_warn(phydev, "Freq err:%u\n", freq);
+ return -EINVAL;
+ }
+ } else if (phydev->drv->phy_id == PHY_ID_YT8531S) {
+ switch (freq) {
+ case YTPHY_DTS_OUTPUT_CLK_DIS:
+ mask = YT8531_SCR_SYNCE_ENABLE;
+ val = 0;
+ break;
+ case YTPHY_DTS_OUTPUT_CLK_25M:
+ mask = YT8531_SCR_SYNCE_ENABLE |
+ YT8531_SCR_CLK_SRC_MASK |
+ YT8531_SCR_CLK_FRE_SEL_125M;
+ val = YT8531_SCR_SYNCE_ENABLE |
+ FIELD_PREP(YT8531_SCR_CLK_SRC_MASK,
+ YT8531_SCR_CLK_SRC_REF_25M);
+ break;
+ case YTPHY_DTS_OUTPUT_CLK_125M:
+ mask = YT8531_SCR_SYNCE_ENABLE |
+ YT8531_SCR_CLK_SRC_MASK |
+ YT8531_SCR_CLK_FRE_SEL_125M;
+ val = YT8531_SCR_SYNCE_ENABLE |
+ YT8531_SCR_CLK_FRE_SEL_125M |
+ FIELD_PREP(YT8531_SCR_CLK_SRC_MASK,
+ YT8531_SCR_CLK_SRC_PLL_125M);
+ break;
+ default:
+ phydev_warn(phydev, "Freq err:%u\n", freq);
+ return -EINVAL;
+ }
+ } else {
+ phydev_warn(phydev, "PHY id err\n");
+ return -EINVAL;
+ }
+
+ return ytphy_modify_ext_with_lock(phydev, YTPHY_SYNCE_CFG_REG, mask,
+ val);
}
-/**
- * yt8531s_probe() - read chip config then set suitable polling_mode
- * @phydev: a pointer to a &struct phy_device
- *
- * returns 0 or negative errno code
- */
-static int yt8531s_probe(struct phy_device *phydev)
+static int yt8531_probe(struct phy_device *phydev)
{
- int ret;
+ struct device_node *node = phydev->mdio.dev.of_node;
+ u16 mask, val;
+ u32 freq;
- /* Disable SyncE clock output by default */
- ret = ytphy_modify_ext_with_lock(phydev, YT8531S_SYNCE_CFG_REG,
- YT8531S_SCR_SYNCE_ENABLE, 0);
- if (ret < 0)
- return ret;
+ if (of_property_read_u32(node, "motorcomm,clk-out-frequency-hz", &freq))
+ freq = YTPHY_DTS_OUTPUT_CLK_DIS;
+
+ switch (freq) {
+ case YTPHY_DTS_OUTPUT_CLK_DIS:
+ mask = YT8531_SCR_SYNCE_ENABLE;
+ val = 0;
+ break;
+ case YTPHY_DTS_OUTPUT_CLK_25M:
+ mask = YT8531_SCR_SYNCE_ENABLE | YT8531_SCR_CLK_SRC_MASK |
+ YT8531_SCR_CLK_FRE_SEL_125M;
+ val = YT8531_SCR_SYNCE_ENABLE |
+ FIELD_PREP(YT8531_SCR_CLK_SRC_MASK,
+ YT8531_SCR_CLK_SRC_REF_25M);
+ break;
+ case YTPHY_DTS_OUTPUT_CLK_125M:
+ mask = YT8531_SCR_SYNCE_ENABLE | YT8531_SCR_CLK_SRC_MASK |
+ YT8531_SCR_CLK_FRE_SEL_125M;
+ val = YT8531_SCR_SYNCE_ENABLE | YT8531_SCR_CLK_FRE_SEL_125M |
+ FIELD_PREP(YT8531_SCR_CLK_SRC_MASK,
+ YT8531_SCR_CLK_SRC_PLL_125M);
+ break;
+ default:
+ phydev_warn(phydev, "Freq err:%u\n", freq);
+ return -EINVAL;
+ }
- /* same as yt8521_probe */
- return yt8521_probe(phydev);
+ return ytphy_modify_ext_with_lock(phydev, YTPHY_SYNCE_CFG_REG, mask,
+ val);
}
/**
@@ -1133,65 +1457,128 @@ static int yt8521_resume(struct phy_device *phydev)
*/
static int yt8521_config_init(struct phy_device *phydev)
{
+ struct device_node *node = phydev->mdio.dev.of_node;
int old_page;
int ret = 0;
- u16 val;
old_page = phy_select_page(phydev, YT8521_RSSR_UTP_SPACE);
if (old_page < 0)
goto err_restore_page;
- switch (phydev->interface) {
- case PHY_INTERFACE_MODE_RGMII:
- val = YT8521_RC1R_GE_TX_DELAY_DIS | YT8521_RC1R_FE_TX_DELAY_DIS;
- val |= YT8521_RC1R_RX_DELAY_DIS;
- break;
- case PHY_INTERFACE_MODE_RGMII_RXID:
- val = YT8521_RC1R_GE_TX_DELAY_DIS | YT8521_RC1R_FE_TX_DELAY_DIS;
- val |= YT8521_RC1R_RX_DELAY_EN;
- break;
- case PHY_INTERFACE_MODE_RGMII_TXID:
- val = YT8521_RC1R_GE_TX_DELAY_EN | YT8521_RC1R_FE_TX_DELAY_EN;
- val |= YT8521_RC1R_RX_DELAY_DIS;
- break;
- case PHY_INTERFACE_MODE_RGMII_ID:
- val = YT8521_RC1R_GE_TX_DELAY_EN | YT8521_RC1R_FE_TX_DELAY_EN;
- val |= YT8521_RC1R_RX_DELAY_EN;
- break;
- case PHY_INTERFACE_MODE_SGMII:
- break;
- default: /* do not support other modes */
- ret = -EOPNOTSUPP;
- goto err_restore_page;
- }
-
/* set rgmii delay mode */
if (phydev->interface != PHY_INTERFACE_MODE_SGMII) {
- ret = ytphy_modify_ext(phydev, YT8521_RGMII_CONFIG1_REG,
- (YT8521_RC1R_RX_DELAY_MASK |
- YT8521_RC1R_FE_TX_DELAY_MASK |
- YT8521_RC1R_GE_TX_DELAY_MASK),
- val);
+ ret = ytphy_rgmii_clk_delay_config(phydev);
if (ret < 0)
goto err_restore_page;
}
- /* disable auto sleep */
- ret = ytphy_modify_ext(phydev, YT8521_EXTREG_SLEEP_CONTROL1_REG,
- YT8521_ESC1R_SLEEP_SW, 0);
- if (ret < 0)
- goto err_restore_page;
-
- /* enable RXC clock when no wire plug */
- ret = ytphy_modify_ext(phydev, YT8521_CLOCK_GATING_REG,
- YT8521_CGR_RX_CLK_EN, 0);
- if (ret < 0)
- goto err_restore_page;
+ if (of_property_read_bool(node, "motorcomm,auto-sleep-disabled")) {
+ /* disable auto sleep */
+ ret = ytphy_modify_ext(phydev, YT8521_EXTREG_SLEEP_CONTROL1_REG,
+ YT8521_ESC1R_SLEEP_SW, 0);
+ if (ret < 0)
+ goto err_restore_page;
+ }
+ if (of_property_read_bool(node, "motorcomm,keep-pll-enabled")) {
+ /* enable RXC clock when no wire plug */
+ ret = ytphy_modify_ext(phydev, YT8521_CLOCK_GATING_REG,
+ YT8521_CGR_RX_CLK_EN, 0);
+ if (ret < 0)
+ goto err_restore_page;
+ }
err_restore_page:
return phy_restore_page(phydev, old_page, ret);
}
+static int yt8531_config_init(struct phy_device *phydev)
+{
+ struct device_node *node = phydev->mdio.dev.of_node;
+ int ret;
+
+ ret = ytphy_rgmii_clk_delay_config_with_lock(phydev);
+ if (ret < 0)
+ return ret;
+
+ if (of_property_read_bool(node, "motorcomm,auto-sleep-disabled")) {
+ /* disable auto sleep */
+ ret = ytphy_modify_ext_with_lock(phydev,
+ YT8521_EXTREG_SLEEP_CONTROL1_REG,
+ YT8521_ESC1R_SLEEP_SW, 0);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (of_property_read_bool(node, "motorcomm,keep-pll-enabled")) {
+ /* enable RXC clock when no wire plug */
+ ret = ytphy_modify_ext_with_lock(phydev,
+ YT8521_CLOCK_GATING_REG,
+ YT8521_CGR_RX_CLK_EN, 0);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * yt8531_link_change_notify() - Adjust the tx clock direction according to
+ * the current speed and dts config.
+ * @phydev: a pointer to a &struct phy_device
+ *
+ * NOTE: This function is only used to adapt to VF2 with JH7110 SoC. Please
+ * keep "motorcomm,tx-clk-adj-enabled" not exist in dts when the soc is not
+ * JH7110.
+ */
+static void yt8531_link_change_notify(struct phy_device *phydev)
+{
+ struct device_node *node = phydev->mdio.dev.of_node;
+ bool tx_clk_1000_inverted = false;
+ bool tx_clk_100_inverted = false;
+ bool tx_clk_10_inverted = false;
+ bool tx_clk_adj_enabled = false;
+ u16 val = 0;
+ int ret;
+
+ if (of_property_read_bool(node, "motorcomm,tx-clk-adj-enabled"))
+ tx_clk_adj_enabled = true;
+
+ if (!tx_clk_adj_enabled)
+ return;
+
+ if (of_property_read_bool(node, "motorcomm,tx-clk-10-inverted"))
+ tx_clk_10_inverted = true;
+ if (of_property_read_bool(node, "motorcomm,tx-clk-100-inverted"))
+ tx_clk_100_inverted = true;
+ if (of_property_read_bool(node, "motorcomm,tx-clk-1000-inverted"))
+ tx_clk_1000_inverted = true;
+
+ if (phydev->speed < 0)
+ return;
+
+ switch (phydev->speed) {
+ case SPEED_1000:
+ if (tx_clk_1000_inverted)
+ val = YT8521_RC1R_TX_CLK_SEL_INVERTED;
+ break;
+ case SPEED_100:
+ if (tx_clk_100_inverted)
+ val = YT8521_RC1R_TX_CLK_SEL_INVERTED;
+ break;
+ case SPEED_10:
+ if (tx_clk_10_inverted)
+ val = YT8521_RC1R_TX_CLK_SEL_INVERTED;
+ break;
+ default:
+ return;
+ }
+
+ ret = ytphy_modify_ext_with_lock(phydev, YT8521_RGMII_CONFIG1_REG,
+ YT8521_RC1R_TX_CLK_SEL_INVERTED, val);
+ if (ret < 0)
+ phydev_warn(phydev, "Modify TX_CLK_SEL err:%d\n", ret);
+}
+
/**
* yt8521_prepare_fiber_features() - A small helper function that setup
* fiber's features.
@@ -1775,10 +2162,21 @@ static struct phy_driver motorcomm_phy_drvs[] = {
.resume = yt8521_resume,
},
{
+ PHY_ID_MATCH_EXACT(PHY_ID_YT8531),
+ .name = "YT8531 Gigabit Ethernet",
+ .probe = yt8531_probe,
+ .config_init = yt8531_config_init,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ .get_wol = ytphy_get_wol,
+ .set_wol = yt8531_set_wol,
+ .link_change_notify = yt8531_link_change_notify,
+ },
+ {
PHY_ID_MATCH_EXACT(PHY_ID_YT8531S),
.name = "YT8531S Gigabit Ethernet",
.get_features = yt8521_get_features,
- .probe = yt8531s_probe,
+ .probe = yt8521_probe,
.read_page = yt8521_read_page,
.write_page = yt8521_write_page,
.get_wol = ytphy_get_wol,
@@ -1795,7 +2193,7 @@ static struct phy_driver motorcomm_phy_drvs[] = {
module_phy_driver(motorcomm_phy_drvs);
-MODULE_DESCRIPTION("Motorcomm 8511/8521/8531S PHY driver");
+MODULE_DESCRIPTION("Motorcomm 8511/8521/8531/8531S PHY driver");
MODULE_AUTHOR("Peter Geis");
MODULE_AUTHOR("Frank");
MODULE_LICENSE("GPL");
@@ -1803,8 +2201,9 @@ MODULE_LICENSE("GPL");
static const struct mdio_device_id __maybe_unused motorcomm_tbl[] = {
{ PHY_ID_MATCH_EXACT(PHY_ID_YT8511) },
{ PHY_ID_MATCH_EXACT(PHY_ID_YT8521) },
+ { PHY_ID_MATCH_EXACT(PHY_ID_YT8531) },
{ PHY_ID_MATCH_EXACT(PHY_ID_YT8531S) },
- { /* sentinal */ }
+ { /* sentinel */ }
};
MODULE_DEVICE_TABLE(mdio, motorcomm_tbl);
diff --git a/drivers/net/phy/mxl-gpy.c b/drivers/net/phy/mxl-gpy.c
index 147d7a5a9b35..e5972b4ef6e8 100644
--- a/drivers/net/phy/mxl-gpy.c
+++ b/drivers/net/phy/mxl-gpy.c
@@ -12,6 +12,7 @@
#include <linux/mutex.h>
#include <linux/phy.h>
#include <linux/polynomial.h>
+#include <linux/property.h>
#include <linux/netdevice.h>
/* PHY ID */
@@ -292,6 +293,10 @@ static int gpy_probe(struct phy_device *phydev)
phydev->priv = priv;
mutex_init(&priv->mbox_lock);
+ if (gpy_has_broken_mdint(phydev) &&
+ !device_property_present(dev, "maxlinear,use-broken-interrupts"))
+ phydev->dev_flags |= PHY_F_NO_IRQ;
+
fw_version = phy_read(phydev, PHY_FWV);
if (fw_version < 0)
return fw_version;
diff --git a/drivers/net/phy/ncn26000.c b/drivers/net/phy/ncn26000.c
new file mode 100644
index 000000000000..5680584f659e
--- /dev/null
+++ b/drivers/net/phy/ncn26000.c
@@ -0,0 +1,171 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Driver for the onsemi 10BASE-T1S NCN26000 PHYs family.
+ *
+ * Copyright 2022 onsemi
+ */
+#include <linux/kernel.h>
+#include <linux/bitfield.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mii.h>
+#include <linux/phy.h>
+
+#include "mdio-open-alliance.h"
+
+#define PHY_ID_NCN26000 0x180FF5A1
+
+#define NCN26000_REG_IRQ_CTL 16
+#define NCN26000_REG_IRQ_STATUS 17
+
+// the NCN26000 maps link_ctrl to BMCR_ANENABLE
+#define NCN26000_BCMR_LINK_CTRL_BIT BMCR_ANENABLE
+
+// the NCN26000 maps link_status to BMSR_ANEGCOMPLETE
+#define NCN26000_BMSR_LINK_STATUS_BIT BMSR_ANEGCOMPLETE
+
+#define NCN26000_IRQ_LINKST_BIT BIT(0)
+#define NCN26000_IRQ_PLCAST_BIT BIT(1)
+#define NCN26000_IRQ_LJABBER_BIT BIT(2)
+#define NCN26000_IRQ_RJABBER_BIT BIT(3)
+#define NCN26000_IRQ_PLCAREC_BIT BIT(4)
+#define NCN26000_IRQ_PHYSCOL_BIT BIT(5)
+#define NCN26000_IRQ_RESET_BIT BIT(15)
+
+#define TO_TMR_DEFAULT 32
+
+static int ncn26000_config_init(struct phy_device *phydev)
+{
+ /* HW bug workaround: the default value of the PLCA TO_TIMER should be
+ * 32, where the current version of NCN26000 reports 24. This will be
+ * fixed in future PHY versions. For the time being, we force the
+ * correct default here.
+ */
+ return phy_write_mmd(phydev, MDIO_MMD_VEND2, MDIO_OATC14_PLCA_TOTMR,
+ TO_TMR_DEFAULT);
+}
+
+static int ncn26000_config_aneg(struct phy_device *phydev)
+{
+ /* Note: the NCN26000 supports only P2MP link mode. Therefore, AN is not
+ * supported. However, this function is invoked by phylib to enable the
+ * PHY, regardless of the AN support.
+ */
+ phydev->mdix_ctrl = ETH_TP_MDI_AUTO;
+ phydev->mdix = ETH_TP_MDI;
+
+ // bring up the link
+ return phy_write(phydev, MII_BMCR, NCN26000_BCMR_LINK_CTRL_BIT);
+}
+
+static int ncn26000_read_status(struct phy_device *phydev)
+{
+ /* The NCN26000 reports NCN26000_LINK_STATUS_BIT if the link status of
+ * the PHY is up. It further reports the logical AND of the link status
+ * and the PLCA status in the BMSR_LSTATUS bit.
+ */
+ int ret;
+
+ /* The link state is latched low so that momentary link
+ * drops can be detected. Do not double-read the status
+ * in polling mode to detect such short link drops except
+ * the link was already down.
+ */
+ if (!phy_polling_mode(phydev) || !phydev->link) {
+ ret = phy_read(phydev, MII_BMSR);
+ if (ret < 0)
+ return ret;
+ else if (ret & NCN26000_BMSR_LINK_STATUS_BIT)
+ goto upd_link;
+ }
+
+ ret = phy_read(phydev, MII_BMSR);
+ if (ret < 0)
+ return ret;
+
+upd_link:
+ // update link status
+ if (ret & NCN26000_BMSR_LINK_STATUS_BIT) {
+ phydev->link = 1;
+ phydev->pause = 0;
+ phydev->duplex = DUPLEX_HALF;
+ phydev->speed = SPEED_10;
+ } else {
+ phydev->link = 0;
+ phydev->duplex = DUPLEX_UNKNOWN;
+ phydev->speed = SPEED_UNKNOWN;
+ }
+
+ return 0;
+}
+
+static irqreturn_t ncn26000_handle_interrupt(struct phy_device *phydev)
+{
+ int ret;
+
+ // read and aknowledge the IRQ status register
+ ret = phy_read(phydev, NCN26000_REG_IRQ_STATUS);
+
+ // check only link status changes
+ if (ret < 0 || (ret & NCN26000_REG_IRQ_STATUS) == 0)
+ return IRQ_NONE;
+
+ phy_trigger_machine(phydev);
+ return IRQ_HANDLED;
+}
+
+static int ncn26000_config_intr(struct phy_device *phydev)
+{
+ int ret;
+ u16 irqe;
+
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ // acknowledge IRQs
+ ret = phy_read(phydev, NCN26000_REG_IRQ_STATUS);
+ if (ret < 0)
+ return ret;
+
+ // get link status notifications
+ irqe = NCN26000_IRQ_LINKST_BIT;
+ } else {
+ // disable all IRQs
+ irqe = 0;
+ }
+
+ ret = phy_write(phydev, NCN26000_REG_IRQ_CTL, irqe);
+ if (ret != 0)
+ return ret;
+
+ return 0;
+}
+
+static struct phy_driver ncn26000_driver[] = {
+ {
+ PHY_ID_MATCH_MODEL(PHY_ID_NCN26000),
+ .name = "NCN26000",
+ .features = PHY_BASIC_T1S_P2MP_FEATURES,
+ .config_init = ncn26000_config_init,
+ .config_intr = ncn26000_config_intr,
+ .config_aneg = ncn26000_config_aneg,
+ .read_status = ncn26000_read_status,
+ .handle_interrupt = ncn26000_handle_interrupt,
+ .get_plca_cfg = genphy_c45_plca_get_cfg,
+ .set_plca_cfg = genphy_c45_plca_set_cfg,
+ .get_plca_status = genphy_c45_plca_get_status,
+ .soft_reset = genphy_soft_reset,
+ },
+};
+
+module_phy_driver(ncn26000_driver);
+
+static struct mdio_device_id __maybe_unused ncn26000_tbl[] = {
+ { PHY_ID_MATCH_MODEL(PHY_ID_NCN26000) },
+ { }
+};
+
+MODULE_DEVICE_TABLE(mdio, ncn26000_tbl);
+
+MODULE_AUTHOR("Piergiorgio Beruto");
+MODULE_DESCRIPTION("onsemi 10BASE-T1S PHY driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c
index a87a4b3ffce4..f9b128cecc3f 100644
--- a/drivers/net/phy/phy-c45.c
+++ b/drivers/net/phy/phy-c45.c
@@ -8,6 +8,8 @@
#include <linux/mii.h>
#include <linux/phy.h>
+#include "mdio-open-alliance.h"
+
/**
* genphy_c45_baset1_able - checks if the PMA has BASE-T1 extended abilities
* @phydev: target phy_device struct
@@ -254,13 +256,17 @@ static int genphy_c45_baset1_an_config_aneg(struct phy_device *phydev)
*/
int genphy_c45_an_config_aneg(struct phy_device *phydev)
{
- int changed, ret;
+ int changed = 0, ret;
u32 adv;
linkmode_and(phydev->advertising, phydev->advertising,
phydev->supported);
- changed = genphy_config_eee_advert(phydev);
+ ret = genphy_c45_write_eee_adv(phydev, phydev->supported_eee);
+ if (ret < 0)
+ return ret;
+ else if (ret)
+ changed = true;
if (genphy_c45_baset1_able(phydev))
return genphy_c45_baset1_an_config_aneg(phydev);
@@ -660,6 +666,199 @@ int genphy_c45_read_mdix(struct phy_device *phydev)
EXPORT_SYMBOL_GPL(genphy_c45_read_mdix);
/**
+ * genphy_c45_write_eee_adv - write advertised EEE link modes
+ * @phydev: target phy_device struct
+ * @adv: the linkmode advertisement settings
+ */
+int genphy_c45_write_eee_adv(struct phy_device *phydev, unsigned long *adv)
+{
+ int val, changed;
+
+ if (linkmode_intersects(phydev->supported, PHY_EEE_CAP1_FEATURES)) {
+ val = linkmode_to_mii_eee_cap1_t(adv);
+
+ /* In eee_broken_modes are stored MDIO_AN_EEE_ADV specific raw
+ * register values.
+ */
+ val &= ~phydev->eee_broken_modes;
+
+ /* IEEE 802.3-2018 45.2.7.13 EEE advertisement 1
+ * (Register 7.60)
+ */
+ val = phy_modify_mmd_changed(phydev, MDIO_MMD_AN,
+ MDIO_AN_EEE_ADV,
+ MDIO_EEE_100TX | MDIO_EEE_1000T |
+ MDIO_EEE_10GT | MDIO_EEE_1000KX |
+ MDIO_EEE_10GKX4 | MDIO_EEE_10GKR,
+ val);
+ if (val < 0)
+ return val;
+ if (val > 0)
+ changed = 1;
+ }
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT1L_Full_BIT,
+ phydev->supported_eee)) {
+ val = linkmode_adv_to_mii_10base_t1_t(adv);
+ /* IEEE 802.3cg-2019 45.2.7.25 10BASE-T1 AN control register
+ * (Register 7.526)
+ */
+ val = phy_modify_mmd_changed(phydev, MDIO_MMD_AN,
+ MDIO_AN_10BT1_AN_CTRL,
+ MDIO_AN_10BT1_AN_CTRL_ADV_EEE_T1L,
+ val);
+ if (val < 0)
+ return val;
+ if (val > 0)
+ changed = 1;
+ }
+
+ return changed;
+}
+
+/**
+ * genphy_c45_read_eee_adv - read advertised EEE link modes
+ * @phydev: target phy_device struct
+ * @adv: the linkmode advertisement status
+ */
+static int genphy_c45_read_eee_adv(struct phy_device *phydev,
+ unsigned long *adv)
+{
+ int val;
+
+ if (linkmode_intersects(phydev->supported, PHY_EEE_CAP1_FEATURES)) {
+ /* IEEE 802.3-2018 45.2.7.13 EEE advertisement 1
+ * (Register 7.60)
+ */
+ val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV);
+ if (val < 0)
+ return val;
+
+ mii_eee_cap1_mod_linkmode_t(adv, val);
+ }
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT1L_Full_BIT,
+ phydev->supported_eee)) {
+ /* IEEE 802.3cg-2019 45.2.7.25 10BASE-T1 AN control register
+ * (Register 7.526)
+ */
+ val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_10BT1_AN_CTRL);
+ if (val < 0)
+ return val;
+
+ mii_10base_t1_adv_mod_linkmode_t(adv, val);
+ }
+
+ return 0;
+}
+
+/**
+ * genphy_c45_read_eee_lpa - read advertised LP EEE link modes
+ * @phydev: target phy_device struct
+ * @lpa: the linkmode LP advertisement status
+ */
+static int genphy_c45_read_eee_lpa(struct phy_device *phydev,
+ unsigned long *lpa)
+{
+ int val;
+
+ if (linkmode_intersects(phydev->supported, PHY_EEE_CAP1_FEATURES)) {
+ /* IEEE 802.3-2018 45.2.7.14 EEE link partner ability 1
+ * (Register 7.61)
+ */
+ val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE);
+ if (val < 0)
+ return val;
+
+ mii_eee_cap1_mod_linkmode_t(lpa, val);
+ }
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT1L_Full_BIT,
+ phydev->supported_eee)) {
+ /* IEEE 802.3cg-2019 45.2.7.26 10BASE-T1 AN status register
+ * (Register 7.527)
+ */
+ val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_10BT1_AN_STAT);
+ if (val < 0)
+ return val;
+
+ mii_10base_t1_adv_mod_linkmode_t(lpa, val);
+ }
+
+ return 0;
+}
+
+/**
+ * genphy_c45_read_eee_cap1 - read supported EEE link modes from register 3.20
+ * @phydev: target phy_device struct
+ */
+static int genphy_c45_read_eee_cap1(struct phy_device *phydev)
+{
+ int val;
+
+ /* IEEE 802.3-2018 45.2.3.10 EEE control and capability 1
+ * (Register 3.20)
+ */
+ val = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_EEE_ABLE);
+ if (val < 0)
+ return val;
+
+ /* The 802.3 2018 standard says the top 2 bits are reserved and should
+ * read as 0. Also, it seems unlikely anybody will build a PHY which
+ * supports 100GBASE-R deep sleep all the way down to 100BASE-TX EEE.
+ * If MDIO_PCS_EEE_ABLE is 0xffff assume EEE is not supported.
+ */
+ if (val == 0xffff)
+ return 0;
+
+ mii_eee_cap1_mod_linkmode_t(phydev->supported_eee, val);
+
+ /* Some buggy devices indicate EEE link modes in MDIO_PCS_EEE_ABLE
+ * which they don't support as indicated by BMSR, ESTATUS etc.
+ */
+ linkmode_and(phydev->supported_eee, phydev->supported_eee,
+ phydev->supported);
+
+ return 0;
+}
+
+/**
+ * genphy_c45_read_eee_abilities - read supported EEE link modes
+ * @phydev: target phy_device struct
+ */
+int genphy_c45_read_eee_abilities(struct phy_device *phydev)
+{
+ int val;
+
+ /* There is not indicator whether optional register
+ * "EEE control and capability 1" (3.20) is supported. Read it only
+ * on devices with appropriate linkmodes.
+ */
+ if (linkmode_intersects(phydev->supported, PHY_EEE_CAP1_FEATURES)) {
+ val = genphy_c45_read_eee_cap1(phydev);
+ if (val)
+ return val;
+ }
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT1L_Full_BIT,
+ phydev->supported)) {
+ /* IEEE 802.3cg-2019 45.2.1.186b 10BASE-T1L PMA status register
+ * (Register 1.2295)
+ */
+ val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10T1L_STAT);
+ if (val < 0)
+ return val;
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10baseT1L_Full_BIT,
+ phydev->supported_eee,
+ val & MDIO_PMA_10T1L_STAT_EEE);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(genphy_c45_read_eee_abilities);
+
+/**
* genphy_c45_pma_read_abilities - read supported link modes from PMA
* @phydev: target phy_device struct
*
@@ -773,6 +972,11 @@ int genphy_c45_pma_read_abilities(struct phy_device *phydev)
}
}
+ /* This is optional functionality. If not supported, we may get an error
+ * which should be ignored.
+ */
+ genphy_c45_read_eee_abilities(phydev);
+
return 0;
}
EXPORT_SYMBOL_GPL(genphy_c45_pma_read_abilities);
@@ -931,6 +1135,312 @@ int genphy_c45_fast_retrain(struct phy_device *phydev, bool enable)
}
EXPORT_SYMBOL_GPL(genphy_c45_fast_retrain);
+/**
+ * genphy_c45_plca_get_cfg - get PLCA configuration from standard registers
+ * @phydev: target phy_device struct
+ * @plca_cfg: output structure to store the PLCA configuration
+ *
+ * Description: if the PHY complies to the Open Alliance TC14 10BASE-T1S PLCA
+ * Management Registers specifications, this function can be used to retrieve
+ * the current PLCA configuration from the standard registers in MMD 31.
+ */
+int genphy_c45_plca_get_cfg(struct phy_device *phydev,
+ struct phy_plca_cfg *plca_cfg)
+{
+ int ret;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, MDIO_OATC14_PLCA_IDVER);
+ if (ret < 0)
+ return ret;
+
+ if ((ret & MDIO_OATC14_PLCA_IDM) != OATC14_IDM)
+ return -ENODEV;
+
+ plca_cfg->version = ret & ~MDIO_OATC14_PLCA_IDM;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, MDIO_OATC14_PLCA_CTRL0);
+ if (ret < 0)
+ return ret;
+
+ plca_cfg->enabled = !!(ret & MDIO_OATC14_PLCA_EN);
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, MDIO_OATC14_PLCA_CTRL1);
+ if (ret < 0)
+ return ret;
+
+ plca_cfg->node_cnt = (ret & MDIO_OATC14_PLCA_NCNT) >> 8;
+ plca_cfg->node_id = (ret & MDIO_OATC14_PLCA_ID);
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, MDIO_OATC14_PLCA_TOTMR);
+ if (ret < 0)
+ return ret;
+
+ plca_cfg->to_tmr = ret & MDIO_OATC14_PLCA_TOT;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, MDIO_OATC14_PLCA_BURST);
+ if (ret < 0)
+ return ret;
+
+ plca_cfg->burst_cnt = (ret & MDIO_OATC14_PLCA_MAXBC) >> 8;
+ plca_cfg->burst_tmr = (ret & MDIO_OATC14_PLCA_BTMR);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(genphy_c45_plca_get_cfg);
+
+/**
+ * genphy_c45_plca_set_cfg - set PLCA configuration using standard registers
+ * @phydev: target phy_device struct
+ * @plca_cfg: structure containing the PLCA configuration. Fields set to -1 are
+ * not to be changed.
+ *
+ * Description: if the PHY complies to the Open Alliance TC14 10BASE-T1S PLCA
+ * Management Registers specifications, this function can be used to modify
+ * the PLCA configuration using the standard registers in MMD 31.
+ */
+int genphy_c45_plca_set_cfg(struct phy_device *phydev,
+ const struct phy_plca_cfg *plca_cfg)
+{
+ u16 val = 0;
+ int ret;
+
+ // PLCA IDVER is read-only
+ if (plca_cfg->version >= 0)
+ return -EINVAL;
+
+ // first of all, disable PLCA if required
+ if (plca_cfg->enabled == 0) {
+ ret = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND2,
+ MDIO_OATC14_PLCA_CTRL0,
+ MDIO_OATC14_PLCA_EN);
+
+ if (ret < 0)
+ return ret;
+ }
+
+ // check if we need to set the PLCA node count, node ID, or both
+ if (plca_cfg->node_cnt >= 0 || plca_cfg->node_id >= 0) {
+ /* if one between node count and node ID is -not- to be
+ * changed, read the register to later perform merge/purge of
+ * the configuration as appropriate
+ */
+ if (plca_cfg->node_cnt < 0 || plca_cfg->node_id < 0) {
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND2,
+ MDIO_OATC14_PLCA_CTRL1);
+
+ if (ret < 0)
+ return ret;
+
+ val = ret;
+ }
+
+ if (plca_cfg->node_cnt >= 0)
+ val = (val & ~MDIO_OATC14_PLCA_NCNT) |
+ (plca_cfg->node_cnt << 8);
+
+ if (plca_cfg->node_id >= 0)
+ val = (val & ~MDIO_OATC14_PLCA_ID) |
+ (plca_cfg->node_id);
+
+ ret = phy_write_mmd(phydev, MDIO_MMD_VEND2,
+ MDIO_OATC14_PLCA_CTRL1, val);
+
+ if (ret < 0)
+ return ret;
+ }
+
+ if (plca_cfg->to_tmr >= 0) {
+ ret = phy_write_mmd(phydev, MDIO_MMD_VEND2,
+ MDIO_OATC14_PLCA_TOTMR,
+ plca_cfg->to_tmr);
+
+ if (ret < 0)
+ return ret;
+ }
+
+ // check if we need to set the PLCA burst count, burst timer, or both
+ if (plca_cfg->burst_cnt >= 0 || plca_cfg->burst_tmr >= 0) {
+ /* if one between burst count and burst timer is -not- to be
+ * changed, read the register to later perform merge/purge of
+ * the configuration as appropriate
+ */
+ if (plca_cfg->burst_cnt < 0 || plca_cfg->burst_tmr < 0) {
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND2,
+ MDIO_OATC14_PLCA_BURST);
+
+ if (ret < 0)
+ return ret;
+
+ val = ret;
+ }
+
+ if (plca_cfg->burst_cnt >= 0)
+ val = (val & ~MDIO_OATC14_PLCA_MAXBC) |
+ (plca_cfg->burst_cnt << 8);
+
+ if (plca_cfg->burst_tmr >= 0)
+ val = (val & ~MDIO_OATC14_PLCA_BTMR) |
+ (plca_cfg->burst_tmr);
+
+ ret = phy_write_mmd(phydev, MDIO_MMD_VEND2,
+ MDIO_OATC14_PLCA_BURST, val);
+
+ if (ret < 0)
+ return ret;
+ }
+
+ // if we need to enable PLCA, do it at the end
+ if (plca_cfg->enabled > 0) {
+ ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND2,
+ MDIO_OATC14_PLCA_CTRL0,
+ MDIO_OATC14_PLCA_EN);
+
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(genphy_c45_plca_set_cfg);
+
+/**
+ * genphy_c45_plca_get_status - get PLCA status from standard registers
+ * @phydev: target phy_device struct
+ * @plca_st: output structure to store the PLCA status
+ *
+ * Description: if the PHY complies to the Open Alliance TC14 10BASE-T1S PLCA
+ * Management Registers specifications, this function can be used to retrieve
+ * the current PLCA status information from the standard registers in MMD 31.
+ */
+int genphy_c45_plca_get_status(struct phy_device *phydev,
+ struct phy_plca_status *plca_st)
+{
+ int ret;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, MDIO_OATC14_PLCA_STATUS);
+ if (ret < 0)
+ return ret;
+
+ plca_st->pst = !!(ret & MDIO_OATC14_PLCA_PST);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(genphy_c45_plca_get_status);
+
+/**
+ * genphy_c45_eee_is_active - get EEE status
+ * @phydev: target phy_device struct
+ * @adv: variable to store advertised linkmodes
+ * @lp: variable to store LP advertised linkmodes
+ * @is_enabled: variable to store EEE enabled/disabled configuration value
+ *
+ * Description: this function will read local and link partner PHY
+ * advertisements. Compare them return current EEE state.
+ */
+int genphy_c45_eee_is_active(struct phy_device *phydev, unsigned long *adv,
+ unsigned long *lp, bool *is_enabled)
+{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(tmp_adv) = {};
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(tmp_lp) = {};
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(common);
+ bool eee_enabled, eee_active;
+ int ret;
+
+ ret = genphy_c45_read_eee_adv(phydev, tmp_adv);
+ if (ret)
+ return ret;
+
+ ret = genphy_c45_read_eee_lpa(phydev, tmp_lp);
+ if (ret)
+ return ret;
+
+ eee_enabled = !linkmode_empty(tmp_adv);
+ linkmode_and(common, tmp_adv, tmp_lp);
+ if (eee_enabled && !linkmode_empty(common))
+ eee_active = phy_check_valid(phydev->speed, phydev->duplex,
+ common);
+ else
+ eee_active = false;
+
+ if (adv)
+ linkmode_copy(adv, tmp_adv);
+ if (lp)
+ linkmode_copy(lp, tmp_lp);
+ if (is_enabled)
+ *is_enabled = eee_enabled;
+
+ return eee_active;
+}
+EXPORT_SYMBOL(genphy_c45_eee_is_active);
+
+/**
+ * genphy_c45_ethtool_get_eee - get EEE supported and status
+ * @phydev: target phy_device struct
+ * @data: ethtool_eee data
+ *
+ * Description: it reports the Supported/Advertisement/LP Advertisement
+ * capabilities.
+ */
+int genphy_c45_ethtool_get_eee(struct phy_device *phydev,
+ struct ethtool_eee *data)
+{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(adv) = {};
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(lp) = {};
+ bool overflow = false, is_enabled;
+ int ret;
+
+ ret = genphy_c45_eee_is_active(phydev, adv, lp, &is_enabled);
+ if (ret < 0)
+ return ret;
+
+ data->eee_enabled = is_enabled;
+ data->eee_active = ret;
+
+ if (!ethtool_convert_link_mode_to_legacy_u32(&data->supported,
+ phydev->supported_eee))
+ overflow = true;
+ if (!ethtool_convert_link_mode_to_legacy_u32(&data->advertised, adv))
+ overflow = true;
+ if (!ethtool_convert_link_mode_to_legacy_u32(&data->lp_advertised, lp))
+ overflow = true;
+
+ if (overflow)
+ phydev_warn(phydev, "Not all supported or advertised EEE link modes were passed to the user space\n");
+
+ return 0;
+}
+EXPORT_SYMBOL(genphy_c45_ethtool_get_eee);
+
+/**
+ * genphy_c45_ethtool_set_eee - get EEE supported and status
+ * @phydev: target phy_device struct
+ * @data: ethtool_eee data
+ *
+ * Description: it reportes the Supported/Advertisement/LP Advertisement
+ * capabilities.
+ */
+int genphy_c45_ethtool_set_eee(struct phy_device *phydev,
+ struct ethtool_eee *data)
+{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(adv) = {};
+ int ret;
+
+ if (data->eee_enabled) {
+ if (data->advertised)
+ adv[0] = data->advertised;
+ else
+ linkmode_copy(adv, phydev->supported_eee);
+ }
+
+ ret = genphy_c45_write_eee_adv(phydev, adv);
+ if (ret < 0)
+ return ret;
+ if (ret > 0)
+ return phy_restart_aneg(phydev);
+
+ return 0;
+}
+EXPORT_SYMBOL(genphy_c45_ethtool_set_eee);
+
struct phy_driver genphy_c45_driver = {
.phy_id = 0xffffffff,
.phy_id_mask = 0xffffffff,
diff --git a/drivers/net/phy/phy-core.c b/drivers/net/phy/phy-core.c
index 5d08c627a516..a64186dc53f8 100644
--- a/drivers/net/phy/phy-core.c
+++ b/drivers/net/phy/phy-core.c
@@ -13,7 +13,7 @@
*/
const char *phy_speed_to_str(int speed)
{
- BUILD_BUG_ON_MSG(__ETHTOOL_LINK_MODE_MASK_NBITS != 99,
+ BUILD_BUG_ON_MSG(__ETHTOOL_LINK_MODE_MASK_NBITS != 102,
"Enum ethtool_link_mode_bit_indices and phylib are out of sync. "
"If a speed or mode has been added please update phy_speed_to_str "
"and the PHY settings array.\n");
@@ -260,6 +260,9 @@ static const struct phy_setting settings[] = {
PHY_SETTING( 10, FULL, 10baseT_Full ),
PHY_SETTING( 10, HALF, 10baseT_Half ),
PHY_SETTING( 10, FULL, 10baseT1L_Full ),
+ PHY_SETTING( 10, FULL, 10baseT1S_Full ),
+ PHY_SETTING( 10, HALF, 10baseT1S_Half ),
+ PHY_SETTING( 10, HALF, 10baseT1S_P2MP_Half ),
};
#undef PHY_SETTING
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index e5b6cb1a77f9..b33e55a7364e 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -242,11 +242,11 @@ unsigned int phy_supported_speeds(struct phy_device *phy,
*
* Description: Returns true if there is a valid setting, false otherwise.
*/
-static inline bool phy_check_valid(int speed, int duplex,
- unsigned long *features)
+bool phy_check_valid(int speed, int duplex, unsigned long *features)
{
return !!phy_lookup_setting(speed, duplex, features, true);
}
+EXPORT_SYMBOL(phy_check_valid);
/**
* phy_sanitize_settings - make sure the PHY is set to supported speed and duplex
@@ -544,6 +544,198 @@ int phy_ethtool_get_stats(struct phy_device *phydev,
EXPORT_SYMBOL(phy_ethtool_get_stats);
/**
+ * phy_ethtool_get_plca_cfg - Get PLCA RS configuration
+ * @phydev: the phy_device struct
+ * @plca_cfg: where to store the retrieved configuration
+ *
+ * Retrieve the PLCA configuration from the PHY. Return 0 on success or a
+ * negative value if an error occurred.
+ */
+int phy_ethtool_get_plca_cfg(struct phy_device *phydev,
+ struct phy_plca_cfg *plca_cfg)
+{
+ int ret;
+
+ if (!phydev->drv) {
+ ret = -EIO;
+ goto out;
+ }
+
+ if (!phydev->drv->get_plca_cfg) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ mutex_lock(&phydev->lock);
+ ret = phydev->drv->get_plca_cfg(phydev, plca_cfg);
+
+ mutex_unlock(&phydev->lock);
+out:
+ return ret;
+}
+
+/**
+ * plca_check_valid - Check PLCA configuration before enabling
+ * @phydev: the phy_device struct
+ * @plca_cfg: current PLCA configuration
+ * @extack: extack for reporting useful error messages
+ *
+ * Checks whether the PLCA and PHY configuration are consistent and it is safe
+ * to enable PLCA. Returns 0 on success or a negative value if the PLCA or PHY
+ * configuration is not consistent.
+ */
+static int plca_check_valid(struct phy_device *phydev,
+ const struct phy_plca_cfg *plca_cfg,
+ struct netlink_ext_ack *extack)
+{
+ int ret = 0;
+
+ if (!linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT1S_P2MP_Half_BIT,
+ phydev->advertising)) {
+ ret = -EOPNOTSUPP;
+ NL_SET_ERR_MSG(extack,
+ "Point to Multi-Point mode is not enabled");
+ } else if (plca_cfg->node_id >= 255) {
+ NL_SET_ERR_MSG(extack, "PLCA node ID is not set");
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+/**
+ * phy_ethtool_set_plca_cfg - Set PLCA RS configuration
+ * @phydev: the phy_device struct
+ * @plca_cfg: new PLCA configuration to apply
+ * @extack: extack for reporting useful error messages
+ *
+ * Sets the PLCA configuration in the PHY. Return 0 on success or a
+ * negative value if an error occurred.
+ */
+int phy_ethtool_set_plca_cfg(struct phy_device *phydev,
+ const struct phy_plca_cfg *plca_cfg,
+ struct netlink_ext_ack *extack)
+{
+ struct phy_plca_cfg *curr_plca_cfg;
+ int ret;
+
+ if (!phydev->drv) {
+ ret = -EIO;
+ goto out;
+ }
+
+ if (!phydev->drv->set_plca_cfg ||
+ !phydev->drv->get_plca_cfg) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ curr_plca_cfg = kmalloc(sizeof(*curr_plca_cfg), GFP_KERNEL);
+ if (!curr_plca_cfg) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ mutex_lock(&phydev->lock);
+
+ ret = phydev->drv->get_plca_cfg(phydev, curr_plca_cfg);
+ if (ret)
+ goto out_drv;
+
+ if (curr_plca_cfg->enabled < 0 && plca_cfg->enabled >= 0) {
+ NL_SET_ERR_MSG(extack,
+ "PHY does not support changing the PLCA 'enable' attribute");
+ ret = -EINVAL;
+ goto out_drv;
+ }
+
+ if (curr_plca_cfg->node_id < 0 && plca_cfg->node_id >= 0) {
+ NL_SET_ERR_MSG(extack,
+ "PHY does not support changing the PLCA 'local node ID' attribute");
+ ret = -EINVAL;
+ goto out_drv;
+ }
+
+ if (curr_plca_cfg->node_cnt < 0 && plca_cfg->node_cnt >= 0) {
+ NL_SET_ERR_MSG(extack,
+ "PHY does not support changing the PLCA 'node count' attribute");
+ ret = -EINVAL;
+ goto out_drv;
+ }
+
+ if (curr_plca_cfg->to_tmr < 0 && plca_cfg->to_tmr >= 0) {
+ NL_SET_ERR_MSG(extack,
+ "PHY does not support changing the PLCA 'TO timer' attribute");
+ ret = -EINVAL;
+ goto out_drv;
+ }
+
+ if (curr_plca_cfg->burst_cnt < 0 && plca_cfg->burst_cnt >= 0) {
+ NL_SET_ERR_MSG(extack,
+ "PHY does not support changing the PLCA 'burst count' attribute");
+ ret = -EINVAL;
+ goto out_drv;
+ }
+
+ if (curr_plca_cfg->burst_tmr < 0 && plca_cfg->burst_tmr >= 0) {
+ NL_SET_ERR_MSG(extack,
+ "PHY does not support changing the PLCA 'burst timer' attribute");
+ ret = -EINVAL;
+ goto out_drv;
+ }
+
+ // if enabling PLCA, perform a few sanity checks
+ if (plca_cfg->enabled > 0) {
+ // allow setting node_id concurrently with enabled
+ if (plca_cfg->node_id >= 0)
+ curr_plca_cfg->node_id = plca_cfg->node_id;
+
+ ret = plca_check_valid(phydev, curr_plca_cfg, extack);
+ if (ret)
+ goto out_drv;
+ }
+
+ ret = phydev->drv->set_plca_cfg(phydev, plca_cfg);
+
+out_drv:
+ kfree(curr_plca_cfg);
+ mutex_unlock(&phydev->lock);
+out:
+ return ret;
+}
+
+/**
+ * phy_ethtool_get_plca_status - Get PLCA RS status information
+ * @phydev: the phy_device struct
+ * @plca_st: where to store the retrieved status information
+ *
+ * Retrieve the PLCA status information from the PHY. Return 0 on success or a
+ * negative value if an error occurred.
+ */
+int phy_ethtool_get_plca_status(struct phy_device *phydev,
+ struct phy_plca_status *plca_st)
+{
+ int ret;
+
+ if (!phydev->drv) {
+ ret = -EIO;
+ goto out;
+ }
+
+ if (!phydev->drv->get_plca_status) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+ mutex_lock(&phydev->lock);
+ ret = phydev->drv->get_plca_status(phydev, plca_st);
+
+ mutex_unlock(&phydev->lock);
+out:
+ return ret;
+}
+
+/**
* phy_start_cable_test - Start a cable test
*
* @phydev: the phy_device struct
@@ -877,27 +1069,35 @@ EXPORT_SYMBOL(phy_ethtool_ksettings_set);
int phy_speed_down(struct phy_device *phydev, bool sync)
{
__ETHTOOL_DECLARE_LINK_MODE_MASK(adv_tmp);
- int ret;
+ int ret = 0;
+
+ mutex_lock(&phydev->lock);
if (phydev->autoneg != AUTONEG_ENABLE)
- return 0;
+ goto out;
linkmode_copy(adv_tmp, phydev->advertising);
ret = phy_speed_down_core(phydev);
if (ret)
- return ret;
+ goto out;
linkmode_copy(phydev->adv_old, adv_tmp);
- if (linkmode_equal(phydev->advertising, adv_tmp))
- return 0;
+ if (linkmode_equal(phydev->advertising, adv_tmp)) {
+ ret = 0;
+ goto out;
+ }
ret = phy_config_aneg(phydev);
if (ret)
- return ret;
+ goto out;
+
+ ret = sync ? phy_poll_aneg_done(phydev) : 0;
+out:
+ mutex_unlock(&phydev->lock);
- return sync ? phy_poll_aneg_done(phydev) : 0;
+ return ret;
}
EXPORT_SYMBOL_GPL(phy_speed_down);
@@ -910,21 +1110,28 @@ EXPORT_SYMBOL_GPL(phy_speed_down);
int phy_speed_up(struct phy_device *phydev)
{
__ETHTOOL_DECLARE_LINK_MODE_MASK(adv_tmp);
+ int ret = 0;
+
+ mutex_lock(&phydev->lock);
if (phydev->autoneg != AUTONEG_ENABLE)
- return 0;
+ goto out;
if (linkmode_empty(phydev->adv_old))
- return 0;
+ goto out;
linkmode_copy(adv_tmp, phydev->advertising);
linkmode_copy(phydev->advertising, phydev->adv_old);
linkmode_zero(phydev->adv_old);
if (linkmode_equal(phydev->advertising, adv_tmp))
- return 0;
+ goto out;
+
+ ret = phy_config_aneg(phydev);
+out:
+ mutex_unlock(&phydev->lock);
- return phy_config_aneg(phydev);
+ return ret;
}
EXPORT_SYMBOL_GPL(phy_speed_up);
@@ -1265,30 +1472,6 @@ void phy_mac_interrupt(struct phy_device *phydev)
}
EXPORT_SYMBOL(phy_mac_interrupt);
-static void mmd_eee_adv_to_linkmode(unsigned long *advertising, u16 eee_adv)
-{
- linkmode_zero(advertising);
-
- if (eee_adv & MDIO_EEE_100TX)
- linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
- advertising);
- if (eee_adv & MDIO_EEE_1000T)
- linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
- advertising);
- if (eee_adv & MDIO_EEE_10GT)
- linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
- advertising);
- if (eee_adv & MDIO_EEE_1000KX)
- linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
- advertising);
- if (eee_adv & MDIO_EEE_10GKX4)
- linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
- advertising);
- if (eee_adv & MDIO_EEE_10GKR)
- linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
- advertising);
-}
-
/**
* phy_init_eee - init and check the EEE feature
* @phydev: target phy_device struct
@@ -1301,62 +1484,25 @@ static void mmd_eee_adv_to_linkmode(unsigned long *advertising, u16 eee_adv)
*/
int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
{
+ int ret;
+
if (!phydev->drv)
return -EIO;
- /* According to 802.3az,the EEE is supported only in full duplex-mode.
- */
- if (phydev->duplex == DUPLEX_FULL) {
- __ETHTOOL_DECLARE_LINK_MODE_MASK(common);
- __ETHTOOL_DECLARE_LINK_MODE_MASK(lp);
- __ETHTOOL_DECLARE_LINK_MODE_MASK(adv);
- int eee_lp, eee_cap, eee_adv;
- int status;
- u32 cap;
-
- /* Read phy status to properly get the right settings */
- status = phy_read_status(phydev);
- if (status)
- return status;
-
- /* First check if the EEE ability is supported */
- eee_cap = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_EEE_ABLE);
- if (eee_cap <= 0)
- goto eee_exit_err;
-
- cap = mmd_eee_cap_to_ethtool_sup_t(eee_cap);
- if (!cap)
- goto eee_exit_err;
-
- /* Check which link settings negotiated and verify it in
- * the EEE advertising registers.
- */
- eee_lp = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE);
- if (eee_lp <= 0)
- goto eee_exit_err;
-
- eee_adv = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV);
- if (eee_adv <= 0)
- goto eee_exit_err;
-
- mmd_eee_adv_to_linkmode(adv, eee_adv);
- mmd_eee_adv_to_linkmode(lp, eee_lp);
- linkmode_and(common, adv, lp);
-
- if (!phy_check_valid(phydev->speed, phydev->duplex, common))
- goto eee_exit_err;
+ ret = genphy_c45_eee_is_active(phydev, NULL, NULL, NULL);
+ if (ret < 0)
+ return ret;
+ if (!ret)
+ return -EPROTONOSUPPORT;
- if (clk_stop_enable)
- /* Configure the PHY to stop receiving xMII
- * clock while it is signaling LPI.
- */
- phy_set_bits_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1,
- MDIO_PCS_CTRL1_CLKSTOP_EN);
+ if (clk_stop_enable)
+ /* Configure the PHY to stop receiving xMII
+ * clock while it is signaling LPI.
+ */
+ ret = phy_set_bits_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL1,
+ MDIO_PCS_CTRL1_CLKSTOP_EN);
- return 0; /* EEE supported */
- }
-eee_exit_err:
- return -EPROTONOSUPPORT;
+ return ret < 0 ? ret : 0;
}
EXPORT_SYMBOL(phy_init_eee);
@@ -1369,10 +1515,16 @@ EXPORT_SYMBOL(phy_init_eee);
*/
int phy_get_eee_err(struct phy_device *phydev)
{
+ int ret;
+
if (!phydev->drv)
return -EIO;
- return phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_EEE_WK_ERR);
+ mutex_lock(&phydev->lock);
+ ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_EEE_WK_ERR);
+ mutex_unlock(&phydev->lock);
+
+ return ret;
}
EXPORT_SYMBOL(phy_get_eee_err);
@@ -1386,33 +1538,16 @@ EXPORT_SYMBOL(phy_get_eee_err);
*/
int phy_ethtool_get_eee(struct phy_device *phydev, struct ethtool_eee *data)
{
- int val;
+ int ret;
if (!phydev->drv)
return -EIO;
- /* Get Supported EEE */
- val = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_EEE_ABLE);
- if (val < 0)
- return val;
- data->supported = mmd_eee_cap_to_ethtool_sup_t(val);
-
- /* Get advertisement EEE */
- val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV);
- if (val < 0)
- return val;
- data->advertised = mmd_eee_adv_to_ethtool_adv_t(val);
- data->eee_enabled = !!data->advertised;
-
- /* Get LP advertisement EEE */
- val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE);
- if (val < 0)
- return val;
- data->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);
-
- data->eee_active = !!(data->advertised & data->lp_advertised);
+ mutex_lock(&phydev->lock);
+ ret = genphy_c45_ethtool_get_eee(phydev, data);
+ mutex_unlock(&phydev->lock);
- return 0;
+ return ret;
}
EXPORT_SYMBOL(phy_ethtool_get_eee);
@@ -1425,43 +1560,16 @@ EXPORT_SYMBOL(phy_ethtool_get_eee);
*/
int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data)
{
- int cap, old_adv, adv = 0, ret;
+ int ret;
if (!phydev->drv)
return -EIO;
- /* Get Supported EEE */
- cap = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_PCS_EEE_ABLE);
- if (cap < 0)
- return cap;
-
- old_adv = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV);
- if (old_adv < 0)
- return old_adv;
-
- if (data->eee_enabled) {
- adv = !data->advertised ? cap :
- ethtool_adv_to_mmd_eee_adv_t(data->advertised) & cap;
- /* Mask prohibited EEE modes */
- adv &= ~phydev->eee_broken_modes;
- }
-
- if (old_adv != adv) {
- ret = phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, adv);
- if (ret < 0)
- return ret;
-
- /* Restart autonegotiation so the new modes get sent to the
- * link partner.
- */
- if (phydev->autoneg == AUTONEG_ENABLE) {
- ret = phy_restart_aneg(phydev);
- if (ret < 0)
- return ret;
- }
- }
+ mutex_lock(&phydev->lock);
+ ret = genphy_c45_ethtool_set_eee(phydev, data);
+ mutex_unlock(&phydev->lock);
- return 0;
+ return ret;
}
EXPORT_SYMBOL(phy_ethtool_set_eee);
@@ -1473,8 +1581,15 @@ EXPORT_SYMBOL(phy_ethtool_set_eee);
*/
int phy_ethtool_set_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol)
{
- if (phydev->drv && phydev->drv->set_wol)
- return phydev->drv->set_wol(phydev, wol);
+ int ret;
+
+ if (phydev->drv && phydev->drv->set_wol) {
+ mutex_lock(&phydev->lock);
+ ret = phydev->drv->set_wol(phydev, wol);
+ mutex_unlock(&phydev->lock);
+
+ return ret;
+ }
return -EOPNOTSUPP;
}
@@ -1488,8 +1603,11 @@ EXPORT_SYMBOL(phy_ethtool_set_wol);
*/
void phy_ethtool_get_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol)
{
- if (phydev->drv && phydev->drv->get_wol)
+ if (phydev->drv && phydev->drv->get_wol) {
+ mutex_lock(&phydev->lock);
phydev->drv->get_wol(phydev, wol);
+ mutex_unlock(&phydev->lock);
+ }
}
EXPORT_SYMBOL(phy_ethtool_get_wol);
@@ -1526,6 +1644,7 @@ EXPORT_SYMBOL(phy_ethtool_set_link_ksettings);
int phy_ethtool_nway_reset(struct net_device *ndev)
{
struct phy_device *phydev = ndev->phydev;
+ int ret;
if (!phydev)
return -ENODEV;
@@ -1533,6 +1652,10 @@ int phy_ethtool_nway_reset(struct net_device *ndev)
if (!phydev->drv)
return -EIO;
- return phy_restart_aneg(phydev);
+ mutex_lock(&phydev->lock);
+ ret = phy_restart_aneg(phydev);
+ mutex_unlock(&phydev->lock);
+
+ return ret;
}
EXPORT_SYMBOL(phy_ethtool_nway_reset);
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 716870a4499c..71becceb8764 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -45,6 +45,9 @@ EXPORT_SYMBOL_GPL(phy_basic_features);
__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_basic_t1_features) __ro_after_init;
EXPORT_SYMBOL_GPL(phy_basic_t1_features);
+__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_basic_t1s_p2mp_features) __ro_after_init;
+EXPORT_SYMBOL_GPL(phy_basic_t1s_p2mp_features);
+
__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_features) __ro_after_init;
EXPORT_SYMBOL_GPL(phy_gbit_features);
@@ -98,6 +101,12 @@ const int phy_basic_t1_features_array[3] = {
};
EXPORT_SYMBOL_GPL(phy_basic_t1_features_array);
+const int phy_basic_t1s_p2mp_features_array[2] = {
+ ETHTOOL_LINK_MODE_TP_BIT,
+ ETHTOOL_LINK_MODE_10baseT1S_P2MP_Half_BIT,
+};
+EXPORT_SYMBOL_GPL(phy_basic_t1s_p2mp_features_array);
+
const int phy_gbit_features_array[2] = {
ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
@@ -123,6 +132,18 @@ static const int phy_10gbit_full_features_array[] = {
ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
};
+static const int phy_eee_cap1_features_array[] = {
+ ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
+ ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
+};
+
+__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_eee_cap1_features) __ro_after_init;
+EXPORT_SYMBOL_GPL(phy_eee_cap1_features);
+
static void features_init(void)
{
/* 10/100 half/full*/
@@ -138,6 +159,11 @@ static void features_init(void)
ARRAY_SIZE(phy_basic_t1_features_array),
phy_basic_t1_features);
+ /* 10 half, P2MP, TP */
+ linkmode_set_bit_array(phy_basic_t1s_p2mp_features_array,
+ ARRAY_SIZE(phy_basic_t1s_p2mp_features_array),
+ phy_basic_t1s_p2mp_features);
+
/* 10/100 half/full + 1000 half/full */
linkmode_set_bit_array(phy_basic_ports_array,
ARRAY_SIZE(phy_basic_ports_array),
@@ -199,6 +225,10 @@ static void features_init(void)
linkmode_set_bit_array(phy_10gbit_fec_features_array,
ARRAY_SIZE(phy_10gbit_fec_features_array),
phy_10gbit_fec_features);
+ linkmode_set_bit_array(phy_eee_cap1_features_array,
+ ARRAY_SIZE(phy_eee_cap1_features_array),
+ phy_eee_cap1_features);
+
}
void phy_device_free(struct phy_device *phydev)
@@ -932,7 +962,7 @@ struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45)
* probe with C45 to see if we're able to get a valid PHY ID in the C45
* space, if successful, create the C45 PHY device.
*/
- if (!is_c45 && phy_id == 0 && bus->probe_capabilities >= MDIOBUS_C45) {
+ if (!is_c45 && phy_id == 0 && bus->read_c45) {
r = get_phy_c45_ids(bus, addr, &c45_ids);
if (!r)
return phy_device_create(bus, addr, phy_id,
@@ -1487,6 +1517,13 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
phydev->interrupts = PHY_INTERRUPT_DISABLED;
+ /* PHYs can request to use poll mode even though they have an
+ * associated interrupt line. This could be the case if they
+ * detect a broken interrupt handling.
+ */
+ if (phydev->dev_flags & PHY_F_NO_IRQ)
+ phydev->irq = PHY_POLL;
+
/* Port is set to PORT_TP by default and the actual PHY driver will set
* it to different value depending on the PHY configuration. If we have
* the generic PHY driver we can't figure it out, thus set the old
@@ -1517,7 +1554,7 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
* another mac interface, so we should create a device link between
* phy dev and mac dev.
*/
- if (phydev->mdio.bus->parent && dev->dev.parent != phydev->mdio.bus->parent)
+ if (dev && phydev->mdio.bus->parent && dev->dev.parent != phydev->mdio.bus->parent)
phydev->devlink = device_link_add(dev->dev.parent, &phydev->mdio.dev,
DL_FLAG_PM_RUNTIME | DL_FLAG_STATELESS);
@@ -2194,7 +2231,10 @@ int __genphy_config_aneg(struct phy_device *phydev, bool changed)
{
int err;
- if (genphy_config_eee_advert(phydev))
+ err = genphy_c45_write_eee_adv(phydev, phydev->supported_eee);
+ if (err < 0)
+ return err;
+ else if (err)
changed = true;
err = genphy_setup_master_slave(phydev);
@@ -2616,6 +2656,11 @@ int genphy_read_abilities(struct phy_device *phydev)
phydev->supported, val & ESTATUS_1000_XFULL);
}
+ /* This is optional functionality. If not supported, we may get an error
+ * which should be ignored.
+ */
+ genphy_c45_read_eee_abilities(phydev);
+
return 0;
}
EXPORT_SYMBOL(genphy_read_abilities);
@@ -3068,8 +3113,10 @@ static int phy_probe(struct device *dev)
* a controller will attach, and may modify one
* or both of these values
*/
- if (phydrv->features)
+ if (phydrv->features) {
linkmode_copy(phydev->supported, phydrv->features);
+ genphy_c45_read_eee_abilities(phydev);
+ }
else if (phydrv->get_features)
err = phydrv->get_features(phydev);
else if (phydev->is_c45)
@@ -3262,6 +3309,9 @@ static const struct ethtool_phy_ops phy_ethtool_phy_ops = {
.get_sset_count = phy_ethtool_get_sset_count,
.get_strings = phy_ethtool_get_strings,
.get_stats = phy_ethtool_get_stats,
+ .get_plca_cfg = phy_ethtool_get_plca_cfg,
+ .set_plca_cfg = phy_ethtool_set_plca_cfg,
+ .get_plca_status = phy_ethtool_get_plca_status,
.start_cable_test = phy_start_cable_test,
.start_cable_test_tdr = phy_start_cable_test_tdr,
};
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index 09cc65c0da93..1a2f074685fa 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -241,12 +241,16 @@ void phylink_caps_to_linkmodes(unsigned long *linkmodes, unsigned long caps)
if (caps & MAC_ASYM_PAUSE)
__set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, linkmodes);
- if (caps & MAC_10HD)
+ if (caps & MAC_10HD) {
__set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_10baseT1S_Half_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_10baseT1S_P2MP_Half_BIT, linkmodes);
+ }
if (caps & MAC_10FD) {
__set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, linkmodes);
__set_bit(ETHTOOL_LINK_MODE_10baseT1L_Full_BIT, linkmodes);
+ __set_bit(ETHTOOL_LINK_MODE_10baseT1S_Full_BIT, linkmodes);
}
if (caps & MAC_100HD) {
@@ -707,6 +711,7 @@ static int phylink_parse_fixedlink(struct phylink *pl,
struct fwnode_handle *fwnode)
{
struct fwnode_handle *fixed_node;
+ bool pause, asym_pause, autoneg;
const struct phy_setting *s;
struct gpio_desc *desc;
u32 speed;
@@ -779,13 +784,23 @@ static int phylink_parse_fixedlink(struct phylink *pl,
linkmode_copy(pl->link_config.advertising, pl->supported);
phylink_validate(pl, pl->supported, &pl->link_config);
+ pause = phylink_test(pl->supported, Pause);
+ asym_pause = phylink_test(pl->supported, Asym_Pause);
+ autoneg = phylink_test(pl->supported, Autoneg);
s = phy_lookup_setting(pl->link_config.speed, pl->link_config.duplex,
pl->supported, true);
linkmode_zero(pl->supported);
phylink_set(pl->supported, MII);
- phylink_set(pl->supported, Pause);
- phylink_set(pl->supported, Asym_Pause);
- phylink_set(pl->supported, Autoneg);
+
+ if (pause)
+ phylink_set(pl->supported, Pause);
+
+ if (asym_pause)
+ phylink_set(pl->supported, Asym_Pause);
+
+ if (autoneg)
+ phylink_set(pl->supported, Autoneg);
+
if (s) {
__set_bit(s->bit, pl->supported);
__set_bit(s->bit, pl->link_config.lp_advertising);
@@ -1812,10 +1827,9 @@ int phylink_fwnode_phy_connect(struct phylink *pl,
ret = phy_attach_direct(pl->netdev, phy_dev, flags,
pl->link_interface);
- if (ret) {
- phy_device_free(phy_dev);
+ phy_device_free(phy_dev);
+ if (ret)
return ret;
- }
ret = phylink_bringup_phy(pl, phy_dev, pl->link_config.interface);
if (ret)
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
index 83b99d95b278..c02cad6478a8 100644
--- a/drivers/net/phy/sfp.c
+++ b/drivers/net/phy/sfp.c
@@ -1,6 +1,4 @@
// SPDX-License-Identifier: GPL-2.0
-#include <linux/acpi.h>
-#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
@@ -144,7 +142,7 @@ static const char *sm_state_to_str(unsigned short sm_state)
return sm_state_strings[sm_state];
}
-static const char *gpio_of_names[] = {
+static const char *gpio_names[] = {
"mod-def0",
"los",
"tx-fault",
@@ -2563,7 +2561,7 @@ static void sfp_check_state(struct sfp *sfp)
for (i = 0; i < GPIO_MAX; i++)
if (changed & BIT(i))
- dev_dbg(sfp->dev, "%s %u -> %u\n", gpio_of_names[i],
+ dev_dbg(sfp->dev, "%s %u -> %u\n", gpio_names[i],
!!(sfp->state & BIT(i)), !!(state & BIT(i)));
state |= sfp->state & (SFP_F_TX_DISABLE | SFP_F_RATE_SELECT);
@@ -2644,10 +2642,8 @@ static void sfp_cleanup(void *data)
static int sfp_i2c_get(struct sfp *sfp)
{
- struct acpi_handle *acpi_handle;
struct fwnode_handle *h;
struct i2c_adapter *i2c;
- struct device_node *np;
int err;
h = fwnode_find_reference(dev_fwnode(sfp->dev), "i2c-bus", 0);
@@ -2656,16 +2652,7 @@ static int sfp_i2c_get(struct sfp *sfp)
return -ENODEV;
}
- if (is_acpi_device_node(h)) {
- acpi_handle = ACPI_HANDLE_FWNODE(h);
- i2c = i2c_acpi_find_adapter_by_handle(acpi_handle);
- } else if ((np = to_of_node(h)) != NULL) {
- i2c = of_find_i2c_adapter_by_node(np);
- } else {
- err = -EINVAL;
- goto put;
- }
-
+ i2c = i2c_get_adapter_by_fwnode(h);
if (!i2c) {
err = -EPROBE_DEFER;
goto put;
@@ -2696,19 +2683,11 @@ static int sfp_probe(struct platform_device *pdev)
if (err < 0)
return err;
- sff = sfp->type = &sfp_data;
-
- if (pdev->dev.of_node) {
- const struct of_device_id *id;
+ sff = device_get_match_data(sfp->dev);
+ if (!sff)
+ sff = &sfp_data;
- id = of_match_node(sfp_of_match, pdev->dev.of_node);
- if (WARN_ON(!id))
- return -EINVAL;
-
- sff = sfp->type = id->data;
- } else if (!has_acpi_companion(&pdev->dev)) {
- return -EINVAL;
- }
+ sfp->type = sff;
err = sfp_i2c_get(sfp);
if (err)
@@ -2717,7 +2696,7 @@ static int sfp_probe(struct platform_device *pdev)
for (i = 0; i < GPIO_MAX; i++)
if (sff->gpios & BIT(i)) {
sfp->gpio[i] = devm_gpiod_get_optional(sfp->dev,
- gpio_of_names[i], gpio_flags[i]);
+ gpio_names[i], gpio_flags[i]);
if (IS_ERR(sfp->gpio[i]))
return PTR_ERR(sfp->gpio[i]);
}
@@ -2772,7 +2751,7 @@ static int sfp_probe(struct platform_device *pdev)
sfp_irq_name = devm_kasprintf(sfp->dev, GFP_KERNEL,
"%s-%s", dev_name(sfp->dev),
- gpio_of_names[i]);
+ gpio_names[i]);
if (!sfp_irq_name)
return -ENOMEM;
diff --git a/drivers/net/tap.c b/drivers/net/tap.c
index a2be1994b389..8941aa199ea3 100644
--- a/drivers/net/tap.c
+++ b/drivers/net/tap.c
@@ -533,7 +533,7 @@ static int tap_open(struct inode *inode, struct file *file)
q->sock.state = SS_CONNECTED;
q->sock.file = file;
q->sock.ops = &tap_socket_ops;
- sock_init_data(&q->sock, &q->sk);
+ sock_init_data_uid(&q->sock, &q->sk, inode->i_uid);
q->sk.sk_write_space = tap_sock_write_space;
q->sk.sk_destruct = tap_sock_destruct;
q->flags = IFF_VNET_HDR | IFF_NO_PI | IFF_TAP;
diff --git a/drivers/net/thunderbolt/Kconfig b/drivers/net/thunderbolt/Kconfig
new file mode 100644
index 000000000000..e127848c8cbd
--- /dev/null
+++ b/drivers/net/thunderbolt/Kconfig
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config USB4_NET
+ tristate "Networking over USB4 and Thunderbolt cables"
+ depends on USB4 && INET
+ help
+ Select this if you want to create network between two computers
+ over a USB4 and Thunderbolt cables. The driver supports Apple
+ ThunderboltIP protocol and allows communication with any host
+ supporting the same protocol including Windows and macOS.
+
+ To compile this driver a module, choose M here. The module will be
+ called thunderbolt_net.
diff --git a/drivers/net/thunderbolt/Makefile b/drivers/net/thunderbolt/Makefile
new file mode 100644
index 000000000000..e81c2a4849f0
--- /dev/null
+++ b/drivers/net/thunderbolt/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_USB4_NET) := thunderbolt_net.o
+thunderbolt_net-objs := main.o trace.o
+
+# Tracepoints need to know where to find trace.h
+CFLAGS_trace.o := -I$(src)
diff --git a/drivers/net/thunderbolt.c b/drivers/net/thunderbolt/main.c
index 990484776f2d..26ef3706445e 100644
--- a/drivers/net/thunderbolt.c
+++ b/drivers/net/thunderbolt/main.c
@@ -23,6 +23,8 @@
#include <net/ip6_checksum.h>
+#include "trace.h"
+
/* Protocol timeouts in ms */
#define TBNET_LOGIN_DELAY 4500
#define TBNET_LOGIN_TIMEOUT 500
@@ -305,6 +307,8 @@ static int tbnet_logout_request(struct tbnet *net)
static void start_login(struct tbnet *net)
{
+ netdev_dbg(net->dev, "login started\n");
+
mutex_lock(&net->connection_lock);
net->login_sent = false;
net->login_received = false;
@@ -318,6 +322,8 @@ static void stop_login(struct tbnet *net)
{
cancel_delayed_work_sync(&net->login_work);
cancel_work_sync(&net->connected_work);
+
+ netdev_dbg(net->dev, "login stopped\n");
}
static inline unsigned int tbnet_frame_size(const struct tbnet_frame *tf)
@@ -349,6 +355,8 @@ static void tbnet_free_buffers(struct tbnet_ring *ring)
size = TBNET_RX_PAGE_SIZE;
}
+ trace_tbnet_free_frame(i, tf->page, tf->frame.buffer_phy, dir);
+
if (tf->frame.buffer_phy)
dma_unmap_page(dma_dev, tf->frame.buffer_phy, size,
dir);
@@ -374,6 +382,8 @@ static void tbnet_tear_down(struct tbnet *net, bool send_logout)
int ret, retries = TBNET_LOGOUT_RETRIES;
while (send_logout && retries-- > 0) {
+ netdev_dbg(net->dev, "sending logout request %u\n",
+ retries);
ret = tbnet_logout_request(net);
if (ret != -ETIMEDOUT)
break;
@@ -400,6 +410,8 @@ static void tbnet_tear_down(struct tbnet *net, bool send_logout)
net->login_sent = false;
net->login_received = false;
+ netdev_dbg(net->dev, "network traffic stopped\n");
+
mutex_unlock(&net->connection_lock);
}
@@ -431,12 +443,15 @@ static int tbnet_handle_packet(const void *buf, size_t size, void *data)
switch (pkg->hdr.type) {
case TBIP_LOGIN:
+ netdev_dbg(net->dev, "remote login request received\n");
if (!netif_running(net->dev))
break;
ret = tbnet_login_response(net, route, sequence,
pkg->hdr.command_id);
if (!ret) {
+ netdev_dbg(net->dev, "remote login response sent\n");
+
mutex_lock(&net->connection_lock);
net->login_received = true;
net->remote_transmit_path = pkg->transmit_path;
@@ -458,9 +473,12 @@ static int tbnet_handle_packet(const void *buf, size_t size, void *data)
break;
case TBIP_LOGOUT:
+ netdev_dbg(net->dev, "remote logout request received\n");
ret = tbnet_logout_response(net, route, sequence, command_id);
- if (!ret)
+ if (!ret) {
+ netdev_dbg(net->dev, "remote logout response sent\n");
queue_work(system_long_wq, &net->disconnect_work);
+ }
break;
default:
@@ -512,6 +530,9 @@ static int tbnet_alloc_rx_buffers(struct tbnet *net, unsigned int nbuffers)
tf->frame.buffer_phy = dma_addr;
tf->dev = net->dev;
+ trace_tbnet_alloc_rx_frame(index, tf->page, dma_addr,
+ DMA_FROM_DEVICE);
+
tb_ring_rx(ring->ring, &tf->frame);
ring->prod++;
@@ -588,6 +609,8 @@ static int tbnet_alloc_tx_buffers(struct tbnet *net)
tf->frame.callback = tbnet_tx_callback;
tf->frame.sof = TBIP_PDF_FRAME_START;
tf->frame.eof = TBIP_PDF_FRAME_END;
+
+ trace_tbnet_alloc_tx_frame(i, tf->page, dma_addr, DMA_TO_DEVICE);
}
ring->cons = 0;
@@ -612,6 +635,8 @@ static void tbnet_connected_work(struct work_struct *work)
if (!connected)
return;
+ netdev_dbg(net->dev, "login successful, enabling paths\n");
+
ret = tb_xdomain_alloc_in_hopid(net->xd, net->remote_transmit_path);
if (ret != net->remote_transmit_path) {
netdev_err(net->dev, "failed to allocate Rx HopID\n");
@@ -647,6 +672,8 @@ static void tbnet_connected_work(struct work_struct *work)
netif_carrier_on(net->dev);
netif_start_queue(net->dev);
+
+ netdev_dbg(net->dev, "network traffic started\n");
return;
err_free_tx_buffers:
@@ -668,8 +695,13 @@ static void tbnet_login_work(struct work_struct *work)
if (netif_carrier_ok(net->dev))
return;
+ netdev_dbg(net->dev, "sending login request, retries=%u\n",
+ net->login_retries);
+
ret = tbnet_login_request(net, net->login_retries % 4);
if (ret) {
+ netdev_dbg(net->dev, "sending login request failed, ret=%d\n",
+ ret);
if (net->login_retries++ < TBNET_LOGIN_RETRIES) {
queue_delayed_work(system_long_wq, &net->login_work,
delay);
@@ -677,6 +709,8 @@ static void tbnet_login_work(struct work_struct *work)
netdev_info(net->dev, "ThunderboltIP login timed out\n");
}
} else {
+ netdev_dbg(net->dev, "received login reply\n");
+
net->login_retries = 0;
mutex_lock(&net->connection_lock);
@@ -807,12 +841,16 @@ static int tbnet_poll(struct napi_struct *napi, int budget)
hdr = page_address(page);
if (!tbnet_check_frame(net, tf, hdr)) {
+ trace_tbnet_invalid_rx_ip_frame(hdr->frame_size,
+ hdr->frame_id, hdr->frame_index, hdr->frame_count);
__free_pages(page, TBNET_RX_PAGE_ORDER);
dev_kfree_skb_any(net->skb);
net->skb = NULL;
continue;
}
+ trace_tbnet_rx_ip_frame(hdr->frame_size, hdr->frame_id,
+ hdr->frame_index, hdr->frame_count);
frame_size = le32_to_cpu(hdr->frame_size);
skb = net->skb;
@@ -846,6 +884,7 @@ static int tbnet_poll(struct napi_struct *napi, int budget)
if (last) {
skb->protocol = eth_type_trans(skb, net->dev);
+ trace_tbnet_rx_skb(skb);
napi_gro_receive(&net->napi, skb);
net->skb = NULL;
}
@@ -965,6 +1004,8 @@ static bool tbnet_xmit_csum_and_map(struct tbnet *net, struct sk_buff *skb,
for (i = 0; i < frame_count; i++) {
hdr = page_address(frames[i]->page);
hdr->frame_count = cpu_to_le32(frame_count);
+ trace_tbnet_tx_ip_frame(hdr->frame_size, hdr->frame_id,
+ hdr->frame_index, hdr->frame_count);
dma_sync_single_for_device(dma_dev,
frames[i]->frame.buffer_phy,
tbnet_frame_size(frames[i]), DMA_TO_DEVICE);
@@ -1029,6 +1070,8 @@ static bool tbnet_xmit_csum_and_map(struct tbnet *net, struct sk_buff *skb,
len = le32_to_cpu(hdr->frame_size) - offset;
wsum = csum_partial(dest, len, wsum);
hdr->frame_count = cpu_to_le32(frame_count);
+ trace_tbnet_tx_ip_frame(hdr->frame_size, hdr->frame_id,
+ hdr->frame_index, hdr->frame_count);
offset = 0;
}
@@ -1071,6 +1114,8 @@ static netdev_tx_t tbnet_start_xmit(struct sk_buff *skb,
bool unmap = false;
void *dest;
+ trace_tbnet_tx_skb(skb);
+
nframes = DIV_ROUND_UP(data_len, TBNET_MAX_PAYLOAD_SIZE);
if (tbnet_available_buffers(&net->tx_ring) < nframes) {
netif_stop_queue(net->dev);
@@ -1177,6 +1222,7 @@ static netdev_tx_t tbnet_start_xmit(struct sk_buff *skb,
net->stats.tx_packets++;
net->stats.tx_bytes += skb->len;
+ trace_tbnet_consume_skb(skb);
dev_consume_skb_any(skb);
return NETDEV_TX_OK;
diff --git a/drivers/net/thunderbolt/trace.c b/drivers/net/thunderbolt/trace.c
new file mode 100644
index 000000000000..1b1499520a44
--- /dev/null
+++ b/drivers/net/thunderbolt/trace.c
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Tracepoints for Thunderbolt/USB4 networking driver
+ *
+ * Copyright (C) 2023, Intel Corporation
+ * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
+ */
+
+#define CREATE_TRACE_POINTS
+#include "trace.h"
diff --git a/drivers/net/thunderbolt/trace.h b/drivers/net/thunderbolt/trace.h
new file mode 100644
index 000000000000..9626eadaebb9
--- /dev/null
+++ b/drivers/net/thunderbolt/trace.h
@@ -0,0 +1,141 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Tracepoints for Thunderbolt/USB4 networking driver
+ *
+ * Copyright (C) 2023, Intel Corporation
+ * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM thunderbolt_net
+
+#if !defined(__TRACE_THUNDERBOLT_NET_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __TRACE_THUNDERBOLT_NET_H
+
+#include <linux/dma-direction.h>
+#include <linux/skbuff.h>
+#include <linux/tracepoint.h>
+
+#define DMA_DATA_DIRECTION_NAMES \
+ { DMA_BIDIRECTIONAL, "DMA_BIDIRECTIONAL" }, \
+ { DMA_TO_DEVICE, "DMA_TO_DEVICE" }, \
+ { DMA_FROM_DEVICE, "DMA_FROM_DEVICE" }, \
+ { DMA_NONE, "DMA_NONE" }
+
+DECLARE_EVENT_CLASS(tbnet_frame,
+ TP_PROTO(unsigned int index, const void *page, dma_addr_t phys,
+ enum dma_data_direction dir),
+ TP_ARGS(index, page, phys, dir),
+ TP_STRUCT__entry(
+ __field(unsigned int, index)
+ __field(const void *, page)
+ __field(dma_addr_t, phys)
+ __field(enum dma_data_direction, dir)
+ ),
+ TP_fast_assign(
+ __entry->index = index;
+ __entry->page = page;
+ __entry->phys = phys;
+ __entry->dir = dir;
+ ),
+ TP_printk("index=%u page=%p phys=%pad dir=%s",
+ __entry->index, __entry->page, &__entry->phys,
+ __print_symbolic(__entry->dir, DMA_DATA_DIRECTION_NAMES))
+);
+
+DEFINE_EVENT(tbnet_frame, tbnet_alloc_rx_frame,
+ TP_PROTO(unsigned int index, const void *page, dma_addr_t phys,
+ enum dma_data_direction dir),
+ TP_ARGS(index, page, phys, dir)
+);
+
+DEFINE_EVENT(tbnet_frame, tbnet_alloc_tx_frame,
+ TP_PROTO(unsigned int index, const void *page, dma_addr_t phys,
+ enum dma_data_direction dir),
+ TP_ARGS(index, page, phys, dir)
+);
+
+DEFINE_EVENT(tbnet_frame, tbnet_free_frame,
+ TP_PROTO(unsigned int index, const void *page, dma_addr_t phys,
+ enum dma_data_direction dir),
+ TP_ARGS(index, page, phys, dir)
+);
+
+DECLARE_EVENT_CLASS(tbnet_ip_frame,
+ TP_PROTO(__le32 size, __le16 id, __le16 index, __le32 count),
+ TP_ARGS(size, id, index, count),
+ TP_STRUCT__entry(
+ __field(u32, size)
+ __field(u16, id)
+ __field(u16, index)
+ __field(u32, count)
+ ),
+ TP_fast_assign(
+ __entry->size = le32_to_cpu(size);
+ __entry->id = le16_to_cpu(id);
+ __entry->index = le16_to_cpu(index);
+ __entry->count = le32_to_cpu(count);
+ ),
+ TP_printk("id=%u size=%u index=%u count=%u",
+ __entry->id, __entry->size, __entry->index, __entry->count)
+);
+
+DEFINE_EVENT(tbnet_ip_frame, tbnet_rx_ip_frame,
+ TP_PROTO(__le32 size, __le16 id, __le16 index, __le32 count),
+ TP_ARGS(size, id, index, count)
+);
+
+DEFINE_EVENT(tbnet_ip_frame, tbnet_invalid_rx_ip_frame,
+ TP_PROTO(__le32 size, __le16 id, __le16 index, __le32 count),
+ TP_ARGS(size, id, index, count)
+);
+
+DEFINE_EVENT(tbnet_ip_frame, tbnet_tx_ip_frame,
+ TP_PROTO(__le32 size, __le16 id, __le16 index, __le32 count),
+ TP_ARGS(size, id, index, count)
+);
+
+DECLARE_EVENT_CLASS(tbnet_skb,
+ TP_PROTO(const struct sk_buff *skb),
+ TP_ARGS(skb),
+ TP_STRUCT__entry(
+ __field(const void *, addr)
+ __field(unsigned int, len)
+ __field(unsigned int, data_len)
+ __field(unsigned int, nr_frags)
+ ),
+ TP_fast_assign(
+ __entry->addr = skb;
+ __entry->len = skb->len;
+ __entry->data_len = skb->data_len;
+ __entry->nr_frags = skb_shinfo(skb)->nr_frags;
+ ),
+ TP_printk("skb=%p len=%u data_len=%u nr_frags=%u",
+ __entry->addr, __entry->len, __entry->data_len,
+ __entry->nr_frags)
+);
+
+DEFINE_EVENT(tbnet_skb, tbnet_rx_skb,
+ TP_PROTO(const struct sk_buff *skb),
+ TP_ARGS(skb)
+);
+
+DEFINE_EVENT(tbnet_skb, tbnet_tx_skb,
+ TP_PROTO(const struct sk_buff *skb),
+ TP_ARGS(skb)
+);
+
+DEFINE_EVENT(tbnet_skb, tbnet_consume_skb,
+ TP_PROTO(const struct sk_buff *skb),
+ TP_ARGS(skb)
+);
+
+#endif /* _TRACE_THUNDERBOLT_NET_H */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+#include <trace/define_trace.h>
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index a7d17c680f4a..ad653b32b2f0 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1401,6 +1401,11 @@ static void tun_net_initialize(struct net_device *dev)
eth_hw_addr_random(dev);
+ /* Currently tun does not support XDP, only tap does. */
+ dev->xdp_features = NETDEV_XDP_ACT_BASIC |
+ NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT;
+
break;
}
@@ -3448,7 +3453,7 @@ static int tun_chr_open(struct inode *inode, struct file * file)
tfile->socket.file = file;
tfile->socket.ops = &tun_socket_ops;
- sock_init_data(&tfile->socket, &tfile->sk);
+ sock_init_data_uid(&tfile->socket, &tfile->sk, inode->i_uid);
tfile->sk.sk_write_space = tun_sock_write_space;
tfile->sk.sk_sndbuf = INT_MAX;
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index c140edb4b648..80849d115e5d 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -747,13 +747,6 @@ static const struct usb_device_id products[] = {
.driver_info = 0,
},
-/* Realtek RTL8152 Based USB 2.0 Ethernet Adapters */
-{
- USB_DEVICE_AND_INTERFACE_INFO(REALTEK_VENDOR_ID, 0x8152, USB_CLASS_COMM,
- USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
- .driver_info = 0,
-},
-
/* Realtek RTL8153 Based USB 3.0 Ethernet Adapters */
{
USB_DEVICE_AND_INTERFACE_INFO(REALTEK_VENDOR_ID, 0x8153, USB_CLASS_COMM,
@@ -761,71 +754,6 @@ static const struct usb_device_id products[] = {
.driver_info = 0,
},
-/* Samsung USB Ethernet Adapters */
-{
- USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, 0xa101, USB_CLASS_COMM,
- USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
- .driver_info = 0,
-},
-
-#if IS_ENABLED(CONFIG_USB_RTL8152)
-/* Linksys USB3GIGV1 Ethernet Adapter */
-{
- USB_DEVICE_AND_INTERFACE_INFO(LINKSYS_VENDOR_ID, 0x0041, USB_CLASS_COMM,
- USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
- .driver_info = 0,
-},
-#endif
-
-/* Lenovo ThinkPad OneLink+ Dock (based on Realtek RTL8153) */
-{
- USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x3054, USB_CLASS_COMM,
- USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
- .driver_info = 0,
-},
-
-/* ThinkPad USB-C Dock (based on Realtek RTL8153) */
-{
- USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x3062, USB_CLASS_COMM,
- USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
- .driver_info = 0,
-},
-
-/* ThinkPad Thunderbolt 3 Dock (based on Realtek RTL8153) */
-{
- USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x3069, USB_CLASS_COMM,
- USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
- .driver_info = 0,
-},
-
-/* ThinkPad Thunderbolt 3 Dock Gen 2 (based on Realtek RTL8153) */
-{
- USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x3082, USB_CLASS_COMM,
- USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
- .driver_info = 0,
-},
-
-/* Lenovo Thinkpad USB 3.0 Ethernet Adapters (based on Realtek RTL8153) */
-{
- USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x7205, USB_CLASS_COMM,
- USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
- .driver_info = 0,
-},
-
-/* Lenovo USB C to Ethernet Adapter (based on Realtek RTL8153) */
-{
- USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x720c, USB_CLASS_COMM,
- USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
- .driver_info = 0,
-},
-
-/* Lenovo USB-C Travel Hub (based on Realtek RTL8153) */
-{
- USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x7214, USB_CLASS_COMM,
- USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
- .driver_info = 0,
-},
-
/* Lenovo Powered USB-C Travel Hub (4X90S92381, based on Realtek RTL8153) */
{
USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x721e, USB_CLASS_COMM,
@@ -833,48 +761,6 @@ static const struct usb_device_id products[] = {
.driver_info = 0,
},
-/* ThinkPad USB-C Dock Gen 2 (based on Realtek RTL8153) */
-{
- USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0xa387, USB_CLASS_COMM,
- USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
- .driver_info = 0,
-},
-
-/* NVIDIA Tegra USB 3.0 Ethernet Adapters (based on Realtek RTL8153) */
-{
- USB_DEVICE_AND_INTERFACE_INFO(NVIDIA_VENDOR_ID, 0x09ff, USB_CLASS_COMM,
- USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
- .driver_info = 0,
-},
-
-/* Microsoft Surface 2 dock (based on Realtek RTL8152) */
-{
- USB_DEVICE_AND_INTERFACE_INFO(MICROSOFT_VENDOR_ID, 0x07ab, USB_CLASS_COMM,
- USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
- .driver_info = 0,
-},
-
-/* Microsoft Surface Ethernet Adapter (based on Realtek RTL8153) */
-{
- USB_DEVICE_AND_INTERFACE_INFO(MICROSOFT_VENDOR_ID, 0x07c6, USB_CLASS_COMM,
- USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
- .driver_info = 0,
-},
-
-/* Microsoft Surface Ethernet Adapter (based on Realtek RTL8153B) */
-{
- USB_DEVICE_AND_INTERFACE_INFO(MICROSOFT_VENDOR_ID, 0x0927, USB_CLASS_COMM,
- USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
- .driver_info = 0,
-},
-
-/* TP-LINK UE300 USB 3.0 Ethernet Adapters (based on Realtek RTL8153) */
-{
- USB_DEVICE_AND_INTERFACE_INFO(TPLINK_VENDOR_ID, 0x0601, USB_CLASS_COMM,
- USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
- .driver_info = 0,
-},
-
/* Aquantia AQtion USB to 5GbE Controller (based on AQC111U) */
{
USB_DEVICE_AND_INTERFACE_INFO(AQUANTIA_VENDOR_ID, 0xc101,
diff --git a/drivers/net/usb/kalmia.c b/drivers/net/usb/kalmia.c
index 9f2b70ef39aa..613fc6910f14 100644
--- a/drivers/net/usb/kalmia.c
+++ b/drivers/net/usb/kalmia.c
@@ -65,8 +65,8 @@ kalmia_send_init_packet(struct usbnet *dev, u8 *init_msg, u8 init_msg_len,
init_msg, init_msg_len, &act_len, KALMIA_USB_TIMEOUT);
if (status != 0) {
netdev_err(dev->net,
- "Error sending init packet. Status %i, length %i\n",
- status, act_len);
+ "Error sending init packet. Status %i\n",
+ status);
return status;
}
else if (act_len != init_msg_len) {
@@ -83,8 +83,8 @@ kalmia_send_init_packet(struct usbnet *dev, u8 *init_msg, u8 init_msg_len,
if (status != 0)
netdev_err(dev->net,
- "Error receiving init result. Status %i, length %i\n",
- status, act_len);
+ "Error receiving init result. Status %i\n",
+ status);
else if (act_len != expected_len)
netdev_err(dev->net, "Unexpected init result length: %i\n",
act_len);
diff --git a/drivers/net/usb/plusb.c b/drivers/net/usb/plusb.c
index 2c82fbcaab22..7a2b0094de51 100644
--- a/drivers/net/usb/plusb.c
+++ b/drivers/net/usb/plusb.c
@@ -57,9 +57,7 @@
static inline int
pl_vendor_req(struct usbnet *dev, u8 req, u8 val, u8 index)
{
- return usbnet_read_cmd(dev, req,
- USB_DIR_IN | USB_TYPE_VENDOR |
- USB_RECIP_DEVICE,
+ return usbnet_write_cmd(dev, req, USB_TYPE_VENDOR | USB_RECIP_DEVICE,
val, index, NULL, 0);
}
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 23da1d9dafd1..decb5ba56a25 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -8232,43 +8232,6 @@ static bool rtl_check_vendor_ok(struct usb_interface *intf)
return true;
}
-static bool rtl_vendor_mode(struct usb_interface *intf)
-{
- struct usb_host_interface *alt = intf->cur_altsetting;
- struct usb_device *udev;
- struct usb_host_config *c;
- int i, num_configs;
-
- if (alt->desc.bInterfaceClass == USB_CLASS_VENDOR_SPEC)
- return rtl_check_vendor_ok(intf);
-
- /* The vendor mode is not always config #1, so to find it out. */
- udev = interface_to_usbdev(intf);
- c = udev->config;
- num_configs = udev->descriptor.bNumConfigurations;
- if (num_configs < 2)
- return false;
-
- for (i = 0; i < num_configs; (i++, c++)) {
- struct usb_interface_descriptor *desc = NULL;
-
- if (c->desc.bNumInterfaces > 0)
- desc = &c->intf_cache[0]->altsetting->desc;
- else
- continue;
-
- if (desc->bInterfaceClass == USB_CLASS_VENDOR_SPEC) {
- usb_driver_set_configuration(udev, c->desc.bConfigurationValue);
- break;
- }
- }
-
- if (i == num_configs)
- dev_err(&intf->dev, "Unexpected Device\n");
-
- return false;
-}
-
static int rtl8152_pre_reset(struct usb_interface *intf)
{
struct r8152 *tp = usb_get_intfdata(intf);
@@ -9500,9 +9463,8 @@ static int rtl_fw_init(struct r8152 *tp)
return 0;
}
-u8 rtl8152_get_version(struct usb_interface *intf)
+static u8 __rtl_get_hw_ver(struct usb_device *udev)
{
- struct usb_device *udev = interface_to_usbdev(intf);
u32 ocp_data = 0;
__le32 *tmp;
u8 version;
@@ -9571,10 +9533,19 @@ u8 rtl8152_get_version(struct usb_interface *intf)
break;
default:
version = RTL_VER_UNKNOWN;
- dev_info(&intf->dev, "Unknown version 0x%04x\n", ocp_data);
+ dev_info(&udev->dev, "Unknown version 0x%04x\n", ocp_data);
break;
}
+ return version;
+}
+
+u8 rtl8152_get_version(struct usb_interface *intf)
+{
+ u8 version;
+
+ version = __rtl_get_hw_ver(interface_to_usbdev(intf));
+
dev_dbg(&intf->dev, "Detected version 0x%04x\n", version);
return version;
@@ -9610,15 +9581,19 @@ static int rtl8152_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
struct usb_device *udev = interface_to_usbdev(intf);
- u8 version = rtl8152_get_version(intf);
struct r8152 *tp;
struct net_device *netdev;
+ u8 version;
int ret;
- if (version == RTL_VER_UNKNOWN)
+ if (intf->cur_altsetting->desc.bInterfaceClass != USB_CLASS_VENDOR_SPEC)
return -ENODEV;
- if (!rtl_vendor_mode(intf))
+ if (!rtl_check_vendor_ok(intf))
+ return -ENODEV;
+
+ version = rtl8152_get_version(intf);
+ if (version == RTL_VER_UNKNOWN)
return -ENODEV;
usb_reset_device(udev);
@@ -9814,43 +9789,35 @@ static void rtl8152_disconnect(struct usb_interface *intf)
}
}
-#define REALTEK_USB_DEVICE(vend, prod) { \
- USB_DEVICE_INTERFACE_CLASS(vend, prod, USB_CLASS_VENDOR_SPEC), \
-}, \
-{ \
- USB_DEVICE_AND_INTERFACE_INFO(vend, prod, USB_CLASS_COMM, \
- USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), \
-}
-
/* table of devices that work with this driver */
static const struct usb_device_id rtl8152_table[] = {
/* Realtek */
- REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8050),
- REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8053),
- REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8152),
- REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8153),
- REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8155),
- REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8156),
+ { USB_DEVICE(VENDOR_ID_REALTEK, 0x8050) },
+ { USB_DEVICE(VENDOR_ID_REALTEK, 0x8053) },
+ { USB_DEVICE(VENDOR_ID_REALTEK, 0x8152) },
+ { USB_DEVICE(VENDOR_ID_REALTEK, 0x8153) },
+ { USB_DEVICE(VENDOR_ID_REALTEK, 0x8155) },
+ { USB_DEVICE(VENDOR_ID_REALTEK, 0x8156) },
/* Microsoft */
- REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x07ab),
- REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x07c6),
- REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x0927),
- REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x0c5e),
- REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101),
- REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x304f),
- REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3054),
- REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3062),
- REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3069),
- REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3082),
- REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7205),
- REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x720c),
- REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7214),
- REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x721e),
- REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0xa387),
- REALTEK_USB_DEVICE(VENDOR_ID_LINKSYS, 0x0041),
- REALTEK_USB_DEVICE(VENDOR_ID_NVIDIA, 0x09ff),
- REALTEK_USB_DEVICE(VENDOR_ID_TPLINK, 0x0601),
+ { USB_DEVICE(VENDOR_ID_MICROSOFT, 0x07ab) },
+ { USB_DEVICE(VENDOR_ID_MICROSOFT, 0x07c6) },
+ { USB_DEVICE(VENDOR_ID_MICROSOFT, 0x0927) },
+ { USB_DEVICE(VENDOR_ID_MICROSOFT, 0x0c5e) },
+ { USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101) },
+ { USB_DEVICE(VENDOR_ID_LENOVO, 0x304f) },
+ { USB_DEVICE(VENDOR_ID_LENOVO, 0x3054) },
+ { USB_DEVICE(VENDOR_ID_LENOVO, 0x3062) },
+ { USB_DEVICE(VENDOR_ID_LENOVO, 0x3069) },
+ { USB_DEVICE(VENDOR_ID_LENOVO, 0x3082) },
+ { USB_DEVICE(VENDOR_ID_LENOVO, 0x7205) },
+ { USB_DEVICE(VENDOR_ID_LENOVO, 0x720c) },
+ { USB_DEVICE(VENDOR_ID_LENOVO, 0x7214) },
+ { USB_DEVICE(VENDOR_ID_LENOVO, 0x721e) },
+ { USB_DEVICE(VENDOR_ID_LENOVO, 0xa387) },
+ { USB_DEVICE(VENDOR_ID_LINKSYS, 0x0041) },
+ { USB_DEVICE(VENDOR_ID_NVIDIA, 0x09ff) },
+ { USB_DEVICE(VENDOR_ID_TPLINK, 0x0601) },
{}
};
@@ -9870,7 +9837,67 @@ static struct usb_driver rtl8152_driver = {
.disable_hub_initiated_lpm = 1,
};
-module_usb_driver(rtl8152_driver);
+static int rtl8152_cfgselector_probe(struct usb_device *udev)
+{
+ struct usb_host_config *c;
+ int i, num_configs;
+
+ /* Switch the device to vendor mode, if and only if the vendor mode
+ * driver supports it.
+ */
+ if (__rtl_get_hw_ver(udev) == RTL_VER_UNKNOWN)
+ return 0;
+
+ /* The vendor mode is not always config #1, so to find it out. */
+ c = udev->config;
+ num_configs = udev->descriptor.bNumConfigurations;
+ for (i = 0; i < num_configs; (i++, c++)) {
+ struct usb_interface_descriptor *desc = NULL;
+
+ if (!c->desc.bNumInterfaces)
+ continue;
+ desc = &c->intf_cache[0]->altsetting->desc;
+ if (desc->bInterfaceClass == USB_CLASS_VENDOR_SPEC)
+ break;
+ }
+
+ if (i == num_configs)
+ return -ENODEV;
+
+ if (usb_set_configuration(udev, c->desc.bConfigurationValue)) {
+ dev_err(&udev->dev, "Failed to set configuration %d\n",
+ c->desc.bConfigurationValue);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static struct usb_device_driver rtl8152_cfgselector_driver = {
+ .name = MODULENAME "-cfgselector",
+ .probe = rtl8152_cfgselector_probe,
+ .id_table = rtl8152_table,
+ .generic_subclass = 1,
+};
+
+static int __init rtl8152_driver_init(void)
+{
+ int ret;
+
+ ret = usb_register_device_driver(&rtl8152_cfgselector_driver, THIS_MODULE);
+ if (ret)
+ return ret;
+ return usb_register(&rtl8152_driver);
+}
+
+static void __exit rtl8152_driver_exit(void)
+{
+ usb_deregister(&rtl8152_driver);
+ usb_deregister_device_driver(&rtl8152_cfgselector_driver);
+}
+
+module_init(rtl8152_driver_init);
+module_exit(rtl8152_driver_exit);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 64a9a80b2309..283ffddda821 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -555,32 +555,30 @@ static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
/*-------------------------------------------------------------------------*/
-static inline void rx_process (struct usbnet *dev, struct sk_buff *skb)
+static inline int rx_process(struct usbnet *dev, struct sk_buff *skb)
{
if (dev->driver_info->rx_fixup &&
!dev->driver_info->rx_fixup (dev, skb)) {
/* With RX_ASSEMBLE, rx_fixup() must update counters */
if (!(dev->driver_info->flags & FLAG_RX_ASSEMBLE))
dev->net->stats.rx_errors++;
- goto done;
+ return -EPROTO;
}
// else network stack removes extra byte if we forced a short packet
/* all data was already cloned from skb inside the driver */
if (dev->driver_info->flags & FLAG_MULTI_PACKET)
- goto done;
+ return -EALREADY;
if (skb->len < ETH_HLEN) {
dev->net->stats.rx_errors++;
dev->net->stats.rx_length_errors++;
netif_dbg(dev, rx_err, dev->net, "rx length %d\n", skb->len);
- } else {
- usbnet_skb_return(dev, skb);
- return;
+ return -EPROTO;
}
-done:
- skb_queue_tail(&dev->done, skb);
+ usbnet_skb_return(dev, skb);
+ return 0;
}
/*-------------------------------------------------------------------------*/
@@ -1514,6 +1512,14 @@ err:
return ret;
}
+static inline void usb_free_skb(struct sk_buff *skb)
+{
+ struct skb_data *entry = (struct skb_data *)skb->cb;
+
+ usb_free_urb(entry->urb);
+ dev_kfree_skb(skb);
+}
+
/*-------------------------------------------------------------------------*/
// tasklet (work deferred from completions, in_irq) or timer
@@ -1528,15 +1534,14 @@ static void usbnet_bh (struct timer_list *t)
entry = (struct skb_data *) skb->cb;
switch (entry->state) {
case rx_done:
- entry->state = rx_cleanup;
- rx_process (dev, skb);
+ if (rx_process(dev, skb))
+ usb_free_skb(skb);
continue;
case tx_done:
kfree(entry->urb->sg);
fallthrough;
case rx_cleanup:
- usb_free_urb (entry->urb);
- dev_kfree_skb (skb);
+ usb_free_skb(skb);
continue;
default:
netdev_dbg(dev->net, "bogus skb state %d\n", entry->state);
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index dfc7d87fad59..1bb54de7124d 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -116,6 +116,11 @@ static struct {
{ "peer_ifindex" },
};
+struct veth_xdp_buff {
+ struct xdp_buff xdp;
+ struct sk_buff *skb;
+};
+
static int veth_get_link_ksettings(struct net_device *dev,
struct ethtool_link_ksettings *cmd)
{
@@ -592,23 +597,25 @@ static struct xdp_frame *veth_xdp_rcv_one(struct veth_rq *rq,
rcu_read_lock();
xdp_prog = rcu_dereference(rq->xdp_prog);
if (likely(xdp_prog)) {
- struct xdp_buff xdp;
+ struct veth_xdp_buff vxbuf;
+ struct xdp_buff *xdp = &vxbuf.xdp;
u32 act;
- xdp_convert_frame_to_buff(frame, &xdp);
- xdp.rxq = &rq->xdp_rxq;
+ xdp_convert_frame_to_buff(frame, xdp);
+ xdp->rxq = &rq->xdp_rxq;
+ vxbuf.skb = NULL;
- act = bpf_prog_run_xdp(xdp_prog, &xdp);
+ act = bpf_prog_run_xdp(xdp_prog, xdp);
switch (act) {
case XDP_PASS:
- if (xdp_update_frame_from_buff(&xdp, frame))
+ if (xdp_update_frame_from_buff(xdp, frame))
goto err_xdp;
break;
case XDP_TX:
orig_frame = *frame;
- xdp.rxq->mem = frame->mem;
- if (unlikely(veth_xdp_tx(rq, &xdp, bq) < 0)) {
+ xdp->rxq->mem = frame->mem;
+ if (unlikely(veth_xdp_tx(rq, xdp, bq) < 0)) {
trace_xdp_exception(rq->dev, xdp_prog, act);
frame = &orig_frame;
stats->rx_drops++;
@@ -619,8 +626,8 @@ static struct xdp_frame *veth_xdp_rcv_one(struct veth_rq *rq,
goto xdp_xmit;
case XDP_REDIRECT:
orig_frame = *frame;
- xdp.rxq->mem = frame->mem;
- if (xdp_do_redirect(rq->dev, &xdp, xdp_prog)) {
+ xdp->rxq->mem = frame->mem;
+ if (xdp_do_redirect(rq->dev, xdp, xdp_prog)) {
frame = &orig_frame;
stats->rx_drops++;
goto err_xdp;
@@ -801,7 +808,8 @@ static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq,
{
void *orig_data, *orig_data_end;
struct bpf_prog *xdp_prog;
- struct xdp_buff xdp;
+ struct veth_xdp_buff vxbuf;
+ struct xdp_buff *xdp = &vxbuf.xdp;
u32 act, metalen;
int off;
@@ -815,22 +823,23 @@ static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq,
}
__skb_push(skb, skb->data - skb_mac_header(skb));
- if (veth_convert_skb_to_xdp_buff(rq, &xdp, &skb))
+ if (veth_convert_skb_to_xdp_buff(rq, xdp, &skb))
goto drop;
+ vxbuf.skb = skb;
- orig_data = xdp.data;
- orig_data_end = xdp.data_end;
+ orig_data = xdp->data;
+ orig_data_end = xdp->data_end;
- act = bpf_prog_run_xdp(xdp_prog, &xdp);
+ act = bpf_prog_run_xdp(xdp_prog, xdp);
switch (act) {
case XDP_PASS:
break;
case XDP_TX:
- veth_xdp_get(&xdp);
+ veth_xdp_get(xdp);
consume_skb(skb);
- xdp.rxq->mem = rq->xdp_mem;
- if (unlikely(veth_xdp_tx(rq, &xdp, bq) < 0)) {
+ xdp->rxq->mem = rq->xdp_mem;
+ if (unlikely(veth_xdp_tx(rq, xdp, bq) < 0)) {
trace_xdp_exception(rq->dev, xdp_prog, act);
stats->rx_drops++;
goto err_xdp;
@@ -839,10 +848,10 @@ static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq,
rcu_read_unlock();
goto xdp_xmit;
case XDP_REDIRECT:
- veth_xdp_get(&xdp);
+ veth_xdp_get(xdp);
consume_skb(skb);
- xdp.rxq->mem = rq->xdp_mem;
- if (xdp_do_redirect(rq->dev, &xdp, xdp_prog)) {
+ xdp->rxq->mem = rq->xdp_mem;
+ if (xdp_do_redirect(rq->dev, xdp, xdp_prog)) {
stats->rx_drops++;
goto err_xdp;
}
@@ -862,7 +871,7 @@ static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq,
rcu_read_unlock();
/* check if bpf_xdp_adjust_head was used */
- off = orig_data - xdp.data;
+ off = orig_data - xdp->data;
if (off > 0)
__skb_push(skb, off);
else if (off < 0)
@@ -871,21 +880,21 @@ static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq,
skb_reset_mac_header(skb);
/* check if bpf_xdp_adjust_tail was used */
- off = xdp.data_end - orig_data_end;
+ off = xdp->data_end - orig_data_end;
if (off != 0)
__skb_put(skb, off); /* positive on grow, negative on shrink */
/* XDP frag metadata (e.g. nr_frags) are updated in eBPF helpers
* (e.g. bpf_xdp_adjust_tail), we need to update data_len here.
*/
- if (xdp_buff_has_frags(&xdp))
+ if (xdp_buff_has_frags(xdp))
skb->data_len = skb_shinfo(skb)->xdp_frags_size;
else
skb->data_len = 0;
skb->protocol = eth_type_trans(skb, rq->dev);
- metalen = xdp.data - xdp.data_meta;
+ metalen = xdp->data - xdp->data_meta;
if (metalen)
skb_metadata_set(skb, metalen);
out:
@@ -898,7 +907,7 @@ xdp_drop:
return NULL;
err_xdp:
rcu_read_unlock();
- xdp_return_buff(&xdp);
+ xdp_return_buff(xdp);
xdp_xmit:
return NULL;
}
@@ -1596,6 +1605,28 @@ static int veth_xdp(struct net_device *dev, struct netdev_bpf *xdp)
}
}
+static int veth_xdp_rx_timestamp(const struct xdp_md *ctx, u64 *timestamp)
+{
+ struct veth_xdp_buff *_ctx = (void *)ctx;
+
+ if (!_ctx->skb)
+ return -EOPNOTSUPP;
+
+ *timestamp = skb_hwtstamps(_ctx->skb)->hwtstamp;
+ return 0;
+}
+
+static int veth_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash)
+{
+ struct veth_xdp_buff *_ctx = (void *)ctx;
+
+ if (!_ctx->skb)
+ return -EOPNOTSUPP;
+
+ *hash = skb_get_hash(_ctx->skb);
+ return 0;
+}
+
static const struct net_device_ops veth_netdev_ops = {
.ndo_init = veth_dev_init,
.ndo_open = veth_open,
@@ -1617,6 +1648,11 @@ static const struct net_device_ops veth_netdev_ops = {
.ndo_get_peer_dev = veth_peer_dev,
};
+static const struct xdp_metadata_ops veth_xdp_metadata_ops = {
+ .xmo_rx_timestamp = veth_xdp_rx_timestamp,
+ .xmo_rx_hash = veth_xdp_rx_hash,
+};
+
#define VETH_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HW_CSUM | \
NETIF_F_RXCSUM | NETIF_F_SCTP_CRC | NETIF_F_HIGHDMA | \
NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ENCAP_ALL | \
@@ -1633,6 +1669,7 @@ static void veth_setup(struct net_device *dev)
dev->priv_flags |= IFF_PHONY_HEADROOM;
dev->netdev_ops = &veth_netdev_ops;
+ dev->xdp_metadata_ops = &veth_xdp_metadata_ops;
dev->ethtool_ops = &veth_ethtool_ops;
dev->features |= NETIF_F_LLTX;
dev->features |= VETH_FEATURES;
@@ -1649,6 +1686,10 @@ static void veth_setup(struct net_device *dev)
dev->hw_enc_features = VETH_FEATURES;
dev->mpls_features = NETIF_F_HW_CSUM | NETIF_F_GSO_SOFTWARE;
netif_set_tso_max_size(dev, GSO_MAX_SIZE);
+
+ dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT | NETDEV_XDP_ACT_RX_SG |
+ NETDEV_XDP_ACT_NDO_XMIT_SG;
}
/*
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 18b3de854aeb..fb5e68ed3ec2 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -134,7 +134,7 @@ struct send_queue {
struct scatterlist sg[MAX_SKB_FRAGS + 2];
/* Name of the send queue: output.$index */
- char name[40];
+ char name[16];
struct virtnet_sq_stats stats;
@@ -171,7 +171,7 @@ struct receive_queue {
unsigned int min_buf_len;
/* Name of this receive queue: input.$index */
- char name[40];
+ char name[16];
struct xdp_rxq_info xdp_rxq;
};
@@ -446,9 +446,7 @@ static unsigned int mergeable_ctx_to_truesize(void *mrg_ctx)
static struct sk_buff *page_to_skb(struct virtnet_info *vi,
struct receive_queue *rq,
struct page *page, unsigned int offset,
- unsigned int len, unsigned int truesize,
- bool hdr_valid, unsigned int metasize,
- unsigned int headroom)
+ unsigned int len, unsigned int truesize)
{
struct sk_buff *skb;
struct virtio_net_hdr_mrg_rxbuf *hdr;
@@ -466,21 +464,11 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
else
hdr_padded_len = sizeof(struct padded_vnet_hdr);
- /* If headroom is not 0, there is an offset between the beginning of the
- * data and the allocated space, otherwise the data and the allocated
- * space are aligned.
- *
- * Buffers with headroom use PAGE_SIZE as alloc size, see
- * add_recvbuf_mergeable() + get_mergeable_buf_len()
- */
- truesize = headroom ? PAGE_SIZE : truesize;
- tailroom = truesize - headroom;
- buf = p - headroom;
-
+ buf = p;
len -= hdr_len;
offset += hdr_padded_len;
p += hdr_padded_len;
- tailroom -= hdr_padded_len + len;
+ tailroom = truesize - hdr_padded_len - len;
shinfo_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
@@ -510,7 +498,7 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
if (len <= skb_tailroom(skb))
copy = len;
else
- copy = ETH_HLEN + metasize;
+ copy = ETH_HLEN;
skb_put_data(skb, p, copy);
len -= copy;
@@ -549,19 +537,11 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
give_pages(rq, page);
ok:
- /* hdr_valid means no XDP, so we can copy the vnet header */
- if (hdr_valid) {
- hdr = skb_vnet_hdr(skb);
- memcpy(hdr, hdr_p, hdr_len);
- }
+ hdr = skb_vnet_hdr(skb);
+ memcpy(hdr, hdr_p, hdr_len);
if (page_to_free)
put_page(page_to_free);
- if (metasize) {
- __skb_pull(skb, metasize);
- skb_metadata_set(skb, metasize);
- }
-
return skb;
}
@@ -570,22 +550,43 @@ static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
struct xdp_frame *xdpf)
{
struct virtio_net_hdr_mrg_rxbuf *hdr;
- int err;
+ struct skb_shared_info *shinfo;
+ u8 nr_frags = 0;
+ int err, i;
if (unlikely(xdpf->headroom < vi->hdr_len))
return -EOVERFLOW;
- /* Make room for virtqueue hdr (also change xdpf->headroom?) */
+ if (unlikely(xdp_frame_has_frags(xdpf))) {
+ shinfo = xdp_get_shared_info_from_frame(xdpf);
+ nr_frags = shinfo->nr_frags;
+ }
+
+ /* In wrapping function virtnet_xdp_xmit(), we need to free
+ * up the pending old buffers, where we need to calculate the
+ * position of skb_shared_info in xdp_get_frame_len() and
+ * xdp_return_frame(), which will involve to xdpf->data and
+ * xdpf->headroom. Therefore, we need to update the value of
+ * headroom synchronously here.
+ */
+ xdpf->headroom -= vi->hdr_len;
xdpf->data -= vi->hdr_len;
/* Zero header and leave csum up to XDP layers */
hdr = xdpf->data;
memset(hdr, 0, vi->hdr_len);
xdpf->len += vi->hdr_len;
- sg_init_one(sq->sg, xdpf->data, xdpf->len);
+ sg_init_table(sq->sg, nr_frags + 1);
+ sg_set_buf(sq->sg, xdpf->data, xdpf->len);
+ for (i = 0; i < nr_frags; i++) {
+ skb_frag_t *frag = &shinfo->frags[i];
- err = virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdp_to_ptr(xdpf),
- GFP_ATOMIC);
+ sg_set_page(&sq->sg[i + 1], skb_frag_page(frag),
+ skb_frag_size(frag), skb_frag_off(frag));
+ }
+
+ err = virtqueue_add_outbuf(sq->vq, sq->sg, nr_frags + 1,
+ xdp_to_ptr(xdpf), GFP_ATOMIC);
if (unlikely(err))
return -ENOSPC; /* Caller handle free/refcnt */
@@ -665,7 +666,7 @@ static int virtnet_xdp_xmit(struct net_device *dev,
if (likely(is_xdp_frame(ptr))) {
struct xdp_frame *frame = ptr_to_xdp(ptr);
- bytes += frame->len;
+ bytes += xdp_get_frame_len(frame);
xdp_return_frame(frame);
} else {
struct sk_buff *skb = ptr;
@@ -722,7 +723,7 @@ static unsigned int virtnet_get_headroom(struct virtnet_info *vi)
* have enough headroom.
*/
static struct page *xdp_linearize_page(struct receive_queue *rq,
- u16 *num_buf,
+ int *num_buf,
struct page *p,
int offset,
int page_off,
@@ -822,7 +823,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
if (unlikely(xdp_headroom < virtnet_get_headroom(vi))) {
int offset = buf - page_address(page) + header_offset;
unsigned int tlen = len + vi->hdr_len;
- u16 num_buf = 1;
+ int num_buf = 1;
xdp_headroom = virtnet_get_headroom(vi);
header_offset = VIRTNET_RX_PAD + xdp_headroom;
@@ -924,7 +925,7 @@ static struct sk_buff *receive_big(struct net_device *dev,
{
struct page *page = buf;
struct sk_buff *skb =
- page_to_skb(vi, rq, page, 0, len, PAGE_SIZE, true, 0, 0);
+ page_to_skb(vi, rq, page, 0, len, PAGE_SIZE);
stats->bytes += len - vi->hdr_len;
if (unlikely(!skb))
@@ -938,6 +939,143 @@ err:
return NULL;
}
+/* Why not use xdp_build_skb_from_frame() ?
+ * XDP core assumes that xdp frags are PAGE_SIZE in length, while in
+ * virtio-net there are 2 points that do not match its requirements:
+ * 1. The size of the prefilled buffer is not fixed before xdp is set.
+ * 2. xdp_build_skb_from_frame() does more checks that we don't need,
+ * like eth_type_trans() (which virtio-net does in receive_buf()).
+ */
+static struct sk_buff *build_skb_from_xdp_buff(struct net_device *dev,
+ struct virtnet_info *vi,
+ struct xdp_buff *xdp,
+ unsigned int xdp_frags_truesz)
+{
+ struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
+ unsigned int headroom, data_len;
+ struct sk_buff *skb;
+ int metasize;
+ u8 nr_frags;
+
+ if (unlikely(xdp->data_end > xdp_data_hard_end(xdp))) {
+ pr_debug("Error building skb as missing reserved tailroom for xdp");
+ return NULL;
+ }
+
+ if (unlikely(xdp_buff_has_frags(xdp)))
+ nr_frags = sinfo->nr_frags;
+
+ skb = build_skb(xdp->data_hard_start, xdp->frame_sz);
+ if (unlikely(!skb))
+ return NULL;
+
+ headroom = xdp->data - xdp->data_hard_start;
+ data_len = xdp->data_end - xdp->data;
+ skb_reserve(skb, headroom);
+ __skb_put(skb, data_len);
+
+ metasize = xdp->data - xdp->data_meta;
+ metasize = metasize > 0 ? metasize : 0;
+ if (metasize)
+ skb_metadata_set(skb, metasize);
+
+ if (unlikely(xdp_buff_has_frags(xdp)))
+ xdp_update_skb_shared_info(skb, nr_frags,
+ sinfo->xdp_frags_size,
+ xdp_frags_truesz,
+ xdp_buff_is_frag_pfmemalloc(xdp));
+
+ return skb;
+}
+
+/* TODO: build xdp in big mode */
+static int virtnet_build_xdp_buff_mrg(struct net_device *dev,
+ struct virtnet_info *vi,
+ struct receive_queue *rq,
+ struct xdp_buff *xdp,
+ void *buf,
+ unsigned int len,
+ unsigned int frame_sz,
+ int *num_buf,
+ unsigned int *xdp_frags_truesize,
+ struct virtnet_rq_stats *stats)
+{
+ struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
+ unsigned int headroom, tailroom, room;
+ unsigned int truesize, cur_frag_size;
+ struct skb_shared_info *shinfo;
+ unsigned int xdp_frags_truesz = 0;
+ struct page *page;
+ skb_frag_t *frag;
+ int offset;
+ void *ctx;
+
+ xdp_init_buff(xdp, frame_sz, &rq->xdp_rxq);
+ xdp_prepare_buff(xdp, buf - VIRTIO_XDP_HEADROOM,
+ VIRTIO_XDP_HEADROOM + vi->hdr_len, len - vi->hdr_len, true);
+
+ if (!*num_buf)
+ return 0;
+
+ if (*num_buf > 1) {
+ /* If we want to build multi-buffer xdp, we need
+ * to specify that the flags of xdp_buff have the
+ * XDP_FLAGS_HAS_FRAG bit.
+ */
+ if (!xdp_buff_has_frags(xdp))
+ xdp_buff_set_frags_flag(xdp);
+
+ shinfo = xdp_get_shared_info_from_buff(xdp);
+ shinfo->nr_frags = 0;
+ shinfo->xdp_frags_size = 0;
+ }
+
+ if (*num_buf > MAX_SKB_FRAGS + 1)
+ return -EINVAL;
+
+ while (--*num_buf > 0) {
+ buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx);
+ if (unlikely(!buf)) {
+ pr_debug("%s: rx error: %d buffers out of %d missing\n",
+ dev->name, *num_buf,
+ virtio16_to_cpu(vi->vdev, hdr->num_buffers));
+ dev->stats.rx_length_errors++;
+ return -EINVAL;
+ }
+
+ stats->bytes += len;
+ page = virt_to_head_page(buf);
+ offset = buf - page_address(page);
+
+ truesize = mergeable_ctx_to_truesize(ctx);
+ headroom = mergeable_ctx_to_headroom(ctx);
+ tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
+ room = SKB_DATA_ALIGN(headroom + tailroom);
+
+ cur_frag_size = truesize;
+ xdp_frags_truesz += cur_frag_size;
+ if (unlikely(len > truesize - room || cur_frag_size > PAGE_SIZE)) {
+ put_page(page);
+ pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
+ dev->name, len, (unsigned long)(truesize - room));
+ dev->stats.rx_length_errors++;
+ return -EINVAL;
+ }
+
+ frag = &shinfo->frags[shinfo->nr_frags++];
+ __skb_frag_set_page(frag, page);
+ skb_frag_off_set(frag, offset);
+ skb_frag_size_set(frag, len);
+ if (page_is_pfmemalloc(page))
+ xdp_buff_set_frag_pfmemalloc(xdp);
+
+ shinfo->xdp_frags_size += len;
+ }
+
+ *xdp_frags_truesize = xdp_frags_truesz;
+ return 0;
+}
+
static struct sk_buff *receive_mergeable(struct net_device *dev,
struct virtnet_info *vi,
struct receive_queue *rq,
@@ -948,23 +1086,24 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
struct virtnet_rq_stats *stats)
{
struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
- u16 num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
+ int num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
struct page *page = virt_to_head_page(buf);
int offset = buf - page_address(page);
struct sk_buff *head_skb, *curr_skb;
struct bpf_prog *xdp_prog;
unsigned int truesize = mergeable_ctx_to_truesize(ctx);
unsigned int headroom = mergeable_ctx_to_headroom(ctx);
- unsigned int metasize = 0;
- unsigned int frame_sz;
+ unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
+ unsigned int room = SKB_DATA_ALIGN(headroom + tailroom);
+ unsigned int frame_sz, xdp_room;
int err;
head_skb = NULL;
stats->bytes += len - vi->hdr_len;
- if (unlikely(len > truesize)) {
+ if (unlikely(len > truesize - room)) {
pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
- dev->name, len, (unsigned long)ctx);
+ dev->name, len, (unsigned long)(truesize - room));
dev->stats.rx_length_errors++;
goto err_skb;
}
@@ -977,11 +1116,14 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
rcu_read_lock();
xdp_prog = rcu_dereference(rq->xdp_prog);
if (xdp_prog) {
+ unsigned int xdp_frags_truesz = 0;
+ struct skb_shared_info *shinfo;
struct xdp_frame *xdpf;
struct page *xdp_page;
struct xdp_buff xdp;
void *data;
u32 act;
+ int i;
/* Transient failure which in theory could occur if
* in-flight packets from before XDP was enabled reach
@@ -990,19 +1132,23 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
if (unlikely(hdr->hdr.gso_type))
goto err_xdp;
- /* Buffers with headroom use PAGE_SIZE as alloc size,
- * see add_recvbuf_mergeable() + get_mergeable_buf_len()
+ /* Now XDP core assumes frag size is PAGE_SIZE, but buffers
+ * with headroom may add hole in truesize, which
+ * make their length exceed PAGE_SIZE. So we disabled the
+ * hole mechanism for xdp. See add_recvbuf_mergeable().
*/
- frame_sz = headroom ? PAGE_SIZE : truesize;
-
- /* This happens when rx buffer size is underestimated
- * or headroom is not enough because of the buffer
- * was refilled before XDP is set. This should only
- * happen for the first several packets, so we don't
- * care much about its performance.
+ frame_sz = truesize;
+
+ /* This happens when headroom is not enough because
+ * of the buffer was prefilled before XDP is set.
+ * This should only happen for the first several packets.
+ * In fact, vq reset can be used here to help us clean up
+ * the prefilled buffers, but many existing devices do not
+ * support it, and we don't want to bother users who are
+ * using xdp normally.
*/
- if (unlikely(num_buf > 1 ||
- headroom < virtnet_get_headroom(vi))) {
+ if (!xdp_prog->aux->xdp_has_frags &&
+ (num_buf > 1 || headroom < virtnet_get_headroom(vi))) {
/* linearize data for XDP */
xdp_page = xdp_linearize_page(rq, &num_buf,
page, offset,
@@ -1013,82 +1159,53 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
if (!xdp_page)
goto err_xdp;
offset = VIRTIO_XDP_HEADROOM;
+ } else if (unlikely(headroom < virtnet_get_headroom(vi))) {
+ xdp_room = SKB_DATA_ALIGN(VIRTIO_XDP_HEADROOM +
+ sizeof(struct skb_shared_info));
+ if (len + xdp_room > PAGE_SIZE)
+ goto err_xdp;
+
+ xdp_page = alloc_page(GFP_ATOMIC);
+ if (!xdp_page)
+ goto err_xdp;
+
+ memcpy(page_address(xdp_page) + VIRTIO_XDP_HEADROOM,
+ page_address(page) + offset, len);
+ frame_sz = PAGE_SIZE;
+ offset = VIRTIO_XDP_HEADROOM;
} else {
xdp_page = page;
}
- /* Allow consuming headroom but reserve enough space to push
- * the descriptor on if we get an XDP_TX return code.
- */
data = page_address(xdp_page) + offset;
- xdp_init_buff(&xdp, frame_sz - vi->hdr_len, &rq->xdp_rxq);
- xdp_prepare_buff(&xdp, data - VIRTIO_XDP_HEADROOM + vi->hdr_len,
- VIRTIO_XDP_HEADROOM, len - vi->hdr_len, true);
+ err = virtnet_build_xdp_buff_mrg(dev, vi, rq, &xdp, data, len, frame_sz,
+ &num_buf, &xdp_frags_truesz, stats);
+ if (unlikely(err))
+ goto err_xdp_frags;
act = bpf_prog_run_xdp(xdp_prog, &xdp);
stats->xdp_packets++;
switch (act) {
case XDP_PASS:
- metasize = xdp.data - xdp.data_meta;
-
- /* recalculate offset to account for any header
- * adjustments and minus the metasize to copy the
- * metadata in page_to_skb(). Note other cases do not
- * build an skb and avoid using offset
- */
- offset = xdp.data - page_address(xdp_page) -
- vi->hdr_len - metasize;
-
- /* recalculate len if xdp.data, xdp.data_end or
- * xdp.data_meta were adjusted
- */
- len = xdp.data_end - xdp.data + vi->hdr_len + metasize;
-
- /* recalculate headroom if xdp.data or xdp_data_meta
- * were adjusted, note that offset should always point
- * to the start of the reserved bytes for virtio_net
- * header which are followed by xdp.data, that means
- * that offset is equal to the headroom (when buf is
- * starting at the beginning of the page, otherwise
- * there is a base offset inside the page) but it's used
- * with a different starting point (buf start) than
- * xdp.data (buf start + vnet hdr size). If xdp.data or
- * data_meta were adjusted by the xdp prog then the
- * headroom size has changed and so has the offset, we
- * can use data_hard_start, which points at buf start +
- * vnet hdr size, to calculate the new headroom and use
- * it later to compute buf start in page_to_skb()
- */
- headroom = xdp.data - xdp.data_hard_start - metasize;
-
- /* We can only create skb based on xdp_page. */
- if (unlikely(xdp_page != page)) {
- rcu_read_unlock();
+ if (unlikely(xdp_page != page))
put_page(page);
- head_skb = page_to_skb(vi, rq, xdp_page, offset,
- len, PAGE_SIZE, false,
- metasize,
- headroom);
- return head_skb;
- }
- break;
+ head_skb = build_skb_from_xdp_buff(dev, vi, &xdp, xdp_frags_truesz);
+ rcu_read_unlock();
+ return head_skb;
case XDP_TX:
stats->xdp_tx++;
xdpf = xdp_convert_buff_to_frame(&xdp);
if (unlikely(!xdpf)) {
- if (unlikely(xdp_page != page))
- put_page(xdp_page);
- goto err_xdp;
+ netdev_dbg(dev, "convert buff to frame failed for xdp\n");
+ goto err_xdp_frags;
}
err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
if (unlikely(!err)) {
xdp_return_frame_rx_napi(xdpf);
} else if (unlikely(err < 0)) {
trace_xdp_exception(vi->dev, xdp_prog, act);
- if (unlikely(xdp_page != page))
- put_page(xdp_page);
- goto err_xdp;
+ goto err_xdp_frags;
}
*xdp_xmit |= VIRTIO_XDP_TX;
if (unlikely(xdp_page != page))
@@ -1098,11 +1215,8 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
case XDP_REDIRECT:
stats->xdp_redirects++;
err = xdp_do_redirect(dev, &xdp, xdp_prog);
- if (err) {
- if (unlikely(xdp_page != page))
- put_page(xdp_page);
- goto err_xdp;
- }
+ if (err)
+ goto err_xdp_frags;
*xdp_xmit |= VIRTIO_XDP_REDIR;
if (unlikely(xdp_page != page))
put_page(page);
@@ -1115,16 +1229,26 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
trace_xdp_exception(vi->dev, xdp_prog, act);
fallthrough;
case XDP_DROP:
- if (unlikely(xdp_page != page))
- __free_pages(xdp_page, 0);
- goto err_xdp;
+ goto err_xdp_frags;
+ }
+err_xdp_frags:
+ if (unlikely(xdp_page != page))
+ __free_pages(xdp_page, 0);
+
+ if (xdp_buff_has_frags(&xdp)) {
+ shinfo = xdp_get_shared_info_from_buff(&xdp);
+ for (i = 0; i < shinfo->nr_frags; i++) {
+ xdp_page = skb_frag_page(&shinfo->frags[i]);
+ put_page(xdp_page);
+ }
}
+
+ goto err_xdp;
}
rcu_read_unlock();
skip_xdp:
- head_skb = page_to_skb(vi, rq, page, offset, len, truesize, !xdp_prog,
- metasize, headroom);
+ head_skb = page_to_skb(vi, rq, page, offset, len, truesize);
curr_skb = head_skb;
if (unlikely(!curr_skb))
@@ -1146,9 +1270,12 @@ skip_xdp:
page = virt_to_head_page(buf);
truesize = mergeable_ctx_to_truesize(ctx);
- if (unlikely(len > truesize)) {
+ headroom = mergeable_ctx_to_headroom(ctx);
+ tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
+ room = SKB_DATA_ALIGN(headroom + tailroom);
+ if (unlikely(len > truesize - room)) {
pr_debug("%s: rx error: len %u exceeds truesize %lu\n",
- dev->name, len, (unsigned long)ctx);
+ dev->name, len, (unsigned long)(truesize - room));
dev->stats.rx_length_errors++;
goto err_skb;
}
@@ -1251,13 +1378,7 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
if (unlikely(len < vi->hdr_len + ETH_HLEN)) {
pr_debug("%s: short packet %i\n", dev->name, len);
dev->stats.rx_length_errors++;
- if (vi->mergeable_rx_bufs) {
- put_page(virt_to_head_page(buf));
- } else if (vi->big_packets) {
- give_pages(rq, buf);
- } else {
- put_page(virt_to_head_page(buf));
- }
+ virtnet_rq_free_unused_buf(rq->vq, buf);
return;
}
@@ -1426,13 +1547,16 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi,
/* To avoid internal fragmentation, if there is very likely not
* enough space for another buffer, add the remaining space to
* the current buffer.
+ * XDP core assumes that frame_size of xdp_buff and the length
+ * of the frag are PAGE_SIZE, so we disable the hole mechanism.
*/
- len += hole;
+ if (!headroom)
+ len += hole;
alloc_frag->offset += hole;
}
sg_init_one(rq->sg, buf, len);
- ctx = mergeable_len_to_ctx(len, headroom);
+ ctx = mergeable_len_to_ctx(len + room, headroom);
err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
if (err < 0)
put_page(virt_to_head_page(buf));
@@ -1608,7 +1732,7 @@ static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi)
} else {
struct xdp_frame *frame = ptr_to_xdp(ptr);
- bytes += frame->len;
+ bytes += xdp_get_frame_len(frame);
xdp_return_frame(frame);
}
packets++;
@@ -1677,13 +1801,13 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
received = virtnet_receive(rq, budget, &xdp_xmit);
+ if (xdp_xmit & VIRTIO_XDP_REDIR)
+ xdp_do_flush();
+
/* Out of packets? */
if (received < budget)
virtqueue_napi_complete(napi, rq->vq, received);
- if (xdp_xmit & VIRTIO_XDP_REDIR)
- xdp_do_flush();
-
if (xdp_xmit & VIRTIO_XDP_TX) {
sq = virtnet_xdp_get_sq(vi);
if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
@@ -2158,9 +2282,9 @@ static int virtnet_close(struct net_device *dev)
cancel_delayed_work_sync(&vi->refill);
for (i = 0; i < vi->max_queue_pairs; i++) {
- xdp_rxq_info_unreg(&vi->rq[i].xdp_rxq);
- napi_disable(&vi->rq[i].napi);
virtnet_napi_tx_disable(&vi->sq[i].napi);
+ napi_disable(&vi->rq[i].napi);
+ xdp_rxq_info_unreg(&vi->rq[i].xdp_rxq);
}
return 0;
@@ -3080,7 +3204,9 @@ static int virtnet_restore_guest_offloads(struct virtnet_info *vi)
static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
struct netlink_ext_ack *extack)
{
- unsigned long int max_sz = PAGE_SIZE - sizeof(struct padded_vnet_hdr);
+ unsigned int room = SKB_DATA_ALIGN(VIRTIO_XDP_HEADROOM +
+ sizeof(struct skb_shared_info));
+ unsigned int max_sz = PAGE_SIZE - room - ETH_HLEN;
struct virtnet_info *vi = netdev_priv(dev);
struct bpf_prog *old_prog;
u16 xdp_qp = 0, curr_qp;
@@ -3103,9 +3229,9 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
return -EINVAL;
}
- if (dev->mtu > max_sz) {
- NL_SET_ERR_MSG_MOD(extack, "MTU too large to enable XDP");
- netdev_warn(dev, "XDP requires MTU less than %lu\n", max_sz);
+ if (prog && !prog->aux->xdp_has_frags && dev->mtu > max_sz) {
+ NL_SET_ERR_MSG_MOD(extack, "MTU too large to enable XDP without frags");
+ netdev_warn(dev, "single-buffer XDP requires MTU less than %u\n", max_sz);
return -EINVAL;
}
@@ -3157,7 +3283,10 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
if (i == 0 && !old_prog)
virtnet_clear_guest_offloads(vi);
}
+ if (!old_prog)
+ xdp_features_set_redirect_target(dev, true);
} else {
+ xdp_features_clear_redirect_target(dev);
vi->xdp_enabled = false;
}
@@ -3690,6 +3819,12 @@ static int virtnet_validate(struct virtio_device *vdev)
__virtio_clear_bit(vdev, VIRTIO_NET_F_MTU);
}
+ if (virtio_has_feature(vdev, VIRTIO_NET_F_STANDBY) &&
+ !virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) {
+ dev_warn(&vdev->dev, "device advertises feature VIRTIO_NET_F_STANDBY but not VIRTIO_NET_F_MAC, disabling standby");
+ __virtio_clear_bit(vdev, VIRTIO_NET_F_STANDBY);
+ }
+
return 0;
}
@@ -3787,6 +3922,7 @@ static int virtnet_probe(struct virtio_device *vdev)
dev->hw_features |= NETIF_F_GRO_HW;
dev->vlan_features = dev->features;
+ dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT;
/* MTU range: 68 - 65535 */
dev->min_mtu = MIN_MTU;
@@ -3802,6 +3938,8 @@ static int virtnet_probe(struct virtio_device *vdev)
eth_hw_addr_set(dev, addr);
} else {
eth_hw_addr_random(dev);
+ dev_info(&vdev->dev, "Assigned random MAC address %pM\n",
+ dev->dev_addr);
}
/* Set up our device-specific information */
@@ -3813,8 +3951,10 @@ static int virtnet_probe(struct virtio_device *vdev)
INIT_WORK(&vi->config_work, virtnet_config_changed_work);
spin_lock_init(&vi->refill_lock);
- if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
+ if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) {
vi->mergeable_rx_bufs = true;
+ dev->xdp_features |= NETDEV_XDP_ACT_RX_SG;
+ }
if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) {
vi->rx_usecs = 0;
@@ -3929,6 +4069,24 @@ static int virtnet_probe(struct virtio_device *vdev)
virtio_device_ready(vdev);
+ /* a random MAC address has been assigned, notify the device.
+ * We don't fail probe if VIRTIO_NET_F_CTRL_MAC_ADDR is not there
+ * because many devices work fine without getting MAC explicitly
+ */
+ if (!virtio_has_feature(vdev, VIRTIO_NET_F_MAC) &&
+ virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
+ struct scatterlist sg;
+
+ sg_init_one(&sg, dev->dev_addr, dev->addr_len);
+ if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
+ VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg)) {
+ pr_debug("virtio_net: setting MAC address failed\n");
+ rtnl_unlock();
+ err = -EINVAL;
+ goto free_unregister_netdev;
+ }
+ }
+
rtnl_unlock();
err = virtnet_cpu_notif_add(vi);
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 56267c327f0b..682987040ea8 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -1546,31 +1546,6 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
rxd->len = rbi->len;
}
-#ifdef VMXNET3_RSS
- if (rcd->rssType != VMXNET3_RCD_RSS_TYPE_NONE &&
- (adapter->netdev->features & NETIF_F_RXHASH)) {
- enum pkt_hash_types hash_type;
-
- switch (rcd->rssType) {
- case VMXNET3_RCD_RSS_TYPE_IPV4:
- case VMXNET3_RCD_RSS_TYPE_IPV6:
- hash_type = PKT_HASH_TYPE_L3;
- break;
- case VMXNET3_RCD_RSS_TYPE_TCPIPV4:
- case VMXNET3_RCD_RSS_TYPE_TCPIPV6:
- case VMXNET3_RCD_RSS_TYPE_UDPIPV4:
- case VMXNET3_RCD_RSS_TYPE_UDPIPV6:
- hash_type = PKT_HASH_TYPE_L4;
- break;
- default:
- hash_type = PKT_HASH_TYPE_L3;
- break;
- }
- skb_set_hash(ctx->skb,
- le32_to_cpu(rcd->rssHash),
- hash_type);
- }
-#endif
skb_record_rx_queue(ctx->skb, rq->qid);
skb_put(ctx->skb, rcd->len);
@@ -1653,6 +1628,31 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
u32 mtu = adapter->netdev->mtu;
skb->len += skb->data_len;
+#ifdef VMXNET3_RSS
+ if (rcd->rssType != VMXNET3_RCD_RSS_TYPE_NONE &&
+ (adapter->netdev->features & NETIF_F_RXHASH)) {
+ enum pkt_hash_types hash_type;
+
+ switch (rcd->rssType) {
+ case VMXNET3_RCD_RSS_TYPE_IPV4:
+ case VMXNET3_RCD_RSS_TYPE_IPV6:
+ hash_type = PKT_HASH_TYPE_L3;
+ break;
+ case VMXNET3_RCD_RSS_TYPE_TCPIPV4:
+ case VMXNET3_RCD_RSS_TYPE_TCPIPV6:
+ case VMXNET3_RCD_RSS_TYPE_UDPIPV4:
+ case VMXNET3_RCD_RSS_TYPE_UDPIPV6:
+ hash_type = PKT_HASH_TYPE_L4;
+ break;
+ default:
+ hash_type = PKT_HASH_TYPE_L3;
+ break;
+ }
+ skb_set_hash(skb,
+ le32_to_cpu(rcd->rssHash),
+ hash_type);
+ }
+#endif
vmxnet3_rx_csum(adapter, skb,
(union Vmxnet3_GenericDesc *)rcd);
skb->protocol = eth_type_trans(skb, adapter->netdev);
diff --git a/drivers/net/wireless/ath/Kconfig b/drivers/net/wireless/ath/Kconfig
index d88edbf1bea3..910c10028b14 100644
--- a/drivers/net/wireless/ath/Kconfig
+++ b/drivers/net/wireless/ath/Kconfig
@@ -63,5 +63,6 @@ source "drivers/net/wireless/ath/wil6210/Kconfig"
source "drivers/net/wireless/ath/ath10k/Kconfig"
source "drivers/net/wireless/ath/wcn36xx/Kconfig"
source "drivers/net/wireless/ath/ath11k/Kconfig"
+source "drivers/net/wireless/ath/ath12k/Kconfig"
endif
diff --git a/drivers/net/wireless/ath/Makefile b/drivers/net/wireless/ath/Makefile
index 8e4ae9de5ae4..8d6e6e218d24 100644
--- a/drivers/net/wireless/ath/Makefile
+++ b/drivers/net/wireless/ath/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_WIL6210) += wil6210/
obj-$(CONFIG_ATH10K) += ath10k/
obj-$(CONFIG_WCN36XX) += wcn36xx/
obj-$(CONFIG_ATH11K) += ath11k/
+obj-$(CONFIG_ATH12K) += ath12k/
obj-$(CONFIG_ATH_COMMON) += ath.o
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
index 59926227bd49..c2f3bd35c392 100644
--- a/drivers/net/wireless/ath/ath10k/ce.c
+++ b/drivers/net/wireless/ath/ath10k/ce.c
@@ -208,14 +208,6 @@ ath10k_ce_shadow_src_ring_write_index_set(struct ath10k *ar,
ath10k_ce_write32(ar, shadow_sr_wr_ind_addr(ar, ce_state), value);
}
-static inline void
-ath10k_ce_shadow_dest_ring_write_index_set(struct ath10k *ar,
- struct ath10k_ce_pipe *ce_state,
- unsigned int value)
-{
- ath10k_ce_write32(ar, shadow_dst_wr_ind_addr(ar, ce_state), value);
-}
-
static inline void ath10k_ce_src_ring_base_addr_set(struct ath10k *ar,
u32 ce_id,
u64 addr)
diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c
index cfcb759a87de..9a82f0336d95 100644
--- a/drivers/net/wireless/ath/ath10k/snoc.c
+++ b/drivers/net/wireless/ath/ath10k/snoc.c
@@ -1639,7 +1639,7 @@ static int ath10k_fw_init(struct ath10k *ar)
ret = iommu_map(iommu_dom, ar_snoc->fw.fw_start_addr,
ar->msa.paddr, ar->msa.mem_size,
- IOMMU_READ | IOMMU_WRITE);
+ IOMMU_READ | IOMMU_WRITE, GFP_KERNEL);
if (ret) {
ath10k_err(ar, "failed to map firmware region: %d\n", ret);
goto err_iommu_detach;
diff --git a/drivers/net/wireless/ath/ath11k/ahb.c b/drivers/net/wireless/ath/ath11k/ahb.c
index d34a4d6325b2..920abce9053a 100644
--- a/drivers/net/wireless/ath/ath11k/ahb.c
+++ b/drivers/net/wireless/ath/ath11k/ahb.c
@@ -32,6 +32,9 @@ static const struct of_device_id ath11k_ahb_of_match[] = {
{ .compatible = "qcom,wcn6750-wifi",
.data = (void *)ATH11K_HW_WCN6750_HW10,
},
+ { .compatible = "qcom,ipq5018-wifi",
+ .data = (void *)ATH11K_HW_IPQ5018_HW10,
+ },
{ }
};
@@ -267,30 +270,42 @@ static void ath11k_ahb_clearbit32(struct ath11k_base *ab, u8 bit, u32 offset)
static void ath11k_ahb_ce_irq_enable(struct ath11k_base *ab, u16 ce_id)
{
const struct ce_attr *ce_attr;
+ const struct ce_ie_addr *ce_ie_addr = ab->hw_params.ce_ie_addr;
+ u32 ie1_reg_addr, ie2_reg_addr, ie3_reg_addr;
+
+ ie1_reg_addr = ce_ie_addr->ie1_reg_addr + ATH11K_CE_OFFSET(ab);
+ ie2_reg_addr = ce_ie_addr->ie2_reg_addr + ATH11K_CE_OFFSET(ab);
+ ie3_reg_addr = ce_ie_addr->ie3_reg_addr + ATH11K_CE_OFFSET(ab);
ce_attr = &ab->hw_params.host_ce_config[ce_id];
if (ce_attr->src_nentries)
- ath11k_ahb_setbit32(ab, ce_id, CE_HOST_IE_ADDRESS);
+ ath11k_ahb_setbit32(ab, ce_id, ie1_reg_addr);
if (ce_attr->dest_nentries) {
- ath11k_ahb_setbit32(ab, ce_id, CE_HOST_IE_2_ADDRESS);
+ ath11k_ahb_setbit32(ab, ce_id, ie2_reg_addr);
ath11k_ahb_setbit32(ab, ce_id + CE_HOST_IE_3_SHIFT,
- CE_HOST_IE_3_ADDRESS);
+ ie3_reg_addr);
}
}
static void ath11k_ahb_ce_irq_disable(struct ath11k_base *ab, u16 ce_id)
{
const struct ce_attr *ce_attr;
+ const struct ce_ie_addr *ce_ie_addr = ab->hw_params.ce_ie_addr;
+ u32 ie1_reg_addr, ie2_reg_addr, ie3_reg_addr;
+
+ ie1_reg_addr = ce_ie_addr->ie1_reg_addr + ATH11K_CE_OFFSET(ab);
+ ie2_reg_addr = ce_ie_addr->ie2_reg_addr + ATH11K_CE_OFFSET(ab);
+ ie3_reg_addr = ce_ie_addr->ie3_reg_addr + ATH11K_CE_OFFSET(ab);
ce_attr = &ab->hw_params.host_ce_config[ce_id];
if (ce_attr->src_nentries)
- ath11k_ahb_clearbit32(ab, ce_id, CE_HOST_IE_ADDRESS);
+ ath11k_ahb_clearbit32(ab, ce_id, ie1_reg_addr);
if (ce_attr->dest_nentries) {
- ath11k_ahb_clearbit32(ab, ce_id, CE_HOST_IE_2_ADDRESS);
+ ath11k_ahb_clearbit32(ab, ce_id, ie2_reg_addr);
ath11k_ahb_clearbit32(ab, ce_id + CE_HOST_IE_3_SHIFT,
- CE_HOST_IE_3_ADDRESS);
+ ie3_reg_addr);
}
}
@@ -1021,7 +1036,7 @@ static int ath11k_ahb_fw_resources_init(struct ath11k_base *ab)
ret = iommu_map(iommu_dom, ab_ahb->fw.msa_paddr,
ab_ahb->fw.msa_paddr, ab_ahb->fw.msa_size,
- IOMMU_READ | IOMMU_WRITE);
+ IOMMU_READ | IOMMU_WRITE, GFP_KERNEL);
if (ret) {
ath11k_err(ab, "failed to map firmware region: %d\n", ret);
goto err_iommu_detach;
@@ -1029,7 +1044,7 @@ static int ath11k_ahb_fw_resources_init(struct ath11k_base *ab)
ret = iommu_map(iommu_dom, ab_ahb->fw.ce_paddr,
ab_ahb->fw.ce_paddr, ab_ahb->fw.ce_size,
- IOMMU_READ | IOMMU_WRITE);
+ IOMMU_READ | IOMMU_WRITE, GFP_KERNEL);
if (ret) {
ath11k_err(ab, "failed to map firmware CE region: %d\n", ret);
goto err_iommu_unmap;
@@ -1150,6 +1165,22 @@ static int ath11k_ahb_probe(struct platform_device *pdev)
if (ret)
goto err_core_free;
+ ab->mem_ce = ab->mem;
+
+ if (ab->hw_params.ce_remap) {
+ const struct ce_remap *ce_remap = ab->hw_params.ce_remap;
+ /* ce register space is moved out of wcss unlike ipq8074 or ipq6018
+ * and the space is not contiguous, hence remapping the CE registers
+ * to a new space for accessing them.
+ */
+ ab->mem_ce = ioremap(ce_remap->base, ce_remap->size);
+ if (IS_ERR(ab->mem_ce)) {
+ dev_err(&pdev->dev, "ce ioremap error\n");
+ ret = -ENOMEM;
+ goto err_core_free;
+ }
+ }
+
ret = ath11k_ahb_fw_resources_init(ab);
if (ret)
goto err_core_free;
@@ -1236,6 +1267,10 @@ static void ath11k_ahb_free_resources(struct ath11k_base *ab)
ath11k_ahb_release_smp2p_handle(ab);
ath11k_ahb_fw_resource_deinit(ab);
ath11k_ce_free_pipes(ab);
+
+ if (ab->hw_params.ce_remap)
+ iounmap(ab->mem_ce);
+
ath11k_core_free(ab);
platform_set_drvdata(pdev, NULL);
}
diff --git a/drivers/net/wireless/ath/ath11k/ce.h b/drivers/net/wireless/ath/ath11k/ce.h
index 9644ff909502..1fc6360e7f01 100644
--- a/drivers/net/wireless/ath/ath11k/ce.h
+++ b/drivers/net/wireless/ath/ath11k/ce.h
@@ -49,6 +49,11 @@ void ath11k_ce_byte_swap(void *mem, u32 len);
#define CE_HOST_IE_2_ADDRESS 0x00A18040
#define CE_HOST_IE_3_ADDRESS CE_HOST_IE_ADDRESS
+/* CE IE registers are different for IPQ5018 */
+#define CE_HOST_IPQ5018_IE_ADDRESS 0x0841804C
+#define CE_HOST_IPQ5018_IE_2_ADDRESS 0x08418050
+#define CE_HOST_IPQ5018_IE_3_ADDRESS CE_HOST_IPQ5018_IE_ADDRESS
+
#define CE_HOST_IE_3_SHIFT 0xC
#define CE_RING_IDX_INCR(nentries_mask, idx) (((idx) + 1) & (nentries_mask))
@@ -84,6 +89,17 @@ struct ce_pipe_config {
__le32 reserved;
};
+struct ce_ie_addr {
+ u32 ie1_reg_addr;
+ u32 ie2_reg_addr;
+ u32 ie3_reg_addr;
+};
+
+struct ce_remap {
+ u32 base;
+ u32 size;
+};
+
struct ce_attr {
/* CE_ATTR_* values */
unsigned int flags;
diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c
index edf78df9b12f..75fdbe4ef83a 100644
--- a/drivers/net/wireless/ath/ath11k/core.c
+++ b/drivers/net/wireless/ath/ath11k/core.c
@@ -54,6 +54,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.target_ce_count = 11,
.svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_ipq8074,
.svc_to_ce_map_len = 21,
+ .ce_ie_addr = &ath11k_ce_ie_addr_ipq8074,
.single_pdev_only = false,
.rxdma1_enable = true,
.num_rxmda_per_pdev = 1,
@@ -115,6 +116,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.tcl_ring_retry = true,
.tx_ring_size = DP_TCL_DATA_RING_SIZE,
.smp2p_wow_exit = false,
+ .ftm_responder = true,
},
{
.hw_rev = ATH11K_HW_IPQ6018_HW10,
@@ -137,6 +139,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.target_ce_count = 11,
.svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_ipq6018,
.svc_to_ce_map_len = 19,
+ .ce_ie_addr = &ath11k_ce_ie_addr_ipq8074,
.single_pdev_only = false,
.rxdma1_enable = true,
.num_rxmda_per_pdev = 1,
@@ -196,6 +199,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.tx_ring_size = DP_TCL_DATA_RING_SIZE,
.smp2p_wow_exit = false,
.support_fw_mac_sequence = false,
+ .ftm_responder = true,
},
{
.name = "qca6390 hw2.0",
@@ -218,6 +222,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.target_ce_count = 9,
.svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_qca6390,
.svc_to_ce_map_len = 14,
+ .ce_ie_addr = &ath11k_ce_ie_addr_ipq8074,
.single_pdev_only = true,
.rxdma1_enable = false,
.num_rxmda_per_pdev = 2,
@@ -279,6 +284,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.tx_ring_size = DP_TCL_DATA_RING_SIZE,
.smp2p_wow_exit = false,
.support_fw_mac_sequence = true,
+ .ftm_responder = false,
},
{
.name = "qcn9074 hw1.0",
@@ -301,6 +307,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.target_ce_count = 9,
.svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_qcn9074,
.svc_to_ce_map_len = 18,
+ .ce_ie_addr = &ath11k_ce_ie_addr_ipq8074,
.rxdma1_enable = true,
.num_rxmda_per_pdev = 1,
.rx_mac_buf_ring = false,
@@ -359,6 +366,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.tx_ring_size = DP_TCL_DATA_RING_SIZE,
.smp2p_wow_exit = false,
.support_fw_mac_sequence = false,
+ .ftm_responder = true,
},
{
.name = "wcn6855 hw2.0",
@@ -381,6 +389,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.target_ce_count = 9,
.svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_qca6390,
.svc_to_ce_map_len = 14,
+ .ce_ie_addr = &ath11k_ce_ie_addr_ipq8074,
.single_pdev_only = true,
.rxdma1_enable = false,
.num_rxmda_per_pdev = 2,
@@ -442,6 +451,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.tx_ring_size = DP_TCL_DATA_RING_SIZE,
.smp2p_wow_exit = false,
.support_fw_mac_sequence = true,
+ .ftm_responder = false,
},
{
.name = "wcn6855 hw2.1",
@@ -524,6 +534,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.tx_ring_size = DP_TCL_DATA_RING_SIZE,
.smp2p_wow_exit = false,
.support_fw_mac_sequence = true,
+ .ftm_responder = false,
},
{
.name = "wcn6750 hw1.0",
@@ -546,6 +557,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.target_ce_count = 9,
.svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_qca6390,
.svc_to_ce_map_len = 14,
+ .ce_ie_addr = &ath11k_ce_ie_addr_ipq8074,
.single_pdev_only = true,
.rxdma1_enable = false,
.num_rxmda_per_pdev = 1,
@@ -603,6 +615,87 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.tx_ring_size = DP_TCL_DATA_RING_SIZE_WCN6750,
.smp2p_wow_exit = true,
.support_fw_mac_sequence = true,
+ .ftm_responder = false,
+ },
+ {
+ .hw_rev = ATH11K_HW_IPQ5018_HW10,
+ .name = "ipq5018 hw1.0",
+ .fw = {
+ .dir = "IPQ5018/hw1.0",
+ .board_size = 256 * 1024,
+ .cal_offset = 128 * 1024,
+ },
+ .max_radios = MAX_RADIOS_5018,
+ .bdf_addr = 0x4BA00000,
+ /* hal_desc_sz and hw ops are similar to qcn9074 */
+ .hal_desc_sz = sizeof(struct hal_rx_desc_qcn9074),
+ .qmi_service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_IPQ8074,
+ .ring_mask = &ath11k_hw_ring_mask_ipq8074,
+ .credit_flow = false,
+ .max_tx_ring = 1,
+ .spectral = {
+ .fft_sz = 2,
+ .fft_pad_sz = 0,
+ .summary_pad_sz = 16,
+ .fft_hdr_len = 24,
+ .max_fft_bins = 1024,
+ },
+ .internal_sleep_clock = false,
+ .regs = &ipq5018_regs,
+ .hw_ops = &ipq5018_ops,
+ .host_ce_config = ath11k_host_ce_config_qcn9074,
+ .ce_count = CE_CNT_5018,
+ .target_ce_config = ath11k_target_ce_config_wlan_ipq5018,
+ .target_ce_count = TARGET_CE_CNT_5018,
+ .svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_ipq5018,
+ .svc_to_ce_map_len = SVC_CE_MAP_LEN_5018,
+ .ce_ie_addr = &ath11k_ce_ie_addr_ipq5018,
+ .ce_remap = &ath11k_ce_remap_ipq5018,
+ .rxdma1_enable = true,
+ .num_rxmda_per_pdev = RXDMA_PER_PDEV_5018,
+ .rx_mac_buf_ring = false,
+ .vdev_start_delay = false,
+ .htt_peer_map_v2 = true,
+ .interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_MESH_POINT),
+ .supports_monitor = false,
+ .supports_sta_ps = false,
+ .supports_shadow_regs = false,
+ .fw_mem_mode = 0,
+ .num_vdevs = 16 + 1,
+ .num_peers = 512,
+ .supports_regdb = false,
+ .idle_ps = false,
+ .supports_suspend = false,
+ .hal_params = &ath11k_hw_hal_params_ipq8074,
+ .single_pdev_only = false,
+ .cold_boot_calib = true,
+ .fix_l1ss = true,
+ .supports_dynamic_smps_6ghz = false,
+ .alloc_cacheable_memory = true,
+ .supports_rssi_stats = false,
+ .fw_wmi_diag_event = false,
+ .current_cc_support = false,
+ .dbr_debug_support = true,
+ .global_reset = false,
+ .bios_sar_capa = NULL,
+ .m3_fw_support = false,
+ .fixed_bdf_addr = true,
+ .fixed_mem_region = true,
+ .static_window_map = false,
+ .hybrid_bus_type = false,
+ .fixed_fw_mem = false,
+ .support_off_channel_tx = false,
+ .supports_multi_bssid = false,
+
+ .sram_dump = {},
+
+ .tcl_ring_retry = true,
+ .tx_ring_size = DP_TCL_DATA_RING_SIZE,
+ .smp2p_wow_exit = false,
+ .support_fw_mac_sequence = false,
+ .ftm_responder = true,
},
};
diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h
index 22460b0abf03..0830276e5028 100644
--- a/drivers/net/wireless/ath/ath11k/core.h
+++ b/drivers/net/wireless/ath/ath11k/core.h
@@ -142,6 +142,7 @@ enum ath11k_hw_rev {
ATH11K_HW_WCN6855_HW20,
ATH11K_HW_WCN6855_HW21,
ATH11K_HW_WCN6750_HW10,
+ ATH11K_HW_IPQ5018_HW10,
};
enum ath11k_firmware_mode {
@@ -230,6 +231,13 @@ struct ath11k_he {
#define MAX_RADIOS 3
+/* ipq5018 hw param macros */
+#define MAX_RADIOS_5018 1
+#define CE_CNT_5018 6
+#define TARGET_CE_CNT_5018 9
+#define SVC_CE_MAP_LEN_5018 17
+#define RXDMA_PER_PDEV_5018 1
+
enum {
WMI_HOST_TP_SCALE_MAX = 0,
WMI_HOST_TP_SCALE_50 = 1,
@@ -338,6 +346,7 @@ struct ath11k_vif {
bool is_started;
bool is_up;
+ bool ftm_responder;
bool spectral_enabled;
bool ps;
u32 aid;
@@ -512,8 +521,8 @@ struct ath11k_sta {
#define ATH11K_MIN_5G_FREQ 4150
#define ATH11K_MIN_6G_FREQ 5925
#define ATH11K_MAX_6G_FREQ 7115
-#define ATH11K_NUM_CHANS 101
-#define ATH11K_MAX_5G_CHAN 173
+#define ATH11K_NUM_CHANS 102
+#define ATH11K_MAX_5G_CHAN 177
enum ath11k_state {
ATH11K_STATE_OFF,
@@ -843,6 +852,7 @@ struct ath11k_base {
struct ath11k_dp dp;
void __iomem *mem;
+ void __iomem *mem_ce;
unsigned long mem_len;
struct {
@@ -912,7 +922,6 @@ struct ath11k_base {
enum ath11k_dfs_region dfs_region;
#ifdef CONFIG_ATH11K_DEBUGFS
struct dentry *debugfs_soc;
- struct dentry *debugfs_ath11k;
#endif
struct ath11k_soc_dp_stats soc_stats;
@@ -1138,6 +1147,9 @@ extern const struct service_to_pipe ath11k_target_service_to_ce_map_wlan_ipq6018
extern const struct ce_pipe_config ath11k_target_ce_config_wlan_qca6390[];
extern const struct service_to_pipe ath11k_target_service_to_ce_map_wlan_qca6390[];
+extern const struct ce_pipe_config ath11k_target_ce_config_wlan_ipq5018[];
+extern const struct service_to_pipe ath11k_target_service_to_ce_map_wlan_ipq5018[];
+
extern const struct ce_pipe_config ath11k_target_ce_config_wlan_qcn9074[];
extern const struct service_to_pipe ath11k_target_service_to_ce_map_wlan_qcn9074[];
int ath11k_core_qmi_firmware_ready(struct ath11k_base *ab);
diff --git a/drivers/net/wireless/ath/ath11k/debugfs.c b/drivers/net/wireless/ath/ath11k/debugfs.c
index ccdf3d5ba1ab..5bb6fd17fdf6 100644
--- a/drivers/net/wireless/ath/ath11k/debugfs.c
+++ b/drivers/net/wireless/ath/ath11k/debugfs.c
@@ -976,10 +976,6 @@ int ath11k_debugfs_pdev_create(struct ath11k_base *ab)
if (test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags))
return 0;
- ab->debugfs_soc = debugfs_create_dir(ab->hw_params.name, ab->debugfs_ath11k);
- if (IS_ERR(ab->debugfs_soc))
- return PTR_ERR(ab->debugfs_soc);
-
debugfs_create_file("simulate_fw_crash", 0600, ab->debugfs_soc, ab,
&fops_simulate_fw_crash);
@@ -1001,15 +997,51 @@ void ath11k_debugfs_pdev_destroy(struct ath11k_base *ab)
int ath11k_debugfs_soc_create(struct ath11k_base *ab)
{
- ab->debugfs_ath11k = debugfs_create_dir("ath11k", NULL);
+ struct dentry *root;
+ bool dput_needed;
+ char name[64];
+ int ret;
+
+ root = debugfs_lookup("ath11k", NULL);
+ if (!root) {
+ root = debugfs_create_dir("ath11k", NULL);
+ if (IS_ERR_OR_NULL(root))
+ return PTR_ERR(root);
+
+ dput_needed = false;
+ } else {
+ /* a dentry from lookup() needs dput() after we don't use it */
+ dput_needed = true;
+ }
+
+ scnprintf(name, sizeof(name), "%s-%s", ath11k_bus_str(ab->hif.bus),
+ dev_name(ab->dev));
+
+ ab->debugfs_soc = debugfs_create_dir(name, root);
+ if (IS_ERR_OR_NULL(ab->debugfs_soc)) {
+ ret = PTR_ERR(ab->debugfs_soc);
+ goto out;
+ }
+
+ ret = 0;
- return PTR_ERR_OR_ZERO(ab->debugfs_ath11k);
+out:
+ if (dput_needed)
+ dput(root);
+
+ return ret;
}
void ath11k_debugfs_soc_destroy(struct ath11k_base *ab)
{
- debugfs_remove_recursive(ab->debugfs_ath11k);
- ab->debugfs_ath11k = NULL;
+ debugfs_remove_recursive(ab->debugfs_soc);
+ ab->debugfs_soc = NULL;
+
+ /* We are not removing ath11k directory on purpose, even if it
+ * would be empty. This simplifies the directory handling and it's
+ * a minor cosmetic issue to leave an empty ath11k directory to
+ * debugfs.
+ */
}
EXPORT_SYMBOL(ath11k_debugfs_soc_destroy);
diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c
index c5a4c34d7749..b65a84a88264 100644
--- a/drivers/net/wireless/ath/ath11k/dp_rx.c
+++ b/drivers/net/wireless/ath/ath11k/dp_rx.c
@@ -1535,13 +1535,12 @@ struct htt_ppdu_stats_info *ath11k_dp_htt_get_ppdu_desc(struct ath11k *ar,
{
struct htt_ppdu_stats_info *ppdu_info;
- spin_lock_bh(&ar->data_lock);
+ lockdep_assert_held(&ar->data_lock);
+
if (!list_empty(&ar->ppdu_stats_info)) {
list_for_each_entry(ppdu_info, &ar->ppdu_stats_info, list) {
- if (ppdu_info->ppdu_id == ppdu_id) {
- spin_unlock_bh(&ar->data_lock);
+ if (ppdu_info->ppdu_id == ppdu_id)
return ppdu_info;
- }
}
if (ar->ppdu_stat_list_depth > HTT_PPDU_DESC_MAX_DEPTH) {
@@ -1553,16 +1552,13 @@ struct htt_ppdu_stats_info *ath11k_dp_htt_get_ppdu_desc(struct ath11k *ar,
kfree(ppdu_info);
}
}
- spin_unlock_bh(&ar->data_lock);
ppdu_info = kzalloc(sizeof(*ppdu_info), GFP_ATOMIC);
if (!ppdu_info)
return NULL;
- spin_lock_bh(&ar->data_lock);
list_add_tail(&ppdu_info->list, &ar->ppdu_stats_info);
ar->ppdu_stat_list_depth++;
- spin_unlock_bh(&ar->data_lock);
return ppdu_info;
}
@@ -1586,16 +1582,17 @@ static int ath11k_htt_pull_ppdu_stats(struct ath11k_base *ab,
ar = ath11k_mac_get_ar_by_pdev_id(ab, pdev_id);
if (!ar) {
ret = -EINVAL;
- goto exit;
+ goto out;
}
if (ath11k_debugfs_is_pktlog_lite_mode_enabled(ar))
trace_ath11k_htt_ppdu_stats(ar, skb->data, len);
+ spin_lock_bh(&ar->data_lock);
ppdu_info = ath11k_dp_htt_get_ppdu_desc(ar, ppdu_id);
if (!ppdu_info) {
ret = -EINVAL;
- goto exit;
+ goto out_unlock_data;
}
ppdu_info->ppdu_id = ppdu_id;
@@ -1604,10 +1601,13 @@ static int ath11k_htt_pull_ppdu_stats(struct ath11k_base *ab,
(void *)ppdu_info);
if (ret) {
ath11k_warn(ab, "Failed to parse tlv %d\n", ret);
- goto exit;
+ goto out_unlock_data;
}
-exit:
+out_unlock_data:
+ spin_unlock_bh(&ar->data_lock);
+
+out:
rcu_read_unlock();
return ret;
@@ -3126,6 +3126,7 @@ int ath11k_peer_rx_frag_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id
if (!peer) {
ath11k_warn(ab, "failed to find the peer to set up fragment info\n");
spin_unlock_bh(&ab->base_lock);
+ crypto_free_shash(tfm);
return -ENOENT;
}
@@ -5022,6 +5023,7 @@ static int ath11k_dp_rx_mon_deliver(struct ath11k *ar, u32 mac_id,
} else {
rxs->flag |= RX_FLAG_ALLOW_SAME_PN;
}
+ rxs->flag |= RX_FLAG_ONLY_MONITOR;
ath11k_update_radiotap(ar, ppduinfo, mon_skb, rxs);
ath11k_dp_rx_deliver_msdu(ar, napi, mon_skb, rxs);
diff --git a/drivers/net/wireless/ath/ath11k/hal.c b/drivers/net/wireless/ath/ath11k/hal.c
index 2fd224480d45..22422237500c 100644
--- a/drivers/net/wireless/ath/ath11k/hal.c
+++ b/drivers/net/wireless/ath/ath11k/hal.c
@@ -1220,16 +1220,20 @@ static int ath11k_hal_srng_create_config(struct ath11k_base *ab)
s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_STATUS_RING_HP;
s = &hal->srng_config[HAL_CE_SRC];
- s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab) + HAL_CE_DST_RING_BASE_LSB;
- s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab) + HAL_CE_DST_RING_HP;
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab) + HAL_CE_DST_RING_BASE_LSB +
+ ATH11K_CE_OFFSET(ab);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab) + HAL_CE_DST_RING_HP +
+ ATH11K_CE_OFFSET(ab);
s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG(ab) -
HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab);
s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG(ab) -
HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab);
s = &hal->srng_config[HAL_CE_DST];
- s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + HAL_CE_DST_RING_BASE_LSB;
- s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + HAL_CE_DST_RING_HP;
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + HAL_CE_DST_RING_BASE_LSB +
+ ATH11K_CE_OFFSET(ab);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + HAL_CE_DST_RING_HP +
+ ATH11K_CE_OFFSET(ab);
s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) -
HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab);
s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) -
@@ -1237,8 +1241,9 @@ static int ath11k_hal_srng_create_config(struct ath11k_base *ab)
s = &hal->srng_config[HAL_CE_DST_STATUS];
s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) +
- HAL_CE_DST_STATUS_RING_BASE_LSB;
- s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + HAL_CE_DST_STATUS_RING_HP;
+ HAL_CE_DST_STATUS_RING_BASE_LSB + ATH11K_CE_OFFSET(ab);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + HAL_CE_DST_STATUS_RING_HP +
+ ATH11K_CE_OFFSET(ab);
s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) -
HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab);
s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) -
diff --git a/drivers/net/wireless/ath/ath11k/hal.h b/drivers/net/wireless/ath/ath11k/hal.h
index 6a1f78ee6eb6..1942d41d6de5 100644
--- a/drivers/net/wireless/ath/ath11k/hal.h
+++ b/drivers/net/wireless/ath/ath11k/hal.h
@@ -321,6 +321,10 @@ struct ath11k_base;
#define HAL_WBM2SW_RELEASE_RING_BASE_MSB_RING_SIZE 0x000fffff
#define HAL_RXDMA_RING_MAX_SIZE 0x0000ffff
+/* IPQ5018 ce registers */
+#define HAL_IPQ5018_CE_WFSS_REG_BASE 0x08400000
+#define HAL_IPQ5018_CE_SIZE 0x200000
+
/* Add any other errors here and return them in
* ath11k_hal_rx_desc_get_err().
*/
@@ -519,6 +523,7 @@ enum hal_srng_dir {
#define HAL_SRNG_FLAGS_MSI_INTR 0x00020000
#define HAL_SRNG_FLAGS_CACHED 0x20000000
#define HAL_SRNG_FLAGS_LMAC_RING 0x80000000
+#define HAL_SRNG_FLAGS_REMAP_CE_RING 0x10000000
#define HAL_SRNG_TLV_HDR_TAG GENMASK(9, 1)
#define HAL_SRNG_TLV_HDR_LEN GENMASK(25, 10)
diff --git a/drivers/net/wireless/ath/ath11k/hw.c b/drivers/net/wireless/ath/ath11k/hw.c
index dbcc0c4035b6..ab8f0ccacc6b 100644
--- a/drivers/net/wireless/ath/ath11k/hw.c
+++ b/drivers/net/wireless/ath/ath11k/hw.c
@@ -791,6 +791,49 @@ static void ath11k_hw_wcn6855_reo_setup(struct ath11k_base *ab)
ring_hash_map);
}
+static void ath11k_hw_ipq5018_reo_setup(struct ath11k_base *ab)
+{
+ u32 reo_base = HAL_SEQ_WCSS_UMAC_REO_REG;
+ u32 val;
+
+ /* Each hash entry uses three bits to map to a particular ring. */
+ u32 ring_hash_map = HAL_HASH_ROUTING_RING_SW1 << 0 |
+ HAL_HASH_ROUTING_RING_SW2 << 4 |
+ HAL_HASH_ROUTING_RING_SW3 << 8 |
+ HAL_HASH_ROUTING_RING_SW4 << 12 |
+ HAL_HASH_ROUTING_RING_SW1 << 16 |
+ HAL_HASH_ROUTING_RING_SW2 << 20 |
+ HAL_HASH_ROUTING_RING_SW3 << 24 |
+ HAL_HASH_ROUTING_RING_SW4 << 28;
+
+ val = ath11k_hif_read32(ab, reo_base + HAL_REO1_GEN_ENABLE);
+
+ val &= ~HAL_REO1_GEN_ENABLE_FRAG_DST_RING;
+ val |= FIELD_PREP(HAL_REO1_GEN_ENABLE_FRAG_DST_RING,
+ HAL_SRNG_RING_ID_REO2SW1) |
+ FIELD_PREP(HAL_REO1_GEN_ENABLE_AGING_LIST_ENABLE, 1) |
+ FIELD_PREP(HAL_REO1_GEN_ENABLE_AGING_FLUSH_ENABLE, 1);
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_GEN_ENABLE, val);
+
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_0(ab),
+ HAL_DEFAULT_REO_TIMEOUT_USEC);
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_1(ab),
+ HAL_DEFAULT_REO_TIMEOUT_USEC);
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_2(ab),
+ HAL_DEFAULT_REO_TIMEOUT_USEC);
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_3(ab),
+ HAL_DEFAULT_REO_TIMEOUT_USEC);
+
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_DEST_RING_CTRL_IX_0,
+ ring_hash_map);
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_DEST_RING_CTRL_IX_1,
+ ring_hash_map);
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_DEST_RING_CTRL_IX_2,
+ ring_hash_map);
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_DEST_RING_CTRL_IX_3,
+ ring_hash_map);
+}
+
static u16 ath11k_hw_ipq8074_mpdu_info_get_peerid(u8 *tlv_data)
{
u16 peer_id = 0;
@@ -1084,6 +1127,47 @@ const struct ath11k_hw_ops wcn6750_ops = {
.get_ring_selector = ath11k_hw_wcn6750_get_tcl_ring_selector,
};
+/* IPQ5018 hw ops is similar to QCN9074 except for the dest ring remap */
+const struct ath11k_hw_ops ipq5018_ops = {
+ .get_hw_mac_from_pdev_id = ath11k_hw_ipq6018_mac_from_pdev_id,
+ .wmi_init_config = ath11k_init_wmi_config_ipq8074,
+ .mac_id_to_pdev_id = ath11k_hw_mac_id_to_pdev_id_ipq8074,
+ .mac_id_to_srng_id = ath11k_hw_mac_id_to_srng_id_ipq8074,
+ .tx_mesh_enable = ath11k_hw_qcn9074_tx_mesh_enable,
+ .rx_desc_get_first_msdu = ath11k_hw_qcn9074_rx_desc_get_first_msdu,
+ .rx_desc_get_last_msdu = ath11k_hw_qcn9074_rx_desc_get_last_msdu,
+ .rx_desc_get_l3_pad_bytes = ath11k_hw_qcn9074_rx_desc_get_l3_pad_bytes,
+ .rx_desc_get_hdr_status = ath11k_hw_qcn9074_rx_desc_get_hdr_status,
+ .rx_desc_encrypt_valid = ath11k_hw_qcn9074_rx_desc_encrypt_valid,
+ .rx_desc_get_encrypt_type = ath11k_hw_qcn9074_rx_desc_get_encrypt_type,
+ .rx_desc_get_decap_type = ath11k_hw_qcn9074_rx_desc_get_decap_type,
+ .rx_desc_get_mesh_ctl = ath11k_hw_qcn9074_rx_desc_get_mesh_ctl,
+ .rx_desc_get_ldpc_support = ath11k_hw_qcn9074_rx_desc_get_ldpc_support,
+ .rx_desc_get_mpdu_seq_ctl_vld = ath11k_hw_qcn9074_rx_desc_get_mpdu_seq_ctl_vld,
+ .rx_desc_get_mpdu_fc_valid = ath11k_hw_qcn9074_rx_desc_get_mpdu_fc_valid,
+ .rx_desc_get_mpdu_start_seq_no = ath11k_hw_qcn9074_rx_desc_get_mpdu_start_seq_no,
+ .rx_desc_get_msdu_len = ath11k_hw_qcn9074_rx_desc_get_msdu_len,
+ .rx_desc_get_msdu_sgi = ath11k_hw_qcn9074_rx_desc_get_msdu_sgi,
+ .rx_desc_get_msdu_rate_mcs = ath11k_hw_qcn9074_rx_desc_get_msdu_rate_mcs,
+ .rx_desc_get_msdu_rx_bw = ath11k_hw_qcn9074_rx_desc_get_msdu_rx_bw,
+ .rx_desc_get_msdu_freq = ath11k_hw_qcn9074_rx_desc_get_msdu_freq,
+ .rx_desc_get_msdu_pkt_type = ath11k_hw_qcn9074_rx_desc_get_msdu_pkt_type,
+ .rx_desc_get_msdu_nss = ath11k_hw_qcn9074_rx_desc_get_msdu_nss,
+ .rx_desc_get_mpdu_tid = ath11k_hw_qcn9074_rx_desc_get_mpdu_tid,
+ .rx_desc_get_mpdu_peer_id = ath11k_hw_qcn9074_rx_desc_get_mpdu_peer_id,
+ .rx_desc_copy_attn_end_tlv = ath11k_hw_qcn9074_rx_desc_copy_attn_end,
+ .rx_desc_get_mpdu_start_tag = ath11k_hw_qcn9074_rx_desc_get_mpdu_start_tag,
+ .rx_desc_get_mpdu_ppdu_id = ath11k_hw_qcn9074_rx_desc_get_mpdu_ppdu_id,
+ .rx_desc_set_msdu_len = ath11k_hw_qcn9074_rx_desc_set_msdu_len,
+ .rx_desc_get_attention = ath11k_hw_qcn9074_rx_desc_get_attention,
+ .reo_setup = ath11k_hw_ipq5018_reo_setup,
+ .rx_desc_get_msdu_payload = ath11k_hw_qcn9074_rx_desc_get_msdu_payload,
+ .mpdu_info_get_peerid = ath11k_hw_ipq8074_mpdu_info_get_peerid,
+ .rx_desc_mac_addr2_valid = ath11k_hw_ipq9074_rx_desc_mac_addr2_valid,
+ .rx_desc_mpdu_start_addr2 = ath11k_hw_ipq9074_rx_desc_mpdu_start_addr2,
+
+};
+
#define ATH11K_TX_RING_MASK_0 BIT(0)
#define ATH11K_TX_RING_MASK_1 BIT(1)
#define ATH11K_TX_RING_MASK_2 BIT(2)
@@ -1972,6 +2056,214 @@ const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_wcn6750 = {
},
};
+/* Target firmware's Copy Engine configuration for IPQ5018 */
+const struct ce_pipe_config ath11k_target_ce_config_wlan_ipq5018[] = {
+ /* CE0: host->target HTC control and raw streams */
+ {
+ .pipenum = __cpu_to_le32(0),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE1: target->host HTT + HTC control */
+ {
+ .pipenum = __cpu_to_le32(1),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE2: target->host WMI */
+ {
+ .pipenum = __cpu_to_le32(2),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE3: host->target WMI */
+ {
+ .pipenum = __cpu_to_le32(3),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE4: host->target HTT */
+ {
+ .pipenum = __cpu_to_le32(4),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(256),
+ .nbytes_max = __cpu_to_le32(256),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE5: target->host Pktlog */
+ {
+ .pipenum = __cpu_to_le32(5),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE6: Reserved for target autonomous hif_memcpy */
+ {
+ .pipenum = __cpu_to_le32(6),
+ .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(16384),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE7 used only by Host */
+ {
+ .pipenum = __cpu_to_le32(7),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(0x2000),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE8 target->host used only by IPA */
+ {
+ .pipenum = __cpu_to_le32(8),
+ .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(16384),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+};
+
+/* Map from service/endpoint to Copy Engine for IPQ5018.
+ * This table is derived from the CE TABLE, above.
+ * It is passed to the Target at startup for use by firmware.
+ */
+const struct service_to_pipe ath11k_target_service_to_ce_map_wlan_ipq5018[] = {
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VO),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(3),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VO),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(2),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BK),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(3),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BK),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(2),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BE),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(3),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BE),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(2),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VI),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(3),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VI),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(2),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(3),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(2),
+ },
+
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_RSVD_CTRL),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(0),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_RSVD_CTRL),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(1),
+ },
+
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_TEST_RAW_STREAMS),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(0),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_TEST_RAW_STREAMS),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(1),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_HTT_DATA_MSG),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(4),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_HTT_DATA_MSG),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(1),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_PKT_LOG),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(5),
+ },
+
+ /* (Additions here) */
+
+ { /* terminator entry */ }
+};
+
+const struct ce_ie_addr ath11k_ce_ie_addr_ipq8074 = {
+ .ie1_reg_addr = CE_HOST_IE_ADDRESS,
+ .ie2_reg_addr = CE_HOST_IE_2_ADDRESS,
+ .ie3_reg_addr = CE_HOST_IE_3_ADDRESS,
+};
+
+const struct ce_ie_addr ath11k_ce_ie_addr_ipq5018 = {
+ .ie1_reg_addr = CE_HOST_IPQ5018_IE_ADDRESS - HAL_IPQ5018_CE_WFSS_REG_BASE,
+ .ie2_reg_addr = CE_HOST_IPQ5018_IE_2_ADDRESS - HAL_IPQ5018_CE_WFSS_REG_BASE,
+ .ie3_reg_addr = CE_HOST_IPQ5018_IE_3_ADDRESS - HAL_IPQ5018_CE_WFSS_REG_BASE,
+};
+
+const struct ce_remap ath11k_ce_remap_ipq5018 = {
+ .base = HAL_IPQ5018_CE_WFSS_REG_BASE,
+ .size = HAL_IPQ5018_CE_SIZE,
+};
+
const struct ath11k_hw_regs ipq8074_regs = {
/* SW2TCL(x) R0 ring configuration address */
.hal_tcl1_ring_base_lsb = 0x00000510,
@@ -2437,6 +2729,85 @@ static const struct ath11k_hw_tcl2wbm_rbm_map ath11k_hw_tcl2wbm_rbm_map_wcn6750[
},
};
+const struct ath11k_hw_regs ipq5018_regs = {
+ /* SW2TCL(x) R0 ring configuration address */
+ .hal_tcl1_ring_base_lsb = 0x00000694,
+ .hal_tcl1_ring_base_msb = 0x00000698,
+ .hal_tcl1_ring_id = 0x0000069c,
+ .hal_tcl1_ring_misc = 0x000006a4,
+ .hal_tcl1_ring_tp_addr_lsb = 0x000006b0,
+ .hal_tcl1_ring_tp_addr_msb = 0x000006b4,
+ .hal_tcl1_ring_consumer_int_setup_ix0 = 0x000006c4,
+ .hal_tcl1_ring_consumer_int_setup_ix1 = 0x000006c8,
+ .hal_tcl1_ring_msi1_base_lsb = 0x000006dc,
+ .hal_tcl1_ring_msi1_base_msb = 0x000006e0,
+ .hal_tcl1_ring_msi1_data = 0x000006e4,
+ .hal_tcl2_ring_base_lsb = 0x000006ec,
+ .hal_tcl_ring_base_lsb = 0x0000079c,
+
+ /* TCL STATUS ring address */
+ .hal_tcl_status_ring_base_lsb = 0x000008a4,
+
+ /* REO2SW(x) R0 ring configuration address */
+ .hal_reo1_ring_base_lsb = 0x000001ec,
+ .hal_reo1_ring_base_msb = 0x000001f0,
+ .hal_reo1_ring_id = 0x000001f4,
+ .hal_reo1_ring_misc = 0x000001fc,
+ .hal_reo1_ring_hp_addr_lsb = 0x00000200,
+ .hal_reo1_ring_hp_addr_msb = 0x00000204,
+ .hal_reo1_ring_producer_int_setup = 0x00000210,
+ .hal_reo1_ring_msi1_base_lsb = 0x00000234,
+ .hal_reo1_ring_msi1_base_msb = 0x00000238,
+ .hal_reo1_ring_msi1_data = 0x0000023c,
+ .hal_reo2_ring_base_lsb = 0x00000244,
+ .hal_reo1_aging_thresh_ix_0 = 0x00000564,
+ .hal_reo1_aging_thresh_ix_1 = 0x00000568,
+ .hal_reo1_aging_thresh_ix_2 = 0x0000056c,
+ .hal_reo1_aging_thresh_ix_3 = 0x00000570,
+
+ /* REO2SW(x) R2 ring pointers (head/tail) address */
+ .hal_reo1_ring_hp = 0x00003028,
+ .hal_reo1_ring_tp = 0x0000302c,
+ .hal_reo2_ring_hp = 0x00003030,
+
+ /* REO2TCL R0 ring configuration address */
+ .hal_reo_tcl_ring_base_lsb = 0x000003fc,
+ .hal_reo_tcl_ring_hp = 0x00003058,
+
+ /* SW2REO ring address */
+ .hal_sw2reo_ring_base_lsb = 0x0000013c,
+ .hal_sw2reo_ring_hp = 0x00003018,
+
+ /* REO CMD ring address */
+ .hal_reo_cmd_ring_base_lsb = 0x000000e4,
+ .hal_reo_cmd_ring_hp = 0x00003010,
+
+ /* REO status address */
+ .hal_reo_status_ring_base_lsb = 0x00000504,
+ .hal_reo_status_hp = 0x00003070,
+
+ /* WCSS relative address */
+ .hal_seq_wcss_umac_ce0_src_reg = 0x08400000
+ - HAL_IPQ5018_CE_WFSS_REG_BASE,
+ .hal_seq_wcss_umac_ce0_dst_reg = 0x08401000
+ - HAL_IPQ5018_CE_WFSS_REG_BASE,
+ .hal_seq_wcss_umac_ce1_src_reg = 0x08402000
+ - HAL_IPQ5018_CE_WFSS_REG_BASE,
+ .hal_seq_wcss_umac_ce1_dst_reg = 0x08403000
+ - HAL_IPQ5018_CE_WFSS_REG_BASE,
+
+ /* WBM Idle address */
+ .hal_wbm_idle_link_ring_base_lsb = 0x00000874,
+ .hal_wbm_idle_link_ring_misc = 0x00000884,
+
+ /* SW2WBM release address */
+ .hal_wbm_release_ring_base_lsb = 0x000001ec,
+
+ /* WBM2SW release address */
+ .hal_wbm0_release_ring_base_lsb = 0x00000924,
+ .hal_wbm1_release_ring_base_lsb = 0x0000097c,
+};
+
const struct ath11k_hw_hal_params ath11k_hw_hal_params_ipq8074 = {
.rx_buf_rbm = HAL_RX_BUF_RBM_SW3_BM,
.tcl2wbm_rbm_map = ath11k_hw_tcl2wbm_rbm_map_ipq8074,
diff --git a/drivers/net/wireless/ath/ath11k/hw.h b/drivers/net/wireless/ath/ath11k/hw.h
index 0c5ef8a526d8..0be4e1232384 100644
--- a/drivers/net/wireless/ath/ath11k/hw.h
+++ b/drivers/net/wireless/ath/ath11k/hw.h
@@ -80,6 +80,8 @@
#define ATH11K_M3_FILE "m3.bin"
#define ATH11K_REGDB_FILE_NAME "regdb.bin"
+#define ATH11K_CE_OFFSET(ab) (ab->mem_ce - ab->mem)
+
enum ath11k_hw_rate_cck {
ATH11K_HW_RATE_CCK_LP_11M = 0,
ATH11K_HW_RATE_CCK_LP_5_5M,
@@ -158,6 +160,8 @@ struct ath11k_hw_params {
u32 target_ce_count;
const struct service_to_pipe *svc_to_ce_map;
u32 svc_to_ce_map_len;
+ const struct ce_ie_addr *ce_ie_addr;
+ const struct ce_remap *ce_remap;
bool single_pdev_only;
@@ -220,6 +224,7 @@ struct ath11k_hw_params {
u32 tx_ring_size;
bool smp2p_wow_exit;
bool support_fw_mac_sequence;
+ bool ftm_responder;
};
struct ath11k_hw_ops {
@@ -271,12 +276,18 @@ extern const struct ath11k_hw_ops qca6390_ops;
extern const struct ath11k_hw_ops qcn9074_ops;
extern const struct ath11k_hw_ops wcn6855_ops;
extern const struct ath11k_hw_ops wcn6750_ops;
+extern const struct ath11k_hw_ops ipq5018_ops;
extern const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_ipq8074;
extern const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_qca6390;
extern const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_qcn9074;
extern const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_wcn6750;
+extern const struct ce_ie_addr ath11k_ce_ie_addr_ipq8074;
+extern const struct ce_ie_addr ath11k_ce_ie_addr_ipq5018;
+
+extern const struct ce_remap ath11k_ce_remap_ipq5018;
+
extern const struct ath11k_hw_hal_params ath11k_hw_hal_params_ipq8074;
extern const struct ath11k_hw_hal_params ath11k_hw_hal_params_qca6390;
extern const struct ath11k_hw_hal_params ath11k_hw_hal_params_wcn6750;
@@ -406,6 +417,7 @@ extern const struct ath11k_hw_regs qca6390_regs;
extern const struct ath11k_hw_regs qcn9074_regs;
extern const struct ath11k_hw_regs wcn6855_regs;
extern const struct ath11k_hw_regs wcn6750_regs;
+extern const struct ath11k_hw_regs ipq5018_regs;
static inline const char *ath11k_bd_ie_type_str(enum ath11k_bd_ie_type type)
{
diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
index 9e923ecb0891..110a38cce0a7 100644
--- a/drivers/net/wireless/ath/ath11k/mac.c
+++ b/drivers/net/wireless/ath/ath11k/mac.c
@@ -96,6 +96,7 @@ static const struct ieee80211_channel ath11k_5ghz_channels[] = {
CHAN5G(165, 5825, 0),
CHAN5G(169, 5845, 0),
CHAN5G(173, 5865, 0),
+ CHAN5G(177, 5885, 0),
};
static const struct ieee80211_channel ath11k_6ghz_channels[] = {
@@ -3110,7 +3111,7 @@ static void ath11k_mac_op_bss_info_changed(struct ieee80211_hw *hw,
u16 bitrate;
int ret = 0;
u8 rateidx;
- u32 rate;
+ u32 rate, param;
u32 ipv4_cnt;
mutex_lock(&ar->conf_mutex);
@@ -3412,6 +3413,20 @@ static void ath11k_mac_op_bss_info_changed(struct ieee80211_hw *hw,
}
}
+ if (changed & BSS_CHANGED_FTM_RESPONDER &&
+ arvif->ftm_responder != info->ftm_responder &&
+ ar->ab->hw_params.ftm_responder &&
+ (vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_MESH_POINT)) {
+ arvif->ftm_responder = info->ftm_responder;
+ param = WMI_VDEV_PARAM_ENABLE_DISABLE_RTT_RESPONDER_ROLE;
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, param,
+ arvif->ftm_responder);
+ if (ret)
+ ath11k_warn(ar->ab, "Failed to set ftm responder %i: %d\n",
+ arvif->vdev_id, ret);
+ }
+
if (changed & BSS_CHANGED_FILS_DISCOVERY ||
changed & BSS_CHANGED_UNSOL_BCAST_PROBE_RESP)
ath11k_mac_fils_discovery(arvif, info);
@@ -3612,7 +3627,7 @@ static int ath11k_mac_op_hw_scan(struct ieee80211_hw *hw,
struct ath11k *ar = hw->priv;
struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
struct cfg80211_scan_request *req = &hw_req->req;
- struct scan_req_params arg;
+ struct scan_req_params *arg = NULL;
int ret = 0;
int i;
u32 scan_timeout;
@@ -3640,72 +3655,78 @@ static int ath11k_mac_op_hw_scan(struct ieee80211_hw *hw,
if (ret)
goto exit;
- memset(&arg, 0, sizeof(arg));
- ath11k_wmi_start_scan_init(ar, &arg);
- arg.vdev_id = arvif->vdev_id;
- arg.scan_id = ATH11K_SCAN_ID;
+ arg = kzalloc(sizeof(*arg), GFP_KERNEL);
+
+ if (!arg) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ ath11k_wmi_start_scan_init(ar, arg);
+ arg->vdev_id = arvif->vdev_id;
+ arg->scan_id = ATH11K_SCAN_ID;
if (req->ie_len) {
- arg.extraie.ptr = kmemdup(req->ie, req->ie_len, GFP_KERNEL);
- if (!arg.extraie.ptr) {
+ arg->extraie.ptr = kmemdup(req->ie, req->ie_len, GFP_KERNEL);
+ if (!arg->extraie.ptr) {
ret = -ENOMEM;
goto exit;
}
- arg.extraie.len = req->ie_len;
+ arg->extraie.len = req->ie_len;
}
if (req->n_ssids) {
- arg.num_ssids = req->n_ssids;
- for (i = 0; i < arg.num_ssids; i++) {
- arg.ssid[i].length = req->ssids[i].ssid_len;
- memcpy(&arg.ssid[i].ssid, req->ssids[i].ssid,
+ arg->num_ssids = req->n_ssids;
+ for (i = 0; i < arg->num_ssids; i++) {
+ arg->ssid[i].length = req->ssids[i].ssid_len;
+ memcpy(&arg->ssid[i].ssid, req->ssids[i].ssid,
req->ssids[i].ssid_len);
}
} else {
- arg.scan_flags |= WMI_SCAN_FLAG_PASSIVE;
+ arg->scan_flags |= WMI_SCAN_FLAG_PASSIVE;
}
if (req->n_channels) {
- arg.num_chan = req->n_channels;
- arg.chan_list = kcalloc(arg.num_chan, sizeof(*arg.chan_list),
- GFP_KERNEL);
+ arg->num_chan = req->n_channels;
+ arg->chan_list = kcalloc(arg->num_chan, sizeof(*arg->chan_list),
+ GFP_KERNEL);
- if (!arg.chan_list) {
+ if (!arg->chan_list) {
ret = -ENOMEM;
goto exit;
}
- for (i = 0; i < arg.num_chan; i++)
- arg.chan_list[i] = req->channels[i]->center_freq;
+ for (i = 0; i < arg->num_chan; i++)
+ arg->chan_list[i] = req->channels[i]->center_freq;
}
if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
- arg.scan_f_add_spoofed_mac_in_probe = 1;
- ether_addr_copy(arg.mac_addr.addr, req->mac_addr);
- ether_addr_copy(arg.mac_mask.addr, req->mac_addr_mask);
+ arg->scan_f_add_spoofed_mac_in_probe = 1;
+ ether_addr_copy(arg->mac_addr.addr, req->mac_addr);
+ ether_addr_copy(arg->mac_mask.addr, req->mac_addr_mask);
}
/* if duration is set, default dwell times will be overwritten */
if (req->duration) {
- arg.dwell_time_active = req->duration;
- arg.dwell_time_active_2g = req->duration;
- arg.dwell_time_active_6g = req->duration;
- arg.dwell_time_passive = req->duration;
- arg.dwell_time_passive_6g = req->duration;
- arg.burst_duration = req->duration;
-
- scan_timeout = min_t(u32, arg.max_rest_time *
- (arg.num_chan - 1) + (req->duration +
+ arg->dwell_time_active = req->duration;
+ arg->dwell_time_active_2g = req->duration;
+ arg->dwell_time_active_6g = req->duration;
+ arg->dwell_time_passive = req->duration;
+ arg->dwell_time_passive_6g = req->duration;
+ arg->burst_duration = req->duration;
+
+ scan_timeout = min_t(u32, arg->max_rest_time *
+ (arg->num_chan - 1) + (req->duration +
ATH11K_SCAN_CHANNEL_SWITCH_WMI_EVT_OVERHEAD) *
- arg.num_chan, arg.max_scan_time);
+ arg->num_chan, arg->max_scan_time);
} else {
- scan_timeout = arg.max_scan_time;
+ scan_timeout = arg->max_scan_time;
}
/* Add a margin to account for event/command processing */
scan_timeout += ATH11K_MAC_SCAN_CMD_EVT_OVERHEAD;
- ret = ath11k_start_scan(ar, &arg);
+ ret = ath11k_start_scan(ar, arg);
if (ret) {
ath11k_warn(ar->ab, "failed to start hw scan: %d\n", ret);
spin_lock_bh(&ar->data_lock);
@@ -3717,10 +3738,11 @@ static int ath11k_mac_op_hw_scan(struct ieee80211_hw *hw,
msecs_to_jiffies(scan_timeout));
exit:
- kfree(arg.chan_list);
-
- if (req->ie_len)
- kfree(arg.extraie.ptr);
+ if (arg) {
+ kfree(arg->chan_list);
+ kfree(arg->extraie.ptr);
+ kfree(arg);
+ }
mutex_unlock(&ar->conf_mutex);
@@ -9106,6 +9128,10 @@ static int __ath11k_mac_register(struct ath11k *ar)
wiphy_ext_feature_set(ar->hw->wiphy,
NL80211_EXT_FEATURE_SET_SCAN_DWELL);
+ if (ab->hw_params.ftm_responder)
+ wiphy_ext_feature_set(ar->hw->wiphy,
+ NL80211_EXT_FEATURE_ENABLE_FTM_RESPONDER);
+
ath11k_reg_init(ar);
if (!test_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags)) {
diff --git a/drivers/net/wireless/ath/ath11k/pci.c b/drivers/net/wireless/ath/ath11k/pci.c
index 99cf3357c66e..776362d151cb 100644
--- a/drivers/net/wireless/ath/ath11k/pci.c
+++ b/drivers/net/wireless/ath/ath11k/pci.c
@@ -543,6 +543,8 @@ static int ath11k_pci_claim(struct ath11k_pci *ab_pci, struct pci_dev *pdev)
goto clear_master;
}
+ ab->mem_ce = ab->mem;
+
ath11k_dbg(ab, ATH11K_DBG_BOOT, "boot pci_mem 0x%pK\n", ab->mem);
return 0;
diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c
index 2a8a3e3dcff6..b3a7d7bfe17c 100644
--- a/drivers/net/wireless/ath/ath11k/wmi.c
+++ b/drivers/net/wireless/ath/ath11k/wmi.c
@@ -3853,8 +3853,8 @@ ath11k_wmi_obss_color_collision_event(struct ath11k_base *ab, struct sk_buff *sk
switch (ev->evt_type) {
case WMI_BSS_COLOR_COLLISION_DETECTION:
- ieeee80211_obss_color_collision_notify(arvif->vif, ev->obss_color_bitmap,
- GFP_KERNEL);
+ ieee80211_obss_color_collision_notify(arvif->vif, ev->obss_color_bitmap,
+ GFP_KERNEL);
ath11k_dbg(ab, ATH11K_DBG_WMI,
"OBSS color collision detected vdev:%d, event:%d, bitmap:%08llx\n",
ev->vdev_id, ev->evt_type, ev->obss_color_bitmap);
diff --git a/drivers/net/wireless/ath/ath11k/wmi.h b/drivers/net/wireless/ath/ath11k/wmi.h
index 8f2c07d70a4a..0a045af5419b 100644
--- a/drivers/net/wireless/ath/ath11k/wmi.h
+++ b/drivers/net/wireless/ath/ath11k/wmi.h
@@ -1073,6 +1073,7 @@ enum wmi_tlv_vdev_param {
WMI_VDEV_PARAM_ENABLE_BCAST_PROBE_RESPONSE,
WMI_VDEV_PARAM_FILS_MAX_CHANNEL_GUARD_TIME,
WMI_VDEV_PARAM_HE_LTF = 0x74,
+ WMI_VDEV_PARAM_ENABLE_DISABLE_RTT_RESPONDER_ROLE = 0x7d,
WMI_VDEV_PARAM_BA_MODE = 0x7e,
WMI_VDEV_PARAM_AUTORATE_MISC_CFG = 0x80,
WMI_VDEV_PARAM_SET_HE_SOUNDING_MODE = 0x87,
diff --git a/drivers/net/wireless/ath/ath12k/Kconfig b/drivers/net/wireless/ath/ath12k/Kconfig
new file mode 100644
index 000000000000..4f9c514c13e7
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/Kconfig
@@ -0,0 +1,34 @@
+# SPDX-License-Identifier: BSD-3-Clause-Clear
+config ATH12K
+ tristate "Qualcomm Technologies Wi-Fi 7 support (ath12k)"
+ depends on MAC80211 && HAS_DMA && PCI
+ depends on CRYPTO_MICHAEL_MIC
+ select QCOM_QMI_HELPERS
+ select MHI_BUS
+ select QRTR
+ select QRTR_MHI
+ help
+ Enable support for Qualcomm Technologies Wi-Fi 7 (IEEE
+ 802.11be) family of chipsets, for example WCN7850 and
+ QCN9274.
+
+ If you choose to build a module, it'll be called ath12k.
+
+config ATH12K_DEBUG
+ bool "ath12k debugging"
+ depends on ATH12K
+ help
+ Enable debug support, for example debug messages which must
+ be enabled separately using the debug_mask module parameter.
+
+ If unsure, say Y to make it easier to debug problems. But if
+ you want optimal performance choose N.
+
+config ATH12K_TRACING
+ bool "ath12k tracing support"
+ depends on ATH12K && EVENT_TRACING
+ help
+ Enable ath12k tracing infrastructure.
+
+ If unsure, say Y to make it easier to debug problems. But if
+ you want optimal performance choose N.
diff --git a/drivers/net/wireless/ath/ath12k/Makefile b/drivers/net/wireless/ath/ath12k/Makefile
new file mode 100644
index 000000000000..62c52e733b5e
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/Makefile
@@ -0,0 +1,27 @@
+# SPDX-License-Identifier: BSD-3-Clause-Clear
+obj-$(CONFIG_ATH12K) += ath12k.o
+ath12k-y += core.o \
+ hal.o \
+ hal_tx.o \
+ hal_rx.o \
+ wmi.o \
+ mac.o \
+ reg.o \
+ htc.o \
+ qmi.o \
+ dp.o \
+ dp_tx.o \
+ dp_rx.o \
+ debug.o \
+ ce.o \
+ peer.o \
+ dbring.o \
+ hw.o \
+ mhi.o \
+ pci.o \
+ dp_mon.o
+
+ath12k-$(CONFIG_ATH12K_TRACING) += trace.o
+
+# for tracing framework to find trace.h
+CFLAGS_trace.o := -I$(src)
diff --git a/drivers/net/wireless/ath/ath12k/ce.c b/drivers/net/wireless/ath/ath12k/ce.c
new file mode 100644
index 000000000000..aed6987804bf
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/ce.c
@@ -0,0 +1,964 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include "dp_rx.h"
+#include "debug.h"
+#include "hif.h"
+
+const struct ce_attr ath12k_host_ce_config_qcn9274[] = {
+ /* CE0: host->target HTC control and raw streams */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 16,
+ .src_sz_max = 2048,
+ .dest_nentries = 0,
+ },
+
+ /* CE1: target->host HTT + HTC control */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 512,
+ .recv_cb = ath12k_htc_rx_completion_handler,
+ },
+
+ /* CE2: target->host WMI */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 128,
+ .recv_cb = ath12k_htc_rx_completion_handler,
+ },
+
+ /* CE3: host->target WMI (mac0) */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 32,
+ .src_sz_max = 2048,
+ .dest_nentries = 0,
+ },
+
+ /* CE4: host->target HTT */
+ {
+ .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+ .src_nentries = 2048,
+ .src_sz_max = 256,
+ .dest_nentries = 0,
+ },
+
+ /* CE5: target->host pktlog */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 512,
+ .recv_cb = ath12k_dp_htt_htc_t2h_msg_handler,
+ },
+
+ /* CE6: target autonomous hif_memcpy */
+ {
+ .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+ .src_nentries = 0,
+ .src_sz_max = 0,
+ .dest_nentries = 0,
+ },
+
+ /* CE7: host->target WMI (mac1) */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 32,
+ .src_sz_max = 2048,
+ .dest_nentries = 0,
+ },
+
+ /* CE8: target autonomous hif_memcpy */
+ {
+ .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+ .src_nentries = 0,
+ .src_sz_max = 0,
+ .dest_nentries = 0,
+ },
+
+ /* CE9: MHI */
+ {
+ .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+ .src_nentries = 0,
+ .src_sz_max = 0,
+ .dest_nentries = 0,
+ },
+
+ /* CE10: MHI */
+ {
+ .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+ .src_nentries = 0,
+ .src_sz_max = 0,
+ .dest_nentries = 0,
+ },
+
+ /* CE11: MHI */
+ {
+ .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+ .src_nentries = 0,
+ .src_sz_max = 0,
+ .dest_nentries = 0,
+ },
+
+ /* CE12: CV Prefetch */
+ {
+ .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+ .src_nentries = 0,
+ .src_sz_max = 0,
+ .dest_nentries = 0,
+ },
+
+ /* CE13: CV Prefetch */
+ {
+ .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+ .src_nentries = 0,
+ .src_sz_max = 0,
+ .dest_nentries = 0,
+ },
+
+ /* CE14: target->host dbg log */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 512,
+ .recv_cb = ath12k_htc_rx_completion_handler,
+ },
+
+ /* CE15: reserved for future use */
+ {
+ .flags = (CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
+ .src_nentries = 0,
+ .src_sz_max = 0,
+ .dest_nentries = 0,
+ },
+};
+
+const struct ce_attr ath12k_host_ce_config_wcn7850[] = {
+ /* CE0: host->target HTC control and raw streams */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 16,
+ .src_sz_max = 2048,
+ .dest_nentries = 0,
+ },
+
+ /* CE1: target->host HTT + HTC control */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 512,
+ .recv_cb = ath12k_htc_rx_completion_handler,
+ },
+
+ /* CE2: target->host WMI */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 64,
+ .recv_cb = ath12k_htc_rx_completion_handler,
+ },
+
+ /* CE3: host->target WMI (mac0) */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 32,
+ .src_sz_max = 2048,
+ .dest_nentries = 0,
+ },
+
+ /* CE4: host->target HTT */
+ {
+ .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+ .src_nentries = 2048,
+ .src_sz_max = 256,
+ .dest_nentries = 0,
+ },
+
+ /* CE5: target->host pktlog */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 0,
+ .dest_nentries = 0,
+ },
+
+ /* CE6: target autonomous hif_memcpy */
+ {
+ .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+ .src_nentries = 0,
+ .src_sz_max = 0,
+ .dest_nentries = 0,
+ },
+
+ /* CE7: host->target WMI (mac1) */
+ {
+ .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 0,
+ },
+
+ /* CE8: target autonomous hif_memcpy */
+ {
+ .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+ .src_nentries = 0,
+ .src_sz_max = 0,
+ .dest_nentries = 0,
+ },
+
+};
+
+static int ath12k_ce_rx_buf_enqueue_pipe(struct ath12k_ce_pipe *pipe,
+ struct sk_buff *skb, dma_addr_t paddr)
+{
+ struct ath12k_base *ab = pipe->ab;
+ struct ath12k_ce_ring *ring = pipe->dest_ring;
+ struct hal_srng *srng;
+ unsigned int write_index;
+ unsigned int nentries_mask = ring->nentries_mask;
+ struct hal_ce_srng_dest_desc *desc;
+ int ret;
+
+ lockdep_assert_held(&ab->ce.ce_lock);
+
+ write_index = ring->write_index;
+
+ srng = &ab->hal.srng_list[ring->hal_ring_id];
+
+ spin_lock_bh(&srng->lock);
+
+ ath12k_hal_srng_access_begin(ab, srng);
+
+ if (unlikely(ath12k_hal_srng_src_num_free(ab, srng, false) < 1)) {
+ ret = -ENOSPC;
+ goto exit;
+ }
+
+ desc = ath12k_hal_srng_src_get_next_entry(ab, srng);
+ if (!desc) {
+ ret = -ENOSPC;
+ goto exit;
+ }
+
+ ath12k_hal_ce_dst_set_desc(desc, paddr);
+
+ ring->skb[write_index] = skb;
+ write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
+ ring->write_index = write_index;
+
+ pipe->rx_buf_needed--;
+
+ ret = 0;
+exit:
+ ath12k_hal_srng_access_end(ab, srng);
+
+ spin_unlock_bh(&srng->lock);
+
+ return ret;
+}
+
+static int ath12k_ce_rx_post_pipe(struct ath12k_ce_pipe *pipe)
+{
+ struct ath12k_base *ab = pipe->ab;
+ struct sk_buff *skb;
+ dma_addr_t paddr;
+ int ret = 0;
+
+ if (!(pipe->dest_ring || pipe->status_ring))
+ return 0;
+
+ spin_lock_bh(&ab->ce.ce_lock);
+ while (pipe->rx_buf_needed) {
+ skb = dev_alloc_skb(pipe->buf_sz);
+ if (!skb) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ WARN_ON_ONCE(!IS_ALIGNED((unsigned long)skb->data, 4));
+
+ paddr = dma_map_single(ab->dev, skb->data,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(ab->dev, paddr))) {
+ ath12k_warn(ab, "failed to dma map ce rx buf\n");
+ dev_kfree_skb_any(skb);
+ ret = -EIO;
+ goto exit;
+ }
+
+ ATH12K_SKB_RXCB(skb)->paddr = paddr;
+
+ ret = ath12k_ce_rx_buf_enqueue_pipe(pipe, skb, paddr);
+ if (ret) {
+ ath12k_warn(ab, "failed to enqueue rx buf: %d\n", ret);
+ dma_unmap_single(ab->dev, paddr,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+ dev_kfree_skb_any(skb);
+ goto exit;
+ }
+ }
+
+exit:
+ spin_unlock_bh(&ab->ce.ce_lock);
+ return ret;
+}
+
+static int ath12k_ce_completed_recv_next(struct ath12k_ce_pipe *pipe,
+ struct sk_buff **skb, int *nbytes)
+{
+ struct ath12k_base *ab = pipe->ab;
+ struct hal_ce_srng_dst_status_desc *desc;
+ struct hal_srng *srng;
+ unsigned int sw_index;
+ unsigned int nentries_mask;
+ int ret = 0;
+
+ spin_lock_bh(&ab->ce.ce_lock);
+
+ sw_index = pipe->dest_ring->sw_index;
+ nentries_mask = pipe->dest_ring->nentries_mask;
+
+ srng = &ab->hal.srng_list[pipe->status_ring->hal_ring_id];
+
+ spin_lock_bh(&srng->lock);
+
+ ath12k_hal_srng_access_begin(ab, srng);
+
+ desc = ath12k_hal_srng_dst_get_next_entry(ab, srng);
+ if (!desc) {
+ ret = -EIO;
+ goto err;
+ }
+
+ *nbytes = ath12k_hal_ce_dst_status_get_length(desc);
+ if (*nbytes == 0) {
+ ret = -EIO;
+ goto err;
+ }
+
+ *skb = pipe->dest_ring->skb[sw_index];
+ pipe->dest_ring->skb[sw_index] = NULL;
+
+ sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
+ pipe->dest_ring->sw_index = sw_index;
+
+ pipe->rx_buf_needed++;
+err:
+ ath12k_hal_srng_access_end(ab, srng);
+
+ spin_unlock_bh(&srng->lock);
+
+ spin_unlock_bh(&ab->ce.ce_lock);
+
+ return ret;
+}
+
+static void ath12k_ce_recv_process_cb(struct ath12k_ce_pipe *pipe)
+{
+ struct ath12k_base *ab = pipe->ab;
+ struct sk_buff *skb;
+ struct sk_buff_head list;
+ unsigned int nbytes, max_nbytes;
+ int ret;
+
+ __skb_queue_head_init(&list);
+ while (ath12k_ce_completed_recv_next(pipe, &skb, &nbytes) == 0) {
+ max_nbytes = skb->len + skb_tailroom(skb);
+ dma_unmap_single(ab->dev, ATH12K_SKB_RXCB(skb)->paddr,
+ max_nbytes, DMA_FROM_DEVICE);
+
+ if (unlikely(max_nbytes < nbytes)) {
+ ath12k_warn(ab, "rxed more than expected (nbytes %d, max %d)",
+ nbytes, max_nbytes);
+ dev_kfree_skb_any(skb);
+ continue;
+ }
+
+ skb_put(skb, nbytes);
+ __skb_queue_tail(&list, skb);
+ }
+
+ while ((skb = __skb_dequeue(&list))) {
+ ath12k_dbg(ab, ATH12K_DBG_AHB, "rx ce pipe %d len %d\n",
+ pipe->pipe_num, skb->len);
+ pipe->recv_cb(ab, skb);
+ }
+
+ ret = ath12k_ce_rx_post_pipe(pipe);
+ if (ret && ret != -ENOSPC) {
+ ath12k_warn(ab, "failed to post rx buf to pipe: %d err: %d\n",
+ pipe->pipe_num, ret);
+ mod_timer(&ab->rx_replenish_retry,
+ jiffies + ATH12K_CE_RX_POST_RETRY_JIFFIES);
+ }
+}
+
+static struct sk_buff *ath12k_ce_completed_send_next(struct ath12k_ce_pipe *pipe)
+{
+ struct ath12k_base *ab = pipe->ab;
+ struct hal_ce_srng_src_desc *desc;
+ struct hal_srng *srng;
+ unsigned int sw_index;
+ unsigned int nentries_mask;
+ struct sk_buff *skb;
+
+ spin_lock_bh(&ab->ce.ce_lock);
+
+ sw_index = pipe->src_ring->sw_index;
+ nentries_mask = pipe->src_ring->nentries_mask;
+
+ srng = &ab->hal.srng_list[pipe->src_ring->hal_ring_id];
+
+ spin_lock_bh(&srng->lock);
+
+ ath12k_hal_srng_access_begin(ab, srng);
+
+ desc = ath12k_hal_srng_src_reap_next(ab, srng);
+ if (!desc) {
+ skb = ERR_PTR(-EIO);
+ goto err_unlock;
+ }
+
+ skb = pipe->src_ring->skb[sw_index];
+
+ pipe->src_ring->skb[sw_index] = NULL;
+
+ sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
+ pipe->src_ring->sw_index = sw_index;
+
+err_unlock:
+ spin_unlock_bh(&srng->lock);
+
+ spin_unlock_bh(&ab->ce.ce_lock);
+
+ return skb;
+}
+
+static void ath12k_ce_send_done_cb(struct ath12k_ce_pipe *pipe)
+{
+ struct ath12k_base *ab = pipe->ab;
+ struct sk_buff *skb;
+
+ while (!IS_ERR(skb = ath12k_ce_completed_send_next(pipe))) {
+ if (!skb)
+ continue;
+
+ dma_unmap_single(ab->dev, ATH12K_SKB_CB(skb)->paddr, skb->len,
+ DMA_TO_DEVICE);
+ dev_kfree_skb_any(skb);
+ }
+}
+
+static void ath12k_ce_srng_msi_ring_params_setup(struct ath12k_base *ab, u32 ce_id,
+ struct hal_srng_params *ring_params)
+{
+ u32 msi_data_start;
+ u32 msi_data_count, msi_data_idx;
+ u32 msi_irq_start;
+ u32 addr_lo;
+ u32 addr_hi;
+ int ret;
+
+ ret = ath12k_hif_get_user_msi_vector(ab, "CE",
+ &msi_data_count, &msi_data_start,
+ &msi_irq_start);
+
+ if (ret)
+ return;
+
+ ath12k_hif_get_msi_address(ab, &addr_lo, &addr_hi);
+ ath12k_hif_get_ce_msi_idx(ab, ce_id, &msi_data_idx);
+
+ ring_params->msi_addr = addr_lo;
+ ring_params->msi_addr |= (dma_addr_t)(((uint64_t)addr_hi) << 32);
+ ring_params->msi_data = (msi_data_idx % msi_data_count) + msi_data_start;
+ ring_params->flags |= HAL_SRNG_FLAGS_MSI_INTR;
+}
+
+static int ath12k_ce_init_ring(struct ath12k_base *ab,
+ struct ath12k_ce_ring *ce_ring,
+ int ce_id, enum hal_ring_type type)
+{
+ struct hal_srng_params params = { 0 };
+ int ret;
+
+ params.ring_base_paddr = ce_ring->base_addr_ce_space;
+ params.ring_base_vaddr = ce_ring->base_addr_owner_space;
+ params.num_entries = ce_ring->nentries;
+
+ if (!(CE_ATTR_DIS_INTR & ab->hw_params->host_ce_config[ce_id].flags))
+ ath12k_ce_srng_msi_ring_params_setup(ab, ce_id, &params);
+
+ switch (type) {
+ case HAL_CE_SRC:
+ if (!(CE_ATTR_DIS_INTR & ab->hw_params->host_ce_config[ce_id].flags))
+ params.intr_batch_cntr_thres_entries = 1;
+ break;
+ case HAL_CE_DST:
+ params.max_buffer_len = ab->hw_params->host_ce_config[ce_id].src_sz_max;
+ if (!(ab->hw_params->host_ce_config[ce_id].flags & CE_ATTR_DIS_INTR)) {
+ params.intr_timer_thres_us = 1024;
+ params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
+ params.low_threshold = ce_ring->nentries - 3;
+ }
+ break;
+ case HAL_CE_DST_STATUS:
+ if (!(ab->hw_params->host_ce_config[ce_id].flags & CE_ATTR_DIS_INTR)) {
+ params.intr_batch_cntr_thres_entries = 1;
+ params.intr_timer_thres_us = 0x1000;
+ }
+ break;
+ default:
+ ath12k_warn(ab, "Invalid CE ring type %d\n", type);
+ return -EINVAL;
+ }
+
+ /* TODO: Init other params needed by HAL to init the ring */
+
+ ret = ath12k_hal_srng_setup(ab, type, ce_id, 0, &params);
+ if (ret < 0) {
+ ath12k_warn(ab, "failed to setup srng: %d ring_id %d\n",
+ ret, ce_id);
+ return ret;
+ }
+
+ ce_ring->hal_ring_id = ret;
+
+ return 0;
+}
+
+static struct ath12k_ce_ring *
+ath12k_ce_alloc_ring(struct ath12k_base *ab, int nentries, int desc_sz)
+{
+ struct ath12k_ce_ring *ce_ring;
+ dma_addr_t base_addr;
+
+ ce_ring = kzalloc(struct_size(ce_ring, skb, nentries), GFP_KERNEL);
+ if (!ce_ring)
+ return ERR_PTR(-ENOMEM);
+
+ ce_ring->nentries = nentries;
+ ce_ring->nentries_mask = nentries - 1;
+
+ /* Legacy platforms that do not support cache
+ * coherent DMA are unsupported
+ */
+ ce_ring->base_addr_owner_space_unaligned =
+ dma_alloc_coherent(ab->dev,
+ nentries * desc_sz + CE_DESC_RING_ALIGN,
+ &base_addr, GFP_KERNEL);
+ if (!ce_ring->base_addr_owner_space_unaligned) {
+ kfree(ce_ring);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ ce_ring->base_addr_ce_space_unaligned = base_addr;
+
+ ce_ring->base_addr_owner_space =
+ PTR_ALIGN(ce_ring->base_addr_owner_space_unaligned,
+ CE_DESC_RING_ALIGN);
+
+ ce_ring->base_addr_ce_space = ALIGN(ce_ring->base_addr_ce_space_unaligned,
+ CE_DESC_RING_ALIGN);
+
+ return ce_ring;
+}
+
+static int ath12k_ce_alloc_pipe(struct ath12k_base *ab, int ce_id)
+{
+ struct ath12k_ce_pipe *pipe = &ab->ce.ce_pipe[ce_id];
+ const struct ce_attr *attr = &ab->hw_params->host_ce_config[ce_id];
+ struct ath12k_ce_ring *ring;
+ int nentries;
+ int desc_sz;
+
+ pipe->attr_flags = attr->flags;
+
+ if (attr->src_nentries) {
+ pipe->send_cb = ath12k_ce_send_done_cb;
+ nentries = roundup_pow_of_two(attr->src_nentries);
+ desc_sz = ath12k_hal_ce_get_desc_size(HAL_CE_DESC_SRC);
+ ring = ath12k_ce_alloc_ring(ab, nentries, desc_sz);
+ if (IS_ERR(ring))
+ return PTR_ERR(ring);
+ pipe->src_ring = ring;
+ }
+
+ if (attr->dest_nentries) {
+ pipe->recv_cb = attr->recv_cb;
+ nentries = roundup_pow_of_two(attr->dest_nentries);
+ desc_sz = ath12k_hal_ce_get_desc_size(HAL_CE_DESC_DST);
+ ring = ath12k_ce_alloc_ring(ab, nentries, desc_sz);
+ if (IS_ERR(ring))
+ return PTR_ERR(ring);
+ pipe->dest_ring = ring;
+
+ desc_sz = ath12k_hal_ce_get_desc_size(HAL_CE_DESC_DST_STATUS);
+ ring = ath12k_ce_alloc_ring(ab, nentries, desc_sz);
+ if (IS_ERR(ring))
+ return PTR_ERR(ring);
+ pipe->status_ring = ring;
+ }
+
+ return 0;
+}
+
+void ath12k_ce_per_engine_service(struct ath12k_base *ab, u16 ce_id)
+{
+ struct ath12k_ce_pipe *pipe = &ab->ce.ce_pipe[ce_id];
+
+ if (pipe->send_cb)
+ pipe->send_cb(pipe);
+
+ if (pipe->recv_cb)
+ ath12k_ce_recv_process_cb(pipe);
+}
+
+void ath12k_ce_poll_send_completed(struct ath12k_base *ab, u8 pipe_id)
+{
+ struct ath12k_ce_pipe *pipe = &ab->ce.ce_pipe[pipe_id];
+
+ if ((pipe->attr_flags & CE_ATTR_DIS_INTR) && pipe->send_cb)
+ pipe->send_cb(pipe);
+}
+
+int ath12k_ce_send(struct ath12k_base *ab, struct sk_buff *skb, u8 pipe_id,
+ u16 transfer_id)
+{
+ struct ath12k_ce_pipe *pipe = &ab->ce.ce_pipe[pipe_id];
+ struct hal_ce_srng_src_desc *desc;
+ struct hal_srng *srng;
+ unsigned int write_index, sw_index;
+ unsigned int nentries_mask;
+ int ret = 0;
+ u8 byte_swap_data = 0;
+ int num_used;
+
+ /* Check if some entries could be regained by handling tx completion if
+ * the CE has interrupts disabled and the used entries is more than the
+ * defined usage threshold.
+ */
+ if (pipe->attr_flags & CE_ATTR_DIS_INTR) {
+ spin_lock_bh(&ab->ce.ce_lock);
+ write_index = pipe->src_ring->write_index;
+
+ sw_index = pipe->src_ring->sw_index;
+
+ if (write_index >= sw_index)
+ num_used = write_index - sw_index;
+ else
+ num_used = pipe->src_ring->nentries - sw_index +
+ write_index;
+
+ spin_unlock_bh(&ab->ce.ce_lock);
+
+ if (num_used > ATH12K_CE_USAGE_THRESHOLD)
+ ath12k_ce_poll_send_completed(ab, pipe->pipe_num);
+ }
+
+ if (test_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags))
+ return -ESHUTDOWN;
+
+ spin_lock_bh(&ab->ce.ce_lock);
+
+ write_index = pipe->src_ring->write_index;
+ nentries_mask = pipe->src_ring->nentries_mask;
+
+ srng = &ab->hal.srng_list[pipe->src_ring->hal_ring_id];
+
+ spin_lock_bh(&srng->lock);
+
+ ath12k_hal_srng_access_begin(ab, srng);
+
+ if (unlikely(ath12k_hal_srng_src_num_free(ab, srng, false) < 1)) {
+ ath12k_hal_srng_access_end(ab, srng);
+ ret = -ENOBUFS;
+ goto unlock;
+ }
+
+ desc = ath12k_hal_srng_src_get_next_reaped(ab, srng);
+ if (!desc) {
+ ath12k_hal_srng_access_end(ab, srng);
+ ret = -ENOBUFS;
+ goto unlock;
+ }
+
+ if (pipe->attr_flags & CE_ATTR_BYTE_SWAP_DATA)
+ byte_swap_data = 1;
+
+ ath12k_hal_ce_src_set_desc(desc, ATH12K_SKB_CB(skb)->paddr,
+ skb->len, transfer_id, byte_swap_data);
+
+ pipe->src_ring->skb[write_index] = skb;
+ pipe->src_ring->write_index = CE_RING_IDX_INCR(nentries_mask,
+ write_index);
+
+ ath12k_hal_srng_access_end(ab, srng);
+
+unlock:
+ spin_unlock_bh(&srng->lock);
+
+ spin_unlock_bh(&ab->ce.ce_lock);
+
+ return ret;
+}
+
+static void ath12k_ce_rx_pipe_cleanup(struct ath12k_ce_pipe *pipe)
+{
+ struct ath12k_base *ab = pipe->ab;
+ struct ath12k_ce_ring *ring = pipe->dest_ring;
+ struct sk_buff *skb;
+ int i;
+
+ if (!(ring && pipe->buf_sz))
+ return;
+
+ for (i = 0; i < ring->nentries; i++) {
+ skb = ring->skb[i];
+ if (!skb)
+ continue;
+
+ ring->skb[i] = NULL;
+ dma_unmap_single(ab->dev, ATH12K_SKB_RXCB(skb)->paddr,
+ skb->len + skb_tailroom(skb), DMA_FROM_DEVICE);
+ dev_kfree_skb_any(skb);
+ }
+}
+
+void ath12k_ce_cleanup_pipes(struct ath12k_base *ab)
+{
+ struct ath12k_ce_pipe *pipe;
+ int pipe_num;
+
+ for (pipe_num = 0; pipe_num < ab->hw_params->ce_count; pipe_num++) {
+ pipe = &ab->ce.ce_pipe[pipe_num];
+ ath12k_ce_rx_pipe_cleanup(pipe);
+
+ /* Cleanup any src CE's which have interrupts disabled */
+ ath12k_ce_poll_send_completed(ab, pipe_num);
+
+ /* NOTE: Should we also clean up tx buffer in all pipes? */
+ }
+}
+
+void ath12k_ce_rx_post_buf(struct ath12k_base *ab)
+{
+ struct ath12k_ce_pipe *pipe;
+ int i;
+ int ret;
+
+ for (i = 0; i < ab->hw_params->ce_count; i++) {
+ pipe = &ab->ce.ce_pipe[i];
+ ret = ath12k_ce_rx_post_pipe(pipe);
+ if (ret) {
+ if (ret == -ENOSPC)
+ continue;
+
+ ath12k_warn(ab, "failed to post rx buf to pipe: %d err: %d\n",
+ i, ret);
+ mod_timer(&ab->rx_replenish_retry,
+ jiffies + ATH12K_CE_RX_POST_RETRY_JIFFIES);
+
+ return;
+ }
+ }
+}
+
+void ath12k_ce_rx_replenish_retry(struct timer_list *t)
+{
+ struct ath12k_base *ab = from_timer(ab, t, rx_replenish_retry);
+
+ ath12k_ce_rx_post_buf(ab);
+}
+
+static void ath12k_ce_shadow_config(struct ath12k_base *ab)
+{
+ int i;
+
+ for (i = 0; i < ab->hw_params->ce_count; i++) {
+ if (ab->hw_params->host_ce_config[i].src_nentries)
+ ath12k_hal_srng_update_shadow_config(ab, HAL_CE_SRC, i);
+
+ if (ab->hw_params->host_ce_config[i].dest_nentries) {
+ ath12k_hal_srng_update_shadow_config(ab, HAL_CE_DST, i);
+ ath12k_hal_srng_update_shadow_config(ab, HAL_CE_DST_STATUS, i);
+ }
+ }
+}
+
+void ath12k_ce_get_shadow_config(struct ath12k_base *ab,
+ u32 **shadow_cfg, u32 *shadow_cfg_len)
+{
+ if (!ab->hw_params->supports_shadow_regs)
+ return;
+
+ ath12k_hal_srng_get_shadow_config(ab, shadow_cfg, shadow_cfg_len);
+
+ /* shadow is already configured */
+ if (*shadow_cfg_len)
+ return;
+
+ /* shadow isn't configured yet, configure now.
+ * non-CE srngs are configured firstly, then
+ * all CE srngs.
+ */
+ ath12k_hal_srng_shadow_config(ab);
+ ath12k_ce_shadow_config(ab);
+
+ /* get the shadow configuration */
+ ath12k_hal_srng_get_shadow_config(ab, shadow_cfg, shadow_cfg_len);
+}
+
+int ath12k_ce_init_pipes(struct ath12k_base *ab)
+{
+ struct ath12k_ce_pipe *pipe;
+ int i;
+ int ret;
+
+ ath12k_ce_get_shadow_config(ab, &ab->qmi.ce_cfg.shadow_reg_v3,
+ &ab->qmi.ce_cfg.shadow_reg_v3_len);
+
+ for (i = 0; i < ab->hw_params->ce_count; i++) {
+ pipe = &ab->ce.ce_pipe[i];
+
+ if (pipe->src_ring) {
+ ret = ath12k_ce_init_ring(ab, pipe->src_ring, i,
+ HAL_CE_SRC);
+ if (ret) {
+ ath12k_warn(ab, "failed to init src ring: %d\n",
+ ret);
+ /* Should we clear any partial init */
+ return ret;
+ }
+
+ pipe->src_ring->write_index = 0;
+ pipe->src_ring->sw_index = 0;
+ }
+
+ if (pipe->dest_ring) {
+ ret = ath12k_ce_init_ring(ab, pipe->dest_ring, i,
+ HAL_CE_DST);
+ if (ret) {
+ ath12k_warn(ab, "failed to init dest ring: %d\n",
+ ret);
+ /* Should we clear any partial init */
+ return ret;
+ }
+
+ pipe->rx_buf_needed = pipe->dest_ring->nentries ?
+ pipe->dest_ring->nentries - 2 : 0;
+
+ pipe->dest_ring->write_index = 0;
+ pipe->dest_ring->sw_index = 0;
+ }
+
+ if (pipe->status_ring) {
+ ret = ath12k_ce_init_ring(ab, pipe->status_ring, i,
+ HAL_CE_DST_STATUS);
+ if (ret) {
+ ath12k_warn(ab, "failed to init dest status ing: %d\n",
+ ret);
+ /* Should we clear any partial init */
+ return ret;
+ }
+
+ pipe->status_ring->write_index = 0;
+ pipe->status_ring->sw_index = 0;
+ }
+ }
+
+ return 0;
+}
+
+void ath12k_ce_free_pipes(struct ath12k_base *ab)
+{
+ struct ath12k_ce_pipe *pipe;
+ int desc_sz;
+ int i;
+
+ for (i = 0; i < ab->hw_params->ce_count; i++) {
+ pipe = &ab->ce.ce_pipe[i];
+
+ if (pipe->src_ring) {
+ desc_sz = ath12k_hal_ce_get_desc_size(HAL_CE_DESC_SRC);
+ dma_free_coherent(ab->dev,
+ pipe->src_ring->nentries * desc_sz +
+ CE_DESC_RING_ALIGN,
+ pipe->src_ring->base_addr_owner_space,
+ pipe->src_ring->base_addr_ce_space);
+ kfree(pipe->src_ring);
+ pipe->src_ring = NULL;
+ }
+
+ if (pipe->dest_ring) {
+ desc_sz = ath12k_hal_ce_get_desc_size(HAL_CE_DESC_DST);
+ dma_free_coherent(ab->dev,
+ pipe->dest_ring->nentries * desc_sz +
+ CE_DESC_RING_ALIGN,
+ pipe->dest_ring->base_addr_owner_space,
+ pipe->dest_ring->base_addr_ce_space);
+ kfree(pipe->dest_ring);
+ pipe->dest_ring = NULL;
+ }
+
+ if (pipe->status_ring) {
+ desc_sz =
+ ath12k_hal_ce_get_desc_size(HAL_CE_DESC_DST_STATUS);
+ dma_free_coherent(ab->dev,
+ pipe->status_ring->nentries * desc_sz +
+ CE_DESC_RING_ALIGN,
+ pipe->status_ring->base_addr_owner_space,
+ pipe->status_ring->base_addr_ce_space);
+ kfree(pipe->status_ring);
+ pipe->status_ring = NULL;
+ }
+ }
+}
+
+int ath12k_ce_alloc_pipes(struct ath12k_base *ab)
+{
+ struct ath12k_ce_pipe *pipe;
+ int i;
+ int ret;
+ const struct ce_attr *attr;
+
+ spin_lock_init(&ab->ce.ce_lock);
+
+ for (i = 0; i < ab->hw_params->ce_count; i++) {
+ attr = &ab->hw_params->host_ce_config[i];
+ pipe = &ab->ce.ce_pipe[i];
+ pipe->pipe_num = i;
+ pipe->ab = ab;
+ pipe->buf_sz = attr->src_sz_max;
+
+ ret = ath12k_ce_alloc_pipe(ab, i);
+ if (ret) {
+ /* Free any parial successful allocation */
+ ath12k_ce_free_pipes(ab);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+int ath12k_ce_get_attr_flags(struct ath12k_base *ab, int ce_id)
+{
+ if (ce_id >= ab->hw_params->ce_count)
+ return -EINVAL;
+
+ return ab->hw_params->host_ce_config[ce_id].flags;
+}
diff --git a/drivers/net/wireless/ath/ath12k/ce.h b/drivers/net/wireless/ath/ath12k/ce.h
new file mode 100644
index 000000000000..17cf16235e0b
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/ce.h
@@ -0,0 +1,184 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef ATH12K_CE_H
+#define ATH12K_CE_H
+
+#define CE_COUNT_MAX 16
+
+/* Byte swap data words */
+#define CE_ATTR_BYTE_SWAP_DATA 2
+
+/* no interrupt on copy completion */
+#define CE_ATTR_DIS_INTR 8
+
+/* Host software's Copy Engine configuration. */
+#define CE_ATTR_FLAGS 0
+
+/* Threshold to poll for tx completion in case of Interrupt disabled CE's */
+#define ATH12K_CE_USAGE_THRESHOLD 32
+
+/* Directions for interconnect pipe configuration.
+ * These definitions may be used during configuration and are shared
+ * between Host and Target.
+ *
+ * Pipe Directions are relative to the Host, so PIPEDIR_IN means
+ * "coming IN over air through Target to Host" as with a WiFi Rx operation.
+ * Conversely, PIPEDIR_OUT means "going OUT from Host through Target over air"
+ * as with a WiFi Tx operation. This is somewhat awkward for the "middle-man"
+ * Target since things that are "PIPEDIR_OUT" are coming IN to the Target
+ * over the interconnect.
+ */
+#define PIPEDIR_NONE 0
+#define PIPEDIR_IN 1 /* Target-->Host, WiFi Rx direction */
+#define PIPEDIR_OUT 2 /* Host->Target, WiFi Tx direction */
+#define PIPEDIR_INOUT 3 /* bidirectional */
+#define PIPEDIR_INOUT_H2H 4 /* bidirectional, host to host */
+
+/* CE address/mask */
+#define CE_HOST_IE_ADDRESS 0x00A1803C
+#define CE_HOST_IE_2_ADDRESS 0x00A18040
+#define CE_HOST_IE_3_ADDRESS CE_HOST_IE_ADDRESS
+
+#define CE_HOST_IE_3_SHIFT 0xC
+
+#define CE_RING_IDX_INCR(nentries_mask, idx) (((idx) + 1) & (nentries_mask))
+
+#define ATH12K_CE_RX_POST_RETRY_JIFFIES 50
+
+struct ath12k_base;
+
+/* Establish a mapping between a service/direction and a pipe.
+ * Configuration information for a Copy Engine pipe and services.
+ * Passed from Host to Target through QMI message and must be in
+ * little endian format.
+ */
+struct service_to_pipe {
+ __le32 service_id;
+ __le32 pipedir;
+ __le32 pipenum;
+};
+
+/* Configuration information for a Copy Engine pipe.
+ * Passed from Host to Target through QMI message during startup (one per CE).
+ *
+ * NOTE: Structure is shared between Host software and Target firmware!
+ */
+struct ce_pipe_config {
+ __le32 pipenum;
+ __le32 pipedir;
+ __le32 nentries;
+ __le32 nbytes_max;
+ __le32 flags;
+ __le32 reserved;
+};
+
+struct ce_attr {
+ /* CE_ATTR_* values */
+ unsigned int flags;
+
+ /* #entries in source ring - Must be a power of 2 */
+ unsigned int src_nentries;
+
+ /* Max source send size for this CE.
+ * This is also the minimum size of a destination buffer.
+ */
+ unsigned int src_sz_max;
+
+ /* #entries in destination ring - Must be a power of 2 */
+ unsigned int dest_nentries;
+
+ void (*recv_cb)(struct ath12k_base *ab, struct sk_buff *skb);
+};
+
+#define CE_DESC_RING_ALIGN 8
+
+struct ath12k_ce_ring {
+ /* Number of entries in this ring; must be power of 2 */
+ unsigned int nentries;
+ unsigned int nentries_mask;
+
+ /* For dest ring, this is the next index to be processed
+ * by software after it was/is received into.
+ *
+ * For src ring, this is the last descriptor that was sent
+ * and completion processed by software.
+ *
+ * Regardless of src or dest ring, this is an invariant
+ * (modulo ring size):
+ * write index >= read index >= sw_index
+ */
+ unsigned int sw_index;
+ /* cached copy */
+ unsigned int write_index;
+
+ /* Start of DMA-coherent area reserved for descriptors */
+ /* Host address space */
+ void *base_addr_owner_space_unaligned;
+ /* CE address space */
+ u32 base_addr_ce_space_unaligned;
+
+ /* Actual start of descriptors.
+ * Aligned to descriptor-size boundary.
+ * Points into reserved DMA-coherent area, above.
+ */
+ /* Host address space */
+ void *base_addr_owner_space;
+
+ /* CE address space */
+ u32 base_addr_ce_space;
+
+ /* HAL ring id */
+ u32 hal_ring_id;
+
+ /* keep last */
+ struct sk_buff *skb[];
+};
+
+struct ath12k_ce_pipe {
+ struct ath12k_base *ab;
+ u16 pipe_num;
+ unsigned int attr_flags;
+ unsigned int buf_sz;
+ unsigned int rx_buf_needed;
+
+ void (*send_cb)(struct ath12k_ce_pipe *pipe);
+ void (*recv_cb)(struct ath12k_base *ab, struct sk_buff *skb);
+
+ struct tasklet_struct intr_tq;
+ struct ath12k_ce_ring *src_ring;
+ struct ath12k_ce_ring *dest_ring;
+ struct ath12k_ce_ring *status_ring;
+ u64 timestamp;
+};
+
+struct ath12k_ce {
+ struct ath12k_ce_pipe ce_pipe[CE_COUNT_MAX];
+ /* Protects rings of all ce pipes */
+ spinlock_t ce_lock;
+ struct ath12k_hp_update_timer hp_timer[CE_COUNT_MAX];
+};
+
+extern const struct ce_attr ath12k_host_ce_config_qcn9274[];
+extern const struct ce_attr ath12k_host_ce_config_wcn7850[];
+
+void ath12k_ce_cleanup_pipes(struct ath12k_base *ab);
+void ath12k_ce_rx_replenish_retry(struct timer_list *t);
+void ath12k_ce_per_engine_service(struct ath12k_base *ab, u16 ce_id);
+int ath12k_ce_send(struct ath12k_base *ab, struct sk_buff *skb, u8 pipe_id,
+ u16 transfer_id);
+void ath12k_ce_rx_post_buf(struct ath12k_base *ab);
+int ath12k_ce_init_pipes(struct ath12k_base *ab);
+int ath12k_ce_alloc_pipes(struct ath12k_base *ab);
+void ath12k_ce_free_pipes(struct ath12k_base *ab);
+int ath12k_ce_get_attr_flags(struct ath12k_base *ab, int ce_id);
+void ath12k_ce_poll_send_completed(struct ath12k_base *ab, u8 pipe_id);
+int ath12k_ce_map_service_to_pipe(struct ath12k_base *ab, u16 service_id,
+ u8 *ul_pipe, u8 *dl_pipe);
+int ath12k_ce_attr_attach(struct ath12k_base *ab);
+void ath12k_ce_get_shadow_config(struct ath12k_base *ab,
+ u32 **shadow_cfg, u32 *shadow_cfg_len);
+#endif
diff --git a/drivers/net/wireless/ath/ath12k/core.c b/drivers/net/wireless/ath/ath12k/core.c
new file mode 100644
index 000000000000..a89e66653f04
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/core.c
@@ -0,0 +1,939 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/remoteproc.h>
+#include <linux/firmware.h>
+#include <linux/of.h>
+#include "core.h"
+#include "dp_tx.h"
+#include "dp_rx.h"
+#include "debug.h"
+#include "hif.h"
+
+unsigned int ath12k_debug_mask;
+module_param_named(debug_mask, ath12k_debug_mask, uint, 0644);
+MODULE_PARM_DESC(debug_mask, "Debugging mask");
+
+int ath12k_core_suspend(struct ath12k_base *ab)
+{
+ int ret;
+
+ if (!ab->hw_params->supports_suspend)
+ return -EOPNOTSUPP;
+
+ /* TODO: there can frames in queues so for now add delay as a hack.
+ * Need to implement to handle and remove this delay.
+ */
+ msleep(500);
+
+ ret = ath12k_dp_rx_pktlog_stop(ab, true);
+ if (ret) {
+ ath12k_warn(ab, "failed to stop dp rx (and timer) pktlog during suspend: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = ath12k_dp_rx_pktlog_stop(ab, false);
+ if (ret) {
+ ath12k_warn(ab, "failed to stop dp rx pktlog during suspend: %d\n",
+ ret);
+ return ret;
+ }
+
+ ath12k_hif_irq_disable(ab);
+ ath12k_hif_ce_irq_disable(ab);
+
+ ret = ath12k_hif_suspend(ab);
+ if (ret) {
+ ath12k_warn(ab, "failed to suspend hif: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+int ath12k_core_resume(struct ath12k_base *ab)
+{
+ int ret;
+
+ if (!ab->hw_params->supports_suspend)
+ return -EOPNOTSUPP;
+
+ ret = ath12k_hif_resume(ab);
+ if (ret) {
+ ath12k_warn(ab, "failed to resume hif during resume: %d\n", ret);
+ return ret;
+ }
+
+ ath12k_hif_ce_irq_enable(ab);
+ ath12k_hif_irq_enable(ab);
+
+ ret = ath12k_dp_rx_pktlog_start(ab);
+ if (ret) {
+ ath12k_warn(ab, "failed to start rx pktlog during resume: %d\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath12k_core_create_board_name(struct ath12k_base *ab, char *name,
+ size_t name_len)
+{
+ /* strlen(',variant=') + strlen(ab->qmi.target.bdf_ext) */
+ char variant[9 + ATH12K_QMI_BDF_EXT_STR_LENGTH] = { 0 };
+
+ if (ab->qmi.target.bdf_ext[0] != '\0')
+ scnprintf(variant, sizeof(variant), ",variant=%s",
+ ab->qmi.target.bdf_ext);
+
+ scnprintf(name, name_len,
+ "bus=%s,qmi-chip-id=%d,qmi-board-id=%d%s",
+ ath12k_bus_str(ab->hif.bus),
+ ab->qmi.target.chip_id,
+ ab->qmi.target.board_id, variant);
+
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot using board name '%s'\n", name);
+
+ return 0;
+}
+
+const struct firmware *ath12k_core_firmware_request(struct ath12k_base *ab,
+ const char *file)
+{
+ const struct firmware *fw;
+ char path[100];
+ int ret;
+
+ if (!file)
+ return ERR_PTR(-ENOENT);
+
+ ath12k_core_create_firmware_path(ab, file, path, sizeof(path));
+
+ ret = firmware_request_nowarn(&fw, path, ab->dev);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot firmware request %s size %zu\n",
+ path, fw->size);
+
+ return fw;
+}
+
+void ath12k_core_free_bdf(struct ath12k_base *ab, struct ath12k_board_data *bd)
+{
+ if (!IS_ERR(bd->fw))
+ release_firmware(bd->fw);
+
+ memset(bd, 0, sizeof(*bd));
+}
+
+static int ath12k_core_parse_bd_ie_board(struct ath12k_base *ab,
+ struct ath12k_board_data *bd,
+ const void *buf, size_t buf_len,
+ const char *boardname,
+ int bd_ie_type)
+{
+ const struct ath12k_fw_ie *hdr;
+ bool name_match_found;
+ int ret, board_ie_id;
+ size_t board_ie_len;
+ const void *board_ie_data;
+
+ name_match_found = false;
+
+ /* go through ATH12K_BD_IE_BOARD_ elements */
+ while (buf_len > sizeof(struct ath12k_fw_ie)) {
+ hdr = buf;
+ board_ie_id = le32_to_cpu(hdr->id);
+ board_ie_len = le32_to_cpu(hdr->len);
+ board_ie_data = hdr->data;
+
+ buf_len -= sizeof(*hdr);
+ buf += sizeof(*hdr);
+
+ if (buf_len < ALIGN(board_ie_len, 4)) {
+ ath12k_err(ab, "invalid ATH12K_BD_IE_BOARD length: %zu < %zu\n",
+ buf_len, ALIGN(board_ie_len, 4));
+ ret = -EINVAL;
+ goto out;
+ }
+
+ switch (board_ie_id) {
+ case ATH12K_BD_IE_BOARD_NAME:
+ ath12k_dbg_dump(ab, ATH12K_DBG_BOOT, "board name", "",
+ board_ie_data, board_ie_len);
+
+ if (board_ie_len != strlen(boardname))
+ break;
+
+ ret = memcmp(board_ie_data, boardname, strlen(boardname));
+ if (ret)
+ break;
+
+ name_match_found = true;
+ ath12k_dbg(ab, ATH12K_DBG_BOOT,
+ "boot found match for name '%s'",
+ boardname);
+ break;
+ case ATH12K_BD_IE_BOARD_DATA:
+ if (!name_match_found)
+ /* no match found */
+ break;
+
+ ath12k_dbg(ab, ATH12K_DBG_BOOT,
+ "boot found board data for '%s'", boardname);
+
+ bd->data = board_ie_data;
+ bd->len = board_ie_len;
+
+ ret = 0;
+ goto out;
+ default:
+ ath12k_warn(ab, "unknown ATH12K_BD_IE_BOARD found: %d\n",
+ board_ie_id);
+ break;
+ }
+
+ /* jump over the padding */
+ board_ie_len = ALIGN(board_ie_len, 4);
+
+ buf_len -= board_ie_len;
+ buf += board_ie_len;
+ }
+
+ /* no match found */
+ ret = -ENOENT;
+
+out:
+ return ret;
+}
+
+static int ath12k_core_fetch_board_data_api_n(struct ath12k_base *ab,
+ struct ath12k_board_data *bd,
+ const char *boardname)
+{
+ size_t len, magic_len;
+ const u8 *data;
+ char *filename, filepath[100];
+ size_t ie_len;
+ struct ath12k_fw_ie *hdr;
+ int ret, ie_id;
+
+ filename = ATH12K_BOARD_API2_FILE;
+
+ if (!bd->fw)
+ bd->fw = ath12k_core_firmware_request(ab, filename);
+
+ if (IS_ERR(bd->fw))
+ return PTR_ERR(bd->fw);
+
+ data = bd->fw->data;
+ len = bd->fw->size;
+
+ ath12k_core_create_firmware_path(ab, filename,
+ filepath, sizeof(filepath));
+
+ /* magic has extra null byte padded */
+ magic_len = strlen(ATH12K_BOARD_MAGIC) + 1;
+ if (len < magic_len) {
+ ath12k_err(ab, "failed to find magic value in %s, file too short: %zu\n",
+ filepath, len);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ if (memcmp(data, ATH12K_BOARD_MAGIC, magic_len)) {
+ ath12k_err(ab, "found invalid board magic\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ /* magic is padded to 4 bytes */
+ magic_len = ALIGN(magic_len, 4);
+ if (len < magic_len) {
+ ath12k_err(ab, "failed: %s too small to contain board data, len: %zu\n",
+ filepath, len);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ data += magic_len;
+ len -= magic_len;
+
+ while (len > sizeof(struct ath12k_fw_ie)) {
+ hdr = (struct ath12k_fw_ie *)data;
+ ie_id = le32_to_cpu(hdr->id);
+ ie_len = le32_to_cpu(hdr->len);
+
+ len -= sizeof(*hdr);
+ data = hdr->data;
+
+ if (len < ALIGN(ie_len, 4)) {
+ ath12k_err(ab, "invalid length for board ie_id %d ie_len %zu len %zu\n",
+ ie_id, ie_len, len);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ switch (ie_id) {
+ case ATH12K_BD_IE_BOARD:
+ ret = ath12k_core_parse_bd_ie_board(ab, bd, data,
+ ie_len,
+ boardname,
+ ATH12K_BD_IE_BOARD);
+ if (ret == -ENOENT)
+ /* no match found, continue */
+ break;
+ else if (ret)
+ /* there was an error, bail out */
+ goto err;
+ /* either found or error, so stop searching */
+ goto out;
+ }
+
+ /* jump over the padding */
+ ie_len = ALIGN(ie_len, 4);
+
+ len -= ie_len;
+ data += ie_len;
+ }
+
+out:
+ if (!bd->data || !bd->len) {
+ ath12k_err(ab,
+ "failed to fetch board data for %s from %s\n",
+ boardname, filepath);
+ ret = -ENODATA;
+ goto err;
+ }
+
+ return 0;
+
+err:
+ ath12k_core_free_bdf(ab, bd);
+ return ret;
+}
+
+int ath12k_core_fetch_board_data_api_1(struct ath12k_base *ab,
+ struct ath12k_board_data *bd,
+ char *filename)
+{
+ bd->fw = ath12k_core_firmware_request(ab, filename);
+ if (IS_ERR(bd->fw))
+ return PTR_ERR(bd->fw);
+
+ bd->data = bd->fw->data;
+ bd->len = bd->fw->size;
+
+ return 0;
+}
+
+#define BOARD_NAME_SIZE 100
+int ath12k_core_fetch_bdf(struct ath12k_base *ab, struct ath12k_board_data *bd)
+{
+ char boardname[BOARD_NAME_SIZE];
+ int ret;
+
+ ret = ath12k_core_create_board_name(ab, boardname, BOARD_NAME_SIZE);
+ if (ret) {
+ ath12k_err(ab, "failed to create board name: %d", ret);
+ return ret;
+ }
+
+ ab->bd_api = 2;
+ ret = ath12k_core_fetch_board_data_api_n(ab, bd, boardname);
+ if (!ret)
+ goto success;
+
+ ab->bd_api = 1;
+ ret = ath12k_core_fetch_board_data_api_1(ab, bd, ATH12K_DEFAULT_BOARD_FILE);
+ if (ret) {
+ ath12k_err(ab, "failed to fetch board-2.bin or board.bin from %s\n",
+ ab->hw_params->fw.dir);
+ return ret;
+ }
+
+success:
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "using board api %d\n", ab->bd_api);
+ return 0;
+}
+
+static void ath12k_core_stop(struct ath12k_base *ab)
+{
+ if (!test_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags))
+ ath12k_qmi_firmware_stop(ab);
+
+ ath12k_hif_stop(ab);
+ ath12k_wmi_detach(ab);
+ ath12k_dp_rx_pdev_reo_cleanup(ab);
+
+ /* De-Init of components as needed */
+}
+
+static int ath12k_core_soc_create(struct ath12k_base *ab)
+{
+ int ret;
+
+ ret = ath12k_qmi_init_service(ab);
+ if (ret) {
+ ath12k_err(ab, "failed to initialize qmi :%d\n", ret);
+ return ret;
+ }
+
+ ret = ath12k_hif_power_up(ab);
+ if (ret) {
+ ath12k_err(ab, "failed to power up :%d\n", ret);
+ goto err_qmi_deinit;
+ }
+
+ return 0;
+
+err_qmi_deinit:
+ ath12k_qmi_deinit_service(ab);
+ return ret;
+}
+
+static void ath12k_core_soc_destroy(struct ath12k_base *ab)
+{
+ ath12k_dp_free(ab);
+ ath12k_reg_free(ab);
+ ath12k_qmi_deinit_service(ab);
+}
+
+static int ath12k_core_pdev_create(struct ath12k_base *ab)
+{
+ int ret;
+
+ ret = ath12k_mac_register(ab);
+ if (ret) {
+ ath12k_err(ab, "failed register the radio with mac80211: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath12k_dp_pdev_alloc(ab);
+ if (ret) {
+ ath12k_err(ab, "failed to attach DP pdev: %d\n", ret);
+ goto err_mac_unregister;
+ }
+
+ return 0;
+
+err_mac_unregister:
+ ath12k_mac_unregister(ab);
+
+ return ret;
+}
+
+static void ath12k_core_pdev_destroy(struct ath12k_base *ab)
+{
+ ath12k_mac_unregister(ab);
+ ath12k_hif_irq_disable(ab);
+ ath12k_dp_pdev_free(ab);
+}
+
+static int ath12k_core_start(struct ath12k_base *ab,
+ enum ath12k_firmware_mode mode)
+{
+ int ret;
+
+ ret = ath12k_wmi_attach(ab);
+ if (ret) {
+ ath12k_err(ab, "failed to attach wmi: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath12k_htc_init(ab);
+ if (ret) {
+ ath12k_err(ab, "failed to init htc: %d\n", ret);
+ goto err_wmi_detach;
+ }
+
+ ret = ath12k_hif_start(ab);
+ if (ret) {
+ ath12k_err(ab, "failed to start HIF: %d\n", ret);
+ goto err_wmi_detach;
+ }
+
+ ret = ath12k_htc_wait_target(&ab->htc);
+ if (ret) {
+ ath12k_err(ab, "failed to connect to HTC: %d\n", ret);
+ goto err_hif_stop;
+ }
+
+ ret = ath12k_dp_htt_connect(&ab->dp);
+ if (ret) {
+ ath12k_err(ab, "failed to connect to HTT: %d\n", ret);
+ goto err_hif_stop;
+ }
+
+ ret = ath12k_wmi_connect(ab);
+ if (ret) {
+ ath12k_err(ab, "failed to connect wmi: %d\n", ret);
+ goto err_hif_stop;
+ }
+
+ ret = ath12k_htc_start(&ab->htc);
+ if (ret) {
+ ath12k_err(ab, "failed to start HTC: %d\n", ret);
+ goto err_hif_stop;
+ }
+
+ ret = ath12k_wmi_wait_for_service_ready(ab);
+ if (ret) {
+ ath12k_err(ab, "failed to receive wmi service ready event: %d\n",
+ ret);
+ goto err_hif_stop;
+ }
+
+ ret = ath12k_mac_allocate(ab);
+ if (ret) {
+ ath12k_err(ab, "failed to create new hw device with mac80211 :%d\n",
+ ret);
+ goto err_hif_stop;
+ }
+
+ ath12k_dp_cc_config(ab);
+
+ ath12k_dp_pdev_pre_alloc(ab);
+
+ ret = ath12k_dp_rx_pdev_reo_setup(ab);
+ if (ret) {
+ ath12k_err(ab, "failed to initialize reo destination rings: %d\n", ret);
+ goto err_mac_destroy;
+ }
+
+ ret = ath12k_wmi_cmd_init(ab);
+ if (ret) {
+ ath12k_err(ab, "failed to send wmi init cmd: %d\n", ret);
+ goto err_reo_cleanup;
+ }
+
+ ret = ath12k_wmi_wait_for_unified_ready(ab);
+ if (ret) {
+ ath12k_err(ab, "failed to receive wmi unified ready event: %d\n",
+ ret);
+ goto err_reo_cleanup;
+ }
+
+ /* put hardware to DBS mode */
+ if (ab->hw_params->single_pdev_only) {
+ ret = ath12k_wmi_set_hw_mode(ab, WMI_HOST_HW_MODE_DBS);
+ if (ret) {
+ ath12k_err(ab, "failed to send dbs mode: %d\n", ret);
+ goto err_reo_cleanup;
+ }
+ }
+
+ ret = ath12k_dp_tx_htt_h2t_ver_req_msg(ab);
+ if (ret) {
+ ath12k_err(ab, "failed to send htt version request message: %d\n",
+ ret);
+ goto err_reo_cleanup;
+ }
+
+ return 0;
+
+err_reo_cleanup:
+ ath12k_dp_rx_pdev_reo_cleanup(ab);
+err_mac_destroy:
+ ath12k_mac_destroy(ab);
+err_hif_stop:
+ ath12k_hif_stop(ab);
+err_wmi_detach:
+ ath12k_wmi_detach(ab);
+ return ret;
+}
+
+static int ath12k_core_start_firmware(struct ath12k_base *ab,
+ enum ath12k_firmware_mode mode)
+{
+ int ret;
+
+ ath12k_ce_get_shadow_config(ab, &ab->qmi.ce_cfg.shadow_reg_v3,
+ &ab->qmi.ce_cfg.shadow_reg_v3_len);
+
+ ret = ath12k_qmi_firmware_start(ab, mode);
+ if (ret) {
+ ath12k_err(ab, "failed to send firmware start: %d\n", ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+int ath12k_core_qmi_firmware_ready(struct ath12k_base *ab)
+{
+ int ret;
+
+ ret = ath12k_core_start_firmware(ab, ATH12K_FIRMWARE_MODE_NORMAL);
+ if (ret) {
+ ath12k_err(ab, "failed to start firmware: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath12k_ce_init_pipes(ab);
+ if (ret) {
+ ath12k_err(ab, "failed to initialize CE: %d\n", ret);
+ goto err_firmware_stop;
+ }
+
+ ret = ath12k_dp_alloc(ab);
+ if (ret) {
+ ath12k_err(ab, "failed to init DP: %d\n", ret);
+ goto err_firmware_stop;
+ }
+
+ mutex_lock(&ab->core_lock);
+ ret = ath12k_core_start(ab, ATH12K_FIRMWARE_MODE_NORMAL);
+ if (ret) {
+ ath12k_err(ab, "failed to start core: %d\n", ret);
+ goto err_dp_free;
+ }
+
+ ret = ath12k_core_pdev_create(ab);
+ if (ret) {
+ ath12k_err(ab, "failed to create pdev core: %d\n", ret);
+ goto err_core_stop;
+ }
+ ath12k_hif_irq_enable(ab);
+ mutex_unlock(&ab->core_lock);
+
+ return 0;
+
+err_core_stop:
+ ath12k_core_stop(ab);
+ ath12k_mac_destroy(ab);
+err_dp_free:
+ ath12k_dp_free(ab);
+ mutex_unlock(&ab->core_lock);
+err_firmware_stop:
+ ath12k_qmi_firmware_stop(ab);
+
+ return ret;
+}
+
+static int ath12k_core_reconfigure_on_crash(struct ath12k_base *ab)
+{
+ int ret;
+
+ mutex_lock(&ab->core_lock);
+ ath12k_hif_irq_disable(ab);
+ ath12k_dp_pdev_free(ab);
+ ath12k_hif_stop(ab);
+ ath12k_wmi_detach(ab);
+ ath12k_dp_rx_pdev_reo_cleanup(ab);
+ mutex_unlock(&ab->core_lock);
+
+ ath12k_dp_free(ab);
+ ath12k_hal_srng_deinit(ab);
+
+ ab->free_vdev_map = (1LL << (ab->num_radios * TARGET_NUM_VDEVS)) - 1;
+
+ ret = ath12k_hal_srng_init(ab);
+ if (ret)
+ return ret;
+
+ clear_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags);
+
+ ret = ath12k_core_qmi_firmware_ready(ab);
+ if (ret)
+ goto err_hal_srng_deinit;
+
+ clear_bit(ATH12K_FLAG_RECOVERY, &ab->dev_flags);
+
+ return 0;
+
+err_hal_srng_deinit:
+ ath12k_hal_srng_deinit(ab);
+ return ret;
+}
+
+void ath12k_core_halt(struct ath12k *ar)
+{
+ struct ath12k_base *ab = ar->ab;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ar->num_created_vdevs = 0;
+ ar->allocated_vdev_map = 0;
+
+ ath12k_mac_scan_finish(ar);
+ ath12k_mac_peer_cleanup_all(ar);
+ cancel_delayed_work_sync(&ar->scan.timeout);
+ cancel_work_sync(&ar->regd_update_work);
+
+ rcu_assign_pointer(ab->pdevs_active[ar->pdev_idx], NULL);
+ synchronize_rcu();
+ INIT_LIST_HEAD(&ar->arvifs);
+ idr_init(&ar->txmgmt_idr);
+}
+
+static void ath12k_core_pre_reconfigure_recovery(struct ath12k_base *ab)
+{
+ struct ath12k *ar;
+ struct ath12k_pdev *pdev;
+ int i;
+
+ spin_lock_bh(&ab->base_lock);
+ ab->stats.fw_crash_counter++;
+ spin_unlock_bh(&ab->base_lock);
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = &ab->pdevs[i];
+ ar = pdev->ar;
+ if (!ar || ar->state == ATH12K_STATE_OFF)
+ continue;
+
+ ieee80211_stop_queues(ar->hw);
+ ath12k_mac_drain_tx(ar);
+ complete(&ar->scan.started);
+ complete(&ar->scan.completed);
+ complete(&ar->peer_assoc_done);
+ complete(&ar->peer_delete_done);
+ complete(&ar->install_key_done);
+ complete(&ar->vdev_setup_done);
+ complete(&ar->vdev_delete_done);
+ complete(&ar->bss_survey_done);
+
+ wake_up(&ar->dp.tx_empty_waitq);
+ idr_for_each(&ar->txmgmt_idr,
+ ath12k_mac_tx_mgmt_pending_free, ar);
+ idr_destroy(&ar->txmgmt_idr);
+ }
+
+ wake_up(&ab->wmi_ab.tx_credits_wq);
+ wake_up(&ab->peer_mapping_wq);
+}
+
+static void ath12k_core_post_reconfigure_recovery(struct ath12k_base *ab)
+{
+ struct ath12k *ar;
+ struct ath12k_pdev *pdev;
+ int i;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = &ab->pdevs[i];
+ ar = pdev->ar;
+ if (!ar || ar->state == ATH12K_STATE_OFF)
+ continue;
+
+ mutex_lock(&ar->conf_mutex);
+
+ switch (ar->state) {
+ case ATH12K_STATE_ON:
+ ar->state = ATH12K_STATE_RESTARTING;
+ ath12k_core_halt(ar);
+ ieee80211_restart_hw(ar->hw);
+ break;
+ case ATH12K_STATE_OFF:
+ ath12k_warn(ab,
+ "cannot restart radio %d that hasn't been started\n",
+ i);
+ break;
+ case ATH12K_STATE_RESTARTING:
+ break;
+ case ATH12K_STATE_RESTARTED:
+ ar->state = ATH12K_STATE_WEDGED;
+ fallthrough;
+ case ATH12K_STATE_WEDGED:
+ ath12k_warn(ab,
+ "device is wedged, will not restart radio %d\n", i);
+ break;
+ }
+ mutex_unlock(&ar->conf_mutex);
+ }
+ complete(&ab->driver_recovery);
+}
+
+static void ath12k_core_restart(struct work_struct *work)
+{
+ struct ath12k_base *ab = container_of(work, struct ath12k_base, restart_work);
+ int ret;
+
+ if (!ab->is_reset)
+ ath12k_core_pre_reconfigure_recovery(ab);
+
+ ret = ath12k_core_reconfigure_on_crash(ab);
+ if (ret) {
+ ath12k_err(ab, "failed to reconfigure driver on crash recovery\n");
+ return;
+ }
+
+ if (ab->is_reset)
+ complete_all(&ab->reconfigure_complete);
+
+ if (!ab->is_reset)
+ ath12k_core_post_reconfigure_recovery(ab);
+}
+
+static void ath12k_core_reset(struct work_struct *work)
+{
+ struct ath12k_base *ab = container_of(work, struct ath12k_base, reset_work);
+ int reset_count, fail_cont_count;
+ long time_left;
+
+ if (!(test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags))) {
+ ath12k_warn(ab, "ignore reset dev flags 0x%lx\n", ab->dev_flags);
+ return;
+ }
+
+ /* Sometimes the recovery will fail and then the next all recovery fail,
+ * this is to avoid infinite recovery since it can not recovery success
+ */
+ fail_cont_count = atomic_read(&ab->fail_cont_count);
+
+ if (fail_cont_count >= ATH12K_RESET_MAX_FAIL_COUNT_FINAL)
+ return;
+
+ if (fail_cont_count >= ATH12K_RESET_MAX_FAIL_COUNT_FIRST &&
+ time_before(jiffies, ab->reset_fail_timeout))
+ return;
+
+ reset_count = atomic_inc_return(&ab->reset_count);
+
+ if (reset_count > 1) {
+ /* Sometimes it happened another reset worker before the previous one
+ * completed, then the second reset worker will destroy the previous one,
+ * thus below is to avoid that.
+ */
+ ath12k_warn(ab, "already resetting count %d\n", reset_count);
+
+ reinit_completion(&ab->reset_complete);
+ time_left = wait_for_completion_timeout(&ab->reset_complete,
+ ATH12K_RESET_TIMEOUT_HZ);
+ if (time_left) {
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "to skip reset\n");
+ atomic_dec(&ab->reset_count);
+ return;
+ }
+
+ ab->reset_fail_timeout = jiffies + ATH12K_RESET_FAIL_TIMEOUT_HZ;
+ /* Record the continuous recovery fail count when recovery failed*/
+ fail_cont_count = atomic_inc_return(&ab->fail_cont_count);
+ }
+
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "reset starting\n");
+
+ ab->is_reset = true;
+ atomic_set(&ab->recovery_count, 0);
+
+ ath12k_core_pre_reconfigure_recovery(ab);
+
+ reinit_completion(&ab->reconfigure_complete);
+ ath12k_core_post_reconfigure_recovery(ab);
+
+ reinit_completion(&ab->recovery_start);
+ atomic_set(&ab->recovery_start_count, 0);
+
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "waiting recovery start...\n");
+
+ time_left = wait_for_completion_timeout(&ab->recovery_start,
+ ATH12K_RECOVER_START_TIMEOUT_HZ);
+
+ ath12k_hif_power_down(ab);
+ ath12k_hif_power_up(ab);
+
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "reset started\n");
+}
+
+int ath12k_core_pre_init(struct ath12k_base *ab)
+{
+ int ret;
+
+ ret = ath12k_hw_init(ab);
+ if (ret) {
+ ath12k_err(ab, "failed to init hw params: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+int ath12k_core_init(struct ath12k_base *ab)
+{
+ int ret;
+
+ ret = ath12k_core_soc_create(ab);
+ if (ret) {
+ ath12k_err(ab, "failed to create soc core: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+void ath12k_core_deinit(struct ath12k_base *ab)
+{
+ mutex_lock(&ab->core_lock);
+
+ ath12k_core_pdev_destroy(ab);
+ ath12k_core_stop(ab);
+
+ mutex_unlock(&ab->core_lock);
+
+ ath12k_hif_power_down(ab);
+ ath12k_mac_destroy(ab);
+ ath12k_core_soc_destroy(ab);
+}
+
+void ath12k_core_free(struct ath12k_base *ab)
+{
+ destroy_workqueue(ab->workqueue_aux);
+ destroy_workqueue(ab->workqueue);
+ kfree(ab);
+}
+
+struct ath12k_base *ath12k_core_alloc(struct device *dev, size_t priv_size,
+ enum ath12k_bus bus)
+{
+ struct ath12k_base *ab;
+
+ ab = kzalloc(sizeof(*ab) + priv_size, GFP_KERNEL);
+ if (!ab)
+ return NULL;
+
+ init_completion(&ab->driver_recovery);
+
+ ab->workqueue = create_singlethread_workqueue("ath12k_wq");
+ if (!ab->workqueue)
+ goto err_sc_free;
+
+ ab->workqueue_aux = create_singlethread_workqueue("ath12k_aux_wq");
+ if (!ab->workqueue_aux)
+ goto err_free_wq;
+
+ mutex_init(&ab->core_lock);
+ spin_lock_init(&ab->base_lock);
+ init_completion(&ab->reset_complete);
+ init_completion(&ab->reconfigure_complete);
+ init_completion(&ab->recovery_start);
+
+ INIT_LIST_HEAD(&ab->peers);
+ init_waitqueue_head(&ab->peer_mapping_wq);
+ init_waitqueue_head(&ab->wmi_ab.tx_credits_wq);
+ INIT_WORK(&ab->restart_work, ath12k_core_restart);
+ INIT_WORK(&ab->reset_work, ath12k_core_reset);
+ timer_setup(&ab->rx_replenish_retry, ath12k_ce_rx_replenish_retry, 0);
+ init_completion(&ab->htc_suspend);
+
+ ab->dev = dev;
+ ab->hif.bus = bus;
+
+ return ab;
+
+err_free_wq:
+ destroy_workqueue(ab->workqueue);
+err_sc_free:
+ kfree(ab);
+ return NULL;
+}
+
+MODULE_DESCRIPTION("Core module for Qualcomm Atheros 802.11be wireless LAN cards.");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/ath/ath12k/core.h b/drivers/net/wireless/ath/ath12k/core.h
new file mode 100644
index 000000000000..a54ae74543c1
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/core.h
@@ -0,0 +1,822 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef ATH12K_CORE_H
+#define ATH12K_CORE_H
+
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/bitfield.h>
+#include "qmi.h"
+#include "htc.h"
+#include "wmi.h"
+#include "hal.h"
+#include "dp.h"
+#include "ce.h"
+#include "mac.h"
+#include "hw.h"
+#include "hal_rx.h"
+#include "reg.h"
+#include "dbring.h"
+
+#define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK)
+
+#define ATH12K_TX_MGMT_NUM_PENDING_MAX 512
+
+#define ATH12K_TX_MGMT_TARGET_MAX_SUPPORT_WMI 64
+
+/* Pending management packets threshold for dropping probe responses */
+#define ATH12K_PRB_RSP_DROP_THRESHOLD ((ATH12K_TX_MGMT_TARGET_MAX_SUPPORT_WMI * 3) / 4)
+
+#define ATH12K_INVALID_HW_MAC_ID 0xFF
+#define ATH12K_RX_RATE_TABLE_NUM 320
+#define ATH12K_RX_RATE_TABLE_11AX_NUM 576
+
+#define ATH12K_MON_TIMER_INTERVAL 10
+#define ATH12K_RESET_TIMEOUT_HZ (20 * HZ)
+#define ATH12K_RESET_MAX_FAIL_COUNT_FIRST 3
+#define ATH12K_RESET_MAX_FAIL_COUNT_FINAL 5
+#define ATH12K_RESET_FAIL_TIMEOUT_HZ (20 * HZ)
+#define ATH12K_RECONFIGURE_TIMEOUT_HZ (10 * HZ)
+#define ATH12K_RECOVER_START_TIMEOUT_HZ (20 * HZ)
+
+enum wme_ac {
+ WME_AC_BE,
+ WME_AC_BK,
+ WME_AC_VI,
+ WME_AC_VO,
+ WME_NUM_AC
+};
+
+#define ATH12K_HT_MCS_MAX 7
+#define ATH12K_VHT_MCS_MAX 9
+#define ATH12K_HE_MCS_MAX 11
+
+enum ath12k_crypt_mode {
+ /* Only use hardware crypto engine */
+ ATH12K_CRYPT_MODE_HW,
+ /* Only use software crypto */
+ ATH12K_CRYPT_MODE_SW,
+};
+
+static inline enum wme_ac ath12k_tid_to_ac(u32 tid)
+{
+ return (((tid == 0) || (tid == 3)) ? WME_AC_BE :
+ ((tid == 1) || (tid == 2)) ? WME_AC_BK :
+ ((tid == 4) || (tid == 5)) ? WME_AC_VI :
+ WME_AC_VO);
+}
+
+enum ath12k_skb_flags {
+ ATH12K_SKB_HW_80211_ENCAP = BIT(0),
+ ATH12K_SKB_CIPHER_SET = BIT(1),
+};
+
+struct ath12k_skb_cb {
+ dma_addr_t paddr;
+ struct ath12k *ar;
+ struct ieee80211_vif *vif;
+ dma_addr_t paddr_ext_desc;
+ u32 cipher;
+ u8 flags;
+};
+
+struct ath12k_skb_rxcb {
+ dma_addr_t paddr;
+ bool is_first_msdu;
+ bool is_last_msdu;
+ bool is_continuation;
+ bool is_mcbc;
+ bool is_eapol;
+ struct hal_rx_desc *rx_desc;
+ u8 err_rel_src;
+ u8 err_code;
+ u8 mac_id;
+ u8 unmapped;
+ u8 is_frag;
+ u8 tid;
+ u16 peer_id;
+};
+
+enum ath12k_hw_rev {
+ ATH12K_HW_QCN9274_HW10,
+ ATH12K_HW_QCN9274_HW20,
+ ATH12K_HW_WCN7850_HW20
+};
+
+enum ath12k_firmware_mode {
+ /* the default mode, standard 802.11 functionality */
+ ATH12K_FIRMWARE_MODE_NORMAL,
+
+ /* factory tests etc */
+ ATH12K_FIRMWARE_MODE_FTM,
+};
+
+#define ATH12K_IRQ_NUM_MAX 57
+#define ATH12K_EXT_IRQ_NUM_MAX 16
+
+struct ath12k_ext_irq_grp {
+ struct ath12k_base *ab;
+ u32 irqs[ATH12K_EXT_IRQ_NUM_MAX];
+ u32 num_irq;
+ u32 grp_id;
+ u64 timestamp;
+ struct napi_struct napi;
+ struct net_device napi_ndev;
+};
+
+#define HEHANDLE_CAP_PHYINFO_SIZE 3
+#define HECAP_PHYINFO_SIZE 9
+#define HECAP_MACINFO_SIZE 5
+#define HECAP_TXRX_MCS_NSS_SIZE 2
+#define HECAP_PPET16_PPET8_MAX_SIZE 25
+
+#define HE_PPET16_PPET8_SIZE 8
+
+/* 802.11ax PPE (PPDU packet Extension) threshold */
+struct he_ppe_threshold {
+ u32 numss_m1;
+ u32 ru_mask;
+ u32 ppet16_ppet8_ru3_ru0[HE_PPET16_PPET8_SIZE];
+};
+
+struct ath12k_he {
+ u8 hecap_macinfo[HECAP_MACINFO_SIZE];
+ u32 hecap_rxmcsnssmap;
+ u32 hecap_txmcsnssmap;
+ u32 hecap_phyinfo[HEHANDLE_CAP_PHYINFO_SIZE];
+ struct he_ppe_threshold hecap_ppet;
+ u32 heop_param;
+};
+
+#define MAX_RADIOS 3
+
+enum {
+ WMI_HOST_TP_SCALE_MAX = 0,
+ WMI_HOST_TP_SCALE_50 = 1,
+ WMI_HOST_TP_SCALE_25 = 2,
+ WMI_HOST_TP_SCALE_12 = 3,
+ WMI_HOST_TP_SCALE_MIN = 4,
+ WMI_HOST_TP_SCALE_SIZE = 5,
+};
+
+enum ath12k_scan_state {
+ ATH12K_SCAN_IDLE,
+ ATH12K_SCAN_STARTING,
+ ATH12K_SCAN_RUNNING,
+ ATH12K_SCAN_ABORTING,
+};
+
+enum ath12k_dev_flags {
+ ATH12K_CAC_RUNNING,
+ ATH12K_FLAG_CRASH_FLUSH,
+ ATH12K_FLAG_RAW_MODE,
+ ATH12K_FLAG_HW_CRYPTO_DISABLED,
+ ATH12K_FLAG_RECOVERY,
+ ATH12K_FLAG_UNREGISTERING,
+ ATH12K_FLAG_REGISTERED,
+ ATH12K_FLAG_QMI_FAIL,
+ ATH12K_FLAG_HTC_SUSPEND_COMPLETE,
+};
+
+enum ath12k_monitor_flags {
+ ATH12K_FLAG_MONITOR_ENABLED,
+};
+
+struct ath12k_vif {
+ u32 vdev_id;
+ enum wmi_vdev_type vdev_type;
+ enum wmi_vdev_subtype vdev_subtype;
+ u32 beacon_interval;
+ u32 dtim_period;
+ u16 ast_hash;
+ u16 ast_idx;
+ u16 tcl_metadata;
+ u8 hal_addr_search_flags;
+ u8 search_type;
+
+ struct ath12k *ar;
+ struct ieee80211_vif *vif;
+
+ int bank_id;
+ u8 vdev_id_check_en;
+
+ struct wmi_wmm_params_all_arg wmm_params;
+ struct list_head list;
+ union {
+ struct {
+ u32 uapsd;
+ } sta;
+ struct {
+ /* 127 stations; wmi limit */
+ u8 tim_bitmap[16];
+ u8 tim_len;
+ u32 ssid_len;
+ u8 ssid[IEEE80211_MAX_SSID_LEN];
+ bool hidden_ssid;
+ /* P2P_IE with NoA attribute for P2P_GO case */
+ u32 noa_len;
+ u8 *noa_data;
+ } ap;
+ } u;
+
+ bool is_started;
+ bool is_up;
+ u32 aid;
+ u8 bssid[ETH_ALEN];
+ struct cfg80211_bitrate_mask bitrate_mask;
+ int num_legacy_stations;
+ int rtscts_prot_mode;
+ int txpower;
+ bool rsnie_present;
+ bool wpaie_present;
+ struct ieee80211_chanctx_conf chanctx;
+ u32 key_cipher;
+ u8 tx_encap_type;
+ u8 vdev_stats_id;
+};
+
+struct ath12k_vif_iter {
+ u32 vdev_id;
+ struct ath12k_vif *arvif;
+};
+
+#define HAL_AST_IDX_INVALID 0xFFFF
+#define HAL_RX_MAX_MCS 12
+#define HAL_RX_MAX_MCS_HT 31
+#define HAL_RX_MAX_MCS_VHT 9
+#define HAL_RX_MAX_MCS_HE 11
+#define HAL_RX_MAX_NSS 8
+#define HAL_RX_MAX_NUM_LEGACY_RATES 12
+#define ATH12K_RX_RATE_TABLE_11AX_NUM 576
+#define ATH12K_RX_RATE_TABLE_NUM 320
+
+struct ath12k_rx_peer_rate_stats {
+ u64 ht_mcs_count[HAL_RX_MAX_MCS_HT + 1];
+ u64 vht_mcs_count[HAL_RX_MAX_MCS_VHT + 1];
+ u64 he_mcs_count[HAL_RX_MAX_MCS_HE + 1];
+ u64 nss_count[HAL_RX_MAX_NSS];
+ u64 bw_count[HAL_RX_BW_MAX];
+ u64 gi_count[HAL_RX_GI_MAX];
+ u64 legacy_count[HAL_RX_MAX_NUM_LEGACY_RATES];
+ u64 rx_rate[ATH12K_RX_RATE_TABLE_11AX_NUM];
+};
+
+struct ath12k_rx_peer_stats {
+ u64 num_msdu;
+ u64 num_mpdu_fcs_ok;
+ u64 num_mpdu_fcs_err;
+ u64 tcp_msdu_count;
+ u64 udp_msdu_count;
+ u64 other_msdu_count;
+ u64 ampdu_msdu_count;
+ u64 non_ampdu_msdu_count;
+ u64 stbc_count;
+ u64 beamformed_count;
+ u64 mcs_count[HAL_RX_MAX_MCS + 1];
+ u64 nss_count[HAL_RX_MAX_NSS];
+ u64 bw_count[HAL_RX_BW_MAX];
+ u64 gi_count[HAL_RX_GI_MAX];
+ u64 coding_count[HAL_RX_SU_MU_CODING_MAX];
+ u64 tid_count[IEEE80211_NUM_TIDS + 1];
+ u64 pream_cnt[HAL_RX_PREAMBLE_MAX];
+ u64 reception_type[HAL_RX_RECEPTION_TYPE_MAX];
+ u64 rx_duration;
+ u64 dcm_count;
+ u64 ru_alloc_cnt[HAL_RX_RU_ALLOC_TYPE_MAX];
+ struct ath12k_rx_peer_rate_stats pkt_stats;
+ struct ath12k_rx_peer_rate_stats byte_stats;
+};
+
+#define ATH12K_HE_MCS_NUM 12
+#define ATH12K_VHT_MCS_NUM 10
+#define ATH12K_BW_NUM 5
+#define ATH12K_NSS_NUM 4
+#define ATH12K_LEGACY_NUM 12
+#define ATH12K_GI_NUM 4
+#define ATH12K_HT_MCS_NUM 32
+
+enum ath12k_pkt_rx_err {
+ ATH12K_PKT_RX_ERR_FCS,
+ ATH12K_PKT_RX_ERR_TKIP,
+ ATH12K_PKT_RX_ERR_CRYPT,
+ ATH12K_PKT_RX_ERR_PEER_IDX_INVAL,
+ ATH12K_PKT_RX_ERR_MAX,
+};
+
+enum ath12k_ampdu_subfrm_num {
+ ATH12K_AMPDU_SUBFRM_NUM_10,
+ ATH12K_AMPDU_SUBFRM_NUM_20,
+ ATH12K_AMPDU_SUBFRM_NUM_30,
+ ATH12K_AMPDU_SUBFRM_NUM_40,
+ ATH12K_AMPDU_SUBFRM_NUM_50,
+ ATH12K_AMPDU_SUBFRM_NUM_60,
+ ATH12K_AMPDU_SUBFRM_NUM_MORE,
+ ATH12K_AMPDU_SUBFRM_NUM_MAX,
+};
+
+enum ath12k_amsdu_subfrm_num {
+ ATH12K_AMSDU_SUBFRM_NUM_1,
+ ATH12K_AMSDU_SUBFRM_NUM_2,
+ ATH12K_AMSDU_SUBFRM_NUM_3,
+ ATH12K_AMSDU_SUBFRM_NUM_4,
+ ATH12K_AMSDU_SUBFRM_NUM_MORE,
+ ATH12K_AMSDU_SUBFRM_NUM_MAX,
+};
+
+enum ath12k_counter_type {
+ ATH12K_COUNTER_TYPE_BYTES,
+ ATH12K_COUNTER_TYPE_PKTS,
+ ATH12K_COUNTER_TYPE_MAX,
+};
+
+enum ath12k_stats_type {
+ ATH12K_STATS_TYPE_SUCC,
+ ATH12K_STATS_TYPE_FAIL,
+ ATH12K_STATS_TYPE_RETRY,
+ ATH12K_STATS_TYPE_AMPDU,
+ ATH12K_STATS_TYPE_MAX,
+};
+
+struct ath12k_htt_data_stats {
+ u64 legacy[ATH12K_COUNTER_TYPE_MAX][ATH12K_LEGACY_NUM];
+ u64 ht[ATH12K_COUNTER_TYPE_MAX][ATH12K_HT_MCS_NUM];
+ u64 vht[ATH12K_COUNTER_TYPE_MAX][ATH12K_VHT_MCS_NUM];
+ u64 he[ATH12K_COUNTER_TYPE_MAX][ATH12K_HE_MCS_NUM];
+ u64 bw[ATH12K_COUNTER_TYPE_MAX][ATH12K_BW_NUM];
+ u64 nss[ATH12K_COUNTER_TYPE_MAX][ATH12K_NSS_NUM];
+ u64 gi[ATH12K_COUNTER_TYPE_MAX][ATH12K_GI_NUM];
+ u64 transmit_type[ATH12K_COUNTER_TYPE_MAX][HAL_RX_RECEPTION_TYPE_MAX];
+ u64 ru_loc[ATH12K_COUNTER_TYPE_MAX][HAL_RX_RU_ALLOC_TYPE_MAX];
+};
+
+struct ath12k_htt_tx_stats {
+ struct ath12k_htt_data_stats stats[ATH12K_STATS_TYPE_MAX];
+ u64 tx_duration;
+ u64 ba_fails;
+ u64 ack_fails;
+ u16 ru_start;
+ u16 ru_tones;
+ u32 mu_group[MAX_MU_GROUP_ID];
+};
+
+struct ath12k_per_ppdu_tx_stats {
+ u16 succ_pkts;
+ u16 failed_pkts;
+ u16 retry_pkts;
+ u32 succ_bytes;
+ u32 failed_bytes;
+ u32 retry_bytes;
+};
+
+struct ath12k_wbm_tx_stats {
+ u64 wbm_tx_comp_stats[HAL_WBM_REL_HTT_TX_COMP_STATUS_MAX];
+};
+
+struct ath12k_sta {
+ struct ath12k_vif *arvif;
+
+ /* the following are protected by ar->data_lock */
+ u32 changed; /* IEEE80211_RC_* */
+ u32 bw;
+ u32 nss;
+ u32 smps;
+ enum hal_pn_type pn_type;
+
+ struct work_struct update_wk;
+ struct rate_info txrate;
+ struct rate_info last_txrate;
+ u64 rx_duration;
+ u64 tx_duration;
+ u8 rssi_comb;
+ struct ath12k_rx_peer_stats *rx_stats;
+ struct ath12k_wbm_tx_stats *wbm_tx_stats;
+};
+
+#define ATH12K_MIN_5G_FREQ 4150
+#define ATH12K_MIN_6G_FREQ 5945
+#define ATH12K_MAX_6G_FREQ 7115
+#define ATH12K_NUM_CHANS 100
+#define ATH12K_MAX_5G_CHAN 173
+
+enum ath12k_state {
+ ATH12K_STATE_OFF,
+ ATH12K_STATE_ON,
+ ATH12K_STATE_RESTARTING,
+ ATH12K_STATE_RESTARTED,
+ ATH12K_STATE_WEDGED,
+ /* Add other states as required */
+};
+
+/* Antenna noise floor */
+#define ATH12K_DEFAULT_NOISE_FLOOR -95
+
+struct ath12k_fw_stats {
+ u32 pdev_id;
+ u32 stats_id;
+ struct list_head pdevs;
+ struct list_head vdevs;
+ struct list_head bcn;
+};
+
+struct ath12k_per_peer_tx_stats {
+ u32 succ_bytes;
+ u32 retry_bytes;
+ u32 failed_bytes;
+ u32 duration;
+ u16 succ_pkts;
+ u16 retry_pkts;
+ u16 failed_pkts;
+ u16 ru_start;
+ u16 ru_tones;
+ u8 ba_fails;
+ u8 ppdu_type;
+ u32 mu_grpid;
+ u32 mu_pos;
+ bool is_ampdu;
+};
+
+#define ATH12K_FLUSH_TIMEOUT (5 * HZ)
+#define ATH12K_VDEV_DELETE_TIMEOUT_HZ (5 * HZ)
+
+struct ath12k {
+ struct ath12k_base *ab;
+ struct ath12k_pdev *pdev;
+ struct ieee80211_hw *hw;
+ struct ieee80211_ops *ops;
+ struct ath12k_wmi_pdev *wmi;
+ struct ath12k_pdev_dp dp;
+ u8 mac_addr[ETH_ALEN];
+ u32 ht_cap_info;
+ u32 vht_cap_info;
+ struct ath12k_he ar_he;
+ enum ath12k_state state;
+ bool supports_6ghz;
+ struct {
+ struct completion started;
+ struct completion completed;
+ struct completion on_channel;
+ struct delayed_work timeout;
+ enum ath12k_scan_state state;
+ bool is_roc;
+ int vdev_id;
+ int roc_freq;
+ bool roc_notify;
+ } scan;
+
+ struct {
+ struct ieee80211_supported_band sbands[NUM_NL80211_BANDS];
+ struct ieee80211_sband_iftype_data
+ iftype[NUM_NL80211_BANDS][NUM_NL80211_IFTYPES];
+ } mac;
+
+ unsigned long dev_flags;
+ unsigned int filter_flags;
+ unsigned long monitor_flags;
+ u32 min_tx_power;
+ u32 max_tx_power;
+ u32 txpower_limit_2g;
+ u32 txpower_limit_5g;
+ u32 txpower_scale;
+ u32 power_scale;
+ u32 chan_tx_pwr;
+ u32 num_stations;
+ u32 max_num_stations;
+ bool monitor_present;
+ /* To synchronize concurrent synchronous mac80211 callback operations,
+ * concurrent debugfs configuration and concurrent FW statistics events.
+ */
+ struct mutex conf_mutex;
+ /* protects the radio specific data like debug stats, ppdu_stats_info stats,
+ * vdev_stop_status info, scan data, ath12k_sta info, ath12k_vif info,
+ * channel context data, survey info, test mode data.
+ */
+ spinlock_t data_lock;
+
+ struct list_head arvifs;
+ /* should never be NULL; needed for regular htt rx */
+ struct ieee80211_channel *rx_channel;
+
+ /* valid during scan; needed for mgmt rx during scan */
+ struct ieee80211_channel *scan_channel;
+
+ u8 cfg_tx_chainmask;
+ u8 cfg_rx_chainmask;
+ u8 num_rx_chains;
+ u8 num_tx_chains;
+ /* pdev_idx starts from 0 whereas pdev->pdev_id starts with 1 */
+ u8 pdev_idx;
+ u8 lmac_id;
+
+ struct completion peer_assoc_done;
+ struct completion peer_delete_done;
+
+ int install_key_status;
+ struct completion install_key_done;
+
+ int last_wmi_vdev_start_status;
+ struct completion vdev_setup_done;
+ struct completion vdev_delete_done;
+
+ int num_peers;
+ int max_num_peers;
+ u32 num_started_vdevs;
+ u32 num_created_vdevs;
+ unsigned long long allocated_vdev_map;
+
+ struct idr txmgmt_idr;
+ /* protects txmgmt_idr data */
+ spinlock_t txmgmt_idr_lock;
+ atomic_t num_pending_mgmt_tx;
+
+ /* cycle count is reported twice for each visited channel during scan.
+ * access protected by data_lock
+ */
+ u32 survey_last_rx_clear_count;
+ u32 survey_last_cycle_count;
+
+ /* Channel info events are expected to come in pairs without and with
+ * COMPLETE flag set respectively for each channel visit during scan.
+ *
+ * However there are deviations from this rule. This flag is used to
+ * avoid reporting garbage data.
+ */
+ bool ch_info_can_report_survey;
+ struct survey_info survey[ATH12K_NUM_CHANS];
+ struct completion bss_survey_done;
+
+ struct work_struct regd_update_work;
+
+ struct work_struct wmi_mgmt_tx_work;
+ struct sk_buff_head wmi_mgmt_tx_queue;
+
+ struct ath12k_per_peer_tx_stats peer_tx_stats;
+ struct list_head ppdu_stats_info;
+ u32 ppdu_stat_list_depth;
+
+ struct ath12k_per_peer_tx_stats cached_stats;
+ u32 last_ppdu_id;
+ u32 cached_ppdu_id;
+
+ bool dfs_block_radar_events;
+ bool monitor_conf_enabled;
+ bool monitor_vdev_created;
+ bool monitor_started;
+ int monitor_vdev_id;
+};
+
+struct ath12k_band_cap {
+ u32 phy_id;
+ u32 max_bw_supported;
+ u32 ht_cap_info;
+ u32 he_cap_info[2];
+ u32 he_mcs;
+ u32 he_cap_phy_info[PSOC_HOST_MAX_PHY_SIZE];
+ struct ath12k_wmi_ppe_threshold_arg he_ppet;
+ u16 he_6ghz_capa;
+};
+
+struct ath12k_pdev_cap {
+ u32 supported_bands;
+ u32 ampdu_density;
+ u32 vht_cap;
+ u32 vht_mcs;
+ u32 he_mcs;
+ u32 tx_chain_mask;
+ u32 rx_chain_mask;
+ u32 tx_chain_mask_shift;
+ u32 rx_chain_mask_shift;
+ struct ath12k_band_cap band[NUM_NL80211_BANDS];
+};
+
+struct mlo_timestamp {
+ u32 info;
+ u32 sync_timestamp_lo_us;
+ u32 sync_timestamp_hi_us;
+ u32 mlo_offset_lo;
+ u32 mlo_offset_hi;
+ u32 mlo_offset_clks;
+ u32 mlo_comp_clks;
+ u32 mlo_comp_timer;
+};
+
+struct ath12k_pdev {
+ struct ath12k *ar;
+ u32 pdev_id;
+ struct ath12k_pdev_cap cap;
+ u8 mac_addr[ETH_ALEN];
+ struct mlo_timestamp timestamp;
+};
+
+struct ath12k_board_data {
+ const struct firmware *fw;
+ const void *data;
+ size_t len;
+};
+
+struct ath12k_soc_dp_tx_err_stats {
+ /* TCL Ring Descriptor unavailable */
+ u32 desc_na[DP_TCL_NUM_RING_MAX];
+ /* Other failures during dp_tx due to mem allocation failure
+ * idr unavailable etc.
+ */
+ atomic_t misc_fail;
+};
+
+struct ath12k_soc_dp_stats {
+ u32 err_ring_pkts;
+ u32 invalid_rbm;
+ u32 rxdma_error[HAL_REO_ENTR_RING_RXDMA_ECODE_MAX];
+ u32 reo_error[HAL_REO_DEST_RING_ERROR_CODE_MAX];
+ u32 hal_reo_error[DP_REO_DST_RING_MAX];
+ struct ath12k_soc_dp_tx_err_stats tx_err;
+};
+
+/* Master structure to hold the hw data which may be used in core module */
+struct ath12k_base {
+ enum ath12k_hw_rev hw_rev;
+ struct platform_device *pdev;
+ struct device *dev;
+ struct ath12k_qmi qmi;
+ struct ath12k_wmi_base wmi_ab;
+ struct completion fw_ready;
+ int num_radios;
+ /* HW channel counters frequency value in hertz common to all MACs */
+ u32 cc_freq_hz;
+
+ struct ath12k_htc htc;
+
+ struct ath12k_dp dp;
+
+ void __iomem *mem;
+ unsigned long mem_len;
+
+ struct {
+ enum ath12k_bus bus;
+ const struct ath12k_hif_ops *ops;
+ } hif;
+
+ struct ath12k_ce ce;
+ struct timer_list rx_replenish_retry;
+ struct ath12k_hal hal;
+ /* To synchronize core_start/core_stop */
+ struct mutex core_lock;
+ /* Protects data like peers */
+ spinlock_t base_lock;
+ struct ath12k_pdev pdevs[MAX_RADIOS];
+ struct ath12k_pdev __rcu *pdevs_active[MAX_RADIOS];
+ struct ath12k_wmi_hal_reg_capabilities_ext_arg hal_reg_cap[MAX_RADIOS];
+ unsigned long long free_vdev_map;
+ unsigned long long free_vdev_stats_id_map;
+ struct list_head peers;
+ wait_queue_head_t peer_mapping_wq;
+ u8 mac_addr[ETH_ALEN];
+ bool wmi_ready;
+ u32 wlan_init_status;
+ int irq_num[ATH12K_IRQ_NUM_MAX];
+ struct ath12k_ext_irq_grp ext_irq_grp[ATH12K_EXT_IRQ_GRP_NUM_MAX];
+ struct napi_struct *napi;
+ struct ath12k_wmi_target_cap_arg target_caps;
+ u32 ext_service_bitmap[WMI_SERVICE_EXT_BM_SIZE];
+ bool pdevs_macaddr_valid;
+ int bd_api;
+
+ const struct ath12k_hw_params *hw_params;
+
+ const struct firmware *cal_file;
+
+ /* Below regd's are protected by ab->data_lock */
+ /* This is the regd set for every radio
+ * by the firmware during initializatin
+ */
+ struct ieee80211_regdomain *default_regd[MAX_RADIOS];
+ /* This regd is set during dynamic country setting
+ * This may or may not be used during the runtime
+ */
+ struct ieee80211_regdomain *new_regd[MAX_RADIOS];
+
+ /* Current DFS Regulatory */
+ enum ath12k_dfs_region dfs_region;
+ struct ath12k_soc_dp_stats soc_stats;
+
+ unsigned long dev_flags;
+ struct completion driver_recovery;
+ struct workqueue_struct *workqueue;
+ struct work_struct restart_work;
+ struct workqueue_struct *workqueue_aux;
+ struct work_struct reset_work;
+ atomic_t reset_count;
+ atomic_t recovery_count;
+ atomic_t recovery_start_count;
+ bool is_reset;
+ struct completion reset_complete;
+ struct completion reconfigure_complete;
+ struct completion recovery_start;
+ /* continuous recovery fail count */
+ atomic_t fail_cont_count;
+ unsigned long reset_fail_timeout;
+ struct {
+ /* protected by data_lock */
+ u32 fw_crash_counter;
+ } stats;
+ u32 pktlog_defs_checksum;
+
+ struct ath12k_dbring_cap *db_caps;
+ u32 num_db_cap;
+
+ struct timer_list mon_reap_timer;
+
+ struct completion htc_suspend;
+
+ u64 fw_soc_drop_count;
+ bool static_window_map;
+
+ /* must be last */
+ u8 drv_priv[] __aligned(sizeof(void *));
+};
+
+int ath12k_core_qmi_firmware_ready(struct ath12k_base *ab);
+int ath12k_core_pre_init(struct ath12k_base *ab);
+int ath12k_core_init(struct ath12k_base *ath12k);
+void ath12k_core_deinit(struct ath12k_base *ath12k);
+struct ath12k_base *ath12k_core_alloc(struct device *dev, size_t priv_size,
+ enum ath12k_bus bus);
+void ath12k_core_free(struct ath12k_base *ath12k);
+int ath12k_core_fetch_board_data_api_1(struct ath12k_base *ab,
+ struct ath12k_board_data *bd,
+ char *filename);
+int ath12k_core_fetch_bdf(struct ath12k_base *ath12k,
+ struct ath12k_board_data *bd);
+void ath12k_core_free_bdf(struct ath12k_base *ab, struct ath12k_board_data *bd);
+int ath12k_core_check_dt(struct ath12k_base *ath12k);
+
+void ath12k_core_halt(struct ath12k *ar);
+int ath12k_core_resume(struct ath12k_base *ab);
+int ath12k_core_suspend(struct ath12k_base *ab);
+
+const struct firmware *ath12k_core_firmware_request(struct ath12k_base *ab,
+ const char *filename);
+
+static inline const char *ath12k_scan_state_str(enum ath12k_scan_state state)
+{
+ switch (state) {
+ case ATH12K_SCAN_IDLE:
+ return "idle";
+ case ATH12K_SCAN_STARTING:
+ return "starting";
+ case ATH12K_SCAN_RUNNING:
+ return "running";
+ case ATH12K_SCAN_ABORTING:
+ return "aborting";
+ }
+
+ return "unknown";
+}
+
+static inline struct ath12k_skb_cb *ATH12K_SKB_CB(struct sk_buff *skb)
+{
+ BUILD_BUG_ON(sizeof(struct ath12k_skb_cb) >
+ IEEE80211_TX_INFO_DRIVER_DATA_SIZE);
+ return (struct ath12k_skb_cb *)&IEEE80211_SKB_CB(skb)->driver_data;
+}
+
+static inline struct ath12k_skb_rxcb *ATH12K_SKB_RXCB(struct sk_buff *skb)
+{
+ BUILD_BUG_ON(sizeof(struct ath12k_skb_rxcb) > sizeof(skb->cb));
+ return (struct ath12k_skb_rxcb *)skb->cb;
+}
+
+static inline struct ath12k_vif *ath12k_vif_to_arvif(struct ieee80211_vif *vif)
+{
+ return (struct ath12k_vif *)vif->drv_priv;
+}
+
+static inline struct ath12k *ath12k_ab_to_ar(struct ath12k_base *ab,
+ int mac_id)
+{
+ return ab->pdevs[ath12k_hw_mac_id_to_pdev_id(ab->hw_params, mac_id)].ar;
+}
+
+static inline void ath12k_core_create_firmware_path(struct ath12k_base *ab,
+ const char *filename,
+ void *buf, size_t buf_len)
+{
+ snprintf(buf, buf_len, "%s/%s/%s", ATH12K_FW_DIR,
+ ab->hw_params->fw.dir, filename);
+}
+
+static inline const char *ath12k_bus_str(enum ath12k_bus bus)
+{
+ switch (bus) {
+ case ATH12K_BUS_PCI:
+ return "pci";
+ }
+
+ return "unknown";
+}
+
+#endif /* _CORE_H_ */
diff --git a/drivers/net/wireless/ath/ath12k/dbring.c b/drivers/net/wireless/ath/ath12k/dbring.c
new file mode 100644
index 000000000000..8fbf868e6f7e
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/dbring.c
@@ -0,0 +1,357 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include "core.h"
+#include "debug.h"
+
+static int ath12k_dbring_bufs_replenish(struct ath12k *ar,
+ struct ath12k_dbring *ring,
+ struct ath12k_dbring_element *buff,
+ gfp_t gfp)
+{
+ struct ath12k_base *ab = ar->ab;
+ struct hal_srng *srng;
+ dma_addr_t paddr;
+ void *ptr_aligned, *ptr_unaligned, *desc;
+ int ret;
+ int buf_id;
+ u32 cookie;
+
+ srng = &ab->hal.srng_list[ring->refill_srng.ring_id];
+
+ lockdep_assert_held(&srng->lock);
+
+ ath12k_hal_srng_access_begin(ab, srng);
+
+ ptr_unaligned = buff->payload;
+ ptr_aligned = PTR_ALIGN(ptr_unaligned, ring->buf_align);
+ paddr = dma_map_single(ab->dev, ptr_aligned, ring->buf_sz,
+ DMA_FROM_DEVICE);
+
+ ret = dma_mapping_error(ab->dev, paddr);
+ if (ret)
+ goto err;
+
+ spin_lock_bh(&ring->idr_lock);
+ buf_id = idr_alloc(&ring->bufs_idr, buff, 0, ring->bufs_max, gfp);
+ spin_unlock_bh(&ring->idr_lock);
+ if (buf_id < 0) {
+ ret = -ENOBUFS;
+ goto err_dma_unmap;
+ }
+
+ desc = ath12k_hal_srng_src_get_next_entry(ab, srng);
+ if (!desc) {
+ ret = -ENOENT;
+ goto err_idr_remove;
+ }
+
+ buff->paddr = paddr;
+
+ cookie = u32_encode_bits(ar->pdev_idx, DP_RXDMA_BUF_COOKIE_PDEV_ID) |
+ u32_encode_bits(buf_id, DP_RXDMA_BUF_COOKIE_BUF_ID);
+
+ ath12k_hal_rx_buf_addr_info_set(desc, paddr, cookie, 0);
+
+ ath12k_hal_srng_access_end(ab, srng);
+
+ return 0;
+
+err_idr_remove:
+ spin_lock_bh(&ring->idr_lock);
+ idr_remove(&ring->bufs_idr, buf_id);
+ spin_unlock_bh(&ring->idr_lock);
+err_dma_unmap:
+ dma_unmap_single(ab->dev, paddr, ring->buf_sz,
+ DMA_FROM_DEVICE);
+err:
+ ath12k_hal_srng_access_end(ab, srng);
+ return ret;
+}
+
+static int ath12k_dbring_fill_bufs(struct ath12k *ar,
+ struct ath12k_dbring *ring,
+ gfp_t gfp)
+{
+ struct ath12k_dbring_element *buff;
+ struct hal_srng *srng;
+ struct ath12k_base *ab = ar->ab;
+ int num_remain, req_entries, num_free;
+ u32 align;
+ int size, ret;
+
+ srng = &ab->hal.srng_list[ring->refill_srng.ring_id];
+
+ spin_lock_bh(&srng->lock);
+
+ num_free = ath12k_hal_srng_src_num_free(ab, srng, true);
+ req_entries = min(num_free, ring->bufs_max);
+ num_remain = req_entries;
+ align = ring->buf_align;
+ size = sizeof(*buff) + ring->buf_sz + align - 1;
+
+ while (num_remain > 0) {
+ buff = kzalloc(size, gfp);
+ if (!buff)
+ break;
+
+ ret = ath12k_dbring_bufs_replenish(ar, ring, buff, gfp);
+ if (ret) {
+ ath12k_warn(ab, "failed to replenish db ring num_remain %d req_ent %d\n",
+ num_remain, req_entries);
+ kfree(buff);
+ break;
+ }
+ num_remain--;
+ }
+
+ spin_unlock_bh(&srng->lock);
+
+ return num_remain;
+}
+
+int ath12k_dbring_wmi_cfg_setup(struct ath12k *ar,
+ struct ath12k_dbring *ring,
+ enum wmi_direct_buffer_module id)
+{
+ struct ath12k_wmi_pdev_dma_ring_cfg_arg arg = {0};
+ int ret;
+
+ if (id >= WMI_DIRECT_BUF_MAX)
+ return -EINVAL;
+
+ arg.pdev_id = DP_SW2HW_MACID(ring->pdev_id);
+ arg.module_id = id;
+ arg.base_paddr_lo = lower_32_bits(ring->refill_srng.paddr);
+ arg.base_paddr_hi = upper_32_bits(ring->refill_srng.paddr);
+ arg.head_idx_paddr_lo = lower_32_bits(ring->hp_addr);
+ arg.head_idx_paddr_hi = upper_32_bits(ring->hp_addr);
+ arg.tail_idx_paddr_lo = lower_32_bits(ring->tp_addr);
+ arg.tail_idx_paddr_hi = upper_32_bits(ring->tp_addr);
+ arg.num_elems = ring->bufs_max;
+ arg.buf_size = ring->buf_sz;
+ arg.num_resp_per_event = ring->num_resp_per_event;
+ arg.event_timeout_ms = ring->event_timeout_ms;
+
+ ret = ath12k_wmi_pdev_dma_ring_cfg(ar, &arg);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to setup db ring cfg\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+int ath12k_dbring_set_cfg(struct ath12k *ar, struct ath12k_dbring *ring,
+ u32 num_resp_per_event, u32 event_timeout_ms,
+ int (*handler)(struct ath12k *,
+ struct ath12k_dbring_data *))
+{
+ if (WARN_ON(!ring))
+ return -EINVAL;
+
+ ring->num_resp_per_event = num_resp_per_event;
+ ring->event_timeout_ms = event_timeout_ms;
+ ring->handler = handler;
+
+ return 0;
+}
+
+int ath12k_dbring_buf_setup(struct ath12k *ar,
+ struct ath12k_dbring *ring,
+ struct ath12k_dbring_cap *db_cap)
+{
+ struct ath12k_base *ab = ar->ab;
+ struct hal_srng *srng;
+ int ret;
+
+ srng = &ab->hal.srng_list[ring->refill_srng.ring_id];
+ ring->bufs_max = ring->refill_srng.size /
+ ath12k_hal_srng_get_entrysize(ab, HAL_RXDMA_DIR_BUF);
+
+ ring->buf_sz = db_cap->min_buf_sz;
+ ring->buf_align = db_cap->min_buf_align;
+ ring->pdev_id = db_cap->pdev_id;
+ ring->hp_addr = ath12k_hal_srng_get_hp_addr(ab, srng);
+ ring->tp_addr = ath12k_hal_srng_get_tp_addr(ab, srng);
+
+ ret = ath12k_dbring_fill_bufs(ar, ring, GFP_KERNEL);
+
+ return ret;
+}
+
+int ath12k_dbring_srng_setup(struct ath12k *ar, struct ath12k_dbring *ring,
+ int ring_num, int num_entries)
+{
+ int ret;
+
+ ret = ath12k_dp_srng_setup(ar->ab, &ring->refill_srng, HAL_RXDMA_DIR_BUF,
+ ring_num, ar->pdev_idx, num_entries);
+ if (ret < 0) {
+ ath12k_warn(ar->ab, "failed to setup srng: %d ring_id %d\n",
+ ret, ring_num);
+ goto err;
+ }
+
+ return 0;
+err:
+ ath12k_dp_srng_cleanup(ar->ab, &ring->refill_srng);
+ return ret;
+}
+
+int ath12k_dbring_get_cap(struct ath12k_base *ab,
+ u8 pdev_idx,
+ enum wmi_direct_buffer_module id,
+ struct ath12k_dbring_cap *db_cap)
+{
+ int i;
+
+ if (!ab->num_db_cap || !ab->db_caps)
+ return -ENOENT;
+
+ if (id >= WMI_DIRECT_BUF_MAX)
+ return -EINVAL;
+
+ for (i = 0; i < ab->num_db_cap; i++) {
+ if (pdev_idx == ab->db_caps[i].pdev_id &&
+ id == ab->db_caps[i].id) {
+ *db_cap = ab->db_caps[i];
+
+ return 0;
+ }
+ }
+
+ return -ENOENT;
+}
+
+int ath12k_dbring_buffer_release_event(struct ath12k_base *ab,
+ struct ath12k_dbring_buf_release_event *ev)
+{
+ struct ath12k_dbring *ring = NULL;
+ struct hal_srng *srng;
+ struct ath12k *ar;
+ struct ath12k_dbring_element *buff;
+ struct ath12k_dbring_data handler_data;
+ struct ath12k_buffer_addr desc;
+ u8 *vaddr_unalign;
+ u32 num_entry, num_buff_reaped;
+ u8 pdev_idx, rbm;
+ u32 cookie;
+ int buf_id;
+ int size;
+ dma_addr_t paddr;
+ int ret = 0;
+
+ pdev_idx = le32_to_cpu(ev->fixed.pdev_id);
+
+ if (pdev_idx >= ab->num_radios) {
+ ath12k_warn(ab, "Invalid pdev id %d\n", pdev_idx);
+ return -EINVAL;
+ }
+
+ if (ev->fixed.num_buf_release_entry !=
+ ev->fixed.num_meta_data_entry) {
+ ath12k_warn(ab, "Buffer entry %d mismatch meta entry %d\n",
+ ev->fixed.num_buf_release_entry,
+ ev->fixed.num_meta_data_entry);
+ return -EINVAL;
+ }
+
+ ar = ab->pdevs[pdev_idx].ar;
+
+ rcu_read_lock();
+ if (!rcu_dereference(ab->pdevs_active[pdev_idx])) {
+ ret = -EINVAL;
+ goto rcu_unlock;
+ }
+
+ switch (ev->fixed.module_id) {
+ case WMI_DIRECT_BUF_SPECTRAL:
+ break;
+ default:
+ ring = NULL;
+ ath12k_warn(ab, "Recv dma buffer release ev on unsupp module %d\n",
+ ev->fixed.module_id);
+ break;
+ }
+
+ if (!ring) {
+ ret = -EINVAL;
+ goto rcu_unlock;
+ }
+
+ srng = &ab->hal.srng_list[ring->refill_srng.ring_id];
+ num_entry = le32_to_cpu(ev->fixed.num_buf_release_entry);
+ size = sizeof(*buff) + ring->buf_sz + ring->buf_align - 1;
+ num_buff_reaped = 0;
+
+ spin_lock_bh(&srng->lock);
+
+ while (num_buff_reaped < num_entry) {
+ desc.info0 = ev->buf_entry[num_buff_reaped].paddr_lo;
+ desc.info1 = ev->buf_entry[num_buff_reaped].paddr_hi;
+ handler_data.meta = ev->meta_data[num_buff_reaped];
+
+ num_buff_reaped++;
+
+ ath12k_hal_rx_buf_addr_info_get(&desc, &paddr, &cookie, &rbm);
+
+ buf_id = u32_get_bits(cookie, DP_RXDMA_BUF_COOKIE_BUF_ID);
+
+ spin_lock_bh(&ring->idr_lock);
+ buff = idr_find(&ring->bufs_idr, buf_id);
+ if (!buff) {
+ spin_unlock_bh(&ring->idr_lock);
+ continue;
+ }
+ idr_remove(&ring->bufs_idr, buf_id);
+ spin_unlock_bh(&ring->idr_lock);
+
+ dma_unmap_single(ab->dev, buff->paddr, ring->buf_sz,
+ DMA_FROM_DEVICE);
+
+ if (ring->handler) {
+ vaddr_unalign = buff->payload;
+ handler_data.data = PTR_ALIGN(vaddr_unalign,
+ ring->buf_align);
+ handler_data.data_sz = ring->buf_sz;
+
+ ring->handler(ar, &handler_data);
+ }
+
+ memset(buff, 0, size);
+ ath12k_dbring_bufs_replenish(ar, ring, buff, GFP_ATOMIC);
+ }
+
+ spin_unlock_bh(&srng->lock);
+
+rcu_unlock:
+ rcu_read_unlock();
+
+ return ret;
+}
+
+void ath12k_dbring_srng_cleanup(struct ath12k *ar, struct ath12k_dbring *ring)
+{
+ ath12k_dp_srng_cleanup(ar->ab, &ring->refill_srng);
+}
+
+void ath12k_dbring_buf_cleanup(struct ath12k *ar, struct ath12k_dbring *ring)
+{
+ struct ath12k_dbring_element *buff;
+ int buf_id;
+
+ spin_lock_bh(&ring->idr_lock);
+ idr_for_each_entry(&ring->bufs_idr, buff, buf_id) {
+ idr_remove(&ring->bufs_idr, buf_id);
+ dma_unmap_single(ar->ab->dev, buff->paddr,
+ ring->buf_sz, DMA_FROM_DEVICE);
+ kfree(buff);
+ }
+
+ idr_destroy(&ring->bufs_idr);
+ spin_unlock_bh(&ring->idr_lock);
+}
diff --git a/drivers/net/wireless/ath/ath12k/dbring.h b/drivers/net/wireless/ath/ath12k/dbring.h
new file mode 100644
index 000000000000..e1c0eba774ec
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/dbring.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef ATH12K_DBRING_H
+#define ATH12K_DBRING_H
+
+#include <linux/types.h>
+#include <linux/idr.h>
+#include <linux/spinlock.h>
+#include "dp.h"
+
+struct ath12k_dbring_element {
+ dma_addr_t paddr;
+ u8 payload[];
+};
+
+struct ath12k_dbring_data {
+ void *data;
+ u32 data_sz;
+ struct ath12k_wmi_dma_buf_release_meta_data_params meta;
+};
+
+struct ath12k_dbring_buf_release_event {
+ struct ath12k_wmi_dma_buf_release_fixed_params fixed;
+ const struct ath12k_wmi_dma_buf_release_entry_params *buf_entry;
+ const struct ath12k_wmi_dma_buf_release_meta_data_params *meta_data;
+ u32 num_buf_entry;
+ u32 num_meta;
+};
+
+struct ath12k_dbring_cap {
+ u32 pdev_id;
+ enum wmi_direct_buffer_module id;
+ u32 min_elem;
+ u32 min_buf_sz;
+ u32 min_buf_align;
+};
+
+struct ath12k_dbring {
+ struct dp_srng refill_srng;
+ struct idr bufs_idr;
+ /* Protects bufs_idr */
+ spinlock_t idr_lock;
+ dma_addr_t tp_addr;
+ dma_addr_t hp_addr;
+ int bufs_max;
+ u32 pdev_id;
+ u32 buf_sz;
+ u32 buf_align;
+ u32 num_resp_per_event;
+ u32 event_timeout_ms;
+ int (*handler)(struct ath12k *ar, struct ath12k_dbring_data *data);
+};
+
+int ath12k_dbring_set_cfg(struct ath12k *ar,
+ struct ath12k_dbring *ring,
+ u32 num_resp_per_event,
+ u32 event_timeout_ms,
+ int (*handler)(struct ath12k *,
+ struct ath12k_dbring_data *));
+int ath12k_dbring_wmi_cfg_setup(struct ath12k *ar,
+ struct ath12k_dbring *ring,
+ enum wmi_direct_buffer_module id);
+int ath12k_dbring_buf_setup(struct ath12k *ar,
+ struct ath12k_dbring *ring,
+ struct ath12k_dbring_cap *db_cap);
+int ath12k_dbring_srng_setup(struct ath12k *ar, struct ath12k_dbring *ring,
+ int ring_num, int num_entries);
+int ath12k_dbring_buffer_release_event(struct ath12k_base *ab,
+ struct ath12k_dbring_buf_release_event *ev);
+int ath12k_dbring_get_cap(struct ath12k_base *ab,
+ u8 pdev_idx,
+ enum wmi_direct_buffer_module id,
+ struct ath12k_dbring_cap *db_cap);
+void ath12k_dbring_srng_cleanup(struct ath12k *ar, struct ath12k_dbring *ring);
+void ath12k_dbring_buf_cleanup(struct ath12k *ar, struct ath12k_dbring *ring);
+#endif /* ATH12K_DBRING_H */
diff --git a/drivers/net/wireless/ath/ath12k/debug.c b/drivers/net/wireless/ath/ath12k/debug.c
new file mode 100644
index 000000000000..67893923e010
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/debug.c
@@ -0,0 +1,102 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/vmalloc.h>
+#include "core.h"
+#include "debug.h"
+
+void ath12k_info(struct ath12k_base *ab, const char *fmt, ...)
+{
+ struct va_format vaf = {
+ .fmt = fmt,
+ };
+ va_list args;
+
+ va_start(args, fmt);
+ vaf.va = &args;
+ dev_info(ab->dev, "%pV", &vaf);
+ /* TODO: Trace the log */
+ va_end(args);
+}
+
+void ath12k_err(struct ath12k_base *ab, const char *fmt, ...)
+{
+ struct va_format vaf = {
+ .fmt = fmt,
+ };
+ va_list args;
+
+ va_start(args, fmt);
+ vaf.va = &args;
+ dev_err(ab->dev, "%pV", &vaf);
+ /* TODO: Trace the log */
+ va_end(args);
+}
+
+void ath12k_warn(struct ath12k_base *ab, const char *fmt, ...)
+{
+ struct va_format vaf = {
+ .fmt = fmt,
+ };
+ va_list args;
+
+ va_start(args, fmt);
+ vaf.va = &args;
+ dev_warn_ratelimited(ab->dev, "%pV", &vaf);
+ /* TODO: Trace the log */
+ va_end(args);
+}
+
+#ifdef CONFIG_ATH12K_DEBUG
+
+void __ath12k_dbg(struct ath12k_base *ab, enum ath12k_debug_mask mask,
+ const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+
+ va_start(args, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ if (ath12k_debug_mask & mask)
+ dev_dbg(ab->dev, "%pV", &vaf);
+
+ /* TODO: trace log */
+
+ va_end(args);
+}
+
+void ath12k_dbg_dump(struct ath12k_base *ab,
+ enum ath12k_debug_mask mask,
+ const char *msg, const char *prefix,
+ const void *buf, size_t len)
+{
+ char linebuf[256];
+ size_t linebuflen;
+ const void *ptr;
+
+ if (ath12k_debug_mask & mask) {
+ if (msg)
+ __ath12k_dbg(ab, mask, "%s\n", msg);
+
+ for (ptr = buf; (ptr - buf) < len; ptr += 16) {
+ linebuflen = 0;
+ linebuflen += scnprintf(linebuf + linebuflen,
+ sizeof(linebuf) - linebuflen,
+ "%s%08x: ",
+ (prefix ? prefix : ""),
+ (unsigned int)(ptr - buf));
+ hex_dump_to_buffer(ptr, len - (ptr - buf), 16, 1,
+ linebuf + linebuflen,
+ sizeof(linebuf) - linebuflen, true);
+ dev_dbg(ab->dev, "%s\n", linebuf);
+ }
+ }
+}
+
+#endif /* CONFIG_ATH12K_DEBUG */
diff --git a/drivers/net/wireless/ath/ath12k/debug.h b/drivers/net/wireless/ath/ath12k/debug.h
new file mode 100644
index 000000000000..aa685295f8a4
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/debug.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _ATH12K_DEBUG_H_
+#define _ATH12K_DEBUG_H_
+
+#include "trace.h"
+
+enum ath12k_debug_mask {
+ ATH12K_DBG_AHB = 0x00000001,
+ ATH12K_DBG_WMI = 0x00000002,
+ ATH12K_DBG_HTC = 0x00000004,
+ ATH12K_DBG_DP_HTT = 0x00000008,
+ ATH12K_DBG_MAC = 0x00000010,
+ ATH12K_DBG_BOOT = 0x00000020,
+ ATH12K_DBG_QMI = 0x00000040,
+ ATH12K_DBG_DATA = 0x00000080,
+ ATH12K_DBG_MGMT = 0x00000100,
+ ATH12K_DBG_REG = 0x00000200,
+ ATH12K_DBG_TESTMODE = 0x00000400,
+ ATH12K_DBG_HAL = 0x00000800,
+ ATH12K_DBG_PCI = 0x00001000,
+ ATH12K_DBG_DP_TX = 0x00002000,
+ ATH12K_DBG_DP_RX = 0x00004000,
+ ATH12K_DBG_ANY = 0xffffffff,
+};
+
+__printf(2, 3) void ath12k_info(struct ath12k_base *ab, const char *fmt, ...);
+__printf(2, 3) void ath12k_err(struct ath12k_base *ab, const char *fmt, ...);
+__printf(2, 3) void ath12k_warn(struct ath12k_base *ab, const char *fmt, ...);
+
+extern unsigned int ath12k_debug_mask;
+
+#ifdef CONFIG_ATH12K_DEBUG
+__printf(3, 4) void __ath12k_dbg(struct ath12k_base *ab,
+ enum ath12k_debug_mask mask,
+ const char *fmt, ...);
+void ath12k_dbg_dump(struct ath12k_base *ab,
+ enum ath12k_debug_mask mask,
+ const char *msg, const char *prefix,
+ const void *buf, size_t len);
+#else /* CONFIG_ATH12K_DEBUG */
+static inline void __ath12k_dbg(struct ath12k_base *ab,
+ enum ath12k_debug_mask dbg_mask,
+ const char *fmt, ...)
+{
+}
+
+static inline void ath12k_dbg_dump(struct ath12k_base *ab,
+ enum ath12k_debug_mask mask,
+ const char *msg, const char *prefix,
+ const void *buf, size_t len)
+{
+}
+#endif /* CONFIG_ATH12K_DEBUG */
+
+#define ath12k_dbg(ar, dbg_mask, fmt, ...) \
+do { \
+ typeof(dbg_mask) mask = (dbg_mask); \
+ if (ath12k_debug_mask & mask) \
+ __ath12k_dbg(ar, mask, fmt, ##__VA_ARGS__); \
+} while (0)
+
+#endif /* _ATH12K_DEBUG_H_ */
diff --git a/drivers/net/wireless/ath/ath12k/dp.c b/drivers/net/wireless/ath/ath12k/dp.c
new file mode 100644
index 000000000000..eb0261500ab0
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/dp.c
@@ -0,0 +1,1580 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <crypto/hash.h>
+#include "core.h"
+#include "dp_tx.h"
+#include "hal_tx.h"
+#include "hif.h"
+#include "debug.h"
+#include "dp_rx.h"
+#include "peer.h"
+#include "dp_mon.h"
+
+static void ath12k_dp_htt_htc_tx_complete(struct ath12k_base *ab,
+ struct sk_buff *skb)
+{
+ dev_kfree_skb_any(skb);
+}
+
+void ath12k_dp_peer_cleanup(struct ath12k *ar, int vdev_id, const u8 *addr)
+{
+ struct ath12k_base *ab = ar->ab;
+ struct ath12k_peer *peer;
+
+ /* TODO: Any other peer specific DP cleanup */
+
+ spin_lock_bh(&ab->base_lock);
+ peer = ath12k_peer_find(ab, vdev_id, addr);
+ if (!peer) {
+ ath12k_warn(ab, "failed to lookup peer %pM on vdev %d\n",
+ addr, vdev_id);
+ spin_unlock_bh(&ab->base_lock);
+ return;
+ }
+
+ ath12k_dp_rx_peer_tid_cleanup(ar, peer);
+ crypto_free_shash(peer->tfm_mmic);
+ spin_unlock_bh(&ab->base_lock);
+}
+
+int ath12k_dp_peer_setup(struct ath12k *ar, int vdev_id, const u8 *addr)
+{
+ struct ath12k_base *ab = ar->ab;
+ struct ath12k_peer *peer;
+ u32 reo_dest;
+ int ret = 0, tid;
+
+ /* NOTE: reo_dest ring id starts from 1 unlike mac_id which starts from 0 */
+ reo_dest = ar->dp.mac_id + 1;
+ ret = ath12k_wmi_set_peer_param(ar, addr, vdev_id,
+ WMI_PEER_SET_DEFAULT_ROUTING,
+ DP_RX_HASH_ENABLE | (reo_dest << 1));
+
+ if (ret) {
+ ath12k_warn(ab, "failed to set default routing %d peer :%pM vdev_id :%d\n",
+ ret, addr, vdev_id);
+ return ret;
+ }
+
+ for (tid = 0; tid <= IEEE80211_NUM_TIDS; tid++) {
+ ret = ath12k_dp_rx_peer_tid_setup(ar, addr, vdev_id, tid, 1, 0,
+ HAL_PN_TYPE_NONE);
+ if (ret) {
+ ath12k_warn(ab, "failed to setup rxd tid queue for tid %d: %d\n",
+ tid, ret);
+ goto peer_clean;
+ }
+ }
+
+ ret = ath12k_dp_rx_peer_frag_setup(ar, addr, vdev_id);
+ if (ret) {
+ ath12k_warn(ab, "failed to setup rx defrag context\n");
+ goto peer_clean;
+ }
+
+ /* TODO: Setup other peer specific resource used in data path */
+
+ return 0;
+
+peer_clean:
+ spin_lock_bh(&ab->base_lock);
+
+ peer = ath12k_peer_find(ab, vdev_id, addr);
+ if (!peer) {
+ ath12k_warn(ab, "failed to find the peer to del rx tid\n");
+ spin_unlock_bh(&ab->base_lock);
+ return -ENOENT;
+ }
+
+ for (; tid >= 0; tid--)
+ ath12k_dp_rx_peer_tid_delete(ar, peer, tid);
+
+ spin_unlock_bh(&ab->base_lock);
+
+ return ret;
+}
+
+void ath12k_dp_srng_cleanup(struct ath12k_base *ab, struct dp_srng *ring)
+{
+ if (!ring->vaddr_unaligned)
+ return;
+
+ dma_free_coherent(ab->dev, ring->size, ring->vaddr_unaligned,
+ ring->paddr_unaligned);
+
+ ring->vaddr_unaligned = NULL;
+}
+
+static int ath12k_dp_srng_find_ring_in_mask(int ring_num, const u8 *grp_mask)
+{
+ int ext_group_num;
+ u8 mask = 1 << ring_num;
+
+ for (ext_group_num = 0; ext_group_num < ATH12K_EXT_IRQ_GRP_NUM_MAX;
+ ext_group_num++) {
+ if (mask & grp_mask[ext_group_num])
+ return ext_group_num;
+ }
+
+ return -ENOENT;
+}
+
+static int ath12k_dp_srng_calculate_msi_group(struct ath12k_base *ab,
+ enum hal_ring_type type, int ring_num)
+{
+ const u8 *grp_mask;
+
+ switch (type) {
+ case HAL_WBM2SW_RELEASE:
+ if (ring_num == HAL_WBM2SW_REL_ERR_RING_NUM) {
+ grp_mask = &ab->hw_params->ring_mask->rx_wbm_rel[0];
+ ring_num = 0;
+ } else {
+ grp_mask = &ab->hw_params->ring_mask->tx[0];
+ }
+ break;
+ case HAL_REO_EXCEPTION:
+ grp_mask = &ab->hw_params->ring_mask->rx_err[0];
+ break;
+ case HAL_REO_DST:
+ grp_mask = &ab->hw_params->ring_mask->rx[0];
+ break;
+ case HAL_REO_STATUS:
+ grp_mask = &ab->hw_params->ring_mask->reo_status[0];
+ break;
+ case HAL_RXDMA_MONITOR_STATUS:
+ case HAL_RXDMA_MONITOR_DST:
+ grp_mask = &ab->hw_params->ring_mask->rx_mon_dest[0];
+ break;
+ case HAL_TX_MONITOR_DST:
+ grp_mask = &ab->hw_params->ring_mask->tx_mon_dest[0];
+ break;
+ case HAL_RXDMA_BUF:
+ grp_mask = &ab->hw_params->ring_mask->host2rxdma[0];
+ break;
+ case HAL_RXDMA_MONITOR_BUF:
+ case HAL_TCL_DATA:
+ case HAL_TCL_CMD:
+ case HAL_REO_CMD:
+ case HAL_SW2WBM_RELEASE:
+ case HAL_WBM_IDLE_LINK:
+ case HAL_TCL_STATUS:
+ case HAL_REO_REINJECT:
+ case HAL_CE_SRC:
+ case HAL_CE_DST:
+ case HAL_CE_DST_STATUS:
+ default:
+ return -ENOENT;
+ }
+
+ return ath12k_dp_srng_find_ring_in_mask(ring_num, grp_mask);
+}
+
+static void ath12k_dp_srng_msi_setup(struct ath12k_base *ab,
+ struct hal_srng_params *ring_params,
+ enum hal_ring_type type, int ring_num)
+{
+ int msi_group_number, msi_data_count;
+ u32 msi_data_start, msi_irq_start, addr_lo, addr_hi;
+ int ret;
+
+ ret = ath12k_hif_get_user_msi_vector(ab, "DP",
+ &msi_data_count, &msi_data_start,
+ &msi_irq_start);
+ if (ret)
+ return;
+
+ msi_group_number = ath12k_dp_srng_calculate_msi_group(ab, type,
+ ring_num);
+ if (msi_group_number < 0) {
+ ath12k_dbg(ab, ATH12K_DBG_PCI,
+ "ring not part of an ext_group; ring_type: %d,ring_num %d",
+ type, ring_num);
+ ring_params->msi_addr = 0;
+ ring_params->msi_data = 0;
+ return;
+ }
+
+ if (msi_group_number > msi_data_count) {
+ ath12k_dbg(ab, ATH12K_DBG_PCI,
+ "multiple msi_groups share one msi, msi_group_num %d",
+ msi_group_number);
+ }
+
+ ath12k_hif_get_msi_address(ab, &addr_lo, &addr_hi);
+
+ ring_params->msi_addr = addr_lo;
+ ring_params->msi_addr |= (dma_addr_t)(((uint64_t)addr_hi) << 32);
+ ring_params->msi_data = (msi_group_number % msi_data_count)
+ + msi_data_start;
+ ring_params->flags |= HAL_SRNG_FLAGS_MSI_INTR;
+}
+
+int ath12k_dp_srng_setup(struct ath12k_base *ab, struct dp_srng *ring,
+ enum hal_ring_type type, int ring_num,
+ int mac_id, int num_entries)
+{
+ struct hal_srng_params params = { 0 };
+ int entry_sz = ath12k_hal_srng_get_entrysize(ab, type);
+ int max_entries = ath12k_hal_srng_get_max_entries(ab, type);
+ int ret;
+
+ if (max_entries < 0 || entry_sz < 0)
+ return -EINVAL;
+
+ if (num_entries > max_entries)
+ num_entries = max_entries;
+
+ ring->size = (num_entries * entry_sz) + HAL_RING_BASE_ALIGN - 1;
+ ring->vaddr_unaligned = dma_alloc_coherent(ab->dev, ring->size,
+ &ring->paddr_unaligned,
+ GFP_KERNEL);
+ if (!ring->vaddr_unaligned)
+ return -ENOMEM;
+
+ ring->vaddr = PTR_ALIGN(ring->vaddr_unaligned, HAL_RING_BASE_ALIGN);
+ ring->paddr = ring->paddr_unaligned + ((unsigned long)ring->vaddr -
+ (unsigned long)ring->vaddr_unaligned);
+
+ params.ring_base_vaddr = ring->vaddr;
+ params.ring_base_paddr = ring->paddr;
+ params.num_entries = num_entries;
+ ath12k_dp_srng_msi_setup(ab, &params, type, ring_num + mac_id);
+
+ switch (type) {
+ case HAL_REO_DST:
+ params.intr_batch_cntr_thres_entries =
+ HAL_SRNG_INT_BATCH_THRESHOLD_RX;
+ params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
+ break;
+ case HAL_RXDMA_BUF:
+ case HAL_RXDMA_MONITOR_BUF:
+ case HAL_RXDMA_MONITOR_STATUS:
+ params.low_threshold = num_entries >> 3;
+ params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
+ params.intr_batch_cntr_thres_entries = 0;
+ params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
+ break;
+ case HAL_TX_MONITOR_DST:
+ params.low_threshold = DP_TX_MONITOR_BUF_SIZE_MAX >> 3;
+ params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
+ params.intr_batch_cntr_thres_entries = 0;
+ params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
+ break;
+ case HAL_WBM2SW_RELEASE:
+ if (ab->hw_params->hw_ops->dp_srng_is_tx_comp_ring(ring_num)) {
+ params.intr_batch_cntr_thres_entries =
+ HAL_SRNG_INT_BATCH_THRESHOLD_TX;
+ params.intr_timer_thres_us =
+ HAL_SRNG_INT_TIMER_THRESHOLD_TX;
+ break;
+ }
+ /* follow through when ring_num != HAL_WBM2SW_REL_ERR_RING_NUM */
+ fallthrough;
+ case HAL_REO_EXCEPTION:
+ case HAL_REO_REINJECT:
+ case HAL_REO_CMD:
+ case HAL_REO_STATUS:
+ case HAL_TCL_DATA:
+ case HAL_TCL_CMD:
+ case HAL_TCL_STATUS:
+ case HAL_WBM_IDLE_LINK:
+ case HAL_SW2WBM_RELEASE:
+ case HAL_RXDMA_DST:
+ case HAL_RXDMA_MONITOR_DST:
+ case HAL_RXDMA_MONITOR_DESC:
+ params.intr_batch_cntr_thres_entries =
+ HAL_SRNG_INT_BATCH_THRESHOLD_OTHER;
+ params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_OTHER;
+ break;
+ case HAL_RXDMA_DIR_BUF:
+ break;
+ default:
+ ath12k_warn(ab, "Not a valid ring type in dp :%d\n", type);
+ return -EINVAL;
+ }
+
+ ret = ath12k_hal_srng_setup(ab, type, ring_num, mac_id, &params);
+ if (ret < 0) {
+ ath12k_warn(ab, "failed to setup srng: %d ring_id %d\n",
+ ret, ring_num);
+ return ret;
+ }
+
+ ring->ring_id = ret;
+
+ return 0;
+}
+
+static
+u32 ath12k_dp_tx_get_vdev_bank_config(struct ath12k_base *ab, struct ath12k_vif *arvif)
+{
+ u32 bank_config = 0;
+
+ /* Only valid for raw frames with HW crypto enabled.
+ * With SW crypto, mac80211 sets key per packet
+ */
+ if (arvif->tx_encap_type == HAL_TCL_ENCAP_TYPE_RAW &&
+ test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ab->dev_flags))
+ bank_config |=
+ u32_encode_bits(ath12k_dp_tx_get_encrypt_type(arvif->key_cipher),
+ HAL_TX_BANK_CONFIG_ENCRYPT_TYPE);
+
+ bank_config |= u32_encode_bits(arvif->tx_encap_type,
+ HAL_TX_BANK_CONFIG_ENCAP_TYPE);
+ bank_config |= u32_encode_bits(0, HAL_TX_BANK_CONFIG_SRC_BUFFER_SWAP) |
+ u32_encode_bits(0, HAL_TX_BANK_CONFIG_LINK_META_SWAP) |
+ u32_encode_bits(0, HAL_TX_BANK_CONFIG_EPD);
+
+ /* only valid if idx_lookup_override is not set in tcl_data_cmd */
+ bank_config |= u32_encode_bits(0, HAL_TX_BANK_CONFIG_INDEX_LOOKUP_EN);
+
+ bank_config |= u32_encode_bits(arvif->hal_addr_search_flags & HAL_TX_ADDRX_EN,
+ HAL_TX_BANK_CONFIG_ADDRX_EN) |
+ u32_encode_bits(!!(arvif->hal_addr_search_flags &
+ HAL_TX_ADDRY_EN),
+ HAL_TX_BANK_CONFIG_ADDRY_EN);
+
+ bank_config |= u32_encode_bits(ieee80211_vif_is_mesh(arvif->vif) ? 3 : 0,
+ HAL_TX_BANK_CONFIG_MESH_EN) |
+ u32_encode_bits(arvif->vdev_id_check_en,
+ HAL_TX_BANK_CONFIG_VDEV_ID_CHECK_EN);
+
+ bank_config |= u32_encode_bits(0, HAL_TX_BANK_CONFIG_DSCP_TIP_MAP_ID);
+
+ return bank_config;
+}
+
+static int ath12k_dp_tx_get_bank_profile(struct ath12k_base *ab, struct ath12k_vif *arvif,
+ struct ath12k_dp *dp)
+{
+ int bank_id = DP_INVALID_BANK_ID;
+ int i;
+ u32 bank_config;
+ bool configure_register = false;
+
+ /* convert vdev params into hal_tx_bank_config */
+ bank_config = ath12k_dp_tx_get_vdev_bank_config(ab, arvif);
+
+ spin_lock_bh(&dp->tx_bank_lock);
+ /* TODO: implement using idr kernel framework*/
+ for (i = 0; i < dp->num_bank_profiles; i++) {
+ if (dp->bank_profiles[i].is_configured &&
+ (dp->bank_profiles[i].bank_config ^ bank_config) == 0) {
+ bank_id = i;
+ goto inc_ref_and_return;
+ }
+ if (!dp->bank_profiles[i].is_configured ||
+ !dp->bank_profiles[i].num_users) {
+ bank_id = i;
+ goto configure_and_return;
+ }
+ }
+
+ if (bank_id == DP_INVALID_BANK_ID) {
+ spin_unlock_bh(&dp->tx_bank_lock);
+ ath12k_err(ab, "unable to find TX bank!");
+ return bank_id;
+ }
+
+configure_and_return:
+ dp->bank_profiles[bank_id].is_configured = true;
+ dp->bank_profiles[bank_id].bank_config = bank_config;
+ configure_register = true;
+inc_ref_and_return:
+ dp->bank_profiles[bank_id].num_users++;
+ spin_unlock_bh(&dp->tx_bank_lock);
+
+ if (configure_register)
+ ath12k_hal_tx_configure_bank_register(ab, bank_config, bank_id);
+
+ ath12k_dbg(ab, ATH12K_DBG_DP_HTT, "dp_htt tcl bank_id %d input 0x%x match 0x%x num_users %u",
+ bank_id, bank_config, dp->bank_profiles[bank_id].bank_config,
+ dp->bank_profiles[bank_id].num_users);
+
+ return bank_id;
+}
+
+void ath12k_dp_tx_put_bank_profile(struct ath12k_dp *dp, u8 bank_id)
+{
+ spin_lock_bh(&dp->tx_bank_lock);
+ dp->bank_profiles[bank_id].num_users--;
+ spin_unlock_bh(&dp->tx_bank_lock);
+}
+
+static void ath12k_dp_deinit_bank_profiles(struct ath12k_base *ab)
+{
+ struct ath12k_dp *dp = &ab->dp;
+
+ kfree(dp->bank_profiles);
+ dp->bank_profiles = NULL;
+}
+
+static int ath12k_dp_init_bank_profiles(struct ath12k_base *ab)
+{
+ struct ath12k_dp *dp = &ab->dp;
+ u32 num_tcl_banks = ab->hw_params->num_tcl_banks;
+ int i;
+
+ dp->num_bank_profiles = num_tcl_banks;
+ dp->bank_profiles = kmalloc_array(num_tcl_banks,
+ sizeof(struct ath12k_dp_tx_bank_profile),
+ GFP_KERNEL);
+ if (!dp->bank_profiles)
+ return -ENOMEM;
+
+ spin_lock_init(&dp->tx_bank_lock);
+
+ for (i = 0; i < num_tcl_banks; i++) {
+ dp->bank_profiles[i].is_configured = false;
+ dp->bank_profiles[i].num_users = 0;
+ }
+
+ return 0;
+}
+
+static void ath12k_dp_srng_common_cleanup(struct ath12k_base *ab)
+{
+ struct ath12k_dp *dp = &ab->dp;
+ int i;
+
+ ath12k_dp_srng_cleanup(ab, &dp->reo_status_ring);
+ ath12k_dp_srng_cleanup(ab, &dp->reo_cmd_ring);
+ ath12k_dp_srng_cleanup(ab, &dp->reo_except_ring);
+ ath12k_dp_srng_cleanup(ab, &dp->rx_rel_ring);
+ ath12k_dp_srng_cleanup(ab, &dp->reo_reinject_ring);
+ for (i = 0; i < ab->hw_params->max_tx_ring; i++) {
+ ath12k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_comp_ring);
+ ath12k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_data_ring);
+ }
+ ath12k_dp_srng_cleanup(ab, &dp->tcl_status_ring);
+ ath12k_dp_srng_cleanup(ab, &dp->tcl_cmd_ring);
+ ath12k_dp_srng_cleanup(ab, &dp->wbm_desc_rel_ring);
+}
+
+static int ath12k_dp_srng_common_setup(struct ath12k_base *ab)
+{
+ struct ath12k_dp *dp = &ab->dp;
+ const struct ath12k_hal_tcl_to_wbm_rbm_map *map;
+ struct hal_srng *srng;
+ int i, ret, tx_comp_ring_num;
+ u32 ring_hash_map;
+
+ ret = ath12k_dp_srng_setup(ab, &dp->wbm_desc_rel_ring,
+ HAL_SW2WBM_RELEASE, 0, 0,
+ DP_WBM_RELEASE_RING_SIZE);
+ if (ret) {
+ ath12k_warn(ab, "failed to set up wbm2sw_release ring :%d\n",
+ ret);
+ goto err;
+ }
+
+ ret = ath12k_dp_srng_setup(ab, &dp->tcl_cmd_ring, HAL_TCL_CMD, 0, 0,
+ DP_TCL_CMD_RING_SIZE);
+ if (ret) {
+ ath12k_warn(ab, "failed to set up tcl_cmd ring :%d\n", ret);
+ goto err;
+ }
+
+ ret = ath12k_dp_srng_setup(ab, &dp->tcl_status_ring, HAL_TCL_STATUS,
+ 0, 0, DP_TCL_STATUS_RING_SIZE);
+ if (ret) {
+ ath12k_warn(ab, "failed to set up tcl_status ring :%d\n", ret);
+ goto err;
+ }
+
+ for (i = 0; i < ab->hw_params->max_tx_ring; i++) {
+ map = ab->hw_params->hal_ops->tcl_to_wbm_rbm_map;
+ tx_comp_ring_num = map[i].wbm_ring_num;
+
+ ret = ath12k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_data_ring,
+ HAL_TCL_DATA, i, 0,
+ DP_TCL_DATA_RING_SIZE);
+ if (ret) {
+ ath12k_warn(ab, "failed to set up tcl_data ring (%d) :%d\n",
+ i, ret);
+ goto err;
+ }
+
+ ret = ath12k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_comp_ring,
+ HAL_WBM2SW_RELEASE, tx_comp_ring_num, 0,
+ DP_TX_COMP_RING_SIZE);
+ if (ret) {
+ ath12k_warn(ab, "failed to set up tcl_comp ring (%d) :%d\n",
+ tx_comp_ring_num, ret);
+ goto err;
+ }
+ }
+
+ ret = ath12k_dp_srng_setup(ab, &dp->reo_reinject_ring, HAL_REO_REINJECT,
+ 0, 0, DP_REO_REINJECT_RING_SIZE);
+ if (ret) {
+ ath12k_warn(ab, "failed to set up reo_reinject ring :%d\n",
+ ret);
+ goto err;
+ }
+
+ ret = ath12k_dp_srng_setup(ab, &dp->rx_rel_ring, HAL_WBM2SW_RELEASE,
+ HAL_WBM2SW_REL_ERR_RING_NUM, 0,
+ DP_RX_RELEASE_RING_SIZE);
+ if (ret) {
+ ath12k_warn(ab, "failed to set up rx_rel ring :%d\n", ret);
+ goto err;
+ }
+
+ ret = ath12k_dp_srng_setup(ab, &dp->reo_except_ring, HAL_REO_EXCEPTION,
+ 0, 0, DP_REO_EXCEPTION_RING_SIZE);
+ if (ret) {
+ ath12k_warn(ab, "failed to set up reo_exception ring :%d\n",
+ ret);
+ goto err;
+ }
+
+ ret = ath12k_dp_srng_setup(ab, &dp->reo_cmd_ring, HAL_REO_CMD,
+ 0, 0, DP_REO_CMD_RING_SIZE);
+ if (ret) {
+ ath12k_warn(ab, "failed to set up reo_cmd ring :%d\n", ret);
+ goto err;
+ }
+
+ srng = &ab->hal.srng_list[dp->reo_cmd_ring.ring_id];
+ ath12k_hal_reo_init_cmd_ring(ab, srng);
+
+ ret = ath12k_dp_srng_setup(ab, &dp->reo_status_ring, HAL_REO_STATUS,
+ 0, 0, DP_REO_STATUS_RING_SIZE);
+ if (ret) {
+ ath12k_warn(ab, "failed to set up reo_status ring :%d\n", ret);
+ goto err;
+ }
+
+ /* When hash based routing of rx packet is enabled, 32 entries to map
+ * the hash values to the ring will be configured. Each hash entry uses
+ * four bits to map to a particular ring. The ring mapping will be
+ * 0:TCL, 1:SW1, 2:SW2, 3:SW3, 4:SW4, 5:Release, 6:FW and 7:SW5
+ * 8:SW6, 9:SW7, 10:SW8, 11:Not used.
+ */
+ ring_hash_map = HAL_HASH_ROUTING_RING_SW1 |
+ HAL_HASH_ROUTING_RING_SW2 << 4 |
+ HAL_HASH_ROUTING_RING_SW3 << 8 |
+ HAL_HASH_ROUTING_RING_SW4 << 12 |
+ HAL_HASH_ROUTING_RING_SW1 << 16 |
+ HAL_HASH_ROUTING_RING_SW2 << 20 |
+ HAL_HASH_ROUTING_RING_SW3 << 24 |
+ HAL_HASH_ROUTING_RING_SW4 << 28;
+
+ ath12k_hal_reo_hw_setup(ab, ring_hash_map);
+
+ return 0;
+
+err:
+ ath12k_dp_srng_common_cleanup(ab);
+
+ return ret;
+}
+
+static void ath12k_dp_scatter_idle_link_desc_cleanup(struct ath12k_base *ab)
+{
+ struct ath12k_dp *dp = &ab->dp;
+ struct hal_wbm_idle_scatter_list *slist = dp->scatter_list;
+ int i;
+
+ for (i = 0; i < DP_IDLE_SCATTER_BUFS_MAX; i++) {
+ if (!slist[i].vaddr)
+ continue;
+
+ dma_free_coherent(ab->dev, HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX,
+ slist[i].vaddr, slist[i].paddr);
+ slist[i].vaddr = NULL;
+ }
+}
+
+static int ath12k_dp_scatter_idle_link_desc_setup(struct ath12k_base *ab,
+ int size,
+ u32 n_link_desc_bank,
+ u32 n_link_desc,
+ u32 last_bank_sz)
+{
+ struct ath12k_dp *dp = &ab->dp;
+ struct dp_link_desc_bank *link_desc_banks = dp->link_desc_banks;
+ struct hal_wbm_idle_scatter_list *slist = dp->scatter_list;
+ u32 n_entries_per_buf;
+ int num_scatter_buf, scatter_idx;
+ struct hal_wbm_link_desc *scatter_buf;
+ int align_bytes, n_entries;
+ dma_addr_t paddr;
+ int rem_entries;
+ int i;
+ int ret = 0;
+ u32 end_offset, cookie;
+
+ n_entries_per_buf = HAL_WBM_IDLE_SCATTER_BUF_SIZE /
+ ath12k_hal_srng_get_entrysize(ab, HAL_WBM_IDLE_LINK);
+ num_scatter_buf = DIV_ROUND_UP(size, HAL_WBM_IDLE_SCATTER_BUF_SIZE);
+
+ if (num_scatter_buf > DP_IDLE_SCATTER_BUFS_MAX)
+ return -EINVAL;
+
+ for (i = 0; i < num_scatter_buf; i++) {
+ slist[i].vaddr = dma_alloc_coherent(ab->dev,
+ HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX,
+ &slist[i].paddr, GFP_KERNEL);
+ if (!slist[i].vaddr) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ }
+
+ scatter_idx = 0;
+ scatter_buf = slist[scatter_idx].vaddr;
+ rem_entries = n_entries_per_buf;
+
+ for (i = 0; i < n_link_desc_bank; i++) {
+ align_bytes = link_desc_banks[i].vaddr -
+ link_desc_banks[i].vaddr_unaligned;
+ n_entries = (DP_LINK_DESC_ALLOC_SIZE_THRESH - align_bytes) /
+ HAL_LINK_DESC_SIZE;
+ paddr = link_desc_banks[i].paddr;
+ while (n_entries) {
+ cookie = DP_LINK_DESC_COOKIE_SET(n_entries, i);
+ ath12k_hal_set_link_desc_addr(scatter_buf, cookie, paddr);
+ n_entries--;
+ paddr += HAL_LINK_DESC_SIZE;
+ if (rem_entries) {
+ rem_entries--;
+ scatter_buf++;
+ continue;
+ }
+
+ rem_entries = n_entries_per_buf;
+ scatter_idx++;
+ scatter_buf = slist[scatter_idx].vaddr;
+ }
+ }
+
+ end_offset = (scatter_buf - slist[scatter_idx].vaddr) *
+ sizeof(struct hal_wbm_link_desc);
+ ath12k_hal_setup_link_idle_list(ab, slist, num_scatter_buf,
+ n_link_desc, end_offset);
+
+ return 0;
+
+err:
+ ath12k_dp_scatter_idle_link_desc_cleanup(ab);
+
+ return ret;
+}
+
+static void
+ath12k_dp_link_desc_bank_free(struct ath12k_base *ab,
+ struct dp_link_desc_bank *link_desc_banks)
+{
+ int i;
+
+ for (i = 0; i < DP_LINK_DESC_BANKS_MAX; i++) {
+ if (link_desc_banks[i].vaddr_unaligned) {
+ dma_free_coherent(ab->dev,
+ link_desc_banks[i].size,
+ link_desc_banks[i].vaddr_unaligned,
+ link_desc_banks[i].paddr_unaligned);
+ link_desc_banks[i].vaddr_unaligned = NULL;
+ }
+ }
+}
+
+static int ath12k_dp_link_desc_bank_alloc(struct ath12k_base *ab,
+ struct dp_link_desc_bank *desc_bank,
+ int n_link_desc_bank,
+ int last_bank_sz)
+{
+ struct ath12k_dp *dp = &ab->dp;
+ int i;
+ int ret = 0;
+ int desc_sz = DP_LINK_DESC_ALLOC_SIZE_THRESH;
+
+ for (i = 0; i < n_link_desc_bank; i++) {
+ if (i == (n_link_desc_bank - 1) && last_bank_sz)
+ desc_sz = last_bank_sz;
+
+ desc_bank[i].vaddr_unaligned =
+ dma_alloc_coherent(ab->dev, desc_sz,
+ &desc_bank[i].paddr_unaligned,
+ GFP_KERNEL);
+ if (!desc_bank[i].vaddr_unaligned) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ desc_bank[i].vaddr = PTR_ALIGN(desc_bank[i].vaddr_unaligned,
+ HAL_LINK_DESC_ALIGN);
+ desc_bank[i].paddr = desc_bank[i].paddr_unaligned +
+ ((unsigned long)desc_bank[i].vaddr -
+ (unsigned long)desc_bank[i].vaddr_unaligned);
+ desc_bank[i].size = desc_sz;
+ }
+
+ return 0;
+
+err:
+ ath12k_dp_link_desc_bank_free(ab, dp->link_desc_banks);
+
+ return ret;
+}
+
+void ath12k_dp_link_desc_cleanup(struct ath12k_base *ab,
+ struct dp_link_desc_bank *desc_bank,
+ u32 ring_type, struct dp_srng *ring)
+{
+ ath12k_dp_link_desc_bank_free(ab, desc_bank);
+
+ if (ring_type != HAL_RXDMA_MONITOR_DESC) {
+ ath12k_dp_srng_cleanup(ab, ring);
+ ath12k_dp_scatter_idle_link_desc_cleanup(ab);
+ }
+}
+
+static int ath12k_wbm_idle_ring_setup(struct ath12k_base *ab, u32 *n_link_desc)
+{
+ struct ath12k_dp *dp = &ab->dp;
+ u32 n_mpdu_link_desc, n_mpdu_queue_desc;
+ u32 n_tx_msdu_link_desc, n_rx_msdu_link_desc;
+ int ret = 0;
+
+ n_mpdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_MPDUS_PER_TID_MAX) /
+ HAL_NUM_MPDUS_PER_LINK_DESC;
+
+ n_mpdu_queue_desc = n_mpdu_link_desc /
+ HAL_NUM_MPDU_LINKS_PER_QUEUE_DESC;
+
+ n_tx_msdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_FLOWS_PER_TID *
+ DP_AVG_MSDUS_PER_FLOW) /
+ HAL_NUM_TX_MSDUS_PER_LINK_DESC;
+
+ n_rx_msdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_MPDUS_PER_TID_MAX *
+ DP_AVG_MSDUS_PER_MPDU) /
+ HAL_NUM_RX_MSDUS_PER_LINK_DESC;
+
+ *n_link_desc = n_mpdu_link_desc + n_mpdu_queue_desc +
+ n_tx_msdu_link_desc + n_rx_msdu_link_desc;
+
+ if (*n_link_desc & (*n_link_desc - 1))
+ *n_link_desc = 1 << fls(*n_link_desc);
+
+ ret = ath12k_dp_srng_setup(ab, &dp->wbm_idle_ring,
+ HAL_WBM_IDLE_LINK, 0, 0, *n_link_desc);
+ if (ret) {
+ ath12k_warn(ab, "failed to setup wbm_idle_ring: %d\n", ret);
+ return ret;
+ }
+ return ret;
+}
+
+int ath12k_dp_link_desc_setup(struct ath12k_base *ab,
+ struct dp_link_desc_bank *link_desc_banks,
+ u32 ring_type, struct hal_srng *srng,
+ u32 n_link_desc)
+{
+ u32 tot_mem_sz;
+ u32 n_link_desc_bank, last_bank_sz;
+ u32 entry_sz, align_bytes, n_entries;
+ struct hal_wbm_link_desc *desc;
+ u32 paddr;
+ int i, ret;
+ u32 cookie;
+
+ tot_mem_sz = n_link_desc * HAL_LINK_DESC_SIZE;
+ tot_mem_sz += HAL_LINK_DESC_ALIGN;
+
+ if (tot_mem_sz <= DP_LINK_DESC_ALLOC_SIZE_THRESH) {
+ n_link_desc_bank = 1;
+ last_bank_sz = tot_mem_sz;
+ } else {
+ n_link_desc_bank = tot_mem_sz /
+ (DP_LINK_DESC_ALLOC_SIZE_THRESH -
+ HAL_LINK_DESC_ALIGN);
+ last_bank_sz = tot_mem_sz %
+ (DP_LINK_DESC_ALLOC_SIZE_THRESH -
+ HAL_LINK_DESC_ALIGN);
+
+ if (last_bank_sz)
+ n_link_desc_bank += 1;
+ }
+
+ if (n_link_desc_bank > DP_LINK_DESC_BANKS_MAX)
+ return -EINVAL;
+
+ ret = ath12k_dp_link_desc_bank_alloc(ab, link_desc_banks,
+ n_link_desc_bank, last_bank_sz);
+ if (ret)
+ return ret;
+
+ /* Setup link desc idle list for HW internal usage */
+ entry_sz = ath12k_hal_srng_get_entrysize(ab, ring_type);
+ tot_mem_sz = entry_sz * n_link_desc;
+
+ /* Setup scatter desc list when the total memory requirement is more */
+ if (tot_mem_sz > DP_LINK_DESC_ALLOC_SIZE_THRESH &&
+ ring_type != HAL_RXDMA_MONITOR_DESC) {
+ ret = ath12k_dp_scatter_idle_link_desc_setup(ab, tot_mem_sz,
+ n_link_desc_bank,
+ n_link_desc,
+ last_bank_sz);
+ if (ret) {
+ ath12k_warn(ab, "failed to setup scatting idle list descriptor :%d\n",
+ ret);
+ goto fail_desc_bank_free;
+ }
+
+ return 0;
+ }
+
+ spin_lock_bh(&srng->lock);
+
+ ath12k_hal_srng_access_begin(ab, srng);
+
+ for (i = 0; i < n_link_desc_bank; i++) {
+ align_bytes = link_desc_banks[i].vaddr -
+ link_desc_banks[i].vaddr_unaligned;
+ n_entries = (link_desc_banks[i].size - align_bytes) /
+ HAL_LINK_DESC_SIZE;
+ paddr = link_desc_banks[i].paddr;
+ while (n_entries &&
+ (desc = ath12k_hal_srng_src_get_next_entry(ab, srng))) {
+ cookie = DP_LINK_DESC_COOKIE_SET(n_entries, i);
+ ath12k_hal_set_link_desc_addr(desc,
+ cookie, paddr);
+ n_entries--;
+ paddr += HAL_LINK_DESC_SIZE;
+ }
+ }
+
+ ath12k_hal_srng_access_end(ab, srng);
+
+ spin_unlock_bh(&srng->lock);
+
+ return 0;
+
+fail_desc_bank_free:
+ ath12k_dp_link_desc_bank_free(ab, link_desc_banks);
+
+ return ret;
+}
+
+int ath12k_dp_service_srng(struct ath12k_base *ab,
+ struct ath12k_ext_irq_grp *irq_grp,
+ int budget)
+{
+ struct napi_struct *napi = &irq_grp->napi;
+ int grp_id = irq_grp->grp_id;
+ int work_done = 0;
+ int i = 0, j;
+ int tot_work_done = 0;
+ enum dp_monitor_mode monitor_mode;
+ u8 ring_mask;
+
+ while (i < ab->hw_params->max_tx_ring) {
+ if (ab->hw_params->ring_mask->tx[grp_id] &
+ BIT(ab->hw_params->hal_ops->tcl_to_wbm_rbm_map[i].wbm_ring_num))
+ ath12k_dp_tx_completion_handler(ab, i);
+ i++;
+ }
+
+ if (ab->hw_params->ring_mask->rx_err[grp_id]) {
+ work_done = ath12k_dp_rx_process_err(ab, napi, budget);
+ budget -= work_done;
+ tot_work_done += work_done;
+ if (budget <= 0)
+ goto done;
+ }
+
+ if (ab->hw_params->ring_mask->rx_wbm_rel[grp_id]) {
+ work_done = ath12k_dp_rx_process_wbm_err(ab,
+ napi,
+ budget);
+ budget -= work_done;
+ tot_work_done += work_done;
+
+ if (budget <= 0)
+ goto done;
+ }
+
+ if (ab->hw_params->ring_mask->rx[grp_id]) {
+ i = fls(ab->hw_params->ring_mask->rx[grp_id]) - 1;
+ work_done = ath12k_dp_rx_process(ab, i, napi,
+ budget);
+ budget -= work_done;
+ tot_work_done += work_done;
+ if (budget <= 0)
+ goto done;
+ }
+
+ if (ab->hw_params->ring_mask->rx_mon_dest[grp_id]) {
+ monitor_mode = ATH12K_DP_RX_MONITOR_MODE;
+ ring_mask = ab->hw_params->ring_mask->rx_mon_dest[grp_id];
+ for (i = 0; i < ab->num_radios; i++) {
+ for (j = 0; j < ab->hw_params->num_rxmda_per_pdev; j++) {
+ int id = i * ab->hw_params->num_rxmda_per_pdev + j;
+
+ if (ring_mask & BIT(id)) {
+ work_done =
+ ath12k_dp_mon_process_ring(ab, id, napi, budget,
+ monitor_mode);
+ budget -= work_done;
+ tot_work_done += work_done;
+
+ if (budget <= 0)
+ goto done;
+ }
+ }
+ }
+ }
+
+ if (ab->hw_params->ring_mask->tx_mon_dest[grp_id]) {
+ monitor_mode = ATH12K_DP_TX_MONITOR_MODE;
+ ring_mask = ab->hw_params->ring_mask->tx_mon_dest[grp_id];
+ for (i = 0; i < ab->num_radios; i++) {
+ for (j = 0; j < ab->hw_params->num_rxmda_per_pdev; j++) {
+ int id = i * ab->hw_params->num_rxmda_per_pdev + j;
+
+ if (ring_mask & BIT(id)) {
+ work_done =
+ ath12k_dp_mon_process_ring(ab, id, napi, budget,
+ monitor_mode);
+ budget -= work_done;
+ tot_work_done += work_done;
+
+ if (budget <= 0)
+ goto done;
+ }
+ }
+ }
+ }
+
+ if (ab->hw_params->ring_mask->reo_status[grp_id])
+ ath12k_dp_rx_process_reo_status(ab);
+
+ if (ab->hw_params->ring_mask->host2rxdma[grp_id]) {
+ struct ath12k_dp *dp = &ab->dp;
+ struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
+
+ ath12k_dp_rx_bufs_replenish(ab, 0, rx_ring, 0,
+ ab->hw_params->hal_params->rx_buf_rbm,
+ true);
+ }
+
+ /* TODO: Implement handler for other interrupts */
+
+done:
+ return tot_work_done;
+}
+
+void ath12k_dp_pdev_free(struct ath12k_base *ab)
+{
+ int i;
+
+ del_timer_sync(&ab->mon_reap_timer);
+
+ for (i = 0; i < ab->num_radios; i++)
+ ath12k_dp_rx_pdev_free(ab, i);
+}
+
+void ath12k_dp_pdev_pre_alloc(struct ath12k_base *ab)
+{
+ struct ath12k *ar;
+ struct ath12k_pdev_dp *dp;
+ int i;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ ar = ab->pdevs[i].ar;
+ dp = &ar->dp;
+ dp->mac_id = i;
+ atomic_set(&dp->num_tx_pending, 0);
+ init_waitqueue_head(&dp->tx_empty_waitq);
+
+ /* TODO: Add any RXDMA setup required per pdev */
+ }
+}
+
+static void ath12k_dp_service_mon_ring(struct timer_list *t)
+{
+ struct ath12k_base *ab = from_timer(ab, t, mon_reap_timer);
+ int i;
+
+ for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++)
+ ath12k_dp_mon_process_ring(ab, i, NULL, DP_MON_SERVICE_BUDGET,
+ ATH12K_DP_RX_MONITOR_MODE);
+
+ mod_timer(&ab->mon_reap_timer, jiffies +
+ msecs_to_jiffies(ATH12K_MON_TIMER_INTERVAL));
+}
+
+static void ath12k_dp_mon_reap_timer_init(struct ath12k_base *ab)
+{
+ if (ab->hw_params->rxdma1_enable)
+ return;
+
+ timer_setup(&ab->mon_reap_timer, ath12k_dp_service_mon_ring, 0);
+}
+
+int ath12k_dp_pdev_alloc(struct ath12k_base *ab)
+{
+ struct ath12k *ar;
+ int ret;
+ int i;
+
+ ret = ath12k_dp_rx_htt_setup(ab);
+ if (ret)
+ goto out;
+
+ ath12k_dp_mon_reap_timer_init(ab);
+
+ /* TODO: Per-pdev rx ring unlike tx ring which is mapped to different AC's */
+ for (i = 0; i < ab->num_radios; i++) {
+ ar = ab->pdevs[i].ar;
+ ret = ath12k_dp_rx_pdev_alloc(ab, i);
+ if (ret) {
+ ath12k_warn(ab, "failed to allocate pdev rx for pdev_id :%d\n",
+ i);
+ goto err;
+ }
+ ret = ath12k_dp_rx_pdev_mon_attach(ar);
+ if (ret) {
+ ath12k_warn(ab, "failed to initialize mon pdev %d\n", i);
+ goto err;
+ }
+ }
+
+ return 0;
+err:
+ ath12k_dp_pdev_free(ab);
+out:
+ return ret;
+}
+
+int ath12k_dp_htt_connect(struct ath12k_dp *dp)
+{
+ struct ath12k_htc_svc_conn_req conn_req = {0};
+ struct ath12k_htc_svc_conn_resp conn_resp = {0};
+ int status;
+
+ conn_req.ep_ops.ep_tx_complete = ath12k_dp_htt_htc_tx_complete;
+ conn_req.ep_ops.ep_rx_complete = ath12k_dp_htt_htc_t2h_msg_handler;
+
+ /* connect to control service */
+ conn_req.service_id = ATH12K_HTC_SVC_ID_HTT_DATA_MSG;
+
+ status = ath12k_htc_connect_service(&dp->ab->htc, &conn_req,
+ &conn_resp);
+
+ if (status)
+ return status;
+
+ dp->eid = conn_resp.eid;
+
+ return 0;
+}
+
+static void ath12k_dp_update_vdev_search(struct ath12k_vif *arvif)
+{
+ switch (arvif->vdev_type) {
+ case WMI_VDEV_TYPE_STA:
+ /* TODO: Verify the search type and flags since ast hash
+ * is not part of peer mapv3
+ */
+ arvif->hal_addr_search_flags = HAL_TX_ADDRY_EN;
+ arvif->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
+ break;
+ case WMI_VDEV_TYPE_AP:
+ case WMI_VDEV_TYPE_IBSS:
+ arvif->hal_addr_search_flags = HAL_TX_ADDRX_EN;
+ arvif->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
+ break;
+ case WMI_VDEV_TYPE_MONITOR:
+ default:
+ return;
+ }
+}
+
+void ath12k_dp_vdev_tx_attach(struct ath12k *ar, struct ath12k_vif *arvif)
+{
+ struct ath12k_base *ab = ar->ab;
+
+ arvif->tcl_metadata |= u32_encode_bits(1, HTT_TCL_META_DATA_TYPE) |
+ u32_encode_bits(arvif->vdev_id,
+ HTT_TCL_META_DATA_VDEV_ID) |
+ u32_encode_bits(ar->pdev->pdev_id,
+ HTT_TCL_META_DATA_PDEV_ID);
+
+ /* set HTT extension valid bit to 0 by default */
+ arvif->tcl_metadata &= ~HTT_TCL_META_DATA_VALID_HTT;
+
+ ath12k_dp_update_vdev_search(arvif);
+ arvif->vdev_id_check_en = true;
+ arvif->bank_id = ath12k_dp_tx_get_bank_profile(ab, arvif, &ab->dp);
+
+ /* TODO: error path for bank id failure */
+ if (arvif->bank_id == DP_INVALID_BANK_ID) {
+ ath12k_err(ar->ab, "Failed to initialize DP TX Banks");
+ return;
+ }
+}
+
+static void ath12k_dp_cc_cleanup(struct ath12k_base *ab)
+{
+ struct ath12k_rx_desc_info *desc_info, *tmp;
+ struct ath12k_tx_desc_info *tx_desc_info, *tmp1;
+ struct ath12k_dp *dp = &ab->dp;
+ struct sk_buff *skb;
+ int i;
+
+ if (!dp->spt_info)
+ return;
+
+ /* RX Descriptor cleanup */
+ spin_lock_bh(&dp->rx_desc_lock);
+
+ list_for_each_entry_safe(desc_info, tmp, &dp->rx_desc_used_list, list) {
+ list_del(&desc_info->list);
+ skb = desc_info->skb;
+
+ if (!skb)
+ continue;
+
+ dma_unmap_single(ab->dev, ATH12K_SKB_RXCB(skb)->paddr,
+ skb->len + skb_tailroom(skb), DMA_FROM_DEVICE);
+ dev_kfree_skb_any(skb);
+ }
+
+ spin_unlock_bh(&dp->rx_desc_lock);
+
+ /* TX Descriptor cleanup */
+ for (i = 0; i < ATH12K_HW_MAX_QUEUES; i++) {
+ spin_lock_bh(&dp->tx_desc_lock[i]);
+
+ list_for_each_entry_safe(tx_desc_info, tmp1, &dp->tx_desc_used_list[i],
+ list) {
+ list_del(&tx_desc_info->list);
+ skb = tx_desc_info->skb;
+
+ if (!skb)
+ continue;
+
+ dma_unmap_single(ab->dev, ATH12K_SKB_CB(skb)->paddr,
+ skb->len, DMA_TO_DEVICE);
+ dev_kfree_skb_any(skb);
+ }
+
+ spin_unlock_bh(&dp->tx_desc_lock[i]);
+ }
+
+ /* unmap SPT pages */
+ for (i = 0; i < dp->num_spt_pages; i++) {
+ if (!dp->spt_info[i].vaddr)
+ continue;
+
+ dma_free_coherent(ab->dev, ATH12K_PAGE_SIZE,
+ dp->spt_info[i].vaddr, dp->spt_info[i].paddr);
+ dp->spt_info[i].vaddr = NULL;
+ }
+
+ kfree(dp->spt_info);
+}
+
+static void ath12k_dp_reoq_lut_cleanup(struct ath12k_base *ab)
+{
+ struct ath12k_dp *dp = &ab->dp;
+
+ if (!ab->hw_params->reoq_lut_support)
+ return;
+
+ if (!dp->reoq_lut.vaddr)
+ return;
+
+ dma_free_coherent(ab->dev, DP_REOQ_LUT_SIZE,
+ dp->reoq_lut.vaddr, dp->reoq_lut.paddr);
+ dp->reoq_lut.vaddr = NULL;
+
+ ath12k_hif_write32(ab,
+ HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_QDESC_LUT_BASE0(ab), 0);
+}
+
+void ath12k_dp_free(struct ath12k_base *ab)
+{
+ struct ath12k_dp *dp = &ab->dp;
+ int i;
+
+ ath12k_dp_link_desc_cleanup(ab, dp->link_desc_banks,
+ HAL_WBM_IDLE_LINK, &dp->wbm_idle_ring);
+
+ ath12k_dp_cc_cleanup(ab);
+ ath12k_dp_reoq_lut_cleanup(ab);
+ ath12k_dp_deinit_bank_profiles(ab);
+ ath12k_dp_srng_common_cleanup(ab);
+
+ ath12k_dp_rx_reo_cmd_list_cleanup(ab);
+
+ for (i = 0; i < ab->hw_params->max_tx_ring; i++)
+ kfree(dp->tx_ring[i].tx_status);
+
+ ath12k_dp_rx_free(ab);
+ /* Deinit any SOC level resource */
+}
+
+void ath12k_dp_cc_config(struct ath12k_base *ab)
+{
+ u32 cmem_base = ab->qmi.dev_mem[ATH12K_QMI_DEVMEM_CMEM_INDEX].start;
+ u32 reo_base = HAL_SEQ_WCSS_UMAC_REO_REG;
+ u32 wbm_base = HAL_SEQ_WCSS_UMAC_WBM_REG;
+ u32 val = 0;
+
+ ath12k_hif_write32(ab, reo_base + HAL_REO1_SW_COOKIE_CFG0(ab), cmem_base);
+
+ val |= u32_encode_bits(ATH12K_CMEM_ADDR_MSB,
+ HAL_REO1_SW_COOKIE_CFG_CMEM_BASE_ADDR_MSB) |
+ u32_encode_bits(ATH12K_CC_PPT_MSB,
+ HAL_REO1_SW_COOKIE_CFG_COOKIE_PPT_MSB) |
+ u32_encode_bits(ATH12K_CC_SPT_MSB,
+ HAL_REO1_SW_COOKIE_CFG_COOKIE_SPT_MSB) |
+ u32_encode_bits(1, HAL_REO1_SW_COOKIE_CFG_ALIGN) |
+ u32_encode_bits(1, HAL_REO1_SW_COOKIE_CFG_ENABLE) |
+ u32_encode_bits(1, HAL_REO1_SW_COOKIE_CFG_GLOBAL_ENABLE);
+
+ ath12k_hif_write32(ab, reo_base + HAL_REO1_SW_COOKIE_CFG1(ab), val);
+
+ /* Enable HW CC for WBM */
+ ath12k_hif_write32(ab, wbm_base + HAL_WBM_SW_COOKIE_CFG0, cmem_base);
+
+ val = u32_encode_bits(ATH12K_CMEM_ADDR_MSB,
+ HAL_WBM_SW_COOKIE_CFG_CMEM_BASE_ADDR_MSB) |
+ u32_encode_bits(ATH12K_CC_PPT_MSB,
+ HAL_WBM_SW_COOKIE_CFG_COOKIE_PPT_MSB) |
+ u32_encode_bits(ATH12K_CC_SPT_MSB,
+ HAL_WBM_SW_COOKIE_CFG_COOKIE_SPT_MSB) |
+ u32_encode_bits(1, HAL_WBM_SW_COOKIE_CFG_ALIGN);
+
+ ath12k_hif_write32(ab, wbm_base + HAL_WBM_SW_COOKIE_CFG1, val);
+
+ /* Enable conversion complete indication */
+ val = ath12k_hif_read32(ab, wbm_base + HAL_WBM_SW_COOKIE_CFG2);
+ val |= u32_encode_bits(1, HAL_WBM_SW_COOKIE_CFG_RELEASE_PATH_EN) |
+ u32_encode_bits(1, HAL_WBM_SW_COOKIE_CFG_ERR_PATH_EN) |
+ u32_encode_bits(1, HAL_WBM_SW_COOKIE_CFG_CONV_IND_EN);
+
+ ath12k_hif_write32(ab, wbm_base + HAL_WBM_SW_COOKIE_CFG2, val);
+
+ /* Enable Cookie conversion for WBM2SW Rings */
+ val = ath12k_hif_read32(ab, wbm_base + HAL_WBM_SW_COOKIE_CONVERT_CFG);
+ val |= u32_encode_bits(1, HAL_WBM_SW_COOKIE_CONV_CFG_GLOBAL_EN) |
+ ab->hw_params->hal_params->wbm2sw_cc_enable;
+
+ ath12k_hif_write32(ab, wbm_base + HAL_WBM_SW_COOKIE_CONVERT_CFG, val);
+}
+
+static u32 ath12k_dp_cc_cookie_gen(u16 ppt_idx, u16 spt_idx)
+{
+ return (u32)ppt_idx << ATH12K_CC_PPT_SHIFT | spt_idx;
+}
+
+static inline void *ath12k_dp_cc_get_desc_addr_ptr(struct ath12k_base *ab,
+ u16 ppt_idx, u16 spt_idx)
+{
+ struct ath12k_dp *dp = &ab->dp;
+
+ return dp->spt_info[ppt_idx].vaddr + spt_idx;
+}
+
+struct ath12k_rx_desc_info *ath12k_dp_get_rx_desc(struct ath12k_base *ab,
+ u32 cookie)
+{
+ struct ath12k_rx_desc_info **desc_addr_ptr;
+ u16 ppt_idx, spt_idx;
+
+ ppt_idx = u32_get_bits(cookie, ATH12K_DP_CC_COOKIE_PPT);
+ spt_idx = u32_get_bits(cookie, ATH12k_DP_CC_COOKIE_SPT);
+
+ if (ppt_idx > ATH12K_NUM_RX_SPT_PAGES ||
+ spt_idx > ATH12K_MAX_SPT_ENTRIES)
+ return NULL;
+
+ desc_addr_ptr = ath12k_dp_cc_get_desc_addr_ptr(ab, ppt_idx, spt_idx);
+
+ return *desc_addr_ptr;
+}
+
+struct ath12k_tx_desc_info *ath12k_dp_get_tx_desc(struct ath12k_base *ab,
+ u32 cookie)
+{
+ struct ath12k_tx_desc_info **desc_addr_ptr;
+ u16 ppt_idx, spt_idx;
+
+ ppt_idx = u32_get_bits(cookie, ATH12K_DP_CC_COOKIE_PPT);
+ spt_idx = u32_get_bits(cookie, ATH12k_DP_CC_COOKIE_SPT);
+
+ if (ppt_idx < ATH12K_NUM_RX_SPT_PAGES ||
+ ppt_idx > ab->dp.num_spt_pages ||
+ spt_idx > ATH12K_MAX_SPT_ENTRIES)
+ return NULL;
+
+ desc_addr_ptr = ath12k_dp_cc_get_desc_addr_ptr(ab, ppt_idx, spt_idx);
+
+ return *desc_addr_ptr;
+}
+
+static int ath12k_dp_cc_desc_init(struct ath12k_base *ab)
+{
+ struct ath12k_dp *dp = &ab->dp;
+ struct ath12k_rx_desc_info *rx_descs, **rx_desc_addr;
+ struct ath12k_tx_desc_info *tx_descs, **tx_desc_addr;
+ u32 i, j, pool_id, tx_spt_page;
+ u32 ppt_idx;
+
+ spin_lock_bh(&dp->rx_desc_lock);
+
+ /* First ATH12K_NUM_RX_SPT_PAGES of allocated SPT pages are used for RX */
+ for (i = 0; i < ATH12K_NUM_RX_SPT_PAGES; i++) {
+ rx_descs = kcalloc(ATH12K_MAX_SPT_ENTRIES, sizeof(*rx_descs),
+ GFP_ATOMIC);
+
+ if (!rx_descs) {
+ spin_unlock_bh(&dp->rx_desc_lock);
+ return -ENOMEM;
+ }
+
+ for (j = 0; j < ATH12K_MAX_SPT_ENTRIES; j++) {
+ rx_descs[j].cookie = ath12k_dp_cc_cookie_gen(i, j);
+ rx_descs[j].magic = ATH12K_DP_RX_DESC_MAGIC;
+ list_add_tail(&rx_descs[j].list, &dp->rx_desc_free_list);
+
+ /* Update descriptor VA in SPT */
+ rx_desc_addr = ath12k_dp_cc_get_desc_addr_ptr(ab, i, j);
+ *rx_desc_addr = &rx_descs[j];
+ }
+ }
+
+ spin_unlock_bh(&dp->rx_desc_lock);
+
+ for (pool_id = 0; pool_id < ATH12K_HW_MAX_QUEUES; pool_id++) {
+ spin_lock_bh(&dp->tx_desc_lock[pool_id]);
+ for (i = 0; i < ATH12K_TX_SPT_PAGES_PER_POOL; i++) {
+ tx_descs = kcalloc(ATH12K_MAX_SPT_ENTRIES, sizeof(*tx_descs),
+ GFP_ATOMIC);
+
+ if (!tx_descs) {
+ spin_unlock_bh(&dp->tx_desc_lock[pool_id]);
+ /* Caller takes care of TX pending and RX desc cleanup */
+ return -ENOMEM;
+ }
+
+ for (j = 0; j < ATH12K_MAX_SPT_ENTRIES; j++) {
+ tx_spt_page = i + pool_id * ATH12K_TX_SPT_PAGES_PER_POOL;
+ ppt_idx = ATH12K_NUM_RX_SPT_PAGES + tx_spt_page;
+ tx_descs[j].desc_id = ath12k_dp_cc_cookie_gen(ppt_idx, j);
+ tx_descs[j].pool_id = pool_id;
+ list_add_tail(&tx_descs[j].list,
+ &dp->tx_desc_free_list[pool_id]);
+
+ /* Update descriptor VA in SPT */
+ tx_desc_addr =
+ ath12k_dp_cc_get_desc_addr_ptr(ab, ppt_idx, j);
+ *tx_desc_addr = &tx_descs[j];
+ }
+ }
+ spin_unlock_bh(&dp->tx_desc_lock[pool_id]);
+ }
+ return 0;
+}
+
+static int ath12k_dp_cc_init(struct ath12k_base *ab)
+{
+ struct ath12k_dp *dp = &ab->dp;
+ int i, ret = 0;
+ u32 cmem_base;
+
+ INIT_LIST_HEAD(&dp->rx_desc_free_list);
+ INIT_LIST_HEAD(&dp->rx_desc_used_list);
+ spin_lock_init(&dp->rx_desc_lock);
+
+ for (i = 0; i < ATH12K_HW_MAX_QUEUES; i++) {
+ INIT_LIST_HEAD(&dp->tx_desc_free_list[i]);
+ INIT_LIST_HEAD(&dp->tx_desc_used_list[i]);
+ spin_lock_init(&dp->tx_desc_lock[i]);
+ }
+
+ dp->num_spt_pages = ATH12K_NUM_SPT_PAGES;
+ if (dp->num_spt_pages > ATH12K_MAX_PPT_ENTRIES)
+ dp->num_spt_pages = ATH12K_MAX_PPT_ENTRIES;
+
+ dp->spt_info = kcalloc(dp->num_spt_pages, sizeof(struct ath12k_spt_info),
+ GFP_KERNEL);
+
+ if (!dp->spt_info) {
+ ath12k_warn(ab, "SPT page allocation failure");
+ return -ENOMEM;
+ }
+
+ cmem_base = ab->qmi.dev_mem[ATH12K_QMI_DEVMEM_CMEM_INDEX].start;
+
+ for (i = 0; i < dp->num_spt_pages; i++) {
+ dp->spt_info[i].vaddr = dma_alloc_coherent(ab->dev,
+ ATH12K_PAGE_SIZE,
+ &dp->spt_info[i].paddr,
+ GFP_KERNEL);
+
+ if (!dp->spt_info[i].vaddr) {
+ ret = -ENOMEM;
+ goto free;
+ }
+
+ if (dp->spt_info[i].paddr & ATH12K_SPT_4K_ALIGN_CHECK) {
+ ath12k_warn(ab, "SPT allocated memoty is not 4K aligned");
+ ret = -EINVAL;
+ goto free;
+ }
+
+ /* Write to PPT in CMEM */
+ ath12k_hif_write32(ab, cmem_base + ATH12K_PPT_ADDR_OFFSET(i),
+ dp->spt_info[i].paddr >> ATH12K_SPT_4K_ALIGN_OFFSET);
+ }
+
+ ret = ath12k_dp_cc_desc_init(ab);
+ if (ret) {
+ ath12k_warn(ab, "HW CC desc init failed %d", ret);
+ goto free;
+ }
+
+ return 0;
+free:
+ ath12k_dp_cc_cleanup(ab);
+ return ret;
+}
+
+static int ath12k_dp_reoq_lut_setup(struct ath12k_base *ab)
+{
+ struct ath12k_dp *dp = &ab->dp;
+
+ if (!ab->hw_params->reoq_lut_support)
+ return 0;
+
+ dp->reoq_lut.vaddr = dma_alloc_coherent(ab->dev,
+ DP_REOQ_LUT_SIZE,
+ &dp->reoq_lut.paddr,
+ GFP_KERNEL);
+
+ if (!dp->reoq_lut.vaddr) {
+ ath12k_warn(ab, "failed to allocate memory for reoq table");
+ return -ENOMEM;
+ }
+
+ memset(dp->reoq_lut.vaddr, 0, DP_REOQ_LUT_SIZE);
+
+ ath12k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_QDESC_LUT_BASE0(ab),
+ dp->reoq_lut.paddr);
+ return 0;
+}
+
+int ath12k_dp_alloc(struct ath12k_base *ab)
+{
+ struct ath12k_dp *dp = &ab->dp;
+ struct hal_srng *srng = NULL;
+ size_t size = 0;
+ u32 n_link_desc = 0;
+ int ret;
+ int i;
+
+ dp->ab = ab;
+
+ INIT_LIST_HEAD(&dp->reo_cmd_list);
+ INIT_LIST_HEAD(&dp->reo_cmd_cache_flush_list);
+ spin_lock_init(&dp->reo_cmd_lock);
+
+ dp->reo_cmd_cache_flush_count = 0;
+
+ ret = ath12k_wbm_idle_ring_setup(ab, &n_link_desc);
+ if (ret) {
+ ath12k_warn(ab, "failed to setup wbm_idle_ring: %d\n", ret);
+ return ret;
+ }
+
+ srng = &ab->hal.srng_list[dp->wbm_idle_ring.ring_id];
+
+ ret = ath12k_dp_link_desc_setup(ab, dp->link_desc_banks,
+ HAL_WBM_IDLE_LINK, srng, n_link_desc);
+ if (ret) {
+ ath12k_warn(ab, "failed to setup link desc: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath12k_dp_cc_init(ab);
+
+ if (ret) {
+ ath12k_warn(ab, "failed to setup cookie converter %d\n", ret);
+ goto fail_link_desc_cleanup;
+ }
+ ret = ath12k_dp_init_bank_profiles(ab);
+ if (ret) {
+ ath12k_warn(ab, "failed to setup bank profiles %d\n", ret);
+ goto fail_hw_cc_cleanup;
+ }
+
+ ret = ath12k_dp_srng_common_setup(ab);
+ if (ret)
+ goto fail_dp_bank_profiles_cleanup;
+
+ size = sizeof(struct hal_wbm_release_ring_tx) * DP_TX_COMP_RING_SIZE;
+
+ ret = ath12k_dp_reoq_lut_setup(ab);
+ if (ret) {
+ ath12k_warn(ab, "failed to setup reoq table %d\n", ret);
+ goto fail_cmn_srng_cleanup;
+ }
+
+ for (i = 0; i < ab->hw_params->max_tx_ring; i++) {
+ dp->tx_ring[i].tcl_data_ring_id = i;
+
+ dp->tx_ring[i].tx_status_head = 0;
+ dp->tx_ring[i].tx_status_tail = DP_TX_COMP_RING_SIZE - 1;
+ dp->tx_ring[i].tx_status = kmalloc(size, GFP_KERNEL);
+ if (!dp->tx_ring[i].tx_status) {
+ ret = -ENOMEM;
+ /* FIXME: The allocated tx status is not freed
+ * properly here
+ */
+ goto fail_cmn_reoq_cleanup;
+ }
+ }
+
+ for (i = 0; i < HAL_DSCP_TID_MAP_TBL_NUM_ENTRIES_MAX; i++)
+ ath12k_hal_tx_set_dscp_tid_map(ab, i);
+
+ ret = ath12k_dp_rx_alloc(ab);
+ if (ret)
+ goto fail_dp_rx_free;
+
+ /* Init any SOC level resource for DP */
+
+ return 0;
+
+fail_dp_rx_free:
+ ath12k_dp_rx_free(ab);
+
+fail_cmn_reoq_cleanup:
+ ath12k_dp_reoq_lut_cleanup(ab);
+
+fail_cmn_srng_cleanup:
+ ath12k_dp_srng_common_cleanup(ab);
+
+fail_dp_bank_profiles_cleanup:
+ ath12k_dp_deinit_bank_profiles(ab);
+
+fail_hw_cc_cleanup:
+ ath12k_dp_cc_cleanup(ab);
+
+fail_link_desc_cleanup:
+ ath12k_dp_link_desc_cleanup(ab, dp->link_desc_banks,
+ HAL_WBM_IDLE_LINK, &dp->wbm_idle_ring);
+
+ return ret;
+}
diff --git a/drivers/net/wireless/ath/ath12k/dp.h b/drivers/net/wireless/ath/ath12k/dp.h
new file mode 100644
index 000000000000..36a876d7f61d
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/dp.h
@@ -0,0 +1,1816 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef ATH12K_DP_H
+#define ATH12K_DP_H
+
+#include "hal_rx.h"
+#include "hw.h"
+
+#define MAX_RXDMA_PER_PDEV 2
+
+struct ath12k_base;
+struct ath12k_peer;
+struct ath12k_dp;
+struct ath12k_vif;
+struct hal_tcl_status_ring;
+struct ath12k_ext_irq_grp;
+
+#define DP_MON_PURGE_TIMEOUT_MS 100
+#define DP_MON_SERVICE_BUDGET 128
+
+struct dp_srng {
+ u32 *vaddr_unaligned;
+ u32 *vaddr;
+ dma_addr_t paddr_unaligned;
+ dma_addr_t paddr;
+ int size;
+ u32 ring_id;
+};
+
+struct dp_rxdma_ring {
+ struct dp_srng refill_buf_ring;
+ struct idr bufs_idr;
+ /* Protects bufs_idr */
+ spinlock_t idr_lock;
+ int bufs_max;
+};
+
+#define ATH12K_TX_COMPL_NEXT(x) (((x) + 1) % DP_TX_COMP_RING_SIZE)
+
+struct dp_tx_ring {
+ u8 tcl_data_ring_id;
+ struct dp_srng tcl_data_ring;
+ struct dp_srng tcl_comp_ring;
+ struct hal_wbm_completion_ring_tx *tx_status;
+ int tx_status_head;
+ int tx_status_tail;
+};
+
+struct ath12k_pdev_mon_stats {
+ u32 status_ppdu_state;
+ u32 status_ppdu_start;
+ u32 status_ppdu_end;
+ u32 status_ppdu_compl;
+ u32 status_ppdu_start_mis;
+ u32 status_ppdu_end_mis;
+ u32 status_ppdu_done;
+ u32 dest_ppdu_done;
+ u32 dest_mpdu_done;
+ u32 dest_mpdu_drop;
+ u32 dup_mon_linkdesc_cnt;
+ u32 dup_mon_buf_cnt;
+};
+
+struct dp_link_desc_bank {
+ void *vaddr_unaligned;
+ void *vaddr;
+ dma_addr_t paddr_unaligned;
+ dma_addr_t paddr;
+ u32 size;
+};
+
+/* Size to enforce scatter idle list mode */
+#define DP_LINK_DESC_ALLOC_SIZE_THRESH 0x200000
+#define DP_LINK_DESC_BANKS_MAX 8
+
+#define DP_LINK_DESC_START 0x4000
+#define DP_LINK_DESC_SHIFT 3
+
+#define DP_LINK_DESC_COOKIE_SET(id, page) \
+ ((((id) + DP_LINK_DESC_START) << DP_LINK_DESC_SHIFT) | (page))
+
+#define DP_LINK_DESC_BANK_MASK GENMASK(2, 0)
+
+#define DP_RX_DESC_COOKIE_INDEX_MAX 0x3ffff
+#define DP_RX_DESC_COOKIE_POOL_ID_MAX 0x1c0000
+#define DP_RX_DESC_COOKIE_MAX \
+ (DP_RX_DESC_COOKIE_INDEX_MAX | DP_RX_DESC_COOKIE_POOL_ID_MAX)
+#define DP_NOT_PPDU_ID_WRAP_AROUND 20000
+
+enum ath12k_dp_ppdu_state {
+ DP_PPDU_STATUS_START,
+ DP_PPDU_STATUS_DONE,
+};
+
+struct dp_mon_mpdu {
+ struct list_head list;
+ struct sk_buff *head;
+ struct sk_buff *tail;
+};
+
+#define DP_MON_MAX_STATUS_BUF 32
+
+struct ath12k_mon_data {
+ struct dp_link_desc_bank link_desc_banks[DP_LINK_DESC_BANKS_MAX];
+ struct hal_rx_mon_ppdu_info mon_ppdu_info;
+
+ u32 mon_ppdu_status;
+ u32 mon_last_buf_cookie;
+ u64 mon_last_linkdesc_paddr;
+ u16 chan_noise_floor;
+
+ struct ath12k_pdev_mon_stats rx_mon_stats;
+ /* lock for monitor data */
+ spinlock_t mon_lock;
+ struct sk_buff_head rx_status_q;
+ struct dp_mon_mpdu *mon_mpdu;
+ struct list_head dp_rx_mon_mpdu_list;
+ struct sk_buff *dest_skb_q[DP_MON_MAX_STATUS_BUF];
+ struct dp_mon_tx_ppdu_info *tx_prot_ppdu_info;
+ struct dp_mon_tx_ppdu_info *tx_data_ppdu_info;
+};
+
+struct ath12k_pdev_dp {
+ u32 mac_id;
+ atomic_t num_tx_pending;
+ wait_queue_head_t tx_empty_waitq;
+ struct dp_srng rxdma_mon_dst_ring[MAX_RXDMA_PER_PDEV];
+ struct dp_srng tx_mon_dst_ring[MAX_RXDMA_PER_PDEV];
+
+ struct ieee80211_rx_status rx_status;
+ struct ath12k_mon_data mon_data;
+};
+
+#define DP_NUM_CLIENTS_MAX 64
+#define DP_AVG_TIDS_PER_CLIENT 2
+#define DP_NUM_TIDS_MAX (DP_NUM_CLIENTS_MAX * DP_AVG_TIDS_PER_CLIENT)
+#define DP_AVG_MSDUS_PER_FLOW 128
+#define DP_AVG_FLOWS_PER_TID 2
+#define DP_AVG_MPDUS_PER_TID_MAX 128
+#define DP_AVG_MSDUS_PER_MPDU 4
+
+#define DP_RX_HASH_ENABLE 1 /* Enable hash based Rx steering */
+
+#define DP_BA_WIN_SZ_MAX 256
+
+#define DP_TCL_NUM_RING_MAX 4
+
+#define DP_IDLE_SCATTER_BUFS_MAX 16
+
+#define DP_WBM_RELEASE_RING_SIZE 64
+#define DP_TCL_DATA_RING_SIZE 512
+#define DP_TX_COMP_RING_SIZE 32768
+#define DP_TX_IDR_SIZE DP_TX_COMP_RING_SIZE
+#define DP_TCL_CMD_RING_SIZE 32
+#define DP_TCL_STATUS_RING_SIZE 32
+#define DP_REO_DST_RING_MAX 8
+#define DP_REO_DST_RING_SIZE 2048
+#define DP_REO_REINJECT_RING_SIZE 32
+#define DP_RX_RELEASE_RING_SIZE 1024
+#define DP_REO_EXCEPTION_RING_SIZE 128
+#define DP_REO_CMD_RING_SIZE 128
+#define DP_REO_STATUS_RING_SIZE 2048
+#define DP_RXDMA_BUF_RING_SIZE 4096
+#define DP_RXDMA_REFILL_RING_SIZE 2048
+#define DP_RXDMA_ERR_DST_RING_SIZE 1024
+#define DP_RXDMA_MON_STATUS_RING_SIZE 1024
+#define DP_RXDMA_MONITOR_BUF_RING_SIZE 4096
+#define DP_RXDMA_MONITOR_DST_RING_SIZE 2048
+#define DP_RXDMA_MONITOR_DESC_RING_SIZE 4096
+#define DP_TX_MONITOR_BUF_RING_SIZE 4096
+#define DP_TX_MONITOR_DEST_RING_SIZE 2048
+
+#define DP_TX_MONITOR_BUF_SIZE 2048
+#define DP_TX_MONITOR_BUF_SIZE_MIN 48
+#define DP_TX_MONITOR_BUF_SIZE_MAX 8192
+
+#define DP_RX_BUFFER_SIZE 2048
+#define DP_RX_BUFFER_SIZE_LITE 1024
+#define DP_RX_BUFFER_ALIGN_SIZE 128
+
+#define DP_RXDMA_BUF_COOKIE_BUF_ID GENMASK(17, 0)
+#define DP_RXDMA_BUF_COOKIE_PDEV_ID GENMASK(19, 18)
+
+#define DP_HW2SW_MACID(mac_id) ({ typeof(mac_id) x = (mac_id); x ? x - 1 : 0; })
+#define DP_SW2HW_MACID(mac_id) ((mac_id) + 1)
+
+#define DP_TX_DESC_ID_MAC_ID GENMASK(1, 0)
+#define DP_TX_DESC_ID_MSDU_ID GENMASK(18, 2)
+#define DP_TX_DESC_ID_POOL_ID GENMASK(20, 19)
+
+#define ATH12K_SHADOW_DP_TIMER_INTERVAL 20
+#define ATH12K_SHADOW_CTRL_TIMER_INTERVAL 10
+
+#define ATH12K_NUM_POOL_TX_DESC 32768
+
+/* TODO: revisit this count during testing */
+#define ATH12K_RX_DESC_COUNT (12288)
+
+#define ATH12K_PAGE_SIZE PAGE_SIZE
+
+/* Total 1024 entries in PPT, i.e 4K/4 considering 4K aligned
+ * SPT pages which makes lower 12bits 0
+ */
+#define ATH12K_MAX_PPT_ENTRIES 1024
+
+/* Total 512 entries in a SPT, i.e 4K Page/8 */
+#define ATH12K_MAX_SPT_ENTRIES 512
+
+#define ATH12K_NUM_RX_SPT_PAGES ((ATH12K_RX_DESC_COUNT) / ATH12K_MAX_SPT_ENTRIES)
+
+#define ATH12K_TX_SPT_PAGES_PER_POOL (ATH12K_NUM_POOL_TX_DESC / \
+ ATH12K_MAX_SPT_ENTRIES)
+#define ATH12K_NUM_TX_SPT_PAGES (ATH12K_TX_SPT_PAGES_PER_POOL * ATH12K_HW_MAX_QUEUES)
+#define ATH12K_NUM_SPT_PAGES (ATH12K_NUM_RX_SPT_PAGES + ATH12K_NUM_TX_SPT_PAGES)
+
+/* The SPT pages are divided for RX and TX, first block for RX
+ * and remaining for TX
+ */
+#define ATH12K_NUM_TX_SPT_PAGE_START ATH12K_NUM_RX_SPT_PAGES
+
+#define ATH12K_DP_RX_DESC_MAGIC 0xBABABABA
+
+/* 4K aligned address have last 12 bits set to 0, this check is done
+ * so that two spt pages address can be stored per 8bytes
+ * of CMEM (PPT)
+ */
+#define ATH12K_SPT_4K_ALIGN_CHECK 0xFFF
+#define ATH12K_SPT_4K_ALIGN_OFFSET 12
+#define ATH12K_PPT_ADDR_OFFSET(ppt_index) (4 * (ppt_index))
+
+/* To indicate HW of CMEM address, b0-31 are cmem base received via QMI */
+#define ATH12K_CMEM_ADDR_MSB 0x10
+
+/* Of 20 bits cookie, b0-b8 is to indicate SPT offset and b9-19 for PPT */
+#define ATH12K_CC_SPT_MSB 8
+#define ATH12K_CC_PPT_MSB 19
+#define ATH12K_CC_PPT_SHIFT 9
+#define ATH12k_DP_CC_COOKIE_SPT GENMASK(8, 0)
+#define ATH12K_DP_CC_COOKIE_PPT GENMASK(19, 9)
+
+#define DP_REO_QREF_NUM GENMASK(31, 16)
+#define DP_MAX_PEER_ID 2047
+
+/* Total size of the LUT is based on 2K peers, each having reference
+ * for 17tids, note each entry is of type ath12k_reo_queue_ref
+ * hence total size is 2048 * 17 * 8 = 278528
+ */
+#define DP_REOQ_LUT_SIZE 278528
+
+/* Invalid TX Bank ID value */
+#define DP_INVALID_BANK_ID -1
+
+struct ath12k_dp_tx_bank_profile {
+ u8 is_configured;
+ u32 num_users;
+ u32 bank_config;
+};
+
+struct ath12k_hp_update_timer {
+ struct timer_list timer;
+ bool started;
+ bool init;
+ u32 tx_num;
+ u32 timer_tx_num;
+ u32 ring_id;
+ u32 interval;
+ struct ath12k_base *ab;
+};
+
+struct ath12k_rx_desc_info {
+ struct list_head list;
+ struct sk_buff *skb;
+ u32 cookie;
+ u32 magic;
+};
+
+struct ath12k_tx_desc_info {
+ struct list_head list;
+ struct sk_buff *skb;
+ u32 desc_id; /* Cookie */
+ u8 mac_id;
+ u8 pool_id;
+};
+
+struct ath12k_spt_info {
+ dma_addr_t paddr;
+ u64 *vaddr;
+};
+
+struct ath12k_reo_queue_ref {
+ u32 info0;
+ u32 info1;
+} __packed;
+
+struct ath12k_reo_q_addr_lut {
+ dma_addr_t paddr;
+ u32 *vaddr;
+};
+
+struct ath12k_dp {
+ struct ath12k_base *ab;
+ u8 num_bank_profiles;
+ /* protects the access and update of bank_profiles */
+ spinlock_t tx_bank_lock;
+ struct ath12k_dp_tx_bank_profile *bank_profiles;
+ enum ath12k_htc_ep_id eid;
+ struct completion htt_tgt_version_received;
+ u8 htt_tgt_ver_major;
+ u8 htt_tgt_ver_minor;
+ struct dp_link_desc_bank link_desc_banks[DP_LINK_DESC_BANKS_MAX];
+ struct dp_srng wbm_idle_ring;
+ struct dp_srng wbm_desc_rel_ring;
+ struct dp_srng tcl_cmd_ring;
+ struct dp_srng tcl_status_ring;
+ struct dp_srng reo_reinject_ring;
+ struct dp_srng rx_rel_ring;
+ struct dp_srng reo_except_ring;
+ struct dp_srng reo_cmd_ring;
+ struct dp_srng reo_status_ring;
+ struct dp_srng reo_dst_ring[DP_REO_DST_RING_MAX];
+ struct dp_tx_ring tx_ring[DP_TCL_NUM_RING_MAX];
+ struct hal_wbm_idle_scatter_list scatter_list[DP_IDLE_SCATTER_BUFS_MAX];
+ struct list_head reo_cmd_list;
+ struct list_head reo_cmd_cache_flush_list;
+ u32 reo_cmd_cache_flush_count;
+
+ /* protects access to below fields,
+ * - reo_cmd_list
+ * - reo_cmd_cache_flush_list
+ * - reo_cmd_cache_flush_count
+ */
+ spinlock_t reo_cmd_lock;
+ struct ath12k_hp_update_timer reo_cmd_timer;
+ struct ath12k_hp_update_timer tx_ring_timer[DP_TCL_NUM_RING_MAX];
+ struct ath12k_spt_info *spt_info;
+ u32 num_spt_pages;
+ struct list_head rx_desc_free_list;
+ struct list_head rx_desc_used_list;
+ /* protects the free and used desc list */
+ spinlock_t rx_desc_lock;
+
+ struct list_head tx_desc_free_list[ATH12K_HW_MAX_QUEUES];
+ struct list_head tx_desc_used_list[ATH12K_HW_MAX_QUEUES];
+ /* protects the free and used desc lists */
+ spinlock_t tx_desc_lock[ATH12K_HW_MAX_QUEUES];
+
+ struct dp_rxdma_ring rx_refill_buf_ring;
+ struct dp_srng rx_mac_buf_ring[MAX_RXDMA_PER_PDEV];
+ struct dp_srng rxdma_err_dst_ring[MAX_RXDMA_PER_PDEV];
+ struct dp_rxdma_ring rxdma_mon_buf_ring;
+ struct dp_rxdma_ring tx_mon_buf_ring;
+ struct ath12k_reo_q_addr_lut reoq_lut;
+};
+
+/* HTT definitions */
+
+#define HTT_TCL_META_DATA_TYPE BIT(0)
+#define HTT_TCL_META_DATA_VALID_HTT BIT(1)
+
+/* vdev meta data */
+#define HTT_TCL_META_DATA_VDEV_ID GENMASK(9, 2)
+#define HTT_TCL_META_DATA_PDEV_ID GENMASK(11, 10)
+#define HTT_TCL_META_DATA_HOST_INSPECTED BIT(12)
+
+/* peer meta data */
+#define HTT_TCL_META_DATA_PEER_ID GENMASK(15, 2)
+
+#define HTT_TX_WBM_COMP_STATUS_OFFSET 8
+
+/* HTT tx completion is overlayed in wbm_release_ring */
+#define HTT_TX_WBM_COMP_INFO0_STATUS GENMASK(16, 13)
+#define HTT_TX_WBM_COMP_INFO1_REINJECT_REASON GENMASK(3, 0)
+#define HTT_TX_WBM_COMP_INFO1_EXCEPTION_FRAME BIT(4)
+
+#define HTT_TX_WBM_COMP_INFO2_ACK_RSSI GENMASK(31, 24)
+
+struct htt_tx_wbm_completion {
+ __le32 rsvd0[2];
+ __le32 info0;
+ __le32 info1;
+ __le32 info2;
+ __le32 info3;
+ __le32 info4;
+ __le32 rsvd1;
+
+} __packed;
+
+enum htt_h2t_msg_type {
+ HTT_H2T_MSG_TYPE_VERSION_REQ = 0,
+ HTT_H2T_MSG_TYPE_SRING_SETUP = 0xb,
+ HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG = 0xc,
+ HTT_H2T_MSG_TYPE_EXT_STATS_CFG = 0x10,
+ HTT_H2T_MSG_TYPE_PPDU_STATS_CFG = 0x11,
+ HTT_H2T_MSG_TYPE_VDEV_TXRX_STATS_CFG = 0x1a,
+ HTT_H2T_MSG_TYPE_TX_MONITOR_CFG = 0x1b,
+};
+
+#define HTT_VER_REQ_INFO_MSG_ID GENMASK(7, 0)
+
+struct htt_ver_req_cmd {
+ __le32 ver_reg_info;
+} __packed;
+
+enum htt_srng_ring_type {
+ HTT_HW_TO_SW_RING,
+ HTT_SW_TO_HW_RING,
+ HTT_SW_TO_SW_RING,
+};
+
+enum htt_srng_ring_id {
+ HTT_RXDMA_HOST_BUF_RING,
+ HTT_RXDMA_MONITOR_STATUS_RING,
+ HTT_RXDMA_MONITOR_BUF_RING,
+ HTT_RXDMA_MONITOR_DESC_RING,
+ HTT_RXDMA_MONITOR_DEST_RING,
+ HTT_HOST1_TO_FW_RXBUF_RING,
+ HTT_HOST2_TO_FW_RXBUF_RING,
+ HTT_RXDMA_NON_MONITOR_DEST_RING,
+ HTT_TX_MON_HOST2MON_BUF_RING,
+ HTT_TX_MON_MON2HOST_DEST_RING,
+};
+
+/* host -> target HTT_SRING_SETUP message
+ *
+ * After target is booted up, Host can send SRING setup message for
+ * each host facing LMAC SRING. Target setups up HW registers based
+ * on setup message and confirms back to Host if response_required is set.
+ * Host should wait for confirmation message before sending new SRING
+ * setup message
+ *
+ * The message would appear as follows:
+ *
+ * |31 24|23 20|19|18 16|15|14 8|7 0|
+ * |--------------- +-----------------+----------------+------------------|
+ * | ring_type | ring_id | pdev_id | msg_type |
+ * |----------------------------------------------------------------------|
+ * | ring_base_addr_lo |
+ * |----------------------------------------------------------------------|
+ * | ring_base_addr_hi |
+ * |----------------------------------------------------------------------|
+ * |ring_misc_cfg_flag|ring_entry_size| ring_size |
+ * |----------------------------------------------------------------------|
+ * | ring_head_offset32_remote_addr_lo |
+ * |----------------------------------------------------------------------|
+ * | ring_head_offset32_remote_addr_hi |
+ * |----------------------------------------------------------------------|
+ * | ring_tail_offset32_remote_addr_lo |
+ * |----------------------------------------------------------------------|
+ * | ring_tail_offset32_remote_addr_hi |
+ * |----------------------------------------------------------------------|
+ * | ring_msi_addr_lo |
+ * |----------------------------------------------------------------------|
+ * | ring_msi_addr_hi |
+ * |----------------------------------------------------------------------|
+ * | ring_msi_data |
+ * |----------------------------------------------------------------------|
+ * | intr_timer_th |IM| intr_batch_counter_th |
+ * |----------------------------------------------------------------------|
+ * | reserved |RR|PTCF| intr_low_threshold |
+ * |----------------------------------------------------------------------|
+ * Where
+ * IM = sw_intr_mode
+ * RR = response_required
+ * PTCF = prefetch_timer_cfg
+ *
+ * The message is interpreted as follows:
+ * dword0 - b'0:7 - msg_type: This will be set to
+ * HTT_H2T_MSG_TYPE_SRING_SETUP
+ * b'8:15 - pdev_id:
+ * 0 (for rings at SOC/UMAC level),
+ * 1/2/3 mac id (for rings at LMAC level)
+ * b'16:23 - ring_id: identify which ring is to setup,
+ * more details can be got from enum htt_srng_ring_id
+ * b'24:31 - ring_type: identify type of host rings,
+ * more details can be got from enum htt_srng_ring_type
+ * dword1 - b'0:31 - ring_base_addr_lo: Lower 32bits of ring base address
+ * dword2 - b'0:31 - ring_base_addr_hi: Upper 32bits of ring base address
+ * dword3 - b'0:15 - ring_size: size of the ring in unit of 4-bytes words
+ * b'16:23 - ring_entry_size: Size of each entry in 4-byte word units
+ * b'24:31 - ring_misc_cfg_flag: Valid only for HW_TO_SW_RING and
+ * SW_TO_HW_RING.
+ * Refer to HTT_SRING_SETUP_RING_MISC_CFG_RING defs.
+ * dword4 - b'0:31 - ring_head_off32_remote_addr_lo:
+ * Lower 32 bits of memory address of the remote variable
+ * storing the 4-byte word offset that identifies the head
+ * element within the ring.
+ * (The head offset variable has type u32.)
+ * Valid for HW_TO_SW and SW_TO_SW rings.
+ * dword5 - b'0:31 - ring_head_off32_remote_addr_hi:
+ * Upper 32 bits of memory address of the remote variable
+ * storing the 4-byte word offset that identifies the head
+ * element within the ring.
+ * (The head offset variable has type u32.)
+ * Valid for HW_TO_SW and SW_TO_SW rings.
+ * dword6 - b'0:31 - ring_tail_off32_remote_addr_lo:
+ * Lower 32 bits of memory address of the remote variable
+ * storing the 4-byte word offset that identifies the tail
+ * element within the ring.
+ * (The tail offset variable has type u32.)
+ * Valid for HW_TO_SW and SW_TO_SW rings.
+ * dword7 - b'0:31 - ring_tail_off32_remote_addr_hi:
+ * Upper 32 bits of memory address of the remote variable
+ * storing the 4-byte word offset that identifies the tail
+ * element within the ring.
+ * (The tail offset variable has type u32.)
+ * Valid for HW_TO_SW and SW_TO_SW rings.
+ * dword8 - b'0:31 - ring_msi_addr_lo: Lower 32bits of MSI cfg address
+ * valid only for HW_TO_SW_RING and SW_TO_HW_RING
+ * dword9 - b'0:31 - ring_msi_addr_hi: Upper 32bits of MSI cfg address
+ * valid only for HW_TO_SW_RING and SW_TO_HW_RING
+ * dword10 - b'0:31 - ring_msi_data: MSI data
+ * Refer to HTT_SRING_SETUP_RING_MSC_CFG_xxx defs
+ * valid only for HW_TO_SW_RING and SW_TO_HW_RING
+ * dword11 - b'0:14 - intr_batch_counter_th:
+ * batch counter threshold is in units of 4-byte words.
+ * HW internally maintains and increments batch count.
+ * (see SRING spec for detail description).
+ * When batch count reaches threshold value, an interrupt
+ * is generated by HW.
+ * b'15 - sw_intr_mode:
+ * This configuration shall be static.
+ * Only programmed at power up.
+ * 0: generate pulse style sw interrupts
+ * 1: generate level style sw interrupts
+ * b'16:31 - intr_timer_th:
+ * The timer init value when timer is idle or is
+ * initialized to start downcounting.
+ * In 8us units (to cover a range of 0 to 524 ms)
+ * dword12 - b'0:15 - intr_low_threshold:
+ * Used only by Consumer ring to generate ring_sw_int_p.
+ * Ring entries low threshold water mark, that is used
+ * in combination with the interrupt timer as well as
+ * the clearing of the level interrupt.
+ * b'16:18 - prefetch_timer_cfg:
+ * Used only by Consumer ring to set timer mode to
+ * support Application prefetch handling.
+ * The external tail offset/pointer will be updated
+ * at following intervals:
+ * 3'b000: (Prefetch feature disabled; used only for debug)
+ * 3'b001: 1 usec
+ * 3'b010: 4 usec
+ * 3'b011: 8 usec (default)
+ * 3'b100: 16 usec
+ * Others: Reserverd
+ * b'19 - response_required:
+ * Host needs HTT_T2H_MSG_TYPE_SRING_SETUP_DONE as response
+ * b'20:31 - reserved: reserved for future use
+ */
+
+#define HTT_SRNG_SETUP_CMD_INFO0_MSG_TYPE GENMASK(7, 0)
+#define HTT_SRNG_SETUP_CMD_INFO0_PDEV_ID GENMASK(15, 8)
+#define HTT_SRNG_SETUP_CMD_INFO0_RING_ID GENMASK(23, 16)
+#define HTT_SRNG_SETUP_CMD_INFO0_RING_TYPE GENMASK(31, 24)
+
+#define HTT_SRNG_SETUP_CMD_INFO1_RING_SIZE GENMASK(15, 0)
+#define HTT_SRNG_SETUP_CMD_INFO1_RING_ENTRY_SIZE GENMASK(23, 16)
+#define HTT_SRNG_SETUP_CMD_INFO1_RING_LOOP_CNT_DIS BIT(25)
+#define HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_MSI_SWAP BIT(27)
+#define HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_HOST_FW_SWAP BIT(28)
+#define HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_TLV_SWAP BIT(29)
+
+#define HTT_SRNG_SETUP_CMD_INTR_INFO_BATCH_COUNTER_THRESH GENMASK(14, 0)
+#define HTT_SRNG_SETUP_CMD_INTR_INFO_SW_INTR_MODE BIT(15)
+#define HTT_SRNG_SETUP_CMD_INTR_INFO_INTR_TIMER_THRESH GENMASK(31, 16)
+
+#define HTT_SRNG_SETUP_CMD_INFO2_INTR_LOW_THRESH GENMASK(15, 0)
+#define HTT_SRNG_SETUP_CMD_INFO2_PRE_FETCH_TIMER_CFG GENMASK(18, 16)
+#define HTT_SRNG_SETUP_CMD_INFO2_RESPONSE_REQUIRED BIT(19)
+
+struct htt_srng_setup_cmd {
+ __le32 info0;
+ __le32 ring_base_addr_lo;
+ __le32 ring_base_addr_hi;
+ __le32 info1;
+ __le32 ring_head_off32_remote_addr_lo;
+ __le32 ring_head_off32_remote_addr_hi;
+ __le32 ring_tail_off32_remote_addr_lo;
+ __le32 ring_tail_off32_remote_addr_hi;
+ __le32 ring_msi_addr_lo;
+ __le32 ring_msi_addr_hi;
+ __le32 msi_data;
+ __le32 intr_info;
+ __le32 info2;
+} __packed;
+
+/* host -> target FW PPDU_STATS config message
+ *
+ * @details
+ * The following field definitions describe the format of the HTT host
+ * to target FW for PPDU_STATS_CFG msg.
+ * The message allows the host to configure the PPDU_STATS_IND messages
+ * produced by the target.
+ *
+ * |31 24|23 16|15 8|7 0|
+ * |-----------------------------------------------------------|
+ * | REQ bit mask | pdev_mask | msg type |
+ * |-----------------------------------------------------------|
+ * Header fields:
+ * - MSG_TYPE
+ * Bits 7:0
+ * Purpose: identifies this is a req to configure ppdu_stats_ind from target
+ * Value: 0x11
+ * - PDEV_MASK
+ * Bits 8:15
+ * Purpose: identifies which pdevs this PPDU stats configuration applies to
+ * Value: This is a overloaded field, refer to usage and interpretation of
+ * PDEV in interface document.
+ * Bit 8 : Reserved for SOC stats
+ * Bit 9 - 15 : Indicates PDEV_MASK in DBDC
+ * Indicates MACID_MASK in DBS
+ * - REQ_TLV_BIT_MASK
+ * Bits 16:31
+ * Purpose: each set bit indicates the corresponding PPDU stats TLV type
+ * needs to be included in the target's PPDU_STATS_IND messages.
+ * Value: refer htt_ppdu_stats_tlv_tag_t <<<???
+ *
+ */
+
+struct htt_ppdu_stats_cfg_cmd {
+ __le32 msg;
+} __packed;
+
+#define HTT_PPDU_STATS_CFG_MSG_TYPE GENMASK(7, 0)
+#define HTT_PPDU_STATS_CFG_PDEV_ID GENMASK(15, 8)
+#define HTT_PPDU_STATS_CFG_TLV_TYPE_BITMASK GENMASK(31, 16)
+
+enum htt_ppdu_stats_tag_type {
+ HTT_PPDU_STATS_TAG_COMMON,
+ HTT_PPDU_STATS_TAG_USR_COMMON,
+ HTT_PPDU_STATS_TAG_USR_RATE,
+ HTT_PPDU_STATS_TAG_USR_MPDU_ENQ_BITMAP_64,
+ HTT_PPDU_STATS_TAG_USR_MPDU_ENQ_BITMAP_256,
+ HTT_PPDU_STATS_TAG_SCH_CMD_STATUS,
+ HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON,
+ HTT_PPDU_STATS_TAG_USR_COMPLTN_BA_BITMAP_64,
+ HTT_PPDU_STATS_TAG_USR_COMPLTN_BA_BITMAP_256,
+ HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS,
+ HTT_PPDU_STATS_TAG_USR_COMPLTN_FLUSH,
+ HTT_PPDU_STATS_TAG_USR_COMMON_ARRAY,
+ HTT_PPDU_STATS_TAG_INFO,
+ HTT_PPDU_STATS_TAG_TX_MGMTCTRL_PAYLOAD,
+
+ /* New TLV's are added above to this line */
+ HTT_PPDU_STATS_TAG_MAX,
+};
+
+#define HTT_PPDU_STATS_TAG_DEFAULT (BIT(HTT_PPDU_STATS_TAG_COMMON) \
+ | BIT(HTT_PPDU_STATS_TAG_USR_COMMON) \
+ | BIT(HTT_PPDU_STATS_TAG_USR_RATE) \
+ | BIT(HTT_PPDU_STATS_TAG_SCH_CMD_STATUS) \
+ | BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON) \
+ | BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS) \
+ | BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_FLUSH) \
+ | BIT(HTT_PPDU_STATS_TAG_USR_COMMON_ARRAY))
+
+#define HTT_PPDU_STATS_TAG_PKTLOG (BIT(HTT_PPDU_STATS_TAG_USR_MPDU_ENQ_BITMAP_64) | \
+ BIT(HTT_PPDU_STATS_TAG_USR_MPDU_ENQ_BITMAP_256) | \
+ BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_BA_BITMAP_64) | \
+ BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_BA_BITMAP_256) | \
+ BIT(HTT_PPDU_STATS_TAG_INFO) | \
+ BIT(HTT_PPDU_STATS_TAG_TX_MGMTCTRL_PAYLOAD) | \
+ HTT_PPDU_STATS_TAG_DEFAULT)
+
+enum htt_stats_internal_ppdu_frametype {
+ HTT_STATS_PPDU_FTYPE_CTRL,
+ HTT_STATS_PPDU_FTYPE_DATA,
+ HTT_STATS_PPDU_FTYPE_BAR,
+ HTT_STATS_PPDU_FTYPE_MAX
+};
+
+/* HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG Message
+ *
+ * details:
+ * HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG message is sent by host to
+ * configure RXDMA rings.
+ * The configuration is per ring based and includes both packet subtypes
+ * and PPDU/MPDU TLVs.
+ *
+ * The message would appear as follows:
+ *
+ * |31 26|25|24|23 16|15 8|7 0|
+ * |-----------------+----------------+----------------+---------------|
+ * | rsvd1 |PS|SS| ring_id | pdev_id | msg_type |
+ * |-------------------------------------------------------------------|
+ * | rsvd2 | ring_buffer_size |
+ * |-------------------------------------------------------------------|
+ * | packet_type_enable_flags_0 |
+ * |-------------------------------------------------------------------|
+ * | packet_type_enable_flags_1 |
+ * |-------------------------------------------------------------------|
+ * | packet_type_enable_flags_2 |
+ * |-------------------------------------------------------------------|
+ * | packet_type_enable_flags_3 |
+ * |-------------------------------------------------------------------|
+ * | tlv_filter_in_flags |
+ * |-------------------------------------------------------------------|
+ * Where:
+ * PS = pkt_swap
+ * SS = status_swap
+ * The message is interpreted as follows:
+ * dword0 - b'0:7 - msg_type: This will be set to
+ * HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG
+ * b'8:15 - pdev_id:
+ * 0 (for rings at SOC/UMAC level),
+ * 1/2/3 mac id (for rings at LMAC level)
+ * b'16:23 - ring_id : Identify the ring to configure.
+ * More details can be got from enum htt_srng_ring_id
+ * b'24 - status_swap: 1 is to swap status TLV
+ * b'25 - pkt_swap: 1 is to swap packet TLV
+ * b'26:31 - rsvd1: reserved for future use
+ * dword1 - b'0:16 - ring_buffer_size: size of bufferes referenced by rx ring,
+ * in byte units.
+ * Valid only for HW_TO_SW_RING and SW_TO_HW_RING
+ * - b'16:31 - rsvd2: Reserved for future use
+ * dword2 - b'0:31 - packet_type_enable_flags_0:
+ * Enable MGMT packet from 0b0000 to 0b1001
+ * bits from low to high: FP, MD, MO - 3 bits
+ * FP: Filter_Pass
+ * MD: Monitor_Direct
+ * MO: Monitor_Other
+ * 10 mgmt subtypes * 3 bits -> 30 bits
+ * Refer to PKT_TYPE_ENABLE_FLAG0_xxx_MGMT_xxx defs
+ * dword3 - b'0:31 - packet_type_enable_flags_1:
+ * Enable MGMT packet from 0b1010 to 0b1111
+ * bits from low to high: FP, MD, MO - 3 bits
+ * Refer to PKT_TYPE_ENABLE_FLAG1_xxx_MGMT_xxx defs
+ * dword4 - b'0:31 - packet_type_enable_flags_2:
+ * Enable CTRL packet from 0b0000 to 0b1001
+ * bits from low to high: FP, MD, MO - 3 bits
+ * Refer to PKT_TYPE_ENABLE_FLAG2_xxx_CTRL_xxx defs
+ * dword5 - b'0:31 - packet_type_enable_flags_3:
+ * Enable CTRL packet from 0b1010 to 0b1111,
+ * MCAST_DATA, UCAST_DATA, NULL_DATA
+ * bits from low to high: FP, MD, MO - 3 bits
+ * Refer to PKT_TYPE_ENABLE_FLAG3_xxx_CTRL_xxx defs
+ * dword6 - b'0:31 - tlv_filter_in_flags:
+ * Filter in Attention/MPDU/PPDU/Header/User tlvs
+ * Refer to CFG_TLV_FILTER_IN_FLAG defs
+ */
+
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO0_MSG_TYPE GENMASK(7, 0)
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID GENMASK(15, 8)
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO0_RING_ID GENMASK(23, 16)
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO0_SS BIT(24)
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PS BIT(25)
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO1_BUF_SIZE GENMASK(15, 0)
+#define HTT_RX_RING_SELECTION_CFG_CMD_OFFSET_VALID BIT(26)
+
+#define HTT_RX_RING_SELECTION_CFG_RX_PACKET_OFFSET GENMASK(15, 0)
+#define HTT_RX_RING_SELECTION_CFG_RX_HEADER_OFFSET GENMASK(31, 16)
+#define HTT_RX_RING_SELECTION_CFG_RX_MPDU_END_OFFSET GENMASK(15, 0)
+#define HTT_RX_RING_SELECTION_CFG_RX_MPDU_START_OFFSET GENMASK(31, 16)
+#define HTT_RX_RING_SELECTION_CFG_RX_MSDU_END_OFFSET GENMASK(15, 0)
+#define HTT_RX_RING_SELECTION_CFG_RX_MSDU_START_OFFSET GENMASK(31, 16)
+#define HTT_RX_RING_SELECTION_CFG_RX_ATTENTION_OFFSET GENMASK(15, 0)
+
+enum htt_rx_filter_tlv_flags {
+ HTT_RX_FILTER_TLV_FLAGS_MPDU_START = BIT(0),
+ HTT_RX_FILTER_TLV_FLAGS_MSDU_START = BIT(1),
+ HTT_RX_FILTER_TLV_FLAGS_RX_PACKET = BIT(2),
+ HTT_RX_FILTER_TLV_FLAGS_MSDU_END = BIT(3),
+ HTT_RX_FILTER_TLV_FLAGS_MPDU_END = BIT(4),
+ HTT_RX_FILTER_TLV_FLAGS_PACKET_HEADER = BIT(5),
+ HTT_RX_FILTER_TLV_FLAGS_PER_MSDU_HEADER = BIT(6),
+ HTT_RX_FILTER_TLV_FLAGS_ATTENTION = BIT(7),
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_START = BIT(8),
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END = BIT(9),
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS = BIT(10),
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS_EXT = BIT(11),
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE = BIT(12),
+};
+
+enum htt_rx_mgmt_pkt_filter_tlv_flags0 {
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_REQ = BIT(0),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_REQ = BIT(1),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_REQ = BIT(2),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_RESP = BIT(3),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_RESP = BIT(4),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_RESP = BIT(5),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_REQ = BIT(6),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_REQ = BIT(7),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_REQ = BIT(8),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_RESP = BIT(9),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_RESP = BIT(10),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_RESP = BIT(11),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_REQ = BIT(12),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_REQ = BIT(13),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_REQ = BIT(14),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_RESP = BIT(15),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_RESP = BIT(16),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_RESP = BIT(17),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_TIMING_ADV = BIT(18),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_TIMING_ADV = BIT(19),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_TIMING_ADV = BIT(20),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_RESERVED_7 = BIT(21),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_RESERVED_7 = BIT(22),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_RESERVED_7 = BIT(23),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_BEACON = BIT(24),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_BEACON = BIT(25),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_BEACON = BIT(26),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_ATIM = BIT(27),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_ATIM = BIT(28),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_ATIM = BIT(29),
+};
+
+enum htt_rx_mgmt_pkt_filter_tlv_flags1 {
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_DISASSOC = BIT(0),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_DISASSOC = BIT(1),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_DISASSOC = BIT(2),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_AUTH = BIT(3),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_AUTH = BIT(4),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_AUTH = BIT(5),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_DEAUTH = BIT(6),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_DEAUTH = BIT(7),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_DEAUTH = BIT(8),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION = BIT(9),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION = BIT(10),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION = BIT(11),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION_NOACK = BIT(12),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION_NOACK = BIT(13),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION_NOACK = BIT(14),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_RESERVED_15 = BIT(15),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_RESERVED_15 = BIT(16),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_RESERVED_15 = BIT(17),
+};
+
+enum htt_rx_ctrl_pkt_filter_tlv_flags2 {
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_1 = BIT(0),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_1 = BIT(1),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_1 = BIT(2),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_2 = BIT(3),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_2 = BIT(4),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_2 = BIT(5),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_TRIGGER = BIT(6),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_TRIGGER = BIT(7),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_TRIGGER = BIT(8),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_4 = BIT(9),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_4 = BIT(10),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_4 = BIT(11),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_BF_REP_POLL = BIT(12),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_BF_REP_POLL = BIT(13),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_BF_REP_POLL = BIT(14),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_VHT_NDP = BIT(15),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_VHT_NDP = BIT(16),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_VHT_NDP = BIT(17),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_FRAME_EXT = BIT(18),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_FRAME_EXT = BIT(19),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_FRAME_EXT = BIT(20),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_WRAPPER = BIT(21),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_WRAPPER = BIT(22),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_WRAPPER = BIT(23),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_BAR = BIT(24),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_BAR = BIT(25),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_BAR = BIT(26),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_BA = BIT(27),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_BA = BIT(28),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_BA = BIT(29),
+};
+
+enum htt_rx_ctrl_pkt_filter_tlv_flags3 {
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_PSPOLL = BIT(0),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_PSPOLL = BIT(1),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_PSPOLL = BIT(2),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_RTS = BIT(3),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_RTS = BIT(4),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_RTS = BIT(5),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_CTS = BIT(6),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_CTS = BIT(7),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_CTS = BIT(8),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_ACK = BIT(9),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_ACK = BIT(10),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_ACK = BIT(11),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND = BIT(12),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND = BIT(13),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND = BIT(14),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND_ACK = BIT(15),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND_ACK = BIT(16),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND_ACK = BIT(17),
+};
+
+enum htt_rx_data_pkt_filter_tlv_flasg3 {
+ HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_MCAST = BIT(18),
+ HTT_RX_MD_DATA_PKT_FILTER_TLV_FLASG3_MCAST = BIT(19),
+ HTT_RX_MO_DATA_PKT_FILTER_TLV_FLASG3_MCAST = BIT(20),
+ HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_UCAST = BIT(21),
+ HTT_RX_MD_DATA_PKT_FILTER_TLV_FLASG3_UCAST = BIT(22),
+ HTT_RX_MO_DATA_PKT_FILTER_TLV_FLASG3_UCAST = BIT(23),
+ HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA = BIT(24),
+ HTT_RX_MD_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA = BIT(25),
+ HTT_RX_MO_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA = BIT(26),
+};
+
+#define HTT_RX_FP_MGMT_FILTER_FLAGS0 \
+ (HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_REQ \
+ | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_RESP \
+ | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_REQ \
+ | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_RESP \
+ | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_REQ \
+ | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_RESP \
+ | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_TIMING_ADV \
+ | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_BEACON \
+ | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_ATIM)
+
+#define HTT_RX_MD_MGMT_FILTER_FLAGS0 \
+ (HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_REQ \
+ | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_RESP \
+ | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_REQ \
+ | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_RESP \
+ | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_REQ \
+ | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_RESP \
+ | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_TIMING_ADV \
+ | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_BEACON \
+ | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_ATIM)
+
+#define HTT_RX_MO_MGMT_FILTER_FLAGS0 \
+ (HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_REQ \
+ | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_RESP \
+ | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_REQ \
+ | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_RESP \
+ | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_REQ \
+ | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_RESP \
+ | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_TIMING_ADV \
+ | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_BEACON \
+ | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_ATIM)
+
+#define HTT_RX_FP_MGMT_FILTER_FLAGS1 (HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_DISASSOC \
+ | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_AUTH \
+ | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_DEAUTH \
+ | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION \
+ | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION_NOACK)
+
+#define HTT_RX_MD_MGMT_FILTER_FLAGS1 (HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_DISASSOC \
+ | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_AUTH \
+ | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_DEAUTH \
+ | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION \
+ | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION_NOACK)
+
+#define HTT_RX_MO_MGMT_FILTER_FLAGS1 (HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_DISASSOC \
+ | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_AUTH \
+ | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_DEAUTH \
+ | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION \
+ | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION_NOACK)
+
+#define HTT_RX_FP_CTRL_FILTER_FLASG2 (HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_WRAPPER \
+ | HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_BAR \
+ | HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_BA)
+
+#define HTT_RX_MD_CTRL_FILTER_FLASG2 (HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_WRAPPER \
+ | HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_BAR \
+ | HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_BA)
+
+#define HTT_RX_MO_CTRL_FILTER_FLASG2 (HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_WRAPPER \
+ | HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_BAR \
+ | HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_BA)
+
+#define HTT_RX_FP_CTRL_FILTER_FLASG3 (HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_PSPOLL \
+ | HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_RTS \
+ | HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_CTS \
+ | HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_ACK \
+ | HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND \
+ | HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND_ACK)
+
+#define HTT_RX_MD_CTRL_FILTER_FLASG3 (HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_PSPOLL \
+ | HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_RTS \
+ | HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_CTS \
+ | HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_ACK \
+ | HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND \
+ | HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND_ACK)
+
+#define HTT_RX_MO_CTRL_FILTER_FLASG3 (HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_PSPOLL \
+ | HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_RTS \
+ | HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_CTS \
+ | HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_ACK \
+ | HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND \
+ | HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND_ACK)
+
+#define HTT_RX_FP_DATA_FILTER_FLASG3 (HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_MCAST \
+ | HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_UCAST \
+ | HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA)
+
+#define HTT_RX_MD_DATA_FILTER_FLASG3 (HTT_RX_MD_DATA_PKT_FILTER_TLV_FLASG3_MCAST \
+ | HTT_RX_MD_DATA_PKT_FILTER_TLV_FLASG3_UCAST \
+ | HTT_RX_MD_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA)
+
+#define HTT_RX_MO_DATA_FILTER_FLASG3 (HTT_RX_MO_DATA_PKT_FILTER_TLV_FLASG3_MCAST \
+ | HTT_RX_MO_DATA_PKT_FILTER_TLV_FLASG3_UCAST \
+ | HTT_RX_MO_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA)
+
+#define HTT_RX_MON_FP_MGMT_FILTER_FLAGS0 \
+ (HTT_RX_FP_MGMT_FILTER_FLAGS0 | \
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_RESERVED_7)
+
+#define HTT_RX_MON_MO_MGMT_FILTER_FLAGS0 \
+ (HTT_RX_MO_MGMT_FILTER_FLAGS0 | \
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_RESERVED_7)
+
+#define HTT_RX_MON_FP_MGMT_FILTER_FLAGS1 \
+ (HTT_RX_FP_MGMT_FILTER_FLAGS1 | \
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_RESERVED_15)
+
+#define HTT_RX_MON_MO_MGMT_FILTER_FLAGS1 \
+ (HTT_RX_MO_MGMT_FILTER_FLAGS1 | \
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_RESERVED_15)
+
+#define HTT_RX_MON_FP_CTRL_FILTER_FLASG2 \
+ (HTT_RX_FP_CTRL_FILTER_FLASG2 | \
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_1 | \
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_2 | \
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_TRIGGER | \
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_4 | \
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_BF_REP_POLL | \
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_VHT_NDP | \
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_FRAME_EXT)
+
+#define HTT_RX_MON_MO_CTRL_FILTER_FLASG2 \
+ (HTT_RX_MO_CTRL_FILTER_FLASG2 | \
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_1 | \
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_2 | \
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_TRIGGER | \
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_4 | \
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_BF_REP_POLL | \
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_VHT_NDP | \
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_FRAME_EXT)
+
+#define HTT_RX_MON_FP_CTRL_FILTER_FLASG3 HTT_RX_FP_CTRL_FILTER_FLASG3
+
+#define HTT_RX_MON_MO_CTRL_FILTER_FLASG3 HTT_RX_MO_CTRL_FILTER_FLASG3
+
+#define HTT_RX_MON_FP_DATA_FILTER_FLASG3 HTT_RX_FP_DATA_FILTER_FLASG3
+
+#define HTT_RX_MON_MO_DATA_FILTER_FLASG3 HTT_RX_MO_DATA_FILTER_FLASG3
+
+#define HTT_RX_MON_FILTER_TLV_FLAGS \
+ (HTT_RX_FILTER_TLV_FLAGS_MPDU_START | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_START | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS_EXT | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE)
+
+#define HTT_RX_MON_FILTER_TLV_FLAGS_MON_STATUS_RING \
+ (HTT_RX_FILTER_TLV_FLAGS_MPDU_START | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_START | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS_EXT | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE)
+
+#define HTT_RX_MON_FILTER_TLV_FLAGS_MON_BUF_RING \
+ (HTT_RX_FILTER_TLV_FLAGS_MPDU_START | \
+ HTT_RX_FILTER_TLV_FLAGS_MSDU_START | \
+ HTT_RX_FILTER_TLV_FLAGS_RX_PACKET | \
+ HTT_RX_FILTER_TLV_FLAGS_MSDU_END | \
+ HTT_RX_FILTER_TLV_FLAGS_MPDU_END | \
+ HTT_RX_FILTER_TLV_FLAGS_PACKET_HEADER | \
+ HTT_RX_FILTER_TLV_FLAGS_PER_MSDU_HEADER | \
+ HTT_RX_FILTER_TLV_FLAGS_ATTENTION)
+
+/* msdu start. mpdu end, attention, rx hdr tlv's are not subscribed */
+#define HTT_RX_TLV_FLAGS_RXDMA_RING \
+ (HTT_RX_FILTER_TLV_FLAGS_MPDU_START | \
+ HTT_RX_FILTER_TLV_FLAGS_RX_PACKET | \
+ HTT_RX_FILTER_TLV_FLAGS_MSDU_END)
+
+#define HTT_TX_RING_SELECTION_CFG_CMD_INFO0_MSG_TYPE GENMASK(7, 0)
+#define HTT_TX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID GENMASK(15, 8)
+
+struct htt_rx_ring_selection_cfg_cmd {
+ __le32 info0;
+ __le32 info1;
+ __le32 pkt_type_en_flags0;
+ __le32 pkt_type_en_flags1;
+ __le32 pkt_type_en_flags2;
+ __le32 pkt_type_en_flags3;
+ __le32 rx_filter_tlv;
+ __le32 rx_packet_offset;
+ __le32 rx_mpdu_offset;
+ __le32 rx_msdu_offset;
+ __le32 rx_attn_offset;
+} __packed;
+
+struct htt_rx_ring_tlv_filter {
+ u32 rx_filter; /* see htt_rx_filter_tlv_flags */
+ u32 pkt_filter_flags0; /* MGMT */
+ u32 pkt_filter_flags1; /* MGMT */
+ u32 pkt_filter_flags2; /* CTRL */
+ u32 pkt_filter_flags3; /* DATA */
+ bool offset_valid;
+ u16 rx_packet_offset;
+ u16 rx_header_offset;
+ u16 rx_mpdu_end_offset;
+ u16 rx_mpdu_start_offset;
+ u16 rx_msdu_end_offset;
+ u16 rx_msdu_start_offset;
+ u16 rx_attn_offset;
+};
+
+#define HTT_STATS_FRAME_CTRL_TYPE_MGMT 0x0
+#define HTT_STATS_FRAME_CTRL_TYPE_CTRL 0x1
+#define HTT_STATS_FRAME_CTRL_TYPE_DATA 0x2
+#define HTT_STATS_FRAME_CTRL_TYPE_RESV 0x3
+
+#define HTT_TX_RING_SELECTION_CFG_CMD_INFO0_MSG_TYPE GENMASK(7, 0)
+#define HTT_TX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID GENMASK(15, 8)
+#define HTT_TX_RING_SELECTION_CFG_CMD_INFO0_RING_ID GENMASK(23, 16)
+#define HTT_TX_RING_SELECTION_CFG_CMD_INFO0_SS BIT(24)
+#define HTT_TX_RING_SELECTION_CFG_CMD_INFO0_PS BIT(25)
+
+#define HTT_TX_RING_SELECTION_CFG_CMD_INFO1_RING_BUFF_SIZE GENMASK(15, 0)
+#define HTT_TX_RING_SELECTION_CFG_CMD_INFO1_PKT_TYPE GENMASK(18, 16)
+#define HTT_TX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_MGMT GENMASK(21, 19)
+#define HTT_TX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_CTRL GENMASK(24, 22)
+#define HTT_TX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_DATA GENMASK(27, 25)
+
+#define HTT_TX_RING_SELECTION_CFG_CMD_INFO2_PKT_TYPE_EN_FLAG GENMASK(2, 0)
+
+struct htt_tx_ring_selection_cfg_cmd {
+ __le32 info0;
+ __le32 info1;
+ __le32 info2;
+ __le32 tlv_filter_mask_in0;
+ __le32 tlv_filter_mask_in1;
+ __le32 tlv_filter_mask_in2;
+ __le32 tlv_filter_mask_in3;
+ __le32 reserverd[3];
+} __packed;
+
+#define HTT_TX_RING_TLV_FILTER_MGMT_DMA_LEN GENMASK(3, 0)
+#define HTT_TX_RING_TLV_FILTER_CTRL_DMA_LEN GENMASK(7, 4)
+#define HTT_TX_RING_TLV_FILTER_DATA_DMA_LEN GENMASK(11, 8)
+
+#define HTT_TX_MON_FILTER_HYBRID_MODE \
+ (HTT_TX_FILTER_TLV_FLAGS0_RESPONSE_START_STATUS | \
+ HTT_TX_FILTER_TLV_FLAGS0_RESPONSE_END_STATUS | \
+ HTT_TX_FILTER_TLV_FLAGS0_TX_FES_STATUS_START | \
+ HTT_TX_FILTER_TLV_FLAGS0_TX_FES_STATUS_END | \
+ HTT_TX_FILTER_TLV_FLAGS0_TX_FES_STATUS_START_PPDU | \
+ HTT_TX_FILTER_TLV_FLAGS0_TX_FES_STATUS_USER_PPDU | \
+ HTT_TX_FILTER_TLV_FLAGS0_TX_FES_STATUS_ACK_OR_BA | \
+ HTT_TX_FILTER_TLV_FLAGS0_TX_FES_STATUS_1K_BA | \
+ HTT_TX_FILTER_TLV_FLAGS0_TX_FES_STATUS_START_PROT | \
+ HTT_TX_FILTER_TLV_FLAGS0_TX_FES_STATUS_PROT | \
+ HTT_TX_FILTER_TLV_FLAGS0_TX_FES_STATUS_USER_RESPONSE | \
+ HTT_TX_FILTER_TLV_FLAGS0_RECEIVED_RESPONSE_INFO | \
+ HTT_TX_FILTER_TLV_FLAGS0_RECEIVED_RESPONSE_INFO_PART2)
+
+struct htt_tx_ring_tlv_filter {
+ u32 tx_mon_downstream_tlv_flags;
+ u32 tx_mon_upstream_tlv_flags0;
+ u32 tx_mon_upstream_tlv_flags1;
+ u32 tx_mon_upstream_tlv_flags2;
+ bool tx_mon_mgmt_filter;
+ bool tx_mon_data_filter;
+ bool tx_mon_ctrl_filter;
+ u16 tx_mon_pkt_dma_len;
+} __packed;
+
+enum htt_tx_mon_upstream_tlv_flags0 {
+ HTT_TX_FILTER_TLV_FLAGS0_RESPONSE_START_STATUS = BIT(1),
+ HTT_TX_FILTER_TLV_FLAGS0_RESPONSE_END_STATUS = BIT(2),
+ HTT_TX_FILTER_TLV_FLAGS0_TX_FES_STATUS_START = BIT(3),
+ HTT_TX_FILTER_TLV_FLAGS0_TX_FES_STATUS_END = BIT(4),
+ HTT_TX_FILTER_TLV_FLAGS0_TX_FES_STATUS_START_PPDU = BIT(5),
+ HTT_TX_FILTER_TLV_FLAGS0_TX_FES_STATUS_USER_PPDU = BIT(6),
+ HTT_TX_FILTER_TLV_FLAGS0_TX_FES_STATUS_ACK_OR_BA = BIT(7),
+ HTT_TX_FILTER_TLV_FLAGS0_TX_FES_STATUS_1K_BA = BIT(8),
+ HTT_TX_FILTER_TLV_FLAGS0_TX_FES_STATUS_START_PROT = BIT(9),
+ HTT_TX_FILTER_TLV_FLAGS0_TX_FES_STATUS_PROT = BIT(10),
+ HTT_TX_FILTER_TLV_FLAGS0_TX_FES_STATUS_USER_RESPONSE = BIT(11),
+ HTT_TX_FILTER_TLV_FLAGS0_RX_FRAME_BITMAP_ACK = BIT(12),
+ HTT_TX_FILTER_TLV_FLAGS0_RX_FRAME_1K_BITMAP_ACK = BIT(13),
+ HTT_TX_FILTER_TLV_FLAGS0_COEX_TX_STATUS = BIT(14),
+ HTT_TX_FILTER_TLV_FLAGS0_RECEIVED_RESPONSE_INFO = BIT(15),
+ HTT_TX_FILTER_TLV_FLAGS0_RECEIVED_RESPONSE_INFO_PART2 = BIT(16),
+};
+
+#define HTT_TX_FILTER_TLV_FLAGS2_TXPCU_PHYTX_OTHER_TRANSMIT_INFO32 BIT(11)
+
+/* HTT message target->host */
+
+enum htt_t2h_msg_type {
+ HTT_T2H_MSG_TYPE_VERSION_CONF,
+ HTT_T2H_MSG_TYPE_PEER_MAP = 0x3,
+ HTT_T2H_MSG_TYPE_PEER_UNMAP = 0x4,
+ HTT_T2H_MSG_TYPE_RX_ADDBA = 0x5,
+ HTT_T2H_MSG_TYPE_PKTLOG = 0x8,
+ HTT_T2H_MSG_TYPE_SEC_IND = 0xb,
+ HTT_T2H_MSG_TYPE_PEER_MAP2 = 0x1e,
+ HTT_T2H_MSG_TYPE_PEER_UNMAP2 = 0x1f,
+ HTT_T2H_MSG_TYPE_PPDU_STATS_IND = 0x1d,
+ HTT_T2H_MSG_TYPE_EXT_STATS_CONF = 0x1c,
+ HTT_T2H_MSG_TYPE_BKPRESSURE_EVENT_IND = 0x24,
+ HTT_T2H_MSG_TYPE_MLO_TIMESTAMP_OFFSET_IND = 0x28,
+ HTT_T2H_MSG_TYPE_PEER_MAP3 = 0x2b,
+ HTT_T2H_MSG_TYPE_VDEV_TXRX_STATS_PERIODIC_IND = 0x2c,
+};
+
+#define HTT_TARGET_VERSION_MAJOR 3
+
+#define HTT_T2H_MSG_TYPE GENMASK(7, 0)
+#define HTT_T2H_VERSION_CONF_MINOR GENMASK(15, 8)
+#define HTT_T2H_VERSION_CONF_MAJOR GENMASK(23, 16)
+
+struct htt_t2h_version_conf_msg {
+ __le32 version;
+} __packed;
+
+#define HTT_T2H_PEER_MAP_INFO_VDEV_ID GENMASK(15, 8)
+#define HTT_T2H_PEER_MAP_INFO_PEER_ID GENMASK(31, 16)
+#define HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16 GENMASK(15, 0)
+#define HTT_T2H_PEER_MAP_INFO1_HW_PEER_ID GENMASK(31, 16)
+#define HTT_T2H_PEER_MAP_INFO2_AST_HASH_VAL GENMASK(15, 0)
+#define HTT_T2H_PEER_MAP_INFO2_NEXT_HOP_M BIT(16)
+#define HTT_T2H_PEER_MAP_INFO2_NEXT_HOP_S 16
+
+struct htt_t2h_peer_map_event {
+ __le32 info;
+ __le32 mac_addr_l32;
+ __le32 info1;
+ __le32 info2;
+} __packed;
+
+#define HTT_T2H_PEER_UNMAP_INFO_VDEV_ID HTT_T2H_PEER_MAP_INFO_VDEV_ID
+#define HTT_T2H_PEER_UNMAP_INFO_PEER_ID HTT_T2H_PEER_MAP_INFO_PEER_ID
+#define HTT_T2H_PEER_UNMAP_INFO1_MAC_ADDR_H16 \
+ HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16
+#define HTT_T2H_PEER_MAP_INFO1_NEXT_HOP_M HTT_T2H_PEER_MAP_INFO2_NEXT_HOP_M
+#define HTT_T2H_PEER_MAP_INFO1_NEXT_HOP_S HTT_T2H_PEER_MAP_INFO2_NEXT_HOP_S
+
+struct htt_t2h_peer_unmap_event {
+ __le32 info;
+ __le32 mac_addr_l32;
+ __le32 info1;
+} __packed;
+
+struct htt_resp_msg {
+ union {
+ struct htt_t2h_version_conf_msg version_msg;
+ struct htt_t2h_peer_map_event peer_map_ev;
+ struct htt_t2h_peer_unmap_event peer_unmap_ev;
+ };
+} __packed;
+
+#define HTT_VDEV_GET_STATS_U64(msg_l32, msg_u32)\
+ (((u64)__le32_to_cpu(msg_u32) << 32) | (__le32_to_cpu(msg_l32)))
+#define HTT_T2H_VDEV_STATS_PERIODIC_MSG_TYPE GENMASK(7, 0)
+#define HTT_T2H_VDEV_STATS_PERIODIC_PDEV_ID GENMASK(15, 8)
+#define HTT_T2H_VDEV_STATS_PERIODIC_NUM_VDEV GENMASK(23, 16)
+#define HTT_T2H_VDEV_STATS_PERIODIC_PAYLOAD_BYTES GENMASK(15, 0)
+#define HTT_VDEV_TXRX_STATS_COMMON_TLV 0
+#define HTT_VDEV_TXRX_STATS_HW_STATS_TLV 1
+
+struct htt_t2h_vdev_txrx_stats_ind {
+ __le32 vdev_id;
+ __le32 rx_msdu_byte_cnt_lo;
+ __le32 rx_msdu_byte_cnt_hi;
+ __le32 rx_msdu_cnt_lo;
+ __le32 rx_msdu_cnt_hi;
+ __le32 tx_msdu_byte_cnt_lo;
+ __le32 tx_msdu_byte_cnt_hi;
+ __le32 tx_msdu_cnt_lo;
+ __le32 tx_msdu_cnt_hi;
+ __le32 tx_retry_cnt_lo;
+ __le32 tx_retry_cnt_hi;
+ __le32 tx_retry_byte_cnt_lo;
+ __le32 tx_retry_byte_cnt_hi;
+ __le32 tx_drop_cnt_lo;
+ __le32 tx_drop_cnt_hi;
+ __le32 tx_drop_byte_cnt_lo;
+ __le32 tx_drop_byte_cnt_hi;
+ __le32 msdu_ttl_cnt_lo;
+ __le32 msdu_ttl_cnt_hi;
+ __le32 msdu_ttl_byte_cnt_lo;
+ __le32 msdu_ttl_byte_cnt_hi;
+} __packed;
+
+struct htt_t2h_vdev_common_stats_tlv {
+ __le32 soc_drop_count_lo;
+ __le32 soc_drop_count_hi;
+} __packed;
+
+/* ppdu stats
+ *
+ * @details
+ * The following field definitions describe the format of the HTT target
+ * to host ppdu stats indication message.
+ *
+ *
+ * |31 16|15 12|11 10|9 8|7 0 |
+ * |----------------------------------------------------------------------|
+ * | payload_size | rsvd |pdev_id|mac_id | msg type |
+ * |----------------------------------------------------------------------|
+ * | ppdu_id |
+ * |----------------------------------------------------------------------|
+ * | Timestamp in us |
+ * |----------------------------------------------------------------------|
+ * | reserved |
+ * |----------------------------------------------------------------------|
+ * | type-specific stats info |
+ * | (see htt_ppdu_stats.h) |
+ * |----------------------------------------------------------------------|
+ * Header fields:
+ * - MSG_TYPE
+ * Bits 7:0
+ * Purpose: Identifies this is a PPDU STATS indication
+ * message.
+ * Value: 0x1d
+ * - mac_id
+ * Bits 9:8
+ * Purpose: mac_id of this ppdu_id
+ * Value: 0-3
+ * - pdev_id
+ * Bits 11:10
+ * Purpose: pdev_id of this ppdu_id
+ * Value: 0-3
+ * 0 (for rings at SOC level),
+ * 1/2/3 PDEV -> 0/1/2
+ * - payload_size
+ * Bits 31:16
+ * Purpose: total tlv size
+ * Value: payload_size in bytes
+ */
+
+#define HTT_T2H_PPDU_STATS_INFO_PDEV_ID GENMASK(11, 10)
+#define HTT_T2H_PPDU_STATS_INFO_PAYLOAD_SIZE GENMASK(31, 16)
+
+struct ath12k_htt_ppdu_stats_msg {
+ __le32 info;
+ __le32 ppdu_id;
+ __le32 timestamp;
+ __le32 rsvd;
+ u8 data[];
+} __packed;
+
+struct htt_tlv {
+ __le32 header;
+ u8 value[];
+} __packed;
+
+#define HTT_TLV_TAG GENMASK(11, 0)
+#define HTT_TLV_LEN GENMASK(23, 12)
+
+enum HTT_PPDU_STATS_BW {
+ HTT_PPDU_STATS_BANDWIDTH_5MHZ = 0,
+ HTT_PPDU_STATS_BANDWIDTH_10MHZ = 1,
+ HTT_PPDU_STATS_BANDWIDTH_20MHZ = 2,
+ HTT_PPDU_STATS_BANDWIDTH_40MHZ = 3,
+ HTT_PPDU_STATS_BANDWIDTH_80MHZ = 4,
+ HTT_PPDU_STATS_BANDWIDTH_160MHZ = 5, /* includes 80+80 */
+ HTT_PPDU_STATS_BANDWIDTH_DYN = 6,
+};
+
+#define HTT_PPDU_STATS_CMN_FLAGS_FRAME_TYPE_M GENMASK(7, 0)
+#define HTT_PPDU_STATS_CMN_FLAGS_QUEUE_TYPE_M GENMASK(15, 8)
+/* bw - HTT_PPDU_STATS_BW */
+#define HTT_PPDU_STATS_CMN_FLAGS_BW_M GENMASK(19, 16)
+
+struct htt_ppdu_stats_common {
+ __le32 ppdu_id;
+ __le16 sched_cmdid;
+ u8 ring_id;
+ u8 num_users;
+ __le32 flags; /* %HTT_PPDU_STATS_COMMON_FLAGS_*/
+ __le32 chain_mask;
+ __le32 fes_duration_us; /* frame exchange sequence */
+ __le32 ppdu_sch_eval_start_tstmp_us;
+ __le32 ppdu_sch_end_tstmp_us;
+ __le32 ppdu_start_tstmp_us;
+ /* BIT [15 : 0] - phy mode (WLAN_PHY_MODE) with which ppdu was transmitted
+ * BIT [31 : 16] - bandwidth (in MHz) with which ppdu was transmitted
+ */
+ __le16 phy_mode;
+ __le16 bw_mhz;
+} __packed;
+
+enum htt_ppdu_stats_gi {
+ HTT_PPDU_STATS_SGI_0_8_US,
+ HTT_PPDU_STATS_SGI_0_4_US,
+ HTT_PPDU_STATS_SGI_1_6_US,
+ HTT_PPDU_STATS_SGI_3_2_US,
+};
+
+#define HTT_PPDU_STATS_USER_RATE_INFO0_USER_POS_M GENMASK(3, 0)
+#define HTT_PPDU_STATS_USER_RATE_INFO0_MU_GROUP_ID_M GENMASK(11, 4)
+
+enum HTT_PPDU_STATS_PPDU_TYPE {
+ HTT_PPDU_STATS_PPDU_TYPE_SU,
+ HTT_PPDU_STATS_PPDU_TYPE_MU_MIMO,
+ HTT_PPDU_STATS_PPDU_TYPE_MU_OFDMA,
+ HTT_PPDU_STATS_PPDU_TYPE_MU_MIMO_OFDMA,
+ HTT_PPDU_STATS_PPDU_TYPE_UL_TRIG,
+ HTT_PPDU_STATS_PPDU_TYPE_BURST_BCN,
+ HTT_PPDU_STATS_PPDU_TYPE_UL_BSR_RESP,
+ HTT_PPDU_STATS_PPDU_TYPE_UL_BSR_TRIG,
+ HTT_PPDU_STATS_PPDU_TYPE_UL_RESP,
+ HTT_PPDU_STATS_PPDU_TYPE_MAX
+};
+
+#define HTT_PPDU_STATS_USER_RATE_INFO1_RESP_TYPE_VALD_M BIT(0)
+#define HTT_PPDU_STATS_USER_RATE_INFO1_PPDU_TYPE_M GENMASK(5, 1)
+
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_LTF_SIZE_M GENMASK(1, 0)
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_STBC_M BIT(2)
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_HE_RE_M BIT(3)
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_TXBF_M GENMASK(7, 4)
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_BW_M GENMASK(11, 8)
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_NSS_M GENMASK(15, 12)
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_MCS_M GENMASK(19, 16)
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_PREAMBLE_M GENMASK(23, 20)
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_GI_M GENMASK(27, 24)
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_DCM_M BIT(28)
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_LDPC_M BIT(29)
+
+#define HTT_USR_RATE_PREAMBLE(_val) \
+ le32_get_bits(_val, HTT_PPDU_STATS_USER_RATE_FLAGS_PREAMBLE_M)
+#define HTT_USR_RATE_BW(_val) \
+ le32_get_bits(_val, HTT_PPDU_STATS_USER_RATE_FLAGS_BW_M)
+#define HTT_USR_RATE_NSS(_val) \
+ le32_get_bits(_val, HTT_PPDU_STATS_USER_RATE_FLAGS_NSS_M)
+#define HTT_USR_RATE_MCS(_val) \
+ le32_get_bits(_val, HTT_PPDU_STATS_USER_RATE_FLAGS_MCS_M)
+#define HTT_USR_RATE_GI(_val) \
+ le32_get_bits(_val, HTT_PPDU_STATS_USER_RATE_FLAGS_GI_M)
+#define HTT_USR_RATE_DCM(_val) \
+ le32_get_bits(_val, HTT_PPDU_STATS_USER_RATE_FLAGS_DCM_M)
+
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_LTF_SIZE_M GENMASK(1, 0)
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_STBC_M BIT(2)
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_HE_RE_M BIT(3)
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_TXBF_M GENMASK(7, 4)
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_BW_M GENMASK(11, 8)
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_NSS_M GENMASK(15, 12)
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_MCS_M GENMASK(19, 16)
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_PREAMBLE_M GENMASK(23, 20)
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_GI_M GENMASK(27, 24)
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_DCM_M BIT(28)
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_LDPC_M BIT(29)
+
+struct htt_ppdu_stats_user_rate {
+ u8 tid_num;
+ u8 reserved0;
+ __le16 sw_peer_id;
+ __le32 info0; /* %HTT_PPDU_STATS_USER_RATE_INFO0_*/
+ __le16 ru_end;
+ __le16 ru_start;
+ __le16 resp_ru_end;
+ __le16 resp_ru_start;
+ __le32 info1; /* %HTT_PPDU_STATS_USER_RATE_INFO1_ */
+ __le32 rate_flags; /* %HTT_PPDU_STATS_USER_RATE_FLAGS_ */
+ /* Note: resp_rate_info is only valid for if resp_type is UL */
+ __le32 resp_rate_flags; /* %HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_ */
+} __packed;
+
+#define HTT_PPDU_STATS_TX_INFO_FLAGS_RATECODE_M GENMASK(7, 0)
+#define HTT_PPDU_STATS_TX_INFO_FLAGS_IS_AMPDU_M BIT(8)
+#define HTT_PPDU_STATS_TX_INFO_FLAGS_BA_ACK_FAILED_M GENMASK(10, 9)
+#define HTT_PPDU_STATS_TX_INFO_FLAGS_BW_M GENMASK(13, 11)
+#define HTT_PPDU_STATS_TX_INFO_FLAGS_SGI_M BIT(14)
+#define HTT_PPDU_STATS_TX_INFO_FLAGS_PEERID_M GENMASK(31, 16)
+
+#define HTT_TX_INFO_IS_AMSDU(_flags) \
+ u32_get_bits(_flags, HTT_PPDU_STATS_TX_INFO_FLAGS_IS_AMPDU_M)
+#define HTT_TX_INFO_BA_ACK_FAILED(_flags) \
+ u32_get_bits(_flags, HTT_PPDU_STATS_TX_INFO_FLAGS_BA_ACK_FAILED_M)
+#define HTT_TX_INFO_RATECODE(_flags) \
+ u32_get_bits(_flags, HTT_PPDU_STATS_TX_INFO_FLAGS_RATECODE_M)
+#define HTT_TX_INFO_PEERID(_flags) \
+ u32_get_bits(_flags, HTT_PPDU_STATS_TX_INFO_FLAGS_PEERID_M)
+
+struct htt_tx_ppdu_stats_info {
+ struct htt_tlv tlv_hdr;
+ __le32 tx_success_bytes;
+ __le32 tx_retry_bytes;
+ __le32 tx_failed_bytes;
+ __le32 flags; /* %HTT_PPDU_STATS_TX_INFO_FLAGS_ */
+ __le16 tx_success_msdus;
+ __le16 tx_retry_msdus;
+ __le16 tx_failed_msdus;
+ __le16 tx_duration; /* united in us */
+} __packed;
+
+enum htt_ppdu_stats_usr_compln_status {
+ HTT_PPDU_STATS_USER_STATUS_OK,
+ HTT_PPDU_STATS_USER_STATUS_FILTERED,
+ HTT_PPDU_STATS_USER_STATUS_RESP_TIMEOUT,
+ HTT_PPDU_STATS_USER_STATUS_RESP_MISMATCH,
+ HTT_PPDU_STATS_USER_STATUS_ABORT,
+};
+
+#define HTT_PPDU_STATS_USR_CMPLTN_CMN_FLAGS_LONG_RETRY_M GENMASK(3, 0)
+#define HTT_PPDU_STATS_USR_CMPLTN_CMN_FLAGS_SHORT_RETRY_M GENMASK(7, 4)
+#define HTT_PPDU_STATS_USR_CMPLTN_CMN_FLAGS_IS_AMPDU_M BIT(8)
+#define HTT_PPDU_STATS_USR_CMPLTN_CMN_FLAGS_RESP_TYPE_M GENMASK(12, 9)
+
+#define HTT_USR_CMPLTN_IS_AMPDU(_val) \
+ le32_get_bits(_val, HTT_PPDU_STATS_USR_CMPLTN_CMN_FLAGS_IS_AMPDU_M)
+#define HTT_USR_CMPLTN_LONG_RETRY(_val) \
+ le32_get_bits(_val, HTT_PPDU_STATS_USR_CMPLTN_CMN_FLAGS_LONG_RETRY_M)
+#define HTT_USR_CMPLTN_SHORT_RETRY(_val) \
+ le32_get_bits(_val, HTT_PPDU_STATS_USR_CMPLTN_CMN_FLAGS_SHORT_RETRY_M)
+
+struct htt_ppdu_stats_usr_cmpltn_cmn {
+ u8 status;
+ u8 tid_num;
+ __le16 sw_peer_id;
+ /* RSSI value of last ack packet (units = dB above noise floor) */
+ __le32 ack_rssi;
+ __le16 mpdu_tried;
+ __le16 mpdu_success;
+ __le32 flags; /* %HTT_PPDU_STATS_USR_CMPLTN_CMN_FLAGS_LONG_RETRIES*/
+} __packed;
+
+#define HTT_PPDU_STATS_ACK_BA_INFO_NUM_MPDU_M GENMASK(8, 0)
+#define HTT_PPDU_STATS_ACK_BA_INFO_NUM_MSDU_M GENMASK(24, 9)
+#define HTT_PPDU_STATS_ACK_BA_INFO_TID_NUM GENMASK(31, 25)
+
+#define HTT_PPDU_STATS_NON_QOS_TID 16
+
+struct htt_ppdu_stats_usr_cmpltn_ack_ba_status {
+ __le32 ppdu_id;
+ __le16 sw_peer_id;
+ __le16 reserved0;
+ __le32 info; /* %HTT_PPDU_STATS_USR_CMPLTN_CMN_INFO_ */
+ __le16 current_seq;
+ __le16 start_seq;
+ __le32 success_bytes;
+} __packed;
+
+struct htt_ppdu_user_stats {
+ u16 peer_id;
+ u16 delay_ba;
+ u32 tlv_flags;
+ bool is_valid_peer_id;
+ struct htt_ppdu_stats_user_rate rate;
+ struct htt_ppdu_stats_usr_cmpltn_cmn cmpltn_cmn;
+ struct htt_ppdu_stats_usr_cmpltn_ack_ba_status ack_ba;
+};
+
+#define HTT_PPDU_STATS_MAX_USERS 8
+#define HTT_PPDU_DESC_MAX_DEPTH 16
+
+struct htt_ppdu_stats {
+ struct htt_ppdu_stats_common common;
+ struct htt_ppdu_user_stats user_stats[HTT_PPDU_STATS_MAX_USERS];
+};
+
+struct htt_ppdu_stats_info {
+ u32 tlv_bitmap;
+ u32 ppdu_id;
+ u32 frame_type;
+ u32 frame_ctrl;
+ u32 delay_ba;
+ u32 bar_num_users;
+ struct htt_ppdu_stats ppdu_stats;
+ struct list_head list;
+};
+
+/* @brief target -> host MLO offset indiciation message
+ *
+ * @details
+ * The following field definitions describe the format of the HTT target
+ * to host mlo offset indication message.
+ *
+ *
+ * |31 29|28 |26|25 22|21 16|15 13|12 10 |9 8|7 0|
+ * |---------------------------------------------------------------------|
+ * | rsvd1 | mac_freq |chip_id |pdev_id|msgtype|
+ * |---------------------------------------------------------------------|
+ * | sync_timestamp_lo_us |
+ * |---------------------------------------------------------------------|
+ * | sync_timestamp_hi_us |
+ * |---------------------------------------------------------------------|
+ * | mlo_offset_lo |
+ * |---------------------------------------------------------------------|
+ * | mlo_offset_hi |
+ * |---------------------------------------------------------------------|
+ * | mlo_offset_clcks |
+ * |---------------------------------------------------------------------|
+ * | rsvd2 | mlo_comp_clks |mlo_comp_us |
+ * |---------------------------------------------------------------------|
+ * | rsvd3 |mlo_comp_timer |
+ * |---------------------------------------------------------------------|
+ * Header fields
+ * - MSG_TYPE
+ * Bits 7:0
+ * Purpose: Identifies this is a MLO offset indication msg
+ * - PDEV_ID
+ * Bits 9:8
+ * Purpose: Pdev of this MLO offset
+ * - CHIP_ID
+ * Bits 12:10
+ * Purpose: chip_id of this MLO offset
+ * - MAC_FREQ
+ * Bits 28:13
+ * - SYNC_TIMESTAMP_LO_US
+ * Purpose: clock frequency of the mac HW block in MHz
+ * Bits: 31:0
+ * Purpose: lower 32 bits of the WLAN global time stamp at which
+ * last sync interrupt was received
+ * - SYNC_TIMESTAMP_HI_US
+ * Bits: 31:0
+ * Purpose: upper 32 bits of WLAN global time stamp at which
+ * last sync interrupt was received
+ * - MLO_OFFSET_LO
+ * Bits: 31:0
+ * Purpose: lower 32 bits of the MLO offset in us
+ * - MLO_OFFSET_HI
+ * Bits: 31:0
+ * Purpose: upper 32 bits of the MLO offset in us
+ * - MLO_COMP_US
+ * Bits: 15:0
+ * Purpose: MLO time stamp compensation applied in us
+ * - MLO_COMP_CLCKS
+ * Bits: 25:16
+ * Purpose: MLO time stamp compensation applied in clock ticks
+ * - MLO_COMP_TIMER
+ * Bits: 21:0
+ * Purpose: Periodic timer at which compensation is applied
+ */
+
+#define HTT_T2H_MLO_OFFSET_INFO_MSG_TYPE GENMASK(7, 0)
+#define HTT_T2H_MLO_OFFSET_INFO_PDEV_ID GENMASK(9, 8)
+
+struct ath12k_htt_mlo_offset_msg {
+ __le32 info;
+ __le32 sync_timestamp_lo_us;
+ __le32 sync_timestamp_hi_us;
+ __le32 mlo_offset_hi;
+ __le32 mlo_offset_lo;
+ __le32 mlo_offset_clks;
+ __le32 mlo_comp_clks;
+ __le32 mlo_comp_timer;
+} __packed;
+
+/* @brief host -> target FW extended statistics retrieve
+ *
+ * @details
+ * The following field definitions describe the format of the HTT host
+ * to target FW extended stats retrieve message.
+ * The message specifies the type of stats the host wants to retrieve.
+ *
+ * |31 24|23 16|15 8|7 0|
+ * |-----------------------------------------------------------|
+ * | reserved | stats type | pdev_mask | msg type |
+ * |-----------------------------------------------------------|
+ * | config param [0] |
+ * |-----------------------------------------------------------|
+ * | config param [1] |
+ * |-----------------------------------------------------------|
+ * | config param [2] |
+ * |-----------------------------------------------------------|
+ * | config param [3] |
+ * |-----------------------------------------------------------|
+ * | reserved |
+ * |-----------------------------------------------------------|
+ * | cookie LSBs |
+ * |-----------------------------------------------------------|
+ * | cookie MSBs |
+ * |-----------------------------------------------------------|
+ * Header fields:
+ * - MSG_TYPE
+ * Bits 7:0
+ * Purpose: identifies this is a extended stats upload request message
+ * Value: 0x10
+ * - PDEV_MASK
+ * Bits 8:15
+ * Purpose: identifies the mask of PDEVs to retrieve stats from
+ * Value: This is a overloaded field, refer to usage and interpretation of
+ * PDEV in interface document.
+ * Bit 8 : Reserved for SOC stats
+ * Bit 9 - 15 : Indicates PDEV_MASK in DBDC
+ * Indicates MACID_MASK in DBS
+ * - STATS_TYPE
+ * Bits 23:16
+ * Purpose: identifies which FW statistics to upload
+ * Value: Defined by htt_dbg_ext_stats_type (see htt_stats.h)
+ * - Reserved
+ * Bits 31:24
+ * - CONFIG_PARAM [0]
+ * Bits 31:0
+ * Purpose: give an opaque configuration value to the specified stats type
+ * Value: stats-type specific configuration value
+ * Refer to htt_stats.h for interpretation for each stats sub_type
+ * - CONFIG_PARAM [1]
+ * Bits 31:0
+ * Purpose: give an opaque configuration value to the specified stats type
+ * Value: stats-type specific configuration value
+ * Refer to htt_stats.h for interpretation for each stats sub_type
+ * - CONFIG_PARAM [2]
+ * Bits 31:0
+ * Purpose: give an opaque configuration value to the specified stats type
+ * Value: stats-type specific configuration value
+ * Refer to htt_stats.h for interpretation for each stats sub_type
+ * - CONFIG_PARAM [3]
+ * Bits 31:0
+ * Purpose: give an opaque configuration value to the specified stats type
+ * Value: stats-type specific configuration value
+ * Refer to htt_stats.h for interpretation for each stats sub_type
+ * - Reserved [31:0] for future use.
+ * - COOKIE_LSBS
+ * Bits 31:0
+ * Purpose: Provide a mechanism to match a target->host stats confirmation
+ * message with its preceding host->target stats request message.
+ * Value: LSBs of the opaque cookie specified by the host-side requestor
+ * - COOKIE_MSBS
+ * Bits 31:0
+ * Purpose: Provide a mechanism to match a target->host stats confirmation
+ * message with its preceding host->target stats request message.
+ * Value: MSBs of the opaque cookie specified by the host-side requestor
+ */
+
+struct htt_ext_stats_cfg_hdr {
+ u8 msg_type;
+ u8 pdev_mask;
+ u8 stats_type;
+ u8 reserved;
+} __packed;
+
+struct htt_ext_stats_cfg_cmd {
+ struct htt_ext_stats_cfg_hdr hdr;
+ __le32 cfg_param0;
+ __le32 cfg_param1;
+ __le32 cfg_param2;
+ __le32 cfg_param3;
+ __le32 reserved;
+ __le32 cookie_lsb;
+ __le32 cookie_msb;
+} __packed;
+
+/* htt stats config default params */
+#define HTT_STAT_DEFAULT_RESET_START_OFFSET 0
+#define HTT_STAT_DEFAULT_CFG0_ALL_HWQS 0xffffffff
+#define HTT_STAT_DEFAULT_CFG0_ALL_TXQS 0xffffffff
+#define HTT_STAT_DEFAULT_CFG0_ALL_CMDQS 0xffff
+#define HTT_STAT_DEFAULT_CFG0_ALL_RINGS 0xffff
+#define HTT_STAT_DEFAULT_CFG0_ACTIVE_PEERS 0xff
+#define HTT_STAT_DEFAULT_CFG0_CCA_CUMULATIVE 0x00
+#define HTT_STAT_DEFAULT_CFG0_ACTIVE_VDEVS 0x00
+
+/* HTT_DBG_EXT_STATS_PEER_INFO
+ * PARAMS:
+ * @config_param0:
+ * [Bit0] - [0] for sw_peer_id, [1] for mac_addr based request
+ * [Bit15 : Bit 1] htt_peer_stats_req_mode_t
+ * [Bit31 : Bit16] sw_peer_id
+ * @config_param1:
+ * peer_stats_req_type_mask:32 (enum htt_peer_stats_tlv_enum)
+ * 0 bit htt_peer_stats_cmn_tlv
+ * 1 bit htt_peer_details_tlv
+ * 2 bit htt_tx_peer_rate_stats_tlv
+ * 3 bit htt_rx_peer_rate_stats_tlv
+ * 4 bit htt_tx_tid_stats_tlv/htt_tx_tid_stats_v1_tlv
+ * 5 bit htt_rx_tid_stats_tlv
+ * 6 bit htt_msdu_flow_stats_tlv
+ * @config_param2: [Bit31 : Bit0] mac_addr31to0
+ * @config_param3: [Bit15 : Bit0] mac_addr47to32
+ * [Bit31 : Bit16] reserved
+ */
+#define HTT_STAT_PEER_INFO_MAC_ADDR BIT(0)
+#define HTT_STAT_DEFAULT_PEER_REQ_TYPE 0x7f
+
+/* Used to set different configs to the specified stats type.*/
+struct htt_ext_stats_cfg_params {
+ u32 cfg0;
+ u32 cfg1;
+ u32 cfg2;
+ u32 cfg3;
+};
+
+enum vdev_stats_offload_timer_duration {
+ ATH12K_STATS_TIMER_DUR_500MS = 1,
+ ATH12K_STATS_TIMER_DUR_1SEC = 2,
+ ATH12K_STATS_TIMER_DUR_2SEC = 3,
+};
+
+static inline void ath12k_dp_get_mac_addr(u32 addr_l32, u16 addr_h16, u8 *addr)
+{
+ memcpy(addr, &addr_l32, 4);
+ memcpy(addr + 4, &addr_h16, ETH_ALEN - 4);
+}
+
+int ath12k_dp_service_srng(struct ath12k_base *ab,
+ struct ath12k_ext_irq_grp *irq_grp,
+ int budget);
+int ath12k_dp_htt_connect(struct ath12k_dp *dp);
+void ath12k_dp_vdev_tx_attach(struct ath12k *ar, struct ath12k_vif *arvif);
+void ath12k_dp_free(struct ath12k_base *ab);
+int ath12k_dp_alloc(struct ath12k_base *ab);
+void ath12k_dp_cc_config(struct ath12k_base *ab);
+int ath12k_dp_pdev_alloc(struct ath12k_base *ab);
+void ath12k_dp_pdev_pre_alloc(struct ath12k_base *ab);
+void ath12k_dp_pdev_free(struct ath12k_base *ab);
+int ath12k_dp_tx_htt_srng_setup(struct ath12k_base *ab, u32 ring_id,
+ int mac_id, enum hal_ring_type ring_type);
+int ath12k_dp_peer_setup(struct ath12k *ar, int vdev_id, const u8 *addr);
+void ath12k_dp_peer_cleanup(struct ath12k *ar, int vdev_id, const u8 *addr);
+void ath12k_dp_srng_cleanup(struct ath12k_base *ab, struct dp_srng *ring);
+int ath12k_dp_srng_setup(struct ath12k_base *ab, struct dp_srng *ring,
+ enum hal_ring_type type, int ring_num,
+ int mac_id, int num_entries);
+void ath12k_dp_link_desc_cleanup(struct ath12k_base *ab,
+ struct dp_link_desc_bank *desc_bank,
+ u32 ring_type, struct dp_srng *ring);
+int ath12k_dp_link_desc_setup(struct ath12k_base *ab,
+ struct dp_link_desc_bank *link_desc_banks,
+ u32 ring_type, struct hal_srng *srng,
+ u32 n_link_desc);
+struct ath12k_rx_desc_info *ath12k_dp_get_rx_desc(struct ath12k_base *ab,
+ u32 cookie);
+struct ath12k_tx_desc_info *ath12k_dp_get_tx_desc(struct ath12k_base *ab,
+ u32 desc_id);
+#endif
diff --git a/drivers/net/wireless/ath/ath12k/dp_mon.c b/drivers/net/wireless/ath/ath12k/dp_mon.c
new file mode 100644
index 000000000000..a214797c96a2
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/dp_mon.c
@@ -0,0 +1,2596 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include "dp_mon.h"
+#include "debug.h"
+#include "dp_rx.h"
+#include "dp_tx.h"
+#include "peer.h"
+
+static void ath12k_dp_mon_rx_handle_ofdma_info(void *rx_tlv,
+ struct hal_rx_user_status *rx_user_status)
+{
+ struct hal_rx_ppdu_end_user_stats *ppdu_end_user =
+ (struct hal_rx_ppdu_end_user_stats *)rx_tlv;
+
+ rx_user_status->ul_ofdma_user_v0_word0 =
+ __le32_to_cpu(ppdu_end_user->usr_resp_ref);
+ rx_user_status->ul_ofdma_user_v0_word1 =
+ __le32_to_cpu(ppdu_end_user->usr_resp_ref_ext);
+}
+
+static void
+ath12k_dp_mon_rx_populate_byte_count(void *rx_tlv, void *ppduinfo,
+ struct hal_rx_user_status *rx_user_status)
+{
+ struct hal_rx_ppdu_end_user_stats *ppdu_end_user =
+ (struct hal_rx_ppdu_end_user_stats *)rx_tlv;
+ u32 mpdu_ok_byte_count = __le32_to_cpu(ppdu_end_user->mpdu_ok_cnt);
+ u32 mpdu_err_byte_count = __le32_to_cpu(ppdu_end_user->mpdu_err_cnt);
+
+ rx_user_status->mpdu_ok_byte_count =
+ u32_get_bits(mpdu_ok_byte_count,
+ HAL_RX_PPDU_END_USER_STATS_MPDU_DELIM_OK_BYTE_COUNT);
+ rx_user_status->mpdu_err_byte_count =
+ u32_get_bits(mpdu_err_byte_count,
+ HAL_RX_PPDU_END_USER_STATS_MPDU_DELIM_ERR_BYTE_COUNT);
+}
+
+static void
+ath12k_dp_mon_rx_populate_mu_user_info(void *rx_tlv,
+ struct hal_rx_mon_ppdu_info *ppdu_info,
+ struct hal_rx_user_status *rx_user_status)
+{
+ rx_user_status->ast_index = ppdu_info->ast_index;
+ rx_user_status->tid = ppdu_info->tid;
+ rx_user_status->tcp_ack_msdu_count =
+ ppdu_info->tcp_ack_msdu_count;
+ rx_user_status->tcp_msdu_count =
+ ppdu_info->tcp_msdu_count;
+ rx_user_status->udp_msdu_count =
+ ppdu_info->udp_msdu_count;
+ rx_user_status->other_msdu_count =
+ ppdu_info->other_msdu_count;
+ rx_user_status->frame_control = ppdu_info->frame_control;
+ rx_user_status->frame_control_info_valid =
+ ppdu_info->frame_control_info_valid;
+ rx_user_status->data_sequence_control_info_valid =
+ ppdu_info->data_sequence_control_info_valid;
+ rx_user_status->first_data_seq_ctrl =
+ ppdu_info->first_data_seq_ctrl;
+ rx_user_status->preamble_type = ppdu_info->preamble_type;
+ rx_user_status->ht_flags = ppdu_info->ht_flags;
+ rx_user_status->vht_flags = ppdu_info->vht_flags;
+ rx_user_status->he_flags = ppdu_info->he_flags;
+ rx_user_status->rs_flags = ppdu_info->rs_flags;
+
+ rx_user_status->mpdu_cnt_fcs_ok =
+ ppdu_info->num_mpdu_fcs_ok;
+ rx_user_status->mpdu_cnt_fcs_err =
+ ppdu_info->num_mpdu_fcs_err;
+ memcpy(&rx_user_status->mpdu_fcs_ok_bitmap[0], &ppdu_info->mpdu_fcs_ok_bitmap[0],
+ HAL_RX_NUM_WORDS_PER_PPDU_BITMAP *
+ sizeof(ppdu_info->mpdu_fcs_ok_bitmap[0]));
+
+ ath12k_dp_mon_rx_populate_byte_count(rx_tlv, ppdu_info, rx_user_status);
+}
+
+static void ath12k_dp_mon_parse_vht_sig_a(u8 *tlv_data,
+ struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+ struct hal_rx_vht_sig_a_info *vht_sig =
+ (struct hal_rx_vht_sig_a_info *)tlv_data;
+ u32 nsts, group_id, info0, info1;
+ u8 gi_setting;
+
+ info0 = __le32_to_cpu(vht_sig->info0);
+ info1 = __le32_to_cpu(vht_sig->info1);
+
+ ppdu_info->ldpc = u32_get_bits(info1, HAL_RX_VHT_SIG_A_INFO_INFO1_SU_MU_CODING);
+ ppdu_info->mcs = u32_get_bits(info1, HAL_RX_VHT_SIG_A_INFO_INFO1_MCS);
+ gi_setting = u32_get_bits(info1, HAL_RX_VHT_SIG_A_INFO_INFO1_GI_SETTING);
+ switch (gi_setting) {
+ case HAL_RX_VHT_SIG_A_NORMAL_GI:
+ ppdu_info->gi = HAL_RX_GI_0_8_US;
+ break;
+ case HAL_RX_VHT_SIG_A_SHORT_GI:
+ case HAL_RX_VHT_SIG_A_SHORT_GI_AMBIGUITY:
+ ppdu_info->gi = HAL_RX_GI_0_4_US;
+ break;
+ }
+
+ ppdu_info->is_stbc = u32_get_bits(info0, HAL_RX_VHT_SIG_A_INFO_INFO0_STBC);
+ nsts = u32_get_bits(info0, HAL_RX_VHT_SIG_A_INFO_INFO0_NSTS);
+ if (ppdu_info->is_stbc && nsts > 0)
+ nsts = ((nsts + 1) >> 1) - 1;
+
+ ppdu_info->nss = u32_get_bits(nsts, VHT_SIG_SU_NSS_MASK);
+ ppdu_info->bw = u32_get_bits(info0, HAL_RX_VHT_SIG_A_INFO_INFO0_BW);
+ ppdu_info->beamformed = u32_get_bits(info1,
+ HAL_RX_VHT_SIG_A_INFO_INFO1_BEAMFORMED);
+ group_id = u32_get_bits(info0, HAL_RX_VHT_SIG_A_INFO_INFO0_GROUP_ID);
+ if (group_id == 0 || group_id == 63)
+ ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU;
+ else
+ ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_MIMO;
+ ppdu_info->vht_flag_values5 = group_id;
+ ppdu_info->vht_flag_values3[0] = (((ppdu_info->mcs) << 4) |
+ ppdu_info->nss);
+ ppdu_info->vht_flag_values2 = ppdu_info->bw;
+ ppdu_info->vht_flag_values4 =
+ u32_get_bits(info1, HAL_RX_VHT_SIG_A_INFO_INFO1_SU_MU_CODING);
+}
+
+static void ath12k_dp_mon_parse_ht_sig(u8 *tlv_data,
+ struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+ struct hal_rx_ht_sig_info *ht_sig =
+ (struct hal_rx_ht_sig_info *)tlv_data;
+ u32 info0 = __le32_to_cpu(ht_sig->info0);
+ u32 info1 = __le32_to_cpu(ht_sig->info1);
+
+ ppdu_info->mcs = u32_get_bits(info0, HAL_RX_HT_SIG_INFO_INFO0_MCS);
+ ppdu_info->bw = u32_get_bits(info0, HAL_RX_HT_SIG_INFO_INFO0_BW);
+ ppdu_info->is_stbc = u32_get_bits(info1, HAL_RX_HT_SIG_INFO_INFO1_STBC);
+ ppdu_info->ldpc = u32_get_bits(info1, HAL_RX_HT_SIG_INFO_INFO1_FEC_CODING);
+ ppdu_info->gi = u32_get_bits(info1, HAL_RX_HT_SIG_INFO_INFO1_GI);
+ ppdu_info->nss = (ppdu_info->mcs >> 3);
+ ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU;
+}
+
+static void ath12k_dp_mon_parse_l_sig_b(u8 *tlv_data,
+ struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+ struct hal_rx_lsig_b_info *lsigb =
+ (struct hal_rx_lsig_b_info *)tlv_data;
+ u32 info0 = __le32_to_cpu(lsigb->info0);
+ u8 rate;
+
+ rate = u32_get_bits(info0, HAL_RX_LSIG_B_INFO_INFO0_RATE);
+ switch (rate) {
+ case 1:
+ rate = HAL_RX_LEGACY_RATE_1_MBPS;
+ break;
+ case 2:
+ case 5:
+ rate = HAL_RX_LEGACY_RATE_2_MBPS;
+ break;
+ case 3:
+ case 6:
+ rate = HAL_RX_LEGACY_RATE_5_5_MBPS;
+ break;
+ case 4:
+ case 7:
+ rate = HAL_RX_LEGACY_RATE_11_MBPS;
+ break;
+ default:
+ rate = HAL_RX_LEGACY_RATE_INVALID;
+ }
+
+ ppdu_info->rate = rate;
+ ppdu_info->cck_flag = 1;
+ ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU;
+}
+
+static void ath12k_dp_mon_parse_l_sig_a(u8 *tlv_data,
+ struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+ struct hal_rx_lsig_a_info *lsiga =
+ (struct hal_rx_lsig_a_info *)tlv_data;
+ u32 info0 = __le32_to_cpu(lsiga->info0);
+ u8 rate;
+
+ rate = u32_get_bits(info0, HAL_RX_LSIG_A_INFO_INFO0_RATE);
+ switch (rate) {
+ case 8:
+ rate = HAL_RX_LEGACY_RATE_48_MBPS;
+ break;
+ case 9:
+ rate = HAL_RX_LEGACY_RATE_24_MBPS;
+ break;
+ case 10:
+ rate = HAL_RX_LEGACY_RATE_12_MBPS;
+ break;
+ case 11:
+ rate = HAL_RX_LEGACY_RATE_6_MBPS;
+ break;
+ case 12:
+ rate = HAL_RX_LEGACY_RATE_54_MBPS;
+ break;
+ case 13:
+ rate = HAL_RX_LEGACY_RATE_36_MBPS;
+ break;
+ case 14:
+ rate = HAL_RX_LEGACY_RATE_18_MBPS;
+ break;
+ case 15:
+ rate = HAL_RX_LEGACY_RATE_9_MBPS;
+ break;
+ default:
+ rate = HAL_RX_LEGACY_RATE_INVALID;
+ }
+
+ ppdu_info->rate = rate;
+ ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU;
+}
+
+static void ath12k_dp_mon_parse_he_sig_b2_ofdma(u8 *tlv_data,
+ struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+ struct hal_rx_he_sig_b2_ofdma_info *he_sig_b2_ofdma =
+ (struct hal_rx_he_sig_b2_ofdma_info *)tlv_data;
+ u32 info0, value;
+
+ info0 = __le32_to_cpu(he_sig_b2_ofdma->info0);
+
+ ppdu_info->he_data1 |= HE_MCS_KNOWN | HE_DCM_KNOWN | HE_CODING_KNOWN;
+
+ /* HE-data2 */
+ ppdu_info->he_data2 |= HE_TXBF_KNOWN;
+
+ ppdu_info->mcs = u32_get_bits(info0, HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_MCS);
+ value = ppdu_info->mcs << HE_TRANSMIT_MCS_SHIFT;
+ ppdu_info->he_data3 |= value;
+
+ value = u32_get_bits(info0, HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_DCM);
+ value = value << HE_DCM_SHIFT;
+ ppdu_info->he_data3 |= value;
+
+ value = u32_get_bits(info0, HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_CODING);
+ ppdu_info->ldpc = value;
+ value = value << HE_CODING_SHIFT;
+ ppdu_info->he_data3 |= value;
+
+ /* HE-data4 */
+ value = u32_get_bits(info0, HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_ID);
+ value = value << HE_STA_ID_SHIFT;
+ ppdu_info->he_data4 |= value;
+
+ ppdu_info->nss = u32_get_bits(info0, HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_NSTS);
+ ppdu_info->beamformed = u32_get_bits(info0,
+ HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_TXBF);
+ ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_OFDMA;
+}
+
+static void ath12k_dp_mon_parse_he_sig_b2_mu(u8 *tlv_data,
+ struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+ struct hal_rx_he_sig_b2_mu_info *he_sig_b2_mu =
+ (struct hal_rx_he_sig_b2_mu_info *)tlv_data;
+ u32 info0, value;
+
+ info0 = __le32_to_cpu(he_sig_b2_mu->info0);
+
+ ppdu_info->he_data1 |= HE_MCS_KNOWN | HE_CODING_KNOWN;
+
+ ppdu_info->mcs = u32_get_bits(info0, HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_MCS);
+ value = ppdu_info->mcs << HE_TRANSMIT_MCS_SHIFT;
+ ppdu_info->he_data3 |= value;
+
+ value = u32_get_bits(info0, HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_CODING);
+ ppdu_info->ldpc = value;
+ value = value << HE_CODING_SHIFT;
+ ppdu_info->he_data3 |= value;
+
+ value = u32_get_bits(info0, HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_ID);
+ value = value << HE_STA_ID_SHIFT;
+ ppdu_info->he_data4 |= value;
+
+ ppdu_info->nss = u32_get_bits(info0, HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_NSTS);
+}
+
+static void ath12k_dp_mon_parse_he_sig_b1_mu(u8 *tlv_data,
+ struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+ struct hal_rx_he_sig_b1_mu_info *he_sig_b1_mu =
+ (struct hal_rx_he_sig_b1_mu_info *)tlv_data;
+ u32 info0 = __le32_to_cpu(he_sig_b1_mu->info0);
+ u16 ru_tones;
+
+ ru_tones = u32_get_bits(info0,
+ HAL_RX_HE_SIG_B1_MU_INFO_INFO0_RU_ALLOCATION);
+ ppdu_info->ru_alloc = ath12k_he_ru_tones_to_nl80211_he_ru_alloc(ru_tones);
+ ppdu_info->he_RU[0] = ru_tones;
+ ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_MIMO;
+}
+
+static void ath12k_dp_mon_parse_he_sig_mu(u8 *tlv_data,
+ struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+ struct hal_rx_he_sig_a_mu_dl_info *he_sig_a_mu_dl =
+ (struct hal_rx_he_sig_a_mu_dl_info *)tlv_data;
+ u32 info0, info1, value;
+ u16 he_gi = 0, he_ltf = 0;
+
+ info0 = __le32_to_cpu(he_sig_a_mu_dl->info0);
+ info1 = __le32_to_cpu(he_sig_a_mu_dl->info1);
+
+ ppdu_info->he_mu_flags = 1;
+
+ ppdu_info->he_data1 = HE_MU_FORMAT_TYPE;
+ ppdu_info->he_data1 |=
+ HE_BSS_COLOR_KNOWN |
+ HE_DL_UL_KNOWN |
+ HE_LDPC_EXTRA_SYMBOL_KNOWN |
+ HE_STBC_KNOWN |
+ HE_DATA_BW_RU_KNOWN |
+ HE_DOPPLER_KNOWN;
+
+ ppdu_info->he_data2 =
+ HE_GI_KNOWN |
+ HE_LTF_SYMBOLS_KNOWN |
+ HE_PRE_FEC_PADDING_KNOWN |
+ HE_PE_DISAMBIGUITY_KNOWN |
+ HE_TXOP_KNOWN |
+ HE_MIDABLE_PERIODICITY_KNOWN;
+
+ /* data3 */
+ ppdu_info->he_data3 = u32_get_bits(info0, HAL_RX_HE_SIG_A_MU_DL_INFO0_BSS_COLOR);
+ value = u32_get_bits(info0, HAL_RX_HE_SIG_A_MU_DL_INFO0_UL_FLAG);
+ value = value << HE_DL_UL_SHIFT;
+ ppdu_info->he_data3 |= value;
+
+ value = u32_get_bits(info1, HAL_RX_HE_SIG_A_MU_DL_INFO1_LDPC_EXTRA);
+ value = value << HE_LDPC_EXTRA_SYMBOL_SHIFT;
+ ppdu_info->he_data3 |= value;
+
+ value = u32_get_bits(info1, HAL_RX_HE_SIG_A_MU_DL_INFO1_STBC);
+ value = value << HE_STBC_SHIFT;
+ ppdu_info->he_data3 |= value;
+
+ /* data4 */
+ ppdu_info->he_data4 = u32_get_bits(info0,
+ HAL_RX_HE_SIG_A_MU_DL_INFO0_SPATIAL_REUSE);
+ ppdu_info->he_data4 = value;
+
+ /* data5 */
+ value = u32_get_bits(info0, HAL_RX_HE_SIG_A_MU_DL_INFO0_TRANSMIT_BW);
+ ppdu_info->he_data5 = value;
+ ppdu_info->bw = value;
+
+ value = u32_get_bits(info0, HAL_RX_HE_SIG_A_MU_DL_INFO0_CP_LTF_SIZE);
+ switch (value) {
+ case 0:
+ he_gi = HE_GI_0_8;
+ he_ltf = HE_LTF_4_X;
+ break;
+ case 1:
+ he_gi = HE_GI_0_8;
+ he_ltf = HE_LTF_2_X;
+ break;
+ case 2:
+ he_gi = HE_GI_1_6;
+ he_ltf = HE_LTF_2_X;
+ break;
+ case 3:
+ he_gi = HE_GI_3_2;
+ he_ltf = HE_LTF_4_X;
+ break;
+ }
+
+ ppdu_info->gi = he_gi;
+ value = he_gi << HE_GI_SHIFT;
+ ppdu_info->he_data5 |= value;
+
+ value = he_ltf << HE_LTF_SIZE_SHIFT;
+ ppdu_info->he_data5 |= value;
+
+ value = u32_get_bits(info1, HAL_RX_HE_SIG_A_MU_DL_INFO1_NUM_LTF_SYMB);
+ value = (value << HE_LTF_SYM_SHIFT);
+ ppdu_info->he_data5 |= value;
+
+ value = u32_get_bits(info1, HAL_RX_HE_SIG_A_MU_DL_INFO1_PKT_EXT_FACTOR);
+ value = value << HE_PRE_FEC_PAD_SHIFT;
+ ppdu_info->he_data5 |= value;
+
+ value = u32_get_bits(info1, HAL_RX_HE_SIG_A_MU_DL_INFO1_PKT_EXT_PE_DISAM);
+ value = value << HE_PE_DISAMBIGUITY_SHIFT;
+ ppdu_info->he_data5 |= value;
+
+ /*data6*/
+ value = u32_get_bits(info0, HAL_RX_HE_SIG_A_MU_DL_INFO0_DOPPLER_INDICATION);
+ value = value << HE_DOPPLER_SHIFT;
+ ppdu_info->he_data6 |= value;
+
+ value = u32_get_bits(info1, HAL_RX_HE_SIG_A_MU_DL_INFO1_TXOP_DURATION);
+ value = value << HE_TXOP_SHIFT;
+ ppdu_info->he_data6 |= value;
+
+ /* HE-MU Flags */
+ /* HE-MU-flags1 */
+ ppdu_info->he_flags1 =
+ HE_SIG_B_MCS_KNOWN |
+ HE_SIG_B_DCM_KNOWN |
+ HE_SIG_B_COMPRESSION_FLAG_1_KNOWN |
+ HE_SIG_B_SYM_NUM_KNOWN |
+ HE_RU_0_KNOWN;
+
+ value = u32_get_bits(info0, HAL_RX_HE_SIG_A_MU_DL_INFO0_MCS_OF_SIGB);
+ ppdu_info->he_flags1 |= value;
+ value = u32_get_bits(info0, HAL_RX_HE_SIG_A_MU_DL_INFO0_DCM_OF_SIGB);
+ value = value << HE_DCM_FLAG_1_SHIFT;
+ ppdu_info->he_flags1 |= value;
+
+ /* HE-MU-flags2 */
+ ppdu_info->he_flags2 = HE_BW_KNOWN;
+
+ value = u32_get_bits(info0, HAL_RX_HE_SIG_A_MU_DL_INFO0_TRANSMIT_BW);
+ ppdu_info->he_flags2 |= value;
+ value = u32_get_bits(info0, HAL_RX_HE_SIG_A_MU_DL_INFO0_COMP_MODE_SIGB);
+ value = value << HE_SIG_B_COMPRESSION_FLAG_2_SHIFT;
+ ppdu_info->he_flags2 |= value;
+ value = u32_get_bits(info0, HAL_RX_HE_SIG_A_MU_DL_INFO0_NUM_SIGB_SYMB);
+ value = value - 1;
+ value = value << HE_NUM_SIG_B_SYMBOLS_SHIFT;
+ ppdu_info->he_flags2 |= value;
+
+ ppdu_info->is_stbc = info1 &
+ HAL_RX_HE_SIG_A_MU_DL_INFO1_STBC;
+ ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_MIMO;
+}
+
+static void ath12k_dp_mon_parse_he_sig_su(u8 *tlv_data,
+ struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+ struct hal_rx_he_sig_a_su_info *he_sig_a =
+ (struct hal_rx_he_sig_a_su_info *)tlv_data;
+ u32 info0, info1, value;
+ u32 dcm;
+ u8 he_dcm = 0, he_stbc = 0;
+ u16 he_gi = 0, he_ltf = 0;
+
+ ppdu_info->he_flags = 1;
+
+ info0 = __le32_to_cpu(he_sig_a->info0);
+ info1 = __le32_to_cpu(he_sig_a->info1);
+
+ value = u32_get_bits(info0, HAL_RX_HE_SIG_A_SU_INFO_INFO0_FORMAT_IND);
+ if (value == 0)
+ ppdu_info->he_data1 = HE_TRIG_FORMAT_TYPE;
+ else
+ ppdu_info->he_data1 = HE_SU_FORMAT_TYPE;
+
+ ppdu_info->he_data1 |=
+ HE_BSS_COLOR_KNOWN |
+ HE_BEAM_CHANGE_KNOWN |
+ HE_DL_UL_KNOWN |
+ HE_MCS_KNOWN |
+ HE_DCM_KNOWN |
+ HE_CODING_KNOWN |
+ HE_LDPC_EXTRA_SYMBOL_KNOWN |
+ HE_STBC_KNOWN |
+ HE_DATA_BW_RU_KNOWN |
+ HE_DOPPLER_KNOWN;
+
+ ppdu_info->he_data2 |=
+ HE_GI_KNOWN |
+ HE_TXBF_KNOWN |
+ HE_PE_DISAMBIGUITY_KNOWN |
+ HE_TXOP_KNOWN |
+ HE_LTF_SYMBOLS_KNOWN |
+ HE_PRE_FEC_PADDING_KNOWN |
+ HE_MIDABLE_PERIODICITY_KNOWN;
+
+ ppdu_info->he_data3 = u32_get_bits(info0,
+ HAL_RX_HE_SIG_A_SU_INFO_INFO0_BSS_COLOR);
+ value = u32_get_bits(info0, HAL_RX_HE_SIG_A_SU_INFO_INFO0_BEAM_CHANGE);
+ value = value << HE_BEAM_CHANGE_SHIFT;
+ ppdu_info->he_data3 |= value;
+ value = u32_get_bits(info0, HAL_RX_HE_SIG_A_SU_INFO_INFO0_DL_UL_FLAG);
+ value = value << HE_DL_UL_SHIFT;
+ ppdu_info->he_data3 |= value;
+
+ value = u32_get_bits(info0, HAL_RX_HE_SIG_A_SU_INFO_INFO0_TRANSMIT_MCS);
+ ppdu_info->mcs = value;
+ value = value << HE_TRANSMIT_MCS_SHIFT;
+ ppdu_info->he_data3 |= value;
+
+ value = u32_get_bits(info0, HAL_RX_HE_SIG_A_SU_INFO_INFO0_DCM);
+ he_dcm = value;
+ value = value << HE_DCM_SHIFT;
+ ppdu_info->he_data3 |= value;
+ value = u32_get_bits(info1, HAL_RX_HE_SIG_A_SU_INFO_INFO1_CODING);
+ value = value << HE_CODING_SHIFT;
+ ppdu_info->he_data3 |= value;
+ value = u32_get_bits(info1, HAL_RX_HE_SIG_A_SU_INFO_INFO1_LDPC_EXTRA);
+ value = value << HE_LDPC_EXTRA_SYMBOL_SHIFT;
+ ppdu_info->he_data3 |= value;
+ value = u32_get_bits(info1, HAL_RX_HE_SIG_A_SU_INFO_INFO1_STBC);
+ he_stbc = value;
+ value = value << HE_STBC_SHIFT;
+ ppdu_info->he_data3 |= value;
+
+ /* data4 */
+ ppdu_info->he_data4 = u32_get_bits(info0,
+ HAL_RX_HE_SIG_A_SU_INFO_INFO0_SPATIAL_REUSE);
+
+ /* data5 */
+ value = u32_get_bits(info0,
+ HAL_RX_HE_SIG_A_SU_INFO_INFO0_TRANSMIT_BW);
+ ppdu_info->he_data5 = value;
+ ppdu_info->bw = value;
+ value = u32_get_bits(info0, HAL_RX_HE_SIG_A_SU_INFO_INFO0_CP_LTF_SIZE);
+ switch (value) {
+ case 0:
+ he_gi = HE_GI_0_8;
+ he_ltf = HE_LTF_1_X;
+ break;
+ case 1:
+ he_gi = HE_GI_0_8;
+ he_ltf = HE_LTF_2_X;
+ break;
+ case 2:
+ he_gi = HE_GI_1_6;
+ he_ltf = HE_LTF_2_X;
+ break;
+ case 3:
+ if (he_dcm && he_stbc) {
+ he_gi = HE_GI_0_8;
+ he_ltf = HE_LTF_4_X;
+ } else {
+ he_gi = HE_GI_3_2;
+ he_ltf = HE_LTF_4_X;
+ }
+ break;
+ }
+ ppdu_info->gi = he_gi;
+ value = he_gi << HE_GI_SHIFT;
+ ppdu_info->he_data5 |= value;
+ value = he_ltf << HE_LTF_SIZE_SHIFT;
+ ppdu_info->ltf_size = he_ltf;
+ ppdu_info->he_data5 |= value;
+
+ value = u32_get_bits(info0, HAL_RX_HE_SIG_A_SU_INFO_INFO0_NSTS);
+ value = (value << HE_LTF_SYM_SHIFT);
+ ppdu_info->he_data5 |= value;
+
+ value = u32_get_bits(info1, HAL_RX_HE_SIG_A_SU_INFO_INFO1_PKT_EXT_FACTOR);
+ value = value << HE_PRE_FEC_PAD_SHIFT;
+ ppdu_info->he_data5 |= value;
+
+ value = u32_get_bits(info1, HAL_RX_HE_SIG_A_SU_INFO_INFO1_TXBF);
+ value = value << HE_TXBF_SHIFT;
+ ppdu_info->he_data5 |= value;
+ value = u32_get_bits(info1, HAL_RX_HE_SIG_A_SU_INFO_INFO1_PKT_EXT_PE_DISAM);
+ value = value << HE_PE_DISAMBIGUITY_SHIFT;
+ ppdu_info->he_data5 |= value;
+
+ /* data6 */
+ value = u32_get_bits(info0, HAL_RX_HE_SIG_A_SU_INFO_INFO0_NSTS);
+ value++;
+ ppdu_info->he_data6 = value;
+ value = u32_get_bits(info1, HAL_RX_HE_SIG_A_SU_INFO_INFO1_DOPPLER_IND);
+ value = value << HE_DOPPLER_SHIFT;
+ ppdu_info->he_data6 |= value;
+ value = u32_get_bits(info1, HAL_RX_HE_SIG_A_SU_INFO_INFO1_TXOP_DURATION);
+ value = value << HE_TXOP_SHIFT;
+ ppdu_info->he_data6 |= value;
+
+ ppdu_info->mcs =
+ u32_get_bits(info0, HAL_RX_HE_SIG_A_SU_INFO_INFO0_TRANSMIT_MCS);
+ ppdu_info->bw =
+ u32_get_bits(info0, HAL_RX_HE_SIG_A_SU_INFO_INFO0_TRANSMIT_BW);
+ ppdu_info->ldpc = u32_get_bits(info1, HAL_RX_HE_SIG_A_SU_INFO_INFO1_CODING);
+ ppdu_info->is_stbc = u32_get_bits(info1, HAL_RX_HE_SIG_A_SU_INFO_INFO1_STBC);
+ ppdu_info->beamformed = u32_get_bits(info1, HAL_RX_HE_SIG_A_SU_INFO_INFO1_TXBF);
+ dcm = u32_get_bits(info0, HAL_RX_HE_SIG_A_SU_INFO_INFO0_DCM);
+ ppdu_info->nss = u32_get_bits(info0, HAL_RX_HE_SIG_A_SU_INFO_INFO0_NSTS);
+ ppdu_info->dcm = dcm;
+ ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU;
+}
+
+static enum hal_rx_mon_status
+ath12k_dp_mon_rx_parse_status_tlv(struct ath12k_base *ab,
+ struct ath12k_mon_data *pmon,
+ u32 tlv_tag, u8 *tlv_data, u32 userid)
+{
+ struct hal_rx_mon_ppdu_info *ppdu_info = &pmon->mon_ppdu_info;
+ u32 info[7];
+
+ switch (tlv_tag) {
+ case HAL_RX_PPDU_START: {
+ struct hal_rx_ppdu_start *ppdu_start =
+ (struct hal_rx_ppdu_start *)tlv_data;
+
+ info[0] = __le32_to_cpu(ppdu_start->info0);
+
+ ppdu_info->ppdu_id =
+ u32_get_bits(info[0], HAL_RX_PPDU_START_INFO0_PPDU_ID);
+ ppdu_info->chan_num = __le32_to_cpu(ppdu_start->chan_num);
+ ppdu_info->ppdu_ts = __le32_to_cpu(ppdu_start->ppdu_start_ts);
+
+ if (ppdu_info->ppdu_id != ppdu_info->last_ppdu_id) {
+ ppdu_info->last_ppdu_id = ppdu_info->ppdu_id;
+ ppdu_info->num_users = 0;
+ memset(&ppdu_info->mpdu_fcs_ok_bitmap, 0,
+ HAL_RX_NUM_WORDS_PER_PPDU_BITMAP *
+ sizeof(ppdu_info->mpdu_fcs_ok_bitmap[0]));
+ }
+ break;
+ }
+ case HAL_RX_PPDU_END_USER_STATS: {
+ struct hal_rx_ppdu_end_user_stats *eu_stats =
+ (struct hal_rx_ppdu_end_user_stats *)tlv_data;
+
+ info[0] = __le32_to_cpu(eu_stats->info0);
+ info[1] = __le32_to_cpu(eu_stats->info1);
+ info[2] = __le32_to_cpu(eu_stats->info2);
+ info[4] = __le32_to_cpu(eu_stats->info4);
+ info[5] = __le32_to_cpu(eu_stats->info5);
+ info[6] = __le32_to_cpu(eu_stats->info6);
+
+ ppdu_info->ast_index =
+ u32_get_bits(info[2], HAL_RX_PPDU_END_USER_STATS_INFO2_AST_INDEX);
+ ppdu_info->fc_valid =
+ u32_get_bits(info[1], HAL_RX_PPDU_END_USER_STATS_INFO1_FC_VALID);
+ ppdu_info->tid =
+ ffs(u32_get_bits(info[6],
+ HAL_RX_PPDU_END_USER_STATS_INFO6_TID_BITMAP)
+ - 1);
+ ppdu_info->tcp_msdu_count =
+ u32_get_bits(info[4],
+ HAL_RX_PPDU_END_USER_STATS_INFO4_TCP_MSDU_CNT);
+ ppdu_info->udp_msdu_count =
+ u32_get_bits(info[4],
+ HAL_RX_PPDU_END_USER_STATS_INFO4_UDP_MSDU_CNT);
+ ppdu_info->other_msdu_count =
+ u32_get_bits(info[5],
+ HAL_RX_PPDU_END_USER_STATS_INFO5_OTHER_MSDU_CNT);
+ ppdu_info->tcp_ack_msdu_count =
+ u32_get_bits(info[5],
+ HAL_RX_PPDU_END_USER_STATS_INFO5_TCP_ACK_MSDU_CNT);
+ ppdu_info->preamble_type =
+ u32_get_bits(info[1],
+ HAL_RX_PPDU_END_USER_STATS_INFO1_PKT_TYPE);
+ ppdu_info->num_mpdu_fcs_ok =
+ u32_get_bits(info[1],
+ HAL_RX_PPDU_END_USER_STATS_INFO1_MPDU_CNT_FCS_OK);
+ ppdu_info->num_mpdu_fcs_err =
+ u32_get_bits(info[0],
+ HAL_RX_PPDU_END_USER_STATS_INFO0_MPDU_CNT_FCS_ERR);
+ switch (ppdu_info->preamble_type) {
+ case HAL_RX_PREAMBLE_11N:
+ ppdu_info->ht_flags = 1;
+ break;
+ case HAL_RX_PREAMBLE_11AC:
+ ppdu_info->vht_flags = 1;
+ break;
+ case HAL_RX_PREAMBLE_11AX:
+ ppdu_info->he_flags = 1;
+ break;
+ default:
+ break;
+ }
+
+ if (userid < HAL_MAX_UL_MU_USERS) {
+ struct hal_rx_user_status *rxuser_stats =
+ &ppdu_info->userstats[userid];
+ ppdu_info->num_users += 1;
+
+ ath12k_dp_mon_rx_handle_ofdma_info(tlv_data, rxuser_stats);
+ ath12k_dp_mon_rx_populate_mu_user_info(tlv_data, ppdu_info,
+ rxuser_stats);
+ }
+ ppdu_info->mpdu_fcs_ok_bitmap[0] = __le32_to_cpu(eu_stats->rsvd1[0]);
+ ppdu_info->mpdu_fcs_ok_bitmap[1] = __le32_to_cpu(eu_stats->rsvd1[1]);
+ break;
+ }
+ case HAL_RX_PPDU_END_USER_STATS_EXT: {
+ struct hal_rx_ppdu_end_user_stats_ext *eu_stats =
+ (struct hal_rx_ppdu_end_user_stats_ext *)tlv_data;
+ ppdu_info->mpdu_fcs_ok_bitmap[2] = __le32_to_cpu(eu_stats->info1);
+ ppdu_info->mpdu_fcs_ok_bitmap[3] = __le32_to_cpu(eu_stats->info2);
+ ppdu_info->mpdu_fcs_ok_bitmap[4] = __le32_to_cpu(eu_stats->info3);
+ ppdu_info->mpdu_fcs_ok_bitmap[5] = __le32_to_cpu(eu_stats->info4);
+ ppdu_info->mpdu_fcs_ok_bitmap[6] = __le32_to_cpu(eu_stats->info5);
+ ppdu_info->mpdu_fcs_ok_bitmap[7] = __le32_to_cpu(eu_stats->info6);
+ break;
+ }
+ case HAL_PHYRX_HT_SIG:
+ ath12k_dp_mon_parse_ht_sig(tlv_data, ppdu_info);
+ break;
+
+ case HAL_PHYRX_L_SIG_B:
+ ath12k_dp_mon_parse_l_sig_b(tlv_data, ppdu_info);
+ break;
+
+ case HAL_PHYRX_L_SIG_A:
+ ath12k_dp_mon_parse_l_sig_a(tlv_data, ppdu_info);
+ break;
+
+ case HAL_PHYRX_VHT_SIG_A:
+ ath12k_dp_mon_parse_vht_sig_a(tlv_data, ppdu_info);
+ break;
+
+ case HAL_PHYRX_HE_SIG_A_SU:
+ ath12k_dp_mon_parse_he_sig_su(tlv_data, ppdu_info);
+ break;
+
+ case HAL_PHYRX_HE_SIG_A_MU_DL:
+ ath12k_dp_mon_parse_he_sig_mu(tlv_data, ppdu_info);
+ break;
+
+ case HAL_PHYRX_HE_SIG_B1_MU:
+ ath12k_dp_mon_parse_he_sig_b1_mu(tlv_data, ppdu_info);
+ break;
+
+ case HAL_PHYRX_HE_SIG_B2_MU:
+ ath12k_dp_mon_parse_he_sig_b2_mu(tlv_data, ppdu_info);
+ break;
+
+ case HAL_PHYRX_HE_SIG_B2_OFDMA:
+ ath12k_dp_mon_parse_he_sig_b2_ofdma(tlv_data, ppdu_info);
+ break;
+
+ case HAL_PHYRX_RSSI_LEGACY: {
+ struct hal_rx_phyrx_rssi_legacy_info *rssi =
+ (struct hal_rx_phyrx_rssi_legacy_info *)tlv_data;
+ u32 reception_type = 0;
+ u32 rssi_legacy_info = __le32_to_cpu(rssi->rsvd[0]);
+
+ info[0] = __le32_to_cpu(rssi->info0);
+
+ /* TODO: Please note that the combined rssi will not be accurate
+ * in MU case. Rssi in MU needs to be retrieved from
+ * PHYRX_OTHER_RECEIVE_INFO TLV.
+ */
+ ppdu_info->rssi_comb =
+ u32_get_bits(info[0],
+ HAL_RX_PHYRX_RSSI_LEGACY_INFO_INFO0_RSSI_COMB);
+ reception_type =
+ u32_get_bits(rssi_legacy_info,
+ HAL_RX_PHYRX_RSSI_LEGACY_INFO_RSVD1_RECEPTION);
+
+ switch (reception_type) {
+ case HAL_RECEPTION_TYPE_ULOFMDA:
+ ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_OFDMA;
+ break;
+ case HAL_RECEPTION_TYPE_ULMIMO:
+ ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_MIMO;
+ break;
+ default:
+ ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU;
+ break;
+ }
+ break;
+ }
+ case HAL_RXPCU_PPDU_END_INFO: {
+ struct hal_rx_ppdu_end_duration *ppdu_rx_duration =
+ (struct hal_rx_ppdu_end_duration *)tlv_data;
+
+ info[0] = __le32_to_cpu(ppdu_rx_duration->info0);
+ ppdu_info->rx_duration =
+ u32_get_bits(info[0], HAL_RX_PPDU_END_DURATION);
+ ppdu_info->tsft = __le32_to_cpu(ppdu_rx_duration->rsvd0[1]);
+ ppdu_info->tsft = (ppdu_info->tsft << 32) |
+ __le32_to_cpu(ppdu_rx_duration->rsvd0[0]);
+ break;
+ }
+ case HAL_RX_MPDU_START: {
+ struct hal_rx_mpdu_start *mpdu_start =
+ (struct hal_rx_mpdu_start *)tlv_data;
+ struct dp_mon_mpdu *mon_mpdu = pmon->mon_mpdu;
+ u16 peer_id;
+
+ info[1] = __le32_to_cpu(mpdu_start->info1);
+ peer_id = u32_get_bits(info[1], HAL_RX_MPDU_START_INFO1_PEERID);
+ if (peer_id)
+ ppdu_info->peer_id = peer_id;
+
+ ppdu_info->mpdu_len += u32_get_bits(info[1],
+ HAL_RX_MPDU_START_INFO2_MPDU_LEN);
+ if (userid < HAL_MAX_UL_MU_USERS) {
+ info[0] = __le32_to_cpu(mpdu_start->info0);
+ ppdu_info->userid = userid;
+ ppdu_info->ampdu_id[userid] =
+ u32_get_bits(info[0], HAL_RX_MPDU_START_INFO1_PEERID);
+ }
+
+ mon_mpdu = kzalloc(sizeof(*mon_mpdu), GFP_ATOMIC);
+ if (!mon_mpdu)
+ return HAL_RX_MON_STATUS_PPDU_NOT_DONE;
+
+ break;
+ }
+ case HAL_RX_MSDU_START:
+ /* TODO: add msdu start parsing logic */
+ break;
+ case HAL_MON_BUF_ADDR: {
+ struct dp_rxdma_ring *buf_ring = &ab->dp.rxdma_mon_buf_ring;
+ struct dp_mon_packet_info *packet_info =
+ (struct dp_mon_packet_info *)tlv_data;
+ int buf_id = u32_get_bits(packet_info->cookie,
+ DP_RXDMA_BUF_COOKIE_BUF_ID);
+ struct sk_buff *msdu;
+ struct dp_mon_mpdu *mon_mpdu = pmon->mon_mpdu;
+ struct ath12k_skb_rxcb *rxcb;
+
+ spin_lock_bh(&buf_ring->idr_lock);
+ msdu = idr_remove(&buf_ring->bufs_idr, buf_id);
+ spin_unlock_bh(&buf_ring->idr_lock);
+
+ if (unlikely(!msdu)) {
+ ath12k_warn(ab, "montior destination with invalid buf_id %d\n",
+ buf_id);
+ return HAL_RX_MON_STATUS_PPDU_NOT_DONE;
+ }
+
+ rxcb = ATH12K_SKB_RXCB(msdu);
+ dma_unmap_single(ab->dev, rxcb->paddr,
+ msdu->len + skb_tailroom(msdu),
+ DMA_FROM_DEVICE);
+
+ if (mon_mpdu->tail)
+ mon_mpdu->tail->next = msdu;
+ else
+ mon_mpdu->tail = msdu;
+
+ ath12k_dp_mon_buf_replenish(ab, buf_ring, 1);
+
+ break;
+ }
+ case HAL_RX_MSDU_END: {
+ struct rx_msdu_end_qcn9274 *msdu_end =
+ (struct rx_msdu_end_qcn9274 *)tlv_data;
+ bool is_first_msdu_in_mpdu;
+ u16 msdu_end_info;
+
+ msdu_end_info = __le16_to_cpu(msdu_end->info5);
+ is_first_msdu_in_mpdu = u32_get_bits(msdu_end_info,
+ RX_MSDU_END_INFO5_FIRST_MSDU);
+ if (is_first_msdu_in_mpdu) {
+ pmon->mon_mpdu->head = pmon->mon_mpdu->tail;
+ pmon->mon_mpdu->tail = NULL;
+ }
+ break;
+ }
+ case HAL_RX_MPDU_END:
+ list_add_tail(&pmon->mon_mpdu->list, &pmon->dp_rx_mon_mpdu_list);
+ break;
+ case HAL_DUMMY:
+ return HAL_RX_MON_STATUS_BUF_DONE;
+ case HAL_RX_PPDU_END_STATUS_DONE:
+ case 0:
+ return HAL_RX_MON_STATUS_PPDU_DONE;
+ default:
+ break;
+ }
+
+ return HAL_RX_MON_STATUS_PPDU_NOT_DONE;
+}
+
+static void ath12k_dp_mon_rx_msdus_set_payload(struct ath12k *ar, struct sk_buff *msdu)
+{
+ u32 rx_pkt_offset, l2_hdr_offset;
+
+ rx_pkt_offset = ar->ab->hw_params->hal_desc_sz;
+ l2_hdr_offset = ath12k_dp_rx_h_l3pad(ar->ab,
+ (struct hal_rx_desc *)msdu->data);
+ skb_pull(msdu, rx_pkt_offset + l2_hdr_offset);
+}
+
+static struct sk_buff *
+ath12k_dp_mon_rx_merg_msdus(struct ath12k *ar,
+ u32 mac_id, struct sk_buff *head_msdu,
+ struct ieee80211_rx_status *rxs, bool *fcs_err)
+{
+ struct ath12k_base *ab = ar->ab;
+ struct sk_buff *msdu, *mpdu_buf, *prev_buf;
+ struct hal_rx_desc *rx_desc;
+ u8 *hdr_desc, *dest, decap_format;
+ struct ieee80211_hdr_3addr *wh;
+ u32 err_bitmap;
+
+ mpdu_buf = NULL;
+
+ if (!head_msdu)
+ goto err_merge_fail;
+
+ rx_desc = (struct hal_rx_desc *)head_msdu->data;
+ err_bitmap = ath12k_dp_rx_h_mpdu_err(ab, rx_desc);
+
+ if (err_bitmap & HAL_RX_MPDU_ERR_FCS)
+ *fcs_err = true;
+
+ decap_format = ath12k_dp_rx_h_decap_type(ab, rx_desc);
+
+ ath12k_dp_rx_h_ppdu(ar, rx_desc, rxs);
+
+ if (decap_format == DP_RX_DECAP_TYPE_RAW) {
+ ath12k_dp_mon_rx_msdus_set_payload(ar, head_msdu);
+
+ prev_buf = head_msdu;
+ msdu = head_msdu->next;
+
+ while (msdu) {
+ ath12k_dp_mon_rx_msdus_set_payload(ar, msdu);
+
+ prev_buf = msdu;
+ msdu = msdu->next;
+ }
+
+ prev_buf->next = NULL;
+
+ skb_trim(prev_buf, prev_buf->len - HAL_RX_FCS_LEN);
+ } else if (decap_format == DP_RX_DECAP_TYPE_NATIVE_WIFI) {
+ u8 qos_pkt = 0;
+
+ rx_desc = (struct hal_rx_desc *)head_msdu->data;
+ hdr_desc = ab->hw_params->hal_ops->rx_desc_get_msdu_payload(rx_desc);
+
+ /* Base size */
+ wh = (struct ieee80211_hdr_3addr *)hdr_desc;
+
+ if (ieee80211_is_data_qos(wh->frame_control))
+ qos_pkt = 1;
+
+ msdu = head_msdu;
+
+ while (msdu) {
+ ath12k_dp_mon_rx_msdus_set_payload(ar, msdu);
+ if (qos_pkt) {
+ dest = skb_push(msdu, sizeof(__le16));
+ if (!dest)
+ goto err_merge_fail;
+ memcpy(dest, hdr_desc, sizeof(struct ieee80211_qos_hdr));
+ }
+ prev_buf = msdu;
+ msdu = msdu->next;
+ }
+ dest = skb_put(prev_buf, HAL_RX_FCS_LEN);
+ if (!dest)
+ goto err_merge_fail;
+
+ ath12k_dbg(ab, ATH12K_DBG_DATA,
+ "mpdu_buf %pK mpdu_buf->len %u",
+ prev_buf, prev_buf->len);
+ } else {
+ ath12k_dbg(ab, ATH12K_DBG_DATA,
+ "decap format %d is not supported!\n",
+ decap_format);
+ goto err_merge_fail;
+ }
+
+ return head_msdu;
+
+err_merge_fail:
+ if (mpdu_buf && decap_format != DP_RX_DECAP_TYPE_RAW) {
+ ath12k_dbg(ab, ATH12K_DBG_DATA,
+ "err_merge_fail mpdu_buf %pK", mpdu_buf);
+ /* Free the head buffer */
+ dev_kfree_skb_any(mpdu_buf);
+ }
+ return NULL;
+}
+
+static void
+ath12k_dp_mon_rx_update_radiotap_he(struct hal_rx_mon_ppdu_info *rx_status,
+ u8 *rtap_buf)
+{
+ u32 rtap_len = 0;
+
+ put_unaligned_le16(rx_status->he_data1, &rtap_buf[rtap_len]);
+ rtap_len += 2;
+
+ put_unaligned_le16(rx_status->he_data2, &rtap_buf[rtap_len]);
+ rtap_len += 2;
+
+ put_unaligned_le16(rx_status->he_data3, &rtap_buf[rtap_len]);
+ rtap_len += 2;
+
+ put_unaligned_le16(rx_status->he_data4, &rtap_buf[rtap_len]);
+ rtap_len += 2;
+
+ put_unaligned_le16(rx_status->he_data5, &rtap_buf[rtap_len]);
+ rtap_len += 2;
+
+ put_unaligned_le16(rx_status->he_data6, &rtap_buf[rtap_len]);
+}
+
+static void
+ath12k_dp_mon_rx_update_radiotap_he_mu(struct hal_rx_mon_ppdu_info *rx_status,
+ u8 *rtap_buf)
+{
+ u32 rtap_len = 0;
+
+ put_unaligned_le16(rx_status->he_flags1, &rtap_buf[rtap_len]);
+ rtap_len += 2;
+
+ put_unaligned_le16(rx_status->he_flags2, &rtap_buf[rtap_len]);
+ rtap_len += 2;
+
+ rtap_buf[rtap_len] = rx_status->he_RU[0];
+ rtap_len += 1;
+
+ rtap_buf[rtap_len] = rx_status->he_RU[1];
+ rtap_len += 1;
+
+ rtap_buf[rtap_len] = rx_status->he_RU[2];
+ rtap_len += 1;
+
+ rtap_buf[rtap_len] = rx_status->he_RU[3];
+}
+
+static void ath12k_dp_mon_update_radiotap(struct ath12k *ar,
+ struct hal_rx_mon_ppdu_info *ppduinfo,
+ struct sk_buff *mon_skb,
+ struct ieee80211_rx_status *rxs)
+{
+ struct ieee80211_supported_band *sband;
+ u8 *ptr = NULL;
+ u16 ampdu_id = ppduinfo->ampdu_id[ppduinfo->userid];
+
+ rxs->flag |= RX_FLAG_MACTIME_START;
+ rxs->signal = ppduinfo->rssi_comb + ATH12K_DEFAULT_NOISE_FLOOR;
+ rxs->nss = ppduinfo->nss + 1;
+
+ if (ampdu_id) {
+ rxs->flag |= RX_FLAG_AMPDU_DETAILS;
+ rxs->ampdu_reference = ampdu_id;
+ }
+
+ if (ppduinfo->he_mu_flags) {
+ rxs->flag |= RX_FLAG_RADIOTAP_HE_MU;
+ rxs->encoding = RX_ENC_HE;
+ ptr = skb_push(mon_skb, sizeof(struct ieee80211_radiotap_he_mu));
+ ath12k_dp_mon_rx_update_radiotap_he_mu(ppduinfo, ptr);
+ } else if (ppduinfo->he_flags) {
+ rxs->flag |= RX_FLAG_RADIOTAP_HE;
+ rxs->encoding = RX_ENC_HE;
+ ptr = skb_push(mon_skb, sizeof(struct ieee80211_radiotap_he));
+ ath12k_dp_mon_rx_update_radiotap_he(ppduinfo, ptr);
+ rxs->rate_idx = ppduinfo->rate;
+ } else if (ppduinfo->vht_flags) {
+ rxs->encoding = RX_ENC_VHT;
+ rxs->rate_idx = ppduinfo->rate;
+ } else if (ppduinfo->ht_flags) {
+ rxs->encoding = RX_ENC_HT;
+ rxs->rate_idx = ppduinfo->rate;
+ } else {
+ rxs->encoding = RX_ENC_LEGACY;
+ sband = &ar->mac.sbands[rxs->band];
+ rxs->rate_idx = ath12k_mac_hw_rate_to_idx(sband, ppduinfo->rate,
+ ppduinfo->cck_flag);
+ }
+
+ rxs->mactime = ppduinfo->tsft;
+}
+
+static void ath12k_dp_mon_rx_deliver_msdu(struct ath12k *ar, struct napi_struct *napi,
+ struct sk_buff *msdu,
+ struct ieee80211_rx_status *status)
+{
+ static const struct ieee80211_radiotap_he known = {
+ .data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN),
+ .data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN),
+ };
+ struct ieee80211_rx_status *rx_status;
+ struct ieee80211_radiotap_he *he = NULL;
+ struct ieee80211_sta *pubsta = NULL;
+ struct ath12k_peer *peer;
+ struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
+ u8 decap = DP_RX_DECAP_TYPE_RAW;
+ bool is_mcbc = rxcb->is_mcbc;
+ bool is_eapol_tkip = rxcb->is_eapol;
+
+ if ((status->encoding == RX_ENC_HE) && !(status->flag & RX_FLAG_RADIOTAP_HE) &&
+ !(status->flag & RX_FLAG_SKIP_MONITOR)) {
+ he = skb_push(msdu, sizeof(known));
+ memcpy(he, &known, sizeof(known));
+ status->flag |= RX_FLAG_RADIOTAP_HE;
+ }
+
+ if (!(status->flag & RX_FLAG_ONLY_MONITOR))
+ decap = ath12k_dp_rx_h_decap_type(ar->ab, rxcb->rx_desc);
+ spin_lock_bh(&ar->ab->base_lock);
+ peer = ath12k_dp_rx_h_find_peer(ar->ab, msdu);
+ if (peer && peer->sta)
+ pubsta = peer->sta;
+ spin_unlock_bh(&ar->ab->base_lock);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_DATA,
+ "rx skb %pK len %u peer %pM %u %s %s%s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
+ msdu,
+ msdu->len,
+ peer ? peer->addr : NULL,
+ rxcb->tid,
+ (is_mcbc) ? "mcast" : "ucast",
+ (status->encoding == RX_ENC_LEGACY) ? "legacy" : "",
+ (status->encoding == RX_ENC_HT) ? "ht" : "",
+ (status->encoding == RX_ENC_VHT) ? "vht" : "",
+ (status->encoding == RX_ENC_HE) ? "he" : "",
+ (status->bw == RATE_INFO_BW_40) ? "40" : "",
+ (status->bw == RATE_INFO_BW_80) ? "80" : "",
+ (status->bw == RATE_INFO_BW_160) ? "160" : "",
+ status->enc_flags & RX_ENC_FLAG_SHORT_GI ? "sgi " : "",
+ status->rate_idx,
+ status->nss,
+ status->freq,
+ status->band, status->flag,
+ !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
+ !!(status->flag & RX_FLAG_MMIC_ERROR),
+ !!(status->flag & RX_FLAG_AMSDU_MORE));
+
+ ath12k_dbg_dump(ar->ab, ATH12K_DBG_DP_RX, NULL, "dp rx msdu: ",
+ msdu->data, msdu->len);
+ rx_status = IEEE80211_SKB_RXCB(msdu);
+ *rx_status = *status;
+
+ /* TODO: trace rx packet */
+
+ /* PN for multicast packets are not validate in HW,
+ * so skip 802.3 rx path
+ * Also, fast_rx expectes the STA to be authorized, hence
+ * eapol packets are sent in slow path.
+ */
+ if (decap == DP_RX_DECAP_TYPE_ETHERNET2_DIX && !is_eapol_tkip &&
+ !(is_mcbc && rx_status->flag & RX_FLAG_DECRYPTED))
+ rx_status->flag |= RX_FLAG_8023;
+
+ ieee80211_rx_napi(ar->hw, pubsta, msdu, napi);
+}
+
+static int ath12k_dp_mon_rx_deliver(struct ath12k *ar, u32 mac_id,
+ struct sk_buff *head_msdu,
+ struct hal_rx_mon_ppdu_info *ppduinfo,
+ struct napi_struct *napi)
+{
+ struct ath12k_pdev_dp *dp = &ar->dp;
+ struct sk_buff *mon_skb, *skb_next, *header;
+ struct ieee80211_rx_status *rxs = &dp->rx_status;
+ bool fcs_err = false;
+
+ mon_skb = ath12k_dp_mon_rx_merg_msdus(ar, mac_id, head_msdu,
+ rxs, &fcs_err);
+ if (!mon_skb)
+ goto mon_deliver_fail;
+
+ header = mon_skb;
+ rxs->flag = 0;
+
+ if (fcs_err)
+ rxs->flag = RX_FLAG_FAILED_FCS_CRC;
+
+ do {
+ skb_next = mon_skb->next;
+ if (!skb_next)
+ rxs->flag &= ~RX_FLAG_AMSDU_MORE;
+ else
+ rxs->flag |= RX_FLAG_AMSDU_MORE;
+
+ if (mon_skb == header) {
+ header = NULL;
+ rxs->flag &= ~RX_FLAG_ALLOW_SAME_PN;
+ } else {
+ rxs->flag |= RX_FLAG_ALLOW_SAME_PN;
+ }
+ rxs->flag |= RX_FLAG_ONLY_MONITOR;
+ ath12k_dp_mon_update_radiotap(ar, ppduinfo, mon_skb, rxs);
+ ath12k_dp_mon_rx_deliver_msdu(ar, napi, mon_skb, rxs);
+ mon_skb = skb_next;
+ } while (mon_skb);
+ rxs->flag = 0;
+
+ return 0;
+
+mon_deliver_fail:
+ mon_skb = head_msdu;
+ while (mon_skb) {
+ skb_next = mon_skb->next;
+ dev_kfree_skb_any(mon_skb);
+ mon_skb = skb_next;
+ }
+ return -EINVAL;
+}
+
+static enum hal_rx_mon_status
+ath12k_dp_mon_parse_rx_dest(struct ath12k_base *ab, struct ath12k_mon_data *pmon,
+ struct sk_buff *skb)
+{
+ struct hal_rx_mon_ppdu_info *ppdu_info = &pmon->mon_ppdu_info;
+ struct hal_tlv_hdr *tlv;
+ enum hal_rx_mon_status hal_status;
+ u32 tlv_userid = 0;
+ u16 tlv_tag, tlv_len;
+ u8 *ptr = skb->data;
+
+ memset(ppdu_info, 0, sizeof(struct hal_rx_mon_ppdu_info));
+
+ do {
+ tlv = (struct hal_tlv_hdr *)ptr;
+ tlv_tag = le32_get_bits(tlv->tl, HAL_TLV_HDR_TAG);
+ tlv_len = le32_get_bits(tlv->tl, HAL_TLV_HDR_LEN);
+ tlv_userid = le32_get_bits(tlv->tl, HAL_TLV_USR_ID);
+ ptr += sizeof(*tlv);
+
+ /* The actual length of PPDU_END is the combined length of many PHY
+ * TLVs that follow. Skip the TLV header and
+ * rx_rxpcu_classification_overview that follows the header to get to
+ * next TLV.
+ */
+
+ if (tlv_tag == HAL_RX_PPDU_END)
+ tlv_len = sizeof(struct hal_rx_rxpcu_classification_overview);
+
+ hal_status = ath12k_dp_mon_rx_parse_status_tlv(ab, pmon,
+ tlv_tag, ptr, tlv_userid);
+ ptr += tlv_len;
+ ptr = PTR_ALIGN(ptr, HAL_TLV_ALIGN);
+
+ if ((ptr - skb->data) >= DP_RX_BUFFER_SIZE)
+ break;
+
+ } while (hal_status == HAL_RX_MON_STATUS_PPDU_NOT_DONE);
+
+ return hal_status;
+}
+
+enum hal_rx_mon_status
+ath12k_dp_mon_rx_parse_mon_status(struct ath12k *ar,
+ struct ath12k_mon_data *pmon,
+ int mac_id,
+ struct sk_buff *skb,
+ struct napi_struct *napi)
+{
+ struct ath12k_base *ab = ar->ab;
+ struct hal_rx_mon_ppdu_info *ppdu_info = &pmon->mon_ppdu_info;
+ struct dp_mon_mpdu *tmp;
+ struct dp_mon_mpdu *mon_mpdu = pmon->mon_mpdu;
+ struct sk_buff *head_msdu, *tail_msdu;
+ enum hal_rx_mon_status hal_status = HAL_RX_MON_STATUS_BUF_DONE;
+
+ ath12k_dp_mon_parse_rx_dest(ab, pmon, skb);
+
+ list_for_each_entry_safe(mon_mpdu, tmp, &pmon->dp_rx_mon_mpdu_list, list) {
+ list_del(&mon_mpdu->list);
+ head_msdu = mon_mpdu->head;
+ tail_msdu = mon_mpdu->tail;
+
+ if (head_msdu && tail_msdu) {
+ ath12k_dp_mon_rx_deliver(ar, mac_id, head_msdu,
+ ppdu_info, napi);
+ }
+
+ kfree(mon_mpdu);
+ }
+ return hal_status;
+}
+
+int ath12k_dp_mon_buf_replenish(struct ath12k_base *ab,
+ struct dp_rxdma_ring *buf_ring,
+ int req_entries)
+{
+ struct hal_mon_buf_ring *mon_buf;
+ struct sk_buff *skb;
+ struct hal_srng *srng;
+ dma_addr_t paddr;
+ u32 cookie, buf_id;
+
+ srng = &ab->hal.srng_list[buf_ring->refill_buf_ring.ring_id];
+ spin_lock_bh(&srng->lock);
+ ath12k_hal_srng_access_begin(ab, srng);
+
+ while (req_entries > 0) {
+ skb = dev_alloc_skb(DP_RX_BUFFER_SIZE + DP_RX_BUFFER_ALIGN_SIZE);
+ if (unlikely(!skb))
+ goto fail_alloc_skb;
+
+ if (!IS_ALIGNED((unsigned long)skb->data, DP_RX_BUFFER_ALIGN_SIZE)) {
+ skb_pull(skb,
+ PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) -
+ skb->data);
+ }
+
+ paddr = dma_map_single(ab->dev, skb->data,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+
+ if (unlikely(dma_mapping_error(ab->dev, paddr)))
+ goto fail_free_skb;
+
+ spin_lock_bh(&buf_ring->idr_lock);
+ buf_id = idr_alloc(&buf_ring->bufs_idr, skb, 0,
+ buf_ring->bufs_max * 3, GFP_ATOMIC);
+ spin_unlock_bh(&buf_ring->idr_lock);
+
+ if (unlikely(buf_id < 0))
+ goto fail_dma_unmap;
+
+ mon_buf = ath12k_hal_srng_src_get_next_entry(ab, srng);
+ if (unlikely(!mon_buf))
+ goto fail_idr_remove;
+
+ ATH12K_SKB_RXCB(skb)->paddr = paddr;
+
+ cookie = u32_encode_bits(buf_id, DP_RXDMA_BUF_COOKIE_BUF_ID);
+
+ mon_buf->paddr_lo = cpu_to_le32(lower_32_bits(paddr));
+ mon_buf->paddr_hi = cpu_to_le32(upper_32_bits(paddr));
+ mon_buf->cookie = cpu_to_le64(cookie);
+
+ req_entries--;
+ }
+
+ ath12k_hal_srng_access_end(ab, srng);
+ spin_unlock_bh(&srng->lock);
+ return 0;
+
+fail_idr_remove:
+ spin_lock_bh(&buf_ring->idr_lock);
+ idr_remove(&buf_ring->bufs_idr, buf_id);
+ spin_unlock_bh(&buf_ring->idr_lock);
+fail_dma_unmap:
+ dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+fail_free_skb:
+ dev_kfree_skb_any(skb);
+fail_alloc_skb:
+ ath12k_hal_srng_access_end(ab, srng);
+ spin_unlock_bh(&srng->lock);
+ return -ENOMEM;
+}
+
+static struct dp_mon_tx_ppdu_info *
+ath12k_dp_mon_tx_get_ppdu_info(struct ath12k_mon_data *pmon,
+ unsigned int ppdu_id,
+ enum dp_mon_tx_ppdu_info_type type)
+{
+ struct dp_mon_tx_ppdu_info *tx_ppdu_info;
+
+ if (type == DP_MON_TX_PROT_PPDU_INFO) {
+ tx_ppdu_info = pmon->tx_prot_ppdu_info;
+
+ if (tx_ppdu_info && !tx_ppdu_info->is_used)
+ return tx_ppdu_info;
+ kfree(tx_ppdu_info);
+ } else {
+ tx_ppdu_info = pmon->tx_data_ppdu_info;
+
+ if (tx_ppdu_info && !tx_ppdu_info->is_used)
+ return tx_ppdu_info;
+ kfree(tx_ppdu_info);
+ }
+
+ /* allocate new tx_ppdu_info */
+ tx_ppdu_info = kzalloc(sizeof(*tx_ppdu_info), GFP_ATOMIC);
+ if (!tx_ppdu_info)
+ return NULL;
+
+ tx_ppdu_info->is_used = 0;
+ tx_ppdu_info->ppdu_id = ppdu_id;
+
+ if (type == DP_MON_TX_PROT_PPDU_INFO)
+ pmon->tx_prot_ppdu_info = tx_ppdu_info;
+ else
+ pmon->tx_data_ppdu_info = tx_ppdu_info;
+
+ return tx_ppdu_info;
+}
+
+static struct dp_mon_tx_ppdu_info *
+ath12k_dp_mon_hal_tx_ppdu_info(struct ath12k_mon_data *pmon,
+ u16 tlv_tag)
+{
+ switch (tlv_tag) {
+ case HAL_TX_FES_SETUP:
+ case HAL_TX_FLUSH:
+ case HAL_PCU_PPDU_SETUP_INIT:
+ case HAL_TX_PEER_ENTRY:
+ case HAL_TX_QUEUE_EXTENSION:
+ case HAL_TX_MPDU_START:
+ case HAL_TX_MSDU_START:
+ case HAL_TX_DATA:
+ case HAL_MON_BUF_ADDR:
+ case HAL_TX_MPDU_END:
+ case HAL_TX_LAST_MPDU_FETCHED:
+ case HAL_TX_LAST_MPDU_END:
+ case HAL_COEX_TX_REQ:
+ case HAL_TX_RAW_OR_NATIVE_FRAME_SETUP:
+ case HAL_SCH_CRITICAL_TLV_REFERENCE:
+ case HAL_TX_FES_SETUP_COMPLETE:
+ case HAL_TQM_MPDU_GLOBAL_START:
+ case HAL_SCHEDULER_END:
+ case HAL_TX_FES_STATUS_USER_PPDU:
+ break;
+ case HAL_TX_FES_STATUS_PROT: {
+ if (!pmon->tx_prot_ppdu_info->is_used)
+ pmon->tx_prot_ppdu_info->is_used = true;
+
+ return pmon->tx_prot_ppdu_info;
+ }
+ }
+
+ if (!pmon->tx_data_ppdu_info->is_used)
+ pmon->tx_data_ppdu_info->is_used = true;
+
+ return pmon->tx_data_ppdu_info;
+}
+
+#define MAX_MONITOR_HEADER 512
+#define MAX_DUMMY_FRM_BODY 128
+
+struct sk_buff *ath12k_dp_mon_tx_alloc_skb(void)
+{
+ struct sk_buff *skb;
+
+ skb = dev_alloc_skb(MAX_MONITOR_HEADER + MAX_DUMMY_FRM_BODY);
+ if (!skb)
+ return NULL;
+
+ skb_reserve(skb, MAX_MONITOR_HEADER);
+
+ if (!IS_ALIGNED((unsigned long)skb->data, 4))
+ skb_pull(skb, PTR_ALIGN(skb->data, 4) - skb->data);
+
+ return skb;
+}
+
+static int
+ath12k_dp_mon_tx_gen_cts2self_frame(struct dp_mon_tx_ppdu_info *tx_ppdu_info)
+{
+ struct sk_buff *skb;
+ struct ieee80211_cts *cts;
+
+ skb = ath12k_dp_mon_tx_alloc_skb();
+ if (!skb)
+ return -ENOMEM;
+
+ cts = (struct ieee80211_cts *)skb->data;
+ memset(cts, 0, MAX_DUMMY_FRM_BODY);
+ cts->frame_control =
+ cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_CTS);
+ cts->duration = cpu_to_le16(tx_ppdu_info->rx_status.rx_duration);
+ memcpy(cts->ra, tx_ppdu_info->rx_status.addr1, sizeof(cts->ra));
+
+ skb_put(skb, sizeof(*cts));
+ tx_ppdu_info->tx_mon_mpdu->head = skb;
+ tx_ppdu_info->tx_mon_mpdu->tail = NULL;
+ list_add_tail(&tx_ppdu_info->tx_mon_mpdu->list,
+ &tx_ppdu_info->dp_tx_mon_mpdu_list);
+
+ return 0;
+}
+
+static int
+ath12k_dp_mon_tx_gen_rts_frame(struct dp_mon_tx_ppdu_info *tx_ppdu_info)
+{
+ struct sk_buff *skb;
+ struct ieee80211_rts *rts;
+
+ skb = ath12k_dp_mon_tx_alloc_skb();
+ if (!skb)
+ return -ENOMEM;
+
+ rts = (struct ieee80211_rts *)skb->data;
+ memset(rts, 0, MAX_DUMMY_FRM_BODY);
+ rts->frame_control =
+ cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_RTS);
+ rts->duration = cpu_to_le16(tx_ppdu_info->rx_status.rx_duration);
+ memcpy(rts->ra, tx_ppdu_info->rx_status.addr1, sizeof(rts->ra));
+ memcpy(rts->ta, tx_ppdu_info->rx_status.addr2, sizeof(rts->ta));
+
+ skb_put(skb, sizeof(*rts));
+ tx_ppdu_info->tx_mon_mpdu->head = skb;
+ tx_ppdu_info->tx_mon_mpdu->tail = NULL;
+ list_add_tail(&tx_ppdu_info->tx_mon_mpdu->list,
+ &tx_ppdu_info->dp_tx_mon_mpdu_list);
+
+ return 0;
+}
+
+static int
+ath12k_dp_mon_tx_gen_3addr_qos_null_frame(struct dp_mon_tx_ppdu_info *tx_ppdu_info)
+{
+ struct sk_buff *skb;
+ struct ieee80211_qos_hdr *qhdr;
+
+ skb = ath12k_dp_mon_tx_alloc_skb();
+ if (!skb)
+ return -ENOMEM;
+
+ qhdr = (struct ieee80211_qos_hdr *)skb->data;
+ memset(qhdr, 0, MAX_DUMMY_FRM_BODY);
+ qhdr->frame_control =
+ cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_NULLFUNC);
+ qhdr->duration_id = cpu_to_le16(tx_ppdu_info->rx_status.rx_duration);
+ memcpy(qhdr->addr1, tx_ppdu_info->rx_status.addr1, ETH_ALEN);
+ memcpy(qhdr->addr2, tx_ppdu_info->rx_status.addr2, ETH_ALEN);
+ memcpy(qhdr->addr3, tx_ppdu_info->rx_status.addr3, ETH_ALEN);
+
+ skb_put(skb, sizeof(*qhdr));
+ tx_ppdu_info->tx_mon_mpdu->head = skb;
+ tx_ppdu_info->tx_mon_mpdu->tail = NULL;
+ list_add_tail(&tx_ppdu_info->tx_mon_mpdu->list,
+ &tx_ppdu_info->dp_tx_mon_mpdu_list);
+
+ return 0;
+}
+
+static int
+ath12k_dp_mon_tx_gen_4addr_qos_null_frame(struct dp_mon_tx_ppdu_info *tx_ppdu_info)
+{
+ struct sk_buff *skb;
+ struct dp_mon_qosframe_addr4 *qhdr;
+
+ skb = ath12k_dp_mon_tx_alloc_skb();
+ if (!skb)
+ return -ENOMEM;
+
+ qhdr = (struct dp_mon_qosframe_addr4 *)skb->data;
+ memset(qhdr, 0, MAX_DUMMY_FRM_BODY);
+ qhdr->frame_control =
+ cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_NULLFUNC);
+ qhdr->duration = cpu_to_le16(tx_ppdu_info->rx_status.rx_duration);
+ memcpy(qhdr->addr1, tx_ppdu_info->rx_status.addr1, ETH_ALEN);
+ memcpy(qhdr->addr2, tx_ppdu_info->rx_status.addr2, ETH_ALEN);
+ memcpy(qhdr->addr3, tx_ppdu_info->rx_status.addr3, ETH_ALEN);
+ memcpy(qhdr->addr4, tx_ppdu_info->rx_status.addr4, ETH_ALEN);
+
+ skb_put(skb, sizeof(*qhdr));
+ tx_ppdu_info->tx_mon_mpdu->head = skb;
+ tx_ppdu_info->tx_mon_mpdu->tail = NULL;
+ list_add_tail(&tx_ppdu_info->tx_mon_mpdu->list,
+ &tx_ppdu_info->dp_tx_mon_mpdu_list);
+
+ return 0;
+}
+
+static int
+ath12k_dp_mon_tx_gen_ack_frame(struct dp_mon_tx_ppdu_info *tx_ppdu_info)
+{
+ struct sk_buff *skb;
+ struct dp_mon_frame_min_one *fbmhdr;
+
+ skb = ath12k_dp_mon_tx_alloc_skb();
+ if (!skb)
+ return -ENOMEM;
+
+ fbmhdr = (struct dp_mon_frame_min_one *)skb->data;
+ memset(fbmhdr, 0, MAX_DUMMY_FRM_BODY);
+ fbmhdr->frame_control =
+ cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_CFACK);
+ memcpy(fbmhdr->addr1, tx_ppdu_info->rx_status.addr1, ETH_ALEN);
+
+ /* set duration zero for ack frame */
+ fbmhdr->duration = 0;
+
+ skb_put(skb, sizeof(*fbmhdr));
+ tx_ppdu_info->tx_mon_mpdu->head = skb;
+ tx_ppdu_info->tx_mon_mpdu->tail = NULL;
+ list_add_tail(&tx_ppdu_info->tx_mon_mpdu->list,
+ &tx_ppdu_info->dp_tx_mon_mpdu_list);
+
+ return 0;
+}
+
+static int
+ath12k_dp_mon_tx_gen_prot_frame(struct dp_mon_tx_ppdu_info *tx_ppdu_info)
+{
+ int ret = 0;
+
+ switch (tx_ppdu_info->rx_status.medium_prot_type) {
+ case DP_MON_TX_MEDIUM_RTS_LEGACY:
+ case DP_MON_TX_MEDIUM_RTS_11AC_STATIC_BW:
+ case DP_MON_TX_MEDIUM_RTS_11AC_DYNAMIC_BW:
+ ret = ath12k_dp_mon_tx_gen_rts_frame(tx_ppdu_info);
+ break;
+ case DP_MON_TX_MEDIUM_CTS2SELF:
+ ret = ath12k_dp_mon_tx_gen_cts2self_frame(tx_ppdu_info);
+ break;
+ case DP_MON_TX_MEDIUM_QOS_NULL_NO_ACK_3ADDR:
+ ret = ath12k_dp_mon_tx_gen_3addr_qos_null_frame(tx_ppdu_info);
+ break;
+ case DP_MON_TX_MEDIUM_QOS_NULL_NO_ACK_4ADDR:
+ ret = ath12k_dp_mon_tx_gen_4addr_qos_null_frame(tx_ppdu_info);
+ break;
+ }
+
+ return ret;
+}
+
+static enum dp_mon_tx_tlv_status
+ath12k_dp_mon_tx_parse_status_tlv(struct ath12k_base *ab,
+ struct ath12k_mon_data *pmon,
+ u16 tlv_tag, u8 *tlv_data, u32 userid)
+{
+ struct dp_mon_tx_ppdu_info *tx_ppdu_info;
+ enum dp_mon_tx_tlv_status status = DP_MON_TX_STATUS_PPDU_NOT_DONE;
+ u32 info[7];
+
+ tx_ppdu_info = ath12k_dp_mon_hal_tx_ppdu_info(pmon, tlv_tag);
+
+ switch (tlv_tag) {
+ case HAL_TX_FES_SETUP: {
+ struct hal_tx_fes_setup *tx_fes_setup =
+ (struct hal_tx_fes_setup *)tlv_data;
+
+ info[0] = __le32_to_cpu(tx_fes_setup->info0);
+ tx_ppdu_info->ppdu_id = __le32_to_cpu(tx_fes_setup->schedule_id);
+ tx_ppdu_info->num_users =
+ u32_get_bits(info[0], HAL_TX_FES_SETUP_INFO0_NUM_OF_USERS);
+ status = DP_MON_TX_FES_SETUP;
+ break;
+ }
+
+ case HAL_TX_FES_STATUS_END: {
+ struct hal_tx_fes_status_end *tx_fes_status_end =
+ (struct hal_tx_fes_status_end *)tlv_data;
+ u32 tst_15_0, tst_31_16;
+
+ info[0] = __le32_to_cpu(tx_fes_status_end->info0);
+ tst_15_0 =
+ u32_get_bits(info[0],
+ HAL_TX_FES_STATUS_END_INFO0_START_TIMESTAMP_15_0);
+ tst_31_16 =
+ u32_get_bits(info[0],
+ HAL_TX_FES_STATUS_END_INFO0_START_TIMESTAMP_31_16);
+
+ tx_ppdu_info->rx_status.ppdu_ts = (tst_15_0 | (tst_31_16 << 16));
+ status = DP_MON_TX_FES_STATUS_END;
+ break;
+ }
+
+ case HAL_RX_RESPONSE_REQUIRED_INFO: {
+ struct hal_rx_resp_req_info *rx_resp_req_info =
+ (struct hal_rx_resp_req_info *)tlv_data;
+ u32 addr_32;
+ u16 addr_16;
+
+ info[0] = __le32_to_cpu(rx_resp_req_info->info0);
+ info[1] = __le32_to_cpu(rx_resp_req_info->info1);
+ info[2] = __le32_to_cpu(rx_resp_req_info->info2);
+ info[3] = __le32_to_cpu(rx_resp_req_info->info3);
+ info[4] = __le32_to_cpu(rx_resp_req_info->info4);
+ info[5] = __le32_to_cpu(rx_resp_req_info->info5);
+
+ tx_ppdu_info->rx_status.ppdu_id =
+ u32_get_bits(info[0], HAL_RX_RESP_REQ_INFO0_PPDU_ID);
+ tx_ppdu_info->rx_status.reception_type =
+ u32_get_bits(info[0], HAL_RX_RESP_REQ_INFO0_RECEPTION_TYPE);
+ tx_ppdu_info->rx_status.rx_duration =
+ u32_get_bits(info[1], HAL_RX_RESP_REQ_INFO1_DURATION);
+ tx_ppdu_info->rx_status.mcs =
+ u32_get_bits(info[1], HAL_RX_RESP_REQ_INFO1_RATE_MCS);
+ tx_ppdu_info->rx_status.sgi =
+ u32_get_bits(info[1], HAL_RX_RESP_REQ_INFO1_SGI);
+ tx_ppdu_info->rx_status.is_stbc =
+ u32_get_bits(info[1], HAL_RX_RESP_REQ_INFO1_STBC);
+ tx_ppdu_info->rx_status.ldpc =
+ u32_get_bits(info[1], HAL_RX_RESP_REQ_INFO1_LDPC);
+ tx_ppdu_info->rx_status.is_ampdu =
+ u32_get_bits(info[1], HAL_RX_RESP_REQ_INFO1_IS_AMPDU);
+ tx_ppdu_info->rx_status.num_users =
+ u32_get_bits(info[2], HAL_RX_RESP_REQ_INFO2_NUM_USER);
+
+ addr_32 = u32_get_bits(info[3], HAL_RX_RESP_REQ_INFO3_ADDR1_31_0);
+ addr_16 = u32_get_bits(info[3], HAL_RX_RESP_REQ_INFO4_ADDR1_47_32);
+ ath12k_dp_get_mac_addr(addr_32, addr_16, tx_ppdu_info->rx_status.addr1);
+
+ addr_16 = u32_get_bits(info[4], HAL_RX_RESP_REQ_INFO4_ADDR1_15_0);
+ addr_32 = u32_get_bits(info[5], HAL_RX_RESP_REQ_INFO5_ADDR1_47_16);
+ ath12k_dp_get_mac_addr(addr_32, addr_16, tx_ppdu_info->rx_status.addr2);
+
+ if (tx_ppdu_info->rx_status.reception_type == 0)
+ ath12k_dp_mon_tx_gen_cts2self_frame(tx_ppdu_info);
+ status = DP_MON_RX_RESPONSE_REQUIRED_INFO;
+ break;
+ }
+
+ case HAL_PCU_PPDU_SETUP_INIT: {
+ struct hal_tx_pcu_ppdu_setup_init *ppdu_setup =
+ (struct hal_tx_pcu_ppdu_setup_init *)tlv_data;
+ u32 addr_32;
+ u16 addr_16;
+
+ info[0] = __le32_to_cpu(ppdu_setup->info0);
+ info[1] = __le32_to_cpu(ppdu_setup->info1);
+ info[2] = __le32_to_cpu(ppdu_setup->info2);
+ info[3] = __le32_to_cpu(ppdu_setup->info3);
+ info[4] = __le32_to_cpu(ppdu_setup->info4);
+ info[5] = __le32_to_cpu(ppdu_setup->info5);
+ info[6] = __le32_to_cpu(ppdu_setup->info6);
+
+ /* protection frame address 1 */
+ addr_32 = u32_get_bits(info[1],
+ HAL_TX_PPDU_SETUP_INFO1_PROT_FRAME_ADDR1_31_0);
+ addr_16 = u32_get_bits(info[2],
+ HAL_TX_PPDU_SETUP_INFO2_PROT_FRAME_ADDR1_47_32);
+ ath12k_dp_get_mac_addr(addr_32, addr_16, tx_ppdu_info->rx_status.addr1);
+
+ /* protection frame address 2 */
+ addr_16 = u32_get_bits(info[2],
+ HAL_TX_PPDU_SETUP_INFO2_PROT_FRAME_ADDR2_15_0);
+ addr_32 = u32_get_bits(info[3],
+ HAL_TX_PPDU_SETUP_INFO3_PROT_FRAME_ADDR2_47_16);
+ ath12k_dp_get_mac_addr(addr_32, addr_16, tx_ppdu_info->rx_status.addr2);
+
+ /* protection frame address 3 */
+ addr_32 = u32_get_bits(info[4],
+ HAL_TX_PPDU_SETUP_INFO4_PROT_FRAME_ADDR3_31_0);
+ addr_16 = u32_get_bits(info[5],
+ HAL_TX_PPDU_SETUP_INFO5_PROT_FRAME_ADDR3_47_32);
+ ath12k_dp_get_mac_addr(addr_32, addr_16, tx_ppdu_info->rx_status.addr3);
+
+ /* protection frame address 4 */
+ addr_16 = u32_get_bits(info[5],
+ HAL_TX_PPDU_SETUP_INFO5_PROT_FRAME_ADDR4_15_0);
+ addr_32 = u32_get_bits(info[6],
+ HAL_TX_PPDU_SETUP_INFO6_PROT_FRAME_ADDR4_47_16);
+ ath12k_dp_get_mac_addr(addr_32, addr_16, tx_ppdu_info->rx_status.addr4);
+
+ status = u32_get_bits(info[0],
+ HAL_TX_PPDU_SETUP_INFO0_MEDIUM_PROT_TYPE);
+ break;
+ }
+
+ case HAL_TX_QUEUE_EXTENSION: {
+ struct hal_tx_queue_exten *tx_q_exten =
+ (struct hal_tx_queue_exten *)tlv_data;
+
+ info[0] = __le32_to_cpu(tx_q_exten->info0);
+
+ tx_ppdu_info->rx_status.frame_control =
+ u32_get_bits(info[0],
+ HAL_TX_Q_EXT_INFO0_FRAME_CTRL);
+ tx_ppdu_info->rx_status.fc_valid = true;
+ break;
+ }
+
+ case HAL_TX_FES_STATUS_START: {
+ struct hal_tx_fes_status_start *tx_fes_start =
+ (struct hal_tx_fes_status_start *)tlv_data;
+
+ info[0] = __le32_to_cpu(tx_fes_start->info0);
+
+ tx_ppdu_info->rx_status.medium_prot_type =
+ u32_get_bits(info[0],
+ HAL_TX_FES_STATUS_START_INFO0_MEDIUM_PROT_TYPE);
+ break;
+ }
+
+ case HAL_TX_FES_STATUS_PROT: {
+ struct hal_tx_fes_status_prot *tx_fes_status =
+ (struct hal_tx_fes_status_prot *)tlv_data;
+ u32 start_timestamp;
+ u32 end_timestamp;
+
+ info[0] = __le32_to_cpu(tx_fes_status->info0);
+ info[1] = __le32_to_cpu(tx_fes_status->info1);
+
+ start_timestamp =
+ u32_get_bits(info[0],
+ HAL_TX_FES_STAT_PROT_INFO0_STRT_FRM_TS_15_0);
+ start_timestamp |=
+ u32_get_bits(info[0],
+ HAL_TX_FES_STAT_PROT_INFO0_STRT_FRM_TS_31_16) << 15;
+ end_timestamp =
+ u32_get_bits(info[1],
+ HAL_TX_FES_STAT_PROT_INFO1_END_FRM_TS_15_0);
+ end_timestamp |=
+ u32_get_bits(info[1],
+ HAL_TX_FES_STAT_PROT_INFO1_END_FRM_TS_31_16) << 15;
+ tx_ppdu_info->rx_status.rx_duration = end_timestamp - start_timestamp;
+
+ ath12k_dp_mon_tx_gen_prot_frame(tx_ppdu_info);
+ break;
+ }
+
+ case HAL_TX_FES_STATUS_START_PPDU:
+ case HAL_TX_FES_STATUS_START_PROT: {
+ struct hal_tx_fes_status_start_prot *tx_fes_stat_start =
+ (struct hal_tx_fes_status_start_prot *)tlv_data;
+ u64 ppdu_ts;
+
+ info[0] = __le32_to_cpu(tx_fes_stat_start->info0);
+
+ tx_ppdu_info->rx_status.ppdu_ts =
+ u32_get_bits(info[0],
+ HAL_TX_FES_STAT_STRT_INFO0_PROT_TS_LOWER_32);
+ ppdu_ts = (u32_get_bits(info[1],
+ HAL_TX_FES_STAT_STRT_INFO1_PROT_TS_UPPER_32));
+ tx_ppdu_info->rx_status.ppdu_ts |= ppdu_ts << 32;
+ break;
+ }
+
+ case HAL_TX_FES_STATUS_USER_PPDU: {
+ struct hal_tx_fes_status_user_ppdu *tx_fes_usr_ppdu =
+ (struct hal_tx_fes_status_user_ppdu *)tlv_data;
+
+ info[0] = __le32_to_cpu(tx_fes_usr_ppdu->info0);
+
+ tx_ppdu_info->rx_status.rx_duration =
+ u32_get_bits(info[0],
+ HAL_TX_FES_STAT_USR_PPDU_INFO0_DURATION);
+ break;
+ }
+
+ case HAL_MACTX_HE_SIG_A_SU:
+ ath12k_dp_mon_parse_he_sig_su(tlv_data, &tx_ppdu_info->rx_status);
+ break;
+
+ case HAL_MACTX_HE_SIG_A_MU_DL:
+ ath12k_dp_mon_parse_he_sig_mu(tlv_data, &tx_ppdu_info->rx_status);
+ break;
+
+ case HAL_MACTX_HE_SIG_B1_MU:
+ ath12k_dp_mon_parse_he_sig_b1_mu(tlv_data, &tx_ppdu_info->rx_status);
+ break;
+
+ case HAL_MACTX_HE_SIG_B2_MU:
+ ath12k_dp_mon_parse_he_sig_b2_mu(tlv_data, &tx_ppdu_info->rx_status);
+ break;
+
+ case HAL_MACTX_HE_SIG_B2_OFDMA:
+ ath12k_dp_mon_parse_he_sig_b2_ofdma(tlv_data, &tx_ppdu_info->rx_status);
+ break;
+
+ case HAL_MACTX_VHT_SIG_A:
+ ath12k_dp_mon_parse_vht_sig_a(tlv_data, &tx_ppdu_info->rx_status);
+ break;
+
+ case HAL_MACTX_L_SIG_A:
+ ath12k_dp_mon_parse_l_sig_a(tlv_data, &tx_ppdu_info->rx_status);
+ break;
+
+ case HAL_MACTX_L_SIG_B:
+ ath12k_dp_mon_parse_l_sig_b(tlv_data, &tx_ppdu_info->rx_status);
+ break;
+
+ case HAL_RX_FRAME_BITMAP_ACK: {
+ struct hal_rx_frame_bitmap_ack *fbm_ack =
+ (struct hal_rx_frame_bitmap_ack *)tlv_data;
+ u32 addr_32;
+ u16 addr_16;
+
+ info[0] = __le32_to_cpu(fbm_ack->info0);
+ info[1] = __le32_to_cpu(fbm_ack->info1);
+
+ addr_32 = u32_get_bits(info[0],
+ HAL_RX_FBM_ACK_INFO0_ADDR1_31_0);
+ addr_16 = u32_get_bits(info[1],
+ HAL_RX_FBM_ACK_INFO1_ADDR1_47_32);
+ ath12k_dp_get_mac_addr(addr_32, addr_16, tx_ppdu_info->rx_status.addr1);
+
+ ath12k_dp_mon_tx_gen_ack_frame(tx_ppdu_info);
+ break;
+ }
+
+ case HAL_MACTX_PHY_DESC: {
+ struct hal_tx_phy_desc *tx_phy_desc =
+ (struct hal_tx_phy_desc *)tlv_data;
+
+ info[0] = __le32_to_cpu(tx_phy_desc->info0);
+ info[1] = __le32_to_cpu(tx_phy_desc->info1);
+ info[2] = __le32_to_cpu(tx_phy_desc->info2);
+ info[3] = __le32_to_cpu(tx_phy_desc->info3);
+
+ tx_ppdu_info->rx_status.beamformed =
+ u32_get_bits(info[0],
+ HAL_TX_PHY_DESC_INFO0_BF_TYPE);
+ tx_ppdu_info->rx_status.preamble_type =
+ u32_get_bits(info[0],
+ HAL_TX_PHY_DESC_INFO0_PREAMBLE_11B);
+ tx_ppdu_info->rx_status.mcs =
+ u32_get_bits(info[1],
+ HAL_TX_PHY_DESC_INFO1_MCS);
+ tx_ppdu_info->rx_status.ltf_size =
+ u32_get_bits(info[3],
+ HAL_TX_PHY_DESC_INFO3_LTF_SIZE);
+ tx_ppdu_info->rx_status.nss =
+ u32_get_bits(info[2],
+ HAL_TX_PHY_DESC_INFO2_NSS);
+ tx_ppdu_info->rx_status.chan_num =
+ u32_get_bits(info[3],
+ HAL_TX_PHY_DESC_INFO3_ACTIVE_CHANNEL);
+ tx_ppdu_info->rx_status.bw =
+ u32_get_bits(info[0],
+ HAL_TX_PHY_DESC_INFO0_BANDWIDTH);
+ break;
+ }
+
+ case HAL_TX_MPDU_START: {
+ struct dp_mon_mpdu *mon_mpdu = tx_ppdu_info->tx_mon_mpdu;
+
+ mon_mpdu = kzalloc(sizeof(*mon_mpdu), GFP_ATOMIC);
+ if (!mon_mpdu)
+ return DP_MON_TX_STATUS_PPDU_NOT_DONE;
+ status = DP_MON_TX_MPDU_START;
+ break;
+ }
+
+ case HAL_MON_BUF_ADDR: {
+ struct dp_rxdma_ring *buf_ring = &ab->dp.tx_mon_buf_ring;
+ struct dp_mon_packet_info *packet_info =
+ (struct dp_mon_packet_info *)tlv_data;
+ int buf_id = u32_get_bits(packet_info->cookie,
+ DP_RXDMA_BUF_COOKIE_BUF_ID);
+ struct sk_buff *msdu;
+ struct dp_mon_mpdu *mon_mpdu = tx_ppdu_info->tx_mon_mpdu;
+ struct ath12k_skb_rxcb *rxcb;
+
+ spin_lock_bh(&buf_ring->idr_lock);
+ msdu = idr_remove(&buf_ring->bufs_idr, buf_id);
+ spin_unlock_bh(&buf_ring->idr_lock);
+
+ if (unlikely(!msdu)) {
+ ath12k_warn(ab, "montior destination with invalid buf_id %d\n",
+ buf_id);
+ return DP_MON_TX_STATUS_PPDU_NOT_DONE;
+ }
+
+ rxcb = ATH12K_SKB_RXCB(msdu);
+ dma_unmap_single(ab->dev, rxcb->paddr,
+ msdu->len + skb_tailroom(msdu),
+ DMA_FROM_DEVICE);
+
+ if (!mon_mpdu->head)
+ mon_mpdu->head = msdu;
+ else if (mon_mpdu->tail)
+ mon_mpdu->tail->next = msdu;
+
+ mon_mpdu->tail = msdu;
+
+ ath12k_dp_mon_buf_replenish(ab, buf_ring, 1);
+ status = DP_MON_TX_BUFFER_ADDR;
+ break;
+ }
+
+ case HAL_TX_MPDU_END:
+ list_add_tail(&tx_ppdu_info->tx_mon_mpdu->list,
+ &tx_ppdu_info->dp_tx_mon_mpdu_list);
+ break;
+ }
+
+ return status;
+}
+
+enum dp_mon_tx_tlv_status
+ath12k_dp_mon_tx_status_get_num_user(u16 tlv_tag,
+ struct hal_tlv_hdr *tx_tlv,
+ u8 *num_users)
+{
+ u32 tlv_status = DP_MON_TX_STATUS_PPDU_NOT_DONE;
+ u32 info0;
+
+ switch (tlv_tag) {
+ case HAL_TX_FES_SETUP: {
+ struct hal_tx_fes_setup *tx_fes_setup =
+ (struct hal_tx_fes_setup *)tx_tlv;
+
+ info0 = __le32_to_cpu(tx_fes_setup->info0);
+
+ *num_users = u32_get_bits(info0, HAL_TX_FES_SETUP_INFO0_NUM_OF_USERS);
+ tlv_status = DP_MON_TX_FES_SETUP;
+ break;
+ }
+
+ case HAL_RX_RESPONSE_REQUIRED_INFO: {
+ /* TODO: need to update *num_users */
+ tlv_status = DP_MON_RX_RESPONSE_REQUIRED_INFO;
+ break;
+ }
+ }
+
+ return tlv_status;
+}
+
+static void
+ath12k_dp_mon_tx_process_ppdu_info(struct ath12k *ar, int mac_id,
+ struct napi_struct *napi,
+ struct dp_mon_tx_ppdu_info *tx_ppdu_info)
+{
+ struct dp_mon_mpdu *tmp, *mon_mpdu;
+ struct sk_buff *head_msdu;
+
+ list_for_each_entry_safe(mon_mpdu, tmp,
+ &tx_ppdu_info->dp_tx_mon_mpdu_list, list) {
+ list_del(&mon_mpdu->list);
+ head_msdu = mon_mpdu->head;
+
+ if (head_msdu)
+ ath12k_dp_mon_rx_deliver(ar, mac_id, head_msdu,
+ &tx_ppdu_info->rx_status, napi);
+
+ kfree(mon_mpdu);
+ }
+}
+
+enum hal_rx_mon_status
+ath12k_dp_mon_tx_parse_mon_status(struct ath12k *ar,
+ struct ath12k_mon_data *pmon,
+ int mac_id,
+ struct sk_buff *skb,
+ struct napi_struct *napi,
+ u32 ppdu_id)
+{
+ struct ath12k_base *ab = ar->ab;
+ struct dp_mon_tx_ppdu_info *tx_prot_ppdu_info, *tx_data_ppdu_info;
+ struct hal_tlv_hdr *tlv;
+ u8 *ptr = skb->data;
+ u16 tlv_tag;
+ u16 tlv_len;
+ u32 tlv_userid = 0;
+ u8 num_user;
+ u32 tlv_status = DP_MON_TX_STATUS_PPDU_NOT_DONE;
+
+ tx_prot_ppdu_info = ath12k_dp_mon_tx_get_ppdu_info(pmon, ppdu_id,
+ DP_MON_TX_PROT_PPDU_INFO);
+ if (!tx_prot_ppdu_info)
+ return -ENOMEM;
+
+ tlv = (struct hal_tlv_hdr *)ptr;
+ tlv_tag = le32_get_bits(tlv->tl, HAL_TLV_HDR_TAG);
+
+ tlv_status = ath12k_dp_mon_tx_status_get_num_user(tlv_tag, tlv, &num_user);
+ if (tlv_status == DP_MON_TX_STATUS_PPDU_NOT_DONE || !num_user)
+ return -EINVAL;
+
+ tx_data_ppdu_info = ath12k_dp_mon_tx_get_ppdu_info(pmon, ppdu_id,
+ DP_MON_TX_DATA_PPDU_INFO);
+ if (!tx_data_ppdu_info)
+ return -ENOMEM;
+
+ do {
+ tlv = (struct hal_tlv_hdr *)ptr;
+ tlv_tag = le32_get_bits(tlv->tl, HAL_TLV_HDR_TAG);
+ tlv_len = le32_get_bits(tlv->tl, HAL_TLV_HDR_LEN);
+ tlv_userid = le32_get_bits(tlv->tl, HAL_TLV_USR_ID);
+
+ tlv_status = ath12k_dp_mon_tx_parse_status_tlv(ab, pmon,
+ tlv_tag, ptr,
+ tlv_userid);
+ ptr += tlv_len;
+ ptr = PTR_ALIGN(ptr, HAL_TLV_ALIGN);
+ if ((ptr - skb->data) >= DP_TX_MONITOR_BUF_SIZE)
+ break;
+ } while (tlv_status != DP_MON_TX_FES_STATUS_END);
+
+ ath12k_dp_mon_tx_process_ppdu_info(ar, mac_id, napi, tx_data_ppdu_info);
+ ath12k_dp_mon_tx_process_ppdu_info(ar, mac_id, napi, tx_prot_ppdu_info);
+
+ return tlv_status;
+}
+
+int ath12k_dp_mon_srng_process(struct ath12k *ar, int mac_id, int *budget,
+ enum dp_monitor_mode monitor_mode,
+ struct napi_struct *napi)
+{
+ struct hal_mon_dest_desc *mon_dst_desc;
+ struct ath12k_pdev_dp *pdev_dp = &ar->dp;
+ struct ath12k_mon_data *pmon = (struct ath12k_mon_data *)&pdev_dp->mon_data;
+ struct ath12k_base *ab = ar->ab;
+ struct ath12k_dp *dp = &ab->dp;
+ struct sk_buff *skb;
+ struct ath12k_skb_rxcb *rxcb;
+ struct dp_srng *mon_dst_ring;
+ struct hal_srng *srng;
+ struct dp_rxdma_ring *buf_ring;
+ u64 cookie;
+ u32 ppdu_id;
+ int num_buffs_reaped = 0, srng_id, buf_id;
+ u8 dest_idx = 0, i;
+ bool end_of_ppdu;
+ struct hal_rx_mon_ppdu_info *ppdu_info;
+ struct ath12k_peer *peer = NULL;
+
+ ppdu_info = &pmon->mon_ppdu_info;
+ memset(ppdu_info, 0, sizeof(*ppdu_info));
+ ppdu_info->peer_id = HAL_INVALID_PEERID;
+
+ srng_id = ath12k_hw_mac_id_to_srng_id(ab->hw_params, mac_id);
+
+ if (monitor_mode == ATH12K_DP_RX_MONITOR_MODE) {
+ mon_dst_ring = &pdev_dp->rxdma_mon_dst_ring[srng_id];
+ buf_ring = &dp->rxdma_mon_buf_ring;
+ } else {
+ mon_dst_ring = &pdev_dp->tx_mon_dst_ring[srng_id];
+ buf_ring = &dp->tx_mon_buf_ring;
+ }
+
+ srng = &ab->hal.srng_list[mon_dst_ring->ring_id];
+
+ spin_lock_bh(&srng->lock);
+ ath12k_hal_srng_access_begin(ab, srng);
+
+ while (likely(*budget)) {
+ *budget -= 1;
+ mon_dst_desc = ath12k_hal_srng_dst_peek(ab, srng);
+ if (unlikely(!mon_dst_desc))
+ break;
+
+ cookie = le32_to_cpu(mon_dst_desc->cookie);
+ buf_id = u32_get_bits(cookie, DP_RXDMA_BUF_COOKIE_BUF_ID);
+
+ spin_lock_bh(&buf_ring->idr_lock);
+ skb = idr_remove(&buf_ring->bufs_idr, buf_id);
+ spin_unlock_bh(&buf_ring->idr_lock);
+
+ if (unlikely(!skb)) {
+ ath12k_warn(ab, "montior destination with invalid buf_id %d\n",
+ buf_id);
+ goto move_next;
+ }
+
+ rxcb = ATH12K_SKB_RXCB(skb);
+ dma_unmap_single(ab->dev, rxcb->paddr,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+
+ pmon->dest_skb_q[dest_idx] = skb;
+ dest_idx++;
+ ppdu_id = le32_to_cpu(mon_dst_desc->ppdu_id);
+ end_of_ppdu = le32_get_bits(mon_dst_desc->info0,
+ HAL_MON_DEST_INFO0_END_OF_PPDU);
+ if (!end_of_ppdu)
+ continue;
+
+ for (i = 0; i < dest_idx; i++) {
+ skb = pmon->dest_skb_q[i];
+
+ if (monitor_mode == ATH12K_DP_RX_MONITOR_MODE)
+ ath12k_dp_mon_rx_parse_mon_status(ar, pmon, mac_id,
+ skb, napi);
+ else
+ ath12k_dp_mon_tx_parse_mon_status(ar, pmon, mac_id,
+ skb, napi, ppdu_id);
+
+ peer = ath12k_peer_find_by_id(ab, ppdu_info->peer_id);
+
+ if (!peer || !peer->sta) {
+ ath12k_dbg(ab, ATH12K_DBG_DATA,
+ "failed to find the peer with peer_id %d\n",
+ ppdu_info->peer_id);
+ dev_kfree_skb_any(skb);
+ continue;
+ }
+
+ dev_kfree_skb_any(skb);
+ pmon->dest_skb_q[i] = NULL;
+ }
+
+ dest_idx = 0;
+move_next:
+ ath12k_dp_mon_buf_replenish(ab, buf_ring, 1);
+ ath12k_hal_srng_src_get_next_entry(ab, srng);
+ num_buffs_reaped++;
+ }
+
+ ath12k_hal_srng_access_end(ab, srng);
+ spin_unlock_bh(&srng->lock);
+
+ return num_buffs_reaped;
+}
+
+static void
+ath12k_dp_mon_rx_update_peer_rate_table_stats(struct ath12k_rx_peer_stats *rx_stats,
+ struct hal_rx_mon_ppdu_info *ppdu_info,
+ struct hal_rx_user_status *user_stats,
+ u32 num_msdu)
+{
+ u32 rate_idx = 0;
+ u32 mcs_idx = (user_stats) ? user_stats->mcs : ppdu_info->mcs;
+ u32 nss_idx = (user_stats) ? user_stats->nss - 1 : ppdu_info->nss - 1;
+ u32 bw_idx = ppdu_info->bw;
+ u32 gi_idx = ppdu_info->gi;
+
+ if ((mcs_idx > HAL_RX_MAX_MCS_HE) || (nss_idx >= HAL_RX_MAX_NSS) ||
+ (bw_idx >= HAL_RX_BW_MAX) || (gi_idx >= HAL_RX_GI_MAX)) {
+ return;
+ }
+
+ if (ppdu_info->preamble_type == HAL_RX_PREAMBLE_11N ||
+ ppdu_info->preamble_type == HAL_RX_PREAMBLE_11AC) {
+ rate_idx = mcs_idx * 8 + 8 * 10 * nss_idx;
+ rate_idx += bw_idx * 2 + gi_idx;
+ } else if (ppdu_info->preamble_type == HAL_RX_PREAMBLE_11AX) {
+ gi_idx = ath12k_he_gi_to_nl80211_he_gi(ppdu_info->gi);
+ rate_idx = mcs_idx * 12 + 12 * 12 * nss_idx;
+ rate_idx += bw_idx * 3 + gi_idx;
+ } else {
+ return;
+ }
+
+ rx_stats->pkt_stats.rx_rate[rate_idx] += num_msdu;
+ if (user_stats)
+ rx_stats->byte_stats.rx_rate[rate_idx] += user_stats->mpdu_ok_byte_count;
+ else
+ rx_stats->byte_stats.rx_rate[rate_idx] += ppdu_info->mpdu_len;
+}
+
+static void ath12k_dp_mon_rx_update_peer_su_stats(struct ath12k *ar,
+ struct ath12k_sta *arsta,
+ struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+ struct ath12k_rx_peer_stats *rx_stats = arsta->rx_stats;
+ u32 num_msdu;
+
+ if (!rx_stats)
+ return;
+
+ arsta->rssi_comb = ppdu_info->rssi_comb;
+
+ num_msdu = ppdu_info->tcp_msdu_count + ppdu_info->tcp_ack_msdu_count +
+ ppdu_info->udp_msdu_count + ppdu_info->other_msdu_count;
+
+ rx_stats->num_msdu += num_msdu;
+ rx_stats->tcp_msdu_count += ppdu_info->tcp_msdu_count +
+ ppdu_info->tcp_ack_msdu_count;
+ rx_stats->udp_msdu_count += ppdu_info->udp_msdu_count;
+ rx_stats->other_msdu_count += ppdu_info->other_msdu_count;
+
+ if (ppdu_info->preamble_type == HAL_RX_PREAMBLE_11A ||
+ ppdu_info->preamble_type == HAL_RX_PREAMBLE_11B) {
+ ppdu_info->nss = 1;
+ ppdu_info->mcs = HAL_RX_MAX_MCS;
+ ppdu_info->tid = IEEE80211_NUM_TIDS;
+ }
+
+ if (ppdu_info->ldpc < HAL_RX_SU_MU_CODING_MAX)
+ rx_stats->coding_count[ppdu_info->ldpc] += num_msdu;
+
+ if (ppdu_info->tid <= IEEE80211_NUM_TIDS)
+ rx_stats->tid_count[ppdu_info->tid] += num_msdu;
+
+ if (ppdu_info->preamble_type < HAL_RX_PREAMBLE_MAX)
+ rx_stats->pream_cnt[ppdu_info->preamble_type] += num_msdu;
+
+ if (ppdu_info->reception_type < HAL_RX_RECEPTION_TYPE_MAX)
+ rx_stats->reception_type[ppdu_info->reception_type] += num_msdu;
+
+ if (ppdu_info->is_stbc)
+ rx_stats->stbc_count += num_msdu;
+
+ if (ppdu_info->beamformed)
+ rx_stats->beamformed_count += num_msdu;
+
+ if (ppdu_info->num_mpdu_fcs_ok > 1)
+ rx_stats->ampdu_msdu_count += num_msdu;
+ else
+ rx_stats->non_ampdu_msdu_count += num_msdu;
+
+ rx_stats->num_mpdu_fcs_ok += ppdu_info->num_mpdu_fcs_ok;
+ rx_stats->num_mpdu_fcs_err += ppdu_info->num_mpdu_fcs_err;
+ rx_stats->dcm_count += ppdu_info->dcm;
+
+ rx_stats->rx_duration += ppdu_info->rx_duration;
+ arsta->rx_duration = rx_stats->rx_duration;
+
+ if (ppdu_info->nss > 0 && ppdu_info->nss <= HAL_RX_MAX_NSS) {
+ rx_stats->pkt_stats.nss_count[ppdu_info->nss - 1] += num_msdu;
+ rx_stats->byte_stats.nss_count[ppdu_info->nss - 1] += ppdu_info->mpdu_len;
+ }
+
+ if (ppdu_info->preamble_type == HAL_RX_PREAMBLE_11N &&
+ ppdu_info->mcs <= HAL_RX_MAX_MCS_HT) {
+ rx_stats->pkt_stats.ht_mcs_count[ppdu_info->mcs] += num_msdu;
+ rx_stats->byte_stats.ht_mcs_count[ppdu_info->mcs] += ppdu_info->mpdu_len;
+ /* To fit into rate table for HT packets */
+ ppdu_info->mcs = ppdu_info->mcs % 8;
+ }
+
+ if (ppdu_info->preamble_type == HAL_RX_PREAMBLE_11AC &&
+ ppdu_info->mcs <= HAL_RX_MAX_MCS_VHT) {
+ rx_stats->pkt_stats.vht_mcs_count[ppdu_info->mcs] += num_msdu;
+ rx_stats->byte_stats.vht_mcs_count[ppdu_info->mcs] += ppdu_info->mpdu_len;
+ }
+
+ if (ppdu_info->preamble_type == HAL_RX_PREAMBLE_11AX &&
+ ppdu_info->mcs <= HAL_RX_MAX_MCS_HE) {
+ rx_stats->pkt_stats.he_mcs_count[ppdu_info->mcs] += num_msdu;
+ rx_stats->byte_stats.he_mcs_count[ppdu_info->mcs] += ppdu_info->mpdu_len;
+ }
+
+ if ((ppdu_info->preamble_type == HAL_RX_PREAMBLE_11A ||
+ ppdu_info->preamble_type == HAL_RX_PREAMBLE_11B) &&
+ ppdu_info->rate < HAL_RX_LEGACY_RATE_INVALID) {
+ rx_stats->pkt_stats.legacy_count[ppdu_info->rate] += num_msdu;
+ rx_stats->byte_stats.legacy_count[ppdu_info->rate] += ppdu_info->mpdu_len;
+ }
+
+ if (ppdu_info->gi < HAL_RX_GI_MAX) {
+ rx_stats->pkt_stats.gi_count[ppdu_info->gi] += num_msdu;
+ rx_stats->byte_stats.gi_count[ppdu_info->gi] += ppdu_info->mpdu_len;
+ }
+
+ if (ppdu_info->bw < HAL_RX_BW_MAX) {
+ rx_stats->pkt_stats.bw_count[ppdu_info->bw] += num_msdu;
+ rx_stats->byte_stats.bw_count[ppdu_info->bw] += ppdu_info->mpdu_len;
+ }
+
+ ath12k_dp_mon_rx_update_peer_rate_table_stats(rx_stats, ppdu_info,
+ NULL, num_msdu);
+}
+
+void ath12k_dp_mon_rx_process_ulofdma(struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+ struct hal_rx_user_status *rx_user_status;
+ u32 num_users, i, mu_ul_user_v0_word0, mu_ul_user_v0_word1, ru_size;
+
+ if (!(ppdu_info->reception_type == HAL_RX_RECEPTION_TYPE_MU_MIMO ||
+ ppdu_info->reception_type == HAL_RX_RECEPTION_TYPE_MU_OFDMA ||
+ ppdu_info->reception_type == HAL_RX_RECEPTION_TYPE_MU_OFDMA_MIMO))
+ return;
+
+ num_users = ppdu_info->num_users;
+ if (num_users > HAL_MAX_UL_MU_USERS)
+ num_users = HAL_MAX_UL_MU_USERS;
+
+ for (i = 0; i < num_users; i++) {
+ rx_user_status = &ppdu_info->userstats[i];
+ mu_ul_user_v0_word0 =
+ rx_user_status->ul_ofdma_user_v0_word0;
+ mu_ul_user_v0_word1 =
+ rx_user_status->ul_ofdma_user_v0_word1;
+
+ if (u32_get_bits(mu_ul_user_v0_word0,
+ HAL_RX_UL_OFDMA_USER_INFO_V0_W0_VALID) &&
+ !u32_get_bits(mu_ul_user_v0_word0,
+ HAL_RX_UL_OFDMA_USER_INFO_V0_W0_VER)) {
+ rx_user_status->mcs =
+ u32_get_bits(mu_ul_user_v0_word1,
+ HAL_RX_UL_OFDMA_USER_INFO_V0_W1_MCS);
+ rx_user_status->nss =
+ u32_get_bits(mu_ul_user_v0_word1,
+ HAL_RX_UL_OFDMA_USER_INFO_V0_W1_NSS) + 1;
+
+ rx_user_status->ofdma_info_valid = 1;
+ rx_user_status->ul_ofdma_ru_start_index =
+ u32_get_bits(mu_ul_user_v0_word1,
+ HAL_RX_UL_OFDMA_USER_INFO_V0_W1_RU_START);
+
+ ru_size = u32_get_bits(mu_ul_user_v0_word1,
+ HAL_RX_UL_OFDMA_USER_INFO_V0_W1_RU_SIZE);
+ rx_user_status->ul_ofdma_ru_width = ru_size;
+ rx_user_status->ul_ofdma_ru_size = ru_size;
+ }
+ rx_user_status->ldpc = u32_get_bits(mu_ul_user_v0_word1,
+ HAL_RX_UL_OFDMA_USER_INFO_V0_W1_LDPC);
+ }
+ ppdu_info->ldpc = 1;
+}
+
+static void
+ath12k_dp_mon_rx_update_user_stats(struct ath12k *ar,
+ struct hal_rx_mon_ppdu_info *ppdu_info,
+ u32 uid)
+{
+ struct ath12k_sta *arsta = NULL;
+ struct ath12k_rx_peer_stats *rx_stats = NULL;
+ struct hal_rx_user_status *user_stats = &ppdu_info->userstats[uid];
+ struct ath12k_peer *peer;
+ u32 num_msdu;
+
+ if (user_stats->ast_index == 0 || user_stats->ast_index == 0xFFFF)
+ return;
+
+ peer = ath12k_peer_find_by_ast(ar->ab, user_stats->ast_index);
+
+ if (!peer) {
+ ath12k_warn(ar->ab, "peer ast idx %d can't be found\n",
+ user_stats->ast_index);
+ return;
+ }
+
+ arsta = (struct ath12k_sta *)peer->sta->drv_priv;
+ rx_stats = arsta->rx_stats;
+
+ if (!rx_stats)
+ return;
+
+ arsta->rssi_comb = ppdu_info->rssi_comb;
+
+ num_msdu = user_stats->tcp_msdu_count + user_stats->tcp_ack_msdu_count +
+ user_stats->udp_msdu_count + user_stats->other_msdu_count;
+
+ rx_stats->num_msdu += num_msdu;
+ rx_stats->tcp_msdu_count += user_stats->tcp_msdu_count +
+ user_stats->tcp_ack_msdu_count;
+ rx_stats->udp_msdu_count += user_stats->udp_msdu_count;
+ rx_stats->other_msdu_count += user_stats->other_msdu_count;
+
+ if (ppdu_info->ldpc < HAL_RX_SU_MU_CODING_MAX)
+ rx_stats->coding_count[ppdu_info->ldpc] += num_msdu;
+
+ if (user_stats->tid <= IEEE80211_NUM_TIDS)
+ rx_stats->tid_count[user_stats->tid] += num_msdu;
+
+ if (user_stats->preamble_type < HAL_RX_PREAMBLE_MAX)
+ rx_stats->pream_cnt[user_stats->preamble_type] += num_msdu;
+
+ if (ppdu_info->reception_type < HAL_RX_RECEPTION_TYPE_MAX)
+ rx_stats->reception_type[ppdu_info->reception_type] += num_msdu;
+
+ if (ppdu_info->is_stbc)
+ rx_stats->stbc_count += num_msdu;
+
+ if (ppdu_info->beamformed)
+ rx_stats->beamformed_count += num_msdu;
+
+ if (user_stats->mpdu_cnt_fcs_ok > 1)
+ rx_stats->ampdu_msdu_count += num_msdu;
+ else
+ rx_stats->non_ampdu_msdu_count += num_msdu;
+
+ rx_stats->num_mpdu_fcs_ok += user_stats->mpdu_cnt_fcs_ok;
+ rx_stats->num_mpdu_fcs_err += user_stats->mpdu_cnt_fcs_err;
+ rx_stats->dcm_count += ppdu_info->dcm;
+ if (ppdu_info->reception_type == HAL_RX_RECEPTION_TYPE_MU_OFDMA ||
+ ppdu_info->reception_type == HAL_RX_RECEPTION_TYPE_MU_OFDMA_MIMO)
+ rx_stats->ru_alloc_cnt[user_stats->ul_ofdma_ru_size] += num_msdu;
+
+ rx_stats->rx_duration += ppdu_info->rx_duration;
+ arsta->rx_duration = rx_stats->rx_duration;
+
+ if (user_stats->nss > 0 && user_stats->nss <= HAL_RX_MAX_NSS) {
+ rx_stats->pkt_stats.nss_count[user_stats->nss - 1] += num_msdu;
+ rx_stats->byte_stats.nss_count[user_stats->nss - 1] +=
+ user_stats->mpdu_ok_byte_count;
+ }
+
+ if (user_stats->preamble_type == HAL_RX_PREAMBLE_11AX &&
+ user_stats->mcs <= HAL_RX_MAX_MCS_HE) {
+ rx_stats->pkt_stats.he_mcs_count[user_stats->mcs] += num_msdu;
+ rx_stats->byte_stats.he_mcs_count[user_stats->mcs] +=
+ user_stats->mpdu_ok_byte_count;
+ }
+
+ if (ppdu_info->gi < HAL_RX_GI_MAX) {
+ rx_stats->pkt_stats.gi_count[ppdu_info->gi] += num_msdu;
+ rx_stats->byte_stats.gi_count[ppdu_info->gi] +=
+ user_stats->mpdu_ok_byte_count;
+ }
+
+ if (ppdu_info->bw < HAL_RX_BW_MAX) {
+ rx_stats->pkt_stats.bw_count[ppdu_info->bw] += num_msdu;
+ rx_stats->byte_stats.bw_count[ppdu_info->bw] +=
+ user_stats->mpdu_ok_byte_count;
+ }
+
+ ath12k_dp_mon_rx_update_peer_rate_table_stats(rx_stats, ppdu_info,
+ user_stats, num_msdu);
+}
+
+static void
+ath12k_dp_mon_rx_update_peer_mu_stats(struct ath12k *ar,
+ struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+ u32 num_users, i;
+
+ num_users = ppdu_info->num_users;
+ if (num_users > HAL_MAX_UL_MU_USERS)
+ num_users = HAL_MAX_UL_MU_USERS;
+
+ for (i = 0; i < num_users; i++)
+ ath12k_dp_mon_rx_update_user_stats(ar, ppdu_info, i);
+}
+
+int ath12k_dp_mon_rx_process_stats(struct ath12k *ar, int mac_id,
+ struct napi_struct *napi, int *budget)
+{
+ struct ath12k_base *ab = ar->ab;
+ struct ath12k_pdev_dp *pdev_dp = &ar->dp;
+ struct ath12k_mon_data *pmon = (struct ath12k_mon_data *)&pdev_dp->mon_data;
+ struct hal_rx_mon_ppdu_info *ppdu_info = &pmon->mon_ppdu_info;
+ struct ath12k_dp *dp = &ab->dp;
+ struct hal_mon_dest_desc *mon_dst_desc;
+ struct sk_buff *skb;
+ struct ath12k_skb_rxcb *rxcb;
+ struct dp_srng *mon_dst_ring;
+ struct hal_srng *srng;
+ struct dp_rxdma_ring *buf_ring;
+ struct ath12k_sta *arsta = NULL;
+ struct ath12k_peer *peer;
+ u64 cookie;
+ int num_buffs_reaped = 0, srng_id, buf_id;
+ u8 dest_idx = 0, i;
+ bool end_of_ppdu;
+ u32 hal_status;
+
+ srng_id = ath12k_hw_mac_id_to_srng_id(ab->hw_params, mac_id);
+ mon_dst_ring = &pdev_dp->rxdma_mon_dst_ring[srng_id];
+ buf_ring = &dp->rxdma_mon_buf_ring;
+
+ srng = &ab->hal.srng_list[mon_dst_ring->ring_id];
+ spin_lock_bh(&srng->lock);
+ ath12k_hal_srng_access_begin(ab, srng);
+
+ while (likely(*budget)) {
+ *budget -= 1;
+ mon_dst_desc = ath12k_hal_srng_dst_peek(ab, srng);
+ if (unlikely(!mon_dst_desc))
+ break;
+ cookie = le32_to_cpu(mon_dst_desc->cookie);
+ buf_id = u32_get_bits(cookie, DP_RXDMA_BUF_COOKIE_BUF_ID);
+
+ spin_lock_bh(&buf_ring->idr_lock);
+ skb = idr_remove(&buf_ring->bufs_idr, buf_id);
+ spin_unlock_bh(&buf_ring->idr_lock);
+
+ if (unlikely(!skb)) {
+ ath12k_warn(ab, "montior destination with invalid buf_id %d\n",
+ buf_id);
+ goto move_next;
+ }
+
+ rxcb = ATH12K_SKB_RXCB(skb);
+ dma_unmap_single(ab->dev, rxcb->paddr,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+ pmon->dest_skb_q[dest_idx] = skb;
+ dest_idx++;
+ end_of_ppdu = le32_get_bits(mon_dst_desc->info0,
+ HAL_MON_DEST_INFO0_END_OF_PPDU);
+ if (!end_of_ppdu)
+ continue;
+
+ for (i = 0; i < dest_idx; i++) {
+ skb = pmon->dest_skb_q[i];
+ hal_status = ath12k_dp_mon_parse_rx_dest(ab, pmon, skb);
+
+ if (ppdu_info->peer_id == HAL_INVALID_PEERID ||
+ hal_status != HAL_RX_MON_STATUS_PPDU_DONE) {
+ dev_kfree_skb_any(skb);
+ continue;
+ }
+
+ rcu_read_lock();
+ spin_lock_bh(&ab->base_lock);
+ peer = ath12k_peer_find_by_id(ab, ppdu_info->peer_id);
+ if (!peer || !peer->sta) {
+ ath12k_dbg(ab, ATH12K_DBG_DATA,
+ "failed to find the peer with peer_id %d\n",
+ ppdu_info->peer_id);
+ spin_unlock_bh(&ab->base_lock);
+ rcu_read_unlock();
+ dev_kfree_skb_any(skb);
+ continue;
+ }
+
+ if (ppdu_info->reception_type == HAL_RX_RECEPTION_TYPE_SU) {
+ arsta = (struct ath12k_sta *)peer->sta->drv_priv;
+ ath12k_dp_mon_rx_update_peer_su_stats(ar, arsta,
+ ppdu_info);
+ } else if ((ppdu_info->fc_valid) &&
+ (ppdu_info->ast_index != HAL_AST_IDX_INVALID)) {
+ ath12k_dp_mon_rx_process_ulofdma(ppdu_info);
+ ath12k_dp_mon_rx_update_peer_mu_stats(ar, ppdu_info);
+ }
+
+ spin_unlock_bh(&ab->base_lock);
+ rcu_read_unlock();
+ dev_kfree_skb_any(skb);
+ memset(ppdu_info, 0, sizeof(*ppdu_info));
+ ppdu_info->peer_id = HAL_INVALID_PEERID;
+ }
+
+ dest_idx = 0;
+move_next:
+ ath12k_dp_mon_buf_replenish(ab, buf_ring, 1);
+ ath12k_hal_srng_src_get_next_entry(ab, srng);
+ num_buffs_reaped++;
+ }
+
+ ath12k_hal_srng_access_end(ab, srng);
+ spin_unlock_bh(&srng->lock);
+ return num_buffs_reaped;
+}
+
+int ath12k_dp_mon_process_ring(struct ath12k_base *ab, int mac_id,
+ struct napi_struct *napi, int budget,
+ enum dp_monitor_mode monitor_mode)
+{
+ struct ath12k *ar = ath12k_ab_to_ar(ab, mac_id);
+ int num_buffs_reaped = 0;
+
+ if (!ar->monitor_started)
+ ath12k_dp_mon_rx_process_stats(ar, mac_id, napi, &budget);
+ else
+ num_buffs_reaped = ath12k_dp_mon_srng_process(ar, mac_id, &budget,
+ monitor_mode, napi);
+
+ return num_buffs_reaped;
+}
diff --git a/drivers/net/wireless/ath/ath12k/dp_mon.h b/drivers/net/wireless/ath/ath12k/dp_mon.h
new file mode 100644
index 000000000000..c18c385798a1
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/dp_mon.h
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef ATH12K_DP_MON_H
+#define ATH12K_DP_MON_H
+
+#include "core.h"
+
+enum dp_monitor_mode {
+ ATH12K_DP_TX_MONITOR_MODE,
+ ATH12K_DP_RX_MONITOR_MODE
+};
+
+enum dp_mon_tx_ppdu_info_type {
+ DP_MON_TX_PROT_PPDU_INFO,
+ DP_MON_TX_DATA_PPDU_INFO
+};
+
+enum dp_mon_tx_tlv_status {
+ DP_MON_TX_FES_SETUP,
+ DP_MON_TX_FES_STATUS_END,
+ DP_MON_RX_RESPONSE_REQUIRED_INFO,
+ DP_MON_RESPONSE_END_STATUS_INFO,
+ DP_MON_TX_MPDU_START,
+ DP_MON_TX_MSDU_START,
+ DP_MON_TX_BUFFER_ADDR,
+ DP_MON_TX_DATA,
+ DP_MON_TX_STATUS_PPDU_NOT_DONE,
+};
+
+enum dp_mon_tx_medium_protection_type {
+ DP_MON_TX_MEDIUM_NO_PROTECTION,
+ DP_MON_TX_MEDIUM_RTS_LEGACY,
+ DP_MON_TX_MEDIUM_RTS_11AC_STATIC_BW,
+ DP_MON_TX_MEDIUM_RTS_11AC_DYNAMIC_BW,
+ DP_MON_TX_MEDIUM_CTS2SELF,
+ DP_MON_TX_MEDIUM_QOS_NULL_NO_ACK_3ADDR,
+ DP_MON_TX_MEDIUM_QOS_NULL_NO_ACK_4ADDR
+};
+
+struct dp_mon_qosframe_addr4 {
+ __le16 frame_control;
+ __le16 duration;
+ u8 addr1[ETH_ALEN];
+ u8 addr2[ETH_ALEN];
+ u8 addr3[ETH_ALEN];
+ __le16 seq_ctrl;
+ u8 addr4[ETH_ALEN];
+ __le16 qos_ctrl;
+} __packed;
+
+struct dp_mon_frame_min_one {
+ __le16 frame_control;
+ __le16 duration;
+ u8 addr1[ETH_ALEN];
+} __packed;
+
+struct dp_mon_packet_info {
+ u64 cookie;
+ u16 dma_length;
+ bool msdu_continuation;
+ bool truncated;
+};
+
+struct dp_mon_tx_ppdu_info {
+ u32 ppdu_id;
+ u8 num_users;
+ bool is_used;
+ struct hal_rx_mon_ppdu_info rx_status;
+ struct list_head dp_tx_mon_mpdu_list;
+ struct dp_mon_mpdu *tx_mon_mpdu;
+};
+
+enum hal_rx_mon_status
+ath12k_dp_mon_rx_parse_mon_status(struct ath12k *ar,
+ struct ath12k_mon_data *pmon,
+ int mac_id, struct sk_buff *skb,
+ struct napi_struct *napi);
+int ath12k_dp_mon_buf_replenish(struct ath12k_base *ab,
+ struct dp_rxdma_ring *buf_ring,
+ int req_entries);
+int ath12k_dp_mon_srng_process(struct ath12k *ar, int mac_id,
+ int *budget, enum dp_monitor_mode monitor_mode,
+ struct napi_struct *napi);
+int ath12k_dp_mon_process_ring(struct ath12k_base *ab, int mac_id,
+ struct napi_struct *napi, int budget,
+ enum dp_monitor_mode monitor_mode);
+struct sk_buff *ath12k_dp_mon_tx_alloc_skb(void);
+enum dp_mon_tx_tlv_status
+ath12k_dp_mon_tx_status_get_num_user(u16 tlv_tag,
+ struct hal_tlv_hdr *tx_tlv,
+ u8 *num_users);
+enum hal_rx_mon_status
+ath12k_dp_mon_tx_parse_mon_status(struct ath12k *ar,
+ struct ath12k_mon_data *pmon,
+ int mac_id,
+ struct sk_buff *skb,
+ struct napi_struct *napi,
+ u32 ppdu_id);
+void ath12k_dp_mon_rx_process_ulofdma(struct hal_rx_mon_ppdu_info *ppdu_info);
+int ath12k_dp_mon_rx_process_stats(struct ath12k *ar, int mac_id,
+ struct napi_struct *napi, int *budget);
+#endif
diff --git a/drivers/net/wireless/ath/ath12k/dp_rx.c b/drivers/net/wireless/ath/ath12k/dp_rx.c
new file mode 100644
index 000000000000..83a43ad48c51
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/dp_rx.c
@@ -0,0 +1,4234 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/ieee80211.h>
+#include <linux/kernel.h>
+#include <linux/skbuff.h>
+#include <crypto/hash.h>
+#include "core.h"
+#include "debug.h"
+#include "hal_desc.h"
+#include "hw.h"
+#include "dp_rx.h"
+#include "hal_rx.h"
+#include "dp_tx.h"
+#include "peer.h"
+#include "dp_mon.h"
+
+#define ATH12K_DP_RX_FRAGMENT_TIMEOUT_MS (2 * HZ)
+
+static enum hal_encrypt_type ath12k_dp_rx_h_enctype(struct ath12k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ if (!ab->hw_params->hal_ops->rx_desc_encrypt_valid(desc))
+ return HAL_ENCRYPT_TYPE_OPEN;
+
+ return ab->hw_params->hal_ops->rx_desc_get_encrypt_type(desc);
+}
+
+u8 ath12k_dp_rx_h_decap_type(struct ath12k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return ab->hw_params->hal_ops->rx_desc_get_decap_type(desc);
+}
+
+static u8 ath12k_dp_rx_h_mesh_ctl_present(struct ath12k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return ab->hw_params->hal_ops->rx_desc_get_mesh_ctl(desc);
+}
+
+static bool ath12k_dp_rx_h_seq_ctrl_valid(struct ath12k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return ab->hw_params->hal_ops->rx_desc_get_mpdu_seq_ctl_vld(desc);
+}
+
+static bool ath12k_dp_rx_h_fc_valid(struct ath12k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return ab->hw_params->hal_ops->rx_desc_get_mpdu_fc_valid(desc);
+}
+
+static bool ath12k_dp_rx_h_more_frags(struct ath12k_base *ab,
+ struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr;
+
+ hdr = (struct ieee80211_hdr *)(skb->data + ab->hw_params->hal_desc_sz);
+ return ieee80211_has_morefrags(hdr->frame_control);
+}
+
+static u16 ath12k_dp_rx_h_frag_no(struct ath12k_base *ab,
+ struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr;
+
+ hdr = (struct ieee80211_hdr *)(skb->data + ab->hw_params->hal_desc_sz);
+ return le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
+}
+
+static u16 ath12k_dp_rx_h_seq_no(struct ath12k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return ab->hw_params->hal_ops->rx_desc_get_mpdu_start_seq_no(desc);
+}
+
+static bool ath12k_dp_rx_h_msdu_done(struct ath12k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return ab->hw_params->hal_ops->dp_rx_h_msdu_done(desc);
+}
+
+static bool ath12k_dp_rx_h_l4_cksum_fail(struct ath12k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return ab->hw_params->hal_ops->dp_rx_h_l4_cksum_fail(desc);
+}
+
+static bool ath12k_dp_rx_h_ip_cksum_fail(struct ath12k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return ab->hw_params->hal_ops->dp_rx_h_ip_cksum_fail(desc);
+}
+
+static bool ath12k_dp_rx_h_is_decrypted(struct ath12k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return ab->hw_params->hal_ops->dp_rx_h_is_decrypted(desc);
+}
+
+u32 ath12k_dp_rx_h_mpdu_err(struct ath12k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return ab->hw_params->hal_ops->dp_rx_h_mpdu_err(desc);
+}
+
+static u16 ath12k_dp_rx_h_msdu_len(struct ath12k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return ab->hw_params->hal_ops->rx_desc_get_msdu_len(desc);
+}
+
+static u8 ath12k_dp_rx_h_sgi(struct ath12k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return ab->hw_params->hal_ops->rx_desc_get_msdu_sgi(desc);
+}
+
+static u8 ath12k_dp_rx_h_rate_mcs(struct ath12k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return ab->hw_params->hal_ops->rx_desc_get_msdu_rate_mcs(desc);
+}
+
+static u8 ath12k_dp_rx_h_rx_bw(struct ath12k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return ab->hw_params->hal_ops->rx_desc_get_msdu_rx_bw(desc);
+}
+
+static u32 ath12k_dp_rx_h_freq(struct ath12k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return ab->hw_params->hal_ops->rx_desc_get_msdu_freq(desc);
+}
+
+static u8 ath12k_dp_rx_h_pkt_type(struct ath12k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return ab->hw_params->hal_ops->rx_desc_get_msdu_pkt_type(desc);
+}
+
+static u8 ath12k_dp_rx_h_nss(struct ath12k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return hweight8(ab->hw_params->hal_ops->rx_desc_get_msdu_nss(desc));
+}
+
+static u8 ath12k_dp_rx_h_tid(struct ath12k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return ab->hw_params->hal_ops->rx_desc_get_mpdu_tid(desc);
+}
+
+static u16 ath12k_dp_rx_h_peer_id(struct ath12k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return ab->hw_params->hal_ops->rx_desc_get_mpdu_peer_id(desc);
+}
+
+u8 ath12k_dp_rx_h_l3pad(struct ath12k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return ab->hw_params->hal_ops->rx_desc_get_l3_pad_bytes(desc);
+}
+
+static bool ath12k_dp_rx_h_first_msdu(struct ath12k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return ab->hw_params->hal_ops->rx_desc_get_first_msdu(desc);
+}
+
+static bool ath12k_dp_rx_h_last_msdu(struct ath12k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return ab->hw_params->hal_ops->rx_desc_get_last_msdu(desc);
+}
+
+static void ath12k_dp_rx_desc_end_tlv_copy(struct ath12k_base *ab,
+ struct hal_rx_desc *fdesc,
+ struct hal_rx_desc *ldesc)
+{
+ ab->hw_params->hal_ops->rx_desc_copy_end_tlv(fdesc, ldesc);
+}
+
+static void ath12k_dp_rxdesc_set_msdu_len(struct ath12k_base *ab,
+ struct hal_rx_desc *desc,
+ u16 len)
+{
+ ab->hw_params->hal_ops->rx_desc_set_msdu_len(desc, len);
+}
+
+static bool ath12k_dp_rx_h_is_mcbc(struct ath12k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return ab->hw_params->hal_ops->rx_desc_is_mcbc(desc);
+}
+
+static bool ath12k_dp_rxdesc_mac_addr2_valid(struct ath12k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return ab->hw_params->hal_ops->rx_desc_mac_addr2_valid(desc);
+}
+
+static u8 *ath12k_dp_rxdesc_get_mpdu_start_addr2(struct ath12k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return ab->hw_params->hal_ops->rx_desc_mpdu_start_addr2(desc);
+}
+
+static void ath12k_dp_rx_desc_get_dot11_hdr(struct ath12k_base *ab,
+ struct hal_rx_desc *desc,
+ struct ieee80211_hdr *hdr)
+{
+ ab->hw_params->hal_ops->rx_desc_get_dot11_hdr(desc, hdr);
+}
+
+static void ath12k_dp_rx_desc_get_crypto_header(struct ath12k_base *ab,
+ struct hal_rx_desc *desc,
+ u8 *crypto_hdr,
+ enum hal_encrypt_type enctype)
+{
+ ab->hw_params->hal_ops->rx_desc_get_crypto_header(desc, crypto_hdr, enctype);
+}
+
+static u16 ath12k_dp_rxdesc_get_mpdu_frame_ctrl(struct ath12k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return ab->hw_params->hal_ops->rx_desc_get_mpdu_frame_ctl(desc);
+}
+
+static int ath12k_dp_purge_mon_ring(struct ath12k_base *ab)
+{
+ int i, reaped = 0;
+ unsigned long timeout = jiffies + msecs_to_jiffies(DP_MON_PURGE_TIMEOUT_MS);
+
+ do {
+ for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++)
+ reaped += ath12k_dp_mon_process_ring(ab, i, NULL,
+ DP_MON_SERVICE_BUDGET,
+ ATH12K_DP_RX_MONITOR_MODE);
+
+ /* nothing more to reap */
+ if (reaped < DP_MON_SERVICE_BUDGET)
+ return 0;
+
+ } while (time_before(jiffies, timeout));
+
+ ath12k_warn(ab, "dp mon ring purge timeout");
+
+ return -ETIMEDOUT;
+}
+
+/* Returns number of Rx buffers replenished */
+int ath12k_dp_rx_bufs_replenish(struct ath12k_base *ab, int mac_id,
+ struct dp_rxdma_ring *rx_ring,
+ int req_entries,
+ enum hal_rx_buf_return_buf_manager mgr,
+ bool hw_cc)
+{
+ struct ath12k_buffer_addr *desc;
+ struct hal_srng *srng;
+ struct sk_buff *skb;
+ int num_free;
+ int num_remain;
+ int buf_id;
+ u32 cookie;
+ dma_addr_t paddr;
+ struct ath12k_dp *dp = &ab->dp;
+ struct ath12k_rx_desc_info *rx_desc;
+
+ req_entries = min(req_entries, rx_ring->bufs_max);
+
+ srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];
+
+ spin_lock_bh(&srng->lock);
+
+ ath12k_hal_srng_access_begin(ab, srng);
+
+ num_free = ath12k_hal_srng_src_num_free(ab, srng, true);
+ if (!req_entries && (num_free > (rx_ring->bufs_max * 3) / 4))
+ req_entries = num_free;
+
+ req_entries = min(num_free, req_entries);
+ num_remain = req_entries;
+
+ while (num_remain > 0) {
+ skb = dev_alloc_skb(DP_RX_BUFFER_SIZE +
+ DP_RX_BUFFER_ALIGN_SIZE);
+ if (!skb)
+ break;
+
+ if (!IS_ALIGNED((unsigned long)skb->data,
+ DP_RX_BUFFER_ALIGN_SIZE)) {
+ skb_pull(skb,
+ PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) -
+ skb->data);
+ }
+
+ paddr = dma_map_single(ab->dev, skb->data,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(ab->dev, paddr))
+ goto fail_free_skb;
+
+ if (hw_cc) {
+ spin_lock_bh(&dp->rx_desc_lock);
+
+ /* Get desc from free list and store in used list
+ * for cleanup purposes
+ *
+ * TODO: pass the removed descs rather than
+ * add/read to optimize
+ */
+ rx_desc = list_first_entry_or_null(&dp->rx_desc_free_list,
+ struct ath12k_rx_desc_info,
+ list);
+ if (!rx_desc) {
+ spin_unlock_bh(&dp->rx_desc_lock);
+ goto fail_dma_unmap;
+ }
+
+ rx_desc->skb = skb;
+ cookie = rx_desc->cookie;
+ list_del(&rx_desc->list);
+ list_add_tail(&rx_desc->list, &dp->rx_desc_used_list);
+
+ spin_unlock_bh(&dp->rx_desc_lock);
+ } else {
+ spin_lock_bh(&rx_ring->idr_lock);
+ buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 0,
+ rx_ring->bufs_max * 3, GFP_ATOMIC);
+ spin_unlock_bh(&rx_ring->idr_lock);
+ if (buf_id < 0)
+ goto fail_dma_unmap;
+ cookie = u32_encode_bits(mac_id,
+ DP_RXDMA_BUF_COOKIE_PDEV_ID) |
+ u32_encode_bits(buf_id,
+ DP_RXDMA_BUF_COOKIE_BUF_ID);
+ }
+
+ desc = ath12k_hal_srng_src_get_next_entry(ab, srng);
+ if (!desc)
+ goto fail_buf_unassign;
+
+ ATH12K_SKB_RXCB(skb)->paddr = paddr;
+
+ num_remain--;
+
+ ath12k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr);
+ }
+
+ ath12k_hal_srng_access_end(ab, srng);
+
+ spin_unlock_bh(&srng->lock);
+
+ return req_entries - num_remain;
+
+fail_buf_unassign:
+ if (hw_cc) {
+ spin_lock_bh(&dp->rx_desc_lock);
+ list_del(&rx_desc->list);
+ list_add_tail(&rx_desc->list, &dp->rx_desc_free_list);
+ rx_desc->skb = NULL;
+ spin_unlock_bh(&dp->rx_desc_lock);
+ } else {
+ spin_lock_bh(&rx_ring->idr_lock);
+ idr_remove(&rx_ring->bufs_idr, buf_id);
+ spin_unlock_bh(&rx_ring->idr_lock);
+ }
+fail_dma_unmap:
+ dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+fail_free_skb:
+ dev_kfree_skb_any(skb);
+
+ ath12k_hal_srng_access_end(ab, srng);
+
+ spin_unlock_bh(&srng->lock);
+
+ return req_entries - num_remain;
+}
+
+static int ath12k_dp_rxdma_buf_ring_free(struct ath12k_base *ab,
+ struct dp_rxdma_ring *rx_ring)
+{
+ struct sk_buff *skb;
+ int buf_id;
+
+ spin_lock_bh(&rx_ring->idr_lock);
+ idr_for_each_entry(&rx_ring->bufs_idr, skb, buf_id) {
+ idr_remove(&rx_ring->bufs_idr, buf_id);
+ /* TODO: Understand where internal driver does this dma_unmap
+ * of rxdma_buffer.
+ */
+ dma_unmap_single(ab->dev, ATH12K_SKB_RXCB(skb)->paddr,
+ skb->len + skb_tailroom(skb), DMA_FROM_DEVICE);
+ dev_kfree_skb_any(skb);
+ }
+
+ idr_destroy(&rx_ring->bufs_idr);
+ spin_unlock_bh(&rx_ring->idr_lock);
+
+ return 0;
+}
+
+static int ath12k_dp_rxdma_buf_free(struct ath12k_base *ab)
+{
+ struct ath12k_dp *dp = &ab->dp;
+ struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
+
+ ath12k_dp_rxdma_buf_ring_free(ab, rx_ring);
+
+ rx_ring = &dp->rxdma_mon_buf_ring;
+ ath12k_dp_rxdma_buf_ring_free(ab, rx_ring);
+
+ rx_ring = &dp->tx_mon_buf_ring;
+ ath12k_dp_rxdma_buf_ring_free(ab, rx_ring);
+
+ return 0;
+}
+
+static int ath12k_dp_rxdma_ring_buf_setup(struct ath12k_base *ab,
+ struct dp_rxdma_ring *rx_ring,
+ u32 ringtype)
+{
+ int num_entries;
+
+ num_entries = rx_ring->refill_buf_ring.size /
+ ath12k_hal_srng_get_entrysize(ab, ringtype);
+
+ rx_ring->bufs_max = num_entries;
+ if ((ringtype == HAL_RXDMA_MONITOR_BUF) || (ringtype == HAL_TX_MONITOR_BUF))
+ ath12k_dp_mon_buf_replenish(ab, rx_ring, num_entries);
+ else
+ ath12k_dp_rx_bufs_replenish(ab, 0, rx_ring, num_entries,
+ ab->hw_params->hal_params->rx_buf_rbm,
+ ringtype == HAL_RXDMA_BUF);
+ return 0;
+}
+
+static int ath12k_dp_rxdma_buf_setup(struct ath12k_base *ab)
+{
+ struct ath12k_dp *dp = &ab->dp;
+ struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
+ int ret;
+
+ ret = ath12k_dp_rxdma_ring_buf_setup(ab, rx_ring,
+ HAL_RXDMA_BUF);
+ if (ret) {
+ ath12k_warn(ab,
+ "failed to setup HAL_RXDMA_BUF\n");
+ return ret;
+ }
+
+ if (ab->hw_params->rxdma1_enable) {
+ rx_ring = &dp->rxdma_mon_buf_ring;
+ ret = ath12k_dp_rxdma_ring_buf_setup(ab, rx_ring,
+ HAL_RXDMA_MONITOR_BUF);
+ if (ret) {
+ ath12k_warn(ab,
+ "failed to setup HAL_RXDMA_MONITOR_BUF\n");
+ return ret;
+ }
+
+ rx_ring = &dp->tx_mon_buf_ring;
+ ret = ath12k_dp_rxdma_ring_buf_setup(ab, rx_ring,
+ HAL_TX_MONITOR_BUF);
+ if (ret) {
+ ath12k_warn(ab,
+ "failed to setup HAL_TX_MONITOR_BUF\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void ath12k_dp_rx_pdev_srng_free(struct ath12k *ar)
+{
+ struct ath12k_pdev_dp *dp = &ar->dp;
+ struct ath12k_base *ab = ar->ab;
+ int i;
+
+ for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {
+ ath12k_dp_srng_cleanup(ab, &dp->rxdma_mon_dst_ring[i]);
+ ath12k_dp_srng_cleanup(ab, &dp->tx_mon_dst_ring[i]);
+ }
+}
+
+void ath12k_dp_rx_pdev_reo_cleanup(struct ath12k_base *ab)
+{
+ struct ath12k_dp *dp = &ab->dp;
+ int i;
+
+ for (i = 0; i < DP_REO_DST_RING_MAX; i++)
+ ath12k_dp_srng_cleanup(ab, &dp->reo_dst_ring[i]);
+}
+
+int ath12k_dp_rx_pdev_reo_setup(struct ath12k_base *ab)
+{
+ struct ath12k_dp *dp = &ab->dp;
+ int ret;
+ int i;
+
+ for (i = 0; i < DP_REO_DST_RING_MAX; i++) {
+ ret = ath12k_dp_srng_setup(ab, &dp->reo_dst_ring[i],
+ HAL_REO_DST, i, 0,
+ DP_REO_DST_RING_SIZE);
+ if (ret) {
+ ath12k_warn(ab, "failed to setup reo_dst_ring\n");
+ goto err_reo_cleanup;
+ }
+ }
+
+ return 0;
+
+err_reo_cleanup:
+ ath12k_dp_rx_pdev_reo_cleanup(ab);
+
+ return ret;
+}
+
+static int ath12k_dp_rx_pdev_srng_alloc(struct ath12k *ar)
+{
+ struct ath12k_pdev_dp *dp = &ar->dp;
+ struct ath12k_base *ab = ar->ab;
+ int i;
+ int ret;
+ u32 mac_id = dp->mac_id;
+
+ for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {
+ ret = ath12k_dp_srng_setup(ar->ab,
+ &dp->rxdma_mon_dst_ring[i],
+ HAL_RXDMA_MONITOR_DST,
+ 0, mac_id + i,
+ DP_RXDMA_MONITOR_DST_RING_SIZE);
+ if (ret) {
+ ath12k_warn(ar->ab,
+ "failed to setup HAL_RXDMA_MONITOR_DST\n");
+ return ret;
+ }
+
+ ret = ath12k_dp_srng_setup(ar->ab,
+ &dp->tx_mon_dst_ring[i],
+ HAL_TX_MONITOR_DST,
+ 0, mac_id + i,
+ DP_TX_MONITOR_DEST_RING_SIZE);
+ if (ret) {
+ ath12k_warn(ar->ab,
+ "failed to setup HAL_TX_MONITOR_DST\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+void ath12k_dp_rx_reo_cmd_list_cleanup(struct ath12k_base *ab)
+{
+ struct ath12k_dp *dp = &ab->dp;
+ struct ath12k_dp_rx_reo_cmd *cmd, *tmp;
+ struct ath12k_dp_rx_reo_cache_flush_elem *cmd_cache, *tmp_cache;
+
+ spin_lock_bh(&dp->reo_cmd_lock);
+ list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
+ list_del(&cmd->list);
+ dma_unmap_single(ab->dev, cmd->data.paddr,
+ cmd->data.size, DMA_BIDIRECTIONAL);
+ kfree(cmd->data.vaddr);
+ kfree(cmd);
+ }
+
+ list_for_each_entry_safe(cmd_cache, tmp_cache,
+ &dp->reo_cmd_cache_flush_list, list) {
+ list_del(&cmd_cache->list);
+ dp->reo_cmd_cache_flush_count--;
+ dma_unmap_single(ab->dev, cmd_cache->data.paddr,
+ cmd_cache->data.size, DMA_BIDIRECTIONAL);
+ kfree(cmd_cache->data.vaddr);
+ kfree(cmd_cache);
+ }
+ spin_unlock_bh(&dp->reo_cmd_lock);
+}
+
+static void ath12k_dp_reo_cmd_free(struct ath12k_dp *dp, void *ctx,
+ enum hal_reo_cmd_status status)
+{
+ struct ath12k_dp_rx_tid *rx_tid = ctx;
+
+ if (status != HAL_REO_CMD_SUCCESS)
+ ath12k_warn(dp->ab, "failed to flush rx tid hw desc, tid %d status %d\n",
+ rx_tid->tid, status);
+
+ dma_unmap_single(dp->ab->dev, rx_tid->paddr, rx_tid->size,
+ DMA_BIDIRECTIONAL);
+ kfree(rx_tid->vaddr);
+ rx_tid->vaddr = NULL;
+}
+
+static int ath12k_dp_reo_cmd_send(struct ath12k_base *ab, struct ath12k_dp_rx_tid *rx_tid,
+ enum hal_reo_cmd_type type,
+ struct ath12k_hal_reo_cmd *cmd,
+ void (*cb)(struct ath12k_dp *dp, void *ctx,
+ enum hal_reo_cmd_status status))
+{
+ struct ath12k_dp *dp = &ab->dp;
+ struct ath12k_dp_rx_reo_cmd *dp_cmd;
+ struct hal_srng *cmd_ring;
+ int cmd_num;
+
+ cmd_ring = &ab->hal.srng_list[dp->reo_cmd_ring.ring_id];
+ cmd_num = ath12k_hal_reo_cmd_send(ab, cmd_ring, type, cmd);
+
+ /* cmd_num should start from 1, during failure return the error code */
+ if (cmd_num < 0)
+ return cmd_num;
+
+ /* reo cmd ring descriptors has cmd_num starting from 1 */
+ if (cmd_num == 0)
+ return -EINVAL;
+
+ if (!cb)
+ return 0;
+
+ /* Can this be optimized so that we keep the pending command list only
+ * for tid delete command to free up the resource on the command status
+ * indication?
+ */
+ dp_cmd = kzalloc(sizeof(*dp_cmd), GFP_ATOMIC);
+
+ if (!dp_cmd)
+ return -ENOMEM;
+
+ memcpy(&dp_cmd->data, rx_tid, sizeof(*rx_tid));
+ dp_cmd->cmd_num = cmd_num;
+ dp_cmd->handler = cb;
+
+ spin_lock_bh(&dp->reo_cmd_lock);
+ list_add_tail(&dp_cmd->list, &dp->reo_cmd_list);
+ spin_unlock_bh(&dp->reo_cmd_lock);
+
+ return 0;
+}
+
+static void ath12k_dp_reo_cache_flush(struct ath12k_base *ab,
+ struct ath12k_dp_rx_tid *rx_tid)
+{
+ struct ath12k_hal_reo_cmd cmd = {0};
+ unsigned long tot_desc_sz, desc_sz;
+ int ret;
+
+ tot_desc_sz = rx_tid->size;
+ desc_sz = ath12k_hal_reo_qdesc_size(0, HAL_DESC_REO_NON_QOS_TID);
+
+ while (tot_desc_sz > desc_sz) {
+ tot_desc_sz -= desc_sz;
+ cmd.addr_lo = lower_32_bits(rx_tid->paddr + tot_desc_sz);
+ cmd.addr_hi = upper_32_bits(rx_tid->paddr);
+ ret = ath12k_dp_reo_cmd_send(ab, rx_tid,
+ HAL_REO_CMD_FLUSH_CACHE, &cmd,
+ NULL);
+ if (ret)
+ ath12k_warn(ab,
+ "failed to send HAL_REO_CMD_FLUSH_CACHE, tid %d (%d)\n",
+ rx_tid->tid, ret);
+ }
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.addr_lo = lower_32_bits(rx_tid->paddr);
+ cmd.addr_hi = upper_32_bits(rx_tid->paddr);
+ cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
+ ret = ath12k_dp_reo_cmd_send(ab, rx_tid,
+ HAL_REO_CMD_FLUSH_CACHE,
+ &cmd, ath12k_dp_reo_cmd_free);
+ if (ret) {
+ ath12k_err(ab, "failed to send HAL_REO_CMD_FLUSH_CACHE cmd, tid %d (%d)\n",
+ rx_tid->tid, ret);
+ dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
+ DMA_BIDIRECTIONAL);
+ kfree(rx_tid->vaddr);
+ rx_tid->vaddr = NULL;
+ }
+}
+
+static void ath12k_dp_rx_tid_del_func(struct ath12k_dp *dp, void *ctx,
+ enum hal_reo_cmd_status status)
+{
+ struct ath12k_base *ab = dp->ab;
+ struct ath12k_dp_rx_tid *rx_tid = ctx;
+ struct ath12k_dp_rx_reo_cache_flush_elem *elem, *tmp;
+
+ if (status == HAL_REO_CMD_DRAIN) {
+ goto free_desc;
+ } else if (status != HAL_REO_CMD_SUCCESS) {
+ /* Shouldn't happen! Cleanup in case of other failure? */
+ ath12k_warn(ab, "failed to delete rx tid %d hw descriptor %d\n",
+ rx_tid->tid, status);
+ return;
+ }
+
+ elem = kzalloc(sizeof(*elem), GFP_ATOMIC);
+ if (!elem)
+ goto free_desc;
+
+ elem->ts = jiffies;
+ memcpy(&elem->data, rx_tid, sizeof(*rx_tid));
+
+ spin_lock_bh(&dp->reo_cmd_lock);
+ list_add_tail(&elem->list, &dp->reo_cmd_cache_flush_list);
+ dp->reo_cmd_cache_flush_count++;
+
+ /* Flush and invalidate aged REO desc from HW cache */
+ list_for_each_entry_safe(elem, tmp, &dp->reo_cmd_cache_flush_list,
+ list) {
+ if (dp->reo_cmd_cache_flush_count > ATH12K_DP_RX_REO_DESC_FREE_THRES ||
+ time_after(jiffies, elem->ts +
+ msecs_to_jiffies(ATH12K_DP_RX_REO_DESC_FREE_TIMEOUT_MS))) {
+ list_del(&elem->list);
+ dp->reo_cmd_cache_flush_count--;
+
+ /* Unlock the reo_cmd_lock before using ath12k_dp_reo_cmd_send()
+ * within ath12k_dp_reo_cache_flush. The reo_cmd_cache_flush_list
+ * is used in only two contexts, one is in this function called
+ * from napi and the other in ath12k_dp_free during core destroy.
+ * Before dp_free, the irqs would be disabled and would wait to
+ * synchronize. Hence there wouldn’t be any race against add or
+ * delete to this list. Hence unlock-lock is safe here.
+ */
+ spin_unlock_bh(&dp->reo_cmd_lock);
+
+ ath12k_dp_reo_cache_flush(ab, &elem->data);
+ kfree(elem);
+ spin_lock_bh(&dp->reo_cmd_lock);
+ }
+ }
+ spin_unlock_bh(&dp->reo_cmd_lock);
+
+ return;
+free_desc:
+ dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
+ DMA_BIDIRECTIONAL);
+ kfree(rx_tid->vaddr);
+ rx_tid->vaddr = NULL;
+}
+
+static void ath12k_peer_rx_tid_qref_setup(struct ath12k_base *ab, u16 peer_id, u16 tid,
+ dma_addr_t paddr)
+{
+ struct ath12k_reo_queue_ref *qref;
+ struct ath12k_dp *dp = &ab->dp;
+
+ if (!ab->hw_params->reoq_lut_support)
+ return;
+
+ /* TODO: based on ML peer or not, select the LUT. below assumes non
+ * ML peer
+ */
+ qref = (struct ath12k_reo_queue_ref *)dp->reoq_lut.vaddr +
+ (peer_id * (IEEE80211_NUM_TIDS + 1) + tid);
+
+ qref->info0 = u32_encode_bits(lower_32_bits(paddr),
+ BUFFER_ADDR_INFO0_ADDR);
+ qref->info1 = u32_encode_bits(upper_32_bits(paddr),
+ BUFFER_ADDR_INFO1_ADDR) |
+ u32_encode_bits(tid, DP_REO_QREF_NUM);
+}
+
+static void ath12k_peer_rx_tid_qref_reset(struct ath12k_base *ab, u16 peer_id, u16 tid)
+{
+ struct ath12k_reo_queue_ref *qref;
+ struct ath12k_dp *dp = &ab->dp;
+
+ if (!ab->hw_params->reoq_lut_support)
+ return;
+
+ /* TODO: based on ML peer or not, select the LUT. below assumes non
+ * ML peer
+ */
+ qref = (struct ath12k_reo_queue_ref *)dp->reoq_lut.vaddr +
+ (peer_id * (IEEE80211_NUM_TIDS + 1) + tid);
+
+ qref->info0 = u32_encode_bits(0, BUFFER_ADDR_INFO0_ADDR);
+ qref->info1 = u32_encode_bits(0, BUFFER_ADDR_INFO1_ADDR) |
+ u32_encode_bits(tid, DP_REO_QREF_NUM);
+}
+
+void ath12k_dp_rx_peer_tid_delete(struct ath12k *ar,
+ struct ath12k_peer *peer, u8 tid)
+{
+ struct ath12k_hal_reo_cmd cmd = {0};
+ struct ath12k_dp_rx_tid *rx_tid = &peer->rx_tid[tid];
+ int ret;
+
+ if (!rx_tid->active)
+ return;
+
+ cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
+ cmd.addr_lo = lower_32_bits(rx_tid->paddr);
+ cmd.addr_hi = upper_32_bits(rx_tid->paddr);
+ cmd.upd0 = HAL_REO_CMD_UPD0_VLD;
+ ret = ath12k_dp_reo_cmd_send(ar->ab, rx_tid,
+ HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
+ ath12k_dp_rx_tid_del_func);
+ if (ret) {
+ ath12k_err(ar->ab, "failed to send HAL_REO_CMD_UPDATE_RX_QUEUE cmd, tid %d (%d)\n",
+ tid, ret);
+ dma_unmap_single(ar->ab->dev, rx_tid->paddr, rx_tid->size,
+ DMA_BIDIRECTIONAL);
+ kfree(rx_tid->vaddr);
+ rx_tid->vaddr = NULL;
+ }
+
+ ath12k_peer_rx_tid_qref_reset(ar->ab, peer->peer_id, tid);
+
+ rx_tid->active = false;
+}
+
+/* TODO: it's strange (and ugly) that struct hal_reo_dest_ring is converted
+ * to struct hal_wbm_release_ring, I couldn't figure out the logic behind
+ * that.
+ */
+static int ath12k_dp_rx_link_desc_return(struct ath12k_base *ab,
+ struct hal_reo_dest_ring *ring,
+ enum hal_wbm_rel_bm_act action)
+{
+ struct hal_wbm_release_ring *link_desc = (struct hal_wbm_release_ring *)ring;
+ struct hal_wbm_release_ring *desc;
+ struct ath12k_dp *dp = &ab->dp;
+ struct hal_srng *srng;
+ int ret = 0;
+
+ srng = &ab->hal.srng_list[dp->wbm_desc_rel_ring.ring_id];
+
+ spin_lock_bh(&srng->lock);
+
+ ath12k_hal_srng_access_begin(ab, srng);
+
+ desc = ath12k_hal_srng_src_get_next_entry(ab, srng);
+ if (!desc) {
+ ret = -ENOBUFS;
+ goto exit;
+ }
+
+ ath12k_hal_rx_msdu_link_desc_set(ab, desc, link_desc, action);
+
+exit:
+ ath12k_hal_srng_access_end(ab, srng);
+
+ spin_unlock_bh(&srng->lock);
+
+ return ret;
+}
+
+static void ath12k_dp_rx_frags_cleanup(struct ath12k_dp_rx_tid *rx_tid,
+ bool rel_link_desc)
+{
+ struct ath12k_base *ab = rx_tid->ab;
+
+ lockdep_assert_held(&ab->base_lock);
+
+ if (rx_tid->dst_ring_desc) {
+ if (rel_link_desc)
+ ath12k_dp_rx_link_desc_return(ab, rx_tid->dst_ring_desc,
+ HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
+ kfree(rx_tid->dst_ring_desc);
+ rx_tid->dst_ring_desc = NULL;
+ }
+
+ rx_tid->cur_sn = 0;
+ rx_tid->last_frag_no = 0;
+ rx_tid->rx_frag_bitmap = 0;
+ __skb_queue_purge(&rx_tid->rx_frags);
+}
+
+void ath12k_dp_rx_peer_tid_cleanup(struct ath12k *ar, struct ath12k_peer *peer)
+{
+ struct ath12k_dp_rx_tid *rx_tid;
+ int i;
+
+ lockdep_assert_held(&ar->ab->base_lock);
+
+ for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
+ rx_tid = &peer->rx_tid[i];
+
+ ath12k_dp_rx_peer_tid_delete(ar, peer, i);
+ ath12k_dp_rx_frags_cleanup(rx_tid, true);
+
+ spin_unlock_bh(&ar->ab->base_lock);
+ del_timer_sync(&rx_tid->frag_timer);
+ spin_lock_bh(&ar->ab->base_lock);
+ }
+}
+
+static int ath12k_peer_rx_tid_reo_update(struct ath12k *ar,
+ struct ath12k_peer *peer,
+ struct ath12k_dp_rx_tid *rx_tid,
+ u32 ba_win_sz, u16 ssn,
+ bool update_ssn)
+{
+ struct ath12k_hal_reo_cmd cmd = {0};
+ int ret;
+
+ cmd.addr_lo = lower_32_bits(rx_tid->paddr);
+ cmd.addr_hi = upper_32_bits(rx_tid->paddr);
+ cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
+ cmd.upd0 = HAL_REO_CMD_UPD0_BA_WINDOW_SIZE;
+ cmd.ba_window_size = ba_win_sz;
+
+ if (update_ssn) {
+ cmd.upd0 |= HAL_REO_CMD_UPD0_SSN;
+ cmd.upd2 = u32_encode_bits(ssn, HAL_REO_CMD_UPD2_SSN);
+ }
+
+ ret = ath12k_dp_reo_cmd_send(ar->ab, rx_tid,
+ HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
+ NULL);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to update rx tid queue, tid %d (%d)\n",
+ rx_tid->tid, ret);
+ return ret;
+ }
+
+ rx_tid->ba_win_sz = ba_win_sz;
+
+ return 0;
+}
+
+int ath12k_dp_rx_peer_tid_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_id,
+ u8 tid, u32 ba_win_sz, u16 ssn,
+ enum hal_pn_type pn_type)
+{
+ struct ath12k_base *ab = ar->ab;
+ struct ath12k_dp *dp = &ab->dp;
+ struct hal_rx_reo_queue *addr_aligned;
+ struct ath12k_peer *peer;
+ struct ath12k_dp_rx_tid *rx_tid;
+ u32 hw_desc_sz;
+ void *vaddr;
+ dma_addr_t paddr;
+ int ret;
+
+ spin_lock_bh(&ab->base_lock);
+
+ peer = ath12k_peer_find(ab, vdev_id, peer_mac);
+ if (!peer) {
+ spin_unlock_bh(&ab->base_lock);
+ ath12k_warn(ab, "failed to find the peer to set up rx tid\n");
+ return -ENOENT;
+ }
+
+ if (ab->hw_params->reoq_lut_support && !dp->reoq_lut.vaddr) {
+ spin_unlock_bh(&ab->base_lock);
+ ath12k_warn(ab, "reo qref table is not setup\n");
+ return -EINVAL;
+ }
+
+ if (peer->peer_id > DP_MAX_PEER_ID || tid > IEEE80211_NUM_TIDS) {
+ ath12k_warn(ab, "peer id of peer %d or tid %d doesn't allow reoq setup\n",
+ peer->peer_id, tid);
+ spin_unlock_bh(&ab->base_lock);
+ return -EINVAL;
+ }
+
+ rx_tid = &peer->rx_tid[tid];
+ /* Update the tid queue if it is already setup */
+ if (rx_tid->active) {
+ paddr = rx_tid->paddr;
+ ret = ath12k_peer_rx_tid_reo_update(ar, peer, rx_tid,
+ ba_win_sz, ssn, true);
+ spin_unlock_bh(&ab->base_lock);
+ if (ret) {
+ ath12k_warn(ab, "failed to update reo for rx tid %d\n", tid);
+ return ret;
+ }
+
+ return ret;
+ }
+
+ rx_tid->tid = tid;
+
+ rx_tid->ba_win_sz = ba_win_sz;
+
+ /* TODO: Optimize the memory allocation for qos tid based on
+ * the actual BA window size in REO tid update path.
+ */
+ if (tid == HAL_DESC_REO_NON_QOS_TID)
+ hw_desc_sz = ath12k_hal_reo_qdesc_size(ba_win_sz, tid);
+ else
+ hw_desc_sz = ath12k_hal_reo_qdesc_size(DP_BA_WIN_SZ_MAX, tid);
+
+ vaddr = kzalloc(hw_desc_sz + HAL_LINK_DESC_ALIGN - 1, GFP_ATOMIC);
+ if (!vaddr) {
+ spin_unlock_bh(&ab->base_lock);
+ return -ENOMEM;
+ }
+
+ addr_aligned = PTR_ALIGN(vaddr, HAL_LINK_DESC_ALIGN);
+
+ ath12k_hal_reo_qdesc_setup(addr_aligned, tid, ba_win_sz,
+ ssn, pn_type);
+
+ paddr = dma_map_single(ab->dev, addr_aligned, hw_desc_sz,
+ DMA_BIDIRECTIONAL);
+
+ ret = dma_mapping_error(ab->dev, paddr);
+ if (ret) {
+ spin_unlock_bh(&ab->base_lock);
+ goto err_mem_free;
+ }
+
+ rx_tid->vaddr = vaddr;
+ rx_tid->paddr = paddr;
+ rx_tid->size = hw_desc_sz;
+ rx_tid->active = true;
+
+ if (ab->hw_params->reoq_lut_support) {
+ /* Update the REO queue LUT at the corresponding peer id
+ * and tid with qaddr.
+ */
+ ath12k_peer_rx_tid_qref_setup(ab, peer->peer_id, tid, paddr);
+ spin_unlock_bh(&ab->base_lock);
+ } else {
+ spin_unlock_bh(&ab->base_lock);
+ ret = ath12k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, peer_mac,
+ paddr, tid, 1, ba_win_sz);
+ }
+
+ return ret;
+
+err_mem_free:
+ kfree(vaddr);
+
+ return ret;
+}
+
+int ath12k_dp_rx_ampdu_start(struct ath12k *ar,
+ struct ieee80211_ampdu_params *params)
+{
+ struct ath12k_base *ab = ar->ab;
+ struct ath12k_sta *arsta = (void *)params->sta->drv_priv;
+ int vdev_id = arsta->arvif->vdev_id;
+ int ret;
+
+ ret = ath12k_dp_rx_peer_tid_setup(ar, params->sta->addr, vdev_id,
+ params->tid, params->buf_size,
+ params->ssn, arsta->pn_type);
+ if (ret)
+ ath12k_warn(ab, "failed to setup rx tid %d\n", ret);
+
+ return ret;
+}
+
+int ath12k_dp_rx_ampdu_stop(struct ath12k *ar,
+ struct ieee80211_ampdu_params *params)
+{
+ struct ath12k_base *ab = ar->ab;
+ struct ath12k_peer *peer;
+ struct ath12k_sta *arsta = (void *)params->sta->drv_priv;
+ int vdev_id = arsta->arvif->vdev_id;
+ bool active;
+ int ret;
+
+ spin_lock_bh(&ab->base_lock);
+
+ peer = ath12k_peer_find(ab, vdev_id, params->sta->addr);
+ if (!peer) {
+ spin_unlock_bh(&ab->base_lock);
+ ath12k_warn(ab, "failed to find the peer to stop rx aggregation\n");
+ return -ENOENT;
+ }
+
+ active = peer->rx_tid[params->tid].active;
+
+ if (!active) {
+ spin_unlock_bh(&ab->base_lock);
+ return 0;
+ }
+
+ ret = ath12k_peer_rx_tid_reo_update(ar, peer, peer->rx_tid, 1, 0, false);
+ spin_unlock_bh(&ab->base_lock);
+ if (ret) {
+ ath12k_warn(ab, "failed to update reo for rx tid %d: %d\n",
+ params->tid, ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+int ath12k_dp_rx_peer_pn_replay_config(struct ath12k_vif *arvif,
+ const u8 *peer_addr,
+ enum set_key_cmd key_cmd,
+ struct ieee80211_key_conf *key)
+{
+ struct ath12k *ar = arvif->ar;
+ struct ath12k_base *ab = ar->ab;
+ struct ath12k_hal_reo_cmd cmd = {0};
+ struct ath12k_peer *peer;
+ struct ath12k_dp_rx_tid *rx_tid;
+ u8 tid;
+ int ret = 0;
+
+ /* NOTE: Enable PN/TSC replay check offload only for unicast frames.
+ * We use mac80211 PN/TSC replay check functionality for bcast/mcast
+ * for now.
+ */
+ if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+ return 0;
+
+ cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
+ cmd.upd0 = HAL_REO_CMD_UPD0_PN |
+ HAL_REO_CMD_UPD0_PN_SIZE |
+ HAL_REO_CMD_UPD0_PN_VALID |
+ HAL_REO_CMD_UPD0_PN_CHECK |
+ HAL_REO_CMD_UPD0_SVLD;
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_TKIP:
+ case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_CCMP_256:
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ if (key_cmd == SET_KEY) {
+ cmd.upd1 |= HAL_REO_CMD_UPD1_PN_CHECK;
+ cmd.pn_size = 48;
+ }
+ break;
+ default:
+ break;
+ }
+
+ spin_lock_bh(&ab->base_lock);
+
+ peer = ath12k_peer_find(ab, arvif->vdev_id, peer_addr);
+ if (!peer) {
+ spin_unlock_bh(&ab->base_lock);
+ ath12k_warn(ab, "failed to find the peer %pM to configure pn replay detection\n",
+ peer_addr);
+ return -ENOENT;
+ }
+
+ for (tid = 0; tid <= IEEE80211_NUM_TIDS; tid++) {
+ rx_tid = &peer->rx_tid[tid];
+ if (!rx_tid->active)
+ continue;
+ cmd.addr_lo = lower_32_bits(rx_tid->paddr);
+ cmd.addr_hi = upper_32_bits(rx_tid->paddr);
+ ret = ath12k_dp_reo_cmd_send(ab, rx_tid,
+ HAL_REO_CMD_UPDATE_RX_QUEUE,
+ &cmd, NULL);
+ if (ret) {
+ ath12k_warn(ab, "failed to configure rx tid %d queue of peer %pM for pn replay detection %d\n",
+ tid, peer_addr, ret);
+ break;
+ }
+ }
+
+ spin_unlock_bh(&ab->base_lock);
+
+ return ret;
+}
+
+static int ath12k_get_ppdu_user_index(struct htt_ppdu_stats *ppdu_stats,
+ u16 peer_id)
+{
+ int i;
+
+ for (i = 0; i < HTT_PPDU_STATS_MAX_USERS - 1; i++) {
+ if (ppdu_stats->user_stats[i].is_valid_peer_id) {
+ if (peer_id == ppdu_stats->user_stats[i].peer_id)
+ return i;
+ } else {
+ return i;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int ath12k_htt_tlv_ppdu_stats_parse(struct ath12k_base *ab,
+ u16 tag, u16 len, const void *ptr,
+ void *data)
+{
+ const struct htt_ppdu_stats_usr_cmpltn_ack_ba_status *ba_status;
+ const struct htt_ppdu_stats_usr_cmpltn_cmn *cmplt_cmn;
+ const struct htt_ppdu_stats_user_rate *user_rate;
+ struct htt_ppdu_stats_info *ppdu_info;
+ struct htt_ppdu_user_stats *user_stats;
+ int cur_user;
+ u16 peer_id;
+
+ ppdu_info = data;
+
+ switch (tag) {
+ case HTT_PPDU_STATS_TAG_COMMON:
+ if (len < sizeof(struct htt_ppdu_stats_common)) {
+ ath12k_warn(ab, "Invalid len %d for the tag 0x%x\n",
+ len, tag);
+ return -EINVAL;
+ }
+ memcpy(&ppdu_info->ppdu_stats.common, ptr,
+ sizeof(struct htt_ppdu_stats_common));
+ break;
+ case HTT_PPDU_STATS_TAG_USR_RATE:
+ if (len < sizeof(struct htt_ppdu_stats_user_rate)) {
+ ath12k_warn(ab, "Invalid len %d for the tag 0x%x\n",
+ len, tag);
+ return -EINVAL;
+ }
+ user_rate = ptr;
+ peer_id = le16_to_cpu(user_rate->sw_peer_id);
+ cur_user = ath12k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
+ peer_id);
+ if (cur_user < 0)
+ return -EINVAL;
+ user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
+ user_stats->peer_id = peer_id;
+ user_stats->is_valid_peer_id = true;
+ memcpy(&user_stats->rate, ptr,
+ sizeof(struct htt_ppdu_stats_user_rate));
+ user_stats->tlv_flags |= BIT(tag);
+ break;
+ case HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON:
+ if (len < sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn)) {
+ ath12k_warn(ab, "Invalid len %d for the tag 0x%x\n",
+ len, tag);
+ return -EINVAL;
+ }
+
+ cmplt_cmn = ptr;
+ peer_id = le16_to_cpu(cmplt_cmn->sw_peer_id);
+ cur_user = ath12k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
+ peer_id);
+ if (cur_user < 0)
+ return -EINVAL;
+ user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
+ user_stats->peer_id = peer_id;
+ user_stats->is_valid_peer_id = true;
+ memcpy(&user_stats->cmpltn_cmn, ptr,
+ sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn));
+ user_stats->tlv_flags |= BIT(tag);
+ break;
+ case HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS:
+ if (len <
+ sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status)) {
+ ath12k_warn(ab, "Invalid len %d for the tag 0x%x\n",
+ len, tag);
+ return -EINVAL;
+ }
+
+ ba_status = ptr;
+ peer_id = le16_to_cpu(ba_status->sw_peer_id);
+ cur_user = ath12k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
+ peer_id);
+ if (cur_user < 0)
+ return -EINVAL;
+ user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
+ user_stats->peer_id = peer_id;
+ user_stats->is_valid_peer_id = true;
+ memcpy(&user_stats->ack_ba, ptr,
+ sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status));
+ user_stats->tlv_flags |= BIT(tag);
+ break;
+ }
+ return 0;
+}
+
+static int ath12k_dp_htt_tlv_iter(struct ath12k_base *ab, const void *ptr, size_t len,
+ int (*iter)(struct ath12k_base *ar, u16 tag, u16 len,
+ const void *ptr, void *data),
+ void *data)
+{
+ const struct htt_tlv *tlv;
+ const void *begin = ptr;
+ u16 tlv_tag, tlv_len;
+ int ret = -EINVAL;
+
+ while (len > 0) {
+ if (len < sizeof(*tlv)) {
+ ath12k_err(ab, "htt tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n",
+ ptr - begin, len, sizeof(*tlv));
+ return -EINVAL;
+ }
+ tlv = (struct htt_tlv *)ptr;
+ tlv_tag = le32_get_bits(tlv->header, HTT_TLV_TAG);
+ tlv_len = le32_get_bits(tlv->header, HTT_TLV_LEN);
+ ptr += sizeof(*tlv);
+ len -= sizeof(*tlv);
+
+ if (tlv_len > len) {
+ ath12k_err(ab, "htt tlv parse failure of tag %u at byte %zd (%zu bytes left, %u expected)\n",
+ tlv_tag, ptr - begin, len, tlv_len);
+ return -EINVAL;
+ }
+ ret = iter(ab, tlv_tag, tlv_len, ptr, data);
+ if (ret == -ENOMEM)
+ return ret;
+
+ ptr += tlv_len;
+ len -= tlv_len;
+ }
+ return 0;
+}
+
+static void
+ath12k_update_per_peer_tx_stats(struct ath12k *ar,
+ struct htt_ppdu_stats *ppdu_stats, u8 user)
+{
+ struct ath12k_base *ab = ar->ab;
+ struct ath12k_peer *peer;
+ struct ieee80211_sta *sta;
+ struct ath12k_sta *arsta;
+ struct htt_ppdu_stats_user_rate *user_rate;
+ struct ath12k_per_peer_tx_stats *peer_stats = &ar->peer_tx_stats;
+ struct htt_ppdu_user_stats *usr_stats = &ppdu_stats->user_stats[user];
+ struct htt_ppdu_stats_common *common = &ppdu_stats->common;
+ int ret;
+ u8 flags, mcs, nss, bw, sgi, dcm, rate_idx = 0;
+ u32 v, succ_bytes = 0;
+ u16 tones, rate = 0, succ_pkts = 0;
+ u32 tx_duration = 0;
+ u8 tid = HTT_PPDU_STATS_NON_QOS_TID;
+ bool is_ampdu = false;
+
+ if (!usr_stats)
+ return;
+
+ if (!(usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_RATE)))
+ return;
+
+ if (usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON))
+ is_ampdu =
+ HTT_USR_CMPLTN_IS_AMPDU(usr_stats->cmpltn_cmn.flags);
+
+ if (usr_stats->tlv_flags &
+ BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS)) {
+ succ_bytes = le32_to_cpu(usr_stats->ack_ba.success_bytes);
+ succ_pkts = le32_get_bits(usr_stats->ack_ba.info,
+ HTT_PPDU_STATS_ACK_BA_INFO_NUM_MSDU_M);
+ tid = le32_get_bits(usr_stats->ack_ba.info,
+ HTT_PPDU_STATS_ACK_BA_INFO_TID_NUM);
+ }
+
+ if (common->fes_duration_us)
+ tx_duration = le32_to_cpu(common->fes_duration_us);
+
+ user_rate = &usr_stats->rate;
+ flags = HTT_USR_RATE_PREAMBLE(user_rate->rate_flags);
+ bw = HTT_USR_RATE_BW(user_rate->rate_flags) - 2;
+ nss = HTT_USR_RATE_NSS(user_rate->rate_flags) + 1;
+ mcs = HTT_USR_RATE_MCS(user_rate->rate_flags);
+ sgi = HTT_USR_RATE_GI(user_rate->rate_flags);
+ dcm = HTT_USR_RATE_DCM(user_rate->rate_flags);
+
+ /* Note: If host configured fixed rates and in some other special
+ * cases, the broadcast/management frames are sent in different rates.
+ * Firmware rate's control to be skipped for this?
+ */
+
+ if (flags == WMI_RATE_PREAMBLE_HE && mcs > 11) {
+ ath12k_warn(ab, "Invalid HE mcs %d peer stats", mcs);
+ return;
+ }
+
+ if (flags == WMI_RATE_PREAMBLE_HE && mcs > ATH12K_HE_MCS_MAX) {
+ ath12k_warn(ab, "Invalid HE mcs %d peer stats", mcs);
+ return;
+ }
+
+ if (flags == WMI_RATE_PREAMBLE_VHT && mcs > ATH12K_VHT_MCS_MAX) {
+ ath12k_warn(ab, "Invalid VHT mcs %d peer stats", mcs);
+ return;
+ }
+
+ if (flags == WMI_RATE_PREAMBLE_HT && (mcs > ATH12K_HT_MCS_MAX || nss < 1)) {
+ ath12k_warn(ab, "Invalid HT mcs %d nss %d peer stats",
+ mcs, nss);
+ return;
+ }
+
+ if (flags == WMI_RATE_PREAMBLE_CCK || flags == WMI_RATE_PREAMBLE_OFDM) {
+ ret = ath12k_mac_hw_ratecode_to_legacy_rate(mcs,
+ flags,
+ &rate_idx,
+ &rate);
+ if (ret < 0)
+ return;
+ }
+
+ rcu_read_lock();
+ spin_lock_bh(&ab->base_lock);
+ peer = ath12k_peer_find_by_id(ab, usr_stats->peer_id);
+
+ if (!peer || !peer->sta) {
+ spin_unlock_bh(&ab->base_lock);
+ rcu_read_unlock();
+ return;
+ }
+
+ sta = peer->sta;
+ arsta = (struct ath12k_sta *)sta->drv_priv;
+
+ memset(&arsta->txrate, 0, sizeof(arsta->txrate));
+
+ switch (flags) {
+ case WMI_RATE_PREAMBLE_OFDM:
+ arsta->txrate.legacy = rate;
+ break;
+ case WMI_RATE_PREAMBLE_CCK:
+ arsta->txrate.legacy = rate;
+ break;
+ case WMI_RATE_PREAMBLE_HT:
+ arsta->txrate.mcs = mcs + 8 * (nss - 1);
+ arsta->txrate.flags = RATE_INFO_FLAGS_MCS;
+ if (sgi)
+ arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
+ break;
+ case WMI_RATE_PREAMBLE_VHT:
+ arsta->txrate.mcs = mcs;
+ arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
+ if (sgi)
+ arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
+ break;
+ case WMI_RATE_PREAMBLE_HE:
+ arsta->txrate.mcs = mcs;
+ arsta->txrate.flags = RATE_INFO_FLAGS_HE_MCS;
+ arsta->txrate.he_dcm = dcm;
+ arsta->txrate.he_gi = ath12k_he_gi_to_nl80211_he_gi(sgi);
+ tones = le16_to_cpu(user_rate->ru_end) -
+ le16_to_cpu(user_rate->ru_start) + 1;
+ v = ath12k_he_ru_tones_to_nl80211_he_ru_alloc(tones);
+ arsta->txrate.he_ru_alloc = v;
+ break;
+ }
+
+ arsta->txrate.nss = nss;
+ arsta->txrate.bw = ath12k_mac_bw_to_mac80211_bw(bw);
+ arsta->tx_duration += tx_duration;
+ memcpy(&arsta->last_txrate, &arsta->txrate, sizeof(struct rate_info));
+
+ /* PPDU stats reported for mgmt packet doesn't have valid tx bytes.
+ * So skip peer stats update for mgmt packets.
+ */
+ if (tid < HTT_PPDU_STATS_NON_QOS_TID) {
+ memset(peer_stats, 0, sizeof(*peer_stats));
+ peer_stats->succ_pkts = succ_pkts;
+ peer_stats->succ_bytes = succ_bytes;
+ peer_stats->is_ampdu = is_ampdu;
+ peer_stats->duration = tx_duration;
+ peer_stats->ba_fails =
+ HTT_USR_CMPLTN_LONG_RETRY(usr_stats->cmpltn_cmn.flags) +
+ HTT_USR_CMPLTN_SHORT_RETRY(usr_stats->cmpltn_cmn.flags);
+ }
+
+ spin_unlock_bh(&ab->base_lock);
+ rcu_read_unlock();
+}
+
+static void ath12k_htt_update_ppdu_stats(struct ath12k *ar,
+ struct htt_ppdu_stats *ppdu_stats)
+{
+ u8 user;
+
+ for (user = 0; user < HTT_PPDU_STATS_MAX_USERS - 1; user++)
+ ath12k_update_per_peer_tx_stats(ar, ppdu_stats, user);
+}
+
+static
+struct htt_ppdu_stats_info *ath12k_dp_htt_get_ppdu_desc(struct ath12k *ar,
+ u32 ppdu_id)
+{
+ struct htt_ppdu_stats_info *ppdu_info;
+
+ lockdep_assert_held(&ar->data_lock);
+ if (!list_empty(&ar->ppdu_stats_info)) {
+ list_for_each_entry(ppdu_info, &ar->ppdu_stats_info, list) {
+ if (ppdu_info->ppdu_id == ppdu_id)
+ return ppdu_info;
+ }
+
+ if (ar->ppdu_stat_list_depth > HTT_PPDU_DESC_MAX_DEPTH) {
+ ppdu_info = list_first_entry(&ar->ppdu_stats_info,
+ typeof(*ppdu_info), list);
+ list_del(&ppdu_info->list);
+ ar->ppdu_stat_list_depth--;
+ ath12k_htt_update_ppdu_stats(ar, &ppdu_info->ppdu_stats);
+ kfree(ppdu_info);
+ }
+ }
+
+ ppdu_info = kzalloc(sizeof(*ppdu_info), GFP_ATOMIC);
+ if (!ppdu_info)
+ return NULL;
+
+ list_add_tail(&ppdu_info->list, &ar->ppdu_stats_info);
+ ar->ppdu_stat_list_depth++;
+
+ return ppdu_info;
+}
+
+static void ath12k_copy_to_delay_stats(struct ath12k_peer *peer,
+ struct htt_ppdu_user_stats *usr_stats)
+{
+ peer->ppdu_stats_delayba.sw_peer_id = le16_to_cpu(usr_stats->rate.sw_peer_id);
+ peer->ppdu_stats_delayba.info0 = le32_to_cpu(usr_stats->rate.info0);
+ peer->ppdu_stats_delayba.ru_end = le16_to_cpu(usr_stats->rate.ru_end);
+ peer->ppdu_stats_delayba.ru_start = le16_to_cpu(usr_stats->rate.ru_start);
+ peer->ppdu_stats_delayba.info1 = le32_to_cpu(usr_stats->rate.info1);
+ peer->ppdu_stats_delayba.rate_flags = le32_to_cpu(usr_stats->rate.rate_flags);
+ peer->ppdu_stats_delayba.resp_rate_flags =
+ le32_to_cpu(usr_stats->rate.resp_rate_flags);
+
+ peer->delayba_flag = true;
+}
+
+static void ath12k_copy_to_bar(struct ath12k_peer *peer,
+ struct htt_ppdu_user_stats *usr_stats)
+{
+ usr_stats->rate.sw_peer_id = cpu_to_le16(peer->ppdu_stats_delayba.sw_peer_id);
+ usr_stats->rate.info0 = cpu_to_le32(peer->ppdu_stats_delayba.info0);
+ usr_stats->rate.ru_end = cpu_to_le16(peer->ppdu_stats_delayba.ru_end);
+ usr_stats->rate.ru_start = cpu_to_le16(peer->ppdu_stats_delayba.ru_start);
+ usr_stats->rate.info1 = cpu_to_le32(peer->ppdu_stats_delayba.info1);
+ usr_stats->rate.rate_flags = cpu_to_le32(peer->ppdu_stats_delayba.rate_flags);
+ usr_stats->rate.resp_rate_flags =
+ cpu_to_le32(peer->ppdu_stats_delayba.resp_rate_flags);
+
+ peer->delayba_flag = false;
+}
+
+static int ath12k_htt_pull_ppdu_stats(struct ath12k_base *ab,
+ struct sk_buff *skb)
+{
+ struct ath12k_htt_ppdu_stats_msg *msg;
+ struct htt_ppdu_stats_info *ppdu_info;
+ struct ath12k_peer *peer = NULL;
+ struct htt_ppdu_user_stats *usr_stats = NULL;
+ u32 peer_id = 0;
+ struct ath12k *ar;
+ int ret, i;
+ u8 pdev_id;
+ u32 ppdu_id, len;
+
+ msg = (struct ath12k_htt_ppdu_stats_msg *)skb->data;
+ len = le32_get_bits(msg->info, HTT_T2H_PPDU_STATS_INFO_PAYLOAD_SIZE);
+ pdev_id = le32_get_bits(msg->info, HTT_T2H_PPDU_STATS_INFO_PDEV_ID);
+ ppdu_id = le32_to_cpu(msg->ppdu_id);
+
+ rcu_read_lock();
+ ar = ath12k_mac_get_ar_by_pdev_id(ab, pdev_id);
+ if (!ar) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ spin_lock_bh(&ar->data_lock);
+ ppdu_info = ath12k_dp_htt_get_ppdu_desc(ar, ppdu_id);
+ if (!ppdu_info) {
+ spin_unlock_bh(&ar->data_lock);
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ ppdu_info->ppdu_id = ppdu_id;
+ ret = ath12k_dp_htt_tlv_iter(ab, msg->data, len,
+ ath12k_htt_tlv_ppdu_stats_parse,
+ (void *)ppdu_info);
+ if (ret) {
+ spin_unlock_bh(&ar->data_lock);
+ ath12k_warn(ab, "Failed to parse tlv %d\n", ret);
+ goto exit;
+ }
+
+ /* back up data rate tlv for all peers */
+ if (ppdu_info->frame_type == HTT_STATS_PPDU_FTYPE_DATA &&
+ (ppdu_info->tlv_bitmap & (1 << HTT_PPDU_STATS_TAG_USR_COMMON)) &&
+ ppdu_info->delay_ba) {
+ for (i = 0; i < ppdu_info->ppdu_stats.common.num_users; i++) {
+ peer_id = ppdu_info->ppdu_stats.user_stats[i].peer_id;
+ spin_lock_bh(&ab->base_lock);
+ peer = ath12k_peer_find_by_id(ab, peer_id);
+ if (!peer) {
+ spin_unlock_bh(&ab->base_lock);
+ continue;
+ }
+
+ usr_stats = &ppdu_info->ppdu_stats.user_stats[i];
+ if (usr_stats->delay_ba)
+ ath12k_copy_to_delay_stats(peer, usr_stats);
+ spin_unlock_bh(&ab->base_lock);
+ }
+ }
+
+ /* restore all peers' data rate tlv to mu-bar tlv */
+ if (ppdu_info->frame_type == HTT_STATS_PPDU_FTYPE_BAR &&
+ (ppdu_info->tlv_bitmap & (1 << HTT_PPDU_STATS_TAG_USR_COMMON))) {
+ for (i = 0; i < ppdu_info->bar_num_users; i++) {
+ peer_id = ppdu_info->ppdu_stats.user_stats[i].peer_id;
+ spin_lock_bh(&ab->base_lock);
+ peer = ath12k_peer_find_by_id(ab, peer_id);
+ if (!peer) {
+ spin_unlock_bh(&ab->base_lock);
+ continue;
+ }
+
+ usr_stats = &ppdu_info->ppdu_stats.user_stats[i];
+ if (peer->delayba_flag)
+ ath12k_copy_to_bar(peer, usr_stats);
+ spin_unlock_bh(&ab->base_lock);
+ }
+ }
+
+ spin_unlock_bh(&ar->data_lock);
+
+exit:
+ rcu_read_unlock();
+
+ return ret;
+}
+
+static void ath12k_htt_mlo_offset_event_handler(struct ath12k_base *ab,
+ struct sk_buff *skb)
+{
+ struct ath12k_htt_mlo_offset_msg *msg;
+ struct ath12k_pdev *pdev;
+ struct ath12k *ar;
+ u8 pdev_id;
+
+ msg = (struct ath12k_htt_mlo_offset_msg *)skb->data;
+ pdev_id = u32_get_bits(__le32_to_cpu(msg->info),
+ HTT_T2H_MLO_OFFSET_INFO_PDEV_ID);
+ ar = ath12k_mac_get_ar_by_pdev_id(ab, pdev_id);
+
+ if (!ar) {
+ ath12k_warn(ab, "invalid pdev id %d on htt mlo offset\n", pdev_id);
+ return;
+ }
+
+ spin_lock_bh(&ar->data_lock);
+ pdev = ar->pdev;
+
+ pdev->timestamp.info = __le32_to_cpu(msg->info);
+ pdev->timestamp.sync_timestamp_lo_us = __le32_to_cpu(msg->sync_timestamp_lo_us);
+ pdev->timestamp.sync_timestamp_hi_us = __le32_to_cpu(msg->sync_timestamp_hi_us);
+ pdev->timestamp.mlo_offset_lo = __le32_to_cpu(msg->mlo_offset_lo);
+ pdev->timestamp.mlo_offset_hi = __le32_to_cpu(msg->mlo_offset_hi);
+ pdev->timestamp.mlo_offset_clks = __le32_to_cpu(msg->mlo_offset_clks);
+ pdev->timestamp.mlo_comp_clks = __le32_to_cpu(msg->mlo_comp_clks);
+ pdev->timestamp.mlo_comp_timer = __le32_to_cpu(msg->mlo_comp_timer);
+
+ spin_unlock_bh(&ar->data_lock);
+}
+
+void ath12k_dp_htt_htc_t2h_msg_handler(struct ath12k_base *ab,
+ struct sk_buff *skb)
+{
+ struct ath12k_dp *dp = &ab->dp;
+ struct htt_resp_msg *resp = (struct htt_resp_msg *)skb->data;
+ enum htt_t2h_msg_type type;
+ u16 peer_id;
+ u8 vdev_id;
+ u8 mac_addr[ETH_ALEN];
+ u16 peer_mac_h16;
+ u16 ast_hash = 0;
+ u16 hw_peer_id;
+
+ type = le32_get_bits(resp->version_msg.version, HTT_T2H_MSG_TYPE);
+
+ ath12k_dbg(ab, ATH12K_DBG_DP_HTT, "dp_htt rx msg type :0x%0x\n", type);
+
+ switch (type) {
+ case HTT_T2H_MSG_TYPE_VERSION_CONF:
+ dp->htt_tgt_ver_major = le32_get_bits(resp->version_msg.version,
+ HTT_T2H_VERSION_CONF_MAJOR);
+ dp->htt_tgt_ver_minor = le32_get_bits(resp->version_msg.version,
+ HTT_T2H_VERSION_CONF_MINOR);
+ complete(&dp->htt_tgt_version_received);
+ break;
+ /* TODO: remove unused peer map versions after testing */
+ case HTT_T2H_MSG_TYPE_PEER_MAP:
+ vdev_id = le32_get_bits(resp->peer_map_ev.info,
+ HTT_T2H_PEER_MAP_INFO_VDEV_ID);
+ peer_id = le32_get_bits(resp->peer_map_ev.info,
+ HTT_T2H_PEER_MAP_INFO_PEER_ID);
+ peer_mac_h16 = le32_get_bits(resp->peer_map_ev.info1,
+ HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16);
+ ath12k_dp_get_mac_addr(le32_to_cpu(resp->peer_map_ev.mac_addr_l32),
+ peer_mac_h16, mac_addr);
+ ath12k_peer_map_event(ab, vdev_id, peer_id, mac_addr, 0, 0);
+ break;
+ case HTT_T2H_MSG_TYPE_PEER_MAP2:
+ vdev_id = le32_get_bits(resp->peer_map_ev.info,
+ HTT_T2H_PEER_MAP_INFO_VDEV_ID);
+ peer_id = le32_get_bits(resp->peer_map_ev.info,
+ HTT_T2H_PEER_MAP_INFO_PEER_ID);
+ peer_mac_h16 = le32_get_bits(resp->peer_map_ev.info1,
+ HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16);
+ ath12k_dp_get_mac_addr(le32_to_cpu(resp->peer_map_ev.mac_addr_l32),
+ peer_mac_h16, mac_addr);
+ ast_hash = le32_get_bits(resp->peer_map_ev.info2,
+ HTT_T2H_PEER_MAP_INFO2_AST_HASH_VAL);
+ hw_peer_id = le32_get_bits(resp->peer_map_ev.info1,
+ HTT_T2H_PEER_MAP_INFO1_HW_PEER_ID);
+ ath12k_peer_map_event(ab, vdev_id, peer_id, mac_addr, ast_hash,
+ hw_peer_id);
+ break;
+ case HTT_T2H_MSG_TYPE_PEER_MAP3:
+ vdev_id = le32_get_bits(resp->peer_map_ev.info,
+ HTT_T2H_PEER_MAP_INFO_VDEV_ID);
+ peer_id = le32_get_bits(resp->peer_map_ev.info,
+ HTT_T2H_PEER_MAP_INFO_PEER_ID);
+ peer_mac_h16 = le32_get_bits(resp->peer_map_ev.info1,
+ HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16);
+ ath12k_dp_get_mac_addr(le32_to_cpu(resp->peer_map_ev.mac_addr_l32),
+ peer_mac_h16, mac_addr);
+ ath12k_peer_map_event(ab, vdev_id, peer_id, mac_addr, ast_hash,
+ peer_id);
+ break;
+ case HTT_T2H_MSG_TYPE_PEER_UNMAP:
+ case HTT_T2H_MSG_TYPE_PEER_UNMAP2:
+ peer_id = le32_get_bits(resp->peer_unmap_ev.info,
+ HTT_T2H_PEER_UNMAP_INFO_PEER_ID);
+ ath12k_peer_unmap_event(ab, peer_id);
+ break;
+ case HTT_T2H_MSG_TYPE_PPDU_STATS_IND:
+ ath12k_htt_pull_ppdu_stats(ab, skb);
+ break;
+ case HTT_T2H_MSG_TYPE_EXT_STATS_CONF:
+ break;
+ case HTT_T2H_MSG_TYPE_MLO_TIMESTAMP_OFFSET_IND:
+ ath12k_htt_mlo_offset_event_handler(ab, skb);
+ break;
+ default:
+ ath12k_dbg(ab, ATH12K_DBG_DP_HTT, "dp_htt event %d not handled\n",
+ type);
+ break;
+ }
+
+ dev_kfree_skb_any(skb);
+}
+
+static int ath12k_dp_rx_msdu_coalesce(struct ath12k *ar,
+ struct sk_buff_head *msdu_list,
+ struct sk_buff *first, struct sk_buff *last,
+ u8 l3pad_bytes, int msdu_len)
+{
+ struct ath12k_base *ab = ar->ab;
+ struct sk_buff *skb;
+ struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(first);
+ int buf_first_hdr_len, buf_first_len;
+ struct hal_rx_desc *ldesc;
+ int space_extra, rem_len, buf_len;
+ u32 hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz;
+
+ /* As the msdu is spread across multiple rx buffers,
+ * find the offset to the start of msdu for computing
+ * the length of the msdu in the first buffer.
+ */
+ buf_first_hdr_len = hal_rx_desc_sz + l3pad_bytes;
+ buf_first_len = DP_RX_BUFFER_SIZE - buf_first_hdr_len;
+
+ if (WARN_ON_ONCE(msdu_len <= buf_first_len)) {
+ skb_put(first, buf_first_hdr_len + msdu_len);
+ skb_pull(first, buf_first_hdr_len);
+ return 0;
+ }
+
+ ldesc = (struct hal_rx_desc *)last->data;
+ rxcb->is_first_msdu = ath12k_dp_rx_h_first_msdu(ab, ldesc);
+ rxcb->is_last_msdu = ath12k_dp_rx_h_last_msdu(ab, ldesc);
+
+ /* MSDU spans over multiple buffers because the length of the MSDU
+ * exceeds DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE. So assume the data
+ * in the first buf is of length DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE.
+ */
+ skb_put(first, DP_RX_BUFFER_SIZE);
+ skb_pull(first, buf_first_hdr_len);
+
+ /* When an MSDU spread over multiple buffers MSDU_END
+ * tlvs are valid only in the last buffer. Copy those tlvs.
+ */
+ ath12k_dp_rx_desc_end_tlv_copy(ab, rxcb->rx_desc, ldesc);
+
+ space_extra = msdu_len - (buf_first_len + skb_tailroom(first));
+ if (space_extra > 0 &&
+ (pskb_expand_head(first, 0, space_extra, GFP_ATOMIC) < 0)) {
+ /* Free up all buffers of the MSDU */
+ while ((skb = __skb_dequeue(msdu_list)) != NULL) {
+ rxcb = ATH12K_SKB_RXCB(skb);
+ if (!rxcb->is_continuation) {
+ dev_kfree_skb_any(skb);
+ break;
+ }
+ dev_kfree_skb_any(skb);
+ }
+ return -ENOMEM;
+ }
+
+ rem_len = msdu_len - buf_first_len;
+ while ((skb = __skb_dequeue(msdu_list)) != NULL && rem_len > 0) {
+ rxcb = ATH12K_SKB_RXCB(skb);
+ if (rxcb->is_continuation)
+ buf_len = DP_RX_BUFFER_SIZE - hal_rx_desc_sz;
+ else
+ buf_len = rem_len;
+
+ if (buf_len > (DP_RX_BUFFER_SIZE - hal_rx_desc_sz)) {
+ WARN_ON_ONCE(1);
+ dev_kfree_skb_any(skb);
+ return -EINVAL;
+ }
+
+ skb_put(skb, buf_len + hal_rx_desc_sz);
+ skb_pull(skb, hal_rx_desc_sz);
+ skb_copy_from_linear_data(skb, skb_put(first, buf_len),
+ buf_len);
+ dev_kfree_skb_any(skb);
+
+ rem_len -= buf_len;
+ if (!rxcb->is_continuation)
+ break;
+ }
+
+ return 0;
+}
+
+static struct sk_buff *ath12k_dp_rx_get_msdu_last_buf(struct sk_buff_head *msdu_list,
+ struct sk_buff *first)
+{
+ struct sk_buff *skb;
+ struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(first);
+
+ if (!rxcb->is_continuation)
+ return first;
+
+ skb_queue_walk(msdu_list, skb) {
+ rxcb = ATH12K_SKB_RXCB(skb);
+ if (!rxcb->is_continuation)
+ return skb;
+ }
+
+ return NULL;
+}
+
+static void ath12k_dp_rx_h_csum_offload(struct ath12k *ar, struct sk_buff *msdu)
+{
+ struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
+ struct ath12k_base *ab = ar->ab;
+ bool ip_csum_fail, l4_csum_fail;
+
+ ip_csum_fail = ath12k_dp_rx_h_ip_cksum_fail(ab, rxcb->rx_desc);
+ l4_csum_fail = ath12k_dp_rx_h_l4_cksum_fail(ab, rxcb->rx_desc);
+
+ msdu->ip_summed = (ip_csum_fail || l4_csum_fail) ?
+ CHECKSUM_NONE : CHECKSUM_UNNECESSARY;
+}
+
+static int ath12k_dp_rx_crypto_mic_len(struct ath12k *ar,
+ enum hal_encrypt_type enctype)
+{
+ switch (enctype) {
+ case HAL_ENCRYPT_TYPE_OPEN:
+ case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
+ case HAL_ENCRYPT_TYPE_TKIP_MIC:
+ return 0;
+ case HAL_ENCRYPT_TYPE_CCMP_128:
+ return IEEE80211_CCMP_MIC_LEN;
+ case HAL_ENCRYPT_TYPE_CCMP_256:
+ return IEEE80211_CCMP_256_MIC_LEN;
+ case HAL_ENCRYPT_TYPE_GCMP_128:
+ case HAL_ENCRYPT_TYPE_AES_GCMP_256:
+ return IEEE80211_GCMP_MIC_LEN;
+ case HAL_ENCRYPT_TYPE_WEP_40:
+ case HAL_ENCRYPT_TYPE_WEP_104:
+ case HAL_ENCRYPT_TYPE_WEP_128:
+ case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
+ case HAL_ENCRYPT_TYPE_WAPI:
+ break;
+ }
+
+ ath12k_warn(ar->ab, "unsupported encryption type %d for mic len\n", enctype);
+ return 0;
+}
+
+static int ath12k_dp_rx_crypto_param_len(struct ath12k *ar,
+ enum hal_encrypt_type enctype)
+{
+ switch (enctype) {
+ case HAL_ENCRYPT_TYPE_OPEN:
+ return 0;
+ case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
+ case HAL_ENCRYPT_TYPE_TKIP_MIC:
+ return IEEE80211_TKIP_IV_LEN;
+ case HAL_ENCRYPT_TYPE_CCMP_128:
+ return IEEE80211_CCMP_HDR_LEN;
+ case HAL_ENCRYPT_TYPE_CCMP_256:
+ return IEEE80211_CCMP_256_HDR_LEN;
+ case HAL_ENCRYPT_TYPE_GCMP_128:
+ case HAL_ENCRYPT_TYPE_AES_GCMP_256:
+ return IEEE80211_GCMP_HDR_LEN;
+ case HAL_ENCRYPT_TYPE_WEP_40:
+ case HAL_ENCRYPT_TYPE_WEP_104:
+ case HAL_ENCRYPT_TYPE_WEP_128:
+ case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
+ case HAL_ENCRYPT_TYPE_WAPI:
+ break;
+ }
+
+ ath12k_warn(ar->ab, "unsupported encryption type %d\n", enctype);
+ return 0;
+}
+
+static int ath12k_dp_rx_crypto_icv_len(struct ath12k *ar,
+ enum hal_encrypt_type enctype)
+{
+ switch (enctype) {
+ case HAL_ENCRYPT_TYPE_OPEN:
+ case HAL_ENCRYPT_TYPE_CCMP_128:
+ case HAL_ENCRYPT_TYPE_CCMP_256:
+ case HAL_ENCRYPT_TYPE_GCMP_128:
+ case HAL_ENCRYPT_TYPE_AES_GCMP_256:
+ return 0;
+ case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
+ case HAL_ENCRYPT_TYPE_TKIP_MIC:
+ return IEEE80211_TKIP_ICV_LEN;
+ case HAL_ENCRYPT_TYPE_WEP_40:
+ case HAL_ENCRYPT_TYPE_WEP_104:
+ case HAL_ENCRYPT_TYPE_WEP_128:
+ case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
+ case HAL_ENCRYPT_TYPE_WAPI:
+ break;
+ }
+
+ ath12k_warn(ar->ab, "unsupported encryption type %d\n", enctype);
+ return 0;
+}
+
+static void ath12k_dp_rx_h_undecap_nwifi(struct ath12k *ar,
+ struct sk_buff *msdu,
+ enum hal_encrypt_type enctype,
+ struct ieee80211_rx_status *status)
+{
+ struct ath12k_base *ab = ar->ab;
+ struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
+ u8 decap_hdr[DP_MAX_NWIFI_HDR_LEN];
+ struct ieee80211_hdr *hdr;
+ size_t hdr_len;
+ u8 *crypto_hdr;
+ u16 qos_ctl;
+
+ /* pull decapped header */
+ hdr = (struct ieee80211_hdr *)msdu->data;
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+ skb_pull(msdu, hdr_len);
+
+ /* Rebuild qos header */
+ hdr->frame_control |= __cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
+
+ /* Reset the order bit as the HT_Control header is stripped */
+ hdr->frame_control &= ~(__cpu_to_le16(IEEE80211_FCTL_ORDER));
+
+ qos_ctl = rxcb->tid;
+
+ if (ath12k_dp_rx_h_mesh_ctl_present(ab, rxcb->rx_desc))
+ qos_ctl |= IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT;
+
+ /* TODO: Add other QoS ctl fields when required */
+
+ /* copy decap header before overwriting for reuse below */
+ memcpy(decap_hdr, hdr, hdr_len);
+
+ /* Rebuild crypto header for mac80211 use */
+ if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
+ crypto_hdr = skb_push(msdu, ath12k_dp_rx_crypto_param_len(ar, enctype));
+ ath12k_dp_rx_desc_get_crypto_header(ar->ab,
+ rxcb->rx_desc, crypto_hdr,
+ enctype);
+ }
+
+ memcpy(skb_push(msdu,
+ IEEE80211_QOS_CTL_LEN), &qos_ctl,
+ IEEE80211_QOS_CTL_LEN);
+ memcpy(skb_push(msdu, hdr_len), decap_hdr, hdr_len);
+}
+
+static void ath12k_dp_rx_h_undecap_raw(struct ath12k *ar, struct sk_buff *msdu,
+ enum hal_encrypt_type enctype,
+ struct ieee80211_rx_status *status,
+ bool decrypted)
+{
+ struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
+ struct ieee80211_hdr *hdr;
+ size_t hdr_len;
+ size_t crypto_len;
+
+ if (!rxcb->is_first_msdu ||
+ !(rxcb->is_first_msdu && rxcb->is_last_msdu)) {
+ WARN_ON_ONCE(1);
+ return;
+ }
+
+ skb_trim(msdu, msdu->len - FCS_LEN);
+
+ if (!decrypted)
+ return;
+
+ hdr = (void *)msdu->data;
+
+ /* Tail */
+ if (status->flag & RX_FLAG_IV_STRIPPED) {
+ skb_trim(msdu, msdu->len -
+ ath12k_dp_rx_crypto_mic_len(ar, enctype));
+
+ skb_trim(msdu, msdu->len -
+ ath12k_dp_rx_crypto_icv_len(ar, enctype));
+ } else {
+ /* MIC */
+ if (status->flag & RX_FLAG_MIC_STRIPPED)
+ skb_trim(msdu, msdu->len -
+ ath12k_dp_rx_crypto_mic_len(ar, enctype));
+
+ /* ICV */
+ if (status->flag & RX_FLAG_ICV_STRIPPED)
+ skb_trim(msdu, msdu->len -
+ ath12k_dp_rx_crypto_icv_len(ar, enctype));
+ }
+
+ /* MMIC */
+ if ((status->flag & RX_FLAG_MMIC_STRIPPED) &&
+ !ieee80211_has_morefrags(hdr->frame_control) &&
+ enctype == HAL_ENCRYPT_TYPE_TKIP_MIC)
+ skb_trim(msdu, msdu->len - IEEE80211_CCMP_MIC_LEN);
+
+ /* Head */
+ if (status->flag & RX_FLAG_IV_STRIPPED) {
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+ crypto_len = ath12k_dp_rx_crypto_param_len(ar, enctype);
+
+ memmove(msdu->data + crypto_len, msdu->data, hdr_len);
+ skb_pull(msdu, crypto_len);
+ }
+}
+
+static void ath12k_get_dot11_hdr_from_rx_desc(struct ath12k *ar,
+ struct sk_buff *msdu,
+ struct ath12k_skb_rxcb *rxcb,
+ struct ieee80211_rx_status *status,
+ enum hal_encrypt_type enctype)
+{
+ struct hal_rx_desc *rx_desc = rxcb->rx_desc;
+ struct ath12k_base *ab = ar->ab;
+ size_t hdr_len, crypto_len;
+ struct ieee80211_hdr *hdr;
+ u16 qos_ctl;
+ __le16 fc;
+ u8 *crypto_hdr;
+
+ if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
+ crypto_len = ath12k_dp_rx_crypto_param_len(ar, enctype);
+ crypto_hdr = skb_push(msdu, crypto_len);
+ ath12k_dp_rx_desc_get_crypto_header(ab, rx_desc, crypto_hdr, enctype);
+ }
+
+ fc = cpu_to_le16(ath12k_dp_rxdesc_get_mpdu_frame_ctrl(ab, rx_desc));
+ hdr_len = ieee80211_hdrlen(fc);
+ skb_push(msdu, hdr_len);
+ hdr = (struct ieee80211_hdr *)msdu->data;
+ hdr->frame_control = fc;
+
+ /* Get wifi header from rx_desc */
+ ath12k_dp_rx_desc_get_dot11_hdr(ab, rx_desc, hdr);
+
+ if (rxcb->is_mcbc)
+ status->flag &= ~RX_FLAG_PN_VALIDATED;
+
+ /* Add QOS header */
+ if (ieee80211_is_data_qos(hdr->frame_control)) {
+ qos_ctl = rxcb->tid;
+ if (ath12k_dp_rx_h_mesh_ctl_present(ab, rx_desc))
+ qos_ctl |= IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT;
+
+ /* TODO: Add other QoS ctl fields when required */
+ memcpy(msdu->data + (hdr_len - IEEE80211_QOS_CTL_LEN),
+ &qos_ctl, IEEE80211_QOS_CTL_LEN);
+ }
+}
+
+static void ath12k_dp_rx_h_undecap_eth(struct ath12k *ar,
+ struct sk_buff *msdu,
+ enum hal_encrypt_type enctype,
+ struct ieee80211_rx_status *status)
+{
+ struct ieee80211_hdr *hdr;
+ struct ethhdr *eth;
+ u8 da[ETH_ALEN];
+ u8 sa[ETH_ALEN];
+ struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
+ struct ath12k_dp_rx_rfc1042_hdr rfc = {0xaa, 0xaa, 0x03, {0x00, 0x00, 0x00}};
+
+ eth = (struct ethhdr *)msdu->data;
+ ether_addr_copy(da, eth->h_dest);
+ ether_addr_copy(sa, eth->h_source);
+ rfc.snap_type = eth->h_proto;
+ skb_pull(msdu, sizeof(*eth));
+ memcpy(skb_push(msdu, sizeof(rfc)), &rfc,
+ sizeof(rfc));
+ ath12k_get_dot11_hdr_from_rx_desc(ar, msdu, rxcb, status, enctype);
+
+ /* original 802.11 header has a different DA and in
+ * case of 4addr it may also have different SA
+ */
+ hdr = (struct ieee80211_hdr *)msdu->data;
+ ether_addr_copy(ieee80211_get_DA(hdr), da);
+ ether_addr_copy(ieee80211_get_SA(hdr), sa);
+}
+
+static void ath12k_dp_rx_h_undecap(struct ath12k *ar, struct sk_buff *msdu,
+ struct hal_rx_desc *rx_desc,
+ enum hal_encrypt_type enctype,
+ struct ieee80211_rx_status *status,
+ bool decrypted)
+{
+ struct ath12k_base *ab = ar->ab;
+ u8 decap;
+ struct ethhdr *ehdr;
+
+ decap = ath12k_dp_rx_h_decap_type(ab, rx_desc);
+
+ switch (decap) {
+ case DP_RX_DECAP_TYPE_NATIVE_WIFI:
+ ath12k_dp_rx_h_undecap_nwifi(ar, msdu, enctype, status);
+ break;
+ case DP_RX_DECAP_TYPE_RAW:
+ ath12k_dp_rx_h_undecap_raw(ar, msdu, enctype, status,
+ decrypted);
+ break;
+ case DP_RX_DECAP_TYPE_ETHERNET2_DIX:
+ ehdr = (struct ethhdr *)msdu->data;
+
+ /* mac80211 allows fast path only for authorized STA */
+ if (ehdr->h_proto == cpu_to_be16(ETH_P_PAE)) {
+ ATH12K_SKB_RXCB(msdu)->is_eapol = true;
+ ath12k_dp_rx_h_undecap_eth(ar, msdu, enctype, status);
+ break;
+ }
+
+ /* PN for mcast packets will be validated in mac80211;
+ * remove eth header and add 802.11 header.
+ */
+ if (ATH12K_SKB_RXCB(msdu)->is_mcbc && decrypted)
+ ath12k_dp_rx_h_undecap_eth(ar, msdu, enctype, status);
+ break;
+ case DP_RX_DECAP_TYPE_8023:
+ /* TODO: Handle undecap for these formats */
+ break;
+ }
+}
+
+struct ath12k_peer *
+ath12k_dp_rx_h_find_peer(struct ath12k_base *ab, struct sk_buff *msdu)
+{
+ struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
+ struct hal_rx_desc *rx_desc = rxcb->rx_desc;
+ struct ath12k_peer *peer = NULL;
+
+ lockdep_assert_held(&ab->base_lock);
+
+ if (rxcb->peer_id)
+ peer = ath12k_peer_find_by_id(ab, rxcb->peer_id);
+
+ if (peer)
+ return peer;
+
+ if (!rx_desc || !(ath12k_dp_rxdesc_mac_addr2_valid(ab, rx_desc)))
+ return NULL;
+
+ peer = ath12k_peer_find_by_addr(ab,
+ ath12k_dp_rxdesc_get_mpdu_start_addr2(ab,
+ rx_desc));
+ return peer;
+}
+
+static void ath12k_dp_rx_h_mpdu(struct ath12k *ar,
+ struct sk_buff *msdu,
+ struct hal_rx_desc *rx_desc,
+ struct ieee80211_rx_status *rx_status)
+{
+ bool fill_crypto_hdr;
+ struct ath12k_base *ab = ar->ab;
+ struct ath12k_skb_rxcb *rxcb;
+ enum hal_encrypt_type enctype;
+ bool is_decrypted = false;
+ struct ieee80211_hdr *hdr;
+ struct ath12k_peer *peer;
+ u32 err_bitmap;
+
+ /* PN for multicast packets will be checked in mac80211 */
+ rxcb = ATH12K_SKB_RXCB(msdu);
+ fill_crypto_hdr = ath12k_dp_rx_h_is_mcbc(ar->ab, rx_desc);
+ rxcb->is_mcbc = fill_crypto_hdr;
+
+ if (rxcb->is_mcbc)
+ rxcb->peer_id = ath12k_dp_rx_h_peer_id(ar->ab, rx_desc);
+
+ spin_lock_bh(&ar->ab->base_lock);
+ peer = ath12k_dp_rx_h_find_peer(ar->ab, msdu);
+ if (peer) {
+ if (rxcb->is_mcbc)
+ enctype = peer->sec_type_grp;
+ else
+ enctype = peer->sec_type;
+ } else {
+ enctype = HAL_ENCRYPT_TYPE_OPEN;
+ }
+ spin_unlock_bh(&ar->ab->base_lock);
+
+ err_bitmap = ath12k_dp_rx_h_mpdu_err(ab, rx_desc);
+ if (enctype != HAL_ENCRYPT_TYPE_OPEN && !err_bitmap)
+ is_decrypted = ath12k_dp_rx_h_is_decrypted(ab, rx_desc);
+
+ /* Clear per-MPDU flags while leaving per-PPDU flags intact */
+ rx_status->flag &= ~(RX_FLAG_FAILED_FCS_CRC |
+ RX_FLAG_MMIC_ERROR |
+ RX_FLAG_DECRYPTED |
+ RX_FLAG_IV_STRIPPED |
+ RX_FLAG_MMIC_STRIPPED);
+
+ if (err_bitmap & HAL_RX_MPDU_ERR_FCS)
+ rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
+ if (err_bitmap & HAL_RX_MPDU_ERR_TKIP_MIC)
+ rx_status->flag |= RX_FLAG_MMIC_ERROR;
+
+ if (is_decrypted) {
+ rx_status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_MMIC_STRIPPED;
+
+ if (fill_crypto_hdr)
+ rx_status->flag |= RX_FLAG_MIC_STRIPPED |
+ RX_FLAG_ICV_STRIPPED;
+ else
+ rx_status->flag |= RX_FLAG_IV_STRIPPED |
+ RX_FLAG_PN_VALIDATED;
+ }
+
+ ath12k_dp_rx_h_csum_offload(ar, msdu);
+ ath12k_dp_rx_h_undecap(ar, msdu, rx_desc,
+ enctype, rx_status, is_decrypted);
+
+ if (!is_decrypted || fill_crypto_hdr)
+ return;
+
+ if (ath12k_dp_rx_h_decap_type(ar->ab, rx_desc) !=
+ DP_RX_DECAP_TYPE_ETHERNET2_DIX) {
+ hdr = (void *)msdu->data;
+ hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
+ }
+}
+
+static void ath12k_dp_rx_h_rate(struct ath12k *ar, struct hal_rx_desc *rx_desc,
+ struct ieee80211_rx_status *rx_status)
+{
+ struct ath12k_base *ab = ar->ab;
+ struct ieee80211_supported_band *sband;
+ enum rx_msdu_start_pkt_type pkt_type;
+ u8 bw;
+ u8 rate_mcs, nss;
+ u8 sgi;
+ bool is_cck;
+
+ pkt_type = ath12k_dp_rx_h_pkt_type(ab, rx_desc);
+ bw = ath12k_dp_rx_h_rx_bw(ab, rx_desc);
+ rate_mcs = ath12k_dp_rx_h_rate_mcs(ab, rx_desc);
+ nss = ath12k_dp_rx_h_nss(ab, rx_desc);
+ sgi = ath12k_dp_rx_h_sgi(ab, rx_desc);
+
+ switch (pkt_type) {
+ case RX_MSDU_START_PKT_TYPE_11A:
+ case RX_MSDU_START_PKT_TYPE_11B:
+ is_cck = (pkt_type == RX_MSDU_START_PKT_TYPE_11B);
+ sband = &ar->mac.sbands[rx_status->band];
+ rx_status->rate_idx = ath12k_mac_hw_rate_to_idx(sband, rate_mcs,
+ is_cck);
+ break;
+ case RX_MSDU_START_PKT_TYPE_11N:
+ rx_status->encoding = RX_ENC_HT;
+ if (rate_mcs > ATH12K_HT_MCS_MAX) {
+ ath12k_warn(ar->ab,
+ "Received with invalid mcs in HT mode %d\n",
+ rate_mcs);
+ break;
+ }
+ rx_status->rate_idx = rate_mcs + (8 * (nss - 1));
+ if (sgi)
+ rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+ rx_status->bw = ath12k_mac_bw_to_mac80211_bw(bw);
+ break;
+ case RX_MSDU_START_PKT_TYPE_11AC:
+ rx_status->encoding = RX_ENC_VHT;
+ rx_status->rate_idx = rate_mcs;
+ if (rate_mcs > ATH12K_VHT_MCS_MAX) {
+ ath12k_warn(ar->ab,
+ "Received with invalid mcs in VHT mode %d\n",
+ rate_mcs);
+ break;
+ }
+ rx_status->nss = nss;
+ if (sgi)
+ rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+ rx_status->bw = ath12k_mac_bw_to_mac80211_bw(bw);
+ break;
+ case RX_MSDU_START_PKT_TYPE_11AX:
+ rx_status->rate_idx = rate_mcs;
+ if (rate_mcs > ATH12K_HE_MCS_MAX) {
+ ath12k_warn(ar->ab,
+ "Received with invalid mcs in HE mode %d\n",
+ rate_mcs);
+ break;
+ }
+ rx_status->encoding = RX_ENC_HE;
+ rx_status->nss = nss;
+ rx_status->he_gi = ath12k_he_gi_to_nl80211_he_gi(sgi);
+ rx_status->bw = ath12k_mac_bw_to_mac80211_bw(bw);
+ break;
+ }
+}
+
+void ath12k_dp_rx_h_ppdu(struct ath12k *ar, struct hal_rx_desc *rx_desc,
+ struct ieee80211_rx_status *rx_status)
+{
+ struct ath12k_base *ab = ar->ab;
+ u8 channel_num;
+ u32 center_freq, meta_data;
+ struct ieee80211_channel *channel;
+
+ rx_status->freq = 0;
+ rx_status->rate_idx = 0;
+ rx_status->nss = 0;
+ rx_status->encoding = RX_ENC_LEGACY;
+ rx_status->bw = RATE_INFO_BW_20;
+ rx_status->enc_flags = 0;
+
+ rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;
+
+ meta_data = ath12k_dp_rx_h_freq(ab, rx_desc);
+ channel_num = meta_data;
+ center_freq = meta_data >> 16;
+
+ if (center_freq >= 5935 && center_freq <= 7105) {
+ rx_status->band = NL80211_BAND_6GHZ;
+ } else if (channel_num >= 1 && channel_num <= 14) {
+ rx_status->band = NL80211_BAND_2GHZ;
+ } else if (channel_num >= 36 && channel_num <= 173) {
+ rx_status->band = NL80211_BAND_5GHZ;
+ } else {
+ spin_lock_bh(&ar->data_lock);
+ channel = ar->rx_channel;
+ if (channel) {
+ rx_status->band = channel->band;
+ channel_num =
+ ieee80211_frequency_to_channel(channel->center_freq);
+ }
+ spin_unlock_bh(&ar->data_lock);
+ ath12k_dbg_dump(ar->ab, ATH12K_DBG_DATA, NULL, "rx_desc: ",
+ rx_desc, sizeof(*rx_desc));
+ }
+
+ rx_status->freq = ieee80211_channel_to_frequency(channel_num,
+ rx_status->band);
+
+ ath12k_dp_rx_h_rate(ar, rx_desc, rx_status);
+}
+
+static void ath12k_dp_rx_deliver_msdu(struct ath12k *ar, struct napi_struct *napi,
+ struct sk_buff *msdu,
+ struct ieee80211_rx_status *status)
+{
+ struct ath12k_base *ab = ar->ab;
+ static const struct ieee80211_radiotap_he known = {
+ .data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN),
+ .data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN),
+ };
+ struct ieee80211_radiotap_he *he;
+ struct ieee80211_rx_status *rx_status;
+ struct ieee80211_sta *pubsta;
+ struct ath12k_peer *peer;
+ struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
+ u8 decap = DP_RX_DECAP_TYPE_RAW;
+ bool is_mcbc = rxcb->is_mcbc;
+ bool is_eapol = rxcb->is_eapol;
+
+ if (status->encoding == RX_ENC_HE && !(status->flag & RX_FLAG_RADIOTAP_HE) &&
+ !(status->flag & RX_FLAG_SKIP_MONITOR)) {
+ he = skb_push(msdu, sizeof(known));
+ memcpy(he, &known, sizeof(known));
+ status->flag |= RX_FLAG_RADIOTAP_HE;
+ }
+
+ if (!(status->flag & RX_FLAG_ONLY_MONITOR))
+ decap = ath12k_dp_rx_h_decap_type(ab, rxcb->rx_desc);
+
+ spin_lock_bh(&ab->base_lock);
+ peer = ath12k_dp_rx_h_find_peer(ab, msdu);
+
+ pubsta = peer ? peer->sta : NULL;
+
+ spin_unlock_bh(&ab->base_lock);
+
+ ath12k_dbg(ab, ATH12K_DBG_DATA,
+ "rx skb %pK len %u peer %pM %d %s sn %u %s%s%s%s%s%s%s%s rate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
+ msdu,
+ msdu->len,
+ peer ? peer->addr : NULL,
+ rxcb->tid,
+ is_mcbc ? "mcast" : "ucast",
+ ath12k_dp_rx_h_seq_no(ab, rxcb->rx_desc),
+ (status->encoding == RX_ENC_LEGACY) ? "legacy" : "",
+ (status->encoding == RX_ENC_HT) ? "ht" : "",
+ (status->encoding == RX_ENC_VHT) ? "vht" : "",
+ (status->encoding == RX_ENC_HE) ? "he" : "",
+ (status->bw == RATE_INFO_BW_40) ? "40" : "",
+ (status->bw == RATE_INFO_BW_80) ? "80" : "",
+ (status->bw == RATE_INFO_BW_160) ? "160" : "",
+ status->enc_flags & RX_ENC_FLAG_SHORT_GI ? "sgi " : "",
+ status->rate_idx,
+ status->nss,
+ status->freq,
+ status->band, status->flag,
+ !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
+ !!(status->flag & RX_FLAG_MMIC_ERROR),
+ !!(status->flag & RX_FLAG_AMSDU_MORE));
+
+ ath12k_dbg_dump(ab, ATH12K_DBG_DP_RX, NULL, "dp rx msdu: ",
+ msdu->data, msdu->len);
+
+ rx_status = IEEE80211_SKB_RXCB(msdu);
+ *rx_status = *status;
+
+ /* TODO: trace rx packet */
+
+ /* PN for multicast packets are not validate in HW,
+ * so skip 802.3 rx path
+ * Also, fast_rx expectes the STA to be authorized, hence
+ * eapol packets are sent in slow path.
+ */
+ if (decap == DP_RX_DECAP_TYPE_ETHERNET2_DIX && !is_eapol &&
+ !(is_mcbc && rx_status->flag & RX_FLAG_DECRYPTED))
+ rx_status->flag |= RX_FLAG_8023;
+
+ ieee80211_rx_napi(ar->hw, pubsta, msdu, napi);
+}
+
+static int ath12k_dp_rx_process_msdu(struct ath12k *ar,
+ struct sk_buff *msdu,
+ struct sk_buff_head *msdu_list,
+ struct ieee80211_rx_status *rx_status)
+{
+ struct ath12k_base *ab = ar->ab;
+ struct hal_rx_desc *rx_desc, *lrx_desc;
+ struct ath12k_skb_rxcb *rxcb;
+ struct sk_buff *last_buf;
+ u8 l3_pad_bytes;
+ u16 msdu_len;
+ int ret;
+ u32 hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz;
+
+ last_buf = ath12k_dp_rx_get_msdu_last_buf(msdu_list, msdu);
+ if (!last_buf) {
+ ath12k_warn(ab,
+ "No valid Rx buffer to access MSDU_END tlv\n");
+ ret = -EIO;
+ goto free_out;
+ }
+
+ rx_desc = (struct hal_rx_desc *)msdu->data;
+ lrx_desc = (struct hal_rx_desc *)last_buf->data;
+ if (!ath12k_dp_rx_h_msdu_done(ab, lrx_desc)) {
+ ath12k_warn(ab, "msdu_done bit in msdu_end is not set\n");
+ ret = -EIO;
+ goto free_out;
+ }
+
+ rxcb = ATH12K_SKB_RXCB(msdu);
+ rxcb->rx_desc = rx_desc;
+ msdu_len = ath12k_dp_rx_h_msdu_len(ab, lrx_desc);
+ l3_pad_bytes = ath12k_dp_rx_h_l3pad(ab, lrx_desc);
+
+ if (rxcb->is_frag) {
+ skb_pull(msdu, hal_rx_desc_sz);
+ } else if (!rxcb->is_continuation) {
+ if ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE) {
+ ret = -EINVAL;
+ ath12k_warn(ab, "invalid msdu len %u\n", msdu_len);
+ ath12k_dbg_dump(ab, ATH12K_DBG_DATA, NULL, "", rx_desc,
+ sizeof(*rx_desc));
+ goto free_out;
+ }
+ skb_put(msdu, hal_rx_desc_sz + l3_pad_bytes + msdu_len);
+ skb_pull(msdu, hal_rx_desc_sz + l3_pad_bytes);
+ } else {
+ ret = ath12k_dp_rx_msdu_coalesce(ar, msdu_list,
+ msdu, last_buf,
+ l3_pad_bytes, msdu_len);
+ if (ret) {
+ ath12k_warn(ab,
+ "failed to coalesce msdu rx buffer%d\n", ret);
+ goto free_out;
+ }
+ }
+
+ ath12k_dp_rx_h_ppdu(ar, rx_desc, rx_status);
+ ath12k_dp_rx_h_mpdu(ar, msdu, rx_desc, rx_status);
+
+ rx_status->flag |= RX_FLAG_SKIP_MONITOR | RX_FLAG_DUP_VALIDATED;
+
+ return 0;
+
+free_out:
+ return ret;
+}
+
+static void ath12k_dp_rx_process_received_packets(struct ath12k_base *ab,
+ struct napi_struct *napi,
+ struct sk_buff_head *msdu_list,
+ int ring_id)
+{
+ struct ieee80211_rx_status rx_status = {0};
+ struct ath12k_skb_rxcb *rxcb;
+ struct sk_buff *msdu;
+ struct ath12k *ar;
+ u8 mac_id;
+ int ret;
+
+ if (skb_queue_empty(msdu_list))
+ return;
+
+ rcu_read_lock();
+
+ while ((msdu = __skb_dequeue(msdu_list))) {
+ rxcb = ATH12K_SKB_RXCB(msdu);
+ mac_id = rxcb->mac_id;
+ ar = ab->pdevs[mac_id].ar;
+ if (!rcu_dereference(ab->pdevs_active[mac_id])) {
+ dev_kfree_skb_any(msdu);
+ continue;
+ }
+
+ if (test_bit(ATH12K_CAC_RUNNING, &ar->dev_flags)) {
+ dev_kfree_skb_any(msdu);
+ continue;
+ }
+
+ ret = ath12k_dp_rx_process_msdu(ar, msdu, msdu_list, &rx_status);
+ if (ret) {
+ ath12k_dbg(ab, ATH12K_DBG_DATA,
+ "Unable to process msdu %d", ret);
+ dev_kfree_skb_any(msdu);
+ continue;
+ }
+
+ ath12k_dp_rx_deliver_msdu(ar, napi, msdu, &rx_status);
+ }
+
+ rcu_read_unlock();
+}
+
+int ath12k_dp_rx_process(struct ath12k_base *ab, int ring_id,
+ struct napi_struct *napi, int budget)
+{
+ struct ath12k_rx_desc_info *desc_info;
+ struct ath12k_dp *dp = &ab->dp;
+ struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
+ struct hal_reo_dest_ring *desc;
+ int num_buffs_reaped = 0;
+ struct sk_buff_head msdu_list;
+ struct ath12k_skb_rxcb *rxcb;
+ int total_msdu_reaped = 0;
+ struct hal_srng *srng;
+ struct sk_buff *msdu;
+ bool done = false;
+ int mac_id;
+ u64 desc_va;
+
+ __skb_queue_head_init(&msdu_list);
+
+ srng = &ab->hal.srng_list[dp->reo_dst_ring[ring_id].ring_id];
+
+ spin_lock_bh(&srng->lock);
+
+try_again:
+ ath12k_hal_srng_access_begin(ab, srng);
+
+ while ((desc = ath12k_hal_srng_dst_get_next_entry(ab, srng))) {
+ enum hal_reo_dest_ring_push_reason push_reason;
+ u32 cookie;
+
+ cookie = le32_get_bits(desc->buf_addr_info.info1,
+ BUFFER_ADDR_INFO1_SW_COOKIE);
+
+ mac_id = le32_get_bits(desc->info0,
+ HAL_REO_DEST_RING_INFO0_SRC_LINK_ID);
+
+ desc_va = ((u64)le32_to_cpu(desc->buf_va_hi) << 32 |
+ le32_to_cpu(desc->buf_va_lo));
+ desc_info = (struct ath12k_rx_desc_info *)((unsigned long)desc_va);
+
+ /* retry manual desc retrieval */
+ if (!desc_info) {
+ desc_info = ath12k_dp_get_rx_desc(ab, cookie);
+ if (!desc_info) {
+ ath12k_warn(ab, "Invalid cookie in manual desc retrival");
+ continue;
+ }
+ }
+
+ if (desc_info->magic != ATH12K_DP_RX_DESC_MAGIC)
+ ath12k_warn(ab, "Check HW CC implementation");
+
+ msdu = desc_info->skb;
+ desc_info->skb = NULL;
+
+ spin_lock_bh(&dp->rx_desc_lock);
+ list_move_tail(&desc_info->list, &dp->rx_desc_free_list);
+ spin_unlock_bh(&dp->rx_desc_lock);
+
+ rxcb = ATH12K_SKB_RXCB(msdu);
+ dma_unmap_single(ab->dev, rxcb->paddr,
+ msdu->len + skb_tailroom(msdu),
+ DMA_FROM_DEVICE);
+
+ num_buffs_reaped++;
+
+ push_reason = le32_get_bits(desc->info0,
+ HAL_REO_DEST_RING_INFO0_PUSH_REASON);
+ if (push_reason !=
+ HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION) {
+ dev_kfree_skb_any(msdu);
+ ab->soc_stats.hal_reo_error[dp->reo_dst_ring[ring_id].ring_id]++;
+ continue;
+ }
+
+ rxcb->is_first_msdu = !!(le32_to_cpu(desc->rx_msdu_info.info0) &
+ RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU);
+ rxcb->is_last_msdu = !!(le32_to_cpu(desc->rx_msdu_info.info0) &
+ RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU);
+ rxcb->is_continuation = !!(le32_to_cpu(desc->rx_msdu_info.info0) &
+ RX_MSDU_DESC_INFO0_MSDU_CONTINUATION);
+ rxcb->mac_id = mac_id;
+ rxcb->peer_id = le32_get_bits(desc->rx_mpdu_info.peer_meta_data,
+ RX_MPDU_DESC_META_DATA_PEER_ID);
+ rxcb->tid = le32_get_bits(desc->rx_mpdu_info.info0,
+ RX_MPDU_DESC_INFO0_TID);
+
+ __skb_queue_tail(&msdu_list, msdu);
+
+ if (!rxcb->is_continuation) {
+ total_msdu_reaped++;
+ done = true;
+ } else {
+ done = false;
+ }
+
+ if (total_msdu_reaped >= budget)
+ break;
+ }
+
+ /* Hw might have updated the head pointer after we cached it.
+ * In this case, even though there are entries in the ring we'll
+ * get rx_desc NULL. Give the read another try with updated cached
+ * head pointer so that we can reap complete MPDU in the current
+ * rx processing.
+ */
+ if (!done && ath12k_hal_srng_dst_num_free(ab, srng, true)) {
+ ath12k_hal_srng_access_end(ab, srng);
+ goto try_again;
+ }
+
+ ath12k_hal_srng_access_end(ab, srng);
+
+ spin_unlock_bh(&srng->lock);
+
+ if (!total_msdu_reaped)
+ goto exit;
+
+ /* TODO: Move to implicit BM? */
+ ath12k_dp_rx_bufs_replenish(ab, 0, rx_ring, num_buffs_reaped,
+ ab->hw_params->hal_params->rx_buf_rbm, true);
+
+ ath12k_dp_rx_process_received_packets(ab, napi, &msdu_list,
+ ring_id);
+
+exit:
+ return total_msdu_reaped;
+}
+
+static void ath12k_dp_rx_frag_timer(struct timer_list *timer)
+{
+ struct ath12k_dp_rx_tid *rx_tid = from_timer(rx_tid, timer, frag_timer);
+
+ spin_lock_bh(&rx_tid->ab->base_lock);
+ if (rx_tid->last_frag_no &&
+ rx_tid->rx_frag_bitmap == GENMASK(rx_tid->last_frag_no, 0)) {
+ spin_unlock_bh(&rx_tid->ab->base_lock);
+ return;
+ }
+ ath12k_dp_rx_frags_cleanup(rx_tid, true);
+ spin_unlock_bh(&rx_tid->ab->base_lock);
+}
+
+int ath12k_dp_rx_peer_frag_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_id)
+{
+ struct ath12k_base *ab = ar->ab;
+ struct crypto_shash *tfm;
+ struct ath12k_peer *peer;
+ struct ath12k_dp_rx_tid *rx_tid;
+ int i;
+
+ tfm = crypto_alloc_shash("michael_mic", 0, 0);
+ if (IS_ERR(tfm))
+ return PTR_ERR(tfm);
+
+ spin_lock_bh(&ab->base_lock);
+
+ peer = ath12k_peer_find(ab, vdev_id, peer_mac);
+ if (!peer) {
+ spin_unlock_bh(&ab->base_lock);
+ ath12k_warn(ab, "failed to find the peer to set up fragment info\n");
+ return -ENOENT;
+ }
+
+ for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
+ rx_tid = &peer->rx_tid[i];
+ rx_tid->ab = ab;
+ timer_setup(&rx_tid->frag_timer, ath12k_dp_rx_frag_timer, 0);
+ skb_queue_head_init(&rx_tid->rx_frags);
+ }
+
+ peer->tfm_mmic = tfm;
+ spin_unlock_bh(&ab->base_lock);
+
+ return 0;
+}
+
+static int ath12k_dp_rx_h_michael_mic(struct crypto_shash *tfm, u8 *key,
+ struct ieee80211_hdr *hdr, u8 *data,
+ size_t data_len, u8 *mic)
+{
+ SHASH_DESC_ON_STACK(desc, tfm);
+ u8 mic_hdr[16] = {0};
+ u8 tid = 0;
+ int ret;
+
+ if (!tfm)
+ return -EINVAL;
+
+ desc->tfm = tfm;
+
+ ret = crypto_shash_setkey(tfm, key, 8);
+ if (ret)
+ goto out;
+
+ ret = crypto_shash_init(desc);
+ if (ret)
+ goto out;
+
+ /* TKIP MIC header */
+ memcpy(mic_hdr, ieee80211_get_DA(hdr), ETH_ALEN);
+ memcpy(mic_hdr + ETH_ALEN, ieee80211_get_SA(hdr), ETH_ALEN);
+ if (ieee80211_is_data_qos(hdr->frame_control))
+ tid = ieee80211_get_tid(hdr);
+ mic_hdr[12] = tid;
+
+ ret = crypto_shash_update(desc, mic_hdr, 16);
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(desc, data, data_len);
+ if (ret)
+ goto out;
+ ret = crypto_shash_final(desc, mic);
+out:
+ shash_desc_zero(desc);
+ return ret;
+}
+
+static int ath12k_dp_rx_h_verify_tkip_mic(struct ath12k *ar, struct ath12k_peer *peer,
+ struct sk_buff *msdu)
+{
+ struct ath12k_base *ab = ar->ab;
+ struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)msdu->data;
+ struct ieee80211_rx_status *rxs = IEEE80211_SKB_RXCB(msdu);
+ struct ieee80211_key_conf *key_conf;
+ struct ieee80211_hdr *hdr;
+ u8 mic[IEEE80211_CCMP_MIC_LEN];
+ int head_len, tail_len, ret;
+ size_t data_len;
+ u32 hdr_len, hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz;
+ u8 *key, *data;
+ u8 key_idx;
+
+ if (ath12k_dp_rx_h_enctype(ab, rx_desc) != HAL_ENCRYPT_TYPE_TKIP_MIC)
+ return 0;
+
+ hdr = (struct ieee80211_hdr *)(msdu->data + hal_rx_desc_sz);
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+ head_len = hdr_len + hal_rx_desc_sz + IEEE80211_TKIP_IV_LEN;
+ tail_len = IEEE80211_CCMP_MIC_LEN + IEEE80211_TKIP_ICV_LEN + FCS_LEN;
+
+ if (!is_multicast_ether_addr(hdr->addr1))
+ key_idx = peer->ucast_keyidx;
+ else
+ key_idx = peer->mcast_keyidx;
+
+ key_conf = peer->keys[key_idx];
+
+ data = msdu->data + head_len;
+ data_len = msdu->len - head_len - tail_len;
+ key = &key_conf->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY];
+
+ ret = ath12k_dp_rx_h_michael_mic(peer->tfm_mmic, key, hdr, data, data_len, mic);
+ if (ret || memcmp(mic, data + data_len, IEEE80211_CCMP_MIC_LEN))
+ goto mic_fail;
+
+ return 0;
+
+mic_fail:
+ (ATH12K_SKB_RXCB(msdu))->is_first_msdu = true;
+ (ATH12K_SKB_RXCB(msdu))->is_last_msdu = true;
+
+ rxs->flag |= RX_FLAG_MMIC_ERROR | RX_FLAG_MMIC_STRIPPED |
+ RX_FLAG_IV_STRIPPED | RX_FLAG_DECRYPTED;
+ skb_pull(msdu, hal_rx_desc_sz);
+
+ ath12k_dp_rx_h_ppdu(ar, rx_desc, rxs);
+ ath12k_dp_rx_h_undecap(ar, msdu, rx_desc,
+ HAL_ENCRYPT_TYPE_TKIP_MIC, rxs, true);
+ ieee80211_rx(ar->hw, msdu);
+ return -EINVAL;
+}
+
+static void ath12k_dp_rx_h_undecap_frag(struct ath12k *ar, struct sk_buff *msdu,
+ enum hal_encrypt_type enctype, u32 flags)
+{
+ struct ieee80211_hdr *hdr;
+ size_t hdr_len;
+ size_t crypto_len;
+ u32 hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz;
+
+ if (!flags)
+ return;
+
+ hdr = (struct ieee80211_hdr *)(msdu->data + hal_rx_desc_sz);
+
+ if (flags & RX_FLAG_MIC_STRIPPED)
+ skb_trim(msdu, msdu->len -
+ ath12k_dp_rx_crypto_mic_len(ar, enctype));
+
+ if (flags & RX_FLAG_ICV_STRIPPED)
+ skb_trim(msdu, msdu->len -
+ ath12k_dp_rx_crypto_icv_len(ar, enctype));
+
+ if (flags & RX_FLAG_IV_STRIPPED) {
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+ crypto_len = ath12k_dp_rx_crypto_param_len(ar, enctype);
+
+ memmove(msdu->data + hal_rx_desc_sz + crypto_len,
+ msdu->data + hal_rx_desc_sz, hdr_len);
+ skb_pull(msdu, crypto_len);
+ }
+}
+
+static int ath12k_dp_rx_h_defrag(struct ath12k *ar,
+ struct ath12k_peer *peer,
+ struct ath12k_dp_rx_tid *rx_tid,
+ struct sk_buff **defrag_skb)
+{
+ struct ath12k_base *ab = ar->ab;
+ struct hal_rx_desc *rx_desc;
+ struct sk_buff *skb, *first_frag, *last_frag;
+ struct ieee80211_hdr *hdr;
+ enum hal_encrypt_type enctype;
+ bool is_decrypted = false;
+ int msdu_len = 0;
+ int extra_space;
+ u32 flags, hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz;
+
+ first_frag = skb_peek(&rx_tid->rx_frags);
+ last_frag = skb_peek_tail(&rx_tid->rx_frags);
+
+ skb_queue_walk(&rx_tid->rx_frags, skb) {
+ flags = 0;
+ rx_desc = (struct hal_rx_desc *)skb->data;
+ hdr = (struct ieee80211_hdr *)(skb->data + hal_rx_desc_sz);
+
+ enctype = ath12k_dp_rx_h_enctype(ab, rx_desc);
+ if (enctype != HAL_ENCRYPT_TYPE_OPEN)
+ is_decrypted = ath12k_dp_rx_h_is_decrypted(ab,
+ rx_desc);
+
+ if (is_decrypted) {
+ if (skb != first_frag)
+ flags |= RX_FLAG_IV_STRIPPED;
+ if (skb != last_frag)
+ flags |= RX_FLAG_ICV_STRIPPED |
+ RX_FLAG_MIC_STRIPPED;
+ }
+
+ /* RX fragments are always raw packets */
+ if (skb != last_frag)
+ skb_trim(skb, skb->len - FCS_LEN);
+ ath12k_dp_rx_h_undecap_frag(ar, skb, enctype, flags);
+
+ if (skb != first_frag)
+ skb_pull(skb, hal_rx_desc_sz +
+ ieee80211_hdrlen(hdr->frame_control));
+ msdu_len += skb->len;
+ }
+
+ extra_space = msdu_len - (DP_RX_BUFFER_SIZE + skb_tailroom(first_frag));
+ if (extra_space > 0 &&
+ (pskb_expand_head(first_frag, 0, extra_space, GFP_ATOMIC) < 0))
+ return -ENOMEM;
+
+ __skb_unlink(first_frag, &rx_tid->rx_frags);
+ while ((skb = __skb_dequeue(&rx_tid->rx_frags))) {
+ skb_put_data(first_frag, skb->data, skb->len);
+ dev_kfree_skb_any(skb);
+ }
+
+ hdr = (struct ieee80211_hdr *)(first_frag->data + hal_rx_desc_sz);
+ hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_MOREFRAGS);
+ ATH12K_SKB_RXCB(first_frag)->is_frag = 1;
+
+ if (ath12k_dp_rx_h_verify_tkip_mic(ar, peer, first_frag))
+ first_frag = NULL;
+
+ *defrag_skb = first_frag;
+ return 0;
+}
+
+static int ath12k_dp_rx_h_defrag_reo_reinject(struct ath12k *ar,
+ struct ath12k_dp_rx_tid *rx_tid,
+ struct sk_buff *defrag_skb)
+{
+ struct ath12k_base *ab = ar->ab;
+ struct ath12k_dp *dp = &ab->dp;
+ struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)defrag_skb->data;
+ struct hal_reo_entrance_ring *reo_ent_ring;
+ struct hal_reo_dest_ring *reo_dest_ring;
+ struct dp_link_desc_bank *link_desc_banks;
+ struct hal_rx_msdu_link *msdu_link;
+ struct hal_rx_msdu_details *msdu0;
+ struct hal_srng *srng;
+ dma_addr_t link_paddr, buf_paddr;
+ u32 desc_bank, msdu_info, msdu_ext_info, mpdu_info;
+ u32 cookie, hal_rx_desc_sz, dest_ring_info0;
+ int ret;
+ struct ath12k_rx_desc_info *desc_info;
+ u8 dst_ind;
+
+ hal_rx_desc_sz = ab->hw_params->hal_desc_sz;
+ link_desc_banks = dp->link_desc_banks;
+ reo_dest_ring = rx_tid->dst_ring_desc;
+
+ ath12k_hal_rx_reo_ent_paddr_get(ab, &reo_dest_ring->buf_addr_info,
+ &link_paddr, &cookie);
+ desc_bank = u32_get_bits(cookie, DP_LINK_DESC_BANK_MASK);
+
+ msdu_link = (struct hal_rx_msdu_link *)(link_desc_banks[desc_bank].vaddr +
+ (link_paddr - link_desc_banks[desc_bank].paddr));
+ msdu0 = &msdu_link->msdu_link[0];
+ msdu_ext_info = le32_to_cpu(msdu0->rx_msdu_ext_info.info0);
+ dst_ind = u32_get_bits(msdu_ext_info, RX_MSDU_EXT_DESC_INFO0_REO_DEST_IND);
+
+ memset(msdu0, 0, sizeof(*msdu0));
+
+ msdu_info = u32_encode_bits(1, RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU) |
+ u32_encode_bits(1, RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU) |
+ u32_encode_bits(0, RX_MSDU_DESC_INFO0_MSDU_CONTINUATION) |
+ u32_encode_bits(defrag_skb->len - hal_rx_desc_sz,
+ RX_MSDU_DESC_INFO0_MSDU_LENGTH) |
+ u32_encode_bits(1, RX_MSDU_DESC_INFO0_VALID_SA) |
+ u32_encode_bits(1, RX_MSDU_DESC_INFO0_VALID_DA);
+ msdu0->rx_msdu_info.info0 = cpu_to_le32(msdu_info);
+ msdu0->rx_msdu_ext_info.info0 = cpu_to_le32(msdu_ext_info);
+
+ /* change msdu len in hal rx desc */
+ ath12k_dp_rxdesc_set_msdu_len(ab, rx_desc, defrag_skb->len - hal_rx_desc_sz);
+
+ buf_paddr = dma_map_single(ab->dev, defrag_skb->data,
+ defrag_skb->len + skb_tailroom(defrag_skb),
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(ab->dev, buf_paddr))
+ return -ENOMEM;
+
+ spin_lock_bh(&dp->rx_desc_lock);
+ desc_info = list_first_entry_or_null(&dp->rx_desc_free_list,
+ struct ath12k_rx_desc_info,
+ list);
+ if (!desc_info) {
+ spin_unlock_bh(&dp->rx_desc_lock);
+ ath12k_warn(ab, "failed to find rx desc for reinject\n");
+ ret = -ENOMEM;
+ goto err_unmap_dma;
+ }
+
+ desc_info->skb = defrag_skb;
+
+ list_del(&desc_info->list);
+ list_add_tail(&desc_info->list, &dp->rx_desc_used_list);
+ spin_unlock_bh(&dp->rx_desc_lock);
+
+ ATH12K_SKB_RXCB(defrag_skb)->paddr = buf_paddr;
+
+ ath12k_hal_rx_buf_addr_info_set(&msdu0->buf_addr_info, buf_paddr,
+ desc_info->cookie,
+ HAL_RX_BUF_RBM_SW3_BM);
+
+ /* Fill mpdu details into reo entrace ring */
+ srng = &ab->hal.srng_list[dp->reo_reinject_ring.ring_id];
+
+ spin_lock_bh(&srng->lock);
+ ath12k_hal_srng_access_begin(ab, srng);
+
+ reo_ent_ring = ath12k_hal_srng_src_get_next_entry(ab, srng);
+ if (!reo_ent_ring) {
+ ath12k_hal_srng_access_end(ab, srng);
+ spin_unlock_bh(&srng->lock);
+ ret = -ENOSPC;
+ goto err_free_desc;
+ }
+ memset(reo_ent_ring, 0, sizeof(*reo_ent_ring));
+
+ ath12k_hal_rx_buf_addr_info_set(&reo_ent_ring->buf_addr_info, link_paddr,
+ cookie,
+ HAL_RX_BUF_RBM_WBM_CHIP0_IDLE_DESC_LIST);
+
+ mpdu_info = u32_encode_bits(1, RX_MPDU_DESC_INFO0_MSDU_COUNT) |
+ u32_encode_bits(0, RX_MPDU_DESC_INFO0_FRAG_FLAG) |
+ u32_encode_bits(1, RX_MPDU_DESC_INFO0_RAW_MPDU) |
+ u32_encode_bits(1, RX_MPDU_DESC_INFO0_VALID_PN) |
+ u32_encode_bits(rx_tid->tid, RX_MPDU_DESC_INFO0_TID);
+
+ reo_ent_ring->rx_mpdu_info.info0 = cpu_to_le32(mpdu_info);
+ reo_ent_ring->rx_mpdu_info.peer_meta_data =
+ reo_dest_ring->rx_mpdu_info.peer_meta_data;
+
+ reo_ent_ring->queue_addr_lo = cpu_to_le32(lower_32_bits(rx_tid->paddr));
+ reo_ent_ring->info0 = le32_encode_bits(upper_32_bits(rx_tid->paddr),
+ HAL_REO_ENTR_RING_INFO0_QUEUE_ADDR_HI) |
+ le32_encode_bits(dst_ind, HAL_REO_ENTR_RING_INFO0_DEST_IND);
+
+ reo_ent_ring->info1 = le32_encode_bits(rx_tid->cur_sn,
+ HAL_REO_ENTR_RING_INFO1_MPDU_SEQ_NUM);
+ dest_ring_info0 = le32_get_bits(reo_dest_ring->info0,
+ HAL_REO_DEST_RING_INFO0_SRC_LINK_ID);
+ reo_ent_ring->info2 =
+ cpu_to_le32(u32_get_bits(dest_ring_info0,
+ HAL_REO_ENTR_RING_INFO2_SRC_LINK_ID));
+
+ ath12k_hal_srng_access_end(ab, srng);
+ spin_unlock_bh(&srng->lock);
+
+ return 0;
+
+err_free_desc:
+ spin_lock_bh(&dp->rx_desc_lock);
+ list_del(&desc_info->list);
+ list_add_tail(&desc_info->list, &dp->rx_desc_free_list);
+ desc_info->skb = NULL;
+ spin_unlock_bh(&dp->rx_desc_lock);
+err_unmap_dma:
+ dma_unmap_single(ab->dev, buf_paddr, defrag_skb->len + skb_tailroom(defrag_skb),
+ DMA_FROM_DEVICE);
+ return ret;
+}
+
+static int ath12k_dp_rx_h_cmp_frags(struct ath12k_base *ab,
+ struct sk_buff *a, struct sk_buff *b)
+{
+ int frag1, frag2;
+
+ frag1 = ath12k_dp_rx_h_frag_no(ab, a);
+ frag2 = ath12k_dp_rx_h_frag_no(ab, b);
+
+ return frag1 - frag2;
+}
+
+static void ath12k_dp_rx_h_sort_frags(struct ath12k_base *ab,
+ struct sk_buff_head *frag_list,
+ struct sk_buff *cur_frag)
+{
+ struct sk_buff *skb;
+ int cmp;
+
+ skb_queue_walk(frag_list, skb) {
+ cmp = ath12k_dp_rx_h_cmp_frags(ab, skb, cur_frag);
+ if (cmp < 0)
+ continue;
+ __skb_queue_before(frag_list, skb, cur_frag);
+ return;
+ }
+ __skb_queue_tail(frag_list, cur_frag);
+}
+
+static u64 ath12k_dp_rx_h_get_pn(struct ath12k *ar, struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr;
+ u64 pn = 0;
+ u8 *ehdr;
+ u32 hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz;
+
+ hdr = (struct ieee80211_hdr *)(skb->data + hal_rx_desc_sz);
+ ehdr = skb->data + hal_rx_desc_sz + ieee80211_hdrlen(hdr->frame_control);
+
+ pn = ehdr[0];
+ pn |= (u64)ehdr[1] << 8;
+ pn |= (u64)ehdr[4] << 16;
+ pn |= (u64)ehdr[5] << 24;
+ pn |= (u64)ehdr[6] << 32;
+ pn |= (u64)ehdr[7] << 40;
+
+ return pn;
+}
+
+static bool
+ath12k_dp_rx_h_defrag_validate_incr_pn(struct ath12k *ar, struct ath12k_dp_rx_tid *rx_tid)
+{
+ struct ath12k_base *ab = ar->ab;
+ enum hal_encrypt_type encrypt_type;
+ struct sk_buff *first_frag, *skb;
+ struct hal_rx_desc *desc;
+ u64 last_pn;
+ u64 cur_pn;
+
+ first_frag = skb_peek(&rx_tid->rx_frags);
+ desc = (struct hal_rx_desc *)first_frag->data;
+
+ encrypt_type = ath12k_dp_rx_h_enctype(ab, desc);
+ if (encrypt_type != HAL_ENCRYPT_TYPE_CCMP_128 &&
+ encrypt_type != HAL_ENCRYPT_TYPE_CCMP_256 &&
+ encrypt_type != HAL_ENCRYPT_TYPE_GCMP_128 &&
+ encrypt_type != HAL_ENCRYPT_TYPE_AES_GCMP_256)
+ return true;
+
+ last_pn = ath12k_dp_rx_h_get_pn(ar, first_frag);
+ skb_queue_walk(&rx_tid->rx_frags, skb) {
+ if (skb == first_frag)
+ continue;
+
+ cur_pn = ath12k_dp_rx_h_get_pn(ar, skb);
+ if (cur_pn != last_pn + 1)
+ return false;
+ last_pn = cur_pn;
+ }
+ return true;
+}
+
+static int ath12k_dp_rx_frag_h_mpdu(struct ath12k *ar,
+ struct sk_buff *msdu,
+ struct hal_reo_dest_ring *ring_desc)
+{
+ struct ath12k_base *ab = ar->ab;
+ struct hal_rx_desc *rx_desc;
+ struct ath12k_peer *peer;
+ struct ath12k_dp_rx_tid *rx_tid;
+ struct sk_buff *defrag_skb = NULL;
+ u32 peer_id;
+ u16 seqno, frag_no;
+ u8 tid;
+ int ret = 0;
+ bool more_frags;
+
+ rx_desc = (struct hal_rx_desc *)msdu->data;
+ peer_id = ath12k_dp_rx_h_peer_id(ab, rx_desc);
+ tid = ath12k_dp_rx_h_tid(ab, rx_desc);
+ seqno = ath12k_dp_rx_h_seq_no(ab, rx_desc);
+ frag_no = ath12k_dp_rx_h_frag_no(ab, msdu);
+ more_frags = ath12k_dp_rx_h_more_frags(ab, msdu);
+
+ if (!ath12k_dp_rx_h_seq_ctrl_valid(ab, rx_desc) ||
+ !ath12k_dp_rx_h_fc_valid(ab, rx_desc) ||
+ tid > IEEE80211_NUM_TIDS)
+ return -EINVAL;
+
+ /* received unfragmented packet in reo
+ * exception ring, this shouldn't happen
+ * as these packets typically come from
+ * reo2sw srngs.
+ */
+ if (WARN_ON_ONCE(!frag_no && !more_frags))
+ return -EINVAL;
+
+ spin_lock_bh(&ab->base_lock);
+ peer = ath12k_peer_find_by_id(ab, peer_id);
+ if (!peer) {
+ ath12k_warn(ab, "failed to find the peer to de-fragment received fragment peer_id %d\n",
+ peer_id);
+ ret = -ENOENT;
+ goto out_unlock;
+ }
+ rx_tid = &peer->rx_tid[tid];
+
+ if ((!skb_queue_empty(&rx_tid->rx_frags) && seqno != rx_tid->cur_sn) ||
+ skb_queue_empty(&rx_tid->rx_frags)) {
+ /* Flush stored fragments and start a new sequence */
+ ath12k_dp_rx_frags_cleanup(rx_tid, true);
+ rx_tid->cur_sn = seqno;
+ }
+
+ if (rx_tid->rx_frag_bitmap & BIT(frag_no)) {
+ /* Fragment already present */
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ if (frag_no > __fls(rx_tid->rx_frag_bitmap))
+ __skb_queue_tail(&rx_tid->rx_frags, msdu);
+ else
+ ath12k_dp_rx_h_sort_frags(ab, &rx_tid->rx_frags, msdu);
+
+ rx_tid->rx_frag_bitmap |= BIT(frag_no);
+ if (!more_frags)
+ rx_tid->last_frag_no = frag_no;
+
+ if (frag_no == 0) {
+ rx_tid->dst_ring_desc = kmemdup(ring_desc,
+ sizeof(*rx_tid->dst_ring_desc),
+ GFP_ATOMIC);
+ if (!rx_tid->dst_ring_desc) {
+ ret = -ENOMEM;
+ goto out_unlock;
+ }
+ } else {
+ ath12k_dp_rx_link_desc_return(ab, ring_desc,
+ HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
+ }
+
+ if (!rx_tid->last_frag_no ||
+ rx_tid->rx_frag_bitmap != GENMASK(rx_tid->last_frag_no, 0)) {
+ mod_timer(&rx_tid->frag_timer, jiffies +
+ ATH12K_DP_RX_FRAGMENT_TIMEOUT_MS);
+ goto out_unlock;
+ }
+
+ spin_unlock_bh(&ab->base_lock);
+ del_timer_sync(&rx_tid->frag_timer);
+ spin_lock_bh(&ab->base_lock);
+
+ peer = ath12k_peer_find_by_id(ab, peer_id);
+ if (!peer)
+ goto err_frags_cleanup;
+
+ if (!ath12k_dp_rx_h_defrag_validate_incr_pn(ar, rx_tid))
+ goto err_frags_cleanup;
+
+ if (ath12k_dp_rx_h_defrag(ar, peer, rx_tid, &defrag_skb))
+ goto err_frags_cleanup;
+
+ if (!defrag_skb)
+ goto err_frags_cleanup;
+
+ if (ath12k_dp_rx_h_defrag_reo_reinject(ar, rx_tid, defrag_skb))
+ goto err_frags_cleanup;
+
+ ath12k_dp_rx_frags_cleanup(rx_tid, false);
+ goto out_unlock;
+
+err_frags_cleanup:
+ dev_kfree_skb_any(defrag_skb);
+ ath12k_dp_rx_frags_cleanup(rx_tid, true);
+out_unlock:
+ spin_unlock_bh(&ab->base_lock);
+ return ret;
+}
+
+static int
+ath12k_dp_process_rx_err_buf(struct ath12k *ar, struct hal_reo_dest_ring *desc,
+ bool drop, u32 cookie)
+{
+ struct ath12k_base *ab = ar->ab;
+ struct sk_buff *msdu;
+ struct ath12k_skb_rxcb *rxcb;
+ struct hal_rx_desc *rx_desc;
+ u16 msdu_len;
+ u32 hal_rx_desc_sz = ab->hw_params->hal_desc_sz;
+ struct ath12k_rx_desc_info *desc_info;
+ u64 desc_va;
+
+ desc_va = ((u64)le32_to_cpu(desc->buf_va_hi) << 32 |
+ le32_to_cpu(desc->buf_va_lo));
+ desc_info = (struct ath12k_rx_desc_info *)((unsigned long)desc_va);
+
+ /* retry manual desc retrieval */
+ if (!desc_info) {
+ desc_info = ath12k_dp_get_rx_desc(ab, cookie);
+ if (!desc_info) {
+ ath12k_warn(ab, "Invalid cookie in manual desc retrival");
+ return -EINVAL;
+ }
+ }
+
+ if (desc_info->magic != ATH12K_DP_RX_DESC_MAGIC)
+ ath12k_warn(ab, " RX Exception, Check HW CC implementation");
+
+ msdu = desc_info->skb;
+ desc_info->skb = NULL;
+ spin_lock_bh(&ab->dp.rx_desc_lock);
+ list_move_tail(&desc_info->list, &ab->dp.rx_desc_free_list);
+ spin_unlock_bh(&ab->dp.rx_desc_lock);
+
+ rxcb = ATH12K_SKB_RXCB(msdu);
+ dma_unmap_single(ar->ab->dev, rxcb->paddr,
+ msdu->len + skb_tailroom(msdu),
+ DMA_FROM_DEVICE);
+
+ if (drop) {
+ dev_kfree_skb_any(msdu);
+ return 0;
+ }
+
+ rcu_read_lock();
+ if (!rcu_dereference(ar->ab->pdevs_active[ar->pdev_idx])) {
+ dev_kfree_skb_any(msdu);
+ goto exit;
+ }
+
+ if (test_bit(ATH12K_CAC_RUNNING, &ar->dev_flags)) {
+ dev_kfree_skb_any(msdu);
+ goto exit;
+ }
+
+ rx_desc = (struct hal_rx_desc *)msdu->data;
+ msdu_len = ath12k_dp_rx_h_msdu_len(ar->ab, rx_desc);
+ if ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE) {
+ ath12k_warn(ar->ab, "invalid msdu leng %u", msdu_len);
+ ath12k_dbg_dump(ar->ab, ATH12K_DBG_DATA, NULL, "", rx_desc,
+ sizeof(*rx_desc));
+ dev_kfree_skb_any(msdu);
+ goto exit;
+ }
+
+ skb_put(msdu, hal_rx_desc_sz + msdu_len);
+
+ if (ath12k_dp_rx_frag_h_mpdu(ar, msdu, desc)) {
+ dev_kfree_skb_any(msdu);
+ ath12k_dp_rx_link_desc_return(ar->ab, desc,
+ HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
+ }
+exit:
+ rcu_read_unlock();
+ return 0;
+}
+
+int ath12k_dp_rx_process_err(struct ath12k_base *ab, struct napi_struct *napi,
+ int budget)
+{
+ u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC];
+ struct dp_link_desc_bank *link_desc_banks;
+ enum hal_rx_buf_return_buf_manager rbm;
+ struct hal_rx_msdu_link *link_desc_va;
+ int tot_n_bufs_reaped, quota, ret, i;
+ struct hal_reo_dest_ring *reo_desc;
+ struct dp_rxdma_ring *rx_ring;
+ struct dp_srng *reo_except;
+ u32 desc_bank, num_msdus;
+ struct hal_srng *srng;
+ struct ath12k_dp *dp;
+ int mac_id;
+ struct ath12k *ar;
+ dma_addr_t paddr;
+ bool is_frag;
+ bool drop = false;
+
+ tot_n_bufs_reaped = 0;
+ quota = budget;
+
+ dp = &ab->dp;
+ reo_except = &dp->reo_except_ring;
+ link_desc_banks = dp->link_desc_banks;
+
+ srng = &ab->hal.srng_list[reo_except->ring_id];
+
+ spin_lock_bh(&srng->lock);
+
+ ath12k_hal_srng_access_begin(ab, srng);
+
+ while (budget &&
+ (reo_desc = ath12k_hal_srng_dst_get_next_entry(ab, srng))) {
+ ab->soc_stats.err_ring_pkts++;
+ ret = ath12k_hal_desc_reo_parse_err(ab, reo_desc, &paddr,
+ &desc_bank);
+ if (ret) {
+ ath12k_warn(ab, "failed to parse error reo desc %d\n",
+ ret);
+ continue;
+ }
+ link_desc_va = link_desc_banks[desc_bank].vaddr +
+ (paddr - link_desc_banks[desc_bank].paddr);
+ ath12k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus, msdu_cookies,
+ &rbm);
+ if (rbm != HAL_RX_BUF_RBM_WBM_CHIP0_IDLE_DESC_LIST &&
+ rbm != HAL_RX_BUF_RBM_SW3_BM &&
+ rbm != ab->hw_params->hal_params->rx_buf_rbm) {
+ ab->soc_stats.invalid_rbm++;
+ ath12k_warn(ab, "invalid return buffer manager %d\n", rbm);
+ ath12k_dp_rx_link_desc_return(ab, reo_desc,
+ HAL_WBM_REL_BM_ACT_REL_MSDU);
+ continue;
+ }
+
+ is_frag = !!(le32_to_cpu(reo_desc->rx_mpdu_info.info0) &
+ RX_MPDU_DESC_INFO0_FRAG_FLAG);
+
+ /* Process only rx fragments with one msdu per link desc below, and drop
+ * msdu's indicated due to error reasons.
+ */
+ if (!is_frag || num_msdus > 1) {
+ drop = true;
+ /* Return the link desc back to wbm idle list */
+ ath12k_dp_rx_link_desc_return(ab, reo_desc,
+ HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
+ }
+
+ for (i = 0; i < num_msdus; i++) {
+ mac_id = le32_get_bits(reo_desc->info0,
+ HAL_REO_DEST_RING_INFO0_SRC_LINK_ID);
+
+ ar = ab->pdevs[mac_id].ar;
+
+ if (!ath12k_dp_process_rx_err_buf(ar, reo_desc, drop,
+ msdu_cookies[i]))
+ tot_n_bufs_reaped++;
+ }
+
+ if (tot_n_bufs_reaped >= quota) {
+ tot_n_bufs_reaped = quota;
+ goto exit;
+ }
+
+ budget = quota - tot_n_bufs_reaped;
+ }
+
+exit:
+ ath12k_hal_srng_access_end(ab, srng);
+
+ spin_unlock_bh(&srng->lock);
+
+ rx_ring = &dp->rx_refill_buf_ring;
+
+ ath12k_dp_rx_bufs_replenish(ab, 0, rx_ring, tot_n_bufs_reaped,
+ ab->hw_params->hal_params->rx_buf_rbm, true);
+
+ return tot_n_bufs_reaped;
+}
+
+static void ath12k_dp_rx_null_q_desc_sg_drop(struct ath12k *ar,
+ int msdu_len,
+ struct sk_buff_head *msdu_list)
+{
+ struct sk_buff *skb, *tmp;
+ struct ath12k_skb_rxcb *rxcb;
+ int n_buffs;
+
+ n_buffs = DIV_ROUND_UP(msdu_len,
+ (DP_RX_BUFFER_SIZE - ar->ab->hw_params->hal_desc_sz));
+
+ skb_queue_walk_safe(msdu_list, skb, tmp) {
+ rxcb = ATH12K_SKB_RXCB(skb);
+ if (rxcb->err_rel_src == HAL_WBM_REL_SRC_MODULE_REO &&
+ rxcb->err_code == HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO) {
+ if (!n_buffs)
+ break;
+ __skb_unlink(skb, msdu_list);
+ dev_kfree_skb_any(skb);
+ n_buffs--;
+ }
+ }
+}
+
+static int ath12k_dp_rx_h_null_q_desc(struct ath12k *ar, struct sk_buff *msdu,
+ struct ieee80211_rx_status *status,
+ struct sk_buff_head *msdu_list)
+{
+ struct ath12k_base *ab = ar->ab;
+ u16 msdu_len, peer_id;
+ struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data;
+ u8 l3pad_bytes;
+ struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
+ u32 hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz;
+
+ msdu_len = ath12k_dp_rx_h_msdu_len(ab, desc);
+ peer_id = ath12k_dp_rx_h_peer_id(ab, desc);
+
+ if (!ath12k_peer_find_by_id(ab, peer_id)) {
+ ath12k_dbg(ab, ATH12K_DBG_DATA, "invalid peer id received in wbm err pkt%d\n",
+ peer_id);
+ return -EINVAL;
+ }
+
+ if (!rxcb->is_frag && ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE)) {
+ /* First buffer will be freed by the caller, so deduct it's length */
+ msdu_len = msdu_len - (DP_RX_BUFFER_SIZE - hal_rx_desc_sz);
+ ath12k_dp_rx_null_q_desc_sg_drop(ar, msdu_len, msdu_list);
+ return -EINVAL;
+ }
+
+ /* Even after cleaning up the sg buffers in the msdu list with above check
+ * any msdu received with continuation flag needs to be dropped as invalid.
+ * This protects against some random err frame with continuation flag.
+ */
+ if (rxcb->is_continuation)
+ return -EINVAL;
+
+ if (!ath12k_dp_rx_h_msdu_done(ab, desc)) {
+ ath12k_warn(ar->ab,
+ "msdu_done bit not set in null_q_des processing\n");
+ __skb_queue_purge(msdu_list);
+ return -EIO;
+ }
+
+ /* Handle NULL queue descriptor violations arising out a missing
+ * REO queue for a given peer or a given TID. This typically
+ * may happen if a packet is received on a QOS enabled TID before the
+ * ADDBA negotiation for that TID, when the TID queue is setup. Or
+ * it may also happen for MC/BC frames if they are not routed to the
+ * non-QOS TID queue, in the absence of any other default TID queue.
+ * This error can show up both in a REO destination or WBM release ring.
+ */
+
+ if (rxcb->is_frag) {
+ skb_pull(msdu, hal_rx_desc_sz);
+ } else {
+ l3pad_bytes = ath12k_dp_rx_h_l3pad(ab, desc);
+
+ if ((hal_rx_desc_sz + l3pad_bytes + msdu_len) > DP_RX_BUFFER_SIZE)
+ return -EINVAL;
+
+ skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len);
+ skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes);
+ }
+ ath12k_dp_rx_h_ppdu(ar, desc, status);
+
+ ath12k_dp_rx_h_mpdu(ar, msdu, desc, status);
+
+ rxcb->tid = ath12k_dp_rx_h_tid(ab, desc);
+
+ /* Please note that caller will having the access to msdu and completing
+ * rx with mac80211. Need not worry about cleaning up amsdu_list.
+ */
+
+ return 0;
+}
+
+static bool ath12k_dp_rx_h_reo_err(struct ath12k *ar, struct sk_buff *msdu,
+ struct ieee80211_rx_status *status,
+ struct sk_buff_head *msdu_list)
+{
+ struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
+ bool drop = false;
+
+ ar->ab->soc_stats.reo_error[rxcb->err_code]++;
+
+ switch (rxcb->err_code) {
+ case HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO:
+ if (ath12k_dp_rx_h_null_q_desc(ar, msdu, status, msdu_list))
+ drop = true;
+ break;
+ case HAL_REO_DEST_RING_ERROR_CODE_PN_CHECK_FAILED:
+ /* TODO: Do not drop PN failed packets in the driver;
+ * instead, it is good to drop such packets in mac80211
+ * after incrementing the replay counters.
+ */
+ fallthrough;
+ default:
+ /* TODO: Review other errors and process them to mac80211
+ * as appropriate.
+ */
+ drop = true;
+ break;
+ }
+
+ return drop;
+}
+
+static void ath12k_dp_rx_h_tkip_mic_err(struct ath12k *ar, struct sk_buff *msdu,
+ struct ieee80211_rx_status *status)
+{
+ struct ath12k_base *ab = ar->ab;
+ u16 msdu_len;
+ struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data;
+ u8 l3pad_bytes;
+ struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
+ u32 hal_rx_desc_sz = ar->ab->hw_params->hal_desc_sz;
+
+ rxcb->is_first_msdu = ath12k_dp_rx_h_first_msdu(ab, desc);
+ rxcb->is_last_msdu = ath12k_dp_rx_h_last_msdu(ab, desc);
+
+ l3pad_bytes = ath12k_dp_rx_h_l3pad(ab, desc);
+ msdu_len = ath12k_dp_rx_h_msdu_len(ab, desc);
+ skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len);
+ skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes);
+
+ ath12k_dp_rx_h_ppdu(ar, desc, status);
+
+ status->flag |= (RX_FLAG_MMIC_STRIPPED | RX_FLAG_MMIC_ERROR |
+ RX_FLAG_DECRYPTED);
+
+ ath12k_dp_rx_h_undecap(ar, msdu, desc,
+ HAL_ENCRYPT_TYPE_TKIP_MIC, status, false);
+}
+
+static bool ath12k_dp_rx_h_rxdma_err(struct ath12k *ar, struct sk_buff *msdu,
+ struct ieee80211_rx_status *status)
+{
+ struct ath12k_base *ab = ar->ab;
+ struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
+ struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)msdu->data;
+ bool drop = false;
+ u32 err_bitmap;
+
+ ar->ab->soc_stats.rxdma_error[rxcb->err_code]++;
+
+ switch (rxcb->err_code) {
+ case HAL_REO_ENTR_RING_RXDMA_ECODE_DECRYPT_ERR:
+ case HAL_REO_ENTR_RING_RXDMA_ECODE_TKIP_MIC_ERR:
+ err_bitmap = ath12k_dp_rx_h_mpdu_err(ab, rx_desc);
+ if (err_bitmap & HAL_RX_MPDU_ERR_TKIP_MIC) {
+ ath12k_dp_rx_h_tkip_mic_err(ar, msdu, status);
+ break;
+ }
+ fallthrough;
+ default:
+ /* TODO: Review other rxdma error code to check if anything is
+ * worth reporting to mac80211
+ */
+ drop = true;
+ break;
+ }
+
+ return drop;
+}
+
+static void ath12k_dp_rx_wbm_err(struct ath12k *ar,
+ struct napi_struct *napi,
+ struct sk_buff *msdu,
+ struct sk_buff_head *msdu_list)
+{
+ struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
+ struct ieee80211_rx_status rxs = {0};
+ bool drop = true;
+
+ switch (rxcb->err_rel_src) {
+ case HAL_WBM_REL_SRC_MODULE_REO:
+ drop = ath12k_dp_rx_h_reo_err(ar, msdu, &rxs, msdu_list);
+ break;
+ case HAL_WBM_REL_SRC_MODULE_RXDMA:
+ drop = ath12k_dp_rx_h_rxdma_err(ar, msdu, &rxs);
+ break;
+ default:
+ /* msdu will get freed */
+ break;
+ }
+
+ if (drop) {
+ dev_kfree_skb_any(msdu);
+ return;
+ }
+
+ ath12k_dp_rx_deliver_msdu(ar, napi, msdu, &rxs);
+}
+
+int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab,
+ struct napi_struct *napi, int budget)
+{
+ struct ath12k *ar;
+ struct ath12k_dp *dp = &ab->dp;
+ struct dp_rxdma_ring *rx_ring;
+ struct hal_rx_wbm_rel_info err_info;
+ struct hal_srng *srng;
+ struct sk_buff *msdu;
+ struct sk_buff_head msdu_list[MAX_RADIOS];
+ struct ath12k_skb_rxcb *rxcb;
+ void *rx_desc;
+ int mac_id;
+ int num_buffs_reaped = 0;
+ struct ath12k_rx_desc_info *desc_info;
+ int ret, i;
+
+ for (i = 0; i < ab->num_radios; i++)
+ __skb_queue_head_init(&msdu_list[i]);
+
+ srng = &ab->hal.srng_list[dp->rx_rel_ring.ring_id];
+ rx_ring = &dp->rx_refill_buf_ring;
+
+ spin_lock_bh(&srng->lock);
+
+ ath12k_hal_srng_access_begin(ab, srng);
+
+ while (budget) {
+ rx_desc = ath12k_hal_srng_dst_get_next_entry(ab, srng);
+ if (!rx_desc)
+ break;
+
+ ret = ath12k_hal_wbm_desc_parse_err(ab, rx_desc, &err_info);
+ if (ret) {
+ ath12k_warn(ab,
+ "failed to parse rx error in wbm_rel ring desc %d\n",
+ ret);
+ continue;
+ }
+
+ desc_info = (struct ath12k_rx_desc_info *)err_info.rx_desc;
+
+ /* retry manual desc retrieval if hw cc is not done */
+ if (!desc_info) {
+ desc_info = ath12k_dp_get_rx_desc(ab, err_info.cookie);
+ if (!desc_info) {
+ ath12k_warn(ab, "Invalid cookie in manual desc retrival");
+ continue;
+ }
+ }
+
+ /* FIXME: Extract mac id correctly. Since descs are not tied
+ * to mac, we can extract from vdev id in ring desc.
+ */
+ mac_id = 0;
+
+ if (desc_info->magic != ATH12K_DP_RX_DESC_MAGIC)
+ ath12k_warn(ab, "WBM RX err, Check HW CC implementation");
+
+ msdu = desc_info->skb;
+ desc_info->skb = NULL;
+
+ spin_lock_bh(&dp->rx_desc_lock);
+ list_move_tail(&desc_info->list, &dp->rx_desc_free_list);
+ spin_unlock_bh(&dp->rx_desc_lock);
+
+ rxcb = ATH12K_SKB_RXCB(msdu);
+ dma_unmap_single(ab->dev, rxcb->paddr,
+ msdu->len + skb_tailroom(msdu),
+ DMA_FROM_DEVICE);
+
+ num_buffs_reaped++;
+
+ if (!err_info.continuation)
+ budget--;
+
+ if (err_info.push_reason !=
+ HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) {
+ dev_kfree_skb_any(msdu);
+ continue;
+ }
+
+ rxcb->err_rel_src = err_info.err_rel_src;
+ rxcb->err_code = err_info.err_code;
+ rxcb->rx_desc = (struct hal_rx_desc *)msdu->data;
+ __skb_queue_tail(&msdu_list[mac_id], msdu);
+
+ rxcb->is_first_msdu = err_info.first_msdu;
+ rxcb->is_last_msdu = err_info.last_msdu;
+ rxcb->is_continuation = err_info.continuation;
+ }
+
+ ath12k_hal_srng_access_end(ab, srng);
+
+ spin_unlock_bh(&srng->lock);
+
+ if (!num_buffs_reaped)
+ goto done;
+
+ ath12k_dp_rx_bufs_replenish(ab, 0, rx_ring, num_buffs_reaped,
+ ab->hw_params->hal_params->rx_buf_rbm, true);
+
+ rcu_read_lock();
+ for (i = 0; i < ab->num_radios; i++) {
+ if (!rcu_dereference(ab->pdevs_active[i])) {
+ __skb_queue_purge(&msdu_list[i]);
+ continue;
+ }
+
+ ar = ab->pdevs[i].ar;
+
+ if (test_bit(ATH12K_CAC_RUNNING, &ar->dev_flags)) {
+ __skb_queue_purge(&msdu_list[i]);
+ continue;
+ }
+
+ while ((msdu = __skb_dequeue(&msdu_list[i])) != NULL)
+ ath12k_dp_rx_wbm_err(ar, napi, msdu, &msdu_list[i]);
+ }
+ rcu_read_unlock();
+done:
+ return num_buffs_reaped;
+}
+
+void ath12k_dp_rx_process_reo_status(struct ath12k_base *ab)
+{
+ struct ath12k_dp *dp = &ab->dp;
+ struct hal_tlv_64_hdr *hdr;
+ struct hal_srng *srng;
+ struct ath12k_dp_rx_reo_cmd *cmd, *tmp;
+ bool found = false;
+ u16 tag;
+ struct hal_reo_status reo_status;
+
+ srng = &ab->hal.srng_list[dp->reo_status_ring.ring_id];
+
+ memset(&reo_status, 0, sizeof(reo_status));
+
+ spin_lock_bh(&srng->lock);
+
+ ath12k_hal_srng_access_begin(ab, srng);
+
+ while ((hdr = ath12k_hal_srng_dst_get_next_entry(ab, srng))) {
+ tag = u64_get_bits(hdr->tl, HAL_SRNG_TLV_HDR_TAG);
+
+ switch (tag) {
+ case HAL_REO_GET_QUEUE_STATS_STATUS:
+ ath12k_hal_reo_status_queue_stats(ab, hdr,
+ &reo_status);
+ break;
+ case HAL_REO_FLUSH_QUEUE_STATUS:
+ ath12k_hal_reo_flush_queue_status(ab, hdr,
+ &reo_status);
+ break;
+ case HAL_REO_FLUSH_CACHE_STATUS:
+ ath12k_hal_reo_flush_cache_status(ab, hdr,
+ &reo_status);
+ break;
+ case HAL_REO_UNBLOCK_CACHE_STATUS:
+ ath12k_hal_reo_unblk_cache_status(ab, hdr,
+ &reo_status);
+ break;
+ case HAL_REO_FLUSH_TIMEOUT_LIST_STATUS:
+ ath12k_hal_reo_flush_timeout_list_status(ab, hdr,
+ &reo_status);
+ break;
+ case HAL_REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS:
+ ath12k_hal_reo_desc_thresh_reached_status(ab, hdr,
+ &reo_status);
+ break;
+ case HAL_REO_UPDATE_RX_REO_QUEUE_STATUS:
+ ath12k_hal_reo_update_rx_reo_queue_status(ab, hdr,
+ &reo_status);
+ break;
+ default:
+ ath12k_warn(ab, "Unknown reo status type %d\n", tag);
+ continue;
+ }
+
+ spin_lock_bh(&dp->reo_cmd_lock);
+ list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
+ if (reo_status.uniform_hdr.cmd_num == cmd->cmd_num) {
+ found = true;
+ list_del(&cmd->list);
+ break;
+ }
+ }
+ spin_unlock_bh(&dp->reo_cmd_lock);
+
+ if (found) {
+ cmd->handler(dp, (void *)&cmd->data,
+ reo_status.uniform_hdr.cmd_status);
+ kfree(cmd);
+ }
+
+ found = false;
+ }
+
+ ath12k_hal_srng_access_end(ab, srng);
+
+ spin_unlock_bh(&srng->lock);
+}
+
+void ath12k_dp_rx_free(struct ath12k_base *ab)
+{
+ struct ath12k_dp *dp = &ab->dp;
+ int i;
+
+ ath12k_dp_srng_cleanup(ab, &dp->rx_refill_buf_ring.refill_buf_ring);
+
+ for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {
+ if (ab->hw_params->rx_mac_buf_ring)
+ ath12k_dp_srng_cleanup(ab, &dp->rx_mac_buf_ring[i]);
+ }
+
+ for (i = 0; i < ab->hw_params->num_rxdma_dst_ring; i++)
+ ath12k_dp_srng_cleanup(ab, &dp->rxdma_err_dst_ring[i]);
+
+ ath12k_dp_srng_cleanup(ab, &dp->rxdma_mon_buf_ring.refill_buf_ring);
+ ath12k_dp_srng_cleanup(ab, &dp->tx_mon_buf_ring.refill_buf_ring);
+
+ ath12k_dp_rxdma_buf_free(ab);
+}
+
+void ath12k_dp_rx_pdev_free(struct ath12k_base *ab, int mac_id)
+{
+ struct ath12k *ar = ab->pdevs[mac_id].ar;
+
+ ath12k_dp_rx_pdev_srng_free(ar);
+}
+
+int ath12k_dp_rxdma_ring_sel_config_qcn9274(struct ath12k_base *ab)
+{
+ struct ath12k_dp *dp = &ab->dp;
+ struct htt_rx_ring_tlv_filter tlv_filter = {0};
+ u32 ring_id;
+ int ret;
+ u32 hal_rx_desc_sz = ab->hw_params->hal_desc_sz;
+
+ ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id;
+
+ tlv_filter.rx_filter = HTT_RX_TLV_FLAGS_RXDMA_RING;
+ tlv_filter.pkt_filter_flags2 = HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_BAR;
+ tlv_filter.pkt_filter_flags3 = HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_MCAST |
+ HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_UCAST |
+ HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA;
+ tlv_filter.offset_valid = true;
+ tlv_filter.rx_packet_offset = hal_rx_desc_sz;
+
+ tlv_filter.rx_mpdu_start_offset =
+ ab->hw_params->hal_ops->rx_desc_get_mpdu_start_offset();
+ tlv_filter.rx_msdu_end_offset =
+ ab->hw_params->hal_ops->rx_desc_get_msdu_end_offset();
+
+ /* TODO: Selectively subscribe to required qwords within msdu_end
+ * and mpdu_start and setup the mask in below msg
+ * and modify the rx_desc struct
+ */
+ ret = ath12k_dp_tx_htt_rx_filter_setup(ab, ring_id, 0,
+ HAL_RXDMA_BUF,
+ DP_RXDMA_REFILL_RING_SIZE,
+ &tlv_filter);
+
+ return ret;
+}
+
+int ath12k_dp_rxdma_ring_sel_config_wcn7850(struct ath12k_base *ab)
+{
+ struct ath12k_dp *dp = &ab->dp;
+ struct htt_rx_ring_tlv_filter tlv_filter = {0};
+ u32 ring_id;
+ int ret;
+ u32 hal_rx_desc_sz = ab->hw_params->hal_desc_sz;
+ int i;
+
+ ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id;
+
+ tlv_filter.rx_filter = HTT_RX_TLV_FLAGS_RXDMA_RING;
+ tlv_filter.pkt_filter_flags2 = HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_BAR;
+ tlv_filter.pkt_filter_flags3 = HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_MCAST |
+ HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_UCAST |
+ HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA;
+ tlv_filter.offset_valid = true;
+ tlv_filter.rx_packet_offset = hal_rx_desc_sz;
+
+ tlv_filter.rx_header_offset = offsetof(struct hal_rx_desc_wcn7850, pkt_hdr_tlv);
+
+ tlv_filter.rx_mpdu_start_offset =
+ ab->hw_params->hal_ops->rx_desc_get_mpdu_start_offset();
+ tlv_filter.rx_msdu_end_offset =
+ ab->hw_params->hal_ops->rx_desc_get_msdu_end_offset();
+
+ /* TODO: Selectively subscribe to required qwords within msdu_end
+ * and mpdu_start and setup the mask in below msg
+ * and modify the rx_desc struct
+ */
+
+ for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {
+ ring_id = dp->rx_mac_buf_ring[i].ring_id;
+ ret = ath12k_dp_tx_htt_rx_filter_setup(ab, ring_id, i,
+ HAL_RXDMA_BUF,
+ DP_RXDMA_REFILL_RING_SIZE,
+ &tlv_filter);
+ }
+
+ return ret;
+}
+
+int ath12k_dp_rx_htt_setup(struct ath12k_base *ab)
+{
+ struct ath12k_dp *dp = &ab->dp;
+ u32 ring_id;
+ int i, ret;
+
+ /* TODO: Need to verify the HTT setup for QCN9224 */
+ ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id;
+ ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id, 0, HAL_RXDMA_BUF);
+ if (ret) {
+ ath12k_warn(ab, "failed to configure rx_refill_buf_ring %d\n",
+ ret);
+ return ret;
+ }
+
+ if (ab->hw_params->rx_mac_buf_ring) {
+ for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {
+ ring_id = dp->rx_mac_buf_ring[i].ring_id;
+ ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id,
+ i, HAL_RXDMA_BUF);
+ if (ret) {
+ ath12k_warn(ab, "failed to configure rx_mac_buf_ring%d %d\n",
+ i, ret);
+ return ret;
+ }
+ }
+ }
+
+ for (i = 0; i < ab->hw_params->num_rxdma_dst_ring; i++) {
+ ring_id = dp->rxdma_err_dst_ring[i].ring_id;
+ ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id,
+ i, HAL_RXDMA_DST);
+ if (ret) {
+ ath12k_warn(ab, "failed to configure rxdma_err_dest_ring%d %d\n",
+ i, ret);
+ return ret;
+ }
+ }
+
+ if (ab->hw_params->rxdma1_enable) {
+ ring_id = dp->rxdma_mon_buf_ring.refill_buf_ring.ring_id;
+ ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id,
+ 0, HAL_RXDMA_MONITOR_BUF);
+ if (ret) {
+ ath12k_warn(ab, "failed to configure rxdma_mon_buf_ring %d\n",
+ ret);
+ return ret;
+ }
+
+ ring_id = dp->tx_mon_buf_ring.refill_buf_ring.ring_id;
+ ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id,
+ 0, HAL_TX_MONITOR_BUF);
+ if (ret) {
+ ath12k_warn(ab, "failed to configure rxdma_mon_buf_ring %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ ret = ab->hw_params->hw_ops->rxdma_ring_sel_config(ab);
+ if (ret) {
+ ath12k_warn(ab, "failed to setup rxdma ring selection config\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+int ath12k_dp_rx_alloc(struct ath12k_base *ab)
+{
+ struct ath12k_dp *dp = &ab->dp;
+ int i, ret;
+
+ idr_init(&dp->rx_refill_buf_ring.bufs_idr);
+ spin_lock_init(&dp->rx_refill_buf_ring.idr_lock);
+
+ idr_init(&dp->rxdma_mon_buf_ring.bufs_idr);
+ spin_lock_init(&dp->rxdma_mon_buf_ring.idr_lock);
+
+ idr_init(&dp->tx_mon_buf_ring.bufs_idr);
+ spin_lock_init(&dp->tx_mon_buf_ring.idr_lock);
+
+ ret = ath12k_dp_srng_setup(ab,
+ &dp->rx_refill_buf_ring.refill_buf_ring,
+ HAL_RXDMA_BUF, 0, 0,
+ DP_RXDMA_BUF_RING_SIZE);
+ if (ret) {
+ ath12k_warn(ab, "failed to setup rx_refill_buf_ring\n");
+ return ret;
+ }
+
+ if (ab->hw_params->rx_mac_buf_ring) {
+ for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {
+ ret = ath12k_dp_srng_setup(ab,
+ &dp->rx_mac_buf_ring[i],
+ HAL_RXDMA_BUF, 1,
+ i, 1024);
+ if (ret) {
+ ath12k_warn(ab, "failed to setup rx_mac_buf_ring %d\n",
+ i);
+ return ret;
+ }
+ }
+ }
+
+ for (i = 0; i < ab->hw_params->num_rxdma_dst_ring; i++) {
+ ret = ath12k_dp_srng_setup(ab, &dp->rxdma_err_dst_ring[i],
+ HAL_RXDMA_DST, 0, i,
+ DP_RXDMA_ERR_DST_RING_SIZE);
+ if (ret) {
+ ath12k_warn(ab, "failed to setup rxdma_err_dst_ring %d\n", i);
+ return ret;
+ }
+ }
+
+ if (ab->hw_params->rxdma1_enable) {
+ ret = ath12k_dp_srng_setup(ab,
+ &dp->rxdma_mon_buf_ring.refill_buf_ring,
+ HAL_RXDMA_MONITOR_BUF, 0, 0,
+ DP_RXDMA_MONITOR_BUF_RING_SIZE);
+ if (ret) {
+ ath12k_warn(ab, "failed to setup HAL_RXDMA_MONITOR_BUF\n");
+ return ret;
+ }
+
+ ret = ath12k_dp_srng_setup(ab,
+ &dp->tx_mon_buf_ring.refill_buf_ring,
+ HAL_TX_MONITOR_BUF, 0, 0,
+ DP_TX_MONITOR_BUF_RING_SIZE);
+ if (ret) {
+ ath12k_warn(ab, "failed to setup DP_TX_MONITOR_BUF_RING_SIZE\n");
+ return ret;
+ }
+ }
+
+ ret = ath12k_dp_rxdma_buf_setup(ab);
+ if (ret) {
+ ath12k_warn(ab, "failed to setup rxdma ring\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+int ath12k_dp_rx_pdev_alloc(struct ath12k_base *ab, int mac_id)
+{
+ struct ath12k *ar = ab->pdevs[mac_id].ar;
+ struct ath12k_pdev_dp *dp = &ar->dp;
+ u32 ring_id;
+ int i;
+ int ret;
+
+ if (!ab->hw_params->rxdma1_enable)
+ goto out;
+
+ ret = ath12k_dp_rx_pdev_srng_alloc(ar);
+ if (ret) {
+ ath12k_warn(ab, "failed to setup rx srngs\n");
+ return ret;
+ }
+
+ for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {
+ ring_id = dp->rxdma_mon_dst_ring[i].ring_id;
+ ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id,
+ mac_id + i,
+ HAL_RXDMA_MONITOR_DST);
+ if (ret) {
+ ath12k_warn(ab,
+ "failed to configure rxdma_mon_dst_ring %d %d\n",
+ i, ret);
+ return ret;
+ }
+
+ ring_id = dp->tx_mon_dst_ring[i].ring_id;
+ ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id,
+ mac_id + i,
+ HAL_TX_MONITOR_DST);
+ if (ret) {
+ ath12k_warn(ab,
+ "failed to configure tx_mon_dst_ring %d %d\n",
+ i, ret);
+ return ret;
+ }
+ }
+out:
+ return 0;
+}
+
+static int ath12k_dp_rx_pdev_mon_status_attach(struct ath12k *ar)
+{
+ struct ath12k_pdev_dp *dp = &ar->dp;
+ struct ath12k_mon_data *pmon = (struct ath12k_mon_data *)&dp->mon_data;
+
+ skb_queue_head_init(&pmon->rx_status_q);
+
+ pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
+
+ memset(&pmon->rx_mon_stats, 0,
+ sizeof(pmon->rx_mon_stats));
+ return 0;
+}
+
+int ath12k_dp_rx_pdev_mon_attach(struct ath12k *ar)
+{
+ struct ath12k_pdev_dp *dp = &ar->dp;
+ struct ath12k_mon_data *pmon = &dp->mon_data;
+ int ret = 0;
+
+ ret = ath12k_dp_rx_pdev_mon_status_attach(ar);
+ if (ret) {
+ ath12k_warn(ar->ab, "pdev_mon_status_attach() failed");
+ return ret;
+ }
+
+ /* if rxdma1_enable is false, no need to setup
+ * rxdma_mon_desc_ring.
+ */
+ if (!ar->ab->hw_params->rxdma1_enable)
+ return 0;
+
+ pmon->mon_last_linkdesc_paddr = 0;
+ pmon->mon_last_buf_cookie = DP_RX_DESC_COOKIE_MAX + 1;
+ spin_lock_init(&pmon->mon_lock);
+
+ return 0;
+}
+
+int ath12k_dp_rx_pktlog_start(struct ath12k_base *ab)
+{
+ /* start reap timer */
+ mod_timer(&ab->mon_reap_timer,
+ jiffies + msecs_to_jiffies(ATH12K_MON_TIMER_INTERVAL));
+
+ return 0;
+}
+
+int ath12k_dp_rx_pktlog_stop(struct ath12k_base *ab, bool stop_timer)
+{
+ int ret;
+
+ if (stop_timer)
+ del_timer_sync(&ab->mon_reap_timer);
+
+ /* reap all the monitor related rings */
+ ret = ath12k_dp_purge_mon_ring(ab);
+ if (ret) {
+ ath12k_warn(ab, "failed to purge dp mon ring: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/wireless/ath/ath12k/dp_rx.h b/drivers/net/wireless/ath/ath12k/dp_rx.h
new file mode 100644
index 000000000000..c955b5c859d1
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/dp_rx.h
@@ -0,0 +1,145 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+#ifndef ATH12K_DP_RX_H
+#define ATH12K_DP_RX_H
+
+#include "core.h"
+#include "rx_desc.h"
+#include "debug.h"
+
+#define DP_MAX_NWIFI_HDR_LEN 30
+
+struct ath12k_dp_rx_tid {
+ u8 tid;
+ u32 *vaddr;
+ dma_addr_t paddr;
+ u32 size;
+ u32 ba_win_sz;
+ bool active;
+
+ /* Info related to rx fragments */
+ u32 cur_sn;
+ u16 last_frag_no;
+ u16 rx_frag_bitmap;
+
+ struct sk_buff_head rx_frags;
+ struct hal_reo_dest_ring *dst_ring_desc;
+
+ /* Timer info related to fragments */
+ struct timer_list frag_timer;
+ struct ath12k_base *ab;
+};
+
+struct ath12k_dp_rx_reo_cache_flush_elem {
+ struct list_head list;
+ struct ath12k_dp_rx_tid data;
+ unsigned long ts;
+};
+
+struct ath12k_dp_rx_reo_cmd {
+ struct list_head list;
+ struct ath12k_dp_rx_tid data;
+ int cmd_num;
+ void (*handler)(struct ath12k_dp *dp, void *ctx,
+ enum hal_reo_cmd_status status);
+};
+
+#define ATH12K_DP_RX_REO_DESC_FREE_THRES 64
+#define ATH12K_DP_RX_REO_DESC_FREE_TIMEOUT_MS 1000
+
+enum ath12k_dp_rx_decap_type {
+ DP_RX_DECAP_TYPE_RAW,
+ DP_RX_DECAP_TYPE_NATIVE_WIFI,
+ DP_RX_DECAP_TYPE_ETHERNET2_DIX,
+ DP_RX_DECAP_TYPE_8023,
+};
+
+struct ath12k_dp_rx_rfc1042_hdr {
+ u8 llc_dsap;
+ u8 llc_ssap;
+ u8 llc_ctrl;
+ u8 snap_oui[3];
+ __be16 snap_type;
+} __packed;
+
+static inline u32 ath12k_he_gi_to_nl80211_he_gi(u8 sgi)
+{
+ u32 ret = 0;
+
+ switch (sgi) {
+ case RX_MSDU_START_SGI_0_8_US:
+ ret = NL80211_RATE_INFO_HE_GI_0_8;
+ break;
+ case RX_MSDU_START_SGI_1_6_US:
+ ret = NL80211_RATE_INFO_HE_GI_1_6;
+ break;
+ case RX_MSDU_START_SGI_3_2_US:
+ ret = NL80211_RATE_INFO_HE_GI_3_2;
+ break;
+ }
+
+ return ret;
+}
+
+int ath12k_dp_rx_ampdu_start(struct ath12k *ar,
+ struct ieee80211_ampdu_params *params);
+int ath12k_dp_rx_ampdu_stop(struct ath12k *ar,
+ struct ieee80211_ampdu_params *params);
+int ath12k_dp_rx_peer_pn_replay_config(struct ath12k_vif *arvif,
+ const u8 *peer_addr,
+ enum set_key_cmd key_cmd,
+ struct ieee80211_key_conf *key);
+void ath12k_dp_rx_peer_tid_cleanup(struct ath12k *ar, struct ath12k_peer *peer);
+void ath12k_dp_rx_peer_tid_delete(struct ath12k *ar,
+ struct ath12k_peer *peer, u8 tid);
+int ath12k_dp_rx_peer_tid_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_id,
+ u8 tid, u32 ba_win_sz, u16 ssn,
+ enum hal_pn_type pn_type);
+void ath12k_dp_htt_htc_t2h_msg_handler(struct ath12k_base *ab,
+ struct sk_buff *skb);
+int ath12k_dp_rx_pdev_reo_setup(struct ath12k_base *ab);
+void ath12k_dp_rx_pdev_reo_cleanup(struct ath12k_base *ab);
+int ath12k_dp_rx_htt_setup(struct ath12k_base *ab);
+int ath12k_dp_rx_alloc(struct ath12k_base *ab);
+void ath12k_dp_rx_free(struct ath12k_base *ab);
+int ath12k_dp_rx_pdev_alloc(struct ath12k_base *ab, int pdev_idx);
+void ath12k_dp_rx_pdev_free(struct ath12k_base *ab, int pdev_idx);
+void ath12k_dp_rx_reo_cmd_list_cleanup(struct ath12k_base *ab);
+void ath12k_dp_rx_process_reo_status(struct ath12k_base *ab);
+int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab,
+ struct napi_struct *napi, int budget);
+int ath12k_dp_rx_process_err(struct ath12k_base *ab, struct napi_struct *napi,
+ int budget);
+int ath12k_dp_rx_process(struct ath12k_base *ab, int mac_id,
+ struct napi_struct *napi,
+ int budget);
+int ath12k_dp_rx_bufs_replenish(struct ath12k_base *ab, int mac_id,
+ struct dp_rxdma_ring *rx_ring,
+ int req_entries,
+ enum hal_rx_buf_return_buf_manager mgr,
+ bool hw_cc);
+int ath12k_dp_rx_pdev_mon_attach(struct ath12k *ar);
+int ath12k_dp_rx_peer_frag_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_id);
+
+int ath12k_dp_rx_pktlog_start(struct ath12k_base *ab);
+int ath12k_dp_rx_pktlog_stop(struct ath12k_base *ab, bool stop_timer);
+u8 ath12k_dp_rx_h_l3pad(struct ath12k_base *ab,
+ struct hal_rx_desc *desc);
+struct ath12k_peer *
+ath12k_dp_rx_h_find_peer(struct ath12k_base *ab, struct sk_buff *msdu);
+u8 ath12k_dp_rx_h_decap_type(struct ath12k_base *ab,
+ struct hal_rx_desc *desc);
+u32 ath12k_dp_rx_h_mpdu_err(struct ath12k_base *ab,
+ struct hal_rx_desc *desc);
+void ath12k_dp_rx_h_ppdu(struct ath12k *ar, struct hal_rx_desc *rx_desc,
+ struct ieee80211_rx_status *rx_status);
+struct ath12k_peer *
+ath12k_dp_rx_h_find_peer(struct ath12k_base *ab, struct sk_buff *msdu);
+
+int ath12k_dp_rxdma_ring_sel_config_qcn9274(struct ath12k_base *ab);
+int ath12k_dp_rxdma_ring_sel_config_wcn7850(struct ath12k_base *ab);
+
+#endif /* ATH12K_DP_RX_H */
diff --git a/drivers/net/wireless/ath/ath12k/dp_tx.c b/drivers/net/wireless/ath/ath12k/dp_tx.c
new file mode 100644
index 000000000000..95294f35155c
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/dp_tx.c
@@ -0,0 +1,1211 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include "core.h"
+#include "dp_tx.h"
+#include "debug.h"
+#include "hw.h"
+
+static enum hal_tcl_encap_type
+ath12k_dp_tx_get_encap_type(struct ath12k_vif *arvif, struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+
+ if (tx_info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP)
+ return HAL_TCL_ENCAP_TYPE_ETHERNET;
+
+ return HAL_TCL_ENCAP_TYPE_NATIVE_WIFI;
+}
+
+static void ath12k_dp_tx_encap_nwifi(struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+ u8 *qos_ctl;
+
+ if (!ieee80211_is_data_qos(hdr->frame_control))
+ return;
+
+ qos_ctl = ieee80211_get_qos_ctl(hdr);
+ memmove(skb->data + IEEE80211_QOS_CTL_LEN,
+ skb->data, (void *)qos_ctl - (void *)skb->data);
+ skb_pull(skb, IEEE80211_QOS_CTL_LEN);
+
+ hdr = (void *)skb->data;
+ hdr->frame_control &= ~__cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
+}
+
+static u8 ath12k_dp_tx_get_tid(struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+ struct ath12k_skb_cb *cb = ATH12K_SKB_CB(skb);
+
+ if (cb->flags & ATH12K_SKB_HW_80211_ENCAP)
+ return skb->priority & IEEE80211_QOS_CTL_TID_MASK;
+ else if (!ieee80211_is_data_qos(hdr->frame_control))
+ return HAL_DESC_REO_NON_QOS_TID;
+ else
+ return skb->priority & IEEE80211_QOS_CTL_TID_MASK;
+}
+
+enum hal_encrypt_type ath12k_dp_tx_get_encrypt_type(u32 cipher)
+{
+ switch (cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ return HAL_ENCRYPT_TYPE_WEP_40;
+ case WLAN_CIPHER_SUITE_WEP104:
+ return HAL_ENCRYPT_TYPE_WEP_104;
+ case WLAN_CIPHER_SUITE_TKIP:
+ return HAL_ENCRYPT_TYPE_TKIP_MIC;
+ case WLAN_CIPHER_SUITE_CCMP:
+ return HAL_ENCRYPT_TYPE_CCMP_128;
+ case WLAN_CIPHER_SUITE_CCMP_256:
+ return HAL_ENCRYPT_TYPE_CCMP_256;
+ case WLAN_CIPHER_SUITE_GCMP:
+ return HAL_ENCRYPT_TYPE_GCMP_128;
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ return HAL_ENCRYPT_TYPE_AES_GCMP_256;
+ default:
+ return HAL_ENCRYPT_TYPE_OPEN;
+ }
+}
+
+static void ath12k_dp_tx_release_txbuf(struct ath12k_dp *dp,
+ struct ath12k_tx_desc_info *tx_desc,
+ u8 pool_id)
+{
+ spin_lock_bh(&dp->tx_desc_lock[pool_id]);
+ list_move_tail(&tx_desc->list, &dp->tx_desc_free_list[pool_id]);
+ spin_unlock_bh(&dp->tx_desc_lock[pool_id]);
+}
+
+static struct ath12k_tx_desc_info *ath12k_dp_tx_assign_buffer(struct ath12k_dp *dp,
+ u8 pool_id)
+{
+ struct ath12k_tx_desc_info *desc;
+
+ spin_lock_bh(&dp->tx_desc_lock[pool_id]);
+ desc = list_first_entry_or_null(&dp->tx_desc_free_list[pool_id],
+ struct ath12k_tx_desc_info,
+ list);
+ if (!desc) {
+ spin_unlock_bh(&dp->tx_desc_lock[pool_id]);
+ ath12k_warn(dp->ab, "failed to allocate data Tx buffer\n");
+ return NULL;
+ }
+
+ list_move_tail(&desc->list, &dp->tx_desc_used_list[pool_id]);
+ spin_unlock_bh(&dp->tx_desc_lock[pool_id]);
+
+ return desc;
+}
+
+static void ath12k_hal_tx_cmd_ext_desc_setup(struct ath12k_base *ab, void *cmd,
+ struct hal_tx_info *ti)
+{
+ struct hal_tx_msdu_ext_desc *tcl_ext_cmd = (struct hal_tx_msdu_ext_desc *)cmd;
+
+ tcl_ext_cmd->info0 = le32_encode_bits(ti->paddr,
+ HAL_TX_MSDU_EXT_INFO0_BUF_PTR_LO);
+ tcl_ext_cmd->info1 = le32_encode_bits(0x0,
+ HAL_TX_MSDU_EXT_INFO1_BUF_PTR_HI) |
+ le32_encode_bits(ti->data_len,
+ HAL_TX_MSDU_EXT_INFO1_BUF_LEN);
+
+ tcl_ext_cmd->info1 = le32_encode_bits(1, HAL_TX_MSDU_EXT_INFO1_EXTN_OVERRIDE) |
+ le32_encode_bits(ti->encap_type,
+ HAL_TX_MSDU_EXT_INFO1_ENCAP_TYPE) |
+ le32_encode_bits(ti->encrypt_type,
+ HAL_TX_MSDU_EXT_INFO1_ENCRYPT_TYPE);
+}
+
+int ath12k_dp_tx(struct ath12k *ar, struct ath12k_vif *arvif,
+ struct sk_buff *skb)
+{
+ struct ath12k_base *ab = ar->ab;
+ struct ath12k_dp *dp = &ab->dp;
+ struct hal_tx_info ti = {0};
+ struct ath12k_tx_desc_info *tx_desc;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ath12k_skb_cb *skb_cb = ATH12K_SKB_CB(skb);
+ struct hal_tcl_data_cmd *hal_tcl_desc;
+ struct hal_tx_msdu_ext_desc *msg;
+ struct sk_buff *skb_ext_desc;
+ struct hal_srng *tcl_ring;
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+ struct dp_tx_ring *tx_ring;
+ u8 pool_id;
+ u8 hal_ring_id;
+ int ret;
+ u8 ring_selector, ring_map = 0;
+ bool tcl_ring_retry;
+ bool msdu_ext_desc = false;
+
+ if (test_bit(ATH12K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags))
+ return -ESHUTDOWN;
+
+ if (!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) &&
+ !ieee80211_is_data(hdr->frame_control))
+ return -ENOTSUPP;
+
+ pool_id = skb_get_queue_mapping(skb) & (ATH12K_HW_MAX_QUEUES - 1);
+
+ /* Let the default ring selection be based on current processor
+ * number, where one of the 3 tcl rings are selected based on
+ * the smp_processor_id(). In case that ring
+ * is full/busy, we resort to other available rings.
+ * If all rings are full, we drop the packet.
+ * TODO: Add throttling logic when all rings are full
+ */
+ ring_selector = ab->hw_params->hw_ops->get_ring_selector(skb);
+
+tcl_ring_sel:
+ tcl_ring_retry = false;
+ ti.ring_id = ring_selector % ab->hw_params->max_tx_ring;
+
+ ring_map |= BIT(ti.ring_id);
+ ti.rbm_id = ab->hw_params->hal_ops->tcl_to_wbm_rbm_map[ti.ring_id].rbm_id;
+
+ tx_ring = &dp->tx_ring[ti.ring_id];
+
+ tx_desc = ath12k_dp_tx_assign_buffer(dp, pool_id);
+ if (!tx_desc)
+ return -ENOMEM;
+
+ ti.bank_id = arvif->bank_id;
+ ti.meta_data_flags = arvif->tcl_metadata;
+
+ if (arvif->tx_encap_type == HAL_TCL_ENCAP_TYPE_RAW &&
+ test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ar->ab->dev_flags)) {
+ if (skb_cb->flags & ATH12K_SKB_CIPHER_SET) {
+ ti.encrypt_type =
+ ath12k_dp_tx_get_encrypt_type(skb_cb->cipher);
+
+ if (ieee80211_has_protected(hdr->frame_control))
+ skb_put(skb, IEEE80211_CCMP_MIC_LEN);
+ } else {
+ ti.encrypt_type = HAL_ENCRYPT_TYPE_OPEN;
+ }
+
+ msdu_ext_desc = true;
+ }
+
+ ti.encap_type = ath12k_dp_tx_get_encap_type(arvif, skb);
+ ti.addr_search_flags = arvif->hal_addr_search_flags;
+ ti.search_type = arvif->search_type;
+ ti.type = HAL_TCL_DESC_TYPE_BUFFER;
+ ti.pkt_offset = 0;
+ ti.lmac_id = ar->lmac_id;
+ ti.vdev_id = arvif->vdev_id;
+ ti.bss_ast_hash = arvif->ast_hash;
+ ti.bss_ast_idx = arvif->ast_idx;
+ ti.dscp_tid_tbl_idx = 0;
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL &&
+ ti.encap_type != HAL_TCL_ENCAP_TYPE_RAW) {
+ ti.flags0 |= u32_encode_bits(1, HAL_TCL_DATA_CMD_INFO2_IP4_CKSUM_EN) |
+ u32_encode_bits(1, HAL_TCL_DATA_CMD_INFO2_UDP4_CKSUM_EN) |
+ u32_encode_bits(1, HAL_TCL_DATA_CMD_INFO2_UDP6_CKSUM_EN) |
+ u32_encode_bits(1, HAL_TCL_DATA_CMD_INFO2_TCP4_CKSUM_EN) |
+ u32_encode_bits(1, HAL_TCL_DATA_CMD_INFO2_TCP6_CKSUM_EN);
+ }
+
+ ti.flags1 |= u32_encode_bits(1, HAL_TCL_DATA_CMD_INFO3_TID_OVERWRITE);
+
+ ti.tid = ath12k_dp_tx_get_tid(skb);
+
+ switch (ti.encap_type) {
+ case HAL_TCL_ENCAP_TYPE_NATIVE_WIFI:
+ ath12k_dp_tx_encap_nwifi(skb);
+ break;
+ case HAL_TCL_ENCAP_TYPE_RAW:
+ if (!test_bit(ATH12K_FLAG_RAW_MODE, &ab->dev_flags)) {
+ ret = -EINVAL;
+ goto fail_remove_tx_buf;
+ }
+ break;
+ case HAL_TCL_ENCAP_TYPE_ETHERNET:
+ /* no need to encap */
+ break;
+ case HAL_TCL_ENCAP_TYPE_802_3:
+ default:
+ /* TODO: Take care of other encap modes as well */
+ ret = -EINVAL;
+ atomic_inc(&ab->soc_stats.tx_err.misc_fail);
+ goto fail_remove_tx_buf;
+ }
+
+ ti.paddr = dma_map_single(ab->dev, skb->data, skb->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(ab->dev, ti.paddr)) {
+ atomic_inc(&ab->soc_stats.tx_err.misc_fail);
+ ath12k_warn(ab, "failed to DMA map data Tx buffer\n");
+ ret = -ENOMEM;
+ goto fail_remove_tx_buf;
+ }
+
+ tx_desc->skb = skb;
+ tx_desc->mac_id = ar->pdev_idx;
+ ti.desc_id = tx_desc->desc_id;
+ ti.data_len = skb->len;
+ skb_cb->paddr = ti.paddr;
+ skb_cb->vif = arvif->vif;
+ skb_cb->ar = ar;
+
+ if (msdu_ext_desc) {
+ skb_ext_desc = dev_alloc_skb(sizeof(struct hal_tx_msdu_ext_desc));
+ if (!skb_ext_desc) {
+ ret = -ENOMEM;
+ goto fail_unmap_dma;
+ }
+
+ skb_put(skb_ext_desc, sizeof(struct hal_tx_msdu_ext_desc));
+ memset(skb_ext_desc->data, 0, skb_ext_desc->len);
+
+ msg = (struct hal_tx_msdu_ext_desc *)skb_ext_desc->data;
+ ath12k_hal_tx_cmd_ext_desc_setup(ab, msg, &ti);
+
+ ti.paddr = dma_map_single(ab->dev, skb_ext_desc->data,
+ skb_ext_desc->len, DMA_TO_DEVICE);
+ ret = dma_mapping_error(ab->dev, ti.paddr);
+ if (ret) {
+ kfree(skb_ext_desc);
+ goto fail_unmap_dma;
+ }
+
+ ti.data_len = skb_ext_desc->len;
+ ti.type = HAL_TCL_DESC_TYPE_EXT_DESC;
+
+ skb_cb->paddr_ext_desc = ti.paddr;
+ }
+
+ hal_ring_id = tx_ring->tcl_data_ring.ring_id;
+ tcl_ring = &ab->hal.srng_list[hal_ring_id];
+
+ spin_lock_bh(&tcl_ring->lock);
+
+ ath12k_hal_srng_access_begin(ab, tcl_ring);
+
+ hal_tcl_desc = ath12k_hal_srng_src_get_next_entry(ab, tcl_ring);
+ if (!hal_tcl_desc) {
+ /* NOTE: It is highly unlikely we'll be running out of tcl_ring
+ * desc because the desc is directly enqueued onto hw queue.
+ */
+ ath12k_hal_srng_access_end(ab, tcl_ring);
+ ab->soc_stats.tx_err.desc_na[ti.ring_id]++;
+ spin_unlock_bh(&tcl_ring->lock);
+ ret = -ENOMEM;
+
+ /* Checking for available tcl descritors in another ring in
+ * case of failure due to full tcl ring now, is better than
+ * checking this ring earlier for each pkt tx.
+ * Restart ring selection if some rings are not checked yet.
+ */
+ if (ring_map != (BIT(ab->hw_params->max_tx_ring) - 1) &&
+ ab->hw_params->tcl_ring_retry) {
+ tcl_ring_retry = true;
+ ring_selector++;
+ }
+
+ goto fail_unmap_dma;
+ }
+
+ ath12k_hal_tx_cmd_desc_setup(ab, hal_tcl_desc, &ti);
+
+ ath12k_hal_srng_access_end(ab, tcl_ring);
+
+ spin_unlock_bh(&tcl_ring->lock);
+
+ ath12k_dbg_dump(ab, ATH12K_DBG_DP_TX, NULL, "dp tx msdu: ",
+ skb->data, skb->len);
+
+ atomic_inc(&ar->dp.num_tx_pending);
+
+ return 0;
+
+fail_unmap_dma:
+ dma_unmap_single(ab->dev, ti.paddr, ti.data_len, DMA_TO_DEVICE);
+ dma_unmap_single(ab->dev, skb_cb->paddr_ext_desc,
+ sizeof(struct hal_tx_msdu_ext_desc), DMA_TO_DEVICE);
+
+fail_remove_tx_buf:
+ ath12k_dp_tx_release_txbuf(dp, tx_desc, pool_id);
+ if (tcl_ring_retry)
+ goto tcl_ring_sel;
+
+ return ret;
+}
+
+static void ath12k_dp_tx_free_txbuf(struct ath12k_base *ab,
+ struct sk_buff *msdu, u8 mac_id,
+ struct dp_tx_ring *tx_ring)
+{
+ struct ath12k *ar;
+ struct ath12k_skb_cb *skb_cb;
+
+ skb_cb = ATH12K_SKB_CB(msdu);
+
+ dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
+ if (skb_cb->paddr_ext_desc)
+ dma_unmap_single(ab->dev, skb_cb->paddr_ext_desc,
+ sizeof(struct hal_tx_msdu_ext_desc), DMA_TO_DEVICE);
+
+ dev_kfree_skb_any(msdu);
+
+ ar = ab->pdevs[mac_id].ar;
+ if (atomic_dec_and_test(&ar->dp.num_tx_pending))
+ wake_up(&ar->dp.tx_empty_waitq);
+}
+
+static void
+ath12k_dp_tx_htt_tx_complete_buf(struct ath12k_base *ab,
+ struct sk_buff *msdu,
+ struct dp_tx_ring *tx_ring,
+ struct ath12k_dp_htt_wbm_tx_status *ts)
+{
+ struct ieee80211_tx_info *info;
+ struct ath12k_skb_cb *skb_cb;
+ struct ath12k *ar;
+
+ skb_cb = ATH12K_SKB_CB(msdu);
+ info = IEEE80211_SKB_CB(msdu);
+
+ ar = skb_cb->ar;
+
+ if (atomic_dec_and_test(&ar->dp.num_tx_pending))
+ wake_up(&ar->dp.tx_empty_waitq);
+
+ dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
+ if (skb_cb->paddr_ext_desc)
+ dma_unmap_single(ab->dev, skb_cb->paddr_ext_desc,
+ sizeof(struct hal_tx_msdu_ext_desc), DMA_TO_DEVICE);
+
+ memset(&info->status, 0, sizeof(info->status));
+
+ if (ts->acked) {
+ if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
+ info->flags |= IEEE80211_TX_STAT_ACK;
+ info->status.ack_signal = ATH12K_DEFAULT_NOISE_FLOOR +
+ ts->ack_rssi;
+ info->status.flags = IEEE80211_TX_STATUS_ACK_SIGNAL_VALID;
+ } else {
+ info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
+ }
+ }
+
+ ieee80211_tx_status(ar->hw, msdu);
+}
+
+static void
+ath12k_dp_tx_process_htt_tx_complete(struct ath12k_base *ab,
+ void *desc, u8 mac_id,
+ struct sk_buff *msdu,
+ struct dp_tx_ring *tx_ring)
+{
+ struct htt_tx_wbm_completion *status_desc;
+ struct ath12k_dp_htt_wbm_tx_status ts = {0};
+ enum hal_wbm_htt_tx_comp_status wbm_status;
+
+ status_desc = desc + HTT_TX_WBM_COMP_STATUS_OFFSET;
+
+ wbm_status = le32_get_bits(status_desc->info0,
+ HTT_TX_WBM_COMP_INFO0_STATUS);
+
+ switch (wbm_status) {
+ case HAL_WBM_REL_HTT_TX_COMP_STATUS_OK:
+ case HAL_WBM_REL_HTT_TX_COMP_STATUS_DROP:
+ case HAL_WBM_REL_HTT_TX_COMP_STATUS_TTL:
+ ts.acked = (wbm_status == HAL_WBM_REL_HTT_TX_COMP_STATUS_OK);
+ ts.ack_rssi = le32_get_bits(status_desc->info2,
+ HTT_TX_WBM_COMP_INFO2_ACK_RSSI);
+ ath12k_dp_tx_htt_tx_complete_buf(ab, msdu, tx_ring, &ts);
+ break;
+ case HAL_WBM_REL_HTT_TX_COMP_STATUS_REINJ:
+ case HAL_WBM_REL_HTT_TX_COMP_STATUS_INSPECT:
+ ath12k_dp_tx_free_txbuf(ab, msdu, mac_id, tx_ring);
+ break;
+ case HAL_WBM_REL_HTT_TX_COMP_STATUS_MEC_NOTIFY:
+ /* This event is to be handled only when the driver decides to
+ * use WDS offload functionality.
+ */
+ break;
+ default:
+ ath12k_warn(ab, "Unknown htt tx status %d\n", wbm_status);
+ break;
+ }
+}
+
+static void ath12k_dp_tx_complete_msdu(struct ath12k *ar,
+ struct sk_buff *msdu,
+ struct hal_tx_status *ts)
+{
+ struct ath12k_base *ab = ar->ab;
+ struct ieee80211_tx_info *info;
+ struct ath12k_skb_cb *skb_cb;
+
+ if (WARN_ON_ONCE(ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_TQM)) {
+ /* Must not happen */
+ return;
+ }
+
+ skb_cb = ATH12K_SKB_CB(msdu);
+
+ dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
+ if (skb_cb->paddr_ext_desc)
+ dma_unmap_single(ab->dev, skb_cb->paddr_ext_desc,
+ sizeof(struct hal_tx_msdu_ext_desc), DMA_TO_DEVICE);
+
+ rcu_read_lock();
+
+ if (!rcu_dereference(ab->pdevs_active[ar->pdev_idx])) {
+ dev_kfree_skb_any(msdu);
+ goto exit;
+ }
+
+ if (!skb_cb->vif) {
+ dev_kfree_skb_any(msdu);
+ goto exit;
+ }
+
+ info = IEEE80211_SKB_CB(msdu);
+ memset(&info->status, 0, sizeof(info->status));
+
+ /* skip tx rate update from ieee80211_status*/
+ info->status.rates[0].idx = -1;
+
+ if (ts->status == HAL_WBM_TQM_REL_REASON_FRAME_ACKED &&
+ !(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
+ info->flags |= IEEE80211_TX_STAT_ACK;
+ info->status.ack_signal = ATH12K_DEFAULT_NOISE_FLOOR +
+ ts->ack_rssi;
+ info->status.flags = IEEE80211_TX_STATUS_ACK_SIGNAL_VALID;
+ }
+
+ if (ts->status == HAL_WBM_TQM_REL_REASON_CMD_REMOVE_TX &&
+ (info->flags & IEEE80211_TX_CTL_NO_ACK))
+ info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
+
+ /* NOTE: Tx rate status reporting. Tx completion status does not have
+ * necessary information (for example nss) to build the tx rate.
+ * Might end up reporting it out-of-band from HTT stats.
+ */
+
+ ieee80211_tx_status(ar->hw, msdu);
+
+exit:
+ rcu_read_unlock();
+}
+
+static void ath12k_dp_tx_status_parse(struct ath12k_base *ab,
+ struct hal_wbm_completion_ring_tx *desc,
+ struct hal_tx_status *ts)
+{
+ ts->buf_rel_source =
+ le32_get_bits(desc->info0, HAL_WBM_COMPL_TX_INFO0_REL_SRC_MODULE);
+ if (ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_FW &&
+ ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_TQM)
+ return;
+
+ if (ts->buf_rel_source == HAL_WBM_REL_SRC_MODULE_FW)
+ return;
+
+ ts->status = le32_get_bits(desc->info0,
+ HAL_WBM_COMPL_TX_INFO0_TQM_RELEASE_REASON);
+
+ ts->ppdu_id = le32_get_bits(desc->info1,
+ HAL_WBM_COMPL_TX_INFO1_TQM_STATUS_NUMBER);
+ if (le32_to_cpu(desc->rate_stats.info0) & HAL_TX_RATE_STATS_INFO0_VALID)
+ ts->rate_stats = le32_to_cpu(desc->rate_stats.info0);
+ else
+ ts->rate_stats = 0;
+}
+
+void ath12k_dp_tx_completion_handler(struct ath12k_base *ab, int ring_id)
+{
+ struct ath12k *ar;
+ struct ath12k_dp *dp = &ab->dp;
+ int hal_ring_id = dp->tx_ring[ring_id].tcl_comp_ring.ring_id;
+ struct hal_srng *status_ring = &ab->hal.srng_list[hal_ring_id];
+ struct ath12k_tx_desc_info *tx_desc = NULL;
+ struct sk_buff *msdu;
+ struct hal_tx_status ts = { 0 };
+ struct dp_tx_ring *tx_ring = &dp->tx_ring[ring_id];
+ struct hal_wbm_release_ring *desc;
+ u8 mac_id;
+ u64 desc_va;
+
+ spin_lock_bh(&status_ring->lock);
+
+ ath12k_hal_srng_access_begin(ab, status_ring);
+
+ while (ATH12K_TX_COMPL_NEXT(tx_ring->tx_status_head) != tx_ring->tx_status_tail) {
+ desc = ath12k_hal_srng_dst_get_next_entry(ab, status_ring);
+ if (!desc)
+ break;
+
+ memcpy(&tx_ring->tx_status[tx_ring->tx_status_head],
+ desc, sizeof(*desc));
+ tx_ring->tx_status_head =
+ ATH12K_TX_COMPL_NEXT(tx_ring->tx_status_head);
+ }
+
+ if (ath12k_hal_srng_dst_peek(ab, status_ring) &&
+ (ATH12K_TX_COMPL_NEXT(tx_ring->tx_status_head) == tx_ring->tx_status_tail)) {
+ /* TODO: Process pending tx_status messages when kfifo_is_full() */
+ ath12k_warn(ab, "Unable to process some of the tx_status ring desc because status_fifo is full\n");
+ }
+
+ ath12k_hal_srng_access_end(ab, status_ring);
+
+ spin_unlock_bh(&status_ring->lock);
+
+ while (ATH12K_TX_COMPL_NEXT(tx_ring->tx_status_tail) != tx_ring->tx_status_head) {
+ struct hal_wbm_completion_ring_tx *tx_status;
+ u32 desc_id;
+
+ tx_ring->tx_status_tail =
+ ATH12K_TX_COMPL_NEXT(tx_ring->tx_status_tail);
+ tx_status = &tx_ring->tx_status[tx_ring->tx_status_tail];
+ ath12k_dp_tx_status_parse(ab, tx_status, &ts);
+
+ if (le32_get_bits(tx_status->info0, HAL_WBM_COMPL_TX_INFO0_CC_DONE)) {
+ /* HW done cookie conversion */
+ desc_va = ((u64)le32_to_cpu(tx_status->buf_va_hi) << 32 |
+ le32_to_cpu(tx_status->buf_va_lo));
+ tx_desc = (struct ath12k_tx_desc_info *)((unsigned long)desc_va);
+ } else {
+ /* SW does cookie conversion to VA */
+ desc_id = le32_get_bits(tx_status->buf_va_hi,
+ BUFFER_ADDR_INFO1_SW_COOKIE);
+
+ tx_desc = ath12k_dp_get_tx_desc(ab, desc_id);
+ }
+ if (!tx_desc) {
+ ath12k_warn(ab, "unable to retrieve tx_desc!");
+ continue;
+ }
+
+ msdu = tx_desc->skb;
+ mac_id = tx_desc->mac_id;
+
+ /* Release descriptor as soon as extracting necessary info
+ * to reduce contention
+ */
+ ath12k_dp_tx_release_txbuf(dp, tx_desc, tx_desc->pool_id);
+ if (ts.buf_rel_source == HAL_WBM_REL_SRC_MODULE_FW) {
+ ath12k_dp_tx_process_htt_tx_complete(ab,
+ (void *)tx_status,
+ mac_id, msdu,
+ tx_ring);
+ continue;
+ }
+
+ ar = ab->pdevs[mac_id].ar;
+
+ if (atomic_dec_and_test(&ar->dp.num_tx_pending))
+ wake_up(&ar->dp.tx_empty_waitq);
+
+ ath12k_dp_tx_complete_msdu(ar, msdu, &ts);
+ }
+}
+
+static int
+ath12k_dp_tx_get_ring_id_type(struct ath12k_base *ab,
+ int mac_id, u32 ring_id,
+ enum hal_ring_type ring_type,
+ enum htt_srng_ring_type *htt_ring_type,
+ enum htt_srng_ring_id *htt_ring_id)
+{
+ int ret = 0;
+
+ switch (ring_type) {
+ case HAL_RXDMA_BUF:
+ /* for some targets, host fills rx buffer to fw and fw fills to
+ * rxbuf ring for each rxdma
+ */
+ if (!ab->hw_params->rx_mac_buf_ring) {
+ if (!(ring_id == HAL_SRNG_SW2RXDMA_BUF0 ||
+ ring_id == HAL_SRNG_SW2RXDMA_BUF1)) {
+ ret = -EINVAL;
+ }
+ *htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
+ *htt_ring_type = HTT_SW_TO_HW_RING;
+ } else {
+ if (ring_id == HAL_SRNG_SW2RXDMA_BUF0) {
+ *htt_ring_id = HTT_HOST1_TO_FW_RXBUF_RING;
+ *htt_ring_type = HTT_SW_TO_SW_RING;
+ } else {
+ *htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
+ *htt_ring_type = HTT_SW_TO_HW_RING;
+ }
+ }
+ break;
+ case HAL_RXDMA_DST:
+ *htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING;
+ *htt_ring_type = HTT_HW_TO_SW_RING;
+ break;
+ case HAL_RXDMA_MONITOR_BUF:
+ *htt_ring_id = HTT_RXDMA_MONITOR_BUF_RING;
+ *htt_ring_type = HTT_SW_TO_HW_RING;
+ break;
+ case HAL_RXDMA_MONITOR_STATUS:
+ *htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING;
+ *htt_ring_type = HTT_SW_TO_HW_RING;
+ break;
+ case HAL_RXDMA_MONITOR_DST:
+ *htt_ring_id = HTT_RXDMA_MONITOR_DEST_RING;
+ *htt_ring_type = HTT_HW_TO_SW_RING;
+ break;
+ case HAL_RXDMA_MONITOR_DESC:
+ *htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING;
+ *htt_ring_type = HTT_SW_TO_HW_RING;
+ break;
+ case HAL_TX_MONITOR_BUF:
+ *htt_ring_id = HTT_TX_MON_HOST2MON_BUF_RING;
+ *htt_ring_type = HTT_SW_TO_HW_RING;
+ break;
+ case HAL_TX_MONITOR_DST:
+ *htt_ring_id = HTT_TX_MON_MON2HOST_DEST_RING;
+ *htt_ring_type = HTT_HW_TO_SW_RING;
+ break;
+ default:
+ ath12k_warn(ab, "Unsupported ring type in DP :%d\n", ring_type);
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
+int ath12k_dp_tx_htt_srng_setup(struct ath12k_base *ab, u32 ring_id,
+ int mac_id, enum hal_ring_type ring_type)
+{
+ struct htt_srng_setup_cmd *cmd;
+ struct hal_srng *srng = &ab->hal.srng_list[ring_id];
+ struct hal_srng_params params;
+ struct sk_buff *skb;
+ u32 ring_entry_sz;
+ int len = sizeof(*cmd);
+ dma_addr_t hp_addr, tp_addr;
+ enum htt_srng_ring_type htt_ring_type;
+ enum htt_srng_ring_id htt_ring_id;
+ int ret;
+
+ skb = ath12k_htc_alloc_skb(ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ memset(&params, 0, sizeof(params));
+ ath12k_hal_srng_get_params(ab, srng, &params);
+
+ hp_addr = ath12k_hal_srng_get_hp_addr(ab, srng);
+ tp_addr = ath12k_hal_srng_get_tp_addr(ab, srng);
+
+ ret = ath12k_dp_tx_get_ring_id_type(ab, mac_id, ring_id,
+ ring_type, &htt_ring_type,
+ &htt_ring_id);
+ if (ret)
+ goto err_free;
+
+ skb_put(skb, len);
+ cmd = (struct htt_srng_setup_cmd *)skb->data;
+ cmd->info0 = le32_encode_bits(HTT_H2T_MSG_TYPE_SRING_SETUP,
+ HTT_SRNG_SETUP_CMD_INFO0_MSG_TYPE);
+ if (htt_ring_type == HTT_SW_TO_HW_RING ||
+ htt_ring_type == HTT_HW_TO_SW_RING)
+ cmd->info0 |= le32_encode_bits(DP_SW2HW_MACID(mac_id),
+ HTT_SRNG_SETUP_CMD_INFO0_PDEV_ID);
+ else
+ cmd->info0 |= le32_encode_bits(mac_id,
+ HTT_SRNG_SETUP_CMD_INFO0_PDEV_ID);
+ cmd->info0 |= le32_encode_bits(htt_ring_type,
+ HTT_SRNG_SETUP_CMD_INFO0_RING_TYPE);
+ cmd->info0 |= le32_encode_bits(htt_ring_id,
+ HTT_SRNG_SETUP_CMD_INFO0_RING_ID);
+
+ cmd->ring_base_addr_lo = cpu_to_le32(params.ring_base_paddr &
+ HAL_ADDR_LSB_REG_MASK);
+
+ cmd->ring_base_addr_hi = cpu_to_le32((u64)params.ring_base_paddr >>
+ HAL_ADDR_MSB_REG_SHIFT);
+
+ ret = ath12k_hal_srng_get_entrysize(ab, ring_type);
+ if (ret < 0)
+ goto err_free;
+
+ ring_entry_sz = ret;
+
+ ring_entry_sz >>= 2;
+ cmd->info1 = le32_encode_bits(ring_entry_sz,
+ HTT_SRNG_SETUP_CMD_INFO1_RING_ENTRY_SIZE);
+ cmd->info1 |= le32_encode_bits(params.num_entries * ring_entry_sz,
+ HTT_SRNG_SETUP_CMD_INFO1_RING_SIZE);
+ cmd->info1 |= le32_encode_bits(!!(params.flags & HAL_SRNG_FLAGS_MSI_SWAP),
+ HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_MSI_SWAP);
+ cmd->info1 |= le32_encode_bits(!!(params.flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP),
+ HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_TLV_SWAP);
+ cmd->info1 |= le32_encode_bits(!!(params.flags & HAL_SRNG_FLAGS_RING_PTR_SWAP),
+ HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_HOST_FW_SWAP);
+ if (htt_ring_type == HTT_SW_TO_HW_RING)
+ cmd->info1 |= cpu_to_le32(HTT_SRNG_SETUP_CMD_INFO1_RING_LOOP_CNT_DIS);
+
+ cmd->ring_head_off32_remote_addr_lo = cpu_to_le32(lower_32_bits(hp_addr));
+ cmd->ring_head_off32_remote_addr_hi = cpu_to_le32(upper_32_bits(hp_addr));
+
+ cmd->ring_tail_off32_remote_addr_lo = cpu_to_le32(lower_32_bits(tp_addr));
+ cmd->ring_tail_off32_remote_addr_hi = cpu_to_le32(upper_32_bits(tp_addr));
+
+ cmd->ring_msi_addr_lo = cpu_to_le32(lower_32_bits(params.msi_addr));
+ cmd->ring_msi_addr_hi = cpu_to_le32(upper_32_bits(params.msi_addr));
+ cmd->msi_data = cpu_to_le32(params.msi_data);
+
+ cmd->intr_info =
+ le32_encode_bits(params.intr_batch_cntr_thres_entries * ring_entry_sz,
+ HTT_SRNG_SETUP_CMD_INTR_INFO_BATCH_COUNTER_THRESH);
+ cmd->intr_info |=
+ le32_encode_bits(params.intr_timer_thres_us >> 3,
+ HTT_SRNG_SETUP_CMD_INTR_INFO_INTR_TIMER_THRESH);
+
+ cmd->info2 = 0;
+ if (params.flags & HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN) {
+ cmd->info2 = le32_encode_bits(params.low_threshold,
+ HTT_SRNG_SETUP_CMD_INFO2_INTR_LOW_THRESH);
+ }
+
+ ath12k_dbg(ab, ATH12K_DBG_HAL,
+ "%s msi_addr_lo:0x%x, msi_addr_hi:0x%x, msi_data:0x%x\n",
+ __func__, cmd->ring_msi_addr_lo, cmd->ring_msi_addr_hi,
+ cmd->msi_data);
+
+ ath12k_dbg(ab, ATH12K_DBG_HAL,
+ "ring_id:%d, ring_type:%d, intr_info:0x%x, flags:0x%x\n",
+ ring_id, ring_type, cmd->intr_info, cmd->info2);
+
+ ret = ath12k_htc_send(&ab->htc, ab->dp.eid, skb);
+ if (ret)
+ goto err_free;
+
+ return 0;
+
+err_free:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
+#define HTT_TARGET_VERSION_TIMEOUT_HZ (3 * HZ)
+
+int ath12k_dp_tx_htt_h2t_ver_req_msg(struct ath12k_base *ab)
+{
+ struct ath12k_dp *dp = &ab->dp;
+ struct sk_buff *skb;
+ struct htt_ver_req_cmd *cmd;
+ int len = sizeof(*cmd);
+ int ret;
+
+ init_completion(&dp->htt_tgt_version_received);
+
+ skb = ath12k_htc_alloc_skb(ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put(skb, len);
+ cmd = (struct htt_ver_req_cmd *)skb->data;
+ cmd->ver_reg_info = le32_encode_bits(HTT_H2T_MSG_TYPE_VERSION_REQ,
+ HTT_VER_REQ_INFO_MSG_ID);
+
+ ret = ath12k_htc_send(&ab->htc, dp->eid, skb);
+ if (ret) {
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+
+ ret = wait_for_completion_timeout(&dp->htt_tgt_version_received,
+ HTT_TARGET_VERSION_TIMEOUT_HZ);
+ if (ret == 0) {
+ ath12k_warn(ab, "htt target version request timed out\n");
+ return -ETIMEDOUT;
+ }
+
+ if (dp->htt_tgt_ver_major != HTT_TARGET_VERSION_MAJOR) {
+ ath12k_err(ab, "unsupported htt major version %d supported version is %d\n",
+ dp->htt_tgt_ver_major, HTT_TARGET_VERSION_MAJOR);
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+int ath12k_dp_tx_htt_h2t_ppdu_stats_req(struct ath12k *ar, u32 mask)
+{
+ struct ath12k_base *ab = ar->ab;
+ struct ath12k_dp *dp = &ab->dp;
+ struct sk_buff *skb;
+ struct htt_ppdu_stats_cfg_cmd *cmd;
+ int len = sizeof(*cmd);
+ u8 pdev_mask;
+ int ret;
+ int i;
+
+ for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {
+ skb = ath12k_htc_alloc_skb(ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put(skb, len);
+ cmd = (struct htt_ppdu_stats_cfg_cmd *)skb->data;
+ cmd->msg = le32_encode_bits(HTT_H2T_MSG_TYPE_PPDU_STATS_CFG,
+ HTT_PPDU_STATS_CFG_MSG_TYPE);
+
+ pdev_mask = 1 << (i + 1);
+ cmd->msg |= le32_encode_bits(pdev_mask, HTT_PPDU_STATS_CFG_PDEV_ID);
+ cmd->msg |= le32_encode_bits(mask, HTT_PPDU_STATS_CFG_TLV_TYPE_BITMASK);
+
+ ret = ath12k_htc_send(&ab->htc, dp->eid, skb);
+ if (ret) {
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+int ath12k_dp_tx_htt_rx_filter_setup(struct ath12k_base *ab, u32 ring_id,
+ int mac_id, enum hal_ring_type ring_type,
+ int rx_buf_size,
+ struct htt_rx_ring_tlv_filter *tlv_filter)
+{
+ struct htt_rx_ring_selection_cfg_cmd *cmd;
+ struct hal_srng *srng = &ab->hal.srng_list[ring_id];
+ struct hal_srng_params params;
+ struct sk_buff *skb;
+ int len = sizeof(*cmd);
+ enum htt_srng_ring_type htt_ring_type;
+ enum htt_srng_ring_id htt_ring_id;
+ int ret;
+
+ skb = ath12k_htc_alloc_skb(ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ memset(&params, 0, sizeof(params));
+ ath12k_hal_srng_get_params(ab, srng, &params);
+
+ ret = ath12k_dp_tx_get_ring_id_type(ab, mac_id, ring_id,
+ ring_type, &htt_ring_type,
+ &htt_ring_id);
+ if (ret)
+ goto err_free;
+
+ skb_put(skb, len);
+ cmd = (struct htt_rx_ring_selection_cfg_cmd *)skb->data;
+ cmd->info0 = le32_encode_bits(HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG,
+ HTT_RX_RING_SELECTION_CFG_CMD_INFO0_MSG_TYPE);
+ if (htt_ring_type == HTT_SW_TO_HW_RING ||
+ htt_ring_type == HTT_HW_TO_SW_RING)
+ cmd->info0 |=
+ le32_encode_bits(DP_SW2HW_MACID(mac_id),
+ HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID);
+ else
+ cmd->info0 |=
+ le32_encode_bits(mac_id,
+ HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID);
+ cmd->info0 |= le32_encode_bits(htt_ring_id,
+ HTT_RX_RING_SELECTION_CFG_CMD_INFO0_RING_ID);
+ cmd->info0 |= le32_encode_bits(!!(params.flags & HAL_SRNG_FLAGS_MSI_SWAP),
+ HTT_RX_RING_SELECTION_CFG_CMD_INFO0_SS);
+ cmd->info0 |= le32_encode_bits(!!(params.flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP),
+ HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PS);
+ cmd->info0 |= le32_encode_bits(tlv_filter->offset_valid,
+ HTT_RX_RING_SELECTION_CFG_CMD_OFFSET_VALID);
+ cmd->info1 = le32_encode_bits(rx_buf_size,
+ HTT_RX_RING_SELECTION_CFG_CMD_INFO1_BUF_SIZE);
+ cmd->pkt_type_en_flags0 = cpu_to_le32(tlv_filter->pkt_filter_flags0);
+ cmd->pkt_type_en_flags1 = cpu_to_le32(tlv_filter->pkt_filter_flags1);
+ cmd->pkt_type_en_flags2 = cpu_to_le32(tlv_filter->pkt_filter_flags2);
+ cmd->pkt_type_en_flags3 = cpu_to_le32(tlv_filter->pkt_filter_flags3);
+ cmd->rx_filter_tlv = cpu_to_le32(tlv_filter->rx_filter);
+
+ if (tlv_filter->offset_valid) {
+ cmd->rx_packet_offset =
+ le32_encode_bits(tlv_filter->rx_packet_offset,
+ HTT_RX_RING_SELECTION_CFG_RX_PACKET_OFFSET);
+
+ cmd->rx_packet_offset |=
+ le32_encode_bits(tlv_filter->rx_header_offset,
+ HTT_RX_RING_SELECTION_CFG_RX_HEADER_OFFSET);
+
+ cmd->rx_mpdu_offset =
+ le32_encode_bits(tlv_filter->rx_mpdu_end_offset,
+ HTT_RX_RING_SELECTION_CFG_RX_MPDU_END_OFFSET);
+
+ cmd->rx_mpdu_offset |=
+ le32_encode_bits(tlv_filter->rx_mpdu_start_offset,
+ HTT_RX_RING_SELECTION_CFG_RX_MPDU_START_OFFSET);
+
+ cmd->rx_msdu_offset =
+ le32_encode_bits(tlv_filter->rx_msdu_end_offset,
+ HTT_RX_RING_SELECTION_CFG_RX_MSDU_END_OFFSET);
+
+ cmd->rx_msdu_offset |=
+ le32_encode_bits(tlv_filter->rx_msdu_start_offset,
+ HTT_RX_RING_SELECTION_CFG_RX_MSDU_START_OFFSET);
+
+ cmd->rx_attn_offset =
+ le32_encode_bits(tlv_filter->rx_attn_offset,
+ HTT_RX_RING_SELECTION_CFG_RX_ATTENTION_OFFSET);
+ }
+
+ ret = ath12k_htc_send(&ab->htc, ab->dp.eid, skb);
+ if (ret)
+ goto err_free;
+
+ return 0;
+
+err_free:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
+int
+ath12k_dp_tx_htt_h2t_ext_stats_req(struct ath12k *ar, u8 type,
+ struct htt_ext_stats_cfg_params *cfg_params,
+ u64 cookie)
+{
+ struct ath12k_base *ab = ar->ab;
+ struct ath12k_dp *dp = &ab->dp;
+ struct sk_buff *skb;
+ struct htt_ext_stats_cfg_cmd *cmd;
+ int len = sizeof(*cmd);
+ int ret;
+
+ skb = ath12k_htc_alloc_skb(ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put(skb, len);
+
+ cmd = (struct htt_ext_stats_cfg_cmd *)skb->data;
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_EXT_STATS_CFG;
+
+ cmd->hdr.pdev_mask = 1 << ar->pdev->pdev_id;
+
+ cmd->hdr.stats_type = type;
+ cmd->cfg_param0 = cpu_to_le32(cfg_params->cfg0);
+ cmd->cfg_param1 = cpu_to_le32(cfg_params->cfg1);
+ cmd->cfg_param2 = cpu_to_le32(cfg_params->cfg2);
+ cmd->cfg_param3 = cpu_to_le32(cfg_params->cfg3);
+ cmd->cookie_lsb = cpu_to_le32(lower_32_bits(cookie));
+ cmd->cookie_msb = cpu_to_le32(upper_32_bits(cookie));
+
+ ret = ath12k_htc_send(&ab->htc, dp->eid, skb);
+ if (ret) {
+ ath12k_warn(ab, "failed to send htt type stats request: %d",
+ ret);
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+
+ return 0;
+}
+
+int ath12k_dp_tx_htt_monitor_mode_ring_config(struct ath12k *ar, bool reset)
+{
+ struct ath12k_base *ab = ar->ab;
+ int ret;
+
+ ret = ath12k_dp_tx_htt_tx_monitor_mode_ring_config(ar, reset);
+ if (ret) {
+ ath12k_err(ab, "failed to setup tx monitor filter %d\n", ret);
+ return ret;
+ }
+
+ ret = ath12k_dp_tx_htt_tx_monitor_mode_ring_config(ar, reset);
+ if (ret) {
+ ath12k_err(ab, "failed to setup rx monitor filter %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+int ath12k_dp_tx_htt_rx_monitor_mode_ring_config(struct ath12k *ar, bool reset)
+{
+ struct ath12k_base *ab = ar->ab;
+ struct ath12k_dp *dp = &ab->dp;
+ struct htt_rx_ring_tlv_filter tlv_filter = {0};
+ int ret, ring_id;
+
+ ring_id = dp->rxdma_mon_buf_ring.refill_buf_ring.ring_id;
+ tlv_filter.offset_valid = false;
+
+ if (!reset) {
+ tlv_filter.rx_filter = HTT_RX_MON_FILTER_TLV_FLAGS_MON_BUF_RING;
+ tlv_filter.pkt_filter_flags0 =
+ HTT_RX_MON_FP_MGMT_FILTER_FLAGS0 |
+ HTT_RX_MON_MO_MGMT_FILTER_FLAGS0;
+ tlv_filter.pkt_filter_flags1 =
+ HTT_RX_MON_FP_MGMT_FILTER_FLAGS1 |
+ HTT_RX_MON_MO_MGMT_FILTER_FLAGS1;
+ tlv_filter.pkt_filter_flags2 =
+ HTT_RX_MON_FP_CTRL_FILTER_FLASG2 |
+ HTT_RX_MON_MO_CTRL_FILTER_FLASG2;
+ tlv_filter.pkt_filter_flags3 =
+ HTT_RX_MON_FP_CTRL_FILTER_FLASG3 |
+ HTT_RX_MON_MO_CTRL_FILTER_FLASG3 |
+ HTT_RX_MON_FP_DATA_FILTER_FLASG3 |
+ HTT_RX_MON_MO_DATA_FILTER_FLASG3;
+ }
+
+ if (ab->hw_params->rxdma1_enable) {
+ ret = ath12k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, 0,
+ HAL_RXDMA_MONITOR_BUF,
+ DP_RXDMA_REFILL_RING_SIZE,
+ &tlv_filter);
+ if (ret) {
+ ath12k_err(ab,
+ "failed to setup filter for monitor buf %d\n", ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+int ath12k_dp_tx_htt_tx_filter_setup(struct ath12k_base *ab, u32 ring_id,
+ int mac_id, enum hal_ring_type ring_type,
+ int tx_buf_size,
+ struct htt_tx_ring_tlv_filter *htt_tlv_filter)
+{
+ struct htt_tx_ring_selection_cfg_cmd *cmd;
+ struct hal_srng *srng = &ab->hal.srng_list[ring_id];
+ struct hal_srng_params params;
+ struct sk_buff *skb;
+ int len = sizeof(*cmd);
+ enum htt_srng_ring_type htt_ring_type;
+ enum htt_srng_ring_id htt_ring_id;
+ int ret;
+
+ skb = ath12k_htc_alloc_skb(ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ memset(&params, 0, sizeof(params));
+ ath12k_hal_srng_get_params(ab, srng, &params);
+
+ ret = ath12k_dp_tx_get_ring_id_type(ab, mac_id, ring_id,
+ ring_type, &htt_ring_type,
+ &htt_ring_id);
+
+ if (ret)
+ goto err_free;
+
+ skb_put(skb, len);
+ cmd = (struct htt_tx_ring_selection_cfg_cmd *)skb->data;
+ cmd->info0 = le32_encode_bits(HTT_H2T_MSG_TYPE_TX_MONITOR_CFG,
+ HTT_TX_RING_SELECTION_CFG_CMD_INFO0_MSG_TYPE);
+ if (htt_ring_type == HTT_SW_TO_HW_RING ||
+ htt_ring_type == HTT_HW_TO_SW_RING)
+ cmd->info0 |=
+ le32_encode_bits(DP_SW2HW_MACID(mac_id),
+ HTT_TX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID);
+ else
+ cmd->info0 |=
+ le32_encode_bits(mac_id,
+ HTT_TX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID);
+ cmd->info0 |= le32_encode_bits(htt_ring_id,
+ HTT_TX_RING_SELECTION_CFG_CMD_INFO0_RING_ID);
+ cmd->info0 |= le32_encode_bits(!!(params.flags & HAL_SRNG_FLAGS_MSI_SWAP),
+ HTT_TX_RING_SELECTION_CFG_CMD_INFO0_SS);
+ cmd->info0 |= le32_encode_bits(!!(params.flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP),
+ HTT_TX_RING_SELECTION_CFG_CMD_INFO0_PS);
+
+ cmd->info1 |=
+ le32_encode_bits(tx_buf_size,
+ HTT_TX_RING_SELECTION_CFG_CMD_INFO1_RING_BUFF_SIZE);
+
+ if (htt_tlv_filter->tx_mon_mgmt_filter) {
+ cmd->info1 |=
+ le32_encode_bits(HTT_STATS_FRAME_CTRL_TYPE_MGMT,
+ HTT_TX_RING_SELECTION_CFG_CMD_INFO1_PKT_TYPE);
+ cmd->info1 |=
+ le32_encode_bits(htt_tlv_filter->tx_mon_pkt_dma_len,
+ HTT_TX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_MGMT);
+ cmd->info2 |=
+ le32_encode_bits(HTT_STATS_FRAME_CTRL_TYPE_MGMT,
+ HTT_TX_RING_SELECTION_CFG_CMD_INFO2_PKT_TYPE_EN_FLAG);
+ }
+
+ if (htt_tlv_filter->tx_mon_data_filter) {
+ cmd->info1 |=
+ le32_encode_bits(HTT_STATS_FRAME_CTRL_TYPE_CTRL,
+ HTT_TX_RING_SELECTION_CFG_CMD_INFO1_PKT_TYPE);
+ cmd->info1 |=
+ le32_encode_bits(htt_tlv_filter->tx_mon_pkt_dma_len,
+ HTT_TX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_CTRL);
+ cmd->info2 |=
+ le32_encode_bits(HTT_STATS_FRAME_CTRL_TYPE_CTRL,
+ HTT_TX_RING_SELECTION_CFG_CMD_INFO2_PKT_TYPE_EN_FLAG);
+ }
+
+ if (htt_tlv_filter->tx_mon_ctrl_filter) {
+ cmd->info1 |=
+ le32_encode_bits(HTT_STATS_FRAME_CTRL_TYPE_DATA,
+ HTT_TX_RING_SELECTION_CFG_CMD_INFO1_PKT_TYPE);
+ cmd->info1 |=
+ le32_encode_bits(htt_tlv_filter->tx_mon_pkt_dma_len,
+ HTT_TX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_DATA);
+ cmd->info2 |=
+ le32_encode_bits(HTT_STATS_FRAME_CTRL_TYPE_DATA,
+ HTT_TX_RING_SELECTION_CFG_CMD_INFO2_PKT_TYPE_EN_FLAG);
+ }
+
+ cmd->tlv_filter_mask_in0 =
+ cpu_to_le32(htt_tlv_filter->tx_mon_downstream_tlv_flags);
+ cmd->tlv_filter_mask_in1 =
+ cpu_to_le32(htt_tlv_filter->tx_mon_upstream_tlv_flags0);
+ cmd->tlv_filter_mask_in2 =
+ cpu_to_le32(htt_tlv_filter->tx_mon_upstream_tlv_flags1);
+ cmd->tlv_filter_mask_in3 =
+ cpu_to_le32(htt_tlv_filter->tx_mon_upstream_tlv_flags2);
+
+ ret = ath12k_htc_send(&ab->htc, ab->dp.eid, skb);
+ if (ret)
+ goto err_free;
+
+ return 0;
+
+err_free:
+ dev_kfree_skb_any(skb);
+ return ret;
+}
+
+int ath12k_dp_tx_htt_tx_monitor_mode_ring_config(struct ath12k *ar, bool reset)
+{
+ struct ath12k_base *ab = ar->ab;
+ struct ath12k_dp *dp = &ab->dp;
+ struct htt_tx_ring_tlv_filter tlv_filter = {0};
+ int ret, ring_id;
+
+ ring_id = dp->tx_mon_buf_ring.refill_buf_ring.ring_id;
+
+ /* TODO: Need to set upstream/downstream tlv filters
+ * here
+ */
+
+ if (ab->hw_params->rxdma1_enable) {
+ ret = ath12k_dp_tx_htt_tx_filter_setup(ar->ab, ring_id, 0,
+ HAL_TX_MONITOR_BUF,
+ DP_RXDMA_REFILL_RING_SIZE,
+ &tlv_filter);
+ if (ret) {
+ ath12k_err(ab,
+ "failed to setup filter for monitor buf %d\n", ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
diff --git a/drivers/net/wireless/ath/ath12k/dp_tx.h b/drivers/net/wireless/ath/ath12k/dp_tx.h
new file mode 100644
index 000000000000..436d77e5e9ee
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/dp_tx.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef ATH12K_DP_TX_H
+#define ATH12K_DP_TX_H
+
+#include "core.h"
+#include "hal_tx.h"
+
+struct ath12k_dp_htt_wbm_tx_status {
+ bool acked;
+ int ack_rssi;
+};
+
+int ath12k_dp_tx_htt_h2t_ver_req_msg(struct ath12k_base *ab);
+int ath12k_dp_tx(struct ath12k *ar, struct ath12k_vif *arvif,
+ struct sk_buff *skb);
+void ath12k_dp_tx_completion_handler(struct ath12k_base *ab, int ring_id);
+
+int ath12k_dp_tx_htt_h2t_ppdu_stats_req(struct ath12k *ar, u32 mask);
+int
+ath12k_dp_tx_htt_h2t_ext_stats_req(struct ath12k *ar, u8 type,
+ struct htt_ext_stats_cfg_params *cfg_params,
+ u64 cookie);
+int ath12k_dp_tx_htt_rx_monitor_mode_ring_config(struct ath12k *ar, bool reset);
+
+int ath12k_dp_tx_htt_rx_filter_setup(struct ath12k_base *ab, u32 ring_id,
+ int mac_id, enum hal_ring_type ring_type,
+ int rx_buf_size,
+ struct htt_rx_ring_tlv_filter *tlv_filter);
+void ath12k_dp_tx_put_bank_profile(struct ath12k_dp *dp, u8 bank_id);
+int ath12k_dp_tx_htt_tx_filter_setup(struct ath12k_base *ab, u32 ring_id,
+ int mac_id, enum hal_ring_type ring_type,
+ int tx_buf_size,
+ struct htt_tx_ring_tlv_filter *htt_tlv_filter);
+int ath12k_dp_tx_htt_tx_monitor_mode_ring_config(struct ath12k *ar, bool reset);
+int ath12k_dp_tx_htt_monitor_mode_ring_config(struct ath12k *ar, bool reset);
+#endif
diff --git a/drivers/net/wireless/ath/ath12k/hal.c b/drivers/net/wireless/ath/ath12k/hal.c
new file mode 100644
index 000000000000..95d04819083f
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/hal.c
@@ -0,0 +1,2222 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+#include <linux/dma-mapping.h>
+#include "hal_tx.h"
+#include "hal_rx.h"
+#include "debug.h"
+#include "hal_desc.h"
+#include "hif.h"
+
+static const struct hal_srng_config hw_srng_config_template[] = {
+ /* TODO: max_rings can populated by querying HW capabilities */
+ [HAL_REO_DST] = {
+ .start_ring_id = HAL_SRNG_RING_ID_REO2SW1,
+ .max_rings = 8,
+ .entry_size = sizeof(struct hal_reo_dest_ring) >> 2,
+ .mac_type = ATH12K_HAL_SRNG_UMAC,
+ .ring_dir = HAL_SRNG_DIR_DST,
+ .max_size = HAL_REO_REO2SW1_RING_BASE_MSB_RING_SIZE,
+ },
+ [HAL_REO_EXCEPTION] = {
+ /* Designating REO2SW0 ring as exception ring.
+ * Any of theREO2SW rings can be used as exception ring.
+ */
+ .start_ring_id = HAL_SRNG_RING_ID_REO2SW0,
+ .max_rings = 1,
+ .entry_size = sizeof(struct hal_reo_dest_ring) >> 2,
+ .mac_type = ATH12K_HAL_SRNG_UMAC,
+ .ring_dir = HAL_SRNG_DIR_DST,
+ .max_size = HAL_REO_REO2SW0_RING_BASE_MSB_RING_SIZE,
+ },
+ [HAL_REO_REINJECT] = {
+ .start_ring_id = HAL_SRNG_RING_ID_SW2REO,
+ .max_rings = 4,
+ .entry_size = sizeof(struct hal_reo_entrance_ring) >> 2,
+ .mac_type = ATH12K_HAL_SRNG_UMAC,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .max_size = HAL_REO_SW2REO_RING_BASE_MSB_RING_SIZE,
+ },
+ [HAL_REO_CMD] = {
+ .start_ring_id = HAL_SRNG_RING_ID_REO_CMD,
+ .max_rings = 1,
+ .entry_size = (sizeof(struct hal_tlv_64_hdr) +
+ sizeof(struct hal_reo_get_queue_stats)) >> 2,
+ .mac_type = ATH12K_HAL_SRNG_UMAC,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .max_size = HAL_REO_CMD_RING_BASE_MSB_RING_SIZE,
+ },
+ [HAL_REO_STATUS] = {
+ .start_ring_id = HAL_SRNG_RING_ID_REO_STATUS,
+ .max_rings = 1,
+ .entry_size = (sizeof(struct hal_tlv_64_hdr) +
+ sizeof(struct hal_reo_get_queue_stats_status)) >> 2,
+ .mac_type = ATH12K_HAL_SRNG_UMAC,
+ .ring_dir = HAL_SRNG_DIR_DST,
+ .max_size = HAL_REO_STATUS_RING_BASE_MSB_RING_SIZE,
+ },
+ [HAL_TCL_DATA] = {
+ .start_ring_id = HAL_SRNG_RING_ID_SW2TCL1,
+ .max_rings = 6,
+ .entry_size = sizeof(struct hal_tcl_data_cmd) >> 2,
+ .mac_type = ATH12K_HAL_SRNG_UMAC,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .max_size = HAL_SW2TCL1_RING_BASE_MSB_RING_SIZE,
+ },
+ [HAL_TCL_CMD] = {
+ .start_ring_id = HAL_SRNG_RING_ID_SW2TCL_CMD,
+ .max_rings = 1,
+ .entry_size = sizeof(struct hal_tcl_gse_cmd) >> 2,
+ .mac_type = ATH12K_HAL_SRNG_UMAC,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .max_size = HAL_SW2TCL1_CMD_RING_BASE_MSB_RING_SIZE,
+ },
+ [HAL_TCL_STATUS] = {
+ .start_ring_id = HAL_SRNG_RING_ID_TCL_STATUS,
+ .max_rings = 1,
+ .entry_size = (sizeof(struct hal_tlv_hdr) +
+ sizeof(struct hal_tcl_status_ring)) >> 2,
+ .mac_type = ATH12K_HAL_SRNG_UMAC,
+ .ring_dir = HAL_SRNG_DIR_DST,
+ .max_size = HAL_TCL_STATUS_RING_BASE_MSB_RING_SIZE,
+ },
+ [HAL_CE_SRC] = {
+ .start_ring_id = HAL_SRNG_RING_ID_CE0_SRC,
+ .max_rings = 16,
+ .entry_size = sizeof(struct hal_ce_srng_src_desc) >> 2,
+ .mac_type = ATH12K_HAL_SRNG_UMAC,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .max_size = HAL_CE_SRC_RING_BASE_MSB_RING_SIZE,
+ },
+ [HAL_CE_DST] = {
+ .start_ring_id = HAL_SRNG_RING_ID_CE0_DST,
+ .max_rings = 16,
+ .entry_size = sizeof(struct hal_ce_srng_dest_desc) >> 2,
+ .mac_type = ATH12K_HAL_SRNG_UMAC,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .max_size = HAL_CE_DST_RING_BASE_MSB_RING_SIZE,
+ },
+ [HAL_CE_DST_STATUS] = {
+ .start_ring_id = HAL_SRNG_RING_ID_CE0_DST_STATUS,
+ .max_rings = 16,
+ .entry_size = sizeof(struct hal_ce_srng_dst_status_desc) >> 2,
+ .mac_type = ATH12K_HAL_SRNG_UMAC,
+ .ring_dir = HAL_SRNG_DIR_DST,
+ .max_size = HAL_CE_DST_STATUS_RING_BASE_MSB_RING_SIZE,
+ },
+ [HAL_WBM_IDLE_LINK] = {
+ .start_ring_id = HAL_SRNG_RING_ID_WBM_IDLE_LINK,
+ .max_rings = 1,
+ .entry_size = sizeof(struct hal_wbm_link_desc) >> 2,
+ .mac_type = ATH12K_HAL_SRNG_UMAC,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .max_size = HAL_WBM_IDLE_LINK_RING_BASE_MSB_RING_SIZE,
+ },
+ [HAL_SW2WBM_RELEASE] = {
+ .start_ring_id = HAL_SRNG_RING_ID_WBM_SW0_RELEASE,
+ .max_rings = 2,
+ .entry_size = sizeof(struct hal_wbm_release_ring) >> 2,
+ .mac_type = ATH12K_HAL_SRNG_UMAC,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .max_size = HAL_SW2WBM_RELEASE_RING_BASE_MSB_RING_SIZE,
+ },
+ [HAL_WBM2SW_RELEASE] = {
+ .start_ring_id = HAL_SRNG_RING_ID_WBM2SW0_RELEASE,
+ .max_rings = 8,
+ .entry_size = sizeof(struct hal_wbm_release_ring) >> 2,
+ .mac_type = ATH12K_HAL_SRNG_UMAC,
+ .ring_dir = HAL_SRNG_DIR_DST,
+ .max_size = HAL_WBM2SW_RELEASE_RING_BASE_MSB_RING_SIZE,
+ },
+ [HAL_RXDMA_BUF] = {
+ .start_ring_id = HAL_SRNG_SW2RXDMA_BUF0,
+ .max_rings = 1,
+ .entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2,
+ .mac_type = ATH12K_HAL_SRNG_DMAC,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .max_size = HAL_RXDMA_RING_MAX_SIZE_BE,
+ },
+ [HAL_RXDMA_DST] = {
+ .start_ring_id = HAL_SRNG_RING_ID_WMAC1_RXDMA2SW0,
+ .max_rings = 0,
+ .entry_size = 0,
+ .mac_type = ATH12K_HAL_SRNG_PMAC,
+ .ring_dir = HAL_SRNG_DIR_DST,
+ .max_size = HAL_RXDMA_RING_MAX_SIZE_BE,
+ },
+ [HAL_RXDMA_MONITOR_BUF] = {
+ .start_ring_id = HAL_SRNG_SW2RXMON_BUF0,
+ .max_rings = 1,
+ .entry_size = sizeof(struct hal_mon_buf_ring) >> 2,
+ .mac_type = ATH12K_HAL_SRNG_PMAC,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .max_size = HAL_RXDMA_RING_MAX_SIZE_BE,
+ },
+ [HAL_RXDMA_MONITOR_STATUS] = { 0, },
+ [HAL_RXDMA_MONITOR_DESC] = { 0, },
+ [HAL_RXDMA_DIR_BUF] = {
+ .start_ring_id = HAL_SRNG_RING_ID_RXDMA_DIR_BUF,
+ .max_rings = 2,
+ .entry_size = 8 >> 2, /* TODO: Define the struct */
+ .mac_type = ATH12K_HAL_SRNG_PMAC,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .max_size = HAL_RXDMA_RING_MAX_SIZE_BE,
+ },
+ [HAL_PPE2TCL] = {
+ .start_ring_id = HAL_SRNG_RING_ID_PPE2TCL1,
+ .max_rings = 1,
+ .entry_size = sizeof(struct hal_tcl_entrance_from_ppe_ring) >> 2,
+ .mac_type = ATH12K_HAL_SRNG_PMAC,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .max_size = HAL_SW2TCL1_RING_BASE_MSB_RING_SIZE,
+ },
+ [HAL_PPE_RELEASE] = {
+ .start_ring_id = HAL_SRNG_RING_ID_WBM_PPE_RELEASE,
+ .max_rings = 1,
+ .entry_size = sizeof(struct hal_wbm_release_ring) >> 2,
+ .mac_type = ATH12K_HAL_SRNG_PMAC,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .max_size = HAL_WBM2PPE_RELEASE_RING_BASE_MSB_RING_SIZE,
+ },
+ [HAL_TX_MONITOR_BUF] = {
+ .start_ring_id = HAL_SRNG_SW2TXMON_BUF0,
+ .max_rings = 1,
+ .entry_size = sizeof(struct hal_mon_buf_ring) >> 2,
+ .mac_type = ATH12K_HAL_SRNG_PMAC,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .max_size = HAL_RXDMA_RING_MAX_SIZE_BE,
+ },
+ [HAL_RXDMA_MONITOR_DST] = {
+ .start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2RXMON_BUF0,
+ .max_rings = 1,
+ .entry_size = sizeof(struct hal_mon_dest_desc) >> 2,
+ .mac_type = ATH12K_HAL_SRNG_PMAC,
+ .ring_dir = HAL_SRNG_DIR_DST,
+ .max_size = HAL_RXDMA_RING_MAX_SIZE_BE,
+ },
+ [HAL_TX_MONITOR_DST] = {
+ .start_ring_id = HAL_SRNG_RING_ID_WMAC1_TXMON2SW0_BUF0,
+ .max_rings = 1,
+ .entry_size = sizeof(struct hal_mon_dest_desc) >> 2,
+ .mac_type = ATH12K_HAL_SRNG_PMAC,
+ .ring_dir = HAL_SRNG_DIR_DST,
+ .max_size = HAL_RXDMA_RING_MAX_SIZE_BE,
+ }
+};
+
+static const struct ath12k_hal_tcl_to_wbm_rbm_map
+ath12k_hal_qcn9274_tcl_to_wbm_rbm_map[DP_TCL_NUM_RING_MAX] = {
+ {
+ .wbm_ring_num = 0,
+ .rbm_id = HAL_RX_BUF_RBM_SW0_BM,
+ },
+ {
+ .wbm_ring_num = 1,
+ .rbm_id = HAL_RX_BUF_RBM_SW1_BM,
+ },
+ {
+ .wbm_ring_num = 2,
+ .rbm_id = HAL_RX_BUF_RBM_SW2_BM,
+ },
+ {
+ .wbm_ring_num = 4,
+ .rbm_id = HAL_RX_BUF_RBM_SW4_BM,
+ }
+};
+
+static const struct ath12k_hal_tcl_to_wbm_rbm_map
+ath12k_hal_wcn7850_tcl_to_wbm_rbm_map[DP_TCL_NUM_RING_MAX] = {
+ {
+ .wbm_ring_num = 0,
+ .rbm_id = HAL_RX_BUF_RBM_SW0_BM,
+ },
+ {
+ .wbm_ring_num = 2,
+ .rbm_id = HAL_RX_BUF_RBM_SW2_BM,
+ },
+ {
+ .wbm_ring_num = 4,
+ .rbm_id = HAL_RX_BUF_RBM_SW4_BM,
+ },
+};
+
+static unsigned int ath12k_hal_reo1_ring_id_offset(struct ath12k_base *ab)
+{
+ return HAL_REO1_RING_ID(ab) - HAL_REO1_RING_BASE_LSB(ab);
+}
+
+static unsigned int ath12k_hal_reo1_ring_msi1_base_lsb_offset(struct ath12k_base *ab)
+{
+ return HAL_REO1_RING_MSI1_BASE_LSB(ab) - HAL_REO1_RING_BASE_LSB(ab);
+}
+
+static unsigned int ath12k_hal_reo1_ring_msi1_base_msb_offset(struct ath12k_base *ab)
+{
+ return HAL_REO1_RING_MSI1_BASE_MSB(ab) - HAL_REO1_RING_BASE_LSB(ab);
+}
+
+static unsigned int ath12k_hal_reo1_ring_msi1_data_offset(struct ath12k_base *ab)
+{
+ return HAL_REO1_RING_MSI1_DATA(ab) - HAL_REO1_RING_BASE_LSB(ab);
+}
+
+static unsigned int ath12k_hal_reo1_ring_base_msb_offset(struct ath12k_base *ab)
+{
+ return HAL_REO1_RING_BASE_MSB(ab) - HAL_REO1_RING_BASE_LSB(ab);
+}
+
+static unsigned int ath12k_hal_reo1_ring_producer_int_setup_offset(struct ath12k_base *ab)
+{
+ return HAL_REO1_RING_PRODUCER_INT_SETUP(ab) - HAL_REO1_RING_BASE_LSB(ab);
+}
+
+static unsigned int ath12k_hal_reo1_ring_hp_addr_lsb_offset(struct ath12k_base *ab)
+{
+ return HAL_REO1_RING_HP_ADDR_LSB(ab) - HAL_REO1_RING_BASE_LSB(ab);
+}
+
+static unsigned int ath12k_hal_reo1_ring_hp_addr_msb_offset(struct ath12k_base *ab)
+{
+ return HAL_REO1_RING_HP_ADDR_MSB(ab) - HAL_REO1_RING_BASE_LSB(ab);
+}
+
+static unsigned int ath12k_hal_reo1_ring_misc_offset(struct ath12k_base *ab)
+{
+ return HAL_REO1_RING_MISC(ab) - HAL_REO1_RING_BASE_LSB(ab);
+}
+
+static bool ath12k_hw_qcn9274_rx_desc_get_first_msdu(struct hal_rx_desc *desc)
+{
+ return !!le16_get_bits(desc->u.qcn9274.msdu_end.info5,
+ RX_MSDU_END_INFO5_FIRST_MSDU);
+}
+
+static bool ath12k_hw_qcn9274_rx_desc_get_last_msdu(struct hal_rx_desc *desc)
+{
+ return !!le16_get_bits(desc->u.qcn9274.msdu_end.info5,
+ RX_MSDU_END_INFO5_LAST_MSDU);
+}
+
+static u8 ath12k_hw_qcn9274_rx_desc_get_l3_pad_bytes(struct hal_rx_desc *desc)
+{
+ return le16_get_bits(desc->u.qcn9274.msdu_end.info5,
+ RX_MSDU_END_INFO5_L3_HDR_PADDING);
+}
+
+static bool ath12k_hw_qcn9274_rx_desc_encrypt_valid(struct hal_rx_desc *desc)
+{
+ return !!le32_get_bits(desc->u.qcn9274.mpdu_start.info4,
+ RX_MPDU_START_INFO4_ENCRYPT_INFO_VALID);
+}
+
+static u32 ath12k_hw_qcn9274_rx_desc_get_encrypt_type(struct hal_rx_desc *desc)
+{
+ return le32_get_bits(desc->u.qcn9274.mpdu_start.info2,
+ RX_MPDU_START_INFO2_ENC_TYPE);
+}
+
+static u8 ath12k_hw_qcn9274_rx_desc_get_decap_type(struct hal_rx_desc *desc)
+{
+ return le32_get_bits(desc->u.qcn9274.msdu_end.info11,
+ RX_MSDU_END_INFO11_DECAP_FORMAT);
+}
+
+static u8 ath12k_hw_qcn9274_rx_desc_get_mesh_ctl(struct hal_rx_desc *desc)
+{
+ return le32_get_bits(desc->u.qcn9274.msdu_end.info11,
+ RX_MSDU_END_INFO11_MESH_CTRL_PRESENT);
+}
+
+static bool ath12k_hw_qcn9274_rx_desc_get_mpdu_seq_ctl_vld(struct hal_rx_desc *desc)
+{
+ return !!le32_get_bits(desc->u.qcn9274.mpdu_start.info4,
+ RX_MPDU_START_INFO4_MPDU_SEQ_CTRL_VALID);
+}
+
+static bool ath12k_hw_qcn9274_rx_desc_get_mpdu_fc_valid(struct hal_rx_desc *desc)
+{
+ return !!le32_get_bits(desc->u.qcn9274.mpdu_start.info4,
+ RX_MPDU_START_INFO4_MPDU_FCTRL_VALID);
+}
+
+static u16 ath12k_hw_qcn9274_rx_desc_get_mpdu_start_seq_no(struct hal_rx_desc *desc)
+{
+ return le32_get_bits(desc->u.qcn9274.mpdu_start.info4,
+ RX_MPDU_START_INFO4_MPDU_SEQ_NUM);
+}
+
+static u16 ath12k_hw_qcn9274_rx_desc_get_msdu_len(struct hal_rx_desc *desc)
+{
+ return le32_get_bits(desc->u.qcn9274.msdu_end.info10,
+ RX_MSDU_END_INFO10_MSDU_LENGTH);
+}
+
+static u8 ath12k_hw_qcn9274_rx_desc_get_msdu_sgi(struct hal_rx_desc *desc)
+{
+ return le32_get_bits(desc->u.qcn9274.msdu_end.info12,
+ RX_MSDU_END_INFO12_SGI);
+}
+
+static u8 ath12k_hw_qcn9274_rx_desc_get_msdu_rate_mcs(struct hal_rx_desc *desc)
+{
+ return le32_get_bits(desc->u.qcn9274.msdu_end.info12,
+ RX_MSDU_END_INFO12_RATE_MCS);
+}
+
+static u8 ath12k_hw_qcn9274_rx_desc_get_msdu_rx_bw(struct hal_rx_desc *desc)
+{
+ return le32_get_bits(desc->u.qcn9274.msdu_end.info12,
+ RX_MSDU_END_INFO12_RECV_BW);
+}
+
+static u32 ath12k_hw_qcn9274_rx_desc_get_msdu_freq(struct hal_rx_desc *desc)
+{
+ return __le32_to_cpu(desc->u.qcn9274.msdu_end.phy_meta_data);
+}
+
+static u8 ath12k_hw_qcn9274_rx_desc_get_msdu_pkt_type(struct hal_rx_desc *desc)
+{
+ return le32_get_bits(desc->u.qcn9274.msdu_end.info12,
+ RX_MSDU_END_INFO12_PKT_TYPE);
+}
+
+static u8 ath12k_hw_qcn9274_rx_desc_get_msdu_nss(struct hal_rx_desc *desc)
+{
+ return le32_get_bits(desc->u.qcn9274.msdu_end.info12,
+ RX_MSDU_END_INFO12_MIMO_SS_BITMAP);
+}
+
+static u8 ath12k_hw_qcn9274_rx_desc_get_mpdu_tid(struct hal_rx_desc *desc)
+{
+ return le16_get_bits(desc->u.qcn9274.msdu_end.info5,
+ RX_MSDU_END_INFO5_TID);
+}
+
+static u16 ath12k_hw_qcn9274_rx_desc_get_mpdu_peer_id(struct hal_rx_desc *desc)
+{
+ return __le16_to_cpu(desc->u.qcn9274.mpdu_start.sw_peer_id);
+}
+
+static void ath12k_hw_qcn9274_rx_desc_copy_end_tlv(struct hal_rx_desc *fdesc,
+ struct hal_rx_desc *ldesc)
+{
+ memcpy(&fdesc->u.qcn9274.msdu_end, &ldesc->u.qcn9274.msdu_end,
+ sizeof(struct rx_msdu_end_qcn9274));
+}
+
+static u32 ath12k_hw_qcn9274_rx_desc_get_mpdu_ppdu_id(struct hal_rx_desc *desc)
+{
+ return __le16_to_cpu(desc->u.qcn9274.mpdu_start.phy_ppdu_id);
+}
+
+static void ath12k_hw_qcn9274_rx_desc_set_msdu_len(struct hal_rx_desc *desc, u16 len)
+{
+ u32 info = __le32_to_cpu(desc->u.qcn9274.msdu_end.info10);
+
+ info &= ~RX_MSDU_END_INFO10_MSDU_LENGTH;
+ info |= u32_encode_bits(len, RX_MSDU_END_INFO10_MSDU_LENGTH);
+
+ desc->u.qcn9274.msdu_end.info10 = __cpu_to_le32(info);
+}
+
+static u8 *ath12k_hw_qcn9274_rx_desc_get_msdu_payload(struct hal_rx_desc *desc)
+{
+ return &desc->u.qcn9274.msdu_payload[0];
+}
+
+static u32 ath12k_hw_qcn9274_rx_desc_get_mpdu_start_offset(void)
+{
+ return offsetof(struct hal_rx_desc_qcn9274, mpdu_start);
+}
+
+static u32 ath12k_hw_qcn9274_rx_desc_get_msdu_end_offset(void)
+{
+ return offsetof(struct hal_rx_desc_qcn9274, msdu_end);
+}
+
+static bool ath12k_hw_qcn9274_rx_desc_mac_addr2_valid(struct hal_rx_desc *desc)
+{
+ return __le32_to_cpu(desc->u.qcn9274.mpdu_start.info4) &
+ RX_MPDU_START_INFO4_MAC_ADDR2_VALID;
+}
+
+static u8 *ath12k_hw_qcn9274_rx_desc_mpdu_start_addr2(struct hal_rx_desc *desc)
+{
+ return desc->u.qcn9274.mpdu_start.addr2;
+}
+
+static bool ath12k_hw_qcn9274_rx_desc_is_mcbc(struct hal_rx_desc *desc)
+{
+ return __le32_to_cpu(desc->u.qcn9274.mpdu_start.info6) &
+ RX_MPDU_START_INFO6_MCAST_BCAST;
+}
+
+static void ath12k_hw_qcn9274_rx_desc_get_dot11_hdr(struct hal_rx_desc *desc,
+ struct ieee80211_hdr *hdr)
+{
+ hdr->frame_control = desc->u.qcn9274.mpdu_start.frame_ctrl;
+ hdr->duration_id = desc->u.qcn9274.mpdu_start.duration;
+ ether_addr_copy(hdr->addr1, desc->u.qcn9274.mpdu_start.addr1);
+ ether_addr_copy(hdr->addr2, desc->u.qcn9274.mpdu_start.addr2);
+ ether_addr_copy(hdr->addr3, desc->u.qcn9274.mpdu_start.addr3);
+ if (__le32_to_cpu(desc->u.qcn9274.mpdu_start.info4) &
+ RX_MPDU_START_INFO4_MAC_ADDR4_VALID) {
+ ether_addr_copy(hdr->addr4, desc->u.qcn9274.mpdu_start.addr4);
+ }
+ hdr->seq_ctrl = desc->u.qcn9274.mpdu_start.seq_ctrl;
+}
+
+static void ath12k_hw_qcn9274_rx_desc_get_crypto_hdr(struct hal_rx_desc *desc,
+ u8 *crypto_hdr,
+ enum hal_encrypt_type enctype)
+{
+ unsigned int key_id;
+
+ switch (enctype) {
+ case HAL_ENCRYPT_TYPE_OPEN:
+ return;
+ case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
+ case HAL_ENCRYPT_TYPE_TKIP_MIC:
+ crypto_hdr[0] =
+ HAL_RX_MPDU_INFO_PN_GET_BYTE2(desc->u.qcn9274.mpdu_start.pn[0]);
+ crypto_hdr[1] = 0;
+ crypto_hdr[2] =
+ HAL_RX_MPDU_INFO_PN_GET_BYTE1(desc->u.qcn9274.mpdu_start.pn[0]);
+ break;
+ case HAL_ENCRYPT_TYPE_CCMP_128:
+ case HAL_ENCRYPT_TYPE_CCMP_256:
+ case HAL_ENCRYPT_TYPE_GCMP_128:
+ case HAL_ENCRYPT_TYPE_AES_GCMP_256:
+ crypto_hdr[0] =
+ HAL_RX_MPDU_INFO_PN_GET_BYTE1(desc->u.qcn9274.mpdu_start.pn[0]);
+ crypto_hdr[1] =
+ HAL_RX_MPDU_INFO_PN_GET_BYTE2(desc->u.qcn9274.mpdu_start.pn[0]);
+ crypto_hdr[2] = 0;
+ break;
+ case HAL_ENCRYPT_TYPE_WEP_40:
+ case HAL_ENCRYPT_TYPE_WEP_104:
+ case HAL_ENCRYPT_TYPE_WEP_128:
+ case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
+ case HAL_ENCRYPT_TYPE_WAPI:
+ return;
+ }
+ key_id = le32_get_bits(desc->u.qcn9274.mpdu_start.info5,
+ RX_MPDU_START_INFO5_KEY_ID);
+ crypto_hdr[3] = 0x20 | (key_id << 6);
+ crypto_hdr[4] = HAL_RX_MPDU_INFO_PN_GET_BYTE3(desc->u.qcn9274.mpdu_start.pn[0]);
+ crypto_hdr[5] = HAL_RX_MPDU_INFO_PN_GET_BYTE4(desc->u.qcn9274.mpdu_start.pn[0]);
+ crypto_hdr[6] = HAL_RX_MPDU_INFO_PN_GET_BYTE1(desc->u.qcn9274.mpdu_start.pn[1]);
+ crypto_hdr[7] = HAL_RX_MPDU_INFO_PN_GET_BYTE2(desc->u.qcn9274.mpdu_start.pn[1]);
+}
+
+static u16 ath12k_hw_qcn9274_rx_desc_get_mpdu_frame_ctl(struct hal_rx_desc *desc)
+{
+ return __le16_to_cpu(desc->u.qcn9274.mpdu_start.frame_ctrl);
+}
+
+static int ath12k_hal_srng_create_config_qcn9274(struct ath12k_base *ab)
+{
+ struct ath12k_hal *hal = &ab->hal;
+ struct hal_srng_config *s;
+
+ hal->srng_config = kmemdup(hw_srng_config_template,
+ sizeof(hw_srng_config_template),
+ GFP_KERNEL);
+ if (!hal->srng_config)
+ return -ENOMEM;
+
+ s = &hal->srng_config[HAL_REO_DST];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_RING_BASE_LSB(ab);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_RING_HP;
+ s->reg_size[0] = HAL_REO2_RING_BASE_LSB(ab) - HAL_REO1_RING_BASE_LSB(ab);
+ s->reg_size[1] = HAL_REO2_RING_HP - HAL_REO1_RING_HP;
+
+ s = &hal->srng_config[HAL_REO_EXCEPTION];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_SW0_RING_BASE_LSB(ab);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_SW0_RING_HP;
+
+ s = &hal->srng_config[HAL_REO_REINJECT];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_SW2REO_RING_BASE_LSB(ab);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_SW2REO_RING_HP;
+ s->reg_size[0] = HAL_SW2REO1_RING_BASE_LSB(ab) - HAL_SW2REO_RING_BASE_LSB(ab);
+ s->reg_size[1] = HAL_SW2REO1_RING_HP - HAL_SW2REO_RING_HP;
+
+ s = &hal->srng_config[HAL_REO_CMD];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_CMD_RING_BASE_LSB(ab);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_CMD_HP;
+
+ s = &hal->srng_config[HAL_REO_STATUS];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_STATUS_RING_BASE_LSB(ab);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_STATUS_HP;
+
+ s = &hal->srng_config[HAL_TCL_DATA];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_BASE_LSB;
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_HP;
+ s->reg_size[0] = HAL_TCL2_RING_BASE_LSB - HAL_TCL1_RING_BASE_LSB;
+ s->reg_size[1] = HAL_TCL2_RING_HP - HAL_TCL1_RING_HP;
+
+ s = &hal->srng_config[HAL_TCL_CMD];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_RING_BASE_LSB(ab);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_RING_HP;
+
+ s = &hal->srng_config[HAL_TCL_STATUS];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_STATUS_RING_BASE_LSB(ab);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_STATUS_RING_HP;
+
+ s = &hal->srng_config[HAL_CE_SRC];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG + HAL_CE_DST_RING_BASE_LSB;
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG + HAL_CE_DST_RING_HP;
+ s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG -
+ HAL_SEQ_WCSS_UMAC_CE0_SRC_REG;
+ s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG -
+ HAL_SEQ_WCSS_UMAC_CE0_SRC_REG;
+
+ s = &hal->srng_config[HAL_CE_DST];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG + HAL_CE_DST_RING_BASE_LSB;
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG + HAL_CE_DST_RING_HP;
+ s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG -
+ HAL_SEQ_WCSS_UMAC_CE0_DST_REG;
+ s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG -
+ HAL_SEQ_WCSS_UMAC_CE0_DST_REG;
+
+ s = &hal->srng_config[HAL_CE_DST_STATUS];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG +
+ HAL_CE_DST_STATUS_RING_BASE_LSB;
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG + HAL_CE_DST_STATUS_RING_HP;
+ s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG -
+ HAL_SEQ_WCSS_UMAC_CE0_DST_REG;
+ s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG -
+ HAL_SEQ_WCSS_UMAC_CE0_DST_REG;
+
+ s = &hal->srng_config[HAL_WBM_IDLE_LINK];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_IDLE_LINK_RING_BASE_LSB(ab);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_IDLE_LINK_RING_HP;
+
+ s = &hal->srng_config[HAL_SW2WBM_RELEASE];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG +
+ HAL_WBM_SW_RELEASE_RING_BASE_LSB(ab);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_SW_RELEASE_RING_HP;
+ s->reg_size[0] = HAL_WBM_SW1_RELEASE_RING_BASE_LSB(ab) -
+ HAL_WBM_SW_RELEASE_RING_BASE_LSB(ab);
+ s->reg_size[1] = HAL_WBM_SW1_RELEASE_RING_HP - HAL_WBM_SW_RELEASE_RING_HP;
+
+ s = &hal->srng_config[HAL_WBM2SW_RELEASE];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM0_RELEASE_RING_BASE_LSB(ab);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM0_RELEASE_RING_HP;
+ s->reg_size[0] = HAL_WBM1_RELEASE_RING_BASE_LSB(ab) -
+ HAL_WBM0_RELEASE_RING_BASE_LSB(ab);
+ s->reg_size[1] = HAL_WBM1_RELEASE_RING_HP - HAL_WBM0_RELEASE_RING_HP;
+
+ /* Some LMAC rings are not accesed from the host:
+ * RXDMA_BUG, RXDMA_DST, RXDMA_MONITOR_BUF, RXDMA_MONITOR_STATUS,
+ * RXDMA_MONITOR_DST, RXDMA_MONITOR_DESC, RXDMA_DIR_BUF_SRC,
+ * RXDMA_RX_MONITOR_BUF, TX_MONITOR_BUF, TX_MONITOR_DST, SW2RXDMA
+ */
+ s = &hal->srng_config[HAL_PPE2TCL];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_PPE2TCL1_RING_BASE_LSB;
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_PPE2TCL1_RING_HP;
+
+ s = &hal->srng_config[HAL_PPE_RELEASE];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG +
+ HAL_WBM_PPE_RELEASE_RING_BASE_LSB(ab);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_PPE_RELEASE_RING_HP;
+
+ return 0;
+}
+
+static bool ath12k_hw_qcn9274_dp_rx_h_msdu_done(struct hal_rx_desc *desc)
+{
+ return !!le32_get_bits(desc->u.qcn9274.msdu_end.info14,
+ RX_MSDU_END_INFO14_MSDU_DONE);
+}
+
+static bool ath12k_hw_qcn9274_dp_rx_h_l4_cksum_fail(struct hal_rx_desc *desc)
+{
+ return !!le32_get_bits(desc->u.qcn9274.msdu_end.info13,
+ RX_MSDU_END_INFO13_TCP_UDP_CKSUM_FAIL);
+}
+
+static bool ath12k_hw_qcn9274_dp_rx_h_ip_cksum_fail(struct hal_rx_desc *desc)
+{
+ return !!le32_get_bits(desc->u.qcn9274.msdu_end.info13,
+ RX_MSDU_END_INFO13_IP_CKSUM_FAIL);
+}
+
+static bool ath12k_hw_qcn9274_dp_rx_h_is_decrypted(struct hal_rx_desc *desc)
+{
+ return (le32_get_bits(desc->u.qcn9274.msdu_end.info14,
+ RX_MSDU_END_INFO14_DECRYPT_STATUS_CODE) ==
+ RX_DESC_DECRYPT_STATUS_CODE_OK);
+}
+
+static u32 ath12k_hw_qcn9274_dp_rx_h_mpdu_err(struct hal_rx_desc *desc)
+{
+ u32 info = __le32_to_cpu(desc->u.qcn9274.msdu_end.info13);
+ u32 errmap = 0;
+
+ if (info & RX_MSDU_END_INFO13_FCS_ERR)
+ errmap |= HAL_RX_MPDU_ERR_FCS;
+
+ if (info & RX_MSDU_END_INFO13_DECRYPT_ERR)
+ errmap |= HAL_RX_MPDU_ERR_DECRYPT;
+
+ if (info & RX_MSDU_END_INFO13_TKIP_MIC_ERR)
+ errmap |= HAL_RX_MPDU_ERR_TKIP_MIC;
+
+ if (info & RX_MSDU_END_INFO13_A_MSDU_ERROR)
+ errmap |= HAL_RX_MPDU_ERR_AMSDU_ERR;
+
+ if (info & RX_MSDU_END_INFO13_OVERFLOW_ERR)
+ errmap |= HAL_RX_MPDU_ERR_OVERFLOW;
+
+ if (info & RX_MSDU_END_INFO13_MSDU_LEN_ERR)
+ errmap |= HAL_RX_MPDU_ERR_MSDU_LEN;
+
+ if (info & RX_MSDU_END_INFO13_MPDU_LEN_ERR)
+ errmap |= HAL_RX_MPDU_ERR_MPDU_LEN;
+
+ return errmap;
+}
+
+const struct hal_ops hal_qcn9274_ops = {
+ .rx_desc_get_first_msdu = ath12k_hw_qcn9274_rx_desc_get_first_msdu,
+ .rx_desc_get_last_msdu = ath12k_hw_qcn9274_rx_desc_get_last_msdu,
+ .rx_desc_get_l3_pad_bytes = ath12k_hw_qcn9274_rx_desc_get_l3_pad_bytes,
+ .rx_desc_encrypt_valid = ath12k_hw_qcn9274_rx_desc_encrypt_valid,
+ .rx_desc_get_encrypt_type = ath12k_hw_qcn9274_rx_desc_get_encrypt_type,
+ .rx_desc_get_decap_type = ath12k_hw_qcn9274_rx_desc_get_decap_type,
+ .rx_desc_get_mesh_ctl = ath12k_hw_qcn9274_rx_desc_get_mesh_ctl,
+ .rx_desc_get_mpdu_seq_ctl_vld = ath12k_hw_qcn9274_rx_desc_get_mpdu_seq_ctl_vld,
+ .rx_desc_get_mpdu_fc_valid = ath12k_hw_qcn9274_rx_desc_get_mpdu_fc_valid,
+ .rx_desc_get_mpdu_start_seq_no = ath12k_hw_qcn9274_rx_desc_get_mpdu_start_seq_no,
+ .rx_desc_get_msdu_len = ath12k_hw_qcn9274_rx_desc_get_msdu_len,
+ .rx_desc_get_msdu_sgi = ath12k_hw_qcn9274_rx_desc_get_msdu_sgi,
+ .rx_desc_get_msdu_rate_mcs = ath12k_hw_qcn9274_rx_desc_get_msdu_rate_mcs,
+ .rx_desc_get_msdu_rx_bw = ath12k_hw_qcn9274_rx_desc_get_msdu_rx_bw,
+ .rx_desc_get_msdu_freq = ath12k_hw_qcn9274_rx_desc_get_msdu_freq,
+ .rx_desc_get_msdu_pkt_type = ath12k_hw_qcn9274_rx_desc_get_msdu_pkt_type,
+ .rx_desc_get_msdu_nss = ath12k_hw_qcn9274_rx_desc_get_msdu_nss,
+ .rx_desc_get_mpdu_tid = ath12k_hw_qcn9274_rx_desc_get_mpdu_tid,
+ .rx_desc_get_mpdu_peer_id = ath12k_hw_qcn9274_rx_desc_get_mpdu_peer_id,
+ .rx_desc_copy_end_tlv = ath12k_hw_qcn9274_rx_desc_copy_end_tlv,
+ .rx_desc_get_mpdu_ppdu_id = ath12k_hw_qcn9274_rx_desc_get_mpdu_ppdu_id,
+ .rx_desc_set_msdu_len = ath12k_hw_qcn9274_rx_desc_set_msdu_len,
+ .rx_desc_get_msdu_payload = ath12k_hw_qcn9274_rx_desc_get_msdu_payload,
+ .rx_desc_get_mpdu_start_offset = ath12k_hw_qcn9274_rx_desc_get_mpdu_start_offset,
+ .rx_desc_get_msdu_end_offset = ath12k_hw_qcn9274_rx_desc_get_msdu_end_offset,
+ .rx_desc_mac_addr2_valid = ath12k_hw_qcn9274_rx_desc_mac_addr2_valid,
+ .rx_desc_mpdu_start_addr2 = ath12k_hw_qcn9274_rx_desc_mpdu_start_addr2,
+ .rx_desc_is_mcbc = ath12k_hw_qcn9274_rx_desc_is_mcbc,
+ .rx_desc_get_dot11_hdr = ath12k_hw_qcn9274_rx_desc_get_dot11_hdr,
+ .rx_desc_get_crypto_header = ath12k_hw_qcn9274_rx_desc_get_crypto_hdr,
+ .rx_desc_get_mpdu_frame_ctl = ath12k_hw_qcn9274_rx_desc_get_mpdu_frame_ctl,
+ .create_srng_config = ath12k_hal_srng_create_config_qcn9274,
+ .tcl_to_wbm_rbm_map = ath12k_hal_qcn9274_tcl_to_wbm_rbm_map,
+ .dp_rx_h_msdu_done = ath12k_hw_qcn9274_dp_rx_h_msdu_done,
+ .dp_rx_h_l4_cksum_fail = ath12k_hw_qcn9274_dp_rx_h_l4_cksum_fail,
+ .dp_rx_h_ip_cksum_fail = ath12k_hw_qcn9274_dp_rx_h_ip_cksum_fail,
+ .dp_rx_h_is_decrypted = ath12k_hw_qcn9274_dp_rx_h_is_decrypted,
+ .dp_rx_h_mpdu_err = ath12k_hw_qcn9274_dp_rx_h_mpdu_err,
+};
+
+static bool ath12k_hw_wcn7850_rx_desc_get_first_msdu(struct hal_rx_desc *desc)
+{
+ return !!le16_get_bits(desc->u.wcn7850.msdu_end.info5,
+ RX_MSDU_END_INFO5_FIRST_MSDU);
+}
+
+static bool ath12k_hw_wcn7850_rx_desc_get_last_msdu(struct hal_rx_desc *desc)
+{
+ return !!le16_get_bits(desc->u.wcn7850.msdu_end.info5,
+ RX_MSDU_END_INFO5_LAST_MSDU);
+}
+
+static u8 ath12k_hw_wcn7850_rx_desc_get_l3_pad_bytes(struct hal_rx_desc *desc)
+{
+ return le16_get_bits(desc->u.wcn7850.msdu_end.info5,
+ RX_MSDU_END_INFO5_L3_HDR_PADDING);
+}
+
+static bool ath12k_hw_wcn7850_rx_desc_encrypt_valid(struct hal_rx_desc *desc)
+{
+ return !!le32_get_bits(desc->u.wcn7850.mpdu_start.info4,
+ RX_MPDU_START_INFO4_ENCRYPT_INFO_VALID);
+}
+
+static u32 ath12k_hw_wcn7850_rx_desc_get_encrypt_type(struct hal_rx_desc *desc)
+{
+ return le32_get_bits(desc->u.wcn7850.mpdu_start.info2,
+ RX_MPDU_START_INFO2_ENC_TYPE);
+}
+
+static u8 ath12k_hw_wcn7850_rx_desc_get_decap_type(struct hal_rx_desc *desc)
+{
+ return le32_get_bits(desc->u.wcn7850.msdu_end.info11,
+ RX_MSDU_END_INFO11_DECAP_FORMAT);
+}
+
+static u8 ath12k_hw_wcn7850_rx_desc_get_mesh_ctl(struct hal_rx_desc *desc)
+{
+ return le32_get_bits(desc->u.wcn7850.msdu_end.info11,
+ RX_MSDU_END_INFO11_MESH_CTRL_PRESENT);
+}
+
+static bool ath12k_hw_wcn7850_rx_desc_get_mpdu_seq_ctl_vld(struct hal_rx_desc *desc)
+{
+ return !!le32_get_bits(desc->u.wcn7850.mpdu_start.info4,
+ RX_MPDU_START_INFO4_MPDU_SEQ_CTRL_VALID);
+}
+
+static bool ath12k_hw_wcn7850_rx_desc_get_mpdu_fc_valid(struct hal_rx_desc *desc)
+{
+ return !!le32_get_bits(desc->u.wcn7850.mpdu_start.info4,
+ RX_MPDU_START_INFO4_MPDU_FCTRL_VALID);
+}
+
+static u16 ath12k_hw_wcn7850_rx_desc_get_mpdu_start_seq_no(struct hal_rx_desc *desc)
+{
+ return le32_get_bits(desc->u.wcn7850.mpdu_start.info4,
+ RX_MPDU_START_INFO4_MPDU_SEQ_NUM);
+}
+
+static u16 ath12k_hw_wcn7850_rx_desc_get_msdu_len(struct hal_rx_desc *desc)
+{
+ return le32_get_bits(desc->u.wcn7850.msdu_end.info10,
+ RX_MSDU_END_INFO10_MSDU_LENGTH);
+}
+
+static u8 ath12k_hw_wcn7850_rx_desc_get_msdu_sgi(struct hal_rx_desc *desc)
+{
+ return le32_get_bits(desc->u.wcn7850.msdu_end.info12,
+ RX_MSDU_END_INFO12_SGI);
+}
+
+static u8 ath12k_hw_wcn7850_rx_desc_get_msdu_rate_mcs(struct hal_rx_desc *desc)
+{
+ return le32_get_bits(desc->u.wcn7850.msdu_end.info12,
+ RX_MSDU_END_INFO12_RATE_MCS);
+}
+
+static u8 ath12k_hw_wcn7850_rx_desc_get_msdu_rx_bw(struct hal_rx_desc *desc)
+{
+ return le32_get_bits(desc->u.wcn7850.msdu_end.info12,
+ RX_MSDU_END_INFO12_RECV_BW);
+}
+
+static u32 ath12k_hw_wcn7850_rx_desc_get_msdu_freq(struct hal_rx_desc *desc)
+{
+ return __le32_to_cpu(desc->u.wcn7850.msdu_end.phy_meta_data);
+}
+
+static u8 ath12k_hw_wcn7850_rx_desc_get_msdu_pkt_type(struct hal_rx_desc *desc)
+{
+ return le32_get_bits(desc->u.wcn7850.msdu_end.info12,
+ RX_MSDU_END_INFO12_PKT_TYPE);
+}
+
+static u8 ath12k_hw_wcn7850_rx_desc_get_msdu_nss(struct hal_rx_desc *desc)
+{
+ return le32_get_bits(desc->u.wcn7850.msdu_end.info12,
+ RX_MSDU_END_INFO12_MIMO_SS_BITMAP);
+}
+
+static u8 ath12k_hw_wcn7850_rx_desc_get_mpdu_tid(struct hal_rx_desc *desc)
+{
+ return le16_get_bits(desc->u.wcn7850.msdu_end.info5,
+ RX_MSDU_END_INFO5_TID);
+}
+
+static u16 ath12k_hw_wcn7850_rx_desc_get_mpdu_peer_id(struct hal_rx_desc *desc)
+{
+ return __le16_to_cpu(desc->u.wcn7850.mpdu_start.sw_peer_id);
+}
+
+static void ath12k_hw_wcn7850_rx_desc_copy_end_tlv(struct hal_rx_desc *fdesc,
+ struct hal_rx_desc *ldesc)
+{
+ memcpy(&fdesc->u.wcn7850.msdu_end, &ldesc->u.wcn7850.msdu_end,
+ sizeof(struct rx_msdu_end_qcn9274));
+}
+
+static u32 ath12k_hw_wcn7850_rx_desc_get_mpdu_start_tag(struct hal_rx_desc *desc)
+{
+ return le64_get_bits(desc->u.wcn7850.mpdu_start_tag,
+ HAL_TLV_HDR_TAG);
+}
+
+static u32 ath12k_hw_wcn7850_rx_desc_get_mpdu_ppdu_id(struct hal_rx_desc *desc)
+{
+ return __le16_to_cpu(desc->u.wcn7850.mpdu_start.phy_ppdu_id);
+}
+
+static void ath12k_hw_wcn7850_rx_desc_set_msdu_len(struct hal_rx_desc *desc, u16 len)
+{
+ u32 info = __le32_to_cpu(desc->u.wcn7850.msdu_end.info10);
+
+ info &= ~RX_MSDU_END_INFO10_MSDU_LENGTH;
+ info |= u32_encode_bits(len, RX_MSDU_END_INFO10_MSDU_LENGTH);
+
+ desc->u.wcn7850.msdu_end.info10 = __cpu_to_le32(info);
+}
+
+static u8 *ath12k_hw_wcn7850_rx_desc_get_msdu_payload(struct hal_rx_desc *desc)
+{
+ return &desc->u.wcn7850.msdu_payload[0];
+}
+
+static u32 ath12k_hw_wcn7850_rx_desc_get_mpdu_start_offset(void)
+{
+ return offsetof(struct hal_rx_desc_wcn7850, mpdu_start_tag);
+}
+
+static u32 ath12k_hw_wcn7850_rx_desc_get_msdu_end_offset(void)
+{
+ return offsetof(struct hal_rx_desc_wcn7850, msdu_end_tag);
+}
+
+static bool ath12k_hw_wcn7850_rx_desc_mac_addr2_valid(struct hal_rx_desc *desc)
+{
+ return __le32_to_cpu(desc->u.wcn7850.mpdu_start.info4) &
+ RX_MPDU_START_INFO4_MAC_ADDR2_VALID;
+}
+
+static u8 *ath12k_hw_wcn7850_rx_desc_mpdu_start_addr2(struct hal_rx_desc *desc)
+{
+ return desc->u.wcn7850.mpdu_start.addr2;
+}
+
+static bool ath12k_hw_wcn7850_rx_desc_is_mcbc(struct hal_rx_desc *desc)
+{
+ return __le32_to_cpu(desc->u.wcn7850.mpdu_start.info6) &
+ RX_MPDU_START_INFO6_MCAST_BCAST;
+}
+
+static void ath12k_hw_wcn7850_rx_desc_get_dot11_hdr(struct hal_rx_desc *desc,
+ struct ieee80211_hdr *hdr)
+{
+ hdr->frame_control = desc->u.wcn7850.mpdu_start.frame_ctrl;
+ hdr->duration_id = desc->u.wcn7850.mpdu_start.duration;
+ ether_addr_copy(hdr->addr1, desc->u.wcn7850.mpdu_start.addr1);
+ ether_addr_copy(hdr->addr2, desc->u.wcn7850.mpdu_start.addr2);
+ ether_addr_copy(hdr->addr3, desc->u.wcn7850.mpdu_start.addr3);
+ if (__le32_to_cpu(desc->u.wcn7850.mpdu_start.info4) &
+ RX_MPDU_START_INFO4_MAC_ADDR4_VALID) {
+ ether_addr_copy(hdr->addr4, desc->u.wcn7850.mpdu_start.addr4);
+ }
+ hdr->seq_ctrl = desc->u.wcn7850.mpdu_start.seq_ctrl;
+}
+
+static void ath12k_hw_wcn7850_rx_desc_get_crypto_hdr(struct hal_rx_desc *desc,
+ u8 *crypto_hdr,
+ enum hal_encrypt_type enctype)
+{
+ unsigned int key_id;
+
+ switch (enctype) {
+ case HAL_ENCRYPT_TYPE_OPEN:
+ return;
+ case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
+ case HAL_ENCRYPT_TYPE_TKIP_MIC:
+ crypto_hdr[0] =
+ HAL_RX_MPDU_INFO_PN_GET_BYTE2(desc->u.wcn7850.mpdu_start.pn[0]);
+ crypto_hdr[1] = 0;
+ crypto_hdr[2] =
+ HAL_RX_MPDU_INFO_PN_GET_BYTE1(desc->u.wcn7850.mpdu_start.pn[0]);
+ break;
+ case HAL_ENCRYPT_TYPE_CCMP_128:
+ case HAL_ENCRYPT_TYPE_CCMP_256:
+ case HAL_ENCRYPT_TYPE_GCMP_128:
+ case HAL_ENCRYPT_TYPE_AES_GCMP_256:
+ crypto_hdr[0] =
+ HAL_RX_MPDU_INFO_PN_GET_BYTE1(desc->u.wcn7850.mpdu_start.pn[0]);
+ crypto_hdr[1] =
+ HAL_RX_MPDU_INFO_PN_GET_BYTE2(desc->u.wcn7850.mpdu_start.pn[0]);
+ crypto_hdr[2] = 0;
+ break;
+ case HAL_ENCRYPT_TYPE_WEP_40:
+ case HAL_ENCRYPT_TYPE_WEP_104:
+ case HAL_ENCRYPT_TYPE_WEP_128:
+ case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
+ case HAL_ENCRYPT_TYPE_WAPI:
+ return;
+ }
+ key_id = u32_get_bits(__le32_to_cpu(desc->u.wcn7850.mpdu_start.info5),
+ RX_MPDU_START_INFO5_KEY_ID);
+ crypto_hdr[3] = 0x20 | (key_id << 6);
+ crypto_hdr[4] = HAL_RX_MPDU_INFO_PN_GET_BYTE3(desc->u.wcn7850.mpdu_start.pn[0]);
+ crypto_hdr[5] = HAL_RX_MPDU_INFO_PN_GET_BYTE4(desc->u.wcn7850.mpdu_start.pn[0]);
+ crypto_hdr[6] = HAL_RX_MPDU_INFO_PN_GET_BYTE1(desc->u.wcn7850.mpdu_start.pn[1]);
+ crypto_hdr[7] = HAL_RX_MPDU_INFO_PN_GET_BYTE2(desc->u.wcn7850.mpdu_start.pn[1]);
+}
+
+static u16 ath12k_hw_wcn7850_rx_desc_get_mpdu_frame_ctl(struct hal_rx_desc *desc)
+{
+ return __le16_to_cpu(desc->u.wcn7850.mpdu_start.frame_ctrl);
+}
+
+static int ath12k_hal_srng_create_config_wcn7850(struct ath12k_base *ab)
+{
+ struct ath12k_hal *hal = &ab->hal;
+ struct hal_srng_config *s;
+
+ hal->srng_config = kmemdup(hw_srng_config_template,
+ sizeof(hw_srng_config_template),
+ GFP_KERNEL);
+ if (!hal->srng_config)
+ return -ENOMEM;
+
+ s = &hal->srng_config[HAL_REO_DST];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_RING_BASE_LSB(ab);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_RING_HP;
+ s->reg_size[0] = HAL_REO2_RING_BASE_LSB(ab) - HAL_REO1_RING_BASE_LSB(ab);
+ s->reg_size[1] = HAL_REO2_RING_HP - HAL_REO1_RING_HP;
+
+ s = &hal->srng_config[HAL_REO_EXCEPTION];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_SW0_RING_BASE_LSB(ab);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_SW0_RING_HP;
+
+ s = &hal->srng_config[HAL_REO_REINJECT];
+ s->max_rings = 1;
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_SW2REO_RING_BASE_LSB(ab);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_SW2REO_RING_HP;
+
+ s = &hal->srng_config[HAL_REO_CMD];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_CMD_RING_BASE_LSB(ab);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_CMD_HP;
+
+ s = &hal->srng_config[HAL_REO_STATUS];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_STATUS_RING_BASE_LSB(ab);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_STATUS_HP;
+
+ s = &hal->srng_config[HAL_TCL_DATA];
+ s->max_rings = 5;
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_BASE_LSB;
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_HP;
+ s->reg_size[0] = HAL_TCL2_RING_BASE_LSB - HAL_TCL1_RING_BASE_LSB;
+ s->reg_size[1] = HAL_TCL2_RING_HP - HAL_TCL1_RING_HP;
+
+ s = &hal->srng_config[HAL_TCL_CMD];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_RING_BASE_LSB(ab);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_RING_HP;
+
+ s = &hal->srng_config[HAL_TCL_STATUS];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_STATUS_RING_BASE_LSB(ab);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_STATUS_RING_HP;
+
+ s = &hal->srng_config[HAL_CE_SRC];
+ s->max_rings = 12;
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG + HAL_CE_DST_RING_BASE_LSB;
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG + HAL_CE_DST_RING_HP;
+ s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG -
+ HAL_SEQ_WCSS_UMAC_CE0_SRC_REG;
+ s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG -
+ HAL_SEQ_WCSS_UMAC_CE0_SRC_REG;
+
+ s = &hal->srng_config[HAL_CE_DST];
+ s->max_rings = 12;
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG + HAL_CE_DST_RING_BASE_LSB;
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG + HAL_CE_DST_RING_HP;
+ s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG -
+ HAL_SEQ_WCSS_UMAC_CE0_DST_REG;
+ s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG -
+ HAL_SEQ_WCSS_UMAC_CE0_DST_REG;
+
+ s = &hal->srng_config[HAL_CE_DST_STATUS];
+ s->max_rings = 12;
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG +
+ HAL_CE_DST_STATUS_RING_BASE_LSB;
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG + HAL_CE_DST_STATUS_RING_HP;
+ s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG -
+ HAL_SEQ_WCSS_UMAC_CE0_DST_REG;
+ s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG -
+ HAL_SEQ_WCSS_UMAC_CE0_DST_REG;
+
+ s = &hal->srng_config[HAL_WBM_IDLE_LINK];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_IDLE_LINK_RING_BASE_LSB(ab);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_IDLE_LINK_RING_HP;
+
+ s = &hal->srng_config[HAL_SW2WBM_RELEASE];
+ s->max_rings = 1;
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG +
+ HAL_WBM_SW_RELEASE_RING_BASE_LSB(ab);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_SW_RELEASE_RING_HP;
+
+ s = &hal->srng_config[HAL_WBM2SW_RELEASE];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM0_RELEASE_RING_BASE_LSB(ab);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM0_RELEASE_RING_HP;
+ s->reg_size[0] = HAL_WBM1_RELEASE_RING_BASE_LSB(ab) -
+ HAL_WBM0_RELEASE_RING_BASE_LSB(ab);
+ s->reg_size[1] = HAL_WBM1_RELEASE_RING_HP - HAL_WBM0_RELEASE_RING_HP;
+
+ s = &hal->srng_config[HAL_RXDMA_BUF];
+ s->max_rings = 2;
+ s->mac_type = ATH12K_HAL_SRNG_PMAC;
+
+ s = &hal->srng_config[HAL_RXDMA_DST];
+ s->max_rings = 1;
+ s->entry_size = sizeof(struct hal_reo_entrance_ring) >> 2;
+
+ /* below rings are not used */
+ s = &hal->srng_config[HAL_RXDMA_DIR_BUF];
+ s->max_rings = 0;
+
+ s = &hal->srng_config[HAL_PPE2TCL];
+ s->max_rings = 0;
+
+ s = &hal->srng_config[HAL_PPE_RELEASE];
+ s->max_rings = 0;
+
+ s = &hal->srng_config[HAL_TX_MONITOR_BUF];
+ s->max_rings = 0;
+
+ s = &hal->srng_config[HAL_TX_MONITOR_DST];
+ s->max_rings = 0;
+
+ s = &hal->srng_config[HAL_PPE2TCL];
+ s->max_rings = 0;
+
+ return 0;
+}
+
+static bool ath12k_hw_wcn7850_dp_rx_h_msdu_done(struct hal_rx_desc *desc)
+{
+ return !!le32_get_bits(desc->u.wcn7850.msdu_end.info14,
+ RX_MSDU_END_INFO14_MSDU_DONE);
+}
+
+static bool ath12k_hw_wcn7850_dp_rx_h_l4_cksum_fail(struct hal_rx_desc *desc)
+{
+ return !!le32_get_bits(desc->u.wcn7850.msdu_end.info13,
+ RX_MSDU_END_INFO13_TCP_UDP_CKSUM_FAIL);
+}
+
+static bool ath12k_hw_wcn7850_dp_rx_h_ip_cksum_fail(struct hal_rx_desc *desc)
+{
+ return !!le32_get_bits(desc->u.wcn7850.msdu_end.info13,
+ RX_MSDU_END_INFO13_IP_CKSUM_FAIL);
+}
+
+static bool ath12k_hw_wcn7850_dp_rx_h_is_decrypted(struct hal_rx_desc *desc)
+{
+ return (le32_get_bits(desc->u.wcn7850.msdu_end.info14,
+ RX_MSDU_END_INFO14_DECRYPT_STATUS_CODE) ==
+ RX_DESC_DECRYPT_STATUS_CODE_OK);
+}
+
+static u32 ath12k_hw_wcn7850_dp_rx_h_mpdu_err(struct hal_rx_desc *desc)
+{
+ u32 info = __le32_to_cpu(desc->u.wcn7850.msdu_end.info13);
+ u32 errmap = 0;
+
+ if (info & RX_MSDU_END_INFO13_FCS_ERR)
+ errmap |= HAL_RX_MPDU_ERR_FCS;
+
+ if (info & RX_MSDU_END_INFO13_DECRYPT_ERR)
+ errmap |= HAL_RX_MPDU_ERR_DECRYPT;
+
+ if (info & RX_MSDU_END_INFO13_TKIP_MIC_ERR)
+ errmap |= HAL_RX_MPDU_ERR_TKIP_MIC;
+
+ if (info & RX_MSDU_END_INFO13_A_MSDU_ERROR)
+ errmap |= HAL_RX_MPDU_ERR_AMSDU_ERR;
+
+ if (info & RX_MSDU_END_INFO13_OVERFLOW_ERR)
+ errmap |= HAL_RX_MPDU_ERR_OVERFLOW;
+
+ if (info & RX_MSDU_END_INFO13_MSDU_LEN_ERR)
+ errmap |= HAL_RX_MPDU_ERR_MSDU_LEN;
+
+ if (info & RX_MSDU_END_INFO13_MPDU_LEN_ERR)
+ errmap |= HAL_RX_MPDU_ERR_MPDU_LEN;
+
+ return errmap;
+}
+
+const struct hal_ops hal_wcn7850_ops = {
+ .rx_desc_get_first_msdu = ath12k_hw_wcn7850_rx_desc_get_first_msdu,
+ .rx_desc_get_last_msdu = ath12k_hw_wcn7850_rx_desc_get_last_msdu,
+ .rx_desc_get_l3_pad_bytes = ath12k_hw_wcn7850_rx_desc_get_l3_pad_bytes,
+ .rx_desc_encrypt_valid = ath12k_hw_wcn7850_rx_desc_encrypt_valid,
+ .rx_desc_get_encrypt_type = ath12k_hw_wcn7850_rx_desc_get_encrypt_type,
+ .rx_desc_get_decap_type = ath12k_hw_wcn7850_rx_desc_get_decap_type,
+ .rx_desc_get_mesh_ctl = ath12k_hw_wcn7850_rx_desc_get_mesh_ctl,
+ .rx_desc_get_mpdu_seq_ctl_vld = ath12k_hw_wcn7850_rx_desc_get_mpdu_seq_ctl_vld,
+ .rx_desc_get_mpdu_fc_valid = ath12k_hw_wcn7850_rx_desc_get_mpdu_fc_valid,
+ .rx_desc_get_mpdu_start_seq_no = ath12k_hw_wcn7850_rx_desc_get_mpdu_start_seq_no,
+ .rx_desc_get_msdu_len = ath12k_hw_wcn7850_rx_desc_get_msdu_len,
+ .rx_desc_get_msdu_sgi = ath12k_hw_wcn7850_rx_desc_get_msdu_sgi,
+ .rx_desc_get_msdu_rate_mcs = ath12k_hw_wcn7850_rx_desc_get_msdu_rate_mcs,
+ .rx_desc_get_msdu_rx_bw = ath12k_hw_wcn7850_rx_desc_get_msdu_rx_bw,
+ .rx_desc_get_msdu_freq = ath12k_hw_wcn7850_rx_desc_get_msdu_freq,
+ .rx_desc_get_msdu_pkt_type = ath12k_hw_wcn7850_rx_desc_get_msdu_pkt_type,
+ .rx_desc_get_msdu_nss = ath12k_hw_wcn7850_rx_desc_get_msdu_nss,
+ .rx_desc_get_mpdu_tid = ath12k_hw_wcn7850_rx_desc_get_mpdu_tid,
+ .rx_desc_get_mpdu_peer_id = ath12k_hw_wcn7850_rx_desc_get_mpdu_peer_id,
+ .rx_desc_copy_end_tlv = ath12k_hw_wcn7850_rx_desc_copy_end_tlv,
+ .rx_desc_get_mpdu_start_tag = ath12k_hw_wcn7850_rx_desc_get_mpdu_start_tag,
+ .rx_desc_get_mpdu_ppdu_id = ath12k_hw_wcn7850_rx_desc_get_mpdu_ppdu_id,
+ .rx_desc_set_msdu_len = ath12k_hw_wcn7850_rx_desc_set_msdu_len,
+ .rx_desc_get_msdu_payload = ath12k_hw_wcn7850_rx_desc_get_msdu_payload,
+ .rx_desc_get_mpdu_start_offset = ath12k_hw_wcn7850_rx_desc_get_mpdu_start_offset,
+ .rx_desc_get_msdu_end_offset = ath12k_hw_wcn7850_rx_desc_get_msdu_end_offset,
+ .rx_desc_mac_addr2_valid = ath12k_hw_wcn7850_rx_desc_mac_addr2_valid,
+ .rx_desc_mpdu_start_addr2 = ath12k_hw_wcn7850_rx_desc_mpdu_start_addr2,
+ .rx_desc_is_mcbc = ath12k_hw_wcn7850_rx_desc_is_mcbc,
+ .rx_desc_get_dot11_hdr = ath12k_hw_wcn7850_rx_desc_get_dot11_hdr,
+ .rx_desc_get_crypto_header = ath12k_hw_wcn7850_rx_desc_get_crypto_hdr,
+ .rx_desc_get_mpdu_frame_ctl = ath12k_hw_wcn7850_rx_desc_get_mpdu_frame_ctl,
+ .create_srng_config = ath12k_hal_srng_create_config_wcn7850,
+ .tcl_to_wbm_rbm_map = ath12k_hal_wcn7850_tcl_to_wbm_rbm_map,
+ .dp_rx_h_msdu_done = ath12k_hw_wcn7850_dp_rx_h_msdu_done,
+ .dp_rx_h_l4_cksum_fail = ath12k_hw_wcn7850_dp_rx_h_l4_cksum_fail,
+ .dp_rx_h_ip_cksum_fail = ath12k_hw_wcn7850_dp_rx_h_ip_cksum_fail,
+ .dp_rx_h_is_decrypted = ath12k_hw_wcn7850_dp_rx_h_is_decrypted,
+ .dp_rx_h_mpdu_err = ath12k_hw_wcn7850_dp_rx_h_mpdu_err,
+};
+
+static int ath12k_hal_alloc_cont_rdp(struct ath12k_base *ab)
+{
+ struct ath12k_hal *hal = &ab->hal;
+ size_t size;
+
+ size = sizeof(u32) * HAL_SRNG_RING_ID_MAX;
+ hal->rdp.vaddr = dma_alloc_coherent(ab->dev, size, &hal->rdp.paddr,
+ GFP_KERNEL);
+ if (!hal->rdp.vaddr)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void ath12k_hal_free_cont_rdp(struct ath12k_base *ab)
+{
+ struct ath12k_hal *hal = &ab->hal;
+ size_t size;
+
+ if (!hal->rdp.vaddr)
+ return;
+
+ size = sizeof(u32) * HAL_SRNG_RING_ID_MAX;
+ dma_free_coherent(ab->dev, size,
+ hal->rdp.vaddr, hal->rdp.paddr);
+ hal->rdp.vaddr = NULL;
+}
+
+static int ath12k_hal_alloc_cont_wrp(struct ath12k_base *ab)
+{
+ struct ath12k_hal *hal = &ab->hal;
+ size_t size;
+
+ size = sizeof(u32) * (HAL_SRNG_NUM_PMAC_RINGS + HAL_SRNG_NUM_DMAC_RINGS);
+ hal->wrp.vaddr = dma_alloc_coherent(ab->dev, size, &hal->wrp.paddr,
+ GFP_KERNEL);
+ if (!hal->wrp.vaddr)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void ath12k_hal_free_cont_wrp(struct ath12k_base *ab)
+{
+ struct ath12k_hal *hal = &ab->hal;
+ size_t size;
+
+ if (!hal->wrp.vaddr)
+ return;
+
+ size = sizeof(u32) * (HAL_SRNG_NUM_PMAC_RINGS + HAL_SRNG_NUM_DMAC_RINGS);
+ dma_free_coherent(ab->dev, size,
+ hal->wrp.vaddr, hal->wrp.paddr);
+ hal->wrp.vaddr = NULL;
+}
+
+static void ath12k_hal_ce_dst_setup(struct ath12k_base *ab,
+ struct hal_srng *srng, int ring_num)
+{
+ struct hal_srng_config *srng_config = &ab->hal.srng_config[HAL_CE_DST];
+ u32 addr;
+ u32 val;
+
+ addr = HAL_CE_DST_RING_CTRL +
+ srng_config->reg_start[HAL_SRNG_REG_GRP_R0] +
+ ring_num * srng_config->reg_size[HAL_SRNG_REG_GRP_R0];
+
+ val = ath12k_hif_read32(ab, addr);
+ val &= ~HAL_CE_DST_R0_DEST_CTRL_MAX_LEN;
+ val |= u32_encode_bits(srng->u.dst_ring.max_buffer_length,
+ HAL_CE_DST_R0_DEST_CTRL_MAX_LEN);
+ ath12k_hif_write32(ab, addr, val);
+}
+
+static void ath12k_hal_srng_dst_hw_init(struct ath12k_base *ab,
+ struct hal_srng *srng)
+{
+ struct ath12k_hal *hal = &ab->hal;
+ u32 val;
+ u64 hp_addr;
+ u32 reg_base;
+
+ reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];
+
+ if (srng->flags & HAL_SRNG_FLAGS_MSI_INTR) {
+ ath12k_hif_write32(ab, reg_base +
+ ath12k_hal_reo1_ring_msi1_base_lsb_offset(ab),
+ srng->msi_addr);
+
+ val = u32_encode_bits(((u64)srng->msi_addr >> HAL_ADDR_MSB_REG_SHIFT),
+ HAL_REO1_RING_MSI1_BASE_MSB_ADDR) |
+ HAL_REO1_RING_MSI1_BASE_MSB_MSI1_ENABLE;
+ ath12k_hif_write32(ab, reg_base +
+ ath12k_hal_reo1_ring_msi1_base_msb_offset(ab), val);
+
+ ath12k_hif_write32(ab,
+ reg_base + ath12k_hal_reo1_ring_msi1_data_offset(ab),
+ srng->msi_data);
+ }
+
+ ath12k_hif_write32(ab, reg_base, srng->ring_base_paddr);
+
+ val = u32_encode_bits(((u64)srng->ring_base_paddr >> HAL_ADDR_MSB_REG_SHIFT),
+ HAL_REO1_RING_BASE_MSB_RING_BASE_ADDR_MSB) |
+ u32_encode_bits((srng->entry_size * srng->num_entries),
+ HAL_REO1_RING_BASE_MSB_RING_SIZE);
+ ath12k_hif_write32(ab, reg_base + ath12k_hal_reo1_ring_base_msb_offset(ab), val);
+
+ val = u32_encode_bits(srng->ring_id, HAL_REO1_RING_ID_RING_ID) |
+ u32_encode_bits(srng->entry_size, HAL_REO1_RING_ID_ENTRY_SIZE);
+ ath12k_hif_write32(ab, reg_base + ath12k_hal_reo1_ring_id_offset(ab), val);
+
+ /* interrupt setup */
+ val = u32_encode_bits((srng->intr_timer_thres_us >> 3),
+ HAL_REO1_RING_PRDR_INT_SETUP_INTR_TMR_THOLD);
+
+ val |= u32_encode_bits((srng->intr_batch_cntr_thres_entries * srng->entry_size),
+ HAL_REO1_RING_PRDR_INT_SETUP_BATCH_COUNTER_THOLD);
+
+ ath12k_hif_write32(ab,
+ reg_base + ath12k_hal_reo1_ring_producer_int_setup_offset(ab),
+ val);
+
+ hp_addr = hal->rdp.paddr +
+ ((unsigned long)srng->u.dst_ring.hp_addr -
+ (unsigned long)hal->rdp.vaddr);
+ ath12k_hif_write32(ab, reg_base + ath12k_hal_reo1_ring_hp_addr_lsb_offset(ab),
+ hp_addr & HAL_ADDR_LSB_REG_MASK);
+ ath12k_hif_write32(ab, reg_base + ath12k_hal_reo1_ring_hp_addr_msb_offset(ab),
+ hp_addr >> HAL_ADDR_MSB_REG_SHIFT);
+
+ /* Initialize head and tail pointers to indicate ring is empty */
+ reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R2];
+ ath12k_hif_write32(ab, reg_base, 0);
+ ath12k_hif_write32(ab, reg_base + HAL_REO1_RING_TP_OFFSET, 0);
+ *srng->u.dst_ring.hp_addr = 0;
+
+ reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];
+ val = 0;
+ if (srng->flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP)
+ val |= HAL_REO1_RING_MISC_DATA_TLV_SWAP;
+ if (srng->flags & HAL_SRNG_FLAGS_RING_PTR_SWAP)
+ val |= HAL_REO1_RING_MISC_HOST_FW_SWAP;
+ if (srng->flags & HAL_SRNG_FLAGS_MSI_SWAP)
+ val |= HAL_REO1_RING_MISC_MSI_SWAP;
+ val |= HAL_REO1_RING_MISC_SRNG_ENABLE;
+
+ ath12k_hif_write32(ab, reg_base + ath12k_hal_reo1_ring_misc_offset(ab), val);
+}
+
+static void ath12k_hal_srng_src_hw_init(struct ath12k_base *ab,
+ struct hal_srng *srng)
+{
+ struct ath12k_hal *hal = &ab->hal;
+ u32 val;
+ u64 tp_addr;
+ u32 reg_base;
+
+ reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];
+
+ if (srng->flags & HAL_SRNG_FLAGS_MSI_INTR) {
+ ath12k_hif_write32(ab, reg_base +
+ HAL_TCL1_RING_MSI1_BASE_LSB_OFFSET(ab),
+ srng->msi_addr);
+
+ val = u32_encode_bits(((u64)srng->msi_addr >> HAL_ADDR_MSB_REG_SHIFT),
+ HAL_TCL1_RING_MSI1_BASE_MSB_ADDR) |
+ HAL_TCL1_RING_MSI1_BASE_MSB_MSI1_ENABLE;
+ ath12k_hif_write32(ab, reg_base +
+ HAL_TCL1_RING_MSI1_BASE_MSB_OFFSET(ab),
+ val);
+
+ ath12k_hif_write32(ab, reg_base +
+ HAL_TCL1_RING_MSI1_DATA_OFFSET(ab),
+ srng->msi_data);
+ }
+
+ ath12k_hif_write32(ab, reg_base, srng->ring_base_paddr);
+
+ val = u32_encode_bits(((u64)srng->ring_base_paddr >> HAL_ADDR_MSB_REG_SHIFT),
+ HAL_TCL1_RING_BASE_MSB_RING_BASE_ADDR_MSB) |
+ u32_encode_bits((srng->entry_size * srng->num_entries),
+ HAL_TCL1_RING_BASE_MSB_RING_SIZE);
+ ath12k_hif_write32(ab, reg_base + HAL_TCL1_RING_BASE_MSB_OFFSET, val);
+
+ val = u32_encode_bits(srng->entry_size, HAL_REO1_RING_ID_ENTRY_SIZE);
+ ath12k_hif_write32(ab, reg_base + HAL_TCL1_RING_ID_OFFSET(ab), val);
+
+ val = u32_encode_bits(srng->intr_timer_thres_us,
+ HAL_TCL1_RING_CONSR_INT_SETUP_IX0_INTR_TMR_THOLD);
+
+ val |= u32_encode_bits((srng->intr_batch_cntr_thres_entries * srng->entry_size),
+ HAL_TCL1_RING_CONSR_INT_SETUP_IX0_BATCH_COUNTER_THOLD);
+
+ ath12k_hif_write32(ab,
+ reg_base + HAL_TCL1_RING_CONSR_INT_SETUP_IX0_OFFSET(ab),
+ val);
+
+ val = 0;
+ if (srng->flags & HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN) {
+ val |= u32_encode_bits(srng->u.src_ring.low_threshold,
+ HAL_TCL1_RING_CONSR_INT_SETUP_IX1_LOW_THOLD);
+ }
+ ath12k_hif_write32(ab,
+ reg_base + HAL_TCL1_RING_CONSR_INT_SETUP_IX1_OFFSET(ab),
+ val);
+
+ if (srng->ring_id != HAL_SRNG_RING_ID_WBM_IDLE_LINK) {
+ tp_addr = hal->rdp.paddr +
+ ((unsigned long)srng->u.src_ring.tp_addr -
+ (unsigned long)hal->rdp.vaddr);
+ ath12k_hif_write32(ab,
+ reg_base + HAL_TCL1_RING_TP_ADDR_LSB_OFFSET(ab),
+ tp_addr & HAL_ADDR_LSB_REG_MASK);
+ ath12k_hif_write32(ab,
+ reg_base + HAL_TCL1_RING_TP_ADDR_MSB_OFFSET(ab),
+ tp_addr >> HAL_ADDR_MSB_REG_SHIFT);
+ }
+
+ /* Initialize head and tail pointers to indicate ring is empty */
+ reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R2];
+ ath12k_hif_write32(ab, reg_base, 0);
+ ath12k_hif_write32(ab, reg_base + HAL_TCL1_RING_TP_OFFSET, 0);
+ *srng->u.src_ring.tp_addr = 0;
+
+ reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];
+ val = 0;
+ if (srng->flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP)
+ val |= HAL_TCL1_RING_MISC_DATA_TLV_SWAP;
+ if (srng->flags & HAL_SRNG_FLAGS_RING_PTR_SWAP)
+ val |= HAL_TCL1_RING_MISC_HOST_FW_SWAP;
+ if (srng->flags & HAL_SRNG_FLAGS_MSI_SWAP)
+ val |= HAL_TCL1_RING_MISC_MSI_SWAP;
+
+ /* Loop count is not used for SRC rings */
+ val |= HAL_TCL1_RING_MISC_MSI_LOOPCNT_DISABLE;
+
+ val |= HAL_TCL1_RING_MISC_SRNG_ENABLE;
+
+ if (srng->ring_id == HAL_SRNG_RING_ID_WBM_IDLE_LINK)
+ val |= HAL_TCL1_RING_MISC_MSI_RING_ID_DISABLE;
+
+ ath12k_hif_write32(ab, reg_base + HAL_TCL1_RING_MISC_OFFSET(ab), val);
+}
+
+static void ath12k_hal_srng_hw_init(struct ath12k_base *ab,
+ struct hal_srng *srng)
+{
+ if (srng->ring_dir == HAL_SRNG_DIR_SRC)
+ ath12k_hal_srng_src_hw_init(ab, srng);
+ else
+ ath12k_hal_srng_dst_hw_init(ab, srng);
+}
+
+static int ath12k_hal_srng_get_ring_id(struct ath12k_base *ab,
+ enum hal_ring_type type,
+ int ring_num, int mac_id)
+{
+ struct hal_srng_config *srng_config = &ab->hal.srng_config[type];
+ int ring_id;
+
+ if (ring_num >= srng_config->max_rings) {
+ ath12k_warn(ab, "invalid ring number :%d\n", ring_num);
+ return -EINVAL;
+ }
+
+ ring_id = srng_config->start_ring_id + ring_num;
+ if (srng_config->mac_type == ATH12K_HAL_SRNG_PMAC)
+ ring_id += mac_id * HAL_SRNG_RINGS_PER_PMAC;
+
+ if (WARN_ON(ring_id >= HAL_SRNG_RING_ID_MAX))
+ return -EINVAL;
+
+ return ring_id;
+}
+
+int ath12k_hal_srng_get_entrysize(struct ath12k_base *ab, u32 ring_type)
+{
+ struct hal_srng_config *srng_config;
+
+ if (WARN_ON(ring_type >= HAL_MAX_RING_TYPES))
+ return -EINVAL;
+
+ srng_config = &ab->hal.srng_config[ring_type];
+
+ return (srng_config->entry_size << 2);
+}
+
+int ath12k_hal_srng_get_max_entries(struct ath12k_base *ab, u32 ring_type)
+{
+ struct hal_srng_config *srng_config;
+
+ if (WARN_ON(ring_type >= HAL_MAX_RING_TYPES))
+ return -EINVAL;
+
+ srng_config = &ab->hal.srng_config[ring_type];
+
+ return (srng_config->max_size / srng_config->entry_size);
+}
+
+void ath12k_hal_srng_get_params(struct ath12k_base *ab, struct hal_srng *srng,
+ struct hal_srng_params *params)
+{
+ params->ring_base_paddr = srng->ring_base_paddr;
+ params->ring_base_vaddr = srng->ring_base_vaddr;
+ params->num_entries = srng->num_entries;
+ params->intr_timer_thres_us = srng->intr_timer_thres_us;
+ params->intr_batch_cntr_thres_entries =
+ srng->intr_batch_cntr_thres_entries;
+ params->low_threshold = srng->u.src_ring.low_threshold;
+ params->msi_addr = srng->msi_addr;
+ params->msi2_addr = srng->msi2_addr;
+ params->msi_data = srng->msi_data;
+ params->msi2_data = srng->msi2_data;
+ params->flags = srng->flags;
+}
+
+dma_addr_t ath12k_hal_srng_get_hp_addr(struct ath12k_base *ab,
+ struct hal_srng *srng)
+{
+ if (!(srng->flags & HAL_SRNG_FLAGS_LMAC_RING))
+ return 0;
+
+ if (srng->ring_dir == HAL_SRNG_DIR_SRC)
+ return ab->hal.wrp.paddr +
+ ((unsigned long)srng->u.src_ring.hp_addr -
+ (unsigned long)ab->hal.wrp.vaddr);
+ else
+ return ab->hal.rdp.paddr +
+ ((unsigned long)srng->u.dst_ring.hp_addr -
+ (unsigned long)ab->hal.rdp.vaddr);
+}
+
+dma_addr_t ath12k_hal_srng_get_tp_addr(struct ath12k_base *ab,
+ struct hal_srng *srng)
+{
+ if (!(srng->flags & HAL_SRNG_FLAGS_LMAC_RING))
+ return 0;
+
+ if (srng->ring_dir == HAL_SRNG_DIR_SRC)
+ return ab->hal.rdp.paddr +
+ ((unsigned long)srng->u.src_ring.tp_addr -
+ (unsigned long)ab->hal.rdp.vaddr);
+ else
+ return ab->hal.wrp.paddr +
+ ((unsigned long)srng->u.dst_ring.tp_addr -
+ (unsigned long)ab->hal.wrp.vaddr);
+}
+
+u32 ath12k_hal_ce_get_desc_size(enum hal_ce_desc type)
+{
+ switch (type) {
+ case HAL_CE_DESC_SRC:
+ return sizeof(struct hal_ce_srng_src_desc);
+ case HAL_CE_DESC_DST:
+ return sizeof(struct hal_ce_srng_dest_desc);
+ case HAL_CE_DESC_DST_STATUS:
+ return sizeof(struct hal_ce_srng_dst_status_desc);
+ }
+
+ return 0;
+}
+
+void ath12k_hal_ce_src_set_desc(struct hal_ce_srng_src_desc *desc, dma_addr_t paddr,
+ u32 len, u32 id, u8 byte_swap_data)
+{
+ desc->buffer_addr_low = cpu_to_le32(paddr & HAL_ADDR_LSB_REG_MASK);
+ desc->buffer_addr_info =
+ le32_encode_bits(((u64)paddr >> HAL_ADDR_MSB_REG_SHIFT),
+ HAL_CE_SRC_DESC_ADDR_INFO_ADDR_HI) |
+ le32_encode_bits(byte_swap_data,
+ HAL_CE_SRC_DESC_ADDR_INFO_BYTE_SWAP) |
+ le32_encode_bits(0, HAL_CE_SRC_DESC_ADDR_INFO_GATHER) |
+ le32_encode_bits(len, HAL_CE_SRC_DESC_ADDR_INFO_LEN);
+ desc->meta_info = le32_encode_bits(id, HAL_CE_SRC_DESC_META_INFO_DATA);
+}
+
+void ath12k_hal_ce_dst_set_desc(struct hal_ce_srng_dest_desc *desc, dma_addr_t paddr)
+{
+ desc->buffer_addr_low = cpu_to_le32(paddr & HAL_ADDR_LSB_REG_MASK);
+ desc->buffer_addr_info =
+ le32_encode_bits(((u64)paddr >> HAL_ADDR_MSB_REG_SHIFT),
+ HAL_CE_DEST_DESC_ADDR_INFO_ADDR_HI);
+}
+
+u32 ath12k_hal_ce_dst_status_get_length(struct hal_ce_srng_dst_status_desc *desc)
+{
+ u32 len;
+
+ len = le32_get_bits(desc->flags, HAL_CE_DST_STATUS_DESC_FLAGS_LEN);
+ desc->flags &= ~cpu_to_le32(HAL_CE_DST_STATUS_DESC_FLAGS_LEN);
+
+ return len;
+}
+
+void ath12k_hal_set_link_desc_addr(struct hal_wbm_link_desc *desc, u32 cookie,
+ dma_addr_t paddr)
+{
+ desc->buf_addr_info.info0 = le32_encode_bits((paddr & HAL_ADDR_LSB_REG_MASK),
+ BUFFER_ADDR_INFO0_ADDR);
+ desc->buf_addr_info.info1 =
+ le32_encode_bits(((u64)paddr >> HAL_ADDR_MSB_REG_SHIFT),
+ BUFFER_ADDR_INFO1_ADDR) |
+ le32_encode_bits(1, BUFFER_ADDR_INFO1_RET_BUF_MGR) |
+ le32_encode_bits(cookie, BUFFER_ADDR_INFO1_SW_COOKIE);
+}
+
+void *ath12k_hal_srng_dst_peek(struct ath12k_base *ab, struct hal_srng *srng)
+{
+ lockdep_assert_held(&srng->lock);
+
+ if (srng->u.dst_ring.tp != srng->u.dst_ring.cached_hp)
+ return (srng->ring_base_vaddr + srng->u.dst_ring.tp);
+
+ return NULL;
+}
+
+void *ath12k_hal_srng_dst_get_next_entry(struct ath12k_base *ab,
+ struct hal_srng *srng)
+{
+ void *desc;
+
+ lockdep_assert_held(&srng->lock);
+
+ if (srng->u.dst_ring.tp == srng->u.dst_ring.cached_hp)
+ return NULL;
+
+ desc = srng->ring_base_vaddr + srng->u.dst_ring.tp;
+
+ srng->u.dst_ring.tp = (srng->u.dst_ring.tp + srng->entry_size) %
+ srng->ring_size;
+
+ return desc;
+}
+
+int ath12k_hal_srng_dst_num_free(struct ath12k_base *ab, struct hal_srng *srng,
+ bool sync_hw_ptr)
+{
+ u32 tp, hp;
+
+ lockdep_assert_held(&srng->lock);
+
+ tp = srng->u.dst_ring.tp;
+
+ if (sync_hw_ptr) {
+ hp = *srng->u.dst_ring.hp_addr;
+ srng->u.dst_ring.cached_hp = hp;
+ } else {
+ hp = srng->u.dst_ring.cached_hp;
+ }
+
+ if (hp >= tp)
+ return (hp - tp) / srng->entry_size;
+ else
+ return (srng->ring_size - tp + hp) / srng->entry_size;
+}
+
+/* Returns number of available entries in src ring */
+int ath12k_hal_srng_src_num_free(struct ath12k_base *ab, struct hal_srng *srng,
+ bool sync_hw_ptr)
+{
+ u32 tp, hp;
+
+ lockdep_assert_held(&srng->lock);
+
+ hp = srng->u.src_ring.hp;
+
+ if (sync_hw_ptr) {
+ tp = *srng->u.src_ring.tp_addr;
+ srng->u.src_ring.cached_tp = tp;
+ } else {
+ tp = srng->u.src_ring.cached_tp;
+ }
+
+ if (tp > hp)
+ return ((tp - hp) / srng->entry_size) - 1;
+ else
+ return ((srng->ring_size - hp + tp) / srng->entry_size) - 1;
+}
+
+void *ath12k_hal_srng_src_get_next_entry(struct ath12k_base *ab,
+ struct hal_srng *srng)
+{
+ void *desc;
+ u32 next_hp;
+
+ lockdep_assert_held(&srng->lock);
+
+ /* TODO: Using % is expensive, but we have to do this since size of some
+ * SRNG rings is not power of 2 (due to descriptor sizes). Need to see
+ * if separate function is defined for rings having power of 2 ring size
+ * (TCL2SW, REO2SW, SW2RXDMA and CE rings) so that we can avoid the
+ * overhead of % by using mask (with &).
+ */
+ next_hp = (srng->u.src_ring.hp + srng->entry_size) % srng->ring_size;
+
+ if (next_hp == srng->u.src_ring.cached_tp)
+ return NULL;
+
+ desc = srng->ring_base_vaddr + srng->u.src_ring.hp;
+ srng->u.src_ring.hp = next_hp;
+
+ /* TODO: Reap functionality is not used by all rings. If particular
+ * ring does not use reap functionality, we need not update reap_hp
+ * with next_hp pointer. Need to make sure a separate function is used
+ * before doing any optimization by removing below code updating
+ * reap_hp.
+ */
+ srng->u.src_ring.reap_hp = next_hp;
+
+ return desc;
+}
+
+void *ath12k_hal_srng_src_reap_next(struct ath12k_base *ab,
+ struct hal_srng *srng)
+{
+ void *desc;
+ u32 next_reap_hp;
+
+ lockdep_assert_held(&srng->lock);
+
+ next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) %
+ srng->ring_size;
+
+ if (next_reap_hp == srng->u.src_ring.cached_tp)
+ return NULL;
+
+ desc = srng->ring_base_vaddr + next_reap_hp;
+ srng->u.src_ring.reap_hp = next_reap_hp;
+
+ return desc;
+}
+
+void *ath12k_hal_srng_src_get_next_reaped(struct ath12k_base *ab,
+ struct hal_srng *srng)
+{
+ void *desc;
+
+ lockdep_assert_held(&srng->lock);
+
+ if (srng->u.src_ring.hp == srng->u.src_ring.reap_hp)
+ return NULL;
+
+ desc = srng->ring_base_vaddr + srng->u.src_ring.hp;
+ srng->u.src_ring.hp = (srng->u.src_ring.hp + srng->entry_size) %
+ srng->ring_size;
+
+ return desc;
+}
+
+void ath12k_hal_srng_access_begin(struct ath12k_base *ab, struct hal_srng *srng)
+{
+ lockdep_assert_held(&srng->lock);
+
+ if (srng->ring_dir == HAL_SRNG_DIR_SRC)
+ srng->u.src_ring.cached_tp =
+ *(volatile u32 *)srng->u.src_ring.tp_addr;
+ else
+ srng->u.dst_ring.cached_hp = *srng->u.dst_ring.hp_addr;
+}
+
+/* Update cached ring head/tail pointers to HW. ath12k_hal_srng_access_begin()
+ * should have been called before this.
+ */
+void ath12k_hal_srng_access_end(struct ath12k_base *ab, struct hal_srng *srng)
+{
+ lockdep_assert_held(&srng->lock);
+
+ /* TODO: See if we need a write memory barrier here */
+ if (srng->flags & HAL_SRNG_FLAGS_LMAC_RING) {
+ /* For LMAC rings, ring pointer updates are done through FW and
+ * hence written to a shared memory location that is read by FW
+ */
+ if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
+ srng->u.src_ring.last_tp =
+ *(volatile u32 *)srng->u.src_ring.tp_addr;
+ *srng->u.src_ring.hp_addr = srng->u.src_ring.hp;
+ } else {
+ srng->u.dst_ring.last_hp = *srng->u.dst_ring.hp_addr;
+ *srng->u.dst_ring.tp_addr = srng->u.dst_ring.tp;
+ }
+ } else {
+ if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
+ srng->u.src_ring.last_tp =
+ *(volatile u32 *)srng->u.src_ring.tp_addr;
+ ath12k_hif_write32(ab,
+ (unsigned long)srng->u.src_ring.hp_addr -
+ (unsigned long)ab->mem,
+ srng->u.src_ring.hp);
+ } else {
+ srng->u.dst_ring.last_hp = *srng->u.dst_ring.hp_addr;
+ ath12k_hif_write32(ab,
+ (unsigned long)srng->u.dst_ring.tp_addr -
+ (unsigned long)ab->mem,
+ srng->u.dst_ring.tp);
+ }
+ }
+
+ srng->timestamp = jiffies;
+}
+
+void ath12k_hal_setup_link_idle_list(struct ath12k_base *ab,
+ struct hal_wbm_idle_scatter_list *sbuf,
+ u32 nsbufs, u32 tot_link_desc,
+ u32 end_offset)
+{
+ struct ath12k_buffer_addr *link_addr;
+ int i;
+ u32 reg_scatter_buf_sz = HAL_WBM_IDLE_SCATTER_BUF_SIZE / 64;
+ u32 val;
+
+ link_addr = (void *)sbuf[0].vaddr + HAL_WBM_IDLE_SCATTER_BUF_SIZE;
+
+ for (i = 1; i < nsbufs; i++) {
+ link_addr->info0 = cpu_to_le32(sbuf[i].paddr & HAL_ADDR_LSB_REG_MASK);
+
+ link_addr->info1 =
+ le32_encode_bits((u64)sbuf[i].paddr >> HAL_ADDR_MSB_REG_SHIFT,
+ HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32) |
+ le32_encode_bits(BASE_ADDR_MATCH_TAG_VAL,
+ HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_MATCH_TAG);
+
+ link_addr = (void *)sbuf[i].vaddr +
+ HAL_WBM_IDLE_SCATTER_BUF_SIZE;
+ }
+
+ val = u32_encode_bits(reg_scatter_buf_sz, HAL_WBM_SCATTER_BUFFER_SIZE) |
+ u32_encode_bits(0x1, HAL_WBM_LINK_DESC_IDLE_LIST_MODE);
+
+ ath12k_hif_write32(ab,
+ HAL_SEQ_WCSS_UMAC_WBM_REG +
+ HAL_WBM_R0_IDLE_LIST_CONTROL_ADDR(ab),
+ val);
+
+ val = u32_encode_bits(reg_scatter_buf_sz * nsbufs,
+ HAL_WBM_SCATTER_RING_SIZE_OF_IDLE_LINK_DESC_LIST);
+ ath12k_hif_write32(ab,
+ HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_R0_IDLE_LIST_SIZE_ADDR(ab),
+ val);
+
+ val = u32_encode_bits(sbuf[0].paddr & HAL_ADDR_LSB_REG_MASK,
+ BUFFER_ADDR_INFO0_ADDR);
+ ath12k_hif_write32(ab,
+ HAL_SEQ_WCSS_UMAC_WBM_REG +
+ HAL_WBM_SCATTERED_RING_BASE_LSB(ab),
+ val);
+
+ val = u32_encode_bits(BASE_ADDR_MATCH_TAG_VAL,
+ HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_MATCH_TAG) |
+ u32_encode_bits((u64)sbuf[0].paddr >> HAL_ADDR_MSB_REG_SHIFT,
+ HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32);
+ ath12k_hif_write32(ab,
+ HAL_SEQ_WCSS_UMAC_WBM_REG +
+ HAL_WBM_SCATTERED_RING_BASE_MSB(ab),
+ val);
+
+ /* Setup head and tail pointers for the idle list */
+ val = u32_encode_bits(sbuf[nsbufs - 1].paddr, BUFFER_ADDR_INFO0_ADDR);
+ ath12k_hif_write32(ab,
+ HAL_SEQ_WCSS_UMAC_WBM_REG +
+ HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX0(ab),
+ val);
+
+ val = u32_encode_bits(((u64)sbuf[nsbufs - 1].paddr >> HAL_ADDR_MSB_REG_SHIFT),
+ HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32) |
+ u32_encode_bits((end_offset >> 2),
+ HAL_WBM_SCATTERED_DESC_HEAD_P_OFFSET_IX1);
+ ath12k_hif_write32(ab,
+ HAL_SEQ_WCSS_UMAC_WBM_REG +
+ HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX1(ab),
+ val);
+
+ val = u32_encode_bits(sbuf[0].paddr, BUFFER_ADDR_INFO0_ADDR);
+ ath12k_hif_write32(ab,
+ HAL_SEQ_WCSS_UMAC_WBM_REG +
+ HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX0(ab),
+ val);
+
+ val = u32_encode_bits(sbuf[0].paddr, BUFFER_ADDR_INFO0_ADDR);
+ ath12k_hif_write32(ab,
+ HAL_SEQ_WCSS_UMAC_WBM_REG +
+ HAL_WBM_SCATTERED_DESC_PTR_TAIL_INFO_IX0(ab),
+ val);
+
+ val = u32_encode_bits(((u64)sbuf[0].paddr >> HAL_ADDR_MSB_REG_SHIFT),
+ HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32) |
+ u32_encode_bits(0, HAL_WBM_SCATTERED_DESC_TAIL_P_OFFSET_IX1);
+ ath12k_hif_write32(ab,
+ HAL_SEQ_WCSS_UMAC_WBM_REG +
+ HAL_WBM_SCATTERED_DESC_PTR_TAIL_INFO_IX1(ab),
+ val);
+
+ val = 2 * tot_link_desc;
+ ath12k_hif_write32(ab,
+ HAL_SEQ_WCSS_UMAC_WBM_REG +
+ HAL_WBM_SCATTERED_DESC_PTR_HP_ADDR(ab),
+ val);
+
+ /* Enable the SRNG */
+ val = u32_encode_bits(1, HAL_WBM_IDLE_LINK_RING_MISC_SRNG_ENABLE) |
+ u32_encode_bits(1, HAL_WBM_IDLE_LINK_RING_MISC_RIND_ID_DISABLE);
+ ath12k_hif_write32(ab,
+ HAL_SEQ_WCSS_UMAC_WBM_REG +
+ HAL_WBM_IDLE_LINK_RING_MISC_ADDR(ab),
+ val);
+}
+
+int ath12k_hal_srng_setup(struct ath12k_base *ab, enum hal_ring_type type,
+ int ring_num, int mac_id,
+ struct hal_srng_params *params)
+{
+ struct ath12k_hal *hal = &ab->hal;
+ struct hal_srng_config *srng_config = &ab->hal.srng_config[type];
+ struct hal_srng *srng;
+ int ring_id;
+ u32 idx;
+ int i;
+ u32 reg_base;
+
+ ring_id = ath12k_hal_srng_get_ring_id(ab, type, ring_num, mac_id);
+ if (ring_id < 0)
+ return ring_id;
+
+ srng = &hal->srng_list[ring_id];
+
+ srng->ring_id = ring_id;
+ srng->ring_dir = srng_config->ring_dir;
+ srng->ring_base_paddr = params->ring_base_paddr;
+ srng->ring_base_vaddr = params->ring_base_vaddr;
+ srng->entry_size = srng_config->entry_size;
+ srng->num_entries = params->num_entries;
+ srng->ring_size = srng->entry_size * srng->num_entries;
+ srng->intr_batch_cntr_thres_entries =
+ params->intr_batch_cntr_thres_entries;
+ srng->intr_timer_thres_us = params->intr_timer_thres_us;
+ srng->flags = params->flags;
+ srng->msi_addr = params->msi_addr;
+ srng->msi2_addr = params->msi2_addr;
+ srng->msi_data = params->msi_data;
+ srng->msi2_data = params->msi2_data;
+ srng->initialized = 1;
+ spin_lock_init(&srng->lock);
+ lockdep_set_class(&srng->lock, &srng->lock_key);
+
+ for (i = 0; i < HAL_SRNG_NUM_REG_GRP; i++) {
+ srng->hwreg_base[i] = srng_config->reg_start[i] +
+ (ring_num * srng_config->reg_size[i]);
+ }
+
+ memset(srng->ring_base_vaddr, 0,
+ (srng->entry_size * srng->num_entries) << 2);
+
+ reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R2];
+
+ if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
+ srng->u.src_ring.hp = 0;
+ srng->u.src_ring.cached_tp = 0;
+ srng->u.src_ring.reap_hp = srng->ring_size - srng->entry_size;
+ srng->u.src_ring.tp_addr = (void *)(hal->rdp.vaddr + ring_id);
+ srng->u.src_ring.low_threshold = params->low_threshold *
+ srng->entry_size;
+ if (srng_config->mac_type == ATH12K_HAL_SRNG_UMAC) {
+ if (!ab->hw_params->supports_shadow_regs)
+ srng->u.src_ring.hp_addr =
+ (u32 *)((unsigned long)ab->mem + reg_base);
+ else
+ ath12k_dbg(ab, ATH12K_DBG_HAL,
+ "hal type %d ring_num %d reg_base 0x%x shadow 0x%lx\n",
+ type, ring_num,
+ reg_base,
+ (unsigned long)srng->u.src_ring.hp_addr -
+ (unsigned long)ab->mem);
+ } else {
+ idx = ring_id - HAL_SRNG_RING_ID_DMAC_CMN_ID_START;
+ srng->u.src_ring.hp_addr = (void *)(hal->wrp.vaddr +
+ idx);
+ srng->flags |= HAL_SRNG_FLAGS_LMAC_RING;
+ }
+ } else {
+ /* During initialization loop count in all the descriptors
+ * will be set to zero, and HW will set it to 1 on completing
+ * descriptor update in first loop, and increments it by 1 on
+ * subsequent loops (loop count wraps around after reaching
+ * 0xffff). The 'loop_cnt' in SW ring state is the expected
+ * loop count in descriptors updated by HW (to be processed
+ * by SW).
+ */
+ srng->u.dst_ring.loop_cnt = 1;
+ srng->u.dst_ring.tp = 0;
+ srng->u.dst_ring.cached_hp = 0;
+ srng->u.dst_ring.hp_addr = (void *)(hal->rdp.vaddr + ring_id);
+ if (srng_config->mac_type == ATH12K_HAL_SRNG_UMAC) {
+ if (!ab->hw_params->supports_shadow_regs)
+ srng->u.dst_ring.tp_addr =
+ (u32 *)((unsigned long)ab->mem + reg_base +
+ (HAL_REO1_RING_TP - HAL_REO1_RING_HP));
+ else
+ ath12k_dbg(ab, ATH12K_DBG_HAL,
+ "type %d ring_num %d target_reg 0x%x shadow 0x%lx\n",
+ type, ring_num,
+ reg_base + HAL_REO1_RING_TP - HAL_REO1_RING_HP,
+ (unsigned long)srng->u.dst_ring.tp_addr -
+ (unsigned long)ab->mem);
+ } else {
+ /* For PMAC & DMAC rings, tail pointer updates will be done
+ * through FW by writing to a shared memory location
+ */
+ idx = ring_id - HAL_SRNG_RING_ID_DMAC_CMN_ID_START;
+ srng->u.dst_ring.tp_addr = (void *)(hal->wrp.vaddr +
+ idx);
+ srng->flags |= HAL_SRNG_FLAGS_LMAC_RING;
+ }
+ }
+
+ if (srng_config->mac_type != ATH12K_HAL_SRNG_UMAC)
+ return ring_id;
+
+ ath12k_hal_srng_hw_init(ab, srng);
+
+ if (type == HAL_CE_DST) {
+ srng->u.dst_ring.max_buffer_length = params->max_buffer_len;
+ ath12k_hal_ce_dst_setup(ab, srng, ring_num);
+ }
+
+ return ring_id;
+}
+
+static void ath12k_hal_srng_update_hp_tp_addr(struct ath12k_base *ab,
+ int shadow_cfg_idx,
+ enum hal_ring_type ring_type,
+ int ring_num)
+{
+ struct hal_srng *srng;
+ struct ath12k_hal *hal = &ab->hal;
+ int ring_id;
+ struct hal_srng_config *srng_config = &hal->srng_config[ring_type];
+
+ ring_id = ath12k_hal_srng_get_ring_id(ab, ring_type, ring_num, 0);
+ if (ring_id < 0)
+ return;
+
+ srng = &hal->srng_list[ring_id];
+
+ if (srng_config->ring_dir == HAL_SRNG_DIR_DST)
+ srng->u.dst_ring.tp_addr = (u32 *)(HAL_SHADOW_REG(shadow_cfg_idx) +
+ (unsigned long)ab->mem);
+ else
+ srng->u.src_ring.hp_addr = (u32 *)(HAL_SHADOW_REG(shadow_cfg_idx) +
+ (unsigned long)ab->mem);
+}
+
+int ath12k_hal_srng_update_shadow_config(struct ath12k_base *ab,
+ enum hal_ring_type ring_type,
+ int ring_num)
+{
+ struct ath12k_hal *hal = &ab->hal;
+ struct hal_srng_config *srng_config = &hal->srng_config[ring_type];
+ int shadow_cfg_idx = hal->num_shadow_reg_configured;
+ u32 target_reg;
+
+ if (shadow_cfg_idx >= HAL_SHADOW_NUM_REGS)
+ return -EINVAL;
+
+ hal->num_shadow_reg_configured++;
+
+ target_reg = srng_config->reg_start[HAL_HP_OFFSET_IN_REG_START];
+ target_reg += srng_config->reg_size[HAL_HP_OFFSET_IN_REG_START] *
+ ring_num;
+
+ /* For destination ring, shadow the TP */
+ if (srng_config->ring_dir == HAL_SRNG_DIR_DST)
+ target_reg += HAL_OFFSET_FROM_HP_TO_TP;
+
+ hal->shadow_reg_addr[shadow_cfg_idx] = target_reg;
+
+ /* update hp/tp addr to hal structure*/
+ ath12k_hal_srng_update_hp_tp_addr(ab, shadow_cfg_idx, ring_type,
+ ring_num);
+
+ ath12k_dbg(ab, ATH12K_DBG_HAL,
+ "target_reg %x, shadow reg 0x%x shadow_idx 0x%x, ring_type %d, ring num %d",
+ target_reg,
+ HAL_SHADOW_REG(shadow_cfg_idx),
+ shadow_cfg_idx,
+ ring_type, ring_num);
+
+ return 0;
+}
+
+void ath12k_hal_srng_shadow_config(struct ath12k_base *ab)
+{
+ struct ath12k_hal *hal = &ab->hal;
+ int ring_type, ring_num;
+
+ /* update all the non-CE srngs. */
+ for (ring_type = 0; ring_type < HAL_MAX_RING_TYPES; ring_type++) {
+ struct hal_srng_config *srng_config = &hal->srng_config[ring_type];
+
+ if (ring_type == HAL_CE_SRC ||
+ ring_type == HAL_CE_DST ||
+ ring_type == HAL_CE_DST_STATUS)
+ continue;
+
+ if (srng_config->mac_type == ATH12K_HAL_SRNG_DMAC ||
+ srng_config->mac_type == ATH12K_HAL_SRNG_PMAC)
+ continue;
+
+ for (ring_num = 0; ring_num < srng_config->max_rings; ring_num++)
+ ath12k_hal_srng_update_shadow_config(ab, ring_type, ring_num);
+ }
+}
+
+void ath12k_hal_srng_get_shadow_config(struct ath12k_base *ab,
+ u32 **cfg, u32 *len)
+{
+ struct ath12k_hal *hal = &ab->hal;
+
+ *len = hal->num_shadow_reg_configured;
+ *cfg = hal->shadow_reg_addr;
+}
+
+void ath12k_hal_srng_shadow_update_hp_tp(struct ath12k_base *ab,
+ struct hal_srng *srng)
+{
+ lockdep_assert_held(&srng->lock);
+
+ /* check whether the ring is empty. Update the shadow
+ * HP only when then ring isn't' empty.
+ */
+ if (srng->ring_dir == HAL_SRNG_DIR_SRC &&
+ *srng->u.src_ring.tp_addr != srng->u.src_ring.hp)
+ ath12k_hal_srng_access_end(ab, srng);
+}
+
+static void ath12k_hal_register_srng_lock_keys(struct ath12k_base *ab)
+{
+ struct ath12k_hal *hal = &ab->hal;
+ u32 ring_id;
+
+ for (ring_id = 0; ring_id < HAL_SRNG_RING_ID_MAX; ring_id++)
+ lockdep_register_key(&hal->srng_list[ring_id].lock_key);
+}
+
+static void ath12k_hal_unregister_srng_lock_keys(struct ath12k_base *ab)
+{
+ struct ath12k_hal *hal = &ab->hal;
+ u32 ring_id;
+
+ for (ring_id = 0; ring_id < HAL_SRNG_RING_ID_MAX; ring_id++)
+ lockdep_unregister_key(&hal->srng_list[ring_id].lock_key);
+}
+
+int ath12k_hal_srng_init(struct ath12k_base *ab)
+{
+ struct ath12k_hal *hal = &ab->hal;
+ int ret;
+
+ memset(hal, 0, sizeof(*hal));
+
+ ret = ab->hw_params->hal_ops->create_srng_config(ab);
+ if (ret)
+ goto err_hal;
+
+ ret = ath12k_hal_alloc_cont_rdp(ab);
+ if (ret)
+ goto err_hal;
+
+ ret = ath12k_hal_alloc_cont_wrp(ab);
+ if (ret)
+ goto err_free_cont_rdp;
+
+ ath12k_hal_register_srng_lock_keys(ab);
+
+ return 0;
+
+err_free_cont_rdp:
+ ath12k_hal_free_cont_rdp(ab);
+
+err_hal:
+ return ret;
+}
+
+void ath12k_hal_srng_deinit(struct ath12k_base *ab)
+{
+ struct ath12k_hal *hal = &ab->hal;
+
+ ath12k_hal_unregister_srng_lock_keys(ab);
+ ath12k_hal_free_cont_rdp(ab);
+ ath12k_hal_free_cont_wrp(ab);
+ kfree(hal->srng_config);
+ hal->srng_config = NULL;
+}
+
+void ath12k_hal_dump_srng_stats(struct ath12k_base *ab)
+{
+ struct hal_srng *srng;
+ struct ath12k_ext_irq_grp *irq_grp;
+ struct ath12k_ce_pipe *ce_pipe;
+ int i;
+
+ ath12k_err(ab, "Last interrupt received for each CE:\n");
+ for (i = 0; i < ab->hw_params->ce_count; i++) {
+ ce_pipe = &ab->ce.ce_pipe[i];
+
+ if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
+ continue;
+
+ ath12k_err(ab, "CE_id %d pipe_num %d %ums before\n",
+ i, ce_pipe->pipe_num,
+ jiffies_to_msecs(jiffies - ce_pipe->timestamp));
+ }
+
+ ath12k_err(ab, "\nLast interrupt received for each group:\n");
+ for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
+ irq_grp = &ab->ext_irq_grp[i];
+ ath12k_err(ab, "group_id %d %ums before\n",
+ irq_grp->grp_id,
+ jiffies_to_msecs(jiffies - irq_grp->timestamp));
+ }
+
+ for (i = 0; i < HAL_SRNG_RING_ID_MAX; i++) {
+ srng = &ab->hal.srng_list[i];
+
+ if (!srng->initialized)
+ continue;
+
+ if (srng->ring_dir == HAL_SRNG_DIR_SRC)
+ ath12k_err(ab,
+ "src srng id %u hp %u, reap_hp %u, cur tp %u, cached tp %u last tp %u napi processed before %ums\n",
+ srng->ring_id, srng->u.src_ring.hp,
+ srng->u.src_ring.reap_hp,
+ *srng->u.src_ring.tp_addr, srng->u.src_ring.cached_tp,
+ srng->u.src_ring.last_tp,
+ jiffies_to_msecs(jiffies - srng->timestamp));
+ else if (srng->ring_dir == HAL_SRNG_DIR_DST)
+ ath12k_err(ab,
+ "dst srng id %u tp %u, cur hp %u, cached hp %u last hp %u napi processed before %ums\n",
+ srng->ring_id, srng->u.dst_ring.tp,
+ *srng->u.dst_ring.hp_addr,
+ srng->u.dst_ring.cached_hp,
+ srng->u.dst_ring.last_hp,
+ jiffies_to_msecs(jiffies - srng->timestamp));
+ }
+}
diff --git a/drivers/net/wireless/ath/ath12k/hal.h b/drivers/net/wireless/ath/ath12k/hal.h
new file mode 100644
index 000000000000..dfbd8bce70e5
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/hal.h
@@ -0,0 +1,1142 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef ATH12K_HAL_H
+#define ATH12K_HAL_H
+
+#include "hal_desc.h"
+#include "rx_desc.h"
+
+struct ath12k_base;
+
+#define HAL_LINK_DESC_SIZE (32 << 2)
+#define HAL_LINK_DESC_ALIGN 128
+#define HAL_NUM_MPDUS_PER_LINK_DESC 6
+#define HAL_NUM_TX_MSDUS_PER_LINK_DESC 7
+#define HAL_NUM_RX_MSDUS_PER_LINK_DESC 6
+#define HAL_NUM_MPDU_LINKS_PER_QUEUE_DESC 12
+#define HAL_MAX_AVAIL_BLK_RES 3
+
+#define HAL_RING_BASE_ALIGN 8
+
+#define HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX 32704
+/* TODO: Check with hw team on the supported scatter buf size */
+#define HAL_WBM_IDLE_SCATTER_NEXT_PTR_SIZE 8
+#define HAL_WBM_IDLE_SCATTER_BUF_SIZE (HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX - \
+ HAL_WBM_IDLE_SCATTER_NEXT_PTR_SIZE)
+
+/* TODO: 16 entries per radio times MAX_VAPS_SUPPORTED */
+#define HAL_DSCP_TID_MAP_TBL_NUM_ENTRIES_MAX 32
+#define HAL_DSCP_TID_TBL_SIZE 24
+
+/* calculate the register address from bar0 of shadow register x */
+#define HAL_SHADOW_BASE_ADDR 0x000008fc
+#define HAL_SHADOW_NUM_REGS 40
+#define HAL_HP_OFFSET_IN_REG_START 1
+#define HAL_OFFSET_FROM_HP_TO_TP 4
+
+#define HAL_SHADOW_REG(x) (HAL_SHADOW_BASE_ADDR + (4 * (x)))
+
+/* WCSS Relative address */
+#define HAL_SEQ_WCSS_UMAC_OFFSET 0x00a00000
+#define HAL_SEQ_WCSS_UMAC_REO_REG 0x00a38000
+#define HAL_SEQ_WCSS_UMAC_TCL_REG 0x00a44000
+#define HAL_SEQ_WCSS_UMAC_CE0_SRC_REG 0x01b80000
+#define HAL_SEQ_WCSS_UMAC_CE0_DST_REG 0x01b81000
+#define HAL_SEQ_WCSS_UMAC_CE1_SRC_REG 0x01b82000
+#define HAL_SEQ_WCSS_UMAC_CE1_DST_REG 0x01b83000
+#define HAL_SEQ_WCSS_UMAC_WBM_REG 0x00a34000
+
+#define HAL_CE_WFSS_CE_REG_BASE 0x01b80000
+
+#define HAL_TCL_SW_CONFIG_BANK_ADDR 0x00a4408c
+
+/* SW2TCL(x) R0 ring configuration address */
+#define HAL_TCL1_RING_CMN_CTRL_REG 0x00000020
+#define HAL_TCL1_RING_DSCP_TID_MAP 0x00000240
+#define HAL_TCL1_RING_BASE_LSB 0x00000900
+#define HAL_TCL1_RING_BASE_MSB 0x00000904
+#define HAL_TCL1_RING_ID(ab) ((ab)->hw_params->regs->hal_tcl1_ring_id)
+#define HAL_TCL1_RING_MISC(ab) \
+ ((ab)->hw_params->regs->hal_tcl1_ring_misc)
+#define HAL_TCL1_RING_TP_ADDR_LSB(ab) \
+ ((ab)->hw_params->regs->hal_tcl1_ring_tp_addr_lsb)
+#define HAL_TCL1_RING_TP_ADDR_MSB(ab) \
+ ((ab)->hw_params->regs->hal_tcl1_ring_tp_addr_msb)
+#define HAL_TCL1_RING_CONSUMER_INT_SETUP_IX0(ab) \
+ ((ab)->hw_params->regs->hal_tcl1_ring_consumer_int_setup_ix0)
+#define HAL_TCL1_RING_CONSUMER_INT_SETUP_IX1(ab) \
+ ((ab)->hw_params->regs->hal_tcl1_ring_consumer_int_setup_ix1)
+#define HAL_TCL1_RING_MSI1_BASE_LSB(ab) \
+ ((ab)->hw_params->regs->hal_tcl1_ring_msi1_base_lsb)
+#define HAL_TCL1_RING_MSI1_BASE_MSB(ab) \
+ ((ab)->hw_params->regs->hal_tcl1_ring_msi1_base_msb)
+#define HAL_TCL1_RING_MSI1_DATA(ab) \
+ ((ab)->hw_params->regs->hal_tcl1_ring_msi1_data)
+#define HAL_TCL2_RING_BASE_LSB 0x00000978
+#define HAL_TCL_RING_BASE_LSB(ab) \
+ ((ab)->hw_params->regs->hal_tcl_ring_base_lsb)
+
+#define HAL_TCL1_RING_MSI1_BASE_LSB_OFFSET(ab) \
+ (HAL_TCL1_RING_MSI1_BASE_LSB(ab) - HAL_TCL1_RING_BASE_LSB)
+#define HAL_TCL1_RING_MSI1_BASE_MSB_OFFSET(ab) \
+ (HAL_TCL1_RING_MSI1_BASE_MSB(ab) - HAL_TCL1_RING_BASE_LSB)
+#define HAL_TCL1_RING_MSI1_DATA_OFFSET(ab) \
+ (HAL_TCL1_RING_MSI1_DATA(ab) - HAL_TCL1_RING_BASE_LSB)
+#define HAL_TCL1_RING_BASE_MSB_OFFSET \
+ (HAL_TCL1_RING_BASE_MSB - HAL_TCL1_RING_BASE_LSB)
+#define HAL_TCL1_RING_ID_OFFSET(ab) \
+ (HAL_TCL1_RING_ID(ab) - HAL_TCL1_RING_BASE_LSB)
+#define HAL_TCL1_RING_CONSR_INT_SETUP_IX0_OFFSET(ab) \
+ (HAL_TCL1_RING_CONSUMER_INT_SETUP_IX0(ab) - HAL_TCL1_RING_BASE_LSB)
+#define HAL_TCL1_RING_CONSR_INT_SETUP_IX1_OFFSET(ab) \
+ (HAL_TCL1_RING_CONSUMER_INT_SETUP_IX1(ab) - HAL_TCL1_RING_BASE_LSB)
+#define HAL_TCL1_RING_TP_ADDR_LSB_OFFSET(ab) \
+ (HAL_TCL1_RING_TP_ADDR_LSB(ab) - HAL_TCL1_RING_BASE_LSB)
+#define HAL_TCL1_RING_TP_ADDR_MSB_OFFSET(ab) \
+ (HAL_TCL1_RING_TP_ADDR_MSB(ab) - HAL_TCL1_RING_BASE_LSB)
+#define HAL_TCL1_RING_MISC_OFFSET(ab) \
+ (HAL_TCL1_RING_MISC(ab) - HAL_TCL1_RING_BASE_LSB)
+
+/* SW2TCL(x) R2 ring pointers (head/tail) address */
+#define HAL_TCL1_RING_HP 0x00002000
+#define HAL_TCL1_RING_TP 0x00002004
+#define HAL_TCL2_RING_HP 0x00002008
+#define HAL_TCL_RING_HP 0x00002028
+
+#define HAL_TCL1_RING_TP_OFFSET \
+ (HAL_TCL1_RING_TP - HAL_TCL1_RING_HP)
+
+/* TCL STATUS ring address */
+#define HAL_TCL_STATUS_RING_BASE_LSB(ab) \
+ ((ab)->hw_params->regs->hal_tcl_status_ring_base_lsb)
+#define HAL_TCL_STATUS_RING_HP 0x00002048
+
+/* PPE2TCL1 Ring address */
+#define HAL_TCL_PPE2TCL1_RING_BASE_LSB 0x00000c48
+#define HAL_TCL_PPE2TCL1_RING_HP 0x00002038
+
+/* WBM PPE Release Ring address */
+#define HAL_WBM_PPE_RELEASE_RING_BASE_LSB(ab) \
+ ((ab)->hw_params->regs->hal_ppe_rel_ring_base)
+#define HAL_WBM_PPE_RELEASE_RING_HP 0x00003020
+
+/* REO2SW(x) R0 ring configuration address */
+#define HAL_REO1_GEN_ENABLE 0x00000000
+#define HAL_REO1_MISC_CTRL_ADDR(ab) \
+ ((ab)->hw_params->regs->hal_reo1_misc_ctrl_addr)
+#define HAL_REO1_DEST_RING_CTRL_IX_0 0x00000004
+#define HAL_REO1_DEST_RING_CTRL_IX_1 0x00000008
+#define HAL_REO1_DEST_RING_CTRL_IX_2 0x0000000c
+#define HAL_REO1_DEST_RING_CTRL_IX_3 0x00000010
+#define HAL_REO1_SW_COOKIE_CFG0(ab) ((ab)->hw_params->regs->hal_reo1_sw_cookie_cfg0)
+#define HAL_REO1_SW_COOKIE_CFG1(ab) ((ab)->hw_params->regs->hal_reo1_sw_cookie_cfg1)
+#define HAL_REO1_QDESC_LUT_BASE0(ab) ((ab)->hw_params->regs->hal_reo1_qdesc_lut_base0)
+#define HAL_REO1_QDESC_LUT_BASE1(ab) ((ab)->hw_params->regs->hal_reo1_qdesc_lut_base1)
+#define HAL_REO1_RING_BASE_LSB(ab) ((ab)->hw_params->regs->hal_reo1_ring_base_lsb)
+#define HAL_REO1_RING_BASE_MSB(ab) ((ab)->hw_params->regs->hal_reo1_ring_base_msb)
+#define HAL_REO1_RING_ID(ab) ((ab)->hw_params->regs->hal_reo1_ring_id)
+#define HAL_REO1_RING_MISC(ab) ((ab)->hw_params->regs->hal_reo1_ring_misc)
+#define HAL_REO1_RING_HP_ADDR_LSB(ab) ((ab)->hw_params->regs->hal_reo1_ring_hp_addr_lsb)
+#define HAL_REO1_RING_HP_ADDR_MSB(ab) ((ab)->hw_params->regs->hal_reo1_ring_hp_addr_msb)
+#define HAL_REO1_RING_PRODUCER_INT_SETUP(ab) \
+ ((ab)->hw_params->regs->hal_reo1_ring_producer_int_setup)
+#define HAL_REO1_RING_MSI1_BASE_LSB(ab) \
+ ((ab)->hw_params->regs->hal_reo1_ring_msi1_base_lsb)
+#define HAL_REO1_RING_MSI1_BASE_MSB(ab) \
+ ((ab)->hw_params->regs->hal_reo1_ring_msi1_base_msb)
+#define HAL_REO1_RING_MSI1_DATA(ab) ((ab)->hw_params->regs->hal_reo1_ring_msi1_data)
+#define HAL_REO2_RING_BASE_LSB(ab) ((ab)->hw_params->regs->hal_reo2_ring_base)
+#define HAL_REO1_AGING_THRESH_IX_0(ab) ((ab)->hw_params->regs->hal_reo1_aging_thres_ix0)
+#define HAL_REO1_AGING_THRESH_IX_1(ab) ((ab)->hw_params->regs->hal_reo1_aging_thres_ix1)
+#define HAL_REO1_AGING_THRESH_IX_2(ab) ((ab)->hw_params->regs->hal_reo1_aging_thres_ix2)
+#define HAL_REO1_AGING_THRESH_IX_3(ab) ((ab)->hw_params->regs->hal_reo1_aging_thres_ix3)
+
+/* REO2SW(x) R2 ring pointers (head/tail) address */
+#define HAL_REO1_RING_HP 0x00003048
+#define HAL_REO1_RING_TP 0x0000304c
+#define HAL_REO2_RING_HP 0x00003050
+
+#define HAL_REO1_RING_TP_OFFSET (HAL_REO1_RING_TP - HAL_REO1_RING_HP)
+
+/* REO2SW0 ring configuration address */
+#define HAL_REO_SW0_RING_BASE_LSB(ab) \
+ ((ab)->hw_params->regs->hal_reo2_sw0_ring_base)
+
+/* REO2SW0 R2 ring pointer (head/tail) address */
+#define HAL_REO_SW0_RING_HP 0x00003088
+
+/* REO CMD R0 address */
+#define HAL_REO_CMD_RING_BASE_LSB(ab) \
+ ((ab)->hw_params->regs->hal_reo_cmd_ring_base)
+
+/* REO CMD R2 address */
+#define HAL_REO_CMD_HP 0x00003020
+
+/* SW2REO R0 address */
+#define HAL_SW2REO_RING_BASE_LSB(ab) \
+ ((ab)->hw_params->regs->hal_sw2reo_ring_base)
+#define HAL_SW2REO1_RING_BASE_LSB(ab) \
+ ((ab)->hw_params->regs->hal_sw2reo1_ring_base)
+
+/* SW2REO R2 address */
+#define HAL_SW2REO_RING_HP 0x00003028
+#define HAL_SW2REO1_RING_HP 0x00003030
+
+/* CE ring R0 address */
+#define HAL_CE_SRC_RING_BASE_LSB 0x00000000
+#define HAL_CE_DST_RING_BASE_LSB 0x00000000
+#define HAL_CE_DST_STATUS_RING_BASE_LSB 0x00000058
+#define HAL_CE_DST_RING_CTRL 0x000000b0
+
+/* CE ring R2 address */
+#define HAL_CE_DST_RING_HP 0x00000400
+#define HAL_CE_DST_STATUS_RING_HP 0x00000408
+
+/* REO status address */
+#define HAL_REO_STATUS_RING_BASE_LSB(ab) \
+ ((ab)->hw_params->regs->hal_reo_status_ring_base)
+#define HAL_REO_STATUS_HP 0x000030a8
+
+/* WBM Idle R0 address */
+#define HAL_WBM_IDLE_LINK_RING_BASE_LSB(ab) \
+ ((ab)->hw_params->regs->hal_wbm_idle_ring_base_lsb)
+#define HAL_WBM_IDLE_LINK_RING_MISC_ADDR(ab) \
+ ((ab)->hw_params->regs->hal_wbm_idle_ring_misc_addr)
+#define HAL_WBM_R0_IDLE_LIST_CONTROL_ADDR(ab) \
+ ((ab)->hw_params->regs->hal_wbm_r0_idle_list_cntl_addr)
+#define HAL_WBM_R0_IDLE_LIST_SIZE_ADDR(ab) \
+ ((ab)->hw_params->regs->hal_wbm_r0_idle_list_size_addr)
+#define HAL_WBM_SCATTERED_RING_BASE_LSB(ab) \
+ ((ab)->hw_params->regs->hal_wbm_scattered_ring_base_lsb)
+#define HAL_WBM_SCATTERED_RING_BASE_MSB(ab) \
+ ((ab)->hw_params->regs->hal_wbm_scattered_ring_base_msb)
+#define HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX0(ab) \
+ ((ab)->hw_params->regs->hal_wbm_scattered_desc_head_info_ix0)
+#define HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX1(ab) \
+ ((ab)->hw_params->regs->hal_wbm_scattered_desc_head_info_ix1)
+#define HAL_WBM_SCATTERED_DESC_PTR_TAIL_INFO_IX0(ab) \
+ ((ab)->hw_params->regs->hal_wbm_scattered_desc_tail_info_ix0)
+#define HAL_WBM_SCATTERED_DESC_PTR_TAIL_INFO_IX1(ab) \
+ ((ab)->hw_params->regs->hal_wbm_scattered_desc_tail_info_ix1)
+#define HAL_WBM_SCATTERED_DESC_PTR_HP_ADDR(ab) \
+ ((ab)->hw_params->regs->hal_wbm_scattered_desc_ptr_hp_addr)
+
+/* WBM Idle R2 address */
+#define HAL_WBM_IDLE_LINK_RING_HP 0x000030b8
+
+/* SW2WBM R0 release address */
+#define HAL_WBM_SW_RELEASE_RING_BASE_LSB(ab) \
+ ((ab)->hw_params->regs->hal_wbm_sw_release_ring_base_lsb)
+#define HAL_WBM_SW1_RELEASE_RING_BASE_LSB(ab) \
+ ((ab)->hw_params->regs->hal_wbm_sw1_release_ring_base_lsb)
+
+/* SW2WBM R2 release address */
+#define HAL_WBM_SW_RELEASE_RING_HP 0x00003010
+#define HAL_WBM_SW1_RELEASE_RING_HP 0x00003018
+
+/* WBM2SW R0 release address */
+#define HAL_WBM0_RELEASE_RING_BASE_LSB(ab) \
+ ((ab)->hw_params->regs->hal_wbm0_release_ring_base_lsb)
+
+#define HAL_WBM1_RELEASE_RING_BASE_LSB(ab) \
+ ((ab)->hw_params->regs->hal_wbm1_release_ring_base_lsb)
+
+/* WBM2SW R2 release address */
+#define HAL_WBM0_RELEASE_RING_HP 0x000030c8
+#define HAL_WBM1_RELEASE_RING_HP 0x000030d0
+
+/* WBM cookie config address and mask */
+#define HAL_WBM_SW_COOKIE_CFG0 0x00000040
+#define HAL_WBM_SW_COOKIE_CFG1 0x00000044
+#define HAL_WBM_SW_COOKIE_CFG2 0x00000090
+#define HAL_WBM_SW_COOKIE_CONVERT_CFG 0x00000094
+
+#define HAL_WBM_SW_COOKIE_CFG_CMEM_BASE_ADDR_MSB GENMASK(7, 0)
+#define HAL_WBM_SW_COOKIE_CFG_COOKIE_PPT_MSB GENMASK(12, 8)
+#define HAL_WBM_SW_COOKIE_CFG_COOKIE_SPT_MSB GENMASK(17, 13)
+#define HAL_WBM_SW_COOKIE_CFG_ALIGN BIT(18)
+#define HAL_WBM_SW_COOKIE_CFG_RELEASE_PATH_EN BIT(0)
+#define HAL_WBM_SW_COOKIE_CFG_ERR_PATH_EN BIT(1)
+#define HAL_WBM_SW_COOKIE_CFG_CONV_IND_EN BIT(3)
+
+#define HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW0_EN BIT(1)
+#define HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW1_EN BIT(2)
+#define HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW2_EN BIT(3)
+#define HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW3_EN BIT(4)
+#define HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW4_EN BIT(5)
+#define HAL_WBM_SW_COOKIE_CONV_CFG_GLOBAL_EN BIT(8)
+
+/* TCL ring feild mask and offset */
+#define HAL_TCL1_RING_BASE_MSB_RING_SIZE GENMASK(27, 8)
+#define HAL_TCL1_RING_BASE_MSB_RING_BASE_ADDR_MSB GENMASK(7, 0)
+#define HAL_TCL1_RING_ID_ENTRY_SIZE GENMASK(7, 0)
+#define HAL_TCL1_RING_MISC_MSI_RING_ID_DISABLE BIT(0)
+#define HAL_TCL1_RING_MISC_MSI_LOOPCNT_DISABLE BIT(1)
+#define HAL_TCL1_RING_MISC_MSI_SWAP BIT(3)
+#define HAL_TCL1_RING_MISC_HOST_FW_SWAP BIT(4)
+#define HAL_TCL1_RING_MISC_DATA_TLV_SWAP BIT(5)
+#define HAL_TCL1_RING_MISC_SRNG_ENABLE BIT(6)
+#define HAL_TCL1_RING_CONSR_INT_SETUP_IX0_INTR_TMR_THOLD GENMASK(31, 16)
+#define HAL_TCL1_RING_CONSR_INT_SETUP_IX0_BATCH_COUNTER_THOLD GENMASK(14, 0)
+#define HAL_TCL1_RING_CONSR_INT_SETUP_IX1_LOW_THOLD GENMASK(15, 0)
+#define HAL_TCL1_RING_MSI1_BASE_MSB_MSI1_ENABLE BIT(8)
+#define HAL_TCL1_RING_MSI1_BASE_MSB_ADDR GENMASK(7, 0)
+#define HAL_TCL1_RING_CMN_CTRL_DSCP_TID_MAP_PROG_EN BIT(23)
+#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP GENMASK(31, 0)
+#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP0 GENMASK(2, 0)
+#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP1 GENMASK(5, 3)
+#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP2 GENMASK(8, 6)
+#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP3 GENMASK(11, 9)
+#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP4 GENMASK(14, 12)
+#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP5 GENMASK(17, 15)
+#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP6 GENMASK(20, 18)
+#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP7 GENMASK(23, 21)
+
+/* REO ring feild mask and offset */
+#define HAL_REO1_RING_BASE_MSB_RING_SIZE GENMASK(27, 8)
+#define HAL_REO1_RING_BASE_MSB_RING_BASE_ADDR_MSB GENMASK(7, 0)
+#define HAL_REO1_RING_ID_RING_ID GENMASK(15, 8)
+#define HAL_REO1_RING_ID_ENTRY_SIZE GENMASK(7, 0)
+#define HAL_REO1_RING_MISC_MSI_SWAP BIT(3)
+#define HAL_REO1_RING_MISC_HOST_FW_SWAP BIT(4)
+#define HAL_REO1_RING_MISC_DATA_TLV_SWAP BIT(5)
+#define HAL_REO1_RING_MISC_SRNG_ENABLE BIT(6)
+#define HAL_REO1_RING_PRDR_INT_SETUP_INTR_TMR_THOLD GENMASK(31, 16)
+#define HAL_REO1_RING_PRDR_INT_SETUP_BATCH_COUNTER_THOLD GENMASK(14, 0)
+#define HAL_REO1_RING_MSI1_BASE_MSB_MSI1_ENABLE BIT(8)
+#define HAL_REO1_RING_MSI1_BASE_MSB_ADDR GENMASK(7, 0)
+#define HAL_REO1_MISC_CTL_FRAG_DST_RING GENMASK(20, 17)
+#define HAL_REO1_MISC_CTL_BAR_DST_RING GENMASK(24, 21)
+#define HAL_REO1_GEN_ENABLE_AGING_LIST_ENABLE BIT(2)
+#define HAL_REO1_GEN_ENABLE_AGING_FLUSH_ENABLE BIT(3)
+#define HAL_REO1_SW_COOKIE_CFG_CMEM_BASE_ADDR_MSB GENMASK(7, 0)
+#define HAL_REO1_SW_COOKIE_CFG_COOKIE_PPT_MSB GENMASK(12, 8)
+#define HAL_REO1_SW_COOKIE_CFG_COOKIE_SPT_MSB GENMASK(17, 13)
+#define HAL_REO1_SW_COOKIE_CFG_ALIGN BIT(18)
+#define HAL_REO1_SW_COOKIE_CFG_ENABLE BIT(19)
+#define HAL_REO1_SW_COOKIE_CFG_GLOBAL_ENABLE BIT(20)
+
+/* CE ring bit field mask and shift */
+#define HAL_CE_DST_R0_DEST_CTRL_MAX_LEN GENMASK(15, 0)
+
+#define HAL_ADDR_LSB_REG_MASK 0xffffffff
+
+#define HAL_ADDR_MSB_REG_SHIFT 32
+
+/* WBM ring bit field mask and shift */
+#define HAL_WBM_LINK_DESC_IDLE_LIST_MODE BIT(1)
+#define HAL_WBM_SCATTER_BUFFER_SIZE GENMASK(10, 2)
+#define HAL_WBM_SCATTER_RING_SIZE_OF_IDLE_LINK_DESC_LIST GENMASK(31, 16)
+#define HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32 GENMASK(7, 0)
+#define HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_MATCH_TAG GENMASK(31, 8)
+
+#define HAL_WBM_SCATTERED_DESC_HEAD_P_OFFSET_IX1 GENMASK(20, 8)
+#define HAL_WBM_SCATTERED_DESC_TAIL_P_OFFSET_IX1 GENMASK(20, 8)
+
+#define HAL_WBM_IDLE_LINK_RING_MISC_SRNG_ENABLE BIT(6)
+#define HAL_WBM_IDLE_LINK_RING_MISC_RIND_ID_DISABLE BIT(0)
+
+#define BASE_ADDR_MATCH_TAG_VAL 0x5
+
+#define HAL_REO_REO2SW1_RING_BASE_MSB_RING_SIZE 0x000fffff
+#define HAL_REO_REO2SW0_RING_BASE_MSB_RING_SIZE 0x000fffff
+#define HAL_REO_SW2REO_RING_BASE_MSB_RING_SIZE 0x0000ffff
+#define HAL_REO_CMD_RING_BASE_MSB_RING_SIZE 0x0000ffff
+#define HAL_REO_STATUS_RING_BASE_MSB_RING_SIZE 0x0000ffff
+#define HAL_SW2TCL1_RING_BASE_MSB_RING_SIZE 0x000fffff
+#define HAL_SW2TCL1_CMD_RING_BASE_MSB_RING_SIZE 0x000fffff
+#define HAL_TCL_STATUS_RING_BASE_MSB_RING_SIZE 0x0000ffff
+#define HAL_CE_SRC_RING_BASE_MSB_RING_SIZE 0x0000ffff
+#define HAL_CE_DST_RING_BASE_MSB_RING_SIZE 0x0000ffff
+#define HAL_CE_DST_STATUS_RING_BASE_MSB_RING_SIZE 0x0000ffff
+#define HAL_WBM_IDLE_LINK_RING_BASE_MSB_RING_SIZE 0x000fffff
+#define HAL_SW2WBM_RELEASE_RING_BASE_MSB_RING_SIZE 0x0000ffff
+#define HAL_WBM2SW_RELEASE_RING_BASE_MSB_RING_SIZE 0x000fffff
+#define HAL_RXDMA_RING_MAX_SIZE 0x0000ffff
+#define HAL_RXDMA_RING_MAX_SIZE_BE 0x000fffff
+#define HAL_WBM2PPE_RELEASE_RING_BASE_MSB_RING_SIZE 0x000fffff
+
+#define HAL_WBM2SW_REL_ERR_RING_NUM 3
+/* Add any other errors here and return them in
+ * ath12k_hal_rx_desc_get_err().
+ */
+
+enum hal_srng_ring_id {
+ HAL_SRNG_RING_ID_REO2SW0 = 0,
+ HAL_SRNG_RING_ID_REO2SW1,
+ HAL_SRNG_RING_ID_REO2SW2,
+ HAL_SRNG_RING_ID_REO2SW3,
+ HAL_SRNG_RING_ID_REO2SW4,
+ HAL_SRNG_RING_ID_REO2SW5,
+ HAL_SRNG_RING_ID_REO2SW6,
+ HAL_SRNG_RING_ID_REO2SW7,
+ HAL_SRNG_RING_ID_REO2SW8,
+ HAL_SRNG_RING_ID_REO2TCL,
+ HAL_SRNG_RING_ID_REO2PPE,
+
+ HAL_SRNG_RING_ID_SW2REO = 16,
+ HAL_SRNG_RING_ID_SW2REO1,
+ HAL_SRNG_RING_ID_SW2REO2,
+ HAL_SRNG_RING_ID_SW2REO3,
+
+ HAL_SRNG_RING_ID_REO_CMD,
+ HAL_SRNG_RING_ID_REO_STATUS,
+
+ HAL_SRNG_RING_ID_SW2TCL1 = 24,
+ HAL_SRNG_RING_ID_SW2TCL2,
+ HAL_SRNG_RING_ID_SW2TCL3,
+ HAL_SRNG_RING_ID_SW2TCL4,
+ HAL_SRNG_RING_ID_SW2TCL5,
+ HAL_SRNG_RING_ID_SW2TCL6,
+ HAL_SRNG_RING_ID_PPE2TCL1 = 30,
+
+ HAL_SRNG_RING_ID_SW2TCL_CMD = 40,
+ HAL_SRNG_RING_ID_SW2TCL1_CMD,
+ HAL_SRNG_RING_ID_TCL_STATUS,
+
+ HAL_SRNG_RING_ID_CE0_SRC = 64,
+ HAL_SRNG_RING_ID_CE1_SRC,
+ HAL_SRNG_RING_ID_CE2_SRC,
+ HAL_SRNG_RING_ID_CE3_SRC,
+ HAL_SRNG_RING_ID_CE4_SRC,
+ HAL_SRNG_RING_ID_CE5_SRC,
+ HAL_SRNG_RING_ID_CE6_SRC,
+ HAL_SRNG_RING_ID_CE7_SRC,
+ HAL_SRNG_RING_ID_CE8_SRC,
+ HAL_SRNG_RING_ID_CE9_SRC,
+ HAL_SRNG_RING_ID_CE10_SRC,
+ HAL_SRNG_RING_ID_CE11_SRC,
+ HAL_SRNG_RING_ID_CE12_SRC,
+ HAL_SRNG_RING_ID_CE13_SRC,
+ HAL_SRNG_RING_ID_CE14_SRC,
+ HAL_SRNG_RING_ID_CE15_SRC,
+
+ HAL_SRNG_RING_ID_CE0_DST = 81,
+ HAL_SRNG_RING_ID_CE1_DST,
+ HAL_SRNG_RING_ID_CE2_DST,
+ HAL_SRNG_RING_ID_CE3_DST,
+ HAL_SRNG_RING_ID_CE4_DST,
+ HAL_SRNG_RING_ID_CE5_DST,
+ HAL_SRNG_RING_ID_CE6_DST,
+ HAL_SRNG_RING_ID_CE7_DST,
+ HAL_SRNG_RING_ID_CE8_DST,
+ HAL_SRNG_RING_ID_CE9_DST,
+ HAL_SRNG_RING_ID_CE10_DST,
+ HAL_SRNG_RING_ID_CE11_DST,
+ HAL_SRNG_RING_ID_CE12_DST,
+ HAL_SRNG_RING_ID_CE13_DST,
+ HAL_SRNG_RING_ID_CE14_DST,
+ HAL_SRNG_RING_ID_CE15_DST,
+
+ HAL_SRNG_RING_ID_CE0_DST_STATUS = 100,
+ HAL_SRNG_RING_ID_CE1_DST_STATUS,
+ HAL_SRNG_RING_ID_CE2_DST_STATUS,
+ HAL_SRNG_RING_ID_CE3_DST_STATUS,
+ HAL_SRNG_RING_ID_CE4_DST_STATUS,
+ HAL_SRNG_RING_ID_CE5_DST_STATUS,
+ HAL_SRNG_RING_ID_CE6_DST_STATUS,
+ HAL_SRNG_RING_ID_CE7_DST_STATUS,
+ HAL_SRNG_RING_ID_CE8_DST_STATUS,
+ HAL_SRNG_RING_ID_CE9_DST_STATUS,
+ HAL_SRNG_RING_ID_CE10_DST_STATUS,
+ HAL_SRNG_RING_ID_CE11_DST_STATUS,
+ HAL_SRNG_RING_ID_CE12_DST_STATUS,
+ HAL_SRNG_RING_ID_CE13_DST_STATUS,
+ HAL_SRNG_RING_ID_CE14_DST_STATUS,
+ HAL_SRNG_RING_ID_CE15_DST_STATUS,
+
+ HAL_SRNG_RING_ID_WBM_IDLE_LINK = 120,
+ HAL_SRNG_RING_ID_WBM_SW0_RELEASE,
+ HAL_SRNG_RING_ID_WBM_SW1_RELEASE,
+ HAL_SRNG_RING_ID_WBM_PPE_RELEASE = 123,
+
+ HAL_SRNG_RING_ID_WBM2SW0_RELEASE = 128,
+ HAL_SRNG_RING_ID_WBM2SW1_RELEASE,
+ HAL_SRNG_RING_ID_WBM2SW2_RELEASE,
+ HAL_SRNG_RING_ID_WBM2SW3_RELEASE, /* RX ERROR RING */
+ HAL_SRNG_RING_ID_WBM2SW4_RELEASE,
+ HAL_SRNG_RING_ID_WBM2SW5_RELEASE,
+ HAL_SRNG_RING_ID_WBM2SW6_RELEASE,
+ HAL_SRNG_RING_ID_WBM2SW7_RELEASE,
+
+ HAL_SRNG_RING_ID_UMAC_ID_END = 159,
+
+ /* Common DMAC rings shared by all LMACs */
+ HAL_SRNG_RING_ID_DMAC_CMN_ID_START = 160,
+ HAL_SRNG_SW2RXDMA_BUF0 = HAL_SRNG_RING_ID_DMAC_CMN_ID_START,
+ HAL_SRNG_SW2RXDMA_BUF1 = 161,
+ HAL_SRNG_SW2RXDMA_BUF2 = 162,
+
+ HAL_SRNG_SW2RXMON_BUF0 = 168,
+
+ HAL_SRNG_SW2TXMON_BUF0 = 176,
+
+ HAL_SRNG_RING_ID_DMAC_CMN_ID_END = 183,
+ HAL_SRNG_RING_ID_PMAC1_ID_START = 184,
+
+ HAL_SRNG_RING_ID_WMAC1_SW2RXMON_BUF0 = HAL_SRNG_RING_ID_PMAC1_ID_START,
+
+ HAL_SRNG_RING_ID_WMAC1_RXDMA2SW0,
+ HAL_SRNG_RING_ID_WMAC1_RXDMA2SW1,
+ HAL_SRNG_RING_ID_WMAC1_RXMON2SW0 = HAL_SRNG_RING_ID_WMAC1_RXDMA2SW1,
+ HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_DESC,
+ HAL_SRNG_RING_ID_RXDMA_DIR_BUF,
+ HAL_SRNG_RING_ID_WMAC1_SW2TXMON_BUF0,
+ HAL_SRNG_RING_ID_WMAC1_TXMON2SW0_BUF0,
+
+ HAL_SRNG_RING_ID_PMAC1_ID_END,
+};
+
+/* SRNG registers are split into two groups R0 and R2 */
+#define HAL_SRNG_REG_GRP_R0 0
+#define HAL_SRNG_REG_GRP_R2 1
+#define HAL_SRNG_NUM_REG_GRP 2
+
+/* TODO: number of PMACs */
+#define HAL_SRNG_NUM_PMACS 3
+#define HAL_SRNG_NUM_DMAC_RINGS (HAL_SRNG_RING_ID_DMAC_CMN_ID_END - \
+ HAL_SRNG_RING_ID_DMAC_CMN_ID_START)
+#define HAL_SRNG_RINGS_PER_PMAC (HAL_SRNG_RING_ID_PMAC1_ID_END - \
+ HAL_SRNG_RING_ID_PMAC1_ID_START)
+#define HAL_SRNG_NUM_PMAC_RINGS (HAL_SRNG_NUM_PMACS * HAL_SRNG_RINGS_PER_PMAC)
+#define HAL_SRNG_RING_ID_MAX (HAL_SRNG_RING_ID_DMAC_CMN_ID_END + \
+ HAL_SRNG_NUM_PMAC_RINGS)
+
+enum hal_ring_type {
+ HAL_REO_DST,
+ HAL_REO_EXCEPTION,
+ HAL_REO_REINJECT,
+ HAL_REO_CMD,
+ HAL_REO_STATUS,
+ HAL_TCL_DATA,
+ HAL_TCL_CMD,
+ HAL_TCL_STATUS,
+ HAL_CE_SRC,
+ HAL_CE_DST,
+ HAL_CE_DST_STATUS,
+ HAL_WBM_IDLE_LINK,
+ HAL_SW2WBM_RELEASE,
+ HAL_WBM2SW_RELEASE,
+ HAL_RXDMA_BUF,
+ HAL_RXDMA_DST,
+ HAL_RXDMA_MONITOR_BUF,
+ HAL_RXDMA_MONITOR_STATUS,
+ HAL_RXDMA_MONITOR_DST,
+ HAL_RXDMA_MONITOR_DESC,
+ HAL_RXDMA_DIR_BUF,
+ HAL_PPE2TCL,
+ HAL_PPE_RELEASE,
+ HAL_TX_MONITOR_BUF,
+ HAL_TX_MONITOR_DST,
+ HAL_MAX_RING_TYPES,
+};
+
+#define HAL_RX_MAX_BA_WINDOW 256
+
+#define HAL_DEFAULT_BE_BK_VI_REO_TIMEOUT_USEC (100 * 1000)
+#define HAL_DEFAULT_VO_REO_TIMEOUT_USEC (40 * 1000)
+
+/**
+ * enum hal_reo_cmd_type: Enum for REO command type
+ * @HAL_REO_CMD_GET_QUEUE_STATS: Get REO queue status/stats
+ * @HAL_REO_CMD_FLUSH_QUEUE: Flush all frames in REO queue
+ * @HAL_REO_CMD_FLUSH_CACHE: Flush descriptor entries in the cache
+ * @HAL_REO_CMD_UNBLOCK_CACHE: Unblock a descriptor's address that was blocked
+ * earlier with a 'REO_FLUSH_CACHE' command
+ * @HAL_REO_CMD_FLUSH_TIMEOUT_LIST: Flush buffers/descriptors from timeout list
+ * @HAL_REO_CMD_UPDATE_RX_QUEUE: Update REO queue settings
+ */
+enum hal_reo_cmd_type {
+ HAL_REO_CMD_GET_QUEUE_STATS = 0,
+ HAL_REO_CMD_FLUSH_QUEUE = 1,
+ HAL_REO_CMD_FLUSH_CACHE = 2,
+ HAL_REO_CMD_UNBLOCK_CACHE = 3,
+ HAL_REO_CMD_FLUSH_TIMEOUT_LIST = 4,
+ HAL_REO_CMD_UPDATE_RX_QUEUE = 5,
+};
+
+/**
+ * enum hal_reo_cmd_status: Enum for execution status of REO command
+ * @HAL_REO_CMD_SUCCESS: Command has successfully executed
+ * @HAL_REO_CMD_BLOCKED: Command could not be executed as the queue
+ * or cache was blocked
+ * @HAL_REO_CMD_FAILED: Command execution failed, could be due to
+ * invalid queue desc
+ * @HAL_REO_CMD_RESOURCE_BLOCKED:
+ * @HAL_REO_CMD_DRAIN:
+ */
+enum hal_reo_cmd_status {
+ HAL_REO_CMD_SUCCESS = 0,
+ HAL_REO_CMD_BLOCKED = 1,
+ HAL_REO_CMD_FAILED = 2,
+ HAL_REO_CMD_RESOURCE_BLOCKED = 3,
+ HAL_REO_CMD_DRAIN = 0xff,
+};
+
+struct hal_wbm_idle_scatter_list {
+ dma_addr_t paddr;
+ struct hal_wbm_link_desc *vaddr;
+};
+
+struct hal_srng_params {
+ dma_addr_t ring_base_paddr;
+ u32 *ring_base_vaddr;
+ int num_entries;
+ u32 intr_batch_cntr_thres_entries;
+ u32 intr_timer_thres_us;
+ u32 flags;
+ u32 max_buffer_len;
+ u32 low_threshold;
+ u32 high_threshold;
+ dma_addr_t msi_addr;
+ dma_addr_t msi2_addr;
+ u32 msi_data;
+ u32 msi2_data;
+
+ /* Add more params as needed */
+};
+
+enum hal_srng_dir {
+ HAL_SRNG_DIR_SRC,
+ HAL_SRNG_DIR_DST
+};
+
+/* srng flags */
+#define HAL_SRNG_FLAGS_MSI_SWAP 0x00000008
+#define HAL_SRNG_FLAGS_RING_PTR_SWAP 0x00000010
+#define HAL_SRNG_FLAGS_DATA_TLV_SWAP 0x00000020
+#define HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN 0x00010000
+#define HAL_SRNG_FLAGS_MSI_INTR 0x00020000
+#define HAL_SRNG_FLAGS_HIGH_THRESH_INTR_EN 0x00080000
+#define HAL_SRNG_FLAGS_LMAC_RING 0x80000000
+
+#define HAL_SRNG_TLV_HDR_TAG GENMASK(9, 1)
+#define HAL_SRNG_TLV_HDR_LEN GENMASK(25, 10)
+
+/* Common SRNG ring structure for source and destination rings */
+struct hal_srng {
+ /* Unique SRNG ring ID */
+ u8 ring_id;
+
+ /* Ring initialization done */
+ u8 initialized;
+
+ /* Interrupt/MSI value assigned to this ring */
+ int irq;
+
+ /* Physical base address of the ring */
+ dma_addr_t ring_base_paddr;
+
+ /* Virtual base address of the ring */
+ u32 *ring_base_vaddr;
+
+ /* Number of entries in ring */
+ u32 num_entries;
+
+ /* Ring size */
+ u32 ring_size;
+
+ /* Ring size mask */
+ u32 ring_size_mask;
+
+ /* Size of ring entry */
+ u32 entry_size;
+
+ /* Interrupt timer threshold - in micro seconds */
+ u32 intr_timer_thres_us;
+
+ /* Interrupt batch counter threshold - in number of ring entries */
+ u32 intr_batch_cntr_thres_entries;
+
+ /* MSI Address */
+ dma_addr_t msi_addr;
+
+ /* MSI data */
+ u32 msi_data;
+
+ /* MSI2 Address */
+ dma_addr_t msi2_addr;
+
+ /* MSI2 data */
+ u32 msi2_data;
+
+ /* Misc flags */
+ u32 flags;
+
+ /* Lock for serializing ring index updates */
+ spinlock_t lock;
+
+ struct lock_class_key lock_key;
+
+ /* Start offset of SRNG register groups for this ring
+ * TBD: See if this is required - register address can be derived
+ * from ring ID
+ */
+ u32 hwreg_base[HAL_SRNG_NUM_REG_GRP];
+
+ u64 timestamp;
+
+ /* Source or Destination ring */
+ enum hal_srng_dir ring_dir;
+
+ union {
+ struct {
+ /* SW tail pointer */
+ u32 tp;
+
+ /* Shadow head pointer location to be updated by HW */
+ volatile u32 *hp_addr;
+
+ /* Cached head pointer */
+ u32 cached_hp;
+
+ /* Tail pointer location to be updated by SW - This
+ * will be a register address and need not be
+ * accessed through SW structure
+ */
+ u32 *tp_addr;
+
+ /* Current SW loop cnt */
+ u32 loop_cnt;
+
+ /* max transfer size */
+ u16 max_buffer_length;
+
+ /* head pointer at access end */
+ u32 last_hp;
+ } dst_ring;
+
+ struct {
+ /* SW head pointer */
+ u32 hp;
+
+ /* SW reap head pointer */
+ u32 reap_hp;
+
+ /* Shadow tail pointer location to be updated by HW */
+ u32 *tp_addr;
+
+ /* Cached tail pointer */
+ u32 cached_tp;
+
+ /* Head pointer location to be updated by SW - This
+ * will be a register address and need not be accessed
+ * through SW structure
+ */
+ u32 *hp_addr;
+
+ /* Low threshold - in number of ring entries */
+ u32 low_threshold;
+
+ /* tail pointer at access end */
+ u32 last_tp;
+ } src_ring;
+ } u;
+};
+
+/* Interrupt mitigation - Batch threshold in terms of numer of frames */
+#define HAL_SRNG_INT_BATCH_THRESHOLD_TX 256
+#define HAL_SRNG_INT_BATCH_THRESHOLD_RX 128
+#define HAL_SRNG_INT_BATCH_THRESHOLD_OTHER 1
+
+/* Interrupt mitigation - timer threshold in us */
+#define HAL_SRNG_INT_TIMER_THRESHOLD_TX 1000
+#define HAL_SRNG_INT_TIMER_THRESHOLD_RX 500
+#define HAL_SRNG_INT_TIMER_THRESHOLD_OTHER 256
+
+enum hal_srng_mac_type {
+ ATH12K_HAL_SRNG_UMAC,
+ ATH12K_HAL_SRNG_DMAC,
+ ATH12K_HAL_SRNG_PMAC
+};
+
+/* HW SRNG configuration table */
+struct hal_srng_config {
+ int start_ring_id;
+ u16 max_rings;
+ u16 entry_size;
+ u32 reg_start[HAL_SRNG_NUM_REG_GRP];
+ u16 reg_size[HAL_SRNG_NUM_REG_GRP];
+ enum hal_srng_mac_type mac_type;
+ enum hal_srng_dir ring_dir;
+ u32 max_size;
+};
+
+/**
+ * enum hal_rx_buf_return_buf_manager
+ *
+ * @HAL_RX_BUF_RBM_WBM_IDLE_BUF_LIST: Buffer returned to WBM idle buffer list
+ * @HAL_RX_BUF_RBM_WBM_CHIP0_IDLE_DESC_LIST: Descriptor returned to WBM idle
+ * descriptor list, where the chip 0 WBM is chosen in case of a multi-chip config
+ * @HAL_RX_BUF_RBM_WBM_CHIP1_IDLE_DESC_LIST: Descriptor returned to WBM idle
+ * descriptor list, where the chip 1 WBM is chosen in case of a multi-chip config
+ * @HAL_RX_BUF_RBM_WBM_CHIP2_IDLE_DESC_LIST: Descriptor returned to WBM idle
+ * descriptor list, where the chip 2 WBM is chosen in case of a multi-chip config
+ * @HAL_RX_BUF_RBM_FW_BM: Buffer returned to FW
+ * @HAL_RX_BUF_RBM_SW0_BM: For ring 0 -- returned to host
+ * @HAL_RX_BUF_RBM_SW1_BM: For ring 1 -- returned to host
+ * @HAL_RX_BUF_RBM_SW2_BM: For ring 2 -- returned to host
+ * @HAL_RX_BUF_RBM_SW3_BM: For ring 3 -- returned to host
+ * @HAL_RX_BUF_RBM_SW4_BM: For ring 4 -- returned to host
+ * @HAL_RX_BUF_RBM_SW5_BM: For ring 5 -- returned to host
+ * @HAL_RX_BUF_RBM_SW6_BM: For ring 6 -- returned to host
+ */
+
+enum hal_rx_buf_return_buf_manager {
+ HAL_RX_BUF_RBM_WBM_IDLE_BUF_LIST,
+ HAL_RX_BUF_RBM_WBM_CHIP0_IDLE_DESC_LIST,
+ HAL_RX_BUF_RBM_WBM_CHIP1_IDLE_DESC_LIST,
+ HAL_RX_BUF_RBM_WBM_CHIP2_IDLE_DESC_LIST,
+ HAL_RX_BUF_RBM_FW_BM,
+ HAL_RX_BUF_RBM_SW0_BM,
+ HAL_RX_BUF_RBM_SW1_BM,
+ HAL_RX_BUF_RBM_SW2_BM,
+ HAL_RX_BUF_RBM_SW3_BM,
+ HAL_RX_BUF_RBM_SW4_BM,
+ HAL_RX_BUF_RBM_SW5_BM,
+ HAL_RX_BUF_RBM_SW6_BM,
+};
+
+#define HAL_SRNG_DESC_LOOP_CNT 0xf0000000
+
+#define HAL_REO_CMD_FLG_NEED_STATUS BIT(0)
+#define HAL_REO_CMD_FLG_STATS_CLEAR BIT(1)
+#define HAL_REO_CMD_FLG_FLUSH_BLOCK_LATER BIT(2)
+#define HAL_REO_CMD_FLG_FLUSH_RELEASE_BLOCKING BIT(3)
+#define HAL_REO_CMD_FLG_FLUSH_NO_INVAL BIT(4)
+#define HAL_REO_CMD_FLG_FLUSH_FWD_ALL_MPDUS BIT(5)
+#define HAL_REO_CMD_FLG_FLUSH_ALL BIT(6)
+#define HAL_REO_CMD_FLG_UNBLK_RESOURCE BIT(7)
+#define HAL_REO_CMD_FLG_UNBLK_CACHE BIT(8)
+
+/* Should be matching with HAL_REO_UPD_RX_QUEUE_INFO0_UPD_* feilds */
+#define HAL_REO_CMD_UPD0_RX_QUEUE_NUM BIT(8)
+#define HAL_REO_CMD_UPD0_VLD BIT(9)
+#define HAL_REO_CMD_UPD0_ALDC BIT(10)
+#define HAL_REO_CMD_UPD0_DIS_DUP_DETECTION BIT(11)
+#define HAL_REO_CMD_UPD0_SOFT_REORDER_EN BIT(12)
+#define HAL_REO_CMD_UPD0_AC BIT(13)
+#define HAL_REO_CMD_UPD0_BAR BIT(14)
+#define HAL_REO_CMD_UPD0_RETRY BIT(15)
+#define HAL_REO_CMD_UPD0_CHECK_2K_MODE BIT(16)
+#define HAL_REO_CMD_UPD0_OOR_MODE BIT(17)
+#define HAL_REO_CMD_UPD0_BA_WINDOW_SIZE BIT(18)
+#define HAL_REO_CMD_UPD0_PN_CHECK BIT(19)
+#define HAL_REO_CMD_UPD0_EVEN_PN BIT(20)
+#define HAL_REO_CMD_UPD0_UNEVEN_PN BIT(21)
+#define HAL_REO_CMD_UPD0_PN_HANDLE_ENABLE BIT(22)
+#define HAL_REO_CMD_UPD0_PN_SIZE BIT(23)
+#define HAL_REO_CMD_UPD0_IGNORE_AMPDU_FLG BIT(24)
+#define HAL_REO_CMD_UPD0_SVLD BIT(25)
+#define HAL_REO_CMD_UPD0_SSN BIT(26)
+#define HAL_REO_CMD_UPD0_SEQ_2K_ERR BIT(27)
+#define HAL_REO_CMD_UPD0_PN_ERR BIT(28)
+#define HAL_REO_CMD_UPD0_PN_VALID BIT(29)
+#define HAL_REO_CMD_UPD0_PN BIT(30)
+
+/* Should be matching with HAL_REO_UPD_RX_QUEUE_INFO1_* feilds */
+#define HAL_REO_CMD_UPD1_VLD BIT(16)
+#define HAL_REO_CMD_UPD1_ALDC GENMASK(18, 17)
+#define HAL_REO_CMD_UPD1_DIS_DUP_DETECTION BIT(19)
+#define HAL_REO_CMD_UPD1_SOFT_REORDER_EN BIT(20)
+#define HAL_REO_CMD_UPD1_AC GENMASK(22, 21)
+#define HAL_REO_CMD_UPD1_BAR BIT(23)
+#define HAL_REO_CMD_UPD1_RETRY BIT(24)
+#define HAL_REO_CMD_UPD1_CHECK_2K_MODE BIT(25)
+#define HAL_REO_CMD_UPD1_OOR_MODE BIT(26)
+#define HAL_REO_CMD_UPD1_PN_CHECK BIT(27)
+#define HAL_REO_CMD_UPD1_EVEN_PN BIT(28)
+#define HAL_REO_CMD_UPD1_UNEVEN_PN BIT(29)
+#define HAL_REO_CMD_UPD1_PN_HANDLE_ENABLE BIT(30)
+#define HAL_REO_CMD_UPD1_IGNORE_AMPDU_FLG BIT(31)
+
+/* Should be matching with HAL_REO_UPD_RX_QUEUE_INFO2_* feilds */
+#define HAL_REO_CMD_UPD2_SVLD BIT(10)
+#define HAL_REO_CMD_UPD2_SSN GENMASK(22, 11)
+#define HAL_REO_CMD_UPD2_SEQ_2K_ERR BIT(23)
+#define HAL_REO_CMD_UPD2_PN_ERR BIT(24)
+
+struct ath12k_hal_reo_cmd {
+ u32 addr_lo;
+ u32 flag;
+ u32 upd0;
+ u32 upd1;
+ u32 upd2;
+ u32 pn[4];
+ u16 rx_queue_num;
+ u16 min_rel;
+ u16 min_fwd;
+ u8 addr_hi;
+ u8 ac_list;
+ u8 blocking_idx;
+ u16 ba_window_size;
+ u8 pn_size;
+};
+
+enum hal_pn_type {
+ HAL_PN_TYPE_NONE,
+ HAL_PN_TYPE_WPA,
+ HAL_PN_TYPE_WAPI_EVEN,
+ HAL_PN_TYPE_WAPI_UNEVEN,
+};
+
+enum hal_ce_desc {
+ HAL_CE_DESC_SRC,
+ HAL_CE_DESC_DST,
+ HAL_CE_DESC_DST_STATUS,
+};
+
+#define HAL_HASH_ROUTING_RING_TCL 0
+#define HAL_HASH_ROUTING_RING_SW1 1
+#define HAL_HASH_ROUTING_RING_SW2 2
+#define HAL_HASH_ROUTING_RING_SW3 3
+#define HAL_HASH_ROUTING_RING_SW4 4
+#define HAL_HASH_ROUTING_RING_REL 5
+#define HAL_HASH_ROUTING_RING_FW 6
+
+struct hal_reo_status_header {
+ u16 cmd_num;
+ enum hal_reo_cmd_status cmd_status;
+ u16 cmd_exe_time;
+ u32 timestamp;
+};
+
+struct hal_reo_status_queue_stats {
+ u16 ssn;
+ u16 curr_idx;
+ u32 pn[4];
+ u32 last_rx_queue_ts;
+ u32 last_rx_dequeue_ts;
+ u32 rx_bitmap[8]; /* Bitmap from 0-255 */
+ u32 curr_mpdu_cnt;
+ u32 curr_msdu_cnt;
+ u16 fwd_due_to_bar_cnt;
+ u16 dup_cnt;
+ u32 frames_in_order_cnt;
+ u32 num_mpdu_processed_cnt;
+ u32 num_msdu_processed_cnt;
+ u32 total_num_processed_byte_cnt;
+ u32 late_rx_mpdu_cnt;
+ u32 reorder_hole_cnt;
+ u8 timeout_cnt;
+ u8 bar_rx_cnt;
+ u8 num_window_2k_jump_cnt;
+};
+
+struct hal_reo_status_flush_queue {
+ bool err_detected;
+};
+
+enum hal_reo_status_flush_cache_err_code {
+ HAL_REO_STATUS_FLUSH_CACHE_ERR_CODE_SUCCESS,
+ HAL_REO_STATUS_FLUSH_CACHE_ERR_CODE_IN_USE,
+ HAL_REO_STATUS_FLUSH_CACHE_ERR_CODE_NOT_FOUND,
+};
+
+struct hal_reo_status_flush_cache {
+ bool err_detected;
+ enum hal_reo_status_flush_cache_err_code err_code;
+ bool cache_controller_flush_status_hit;
+ u8 cache_controller_flush_status_desc_type;
+ u8 cache_controller_flush_status_client_id;
+ u8 cache_controller_flush_status_err;
+ u8 cache_controller_flush_status_cnt;
+};
+
+enum hal_reo_status_unblock_cache_type {
+ HAL_REO_STATUS_UNBLOCK_BLOCKING_RESOURCE,
+ HAL_REO_STATUS_UNBLOCK_ENTIRE_CACHE_USAGE,
+};
+
+struct hal_reo_status_unblock_cache {
+ bool err_detected;
+ enum hal_reo_status_unblock_cache_type unblock_type;
+};
+
+struct hal_reo_status_flush_timeout_list {
+ bool err_detected;
+ bool list_empty;
+ u16 release_desc_cnt;
+ u16 fwd_buf_cnt;
+};
+
+enum hal_reo_threshold_idx {
+ HAL_REO_THRESHOLD_IDX_DESC_COUNTER0,
+ HAL_REO_THRESHOLD_IDX_DESC_COUNTER1,
+ HAL_REO_THRESHOLD_IDX_DESC_COUNTER2,
+ HAL_REO_THRESHOLD_IDX_DESC_COUNTER_SUM,
+};
+
+struct hal_reo_status_desc_thresh_reached {
+ enum hal_reo_threshold_idx threshold_idx;
+ u32 link_desc_counter0;
+ u32 link_desc_counter1;
+ u32 link_desc_counter2;
+ u32 link_desc_counter_sum;
+};
+
+struct hal_reo_status {
+ struct hal_reo_status_header uniform_hdr;
+ u8 loop_cnt;
+ union {
+ struct hal_reo_status_queue_stats queue_stats;
+ struct hal_reo_status_flush_queue flush_queue;
+ struct hal_reo_status_flush_cache flush_cache;
+ struct hal_reo_status_unblock_cache unblock_cache;
+ struct hal_reo_status_flush_timeout_list timeout_list;
+ struct hal_reo_status_desc_thresh_reached desc_thresh_reached;
+ } u;
+};
+
+/* HAL context to be used to access SRNG APIs (currently used by data path
+ * and transport (CE) modules)
+ */
+struct ath12k_hal {
+ /* HAL internal state for all SRNG rings.
+ */
+ struct hal_srng srng_list[HAL_SRNG_RING_ID_MAX];
+
+ /* SRNG configuration table */
+ struct hal_srng_config *srng_config;
+
+ /* Remote pointer memory for HW/FW updates */
+ struct {
+ u32 *vaddr;
+ dma_addr_t paddr;
+ } rdp;
+
+ /* Shared memory for ring pointer updates from host to FW */
+ struct {
+ u32 *vaddr;
+ dma_addr_t paddr;
+ } wrp;
+
+ /* Available REO blocking resources bitmap */
+ u8 avail_blk_resource;
+
+ u8 current_blk_index;
+
+ /* shadow register configuration */
+ u32 shadow_reg_addr[HAL_SHADOW_NUM_REGS];
+ int num_shadow_reg_configured;
+};
+
+/* Maps WBM ring number and Return Buffer Manager Id per TCL ring */
+struct ath12k_hal_tcl_to_wbm_rbm_map {
+ u8 wbm_ring_num;
+ u8 rbm_id;
+};
+
+struct hal_ops {
+ bool (*rx_desc_get_first_msdu)(struct hal_rx_desc *desc);
+ bool (*rx_desc_get_last_msdu)(struct hal_rx_desc *desc);
+ u8 (*rx_desc_get_l3_pad_bytes)(struct hal_rx_desc *desc);
+ u8 *(*rx_desc_get_hdr_status)(struct hal_rx_desc *desc);
+ bool (*rx_desc_encrypt_valid)(struct hal_rx_desc *desc);
+ u32 (*rx_desc_get_encrypt_type)(struct hal_rx_desc *desc);
+ u8 (*rx_desc_get_decap_type)(struct hal_rx_desc *desc);
+ u8 (*rx_desc_get_mesh_ctl)(struct hal_rx_desc *desc);
+ bool (*rx_desc_get_mpdu_seq_ctl_vld)(struct hal_rx_desc *desc);
+ bool (*rx_desc_get_mpdu_fc_valid)(struct hal_rx_desc *desc);
+ u16 (*rx_desc_get_mpdu_start_seq_no)(struct hal_rx_desc *desc);
+ u16 (*rx_desc_get_msdu_len)(struct hal_rx_desc *desc);
+ u8 (*rx_desc_get_msdu_sgi)(struct hal_rx_desc *desc);
+ u8 (*rx_desc_get_msdu_rate_mcs)(struct hal_rx_desc *desc);
+ u8 (*rx_desc_get_msdu_rx_bw)(struct hal_rx_desc *desc);
+ u32 (*rx_desc_get_msdu_freq)(struct hal_rx_desc *desc);
+ u8 (*rx_desc_get_msdu_pkt_type)(struct hal_rx_desc *desc);
+ u8 (*rx_desc_get_msdu_nss)(struct hal_rx_desc *desc);
+ u8 (*rx_desc_get_mpdu_tid)(struct hal_rx_desc *desc);
+ u16 (*rx_desc_get_mpdu_peer_id)(struct hal_rx_desc *desc);
+ void (*rx_desc_copy_end_tlv)(struct hal_rx_desc *fdesc,
+ struct hal_rx_desc *ldesc);
+ u32 (*rx_desc_get_mpdu_start_tag)(struct hal_rx_desc *desc);
+ u32 (*rx_desc_get_mpdu_ppdu_id)(struct hal_rx_desc *desc);
+ void (*rx_desc_set_msdu_len)(struct hal_rx_desc *desc, u16 len);
+ struct rx_attention *(*rx_desc_get_attention)(struct hal_rx_desc *desc);
+ u8 *(*rx_desc_get_msdu_payload)(struct hal_rx_desc *desc);
+ u32 (*rx_desc_get_mpdu_start_offset)(void);
+ u32 (*rx_desc_get_msdu_end_offset)(void);
+ bool (*rx_desc_mac_addr2_valid)(struct hal_rx_desc *desc);
+ u8* (*rx_desc_mpdu_start_addr2)(struct hal_rx_desc *desc);
+ bool (*rx_desc_is_mcbc)(struct hal_rx_desc *desc);
+ void (*rx_desc_get_dot11_hdr)(struct hal_rx_desc *desc,
+ struct ieee80211_hdr *hdr);
+ u16 (*rx_desc_get_mpdu_frame_ctl)(struct hal_rx_desc *desc);
+ void (*rx_desc_get_crypto_header)(struct hal_rx_desc *desc,
+ u8 *crypto_hdr,
+ enum hal_encrypt_type enctype);
+ int (*create_srng_config)(struct ath12k_base *ab);
+ bool (*dp_rx_h_msdu_done)(struct hal_rx_desc *desc);
+ bool (*dp_rx_h_l4_cksum_fail)(struct hal_rx_desc *desc);
+ bool (*dp_rx_h_ip_cksum_fail)(struct hal_rx_desc *desc);
+ bool (*dp_rx_h_is_decrypted)(struct hal_rx_desc *desc);
+ u32 (*dp_rx_h_mpdu_err)(struct hal_rx_desc *desc);
+ const struct ath12k_hal_tcl_to_wbm_rbm_map *tcl_to_wbm_rbm_map;
+};
+
+extern const struct hal_ops hal_qcn9274_ops;
+extern const struct hal_ops hal_wcn7850_ops;
+
+u32 ath12k_hal_reo_qdesc_size(u32 ba_window_size, u8 tid);
+void ath12k_hal_reo_qdesc_setup(struct hal_rx_reo_queue *qdesc,
+ int tid, u32 ba_window_size,
+ u32 start_seq, enum hal_pn_type type);
+void ath12k_hal_reo_init_cmd_ring(struct ath12k_base *ab,
+ struct hal_srng *srng);
+void ath12k_hal_reo_hw_setup(struct ath12k_base *ab, u32 ring_hash_map);
+void ath12k_hal_setup_link_idle_list(struct ath12k_base *ab,
+ struct hal_wbm_idle_scatter_list *sbuf,
+ u32 nsbufs, u32 tot_link_desc,
+ u32 end_offset);
+
+dma_addr_t ath12k_hal_srng_get_tp_addr(struct ath12k_base *ab,
+ struct hal_srng *srng);
+dma_addr_t ath12k_hal_srng_get_hp_addr(struct ath12k_base *ab,
+ struct hal_srng *srng);
+void ath12k_hal_set_link_desc_addr(struct hal_wbm_link_desc *desc, u32 cookie,
+ dma_addr_t paddr);
+u32 ath12k_hal_ce_get_desc_size(enum hal_ce_desc type);
+void ath12k_hal_ce_src_set_desc(struct hal_ce_srng_src_desc *desc, dma_addr_t paddr,
+ u32 len, u32 id, u8 byte_swap_data);
+void ath12k_hal_ce_dst_set_desc(struct hal_ce_srng_dest_desc *desc, dma_addr_t paddr);
+u32 ath12k_hal_ce_dst_status_get_length(struct hal_ce_srng_dst_status_desc *desc);
+int ath12k_hal_srng_get_entrysize(struct ath12k_base *ab, u32 ring_type);
+int ath12k_hal_srng_get_max_entries(struct ath12k_base *ab, u32 ring_type);
+void ath12k_hal_srng_get_params(struct ath12k_base *ab, struct hal_srng *srng,
+ struct hal_srng_params *params);
+void *ath12k_hal_srng_dst_get_next_entry(struct ath12k_base *ab,
+ struct hal_srng *srng);
+void *ath12k_hal_srng_dst_peek(struct ath12k_base *ab, struct hal_srng *srng);
+int ath12k_hal_srng_dst_num_free(struct ath12k_base *ab, struct hal_srng *srng,
+ bool sync_hw_ptr);
+void *ath12k_hal_srng_src_get_next_reaped(struct ath12k_base *ab,
+ struct hal_srng *srng);
+void *ath12k_hal_srng_src_reap_next(struct ath12k_base *ab,
+ struct hal_srng *srng);
+void *ath12k_hal_srng_src_get_next_entry(struct ath12k_base *ab,
+ struct hal_srng *srng);
+int ath12k_hal_srng_src_num_free(struct ath12k_base *ab, struct hal_srng *srng,
+ bool sync_hw_ptr);
+void ath12k_hal_srng_access_begin(struct ath12k_base *ab,
+ struct hal_srng *srng);
+void ath12k_hal_srng_access_end(struct ath12k_base *ab, struct hal_srng *srng);
+int ath12k_hal_srng_setup(struct ath12k_base *ab, enum hal_ring_type type,
+ int ring_num, int mac_id,
+ struct hal_srng_params *params);
+int ath12k_hal_srng_init(struct ath12k_base *ath12k);
+void ath12k_hal_srng_deinit(struct ath12k_base *ath12k);
+void ath12k_hal_dump_srng_stats(struct ath12k_base *ab);
+void ath12k_hal_srng_get_shadow_config(struct ath12k_base *ab,
+ u32 **cfg, u32 *len);
+int ath12k_hal_srng_update_shadow_config(struct ath12k_base *ab,
+ enum hal_ring_type ring_type,
+ int ring_num);
+void ath12k_hal_srng_shadow_config(struct ath12k_base *ab);
+void ath12k_hal_srng_shadow_update_hp_tp(struct ath12k_base *ab,
+ struct hal_srng *srng);
+#endif
diff --git a/drivers/net/wireless/ath/ath12k/hal_desc.h b/drivers/net/wireless/ath/ath12k/hal_desc.h
new file mode 100644
index 000000000000..2250ca2d19a3
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/hal_desc.h
@@ -0,0 +1,2961 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+#include "core.h"
+
+#ifndef ATH12K_HAL_DESC_H
+#define ATH12K_HAL_DESC_H
+
+#define BUFFER_ADDR_INFO0_ADDR GENMASK(31, 0)
+
+#define BUFFER_ADDR_INFO1_ADDR GENMASK(7, 0)
+#define BUFFER_ADDR_INFO1_RET_BUF_MGR GENMASK(11, 8)
+#define BUFFER_ADDR_INFO1_SW_COOKIE GENMASK(31, 12)
+
+struct ath12k_buffer_addr {
+ __le32 info0;
+ __le32 info1;
+} __packed;
+
+/* ath12k_buffer_addr
+ *
+ * buffer_addr_31_0
+ * Address (lower 32 bits) of the MSDU buffer or MSDU_EXTENSION
+ * descriptor or Link descriptor
+ *
+ * buffer_addr_39_32
+ * Address (upper 8 bits) of the MSDU buffer or MSDU_EXTENSION
+ * descriptor or Link descriptor
+ *
+ * return_buffer_manager (RBM)
+ * Consumer: WBM
+ * Producer: SW/FW
+ * Indicates to which buffer manager the buffer or MSDU_EXTENSION
+ * descriptor or link descriptor that is being pointed to shall be
+ * returned after the frame has been processed. It is used by WBM
+ * for routing purposes.
+ *
+ * Values are defined in enum %HAL_RX_BUF_RBM_
+ *
+ * sw_buffer_cookie
+ * Cookie field exclusively used by SW. HW ignores the contents,
+ * accept that it passes the programmed value on to other
+ * descriptors together with the physical address.
+ *
+ * Field can be used by SW to for example associate the buffers
+ * physical address with the virtual address.
+ *
+ * NOTE1:
+ * The three most significant bits can have a special meaning
+ * in case this struct is embedded in a TX_MPDU_DETAILS STRUCT,
+ * and field transmit_bw_restriction is set
+ *
+ * In case of NON punctured transmission:
+ * Sw_buffer_cookie[19:17] = 3'b000: 20 MHz TX only
+ * Sw_buffer_cookie[19:17] = 3'b001: 40 MHz TX only
+ * Sw_buffer_cookie[19:17] = 3'b010: 80 MHz TX only
+ * Sw_buffer_cookie[19:17] = 3'b011: 160 MHz TX only
+ * Sw_buffer_cookie[19:17] = 3'b101: 240 MHz TX only
+ * Sw_buffer_cookie[19:17] = 3'b100: 320 MHz TX only
+ * Sw_buffer_cookie[19:18] = 2'b11: reserved
+ *
+ * In case of punctured transmission:
+ * Sw_buffer_cookie[19:16] = 4'b0000: pattern 0 only
+ * Sw_buffer_cookie[19:16] = 4'b0001: pattern 1 only
+ * Sw_buffer_cookie[19:16] = 4'b0010: pattern 2 only
+ * Sw_buffer_cookie[19:16] = 4'b0011: pattern 3 only
+ * Sw_buffer_cookie[19:16] = 4'b0100: pattern 4 only
+ * Sw_buffer_cookie[19:16] = 4'b0101: pattern 5 only
+ * Sw_buffer_cookie[19:16] = 4'b0110: pattern 6 only
+ * Sw_buffer_cookie[19:16] = 4'b0111: pattern 7 only
+ * Sw_buffer_cookie[19:16] = 4'b1000: pattern 8 only
+ * Sw_buffer_cookie[19:16] = 4'b1001: pattern 9 only
+ * Sw_buffer_cookie[19:16] = 4'b1010: pattern 10 only
+ * Sw_buffer_cookie[19:16] = 4'b1011: pattern 11 only
+ * Sw_buffer_cookie[19:18] = 2'b11: reserved
+ *
+ * Note: a punctured transmission is indicated by the presence
+ * of TLV TX_PUNCTURE_SETUP embedded in the scheduler TLV
+ *
+ * Sw_buffer_cookie[20:17]: Tid: The TID field in the QoS control
+ * field
+ *
+ * Sw_buffer_cookie[16]: Mpdu_qos_control_valid: This field
+ * indicates MPDUs with a QoS control field.
+ *
+ */
+
+enum hal_tlv_tag {
+ HAL_MACTX_CBF_START = 0 /* 0x0 */,
+ HAL_PHYRX_DATA = 1 /* 0x1 */,
+ HAL_PHYRX_CBF_DATA_RESP = 2 /* 0x2 */,
+ HAL_PHYRX_ABORT_REQUEST = 3 /* 0x3 */,
+ HAL_PHYRX_USER_ABORT_NOTIFICATION = 4 /* 0x4 */,
+ HAL_MACTX_DATA_RESP = 5 /* 0x5 */,
+ HAL_MACTX_CBF_DATA = 6 /* 0x6 */,
+ HAL_MACTX_CBF_DONE = 7 /* 0x7 */,
+ HAL_PHYRX_LMR_DATA_RESP = 8 /* 0x8 */,
+ HAL_RXPCU_TO_UCODE_START = 9 /* 0x9 */,
+ HAL_RXPCU_TO_UCODE_DELIMITER_FOR_FULL_MPDU = 10 /* 0xa */,
+ HAL_RXPCU_TO_UCODE_FULL_MPDU_DATA = 11 /* 0xb */,
+ HAL_RXPCU_TO_UCODE_FCS_STATUS = 12 /* 0xc */,
+ HAL_RXPCU_TO_UCODE_MPDU_DELIMITER = 13 /* 0xd */,
+ HAL_RXPCU_TO_UCODE_DELIMITER_FOR_MPDU_HEADER = 14 /* 0xe */,
+ HAL_RXPCU_TO_UCODE_MPDU_HEADER_DATA = 15 /* 0xf */,
+ HAL_RXPCU_TO_UCODE_END = 16 /* 0x10 */,
+ HAL_MACRX_CBF_READ_REQUEST = 32 /* 0x20 */,
+ HAL_MACRX_CBF_DATA_REQUEST = 33 /* 0x21 */,
+ HAL_MACRXXPECT_NDP_RECEPTION = 34 /* 0x22 */,
+ HAL_MACRX_FREEZE_CAPTURE_CHANNEL = 35 /* 0x23 */,
+ HAL_MACRX_NDP_TIMEOUT = 36 /* 0x24 */,
+ HAL_MACRX_ABORT_ACK = 37 /* 0x25 */,
+ HAL_MACRX_REQ_IMPLICIT_FB = 38 /* 0x26 */,
+ HAL_MACRX_CHAIN_MASK = 39 /* 0x27 */,
+ HAL_MACRX_NAP_USER = 40 /* 0x28 */,
+ HAL_MACRX_ABORT_REQUEST = 41 /* 0x29 */,
+ HAL_PHYTX_OTHER_TRANSMIT_INFO16 = 42 /* 0x2a */,
+ HAL_PHYTX_ABORT_ACK = 43 /* 0x2b */,
+ HAL_PHYTX_ABORT_REQUEST = 44 /* 0x2c */,
+ HAL_PHYTX_PKT_END = 45 /* 0x2d */,
+ HAL_PHYTX_PPDU_HEADER_INFO_REQUEST = 46 /* 0x2e */,
+ HAL_PHYTX_REQUEST_CTRL_INFO = 47 /* 0x2f */,
+ HAL_PHYTX_DATA_REQUEST = 48 /* 0x30 */,
+ HAL_PHYTX_BF_CV_LOADING_DONE = 49 /* 0x31 */,
+ HAL_PHYTX_NAP_ACK = 50 /* 0x32 */,
+ HAL_PHYTX_NAP_DONE = 51 /* 0x33 */,
+ HAL_PHYTX_OFF_ACK = 52 /* 0x34 */,
+ HAL_PHYTX_ON_ACK = 53 /* 0x35 */,
+ HAL_PHYTX_SYNTH_OFF_ACK = 54 /* 0x36 */,
+ HAL_PHYTX_DEBUG16 = 55 /* 0x37 */,
+ HAL_MACTX_ABORT_REQUEST = 56 /* 0x38 */,
+ HAL_MACTX_ABORT_ACK = 57 /* 0x39 */,
+ HAL_MACTX_PKT_END = 58 /* 0x3a */,
+ HAL_MACTX_PRE_PHY_DESC = 59 /* 0x3b */,
+ HAL_MACTX_BF_PARAMS_COMMON = 60 /* 0x3c */,
+ HAL_MACTX_BF_PARAMS_PER_USER = 61 /* 0x3d */,
+ HAL_MACTX_PREFETCH_CV = 62 /* 0x3e */,
+ HAL_MACTX_USER_DESC_COMMON = 63 /* 0x3f */,
+ HAL_MACTX_USER_DESC_PER_USER = 64 /* 0x40 */,
+ HAL_XAMPLE_USER_TLV_16 = 65 /* 0x41 */,
+ HAL_XAMPLE_TLV_16 = 66 /* 0x42 */,
+ HAL_MACTX_PHY_OFF = 67 /* 0x43 */,
+ HAL_MACTX_PHY_ON = 68 /* 0x44 */,
+ HAL_MACTX_SYNTH_OFF = 69 /* 0x45 */,
+ HAL_MACTXXPECT_CBF_COMMON = 70 /* 0x46 */,
+ HAL_MACTXXPECT_CBF_PER_USER = 71 /* 0x47 */,
+ HAL_MACTX_PHY_DESC = 72 /* 0x48 */,
+ HAL_MACTX_L_SIG_A = 73 /* 0x49 */,
+ HAL_MACTX_L_SIG_B = 74 /* 0x4a */,
+ HAL_MACTX_HT_SIG = 75 /* 0x4b */,
+ HAL_MACTX_VHT_SIG_A = 76 /* 0x4c */,
+ HAL_MACTX_VHT_SIG_B_SU20 = 77 /* 0x4d */,
+ HAL_MACTX_VHT_SIG_B_SU40 = 78 /* 0x4e */,
+ HAL_MACTX_VHT_SIG_B_SU80 = 79 /* 0x4f */,
+ HAL_MACTX_VHT_SIG_B_SU160 = 80 /* 0x50 */,
+ HAL_MACTX_VHT_SIG_B_MU20 = 81 /* 0x51 */,
+ HAL_MACTX_VHT_SIG_B_MU40 = 82 /* 0x52 */,
+ HAL_MACTX_VHT_SIG_B_MU80 = 83 /* 0x53 */,
+ HAL_MACTX_VHT_SIG_B_MU160 = 84 /* 0x54 */,
+ HAL_MACTX_SERVICE = 85 /* 0x55 */,
+ HAL_MACTX_HE_SIG_A_SU = 86 /* 0x56 */,
+ HAL_MACTX_HE_SIG_A_MU_DL = 87 /* 0x57 */,
+ HAL_MACTX_HE_SIG_A_MU_UL = 88 /* 0x58 */,
+ HAL_MACTX_HE_SIG_B1_MU = 89 /* 0x59 */,
+ HAL_MACTX_HE_SIG_B2_MU = 90 /* 0x5a */,
+ HAL_MACTX_HE_SIG_B2_OFDMA = 91 /* 0x5b */,
+ HAL_MACTX_DELETE_CV = 92 /* 0x5c */,
+ HAL_MACTX_MU_UPLINK_COMMON = 93 /* 0x5d */,
+ HAL_MACTX_MU_UPLINK_USER_SETUP = 94 /* 0x5e */,
+ HAL_MACTX_OTHER_TRANSMIT_INFO = 95 /* 0x5f */,
+ HAL_MACTX_PHY_NAP = 96 /* 0x60 */,
+ HAL_MACTX_DEBUG = 97 /* 0x61 */,
+ HAL_PHYRX_ABORT_ACK = 98 /* 0x62 */,
+ HAL_PHYRX_GENERATED_CBF_DETAILS = 99 /* 0x63 */,
+ HAL_PHYRX_RSSI_LEGACY = 100 /* 0x64 */,
+ HAL_PHYRX_RSSI_HT = 101 /* 0x65 */,
+ HAL_PHYRX_USER_INFO = 102 /* 0x66 */,
+ HAL_PHYRX_PKT_END = 103 /* 0x67 */,
+ HAL_PHYRX_DEBUG = 104 /* 0x68 */,
+ HAL_PHYRX_CBF_TRANSFER_DONE = 105 /* 0x69 */,
+ HAL_PHYRX_CBF_TRANSFER_ABORT = 106 /* 0x6a */,
+ HAL_PHYRX_L_SIG_A = 107 /* 0x6b */,
+ HAL_PHYRX_L_SIG_B = 108 /* 0x6c */,
+ HAL_PHYRX_HT_SIG = 109 /* 0x6d */,
+ HAL_PHYRX_VHT_SIG_A = 110 /* 0x6e */,
+ HAL_PHYRX_VHT_SIG_B_SU20 = 111 /* 0x6f */,
+ HAL_PHYRX_VHT_SIG_B_SU40 = 112 /* 0x70 */,
+ HAL_PHYRX_VHT_SIG_B_SU80 = 113 /* 0x71 */,
+ HAL_PHYRX_VHT_SIG_B_SU160 = 114 /* 0x72 */,
+ HAL_PHYRX_VHT_SIG_B_MU20 = 115 /* 0x73 */,
+ HAL_PHYRX_VHT_SIG_B_MU40 = 116 /* 0x74 */,
+ HAL_PHYRX_VHT_SIG_B_MU80 = 117 /* 0x75 */,
+ HAL_PHYRX_VHT_SIG_B_MU160 = 118 /* 0x76 */,
+ HAL_PHYRX_HE_SIG_A_SU = 119 /* 0x77 */,
+ HAL_PHYRX_HE_SIG_A_MU_DL = 120 /* 0x78 */,
+ HAL_PHYRX_HE_SIG_A_MU_UL = 121 /* 0x79 */,
+ HAL_PHYRX_HE_SIG_B1_MU = 122 /* 0x7a */,
+ HAL_PHYRX_HE_SIG_B2_MU = 123 /* 0x7b */,
+ HAL_PHYRX_HE_SIG_B2_OFDMA = 124 /* 0x7c */,
+ HAL_PHYRX_OTHER_RECEIVE_INFO = 125 /* 0x7d */,
+ HAL_PHYRX_COMMON_USER_INFO = 126 /* 0x7e */,
+ HAL_PHYRX_DATA_DONE = 127 /* 0x7f */,
+ HAL_COEX_TX_REQ = 128 /* 0x80 */,
+ HAL_DUMMY = 129 /* 0x81 */,
+ HALXAMPLE_TLV_32_NAME = 130 /* 0x82 */,
+ HAL_MPDU_LIMIT = 131 /* 0x83 */,
+ HAL_NA_LENGTH_END = 132 /* 0x84 */,
+ HAL_OLE_BUF_STATUS = 133 /* 0x85 */,
+ HAL_PCU_PPDU_SETUP_DONE = 134 /* 0x86 */,
+ HAL_PCU_PPDU_SETUP_END = 135 /* 0x87 */,
+ HAL_PCU_PPDU_SETUP_INIT = 136 /* 0x88 */,
+ HAL_PCU_PPDU_SETUP_START = 137 /* 0x89 */,
+ HAL_PDG_FES_SETUP = 138 /* 0x8a */,
+ HAL_PDG_RESPONSE = 139 /* 0x8b */,
+ HAL_PDG_TX_REQ = 140 /* 0x8c */,
+ HAL_SCH_WAIT_INSTR = 141 /* 0x8d */,
+ HAL_TQM_FLOWMPTY_STATUS = 143 /* 0x8f */,
+ HAL_TQM_FLOW_NOTMPTY_STATUS = 144 /* 0x90 */,
+ HAL_TQM_GEN_MPDU_LENGTH_LIST = 145 /* 0x91 */,
+ HAL_TQM_GEN_MPDU_LENGTH_LIST_STATUS = 146 /* 0x92 */,
+ HAL_TQM_GEN_MPDUS = 147 /* 0x93 */,
+ HAL_TQM_GEN_MPDUS_STATUS = 148 /* 0x94 */,
+ HAL_TQM_REMOVE_MPDU = 149 /* 0x95 */,
+ HAL_TQM_REMOVE_MPDU_STATUS = 150 /* 0x96 */,
+ HAL_TQM_REMOVE_MSDU = 151 /* 0x97 */,
+ HAL_TQM_REMOVE_MSDU_STATUS = 152 /* 0x98 */,
+ HAL_TQM_UPDATE_TX_MPDU_COUNT = 153 /* 0x99 */,
+ HAL_TQM_WRITE_CMD = 154 /* 0x9a */,
+ HAL_OFDMA_TRIGGER_DETAILS = 155 /* 0x9b */,
+ HAL_TX_DATA = 156 /* 0x9c */,
+ HAL_TX_FES_SETUP = 157 /* 0x9d */,
+ HAL_RX_PACKET = 158 /* 0x9e */,
+ HALXPECTED_RESPONSE = 159 /* 0x9f */,
+ HAL_TX_MPDU_END = 160 /* 0xa0 */,
+ HAL_TX_MPDU_START = 161 /* 0xa1 */,
+ HAL_TX_MSDU_END = 162 /* 0xa2 */,
+ HAL_TX_MSDU_START = 163 /* 0xa3 */,
+ HAL_TX_SW_MODE_SETUP = 164 /* 0xa4 */,
+ HAL_TXPCU_BUFFER_STATUS = 165 /* 0xa5 */,
+ HAL_TXPCU_USER_BUFFER_STATUS = 166 /* 0xa6 */,
+ HAL_DATA_TO_TIME_CONFIG = 167 /* 0xa7 */,
+ HALXAMPLE_USER_TLV_32 = 168 /* 0xa8 */,
+ HAL_MPDU_INFO = 169 /* 0xa9 */,
+ HAL_PDG_USER_SETUP = 170 /* 0xaa */,
+ HAL_TX_11AH_SETUP = 171 /* 0xab */,
+ HAL_REO_UPDATE_RX_REO_QUEUE_STATUS = 172 /* 0xac */,
+ HAL_TX_PEER_ENTRY = 173 /* 0xad */,
+ HAL_TX_RAW_OR_NATIVE_FRAME_SETUP = 174 /* 0xae */,
+ HALXAMPLE_USER_TLV_44 = 175 /* 0xaf */,
+ HAL_TX_FLUSH = 176 /* 0xb0 */,
+ HAL_TX_FLUSH_REQ = 177 /* 0xb1 */,
+ HAL_TQM_WRITE_CMD_STATUS = 178 /* 0xb2 */,
+ HAL_TQM_GET_MPDU_QUEUE_STATS = 179 /* 0xb3 */,
+ HAL_TQM_GET_MSDU_FLOW_STATS = 180 /* 0xb4 */,
+ HALXAMPLE_USER_CTLV_44 = 181 /* 0xb5 */,
+ HAL_TX_FES_STATUS_START = 182 /* 0xb6 */,
+ HAL_TX_FES_STATUS_USER_PPDU = 183 /* 0xb7 */,
+ HAL_TX_FES_STATUS_USER_RESPONSE = 184 /* 0xb8 */,
+ HAL_TX_FES_STATUS_END = 185 /* 0xb9 */,
+ HAL_RX_TRIG_INFO = 186 /* 0xba */,
+ HAL_RXPCU_TX_SETUP_CLEAR = 187 /* 0xbb */,
+ HAL_RX_FRAME_BITMAP_REQ = 188 /* 0xbc */,
+ HAL_RX_FRAME_BITMAP_ACK = 189 /* 0xbd */,
+ HAL_COEX_RX_STATUS = 190 /* 0xbe */,
+ HAL_RX_START_PARAM = 191 /* 0xbf */,
+ HAL_RX_PPDU_START = 192 /* 0xc0 */,
+ HAL_RX_PPDU_END = 193 /* 0xc1 */,
+ HAL_RX_MPDU_START = 194 /* 0xc2 */,
+ HAL_RX_MPDU_END = 195 /* 0xc3 */,
+ HAL_RX_MSDU_START = 196 /* 0xc4 */,
+ HAL_RX_MSDU_END = 197 /* 0xc5 */,
+ HAL_RX_ATTENTION = 198 /* 0xc6 */,
+ HAL_RECEIVED_RESPONSE_INFO = 199 /* 0xc7 */,
+ HAL_RX_PHY_SLEEP = 200 /* 0xc8 */,
+ HAL_RX_HEADER = 201 /* 0xc9 */,
+ HAL_RX_PEER_ENTRY = 202 /* 0xca */,
+ HAL_RX_FLUSH = 203 /* 0xcb */,
+ HAL_RX_RESPONSE_REQUIRED_INFO = 204 /* 0xcc */,
+ HAL_RX_FRAMELESS_BAR_DETAILS = 205 /* 0xcd */,
+ HAL_TQM_GET_MPDU_QUEUE_STATS_STATUS = 206 /* 0xce */,
+ HAL_TQM_GET_MSDU_FLOW_STATS_STATUS = 207 /* 0xcf */,
+ HAL_TX_CBF_INFO = 208 /* 0xd0 */,
+ HAL_PCU_PPDU_SETUP_USER = 209 /* 0xd1 */,
+ HAL_RX_MPDU_PCU_START = 210 /* 0xd2 */,
+ HAL_RX_PM_INFO = 211 /* 0xd3 */,
+ HAL_RX_USER_PPDU_END = 212 /* 0xd4 */,
+ HAL_RX_PRE_PPDU_START = 213 /* 0xd5 */,
+ HAL_RX_PREAMBLE = 214 /* 0xd6 */,
+ HAL_TX_FES_SETUP_COMPLETE = 215 /* 0xd7 */,
+ HAL_TX_LAST_MPDU_FETCHED = 216 /* 0xd8 */,
+ HAL_TXDMA_STOP_REQUEST = 217 /* 0xd9 */,
+ HAL_RXPCU_SETUP = 218 /* 0xda */,
+ HAL_RXPCU_USER_SETUP = 219 /* 0xdb */,
+ HAL_TX_FES_STATUS_ACK_OR_BA = 220 /* 0xdc */,
+ HAL_TQM_ACKED_MPDU = 221 /* 0xdd */,
+ HAL_COEX_TX_RESP = 222 /* 0xde */,
+ HAL_COEX_TX_STATUS = 223 /* 0xdf */,
+ HAL_MACTX_COEX_PHY_CTRL = 224 /* 0xe0 */,
+ HAL_COEX_STATUS_BROADCAST = 225 /* 0xe1 */,
+ HAL_RESPONSE_START_STATUS = 226 /* 0xe2 */,
+ HAL_RESPONSEND_STATUS = 227 /* 0xe3 */,
+ HAL_CRYPTO_STATUS = 228 /* 0xe4 */,
+ HAL_RECEIVED_TRIGGER_INFO = 229 /* 0xe5 */,
+ HAL_COEX_TX_STOP_CTRL = 230 /* 0xe6 */,
+ HAL_RX_PPDU_ACK_REPORT = 231 /* 0xe7 */,
+ HAL_RX_PPDU_NO_ACK_REPORT = 232 /* 0xe8 */,
+ HAL_SCH_COEX_STATUS = 233 /* 0xe9 */,
+ HAL_SCHEDULER_COMMAND_STATUS = 234 /* 0xea */,
+ HAL_SCHEDULER_RX_PPDU_NO_RESPONSE_STATUS = 235 /* 0xeb */,
+ HAL_TX_FES_STATUS_PROT = 236 /* 0xec */,
+ HAL_TX_FES_STATUS_START_PPDU = 237 /* 0xed */,
+ HAL_TX_FES_STATUS_START_PROT = 238 /* 0xee */,
+ HAL_TXPCU_PHYTX_DEBUG32 = 239 /* 0xef */,
+ HAL_TXPCU_PHYTX_OTHER_TRANSMIT_INFO32 = 240 /* 0xf0 */,
+ HAL_TX_MPDU_COUNT_TRANSFERND = 241 /* 0xf1 */,
+ HAL_WHO_ANCHOR_OFFSET = 242 /* 0xf2 */,
+ HAL_WHO_ANCHOR_VALUE = 243 /* 0xf3 */,
+ HAL_WHO_CCE_INFO = 244 /* 0xf4 */,
+ HAL_WHO_COMMIT = 245 /* 0xf5 */,
+ HAL_WHO_COMMIT_DONE = 246 /* 0xf6 */,
+ HAL_WHO_FLUSH = 247 /* 0xf7 */,
+ HAL_WHO_L2_LLC = 248 /* 0xf8 */,
+ HAL_WHO_L2_PAYLOAD = 249 /* 0xf9 */,
+ HAL_WHO_L3_CHECKSUM = 250 /* 0xfa */,
+ HAL_WHO_L3_INFO = 251 /* 0xfb */,
+ HAL_WHO_L4_CHECKSUM = 252 /* 0xfc */,
+ HAL_WHO_L4_INFO = 253 /* 0xfd */,
+ HAL_WHO_MSDU = 254 /* 0xfe */,
+ HAL_WHO_MSDU_MISC = 255 /* 0xff */,
+ HAL_WHO_PACKET_DATA = 256 /* 0x100 */,
+ HAL_WHO_PACKET_HDR = 257 /* 0x101 */,
+ HAL_WHO_PPDU_END = 258 /* 0x102 */,
+ HAL_WHO_PPDU_START = 259 /* 0x103 */,
+ HAL_WHO_TSO = 260 /* 0x104 */,
+ HAL_WHO_WMAC_HEADER_PV0 = 261 /* 0x105 */,
+ HAL_WHO_WMAC_HEADER_PV1 = 262 /* 0x106 */,
+ HAL_WHO_WMAC_IV = 263 /* 0x107 */,
+ HAL_MPDU_INFO_END = 264 /* 0x108 */,
+ HAL_MPDU_INFO_BITMAP = 265 /* 0x109 */,
+ HAL_TX_QUEUE_EXTENSION = 266 /* 0x10a */,
+ HAL_SCHEDULER_SELFGEN_RESPONSE_STATUS = 267 /* 0x10b */,
+ HAL_TQM_UPDATE_TX_MPDU_COUNT_STATUS = 268 /* 0x10c */,
+ HAL_TQM_ACKED_MPDU_STATUS = 269 /* 0x10d */,
+ HAL_TQM_ADD_MSDU_STATUS = 270 /* 0x10e */,
+ HAL_TQM_LIST_GEN_DONE = 271 /* 0x10f */,
+ HAL_WHO_TERMINATE = 272 /* 0x110 */,
+ HAL_TX_LAST_MPDU_END = 273 /* 0x111 */,
+ HAL_TX_CV_DATA = 274 /* 0x112 */,
+ HAL_PPDU_TX_END = 275 /* 0x113 */,
+ HAL_PROT_TX_END = 276 /* 0x114 */,
+ HAL_MPDU_INFO_GLOBAL_END = 277 /* 0x115 */,
+ HAL_TQM_SCH_INSTR_GLOBAL_END = 278 /* 0x116 */,
+ HAL_RX_PPDU_END_USER_STATS = 279 /* 0x117 */,
+ HAL_RX_PPDU_END_USER_STATS_EXT = 280 /* 0x118 */,
+ HAL_REO_GET_QUEUE_STATS = 281 /* 0x119 */,
+ HAL_REO_FLUSH_QUEUE = 282 /* 0x11a */,
+ HAL_REO_FLUSH_CACHE = 283 /* 0x11b */,
+ HAL_REO_UNBLOCK_CACHE = 284 /* 0x11c */,
+ HAL_REO_GET_QUEUE_STATS_STATUS = 285 /* 0x11d */,
+ HAL_REO_FLUSH_QUEUE_STATUS = 286 /* 0x11e */,
+ HAL_REO_FLUSH_CACHE_STATUS = 287 /* 0x11f */,
+ HAL_REO_UNBLOCK_CACHE_STATUS = 288 /* 0x120 */,
+ HAL_TQM_FLUSH_CACHE = 289 /* 0x121 */,
+ HAL_TQM_UNBLOCK_CACHE = 290 /* 0x122 */,
+ HAL_TQM_FLUSH_CACHE_STATUS = 291 /* 0x123 */,
+ HAL_TQM_UNBLOCK_CACHE_STATUS = 292 /* 0x124 */,
+ HAL_RX_PPDU_END_STATUS_DONE = 293 /* 0x125 */,
+ HAL_RX_STATUS_BUFFER_DONE = 294 /* 0x126 */,
+ HAL_TX_DATA_SYNC = 297 /* 0x129 */,
+ HAL_PHYRX_CBF_READ_REQUEST_ACK = 298 /* 0x12a */,
+ HAL_TQM_GET_MPDU_HEAD_INFO = 299 /* 0x12b */,
+ HAL_TQM_SYNC_CMD = 300 /* 0x12c */,
+ HAL_TQM_GET_MPDU_HEAD_INFO_STATUS = 301 /* 0x12d */,
+ HAL_TQM_SYNC_CMD_STATUS = 302 /* 0x12e */,
+ HAL_TQM_THRESHOLD_DROP_NOTIFICATION_STATUS = 303 /* 0x12f */,
+ HAL_TQM_DESCRIPTOR_THRESHOLD_REACHED_STATUS = 304 /* 0x130 */,
+ HAL_REO_FLUSH_TIMEOUT_LIST = 305 /* 0x131 */,
+ HAL_REO_FLUSH_TIMEOUT_LIST_STATUS = 306 /* 0x132 */,
+ HAL_REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS = 307 /* 0x133 */,
+ HAL_SCHEDULER_RX_SIFS_RESPONSE_TRIGGER_STATUS = 308 /* 0x134 */,
+ HALXAMPLE_USER_TLV_32_NAME = 309 /* 0x135 */,
+ HAL_RX_PPDU_START_USER_INFO = 310 /* 0x136 */,
+ HAL_RX_RING_MASK = 311 /* 0x137 */,
+ HAL_COEX_MAC_NAP = 312 /* 0x138 */,
+ HAL_RXPCU_PPDU_END_INFO = 313 /* 0x139 */,
+ HAL_WHO_MESH_CONTROL = 314 /* 0x13a */,
+ HAL_PDG_SW_MODE_BW_START = 315 /* 0x13b */,
+ HAL_PDG_SW_MODE_BW_END = 316 /* 0x13c */,
+ HAL_PDG_WAIT_FOR_MAC_REQUEST = 317 /* 0x13d */,
+ HAL_PDG_WAIT_FOR_PHY_REQUEST = 318 /* 0x13e */,
+ HAL_SCHEDULER_END = 319 /* 0x13f */,
+ HAL_RX_PPDU_START_DROPPED = 320 /* 0x140 */,
+ HAL_RX_PPDU_END_DROPPED = 321 /* 0x141 */,
+ HAL_RX_PPDU_END_STATUS_DONE_DROPPED = 322 /* 0x142 */,
+ HAL_RX_MPDU_START_DROPPED = 323 /* 0x143 */,
+ HAL_RX_MSDU_START_DROPPED = 324 /* 0x144 */,
+ HAL_RX_MSDU_END_DROPPED = 325 /* 0x145 */,
+ HAL_RX_MPDU_END_DROPPED = 326 /* 0x146 */,
+ HAL_RX_ATTENTION_DROPPED = 327 /* 0x147 */,
+ HAL_TXPCU_USER_SETUP = 328 /* 0x148 */,
+ HAL_RXPCU_USER_SETUP_EXT = 329 /* 0x149 */,
+ HAL_CMD_PART_0_END = 330 /* 0x14a */,
+ HAL_MACTX_SYNTH_ON = 331 /* 0x14b */,
+ HAL_SCH_CRITICAL_TLV_REFERENCE = 332 /* 0x14c */,
+ HAL_TQM_MPDU_GLOBAL_START = 333 /* 0x14d */,
+ HALXAMPLE_TLV_32 = 334 /* 0x14e */,
+ HAL_TQM_UPDATE_TX_MSDU_FLOW = 335 /* 0x14f */,
+ HAL_TQM_UPDATE_TX_MPDU_QUEUE_HEAD = 336 /* 0x150 */,
+ HAL_TQM_UPDATE_TX_MSDU_FLOW_STATUS = 337 /* 0x151 */,
+ HAL_TQM_UPDATE_TX_MPDU_QUEUE_HEAD_STATUS = 338 /* 0x152 */,
+ HAL_REO_UPDATE_RX_REO_QUEUE = 339 /* 0x153 */,
+ HAL_TQM_MPDU_QUEUEMPTY_STATUS = 340 /* 0x154 */,
+ HAL_TQM_2_SCH_MPDU_AVAILABLE = 341 /* 0x155 */,
+ HAL_PDG_TRIG_RESPONSE = 342 /* 0x156 */,
+ HAL_TRIGGER_RESPONSE_TX_DONE = 343 /* 0x157 */,
+ HAL_ABORT_FROM_PHYRX_DETAILS = 344 /* 0x158 */,
+ HAL_SCH_TQM_CMD_WRAPPER = 345 /* 0x159 */,
+ HAL_MPDUS_AVAILABLE = 346 /* 0x15a */,
+ HAL_RECEIVED_RESPONSE_INFO_PART2 = 347 /* 0x15b */,
+ HAL_PHYRX_TX_START_TIMING = 348 /* 0x15c */,
+ HAL_TXPCU_PREAMBLE_DONE = 349 /* 0x15d */,
+ HAL_NDP_PREAMBLE_DONE = 350 /* 0x15e */,
+ HAL_SCH_TQM_CMD_WRAPPER_RBO_DROP = 351 /* 0x15f */,
+ HAL_SCH_TQM_CMD_WRAPPER_CONT_DROP = 352 /* 0x160 */,
+ HAL_MACTX_CLEAR_PREV_TX_INFO = 353 /* 0x161 */,
+ HAL_TX_PUNCTURE_SETUP = 354 /* 0x162 */,
+ HAL_R2R_STATUS_END = 355 /* 0x163 */,
+ HAL_MACTX_PREFETCH_CV_COMMON = 356 /* 0x164 */,
+ HAL_END_OF_FLUSH_MARKER = 357 /* 0x165 */,
+ HAL_MACTX_MU_UPLINK_COMMON_PUNC = 358 /* 0x166 */,
+ HAL_MACTX_MU_UPLINK_USER_SETUP_PUNC = 359 /* 0x167 */,
+ HAL_RECEIVED_RESPONSE_USER_7_0 = 360 /* 0x168 */,
+ HAL_RECEIVED_RESPONSE_USER_15_8 = 361 /* 0x169 */,
+ HAL_RECEIVED_RESPONSE_USER_23_16 = 362 /* 0x16a */,
+ HAL_RECEIVED_RESPONSE_USER_31_24 = 363 /* 0x16b */,
+ HAL_RECEIVED_RESPONSE_USER_36_32 = 364 /* 0x16c */,
+ HAL_TX_LOOPBACK_SETUP = 365 /* 0x16d */,
+ HAL_PHYRX_OTHER_RECEIVE_INFO_RU_DETAILS = 366 /* 0x16e */,
+ HAL_SCH_WAIT_INSTR_TX_PATH = 367 /* 0x16f */,
+ HAL_MACTX_OTHER_TRANSMIT_INFO_TX2TX = 368 /* 0x170 */,
+ HAL_MACTX_OTHER_TRANSMIT_INFOMUPHY_SETUP = 369 /* 0x171 */,
+ HAL_PHYRX_OTHER_RECEIVE_INFOVM_DETAILS = 370 /* 0x172 */,
+ HAL_TX_WUR_DATA = 371 /* 0x173 */,
+ HAL_RX_PPDU_END_START = 372 /* 0x174 */,
+ HAL_RX_PPDU_END_MIDDLE = 373 /* 0x175 */,
+ HAL_RX_PPDU_END_LAST = 374 /* 0x176 */,
+ HAL_MACTX_BACKOFF_BASED_TRANSMISSION = 375 /* 0x177 */,
+ HAL_MACTX_OTHER_TRANSMIT_INFO_DL_OFDMA_TX = 376 /* 0x178 */,
+ HAL_SRP_INFO = 377 /* 0x179 */,
+ HAL_OBSS_SR_INFO = 378 /* 0x17a */,
+ HAL_SCHEDULER_SW_MSG_STATUS = 379 /* 0x17b */,
+ HAL_HWSCH_RXPCU_MAC_INFO_ANNOUNCEMENT = 380 /* 0x17c */,
+ HAL_RXPCU_SETUP_COMPLETE = 381 /* 0x17d */,
+ HAL_SNOOP_PPDU_START = 382 /* 0x17e */,
+ HAL_SNOOP_MPDU_USR_DBG_INFO = 383 /* 0x17f */,
+ HAL_SNOOP_MSDU_USR_DBG_INFO = 384 /* 0x180 */,
+ HAL_SNOOP_MSDU_USR_DATA = 385 /* 0x181 */,
+ HAL_SNOOP_MPDU_USR_STAT_INFO = 386 /* 0x182 */,
+ HAL_SNOOP_PPDU_END = 387 /* 0x183 */,
+ HAL_SNOOP_SPARE = 388 /* 0x184 */,
+ HAL_PHYRX_OTHER_RECEIVE_INFO_MU_RSSI_COMMON = 390 /* 0x186 */,
+ HAL_PHYRX_OTHER_RECEIVE_INFO_MU_RSSI_USER = 391 /* 0x187 */,
+ HAL_MACTX_OTHER_TRANSMIT_INFO_SCH_DETAILS = 392 /* 0x188 */,
+ HAL_PHYRX_OTHER_RECEIVE_INFO_108PVM_DETAILS = 393 /* 0x189 */,
+ HAL_SCH_TLV_WRAPPER = 394 /* 0x18a */,
+ HAL_SCHEDULER_STATUS_WRAPPER = 395 /* 0x18b */,
+ HAL_MPDU_INFO_6X = 396 /* 0x18c */,
+ HAL_MACTX_11AZ_USER_DESC_PER_USER = 397 /* 0x18d */,
+ HAL_MACTX_U_SIGHT_SU_MU = 398 /* 0x18e */,
+ HAL_MACTX_U_SIGHT_TB = 399 /* 0x18f */,
+ HAL_PHYRX_U_SIGHT_SU_MU = 403 /* 0x193 */,
+ HAL_PHYRX_U_SIGHT_TB = 404 /* 0x194 */,
+ HAL_MACRX_LMR_READ_REQUEST = 408 /* 0x198 */,
+ HAL_MACRX_LMR_DATA_REQUEST = 409 /* 0x199 */,
+ HAL_PHYRX_LMR_TRANSFER_DONE = 410 /* 0x19a */,
+ HAL_PHYRX_LMR_TRANSFER_ABORT = 411 /* 0x19b */,
+ HAL_PHYRX_LMR_READ_REQUEST_ACK = 412 /* 0x19c */,
+ HAL_MACRX_SECURE_LTF_SEQ_PTR = 413 /* 0x19d */,
+ HAL_PHYRX_USER_INFO_MU_UL = 414 /* 0x19e */,
+ HAL_MPDU_QUEUE_OVERVIEW = 415 /* 0x19f */,
+ HAL_SCHEDULER_NAV_INFO = 416 /* 0x1a0 */,
+ HAL_LMR_PEER_ENTRY = 418 /* 0x1a2 */,
+ HAL_LMR_MPDU_START = 419 /* 0x1a3 */,
+ HAL_LMR_DATA = 420 /* 0x1a4 */,
+ HAL_LMR_MPDU_END = 421 /* 0x1a5 */,
+ HAL_REO_GET_QUEUE_1K_STATS_STATUS = 422 /* 0x1a6 */,
+ HAL_RX_FRAME_1K_BITMAP_ACK = 423 /* 0x1a7 */,
+ HAL_TX_FES_STATUS_1K_BA = 424 /* 0x1a8 */,
+ HAL_TQM_ACKED_1K_MPDU = 425 /* 0x1a9 */,
+ HAL_MACRX_INBSS_OBSS_IND = 426 /* 0x1aa */,
+ HAL_PHYRX_LOCATION = 427 /* 0x1ab */,
+ HAL_MLO_TX_NOTIFICATION_SU = 428 /* 0x1ac */,
+ HAL_MLO_TX_NOTIFICATION_MU = 429 /* 0x1ad */,
+ HAL_MLO_TX_REQ_SU = 430 /* 0x1ae */,
+ HAL_MLO_TX_REQ_MU = 431 /* 0x1af */,
+ HAL_MLO_TX_RESP = 432 /* 0x1b0 */,
+ HAL_MLO_RX_NOTIFICATION = 433 /* 0x1b1 */,
+ HAL_MLO_BKOFF_TRUNC_REQ = 434 /* 0x1b2 */,
+ HAL_MLO_TBTT_NOTIFICATION = 435 /* 0x1b3 */,
+ HAL_MLO_MESSAGE = 436 /* 0x1b4 */,
+ HAL_MLO_TS_SYNC_MSG = 437 /* 0x1b5 */,
+ HAL_MLO_FES_SETUP = 438 /* 0x1b6 */,
+ HAL_MLO_PDG_FES_SETUP_SU = 439 /* 0x1b7 */,
+ HAL_MLO_PDG_FES_SETUP_MU = 440 /* 0x1b8 */,
+ HAL_MPDU_INFO_1K_BITMAP = 441 /* 0x1b9 */,
+ HAL_MON_BUF_ADDR = 442 /* 0x1ba */,
+ HAL_TX_FRAG_STATE = 443 /* 0x1bb */,
+ HAL_MACTXHT_SIG_USR_OFDMA = 446 /* 0x1be */,
+ HAL_PHYRXHT_SIG_CMN_PUNC = 448 /* 0x1c0 */,
+ HAL_PHYRXHT_SIG_CMN_OFDMA = 450 /* 0x1c2 */,
+ HAL_PHYRXHT_SIG_USR_OFDMA = 454 /* 0x1c6 */,
+ HAL_PHYRX_PKT_END_PART1 = 456 /* 0x1c8 */,
+ HAL_MACTXXPECT_NDP_RECEPTION = 457 /* 0x1c9 */,
+ HAL_MACTX_SECURE_LTF_SEQ_PTR = 458 /* 0x1ca */,
+ HAL_MLO_PDG_BKOFF_TRUNC_NOTIFY = 460 /* 0x1cc */,
+ HAL_PHYRX_11AZ_INTEGRITY_DATA = 461 /* 0x1cd */,
+ HAL_PHYTX_LOCATION = 462 /* 0x1ce */,
+ HAL_PHYTX_11AZ_INTEGRITY_DATA = 463 /* 0x1cf */,
+ HAL_MACTXHT_SIG_USR_SU = 466 /* 0x1d2 */,
+ HAL_MACTXHT_SIG_USR_MU_MIMO = 467 /* 0x1d3 */,
+ HAL_PHYRXHT_SIG_USR_SU = 468 /* 0x1d4 */,
+ HAL_PHYRXHT_SIG_USR_MU_MIMO = 469 /* 0x1d5 */,
+ HAL_PHYRX_GENERIC_U_SIG = 470 /* 0x1d6 */,
+ HAL_PHYRX_GENERICHT_SIG = 471 /* 0x1d7 */,
+ HAL_OVERWRITE_RESP_START = 472 /* 0x1d8 */,
+ HAL_OVERWRITE_RESP_PREAMBLE_INFO = 473 /* 0x1d9 */,
+ HAL_OVERWRITE_RESP_FRAME_INFO = 474 /* 0x1da */,
+ HAL_OVERWRITE_RESP_END = 475 /* 0x1db */,
+ HAL_RXPCUARLY_RX_INDICATION = 476 /* 0x1dc */,
+ HAL_MON_DROP = 477 /* 0x1dd */,
+ HAL_MACRX_MU_UPLINK_COMMON_SNIFF = 478 /* 0x1de */,
+ HAL_MACRX_MU_UPLINK_USER_SETUP_SNIFF = 479 /* 0x1df */,
+ HAL_MACRX_MU_UPLINK_USER_SEL_SNIFF = 480 /* 0x1e0 */,
+ HAL_MACRX_MU_UPLINK_FCS_STATUS_SNIFF = 481 /* 0x1e1 */,
+ HAL_MACTX_PREFETCH_CV_DMA = 482 /* 0x1e2 */,
+ HAL_MACTX_PREFETCH_CV_PER_USER = 483 /* 0x1e3 */,
+ HAL_PHYRX_OTHER_RECEIVE_INFO_ALL_SIGB_DETAILS = 484 /* 0x1e4 */,
+ HAL_MACTX_BF_PARAMS_UPDATE_COMMON = 485 /* 0x1e5 */,
+ HAL_MACTX_BF_PARAMS_UPDATE_PER_USER = 486 /* 0x1e6 */,
+ HAL_RANGING_USER_DETAILS = 487 /* 0x1e7 */,
+ HAL_PHYTX_CV_CORR_STATUS = 488 /* 0x1e8 */,
+ HAL_PHYTX_CV_CORR_COMMON = 489 /* 0x1e9 */,
+ HAL_PHYTX_CV_CORR_USER = 490 /* 0x1ea */,
+ HAL_MACTX_CV_CORR_COMMON = 491 /* 0x1eb */,
+ HAL_MACTX_CV_CORR_MAC_INFO_GROUP = 492 /* 0x1ec */,
+ HAL_BW_PUNCTUREVAL_WRAPPER = 493 /* 0x1ed */,
+ HAL_MACTX_RX_NOTIFICATION_FOR_PHY = 494 /* 0x1ee */,
+ HAL_MACTX_TX_NOTIFICATION_FOR_PHY = 495 /* 0x1ef */,
+ HAL_MACTX_MU_UPLINK_COMMON_PER_BW = 496 /* 0x1f0 */,
+ HAL_MACTX_MU_UPLINK_USER_SETUP_PER_BW = 497 /* 0x1f1 */,
+ HAL_RX_PPDU_END_USER_STATS_EXT2 = 498 /* 0x1f2 */,
+ HAL_FW2SW_MON = 499 /* 0x1f3 */,
+ HAL_WSI_DIRECT_MESSAGE = 500 /* 0x1f4 */,
+ HAL_MACTXMLSR_PRE_SWITCH = 501 /* 0x1f5 */,
+ HAL_MACTXMLSR_SWITCH = 502 /* 0x1f6 */,
+ HAL_MACTXMLSR_SWITCH_BACK = 503 /* 0x1f7 */,
+ HAL_PHYTXMLSR_SWITCH_ACK = 504 /* 0x1f8 */,
+ HAL_PHYTXMLSR_SWITCH_BACK_ACK = 505 /* 0x1f9 */,
+ HAL_SPARE_REUSE_TAG_0 = 506 /* 0x1fa */,
+ HAL_SPARE_REUSE_TAG_1 = 507 /* 0x1fb */,
+ HAL_SPARE_REUSE_TAG_2 = 508 /* 0x1fc */,
+ HAL_SPARE_REUSE_TAG_3 = 509 /* 0x1fd */,
+ /* FIXME: Assign correct value for HAL_TCL_DATA_CMD */
+ HAL_TCL_DATA_CMD = 510,
+ HAL_TLV_BASE = 511 /* 0x1ff */,
+};
+
+#define HAL_TLV_HDR_TAG GENMASK(9, 1)
+#define HAL_TLV_HDR_LEN GENMASK(25, 10)
+#define HAL_TLV_USR_ID GENMASK(31, 26)
+
+#define HAL_TLV_ALIGN 4
+
+struct hal_tlv_hdr {
+ __le32 tl;
+ u8 value[];
+} __packed;
+
+#define HAL_TLV_64_HDR_TAG GENMASK(9, 1)
+#define HAL_TLV_64_HDR_LEN GENMASK(21, 10)
+
+struct hal_tlv_64_hdr {
+ u64 tl;
+ u8 value[];
+} __packed;
+
+#define RX_MPDU_DESC_INFO0_MSDU_COUNT GENMASK(7, 0)
+#define RX_MPDU_DESC_INFO0_FRAG_FLAG BIT(8)
+#define RX_MPDU_DESC_INFO0_MPDU_RETRY BIT(9)
+#define RX_MPDU_DESC_INFO0_AMPDU_FLAG BIT(10)
+#define RX_MPDU_DESC_INFO0_BAR_FRAME BIT(11)
+#define RX_MPDU_DESC_INFO0_VALID_PN BIT(12)
+#define RX_MPDU_DESC_INFO0_RAW_MPDU BIT(13)
+#define RX_MPDU_DESC_INFO0_MORE_FRAG_FLAG BIT(14)
+#define RX_MPDU_DESC_INFO0_SRC_INFO GENMASK(26, 15)
+#define RX_MPDU_DESC_INFO0_MPDU_QOS_CTRL_VALID BIT(27)
+#define RX_MPDU_DESC_INFO0_TID GENMASK(31, 28)
+
+/* TODO revisit after meta data is concluded */
+#define RX_MPDU_DESC_META_DATA_PEER_ID GENMASK(15, 0)
+
+struct rx_mpdu_desc {
+ __le32 info0; /* %RX_MPDU_DESC_INFO */
+ __le32 peer_meta_data;
+} __packed;
+
+/* rx_mpdu_desc
+ * Producer: RXDMA
+ * Consumer: REO/SW/FW
+ *
+ * msdu_count
+ * The number of MSDUs within the MPDU
+ *
+ * fragment_flag
+ * When set, this MPDU is a fragment and REO should forward this
+ * fragment MPDU to the REO destination ring without any reorder
+ * checks, pn checks or bitmap update. This implies that REO is
+ * forwarding the pointer to the MSDU link descriptor.
+ *
+ * mpdu_retry_bit
+ * The retry bit setting from the MPDU header of the received frame
+ *
+ * ampdu_flag
+ * Indicates the MPDU was received as part of an A-MPDU.
+ *
+ * bar_frame
+ * Indicates the received frame is a BAR frame. After processing,
+ * this frame shall be pushed to SW or deleted.
+ *
+ * valid_pn
+ * When not set, REO will not perform a PN sequence number check.
+ *
+ * raw_mpdu
+ * Field only valid when first_msdu_in_mpdu_flag is set. Indicates
+ * the contents in the MSDU buffer contains a 'RAW' MPDU. This
+ * 'RAW' MPDU might be spread out over multiple MSDU buffers.
+ *
+ * more_fragment_flag
+ * The More Fragment bit setting from the MPDU header of the
+ * received frame
+ *
+ * src_info
+ * Source (Virtual) device/interface info associated with this peer.
+ * This field gets passed on by REO to PPE in the EDMA descriptor.
+ *
+ * mpdu_qos_control_valid
+ * When set, the MPDU has a QoS control field
+ *
+ * tid
+ * Field only valid when mpdu_qos_control_valid is set
+ */
+
+enum hal_rx_msdu_desc_reo_dest_ind {
+ HAL_RX_MSDU_DESC_REO_DEST_IND_TCL,
+ HAL_RX_MSDU_DESC_REO_DEST_IND_SW1,
+ HAL_RX_MSDU_DESC_REO_DEST_IND_SW2,
+ HAL_RX_MSDU_DESC_REO_DEST_IND_SW3,
+ HAL_RX_MSDU_DESC_REO_DEST_IND_SW4,
+ HAL_RX_MSDU_DESC_REO_DEST_IND_RELEASE,
+ HAL_RX_MSDU_DESC_REO_DEST_IND_FW,
+ HAL_RX_MSDU_DESC_REO_DEST_IND_SW5,
+ HAL_RX_MSDU_DESC_REO_DEST_IND_SW6,
+ HAL_RX_MSDU_DESC_REO_DEST_IND_SW7,
+ HAL_RX_MSDU_DESC_REO_DEST_IND_SW8,
+};
+
+#define RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU BIT(0)
+#define RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU BIT(1)
+#define RX_MSDU_DESC_INFO0_MSDU_CONTINUATION BIT(2)
+#define RX_MSDU_DESC_INFO0_MSDU_LENGTH GENMASK(16, 3)
+#define RX_MSDU_DESC_INFO0_MSDU_DROP BIT(17)
+#define RX_MSDU_DESC_INFO0_VALID_SA BIT(18)
+#define RX_MSDU_DESC_INFO0_VALID_DA BIT(19)
+#define RX_MSDU_DESC_INFO0_DA_MCBC BIT(20)
+#define RX_MSDU_DESC_INFO0_L3_HDR_PAD_MSB BIT(21)
+#define RX_MSDU_DESC_INFO0_TCP_UDP_CHKSUM_FAIL BIT(22)
+#define RX_MSDU_DESC_INFO0_IP_CHKSUM_FAIL BIT(23)
+#define RX_MSDU_DESC_INFO0_FROM_DS BIT(24)
+#define RX_MSDU_DESC_INFO0_TO_DS BIT(25)
+#define RX_MSDU_DESC_INFO0_INTRA_BSS BIT(26)
+#define RX_MSDU_DESC_INFO0_DST_CHIP_ID GENMASK(28, 27)
+#define RX_MSDU_DESC_INFO0_DECAP_FORMAT GENMASK(30, 29)
+
+#define HAL_RX_MSDU_PKT_LENGTH_GET(val) \
+ (u32_get_bits((val), RX_MSDU_DESC_INFO0_MSDU_LENGTH))
+
+struct rx_msdu_desc {
+ __le32 info0;
+} __packed;
+
+/* rx_msdu_desc
+ *
+ * first_msdu_in_mpdu
+ * Indicates first msdu in mpdu.
+ *
+ * last_msdu_in_mpdu
+ * Indicates last msdu in mpdu. This flag can be true only when
+ * 'Msdu_continuation' set to 0. This implies that when an msdu
+ * is spread out over multiple buffers and thus msdu_continuation
+ * is set, only for the very last buffer of the msdu, can the
+ * 'last_msdu_in_mpdu' be set.
+ *
+ * When both first_msdu_in_mpdu and last_msdu_in_mpdu are set,
+ * the MPDU that this MSDU belongs to only contains a single MSDU.
+ *
+ * msdu_continuation
+ * When set, this MSDU buffer was not able to hold the entire MSDU.
+ * The next buffer will therefor contain additional information
+ * related to this MSDU.
+ *
+ * msdu_length
+ * Field is only valid in combination with the 'first_msdu_in_mpdu'
+ * being set. Full MSDU length in bytes after decapsulation. This
+ * field is still valid for MPDU frames without A-MSDU. It still
+ * represents MSDU length after decapsulation Or in case of RAW
+ * MPDUs, it indicates the length of the entire MPDU (without FCS
+ * field).
+ *
+ * msdu_drop
+ * Indicates that REO shall drop this MSDU and not forward it to
+ * any other ring.
+ *
+ * valid_sa
+ * Indicates OLE found a valid SA entry for this MSDU.
+ *
+ * valid_da
+ * When set, OLE found a valid DA entry for this MSDU.
+ *
+ * da_mcbc
+ * Field Only valid if valid_da is set. Indicates the DA address
+ * is a Multicast or Broadcast address for this MSDU.
+ *
+ * l3_header_padding_msb
+ * Passed on from 'RX_MSDU_END' TLV (only the MSB is reported as
+ * the LSB is always zero). Number of bytes padded to make sure
+ * that the L3 header will always start of a Dword boundary
+ *
+ * tcp_udp_checksum_fail
+ * Passed on from 'RX_ATTENTION' TLV
+ * Indicates that the computed checksum did not match the checksum
+ * in the TCP/UDP header.
+ *
+ * ip_checksum_fail
+ * Passed on from 'RX_ATTENTION' TLV
+ * Indicates that the computed checksum did not match the checksum
+ * in the IP header.
+ *
+ * from_DS
+ * Set if the 'from DS' bit is set in the frame control.
+ *
+ * to_DS
+ * Set if the 'to DS' bit is set in the frame control.
+ *
+ * intra_bss
+ * This packet needs intra-BSS routing by SW as the 'vdev_id'
+ * for the destination is the same as the 'vdev_id' that this
+ * MSDU was got in.
+ *
+ * dest_chip_id
+ * If intra_bss is set, copied by RXOLE/RXDMA from 'ADDR_SEARCH_ENTRY'
+ * to support intra-BSS routing with multi-chip multi-link operation.
+ * This indicates into which chip's TCL the packet should be queued.
+ *
+ * decap_format
+ * Indicates the format after decapsulation:
+ */
+
+#define RX_MSDU_EXT_DESC_INFO0_REO_DEST_IND GENMASK(4, 0)
+#define RX_MSDU_EXT_DESC_INFO0_SERVICE_CODE GENMASK(13, 5)
+#define RX_MSDU_EXT_DESC_INFO0_PRIORITY_VALID BIT(14)
+#define RX_MSDU_EXT_DESC_INFO0_DATA_OFFSET GENMASK(26, 15)
+#define RX_MSDU_EXT_DESC_INFO0_SRC_LINK_ID GENMASK(29, 27)
+
+struct rx_msdu_ext_desc {
+ __le32 info0;
+} __packed;
+
+/* rx_msdu_ext_desc
+ *
+ * reo_destination_indication
+ * The ID of the REO exit ring where the MSDU frame shall push
+ * after (MPDU level) reordering has finished.
+ *
+ * service_code
+ * Opaque service code between PPE and Wi-Fi
+ *
+ * priority_valid
+ *
+ * data_offset
+ * The offset to Rx packet data within the buffer (including
+ * Rx DMA offset programming and L3 header padding inserted
+ * by Rx OLE).
+ *
+ * src_link_id
+ * Set to the link ID of the PMAC that received the frame
+ */
+
+enum hal_reo_dest_ring_buffer_type {
+ HAL_REO_DEST_RING_BUFFER_TYPE_MSDU,
+ HAL_REO_DEST_RING_BUFFER_TYPE_LINK_DESC,
+};
+
+enum hal_reo_dest_ring_push_reason {
+ HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED,
+ HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION,
+};
+
+enum hal_reo_dest_ring_error_code {
+ HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO,
+ HAL_REO_DEST_RING_ERROR_CODE_DESC_INVALID,
+ HAL_REO_DEST_RING_ERROR_CODE_AMPDU_IN_NON_BA,
+ HAL_REO_DEST_RING_ERROR_CODE_NON_BA_DUPLICATE,
+ HAL_REO_DEST_RING_ERROR_CODE_BA_DUPLICATE,
+ HAL_REO_DEST_RING_ERROR_CODE_FRAME_2K_JUMP,
+ HAL_REO_DEST_RING_ERROR_CODE_BAR_2K_JUMP,
+ HAL_REO_DEST_RING_ERROR_CODE_FRAME_OOR,
+ HAL_REO_DEST_RING_ERROR_CODE_BAR_OOR,
+ HAL_REO_DEST_RING_ERROR_CODE_NO_BA_SESSION,
+ HAL_REO_DEST_RING_ERROR_CODE_FRAME_SN_EQUALS_SSN,
+ HAL_REO_DEST_RING_ERROR_CODE_PN_CHECK_FAILED,
+ HAL_REO_DEST_RING_ERROR_CODE_2K_ERR_FLAG_SET,
+ HAL_REO_DEST_RING_ERROR_CODE_PN_ERR_FLAG_SET,
+ HAL_REO_DEST_RING_ERROR_CODE_DESC_BLOCKED,
+ HAL_REO_DEST_RING_ERROR_CODE_MAX,
+};
+
+#define HAL_REO_DEST_RING_INFO0_BUFFER_TYPE BIT(0)
+#define HAL_REO_DEST_RING_INFO0_PUSH_REASON GENMASK(2, 1)
+#define HAL_REO_DEST_RING_INFO0_ERROR_CODE GENMASK(7, 3)
+#define HAL_REO_DEST_RING_INFO0_MSDU_DATA_SIZE GENMASK(11, 8)
+#define HAL_REO_DEST_RING_INFO0_SW_EXCEPTION BIT(12)
+#define HAL_REO_DEST_RING_INFO0_SRC_LINK_ID GENMASK(15, 13)
+#define HAL_REO_DEST_RING_INFO0_SIGNATURE GENMASK(19, 16)
+#define HAL_REO_DEST_RING_INFO0_RING_ID GENMASK(27, 20)
+#define HAL_REO_DEST_RING_INFO0_LOOPING_COUNT GENMASK(31, 28)
+
+struct hal_reo_dest_ring {
+ struct ath12k_buffer_addr buf_addr_info;
+ struct rx_mpdu_desc rx_mpdu_info;
+ struct rx_msdu_desc rx_msdu_info;
+ __le32 buf_va_lo;
+ __le32 buf_va_hi;
+ __le32 info0; /* %HAL_REO_DEST_RING_INFO0_ */
+} __packed;
+
+/* hal_reo_dest_ring
+ *
+ * Producer: RXDMA
+ * Consumer: REO/SW/FW
+ *
+ * buf_addr_info
+ * Details of the physical address of a buffer or MSDU
+ * link descriptor.
+ *
+ * rx_mpdu_info
+ * General information related to the MPDU that is passed
+ * on from REO entrance ring to the REO destination ring.
+ *
+ * rx_msdu_info
+ * General information related to the MSDU that is passed
+ * on from RXDMA all the way to the REO destination ring.
+ *
+ * buf_va_lo
+ * Field only valid if Reo_dest_buffer_type is set to MSDU_buf_address
+ * Lower 32 bits of the 64-bit virtual address corresponding
+ * to Buf_or_link_desc_addr_info
+ *
+ * buf_va_hi
+ * Address (upper 32 bits) of the REO queue descriptor.
+ * Upper 32 bits of the 64-bit virtual address corresponding
+ * to Buf_or_link_desc_addr_info
+ *
+ * buffer_type
+ * Indicates the type of address provided in the buf_addr_info.
+ * Values are defined in enum %HAL_REO_DEST_RING_BUFFER_TYPE_.
+ *
+ * push_reason
+ * Reason for pushing this frame to this exit ring. Values are
+ * defined in enum %HAL_REO_DEST_RING_PUSH_REASON_.
+ *
+ * error_code
+ * Valid only when 'push_reason' is set. All error codes are
+ * defined in enum %HAL_REO_DEST_RING_ERROR_CODE_.
+ *
+ * captured_msdu_data_size
+ * The number of following REO_DESTINATION STRUCTs that have
+ * been replaced with msdu_data extracted from the msdu_buffer
+ * and copied into the ring for easy FW/SW access.
+ *
+ * sw_exception
+ * This field has the same setting as the SW_exception field
+ * in the corresponding REO_entrance_ring descriptor.
+ * When set, the REO entrance descriptor is generated by FW,
+ * and the MPDU was processed in the following way:
+ * - NO re-order function is needed.
+ * - MPDU delinking is determined by the setting of Entrance
+ * ring field: SW_excection_mpdu_delink
+ * - Destination ring selection is based on the setting of
+ * the Entrance ring field SW_exception_destination _ring_valid
+ *
+ * src_link_id
+ * Set to the link ID of the PMAC that received the frame
+ *
+ * signature
+ * Set to value 0x8 when msdu capture mode is enabled for this ring
+ *
+ * ring_id
+ * The buffer pointer ring id.
+ * 0 - Idle ring
+ * 1 - N refers to other rings.
+ *
+ * looping_count
+ * Indicates the number of times the producer of entries into
+ * this ring has looped around the ring.
+ */
+
+#define HAL_REO_TO_PPE_RING_INFO0_DATA_LENGTH GENMASK(15, 0)
+#define HAL_REO_TO_PPE_RING_INFO0_DATA_OFFSET GENMASK(23, 16)
+#define HAL_REO_TO_PPE_RING_INFO0_POOL_ID GENMASK(28, 24)
+#define HAL_REO_TO_PPE_RING_INFO0_PREHEADER BIT(29)
+#define HAL_REO_TO_PPE_RING_INFO0_TSO_EN BIT(30)
+#define HAL_REO_TO_PPE_RING_INFO0_MORE BIT(31)
+
+struct hal_reo_to_ppe_ring {
+ __le32 buffer_addr;
+ __le32 info0; /* %HAL_REO_TO_PPE_RING_INFO0_ */
+} __packed;
+
+/* hal_reo_to_ppe_ring
+ *
+ * Producer: REO
+ * Consumer: PPE
+ *
+ * buf_addr_info
+ * Details of the physical address of a buffer or MSDU
+ * link descriptor.
+ *
+ * data_length
+ * Length of valid data in bytes
+ *
+ * data_offset
+ * Offset to the data from buffer pointer. Can be used to
+ * strip header in the data for tunnel termination etc.
+ *
+ * pool_id
+ * REO has global configuration register for this field.
+ * It may have several free buffer pools, each
+ * RX-Descriptor ring can fetch free buffer from specific
+ * buffer pool; pool id will indicate which pool the buffer
+ * will be released to; POOL_ID Zero returned to SW
+ *
+ * preheader
+ * Disabled: 0 (Default)
+ * Enabled: 1
+ *
+ * tso_en
+ * Disabled: 0 (Default)
+ * Enabled: 1
+ *
+ * more
+ * More Segments followed
+ */
+
+enum hal_reo_entr_rxdma_push_reason {
+ HAL_REO_ENTR_RING_RXDMA_PUSH_REASON_ERR_DETECTED,
+ HAL_REO_ENTR_RING_RXDMA_PUSH_REASON_ROUTING_INSTRUCTION,
+ HAL_REO_ENTR_RING_RXDMA_PUSH_REASON_RX_FLUSH,
+};
+
+enum hal_reo_entr_rxdma_ecode {
+ HAL_REO_ENTR_RING_RXDMA_ECODE_OVERFLOW_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_MPDU_LEN_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_FCS_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_DECRYPT_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_TKIP_MIC_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_UNECRYPTED_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_MSDU_LEN_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_MSDU_LIMIT_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_WIFI_PARSE_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_AMSDU_PARSE_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_SA_TIMEOUT_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_DA_TIMEOUT_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_FLOW_TIMEOUT_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_FLUSH_REQUEST_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_AMSDU_FRAG_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_MAX,
+};
+
+enum hal_rx_reo_dest_ring {
+ HAL_RX_REO_DEST_RING_TCL,
+ HAL_RX_REO_DEST_RING_SW1,
+ HAL_RX_REO_DEST_RING_SW2,
+ HAL_RX_REO_DEST_RING_SW3,
+ HAL_RX_REO_DEST_RING_SW4,
+ HAL_RX_REO_DEST_RING_RELEASE,
+ HAL_RX_REO_DEST_RING_FW,
+ HAL_RX_REO_DEST_RING_SW5,
+ HAL_RX_REO_DEST_RING_SW6,
+ HAL_RX_REO_DEST_RING_SW7,
+ HAL_RX_REO_DEST_RING_SW8,
+};
+
+#define HAL_REO_ENTR_RING_INFO0_QUEUE_ADDR_HI GENMASK(7, 0)
+#define HAL_REO_ENTR_RING_INFO0_MPDU_BYTE_COUNT GENMASK(21, 8)
+#define HAL_REO_ENTR_RING_INFO0_DEST_IND GENMASK(26, 22)
+#define HAL_REO_ENTR_RING_INFO0_FRAMELESS_BAR BIT(27)
+
+#define HAL_REO_ENTR_RING_INFO1_RXDMA_PUSH_REASON GENMASK(1, 0)
+#define HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE GENMASK(6, 2)
+#define HAL_REO_ENTR_RING_INFO1_MPDU_FRAG_NUM GENMASK(10, 7)
+#define HAL_REO_ENTR_RING_INFO1_SW_EXCEPTION BIT(11)
+#define HAL_REO_ENTR_RING_INFO1_SW_EXCEPT_MPDU_DELINK BIT(12)
+#define HAL_REO_ENTR_RING_INFO1_SW_EXCEPTION_RING_VLD BIT(13)
+#define HAL_REO_ENTR_RING_INFO1_SW_EXCEPTION_RING GENMASK(18, 14)
+#define HAL_REO_ENTR_RING_INFO1_MPDU_SEQ_NUM GENMASK(30, 19)
+
+#define HAL_REO_ENTR_RING_INFO2_PHY_PPDU_ID GENMASK(15, 0)
+#define HAL_REO_ENTR_RING_INFO2_SRC_LINK_ID GENMASK(18, 16)
+#define HAL_REO_ENTR_RING_INFO2_RING_ID GENMASK(27, 20)
+#define HAL_REO_ENTR_RING_INFO2_LOOPING_COUNT GENMASK(31, 28)
+
+struct hal_reo_entrance_ring {
+ struct ath12k_buffer_addr buf_addr_info;
+ struct rx_mpdu_desc rx_mpdu_info;
+ __le32 queue_addr_lo;
+ __le32 info0; /* %HAL_REO_ENTR_RING_INFO0_ */
+ __le32 info1; /* %HAL_REO_ENTR_RING_INFO1_ */
+ __le32 info2; /* %HAL_REO_DEST_RING_INFO2_ */
+
+} __packed;
+
+/* hal_reo_entrance_ring
+ *
+ * Producer: RXDMA
+ * Consumer: REO
+ *
+ * buf_addr_info
+ * Details of the physical address of a buffer or MSDU
+ * link descriptor.
+ *
+ * rx_mpdu_info
+ * General information related to the MPDU that is passed
+ * on from REO entrance ring to the REO destination ring.
+ *
+ * queue_addr_lo
+ * Address (lower 32 bits) of the REO queue descriptor.
+ *
+ * queue_addr_hi
+ * Address (upper 8 bits) of the REO queue descriptor.
+ *
+ * mpdu_byte_count
+ * An approximation of the number of bytes received in this MPDU.
+ * Used to keeps stats on the amount of data flowing
+ * through a queue.
+ *
+ * reo_destination_indication
+ * The id of the reo exit ring where the msdu frame shall push
+ * after (MPDU level) reordering has finished. Values are defined
+ * in enum %HAL_RX_MSDU_DESC_REO_DEST_IND_.
+ *
+ * frameless_bar
+ * Indicates that this REO entrance ring struct contains BAR info
+ * from a multi TID BAR frame. The original multi TID BAR frame
+ * itself contained all the REO info for the first TID, but all
+ * the subsequent TID info and their linkage to the REO descriptors
+ * is passed down as 'frameless' BAR info.
+ *
+ * The only fields valid in this descriptor when this bit is set
+ * are queue_addr_lo, queue_addr_hi, mpdu_sequence_number,
+ * bar_frame and peer_meta_data.
+ *
+ * rxdma_push_reason
+ * Reason for pushing this frame to this exit ring. Values are
+ * defined in enum %HAL_REO_ENTR_RING_RXDMA_PUSH_REASON_.
+ *
+ * rxdma_error_code
+ * Valid only when 'push_reason' is set. All error codes are
+ * defined in enum %HAL_REO_ENTR_RING_RXDMA_ECODE_.
+ *
+ * mpdu_fragment_number
+ * Field only valid when Reo_level_mpdu_frame_info.
+ * Rx_mpdu_desc_info_details.Fragment_flag is set.
+ *
+ * sw_exception
+ * When not set, REO is performing all its default MPDU processing
+ * operations,
+ * When set, this REO entrance descriptor is generated by FW, and
+ * should be processed as an exception. This implies:
+ * NO re-order function is needed.
+ * MPDU delinking is determined by the setting of field
+ * SW_excection_mpdu_delink
+ *
+ * sw_exception_mpdu_delink
+ * Field only valid when SW_exception is set.
+ * 1'b0: REO should NOT delink the MPDU, and thus pass this
+ * MPDU on to the destination ring as is. This implies that
+ * in the REO_DESTINATION_RING struct field
+ * Buf_or_link_desc_addr_info should point to an MSDU link
+ * descriptor
+ * 1'b1: REO should perform the normal MPDU delink into MSDU operations.
+ *
+ * sw_exception_dest_ring
+ * Field only valid when fields SW_exception and SW
+ * exception_destination_ring_valid are set. values are defined
+ * in %HAL_RX_REO_DEST_RING_.
+ *
+ * mpdu_seq_number
+ * The field can have two different meanings based on the setting
+ * of sub-field Reo level mpdu frame info.
+ * Rx_mpdu_desc_info_details. BAR_frame
+ * 'BAR_frame' is NOT set:
+ * The MPDU sequence number of the received frame.
+ * 'BAR_frame' is set.
+ * The MPDU Start sequence number from the BAR frame
+ *
+ * phy_ppdu_id
+ * A PPDU counter value that PHY increments for every PPDU received
+ *
+ * src_link_id
+ * Set to the link ID of the PMAC that received the frame
+ *
+ * ring_id
+ * The buffer pointer ring id.
+ * 0 - Idle ring
+ * 1 - N refers to other rings.
+ *
+ * looping_count
+ * Indicates the number of times the producer of entries into
+ * this ring has looped around the ring.
+ */
+
+#define HAL_REO_CMD_HDR_INFO0_CMD_NUMBER GENMASK(15, 0)
+#define HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED BIT(16)
+
+struct hal_reo_cmd_hdr {
+ __le32 info0;
+} __packed;
+
+#define HAL_REO_GET_QUEUE_STATS_INFO0_QUEUE_ADDR_HI GENMASK(7, 0)
+#define HAL_REO_GET_QUEUE_STATS_INFO0_CLEAR_STATS BIT(8)
+
+struct hal_reo_get_queue_stats {
+ struct hal_reo_cmd_hdr cmd;
+ __le32 queue_addr_lo;
+ __le32 info0;
+ __le32 rsvd0[6];
+ __le32 tlv64_pad;
+} __packed;
+
+/* hal_reo_get_queue_stats
+ * Producer: SW
+ * Consumer: REO
+ *
+ * cmd
+ * Details for command execution tracking purposes.
+ *
+ * queue_addr_lo
+ * Address (lower 32 bits) of the REO queue descriptor.
+ *
+ * queue_addr_hi
+ * Address (upper 8 bits) of the REO queue descriptor.
+ *
+ * clear_stats
+ * Clear stats settings. When set, Clear the stats after
+ * generating the status.
+ *
+ * Following stats will be cleared.
+ * Timeout_count
+ * Forward_due_to_bar_count
+ * Duplicate_count
+ * Frames_in_order_count
+ * BAR_received_count
+ * MPDU_Frames_processed_count
+ * MSDU_Frames_processed_count
+ * Total_processed_byte_count
+ * Late_receive_MPDU_count
+ * window_jump_2k
+ * Hole_count
+ */
+
+#define HAL_REO_FLUSH_QUEUE_INFO0_DESC_ADDR_HI GENMASK(7, 0)
+#define HAL_REO_FLUSH_QUEUE_INFO0_BLOCK_DESC_ADDR BIT(8)
+#define HAL_REO_FLUSH_QUEUE_INFO0_BLOCK_RESRC_IDX GENMASK(10, 9)
+
+struct hal_reo_flush_queue {
+ struct hal_reo_cmd_hdr cmd;
+ __le32 desc_addr_lo;
+ __le32 info0;
+ __le32 rsvd0[6];
+} __packed;
+
+#define HAL_REO_FLUSH_CACHE_INFO0_CACHE_ADDR_HI GENMASK(7, 0)
+#define HAL_REO_FLUSH_CACHE_INFO0_FWD_ALL_MPDUS BIT(8)
+#define HAL_REO_FLUSH_CACHE_INFO0_RELEASE_BLOCK_IDX BIT(9)
+#define HAL_REO_FLUSH_CACHE_INFO0_BLOCK_RESRC_IDX GENMASK(11, 10)
+#define HAL_REO_FLUSH_CACHE_INFO0_FLUSH_WO_INVALIDATE BIT(12)
+#define HAL_REO_FLUSH_CACHE_INFO0_BLOCK_CACHE_USAGE BIT(13)
+#define HAL_REO_FLUSH_CACHE_INFO0_FLUSH_ALL BIT(14)
+
+struct hal_reo_flush_cache {
+ struct hal_reo_cmd_hdr cmd;
+ __le32 cache_addr_lo;
+ __le32 info0;
+ __le32 rsvd0[6];
+} __packed;
+
+#define HAL_TCL_DATA_CMD_INFO0_CMD_TYPE BIT(0)
+#define HAL_TCL_DATA_CMD_INFO0_DESC_TYPE BIT(1)
+#define HAL_TCL_DATA_CMD_INFO0_BANK_ID GENMASK(7, 2)
+#define HAL_TCL_DATA_CMD_INFO0_TX_NOTIFY_FRAME GENMASK(10, 8)
+#define HAL_TCL_DATA_CMD_INFO0_HDR_LEN_READ_SEL BIT(11)
+#define HAL_TCL_DATA_CMD_INFO0_BUF_TIMESTAMP GENMASK(30, 12)
+#define HAL_TCL_DATA_CMD_INFO0_BUF_TIMESTAMP_VLD BIT(31)
+
+#define HAL_TCL_DATA_CMD_INFO1_CMD_NUM GENMASK(31, 16)
+
+#define HAL_TCL_DATA_CMD_INFO2_DATA_LEN GENMASK(15, 0)
+#define HAL_TCL_DATA_CMD_INFO2_IP4_CKSUM_EN BIT(16)
+#define HAL_TCL_DATA_CMD_INFO2_UDP4_CKSUM_EN BIT(17)
+#define HAL_TCL_DATA_CMD_INFO2_UDP6_CKSUM_EN BIT(18)
+#define HAL_TCL_DATA_CMD_INFO2_TCP4_CKSUM_EN BIT(19)
+#define HAL_TCL_DATA_CMD_INFO2_TCP6_CKSUM_EN BIT(20)
+#define HAL_TCL_DATA_CMD_INFO2_TO_FW BIT(21)
+#define HAL_TCL_DATA_CMD_INFO2_PKT_OFFSET GENMASK(31, 23)
+
+#define HAL_TCL_DATA_CMD_INFO3_TID_OVERWRITE BIT(0)
+#define HAL_TCL_DATA_CMD_INFO3_FLOW_OVERRIDE_EN BIT(1)
+#define HAL_TCL_DATA_CMD_INFO3_CLASSIFY_INFO_SEL GENMASK(3, 2)
+#define HAL_TCL_DATA_CMD_INFO3_TID GENMASK(7, 4)
+#define HAL_TCL_DATA_CMD_INFO3_FLOW_OVERRIDE BIT(8)
+#define HAL_TCL_DATA_CMD_INFO3_PMAC_ID GENMASK(10, 9)
+#define HAL_TCL_DATA_CMD_INFO3_MSDU_COLOR GENMASK(12, 11)
+#define HAL_TCL_DATA_CMD_INFO3_VDEV_ID GENMASK(31, 24)
+
+#define HAL_TCL_DATA_CMD_INFO4_SEARCH_INDEX GENMASK(19, 0)
+#define HAL_TCL_DATA_CMD_INFO4_CACHE_SET_NUM GENMASK(23, 20)
+#define HAL_TCL_DATA_CMD_INFO4_IDX_LOOKUP_OVERRIDE BIT(24)
+
+#define HAL_TCL_DATA_CMD_INFO5_RING_ID GENMASK(27, 20)
+#define HAL_TCL_DATA_CMD_INFO5_LOOPING_COUNT GENMASK(31, 28)
+
+enum hal_encrypt_type {
+ HAL_ENCRYPT_TYPE_WEP_40,
+ HAL_ENCRYPT_TYPE_WEP_104,
+ HAL_ENCRYPT_TYPE_TKIP_NO_MIC,
+ HAL_ENCRYPT_TYPE_WEP_128,
+ HAL_ENCRYPT_TYPE_TKIP_MIC,
+ HAL_ENCRYPT_TYPE_WAPI,
+ HAL_ENCRYPT_TYPE_CCMP_128,
+ HAL_ENCRYPT_TYPE_OPEN,
+ HAL_ENCRYPT_TYPE_CCMP_256,
+ HAL_ENCRYPT_TYPE_GCMP_128,
+ HAL_ENCRYPT_TYPE_AES_GCMP_256,
+ HAL_ENCRYPT_TYPE_WAPI_GCM_SM4,
+};
+
+enum hal_tcl_encap_type {
+ HAL_TCL_ENCAP_TYPE_RAW,
+ HAL_TCL_ENCAP_TYPE_NATIVE_WIFI,
+ HAL_TCL_ENCAP_TYPE_ETHERNET,
+ HAL_TCL_ENCAP_TYPE_802_3 = 3,
+};
+
+enum hal_tcl_desc_type {
+ HAL_TCL_DESC_TYPE_BUFFER,
+ HAL_TCL_DESC_TYPE_EXT_DESC,
+};
+
+enum hal_wbm_htt_tx_comp_status {
+ HAL_WBM_REL_HTT_TX_COMP_STATUS_OK,
+ HAL_WBM_REL_HTT_TX_COMP_STATUS_DROP,
+ HAL_WBM_REL_HTT_TX_COMP_STATUS_TTL,
+ HAL_WBM_REL_HTT_TX_COMP_STATUS_REINJ,
+ HAL_WBM_REL_HTT_TX_COMP_STATUS_INSPECT,
+ HAL_WBM_REL_HTT_TX_COMP_STATUS_MEC_NOTIFY,
+ HAL_WBM_REL_HTT_TX_COMP_STATUS_MAX,
+};
+
+struct hal_tcl_data_cmd {
+ struct ath12k_buffer_addr buf_addr_info;
+ __le32 info0;
+ __le32 info1;
+ __le32 info2;
+ __le32 info3;
+ __le32 info4;
+ __le32 info5;
+} __packed;
+
+/* hal_tcl_data_cmd
+ *
+ * buf_addr_info
+ * Details of the physical address of a buffer or MSDU
+ * link descriptor.
+ *
+ * tcl_cmd_type
+ * used to select the type of TCL Command decriptor
+ *
+ * desc_type
+ * Indicates the type of address provided in the buf_addr_info.
+ * Values are defined in enum %HAL_REO_DEST_RING_BUFFER_TYPE_.
+ *
+ * bank_id
+ * used to select one of the TCL register banks for fields removed
+ * from 'TCL_DATA_CMD' that do not change often within one virtual
+ * device or a set of virtual devices:
+ *
+ * tx_notify_frame
+ * TCL copies this value to 'TQM_ENTRANCE_RING' field FW_tx_notify_frame.
+ *
+ * hdr_length_read_sel
+ * used to select the per 'encap_type' register set for MSDU header
+ * read length
+ *
+ * buffer_timestamp
+ * buffer_timestamp_valid
+ * Frame system entrance timestamp. It shall be filled by first
+ * module (SW, TCL or TQM) that sees the frames first.
+ *
+ * cmd_num
+ * This number can be used to match against status.
+ *
+ * data_length
+ * MSDU length in case of direct descriptor. Length of link
+ * extension descriptor in case of Link extension descriptor.
+ *
+ * *_checksum_en
+ * Enable checksum replacement for ipv4, udp_over_ipv4, ipv6,
+ * udp_over_ipv6, tcp_over_ipv4 and tcp_over_ipv6.
+ *
+ * to_fw
+ * Forward packet to FW along with classification result. The
+ * packet will not be forward to TQM when this bit is set.
+ * 1'b0: Use classification result to forward the packet.
+ * 1'b1: Override classification result & forward packet only to fw
+ *
+ * packet_offset
+ * Packet offset from Metadata in case of direct buffer descriptor.
+ *
+ * hlos_tid_overwrite
+ *
+ * When set, TCL shall ignore the IP DSCP and VLAN PCP
+ * fields and use HLOS_TID as the final TID. Otherwise TCL
+ * shall consider the DSCP and PCP fields as well as HLOS_TID
+ * and choose a final TID based on the configured priority
+ *
+ * flow_override_enable
+ * TCL uses this to select the flow pointer from the peer table,
+ * which can be overridden by SW for pre-encrypted raw WiFi packets
+ * that cannot be parsed for UDP or for other MLO
+ * 0 - FP_PARSE_IP: Use the flow-pointer based on parsing the IPv4
+ * or IPv6 header.
+ * 1 - FP_USE_OVERRIDE: Use the who_classify_info_sel and
+ * flow_override fields to select the flow-pointer
+ *
+ * who_classify_info_sel
+ * Field only valid when flow_override_enable is set to FP_USE_OVERRIDE.
+ * This field is used to select one of the 'WHO_CLASSIFY_INFO's in the
+ * peer table in case more than 2 flows are mapped to a single TID.
+ * 0: To choose Flow 0 and 1 of any TID use this value.
+ * 1: To choose Flow 2 and 3 of any TID use this value.
+ * 2: To choose Flow 4 and 5 of any TID use this value.
+ * 3: To choose Flow 6 and 7 of any TID use this value.
+ *
+ * If who_classify_info sel is not in sync with the num_tx_classify_info
+ * field from address search, then TCL will set 'who_classify_info_sel'
+ * to 0 use flows 0 and 1.
+ *
+ * hlos_tid
+ * HLOS MSDU priority
+ * Field is used when HLOS_TID_overwrite is set.
+ *
+ * flow_override
+ * Field only valid when flow_override_enable is set to FP_USE_OVERRIDE
+ * TCL uses this to select the flow pointer from the peer table,
+ * which can be overridden by SW for pre-encrypted raw WiFi packets
+ * that cannot be parsed for UDP or for other MLO
+ * 0 - FP_USE_NON_UDP: Use the non-UDP flow pointer (flow 0)
+ * 1 - FP_USE_UDP: Use the UDP flow pointer (flow 1)
+ *
+ * pmac_id
+ * TCL uses this PMAC_ID in address search, i.e, while
+ * finding matching entry for the packet in AST corresponding
+ * to given PMAC_ID
+ *
+ * If PMAC ID is all 1s (=> value 3), it indicates wildcard
+ * match for any PMAC
+ *
+ * vdev_id
+ * Virtual device ID to check against the address search entry to
+ * avoid security issues from transmitting packets from an incorrect
+ * virtual device
+ *
+ * search_index
+ * The index that will be used for index based address or
+ * flow search. The field is valid when 'search_type' is 1 or 2.
+ *
+ * cache_set_num
+ *
+ * Cache set number that should be used to cache the index
+ * based search results, for address and flow search. This
+ * value should be equal to LSB four bits of the hash value of
+ * match data, in case of search index points to an entry which
+ * may be used in content based search also. The value can be
+ * anything when the entry pointed by search index will not be
+ * used for content based search.
+ *
+ * index_loop_override
+ * When set, address search and packet routing is forced to use
+ * 'search_index' instead of following the register configuration
+ * seleced by Bank_id.
+ *
+ * ring_id
+ * The buffer pointer ring ID.
+ * 0 refers to the IDLE ring
+ * 1 - N refers to other rings
+ *
+ * looping_count
+ *
+ * A count value that indicates the number of times the
+ * producer of entries into the Ring has looped around the
+ * ring.
+ *
+ * At initialization time, this value is set to 0. On the
+ * first loop, this value is set to 1. After the max value is
+ * reached allowed by the number of bits for this field, the
+ * count value continues with 0 again.
+ *
+ * In case SW is the consumer of the ring entries, it can
+ * use this field to figure out up to where the producer of
+ * entries has created new entries. This eliminates the need to
+ * check where the head pointer' of the ring is located once
+ * the SW starts processing an interrupt indicating that new
+ * entries have been put into this ring...
+ *
+ * Also note that SW if it wants only needs to look at the
+ * LSB bit of this count value.
+ */
+
+#define HAL_TCL_DESC_LEN sizeof(struct hal_tcl_data_cmd)
+
+#define HAL_TX_MSDU_EXT_INFO0_BUF_PTR_LO GENMASK(31, 0)
+
+#define HAL_TX_MSDU_EXT_INFO1_BUF_PTR_HI GENMASK(7, 0)
+#define HAL_TX_MSDU_EXT_INFO1_EXTN_OVERRIDE BIT(8)
+#define HAL_TX_MSDU_EXT_INFO1_ENCAP_TYPE GENMASK(10, 9)
+#define HAL_TX_MSDU_EXT_INFO1_ENCRYPT_TYPE GENMASK(14, 11)
+#define HAL_TX_MSDU_EXT_INFO1_BUF_LEN GENMASK(31, 16)
+
+struct hal_tx_msdu_ext_desc {
+ __le32 rsvd0[6];
+ __le32 info0;
+ __le32 info1;
+ __le32 rsvd1[10];
+};
+
+struct hal_tcl_gse_cmd {
+ __le32 ctrl_buf_addr_lo;
+ __le32 info0;
+ __le32 meta_data[2];
+ __le32 rsvd0[2];
+ __le32 info1;
+} __packed;
+
+/* hal_tcl_gse_cmd
+ *
+ * ctrl_buf_addr_lo, ctrl_buf_addr_hi
+ * Address of a control buffer containing additional info needed
+ * for this command execution.
+ *
+ * meta_data
+ * Meta data to be returned in the status descriptor
+ */
+
+enum hal_tcl_cache_op_res {
+ HAL_TCL_CACHE_OP_RES_DONE,
+ HAL_TCL_CACHE_OP_RES_NOT_FOUND,
+ HAL_TCL_CACHE_OP_RES_TIMEOUT,
+};
+
+struct hal_tcl_status_ring {
+ __le32 info0;
+ __le32 msdu_byte_count;
+ __le32 msdu_timestamp;
+ __le32 meta_data[2];
+ __le32 info1;
+ __le32 rsvd0;
+ __le32 info2;
+} __packed;
+
+/* hal_tcl_status_ring
+ *
+ * msdu_cnt
+ * msdu_byte_count
+ * MSDU count of Entry and MSDU byte count for entry 1.
+ *
+ */
+
+#define HAL_CE_SRC_DESC_ADDR_INFO_ADDR_HI GENMASK(7, 0)
+#define HAL_CE_SRC_DESC_ADDR_INFO_HASH_EN BIT(8)
+#define HAL_CE_SRC_DESC_ADDR_INFO_BYTE_SWAP BIT(9)
+#define HAL_CE_SRC_DESC_ADDR_INFO_DEST_SWAP BIT(10)
+#define HAL_CE_SRC_DESC_ADDR_INFO_GATHER BIT(11)
+#define HAL_CE_SRC_DESC_ADDR_INFO_LEN GENMASK(31, 16)
+
+#define HAL_CE_SRC_DESC_META_INFO_DATA GENMASK(15, 0)
+
+#define HAL_CE_SRC_DESC_FLAGS_RING_ID GENMASK(27, 20)
+#define HAL_CE_SRC_DESC_FLAGS_LOOP_CNT HAL_SRNG_DESC_LOOP_CNT
+
+struct hal_ce_srng_src_desc {
+ __le32 buffer_addr_low;
+ __le32 buffer_addr_info; /* %HAL_CE_SRC_DESC_ADDR_INFO_ */
+ __le32 meta_info; /* %HAL_CE_SRC_DESC_META_INFO_ */
+ __le32 flags; /* %HAL_CE_SRC_DESC_FLAGS_ */
+} __packed;
+
+/* hal_ce_srng_src_desc
+ *
+ * buffer_addr_lo
+ * LSB 32 bits of the 40 Bit Pointer to the source buffer
+ *
+ * buffer_addr_hi
+ * MSB 8 bits of the 40 Bit Pointer to the source buffer
+ *
+ * toeplitz_en
+ * Enable generation of 32-bit Toeplitz-LFSR hash for
+ * data transfer. In case of gather field in first source
+ * ring entry of the gather copy cycle in taken into account.
+ *
+ * src_swap
+ * Treats source memory organization as big-endian. For
+ * each dword read (4 bytes), the byte 0 is swapped with byte 3
+ * and byte 1 is swapped with byte 2.
+ * In case of gather field in first source ring entry of
+ * the gather copy cycle in taken into account.
+ *
+ * dest_swap
+ * Treats destination memory organization as big-endian.
+ * For each dword write (4 bytes), the byte 0 is swapped with
+ * byte 3 and byte 1 is swapped with byte 2.
+ * In case of gather field in first source ring entry of
+ * the gather copy cycle in taken into account.
+ *
+ * gather
+ * Enables gather of multiple copy engine source
+ * descriptors to one destination.
+ *
+ * ce_res_0
+ * Reserved
+ *
+ *
+ * length
+ * Length of the buffer in units of octets of the current
+ * descriptor
+ *
+ * fw_metadata
+ * Meta data used by FW.
+ * In case of gather field in first source ring entry of
+ * the gather copy cycle in taken into account.
+ *
+ * ce_res_1
+ * Reserved
+ *
+ * ce_res_2
+ * Reserved
+ *
+ * ring_id
+ * The buffer pointer ring ID.
+ * 0 refers to the IDLE ring
+ * 1 - N refers to other rings
+ * Helps with debugging when dumping ring contents.
+ *
+ * looping_count
+ * A count value that indicates the number of times the
+ * producer of entries into the Ring has looped around the
+ * ring.
+ *
+ * At initialization time, this value is set to 0. On the
+ * first loop, this value is set to 1. After the max value is
+ * reached allowed by the number of bits for this field, the
+ * count value continues with 0 again.
+ *
+ * In case SW is the consumer of the ring entries, it can
+ * use this field to figure out up to where the producer of
+ * entries has created new entries. This eliminates the need to
+ * check where the head pointer' of the ring is located once
+ * the SW starts processing an interrupt indicating that new
+ * entries have been put into this ring...
+ *
+ * Also note that SW if it wants only needs to look at the
+ * LSB bit of this count value.
+ */
+
+#define HAL_CE_DEST_DESC_ADDR_INFO_ADDR_HI GENMASK(7, 0)
+#define HAL_CE_DEST_DESC_ADDR_INFO_RING_ID GENMASK(27, 20)
+#define HAL_CE_DEST_DESC_ADDR_INFO_LOOP_CNT HAL_SRNG_DESC_LOOP_CNT
+
+struct hal_ce_srng_dest_desc {
+ __le32 buffer_addr_low;
+ __le32 buffer_addr_info; /* %HAL_CE_DEST_DESC_ADDR_INFO_ */
+} __packed;
+
+/* hal_ce_srng_dest_desc
+ *
+ * dst_buffer_low
+ * LSB 32 bits of the 40 Bit Pointer to the Destination
+ * buffer
+ *
+ * dst_buffer_high
+ * MSB 8 bits of the 40 Bit Pointer to the Destination
+ * buffer
+ *
+ * ce_res_4
+ * Reserved
+ *
+ * ring_id
+ * The buffer pointer ring ID.
+ * 0 refers to the IDLE ring
+ * 1 - N refers to other rings
+ * Helps with debugging when dumping ring contents.
+ *
+ * looping_count
+ * A count value that indicates the number of times the
+ * producer of entries into the Ring has looped around the
+ * ring.
+ *
+ * At initialization time, this value is set to 0. On the
+ * first loop, this value is set to 1. After the max value is
+ * reached allowed by the number of bits for this field, the
+ * count value continues with 0 again.
+ *
+ * In case SW is the consumer of the ring entries, it can
+ * use this field to figure out up to where the producer of
+ * entries has created new entries. This eliminates the need to
+ * check where the head pointer' of the ring is located once
+ * the SW starts processing an interrupt indicating that new
+ * entries have been put into this ring...
+ *
+ * Also note that SW if it wants only needs to look at the
+ * LSB bit of this count value.
+ */
+
+#define HAL_CE_DST_STATUS_DESC_FLAGS_HASH_EN BIT(8)
+#define HAL_CE_DST_STATUS_DESC_FLAGS_BYTE_SWAP BIT(9)
+#define HAL_CE_DST_STATUS_DESC_FLAGS_DEST_SWAP BIT(10)
+#define HAL_CE_DST_STATUS_DESC_FLAGS_GATHER BIT(11)
+#define HAL_CE_DST_STATUS_DESC_FLAGS_LEN GENMASK(31, 16)
+
+#define HAL_CE_DST_STATUS_DESC_META_INFO_DATA GENMASK(15, 0)
+#define HAL_CE_DST_STATUS_DESC_META_INFO_RING_ID GENMASK(27, 20)
+#define HAL_CE_DST_STATUS_DESC_META_INFO_LOOP_CNT HAL_SRNG_DESC_LOOP_CNT
+
+struct hal_ce_srng_dst_status_desc {
+ __le32 flags; /* %HAL_CE_DST_STATUS_DESC_FLAGS_ */
+ __le32 toeplitz_hash0;
+ __le32 toeplitz_hash1;
+ __le32 meta_info; /* HAL_CE_DST_STATUS_DESC_META_INFO_ */
+} __packed;
+
+/* hal_ce_srng_dst_status_desc
+ *
+ * ce_res_5
+ * Reserved
+ *
+ * toeplitz_en
+ *
+ * src_swap
+ * Source memory buffer swapped
+ *
+ * dest_swap
+ * Destination memory buffer swapped
+ *
+ * gather
+ * Gather of multiple copy engine source descriptors to one
+ * destination enabled
+ *
+ * ce_res_6
+ * Reserved
+ *
+ * length
+ * Sum of all the Lengths of the source descriptor in the
+ * gather chain
+ *
+ * toeplitz_hash_0
+ * 32 LS bits of 64 bit Toeplitz LFSR hash result
+ *
+ * toeplitz_hash_1
+ * 32 MS bits of 64 bit Toeplitz LFSR hash result
+ *
+ * fw_metadata
+ * Meta data used by FW
+ * In case of gather field in first source ring entry of
+ * the gather copy cycle in taken into account.
+ *
+ * ce_res_7
+ * Reserved
+ *
+ * ring_id
+ * The buffer pointer ring ID.
+ * 0 refers to the IDLE ring
+ * 1 - N refers to other rings
+ * Helps with debugging when dumping ring contents.
+ *
+ * looping_count
+ * A count value that indicates the number of times the
+ * producer of entries into the Ring has looped around the
+ * ring.
+ *
+ * At initialization time, this value is set to 0. On the
+ * first loop, this value is set to 1. After the max value is
+ * reached allowed by the number of bits for this field, the
+ * count value continues with 0 again.
+ *
+ * In case SW is the consumer of the ring entries, it can
+ * use this field to figure out up to where the producer of
+ * entries has created new entries. This eliminates the need to
+ * check where the head pointer' of the ring is located once
+ * the SW starts processing an interrupt indicating that new
+ * entries have been put into this ring...
+ *
+ * Also note that SW if it wants only needs to look at the
+ * LSB bit of this count value.
+ */
+
+#define HAL_TX_RATE_STATS_INFO0_VALID BIT(0)
+#define HAL_TX_RATE_STATS_INFO0_BW GENMASK(3, 1)
+#define HAL_TX_RATE_STATS_INFO0_PKT_TYPE GENMASK(7, 4)
+#define HAL_TX_RATE_STATS_INFO0_STBC BIT(8)
+#define HAL_TX_RATE_STATS_INFO0_LDPC BIT(9)
+#define HAL_TX_RATE_STATS_INFO0_SGI GENMASK(11, 10)
+#define HAL_TX_RATE_STATS_INFO0_MCS GENMASK(15, 12)
+#define HAL_TX_RATE_STATS_INFO0_OFDMA_TX BIT(16)
+#define HAL_TX_RATE_STATS_INFO0_TONES_IN_RU GENMASK(28, 17)
+
+enum hal_tx_rate_stats_bw {
+ HAL_TX_RATE_STATS_BW_20,
+ HAL_TX_RATE_STATS_BW_40,
+ HAL_TX_RATE_STATS_BW_80,
+ HAL_TX_RATE_STATS_BW_160,
+};
+
+enum hal_tx_rate_stats_pkt_type {
+ HAL_TX_RATE_STATS_PKT_TYPE_11A,
+ HAL_TX_RATE_STATS_PKT_TYPE_11B,
+ HAL_TX_RATE_STATS_PKT_TYPE_11N,
+ HAL_TX_RATE_STATS_PKT_TYPE_11AC,
+ HAL_TX_RATE_STATS_PKT_TYPE_11AX,
+ HAL_TX_RATE_STATS_PKT_TYPE_11BA,
+ HAL_TX_RATE_STATS_PKT_TYPE_11BE,
+};
+
+enum hal_tx_rate_stats_sgi {
+ HAL_TX_RATE_STATS_SGI_08US,
+ HAL_TX_RATE_STATS_SGI_04US,
+ HAL_TX_RATE_STATS_SGI_16US,
+ HAL_TX_RATE_STATS_SGI_32US,
+};
+
+struct hal_tx_rate_stats {
+ __le32 info0;
+ __le32 tsf;
+} __packed;
+
+struct hal_wbm_link_desc {
+ struct ath12k_buffer_addr buf_addr_info;
+} __packed;
+
+/* hal_wbm_link_desc
+ *
+ * Producer: WBM
+ * Consumer: WBM
+ *
+ * buf_addr_info
+ * Details of the physical address of a buffer or MSDU
+ * link descriptor.
+ */
+
+enum hal_wbm_rel_src_module {
+ HAL_WBM_REL_SRC_MODULE_TQM,
+ HAL_WBM_REL_SRC_MODULE_RXDMA,
+ HAL_WBM_REL_SRC_MODULE_REO,
+ HAL_WBM_REL_SRC_MODULE_FW,
+ HAL_WBM_REL_SRC_MODULE_SW,
+};
+
+enum hal_wbm_rel_desc_type {
+ HAL_WBM_REL_DESC_TYPE_REL_MSDU,
+ HAL_WBM_REL_DESC_TYPE_MSDU_LINK,
+ HAL_WBM_REL_DESC_TYPE_MPDU_LINK,
+ HAL_WBM_REL_DESC_TYPE_MSDU_EXT,
+ HAL_WBM_REL_DESC_TYPE_QUEUE_EXT,
+};
+
+/* hal_wbm_rel_desc_type
+ *
+ * msdu_buffer
+ * The address points to an MSDU buffer
+ *
+ * msdu_link_descriptor
+ * The address points to an Tx MSDU link descriptor
+ *
+ * mpdu_link_descriptor
+ * The address points to an MPDU link descriptor
+ *
+ * msdu_ext_descriptor
+ * The address points to an MSDU extension descriptor
+ *
+ * queue_ext_descriptor
+ * The address points to an TQM queue extension descriptor. WBM should
+ * treat this is the same way as a link descriptor.
+ */
+
+enum hal_wbm_rel_bm_act {
+ HAL_WBM_REL_BM_ACT_PUT_IN_IDLE,
+ HAL_WBM_REL_BM_ACT_REL_MSDU,
+};
+
+/* hal_wbm_rel_bm_act
+ *
+ * put_in_idle_list
+ * Put the buffer or descriptor back in the idle list. In case of MSDU or
+ * MDPU link descriptor, BM does not need to check to release any
+ * individual MSDU buffers.
+ *
+ * release_msdu_list
+ * This BM action can only be used in combination with desc_type being
+ * msdu_link_descriptor. Field first_msdu_index points out which MSDU
+ * pointer in the MSDU link descriptor is the first of an MPDU that is
+ * released. BM shall release all the MSDU buffers linked to this first
+ * MSDU buffer pointer. All related MSDU buffer pointer entries shall be
+ * set to value 0, which represents the 'NULL' pointer. When all MSDU
+ * buffer pointers in the MSDU link descriptor are 'NULL', the MSDU link
+ * descriptor itself shall also be released.
+ */
+#define HAL_WBM_COMPL_RX_INFO0_REL_SRC_MODULE GENMASK(2, 0)
+#define HAL_WBM_COMPL_RX_INFO0_BM_ACTION GENMASK(5, 3)
+#define HAL_WBM_COMPL_RX_INFO0_DESC_TYPE GENMASK(8, 6)
+#define HAL_WBM_COMPL_RX_INFO0_RBM GENMASK(12, 9)
+#define HAL_WBM_COMPL_RX_INFO0_RXDMA_PUSH_REASON GENMASK(18, 17)
+#define HAL_WBM_COMPL_RX_INFO0_RXDMA_ERROR_CODE GENMASK(23, 19)
+#define HAL_WBM_COMPL_RX_INFO0_REO_PUSH_REASON GENMASK(25, 24)
+#define HAL_WBM_COMPL_RX_INFO0_REO_ERROR_CODE GENMASK(30, 26)
+#define HAL_WBM_COMPL_RX_INFO0_WBM_INTERNAL_ERROR BIT(31)
+
+#define HAL_WBM_COMPL_RX_INFO1_PHY_ADDR_HI GENMASK(7, 0)
+#define HAL_WBM_COMPL_RX_INFO1_SW_COOKIE GENMASK(27, 8)
+#define HAL_WBM_COMPL_RX_INFO1_LOOPING_COUNT GENMASK(31, 28)
+
+struct hal_wbm_completion_ring_rx {
+ __le32 addr_lo;
+ __le32 addr_hi;
+ __le32 info0;
+ struct rx_mpdu_desc rx_mpdu_info;
+ struct rx_msdu_desc rx_msdu_info;
+ __le32 phy_addr_lo;
+ __le32 info1;
+} __packed;
+
+#define HAL_WBM_COMPL_TX_INFO0_REL_SRC_MODULE GENMASK(2, 0)
+#define HAL_WBM_COMPL_TX_INFO0_DESC_TYPE GENMASK(8, 6)
+#define HAL_WBM_COMPL_TX_INFO0_RBM GENMASK(12, 9)
+#define HAL_WBM_COMPL_TX_INFO0_TQM_RELEASE_REASON GENMASK(16, 13)
+#define HAL_WBM_COMPL_TX_INFO0_RBM_OVERRIDE_VLD BIT(17)
+#define HAL_WBM_COMPL_TX_INFO0_SW_COOKIE_LO GENMASK(29, 18)
+#define HAL_WBM_COMPL_TX_INFO0_CC_DONE BIT(30)
+#define HAL_WBM_COMPL_TX_INFO0_WBM_INTERNAL_ERROR BIT(31)
+
+#define HAL_WBM_COMPL_TX_INFO1_TQM_STATUS_NUMBER GENMASK(23, 0)
+#define HAL_WBM_COMPL_TX_INFO1_TRANSMIT_COUNT GENMASK(30, 24)
+#define HAL_WBM_COMPL_TX_INFO1_SW_REL_DETAILS_VALID BIT(31)
+
+#define HAL_WBM_COMPL_TX_INFO2_ACK_FRAME_RSSI GENMASK(7, 0)
+#define HAL_WBM_COMPL_TX_INFO2_FIRST_MSDU BIT(8)
+#define HAL_WBM_COMPL_TX_INFO2_LAST_MSDU BIT(9)
+#define HAL_WBM_COMPL_TX_INFO2_FW_TX_NOTIF_FRAME GENMASK(12, 10)
+#define HAL_WBM_COMPL_TX_INFO2_BUFFER_TIMESTAMP GENMASK(31, 13)
+
+#define HAL_WBM_COMPL_TX_INFO3_PEER_ID GENMASK(15, 0)
+#define HAL_WBM_COMPL_TX_INFO3_TID GENMASK(19, 16)
+#define HAL_WBM_COMPL_TX_INFO3_SW_COOKIE_HI GENMASK(27, 20)
+#define HAL_WBM_COMPL_TX_INFO3_LOOPING_COUNT GENMASK(31, 28)
+
+struct hal_wbm_completion_ring_tx {
+ __le32 buf_va_lo;
+ __le32 buf_va_hi;
+ __le32 info0;
+ __le32 info1;
+ __le32 info2;
+ struct hal_tx_rate_stats rate_stats;
+ __le32 info3;
+} __packed;
+
+#define HAL_WBM_RELEASE_TX_INFO0_REL_SRC_MODULE GENMASK(2, 0)
+#define HAL_WBM_RELEASE_TX_INFO0_BM_ACTION GENMASK(5, 3)
+#define HAL_WBM_RELEASE_TX_INFO0_DESC_TYPE GENMASK(8, 6)
+#define HAL_WBM_RELEASE_TX_INFO0_FIRST_MSDU_IDX GENMASK(12, 9)
+#define HAL_WBM_RELEASE_TX_INFO0_TQM_RELEASE_REASON GENMASK(18, 13)
+#define HAL_WBM_RELEASE_TX_INFO0_RBM_OVERRIDE_VLD BIT(17)
+#define HAL_WBM_RELEASE_TX_INFO0_SW_BUFFER_COOKIE_11_0 GENMASK(29, 18)
+#define HAL_WBM_RELEASE_TX_INFO0_WBM_INTERNAL_ERROR BIT(31)
+
+#define HAL_WBM_RELEASE_TX_INFO1_TQM_STATUS_NUMBER GENMASK(23, 0)
+#define HAL_WBM_RELEASE_TX_INFO1_TRANSMIT_COUNT GENMASK(30, 24)
+#define HAL_WBM_RELEASE_TX_INFO1_SW_REL_DETAILS_VALID BIT(31)
+
+#define HAL_WBM_RELEASE_TX_INFO2_ACK_FRAME_RSSI GENMASK(7, 0)
+#define HAL_WBM_RELEASE_TX_INFO2_FIRST_MSDU BIT(8)
+#define HAL_WBM_RELEASE_TX_INFO2_LAST_MSDU BIT(9)
+#define HAL_WBM_RELEASE_TX_INFO2_FW_TX_NOTIF_FRAME GENMASK(12, 10)
+#define HAL_WBM_RELEASE_TX_INFO2_BUFFER_TIMESTAMP GENMASK(31, 13)
+
+#define HAL_WBM_RELEASE_TX_INFO3_PEER_ID GENMASK(15, 0)
+#define HAL_WBM_RELEASE_TX_INFO3_TID GENMASK(19, 16)
+#define HAL_WBM_RELEASE_TX_INFO3_SW_BUFFER_COOKIE_19_12 GENMASK(27, 20)
+#define HAL_WBM_RELEASE_TX_INFO3_LOOPING_COUNT GENMASK(31, 28)
+
+struct hal_wbm_release_ring_tx {
+ struct ath12k_buffer_addr buf_addr_info;
+ __le32 info0;
+ __le32 info1;
+ __le32 info2;
+ struct hal_tx_rate_stats rate_stats;
+ __le32 info3;
+} __packed;
+
+#define HAL_WBM_RELEASE_RX_INFO0_REL_SRC_MODULE GENMASK(2, 0)
+#define HAL_WBM_RELEASE_RX_INFO0_BM_ACTION GENMASK(5, 3)
+#define HAL_WBM_RELEASE_RX_INFO0_DESC_TYPE GENMASK(8, 6)
+#define HAL_WBM_RELEASE_RX_INFO0_FIRST_MSDU_IDX GENMASK(12, 9)
+#define HAL_WBM_RELEASE_RX_INFO0_CC_STATUS BIT(16)
+#define HAL_WBM_RELEASE_RX_INFO0_RXDMA_PUSH_REASON GENMASK(18, 17)
+#define HAL_WBM_RELEASE_RX_INFO0_RXDMA_ERROR_CODE GENMASK(23, 19)
+#define HAL_WBM_RELEASE_RX_INFO0_REO_PUSH_REASON GENMASK(25, 24)
+#define HAL_WBM_RELEASE_RX_INFO0_REO_ERROR_CODE GENMASK(30, 26)
+#define HAL_WBM_RELEASE_RX_INFO0_WBM_INTERNAL_ERROR BIT(31)
+
+#define HAL_WBM_RELEASE_RX_INFO2_RING_ID GENMASK(27, 20)
+#define HAL_WBM_RELEASE_RX_INFO2_LOOPING_COUNT GENMASK(31, 28)
+
+struct hal_wbm_release_ring_rx {
+ struct ath12k_buffer_addr buf_addr_info;
+ __le32 info0;
+ struct rx_mpdu_desc rx_mpdu_info;
+ struct rx_msdu_desc rx_msdu_info;
+ __le32 info1;
+ __le32 info2;
+} __packed;
+
+#define HAL_WBM_RELEASE_RX_CC_INFO0_RBM GENMASK(12, 9)
+#define HAL_WBM_RELEASE_RX_CC_INFO1_COOKIE GENMASK(27, 8)
+/* Used when hw cc is success */
+struct hal_wbm_release_ring_cc_rx {
+ __le32 buf_va_lo;
+ __le32 buf_va_hi;
+ __le32 info0;
+ struct rx_mpdu_desc rx_mpdu_info;
+ struct rx_msdu_desc rx_msdu_info;
+ __le32 buf_pa_lo;
+ __le32 info1;
+} __packed;
+
+#define HAL_WBM_RELEASE_INFO0_REL_SRC_MODULE GENMASK(2, 0)
+#define HAL_WBM_RELEASE_INFO0_BM_ACTION GENMASK(5, 3)
+#define HAL_WBM_RELEASE_INFO0_DESC_TYPE GENMASK(8, 6)
+#define HAL_WBM_RELEASE_INFO0_RXDMA_PUSH_REASON GENMASK(18, 17)
+#define HAL_WBM_RELEASE_INFO0_RXDMA_ERROR_CODE GENMASK(23, 19)
+#define HAL_WBM_RELEASE_INFO0_REO_PUSH_REASON GENMASK(25, 24)
+#define HAL_WBM_RELEASE_INFO0_REO_ERROR_CODE GENMASK(30, 26)
+#define HAL_WBM_RELEASE_INFO0_WBM_INTERNAL_ERROR BIT(31)
+
+#define HAL_WBM_RELEASE_INFO3_FIRST_MSDU BIT(0)
+#define HAL_WBM_RELEASE_INFO3_LAST_MSDU BIT(1)
+#define HAL_WBM_RELEASE_INFO3_CONTINUATION BIT(2)
+
+#define HAL_WBM_RELEASE_INFO5_LOOPING_COUNT GENMASK(31, 28)
+
+struct hal_wbm_release_ring {
+ struct ath12k_buffer_addr buf_addr_info;
+ __le32 info0;
+ __le32 info1;
+ __le32 info2;
+ __le32 info3;
+ __le32 info4;
+ __le32 info5;
+} __packed;
+
+/* hal_wbm_release_ring
+ *
+ * Producer: SW/TQM/RXDMA/REO/SWITCH
+ * Consumer: WBM/SW/FW
+ *
+ * HTT tx status is overlayed on wbm_release ring on 4-byte words 2, 3, 4 and 5
+ * for software based completions.
+ *
+ * buf_addr_info
+ * Details of the physical address of the buffer or link descriptor.
+ *
+ * release_source_module
+ * Indicates which module initiated the release of this buffer/descriptor.
+ * Values are defined in enum %HAL_WBM_REL_SRC_MODULE_.
+ *
+ * buffer_or_desc_type
+ * Field only valid when WBM is marked as the return_buffer_manager in
+ * the Released_Buffer_address_info. Indicates that type of buffer or
+ * descriptor is being released. Values are in enum %HAL_WBM_REL_DESC_TYPE.
+ *
+ * wbm_internal_error
+ * Is set when WBM got a buffer pointer but the action was to push it to
+ * the idle link descriptor ring or do link related activity OR
+ * Is set when WBM got a link buffer pointer but the action was to push it
+ * to the buffer descriptor ring.
+ *
+ * looping_count
+ * A count value that indicates the number of times the
+ * producer of entries into the Buffer Manager Ring has looped
+ * around the ring.
+ *
+ * At initialization time, this value is set to 0. On the
+ * first loop, this value is set to 1. After the max value is
+ * reached allowed by the number of bits for this field, the
+ * count value continues with 0 again.
+ *
+ * In case SW is the consumer of the ring entries, it can
+ * use this field to figure out up to where the producer of
+ * entries has created new entries. This eliminates the need to
+ * check where the head pointer' of the ring is located once
+ * the SW starts processing an interrupt indicating that new
+ * entries have been put into this ring...
+ *
+ * Also note that SW if it wants only needs to look at the
+ * LSB bit of this count value.
+ */
+
+/**
+ * enum hal_wbm_tqm_rel_reason - TQM release reason code
+ * @HAL_WBM_TQM_REL_REASON_FRAME_ACKED: ACK or BACK received for the frame
+ * @HAL_WBM_TQM_REL_REASON_CMD_REMOVE_MPDU: Command remove_mpdus initiated by SW
+ * @HAL_WBM_TQM_REL_REASON_CMD_REMOVE_TX: Command remove transmitted_mpdus
+ * initiated by sw.
+ * @HAL_WBM_TQM_REL_REASON_CMD_REMOVE_NOTX: Command remove untransmitted_mpdus
+ * initiated by sw.
+ * @HAL_WBM_TQM_REL_REASON_CMD_REMOVE_AGED_FRAMES: Command remove aged msdus or
+ * mpdus.
+ * @HAL_WBM_TQM_REL_REASON_CMD_REMOVE_RESEAON1: Remove command initiated by
+ * fw with fw_reason1.
+ * @HAL_WBM_TQM_REL_REASON_CMD_REMOVE_RESEAON2: Remove command initiated by
+ * fw with fw_reason2.
+ * @HAL_WBM_TQM_REL_REASON_CMD_REMOVE_RESEAON3: Remove command initiated by
+ * fw with fw_reason3.
+ */
+enum hal_wbm_tqm_rel_reason {
+ HAL_WBM_TQM_REL_REASON_FRAME_ACKED,
+ HAL_WBM_TQM_REL_REASON_CMD_REMOVE_MPDU,
+ HAL_WBM_TQM_REL_REASON_CMD_REMOVE_TX,
+ HAL_WBM_TQM_REL_REASON_CMD_REMOVE_NOTX,
+ HAL_WBM_TQM_REL_REASON_CMD_REMOVE_AGED_FRAMES,
+ HAL_WBM_TQM_REL_REASON_CMD_REMOVE_RESEAON1,
+ HAL_WBM_TQM_REL_REASON_CMD_REMOVE_RESEAON2,
+ HAL_WBM_TQM_REL_REASON_CMD_REMOVE_RESEAON3,
+};
+
+struct hal_wbm_buffer_ring {
+ struct ath12k_buffer_addr buf_addr_info;
+};
+
+enum hal_mon_end_reason {
+ HAL_MON_STATUS_BUFFER_FULL,
+ HAL_MON_FLUSH_DETECTED,
+ HAL_MON_END_OF_PPDU,
+ HAL_MON_PPDU_TRUNCATED,
+};
+
+#define HAL_SW_MONITOR_RING_INFO0_RXDMA_PUSH_REASON GENMASK(1, 0)
+#define HAL_SW_MONITOR_RING_INFO0_RXDMA_ERROR_CODE GENMASK(6, 2)
+#define HAL_SW_MONITOR_RING_INFO0_MPDU_FRAGMENT_NUMBER GENMASK(10, 7)
+#define HAL_SW_MONITOR_RING_INFO0_FRAMELESS_BAR BIT(11)
+#define HAL_SW_MONITOR_RING_INFO0_STATUS_BUF_COUNT GENMASK(15, 12)
+#define HAL_SW_MONITOR_RING_INFO0_END_OF_PPDU BIT(16)
+
+#define HAL_SW_MONITOR_RING_INFO1_PHY_PPDU_ID GENMASK(15, 0)
+#define HAL_SW_MONITOR_RING_INFO1_RING_ID GENMASK(27, 20)
+#define HAL_SW_MONITOR_RING_INFO1_LOOPING_COUNT GENMASK(31, 28)
+
+struct hal_sw_monitor_ring {
+ struct ath12k_buffer_addr buf_addr_info;
+ struct rx_mpdu_desc rx_mpdu_info;
+ struct ath12k_buffer_addr status_buff_addr_info;
+ __le32 info0; /* %HAL_SW_MONITOR_RING_INFO0 */
+ __le32 info1; /* %HAL_SW_MONITOR_RING_INFO1 */
+} __packed;
+
+/* hal_sw_monitor_ring
+ *
+ * Producer: RXDMA
+ * Consumer: REO/SW/FW
+ * buf_addr_info
+ * Details of the physical address of a buffer or MSDU
+ * link descriptor.
+ *
+ * rx_mpdu_info
+ * Details related to the MPDU being pushed to SW, valid
+ * only if end_of_ppdu is set to 0.
+ *
+ * status_buff_addr_info
+ * Details of the physical address of the first status
+ * buffer used for the PPDU (either the PPDU that included the
+ * MPDU being pushed to SW if end_of_ppdu = 0, or the PPDU
+ * whose end is indicated through end_of_ppdu = 1)
+ *
+ * rxdma_push_reason
+ * Indicates why RXDMA pushed the frame to this ring
+ *
+ * <enum 0 rxdma_error_detected> RXDMA detected an error an
+ * pushed this frame to this queue
+ *
+ * <enum 1 rxdma_routing_instruction> RXDMA pushed the
+ * frame to this queue per received routing instructions. No
+ * error within RXDMA was detected
+ *
+ * <enum 2 rxdma_rx_flush> RXDMA received an RX_FLUSH. As a
+ * result the MSDU link descriptor might not have the
+ * last_msdu_in_mpdu_flag set, but instead WBM might just see a
+ * NULL pointer in the MSDU link descriptor. This is to be
+ * considered a normal condition for this scenario.
+ *
+ * rxdma_error_code
+ * Field only valid when rxdma_push_reason is set to
+ * 'rxdma_error_detected.'
+ *
+ * <enum 0 rxdma_overflow_err>MPDU frame is not complete
+ * due to a FIFO overflow error in RXPCU.
+ *
+ * <enum 1 rxdma_mpdu_length_err>MPDU frame is not complete
+ * due to receiving incomplete MPDU from the PHY
+ *
+ * <enum 3 rxdma_decrypt_err>CRYPTO reported a decryption
+ * error or CRYPTO received an encrypted frame, but did not get
+ * a valid corresponding key id in the peer entry.
+ *
+ * <enum 4 rxdma_tkip_mic_err>CRYPTO reported a TKIP MIC
+ * error
+ *
+ * <enum 5 rxdma_unecrypted_err>CRYPTO reported an
+ * unencrypted frame error when encrypted was expected
+ *
+ * <enum 6 rxdma_msdu_len_err>RX OLE reported an MSDU
+ * length error
+ *
+ * <enum 7 rxdma_msdu_limit_err>RX OLE reported that max
+ * number of MSDUs allowed in an MPDU got exceeded
+ *
+ * <enum 8 rxdma_wifi_parse_err>RX OLE reported a parsing
+ * error
+ *
+ * <enum 9 rxdma_amsdu_parse_err>RX OLE reported an A-MSDU
+ * parsing error
+ *
+ * <enum 10 rxdma_sa_timeout_err>RX OLE reported a timeout
+ * during SA search
+ *
+ * <enum 11 rxdma_da_timeout_err>RX OLE reported a timeout
+ * during DA search
+ *
+ * <enum 12 rxdma_flow_timeout_err>RX OLE reported a
+ * timeout during flow search
+ *
+ * <enum 13 rxdma_flush_request>RXDMA received a flush
+ * request
+ *
+ * <enum 14 rxdma_amsdu_fragment_err>Rx PCU reported A-MSDU
+ * present as well as a fragmented MPDU.
+ *
+ * mpdu_fragment_number
+ * Field only valid when Reo_level_mpdu_frame_info.
+ * Rx_mpdu_desc_info_details.Fragment_flag is set and
+ * end_of_ppdu is set to 0.
+ *
+ * The fragment number from the 802.11 header.
+ *
+ * Note that the sequence number is embedded in the field:
+ * Reo_level_mpdu_frame_info. Rx_mpdu_desc_info_details.
+ * Mpdu_sequence_number
+ *
+ * frameless_bar
+ * When set, this SW monitor ring struct contains BAR info
+ * from a multi TID BAR frame. The original multi TID BAR frame
+ * itself contained all the REO info for the first TID, but all
+ * the subsequent TID info and their linkage to the REO
+ * descriptors is passed down as 'frameless' BAR info.
+ *
+ * The only fields valid in this descriptor when this bit
+ * is within the
+ *
+ * Reo_level_mpdu_frame_info:
+ * Within Rx_mpdu_desc_info_details:
+ * Mpdu_Sequence_number
+ * BAR_frame
+ * Peer_meta_data
+ * All other fields shall be set to 0.
+ *
+ * status_buf_count
+ * A count of status buffers used so far for the PPDU
+ * (either the PPDU that included the MPDU being pushed to SW
+ * if end_of_ppdu = 0, or the PPDU whose end is indicated
+ * through end_of_ppdu = 1)
+ *
+ * end_of_ppdu
+ * Some hw RXDMA can be configured to generate a separate
+ * 'SW_MONITOR_RING' descriptor at the end of a PPDU (either
+ * through an 'RX_PPDU_END' TLV or through an 'RX_FLUSH') to
+ * demarcate PPDUs.
+ *
+ * For such a descriptor, this bit is set to 1 and fields
+ * Reo_level_mpdu_frame_info, mpdu_fragment_number and
+ * Frameless_bar are all set to 0.
+ *
+ * Otherwise this bit is set to 0.
+ *
+ * phy_ppdu_id
+ * A PPDU counter value that PHY increments for every PPDU
+ * received
+ *
+ * The counter value wraps around. Some hw RXDMA can be
+ * configured to copy this from the RX_PPDU_START TLV for every
+ * output descriptor.
+ *
+ * ring_id
+ * For debugging.
+ * This field is filled in by the SRNG module.
+ * It help to identify the ring that is being looked
+ *
+ * looping_count
+ * For debugging.
+ * This field is filled in by the SRNG module.
+ *
+ * A count value that indicates the number of times the
+ * producer of entries into this Ring has looped around the
+ * ring.
+ * At initialization time, this value is set to 0. On the
+ * first loop, this value is set to 1. After the max value is
+ * reached allowed by the number of bits for this field, the
+ * count value continues with 0 again.
+ *
+ * In case SW is the consumer of the ring entries, it can
+ * use this field to figure out up to where the producer of
+ * entries has created new entries. This eliminates the need to
+ * check where the head pointer' of the ring is located once
+ * the SW starts processing an interrupt indicating that new
+ * entries have been put into this ring...
+ */
+
+enum hal_desc_owner {
+ HAL_DESC_OWNER_WBM,
+ HAL_DESC_OWNER_SW,
+ HAL_DESC_OWNER_TQM,
+ HAL_DESC_OWNER_RXDMA,
+ HAL_DESC_OWNER_REO,
+ HAL_DESC_OWNER_SWITCH,
+};
+
+enum hal_desc_buf_type {
+ HAL_DESC_BUF_TYPE_TX_MSDU_LINK,
+ HAL_DESC_BUF_TYPE_TX_MPDU_LINK,
+ HAL_DESC_BUF_TYPE_TX_MPDU_QUEUE_HEAD,
+ HAL_DESC_BUF_TYPE_TX_MPDU_QUEUE_EXT,
+ HAL_DESC_BUF_TYPE_TX_FLOW,
+ HAL_DESC_BUF_TYPE_TX_BUFFER,
+ HAL_DESC_BUF_TYPE_RX_MSDU_LINK,
+ HAL_DESC_BUF_TYPE_RX_MPDU_LINK,
+ HAL_DESC_BUF_TYPE_RX_REO_QUEUE,
+ HAL_DESC_BUF_TYPE_RX_REO_QUEUE_EXT,
+ HAL_DESC_BUF_TYPE_RX_BUFFER,
+ HAL_DESC_BUF_TYPE_IDLE_LINK,
+};
+
+#define HAL_DESC_REO_OWNED 4
+#define HAL_DESC_REO_QUEUE_DESC 8
+#define HAL_DESC_REO_QUEUE_EXT_DESC 9
+#define HAL_DESC_REO_NON_QOS_TID 16
+
+#define HAL_DESC_HDR_INFO0_OWNER GENMASK(3, 0)
+#define HAL_DESC_HDR_INFO0_BUF_TYPE GENMASK(7, 4)
+#define HAL_DESC_HDR_INFO0_DBG_RESERVED GENMASK(31, 8)
+
+struct hal_desc_header {
+ __le32 info0;
+} __packed;
+
+struct hal_rx_mpdu_link_ptr {
+ struct ath12k_buffer_addr addr_info;
+} __packed;
+
+struct hal_rx_msdu_details {
+ struct ath12k_buffer_addr buf_addr_info;
+ struct rx_msdu_desc rx_msdu_info;
+ struct rx_msdu_ext_desc rx_msdu_ext_info;
+} __packed;
+
+#define HAL_RX_MSDU_LNK_INFO0_RX_QUEUE_NUMBER GENMASK(15, 0)
+#define HAL_RX_MSDU_LNK_INFO0_FIRST_MSDU_LNK BIT(16)
+
+struct hal_rx_msdu_link {
+ struct hal_desc_header desc_hdr;
+ struct ath12k_buffer_addr buf_addr_info;
+ __le32 info0;
+ __le32 pn[4];
+ struct hal_rx_msdu_details msdu_link[6];
+} __packed;
+
+struct hal_rx_reo_queue_ext {
+ struct hal_desc_header desc_hdr;
+ __le32 rsvd;
+ struct hal_rx_mpdu_link_ptr mpdu_link[15];
+} __packed;
+
+/* hal_rx_reo_queue_ext
+ * Consumer: REO
+ * Producer: REO
+ *
+ * descriptor_header
+ * Details about which module owns this struct.
+ *
+ * mpdu_link
+ * Pointer to the next MPDU_link descriptor in the MPDU queue.
+ */
+
+enum hal_rx_reo_queue_pn_size {
+ HAL_RX_REO_QUEUE_PN_SIZE_24,
+ HAL_RX_REO_QUEUE_PN_SIZE_48,
+ HAL_RX_REO_QUEUE_PN_SIZE_128,
+};
+
+#define HAL_RX_REO_QUEUE_RX_QUEUE_NUMBER GENMASK(15, 0)
+
+#define HAL_RX_REO_QUEUE_INFO0_VLD BIT(0)
+#define HAL_RX_REO_QUEUE_INFO0_ASSOC_LNK_DESC_COUNTER GENMASK(2, 1)
+#define HAL_RX_REO_QUEUE_INFO0_DIS_DUP_DETECTION BIT(3)
+#define HAL_RX_REO_QUEUE_INFO0_SOFT_REORDER_EN BIT(4)
+#define HAL_RX_REO_QUEUE_INFO0_AC GENMASK(6, 5)
+#define HAL_RX_REO_QUEUE_INFO0_BAR BIT(7)
+#define HAL_RX_REO_QUEUE_INFO0_RETRY BIT(8)
+#define HAL_RX_REO_QUEUE_INFO0_CHECK_2K_MODE BIT(9)
+#define HAL_RX_REO_QUEUE_INFO0_OOR_MODE BIT(10)
+#define HAL_RX_REO_QUEUE_INFO0_BA_WINDOW_SIZE GENMASK(20, 11)
+#define HAL_RX_REO_QUEUE_INFO0_PN_CHECK BIT(21)
+#define HAL_RX_REO_QUEUE_INFO0_EVEN_PN BIT(22)
+#define HAL_RX_REO_QUEUE_INFO0_UNEVEN_PN BIT(23)
+#define HAL_RX_REO_QUEUE_INFO0_PN_HANDLE_ENABLE BIT(24)
+#define HAL_RX_REO_QUEUE_INFO0_PN_SIZE GENMASK(26, 25)
+#define HAL_RX_REO_QUEUE_INFO0_IGNORE_AMPDU_FLG BIT(27)
+
+#define HAL_RX_REO_QUEUE_INFO1_SVLD BIT(0)
+#define HAL_RX_REO_QUEUE_INFO1_SSN GENMASK(12, 1)
+#define HAL_RX_REO_QUEUE_INFO1_CURRENT_IDX GENMASK(22, 13)
+#define HAL_RX_REO_QUEUE_INFO1_SEQ_2K_ERR BIT(23)
+#define HAL_RX_REO_QUEUE_INFO1_PN_ERR BIT(24)
+#define HAL_RX_REO_QUEUE_INFO1_PN_VALID BIT(31)
+
+#define HAL_RX_REO_QUEUE_INFO2_MPDU_COUNT GENMASK(6, 0)
+#define HAL_RX_REO_QUEUE_INFO2_MSDU_COUNT (31, 7)
+
+#define HAL_RX_REO_QUEUE_INFO3_TIMEOUT_COUNT GENMASK(9, 4)
+#define HAL_RX_REO_QUEUE_INFO3_FWD_DUE_TO_BAR_CNT GENMASK(15, 10)
+#define HAL_RX_REO_QUEUE_INFO3_DUPLICATE_COUNT GENMASK(31, 16)
+
+#define HAL_RX_REO_QUEUE_INFO4_FRAME_IN_ORD_COUNT GENMASK(23, 0)
+#define HAL_RX_REO_QUEUE_INFO4_BAR_RECVD_COUNT GENMASK(31, 24)
+
+#define HAL_RX_REO_QUEUE_INFO5_LATE_RX_MPDU_COUNT GENMASK(11, 0)
+#define HAL_RX_REO_QUEUE_INFO5_WINDOW_JUMP_2K GENMASK(15, 12)
+#define HAL_RX_REO_QUEUE_INFO5_HOLE_COUNT GENMASK(31, 16)
+
+struct hal_rx_reo_queue {
+ struct hal_desc_header desc_hdr;
+ __le32 rx_queue_num;
+ __le32 info0;
+ __le32 info1;
+ __le32 pn[4];
+ __le32 last_rx_enqueue_timestamp;
+ __le32 last_rx_dequeue_timestamp;
+ __le32 next_aging_queue[2];
+ __le32 prev_aging_queue[2];
+ __le32 rx_bitmap[9];
+ __le32 info2;
+ __le32 info3;
+ __le32 info4;
+ __le32 processed_mpdus;
+ __le32 processed_msdus;
+ __le32 processed_total_bytes;
+ __le32 info5;
+ __le32 rsvd[2];
+ struct hal_rx_reo_queue_ext ext_desc[];
+} __packed;
+
+/* hal_rx_reo_queue
+ *
+ * descriptor_header
+ * Details about which module owns this struct. Note that sub field
+ * Buffer_type shall be set to receive_reo_queue_descriptor.
+ *
+ * receive_queue_number
+ * Indicates the MPDU queue ID to which this MPDU link descriptor belongs.
+ *
+ * vld
+ * Valid bit indicating a session is established and the queue descriptor
+ * is valid.
+ * associated_link_descriptor_counter
+ * Indicates which of the 3 link descriptor counters shall be incremented
+ * or decremented when link descriptors are added or removed from this
+ * flow queue.
+ * disable_duplicate_detection
+ * When set, do not perform any duplicate detection.
+ * soft_reorder_enable
+ * When set, REO has been instructed to not perform the actual re-ordering
+ * of frames for this queue, but just to insert the reorder opcodes.
+ * ac
+ * Indicates the access category of the queue descriptor.
+ * bar
+ * Indicates if BAR has been received.
+ * retry
+ * Retry bit is checked if this bit is set.
+ * chk_2k_mode
+ * Indicates what type of operation is expected from Reo when the received
+ * frame SN falls within the 2K window.
+ * oor_mode
+ * Indicates what type of operation is expected when the received frame
+ * falls within the OOR window.
+ * ba_window_size
+ * Indicates the negotiated (window size + 1). Max of 256 bits.
+ *
+ * A value 255 means 256 bitmap, 63 means 64 bitmap, 0 (means non-BA
+ * session, with window size of 0). The 3 values here are the main values
+ * validated, but other values should work as well.
+ *
+ * A BA window size of 0 (=> one frame entry bitmat), means that there is
+ * no additional rx_reo_queue_ext desc. following rx_reo_queue in memory.
+ * A BA window size of 1 - 105, means that there is 1 rx_reo_queue_ext.
+ * A BA window size of 106 - 210, means that there are 2 rx_reo_queue_ext.
+ * A BA window size of 211 - 256, means that there are 3 rx_reo_queue_ext.
+ * pn_check_needed, pn_shall_be_even, pn_shall_be_uneven, pn_handling_enable,
+ * pn_size
+ * REO shall perform the PN increment check, even number check, uneven
+ * number check, PN error check and size of the PN field check.
+ * ignore_ampdu_flag
+ * REO shall ignore the ampdu_flag on entrance descriptor for this queue.
+ *
+ * svld
+ * Sequence number in next field is valid one.
+ * ssn
+ * Starting Sequence number of the session.
+ * current_index
+ * Points to last forwarded packet
+ * seq_2k_error_detected_flag
+ * REO has detected a 2k error jump in the sequence number and from that
+ * moment forward, all new frames are forwarded directly to FW, without
+ * duplicate detect, reordering, etc.
+ * pn_error_detected_flag
+ * REO has detected a PN error.
+ */
+
+#define HAL_REO_UPD_RX_QUEUE_INFO0_QUEUE_ADDR_HI GENMASK(7, 0)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_RX_QUEUE_NUM BIT(8)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_VLD BIT(9)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_ASSOC_LNK_DESC_CNT BIT(10)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_DIS_DUP_DETECTION BIT(11)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_SOFT_REORDER_EN BIT(12)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_AC BIT(13)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_BAR BIT(14)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_RETRY BIT(15)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_CHECK_2K_MODE BIT(16)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_OOR_MODE BIT(17)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_BA_WINDOW_SIZE BIT(18)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_CHECK BIT(19)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_EVEN_PN BIT(20)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_UNEVEN_PN BIT(21)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_HANDLE_ENABLE BIT(22)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_SIZE BIT(23)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_IGNORE_AMPDU_FLG BIT(24)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_SVLD BIT(25)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_SSN BIT(26)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_SEQ_2K_ERR BIT(27)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_ERR BIT(28)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_VALID BIT(29)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN BIT(30)
+
+#define HAL_REO_UPD_RX_QUEUE_INFO1_RX_QUEUE_NUMBER GENMASK(15, 0)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_VLD BIT(16)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_ASSOC_LNK_DESC_COUNTER GENMASK(18, 17)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_DIS_DUP_DETECTION BIT(19)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_SOFT_REORDER_EN BIT(20)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_AC GENMASK(22, 21)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_BAR BIT(23)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_RETRY BIT(24)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_CHECK_2K_MODE BIT(25)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_OOR_MODE BIT(26)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_PN_CHECK BIT(27)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_EVEN_PN BIT(28)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_UNEVEN_PN BIT(29)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_PN_HANDLE_ENABLE BIT(30)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_IGNORE_AMPDU_FLG BIT(31)
+
+#define HAL_REO_UPD_RX_QUEUE_INFO2_BA_WINDOW_SIZE GENMASK(7, 0)
+#define HAL_REO_UPD_RX_QUEUE_INFO2_PN_SIZE GENMASK(9, 8)
+#define HAL_REO_UPD_RX_QUEUE_INFO2_SVLD BIT(10)
+#define HAL_REO_UPD_RX_QUEUE_INFO2_SSN GENMASK(22, 11)
+#define HAL_REO_UPD_RX_QUEUE_INFO2_SEQ_2K_ERR BIT(23)
+#define HAL_REO_UPD_RX_QUEUE_INFO2_PN_ERR BIT(24)
+#define HAL_REO_UPD_RX_QUEUE_INFO2_PN_VALID BIT(25)
+
+struct hal_reo_update_rx_queue {
+ struct hal_reo_cmd_hdr cmd;
+ __le32 queue_addr_lo;
+ __le32 info0;
+ __le32 info1;
+ __le32 info2;
+ __le32 pn[4];
+} __packed;
+
+#define HAL_REO_UNBLOCK_CACHE_INFO0_UNBLK_CACHE BIT(0)
+#define HAL_REO_UNBLOCK_CACHE_INFO0_RESOURCE_IDX GENMASK(2, 1)
+
+struct hal_reo_unblock_cache {
+ struct hal_reo_cmd_hdr cmd;
+ __le32 info0;
+ __le32 rsvd[7];
+} __packed;
+
+enum hal_reo_exec_status {
+ HAL_REO_EXEC_STATUS_SUCCESS,
+ HAL_REO_EXEC_STATUS_BLOCKED,
+ HAL_REO_EXEC_STATUS_FAILED,
+ HAL_REO_EXEC_STATUS_RESOURCE_BLOCKED,
+};
+
+#define HAL_REO_STATUS_HDR_INFO0_STATUS_NUM GENMASK(15, 0)
+#define HAL_REO_STATUS_HDR_INFO0_EXEC_TIME GENMASK(25, 16)
+#define HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS GENMASK(27, 26)
+
+struct hal_reo_status_hdr {
+ __le32 info0;
+ __le32 timestamp;
+} __packed;
+
+/* hal_reo_status_hdr
+ * Producer: REO
+ * Consumer: SW
+ *
+ * status_num
+ * The value in this field is equal to value of the reo command
+ * number. This field helps to correlate the statuses with the REO
+ * commands.
+ *
+ * execution_time (in us)
+ * The amount of time REO took to excecute the command. Note that
+ * this time does not include the duration of the command waiting
+ * in the command ring, before the execution started.
+ *
+ * execution_status
+ * Execution status of the command. Values are defined in
+ * enum %HAL_REO_EXEC_STATUS_.
+ */
+#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO0_SSN GENMASK(11, 0)
+#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO0_CUR_IDX GENMASK(21, 12)
+
+#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO1_MPDU_COUNT GENMASK(6, 0)
+#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO1_MSDU_COUNT GENMASK(31, 7)
+
+#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO2_WINDOW_JMP2K GENMASK(3, 0)
+#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO2_TIMEOUT_COUNT GENMASK(9, 4)
+#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO2_FDTB_COUNT GENMASK(15, 10)
+#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO2_DUPLICATE_COUNT GENMASK(31, 16)
+
+#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO3_FIO_COUNT GENMASK(23, 0)
+#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO3_BAR_RCVD_CNT GENMASK(31, 24)
+
+#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO4_LATE_RX_MPDU GENMASK(11, 0)
+#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO4_HOLE_COUNT GENMASK(27, 12)
+
+#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO5_LOOPING_CNT GENMASK(31, 28)
+
+struct hal_reo_get_queue_stats_status {
+ struct hal_reo_status_hdr hdr;
+ __le32 info0;
+ __le32 pn[4];
+ __le32 last_rx_enqueue_timestamp;
+ __le32 last_rx_dequeue_timestamp;
+ __le32 rx_bitmap[9];
+ __le32 info1;
+ __le32 info2;
+ __le32 info3;
+ __le32 num_mpdu_frames;
+ __le32 num_msdu_frames;
+ __le32 total_bytes;
+ __le32 info4;
+ __le32 info5;
+} __packed;
+
+/* hal_reo_get_queue_stats_status
+ * Producer: REO
+ * Consumer: SW
+ *
+ * status_hdr
+ * Details that can link this status with the original command. It
+ * also contains info on how long REO took to execute this command.
+ *
+ * ssn
+ * Starting Sequence number of the session, this changes whenever
+ * window moves (can be filled by SW then maintained by REO).
+ *
+ * current_index
+ * Points to last forwarded packet.
+ *
+ * pn
+ * Bits of the PN number.
+ *
+ * last_rx_enqueue_timestamp
+ * last_rx_dequeue_timestamp
+ * Timestamp of arrival of the last MPDU for this queue and
+ * Timestamp of forwarding an MPDU accordingly.
+ *
+ * rx_bitmap
+ * When a bit is set, the corresponding frame is currently held
+ * in the re-order queue. The bitmap is Fully managed by HW.
+ *
+ * current_mpdu_count
+ * current_msdu_count
+ * The number of MPDUs and MSDUs in the queue.
+ *
+ * timeout_count
+ * The number of times REO started forwarding frames even though
+ * there is a hole in the bitmap. Forwarding reason is timeout.
+ *
+ * forward_due_to_bar_count
+ * The number of times REO started forwarding frames even though
+ * there is a hole in the bitmap. Fwd reason is reception of BAR.
+ *
+ * duplicate_count
+ * The number of duplicate frames that have been detected.
+ *
+ * frames_in_order_count
+ * The number of frames that have been received in order (without
+ * a hole that prevented them from being forwarded immediately).
+ *
+ * bar_received_count
+ * The number of times a BAR frame is received.
+ *
+ * mpdu_frames_processed_count
+ * msdu_frames_processed_count
+ * The total number of MPDU/MSDU frames that have been processed.
+ *
+ * total_bytes
+ * An approximation of the number of bytes received for this queue.
+ *
+ * late_receive_mpdu_count
+ * The number of MPDUs received after the window had already moved
+ * on. The 'late' sequence window is defined as
+ * (Window SSN - 256) - (Window SSN - 1).
+ *
+ * window_jump_2k
+ * The number of times the window moved more than 2K
+ *
+ * hole_count
+ * The number of times a hole was created in the receive bitmap.
+ *
+ * looping_count
+ * A count value that indicates the number of times the producer of
+ * entries into this Ring has looped around the ring.
+ */
+
+#define HAL_REO_STATUS_LOOP_CNT GENMASK(31, 28)
+
+#define HAL_REO_FLUSH_QUEUE_INFO0_ERR_DETECTED BIT(0)
+#define HAL_REO_FLUSH_QUEUE_INFO0_RSVD GENMASK(31, 1)
+#define HAL_REO_FLUSH_QUEUE_INFO1_RSVD GENMASK(27, 0)
+
+struct hal_reo_flush_queue_status {
+ struct hal_reo_status_hdr hdr;
+ __le32 info0;
+ __le32 rsvd0[21];
+ __le32 info1;
+} __packed;
+
+/* hal_reo_flush_queue_status
+ * Producer: REO
+ * Consumer: SW
+ *
+ * status_hdr
+ * Details that can link this status with the original command. It
+ * also contains info on how long REO took to execute this command.
+ *
+ * error_detected
+ * Status of blocking resource
+ *
+ * 0 - No error has been detected while executing this command
+ * 1 - Error detected. The resource to be used for blocking was
+ * already in use.
+ *
+ * looping_count
+ * A count value that indicates the number of times the producer of
+ * entries into this Ring has looped around the ring.
+ */
+
+#define HAL_REO_FLUSH_CACHE_STATUS_INFO0_IS_ERR BIT(0)
+#define HAL_REO_FLUSH_CACHE_STATUS_INFO0_BLOCK_ERR_CODE GENMASK(2, 1)
+#define HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_STATUS_HIT BIT(8)
+#define HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_DESC_TYPE GENMASK(11, 9)
+#define HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_CLIENT_ID GENMASK(15, 12)
+#define HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_ERR GENMASK(17, 16)
+#define HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_COUNT GENMASK(25, 18)
+
+struct hal_reo_flush_cache_status {
+ struct hal_reo_status_hdr hdr;
+ __le32 info0;
+ __le32 rsvd0[21];
+ __le32 info1;
+} __packed;
+
+/* hal_reo_flush_cache_status
+ * Producer: REO
+ * Consumer: SW
+ *
+ * status_hdr
+ * Details that can link this status with the original command. It
+ * also contains info on how long REO took to execute this command.
+ *
+ * error_detected
+ * Status for blocking resource handling
+ *
+ * 0 - No error has been detected while executing this command
+ * 1 - An error in the blocking resource management was detected
+ *
+ * block_error_details
+ * only valid when error_detected is set
+ *
+ * 0 - No blocking related errors found
+ * 1 - Blocking resource is already in use
+ * 2 - Resource requested to be unblocked, was not blocked
+ *
+ * cache_controller_flush_status_hit
+ * The status that the cache controller returned on executing the
+ * flush command.
+ *
+ * 0 - miss; 1 - hit
+ *
+ * cache_controller_flush_status_desc_type
+ * Flush descriptor type
+ *
+ * cache_controller_flush_status_client_id
+ * Module who made the flush request
+ *
+ * In REO, this is always 0
+ *
+ * cache_controller_flush_status_error
+ * Error condition
+ *
+ * 0 - No error found
+ * 1 - HW interface is still busy
+ * 2 - Line currently locked. Used for one line flush command
+ * 3 - At least one line is still locked.
+ * Used for cache flush command.
+ *
+ * cache_controller_flush_count
+ * The number of lines that were actually flushed out
+ *
+ * looping_count
+ * A count value that indicates the number of times the producer of
+ * entries into this Ring has looped around the ring.
+ */
+
+#define HAL_REO_UNBLOCK_CACHE_STATUS_INFO0_IS_ERR BIT(0)
+#define HAL_REO_UNBLOCK_CACHE_STATUS_INFO0_TYPE BIT(1)
+
+struct hal_reo_unblock_cache_status {
+ struct hal_reo_status_hdr hdr;
+ __le32 info0;
+ __le32 rsvd0[21];
+ __le32 info1;
+} __packed;
+
+/* hal_reo_unblock_cache_status
+ * Producer: REO
+ * Consumer: SW
+ *
+ * status_hdr
+ * Details that can link this status with the original command. It
+ * also contains info on how long REO took to execute this command.
+ *
+ * error_detected
+ * 0 - No error has been detected while executing this command
+ * 1 - The blocking resource was not in use, and therefore it could
+ * not be unblocked.
+ *
+ * unblock_type
+ * Reference to the type of unblock command
+ * 0 - Unblock a blocking resource
+ * 1 - The entire cache usage is unblock
+ *
+ * looping_count
+ * A count value that indicates the number of times the producer of
+ * entries into this Ring has looped around the ring.
+ */
+
+#define HAL_REO_FLUSH_TIMEOUT_STATUS_INFO0_IS_ERR BIT(0)
+#define HAL_REO_FLUSH_TIMEOUT_STATUS_INFO0_LIST_EMPTY BIT(1)
+
+#define HAL_REO_FLUSH_TIMEOUT_STATUS_INFO1_REL_DESC_COUNT GENMASK(15, 0)
+#define HAL_REO_FLUSH_TIMEOUT_STATUS_INFO1_FWD_BUF_COUNT GENMASK(31, 16)
+
+struct hal_reo_flush_timeout_list_status {
+ struct hal_reo_status_hdr hdr;
+ __le32 info0;
+ __le32 info1;
+ __le32 rsvd0[20];
+ __le32 info2;
+} __packed;
+
+/* hal_reo_flush_timeout_list_status
+ * Producer: REO
+ * Consumer: SW
+ *
+ * status_hdr
+ * Details that can link this status with the original command. It
+ * also contains info on how long REO took to execute this command.
+ *
+ * error_detected
+ * 0 - No error has been detected while executing this command
+ * 1 - Command not properly executed and returned with error
+ *
+ * timeout_list_empty
+ * When set, REO has depleted the timeout list and all entries are
+ * gone.
+ *
+ * release_desc_count
+ * Producer: SW; Consumer: REO
+ * The number of link descriptor released
+ *
+ * forward_buf_count
+ * Producer: SW; Consumer: REO
+ * The number of buffers forwarded to the REO destination rings
+ *
+ * looping_count
+ * A count value that indicates the number of times the producer of
+ * entries into this Ring has looped around the ring.
+ */
+
+#define HAL_REO_DESC_THRESH_STATUS_INFO0_THRESH_INDEX GENMASK(1, 0)
+#define HAL_REO_DESC_THRESH_STATUS_INFO1_LINK_DESC_COUNTER0 GENMASK(23, 0)
+#define HAL_REO_DESC_THRESH_STATUS_INFO2_LINK_DESC_COUNTER1 GENMASK(23, 0)
+#define HAL_REO_DESC_THRESH_STATUS_INFO3_LINK_DESC_COUNTER2 GENMASK(23, 0)
+#define HAL_REO_DESC_THRESH_STATUS_INFO4_LINK_DESC_COUNTER_SUM GENMASK(25, 0)
+
+struct hal_reo_desc_thresh_reached_status {
+ struct hal_reo_status_hdr hdr;
+ __le32 info0;
+ __le32 info1;
+ __le32 info2;
+ __le32 info3;
+ __le32 info4;
+ __le32 rsvd0[17];
+ __le32 info5;
+} __packed;
+
+/* hal_reo_desc_thresh_reached_status
+ * Producer: REO
+ * Consumer: SW
+ *
+ * status_hdr
+ * Details that can link this status with the original command. It
+ * also contains info on how long REO took to execute this command.
+ *
+ * threshold_index
+ * The index of the threshold register whose value got reached
+ *
+ * link_descriptor_counter0
+ * link_descriptor_counter1
+ * link_descriptor_counter2
+ * link_descriptor_counter_sum
+ * Value of the respective counters at generation of this message
+ *
+ * looping_count
+ * A count value that indicates the number of times the producer of
+ * entries into this Ring has looped around the ring.
+ */
+
+#define HAL_TCL_ENTRANCE_FROM_PPE_RING_INFO0_DATA_LENGTH GENMASK(13, 0)
+#define HAL_TCL_ENTRANCE_FROM_PPE_RING_INFO0_L4_CSUM_STATUS BIT(14)
+#define HAL_TCL_ENTRANCE_FROM_PPE_RING_INFO0_L3_CSUM_STATUS BIT(15)
+#define HAL_TCL_ENTRANCE_FROM_PPE_RING_INFO0_PID GENMASK(27, 24)
+#define HAL_TCL_ENTRANCE_FROM_PPE_RING_INFO0_QDISC BIT(28)
+#define HAL_TCL_ENTRANCE_FROM_PPE_RING_INFO0_MULTICAST BIT(29)
+#define HAL_TCL_ENTRANCE_FROM_PPE_RING_INFO0_MORE BIT(30)
+#define HAL_TCL_ENTRANCE_FROM_PPE_RING_INFO0_VALID_TOGGLE BIT(31)
+
+struct hal_tcl_entrance_from_ppe_ring {
+ __le32 buffer_addr;
+ __le32 info0;
+} __packed;
+
+struct hal_mon_buf_ring {
+ __le32 paddr_lo;
+ __le32 paddr_hi;
+ __le64 cookie;
+};
+
+/* hal_mon_buf_ring
+ * Producer : SW
+ * Consumer : Monitor
+ *
+ * paddr_lo
+ * Lower 32-bit physical address of the buffer pointer from the source ring.
+ * paddr_hi
+ * bit range 7-0 : upper 8 bit of the physical address.
+ * bit range 31-8 : reserved.
+ * cookie
+ * Consumer: RxMon/TxMon 64 bit cookie of the buffers.
+ */
+
+#define HAL_MON_DEST_COOKIE_BUF_ID GENMASK(17, 0)
+
+#define HAL_MON_DEST_INFO0_END_OFFSET GENMASK(15, 0)
+#define HAL_MON_DEST_INFO0_FLUSH_DETECTED BIT(16)
+#define HAL_MON_DEST_INFO0_END_OF_PPDU BIT(17)
+#define HAL_MON_DEST_INFO0_INITIATOR BIT(18)
+#define HAL_MON_DEST_INFO0_EMPTY_DESC BIT(19)
+#define HAL_MON_DEST_INFO0_RING_ID GENMASK(27, 20)
+#define HAL_MON_DEST_INFO0_LOOPING_COUNT GENMASK(31, 28)
+
+struct hal_mon_dest_desc {
+ __le32 cookie;
+ __le32 reserved;
+ __le32 ppdu_id;
+ __le32 info0;
+};
+
+/* hal_mon_dest_ring
+ * Producer : TxMon/RxMon
+ * Consumer : SW
+ * cookie
+ * bit 0 -17 buf_id to track the skb's vaddr.
+ * ppdu_id
+ * Phy ppdu_id
+ * end_offset
+ * The offset into status buffer where DMA ended, ie., offset to the last
+ * TLV + last TLV size.
+ * flush_detected
+ * Indicates whether 'tx_flush' or 'rx_flush' occurred.
+ * end_of_ppdu
+ * Indicates end of ppdu.
+ * pmac_id
+ * Indicates PMAC that received from frame.
+ * empty_descriptor
+ * This descriptor is written on flush or end of ppdu or end of status
+ * buffer.
+ * ring_id
+ * updated by SRNG.
+ * looping_count
+ * updated by SRNG.
+ */
+
+#endif /* ATH12K_HAL_DESC_H */
diff --git a/drivers/net/wireless/ath/ath12k/hal_rx.c b/drivers/net/wireless/ath/ath12k/hal_rx.c
new file mode 100644
index 000000000000..ee61a6462fdc
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/hal_rx.c
@@ -0,0 +1,850 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include "debug.h"
+#include "hal.h"
+#include "hal_tx.h"
+#include "hal_rx.h"
+#include "hal_desc.h"
+#include "hif.h"
+
+static void ath12k_hal_reo_set_desc_hdr(struct hal_desc_header *hdr,
+ u8 owner, u8 buffer_type, u32 magic)
+{
+ hdr->info0 = le32_encode_bits(owner, HAL_DESC_HDR_INFO0_OWNER) |
+ le32_encode_bits(buffer_type, HAL_DESC_HDR_INFO0_BUF_TYPE);
+
+ /* Magic pattern in reserved bits for debugging */
+ hdr->info0 |= le32_encode_bits(magic, HAL_DESC_HDR_INFO0_DBG_RESERVED);
+}
+
+static int ath12k_hal_reo_cmd_queue_stats(struct hal_tlv_64_hdr *tlv,
+ struct ath12k_hal_reo_cmd *cmd)
+{
+ struct hal_reo_get_queue_stats *desc;
+
+ tlv->tl = u32_encode_bits(HAL_REO_GET_QUEUE_STATS, HAL_TLV_HDR_TAG) |
+ u32_encode_bits(sizeof(*desc), HAL_TLV_HDR_LEN);
+
+ desc = (struct hal_reo_get_queue_stats *)tlv->value;
+ memset_startat(desc, 0, queue_addr_lo);
+
+ desc->cmd.info0 &= ~cpu_to_le32(HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED);
+ if (cmd->flag & HAL_REO_CMD_FLG_NEED_STATUS)
+ desc->cmd.info0 |= cpu_to_le32(HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED);
+
+ desc->queue_addr_lo = cpu_to_le32(cmd->addr_lo);
+ desc->info0 = le32_encode_bits(cmd->addr_hi,
+ HAL_REO_GET_QUEUE_STATS_INFO0_QUEUE_ADDR_HI);
+ if (cmd->flag & HAL_REO_CMD_FLG_STATS_CLEAR)
+ desc->info0 |= cpu_to_le32(HAL_REO_GET_QUEUE_STATS_INFO0_CLEAR_STATS);
+
+ return le32_get_bits(desc->cmd.info0, HAL_REO_CMD_HDR_INFO0_CMD_NUMBER);
+}
+
+static int ath12k_hal_reo_cmd_flush_cache(struct ath12k_hal *hal,
+ struct hal_tlv_64_hdr *tlv,
+ struct ath12k_hal_reo_cmd *cmd)
+{
+ struct hal_reo_flush_cache *desc;
+ u8 avail_slot = ffz(hal->avail_blk_resource);
+
+ if (cmd->flag & HAL_REO_CMD_FLG_FLUSH_BLOCK_LATER) {
+ if (avail_slot >= HAL_MAX_AVAIL_BLK_RES)
+ return -ENOSPC;
+
+ hal->current_blk_index = avail_slot;
+ }
+
+ tlv->tl = u32_encode_bits(HAL_REO_FLUSH_CACHE, HAL_TLV_HDR_TAG) |
+ u32_encode_bits(sizeof(*desc), HAL_TLV_HDR_LEN);
+
+ desc = (struct hal_reo_flush_cache *)tlv->value;
+ memset_startat(desc, 0, cache_addr_lo);
+
+ desc->cmd.info0 &= ~cpu_to_le32(HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED);
+ if (cmd->flag & HAL_REO_CMD_FLG_NEED_STATUS)
+ desc->cmd.info0 |= cpu_to_le32(HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED);
+
+ desc->cache_addr_lo = cpu_to_le32(cmd->addr_lo);
+ desc->info0 = le32_encode_bits(cmd->addr_hi,
+ HAL_REO_FLUSH_CACHE_INFO0_CACHE_ADDR_HI);
+
+ if (cmd->flag & HAL_REO_CMD_FLG_FLUSH_FWD_ALL_MPDUS)
+ desc->info0 |= cpu_to_le32(HAL_REO_FLUSH_CACHE_INFO0_FWD_ALL_MPDUS);
+
+ if (cmd->flag & HAL_REO_CMD_FLG_FLUSH_BLOCK_LATER) {
+ desc->info0 |= cpu_to_le32(HAL_REO_FLUSH_CACHE_INFO0_BLOCK_CACHE_USAGE);
+ desc->info0 |=
+ le32_encode_bits(avail_slot,
+ HAL_REO_FLUSH_CACHE_INFO0_BLOCK_RESRC_IDX);
+ }
+
+ if (cmd->flag & HAL_REO_CMD_FLG_FLUSH_NO_INVAL)
+ desc->info0 |= cpu_to_le32(HAL_REO_FLUSH_CACHE_INFO0_FLUSH_WO_INVALIDATE);
+
+ if (cmd->flag & HAL_REO_CMD_FLG_FLUSH_ALL)
+ desc->info0 |= cpu_to_le32(HAL_REO_FLUSH_CACHE_INFO0_FLUSH_ALL);
+
+ return le32_get_bits(desc->cmd.info0, HAL_REO_CMD_HDR_INFO0_CMD_NUMBER);
+}
+
+static int ath12k_hal_reo_cmd_update_rx_queue(struct hal_tlv_64_hdr *tlv,
+ struct ath12k_hal_reo_cmd *cmd)
+{
+ struct hal_reo_update_rx_queue *desc;
+
+ tlv->tl = u32_encode_bits(HAL_REO_UPDATE_RX_REO_QUEUE, HAL_TLV_HDR_TAG) |
+ u32_encode_bits(sizeof(*desc), HAL_TLV_HDR_LEN);
+
+ desc = (struct hal_reo_update_rx_queue *)tlv->value;
+ memset_startat(desc, 0, queue_addr_lo);
+
+ desc->cmd.info0 &= ~cpu_to_le32(HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED);
+ if (cmd->flag & HAL_REO_CMD_FLG_NEED_STATUS)
+ desc->cmd.info0 |= cpu_to_le32(HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED);
+
+ desc->queue_addr_lo = cpu_to_le32(cmd->addr_lo);
+ desc->info0 =
+ le32_encode_bits(cmd->addr_hi,
+ HAL_REO_UPD_RX_QUEUE_INFO0_QUEUE_ADDR_HI) |
+ le32_encode_bits(!!(cmd->upd0 & HAL_REO_CMD_UPD0_RX_QUEUE_NUM),
+ HAL_REO_UPD_RX_QUEUE_INFO0_UPD_RX_QUEUE_NUM) |
+ le32_encode_bits(!!(cmd->upd0 & HAL_REO_CMD_UPD0_VLD),
+ HAL_REO_UPD_RX_QUEUE_INFO0_UPD_VLD) |
+ le32_encode_bits(!!(cmd->upd0 & HAL_REO_CMD_UPD0_ALDC),
+ HAL_REO_UPD_RX_QUEUE_INFO0_UPD_ASSOC_LNK_DESC_CNT) |
+ le32_encode_bits(!!(cmd->upd0 & HAL_REO_CMD_UPD0_DIS_DUP_DETECTION),
+ HAL_REO_UPD_RX_QUEUE_INFO0_UPD_DIS_DUP_DETECTION) |
+ le32_encode_bits(!!(cmd->upd0 & HAL_REO_CMD_UPD0_SOFT_REORDER_EN),
+ HAL_REO_UPD_RX_QUEUE_INFO0_UPD_SOFT_REORDER_EN) |
+ le32_encode_bits(!!(cmd->upd0 & HAL_REO_CMD_UPD0_AC),
+ HAL_REO_UPD_RX_QUEUE_INFO0_UPD_AC) |
+ le32_encode_bits(!!(cmd->upd0 & HAL_REO_CMD_UPD0_BAR),
+ HAL_REO_UPD_RX_QUEUE_INFO0_UPD_BAR) |
+ le32_encode_bits(!!(cmd->upd0 & HAL_REO_CMD_UPD0_RETRY),
+ HAL_REO_UPD_RX_QUEUE_INFO0_UPD_RETRY) |
+ le32_encode_bits(!!(cmd->upd0 & HAL_REO_CMD_UPD0_CHECK_2K_MODE),
+ HAL_REO_UPD_RX_QUEUE_INFO0_UPD_CHECK_2K_MODE) |
+ le32_encode_bits(!!(cmd->upd0 & HAL_REO_CMD_UPD0_OOR_MODE),
+ HAL_REO_UPD_RX_QUEUE_INFO0_UPD_OOR_MODE) |
+ le32_encode_bits(!!(cmd->upd0 & HAL_REO_CMD_UPD0_BA_WINDOW_SIZE),
+ HAL_REO_UPD_RX_QUEUE_INFO0_UPD_BA_WINDOW_SIZE) |
+ le32_encode_bits(!!(cmd->upd0 & HAL_REO_CMD_UPD0_PN_CHECK),
+ HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_CHECK) |
+ le32_encode_bits(!!(cmd->upd0 & HAL_REO_CMD_UPD0_EVEN_PN),
+ HAL_REO_UPD_RX_QUEUE_INFO0_UPD_EVEN_PN) |
+ le32_encode_bits(!!(cmd->upd0 & HAL_REO_CMD_UPD0_UNEVEN_PN),
+ HAL_REO_UPD_RX_QUEUE_INFO0_UPD_UNEVEN_PN) |
+ le32_encode_bits(!!(cmd->upd0 & HAL_REO_CMD_UPD0_PN_HANDLE_ENABLE),
+ HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_HANDLE_ENABLE) |
+ le32_encode_bits(!!(cmd->upd0 & HAL_REO_CMD_UPD0_PN_SIZE),
+ HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_SIZE) |
+ le32_encode_bits(!!(cmd->upd0 & HAL_REO_CMD_UPD0_IGNORE_AMPDU_FLG),
+ HAL_REO_UPD_RX_QUEUE_INFO0_UPD_IGNORE_AMPDU_FLG) |
+ le32_encode_bits(!!(cmd->upd0 & HAL_REO_CMD_UPD0_SVLD),
+ HAL_REO_UPD_RX_QUEUE_INFO0_UPD_SVLD) |
+ le32_encode_bits(!!(cmd->upd0 & HAL_REO_CMD_UPD0_SSN),
+ HAL_REO_UPD_RX_QUEUE_INFO0_UPD_SSN) |
+ le32_encode_bits(!!(cmd->upd0 & HAL_REO_CMD_UPD0_SEQ_2K_ERR),
+ HAL_REO_UPD_RX_QUEUE_INFO0_UPD_SEQ_2K_ERR) |
+ le32_encode_bits(!!(cmd->upd0 & HAL_REO_CMD_UPD0_PN_VALID),
+ HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_VALID) |
+ le32_encode_bits(!!(cmd->upd0 & HAL_REO_CMD_UPD0_PN),
+ HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN);
+
+ desc->info1 =
+ le32_encode_bits(cmd->rx_queue_num,
+ HAL_REO_UPD_RX_QUEUE_INFO1_RX_QUEUE_NUMBER) |
+ le32_encode_bits(!!(cmd->upd1 & HAL_REO_CMD_UPD1_VLD),
+ HAL_REO_UPD_RX_QUEUE_INFO1_VLD) |
+ le32_encode_bits(u32_get_bits(cmd->upd1, HAL_REO_CMD_UPD1_ALDC),
+ HAL_REO_UPD_RX_QUEUE_INFO1_ASSOC_LNK_DESC_COUNTER) |
+ le32_encode_bits(!!(cmd->upd1 & HAL_REO_CMD_UPD1_DIS_DUP_DETECTION),
+ HAL_REO_UPD_RX_QUEUE_INFO1_DIS_DUP_DETECTION) |
+ le32_encode_bits(!!(cmd->upd1 & HAL_REO_CMD_UPD1_SOFT_REORDER_EN),
+ HAL_REO_UPD_RX_QUEUE_INFO1_SOFT_REORDER_EN) |
+ le32_encode_bits(u32_get_bits(cmd->upd1, HAL_REO_CMD_UPD1_AC),
+ HAL_REO_UPD_RX_QUEUE_INFO1_AC) |
+ le32_encode_bits(!!(cmd->upd1 & HAL_REO_CMD_UPD1_BAR),
+ HAL_REO_UPD_RX_QUEUE_INFO1_BAR) |
+ le32_encode_bits(!!(cmd->upd1 & HAL_REO_CMD_UPD1_CHECK_2K_MODE),
+ HAL_REO_UPD_RX_QUEUE_INFO1_CHECK_2K_MODE) |
+ le32_encode_bits(!!(cmd->upd1 & HAL_REO_CMD_UPD1_RETRY),
+ HAL_REO_UPD_RX_QUEUE_INFO1_RETRY) |
+ le32_encode_bits(!!(cmd->upd1 & HAL_REO_CMD_UPD1_OOR_MODE),
+ HAL_REO_UPD_RX_QUEUE_INFO1_OOR_MODE) |
+ le32_encode_bits(!!(cmd->upd1 & HAL_REO_CMD_UPD1_PN_CHECK),
+ HAL_REO_UPD_RX_QUEUE_INFO1_PN_CHECK) |
+ le32_encode_bits(!!(cmd->upd1 & HAL_REO_CMD_UPD1_EVEN_PN),
+ HAL_REO_UPD_RX_QUEUE_INFO1_EVEN_PN) |
+ le32_encode_bits(!!(cmd->upd1 & HAL_REO_CMD_UPD1_UNEVEN_PN),
+ HAL_REO_UPD_RX_QUEUE_INFO1_UNEVEN_PN) |
+ le32_encode_bits(!!(cmd->upd1 & HAL_REO_CMD_UPD1_PN_HANDLE_ENABLE),
+ HAL_REO_UPD_RX_QUEUE_INFO1_PN_HANDLE_ENABLE) |
+ le32_encode_bits(!!(cmd->upd1 & HAL_REO_CMD_UPD1_IGNORE_AMPDU_FLG),
+ HAL_REO_UPD_RX_QUEUE_INFO1_IGNORE_AMPDU_FLG);
+
+ if (cmd->pn_size == 24)
+ cmd->pn_size = HAL_RX_REO_QUEUE_PN_SIZE_24;
+ else if (cmd->pn_size == 48)
+ cmd->pn_size = HAL_RX_REO_QUEUE_PN_SIZE_48;
+ else if (cmd->pn_size == 128)
+ cmd->pn_size = HAL_RX_REO_QUEUE_PN_SIZE_128;
+
+ if (cmd->ba_window_size < 1)
+ cmd->ba_window_size = 1;
+
+ if (cmd->ba_window_size == 1)
+ cmd->ba_window_size++;
+
+ desc->info2 =
+ le32_encode_bits(cmd->ba_window_size - 1,
+ HAL_REO_UPD_RX_QUEUE_INFO2_BA_WINDOW_SIZE) |
+ le32_encode_bits(cmd->pn_size, HAL_REO_UPD_RX_QUEUE_INFO2_PN_SIZE) |
+ le32_encode_bits(!!(cmd->upd2 & HAL_REO_CMD_UPD2_SVLD),
+ HAL_REO_UPD_RX_QUEUE_INFO2_SVLD) |
+ le32_encode_bits(u32_get_bits(cmd->upd2, HAL_REO_CMD_UPD2_SSN),
+ HAL_REO_UPD_RX_QUEUE_INFO2_SSN) |
+ le32_encode_bits(!!(cmd->upd2 & HAL_REO_CMD_UPD2_SEQ_2K_ERR),
+ HAL_REO_UPD_RX_QUEUE_INFO2_SEQ_2K_ERR) |
+ le32_encode_bits(!!(cmd->upd2 & HAL_REO_CMD_UPD2_PN_ERR),
+ HAL_REO_UPD_RX_QUEUE_INFO2_PN_ERR);
+
+ return le32_get_bits(desc->cmd.info0, HAL_REO_CMD_HDR_INFO0_CMD_NUMBER);
+}
+
+int ath12k_hal_reo_cmd_send(struct ath12k_base *ab, struct hal_srng *srng,
+ enum hal_reo_cmd_type type,
+ struct ath12k_hal_reo_cmd *cmd)
+{
+ struct hal_tlv_64_hdr *reo_desc;
+ int ret;
+
+ spin_lock_bh(&srng->lock);
+
+ ath12k_hal_srng_access_begin(ab, srng);
+ reo_desc = ath12k_hal_srng_src_get_next_entry(ab, srng);
+ if (!reo_desc) {
+ ret = -ENOBUFS;
+ goto out;
+ }
+
+ switch (type) {
+ case HAL_REO_CMD_GET_QUEUE_STATS:
+ ret = ath12k_hal_reo_cmd_queue_stats(reo_desc, cmd);
+ break;
+ case HAL_REO_CMD_FLUSH_CACHE:
+ ret = ath12k_hal_reo_cmd_flush_cache(&ab->hal, reo_desc, cmd);
+ break;
+ case HAL_REO_CMD_UPDATE_RX_QUEUE:
+ ret = ath12k_hal_reo_cmd_update_rx_queue(reo_desc, cmd);
+ break;
+ case HAL_REO_CMD_FLUSH_QUEUE:
+ case HAL_REO_CMD_UNBLOCK_CACHE:
+ case HAL_REO_CMD_FLUSH_TIMEOUT_LIST:
+ ath12k_warn(ab, "Unsupported reo command %d\n", type);
+ ret = -ENOTSUPP;
+ break;
+ default:
+ ath12k_warn(ab, "Unknown reo command %d\n", type);
+ ret = -EINVAL;
+ break;
+ }
+
+out:
+ ath12k_hal_srng_access_end(ab, srng);
+ spin_unlock_bh(&srng->lock);
+
+ return ret;
+}
+
+void ath12k_hal_rx_buf_addr_info_set(struct ath12k_buffer_addr *binfo,
+ dma_addr_t paddr, u32 cookie, u8 manager)
+{
+ u32 paddr_lo, paddr_hi;
+
+ paddr_lo = lower_32_bits(paddr);
+ paddr_hi = upper_32_bits(paddr);
+ binfo->info0 = le32_encode_bits(paddr_lo, BUFFER_ADDR_INFO0_ADDR);
+ binfo->info1 = le32_encode_bits(paddr_hi, BUFFER_ADDR_INFO1_ADDR) |
+ le32_encode_bits(cookie, BUFFER_ADDR_INFO1_SW_COOKIE) |
+ le32_encode_bits(manager, BUFFER_ADDR_INFO1_RET_BUF_MGR);
+}
+
+void ath12k_hal_rx_buf_addr_info_get(struct ath12k_buffer_addr *binfo,
+ dma_addr_t *paddr,
+ u32 *cookie, u8 *rbm)
+{
+ *paddr = (((u64)le32_get_bits(binfo->info1, BUFFER_ADDR_INFO1_ADDR)) << 32) |
+ le32_get_bits(binfo->info0, BUFFER_ADDR_INFO0_ADDR);
+ *cookie = le32_get_bits(binfo->info1, BUFFER_ADDR_INFO1_SW_COOKIE);
+ *rbm = le32_get_bits(binfo->info1, BUFFER_ADDR_INFO1_RET_BUF_MGR);
+}
+
+void ath12k_hal_rx_msdu_link_info_get(struct hal_rx_msdu_link *link, u32 *num_msdus,
+ u32 *msdu_cookies,
+ enum hal_rx_buf_return_buf_manager *rbm)
+{
+ struct hal_rx_msdu_details *msdu;
+ u32 val;
+ int i;
+
+ *num_msdus = HAL_NUM_RX_MSDUS_PER_LINK_DESC;
+
+ msdu = &link->msdu_link[0];
+ *rbm = le32_get_bits(msdu->buf_addr_info.info1,
+ BUFFER_ADDR_INFO1_RET_BUF_MGR);
+
+ for (i = 0; i < *num_msdus; i++) {
+ msdu = &link->msdu_link[i];
+
+ val = le32_get_bits(msdu->buf_addr_info.info0,
+ BUFFER_ADDR_INFO0_ADDR);
+ if (val == 0) {
+ *num_msdus = i;
+ break;
+ }
+ *msdu_cookies = le32_get_bits(msdu->buf_addr_info.info1,
+ BUFFER_ADDR_INFO1_SW_COOKIE);
+ msdu_cookies++;
+ }
+}
+
+int ath12k_hal_desc_reo_parse_err(struct ath12k_base *ab,
+ struct hal_reo_dest_ring *desc,
+ dma_addr_t *paddr, u32 *desc_bank)
+{
+ enum hal_reo_dest_ring_push_reason push_reason;
+ enum hal_reo_dest_ring_error_code err_code;
+ u32 cookie, val;
+
+ push_reason = le32_get_bits(desc->info0,
+ HAL_REO_DEST_RING_INFO0_PUSH_REASON);
+ err_code = le32_get_bits(desc->info0,
+ HAL_REO_DEST_RING_INFO0_ERROR_CODE);
+ ab->soc_stats.reo_error[err_code]++;
+
+ if (push_reason != HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED &&
+ push_reason != HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION) {
+ ath12k_warn(ab, "expected error push reason code, received %d\n",
+ push_reason);
+ return -EINVAL;
+ }
+
+ val = le32_get_bits(desc->info0, HAL_REO_DEST_RING_INFO0_BUFFER_TYPE);
+ if (val != HAL_REO_DEST_RING_BUFFER_TYPE_LINK_DESC) {
+ ath12k_warn(ab, "expected buffer type link_desc");
+ return -EINVAL;
+ }
+
+ ath12k_hal_rx_reo_ent_paddr_get(ab, &desc->buf_addr_info, paddr, &cookie);
+ *desc_bank = u32_get_bits(cookie, DP_LINK_DESC_BANK_MASK);
+
+ return 0;
+}
+
+int ath12k_hal_wbm_desc_parse_err(struct ath12k_base *ab, void *desc,
+ struct hal_rx_wbm_rel_info *rel_info)
+{
+ struct hal_wbm_release_ring *wbm_desc = desc;
+ struct hal_wbm_release_ring_cc_rx *wbm_cc_desc = desc;
+ enum hal_wbm_rel_desc_type type;
+ enum hal_wbm_rel_src_module rel_src;
+ bool hw_cc_done;
+ u64 desc_va;
+ u32 val;
+
+ type = le32_get_bits(wbm_desc->info0, HAL_WBM_RELEASE_INFO0_DESC_TYPE);
+ /* We expect only WBM_REL buffer type */
+ if (type != HAL_WBM_REL_DESC_TYPE_REL_MSDU) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ rel_src = le32_get_bits(wbm_desc->info0,
+ HAL_WBM_RELEASE_INFO0_REL_SRC_MODULE);
+ if (rel_src != HAL_WBM_REL_SRC_MODULE_RXDMA &&
+ rel_src != HAL_WBM_REL_SRC_MODULE_REO)
+ return -EINVAL;
+
+ /* The format of wbm rel ring desc changes based on the
+ * hw cookie conversion status
+ */
+ hw_cc_done = le32_get_bits(wbm_desc->info0,
+ HAL_WBM_RELEASE_RX_INFO0_CC_STATUS);
+
+ if (!hw_cc_done) {
+ val = le32_get_bits(wbm_desc->buf_addr_info.info1,
+ BUFFER_ADDR_INFO1_RET_BUF_MGR);
+ if (val != HAL_RX_BUF_RBM_SW3_BM) {
+ ab->soc_stats.invalid_rbm++;
+ return -EINVAL;
+ }
+
+ rel_info->cookie = le32_get_bits(wbm_desc->buf_addr_info.info1,
+ BUFFER_ADDR_INFO1_SW_COOKIE);
+
+ rel_info->rx_desc = NULL;
+ } else {
+ val = le32_get_bits(wbm_cc_desc->info0,
+ HAL_WBM_RELEASE_RX_CC_INFO0_RBM);
+ if (val != HAL_RX_BUF_RBM_SW3_BM) {
+ ab->soc_stats.invalid_rbm++;
+ return -EINVAL;
+ }
+
+ rel_info->cookie = le32_get_bits(wbm_cc_desc->info1,
+ HAL_WBM_RELEASE_RX_CC_INFO1_COOKIE);
+
+ desc_va = ((u64)le32_to_cpu(wbm_cc_desc->buf_va_hi) << 32 |
+ le32_to_cpu(wbm_cc_desc->buf_va_lo));
+ rel_info->rx_desc =
+ (struct ath12k_rx_desc_info *)((unsigned long)desc_va);
+ }
+
+ rel_info->err_rel_src = rel_src;
+ rel_info->hw_cc_done = hw_cc_done;
+
+ rel_info->first_msdu = le32_get_bits(wbm_desc->info3,
+ HAL_WBM_RELEASE_INFO3_FIRST_MSDU);
+ rel_info->last_msdu = le32_get_bits(wbm_desc->info3,
+ HAL_WBM_RELEASE_INFO3_LAST_MSDU);
+ rel_info->continuation = le32_get_bits(wbm_desc->info3,
+ HAL_WBM_RELEASE_INFO3_CONTINUATION);
+
+ if (rel_info->err_rel_src == HAL_WBM_REL_SRC_MODULE_REO) {
+ rel_info->push_reason =
+ le32_get_bits(wbm_desc->info0,
+ HAL_WBM_RELEASE_INFO0_REO_PUSH_REASON);
+ rel_info->err_code =
+ le32_get_bits(wbm_desc->info0,
+ HAL_WBM_RELEASE_INFO0_REO_ERROR_CODE);
+ } else {
+ rel_info->push_reason =
+ le32_get_bits(wbm_desc->info0,
+ HAL_WBM_RELEASE_INFO0_RXDMA_PUSH_REASON);
+ rel_info->err_code =
+ le32_get_bits(wbm_desc->info0,
+ HAL_WBM_RELEASE_INFO0_RXDMA_ERROR_CODE);
+ }
+
+ return 0;
+}
+
+void ath12k_hal_rx_reo_ent_paddr_get(struct ath12k_base *ab,
+ struct ath12k_buffer_addr *buff_addr,
+ dma_addr_t *paddr, u32 *cookie)
+{
+ *paddr = ((u64)(le32_get_bits(buff_addr->info1,
+ BUFFER_ADDR_INFO1_ADDR)) << 32) |
+ le32_get_bits(buff_addr->info0, BUFFER_ADDR_INFO0_ADDR);
+
+ *cookie = le32_get_bits(buff_addr->info1, BUFFER_ADDR_INFO1_SW_COOKIE);
+}
+
+void ath12k_hal_rx_msdu_link_desc_set(struct ath12k_base *ab,
+ struct hal_wbm_release_ring *dst_desc,
+ struct hal_wbm_release_ring *src_desc,
+ enum hal_wbm_rel_bm_act action)
+{
+ dst_desc->buf_addr_info = src_desc->buf_addr_info;
+ dst_desc->info0 |= le32_encode_bits(HAL_WBM_REL_SRC_MODULE_SW,
+ HAL_WBM_RELEASE_INFO0_REL_SRC_MODULE) |
+ le32_encode_bits(action, HAL_WBM_RELEASE_INFO0_BM_ACTION) |
+ le32_encode_bits(HAL_WBM_REL_DESC_TYPE_MSDU_LINK,
+ HAL_WBM_RELEASE_INFO0_DESC_TYPE);
+}
+
+void ath12k_hal_reo_status_queue_stats(struct ath12k_base *ab, struct hal_tlv_64_hdr *tlv,
+ struct hal_reo_status *status)
+{
+ struct hal_reo_get_queue_stats_status *desc =
+ (struct hal_reo_get_queue_stats_status *)tlv->value;
+
+ status->uniform_hdr.cmd_num =
+ le32_get_bits(desc->hdr.info0,
+ HAL_REO_STATUS_HDR_INFO0_STATUS_NUM);
+ status->uniform_hdr.cmd_status =
+ le32_get_bits(desc->hdr.info0,
+ HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS);
+
+ ath12k_dbg(ab, ATH12K_DBG_HAL, "Queue stats status:\n");
+ ath12k_dbg(ab, ATH12K_DBG_HAL, "header: cmd_num %d status %d\n",
+ status->uniform_hdr.cmd_num,
+ status->uniform_hdr.cmd_status);
+ ath12k_dbg(ab, ATH12K_DBG_HAL, "ssn %u cur_idx %u\n",
+ le32_get_bits(desc->info0,
+ HAL_REO_GET_QUEUE_STATS_STATUS_INFO0_SSN),
+ le32_get_bits(desc->info0,
+ HAL_REO_GET_QUEUE_STATS_STATUS_INFO0_CUR_IDX));
+ ath12k_dbg(ab, ATH12K_DBG_HAL, "pn = [%08x, %08x, %08x, %08x]\n",
+ desc->pn[0], desc->pn[1], desc->pn[2], desc->pn[3]);
+ ath12k_dbg(ab, ATH12K_DBG_HAL, "last_rx: enqueue_tstamp %08x dequeue_tstamp %08x\n",
+ desc->last_rx_enqueue_timestamp,
+ desc->last_rx_dequeue_timestamp);
+ ath12k_dbg(ab, ATH12K_DBG_HAL, "rx_bitmap [%08x %08x %08x %08x %08x %08x %08x %08x]\n",
+ desc->rx_bitmap[0], desc->rx_bitmap[1], desc->rx_bitmap[2],
+ desc->rx_bitmap[3], desc->rx_bitmap[4], desc->rx_bitmap[5],
+ desc->rx_bitmap[6], desc->rx_bitmap[7]);
+ ath12k_dbg(ab, ATH12K_DBG_HAL, "count: cur_mpdu %u cur_msdu %u\n",
+ le32_get_bits(desc->info1,
+ HAL_REO_GET_QUEUE_STATS_STATUS_INFO1_MPDU_COUNT),
+ le32_get_bits(desc->info1,
+ HAL_REO_GET_QUEUE_STATS_STATUS_INFO1_MSDU_COUNT));
+ ath12k_dbg(ab, ATH12K_DBG_HAL, "fwd_timeout %u fwd_bar %u dup_count %u\n",
+ le32_get_bits(desc->info2,
+ HAL_REO_GET_QUEUE_STATS_STATUS_INFO2_TIMEOUT_COUNT),
+ le32_get_bits(desc->info2,
+ HAL_REO_GET_QUEUE_STATS_STATUS_INFO2_FDTB_COUNT),
+ le32_get_bits(desc->info2,
+ HAL_REO_GET_QUEUE_STATS_STATUS_INFO2_DUPLICATE_COUNT));
+ ath12k_dbg(ab, ATH12K_DBG_HAL, "frames_in_order %u bar_rcvd %u\n",
+ le32_get_bits(desc->info3,
+ HAL_REO_GET_QUEUE_STATS_STATUS_INFO3_FIO_COUNT),
+ le32_get_bits(desc->info3,
+ HAL_REO_GET_QUEUE_STATS_STATUS_INFO3_BAR_RCVD_CNT));
+ ath12k_dbg(ab, ATH12K_DBG_HAL, "num_mpdus %d num_msdus %d total_bytes %d\n",
+ desc->num_mpdu_frames, desc->num_msdu_frames,
+ desc->total_bytes);
+ ath12k_dbg(ab, ATH12K_DBG_HAL, "late_rcvd %u win_jump_2k %u hole_cnt %u\n",
+ le32_get_bits(desc->info4,
+ HAL_REO_GET_QUEUE_STATS_STATUS_INFO4_LATE_RX_MPDU),
+ le32_get_bits(desc->info2,
+ HAL_REO_GET_QUEUE_STATS_STATUS_INFO2_WINDOW_JMP2K),
+ le32_get_bits(desc->info4,
+ HAL_REO_GET_QUEUE_STATS_STATUS_INFO4_HOLE_COUNT));
+ ath12k_dbg(ab, ATH12K_DBG_HAL, "looping count %u\n",
+ le32_get_bits(desc->info5,
+ HAL_REO_GET_QUEUE_STATS_STATUS_INFO5_LOOPING_CNT));
+}
+
+void ath12k_hal_reo_flush_queue_status(struct ath12k_base *ab, struct hal_tlv_64_hdr *tlv,
+ struct hal_reo_status *status)
+{
+ struct hal_reo_flush_queue_status *desc =
+ (struct hal_reo_flush_queue_status *)tlv->value;
+
+ status->uniform_hdr.cmd_num =
+ le32_get_bits(desc->hdr.info0,
+ HAL_REO_STATUS_HDR_INFO0_STATUS_NUM);
+ status->uniform_hdr.cmd_status =
+ le32_get_bits(desc->hdr.info0,
+ HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS);
+ status->u.flush_queue.err_detected =
+ le32_get_bits(desc->info0,
+ HAL_REO_FLUSH_QUEUE_INFO0_ERR_DETECTED);
+}
+
+void ath12k_hal_reo_flush_cache_status(struct ath12k_base *ab, struct hal_tlv_64_hdr *tlv,
+ struct hal_reo_status *status)
+{
+ struct ath12k_hal *hal = &ab->hal;
+ struct hal_reo_flush_cache_status *desc =
+ (struct hal_reo_flush_cache_status *)tlv->value;
+
+ status->uniform_hdr.cmd_num =
+ le32_get_bits(desc->hdr.info0,
+ HAL_REO_STATUS_HDR_INFO0_STATUS_NUM);
+ status->uniform_hdr.cmd_status =
+ le32_get_bits(desc->hdr.info0,
+ HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS);
+
+ status->u.flush_cache.err_detected =
+ le32_get_bits(desc->info0,
+ HAL_REO_FLUSH_CACHE_STATUS_INFO0_IS_ERR);
+ status->u.flush_cache.err_code =
+ le32_get_bits(desc->info0,
+ HAL_REO_FLUSH_CACHE_STATUS_INFO0_BLOCK_ERR_CODE);
+ if (!status->u.flush_cache.err_code)
+ hal->avail_blk_resource |= BIT(hal->current_blk_index);
+
+ status->u.flush_cache.cache_controller_flush_status_hit =
+ le32_get_bits(desc->info0,
+ HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_STATUS_HIT);
+
+ status->u.flush_cache.cache_controller_flush_status_desc_type =
+ le32_get_bits(desc->info0,
+ HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_DESC_TYPE);
+ status->u.flush_cache.cache_controller_flush_status_client_id =
+ le32_get_bits(desc->info0,
+ HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_CLIENT_ID);
+ status->u.flush_cache.cache_controller_flush_status_err =
+ le32_get_bits(desc->info0,
+ HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_ERR);
+ status->u.flush_cache.cache_controller_flush_status_cnt =
+ le32_get_bits(desc->info0,
+ HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_COUNT);
+}
+
+void ath12k_hal_reo_unblk_cache_status(struct ath12k_base *ab, struct hal_tlv_64_hdr *tlv,
+ struct hal_reo_status *status)
+{
+ struct ath12k_hal *hal = &ab->hal;
+ struct hal_reo_unblock_cache_status *desc =
+ (struct hal_reo_unblock_cache_status *)tlv->value;
+
+ status->uniform_hdr.cmd_num =
+ le32_get_bits(desc->hdr.info0,
+ HAL_REO_STATUS_HDR_INFO0_STATUS_NUM);
+ status->uniform_hdr.cmd_status =
+ le32_get_bits(desc->hdr.info0,
+ HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS);
+
+ status->u.unblock_cache.err_detected =
+ le32_get_bits(desc->info0,
+ HAL_REO_UNBLOCK_CACHE_STATUS_INFO0_IS_ERR);
+ status->u.unblock_cache.unblock_type =
+ le32_get_bits(desc->info0,
+ HAL_REO_UNBLOCK_CACHE_STATUS_INFO0_TYPE);
+
+ if (!status->u.unblock_cache.err_detected &&
+ status->u.unblock_cache.unblock_type ==
+ HAL_REO_STATUS_UNBLOCK_BLOCKING_RESOURCE)
+ hal->avail_blk_resource &= ~BIT(hal->current_blk_index);
+}
+
+void ath12k_hal_reo_flush_timeout_list_status(struct ath12k_base *ab,
+ struct hal_tlv_64_hdr *tlv,
+ struct hal_reo_status *status)
+{
+ struct hal_reo_flush_timeout_list_status *desc =
+ (struct hal_reo_flush_timeout_list_status *)tlv->value;
+
+ status->uniform_hdr.cmd_num =
+ le32_get_bits(desc->hdr.info0,
+ HAL_REO_STATUS_HDR_INFO0_STATUS_NUM);
+ status->uniform_hdr.cmd_status =
+ le32_get_bits(desc->hdr.info0,
+ HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS);
+
+ status->u.timeout_list.err_detected =
+ le32_get_bits(desc->info0,
+ HAL_REO_FLUSH_TIMEOUT_STATUS_INFO0_IS_ERR);
+ status->u.timeout_list.list_empty =
+ le32_get_bits(desc->info0,
+ HAL_REO_FLUSH_TIMEOUT_STATUS_INFO0_LIST_EMPTY);
+
+ status->u.timeout_list.release_desc_cnt =
+ le32_get_bits(desc->info1,
+ HAL_REO_FLUSH_TIMEOUT_STATUS_INFO1_REL_DESC_COUNT);
+ status->u.timeout_list.fwd_buf_cnt =
+ le32_get_bits(desc->info0,
+ HAL_REO_FLUSH_TIMEOUT_STATUS_INFO1_FWD_BUF_COUNT);
+}
+
+void ath12k_hal_reo_desc_thresh_reached_status(struct ath12k_base *ab,
+ struct hal_tlv_64_hdr *tlv,
+ struct hal_reo_status *status)
+{
+ struct hal_reo_desc_thresh_reached_status *desc =
+ (struct hal_reo_desc_thresh_reached_status *)tlv->value;
+
+ status->uniform_hdr.cmd_num =
+ le32_get_bits(desc->hdr.info0,
+ HAL_REO_STATUS_HDR_INFO0_STATUS_NUM);
+ status->uniform_hdr.cmd_status =
+ le32_get_bits(desc->hdr.info0,
+ HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS);
+
+ status->u.desc_thresh_reached.threshold_idx =
+ le32_get_bits(desc->info0,
+ HAL_REO_DESC_THRESH_STATUS_INFO0_THRESH_INDEX);
+
+ status->u.desc_thresh_reached.link_desc_counter0 =
+ le32_get_bits(desc->info1,
+ HAL_REO_DESC_THRESH_STATUS_INFO1_LINK_DESC_COUNTER0);
+
+ status->u.desc_thresh_reached.link_desc_counter1 =
+ le32_get_bits(desc->info2,
+ HAL_REO_DESC_THRESH_STATUS_INFO2_LINK_DESC_COUNTER1);
+
+ status->u.desc_thresh_reached.link_desc_counter2 =
+ le32_get_bits(desc->info3,
+ HAL_REO_DESC_THRESH_STATUS_INFO3_LINK_DESC_COUNTER2);
+
+ status->u.desc_thresh_reached.link_desc_counter_sum =
+ le32_get_bits(desc->info4,
+ HAL_REO_DESC_THRESH_STATUS_INFO4_LINK_DESC_COUNTER_SUM);
+}
+
+void ath12k_hal_reo_update_rx_reo_queue_status(struct ath12k_base *ab,
+ struct hal_tlv_64_hdr *tlv,
+ struct hal_reo_status *status)
+{
+ struct hal_reo_status_hdr *desc =
+ (struct hal_reo_status_hdr *)tlv->value;
+
+ status->uniform_hdr.cmd_num =
+ le32_get_bits(desc->info0,
+ HAL_REO_STATUS_HDR_INFO0_STATUS_NUM);
+ status->uniform_hdr.cmd_status =
+ le32_get_bits(desc->info0,
+ HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS);
+}
+
+u32 ath12k_hal_reo_qdesc_size(u32 ba_window_size, u8 tid)
+{
+ u32 num_ext_desc;
+
+ if (ba_window_size <= 1) {
+ if (tid != HAL_DESC_REO_NON_QOS_TID)
+ num_ext_desc = 1;
+ else
+ num_ext_desc = 0;
+ } else if (ba_window_size <= 105) {
+ num_ext_desc = 1;
+ } else if (ba_window_size <= 210) {
+ num_ext_desc = 2;
+ } else {
+ num_ext_desc = 3;
+ }
+
+ return sizeof(struct hal_rx_reo_queue) +
+ (num_ext_desc * sizeof(struct hal_rx_reo_queue_ext));
+}
+
+void ath12k_hal_reo_qdesc_setup(struct hal_rx_reo_queue *qdesc,
+ int tid, u32 ba_window_size,
+ u32 start_seq, enum hal_pn_type type)
+{
+ struct hal_rx_reo_queue_ext *ext_desc;
+
+ memset(qdesc, 0, sizeof(*qdesc));
+
+ ath12k_hal_reo_set_desc_hdr(&qdesc->desc_hdr, HAL_DESC_REO_OWNED,
+ HAL_DESC_REO_QUEUE_DESC,
+ REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_0);
+
+ qdesc->rx_queue_num = le32_encode_bits(tid, HAL_RX_REO_QUEUE_RX_QUEUE_NUMBER);
+
+ qdesc->info0 =
+ le32_encode_bits(1, HAL_RX_REO_QUEUE_INFO0_VLD) |
+ le32_encode_bits(1, HAL_RX_REO_QUEUE_INFO0_ASSOC_LNK_DESC_COUNTER) |
+ le32_encode_bits(ath12k_tid_to_ac(tid), HAL_RX_REO_QUEUE_INFO0_AC);
+
+ if (ba_window_size < 1)
+ ba_window_size = 1;
+
+ if (ba_window_size == 1 && tid != HAL_DESC_REO_NON_QOS_TID)
+ ba_window_size++;
+
+ if (ba_window_size == 1)
+ qdesc->info0 |= le32_encode_bits(1, HAL_RX_REO_QUEUE_INFO0_RETRY);
+
+ qdesc->info0 |= le32_encode_bits(ba_window_size - 1,
+ HAL_RX_REO_QUEUE_INFO0_BA_WINDOW_SIZE);
+ switch (type) {
+ case HAL_PN_TYPE_NONE:
+ case HAL_PN_TYPE_WAPI_EVEN:
+ case HAL_PN_TYPE_WAPI_UNEVEN:
+ break;
+ case HAL_PN_TYPE_WPA:
+ qdesc->info0 |=
+ le32_encode_bits(1, HAL_RX_REO_QUEUE_INFO0_PN_CHECK) |
+ le32_encode_bits(HAL_RX_REO_QUEUE_PN_SIZE_48,
+ HAL_RX_REO_QUEUE_INFO0_PN_SIZE);
+ break;
+ }
+
+ /* TODO: Set Ignore ampdu flags based on BA window size and/or
+ * AMPDU capabilities
+ */
+ qdesc->info0 |= le32_encode_bits(1, HAL_RX_REO_QUEUE_INFO0_IGNORE_AMPDU_FLG);
+
+ qdesc->info1 |= le32_encode_bits(0, HAL_RX_REO_QUEUE_INFO1_SVLD);
+
+ if (start_seq <= 0xfff)
+ qdesc->info1 = le32_encode_bits(start_seq,
+ HAL_RX_REO_QUEUE_INFO1_SSN);
+
+ if (tid == HAL_DESC_REO_NON_QOS_TID)
+ return;
+
+ ext_desc = qdesc->ext_desc;
+
+ /* TODO: HW queue descriptors are currently allocated for max BA
+ * window size for all QOS TIDs so that same descriptor can be used
+ * later when ADDBA request is received. This should be changed to
+ * allocate HW queue descriptors based on BA window size being
+ * negotiated (0 for non BA cases), and reallocate when BA window
+ * size changes and also send WMI message to FW to change the REO
+ * queue descriptor in Rx peer entry as part of dp_rx_tid_update.
+ */
+ memset(ext_desc, 0, 3 * sizeof(*ext_desc));
+ ath12k_hal_reo_set_desc_hdr(&ext_desc->desc_hdr, HAL_DESC_REO_OWNED,
+ HAL_DESC_REO_QUEUE_EXT_DESC,
+ REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_1);
+ ext_desc++;
+ ath12k_hal_reo_set_desc_hdr(&ext_desc->desc_hdr, HAL_DESC_REO_OWNED,
+ HAL_DESC_REO_QUEUE_EXT_DESC,
+ REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_2);
+ ext_desc++;
+ ath12k_hal_reo_set_desc_hdr(&ext_desc->desc_hdr, HAL_DESC_REO_OWNED,
+ HAL_DESC_REO_QUEUE_EXT_DESC,
+ REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_3);
+}
+
+void ath12k_hal_reo_init_cmd_ring(struct ath12k_base *ab,
+ struct hal_srng *srng)
+{
+ struct hal_srng_params params;
+ struct hal_tlv_64_hdr *tlv;
+ struct hal_reo_get_queue_stats *desc;
+ int i, cmd_num = 1;
+ int entry_size;
+ u8 *entry;
+
+ memset(&params, 0, sizeof(params));
+
+ entry_size = ath12k_hal_srng_get_entrysize(ab, HAL_REO_CMD);
+ ath12k_hal_srng_get_params(ab, srng, &params);
+ entry = (u8 *)params.ring_base_vaddr;
+
+ for (i = 0; i < params.num_entries; i++) {
+ tlv = (struct hal_tlv_64_hdr *)entry;
+ desc = (struct hal_reo_get_queue_stats *)tlv->value;
+ desc->cmd.info0 = le32_encode_bits(cmd_num++,
+ HAL_REO_CMD_HDR_INFO0_CMD_NUMBER);
+ entry += entry_size;
+ }
+}
+
+void ath12k_hal_reo_hw_setup(struct ath12k_base *ab, u32 ring_hash_map)
+{
+ u32 reo_base = HAL_SEQ_WCSS_UMAC_REO_REG;
+ u32 val;
+
+ val = ath12k_hif_read32(ab, reo_base + HAL_REO1_GEN_ENABLE);
+
+ val |= u32_encode_bits(1, HAL_REO1_GEN_ENABLE_AGING_LIST_ENABLE) |
+ u32_encode_bits(1, HAL_REO1_GEN_ENABLE_AGING_FLUSH_ENABLE);
+ ath12k_hif_write32(ab, reo_base + HAL_REO1_GEN_ENABLE, val);
+
+ val = ath12k_hif_read32(ab, reo_base + HAL_REO1_MISC_CTRL_ADDR(ab));
+
+ val &= ~(HAL_REO1_MISC_CTL_FRAG_DST_RING |
+ HAL_REO1_MISC_CTL_BAR_DST_RING);
+ val |= u32_encode_bits(HAL_SRNG_RING_ID_REO2SW0,
+ HAL_REO1_MISC_CTL_FRAG_DST_RING);
+ val |= u32_encode_bits(HAL_SRNG_RING_ID_REO2SW0,
+ HAL_REO1_MISC_CTL_BAR_DST_RING);
+ ath12k_hif_write32(ab, reo_base + HAL_REO1_MISC_CTRL_ADDR(ab), val);
+
+ ath12k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_0(ab),
+ HAL_DEFAULT_BE_BK_VI_REO_TIMEOUT_USEC);
+ ath12k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_1(ab),
+ HAL_DEFAULT_BE_BK_VI_REO_TIMEOUT_USEC);
+ ath12k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_2(ab),
+ HAL_DEFAULT_BE_BK_VI_REO_TIMEOUT_USEC);
+ ath12k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_3(ab),
+ HAL_DEFAULT_VO_REO_TIMEOUT_USEC);
+
+ ath12k_hif_write32(ab, reo_base + HAL_REO1_DEST_RING_CTRL_IX_2,
+ ring_hash_map);
+ ath12k_hif_write32(ab, reo_base + HAL_REO1_DEST_RING_CTRL_IX_3,
+ ring_hash_map);
+}
diff --git a/drivers/net/wireless/ath/ath12k/hal_rx.h b/drivers/net/wireless/ath/ath12k/hal_rx.h
new file mode 100644
index 000000000000..fcfb6c819047
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/hal_rx.h
@@ -0,0 +1,704 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef ATH12K_HAL_RX_H
+#define ATH12K_HAL_RX_H
+
+struct hal_rx_wbm_rel_info {
+ u32 cookie;
+ enum hal_wbm_rel_src_module err_rel_src;
+ enum hal_reo_dest_ring_push_reason push_reason;
+ u32 err_code;
+ bool first_msdu;
+ bool last_msdu;
+ bool continuation;
+ void *rx_desc;
+ bool hw_cc_done;
+};
+
+#define HAL_INVALID_PEERID 0xffff
+#define VHT_SIG_SU_NSS_MASK 0x7
+
+#define HAL_RX_MAX_MCS 12
+#define HAL_RX_MAX_NSS 8
+
+#define HAL_RX_MPDU_INFO_PN_GET_BYTE1(__val) \
+ le32_get_bits((__val), GENMASK(7, 0))
+
+#define HAL_RX_MPDU_INFO_PN_GET_BYTE2(__val) \
+ le32_get_bits((__val), GENMASK(15, 8))
+
+#define HAL_RX_MPDU_INFO_PN_GET_BYTE3(__val) \
+ le32_get_bits((__val), GENMASK(23, 16))
+
+#define HAL_RX_MPDU_INFO_PN_GET_BYTE4(__val) \
+ le32_get_bits((__val), GENMASK(31, 24))
+
+struct hal_rx_mon_status_tlv_hdr {
+ u32 hdr;
+ u8 value[];
+};
+
+enum hal_rx_su_mu_coding {
+ HAL_RX_SU_MU_CODING_BCC,
+ HAL_RX_SU_MU_CODING_LDPC,
+ HAL_RX_SU_MU_CODING_MAX,
+};
+
+enum hal_rx_gi {
+ HAL_RX_GI_0_8_US,
+ HAL_RX_GI_0_4_US,
+ HAL_RX_GI_1_6_US,
+ HAL_RX_GI_3_2_US,
+ HAL_RX_GI_MAX,
+};
+
+enum hal_rx_bw {
+ HAL_RX_BW_20MHZ,
+ HAL_RX_BW_40MHZ,
+ HAL_RX_BW_80MHZ,
+ HAL_RX_BW_160MHZ,
+ HAL_RX_BW_MAX,
+};
+
+enum hal_rx_preamble {
+ HAL_RX_PREAMBLE_11A,
+ HAL_RX_PREAMBLE_11B,
+ HAL_RX_PREAMBLE_11N,
+ HAL_RX_PREAMBLE_11AC,
+ HAL_RX_PREAMBLE_11AX,
+ HAL_RX_PREAMBLE_MAX,
+};
+
+enum hal_rx_reception_type {
+ HAL_RX_RECEPTION_TYPE_SU,
+ HAL_RX_RECEPTION_TYPE_MU_MIMO,
+ HAL_RX_RECEPTION_TYPE_MU_OFDMA,
+ HAL_RX_RECEPTION_TYPE_MU_OFDMA_MIMO,
+ HAL_RX_RECEPTION_TYPE_MAX,
+};
+
+enum hal_rx_legacy_rate {
+ HAL_RX_LEGACY_RATE_1_MBPS,
+ HAL_RX_LEGACY_RATE_2_MBPS,
+ HAL_RX_LEGACY_RATE_5_5_MBPS,
+ HAL_RX_LEGACY_RATE_6_MBPS,
+ HAL_RX_LEGACY_RATE_9_MBPS,
+ HAL_RX_LEGACY_RATE_11_MBPS,
+ HAL_RX_LEGACY_RATE_12_MBPS,
+ HAL_RX_LEGACY_RATE_18_MBPS,
+ HAL_RX_LEGACY_RATE_24_MBPS,
+ HAL_RX_LEGACY_RATE_36_MBPS,
+ HAL_RX_LEGACY_RATE_48_MBPS,
+ HAL_RX_LEGACY_RATE_54_MBPS,
+ HAL_RX_LEGACY_RATE_INVALID,
+};
+
+#define HAL_TLV_STATUS_PPDU_NOT_DONE 0
+#define HAL_TLV_STATUS_PPDU_DONE 1
+#define HAL_TLV_STATUS_BUF_DONE 2
+#define HAL_TLV_STATUS_PPDU_NON_STD_DONE 3
+#define HAL_RX_FCS_LEN 4
+
+enum hal_rx_mon_status {
+ HAL_RX_MON_STATUS_PPDU_NOT_DONE,
+ HAL_RX_MON_STATUS_PPDU_DONE,
+ HAL_RX_MON_STATUS_BUF_DONE,
+};
+
+#define HAL_RX_MAX_MPDU 256
+#define HAL_RX_NUM_WORDS_PER_PPDU_BITMAP (HAL_RX_MAX_MPDU >> 5)
+
+struct hal_rx_user_status {
+ u32 mcs:4,
+ nss:3,
+ ofdma_info_valid:1,
+ ul_ofdma_ru_start_index:7,
+ ul_ofdma_ru_width:7,
+ ul_ofdma_ru_size:8;
+ u32 ul_ofdma_user_v0_word0;
+ u32 ul_ofdma_user_v0_word1;
+ u32 ast_index;
+ u32 tid;
+ u16 tcp_msdu_count;
+ u16 tcp_ack_msdu_count;
+ u16 udp_msdu_count;
+ u16 other_msdu_count;
+ u16 frame_control;
+ u8 frame_control_info_valid;
+ u8 data_sequence_control_info_valid;
+ u16 first_data_seq_ctrl;
+ u32 preamble_type;
+ u16 ht_flags;
+ u16 vht_flags;
+ u16 he_flags;
+ u8 rs_flags;
+ u8 ldpc;
+ u32 mpdu_cnt_fcs_ok;
+ u32 mpdu_cnt_fcs_err;
+ u32 mpdu_fcs_ok_bitmap[HAL_RX_NUM_WORDS_PER_PPDU_BITMAP];
+ u32 mpdu_ok_byte_count;
+ u32 mpdu_err_byte_count;
+};
+
+#define HAL_MAX_UL_MU_USERS 37
+
+struct hal_rx_mon_ppdu_info {
+ u32 ppdu_id;
+ u32 last_ppdu_id;
+ u64 ppdu_ts;
+ u32 num_mpdu_fcs_ok;
+ u32 num_mpdu_fcs_err;
+ u32 preamble_type;
+ u32 mpdu_len;
+ u16 chan_num;
+ u16 tcp_msdu_count;
+ u16 tcp_ack_msdu_count;
+ u16 udp_msdu_count;
+ u16 other_msdu_count;
+ u16 peer_id;
+ u8 rate;
+ u8 mcs;
+ u8 nss;
+ u8 bw;
+ u8 vht_flag_values1;
+ u8 vht_flag_values2;
+ u8 vht_flag_values3[4];
+ u8 vht_flag_values4;
+ u8 vht_flag_values5;
+ u16 vht_flag_values6;
+ u8 is_stbc;
+ u8 gi;
+ u8 sgi;
+ u8 ldpc;
+ u8 beamformed;
+ u8 rssi_comb;
+ u16 tid;
+ u8 fc_valid;
+ u16 ht_flags;
+ u16 vht_flags;
+ u16 he_flags;
+ u16 he_mu_flags;
+ u8 dcm;
+ u8 ru_alloc;
+ u8 reception_type;
+ u64 tsft;
+ u64 rx_duration;
+ u16 frame_control;
+ u32 ast_index;
+ u8 rs_fcs_err;
+ u8 rs_flags;
+ u8 cck_flag;
+ u8 ofdm_flag;
+ u8 ulofdma_flag;
+ u8 frame_control_info_valid;
+ u16 he_per_user_1;
+ u16 he_per_user_2;
+ u8 he_per_user_position;
+ u8 he_per_user_known;
+ u16 he_flags1;
+ u16 he_flags2;
+ u8 he_RU[4];
+ u16 he_data1;
+ u16 he_data2;
+ u16 he_data3;
+ u16 he_data4;
+ u16 he_data5;
+ u16 he_data6;
+ u32 ppdu_len;
+ u32 prev_ppdu_id;
+ u32 device_id;
+ u16 first_data_seq_ctrl;
+ u8 monitor_direct_used;
+ u8 data_sequence_control_info_valid;
+ u8 ltf_size;
+ u8 rxpcu_filter_pass;
+ s8 rssi_chain[8][8];
+ u32 num_users;
+ u32 mpdu_fcs_ok_bitmap[HAL_RX_NUM_WORDS_PER_PPDU_BITMAP];
+ u8 addr1[ETH_ALEN];
+ u8 addr2[ETH_ALEN];
+ u8 addr3[ETH_ALEN];
+ u8 addr4[ETH_ALEN];
+ struct hal_rx_user_status userstats[HAL_MAX_UL_MU_USERS];
+ u8 userid;
+ u16 ampdu_id[HAL_MAX_UL_MU_USERS];
+ bool first_msdu_in_mpdu;
+ bool is_ampdu;
+ u8 medium_prot_type;
+};
+
+#define HAL_RX_PPDU_START_INFO0_PPDU_ID GENMASK(15, 0)
+
+struct hal_rx_ppdu_start {
+ __le32 info0;
+ __le32 chan_num;
+ __le32 ppdu_start_ts;
+} __packed;
+
+#define HAL_RX_PPDU_END_USER_STATS_INFO0_MPDU_CNT_FCS_ERR GENMASK(25, 16)
+
+#define HAL_RX_PPDU_END_USER_STATS_INFO1_MPDU_CNT_FCS_OK GENMASK(8, 0)
+#define HAL_RX_PPDU_END_USER_STATS_INFO1_FC_VALID BIT(9)
+#define HAL_RX_PPDU_END_USER_STATS_INFO1_QOS_CTRL_VALID BIT(10)
+#define HAL_RX_PPDU_END_USER_STATS_INFO1_HT_CTRL_VALID BIT(11)
+#define HAL_RX_PPDU_END_USER_STATS_INFO1_PKT_TYPE GENMASK(23, 20)
+
+#define HAL_RX_PPDU_END_USER_STATS_INFO2_AST_INDEX GENMASK(15, 0)
+#define HAL_RX_PPDU_END_USER_STATS_INFO2_FRAME_CTRL GENMASK(31, 16)
+
+#define HAL_RX_PPDU_END_USER_STATS_INFO3_QOS_CTRL GENMASK(31, 16)
+
+#define HAL_RX_PPDU_END_USER_STATS_INFO4_UDP_MSDU_CNT GENMASK(15, 0)
+#define HAL_RX_PPDU_END_USER_STATS_INFO4_TCP_MSDU_CNT GENMASK(31, 16)
+
+#define HAL_RX_PPDU_END_USER_STATS_INFO5_OTHER_MSDU_CNT GENMASK(15, 0)
+#define HAL_RX_PPDU_END_USER_STATS_INFO5_TCP_ACK_MSDU_CNT GENMASK(31, 16)
+
+#define HAL_RX_PPDU_END_USER_STATS_INFO6_TID_BITMAP GENMASK(15, 0)
+#define HAL_RX_PPDU_END_USER_STATS_INFO6_TID_EOSP_BITMAP GENMASK(31, 16)
+
+#define HAL_RX_PPDU_END_USER_STATS_MPDU_DELIM_OK_BYTE_COUNT GENMASK(24, 0)
+#define HAL_RX_PPDU_END_USER_STATS_MPDU_DELIM_ERR_BYTE_COUNT GENMASK(24, 0)
+
+struct hal_rx_ppdu_end_user_stats {
+ __le32 rsvd0[2];
+ __le32 info0;
+ __le32 info1;
+ __le32 info2;
+ __le32 info3;
+ __le32 ht_ctrl;
+ __le32 rsvd1[2];
+ __le32 info4;
+ __le32 info5;
+ __le32 usr_resp_ref;
+ __le32 info6;
+ __le32 rsvd3[4];
+ __le32 mpdu_ok_cnt;
+ __le32 rsvd4;
+ __le32 mpdu_err_cnt;
+ __le32 rsvd5[2];
+ __le32 usr_resp_ref_ext;
+ __le32 rsvd6;
+} __packed;
+
+struct hal_rx_ppdu_end_user_stats_ext {
+ __le32 info0;
+ __le32 info1;
+ __le32 info2;
+ __le32 info3;
+ __le32 info4;
+ __le32 info5;
+ __le32 info6;
+} __packed;
+
+#define HAL_RX_HT_SIG_INFO_INFO0_MCS GENMASK(6, 0)
+#define HAL_RX_HT_SIG_INFO_INFO0_BW BIT(7)
+
+#define HAL_RX_HT_SIG_INFO_INFO1_STBC GENMASK(5, 4)
+#define HAL_RX_HT_SIG_INFO_INFO1_FEC_CODING BIT(6)
+#define HAL_RX_HT_SIG_INFO_INFO1_GI BIT(7)
+
+struct hal_rx_ht_sig_info {
+ __le32 info0;
+ __le32 info1;
+} __packed;
+
+#define HAL_RX_LSIG_B_INFO_INFO0_RATE GENMASK(3, 0)
+#define HAL_RX_LSIG_B_INFO_INFO0_LEN GENMASK(15, 4)
+
+struct hal_rx_lsig_b_info {
+ __le32 info0;
+} __packed;
+
+#define HAL_RX_LSIG_A_INFO_INFO0_RATE GENMASK(3, 0)
+#define HAL_RX_LSIG_A_INFO_INFO0_LEN GENMASK(16, 5)
+#define HAL_RX_LSIG_A_INFO_INFO0_PKT_TYPE GENMASK(27, 24)
+
+struct hal_rx_lsig_a_info {
+ __le32 info0;
+} __packed;
+
+#define HAL_RX_VHT_SIG_A_INFO_INFO0_BW GENMASK(1, 0)
+#define HAL_RX_VHT_SIG_A_INFO_INFO0_STBC BIT(3)
+#define HAL_RX_VHT_SIG_A_INFO_INFO0_GROUP_ID GENMASK(9, 4)
+#define HAL_RX_VHT_SIG_A_INFO_INFO0_NSTS GENMASK(21, 10)
+
+#define HAL_RX_VHT_SIG_A_INFO_INFO1_GI_SETTING GENMASK(1, 0)
+#define HAL_RX_VHT_SIG_A_INFO_INFO1_SU_MU_CODING BIT(2)
+#define HAL_RX_VHT_SIG_A_INFO_INFO1_MCS GENMASK(7, 4)
+#define HAL_RX_VHT_SIG_A_INFO_INFO1_BEAMFORMED BIT(8)
+
+struct hal_rx_vht_sig_a_info {
+ __le32 info0;
+ __le32 info1;
+} __packed;
+
+enum hal_rx_vht_sig_a_gi_setting {
+ HAL_RX_VHT_SIG_A_NORMAL_GI = 0,
+ HAL_RX_VHT_SIG_A_SHORT_GI = 1,
+ HAL_RX_VHT_SIG_A_SHORT_GI_AMBIGUITY = 3,
+};
+
+#define HE_GI_0_8 0
+#define HE_GI_0_4 1
+#define HE_GI_1_6 2
+#define HE_GI_3_2 3
+
+#define HE_LTF_1_X 0
+#define HE_LTF_2_X 1
+#define HE_LTF_4_X 2
+
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_TRANSMIT_MCS GENMASK(6, 3)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_DCM BIT(7)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_TRANSMIT_BW GENMASK(20, 19)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_CP_LTF_SIZE GENMASK(22, 21)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_NSTS GENMASK(25, 23)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_BSS_COLOR GENMASK(13, 8)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_SPATIAL_REUSE GENMASK(18, 15)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_FORMAT_IND BIT(0)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_BEAM_CHANGE BIT(1)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_DL_UL_FLAG BIT(2)
+
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO1_TXOP_DURATION GENMASK(6, 0)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO1_CODING BIT(7)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO1_LDPC_EXTRA BIT(8)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO1_STBC BIT(9)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO1_TXBF BIT(10)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO1_PKT_EXT_FACTOR GENMASK(12, 11)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO1_PKT_EXT_PE_DISAM BIT(13)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO1_DOPPLER_IND BIT(15)
+
+struct hal_rx_he_sig_a_su_info {
+ __le32 info0;
+ __le32 info1;
+} __packed;
+
+#define HAL_RX_HE_SIG_A_MU_DL_INFO0_UL_FLAG BIT(1)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO0_MCS_OF_SIGB GENMASK(3, 1)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO0_DCM_OF_SIGB BIT(4)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO0_BSS_COLOR GENMASK(10, 5)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO0_SPATIAL_REUSE GENMASK(14, 11)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO0_TRANSMIT_BW GENMASK(17, 15)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO0_NUM_SIGB_SYMB GENMASK(21, 18)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO0_COMP_MODE_SIGB BIT(22)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO0_CP_LTF_SIZE GENMASK(24, 23)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO0_DOPPLER_INDICATION BIT(25)
+
+#define HAL_RX_HE_SIG_A_MU_DL_INFO1_TXOP_DURATION GENMASK(6, 0)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO1_CODING BIT(7)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO1_NUM_LTF_SYMB GENMASK(10, 8)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO1_LDPC_EXTRA BIT(11)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO1_STBC BIT(12)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO1_TXBF BIT(10)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO1_PKT_EXT_FACTOR GENMASK(14, 13)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO1_PKT_EXT_PE_DISAM BIT(15)
+
+struct hal_rx_he_sig_a_mu_dl_info {
+ __le32 info0;
+ __le32 info1;
+} __packed;
+
+#define HAL_RX_HE_SIG_B1_MU_INFO_INFO0_RU_ALLOCATION GENMASK(7, 0)
+
+struct hal_rx_he_sig_b1_mu_info {
+ __le32 info0;
+} __packed;
+
+#define HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_ID GENMASK(10, 0)
+#define HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_MCS GENMASK(18, 15)
+#define HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_CODING BIT(20)
+#define HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_NSTS GENMASK(31, 29)
+
+struct hal_rx_he_sig_b2_mu_info {
+ __le32 info0;
+} __packed;
+
+#define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_ID GENMASK(10, 0)
+#define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_NSTS GENMASK(13, 11)
+#define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_TXBF BIT(19)
+#define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_MCS GENMASK(18, 15)
+#define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_DCM BIT(19)
+#define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_CODING BIT(20)
+
+struct hal_rx_he_sig_b2_ofdma_info {
+ __le32 info0;
+} __packed;
+
+enum hal_rx_ul_reception_type {
+ HAL_RECEPTION_TYPE_ULOFMDA,
+ HAL_RECEPTION_TYPE_ULMIMO,
+ HAL_RECEPTION_TYPE_OTHER,
+ HAL_RECEPTION_TYPE_FRAMELESS
+};
+
+#define HAL_RX_PHYRX_RSSI_LEGACY_INFO_INFO0_RSSI_COMB GENMASK(15, 8)
+#define HAL_RX_PHYRX_RSSI_LEGACY_INFO_RSVD1_RECEPTION GENMASK(3, 0)
+
+struct hal_rx_phyrx_rssi_legacy_info {
+ __le32 rsvd[35];
+ __le32 info0;
+} __packed;
+
+#define HAL_RX_MPDU_START_INFO0_PPDU_ID GENMASK(31, 16)
+#define HAL_RX_MPDU_START_INFO1_PEERID GENMASK(31, 16)
+#define HAL_RX_MPDU_START_INFO2_MPDU_LEN GENMASK(13, 0)
+struct hal_rx_mpdu_start {
+ __le32 info0;
+ __le32 info1;
+ __le32 rsvd1[11];
+ __le32 info2;
+ __le32 rsvd2[9];
+} __packed;
+
+#define HAL_RX_PPDU_END_DURATION GENMASK(23, 0)
+struct hal_rx_ppdu_end_duration {
+ __le32 rsvd0[9];
+ __le32 info0;
+ __le32 rsvd1[4];
+} __packed;
+
+struct hal_rx_rxpcu_classification_overview {
+ u32 rsvd0;
+} __packed;
+
+struct hal_rx_msdu_desc_info {
+ u32 msdu_flags;
+ u16 msdu_len; /* 14 bits for length */
+};
+
+#define HAL_RX_NUM_MSDU_DESC 6
+struct hal_rx_msdu_list {
+ struct hal_rx_msdu_desc_info msdu_info[HAL_RX_NUM_MSDU_DESC];
+ u32 sw_cookie[HAL_RX_NUM_MSDU_DESC];
+ u8 rbm[HAL_RX_NUM_MSDU_DESC];
+};
+
+#define HAL_RX_FBM_ACK_INFO0_ADDR1_31_0 GENMASK(31, 0)
+#define HAL_RX_FBM_ACK_INFO1_ADDR1_47_32 GENMASK(15, 0)
+#define HAL_RX_FBM_ACK_INFO1_ADDR2_15_0 GENMASK(31, 16)
+#define HAL_RX_FBM_ACK_INFO2_ADDR2_47_16 GENMASK(31, 0)
+
+struct hal_rx_frame_bitmap_ack {
+ __le32 reserved;
+ __le32 info0;
+ __le32 info1;
+ __le32 info2;
+ __le32 reserved1[10];
+} __packed;
+
+#define HAL_RX_RESP_REQ_INFO0_PPDU_ID GENMASK(15, 0)
+#define HAL_RX_RESP_REQ_INFO0_RECEPTION_TYPE BIT(16)
+#define HAL_RX_RESP_REQ_INFO1_DURATION GENMASK(15, 0)
+#define HAL_RX_RESP_REQ_INFO1_RATE_MCS GENMASK(24, 21)
+#define HAL_RX_RESP_REQ_INFO1_SGI GENMASK(26, 25)
+#define HAL_RX_RESP_REQ_INFO1_STBC BIT(27)
+#define HAL_RX_RESP_REQ_INFO1_LDPC BIT(28)
+#define HAL_RX_RESP_REQ_INFO1_IS_AMPDU BIT(29)
+#define HAL_RX_RESP_REQ_INFO2_NUM_USER GENMASK(6, 0)
+#define HAL_RX_RESP_REQ_INFO3_ADDR1_31_0 GENMASK(31, 0)
+#define HAL_RX_RESP_REQ_INFO4_ADDR1_47_32 GENMASK(15, 0)
+#define HAL_RX_RESP_REQ_INFO4_ADDR1_15_0 GENMASK(31, 16)
+#define HAL_RX_RESP_REQ_INFO5_ADDR1_47_16 GENMASK(31, 0)
+
+struct hal_rx_resp_req_info {
+ __le32 info0;
+ __le32 reserved[1];
+ __le32 info1;
+ __le32 info2;
+ __le32 reserved1[2];
+ __le32 info3;
+ __le32 info4;
+ __le32 info5;
+ __le32 reserved2[5];
+} __packed;
+
+#define REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_0 0xDDBEEF
+#define REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_1 0xADBEEF
+#define REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_2 0xBDBEEF
+#define REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_3 0xCDBEEF
+
+#define HAL_RX_UL_OFDMA_USER_INFO_V0_W0_VALID BIT(30)
+#define HAL_RX_UL_OFDMA_USER_INFO_V0_W0_VER BIT(31)
+#define HAL_RX_UL_OFDMA_USER_INFO_V0_W1_NSS GENMASK(2, 0)
+#define HAL_RX_UL_OFDMA_USER_INFO_V0_W1_MCS GENMASK(6, 3)
+#define HAL_RX_UL_OFDMA_USER_INFO_V0_W1_LDPC BIT(7)
+#define HAL_RX_UL_OFDMA_USER_INFO_V0_W1_DCM BIT(8)
+#define HAL_RX_UL_OFDMA_USER_INFO_V0_W1_RU_START GENMASK(15, 9)
+#define HAL_RX_UL_OFDMA_USER_INFO_V0_W1_RU_SIZE GENMASK(18, 16)
+
+/* HE Radiotap data1 Mask */
+#define HE_SU_FORMAT_TYPE 0x0000
+#define HE_EXT_SU_FORMAT_TYPE 0x0001
+#define HE_MU_FORMAT_TYPE 0x0002
+#define HE_TRIG_FORMAT_TYPE 0x0003
+#define HE_BEAM_CHANGE_KNOWN 0x0008
+#define HE_DL_UL_KNOWN 0x0010
+#define HE_MCS_KNOWN 0x0020
+#define HE_DCM_KNOWN 0x0040
+#define HE_CODING_KNOWN 0x0080
+#define HE_LDPC_EXTRA_SYMBOL_KNOWN 0x0100
+#define HE_STBC_KNOWN 0x0200
+#define HE_DATA_BW_RU_KNOWN 0x4000
+#define HE_DOPPLER_KNOWN 0x8000
+#define HE_BSS_COLOR_KNOWN 0x0004
+
+/* HE Radiotap data2 Mask */
+#define HE_GI_KNOWN 0x0002
+#define HE_TXBF_KNOWN 0x0010
+#define HE_PE_DISAMBIGUITY_KNOWN 0x0020
+#define HE_TXOP_KNOWN 0x0040
+#define HE_LTF_SYMBOLS_KNOWN 0x0004
+#define HE_PRE_FEC_PADDING_KNOWN 0x0008
+#define HE_MIDABLE_PERIODICITY_KNOWN 0x0080
+
+/* HE radiotap data3 shift values */
+#define HE_BEAM_CHANGE_SHIFT 6
+#define HE_DL_UL_SHIFT 7
+#define HE_TRANSMIT_MCS_SHIFT 8
+#define HE_DCM_SHIFT 12
+#define HE_CODING_SHIFT 13
+#define HE_LDPC_EXTRA_SYMBOL_SHIFT 14
+#define HE_STBC_SHIFT 15
+
+/* HE radiotap data4 shift values */
+#define HE_STA_ID_SHIFT 4
+
+/* HE radiotap data5 */
+#define HE_GI_SHIFT 4
+#define HE_LTF_SIZE_SHIFT 6
+#define HE_LTF_SYM_SHIFT 8
+#define HE_TXBF_SHIFT 14
+#define HE_PE_DISAMBIGUITY_SHIFT 15
+#define HE_PRE_FEC_PAD_SHIFT 12
+
+/* HE radiotap data6 */
+#define HE_DOPPLER_SHIFT 4
+#define HE_TXOP_SHIFT 8
+
+/* HE radiotap HE-MU flags1 */
+#define HE_SIG_B_MCS_KNOWN 0x0010
+#define HE_SIG_B_DCM_KNOWN 0x0040
+#define HE_SIG_B_SYM_NUM_KNOWN 0x8000
+#define HE_RU_0_KNOWN 0x0100
+#define HE_RU_1_KNOWN 0x0200
+#define HE_RU_2_KNOWN 0x0400
+#define HE_RU_3_KNOWN 0x0800
+#define HE_DCM_FLAG_1_SHIFT 5
+#define HE_SPATIAL_REUSE_MU_KNOWN 0x0100
+#define HE_SIG_B_COMPRESSION_FLAG_1_KNOWN 0x4000
+
+/* HE radiotap HE-MU flags2 */
+#define HE_SIG_B_COMPRESSION_FLAG_2_SHIFT 3
+#define HE_BW_KNOWN 0x0004
+#define HE_NUM_SIG_B_SYMBOLS_SHIFT 4
+#define HE_SIG_B_COMPRESSION_FLAG_2_KNOWN 0x0100
+#define HE_NUM_SIG_B_FLAG_2_SHIFT 9
+#define HE_LTF_FLAG_2_SYMBOLS_SHIFT 12
+#define HE_LTF_KNOWN 0x8000
+
+/* HE radiotap per_user_1 */
+#define HE_STA_SPATIAL_SHIFT 11
+#define HE_TXBF_SHIFT 14
+#define HE_RESERVED_SET_TO_1_SHIFT 19
+#define HE_STA_CODING_SHIFT 20
+
+/* HE radiotap per_user_2 */
+#define HE_STA_MCS_SHIFT 4
+#define HE_STA_DCM_SHIFT 5
+
+/* HE radiotap per user known */
+#define HE_USER_FIELD_POSITION_KNOWN 0x01
+#define HE_STA_ID_PER_USER_KNOWN 0x02
+#define HE_STA_NSTS_KNOWN 0x04
+#define HE_STA_TX_BF_KNOWN 0x08
+#define HE_STA_SPATIAL_CONFIG_KNOWN 0x10
+#define HE_STA_MCS_KNOWN 0x20
+#define HE_STA_DCM_KNOWN 0x40
+#define HE_STA_CODING_KNOWN 0x80
+
+#define HAL_RX_MPDU_ERR_FCS BIT(0)
+#define HAL_RX_MPDU_ERR_DECRYPT BIT(1)
+#define HAL_RX_MPDU_ERR_TKIP_MIC BIT(2)
+#define HAL_RX_MPDU_ERR_AMSDU_ERR BIT(3)
+#define HAL_RX_MPDU_ERR_OVERFLOW BIT(4)
+#define HAL_RX_MPDU_ERR_MSDU_LEN BIT(5)
+#define HAL_RX_MPDU_ERR_MPDU_LEN BIT(6)
+#define HAL_RX_MPDU_ERR_UNENCRYPTED_FRAME BIT(7)
+
+static inline
+enum nl80211_he_ru_alloc ath12k_he_ru_tones_to_nl80211_he_ru_alloc(u16 ru_tones)
+{
+ enum nl80211_he_ru_alloc ret;
+
+ switch (ru_tones) {
+ case RU_52:
+ ret = NL80211_RATE_INFO_HE_RU_ALLOC_52;
+ break;
+ case RU_106:
+ ret = NL80211_RATE_INFO_HE_RU_ALLOC_106;
+ break;
+ case RU_242:
+ ret = NL80211_RATE_INFO_HE_RU_ALLOC_242;
+ break;
+ case RU_484:
+ ret = NL80211_RATE_INFO_HE_RU_ALLOC_484;
+ break;
+ case RU_996:
+ ret = NL80211_RATE_INFO_HE_RU_ALLOC_996;
+ break;
+ case RU_26:
+ fallthrough;
+ default:
+ ret = NL80211_RATE_INFO_HE_RU_ALLOC_26;
+ break;
+ }
+ return ret;
+}
+
+void ath12k_hal_reo_status_queue_stats(struct ath12k_base *ab,
+ struct hal_tlv_64_hdr *tlv,
+ struct hal_reo_status *status);
+void ath12k_hal_reo_flush_queue_status(struct ath12k_base *ab,
+ struct hal_tlv_64_hdr *tlv,
+ struct hal_reo_status *status);
+void ath12k_hal_reo_flush_cache_status(struct ath12k_base *ab,
+ struct hal_tlv_64_hdr *tlv,
+ struct hal_reo_status *status);
+void ath12k_hal_reo_unblk_cache_status(struct ath12k_base *ab,
+ struct hal_tlv_64_hdr *tlv,
+ struct hal_reo_status *status);
+void ath12k_hal_reo_flush_timeout_list_status(struct ath12k_base *ab,
+ struct hal_tlv_64_hdr *tlv,
+ struct hal_reo_status *status);
+void ath12k_hal_reo_desc_thresh_reached_status(struct ath12k_base *ab,
+ struct hal_tlv_64_hdr *tlv,
+ struct hal_reo_status *status);
+void ath12k_hal_reo_update_rx_reo_queue_status(struct ath12k_base *ab,
+ struct hal_tlv_64_hdr *tlv,
+ struct hal_reo_status *status);
+void ath12k_hal_rx_msdu_link_info_get(struct hal_rx_msdu_link *link, u32 *num_msdus,
+ u32 *msdu_cookies,
+ enum hal_rx_buf_return_buf_manager *rbm);
+void ath12k_hal_rx_msdu_link_desc_set(struct ath12k_base *ab,
+ struct hal_wbm_release_ring *dst_desc,
+ struct hal_wbm_release_ring *src_desc,
+ enum hal_wbm_rel_bm_act action);
+void ath12k_hal_rx_buf_addr_info_set(struct ath12k_buffer_addr *binfo,
+ dma_addr_t paddr, u32 cookie, u8 manager);
+void ath12k_hal_rx_buf_addr_info_get(struct ath12k_buffer_addr *binfo,
+ dma_addr_t *paddr,
+ u32 *cookie, u8 *rbm);
+int ath12k_hal_desc_reo_parse_err(struct ath12k_base *ab,
+ struct hal_reo_dest_ring *desc,
+ dma_addr_t *paddr, u32 *desc_bank);
+int ath12k_hal_wbm_desc_parse_err(struct ath12k_base *ab, void *desc,
+ struct hal_rx_wbm_rel_info *rel_info);
+void ath12k_hal_rx_reo_ent_paddr_get(struct ath12k_base *ab,
+ struct ath12k_buffer_addr *buff_addr,
+ dma_addr_t *paddr, u32 *cookie);
+
+#endif
diff --git a/drivers/net/wireless/ath/ath12k/hal_tx.c b/drivers/net/wireless/ath/ath12k/hal_tx.c
new file mode 100644
index 000000000000..869e07e406fe
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/hal_tx.c
@@ -0,0 +1,145 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include "hal_desc.h"
+#include "hal.h"
+#include "hal_tx.h"
+#include "hif.h"
+
+#define DSCP_TID_MAP_TBL_ENTRY_SIZE 64
+
+/* dscp_tid_map - Default DSCP-TID mapping
+ *=================
+ * DSCP TID
+ *=================
+ * 000xxx 0
+ * 001xxx 1
+ * 010xxx 2
+ * 011xxx 3
+ * 100xxx 4
+ * 101xxx 5
+ * 110xxx 6
+ * 111xxx 7
+ */
+static inline u8 dscp2tid(u8 dscp)
+{
+ return dscp >> 3;
+}
+
+void ath12k_hal_tx_cmd_desc_setup(struct ath12k_base *ab,
+ struct hal_tcl_data_cmd *tcl_cmd,
+ struct hal_tx_info *ti)
+{
+ tcl_cmd->buf_addr_info.info0 =
+ le32_encode_bits(ti->paddr, BUFFER_ADDR_INFO0_ADDR);
+ tcl_cmd->buf_addr_info.info1 =
+ le32_encode_bits(((uint64_t)ti->paddr >> HAL_ADDR_MSB_REG_SHIFT),
+ BUFFER_ADDR_INFO1_ADDR);
+ tcl_cmd->buf_addr_info.info1 |=
+ le32_encode_bits((ti->rbm_id), BUFFER_ADDR_INFO1_RET_BUF_MGR) |
+ le32_encode_bits(ti->desc_id, BUFFER_ADDR_INFO1_SW_COOKIE);
+
+ tcl_cmd->info0 =
+ le32_encode_bits(ti->type, HAL_TCL_DATA_CMD_INFO0_DESC_TYPE) |
+ le32_encode_bits(ti->bank_id, HAL_TCL_DATA_CMD_INFO0_BANK_ID);
+
+ tcl_cmd->info1 =
+ le32_encode_bits(ti->meta_data_flags,
+ HAL_TCL_DATA_CMD_INFO1_CMD_NUM);
+
+ tcl_cmd->info2 = cpu_to_le32(ti->flags0) |
+ le32_encode_bits(ti->data_len, HAL_TCL_DATA_CMD_INFO2_DATA_LEN) |
+ le32_encode_bits(ti->pkt_offset, HAL_TCL_DATA_CMD_INFO2_PKT_OFFSET);
+
+ tcl_cmd->info3 = cpu_to_le32(ti->flags1) |
+ le32_encode_bits(ti->tid, HAL_TCL_DATA_CMD_INFO3_TID) |
+ le32_encode_bits(ti->lmac_id, HAL_TCL_DATA_CMD_INFO3_PMAC_ID) |
+ le32_encode_bits(ti->vdev_id, HAL_TCL_DATA_CMD_INFO3_VDEV_ID);
+
+ tcl_cmd->info4 = le32_encode_bits(ti->bss_ast_idx,
+ HAL_TCL_DATA_CMD_INFO4_SEARCH_INDEX) |
+ le32_encode_bits(ti->bss_ast_hash,
+ HAL_TCL_DATA_CMD_INFO4_CACHE_SET_NUM);
+ tcl_cmd->info5 = 0;
+}
+
+void ath12k_hal_tx_set_dscp_tid_map(struct ath12k_base *ab, int id)
+{
+ u32 ctrl_reg_val;
+ u32 addr;
+ u8 hw_map_val[HAL_DSCP_TID_TBL_SIZE], dscp, tid;
+ int i;
+ u32 value;
+
+ ctrl_reg_val = ath12k_hif_read32(ab, HAL_SEQ_WCSS_UMAC_TCL_REG +
+ HAL_TCL1_RING_CMN_CTRL_REG);
+ /* Enable read/write access */
+ ctrl_reg_val |= HAL_TCL1_RING_CMN_CTRL_DSCP_TID_MAP_PROG_EN;
+ ath12k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_TCL_REG +
+ HAL_TCL1_RING_CMN_CTRL_REG, ctrl_reg_val);
+
+ addr = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_DSCP_TID_MAP +
+ (4 * id * (HAL_DSCP_TID_TBL_SIZE / 4));
+
+ /* Configure each DSCP-TID mapping in three bits there by configure
+ * three bytes in an iteration.
+ */
+ for (i = 0, dscp = 0; i < HAL_DSCP_TID_TBL_SIZE; i += 3) {
+ tid = dscp2tid(dscp);
+ value = u32_encode_bits(tid, HAL_TCL1_RING_FIELD_DSCP_TID_MAP0);
+ dscp++;
+
+ tid = dscp2tid(dscp);
+ value |= u32_encode_bits(tid, HAL_TCL1_RING_FIELD_DSCP_TID_MAP1);
+ dscp++;
+
+ tid = dscp2tid(dscp);
+ value |= u32_encode_bits(tid, HAL_TCL1_RING_FIELD_DSCP_TID_MAP2);
+ dscp++;
+
+ tid = dscp2tid(dscp);
+ value |= u32_encode_bits(tid, HAL_TCL1_RING_FIELD_DSCP_TID_MAP3);
+ dscp++;
+
+ tid = dscp2tid(dscp);
+ value |= u32_encode_bits(tid, HAL_TCL1_RING_FIELD_DSCP_TID_MAP4);
+ dscp++;
+
+ tid = dscp2tid(dscp);
+ value |= u32_encode_bits(tid, HAL_TCL1_RING_FIELD_DSCP_TID_MAP5);
+ dscp++;
+
+ tid = dscp2tid(dscp);
+ value |= u32_encode_bits(tid, HAL_TCL1_RING_FIELD_DSCP_TID_MAP6);
+ dscp++;
+
+ tid = dscp2tid(dscp);
+ value |= u32_encode_bits(tid, HAL_TCL1_RING_FIELD_DSCP_TID_MAP7);
+ dscp++;
+
+ memcpy(&hw_map_val[i], &value, 3);
+ }
+
+ for (i = 0; i < HAL_DSCP_TID_TBL_SIZE; i += 4) {
+ ath12k_hif_write32(ab, addr, *(u32 *)&hw_map_val[i]);
+ addr += 4;
+ }
+
+ /* Disable read/write access */
+ ctrl_reg_val = ath12k_hif_read32(ab, HAL_SEQ_WCSS_UMAC_TCL_REG +
+ HAL_TCL1_RING_CMN_CTRL_REG);
+ ctrl_reg_val &= ~HAL_TCL1_RING_CMN_CTRL_DSCP_TID_MAP_PROG_EN;
+ ath12k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_TCL_REG +
+ HAL_TCL1_RING_CMN_CTRL_REG,
+ ctrl_reg_val);
+}
+
+void ath12k_hal_tx_configure_bank_register(struct ath12k_base *ab, u32 bank_config,
+ u8 bank_id)
+{
+ ath12k_hif_write32(ab, HAL_TCL_SW_CONFIG_BANK_ADDR + 4 * bank_id,
+ bank_config);
+}
diff --git a/drivers/net/wireless/ath/ath12k/hal_tx.h b/drivers/net/wireless/ath/ath12k/hal_tx.h
new file mode 100644
index 000000000000..7c837094a6f7
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/hal_tx.h
@@ -0,0 +1,194 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef ATH12K_HAL_TX_H
+#define ATH12K_HAL_TX_H
+
+#include "hal_desc.h"
+#include "core.h"
+
+#define HAL_TX_ADDRX_EN 1
+#define HAL_TX_ADDRY_EN 2
+
+#define HAL_TX_ADDR_SEARCH_DEFAULT 0
+#define HAL_TX_ADDR_SEARCH_INDEX 1
+
+/* TODO: check all these data can be managed with struct ath12k_tx_desc_info for perf */
+struct hal_tx_info {
+ u16 meta_data_flags; /* %HAL_TCL_DATA_CMD_INFO0_META_ */
+ u8 ring_id;
+ u8 rbm_id;
+ u32 desc_id;
+ enum hal_tcl_desc_type type;
+ enum hal_tcl_encap_type encap_type;
+ dma_addr_t paddr;
+ u32 data_len;
+ u32 pkt_offset;
+ enum hal_encrypt_type encrypt_type;
+ u32 flags0; /* %HAL_TCL_DATA_CMD_INFO1_ */
+ u32 flags1; /* %HAL_TCL_DATA_CMD_INFO2_ */
+ u16 addr_search_flags; /* %HAL_TCL_DATA_CMD_INFO0_ADDR(X/Y)_ */
+ u16 bss_ast_hash;
+ u16 bss_ast_idx;
+ u8 tid;
+ u8 search_type; /* %HAL_TX_ADDR_SEARCH_ */
+ u8 lmac_id;
+ u8 vdev_id;
+ u8 dscp_tid_tbl_idx;
+ bool enable_mesh;
+ int bank_id;
+};
+
+/* TODO: Check if the actual desc macros can be used instead */
+#define HAL_TX_STATUS_FLAGS_FIRST_MSDU BIT(0)
+#define HAL_TX_STATUS_FLAGS_LAST_MSDU BIT(1)
+#define HAL_TX_STATUS_FLAGS_MSDU_IN_AMSDU BIT(2)
+#define HAL_TX_STATUS_FLAGS_RATE_STATS_VALID BIT(3)
+#define HAL_TX_STATUS_FLAGS_RATE_LDPC BIT(4)
+#define HAL_TX_STATUS_FLAGS_RATE_STBC BIT(5)
+#define HAL_TX_STATUS_FLAGS_OFDMA BIT(6)
+
+#define HAL_TX_STATUS_DESC_LEN sizeof(struct hal_wbm_release_ring)
+
+/* Tx status parsed from srng desc */
+struct hal_tx_status {
+ enum hal_wbm_rel_src_module buf_rel_source;
+ enum hal_wbm_tqm_rel_reason status;
+ u8 ack_rssi;
+ u32 flags; /* %HAL_TX_STATUS_FLAGS_ */
+ u32 ppdu_id;
+ u8 try_cnt;
+ u8 tid;
+ u16 peer_id;
+ u32 rate_stats;
+};
+
+#define HAL_TX_PHY_DESC_INFO0_BF_TYPE GENMASK(17, 16)
+#define HAL_TX_PHY_DESC_INFO0_PREAMBLE_11B BIT(20)
+#define HAL_TX_PHY_DESC_INFO0_PKT_TYPE GENMASK(24, 21)
+#define HAL_TX_PHY_DESC_INFO0_BANDWIDTH GENMASK(30, 28)
+#define HAL_TX_PHY_DESC_INFO1_MCS GENMASK(3, 0)
+#define HAL_TX_PHY_DESC_INFO1_STBC BIT(6)
+#define HAL_TX_PHY_DESC_INFO2_NSS GENMASK(23, 21)
+#define HAL_TX_PHY_DESC_INFO3_AP_PKT_BW GENMASK(6, 4)
+#define HAL_TX_PHY_DESC_INFO3_LTF_SIZE GENMASK(20, 19)
+#define HAL_TX_PHY_DESC_INFO3_ACTIVE_CHANNEL GENMASK(17, 15)
+
+struct hal_tx_phy_desc {
+ __le32 info0;
+ __le32 info1;
+ __le32 info2;
+ __le32 info3;
+} __packed;
+
+#define HAL_TX_FES_STAT_PROT_INFO0_STRT_FRM_TS_15_0 GENMASK(15, 0)
+#define HAL_TX_FES_STAT_PROT_INFO0_STRT_FRM_TS_31_16 GENMASK(31, 16)
+#define HAL_TX_FES_STAT_PROT_INFO1_END_FRM_TS_15_0 GENMASK(15, 0)
+#define HAL_TX_FES_STAT_PROT_INFO1_END_FRM_TS_31_16 GENMASK(31, 16)
+
+struct hal_tx_fes_status_prot {
+ __le64 reserved;
+ __le32 info0;
+ __le32 info1;
+ __le32 reserved1[11];
+} __packed;
+
+#define HAL_TX_FES_STAT_USR_PPDU_INFO0_DURATION GENMASK(15, 0)
+
+struct hal_tx_fes_status_user_ppdu {
+ __le64 reserved;
+ __le32 info0;
+ __le32 reserved1[3];
+} __packed;
+
+#define HAL_TX_FES_STAT_STRT_INFO0_PROT_TS_LOWER_32 GENMASK(31, 0)
+#define HAL_TX_FES_STAT_STRT_INFO1_PROT_TS_UPPER_32 GENMASK(31, 0)
+
+struct hal_tx_fes_status_start_prot {
+ __le32 info0;
+ __le32 info1;
+ __le64 reserved;
+} __packed;
+
+#define HAL_TX_FES_STATUS_START_INFO0_MEDIUM_PROT_TYPE GENMASK(29, 27)
+
+struct hal_tx_fes_status_start {
+ __le32 reserved;
+ __le32 info0;
+ __le64 reserved1;
+} __packed;
+
+#define HAL_TX_Q_EXT_INFO0_FRAME_CTRL GENMASK(15, 0)
+#define HAL_TX_Q_EXT_INFO0_QOS_CTRL GENMASK(31, 16)
+#define HAL_TX_Q_EXT_INFO1_AMPDU_FLAG BIT(0)
+
+struct hal_tx_queue_exten {
+ __le32 info0;
+ __le32 info1;
+} __packed;
+
+#define HAL_TX_FES_SETUP_INFO0_NUM_OF_USERS GENMASK(28, 23)
+
+struct hal_tx_fes_setup {
+ __le32 schedule_id;
+ __le32 info0;
+ __le64 reserved;
+} __packed;
+
+#define HAL_TX_PPDU_SETUP_INFO0_MEDIUM_PROT_TYPE GENMASK(2, 0)
+#define HAL_TX_PPDU_SETUP_INFO1_PROT_FRAME_ADDR1_31_0 GENMASK(31, 0)
+#define HAL_TX_PPDU_SETUP_INFO2_PROT_FRAME_ADDR1_47_32 GENMASK(15, 0)
+#define HAL_TX_PPDU_SETUP_INFO2_PROT_FRAME_ADDR2_15_0 GENMASK(31, 16)
+#define HAL_TX_PPDU_SETUP_INFO3_PROT_FRAME_ADDR2_47_16 GENMASK(31, 0)
+#define HAL_TX_PPDU_SETUP_INFO4_PROT_FRAME_ADDR3_31_0 GENMASK(31, 0)
+#define HAL_TX_PPDU_SETUP_INFO5_PROT_FRAME_ADDR3_47_32 GENMASK(15, 0)
+#define HAL_TX_PPDU_SETUP_INFO5_PROT_FRAME_ADDR4_15_0 GENMASK(31, 16)
+#define HAL_TX_PPDU_SETUP_INFO6_PROT_FRAME_ADDR4_47_16 GENMASK(31, 0)
+
+struct hal_tx_pcu_ppdu_setup_init {
+ __le32 info0;
+ __le32 info1;
+ __le32 info2;
+ __le32 info3;
+ __le32 reserved;
+ __le32 info4;
+ __le32 info5;
+ __le32 info6;
+} __packed;
+
+#define HAL_TX_FES_STATUS_END_INFO0_START_TIMESTAMP_15_0 GENMASK(15, 0)
+#define HAL_TX_FES_STATUS_END_INFO0_START_TIMESTAMP_31_16 GENMASK(31, 16)
+
+struct hal_tx_fes_status_end {
+ __le32 reserved[2];
+ __le32 info0;
+ __le32 reserved1[19];
+} __packed;
+
+#define HAL_TX_BANK_CONFIG_EPD BIT(0)
+#define HAL_TX_BANK_CONFIG_ENCAP_TYPE GENMASK(2, 1)
+#define HAL_TX_BANK_CONFIG_ENCRYPT_TYPE GENMASK(6, 3)
+#define HAL_TX_BANK_CONFIG_SRC_BUFFER_SWAP BIT(7)
+#define HAL_TX_BANK_CONFIG_LINK_META_SWAP BIT(8)
+#define HAL_TX_BANK_CONFIG_INDEX_LOOKUP_EN BIT(9)
+#define HAL_TX_BANK_CONFIG_ADDRX_EN BIT(10)
+#define HAL_TX_BANK_CONFIG_ADDRY_EN BIT(11)
+#define HAL_TX_BANK_CONFIG_MESH_EN GENMASK(13, 12)
+#define HAL_TX_BANK_CONFIG_VDEV_ID_CHECK_EN BIT(14)
+#define HAL_TX_BANK_CONFIG_PMAC_ID GENMASK(16, 15)
+/* STA mode will have MCAST_PKT_CTRL instead of DSCP_TID_MAP bitfield */
+#define HAL_TX_BANK_CONFIG_DSCP_TIP_MAP_ID GENMASK(22, 17)
+
+void ath12k_hal_tx_cmd_desc_setup(struct ath12k_base *ab,
+ struct hal_tcl_data_cmd *tcl_cmd,
+ struct hal_tx_info *ti);
+void ath12k_hal_tx_set_dscp_tid_map(struct ath12k_base *ab, int id);
+int ath12k_hal_reo_cmd_send(struct ath12k_base *ab, struct hal_srng *srng,
+ enum hal_reo_cmd_type type,
+ struct ath12k_hal_reo_cmd *cmd);
+void ath12k_hal_tx_configure_bank_register(struct ath12k_base *ab, u32 bank_config,
+ u8 bank_id);
+#endif
diff --git a/drivers/net/wireless/ath/ath12k/hif.h b/drivers/net/wireless/ath/ath12k/hif.h
new file mode 100644
index 000000000000..54490cdb63a1
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/hif.h
@@ -0,0 +1,144 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef ATH12K_HIF_H
+#define ATH12K_HIF_H
+
+#include "core.h"
+
+struct ath12k_hif_ops {
+ u32 (*read32)(struct ath12k_base *sc, u32 address);
+ void (*write32)(struct ath12k_base *sc, u32 address, u32 data);
+ void (*irq_enable)(struct ath12k_base *sc);
+ void (*irq_disable)(struct ath12k_base *sc);
+ int (*start)(struct ath12k_base *sc);
+ void (*stop)(struct ath12k_base *sc);
+ int (*power_up)(struct ath12k_base *sc);
+ void (*power_down)(struct ath12k_base *sc);
+ int (*suspend)(struct ath12k_base *ab);
+ int (*resume)(struct ath12k_base *ab);
+ int (*map_service_to_pipe)(struct ath12k_base *sc, u16 service_id,
+ u8 *ul_pipe, u8 *dl_pipe);
+ int (*get_user_msi_vector)(struct ath12k_base *ab, char *user_name,
+ int *num_vectors, u32 *user_base_data,
+ u32 *base_vector);
+ void (*get_msi_address)(struct ath12k_base *ab, u32 *msi_addr_lo,
+ u32 *msi_addr_hi);
+ void (*ce_irq_enable)(struct ath12k_base *ab);
+ void (*ce_irq_disable)(struct ath12k_base *ab);
+ void (*get_ce_msi_idx)(struct ath12k_base *ab, u32 ce_id, u32 *msi_idx);
+};
+
+static inline int ath12k_hif_map_service_to_pipe(struct ath12k_base *ab, u16 service_id,
+ u8 *ul_pipe, u8 *dl_pipe)
+{
+ return ab->hif.ops->map_service_to_pipe(ab, service_id,
+ ul_pipe, dl_pipe);
+}
+
+static inline int ath12k_hif_get_user_msi_vector(struct ath12k_base *ab,
+ char *user_name,
+ int *num_vectors,
+ u32 *user_base_data,
+ u32 *base_vector)
+{
+ if (!ab->hif.ops->get_user_msi_vector)
+ return -EOPNOTSUPP;
+
+ return ab->hif.ops->get_user_msi_vector(ab, user_name, num_vectors,
+ user_base_data,
+ base_vector);
+}
+
+static inline void ath12k_hif_get_msi_address(struct ath12k_base *ab,
+ u32 *msi_addr_lo,
+ u32 *msi_addr_hi)
+{
+ if (!ab->hif.ops->get_msi_address)
+ return;
+
+ ab->hif.ops->get_msi_address(ab, msi_addr_lo, msi_addr_hi);
+}
+
+static inline void ath12k_hif_get_ce_msi_idx(struct ath12k_base *ab, u32 ce_id,
+ u32 *msi_data_idx)
+{
+ if (ab->hif.ops->get_ce_msi_idx)
+ ab->hif.ops->get_ce_msi_idx(ab, ce_id, msi_data_idx);
+ else
+ *msi_data_idx = ce_id;
+}
+
+static inline void ath12k_hif_ce_irq_enable(struct ath12k_base *ab)
+{
+ if (ab->hif.ops->ce_irq_enable)
+ ab->hif.ops->ce_irq_enable(ab);
+}
+
+static inline void ath12k_hif_ce_irq_disable(struct ath12k_base *ab)
+{
+ if (ab->hif.ops->ce_irq_disable)
+ ab->hif.ops->ce_irq_disable(ab);
+}
+
+static inline void ath12k_hif_irq_enable(struct ath12k_base *ab)
+{
+ ab->hif.ops->irq_enable(ab);
+}
+
+static inline void ath12k_hif_irq_disable(struct ath12k_base *ab)
+{
+ ab->hif.ops->irq_disable(ab);
+}
+
+static inline int ath12k_hif_suspend(struct ath12k_base *ab)
+{
+ if (ab->hif.ops->suspend)
+ return ab->hif.ops->suspend(ab);
+
+ return 0;
+}
+
+static inline int ath12k_hif_resume(struct ath12k_base *ab)
+{
+ if (ab->hif.ops->resume)
+ return ab->hif.ops->resume(ab);
+
+ return 0;
+}
+
+static inline int ath12k_hif_start(struct ath12k_base *ab)
+{
+ return ab->hif.ops->start(ab);
+}
+
+static inline void ath12k_hif_stop(struct ath12k_base *ab)
+{
+ ab->hif.ops->stop(ab);
+}
+
+static inline u32 ath12k_hif_read32(struct ath12k_base *ab, u32 address)
+{
+ return ab->hif.ops->read32(ab, address);
+}
+
+static inline void ath12k_hif_write32(struct ath12k_base *ab, u32 address,
+ u32 data)
+{
+ ab->hif.ops->write32(ab, address, data);
+}
+
+static inline int ath12k_hif_power_up(struct ath12k_base *ab)
+{
+ return ab->hif.ops->power_up(ab);
+}
+
+static inline void ath12k_hif_power_down(struct ath12k_base *ab)
+{
+ ab->hif.ops->power_down(ab);
+}
+
+#endif /* ATH12K_HIF_H */
diff --git a/drivers/net/wireless/ath/ath12k/htc.c b/drivers/net/wireless/ath/ath12k/htc.c
new file mode 100644
index 000000000000..23f7428abd95
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/htc.c
@@ -0,0 +1,789 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+#include <linux/skbuff.h>
+#include <linux/ctype.h>
+
+#include "debug.h"
+#include "hif.h"
+
+struct sk_buff *ath12k_htc_alloc_skb(struct ath12k_base *ab, int size)
+{
+ struct sk_buff *skb;
+
+ skb = dev_alloc_skb(size + sizeof(struct ath12k_htc_hdr));
+ if (!skb)
+ return NULL;
+
+ skb_reserve(skb, sizeof(struct ath12k_htc_hdr));
+
+ /* FW/HTC requires 4-byte aligned streams */
+ if (!IS_ALIGNED((unsigned long)skb->data, 4))
+ ath12k_warn(ab, "Unaligned HTC tx skb\n");
+
+ return skb;
+}
+
+static void ath12k_htc_control_tx_complete(struct ath12k_base *ab,
+ struct sk_buff *skb)
+{
+ kfree_skb(skb);
+}
+
+static struct sk_buff *ath12k_htc_build_tx_ctrl_skb(void)
+{
+ struct sk_buff *skb;
+ struct ath12k_skb_cb *skb_cb;
+
+ skb = dev_alloc_skb(ATH12K_HTC_CONTROL_BUFFER_SIZE);
+ if (!skb)
+ return NULL;
+
+ skb_reserve(skb, sizeof(struct ath12k_htc_hdr));
+ WARN_ON_ONCE(!IS_ALIGNED((unsigned long)skb->data, 4));
+
+ skb_cb = ATH12K_SKB_CB(skb);
+ memset(skb_cb, 0, sizeof(*skb_cb));
+
+ return skb;
+}
+
+static void ath12k_htc_prepare_tx_skb(struct ath12k_htc_ep *ep,
+ struct sk_buff *skb)
+{
+ struct ath12k_htc_hdr *hdr;
+
+ hdr = (struct ath12k_htc_hdr *)skb->data;
+
+ memset(hdr, 0, sizeof(*hdr));
+ hdr->htc_info = le32_encode_bits(ep->eid, HTC_HDR_ENDPOINTID) |
+ le32_encode_bits((skb->len - sizeof(*hdr)),
+ HTC_HDR_PAYLOADLEN);
+
+ if (ep->tx_credit_flow_enabled)
+ hdr->htc_info |= le32_encode_bits(ATH12K_HTC_FLAG_NEED_CREDIT_UPDATE,
+ HTC_HDR_FLAGS);
+
+ spin_lock_bh(&ep->htc->tx_lock);
+ hdr->ctrl_info = le32_encode_bits(ep->seq_no++, HTC_HDR_CONTROLBYTES1);
+ spin_unlock_bh(&ep->htc->tx_lock);
+}
+
+int ath12k_htc_send(struct ath12k_htc *htc,
+ enum ath12k_htc_ep_id eid,
+ struct sk_buff *skb)
+{
+ struct ath12k_htc_ep *ep = &htc->endpoint[eid];
+ struct ath12k_skb_cb *skb_cb = ATH12K_SKB_CB(skb);
+ struct device *dev = htc->ab->dev;
+ struct ath12k_base *ab = htc->ab;
+ int credits = 0;
+ int ret;
+
+ if (eid >= ATH12K_HTC_EP_COUNT) {
+ ath12k_warn(ab, "Invalid endpoint id: %d\n", eid);
+ return -ENOENT;
+ }
+
+ skb_push(skb, sizeof(struct ath12k_htc_hdr));
+
+ if (ep->tx_credit_flow_enabled) {
+ credits = DIV_ROUND_UP(skb->len, htc->target_credit_size);
+ spin_lock_bh(&htc->tx_lock);
+ if (ep->tx_credits < credits) {
+ ath12k_dbg(ab, ATH12K_DBG_HTC,
+ "htc insufficient credits ep %d required %d available %d\n",
+ eid, credits, ep->tx_credits);
+ spin_unlock_bh(&htc->tx_lock);
+ ret = -EAGAIN;
+ goto err_pull;
+ }
+ ep->tx_credits -= credits;
+ ath12k_dbg(ab, ATH12K_DBG_HTC,
+ "htc ep %d consumed %d credits (total %d)\n",
+ eid, credits, ep->tx_credits);
+ spin_unlock_bh(&htc->tx_lock);
+ }
+
+ ath12k_htc_prepare_tx_skb(ep, skb);
+
+ skb_cb->paddr = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
+ ret = dma_mapping_error(dev, skb_cb->paddr);
+ if (ret) {
+ ret = -EIO;
+ goto err_credits;
+ }
+
+ ret = ath12k_ce_send(htc->ab, skb, ep->ul_pipe_id, ep->eid);
+ if (ret)
+ goto err_unmap;
+
+ return 0;
+
+err_unmap:
+ dma_unmap_single(dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE);
+err_credits:
+ if (ep->tx_credit_flow_enabled) {
+ spin_lock_bh(&htc->tx_lock);
+ ep->tx_credits += credits;
+ ath12k_dbg(ab, ATH12K_DBG_HTC,
+ "htc ep %d reverted %d credits back (total %d)\n",
+ eid, credits, ep->tx_credits);
+ spin_unlock_bh(&htc->tx_lock);
+
+ if (ep->ep_ops.ep_tx_credits)
+ ep->ep_ops.ep_tx_credits(htc->ab);
+ }
+err_pull:
+ skb_pull(skb, sizeof(struct ath12k_htc_hdr));
+ return ret;
+}
+
+static void
+ath12k_htc_process_credit_report(struct ath12k_htc *htc,
+ const struct ath12k_htc_credit_report *report,
+ int len,
+ enum ath12k_htc_ep_id eid)
+{
+ struct ath12k_base *ab = htc->ab;
+ struct ath12k_htc_ep *ep;
+ int i, n_reports;
+
+ if (len % sizeof(*report))
+ ath12k_warn(ab, "Uneven credit report len %d", len);
+
+ n_reports = len / sizeof(*report);
+
+ spin_lock_bh(&htc->tx_lock);
+ for (i = 0; i < n_reports; i++, report++) {
+ if (report->eid >= ATH12K_HTC_EP_COUNT)
+ break;
+
+ ep = &htc->endpoint[report->eid];
+ ep->tx_credits += report->credits;
+
+ ath12k_dbg(ab, ATH12K_DBG_HTC, "htc ep %d got %d credits (total %d)\n",
+ report->eid, report->credits, ep->tx_credits);
+
+ if (ep->ep_ops.ep_tx_credits) {
+ spin_unlock_bh(&htc->tx_lock);
+ ep->ep_ops.ep_tx_credits(htc->ab);
+ spin_lock_bh(&htc->tx_lock);
+ }
+ }
+ spin_unlock_bh(&htc->tx_lock);
+}
+
+static int ath12k_htc_process_trailer(struct ath12k_htc *htc,
+ u8 *buffer,
+ int length,
+ enum ath12k_htc_ep_id src_eid)
+{
+ struct ath12k_base *ab = htc->ab;
+ int status = 0;
+ struct ath12k_htc_record *record;
+ size_t len;
+
+ while (length > 0) {
+ record = (struct ath12k_htc_record *)buffer;
+
+ if (length < sizeof(record->hdr)) {
+ status = -EINVAL;
+ break;
+ }
+
+ if (record->hdr.len > length) {
+ /* no room left in buffer for record */
+ ath12k_warn(ab, "Invalid record length: %d\n",
+ record->hdr.len);
+ status = -EINVAL;
+ break;
+ }
+
+ switch (record->hdr.id) {
+ case ATH12K_HTC_RECORD_CREDITS:
+ len = sizeof(struct ath12k_htc_credit_report);
+ if (record->hdr.len < len) {
+ ath12k_warn(ab, "Credit report too long\n");
+ status = -EINVAL;
+ break;
+ }
+ ath12k_htc_process_credit_report(htc,
+ record->credit_report,
+ record->hdr.len,
+ src_eid);
+ break;
+ default:
+ ath12k_warn(ab, "Unhandled record: id:%d length:%d\n",
+ record->hdr.id, record->hdr.len);
+ break;
+ }
+
+ if (status)
+ break;
+
+ /* multiple records may be present in a trailer */
+ buffer += sizeof(record->hdr) + record->hdr.len;
+ length -= sizeof(record->hdr) + record->hdr.len;
+ }
+
+ return status;
+}
+
+static void ath12k_htc_suspend_complete(struct ath12k_base *ab, bool ack)
+{
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot suspend complete %d\n", ack);
+
+ if (ack)
+ set_bit(ATH12K_FLAG_HTC_SUSPEND_COMPLETE, &ab->dev_flags);
+ else
+ clear_bit(ATH12K_FLAG_HTC_SUSPEND_COMPLETE, &ab->dev_flags);
+
+ complete(&ab->htc_suspend);
+}
+
+void ath12k_htc_rx_completion_handler(struct ath12k_base *ab,
+ struct sk_buff *skb)
+{
+ int status = 0;
+ struct ath12k_htc *htc = &ab->htc;
+ struct ath12k_htc_hdr *hdr;
+ struct ath12k_htc_ep *ep;
+ u16 payload_len;
+ u32 trailer_len = 0;
+ size_t min_len;
+ u8 eid;
+ bool trailer_present;
+
+ hdr = (struct ath12k_htc_hdr *)skb->data;
+ skb_pull(skb, sizeof(*hdr));
+
+ eid = le32_get_bits(hdr->htc_info, HTC_HDR_ENDPOINTID);
+
+ if (eid >= ATH12K_HTC_EP_COUNT) {
+ ath12k_warn(ab, "HTC Rx: invalid eid %d\n", eid);
+ goto out;
+ }
+
+ ep = &htc->endpoint[eid];
+
+ payload_len = le32_get_bits(hdr->htc_info, HTC_HDR_PAYLOADLEN);
+
+ if (payload_len + sizeof(*hdr) > ATH12K_HTC_MAX_LEN) {
+ ath12k_warn(ab, "HTC rx frame too long, len: %zu\n",
+ payload_len + sizeof(*hdr));
+ goto out;
+ }
+
+ if (skb->len < payload_len) {
+ ath12k_warn(ab, "HTC Rx: insufficient length, got %d, expected %d\n",
+ skb->len, payload_len);
+ goto out;
+ }
+
+ /* get flags to check for trailer */
+ trailer_present = le32_get_bits(hdr->htc_info, HTC_HDR_FLAGS) &
+ ATH12K_HTC_FLAG_TRAILER_PRESENT;
+
+ if (trailer_present) {
+ u8 *trailer;
+
+ trailer_len = le32_get_bits(hdr->ctrl_info,
+ HTC_HDR_CONTROLBYTES0);
+ min_len = sizeof(struct ath12k_htc_record_hdr);
+
+ if ((trailer_len < min_len) ||
+ (trailer_len > payload_len)) {
+ ath12k_warn(ab, "Invalid trailer length: %d\n",
+ trailer_len);
+ goto out;
+ }
+
+ trailer = (u8 *)hdr;
+ trailer += sizeof(*hdr);
+ trailer += payload_len;
+ trailer -= trailer_len;
+ status = ath12k_htc_process_trailer(htc, trailer,
+ trailer_len, eid);
+ if (status)
+ goto out;
+
+ skb_trim(skb, skb->len - trailer_len);
+ }
+
+ if (trailer_len >= payload_len)
+ /* zero length packet with trailer data, just drop these */
+ goto out;
+
+ if (eid == ATH12K_HTC_EP_0) {
+ struct ath12k_htc_msg *msg = (struct ath12k_htc_msg *)skb->data;
+
+ switch (le32_get_bits(msg->msg_svc_id, HTC_MSG_MESSAGEID)) {
+ case ATH12K_HTC_MSG_READY_ID:
+ case ATH12K_HTC_MSG_CONNECT_SERVICE_RESP_ID:
+ /* handle HTC control message */
+ if (completion_done(&htc->ctl_resp)) {
+ /* this is a fatal error, target should not be
+ * sending unsolicited messages on the ep 0
+ */
+ ath12k_warn(ab, "HTC rx ctrl still processing\n");
+ complete(&htc->ctl_resp);
+ goto out;
+ }
+
+ htc->control_resp_len =
+ min_t(int, skb->len,
+ ATH12K_HTC_MAX_CTRL_MSG_LEN);
+
+ memcpy(htc->control_resp_buffer, skb->data,
+ htc->control_resp_len);
+
+ complete(&htc->ctl_resp);
+ break;
+ case ATH12K_HTC_MSG_SEND_SUSPEND_COMPLETE:
+ ath12k_htc_suspend_complete(ab, true);
+ break;
+ case ATH12K_HTC_MSG_NACK_SUSPEND:
+ ath12k_htc_suspend_complete(ab, false);
+ break;
+ case ATH12K_HTC_MSG_WAKEUP_FROM_SUSPEND_ID:
+ break;
+ default:
+ ath12k_warn(ab, "ignoring unsolicited htc ep0 event %u\n",
+ le32_get_bits(msg->msg_svc_id, HTC_MSG_MESSAGEID));
+ break;
+ }
+ goto out;
+ }
+
+ ath12k_dbg(ab, ATH12K_DBG_HTC, "htc rx completion ep %d skb %pK\n",
+ eid, skb);
+ ep->ep_ops.ep_rx_complete(ab, skb);
+
+ /* poll tx completion for interrupt disabled CE's */
+ ath12k_ce_poll_send_completed(ab, ep->ul_pipe_id);
+
+ /* skb is now owned by the rx completion handler */
+ skb = NULL;
+out:
+ kfree_skb(skb);
+}
+
+static void ath12k_htc_control_rx_complete(struct ath12k_base *ab,
+ struct sk_buff *skb)
+{
+ /* This is unexpected. FW is not supposed to send regular rx on this
+ * endpoint.
+ */
+ ath12k_warn(ab, "unexpected htc rx\n");
+ kfree_skb(skb);
+}
+
+static const char *htc_service_name(enum ath12k_htc_svc_id id)
+{
+ switch (id) {
+ case ATH12K_HTC_SVC_ID_RESERVED:
+ return "Reserved";
+ case ATH12K_HTC_SVC_ID_RSVD_CTRL:
+ return "Control";
+ case ATH12K_HTC_SVC_ID_WMI_CONTROL:
+ return "WMI";
+ case ATH12K_HTC_SVC_ID_WMI_DATA_BE:
+ return "DATA BE";
+ case ATH12K_HTC_SVC_ID_WMI_DATA_BK:
+ return "DATA BK";
+ case ATH12K_HTC_SVC_ID_WMI_DATA_VI:
+ return "DATA VI";
+ case ATH12K_HTC_SVC_ID_WMI_DATA_VO:
+ return "DATA VO";
+ case ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC1:
+ return "WMI MAC1";
+ case ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC2:
+ return "WMI MAC2";
+ case ATH12K_HTC_SVC_ID_NMI_CONTROL:
+ return "NMI Control";
+ case ATH12K_HTC_SVC_ID_NMI_DATA:
+ return "NMI Data";
+ case ATH12K_HTC_SVC_ID_HTT_DATA_MSG:
+ return "HTT Data";
+ case ATH12K_HTC_SVC_ID_TEST_RAW_STREAMS:
+ return "RAW";
+ case ATH12K_HTC_SVC_ID_IPA_TX:
+ return "IPA TX";
+ case ATH12K_HTC_SVC_ID_PKT_LOG:
+ return "PKT LOG";
+ case ATH12K_HTC_SVC_ID_WMI_CONTROL_DIAG:
+ return "WMI DIAG";
+ }
+
+ return "Unknown";
+}
+
+static void ath12k_htc_reset_endpoint_states(struct ath12k_htc *htc)
+{
+ struct ath12k_htc_ep *ep;
+ int i;
+
+ for (i = ATH12K_HTC_EP_0; i < ATH12K_HTC_EP_COUNT; i++) {
+ ep = &htc->endpoint[i];
+ ep->service_id = ATH12K_HTC_SVC_ID_UNUSED;
+ ep->max_ep_message_len = 0;
+ ep->max_tx_queue_depth = 0;
+ ep->eid = i;
+ ep->htc = htc;
+ ep->tx_credit_flow_enabled = true;
+ }
+}
+
+static u8 ath12k_htc_get_credit_allocation(struct ath12k_htc *htc,
+ u16 service_id)
+{
+ struct ath12k_htc_svc_tx_credits *serv_entry;
+ u8 i, allocation = 0;
+
+ serv_entry = htc->service_alloc_table;
+
+ for (i = 0; i < ATH12K_HTC_MAX_SERVICE_ALLOC_ENTRIES; i++) {
+ if (serv_entry[i].service_id == service_id) {
+ allocation = serv_entry[i].credit_allocation;
+ break;
+ }
+ }
+
+ return allocation;
+}
+
+static int ath12k_htc_setup_target_buffer_assignments(struct ath12k_htc *htc)
+{
+ struct ath12k_htc_svc_tx_credits *serv_entry;
+ static const u32 svc_id[] = {
+ ATH12K_HTC_SVC_ID_WMI_CONTROL,
+ ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC1,
+ ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC2,
+ };
+ int i, credits;
+
+ credits = htc->total_transmit_credits;
+ serv_entry = htc->service_alloc_table;
+
+ if ((htc->wmi_ep_count == 0) ||
+ (htc->wmi_ep_count > ARRAY_SIZE(svc_id)))
+ return -EINVAL;
+
+ /* Divide credits among number of endpoints for WMI */
+ credits = credits / htc->wmi_ep_count;
+ for (i = 0; i < htc->wmi_ep_count; i++) {
+ serv_entry[i].service_id = svc_id[i];
+ serv_entry[i].credit_allocation = credits;
+ }
+
+ return 0;
+}
+
+int ath12k_htc_wait_target(struct ath12k_htc *htc)
+{
+ int i, status = 0;
+ struct ath12k_base *ab = htc->ab;
+ unsigned long time_left;
+ struct ath12k_htc_ready *ready;
+ u16 message_id;
+ u16 credit_count;
+ u16 credit_size;
+
+ time_left = wait_for_completion_timeout(&htc->ctl_resp,
+ ATH12K_HTC_WAIT_TIMEOUT_HZ);
+ if (!time_left) {
+ ath12k_warn(ab, "failed to receive control response completion, polling..\n");
+
+ for (i = 0; i < ab->hw_params->ce_count; i++)
+ ath12k_ce_per_engine_service(htc->ab, i);
+
+ time_left =
+ wait_for_completion_timeout(&htc->ctl_resp,
+ ATH12K_HTC_WAIT_TIMEOUT_HZ);
+
+ if (!time_left)
+ status = -ETIMEDOUT;
+ }
+
+ if (status < 0) {
+ ath12k_warn(ab, "ctl_resp never came in (%d)\n", status);
+ return status;
+ }
+
+ if (htc->control_resp_len < sizeof(*ready)) {
+ ath12k_warn(ab, "Invalid HTC ready msg len:%d\n",
+ htc->control_resp_len);
+ return -ECOMM;
+ }
+
+ ready = (struct ath12k_htc_ready *)htc->control_resp_buffer;
+ message_id = le32_get_bits(ready->id_credit_count, HTC_MSG_MESSAGEID);
+ credit_count = le32_get_bits(ready->id_credit_count,
+ HTC_READY_MSG_CREDITCOUNT);
+ credit_size = le32_get_bits(ready->size_ep, HTC_READY_MSG_CREDITSIZE);
+
+ if (message_id != ATH12K_HTC_MSG_READY_ID) {
+ ath12k_warn(ab, "Invalid HTC ready msg: 0x%x\n", message_id);
+ return -ECOMM;
+ }
+
+ htc->total_transmit_credits = credit_count;
+ htc->target_credit_size = credit_size;
+
+ ath12k_dbg(ab, ATH12K_DBG_HTC,
+ "Target ready! transmit resources: %d size:%d\n",
+ htc->total_transmit_credits, htc->target_credit_size);
+
+ if ((htc->total_transmit_credits == 0) ||
+ (htc->target_credit_size == 0)) {
+ ath12k_warn(ab, "Invalid credit size received\n");
+ return -ECOMM;
+ }
+
+ ath12k_htc_setup_target_buffer_assignments(htc);
+
+ return 0;
+}
+
+int ath12k_htc_connect_service(struct ath12k_htc *htc,
+ struct ath12k_htc_svc_conn_req *conn_req,
+ struct ath12k_htc_svc_conn_resp *conn_resp)
+{
+ struct ath12k_base *ab = htc->ab;
+ struct ath12k_htc_conn_svc *req_msg;
+ struct ath12k_htc_conn_svc_resp resp_msg_dummy;
+ struct ath12k_htc_conn_svc_resp *resp_msg = &resp_msg_dummy;
+ enum ath12k_htc_ep_id assigned_eid = ATH12K_HTC_EP_COUNT;
+ struct ath12k_htc_ep *ep;
+ struct sk_buff *skb;
+ unsigned int max_msg_size = 0;
+ int length, status;
+ unsigned long time_left;
+ bool disable_credit_flow_ctrl = false;
+ u16 message_id, service_id, flags = 0;
+ u8 tx_alloc = 0;
+
+ /* special case for HTC pseudo control service */
+ if (conn_req->service_id == ATH12K_HTC_SVC_ID_RSVD_CTRL) {
+ disable_credit_flow_ctrl = true;
+ assigned_eid = ATH12K_HTC_EP_0;
+ max_msg_size = ATH12K_HTC_MAX_CTRL_MSG_LEN;
+ memset(&resp_msg_dummy, 0, sizeof(resp_msg_dummy));
+ goto setup;
+ }
+
+ tx_alloc = ath12k_htc_get_credit_allocation(htc,
+ conn_req->service_id);
+ if (!tx_alloc)
+ ath12k_dbg(ab, ATH12K_DBG_BOOT,
+ "boot htc service %s does not allocate target credits\n",
+ htc_service_name(conn_req->service_id));
+
+ skb = ath12k_htc_build_tx_ctrl_skb();
+ if (!skb) {
+ ath12k_warn(ab, "Failed to allocate HTC packet\n");
+ return -ENOMEM;
+ }
+
+ length = sizeof(*req_msg);
+ skb_put(skb, length);
+ memset(skb->data, 0, length);
+
+ req_msg = (struct ath12k_htc_conn_svc *)skb->data;
+ req_msg->msg_svc_id = le32_encode_bits(ATH12K_HTC_MSG_CONNECT_SERVICE_ID,
+ HTC_MSG_MESSAGEID);
+
+ flags |= u32_encode_bits(tx_alloc, ATH12K_HTC_CONN_FLAGS_RECV_ALLOC);
+
+ /* Only enable credit flow control for WMI ctrl service */
+ if (!(conn_req->service_id == ATH12K_HTC_SVC_ID_WMI_CONTROL ||
+ conn_req->service_id == ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC1 ||
+ conn_req->service_id == ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC2)) {
+ flags |= ATH12K_HTC_CONN_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
+ disable_credit_flow_ctrl = true;
+ }
+
+ req_msg->flags_len = le32_encode_bits(flags, HTC_SVC_MSG_CONNECTIONFLAGS);
+ req_msg->msg_svc_id |= le32_encode_bits(conn_req->service_id,
+ HTC_SVC_MSG_SERVICE_ID);
+
+ reinit_completion(&htc->ctl_resp);
+
+ status = ath12k_htc_send(htc, ATH12K_HTC_EP_0, skb);
+ if (status) {
+ kfree_skb(skb);
+ return status;
+ }
+
+ /* wait for response */
+ time_left = wait_for_completion_timeout(&htc->ctl_resp,
+ ATH12K_HTC_CONN_SVC_TIMEOUT_HZ);
+ if (!time_left) {
+ ath12k_err(ab, "Service connect timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ /* we controlled the buffer creation, it's aligned */
+ resp_msg = (struct ath12k_htc_conn_svc_resp *)htc->control_resp_buffer;
+ message_id = le32_get_bits(resp_msg->msg_svc_id, HTC_MSG_MESSAGEID);
+ service_id = le32_get_bits(resp_msg->msg_svc_id,
+ HTC_SVC_RESP_MSG_SERVICEID);
+
+ if ((message_id != ATH12K_HTC_MSG_CONNECT_SERVICE_RESP_ID) ||
+ (htc->control_resp_len < sizeof(*resp_msg))) {
+ ath12k_err(ab, "Invalid resp message ID 0x%x", message_id);
+ return -EPROTO;
+ }
+
+ ath12k_dbg(ab, ATH12K_DBG_HTC,
+ "HTC Service %s connect response: status: %u, assigned ep: %u\n",
+ htc_service_name(service_id),
+ le32_get_bits(resp_msg->flags_len, HTC_SVC_RESP_MSG_STATUS),
+ le32_get_bits(resp_msg->flags_len, HTC_SVC_RESP_MSG_ENDPOINTID));
+
+ conn_resp->connect_resp_code = le32_get_bits(resp_msg->flags_len,
+ HTC_SVC_RESP_MSG_STATUS);
+
+ /* check response status */
+ if (conn_resp->connect_resp_code != ATH12K_HTC_CONN_SVC_STATUS_SUCCESS) {
+ ath12k_err(ab, "HTC Service %s connect request failed: 0x%x)\n",
+ htc_service_name(service_id),
+ conn_resp->connect_resp_code);
+ return -EPROTO;
+ }
+
+ assigned_eid = le32_get_bits(resp_msg->flags_len,
+ HTC_SVC_RESP_MSG_ENDPOINTID);
+
+ max_msg_size = le32_get_bits(resp_msg->flags_len,
+ HTC_SVC_RESP_MSG_MAXMSGSIZE);
+
+setup:
+
+ if (assigned_eid >= ATH12K_HTC_EP_COUNT)
+ return -EPROTO;
+
+ if (max_msg_size == 0)
+ return -EPROTO;
+
+ ep = &htc->endpoint[assigned_eid];
+ ep->eid = assigned_eid;
+
+ if (ep->service_id != ATH12K_HTC_SVC_ID_UNUSED)
+ return -EPROTO;
+
+ /* return assigned endpoint to caller */
+ conn_resp->eid = assigned_eid;
+ conn_resp->max_msg_len = le32_get_bits(resp_msg->flags_len,
+ HTC_SVC_RESP_MSG_MAXMSGSIZE);
+
+ /* setup the endpoint */
+ ep->service_id = conn_req->service_id;
+ ep->max_tx_queue_depth = conn_req->max_send_queue_depth;
+ ep->max_ep_message_len = le32_get_bits(resp_msg->flags_len,
+ HTC_SVC_RESP_MSG_MAXMSGSIZE);
+ ep->tx_credits = tx_alloc;
+
+ /* copy all the callbacks */
+ ep->ep_ops = conn_req->ep_ops;
+
+ status = ath12k_hif_map_service_to_pipe(htc->ab,
+ ep->service_id,
+ &ep->ul_pipe_id,
+ &ep->dl_pipe_id);
+ if (status)
+ return status;
+
+ ath12k_dbg(ab, ATH12K_DBG_BOOT,
+ "boot htc service '%s' ul pipe %d dl pipe %d eid %d ready\n",
+ htc_service_name(ep->service_id), ep->ul_pipe_id,
+ ep->dl_pipe_id, ep->eid);
+
+ if (disable_credit_flow_ctrl && ep->tx_credit_flow_enabled) {
+ ep->tx_credit_flow_enabled = false;
+ ath12k_dbg(ab, ATH12K_DBG_BOOT,
+ "boot htc service '%s' eid %d TX flow control disabled\n",
+ htc_service_name(ep->service_id), assigned_eid);
+ }
+
+ return status;
+}
+
+int ath12k_htc_start(struct ath12k_htc *htc)
+{
+ struct sk_buff *skb;
+ int status;
+ struct ath12k_base *ab = htc->ab;
+ struct ath12k_htc_setup_complete_extended *msg;
+
+ skb = ath12k_htc_build_tx_ctrl_skb();
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put(skb, sizeof(*msg));
+ memset(skb->data, 0, skb->len);
+
+ msg = (struct ath12k_htc_setup_complete_extended *)skb->data;
+ msg->msg_id = le32_encode_bits(ATH12K_HTC_MSG_SETUP_COMPLETE_EX_ID,
+ HTC_MSG_MESSAGEID);
+
+ ath12k_dbg(ab, ATH12K_DBG_HTC, "HTC is using TX credit flow control\n");
+
+ status = ath12k_htc_send(htc, ATH12K_HTC_EP_0, skb);
+ if (status) {
+ kfree_skb(skb);
+ return status;
+ }
+
+ return 0;
+}
+
+int ath12k_htc_init(struct ath12k_base *ab)
+{
+ struct ath12k_htc *htc = &ab->htc;
+ struct ath12k_htc_svc_conn_req conn_req = { };
+ struct ath12k_htc_svc_conn_resp conn_resp = { };
+ int ret;
+
+ spin_lock_init(&htc->tx_lock);
+
+ ath12k_htc_reset_endpoint_states(htc);
+
+ htc->ab = ab;
+
+ switch (ab->wmi_ab.preferred_hw_mode) {
+ case WMI_HOST_HW_MODE_SINGLE:
+ htc->wmi_ep_count = 1;
+ break;
+ case WMI_HOST_HW_MODE_DBS:
+ case WMI_HOST_HW_MODE_DBS_OR_SBS:
+ htc->wmi_ep_count = 2;
+ break;
+ case WMI_HOST_HW_MODE_DBS_SBS:
+ htc->wmi_ep_count = 3;
+ break;
+ default:
+ htc->wmi_ep_count = ab->hw_params->max_radios;
+ break;
+ }
+
+ /* setup our pseudo HTC control endpoint connection */
+ conn_req.ep_ops.ep_tx_complete = ath12k_htc_control_tx_complete;
+ conn_req.ep_ops.ep_rx_complete = ath12k_htc_control_rx_complete;
+ conn_req.max_send_queue_depth = ATH12K_NUM_CONTROL_TX_BUFFERS;
+ conn_req.service_id = ATH12K_HTC_SVC_ID_RSVD_CTRL;
+
+ /* connect fake service */
+ ret = ath12k_htc_connect_service(htc, &conn_req, &conn_resp);
+ if (ret) {
+ ath12k_err(ab, "could not connect to htc service (%d)\n", ret);
+ return ret;
+ }
+
+ init_completion(&htc->ctl_resp);
+
+ return 0;
+}
diff --git a/drivers/net/wireless/ath/ath12k/htc.h b/drivers/net/wireless/ath/ath12k/htc.h
new file mode 100644
index 000000000000..7e3dccc7cc14
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/htc.h
@@ -0,0 +1,316 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef ATH12K_HTC_H
+#define ATH12K_HTC_H
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/bug.h>
+#include <linux/skbuff.h>
+#include <linux/timer.h>
+
+struct ath12k_base;
+
+#define HTC_HDR_ENDPOINTID GENMASK(7, 0)
+#define HTC_HDR_FLAGS GENMASK(15, 8)
+#define HTC_HDR_PAYLOADLEN GENMASK(31, 16)
+#define HTC_HDR_CONTROLBYTES0 GENMASK(7, 0)
+#define HTC_HDR_CONTROLBYTES1 GENMASK(15, 8)
+#define HTC_HDR_RESERVED GENMASK(31, 16)
+
+#define HTC_SVC_MSG_SERVICE_ID GENMASK(31, 16)
+#define HTC_SVC_MSG_CONNECTIONFLAGS GENMASK(15, 0)
+#define HTC_SVC_MSG_SERVICEMETALENGTH GENMASK(23, 16)
+#define HTC_READY_MSG_CREDITCOUNT GENMASK(31, 16)
+#define HTC_READY_MSG_CREDITSIZE GENMASK(15, 0)
+#define HTC_READY_MSG_MAXENDPOINTS GENMASK(23, 16)
+
+#define HTC_READY_EX_MSG_HTCVERSION GENMASK(7, 0)
+#define HTC_READY_EX_MSG_MAXMSGSPERHTCBUNDLE GENMASK(15, 8)
+
+#define HTC_SVC_RESP_MSG_SERVICEID GENMASK(31, 16)
+#define HTC_SVC_RESP_MSG_STATUS GENMASK(7, 0)
+#define HTC_SVC_RESP_MSG_ENDPOINTID GENMASK(15, 8)
+#define HTC_SVC_RESP_MSG_MAXMSGSIZE GENMASK(31, 16)
+#define HTC_SVC_RESP_MSG_SERVICEMETALENGTH GENMASK(7, 0)
+
+#define HTC_MSG_MESSAGEID GENMASK(15, 0)
+#define HTC_SETUP_COMPLETE_EX_MSG_SETUPFLAGS GENMASK(31, 0)
+#define HTC_SETUP_COMPLETE_EX_MSG_MAXMSGSPERBUNDLEDRECV GENMASK(7, 0)
+#define HTC_SETUP_COMPLETE_EX_MSG_RSVD0 GENMASK(15, 8)
+#define HTC_SETUP_COMPLETE_EX_MSG_RSVD1 GENMASK(23, 16)
+#define HTC_SETUP_COMPLETE_EX_MSG_RSVD2 GENMASK(31, 24)
+
+enum ath12k_htc_tx_flags {
+ ATH12K_HTC_FLAG_NEED_CREDIT_UPDATE = 0x01,
+ ATH12K_HTC_FLAG_SEND_BUNDLE = 0x02
+};
+
+enum ath12k_htc_rx_flags {
+ ATH12K_HTC_FLAG_TRAILER_PRESENT = 0x02,
+ ATH12K_HTC_FLAG_BUNDLE_MASK = 0xF0
+};
+
+struct ath12k_htc_hdr {
+ __le32 htc_info;
+ __le32 ctrl_info;
+} __packed __aligned(4);
+
+enum ath12k_htc_msg_id {
+ ATH12K_HTC_MSG_READY_ID = 1,
+ ATH12K_HTC_MSG_CONNECT_SERVICE_ID = 2,
+ ATH12K_HTC_MSG_CONNECT_SERVICE_RESP_ID = 3,
+ ATH12K_HTC_MSG_SETUP_COMPLETE_ID = 4,
+ ATH12K_HTC_MSG_SETUP_COMPLETE_EX_ID = 5,
+ ATH12K_HTC_MSG_SEND_SUSPEND_COMPLETE = 6,
+ ATH12K_HTC_MSG_NACK_SUSPEND = 7,
+ ATH12K_HTC_MSG_WAKEUP_FROM_SUSPEND_ID = 8,
+};
+
+enum ath12k_htc_version {
+ ATH12K_HTC_VERSION_2P0 = 0x00, /* 2.0 */
+ ATH12K_HTC_VERSION_2P1 = 0x01, /* 2.1 */
+};
+
+enum ath12k_htc_conn_flag_threshold_level {
+ ATH12K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_ONE_FOURTH,
+ ATH12K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_ONE_HALF,
+ ATH12K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_THREE_FOURTHS,
+ ATH12K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_UNITY,
+};
+
+#define ATH12K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_MASK GENMASK(1, 0)
+#define ATH12K_HTC_CONN_FLAGS_REDUCE_CREDIT_DRIBBLE BIT(2)
+#define ATH12K_HTC_CONN_FLAGS_DISABLE_CREDIT_FLOW_CTRL BIT(3)
+#define ATH12K_HTC_CONN_FLAGS_RECV_ALLOC GENMASK(15, 8)
+
+enum ath12k_htc_conn_svc_status {
+ ATH12K_HTC_CONN_SVC_STATUS_SUCCESS = 0,
+ ATH12K_HTC_CONN_SVC_STATUS_NOT_FOUND = 1,
+ ATH12K_HTC_CONN_SVC_STATUS_FAILED = 2,
+ ATH12K_HTC_CONN_SVC_STATUS_NO_RESOURCES = 3,
+ ATH12K_HTC_CONN_SVC_STATUS_NO_MORE_EP = 4
+};
+
+struct ath12k_htc_ready {
+ __le32 id_credit_count;
+ __le32 size_ep;
+} __packed;
+
+struct ath12k_htc_ready_extended {
+ struct ath12k_htc_ready base;
+ __le32 ver_bundle;
+} __packed;
+
+struct ath12k_htc_conn_svc {
+ __le32 msg_svc_id;
+ __le32 flags_len;
+} __packed;
+
+struct ath12k_htc_conn_svc_resp {
+ __le32 msg_svc_id;
+ __le32 flags_len;
+ __le32 svc_meta_pad;
+} __packed;
+
+struct ath12k_htc_setup_complete_extended {
+ __le32 msg_id;
+ __le32 flags;
+ __le32 max_msgs_per_bundled_recv;
+} __packed;
+
+struct ath12k_htc_msg {
+ __le32 msg_svc_id;
+ __le32 flags_len;
+} __packed __aligned(4);
+
+enum ath12k_htc_record_id {
+ ATH12K_HTC_RECORD_NULL = 0,
+ ATH12K_HTC_RECORD_CREDITS = 1
+};
+
+struct ath12k_htc_record_hdr {
+ u8 id; /* @enum ath12k_htc_record_id */
+ u8 len;
+ u8 pad0;
+ u8 pad1;
+} __packed;
+
+struct ath12k_htc_credit_report {
+ u8 eid; /* @enum ath12k_htc_ep_id */
+ u8 credits;
+ u8 pad0;
+ u8 pad1;
+} __packed;
+
+struct ath12k_htc_record {
+ struct ath12k_htc_record_hdr hdr;
+ struct ath12k_htc_credit_report credit_report[];
+} __packed __aligned(4);
+
+/* HTC FRAME structure layout draft
+ *
+ * note: the trailer offset is dynamic depending
+ * on payload length. this is only a struct layout draft
+ *
+ *=======================================================
+ *
+ * HTC HEADER
+ *
+ *=======================================================
+ * |
+ * HTC message | payload
+ * (variable length) | (variable length)
+ *=======================================================
+ *
+ * HTC Record
+ *
+ *=======================================================
+ */
+
+enum ath12k_htc_svc_gid {
+ ATH12K_HTC_SVC_GRP_RSVD = 0,
+ ATH12K_HTC_SVC_GRP_WMI = 1,
+ ATH12K_HTC_SVC_GRP_NMI = 2,
+ ATH12K_HTC_SVC_GRP_HTT = 3,
+ ATH12K_HTC_SVC_GRP_CFG = 4,
+ ATH12K_HTC_SVC_GRP_IPA = 5,
+ ATH12K_HTC_SVC_GRP_PKTLOG = 6,
+
+ ATH12K_HTC_SVC_GRP_TEST = 254,
+ ATH12K_HTC_SVC_GRP_LAST = 255,
+};
+
+#define SVC(group, idx) \
+ (int)(((int)(group) << 8) | (int)(idx))
+
+enum ath12k_htc_svc_id {
+ /* NOTE: service ID of 0x0000 is reserved and should never be used */
+ ATH12K_HTC_SVC_ID_RESERVED = 0x0000,
+ ATH12K_HTC_SVC_ID_UNUSED = ATH12K_HTC_SVC_ID_RESERVED,
+
+ ATH12K_HTC_SVC_ID_RSVD_CTRL = SVC(ATH12K_HTC_SVC_GRP_RSVD, 1),
+ ATH12K_HTC_SVC_ID_WMI_CONTROL = SVC(ATH12K_HTC_SVC_GRP_WMI, 0),
+ ATH12K_HTC_SVC_ID_WMI_DATA_BE = SVC(ATH12K_HTC_SVC_GRP_WMI, 1),
+ ATH12K_HTC_SVC_ID_WMI_DATA_BK = SVC(ATH12K_HTC_SVC_GRP_WMI, 2),
+ ATH12K_HTC_SVC_ID_WMI_DATA_VI = SVC(ATH12K_HTC_SVC_GRP_WMI, 3),
+ ATH12K_HTC_SVC_ID_WMI_DATA_VO = SVC(ATH12K_HTC_SVC_GRP_WMI, 4),
+ ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC1 = SVC(ATH12K_HTC_SVC_GRP_WMI, 5),
+ ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC2 = SVC(ATH12K_HTC_SVC_GRP_WMI, 6),
+ ATH12K_HTC_SVC_ID_WMI_CONTROL_DIAG = SVC(ATH12K_HTC_SVC_GRP_WMI, 7),
+
+ ATH12K_HTC_SVC_ID_NMI_CONTROL = SVC(ATH12K_HTC_SVC_GRP_NMI, 0),
+ ATH12K_HTC_SVC_ID_NMI_DATA = SVC(ATH12K_HTC_SVC_GRP_NMI, 1),
+
+ ATH12K_HTC_SVC_ID_HTT_DATA_MSG = SVC(ATH12K_HTC_SVC_GRP_HTT, 0),
+
+ /* raw stream service (i.e. flash, tcmd, calibration apps) */
+ ATH12K_HTC_SVC_ID_TEST_RAW_STREAMS = SVC(ATH12K_HTC_SVC_GRP_TEST, 0),
+ ATH12K_HTC_SVC_ID_IPA_TX = SVC(ATH12K_HTC_SVC_GRP_IPA, 0),
+ ATH12K_HTC_SVC_ID_PKT_LOG = SVC(ATH12K_HTC_SVC_GRP_PKTLOG, 0),
+};
+
+#undef SVC
+
+enum ath12k_htc_ep_id {
+ ATH12K_HTC_EP_UNUSED = -1,
+ ATH12K_HTC_EP_0 = 0,
+ ATH12K_HTC_EP_1 = 1,
+ ATH12K_HTC_EP_2,
+ ATH12K_HTC_EP_3,
+ ATH12K_HTC_EP_4,
+ ATH12K_HTC_EP_5,
+ ATH12K_HTC_EP_6,
+ ATH12K_HTC_EP_7,
+ ATH12K_HTC_EP_8,
+ ATH12K_HTC_EP_COUNT,
+};
+
+struct ath12k_htc_ep_ops {
+ void (*ep_tx_complete)(struct ath12k_base *ab, struct sk_buff *skb);
+ void (*ep_rx_complete)(struct ath12k_base *ab, struct sk_buff *skb);
+ void (*ep_tx_credits)(struct ath12k_base *ab);
+};
+
+/* service connection information */
+struct ath12k_htc_svc_conn_req {
+ u16 service_id;
+ struct ath12k_htc_ep_ops ep_ops;
+ int max_send_queue_depth;
+};
+
+/* service connection response information */
+struct ath12k_htc_svc_conn_resp {
+ u8 buffer_len;
+ u8 actual_len;
+ enum ath12k_htc_ep_id eid;
+ unsigned int max_msg_len;
+ u8 connect_resp_code;
+};
+
+#define ATH12K_NUM_CONTROL_TX_BUFFERS 2
+#define ATH12K_HTC_MAX_LEN 4096
+#define ATH12K_HTC_MAX_CTRL_MSG_LEN 256
+#define ATH12K_HTC_WAIT_TIMEOUT_HZ (1 * HZ)
+#define ATH12K_HTC_CONTROL_BUFFER_SIZE (ATH12K_HTC_MAX_CTRL_MSG_LEN + \
+ sizeof(struct ath12k_htc_hdr))
+#define ATH12K_HTC_CONN_SVC_TIMEOUT_HZ (1 * HZ)
+#define ATH12K_HTC_MAX_SERVICE_ALLOC_ENTRIES 8
+
+struct ath12k_htc_ep {
+ struct ath12k_htc *htc;
+ enum ath12k_htc_ep_id eid;
+ enum ath12k_htc_svc_id service_id;
+ struct ath12k_htc_ep_ops ep_ops;
+
+ int max_tx_queue_depth;
+ int max_ep_message_len;
+ u8 ul_pipe_id;
+ u8 dl_pipe_id;
+
+ u8 seq_no; /* for debugging */
+ int tx_credits;
+ bool tx_credit_flow_enabled;
+};
+
+struct ath12k_htc_svc_tx_credits {
+ u16 service_id;
+ u8 credit_allocation;
+};
+
+struct ath12k_htc {
+ struct ath12k_base *ab;
+ struct ath12k_htc_ep endpoint[ATH12K_HTC_EP_COUNT];
+
+ /* protects endpoints */
+ spinlock_t tx_lock;
+
+ u8 control_resp_buffer[ATH12K_HTC_MAX_CTRL_MSG_LEN];
+ int control_resp_len;
+
+ struct completion ctl_resp;
+
+ int total_transmit_credits;
+ struct ath12k_htc_svc_tx_credits
+ service_alloc_table[ATH12K_HTC_MAX_SERVICE_ALLOC_ENTRIES];
+ int target_credit_size;
+ u8 wmi_ep_count;
+};
+
+int ath12k_htc_init(struct ath12k_base *ar);
+int ath12k_htc_wait_target(struct ath12k_htc *htc);
+int ath12k_htc_start(struct ath12k_htc *htc);
+int ath12k_htc_connect_service(struct ath12k_htc *htc,
+ struct ath12k_htc_svc_conn_req *conn_req,
+ struct ath12k_htc_svc_conn_resp *conn_resp);
+int ath12k_htc_send(struct ath12k_htc *htc, enum ath12k_htc_ep_id eid,
+ struct sk_buff *packet);
+struct sk_buff *ath12k_htc_alloc_skb(struct ath12k_base *ar, int size);
+void ath12k_htc_rx_completion_handler(struct ath12k_base *ar,
+ struct sk_buff *skb);
+
+#endif
diff --git a/drivers/net/wireless/ath/ath12k/hw.c b/drivers/net/wireless/ath/ath12k/hw.c
new file mode 100644
index 000000000000..91d576fd4b0f
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/hw.c
@@ -0,0 +1,1041 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/bitfield.h>
+
+#include "debug.h"
+#include "core.h"
+#include "ce.h"
+#include "hw.h"
+#include "mhi.h"
+#include "dp_rx.h"
+
+static u8 ath12k_hw_qcn9274_mac_from_pdev_id(int pdev_idx)
+{
+ return pdev_idx;
+}
+
+static int ath12k_hw_mac_id_to_pdev_id_qcn9274(const struct ath12k_hw_params *hw,
+ int mac_id)
+{
+ return mac_id;
+}
+
+static int ath12k_hw_mac_id_to_srng_id_qcn9274(const struct ath12k_hw_params *hw,
+ int mac_id)
+{
+ return 0;
+}
+
+static u8 ath12k_hw_get_ring_selector_qcn9274(struct sk_buff *skb)
+{
+ return smp_processor_id();
+}
+
+static bool ath12k_dp_srng_is_comp_ring_qcn9274(int ring_num)
+{
+ if (ring_num < 3 || ring_num == 4)
+ return true;
+
+ return false;
+}
+
+static int ath12k_hw_mac_id_to_pdev_id_wcn7850(const struct ath12k_hw_params *hw,
+ int mac_id)
+{
+ return 0;
+}
+
+static int ath12k_hw_mac_id_to_srng_id_wcn7850(const struct ath12k_hw_params *hw,
+ int mac_id)
+{
+ return mac_id;
+}
+
+static u8 ath12k_hw_get_ring_selector_wcn7850(struct sk_buff *skb)
+{
+ return skb_get_queue_mapping(skb);
+}
+
+static bool ath12k_dp_srng_is_comp_ring_wcn7850(int ring_num)
+{
+ if (ring_num == 0 || ring_num == 2 || ring_num == 4)
+ return true;
+
+ return false;
+}
+
+static const struct ath12k_hw_ops qcn9274_ops = {
+ .get_hw_mac_from_pdev_id = ath12k_hw_qcn9274_mac_from_pdev_id,
+ .mac_id_to_pdev_id = ath12k_hw_mac_id_to_pdev_id_qcn9274,
+ .mac_id_to_srng_id = ath12k_hw_mac_id_to_srng_id_qcn9274,
+ .rxdma_ring_sel_config = ath12k_dp_rxdma_ring_sel_config_qcn9274,
+ .get_ring_selector = ath12k_hw_get_ring_selector_qcn9274,
+ .dp_srng_is_tx_comp_ring = ath12k_dp_srng_is_comp_ring_qcn9274,
+};
+
+static const struct ath12k_hw_ops wcn7850_ops = {
+ .get_hw_mac_from_pdev_id = ath12k_hw_qcn9274_mac_from_pdev_id,
+ .mac_id_to_pdev_id = ath12k_hw_mac_id_to_pdev_id_wcn7850,
+ .mac_id_to_srng_id = ath12k_hw_mac_id_to_srng_id_wcn7850,
+ .rxdma_ring_sel_config = ath12k_dp_rxdma_ring_sel_config_wcn7850,
+ .get_ring_selector = ath12k_hw_get_ring_selector_wcn7850,
+ .dp_srng_is_tx_comp_ring = ath12k_dp_srng_is_comp_ring_wcn7850,
+};
+
+#define ATH12K_TX_RING_MASK_0 0x1
+#define ATH12K_TX_RING_MASK_1 0x2
+#define ATH12K_TX_RING_MASK_2 0x4
+#define ATH12K_TX_RING_MASK_3 0x8
+#define ATH12K_TX_RING_MASK_4 0x10
+
+#define ATH12K_RX_RING_MASK_0 0x1
+#define ATH12K_RX_RING_MASK_1 0x2
+#define ATH12K_RX_RING_MASK_2 0x4
+#define ATH12K_RX_RING_MASK_3 0x8
+
+#define ATH12K_RX_ERR_RING_MASK_0 0x1
+
+#define ATH12K_RX_WBM_REL_RING_MASK_0 0x1
+
+#define ATH12K_REO_STATUS_RING_MASK_0 0x1
+
+#define ATH12K_HOST2RXDMA_RING_MASK_0 0x1
+
+#define ATH12K_RX_MON_RING_MASK_0 0x1
+#define ATH12K_RX_MON_RING_MASK_1 0x2
+#define ATH12K_RX_MON_RING_MASK_2 0x4
+
+#define ATH12K_TX_MON_RING_MASK_0 0x1
+#define ATH12K_TX_MON_RING_MASK_1 0x2
+
+/* Target firmware's Copy Engine configuration. */
+static const struct ce_pipe_config ath12k_target_ce_config_wlan_qcn9274[] = {
+ /* CE0: host->target HTC control and raw streams */
+ {
+ .pipenum = __cpu_to_le32(0),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE1: target->host HTT + HTC control */
+ {
+ .pipenum = __cpu_to_le32(1),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE2: target->host WMI */
+ {
+ .pipenum = __cpu_to_le32(2),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE3: host->target WMI (mac0) */
+ {
+ .pipenum = __cpu_to_le32(3),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE4: host->target HTT */
+ {
+ .pipenum = __cpu_to_le32(4),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(256),
+ .nbytes_max = __cpu_to_le32(256),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE5: target->host Pktlog */
+ {
+ .pipenum = __cpu_to_le32(5),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE6: Reserved for target autonomous hif_memcpy */
+ {
+ .pipenum = __cpu_to_le32(6),
+ .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(16384),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE7: host->target WMI (mac1) */
+ {
+ .pipenum = __cpu_to_le32(7),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE8: Reserved for target autonomous hif_memcpy */
+ {
+ .pipenum = __cpu_to_le32(8),
+ .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(16384),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE9, 10 and 11: Reserved for MHI */
+
+ /* CE12: Target CV prefetch */
+ {
+ .pipenum = __cpu_to_le32(12),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE13: Target CV prefetch */
+ {
+ .pipenum = __cpu_to_le32(13),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE14: WMI logging/CFR/Spectral/Radar */
+ {
+ .pipenum = __cpu_to_le32(14),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE15: Reserved */
+};
+
+/* Target firmware's Copy Engine configuration. */
+static const struct ce_pipe_config ath12k_target_ce_config_wlan_wcn7850[] = {
+ /* CE0: host->target HTC control and raw streams */
+ {
+ .pipenum = __cpu_to_le32(0),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE1: target->host HTT + HTC control */
+ {
+ .pipenum = __cpu_to_le32(1),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE2: target->host WMI */
+ {
+ .pipenum = __cpu_to_le32(2),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE3: host->target WMI */
+ {
+ .pipenum = __cpu_to_le32(3),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE4: host->target HTT */
+ {
+ .pipenum = __cpu_to_le32(4),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(256),
+ .nbytes_max = __cpu_to_le32(256),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE5: target->host Pktlog */
+ {
+ .pipenum = __cpu_to_le32(5),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE6: Reserved for target autonomous hif_memcpy */
+ {
+ .pipenum = __cpu_to_le32(6),
+ .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(16384),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE7 used only by Host */
+ {
+ .pipenum = __cpu_to_le32(7),
+ .pipedir = __cpu_to_le32(PIPEDIR_INOUT_H2H),
+ .nentries = __cpu_to_le32(0),
+ .nbytes_max = __cpu_to_le32(0),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE8 target->host used only by IPA */
+ {
+ .pipenum = __cpu_to_le32(8),
+ .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(16384),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+ /* CE 9, 10, 11 are used by MHI driver */
+};
+
+/* Map from service/endpoint to Copy Engine.
+ * This table is derived from the CE_PCI TABLE, above.
+ * It is passed to the Target at startup for use by firmware.
+ */
+static const struct service_to_pipe ath12k_target_service_to_ce_map_wlan_qcn9274[] = {
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_VO),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_VO),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_BK),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_BK),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_BE),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_BE),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_VI),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_VI),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_CONTROL),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_CONTROL),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_RSVD_CTRL),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(0),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_RSVD_CTRL),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(1),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_TEST_RAW_STREAMS),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(0),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_TEST_RAW_STREAMS),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(1),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_HTT_DATA_MSG),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(4),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_HTT_DATA_MSG),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(1),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC1),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(7),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC1),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_PKT_LOG),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(5),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_CONTROL_DIAG),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(14),
+ },
+
+ /* (Additions here) */
+
+ { /* must be last */
+ __cpu_to_le32(0),
+ __cpu_to_le32(0),
+ __cpu_to_le32(0),
+ },
+};
+
+static const struct service_to_pipe ath12k_target_service_to_ce_map_wlan_wcn7850[] = {
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_VO),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_VO),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_BK),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_BK),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_BE),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_BE),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_VI),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_DATA_VI),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_CONTROL),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_WMI_CONTROL),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_RSVD_CTRL),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(0),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_RSVD_CTRL),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_HTT_DATA_MSG),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(4),
+ },
+ {
+ __cpu_to_le32(ATH12K_HTC_SVC_ID_HTT_DATA_MSG),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(1),
+ },
+
+ /* (Additions here) */
+
+ { /* must be last */
+ __cpu_to_le32(0),
+ __cpu_to_le32(0),
+ __cpu_to_le32(0),
+ },
+};
+
+static const struct ath12k_hw_ring_mask ath12k_hw_ring_mask_qcn9274 = {
+ .tx = {
+ ATH12K_TX_RING_MASK_0,
+ ATH12K_TX_RING_MASK_1,
+ ATH12K_TX_RING_MASK_2,
+ ATH12K_TX_RING_MASK_3,
+ },
+ .rx_mon_dest = {
+ 0, 0, 0,
+ ATH12K_RX_MON_RING_MASK_0,
+ ATH12K_RX_MON_RING_MASK_1,
+ ATH12K_RX_MON_RING_MASK_2,
+ },
+ .rx = {
+ 0, 0, 0, 0,
+ ATH12K_RX_RING_MASK_0,
+ ATH12K_RX_RING_MASK_1,
+ ATH12K_RX_RING_MASK_2,
+ ATH12K_RX_RING_MASK_3,
+ },
+ .rx_err = {
+ 0, 0, 0,
+ ATH12K_RX_ERR_RING_MASK_0,
+ },
+ .rx_wbm_rel = {
+ 0, 0, 0,
+ ATH12K_RX_WBM_REL_RING_MASK_0,
+ },
+ .reo_status = {
+ 0, 0, 0,
+ ATH12K_REO_STATUS_RING_MASK_0,
+ },
+ .host2rxdma = {
+ 0, 0, 0,
+ ATH12K_HOST2RXDMA_RING_MASK_0,
+ },
+ .tx_mon_dest = {
+ ATH12K_TX_MON_RING_MASK_0,
+ ATH12K_TX_MON_RING_MASK_1,
+ },
+};
+
+static const struct ath12k_hw_ring_mask ath12k_hw_ring_mask_wcn7850 = {
+ .tx = {
+ ATH12K_TX_RING_MASK_0,
+ ATH12K_TX_RING_MASK_2,
+ ATH12K_TX_RING_MASK_4,
+ },
+ .rx_mon_dest = {
+ },
+ .rx = {
+ 0, 0, 0,
+ ATH12K_RX_RING_MASK_0,
+ ATH12K_RX_RING_MASK_1,
+ ATH12K_RX_RING_MASK_2,
+ ATH12K_RX_RING_MASK_3,
+ },
+ .rx_err = {
+ ATH12K_RX_ERR_RING_MASK_0,
+ },
+ .rx_wbm_rel = {
+ ATH12K_RX_WBM_REL_RING_MASK_0,
+ },
+ .reo_status = {
+ ATH12K_REO_STATUS_RING_MASK_0,
+ },
+ .host2rxdma = {
+ },
+ .tx_mon_dest = {
+ },
+};
+
+static const struct ath12k_hw_regs qcn9274_v1_regs = {
+ /* SW2TCL(x) R0 ring configuration address */
+ .hal_tcl1_ring_id = 0x00000908,
+ .hal_tcl1_ring_misc = 0x00000910,
+ .hal_tcl1_ring_tp_addr_lsb = 0x0000091c,
+ .hal_tcl1_ring_tp_addr_msb = 0x00000920,
+ .hal_tcl1_ring_consumer_int_setup_ix0 = 0x00000930,
+ .hal_tcl1_ring_consumer_int_setup_ix1 = 0x00000934,
+ .hal_tcl1_ring_msi1_base_lsb = 0x00000948,
+ .hal_tcl1_ring_msi1_base_msb = 0x0000094c,
+ .hal_tcl1_ring_msi1_data = 0x00000950,
+ .hal_tcl_ring_base_lsb = 0x00000b58,
+
+ /* TCL STATUS ring address */
+ .hal_tcl_status_ring_base_lsb = 0x00000d38,
+
+ .hal_wbm_idle_ring_base_lsb = 0x00000d0c,
+ .hal_wbm_idle_ring_misc_addr = 0x00000d1c,
+ .hal_wbm_r0_idle_list_cntl_addr = 0x00000210,
+ .hal_wbm_r0_idle_list_size_addr = 0x00000214,
+ .hal_wbm_scattered_ring_base_lsb = 0x00000220,
+ .hal_wbm_scattered_ring_base_msb = 0x00000224,
+ .hal_wbm_scattered_desc_head_info_ix0 = 0x00000230,
+ .hal_wbm_scattered_desc_head_info_ix1 = 0x00000234,
+ .hal_wbm_scattered_desc_tail_info_ix0 = 0x00000240,
+ .hal_wbm_scattered_desc_tail_info_ix1 = 0x00000244,
+ .hal_wbm_scattered_desc_ptr_hp_addr = 0x0000024c,
+
+ .hal_wbm_sw_release_ring_base_lsb = 0x0000034c,
+ .hal_wbm_sw1_release_ring_base_lsb = 0x000003c4,
+ .hal_wbm0_release_ring_base_lsb = 0x00000dd8,
+ .hal_wbm1_release_ring_base_lsb = 0x00000e50,
+
+ /* PCIe base address */
+ .pcie_qserdes_sysclk_en_sel = 0x01e0c0a8,
+ .pcie_pcs_osc_dtct_config_base = 0x01e0d45c,
+
+ /* PPE release ring address */
+ .hal_ppe_rel_ring_base = 0x0000043c,
+
+ /* REO DEST ring address */
+ .hal_reo2_ring_base = 0x0000055c,
+ .hal_reo1_misc_ctrl_addr = 0x00000b7c,
+ .hal_reo1_sw_cookie_cfg0 = 0x00000050,
+ .hal_reo1_sw_cookie_cfg1 = 0x00000054,
+ .hal_reo1_qdesc_lut_base0 = 0x00000058,
+ .hal_reo1_qdesc_lut_base1 = 0x0000005c,
+ .hal_reo1_ring_base_lsb = 0x000004e4,
+ .hal_reo1_ring_base_msb = 0x000004e8,
+ .hal_reo1_ring_id = 0x000004ec,
+ .hal_reo1_ring_misc = 0x000004f4,
+ .hal_reo1_ring_hp_addr_lsb = 0x000004f8,
+ .hal_reo1_ring_hp_addr_msb = 0x000004fc,
+ .hal_reo1_ring_producer_int_setup = 0x00000508,
+ .hal_reo1_ring_msi1_base_lsb = 0x0000052C,
+ .hal_reo1_ring_msi1_base_msb = 0x00000530,
+ .hal_reo1_ring_msi1_data = 0x00000534,
+ .hal_reo1_aging_thres_ix0 = 0x00000b08,
+ .hal_reo1_aging_thres_ix1 = 0x00000b0c,
+ .hal_reo1_aging_thres_ix2 = 0x00000b10,
+ .hal_reo1_aging_thres_ix3 = 0x00000b14,
+
+ /* REO Exception ring address */
+ .hal_reo2_sw0_ring_base = 0x000008a4,
+
+ /* REO Reinject ring address */
+ .hal_sw2reo_ring_base = 0x00000304,
+ .hal_sw2reo1_ring_base = 0x0000037c,
+
+ /* REO cmd ring address */
+ .hal_reo_cmd_ring_base = 0x0000028c,
+
+ /* REO status ring address */
+ .hal_reo_status_ring_base = 0x00000a84,
+};
+
+static const struct ath12k_hw_regs qcn9274_v2_regs = {
+ /* SW2TCL(x) R0 ring configuration address */
+ .hal_tcl1_ring_id = 0x00000908,
+ .hal_tcl1_ring_misc = 0x00000910,
+ .hal_tcl1_ring_tp_addr_lsb = 0x0000091c,
+ .hal_tcl1_ring_tp_addr_msb = 0x00000920,
+ .hal_tcl1_ring_consumer_int_setup_ix0 = 0x00000930,
+ .hal_tcl1_ring_consumer_int_setup_ix1 = 0x00000934,
+ .hal_tcl1_ring_msi1_base_lsb = 0x00000948,
+ .hal_tcl1_ring_msi1_base_msb = 0x0000094c,
+ .hal_tcl1_ring_msi1_data = 0x00000950,
+ .hal_tcl_ring_base_lsb = 0x00000b58,
+
+ /* TCL STATUS ring address */
+ .hal_tcl_status_ring_base_lsb = 0x00000d38,
+
+ /* WBM idle link ring address */
+ .hal_wbm_idle_ring_base_lsb = 0x00000d3c,
+ .hal_wbm_idle_ring_misc_addr = 0x00000d4c,
+ .hal_wbm_r0_idle_list_cntl_addr = 0x00000240,
+ .hal_wbm_r0_idle_list_size_addr = 0x00000244,
+ .hal_wbm_scattered_ring_base_lsb = 0x00000250,
+ .hal_wbm_scattered_ring_base_msb = 0x00000254,
+ .hal_wbm_scattered_desc_head_info_ix0 = 0x00000260,
+ .hal_wbm_scattered_desc_head_info_ix1 = 0x00000264,
+ .hal_wbm_scattered_desc_tail_info_ix0 = 0x00000270,
+ .hal_wbm_scattered_desc_tail_info_ix1 = 0x00000274,
+ .hal_wbm_scattered_desc_ptr_hp_addr = 0x0000027c,
+
+ /* SW2WBM release ring address */
+ .hal_wbm_sw_release_ring_base_lsb = 0x0000037c,
+ .hal_wbm_sw1_release_ring_base_lsb = 0x000003f4,
+
+ /* WBM2SW release ring address */
+ .hal_wbm0_release_ring_base_lsb = 0x00000e08,
+ .hal_wbm1_release_ring_base_lsb = 0x00000e80,
+
+ /* PCIe base address */
+ .pcie_qserdes_sysclk_en_sel = 0x01e0c0a8,
+ .pcie_pcs_osc_dtct_config_base = 0x01e0d45c,
+
+ /* PPE release ring address */
+ .hal_ppe_rel_ring_base = 0x0000046c,
+
+ /* REO DEST ring address */
+ .hal_reo2_ring_base = 0x00000578,
+ .hal_reo1_misc_ctrl_addr = 0x00000b9c,
+ .hal_reo1_sw_cookie_cfg0 = 0x0000006c,
+ .hal_reo1_sw_cookie_cfg1 = 0x00000070,
+ .hal_reo1_qdesc_lut_base0 = 0x00000074,
+ .hal_reo1_qdesc_lut_base1 = 0x00000078,
+ .hal_reo1_ring_base_lsb = 0x00000500,
+ .hal_reo1_ring_base_msb = 0x00000504,
+ .hal_reo1_ring_id = 0x00000508,
+ .hal_reo1_ring_misc = 0x00000510,
+ .hal_reo1_ring_hp_addr_lsb = 0x00000514,
+ .hal_reo1_ring_hp_addr_msb = 0x00000518,
+ .hal_reo1_ring_producer_int_setup = 0x00000524,
+ .hal_reo1_ring_msi1_base_lsb = 0x00000548,
+ .hal_reo1_ring_msi1_base_msb = 0x0000054C,
+ .hal_reo1_ring_msi1_data = 0x00000550,
+ .hal_reo1_aging_thres_ix0 = 0x00000B28,
+ .hal_reo1_aging_thres_ix1 = 0x00000B2C,
+ .hal_reo1_aging_thres_ix2 = 0x00000B30,
+ .hal_reo1_aging_thres_ix3 = 0x00000B34,
+
+ /* REO Exception ring address */
+ .hal_reo2_sw0_ring_base = 0x000008c0,
+
+ /* REO Reinject ring address */
+ .hal_sw2reo_ring_base = 0x00000320,
+ .hal_sw2reo1_ring_base = 0x00000398,
+
+ /* REO cmd ring address */
+ .hal_reo_cmd_ring_base = 0x000002A8,
+
+ /* REO status ring address */
+ .hal_reo_status_ring_base = 0x00000aa0,
+};
+
+static const struct ath12k_hw_regs wcn7850_regs = {
+ /* SW2TCL(x) R0 ring configuration address */
+ .hal_tcl1_ring_id = 0x00000908,
+ .hal_tcl1_ring_misc = 0x00000910,
+ .hal_tcl1_ring_tp_addr_lsb = 0x0000091c,
+ .hal_tcl1_ring_tp_addr_msb = 0x00000920,
+ .hal_tcl1_ring_consumer_int_setup_ix0 = 0x00000930,
+ .hal_tcl1_ring_consumer_int_setup_ix1 = 0x00000934,
+ .hal_tcl1_ring_msi1_base_lsb = 0x00000948,
+ .hal_tcl1_ring_msi1_base_msb = 0x0000094c,
+ .hal_tcl1_ring_msi1_data = 0x00000950,
+ .hal_tcl_ring_base_lsb = 0x00000b58,
+
+ /* TCL STATUS ring address */
+ .hal_tcl_status_ring_base_lsb = 0x00000d38,
+
+ .hal_wbm_idle_ring_base_lsb = 0x00000d3c,
+ .hal_wbm_idle_ring_misc_addr = 0x00000d4c,
+ .hal_wbm_r0_idle_list_cntl_addr = 0x00000240,
+ .hal_wbm_r0_idle_list_size_addr = 0x00000244,
+ .hal_wbm_scattered_ring_base_lsb = 0x00000250,
+ .hal_wbm_scattered_ring_base_msb = 0x00000254,
+ .hal_wbm_scattered_desc_head_info_ix0 = 0x00000260,
+ .hal_wbm_scattered_desc_head_info_ix1 = 0x00000264,
+ .hal_wbm_scattered_desc_tail_info_ix0 = 0x00000270,
+ .hal_wbm_scattered_desc_tail_info_ix1 = 0x00000274,
+ .hal_wbm_scattered_desc_ptr_hp_addr = 0x00000027c,
+
+ .hal_wbm_sw_release_ring_base_lsb = 0x0000037c,
+ .hal_wbm_sw1_release_ring_base_lsb = 0x00000284,
+ .hal_wbm0_release_ring_base_lsb = 0x00000e08,
+ .hal_wbm1_release_ring_base_lsb = 0x00000e80,
+
+ /* PCIe base address */
+ .pcie_qserdes_sysclk_en_sel = 0x01e0e0a8,
+ .pcie_pcs_osc_dtct_config_base = 0x01e0f45c,
+
+ /* PPE release ring address */
+ .hal_ppe_rel_ring_base = 0x0000043c,
+
+ /* REO DEST ring address */
+ .hal_reo2_ring_base = 0x0000055c,
+ .hal_reo1_misc_ctrl_addr = 0x00000b7c,
+ .hal_reo1_sw_cookie_cfg0 = 0x00000050,
+ .hal_reo1_sw_cookie_cfg1 = 0x00000054,
+ .hal_reo1_qdesc_lut_base0 = 0x00000058,
+ .hal_reo1_qdesc_lut_base1 = 0x0000005c,
+ .hal_reo1_ring_base_lsb = 0x000004e4,
+ .hal_reo1_ring_base_msb = 0x000004e8,
+ .hal_reo1_ring_id = 0x000004ec,
+ .hal_reo1_ring_misc = 0x000004f4,
+ .hal_reo1_ring_hp_addr_lsb = 0x000004f8,
+ .hal_reo1_ring_hp_addr_msb = 0x000004fc,
+ .hal_reo1_ring_producer_int_setup = 0x00000508,
+ .hal_reo1_ring_msi1_base_lsb = 0x0000052C,
+ .hal_reo1_ring_msi1_base_msb = 0x00000530,
+ .hal_reo1_ring_msi1_data = 0x00000534,
+ .hal_reo1_aging_thres_ix0 = 0x00000b08,
+ .hal_reo1_aging_thres_ix1 = 0x00000b0c,
+ .hal_reo1_aging_thres_ix2 = 0x00000b10,
+ .hal_reo1_aging_thres_ix3 = 0x00000b14,
+
+ /* REO Exception ring address */
+ .hal_reo2_sw0_ring_base = 0x000008a4,
+
+ /* REO Reinject ring address */
+ .hal_sw2reo_ring_base = 0x00000304,
+ .hal_sw2reo1_ring_base = 0x0000037c,
+
+ /* REO cmd ring address */
+ .hal_reo_cmd_ring_base = 0x0000028c,
+
+ /* REO status ring address */
+ .hal_reo_status_ring_base = 0x00000a84,
+};
+
+static const struct ath12k_hw_hal_params ath12k_hw_hal_params_qcn9274 = {
+ .rx_buf_rbm = HAL_RX_BUF_RBM_SW3_BM,
+ .wbm2sw_cc_enable = HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW0_EN |
+ HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW1_EN |
+ HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW2_EN |
+ HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW3_EN |
+ HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW4_EN,
+};
+
+static const struct ath12k_hw_hal_params ath12k_hw_hal_params_wcn7850 = {
+ .rx_buf_rbm = HAL_RX_BUF_RBM_SW1_BM,
+ .wbm2sw_cc_enable = HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW0_EN |
+ HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW2_EN |
+ HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW3_EN |
+ HAL_WBM_SW_COOKIE_CONV_CFG_WBM2SW4_EN,
+};
+
+static const struct ath12k_hw_params ath12k_hw_params[] = {
+ {
+ .name = "qcn9274 hw1.0",
+ .hw_rev = ATH12K_HW_QCN9274_HW10,
+ .fw = {
+ .dir = "QCN9274/hw1.0",
+ .board_size = 256 * 1024,
+ .cal_offset = 128 * 1024,
+ },
+ .max_radios = 1,
+ .single_pdev_only = false,
+ .qmi_service_ins_id = ATH12K_QMI_WLFW_SERVICE_INS_ID_V01_QCN9274,
+ .internal_sleep_clock = false,
+
+ .hw_ops = &qcn9274_ops,
+ .ring_mask = &ath12k_hw_ring_mask_qcn9274,
+ .regs = &qcn9274_v1_regs,
+
+ .host_ce_config = ath12k_host_ce_config_qcn9274,
+ .ce_count = 16,
+ .target_ce_config = ath12k_target_ce_config_wlan_qcn9274,
+ .target_ce_count = 12,
+ .svc_to_ce_map = ath12k_target_service_to_ce_map_wlan_qcn9274,
+ .svc_to_ce_map_len = 18,
+
+ .hal_params = &ath12k_hw_hal_params_qcn9274,
+
+ .rxdma1_enable = false,
+ .num_rxmda_per_pdev = 1,
+ .num_rxdma_dst_ring = 0,
+ .rx_mac_buf_ring = false,
+ .vdev_start_delay = false,
+
+ .interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_AP),
+ .supports_monitor = false,
+
+ .idle_ps = false,
+ .download_calib = true,
+ .supports_suspend = false,
+ .tcl_ring_retry = true,
+ .reoq_lut_support = false,
+ .supports_shadow_regs = false,
+
+ .hal_desc_sz = sizeof(struct hal_rx_desc_qcn9274),
+ .num_tcl_banks = 48,
+ .max_tx_ring = 4,
+
+ .mhi_config = &ath12k_mhi_config_qcn9274,
+
+ .wmi_init = ath12k_wmi_init_qcn9274,
+
+ .hal_ops = &hal_qcn9274_ops,
+
+ },
+ {
+ .name = "wcn7850 hw2.0",
+ .hw_rev = ATH12K_HW_WCN7850_HW20,
+
+ .fw = {
+ .dir = "WCN7850/hw2.0",
+ .board_size = 256 * 1024,
+ .cal_offset = 256 * 1024,
+ },
+
+ .max_radios = 1,
+ .single_pdev_only = true,
+ .qmi_service_ins_id = ATH12K_QMI_WLFW_SERVICE_INS_ID_V01_WCN7850,
+ .internal_sleep_clock = true,
+
+ .hw_ops = &wcn7850_ops,
+ .ring_mask = &ath12k_hw_ring_mask_wcn7850,
+ .regs = &wcn7850_regs,
+
+ .host_ce_config = ath12k_host_ce_config_wcn7850,
+ .ce_count = 9,
+ .target_ce_config = ath12k_target_ce_config_wlan_wcn7850,
+ .target_ce_count = 9,
+ .svc_to_ce_map = ath12k_target_service_to_ce_map_wlan_wcn7850,
+ .svc_to_ce_map_len = 14,
+
+ .hal_params = &ath12k_hw_hal_params_wcn7850,
+
+ .rxdma1_enable = false,
+ .num_rxmda_per_pdev = 2,
+ .num_rxdma_dst_ring = 1,
+ .rx_mac_buf_ring = true,
+ .vdev_start_delay = true,
+
+ .interface_modes = BIT(NL80211_IFTYPE_STATION),
+ .supports_monitor = false,
+
+ .idle_ps = false,
+ .download_calib = false,
+ .supports_suspend = false,
+ .tcl_ring_retry = false,
+ .reoq_lut_support = false,
+ .supports_shadow_regs = true,
+
+ .hal_desc_sz = sizeof(struct hal_rx_desc_wcn7850),
+ .num_tcl_banks = 7,
+ .max_tx_ring = 3,
+
+ .mhi_config = &ath12k_mhi_config_wcn7850,
+
+ .wmi_init = ath12k_wmi_init_wcn7850,
+
+ .hal_ops = &hal_wcn7850_ops,
+ },
+ {
+ .name = "qcn9274 hw2.0",
+ .hw_rev = ATH12K_HW_QCN9274_HW20,
+ .fw = {
+ .dir = "QCN9274/hw2.0",
+ .board_size = 256 * 1024,
+ .cal_offset = 128 * 1024,
+ },
+ .max_radios = 1,
+ .single_pdev_only = false,
+ .qmi_service_ins_id = ATH12K_QMI_WLFW_SERVICE_INS_ID_V01_QCN9274,
+ .internal_sleep_clock = false,
+
+ .hw_ops = &qcn9274_ops,
+ .ring_mask = &ath12k_hw_ring_mask_qcn9274,
+ .regs = &qcn9274_v2_regs,
+
+ .host_ce_config = ath12k_host_ce_config_qcn9274,
+ .ce_count = 16,
+ .target_ce_config = ath12k_target_ce_config_wlan_qcn9274,
+ .target_ce_count = 12,
+ .svc_to_ce_map = ath12k_target_service_to_ce_map_wlan_qcn9274,
+ .svc_to_ce_map_len = 18,
+
+ .hal_params = &ath12k_hw_hal_params_qcn9274,
+
+ .rxdma1_enable = false,
+ .num_rxmda_per_pdev = 1,
+ .num_rxdma_dst_ring = 0,
+ .rx_mac_buf_ring = false,
+ .vdev_start_delay = false,
+
+ .interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_AP),
+ .supports_monitor = false,
+
+ .idle_ps = false,
+ .download_calib = true,
+ .supports_suspend = false,
+ .tcl_ring_retry = true,
+ .reoq_lut_support = false,
+ .supports_shadow_regs = false,
+
+ .hal_desc_sz = sizeof(struct hal_rx_desc_qcn9274),
+ .num_tcl_banks = 48,
+ .max_tx_ring = 4,
+
+ .mhi_config = &ath12k_mhi_config_qcn9274,
+
+ .wmi_init = ath12k_wmi_init_qcn9274,
+
+ .hal_ops = &hal_qcn9274_ops,
+ },
+};
+
+int ath12k_hw_init(struct ath12k_base *ab)
+{
+ const struct ath12k_hw_params *hw_params = NULL;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ath12k_hw_params); i++) {
+ hw_params = &ath12k_hw_params[i];
+
+ if (hw_params->hw_rev == ab->hw_rev)
+ break;
+ }
+
+ if (i == ARRAY_SIZE(ath12k_hw_params)) {
+ ath12k_err(ab, "Unsupported hardware version: 0x%x\n", ab->hw_rev);
+ return -EINVAL;
+ }
+
+ ab->hw_params = hw_params;
+
+ ath12k_info(ab, "Hardware name: %s\n", ab->hw_params->name);
+
+ return 0;
+}
diff --git a/drivers/net/wireless/ath/ath12k/hw.h b/drivers/net/wireless/ath/ath12k/hw.h
new file mode 100644
index 000000000000..e3461004188b
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/hw.h
@@ -0,0 +1,312 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef ATH12K_HW_H
+#define ATH12K_HW_H
+
+#include <linux/mhi.h>
+
+#include "wmi.h"
+#include "hal.h"
+
+/* Target configuration defines */
+
+/* Num VDEVS per radio */
+#define TARGET_NUM_VDEVS (16 + 1)
+
+#define TARGET_NUM_PEERS_PDEV (512 + TARGET_NUM_VDEVS)
+
+/* Num of peers for Single Radio mode */
+#define TARGET_NUM_PEERS_SINGLE (TARGET_NUM_PEERS_PDEV)
+
+/* Num of peers for DBS */
+#define TARGET_NUM_PEERS_DBS (2 * TARGET_NUM_PEERS_PDEV)
+
+/* Num of peers for DBS_SBS */
+#define TARGET_NUM_PEERS_DBS_SBS (3 * TARGET_NUM_PEERS_PDEV)
+
+/* Max num of stations (per radio) */
+#define TARGET_NUM_STATIONS 512
+
+#define TARGET_NUM_PEERS(x) TARGET_NUM_PEERS_##x
+#define TARGET_NUM_PEER_KEYS 2
+#define TARGET_NUM_TIDS(x) (2 * TARGET_NUM_PEERS(x) + \
+ 4 * TARGET_NUM_VDEVS + 8)
+
+#define TARGET_AST_SKID_LIMIT 16
+#define TARGET_NUM_OFFLD_PEERS 4
+#define TARGET_NUM_OFFLD_REORDER_BUFFS 4
+
+#define TARGET_TX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2) | BIT(4))
+#define TARGET_RX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2) | BIT(4))
+#define TARGET_RX_TIMEOUT_LO_PRI 100
+#define TARGET_RX_TIMEOUT_HI_PRI 40
+
+#define TARGET_DECAP_MODE_RAW 0
+#define TARGET_DECAP_MODE_NATIVE_WIFI 1
+#define TARGET_DECAP_MODE_ETH 2
+
+#define TARGET_SCAN_MAX_PENDING_REQS 4
+#define TARGET_BMISS_OFFLOAD_MAX_VDEV 3
+#define TARGET_ROAM_OFFLOAD_MAX_VDEV 3
+#define TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES 8
+#define TARGET_GTK_OFFLOAD_MAX_VDEV 3
+#define TARGET_NUM_MCAST_GROUPS 12
+#define TARGET_NUM_MCAST_TABLE_ELEMS 64
+#define TARGET_MCAST2UCAST_MODE 2
+#define TARGET_TX_DBG_LOG_SIZE 1024
+#define TARGET_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK 1
+#define TARGET_VOW_CONFIG 0
+#define TARGET_NUM_MSDU_DESC (2500)
+#define TARGET_MAX_FRAG_ENTRIES 6
+#define TARGET_MAX_BCN_OFFLD 16
+#define TARGET_NUM_WDS_ENTRIES 32
+#define TARGET_DMA_BURST_SIZE 1
+#define TARGET_RX_BATCHMODE 1
+
+#define ATH12K_HW_MAX_QUEUES 4
+#define ATH12K_QUEUE_LEN 4096
+
+#define ATH12K_HW_RATECODE_CCK_SHORT_PREAM_MASK 0x4
+
+#define ATH12K_FW_DIR "ath12k"
+
+#define ATH12K_BOARD_MAGIC "QCA-ATH12K-BOARD"
+#define ATH12K_BOARD_API2_FILE "board-2.bin"
+#define ATH12K_DEFAULT_BOARD_FILE "board.bin"
+#define ATH12K_DEFAULT_CAL_FILE "caldata.bin"
+#define ATH12K_AMSS_FILE "amss.bin"
+#define ATH12K_M3_FILE "m3.bin"
+#define ATH12K_REGDB_FILE_NAME "regdb.bin"
+
+enum ath12k_hw_rate_cck {
+ ATH12K_HW_RATE_CCK_LP_11M = 0,
+ ATH12K_HW_RATE_CCK_LP_5_5M,
+ ATH12K_HW_RATE_CCK_LP_2M,
+ ATH12K_HW_RATE_CCK_LP_1M,
+ ATH12K_HW_RATE_CCK_SP_11M,
+ ATH12K_HW_RATE_CCK_SP_5_5M,
+ ATH12K_HW_RATE_CCK_SP_2M,
+};
+
+enum ath12k_hw_rate_ofdm {
+ ATH12K_HW_RATE_OFDM_48M = 0,
+ ATH12K_HW_RATE_OFDM_24M,
+ ATH12K_HW_RATE_OFDM_12M,
+ ATH12K_HW_RATE_OFDM_6M,
+ ATH12K_HW_RATE_OFDM_54M,
+ ATH12K_HW_RATE_OFDM_36M,
+ ATH12K_HW_RATE_OFDM_18M,
+ ATH12K_HW_RATE_OFDM_9M,
+};
+
+enum ath12k_bus {
+ ATH12K_BUS_PCI,
+};
+
+#define ATH12K_EXT_IRQ_GRP_NUM_MAX 11
+
+struct hal_rx_desc;
+struct hal_tcl_data_cmd;
+struct htt_rx_ring_tlv_filter;
+enum hal_encrypt_type;
+
+struct ath12k_hw_ring_mask {
+ u8 tx[ATH12K_EXT_IRQ_GRP_NUM_MAX];
+ u8 rx_mon_dest[ATH12K_EXT_IRQ_GRP_NUM_MAX];
+ u8 rx[ATH12K_EXT_IRQ_GRP_NUM_MAX];
+ u8 rx_err[ATH12K_EXT_IRQ_GRP_NUM_MAX];
+ u8 rx_wbm_rel[ATH12K_EXT_IRQ_GRP_NUM_MAX];
+ u8 reo_status[ATH12K_EXT_IRQ_GRP_NUM_MAX];
+ u8 host2rxdma[ATH12K_EXT_IRQ_GRP_NUM_MAX];
+ u8 tx_mon_dest[ATH12K_EXT_IRQ_GRP_NUM_MAX];
+};
+
+struct ath12k_hw_hal_params {
+ enum hal_rx_buf_return_buf_manager rx_buf_rbm;
+ u32 wbm2sw_cc_enable;
+};
+
+struct ath12k_hw_params {
+ const char *name;
+ u16 hw_rev;
+
+ struct {
+ const char *dir;
+ size_t board_size;
+ size_t cal_offset;
+ } fw;
+
+ u8 max_radios;
+ bool single_pdev_only:1;
+ u32 qmi_service_ins_id;
+ bool internal_sleep_clock:1;
+
+ const struct ath12k_hw_ops *hw_ops;
+ const struct ath12k_hw_ring_mask *ring_mask;
+ const struct ath12k_hw_regs *regs;
+
+ const struct ce_attr *host_ce_config;
+ u32 ce_count;
+ const struct ce_pipe_config *target_ce_config;
+ u32 target_ce_count;
+ const struct service_to_pipe *svc_to_ce_map;
+ u32 svc_to_ce_map_len;
+
+ const struct ath12k_hw_hal_params *hal_params;
+
+ bool rxdma1_enable:1;
+ int num_rxmda_per_pdev;
+ int num_rxdma_dst_ring;
+ bool rx_mac_buf_ring:1;
+ bool vdev_start_delay:1;
+
+ u16 interface_modes;
+ bool supports_monitor:1;
+
+ bool idle_ps:1;
+ bool download_calib:1;
+ bool supports_suspend:1;
+ bool tcl_ring_retry:1;
+ bool reoq_lut_support:1;
+ bool supports_shadow_regs:1;
+
+ u32 hal_desc_sz;
+ u32 num_tcl_banks;
+ u32 max_tx_ring;
+
+ const struct mhi_controller_config *mhi_config;
+
+ void (*wmi_init)(struct ath12k_base *ab,
+ struct ath12k_wmi_resource_config_arg *config);
+
+ const struct hal_ops *hal_ops;
+};
+
+struct ath12k_hw_ops {
+ u8 (*get_hw_mac_from_pdev_id)(int pdev_id);
+ int (*mac_id_to_pdev_id)(const struct ath12k_hw_params *hw, int mac_id);
+ int (*mac_id_to_srng_id)(const struct ath12k_hw_params *hw, int mac_id);
+ int (*rxdma_ring_sel_config)(struct ath12k_base *ab);
+ u8 (*get_ring_selector)(struct sk_buff *skb);
+ bool (*dp_srng_is_tx_comp_ring)(int ring_num);
+};
+
+static inline
+int ath12k_hw_get_mac_from_pdev_id(const struct ath12k_hw_params *hw,
+ int pdev_idx)
+{
+ if (hw->hw_ops->get_hw_mac_from_pdev_id)
+ return hw->hw_ops->get_hw_mac_from_pdev_id(pdev_idx);
+
+ return 0;
+}
+
+static inline int ath12k_hw_mac_id_to_pdev_id(const struct ath12k_hw_params *hw,
+ int mac_id)
+{
+ if (hw->hw_ops->mac_id_to_pdev_id)
+ return hw->hw_ops->mac_id_to_pdev_id(hw, mac_id);
+
+ return 0;
+}
+
+static inline int ath12k_hw_mac_id_to_srng_id(const struct ath12k_hw_params *hw,
+ int mac_id)
+{
+ if (hw->hw_ops->mac_id_to_srng_id)
+ return hw->hw_ops->mac_id_to_srng_id(hw, mac_id);
+
+ return 0;
+}
+
+struct ath12k_fw_ie {
+ __le32 id;
+ __le32 len;
+ u8 data[];
+};
+
+enum ath12k_bd_ie_board_type {
+ ATH12K_BD_IE_BOARD_NAME = 0,
+ ATH12K_BD_IE_BOARD_DATA = 1,
+};
+
+enum ath12k_bd_ie_type {
+ /* contains sub IEs of enum ath12k_bd_ie_board_type */
+ ATH12K_BD_IE_BOARD = 0,
+ ATH12K_BD_IE_BOARD_EXT = 1,
+};
+
+struct ath12k_hw_regs {
+ u32 hal_tcl1_ring_id;
+ u32 hal_tcl1_ring_misc;
+ u32 hal_tcl1_ring_tp_addr_lsb;
+ u32 hal_tcl1_ring_tp_addr_msb;
+ u32 hal_tcl1_ring_consumer_int_setup_ix0;
+ u32 hal_tcl1_ring_consumer_int_setup_ix1;
+ u32 hal_tcl1_ring_msi1_base_lsb;
+ u32 hal_tcl1_ring_msi1_base_msb;
+ u32 hal_tcl1_ring_msi1_data;
+ u32 hal_tcl_ring_base_lsb;
+
+ u32 hal_tcl_status_ring_base_lsb;
+
+ u32 hal_wbm_idle_ring_base_lsb;
+ u32 hal_wbm_idle_ring_misc_addr;
+ u32 hal_wbm_r0_idle_list_cntl_addr;
+ u32 hal_wbm_r0_idle_list_size_addr;
+ u32 hal_wbm_scattered_ring_base_lsb;
+ u32 hal_wbm_scattered_ring_base_msb;
+ u32 hal_wbm_scattered_desc_head_info_ix0;
+ u32 hal_wbm_scattered_desc_head_info_ix1;
+ u32 hal_wbm_scattered_desc_tail_info_ix0;
+ u32 hal_wbm_scattered_desc_tail_info_ix1;
+ u32 hal_wbm_scattered_desc_ptr_hp_addr;
+
+ u32 hal_wbm_sw_release_ring_base_lsb;
+ u32 hal_wbm_sw1_release_ring_base_lsb;
+ u32 hal_wbm0_release_ring_base_lsb;
+ u32 hal_wbm1_release_ring_base_lsb;
+
+ u32 pcie_qserdes_sysclk_en_sel;
+ u32 pcie_pcs_osc_dtct_config_base;
+
+ u32 hal_ppe_rel_ring_base;
+
+ u32 hal_reo2_ring_base;
+ u32 hal_reo1_misc_ctrl_addr;
+ u32 hal_reo1_sw_cookie_cfg0;
+ u32 hal_reo1_sw_cookie_cfg1;
+ u32 hal_reo1_qdesc_lut_base0;
+ u32 hal_reo1_qdesc_lut_base1;
+ u32 hal_reo1_ring_base_lsb;
+ u32 hal_reo1_ring_base_msb;
+ u32 hal_reo1_ring_id;
+ u32 hal_reo1_ring_misc;
+ u32 hal_reo1_ring_hp_addr_lsb;
+ u32 hal_reo1_ring_hp_addr_msb;
+ u32 hal_reo1_ring_producer_int_setup;
+ u32 hal_reo1_ring_msi1_base_lsb;
+ u32 hal_reo1_ring_msi1_base_msb;
+ u32 hal_reo1_ring_msi1_data;
+ u32 hal_reo1_aging_thres_ix0;
+ u32 hal_reo1_aging_thres_ix1;
+ u32 hal_reo1_aging_thres_ix2;
+ u32 hal_reo1_aging_thres_ix3;
+
+ u32 hal_reo2_sw0_ring_base;
+
+ u32 hal_sw2reo_ring_base;
+ u32 hal_sw2reo1_ring_base;
+
+ u32 hal_reo_cmd_ring_base;
+
+ u32 hal_reo_status_ring_base;
+};
+
+int ath12k_hw_init(struct ath12k_base *ab);
+
+#endif
diff --git a/drivers/net/wireless/ath/ath12k/mac.c b/drivers/net/wireless/ath/ath12k/mac.c
new file mode 100644
index 000000000000..bf7e5b6977b2
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/mac.c
@@ -0,0 +1,7038 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <net/mac80211.h>
+#include <linux/etherdevice.h>
+#include "mac.h"
+#include "core.h"
+#include "debug.h"
+#include "wmi.h"
+#include "hw.h"
+#include "dp_tx.h"
+#include "dp_rx.h"
+#include "peer.h"
+
+#define CHAN2G(_channel, _freq, _flags) { \
+ .band = NL80211_BAND_2GHZ, \
+ .hw_value = (_channel), \
+ .center_freq = (_freq), \
+ .flags = (_flags), \
+ .max_antenna_gain = 0, \
+ .max_power = 30, \
+}
+
+#define CHAN5G(_channel, _freq, _flags) { \
+ .band = NL80211_BAND_5GHZ, \
+ .hw_value = (_channel), \
+ .center_freq = (_freq), \
+ .flags = (_flags), \
+ .max_antenna_gain = 0, \
+ .max_power = 30, \
+}
+
+#define CHAN6G(_channel, _freq, _flags) { \
+ .band = NL80211_BAND_6GHZ, \
+ .hw_value = (_channel), \
+ .center_freq = (_freq), \
+ .flags = (_flags), \
+ .max_antenna_gain = 0, \
+ .max_power = 30, \
+}
+
+static const struct ieee80211_channel ath12k_2ghz_channels[] = {
+ CHAN2G(1, 2412, 0),
+ CHAN2G(2, 2417, 0),
+ CHAN2G(3, 2422, 0),
+ CHAN2G(4, 2427, 0),
+ CHAN2G(5, 2432, 0),
+ CHAN2G(6, 2437, 0),
+ CHAN2G(7, 2442, 0),
+ CHAN2G(8, 2447, 0),
+ CHAN2G(9, 2452, 0),
+ CHAN2G(10, 2457, 0),
+ CHAN2G(11, 2462, 0),
+ CHAN2G(12, 2467, 0),
+ CHAN2G(13, 2472, 0),
+ CHAN2G(14, 2484, 0),
+};
+
+static const struct ieee80211_channel ath12k_5ghz_channels[] = {
+ CHAN5G(36, 5180, 0),
+ CHAN5G(40, 5200, 0),
+ CHAN5G(44, 5220, 0),
+ CHAN5G(48, 5240, 0),
+ CHAN5G(52, 5260, 0),
+ CHAN5G(56, 5280, 0),
+ CHAN5G(60, 5300, 0),
+ CHAN5G(64, 5320, 0),
+ CHAN5G(100, 5500, 0),
+ CHAN5G(104, 5520, 0),
+ CHAN5G(108, 5540, 0),
+ CHAN5G(112, 5560, 0),
+ CHAN5G(116, 5580, 0),
+ CHAN5G(120, 5600, 0),
+ CHAN5G(124, 5620, 0),
+ CHAN5G(128, 5640, 0),
+ CHAN5G(132, 5660, 0),
+ CHAN5G(136, 5680, 0),
+ CHAN5G(140, 5700, 0),
+ CHAN5G(144, 5720, 0),
+ CHAN5G(149, 5745, 0),
+ CHAN5G(153, 5765, 0),
+ CHAN5G(157, 5785, 0),
+ CHAN5G(161, 5805, 0),
+ CHAN5G(165, 5825, 0),
+ CHAN5G(169, 5845, 0),
+ CHAN5G(173, 5865, 0),
+};
+
+static const struct ieee80211_channel ath12k_6ghz_channels[] = {
+ CHAN6G(1, 5955, 0),
+ CHAN6G(5, 5975, 0),
+ CHAN6G(9, 5995, 0),
+ CHAN6G(13, 6015, 0),
+ CHAN6G(17, 6035, 0),
+ CHAN6G(21, 6055, 0),
+ CHAN6G(25, 6075, 0),
+ CHAN6G(29, 6095, 0),
+ CHAN6G(33, 6115, 0),
+ CHAN6G(37, 6135, 0),
+ CHAN6G(41, 6155, 0),
+ CHAN6G(45, 6175, 0),
+ CHAN6G(49, 6195, 0),
+ CHAN6G(53, 6215, 0),
+ CHAN6G(57, 6235, 0),
+ CHAN6G(61, 6255, 0),
+ CHAN6G(65, 6275, 0),
+ CHAN6G(69, 6295, 0),
+ CHAN6G(73, 6315, 0),
+ CHAN6G(77, 6335, 0),
+ CHAN6G(81, 6355, 0),
+ CHAN6G(85, 6375, 0),
+ CHAN6G(89, 6395, 0),
+ CHAN6G(93, 6415, 0),
+ CHAN6G(97, 6435, 0),
+ CHAN6G(101, 6455, 0),
+ CHAN6G(105, 6475, 0),
+ CHAN6G(109, 6495, 0),
+ CHAN6G(113, 6515, 0),
+ CHAN6G(117, 6535, 0),
+ CHAN6G(121, 6555, 0),
+ CHAN6G(125, 6575, 0),
+ CHAN6G(129, 6595, 0),
+ CHAN6G(133, 6615, 0),
+ CHAN6G(137, 6635, 0),
+ CHAN6G(141, 6655, 0),
+ CHAN6G(145, 6675, 0),
+ CHAN6G(149, 6695, 0),
+ CHAN6G(153, 6715, 0),
+ CHAN6G(157, 6735, 0),
+ CHAN6G(161, 6755, 0),
+ CHAN6G(165, 6775, 0),
+ CHAN6G(169, 6795, 0),
+ CHAN6G(173, 6815, 0),
+ CHAN6G(177, 6835, 0),
+ CHAN6G(181, 6855, 0),
+ CHAN6G(185, 6875, 0),
+ CHAN6G(189, 6895, 0),
+ CHAN6G(193, 6915, 0),
+ CHAN6G(197, 6935, 0),
+ CHAN6G(201, 6955, 0),
+ CHAN6G(205, 6975, 0),
+ CHAN6G(209, 6995, 0),
+ CHAN6G(213, 7015, 0),
+ CHAN6G(217, 7035, 0),
+ CHAN6G(221, 7055, 0),
+ CHAN6G(225, 7075, 0),
+ CHAN6G(229, 7095, 0),
+ CHAN6G(233, 7115, 0),
+};
+
+static struct ieee80211_rate ath12k_legacy_rates[] = {
+ { .bitrate = 10,
+ .hw_value = ATH12K_HW_RATE_CCK_LP_1M },
+ { .bitrate = 20,
+ .hw_value = ATH12K_HW_RATE_CCK_LP_2M,
+ .hw_value_short = ATH12K_HW_RATE_CCK_SP_2M,
+ .flags = IEEE80211_RATE_SHORT_PREAMBLE },
+ { .bitrate = 55,
+ .hw_value = ATH12K_HW_RATE_CCK_LP_5_5M,
+ .hw_value_short = ATH12K_HW_RATE_CCK_SP_5_5M,
+ .flags = IEEE80211_RATE_SHORT_PREAMBLE },
+ { .bitrate = 110,
+ .hw_value = ATH12K_HW_RATE_CCK_LP_11M,
+ .hw_value_short = ATH12K_HW_RATE_CCK_SP_11M,
+ .flags = IEEE80211_RATE_SHORT_PREAMBLE },
+
+ { .bitrate = 60, .hw_value = ATH12K_HW_RATE_OFDM_6M },
+ { .bitrate = 90, .hw_value = ATH12K_HW_RATE_OFDM_9M },
+ { .bitrate = 120, .hw_value = ATH12K_HW_RATE_OFDM_12M },
+ { .bitrate = 180, .hw_value = ATH12K_HW_RATE_OFDM_18M },
+ { .bitrate = 240, .hw_value = ATH12K_HW_RATE_OFDM_24M },
+ { .bitrate = 360, .hw_value = ATH12K_HW_RATE_OFDM_36M },
+ { .bitrate = 480, .hw_value = ATH12K_HW_RATE_OFDM_48M },
+ { .bitrate = 540, .hw_value = ATH12K_HW_RATE_OFDM_54M },
+};
+
+static const int
+ath12k_phymodes[NUM_NL80211_BANDS][ATH12K_CHAN_WIDTH_NUM] = {
+ [NL80211_BAND_2GHZ] = {
+ [NL80211_CHAN_WIDTH_5] = MODE_UNKNOWN,
+ [NL80211_CHAN_WIDTH_10] = MODE_UNKNOWN,
+ [NL80211_CHAN_WIDTH_20_NOHT] = MODE_11AX_HE20_2G,
+ [NL80211_CHAN_WIDTH_20] = MODE_11AX_HE20_2G,
+ [NL80211_CHAN_WIDTH_40] = MODE_11AX_HE40_2G,
+ [NL80211_CHAN_WIDTH_80] = MODE_11AX_HE80_2G,
+ [NL80211_CHAN_WIDTH_80P80] = MODE_UNKNOWN,
+ [NL80211_CHAN_WIDTH_160] = MODE_UNKNOWN,
+ },
+ [NL80211_BAND_5GHZ] = {
+ [NL80211_CHAN_WIDTH_5] = MODE_UNKNOWN,
+ [NL80211_CHAN_WIDTH_10] = MODE_UNKNOWN,
+ [NL80211_CHAN_WIDTH_20_NOHT] = MODE_11AX_HE20,
+ [NL80211_CHAN_WIDTH_20] = MODE_11AX_HE20,
+ [NL80211_CHAN_WIDTH_40] = MODE_11AX_HE40,
+ [NL80211_CHAN_WIDTH_80] = MODE_11AX_HE80,
+ [NL80211_CHAN_WIDTH_160] = MODE_11AX_HE160,
+ [NL80211_CHAN_WIDTH_80P80] = MODE_11AX_HE80_80,
+ },
+ [NL80211_BAND_6GHZ] = {
+ [NL80211_CHAN_WIDTH_5] = MODE_UNKNOWN,
+ [NL80211_CHAN_WIDTH_10] = MODE_UNKNOWN,
+ [NL80211_CHAN_WIDTH_20_NOHT] = MODE_11AX_HE20,
+ [NL80211_CHAN_WIDTH_20] = MODE_11AX_HE20,
+ [NL80211_CHAN_WIDTH_40] = MODE_11AX_HE40,
+ [NL80211_CHAN_WIDTH_80] = MODE_11AX_HE80,
+ [NL80211_CHAN_WIDTH_160] = MODE_11AX_HE160,
+ [NL80211_CHAN_WIDTH_80P80] = MODE_11AX_HE80_80,
+ },
+
+};
+
+const struct htt_rx_ring_tlv_filter ath12k_mac_mon_status_filter_default = {
+ .rx_filter = HTT_RX_FILTER_TLV_FLAGS_MPDU_START |
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END |
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE,
+ .pkt_filter_flags0 = HTT_RX_FP_MGMT_FILTER_FLAGS0,
+ .pkt_filter_flags1 = HTT_RX_FP_MGMT_FILTER_FLAGS1,
+ .pkt_filter_flags2 = HTT_RX_FP_CTRL_FILTER_FLASG2,
+ .pkt_filter_flags3 = HTT_RX_FP_DATA_FILTER_FLASG3 |
+ HTT_RX_FP_CTRL_FILTER_FLASG3
+};
+
+#define ATH12K_MAC_FIRST_OFDM_RATE_IDX 4
+#define ath12k_g_rates ath12k_legacy_rates
+#define ath12k_g_rates_size (ARRAY_SIZE(ath12k_legacy_rates))
+#define ath12k_a_rates (ath12k_legacy_rates + 4)
+#define ath12k_a_rates_size (ARRAY_SIZE(ath12k_legacy_rates) - 4)
+
+#define ATH12K_MAC_SCAN_TIMEOUT_MSECS 200 /* in msecs */
+
+static const u32 ath12k_smps_map[] = {
+ [WLAN_HT_CAP_SM_PS_STATIC] = WMI_PEER_SMPS_STATIC,
+ [WLAN_HT_CAP_SM_PS_DYNAMIC] = WMI_PEER_SMPS_DYNAMIC,
+ [WLAN_HT_CAP_SM_PS_INVALID] = WMI_PEER_SMPS_PS_NONE,
+ [WLAN_HT_CAP_SM_PS_DISABLED] = WMI_PEER_SMPS_PS_NONE,
+};
+
+static int ath12k_start_vdev_delay(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif);
+
+static const char *ath12k_mac_phymode_str(enum wmi_phy_mode mode)
+{
+ switch (mode) {
+ case MODE_11A:
+ return "11a";
+ case MODE_11G:
+ return "11g";
+ case MODE_11B:
+ return "11b";
+ case MODE_11GONLY:
+ return "11gonly";
+ case MODE_11NA_HT20:
+ return "11na-ht20";
+ case MODE_11NG_HT20:
+ return "11ng-ht20";
+ case MODE_11NA_HT40:
+ return "11na-ht40";
+ case MODE_11NG_HT40:
+ return "11ng-ht40";
+ case MODE_11AC_VHT20:
+ return "11ac-vht20";
+ case MODE_11AC_VHT40:
+ return "11ac-vht40";
+ case MODE_11AC_VHT80:
+ return "11ac-vht80";
+ case MODE_11AC_VHT160:
+ return "11ac-vht160";
+ case MODE_11AC_VHT80_80:
+ return "11ac-vht80+80";
+ case MODE_11AC_VHT20_2G:
+ return "11ac-vht20-2g";
+ case MODE_11AC_VHT40_2G:
+ return "11ac-vht40-2g";
+ case MODE_11AC_VHT80_2G:
+ return "11ac-vht80-2g";
+ case MODE_11AX_HE20:
+ return "11ax-he20";
+ case MODE_11AX_HE40:
+ return "11ax-he40";
+ case MODE_11AX_HE80:
+ return "11ax-he80";
+ case MODE_11AX_HE80_80:
+ return "11ax-he80+80";
+ case MODE_11AX_HE160:
+ return "11ax-he160";
+ case MODE_11AX_HE20_2G:
+ return "11ax-he20-2g";
+ case MODE_11AX_HE40_2G:
+ return "11ax-he40-2g";
+ case MODE_11AX_HE80_2G:
+ return "11ax-he80-2g";
+ case MODE_UNKNOWN:
+ /* skip */
+ break;
+
+ /* no default handler to allow compiler to check that the
+ * enum is fully handled
+ */
+ }
+
+ return "<unknown>";
+}
+
+enum rate_info_bw
+ath12k_mac_bw_to_mac80211_bw(enum ath12k_supported_bw bw)
+{
+ u8 ret = RATE_INFO_BW_20;
+
+ switch (bw) {
+ case ATH12K_BW_20:
+ ret = RATE_INFO_BW_20;
+ break;
+ case ATH12K_BW_40:
+ ret = RATE_INFO_BW_40;
+ break;
+ case ATH12K_BW_80:
+ ret = RATE_INFO_BW_80;
+ break;
+ case ATH12K_BW_160:
+ ret = RATE_INFO_BW_160;
+ break;
+ }
+
+ return ret;
+}
+
+enum ath12k_supported_bw ath12k_mac_mac80211_bw_to_ath12k_bw(enum rate_info_bw bw)
+{
+ switch (bw) {
+ case RATE_INFO_BW_20:
+ return ATH12K_BW_20;
+ case RATE_INFO_BW_40:
+ return ATH12K_BW_40;
+ case RATE_INFO_BW_80:
+ return ATH12K_BW_80;
+ case RATE_INFO_BW_160:
+ return ATH12K_BW_160;
+ default:
+ return ATH12K_BW_20;
+ }
+}
+
+int ath12k_mac_hw_ratecode_to_legacy_rate(u8 hw_rc, u8 preamble, u8 *rateidx,
+ u16 *rate)
+{
+ /* As default, it is OFDM rates */
+ int i = ATH12K_MAC_FIRST_OFDM_RATE_IDX;
+ int max_rates_idx = ath12k_g_rates_size;
+
+ if (preamble == WMI_RATE_PREAMBLE_CCK) {
+ hw_rc &= ~ATH12K_HW_RATECODE_CCK_SHORT_PREAM_MASK;
+ i = 0;
+ max_rates_idx = ATH12K_MAC_FIRST_OFDM_RATE_IDX;
+ }
+
+ while (i < max_rates_idx) {
+ if (hw_rc == ath12k_legacy_rates[i].hw_value) {
+ *rateidx = i;
+ *rate = ath12k_legacy_rates[i].bitrate;
+ return 0;
+ }
+ i++;
+ }
+
+ return -EINVAL;
+}
+
+u8 ath12k_mac_bitrate_to_idx(const struct ieee80211_supported_band *sband,
+ u32 bitrate)
+{
+ int i;
+
+ for (i = 0; i < sband->n_bitrates; i++)
+ if (sband->bitrates[i].bitrate == bitrate)
+ return i;
+
+ return 0;
+}
+
+static u32
+ath12k_mac_max_ht_nss(const u8 ht_mcs_mask[IEEE80211_HT_MCS_MASK_LEN])
+{
+ int nss;
+
+ for (nss = IEEE80211_HT_MCS_MASK_LEN - 1; nss >= 0; nss--)
+ if (ht_mcs_mask[nss])
+ return nss + 1;
+
+ return 1;
+}
+
+static u32
+ath12k_mac_max_vht_nss(const u16 vht_mcs_mask[NL80211_VHT_NSS_MAX])
+{
+ int nss;
+
+ for (nss = NL80211_VHT_NSS_MAX - 1; nss >= 0; nss--)
+ if (vht_mcs_mask[nss])
+ return nss + 1;
+
+ return 1;
+}
+
+static u8 ath12k_parse_mpdudensity(u8 mpdudensity)
+{
+/* From IEEE Std 802.11-2020 defined values for "Minimum MPDU Start Spacing":
+ * 0 for no restriction
+ * 1 for 1/4 us
+ * 2 for 1/2 us
+ * 3 for 1 us
+ * 4 for 2 us
+ * 5 for 4 us
+ * 6 for 8 us
+ * 7 for 16 us
+ */
+ switch (mpdudensity) {
+ case 0:
+ return 0;
+ case 1:
+ case 2:
+ case 3:
+ /* Our lower layer calculations limit our precision to
+ * 1 microsecond
+ */
+ return 1;
+ case 4:
+ return 2;
+ case 5:
+ return 4;
+ case 6:
+ return 8;
+ case 7:
+ return 16;
+ default:
+ return 0;
+ }
+}
+
+static int ath12k_mac_vif_chan(struct ieee80211_vif *vif,
+ struct cfg80211_chan_def *def)
+{
+ struct ieee80211_chanctx_conf *conf;
+
+ rcu_read_lock();
+ conf = rcu_dereference(vif->bss_conf.chanctx_conf);
+ if (!conf) {
+ rcu_read_unlock();
+ return -ENOENT;
+ }
+
+ *def = conf->def;
+ rcu_read_unlock();
+
+ return 0;
+}
+
+static bool ath12k_mac_bitrate_is_cck(int bitrate)
+{
+ switch (bitrate) {
+ case 10:
+ case 20:
+ case 55:
+ case 110:
+ return true;
+ }
+
+ return false;
+}
+
+u8 ath12k_mac_hw_rate_to_idx(const struct ieee80211_supported_band *sband,
+ u8 hw_rate, bool cck)
+{
+ const struct ieee80211_rate *rate;
+ int i;
+
+ for (i = 0; i < sband->n_bitrates; i++) {
+ rate = &sband->bitrates[i];
+
+ if (ath12k_mac_bitrate_is_cck(rate->bitrate) != cck)
+ continue;
+
+ if (rate->hw_value == hw_rate)
+ return i;
+ else if (rate->flags & IEEE80211_RATE_SHORT_PREAMBLE &&
+ rate->hw_value_short == hw_rate)
+ return i;
+ }
+
+ return 0;
+}
+
+static u8 ath12k_mac_bitrate_to_rate(int bitrate)
+{
+ return DIV_ROUND_UP(bitrate, 5) |
+ (ath12k_mac_bitrate_is_cck(bitrate) ? BIT(7) : 0);
+}
+
+static void ath12k_get_arvif_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct ath12k_vif_iter *arvif_iter = data;
+ struct ath12k_vif *arvif = (void *)vif->drv_priv;
+
+ if (arvif->vdev_id == arvif_iter->vdev_id)
+ arvif_iter->arvif = arvif;
+}
+
+struct ath12k_vif *ath12k_mac_get_arvif(struct ath12k *ar, u32 vdev_id)
+{
+ struct ath12k_vif_iter arvif_iter = {};
+ u32 flags;
+
+ arvif_iter.vdev_id = vdev_id;
+
+ flags = IEEE80211_IFACE_ITER_RESUME_ALL;
+ ieee80211_iterate_active_interfaces_atomic(ar->hw,
+ flags,
+ ath12k_get_arvif_iter,
+ &arvif_iter);
+ if (!arvif_iter.arvif) {
+ ath12k_warn(ar->ab, "No VIF found for vdev %d\n", vdev_id);
+ return NULL;
+ }
+
+ return arvif_iter.arvif;
+}
+
+struct ath12k_vif *ath12k_mac_get_arvif_by_vdev_id(struct ath12k_base *ab,
+ u32 vdev_id)
+{
+ int i;
+ struct ath12k_pdev *pdev;
+ struct ath12k_vif *arvif;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = rcu_dereference(ab->pdevs_active[i]);
+ if (pdev && pdev->ar) {
+ arvif = ath12k_mac_get_arvif(pdev->ar, vdev_id);
+ if (arvif)
+ return arvif;
+ }
+ }
+
+ return NULL;
+}
+
+struct ath12k *ath12k_mac_get_ar_by_vdev_id(struct ath12k_base *ab, u32 vdev_id)
+{
+ int i;
+ struct ath12k_pdev *pdev;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = rcu_dereference(ab->pdevs_active[i]);
+ if (pdev && pdev->ar) {
+ if (pdev->ar->allocated_vdev_map & (1LL << vdev_id))
+ return pdev->ar;
+ }
+ }
+
+ return NULL;
+}
+
+struct ath12k *ath12k_mac_get_ar_by_pdev_id(struct ath12k_base *ab, u32 pdev_id)
+{
+ int i;
+ struct ath12k_pdev *pdev;
+
+ if (ab->hw_params->single_pdev_only) {
+ pdev = rcu_dereference(ab->pdevs_active[0]);
+ return pdev ? pdev->ar : NULL;
+ }
+
+ if (WARN_ON(pdev_id > ab->num_radios))
+ return NULL;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = rcu_dereference(ab->pdevs_active[i]);
+
+ if (pdev && pdev->pdev_id == pdev_id)
+ return (pdev->ar ? pdev->ar : NULL);
+ }
+
+ return NULL;
+}
+
+static void ath12k_pdev_caps_update(struct ath12k *ar)
+{
+ struct ath12k_base *ab = ar->ab;
+
+ ar->max_tx_power = ab->target_caps.hw_max_tx_power;
+
+ /* FIXME: Set min_tx_power to ab->target_caps.hw_min_tx_power.
+ * But since the received value in svcrdy is same as hw_max_tx_power,
+ * we can set ar->min_tx_power to 0 currently until
+ * this is fixed in firmware
+ */
+ ar->min_tx_power = 0;
+
+ ar->txpower_limit_2g = ar->max_tx_power;
+ ar->txpower_limit_5g = ar->max_tx_power;
+ ar->txpower_scale = WMI_HOST_TP_SCALE_MAX;
+}
+
+static int ath12k_mac_txpower_recalc(struct ath12k *ar)
+{
+ struct ath12k_pdev *pdev = ar->pdev;
+ struct ath12k_vif *arvif;
+ int ret, txpower = -1;
+ u32 param;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ if (arvif->txpower <= 0)
+ continue;
+
+ if (txpower == -1)
+ txpower = arvif->txpower;
+ else
+ txpower = min(txpower, arvif->txpower);
+ }
+
+ if (txpower == -1)
+ return 0;
+
+ /* txpwr is set as 2 units per dBm in FW*/
+ txpower = min_t(u32, max_t(u32, ar->min_tx_power, txpower),
+ ar->max_tx_power) * 2;
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "txpower to set in hw %d\n",
+ txpower / 2);
+
+ if ((pdev->cap.supported_bands & WMI_HOST_WLAN_2G_CAP) &&
+ ar->txpower_limit_2g != txpower) {
+ param = WMI_PDEV_PARAM_TXPOWER_LIMIT2G;
+ ret = ath12k_wmi_pdev_set_param(ar, param,
+ txpower, ar->pdev->pdev_id);
+ if (ret)
+ goto fail;
+ ar->txpower_limit_2g = txpower;
+ }
+
+ if ((pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP) &&
+ ar->txpower_limit_5g != txpower) {
+ param = WMI_PDEV_PARAM_TXPOWER_LIMIT5G;
+ ret = ath12k_wmi_pdev_set_param(ar, param,
+ txpower, ar->pdev->pdev_id);
+ if (ret)
+ goto fail;
+ ar->txpower_limit_5g = txpower;
+ }
+
+ return 0;
+
+fail:
+ ath12k_warn(ar->ab, "failed to recalc txpower limit %d using pdev param %d: %d\n",
+ txpower / 2, param, ret);
+ return ret;
+}
+
+static int ath12k_recalc_rtscts_prot(struct ath12k_vif *arvif)
+{
+ struct ath12k *ar = arvif->ar;
+ u32 vdev_param, rts_cts;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ vdev_param = WMI_VDEV_PARAM_ENABLE_RTSCTS;
+
+ /* Enable RTS/CTS protection for sw retries (when legacy stations
+ * are in BSS) or by default only for second rate series.
+ * TODO: Check if we need to enable CTS 2 Self in any case
+ */
+ rts_cts = WMI_USE_RTS_CTS;
+
+ if (arvif->num_legacy_stations > 0)
+ rts_cts |= WMI_RTSCTS_ACROSS_SW_RETRIES << 4;
+ else
+ rts_cts |= WMI_RTSCTS_FOR_SECOND_RATESERIES << 4;
+
+ /* Need not send duplicate param value to firmware */
+ if (arvif->rtscts_prot_mode == rts_cts)
+ return 0;
+
+ arvif->rtscts_prot_mode = rts_cts;
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac vdev %d recalc rts/cts prot %d\n",
+ arvif->vdev_id, rts_cts);
+
+ ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ vdev_param, rts_cts);
+ if (ret)
+ ath12k_warn(ar->ab, "failed to recalculate rts/cts prot for vdev %d: %d\n",
+ arvif->vdev_id, ret);
+
+ return ret;
+}
+
+static int ath12k_mac_set_kickout(struct ath12k_vif *arvif)
+{
+ struct ath12k *ar = arvif->ar;
+ u32 param;
+ int ret;
+
+ ret = ath12k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_STA_KICKOUT_TH,
+ ATH12K_KICKOUT_THRESHOLD,
+ ar->pdev->pdev_id);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to set kickout threshold on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ param = WMI_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS;
+ ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, param,
+ ATH12K_KEEPALIVE_MIN_IDLE);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to set keepalive minimum idle time on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ param = WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS;
+ ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, param,
+ ATH12K_KEEPALIVE_MAX_IDLE);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to set keepalive maximum idle time on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ param = WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS;
+ ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, param,
+ ATH12K_KEEPALIVE_MAX_UNRESPONSIVE);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to set keepalive maximum unresponsive time on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+void ath12k_mac_peer_cleanup_all(struct ath12k *ar)
+{
+ struct ath12k_peer *peer, *tmp;
+ struct ath12k_base *ab = ar->ab;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ spin_lock_bh(&ab->base_lock);
+ list_for_each_entry_safe(peer, tmp, &ab->peers, list) {
+ ath12k_dp_rx_peer_tid_cleanup(ar, peer);
+ list_del(&peer->list);
+ kfree(peer);
+ }
+ spin_unlock_bh(&ab->base_lock);
+
+ ar->num_peers = 0;
+ ar->num_stations = 0;
+}
+
+static int ath12k_mac_vdev_setup_sync(struct ath12k *ar)
+{
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (test_bit(ATH12K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags))
+ return -ESHUTDOWN;
+
+ if (!wait_for_completion_timeout(&ar->vdev_setup_done,
+ ATH12K_VDEV_SETUP_TIMEOUT_HZ))
+ return -ETIMEDOUT;
+
+ return ar->last_wmi_vdev_start_status ? -EINVAL : 0;
+}
+
+static int ath12k_monitor_vdev_up(struct ath12k *ar, int vdev_id)
+{
+ int ret;
+
+ ret = ath12k_wmi_vdev_up(ar, vdev_id, 0, ar->mac_addr);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to put up monitor vdev %i: %d\n",
+ vdev_id, ret);
+ return ret;
+ }
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac monitor vdev %i started\n",
+ vdev_id);
+ return 0;
+}
+
+static int ath12k_mac_monitor_vdev_start(struct ath12k *ar, int vdev_id,
+ struct cfg80211_chan_def *chandef)
+{
+ struct ieee80211_channel *channel;
+ struct wmi_vdev_start_req_arg arg = {};
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ channel = chandef->chan;
+ arg.vdev_id = vdev_id;
+ arg.freq = channel->center_freq;
+ arg.band_center_freq1 = chandef->center_freq1;
+ arg.band_center_freq2 = chandef->center_freq2;
+ arg.mode = ath12k_phymodes[chandef->chan->band][chandef->width];
+ arg.chan_radar = !!(channel->flags & IEEE80211_CHAN_RADAR);
+
+ arg.min_power = 0;
+ arg.max_power = channel->max_power;
+ arg.max_reg_power = channel->max_reg_power;
+ arg.max_antenna_gain = channel->max_antenna_gain;
+
+ arg.pref_tx_streams = ar->num_tx_chains;
+ arg.pref_rx_streams = ar->num_rx_chains;
+
+ arg.passive |= !!(chandef->chan->flags & IEEE80211_CHAN_NO_IR);
+
+ reinit_completion(&ar->vdev_setup_done);
+ reinit_completion(&ar->vdev_delete_done);
+
+ ret = ath12k_wmi_vdev_start(ar, &arg, false);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to request monitor vdev %i start: %d\n",
+ vdev_id, ret);
+ return ret;
+ }
+
+ ret = ath12k_mac_vdev_setup_sync(ar);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to synchronize setup for monitor vdev %i start: %d\n",
+ vdev_id, ret);
+ return ret;
+ }
+
+ ret = ath12k_wmi_vdev_up(ar, vdev_id, 0, ar->mac_addr);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to put up monitor vdev %i: %d\n",
+ vdev_id, ret);
+ goto vdev_stop;
+ }
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac monitor vdev %i started\n",
+ vdev_id);
+ return 0;
+
+vdev_stop:
+ ret = ath12k_wmi_vdev_stop(ar, vdev_id);
+ if (ret)
+ ath12k_warn(ar->ab, "failed to stop monitor vdev %i after start failure: %d\n",
+ vdev_id, ret);
+ return ret;
+}
+
+static int ath12k_mac_monitor_vdev_stop(struct ath12k *ar)
+{
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ reinit_completion(&ar->vdev_setup_done);
+
+ ret = ath12k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
+ if (ret)
+ ath12k_warn(ar->ab, "failed to request monitor vdev %i stop: %d\n",
+ ar->monitor_vdev_id, ret);
+
+ ret = ath12k_mac_vdev_setup_sync(ar);
+ if (ret)
+ ath12k_warn(ar->ab, "failed to synchronize monitor vdev %i stop: %d\n",
+ ar->monitor_vdev_id, ret);
+
+ ret = ath12k_wmi_vdev_down(ar, ar->monitor_vdev_id);
+ if (ret)
+ ath12k_warn(ar->ab, "failed to put down monitor vdev %i: %d\n",
+ ar->monitor_vdev_id, ret);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac monitor vdev %i stopped\n",
+ ar->monitor_vdev_id);
+ return ret;
+}
+
+static int ath12k_mac_monitor_vdev_create(struct ath12k *ar)
+{
+ struct ath12k_pdev *pdev = ar->pdev;
+ struct ath12k_wmi_vdev_create_arg arg = {};
+ int bit, ret;
+ u8 tmp_addr[6];
+ u16 nss;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (ar->monitor_vdev_created)
+ return 0;
+
+ if (ar->ab->free_vdev_map == 0) {
+ ath12k_warn(ar->ab, "failed to find free vdev id for monitor vdev\n");
+ return -ENOMEM;
+ }
+
+ bit = __ffs64(ar->ab->free_vdev_map);
+
+ ar->monitor_vdev_id = bit;
+
+ arg.if_id = ar->monitor_vdev_id;
+ arg.type = WMI_VDEV_TYPE_MONITOR;
+ arg.subtype = WMI_VDEV_SUBTYPE_NONE;
+ arg.pdev_id = pdev->pdev_id;
+ arg.if_stats_id = ATH12K_INVAL_VDEV_STATS_ID;
+
+ if (pdev->cap.supported_bands & WMI_HOST_WLAN_2G_CAP) {
+ arg.chains[NL80211_BAND_2GHZ].tx = ar->num_tx_chains;
+ arg.chains[NL80211_BAND_2GHZ].rx = ar->num_rx_chains;
+ }
+
+ if (pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP) {
+ arg.chains[NL80211_BAND_5GHZ].tx = ar->num_tx_chains;
+ arg.chains[NL80211_BAND_5GHZ].rx = ar->num_rx_chains;
+ }
+
+ ret = ath12k_wmi_vdev_create(ar, tmp_addr, &arg);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to request monitor vdev %i creation: %d\n",
+ ar->monitor_vdev_id, ret);
+ ar->monitor_vdev_id = -1;
+ return ret;
+ }
+
+ nss = hweight32(ar->cfg_tx_chainmask) ? : 1;
+ ret = ath12k_wmi_vdev_set_param_cmd(ar, ar->monitor_vdev_id,
+ WMI_VDEV_PARAM_NSS, nss);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to set vdev %d chainmask 0x%x, nss %d :%d\n",
+ ar->monitor_vdev_id, ar->cfg_tx_chainmask, nss, ret);
+ return ret;
+ }
+
+ ret = ath12k_mac_txpower_recalc(ar);
+ if (ret)
+ return ret;
+
+ ar->allocated_vdev_map |= 1LL << ar->monitor_vdev_id;
+ ar->ab->free_vdev_map &= ~(1LL << ar->monitor_vdev_id);
+ ar->num_created_vdevs++;
+ ar->monitor_vdev_created = true;
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac monitor vdev %d created\n",
+ ar->monitor_vdev_id);
+
+ return 0;
+}
+
+static int ath12k_mac_monitor_vdev_delete(struct ath12k *ar)
+{
+ int ret;
+ unsigned long time_left;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (!ar->monitor_vdev_created)
+ return 0;
+
+ reinit_completion(&ar->vdev_delete_done);
+
+ ret = ath12k_wmi_vdev_delete(ar, ar->monitor_vdev_id);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to request wmi monitor vdev %i removal: %d\n",
+ ar->monitor_vdev_id, ret);
+ return ret;
+ }
+
+ time_left = wait_for_completion_timeout(&ar->vdev_delete_done,
+ ATH12K_VDEV_DELETE_TIMEOUT_HZ);
+ if (time_left == 0) {
+ ath12k_warn(ar->ab, "Timeout in receiving vdev delete response\n");
+ } else {
+ ar->allocated_vdev_map &= ~(1LL << ar->monitor_vdev_id);
+ ar->ab->free_vdev_map |= 1LL << (ar->monitor_vdev_id);
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac monitor vdev %d deleted\n",
+ ar->monitor_vdev_id);
+ ar->num_created_vdevs--;
+ ar->monitor_vdev_id = -1;
+ ar->monitor_vdev_created = false;
+ }
+
+ return ret;
+}
+
+static void
+ath12k_mac_get_any_chandef_iter(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *conf,
+ void *data)
+{
+ struct cfg80211_chan_def **def = data;
+
+ *def = &conf->def;
+}
+
+static int ath12k_mac_monitor_start(struct ath12k *ar)
+{
+ struct cfg80211_chan_def *chandef = NULL;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (ar->monitor_started)
+ return 0;
+
+ ieee80211_iter_chan_contexts_atomic(ar->hw,
+ ath12k_mac_get_any_chandef_iter,
+ &chandef);
+ if (!chandef)
+ return 0;
+
+ ret = ath12k_mac_monitor_vdev_start(ar, ar->monitor_vdev_id, chandef);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to start monitor vdev: %d\n", ret);
+ ath12k_mac_monitor_vdev_delete(ar);
+ return ret;
+ }
+
+ ar->monitor_started = true;
+ ar->num_started_vdevs++;
+ ret = ath12k_dp_tx_htt_monitor_mode_ring_config(ar, false);
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac monitor started ret %d\n", ret);
+
+ return ret;
+}
+
+static int ath12k_mac_monitor_stop(struct ath12k *ar)
+{
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (!ar->monitor_started)
+ return 0;
+
+ ret = ath12k_mac_monitor_vdev_stop(ar);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to stop monitor vdev: %d\n", ret);
+ return ret;
+ }
+
+ ar->monitor_started = false;
+ ar->num_started_vdevs--;
+ ret = ath12k_dp_tx_htt_monitor_mode_ring_config(ar, true);
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac monitor stopped ret %d\n", ret);
+ return ret;
+}
+
+static int ath12k_mac_op_config(struct ieee80211_hw *hw, u32 changed)
+{
+ struct ath12k *ar = hw->priv;
+ struct ieee80211_conf *conf = &hw->conf;
+ int ret = 0;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
+ ar->monitor_conf_enabled = conf->flags & IEEE80211_CONF_MONITOR;
+ if (ar->monitor_conf_enabled) {
+ if (ar->monitor_vdev_created)
+ goto exit;
+ ret = ath12k_mac_monitor_vdev_create(ar);
+ if (ret)
+ goto exit;
+ ret = ath12k_mac_monitor_start(ar);
+ if (ret)
+ goto err_mon_del;
+ } else {
+ if (!ar->monitor_vdev_created)
+ goto exit;
+ ret = ath12k_mac_monitor_stop(ar);
+ if (ret)
+ goto exit;
+ ath12k_mac_monitor_vdev_delete(ar);
+ }
+ }
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+
+err_mon_del:
+ ath12k_mac_monitor_vdev_delete(ar);
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static int ath12k_mac_setup_bcn_tmpl(struct ath12k_vif *arvif)
+{
+ struct ath12k *ar = arvif->ar;
+ struct ath12k_base *ab = ar->ab;
+ struct ieee80211_hw *hw = ar->hw;
+ struct ieee80211_vif *vif = arvif->vif;
+ struct ieee80211_mutable_offsets offs = {};
+ struct sk_buff *bcn;
+ struct ieee80211_mgmt *mgmt;
+ u8 *ies;
+ int ret;
+
+ if (arvif->vdev_type != WMI_VDEV_TYPE_AP)
+ return 0;
+
+ bcn = ieee80211_beacon_get_template(hw, vif, &offs, 0);
+ if (!bcn) {
+ ath12k_warn(ab, "failed to get beacon template from mac80211\n");
+ return -EPERM;
+ }
+
+ ies = bcn->data + ieee80211_get_hdrlen_from_skb(bcn);
+ ies += sizeof(mgmt->u.beacon);
+
+ if (cfg80211_find_ie(WLAN_EID_RSN, ies, (skb_tail_pointer(bcn) - ies)))
+ arvif->rsnie_present = true;
+
+ if (cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
+ WLAN_OUI_TYPE_MICROSOFT_WPA,
+ ies, (skb_tail_pointer(bcn) - ies)))
+ arvif->wpaie_present = true;
+
+ ret = ath12k_wmi_bcn_tmpl(ar, arvif->vdev_id, &offs, bcn);
+
+ kfree_skb(bcn);
+
+ if (ret)
+ ath12k_warn(ab, "failed to submit beacon template command: %d\n",
+ ret);
+
+ return ret;
+}
+
+static void ath12k_control_beaconing(struct ath12k_vif *arvif,
+ struct ieee80211_bss_conf *info)
+{
+ struct ath12k *ar = arvif->ar;
+ int ret;
+
+ lockdep_assert_held(&arvif->ar->conf_mutex);
+
+ if (!info->enable_beacon) {
+ ret = ath12k_wmi_vdev_down(ar, arvif->vdev_id);
+ if (ret)
+ ath12k_warn(ar->ab, "failed to down vdev_id %i: %d\n",
+ arvif->vdev_id, ret);
+
+ arvif->is_up = false;
+ return;
+ }
+
+ /* Install the beacon template to the FW */
+ ret = ath12k_mac_setup_bcn_tmpl(arvif);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to update bcn tmpl during vdev up: %d\n",
+ ret);
+ return;
+ }
+
+ arvif->aid = 0;
+
+ ether_addr_copy(arvif->bssid, info->bssid);
+
+ ret = ath12k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
+ arvif->bssid);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to bring up vdev %d: %i\n",
+ arvif->vdev_id, ret);
+ return;
+ }
+
+ arvif->is_up = true;
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac vdev %d up\n", arvif->vdev_id);
+}
+
+static void ath12k_peer_assoc_h_basic(struct ath12k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ath12k_wmi_peer_assoc_arg *arg)
+{
+ struct ath12k_vif *arvif = (void *)vif->drv_priv;
+ u32 aid;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (vif->type == NL80211_IFTYPE_STATION)
+ aid = vif->cfg.aid;
+ else
+ aid = sta->aid;
+
+ ether_addr_copy(arg->peer_mac, sta->addr);
+ arg->vdev_id = arvif->vdev_id;
+ arg->peer_associd = aid;
+ arg->auth_flag = true;
+ /* TODO: STA WAR in ath10k for listen interval required? */
+ arg->peer_listen_intval = ar->hw->conf.listen_interval;
+ arg->peer_nss = 1;
+ arg->peer_caps = vif->bss_conf.assoc_capability;
+}
+
+static void ath12k_peer_assoc_h_crypto(struct ath12k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ath12k_wmi_peer_assoc_arg *arg)
+{
+ struct ieee80211_bss_conf *info = &vif->bss_conf;
+ struct cfg80211_chan_def def;
+ struct cfg80211_bss *bss;
+ struct ath12k_vif *arvif = (struct ath12k_vif *)vif->drv_priv;
+ const u8 *rsnie = NULL;
+ const u8 *wpaie = NULL;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (WARN_ON(ath12k_mac_vif_chan(vif, &def)))
+ return;
+
+ bss = cfg80211_get_bss(ar->hw->wiphy, def.chan, info->bssid, NULL, 0,
+ IEEE80211_BSS_TYPE_ANY, IEEE80211_PRIVACY_ANY);
+
+ if (arvif->rsnie_present || arvif->wpaie_present) {
+ arg->need_ptk_4_way = true;
+ if (arvif->wpaie_present)
+ arg->need_gtk_2_way = true;
+ } else if (bss) {
+ const struct cfg80211_bss_ies *ies;
+
+ rcu_read_lock();
+ rsnie = ieee80211_bss_get_ie(bss, WLAN_EID_RSN);
+
+ ies = rcu_dereference(bss->ies);
+
+ wpaie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
+ WLAN_OUI_TYPE_MICROSOFT_WPA,
+ ies->data,
+ ies->len);
+ rcu_read_unlock();
+ cfg80211_put_bss(ar->hw->wiphy, bss);
+ }
+
+ /* FIXME: base on RSN IE/WPA IE is a correct idea? */
+ if (rsnie || wpaie) {
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "%s: rsn ie found\n", __func__);
+ arg->need_ptk_4_way = true;
+ }
+
+ if (wpaie) {
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "%s: wpa ie found\n", __func__);
+ arg->need_gtk_2_way = true;
+ }
+
+ if (sta->mfp) {
+ /* TODO: Need to check if FW supports PMF? */
+ arg->is_pmf_enabled = true;
+ }
+
+ /* TODO: safe_mode_enabled (bypass 4-way handshake) flag req? */
+}
+
+static void ath12k_peer_assoc_h_rates(struct ath12k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ath12k_wmi_peer_assoc_arg *arg)
+{
+ struct ath12k_vif *arvif = (void *)vif->drv_priv;
+ struct wmi_rate_set_arg *rateset = &arg->peer_legacy_rates;
+ struct cfg80211_chan_def def;
+ const struct ieee80211_supported_band *sband;
+ const struct ieee80211_rate *rates;
+ enum nl80211_band band;
+ u32 ratemask;
+ u8 rate;
+ int i;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (WARN_ON(ath12k_mac_vif_chan(vif, &def)))
+ return;
+
+ band = def.chan->band;
+ sband = ar->hw->wiphy->bands[band];
+ ratemask = sta->deflink.supp_rates[band];
+ ratemask &= arvif->bitrate_mask.control[band].legacy;
+ rates = sband->bitrates;
+
+ rateset->num_rates = 0;
+
+ for (i = 0; i < 32; i++, ratemask >>= 1, rates++) {
+ if (!(ratemask & 1))
+ continue;
+
+ rate = ath12k_mac_bitrate_to_rate(rates->bitrate);
+ rateset->rates[rateset->num_rates] = rate;
+ rateset->num_rates++;
+ }
+}
+
+static bool
+ath12k_peer_assoc_h_ht_masked(const u8 ht_mcs_mask[IEEE80211_HT_MCS_MASK_LEN])
+{
+ int nss;
+
+ for (nss = 0; nss < IEEE80211_HT_MCS_MASK_LEN; nss++)
+ if (ht_mcs_mask[nss])
+ return false;
+
+ return true;
+}
+
+static bool
+ath12k_peer_assoc_h_vht_masked(const u16 vht_mcs_mask[NL80211_VHT_NSS_MAX])
+{
+ int nss;
+
+ for (nss = 0; nss < NL80211_VHT_NSS_MAX; nss++)
+ if (vht_mcs_mask[nss])
+ return false;
+
+ return true;
+}
+
+static void ath12k_peer_assoc_h_ht(struct ath12k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ath12k_wmi_peer_assoc_arg *arg)
+{
+ const struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap;
+ struct ath12k_vif *arvif = (void *)vif->drv_priv;
+ struct cfg80211_chan_def def;
+ enum nl80211_band band;
+ const u8 *ht_mcs_mask;
+ int i, n;
+ u8 max_nss;
+ u32 stbc;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (WARN_ON(ath12k_mac_vif_chan(vif, &def)))
+ return;
+
+ if (!ht_cap->ht_supported)
+ return;
+
+ band = def.chan->band;
+ ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs;
+
+ if (ath12k_peer_assoc_h_ht_masked(ht_mcs_mask))
+ return;
+
+ arg->ht_flag = true;
+
+ arg->peer_max_mpdu = (1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
+ ht_cap->ampdu_factor)) - 1;
+
+ arg->peer_mpdu_density =
+ ath12k_parse_mpdudensity(ht_cap->ampdu_density);
+
+ arg->peer_ht_caps = ht_cap->cap;
+ arg->peer_rate_caps |= WMI_HOST_RC_HT_FLAG;
+
+ if (ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING)
+ arg->ldpc_flag = true;
+
+ if (sta->deflink.bandwidth >= IEEE80211_STA_RX_BW_40) {
+ arg->bw_40 = true;
+ arg->peer_rate_caps |= WMI_HOST_RC_CW40_FLAG;
+ }
+
+ if (arvif->bitrate_mask.control[band].gi != NL80211_TXRATE_FORCE_LGI) {
+ if (ht_cap->cap & (IEEE80211_HT_CAP_SGI_20 |
+ IEEE80211_HT_CAP_SGI_40))
+ arg->peer_rate_caps |= WMI_HOST_RC_SGI_FLAG;
+ }
+
+ if (ht_cap->cap & IEEE80211_HT_CAP_TX_STBC) {
+ arg->peer_rate_caps |= WMI_HOST_RC_TX_STBC_FLAG;
+ arg->stbc_flag = true;
+ }
+
+ if (ht_cap->cap & IEEE80211_HT_CAP_RX_STBC) {
+ stbc = ht_cap->cap & IEEE80211_HT_CAP_RX_STBC;
+ stbc = stbc >> IEEE80211_HT_CAP_RX_STBC_SHIFT;
+ stbc = stbc << WMI_HOST_RC_RX_STBC_FLAG_S;
+ arg->peer_rate_caps |= stbc;
+ arg->stbc_flag = true;
+ }
+
+ if (ht_cap->mcs.rx_mask[1] && ht_cap->mcs.rx_mask[2])
+ arg->peer_rate_caps |= WMI_HOST_RC_TS_FLAG;
+ else if (ht_cap->mcs.rx_mask[1])
+ arg->peer_rate_caps |= WMI_HOST_RC_DS_FLAG;
+
+ for (i = 0, n = 0, max_nss = 0; i < IEEE80211_HT_MCS_MASK_LEN * 8; i++)
+ if ((ht_cap->mcs.rx_mask[i / 8] & BIT(i % 8)) &&
+ (ht_mcs_mask[i / 8] & BIT(i % 8))) {
+ max_nss = (i / 8) + 1;
+ arg->peer_ht_rates.rates[n++] = i;
+ }
+
+ /* This is a workaround for HT-enabled STAs which break the spec
+ * and have no HT capabilities RX mask (no HT RX MCS map).
+ *
+ * As per spec, in section 20.3.5 Modulation and coding scheme (MCS),
+ * MCS 0 through 7 are mandatory in 20MHz with 800 ns GI at all STAs.
+ *
+ * Firmware asserts if such situation occurs.
+ */
+ if (n == 0) {
+ arg->peer_ht_rates.num_rates = 8;
+ for (i = 0; i < arg->peer_ht_rates.num_rates; i++)
+ arg->peer_ht_rates.rates[i] = i;
+ } else {
+ arg->peer_ht_rates.num_rates = n;
+ arg->peer_nss = min(sta->deflink.rx_nss, max_nss);
+ }
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac ht peer %pM mcs cnt %d nss %d\n",
+ arg->peer_mac,
+ arg->peer_ht_rates.num_rates,
+ arg->peer_nss);
+}
+
+static int ath12k_mac_get_max_vht_mcs_map(u16 mcs_map, int nss)
+{
+ switch ((mcs_map >> (2 * nss)) & 0x3) {
+ case IEEE80211_VHT_MCS_SUPPORT_0_7: return BIT(8) - 1;
+ case IEEE80211_VHT_MCS_SUPPORT_0_8: return BIT(9) - 1;
+ case IEEE80211_VHT_MCS_SUPPORT_0_9: return BIT(10) - 1;
+ }
+ return 0;
+}
+
+static u16
+ath12k_peer_assoc_h_vht_limit(u16 tx_mcs_set,
+ const u16 vht_mcs_limit[NL80211_VHT_NSS_MAX])
+{
+ int idx_limit;
+ int nss;
+ u16 mcs_map;
+ u16 mcs;
+
+ for (nss = 0; nss < NL80211_VHT_NSS_MAX; nss++) {
+ mcs_map = ath12k_mac_get_max_vht_mcs_map(tx_mcs_set, nss) &
+ vht_mcs_limit[nss];
+
+ if (mcs_map)
+ idx_limit = fls(mcs_map) - 1;
+ else
+ idx_limit = -1;
+
+ switch (idx_limit) {
+ case 0:
+ case 1:
+ case 2:
+ case 3:
+ case 4:
+ case 5:
+ case 6:
+ case 7:
+ mcs = IEEE80211_VHT_MCS_SUPPORT_0_7;
+ break;
+ case 8:
+ mcs = IEEE80211_VHT_MCS_SUPPORT_0_8;
+ break;
+ case 9:
+ mcs = IEEE80211_VHT_MCS_SUPPORT_0_9;
+ break;
+ default:
+ WARN_ON(1);
+ fallthrough;
+ case -1:
+ mcs = IEEE80211_VHT_MCS_NOT_SUPPORTED;
+ break;
+ }
+
+ tx_mcs_set &= ~(0x3 << (nss * 2));
+ tx_mcs_set |= mcs << (nss * 2);
+ }
+
+ return tx_mcs_set;
+}
+
+static void ath12k_peer_assoc_h_vht(struct ath12k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ath12k_wmi_peer_assoc_arg *arg)
+{
+ const struct ieee80211_sta_vht_cap *vht_cap = &sta->deflink.vht_cap;
+ struct ath12k_vif *arvif = (void *)vif->drv_priv;
+ struct cfg80211_chan_def def;
+ enum nl80211_band band;
+ const u16 *vht_mcs_mask;
+ u16 tx_mcs_map;
+ u8 ampdu_factor;
+ u8 max_nss, vht_mcs;
+ int i;
+
+ if (WARN_ON(ath12k_mac_vif_chan(vif, &def)))
+ return;
+
+ if (!vht_cap->vht_supported)
+ return;
+
+ band = def.chan->band;
+ vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
+
+ if (ath12k_peer_assoc_h_vht_masked(vht_mcs_mask))
+ return;
+
+ arg->vht_flag = true;
+
+ /* TODO: similar flags required? */
+ arg->vht_capable = true;
+
+ if (def.chan->band == NL80211_BAND_2GHZ)
+ arg->vht_ng_flag = true;
+
+ arg->peer_vht_caps = vht_cap->cap;
+
+ ampdu_factor = (vht_cap->cap &
+ IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK) >>
+ IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
+
+ /* Workaround: Some Netgear/Linksys 11ac APs set Rx A-MPDU factor to
+ * zero in VHT IE. Using it would result in degraded throughput.
+ * arg->peer_max_mpdu at this point contains HT max_mpdu so keep
+ * it if VHT max_mpdu is smaller.
+ */
+ arg->peer_max_mpdu = max(arg->peer_max_mpdu,
+ (1U << (IEEE80211_HT_MAX_AMPDU_FACTOR +
+ ampdu_factor)) - 1);
+
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80)
+ arg->bw_80 = true;
+
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160)
+ arg->bw_160 = true;
+
+ /* Calculate peer NSS capability from VHT capabilities if STA
+ * supports VHT.
+ */
+ for (i = 0, max_nss = 0, vht_mcs = 0; i < NL80211_VHT_NSS_MAX; i++) {
+ vht_mcs = __le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map) >>
+ (2 * i) & 3;
+
+ if (vht_mcs != IEEE80211_VHT_MCS_NOT_SUPPORTED &&
+ vht_mcs_mask[i])
+ max_nss = i + 1;
+ }
+ arg->peer_nss = min(sta->deflink.rx_nss, max_nss);
+ arg->rx_max_rate = __le16_to_cpu(vht_cap->vht_mcs.rx_highest);
+ arg->rx_mcs_set = __le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map);
+ arg->tx_max_rate = __le16_to_cpu(vht_cap->vht_mcs.tx_highest);
+
+ tx_mcs_map = __le16_to_cpu(vht_cap->vht_mcs.tx_mcs_map);
+ arg->tx_mcs_set = ath12k_peer_assoc_h_vht_limit(tx_mcs_map, vht_mcs_mask);
+
+ /* In QCN9274 platform, VHT MCS rate 10 and 11 is enabled by default.
+ * VHT MCS rate 10 and 11 is not supported in 11ac standard.
+ * so explicitly disable the VHT MCS rate 10 and 11 in 11ac mode.
+ */
+ arg->tx_mcs_set &= ~IEEE80211_VHT_MCS_SUPPORT_0_11_MASK;
+ arg->tx_mcs_set |= IEEE80211_DISABLE_VHT_MCS_SUPPORT_0_11;
+
+ if ((arg->tx_mcs_set & IEEE80211_VHT_MCS_NOT_SUPPORTED) ==
+ IEEE80211_VHT_MCS_NOT_SUPPORTED)
+ arg->peer_vht_caps &= ~IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE;
+
+ /* TODO: Check */
+ arg->tx_max_mcs_nss = 0xFF;
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac vht peer %pM max_mpdu %d flags 0x%x\n",
+ sta->addr, arg->peer_max_mpdu, arg->peer_flags);
+
+ /* TODO: rxnss_override */
+}
+
+static void ath12k_peer_assoc_h_he(struct ath12k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ath12k_wmi_peer_assoc_arg *arg)
+{
+ const struct ieee80211_sta_he_cap *he_cap = &sta->deflink.he_cap;
+ int i;
+ u8 ampdu_factor, rx_mcs_80, rx_mcs_160, max_nss;
+ u16 mcs_160_map, mcs_80_map;
+ bool support_160;
+ u16 v;
+
+ if (!he_cap->has_he)
+ return;
+
+ arg->he_flag = true;
+
+ support_160 = !!(he_cap->he_cap_elem.phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G);
+
+ /* Supported HE-MCS and NSS Set of peer he_cap is intersection with self he_cp */
+ mcs_160_map = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_160);
+ mcs_80_map = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_80);
+
+ if (support_160) {
+ for (i = 7; i >= 0; i--) {
+ u8 mcs_160 = (mcs_160_map >> (2 * i)) & 3;
+
+ if (mcs_160 != IEEE80211_HE_MCS_NOT_SUPPORTED) {
+ rx_mcs_160 = i + 1;
+ break;
+ }
+ }
+ }
+
+ for (i = 7; i >= 0; i--) {
+ u8 mcs_80 = (mcs_80_map >> (2 * i)) & 3;
+
+ if (mcs_80 != IEEE80211_HE_MCS_NOT_SUPPORTED) {
+ rx_mcs_80 = i + 1;
+ break;
+ }
+ }
+
+ if (support_160)
+ max_nss = min(rx_mcs_80, rx_mcs_160);
+ else
+ max_nss = rx_mcs_80;
+
+ arg->peer_nss = min(sta->deflink.rx_nss, max_nss);
+
+ memcpy(&arg->peer_he_cap_macinfo, he_cap->he_cap_elem.mac_cap_info,
+ sizeof(arg->peer_he_cap_macinfo));
+ memcpy(&arg->peer_he_cap_phyinfo, he_cap->he_cap_elem.phy_cap_info,
+ sizeof(arg->peer_he_cap_phyinfo));
+ arg->peer_he_ops = vif->bss_conf.he_oper.params;
+
+ /* the top most byte is used to indicate BSS color info */
+ arg->peer_he_ops &= 0xffffff;
+
+ /* As per section 26.6.1 IEEE Std 802.11ax‐2022, if the Max AMPDU
+ * Exponent Extension in HE cap is zero, use the arg->peer_max_mpdu
+ * as calculated while parsing VHT caps(if VHT caps is present)
+ * or HT caps (if VHT caps is not present).
+ *
+ * For non-zero value of Max AMPDU Exponent Extension in HE MAC caps,
+ * if a HE STA sends VHT cap and HE cap IE in assoc request then, use
+ * MAX_AMPDU_LEN_FACTOR as 20 to calculate max_ampdu length.
+ * If a HE STA that does not send VHT cap, but HE and HT cap in assoc
+ * request, then use MAX_AMPDU_LEN_FACTOR as 16 to calculate max_ampdu
+ * length.
+ */
+ ampdu_factor = (he_cap->he_cap_elem.mac_cap_info[3] &
+ IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK) >>
+ IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK;
+
+ if (ampdu_factor) {
+ if (sta->deflink.vht_cap.vht_supported)
+ arg->peer_max_mpdu = (1 << (IEEE80211_HE_VHT_MAX_AMPDU_FACTOR +
+ ampdu_factor)) - 1;
+ else if (sta->deflink.ht_cap.ht_supported)
+ arg->peer_max_mpdu = (1 << (IEEE80211_HE_HT_MAX_AMPDU_FACTOR +
+ ampdu_factor)) - 1;
+ }
+
+ if (he_cap->he_cap_elem.phy_cap_info[6] &
+ IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) {
+ int bit = 7;
+ int nss, ru;
+
+ arg->peer_ppet.numss_m1 = he_cap->ppe_thres[0] &
+ IEEE80211_PPE_THRES_NSS_MASK;
+ arg->peer_ppet.ru_bit_mask =
+ (he_cap->ppe_thres[0] &
+ IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK) >>
+ IEEE80211_PPE_THRES_RU_INDEX_BITMASK_POS;
+
+ for (nss = 0; nss <= arg->peer_ppet.numss_m1; nss++) {
+ for (ru = 0; ru < 4; ru++) {
+ u32 val = 0;
+ int i;
+
+ if ((arg->peer_ppet.ru_bit_mask & BIT(ru)) == 0)
+ continue;
+ for (i = 0; i < 6; i++) {
+ val >>= 1;
+ val |= ((he_cap->ppe_thres[bit / 8] >>
+ (bit % 8)) & 0x1) << 5;
+ bit++;
+ }
+ arg->peer_ppet.ppet16_ppet8_ru3_ru0[nss] |=
+ val << (ru * 6);
+ }
+ }
+ }
+
+ if (he_cap->he_cap_elem.mac_cap_info[0] & IEEE80211_HE_MAC_CAP0_TWT_RES)
+ arg->twt_responder = true;
+ if (he_cap->he_cap_elem.mac_cap_info[0] & IEEE80211_HE_MAC_CAP0_TWT_REQ)
+ arg->twt_requester = true;
+
+ switch (sta->deflink.bandwidth) {
+ case IEEE80211_STA_RX_BW_160:
+ if (he_cap->he_cap_elem.phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G) {
+ v = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_80p80);
+ arg->peer_he_rx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_80_80] = v;
+
+ v = le16_to_cpu(he_cap->he_mcs_nss_supp.tx_mcs_80p80);
+ arg->peer_he_tx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_80_80] = v;
+
+ arg->peer_he_mcs_count++;
+ }
+ v = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_160);
+ arg->peer_he_rx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_160] = v;
+
+ v = le16_to_cpu(he_cap->he_mcs_nss_supp.tx_mcs_160);
+ arg->peer_he_tx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_160] = v;
+
+ arg->peer_he_mcs_count++;
+ fallthrough;
+
+ default:
+ v = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_80);
+ arg->peer_he_rx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_80] = v;
+
+ v = le16_to_cpu(he_cap->he_mcs_nss_supp.tx_mcs_80);
+ arg->peer_he_tx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_80] = v;
+
+ arg->peer_he_mcs_count++;
+ break;
+ }
+}
+
+static void ath12k_peer_assoc_h_smps(struct ieee80211_sta *sta,
+ struct ath12k_wmi_peer_assoc_arg *arg)
+{
+ const struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap;
+ int smps;
+
+ if (!ht_cap->ht_supported)
+ return;
+
+ smps = ht_cap->cap & IEEE80211_HT_CAP_SM_PS;
+ smps >>= IEEE80211_HT_CAP_SM_PS_SHIFT;
+
+ switch (smps) {
+ case WLAN_HT_CAP_SM_PS_STATIC:
+ arg->static_mimops_flag = true;
+ break;
+ case WLAN_HT_CAP_SM_PS_DYNAMIC:
+ arg->dynamic_mimops_flag = true;
+ break;
+ case WLAN_HT_CAP_SM_PS_DISABLED:
+ arg->spatial_mux_flag = true;
+ break;
+ default:
+ break;
+ }
+}
+
+static void ath12k_peer_assoc_h_qos(struct ath12k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ath12k_wmi_peer_assoc_arg *arg)
+{
+ struct ath12k_vif *arvif = (void *)vif->drv_priv;
+
+ switch (arvif->vdev_type) {
+ case WMI_VDEV_TYPE_AP:
+ if (sta->wme) {
+ /* TODO: Check WME vs QoS */
+ arg->is_wme_set = true;
+ arg->qos_flag = true;
+ }
+
+ if (sta->wme && sta->uapsd_queues) {
+ /* TODO: Check WME vs QoS */
+ arg->is_wme_set = true;
+ arg->apsd_flag = true;
+ arg->peer_rate_caps |= WMI_HOST_RC_UAPSD_FLAG;
+ }
+ break;
+ case WMI_VDEV_TYPE_STA:
+ if (sta->wme) {
+ arg->is_wme_set = true;
+ arg->qos_flag = true;
+ }
+ break;
+ default:
+ break;
+ }
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac peer %pM qos %d\n",
+ sta->addr, arg->qos_flag);
+}
+
+static int ath12k_peer_assoc_qos_ap(struct ath12k *ar,
+ struct ath12k_vif *arvif,
+ struct ieee80211_sta *sta)
+{
+ struct ath12k_wmi_ap_ps_arg arg;
+ u32 max_sp;
+ u32 uapsd;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ arg.vdev_id = arvif->vdev_id;
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac uapsd_queues 0x%x max_sp %d\n",
+ sta->uapsd_queues, sta->max_sp);
+
+ uapsd = 0;
+ if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
+ uapsd |= WMI_AP_PS_UAPSD_AC3_DELIVERY_EN |
+ WMI_AP_PS_UAPSD_AC3_TRIGGER_EN;
+ if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
+ uapsd |= WMI_AP_PS_UAPSD_AC2_DELIVERY_EN |
+ WMI_AP_PS_UAPSD_AC2_TRIGGER_EN;
+ if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
+ uapsd |= WMI_AP_PS_UAPSD_AC1_DELIVERY_EN |
+ WMI_AP_PS_UAPSD_AC1_TRIGGER_EN;
+ if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
+ uapsd |= WMI_AP_PS_UAPSD_AC0_DELIVERY_EN |
+ WMI_AP_PS_UAPSD_AC0_TRIGGER_EN;
+
+ max_sp = 0;
+ if (sta->max_sp < MAX_WMI_AP_PS_PEER_PARAM_MAX_SP)
+ max_sp = sta->max_sp;
+
+ arg.param = WMI_AP_PS_PEER_PARAM_UAPSD;
+ arg.value = uapsd;
+ ret = ath12k_wmi_send_set_ap_ps_param_cmd(ar, sta->addr, &arg);
+ if (ret)
+ goto err;
+
+ arg.param = WMI_AP_PS_PEER_PARAM_MAX_SP;
+ arg.value = max_sp;
+ ret = ath12k_wmi_send_set_ap_ps_param_cmd(ar, sta->addr, &arg);
+ if (ret)
+ goto err;
+
+ /* TODO: revisit during testing */
+ arg.param = WMI_AP_PS_PEER_PARAM_SIFS_RESP_FRMTYPE;
+ arg.value = DISABLE_SIFS_RESPONSE_TRIGGER;
+ ret = ath12k_wmi_send_set_ap_ps_param_cmd(ar, sta->addr, &arg);
+ if (ret)
+ goto err;
+
+ arg.param = WMI_AP_PS_PEER_PARAM_SIFS_RESP_UAPSD;
+ arg.value = DISABLE_SIFS_RESPONSE_TRIGGER;
+ ret = ath12k_wmi_send_set_ap_ps_param_cmd(ar, sta->addr, &arg);
+ if (ret)
+ goto err;
+
+ return 0;
+
+err:
+ ath12k_warn(ar->ab, "failed to set ap ps peer param %d for vdev %i: %d\n",
+ arg.param, arvif->vdev_id, ret);
+ return ret;
+}
+
+static bool ath12k_mac_sta_has_ofdm_only(struct ieee80211_sta *sta)
+{
+ return sta->deflink.supp_rates[NL80211_BAND_2GHZ] >>
+ ATH12K_MAC_FIRST_OFDM_RATE_IDX;
+}
+
+static enum wmi_phy_mode ath12k_mac_get_phymode_vht(struct ath12k *ar,
+ struct ieee80211_sta *sta)
+{
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160) {
+ switch (sta->deflink.vht_cap.cap &
+ IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) {
+ case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ:
+ return MODE_11AC_VHT160;
+ case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ:
+ return MODE_11AC_VHT80_80;
+ default:
+ /* not sure if this is a valid case? */
+ return MODE_11AC_VHT160;
+ }
+ }
+
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80)
+ return MODE_11AC_VHT80;
+
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
+ return MODE_11AC_VHT40;
+
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_20)
+ return MODE_11AC_VHT20;
+
+ return MODE_UNKNOWN;
+}
+
+static enum wmi_phy_mode ath12k_mac_get_phymode_he(struct ath12k *ar,
+ struct ieee80211_sta *sta)
+{
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160) {
+ if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G)
+ return MODE_11AX_HE160;
+ else if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)
+ return MODE_11AX_HE80_80;
+ /* not sure if this is a valid case? */
+ return MODE_11AX_HE160;
+ }
+
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80)
+ return MODE_11AX_HE80;
+
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
+ return MODE_11AX_HE40;
+
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_20)
+ return MODE_11AX_HE20;
+
+ return MODE_UNKNOWN;
+}
+
+static void ath12k_peer_assoc_h_phymode(struct ath12k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ath12k_wmi_peer_assoc_arg *arg)
+{
+ struct ath12k_vif *arvif = (void *)vif->drv_priv;
+ struct cfg80211_chan_def def;
+ enum nl80211_band band;
+ const u8 *ht_mcs_mask;
+ const u16 *vht_mcs_mask;
+ enum wmi_phy_mode phymode = MODE_UNKNOWN;
+
+ if (WARN_ON(ath12k_mac_vif_chan(vif, &def)))
+ return;
+
+ band = def.chan->band;
+ ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs;
+ vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
+
+ switch (band) {
+ case NL80211_BAND_2GHZ:
+ if (sta->deflink.he_cap.has_he) {
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80)
+ phymode = MODE_11AX_HE80_2G;
+ else if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
+ phymode = MODE_11AX_HE40_2G;
+ else
+ phymode = MODE_11AX_HE20_2G;
+ } else if (sta->deflink.vht_cap.vht_supported &&
+ !ath12k_peer_assoc_h_vht_masked(vht_mcs_mask)) {
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
+ phymode = MODE_11AC_VHT40;
+ else
+ phymode = MODE_11AC_VHT20;
+ } else if (sta->deflink.ht_cap.ht_supported &&
+ !ath12k_peer_assoc_h_ht_masked(ht_mcs_mask)) {
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
+ phymode = MODE_11NG_HT40;
+ else
+ phymode = MODE_11NG_HT20;
+ } else if (ath12k_mac_sta_has_ofdm_only(sta)) {
+ phymode = MODE_11G;
+ } else {
+ phymode = MODE_11B;
+ }
+ break;
+ case NL80211_BAND_5GHZ:
+ case NL80211_BAND_6GHZ:
+ /* Check HE first */
+ if (sta->deflink.he_cap.has_he) {
+ phymode = ath12k_mac_get_phymode_he(ar, sta);
+ } else if (sta->deflink.vht_cap.vht_supported &&
+ !ath12k_peer_assoc_h_vht_masked(vht_mcs_mask)) {
+ phymode = ath12k_mac_get_phymode_vht(ar, sta);
+ } else if (sta->deflink.ht_cap.ht_supported &&
+ !ath12k_peer_assoc_h_ht_masked(ht_mcs_mask)) {
+ if (sta->deflink.bandwidth >= IEEE80211_STA_RX_BW_40)
+ phymode = MODE_11NA_HT40;
+ else
+ phymode = MODE_11NA_HT20;
+ } else {
+ phymode = MODE_11A;
+ }
+ break;
+ default:
+ break;
+ }
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac peer %pM phymode %s\n",
+ sta->addr, ath12k_mac_phymode_str(phymode));
+
+ arg->peer_phymode = phymode;
+ WARN_ON(phymode == MODE_UNKNOWN);
+}
+
+static void ath12k_peer_assoc_prepare(struct ath12k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ath12k_wmi_peer_assoc_arg *arg,
+ bool reassoc)
+{
+ lockdep_assert_held(&ar->conf_mutex);
+
+ memset(arg, 0, sizeof(*arg));
+
+ reinit_completion(&ar->peer_assoc_done);
+
+ arg->peer_new_assoc = !reassoc;
+ ath12k_peer_assoc_h_basic(ar, vif, sta, arg);
+ ath12k_peer_assoc_h_crypto(ar, vif, sta, arg);
+ ath12k_peer_assoc_h_rates(ar, vif, sta, arg);
+ ath12k_peer_assoc_h_ht(ar, vif, sta, arg);
+ ath12k_peer_assoc_h_vht(ar, vif, sta, arg);
+ ath12k_peer_assoc_h_he(ar, vif, sta, arg);
+ ath12k_peer_assoc_h_qos(ar, vif, sta, arg);
+ ath12k_peer_assoc_h_phymode(ar, vif, sta, arg);
+ ath12k_peer_assoc_h_smps(sta, arg);
+
+ /* TODO: amsdu_disable req? */
+}
+
+static int ath12k_setup_peer_smps(struct ath12k *ar, struct ath12k_vif *arvif,
+ const u8 *addr,
+ const struct ieee80211_sta_ht_cap *ht_cap)
+{
+ int smps;
+
+ if (!ht_cap->ht_supported)
+ return 0;
+
+ smps = ht_cap->cap & IEEE80211_HT_CAP_SM_PS;
+ smps >>= IEEE80211_HT_CAP_SM_PS_SHIFT;
+
+ if (smps >= ARRAY_SIZE(ath12k_smps_map))
+ return -EINVAL;
+
+ return ath12k_wmi_set_peer_param(ar, addr, arvif->vdev_id,
+ WMI_PEER_MIMO_PS_STATE,
+ ath12k_smps_map[smps]);
+}
+
+static void ath12k_bss_assoc(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf)
+{
+ struct ath12k *ar = hw->priv;
+ struct ath12k_vif *arvif = (void *)vif->drv_priv;
+ struct ath12k_wmi_peer_assoc_arg peer_arg;
+ struct ieee80211_sta *ap_sta;
+ struct ath12k_peer *peer;
+ bool is_auth = false;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac vdev %i assoc bssid %pM aid %d\n",
+ arvif->vdev_id, arvif->bssid, arvif->aid);
+
+ rcu_read_lock();
+
+ ap_sta = ieee80211_find_sta(vif, bss_conf->bssid);
+ if (!ap_sta) {
+ ath12k_warn(ar->ab, "failed to find station entry for bss %pM vdev %i\n",
+ bss_conf->bssid, arvif->vdev_id);
+ rcu_read_unlock();
+ return;
+ }
+
+ ath12k_peer_assoc_prepare(ar, vif, ap_sta, &peer_arg, false);
+
+ rcu_read_unlock();
+
+ ret = ath12k_wmi_send_peer_assoc_cmd(ar, &peer_arg);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to run peer assoc for %pM vdev %i: %d\n",
+ bss_conf->bssid, arvif->vdev_id, ret);
+ return;
+ }
+
+ if (!wait_for_completion_timeout(&ar->peer_assoc_done, 1 * HZ)) {
+ ath12k_warn(ar->ab, "failed to get peer assoc conf event for %pM vdev %i\n",
+ bss_conf->bssid, arvif->vdev_id);
+ return;
+ }
+
+ ret = ath12k_setup_peer_smps(ar, arvif, bss_conf->bssid,
+ &ap_sta->deflink.ht_cap);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to setup peer SMPS for vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ return;
+ }
+
+ WARN_ON(arvif->is_up);
+
+ arvif->aid = vif->cfg.aid;
+ ether_addr_copy(arvif->bssid, bss_conf->bssid);
+
+ ret = ath12k_wmi_vdev_up(ar, arvif->vdev_id, arvif->aid, arvif->bssid);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to set vdev %d up: %d\n",
+ arvif->vdev_id, ret);
+ return;
+ }
+
+ arvif->is_up = true;
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+ "mac vdev %d up (associated) bssid %pM aid %d\n",
+ arvif->vdev_id, bss_conf->bssid, vif->cfg.aid);
+
+ spin_lock_bh(&ar->ab->base_lock);
+
+ peer = ath12k_peer_find(ar->ab, arvif->vdev_id, arvif->bssid);
+ if (peer && peer->is_authorized)
+ is_auth = true;
+
+ spin_unlock_bh(&ar->ab->base_lock);
+
+ /* Authorize BSS Peer */
+ if (is_auth) {
+ ret = ath12k_wmi_set_peer_param(ar, arvif->bssid,
+ arvif->vdev_id,
+ WMI_PEER_AUTHORIZE,
+ 1);
+ if (ret)
+ ath12k_warn(ar->ab, "Unable to authorize BSS peer: %d\n", ret);
+ }
+
+ ret = ath12k_wmi_send_obss_spr_cmd(ar, arvif->vdev_id,
+ &bss_conf->he_obss_pd);
+ if (ret)
+ ath12k_warn(ar->ab, "failed to set vdev %i OBSS PD parameters: %d\n",
+ arvif->vdev_id, ret);
+}
+
+static void ath12k_bss_disassoc(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath12k *ar = hw->priv;
+ struct ath12k_vif *arvif = (void *)vif->drv_priv;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac vdev %i disassoc bssid %pM\n",
+ arvif->vdev_id, arvif->bssid);
+
+ ret = ath12k_wmi_vdev_down(ar, arvif->vdev_id);
+ if (ret)
+ ath12k_warn(ar->ab, "failed to down vdev %i: %d\n",
+ arvif->vdev_id, ret);
+
+ arvif->is_up = false;
+
+ /* TODO: cancel connection_loss_work */
+}
+
+static u32 ath12k_mac_get_rate_hw_value(int bitrate)
+{
+ u32 preamble;
+ u16 hw_value;
+ int rate;
+ size_t i;
+
+ if (ath12k_mac_bitrate_is_cck(bitrate))
+ preamble = WMI_RATE_PREAMBLE_CCK;
+ else
+ preamble = WMI_RATE_PREAMBLE_OFDM;
+
+ for (i = 0; i < ARRAY_SIZE(ath12k_legacy_rates); i++) {
+ if (ath12k_legacy_rates[i].bitrate != bitrate)
+ continue;
+
+ hw_value = ath12k_legacy_rates[i].hw_value;
+ rate = ATH12K_HW_RATE_CODE(hw_value, 0, preamble);
+
+ return rate;
+ }
+
+ return -EINVAL;
+}
+
+static void ath12k_recalculate_mgmt_rate(struct ath12k *ar,
+ struct ieee80211_vif *vif,
+ struct cfg80211_chan_def *def)
+{
+ struct ath12k_vif *arvif = (void *)vif->drv_priv;
+ const struct ieee80211_supported_band *sband;
+ u8 basic_rate_idx;
+ int hw_rate_code;
+ u32 vdev_param;
+ u16 bitrate;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ sband = ar->hw->wiphy->bands[def->chan->band];
+ basic_rate_idx = ffs(vif->bss_conf.basic_rates) - 1;
+ bitrate = sband->bitrates[basic_rate_idx].bitrate;
+
+ hw_rate_code = ath12k_mac_get_rate_hw_value(bitrate);
+ if (hw_rate_code < 0) {
+ ath12k_warn(ar->ab, "bitrate not supported %d\n", bitrate);
+ return;
+ }
+
+ vdev_param = WMI_VDEV_PARAM_MGMT_RATE;
+ ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, vdev_param,
+ hw_rate_code);
+ if (ret)
+ ath12k_warn(ar->ab, "failed to set mgmt tx rate %d\n", ret);
+
+ vdev_param = WMI_VDEV_PARAM_BEACON_RATE;
+ ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, vdev_param,
+ hw_rate_code);
+ if (ret)
+ ath12k_warn(ar->ab, "failed to set beacon tx rate %d\n", ret);
+}
+
+static int ath12k_mac_fils_discovery(struct ath12k_vif *arvif,
+ struct ieee80211_bss_conf *info)
+{
+ struct ath12k *ar = arvif->ar;
+ struct sk_buff *tmpl;
+ int ret;
+ u32 interval;
+ bool unsol_bcast_probe_resp_enabled = false;
+
+ if (info->fils_discovery.max_interval) {
+ interval = info->fils_discovery.max_interval;
+
+ tmpl = ieee80211_get_fils_discovery_tmpl(ar->hw, arvif->vif);
+ if (tmpl)
+ ret = ath12k_wmi_fils_discovery_tmpl(ar, arvif->vdev_id,
+ tmpl);
+ } else if (info->unsol_bcast_probe_resp_interval) {
+ unsol_bcast_probe_resp_enabled = 1;
+ interval = info->unsol_bcast_probe_resp_interval;
+
+ tmpl = ieee80211_get_unsol_bcast_probe_resp_tmpl(ar->hw,
+ arvif->vif);
+ if (tmpl)
+ ret = ath12k_wmi_probe_resp_tmpl(ar, arvif->vdev_id,
+ tmpl);
+ } else { /* Disable */
+ return ath12k_wmi_fils_discovery(ar, arvif->vdev_id, 0, false);
+ }
+
+ if (!tmpl) {
+ ath12k_warn(ar->ab,
+ "mac vdev %i failed to retrieve %s template\n",
+ arvif->vdev_id, (unsol_bcast_probe_resp_enabled ?
+ "unsolicited broadcast probe response" :
+ "FILS discovery"));
+ return -EPERM;
+ }
+ kfree_skb(tmpl);
+
+ if (!ret)
+ ret = ath12k_wmi_fils_discovery(ar, arvif->vdev_id, interval,
+ unsol_bcast_probe_resp_enabled);
+
+ return ret;
+}
+
+static void ath12k_mac_op_bss_info_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *info,
+ u64 changed)
+{
+ struct ath12k *ar = hw->priv;
+ struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ struct cfg80211_chan_def def;
+ u32 param_id, param_value;
+ enum nl80211_band band;
+ u32 vdev_param;
+ int mcast_rate;
+ u32 preamble;
+ u16 hw_value;
+ u16 bitrate;
+ int ret;
+ u8 rateidx;
+ u32 rate;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (changed & BSS_CHANGED_BEACON_INT) {
+ arvif->beacon_interval = info->beacon_int;
+
+ param_id = WMI_VDEV_PARAM_BEACON_INTERVAL;
+ ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ param_id,
+ arvif->beacon_interval);
+ if (ret)
+ ath12k_warn(ar->ab, "Failed to set beacon interval for VDEV: %d\n",
+ arvif->vdev_id);
+ else
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+ "Beacon interval: %d set for VDEV: %d\n",
+ arvif->beacon_interval, arvif->vdev_id);
+ }
+
+ if (changed & BSS_CHANGED_BEACON) {
+ param_id = WMI_PDEV_PARAM_BEACON_TX_MODE;
+ param_value = WMI_BEACON_STAGGERED_MODE;
+ ret = ath12k_wmi_pdev_set_param(ar, param_id,
+ param_value, ar->pdev->pdev_id);
+ if (ret)
+ ath12k_warn(ar->ab, "Failed to set beacon mode for VDEV: %d\n",
+ arvif->vdev_id);
+ else
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+ "Set staggered beacon mode for VDEV: %d\n",
+ arvif->vdev_id);
+
+ ret = ath12k_mac_setup_bcn_tmpl(arvif);
+ if (ret)
+ ath12k_warn(ar->ab, "failed to update bcn template: %d\n",
+ ret);
+ }
+
+ if (changed & (BSS_CHANGED_BEACON_INFO | BSS_CHANGED_BEACON)) {
+ arvif->dtim_period = info->dtim_period;
+
+ param_id = WMI_VDEV_PARAM_DTIM_PERIOD;
+ ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ param_id,
+ arvif->dtim_period);
+
+ if (ret)
+ ath12k_warn(ar->ab, "Failed to set dtim period for VDEV %d: %i\n",
+ arvif->vdev_id, ret);
+ else
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+ "DTIM period: %d set for VDEV: %d\n",
+ arvif->dtim_period, arvif->vdev_id);
+ }
+
+ if (changed & BSS_CHANGED_SSID &&
+ vif->type == NL80211_IFTYPE_AP) {
+ arvif->u.ap.ssid_len = vif->cfg.ssid_len;
+ if (vif->cfg.ssid_len)
+ memcpy(arvif->u.ap.ssid, vif->cfg.ssid, vif->cfg.ssid_len);
+ arvif->u.ap.hidden_ssid = info->hidden_ssid;
+ }
+
+ if (changed & BSS_CHANGED_BSSID && !is_zero_ether_addr(info->bssid))
+ ether_addr_copy(arvif->bssid, info->bssid);
+
+ if (changed & BSS_CHANGED_BEACON_ENABLED) {
+ ath12k_control_beaconing(arvif, info);
+
+ if (arvif->is_up && vif->bss_conf.he_support &&
+ vif->bss_conf.he_oper.params) {
+ /* TODO: Extend to support 1024 BA Bitmap size */
+ ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ WMI_VDEV_PARAM_BA_MODE,
+ WMI_BA_MODE_BUFFER_SIZE_256);
+ if (ret)
+ ath12k_warn(ar->ab,
+ "failed to set BA BUFFER SIZE 256 for vdev: %d\n",
+ arvif->vdev_id);
+
+ param_id = WMI_VDEV_PARAM_HEOPS_0_31;
+ param_value = vif->bss_conf.he_oper.params;
+ ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ param_id, param_value);
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+ "he oper param: %x set for VDEV: %d\n",
+ param_value, arvif->vdev_id);
+
+ if (ret)
+ ath12k_warn(ar->ab, "Failed to set he oper params %x for VDEV %d: %i\n",
+ param_value, arvif->vdev_id, ret);
+ }
+ }
+
+ if (changed & BSS_CHANGED_ERP_CTS_PROT) {
+ u32 cts_prot;
+
+ cts_prot = !!(info->use_cts_prot);
+ param_id = WMI_VDEV_PARAM_PROTECTION_MODE;
+
+ if (arvif->is_started) {
+ ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ param_id, cts_prot);
+ if (ret)
+ ath12k_warn(ar->ab, "Failed to set CTS prot for VDEV: %d\n",
+ arvif->vdev_id);
+ else
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "Set CTS prot: %d for VDEV: %d\n",
+ cts_prot, arvif->vdev_id);
+ } else {
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "defer protection mode setup, vdev is not ready yet\n");
+ }
+ }
+
+ if (changed & BSS_CHANGED_ERP_SLOT) {
+ u32 slottime;
+
+ if (info->use_short_slot)
+ slottime = WMI_VDEV_SLOT_TIME_SHORT; /* 9us */
+
+ else
+ slottime = WMI_VDEV_SLOT_TIME_LONG; /* 20us */
+
+ param_id = WMI_VDEV_PARAM_SLOT_TIME;
+ ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ param_id, slottime);
+ if (ret)
+ ath12k_warn(ar->ab, "Failed to set erp slot for VDEV: %d\n",
+ arvif->vdev_id);
+ else
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+ "Set slottime: %d for VDEV: %d\n",
+ slottime, arvif->vdev_id);
+ }
+
+ if (changed & BSS_CHANGED_ERP_PREAMBLE) {
+ u32 preamble;
+
+ if (info->use_short_preamble)
+ preamble = WMI_VDEV_PREAMBLE_SHORT;
+ else
+ preamble = WMI_VDEV_PREAMBLE_LONG;
+
+ param_id = WMI_VDEV_PARAM_PREAMBLE;
+ ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ param_id, preamble);
+ if (ret)
+ ath12k_warn(ar->ab, "Failed to set preamble for VDEV: %d\n",
+ arvif->vdev_id);
+ else
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+ "Set preamble: %d for VDEV: %d\n",
+ preamble, arvif->vdev_id);
+ }
+
+ if (changed & BSS_CHANGED_ASSOC) {
+ if (vif->cfg.assoc)
+ ath12k_bss_assoc(hw, vif, info);
+ else
+ ath12k_bss_disassoc(hw, vif);
+ }
+
+ if (changed & BSS_CHANGED_TXPOWER) {
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac vdev_id %i txpower %d\n",
+ arvif->vdev_id, info->txpower);
+
+ arvif->txpower = info->txpower;
+ ath12k_mac_txpower_recalc(ar);
+ }
+
+ if (changed & BSS_CHANGED_MCAST_RATE &&
+ !ath12k_mac_vif_chan(arvif->vif, &def)) {
+ band = def.chan->band;
+ mcast_rate = vif->bss_conf.mcast_rate[band];
+
+ if (mcast_rate > 0)
+ rateidx = mcast_rate - 1;
+ else
+ rateidx = ffs(vif->bss_conf.basic_rates) - 1;
+
+ if (ar->pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP)
+ rateidx += ATH12K_MAC_FIRST_OFDM_RATE_IDX;
+
+ bitrate = ath12k_legacy_rates[rateidx].bitrate;
+ hw_value = ath12k_legacy_rates[rateidx].hw_value;
+
+ if (ath12k_mac_bitrate_is_cck(bitrate))
+ preamble = WMI_RATE_PREAMBLE_CCK;
+ else
+ preamble = WMI_RATE_PREAMBLE_OFDM;
+
+ rate = ATH12K_HW_RATE_CODE(hw_value, 0, preamble);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+ "mac vdev %d mcast_rate %x\n",
+ arvif->vdev_id, rate);
+
+ vdev_param = WMI_VDEV_PARAM_MCAST_DATA_RATE;
+ ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ vdev_param, rate);
+ if (ret)
+ ath12k_warn(ar->ab,
+ "failed to set mcast rate on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+
+ vdev_param = WMI_VDEV_PARAM_BCAST_DATA_RATE;
+ ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ vdev_param, rate);
+ if (ret)
+ ath12k_warn(ar->ab,
+ "failed to set bcast rate on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ }
+
+ if (changed & BSS_CHANGED_BASIC_RATES &&
+ !ath12k_mac_vif_chan(arvif->vif, &def))
+ ath12k_recalculate_mgmt_rate(ar, vif, &def);
+
+ if (changed & BSS_CHANGED_TWT) {
+ if (info->twt_requester || info->twt_responder)
+ ath12k_wmi_send_twt_enable_cmd(ar, ar->pdev->pdev_id);
+ else
+ ath12k_wmi_send_twt_disable_cmd(ar, ar->pdev->pdev_id);
+ }
+
+ if (changed & BSS_CHANGED_HE_OBSS_PD)
+ ath12k_wmi_send_obss_spr_cmd(ar, arvif->vdev_id,
+ &info->he_obss_pd);
+
+ if (changed & BSS_CHANGED_HE_BSS_COLOR) {
+ if (vif->type == NL80211_IFTYPE_AP) {
+ ret = ath12k_wmi_obss_color_cfg_cmd(ar,
+ arvif->vdev_id,
+ info->he_bss_color.color,
+ ATH12K_BSS_COLOR_AP_PERIODS,
+ info->he_bss_color.enabled);
+ if (ret)
+ ath12k_warn(ar->ab, "failed to set bss color collision on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ } else if (vif->type == NL80211_IFTYPE_STATION) {
+ ret = ath12k_wmi_send_bss_color_change_enable_cmd(ar,
+ arvif->vdev_id,
+ 1);
+ if (ret)
+ ath12k_warn(ar->ab, "failed to enable bss color change on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ ret = ath12k_wmi_obss_color_cfg_cmd(ar,
+ arvif->vdev_id,
+ 0,
+ ATH12K_BSS_COLOR_STA_PERIODS,
+ 1);
+ if (ret)
+ ath12k_warn(ar->ab, "failed to set bss color collision on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ }
+ }
+
+ if (changed & BSS_CHANGED_FILS_DISCOVERY ||
+ changed & BSS_CHANGED_UNSOL_BCAST_PROBE_RESP)
+ ath12k_mac_fils_discovery(arvif, info);
+
+ mutex_unlock(&ar->conf_mutex);
+}
+
+void __ath12k_mac_scan_finish(struct ath12k *ar)
+{
+ lockdep_assert_held(&ar->data_lock);
+
+ switch (ar->scan.state) {
+ case ATH12K_SCAN_IDLE:
+ break;
+ case ATH12K_SCAN_RUNNING:
+ case ATH12K_SCAN_ABORTING:
+ if (!ar->scan.is_roc) {
+ struct cfg80211_scan_info info = {
+ .aborted = (ar->scan.state ==
+ ATH12K_SCAN_ABORTING),
+ };
+
+ ieee80211_scan_completed(ar->hw, &info);
+ } else if (ar->scan.roc_notify) {
+ ieee80211_remain_on_channel_expired(ar->hw);
+ }
+ fallthrough;
+ case ATH12K_SCAN_STARTING:
+ ar->scan.state = ATH12K_SCAN_IDLE;
+ ar->scan_channel = NULL;
+ ar->scan.roc_freq = 0;
+ cancel_delayed_work(&ar->scan.timeout);
+ complete(&ar->scan.completed);
+ break;
+ }
+}
+
+void ath12k_mac_scan_finish(struct ath12k *ar)
+{
+ spin_lock_bh(&ar->data_lock);
+ __ath12k_mac_scan_finish(ar);
+ spin_unlock_bh(&ar->data_lock);
+}
+
+static int ath12k_scan_stop(struct ath12k *ar)
+{
+ struct ath12k_wmi_scan_cancel_arg arg = {
+ .req_type = WLAN_SCAN_CANCEL_SINGLE,
+ .scan_id = ATH12K_SCAN_ID,
+ };
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ /* TODO: Fill other STOP Params */
+ arg.pdev_id = ar->pdev->pdev_id;
+
+ ret = ath12k_wmi_send_scan_stop_cmd(ar, &arg);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to stop wmi scan: %d\n", ret);
+ goto out;
+ }
+
+ ret = wait_for_completion_timeout(&ar->scan.completed, 3 * HZ);
+ if (ret == 0) {
+ ath12k_warn(ar->ab,
+ "failed to receive scan abort comple: timed out\n");
+ ret = -ETIMEDOUT;
+ } else if (ret > 0) {
+ ret = 0;
+ }
+
+out:
+ /* Scan state should be updated upon scan completion but in case
+ * firmware fails to deliver the event (for whatever reason) it is
+ * desired to clean up scan state anyway. Firmware may have just
+ * dropped the scan completion event delivery due to transport pipe
+ * being overflown with data and/or it can recover on its own before
+ * next scan request is submitted.
+ */
+ spin_lock_bh(&ar->data_lock);
+ if (ar->scan.state != ATH12K_SCAN_IDLE)
+ __ath12k_mac_scan_finish(ar);
+ spin_unlock_bh(&ar->data_lock);
+
+ return ret;
+}
+
+static void ath12k_scan_abort(struct ath12k *ar)
+{
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ spin_lock_bh(&ar->data_lock);
+
+ switch (ar->scan.state) {
+ case ATH12K_SCAN_IDLE:
+ /* This can happen if timeout worker kicked in and called
+ * abortion while scan completion was being processed.
+ */
+ break;
+ case ATH12K_SCAN_STARTING:
+ case ATH12K_SCAN_ABORTING:
+ ath12k_warn(ar->ab, "refusing scan abortion due to invalid scan state: %d\n",
+ ar->scan.state);
+ break;
+ case ATH12K_SCAN_RUNNING:
+ ar->scan.state = ATH12K_SCAN_ABORTING;
+ spin_unlock_bh(&ar->data_lock);
+
+ ret = ath12k_scan_stop(ar);
+ if (ret)
+ ath12k_warn(ar->ab, "failed to abort scan: %d\n", ret);
+
+ spin_lock_bh(&ar->data_lock);
+ break;
+ }
+
+ spin_unlock_bh(&ar->data_lock);
+}
+
+static void ath12k_scan_timeout_work(struct work_struct *work)
+{
+ struct ath12k *ar = container_of(work, struct ath12k,
+ scan.timeout.work);
+
+ mutex_lock(&ar->conf_mutex);
+ ath12k_scan_abort(ar);
+ mutex_unlock(&ar->conf_mutex);
+}
+
+static int ath12k_start_scan(struct ath12k *ar,
+ struct ath12k_wmi_scan_req_arg *arg)
+{
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ret = ath12k_wmi_send_scan_start_cmd(ar, arg);
+ if (ret)
+ return ret;
+
+ ret = wait_for_completion_timeout(&ar->scan.started, 1 * HZ);
+ if (ret == 0) {
+ ret = ath12k_scan_stop(ar);
+ if (ret)
+ ath12k_warn(ar->ab, "failed to stop scan: %d\n", ret);
+
+ return -ETIMEDOUT;
+ }
+
+ /* If we failed to start the scan, return error code at
+ * this point. This is probably due to some issue in the
+ * firmware, but no need to wedge the driver due to that...
+ */
+ spin_lock_bh(&ar->data_lock);
+ if (ar->scan.state == ATH12K_SCAN_IDLE) {
+ spin_unlock_bh(&ar->data_lock);
+ return -EINVAL;
+ }
+ spin_unlock_bh(&ar->data_lock);
+
+ return 0;
+}
+
+static int ath12k_mac_op_hw_scan(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_scan_request *hw_req)
+{
+ struct ath12k *ar = hw->priv;
+ struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ struct cfg80211_scan_request *req = &hw_req->req;
+ struct ath12k_wmi_scan_req_arg arg = {};
+ int ret;
+ int i;
+
+ mutex_lock(&ar->conf_mutex);
+
+ spin_lock_bh(&ar->data_lock);
+ switch (ar->scan.state) {
+ case ATH12K_SCAN_IDLE:
+ reinit_completion(&ar->scan.started);
+ reinit_completion(&ar->scan.completed);
+ ar->scan.state = ATH12K_SCAN_STARTING;
+ ar->scan.is_roc = false;
+ ar->scan.vdev_id = arvif->vdev_id;
+ ret = 0;
+ break;
+ case ATH12K_SCAN_STARTING:
+ case ATH12K_SCAN_RUNNING:
+ case ATH12K_SCAN_ABORTING:
+ ret = -EBUSY;
+ break;
+ }
+ spin_unlock_bh(&ar->data_lock);
+
+ if (ret)
+ goto exit;
+
+ ath12k_wmi_start_scan_init(ar, &arg);
+ arg.vdev_id = arvif->vdev_id;
+ arg.scan_id = ATH12K_SCAN_ID;
+
+ if (req->ie_len) {
+ arg.extraie.len = req->ie_len;
+ arg.extraie.ptr = kzalloc(req->ie_len, GFP_KERNEL);
+ memcpy(arg.extraie.ptr, req->ie, req->ie_len);
+ }
+
+ if (req->n_ssids) {
+ arg.num_ssids = req->n_ssids;
+ for (i = 0; i < arg.num_ssids; i++)
+ arg.ssid[i] = req->ssids[i];
+ } else {
+ arg.scan_flags |= WMI_SCAN_FLAG_PASSIVE;
+ }
+
+ if (req->n_channels) {
+ arg.num_chan = req->n_channels;
+ for (i = 0; i < arg.num_chan; i++)
+ arg.chan_list[i] = req->channels[i]->center_freq;
+ }
+
+ ret = ath12k_start_scan(ar, &arg);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to start hw scan: %d\n", ret);
+ spin_lock_bh(&ar->data_lock);
+ ar->scan.state = ATH12K_SCAN_IDLE;
+ spin_unlock_bh(&ar->data_lock);
+ }
+
+ /* Add a margin to account for event/command processing */
+ ieee80211_queue_delayed_work(ar->hw, &ar->scan.timeout,
+ msecs_to_jiffies(arg.max_scan_time +
+ ATH12K_MAC_SCAN_TIMEOUT_MSECS));
+
+exit:
+ if (req->ie_len)
+ kfree(arg.extraie.ptr);
+
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static void ath12k_mac_op_cancel_hw_scan(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath12k *ar = hw->priv;
+
+ mutex_lock(&ar->conf_mutex);
+ ath12k_scan_abort(ar);
+ mutex_unlock(&ar->conf_mutex);
+
+ cancel_delayed_work_sync(&ar->scan.timeout);
+}
+
+static int ath12k_install_key(struct ath12k_vif *arvif,
+ struct ieee80211_key_conf *key,
+ enum set_key_cmd cmd,
+ const u8 *macaddr, u32 flags)
+{
+ int ret;
+ struct ath12k *ar = arvif->ar;
+ struct wmi_vdev_install_key_arg arg = {
+ .vdev_id = arvif->vdev_id,
+ .key_idx = key->keyidx,
+ .key_len = key->keylen,
+ .key_data = key->key,
+ .key_flags = flags,
+ .macaddr = macaddr,
+ };
+
+ lockdep_assert_held(&arvif->ar->conf_mutex);
+
+ reinit_completion(&ar->install_key_done);
+
+ if (test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ar->ab->dev_flags))
+ return 0;
+
+ if (cmd == DISABLE_KEY) {
+ /* TODO: Check if FW expects value other than NONE for del */
+ /* arg.key_cipher = WMI_CIPHER_NONE; */
+ arg.key_len = 0;
+ arg.key_data = NULL;
+ goto install;
+ }
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_CCMP:
+ arg.key_cipher = WMI_CIPHER_AES_CCM;
+ /* TODO: Re-check if flag is valid */
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV_MGMT;
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ arg.key_cipher = WMI_CIPHER_TKIP;
+ arg.key_txmic_len = 8;
+ arg.key_rxmic_len = 8;
+ break;
+ case WLAN_CIPHER_SUITE_CCMP_256:
+ arg.key_cipher = WMI_CIPHER_AES_CCM;
+ break;
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ arg.key_cipher = WMI_CIPHER_AES_GCM;
+ break;
+ default:
+ ath12k_warn(ar->ab, "cipher %d is not supported\n", key->cipher);
+ return -EOPNOTSUPP;
+ }
+
+ if (test_bit(ATH12K_FLAG_RAW_MODE, &ar->ab->dev_flags))
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV |
+ IEEE80211_KEY_FLAG_RESERVE_TAILROOM;
+
+install:
+ ret = ath12k_wmi_vdev_install_key(arvif->ar, &arg);
+
+ if (ret)
+ return ret;
+
+ if (!wait_for_completion_timeout(&ar->install_key_done, 1 * HZ))
+ return -ETIMEDOUT;
+
+ if (ether_addr_equal(macaddr, arvif->vif->addr))
+ arvif->key_cipher = key->cipher;
+
+ return ar->install_key_status ? -EINVAL : 0;
+}
+
+static int ath12k_clear_peer_keys(struct ath12k_vif *arvif,
+ const u8 *addr)
+{
+ struct ath12k *ar = arvif->ar;
+ struct ath12k_base *ab = ar->ab;
+ struct ath12k_peer *peer;
+ int first_errno = 0;
+ int ret;
+ int i;
+ u32 flags = 0;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ spin_lock_bh(&ab->base_lock);
+ peer = ath12k_peer_find(ab, arvif->vdev_id, addr);
+ spin_unlock_bh(&ab->base_lock);
+
+ if (!peer)
+ return -ENOENT;
+
+ for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
+ if (!peer->keys[i])
+ continue;
+
+ /* key flags are not required to delete the key */
+ ret = ath12k_install_key(arvif, peer->keys[i],
+ DISABLE_KEY, addr, flags);
+ if (ret < 0 && first_errno == 0)
+ first_errno = ret;
+
+ if (ret < 0)
+ ath12k_warn(ab, "failed to remove peer key %d: %d\n",
+ i, ret);
+
+ spin_lock_bh(&ab->base_lock);
+ peer->keys[i] = NULL;
+ spin_unlock_bh(&ab->base_lock);
+ }
+
+ return first_errno;
+}
+
+static int ath12k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ struct ath12k *ar = hw->priv;
+ struct ath12k_base *ab = ar->ab;
+ struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ struct ath12k_peer *peer;
+ struct ath12k_sta *arsta;
+ const u8 *peer_addr;
+ int ret = 0;
+ u32 flags = 0;
+
+ /* BIP needs to be done in software */
+ if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
+ key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
+ key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256 ||
+ key->cipher == WLAN_CIPHER_SUITE_BIP_CMAC_256)
+ return 1;
+
+ if (test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ar->ab->dev_flags))
+ return 1;
+
+ if (key->keyidx > WMI_MAX_KEY_INDEX)
+ return -ENOSPC;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (sta)
+ peer_addr = sta->addr;
+ else if (arvif->vdev_type == WMI_VDEV_TYPE_STA)
+ peer_addr = vif->bss_conf.bssid;
+ else
+ peer_addr = vif->addr;
+
+ key->hw_key_idx = key->keyidx;
+
+ /* the peer should not disappear in mid-way (unless FW goes awry) since
+ * we already hold conf_mutex. we just make sure its there now.
+ */
+ spin_lock_bh(&ab->base_lock);
+ peer = ath12k_peer_find(ab, arvif->vdev_id, peer_addr);
+ spin_unlock_bh(&ab->base_lock);
+
+ if (!peer) {
+ if (cmd == SET_KEY) {
+ ath12k_warn(ab, "cannot install key for non-existent peer %pM\n",
+ peer_addr);
+ ret = -EOPNOTSUPP;
+ goto exit;
+ } else {
+ /* if the peer doesn't exist there is no key to disable
+ * anymore
+ */
+ goto exit;
+ }
+ }
+
+ if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
+ flags |= WMI_KEY_PAIRWISE;
+ else
+ flags |= WMI_KEY_GROUP;
+
+ ret = ath12k_install_key(arvif, key, cmd, peer_addr, flags);
+ if (ret) {
+ ath12k_warn(ab, "ath12k_install_key failed (%d)\n", ret);
+ goto exit;
+ }
+
+ ret = ath12k_dp_rx_peer_pn_replay_config(arvif, peer_addr, cmd, key);
+ if (ret) {
+ ath12k_warn(ab, "failed to offload PN replay detection %d\n", ret);
+ goto exit;
+ }
+
+ spin_lock_bh(&ab->base_lock);
+ peer = ath12k_peer_find(ab, arvif->vdev_id, peer_addr);
+ if (peer && cmd == SET_KEY) {
+ peer->keys[key->keyidx] = key;
+ if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
+ peer->ucast_keyidx = key->keyidx;
+ peer->sec_type = ath12k_dp_tx_get_encrypt_type(key->cipher);
+ } else {
+ peer->mcast_keyidx = key->keyidx;
+ peer->sec_type_grp = ath12k_dp_tx_get_encrypt_type(key->cipher);
+ }
+ } else if (peer && cmd == DISABLE_KEY) {
+ peer->keys[key->keyidx] = NULL;
+ if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
+ peer->ucast_keyidx = 0;
+ else
+ peer->mcast_keyidx = 0;
+ } else if (!peer)
+ /* impossible unless FW goes crazy */
+ ath12k_warn(ab, "peer %pM disappeared!\n", peer_addr);
+
+ if (sta) {
+ arsta = (struct ath12k_sta *)sta->drv_priv;
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_TKIP:
+ case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_CCMP_256:
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ if (cmd == SET_KEY)
+ arsta->pn_type = HAL_PN_TYPE_WPA;
+ else
+ arsta->pn_type = HAL_PN_TYPE_NONE;
+ break;
+ default:
+ arsta->pn_type = HAL_PN_TYPE_NONE;
+ break;
+ }
+ }
+
+ spin_unlock_bh(&ab->base_lock);
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static int
+ath12k_mac_bitrate_mask_num_vht_rates(struct ath12k *ar,
+ enum nl80211_band band,
+ const struct cfg80211_bitrate_mask *mask)
+{
+ int num_rates = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++)
+ num_rates += hweight16(mask->control[band].vht_mcs[i]);
+
+ return num_rates;
+}
+
+static int
+ath12k_mac_set_peer_vht_fixed_rate(struct ath12k_vif *arvif,
+ struct ieee80211_sta *sta,
+ const struct cfg80211_bitrate_mask *mask,
+ enum nl80211_band band)
+{
+ struct ath12k *ar = arvif->ar;
+ u8 vht_rate, nss;
+ u32 rate_code;
+ int ret, i;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ nss = 0;
+
+ for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) {
+ if (hweight16(mask->control[band].vht_mcs[i]) == 1) {
+ nss = i + 1;
+ vht_rate = ffs(mask->control[band].vht_mcs[i]) - 1;
+ }
+ }
+
+ if (!nss) {
+ ath12k_warn(ar->ab, "No single VHT Fixed rate found to set for %pM",
+ sta->addr);
+ return -EINVAL;
+ }
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+ "Setting Fixed VHT Rate for peer %pM. Device will not switch to any other selected rates",
+ sta->addr);
+
+ rate_code = ATH12K_HW_RATE_CODE(vht_rate, nss - 1,
+ WMI_RATE_PREAMBLE_VHT);
+ ret = ath12k_wmi_set_peer_param(ar, sta->addr,
+ arvif->vdev_id,
+ WMI_PEER_PARAM_FIXED_RATE,
+ rate_code);
+ if (ret)
+ ath12k_warn(ar->ab,
+ "failed to update STA %pM Fixed Rate %d: %d\n",
+ sta->addr, rate_code, ret);
+
+ return ret;
+}
+
+static int ath12k_station_assoc(struct ath12k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ bool reassoc)
+{
+ struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ struct ath12k_wmi_peer_assoc_arg peer_arg;
+ int ret;
+ struct cfg80211_chan_def def;
+ enum nl80211_band band;
+ struct cfg80211_bitrate_mask *mask;
+ u8 num_vht_rates;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (WARN_ON(ath12k_mac_vif_chan(vif, &def)))
+ return -EPERM;
+
+ band = def.chan->band;
+ mask = &arvif->bitrate_mask;
+
+ ath12k_peer_assoc_prepare(ar, vif, sta, &peer_arg, reassoc);
+
+ ret = ath12k_wmi_send_peer_assoc_cmd(ar, &peer_arg);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to run peer assoc for STA %pM vdev %i: %d\n",
+ sta->addr, arvif->vdev_id, ret);
+ return ret;
+ }
+
+ if (!wait_for_completion_timeout(&ar->peer_assoc_done, 1 * HZ)) {
+ ath12k_warn(ar->ab, "failed to get peer assoc conf event for %pM vdev %i\n",
+ sta->addr, arvif->vdev_id);
+ return -ETIMEDOUT;
+ }
+
+ num_vht_rates = ath12k_mac_bitrate_mask_num_vht_rates(ar, band, mask);
+
+ /* If single VHT rate is configured (by set_bitrate_mask()),
+ * peer_assoc will disable VHT. This is now enabled by a peer specific
+ * fixed param.
+ * Note that all other rates and NSS will be disabled for this peer.
+ */
+ if (sta->deflink.vht_cap.vht_supported && num_vht_rates == 1) {
+ ret = ath12k_mac_set_peer_vht_fixed_rate(arvif, sta, mask,
+ band);
+ if (ret)
+ return ret;
+ }
+
+ /* Re-assoc is run only to update supported rates for given station. It
+ * doesn't make much sense to reconfigure the peer completely.
+ */
+ if (reassoc)
+ return 0;
+
+ ret = ath12k_setup_peer_smps(ar, arvif, sta->addr,
+ &sta->deflink.ht_cap);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to setup peer SMPS for vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ if (!sta->wme) {
+ arvif->num_legacy_stations++;
+ ret = ath12k_recalc_rtscts_prot(arvif);
+ if (ret)
+ return ret;
+ }
+
+ if (sta->wme && sta->uapsd_queues) {
+ ret = ath12k_peer_assoc_qos_ap(ar, arvif, sta);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to set qos params for STA %pM for vdev %i: %d\n",
+ sta->addr, arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ath12k_station_disassoc(struct ath12k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct ath12k_vif *arvif = (void *)vif->drv_priv;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (!sta->wme) {
+ arvif->num_legacy_stations--;
+ ret = ath12k_recalc_rtscts_prot(arvif);
+ if (ret)
+ return ret;
+ }
+
+ ret = ath12k_clear_peer_keys(arvif, sta->addr);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to clear all peer keys for vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ return 0;
+}
+
+static void ath12k_sta_rc_update_wk(struct work_struct *wk)
+{
+ struct ath12k *ar;
+ struct ath12k_vif *arvif;
+ struct ath12k_sta *arsta;
+ struct ieee80211_sta *sta;
+ struct cfg80211_chan_def def;
+ enum nl80211_band band;
+ const u8 *ht_mcs_mask;
+ const u16 *vht_mcs_mask;
+ u32 changed, bw, nss, smps;
+ int err, num_vht_rates;
+ const struct cfg80211_bitrate_mask *mask;
+ struct ath12k_wmi_peer_assoc_arg peer_arg;
+
+ arsta = container_of(wk, struct ath12k_sta, update_wk);
+ sta = container_of((void *)arsta, struct ieee80211_sta, drv_priv);
+ arvif = arsta->arvif;
+ ar = arvif->ar;
+
+ if (WARN_ON(ath12k_mac_vif_chan(arvif->vif, &def)))
+ return;
+
+ band = def.chan->band;
+ ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs;
+ vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
+
+ spin_lock_bh(&ar->data_lock);
+
+ changed = arsta->changed;
+ arsta->changed = 0;
+
+ bw = arsta->bw;
+ nss = arsta->nss;
+ smps = arsta->smps;
+
+ spin_unlock_bh(&ar->data_lock);
+
+ mutex_lock(&ar->conf_mutex);
+
+ nss = max_t(u32, 1, nss);
+ nss = min(nss, max(ath12k_mac_max_ht_nss(ht_mcs_mask),
+ ath12k_mac_max_vht_nss(vht_mcs_mask)));
+
+ if (changed & IEEE80211_RC_BW_CHANGED) {
+ err = ath12k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id,
+ WMI_PEER_CHWIDTH, bw);
+ if (err)
+ ath12k_warn(ar->ab, "failed to update STA %pM peer bw %d: %d\n",
+ sta->addr, bw, err);
+ }
+
+ if (changed & IEEE80211_RC_NSS_CHANGED) {
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac update sta %pM nss %d\n",
+ sta->addr, nss);
+
+ err = ath12k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id,
+ WMI_PEER_NSS, nss);
+ if (err)
+ ath12k_warn(ar->ab, "failed to update STA %pM nss %d: %d\n",
+ sta->addr, nss, err);
+ }
+
+ if (changed & IEEE80211_RC_SMPS_CHANGED) {
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac update sta %pM smps %d\n",
+ sta->addr, smps);
+
+ err = ath12k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id,
+ WMI_PEER_MIMO_PS_STATE, smps);
+ if (err)
+ ath12k_warn(ar->ab, "failed to update STA %pM smps %d: %d\n",
+ sta->addr, smps, err);
+ }
+
+ if (changed & IEEE80211_RC_SUPP_RATES_CHANGED) {
+ mask = &arvif->bitrate_mask;
+ num_vht_rates = ath12k_mac_bitrate_mask_num_vht_rates(ar, band,
+ mask);
+
+ /* Peer_assoc_prepare will reject vht rates in
+ * bitrate_mask if its not available in range format and
+ * sets vht tx_rateset as unsupported. So multiple VHT MCS
+ * setting(eg. MCS 4,5,6) per peer is not supported here.
+ * But, Single rate in VHT mask can be set as per-peer
+ * fixed rate. But even if any HT rates are configured in
+ * the bitrate mask, device will not switch to those rates
+ * when per-peer Fixed rate is set.
+ * TODO: Check RATEMASK_CMDID to support auto rates selection
+ * across HT/VHT and for multiple VHT MCS support.
+ */
+ if (sta->deflink.vht_cap.vht_supported && num_vht_rates == 1) {
+ ath12k_mac_set_peer_vht_fixed_rate(arvif, sta, mask,
+ band);
+ } else {
+ /* If the peer is non-VHT or no fixed VHT rate
+ * is provided in the new bitrate mask we set the
+ * other rates using peer_assoc command.
+ */
+ ath12k_peer_assoc_prepare(ar, arvif->vif, sta,
+ &peer_arg, true);
+
+ err = ath12k_wmi_send_peer_assoc_cmd(ar, &peer_arg);
+ if (err)
+ ath12k_warn(ar->ab, "failed to run peer assoc for STA %pM vdev %i: %d\n",
+ sta->addr, arvif->vdev_id, err);
+
+ if (!wait_for_completion_timeout(&ar->peer_assoc_done, 1 * HZ))
+ ath12k_warn(ar->ab, "failed to get peer assoc conf event for %pM vdev %i\n",
+ sta->addr, arvif->vdev_id);
+ }
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+}
+
+static int ath12k_mac_inc_num_stations(struct ath12k_vif *arvif,
+ struct ieee80211_sta *sta)
+{
+ struct ath12k *ar = arvif->ar;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (arvif->vdev_type == WMI_VDEV_TYPE_STA && !sta->tdls)
+ return 0;
+
+ if (ar->num_stations >= ar->max_num_stations)
+ return -ENOBUFS;
+
+ ar->num_stations++;
+
+ return 0;
+}
+
+static void ath12k_mac_dec_num_stations(struct ath12k_vif *arvif,
+ struct ieee80211_sta *sta)
+{
+ struct ath12k *ar = arvif->ar;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (arvif->vdev_type == WMI_VDEV_TYPE_STA && !sta->tdls)
+ return;
+
+ ar->num_stations--;
+}
+
+static int ath12k_mac_station_add(struct ath12k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct ath12k_base *ab = ar->ab;
+ struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ struct ath12k_sta *arsta = (struct ath12k_sta *)sta->drv_priv;
+ struct ath12k_wmi_peer_create_arg peer_param;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ret = ath12k_mac_inc_num_stations(arvif, sta);
+ if (ret) {
+ ath12k_warn(ab, "refusing to associate station: too many connected already (%d)\n",
+ ar->max_num_stations);
+ goto exit;
+ }
+
+ arsta->rx_stats = kzalloc(sizeof(*arsta->rx_stats), GFP_KERNEL);
+ if (!arsta->rx_stats) {
+ ret = -ENOMEM;
+ goto dec_num_station;
+ }
+
+ peer_param.vdev_id = arvif->vdev_id;
+ peer_param.peer_addr = sta->addr;
+ peer_param.peer_type = WMI_PEER_TYPE_DEFAULT;
+
+ ret = ath12k_peer_create(ar, arvif, sta, &peer_param);
+ if (ret) {
+ ath12k_warn(ab, "Failed to add peer: %pM for VDEV: %d\n",
+ sta->addr, arvif->vdev_id);
+ goto free_peer;
+ }
+
+ ath12k_dbg(ab, ATH12K_DBG_MAC, "Added peer: %pM for VDEV: %d\n",
+ sta->addr, arvif->vdev_id);
+
+ if (ieee80211_vif_is_mesh(vif)) {
+ ret = ath12k_wmi_set_peer_param(ar, sta->addr,
+ arvif->vdev_id,
+ WMI_PEER_USE_4ADDR, 1);
+ if (ret) {
+ ath12k_warn(ab, "failed to STA %pM 4addr capability: %d\n",
+ sta->addr, ret);
+ goto free_peer;
+ }
+ }
+
+ ret = ath12k_dp_peer_setup(ar, arvif->vdev_id, sta->addr);
+ if (ret) {
+ ath12k_warn(ab, "failed to setup dp for peer %pM on vdev %i (%d)\n",
+ sta->addr, arvif->vdev_id, ret);
+ goto free_peer;
+ }
+
+ if (ab->hw_params->vdev_start_delay &&
+ !arvif->is_started &&
+ arvif->vdev_type != WMI_VDEV_TYPE_AP) {
+ ret = ath12k_start_vdev_delay(ar->hw, vif);
+ if (ret) {
+ ath12k_warn(ab, "failed to delay vdev start: %d\n", ret);
+ goto free_peer;
+ }
+ }
+
+ return 0;
+
+free_peer:
+ ath12k_peer_delete(ar, arvif->vdev_id, sta->addr);
+dec_num_station:
+ ath12k_mac_dec_num_stations(arvif, sta);
+exit:
+ return ret;
+}
+
+static int ath12k_mac_op_sta_state(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ enum ieee80211_sta_state old_state,
+ enum ieee80211_sta_state new_state)
+{
+ struct ath12k *ar = hw->priv;
+ struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ struct ath12k_sta *arsta = (struct ath12k_sta *)sta->drv_priv;
+ struct ath12k_peer *peer;
+ int ret = 0;
+
+ /* cancel must be done outside the mutex to avoid deadlock */
+ if ((old_state == IEEE80211_STA_NONE &&
+ new_state == IEEE80211_STA_NOTEXIST))
+ cancel_work_sync(&arsta->update_wk);
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (old_state == IEEE80211_STA_NOTEXIST &&
+ new_state == IEEE80211_STA_NONE) {
+ memset(arsta, 0, sizeof(*arsta));
+ arsta->arvif = arvif;
+ INIT_WORK(&arsta->update_wk, ath12k_sta_rc_update_wk);
+
+ ret = ath12k_mac_station_add(ar, vif, sta);
+ if (ret)
+ ath12k_warn(ar->ab, "Failed to add station: %pM for VDEV: %d\n",
+ sta->addr, arvif->vdev_id);
+ } else if ((old_state == IEEE80211_STA_NONE &&
+ new_state == IEEE80211_STA_NOTEXIST)) {
+ ath12k_dp_peer_cleanup(ar, arvif->vdev_id, sta->addr);
+
+ ret = ath12k_peer_delete(ar, arvif->vdev_id, sta->addr);
+ if (ret)
+ ath12k_warn(ar->ab, "Failed to delete peer: %pM for VDEV: %d\n",
+ sta->addr, arvif->vdev_id);
+ else
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "Removed peer: %pM for VDEV: %d\n",
+ sta->addr, arvif->vdev_id);
+
+ ath12k_mac_dec_num_stations(arvif, sta);
+ spin_lock_bh(&ar->ab->base_lock);
+ peer = ath12k_peer_find(ar->ab, arvif->vdev_id, sta->addr);
+ if (peer && peer->sta == sta) {
+ ath12k_warn(ar->ab, "Found peer entry %pM n vdev %i after it was supposedly removed\n",
+ vif->addr, arvif->vdev_id);
+ peer->sta = NULL;
+ list_del(&peer->list);
+ kfree(peer);
+ ar->num_peers--;
+ }
+ spin_unlock_bh(&ar->ab->base_lock);
+
+ kfree(arsta->rx_stats);
+ arsta->rx_stats = NULL;
+ } else if (old_state == IEEE80211_STA_AUTH &&
+ new_state == IEEE80211_STA_ASSOC &&
+ (vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_MESH_POINT ||
+ vif->type == NL80211_IFTYPE_ADHOC)) {
+ ret = ath12k_station_assoc(ar, vif, sta, false);
+ if (ret)
+ ath12k_warn(ar->ab, "Failed to associate station: %pM\n",
+ sta->addr);
+ } else if (old_state == IEEE80211_STA_ASSOC &&
+ new_state == IEEE80211_STA_AUTHORIZED) {
+ spin_lock_bh(&ar->ab->base_lock);
+
+ peer = ath12k_peer_find(ar->ab, arvif->vdev_id, sta->addr);
+ if (peer)
+ peer->is_authorized = true;
+
+ spin_unlock_bh(&ar->ab->base_lock);
+
+ if (vif->type == NL80211_IFTYPE_STATION && arvif->is_up) {
+ ret = ath12k_wmi_set_peer_param(ar, sta->addr,
+ arvif->vdev_id,
+ WMI_PEER_AUTHORIZE,
+ 1);
+ if (ret)
+ ath12k_warn(ar->ab, "Unable to authorize peer %pM vdev %d: %d\n",
+ sta->addr, arvif->vdev_id, ret);
+ }
+ } else if (old_state == IEEE80211_STA_AUTHORIZED &&
+ new_state == IEEE80211_STA_ASSOC) {
+ spin_lock_bh(&ar->ab->base_lock);
+
+ peer = ath12k_peer_find(ar->ab, arvif->vdev_id, sta->addr);
+ if (peer)
+ peer->is_authorized = false;
+
+ spin_unlock_bh(&ar->ab->base_lock);
+ } else if (old_state == IEEE80211_STA_ASSOC &&
+ new_state == IEEE80211_STA_AUTH &&
+ (vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_MESH_POINT ||
+ vif->type == NL80211_IFTYPE_ADHOC)) {
+ ret = ath12k_station_disassoc(ar, vif, sta);
+ if (ret)
+ ath12k_warn(ar->ab, "Failed to disassociate station: %pM\n",
+ sta->addr);
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static int ath12k_mac_op_sta_set_txpwr(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct ath12k *ar = hw->priv;
+ struct ath12k_vif *arvif = (void *)vif->drv_priv;
+ int ret;
+ s16 txpwr;
+
+ if (sta->deflink.txpwr.type == NL80211_TX_POWER_AUTOMATIC) {
+ txpwr = 0;
+ } else {
+ txpwr = sta->deflink.txpwr.power;
+ if (!txpwr)
+ return -EINVAL;
+ }
+
+ if (txpwr > ATH12K_TX_POWER_MAX_VAL || txpwr < ATH12K_TX_POWER_MIN_VAL)
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+
+ ret = ath12k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id,
+ WMI_PEER_USE_FIXED_PWR, txpwr);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to set tx power for station ret: %d\n",
+ ret);
+ goto out;
+ }
+
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static void ath12k_mac_op_sta_rc_update(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ u32 changed)
+{
+ struct ath12k *ar = hw->priv;
+ struct ath12k_sta *arsta = (struct ath12k_sta *)sta->drv_priv;
+ struct ath12k_vif *arvif = (void *)vif->drv_priv;
+ struct ath12k_peer *peer;
+ u32 bw, smps;
+
+ spin_lock_bh(&ar->ab->base_lock);
+
+ peer = ath12k_peer_find(ar->ab, arvif->vdev_id, sta->addr);
+ if (!peer) {
+ spin_unlock_bh(&ar->ab->base_lock);
+ ath12k_warn(ar->ab, "mac sta rc update failed to find peer %pM on vdev %i\n",
+ sta->addr, arvif->vdev_id);
+ return;
+ }
+
+ spin_unlock_bh(&ar->ab->base_lock);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+ "mac sta rc update for %pM changed %08x bw %d nss %d smps %d\n",
+ sta->addr, changed, sta->deflink.bandwidth, sta->deflink.rx_nss,
+ sta->deflink.smps_mode);
+
+ spin_lock_bh(&ar->data_lock);
+
+ if (changed & IEEE80211_RC_BW_CHANGED) {
+ bw = WMI_PEER_CHWIDTH_20MHZ;
+
+ switch (sta->deflink.bandwidth) {
+ case IEEE80211_STA_RX_BW_20:
+ bw = WMI_PEER_CHWIDTH_20MHZ;
+ break;
+ case IEEE80211_STA_RX_BW_40:
+ bw = WMI_PEER_CHWIDTH_40MHZ;
+ break;
+ case IEEE80211_STA_RX_BW_80:
+ bw = WMI_PEER_CHWIDTH_80MHZ;
+ break;
+ case IEEE80211_STA_RX_BW_160:
+ bw = WMI_PEER_CHWIDTH_160MHZ;
+ break;
+ default:
+ ath12k_warn(ar->ab, "Invalid bandwidth %d in rc update for %pM\n",
+ sta->deflink.bandwidth, sta->addr);
+ bw = WMI_PEER_CHWIDTH_20MHZ;
+ break;
+ }
+
+ arsta->bw = bw;
+ }
+
+ if (changed & IEEE80211_RC_NSS_CHANGED)
+ arsta->nss = sta->deflink.rx_nss;
+
+ if (changed & IEEE80211_RC_SMPS_CHANGED) {
+ smps = WMI_PEER_SMPS_PS_NONE;
+
+ switch (sta->deflink.smps_mode) {
+ case IEEE80211_SMPS_AUTOMATIC:
+ case IEEE80211_SMPS_OFF:
+ smps = WMI_PEER_SMPS_PS_NONE;
+ break;
+ case IEEE80211_SMPS_STATIC:
+ smps = WMI_PEER_SMPS_STATIC;
+ break;
+ case IEEE80211_SMPS_DYNAMIC:
+ smps = WMI_PEER_SMPS_DYNAMIC;
+ break;
+ default:
+ ath12k_warn(ar->ab, "Invalid smps %d in sta rc update for %pM\n",
+ sta->deflink.smps_mode, sta->addr);
+ smps = WMI_PEER_SMPS_PS_NONE;
+ break;
+ }
+
+ arsta->smps = smps;
+ }
+
+ arsta->changed |= changed;
+
+ spin_unlock_bh(&ar->data_lock);
+
+ ieee80211_queue_work(hw, &arsta->update_wk);
+}
+
+static int ath12k_conf_tx_uapsd(struct ath12k *ar, struct ieee80211_vif *vif,
+ u16 ac, bool enable)
+{
+ struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ u32 value;
+ int ret;
+
+ if (arvif->vdev_type != WMI_VDEV_TYPE_STA)
+ return 0;
+
+ switch (ac) {
+ case IEEE80211_AC_VO:
+ value = WMI_STA_PS_UAPSD_AC3_DELIVERY_EN |
+ WMI_STA_PS_UAPSD_AC3_TRIGGER_EN;
+ break;
+ case IEEE80211_AC_VI:
+ value = WMI_STA_PS_UAPSD_AC2_DELIVERY_EN |
+ WMI_STA_PS_UAPSD_AC2_TRIGGER_EN;
+ break;
+ case IEEE80211_AC_BE:
+ value = WMI_STA_PS_UAPSD_AC1_DELIVERY_EN |
+ WMI_STA_PS_UAPSD_AC1_TRIGGER_EN;
+ break;
+ case IEEE80211_AC_BK:
+ value = WMI_STA_PS_UAPSD_AC0_DELIVERY_EN |
+ WMI_STA_PS_UAPSD_AC0_TRIGGER_EN;
+ break;
+ }
+
+ if (enable)
+ arvif->u.sta.uapsd |= value;
+ else
+ arvif->u.sta.uapsd &= ~value;
+
+ ret = ath12k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
+ WMI_STA_PS_PARAM_UAPSD,
+ arvif->u.sta.uapsd);
+ if (ret) {
+ ath12k_warn(ar->ab, "could not set uapsd params %d\n", ret);
+ goto exit;
+ }
+
+ if (arvif->u.sta.uapsd)
+ value = WMI_STA_PS_RX_WAKE_POLICY_POLL_UAPSD;
+ else
+ value = WMI_STA_PS_RX_WAKE_POLICY_WAKE;
+
+ ret = ath12k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
+ WMI_STA_PS_PARAM_RX_WAKE_POLICY,
+ value);
+ if (ret)
+ ath12k_warn(ar->ab, "could not set rx wake param %d\n", ret);
+
+exit:
+ return ret;
+}
+
+static int ath12k_mac_op_conf_tx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ unsigned int link_id, u16 ac,
+ const struct ieee80211_tx_queue_params *params)
+{
+ struct ath12k *ar = hw->priv;
+ struct ath12k_vif *arvif = (void *)vif->drv_priv;
+ struct wmi_wmm_params_arg *p = NULL;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ switch (ac) {
+ case IEEE80211_AC_VO:
+ p = &arvif->wmm_params.ac_vo;
+ break;
+ case IEEE80211_AC_VI:
+ p = &arvif->wmm_params.ac_vi;
+ break;
+ case IEEE80211_AC_BE:
+ p = &arvif->wmm_params.ac_be;
+ break;
+ case IEEE80211_AC_BK:
+ p = &arvif->wmm_params.ac_bk;
+ break;
+ }
+
+ if (WARN_ON(!p)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ p->cwmin = params->cw_min;
+ p->cwmax = params->cw_max;
+ p->aifs = params->aifs;
+ p->txop = params->txop;
+
+ ret = ath12k_wmi_send_wmm_update_cmd(ar, arvif->vdev_id,
+ &arvif->wmm_params);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to set wmm params: %d\n", ret);
+ goto exit;
+ }
+
+ ret = ath12k_conf_tx_uapsd(ar, vif, ac, params->uapsd);
+
+ if (ret)
+ ath12k_warn(ar->ab, "failed to set sta uapsd: %d\n", ret);
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static struct ieee80211_sta_ht_cap
+ath12k_create_ht_cap(struct ath12k *ar, u32 ar_ht_cap, u32 rate_cap_rx_chainmask)
+{
+ int i;
+ struct ieee80211_sta_ht_cap ht_cap = {0};
+ u32 ar_vht_cap = ar->pdev->cap.vht_cap;
+
+ if (!(ar_ht_cap & WMI_HT_CAP_ENABLED))
+ return ht_cap;
+
+ ht_cap.ht_supported = 1;
+ ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
+ ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_NONE;
+ ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+ ht_cap.cap |= IEEE80211_HT_CAP_DSSSCCK40;
+ ht_cap.cap |= WLAN_HT_CAP_SM_PS_STATIC << IEEE80211_HT_CAP_SM_PS_SHIFT;
+
+ if (ar_ht_cap & WMI_HT_CAP_HT20_SGI)
+ ht_cap.cap |= IEEE80211_HT_CAP_SGI_20;
+
+ if (ar_ht_cap & WMI_HT_CAP_HT40_SGI)
+ ht_cap.cap |= IEEE80211_HT_CAP_SGI_40;
+
+ if (ar_ht_cap & WMI_HT_CAP_DYNAMIC_SMPS) {
+ u32 smps;
+
+ smps = WLAN_HT_CAP_SM_PS_DYNAMIC;
+ smps <<= IEEE80211_HT_CAP_SM_PS_SHIFT;
+
+ ht_cap.cap |= smps;
+ }
+
+ if (ar_ht_cap & WMI_HT_CAP_TX_STBC)
+ ht_cap.cap |= IEEE80211_HT_CAP_TX_STBC;
+
+ if (ar_ht_cap & WMI_HT_CAP_RX_STBC) {
+ u32 stbc;
+
+ stbc = ar_ht_cap;
+ stbc &= WMI_HT_CAP_RX_STBC;
+ stbc >>= WMI_HT_CAP_RX_STBC_MASK_SHIFT;
+ stbc <<= IEEE80211_HT_CAP_RX_STBC_SHIFT;
+ stbc &= IEEE80211_HT_CAP_RX_STBC;
+
+ ht_cap.cap |= stbc;
+ }
+
+ if (ar_ht_cap & WMI_HT_CAP_RX_LDPC)
+ ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
+
+ if (ar_ht_cap & WMI_HT_CAP_L_SIG_TXOP_PROT)
+ ht_cap.cap |= IEEE80211_HT_CAP_LSIG_TXOP_PROT;
+
+ if (ar_vht_cap & WMI_VHT_CAP_MAX_MPDU_LEN_MASK)
+ ht_cap.cap |= IEEE80211_HT_CAP_MAX_AMSDU;
+
+ for (i = 0; i < ar->num_rx_chains; i++) {
+ if (rate_cap_rx_chainmask & BIT(i))
+ ht_cap.mcs.rx_mask[i] = 0xFF;
+ }
+
+ ht_cap.mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
+
+ return ht_cap;
+}
+
+static int ath12k_mac_set_txbf_conf(struct ath12k_vif *arvif)
+{
+ u32 value = 0;
+ struct ath12k *ar = arvif->ar;
+ int nsts;
+ int sound_dim;
+ u32 vht_cap = ar->pdev->cap.vht_cap;
+ u32 vdev_param = WMI_VDEV_PARAM_TXBF;
+
+ if (vht_cap & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE)) {
+ nsts = vht_cap & IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK;
+ nsts >>= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT;
+ value |= SM(nsts, WMI_TXBF_STS_CAP_OFFSET);
+ }
+
+ if (vht_cap & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)) {
+ sound_dim = vht_cap &
+ IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK;
+ sound_dim >>= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT;
+ if (sound_dim > (ar->num_tx_chains - 1))
+ sound_dim = ar->num_tx_chains - 1;
+ value |= SM(sound_dim, WMI_BF_SOUND_DIM_OFFSET);
+ }
+
+ if (!value)
+ return 0;
+
+ if (vht_cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE) {
+ value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER;
+
+ if ((vht_cap & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE) &&
+ arvif->vdev_type == WMI_VDEV_TYPE_AP)
+ value |= WMI_VDEV_PARAM_TXBF_MU_TX_BFER;
+ }
+
+ if (vht_cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE) {
+ value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE;
+
+ if ((vht_cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE) &&
+ arvif->vdev_type == WMI_VDEV_TYPE_STA)
+ value |= WMI_VDEV_PARAM_TXBF_MU_TX_BFEE;
+ }
+
+ return ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ vdev_param, value);
+}
+
+static void ath12k_set_vht_txbf_cap(struct ath12k *ar, u32 *vht_cap)
+{
+ bool subfer, subfee;
+ int sound_dim = 0;
+
+ subfer = !!(*vht_cap & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE));
+ subfee = !!(*vht_cap & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE));
+
+ if (ar->num_tx_chains < 2) {
+ *vht_cap &= ~(IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE);
+ subfer = false;
+ }
+
+ /* If SU Beaformer is not set, then disable MU Beamformer Capability */
+ if (!subfer)
+ *vht_cap &= ~(IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE);
+
+ /* If SU Beaformee is not set, then disable MU Beamformee Capability */
+ if (!subfee)
+ *vht_cap &= ~(IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE);
+
+ sound_dim = u32_get_bits(*vht_cap,
+ IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK);
+ *vht_cap = u32_replace_bits(*vht_cap, 0,
+ IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK);
+
+ /* TODO: Need to check invalid STS and Sound_dim values set by FW? */
+
+ /* Enable Sounding Dimension Field only if SU BF is enabled */
+ if (subfer) {
+ if (sound_dim > (ar->num_tx_chains - 1))
+ sound_dim = ar->num_tx_chains - 1;
+
+ *vht_cap = u32_replace_bits(*vht_cap, sound_dim,
+ IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK);
+ }
+
+ /* Use the STS advertised by FW unless SU Beamformee is not supported*/
+ if (!subfee)
+ *vht_cap &= ~(IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK);
+}
+
+static struct ieee80211_sta_vht_cap
+ath12k_create_vht_cap(struct ath12k *ar, u32 rate_cap_tx_chainmask,
+ u32 rate_cap_rx_chainmask)
+{
+ struct ieee80211_sta_vht_cap vht_cap = {0};
+ u16 txmcs_map, rxmcs_map;
+ int i;
+
+ vht_cap.vht_supported = 1;
+ vht_cap.cap = ar->pdev->cap.vht_cap;
+
+ ath12k_set_vht_txbf_cap(ar, &vht_cap.cap);
+
+ /* TODO: Enable back VHT160 mode once association issues are fixed */
+ /* Disabling VHT160 and VHT80+80 modes */
+ vht_cap.cap &= ~IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK;
+ vht_cap.cap &= ~IEEE80211_VHT_CAP_SHORT_GI_160;
+
+ rxmcs_map = 0;
+ txmcs_map = 0;
+ for (i = 0; i < 8; i++) {
+ if (i < ar->num_tx_chains && rate_cap_tx_chainmask & BIT(i))
+ txmcs_map |= IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2);
+ else
+ txmcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2);
+
+ if (i < ar->num_rx_chains && rate_cap_rx_chainmask & BIT(i))
+ rxmcs_map |= IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2);
+ else
+ rxmcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2);
+ }
+
+ if (rate_cap_tx_chainmask <= 1)
+ vht_cap.cap &= ~IEEE80211_VHT_CAP_TXSTBC;
+
+ vht_cap.vht_mcs.rx_mcs_map = cpu_to_le16(rxmcs_map);
+ vht_cap.vht_mcs.tx_mcs_map = cpu_to_le16(txmcs_map);
+
+ return vht_cap;
+}
+
+static void ath12k_mac_setup_ht_vht_cap(struct ath12k *ar,
+ struct ath12k_pdev_cap *cap,
+ u32 *ht_cap_info)
+{
+ struct ieee80211_supported_band *band;
+ u32 rate_cap_tx_chainmask;
+ u32 rate_cap_rx_chainmask;
+ u32 ht_cap;
+
+ rate_cap_tx_chainmask = ar->cfg_tx_chainmask >> cap->tx_chain_mask_shift;
+ rate_cap_rx_chainmask = ar->cfg_rx_chainmask >> cap->rx_chain_mask_shift;
+
+ if (cap->supported_bands & WMI_HOST_WLAN_2G_CAP) {
+ band = &ar->mac.sbands[NL80211_BAND_2GHZ];
+ ht_cap = cap->band[NL80211_BAND_2GHZ].ht_cap_info;
+ if (ht_cap_info)
+ *ht_cap_info = ht_cap;
+ band->ht_cap = ath12k_create_ht_cap(ar, ht_cap,
+ rate_cap_rx_chainmask);
+ }
+
+ if (cap->supported_bands & WMI_HOST_WLAN_5G_CAP &&
+ (ar->ab->hw_params->single_pdev_only ||
+ !ar->supports_6ghz)) {
+ band = &ar->mac.sbands[NL80211_BAND_5GHZ];
+ ht_cap = cap->band[NL80211_BAND_5GHZ].ht_cap_info;
+ if (ht_cap_info)
+ *ht_cap_info = ht_cap;
+ band->ht_cap = ath12k_create_ht_cap(ar, ht_cap,
+ rate_cap_rx_chainmask);
+ band->vht_cap = ath12k_create_vht_cap(ar, rate_cap_tx_chainmask,
+ rate_cap_rx_chainmask);
+ }
+}
+
+static int ath12k_check_chain_mask(struct ath12k *ar, u32 ant, bool is_tx_ant)
+{
+ /* TODO: Check the request chainmask against the supported
+ * chainmask table which is advertised in extented_service_ready event
+ */
+
+ return 0;
+}
+
+static void ath12k_gen_ppe_thresh(struct ath12k_wmi_ppe_threshold_arg *fw_ppet,
+ u8 *he_ppet)
+{
+ int nss, ru;
+ u8 bit = 7;
+
+ he_ppet[0] = fw_ppet->numss_m1 & IEEE80211_PPE_THRES_NSS_MASK;
+ he_ppet[0] |= (fw_ppet->ru_bit_mask <<
+ IEEE80211_PPE_THRES_RU_INDEX_BITMASK_POS) &
+ IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK;
+ for (nss = 0; nss <= fw_ppet->numss_m1; nss++) {
+ for (ru = 0; ru < 4; ru++) {
+ u8 val;
+ int i;
+
+ if ((fw_ppet->ru_bit_mask & BIT(ru)) == 0)
+ continue;
+ val = (fw_ppet->ppet16_ppet8_ru3_ru0[nss] >> (ru * 6)) &
+ 0x3f;
+ val = ((val >> 3) & 0x7) | ((val & 0x7) << 3);
+ for (i = 5; i >= 0; i--) {
+ he_ppet[bit / 8] |=
+ ((val >> i) & 0x1) << ((bit % 8));
+ bit++;
+ }
+ }
+ }
+}
+
+static void
+ath12k_mac_filter_he_cap_mesh(struct ieee80211_he_cap_elem *he_cap_elem)
+{
+ u8 m;
+
+ m = IEEE80211_HE_MAC_CAP0_TWT_RES |
+ IEEE80211_HE_MAC_CAP0_TWT_REQ;
+ he_cap_elem->mac_cap_info[0] &= ~m;
+
+ m = IEEE80211_HE_MAC_CAP2_TRS |
+ IEEE80211_HE_MAC_CAP2_BCAST_TWT |
+ IEEE80211_HE_MAC_CAP2_MU_CASCADING;
+ he_cap_elem->mac_cap_info[2] &= ~m;
+
+ m = IEEE80211_HE_MAC_CAP3_FLEX_TWT_SCHED |
+ IEEE80211_HE_MAC_CAP2_BCAST_TWT |
+ IEEE80211_HE_MAC_CAP2_MU_CASCADING;
+ he_cap_elem->mac_cap_info[3] &= ~m;
+
+ m = IEEE80211_HE_MAC_CAP4_BSRP_BQRP_A_MPDU_AGG |
+ IEEE80211_HE_MAC_CAP4_BQR;
+ he_cap_elem->mac_cap_info[4] &= ~m;
+
+ m = IEEE80211_HE_MAC_CAP5_SUBCHAN_SELECTIVE_TRANSMISSION |
+ IEEE80211_HE_MAC_CAP5_UL_2x996_TONE_RU |
+ IEEE80211_HE_MAC_CAP5_PUNCTURED_SOUNDING |
+ IEEE80211_HE_MAC_CAP5_HT_VHT_TRIG_FRAME_RX;
+ he_cap_elem->mac_cap_info[5] &= ~m;
+
+ m = IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO |
+ IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO;
+ he_cap_elem->phy_cap_info[2] &= ~m;
+
+ m = IEEE80211_HE_PHY_CAP3_RX_PARTIAL_BW_SU_IN_20MHZ_MU |
+ IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_MASK |
+ IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_MASK;
+ he_cap_elem->phy_cap_info[3] &= ~m;
+
+ m = IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER;
+ he_cap_elem->phy_cap_info[4] &= ~m;
+
+ m = IEEE80211_HE_PHY_CAP5_NG16_MU_FEEDBACK;
+ he_cap_elem->phy_cap_info[5] &= ~m;
+
+ m = IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_75_MU |
+ IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMING_PARTIAL_BW_FB |
+ IEEE80211_HE_PHY_CAP6_TRIG_CQI_FB |
+ IEEE80211_HE_PHY_CAP6_PARTIAL_BANDWIDTH_DL_MUMIMO;
+ he_cap_elem->phy_cap_info[6] &= ~m;
+
+ m = IEEE80211_HE_PHY_CAP7_PSR_BASED_SR |
+ IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_SUPP |
+ IEEE80211_HE_PHY_CAP7_STBC_TX_ABOVE_80MHZ |
+ IEEE80211_HE_PHY_CAP7_STBC_RX_ABOVE_80MHZ;
+ he_cap_elem->phy_cap_info[7] &= ~m;
+
+ m = IEEE80211_HE_PHY_CAP8_HE_ER_SU_PPDU_4XLTF_AND_08_US_GI |
+ IEEE80211_HE_PHY_CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G |
+ IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU |
+ IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU;
+ he_cap_elem->phy_cap_info[8] &= ~m;
+
+ m = IEEE80211_HE_PHY_CAP9_LONGER_THAN_16_SIGB_OFDM_SYM |
+ IEEE80211_HE_PHY_CAP9_NON_TRIGGERED_CQI_FEEDBACK |
+ IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU |
+ IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU |
+ IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB |
+ IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB;
+ he_cap_elem->phy_cap_info[9] &= ~m;
+}
+
+static __le16 ath12k_mac_setup_he_6ghz_cap(struct ath12k_pdev_cap *pcap,
+ struct ath12k_band_cap *bcap)
+{
+ u8 val;
+
+ bcap->he_6ghz_capa = IEEE80211_HT_MPDU_DENSITY_NONE;
+ if (bcap->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS)
+ bcap->he_6ghz_capa |=
+ u32_encode_bits(WLAN_HT_CAP_SM_PS_DYNAMIC,
+ IEEE80211_HE_6GHZ_CAP_SM_PS);
+ else
+ bcap->he_6ghz_capa |=
+ u32_encode_bits(WLAN_HT_CAP_SM_PS_DISABLED,
+ IEEE80211_HE_6GHZ_CAP_SM_PS);
+ val = u32_get_bits(pcap->vht_cap,
+ IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK);
+ bcap->he_6ghz_capa |=
+ u32_encode_bits(val, IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP);
+ val = u32_get_bits(pcap->vht_cap,
+ IEEE80211_VHT_CAP_MAX_MPDU_MASK);
+ bcap->he_6ghz_capa |=
+ u32_encode_bits(val, IEEE80211_HE_6GHZ_CAP_MAX_MPDU_LEN);
+ if (pcap->vht_cap & IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN)
+ bcap->he_6ghz_capa |= IEEE80211_HE_6GHZ_CAP_RX_ANTPAT_CONS;
+ if (pcap->vht_cap & IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN)
+ bcap->he_6ghz_capa |= IEEE80211_HE_6GHZ_CAP_TX_ANTPAT_CONS;
+
+ return cpu_to_le16(bcap->he_6ghz_capa);
+}
+
+static int ath12k_mac_copy_he_cap(struct ath12k *ar,
+ struct ath12k_pdev_cap *cap,
+ struct ieee80211_sband_iftype_data *data,
+ int band)
+{
+ int i, idx = 0;
+
+ for (i = 0; i < NUM_NL80211_IFTYPES; i++) {
+ struct ieee80211_sta_he_cap *he_cap = &data[idx].he_cap;
+ struct ath12k_band_cap *band_cap = &cap->band[band];
+ struct ieee80211_he_cap_elem *he_cap_elem =
+ &he_cap->he_cap_elem;
+
+ switch (i) {
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_MESH_POINT:
+ break;
+
+ default:
+ continue;
+ }
+
+ data[idx].types_mask = BIT(i);
+ he_cap->has_he = true;
+ memcpy(he_cap_elem->mac_cap_info, band_cap->he_cap_info,
+ sizeof(he_cap_elem->mac_cap_info));
+ memcpy(he_cap_elem->phy_cap_info, band_cap->he_cap_phy_info,
+ sizeof(he_cap_elem->phy_cap_info));
+
+ he_cap_elem->mac_cap_info[1] &=
+ IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_MASK;
+
+ he_cap_elem->phy_cap_info[5] &=
+ ~IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK;
+ he_cap_elem->phy_cap_info[5] &=
+ ~IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_MASK;
+ he_cap_elem->phy_cap_info[5] |= ar->num_tx_chains - 1;
+
+ switch (i) {
+ case NL80211_IFTYPE_AP:
+ he_cap_elem->phy_cap_info[3] &=
+ ~IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_MASK;
+ he_cap_elem->phy_cap_info[9] |=
+ IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU;
+ break;
+ case NL80211_IFTYPE_STATION:
+ he_cap_elem->mac_cap_info[0] &=
+ ~IEEE80211_HE_MAC_CAP0_TWT_RES;
+ he_cap_elem->mac_cap_info[0] |=
+ IEEE80211_HE_MAC_CAP0_TWT_REQ;
+ he_cap_elem->phy_cap_info[9] |=
+ IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU;
+ break;
+ case NL80211_IFTYPE_MESH_POINT:
+ ath12k_mac_filter_he_cap_mesh(he_cap_elem);
+ break;
+ }
+
+ he_cap->he_mcs_nss_supp.rx_mcs_80 =
+ cpu_to_le16(band_cap->he_mcs & 0xffff);
+ he_cap->he_mcs_nss_supp.tx_mcs_80 =
+ cpu_to_le16(band_cap->he_mcs & 0xffff);
+ he_cap->he_mcs_nss_supp.rx_mcs_160 =
+ cpu_to_le16((band_cap->he_mcs >> 16) & 0xffff);
+ he_cap->he_mcs_nss_supp.tx_mcs_160 =
+ cpu_to_le16((band_cap->he_mcs >> 16) & 0xffff);
+ he_cap->he_mcs_nss_supp.rx_mcs_80p80 =
+ cpu_to_le16((band_cap->he_mcs >> 16) & 0xffff);
+ he_cap->he_mcs_nss_supp.tx_mcs_80p80 =
+ cpu_to_le16((band_cap->he_mcs >> 16) & 0xffff);
+
+ memset(he_cap->ppe_thres, 0, sizeof(he_cap->ppe_thres));
+ if (he_cap_elem->phy_cap_info[6] &
+ IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT)
+ ath12k_gen_ppe_thresh(&band_cap->he_ppet,
+ he_cap->ppe_thres);
+
+ if (band == NL80211_BAND_6GHZ) {
+ data[idx].he_6ghz_capa.capa =
+ ath12k_mac_setup_he_6ghz_cap(cap, band_cap);
+ }
+ idx++;
+ }
+
+ return idx;
+}
+
+static void ath12k_mac_setup_he_cap(struct ath12k *ar,
+ struct ath12k_pdev_cap *cap)
+{
+ struct ieee80211_supported_band *band;
+ int count;
+
+ if (cap->supported_bands & WMI_HOST_WLAN_2G_CAP) {
+ count = ath12k_mac_copy_he_cap(ar, cap,
+ ar->mac.iftype[NL80211_BAND_2GHZ],
+ NL80211_BAND_2GHZ);
+ band = &ar->mac.sbands[NL80211_BAND_2GHZ];
+ band->iftype_data = ar->mac.iftype[NL80211_BAND_2GHZ];
+ band->n_iftype_data = count;
+ }
+
+ if (cap->supported_bands & WMI_HOST_WLAN_5G_CAP) {
+ count = ath12k_mac_copy_he_cap(ar, cap,
+ ar->mac.iftype[NL80211_BAND_5GHZ],
+ NL80211_BAND_5GHZ);
+ band = &ar->mac.sbands[NL80211_BAND_5GHZ];
+ band->iftype_data = ar->mac.iftype[NL80211_BAND_5GHZ];
+ band->n_iftype_data = count;
+ }
+
+ if (cap->supported_bands & WMI_HOST_WLAN_5G_CAP &&
+ ar->supports_6ghz) {
+ count = ath12k_mac_copy_he_cap(ar, cap,
+ ar->mac.iftype[NL80211_BAND_6GHZ],
+ NL80211_BAND_6GHZ);
+ band = &ar->mac.sbands[NL80211_BAND_6GHZ];
+ band->iftype_data = ar->mac.iftype[NL80211_BAND_6GHZ];
+ band->n_iftype_data = count;
+ }
+}
+
+static int __ath12k_set_antenna(struct ath12k *ar, u32 tx_ant, u32 rx_ant)
+{
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (ath12k_check_chain_mask(ar, tx_ant, true))
+ return -EINVAL;
+
+ if (ath12k_check_chain_mask(ar, rx_ant, false))
+ return -EINVAL;
+
+ ar->cfg_tx_chainmask = tx_ant;
+ ar->cfg_rx_chainmask = rx_ant;
+
+ if (ar->state != ATH12K_STATE_ON &&
+ ar->state != ATH12K_STATE_RESTARTED)
+ return 0;
+
+ ret = ath12k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_TX_CHAIN_MASK,
+ tx_ant, ar->pdev->pdev_id);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to set tx-chainmask: %d, req 0x%x\n",
+ ret, tx_ant);
+ return ret;
+ }
+
+ ar->num_tx_chains = hweight32(tx_ant);
+
+ ret = ath12k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_RX_CHAIN_MASK,
+ rx_ant, ar->pdev->pdev_id);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to set rx-chainmask: %d, req 0x%x\n",
+ ret, rx_ant);
+ return ret;
+ }
+
+ ar->num_rx_chains = hweight32(rx_ant);
+
+ /* Reload HT/VHT/HE capability */
+ ath12k_mac_setup_ht_vht_cap(ar, &ar->pdev->cap, NULL);
+ ath12k_mac_setup_he_cap(ar, &ar->pdev->cap);
+
+ return 0;
+}
+
+int ath12k_mac_tx_mgmt_pending_free(int buf_id, void *skb, void *ctx)
+{
+ struct sk_buff *msdu = skb;
+ struct ieee80211_tx_info *info;
+ struct ath12k *ar = ctx;
+ struct ath12k_base *ab = ar->ab;
+
+ spin_lock_bh(&ar->txmgmt_idr_lock);
+ idr_remove(&ar->txmgmt_idr, buf_id);
+ spin_unlock_bh(&ar->txmgmt_idr_lock);
+ dma_unmap_single(ab->dev, ATH12K_SKB_CB(msdu)->paddr, msdu->len,
+ DMA_TO_DEVICE);
+
+ info = IEEE80211_SKB_CB(msdu);
+ memset(&info->status, 0, sizeof(info->status));
+
+ ieee80211_free_txskb(ar->hw, msdu);
+
+ return 0;
+}
+
+static int ath12k_mac_vif_txmgmt_idr_remove(int buf_id, void *skb, void *ctx)
+{
+ struct ieee80211_vif *vif = ctx;
+ struct ath12k_skb_cb *skb_cb = ATH12K_SKB_CB(skb);
+ struct sk_buff *msdu = skb;
+ struct ath12k *ar = skb_cb->ar;
+ struct ath12k_base *ab = ar->ab;
+
+ if (skb_cb->vif == vif) {
+ spin_lock_bh(&ar->txmgmt_idr_lock);
+ idr_remove(&ar->txmgmt_idr, buf_id);
+ spin_unlock_bh(&ar->txmgmt_idr_lock);
+ dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len,
+ DMA_TO_DEVICE);
+ }
+
+ return 0;
+}
+
+static int ath12k_mac_mgmt_tx_wmi(struct ath12k *ar, struct ath12k_vif *arvif,
+ struct sk_buff *skb)
+{
+ struct ath12k_base *ab = ar->ab;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct ieee80211_tx_info *info;
+ dma_addr_t paddr;
+ int buf_id;
+ int ret;
+
+ spin_lock_bh(&ar->txmgmt_idr_lock);
+ buf_id = idr_alloc(&ar->txmgmt_idr, skb, 0,
+ ATH12K_TX_MGMT_NUM_PENDING_MAX, GFP_ATOMIC);
+ spin_unlock_bh(&ar->txmgmt_idr_lock);
+ if (buf_id < 0)
+ return -ENOSPC;
+
+ info = IEEE80211_SKB_CB(skb);
+ if (!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP)) {
+ if ((ieee80211_is_action(hdr->frame_control) ||
+ ieee80211_is_deauth(hdr->frame_control) ||
+ ieee80211_is_disassoc(hdr->frame_control)) &&
+ ieee80211_has_protected(hdr->frame_control)) {
+ skb_put(skb, IEEE80211_CCMP_MIC_LEN);
+ }
+ }
+
+ paddr = dma_map_single(ab->dev, skb->data, skb->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(ab->dev, paddr)) {
+ ath12k_warn(ab, "failed to DMA map mgmt Tx buffer\n");
+ ret = -EIO;
+ goto err_free_idr;
+ }
+
+ ATH12K_SKB_CB(skb)->paddr = paddr;
+
+ ret = ath12k_wmi_mgmt_send(ar, arvif->vdev_id, buf_id, skb);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to send mgmt frame: %d\n", ret);
+ goto err_unmap_buf;
+ }
+
+ return 0;
+
+err_unmap_buf:
+ dma_unmap_single(ab->dev, ATH12K_SKB_CB(skb)->paddr,
+ skb->len, DMA_TO_DEVICE);
+err_free_idr:
+ spin_lock_bh(&ar->txmgmt_idr_lock);
+ idr_remove(&ar->txmgmt_idr, buf_id);
+ spin_unlock_bh(&ar->txmgmt_idr_lock);
+
+ return ret;
+}
+
+static void ath12k_mgmt_over_wmi_tx_purge(struct ath12k *ar)
+{
+ struct sk_buff *skb;
+
+ while ((skb = skb_dequeue(&ar->wmi_mgmt_tx_queue)) != NULL)
+ ieee80211_free_txskb(ar->hw, skb);
+}
+
+static void ath12k_mgmt_over_wmi_tx_work(struct work_struct *work)
+{
+ struct ath12k *ar = container_of(work, struct ath12k, wmi_mgmt_tx_work);
+ struct ath12k_skb_cb *skb_cb;
+ struct ath12k_vif *arvif;
+ struct sk_buff *skb;
+ int ret;
+
+ while ((skb = skb_dequeue(&ar->wmi_mgmt_tx_queue)) != NULL) {
+ skb_cb = ATH12K_SKB_CB(skb);
+ if (!skb_cb->vif) {
+ ath12k_warn(ar->ab, "no vif found for mgmt frame\n");
+ ieee80211_free_txskb(ar->hw, skb);
+ continue;
+ }
+
+ arvif = ath12k_vif_to_arvif(skb_cb->vif);
+ if (ar->allocated_vdev_map & (1LL << arvif->vdev_id) &&
+ arvif->is_started) {
+ ret = ath12k_mac_mgmt_tx_wmi(ar, arvif, skb);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to tx mgmt frame, vdev_id %d :%d\n",
+ arvif->vdev_id, ret);
+ ieee80211_free_txskb(ar->hw, skb);
+ } else {
+ atomic_inc(&ar->num_pending_mgmt_tx);
+ }
+ } else {
+ ath12k_warn(ar->ab,
+ "dropping mgmt frame for vdev %d, is_started %d\n",
+ arvif->vdev_id,
+ arvif->is_started);
+ ieee80211_free_txskb(ar->hw, skb);
+ }
+ }
+}
+
+static int ath12k_mac_mgmt_tx(struct ath12k *ar, struct sk_buff *skb,
+ bool is_prb_rsp)
+{
+ struct sk_buff_head *q = &ar->wmi_mgmt_tx_queue;
+
+ if (test_bit(ATH12K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags))
+ return -ESHUTDOWN;
+
+ /* Drop probe response packets when the pending management tx
+ * count has reached a certain threshold, so as to prioritize
+ * other mgmt packets like auth and assoc to be sent on time
+ * for establishing successful connections.
+ */
+ if (is_prb_rsp &&
+ atomic_read(&ar->num_pending_mgmt_tx) > ATH12K_PRB_RSP_DROP_THRESHOLD) {
+ ath12k_warn(ar->ab,
+ "dropping probe response as pending queue is almost full\n");
+ return -ENOSPC;
+ }
+
+ if (skb_queue_len(q) == ATH12K_TX_MGMT_NUM_PENDING_MAX) {
+ ath12k_warn(ar->ab, "mgmt tx queue is full\n");
+ return -ENOSPC;
+ }
+
+ skb_queue_tail(q, skb);
+ ieee80211_queue_work(ar->hw, &ar->wmi_mgmt_tx_work);
+
+ return 0;
+}
+
+static void ath12k_mac_op_tx(struct ieee80211_hw *hw,
+ struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
+{
+ struct ath12k_skb_cb *skb_cb = ATH12K_SKB_CB(skb);
+ struct ath12k *ar = hw->priv;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_vif *vif = info->control.vif;
+ struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct ieee80211_key_conf *key = info->control.hw_key;
+ u32 info_flags = info->flags;
+ bool is_prb_rsp;
+ int ret;
+
+ memset(skb_cb, 0, sizeof(*skb_cb));
+ skb_cb->vif = vif;
+
+ if (key) {
+ skb_cb->cipher = key->cipher;
+ skb_cb->flags |= ATH12K_SKB_CIPHER_SET;
+ }
+
+ if (info_flags & IEEE80211_TX_CTL_HW_80211_ENCAP) {
+ skb_cb->flags |= ATH12K_SKB_HW_80211_ENCAP;
+ } else if (ieee80211_is_mgmt(hdr->frame_control)) {
+ is_prb_rsp = ieee80211_is_probe_resp(hdr->frame_control);
+ ret = ath12k_mac_mgmt_tx(ar, skb, is_prb_rsp);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to queue management frame %d\n",
+ ret);
+ ieee80211_free_txskb(ar->hw, skb);
+ }
+ return;
+ }
+
+ ret = ath12k_dp_tx(ar, arvif, skb);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to transmit frame %d\n", ret);
+ ieee80211_free_txskb(ar->hw, skb);
+ }
+}
+
+void ath12k_mac_drain_tx(struct ath12k *ar)
+{
+ /* make sure rcu-protected mac80211 tx path itself is drained */
+ synchronize_net();
+
+ cancel_work_sync(&ar->wmi_mgmt_tx_work);
+ ath12k_mgmt_over_wmi_tx_purge(ar);
+}
+
+static int ath12k_mac_config_mon_status_default(struct ath12k *ar, bool enable)
+{
+ return -ENOTSUPP;
+ /* TODO: Need to support new monitor mode */
+}
+
+static void ath12k_mac_wait_reconfigure(struct ath12k_base *ab)
+{
+ int recovery_start_count;
+
+ if (!ab->is_reset)
+ return;
+
+ recovery_start_count = atomic_inc_return(&ab->recovery_start_count);
+
+ ath12k_dbg(ab, ATH12K_DBG_MAC, "recovery start count %d\n", recovery_start_count);
+
+ if (recovery_start_count == ab->num_radios) {
+ complete(&ab->recovery_start);
+ ath12k_dbg(ab, ATH12K_DBG_MAC, "recovery started success\n");
+ }
+
+ ath12k_dbg(ab, ATH12K_DBG_MAC, "waiting reconfigure...\n");
+
+ wait_for_completion_timeout(&ab->reconfigure_complete,
+ ATH12K_RECONFIGURE_TIMEOUT_HZ);
+}
+
+static int ath12k_mac_op_start(struct ieee80211_hw *hw)
+{
+ struct ath12k *ar = hw->priv;
+ struct ath12k_base *ab = ar->ab;
+ struct ath12k_pdev *pdev = ar->pdev;
+ int ret;
+
+ ath12k_mac_drain_tx(ar);
+ mutex_lock(&ar->conf_mutex);
+
+ switch (ar->state) {
+ case ATH12K_STATE_OFF:
+ ar->state = ATH12K_STATE_ON;
+ break;
+ case ATH12K_STATE_RESTARTING:
+ ar->state = ATH12K_STATE_RESTARTED;
+ ath12k_mac_wait_reconfigure(ab);
+ break;
+ case ATH12K_STATE_RESTARTED:
+ case ATH12K_STATE_WEDGED:
+ case ATH12K_STATE_ON:
+ WARN_ON(1);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ ret = ath12k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_PMF_QOS,
+ 1, pdev->pdev_id);
+
+ if (ret) {
+ ath12k_err(ar->ab, "failed to enable PMF QOS: (%d\n", ret);
+ goto err;
+ }
+
+ ret = ath12k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_DYNAMIC_BW, 1,
+ pdev->pdev_id);
+ if (ret) {
+ ath12k_err(ar->ab, "failed to enable dynamic bw: %d\n", ret);
+ goto err;
+ }
+
+ ret = ath12k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_ARP_AC_OVERRIDE,
+ 0, pdev->pdev_id);
+ if (ret) {
+ ath12k_err(ab, "failed to set ac override for ARP: %d\n",
+ ret);
+ goto err;
+ }
+
+ ret = ath12k_wmi_send_dfs_phyerr_offload_enable_cmd(ar, pdev->pdev_id);
+ if (ret) {
+ ath12k_err(ab, "failed to offload radar detection: %d\n",
+ ret);
+ goto err;
+ }
+
+ ret = ath12k_dp_tx_htt_h2t_ppdu_stats_req(ar,
+ HTT_PPDU_STATS_TAG_DEFAULT);
+ if (ret) {
+ ath12k_err(ab, "failed to req ppdu stats: %d\n", ret);
+ goto err;
+ }
+
+ ret = ath12k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_MESH_MCAST_ENABLE,
+ 1, pdev->pdev_id);
+
+ if (ret) {
+ ath12k_err(ar->ab, "failed to enable MESH MCAST ENABLE: (%d\n", ret);
+ goto err;
+ }
+
+ __ath12k_set_antenna(ar, ar->cfg_tx_chainmask, ar->cfg_rx_chainmask);
+
+ /* TODO: Do we need to enable ANI? */
+
+ ath12k_reg_update_chan_list(ar);
+
+ ar->num_started_vdevs = 0;
+ ar->num_created_vdevs = 0;
+ ar->num_peers = 0;
+ ar->allocated_vdev_map = 0;
+
+ /* Configure monitor status ring with default rx_filter to get rx status
+ * such as rssi, rx_duration.
+ */
+ ret = ath12k_mac_config_mon_status_default(ar, true);
+ if (ret && (ret != -ENOTSUPP)) {
+ ath12k_err(ab, "failed to configure monitor status ring with default rx_filter: (%d)\n",
+ ret);
+ goto err;
+ }
+
+ if (ret == -ENOTSUPP)
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+ "monitor status config is not yet supported");
+
+ /* Configure the hash seed for hash based reo dest ring selection */
+ ath12k_wmi_pdev_lro_cfg(ar, ar->pdev->pdev_id);
+
+ /* allow device to enter IMPS */
+ if (ab->hw_params->idle_ps) {
+ ret = ath12k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_IDLE_PS_CONFIG,
+ 1, pdev->pdev_id);
+ if (ret) {
+ ath12k_err(ab, "failed to enable idle ps: %d\n", ret);
+ goto err;
+ }
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+
+ rcu_assign_pointer(ab->pdevs_active[ar->pdev_idx],
+ &ab->pdevs[ar->pdev_idx]);
+
+ return 0;
+
+err:
+ ar->state = ATH12K_STATE_OFF;
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static void ath12k_mac_op_stop(struct ieee80211_hw *hw)
+{
+ struct ath12k *ar = hw->priv;
+ struct htt_ppdu_stats_info *ppdu_stats, *tmp;
+ int ret;
+
+ ath12k_mac_drain_tx(ar);
+
+ mutex_lock(&ar->conf_mutex);
+ ret = ath12k_mac_config_mon_status_default(ar, false);
+ if (ret && (ret != -ENOTSUPP))
+ ath12k_err(ar->ab, "failed to clear rx_filter for monitor status ring: (%d)\n",
+ ret);
+
+ clear_bit(ATH12K_CAC_RUNNING, &ar->dev_flags);
+ ar->state = ATH12K_STATE_OFF;
+ mutex_unlock(&ar->conf_mutex);
+
+ cancel_delayed_work_sync(&ar->scan.timeout);
+ cancel_work_sync(&ar->regd_update_work);
+
+ spin_lock_bh(&ar->data_lock);
+ list_for_each_entry_safe(ppdu_stats, tmp, &ar->ppdu_stats_info, list) {
+ list_del(&ppdu_stats->list);
+ kfree(ppdu_stats);
+ }
+ spin_unlock_bh(&ar->data_lock);
+
+ rcu_assign_pointer(ar->ab->pdevs_active[ar->pdev_idx], NULL);
+
+ synchronize_rcu();
+
+ atomic_set(&ar->num_pending_mgmt_tx, 0);
+}
+
+static u8
+ath12k_mac_get_vdev_stats_id(struct ath12k_vif *arvif)
+{
+ struct ath12k_base *ab = arvif->ar->ab;
+ u8 vdev_stats_id = 0;
+
+ do {
+ if (ab->free_vdev_stats_id_map & (1LL << vdev_stats_id)) {
+ vdev_stats_id++;
+ if (vdev_stats_id <= ATH12K_INVAL_VDEV_STATS_ID) {
+ vdev_stats_id = ATH12K_INVAL_VDEV_STATS_ID;
+ break;
+ }
+ } else {
+ ab->free_vdev_stats_id_map |= (1LL << vdev_stats_id);
+ break;
+ }
+ } while (vdev_stats_id);
+
+ arvif->vdev_stats_id = vdev_stats_id;
+ return vdev_stats_id;
+}
+
+static void ath12k_mac_setup_vdev_create_arg(struct ath12k_vif *arvif,
+ struct ath12k_wmi_vdev_create_arg *arg)
+{
+ struct ath12k *ar = arvif->ar;
+ struct ath12k_pdev *pdev = ar->pdev;
+
+ arg->if_id = arvif->vdev_id;
+ arg->type = arvif->vdev_type;
+ arg->subtype = arvif->vdev_subtype;
+ arg->pdev_id = pdev->pdev_id;
+
+ if (pdev->cap.supported_bands & WMI_HOST_WLAN_2G_CAP) {
+ arg->chains[NL80211_BAND_2GHZ].tx = ar->num_tx_chains;
+ arg->chains[NL80211_BAND_2GHZ].rx = ar->num_rx_chains;
+ }
+ if (pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP) {
+ arg->chains[NL80211_BAND_5GHZ].tx = ar->num_tx_chains;
+ arg->chains[NL80211_BAND_5GHZ].rx = ar->num_rx_chains;
+ }
+ if (pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP &&
+ ar->supports_6ghz) {
+ arg->chains[NL80211_BAND_6GHZ].tx = ar->num_tx_chains;
+ arg->chains[NL80211_BAND_6GHZ].rx = ar->num_rx_chains;
+ }
+
+ arg->if_stats_id = ath12k_mac_get_vdev_stats_id(arvif);
+}
+
+static u32
+ath12k_mac_prepare_he_mode(struct ath12k_pdev *pdev, u32 viftype)
+{
+ struct ath12k_pdev_cap *pdev_cap = &pdev->cap;
+ struct ath12k_band_cap *cap_band = NULL;
+ u32 *hecap_phy_ptr = NULL;
+ u32 hemode;
+
+ if (pdev->cap.supported_bands & WMI_HOST_WLAN_2G_CAP)
+ cap_band = &pdev_cap->band[NL80211_BAND_2GHZ];
+ else
+ cap_band = &pdev_cap->band[NL80211_BAND_5GHZ];
+
+ hecap_phy_ptr = &cap_band->he_cap_phy_info[0];
+
+ hemode = u32_encode_bits(HE_SU_BFEE_ENABLE, HE_MODE_SU_TX_BFEE) |
+ u32_encode_bits(HECAP_PHY_SUBFMR_GET(hecap_phy_ptr),
+ HE_MODE_SU_TX_BFER) |
+ u32_encode_bits(HECAP_PHY_ULMUMIMO_GET(hecap_phy_ptr),
+ HE_MODE_UL_MUMIMO);
+
+ /* TODO: WDS and other modes */
+ if (viftype == NL80211_IFTYPE_AP) {
+ hemode |= u32_encode_bits(HECAP_PHY_MUBFMR_GET(hecap_phy_ptr),
+ HE_MODE_MU_TX_BFER) |
+ u32_encode_bits(HE_DL_MUOFDMA_ENABLE, HE_MODE_DL_OFDMA) |
+ u32_encode_bits(HE_UL_MUOFDMA_ENABLE, HE_MODE_UL_OFDMA);
+ } else {
+ hemode |= u32_encode_bits(HE_MU_BFEE_ENABLE, HE_MODE_MU_TX_BFEE);
+ }
+
+ return hemode;
+}
+
+static int ath12k_set_he_mu_sounding_mode(struct ath12k *ar,
+ struct ath12k_vif *arvif)
+{
+ u32 param_id, param_value;
+ struct ath12k_base *ab = ar->ab;
+ int ret;
+
+ param_id = WMI_VDEV_PARAM_SET_HEMU_MODE;
+ param_value = ath12k_mac_prepare_he_mode(ar->pdev, arvif->vif->type);
+ ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ param_id, param_value);
+ if (ret) {
+ ath12k_warn(ab, "failed to set vdev %d HE MU mode: %d param_value %x\n",
+ arvif->vdev_id, ret, param_value);
+ return ret;
+ }
+ param_id = WMI_VDEV_PARAM_SET_HE_SOUNDING_MODE;
+ param_value =
+ u32_encode_bits(HE_VHT_SOUNDING_MODE_ENABLE, HE_VHT_SOUNDING_MODE) |
+ u32_encode_bits(HE_TRIG_NONTRIG_SOUNDING_MODE_ENABLE,
+ HE_TRIG_NONTRIG_SOUNDING_MODE);
+ ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ param_id, param_value);
+ if (ret) {
+ ath12k_warn(ab, "failed to set vdev %d HE MU mode: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ return ret;
+}
+
+static void ath12k_mac_op_update_vif_offload(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath12k *ar = hw->priv;
+ struct ath12k_base *ab = ar->ab;
+ struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ u32 param_id, param_value;
+ int ret;
+
+ param_id = WMI_VDEV_PARAM_TX_ENCAP_TYPE;
+ if (vif->type != NL80211_IFTYPE_STATION &&
+ vif->type != NL80211_IFTYPE_AP)
+ vif->offload_flags &= ~(IEEE80211_OFFLOAD_ENCAP_ENABLED |
+ IEEE80211_OFFLOAD_DECAP_ENABLED);
+
+ if (vif->offload_flags & IEEE80211_OFFLOAD_ENCAP_ENABLED)
+ arvif->tx_encap_type = ATH12K_HW_TXRX_ETHERNET;
+ else if (test_bit(ATH12K_FLAG_RAW_MODE, &ab->dev_flags))
+ arvif->tx_encap_type = ATH12K_HW_TXRX_RAW;
+ else
+ arvif->tx_encap_type = ATH12K_HW_TXRX_NATIVE_WIFI;
+
+ ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ param_id, arvif->tx_encap_type);
+ if (ret) {
+ ath12k_warn(ab, "failed to set vdev %d tx encap mode: %d\n",
+ arvif->vdev_id, ret);
+ vif->offload_flags &= ~IEEE80211_OFFLOAD_ENCAP_ENABLED;
+ }
+
+ param_id = WMI_VDEV_PARAM_RX_DECAP_TYPE;
+ if (vif->offload_flags & IEEE80211_OFFLOAD_DECAP_ENABLED)
+ param_value = ATH12K_HW_TXRX_ETHERNET;
+ else if (test_bit(ATH12K_FLAG_RAW_MODE, &ab->dev_flags))
+ param_value = ATH12K_HW_TXRX_RAW;
+ else
+ param_value = ATH12K_HW_TXRX_NATIVE_WIFI;
+
+ ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ param_id, param_value);
+ if (ret) {
+ ath12k_warn(ab, "failed to set vdev %d rx decap mode: %d\n",
+ arvif->vdev_id, ret);
+ vif->offload_flags &= ~IEEE80211_OFFLOAD_DECAP_ENABLED;
+ }
+}
+
+static int ath12k_mac_op_add_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath12k *ar = hw->priv;
+ struct ath12k_base *ab = ar->ab;
+ struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ struct ath12k_wmi_vdev_create_arg vdev_arg = {0};
+ struct ath12k_wmi_peer_create_arg peer_param;
+ u32 param_id, param_value;
+ u16 nss;
+ int i;
+ int ret;
+ int bit;
+
+ vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (vif->type == NL80211_IFTYPE_AP &&
+ ar->num_peers > (ar->max_num_peers - 1)) {
+ ath12k_warn(ab, "failed to create vdev due to insufficient peer entry resource in firmware\n");
+ ret = -ENOBUFS;
+ goto err;
+ }
+
+ if (ar->num_created_vdevs > (TARGET_NUM_VDEVS - 1)) {
+ ath12k_warn(ab, "failed to create vdev, reached max vdev limit %d\n",
+ TARGET_NUM_VDEVS);
+ ret = -EBUSY;
+ goto err;
+ }
+
+ memset(arvif, 0, sizeof(*arvif));
+
+ arvif->ar = ar;
+ arvif->vif = vif;
+
+ INIT_LIST_HEAD(&arvif->list);
+
+ /* Should we initialize any worker to handle connection loss indication
+ * from firmware in sta mode?
+ */
+
+ for (i = 0; i < ARRAY_SIZE(arvif->bitrate_mask.control); i++) {
+ arvif->bitrate_mask.control[i].legacy = 0xffffffff;
+ memset(arvif->bitrate_mask.control[i].ht_mcs, 0xff,
+ sizeof(arvif->bitrate_mask.control[i].ht_mcs));
+ memset(arvif->bitrate_mask.control[i].vht_mcs, 0xff,
+ sizeof(arvif->bitrate_mask.control[i].vht_mcs));
+ }
+
+ bit = __ffs64(ab->free_vdev_map);
+
+ arvif->vdev_id = bit;
+ arvif->vdev_subtype = WMI_VDEV_SUBTYPE_NONE;
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_UNSPECIFIED:
+ case NL80211_IFTYPE_STATION:
+ arvif->vdev_type = WMI_VDEV_TYPE_STA;
+ break;
+ case NL80211_IFTYPE_MESH_POINT:
+ arvif->vdev_subtype = WMI_VDEV_SUBTYPE_MESH_11S;
+ fallthrough;
+ case NL80211_IFTYPE_AP:
+ arvif->vdev_type = WMI_VDEV_TYPE_AP;
+ break;
+ case NL80211_IFTYPE_MONITOR:
+ arvif->vdev_type = WMI_VDEV_TYPE_MONITOR;
+ ar->monitor_vdev_id = bit;
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac add interface id %d type %d subtype %d map %llx\n",
+ arvif->vdev_id, arvif->vdev_type, arvif->vdev_subtype,
+ ab->free_vdev_map);
+
+ vif->cab_queue = arvif->vdev_id % (ATH12K_HW_MAX_QUEUES - 1);
+ for (i = 0; i < ARRAY_SIZE(vif->hw_queue); i++)
+ vif->hw_queue[i] = i % (ATH12K_HW_MAX_QUEUES - 1);
+
+ ath12k_mac_setup_vdev_create_arg(arvif, &vdev_arg);
+
+ ret = ath12k_wmi_vdev_create(ar, vif->addr, &vdev_arg);
+ if (ret) {
+ ath12k_warn(ab, "failed to create WMI vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ goto err;
+ }
+
+ ar->num_created_vdevs++;
+ ath12k_dbg(ab, ATH12K_DBG_MAC, "vdev %pM created, vdev_id %d\n",
+ vif->addr, arvif->vdev_id);
+ ar->allocated_vdev_map |= 1LL << arvif->vdev_id;
+ ab->free_vdev_map &= ~(1LL << arvif->vdev_id);
+
+ spin_lock_bh(&ar->data_lock);
+ list_add(&arvif->list, &ar->arvifs);
+ spin_unlock_bh(&ar->data_lock);
+
+ ath12k_mac_op_update_vif_offload(hw, vif);
+
+ nss = hweight32(ar->cfg_tx_chainmask) ? : 1;
+ ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ WMI_VDEV_PARAM_NSS, nss);
+ if (ret) {
+ ath12k_warn(ab, "failed to set vdev %d chainmask 0x%x, nss %d :%d\n",
+ arvif->vdev_id, ar->cfg_tx_chainmask, nss, ret);
+ goto err_vdev_del;
+ }
+
+ switch (arvif->vdev_type) {
+ case WMI_VDEV_TYPE_AP:
+ peer_param.vdev_id = arvif->vdev_id;
+ peer_param.peer_addr = vif->addr;
+ peer_param.peer_type = WMI_PEER_TYPE_DEFAULT;
+ ret = ath12k_peer_create(ar, arvif, NULL, &peer_param);
+ if (ret) {
+ ath12k_warn(ab, "failed to vdev %d create peer for AP: %d\n",
+ arvif->vdev_id, ret);
+ goto err_vdev_del;
+ }
+
+ ret = ath12k_mac_set_kickout(arvif);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to set vdev %i kickout parameters: %d\n",
+ arvif->vdev_id, ret);
+ goto err_peer_del;
+ }
+ break;
+ case WMI_VDEV_TYPE_STA:
+ param_id = WMI_STA_PS_PARAM_RX_WAKE_POLICY;
+ param_value = WMI_STA_PS_RX_WAKE_POLICY_WAKE;
+ ret = ath12k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
+ param_id, param_value);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to set vdev %d RX wake policy: %d\n",
+ arvif->vdev_id, ret);
+ goto err_peer_del;
+ }
+
+ param_id = WMI_STA_PS_PARAM_TX_WAKE_THRESHOLD;
+ param_value = WMI_STA_PS_TX_WAKE_THRESHOLD_ALWAYS;
+ ret = ath12k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
+ param_id, param_value);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to set vdev %d TX wake threshold: %d\n",
+ arvif->vdev_id, ret);
+ goto err_peer_del;
+ }
+
+ param_id = WMI_STA_PS_PARAM_PSPOLL_COUNT;
+ param_value = WMI_STA_PS_PSPOLL_COUNT_NO_MAX;
+ ret = ath12k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
+ param_id, param_value);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to set vdev %d pspoll count: %d\n",
+ arvif->vdev_id, ret);
+ goto err_peer_del;
+ }
+
+ ret = ath12k_wmi_pdev_set_ps_mode(ar, arvif->vdev_id, false);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to disable vdev %d ps mode: %d\n",
+ arvif->vdev_id, ret);
+ goto err_peer_del;
+ }
+ break;
+ default:
+ break;
+ }
+
+ arvif->txpower = vif->bss_conf.txpower;
+ ret = ath12k_mac_txpower_recalc(ar);
+ if (ret)
+ goto err_peer_del;
+
+ param_id = WMI_VDEV_PARAM_RTS_THRESHOLD;
+ param_value = ar->hw->wiphy->rts_threshold;
+ ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ param_id, param_value);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to set rts threshold for vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ }
+
+ ath12k_dp_vdev_tx_attach(ar, arvif);
+
+ if (vif->type != NL80211_IFTYPE_MONITOR && ar->monitor_conf_enabled)
+ ath12k_mac_monitor_vdev_create(ar);
+
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+
+err_peer_del:
+ if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
+ reinit_completion(&ar->peer_delete_done);
+
+ ret = ath12k_wmi_send_peer_delete_cmd(ar, vif->addr,
+ arvif->vdev_id);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to delete peer vdev_id %d addr %pM\n",
+ arvif->vdev_id, vif->addr);
+ goto err;
+ }
+
+ ret = ath12k_wait_for_peer_delete_done(ar, arvif->vdev_id,
+ vif->addr);
+ if (ret)
+ goto err;
+
+ ar->num_peers--;
+ }
+
+err_vdev_del:
+ ath12k_wmi_vdev_delete(ar, arvif->vdev_id);
+ ar->num_created_vdevs--;
+ ar->allocated_vdev_map &= ~(1LL << arvif->vdev_id);
+ ab->free_vdev_map |= 1LL << arvif->vdev_id;
+ ab->free_vdev_stats_id_map &= ~(1LL << arvif->vdev_stats_id);
+ spin_lock_bh(&ar->data_lock);
+ list_del(&arvif->list);
+ spin_unlock_bh(&ar->data_lock);
+
+err:
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static void ath12k_mac_vif_unref(struct ath12k_dp *dp, struct ieee80211_vif *vif)
+{
+ struct ath12k_tx_desc_info *tx_desc_info, *tmp1;
+ struct ath12k_skb_cb *skb_cb;
+ struct sk_buff *skb;
+ int i;
+
+ for (i = 0; i < ATH12K_HW_MAX_QUEUES; i++) {
+ spin_lock_bh(&dp->tx_desc_lock[i]);
+
+ list_for_each_entry_safe(tx_desc_info, tmp1, &dp->tx_desc_used_list[i],
+ list) {
+ skb = tx_desc_info->skb;
+ if (!skb)
+ continue;
+
+ skb_cb = ATH12K_SKB_CB(skb);
+ if (skb_cb->vif == vif)
+ skb_cb->vif = NULL;
+ }
+
+ spin_unlock_bh(&dp->tx_desc_lock[i]);
+ }
+}
+
+static void ath12k_mac_op_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath12k *ar = hw->priv;
+ struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ struct ath12k_base *ab = ar->ab;
+ unsigned long time_left;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ ath12k_dbg(ab, ATH12K_DBG_MAC, "mac remove interface (vdev %d)\n",
+ arvif->vdev_id);
+
+ if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
+ ret = ath12k_peer_delete(ar, arvif->vdev_id, vif->addr);
+ if (ret)
+ ath12k_warn(ab, "failed to submit AP self-peer removal on vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ }
+
+ reinit_completion(&ar->vdev_delete_done);
+
+ ret = ath12k_wmi_vdev_delete(ar, arvif->vdev_id);
+ if (ret) {
+ ath12k_warn(ab, "failed to delete WMI vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ goto err_vdev_del;
+ }
+
+ time_left = wait_for_completion_timeout(&ar->vdev_delete_done,
+ ATH12K_VDEV_DELETE_TIMEOUT_HZ);
+ if (time_left == 0) {
+ ath12k_warn(ab, "Timeout in receiving vdev delete response\n");
+ goto err_vdev_del;
+ }
+
+ if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
+ ar->monitor_vdev_id = -1;
+ ar->monitor_vdev_created = false;
+ } else if (ar->monitor_vdev_created && !ar->monitor_started) {
+ ret = ath12k_mac_monitor_vdev_delete(ar);
+ }
+
+ ab->free_vdev_map |= 1LL << (arvif->vdev_id);
+ ar->allocated_vdev_map &= ~(1LL << arvif->vdev_id);
+ ab->free_vdev_stats_id_map &= ~(1LL << arvif->vdev_stats_id);
+ ar->num_created_vdevs--;
+
+ ath12k_dbg(ab, ATH12K_DBG_MAC, "vdev %pM deleted, vdev_id %d\n",
+ vif->addr, arvif->vdev_id);
+
+err_vdev_del:
+ spin_lock_bh(&ar->data_lock);
+ list_del(&arvif->list);
+ spin_unlock_bh(&ar->data_lock);
+
+ ath12k_peer_cleanup(ar, arvif->vdev_id);
+
+ idr_for_each(&ar->txmgmt_idr,
+ ath12k_mac_vif_txmgmt_idr_remove, vif);
+
+ ath12k_mac_vif_unref(&ab->dp, vif);
+ ath12k_dp_tx_put_bank_profile(&ab->dp, arvif->bank_id);
+
+ /* Recalc txpower for remaining vdev */
+ ath12k_mac_txpower_recalc(ar);
+ clear_bit(ATH12K_FLAG_MONITOR_ENABLED, &ar->monitor_flags);
+
+ /* TODO: recal traffic pause state based on the available vdevs */
+
+ mutex_unlock(&ar->conf_mutex);
+}
+
+/* FIXME: Has to be verified. */
+#define SUPPORTED_FILTERS \
+ (FIF_ALLMULTI | \
+ FIF_CONTROL | \
+ FIF_PSPOLL | \
+ FIF_OTHER_BSS | \
+ FIF_BCN_PRBRESP_PROMISC | \
+ FIF_PROBE_REQ | \
+ FIF_FCSFAIL)
+
+static void ath12k_mac_op_configure_filter(struct ieee80211_hw *hw,
+ unsigned int changed_flags,
+ unsigned int *total_flags,
+ u64 multicast)
+{
+ struct ath12k *ar = hw->priv;
+ bool reset_flag;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ changed_flags &= SUPPORTED_FILTERS;
+ *total_flags &= SUPPORTED_FILTERS;
+ ar->filter_flags = *total_flags;
+
+ /* For monitor mode */
+ reset_flag = !(ar->filter_flags & FIF_BCN_PRBRESP_PROMISC);
+
+ ret = ath12k_dp_tx_htt_monitor_mode_ring_config(ar, reset_flag);
+ if (!ret) {
+ if (!reset_flag)
+ set_bit(ATH12K_FLAG_MONITOR_ENABLED, &ar->monitor_flags);
+ else
+ clear_bit(ATH12K_FLAG_MONITOR_ENABLED, &ar->monitor_flags);
+ } else {
+ ath12k_warn(ar->ab,
+ "fail to set monitor filter: %d\n", ret);
+ }
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+ "changed_flags:0x%x, total_flags:0x%x, reset_flag:%d\n",
+ changed_flags, *total_flags, reset_flag);
+
+ mutex_unlock(&ar->conf_mutex);
+}
+
+static int ath12k_mac_op_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
+{
+ struct ath12k *ar = hw->priv;
+
+ mutex_lock(&ar->conf_mutex);
+
+ *tx_ant = ar->cfg_tx_chainmask;
+ *rx_ant = ar->cfg_rx_chainmask;
+
+ mutex_unlock(&ar->conf_mutex);
+
+ return 0;
+}
+
+static int ath12k_mac_op_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
+{
+ struct ath12k *ar = hw->priv;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+ ret = __ath12k_set_antenna(ar, tx_ant, rx_ant);
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static int ath12k_mac_op_ampdu_action(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_ampdu_params *params)
+{
+ struct ath12k *ar = hw->priv;
+ int ret = -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+
+ switch (params->action) {
+ case IEEE80211_AMPDU_RX_START:
+ ret = ath12k_dp_rx_ampdu_start(ar, params);
+ break;
+ case IEEE80211_AMPDU_RX_STOP:
+ ret = ath12k_dp_rx_ampdu_stop(ar, params);
+ break;
+ case IEEE80211_AMPDU_TX_START:
+ case IEEE80211_AMPDU_TX_STOP_CONT:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
+ case IEEE80211_AMPDU_TX_OPERATIONAL:
+ /* Tx A-MPDU aggregation offloaded to hw/fw so deny mac80211
+ * Tx aggregation requests.
+ */
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static int ath12k_mac_op_add_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct ath12k *ar = hw->priv;
+ struct ath12k_base *ab = ar->ab;
+
+ ath12k_dbg(ab, ATH12K_DBG_MAC,
+ "mac chanctx add freq %u width %d ptr %pK\n",
+ ctx->def.chan->center_freq, ctx->def.width, ctx);
+
+ mutex_lock(&ar->conf_mutex);
+
+ spin_lock_bh(&ar->data_lock);
+ /* TODO: In case of multiple channel context, populate rx_channel from
+ * Rx PPDU desc information.
+ */
+ ar->rx_channel = ctx->def.chan;
+ spin_unlock_bh(&ar->data_lock);
+
+ mutex_unlock(&ar->conf_mutex);
+
+ return 0;
+}
+
+static void ath12k_mac_op_remove_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct ath12k *ar = hw->priv;
+ struct ath12k_base *ab = ar->ab;
+
+ ath12k_dbg(ab, ATH12K_DBG_MAC,
+ "mac chanctx remove freq %u width %d ptr %pK\n",
+ ctx->def.chan->center_freq, ctx->def.width, ctx);
+
+ mutex_lock(&ar->conf_mutex);
+
+ spin_lock_bh(&ar->data_lock);
+ /* TODO: In case of there is one more channel context left, populate
+ * rx_channel with the channel of that remaining channel context.
+ */
+ ar->rx_channel = NULL;
+ spin_unlock_bh(&ar->data_lock);
+
+ mutex_unlock(&ar->conf_mutex);
+}
+
+static int
+ath12k_mac_vdev_start_restart(struct ath12k_vif *arvif,
+ const struct cfg80211_chan_def *chandef,
+ bool restart)
+{
+ struct ath12k *ar = arvif->ar;
+ struct ath12k_base *ab = ar->ab;
+ struct wmi_vdev_start_req_arg arg = {};
+ int he_support = arvif->vif->bss_conf.he_support;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ reinit_completion(&ar->vdev_setup_done);
+
+ arg.vdev_id = arvif->vdev_id;
+ arg.dtim_period = arvif->dtim_period;
+ arg.bcn_intval = arvif->beacon_interval;
+
+ arg.freq = chandef->chan->center_freq;
+ arg.band_center_freq1 = chandef->center_freq1;
+ arg.band_center_freq2 = chandef->center_freq2;
+ arg.mode = ath12k_phymodes[chandef->chan->band][chandef->width];
+
+ arg.min_power = 0;
+ arg.max_power = chandef->chan->max_power * 2;
+ arg.max_reg_power = chandef->chan->max_reg_power * 2;
+ arg.max_antenna_gain = chandef->chan->max_antenna_gain * 2;
+
+ arg.pref_tx_streams = ar->num_tx_chains;
+ arg.pref_rx_streams = ar->num_rx_chains;
+
+ if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
+ arg.ssid = arvif->u.ap.ssid;
+ arg.ssid_len = arvif->u.ap.ssid_len;
+ arg.hidden_ssid = arvif->u.ap.hidden_ssid;
+
+ /* For now allow DFS for AP mode */
+ arg.chan_radar = !!(chandef->chan->flags & IEEE80211_CHAN_RADAR);
+
+ arg.passive = arg.chan_radar;
+
+ spin_lock_bh(&ab->base_lock);
+ arg.regdomain = ar->ab->dfs_region;
+ spin_unlock_bh(&ab->base_lock);
+
+ /* TODO: Notify if secondary 80Mhz also needs radar detection */
+ if (he_support) {
+ ret = ath12k_set_he_mu_sounding_mode(ar, arvif);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to set he mode vdev %i\n",
+ arg.vdev_id);
+ return ret;
+ }
+ }
+ }
+
+ arg.passive |= !!(chandef->chan->flags & IEEE80211_CHAN_NO_IR);
+
+ ath12k_dbg(ab, ATH12K_DBG_MAC,
+ "mac vdev %d start center_freq %d phymode %s\n",
+ arg.vdev_id, arg.freq,
+ ath12k_mac_phymode_str(arg.mode));
+
+ ret = ath12k_wmi_vdev_start(ar, &arg, restart);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to %s WMI vdev %i\n",
+ restart ? "restart" : "start", arg.vdev_id);
+ return ret;
+ }
+
+ ret = ath12k_mac_vdev_setup_sync(ar);
+ if (ret) {
+ ath12k_warn(ab, "failed to synchronize setup for vdev %i %s: %d\n",
+ arg.vdev_id, restart ? "restart" : "start", ret);
+ return ret;
+ }
+
+ ar->num_started_vdevs++;
+ ath12k_dbg(ab, ATH12K_DBG_MAC, "vdev %pM started, vdev_id %d\n",
+ arvif->vif->addr, arvif->vdev_id);
+
+ /* Enable CAC Flag in the driver by checking the channel DFS cac time,
+ * i.e dfs_cac_ms value which will be valid only for radar channels
+ * and state as NL80211_DFS_USABLE which indicates CAC needs to be
+ * done before channel usage. This flags is used to drop rx packets.
+ * during CAC.
+ */
+ /* TODO: Set the flag for other interface types as required */
+ if (arvif->vdev_type == WMI_VDEV_TYPE_AP &&
+ chandef->chan->dfs_cac_ms &&
+ chandef->chan->dfs_state == NL80211_DFS_USABLE) {
+ set_bit(ATH12K_CAC_RUNNING, &ar->dev_flags);
+ ath12k_dbg(ab, ATH12K_DBG_MAC,
+ "CAC Started in chan_freq %d for vdev %d\n",
+ arg.freq, arg.vdev_id);
+ }
+
+ ret = ath12k_mac_set_txbf_conf(arvif);
+ if (ret)
+ ath12k_warn(ab, "failed to set txbf conf for vdev %d: %d\n",
+ arvif->vdev_id, ret);
+
+ return 0;
+}
+
+static int ath12k_mac_vdev_stop(struct ath12k_vif *arvif)
+{
+ struct ath12k *ar = arvif->ar;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ reinit_completion(&ar->vdev_setup_done);
+
+ ret = ath12k_wmi_vdev_stop(ar, arvif->vdev_id);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to stop WMI vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ goto err;
+ }
+
+ ret = ath12k_mac_vdev_setup_sync(ar);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to synchronize setup for vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ goto err;
+ }
+
+ WARN_ON(ar->num_started_vdevs == 0);
+
+ ar->num_started_vdevs--;
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "vdev %pM stopped, vdev_id %d\n",
+ arvif->vif->addr, arvif->vdev_id);
+
+ if (test_bit(ATH12K_CAC_RUNNING, &ar->dev_flags)) {
+ clear_bit(ATH12K_CAC_RUNNING, &ar->dev_flags);
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "CAC Stopped for vdev %d\n",
+ arvif->vdev_id);
+ }
+
+ return 0;
+err:
+ return ret;
+}
+
+static int ath12k_mac_vdev_start(struct ath12k_vif *arvif,
+ const struct cfg80211_chan_def *chandef)
+{
+ return ath12k_mac_vdev_start_restart(arvif, chandef, false);
+}
+
+static int ath12k_mac_vdev_restart(struct ath12k_vif *arvif,
+ const struct cfg80211_chan_def *chandef)
+{
+ return ath12k_mac_vdev_start_restart(arvif, chandef, true);
+}
+
+struct ath12k_mac_change_chanctx_arg {
+ struct ieee80211_chanctx_conf *ctx;
+ struct ieee80211_vif_chanctx_switch *vifs;
+ int n_vifs;
+ int next_vif;
+};
+
+static void
+ath12k_mac_change_chanctx_cnt_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct ath12k_mac_change_chanctx_arg *arg = data;
+
+ if (rcu_access_pointer(vif->bss_conf.chanctx_conf) != arg->ctx)
+ return;
+
+ arg->n_vifs++;
+}
+
+static void
+ath12k_mac_change_chanctx_fill_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct ath12k_mac_change_chanctx_arg *arg = data;
+ struct ieee80211_chanctx_conf *ctx;
+
+ ctx = rcu_access_pointer(vif->bss_conf.chanctx_conf);
+ if (ctx != arg->ctx)
+ return;
+
+ if (WARN_ON(arg->next_vif == arg->n_vifs))
+ return;
+
+ arg->vifs[arg->next_vif].vif = vif;
+ arg->vifs[arg->next_vif].old_ctx = ctx;
+ arg->vifs[arg->next_vif].new_ctx = ctx;
+ arg->next_vif++;
+}
+
+static void
+ath12k_mac_update_vif_chan(struct ath12k *ar,
+ struct ieee80211_vif_chanctx_switch *vifs,
+ int n_vifs)
+{
+ struct ath12k_base *ab = ar->ab;
+ struct ath12k_vif *arvif;
+ int ret;
+ int i;
+ bool monitor_vif = false;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ for (i = 0; i < n_vifs; i++) {
+ arvif = (void *)vifs[i].vif->drv_priv;
+
+ if (vifs[i].vif->type == NL80211_IFTYPE_MONITOR)
+ monitor_vif = true;
+
+ ath12k_dbg(ab, ATH12K_DBG_MAC,
+ "mac chanctx switch vdev_id %i freq %u->%u width %d->%d\n",
+ arvif->vdev_id,
+ vifs[i].old_ctx->def.chan->center_freq,
+ vifs[i].new_ctx->def.chan->center_freq,
+ vifs[i].old_ctx->def.width,
+ vifs[i].new_ctx->def.width);
+
+ if (WARN_ON(!arvif->is_started))
+ continue;
+
+ if (WARN_ON(!arvif->is_up))
+ continue;
+
+ ret = ath12k_wmi_vdev_down(ar, arvif->vdev_id);
+ if (ret) {
+ ath12k_warn(ab, "failed to down vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ continue;
+ }
+ }
+
+ /* All relevant vdevs are downed and associated channel resources
+ * should be available for the channel switch now.
+ */
+
+ /* TODO: Update ar->rx_channel */
+
+ for (i = 0; i < n_vifs; i++) {
+ arvif = (void *)vifs[i].vif->drv_priv;
+
+ if (WARN_ON(!arvif->is_started))
+ continue;
+
+ if (WARN_ON(!arvif->is_up))
+ continue;
+
+ ret = ath12k_mac_vdev_restart(arvif, &vifs[i].new_ctx->def);
+ if (ret) {
+ ath12k_warn(ab, "failed to restart vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ continue;
+ }
+
+ ret = ath12k_mac_setup_bcn_tmpl(arvif);
+ if (ret)
+ ath12k_warn(ab, "failed to update bcn tmpl during csa: %d\n",
+ ret);
+
+ ret = ath12k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
+ arvif->bssid);
+ if (ret) {
+ ath12k_warn(ab, "failed to bring vdev up %d: %d\n",
+ arvif->vdev_id, ret);
+ continue;
+ }
+ }
+
+ /* Restart the internal monitor vdev on new channel */
+ if (!monitor_vif && ar->monitor_vdev_created) {
+ if (!ath12k_mac_monitor_stop(ar))
+ ath12k_mac_monitor_start(ar);
+ }
+}
+
+static void
+ath12k_mac_update_active_vif_chan(struct ath12k *ar,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct ath12k_mac_change_chanctx_arg arg = { .ctx = ctx };
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ieee80211_iterate_active_interfaces_atomic(ar->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ ath12k_mac_change_chanctx_cnt_iter,
+ &arg);
+ if (arg.n_vifs == 0)
+ return;
+
+ arg.vifs = kcalloc(arg.n_vifs, sizeof(arg.vifs[0]), GFP_KERNEL);
+ if (!arg.vifs)
+ return;
+
+ ieee80211_iterate_active_interfaces_atomic(ar->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ ath12k_mac_change_chanctx_fill_iter,
+ &arg);
+
+ ath12k_mac_update_vif_chan(ar, arg.vifs, arg.n_vifs);
+
+ kfree(arg.vifs);
+}
+
+static void ath12k_mac_op_change_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx,
+ u32 changed)
+{
+ struct ath12k *ar = hw->priv;
+ struct ath12k_base *ab = ar->ab;
+
+ mutex_lock(&ar->conf_mutex);
+
+ ath12k_dbg(ab, ATH12K_DBG_MAC,
+ "mac chanctx change freq %u width %d ptr %pK changed %x\n",
+ ctx->def.chan->center_freq, ctx->def.width, ctx, changed);
+
+ /* This shouldn't really happen because channel switching should use
+ * switch_vif_chanctx().
+ */
+ if (WARN_ON(changed & IEEE80211_CHANCTX_CHANGE_CHANNEL))
+ goto unlock;
+
+ if (changed & IEEE80211_CHANCTX_CHANGE_WIDTH)
+ ath12k_mac_update_active_vif_chan(ar, ctx);
+
+ /* TODO: Recalc radar detection */
+
+unlock:
+ mutex_unlock(&ar->conf_mutex);
+}
+
+static int ath12k_start_vdev_delay(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath12k *ar = hw->priv;
+ struct ath12k_base *ab = ar->ab;
+ struct ath12k_vif *arvif = (void *)vif->drv_priv;
+ int ret;
+
+ if (WARN_ON(arvif->is_started))
+ return -EBUSY;
+
+ ret = ath12k_mac_vdev_start(arvif, &arvif->chanctx.def);
+ if (ret) {
+ ath12k_warn(ab, "failed to start vdev %i addr %pM on freq %d: %d\n",
+ arvif->vdev_id, vif->addr,
+ arvif->chanctx.def.chan->center_freq, ret);
+ return ret;
+ }
+
+ if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
+ ret = ath12k_monitor_vdev_up(ar, arvif->vdev_id);
+ if (ret) {
+ ath12k_warn(ab, "failed put monitor up: %d\n", ret);
+ return ret;
+ }
+ }
+
+ arvif->is_started = true;
+
+ /* TODO: Setup ps and cts/rts protection */
+ return 0;
+}
+
+static int
+ath12k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct ath12k *ar = hw->priv;
+ struct ath12k_base *ab = ar->ab;
+ struct ath12k_vif *arvif = (void *)vif->drv_priv;
+ int ret;
+ struct ath12k_wmi_peer_create_arg param;
+
+ mutex_lock(&ar->conf_mutex);
+
+ ath12k_dbg(ab, ATH12K_DBG_MAC,
+ "mac chanctx assign ptr %pK vdev_id %i\n",
+ ctx, arvif->vdev_id);
+
+ /* for some targets bss peer must be created before vdev_start */
+ if (ab->hw_params->vdev_start_delay &&
+ arvif->vdev_type != WMI_VDEV_TYPE_AP &&
+ arvif->vdev_type != WMI_VDEV_TYPE_MONITOR &&
+ !ath12k_peer_exist_by_vdev_id(ab, arvif->vdev_id)) {
+ memcpy(&arvif->chanctx, ctx, sizeof(*ctx));
+ ret = 0;
+ goto out;
+ }
+
+ if (WARN_ON(arvif->is_started)) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ if (ab->hw_params->vdev_start_delay &&
+ (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
+ arvif->vdev_type == WMI_VDEV_TYPE_MONITOR)) {
+ param.vdev_id = arvif->vdev_id;
+ param.peer_type = WMI_PEER_TYPE_DEFAULT;
+ param.peer_addr = ar->mac_addr;
+
+ ret = ath12k_peer_create(ar, arvif, NULL, &param);
+ if (ret) {
+ ath12k_warn(ab, "failed to create peer after vdev start delay: %d",
+ ret);
+ goto out;
+ }
+ }
+
+ if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
+ ret = ath12k_mac_monitor_start(ar);
+ if (ret)
+ goto out;
+ arvif->is_started = true;
+ goto out;
+ }
+
+ ret = ath12k_mac_vdev_start(arvif, &ctx->def);
+ if (ret) {
+ ath12k_warn(ab, "failed to start vdev %i addr %pM on freq %d: %d\n",
+ arvif->vdev_id, vif->addr,
+ ctx->def.chan->center_freq, ret);
+ goto out;
+ }
+
+ if (arvif->vdev_type != WMI_VDEV_TYPE_MONITOR && ar->monitor_vdev_created)
+ ath12k_mac_monitor_start(ar);
+
+ arvif->is_started = true;
+
+ /* TODO: Setup ps and cts/rts protection */
+
+out:
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static void
+ath12k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct ath12k *ar = hw->priv;
+ struct ath12k_base *ab = ar->ab;
+ struct ath12k_vif *arvif = (void *)vif->drv_priv;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ ath12k_dbg(ab, ATH12K_DBG_MAC,
+ "mac chanctx unassign ptr %pK vdev_id %i\n",
+ ctx, arvif->vdev_id);
+
+ WARN_ON(!arvif->is_started);
+
+ if (ab->hw_params->vdev_start_delay &&
+ arvif->vdev_type == WMI_VDEV_TYPE_MONITOR &&
+ ath12k_peer_find_by_addr(ab, ar->mac_addr))
+ ath12k_peer_delete(ar, arvif->vdev_id, ar->mac_addr);
+
+ if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
+ ret = ath12k_mac_monitor_stop(ar);
+ if (ret) {
+ mutex_unlock(&ar->conf_mutex);
+ return;
+ }
+
+ arvif->is_started = false;
+ mutex_unlock(&ar->conf_mutex);
+ }
+
+ ret = ath12k_mac_vdev_stop(arvif);
+ if (ret)
+ ath12k_warn(ab, "failed to stop vdev %i: %d\n",
+ arvif->vdev_id, ret);
+
+ arvif->is_started = false;
+
+ if (ab->hw_params->vdev_start_delay &&
+ arvif->vdev_type == WMI_VDEV_TYPE_MONITOR)
+ ath12k_wmi_vdev_down(ar, arvif->vdev_id);
+
+ if (arvif->vdev_type != WMI_VDEV_TYPE_MONITOR &&
+ ar->num_started_vdevs == 1 && ar->monitor_vdev_created)
+ ath12k_mac_monitor_stop(ar);
+
+ mutex_unlock(&ar->conf_mutex);
+}
+
+static int
+ath12k_mac_op_switch_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif_chanctx_switch *vifs,
+ int n_vifs,
+ enum ieee80211_chanctx_switch_mode mode)
+{
+ struct ath12k *ar = hw->priv;
+
+ mutex_lock(&ar->conf_mutex);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+ "mac chanctx switch n_vifs %d mode %d\n",
+ n_vifs, mode);
+ ath12k_mac_update_vif_chan(ar, vifs, n_vifs);
+
+ mutex_unlock(&ar->conf_mutex);
+
+ return 0;
+}
+
+static int
+ath12k_set_vdev_param_to_all_vifs(struct ath12k *ar, int param, u32 value)
+{
+ struct ath12k_vif *arvif;
+ int ret = 0;
+
+ mutex_lock(&ar->conf_mutex);
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "setting mac vdev %d param %d value %d\n",
+ param, arvif->vdev_id, value);
+
+ ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ param, value);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to set param %d for vdev %d: %d\n",
+ param, arvif->vdev_id, ret);
+ break;
+ }
+ }
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+/* mac80211 stores device specific RTS/Fragmentation threshold value,
+ * this is set interface specific to firmware from ath12k driver
+ */
+static int ath12k_mac_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
+{
+ struct ath12k *ar = hw->priv;
+ int param_id = WMI_VDEV_PARAM_RTS_THRESHOLD;
+
+ return ath12k_set_vdev_param_to_all_vifs(ar, param_id, value);
+}
+
+static int ath12k_mac_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
+{
+ /* Even though there's a WMI vdev param for fragmentation threshold no
+ * known firmware actually implements it. Moreover it is not possible to
+ * rely frame fragmentation to mac80211 because firmware clears the
+ * "more fragments" bit in frame control making it impossible for remote
+ * devices to reassemble frames.
+ *
+ * Hence implement a dummy callback just to say fragmentation isn't
+ * supported. This effectively prevents mac80211 from doing frame
+ * fragmentation in software.
+ */
+ return -EOPNOTSUPP;
+}
+
+static void ath12k_mac_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u32 queues, bool drop)
+{
+ struct ath12k *ar = hw->priv;
+ long time_left;
+
+ if (drop)
+ return;
+
+ time_left = wait_event_timeout(ar->dp.tx_empty_waitq,
+ (atomic_read(&ar->dp.num_tx_pending) == 0),
+ ATH12K_FLUSH_TIMEOUT);
+ if (time_left == 0)
+ ath12k_warn(ar->ab, "failed to flush transmit queue %ld\n", time_left);
+}
+
+static int
+ath12k_mac_bitrate_mask_num_ht_rates(struct ath12k *ar,
+ enum nl80211_band band,
+ const struct cfg80211_bitrate_mask *mask)
+{
+ int num_rates = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++)
+ num_rates += hweight16(mask->control[band].ht_mcs[i]);
+
+ return num_rates;
+}
+
+static bool
+ath12k_mac_has_single_legacy_rate(struct ath12k *ar,
+ enum nl80211_band band,
+ const struct cfg80211_bitrate_mask *mask)
+{
+ int num_rates = 0;
+
+ num_rates = hweight32(mask->control[band].legacy);
+
+ if (ath12k_mac_bitrate_mask_num_ht_rates(ar, band, mask))
+ return false;
+
+ if (ath12k_mac_bitrate_mask_num_vht_rates(ar, band, mask))
+ return false;
+
+ return num_rates == 1;
+}
+
+static bool
+ath12k_mac_bitrate_mask_get_single_nss(struct ath12k *ar,
+ enum nl80211_band band,
+ const struct cfg80211_bitrate_mask *mask,
+ int *nss)
+{
+ struct ieee80211_supported_band *sband = &ar->mac.sbands[band];
+ u16 vht_mcs_map = le16_to_cpu(sband->vht_cap.vht_mcs.tx_mcs_map);
+ u8 ht_nss_mask = 0;
+ u8 vht_nss_mask = 0;
+ int i;
+
+ /* No need to consider legacy here. Basic rates are always present
+ * in bitrate mask
+ */
+
+ for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++) {
+ if (mask->control[band].ht_mcs[i] == 0)
+ continue;
+ else if (mask->control[band].ht_mcs[i] ==
+ sband->ht_cap.mcs.rx_mask[i])
+ ht_nss_mask |= BIT(i);
+ else
+ return false;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) {
+ if (mask->control[band].vht_mcs[i] == 0)
+ continue;
+ else if (mask->control[band].vht_mcs[i] ==
+ ath12k_mac_get_max_vht_mcs_map(vht_mcs_map, i))
+ vht_nss_mask |= BIT(i);
+ else
+ return false;
+ }
+
+ if (ht_nss_mask != vht_nss_mask)
+ return false;
+
+ if (ht_nss_mask == 0)
+ return false;
+
+ if (BIT(fls(ht_nss_mask)) - 1 != ht_nss_mask)
+ return false;
+
+ *nss = fls(ht_nss_mask);
+
+ return true;
+}
+
+static int
+ath12k_mac_get_single_legacy_rate(struct ath12k *ar,
+ enum nl80211_band band,
+ const struct cfg80211_bitrate_mask *mask,
+ u32 *rate, u8 *nss)
+{
+ int rate_idx;
+ u16 bitrate;
+ u8 preamble;
+ u8 hw_rate;
+
+ if (hweight32(mask->control[band].legacy) != 1)
+ return -EINVAL;
+
+ rate_idx = ffs(mask->control[band].legacy) - 1;
+
+ if (band == NL80211_BAND_5GHZ || band == NL80211_BAND_6GHZ)
+ rate_idx += ATH12K_MAC_FIRST_OFDM_RATE_IDX;
+
+ hw_rate = ath12k_legacy_rates[rate_idx].hw_value;
+ bitrate = ath12k_legacy_rates[rate_idx].bitrate;
+
+ if (ath12k_mac_bitrate_is_cck(bitrate))
+ preamble = WMI_RATE_PREAMBLE_CCK;
+ else
+ preamble = WMI_RATE_PREAMBLE_OFDM;
+
+ *nss = 1;
+ *rate = ATH12K_HW_RATE_CODE(hw_rate, 0, preamble);
+
+ return 0;
+}
+
+static int ath12k_mac_set_fixed_rate_params(struct ath12k_vif *arvif,
+ u32 rate, u8 nss, u8 sgi, u8 ldpc)
+{
+ struct ath12k *ar = arvif->ar;
+ u32 vdev_param;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac set fixed rate params vdev %i rate 0x%02x nss %u sgi %u\n",
+ arvif->vdev_id, rate, nss, sgi);
+
+ vdev_param = WMI_VDEV_PARAM_FIXED_RATE;
+ ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ vdev_param, rate);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to set fixed rate param 0x%02x: %d\n",
+ rate, ret);
+ return ret;
+ }
+
+ vdev_param = WMI_VDEV_PARAM_NSS;
+ ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ vdev_param, nss);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to set nss param %d: %d\n",
+ nss, ret);
+ return ret;
+ }
+
+ vdev_param = WMI_VDEV_PARAM_SGI;
+ ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ vdev_param, sgi);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to set sgi param %d: %d\n",
+ sgi, ret);
+ return ret;
+ }
+
+ vdev_param = WMI_VDEV_PARAM_LDPC;
+ ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ vdev_param, ldpc);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to set ldpc param %d: %d\n",
+ ldpc, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static bool
+ath12k_mac_vht_mcs_range_present(struct ath12k *ar,
+ enum nl80211_band band,
+ const struct cfg80211_bitrate_mask *mask)
+{
+ int i;
+ u16 vht_mcs;
+
+ for (i = 0; i < NL80211_VHT_NSS_MAX; i++) {
+ vht_mcs = mask->control[band].vht_mcs[i];
+
+ switch (vht_mcs) {
+ case 0:
+ case BIT(8) - 1:
+ case BIT(9) - 1:
+ case BIT(10) - 1:
+ break;
+ default:
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static void ath12k_mac_set_bitrate_mask_iter(void *data,
+ struct ieee80211_sta *sta)
+{
+ struct ath12k_vif *arvif = data;
+ struct ath12k_sta *arsta = (struct ath12k_sta *)sta->drv_priv;
+ struct ath12k *ar = arvif->ar;
+
+ spin_lock_bh(&ar->data_lock);
+ arsta->changed |= IEEE80211_RC_SUPP_RATES_CHANGED;
+ spin_unlock_bh(&ar->data_lock);
+
+ ieee80211_queue_work(ar->hw, &arsta->update_wk);
+}
+
+static void ath12k_mac_disable_peer_fixed_rate(void *data,
+ struct ieee80211_sta *sta)
+{
+ struct ath12k_vif *arvif = data;
+ struct ath12k *ar = arvif->ar;
+ int ret;
+
+ ret = ath12k_wmi_set_peer_param(ar, sta->addr,
+ arvif->vdev_id,
+ WMI_PEER_PARAM_FIXED_RATE,
+ WMI_FIXED_RATE_NONE);
+ if (ret)
+ ath12k_warn(ar->ab,
+ "failed to disable peer fixed rate for STA %pM ret %d\n",
+ sta->addr, ret);
+}
+
+static int
+ath12k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ const struct cfg80211_bitrate_mask *mask)
+{
+ struct ath12k_vif *arvif = (void *)vif->drv_priv;
+ struct cfg80211_chan_def def;
+ struct ath12k *ar = arvif->ar;
+ enum nl80211_band band;
+ const u8 *ht_mcs_mask;
+ const u16 *vht_mcs_mask;
+ u32 rate;
+ u8 nss;
+ u8 sgi;
+ u8 ldpc;
+ int single_nss;
+ int ret;
+ int num_rates;
+
+ if (ath12k_mac_vif_chan(vif, &def))
+ return -EPERM;
+
+ band = def.chan->band;
+ ht_mcs_mask = mask->control[band].ht_mcs;
+ vht_mcs_mask = mask->control[band].vht_mcs;
+ ldpc = !!(ar->ht_cap_info & WMI_HT_CAP_LDPC);
+
+ sgi = mask->control[band].gi;
+ if (sgi == NL80211_TXRATE_FORCE_LGI)
+ return -EINVAL;
+
+ /* mac80211 doesn't support sending a fixed HT/VHT MCS alone, rather it
+ * requires passing at least one of used basic rates along with them.
+ * Fixed rate setting across different preambles(legacy, HT, VHT) is
+ * not supported by the FW. Hence use of FIXED_RATE vdev param is not
+ * suitable for setting single HT/VHT rates.
+ * But, there could be a single basic rate passed from userspace which
+ * can be done through the FIXED_RATE param.
+ */
+ if (ath12k_mac_has_single_legacy_rate(ar, band, mask)) {
+ ret = ath12k_mac_get_single_legacy_rate(ar, band, mask, &rate,
+ &nss);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to get single legacy rate for vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ ieee80211_iterate_stations_atomic(ar->hw,
+ ath12k_mac_disable_peer_fixed_rate,
+ arvif);
+ } else if (ath12k_mac_bitrate_mask_get_single_nss(ar, band, mask,
+ &single_nss)) {
+ rate = WMI_FIXED_RATE_NONE;
+ nss = single_nss;
+ } else {
+ rate = WMI_FIXED_RATE_NONE;
+ nss = min_t(u32, ar->num_tx_chains,
+ max(ath12k_mac_max_ht_nss(ht_mcs_mask),
+ ath12k_mac_max_vht_nss(vht_mcs_mask)));
+
+ /* If multiple rates across different preambles are given
+ * we can reconfigure this info with all peers using PEER_ASSOC
+ * command with the below exception cases.
+ * - Single VHT Rate : peer_assoc command accommodates only MCS
+ * range values i.e 0-7, 0-8, 0-9 for VHT. Though mac80211
+ * mandates passing basic rates along with HT/VHT rates, FW
+ * doesn't allow switching from VHT to Legacy. Hence instead of
+ * setting legacy and VHT rates using RATEMASK_CMD vdev cmd,
+ * we could set this VHT rate as peer fixed rate param, which
+ * will override FIXED rate and FW rate control algorithm.
+ * If single VHT rate is passed along with HT rates, we select
+ * the VHT rate as fixed rate for vht peers.
+ * - Multiple VHT Rates : When Multiple VHT rates are given,this
+ * can be set using RATEMASK CMD which uses FW rate-ctl alg.
+ * TODO: Setting multiple VHT MCS and replacing peer_assoc with
+ * RATEMASK_CMDID can cover all use cases of setting rates
+ * across multiple preambles and rates within same type.
+ * But requires more validation of the command at this point.
+ */
+
+ num_rates = ath12k_mac_bitrate_mask_num_vht_rates(ar, band,
+ mask);
+
+ if (!ath12k_mac_vht_mcs_range_present(ar, band, mask) &&
+ num_rates > 1) {
+ /* TODO: Handle multiple VHT MCS values setting using
+ * RATEMASK CMD
+ */
+ ath12k_warn(ar->ab,
+ "Setting more than one MCS Value in bitrate mask not supported\n");
+ return -EINVAL;
+ }
+
+ ieee80211_iterate_stations_atomic(ar->hw,
+ ath12k_mac_disable_peer_fixed_rate,
+ arvif);
+
+ mutex_lock(&ar->conf_mutex);
+
+ arvif->bitrate_mask = *mask;
+ ieee80211_iterate_stations_atomic(ar->hw,
+ ath12k_mac_set_bitrate_mask_iter,
+ arvif);
+
+ mutex_unlock(&ar->conf_mutex);
+ }
+
+ mutex_lock(&ar->conf_mutex);
+
+ ret = ath12k_mac_set_fixed_rate_params(arvif, rate, nss, sgi, ldpc);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to set fixed rate params on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static void
+ath12k_mac_op_reconfig_complete(struct ieee80211_hw *hw,
+ enum ieee80211_reconfig_type reconfig_type)
+{
+ struct ath12k *ar = hw->priv;
+ struct ath12k_base *ab = ar->ab;
+ int recovery_count;
+
+ if (reconfig_type != IEEE80211_RECONFIG_TYPE_RESTART)
+ return;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state == ATH12K_STATE_RESTARTED) {
+ ath12k_warn(ar->ab, "pdev %d successfully recovered\n",
+ ar->pdev->pdev_id);
+ ar->state = ATH12K_STATE_ON;
+ ieee80211_wake_queues(ar->hw);
+
+ if (ab->is_reset) {
+ recovery_count = atomic_inc_return(&ab->recovery_count);
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "recovery count %d\n",
+ recovery_count);
+ /* When there are multiple radios in an SOC,
+ * the recovery has to be done for each radio
+ */
+ if (recovery_count == ab->num_radios) {
+ atomic_dec(&ab->reset_count);
+ complete(&ab->reset_complete);
+ ab->is_reset = false;
+ atomic_set(&ab->fail_cont_count, 0);
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "reset success\n");
+ }
+ }
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+}
+
+static void
+ath12k_mac_update_bss_chan_survey(struct ath12k *ar,
+ struct ieee80211_channel *channel)
+{
+ int ret;
+ enum wmi_bss_chan_info_req_type type = WMI_BSS_SURVEY_REQ_TYPE_READ;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (!test_bit(WMI_TLV_SERVICE_BSS_CHANNEL_INFO_64, ar->ab->wmi_ab.svc_map) ||
+ ar->rx_channel != channel)
+ return;
+
+ if (ar->scan.state != ATH12K_SCAN_IDLE) {
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+ "ignoring bss chan info req while scanning..\n");
+ return;
+ }
+
+ reinit_completion(&ar->bss_survey_done);
+
+ ret = ath12k_wmi_pdev_bss_chan_info_request(ar, type);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to send pdev bss chan info request\n");
+ return;
+ }
+
+ ret = wait_for_completion_timeout(&ar->bss_survey_done, 3 * HZ);
+ if (ret == 0)
+ ath12k_warn(ar->ab, "bss channel survey timed out\n");
+}
+
+static int ath12k_mac_op_get_survey(struct ieee80211_hw *hw, int idx,
+ struct survey_info *survey)
+{
+ struct ath12k *ar = hw->priv;
+ struct ieee80211_supported_band *sband;
+ struct survey_info *ar_survey;
+ int ret = 0;
+
+ if (idx >= ATH12K_NUM_CHANS)
+ return -ENOENT;
+
+ ar_survey = &ar->survey[idx];
+
+ mutex_lock(&ar->conf_mutex);
+
+ sband = hw->wiphy->bands[NL80211_BAND_2GHZ];
+ if (sband && idx >= sband->n_channels) {
+ idx -= sband->n_channels;
+ sband = NULL;
+ }
+
+ if (!sband)
+ sband = hw->wiphy->bands[NL80211_BAND_5GHZ];
+
+ if (!sband || idx >= sband->n_channels) {
+ ret = -ENOENT;
+ goto exit;
+ }
+
+ ath12k_mac_update_bss_chan_survey(ar, &sband->channels[idx]);
+
+ spin_lock_bh(&ar->data_lock);
+ memcpy(survey, ar_survey, sizeof(*survey));
+ spin_unlock_bh(&ar->data_lock);
+
+ survey->channel = &sband->channels[idx];
+
+ if (ar->rx_channel == survey->channel)
+ survey->filled |= SURVEY_INFO_IN_USE;
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static void ath12k_mac_op_sta_statistics(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct station_info *sinfo)
+{
+ struct ath12k_sta *arsta = (struct ath12k_sta *)sta->drv_priv;
+
+ sinfo->rx_duration = arsta->rx_duration;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_DURATION);
+
+ sinfo->tx_duration = arsta->tx_duration;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_DURATION);
+
+ if (!arsta->txrate.legacy && !arsta->txrate.nss)
+ return;
+
+ if (arsta->txrate.legacy) {
+ sinfo->txrate.legacy = arsta->txrate.legacy;
+ } else {
+ sinfo->txrate.mcs = arsta->txrate.mcs;
+ sinfo->txrate.nss = arsta->txrate.nss;
+ sinfo->txrate.bw = arsta->txrate.bw;
+ sinfo->txrate.he_gi = arsta->txrate.he_gi;
+ sinfo->txrate.he_dcm = arsta->txrate.he_dcm;
+ sinfo->txrate.he_ru_alloc = arsta->txrate.he_ru_alloc;
+ }
+ sinfo->txrate.flags = arsta->txrate.flags;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
+
+ /* TODO: Use real NF instead of default one. */
+ sinfo->signal = arsta->rssi_comb + ATH12K_DEFAULT_NOISE_FLOOR;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
+}
+
+static const struct ieee80211_ops ath12k_ops = {
+ .tx = ath12k_mac_op_tx,
+ .wake_tx_queue = ieee80211_handle_wake_tx_queue,
+ .start = ath12k_mac_op_start,
+ .stop = ath12k_mac_op_stop,
+ .reconfig_complete = ath12k_mac_op_reconfig_complete,
+ .add_interface = ath12k_mac_op_add_interface,
+ .remove_interface = ath12k_mac_op_remove_interface,
+ .update_vif_offload = ath12k_mac_op_update_vif_offload,
+ .config = ath12k_mac_op_config,
+ .bss_info_changed = ath12k_mac_op_bss_info_changed,
+ .configure_filter = ath12k_mac_op_configure_filter,
+ .hw_scan = ath12k_mac_op_hw_scan,
+ .cancel_hw_scan = ath12k_mac_op_cancel_hw_scan,
+ .set_key = ath12k_mac_op_set_key,
+ .sta_state = ath12k_mac_op_sta_state,
+ .sta_set_txpwr = ath12k_mac_op_sta_set_txpwr,
+ .sta_rc_update = ath12k_mac_op_sta_rc_update,
+ .conf_tx = ath12k_mac_op_conf_tx,
+ .set_antenna = ath12k_mac_op_set_antenna,
+ .get_antenna = ath12k_mac_op_get_antenna,
+ .ampdu_action = ath12k_mac_op_ampdu_action,
+ .add_chanctx = ath12k_mac_op_add_chanctx,
+ .remove_chanctx = ath12k_mac_op_remove_chanctx,
+ .change_chanctx = ath12k_mac_op_change_chanctx,
+ .assign_vif_chanctx = ath12k_mac_op_assign_vif_chanctx,
+ .unassign_vif_chanctx = ath12k_mac_op_unassign_vif_chanctx,
+ .switch_vif_chanctx = ath12k_mac_op_switch_vif_chanctx,
+ .set_rts_threshold = ath12k_mac_op_set_rts_threshold,
+ .set_frag_threshold = ath12k_mac_op_set_frag_threshold,
+ .set_bitrate_mask = ath12k_mac_op_set_bitrate_mask,
+ .get_survey = ath12k_mac_op_get_survey,
+ .flush = ath12k_mac_op_flush,
+ .sta_statistics = ath12k_mac_op_sta_statistics,
+};
+
+static void ath12k_mac_update_ch_list(struct ath12k *ar,
+ struct ieee80211_supported_band *band,
+ u32 freq_low, u32 freq_high)
+{
+ int i;
+
+ if (!(freq_low && freq_high))
+ return;
+
+ for (i = 0; i < band->n_channels; i++) {
+ if (band->channels[i].center_freq < freq_low ||
+ band->channels[i].center_freq > freq_high)
+ band->channels[i].flags |= IEEE80211_CHAN_DISABLED;
+ }
+}
+
+static u32 ath12k_get_phy_id(struct ath12k *ar, u32 band)
+{
+ struct ath12k_pdev *pdev = ar->pdev;
+ struct ath12k_pdev_cap *pdev_cap = &pdev->cap;
+
+ if (band == WMI_HOST_WLAN_2G_CAP)
+ return pdev_cap->band[NL80211_BAND_2GHZ].phy_id;
+
+ if (band == WMI_HOST_WLAN_5G_CAP)
+ return pdev_cap->band[NL80211_BAND_5GHZ].phy_id;
+
+ ath12k_warn(ar->ab, "unsupported phy cap:%d\n", band);
+
+ return 0;
+}
+
+static int ath12k_mac_setup_channels_rates(struct ath12k *ar,
+ u32 supported_bands)
+{
+ struct ieee80211_supported_band *band;
+ struct ath12k_wmi_hal_reg_capabilities_ext_arg *reg_cap;
+ void *channels;
+ u32 phy_id;
+
+ BUILD_BUG_ON((ARRAY_SIZE(ath12k_2ghz_channels) +
+ ARRAY_SIZE(ath12k_5ghz_channels) +
+ ARRAY_SIZE(ath12k_6ghz_channels)) !=
+ ATH12K_NUM_CHANS);
+
+ reg_cap = &ar->ab->hal_reg_cap[ar->pdev_idx];
+
+ if (supported_bands & WMI_HOST_WLAN_2G_CAP) {
+ channels = kmemdup(ath12k_2ghz_channels,
+ sizeof(ath12k_2ghz_channels),
+ GFP_KERNEL);
+ if (!channels)
+ return -ENOMEM;
+
+ band = &ar->mac.sbands[NL80211_BAND_2GHZ];
+ band->band = NL80211_BAND_2GHZ;
+ band->n_channels = ARRAY_SIZE(ath12k_2ghz_channels);
+ band->channels = channels;
+ band->n_bitrates = ath12k_g_rates_size;
+ band->bitrates = ath12k_g_rates;
+ ar->hw->wiphy->bands[NL80211_BAND_2GHZ] = band;
+
+ if (ar->ab->hw_params->single_pdev_only) {
+ phy_id = ath12k_get_phy_id(ar, WMI_HOST_WLAN_2G_CAP);
+ reg_cap = &ar->ab->hal_reg_cap[phy_id];
+ }
+ ath12k_mac_update_ch_list(ar, band,
+ reg_cap->low_2ghz_chan,
+ reg_cap->high_2ghz_chan);
+ }
+
+ if (supported_bands & WMI_HOST_WLAN_5G_CAP) {
+ if (reg_cap->high_5ghz_chan >= ATH12K_MAX_6G_FREQ) {
+ channels = kmemdup(ath12k_6ghz_channels,
+ sizeof(ath12k_6ghz_channels), GFP_KERNEL);
+ if (!channels) {
+ kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
+ return -ENOMEM;
+ }
+
+ ar->supports_6ghz = true;
+ band = &ar->mac.sbands[NL80211_BAND_6GHZ];
+ band->band = NL80211_BAND_6GHZ;
+ band->n_channels = ARRAY_SIZE(ath12k_6ghz_channels);
+ band->channels = channels;
+ band->n_bitrates = ath12k_a_rates_size;
+ band->bitrates = ath12k_a_rates;
+ ar->hw->wiphy->bands[NL80211_BAND_6GHZ] = band;
+ ath12k_mac_update_ch_list(ar, band,
+ reg_cap->low_5ghz_chan,
+ reg_cap->high_5ghz_chan);
+ }
+
+ if (reg_cap->low_5ghz_chan < ATH12K_MIN_6G_FREQ) {
+ channels = kmemdup(ath12k_5ghz_channels,
+ sizeof(ath12k_5ghz_channels),
+ GFP_KERNEL);
+ if (!channels) {
+ kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
+ kfree(ar->mac.sbands[NL80211_BAND_6GHZ].channels);
+ return -ENOMEM;
+ }
+
+ band = &ar->mac.sbands[NL80211_BAND_5GHZ];
+ band->band = NL80211_BAND_5GHZ;
+ band->n_channels = ARRAY_SIZE(ath12k_5ghz_channels);
+ band->channels = channels;
+ band->n_bitrates = ath12k_a_rates_size;
+ band->bitrates = ath12k_a_rates;
+ ar->hw->wiphy->bands[NL80211_BAND_5GHZ] = band;
+
+ if (ar->ab->hw_params->single_pdev_only) {
+ phy_id = ath12k_get_phy_id(ar, WMI_HOST_WLAN_5G_CAP);
+ reg_cap = &ar->ab->hal_reg_cap[phy_id];
+ }
+
+ ath12k_mac_update_ch_list(ar, band,
+ reg_cap->low_5ghz_chan,
+ reg_cap->high_5ghz_chan);
+ }
+ }
+
+ return 0;
+}
+
+static int ath12k_mac_setup_iface_combinations(struct ath12k *ar)
+{
+ struct ath12k_base *ab = ar->ab;
+ struct ieee80211_iface_combination *combinations;
+ struct ieee80211_iface_limit *limits;
+ int n_limits, max_interfaces;
+ bool ap, mesh;
+
+ ap = ab->hw_params->interface_modes & BIT(NL80211_IFTYPE_AP);
+
+ mesh = IS_ENABLED(CONFIG_MAC80211_MESH) &&
+ ab->hw_params->interface_modes & BIT(NL80211_IFTYPE_MESH_POINT);
+
+ combinations = kzalloc(sizeof(*combinations), GFP_KERNEL);
+ if (!combinations)
+ return -ENOMEM;
+
+ if (ap || mesh) {
+ n_limits = 2;
+ max_interfaces = 16;
+ } else {
+ n_limits = 1;
+ max_interfaces = 1;
+ }
+
+ limits = kcalloc(n_limits, sizeof(*limits), GFP_KERNEL);
+ if (!limits) {
+ kfree(combinations);
+ return -ENOMEM;
+ }
+
+ limits[0].max = 1;
+ limits[0].types |= BIT(NL80211_IFTYPE_STATION);
+
+ if (ap) {
+ limits[1].max = max_interfaces;
+ limits[1].types |= BIT(NL80211_IFTYPE_AP);
+ }
+
+ if (mesh)
+ limits[1].types |= BIT(NL80211_IFTYPE_MESH_POINT);
+
+ combinations[0].limits = limits;
+ combinations[0].n_limits = n_limits;
+ combinations[0].max_interfaces = max_interfaces;
+ combinations[0].num_different_channels = 1;
+ combinations[0].beacon_int_infra_match = true;
+ combinations[0].beacon_int_min_gcd = 100;
+ combinations[0].radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
+ BIT(NL80211_CHAN_WIDTH_20) |
+ BIT(NL80211_CHAN_WIDTH_40) |
+ BIT(NL80211_CHAN_WIDTH_80);
+
+ ar->hw->wiphy->iface_combinations = combinations;
+ ar->hw->wiphy->n_iface_combinations = 1;
+
+ return 0;
+}
+
+static const u8 ath12k_if_types_ext_capa[] = {
+ [0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING,
+ [7] = WLAN_EXT_CAPA8_OPMODE_NOTIF,
+};
+
+static const u8 ath12k_if_types_ext_capa_sta[] = {
+ [0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING,
+ [7] = WLAN_EXT_CAPA8_OPMODE_NOTIF,
+ [9] = WLAN_EXT_CAPA10_TWT_REQUESTER_SUPPORT,
+};
+
+static const u8 ath12k_if_types_ext_capa_ap[] = {
+ [0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING,
+ [7] = WLAN_EXT_CAPA8_OPMODE_NOTIF,
+ [9] = WLAN_EXT_CAPA10_TWT_RESPONDER_SUPPORT,
+};
+
+static const struct wiphy_iftype_ext_capab ath12k_iftypes_ext_capa[] = {
+ {
+ .extended_capabilities = ath12k_if_types_ext_capa,
+ .extended_capabilities_mask = ath12k_if_types_ext_capa,
+ .extended_capabilities_len = sizeof(ath12k_if_types_ext_capa),
+ }, {
+ .iftype = NL80211_IFTYPE_STATION,
+ .extended_capabilities = ath12k_if_types_ext_capa_sta,
+ .extended_capabilities_mask = ath12k_if_types_ext_capa_sta,
+ .extended_capabilities_len =
+ sizeof(ath12k_if_types_ext_capa_sta),
+ }, {
+ .iftype = NL80211_IFTYPE_AP,
+ .extended_capabilities = ath12k_if_types_ext_capa_ap,
+ .extended_capabilities_mask = ath12k_if_types_ext_capa_ap,
+ .extended_capabilities_len =
+ sizeof(ath12k_if_types_ext_capa_ap),
+ },
+};
+
+static void __ath12k_mac_unregister(struct ath12k *ar)
+{
+ cancel_work_sync(&ar->regd_update_work);
+
+ ieee80211_unregister_hw(ar->hw);
+
+ idr_for_each(&ar->txmgmt_idr, ath12k_mac_tx_mgmt_pending_free, ar);
+ idr_destroy(&ar->txmgmt_idr);
+
+ kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
+ kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels);
+ kfree(ar->mac.sbands[NL80211_BAND_6GHZ].channels);
+
+ kfree(ar->hw->wiphy->iface_combinations[0].limits);
+ kfree(ar->hw->wiphy->iface_combinations);
+
+ SET_IEEE80211_DEV(ar->hw, NULL);
+}
+
+void ath12k_mac_unregister(struct ath12k_base *ab)
+{
+ struct ath12k *ar;
+ struct ath12k_pdev *pdev;
+ int i;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = &ab->pdevs[i];
+ ar = pdev->ar;
+ if (!ar)
+ continue;
+
+ __ath12k_mac_unregister(ar);
+ }
+}
+
+static int __ath12k_mac_register(struct ath12k *ar)
+{
+ struct ath12k_base *ab = ar->ab;
+ struct ath12k_pdev_cap *cap = &ar->pdev->cap;
+ static const u32 cipher_suites[] = {
+ WLAN_CIPHER_SUITE_TKIP,
+ WLAN_CIPHER_SUITE_CCMP,
+ WLAN_CIPHER_SUITE_AES_CMAC,
+ WLAN_CIPHER_SUITE_BIP_CMAC_256,
+ WLAN_CIPHER_SUITE_BIP_GMAC_128,
+ WLAN_CIPHER_SUITE_BIP_GMAC_256,
+ WLAN_CIPHER_SUITE_GCMP,
+ WLAN_CIPHER_SUITE_GCMP_256,
+ WLAN_CIPHER_SUITE_CCMP_256,
+ };
+ int ret;
+ u32 ht_cap = 0;
+
+ ath12k_pdev_caps_update(ar);
+
+ SET_IEEE80211_PERM_ADDR(ar->hw, ar->mac_addr);
+
+ SET_IEEE80211_DEV(ar->hw, ab->dev);
+
+ ret = ath12k_mac_setup_channels_rates(ar,
+ cap->supported_bands);
+ if (ret)
+ goto err;
+
+ ath12k_mac_setup_ht_vht_cap(ar, cap, &ht_cap);
+ ath12k_mac_setup_he_cap(ar, cap);
+
+ ret = ath12k_mac_setup_iface_combinations(ar);
+ if (ret) {
+ ath12k_err(ar->ab, "failed to setup interface combinations: %d\n", ret);
+ goto err_free_channels;
+ }
+
+ ar->hw->wiphy->available_antennas_rx = cap->rx_chain_mask;
+ ar->hw->wiphy->available_antennas_tx = cap->tx_chain_mask;
+
+ ar->hw->wiphy->interface_modes = ab->hw_params->interface_modes;
+
+ ieee80211_hw_set(ar->hw, SIGNAL_DBM);
+ ieee80211_hw_set(ar->hw, SUPPORTS_PS);
+ ieee80211_hw_set(ar->hw, SUPPORTS_DYNAMIC_PS);
+ ieee80211_hw_set(ar->hw, MFP_CAPABLE);
+ ieee80211_hw_set(ar->hw, REPORTS_TX_ACK_STATUS);
+ ieee80211_hw_set(ar->hw, HAS_RATE_CONTROL);
+ ieee80211_hw_set(ar->hw, AP_LINK_PS);
+ ieee80211_hw_set(ar->hw, SPECTRUM_MGMT);
+ ieee80211_hw_set(ar->hw, CONNECTION_MONITOR);
+ ieee80211_hw_set(ar->hw, SUPPORTS_PER_STA_GTK);
+ ieee80211_hw_set(ar->hw, CHANCTX_STA_CSA);
+ ieee80211_hw_set(ar->hw, QUEUE_CONTROL);
+ ieee80211_hw_set(ar->hw, SUPPORTS_TX_FRAG);
+ ieee80211_hw_set(ar->hw, REPORTS_LOW_ACK);
+
+ if (ht_cap & WMI_HT_CAP_ENABLED) {
+ ieee80211_hw_set(ar->hw, AMPDU_AGGREGATION);
+ ieee80211_hw_set(ar->hw, TX_AMPDU_SETUP_IN_HW);
+ ieee80211_hw_set(ar->hw, SUPPORTS_REORDERING_BUFFER);
+ ieee80211_hw_set(ar->hw, SUPPORTS_AMSDU_IN_AMPDU);
+ ieee80211_hw_set(ar->hw, USES_RSS);
+ }
+
+ ar->hw->wiphy->features |= NL80211_FEATURE_STATIC_SMPS;
+ ar->hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
+
+ /* TODO: Check if HT capability advertised from firmware is different
+ * for each band for a dual band capable radio. It will be tricky to
+ * handle it when the ht capability different for each band.
+ */
+ if (ht_cap & WMI_HT_CAP_DYNAMIC_SMPS)
+ ar->hw->wiphy->features |= NL80211_FEATURE_DYNAMIC_SMPS;
+
+ ar->hw->wiphy->max_scan_ssids = WLAN_SCAN_PARAMS_MAX_SSID;
+ ar->hw->wiphy->max_scan_ie_len = WLAN_SCAN_PARAMS_MAX_IE_LEN;
+
+ ar->hw->max_listen_interval = ATH12K_MAX_HW_LISTEN_INTERVAL;
+
+ ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
+ ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
+ ar->hw->wiphy->max_remain_on_channel_duration = 5000;
+
+ ar->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
+ ar->hw->wiphy->features |= NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE |
+ NL80211_FEATURE_AP_SCAN;
+
+ ar->max_num_stations = TARGET_NUM_STATIONS;
+ ar->max_num_peers = TARGET_NUM_PEERS_PDEV;
+
+ ar->hw->wiphy->max_ap_assoc_sta = ar->max_num_stations;
+
+ ar->hw->queues = ATH12K_HW_MAX_QUEUES;
+ ar->hw->wiphy->tx_queue_len = ATH12K_QUEUE_LEN;
+ ar->hw->offchannel_tx_hw_queue = ATH12K_HW_MAX_QUEUES - 1;
+ ar->hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_HE;
+
+ ar->hw->vif_data_size = sizeof(struct ath12k_vif);
+ ar->hw->sta_data_size = sizeof(struct ath12k_sta);
+
+ wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
+ wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_STA_TX_PWR);
+
+ ar->hw->wiphy->cipher_suites = cipher_suites;
+ ar->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
+
+ ar->hw->wiphy->iftype_ext_capab = ath12k_iftypes_ext_capa;
+ ar->hw->wiphy->num_iftype_ext_capab =
+ ARRAY_SIZE(ath12k_iftypes_ext_capa);
+
+ if (ar->supports_6ghz) {
+ wiphy_ext_feature_set(ar->hw->wiphy,
+ NL80211_EXT_FEATURE_FILS_DISCOVERY);
+ wiphy_ext_feature_set(ar->hw->wiphy,
+ NL80211_EXT_FEATURE_UNSOL_BCAST_PROBE_RESP);
+ }
+
+ ath12k_reg_init(ar);
+
+ if (!test_bit(ATH12K_FLAG_RAW_MODE, &ab->dev_flags)) {
+ ar->hw->netdev_features = NETIF_F_HW_CSUM;
+ ieee80211_hw_set(ar->hw, SW_CRYPTO_CONTROL);
+ ieee80211_hw_set(ar->hw, SUPPORT_FAST_XMIT);
+ }
+
+ ret = ieee80211_register_hw(ar->hw);
+ if (ret) {
+ ath12k_err(ar->ab, "ieee80211 registration failed: %d\n", ret);
+ goto err_free_if_combs;
+ }
+
+ if (!ab->hw_params->supports_monitor)
+ /* There's a race between calling ieee80211_register_hw()
+ * and here where the monitor mode is enabled for a little
+ * while. But that time is so short and in practise it make
+ * a difference in real life.
+ */
+ ar->hw->wiphy->interface_modes &= ~BIT(NL80211_IFTYPE_MONITOR);
+
+ /* Apply the regd received during initialization */
+ ret = ath12k_regd_update(ar, true);
+ if (ret) {
+ ath12k_err(ar->ab, "ath12k regd update failed: %d\n", ret);
+ goto err_unregister_hw;
+ }
+
+ return 0;
+
+err_unregister_hw:
+ ieee80211_unregister_hw(ar->hw);
+
+err_free_if_combs:
+ kfree(ar->hw->wiphy->iface_combinations[0].limits);
+ kfree(ar->hw->wiphy->iface_combinations);
+
+err_free_channels:
+ kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
+ kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels);
+ kfree(ar->mac.sbands[NL80211_BAND_6GHZ].channels);
+
+err:
+ SET_IEEE80211_DEV(ar->hw, NULL);
+ return ret;
+}
+
+int ath12k_mac_register(struct ath12k_base *ab)
+{
+ struct ath12k *ar;
+ struct ath12k_pdev *pdev;
+ int i;
+ int ret;
+
+ if (test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags))
+ return 0;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = &ab->pdevs[i];
+ ar = pdev->ar;
+ if (ab->pdevs_macaddr_valid) {
+ ether_addr_copy(ar->mac_addr, pdev->mac_addr);
+ } else {
+ ether_addr_copy(ar->mac_addr, ab->mac_addr);
+ ar->mac_addr[4] += i;
+ }
+
+ ret = __ath12k_mac_register(ar);
+ if (ret)
+ goto err_cleanup;
+
+ idr_init(&ar->txmgmt_idr);
+ spin_lock_init(&ar->txmgmt_idr_lock);
+ }
+
+ /* Initialize channel counters frequency value in hertz */
+ ab->cc_freq_hz = 320000;
+ ab->free_vdev_map = (1LL << (ab->num_radios * TARGET_NUM_VDEVS)) - 1;
+
+ return 0;
+
+err_cleanup:
+ for (i = i - 1; i >= 0; i--) {
+ pdev = &ab->pdevs[i];
+ ar = pdev->ar;
+ __ath12k_mac_unregister(ar);
+ }
+
+ return ret;
+}
+
+int ath12k_mac_allocate(struct ath12k_base *ab)
+{
+ struct ieee80211_hw *hw;
+ struct ath12k *ar;
+ struct ath12k_pdev *pdev;
+ int ret;
+ int i;
+
+ if (test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags))
+ return 0;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = &ab->pdevs[i];
+ hw = ieee80211_alloc_hw(sizeof(struct ath12k), &ath12k_ops);
+ if (!hw) {
+ ath12k_warn(ab, "failed to allocate mac80211 hw device\n");
+ ret = -ENOMEM;
+ goto err_free_mac;
+ }
+
+ ar = hw->priv;
+ ar->hw = hw;
+ ar->ab = ab;
+ ar->pdev = pdev;
+ ar->pdev_idx = i;
+ ar->lmac_id = ath12k_hw_get_mac_from_pdev_id(ab->hw_params, i);
+
+ ar->wmi = &ab->wmi_ab.wmi[i];
+ /* FIXME: wmi[0] is already initialized during attach,
+ * Should we do this again?
+ */
+ ath12k_wmi_pdev_attach(ab, i);
+
+ ar->cfg_tx_chainmask = pdev->cap.tx_chain_mask;
+ ar->cfg_rx_chainmask = pdev->cap.rx_chain_mask;
+ ar->num_tx_chains = hweight32(pdev->cap.tx_chain_mask);
+ ar->num_rx_chains = hweight32(pdev->cap.rx_chain_mask);
+
+ pdev->ar = ar;
+ spin_lock_init(&ar->data_lock);
+ INIT_LIST_HEAD(&ar->arvifs);
+ INIT_LIST_HEAD(&ar->ppdu_stats_info);
+ mutex_init(&ar->conf_mutex);
+ init_completion(&ar->vdev_setup_done);
+ init_completion(&ar->vdev_delete_done);
+ init_completion(&ar->peer_assoc_done);
+ init_completion(&ar->peer_delete_done);
+ init_completion(&ar->install_key_done);
+ init_completion(&ar->bss_survey_done);
+ init_completion(&ar->scan.started);
+ init_completion(&ar->scan.completed);
+
+ INIT_DELAYED_WORK(&ar->scan.timeout, ath12k_scan_timeout_work);
+ INIT_WORK(&ar->regd_update_work, ath12k_regd_update_work);
+
+ INIT_WORK(&ar->wmi_mgmt_tx_work, ath12k_mgmt_over_wmi_tx_work);
+ skb_queue_head_init(&ar->wmi_mgmt_tx_queue);
+ clear_bit(ATH12K_FLAG_MONITOR_ENABLED, &ar->monitor_flags);
+ }
+
+ return 0;
+
+err_free_mac:
+ ath12k_mac_destroy(ab);
+
+ return ret;
+}
+
+void ath12k_mac_destroy(struct ath12k_base *ab)
+{
+ struct ath12k *ar;
+ struct ath12k_pdev *pdev;
+ int i;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = &ab->pdevs[i];
+ ar = pdev->ar;
+ if (!ar)
+ continue;
+
+ ieee80211_free_hw(ar->hw);
+ pdev->ar = NULL;
+ }
+}
diff --git a/drivers/net/wireless/ath/ath12k/mac.h b/drivers/net/wireless/ath/ath12k/mac.h
new file mode 100644
index 000000000000..57f4295420bb
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/mac.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef ATH12K_MAC_H
+#define ATH12K_MAC_H
+
+#include <net/mac80211.h>
+#include <net/cfg80211.h>
+
+struct ath12k;
+struct ath12k_base;
+
+struct ath12k_generic_iter {
+ struct ath12k *ar;
+ int ret;
+};
+
+/* number of failed packets (20 packets with 16 sw reties each) */
+#define ATH12K_KICKOUT_THRESHOLD (20 * 16)
+
+/* Use insanely high numbers to make sure that the firmware implementation
+ * won't start, we have the same functionality already in hostapd. Unit
+ * is seconds.
+ */
+#define ATH12K_KEEPALIVE_MIN_IDLE 3747
+#define ATH12K_KEEPALIVE_MAX_IDLE 3895
+#define ATH12K_KEEPALIVE_MAX_UNRESPONSIVE 3900
+
+/* FIXME: should these be in ieee80211.h? */
+#define IEEE80211_VHT_MCS_SUPPORT_0_11_MASK GENMASK(23, 16)
+#define IEEE80211_DISABLE_VHT_MCS_SUPPORT_0_11 BIT(24)
+
+#define ATH12K_CHAN_WIDTH_NUM 8
+
+#define ATH12K_TX_POWER_MAX_VAL 70
+#define ATH12K_TX_POWER_MIN_VAL 0
+
+enum ath12k_supported_bw {
+ ATH12K_BW_20 = 0,
+ ATH12K_BW_40 = 1,
+ ATH12K_BW_80 = 2,
+ ATH12K_BW_160 = 3,
+};
+
+extern const struct htt_rx_ring_tlv_filter ath12k_mac_mon_status_filter_default;
+
+void ath12k_mac_destroy(struct ath12k_base *ab);
+void ath12k_mac_unregister(struct ath12k_base *ab);
+int ath12k_mac_register(struct ath12k_base *ab);
+int ath12k_mac_allocate(struct ath12k_base *ab);
+int ath12k_mac_hw_ratecode_to_legacy_rate(u8 hw_rc, u8 preamble, u8 *rateidx,
+ u16 *rate);
+u8 ath12k_mac_bitrate_to_idx(const struct ieee80211_supported_band *sband,
+ u32 bitrate);
+u8 ath12k_mac_hw_rate_to_idx(const struct ieee80211_supported_band *sband,
+ u8 hw_rate, bool cck);
+
+void __ath12k_mac_scan_finish(struct ath12k *ar);
+void ath12k_mac_scan_finish(struct ath12k *ar);
+
+struct ath12k_vif *ath12k_mac_get_arvif(struct ath12k *ar, u32 vdev_id);
+struct ath12k_vif *ath12k_mac_get_arvif_by_vdev_id(struct ath12k_base *ab,
+ u32 vdev_id);
+struct ath12k *ath12k_mac_get_ar_by_vdev_id(struct ath12k_base *ab, u32 vdev_id);
+struct ath12k *ath12k_mac_get_ar_by_pdev_id(struct ath12k_base *ab, u32 pdev_id);
+
+void ath12k_mac_drain_tx(struct ath12k *ar);
+void ath12k_mac_peer_cleanup_all(struct ath12k *ar);
+int ath12k_mac_tx_mgmt_pending_free(int buf_id, void *skb, void *ctx);
+enum rate_info_bw ath12k_mac_bw_to_mac80211_bw(enum ath12k_supported_bw bw);
+enum ath12k_supported_bw ath12k_mac_mac80211_bw_to_ath12k_bw(enum rate_info_bw bw);
+enum hal_encrypt_type ath12k_dp_tx_get_encrypt_type(u32 cipher);
+#endif
diff --git a/drivers/net/wireless/ath/ath12k/mhi.c b/drivers/net/wireless/ath/ath12k/mhi.c
new file mode 100644
index 000000000000..42f1140baa4f
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/mhi.c
@@ -0,0 +1,616 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2020-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/msi.h>
+#include <linux/pci.h>
+
+#include "core.h"
+#include "debug.h"
+#include "mhi.h"
+#include "pci.h"
+
+#define MHI_TIMEOUT_DEFAULT_MS 90000
+
+static const struct mhi_channel_config ath12k_mhi_channels_qcn9274[] = {
+ {
+ .num = 0,
+ .name = "LOOPBACK",
+ .num_elements = 32,
+ .event_ring = 1,
+ .dir = DMA_TO_DEVICE,
+ .ee_mask = 0x4,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ },
+ {
+ .num = 1,
+ .name = "LOOPBACK",
+ .num_elements = 32,
+ .event_ring = 1,
+ .dir = DMA_FROM_DEVICE,
+ .ee_mask = 0x4,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ },
+ {
+ .num = 20,
+ .name = "IPCR",
+ .num_elements = 32,
+ .event_ring = 1,
+ .dir = DMA_TO_DEVICE,
+ .ee_mask = 0x4,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ },
+ {
+ .num = 21,
+ .name = "IPCR",
+ .num_elements = 32,
+ .event_ring = 1,
+ .dir = DMA_FROM_DEVICE,
+ .ee_mask = 0x4,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = true,
+ },
+};
+
+static struct mhi_event_config ath12k_mhi_events_qcn9274[] = {
+ {
+ .num_elements = 32,
+ .irq_moderation_ms = 0,
+ .irq = 1,
+ .data_type = MHI_ER_CTRL,
+ .mode = MHI_DB_BRST_DISABLE,
+ .hardware_event = false,
+ .client_managed = false,
+ .offload_channel = false,
+ },
+ {
+ .num_elements = 256,
+ .irq_moderation_ms = 1,
+ .irq = 2,
+ .mode = MHI_DB_BRST_DISABLE,
+ .priority = 1,
+ .hardware_event = false,
+ .client_managed = false,
+ .offload_channel = false,
+ },
+};
+
+const struct mhi_controller_config ath12k_mhi_config_qcn9274 = {
+ .max_channels = 30,
+ .timeout_ms = 10000,
+ .use_bounce_buf = false,
+ .buf_len = 0,
+ .num_channels = ARRAY_SIZE(ath12k_mhi_channels_qcn9274),
+ .ch_cfg = ath12k_mhi_channels_qcn9274,
+ .num_events = ARRAY_SIZE(ath12k_mhi_events_qcn9274),
+ .event_cfg = ath12k_mhi_events_qcn9274,
+};
+
+static const struct mhi_channel_config ath12k_mhi_channels_wcn7850[] = {
+ {
+ .num = 0,
+ .name = "LOOPBACK",
+ .num_elements = 32,
+ .event_ring = 0,
+ .dir = DMA_TO_DEVICE,
+ .ee_mask = 0x4,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ },
+ {
+ .num = 1,
+ .name = "LOOPBACK",
+ .num_elements = 32,
+ .event_ring = 0,
+ .dir = DMA_FROM_DEVICE,
+ .ee_mask = 0x4,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ },
+ {
+ .num = 20,
+ .name = "IPCR",
+ .num_elements = 64,
+ .event_ring = 1,
+ .dir = DMA_TO_DEVICE,
+ .ee_mask = 0x4,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ },
+ {
+ .num = 21,
+ .name = "IPCR",
+ .num_elements = 64,
+ .event_ring = 1,
+ .dir = DMA_FROM_DEVICE,
+ .ee_mask = 0x4,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = true,
+ },
+};
+
+static struct mhi_event_config ath12k_mhi_events_wcn7850[] = {
+ {
+ .num_elements = 32,
+ .irq_moderation_ms = 0,
+ .irq = 1,
+ .mode = MHI_DB_BRST_DISABLE,
+ .data_type = MHI_ER_CTRL,
+ .hardware_event = false,
+ .client_managed = false,
+ .offload_channel = false,
+ },
+ {
+ .num_elements = 256,
+ .irq_moderation_ms = 1,
+ .irq = 2,
+ .mode = MHI_DB_BRST_DISABLE,
+ .priority = 1,
+ .hardware_event = false,
+ .client_managed = false,
+ .offload_channel = false,
+ },
+};
+
+const struct mhi_controller_config ath12k_mhi_config_wcn7850 = {
+ .max_channels = 128,
+ .timeout_ms = 2000,
+ .use_bounce_buf = false,
+ .buf_len = 0,
+ .num_channels = ARRAY_SIZE(ath12k_mhi_channels_wcn7850),
+ .ch_cfg = ath12k_mhi_channels_wcn7850,
+ .num_events = ARRAY_SIZE(ath12k_mhi_events_wcn7850),
+ .event_cfg = ath12k_mhi_events_wcn7850,
+};
+
+void ath12k_mhi_set_mhictrl_reset(struct ath12k_base *ab)
+{
+ u32 val;
+
+ val = ath12k_pci_read32(ab, MHISTATUS);
+
+ ath12k_dbg(ab, ATH12K_DBG_PCI, "MHISTATUS 0x%x\n", val);
+
+ /* Observed on some targets that after SOC_GLOBAL_RESET, MHISTATUS
+ * has SYSERR bit set and thus need to set MHICTRL_RESET
+ * to clear SYSERR.
+ */
+ ath12k_pci_write32(ab, MHICTRL, MHICTRL_RESET_MASK);
+
+ mdelay(10);
+}
+
+static void ath12k_mhi_reset_txvecdb(struct ath12k_base *ab)
+{
+ ath12k_pci_write32(ab, PCIE_TXVECDB, 0);
+}
+
+static void ath12k_mhi_reset_txvecstatus(struct ath12k_base *ab)
+{
+ ath12k_pci_write32(ab, PCIE_TXVECSTATUS, 0);
+}
+
+static void ath12k_mhi_reset_rxvecdb(struct ath12k_base *ab)
+{
+ ath12k_pci_write32(ab, PCIE_RXVECDB, 0);
+}
+
+static void ath12k_mhi_reset_rxvecstatus(struct ath12k_base *ab)
+{
+ ath12k_pci_write32(ab, PCIE_RXVECSTATUS, 0);
+}
+
+void ath12k_mhi_clear_vector(struct ath12k_base *ab)
+{
+ ath12k_mhi_reset_txvecdb(ab);
+ ath12k_mhi_reset_txvecstatus(ab);
+ ath12k_mhi_reset_rxvecdb(ab);
+ ath12k_mhi_reset_rxvecstatus(ab);
+}
+
+static int ath12k_mhi_get_msi(struct ath12k_pci *ab_pci)
+{
+ struct ath12k_base *ab = ab_pci->ab;
+ u32 user_base_data, base_vector;
+ int ret, num_vectors, i;
+ int *irq;
+
+ ret = ath12k_pci_get_user_msi_assignment(ab,
+ "MHI", &num_vectors,
+ &user_base_data, &base_vector);
+ if (ret)
+ return ret;
+
+ ath12k_dbg(ab, ATH12K_DBG_PCI, "Number of assigned MSI for MHI is %d, base vector is %d\n",
+ num_vectors, base_vector);
+
+ irq = kcalloc(num_vectors, sizeof(*irq), GFP_KERNEL);
+ if (!irq)
+ return -ENOMEM;
+
+ for (i = 0; i < num_vectors; i++)
+ irq[i] = ath12k_pci_get_msi_irq(ab->dev,
+ base_vector + i);
+
+ ab_pci->mhi_ctrl->irq = irq;
+ ab_pci->mhi_ctrl->nr_irqs = num_vectors;
+
+ return 0;
+}
+
+static int ath12k_mhi_op_runtime_get(struct mhi_controller *mhi_cntrl)
+{
+ return 0;
+}
+
+static void ath12k_mhi_op_runtime_put(struct mhi_controller *mhi_cntrl)
+{
+}
+
+static char *ath12k_mhi_op_callback_to_str(enum mhi_callback reason)
+{
+ switch (reason) {
+ case MHI_CB_IDLE:
+ return "MHI_CB_IDLE";
+ case MHI_CB_PENDING_DATA:
+ return "MHI_CB_PENDING_DATA";
+ case MHI_CB_LPM_ENTER:
+ return "MHI_CB_LPM_ENTER";
+ case MHI_CB_LPM_EXIT:
+ return "MHI_CB_LPM_EXIT";
+ case MHI_CB_EE_RDDM:
+ return "MHI_CB_EE_RDDM";
+ case MHI_CB_EE_MISSION_MODE:
+ return "MHI_CB_EE_MISSION_MODE";
+ case MHI_CB_SYS_ERROR:
+ return "MHI_CB_SYS_ERROR";
+ case MHI_CB_FATAL_ERROR:
+ return "MHI_CB_FATAL_ERROR";
+ case MHI_CB_BW_REQ:
+ return "MHI_CB_BW_REQ";
+ default:
+ return "UNKNOWN";
+ }
+}
+
+static void ath12k_mhi_op_status_cb(struct mhi_controller *mhi_cntrl,
+ enum mhi_callback cb)
+{
+ struct ath12k_base *ab = dev_get_drvdata(mhi_cntrl->cntrl_dev);
+
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "mhi notify status reason %s\n",
+ ath12k_mhi_op_callback_to_str(cb));
+
+ switch (cb) {
+ case MHI_CB_SYS_ERROR:
+ ath12k_warn(ab, "firmware crashed: MHI_CB_SYS_ERROR\n");
+ break;
+ case MHI_CB_EE_RDDM:
+ if (!(test_bit(ATH12K_FLAG_UNREGISTERING, &ab->dev_flags)))
+ queue_work(ab->workqueue_aux, &ab->reset_work);
+ break;
+ default:
+ break;
+ }
+}
+
+static int ath12k_mhi_op_read_reg(struct mhi_controller *mhi_cntrl,
+ void __iomem *addr,
+ u32 *out)
+{
+ *out = readl(addr);
+
+ return 0;
+}
+
+static void ath12k_mhi_op_write_reg(struct mhi_controller *mhi_cntrl,
+ void __iomem *addr,
+ u32 val)
+{
+ writel(val, addr);
+}
+
+int ath12k_mhi_register(struct ath12k_pci *ab_pci)
+{
+ struct ath12k_base *ab = ab_pci->ab;
+ struct mhi_controller *mhi_ctrl;
+ int ret;
+
+ mhi_ctrl = mhi_alloc_controller();
+ if (!mhi_ctrl)
+ return -ENOMEM;
+
+ ath12k_core_create_firmware_path(ab, ATH12K_AMSS_FILE,
+ ab_pci->amss_path,
+ sizeof(ab_pci->amss_path));
+
+ ab_pci->mhi_ctrl = mhi_ctrl;
+ mhi_ctrl->cntrl_dev = ab->dev;
+ mhi_ctrl->fw_image = ab_pci->amss_path;
+ mhi_ctrl->regs = ab->mem;
+ mhi_ctrl->reg_len = ab->mem_len;
+
+ ret = ath12k_mhi_get_msi(ab_pci);
+ if (ret) {
+ ath12k_err(ab, "failed to get msi for mhi\n");
+ mhi_free_controller(mhi_ctrl);
+ return ret;
+ }
+
+ mhi_ctrl->iova_start = 0;
+ mhi_ctrl->iova_stop = 0xffffffff;
+ mhi_ctrl->sbl_size = SZ_512K;
+ mhi_ctrl->seg_len = SZ_512K;
+ mhi_ctrl->fbc_download = true;
+ mhi_ctrl->runtime_get = ath12k_mhi_op_runtime_get;
+ mhi_ctrl->runtime_put = ath12k_mhi_op_runtime_put;
+ mhi_ctrl->status_cb = ath12k_mhi_op_status_cb;
+ mhi_ctrl->read_reg = ath12k_mhi_op_read_reg;
+ mhi_ctrl->write_reg = ath12k_mhi_op_write_reg;
+
+ ret = mhi_register_controller(mhi_ctrl, ab->hw_params->mhi_config);
+ if (ret) {
+ ath12k_err(ab, "failed to register to mhi bus, err = %d\n", ret);
+ mhi_free_controller(mhi_ctrl);
+ return ret;
+ }
+
+ return 0;
+}
+
+void ath12k_mhi_unregister(struct ath12k_pci *ab_pci)
+{
+ struct mhi_controller *mhi_ctrl = ab_pci->mhi_ctrl;
+
+ mhi_unregister_controller(mhi_ctrl);
+ kfree(mhi_ctrl->irq);
+ mhi_free_controller(mhi_ctrl);
+ ab_pci->mhi_ctrl = NULL;
+}
+
+static char *ath12k_mhi_state_to_str(enum ath12k_mhi_state mhi_state)
+{
+ switch (mhi_state) {
+ case ATH12K_MHI_INIT:
+ return "INIT";
+ case ATH12K_MHI_DEINIT:
+ return "DEINIT";
+ case ATH12K_MHI_POWER_ON:
+ return "POWER_ON";
+ case ATH12K_MHI_POWER_OFF:
+ return "POWER_OFF";
+ case ATH12K_MHI_FORCE_POWER_OFF:
+ return "FORCE_POWER_OFF";
+ case ATH12K_MHI_SUSPEND:
+ return "SUSPEND";
+ case ATH12K_MHI_RESUME:
+ return "RESUME";
+ case ATH12K_MHI_TRIGGER_RDDM:
+ return "TRIGGER_RDDM";
+ case ATH12K_MHI_RDDM_DONE:
+ return "RDDM_DONE";
+ default:
+ return "UNKNOWN";
+ }
+};
+
+static void ath12k_mhi_set_state_bit(struct ath12k_pci *ab_pci,
+ enum ath12k_mhi_state mhi_state)
+{
+ struct ath12k_base *ab = ab_pci->ab;
+
+ switch (mhi_state) {
+ case ATH12K_MHI_INIT:
+ set_bit(ATH12K_MHI_INIT, &ab_pci->mhi_state);
+ break;
+ case ATH12K_MHI_DEINIT:
+ clear_bit(ATH12K_MHI_INIT, &ab_pci->mhi_state);
+ break;
+ case ATH12K_MHI_POWER_ON:
+ set_bit(ATH12K_MHI_POWER_ON, &ab_pci->mhi_state);
+ break;
+ case ATH12K_MHI_POWER_OFF:
+ case ATH12K_MHI_FORCE_POWER_OFF:
+ clear_bit(ATH12K_MHI_POWER_ON, &ab_pci->mhi_state);
+ clear_bit(ATH12K_MHI_TRIGGER_RDDM, &ab_pci->mhi_state);
+ clear_bit(ATH12K_MHI_RDDM_DONE, &ab_pci->mhi_state);
+ break;
+ case ATH12K_MHI_SUSPEND:
+ set_bit(ATH12K_MHI_SUSPEND, &ab_pci->mhi_state);
+ break;
+ case ATH12K_MHI_RESUME:
+ clear_bit(ATH12K_MHI_SUSPEND, &ab_pci->mhi_state);
+ break;
+ case ATH12K_MHI_TRIGGER_RDDM:
+ set_bit(ATH12K_MHI_TRIGGER_RDDM, &ab_pci->mhi_state);
+ break;
+ case ATH12K_MHI_RDDM_DONE:
+ set_bit(ATH12K_MHI_RDDM_DONE, &ab_pci->mhi_state);
+ break;
+ default:
+ ath12k_err(ab, "unhandled mhi state (%d)\n", mhi_state);
+ }
+}
+
+static int ath12k_mhi_check_state_bit(struct ath12k_pci *ab_pci,
+ enum ath12k_mhi_state mhi_state)
+{
+ struct ath12k_base *ab = ab_pci->ab;
+
+ switch (mhi_state) {
+ case ATH12K_MHI_INIT:
+ if (!test_bit(ATH12K_MHI_INIT, &ab_pci->mhi_state))
+ return 0;
+ break;
+ case ATH12K_MHI_DEINIT:
+ case ATH12K_MHI_POWER_ON:
+ if (test_bit(ATH12K_MHI_INIT, &ab_pci->mhi_state) &&
+ !test_bit(ATH12K_MHI_POWER_ON, &ab_pci->mhi_state))
+ return 0;
+ break;
+ case ATH12K_MHI_FORCE_POWER_OFF:
+ if (test_bit(ATH12K_MHI_POWER_ON, &ab_pci->mhi_state))
+ return 0;
+ break;
+ case ATH12K_MHI_POWER_OFF:
+ case ATH12K_MHI_SUSPEND:
+ if (test_bit(ATH12K_MHI_POWER_ON, &ab_pci->mhi_state) &&
+ !test_bit(ATH12K_MHI_SUSPEND, &ab_pci->mhi_state))
+ return 0;
+ break;
+ case ATH12K_MHI_RESUME:
+ if (test_bit(ATH12K_MHI_SUSPEND, &ab_pci->mhi_state))
+ return 0;
+ break;
+ case ATH12K_MHI_TRIGGER_RDDM:
+ if (test_bit(ATH12K_MHI_POWER_ON, &ab_pci->mhi_state) &&
+ !test_bit(ATH12K_MHI_TRIGGER_RDDM, &ab_pci->mhi_state))
+ return 0;
+ break;
+ case ATH12K_MHI_RDDM_DONE:
+ return 0;
+ default:
+ ath12k_err(ab, "unhandled mhi state: %s(%d)\n",
+ ath12k_mhi_state_to_str(mhi_state), mhi_state);
+ }
+
+ ath12k_err(ab, "failed to set mhi state %s(%d) in current mhi state (0x%lx)\n",
+ ath12k_mhi_state_to_str(mhi_state), mhi_state,
+ ab_pci->mhi_state);
+
+ return -EINVAL;
+}
+
+static int ath12k_mhi_set_state(struct ath12k_pci *ab_pci,
+ enum ath12k_mhi_state mhi_state)
+{
+ struct ath12k_base *ab = ab_pci->ab;
+ int ret;
+
+ ret = ath12k_mhi_check_state_bit(ab_pci, mhi_state);
+ if (ret)
+ goto out;
+
+ ath12k_dbg(ab, ATH12K_DBG_PCI, "setting mhi state: %s(%d)\n",
+ ath12k_mhi_state_to_str(mhi_state), mhi_state);
+
+ switch (mhi_state) {
+ case ATH12K_MHI_INIT:
+ ret = mhi_prepare_for_power_up(ab_pci->mhi_ctrl);
+ break;
+ case ATH12K_MHI_DEINIT:
+ mhi_unprepare_after_power_down(ab_pci->mhi_ctrl);
+ ret = 0;
+ break;
+ case ATH12K_MHI_POWER_ON:
+ ret = mhi_async_power_up(ab_pci->mhi_ctrl);
+ break;
+ case ATH12K_MHI_POWER_OFF:
+ mhi_power_down(ab_pci->mhi_ctrl, true);
+ ret = 0;
+ break;
+ case ATH12K_MHI_FORCE_POWER_OFF:
+ mhi_power_down(ab_pci->mhi_ctrl, false);
+ ret = 0;
+ break;
+ case ATH12K_MHI_SUSPEND:
+ ret = mhi_pm_suspend(ab_pci->mhi_ctrl);
+ break;
+ case ATH12K_MHI_RESUME:
+ ret = mhi_pm_resume(ab_pci->mhi_ctrl);
+ break;
+ case ATH12K_MHI_TRIGGER_RDDM:
+ ret = mhi_force_rddm_mode(ab_pci->mhi_ctrl);
+ break;
+ case ATH12K_MHI_RDDM_DONE:
+ break;
+ default:
+ ath12k_err(ab, "unhandled MHI state (%d)\n", mhi_state);
+ ret = -EINVAL;
+ }
+
+ if (ret)
+ goto out;
+
+ ath12k_mhi_set_state_bit(ab_pci, mhi_state);
+
+ return 0;
+
+out:
+ ath12k_err(ab, "failed to set mhi state: %s(%d)\n",
+ ath12k_mhi_state_to_str(mhi_state), mhi_state);
+ return ret;
+}
+
+int ath12k_mhi_start(struct ath12k_pci *ab_pci)
+{
+ int ret;
+
+ ab_pci->mhi_ctrl->timeout_ms = MHI_TIMEOUT_DEFAULT_MS;
+
+ ret = ath12k_mhi_set_state(ab_pci, ATH12K_MHI_INIT);
+ if (ret)
+ goto out;
+
+ ret = ath12k_mhi_set_state(ab_pci, ATH12K_MHI_POWER_ON);
+ if (ret)
+ goto out;
+
+ return 0;
+
+out:
+ return ret;
+}
+
+void ath12k_mhi_stop(struct ath12k_pci *ab_pci)
+{
+ ath12k_mhi_set_state(ab_pci, ATH12K_MHI_POWER_OFF);
+ ath12k_mhi_set_state(ab_pci, ATH12K_MHI_DEINIT);
+}
+
+void ath12k_mhi_suspend(struct ath12k_pci *ab_pci)
+{
+ ath12k_mhi_set_state(ab_pci, ATH12K_MHI_SUSPEND);
+}
+
+void ath12k_mhi_resume(struct ath12k_pci *ab_pci)
+{
+ ath12k_mhi_set_state(ab_pci, ATH12K_MHI_RESUME);
+}
diff --git a/drivers/net/wireless/ath/ath12k/mhi.h b/drivers/net/wireless/ath/ath12k/mhi.h
new file mode 100644
index 000000000000..ebc23640ce7a
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/mhi.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2020-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+#ifndef _ATH12K_MHI_H
+#define _ATH12K_MHI_H
+
+#include "pci.h"
+
+#define PCIE_TXVECDB 0x360
+#define PCIE_TXVECSTATUS 0x368
+#define PCIE_RXVECDB 0x394
+#define PCIE_RXVECSTATUS 0x39C
+
+#define MHISTATUS 0x48
+#define MHICTRL 0x38
+#define MHICTRL_RESET_MASK 0x2
+
+enum ath12k_mhi_state {
+ ATH12K_MHI_INIT,
+ ATH12K_MHI_DEINIT,
+ ATH12K_MHI_POWER_ON,
+ ATH12K_MHI_POWER_OFF,
+ ATH12K_MHI_FORCE_POWER_OFF,
+ ATH12K_MHI_SUSPEND,
+ ATH12K_MHI_RESUME,
+ ATH12K_MHI_TRIGGER_RDDM,
+ ATH12K_MHI_RDDM,
+ ATH12K_MHI_RDDM_DONE,
+};
+
+extern const struct mhi_controller_config ath12k_mhi_config_qcn9274;
+extern const struct mhi_controller_config ath12k_mhi_config_wcn7850;
+
+int ath12k_mhi_start(struct ath12k_pci *ar_pci);
+void ath12k_mhi_stop(struct ath12k_pci *ar_pci);
+int ath12k_mhi_register(struct ath12k_pci *ar_pci);
+void ath12k_mhi_unregister(struct ath12k_pci *ar_pci);
+void ath12k_mhi_set_mhictrl_reset(struct ath12k_base *ab);
+void ath12k_mhi_clear_vector(struct ath12k_base *ab);
+
+void ath12k_mhi_suspend(struct ath12k_pci *ar_pci);
+void ath12k_mhi_resume(struct ath12k_pci *ar_pci);
+
+#endif
diff --git a/drivers/net/wireless/ath/ath12k/pci.c b/drivers/net/wireless/ath/ath12k/pci.c
new file mode 100644
index 000000000000..ae7f6083c9fc
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/pci.c
@@ -0,0 +1,1374 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/pci.h>
+
+#include "pci.h"
+#include "core.h"
+#include "hif.h"
+#include "mhi.h"
+#include "debug.h"
+
+#define ATH12K_PCI_BAR_NUM 0
+#define ATH12K_PCI_DMA_MASK 32
+
+#define ATH12K_PCI_IRQ_CE0_OFFSET 3
+
+#define WINDOW_ENABLE_BIT 0x40000000
+#define WINDOW_REG_ADDRESS 0x310c
+#define WINDOW_VALUE_MASK GENMASK(24, 19)
+#define WINDOW_START 0x80000
+#define WINDOW_RANGE_MASK GENMASK(18, 0)
+#define WINDOW_STATIC_MASK GENMASK(31, 6)
+
+#define TCSR_SOC_HW_VERSION 0x1B00000
+#define TCSR_SOC_HW_VERSION_MAJOR_MASK GENMASK(11, 8)
+#define TCSR_SOC_HW_VERSION_MINOR_MASK GENMASK(7, 4)
+
+/* BAR0 + 4k is always accessible, and no
+ * need to force wakeup.
+ * 4K - 32 = 0xFE0
+ */
+#define ACCESS_ALWAYS_OFF 0xFE0
+
+#define QCN9274_DEVICE_ID 0x1109
+#define WCN7850_DEVICE_ID 0x1107
+
+static const struct pci_device_id ath12k_pci_id_table[] = {
+ { PCI_VDEVICE(QCOM, QCN9274_DEVICE_ID) },
+ { PCI_VDEVICE(QCOM, WCN7850_DEVICE_ID) },
+ {0}
+};
+
+MODULE_DEVICE_TABLE(pci, ath12k_pci_id_table);
+
+/* TODO: revisit IRQ mapping for new SRNG's */
+static const struct ath12k_msi_config ath12k_msi_config[] = {
+ {
+ .total_vectors = 16,
+ .total_users = 3,
+ .users = (struct ath12k_msi_user[]) {
+ { .name = "MHI", .num_vectors = 3, .base_vector = 0 },
+ { .name = "CE", .num_vectors = 5, .base_vector = 3 },
+ { .name = "DP", .num_vectors = 8, .base_vector = 8 },
+ },
+ },
+};
+
+static const char *irq_name[ATH12K_IRQ_NUM_MAX] = {
+ "bhi",
+ "mhi-er0",
+ "mhi-er1",
+ "ce0",
+ "ce1",
+ "ce2",
+ "ce3",
+ "ce4",
+ "ce5",
+ "ce6",
+ "ce7",
+ "ce8",
+ "ce9",
+ "ce10",
+ "ce11",
+ "ce12",
+ "ce13",
+ "ce14",
+ "ce15",
+ "host2wbm-desc-feed",
+ "host2reo-re-injection",
+ "host2reo-command",
+ "host2rxdma-monitor-ring3",
+ "host2rxdma-monitor-ring2",
+ "host2rxdma-monitor-ring1",
+ "reo2ost-exception",
+ "wbm2host-rx-release",
+ "reo2host-status",
+ "reo2host-destination-ring4",
+ "reo2host-destination-ring3",
+ "reo2host-destination-ring2",
+ "reo2host-destination-ring1",
+ "rxdma2host-monitor-destination-mac3",
+ "rxdma2host-monitor-destination-mac2",
+ "rxdma2host-monitor-destination-mac1",
+ "ppdu-end-interrupts-mac3",
+ "ppdu-end-interrupts-mac2",
+ "ppdu-end-interrupts-mac1",
+ "rxdma2host-monitor-status-ring-mac3",
+ "rxdma2host-monitor-status-ring-mac2",
+ "rxdma2host-monitor-status-ring-mac1",
+ "host2rxdma-host-buf-ring-mac3",
+ "host2rxdma-host-buf-ring-mac2",
+ "host2rxdma-host-buf-ring-mac1",
+ "rxdma2host-destination-ring-mac3",
+ "rxdma2host-destination-ring-mac2",
+ "rxdma2host-destination-ring-mac1",
+ "host2tcl-input-ring4",
+ "host2tcl-input-ring3",
+ "host2tcl-input-ring2",
+ "host2tcl-input-ring1",
+ "wbm2host-tx-completions-ring4",
+ "wbm2host-tx-completions-ring3",
+ "wbm2host-tx-completions-ring2",
+ "wbm2host-tx-completions-ring1",
+ "tcl2host-status-ring",
+};
+
+static void ath12k_pci_select_window(struct ath12k_pci *ab_pci, u32 offset)
+{
+ struct ath12k_base *ab = ab_pci->ab;
+
+ u32 window = u32_get_bits(offset, WINDOW_VALUE_MASK);
+ u32 static_window;
+
+ lockdep_assert_held(&ab_pci->window_lock);
+
+ /* Preserve the static window configuration and reset only dynamic window */
+ static_window = ab_pci->register_window & WINDOW_STATIC_MASK;
+ window |= static_window;
+
+ if (window != ab_pci->register_window) {
+ iowrite32(WINDOW_ENABLE_BIT | window,
+ ab->mem + WINDOW_REG_ADDRESS);
+ ioread32(ab->mem + WINDOW_REG_ADDRESS);
+ ab_pci->register_window = window;
+ }
+}
+
+static void ath12k_pci_select_static_window(struct ath12k_pci *ab_pci)
+{
+ u32 umac_window = u32_get_bits(HAL_SEQ_WCSS_UMAC_OFFSET, WINDOW_VALUE_MASK);
+ u32 ce_window = u32_get_bits(HAL_CE_WFSS_CE_REG_BASE, WINDOW_VALUE_MASK);
+ u32 window;
+
+ window = (umac_window << 12) | (ce_window << 6);
+
+ spin_lock_bh(&ab_pci->window_lock);
+ ab_pci->register_window = window;
+ spin_unlock_bh(&ab_pci->window_lock);
+
+ iowrite32(WINDOW_ENABLE_BIT | window, ab_pci->ab->mem + WINDOW_REG_ADDRESS);
+}
+
+static u32 ath12k_pci_get_window_start(struct ath12k_base *ab,
+ u32 offset)
+{
+ u32 window_start;
+
+ /* If offset lies within DP register range, use 3rd window */
+ if ((offset ^ HAL_SEQ_WCSS_UMAC_OFFSET) < WINDOW_RANGE_MASK)
+ window_start = 3 * WINDOW_START;
+ /* If offset lies within CE register range, use 2nd window */
+ else if ((offset ^ HAL_CE_WFSS_CE_REG_BASE) < WINDOW_RANGE_MASK)
+ window_start = 2 * WINDOW_START;
+ /* If offset lies within PCI_BAR_WINDOW0_BASE and within PCI_SOC_PCI_REG_BASE
+ * use 0th window
+ */
+ else if (((offset ^ PCI_BAR_WINDOW0_BASE) < WINDOW_RANGE_MASK) &&
+ !((offset ^ PCI_SOC_PCI_REG_BASE) < PCI_SOC_RANGE_MASK))
+ window_start = 0;
+ else
+ window_start = WINDOW_START;
+
+ return window_start;
+}
+
+static void ath12k_pci_soc_global_reset(struct ath12k_base *ab)
+{
+ u32 val, delay;
+
+ val = ath12k_pci_read32(ab, PCIE_SOC_GLOBAL_RESET);
+
+ val |= PCIE_SOC_GLOBAL_RESET_V;
+
+ ath12k_pci_write32(ab, PCIE_SOC_GLOBAL_RESET, val);
+
+ /* TODO: exact time to sleep is uncertain */
+ delay = 10;
+ mdelay(delay);
+
+ /* Need to toggle V bit back otherwise stuck in reset status */
+ val &= ~PCIE_SOC_GLOBAL_RESET_V;
+
+ ath12k_pci_write32(ab, PCIE_SOC_GLOBAL_RESET, val);
+
+ mdelay(delay);
+
+ val = ath12k_pci_read32(ab, PCIE_SOC_GLOBAL_RESET);
+ if (val == 0xffffffff)
+ ath12k_warn(ab, "link down error during global reset\n");
+}
+
+static void ath12k_pci_clear_dbg_registers(struct ath12k_base *ab)
+{
+ u32 val;
+
+ /* read cookie */
+ val = ath12k_pci_read32(ab, PCIE_Q6_COOKIE_ADDR);
+ ath12k_dbg(ab, ATH12K_DBG_PCI, "cookie:0x%x\n", val);
+
+ val = ath12k_pci_read32(ab, WLAON_WARM_SW_ENTRY);
+ ath12k_dbg(ab, ATH12K_DBG_PCI, "WLAON_WARM_SW_ENTRY 0x%x\n", val);
+
+ /* TODO: exact time to sleep is uncertain */
+ mdelay(10);
+
+ /* write 0 to WLAON_WARM_SW_ENTRY to prevent Q6 from
+ * continuing warm path and entering dead loop.
+ */
+ ath12k_pci_write32(ab, WLAON_WARM_SW_ENTRY, 0);
+ mdelay(10);
+
+ val = ath12k_pci_read32(ab, WLAON_WARM_SW_ENTRY);
+ ath12k_dbg(ab, ATH12K_DBG_PCI, "WLAON_WARM_SW_ENTRY 0x%x\n", val);
+
+ /* A read clear register. clear the register to prevent
+ * Q6 from entering wrong code path.
+ */
+ val = ath12k_pci_read32(ab, WLAON_SOC_RESET_CAUSE_REG);
+ ath12k_dbg(ab, ATH12K_DBG_PCI, "soc reset cause:%d\n", val);
+}
+
+static void ath12k_pci_enable_ltssm(struct ath12k_base *ab)
+{
+ u32 val;
+ int i;
+
+ val = ath12k_pci_read32(ab, PCIE_PCIE_PARF_LTSSM);
+
+ /* PCIE link seems very unstable after the Hot Reset*/
+ for (i = 0; val != PARM_LTSSM_VALUE && i < 5; i++) {
+ if (val == 0xffffffff)
+ mdelay(5);
+
+ ath12k_pci_write32(ab, PCIE_PCIE_PARF_LTSSM, PARM_LTSSM_VALUE);
+ val = ath12k_pci_read32(ab, PCIE_PCIE_PARF_LTSSM);
+ }
+
+ ath12k_dbg(ab, ATH12K_DBG_PCI, "pci ltssm 0x%x\n", val);
+
+ val = ath12k_pci_read32(ab, GCC_GCC_PCIE_HOT_RST);
+ val |= GCC_GCC_PCIE_HOT_RST_VAL;
+ ath12k_pci_write32(ab, GCC_GCC_PCIE_HOT_RST, val);
+ val = ath12k_pci_read32(ab, GCC_GCC_PCIE_HOT_RST);
+
+ ath12k_dbg(ab, ATH12K_DBG_PCI, "pci pcie_hot_rst 0x%x\n", val);
+
+ mdelay(5);
+}
+
+static void ath12k_pci_clear_all_intrs(struct ath12k_base *ab)
+{
+ /* This is a WAR for PCIE Hotreset.
+ * When target receive Hotreset, but will set the interrupt.
+ * So when download SBL again, SBL will open Interrupt and
+ * receive it, and crash immediately.
+ */
+ ath12k_pci_write32(ab, PCIE_PCIE_INT_ALL_CLEAR, PCIE_INT_CLEAR_ALL);
+}
+
+static void ath12k_pci_set_wlaon_pwr_ctrl(struct ath12k_base *ab)
+{
+ u32 val;
+
+ val = ath12k_pci_read32(ab, WLAON_QFPROM_PWR_CTRL_REG);
+ val &= ~QFPROM_PWR_CTRL_VDD4BLOW_MASK;
+ ath12k_pci_write32(ab, WLAON_QFPROM_PWR_CTRL_REG, val);
+}
+
+static void ath12k_pci_force_wake(struct ath12k_base *ab)
+{
+ ath12k_pci_write32(ab, PCIE_SOC_WAKE_PCIE_LOCAL_REG, 1);
+ mdelay(5);
+}
+
+static void ath12k_pci_sw_reset(struct ath12k_base *ab, bool power_on)
+{
+ if (power_on) {
+ ath12k_pci_enable_ltssm(ab);
+ ath12k_pci_clear_all_intrs(ab);
+ ath12k_pci_set_wlaon_pwr_ctrl(ab);
+ }
+
+ ath12k_mhi_clear_vector(ab);
+ ath12k_pci_clear_dbg_registers(ab);
+ ath12k_pci_soc_global_reset(ab);
+ ath12k_mhi_set_mhictrl_reset(ab);
+}
+
+static void ath12k_pci_free_ext_irq(struct ath12k_base *ab)
+{
+ int i, j;
+
+ for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
+ struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
+
+ for (j = 0; j < irq_grp->num_irq; j++)
+ free_irq(ab->irq_num[irq_grp->irqs[j]], irq_grp);
+
+ netif_napi_del(&irq_grp->napi);
+ }
+}
+
+static void ath12k_pci_free_irq(struct ath12k_base *ab)
+{
+ int i, irq_idx;
+
+ for (i = 0; i < ab->hw_params->ce_count; i++) {
+ if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
+ continue;
+ irq_idx = ATH12K_PCI_IRQ_CE0_OFFSET + i;
+ free_irq(ab->irq_num[irq_idx], &ab->ce.ce_pipe[i]);
+ }
+
+ ath12k_pci_free_ext_irq(ab);
+}
+
+static void ath12k_pci_ce_irq_enable(struct ath12k_base *ab, u16 ce_id)
+{
+ u32 irq_idx;
+
+ irq_idx = ATH12K_PCI_IRQ_CE0_OFFSET + ce_id;
+ enable_irq(ab->irq_num[irq_idx]);
+}
+
+static void ath12k_pci_ce_irq_disable(struct ath12k_base *ab, u16 ce_id)
+{
+ u32 irq_idx;
+
+ irq_idx = ATH12K_PCI_IRQ_CE0_OFFSET + ce_id;
+ disable_irq_nosync(ab->irq_num[irq_idx]);
+}
+
+static void ath12k_pci_ce_irqs_disable(struct ath12k_base *ab)
+{
+ int i;
+
+ for (i = 0; i < ab->hw_params->ce_count; i++) {
+ if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
+ continue;
+ ath12k_pci_ce_irq_disable(ab, i);
+ }
+}
+
+static void ath12k_pci_sync_ce_irqs(struct ath12k_base *ab)
+{
+ int i;
+ int irq_idx;
+
+ for (i = 0; i < ab->hw_params->ce_count; i++) {
+ if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
+ continue;
+
+ irq_idx = ATH12K_PCI_IRQ_CE0_OFFSET + i;
+ synchronize_irq(ab->irq_num[irq_idx]);
+ }
+}
+
+static void ath12k_pci_ce_tasklet(struct tasklet_struct *t)
+{
+ struct ath12k_ce_pipe *ce_pipe = from_tasklet(ce_pipe, t, intr_tq);
+
+ ath12k_ce_per_engine_service(ce_pipe->ab, ce_pipe->pipe_num);
+
+ ath12k_pci_ce_irq_enable(ce_pipe->ab, ce_pipe->pipe_num);
+}
+
+static irqreturn_t ath12k_pci_ce_interrupt_handler(int irq, void *arg)
+{
+ struct ath12k_ce_pipe *ce_pipe = arg;
+
+ /* last interrupt received for this CE */
+ ce_pipe->timestamp = jiffies;
+
+ ath12k_pci_ce_irq_disable(ce_pipe->ab, ce_pipe->pipe_num);
+ tasklet_schedule(&ce_pipe->intr_tq);
+
+ return IRQ_HANDLED;
+}
+
+static void ath12k_pci_ext_grp_disable(struct ath12k_ext_irq_grp *irq_grp)
+{
+ int i;
+
+ for (i = 0; i < irq_grp->num_irq; i++)
+ disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
+}
+
+static void __ath12k_pci_ext_irq_disable(struct ath12k_base *sc)
+{
+ int i;
+
+ for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
+ struct ath12k_ext_irq_grp *irq_grp = &sc->ext_irq_grp[i];
+
+ ath12k_pci_ext_grp_disable(irq_grp);
+
+ napi_synchronize(&irq_grp->napi);
+ napi_disable(&irq_grp->napi);
+ }
+}
+
+static void ath12k_pci_ext_grp_enable(struct ath12k_ext_irq_grp *irq_grp)
+{
+ int i;
+
+ for (i = 0; i < irq_grp->num_irq; i++)
+ enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
+}
+
+static void ath12k_pci_sync_ext_irqs(struct ath12k_base *ab)
+{
+ int i, j, irq_idx;
+
+ for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
+ struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
+
+ for (j = 0; j < irq_grp->num_irq; j++) {
+ irq_idx = irq_grp->irqs[j];
+ synchronize_irq(ab->irq_num[irq_idx]);
+ }
+ }
+}
+
+static int ath12k_pci_ext_grp_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct ath12k_ext_irq_grp *irq_grp = container_of(napi,
+ struct ath12k_ext_irq_grp,
+ napi);
+ struct ath12k_base *ab = irq_grp->ab;
+ int work_done;
+
+ work_done = ath12k_dp_service_srng(ab, irq_grp, budget);
+ if (work_done < budget) {
+ napi_complete_done(napi, work_done);
+ ath12k_pci_ext_grp_enable(irq_grp);
+ }
+
+ if (work_done > budget)
+ work_done = budget;
+
+ return work_done;
+}
+
+static irqreturn_t ath12k_pci_ext_interrupt_handler(int irq, void *arg)
+{
+ struct ath12k_ext_irq_grp *irq_grp = arg;
+
+ ath12k_dbg(irq_grp->ab, ATH12K_DBG_PCI, "ext irq:%d\n", irq);
+
+ /* last interrupt received for this group */
+ irq_grp->timestamp = jiffies;
+
+ ath12k_pci_ext_grp_disable(irq_grp);
+
+ napi_schedule(&irq_grp->napi);
+
+ return IRQ_HANDLED;
+}
+
+static int ath12k_pci_ext_irq_config(struct ath12k_base *ab)
+{
+ int i, j, ret, num_vectors = 0;
+ u32 user_base_data = 0, base_vector = 0, base_idx;
+
+ base_idx = ATH12K_PCI_IRQ_CE0_OFFSET + CE_COUNT_MAX;
+ ret = ath12k_pci_get_user_msi_assignment(ab, "DP",
+ &num_vectors,
+ &user_base_data,
+ &base_vector);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
+ struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
+ u32 num_irq = 0;
+
+ irq_grp->ab = ab;
+ irq_grp->grp_id = i;
+ init_dummy_netdev(&irq_grp->napi_ndev);
+ netif_napi_add(&irq_grp->napi_ndev, &irq_grp->napi,
+ ath12k_pci_ext_grp_napi_poll);
+
+ if (ab->hw_params->ring_mask->tx[i] ||
+ ab->hw_params->ring_mask->rx[i] ||
+ ab->hw_params->ring_mask->rx_err[i] ||
+ ab->hw_params->ring_mask->rx_wbm_rel[i] ||
+ ab->hw_params->ring_mask->reo_status[i] ||
+ ab->hw_params->ring_mask->host2rxdma[i] ||
+ ab->hw_params->ring_mask->rx_mon_dest[i]) {
+ num_irq = 1;
+ }
+
+ irq_grp->num_irq = num_irq;
+ irq_grp->irqs[0] = base_idx + i;
+
+ for (j = 0; j < irq_grp->num_irq; j++) {
+ int irq_idx = irq_grp->irqs[j];
+ int vector = (i % num_vectors) + base_vector;
+ int irq = ath12k_pci_get_msi_irq(ab->dev, vector);
+
+ ab->irq_num[irq_idx] = irq;
+
+ ath12k_dbg(ab, ATH12K_DBG_PCI,
+ "irq:%d group:%d\n", irq, i);
+
+ irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);
+ ret = request_irq(irq, ath12k_pci_ext_interrupt_handler,
+ IRQF_SHARED,
+ "DP_EXT_IRQ", irq_grp);
+ if (ret) {
+ ath12k_err(ab, "failed request irq %d: %d\n",
+ vector, ret);
+ return ret;
+ }
+
+ disable_irq_nosync(ab->irq_num[irq_idx]);
+ }
+ }
+
+ return 0;
+}
+
+static int ath12k_pci_config_irq(struct ath12k_base *ab)
+{
+ struct ath12k_ce_pipe *ce_pipe;
+ u32 msi_data_start;
+ u32 msi_data_count, msi_data_idx;
+ u32 msi_irq_start;
+ unsigned int msi_data;
+ int irq, i, ret, irq_idx;
+
+ ret = ath12k_pci_get_user_msi_assignment(ab,
+ "CE", &msi_data_count,
+ &msi_data_start, &msi_irq_start);
+ if (ret)
+ return ret;
+
+ /* Configure CE irqs */
+
+ for (i = 0, msi_data_idx = 0; i < ab->hw_params->ce_count; i++) {
+ if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
+ continue;
+
+ msi_data = (msi_data_idx % msi_data_count) + msi_irq_start;
+ irq = ath12k_pci_get_msi_irq(ab->dev, msi_data);
+ ce_pipe = &ab->ce.ce_pipe[i];
+
+ irq_idx = ATH12K_PCI_IRQ_CE0_OFFSET + i;
+
+ tasklet_setup(&ce_pipe->intr_tq, ath12k_pci_ce_tasklet);
+
+ ret = request_irq(irq, ath12k_pci_ce_interrupt_handler,
+ IRQF_SHARED, irq_name[irq_idx],
+ ce_pipe);
+ if (ret) {
+ ath12k_err(ab, "failed to request irq %d: %d\n",
+ irq_idx, ret);
+ return ret;
+ }
+
+ ab->irq_num[irq_idx] = irq;
+ msi_data_idx++;
+
+ ath12k_pci_ce_irq_disable(ab, i);
+ }
+
+ ret = ath12k_pci_ext_irq_config(ab);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void ath12k_pci_init_qmi_ce_config(struct ath12k_base *ab)
+{
+ struct ath12k_qmi_ce_cfg *cfg = &ab->qmi.ce_cfg;
+
+ cfg->tgt_ce = ab->hw_params->target_ce_config;
+ cfg->tgt_ce_len = ab->hw_params->target_ce_count;
+
+ cfg->svc_to_ce_map = ab->hw_params->svc_to_ce_map;
+ cfg->svc_to_ce_map_len = ab->hw_params->svc_to_ce_map_len;
+ ab->qmi.service_ins_id = ab->hw_params->qmi_service_ins_id;
+}
+
+static void ath12k_pci_ce_irqs_enable(struct ath12k_base *ab)
+{
+ int i;
+
+ for (i = 0; i < ab->hw_params->ce_count; i++) {
+ if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
+ continue;
+ ath12k_pci_ce_irq_enable(ab, i);
+ }
+}
+
+static void ath12k_pci_msi_config(struct ath12k_pci *ab_pci, bool enable)
+{
+ struct pci_dev *dev = ab_pci->pdev;
+ u16 control;
+
+ pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control);
+
+ if (enable)
+ control |= PCI_MSI_FLAGS_ENABLE;
+ else
+ control &= ~PCI_MSI_FLAGS_ENABLE;
+
+ pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, control);
+}
+
+static void ath12k_pci_msi_enable(struct ath12k_pci *ab_pci)
+{
+ ath12k_pci_msi_config(ab_pci, true);
+}
+
+static void ath12k_pci_msi_disable(struct ath12k_pci *ab_pci)
+{
+ ath12k_pci_msi_config(ab_pci, false);
+}
+
+static int ath12k_pci_msi_alloc(struct ath12k_pci *ab_pci)
+{
+ struct ath12k_base *ab = ab_pci->ab;
+ const struct ath12k_msi_config *msi_config = ab_pci->msi_config;
+ struct msi_desc *msi_desc;
+ int num_vectors;
+ int ret;
+
+ num_vectors = pci_alloc_irq_vectors(ab_pci->pdev,
+ msi_config->total_vectors,
+ msi_config->total_vectors,
+ PCI_IRQ_MSI);
+ if (num_vectors != msi_config->total_vectors) {
+ ath12k_err(ab, "failed to get %d MSI vectors, only %d available",
+ msi_config->total_vectors, num_vectors);
+
+ if (num_vectors >= 0)
+ return -EINVAL;
+ else
+ return num_vectors;
+ }
+
+ ath12k_pci_msi_disable(ab_pci);
+
+ msi_desc = irq_get_msi_desc(ab_pci->pdev->irq);
+ if (!msi_desc) {
+ ath12k_err(ab, "msi_desc is NULL!\n");
+ ret = -EINVAL;
+ goto free_msi_vector;
+ }
+
+ ab_pci->msi_ep_base_data = msi_desc->msg.data;
+ if (msi_desc->pci.msi_attrib.is_64)
+ set_bit(ATH12K_PCI_FLAG_IS_MSI_64, &ab_pci->flags);
+
+ ath12k_dbg(ab, ATH12K_DBG_PCI, "msi base data is %d\n", ab_pci->msi_ep_base_data);
+
+ return 0;
+
+free_msi_vector:
+ pci_free_irq_vectors(ab_pci->pdev);
+
+ return ret;
+}
+
+static void ath12k_pci_msi_free(struct ath12k_pci *ab_pci)
+{
+ pci_free_irq_vectors(ab_pci->pdev);
+}
+
+static int ath12k_pci_claim(struct ath12k_pci *ab_pci, struct pci_dev *pdev)
+{
+ struct ath12k_base *ab = ab_pci->ab;
+ u16 device_id;
+ int ret = 0;
+
+ pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id);
+ if (device_id != ab_pci->dev_id) {
+ ath12k_err(ab, "pci device id mismatch: 0x%x 0x%x\n",
+ device_id, ab_pci->dev_id);
+ ret = -EIO;
+ goto out;
+ }
+
+ ret = pci_assign_resource(pdev, ATH12K_PCI_BAR_NUM);
+ if (ret) {
+ ath12k_err(ab, "failed to assign pci resource: %d\n", ret);
+ goto out;
+ }
+
+ ret = pci_enable_device(pdev);
+ if (ret) {
+ ath12k_err(ab, "failed to enable pci device: %d\n", ret);
+ goto out;
+ }
+
+ ret = pci_request_region(pdev, ATH12K_PCI_BAR_NUM, "ath12k_pci");
+ if (ret) {
+ ath12k_err(ab, "failed to request pci region: %d\n", ret);
+ goto disable_device;
+ }
+
+ ret = dma_set_mask_and_coherent(&pdev->dev,
+ DMA_BIT_MASK(ATH12K_PCI_DMA_MASK));
+ if (ret) {
+ ath12k_err(ab, "failed to set pci dma mask to %d: %d\n",
+ ATH12K_PCI_DMA_MASK, ret);
+ goto release_region;
+ }
+
+ pci_set_master(pdev);
+
+ ab->mem_len = pci_resource_len(pdev, ATH12K_PCI_BAR_NUM);
+ ab->mem = pci_iomap(pdev, ATH12K_PCI_BAR_NUM, 0);
+ if (!ab->mem) {
+ ath12k_err(ab, "failed to map pci bar %d\n", ATH12K_PCI_BAR_NUM);
+ ret = -EIO;
+ goto clear_master;
+ }
+
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot pci_mem 0x%pK\n", ab->mem);
+ return 0;
+
+clear_master:
+ pci_clear_master(pdev);
+release_region:
+ pci_release_region(pdev, ATH12K_PCI_BAR_NUM);
+disable_device:
+ pci_disable_device(pdev);
+out:
+ return ret;
+}
+
+static void ath12k_pci_free_region(struct ath12k_pci *ab_pci)
+{
+ struct ath12k_base *ab = ab_pci->ab;
+ struct pci_dev *pci_dev = ab_pci->pdev;
+
+ pci_iounmap(pci_dev, ab->mem);
+ ab->mem = NULL;
+ pci_clear_master(pci_dev);
+ pci_release_region(pci_dev, ATH12K_PCI_BAR_NUM);
+ if (pci_is_enabled(pci_dev))
+ pci_disable_device(pci_dev);
+}
+
+static void ath12k_pci_aspm_disable(struct ath12k_pci *ab_pci)
+{
+ struct ath12k_base *ab = ab_pci->ab;
+
+ pcie_capability_read_word(ab_pci->pdev, PCI_EXP_LNKCTL,
+ &ab_pci->link_ctl);
+
+ ath12k_dbg(ab, ATH12K_DBG_PCI, "pci link_ctl 0x%04x L0s %d L1 %d\n",
+ ab_pci->link_ctl,
+ u16_get_bits(ab_pci->link_ctl, PCI_EXP_LNKCTL_ASPM_L0S),
+ u16_get_bits(ab_pci->link_ctl, PCI_EXP_LNKCTL_ASPM_L1));
+
+ /* disable L0s and L1 */
+ pcie_capability_write_word(ab_pci->pdev, PCI_EXP_LNKCTL,
+ ab_pci->link_ctl & ~PCI_EXP_LNKCTL_ASPMC);
+
+ set_bit(ATH12K_PCI_ASPM_RESTORE, &ab_pci->flags);
+}
+
+static void ath12k_pci_aspm_restore(struct ath12k_pci *ab_pci)
+{
+ if (test_and_clear_bit(ATH12K_PCI_ASPM_RESTORE, &ab_pci->flags))
+ pcie_capability_write_word(ab_pci->pdev, PCI_EXP_LNKCTL,
+ ab_pci->link_ctl);
+}
+
+static void ath12k_pci_kill_tasklets(struct ath12k_base *ab)
+{
+ int i;
+
+ for (i = 0; i < ab->hw_params->ce_count; i++) {
+ struct ath12k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i];
+
+ if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
+ continue;
+
+ tasklet_kill(&ce_pipe->intr_tq);
+ }
+}
+
+static void ath12k_pci_ce_irq_disable_sync(struct ath12k_base *ab)
+{
+ ath12k_pci_ce_irqs_disable(ab);
+ ath12k_pci_sync_ce_irqs(ab);
+ ath12k_pci_kill_tasklets(ab);
+}
+
+int ath12k_pci_map_service_to_pipe(struct ath12k_base *ab, u16 service_id,
+ u8 *ul_pipe, u8 *dl_pipe)
+{
+ const struct service_to_pipe *entry;
+ bool ul_set = false, dl_set = false;
+ int i;
+
+ for (i = 0; i < ab->hw_params->svc_to_ce_map_len; i++) {
+ entry = &ab->hw_params->svc_to_ce_map[i];
+
+ if (__le32_to_cpu(entry->service_id) != service_id)
+ continue;
+
+ switch (__le32_to_cpu(entry->pipedir)) {
+ case PIPEDIR_NONE:
+ break;
+ case PIPEDIR_IN:
+ WARN_ON(dl_set);
+ *dl_pipe = __le32_to_cpu(entry->pipenum);
+ dl_set = true;
+ break;
+ case PIPEDIR_OUT:
+ WARN_ON(ul_set);
+ *ul_pipe = __le32_to_cpu(entry->pipenum);
+ ul_set = true;
+ break;
+ case PIPEDIR_INOUT:
+ WARN_ON(dl_set);
+ WARN_ON(ul_set);
+ *dl_pipe = __le32_to_cpu(entry->pipenum);
+ *ul_pipe = __le32_to_cpu(entry->pipenum);
+ dl_set = true;
+ ul_set = true;
+ break;
+ }
+ }
+
+ if (WARN_ON(!ul_set || !dl_set))
+ return -ENOENT;
+
+ return 0;
+}
+
+int ath12k_pci_get_msi_irq(struct device *dev, unsigned int vector)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+
+ return pci_irq_vector(pci_dev, vector);
+}
+
+int ath12k_pci_get_user_msi_assignment(struct ath12k_base *ab, char *user_name,
+ int *num_vectors, u32 *user_base_data,
+ u32 *base_vector)
+{
+ struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
+ const struct ath12k_msi_config *msi_config = ab_pci->msi_config;
+ int idx;
+
+ for (idx = 0; idx < msi_config->total_users; idx++) {
+ if (strcmp(user_name, msi_config->users[idx].name) == 0) {
+ *num_vectors = msi_config->users[idx].num_vectors;
+ *user_base_data = msi_config->users[idx].base_vector
+ + ab_pci->msi_ep_base_data;
+ *base_vector = msi_config->users[idx].base_vector;
+
+ ath12k_dbg(ab, ATH12K_DBG_PCI, "Assign MSI to user: %s, num_vectors: %d, user_base_data: %u, base_vector: %u\n",
+ user_name, *num_vectors, *user_base_data,
+ *base_vector);
+
+ return 0;
+ }
+ }
+
+ ath12k_err(ab, "Failed to find MSI assignment for %s!\n", user_name);
+
+ return -EINVAL;
+}
+
+void ath12k_pci_get_msi_address(struct ath12k_base *ab, u32 *msi_addr_lo,
+ u32 *msi_addr_hi)
+{
+ struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
+ struct pci_dev *pci_dev = to_pci_dev(ab->dev);
+
+ pci_read_config_dword(pci_dev, pci_dev->msi_cap + PCI_MSI_ADDRESS_LO,
+ msi_addr_lo);
+
+ if (test_bit(ATH12K_PCI_FLAG_IS_MSI_64, &ab_pci->flags)) {
+ pci_read_config_dword(pci_dev, pci_dev->msi_cap + PCI_MSI_ADDRESS_HI,
+ msi_addr_hi);
+ } else {
+ *msi_addr_hi = 0;
+ }
+}
+
+void ath12k_pci_get_ce_msi_idx(struct ath12k_base *ab, u32 ce_id,
+ u32 *msi_idx)
+{
+ u32 i, msi_data_idx;
+
+ for (i = 0, msi_data_idx = 0; i < ab->hw_params->ce_count; i++) {
+ if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
+ continue;
+
+ if (ce_id == i)
+ break;
+
+ msi_data_idx++;
+ }
+ *msi_idx = msi_data_idx;
+}
+
+void ath12k_pci_hif_ce_irq_enable(struct ath12k_base *ab)
+{
+ ath12k_pci_ce_irqs_enable(ab);
+}
+
+void ath12k_pci_hif_ce_irq_disable(struct ath12k_base *ab)
+{
+ ath12k_pci_ce_irq_disable_sync(ab);
+}
+
+void ath12k_pci_ext_irq_enable(struct ath12k_base *ab)
+{
+ int i;
+
+ for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
+ struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
+
+ napi_enable(&irq_grp->napi);
+ ath12k_pci_ext_grp_enable(irq_grp);
+ }
+}
+
+void ath12k_pci_ext_irq_disable(struct ath12k_base *ab)
+{
+ __ath12k_pci_ext_irq_disable(ab);
+ ath12k_pci_sync_ext_irqs(ab);
+}
+
+int ath12k_pci_hif_suspend(struct ath12k_base *ab)
+{
+ struct ath12k_pci *ar_pci = ath12k_pci_priv(ab);
+
+ ath12k_mhi_suspend(ar_pci);
+
+ return 0;
+}
+
+int ath12k_pci_hif_resume(struct ath12k_base *ab)
+{
+ struct ath12k_pci *ar_pci = ath12k_pci_priv(ab);
+
+ ath12k_mhi_resume(ar_pci);
+
+ return 0;
+}
+
+void ath12k_pci_stop(struct ath12k_base *ab)
+{
+ ath12k_pci_ce_irq_disable_sync(ab);
+ ath12k_ce_cleanup_pipes(ab);
+}
+
+int ath12k_pci_start(struct ath12k_base *ab)
+{
+ struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
+
+ set_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags);
+
+ ath12k_pci_aspm_restore(ab_pci);
+
+ ath12k_pci_ce_irqs_enable(ab);
+ ath12k_ce_rx_post_buf(ab);
+
+ return 0;
+}
+
+u32 ath12k_pci_read32(struct ath12k_base *ab, u32 offset)
+{
+ struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
+ u32 val, window_start;
+
+ /* for offset beyond BAR + 4K - 32, may
+ * need to wakeup MHI to access.
+ */
+ if (test_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags) &&
+ offset >= ACCESS_ALWAYS_OFF)
+ mhi_device_get_sync(ab_pci->mhi_ctrl->mhi_dev);
+
+ if (offset < WINDOW_START) {
+ val = ioread32(ab->mem + offset);
+ } else {
+ if (ab->static_window_map)
+ window_start = ath12k_pci_get_window_start(ab, offset);
+ else
+ window_start = WINDOW_START;
+
+ if (window_start == WINDOW_START) {
+ spin_lock_bh(&ab_pci->window_lock);
+ ath12k_pci_select_window(ab_pci, offset);
+ val = ioread32(ab->mem + window_start +
+ (offset & WINDOW_RANGE_MASK));
+ spin_unlock_bh(&ab_pci->window_lock);
+ } else {
+ if ((!window_start) &&
+ (offset >= PCI_MHIREGLEN_REG &&
+ offset <= PCI_MHI_REGION_END))
+ offset = offset - PCI_MHIREGLEN_REG;
+
+ val = ioread32(ab->mem + window_start +
+ (offset & WINDOW_RANGE_MASK));
+ }
+ }
+
+ if (test_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags) &&
+ offset >= ACCESS_ALWAYS_OFF)
+ mhi_device_put(ab_pci->mhi_ctrl->mhi_dev);
+
+ return val;
+}
+
+void ath12k_pci_write32(struct ath12k_base *ab, u32 offset, u32 value)
+{
+ struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
+ u32 window_start;
+
+ /* for offset beyond BAR + 4K - 32, may
+ * need to wakeup MHI to access.
+ */
+ if (test_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags) &&
+ offset >= ACCESS_ALWAYS_OFF)
+ mhi_device_get_sync(ab_pci->mhi_ctrl->mhi_dev);
+
+ if (offset < WINDOW_START) {
+ iowrite32(value, ab->mem + offset);
+ } else {
+ if (ab->static_window_map)
+ window_start = ath12k_pci_get_window_start(ab, offset);
+ else
+ window_start = WINDOW_START;
+
+ if (window_start == WINDOW_START) {
+ spin_lock_bh(&ab_pci->window_lock);
+ ath12k_pci_select_window(ab_pci, offset);
+ iowrite32(value, ab->mem + window_start +
+ (offset & WINDOW_RANGE_MASK));
+ spin_unlock_bh(&ab_pci->window_lock);
+ } else {
+ if ((!window_start) &&
+ (offset >= PCI_MHIREGLEN_REG &&
+ offset <= PCI_MHI_REGION_END))
+ offset = offset - PCI_MHIREGLEN_REG;
+
+ iowrite32(value, ab->mem + window_start +
+ (offset & WINDOW_RANGE_MASK));
+ }
+ }
+
+ if (test_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags) &&
+ offset >= ACCESS_ALWAYS_OFF)
+ mhi_device_put(ab_pci->mhi_ctrl->mhi_dev);
+}
+
+int ath12k_pci_power_up(struct ath12k_base *ab)
+{
+ struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
+ int ret;
+
+ ab_pci->register_window = 0;
+ clear_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags);
+ ath12k_pci_sw_reset(ab_pci->ab, true);
+
+ /* Disable ASPM during firmware download due to problems switching
+ * to AMSS state.
+ */
+ ath12k_pci_aspm_disable(ab_pci);
+
+ ath12k_pci_msi_enable(ab_pci);
+
+ ret = ath12k_mhi_start(ab_pci);
+ if (ret) {
+ ath12k_err(ab, "failed to start mhi: %d\n", ret);
+ return ret;
+ }
+
+ if (ab->static_window_map)
+ ath12k_pci_select_static_window(ab_pci);
+
+ return 0;
+}
+
+void ath12k_pci_power_down(struct ath12k_base *ab)
+{
+ struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
+
+ /* restore aspm in case firmware bootup fails */
+ ath12k_pci_aspm_restore(ab_pci);
+
+ ath12k_pci_force_wake(ab_pci->ab);
+ ath12k_pci_msi_disable(ab_pci);
+ ath12k_mhi_stop(ab_pci);
+ clear_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags);
+ ath12k_pci_sw_reset(ab_pci->ab, false);
+}
+
+static const struct ath12k_hif_ops ath12k_pci_hif_ops = {
+ .start = ath12k_pci_start,
+ .stop = ath12k_pci_stop,
+ .read32 = ath12k_pci_read32,
+ .write32 = ath12k_pci_write32,
+ .power_down = ath12k_pci_power_down,
+ .power_up = ath12k_pci_power_up,
+ .suspend = ath12k_pci_hif_suspend,
+ .resume = ath12k_pci_hif_resume,
+ .irq_enable = ath12k_pci_ext_irq_enable,
+ .irq_disable = ath12k_pci_ext_irq_disable,
+ .get_msi_address = ath12k_pci_get_msi_address,
+ .get_user_msi_vector = ath12k_pci_get_user_msi_assignment,
+ .map_service_to_pipe = ath12k_pci_map_service_to_pipe,
+ .ce_irq_enable = ath12k_pci_hif_ce_irq_enable,
+ .ce_irq_disable = ath12k_pci_hif_ce_irq_disable,
+ .get_ce_msi_idx = ath12k_pci_get_ce_msi_idx,
+};
+
+static
+void ath12k_pci_read_hw_version(struct ath12k_base *ab, u32 *major, u32 *minor)
+{
+ u32 soc_hw_version;
+
+ soc_hw_version = ath12k_pci_read32(ab, TCSR_SOC_HW_VERSION);
+ *major = FIELD_GET(TCSR_SOC_HW_VERSION_MAJOR_MASK,
+ soc_hw_version);
+ *minor = FIELD_GET(TCSR_SOC_HW_VERSION_MINOR_MASK,
+ soc_hw_version);
+
+ ath12k_dbg(ab, ATH12K_DBG_PCI,
+ "pci tcsr_soc_hw_version major %d minor %d\n",
+ *major, *minor);
+}
+
+static int ath12k_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *pci_dev)
+{
+ struct ath12k_base *ab;
+ struct ath12k_pci *ab_pci;
+ u32 soc_hw_version_major, soc_hw_version_minor;
+ int ret;
+
+ ab = ath12k_core_alloc(&pdev->dev, sizeof(*ab_pci), ATH12K_BUS_PCI);
+ if (!ab) {
+ dev_err(&pdev->dev, "failed to allocate ath12k base\n");
+ return -ENOMEM;
+ }
+
+ ab->dev = &pdev->dev;
+ pci_set_drvdata(pdev, ab);
+ ab_pci = ath12k_pci_priv(ab);
+ ab_pci->dev_id = pci_dev->device;
+ ab_pci->ab = ab;
+ ab_pci->pdev = pdev;
+ ab->hif.ops = &ath12k_pci_hif_ops;
+ pci_set_drvdata(pdev, ab);
+ spin_lock_init(&ab_pci->window_lock);
+
+ ret = ath12k_pci_claim(ab_pci, pdev);
+ if (ret) {
+ ath12k_err(ab, "failed to claim device: %d\n", ret);
+ goto err_free_core;
+ }
+
+ switch (pci_dev->device) {
+ case QCN9274_DEVICE_ID:
+ ab_pci->msi_config = &ath12k_msi_config[0];
+ ab->static_window_map = true;
+ ath12k_pci_read_hw_version(ab, &soc_hw_version_major,
+ &soc_hw_version_minor);
+ switch (soc_hw_version_major) {
+ case ATH12K_PCI_SOC_HW_VERSION_2:
+ ab->hw_rev = ATH12K_HW_QCN9274_HW20;
+ break;
+ case ATH12K_PCI_SOC_HW_VERSION_1:
+ ab->hw_rev = ATH12K_HW_QCN9274_HW10;
+ break;
+ default:
+ dev_err(&pdev->dev,
+ "Unknown hardware version found for QCN9274: 0x%x\n",
+ soc_hw_version_major);
+ return -EOPNOTSUPP;
+ }
+ break;
+ case WCN7850_DEVICE_ID:
+ ab_pci->msi_config = &ath12k_msi_config[0];
+ ab->static_window_map = false;
+ ab->hw_rev = ATH12K_HW_WCN7850_HW20;
+ break;
+
+ default:
+ dev_err(&pdev->dev, "Unknown PCI device found: 0x%x\n",
+ pci_dev->device);
+ ret = -EOPNOTSUPP;
+ goto err_pci_free_region;
+ }
+
+ ret = ath12k_pci_msi_alloc(ab_pci);
+ if (ret) {
+ ath12k_err(ab, "failed to alloc msi: %d\n", ret);
+ goto err_pci_free_region;
+ }
+
+ ret = ath12k_core_pre_init(ab);
+ if (ret)
+ goto err_pci_msi_free;
+
+ ret = ath12k_mhi_register(ab_pci);
+ if (ret) {
+ ath12k_err(ab, "failed to register mhi: %d\n", ret);
+ goto err_pci_msi_free;
+ }
+
+ ret = ath12k_hal_srng_init(ab);
+ if (ret)
+ goto err_mhi_unregister;
+
+ ret = ath12k_ce_alloc_pipes(ab);
+ if (ret) {
+ ath12k_err(ab, "failed to allocate ce pipes: %d\n", ret);
+ goto err_hal_srng_deinit;
+ }
+
+ ath12k_pci_init_qmi_ce_config(ab);
+
+ ret = ath12k_pci_config_irq(ab);
+ if (ret) {
+ ath12k_err(ab, "failed to config irq: %d\n", ret);
+ goto err_ce_free;
+ }
+
+ ret = ath12k_core_init(ab);
+ if (ret) {
+ ath12k_err(ab, "failed to init core: %d\n", ret);
+ goto err_free_irq;
+ }
+ return 0;
+
+err_free_irq:
+ ath12k_pci_free_irq(ab);
+
+err_ce_free:
+ ath12k_ce_free_pipes(ab);
+
+err_hal_srng_deinit:
+ ath12k_hal_srng_deinit(ab);
+
+err_mhi_unregister:
+ ath12k_mhi_unregister(ab_pci);
+
+err_pci_msi_free:
+ ath12k_pci_msi_free(ab_pci);
+
+err_pci_free_region:
+ ath12k_pci_free_region(ab_pci);
+
+err_free_core:
+ ath12k_core_free(ab);
+
+ return ret;
+}
+
+static void ath12k_pci_remove(struct pci_dev *pdev)
+{
+ struct ath12k_base *ab = pci_get_drvdata(pdev);
+ struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
+
+ if (test_bit(ATH12K_FLAG_QMI_FAIL, &ab->dev_flags)) {
+ ath12k_pci_power_down(ab);
+ ath12k_qmi_deinit_service(ab);
+ goto qmi_fail;
+ }
+
+ set_bit(ATH12K_FLAG_UNREGISTERING, &ab->dev_flags);
+
+ cancel_work_sync(&ab->reset_work);
+ ath12k_core_deinit(ab);
+
+qmi_fail:
+ ath12k_mhi_unregister(ab_pci);
+
+ ath12k_pci_free_irq(ab);
+ ath12k_pci_msi_free(ab_pci);
+ ath12k_pci_free_region(ab_pci);
+
+ ath12k_hal_srng_deinit(ab);
+ ath12k_ce_free_pipes(ab);
+ ath12k_core_free(ab);
+}
+
+static void ath12k_pci_shutdown(struct pci_dev *pdev)
+{
+ struct ath12k_base *ab = pci_get_drvdata(pdev);
+
+ ath12k_pci_power_down(ab);
+}
+
+static __maybe_unused int ath12k_pci_pm_suspend(struct device *dev)
+{
+ struct ath12k_base *ab = dev_get_drvdata(dev);
+ int ret;
+
+ ret = ath12k_core_suspend(ab);
+ if (ret)
+ ath12k_warn(ab, "failed to suspend core: %d\n", ret);
+
+ return ret;
+}
+
+static __maybe_unused int ath12k_pci_pm_resume(struct device *dev)
+{
+ struct ath12k_base *ab = dev_get_drvdata(dev);
+ int ret;
+
+ ret = ath12k_core_resume(ab);
+ if (ret)
+ ath12k_warn(ab, "failed to resume core: %d\n", ret);
+
+ return ret;
+}
+
+static SIMPLE_DEV_PM_OPS(ath12k_pci_pm_ops,
+ ath12k_pci_pm_suspend,
+ ath12k_pci_pm_resume);
+
+static struct pci_driver ath12k_pci_driver = {
+ .name = "ath12k_pci",
+ .id_table = ath12k_pci_id_table,
+ .probe = ath12k_pci_probe,
+ .remove = ath12k_pci_remove,
+ .shutdown = ath12k_pci_shutdown,
+ .driver.pm = &ath12k_pci_pm_ops,
+};
+
+static int ath12k_pci_init(void)
+{
+ int ret;
+
+ ret = pci_register_driver(&ath12k_pci_driver);
+ if (ret) {
+ pr_err("failed to register ath12k pci driver: %d\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+module_init(ath12k_pci_init);
+
+static void ath12k_pci_exit(void)
+{
+ pci_unregister_driver(&ath12k_pci_driver);
+}
+
+module_exit(ath12k_pci_exit);
+
+MODULE_DESCRIPTION("Driver support for Qualcomm Technologies 802.11be WLAN PCIe devices");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/ath/ath12k/pci.h b/drivers/net/wireless/ath/ath12k/pci.h
new file mode 100644
index 000000000000..0d9e40ab31f2
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/pci.h
@@ -0,0 +1,135 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+#ifndef ATH12K_PCI_H
+#define ATH12K_PCI_H
+
+#include <linux/mhi.h>
+
+#include "core.h"
+
+#define PCIE_SOC_GLOBAL_RESET 0x3008
+#define PCIE_SOC_GLOBAL_RESET_V 1
+
+#define WLAON_WARM_SW_ENTRY 0x1f80504
+#define WLAON_SOC_RESET_CAUSE_REG 0x01f8060c
+
+#define PCIE_Q6_COOKIE_ADDR 0x01f80500
+#define PCIE_Q6_COOKIE_DATA 0xc0000000
+
+/* register to wake the UMAC from power collapse */
+#define PCIE_SCRATCH_0_SOC_PCIE_REG 0x4040
+
+/* register used for handshake mechanism to validate UMAC is awake */
+#define PCIE_SOC_WAKE_PCIE_LOCAL_REG 0x3004
+
+#define PCIE_PCIE_PARF_LTSSM 0x1e081b0
+#define PARM_LTSSM_VALUE 0x111
+
+#define GCC_GCC_PCIE_HOT_RST 0x1e38338
+#define GCC_GCC_PCIE_HOT_RST_VAL 0x10
+
+#define PCIE_PCIE_INT_ALL_CLEAR 0x1e08228
+#define PCIE_SMLH_REQ_RST_LINK_DOWN 0x2
+#define PCIE_INT_CLEAR_ALL 0xffffffff
+
+#define PCIE_QSERDES_COM_SYSCLK_EN_SEL_REG(ab) \
+ ((ab)->hw_params->regs->pcie_qserdes_sysclk_en_sel)
+#define PCIE_QSERDES_COM_SYSCLK_EN_SEL_VAL 0x10
+#define PCIE_QSERDES_COM_SYSCLK_EN_SEL_MSK 0xffffffff
+#define PCIE_PCS_OSC_DTCT_CONFIG1_REG(ab) \
+ ((ab)->hw_params->regs->pcie_pcs_osc_dtct_config_base)
+#define PCIE_PCS_OSC_DTCT_CONFIG1_VAL 0x02
+#define PCIE_PCS_OSC_DTCT_CONFIG2_REG(ab) \
+ ((ab)->hw_params->regs->pcie_pcs_osc_dtct_config_base + 0x4)
+#define PCIE_PCS_OSC_DTCT_CONFIG2_VAL 0x52
+#define PCIE_PCS_OSC_DTCT_CONFIG4_REG(ab) \
+ ((ab)->hw_params->regs->pcie_pcs_osc_dtct_config_base + 0xc)
+#define PCIE_PCS_OSC_DTCT_CONFIG4_VAL 0xff
+#define PCIE_PCS_OSC_DTCT_CONFIG_MSK 0x000000ff
+
+#define WLAON_QFPROM_PWR_CTRL_REG 0x01f8031c
+#define QFPROM_PWR_CTRL_VDD4BLOW_MASK 0x4
+
+#define PCI_BAR_WINDOW0_BASE 0x1E00000
+#define PCI_BAR_WINDOW0_END 0x1E7FFFC
+#define PCI_SOC_RANGE_MASK 0x3FFF
+#define PCI_SOC_PCI_REG_BASE 0x1E04000
+#define PCI_SOC_PCI_REG_END 0x1E07FFC
+#define PCI_PARF_BASE 0x1E08000
+#define PCI_PARF_END 0x1E0BFFC
+#define PCI_MHIREGLEN_REG 0x1E0E100
+#define PCI_MHI_REGION_END 0x1E0EFFC
+#define QRTR_PCI_DOMAIN_NR_MASK GENMASK(7, 4)
+#define QRTR_PCI_BUS_NUMBER_MASK GENMASK(3, 0)
+
+#define ATH12K_PCI_SOC_HW_VERSION_1 1
+#define ATH12K_PCI_SOC_HW_VERSION_2 2
+
+struct ath12k_msi_user {
+ const char *name;
+ int num_vectors;
+ u32 base_vector;
+};
+
+struct ath12k_msi_config {
+ int total_vectors;
+ int total_users;
+ const struct ath12k_msi_user *users;
+};
+
+enum ath12k_pci_flags {
+ ATH12K_PCI_FLAG_INIT_DONE,
+ ATH12K_PCI_FLAG_IS_MSI_64,
+ ATH12K_PCI_ASPM_RESTORE,
+};
+
+struct ath12k_pci {
+ struct pci_dev *pdev;
+ struct ath12k_base *ab;
+ u16 dev_id;
+ char amss_path[100];
+ u32 msi_ep_base_data;
+ struct mhi_controller *mhi_ctrl;
+ const struct ath12k_msi_config *msi_config;
+ unsigned long mhi_state;
+ u32 register_window;
+
+ /* protects register_window above */
+ spinlock_t window_lock;
+
+ /* enum ath12k_pci_flags */
+ unsigned long flags;
+ u16 link_ctl;
+};
+
+static inline struct ath12k_pci *ath12k_pci_priv(struct ath12k_base *ab)
+{
+ return (struct ath12k_pci *)ab->drv_priv;
+}
+
+int ath12k_pci_get_user_msi_assignment(struct ath12k_base *ab, char *user_name,
+ int *num_vectors, u32 *user_base_data,
+ u32 *base_vector);
+int ath12k_pci_get_msi_irq(struct device *dev, unsigned int vector);
+void ath12k_pci_write32(struct ath12k_base *ab, u32 offset, u32 value);
+u32 ath12k_pci_read32(struct ath12k_base *ab, u32 offset);
+int ath12k_pci_map_service_to_pipe(struct ath12k_base *ab, u16 service_id,
+ u8 *ul_pipe, u8 *dl_pipe);
+void ath12k_pci_get_msi_address(struct ath12k_base *ab, u32 *msi_addr_lo,
+ u32 *msi_addr_hi);
+void ath12k_pci_get_ce_msi_idx(struct ath12k_base *ab, u32 ce_id,
+ u32 *msi_idx);
+void ath12k_pci_hif_ce_irq_enable(struct ath12k_base *ab);
+void ath12k_pci_hif_ce_irq_disable(struct ath12k_base *ab);
+void ath12k_pci_ext_irq_enable(struct ath12k_base *ab);
+void ath12k_pci_ext_irq_disable(struct ath12k_base *ab);
+int ath12k_pci_hif_suspend(struct ath12k_base *ab);
+int ath12k_pci_hif_resume(struct ath12k_base *ab);
+void ath12k_pci_stop(struct ath12k_base *ab);
+int ath12k_pci_start(struct ath12k_base *ab);
+int ath12k_pci_power_up(struct ath12k_base *ab);
+void ath12k_pci_power_down(struct ath12k_base *ab);
+#endif /* ATH12K_PCI_H */
diff --git a/drivers/net/wireless/ath/ath12k/peer.c b/drivers/net/wireless/ath/ath12k/peer.c
new file mode 100644
index 000000000000..19c0626fbff1
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/peer.c
@@ -0,0 +1,342 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include "core.h"
+#include "peer.h"
+#include "debug.h"
+
+struct ath12k_peer *ath12k_peer_find(struct ath12k_base *ab, int vdev_id,
+ const u8 *addr)
+{
+ struct ath12k_peer *peer;
+
+ lockdep_assert_held(&ab->base_lock);
+
+ list_for_each_entry(peer, &ab->peers, list) {
+ if (peer->vdev_id != vdev_id)
+ continue;
+ if (!ether_addr_equal(peer->addr, addr))
+ continue;
+
+ return peer;
+ }
+
+ return NULL;
+}
+
+static struct ath12k_peer *ath12k_peer_find_by_pdev_idx(struct ath12k_base *ab,
+ u8 pdev_idx, const u8 *addr)
+{
+ struct ath12k_peer *peer;
+
+ lockdep_assert_held(&ab->base_lock);
+
+ list_for_each_entry(peer, &ab->peers, list) {
+ if (peer->pdev_idx != pdev_idx)
+ continue;
+ if (!ether_addr_equal(peer->addr, addr))
+ continue;
+
+ return peer;
+ }
+
+ return NULL;
+}
+
+struct ath12k_peer *ath12k_peer_find_by_addr(struct ath12k_base *ab,
+ const u8 *addr)
+{
+ struct ath12k_peer *peer;
+
+ lockdep_assert_held(&ab->base_lock);
+
+ list_for_each_entry(peer, &ab->peers, list) {
+ if (!ether_addr_equal(peer->addr, addr))
+ continue;
+
+ return peer;
+ }
+
+ return NULL;
+}
+
+struct ath12k_peer *ath12k_peer_find_by_id(struct ath12k_base *ab,
+ int peer_id)
+{
+ struct ath12k_peer *peer;
+
+ lockdep_assert_held(&ab->base_lock);
+
+ list_for_each_entry(peer, &ab->peers, list)
+ if (peer_id == peer->peer_id)
+ return peer;
+
+ return NULL;
+}
+
+bool ath12k_peer_exist_by_vdev_id(struct ath12k_base *ab, int vdev_id)
+{
+ struct ath12k_peer *peer;
+
+ spin_lock_bh(&ab->base_lock);
+
+ list_for_each_entry(peer, &ab->peers, list) {
+ if (vdev_id == peer->vdev_id) {
+ spin_unlock_bh(&ab->base_lock);
+ return true;
+ }
+ }
+ spin_unlock_bh(&ab->base_lock);
+ return false;
+}
+
+struct ath12k_peer *ath12k_peer_find_by_ast(struct ath12k_base *ab,
+ int ast_hash)
+{
+ struct ath12k_peer *peer;
+
+ lockdep_assert_held(&ab->base_lock);
+
+ list_for_each_entry(peer, &ab->peers, list)
+ if (ast_hash == peer->ast_hash)
+ return peer;
+
+ return NULL;
+}
+
+void ath12k_peer_unmap_event(struct ath12k_base *ab, u16 peer_id)
+{
+ struct ath12k_peer *peer;
+
+ spin_lock_bh(&ab->base_lock);
+
+ peer = ath12k_peer_find_by_id(ab, peer_id);
+ if (!peer) {
+ ath12k_warn(ab, "peer-unmap-event: unknown peer id %d\n",
+ peer_id);
+ goto exit;
+ }
+
+ ath12k_dbg(ab, ATH12K_DBG_DP_HTT, "htt peer unmap vdev %d peer %pM id %d\n",
+ peer->vdev_id, peer->addr, peer_id);
+
+ list_del(&peer->list);
+ kfree(peer);
+ wake_up(&ab->peer_mapping_wq);
+
+exit:
+ spin_unlock_bh(&ab->base_lock);
+}
+
+void ath12k_peer_map_event(struct ath12k_base *ab, u8 vdev_id, u16 peer_id,
+ u8 *mac_addr, u16 ast_hash, u16 hw_peer_id)
+{
+ struct ath12k_peer *peer;
+
+ spin_lock_bh(&ab->base_lock);
+ peer = ath12k_peer_find(ab, vdev_id, mac_addr);
+ if (!peer) {
+ peer = kzalloc(sizeof(*peer), GFP_ATOMIC);
+ if (!peer)
+ goto exit;
+
+ peer->vdev_id = vdev_id;
+ peer->peer_id = peer_id;
+ peer->ast_hash = ast_hash;
+ peer->hw_peer_id = hw_peer_id;
+ ether_addr_copy(peer->addr, mac_addr);
+ list_add(&peer->list, &ab->peers);
+ wake_up(&ab->peer_mapping_wq);
+ }
+
+ ath12k_dbg(ab, ATH12K_DBG_DP_HTT, "htt peer map vdev %d peer %pM id %d\n",
+ vdev_id, mac_addr, peer_id);
+
+exit:
+ spin_unlock_bh(&ab->base_lock);
+}
+
+static int ath12k_wait_for_peer_common(struct ath12k_base *ab, int vdev_id,
+ const u8 *addr, bool expect_mapped)
+{
+ int ret;
+
+ ret = wait_event_timeout(ab->peer_mapping_wq, ({
+ bool mapped;
+
+ spin_lock_bh(&ab->base_lock);
+ mapped = !!ath12k_peer_find(ab, vdev_id, addr);
+ spin_unlock_bh(&ab->base_lock);
+
+ (mapped == expect_mapped ||
+ test_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags));
+ }), 3 * HZ);
+
+ if (ret <= 0)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+void ath12k_peer_cleanup(struct ath12k *ar, u32 vdev_id)
+{
+ struct ath12k_peer *peer, *tmp;
+ struct ath12k_base *ab = ar->ab;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ spin_lock_bh(&ab->base_lock);
+ list_for_each_entry_safe(peer, tmp, &ab->peers, list) {
+ if (peer->vdev_id != vdev_id)
+ continue;
+
+ ath12k_warn(ab, "removing stale peer %pM from vdev_id %d\n",
+ peer->addr, vdev_id);
+
+ list_del(&peer->list);
+ kfree(peer);
+ ar->num_peers--;
+ }
+
+ spin_unlock_bh(&ab->base_lock);
+}
+
+static int ath12k_wait_for_peer_deleted(struct ath12k *ar, int vdev_id, const u8 *addr)
+{
+ return ath12k_wait_for_peer_common(ar->ab, vdev_id, addr, false);
+}
+
+int ath12k_wait_for_peer_delete_done(struct ath12k *ar, u32 vdev_id,
+ const u8 *addr)
+{
+ int ret;
+ unsigned long time_left;
+
+ ret = ath12k_wait_for_peer_deleted(ar, vdev_id, addr);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed wait for peer deleted");
+ return ret;
+ }
+
+ time_left = wait_for_completion_timeout(&ar->peer_delete_done,
+ 3 * HZ);
+ if (time_left == 0) {
+ ath12k_warn(ar->ab, "Timeout in receiving peer delete response\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+int ath12k_peer_delete(struct ath12k *ar, u32 vdev_id, u8 *addr)
+{
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ reinit_completion(&ar->peer_delete_done);
+
+ ret = ath12k_wmi_send_peer_delete_cmd(ar, addr, vdev_id);
+ if (ret) {
+ ath12k_warn(ar->ab,
+ "failed to delete peer vdev_id %d addr %pM ret %d\n",
+ vdev_id, addr, ret);
+ return ret;
+ }
+
+ ret = ath12k_wait_for_peer_delete_done(ar, vdev_id, addr);
+ if (ret)
+ return ret;
+
+ ar->num_peers--;
+
+ return 0;
+}
+
+static int ath12k_wait_for_peer_created(struct ath12k *ar, int vdev_id, const u8 *addr)
+{
+ return ath12k_wait_for_peer_common(ar->ab, vdev_id, addr, true);
+}
+
+int ath12k_peer_create(struct ath12k *ar, struct ath12k_vif *arvif,
+ struct ieee80211_sta *sta,
+ struct ath12k_wmi_peer_create_arg *arg)
+{
+ struct ath12k_peer *peer;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (ar->num_peers > (ar->max_num_peers - 1)) {
+ ath12k_warn(ar->ab,
+ "failed to create peer due to insufficient peer entry resource in firmware\n");
+ return -ENOBUFS;
+ }
+
+ spin_lock_bh(&ar->ab->base_lock);
+ peer = ath12k_peer_find_by_pdev_idx(ar->ab, ar->pdev_idx, arg->peer_addr);
+ if (peer) {
+ spin_unlock_bh(&ar->ab->base_lock);
+ return -EINVAL;
+ }
+ spin_unlock_bh(&ar->ab->base_lock);
+
+ ret = ath12k_wmi_send_peer_create_cmd(ar, arg);
+ if (ret) {
+ ath12k_warn(ar->ab,
+ "failed to send peer create vdev_id %d ret %d\n",
+ arg->vdev_id, ret);
+ return ret;
+ }
+
+ ret = ath12k_wait_for_peer_created(ar, arg->vdev_id,
+ arg->peer_addr);
+ if (ret)
+ return ret;
+
+ spin_lock_bh(&ar->ab->base_lock);
+
+ peer = ath12k_peer_find(ar->ab, arg->vdev_id, arg->peer_addr);
+ if (!peer) {
+ spin_unlock_bh(&ar->ab->base_lock);
+ ath12k_warn(ar->ab, "failed to find peer %pM on vdev %i after creation\n",
+ arg->peer_addr, arg->vdev_id);
+
+ reinit_completion(&ar->peer_delete_done);
+
+ ret = ath12k_wmi_send_peer_delete_cmd(ar, arg->peer_addr,
+ arg->vdev_id);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to delete peer vdev_id %d addr %pM\n",
+ arg->vdev_id, arg->peer_addr);
+ return ret;
+ }
+
+ ret = ath12k_wait_for_peer_delete_done(ar, arg->vdev_id,
+ arg->peer_addr);
+ if (ret)
+ return ret;
+
+ return -ENOENT;
+ }
+
+ peer->pdev_idx = ar->pdev_idx;
+ peer->sta = sta;
+
+ if (arvif->vif->type == NL80211_IFTYPE_STATION) {
+ arvif->ast_hash = peer->ast_hash;
+ arvif->ast_idx = peer->hw_peer_id;
+ }
+
+ peer->sec_type = HAL_ENCRYPT_TYPE_OPEN;
+ peer->sec_type_grp = HAL_ENCRYPT_TYPE_OPEN;
+
+ ar->num_peers++;
+
+ spin_unlock_bh(&ar->ab->base_lock);
+
+ return 0;
+}
diff --git a/drivers/net/wireless/ath/ath12k/peer.h b/drivers/net/wireless/ath/ath12k/peer.h
new file mode 100644
index 000000000000..b296dc0e2f67
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/peer.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef ATH12K_PEER_H
+#define ATH12K_PEER_H
+
+#include "dp_rx.h"
+
+struct ppdu_user_delayba {
+ u16 sw_peer_id;
+ u32 info0;
+ u16 ru_end;
+ u16 ru_start;
+ u32 info1;
+ u32 rate_flags;
+ u32 resp_rate_flags;
+};
+
+struct ath12k_peer {
+ struct list_head list;
+ struct ieee80211_sta *sta;
+ int vdev_id;
+ u8 addr[ETH_ALEN];
+ int peer_id;
+ u16 ast_hash;
+ u8 pdev_idx;
+ u16 hw_peer_id;
+
+ /* protected by ab->data_lock */
+ struct ieee80211_key_conf *keys[WMI_MAX_KEY_INDEX + 1];
+ struct ath12k_dp_rx_tid rx_tid[IEEE80211_NUM_TIDS + 1];
+
+ /* Info used in MMIC verification of
+ * RX fragments
+ */
+ struct crypto_shash *tfm_mmic;
+ u8 mcast_keyidx;
+ u8 ucast_keyidx;
+ u16 sec_type;
+ u16 sec_type_grp;
+ struct ppdu_user_delayba ppdu_stats_delayba;
+ bool delayba_flag;
+ bool is_authorized;
+};
+
+void ath12k_peer_unmap_event(struct ath12k_base *ab, u16 peer_id);
+void ath12k_peer_map_event(struct ath12k_base *ab, u8 vdev_id, u16 peer_id,
+ u8 *mac_addr, u16 ast_hash, u16 hw_peer_id);
+struct ath12k_peer *ath12k_peer_find(struct ath12k_base *ab, int vdev_id,
+ const u8 *addr);
+struct ath12k_peer *ath12k_peer_find_by_addr(struct ath12k_base *ab,
+ const u8 *addr);
+struct ath12k_peer *ath12k_peer_find_by_id(struct ath12k_base *ab, int peer_id);
+void ath12k_peer_cleanup(struct ath12k *ar, u32 vdev_id);
+int ath12k_peer_delete(struct ath12k *ar, u32 vdev_id, u8 *addr);
+int ath12k_peer_create(struct ath12k *ar, struct ath12k_vif *arvif,
+ struct ieee80211_sta *sta,
+ struct ath12k_wmi_peer_create_arg *arg);
+int ath12k_wait_for_peer_delete_done(struct ath12k *ar, u32 vdev_id,
+ const u8 *addr);
+bool ath12k_peer_exist_by_vdev_id(struct ath12k_base *ab, int vdev_id);
+struct ath12k_peer *ath12k_peer_find_by_ast(struct ath12k_base *ab, int ast_hash);
+
+#endif /* _PEER_H_ */
diff --git a/drivers/net/wireless/ath/ath12k/qmi.c b/drivers/net/wireless/ath/ath12k/qmi.c
new file mode 100644
index 000000000000..979a63f2e2ab
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/qmi.c
@@ -0,0 +1,3087 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/elf.h>
+
+#include "qmi.h"
+#include "core.h"
+#include "debug.h"
+#include <linux/of.h>
+#include <linux/firmware.h>
+
+#define SLEEP_CLOCK_SELECT_INTERNAL_BIT 0x02
+#define HOST_CSTATE_BIT 0x04
+#define PLATFORM_CAP_PCIE_GLOBAL_RESET 0x08
+#define ATH12K_QMI_MAX_CHUNK_SIZE 2097152
+
+static struct qmi_elem_info wlfw_host_mlo_chip_info_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_host_mlo_chip_info_s_v01,
+ chip_id),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_host_mlo_chip_info_s_v01,
+ num_local_links),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = QMI_WLFW_MAX_NUM_MLO_LINKS_PER_CHIP_V01,
+ .elem_size = sizeof(u8),
+ .array_type = STATIC_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_host_mlo_chip_info_s_v01,
+ hw_link_id),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = QMI_WLFW_MAX_NUM_MLO_LINKS_PER_CHIP_V01,
+ .elem_size = sizeof(u8),
+ .array_type = STATIC_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct wlfw_host_mlo_chip_info_s_v01,
+ valid_mlo_link_id),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_host_cap_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ num_clients_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ num_clients),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ wake_msi_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ wake_msi),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ gpios_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ gpios_len),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = QMI_WLFW_MAX_NUM_GPIO_V01,
+ .elem_size = sizeof(u32),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ gpios),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ nm_modem_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ nm_modem),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ bdf_support_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ bdf_support),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ bdf_cache_support_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ bdf_cache_support),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x16,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ m3_support_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x16,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ m3_support),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x17,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ m3_cache_support_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x17,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ m3_cache_support),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x18,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ cal_filesys_support_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x18,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ cal_filesys_support),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x19,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ cal_cache_support_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x19,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ cal_cache_support),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1A,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ cal_done_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1A,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ cal_done),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1B,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ mem_bucket_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1B,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ mem_bucket),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1C,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ mem_cfg_mode_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1C,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ mem_cfg_mode),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1D,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ cal_duration_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_2_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1D,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ cal_duraiton),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1E,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ platform_name_valid),
+ },
+ {
+ .data_type = QMI_STRING,
+ .elem_len = QMI_WLANFW_MAX_PLATFORM_NAME_LEN_V01 + 1,
+ .elem_size = sizeof(char),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1E,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ platform_name),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1F,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ ddr_range_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_WLANFW_MAX_HOST_DDR_RANGE_SIZE_V01,
+ .elem_size = sizeof(struct qmi_wlanfw_host_ddr_range),
+ .array_type = STATIC_ARRAY,
+ .tlv_type = 0x1F,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ ddr_range),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x20,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ host_build_type_valid),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum qmi_wlanfw_host_build_type),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x20,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ host_build_type),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x21,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ mlo_capable_valid),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x21,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ mlo_capable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x22,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ mlo_chip_id_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_2_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x22,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ mlo_chip_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x23,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ mlo_group_id_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x23,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ mlo_group_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x24,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ max_mlo_peer_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_2_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x24,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ max_mlo_peer),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x25,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ mlo_num_chips_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x25,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ mlo_num_chips),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x26,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ mlo_chip_info_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_WLFW_MAX_NUM_MLO_CHIPS_V01,
+ .elem_size = sizeof(struct wlfw_host_mlo_chip_info_s_v01),
+ .array_type = STATIC_ARRAY,
+ .tlv_type = 0x26,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ mlo_chip_info),
+ .ei_array = wlfw_host_mlo_chip_info_s_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x27,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ feature_list_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x27,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ feature_list),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_host_cap_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_resp_msg_v01, resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_ind_register_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ fw_ready_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ fw_ready_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ initiate_cal_download_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ initiate_cal_download_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ initiate_cal_update_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ initiate_cal_update_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ msa_ready_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ msa_ready_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ pin_connect_result_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ pin_connect_result_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ client_id_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ client_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x16,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ request_mem_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x16,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ request_mem_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x17,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ fw_mem_ready_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x17,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ fw_mem_ready_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x18,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ fw_init_done_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x18,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ fw_init_done_enable),
+ },
+
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x19,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ rejuvenate_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x19,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ rejuvenate_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1A,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ xo_cal_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1A,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ xo_cal_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1B,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ cal_done_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1B,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ cal_done_enable),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_ind_register_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_resp_msg_v01,
+ fw_status_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_resp_msg_v01,
+ fw_status),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_mem_cfg_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_mem_cfg_s_v01, offset),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_mem_cfg_s_v01, size),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_mem_cfg_s_v01, secure_flag),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_mem_seg_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_mem_seg_s_v01,
+ size),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum qmi_wlanfw_mem_type_enum_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_mem_seg_s_v01, type),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_mem_seg_s_v01, mem_cfg_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_WLANFW_MAX_NUM_MEM_CFG_V01,
+ .elem_size = sizeof(struct qmi_wlanfw_mem_cfg_s_v01),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_mem_seg_s_v01, mem_cfg),
+ .ei_array = qmi_wlanfw_mem_cfg_s_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_request_mem_ind_msg_v01_ei[] = {
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct qmi_wlanfw_request_mem_ind_msg_v01,
+ mem_seg_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = ATH12K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01,
+ .elem_size = sizeof(struct qmi_wlanfw_mem_seg_s_v01),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct qmi_wlanfw_request_mem_ind_msg_v01,
+ mem_seg),
+ .ei_array = qmi_wlanfw_mem_seg_s_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_mem_seg_resp_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_mem_seg_resp_s_v01, addr),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_mem_seg_resp_s_v01, size),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum qmi_wlanfw_mem_type_enum_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_mem_seg_resp_s_v01, type),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_mem_seg_resp_s_v01, restore),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_respond_mem_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct qmi_wlanfw_respond_mem_req_msg_v01,
+ mem_seg_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = ATH12K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01,
+ .elem_size = sizeof(struct qmi_wlanfw_mem_seg_resp_s_v01),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct qmi_wlanfw_respond_mem_req_msg_v01,
+ mem_seg),
+ .ei_array = qmi_wlanfw_mem_seg_resp_s_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_respond_mem_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct qmi_wlanfw_respond_mem_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_cap_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_rf_chip_info_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_rf_chip_info_s_v01,
+ chip_id),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_rf_chip_info_s_v01,
+ chip_family),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_rf_board_info_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_rf_board_info_s_v01,
+ board_id),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_soc_info_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_soc_info_s_v01, soc_id),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_dev_mem_info_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_dev_mem_info_s_v01,
+ start),
+ },
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_dev_mem_info_s_v01,
+ size),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_fw_version_info_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_fw_version_info_s_v01,
+ fw_version),
+ },
+ {
+ .data_type = QMI_STRING,
+ .elem_len = ATH12K_QMI_WLANFW_MAX_TIMESTAMP_LEN_V01 + 1,
+ .elem_size = sizeof(char),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_fw_version_info_s_v01,
+ fw_build_timestamp),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_cap_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01, resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ chip_info_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_wlanfw_rf_chip_info_s_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ chip_info),
+ .ei_array = qmi_wlanfw_rf_chip_info_s_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ board_info_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_wlanfw_rf_board_info_s_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ board_info),
+ .ei_array = qmi_wlanfw_rf_board_info_s_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ soc_info_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_wlanfw_soc_info_s_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ soc_info),
+ .ei_array = qmi_wlanfw_soc_info_s_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ fw_version_info_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_wlanfw_fw_version_info_s_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ fw_version_info),
+ .ei_array = qmi_wlanfw_fw_version_info_s_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ fw_build_id_valid),
+ },
+ {
+ .data_type = QMI_STRING,
+ .elem_len = ATH12K_QMI_WLANFW_MAX_BUILD_ID_LEN_V01 + 1,
+ .elem_size = sizeof(char),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ fw_build_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ num_macs_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ num_macs),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x16,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ voltage_mv_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x16,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ voltage_mv),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x17,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ time_freq_hz_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x17,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ time_freq_hz),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x18,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ otp_version_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x18,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ otp_version),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x19,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ eeprom_caldata_read_timeout_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x19,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ eeprom_caldata_read_timeout),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1A,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ fw_caps_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1A,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01, fw_caps),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1B,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ rd_card_chain_cap_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1B,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ rd_card_chain_cap),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1C,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ dev_mem_info_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = ATH12K_QMI_WLFW_MAX_DEV_MEM_NUM_V01,
+ .elem_size = sizeof(struct qmi_wlanfw_dev_mem_info_s_v01),
+ .array_type = STATIC_ARRAY,
+ .tlv_type = 0x1C,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01, dev_mem),
+ .ei_array = qmi_wlanfw_dev_mem_info_s_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_bdf_download_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+ valid),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+ file_id_valid),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum qmi_wlanfw_cal_temp_id_enum_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+ file_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+ total_size_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+ total_size),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+ seg_id_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+ seg_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+ data_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+ data_len),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = QMI_WLANFW_MAX_DATA_SIZE_V01,
+ .elem_size = sizeof(u8),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+ data),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+ end_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+ end),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+ bdf_type_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+ bdf_type),
+ },
+
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_bdf_download_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct qmi_wlanfw_bdf_download_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_m3_info_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct qmi_wlanfw_m3_info_req_msg_v01, addr),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct qmi_wlanfw_m3_info_req_msg_v01, size),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_m3_info_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct qmi_wlanfw_m3_info_resp_msg_v01, resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_ce_tgt_pipe_cfg_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01,
+ pipe_num),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum qmi_wlanfw_pipedir_enum_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01,
+ pipe_dir),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01,
+ nentries),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01,
+ nbytes_max),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01,
+ flags),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_ce_svc_pipe_cfg_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_ce_svc_pipe_cfg_s_v01,
+ service_id),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum qmi_wlanfw_pipedir_enum_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_ce_svc_pipe_cfg_s_v01,
+ pipe_dir),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_ce_svc_pipe_cfg_s_v01,
+ pipe_num),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_shadow_reg_cfg_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_2_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_shadow_reg_cfg_s_v01, id),
+ },
+ {
+ .data_type = QMI_UNSIGNED_2_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_shadow_reg_cfg_s_v01,
+ offset),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_shadow_reg_v3_cfg_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_shadow_reg_v3_cfg_s_v01,
+ addr),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_wlan_mode_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct qmi_wlanfw_wlan_mode_req_msg_v01,
+ mode),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_wlan_mode_req_msg_v01,
+ hw_debug_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_wlan_mode_req_msg_v01,
+ hw_debug),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_wlan_mode_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct qmi_wlanfw_wlan_mode_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_wlan_cfg_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
+ host_version_valid),
+ },
+ {
+ .data_type = QMI_STRING,
+ .elem_len = QMI_WLANFW_MAX_STR_LEN_V01 + 1,
+ .elem_size = sizeof(char),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
+ host_version),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
+ tgt_cfg_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
+ tgt_cfg_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_WLANFW_MAX_NUM_CE_V01,
+ .elem_size = sizeof(struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
+ tgt_cfg),
+ .ei_array = qmi_wlanfw_ce_tgt_pipe_cfg_s_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
+ svc_cfg_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
+ svc_cfg_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_WLANFW_MAX_NUM_SVC_V01,
+ .elem_size = sizeof(struct qmi_wlanfw_ce_svc_pipe_cfg_s_v01),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
+ svc_cfg),
+ .ei_array = qmi_wlanfw_ce_svc_pipe_cfg_s_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
+ shadow_reg_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
+ shadow_reg_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_WLANFW_MAX_NUM_SHADOW_REG_V01,
+ .elem_size = sizeof(struct qmi_wlanfw_shadow_reg_cfg_s_v01),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
+ shadow_reg),
+ .ei_array = qmi_wlanfw_shadow_reg_cfg_s_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x17,
+ .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
+ shadow_reg_v3_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x17,
+ .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
+ shadow_reg_v3_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_WLANFW_MAX_NUM_SHADOW_REG_V3_V01,
+ .elem_size = sizeof(struct qmi_wlanfw_shadow_reg_v3_cfg_s_v01),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x17,
+ .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
+ shadow_reg_v3),
+ .ei_array = qmi_wlanfw_shadow_reg_v3_cfg_s_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_wlan_cfg_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct qmi_wlanfw_wlan_cfg_resp_msg_v01, resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_mem_ready_ind_msg_v01_ei[] = {
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ },
+};
+
+static struct qmi_elem_info qmi_wlanfw_fw_ready_ind_msg_v01_ei[] = {
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ },
+};
+
+static void ath12k_host_cap_parse_mlo(struct qmi_wlanfw_host_cap_req_msg_v01 *req)
+{
+ req->mlo_capable_valid = 1;
+ req->mlo_capable = 1;
+ req->mlo_chip_id_valid = 1;
+ req->mlo_chip_id = 0;
+ req->mlo_group_id_valid = 1;
+ req->mlo_group_id = 0;
+ req->max_mlo_peer_valid = 1;
+ /* Max peer number generally won't change for the same device
+ * but needs to be synced with host driver.
+ */
+ req->max_mlo_peer = 32;
+ req->mlo_num_chips_valid = 1;
+ req->mlo_num_chips = 1;
+ req->mlo_chip_info_valid = 1;
+ req->mlo_chip_info[0].chip_id = 0;
+ req->mlo_chip_info[0].num_local_links = 2;
+ req->mlo_chip_info[0].hw_link_id[0] = 0;
+ req->mlo_chip_info[0].hw_link_id[1] = 1;
+ req->mlo_chip_info[0].valid_mlo_link_id[0] = 1;
+ req->mlo_chip_info[0].valid_mlo_link_id[1] = 1;
+}
+
+static int ath12k_qmi_host_cap_send(struct ath12k_base *ab)
+{
+ struct qmi_wlanfw_host_cap_req_msg_v01 req;
+ struct qmi_wlanfw_host_cap_resp_msg_v01 resp;
+ struct qmi_txn txn = {};
+ int ret = 0;
+
+ memset(&req, 0, sizeof(req));
+ memset(&resp, 0, sizeof(resp));
+
+ req.num_clients_valid = 1;
+ req.num_clients = 1;
+ req.mem_cfg_mode = ab->qmi.target_mem_mode;
+ req.mem_cfg_mode_valid = 1;
+ req.bdf_support_valid = 1;
+ req.bdf_support = 1;
+
+ req.m3_support_valid = 1;
+ req.m3_support = 1;
+ req.m3_cache_support_valid = 1;
+ req.m3_cache_support = 1;
+
+ req.cal_done_valid = 1;
+ req.cal_done = ab->qmi.cal_done;
+
+ req.feature_list_valid = 1;
+ req.feature_list = BIT(CNSS_QDSS_CFG_MISS_V01);
+
+ /* BRINGUP: here we are piggybacking a lot of stuff using
+ * internal_sleep_clock, should it be split?
+ */
+ if (ab->hw_params->internal_sleep_clock) {
+ req.nm_modem_valid = 1;
+
+ /* Notify firmware that this is non-qualcomm platform. */
+ req.nm_modem |= HOST_CSTATE_BIT;
+
+ /* Notify firmware about the sleep clock selection,
+ * nm_modem_bit[1] is used for this purpose. Host driver on
+ * non-qualcomm platforms should select internal sleep
+ * clock.
+ */
+ req.nm_modem |= SLEEP_CLOCK_SELECT_INTERNAL_BIT;
+ req.nm_modem |= PLATFORM_CAP_PCIE_GLOBAL_RESET;
+
+ ath12k_host_cap_parse_mlo(&req);
+ }
+
+ ret = qmi_txn_init(&ab->qmi.handle, &txn,
+ qmi_wlanfw_host_cap_resp_msg_v01_ei, &resp);
+ if (ret < 0)
+ goto out;
+
+ ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
+ QMI_WLANFW_HOST_CAP_REQ_V01,
+ QMI_WLANFW_HOST_CAP_REQ_MSG_V01_MAX_LEN,
+ qmi_wlanfw_host_cap_req_msg_v01_ei, &req);
+ if (ret < 0) {
+ ath12k_warn(ab, "Failed to send host capability request,err = %d\n", ret);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH12K_QMI_WLANFW_TIMEOUT_MS));
+ if (ret < 0)
+ goto out;
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ ath12k_warn(ab, "Host capability request failed, result: %d, err: %d\n",
+ resp.resp.result, resp.resp.error);
+ ret = -EINVAL;
+ goto out;
+ }
+
+out:
+ return ret;
+}
+
+static int ath12k_qmi_fw_ind_register_send(struct ath12k_base *ab)
+{
+ struct qmi_wlanfw_ind_register_req_msg_v01 *req;
+ struct qmi_wlanfw_ind_register_resp_msg_v01 *resp;
+ struct qmi_handle *handle = &ab->qmi.handle;
+ struct qmi_txn txn;
+ int ret;
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ resp = kzalloc(sizeof(*resp), GFP_KERNEL);
+ if (!resp) {
+ ret = -ENOMEM;
+ goto resp_out;
+ }
+
+ req->client_id_valid = 1;
+ req->client_id = QMI_WLANFW_CLIENT_ID;
+ req->fw_ready_enable_valid = 1;
+ req->fw_ready_enable = 1;
+ req->request_mem_enable_valid = 1;
+ req->request_mem_enable = 1;
+ req->fw_mem_ready_enable_valid = 1;
+ req->fw_mem_ready_enable = 1;
+ req->cal_done_enable_valid = 1;
+ req->cal_done_enable = 1;
+ req->fw_init_done_enable_valid = 1;
+ req->fw_init_done_enable = 1;
+
+ req->pin_connect_result_enable_valid = 0;
+ req->pin_connect_result_enable = 0;
+
+ ret = qmi_txn_init(handle, &txn,
+ qmi_wlanfw_ind_register_resp_msg_v01_ei, resp);
+ if (ret < 0)
+ goto out;
+
+ ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
+ QMI_WLANFW_IND_REGISTER_REQ_V01,
+ QMI_WLANFW_IND_REGISTER_REQ_MSG_V01_MAX_LEN,
+ qmi_wlanfw_ind_register_req_msg_v01_ei, req);
+ if (ret < 0) {
+ ath12k_warn(ab, "Failed to send indication register request, err = %d\n",
+ ret);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH12K_QMI_WLANFW_TIMEOUT_MS));
+ if (ret < 0) {
+ ath12k_warn(ab, "failed to register fw indication %d\n", ret);
+ goto out;
+ }
+
+ if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
+ ath12k_warn(ab, "FW Ind register request failed, result: %d, err: %d\n",
+ resp->resp.result, resp->resp.error);
+ ret = -EINVAL;
+ goto out;
+ }
+
+out:
+ kfree(resp);
+resp_out:
+ kfree(req);
+ return ret;
+}
+
+static int ath12k_qmi_respond_fw_mem_request(struct ath12k_base *ab)
+{
+ struct qmi_wlanfw_respond_mem_req_msg_v01 *req;
+ struct qmi_wlanfw_respond_mem_resp_msg_v01 resp;
+ struct qmi_txn txn = {};
+ int ret = 0, i;
+ bool delayed;
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ memset(&resp, 0, sizeof(resp));
+
+ /* Some targets by default request a block of big contiguous
+ * DMA memory, it's hard to allocate from kernel. So host returns
+ * failure to firmware and firmware then request multiple blocks of
+ * small chunk size memory.
+ */
+ if (ab->qmi.target_mem_delayed) {
+ delayed = true;
+ ath12k_dbg(ab, ATH12K_DBG_QMI, "qmi delays mem_request %d\n",
+ ab->qmi.mem_seg_count);
+ memset(req, 0, sizeof(*req));
+ } else {
+ delayed = false;
+ req->mem_seg_len = ab->qmi.mem_seg_count;
+ for (i = 0; i < req->mem_seg_len ; i++) {
+ req->mem_seg[i].addr = ab->qmi.target_mem[i].paddr;
+ req->mem_seg[i].size = ab->qmi.target_mem[i].size;
+ req->mem_seg[i].type = ab->qmi.target_mem[i].type;
+ ath12k_dbg(ab, ATH12K_DBG_QMI,
+ "qmi req mem_seg[%d] %pad %u %u\n", i,
+ &ab->qmi.target_mem[i].paddr,
+ ab->qmi.target_mem[i].size,
+ ab->qmi.target_mem[i].type);
+ }
+ }
+
+ ret = qmi_txn_init(&ab->qmi.handle, &txn,
+ qmi_wlanfw_respond_mem_resp_msg_v01_ei, &resp);
+ if (ret < 0)
+ goto out;
+
+ ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
+ QMI_WLANFW_RESPOND_MEM_REQ_V01,
+ QMI_WLANFW_RESPOND_MEM_REQ_MSG_V01_MAX_LEN,
+ qmi_wlanfw_respond_mem_req_msg_v01_ei, req);
+ if (ret < 0) {
+ ath12k_warn(ab, "qmi failed to respond memory request, err = %d\n",
+ ret);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH12K_QMI_WLANFW_TIMEOUT_MS));
+ if (ret < 0) {
+ ath12k_warn(ab, "qmi failed memory request, err = %d\n", ret);
+ goto out;
+ }
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ /* the error response is expected when
+ * target_mem_delayed is true.
+ */
+ if (delayed && resp.resp.error == 0)
+ goto out;
+
+ ath12k_warn(ab, "Respond mem req failed, result: %d, err: %d\n",
+ resp.resp.result, resp.resp.error);
+ ret = -EINVAL;
+ goto out;
+ }
+out:
+ kfree(req);
+ return ret;
+}
+
+static void ath12k_qmi_free_target_mem_chunk(struct ath12k_base *ab)
+{
+ int i;
+
+ for (i = 0; i < ab->qmi.mem_seg_count; i++) {
+ if (!ab->qmi.target_mem[i].v.addr)
+ continue;
+ dma_free_coherent(ab->dev,
+ ab->qmi.target_mem[i].size,
+ ab->qmi.target_mem[i].v.addr,
+ ab->qmi.target_mem[i].paddr);
+ ab->qmi.target_mem[i].v.addr = NULL;
+ }
+}
+
+static int ath12k_qmi_alloc_target_mem_chunk(struct ath12k_base *ab)
+{
+ int i;
+ struct target_mem_chunk *chunk;
+
+ ab->qmi.target_mem_delayed = false;
+
+ for (i = 0; i < ab->qmi.mem_seg_count; i++) {
+ chunk = &ab->qmi.target_mem[i];
+
+ /* Allocate memory for the region and the functionality supported
+ * on the host. For the non-supported memory region, host does not
+ * allocate memory, assigns NULL and FW will handle this without crashing.
+ */
+ switch (chunk->type) {
+ case HOST_DDR_REGION_TYPE:
+ case M3_DUMP_REGION_TYPE:
+ case PAGEABLE_MEM_REGION_TYPE:
+ case CALDB_MEM_REGION_TYPE:
+ chunk->v.addr = dma_alloc_coherent(ab->dev,
+ chunk->size,
+ &chunk->paddr,
+ GFP_KERNEL | __GFP_NOWARN);
+ if (!chunk->v.addr) {
+ if (chunk->size > ATH12K_QMI_MAX_CHUNK_SIZE) {
+ ab->qmi.target_mem_delayed = true;
+ ath12k_warn(ab,
+ "qmi dma allocation failed (%d B type %u), will try later with small size\n",
+ chunk->size,
+ chunk->type);
+ ath12k_qmi_free_target_mem_chunk(ab);
+ return 0;
+ }
+ ath12k_warn(ab, "memory allocation failure for %u size: %d\n",
+ chunk->type, chunk->size);
+ return -ENOMEM;
+ }
+ break;
+ default:
+ ath12k_warn(ab, "memory type %u not supported\n",
+ chunk->type);
+ chunk->paddr = 0;
+ chunk->v.addr = NULL;
+ break;
+ }
+ }
+ return 0;
+}
+
+static int ath12k_qmi_request_target_cap(struct ath12k_base *ab)
+{
+ struct qmi_wlanfw_cap_req_msg_v01 req;
+ struct qmi_wlanfw_cap_resp_msg_v01 resp;
+ struct qmi_txn txn = {};
+ unsigned int board_id = ATH12K_BOARD_ID_DEFAULT;
+ int ret = 0;
+ int i;
+
+ memset(&req, 0, sizeof(req));
+ memset(&resp, 0, sizeof(resp));
+
+ ret = qmi_txn_init(&ab->qmi.handle, &txn,
+ qmi_wlanfw_cap_resp_msg_v01_ei, &resp);
+ if (ret < 0)
+ goto out;
+
+ ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
+ QMI_WLANFW_CAP_REQ_V01,
+ QMI_WLANFW_CAP_REQ_MSG_V01_MAX_LEN,
+ qmi_wlanfw_cap_req_msg_v01_ei, &req);
+ if (ret < 0) {
+ ath12k_warn(ab, "qmi failed to send target cap request, err = %d\n",
+ ret);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH12K_QMI_WLANFW_TIMEOUT_MS));
+ if (ret < 0) {
+ ath12k_warn(ab, "qmi failed target cap request %d\n", ret);
+ goto out;
+ }
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ ath12k_warn(ab, "qmi targetcap req failed, result: %d, err: %d\n",
+ resp.resp.result, resp.resp.error);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (resp.chip_info_valid) {
+ ab->qmi.target.chip_id = resp.chip_info.chip_id;
+ ab->qmi.target.chip_family = resp.chip_info.chip_family;
+ }
+
+ if (resp.board_info_valid)
+ ab->qmi.target.board_id = resp.board_info.board_id;
+ else
+ ab->qmi.target.board_id = board_id;
+
+ if (resp.soc_info_valid)
+ ab->qmi.target.soc_id = resp.soc_info.soc_id;
+
+ if (resp.fw_version_info_valid) {
+ ab->qmi.target.fw_version = resp.fw_version_info.fw_version;
+ strscpy(ab->qmi.target.fw_build_timestamp,
+ resp.fw_version_info.fw_build_timestamp,
+ sizeof(ab->qmi.target.fw_build_timestamp));
+ }
+
+ if (resp.fw_build_id_valid)
+ strscpy(ab->qmi.target.fw_build_id, resp.fw_build_id,
+ sizeof(ab->qmi.target.fw_build_id));
+
+ if (resp.dev_mem_info_valid) {
+ for (i = 0; i < ATH12K_QMI_WLFW_MAX_DEV_MEM_NUM_V01; i++) {
+ ab->qmi.dev_mem[i].start =
+ resp.dev_mem[i].start;
+ ab->qmi.dev_mem[i].size =
+ resp.dev_mem[i].size;
+ ath12k_dbg(ab, ATH12K_DBG_QMI,
+ "devmem [%d] start ox%llx size %llu\n", i,
+ ab->qmi.dev_mem[i].start,
+ ab->qmi.dev_mem[i].size);
+ }
+ }
+
+ if (resp.eeprom_caldata_read_timeout_valid) {
+ ab->qmi.target.eeprom_caldata = resp.eeprom_caldata_read_timeout;
+ ath12k_dbg(ab, ATH12K_DBG_QMI, "qmi cal data supported from eeprom\n");
+ }
+
+ ath12k_info(ab, "chip_id 0x%x chip_family 0x%x board_id 0x%x soc_id 0x%x\n",
+ ab->qmi.target.chip_id, ab->qmi.target.chip_family,
+ ab->qmi.target.board_id, ab->qmi.target.soc_id);
+
+ ath12k_info(ab, "fw_version 0x%x fw_build_timestamp %s fw_build_id %s",
+ ab->qmi.target.fw_version,
+ ab->qmi.target.fw_build_timestamp,
+ ab->qmi.target.fw_build_id);
+
+out:
+ return ret;
+}
+
+static int ath12k_qmi_load_file_target_mem(struct ath12k_base *ab,
+ const u8 *data, u32 len, u8 type)
+{
+ struct qmi_wlanfw_bdf_download_req_msg_v01 *req;
+ struct qmi_wlanfw_bdf_download_resp_msg_v01 resp;
+ struct qmi_txn txn = {};
+ const u8 *temp = data;
+ int ret;
+ u32 remaining = len;
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+ memset(&resp, 0, sizeof(resp));
+
+ while (remaining) {
+ req->valid = 1;
+ req->file_id_valid = 1;
+ req->file_id = ab->qmi.target.board_id;
+ req->total_size_valid = 1;
+ req->total_size = remaining;
+ req->seg_id_valid = 1;
+ req->data_valid = 1;
+ req->bdf_type = type;
+ req->bdf_type_valid = 1;
+ req->end_valid = 1;
+ req->end = 0;
+
+ if (remaining > QMI_WLANFW_MAX_DATA_SIZE_V01) {
+ req->data_len = QMI_WLANFW_MAX_DATA_SIZE_V01;
+ } else {
+ req->data_len = remaining;
+ req->end = 1;
+ }
+
+ if (type == ATH12K_QMI_FILE_TYPE_EEPROM) {
+ req->data_valid = 0;
+ req->end = 1;
+ req->data_len = ATH12K_QMI_MAX_BDF_FILE_NAME_SIZE;
+ } else {
+ memcpy(req->data, temp, req->data_len);
+ }
+
+ ret = qmi_txn_init(&ab->qmi.handle, &txn,
+ qmi_wlanfw_bdf_download_resp_msg_v01_ei,
+ &resp);
+ if (ret < 0)
+ goto out;
+
+ ath12k_dbg(ab, ATH12K_DBG_QMI, "qmi bdf download req fixed addr type %d\n",
+ type);
+
+ ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
+ QMI_WLANFW_BDF_DOWNLOAD_REQ_V01,
+ QMI_WLANFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_LEN,
+ qmi_wlanfw_bdf_download_req_msg_v01_ei, req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH12K_QMI_WLANFW_TIMEOUT_MS));
+ if (ret < 0)
+ goto out;
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ ath12k_warn(ab, "qmi BDF download failed, result: %d, err: %d\n",
+ resp.resp.result, resp.resp.error);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (type == ATH12K_QMI_FILE_TYPE_EEPROM) {
+ remaining = 0;
+ } else {
+ remaining -= req->data_len;
+ temp += req->data_len;
+ req->seg_id++;
+ ath12k_dbg(ab, ATH12K_DBG_QMI,
+ "qmi bdf download request remaining %i\n",
+ remaining);
+ }
+ }
+
+out:
+ kfree(req);
+ return ret;
+}
+
+static int ath12k_qmi_load_bdf_qmi(struct ath12k_base *ab,
+ enum ath12k_qmi_bdf_type type)
+{
+ struct device *dev = ab->dev;
+ char filename[ATH12K_QMI_MAX_BDF_FILE_NAME_SIZE];
+ const struct firmware *fw_entry;
+ struct ath12k_board_data bd;
+ u32 fw_size, file_type;
+ int ret = 0;
+ const u8 *tmp;
+
+ memset(&bd, 0, sizeof(bd));
+
+ switch (type) {
+ case ATH12K_QMI_BDF_TYPE_ELF:
+ ret = ath12k_core_fetch_bdf(ab, &bd);
+ if (ret) {
+ ath12k_warn(ab, "qmi failed to load bdf:\n");
+ goto out;
+ }
+
+ if (bd.len >= SELFMAG && memcmp(bd.data, ELFMAG, SELFMAG) == 0)
+ type = ATH12K_QMI_BDF_TYPE_ELF;
+ else
+ type = ATH12K_QMI_BDF_TYPE_BIN;
+
+ break;
+ case ATH12K_QMI_BDF_TYPE_REGDB:
+ ret = ath12k_core_fetch_board_data_api_1(ab, &bd,
+ ATH12K_REGDB_FILE_NAME);
+ if (ret) {
+ ath12k_warn(ab, "qmi failed to load regdb bin:\n");
+ goto out;
+ }
+ break;
+ case ATH12K_QMI_BDF_TYPE_CALIBRATION:
+
+ if (ab->qmi.target.eeprom_caldata) {
+ file_type = ATH12K_QMI_FILE_TYPE_EEPROM;
+ tmp = filename;
+ fw_size = ATH12K_QMI_MAX_BDF_FILE_NAME_SIZE;
+ } else {
+ file_type = ATH12K_QMI_FILE_TYPE_CALDATA;
+
+ /* cal-<bus>-<id>.bin */
+ snprintf(filename, sizeof(filename), "cal-%s-%s.bin",
+ ath12k_bus_str(ab->hif.bus), dev_name(dev));
+ fw_entry = ath12k_core_firmware_request(ab, filename);
+ if (!IS_ERR(fw_entry))
+ goto success;
+
+ fw_entry = ath12k_core_firmware_request(ab,
+ ATH12K_DEFAULT_CAL_FILE);
+ if (IS_ERR(fw_entry)) {
+ ret = PTR_ERR(fw_entry);
+ ath12k_warn(ab,
+ "qmi failed to load CAL data file:%s\n",
+ filename);
+ goto out;
+ }
+
+success:
+ fw_size = min_t(u32, ab->hw_params->fw.board_size,
+ fw_entry->size);
+ tmp = fw_entry->data;
+ }
+ ret = ath12k_qmi_load_file_target_mem(ab, tmp, fw_size, file_type);
+ if (ret < 0) {
+ ath12k_warn(ab, "qmi failed to load caldata\n");
+ goto out_qmi_cal;
+ }
+
+ ath12k_dbg(ab, ATH12K_DBG_QMI, "qmi caldata downloaded: type: %u\n",
+ file_type);
+
+out_qmi_cal:
+ if (!ab->qmi.target.eeprom_caldata)
+ release_firmware(fw_entry);
+ return ret;
+ default:
+ ath12k_warn(ab, "unknown file type for load %d", type);
+ goto out;
+ }
+
+ ath12k_dbg(ab, ATH12K_DBG_QMI, "qmi bdf_type %d\n", type);
+
+ fw_size = min_t(u32, ab->hw_params->fw.board_size, bd.len);
+
+ ret = ath12k_qmi_load_file_target_mem(ab, bd.data, fw_size, type);
+ if (ret < 0)
+ ath12k_warn(ab, "qmi failed to load bdf file\n");
+
+out:
+ ath12k_core_free_bdf(ab, &bd);
+ ath12k_dbg(ab, ATH12K_DBG_QMI, "qmi BDF download sequence completed\n");
+
+ return ret;
+}
+
+static int ath12k_qmi_m3_load(struct ath12k_base *ab)
+{
+ struct m3_mem_region *m3_mem = &ab->qmi.m3_mem;
+ const struct firmware *fw;
+ char path[100];
+ int ret;
+
+ if (m3_mem->vaddr || m3_mem->size)
+ return 0;
+
+ fw = ath12k_core_firmware_request(ab, ATH12K_M3_FILE);
+ if (IS_ERR(fw)) {
+ ret = PTR_ERR(fw);
+ ath12k_core_create_firmware_path(ab, ATH12K_M3_FILE,
+ path, sizeof(path));
+ ath12k_err(ab, "failed to load %s: %d\n", path, ret);
+ return ret;
+ }
+
+ m3_mem->vaddr = dma_alloc_coherent(ab->dev,
+ fw->size, &m3_mem->paddr,
+ GFP_KERNEL);
+ if (!m3_mem->vaddr) {
+ ath12k_err(ab, "failed to allocate memory for M3 with size %zu\n",
+ fw->size);
+ release_firmware(fw);
+ return -ENOMEM;
+ }
+
+ memcpy(m3_mem->vaddr, fw->data, fw->size);
+ m3_mem->size = fw->size;
+ release_firmware(fw);
+
+ return 0;
+}
+
+static void ath12k_qmi_m3_free(struct ath12k_base *ab)
+{
+ struct m3_mem_region *m3_mem = &ab->qmi.m3_mem;
+
+ if (!m3_mem->vaddr)
+ return;
+
+ dma_free_coherent(ab->dev, m3_mem->size,
+ m3_mem->vaddr, m3_mem->paddr);
+ m3_mem->vaddr = NULL;
+}
+
+static int ath12k_qmi_wlanfw_m3_info_send(struct ath12k_base *ab)
+{
+ struct m3_mem_region *m3_mem = &ab->qmi.m3_mem;
+ struct qmi_wlanfw_m3_info_req_msg_v01 req;
+ struct qmi_wlanfw_m3_info_resp_msg_v01 resp;
+ struct qmi_txn txn = {};
+ int ret = 0;
+
+ memset(&req, 0, sizeof(req));
+ memset(&resp, 0, sizeof(resp));
+
+ ret = ath12k_qmi_m3_load(ab);
+ if (ret) {
+ ath12k_err(ab, "failed to load m3 firmware: %d", ret);
+ return ret;
+ }
+
+ req.addr = m3_mem->paddr;
+ req.size = m3_mem->size;
+
+ ret = qmi_txn_init(&ab->qmi.handle, &txn,
+ qmi_wlanfw_m3_info_resp_msg_v01_ei, &resp);
+ if (ret < 0)
+ goto out;
+
+ ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
+ QMI_WLANFW_M3_INFO_REQ_V01,
+ QMI_WLANFW_M3_INFO_REQ_MSG_V01_MAX_MSG_LEN,
+ qmi_wlanfw_m3_info_req_msg_v01_ei, &req);
+ if (ret < 0) {
+ ath12k_warn(ab, "qmi failed to send M3 information request, err = %d\n",
+ ret);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH12K_QMI_WLANFW_TIMEOUT_MS));
+ if (ret < 0) {
+ ath12k_warn(ab, "qmi failed M3 information request %d\n", ret);
+ goto out;
+ }
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ ath12k_warn(ab, "qmi M3 info request failed, result: %d, err: %d\n",
+ resp.resp.result, resp.resp.error);
+ ret = -EINVAL;
+ goto out;
+ }
+out:
+ return ret;
+}
+
+static int ath12k_qmi_wlanfw_mode_send(struct ath12k_base *ab,
+ u32 mode)
+{
+ struct qmi_wlanfw_wlan_mode_req_msg_v01 req;
+ struct qmi_wlanfw_wlan_mode_resp_msg_v01 resp;
+ struct qmi_txn txn = {};
+ int ret = 0;
+
+ memset(&req, 0, sizeof(req));
+ memset(&resp, 0, sizeof(resp));
+
+ req.mode = mode;
+ req.hw_debug_valid = 1;
+ req.hw_debug = 0;
+
+ ret = qmi_txn_init(&ab->qmi.handle, &txn,
+ qmi_wlanfw_wlan_mode_resp_msg_v01_ei, &resp);
+ if (ret < 0)
+ goto out;
+
+ ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
+ QMI_WLANFW_WLAN_MODE_REQ_V01,
+ QMI_WLANFW_WLAN_MODE_REQ_MSG_V01_MAX_LEN,
+ qmi_wlanfw_wlan_mode_req_msg_v01_ei, &req);
+ if (ret < 0) {
+ ath12k_warn(ab, "qmi failed to send mode request, mode: %d, err = %d\n",
+ mode, ret);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH12K_QMI_WLANFW_TIMEOUT_MS));
+ if (ret < 0) {
+ if (mode == ATH12K_FIRMWARE_MODE_OFF && ret == -ENETRESET) {
+ ath12k_warn(ab, "WLFW service is dis-connected\n");
+ return 0;
+ }
+ ath12k_warn(ab, "qmi failed set mode request, mode: %d, err = %d\n",
+ mode, ret);
+ goto out;
+ }
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ ath12k_warn(ab, "Mode request failed, mode: %d, result: %d err: %d\n",
+ mode, resp.resp.result, resp.resp.error);
+ ret = -EINVAL;
+ goto out;
+ }
+
+out:
+ return ret;
+}
+
+static int ath12k_qmi_wlanfw_wlan_cfg_send(struct ath12k_base *ab)
+{
+ struct qmi_wlanfw_wlan_cfg_req_msg_v01 *req;
+ struct qmi_wlanfw_wlan_cfg_resp_msg_v01 resp;
+ struct ce_pipe_config *ce_cfg;
+ struct service_to_pipe *svc_cfg;
+ struct qmi_txn txn = {};
+ int ret = 0, pipe_num;
+
+ ce_cfg = (struct ce_pipe_config *)ab->qmi.ce_cfg.tgt_ce;
+ svc_cfg = (struct service_to_pipe *)ab->qmi.ce_cfg.svc_to_ce_map;
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ memset(&resp, 0, sizeof(resp));
+
+ req->host_version_valid = 1;
+ strscpy(req->host_version, ATH12K_HOST_VERSION_STRING,
+ sizeof(req->host_version));
+
+ req->tgt_cfg_valid = 1;
+ /* This is number of CE configs */
+ req->tgt_cfg_len = ab->qmi.ce_cfg.tgt_ce_len;
+ for (pipe_num = 0; pipe_num < req->tgt_cfg_len ; pipe_num++) {
+ req->tgt_cfg[pipe_num].pipe_num = ce_cfg[pipe_num].pipenum;
+ req->tgt_cfg[pipe_num].pipe_dir = ce_cfg[pipe_num].pipedir;
+ req->tgt_cfg[pipe_num].nentries = ce_cfg[pipe_num].nentries;
+ req->tgt_cfg[pipe_num].nbytes_max = ce_cfg[pipe_num].nbytes_max;
+ req->tgt_cfg[pipe_num].flags = ce_cfg[pipe_num].flags;
+ }
+
+ req->svc_cfg_valid = 1;
+ /* This is number of Service/CE configs */
+ req->svc_cfg_len = ab->qmi.ce_cfg.svc_to_ce_map_len;
+ for (pipe_num = 0; pipe_num < req->svc_cfg_len; pipe_num++) {
+ req->svc_cfg[pipe_num].service_id = svc_cfg[pipe_num].service_id;
+ req->svc_cfg[pipe_num].pipe_dir = svc_cfg[pipe_num].pipedir;
+ req->svc_cfg[pipe_num].pipe_num = svc_cfg[pipe_num].pipenum;
+ }
+
+ /* set shadow v3 configuration */
+ if (ab->hw_params->supports_shadow_regs) {
+ req->shadow_reg_v3_valid = 1;
+ req->shadow_reg_v3_len = min_t(u32,
+ ab->qmi.ce_cfg.shadow_reg_v3_len,
+ QMI_WLANFW_MAX_NUM_SHADOW_REG_V3_V01);
+ memcpy(&req->shadow_reg_v3, ab->qmi.ce_cfg.shadow_reg_v3,
+ sizeof(u32) * req->shadow_reg_v3_len);
+ } else {
+ req->shadow_reg_v3_valid = 0;
+ }
+
+ ret = qmi_txn_init(&ab->qmi.handle, &txn,
+ qmi_wlanfw_wlan_cfg_resp_msg_v01_ei, &resp);
+ if (ret < 0)
+ goto out;
+
+ ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
+ QMI_WLANFW_WLAN_CFG_REQ_V01,
+ QMI_WLANFW_WLAN_CFG_REQ_MSG_V01_MAX_LEN,
+ qmi_wlanfw_wlan_cfg_req_msg_v01_ei, req);
+ if (ret < 0) {
+ ath12k_warn(ab, "qmi failed to send wlan config request, err = %d\n",
+ ret);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH12K_QMI_WLANFW_TIMEOUT_MS));
+ if (ret < 0) {
+ ath12k_warn(ab, "qmi failed wlan config request, err = %d\n", ret);
+ goto out;
+ }
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ ath12k_warn(ab, "qmi wlan config request failed, result: %d, err: %d\n",
+ resp.resp.result, resp.resp.error);
+ ret = -EINVAL;
+ goto out;
+ }
+
+out:
+ kfree(req);
+ return ret;
+}
+
+void ath12k_qmi_firmware_stop(struct ath12k_base *ab)
+{
+ int ret;
+
+ ret = ath12k_qmi_wlanfw_mode_send(ab, ATH12K_FIRMWARE_MODE_OFF);
+ if (ret < 0) {
+ ath12k_warn(ab, "qmi failed to send wlan mode off\n");
+ return;
+ }
+}
+
+int ath12k_qmi_firmware_start(struct ath12k_base *ab,
+ u32 mode)
+{
+ int ret;
+
+ ret = ath12k_qmi_wlanfw_wlan_cfg_send(ab);
+ if (ret < 0) {
+ ath12k_warn(ab, "qmi failed to send wlan cfg:%d\n", ret);
+ return ret;
+ }
+
+ ret = ath12k_qmi_wlanfw_mode_send(ab, mode);
+ if (ret < 0) {
+ ath12k_warn(ab, "qmi failed to send wlan fw mode:%d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+ath12k_qmi_driver_event_post(struct ath12k_qmi *qmi,
+ enum ath12k_qmi_event_type type,
+ void *data)
+{
+ struct ath12k_qmi_driver_event *event;
+
+ event = kzalloc(sizeof(*event), GFP_ATOMIC);
+ if (!event)
+ return -ENOMEM;
+
+ event->type = type;
+ event->data = data;
+
+ spin_lock(&qmi->event_lock);
+ list_add_tail(&event->list, &qmi->event_list);
+ spin_unlock(&qmi->event_lock);
+
+ queue_work(qmi->event_wq, &qmi->event_work);
+
+ return 0;
+}
+
+static int ath12k_qmi_event_server_arrive(struct ath12k_qmi *qmi)
+{
+ struct ath12k_base *ab = qmi->ab;
+ int ret;
+
+ ret = ath12k_qmi_fw_ind_register_send(ab);
+ if (ret < 0) {
+ ath12k_warn(ab, "qmi failed to send FW indication QMI:%d\n", ret);
+ return ret;
+ }
+
+ ret = ath12k_qmi_host_cap_send(ab);
+ if (ret < 0) {
+ ath12k_warn(ab, "qmi failed to send host cap QMI:%d\n", ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+static int ath12k_qmi_event_mem_request(struct ath12k_qmi *qmi)
+{
+ struct ath12k_base *ab = qmi->ab;
+ int ret;
+
+ ret = ath12k_qmi_respond_fw_mem_request(ab);
+ if (ret < 0) {
+ ath12k_warn(ab, "qmi failed to respond fw mem req:%d\n", ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+static int ath12k_qmi_event_load_bdf(struct ath12k_qmi *qmi)
+{
+ struct ath12k_base *ab = qmi->ab;
+ int ret;
+
+ ret = ath12k_qmi_request_target_cap(ab);
+ if (ret < 0) {
+ ath12k_warn(ab, "qmi failed to req target capabilities:%d\n", ret);
+ return ret;
+ }
+
+ ret = ath12k_qmi_load_bdf_qmi(ab, ATH12K_QMI_BDF_TYPE_REGDB);
+ if (ret < 0) {
+ ath12k_warn(ab, "qmi failed to load regdb file:%d\n", ret);
+ return ret;
+ }
+
+ ret = ath12k_qmi_load_bdf_qmi(ab, ATH12K_QMI_BDF_TYPE_ELF);
+ if (ret < 0) {
+ ath12k_warn(ab, "qmi failed to load board data file:%d\n", ret);
+ return ret;
+ }
+
+ if (ab->hw_params->download_calib) {
+ ret = ath12k_qmi_load_bdf_qmi(ab, ATH12K_QMI_BDF_TYPE_CALIBRATION);
+ if (ret < 0)
+ ath12k_warn(ab, "qmi failed to load calibrated data :%d\n", ret);
+ }
+
+ ret = ath12k_qmi_wlanfw_m3_info_send(ab);
+ if (ret < 0) {
+ ath12k_warn(ab, "qmi failed to send m3 info req:%d\n", ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+static void ath12k_qmi_msg_mem_request_cb(struct qmi_handle *qmi_hdl,
+ struct sockaddr_qrtr *sq,
+ struct qmi_txn *txn,
+ const void *data)
+{
+ struct ath12k_qmi *qmi = container_of(qmi_hdl, struct ath12k_qmi, handle);
+ struct ath12k_base *ab = qmi->ab;
+ const struct qmi_wlanfw_request_mem_ind_msg_v01 *msg = data;
+ int i, ret;
+
+ ath12k_dbg(ab, ATH12K_DBG_QMI, "qmi firmware request memory request\n");
+
+ if (msg->mem_seg_len == 0 ||
+ msg->mem_seg_len > ATH12K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01)
+ ath12k_warn(ab, "Invalid memory segment length: %u\n",
+ msg->mem_seg_len);
+
+ ab->qmi.mem_seg_count = msg->mem_seg_len;
+
+ for (i = 0; i < qmi->mem_seg_count ; i++) {
+ ab->qmi.target_mem[i].type = msg->mem_seg[i].type;
+ ab->qmi.target_mem[i].size = msg->mem_seg[i].size;
+ ath12k_dbg(ab, ATH12K_DBG_QMI, "qmi mem seg type %d size %d\n",
+ msg->mem_seg[i].type, msg->mem_seg[i].size);
+ }
+
+ ret = ath12k_qmi_alloc_target_mem_chunk(ab);
+ if (ret) {
+ ath12k_warn(ab, "qmi failed to alloc target memory: %d\n",
+ ret);
+ return;
+ }
+
+ ath12k_qmi_driver_event_post(qmi, ATH12K_QMI_EVENT_REQUEST_MEM, NULL);
+}
+
+static void ath12k_qmi_msg_mem_ready_cb(struct qmi_handle *qmi_hdl,
+ struct sockaddr_qrtr *sq,
+ struct qmi_txn *txn,
+ const void *decoded)
+{
+ struct ath12k_qmi *qmi = container_of(qmi_hdl, struct ath12k_qmi, handle);
+ struct ath12k_base *ab = qmi->ab;
+
+ ath12k_dbg(ab, ATH12K_DBG_QMI, "qmi firmware memory ready indication\n");
+ ath12k_qmi_driver_event_post(qmi, ATH12K_QMI_EVENT_FW_MEM_READY, NULL);
+}
+
+static void ath12k_qmi_msg_fw_ready_cb(struct qmi_handle *qmi_hdl,
+ struct sockaddr_qrtr *sq,
+ struct qmi_txn *txn,
+ const void *decoded)
+{
+ struct ath12k_qmi *qmi = container_of(qmi_hdl, struct ath12k_qmi, handle);
+ struct ath12k_base *ab = qmi->ab;
+
+ ath12k_dbg(ab, ATH12K_DBG_QMI, "qmi firmware ready\n");
+ ath12k_qmi_driver_event_post(qmi, ATH12K_QMI_EVENT_FW_READY, NULL);
+}
+
+static const struct qmi_msg_handler ath12k_qmi_msg_handlers[] = {
+ {
+ .type = QMI_INDICATION,
+ .msg_id = QMI_WLFW_REQUEST_MEM_IND_V01,
+ .ei = qmi_wlanfw_request_mem_ind_msg_v01_ei,
+ .decoded_size = sizeof(struct qmi_wlanfw_request_mem_ind_msg_v01),
+ .fn = ath12k_qmi_msg_mem_request_cb,
+ },
+ {
+ .type = QMI_INDICATION,
+ .msg_id = QMI_WLFW_FW_MEM_READY_IND_V01,
+ .ei = qmi_wlanfw_mem_ready_ind_msg_v01_ei,
+ .decoded_size = sizeof(struct qmi_wlanfw_fw_mem_ready_ind_msg_v01),
+ .fn = ath12k_qmi_msg_mem_ready_cb,
+ },
+ {
+ .type = QMI_INDICATION,
+ .msg_id = QMI_WLFW_FW_READY_IND_V01,
+ .ei = qmi_wlanfw_fw_ready_ind_msg_v01_ei,
+ .decoded_size = sizeof(struct qmi_wlanfw_fw_ready_ind_msg_v01),
+ .fn = ath12k_qmi_msg_fw_ready_cb,
+ },
+};
+
+static int ath12k_qmi_ops_new_server(struct qmi_handle *qmi_hdl,
+ struct qmi_service *service)
+{
+ struct ath12k_qmi *qmi = container_of(qmi_hdl, struct ath12k_qmi, handle);
+ struct ath12k_base *ab = qmi->ab;
+ struct sockaddr_qrtr *sq = &qmi->sq;
+ int ret;
+
+ sq->sq_family = AF_QIPCRTR;
+ sq->sq_node = service->node;
+ sq->sq_port = service->port;
+
+ ret = kernel_connect(qmi_hdl->sock, (struct sockaddr *)sq,
+ sizeof(*sq), 0);
+ if (ret) {
+ ath12k_warn(ab, "qmi failed to connect to remote service %d\n", ret);
+ return ret;
+ }
+
+ ath12k_dbg(ab, ATH12K_DBG_QMI, "qmi wifi fw qmi service connected\n");
+ ath12k_qmi_driver_event_post(qmi, ATH12K_QMI_EVENT_SERVER_ARRIVE, NULL);
+
+ return ret;
+}
+
+static void ath12k_qmi_ops_del_server(struct qmi_handle *qmi_hdl,
+ struct qmi_service *service)
+{
+ struct ath12k_qmi *qmi = container_of(qmi_hdl, struct ath12k_qmi, handle);
+ struct ath12k_base *ab = qmi->ab;
+
+ ath12k_dbg(ab, ATH12K_DBG_QMI, "qmi wifi fw del server\n");
+ ath12k_qmi_driver_event_post(qmi, ATH12K_QMI_EVENT_SERVER_EXIT, NULL);
+}
+
+static const struct qmi_ops ath12k_qmi_ops = {
+ .new_server = ath12k_qmi_ops_new_server,
+ .del_server = ath12k_qmi_ops_del_server,
+};
+
+static void ath12k_qmi_driver_event_work(struct work_struct *work)
+{
+ struct ath12k_qmi *qmi = container_of(work, struct ath12k_qmi,
+ event_work);
+ struct ath12k_qmi_driver_event *event;
+ struct ath12k_base *ab = qmi->ab;
+ int ret;
+
+ spin_lock(&qmi->event_lock);
+ while (!list_empty(&qmi->event_list)) {
+ event = list_first_entry(&qmi->event_list,
+ struct ath12k_qmi_driver_event, list);
+ list_del(&event->list);
+ spin_unlock(&qmi->event_lock);
+
+ if (test_bit(ATH12K_FLAG_UNREGISTERING, &ab->dev_flags))
+ return;
+
+ switch (event->type) {
+ case ATH12K_QMI_EVENT_SERVER_ARRIVE:
+ ret = ath12k_qmi_event_server_arrive(qmi);
+ if (ret < 0)
+ set_bit(ATH12K_FLAG_QMI_FAIL, &ab->dev_flags);
+ break;
+ case ATH12K_QMI_EVENT_SERVER_EXIT:
+ set_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags);
+ set_bit(ATH12K_FLAG_RECOVERY, &ab->dev_flags);
+ break;
+ case ATH12K_QMI_EVENT_REQUEST_MEM:
+ ret = ath12k_qmi_event_mem_request(qmi);
+ if (ret < 0)
+ set_bit(ATH12K_FLAG_QMI_FAIL, &ab->dev_flags);
+ break;
+ case ATH12K_QMI_EVENT_FW_MEM_READY:
+ ret = ath12k_qmi_event_load_bdf(qmi);
+ if (ret < 0)
+ set_bit(ATH12K_FLAG_QMI_FAIL, &ab->dev_flags);
+ break;
+ case ATH12K_QMI_EVENT_FW_READY:
+ clear_bit(ATH12K_FLAG_QMI_FAIL, &ab->dev_flags);
+ if (test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags)) {
+ ath12k_hal_dump_srng_stats(ab);
+ queue_work(ab->workqueue, &ab->restart_work);
+ break;
+ }
+
+ clear_bit(ATH12K_FLAG_CRASH_FLUSH,
+ &ab->dev_flags);
+ clear_bit(ATH12K_FLAG_RECOVERY, &ab->dev_flags);
+ ath12k_core_qmi_firmware_ready(ab);
+ set_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags);
+
+ break;
+ default:
+ ath12k_warn(ab, "invalid event type: %d", event->type);
+ break;
+ }
+ kfree(event);
+ spin_lock(&qmi->event_lock);
+ }
+ spin_unlock(&qmi->event_lock);
+}
+
+int ath12k_qmi_init_service(struct ath12k_base *ab)
+{
+ int ret;
+
+ memset(&ab->qmi.target, 0, sizeof(struct target_info));
+ memset(&ab->qmi.target_mem, 0, sizeof(struct target_mem_chunk));
+ ab->qmi.ab = ab;
+
+ ab->qmi.target_mem_mode = ATH12K_QMI_TARGET_MEM_MODE_DEFAULT;
+ ret = qmi_handle_init(&ab->qmi.handle, ATH12K_QMI_RESP_LEN_MAX,
+ &ath12k_qmi_ops, ath12k_qmi_msg_handlers);
+ if (ret < 0) {
+ ath12k_warn(ab, "failed to initialize qmi handle\n");
+ return ret;
+ }
+
+ ab->qmi.event_wq = alloc_workqueue("ath12k_qmi_driver_event",
+ WQ_UNBOUND, 1);
+ if (!ab->qmi.event_wq) {
+ ath12k_err(ab, "failed to allocate workqueue\n");
+ return -EFAULT;
+ }
+
+ INIT_LIST_HEAD(&ab->qmi.event_list);
+ spin_lock_init(&ab->qmi.event_lock);
+ INIT_WORK(&ab->qmi.event_work, ath12k_qmi_driver_event_work);
+
+ ret = qmi_add_lookup(&ab->qmi.handle, ATH12K_QMI_WLFW_SERVICE_ID_V01,
+ ATH12K_QMI_WLFW_SERVICE_VERS_V01,
+ ab->qmi.service_ins_id);
+ if (ret < 0) {
+ ath12k_warn(ab, "failed to add qmi lookup\n");
+ destroy_workqueue(ab->qmi.event_wq);
+ return ret;
+ }
+
+ return ret;
+}
+
+void ath12k_qmi_deinit_service(struct ath12k_base *ab)
+{
+ qmi_handle_release(&ab->qmi.handle);
+ cancel_work_sync(&ab->qmi.event_work);
+ destroy_workqueue(ab->qmi.event_wq);
+ ath12k_qmi_m3_free(ab);
+ ath12k_qmi_free_target_mem_chunk(ab);
+}
diff --git a/drivers/net/wireless/ath/ath12k/qmi.h b/drivers/net/wireless/ath/ath12k/qmi.h
new file mode 100644
index 000000000000..ad87f19903db
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/qmi.h
@@ -0,0 +1,569 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef ATH12K_QMI_H
+#define ATH12K_QMI_H
+
+#include <linux/mutex.h>
+#include <linux/soc/qcom/qmi.h>
+
+#define ATH12K_HOST_VERSION_STRING "WIN"
+#define ATH12K_QMI_WLANFW_TIMEOUT_MS 10000
+#define ATH12K_QMI_MAX_BDF_FILE_NAME_SIZE 64
+#define ATH12K_QMI_CALDB_ADDRESS 0x4BA00000
+#define ATH12K_QMI_WLANFW_MAX_BUILD_ID_LEN_V01 128
+#define ATH12K_QMI_WLFW_NODE_ID_BASE 0x07
+#define ATH12K_QMI_WLFW_SERVICE_ID_V01 0x45
+#define ATH12K_QMI_WLFW_SERVICE_VERS_V01 0x01
+#define ATH12K_QMI_WLFW_SERVICE_INS_ID_V01 0x02
+#define ATH12K_QMI_WLFW_SERVICE_INS_ID_V01_WCN7850 0x1
+
+#define ATH12K_QMI_WLFW_SERVICE_INS_ID_V01_QCN9274 0x07
+#define ATH12K_QMI_WLANFW_MAX_TIMESTAMP_LEN_V01 32
+#define ATH12K_QMI_RESP_LEN_MAX 8192
+#define ATH12K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01 52
+#define ATH12K_QMI_CALDB_SIZE 0x480000
+#define ATH12K_QMI_BDF_EXT_STR_LENGTH 0x20
+#define ATH12K_QMI_FW_MEM_REQ_SEGMENT_CNT 3
+#define ATH12K_QMI_WLFW_MAX_DEV_MEM_NUM_V01 4
+#define ATH12K_QMI_DEVMEM_CMEM_INDEX 0
+
+#define QMI_WLFW_REQUEST_MEM_IND_V01 0x0035
+#define QMI_WLFW_FW_MEM_READY_IND_V01 0x0037
+#define QMI_WLFW_FW_READY_IND_V01 0x0038
+
+#define QMI_WLANFW_MAX_DATA_SIZE_V01 6144
+#define ATH12K_FIRMWARE_MODE_OFF 4
+#define ATH12K_QMI_TARGET_MEM_MODE_DEFAULT 0
+
+#define ATH12K_BOARD_ID_DEFAULT 0xFF
+
+struct ath12k_base;
+
+enum ath12k_qmi_file_type {
+ ATH12K_QMI_FILE_TYPE_BDF_GOLDEN = 0,
+ ATH12K_QMI_FILE_TYPE_CALDATA = 2,
+ ATH12K_QMI_FILE_TYPE_EEPROM = 3,
+ ATH12K_QMI_MAX_FILE_TYPE = 4,
+};
+
+enum ath12k_qmi_bdf_type {
+ ATH12K_QMI_BDF_TYPE_BIN = 0,
+ ATH12K_QMI_BDF_TYPE_ELF = 1,
+ ATH12K_QMI_BDF_TYPE_REGDB = 4,
+ ATH12K_QMI_BDF_TYPE_CALIBRATION = 5,
+};
+
+enum ath12k_qmi_event_type {
+ ATH12K_QMI_EVENT_SERVER_ARRIVE,
+ ATH12K_QMI_EVENT_SERVER_EXIT,
+ ATH12K_QMI_EVENT_REQUEST_MEM,
+ ATH12K_QMI_EVENT_FW_MEM_READY,
+ ATH12K_QMI_EVENT_FW_READY,
+ ATH12K_QMI_EVENT_REGISTER_DRIVER,
+ ATH12K_QMI_EVENT_UNREGISTER_DRIVER,
+ ATH12K_QMI_EVENT_RECOVERY,
+ ATH12K_QMI_EVENT_FORCE_FW_ASSERT,
+ ATH12K_QMI_EVENT_POWER_UP,
+ ATH12K_QMI_EVENT_POWER_DOWN,
+ ATH12K_QMI_EVENT_MAX,
+};
+
+struct ath12k_qmi_driver_event {
+ struct list_head list;
+ enum ath12k_qmi_event_type type;
+ void *data;
+};
+
+struct ath12k_qmi_ce_cfg {
+ const struct ce_pipe_config *tgt_ce;
+ int tgt_ce_len;
+ const struct service_to_pipe *svc_to_ce_map;
+ int svc_to_ce_map_len;
+ const u8 *shadow_reg;
+ int shadow_reg_len;
+ u32 *shadow_reg_v3;
+ int shadow_reg_v3_len;
+};
+
+struct ath12k_qmi_event_msg {
+ struct list_head list;
+ enum ath12k_qmi_event_type type;
+};
+
+struct target_mem_chunk {
+ u32 size;
+ u32 type;
+ dma_addr_t paddr;
+ union {
+ void __iomem *ioaddr;
+ void *addr;
+ } v;
+};
+
+struct target_info {
+ u32 chip_id;
+ u32 chip_family;
+ u32 board_id;
+ u32 soc_id;
+ u32 fw_version;
+ u32 eeprom_caldata;
+ char fw_build_timestamp[ATH12K_QMI_WLANFW_MAX_TIMESTAMP_LEN_V01 + 1];
+ char fw_build_id[ATH12K_QMI_WLANFW_MAX_BUILD_ID_LEN_V01 + 1];
+ char bdf_ext[ATH12K_QMI_BDF_EXT_STR_LENGTH];
+};
+
+struct m3_mem_region {
+ u32 size;
+ dma_addr_t paddr;
+ void *vaddr;
+};
+
+struct dev_mem_info {
+ u64 start;
+ u64 size;
+};
+
+struct ath12k_qmi {
+ struct ath12k_base *ab;
+ struct qmi_handle handle;
+ struct sockaddr_qrtr sq;
+ struct work_struct event_work;
+ struct workqueue_struct *event_wq;
+ struct list_head event_list;
+ spinlock_t event_lock; /* spinlock for qmi event list */
+ struct ath12k_qmi_ce_cfg ce_cfg;
+ struct target_mem_chunk target_mem[ATH12K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01];
+ u32 mem_seg_count;
+ u32 target_mem_mode;
+ bool target_mem_delayed;
+ u8 cal_done;
+ struct target_info target;
+ struct m3_mem_region m3_mem;
+ unsigned int service_ins_id;
+ struct dev_mem_info dev_mem[ATH12K_QMI_WLFW_MAX_DEV_MEM_NUM_V01];
+};
+
+#define QMI_WLANFW_HOST_CAP_REQ_MSG_V01_MAX_LEN 261
+#define QMI_WLANFW_HOST_CAP_REQ_V01 0x0034
+#define QMI_WLANFW_HOST_CAP_RESP_MSG_V01_MAX_LEN 7
+#define QMI_WLFW_HOST_CAP_RESP_V01 0x0034
+#define QMI_WLFW_MAX_NUM_GPIO_V01 32
+#define QMI_WLANFW_MAX_PLATFORM_NAME_LEN_V01 64
+#define QMI_WLANFW_MAX_HOST_DDR_RANGE_SIZE_V01 3
+
+struct qmi_wlanfw_host_ddr_range {
+ u64 start;
+ u64 size;
+};
+
+enum ath12k_qmi_target_mem {
+ HOST_DDR_REGION_TYPE = 0x1,
+ BDF_MEM_REGION_TYPE = 0x2,
+ M3_DUMP_REGION_TYPE = 0x3,
+ CALDB_MEM_REGION_TYPE = 0x4,
+ PAGEABLE_MEM_REGION_TYPE = 0x9,
+};
+
+enum qmi_wlanfw_host_build_type {
+ WLANFW_HOST_BUILD_TYPE_ENUM_MIN_VAL_V01 = INT_MIN,
+ QMI_WLANFW_HOST_BUILD_TYPE_UNSPECIFIED_V01 = 0,
+ QMI_WLANFW_HOST_BUILD_TYPE_PRIMARY_V01 = 1,
+ QMI_WLANFW_HOST_BUILD_TYPE_SECONDARY_V01 = 2,
+ WLANFW_HOST_BUILD_TYPE_ENUM_MAX_VAL_V01 = INT_MAX,
+};
+
+#define QMI_WLFW_MAX_NUM_MLO_CHIPS_V01 3
+#define QMI_WLFW_MAX_NUM_MLO_LINKS_PER_CHIP_V01 2
+
+struct wlfw_host_mlo_chip_info_s_v01 {
+ u8 chip_id;
+ u8 num_local_links;
+ u8 hw_link_id[QMI_WLFW_MAX_NUM_MLO_LINKS_PER_CHIP_V01];
+ u8 valid_mlo_link_id[QMI_WLFW_MAX_NUM_MLO_LINKS_PER_CHIP_V01];
+};
+
+enum ath12k_qmi_cnss_feature {
+ CNSS_FEATURE_MIN_ENUM_VAL_V01 = INT_MIN,
+ CNSS_QDSS_CFG_MISS_V01 = 3,
+ CNSS_MAX_FEATURE_V01 = 64,
+ CNSS_FEATURE_MAX_ENUM_VAL_V01 = INT_MAX,
+};
+
+struct qmi_wlanfw_host_cap_req_msg_v01 {
+ u8 num_clients_valid;
+ u32 num_clients;
+ u8 wake_msi_valid;
+ u32 wake_msi;
+ u8 gpios_valid;
+ u32 gpios_len;
+ u32 gpios[QMI_WLFW_MAX_NUM_GPIO_V01];
+ u8 nm_modem_valid;
+ u8 nm_modem;
+ u8 bdf_support_valid;
+ u8 bdf_support;
+ u8 bdf_cache_support_valid;
+ u8 bdf_cache_support;
+ u8 m3_support_valid;
+ u8 m3_support;
+ u8 m3_cache_support_valid;
+ u8 m3_cache_support;
+ u8 cal_filesys_support_valid;
+ u8 cal_filesys_support;
+ u8 cal_cache_support_valid;
+ u8 cal_cache_support;
+ u8 cal_done_valid;
+ u8 cal_done;
+ u8 mem_bucket_valid;
+ u32 mem_bucket;
+ u8 mem_cfg_mode_valid;
+ u8 mem_cfg_mode;
+ u8 cal_duration_valid;
+ u16 cal_duraiton;
+ u8 platform_name_valid;
+ char platform_name[QMI_WLANFW_MAX_PLATFORM_NAME_LEN_V01 + 1];
+ u8 ddr_range_valid;
+ struct qmi_wlanfw_host_ddr_range ddr_range[QMI_WLANFW_MAX_HOST_DDR_RANGE_SIZE_V01];
+ u8 host_build_type_valid;
+ enum qmi_wlanfw_host_build_type host_build_type;
+ u8 mlo_capable_valid;
+ u8 mlo_capable;
+ u8 mlo_chip_id_valid;
+ u16 mlo_chip_id;
+ u8 mlo_group_id_valid;
+ u8 mlo_group_id;
+ u8 max_mlo_peer_valid;
+ u16 max_mlo_peer;
+ u8 mlo_num_chips_valid;
+ u8 mlo_num_chips;
+ u8 mlo_chip_info_valid;
+ struct wlfw_host_mlo_chip_info_s_v01 mlo_chip_info[QMI_WLFW_MAX_NUM_MLO_CHIPS_V01];
+ u8 feature_list_valid;
+ u64 feature_list;
+
+};
+
+struct qmi_wlanfw_host_cap_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define QMI_WLANFW_IND_REGISTER_REQ_MSG_V01_MAX_LEN 54
+#define QMI_WLANFW_IND_REGISTER_REQ_V01 0x0020
+#define QMI_WLANFW_IND_REGISTER_RESP_MSG_V01_MAX_LEN 18
+#define QMI_WLANFW_IND_REGISTER_RESP_V01 0x0020
+#define QMI_WLANFW_CLIENT_ID 0x4b4e454c
+
+struct qmi_wlanfw_ind_register_req_msg_v01 {
+ u8 fw_ready_enable_valid;
+ u8 fw_ready_enable;
+ u8 initiate_cal_download_enable_valid;
+ u8 initiate_cal_download_enable;
+ u8 initiate_cal_update_enable_valid;
+ u8 initiate_cal_update_enable;
+ u8 msa_ready_enable_valid;
+ u8 msa_ready_enable;
+ u8 pin_connect_result_enable_valid;
+ u8 pin_connect_result_enable;
+ u8 client_id_valid;
+ u32 client_id;
+ u8 request_mem_enable_valid;
+ u8 request_mem_enable;
+ u8 fw_mem_ready_enable_valid;
+ u8 fw_mem_ready_enable;
+ u8 fw_init_done_enable_valid;
+ u8 fw_init_done_enable;
+ u8 rejuvenate_enable_valid;
+ u32 rejuvenate_enable;
+ u8 xo_cal_enable_valid;
+ u8 xo_cal_enable;
+ u8 cal_done_enable_valid;
+ u8 cal_done_enable;
+};
+
+struct qmi_wlanfw_ind_register_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+ u8 fw_status_valid;
+ u64 fw_status;
+};
+
+#define QMI_WLANFW_REQUEST_MEM_IND_MSG_V01_MAX_LEN 1824
+#define QMI_WLANFW_RESPOND_MEM_REQ_MSG_V01_MAX_LEN 888
+#define QMI_WLANFW_RESPOND_MEM_RESP_MSG_V01_MAX_LEN 7
+#define QMI_WLANFW_REQUEST_MEM_IND_V01 0x0035
+#define QMI_WLANFW_RESPOND_MEM_REQ_V01 0x0036
+#define QMI_WLANFW_RESPOND_MEM_RESP_V01 0x0036
+#define QMI_WLANFW_MAX_NUM_MEM_CFG_V01 2
+#define QMI_WLANFW_MAX_STR_LEN_V01 16
+
+struct qmi_wlanfw_mem_cfg_s_v01 {
+ u64 offset;
+ u32 size;
+ u8 secure_flag;
+};
+
+enum qmi_wlanfw_mem_type_enum_v01 {
+ WLANFW_MEM_TYPE_ENUM_MIN_VAL_V01 = INT_MIN,
+ QMI_WLANFW_MEM_TYPE_MSA_V01 = 0,
+ QMI_WLANFW_MEM_TYPE_DDR_V01 = 1,
+ QMI_WLANFW_MEM_BDF_V01 = 2,
+ QMI_WLANFW_MEM_M3_V01 = 3,
+ QMI_WLANFW_MEM_CAL_V01 = 4,
+ QMI_WLANFW_MEM_DPD_V01 = 5,
+ WLANFW_MEM_TYPE_ENUM_MAX_VAL_V01 = INT_MAX,
+};
+
+struct qmi_wlanfw_mem_seg_s_v01 {
+ u32 size;
+ enum qmi_wlanfw_mem_type_enum_v01 type;
+ u32 mem_cfg_len;
+ struct qmi_wlanfw_mem_cfg_s_v01 mem_cfg[QMI_WLANFW_MAX_NUM_MEM_CFG_V01];
+};
+
+struct qmi_wlanfw_request_mem_ind_msg_v01 {
+ u32 mem_seg_len;
+ struct qmi_wlanfw_mem_seg_s_v01 mem_seg[ATH12K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01];
+};
+
+struct qmi_wlanfw_mem_seg_resp_s_v01 {
+ u64 addr;
+ u32 size;
+ enum qmi_wlanfw_mem_type_enum_v01 type;
+ u8 restore;
+};
+
+struct qmi_wlanfw_respond_mem_req_msg_v01 {
+ u32 mem_seg_len;
+ struct qmi_wlanfw_mem_seg_resp_s_v01 mem_seg[ATH12K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01];
+};
+
+struct qmi_wlanfw_respond_mem_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+struct qmi_wlanfw_fw_mem_ready_ind_msg_v01 {
+ char placeholder;
+};
+
+struct qmi_wlanfw_fw_ready_ind_msg_v01 {
+ char placeholder;
+};
+
+#define QMI_WLANFW_CAP_REQ_MSG_V01_MAX_LEN 0
+#define QMI_WLANFW_CAP_RESP_MSG_V01_MAX_LEN 207
+#define QMI_WLANFW_CAP_REQ_V01 0x0024
+#define QMI_WLANFW_CAP_RESP_V01 0x0024
+
+enum qmi_wlanfw_pipedir_enum_v01 {
+ QMI_WLFW_PIPEDIR_NONE_V01 = 0,
+ QMI_WLFW_PIPEDIR_IN_V01 = 1,
+ QMI_WLFW_PIPEDIR_OUT_V01 = 2,
+ QMI_WLFW_PIPEDIR_INOUT_V01 = 3,
+};
+
+struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01 {
+ __le32 pipe_num;
+ __le32 pipe_dir;
+ __le32 nentries;
+ __le32 nbytes_max;
+ __le32 flags;
+};
+
+struct qmi_wlanfw_ce_svc_pipe_cfg_s_v01 {
+ __le32 service_id;
+ __le32 pipe_dir;
+ __le32 pipe_num;
+};
+
+struct qmi_wlanfw_shadow_reg_cfg_s_v01 {
+ u16 id;
+ u16 offset;
+};
+
+struct qmi_wlanfw_shadow_reg_v3_cfg_s_v01 {
+ u32 addr;
+};
+
+struct qmi_wlanfw_memory_region_info_s_v01 {
+ u64 region_addr;
+ u32 size;
+ u8 secure_flag;
+};
+
+struct qmi_wlanfw_rf_chip_info_s_v01 {
+ u32 chip_id;
+ u32 chip_family;
+};
+
+struct qmi_wlanfw_rf_board_info_s_v01 {
+ u32 board_id;
+};
+
+struct qmi_wlanfw_soc_info_s_v01 {
+ u32 soc_id;
+};
+
+struct qmi_wlanfw_fw_version_info_s_v01 {
+ u32 fw_version;
+ char fw_build_timestamp[ATH12K_QMI_WLANFW_MAX_TIMESTAMP_LEN_V01 + 1];
+};
+
+struct qmi_wlanfw_dev_mem_info_s_v01 {
+ u64 start;
+ u64 size;
+};
+
+enum qmi_wlanfw_cal_temp_id_enum_v01 {
+ QMI_WLANFW_CAL_TEMP_IDX_0_V01 = 0,
+ QMI_WLANFW_CAL_TEMP_IDX_1_V01 = 1,
+ QMI_WLANFW_CAL_TEMP_IDX_2_V01 = 2,
+ QMI_WLANFW_CAL_TEMP_IDX_3_V01 = 3,
+ QMI_WLANFW_CAL_TEMP_IDX_4_V01 = 4,
+ QMI_WLANFW_CAL_TEMP_ID_MAX_V01 = 0xFF,
+};
+
+enum qmi_wlanfw_rd_card_chain_cap_v01 {
+ WLFW_RD_CARD_CHAIN_CAP_MIN_VAL_V01 = INT_MIN,
+ WLFW_RD_CARD_CHAIN_CAP_UNSPECIFIED_V01 = 0,
+ WLFW_RD_CARD_CHAIN_CAP_1x1_V01 = 1,
+ WLFW_RD_CARD_CHAIN_CAP_2x2_V01 = 2,
+ WLFW_RD_CARD_CHAIN_CAP_MAX_VAL_V01 = INT_MAX,
+};
+
+struct qmi_wlanfw_cap_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+ u8 chip_info_valid;
+ struct qmi_wlanfw_rf_chip_info_s_v01 chip_info;
+ u8 board_info_valid;
+ struct qmi_wlanfw_rf_board_info_s_v01 board_info;
+ u8 soc_info_valid;
+ struct qmi_wlanfw_soc_info_s_v01 soc_info;
+ u8 fw_version_info_valid;
+ struct qmi_wlanfw_fw_version_info_s_v01 fw_version_info;
+ u8 fw_build_id_valid;
+ char fw_build_id[ATH12K_QMI_WLANFW_MAX_BUILD_ID_LEN_V01 + 1];
+ u8 num_macs_valid;
+ u8 num_macs;
+ u8 voltage_mv_valid;
+ u32 voltage_mv;
+ u8 time_freq_hz_valid;
+ u32 time_freq_hz;
+ u8 otp_version_valid;
+ u32 otp_version;
+ u8 eeprom_caldata_read_timeout_valid;
+ u32 eeprom_caldata_read_timeout;
+ u8 fw_caps_valid;
+ u64 fw_caps;
+ u8 rd_card_chain_cap_valid;
+ enum qmi_wlanfw_rd_card_chain_cap_v01 rd_card_chain_cap;
+ u8 dev_mem_info_valid;
+ struct qmi_wlanfw_dev_mem_info_s_v01 dev_mem[ATH12K_QMI_WLFW_MAX_DEV_MEM_NUM_V01];
+};
+
+struct qmi_wlanfw_cap_req_msg_v01 {
+ char placeholder;
+};
+
+#define QMI_WLANFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_LEN 6182
+#define QMI_WLANFW_BDF_DOWNLOAD_RESP_MSG_V01_MAX_LEN 7
+#define QMI_WLANFW_BDF_DOWNLOAD_RESP_V01 0x0025
+#define QMI_WLANFW_BDF_DOWNLOAD_REQ_V01 0x0025
+/* TODO: Need to check with MCL and FW team that data can be pointer and
+ * can be last element in structure
+ */
+struct qmi_wlanfw_bdf_download_req_msg_v01 {
+ u8 valid;
+ u8 file_id_valid;
+ enum qmi_wlanfw_cal_temp_id_enum_v01 file_id;
+ u8 total_size_valid;
+ u32 total_size;
+ u8 seg_id_valid;
+ u32 seg_id;
+ u8 data_valid;
+ u32 data_len;
+ u8 data[QMI_WLANFW_MAX_DATA_SIZE_V01];
+ u8 end_valid;
+ u8 end;
+ u8 bdf_type_valid;
+ u8 bdf_type;
+
+};
+
+struct qmi_wlanfw_bdf_download_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define QMI_WLANFW_M3_INFO_REQ_MSG_V01_MAX_MSG_LEN 18
+#define QMI_WLANFW_M3_INFO_RESP_MSG_V01_MAX_MSG_LEN 7
+#define QMI_WLANFW_M3_INFO_RESP_V01 0x003C
+#define QMI_WLANFW_M3_INFO_REQ_V01 0x003C
+
+struct qmi_wlanfw_m3_info_req_msg_v01 {
+ u64 addr;
+ u32 size;
+};
+
+struct qmi_wlanfw_m3_info_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define QMI_WLANFW_WLAN_MODE_REQ_MSG_V01_MAX_LEN 11
+#define QMI_WLANFW_WLAN_MODE_RESP_MSG_V01_MAX_LEN 7
+#define QMI_WLANFW_WLAN_CFG_REQ_MSG_V01_MAX_LEN 803
+#define QMI_WLANFW_WLAN_CFG_RESP_MSG_V01_MAX_LEN 7
+#define QMI_WLANFW_WLAN_MODE_REQ_V01 0x0022
+#define QMI_WLANFW_WLAN_MODE_RESP_V01 0x0022
+#define QMI_WLANFW_WLAN_CFG_REQ_V01 0x0023
+#define QMI_WLANFW_WLAN_CFG_RESP_V01 0x0023
+#define QMI_WLANFW_MAX_STR_LEN_V01 16
+#define QMI_WLANFW_MAX_NUM_CE_V01 12
+#define QMI_WLANFW_MAX_NUM_SVC_V01 24
+#define QMI_WLANFW_MAX_NUM_SHADOW_REG_V01 24
+#define QMI_WLANFW_MAX_NUM_SHADOW_REG_V3_V01 60
+
+struct qmi_wlanfw_wlan_mode_req_msg_v01 {
+ u32 mode;
+ u8 hw_debug_valid;
+ u8 hw_debug;
+};
+
+struct qmi_wlanfw_wlan_mode_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+struct qmi_wlanfw_wlan_cfg_req_msg_v01 {
+ u8 host_version_valid;
+ char host_version[QMI_WLANFW_MAX_STR_LEN_V01 + 1];
+ u8 tgt_cfg_valid;
+ u32 tgt_cfg_len;
+ struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01
+ tgt_cfg[QMI_WLANFW_MAX_NUM_CE_V01];
+ u8 svc_cfg_valid;
+ u32 svc_cfg_len;
+ struct qmi_wlanfw_ce_svc_pipe_cfg_s_v01
+ svc_cfg[QMI_WLANFW_MAX_NUM_SVC_V01];
+ u8 shadow_reg_valid;
+ u32 shadow_reg_len;
+ struct qmi_wlanfw_shadow_reg_cfg_s_v01
+ shadow_reg[QMI_WLANFW_MAX_NUM_SHADOW_REG_V01];
+ u8 shadow_reg_v3_valid;
+ u32 shadow_reg_v3_len;
+ struct qmi_wlanfw_shadow_reg_v3_cfg_s_v01
+ shadow_reg_v3[QMI_WLANFW_MAX_NUM_SHADOW_REG_V3_V01];
+};
+
+struct qmi_wlanfw_wlan_cfg_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+int ath12k_qmi_firmware_start(struct ath12k_base *ab,
+ u32 mode);
+void ath12k_qmi_firmware_stop(struct ath12k_base *ab);
+void ath12k_qmi_event_work(struct work_struct *work);
+void ath12k_qmi_msg_recv_work(struct work_struct *work);
+void ath12k_qmi_deinit_service(struct ath12k_base *ab);
+int ath12k_qmi_init_service(struct ath12k_base *ab);
+
+#endif
diff --git a/drivers/net/wireless/ath/ath12k/reg.c b/drivers/net/wireless/ath/ath12k/reg.c
new file mode 100644
index 000000000000..6ede91ebc8e1
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/reg.c
@@ -0,0 +1,732 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+#include <linux/rtnetlink.h>
+#include "core.h"
+#include "debug.h"
+
+/* World regdom to be used in case default regd from fw is unavailable */
+#define ATH12K_2GHZ_CH01_11 REG_RULE(2412 - 10, 2462 + 10, 40, 0, 20, 0)
+#define ATH12K_5GHZ_5150_5350 REG_RULE(5150 - 10, 5350 + 10, 80, 0, 30,\
+ NL80211_RRF_NO_IR)
+#define ATH12K_5GHZ_5725_5850 REG_RULE(5725 - 10, 5850 + 10, 80, 0, 30,\
+ NL80211_RRF_NO_IR)
+
+#define ETSI_WEATHER_RADAR_BAND_LOW 5590
+#define ETSI_WEATHER_RADAR_BAND_HIGH 5650
+#define ETSI_WEATHER_RADAR_BAND_CAC_TIMEOUT 600000
+
+static const struct ieee80211_regdomain ath12k_world_regd = {
+ .n_reg_rules = 3,
+ .alpha2 = "00",
+ .reg_rules = {
+ ATH12K_2GHZ_CH01_11,
+ ATH12K_5GHZ_5150_5350,
+ ATH12K_5GHZ_5725_5850,
+ }
+};
+
+static bool ath12k_regdom_changes(struct ath12k *ar, char *alpha2)
+{
+ const struct ieee80211_regdomain *regd;
+
+ regd = rcu_dereference_rtnl(ar->hw->wiphy->regd);
+ /* This can happen during wiphy registration where the previous
+ * user request is received before we update the regd received
+ * from firmware.
+ */
+ if (!regd)
+ return true;
+
+ return memcmp(regd->alpha2, alpha2, 2) != 0;
+}
+
+static void
+ath12k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
+{
+ struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
+ struct ath12k_wmi_init_country_arg arg;
+ struct ath12k *ar = hw->priv;
+ int ret;
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_REG,
+ "Regulatory Notification received for %s\n", wiphy_name(wiphy));
+
+ /* Currently supporting only General User Hints. Cell base user
+ * hints to be handled later.
+ * Hints from other sources like Core, Beacons are not expected for
+ * self managed wiphy's
+ */
+ if (!(request->initiator == NL80211_REGDOM_SET_BY_USER &&
+ request->user_reg_hint_type == NL80211_USER_REG_HINT_USER)) {
+ ath12k_warn(ar->ab, "Unexpected Regulatory event for this wiphy\n");
+ return;
+ }
+
+ if (!IS_ENABLED(CONFIG_ATH_REG_DYNAMIC_USER_REG_HINTS)) {
+ ath12k_dbg(ar->ab, ATH12K_DBG_REG,
+ "Country Setting is not allowed\n");
+ return;
+ }
+
+ if (!ath12k_regdom_changes(ar, request->alpha2)) {
+ ath12k_dbg(ar->ab, ATH12K_DBG_REG, "Country is already set\n");
+ return;
+ }
+
+ /* Set the country code to the firmware and wait for
+ * the WMI_REG_CHAN_LIST_CC EVENT for updating the
+ * reg info
+ */
+ arg.flags = ALPHA_IS_SET;
+ memcpy(&arg.cc_info.alpha2, request->alpha2, 2);
+ arg.cc_info.alpha2[2] = 0;
+
+ ret = ath12k_wmi_send_init_country_cmd(ar, &arg);
+ if (ret)
+ ath12k_warn(ar->ab,
+ "INIT Country code set to fw failed : %d\n", ret);
+}
+
+int ath12k_reg_update_chan_list(struct ath12k *ar)
+{
+ struct ieee80211_supported_band **bands;
+ struct ath12k_wmi_scan_chan_list_arg *arg;
+ struct ieee80211_channel *channel;
+ struct ieee80211_hw *hw = ar->hw;
+ struct ath12k_wmi_channel_arg *ch;
+ enum nl80211_band band;
+ int num_channels = 0;
+ int i, ret;
+
+ bands = hw->wiphy->bands;
+ for (band = 0; band < NUM_NL80211_BANDS; band++) {
+ if (!bands[band])
+ continue;
+
+ for (i = 0; i < bands[band]->n_channels; i++) {
+ if (bands[band]->channels[i].flags &
+ IEEE80211_CHAN_DISABLED)
+ continue;
+
+ num_channels++;
+ }
+ }
+
+ if (WARN_ON(!num_channels))
+ return -EINVAL;
+
+ arg = kzalloc(struct_size(arg, channel, num_channels), GFP_KERNEL);
+
+ if (!arg)
+ return -ENOMEM;
+
+ arg->pdev_id = ar->pdev->pdev_id;
+ arg->nallchans = num_channels;
+
+ ch = arg->channel;
+
+ for (band = 0; band < NUM_NL80211_BANDS; band++) {
+ if (!bands[band])
+ continue;
+
+ for (i = 0; i < bands[band]->n_channels; i++) {
+ channel = &bands[band]->channels[i];
+
+ if (channel->flags & IEEE80211_CHAN_DISABLED)
+ continue;
+
+ /* TODO: Set to true/false based on some condition? */
+ ch->allow_ht = true;
+ ch->allow_vht = true;
+ ch->allow_he = true;
+
+ ch->dfs_set =
+ !!(channel->flags & IEEE80211_CHAN_RADAR);
+ ch->is_chan_passive = !!(channel->flags &
+ IEEE80211_CHAN_NO_IR);
+ ch->is_chan_passive |= ch->dfs_set;
+ ch->mhz = channel->center_freq;
+ ch->cfreq1 = channel->center_freq;
+ ch->minpower = 0;
+ ch->maxpower = channel->max_power * 2;
+ ch->maxregpower = channel->max_reg_power * 2;
+ ch->antennamax = channel->max_antenna_gain * 2;
+
+ /* TODO: Use appropriate phymodes */
+ if (channel->band == NL80211_BAND_2GHZ)
+ ch->phy_mode = MODE_11G;
+ else
+ ch->phy_mode = MODE_11A;
+
+ if (channel->band == NL80211_BAND_6GHZ &&
+ cfg80211_channel_is_psc(channel))
+ ch->psc_channel = true;
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "mac channel [%d/%d] freq %d maxpower %d regpower %d antenna %d mode %d\n",
+ i, arg->nallchans,
+ ch->mhz, ch->maxpower, ch->maxregpower,
+ ch->antennamax, ch->phy_mode);
+
+ ch++;
+ /* TODO: use quarrter/half rate, cfreq12, dfs_cfreq2
+ * set_agile, reg_class_idx
+ */
+ }
+ }
+
+ ret = ath12k_wmi_send_scan_chan_list_cmd(ar, arg);
+ kfree(arg);
+
+ return ret;
+}
+
+static void ath12k_copy_regd(struct ieee80211_regdomain *regd_orig,
+ struct ieee80211_regdomain *regd_copy)
+{
+ u8 i;
+
+ /* The caller should have checked error conditions */
+ memcpy(regd_copy, regd_orig, sizeof(*regd_orig));
+
+ for (i = 0; i < regd_orig->n_reg_rules; i++)
+ memcpy(&regd_copy->reg_rules[i], &regd_orig->reg_rules[i],
+ sizeof(struct ieee80211_reg_rule));
+}
+
+int ath12k_regd_update(struct ath12k *ar, bool init)
+{
+ struct ieee80211_regdomain *regd, *regd_copy = NULL;
+ int ret, regd_len, pdev_id;
+ struct ath12k_base *ab;
+
+ ab = ar->ab;
+ pdev_id = ar->pdev_idx;
+
+ spin_lock_bh(&ab->base_lock);
+
+ if (init) {
+ /* Apply the regd received during init through
+ * WMI_REG_CHAN_LIST_CC event. In case of failure to
+ * receive the regd, initialize with a default world
+ * regulatory.
+ */
+ if (ab->default_regd[pdev_id]) {
+ regd = ab->default_regd[pdev_id];
+ } else {
+ ath12k_warn(ab,
+ "failed to receive default regd during init\n");
+ regd = (struct ieee80211_regdomain *)&ath12k_world_regd;
+ }
+ } else {
+ regd = ab->new_regd[pdev_id];
+ }
+
+ if (!regd) {
+ ret = -EINVAL;
+ spin_unlock_bh(&ab->base_lock);
+ goto err;
+ }
+
+ regd_len = sizeof(*regd) + (regd->n_reg_rules *
+ sizeof(struct ieee80211_reg_rule));
+
+ regd_copy = kzalloc(regd_len, GFP_ATOMIC);
+ if (regd_copy)
+ ath12k_copy_regd(regd, regd_copy);
+
+ spin_unlock_bh(&ab->base_lock);
+
+ if (!regd_copy) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ rtnl_lock();
+ wiphy_lock(ar->hw->wiphy);
+ ret = regulatory_set_wiphy_regd_sync(ar->hw->wiphy, regd_copy);
+ wiphy_unlock(ar->hw->wiphy);
+ rtnl_unlock();
+
+ kfree(regd_copy);
+
+ if (ret)
+ goto err;
+
+ if (ar->state == ATH12K_STATE_ON) {
+ ret = ath12k_reg_update_chan_list(ar);
+ if (ret)
+ goto err;
+ }
+
+ return 0;
+err:
+ ath12k_warn(ab, "failed to perform regd update : %d\n", ret);
+ return ret;
+}
+
+static enum nl80211_dfs_regions
+ath12k_map_fw_dfs_region(enum ath12k_dfs_region dfs_region)
+{
+ switch (dfs_region) {
+ case ATH12K_DFS_REG_FCC:
+ case ATH12K_DFS_REG_CN:
+ return NL80211_DFS_FCC;
+ case ATH12K_DFS_REG_ETSI:
+ case ATH12K_DFS_REG_KR:
+ return NL80211_DFS_ETSI;
+ case ATH12K_DFS_REG_MKK:
+ case ATH12K_DFS_REG_MKK_N:
+ return NL80211_DFS_JP;
+ default:
+ return NL80211_DFS_UNSET;
+ }
+}
+
+static u32 ath12k_map_fw_reg_flags(u16 reg_flags)
+{
+ u32 flags = 0;
+
+ if (reg_flags & REGULATORY_CHAN_NO_IR)
+ flags = NL80211_RRF_NO_IR;
+
+ if (reg_flags & REGULATORY_CHAN_RADAR)
+ flags |= NL80211_RRF_DFS;
+
+ if (reg_flags & REGULATORY_CHAN_NO_OFDM)
+ flags |= NL80211_RRF_NO_OFDM;
+
+ if (reg_flags & REGULATORY_CHAN_INDOOR_ONLY)
+ flags |= NL80211_RRF_NO_OUTDOOR;
+
+ if (reg_flags & REGULATORY_CHAN_NO_HT40)
+ flags |= NL80211_RRF_NO_HT40;
+
+ if (reg_flags & REGULATORY_CHAN_NO_80MHZ)
+ flags |= NL80211_RRF_NO_80MHZ;
+
+ if (reg_flags & REGULATORY_CHAN_NO_160MHZ)
+ flags |= NL80211_RRF_NO_160MHZ;
+
+ return flags;
+}
+
+static bool
+ath12k_reg_can_intersect(struct ieee80211_reg_rule *rule1,
+ struct ieee80211_reg_rule *rule2)
+{
+ u32 start_freq1, end_freq1;
+ u32 start_freq2, end_freq2;
+
+ start_freq1 = rule1->freq_range.start_freq_khz;
+ start_freq2 = rule2->freq_range.start_freq_khz;
+
+ end_freq1 = rule1->freq_range.end_freq_khz;
+ end_freq2 = rule2->freq_range.end_freq_khz;
+
+ if ((start_freq1 >= start_freq2 &&
+ start_freq1 < end_freq2) ||
+ (start_freq2 > start_freq1 &&
+ start_freq2 < end_freq1))
+ return true;
+
+ /* TODO: Should we restrict intersection feasibility
+ * based on min bandwidth of the intersected region also,
+ * say the intersected rule should have a min bandwidth
+ * of 20MHz?
+ */
+
+ return false;
+}
+
+static void ath12k_reg_intersect_rules(struct ieee80211_reg_rule *rule1,
+ struct ieee80211_reg_rule *rule2,
+ struct ieee80211_reg_rule *new_rule)
+{
+ u32 start_freq1, end_freq1;
+ u32 start_freq2, end_freq2;
+ u32 freq_diff, max_bw;
+
+ start_freq1 = rule1->freq_range.start_freq_khz;
+ start_freq2 = rule2->freq_range.start_freq_khz;
+
+ end_freq1 = rule1->freq_range.end_freq_khz;
+ end_freq2 = rule2->freq_range.end_freq_khz;
+
+ new_rule->freq_range.start_freq_khz = max_t(u32, start_freq1,
+ start_freq2);
+ new_rule->freq_range.end_freq_khz = min_t(u32, end_freq1, end_freq2);
+
+ freq_diff = new_rule->freq_range.end_freq_khz -
+ new_rule->freq_range.start_freq_khz;
+ max_bw = min_t(u32, rule1->freq_range.max_bandwidth_khz,
+ rule2->freq_range.max_bandwidth_khz);
+ new_rule->freq_range.max_bandwidth_khz = min_t(u32, max_bw, freq_diff);
+
+ new_rule->power_rule.max_antenna_gain =
+ min_t(u32, rule1->power_rule.max_antenna_gain,
+ rule2->power_rule.max_antenna_gain);
+
+ new_rule->power_rule.max_eirp = min_t(u32, rule1->power_rule.max_eirp,
+ rule2->power_rule.max_eirp);
+
+ /* Use the flags of both the rules */
+ new_rule->flags = rule1->flags | rule2->flags;
+
+ /* To be safe, lts use the max cac timeout of both rules */
+ new_rule->dfs_cac_ms = max_t(u32, rule1->dfs_cac_ms,
+ rule2->dfs_cac_ms);
+}
+
+static struct ieee80211_regdomain *
+ath12k_regd_intersect(struct ieee80211_regdomain *default_regd,
+ struct ieee80211_regdomain *curr_regd)
+{
+ u8 num_old_regd_rules, num_curr_regd_rules, num_new_regd_rules;
+ struct ieee80211_reg_rule *old_rule, *curr_rule, *new_rule;
+ struct ieee80211_regdomain *new_regd = NULL;
+ u8 i, j, k;
+
+ num_old_regd_rules = default_regd->n_reg_rules;
+ num_curr_regd_rules = curr_regd->n_reg_rules;
+ num_new_regd_rules = 0;
+
+ /* Find the number of intersecting rules to allocate new regd memory */
+ for (i = 0; i < num_old_regd_rules; i++) {
+ old_rule = default_regd->reg_rules + i;
+ for (j = 0; j < num_curr_regd_rules; j++) {
+ curr_rule = curr_regd->reg_rules + j;
+
+ if (ath12k_reg_can_intersect(old_rule, curr_rule))
+ num_new_regd_rules++;
+ }
+ }
+
+ if (!num_new_regd_rules)
+ return NULL;
+
+ new_regd = kzalloc(sizeof(*new_regd) + (num_new_regd_rules *
+ sizeof(struct ieee80211_reg_rule)),
+ GFP_ATOMIC);
+
+ if (!new_regd)
+ return NULL;
+
+ /* We set the new country and dfs region directly and only trim
+ * the freq, power, antenna gain by intersecting with the
+ * default regdomain. Also MAX of the dfs cac timeout is selected.
+ */
+ new_regd->n_reg_rules = num_new_regd_rules;
+ memcpy(new_regd->alpha2, curr_regd->alpha2, sizeof(new_regd->alpha2));
+ new_regd->dfs_region = curr_regd->dfs_region;
+ new_rule = new_regd->reg_rules;
+
+ for (i = 0, k = 0; i < num_old_regd_rules; i++) {
+ old_rule = default_regd->reg_rules + i;
+ for (j = 0; j < num_curr_regd_rules; j++) {
+ curr_rule = curr_regd->reg_rules + j;
+
+ if (ath12k_reg_can_intersect(old_rule, curr_rule))
+ ath12k_reg_intersect_rules(old_rule, curr_rule,
+ (new_rule + k++));
+ }
+ }
+ return new_regd;
+}
+
+static const char *
+ath12k_reg_get_regdom_str(enum nl80211_dfs_regions dfs_region)
+{
+ switch (dfs_region) {
+ case NL80211_DFS_FCC:
+ return "FCC";
+ case NL80211_DFS_ETSI:
+ return "ETSI";
+ case NL80211_DFS_JP:
+ return "JP";
+ default:
+ return "UNSET";
+ }
+}
+
+static u16
+ath12k_reg_adjust_bw(u16 start_freq, u16 end_freq, u16 max_bw)
+{
+ u16 bw;
+
+ bw = end_freq - start_freq;
+ bw = min_t(u16, bw, max_bw);
+
+ if (bw >= 80 && bw < 160)
+ bw = 80;
+ else if (bw >= 40 && bw < 80)
+ bw = 40;
+ else if (bw < 40)
+ bw = 20;
+
+ return bw;
+}
+
+static void
+ath12k_reg_update_rule(struct ieee80211_reg_rule *reg_rule, u32 start_freq,
+ u32 end_freq, u32 bw, u32 ant_gain, u32 reg_pwr,
+ u32 reg_flags)
+{
+ reg_rule->freq_range.start_freq_khz = MHZ_TO_KHZ(start_freq);
+ reg_rule->freq_range.end_freq_khz = MHZ_TO_KHZ(end_freq);
+ reg_rule->freq_range.max_bandwidth_khz = MHZ_TO_KHZ(bw);
+ reg_rule->power_rule.max_antenna_gain = DBI_TO_MBI(ant_gain);
+ reg_rule->power_rule.max_eirp = DBM_TO_MBM(reg_pwr);
+ reg_rule->flags = reg_flags;
+}
+
+static void
+ath12k_reg_update_weather_radar_band(struct ath12k_base *ab,
+ struct ieee80211_regdomain *regd,
+ struct ath12k_reg_rule *reg_rule,
+ u8 *rule_idx, u32 flags, u16 max_bw)
+{
+ u32 end_freq;
+ u16 bw;
+ u8 i;
+
+ i = *rule_idx;
+
+ bw = ath12k_reg_adjust_bw(reg_rule->start_freq,
+ ETSI_WEATHER_RADAR_BAND_LOW, max_bw);
+
+ ath12k_reg_update_rule(regd->reg_rules + i, reg_rule->start_freq,
+ ETSI_WEATHER_RADAR_BAND_LOW, bw,
+ reg_rule->ant_gain, reg_rule->reg_power,
+ flags);
+
+ ath12k_dbg(ab, ATH12K_DBG_REG,
+ "\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n",
+ i + 1, reg_rule->start_freq, ETSI_WEATHER_RADAR_BAND_LOW,
+ bw, reg_rule->ant_gain, reg_rule->reg_power,
+ regd->reg_rules[i].dfs_cac_ms,
+ flags);
+
+ if (reg_rule->end_freq > ETSI_WEATHER_RADAR_BAND_HIGH)
+ end_freq = ETSI_WEATHER_RADAR_BAND_HIGH;
+ else
+ end_freq = reg_rule->end_freq;
+
+ bw = ath12k_reg_adjust_bw(ETSI_WEATHER_RADAR_BAND_LOW, end_freq,
+ max_bw);
+
+ i++;
+
+ ath12k_reg_update_rule(regd->reg_rules + i,
+ ETSI_WEATHER_RADAR_BAND_LOW, end_freq, bw,
+ reg_rule->ant_gain, reg_rule->reg_power,
+ flags);
+
+ regd->reg_rules[i].dfs_cac_ms = ETSI_WEATHER_RADAR_BAND_CAC_TIMEOUT;
+
+ ath12k_dbg(ab, ATH12K_DBG_REG,
+ "\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n",
+ i + 1, ETSI_WEATHER_RADAR_BAND_LOW, end_freq,
+ bw, reg_rule->ant_gain, reg_rule->reg_power,
+ regd->reg_rules[i].dfs_cac_ms,
+ flags);
+
+ if (end_freq == reg_rule->end_freq) {
+ regd->n_reg_rules--;
+ *rule_idx = i;
+ return;
+ }
+
+ bw = ath12k_reg_adjust_bw(ETSI_WEATHER_RADAR_BAND_HIGH,
+ reg_rule->end_freq, max_bw);
+
+ i++;
+
+ ath12k_reg_update_rule(regd->reg_rules + i, ETSI_WEATHER_RADAR_BAND_HIGH,
+ reg_rule->end_freq, bw,
+ reg_rule->ant_gain, reg_rule->reg_power,
+ flags);
+
+ ath12k_dbg(ab, ATH12K_DBG_REG,
+ "\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n",
+ i + 1, ETSI_WEATHER_RADAR_BAND_HIGH, reg_rule->end_freq,
+ bw, reg_rule->ant_gain, reg_rule->reg_power,
+ regd->reg_rules[i].dfs_cac_ms,
+ flags);
+
+ *rule_idx = i;
+}
+
+struct ieee80211_regdomain *
+ath12k_reg_build_regd(struct ath12k_base *ab,
+ struct ath12k_reg_info *reg_info, bool intersect)
+{
+ struct ieee80211_regdomain *tmp_regd, *default_regd, *new_regd = NULL;
+ struct ath12k_reg_rule *reg_rule;
+ u8 i = 0, j = 0, k = 0;
+ u8 num_rules;
+ u16 max_bw;
+ u32 flags;
+ char alpha2[3];
+
+ num_rules = reg_info->num_5g_reg_rules + reg_info->num_2g_reg_rules;
+
+ /* FIXME: Currently taking reg rules for 6G only from Indoor AP mode list.
+ * This can be updated to choose the combination dynamically based on AP
+ * type and client type, after complete 6G regulatory support is added.
+ */
+ if (reg_info->is_ext_reg_event)
+ num_rules += reg_info->num_6g_reg_rules_ap[WMI_REG_INDOOR_AP];
+
+ if (!num_rules)
+ goto ret;
+
+ /* Add max additional rules to accommodate weather radar band */
+ if (reg_info->dfs_region == ATH12K_DFS_REG_ETSI)
+ num_rules += 2;
+
+ tmp_regd = kzalloc(sizeof(*tmp_regd) +
+ (num_rules * sizeof(struct ieee80211_reg_rule)),
+ GFP_ATOMIC);
+ if (!tmp_regd)
+ goto ret;
+
+ memcpy(tmp_regd->alpha2, reg_info->alpha2, REG_ALPHA2_LEN + 1);
+ memcpy(alpha2, reg_info->alpha2, REG_ALPHA2_LEN + 1);
+ alpha2[2] = '\0';
+ tmp_regd->dfs_region = ath12k_map_fw_dfs_region(reg_info->dfs_region);
+
+ ath12k_dbg(ab, ATH12K_DBG_REG,
+ "\r\nCountry %s, CFG Regdomain %s FW Regdomain %d, num_reg_rules %d\n",
+ alpha2, ath12k_reg_get_regdom_str(tmp_regd->dfs_region),
+ reg_info->dfs_region, num_rules);
+ /* Update reg_rules[] below. Firmware is expected to
+ * send these rules in order(2G rules first and then 5G)
+ */
+ for (; i < num_rules; i++) {
+ if (reg_info->num_2g_reg_rules &&
+ (i < reg_info->num_2g_reg_rules)) {
+ reg_rule = reg_info->reg_rules_2g_ptr + i;
+ max_bw = min_t(u16, reg_rule->max_bw,
+ reg_info->max_bw_2g);
+ flags = 0;
+ } else if (reg_info->num_5g_reg_rules &&
+ (j < reg_info->num_5g_reg_rules)) {
+ reg_rule = reg_info->reg_rules_5g_ptr + j++;
+ max_bw = min_t(u16, reg_rule->max_bw,
+ reg_info->max_bw_5g);
+
+ /* FW doesn't pass NL80211_RRF_AUTO_BW flag for
+ * BW Auto correction, we can enable this by default
+ * for all 5G rules here. The regulatory core performs
+ * BW correction if required and applies flags as
+ * per other BW rule flags we pass from here
+ */
+ flags = NL80211_RRF_AUTO_BW;
+ } else if (reg_info->is_ext_reg_event &&
+ reg_info->num_6g_reg_rules_ap[WMI_REG_INDOOR_AP] &&
+ (k < reg_info->num_6g_reg_rules_ap[WMI_REG_INDOOR_AP])) {
+ reg_rule = reg_info->reg_rules_6g_ap_ptr[WMI_REG_INDOOR_AP] + k++;
+ max_bw = min_t(u16, reg_rule->max_bw,
+ reg_info->max_bw_6g_ap[WMI_REG_INDOOR_AP]);
+ flags = NL80211_RRF_AUTO_BW;
+ } else {
+ break;
+ }
+
+ flags |= ath12k_map_fw_reg_flags(reg_rule->flags);
+
+ ath12k_reg_update_rule(tmp_regd->reg_rules + i,
+ reg_rule->start_freq,
+ reg_rule->end_freq, max_bw,
+ reg_rule->ant_gain, reg_rule->reg_power,
+ flags);
+
+ /* Update dfs cac timeout if the dfs domain is ETSI and the
+ * new rule covers weather radar band.
+ * Default value of '0' corresponds to 60s timeout, so no
+ * need to update that for other rules.
+ */
+ if (flags & NL80211_RRF_DFS &&
+ reg_info->dfs_region == ATH12K_DFS_REG_ETSI &&
+ (reg_rule->end_freq > ETSI_WEATHER_RADAR_BAND_LOW &&
+ reg_rule->start_freq < ETSI_WEATHER_RADAR_BAND_HIGH)){
+ ath12k_reg_update_weather_radar_band(ab, tmp_regd,
+ reg_rule, &i,
+ flags, max_bw);
+ continue;
+ }
+
+ if (reg_info->is_ext_reg_event) {
+ ath12k_dbg(ab, ATH12K_DBG_REG, "\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d) (%d, %d)\n",
+ i + 1, reg_rule->start_freq, reg_rule->end_freq,
+ max_bw, reg_rule->ant_gain, reg_rule->reg_power,
+ tmp_regd->reg_rules[i].dfs_cac_ms,
+ flags, reg_rule->psd_flag, reg_rule->psd_eirp);
+ } else {
+ ath12k_dbg(ab, ATH12K_DBG_REG,
+ "\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n",
+ i + 1, reg_rule->start_freq, reg_rule->end_freq,
+ max_bw, reg_rule->ant_gain, reg_rule->reg_power,
+ tmp_regd->reg_rules[i].dfs_cac_ms,
+ flags);
+ }
+ }
+
+ tmp_regd->n_reg_rules = i;
+
+ if (intersect) {
+ default_regd = ab->default_regd[reg_info->phy_id];
+
+ /* Get a new regd by intersecting the received regd with
+ * our default regd.
+ */
+ new_regd = ath12k_regd_intersect(default_regd, tmp_regd);
+ kfree(tmp_regd);
+ if (!new_regd) {
+ ath12k_warn(ab, "Unable to create intersected regdomain\n");
+ goto ret;
+ }
+ } else {
+ new_regd = tmp_regd;
+ }
+
+ret:
+ return new_regd;
+}
+
+void ath12k_regd_update_work(struct work_struct *work)
+{
+ struct ath12k *ar = container_of(work, struct ath12k,
+ regd_update_work);
+ int ret;
+
+ ret = ath12k_regd_update(ar, false);
+ if (ret) {
+ /* Firmware has already moved to the new regd. We need
+ * to maintain channel consistency across FW, Host driver
+ * and userspace. Hence as a fallback mechanism we can set
+ * the prev or default country code to the firmware.
+ */
+ /* TODO: Implement Fallback Mechanism */
+ }
+}
+
+void ath12k_reg_init(struct ath12k *ar)
+{
+ ar->hw->wiphy->regulatory_flags = REGULATORY_WIPHY_SELF_MANAGED;
+ ar->hw->wiphy->reg_notifier = ath12k_reg_notifier;
+}
+
+void ath12k_reg_free(struct ath12k_base *ab)
+{
+ int i;
+
+ for (i = 0; i < ab->hw_params->max_radios; i++) {
+ kfree(ab->default_regd[i]);
+ kfree(ab->new_regd[i]);
+ }
+}
diff --git a/drivers/net/wireless/ath/ath12k/reg.h b/drivers/net/wireless/ath/ath12k/reg.h
new file mode 100644
index 000000000000..56d009a47234
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/reg.h
@@ -0,0 +1,95 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef ATH12K_REG_H
+#define ATH12K_REG_H
+
+#include <linux/kernel.h>
+#include <net/regulatory.h>
+
+struct ath12k_base;
+struct ath12k;
+
+/* DFS regdomains supported by Firmware */
+enum ath12k_dfs_region {
+ ATH12K_DFS_REG_UNSET,
+ ATH12K_DFS_REG_FCC,
+ ATH12K_DFS_REG_ETSI,
+ ATH12K_DFS_REG_MKK,
+ ATH12K_DFS_REG_CN,
+ ATH12K_DFS_REG_KR,
+ ATH12K_DFS_REG_MKK_N,
+ ATH12K_DFS_REG_UNDEF,
+};
+
+enum ath12k_reg_cc_code {
+ REG_SET_CC_STATUS_PASS = 0,
+ REG_CURRENT_ALPHA2_NOT_FOUND = 1,
+ REG_INIT_ALPHA2_NOT_FOUND = 2,
+ REG_SET_CC_CHANGE_NOT_ALLOWED = 3,
+ REG_SET_CC_STATUS_NO_MEMORY = 4,
+ REG_SET_CC_STATUS_FAIL = 5,
+};
+
+struct ath12k_reg_rule {
+ u16 start_freq;
+ u16 end_freq;
+ u16 max_bw;
+ u8 reg_power;
+ u8 ant_gain;
+ u16 flags;
+ bool psd_flag;
+ u16 psd_eirp;
+};
+
+struct ath12k_reg_info {
+ enum ath12k_reg_cc_code status_code;
+ u8 num_phy;
+ u8 phy_id;
+ u16 reg_dmn_pair;
+ u16 ctry_code;
+ u8 alpha2[REG_ALPHA2_LEN + 1];
+ u32 dfs_region;
+ u32 phybitmap;
+ bool is_ext_reg_event;
+ u32 min_bw_2g;
+ u32 max_bw_2g;
+ u32 min_bw_5g;
+ u32 max_bw_5g;
+ u32 num_2g_reg_rules;
+ u32 num_5g_reg_rules;
+ struct ath12k_reg_rule *reg_rules_2g_ptr;
+ struct ath12k_reg_rule *reg_rules_5g_ptr;
+ enum wmi_reg_6g_client_type client_type;
+ bool rnr_tpe_usable;
+ bool unspecified_ap_usable;
+ /* TODO: All 6G related info can be stored only for required
+ * combination instead of all types, to optimize memory usage.
+ */
+ u8 domain_code_6g_ap[WMI_REG_CURRENT_MAX_AP_TYPE];
+ u8 domain_code_6g_client[WMI_REG_CURRENT_MAX_AP_TYPE][WMI_REG_MAX_CLIENT_TYPE];
+ u32 domain_code_6g_super_id;
+ u32 min_bw_6g_ap[WMI_REG_CURRENT_MAX_AP_TYPE];
+ u32 max_bw_6g_ap[WMI_REG_CURRENT_MAX_AP_TYPE];
+ u32 min_bw_6g_client[WMI_REG_CURRENT_MAX_AP_TYPE][WMI_REG_MAX_CLIENT_TYPE];
+ u32 max_bw_6g_client[WMI_REG_CURRENT_MAX_AP_TYPE][WMI_REG_MAX_CLIENT_TYPE];
+ u32 num_6g_reg_rules_ap[WMI_REG_CURRENT_MAX_AP_TYPE];
+ u32 num_6g_reg_rules_cl[WMI_REG_CURRENT_MAX_AP_TYPE][WMI_REG_MAX_CLIENT_TYPE];
+ struct ath12k_reg_rule *reg_rules_6g_ap_ptr[WMI_REG_CURRENT_MAX_AP_TYPE];
+ struct ath12k_reg_rule *reg_rules_6g_client_ptr
+ [WMI_REG_CURRENT_MAX_AP_TYPE][WMI_REG_MAX_CLIENT_TYPE];
+};
+
+void ath12k_reg_init(struct ath12k *ar);
+void ath12k_reg_free(struct ath12k_base *ab);
+void ath12k_regd_update_work(struct work_struct *work);
+struct ieee80211_regdomain *ath12k_reg_build_regd(struct ath12k_base *ab,
+ struct ath12k_reg_info *reg_info,
+ bool intersect);
+int ath12k_regd_update(struct ath12k *ar, bool init);
+int ath12k_reg_update_chan_list(struct ath12k *ar);
+
+#endif
diff --git a/drivers/net/wireless/ath/ath12k/rx_desc.h b/drivers/net/wireless/ath/ath12k/rx_desc.h
new file mode 100644
index 000000000000..5feaff6450ad
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/rx_desc.h
@@ -0,0 +1,1441 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+#ifndef ATH12K_RX_DESC_H
+#define ATH12K_RX_DESC_H
+
+enum rx_desc_decap_type {
+ RX_DESC_DECAP_TYPE_RAW,
+ RX_DESC_DECAP_TYPE_NATIVE_WIFI,
+ RX_DESC_DECAP_TYPE_ETHERNET2_DIX,
+ RX_DESC_DECAP_TYPE_8023,
+};
+
+enum rx_desc_decrypt_status_code {
+ RX_DESC_DECRYPT_STATUS_CODE_OK,
+ RX_DESC_DECRYPT_STATUS_CODE_UNPROTECTED_FRAME,
+ RX_DESC_DECRYPT_STATUS_CODE_DATA_ERR,
+ RX_DESC_DECRYPT_STATUS_CODE_KEY_INVALID,
+ RX_DESC_DECRYPT_STATUS_CODE_PEER_ENTRY_INVALID,
+ RX_DESC_DECRYPT_STATUS_CODE_OTHER,
+};
+
+#define RX_MPDU_START_INFO0_REO_DEST_IND GENMASK(4, 0)
+#define RX_MPDU_START_INFO0_LMAC_PEER_ID_MSB GENMASK(6, 5)
+#define RX_MPDU_START_INFO0_FLOW_ID_TOEPLITZ BIT(7)
+#define RX_MPDU_START_INFO0_PKT_SEL_FP_UCAST_DATA BIT(8)
+#define RX_MPDU_START_INFO0_PKT_SEL_FP_MCAST_DATA BIT(9)
+#define RX_MPDU_START_INFO0_PKT_SEL_FP_CTRL_BAR BIT(10)
+#define RX_MPDU_START_INFO0_RXDMA0_SRC_RING_SEL GENMASK(13, 11)
+#define RX_MPDU_START_INFO0_RXDMA0_DST_RING_SEL GENMASK(16, 14)
+#define RX_MPDU_START_INFO0_MCAST_ECHO_DROP_EN BIT(17)
+#define RX_MPDU_START_INFO0_WDS_LEARN_DETECT_EN BIT(18)
+#define RX_MPDU_START_INFO0_INTRA_BSS_CHECK_EN BIT(19)
+#define RX_MPDU_START_INFO0_USE_PPE BIT(20)
+#define RX_MPDU_START_INFO0_PPE_ROUTING_EN BIT(21)
+
+#define RX_MPDU_START_INFO1_REO_QUEUE_DESC_HI GENMASK(7, 0)
+#define RX_MPDU_START_INFO1_RECV_QUEUE_NUM GENMASK(23, 8)
+#define RX_MPDU_START_INFO1_PRE_DELIM_ERR_WARN BIT(24)
+#define RX_MPDU_START_INFO1_FIRST_DELIM_ERR BIT(25)
+
+#define RX_MPDU_START_INFO2_EPD_EN BIT(0)
+#define RX_MPDU_START_INFO2_ALL_FRAME_ENCPD BIT(1)
+#define RX_MPDU_START_INFO2_ENC_TYPE GENMASK(5, 2)
+#define RX_MPDU_START_INFO2_VAR_WEP_KEY_WIDTH GENMASK(7, 6)
+#define RX_MPDU_START_INFO2_MESH_STA GENMASK(9, 8)
+#define RX_MPDU_START_INFO2_BSSID_HIT BIT(10)
+#define RX_MPDU_START_INFO2_BSSID_NUM GENMASK(14, 11)
+#define RX_MPDU_START_INFO2_TID GENMASK(18, 15)
+
+#define RX_MPDU_START_INFO3_RXPCU_MPDU_FLTR GENMASK(1, 0)
+#define RX_MPDU_START_INFO3_SW_FRAME_GRP_ID GENMASK(8, 2)
+#define RX_MPDU_START_INFO3_NDP_FRAME BIT(9)
+#define RX_MPDU_START_INFO3_PHY_ERR BIT(10)
+#define RX_MPDU_START_INFO3_PHY_ERR_MPDU_HDR BIT(11)
+#define RX_MPDU_START_INFO3_PROTO_VER_ERR BIT(12)
+#define RX_MPDU_START_INFO3_AST_LOOKUP_VALID BIT(13)
+#define RX_MPDU_START_INFO3_RANGING BIT(14)
+
+#define RX_MPDU_START_INFO4_MPDU_FCTRL_VALID BIT(0)
+#define RX_MPDU_START_INFO4_MPDU_DUR_VALID BIT(1)
+#define RX_MPDU_START_INFO4_MAC_ADDR1_VALID BIT(2)
+#define RX_MPDU_START_INFO4_MAC_ADDR2_VALID BIT(3)
+#define RX_MPDU_START_INFO4_MAC_ADDR3_VALID BIT(4)
+#define RX_MPDU_START_INFO4_MAC_ADDR4_VALID BIT(5)
+#define RX_MPDU_START_INFO4_MPDU_SEQ_CTRL_VALID BIT(6)
+#define RX_MPDU_START_INFO4_MPDU_QOS_CTRL_VALID BIT(7)
+#define RX_MPDU_START_INFO4_MPDU_HT_CTRL_VALID BIT(8)
+#define RX_MPDU_START_INFO4_ENCRYPT_INFO_VALID BIT(9)
+#define RX_MPDU_START_INFO4_MPDU_FRAG_NUMBER GENMASK(13, 10)
+#define RX_MPDU_START_INFO4_MORE_FRAG_FLAG BIT(14)
+#define RX_MPDU_START_INFO4_FROM_DS BIT(16)
+#define RX_MPDU_START_INFO4_TO_DS BIT(17)
+#define RX_MPDU_START_INFO4_ENCRYPTED BIT(18)
+#define RX_MPDU_START_INFO4_MPDU_RETRY BIT(19)
+#define RX_MPDU_START_INFO4_MPDU_SEQ_NUM GENMASK(31, 20)
+
+#define RX_MPDU_START_INFO5_KEY_ID GENMASK(7, 0)
+#define RX_MPDU_START_INFO5_NEW_PEER_ENTRY BIT(8)
+#define RX_MPDU_START_INFO5_DECRYPT_NEEDED BIT(9)
+#define RX_MPDU_START_INFO5_DECAP_TYPE GENMASK(11, 10)
+#define RX_MPDU_START_INFO5_VLAN_TAG_C_PADDING BIT(12)
+#define RX_MPDU_START_INFO5_VLAN_TAG_S_PADDING BIT(13)
+#define RX_MPDU_START_INFO5_STRIP_VLAN_TAG_C BIT(14)
+#define RX_MPDU_START_INFO5_STRIP_VLAN_TAG_S BIT(15)
+#define RX_MPDU_START_INFO5_PRE_DELIM_COUNT GENMASK(27, 16)
+#define RX_MPDU_START_INFO5_AMPDU_FLAG BIT(28)
+#define RX_MPDU_START_INFO5_BAR_FRAME BIT(29)
+#define RX_MPDU_START_INFO5_RAW_MPDU BIT(30)
+
+#define RX_MPDU_START_INFO6_MPDU_LEN GENMASK(13, 0)
+#define RX_MPDU_START_INFO6_FIRST_MPDU BIT(14)
+#define RX_MPDU_START_INFO6_MCAST_BCAST BIT(15)
+#define RX_MPDU_START_INFO6_AST_IDX_NOT_FOUND BIT(16)
+#define RX_MPDU_START_INFO6_AST_IDX_TIMEOUT BIT(17)
+#define RX_MPDU_START_INFO6_POWER_MGMT BIT(18)
+#define RX_MPDU_START_INFO6_NON_QOS BIT(19)
+#define RX_MPDU_START_INFO6_NULL_DATA BIT(20)
+#define RX_MPDU_START_INFO6_MGMT_TYPE BIT(21)
+#define RX_MPDU_START_INFO6_CTRL_TYPE BIT(22)
+#define RX_MPDU_START_INFO6_MORE_DATA BIT(23)
+#define RX_MPDU_START_INFO6_EOSP BIT(24)
+#define RX_MPDU_START_INFO6_FRAGMENT BIT(25)
+#define RX_MPDU_START_INFO6_ORDER BIT(26)
+#define RX_MPDU_START_INFO6_UAPSD_TRIGGER BIT(27)
+#define RX_MPDU_START_INFO6_ENCRYPT_REQUIRED BIT(28)
+#define RX_MPDU_START_INFO6_DIRECTED BIT(29)
+#define RX_MPDU_START_INFO6_AMSDU_PRESENT BIT(30)
+
+#define RX_MPDU_START_INFO7_VDEV_ID GENMASK(7, 0)
+#define RX_MPDU_START_INFO7_SERVICE_CODE GENMASK(16, 8)
+#define RX_MPDU_START_INFO7_PRIORITY_VALID BIT(17)
+#define RX_MPDU_START_INFO7_SRC_INFO GENMASK(29, 18)
+
+#define RX_MPDU_START_INFO8_AUTH_TO_SEND_WDS BIT(0)
+
+struct rx_mpdu_start_qcn9274 {
+ __le32 info0;
+ __le32 reo_queue_desc_lo;
+ __le32 info1;
+ __le32 pn[4];
+ __le32 info2;
+ __le32 peer_meta_data;
+ __le16 info3;
+ __le16 phy_ppdu_id;
+ __le16 ast_index;
+ __le16 sw_peer_id;
+ __le32 info4;
+ __le32 info5;
+ __le32 info6;
+ __le16 frame_ctrl;
+ __le16 duration;
+ u8 addr1[ETH_ALEN];
+ u8 addr2[ETH_ALEN];
+ u8 addr3[ETH_ALEN];
+ __le16 seq_ctrl;
+ u8 addr4[ETH_ALEN];
+ __le16 qos_ctrl;
+ __le32 ht_ctrl;
+ __le32 info7;
+ u8 multi_link_addr1[ETH_ALEN];
+ u8 multi_link_addr2[ETH_ALEN];
+ __le32 info8;
+ __le32 res0;
+ __le32 res1;
+} __packed;
+
+/* rx_mpdu_start
+ *
+ * reo_destination_indication
+ * The id of the reo exit ring where the msdu frame shall push
+ * after (MPDU level) reordering has finished. Values are defined
+ * in enum %HAL_RX_MSDU_DESC_REO_DEST_IND_.
+ *
+ * lmac_peer_id_msb
+ *
+ * If use_flow_id_toeplitz_clfy is set and lmac_peer_id_'sb
+ * is 2'b00, Rx OLE uses a REO destination indicati'n of {1'b1,
+ * hash[3:0]} using the chosen Toeplitz hash from Common Parser
+ * if flow search fails.
+ * If use_flow_id_toeplitz_clfy is set and lmac_peer_id_msb
+ * 's not 2'b00, Rx OLE uses a REO destination indication of
+ * {lmac_peer_id_msb, hash[2:0]} using the chosen Toeplitz
+ * hash from Common Parser if flow search fails.
+ *
+ * use_flow_id_toeplitz_clfy
+ * Indication to Rx OLE to enable REO destination routing based
+ * on the chosen Toeplitz hash from Common Parser, in case
+ * flow search fails
+ *
+ * pkt_selection_fp_ucast_data
+ * Filter pass Unicast data frame (matching rxpcu_filter_pass
+ * and sw_frame_group_Unicast_data) routing selection
+ *
+ * pkt_selection_fp_mcast_data
+ * Filter pass Multicast data frame (matching rxpcu_filter_pass
+ * and sw_frame_group_Multicast_data) routing selection
+ *
+ * pkt_selection_fp_ctrl_bar
+ * Filter pass BAR frame (matching rxpcu_filter_pass
+ * and sw_frame_group_ctrl_1000) routing selection
+ *
+ * rxdma0_src_ring_selection
+ * Field only valid when for the received frame type the corresponding
+ * pkt_selection_fp_... bit is set
+ *
+ * rxdma0_dst_ring_selection
+ * Field only valid when for the received frame type the corresponding
+ * pkt_selection_fp_... bit is set
+ *
+ * mcast_echo_drop_enable
+ * If set, for multicast packets, multicast echo check (i.e.
+ * SA search with mcast_echo_check = 1) shall be performed
+ * by RXOLE, and any multicast echo packets should be indicated
+ * to RXDMA for release to WBM
+ *
+ * wds_learning_detect_en
+ * If set, WDS learning detection based on SA search and notification
+ * to FW (using RXDMA0 status ring) is enabled and the "timestamp"
+ * field in address search failure cache-only entry should
+ * be used to avoid multiple WDS learning notifications.
+ *
+ * intrabss_check_en
+ * If set, intra-BSS routing detection is enabled
+ *
+ * use_ppe
+ * Indicates to RXDMA to ignore the REO_destination_indication
+ * and use a programmed value corresponding to the REO2PPE
+ * ring
+ * This override to REO2PPE for packets requiring multiple
+ * buffers shall be disabled based on an RXDMA configuration,
+ * as PPE may not support such packets.
+ *
+ * Supported only in full AP chips, not in client/soft
+ * chips
+ *
+ * ppe_routing_enable
+ * Global enable/disable bit for routing to PPE, used to disable
+ * PPE routing even if RXOLE CCE or flow search indicate 'Use_PPE'
+ * This is set by SW for peers which are being handled by a
+ * host SW/accelerator subsystem that also handles packet
+ * uffer management for WiFi-to-PPE routing.
+ *
+ * This is cleared by SW for peers which are being handled
+ * by a different subsystem, completely disabling WiFi-to-PPE
+ * routing for such peers.
+ *
+ * rx_reo_queue_desc_addr_lo
+ * Address (lower 32 bits) of the REO queue descriptor.
+ *
+ * rx_reo_queue_desc_addr_hi
+ * Address (upper 8 bits) of the REO queue descriptor.
+ *
+ * receive_queue_number
+ * Indicates the MPDU queue ID to which this MPDU link
+ * descriptor belongs.
+ *
+ * pre_delim_err_warning
+ * Indicates that a delimiter FCS error was found in between the
+ * previous MPDU and this MPDU. Note that this is just a warning,
+ * and does not mean that this MPDU is corrupted in any way. If
+ * it is, there will be other errors indicated such as FCS or
+ * decrypt errors.
+ *
+ * first_delim_err
+ * Indicates that the first delimiter had a FCS failure.
+ *
+ * pn
+ * The PN number.
+ *
+ * epd_en
+ * Field only valid when AST_based_lookup_valid == 1.
+ * In case of ndp or phy_err or AST_based_lookup_valid == 0,
+ * this field will be set to 0
+ * If set to one use EPD instead of LPD
+ * In case of ndp or phy_err, this field will never be set.
+ *
+ * all_frames_shall_be_encrypted
+ * In case of ndp or phy_err or AST_based_lookup_valid == 0,
+ * this field will be set to 0
+ *
+ * When set, all frames (data only ?) shall be encrypted. If
+ * not, RX CRYPTO shall set an error flag.
+ *
+ *
+ * encrypt_type
+ * In case of ndp or phy_err or AST_based_lookup_valid == 0,
+ * this field will be set to 0
+ *
+ * Indicates type of decrypt cipher used (as defined in the
+ * peer entry)
+ *
+ * wep_key_width_for_variable_key
+ *
+ * Field only valid when key_type is set to wep_varied_width.
+ *
+ * mesh_sta
+ *
+ * bssid_hit
+ * When set, the BSSID of the incoming frame matched one of
+ * the 8 BSSID register values
+ * bssid_number
+ * Field only valid when bssid_hit is set.
+ * This number indicates which one out of the 8 BSSID register
+ * values matched the incoming frame
+ *
+ * tid
+ * Field only valid when mpdu_qos_control_valid is set
+ * The TID field in the QoS control field
+ *
+ * peer_meta_data
+ * Meta data that SW has programmed in the Peer table entry
+ * of the transmitting STA.
+ *
+ * rxpcu_mpdu_filter_in_category
+ * Field indicates what the reason was that this mpdu frame
+ * was allowed to come into the receive path by rxpcu. Values
+ * are defined in enum %RX_DESC_RXPCU_FILTER_*.
+ *
+ * sw_frame_group_id
+ * SW processes frames based on certain classifications. Values
+ * are defined in enum %RX_DESC_SW_FRAME_GRP_ID_*.
+ *
+ * ndp_frame
+ * When set, the received frame was an NDP frame, and thus
+ * there will be no MPDU data.
+ * phy_err
+ * When set, a PHY error was received before MAC received any
+ * data, and thus there will be no MPDU data.
+ *
+ * phy_err_during_mpdu_header
+ * When set, a PHY error was received before MAC received the
+ * complete MPDU header which was needed for proper decoding
+ *
+ * protocol_version_err
+ * Set when RXPCU detected a version error in the Frame control
+ * field
+ *
+ * ast_based_lookup_valid
+ * When set, AST based lookup for this frame has found a valid
+ * result.
+ *
+ * ranging
+ * When set, a ranging NDPA or a ranging NDP was received.
+ *
+ * phy_ppdu_id
+ * A ppdu counter value that PHY increments for every PPDU
+ * received. The counter value wraps around.
+ *
+ * ast_index
+ *
+ * This field indicates the index of the AST entry corresponding
+ * to this MPDU. It is provided by the GSE module instantiated
+ * in RXPCU.
+ * A value of 0xFFFF indicates an invalid AST index, meaning
+ * that No AST entry was found or NO AST search was performed
+ *
+ * sw_peer_id
+ * In case of ndp or phy_err or AST_based_lookup_valid == 0,
+ * this field will be set to 0
+ * This field indicates a unique peer identifier. It is set
+ * equal to field 'sw_peer_id' from the AST entry
+ *
+ * frame_control_valid
+ * When set, the field Mpdu_Frame_control_field has valid information
+ *
+ * frame_duration_valid
+ * When set, the field Mpdu_duration_field has valid information
+ *
+ * mac_addr_ad1..4_valid
+ * When set, the fields mac_addr_adx_..... have valid information
+ *
+ * mpdu_seq_ctrl_valid
+ *
+ * When set, the fields mpdu_sequence_control_field and mpdu_sequence_number
+ * have valid information as well as field
+ * For MPDUs without a sequence control field, this field will
+ * not be set.
+ *
+ * mpdu_qos_ctrl_valid, mpdu_ht_ctrl_valid
+ *
+ * When set, the field mpdu_qos_control_field, mpdu_ht_control has valid
+ * information, For MPDUs without a QoS,HT control field, this field
+ * will not be set.
+ *
+ * frame_encryption_info_valid
+ *
+ * When set, the encryption related info fields, like IV and
+ * PN are valid
+ * For MPDUs that are not encrypted, this will not be set.
+ *
+ * mpdu_fragment_number
+ *
+ * Field only valid when Mpdu_sequence_control_valid is set
+ * AND Fragment_flag is set. The fragment number from the 802.11 header
+ *
+ * more_fragment_flag
+ *
+ * The More Fragment bit setting from the MPDU header of the
+ * received frame
+ *
+ * fr_ds
+ *
+ * Field only valid when Mpdu_frame_control_valid is set
+ * Set if the from DS bit is set in the frame control.
+ *
+ * to_ds
+ *
+ * Field only valid when Mpdu_frame_control_valid is set
+ * Set if the to DS bit is set in the frame control.
+ *
+ * encrypted
+ *
+ * Field only valid when Mpdu_frame_control_valid is set.
+ * Protected bit from the frame control.
+ *
+ * mpdu_retry
+ * Field only valid when Mpdu_frame_control_valid is set.
+ * Retry bit from the frame control. Only valid when first_msdu is set
+ *
+ * mpdu_sequence_number
+ * Field only valid when Mpdu_sequence_control_valid is set.
+ *
+ * The sequence number from the 802.11 header.
+ * key_id
+ * The key ID octet from the IV.
+ * Field only valid when Frame_encryption_info_valid is set
+ *
+ * new_peer_entry
+ * Set if new RX_PEER_ENTRY TLV follows. If clear, RX_PEER_ENTRY
+ * doesn't follow so RX DECRYPTION module either uses old peer
+ * entry or not decrypt.
+ *
+ * decrypt_needed
+ * When RXPCU sets bit 'ast_index_not_found or ast_index_timeout',
+ * RXPCU will also ensure that this bit is NOT set. CRYPTO for that
+ * reason only needs to evaluate this bit and non of the other ones
+ *
+ * decap_type
+ * Used by the OLE during decapsulation. Values are defined in
+ * enum %MPDU_START_DECAP_TYPE_*.
+ *
+ * rx_insert_vlan_c_tag_padding
+ * rx_insert_vlan_s_tag_padding
+ * Insert 4 byte of all zeros as VLAN tag or double VLAN tag if
+ * the rx payload does not have VLAN.
+ *
+ * strip_vlan_c_tag_decap
+ * strip_vlan_s_tag_decap
+ * Strip VLAN or double VLAN during decapsulation.
+ *
+ * pre_delim_count
+ * The number of delimiters before this MPDU. Note that this
+ * number is cleared at PPDU start. If this MPDU is the first
+ * received MPDU in the PPDU and this MPDU gets filtered-in,
+ * this field will indicate the number of delimiters located
+ * after the last MPDU in the previous PPDU.
+ *
+ * If this MPDU is located after the first received MPDU in
+ * an PPDU, this field will indicate the number of delimiters
+ * located between the previous MPDU and this MPDU.
+ *
+ * ampdu_flag
+ * Received frame was part of an A-MPDU.
+ *
+ * bar_frame
+ * Received frame is a BAR frame
+ *
+ * raw_mpdu
+ * Set when no 802.11 to nwifi/ethernet hdr conversion is done
+ *
+ * mpdu_length
+ * MPDU length before decapsulation.
+ *
+ * first_mpdu
+ * Indicates the first MSDU of the PPDU. If both first_mpdu
+ * and last_mpdu are set in the MSDU then this is a not an
+ * A-MPDU frame but a stand alone MPDU. Interior MPDU in an
+ * A-MPDU shall have both first_mpdu and last_mpdu bits set to
+ * 0. The PPDU start status will only be valid when this bit
+ * is set.
+ *
+ * mcast_bcast
+ * Multicast / broadcast indicator. Only set when the MAC
+ * address 1 bit 0 is set indicating mcast/bcast and the BSSID
+ * matches one of the 4 BSSID registers. Only set when
+ * first_msdu is set.
+ *
+ * ast_index_not_found
+ * Only valid when first_msdu is set. Indicates no AST matching
+ * entries within the max search count.
+ *
+ * ast_index_timeout
+ * Only valid when first_msdu is set. Indicates an unsuccessful
+ * search in the address search table due to timeout.
+ *
+ * power_mgmt
+ * Power management bit set in the 802.11 header. Only set
+ * when first_msdu is set.
+ *
+ * non_qos
+ * Set if packet is not a non-QoS data frame. Only set when
+ * first_msdu is set.
+ *
+ * null_data
+ * Set if frame type indicates either null data or QoS null
+ * data format. Only set when first_msdu is set.
+ *
+ * mgmt_type
+ * Set if packet is a management packet. Only set when
+ * first_msdu is set.
+ *
+ * ctrl_type
+ * Set if packet is a control packet. Only set when first_msdu
+ * is set.
+ *
+ * more_data
+ * Set if more bit in frame control is set. Only set when
+ * first_msdu is set.
+ *
+ * eosp
+ * Set if the EOSP (end of service period) bit in the QoS
+ * control field is set. Only set when first_msdu is set.
+ *
+ *
+ * fragment_flag
+ * Fragment indication
+ *
+ * order
+ * Set if the order bit in the frame control is set. Only
+ * set when first_msdu is set.
+ *
+ * u_apsd_trigger
+ * U-APSD trigger frame
+ *
+ * encrypt_required
+ * Indicates that this data type frame is not encrypted even if
+ * the policy for this MPDU requires encryption as indicated in
+ * the peer table key type.
+ *
+ * directed
+ * MPDU is a directed packet which means that the RA matched
+ * our STA addresses. In proxySTA it means that the TA matched
+ * an entry in our address search table with the corresponding
+ * 'no_ack' bit is the address search entry cleared.
+ * amsdu_present
+ * AMSDU present
+ *
+ * mpdu_frame_control_field
+ * Frame control field in header. Only valid when the field is marked valid.
+ *
+ * mpdu_duration_field
+ * Duration field in header. Only valid when the field is marked valid.
+ *
+ * mac_addr_adx
+ * MAC addresses in the received frame. Only valid when corresponding
+ * address valid bit is set
+ *
+ * mpdu_qos_control_field, mpdu_ht_control_field
+ * QoS/HT control fields from header. Valid only when corresponding fields
+ * are marked valid
+ *
+ * vdev_id
+ * Virtual device associated with this peer
+ * RXOLE uses this to determine intra-BSS routing.
+ *
+ * service_code
+ * Opaque service code between PPE and Wi-Fi
+ * This field gets passed on by REO to PPE in the EDMA descriptor
+ * ('REO_TO_PPE_RING').
+ *
+ * priority_valid
+ * This field gets passed on by REO to PPE in the EDMA descriptor
+ * ('REO_TO_PPE_RING').
+ *
+ * src_info
+ * Source (virtual) device/interface info. associated with
+ * this peer
+ * This field gets passed on by REO to PPE in the EDMA descriptor
+ * ('REO_TO_PPE_RING').
+ *
+ * multi_link_addr_ad1_ad2_valid
+ * If set, Rx OLE shall convert Address1 and Address2 of received
+ * data frames to multi-link addresses during decapsulation to eth/nwifi
+ *
+ * multi_link_addr_ad1,ad2
+ * Multi-link receiver address1,2. Only valid when corresponding
+ * valid bit is set
+ *
+ * authorize_to_send_wds
+ * If not set, RXDMA shall perform error-routing for WDS packets
+ * as the sender is not authorized and might misuse WDS frame
+ * format to inject packets with arbitrary DA/SA.
+ *
+ */
+
+enum rx_msdu_start_pkt_type {
+ RX_MSDU_START_PKT_TYPE_11A,
+ RX_MSDU_START_PKT_TYPE_11B,
+ RX_MSDU_START_PKT_TYPE_11N,
+ RX_MSDU_START_PKT_TYPE_11AC,
+ RX_MSDU_START_PKT_TYPE_11AX,
+};
+
+enum rx_msdu_start_sgi {
+ RX_MSDU_START_SGI_0_8_US,
+ RX_MSDU_START_SGI_0_4_US,
+ RX_MSDU_START_SGI_1_6_US,
+ RX_MSDU_START_SGI_3_2_US,
+};
+
+enum rx_msdu_start_recv_bw {
+ RX_MSDU_START_RECV_BW_20MHZ,
+ RX_MSDU_START_RECV_BW_40MHZ,
+ RX_MSDU_START_RECV_BW_80MHZ,
+ RX_MSDU_START_RECV_BW_160MHZ,
+};
+
+enum rx_msdu_start_reception_type {
+ RX_MSDU_START_RECEPTION_TYPE_SU,
+ RX_MSDU_START_RECEPTION_TYPE_DL_MU_MIMO,
+ RX_MSDU_START_RECEPTION_TYPE_DL_MU_OFDMA,
+ RX_MSDU_START_RECEPTION_TYPE_DL_MU_OFDMA_MIMO,
+ RX_MSDU_START_RECEPTION_TYPE_UL_MU_MIMO,
+ RX_MSDU_START_RECEPTION_TYPE_UL_MU_OFDMA,
+ RX_MSDU_START_RECEPTION_TYPE_UL_MU_OFDMA_MIMO,
+};
+
+#define RX_MSDU_END_INFO0_RXPCU_MPDU_FITLER GENMASK(1, 0)
+#define RX_MSDU_END_INFO0_SW_FRAME_GRP_ID GENMASK(8, 2)
+
+#define RX_MSDU_END_INFO1_REPORTED_MPDU_LENGTH GENMASK(13, 0)
+
+#define RX_MSDU_END_INFO2_CCE_SUPER_RULE GENMASK(13, 8)
+#define RX_MSDU_END_INFO2_CCND_TRUNCATE BIT(14)
+#define RX_MSDU_END_INFO2_CCND_CCE_DIS BIT(15)
+
+#define RX_MSDU_END_INFO3_DA_OFFSET GENMASK(5, 0)
+#define RX_MSDU_END_INFO3_SA_OFFSET GENMASK(11, 6)
+#define RX_MSDU_END_INFO3_DA_OFFSET_VALID BIT(12)
+#define RX_MSDU_END_INFO3_SA_OFFSET_VALID BIT(13)
+
+#define RX_MSDU_END_INFO4_TCP_FLAG GENMASK(8, 0)
+#define RX_MSDU_END_INFO4_LRO_ELIGIBLE BIT(9)
+
+#define RX_MSDU_END_INFO5_SA_IDX_TIMEOUT BIT(0)
+#define RX_MSDU_END_INFO5_DA_IDX_TIMEOUT BIT(1)
+#define RX_MSDU_END_INFO5_TO_DS BIT(2)
+#define RX_MSDU_END_INFO5_TID GENMASK(6, 3)
+#define RX_MSDU_END_INFO5_SA_IS_VALID BIT(7)
+#define RX_MSDU_END_INFO5_DA_IS_VALID BIT(8)
+#define RX_MSDU_END_INFO5_DA_IS_MCBC BIT(9)
+#define RX_MSDU_END_INFO5_L3_HDR_PADDING GENMASK(11, 10)
+#define RX_MSDU_END_INFO5_FIRST_MSDU BIT(12)
+#define RX_MSDU_END_INFO5_LAST_MSDU BIT(13)
+#define RX_MSDU_END_INFO5_FROM_DS BIT(14)
+#define RX_MSDU_END_INFO5_IP_CHKSUM_FAIL_COPY BIT(15)
+
+#define RX_MSDU_END_INFO6_MSDU_DROP BIT(0)
+#define RX_MSDU_END_INFO6_REO_DEST_IND GENMASK(5, 1)
+#define RX_MSDU_END_INFO6_FLOW_IDX GENMASK(25, 6)
+#define RX_MSDU_END_INFO6_USE_PPE BIT(26)
+#define RX_MSDU_END_INFO6_MESH_STA GENMASK(28, 27)
+#define RX_MSDU_END_INFO6_VLAN_CTAG_STRIPPED BIT(29)
+#define RX_MSDU_END_INFO6_VLAN_STAG_STRIPPED BIT(30)
+#define RX_MSDU_END_INFO6_FRAGMENT_FLAG BIT(31)
+
+#define RX_MSDU_END_INFO7_AGGR_COUNT GENMASK(7, 0)
+#define RX_MSDU_END_INFO7_FLOW_AGGR_CONTN BIT(8)
+#define RX_MSDU_END_INFO7_FISA_TIMEOUT BIT(9)
+#define RX_MSDU_END_INFO7_TCPUDP_CSUM_FAIL_CPY BIT(10)
+#define RX_MSDU_END_INFO7_MSDU_LIMIT_ERROR BIT(11)
+#define RX_MSDU_END_INFO7_FLOW_IDX_TIMEOUT BIT(12)
+#define RX_MSDU_END_INFO7_FLOW_IDX_INVALID BIT(13)
+#define RX_MSDU_END_INFO7_CCE_MATCH BIT(14)
+#define RX_MSDU_END_INFO7_AMSDU_PARSER_ERR BIT(15)
+
+#define RX_MSDU_END_INFO8_KEY_ID GENMASK(7, 0)
+
+#define RX_MSDU_END_INFO9_SERVICE_CODE GENMASK(14, 6)
+#define RX_MSDU_END_INFO9_PRIORITY_VALID BIT(15)
+#define RX_MSDU_END_INFO9_INRA_BSS BIT(16)
+#define RX_MSDU_END_INFO9_DEST_CHIP_ID GENMASK(18, 17)
+#define RX_MSDU_END_INFO9_MCAST_ECHO BIT(19)
+#define RX_MSDU_END_INFO9_WDS_LEARN_EVENT BIT(20)
+#define RX_MSDU_END_INFO9_WDS_ROAM_EVENT BIT(21)
+#define RX_MSDU_END_INFO9_WDS_KEEP_ALIVE_EVENT BIT(22)
+
+#define RX_MSDU_END_INFO10_MSDU_LENGTH GENMASK(13, 0)
+#define RX_MSDU_END_INFO10_STBC BIT(14)
+#define RX_MSDU_END_INFO10_IPSEC_ESP BIT(15)
+#define RX_MSDU_END_INFO10_L3_OFFSET GENMASK(22, 16)
+#define RX_MSDU_END_INFO10_IPSEC_AH BIT(23)
+#define RX_MSDU_END_INFO10_L4_OFFSET GENMASK(31, 24)
+
+#define RX_MSDU_END_INFO11_MSDU_NUMBER GENMASK(7, 0)
+#define RX_MSDU_END_INFO11_DECAP_FORMAT GENMASK(9, 8)
+#define RX_MSDU_END_INFO11_IPV4 BIT(10)
+#define RX_MSDU_END_INFO11_IPV6 BIT(11)
+#define RX_MSDU_END_INFO11_TCP BIT(12)
+#define RX_MSDU_END_INFO11_UDP BIT(13)
+#define RX_MSDU_END_INFO11_IP_FRAG BIT(14)
+#define RX_MSDU_END_INFO11_TCP_ONLY_ACK BIT(15)
+#define RX_MSDU_END_INFO11_DA_IS_BCAST_MCAST BIT(16)
+#define RX_MSDU_END_INFO11_SEL_TOEPLITZ_HASH GENMASK(18, 17)
+#define RX_MSDU_END_INFO11_IP_FIXED_HDR_VALID BIT(19)
+#define RX_MSDU_END_INFO11_IP_EXTN_HDR_VALID BIT(20)
+#define RX_MSDU_END_INFO11_IP_TCP_UDP_HDR_VALID BIT(21)
+#define RX_MSDU_END_INFO11_MESH_CTRL_PRESENT BIT(22)
+#define RX_MSDU_END_INFO11_LDPC BIT(23)
+#define RX_MSDU_END_INFO11_IP4_IP6_NXT_HDR GENMASK(31, 24)
+
+#define RX_MSDU_END_INFO12_USER_RSSI GENMASK(7, 0)
+#define RX_MSDU_END_INFO12_PKT_TYPE GENMASK(11, 8)
+#define RX_MSDU_END_INFO12_SGI GENMASK(13, 12)
+#define RX_MSDU_END_INFO12_RATE_MCS GENMASK(17, 14)
+#define RX_MSDU_END_INFO12_RECV_BW GENMASK(20, 18)
+#define RX_MSDU_END_INFO12_RECEPTION_TYPE GENMASK(23, 21)
+#define RX_MSDU_END_INFO12_MIMO_SS_BITMAP GENMASK(30, 24)
+#define RX_MSDU_END_INFO12_MIMO_DONE_COPY BIT(31)
+
+#define RX_MSDU_END_INFO13_FIRST_MPDU BIT(0)
+#define RX_MSDU_END_INFO13_MCAST_BCAST BIT(2)
+#define RX_MSDU_END_INFO13_AST_IDX_NOT_FOUND BIT(3)
+#define RX_MSDU_END_INFO13_AST_IDX_TIMEDOUT BIT(4)
+#define RX_MSDU_END_INFO13_POWER_MGMT BIT(5)
+#define RX_MSDU_END_INFO13_NON_QOS BIT(6)
+#define RX_MSDU_END_INFO13_NULL_DATA BIT(7)
+#define RX_MSDU_END_INFO13_MGMT_TYPE BIT(8)
+#define RX_MSDU_END_INFO13_CTRL_TYPE BIT(9)
+#define RX_MSDU_END_INFO13_MORE_DATA BIT(10)
+#define RX_MSDU_END_INFO13_EOSP BIT(11)
+#define RX_MSDU_END_INFO13_A_MSDU_ERROR BIT(12)
+#define RX_MSDU_END_INFO13_ORDER BIT(14)
+#define RX_MSDU_END_INFO13_WIFI_PARSER_ERR BIT(15)
+#define RX_MSDU_END_INFO13_OVERFLOW_ERR BIT(16)
+#define RX_MSDU_END_INFO13_MSDU_LEN_ERR BIT(17)
+#define RX_MSDU_END_INFO13_TCP_UDP_CKSUM_FAIL BIT(18)
+#define RX_MSDU_END_INFO13_IP_CKSUM_FAIL BIT(19)
+#define RX_MSDU_END_INFO13_SA_IDX_INVALID BIT(20)
+#define RX_MSDU_END_INFO13_DA_IDX_INVALID BIT(21)
+#define RX_MSDU_END_INFO13_AMSDU_ADDR_MISMATCH BIT(22)
+#define RX_MSDU_END_INFO13_RX_IN_TX_DECRYPT_BYP BIT(23)
+#define RX_MSDU_END_INFO13_ENCRYPT_REQUIRED BIT(24)
+#define RX_MSDU_END_INFO13_DIRECTED BIT(25)
+#define RX_MSDU_END_INFO13_BUFFER_FRAGMENT BIT(26)
+#define RX_MSDU_END_INFO13_MPDU_LEN_ERR BIT(27)
+#define RX_MSDU_END_INFO13_TKIP_MIC_ERR BIT(28)
+#define RX_MSDU_END_INFO13_DECRYPT_ERR BIT(29)
+#define RX_MSDU_END_INFO13_UNDECRYPT_FRAME_ERR BIT(30)
+#define RX_MSDU_END_INFO13_FCS_ERR BIT(31)
+
+#define RX_MSDU_END_INFO14_DECRYPT_STATUS_CODE GENMASK(12, 10)
+#define RX_MSDU_END_INFO14_RX_BITMAP_NOT_UPDED BIT(13)
+#define RX_MSDU_END_INFO14_MSDU_DONE BIT(31)
+
+struct rx_msdu_end_qcn9274 {
+ __le16 info0;
+ __le16 phy_ppdu_id;
+ __le16 ip_hdr_cksum;
+ __le16 info1;
+ __le16 info2;
+ __le16 cumulative_l3_checksum;
+ __le32 rule_indication0;
+ __le32 ipv6_options_crc;
+ __le16 info3;
+ __le16 l3_type;
+ __le32 rule_indication1;
+ __le32 tcp_seq_num;
+ __le32 tcp_ack_num;
+ __le16 info4;
+ __le16 window_size;
+ __le16 sa_sw_peer_id;
+ __le16 info5;
+ __le16 sa_idx;
+ __le16 da_idx_or_sw_peer_id;
+ __le32 info6;
+ __le32 fse_metadata;
+ __le16 cce_metadata;
+ __le16 tcp_udp_cksum;
+ __le16 info7;
+ __le16 cumulative_ip_length;
+ __le32 info8;
+ __le32 info9;
+ __le32 info10;
+ __le32 info11;
+ __le16 vlan_ctag_ci;
+ __le16 vlan_stag_ci;
+ __le32 peer_meta_data;
+ __le32 info12;
+ __le32 flow_id_toeplitz;
+ __le32 ppdu_start_timestamp_63_32;
+ __le32 phy_meta_data;
+ __le32 ppdu_start_timestamp_31_0;
+ __le32 toeplitz_hash_2_or_4;
+ __le16 res0;
+ __le16 sa_15_0;
+ __le32 sa_47_16;
+ __le32 info13;
+ __le32 info14;
+} __packed;
+
+/* rx_msdu_end
+ *
+ * rxpcu_mpdu_filter_in_category
+ * Field indicates what the reason was that this mpdu frame
+ * was allowed to come into the receive path by rxpcu. Values
+ * are defined in enum %RX_DESC_RXPCU_FILTER_*.
+ *
+ * sw_frame_group_id
+ * SW processes frames based on certain classifications. Values
+ * are defined in enum %RX_DESC_SW_FRAME_GRP_ID_*.
+ *
+ * phy_ppdu_id
+ * A ppdu counter value that PHY increments for every PPDU
+ * received. The counter value wraps around.
+ *
+ * ip_hdr_cksum
+ * This can include the IP header checksum or the pseudo
+ * header checksum used by TCP/UDP checksum.
+ *
+ * reported_mpdu_length
+ * MPDU length before decapsulation. Only valid when first_msdu is
+ * set. This field is taken directly from the length field of the
+ * A-MPDU delimiter or the preamble length field for non-A-MPDU
+ * frames.
+ *
+ * cce_super_rule
+ * Indicates the super filter rule.
+ *
+ * cce_classify_not_done_truncate
+ * Classification failed due to truncated frame.
+ *
+ * cce_classify_not_done_cce_dis
+ * Classification failed due to CCE global disable
+ *
+ * cumulative_l3_checksum
+ * FISA: IP header checksum including the total MSDU length
+ * that is part of this flow aggregated so far, reported if
+ * 'RXOLE_R0_FISA_CTRL. CHKSUM_CUM_IP_LEN_EN' is set
+ *
+ * rule_indication
+ * Bitmap indicating which of rules have matched.
+ *
+ * ipv6_options_crc
+ * 32 bit CRC computed out of IP v6 extension headers.
+ *
+ * da_offset
+ * Offset into MSDU buffer for DA.
+ *
+ * sa_offset
+ * Offset into MSDU buffer for SA.
+ *
+ * da_offset_valid
+ * da_offset field is valid. This will be set to 0 in case
+ * of a dynamic A-MSDU when DA is compressed.
+ *
+ * sa_offset_valid
+ * sa_offset field is valid. This will be set to 0 in case
+ * of a dynamic A-MSDU when SA is compressed.
+ *
+ * l3_type
+ * The 16-bit type value indicating the type of L3 later
+ * extracted from LLC/SNAP, set to zero if SNAP is not
+ * available.
+ *
+ * tcp_seq_number
+ * TCP sequence number.
+ *
+ * tcp_ack_number
+ * TCP acknowledge number.
+ *
+ * tcp_flag
+ * TCP flags {NS, CWR, ECE, URG, ACK, PSH, RST, SYN, FIN}.
+ *
+ * lro_eligible
+ * Computed out of TCP and IP fields to indicate that this
+ * MSDU is eligible for LRO.
+ *
+ * window_size
+ * TCP receive window size.
+ *
+ * sa_sw_peer_id
+ * sw_peer_id from the address search entry corresponding to the
+ * source address of the MSDU.
+ *
+ * sa_idx_timeout
+ * Indicates an unsuccessful MAC source address search due to the
+ * expiring of the search timer.
+ *
+ * da_idx_timeout
+ * Indicates an unsuccessful MAC destination address search due to
+ * the expiring of the search timer.
+ *
+ * to_ds
+ * Set if the to DS bit is set in the frame control.
+ *
+ * tid
+ * TID field in the QoS control field
+ *
+ * sa_is_valid
+ * Indicates that OLE found a valid SA entry.
+ *
+ * da_is_valid
+ * Indicates that OLE found a valid DA entry.
+ *
+ * da_is_mcbc
+ * Field Only valid if da_is_valid is set. Indicates the DA address
+ * was a Multicast of Broadcast address.
+ *
+ * l3_header_padding
+ * Number of bytes padded to make sure that the L3 header will
+ * always start of a Dword boundary.
+ *
+ * first_msdu
+ * Indicates the first MSDU of A-MSDU. If both first_msdu and
+ * last_msdu are set in the MSDU then this is a non-aggregated MSDU
+ * frame: normal MPDU. Interior MSDU in an A-MSDU shall have both
+ * first_mpdu and last_mpdu bits set to 0.
+ *
+ * last_msdu
+ * Indicates the last MSDU of the A-MSDU. MPDU end status is only
+ * valid when last_msdu is set.
+ *
+ * fr_ds
+ * Set if the from DS bit is set in the frame control.
+ *
+ * ip_chksum_fail_copy
+ * Indicates that the computed checksum did not match the
+ * checksum in the IP header.
+ *
+ * sa_idx
+ * The offset in the address table which matches the MAC source
+ * address.
+ *
+ * da_idx_or_sw_peer_id
+ * Based on a register configuration in RXOLE, this field will
+ * contain:
+ * The offset in the address table which matches the MAC destination
+ * address
+ * OR:
+ * sw_peer_id from the address search entry corresponding to
+ * the destination address of the MSDU
+ *
+ * msdu_drop
+ * REO shall drop this MSDU and not forward it to any other ring.
+ *
+ * The id of the reo exit ring where the msdu frame shall push
+ * after (MPDU level) reordering has finished. Values are defined
+ * in enum %HAL_RX_MSDU_DESC_REO_DEST_IND_.
+ *
+ * flow_idx
+ * Flow table index.
+ *
+ * use_ppe
+ * Indicates to RXDMA to ignore the REO_destination_indication
+ * and use a programmed value corresponding to the REO2PPE
+ * ring
+ *
+ * mesh_sta
+ * When set, this is a Mesh (11s) STA.
+ *
+ * vlan_ctag_stripped
+ * Set by RXOLE if it stripped 4-bytes of C-VLAN Tag from the
+ * packet
+ *
+ * vlan_stag_stripped
+ * Set by RXOLE if it stripped 4-bytes of S-VLAN Tag from the
+ * packet
+ *
+ * fragment_flag
+ * Indicates that this is an 802.11 fragment frame. This is
+ * set when either the more_frag bit is set in the frame control
+ * or the fragment number is not zero. Only set when first_msdu
+ * is set.
+ *
+ * fse_metadata
+ * FSE related meta data.
+ *
+ * cce_metadata
+ * CCE related meta data.
+ *
+ * tcp_udp_chksum
+ * The value of the computed TCP/UDP checksum. A mode bit
+ * selects whether this checksum is the full checksum or the
+ * partial checksum which does not include the pseudo header.
+ *
+ * aggregation_count
+ * Number of MSDU's aggregated so far
+ *
+ * flow_aggregation_continuation
+ * To indicate that this MSDU can be aggregated with
+ * the previous packet with the same flow id
+ *
+ * fisa_timeout
+ * To indicate that the aggregation has restarted for
+ * this flow due to timeout
+ *
+ * tcp_udp_chksum_fail
+ * Indicates that the computed checksum (tcp_udp_chksum) did
+ * not match the checksum in the TCP/UDP header.
+ *
+ * msdu_limit_error
+ * Indicates that the MSDU threshold was exceeded and thus all the
+ * rest of the MSDUs will not be scattered and will not be
+ * decapsulated but will be DMA'ed in RAW format as a single MSDU.
+ *
+ * flow_idx_timeout
+ * Indicates an unsuccessful flow search due to the expiring of
+ * the search timer.
+ *
+ * flow_idx_invalid
+ * flow id is not valid.
+ *
+ * cce_match
+ * Indicates that this status has a corresponding MSDU that
+ * requires FW processing. The OLE will have classification
+ * ring mask registers which will indicate the ring(s) for
+ * packets and descriptors which need FW attention.
+ *
+ * amsdu_parser_error
+ * A-MSDU could not be properly de-agregated.
+ *
+ * cumulative_ip_length
+ * Total MSDU length that is part of this flow aggregated
+ * so far
+ *
+ * key_id
+ * The key ID octet from the IV. Only valid when first_msdu is set.
+ *
+ * service_code
+ * Opaque service code between PPE and Wi-Fi
+ *
+ * priority_valid
+ * This field gets passed on by REO to PPE in the EDMA descriptor
+ *
+ * intra_bss
+ * This packet needs intra-BSS routing by SW as the 'vdev_id'
+ * for the destination is the same as 'vdev_id' (from 'RX_MPDU_PCU_START')
+ * that this MSDU was got in.
+ *
+ * dest_chip_id
+ * If intra_bss is set, copied by RXOLE from 'ADDR_SEARCH_ENTRY'
+ * to support intra-BSS routing with multi-chip multi-link
+ * operation. This indicates into which chip's TCL the packet should be
+ * queueued
+ *
+ * multicast_echo
+ * If set, this packet is a multicast echo, i.e. the DA is
+ * multicast and Rx OLE SA search with mcast_echo_check = 1
+ * passed. RXDMA should release such packets to WBM.
+ *
+ * wds_learning_event
+ * If set, this packet has an SA search failure with WDS learning
+ * enabled for the peer. RXOLE should route this TLV to the
+ * RXDMA0 status ring to notify FW.
+ *
+ * wds_roaming_event
+ * If set, this packet's SA 'Sw_peer_id' mismatches the 'Sw_peer_id'
+ * of the peer through which the packet was got, indicating
+ * the SA node has roamed. RXOLE should route this TLV to
+ * the RXDMA0 status ring to notify FW.
+ *
+ * wds_keep_alive_event
+ * If set, the AST timestamp for this packet's SA is older
+ * than the current timestamp by more than a threshold programmed
+ * in RXOLE. RXOLE should route this TLV to the RXDMA0 status
+ * ring to notify FW to keep the AST entry for the SA alive.
+ *
+ * msdu_length
+ * MSDU length in bytes after decapsulation.
+ * This field is still valid for MPDU frames without A-MSDU.
+ * It still represents MSDU length after decapsulation
+ *
+ * stbc
+ * When set, use STBC transmission rates.
+ *
+ * ipsec_esp
+ * Set if IPv4/v6 packet is using IPsec ESP.
+ *
+ * l3_offset
+ * Depending upon mode bit, this field either indicates the
+ * L3 offset in bytes from the start of the RX_HEADER or the IP
+ * offset in bytes from the start of the packet after
+ * decapsulation. The latter is only valid if ipv4_proto or
+ * ipv6_proto is set.
+ *
+ * ipsec_ah
+ * Set if IPv4/v6 packet is using IPsec AH
+ *
+ * l4_offset
+ * Depending upon mode bit, this field either indicates the
+ * L4 offset nin bytes from the start of RX_HEADER (only valid
+ * if either ipv4_proto or ipv6_proto is set to 1) or indicates
+ * the offset in bytes to the start of TCP or UDP header from
+ * the start of the IP header after decapsulation (Only valid if
+ * tcp_proto or udp_proto is set). The value 0 indicates that
+ * the offset is longer than 127 bytes.
+ *
+ * msdu_number
+ * Indicates the MSDU number within a MPDU. This value is
+ * reset to zero at the start of each MPDU. If the number of
+ * MSDU exceeds 255 this number will wrap using modulo 256.
+ *
+ * decap_type
+ * Indicates the format after decapsulation. Values are defined in
+ * enum %MPDU_START_DECAP_TYPE_*.
+ *
+ * ipv4_proto
+ * Set if L2 layer indicates IPv4 protocol.
+ *
+ * ipv6_proto
+ * Set if L2 layer indicates IPv6 protocol.
+ *
+ * tcp_proto
+ * Set if the ipv4_proto or ipv6_proto are set and the IP protocol
+ * indicates TCP.
+ *
+ * udp_proto
+ * Set if the ipv4_proto or ipv6_proto are set and the IP protocol
+ * indicates UDP.
+ *
+ * ip_frag
+ * Indicates that either the IP More frag bit is set or IP frag
+ * number is non-zero. If set indicates that this is a fragmented
+ * IP packet.
+ *
+ * tcp_only_ack
+ * Set if only the TCP Ack bit is set in the TCP flags and if
+ * the TCP payload is 0.
+ *
+ * da_is_bcast_mcast
+ * The destination address is broadcast or multicast.
+ *
+ * toeplitz_hash
+ * Actual chosen Hash.
+ * 0 - Toeplitz hash of 2-tuple (IP source address, IP
+ * destination address)
+ * 1 - Toeplitz hash of 4-tuple (IP source address,
+ * IP destination address, L4 (TCP/UDP) source port,
+ * L4 (TCP/UDP) destination port)
+ * 2 - Toeplitz of flow_id
+ * 3 - Zero is used
+ *
+ * ip_fixed_header_valid
+ * Fixed 20-byte IPv4 header or 40-byte IPv6 header parsed
+ * fully within first 256 bytes of the packet
+ *
+ * ip_extn_header_valid
+ * IPv6/IPv6 header, including IPv4 options and
+ * recognizable extension headers parsed fully within first 256
+ * bytes of the packet
+ *
+ * tcp_udp_header_valid
+ * Fixed 20-byte TCP (excluding TCP options) or 8-byte UDP
+ * header parsed fully within first 256 bytes of the packet
+ *
+ * mesh_control_present
+ * When set, this MSDU includes the 'Mesh Control' field
+ *
+ * ldpc
+ *
+ * ip4_protocol_ip6_next_header
+ * For IPv4, this is the 8 bit protocol field set). For IPv6 this
+ * is the 8 bit next_header field.
+ *
+ *
+ * vlan_ctag_ci
+ * 2 bytes of C-VLAN Tag Control Information from WHO_L2_LLC
+ *
+ * vlan_stag_ci
+ * 2 bytes of S-VLAN Tag Control Information from WHO_L2_LLC
+ * in case of double VLAN
+ *
+ * peer_meta_data
+ * Meta data that SW has programmed in the Peer table entry
+ * of the transmitting STA.
+ *
+ * user_rssi
+ * RSSI for this user
+ *
+ * pkt_type
+ * Values are defined in enum %RX_MSDU_START_PKT_TYPE_*.
+ *
+ * sgi
+ * Field only valid when pkt type is HT, VHT or HE. Values are
+ * defined in enum %RX_MSDU_START_SGI_*.
+ *
+ * rate_mcs
+ * MCS Rate used.
+ *
+ * receive_bandwidth
+ * Full receive Bandwidth. Values are defined in enum
+ * %RX_MSDU_START_RECV_*.
+ *
+ * reception_type
+ * Indicates what type of reception this is and defined in enum
+ * %RX_MSDU_START_RECEPTION_TYPE_*.
+ *
+ * mimo_ss_bitmap
+ * Field only valid when
+ * Reception_type is RX_MSDU_START_RECEPTION_TYPE_DL_MU_MIMO or
+ * RX_MSDU_START_RECEPTION_TYPE_DL_MU_OFDMA_MIMO.
+ *
+ * Bitmap, with each bit indicating if the related spatial
+ * stream is used for this STA
+ *
+ * LSB related to SS 0
+ *
+ * 0 - spatial stream not used for this reception
+ * 1 - spatial stream used for this reception
+ *
+ * msdu_done_copy
+ * If set indicates that the RX packet data, RX header data,
+ * RX PPDU start descriptor, RX MPDU start/end descriptor,
+ * RX MSDU start/end descriptors and RX Attention descriptor
+ * are all valid. This bit is in the last 64-bit of the descriptor
+ * expected to be subscribed in future hardware.
+ *
+ * flow_id_toeplitz
+ * Toeplitz hash of 5-tuple
+ * {IP source address, IP destination address, IP source port, IP
+ * destination port, L4 protocol} in case of non-IPSec.
+ *
+ * In case of IPSec - Toeplitz hash of 4-tuple
+ * {IP source address, IP destination address, SPI, L4 protocol}
+ *
+ * The relevant Toeplitz key registers are provided in RxOLE's
+ * instance of common parser module. These registers are separate
+ * from the Toeplitz keys used by ASE/FSE modules inside RxOLE.
+ * The actual value will be passed on from common parser module
+ * to RxOLE in one of the WHO_* TLVs.
+ *
+ * ppdu_start_timestamp
+ * Timestamp that indicates when the PPDU that contained this MPDU
+ * started on the medium.
+ *
+ * phy_meta_data
+ * SW programmed Meta data provided by the PHY. Can be used for SW
+ * to indicate the channel the device is on.
+ *
+ * toeplitz_hash_2_or_4
+ * Controlled by multiple RxOLE registers for TCP/UDP over
+ * IPv4/IPv6 - Either, Toeplitz hash computed over 2-tuple
+ * IPv4 or IPv6 src/dest addresses is reported; or, Toeplitz
+ * hash computed over 4-tuple IPv4 or IPv6 src/dest addresses
+ * and src/dest ports is reported. The Flow_id_toeplitz hash
+ * can also be reported here. Usually the hash reported here
+ * is the one used for hash-based REO routing (see use_flow_id_toeplitz_clfy
+ * in 'RXPT_CLASSIFY_INFO').
+ *
+ * sa
+ * Source MAC address
+ *
+ * first_mpdu
+ * Indicates the first MSDU of the PPDU. If both first_mpdu
+ * and last_mpdu are set in the MSDU then this is a not an
+ * A-MPDU frame but a stand alone MPDU. Interior MPDU in an
+ * A-MPDU shall have both first_mpdu and last_mpdu bits set to
+ * 0. The PPDU start status will only be valid when this bit
+ * is set.
+ *
+ * mcast_bcast
+ * Multicast / broadcast indicator. Only set when the MAC
+ * address 1 bit 0 is set indicating mcast/bcast and the BSSID
+ * matches one of the 4 BSSID registers. Only set when
+ * first_msdu is set.
+ *
+ * ast_index_not_found
+ * Only valid when first_msdu is set. Indicates no AST matching
+ * entries within the max search count.
+ *
+ * ast_index_timeout
+ * Only valid when first_msdu is set. Indicates an unsuccessful
+ * search in the address search table due to timeout.
+ *
+ * power_mgmt
+ * Power management bit set in the 802.11 header. Only set
+ * when first_msdu is set.
+ *
+ * non_qos
+ * Set if packet is not a non-QoS data frame. Only set when
+ * first_msdu is set.
+ *
+ * null_data
+ * Set if frame type indicates either null data or QoS null
+ * data format. Only set when first_msdu is set.
+ *
+ * mgmt_type
+ * Set if packet is a management packet. Only set when
+ * first_msdu is set.
+ *
+ * ctrl_type
+ * Set if packet is a control packet. Only set when first_msdu
+ * is set.
+ *
+ * more_data
+ * Set if more bit in frame control is set. Only set when
+ * first_msdu is set.
+ *
+ * eosp
+ * Set if the EOSP (end of service period) bit in the QoS
+ * control field is set. Only set when first_msdu is set.
+ *
+ * a_msdu_error
+ * Set if number of MSDUs in A-MSDU is above a threshold or if the
+ * size of the MSDU is invalid. This receive buffer will contain
+ * all of the remainder of MSDUs in this MPDU w/o decapsulation.
+ *
+ * order
+ * Set if the order bit in the frame control is set. Only
+ * set when first_msdu is set.
+ *
+ * wifi_parser_error
+ * Indicates that the WiFi frame has one of the following errors
+ *
+ * overflow_err
+ * RXPCU Receive FIFO ran out of space to receive the full MPDU.
+ * Therefore this MPDU is terminated early and is thus corrupted.
+ *
+ * This MPDU will not be ACKed.
+ *
+ * RXPCU might still be able to correctly receive the following
+ * MPDUs in the PPDU if enough fifo space became available in time.
+ *
+ * mpdu_length_err
+ * Set by RXPCU if the expected MPDU length does not correspond
+ * with the actually received number of bytes in the MPDU.
+ *
+ * tcp_udp_chksum_fail
+ * Indicates that the computed checksum (tcp_udp_chksum) did
+ * not match the checksum in the TCP/UDP header.
+ *
+ * ip_chksum_fail
+ * Indicates that the computed checksum did not match the
+ * checksum in the IP header.
+ *
+ * sa_idx_invalid
+ * Indicates no matching entry was found in the address search
+ * table for the source MAC address.
+ *
+ * da_idx_invalid
+ * Indicates no matching entry was found in the address search
+ * table for the destination MAC address.
+ *
+ * amsdu_addr_mismatch
+ * Indicates that an A-MSDU with 'from DS = 0' had an SA mismatching
+ * TA or an A-MDU with 'to DS = 0' had a DA mismatching RA
+ *
+ * rx_in_tx_decrypt_byp
+ * Indicates that RX packet is not decrypted as Crypto is busy
+ * with TX packet processing.
+ *
+ * encrypt_required
+ * Indicates that this data type frame is not encrypted even if
+ * the policy for this MPDU requires encryption as indicated in
+ * the peer table key type.
+ *
+ * directed
+ * MPDU is a directed packet which means that the RA matched
+ * our STA addresses. In proxySTA it means that the TA matched
+ * an entry in our address search table with the corresponding
+ * 'no_ack' bit is the address search entry cleared.
+ *
+ * buffer_fragment
+ * Indicates that at least one of the rx buffers has been
+ * fragmented. If set the FW should look at the rx_frag_info
+ * descriptor described below.
+ *
+ * mpdu_length_err
+ * Indicates that the MPDU was pre-maturely terminated
+ * resulting in a truncated MPDU. Don't trust the MPDU length
+ * field.
+ *
+ * tkip_mic_err
+ * Indicates that the MPDU Michael integrity check failed
+ *
+ * decrypt_err
+ * Indicates that the MPDU decrypt integrity check failed
+ *
+ * fcs_err
+ * Indicates that the MPDU FCS check failed
+ *
+ * flow_idx_timeout
+ * Indicates an unsuccessful flow search due to the expiring of
+ * the search timer.
+ *
+ * flow_idx_invalid
+ * flow id is not valid.
+ *
+ * decrypt_status_code
+ * Field provides insight into the decryption performed. Values
+ * are defined in enum %RX_DESC_DECRYPT_STATUS_CODE_*.
+ *
+ * rx_bitmap_not_updated
+ * Frame is received, but RXPCU could not update the receive bitmap
+ * due to (temporary) fifo constraints.
+ *
+ * msdu_done
+ * If set indicates that the RX packet data, RX header data, RX
+ * PPDU start descriptor, RX MPDU start/end descriptor, RX MSDU
+ * start/end descriptors and RX Attention descriptor are all
+ * valid. This bit must be in the last octet of the
+ * descriptor.
+ *
+ */
+
+/* TODO: Move to compact TLV approach
+ * By default these tlv's are not aligned to 128b boundary
+ * Need to remove unused qwords and make them compact/aligned
+ */
+struct hal_rx_desc_qcn9274 {
+ struct rx_msdu_end_qcn9274 msdu_end;
+ struct rx_mpdu_start_qcn9274 mpdu_start;
+ u8 msdu_payload[];
+} __packed;
+
+#define RX_BE_PADDING0_BYTES 8
+#define RX_BE_PADDING1_BYTES 8
+
+#define HAL_RX_BE_PKT_HDR_TLV_LEN 112
+
+struct rx_pkt_hdr_tlv {
+ __le64 tag;
+ __le64 phy_ppdu_id;
+ u8 rx_pkt_hdr[HAL_RX_BE_PKT_HDR_TLV_LEN];
+};
+
+struct hal_rx_desc_wcn7850 {
+ __le64 msdu_end_tag;
+ struct rx_msdu_end_qcn9274 msdu_end;
+ u8 rx_padding0[RX_BE_PADDING0_BYTES];
+ __le64 mpdu_start_tag;
+ struct rx_mpdu_start_qcn9274 mpdu_start;
+ struct rx_pkt_hdr_tlv pkt_hdr_tlv;
+ u8 msdu_payload[];
+};
+
+struct hal_rx_desc {
+ union {
+ struct hal_rx_desc_qcn9274 qcn9274;
+ struct hal_rx_desc_wcn7850 wcn7850;
+ } u;
+} __packed;
+
+#define MAX_USER_POS 8
+#define MAX_MU_GROUP_ID 64
+#define MAX_MU_GROUP_SHOW 16
+#define MAX_MU_GROUP_LENGTH (6 * MAX_MU_GROUP_SHOW)
+
+#define HAL_RX_RU_ALLOC_TYPE_MAX 6
+#define RU_26 1
+#define RU_52 2
+#define RU_106 4
+#define RU_242 9
+#define RU_484 18
+#define RU_996 37
+
+#endif /* ATH12K_RX_DESC_H */
diff --git a/drivers/net/wireless/ath/ath12k/trace.c b/drivers/net/wireless/ath/ath12k/trace.c
new file mode 100644
index 000000000000..0d0edf4204b7
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/trace.c
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/module.h>
+
+#define CREATE_TRACE_POINTS
+#include "trace.h"
diff --git a/drivers/net/wireless/ath/ath12k/trace.h b/drivers/net/wireless/ath/ath12k/trace.h
new file mode 100644
index 000000000000..f72096684b74
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/trace.h
@@ -0,0 +1,152 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#if !defined(_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+
+#include <linux/tracepoint.h>
+#include "core.h"
+
+#define _TRACE_H_
+
+/* create empty functions when tracing is disabled */
+#if !defined(CONFIG_ATH12K_TRACING)
+#undef TRACE_EVENT
+#define TRACE_EVENT(name, proto, ...) \
+static inline void trace_ ## name(proto) {}
+#endif /* !CONFIG_ATH12K_TRACING || __CHECKER__ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM ath12k
+
+TRACE_EVENT(ath12k_htt_pktlog,
+ TP_PROTO(struct ath12k *ar, const void *buf, u16 buf_len,
+ u32 pktlog_checksum),
+
+ TP_ARGS(ar, buf, buf_len, pktlog_checksum),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(ar->ab->dev))
+ __string(driver, dev_driver_string(ar->ab->dev))
+ __field(u16, buf_len)
+ __field(u32, pktlog_checksum)
+ __dynamic_array(u8, pktlog, buf_len)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device, dev_name(ar->ab->dev));
+ __assign_str(driver, dev_driver_string(ar->ab->dev));
+ __entry->buf_len = buf_len;
+ __entry->pktlog_checksum = pktlog_checksum;
+ memcpy(__get_dynamic_array(pktlog), buf, buf_len);
+ ),
+
+ TP_printk(
+ "%s %s size %u pktlog_checksum %d",
+ __get_str(driver),
+ __get_str(device),
+ __entry->buf_len,
+ __entry->pktlog_checksum
+ )
+);
+
+TRACE_EVENT(ath12k_htt_ppdu_stats,
+ TP_PROTO(struct ath12k *ar, const void *data, size_t len),
+
+ TP_ARGS(ar, data, len),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(ar->ab->dev))
+ __string(driver, dev_driver_string(ar->ab->dev))
+ __field(u16, len)
+ __field(u32, info)
+ __field(u32, sync_tstmp_lo_us)
+ __field(u32, sync_tstmp_hi_us)
+ __field(u32, mlo_offset_lo)
+ __field(u32, mlo_offset_hi)
+ __field(u32, mlo_offset_clks)
+ __field(u32, mlo_comp_clks)
+ __field(u32, mlo_comp_timer)
+ __dynamic_array(u8, ppdu, len)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device, dev_name(ar->ab->dev));
+ __assign_str(driver, dev_driver_string(ar->ab->dev));
+ __entry->len = len;
+ __entry->info = ar->pdev->timestamp.info;
+ __entry->sync_tstmp_lo_us = ar->pdev->timestamp.sync_timestamp_hi_us;
+ __entry->sync_tstmp_hi_us = ar->pdev->timestamp.sync_timestamp_lo_us;
+ __entry->mlo_offset_lo = ar->pdev->timestamp.mlo_offset_lo;
+ __entry->mlo_offset_hi = ar->pdev->timestamp.mlo_offset_hi;
+ __entry->mlo_offset_clks = ar->pdev->timestamp.mlo_offset_clks;
+ __entry->mlo_comp_clks = ar->pdev->timestamp.mlo_comp_clks;
+ __entry->mlo_comp_timer = ar->pdev->timestamp.mlo_comp_timer;
+ memcpy(__get_dynamic_array(ppdu), data, len);
+ ),
+
+ TP_printk(
+ "%s %s ppdu len %d",
+ __get_str(driver),
+ __get_str(device),
+ __entry->len
+ )
+);
+
+TRACE_EVENT(ath12k_htt_rxdesc,
+ TP_PROTO(struct ath12k *ar, const void *data, size_t type, size_t len),
+
+ TP_ARGS(ar, data, type, len),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(ar->ab->dev))
+ __string(driver, dev_driver_string(ar->ab->dev))
+ __field(u16, len)
+ __field(u16, type)
+ __field(u32, info)
+ __field(u32, sync_tstmp_lo_us)
+ __field(u32, sync_tstmp_hi_us)
+ __field(u32, mlo_offset_lo)
+ __field(u32, mlo_offset_hi)
+ __field(u32, mlo_offset_clks)
+ __field(u32, mlo_comp_clks)
+ __field(u32, mlo_comp_timer)
+ __dynamic_array(u8, rxdesc, len)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device, dev_name(ar->ab->dev));
+ __assign_str(driver, dev_driver_string(ar->ab->dev));
+ __entry->len = len;
+ __entry->type = type;
+ __entry->info = ar->pdev->timestamp.info;
+ __entry->sync_tstmp_lo_us = ar->pdev->timestamp.sync_timestamp_hi_us;
+ __entry->sync_tstmp_hi_us = ar->pdev->timestamp.sync_timestamp_lo_us;
+ __entry->mlo_offset_lo = ar->pdev->timestamp.mlo_offset_lo;
+ __entry->mlo_offset_hi = ar->pdev->timestamp.mlo_offset_hi;
+ __entry->mlo_offset_clks = ar->pdev->timestamp.mlo_offset_clks;
+ __entry->mlo_comp_clks = ar->pdev->timestamp.mlo_comp_clks;
+ __entry->mlo_comp_timer = ar->pdev->timestamp.mlo_comp_timer;
+ memcpy(__get_dynamic_array(rxdesc), data, len);
+ ),
+
+ TP_printk(
+ "%s %s rxdesc len %d",
+ __get_str(driver),
+ __get_str(device),
+ __entry->len
+ )
+);
+
+#endif /* _TRACE_H_ || TRACE_HEADER_MULTI_READ*/
+
+/* we don't want to use include/trace/events */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/drivers/net/wireless/ath/ath12k/wmi.c b/drivers/net/wireless/ath/ath12k/wmi.c
new file mode 100644
index 000000000000..f6df14149531
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/wmi.c
@@ -0,0 +1,6600 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+#include <linux/skbuff.h>
+#include <linux/ctype.h>
+#include <net/mac80211.h>
+#include <net/cfg80211.h>
+#include <linux/completion.h>
+#include <linux/if_ether.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/uuid.h>
+#include <linux/time.h>
+#include <linux/of.h>
+#include "core.h"
+#include "debug.h"
+#include "mac.h"
+#include "hw.h"
+#include "peer.h"
+
+struct ath12k_wmi_svc_ready_parse {
+ bool wmi_svc_bitmap_done;
+};
+
+struct ath12k_wmi_dma_ring_caps_parse {
+ struct ath12k_wmi_dma_ring_caps_params *dma_ring_caps;
+ u32 n_dma_ring_caps;
+};
+
+struct ath12k_wmi_service_ext_arg {
+ u32 default_conc_scan_config_bits;
+ u32 default_fw_config_bits;
+ struct ath12k_wmi_ppe_threshold_arg ppet;
+ u32 he_cap_info;
+ u32 mpdu_density;
+ u32 max_bssid_rx_filters;
+ u32 num_hw_modes;
+ u32 num_phy;
+};
+
+struct ath12k_wmi_svc_rdy_ext_parse {
+ struct ath12k_wmi_service_ext_arg arg;
+ const struct ath12k_wmi_soc_mac_phy_hw_mode_caps_params *hw_caps;
+ const struct ath12k_wmi_hw_mode_cap_params *hw_mode_caps;
+ u32 n_hw_mode_caps;
+ u32 tot_phy_id;
+ struct ath12k_wmi_hw_mode_cap_params pref_hw_mode_caps;
+ struct ath12k_wmi_mac_phy_caps_params *mac_phy_caps;
+ u32 n_mac_phy_caps;
+ const struct ath12k_wmi_soc_hal_reg_caps_params *soc_hal_reg_caps;
+ const struct ath12k_wmi_hal_reg_caps_ext_params *ext_hal_reg_caps;
+ u32 n_ext_hal_reg_caps;
+ struct ath12k_wmi_dma_ring_caps_parse dma_caps_parse;
+ bool hw_mode_done;
+ bool mac_phy_done;
+ bool ext_hal_reg_done;
+ bool mac_phy_chainmask_combo_done;
+ bool mac_phy_chainmask_cap_done;
+ bool oem_dma_ring_cap_done;
+ bool dma_ring_cap_done;
+};
+
+struct ath12k_wmi_svc_rdy_ext2_parse {
+ struct ath12k_wmi_dma_ring_caps_parse dma_caps_parse;
+ bool dma_ring_cap_done;
+};
+
+struct ath12k_wmi_rdy_parse {
+ u32 num_extra_mac_addr;
+};
+
+struct ath12k_wmi_dma_buf_release_arg {
+ struct ath12k_wmi_dma_buf_release_fixed_params fixed;
+ const struct ath12k_wmi_dma_buf_release_entry_params *buf_entry;
+ const struct ath12k_wmi_dma_buf_release_meta_data_params *meta_data;
+ u32 num_buf_entry;
+ u32 num_meta;
+ bool buf_entry_done;
+ bool meta_data_done;
+};
+
+struct ath12k_wmi_tlv_policy {
+ size_t min_len;
+};
+
+struct wmi_tlv_mgmt_rx_parse {
+ const struct ath12k_wmi_mgmt_rx_params *fixed;
+ const u8 *frame_buf;
+ bool frame_buf_done;
+};
+
+static const struct ath12k_wmi_tlv_policy ath12k_wmi_tlv_policies[] = {
+ [WMI_TAG_ARRAY_BYTE] = { .min_len = 0 },
+ [WMI_TAG_ARRAY_UINT32] = { .min_len = 0 },
+ [WMI_TAG_SERVICE_READY_EVENT] = {
+ .min_len = sizeof(struct wmi_service_ready_event) },
+ [WMI_TAG_SERVICE_READY_EXT_EVENT] = {
+ .min_len = sizeof(struct wmi_service_ready_ext_event) },
+ [WMI_TAG_SOC_MAC_PHY_HW_MODE_CAPS] = {
+ .min_len = sizeof(struct ath12k_wmi_soc_mac_phy_hw_mode_caps_params) },
+ [WMI_TAG_SOC_HAL_REG_CAPABILITIES] = {
+ .min_len = sizeof(struct ath12k_wmi_soc_hal_reg_caps_params) },
+ [WMI_TAG_VDEV_START_RESPONSE_EVENT] = {
+ .min_len = sizeof(struct wmi_vdev_start_resp_event) },
+ [WMI_TAG_PEER_DELETE_RESP_EVENT] = {
+ .min_len = sizeof(struct wmi_peer_delete_resp_event) },
+ [WMI_TAG_OFFLOAD_BCN_TX_STATUS_EVENT] = {
+ .min_len = sizeof(struct wmi_bcn_tx_status_event) },
+ [WMI_TAG_VDEV_STOPPED_EVENT] = {
+ .min_len = sizeof(struct wmi_vdev_stopped_event) },
+ [WMI_TAG_REG_CHAN_LIST_CC_EXT_EVENT] = {
+ .min_len = sizeof(struct wmi_reg_chan_list_cc_ext_event) },
+ [WMI_TAG_MGMT_RX_HDR] = {
+ .min_len = sizeof(struct ath12k_wmi_mgmt_rx_params) },
+ [WMI_TAG_MGMT_TX_COMPL_EVENT] = {
+ .min_len = sizeof(struct wmi_mgmt_tx_compl_event) },
+ [WMI_TAG_SCAN_EVENT] = {
+ .min_len = sizeof(struct wmi_scan_event) },
+ [WMI_TAG_PEER_STA_KICKOUT_EVENT] = {
+ .min_len = sizeof(struct wmi_peer_sta_kickout_event) },
+ [WMI_TAG_ROAM_EVENT] = {
+ .min_len = sizeof(struct wmi_roam_event) },
+ [WMI_TAG_CHAN_INFO_EVENT] = {
+ .min_len = sizeof(struct wmi_chan_info_event) },
+ [WMI_TAG_PDEV_BSS_CHAN_INFO_EVENT] = {
+ .min_len = sizeof(struct wmi_pdev_bss_chan_info_event) },
+ [WMI_TAG_VDEV_INSTALL_KEY_COMPLETE_EVENT] = {
+ .min_len = sizeof(struct wmi_vdev_install_key_compl_event) },
+ [WMI_TAG_READY_EVENT] = {
+ .min_len = sizeof(struct ath12k_wmi_ready_event_min_params) },
+ [WMI_TAG_SERVICE_AVAILABLE_EVENT] = {
+ .min_len = sizeof(struct wmi_service_available_event) },
+ [WMI_TAG_PEER_ASSOC_CONF_EVENT] = {
+ .min_len = sizeof(struct wmi_peer_assoc_conf_event) },
+ [WMI_TAG_PDEV_CTL_FAILSAFE_CHECK_EVENT] = {
+ .min_len = sizeof(struct wmi_pdev_ctl_failsafe_chk_event) },
+ [WMI_TAG_HOST_SWFDA_EVENT] = {
+ .min_len = sizeof(struct wmi_fils_discovery_event) },
+ [WMI_TAG_OFFLOAD_PRB_RSP_TX_STATUS_EVENT] = {
+ .min_len = sizeof(struct wmi_probe_resp_tx_status_event) },
+ [WMI_TAG_VDEV_DELETE_RESP_EVENT] = {
+ .min_len = sizeof(struct wmi_vdev_delete_resp_event) },
+};
+
+static __le32 ath12k_wmi_tlv_hdr(u32 cmd, u32 len)
+{
+ return le32_encode_bits(cmd, WMI_TLV_TAG) |
+ le32_encode_bits(len, WMI_TLV_LEN);
+}
+
+static __le32 ath12k_wmi_tlv_cmd_hdr(u32 cmd, u32 len)
+{
+ return ath12k_wmi_tlv_hdr(cmd, len - TLV_HDR_SIZE);
+}
+
+void ath12k_wmi_init_qcn9274(struct ath12k_base *ab,
+ struct ath12k_wmi_resource_config_arg *config)
+{
+ config->num_vdevs = ab->num_radios * TARGET_NUM_VDEVS;
+
+ if (ab->num_radios == 2) {
+ config->num_peers = TARGET_NUM_PEERS(DBS);
+ config->num_tids = TARGET_NUM_TIDS(DBS);
+ } else if (ab->num_radios == 3) {
+ config->num_peers = TARGET_NUM_PEERS(DBS_SBS);
+ config->num_tids = TARGET_NUM_TIDS(DBS_SBS);
+ } else {
+ /* Control should not reach here */
+ config->num_peers = TARGET_NUM_PEERS(SINGLE);
+ config->num_tids = TARGET_NUM_TIDS(SINGLE);
+ }
+ config->num_offload_peers = TARGET_NUM_OFFLD_PEERS;
+ config->num_offload_reorder_buffs = TARGET_NUM_OFFLD_REORDER_BUFFS;
+ config->num_peer_keys = TARGET_NUM_PEER_KEYS;
+ config->ast_skid_limit = TARGET_AST_SKID_LIMIT;
+ config->tx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
+ config->rx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
+ config->rx_timeout_pri[0] = TARGET_RX_TIMEOUT_LO_PRI;
+ config->rx_timeout_pri[1] = TARGET_RX_TIMEOUT_LO_PRI;
+ config->rx_timeout_pri[2] = TARGET_RX_TIMEOUT_LO_PRI;
+ config->rx_timeout_pri[3] = TARGET_RX_TIMEOUT_HI_PRI;
+
+ if (test_bit(ATH12K_FLAG_RAW_MODE, &ab->dev_flags))
+ config->rx_decap_mode = TARGET_DECAP_MODE_RAW;
+ else
+ config->rx_decap_mode = TARGET_DECAP_MODE_NATIVE_WIFI;
+
+ config->scan_max_pending_req = TARGET_SCAN_MAX_PENDING_REQS;
+ config->bmiss_offload_max_vdev = TARGET_BMISS_OFFLOAD_MAX_VDEV;
+ config->roam_offload_max_vdev = TARGET_ROAM_OFFLOAD_MAX_VDEV;
+ config->roam_offload_max_ap_profiles = TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES;
+ config->num_mcast_groups = TARGET_NUM_MCAST_GROUPS;
+ config->num_mcast_table_elems = TARGET_NUM_MCAST_TABLE_ELEMS;
+ config->mcast2ucast_mode = TARGET_MCAST2UCAST_MODE;
+ config->tx_dbg_log_size = TARGET_TX_DBG_LOG_SIZE;
+ config->num_wds_entries = TARGET_NUM_WDS_ENTRIES;
+ config->dma_burst_size = TARGET_DMA_BURST_SIZE;
+ config->rx_skip_defrag_timeout_dup_detection_check =
+ TARGET_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
+ config->vow_config = TARGET_VOW_CONFIG;
+ config->gtk_offload_max_vdev = TARGET_GTK_OFFLOAD_MAX_VDEV;
+ config->num_msdu_desc = TARGET_NUM_MSDU_DESC;
+ config->beacon_tx_offload_max_vdev = ab->num_radios * TARGET_MAX_BCN_OFFLD;
+ config->rx_batchmode = TARGET_RX_BATCHMODE;
+ /* Indicates host supports peer map v3 and unmap v2 support */
+ config->peer_map_unmap_version = 0x32;
+ config->twt_ap_pdev_count = ab->num_radios;
+ config->twt_ap_sta_count = 1000;
+}
+
+void ath12k_wmi_init_wcn7850(struct ath12k_base *ab,
+ struct ath12k_wmi_resource_config_arg *config)
+{
+ config->num_vdevs = 4;
+ config->num_peers = 16;
+ config->num_tids = 32;
+
+ config->num_offload_peers = 3;
+ config->num_offload_reorder_buffs = 3;
+ config->num_peer_keys = TARGET_NUM_PEER_KEYS;
+ config->ast_skid_limit = TARGET_AST_SKID_LIMIT;
+ config->tx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
+ config->rx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
+ config->rx_timeout_pri[0] = TARGET_RX_TIMEOUT_LO_PRI;
+ config->rx_timeout_pri[1] = TARGET_RX_TIMEOUT_LO_PRI;
+ config->rx_timeout_pri[2] = TARGET_RX_TIMEOUT_LO_PRI;
+ config->rx_timeout_pri[3] = TARGET_RX_TIMEOUT_HI_PRI;
+ config->rx_decap_mode = TARGET_DECAP_MODE_NATIVE_WIFI;
+ config->scan_max_pending_req = TARGET_SCAN_MAX_PENDING_REQS;
+ config->bmiss_offload_max_vdev = TARGET_BMISS_OFFLOAD_MAX_VDEV;
+ config->roam_offload_max_vdev = TARGET_ROAM_OFFLOAD_MAX_VDEV;
+ config->roam_offload_max_ap_profiles = TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES;
+ config->num_mcast_groups = 0;
+ config->num_mcast_table_elems = 0;
+ config->mcast2ucast_mode = 0;
+ config->tx_dbg_log_size = TARGET_TX_DBG_LOG_SIZE;
+ config->num_wds_entries = 0;
+ config->dma_burst_size = 0;
+ config->rx_skip_defrag_timeout_dup_detection_check = 0;
+ config->vow_config = TARGET_VOW_CONFIG;
+ config->gtk_offload_max_vdev = 2;
+ config->num_msdu_desc = 0x400;
+ config->beacon_tx_offload_max_vdev = 2;
+ config->rx_batchmode = TARGET_RX_BATCHMODE;
+
+ config->peer_map_unmap_version = 0x1;
+ config->use_pdev_id = 1;
+ config->max_frag_entries = 0xa;
+ config->num_tdls_vdevs = 0x1;
+ config->num_tdls_conn_table_entries = 8;
+ config->beacon_tx_offload_max_vdev = 0x2;
+ config->num_multicast_filter_entries = 0x20;
+ config->num_wow_filters = 0x16;
+ config->num_keep_alive_pattern = 0;
+}
+
+#define PRIMAP(_hw_mode_) \
+ [_hw_mode_] = _hw_mode_##_PRI
+
+static const int ath12k_hw_mode_pri_map[] = {
+ PRIMAP(WMI_HOST_HW_MODE_SINGLE),
+ PRIMAP(WMI_HOST_HW_MODE_DBS),
+ PRIMAP(WMI_HOST_HW_MODE_SBS_PASSIVE),
+ PRIMAP(WMI_HOST_HW_MODE_SBS),
+ PRIMAP(WMI_HOST_HW_MODE_DBS_SBS),
+ PRIMAP(WMI_HOST_HW_MODE_DBS_OR_SBS),
+ /* keep last */
+ PRIMAP(WMI_HOST_HW_MODE_MAX),
+};
+
+static int
+ath12k_wmi_tlv_iter(struct ath12k_base *ab, const void *ptr, size_t len,
+ int (*iter)(struct ath12k_base *ab, u16 tag, u16 len,
+ const void *ptr, void *data),
+ void *data)
+{
+ const void *begin = ptr;
+ const struct wmi_tlv *tlv;
+ u16 tlv_tag, tlv_len;
+ int ret;
+
+ while (len > 0) {
+ if (len < sizeof(*tlv)) {
+ ath12k_err(ab, "wmi tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n",
+ ptr - begin, len, sizeof(*tlv));
+ return -EINVAL;
+ }
+
+ tlv = ptr;
+ tlv_tag = le32_get_bits(tlv->header, WMI_TLV_TAG);
+ tlv_len = le32_get_bits(tlv->header, WMI_TLV_LEN);
+ ptr += sizeof(*tlv);
+ len -= sizeof(*tlv);
+
+ if (tlv_len > len) {
+ ath12k_err(ab, "wmi tlv parse failure of tag %u at byte %zd (%zu bytes left, %u expected)\n",
+ tlv_tag, ptr - begin, len, tlv_len);
+ return -EINVAL;
+ }
+
+ if (tlv_tag < ARRAY_SIZE(ath12k_wmi_tlv_policies) &&
+ ath12k_wmi_tlv_policies[tlv_tag].min_len &&
+ ath12k_wmi_tlv_policies[tlv_tag].min_len > tlv_len) {
+ ath12k_err(ab, "wmi tlv parse failure of tag %u at byte %zd (%u bytes is less than min length %zu)\n",
+ tlv_tag, ptr - begin, tlv_len,
+ ath12k_wmi_tlv_policies[tlv_tag].min_len);
+ return -EINVAL;
+ }
+
+ ret = iter(ab, tlv_tag, tlv_len, ptr, data);
+ if (ret)
+ return ret;
+
+ ptr += tlv_len;
+ len -= tlv_len;
+ }
+
+ return 0;
+}
+
+static int ath12k_wmi_tlv_iter_parse(struct ath12k_base *ab, u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ const void **tb = data;
+
+ if (tag < WMI_TAG_MAX)
+ tb[tag] = ptr;
+
+ return 0;
+}
+
+static int ath12k_wmi_tlv_parse(struct ath12k_base *ar, const void **tb,
+ const void *ptr, size_t len)
+{
+ return ath12k_wmi_tlv_iter(ar, ptr, len, ath12k_wmi_tlv_iter_parse,
+ (void *)tb);
+}
+
+static const void **
+ath12k_wmi_tlv_parse_alloc(struct ath12k_base *ab, const void *ptr,
+ size_t len, gfp_t gfp)
+{
+ const void **tb;
+ int ret;
+
+ tb = kcalloc(WMI_TAG_MAX, sizeof(*tb), gfp);
+ if (!tb)
+ return ERR_PTR(-ENOMEM);
+
+ ret = ath12k_wmi_tlv_parse(ab, tb, ptr, len);
+ if (ret) {
+ kfree(tb);
+ return ERR_PTR(ret);
+ }
+
+ return tb;
+}
+
+static int ath12k_wmi_cmd_send_nowait(struct ath12k_wmi_pdev *wmi, struct sk_buff *skb,
+ u32 cmd_id)
+{
+ struct ath12k_skb_cb *skb_cb = ATH12K_SKB_CB(skb);
+ struct ath12k_base *ab = wmi->wmi_ab->ab;
+ struct wmi_cmd_hdr *cmd_hdr;
+ int ret;
+
+ if (!skb_push(skb, sizeof(struct wmi_cmd_hdr)))
+ return -ENOMEM;
+
+ cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
+ cmd_hdr->cmd_id = le32_encode_bits(cmd_id, WMI_CMD_HDR_CMD_ID);
+
+ memset(skb_cb, 0, sizeof(*skb_cb));
+ ret = ath12k_htc_send(&ab->htc, wmi->eid, skb);
+
+ if (ret)
+ goto err_pull;
+
+ return 0;
+
+err_pull:
+ skb_pull(skb, sizeof(struct wmi_cmd_hdr));
+ return ret;
+}
+
+int ath12k_wmi_cmd_send(struct ath12k_wmi_pdev *wmi, struct sk_buff *skb,
+ u32 cmd_id)
+{
+ struct ath12k_wmi_base *wmi_sc = wmi->wmi_ab;
+ int ret = -EOPNOTSUPP;
+
+ might_sleep();
+
+ wait_event_timeout(wmi_sc->tx_credits_wq, ({
+ ret = ath12k_wmi_cmd_send_nowait(wmi, skb, cmd_id);
+
+ if (ret && test_bit(ATH12K_FLAG_CRASH_FLUSH, &wmi_sc->ab->dev_flags))
+ ret = -ESHUTDOWN;
+
+ (ret != -EAGAIN);
+ }), WMI_SEND_TIMEOUT_HZ);
+
+ if (ret == -EAGAIN)
+ ath12k_warn(wmi_sc->ab, "wmi command %d timeout\n", cmd_id);
+
+ return ret;
+}
+
+static int ath12k_pull_svc_ready_ext(struct ath12k_wmi_pdev *wmi_handle,
+ const void *ptr,
+ struct ath12k_wmi_service_ext_arg *arg)
+{
+ const struct wmi_service_ready_ext_event *ev = ptr;
+ int i;
+
+ if (!ev)
+ return -EINVAL;
+
+ /* Move this to host based bitmap */
+ arg->default_conc_scan_config_bits =
+ le32_to_cpu(ev->default_conc_scan_config_bits);
+ arg->default_fw_config_bits = le32_to_cpu(ev->default_fw_config_bits);
+ arg->he_cap_info = le32_to_cpu(ev->he_cap_info);
+ arg->mpdu_density = le32_to_cpu(ev->mpdu_density);
+ arg->max_bssid_rx_filters = le32_to_cpu(ev->max_bssid_rx_filters);
+ arg->ppet.numss_m1 = le32_to_cpu(ev->ppet.numss_m1);
+ arg->ppet.ru_bit_mask = le32_to_cpu(ev->ppet.ru_info);
+
+ for (i = 0; i < WMI_MAX_NUM_SS; i++)
+ arg->ppet.ppet16_ppet8_ru3_ru0[i] =
+ le32_to_cpu(ev->ppet.ppet16_ppet8_ru3_ru0[i]);
+
+ return 0;
+}
+
+static int
+ath12k_pull_mac_phy_cap_svc_ready_ext(struct ath12k_wmi_pdev *wmi_handle,
+ struct ath12k_wmi_svc_rdy_ext_parse *svc,
+ u8 hw_mode_id, u8 phy_id,
+ struct ath12k_pdev *pdev)
+{
+ const struct ath12k_wmi_mac_phy_caps_params *mac_caps;
+ const struct ath12k_wmi_soc_mac_phy_hw_mode_caps_params *hw_caps = svc->hw_caps;
+ const struct ath12k_wmi_hw_mode_cap_params *wmi_hw_mode_caps = svc->hw_mode_caps;
+ const struct ath12k_wmi_mac_phy_caps_params *wmi_mac_phy_caps = svc->mac_phy_caps;
+ struct ath12k_band_cap *cap_band;
+ struct ath12k_pdev_cap *pdev_cap = &pdev->cap;
+ u32 phy_map;
+ u32 hw_idx, phy_idx = 0;
+ int i;
+
+ if (!hw_caps || !wmi_hw_mode_caps || !svc->soc_hal_reg_caps)
+ return -EINVAL;
+
+ for (hw_idx = 0; hw_idx < le32_to_cpu(hw_caps->num_hw_modes); hw_idx++) {
+ if (hw_mode_id == le32_to_cpu(wmi_hw_mode_caps[hw_idx].hw_mode_id))
+ break;
+
+ phy_map = le32_to_cpu(wmi_hw_mode_caps[hw_idx].phy_id_map);
+ phy_idx = fls(phy_map);
+ }
+
+ if (hw_idx == le32_to_cpu(hw_caps->num_hw_modes))
+ return -EINVAL;
+
+ phy_idx += phy_id;
+ if (phy_id >= le32_to_cpu(svc->soc_hal_reg_caps->num_phy))
+ return -EINVAL;
+
+ mac_caps = wmi_mac_phy_caps + phy_idx;
+
+ pdev->pdev_id = le32_to_cpu(mac_caps->pdev_id);
+ pdev_cap->supported_bands |= le32_to_cpu(mac_caps->supported_bands);
+ pdev_cap->ampdu_density = le32_to_cpu(mac_caps->ampdu_density);
+
+ /* Take non-zero tx/rx chainmask. If tx/rx chainmask differs from
+ * band to band for a single radio, need to see how this should be
+ * handled.
+ */
+ if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_2G_CAP) {
+ pdev_cap->tx_chain_mask = le32_to_cpu(mac_caps->tx_chain_mask_2g);
+ pdev_cap->rx_chain_mask = le32_to_cpu(mac_caps->rx_chain_mask_2g);
+ } else if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_5G_CAP) {
+ pdev_cap->vht_cap = le32_to_cpu(mac_caps->vht_cap_info_5g);
+ pdev_cap->vht_mcs = le32_to_cpu(mac_caps->vht_supp_mcs_5g);
+ pdev_cap->he_mcs = le32_to_cpu(mac_caps->he_supp_mcs_5g);
+ pdev_cap->tx_chain_mask = le32_to_cpu(mac_caps->tx_chain_mask_5g);
+ pdev_cap->rx_chain_mask = le32_to_cpu(mac_caps->rx_chain_mask_5g);
+ } else {
+ return -EINVAL;
+ }
+
+ /* tx/rx chainmask reported from fw depends on the actual hw chains used,
+ * For example, for 4x4 capable macphys, first 4 chains can be used for first
+ * mac and the remaing 4 chains can be used for the second mac or vice-versa.
+ * In this case, tx/rx chainmask 0xf will be advertised for first mac and 0xf0
+ * will be advertised for second mac or vice-versa. Compute the shift value
+ * for tx/rx chainmask which will be used to advertise supported ht/vht rates to
+ * mac80211.
+ */
+ pdev_cap->tx_chain_mask_shift =
+ find_first_bit((unsigned long *)&pdev_cap->tx_chain_mask, 32);
+ pdev_cap->rx_chain_mask_shift =
+ find_first_bit((unsigned long *)&pdev_cap->rx_chain_mask, 32);
+
+ if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_2G_CAP) {
+ cap_band = &pdev_cap->band[NL80211_BAND_2GHZ];
+ cap_band->phy_id = le32_to_cpu(mac_caps->phy_id);
+ cap_band->max_bw_supported = le32_to_cpu(mac_caps->max_bw_supported_2g);
+ cap_band->ht_cap_info = le32_to_cpu(mac_caps->ht_cap_info_2g);
+ cap_band->he_cap_info[0] = le32_to_cpu(mac_caps->he_cap_info_2g);
+ cap_band->he_cap_info[1] = le32_to_cpu(mac_caps->he_cap_info_2g_ext);
+ cap_band->he_mcs = le32_to_cpu(mac_caps->he_supp_mcs_2g);
+ for (i = 0; i < WMI_MAX_HECAP_PHY_SIZE; i++)
+ cap_band->he_cap_phy_info[i] =
+ le32_to_cpu(mac_caps->he_cap_phy_info_2g[i]);
+
+ cap_band->he_ppet.numss_m1 = le32_to_cpu(mac_caps->he_ppet2g.numss_m1);
+ cap_band->he_ppet.ru_bit_mask = le32_to_cpu(mac_caps->he_ppet2g.ru_info);
+
+ for (i = 0; i < WMI_MAX_NUM_SS; i++)
+ cap_band->he_ppet.ppet16_ppet8_ru3_ru0[i] =
+ le32_to_cpu(mac_caps->he_ppet2g.ppet16_ppet8_ru3_ru0[i]);
+ }
+
+ if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_5G_CAP) {
+ cap_band = &pdev_cap->band[NL80211_BAND_5GHZ];
+ cap_band->phy_id = le32_to_cpu(mac_caps->phy_id);
+ cap_band->max_bw_supported =
+ le32_to_cpu(mac_caps->max_bw_supported_5g);
+ cap_band->ht_cap_info = le32_to_cpu(mac_caps->ht_cap_info_5g);
+ cap_band->he_cap_info[0] = le32_to_cpu(mac_caps->he_cap_info_5g);
+ cap_band->he_cap_info[1] = le32_to_cpu(mac_caps->he_cap_info_5g_ext);
+ cap_band->he_mcs = le32_to_cpu(mac_caps->he_supp_mcs_5g);
+ for (i = 0; i < WMI_MAX_HECAP_PHY_SIZE; i++)
+ cap_band->he_cap_phy_info[i] =
+ le32_to_cpu(mac_caps->he_cap_phy_info_5g[i]);
+
+ cap_band->he_ppet.numss_m1 = le32_to_cpu(mac_caps->he_ppet5g.numss_m1);
+ cap_band->he_ppet.ru_bit_mask = le32_to_cpu(mac_caps->he_ppet5g.ru_info);
+
+ for (i = 0; i < WMI_MAX_NUM_SS; i++)
+ cap_band->he_ppet.ppet16_ppet8_ru3_ru0[i] =
+ le32_to_cpu(mac_caps->he_ppet5g.ppet16_ppet8_ru3_ru0[i]);
+
+ cap_band = &pdev_cap->band[NL80211_BAND_6GHZ];
+ cap_band->max_bw_supported =
+ le32_to_cpu(mac_caps->max_bw_supported_5g);
+ cap_band->ht_cap_info = le32_to_cpu(mac_caps->ht_cap_info_5g);
+ cap_band->he_cap_info[0] = le32_to_cpu(mac_caps->he_cap_info_5g);
+ cap_band->he_cap_info[1] = le32_to_cpu(mac_caps->he_cap_info_5g_ext);
+ cap_band->he_mcs = le32_to_cpu(mac_caps->he_supp_mcs_5g);
+ for (i = 0; i < WMI_MAX_HECAP_PHY_SIZE; i++)
+ cap_band->he_cap_phy_info[i] =
+ le32_to_cpu(mac_caps->he_cap_phy_info_5g[i]);
+
+ cap_band->he_ppet.numss_m1 = le32_to_cpu(mac_caps->he_ppet5g.numss_m1);
+ cap_band->he_ppet.ru_bit_mask = le32_to_cpu(mac_caps->he_ppet5g.ru_info);
+
+ for (i = 0; i < WMI_MAX_NUM_SS; i++)
+ cap_band->he_ppet.ppet16_ppet8_ru3_ru0[i] =
+ le32_to_cpu(mac_caps->he_ppet5g.ppet16_ppet8_ru3_ru0[i]);
+ }
+
+ return 0;
+}
+
+static int
+ath12k_pull_reg_cap_svc_rdy_ext(struct ath12k_wmi_pdev *wmi_handle,
+ const struct ath12k_wmi_soc_hal_reg_caps_params *reg_caps,
+ const struct ath12k_wmi_hal_reg_caps_ext_params *ext_caps,
+ u8 phy_idx,
+ struct ath12k_wmi_hal_reg_capabilities_ext_arg *param)
+{
+ const struct ath12k_wmi_hal_reg_caps_ext_params *ext_reg_cap;
+
+ if (!reg_caps || !ext_caps)
+ return -EINVAL;
+
+ if (phy_idx >= le32_to_cpu(reg_caps->num_phy))
+ return -EINVAL;
+
+ ext_reg_cap = &ext_caps[phy_idx];
+
+ param->phy_id = le32_to_cpu(ext_reg_cap->phy_id);
+ param->eeprom_reg_domain = le32_to_cpu(ext_reg_cap->eeprom_reg_domain);
+ param->eeprom_reg_domain_ext =
+ le32_to_cpu(ext_reg_cap->eeprom_reg_domain_ext);
+ param->regcap1 = le32_to_cpu(ext_reg_cap->regcap1);
+ param->regcap2 = le32_to_cpu(ext_reg_cap->regcap2);
+ /* check if param->wireless_mode is needed */
+ param->low_2ghz_chan = le32_to_cpu(ext_reg_cap->low_2ghz_chan);
+ param->high_2ghz_chan = le32_to_cpu(ext_reg_cap->high_2ghz_chan);
+ param->low_5ghz_chan = le32_to_cpu(ext_reg_cap->low_5ghz_chan);
+ param->high_5ghz_chan = le32_to_cpu(ext_reg_cap->high_5ghz_chan);
+
+ return 0;
+}
+
+static int ath12k_pull_service_ready_tlv(struct ath12k_base *ab,
+ const void *evt_buf,
+ struct ath12k_wmi_target_cap_arg *cap)
+{
+ const struct wmi_service_ready_event *ev = evt_buf;
+
+ if (!ev) {
+ ath12k_err(ab, "%s: failed by NULL param\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ cap->phy_capability = le32_to_cpu(ev->phy_capability);
+ cap->max_frag_entry = le32_to_cpu(ev->max_frag_entry);
+ cap->num_rf_chains = le32_to_cpu(ev->num_rf_chains);
+ cap->ht_cap_info = le32_to_cpu(ev->ht_cap_info);
+ cap->vht_cap_info = le32_to_cpu(ev->vht_cap_info);
+ cap->vht_supp_mcs = le32_to_cpu(ev->vht_supp_mcs);
+ cap->hw_min_tx_power = le32_to_cpu(ev->hw_min_tx_power);
+ cap->hw_max_tx_power = le32_to_cpu(ev->hw_max_tx_power);
+ cap->sys_cap_info = le32_to_cpu(ev->sys_cap_info);
+ cap->min_pkt_size_enable = le32_to_cpu(ev->min_pkt_size_enable);
+ cap->max_bcn_ie_size = le32_to_cpu(ev->max_bcn_ie_size);
+ cap->max_num_scan_channels = le32_to_cpu(ev->max_num_scan_channels);
+ cap->max_supported_macs = le32_to_cpu(ev->max_supported_macs);
+ cap->wmi_fw_sub_feat_caps = le32_to_cpu(ev->wmi_fw_sub_feat_caps);
+ cap->txrx_chainmask = le32_to_cpu(ev->txrx_chainmask);
+ cap->default_dbs_hw_mode_index = le32_to_cpu(ev->default_dbs_hw_mode_index);
+ cap->num_msdu_desc = le32_to_cpu(ev->num_msdu_desc);
+
+ return 0;
+}
+
+/* Save the wmi_service_bitmap into a linear bitmap. The wmi_services in
+ * wmi_service ready event are advertised in b0-b3 (LSB 4-bits) of each
+ * 4-byte word.
+ */
+static void ath12k_wmi_service_bitmap_copy(struct ath12k_wmi_pdev *wmi,
+ const u32 *wmi_svc_bm)
+{
+ int i, j;
+
+ for (i = 0, j = 0; i < WMI_SERVICE_BM_SIZE && j < WMI_MAX_SERVICE; i++) {
+ do {
+ if (wmi_svc_bm[i] & BIT(j % WMI_SERVICE_BITS_IN_SIZE32))
+ set_bit(j, wmi->wmi_ab->svc_map);
+ } while (++j % WMI_SERVICE_BITS_IN_SIZE32);
+ }
+}
+
+static int ath12k_wmi_svc_rdy_parse(struct ath12k_base *ab, u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ struct ath12k_wmi_svc_ready_parse *svc_ready = data;
+ struct ath12k_wmi_pdev *wmi_handle = &ab->wmi_ab.wmi[0];
+ u16 expect_len;
+
+ switch (tag) {
+ case WMI_TAG_SERVICE_READY_EVENT:
+ if (ath12k_pull_service_ready_tlv(ab, ptr, &ab->target_caps))
+ return -EINVAL;
+ break;
+
+ case WMI_TAG_ARRAY_UINT32:
+ if (!svc_ready->wmi_svc_bitmap_done) {
+ expect_len = WMI_SERVICE_BM_SIZE * sizeof(u32);
+ if (len < expect_len) {
+ ath12k_warn(ab, "invalid len %d for the tag 0x%x\n",
+ len, tag);
+ return -EINVAL;
+ }
+
+ ath12k_wmi_service_bitmap_copy(wmi_handle, ptr);
+
+ svc_ready->wmi_svc_bitmap_done = true;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int ath12k_service_ready_event(struct ath12k_base *ab, struct sk_buff *skb)
+{
+ struct ath12k_wmi_svc_ready_parse svc_ready = { };
+ int ret;
+
+ ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
+ ath12k_wmi_svc_rdy_parse,
+ &svc_ready);
+ if (ret) {
+ ath12k_warn(ab, "failed to parse tlv %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+struct sk_buff *ath12k_wmi_alloc_skb(struct ath12k_wmi_base *wmi_sc, u32 len)
+{
+ struct sk_buff *skb;
+ struct ath12k_base *ab = wmi_sc->ab;
+ u32 round_len = roundup(len, 4);
+
+ skb = ath12k_htc_alloc_skb(ab, WMI_SKB_HEADROOM + round_len);
+ if (!skb)
+ return NULL;
+
+ skb_reserve(skb, WMI_SKB_HEADROOM);
+ if (!IS_ALIGNED((unsigned long)skb->data, 4))
+ ath12k_warn(ab, "unaligned WMI skb data\n");
+
+ skb_put(skb, round_len);
+ memset(skb->data, 0, round_len);
+
+ return skb;
+}
+
+int ath12k_wmi_mgmt_send(struct ath12k *ar, u32 vdev_id, u32 buf_id,
+ struct sk_buff *frame)
+{
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct wmi_mgmt_send_cmd *cmd;
+ struct wmi_tlv *frame_tlv;
+ struct sk_buff *skb;
+ u32 buf_len;
+ int ret, len;
+
+ buf_len = min_t(int, frame->len, WMI_MGMT_SEND_DOWNLD_LEN);
+
+ len = sizeof(*cmd) + sizeof(*frame_tlv) + roundup(buf_len, 4);
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_mgmt_send_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MGMT_TX_SEND_CMD,
+ sizeof(*cmd));
+ cmd->vdev_id = cpu_to_le32(vdev_id);
+ cmd->desc_id = cpu_to_le32(buf_id);
+ cmd->chanfreq = 0;
+ cmd->paddr_lo = cpu_to_le32(lower_32_bits(ATH12K_SKB_CB(frame)->paddr));
+ cmd->paddr_hi = cpu_to_le32(upper_32_bits(ATH12K_SKB_CB(frame)->paddr));
+ cmd->frame_len = cpu_to_le32(frame->len);
+ cmd->buf_len = cpu_to_le32(buf_len);
+ cmd->tx_params_valid = 0;
+
+ frame_tlv = (struct wmi_tlv *)(skb->data + sizeof(*cmd));
+ frame_tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, buf_len);
+
+ memcpy(frame_tlv->value, frame->data, buf_len);
+
+ ret = ath12k_wmi_cmd_send(wmi, skb, WMI_MGMT_TX_SEND_CMDID);
+ if (ret) {
+ ath12k_warn(ar->ab,
+ "failed to submit WMI_MGMT_TX_SEND_CMDID cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath12k_wmi_vdev_create(struct ath12k *ar, u8 *macaddr,
+ struct ath12k_wmi_vdev_create_arg *args)
+{
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct wmi_vdev_create_cmd *cmd;
+ struct sk_buff *skb;
+ struct ath12k_wmi_vdev_txrx_streams_params *txrx_streams;
+ struct wmi_tlv *tlv;
+ int ret, len;
+ void *ptr;
+
+ /* It can be optimized my sending tx/rx chain configuration
+ * only for supported bands instead of always sending it for
+ * both the bands.
+ */
+ len = sizeof(*cmd) + TLV_HDR_SIZE +
+ (WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams));
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_vdev_create_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_CREATE_CMD,
+ sizeof(*cmd));
+
+ cmd->vdev_id = cpu_to_le32(args->if_id);
+ cmd->vdev_type = cpu_to_le32(args->type);
+ cmd->vdev_subtype = cpu_to_le32(args->subtype);
+ cmd->num_cfg_txrx_streams = cpu_to_le32(WMI_NUM_SUPPORTED_BAND_MAX);
+ cmd->pdev_id = cpu_to_le32(args->pdev_id);
+ cmd->vdev_stats_id = cpu_to_le32(args->if_stats_id);
+ ether_addr_copy(cmd->vdev_macaddr.addr, macaddr);
+
+ ptr = skb->data + sizeof(*cmd);
+ len = WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams);
+
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
+
+ ptr += TLV_HDR_SIZE;
+ txrx_streams = ptr;
+ len = sizeof(*txrx_streams);
+ txrx_streams->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_TXRX_STREAMS,
+ len);
+ txrx_streams->band = WMI_TPC_CHAINMASK_CONFIG_BAND_2G;
+ txrx_streams->supported_tx_streams =
+ args->chains[NL80211_BAND_2GHZ].tx;
+ txrx_streams->supported_rx_streams =
+ args->chains[NL80211_BAND_2GHZ].rx;
+
+ txrx_streams++;
+ txrx_streams->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_TXRX_STREAMS,
+ len);
+ txrx_streams->band = WMI_TPC_CHAINMASK_CONFIG_BAND_5G;
+ txrx_streams->supported_tx_streams =
+ args->chains[NL80211_BAND_5GHZ].tx;
+ txrx_streams->supported_rx_streams =
+ args->chains[NL80211_BAND_5GHZ].rx;
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "WMI vdev create: id %d type %d subtype %d macaddr %pM pdevid %d\n",
+ args->if_id, args->type, args->subtype,
+ macaddr, args->pdev_id);
+
+ ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_CREATE_CMDID);
+ if (ret) {
+ ath12k_warn(ar->ab,
+ "failed to submit WMI_VDEV_CREATE_CMDID\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath12k_wmi_vdev_delete(struct ath12k *ar, u8 vdev_id)
+{
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct wmi_vdev_delete_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_vdev_delete_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_DELETE_CMD,
+ sizeof(*cmd));
+ cmd->vdev_id = cpu_to_le32(vdev_id);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "WMI vdev delete id %d\n", vdev_id);
+
+ ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_DELETE_CMDID);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to submit WMI_VDEV_DELETE_CMDID\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath12k_wmi_vdev_stop(struct ath12k *ar, u8 vdev_id)
+{
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct wmi_vdev_stop_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_vdev_stop_cmd *)skb->data;
+
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_STOP_CMD,
+ sizeof(*cmd));
+ cmd->vdev_id = cpu_to_le32(vdev_id);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "WMI vdev stop id 0x%x\n", vdev_id);
+
+ ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_STOP_CMDID);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to submit WMI_VDEV_STOP cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath12k_wmi_vdev_down(struct ath12k *ar, u8 vdev_id)
+{
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct wmi_vdev_down_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_vdev_down_cmd *)skb->data;
+
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_DOWN_CMD,
+ sizeof(*cmd));
+ cmd->vdev_id = cpu_to_le32(vdev_id);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "WMI vdev down id 0x%x\n", vdev_id);
+
+ ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_DOWN_CMDID);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to submit WMI_VDEV_DOWN cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+static void ath12k_wmi_put_wmi_channel(struct ath12k_wmi_channel_params *chan,
+ struct wmi_vdev_start_req_arg *arg)
+{
+ memset(chan, 0, sizeof(*chan));
+
+ chan->mhz = cpu_to_le32(arg->freq);
+ chan->band_center_freq1 = cpu_to_le32(arg->band_center_freq1);
+ if (arg->mode == MODE_11AC_VHT80_80)
+ chan->band_center_freq2 = cpu_to_le32(arg->band_center_freq2);
+ else
+ chan->band_center_freq2 = 0;
+
+ chan->info |= le32_encode_bits(arg->mode, WMI_CHAN_INFO_MODE);
+ if (arg->passive)
+ chan->info |= cpu_to_le32(WMI_CHAN_INFO_PASSIVE);
+ if (arg->allow_ibss)
+ chan->info |= cpu_to_le32(WMI_CHAN_INFO_ADHOC_ALLOWED);
+ if (arg->allow_ht)
+ chan->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_HT);
+ if (arg->allow_vht)
+ chan->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_VHT);
+ if (arg->allow_he)
+ chan->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_HE);
+ if (arg->ht40plus)
+ chan->info |= cpu_to_le32(WMI_CHAN_INFO_HT40_PLUS);
+ if (arg->chan_radar)
+ chan->info |= cpu_to_le32(WMI_CHAN_INFO_DFS);
+ if (arg->freq2_radar)
+ chan->info |= cpu_to_le32(WMI_CHAN_INFO_DFS_FREQ2);
+
+ chan->reg_info_1 = le32_encode_bits(arg->max_power,
+ WMI_CHAN_REG_INFO1_MAX_PWR) |
+ le32_encode_bits(arg->max_reg_power,
+ WMI_CHAN_REG_INFO1_MAX_REG_PWR);
+
+ chan->reg_info_2 = le32_encode_bits(arg->max_antenna_gain,
+ WMI_CHAN_REG_INFO2_ANT_MAX) |
+ le32_encode_bits(arg->max_power, WMI_CHAN_REG_INFO2_MAX_TX_PWR);
+}
+
+int ath12k_wmi_vdev_start(struct ath12k *ar, struct wmi_vdev_start_req_arg *arg,
+ bool restart)
+{
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct wmi_vdev_start_request_cmd *cmd;
+ struct sk_buff *skb;
+ struct ath12k_wmi_channel_params *chan;
+ struct wmi_tlv *tlv;
+ void *ptr;
+ int ret, len;
+
+ if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))
+ return -EINVAL;
+
+ len = sizeof(*cmd) + sizeof(*chan) + TLV_HDR_SIZE;
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_vdev_start_request_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_START_REQUEST_CMD,
+ sizeof(*cmd));
+ cmd->vdev_id = cpu_to_le32(arg->vdev_id);
+ cmd->beacon_interval = cpu_to_le32(arg->bcn_intval);
+ cmd->bcn_tx_rate = cpu_to_le32(arg->bcn_tx_rate);
+ cmd->dtim_period = cpu_to_le32(arg->dtim_period);
+ cmd->num_noa_descriptors = cpu_to_le32(arg->num_noa_descriptors);
+ cmd->preferred_rx_streams = cpu_to_le32(arg->pref_rx_streams);
+ cmd->preferred_tx_streams = cpu_to_le32(arg->pref_tx_streams);
+ cmd->cac_duration_ms = cpu_to_le32(arg->cac_duration_ms);
+ cmd->regdomain = cpu_to_le32(arg->regdomain);
+ cmd->he_ops = cpu_to_le32(arg->he_ops);
+
+ if (!restart) {
+ if (arg->ssid) {
+ cmd->ssid.ssid_len = cpu_to_le32(arg->ssid_len);
+ memcpy(cmd->ssid.ssid, arg->ssid, arg->ssid_len);
+ }
+ if (arg->hidden_ssid)
+ cmd->flags |= cpu_to_le32(WMI_VDEV_START_HIDDEN_SSID);
+ if (arg->pmf_enabled)
+ cmd->flags |= cpu_to_le32(WMI_VDEV_START_PMF_ENABLED);
+ }
+
+ cmd->flags |= cpu_to_le32(WMI_VDEV_START_LDPC_RX_ENABLED);
+
+ ptr = skb->data + sizeof(*cmd);
+ chan = ptr;
+
+ ath12k_wmi_put_wmi_channel(chan, arg);
+
+ chan->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_CHANNEL,
+ sizeof(*chan));
+ ptr += sizeof(*chan);
+
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 0);
+
+ /* Note: This is a nested TLV containing:
+ * [wmi_tlv][wmi_p2p_noa_descriptor][wmi_tlv]..
+ */
+
+ ptr += sizeof(*tlv);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "vdev %s id 0x%x freq 0x%x mode 0x%x\n",
+ restart ? "restart" : "start", arg->vdev_id,
+ arg->freq, arg->mode);
+
+ if (restart)
+ ret = ath12k_wmi_cmd_send(wmi, skb,
+ WMI_VDEV_RESTART_REQUEST_CMDID);
+ else
+ ret = ath12k_wmi_cmd_send(wmi, skb,
+ WMI_VDEV_START_REQUEST_CMDID);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to submit vdev_%s cmd\n",
+ restart ? "restart" : "start");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath12k_wmi_vdev_up(struct ath12k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
+{
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct wmi_vdev_up_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_vdev_up_cmd *)skb->data;
+
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_UP_CMD,
+ sizeof(*cmd));
+ cmd->vdev_id = cpu_to_le32(vdev_id);
+ cmd->vdev_assoc_id = cpu_to_le32(aid);
+
+ ether_addr_copy(cmd->vdev_bssid.addr, bssid);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "WMI mgmt vdev up id 0x%x assoc id %d bssid %pM\n",
+ vdev_id, aid, bssid);
+
+ ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_UP_CMDID);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to submit WMI_VDEV_UP cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath12k_wmi_send_peer_create_cmd(struct ath12k *ar,
+ struct ath12k_wmi_peer_create_arg *arg)
+{
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct wmi_peer_create_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_peer_create_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_CREATE_CMD,
+ sizeof(*cmd));
+
+ ether_addr_copy(cmd->peer_macaddr.addr, arg->peer_addr);
+ cmd->peer_type = cpu_to_le32(arg->peer_type);
+ cmd->vdev_id = cpu_to_le32(arg->vdev_id);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "WMI peer create vdev_id %d peer_addr %pM\n",
+ arg->vdev_id, arg->peer_addr);
+
+ ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_CREATE_CMDID);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to submit WMI_PEER_CREATE cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath12k_wmi_send_peer_delete_cmd(struct ath12k *ar,
+ const u8 *peer_addr, u8 vdev_id)
+{
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct wmi_peer_delete_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_peer_delete_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_DELETE_CMD,
+ sizeof(*cmd));
+
+ ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
+ cmd->vdev_id = cpu_to_le32(vdev_id);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "WMI peer delete vdev_id %d peer_addr %pM\n",
+ vdev_id, peer_addr);
+
+ ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_DELETE_CMDID);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to send WMI_PEER_DELETE cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath12k_wmi_send_pdev_set_regdomain(struct ath12k *ar,
+ struct ath12k_wmi_pdev_set_regdomain_arg *arg)
+{
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct wmi_pdev_set_regdomain_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_pdev_set_regdomain_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_REGDOMAIN_CMD,
+ sizeof(*cmd));
+
+ cmd->reg_domain = cpu_to_le32(arg->current_rd_in_use);
+ cmd->reg_domain_2g = cpu_to_le32(arg->current_rd_2g);
+ cmd->reg_domain_5g = cpu_to_le32(arg->current_rd_5g);
+ cmd->conformance_test_limit_2g = cpu_to_le32(arg->ctl_2g);
+ cmd->conformance_test_limit_5g = cpu_to_le32(arg->ctl_5g);
+ cmd->dfs_domain = cpu_to_le32(arg->dfs_domain);
+ cmd->pdev_id = cpu_to_le32(arg->pdev_id);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "WMI pdev regd rd %d rd2g %d rd5g %d domain %d pdev id %d\n",
+ arg->current_rd_in_use, arg->current_rd_2g,
+ arg->current_rd_5g, arg->dfs_domain, arg->pdev_id);
+
+ ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_REGDOMAIN_CMDID);
+ if (ret) {
+ ath12k_warn(ar->ab,
+ "failed to send WMI_PDEV_SET_REGDOMAIN cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath12k_wmi_set_peer_param(struct ath12k *ar, const u8 *peer_addr,
+ u32 vdev_id, u32 param_id, u32 param_val)
+{
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct wmi_peer_set_param_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_peer_set_param_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_SET_PARAM_CMD,
+ sizeof(*cmd));
+ ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
+ cmd->vdev_id = cpu_to_le32(vdev_id);
+ cmd->param_id = cpu_to_le32(param_id);
+ cmd->param_value = cpu_to_le32(param_val);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "WMI vdev %d peer 0x%pM set param %d value %d\n",
+ vdev_id, peer_addr, param_id, param_val);
+
+ ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_SET_PARAM_CMDID);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to send WMI_PEER_SET_PARAM cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath12k_wmi_send_peer_flush_tids_cmd(struct ath12k *ar,
+ u8 peer_addr[ETH_ALEN],
+ u32 peer_tid_bitmap,
+ u8 vdev_id)
+{
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct wmi_peer_flush_tids_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_peer_flush_tids_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_FLUSH_TIDS_CMD,
+ sizeof(*cmd));
+
+ ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
+ cmd->peer_tid_bitmap = cpu_to_le32(peer_tid_bitmap);
+ cmd->vdev_id = cpu_to_le32(vdev_id);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "WMI peer flush vdev_id %d peer_addr %pM tids %08x\n",
+ vdev_id, peer_addr, peer_tid_bitmap);
+
+ ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_FLUSH_TIDS_CMDID);
+ if (ret) {
+ ath12k_warn(ar->ab,
+ "failed to send WMI_PEER_FLUSH_TIDS cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath12k_wmi_peer_rx_reorder_queue_setup(struct ath12k *ar,
+ int vdev_id, const u8 *addr,
+ dma_addr_t paddr, u8 tid,
+ u8 ba_window_size_valid,
+ u32 ba_window_size)
+{
+ struct wmi_peer_reorder_queue_setup_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_peer_reorder_queue_setup_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_REORDER_QUEUE_SETUP_CMD,
+ sizeof(*cmd));
+
+ ether_addr_copy(cmd->peer_macaddr.addr, addr);
+ cmd->vdev_id = cpu_to_le32(vdev_id);
+ cmd->tid = cpu_to_le32(tid);
+ cmd->queue_ptr_lo = cpu_to_le32(lower_32_bits(paddr));
+ cmd->queue_ptr_hi = cpu_to_le32(upper_32_bits(paddr));
+ cmd->queue_no = cpu_to_le32(tid);
+ cmd->ba_window_size_valid = cpu_to_le32(ba_window_size_valid);
+ cmd->ba_window_size = cpu_to_le32(ba_window_size);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "wmi rx reorder queue setup addr %pM vdev_id %d tid %d\n",
+ addr, vdev_id, tid);
+
+ ret = ath12k_wmi_cmd_send(ar->wmi, skb,
+ WMI_PEER_REORDER_QUEUE_SETUP_CMDID);
+ if (ret) {
+ ath12k_warn(ar->ab,
+ "failed to send WMI_PEER_REORDER_QUEUE_SETUP\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int
+ath12k_wmi_rx_reord_queue_remove(struct ath12k *ar,
+ struct ath12k_wmi_rx_reorder_queue_remove_arg *arg)
+{
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct wmi_peer_reorder_queue_remove_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_peer_reorder_queue_remove_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_REORDER_QUEUE_REMOVE_CMD,
+ sizeof(*cmd));
+
+ ether_addr_copy(cmd->peer_macaddr.addr, arg->peer_macaddr);
+ cmd->vdev_id = cpu_to_le32(arg->vdev_id);
+ cmd->tid_mask = cpu_to_le32(arg->peer_tid_bitmap);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "%s: peer_macaddr %pM vdev_id %d, tid_map %d", __func__,
+ arg->peer_macaddr, arg->vdev_id, arg->peer_tid_bitmap);
+
+ ret = ath12k_wmi_cmd_send(wmi, skb,
+ WMI_PEER_REORDER_QUEUE_REMOVE_CMDID);
+ if (ret) {
+ ath12k_warn(ar->ab,
+ "failed to send WMI_PEER_REORDER_QUEUE_REMOVE_CMDID");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath12k_wmi_pdev_set_param(struct ath12k *ar, u32 param_id,
+ u32 param_value, u8 pdev_id)
+{
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct wmi_pdev_set_param_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_pdev_set_param_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_PARAM_CMD,
+ sizeof(*cmd));
+ cmd->pdev_id = cpu_to_le32(pdev_id);
+ cmd->param_id = cpu_to_le32(param_id);
+ cmd->param_value = cpu_to_le32(param_value);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "WMI pdev set param %d pdev id %d value %d\n",
+ param_id, pdev_id, param_value);
+
+ ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_PARAM_CMDID);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to send WMI_PDEV_SET_PARAM cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath12k_wmi_pdev_set_ps_mode(struct ath12k *ar, int vdev_id, u32 enable)
+{
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct wmi_pdev_set_ps_mode_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_pdev_set_ps_mode_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STA_POWERSAVE_MODE_CMD,
+ sizeof(*cmd));
+ cmd->vdev_id = cpu_to_le32(vdev_id);
+ cmd->sta_ps_mode = cpu_to_le32(enable);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "WMI vdev set psmode %d vdev id %d\n",
+ enable, vdev_id);
+
+ ret = ath12k_wmi_cmd_send(wmi, skb, WMI_STA_POWERSAVE_MODE_CMDID);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to send WMI_PDEV_SET_PARAM cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath12k_wmi_pdev_suspend(struct ath12k *ar, u32 suspend_opt,
+ u32 pdev_id)
+{
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct wmi_pdev_suspend_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_pdev_suspend_cmd *)skb->data;
+
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SUSPEND_CMD,
+ sizeof(*cmd));
+
+ cmd->suspend_opt = cpu_to_le32(suspend_opt);
+ cmd->pdev_id = cpu_to_le32(pdev_id);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "WMI pdev suspend pdev_id %d\n", pdev_id);
+
+ ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_SUSPEND_CMDID);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to send WMI_PDEV_SUSPEND cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath12k_wmi_pdev_resume(struct ath12k *ar, u32 pdev_id)
+{
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct wmi_pdev_resume_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_pdev_resume_cmd *)skb->data;
+
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_RESUME_CMD,
+ sizeof(*cmd));
+ cmd->pdev_id = cpu_to_le32(pdev_id);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "WMI pdev resume pdev id %d\n", pdev_id);
+
+ ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_RESUME_CMDID);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to send WMI_PDEV_RESUME cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+/* TODO FW Support for the cmd is not available yet.
+ * Can be tested once the command and corresponding
+ * event is implemented in FW
+ */
+int ath12k_wmi_pdev_bss_chan_info_request(struct ath12k *ar,
+ enum wmi_bss_chan_info_req_type type)
+{
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct wmi_pdev_bss_chan_info_req_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_pdev_bss_chan_info_req_cmd *)skb->data;
+
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_BSS_CHAN_INFO_REQUEST,
+ sizeof(*cmd));
+ cmd->req_type = cpu_to_le32(type);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "WMI bss chan info req type %d\n", type);
+
+ ret = ath12k_wmi_cmd_send(wmi, skb,
+ WMI_PDEV_BSS_CHAN_INFO_REQUEST_CMDID);
+ if (ret) {
+ ath12k_warn(ar->ab,
+ "failed to send WMI_PDEV_BSS_CHAN_INFO_REQUEST cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath12k_wmi_send_set_ap_ps_param_cmd(struct ath12k *ar, u8 *peer_addr,
+ struct ath12k_wmi_ap_ps_arg *arg)
+{
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct wmi_ap_ps_peer_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_ap_ps_peer_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_AP_PS_PEER_CMD,
+ sizeof(*cmd));
+
+ cmd->vdev_id = cpu_to_le32(arg->vdev_id);
+ ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
+ cmd->param = cpu_to_le32(arg->param);
+ cmd->value = cpu_to_le32(arg->value);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "WMI set ap ps vdev id %d peer %pM param %d value %d\n",
+ arg->vdev_id, peer_addr, arg->param, arg->value);
+
+ ret = ath12k_wmi_cmd_send(wmi, skb, WMI_AP_PS_PEER_PARAM_CMDID);
+ if (ret) {
+ ath12k_warn(ar->ab,
+ "failed to send WMI_AP_PS_PEER_PARAM_CMDID\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath12k_wmi_set_sta_ps_param(struct ath12k *ar, u32 vdev_id,
+ u32 param, u32 param_value)
+{
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct wmi_sta_powersave_param_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_sta_powersave_param_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STA_POWERSAVE_PARAM_CMD,
+ sizeof(*cmd));
+
+ cmd->vdev_id = cpu_to_le32(vdev_id);
+ cmd->param = cpu_to_le32(param);
+ cmd->value = cpu_to_le32(param_value);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "WMI set sta ps vdev_id %d param %d value %d\n",
+ vdev_id, param, param_value);
+
+ ret = ath12k_wmi_cmd_send(wmi, skb, WMI_STA_POWERSAVE_PARAM_CMDID);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to send WMI_STA_POWERSAVE_PARAM_CMDID");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath12k_wmi_force_fw_hang_cmd(struct ath12k *ar, u32 type, u32 delay_time_ms)
+{
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct wmi_force_fw_hang_cmd *cmd;
+ struct sk_buff *skb;
+ int ret, len;
+
+ len = sizeof(*cmd);
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_force_fw_hang_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_FORCE_FW_HANG_CMD,
+ len);
+
+ cmd->type = cpu_to_le32(type);
+ cmd->delay_time_ms = cpu_to_le32(delay_time_ms);
+
+ ret = ath12k_wmi_cmd_send(wmi, skb, WMI_FORCE_FW_HANG_CMDID);
+
+ if (ret) {
+ ath12k_warn(ar->ab, "Failed to send WMI_FORCE_FW_HANG_CMDID");
+ dev_kfree_skb(skb);
+ }
+ return ret;
+}
+
+int ath12k_wmi_vdev_set_param_cmd(struct ath12k *ar, u32 vdev_id,
+ u32 param_id, u32 param_value)
+{
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct wmi_vdev_set_param_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_vdev_set_param_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SET_PARAM_CMD,
+ sizeof(*cmd));
+
+ cmd->vdev_id = cpu_to_le32(vdev_id);
+ cmd->param_id = cpu_to_le32(param_id);
+ cmd->param_value = cpu_to_le32(param_value);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "WMI vdev id 0x%x set param %d value %d\n",
+ vdev_id, param_id, param_value);
+
+ ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_SET_PARAM_CMDID);
+ if (ret) {
+ ath12k_warn(ar->ab,
+ "failed to send WMI_VDEV_SET_PARAM_CMDID\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath12k_wmi_send_pdev_temperature_cmd(struct ath12k *ar)
+{
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct wmi_get_pdev_temperature_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_get_pdev_temperature_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_GET_TEMPERATURE_CMD,
+ sizeof(*cmd));
+ cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "WMI pdev get temperature for pdev_id %d\n", ar->pdev->pdev_id);
+
+ ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PDEV_GET_TEMPERATURE_CMDID);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to send WMI_PDEV_GET_TEMPERATURE cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath12k_wmi_send_bcn_offload_control_cmd(struct ath12k *ar,
+ u32 vdev_id, u32 bcn_ctrl_op)
+{
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct wmi_bcn_offload_ctrl_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_bcn_offload_ctrl_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BCN_OFFLOAD_CTRL_CMD,
+ sizeof(*cmd));
+
+ cmd->vdev_id = cpu_to_le32(vdev_id);
+ cmd->bcn_ctrl_op = cpu_to_le32(bcn_ctrl_op);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "WMI bcn ctrl offload vdev id %d ctrl_op %d\n",
+ vdev_id, bcn_ctrl_op);
+
+ ret = ath12k_wmi_cmd_send(wmi, skb, WMI_BCN_OFFLOAD_CTRL_CMDID);
+ if (ret) {
+ ath12k_warn(ar->ab,
+ "failed to send WMI_BCN_OFFLOAD_CTRL_CMDID\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath12k_wmi_bcn_tmpl(struct ath12k *ar, u32 vdev_id,
+ struct ieee80211_mutable_offsets *offs,
+ struct sk_buff *bcn)
+{
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct wmi_bcn_tmpl_cmd *cmd;
+ struct ath12k_wmi_bcn_prb_info_params *bcn_prb_info;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ void *ptr;
+ int ret, len;
+ size_t aligned_len = roundup(bcn->len, 4);
+
+ len = sizeof(*cmd) + sizeof(*bcn_prb_info) + TLV_HDR_SIZE + aligned_len;
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_bcn_tmpl_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BCN_TMPL_CMD,
+ sizeof(*cmd));
+ cmd->vdev_id = cpu_to_le32(vdev_id);
+ cmd->tim_ie_offset = cpu_to_le32(offs->tim_offset);
+ cmd->csa_switch_count_offset = cpu_to_le32(offs->cntdwn_counter_offs[0]);
+ cmd->ext_csa_switch_count_offset = cpu_to_le32(offs->cntdwn_counter_offs[1]);
+ cmd->buf_len = cpu_to_le32(bcn->len);
+
+ ptr = skb->data + sizeof(*cmd);
+
+ bcn_prb_info = ptr;
+ len = sizeof(*bcn_prb_info);
+ bcn_prb_info->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BCN_PRB_INFO,
+ len);
+ bcn_prb_info->caps = 0;
+ bcn_prb_info->erp = 0;
+
+ ptr += sizeof(*bcn_prb_info);
+
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, aligned_len);
+ memcpy(tlv->value, bcn->data, bcn->len);
+
+ ret = ath12k_wmi_cmd_send(wmi, skb, WMI_BCN_TMPL_CMDID);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to send WMI_BCN_TMPL_CMDID\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath12k_wmi_vdev_install_key(struct ath12k *ar,
+ struct wmi_vdev_install_key_arg *arg)
+{
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct wmi_vdev_install_key_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ int ret, len, key_len_aligned;
+
+ /* WMI_TAG_ARRAY_BYTE needs to be aligned with 4, the actual key
+ * length is specifed in cmd->key_len.
+ */
+ key_len_aligned = roundup(arg->key_len, 4);
+
+ len = sizeof(*cmd) + TLV_HDR_SIZE + key_len_aligned;
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_vdev_install_key_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_INSTALL_KEY_CMD,
+ sizeof(*cmd));
+ cmd->vdev_id = cpu_to_le32(arg->vdev_id);
+ ether_addr_copy(cmd->peer_macaddr.addr, arg->macaddr);
+ cmd->key_idx = cpu_to_le32(arg->key_idx);
+ cmd->key_flags = cpu_to_le32(arg->key_flags);
+ cmd->key_cipher = cpu_to_le32(arg->key_cipher);
+ cmd->key_len = cpu_to_le32(arg->key_len);
+ cmd->key_txmic_len = cpu_to_le32(arg->key_txmic_len);
+ cmd->key_rxmic_len = cpu_to_le32(arg->key_rxmic_len);
+
+ if (arg->key_rsc_counter)
+ cmd->key_rsc_counter = cpu_to_le64(arg->key_rsc_counter);
+
+ tlv = (struct wmi_tlv *)(skb->data + sizeof(*cmd));
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, key_len_aligned);
+ memcpy(tlv->value, arg->key_data, arg->key_len);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "WMI vdev install key idx %d cipher %d len %d\n",
+ arg->key_idx, arg->key_cipher, arg->key_len);
+
+ ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_INSTALL_KEY_CMDID);
+ if (ret) {
+ ath12k_warn(ar->ab,
+ "failed to send WMI_VDEV_INSTALL_KEY cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+static void ath12k_wmi_copy_peer_flags(struct wmi_peer_assoc_complete_cmd *cmd,
+ struct ath12k_wmi_peer_assoc_arg *arg,
+ bool hw_crypto_disabled)
+{
+ cmd->peer_flags = 0;
+
+ if (arg->is_wme_set) {
+ if (arg->qos_flag)
+ cmd->peer_flags |= cpu_to_le32(WMI_PEER_QOS);
+ if (arg->apsd_flag)
+ cmd->peer_flags |= cpu_to_le32(WMI_PEER_APSD);
+ if (arg->ht_flag)
+ cmd->peer_flags |= cpu_to_le32(WMI_PEER_HT);
+ if (arg->bw_40)
+ cmd->peer_flags |= cpu_to_le32(WMI_PEER_40MHZ);
+ if (arg->bw_80)
+ cmd->peer_flags |= cpu_to_le32(WMI_PEER_80MHZ);
+ if (arg->bw_160)
+ cmd->peer_flags |= cpu_to_le32(WMI_PEER_160MHZ);
+
+ /* Typically if STBC is enabled for VHT it should be enabled
+ * for HT as well
+ **/
+ if (arg->stbc_flag)
+ cmd->peer_flags |= cpu_to_le32(WMI_PEER_STBC);
+
+ /* Typically if LDPC is enabled for VHT it should be enabled
+ * for HT as well
+ **/
+ if (arg->ldpc_flag)
+ cmd->peer_flags |= cpu_to_le32(WMI_PEER_LDPC);
+
+ if (arg->static_mimops_flag)
+ cmd->peer_flags |= cpu_to_le32(WMI_PEER_STATIC_MIMOPS);
+ if (arg->dynamic_mimops_flag)
+ cmd->peer_flags |= cpu_to_le32(WMI_PEER_DYN_MIMOPS);
+ if (arg->spatial_mux_flag)
+ cmd->peer_flags |= cpu_to_le32(WMI_PEER_SPATIAL_MUX);
+ if (arg->vht_flag)
+ cmd->peer_flags |= cpu_to_le32(WMI_PEER_VHT);
+ if (arg->he_flag)
+ cmd->peer_flags |= cpu_to_le32(WMI_PEER_HE);
+ if (arg->twt_requester)
+ cmd->peer_flags |= cpu_to_le32(WMI_PEER_TWT_REQ);
+ if (arg->twt_responder)
+ cmd->peer_flags |= cpu_to_le32(WMI_PEER_TWT_RESP);
+ }
+
+ /* Suppress authorization for all AUTH modes that need 4-way handshake
+ * (during re-association).
+ * Authorization will be done for these modes on key installation.
+ */
+ if (arg->auth_flag)
+ cmd->peer_flags |= cpu_to_le32(WMI_PEER_AUTH);
+ if (arg->need_ptk_4_way) {
+ cmd->peer_flags |= cpu_to_le32(WMI_PEER_NEED_PTK_4_WAY);
+ if (!hw_crypto_disabled)
+ cmd->peer_flags &= cpu_to_le32(~WMI_PEER_AUTH);
+ }
+ if (arg->need_gtk_2_way)
+ cmd->peer_flags |= cpu_to_le32(WMI_PEER_NEED_GTK_2_WAY);
+ /* safe mode bypass the 4-way handshake */
+ if (arg->safe_mode_enabled)
+ cmd->peer_flags &= cpu_to_le32(~(WMI_PEER_NEED_PTK_4_WAY |
+ WMI_PEER_NEED_GTK_2_WAY));
+
+ if (arg->is_pmf_enabled)
+ cmd->peer_flags |= cpu_to_le32(WMI_PEER_PMF);
+
+ /* Disable AMSDU for station transmit, if user configures it */
+ /* Disable AMSDU for AP transmit to 11n Stations, if user configures
+ * it
+ * if (arg->amsdu_disable) Add after FW support
+ **/
+
+ /* Target asserts if node is marked HT and all MCS is set to 0.
+ * Mark the node as non-HT if all the mcs rates are disabled through
+ * iwpriv
+ **/
+ if (arg->peer_ht_rates.num_rates == 0)
+ cmd->peer_flags &= cpu_to_le32(~WMI_PEER_HT);
+}
+
+int ath12k_wmi_send_peer_assoc_cmd(struct ath12k *ar,
+ struct ath12k_wmi_peer_assoc_arg *arg)
+{
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct wmi_peer_assoc_complete_cmd *cmd;
+ struct ath12k_wmi_vht_rate_set_params *mcs;
+ struct ath12k_wmi_he_rate_set_params *he_mcs;
+ struct sk_buff *skb;
+ struct wmi_tlv *tlv;
+ void *ptr;
+ u32 peer_legacy_rates_align;
+ u32 peer_ht_rates_align;
+ int i, ret, len;
+
+ peer_legacy_rates_align = roundup(arg->peer_legacy_rates.num_rates,
+ sizeof(u32));
+ peer_ht_rates_align = roundup(arg->peer_ht_rates.num_rates,
+ sizeof(u32));
+
+ len = sizeof(*cmd) +
+ TLV_HDR_SIZE + (peer_legacy_rates_align * sizeof(u8)) +
+ TLV_HDR_SIZE + (peer_ht_rates_align * sizeof(u8)) +
+ sizeof(*mcs) + TLV_HDR_SIZE +
+ (sizeof(*he_mcs) * arg->peer_he_mcs_count);
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ ptr = skb->data;
+
+ cmd = ptr;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PEER_ASSOC_COMPLETE_CMD,
+ sizeof(*cmd));
+
+ cmd->vdev_id = cpu_to_le32(arg->vdev_id);
+
+ cmd->peer_new_assoc = cpu_to_le32(arg->peer_new_assoc);
+ cmd->peer_associd = cpu_to_le32(arg->peer_associd);
+
+ ath12k_wmi_copy_peer_flags(cmd, arg,
+ test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED,
+ &ar->ab->dev_flags));
+
+ ether_addr_copy(cmd->peer_macaddr.addr, arg->peer_mac);
+
+ cmd->peer_rate_caps = cpu_to_le32(arg->peer_rate_caps);
+ cmd->peer_caps = cpu_to_le32(arg->peer_caps);
+ cmd->peer_listen_intval = cpu_to_le32(arg->peer_listen_intval);
+ cmd->peer_ht_caps = cpu_to_le32(arg->peer_ht_caps);
+ cmd->peer_max_mpdu = cpu_to_le32(arg->peer_max_mpdu);
+ cmd->peer_mpdu_density = cpu_to_le32(arg->peer_mpdu_density);
+ cmd->peer_vht_caps = cpu_to_le32(arg->peer_vht_caps);
+ cmd->peer_phymode = cpu_to_le32(arg->peer_phymode);
+
+ /* Update 11ax capabilities */
+ cmd->peer_he_cap_info = cpu_to_le32(arg->peer_he_cap_macinfo[0]);
+ cmd->peer_he_cap_info_ext = cpu_to_le32(arg->peer_he_cap_macinfo[1]);
+ cmd->peer_he_cap_info_internal = cpu_to_le32(arg->peer_he_cap_macinfo_internal);
+ cmd->peer_he_caps_6ghz = cpu_to_le32(arg->peer_he_caps_6ghz);
+ cmd->peer_he_ops = cpu_to_le32(arg->peer_he_ops);
+ for (i = 0; i < WMI_MAX_HECAP_PHY_SIZE; i++)
+ cmd->peer_he_cap_phy[i] =
+ cpu_to_le32(arg->peer_he_cap_phyinfo[i]);
+ cmd->peer_ppet.numss_m1 = cpu_to_le32(arg->peer_ppet.numss_m1);
+ cmd->peer_ppet.ru_info = cpu_to_le32(arg->peer_ppet.ru_bit_mask);
+ for (i = 0; i < WMI_MAX_NUM_SS; i++)
+ cmd->peer_ppet.ppet16_ppet8_ru3_ru0[i] =
+ cpu_to_le32(arg->peer_ppet.ppet16_ppet8_ru3_ru0[i]);
+
+ /* Update peer legacy rate information */
+ ptr += sizeof(*cmd);
+
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, peer_legacy_rates_align);
+
+ ptr += TLV_HDR_SIZE;
+
+ cmd->num_peer_legacy_rates = cpu_to_le32(arg->peer_legacy_rates.num_rates);
+ memcpy(ptr, arg->peer_legacy_rates.rates,
+ arg->peer_legacy_rates.num_rates);
+
+ /* Update peer HT rate information */
+ ptr += peer_legacy_rates_align;
+
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, peer_ht_rates_align);
+ ptr += TLV_HDR_SIZE;
+ cmd->num_peer_ht_rates = cpu_to_le32(arg->peer_ht_rates.num_rates);
+ memcpy(ptr, arg->peer_ht_rates.rates,
+ arg->peer_ht_rates.num_rates);
+
+ /* VHT Rates */
+ ptr += peer_ht_rates_align;
+
+ mcs = ptr;
+
+ mcs->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VHT_RATE_SET,
+ sizeof(*mcs));
+
+ cmd->peer_nss = cpu_to_le32(arg->peer_nss);
+
+ /* Update bandwidth-NSS mapping */
+ cmd->peer_bw_rxnss_override = 0;
+ cmd->peer_bw_rxnss_override |= cpu_to_le32(arg->peer_bw_rxnss_override);
+
+ if (arg->vht_capable) {
+ mcs->rx_max_rate = cpu_to_le32(arg->rx_max_rate);
+ mcs->rx_mcs_set = cpu_to_le32(arg->rx_mcs_set);
+ mcs->tx_max_rate = cpu_to_le32(arg->tx_max_rate);
+ mcs->tx_mcs_set = cpu_to_le32(arg->tx_mcs_set);
+ }
+
+ /* HE Rates */
+ cmd->peer_he_mcs = cpu_to_le32(arg->peer_he_mcs_count);
+ cmd->min_data_rate = cpu_to_le32(arg->min_data_rate);
+
+ ptr += sizeof(*mcs);
+
+ len = arg->peer_he_mcs_count * sizeof(*he_mcs);
+
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
+ ptr += TLV_HDR_SIZE;
+
+ /* Loop through the HE rate set */
+ for (i = 0; i < arg->peer_he_mcs_count; i++) {
+ he_mcs = ptr;
+ he_mcs->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_HE_RATE_SET,
+ sizeof(*he_mcs));
+
+ he_mcs->rx_mcs_set = cpu_to_le32(arg->peer_he_rx_mcs_set[i]);
+ he_mcs->tx_mcs_set = cpu_to_le32(arg->peer_he_tx_mcs_set[i]);
+ ptr += sizeof(*he_mcs);
+ }
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "wmi peer assoc vdev id %d assoc id %d peer mac %pM peer_flags %x rate_caps %x peer_caps %x listen_intval %d ht_caps %x max_mpdu %d nss %d phymode %d peer_mpdu_density %d vht_caps %x he cap_info %x he ops %x he cap_info_ext %x he phy %x %x %x peer_bw_rxnss_override %x\n",
+ cmd->vdev_id, cmd->peer_associd, arg->peer_mac,
+ cmd->peer_flags, cmd->peer_rate_caps, cmd->peer_caps,
+ cmd->peer_listen_intval, cmd->peer_ht_caps,
+ cmd->peer_max_mpdu, cmd->peer_nss, cmd->peer_phymode,
+ cmd->peer_mpdu_density,
+ cmd->peer_vht_caps, cmd->peer_he_cap_info,
+ cmd->peer_he_ops, cmd->peer_he_cap_info_ext,
+ cmd->peer_he_cap_phy[0], cmd->peer_he_cap_phy[1],
+ cmd->peer_he_cap_phy[2],
+ cmd->peer_bw_rxnss_override);
+
+ ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_ASSOC_CMDID);
+ if (ret) {
+ ath12k_warn(ar->ab,
+ "failed to send WMI_PEER_ASSOC_CMDID\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+void ath12k_wmi_start_scan_init(struct ath12k *ar,
+ struct ath12k_wmi_scan_req_arg *arg)
+{
+ /* setup commonly used values */
+ arg->scan_req_id = 1;
+ arg->scan_priority = WMI_SCAN_PRIORITY_LOW;
+ arg->dwell_time_active = 50;
+ arg->dwell_time_active_2g = 0;
+ arg->dwell_time_passive = 150;
+ arg->dwell_time_active_6g = 40;
+ arg->dwell_time_passive_6g = 30;
+ arg->min_rest_time = 50;
+ arg->max_rest_time = 500;
+ arg->repeat_probe_time = 0;
+ arg->probe_spacing_time = 0;
+ arg->idle_time = 0;
+ arg->max_scan_time = 20000;
+ arg->probe_delay = 5;
+ arg->notify_scan_events = WMI_SCAN_EVENT_STARTED |
+ WMI_SCAN_EVENT_COMPLETED |
+ WMI_SCAN_EVENT_BSS_CHANNEL |
+ WMI_SCAN_EVENT_FOREIGN_CHAN |
+ WMI_SCAN_EVENT_DEQUEUED;
+ arg->scan_flags |= WMI_SCAN_CHAN_STAT_EVENT;
+ arg->num_bssid = 1;
+
+ /* fill bssid_list[0] with 0xff, otherwise bssid and RA will be
+ * ZEROs in probe request
+ */
+ eth_broadcast_addr(arg->bssid_list[0].addr);
+}
+
+static void ath12k_wmi_copy_scan_event_cntrl_flags(struct wmi_start_scan_cmd *cmd,
+ struct ath12k_wmi_scan_req_arg *arg)
+{
+ /* Scan events subscription */
+ if (arg->scan_ev_started)
+ cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_STARTED);
+ if (arg->scan_ev_completed)
+ cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_COMPLETED);
+ if (arg->scan_ev_bss_chan)
+ cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_BSS_CHANNEL);
+ if (arg->scan_ev_foreign_chan)
+ cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_FOREIGN_CHAN);
+ if (arg->scan_ev_dequeued)
+ cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_DEQUEUED);
+ if (arg->scan_ev_preempted)
+ cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_PREEMPTED);
+ if (arg->scan_ev_start_failed)
+ cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_START_FAILED);
+ if (arg->scan_ev_restarted)
+ cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_RESTARTED);
+ if (arg->scan_ev_foreign_chn_exit)
+ cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT);
+ if (arg->scan_ev_suspended)
+ cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_SUSPENDED);
+ if (arg->scan_ev_resumed)
+ cmd->notify_scan_events |= cpu_to_le32(WMI_SCAN_EVENT_RESUMED);
+
+ /** Set scan control flags */
+ cmd->scan_ctrl_flags = 0;
+ if (arg->scan_f_passive)
+ cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_PASSIVE);
+ if (arg->scan_f_strict_passive_pch)
+ cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_STRICT_PASSIVE_ON_PCHN);
+ if (arg->scan_f_promisc_mode)
+ cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FILTER_PROMISCUOS);
+ if (arg->scan_f_capture_phy_err)
+ cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_CAPTURE_PHY_ERROR);
+ if (arg->scan_f_half_rate)
+ cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_HALF_RATE_SUPPORT);
+ if (arg->scan_f_quarter_rate)
+ cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_QUARTER_RATE_SUPPORT);
+ if (arg->scan_f_cck_rates)
+ cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_CCK_RATES);
+ if (arg->scan_f_ofdm_rates)
+ cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_OFDM_RATES);
+ if (arg->scan_f_chan_stat_evnt)
+ cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_CHAN_STAT_EVENT);
+ if (arg->scan_f_filter_prb_req)
+ cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FILTER_PROBE_REQ);
+ if (arg->scan_f_bcast_probe)
+ cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_BCAST_PROBE_REQ);
+ if (arg->scan_f_offchan_mgmt_tx)
+ cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_OFFCHAN_MGMT_TX);
+ if (arg->scan_f_offchan_data_tx)
+ cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_OFFCHAN_DATA_TX);
+ if (arg->scan_f_force_active_dfs_chn)
+ cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_FLAG_FORCE_ACTIVE_ON_DFS);
+ if (arg->scan_f_add_tpc_ie_in_probe)
+ cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_TPC_IE_IN_PROBE_REQ);
+ if (arg->scan_f_add_ds_ie_in_probe)
+ cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_DS_IE_IN_PROBE_REQ);
+ if (arg->scan_f_add_spoofed_mac_in_probe)
+ cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_ADD_SPOOF_MAC_IN_PROBE_REQ);
+ if (arg->scan_f_add_rand_seq_in_probe)
+ cmd->scan_ctrl_flags |= cpu_to_le32(WMI_SCAN_RANDOM_SEQ_NO_IN_PROBE_REQ);
+ if (arg->scan_f_en_ie_whitelist_in_probe)
+ cmd->scan_ctrl_flags |=
+ cpu_to_le32(WMI_SCAN_ENABLE_IE_WHTELIST_IN_PROBE_REQ);
+
+ cmd->scan_ctrl_flags |= le32_encode_bits(arg->adaptive_dwell_time_mode,
+ WMI_SCAN_DWELL_MODE_MASK);
+}
+
+int ath12k_wmi_send_scan_start_cmd(struct ath12k *ar,
+ struct ath12k_wmi_scan_req_arg *arg)
+{
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct wmi_start_scan_cmd *cmd;
+ struct ath12k_wmi_ssid_params *ssid = NULL;
+ struct ath12k_wmi_mac_addr_params *bssid;
+ struct sk_buff *skb;
+ struct wmi_tlv *tlv;
+ void *ptr;
+ int i, ret, len;
+ u32 *tmp_ptr;
+ u8 extraie_len_with_pad = 0;
+ struct ath12k_wmi_hint_short_ssid_arg *s_ssid = NULL;
+ struct ath12k_wmi_hint_bssid_arg *hint_bssid = NULL;
+
+ len = sizeof(*cmd);
+
+ len += TLV_HDR_SIZE;
+ if (arg->num_chan)
+ len += arg->num_chan * sizeof(u32);
+
+ len += TLV_HDR_SIZE;
+ if (arg->num_ssids)
+ len += arg->num_ssids * sizeof(*ssid);
+
+ len += TLV_HDR_SIZE;
+ if (arg->num_bssid)
+ len += sizeof(*bssid) * arg->num_bssid;
+
+ len += TLV_HDR_SIZE;
+ if (arg->extraie.len)
+ extraie_len_with_pad =
+ roundup(arg->extraie.len, sizeof(u32));
+ len += extraie_len_with_pad;
+
+ if (arg->num_hint_bssid)
+ len += TLV_HDR_SIZE +
+ arg->num_hint_bssid * sizeof(*hint_bssid);
+
+ if (arg->num_hint_s_ssid)
+ len += TLV_HDR_SIZE +
+ arg->num_hint_s_ssid * sizeof(*s_ssid);
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ ptr = skb->data;
+
+ cmd = ptr;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_START_SCAN_CMD,
+ sizeof(*cmd));
+
+ cmd->scan_id = cpu_to_le32(arg->scan_id);
+ cmd->scan_req_id = cpu_to_le32(arg->scan_req_id);
+ cmd->vdev_id = cpu_to_le32(arg->vdev_id);
+ cmd->scan_priority = cpu_to_le32(arg->scan_priority);
+ cmd->notify_scan_events = cpu_to_le32(arg->notify_scan_events);
+
+ ath12k_wmi_copy_scan_event_cntrl_flags(cmd, arg);
+
+ cmd->dwell_time_active = cpu_to_le32(arg->dwell_time_active);
+ cmd->dwell_time_active_2g = cpu_to_le32(arg->dwell_time_active_2g);
+ cmd->dwell_time_passive = cpu_to_le32(arg->dwell_time_passive);
+ cmd->dwell_time_active_6g = cpu_to_le32(arg->dwell_time_active_6g);
+ cmd->dwell_time_passive_6g = cpu_to_le32(arg->dwell_time_passive_6g);
+ cmd->min_rest_time = cpu_to_le32(arg->min_rest_time);
+ cmd->max_rest_time = cpu_to_le32(arg->max_rest_time);
+ cmd->repeat_probe_time = cpu_to_le32(arg->repeat_probe_time);
+ cmd->probe_spacing_time = cpu_to_le32(arg->probe_spacing_time);
+ cmd->idle_time = cpu_to_le32(arg->idle_time);
+ cmd->max_scan_time = cpu_to_le32(arg->max_scan_time);
+ cmd->probe_delay = cpu_to_le32(arg->probe_delay);
+ cmd->burst_duration = cpu_to_le32(arg->burst_duration);
+ cmd->num_chan = cpu_to_le32(arg->num_chan);
+ cmd->num_bssid = cpu_to_le32(arg->num_bssid);
+ cmd->num_ssids = cpu_to_le32(arg->num_ssids);
+ cmd->ie_len = cpu_to_le32(arg->extraie.len);
+ cmd->n_probes = cpu_to_le32(arg->n_probes);
+
+ ptr += sizeof(*cmd);
+
+ len = arg->num_chan * sizeof(u32);
+
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, len);
+ ptr += TLV_HDR_SIZE;
+ tmp_ptr = (u32 *)ptr;
+
+ memcpy(tmp_ptr, arg->chan_list, arg->num_chan * 4);
+
+ ptr += len;
+
+ len = arg->num_ssids * sizeof(*ssid);
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_FIXED_STRUCT, len);
+
+ ptr += TLV_HDR_SIZE;
+
+ if (arg->num_ssids) {
+ ssid = ptr;
+ for (i = 0; i < arg->num_ssids; ++i) {
+ ssid->ssid_len = cpu_to_le32(arg->ssid[i].ssid_len);
+ memcpy(ssid->ssid, arg->ssid[i].ssid,
+ arg->ssid[i].ssid_len);
+ ssid++;
+ }
+ }
+
+ ptr += (arg->num_ssids * sizeof(*ssid));
+ len = arg->num_bssid * sizeof(*bssid);
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_FIXED_STRUCT, len);
+
+ ptr += TLV_HDR_SIZE;
+ bssid = ptr;
+
+ if (arg->num_bssid) {
+ for (i = 0; i < arg->num_bssid; ++i) {
+ ether_addr_copy(bssid->addr,
+ arg->bssid_list[i].addr);
+ bssid++;
+ }
+ }
+
+ ptr += arg->num_bssid * sizeof(*bssid);
+
+ len = extraie_len_with_pad;
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, len);
+ ptr += TLV_HDR_SIZE;
+
+ if (arg->extraie.len)
+ memcpy(ptr, arg->extraie.ptr,
+ arg->extraie.len);
+
+ ptr += extraie_len_with_pad;
+
+ if (arg->num_hint_s_ssid) {
+ len = arg->num_hint_s_ssid * sizeof(*s_ssid);
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_FIXED_STRUCT, len);
+ ptr += TLV_HDR_SIZE;
+ s_ssid = ptr;
+ for (i = 0; i < arg->num_hint_s_ssid; ++i) {
+ s_ssid->freq_flags = arg->hint_s_ssid[i].freq_flags;
+ s_ssid->short_ssid = arg->hint_s_ssid[i].short_ssid;
+ s_ssid++;
+ }
+ ptr += len;
+ }
+
+ if (arg->num_hint_bssid) {
+ len = arg->num_hint_bssid * sizeof(struct ath12k_wmi_hint_bssid_arg);
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_FIXED_STRUCT, len);
+ ptr += TLV_HDR_SIZE;
+ hint_bssid = ptr;
+ for (i = 0; i < arg->num_hint_bssid; ++i) {
+ hint_bssid->freq_flags =
+ arg->hint_bssid[i].freq_flags;
+ ether_addr_copy(&arg->hint_bssid[i].bssid.addr[0],
+ &hint_bssid->bssid.addr[0]);
+ hint_bssid++;
+ }
+ }
+
+ ret = ath12k_wmi_cmd_send(wmi, skb,
+ WMI_START_SCAN_CMDID);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to send WMI_START_SCAN_CMDID\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath12k_wmi_send_scan_stop_cmd(struct ath12k *ar,
+ struct ath12k_wmi_scan_cancel_arg *arg)
+{
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct wmi_stop_scan_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_stop_scan_cmd *)skb->data;
+
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STOP_SCAN_CMD,
+ sizeof(*cmd));
+
+ cmd->vdev_id = cpu_to_le32(arg->vdev_id);
+ cmd->requestor = cpu_to_le32(arg->requester);
+ cmd->scan_id = cpu_to_le32(arg->scan_id);
+ cmd->pdev_id = cpu_to_le32(arg->pdev_id);
+ /* stop the scan with the corresponding scan_id */
+ if (arg->req_type == WLAN_SCAN_CANCEL_PDEV_ALL) {
+ /* Cancelling all scans */
+ cmd->req_type = cpu_to_le32(WMI_SCAN_STOP_ALL);
+ } else if (arg->req_type == WLAN_SCAN_CANCEL_VDEV_ALL) {
+ /* Cancelling VAP scans */
+ cmd->req_type = cpu_to_le32(WMI_SCAN_STOP_VAP_ALL);
+ } else if (arg->req_type == WLAN_SCAN_CANCEL_SINGLE) {
+ /* Cancelling specific scan */
+ cmd->req_type = WMI_SCAN_STOP_ONE;
+ } else {
+ ath12k_warn(ar->ab, "invalid scan cancel req_type %d",
+ arg->req_type);
+ dev_kfree_skb(skb);
+ return -EINVAL;
+ }
+
+ ret = ath12k_wmi_cmd_send(wmi, skb,
+ WMI_STOP_SCAN_CMDID);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to send WMI_STOP_SCAN_CMDID\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath12k_wmi_send_scan_chan_list_cmd(struct ath12k *ar,
+ struct ath12k_wmi_scan_chan_list_arg *arg)
+{
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct wmi_scan_chan_list_cmd *cmd;
+ struct sk_buff *skb;
+ struct ath12k_wmi_channel_params *chan_info;
+ struct ath12k_wmi_channel_arg *channel_arg;
+ struct wmi_tlv *tlv;
+ void *ptr;
+ int i, ret, len;
+ u16 num_send_chans, num_sends = 0, max_chan_limit = 0;
+ __le32 *reg1, *reg2;
+
+ channel_arg = &arg->channel[0];
+ while (arg->nallchans) {
+ len = sizeof(*cmd) + TLV_HDR_SIZE;
+ max_chan_limit = (wmi->wmi_ab->max_msg_len[ar->pdev_idx] - len) /
+ sizeof(*chan_info);
+
+ num_send_chans = min(arg->nallchans, max_chan_limit);
+
+ arg->nallchans -= num_send_chans;
+ len += sizeof(*chan_info) * num_send_chans;
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_scan_chan_list_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_SCAN_CHAN_LIST_CMD,
+ sizeof(*cmd));
+ cmd->pdev_id = cpu_to_le32(arg->pdev_id);
+ cmd->num_scan_chans = cpu_to_le32(num_send_chans);
+ if (num_sends)
+ cmd->flags |= cpu_to_le32(WMI_APPEND_TO_EXISTING_CHAN_LIST_FLAG);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "WMI no.of chan = %d len = %d pdev_id = %d num_sends = %d\n",
+ num_send_chans, len, cmd->pdev_id, num_sends);
+
+ ptr = skb->data + sizeof(*cmd);
+
+ len = sizeof(*chan_info) * num_send_chans;
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ARRAY_STRUCT,
+ len);
+ ptr += TLV_HDR_SIZE;
+
+ for (i = 0; i < num_send_chans; ++i) {
+ chan_info = ptr;
+ memset(chan_info, 0, sizeof(*chan_info));
+ len = sizeof(*chan_info);
+ chan_info->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_CHANNEL,
+ len);
+
+ reg1 = &chan_info->reg_info_1;
+ reg2 = &chan_info->reg_info_2;
+ chan_info->mhz = cpu_to_le32(channel_arg->mhz);
+ chan_info->band_center_freq1 = cpu_to_le32(channel_arg->cfreq1);
+ chan_info->band_center_freq2 = cpu_to_le32(channel_arg->cfreq2);
+
+ if (channel_arg->is_chan_passive)
+ chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_PASSIVE);
+ if (channel_arg->allow_he)
+ chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_HE);
+ else if (channel_arg->allow_vht)
+ chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_VHT);
+ else if (channel_arg->allow_ht)
+ chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_ALLOW_HT);
+ if (channel_arg->half_rate)
+ chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_HALF_RATE);
+ if (channel_arg->quarter_rate)
+ chan_info->info |=
+ cpu_to_le32(WMI_CHAN_INFO_QUARTER_RATE);
+
+ if (channel_arg->psc_channel)
+ chan_info->info |= cpu_to_le32(WMI_CHAN_INFO_PSC);
+
+ chan_info->info |= le32_encode_bits(channel_arg->phy_mode,
+ WMI_CHAN_INFO_MODE);
+ *reg1 |= le32_encode_bits(channel_arg->minpower,
+ WMI_CHAN_REG_INFO1_MIN_PWR);
+ *reg1 |= le32_encode_bits(channel_arg->maxpower,
+ WMI_CHAN_REG_INFO1_MAX_PWR);
+ *reg1 |= le32_encode_bits(channel_arg->maxregpower,
+ WMI_CHAN_REG_INFO1_MAX_REG_PWR);
+ *reg1 |= le32_encode_bits(channel_arg->reg_class_id,
+ WMI_CHAN_REG_INFO1_REG_CLS);
+ *reg2 |= le32_encode_bits(channel_arg->antennamax,
+ WMI_CHAN_REG_INFO2_ANT_MAX);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "WMI chan scan list chan[%d] = %u, chan_info->info %8x\n",
+ i, chan_info->mhz, chan_info->info);
+
+ ptr += sizeof(*chan_info);
+
+ channel_arg++;
+ }
+
+ ret = ath12k_wmi_cmd_send(wmi, skb, WMI_SCAN_CHAN_LIST_CMDID);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to send WMI_SCAN_CHAN_LIST cmd\n");
+ dev_kfree_skb(skb);
+ return ret;
+ }
+
+ num_sends++;
+ }
+
+ return 0;
+}
+
+int ath12k_wmi_send_wmm_update_cmd(struct ath12k *ar, u32 vdev_id,
+ struct wmi_wmm_params_all_arg *param)
+{
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct wmi_vdev_set_wmm_params_cmd *cmd;
+ struct wmi_wmm_params *wmm_param;
+ struct wmi_wmm_params_arg *wmi_wmm_arg;
+ struct sk_buff *skb;
+ int ret, ac;
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_vdev_set_wmm_params_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SET_WMM_PARAMS_CMD,
+ sizeof(*cmd));
+
+ cmd->vdev_id = cpu_to_le32(vdev_id);
+ cmd->wmm_param_type = 0;
+
+ for (ac = 0; ac < WME_NUM_AC; ac++) {
+ switch (ac) {
+ case WME_AC_BE:
+ wmi_wmm_arg = &param->ac_be;
+ break;
+ case WME_AC_BK:
+ wmi_wmm_arg = &param->ac_bk;
+ break;
+ case WME_AC_VI:
+ wmi_wmm_arg = &param->ac_vi;
+ break;
+ case WME_AC_VO:
+ wmi_wmm_arg = &param->ac_vo;
+ break;
+ }
+
+ wmm_param = (struct wmi_wmm_params *)&cmd->wmm_params[ac];
+ wmm_param->tlv_header =
+ ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SET_WMM_PARAMS_CMD,
+ sizeof(*wmm_param));
+
+ wmm_param->aifs = cpu_to_le32(wmi_wmm_arg->aifs);
+ wmm_param->cwmin = cpu_to_le32(wmi_wmm_arg->cwmin);
+ wmm_param->cwmax = cpu_to_le32(wmi_wmm_arg->cwmax);
+ wmm_param->txoplimit = cpu_to_le32(wmi_wmm_arg->txop);
+ wmm_param->acm = cpu_to_le32(wmi_wmm_arg->acm);
+ wmm_param->no_ack = cpu_to_le32(wmi_wmm_arg->no_ack);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "wmi wmm set ac %d aifs %d cwmin %d cwmax %d txop %d acm %d no_ack %d\n",
+ ac, wmm_param->aifs, wmm_param->cwmin,
+ wmm_param->cwmax, wmm_param->txoplimit,
+ wmm_param->acm, wmm_param->no_ack);
+ }
+ ret = ath12k_wmi_cmd_send(wmi, skb,
+ WMI_VDEV_SET_WMM_PARAMS_CMDID);
+ if (ret) {
+ ath12k_warn(ar->ab,
+ "failed to send WMI_VDEV_SET_WMM_PARAMS_CMDID");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath12k_wmi_send_dfs_phyerr_offload_enable_cmd(struct ath12k *ar,
+ u32 pdev_id)
+{
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct wmi_dfs_phyerr_offload_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_dfs_phyerr_offload_cmd *)skb->data;
+ cmd->tlv_header =
+ ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMD,
+ sizeof(*cmd));
+
+ cmd->pdev_id = cpu_to_le32(pdev_id);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "WMI dfs phy err offload enable pdev id %d\n", pdev_id);
+
+ ret = ath12k_wmi_cmd_send(wmi, skb,
+ WMI_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMDID);
+ if (ret) {
+ ath12k_warn(ar->ab,
+ "failed to send WMI_PDEV_DFS_PHYERR_OFFLOAD_ENABLE cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath12k_wmi_delba_send(struct ath12k *ar, u32 vdev_id, const u8 *mac,
+ u32 tid, u32 initiator, u32 reason)
+{
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct wmi_delba_send_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_delba_send_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_DELBA_SEND_CMD,
+ sizeof(*cmd));
+ cmd->vdev_id = cpu_to_le32(vdev_id);
+ ether_addr_copy(cmd->peer_macaddr.addr, mac);
+ cmd->tid = cpu_to_le32(tid);
+ cmd->initiator = cpu_to_le32(initiator);
+ cmd->reasoncode = cpu_to_le32(reason);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "wmi delba send vdev_id 0x%X mac_addr %pM tid %u initiator %u reason %u\n",
+ vdev_id, mac, tid, initiator, reason);
+
+ ret = ath12k_wmi_cmd_send(wmi, skb, WMI_DELBA_SEND_CMDID);
+
+ if (ret) {
+ ath12k_warn(ar->ab,
+ "failed to send WMI_DELBA_SEND_CMDID cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath12k_wmi_addba_set_resp(struct ath12k *ar, u32 vdev_id, const u8 *mac,
+ u32 tid, u32 status)
+{
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct wmi_addba_setresponse_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_addba_setresponse_cmd *)skb->data;
+ cmd->tlv_header =
+ ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ADDBA_SETRESPONSE_CMD,
+ sizeof(*cmd));
+ cmd->vdev_id = cpu_to_le32(vdev_id);
+ ether_addr_copy(cmd->peer_macaddr.addr, mac);
+ cmd->tid = cpu_to_le32(tid);
+ cmd->statuscode = cpu_to_le32(status);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "wmi addba set resp vdev_id 0x%X mac_addr %pM tid %u status %u\n",
+ vdev_id, mac, tid, status);
+
+ ret = ath12k_wmi_cmd_send(wmi, skb, WMI_ADDBA_SET_RESP_CMDID);
+
+ if (ret) {
+ ath12k_warn(ar->ab,
+ "failed to send WMI_ADDBA_SET_RESP_CMDID cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath12k_wmi_addba_send(struct ath12k *ar, u32 vdev_id, const u8 *mac,
+ u32 tid, u32 buf_size)
+{
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct wmi_addba_send_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_addba_send_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ADDBA_SEND_CMD,
+ sizeof(*cmd));
+ cmd->vdev_id = cpu_to_le32(vdev_id);
+ ether_addr_copy(cmd->peer_macaddr.addr, mac);
+ cmd->tid = cpu_to_le32(tid);
+ cmd->buffersize = cpu_to_le32(buf_size);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "wmi addba send vdev_id 0x%X mac_addr %pM tid %u bufsize %u\n",
+ vdev_id, mac, tid, buf_size);
+
+ ret = ath12k_wmi_cmd_send(wmi, skb, WMI_ADDBA_SEND_CMDID);
+
+ if (ret) {
+ ath12k_warn(ar->ab,
+ "failed to send WMI_ADDBA_SEND_CMDID cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath12k_wmi_addba_clear_resp(struct ath12k *ar, u32 vdev_id, const u8 *mac)
+{
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct wmi_addba_clear_resp_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_addba_clear_resp_cmd *)skb->data;
+ cmd->tlv_header =
+ ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ADDBA_CLEAR_RESP_CMD,
+ sizeof(*cmd));
+ cmd->vdev_id = cpu_to_le32(vdev_id);
+ ether_addr_copy(cmd->peer_macaddr.addr, mac);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "wmi addba clear resp vdev_id 0x%X mac_addr %pM\n",
+ vdev_id, mac);
+
+ ret = ath12k_wmi_cmd_send(wmi, skb, WMI_ADDBA_CLEAR_RESP_CMDID);
+
+ if (ret) {
+ ath12k_warn(ar->ab,
+ "failed to send WMI_ADDBA_CLEAR_RESP_CMDID cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath12k_wmi_send_init_country_cmd(struct ath12k *ar,
+ struct ath12k_wmi_init_country_arg *arg)
+{
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct wmi_init_country_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_init_country_cmd *)skb->data;
+ cmd->tlv_header =
+ ath12k_wmi_tlv_cmd_hdr(WMI_TAG_SET_INIT_COUNTRY_CMD,
+ sizeof(*cmd));
+
+ cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);
+
+ switch (arg->flags) {
+ case ALPHA_IS_SET:
+ cmd->init_cc_type = WMI_COUNTRY_INFO_TYPE_ALPHA;
+ memcpy(&cmd->cc_info.alpha2, arg->cc_info.alpha2, 3);
+ break;
+ case CC_IS_SET:
+ cmd->init_cc_type = cpu_to_le32(WMI_COUNTRY_INFO_TYPE_COUNTRY_CODE);
+ cmd->cc_info.country_code =
+ cpu_to_le32(arg->cc_info.country_code);
+ break;
+ case REGDMN_IS_SET:
+ cmd->init_cc_type = cpu_to_le32(WMI_COUNTRY_INFO_TYPE_REGDOMAIN);
+ cmd->cc_info.regdom_id = cpu_to_le32(arg->cc_info.regdom_id);
+ break;
+ default:
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = ath12k_wmi_cmd_send(wmi, skb,
+ WMI_SET_INIT_COUNTRY_CMDID);
+
+out:
+ if (ret) {
+ ath12k_warn(ar->ab,
+ "failed to send WMI_SET_INIT_COUNTRY CMD :%d\n",
+ ret);
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int
+ath12k_wmi_send_twt_enable_cmd(struct ath12k *ar, u32 pdev_id)
+{
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct ath12k_base *ab = wmi->wmi_ab->ab;
+ struct wmi_twt_enable_params_cmd *cmd;
+ struct sk_buff *skb;
+ int ret, len;
+
+ len = sizeof(*cmd);
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_twt_enable_params_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_TWT_ENABLE_CMD,
+ len);
+ cmd->pdev_id = cpu_to_le32(pdev_id);
+ cmd->sta_cong_timer_ms = cpu_to_le32(ATH12K_TWT_DEF_STA_CONG_TIMER_MS);
+ cmd->default_slot_size = cpu_to_le32(ATH12K_TWT_DEF_DEFAULT_SLOT_SIZE);
+ cmd->congestion_thresh_setup =
+ cpu_to_le32(ATH12K_TWT_DEF_CONGESTION_THRESH_SETUP);
+ cmd->congestion_thresh_teardown =
+ cpu_to_le32(ATH12K_TWT_DEF_CONGESTION_THRESH_TEARDOWN);
+ cmd->congestion_thresh_critical =
+ cpu_to_le32(ATH12K_TWT_DEF_CONGESTION_THRESH_CRITICAL);
+ cmd->interference_thresh_teardown =
+ cpu_to_le32(ATH12K_TWT_DEF_INTERFERENCE_THRESH_TEARDOWN);
+ cmd->interference_thresh_setup =
+ cpu_to_le32(ATH12K_TWT_DEF_INTERFERENCE_THRESH_SETUP);
+ cmd->min_no_sta_setup = cpu_to_le32(ATH12K_TWT_DEF_MIN_NO_STA_SETUP);
+ cmd->min_no_sta_teardown = cpu_to_le32(ATH12K_TWT_DEF_MIN_NO_STA_TEARDOWN);
+ cmd->no_of_bcast_mcast_slots =
+ cpu_to_le32(ATH12K_TWT_DEF_NO_OF_BCAST_MCAST_SLOTS);
+ cmd->min_no_twt_slots = cpu_to_le32(ATH12K_TWT_DEF_MIN_NO_TWT_SLOTS);
+ cmd->max_no_sta_twt = cpu_to_le32(ATH12K_TWT_DEF_MAX_NO_STA_TWT);
+ cmd->mode_check_interval = cpu_to_le32(ATH12K_TWT_DEF_MODE_CHECK_INTERVAL);
+ cmd->add_sta_slot_interval = cpu_to_le32(ATH12K_TWT_DEF_ADD_STA_SLOT_INTERVAL);
+ cmd->remove_sta_slot_interval =
+ cpu_to_le32(ATH12K_TWT_DEF_REMOVE_STA_SLOT_INTERVAL);
+ /* TODO add MBSSID support */
+ cmd->mbss_support = 0;
+
+ ret = ath12k_wmi_cmd_send(wmi, skb,
+ WMI_TWT_ENABLE_CMDID);
+ if (ret) {
+ ath12k_warn(ab, "Failed to send WMI_TWT_ENABLE_CMDID");
+ dev_kfree_skb(skb);
+ }
+ return ret;
+}
+
+int
+ath12k_wmi_send_twt_disable_cmd(struct ath12k *ar, u32 pdev_id)
+{
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct ath12k_base *ab = wmi->wmi_ab->ab;
+ struct wmi_twt_disable_params_cmd *cmd;
+ struct sk_buff *skb;
+ int ret, len;
+
+ len = sizeof(*cmd);
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_twt_disable_params_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_TWT_DISABLE_CMD,
+ len);
+ cmd->pdev_id = cpu_to_le32(pdev_id);
+
+ ret = ath12k_wmi_cmd_send(wmi, skb,
+ WMI_TWT_DISABLE_CMDID);
+ if (ret) {
+ ath12k_warn(ab, "Failed to send WMI_TWT_DISABLE_CMDID");
+ dev_kfree_skb(skb);
+ }
+ return ret;
+}
+
+int
+ath12k_wmi_send_obss_spr_cmd(struct ath12k *ar, u32 vdev_id,
+ struct ieee80211_he_obss_pd *he_obss_pd)
+{
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct ath12k_base *ab = wmi->wmi_ab->ab;
+ struct wmi_obss_spatial_reuse_params_cmd *cmd;
+ struct sk_buff *skb;
+ int ret, len;
+
+ len = sizeof(*cmd);
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_obss_spatial_reuse_params_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_OBSS_SPATIAL_REUSE_SET_CMD,
+ len);
+ cmd->vdev_id = cpu_to_le32(vdev_id);
+ cmd->enable = cpu_to_le32(he_obss_pd->enable);
+ cmd->obss_min = a_cpu_to_sle32(he_obss_pd->min_offset);
+ cmd->obss_max = a_cpu_to_sle32(he_obss_pd->max_offset);
+
+ ret = ath12k_wmi_cmd_send(wmi, skb,
+ WMI_PDEV_OBSS_PD_SPATIAL_REUSE_CMDID);
+ if (ret) {
+ ath12k_warn(ab,
+ "Failed to send WMI_PDEV_OBSS_PD_SPATIAL_REUSE_CMDID");
+ dev_kfree_skb(skb);
+ }
+ return ret;
+}
+
+int ath12k_wmi_obss_color_cfg_cmd(struct ath12k *ar, u32 vdev_id,
+ u8 bss_color, u32 period,
+ bool enable)
+{
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct ath12k_base *ab = wmi->wmi_ab->ab;
+ struct wmi_obss_color_collision_cfg_params_cmd *cmd;
+ struct sk_buff *skb;
+ int ret, len;
+
+ len = sizeof(*cmd);
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_obss_color_collision_cfg_params_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_OBSS_COLOR_COLLISION_DET_CONFIG,
+ len);
+ cmd->vdev_id = cpu_to_le32(vdev_id);
+ cmd->evt_type = enable ? cpu_to_le32(ATH12K_OBSS_COLOR_COLLISION_DETECTION) :
+ cpu_to_le32(ATH12K_OBSS_COLOR_COLLISION_DETECTION_DISABLE);
+ cmd->current_bss_color = cpu_to_le32(bss_color);
+ cmd->detection_period_ms = cpu_to_le32(period);
+ cmd->scan_period_ms = cpu_to_le32(ATH12K_BSS_COLOR_COLLISION_SCAN_PERIOD_MS);
+ cmd->free_slot_expiry_time_ms = 0;
+ cmd->flags = 0;
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "wmi_send_obss_color_collision_cfg id %d type %d bss_color %d detect_period %d scan_period %d\n",
+ cmd->vdev_id, cmd->evt_type, cmd->current_bss_color,
+ cmd->detection_period_ms, cmd->scan_period_ms);
+
+ ret = ath12k_wmi_cmd_send(wmi, skb,
+ WMI_OBSS_COLOR_COLLISION_DET_CONFIG_CMDID);
+ if (ret) {
+ ath12k_warn(ab, "Failed to send WMI_OBSS_COLOR_COLLISION_DET_CONFIG_CMDID");
+ dev_kfree_skb(skb);
+ }
+ return ret;
+}
+
+int ath12k_wmi_send_bss_color_change_enable_cmd(struct ath12k *ar, u32 vdev_id,
+ bool enable)
+{
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct ath12k_base *ab = wmi->wmi_ab->ab;
+ struct wmi_bss_color_change_enable_params_cmd *cmd;
+ struct sk_buff *skb;
+ int ret, len;
+
+ len = sizeof(*cmd);
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_bss_color_change_enable_params_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BSS_COLOR_CHANGE_ENABLE,
+ len);
+ cmd->vdev_id = cpu_to_le32(vdev_id);
+ cmd->enable = enable ? cpu_to_le32(1) : 0;
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "wmi_send_bss_color_change_enable id %d enable %d\n",
+ cmd->vdev_id, cmd->enable);
+
+ ret = ath12k_wmi_cmd_send(wmi, skb,
+ WMI_BSS_COLOR_CHANGE_ENABLE_CMDID);
+ if (ret) {
+ ath12k_warn(ab, "Failed to send WMI_BSS_COLOR_CHANGE_ENABLE_CMDID");
+ dev_kfree_skb(skb);
+ }
+ return ret;
+}
+
+int ath12k_wmi_fils_discovery_tmpl(struct ath12k *ar, u32 vdev_id,
+ struct sk_buff *tmpl)
+{
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ void *ptr;
+ int ret, len;
+ size_t aligned_len;
+ struct wmi_fils_discovery_tmpl_cmd *cmd;
+
+ aligned_len = roundup(tmpl->len, 4);
+ len = sizeof(*cmd) + TLV_HDR_SIZE + aligned_len;
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "WMI vdev %i set FILS discovery template\n", vdev_id);
+
+ skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_fils_discovery_tmpl_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_FILS_DISCOVERY_TMPL_CMD,
+ sizeof(*cmd));
+ cmd->vdev_id = cpu_to_le32(vdev_id);
+ cmd->buf_len = cpu_to_le32(tmpl->len);
+ ptr = skb->data + sizeof(*cmd);
+
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, aligned_len);
+ memcpy(tlv->value, tmpl->data, tmpl->len);
+
+ ret = ath12k_wmi_cmd_send(ar->wmi, skb, WMI_FILS_DISCOVERY_TMPL_CMDID);
+ if (ret) {
+ ath12k_warn(ar->ab,
+ "WMI vdev %i failed to send FILS discovery template command\n",
+ vdev_id);
+ dev_kfree_skb(skb);
+ }
+ return ret;
+}
+
+int ath12k_wmi_probe_resp_tmpl(struct ath12k *ar, u32 vdev_id,
+ struct sk_buff *tmpl)
+{
+ struct wmi_probe_tmpl_cmd *cmd;
+ struct ath12k_wmi_bcn_prb_info_params *probe_info;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ void *ptr;
+ int ret, len;
+ size_t aligned_len = roundup(tmpl->len, 4);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "WMI vdev %i set probe response template\n", vdev_id);
+
+ len = sizeof(*cmd) + sizeof(*probe_info) + TLV_HDR_SIZE + aligned_len;
+
+ skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_probe_tmpl_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PRB_TMPL_CMD,
+ sizeof(*cmd));
+ cmd->vdev_id = cpu_to_le32(vdev_id);
+ cmd->buf_len = cpu_to_le32(tmpl->len);
+
+ ptr = skb->data + sizeof(*cmd);
+
+ probe_info = ptr;
+ len = sizeof(*probe_info);
+ probe_info->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_BCN_PRB_INFO,
+ len);
+ probe_info->caps = 0;
+ probe_info->erp = 0;
+
+ ptr += sizeof(*probe_info);
+
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, aligned_len);
+ memcpy(tlv->value, tmpl->data, tmpl->len);
+
+ ret = ath12k_wmi_cmd_send(ar->wmi, skb, WMI_PRB_TMPL_CMDID);
+ if (ret) {
+ ath12k_warn(ar->ab,
+ "WMI vdev %i failed to send probe response template command\n",
+ vdev_id);
+ dev_kfree_skb(skb);
+ }
+ return ret;
+}
+
+int ath12k_wmi_fils_discovery(struct ath12k *ar, u32 vdev_id, u32 interval,
+ bool unsol_bcast_probe_resp_enabled)
+{
+ struct sk_buff *skb;
+ int ret, len;
+ struct wmi_fils_discovery_cmd *cmd;
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "WMI vdev %i set %s interval to %u TU\n",
+ vdev_id, unsol_bcast_probe_resp_enabled ?
+ "unsolicited broadcast probe response" : "FILS discovery",
+ interval);
+
+ len = sizeof(*cmd);
+ skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_fils_discovery_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ENABLE_FILS_CMD,
+ len);
+ cmd->vdev_id = cpu_to_le32(vdev_id);
+ cmd->interval = cpu_to_le32(interval);
+ cmd->config = cpu_to_le32(unsol_bcast_probe_resp_enabled);
+
+ ret = ath12k_wmi_cmd_send(ar->wmi, skb, WMI_ENABLE_FILS_CMDID);
+ if (ret) {
+ ath12k_warn(ar->ab,
+ "WMI vdev %i failed to send FILS discovery enable/disable command\n",
+ vdev_id);
+ dev_kfree_skb(skb);
+ }
+ return ret;
+}
+
+static void
+ath12k_fill_band_to_mac_param(struct ath12k_base *soc,
+ struct ath12k_wmi_pdev_band_arg *arg)
+{
+ u8 i;
+ struct ath12k_wmi_hal_reg_capabilities_ext_arg *hal_reg_cap;
+ struct ath12k_pdev *pdev;
+
+ for (i = 0; i < soc->num_radios; i++) {
+ pdev = &soc->pdevs[i];
+ hal_reg_cap = &soc->hal_reg_cap[i];
+ arg[i].pdev_id = pdev->pdev_id;
+
+ switch (pdev->cap.supported_bands) {
+ case WMI_HOST_WLAN_2G_5G_CAP:
+ arg[i].start_freq = hal_reg_cap->low_2ghz_chan;
+ arg[i].end_freq = hal_reg_cap->high_5ghz_chan;
+ break;
+ case WMI_HOST_WLAN_2G_CAP:
+ arg[i].start_freq = hal_reg_cap->low_2ghz_chan;
+ arg[i].end_freq = hal_reg_cap->high_2ghz_chan;
+ break;
+ case WMI_HOST_WLAN_5G_CAP:
+ arg[i].start_freq = hal_reg_cap->low_5ghz_chan;
+ arg[i].end_freq = hal_reg_cap->high_5ghz_chan;
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+static void
+ath12k_wmi_copy_resource_config(struct ath12k_wmi_resource_config_params *wmi_cfg,
+ struct ath12k_wmi_resource_config_arg *tg_cfg)
+{
+ wmi_cfg->num_vdevs = cpu_to_le32(tg_cfg->num_vdevs);
+ wmi_cfg->num_peers = cpu_to_le32(tg_cfg->num_peers);
+ wmi_cfg->num_offload_peers = cpu_to_le32(tg_cfg->num_offload_peers);
+ wmi_cfg->num_offload_reorder_buffs =
+ cpu_to_le32(tg_cfg->num_offload_reorder_buffs);
+ wmi_cfg->num_peer_keys = cpu_to_le32(tg_cfg->num_peer_keys);
+ wmi_cfg->num_tids = cpu_to_le32(tg_cfg->num_tids);
+ wmi_cfg->ast_skid_limit = cpu_to_le32(tg_cfg->ast_skid_limit);
+ wmi_cfg->tx_chain_mask = cpu_to_le32(tg_cfg->tx_chain_mask);
+ wmi_cfg->rx_chain_mask = cpu_to_le32(tg_cfg->rx_chain_mask);
+ wmi_cfg->rx_timeout_pri[0] = cpu_to_le32(tg_cfg->rx_timeout_pri[0]);
+ wmi_cfg->rx_timeout_pri[1] = cpu_to_le32(tg_cfg->rx_timeout_pri[1]);
+ wmi_cfg->rx_timeout_pri[2] = cpu_to_le32(tg_cfg->rx_timeout_pri[2]);
+ wmi_cfg->rx_timeout_pri[3] = cpu_to_le32(tg_cfg->rx_timeout_pri[3]);
+ wmi_cfg->rx_decap_mode = cpu_to_le32(tg_cfg->rx_decap_mode);
+ wmi_cfg->scan_max_pending_req = cpu_to_le32(tg_cfg->scan_max_pending_req);
+ wmi_cfg->bmiss_offload_max_vdev = cpu_to_le32(tg_cfg->bmiss_offload_max_vdev);
+ wmi_cfg->roam_offload_max_vdev = cpu_to_le32(tg_cfg->roam_offload_max_vdev);
+ wmi_cfg->roam_offload_max_ap_profiles =
+ cpu_to_le32(tg_cfg->roam_offload_max_ap_profiles);
+ wmi_cfg->num_mcast_groups = cpu_to_le32(tg_cfg->num_mcast_groups);
+ wmi_cfg->num_mcast_table_elems = cpu_to_le32(tg_cfg->num_mcast_table_elems);
+ wmi_cfg->mcast2ucast_mode = cpu_to_le32(tg_cfg->mcast2ucast_mode);
+ wmi_cfg->tx_dbg_log_size = cpu_to_le32(tg_cfg->tx_dbg_log_size);
+ wmi_cfg->num_wds_entries = cpu_to_le32(tg_cfg->num_wds_entries);
+ wmi_cfg->dma_burst_size = cpu_to_le32(tg_cfg->dma_burst_size);
+ wmi_cfg->mac_aggr_delim = cpu_to_le32(tg_cfg->mac_aggr_delim);
+ wmi_cfg->rx_skip_defrag_timeout_dup_detection_check =
+ cpu_to_le32(tg_cfg->rx_skip_defrag_timeout_dup_detection_check);
+ wmi_cfg->vow_config = cpu_to_le32(tg_cfg->vow_config);
+ wmi_cfg->gtk_offload_max_vdev = cpu_to_le32(tg_cfg->gtk_offload_max_vdev);
+ wmi_cfg->num_msdu_desc = cpu_to_le32(tg_cfg->num_msdu_desc);
+ wmi_cfg->max_frag_entries = cpu_to_le32(tg_cfg->max_frag_entries);
+ wmi_cfg->num_tdls_vdevs = cpu_to_le32(tg_cfg->num_tdls_vdevs);
+ wmi_cfg->num_tdls_conn_table_entries =
+ cpu_to_le32(tg_cfg->num_tdls_conn_table_entries);
+ wmi_cfg->beacon_tx_offload_max_vdev =
+ cpu_to_le32(tg_cfg->beacon_tx_offload_max_vdev);
+ wmi_cfg->num_multicast_filter_entries =
+ cpu_to_le32(tg_cfg->num_multicast_filter_entries);
+ wmi_cfg->num_wow_filters = cpu_to_le32(tg_cfg->num_wow_filters);
+ wmi_cfg->num_keep_alive_pattern = cpu_to_le32(tg_cfg->num_keep_alive_pattern);
+ wmi_cfg->keep_alive_pattern_size = cpu_to_le32(tg_cfg->keep_alive_pattern_size);
+ wmi_cfg->max_tdls_concurrent_sleep_sta =
+ cpu_to_le32(tg_cfg->max_tdls_concurrent_sleep_sta);
+ wmi_cfg->max_tdls_concurrent_buffer_sta =
+ cpu_to_le32(tg_cfg->max_tdls_concurrent_buffer_sta);
+ wmi_cfg->wmi_send_separate = cpu_to_le32(tg_cfg->wmi_send_separate);
+ wmi_cfg->num_ocb_vdevs = cpu_to_le32(tg_cfg->num_ocb_vdevs);
+ wmi_cfg->num_ocb_channels = cpu_to_le32(tg_cfg->num_ocb_channels);
+ wmi_cfg->num_ocb_schedules = cpu_to_le32(tg_cfg->num_ocb_schedules);
+ wmi_cfg->bpf_instruction_size = cpu_to_le32(tg_cfg->bpf_instruction_size);
+ wmi_cfg->max_bssid_rx_filters = cpu_to_le32(tg_cfg->max_bssid_rx_filters);
+ wmi_cfg->use_pdev_id = cpu_to_le32(tg_cfg->use_pdev_id);
+ wmi_cfg->flag1 = cpu_to_le32(tg_cfg->atf_config);
+ wmi_cfg->peer_map_unmap_version = cpu_to_le32(tg_cfg->peer_map_unmap_version);
+ wmi_cfg->sched_params = cpu_to_le32(tg_cfg->sched_params);
+ wmi_cfg->twt_ap_pdev_count = cpu_to_le32(tg_cfg->twt_ap_pdev_count);
+ wmi_cfg->twt_ap_sta_count = cpu_to_le32(tg_cfg->twt_ap_sta_count);
+ wmi_cfg->host_service_flags =
+ cpu_to_le32(1 << WMI_RSRC_CFG_HOST_SVC_FLAG_REG_CC_EXT_SUPPORT_BIT);
+}
+
+static int ath12k_init_cmd_send(struct ath12k_wmi_pdev *wmi,
+ struct ath12k_wmi_init_cmd_arg *arg)
+{
+ struct ath12k_base *ab = wmi->wmi_ab->ab;
+ struct sk_buff *skb;
+ struct wmi_init_cmd *cmd;
+ struct ath12k_wmi_resource_config_params *cfg;
+ struct ath12k_wmi_pdev_set_hw_mode_cmd *hw_mode;
+ struct ath12k_wmi_pdev_band_to_mac_params *band_to_mac;
+ struct ath12k_wmi_host_mem_chunk_params *host_mem_chunks;
+ struct wmi_tlv *tlv;
+ size_t ret, len;
+ void *ptr;
+ u32 hw_mode_len = 0;
+ u16 idx;
+
+ if (arg->hw_mode_id != WMI_HOST_HW_MODE_MAX)
+ hw_mode_len = sizeof(*hw_mode) + TLV_HDR_SIZE +
+ (arg->num_band_to_mac * sizeof(*band_to_mac));
+
+ len = sizeof(*cmd) + TLV_HDR_SIZE + sizeof(*cfg) + hw_mode_len +
+ (arg->num_mem_chunks ? (sizeof(*host_mem_chunks) * WMI_MAX_MEM_REQS) : 0);
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_init_cmd *)skb->data;
+
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_INIT_CMD,
+ sizeof(*cmd));
+
+ ptr = skb->data + sizeof(*cmd);
+ cfg = ptr;
+
+ ath12k_wmi_copy_resource_config(cfg, &arg->res_cfg);
+
+ cfg->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_RESOURCE_CONFIG,
+ sizeof(*cfg));
+
+ ptr += sizeof(*cfg);
+ host_mem_chunks = ptr + TLV_HDR_SIZE;
+ len = sizeof(struct ath12k_wmi_host_mem_chunk_params);
+
+ for (idx = 0; idx < arg->num_mem_chunks; ++idx) {
+ host_mem_chunks[idx].tlv_header =
+ ath12k_wmi_tlv_hdr(WMI_TAG_WLAN_HOST_MEMORY_CHUNK,
+ len);
+
+ host_mem_chunks[idx].ptr = cpu_to_le32(arg->mem_chunks[idx].paddr);
+ host_mem_chunks[idx].size = cpu_to_le32(arg->mem_chunks[idx].len);
+ host_mem_chunks[idx].req_id = cpu_to_le32(arg->mem_chunks[idx].req_id);
+
+ ath12k_dbg(ab, ATH12K_DBG_WMI,
+ "WMI host mem chunk req_id %d paddr 0x%llx len %d\n",
+ arg->mem_chunks[idx].req_id,
+ (u64)arg->mem_chunks[idx].paddr,
+ arg->mem_chunks[idx].len);
+ }
+ cmd->num_host_mem_chunks = cpu_to_le32(arg->num_mem_chunks);
+ len = sizeof(struct ath12k_wmi_host_mem_chunk_params) * arg->num_mem_chunks;
+
+ /* num_mem_chunks is zero */
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
+ ptr += TLV_HDR_SIZE + len;
+
+ if (arg->hw_mode_id != WMI_HOST_HW_MODE_MAX) {
+ hw_mode = (struct ath12k_wmi_pdev_set_hw_mode_cmd *)ptr;
+ hw_mode->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_HW_MODE_CMD,
+ sizeof(*hw_mode));
+
+ hw_mode->hw_mode_index = cpu_to_le32(arg->hw_mode_id);
+ hw_mode->num_band_to_mac = cpu_to_le32(arg->num_band_to_mac);
+
+ ptr += sizeof(*hw_mode);
+
+ len = arg->num_band_to_mac * sizeof(*band_to_mac);
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
+
+ ptr += TLV_HDR_SIZE;
+ len = sizeof(*band_to_mac);
+
+ for (idx = 0; idx < arg->num_band_to_mac; idx++) {
+ band_to_mac = (void *)ptr;
+
+ band_to_mac->tlv_header =
+ ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_BAND_TO_MAC,
+ len);
+ band_to_mac->pdev_id = cpu_to_le32(arg->band_to_mac[idx].pdev_id);
+ band_to_mac->start_freq =
+ cpu_to_le32(arg->band_to_mac[idx].start_freq);
+ band_to_mac->end_freq =
+ cpu_to_le32(arg->band_to_mac[idx].end_freq);
+ ptr += sizeof(*band_to_mac);
+ }
+ }
+
+ ret = ath12k_wmi_cmd_send(wmi, skb, WMI_INIT_CMDID);
+ if (ret) {
+ ath12k_warn(ab, "failed to send WMI_INIT_CMDID\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath12k_wmi_pdev_lro_cfg(struct ath12k *ar,
+ int pdev_id)
+{
+ struct ath12k_wmi_pdev_lro_config_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct ath12k_wmi_pdev_lro_config_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_LRO_INFO_CMD,
+ sizeof(*cmd));
+
+ get_random_bytes(cmd->th_4, sizeof(cmd->th_4));
+ get_random_bytes(cmd->th_6, sizeof(cmd->th_6));
+
+ cmd->pdev_id = cpu_to_le32(pdev_id);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "WMI lro cfg cmd pdev_id 0x%x\n", pdev_id);
+
+ ret = ath12k_wmi_cmd_send(ar->wmi, skb, WMI_LRO_CONFIG_CMDID);
+ if (ret) {
+ ath12k_warn(ar->ab,
+ "failed to send lro cfg req wmi cmd\n");
+ goto err;
+ }
+
+ return 0;
+err:
+ dev_kfree_skb(skb);
+ return ret;
+}
+
+int ath12k_wmi_wait_for_service_ready(struct ath12k_base *ab)
+{
+ unsigned long time_left;
+
+ time_left = wait_for_completion_timeout(&ab->wmi_ab.service_ready,
+ WMI_SERVICE_READY_TIMEOUT_HZ);
+ if (!time_left)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+int ath12k_wmi_wait_for_unified_ready(struct ath12k_base *ab)
+{
+ unsigned long time_left;
+
+ time_left = wait_for_completion_timeout(&ab->wmi_ab.unified_ready,
+ WMI_SERVICE_READY_TIMEOUT_HZ);
+ if (!time_left)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+int ath12k_wmi_set_hw_mode(struct ath12k_base *ab,
+ enum wmi_host_hw_mode_config_type mode)
+{
+ struct ath12k_wmi_pdev_set_hw_mode_cmd *cmd;
+ struct sk_buff *skb;
+ struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab;
+ int len;
+ int ret;
+
+ len = sizeof(*cmd);
+
+ skb = ath12k_wmi_alloc_skb(wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct ath12k_wmi_pdev_set_hw_mode_cmd *)skb->data;
+
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_HW_MODE_CMD,
+ sizeof(*cmd));
+
+ cmd->pdev_id = WMI_PDEV_ID_SOC;
+ cmd->hw_mode_index = cpu_to_le32(mode);
+
+ ret = ath12k_wmi_cmd_send(&wmi_ab->wmi[0], skb, WMI_PDEV_SET_HW_MODE_CMDID);
+ if (ret) {
+ ath12k_warn(ab, "failed to send WMI_PDEV_SET_HW_MODE_CMDID\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath12k_wmi_cmd_init(struct ath12k_base *ab)
+{
+ struct ath12k_wmi_base *wmi_sc = &ab->wmi_ab;
+ struct ath12k_wmi_init_cmd_arg arg = {};
+
+ ab->hw_params->wmi_init(ab, &arg.res_cfg);
+
+ arg.num_mem_chunks = wmi_sc->num_mem_chunks;
+ arg.hw_mode_id = wmi_sc->preferred_hw_mode;
+ arg.mem_chunks = wmi_sc->mem_chunks;
+
+ if (ab->hw_params->single_pdev_only)
+ arg.hw_mode_id = WMI_HOST_HW_MODE_MAX;
+
+ arg.num_band_to_mac = ab->num_radios;
+ ath12k_fill_band_to_mac_param(ab, arg.band_to_mac);
+
+ return ath12k_init_cmd_send(&wmi_sc->wmi[0], &arg);
+}
+
+int ath12k_wmi_vdev_spectral_conf(struct ath12k *ar,
+ struct ath12k_wmi_vdev_spectral_conf_arg *arg)
+{
+ struct ath12k_wmi_vdev_spectral_conf_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct ath12k_wmi_vdev_spectral_conf_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SPECTRAL_CONFIGURE_CMD,
+ sizeof(*cmd));
+ cmd->vdev_id = cpu_to_le32(arg->vdev_id);
+ cmd->scan_count = cpu_to_le32(arg->scan_count);
+ cmd->scan_period = cpu_to_le32(arg->scan_period);
+ cmd->scan_priority = cpu_to_le32(arg->scan_priority);
+ cmd->scan_fft_size = cpu_to_le32(arg->scan_fft_size);
+ cmd->scan_gc_ena = cpu_to_le32(arg->scan_gc_ena);
+ cmd->scan_restart_ena = cpu_to_le32(arg->scan_restart_ena);
+ cmd->scan_noise_floor_ref = cpu_to_le32(arg->scan_noise_floor_ref);
+ cmd->scan_init_delay = cpu_to_le32(arg->scan_init_delay);
+ cmd->scan_nb_tone_thr = cpu_to_le32(arg->scan_nb_tone_thr);
+ cmd->scan_str_bin_thr = cpu_to_le32(arg->scan_str_bin_thr);
+ cmd->scan_wb_rpt_mode = cpu_to_le32(arg->scan_wb_rpt_mode);
+ cmd->scan_rssi_rpt_mode = cpu_to_le32(arg->scan_rssi_rpt_mode);
+ cmd->scan_rssi_thr = cpu_to_le32(arg->scan_rssi_thr);
+ cmd->scan_pwr_format = cpu_to_le32(arg->scan_pwr_format);
+ cmd->scan_rpt_mode = cpu_to_le32(arg->scan_rpt_mode);
+ cmd->scan_bin_scale = cpu_to_le32(arg->scan_bin_scale);
+ cmd->scan_dbm_adj = cpu_to_le32(arg->scan_dbm_adj);
+ cmd->scan_chn_mask = cpu_to_le32(arg->scan_chn_mask);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "WMI spectral scan config cmd vdev_id 0x%x\n",
+ arg->vdev_id);
+
+ ret = ath12k_wmi_cmd_send(ar->wmi, skb,
+ WMI_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID);
+ if (ret) {
+ ath12k_warn(ar->ab,
+ "failed to send spectral scan config wmi cmd\n");
+ goto err;
+ }
+
+ return 0;
+err:
+ dev_kfree_skb(skb);
+ return ret;
+}
+
+int ath12k_wmi_vdev_spectral_enable(struct ath12k *ar, u32 vdev_id,
+ u32 trigger, u32 enable)
+{
+ struct ath12k_wmi_vdev_spectral_enable_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct ath12k_wmi_vdev_spectral_enable_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_SPECTRAL_ENABLE_CMD,
+ sizeof(*cmd));
+
+ cmd->vdev_id = cpu_to_le32(vdev_id);
+ cmd->trigger_cmd = cpu_to_le32(trigger);
+ cmd->enable_cmd = cpu_to_le32(enable);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "WMI spectral enable cmd vdev id 0x%x\n",
+ vdev_id);
+
+ ret = ath12k_wmi_cmd_send(ar->wmi, skb,
+ WMI_VDEV_SPECTRAL_SCAN_ENABLE_CMDID);
+ if (ret) {
+ ath12k_warn(ar->ab,
+ "failed to send spectral enable wmi cmd\n");
+ goto err;
+ }
+
+ return 0;
+err:
+ dev_kfree_skb(skb);
+ return ret;
+}
+
+int ath12k_wmi_pdev_dma_ring_cfg(struct ath12k *ar,
+ struct ath12k_wmi_pdev_dma_ring_cfg_arg *arg)
+{
+ struct ath12k_wmi_pdev_dma_ring_cfg_req_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct ath12k_wmi_pdev_dma_ring_cfg_req_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_DMA_RING_CFG_REQ,
+ sizeof(*cmd));
+
+ cmd->pdev_id = cpu_to_le32(DP_SW2HW_MACID(arg->pdev_id));
+ cmd->module_id = cpu_to_le32(arg->module_id);
+ cmd->base_paddr_lo = cpu_to_le32(arg->base_paddr_lo);
+ cmd->base_paddr_hi = cpu_to_le32(arg->base_paddr_hi);
+ cmd->head_idx_paddr_lo = cpu_to_le32(arg->head_idx_paddr_lo);
+ cmd->head_idx_paddr_hi = cpu_to_le32(arg->head_idx_paddr_hi);
+ cmd->tail_idx_paddr_lo = cpu_to_le32(arg->tail_idx_paddr_lo);
+ cmd->tail_idx_paddr_hi = cpu_to_le32(arg->tail_idx_paddr_hi);
+ cmd->num_elems = cpu_to_le32(arg->num_elems);
+ cmd->buf_size = cpu_to_le32(arg->buf_size);
+ cmd->num_resp_per_event = cpu_to_le32(arg->num_resp_per_event);
+ cmd->event_timeout_ms = cpu_to_le32(arg->event_timeout_ms);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "WMI DMA ring cfg req cmd pdev_id 0x%x\n",
+ arg->pdev_id);
+
+ ret = ath12k_wmi_cmd_send(ar->wmi, skb,
+ WMI_PDEV_DMA_RING_CFG_REQ_CMDID);
+ if (ret) {
+ ath12k_warn(ar->ab,
+ "failed to send dma ring cfg req wmi cmd\n");
+ goto err;
+ }
+
+ return 0;
+err:
+ dev_kfree_skb(skb);
+ return ret;
+}
+
+static int ath12k_wmi_dma_buf_entry_parse(struct ath12k_base *soc,
+ u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ struct ath12k_wmi_dma_buf_release_arg *arg = data;
+
+ if (tag != WMI_TAG_DMA_BUF_RELEASE_ENTRY)
+ return -EPROTO;
+
+ if (arg->num_buf_entry >= le32_to_cpu(arg->fixed.num_buf_release_entry))
+ return -ENOBUFS;
+
+ arg->num_buf_entry++;
+ return 0;
+}
+
+static int ath12k_wmi_dma_buf_meta_parse(struct ath12k_base *soc,
+ u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ struct ath12k_wmi_dma_buf_release_arg *arg = data;
+
+ if (tag != WMI_TAG_DMA_BUF_RELEASE_SPECTRAL_META_DATA)
+ return -EPROTO;
+
+ if (arg->num_meta >= le32_to_cpu(arg->fixed.num_meta_data_entry))
+ return -ENOBUFS;
+
+ arg->num_meta++;
+
+ return 0;
+}
+
+static int ath12k_wmi_dma_buf_parse(struct ath12k_base *ab,
+ u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ struct ath12k_wmi_dma_buf_release_arg *arg = data;
+ const struct ath12k_wmi_dma_buf_release_fixed_params *fixed;
+ u32 pdev_id;
+ int ret;
+
+ switch (tag) {
+ case WMI_TAG_DMA_BUF_RELEASE:
+ fixed = ptr;
+ arg->fixed = *fixed;
+ pdev_id = DP_HW2SW_MACID(le32_to_cpu(fixed->pdev_id));
+ arg->fixed.pdev_id = cpu_to_le32(pdev_id);
+ break;
+ case WMI_TAG_ARRAY_STRUCT:
+ if (!arg->buf_entry_done) {
+ arg->num_buf_entry = 0;
+ arg->buf_entry = ptr;
+
+ ret = ath12k_wmi_tlv_iter(ab, ptr, len,
+ ath12k_wmi_dma_buf_entry_parse,
+ arg);
+ if (ret) {
+ ath12k_warn(ab, "failed to parse dma buf entry tlv %d\n",
+ ret);
+ return ret;
+ }
+
+ arg->buf_entry_done = true;
+ } else if (!arg->meta_data_done) {
+ arg->num_meta = 0;
+ arg->meta_data = ptr;
+
+ ret = ath12k_wmi_tlv_iter(ab, ptr, len,
+ ath12k_wmi_dma_buf_meta_parse,
+ arg);
+ if (ret) {
+ ath12k_warn(ab, "failed to parse dma buf meta tlv %d\n",
+ ret);
+ return ret;
+ }
+
+ arg->meta_data_done = true;
+ }
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static void ath12k_wmi_pdev_dma_ring_buf_release_event(struct ath12k_base *ab,
+ struct sk_buff *skb)
+{
+ struct ath12k_wmi_dma_buf_release_arg arg = {};
+ struct ath12k_dbring_buf_release_event param;
+ int ret;
+
+ ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
+ ath12k_wmi_dma_buf_parse,
+ &arg);
+ if (ret) {
+ ath12k_warn(ab, "failed to parse dma buf release tlv %d\n", ret);
+ return;
+ }
+
+ param.fixed = arg.fixed;
+ param.buf_entry = arg.buf_entry;
+ param.num_buf_entry = arg.num_buf_entry;
+ param.meta_data = arg.meta_data;
+ param.num_meta = arg.num_meta;
+
+ ret = ath12k_dbring_buffer_release_event(ab, &param);
+ if (ret) {
+ ath12k_warn(ab, "failed to handle dma buf release event %d\n", ret);
+ return;
+ }
+}
+
+static int ath12k_wmi_hw_mode_caps_parse(struct ath12k_base *soc,
+ u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
+ struct ath12k_wmi_hw_mode_cap_params *hw_mode_cap;
+ u32 phy_map = 0;
+
+ if (tag != WMI_TAG_HW_MODE_CAPABILITIES)
+ return -EPROTO;
+
+ if (svc_rdy_ext->n_hw_mode_caps >= svc_rdy_ext->arg.num_hw_modes)
+ return -ENOBUFS;
+
+ hw_mode_cap = container_of(ptr, struct ath12k_wmi_hw_mode_cap_params,
+ hw_mode_id);
+ svc_rdy_ext->n_hw_mode_caps++;
+
+ phy_map = le32_to_cpu(hw_mode_cap->phy_id_map);
+ svc_rdy_ext->tot_phy_id += fls(phy_map);
+
+ return 0;
+}
+
+static int ath12k_wmi_hw_mode_caps(struct ath12k_base *soc,
+ u16 len, const void *ptr, void *data)
+{
+ struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
+ const struct ath12k_wmi_hw_mode_cap_params *hw_mode_caps;
+ enum wmi_host_hw_mode_config_type mode, pref;
+ u32 i;
+ int ret;
+
+ svc_rdy_ext->n_hw_mode_caps = 0;
+ svc_rdy_ext->hw_mode_caps = ptr;
+
+ ret = ath12k_wmi_tlv_iter(soc, ptr, len,
+ ath12k_wmi_hw_mode_caps_parse,
+ svc_rdy_ext);
+ if (ret) {
+ ath12k_warn(soc, "failed to parse tlv %d\n", ret);
+ return ret;
+ }
+
+ for (i = 0 ; i < svc_rdy_ext->n_hw_mode_caps; i++) {
+ hw_mode_caps = &svc_rdy_ext->hw_mode_caps[i];
+ mode = le32_to_cpu(hw_mode_caps->hw_mode_id);
+ pref = soc->wmi_ab.preferred_hw_mode;
+
+ if (ath12k_hw_mode_pri_map[mode] < ath12k_hw_mode_pri_map[pref]) {
+ svc_rdy_ext->pref_hw_mode_caps = *hw_mode_caps;
+ soc->wmi_ab.preferred_hw_mode = mode;
+ }
+ }
+
+ ath12k_dbg(soc, ATH12K_DBG_WMI, "preferred_hw_mode:%d\n",
+ soc->wmi_ab.preferred_hw_mode);
+ if (soc->wmi_ab.preferred_hw_mode == WMI_HOST_HW_MODE_MAX)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int ath12k_wmi_mac_phy_caps_parse(struct ath12k_base *soc,
+ u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
+
+ if (tag != WMI_TAG_MAC_PHY_CAPABILITIES)
+ return -EPROTO;
+
+ if (svc_rdy_ext->n_mac_phy_caps >= svc_rdy_ext->tot_phy_id)
+ return -ENOBUFS;
+
+ len = min_t(u16, len, sizeof(struct ath12k_wmi_mac_phy_caps_params));
+ if (!svc_rdy_ext->n_mac_phy_caps) {
+ svc_rdy_ext->mac_phy_caps = kzalloc((svc_rdy_ext->tot_phy_id) * len,
+ GFP_ATOMIC);
+ if (!svc_rdy_ext->mac_phy_caps)
+ return -ENOMEM;
+ }
+
+ memcpy(svc_rdy_ext->mac_phy_caps + svc_rdy_ext->n_mac_phy_caps, ptr, len);
+ svc_rdy_ext->n_mac_phy_caps++;
+ return 0;
+}
+
+static int ath12k_wmi_ext_hal_reg_caps_parse(struct ath12k_base *soc,
+ u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
+
+ if (tag != WMI_TAG_HAL_REG_CAPABILITIES_EXT)
+ return -EPROTO;
+
+ if (svc_rdy_ext->n_ext_hal_reg_caps >= svc_rdy_ext->arg.num_phy)
+ return -ENOBUFS;
+
+ svc_rdy_ext->n_ext_hal_reg_caps++;
+ return 0;
+}
+
+static int ath12k_wmi_ext_hal_reg_caps(struct ath12k_base *soc,
+ u16 len, const void *ptr, void *data)
+{
+ struct ath12k_wmi_pdev *wmi_handle = &soc->wmi_ab.wmi[0];
+ struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
+ struct ath12k_wmi_hal_reg_capabilities_ext_arg reg_cap;
+ int ret;
+ u32 i;
+
+ svc_rdy_ext->n_ext_hal_reg_caps = 0;
+ svc_rdy_ext->ext_hal_reg_caps = ptr;
+ ret = ath12k_wmi_tlv_iter(soc, ptr, len,
+ ath12k_wmi_ext_hal_reg_caps_parse,
+ svc_rdy_ext);
+ if (ret) {
+ ath12k_warn(soc, "failed to parse tlv %d\n", ret);
+ return ret;
+ }
+
+ for (i = 0; i < svc_rdy_ext->arg.num_phy; i++) {
+ ret = ath12k_pull_reg_cap_svc_rdy_ext(wmi_handle,
+ svc_rdy_ext->soc_hal_reg_caps,
+ svc_rdy_ext->ext_hal_reg_caps, i,
+ &reg_cap);
+ if (ret) {
+ ath12k_warn(soc, "failed to extract reg cap %d\n", i);
+ return ret;
+ }
+ soc->hal_reg_cap[reg_cap.phy_id] = reg_cap;
+ }
+ return 0;
+}
+
+static int ath12k_wmi_ext_soc_hal_reg_caps_parse(struct ath12k_base *soc,
+ u16 len, const void *ptr,
+ void *data)
+{
+ struct ath12k_wmi_pdev *wmi_handle = &soc->wmi_ab.wmi[0];
+ struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
+ u8 hw_mode_id = le32_to_cpu(svc_rdy_ext->pref_hw_mode_caps.hw_mode_id);
+ u32 phy_id_map;
+ int pdev_index = 0;
+ int ret;
+
+ svc_rdy_ext->soc_hal_reg_caps = ptr;
+ svc_rdy_ext->arg.num_phy = le32_to_cpu(svc_rdy_ext->soc_hal_reg_caps->num_phy);
+
+ soc->num_radios = 0;
+ phy_id_map = le32_to_cpu(svc_rdy_ext->pref_hw_mode_caps.phy_id_map);
+
+ while (phy_id_map && soc->num_radios < MAX_RADIOS) {
+ ret = ath12k_pull_mac_phy_cap_svc_ready_ext(wmi_handle,
+ svc_rdy_ext,
+ hw_mode_id, soc->num_radios,
+ &soc->pdevs[pdev_index]);
+ if (ret) {
+ ath12k_warn(soc, "failed to extract mac caps, idx :%d\n",
+ soc->num_radios);
+ return ret;
+ }
+
+ soc->num_radios++;
+
+ /* For single_pdev_only targets,
+ * save mac_phy capability in the same pdev
+ */
+ if (soc->hw_params->single_pdev_only)
+ pdev_index = 0;
+ else
+ pdev_index = soc->num_radios;
+
+ /* TODO: mac_phy_cap prints */
+ phy_id_map >>= 1;
+ }
+
+ if (soc->hw_params->single_pdev_only) {
+ soc->num_radios = 1;
+ soc->pdevs[0].pdev_id = 0;
+ }
+
+ return 0;
+}
+
+static int ath12k_wmi_dma_ring_caps_parse(struct ath12k_base *soc,
+ u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ struct ath12k_wmi_dma_ring_caps_parse *parse = data;
+
+ if (tag != WMI_TAG_DMA_RING_CAPABILITIES)
+ return -EPROTO;
+
+ parse->n_dma_ring_caps++;
+ return 0;
+}
+
+static int ath12k_wmi_alloc_dbring_caps(struct ath12k_base *ab,
+ u32 num_cap)
+{
+ size_t sz;
+ void *ptr;
+
+ sz = num_cap * sizeof(struct ath12k_dbring_cap);
+ ptr = kzalloc(sz, GFP_ATOMIC);
+ if (!ptr)
+ return -ENOMEM;
+
+ ab->db_caps = ptr;
+ ab->num_db_cap = num_cap;
+
+ return 0;
+}
+
+static void ath12k_wmi_free_dbring_caps(struct ath12k_base *ab)
+{
+ kfree(ab->db_caps);
+ ab->db_caps = NULL;
+}
+
+static int ath12k_wmi_dma_ring_caps(struct ath12k_base *ab,
+ u16 len, const void *ptr, void *data)
+{
+ struct ath12k_wmi_dma_ring_caps_parse *dma_caps_parse = data;
+ struct ath12k_wmi_dma_ring_caps_params *dma_caps;
+ struct ath12k_dbring_cap *dir_buff_caps;
+ int ret;
+ u32 i;
+
+ dma_caps_parse->n_dma_ring_caps = 0;
+ dma_caps = (struct ath12k_wmi_dma_ring_caps_params *)ptr;
+ ret = ath12k_wmi_tlv_iter(ab, ptr, len,
+ ath12k_wmi_dma_ring_caps_parse,
+ dma_caps_parse);
+ if (ret) {
+ ath12k_warn(ab, "failed to parse dma ring caps tlv %d\n", ret);
+ return ret;
+ }
+
+ if (!dma_caps_parse->n_dma_ring_caps)
+ return 0;
+
+ if (ab->num_db_cap) {
+ ath12k_warn(ab, "Already processed, so ignoring dma ring caps\n");
+ return 0;
+ }
+
+ ret = ath12k_wmi_alloc_dbring_caps(ab, dma_caps_parse->n_dma_ring_caps);
+ if (ret)
+ return ret;
+
+ dir_buff_caps = ab->db_caps;
+ for (i = 0; i < dma_caps_parse->n_dma_ring_caps; i++) {
+ if (le32_to_cpu(dma_caps[i].module_id) >= WMI_DIRECT_BUF_MAX) {
+ ath12k_warn(ab, "Invalid module id %d\n",
+ le32_to_cpu(dma_caps[i].module_id));
+ ret = -EINVAL;
+ goto free_dir_buff;
+ }
+
+ dir_buff_caps[i].id = le32_to_cpu(dma_caps[i].module_id);
+ dir_buff_caps[i].pdev_id =
+ DP_HW2SW_MACID(le32_to_cpu(dma_caps[i].pdev_id));
+ dir_buff_caps[i].min_elem = le32_to_cpu(dma_caps[i].min_elem);
+ dir_buff_caps[i].min_buf_sz = le32_to_cpu(dma_caps[i].min_buf_sz);
+ dir_buff_caps[i].min_buf_align = le32_to_cpu(dma_caps[i].min_buf_align);
+ }
+
+ return 0;
+
+free_dir_buff:
+ ath12k_wmi_free_dbring_caps(ab);
+ return ret;
+}
+
+static int ath12k_wmi_svc_rdy_ext_parse(struct ath12k_base *ab,
+ u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ struct ath12k_wmi_pdev *wmi_handle = &ab->wmi_ab.wmi[0];
+ struct ath12k_wmi_svc_rdy_ext_parse *svc_rdy_ext = data;
+ int ret;
+
+ switch (tag) {
+ case WMI_TAG_SERVICE_READY_EXT_EVENT:
+ ret = ath12k_pull_svc_ready_ext(wmi_handle, ptr,
+ &svc_rdy_ext->arg);
+ if (ret) {
+ ath12k_warn(ab, "unable to extract ext params\n");
+ return ret;
+ }
+ break;
+
+ case WMI_TAG_SOC_MAC_PHY_HW_MODE_CAPS:
+ svc_rdy_ext->hw_caps = ptr;
+ svc_rdy_ext->arg.num_hw_modes =
+ le32_to_cpu(svc_rdy_ext->hw_caps->num_hw_modes);
+ break;
+
+ case WMI_TAG_SOC_HAL_REG_CAPABILITIES:
+ ret = ath12k_wmi_ext_soc_hal_reg_caps_parse(ab, len, ptr,
+ svc_rdy_ext);
+ if (ret)
+ return ret;
+ break;
+
+ case WMI_TAG_ARRAY_STRUCT:
+ if (!svc_rdy_ext->hw_mode_done) {
+ ret = ath12k_wmi_hw_mode_caps(ab, len, ptr, svc_rdy_ext);
+ if (ret)
+ return ret;
+
+ svc_rdy_ext->hw_mode_done = true;
+ } else if (!svc_rdy_ext->mac_phy_done) {
+ svc_rdy_ext->n_mac_phy_caps = 0;
+ ret = ath12k_wmi_tlv_iter(ab, ptr, len,
+ ath12k_wmi_mac_phy_caps_parse,
+ svc_rdy_ext);
+ if (ret) {
+ ath12k_warn(ab, "failed to parse tlv %d\n", ret);
+ return ret;
+ }
+
+ svc_rdy_ext->mac_phy_done = true;
+ } else if (!svc_rdy_ext->ext_hal_reg_done) {
+ ret = ath12k_wmi_ext_hal_reg_caps(ab, len, ptr, svc_rdy_ext);
+ if (ret)
+ return ret;
+
+ svc_rdy_ext->ext_hal_reg_done = true;
+ } else if (!svc_rdy_ext->mac_phy_chainmask_combo_done) {
+ svc_rdy_ext->mac_phy_chainmask_combo_done = true;
+ } else if (!svc_rdy_ext->mac_phy_chainmask_cap_done) {
+ svc_rdy_ext->mac_phy_chainmask_cap_done = true;
+ } else if (!svc_rdy_ext->oem_dma_ring_cap_done) {
+ svc_rdy_ext->oem_dma_ring_cap_done = true;
+ } else if (!svc_rdy_ext->dma_ring_cap_done) {
+ ret = ath12k_wmi_dma_ring_caps(ab, len, ptr,
+ &svc_rdy_ext->dma_caps_parse);
+ if (ret)
+ return ret;
+
+ svc_rdy_ext->dma_ring_cap_done = true;
+ }
+ break;
+
+ default:
+ break;
+ }
+ return 0;
+}
+
+static int ath12k_service_ready_ext_event(struct ath12k_base *ab,
+ struct sk_buff *skb)
+{
+ struct ath12k_wmi_svc_rdy_ext_parse svc_rdy_ext = { };
+ int ret;
+
+ ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
+ ath12k_wmi_svc_rdy_ext_parse,
+ &svc_rdy_ext);
+ if (ret) {
+ ath12k_warn(ab, "failed to parse tlv %d\n", ret);
+ goto err;
+ }
+
+ if (!test_bit(WMI_TLV_SERVICE_EXT2_MSG, ab->wmi_ab.svc_map))
+ complete(&ab->wmi_ab.service_ready);
+
+ kfree(svc_rdy_ext.mac_phy_caps);
+ return 0;
+
+err:
+ ath12k_wmi_free_dbring_caps(ab);
+ return ret;
+}
+
+static int ath12k_wmi_svc_rdy_ext2_parse(struct ath12k_base *ab,
+ u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ struct ath12k_wmi_svc_rdy_ext2_parse *parse = data;
+ int ret;
+
+ switch (tag) {
+ case WMI_TAG_ARRAY_STRUCT:
+ if (!parse->dma_ring_cap_done) {
+ ret = ath12k_wmi_dma_ring_caps(ab, len, ptr,
+ &parse->dma_caps_parse);
+ if (ret)
+ return ret;
+
+ parse->dma_ring_cap_done = true;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int ath12k_service_ready_ext2_event(struct ath12k_base *ab,
+ struct sk_buff *skb)
+{
+ struct ath12k_wmi_svc_rdy_ext2_parse svc_rdy_ext2 = { };
+ int ret;
+
+ ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
+ ath12k_wmi_svc_rdy_ext2_parse,
+ &svc_rdy_ext2);
+ if (ret) {
+ ath12k_warn(ab, "failed to parse ext2 event tlv %d\n", ret);
+ goto err;
+ }
+
+ complete(&ab->wmi_ab.service_ready);
+
+ return 0;
+
+err:
+ ath12k_wmi_free_dbring_caps(ab);
+ return ret;
+}
+
+static int ath12k_pull_vdev_start_resp_tlv(struct ath12k_base *ab, struct sk_buff *skb,
+ struct wmi_vdev_start_resp_event *vdev_rsp)
+{
+ const void **tb;
+ const struct wmi_vdev_start_resp_event *ev;
+ int ret;
+
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TAG_VDEV_START_RESPONSE_EVENT];
+ if (!ev) {
+ ath12k_warn(ab, "failed to fetch vdev start resp ev");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ *vdev_rsp = *ev;
+
+ kfree(tb);
+ return 0;
+}
+
+static struct ath12k_reg_rule
+*create_ext_reg_rules_from_wmi(u32 num_reg_rules,
+ struct ath12k_wmi_reg_rule_ext_params *wmi_reg_rule)
+{
+ struct ath12k_reg_rule *reg_rule_ptr;
+ u32 count;
+
+ reg_rule_ptr = kzalloc((num_reg_rules * sizeof(*reg_rule_ptr)),
+ GFP_ATOMIC);
+
+ if (!reg_rule_ptr)
+ return NULL;
+
+ for (count = 0; count < num_reg_rules; count++) {
+ reg_rule_ptr[count].start_freq =
+ le32_get_bits(wmi_reg_rule[count].freq_info,
+ REG_RULE_START_FREQ);
+ reg_rule_ptr[count].end_freq =
+ le32_get_bits(wmi_reg_rule[count].freq_info,
+ REG_RULE_END_FREQ);
+ reg_rule_ptr[count].max_bw =
+ le32_get_bits(wmi_reg_rule[count].bw_pwr_info,
+ REG_RULE_MAX_BW);
+ reg_rule_ptr[count].reg_power =
+ le32_get_bits(wmi_reg_rule[count].bw_pwr_info,
+ REG_RULE_REG_PWR);
+ reg_rule_ptr[count].ant_gain =
+ le32_get_bits(wmi_reg_rule[count].bw_pwr_info,
+ REG_RULE_ANT_GAIN);
+ reg_rule_ptr[count].flags =
+ le32_get_bits(wmi_reg_rule[count].flag_info,
+ REG_RULE_FLAGS);
+ reg_rule_ptr[count].psd_flag =
+ le32_get_bits(wmi_reg_rule[count].psd_power_info,
+ REG_RULE_PSD_INFO);
+ reg_rule_ptr[count].psd_eirp =
+ le32_get_bits(wmi_reg_rule[count].psd_power_info,
+ REG_RULE_PSD_EIRP);
+ }
+
+ return reg_rule_ptr;
+}
+
+static int ath12k_pull_reg_chan_list_ext_update_ev(struct ath12k_base *ab,
+ struct sk_buff *skb,
+ struct ath12k_reg_info *reg_info)
+{
+ const void **tb;
+ const struct wmi_reg_chan_list_cc_ext_event *ev;
+ struct ath12k_wmi_reg_rule_ext_params *ext_wmi_reg_rule;
+ u32 num_2g_reg_rules, num_5g_reg_rules;
+ u32 num_6g_reg_rules_ap[WMI_REG_CURRENT_MAX_AP_TYPE];
+ u32 num_6g_reg_rules_cl[WMI_REG_CURRENT_MAX_AP_TYPE][WMI_REG_MAX_CLIENT_TYPE];
+ u32 total_reg_rules = 0;
+ int ret, i, j;
+
+ ath12k_dbg(ab, ATH12K_DBG_WMI, "processing regulatory ext channel list\n");
+
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TAG_REG_CHAN_LIST_CC_EXT_EVENT];
+ if (!ev) {
+ ath12k_warn(ab, "failed to fetch reg chan list ext update ev\n");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ reg_info->num_2g_reg_rules = le32_to_cpu(ev->num_2g_reg_rules);
+ reg_info->num_5g_reg_rules = le32_to_cpu(ev->num_5g_reg_rules);
+ reg_info->num_6g_reg_rules_ap[WMI_REG_INDOOR_AP] =
+ le32_to_cpu(ev->num_6g_reg_rules_ap_lpi);
+ reg_info->num_6g_reg_rules_ap[WMI_REG_STD_POWER_AP] =
+ le32_to_cpu(ev->num_6g_reg_rules_ap_sp);
+ reg_info->num_6g_reg_rules_ap[WMI_REG_VLP_AP] =
+ le32_to_cpu(ev->num_6g_reg_rules_ap_vlp);
+
+ for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
+ reg_info->num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i] =
+ le32_to_cpu(ev->num_6g_reg_rules_cl_lpi[i]);
+ reg_info->num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i] =
+ le32_to_cpu(ev->num_6g_reg_rules_cl_sp[i]);
+ reg_info->num_6g_reg_rules_cl[WMI_REG_VLP_AP][i] =
+ le32_to_cpu(ev->num_6g_reg_rules_cl_vlp[i]);
+ }
+
+ num_2g_reg_rules = reg_info->num_2g_reg_rules;
+ total_reg_rules += num_2g_reg_rules;
+ num_5g_reg_rules = reg_info->num_5g_reg_rules;
+ total_reg_rules += num_5g_reg_rules;
+
+ if (num_2g_reg_rules > MAX_REG_RULES || num_5g_reg_rules > MAX_REG_RULES) {
+ ath12k_warn(ab, "Num reg rules for 2G/5G exceeds max limit (num_2g_reg_rules: %d num_5g_reg_rules: %d max_rules: %d)\n",
+ num_2g_reg_rules, num_5g_reg_rules, MAX_REG_RULES);
+ kfree(tb);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++) {
+ num_6g_reg_rules_ap[i] = reg_info->num_6g_reg_rules_ap[i];
+
+ if (num_6g_reg_rules_ap[i] > MAX_6G_REG_RULES) {
+ ath12k_warn(ab, "Num 6G reg rules for AP mode(%d) exceeds max limit (num_6g_reg_rules_ap: %d, max_rules: %d)\n",
+ i, num_6g_reg_rules_ap[i], MAX_6G_REG_RULES);
+ kfree(tb);
+ return -EINVAL;
+ }
+
+ total_reg_rules += num_6g_reg_rules_ap[i];
+ }
+
+ for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
+ num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i] =
+ reg_info->num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i];
+ total_reg_rules += num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i];
+
+ num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i] =
+ reg_info->num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i];
+ total_reg_rules += num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i];
+
+ num_6g_reg_rules_cl[WMI_REG_VLP_AP][i] =
+ reg_info->num_6g_reg_rules_cl[WMI_REG_VLP_AP][i];
+ total_reg_rules += num_6g_reg_rules_cl[WMI_REG_VLP_AP][i];
+
+ if (num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i] > MAX_6G_REG_RULES ||
+ num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i] > MAX_6G_REG_RULES ||
+ num_6g_reg_rules_cl[WMI_REG_VLP_AP][i] > MAX_6G_REG_RULES) {
+ ath12k_warn(ab, "Num 6g client reg rules exceeds max limit, for client(type: %d)\n",
+ i);
+ kfree(tb);
+ return -EINVAL;
+ }
+ }
+
+ if (!total_reg_rules) {
+ ath12k_warn(ab, "No reg rules available\n");
+ kfree(tb);
+ return -EINVAL;
+ }
+
+ memcpy(reg_info->alpha2, &ev->alpha2, REG_ALPHA2_LEN);
+
+ /* FIXME: Currently FW includes 6G reg rule also in 5G rule
+ * list for country US.
+ * Having same 6G reg rule in 5G and 6G rules list causes
+ * intersect check to be true, and same rules will be shown
+ * multiple times in iw cmd. So added hack below to avoid
+ * parsing 6G rule from 5G reg rule list, and this can be
+ * removed later, after FW updates to remove 6G reg rule
+ * from 5G rules list.
+ */
+ if (memcmp(reg_info->alpha2, "US", 2) == 0) {
+ reg_info->num_5g_reg_rules = REG_US_5G_NUM_REG_RULES;
+ num_5g_reg_rules = reg_info->num_5g_reg_rules;
+ }
+
+ reg_info->dfs_region = le32_to_cpu(ev->dfs_region);
+ reg_info->phybitmap = le32_to_cpu(ev->phybitmap);
+ reg_info->num_phy = le32_to_cpu(ev->num_phy);
+ reg_info->phy_id = le32_to_cpu(ev->phy_id);
+ reg_info->ctry_code = le32_to_cpu(ev->country_id);
+ reg_info->reg_dmn_pair = le32_to_cpu(ev->domain_code);
+
+ switch (le32_to_cpu(ev->status_code)) {
+ case WMI_REG_SET_CC_STATUS_PASS:
+ reg_info->status_code = REG_SET_CC_STATUS_PASS;
+ break;
+ case WMI_REG_CURRENT_ALPHA2_NOT_FOUND:
+ reg_info->status_code = REG_CURRENT_ALPHA2_NOT_FOUND;
+ break;
+ case WMI_REG_INIT_ALPHA2_NOT_FOUND:
+ reg_info->status_code = REG_INIT_ALPHA2_NOT_FOUND;
+ break;
+ case WMI_REG_SET_CC_CHANGE_NOT_ALLOWED:
+ reg_info->status_code = REG_SET_CC_CHANGE_NOT_ALLOWED;
+ break;
+ case WMI_REG_SET_CC_STATUS_NO_MEMORY:
+ reg_info->status_code = REG_SET_CC_STATUS_NO_MEMORY;
+ break;
+ case WMI_REG_SET_CC_STATUS_FAIL:
+ reg_info->status_code = REG_SET_CC_STATUS_FAIL;
+ break;
+ }
+
+ reg_info->is_ext_reg_event = true;
+
+ reg_info->min_bw_2g = le32_to_cpu(ev->min_bw_2g);
+ reg_info->max_bw_2g = le32_to_cpu(ev->max_bw_2g);
+ reg_info->min_bw_5g = le32_to_cpu(ev->min_bw_5g);
+ reg_info->max_bw_5g = le32_to_cpu(ev->max_bw_5g);
+ reg_info->min_bw_6g_ap[WMI_REG_INDOOR_AP] = le32_to_cpu(ev->min_bw_6g_ap_lpi);
+ reg_info->max_bw_6g_ap[WMI_REG_INDOOR_AP] = le32_to_cpu(ev->max_bw_6g_ap_lpi);
+ reg_info->min_bw_6g_ap[WMI_REG_STD_POWER_AP] = le32_to_cpu(ev->min_bw_6g_ap_sp);
+ reg_info->max_bw_6g_ap[WMI_REG_STD_POWER_AP] = le32_to_cpu(ev->max_bw_6g_ap_sp);
+ reg_info->min_bw_6g_ap[WMI_REG_VLP_AP] = le32_to_cpu(ev->min_bw_6g_ap_vlp);
+ reg_info->max_bw_6g_ap[WMI_REG_VLP_AP] = le32_to_cpu(ev->max_bw_6g_ap_vlp);
+
+ for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
+ reg_info->min_bw_6g_client[WMI_REG_INDOOR_AP][i] =
+ le32_to_cpu(ev->min_bw_6g_client_lpi[i]);
+ reg_info->max_bw_6g_client[WMI_REG_INDOOR_AP][i] =
+ le32_to_cpu(ev->max_bw_6g_client_lpi[i]);
+ reg_info->min_bw_6g_client[WMI_REG_STD_POWER_AP][i] =
+ le32_to_cpu(ev->min_bw_6g_client_sp[i]);
+ reg_info->max_bw_6g_client[WMI_REG_STD_POWER_AP][i] =
+ le32_to_cpu(ev->max_bw_6g_client_sp[i]);
+ reg_info->min_bw_6g_client[WMI_REG_VLP_AP][i] =
+ le32_to_cpu(ev->min_bw_6g_client_vlp[i]);
+ reg_info->max_bw_6g_client[WMI_REG_VLP_AP][i] =
+ le32_to_cpu(ev->max_bw_6g_client_vlp[i]);
+ }
+
+ ath12k_dbg(ab, ATH12K_DBG_WMI,
+ "%s:cc_ext %s dsf %d BW: min_2g %d max_2g %d min_5g %d max_5g %d",
+ __func__, reg_info->alpha2, reg_info->dfs_region,
+ reg_info->min_bw_2g, reg_info->max_bw_2g,
+ reg_info->min_bw_5g, reg_info->max_bw_5g);
+
+ ath12k_dbg(ab, ATH12K_DBG_WMI,
+ "num_2g_reg_rules %d num_5g_reg_rules %d",
+ num_2g_reg_rules, num_5g_reg_rules);
+
+ ath12k_dbg(ab, ATH12K_DBG_WMI,
+ "num_6g_reg_rules_ap_lpi: %d num_6g_reg_rules_ap_sp: %d num_6g_reg_rules_ap_vlp: %d",
+ num_6g_reg_rules_ap[WMI_REG_INDOOR_AP],
+ num_6g_reg_rules_ap[WMI_REG_STD_POWER_AP],
+ num_6g_reg_rules_ap[WMI_REG_VLP_AP]);
+
+ ath12k_dbg(ab, ATH12K_DBG_WMI,
+ "6g Regular client: num_6g_reg_rules_lpi: %d num_6g_reg_rules_sp: %d num_6g_reg_rules_vlp: %d",
+ num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][WMI_REG_DEFAULT_CLIENT],
+ num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][WMI_REG_DEFAULT_CLIENT],
+ num_6g_reg_rules_cl[WMI_REG_VLP_AP][WMI_REG_DEFAULT_CLIENT]);
+
+ ath12k_dbg(ab, ATH12K_DBG_WMI,
+ "6g Subordinate client: num_6g_reg_rules_lpi: %d num_6g_reg_rules_sp: %d num_6g_reg_rules_vlp: %d",
+ num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][WMI_REG_SUBORDINATE_CLIENT],
+ num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][WMI_REG_SUBORDINATE_CLIENT],
+ num_6g_reg_rules_cl[WMI_REG_VLP_AP][WMI_REG_SUBORDINATE_CLIENT]);
+
+ ext_wmi_reg_rule =
+ (struct ath12k_wmi_reg_rule_ext_params *)((u8 *)ev
+ + sizeof(*ev)
+ + sizeof(struct wmi_tlv));
+
+ if (num_2g_reg_rules) {
+ reg_info->reg_rules_2g_ptr =
+ create_ext_reg_rules_from_wmi(num_2g_reg_rules,
+ ext_wmi_reg_rule);
+
+ if (!reg_info->reg_rules_2g_ptr) {
+ kfree(tb);
+ ath12k_warn(ab, "Unable to Allocate memory for 2g rules\n");
+ return -ENOMEM;
+ }
+ }
+
+ if (num_5g_reg_rules) {
+ ext_wmi_reg_rule += num_2g_reg_rules;
+ reg_info->reg_rules_5g_ptr =
+ create_ext_reg_rules_from_wmi(num_5g_reg_rules,
+ ext_wmi_reg_rule);
+
+ if (!reg_info->reg_rules_5g_ptr) {
+ kfree(tb);
+ ath12k_warn(ab, "Unable to Allocate memory for 5g rules\n");
+ return -ENOMEM;
+ }
+ }
+
+ ext_wmi_reg_rule += num_5g_reg_rules;
+
+ for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++) {
+ reg_info->reg_rules_6g_ap_ptr[i] =
+ create_ext_reg_rules_from_wmi(num_6g_reg_rules_ap[i],
+ ext_wmi_reg_rule);
+
+ if (!reg_info->reg_rules_6g_ap_ptr[i]) {
+ kfree(tb);
+ ath12k_warn(ab, "Unable to Allocate memory for 6g ap rules\n");
+ return -ENOMEM;
+ }
+
+ ext_wmi_reg_rule += num_6g_reg_rules_ap[i];
+ }
+
+ for (j = 0; j < WMI_REG_CURRENT_MAX_AP_TYPE; j++) {
+ for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
+ reg_info->reg_rules_6g_client_ptr[j][i] =
+ create_ext_reg_rules_from_wmi(num_6g_reg_rules_cl[j][i],
+ ext_wmi_reg_rule);
+
+ if (!reg_info->reg_rules_6g_client_ptr[j][i]) {
+ kfree(tb);
+ ath12k_warn(ab, "Unable to Allocate memory for 6g client rules\n");
+ return -ENOMEM;
+ }
+
+ ext_wmi_reg_rule += num_6g_reg_rules_cl[j][i];
+ }
+ }
+
+ reg_info->client_type = le32_to_cpu(ev->client_type);
+ reg_info->rnr_tpe_usable = ev->rnr_tpe_usable;
+ reg_info->unspecified_ap_usable = ev->unspecified_ap_usable;
+ reg_info->domain_code_6g_ap[WMI_REG_INDOOR_AP] =
+ le32_to_cpu(ev->domain_code_6g_ap_lpi);
+ reg_info->domain_code_6g_ap[WMI_REG_STD_POWER_AP] =
+ le32_to_cpu(ev->domain_code_6g_ap_sp);
+ reg_info->domain_code_6g_ap[WMI_REG_VLP_AP] =
+ le32_to_cpu(ev->domain_code_6g_ap_vlp);
+
+ for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
+ reg_info->domain_code_6g_client[WMI_REG_INDOOR_AP][i] =
+ le32_to_cpu(ev->domain_code_6g_client_lpi[i]);
+ reg_info->domain_code_6g_client[WMI_REG_STD_POWER_AP][i] =
+ le32_to_cpu(ev->domain_code_6g_client_sp[i]);
+ reg_info->domain_code_6g_client[WMI_REG_VLP_AP][i] =
+ le32_to_cpu(ev->domain_code_6g_client_vlp[i]);
+ }
+
+ reg_info->domain_code_6g_super_id = le32_to_cpu(ev->domain_code_6g_super_id);
+
+ ath12k_dbg(ab, ATH12K_DBG_WMI, "6g client_type: %d domain_code_6g_super_id: %d",
+ reg_info->client_type, reg_info->domain_code_6g_super_id);
+
+ ath12k_dbg(ab, ATH12K_DBG_WMI, "processed regulatory ext channel list\n");
+
+ kfree(tb);
+ return 0;
+}
+
+static int ath12k_pull_peer_del_resp_ev(struct ath12k_base *ab, struct sk_buff *skb,
+ struct wmi_peer_delete_resp_event *peer_del_resp)
+{
+ const void **tb;
+ const struct wmi_peer_delete_resp_event *ev;
+ int ret;
+
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TAG_PEER_DELETE_RESP_EVENT];
+ if (!ev) {
+ ath12k_warn(ab, "failed to fetch peer delete resp ev");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ memset(peer_del_resp, 0, sizeof(*peer_del_resp));
+
+ peer_del_resp->vdev_id = ev->vdev_id;
+ ether_addr_copy(peer_del_resp->peer_macaddr.addr,
+ ev->peer_macaddr.addr);
+
+ kfree(tb);
+ return 0;
+}
+
+static int ath12k_pull_vdev_del_resp_ev(struct ath12k_base *ab,
+ struct sk_buff *skb,
+ u32 *vdev_id)
+{
+ const void **tb;
+ const struct wmi_vdev_delete_resp_event *ev;
+ int ret;
+
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TAG_VDEV_DELETE_RESP_EVENT];
+ if (!ev) {
+ ath12k_warn(ab, "failed to fetch vdev delete resp ev");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ *vdev_id = le32_to_cpu(ev->vdev_id);
+
+ kfree(tb);
+ return 0;
+}
+
+static int ath12k_pull_bcn_tx_status_ev(struct ath12k_base *ab, void *evt_buf,
+ u32 len, u32 *vdev_id,
+ u32 *tx_status)
+{
+ const void **tb;
+ const struct wmi_bcn_tx_status_event *ev;
+ int ret;
+
+ tb = ath12k_wmi_tlv_parse_alloc(ab, evt_buf, len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TAG_OFFLOAD_BCN_TX_STATUS_EVENT];
+ if (!ev) {
+ ath12k_warn(ab, "failed to fetch bcn tx status ev");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ *vdev_id = le32_to_cpu(ev->vdev_id);
+ *tx_status = le32_to_cpu(ev->tx_status);
+
+ kfree(tb);
+ return 0;
+}
+
+static int ath12k_pull_vdev_stopped_param_tlv(struct ath12k_base *ab, struct sk_buff *skb,
+ u32 *vdev_id)
+{
+ const void **tb;
+ const struct wmi_vdev_stopped_event *ev;
+ int ret;
+
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TAG_VDEV_STOPPED_EVENT];
+ if (!ev) {
+ ath12k_warn(ab, "failed to fetch vdev stop ev");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ *vdev_id = le32_to_cpu(ev->vdev_id);
+
+ kfree(tb);
+ return 0;
+}
+
+static int ath12k_wmi_tlv_mgmt_rx_parse(struct ath12k_base *ab,
+ u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ struct wmi_tlv_mgmt_rx_parse *parse = data;
+
+ switch (tag) {
+ case WMI_TAG_MGMT_RX_HDR:
+ parse->fixed = ptr;
+ break;
+ case WMI_TAG_ARRAY_BYTE:
+ if (!parse->frame_buf_done) {
+ parse->frame_buf = ptr;
+ parse->frame_buf_done = true;
+ }
+ break;
+ }
+ return 0;
+}
+
+static int ath12k_pull_mgmt_rx_params_tlv(struct ath12k_base *ab,
+ struct sk_buff *skb,
+ struct ath12k_wmi_mgmt_rx_arg *hdr)
+{
+ struct wmi_tlv_mgmt_rx_parse parse = { };
+ const struct ath12k_wmi_mgmt_rx_params *ev;
+ const u8 *frame;
+ int i, ret;
+
+ ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
+ ath12k_wmi_tlv_mgmt_rx_parse,
+ &parse);
+ if (ret) {
+ ath12k_warn(ab, "failed to parse mgmt rx tlv %d\n", ret);
+ return ret;
+ }
+
+ ev = parse.fixed;
+ frame = parse.frame_buf;
+
+ if (!ev || !frame) {
+ ath12k_warn(ab, "failed to fetch mgmt rx hdr");
+ return -EPROTO;
+ }
+
+ hdr->pdev_id = le32_to_cpu(ev->pdev_id);
+ hdr->chan_freq = le32_to_cpu(ev->chan_freq);
+ hdr->channel = le32_to_cpu(ev->channel);
+ hdr->snr = le32_to_cpu(ev->snr);
+ hdr->rate = le32_to_cpu(ev->rate);
+ hdr->phy_mode = le32_to_cpu(ev->phy_mode);
+ hdr->buf_len = le32_to_cpu(ev->buf_len);
+ hdr->status = le32_to_cpu(ev->status);
+ hdr->flags = le32_to_cpu(ev->flags);
+ hdr->rssi = a_sle32_to_cpu(ev->rssi);
+ hdr->tsf_delta = le32_to_cpu(ev->tsf_delta);
+
+ for (i = 0; i < ATH_MAX_ANTENNA; i++)
+ hdr->rssi_ctl[i] = le32_to_cpu(ev->rssi_ctl[i]);
+
+ if (skb->len < (frame - skb->data) + hdr->buf_len) {
+ ath12k_warn(ab, "invalid length in mgmt rx hdr ev");
+ return -EPROTO;
+ }
+
+ /* shift the sk_buff to point to `frame` */
+ skb_trim(skb, 0);
+ skb_put(skb, frame - skb->data);
+ skb_pull(skb, frame - skb->data);
+ skb_put(skb, hdr->buf_len);
+
+ return 0;
+}
+
+static int wmi_process_mgmt_tx_comp(struct ath12k *ar, u32 desc_id,
+ u32 status)
+{
+ struct sk_buff *msdu;
+ struct ieee80211_tx_info *info;
+ struct ath12k_skb_cb *skb_cb;
+
+ spin_lock_bh(&ar->txmgmt_idr_lock);
+ msdu = idr_find(&ar->txmgmt_idr, desc_id);
+
+ if (!msdu) {
+ ath12k_warn(ar->ab, "received mgmt tx compl for invalid msdu_id: %d\n",
+ desc_id);
+ spin_unlock_bh(&ar->txmgmt_idr_lock);
+ return -ENOENT;
+ }
+
+ idr_remove(&ar->txmgmt_idr, desc_id);
+ spin_unlock_bh(&ar->txmgmt_idr_lock);
+
+ skb_cb = ATH12K_SKB_CB(msdu);
+ dma_unmap_single(ar->ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
+
+ info = IEEE80211_SKB_CB(msdu);
+ if ((!(info->flags & IEEE80211_TX_CTL_NO_ACK)) && !status)
+ info->flags |= IEEE80211_TX_STAT_ACK;
+
+ ieee80211_tx_status_irqsafe(ar->hw, msdu);
+
+ /* WARN when we received this event without doing any mgmt tx */
+ if (atomic_dec_if_positive(&ar->num_pending_mgmt_tx) < 0)
+ WARN_ON_ONCE(1);
+
+ return 0;
+}
+
+static int ath12k_pull_mgmt_tx_compl_param_tlv(struct ath12k_base *ab,
+ struct sk_buff *skb,
+ struct wmi_mgmt_tx_compl_event *param)
+{
+ const void **tb;
+ const struct wmi_mgmt_tx_compl_event *ev;
+ int ret;
+
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TAG_MGMT_TX_COMPL_EVENT];
+ if (!ev) {
+ ath12k_warn(ab, "failed to fetch mgmt tx compl ev");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ param->pdev_id = ev->pdev_id;
+ param->desc_id = ev->desc_id;
+ param->status = ev->status;
+
+ kfree(tb);
+ return 0;
+}
+
+static void ath12k_wmi_event_scan_started(struct ath12k *ar)
+{
+ lockdep_assert_held(&ar->data_lock);
+
+ switch (ar->scan.state) {
+ case ATH12K_SCAN_IDLE:
+ case ATH12K_SCAN_RUNNING:
+ case ATH12K_SCAN_ABORTING:
+ ath12k_warn(ar->ab, "received scan started event in an invalid scan state: %s (%d)\n",
+ ath12k_scan_state_str(ar->scan.state),
+ ar->scan.state);
+ break;
+ case ATH12K_SCAN_STARTING:
+ ar->scan.state = ATH12K_SCAN_RUNNING;
+ complete(&ar->scan.started);
+ break;
+ }
+}
+
+static void ath12k_wmi_event_scan_start_failed(struct ath12k *ar)
+{
+ lockdep_assert_held(&ar->data_lock);
+
+ switch (ar->scan.state) {
+ case ATH12K_SCAN_IDLE:
+ case ATH12K_SCAN_RUNNING:
+ case ATH12K_SCAN_ABORTING:
+ ath12k_warn(ar->ab, "received scan start failed event in an invalid scan state: %s (%d)\n",
+ ath12k_scan_state_str(ar->scan.state),
+ ar->scan.state);
+ break;
+ case ATH12K_SCAN_STARTING:
+ complete(&ar->scan.started);
+ __ath12k_mac_scan_finish(ar);
+ break;
+ }
+}
+
+static void ath12k_wmi_event_scan_completed(struct ath12k *ar)
+{
+ lockdep_assert_held(&ar->data_lock);
+
+ switch (ar->scan.state) {
+ case ATH12K_SCAN_IDLE:
+ case ATH12K_SCAN_STARTING:
+ /* One suspected reason scan can be completed while starting is
+ * if firmware fails to deliver all scan events to the host,
+ * e.g. when transport pipe is full. This has been observed
+ * with spectral scan phyerr events starving wmi transport
+ * pipe. In such case the "scan completed" event should be (and
+ * is) ignored by the host as it may be just firmware's scan
+ * state machine recovering.
+ */
+ ath12k_warn(ar->ab, "received scan completed event in an invalid scan state: %s (%d)\n",
+ ath12k_scan_state_str(ar->scan.state),
+ ar->scan.state);
+ break;
+ case ATH12K_SCAN_RUNNING:
+ case ATH12K_SCAN_ABORTING:
+ __ath12k_mac_scan_finish(ar);
+ break;
+ }
+}
+
+static void ath12k_wmi_event_scan_bss_chan(struct ath12k *ar)
+{
+ lockdep_assert_held(&ar->data_lock);
+
+ switch (ar->scan.state) {
+ case ATH12K_SCAN_IDLE:
+ case ATH12K_SCAN_STARTING:
+ ath12k_warn(ar->ab, "received scan bss chan event in an invalid scan state: %s (%d)\n",
+ ath12k_scan_state_str(ar->scan.state),
+ ar->scan.state);
+ break;
+ case ATH12K_SCAN_RUNNING:
+ case ATH12K_SCAN_ABORTING:
+ ar->scan_channel = NULL;
+ break;
+ }
+}
+
+static void ath12k_wmi_event_scan_foreign_chan(struct ath12k *ar, u32 freq)
+{
+ lockdep_assert_held(&ar->data_lock);
+
+ switch (ar->scan.state) {
+ case ATH12K_SCAN_IDLE:
+ case ATH12K_SCAN_STARTING:
+ ath12k_warn(ar->ab, "received scan foreign chan event in an invalid scan state: %s (%d)\n",
+ ath12k_scan_state_str(ar->scan.state),
+ ar->scan.state);
+ break;
+ case ATH12K_SCAN_RUNNING:
+ case ATH12K_SCAN_ABORTING:
+ ar->scan_channel = ieee80211_get_channel(ar->hw->wiphy, freq);
+ break;
+ }
+}
+
+static const char *
+ath12k_wmi_event_scan_type_str(enum wmi_scan_event_type type,
+ enum wmi_scan_completion_reason reason)
+{
+ switch (type) {
+ case WMI_SCAN_EVENT_STARTED:
+ return "started";
+ case WMI_SCAN_EVENT_COMPLETED:
+ switch (reason) {
+ case WMI_SCAN_REASON_COMPLETED:
+ return "completed";
+ case WMI_SCAN_REASON_CANCELLED:
+ return "completed [cancelled]";
+ case WMI_SCAN_REASON_PREEMPTED:
+ return "completed [preempted]";
+ case WMI_SCAN_REASON_TIMEDOUT:
+ return "completed [timedout]";
+ case WMI_SCAN_REASON_INTERNAL_FAILURE:
+ return "completed [internal err]";
+ case WMI_SCAN_REASON_MAX:
+ break;
+ }
+ return "completed [unknown]";
+ case WMI_SCAN_EVENT_BSS_CHANNEL:
+ return "bss channel";
+ case WMI_SCAN_EVENT_FOREIGN_CHAN:
+ return "foreign channel";
+ case WMI_SCAN_EVENT_DEQUEUED:
+ return "dequeued";
+ case WMI_SCAN_EVENT_PREEMPTED:
+ return "preempted";
+ case WMI_SCAN_EVENT_START_FAILED:
+ return "start failed";
+ case WMI_SCAN_EVENT_RESTARTED:
+ return "restarted";
+ case WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT:
+ return "foreign channel exit";
+ default:
+ return "unknown";
+ }
+}
+
+static int ath12k_pull_scan_ev(struct ath12k_base *ab, struct sk_buff *skb,
+ struct wmi_scan_event *scan_evt_param)
+{
+ const void **tb;
+ const struct wmi_scan_event *ev;
+ int ret;
+
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TAG_SCAN_EVENT];
+ if (!ev) {
+ ath12k_warn(ab, "failed to fetch scan ev");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ scan_evt_param->event_type = ev->event_type;
+ scan_evt_param->reason = ev->reason;
+ scan_evt_param->channel_freq = ev->channel_freq;
+ scan_evt_param->scan_req_id = ev->scan_req_id;
+ scan_evt_param->scan_id = ev->scan_id;
+ scan_evt_param->vdev_id = ev->vdev_id;
+ scan_evt_param->tsf_timestamp = ev->tsf_timestamp;
+
+ kfree(tb);
+ return 0;
+}
+
+static int ath12k_pull_peer_sta_kickout_ev(struct ath12k_base *ab, struct sk_buff *skb,
+ struct wmi_peer_sta_kickout_arg *arg)
+{
+ const void **tb;
+ const struct wmi_peer_sta_kickout_event *ev;
+ int ret;
+
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TAG_PEER_STA_KICKOUT_EVENT];
+ if (!ev) {
+ ath12k_warn(ab, "failed to fetch peer sta kickout ev");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ arg->mac_addr = ev->peer_macaddr.addr;
+
+ kfree(tb);
+ return 0;
+}
+
+static int ath12k_pull_roam_ev(struct ath12k_base *ab, struct sk_buff *skb,
+ struct wmi_roam_event *roam_ev)
+{
+ const void **tb;
+ const struct wmi_roam_event *ev;
+ int ret;
+
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TAG_ROAM_EVENT];
+ if (!ev) {
+ ath12k_warn(ab, "failed to fetch roam ev");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ roam_ev->vdev_id = ev->vdev_id;
+ roam_ev->reason = ev->reason;
+ roam_ev->rssi = ev->rssi;
+
+ kfree(tb);
+ return 0;
+}
+
+static int freq_to_idx(struct ath12k *ar, int freq)
+{
+ struct ieee80211_supported_band *sband;
+ int band, ch, idx = 0;
+
+ for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) {
+ sband = ar->hw->wiphy->bands[band];
+ if (!sband)
+ continue;
+
+ for (ch = 0; ch < sband->n_channels; ch++, idx++)
+ if (sband->channels[ch].center_freq == freq)
+ goto exit;
+ }
+
+exit:
+ return idx;
+}
+
+static int ath12k_pull_chan_info_ev(struct ath12k_base *ab, u8 *evt_buf,
+ u32 len, struct wmi_chan_info_event *ch_info_ev)
+{
+ const void **tb;
+ const struct wmi_chan_info_event *ev;
+ int ret;
+
+ tb = ath12k_wmi_tlv_parse_alloc(ab, evt_buf, len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TAG_CHAN_INFO_EVENT];
+ if (!ev) {
+ ath12k_warn(ab, "failed to fetch chan info ev");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ ch_info_ev->err_code = ev->err_code;
+ ch_info_ev->freq = ev->freq;
+ ch_info_ev->cmd_flags = ev->cmd_flags;
+ ch_info_ev->noise_floor = ev->noise_floor;
+ ch_info_ev->rx_clear_count = ev->rx_clear_count;
+ ch_info_ev->cycle_count = ev->cycle_count;
+ ch_info_ev->chan_tx_pwr_range = ev->chan_tx_pwr_range;
+ ch_info_ev->chan_tx_pwr_tp = ev->chan_tx_pwr_tp;
+ ch_info_ev->rx_frame_count = ev->rx_frame_count;
+ ch_info_ev->tx_frame_cnt = ev->tx_frame_cnt;
+ ch_info_ev->mac_clk_mhz = ev->mac_clk_mhz;
+ ch_info_ev->vdev_id = ev->vdev_id;
+
+ kfree(tb);
+ return 0;
+}
+
+static int
+ath12k_pull_pdev_bss_chan_info_ev(struct ath12k_base *ab, struct sk_buff *skb,
+ struct wmi_pdev_bss_chan_info_event *bss_ch_info_ev)
+{
+ const void **tb;
+ const struct wmi_pdev_bss_chan_info_event *ev;
+ int ret;
+
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TAG_PDEV_BSS_CHAN_INFO_EVENT];
+ if (!ev) {
+ ath12k_warn(ab, "failed to fetch pdev bss chan info ev");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ bss_ch_info_ev->pdev_id = ev->pdev_id;
+ bss_ch_info_ev->freq = ev->freq;
+ bss_ch_info_ev->noise_floor = ev->noise_floor;
+ bss_ch_info_ev->rx_clear_count_low = ev->rx_clear_count_low;
+ bss_ch_info_ev->rx_clear_count_high = ev->rx_clear_count_high;
+ bss_ch_info_ev->cycle_count_low = ev->cycle_count_low;
+ bss_ch_info_ev->cycle_count_high = ev->cycle_count_high;
+ bss_ch_info_ev->tx_cycle_count_low = ev->tx_cycle_count_low;
+ bss_ch_info_ev->tx_cycle_count_high = ev->tx_cycle_count_high;
+ bss_ch_info_ev->rx_cycle_count_low = ev->rx_cycle_count_low;
+ bss_ch_info_ev->rx_cycle_count_high = ev->rx_cycle_count_high;
+ bss_ch_info_ev->rx_bss_cycle_count_low = ev->rx_bss_cycle_count_low;
+ bss_ch_info_ev->rx_bss_cycle_count_high = ev->rx_bss_cycle_count_high;
+
+ kfree(tb);
+ return 0;
+}
+
+static int
+ath12k_pull_vdev_install_key_compl_ev(struct ath12k_base *ab, struct sk_buff *skb,
+ struct wmi_vdev_install_key_complete_arg *arg)
+{
+ const void **tb;
+ const struct wmi_vdev_install_key_compl_event *ev;
+ int ret;
+
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TAG_VDEV_INSTALL_KEY_COMPLETE_EVENT];
+ if (!ev) {
+ ath12k_warn(ab, "failed to fetch vdev install key compl ev");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ arg->vdev_id = le32_to_cpu(ev->vdev_id);
+ arg->macaddr = ev->peer_macaddr.addr;
+ arg->key_idx = le32_to_cpu(ev->key_idx);
+ arg->key_flags = le32_to_cpu(ev->key_flags);
+ arg->status = le32_to_cpu(ev->status);
+
+ kfree(tb);
+ return 0;
+}
+
+static int ath12k_pull_peer_assoc_conf_ev(struct ath12k_base *ab, struct sk_buff *skb,
+ struct wmi_peer_assoc_conf_arg *peer_assoc_conf)
+{
+ const void **tb;
+ const struct wmi_peer_assoc_conf_event *ev;
+ int ret;
+
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TAG_PEER_ASSOC_CONF_EVENT];
+ if (!ev) {
+ ath12k_warn(ab, "failed to fetch peer assoc conf ev");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ peer_assoc_conf->vdev_id = le32_to_cpu(ev->vdev_id);
+ peer_assoc_conf->macaddr = ev->peer_macaddr.addr;
+
+ kfree(tb);
+ return 0;
+}
+
+static int
+ath12k_pull_pdev_temp_ev(struct ath12k_base *ab, u8 *evt_buf,
+ u32 len, const struct wmi_pdev_temperature_event *ev)
+{
+ const void **tb;
+ int ret;
+
+ tb = ath12k_wmi_tlv_parse_alloc(ab, evt_buf, len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TAG_PDEV_TEMPERATURE_EVENT];
+ if (!ev) {
+ ath12k_warn(ab, "failed to fetch pdev temp ev");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ kfree(tb);
+ return 0;
+}
+
+static void ath12k_wmi_op_ep_tx_credits(struct ath12k_base *ab)
+{
+ /* try to send pending beacons first. they take priority */
+ wake_up(&ab->wmi_ab.tx_credits_wq);
+}
+
+static void ath12k_wmi_htc_tx_complete(struct ath12k_base *ab,
+ struct sk_buff *skb)
+{
+ dev_kfree_skb(skb);
+}
+
+static bool ath12k_reg_is_world_alpha(char *alpha)
+{
+ return alpha[0] == '0' && alpha[1] == '0';
+}
+
+static int ath12k_reg_chan_list_event(struct ath12k_base *ab, struct sk_buff *skb)
+{
+ struct ath12k_reg_info *reg_info = NULL;
+ struct ieee80211_regdomain *regd = NULL;
+ bool intersect = false;
+ int ret = 0, pdev_idx, i, j;
+ struct ath12k *ar;
+
+ reg_info = kzalloc(sizeof(*reg_info), GFP_ATOMIC);
+ if (!reg_info) {
+ ret = -ENOMEM;
+ goto fallback;
+ }
+
+ ret = ath12k_pull_reg_chan_list_ext_update_ev(ab, skb, reg_info);
+
+ if (ret) {
+ ath12k_warn(ab, "failed to extract regulatory info from received event\n");
+ goto fallback;
+ }
+
+ if (reg_info->status_code != REG_SET_CC_STATUS_PASS) {
+ /* In case of failure to set the requested ctry,
+ * fw retains the current regd. We print a failure info
+ * and return from here.
+ */
+ ath12k_warn(ab, "Failed to set the requested Country regulatory setting\n");
+ goto mem_free;
+ }
+
+ pdev_idx = reg_info->phy_id;
+
+ if (pdev_idx >= ab->num_radios) {
+ /* Process the event for phy0 only if single_pdev_only
+ * is true. If pdev_idx is valid but not 0, discard the
+ * event. Otherwise, it goes to fallback.
+ */
+ if (ab->hw_params->single_pdev_only &&
+ pdev_idx < ab->hw_params->num_rxmda_per_pdev)
+ goto mem_free;
+ else
+ goto fallback;
+ }
+
+ /* Avoid multiple overwrites to default regd, during core
+ * stop-start after mac registration.
+ */
+ if (ab->default_regd[pdev_idx] && !ab->new_regd[pdev_idx] &&
+ !memcmp(ab->default_regd[pdev_idx]->alpha2,
+ reg_info->alpha2, 2))
+ goto mem_free;
+
+ /* Intersect new rules with default regd if a new country setting was
+ * requested, i.e a default regd was already set during initialization
+ * and the regd coming from this event has a valid country info.
+ */
+ if (ab->default_regd[pdev_idx] &&
+ !ath12k_reg_is_world_alpha((char *)
+ ab->default_regd[pdev_idx]->alpha2) &&
+ !ath12k_reg_is_world_alpha((char *)reg_info->alpha2))
+ intersect = true;
+
+ regd = ath12k_reg_build_regd(ab, reg_info, intersect);
+ if (!regd) {
+ ath12k_warn(ab, "failed to build regd from reg_info\n");
+ goto fallback;
+ }
+
+ spin_lock(&ab->base_lock);
+ if (test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags)) {
+ /* Once mac is registered, ar is valid and all CC events from
+ * fw is considered to be received due to user requests
+ * currently.
+ * Free previously built regd before assigning the newly
+ * generated regd to ar. NULL pointer handling will be
+ * taken care by kfree itself.
+ */
+ ar = ab->pdevs[pdev_idx].ar;
+ kfree(ab->new_regd[pdev_idx]);
+ ab->new_regd[pdev_idx] = regd;
+ ieee80211_queue_work(ar->hw, &ar->regd_update_work);
+ } else {
+ /* Multiple events for the same *ar is not expected. But we
+ * can still clear any previously stored default_regd if we
+ * are receiving this event for the same radio by mistake.
+ * NULL pointer handling will be taken care by kfree itself.
+ */
+ kfree(ab->default_regd[pdev_idx]);
+ /* This regd would be applied during mac registration */
+ ab->default_regd[pdev_idx] = regd;
+ }
+ ab->dfs_region = reg_info->dfs_region;
+ spin_unlock(&ab->base_lock);
+
+ goto mem_free;
+
+fallback:
+ /* Fallback to older reg (by sending previous country setting
+ * again if fw has succeeded and we failed to process here.
+ * The Regdomain should be uniform across driver and fw. Since the
+ * FW has processed the command and sent a success status, we expect
+ * this function to succeed as well. If it doesn't, CTRY needs to be
+ * reverted at the fw and the old SCAN_CHAN_LIST cmd needs to be sent.
+ */
+ /* TODO: This is rare, but still should also be handled */
+ WARN_ON(1);
+mem_free:
+ if (reg_info) {
+ kfree(reg_info->reg_rules_2g_ptr);
+ kfree(reg_info->reg_rules_5g_ptr);
+ if (reg_info->is_ext_reg_event) {
+ for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++)
+ kfree(reg_info->reg_rules_6g_ap_ptr[i]);
+
+ for (j = 0; j < WMI_REG_CURRENT_MAX_AP_TYPE; j++)
+ for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++)
+ kfree(reg_info->reg_rules_6g_client_ptr[j][i]);
+ }
+ kfree(reg_info);
+ }
+ return ret;
+}
+
+static int ath12k_wmi_rdy_parse(struct ath12k_base *ab, u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ struct ath12k_wmi_rdy_parse *rdy_parse = data;
+ struct wmi_ready_event fixed_param;
+ struct ath12k_wmi_mac_addr_params *addr_list;
+ struct ath12k_pdev *pdev;
+ u32 num_mac_addr;
+ int i;
+
+ switch (tag) {
+ case WMI_TAG_READY_EVENT:
+ memset(&fixed_param, 0, sizeof(fixed_param));
+ memcpy(&fixed_param, (struct wmi_ready_event *)ptr,
+ min_t(u16, sizeof(fixed_param), len));
+ ab->wlan_init_status = le32_to_cpu(fixed_param.ready_event_min.status);
+ rdy_parse->num_extra_mac_addr =
+ le32_to_cpu(fixed_param.ready_event_min.num_extra_mac_addr);
+
+ ether_addr_copy(ab->mac_addr,
+ fixed_param.ready_event_min.mac_addr.addr);
+ ab->pktlog_defs_checksum = le32_to_cpu(fixed_param.pktlog_defs_checksum);
+ ab->wmi_ready = true;
+ break;
+ case WMI_TAG_ARRAY_FIXED_STRUCT:
+ addr_list = (struct ath12k_wmi_mac_addr_params *)ptr;
+ num_mac_addr = rdy_parse->num_extra_mac_addr;
+
+ if (!(ab->num_radios > 1 && num_mac_addr >= ab->num_radios))
+ break;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = &ab->pdevs[i];
+ ether_addr_copy(pdev->mac_addr, addr_list[i].addr);
+ }
+ ab->pdevs_macaddr_valid = true;
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int ath12k_ready_event(struct ath12k_base *ab, struct sk_buff *skb)
+{
+ struct ath12k_wmi_rdy_parse rdy_parse = { };
+ int ret;
+
+ ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
+ ath12k_wmi_rdy_parse, &rdy_parse);
+ if (ret) {
+ ath12k_warn(ab, "failed to parse tlv %d\n", ret);
+ return ret;
+ }
+
+ complete(&ab->wmi_ab.unified_ready);
+ return 0;
+}
+
+static void ath12k_peer_delete_resp_event(struct ath12k_base *ab, struct sk_buff *skb)
+{
+ struct wmi_peer_delete_resp_event peer_del_resp;
+ struct ath12k *ar;
+
+ if (ath12k_pull_peer_del_resp_ev(ab, skb, &peer_del_resp) != 0) {
+ ath12k_warn(ab, "failed to extract peer delete resp");
+ return;
+ }
+
+ rcu_read_lock();
+ ar = ath12k_mac_get_ar_by_vdev_id(ab, le32_to_cpu(peer_del_resp.vdev_id));
+ if (!ar) {
+ ath12k_warn(ab, "invalid vdev id in peer delete resp ev %d",
+ peer_del_resp.vdev_id);
+ rcu_read_unlock();
+ return;
+ }
+
+ complete(&ar->peer_delete_done);
+ rcu_read_unlock();
+ ath12k_dbg(ab, ATH12K_DBG_WMI, "peer delete resp for vdev id %d addr %pM\n",
+ peer_del_resp.vdev_id, peer_del_resp.peer_macaddr.addr);
+}
+
+static void ath12k_vdev_delete_resp_event(struct ath12k_base *ab,
+ struct sk_buff *skb)
+{
+ struct ath12k *ar;
+ u32 vdev_id = 0;
+
+ if (ath12k_pull_vdev_del_resp_ev(ab, skb, &vdev_id) != 0) {
+ ath12k_warn(ab, "failed to extract vdev delete resp");
+ return;
+ }
+
+ rcu_read_lock();
+ ar = ath12k_mac_get_ar_by_vdev_id(ab, vdev_id);
+ if (!ar) {
+ ath12k_warn(ab, "invalid vdev id in vdev delete resp ev %d",
+ vdev_id);
+ rcu_read_unlock();
+ return;
+ }
+
+ complete(&ar->vdev_delete_done);
+
+ rcu_read_unlock();
+
+ ath12k_dbg(ab, ATH12K_DBG_WMI, "vdev delete resp for vdev id %d\n",
+ vdev_id);
+}
+
+static const char *ath12k_wmi_vdev_resp_print(u32 vdev_resp_status)
+{
+ switch (vdev_resp_status) {
+ case WMI_VDEV_START_RESPONSE_INVALID_VDEVID:
+ return "invalid vdev id";
+ case WMI_VDEV_START_RESPONSE_NOT_SUPPORTED:
+ return "not supported";
+ case WMI_VDEV_START_RESPONSE_DFS_VIOLATION:
+ return "dfs violation";
+ case WMI_VDEV_START_RESPONSE_INVALID_REGDOMAIN:
+ return "invalid regdomain";
+ default:
+ return "unknown";
+ }
+}
+
+static void ath12k_vdev_start_resp_event(struct ath12k_base *ab, struct sk_buff *skb)
+{
+ struct wmi_vdev_start_resp_event vdev_start_resp;
+ struct ath12k *ar;
+ u32 status;
+
+ if (ath12k_pull_vdev_start_resp_tlv(ab, skb, &vdev_start_resp) != 0) {
+ ath12k_warn(ab, "failed to extract vdev start resp");
+ return;
+ }
+
+ rcu_read_lock();
+ ar = ath12k_mac_get_ar_by_vdev_id(ab, le32_to_cpu(vdev_start_resp.vdev_id));
+ if (!ar) {
+ ath12k_warn(ab, "invalid vdev id in vdev start resp ev %d",
+ vdev_start_resp.vdev_id);
+ rcu_read_unlock();
+ return;
+ }
+
+ ar->last_wmi_vdev_start_status = 0;
+
+ status = le32_to_cpu(vdev_start_resp.status);
+
+ if (WARN_ON_ONCE(status)) {
+ ath12k_warn(ab, "vdev start resp error status %d (%s)\n",
+ status, ath12k_wmi_vdev_resp_print(status));
+ ar->last_wmi_vdev_start_status = status;
+ }
+
+ complete(&ar->vdev_setup_done);
+
+ rcu_read_unlock();
+
+ ath12k_dbg(ab, ATH12K_DBG_WMI, "vdev start resp for vdev id %d",
+ vdev_start_resp.vdev_id);
+}
+
+static void ath12k_bcn_tx_status_event(struct ath12k_base *ab, struct sk_buff *skb)
+{
+ u32 vdev_id, tx_status;
+
+ if (ath12k_pull_bcn_tx_status_ev(ab, skb->data, skb->len,
+ &vdev_id, &tx_status) != 0) {
+ ath12k_warn(ab, "failed to extract bcn tx status");
+ return;
+ }
+}
+
+static void ath12k_vdev_stopped_event(struct ath12k_base *ab, struct sk_buff *skb)
+{
+ struct ath12k *ar;
+ u32 vdev_id = 0;
+
+ if (ath12k_pull_vdev_stopped_param_tlv(ab, skb, &vdev_id) != 0) {
+ ath12k_warn(ab, "failed to extract vdev stopped event");
+ return;
+ }
+
+ rcu_read_lock();
+ ar = ath12k_mac_get_ar_by_vdev_id(ab, vdev_id);
+ if (!ar) {
+ ath12k_warn(ab, "invalid vdev id in vdev stopped ev %d",
+ vdev_id);
+ rcu_read_unlock();
+ return;
+ }
+
+ complete(&ar->vdev_setup_done);
+
+ rcu_read_unlock();
+
+ ath12k_dbg(ab, ATH12K_DBG_WMI, "vdev stopped for vdev id %d", vdev_id);
+}
+
+static void ath12k_mgmt_rx_event(struct ath12k_base *ab, struct sk_buff *skb)
+{
+ struct ath12k_wmi_mgmt_rx_arg rx_ev = {0};
+ struct ath12k *ar;
+ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
+ struct ieee80211_hdr *hdr;
+ u16 fc;
+ struct ieee80211_supported_band *sband;
+
+ if (ath12k_pull_mgmt_rx_params_tlv(ab, skb, &rx_ev) != 0) {
+ ath12k_warn(ab, "failed to extract mgmt rx event");
+ dev_kfree_skb(skb);
+ return;
+ }
+
+ memset(status, 0, sizeof(*status));
+
+ ath12k_dbg(ab, ATH12K_DBG_MGMT, "mgmt rx event status %08x\n",
+ rx_ev.status);
+
+ rcu_read_lock();
+ ar = ath12k_mac_get_ar_by_pdev_id(ab, rx_ev.pdev_id);
+
+ if (!ar) {
+ ath12k_warn(ab, "invalid pdev_id %d in mgmt_rx_event\n",
+ rx_ev.pdev_id);
+ dev_kfree_skb(skb);
+ goto exit;
+ }
+
+ if ((test_bit(ATH12K_CAC_RUNNING, &ar->dev_flags)) ||
+ (rx_ev.status & (WMI_RX_STATUS_ERR_DECRYPT |
+ WMI_RX_STATUS_ERR_KEY_CACHE_MISS |
+ WMI_RX_STATUS_ERR_CRC))) {
+ dev_kfree_skb(skb);
+ goto exit;
+ }
+
+ if (rx_ev.status & WMI_RX_STATUS_ERR_MIC)
+ status->flag |= RX_FLAG_MMIC_ERROR;
+
+ if (rx_ev.chan_freq >= ATH12K_MIN_6G_FREQ) {
+ status->band = NL80211_BAND_6GHZ;
+ } else if (rx_ev.channel >= 1 && rx_ev.channel <= 14) {
+ status->band = NL80211_BAND_2GHZ;
+ } else if (rx_ev.channel >= 36 && rx_ev.channel <= ATH12K_MAX_5G_CHAN) {
+ status->band = NL80211_BAND_5GHZ;
+ } else {
+ /* Shouldn't happen unless list of advertised channels to
+ * mac80211 has been changed.
+ */
+ WARN_ON_ONCE(1);
+ dev_kfree_skb(skb);
+ goto exit;
+ }
+
+ if (rx_ev.phy_mode == MODE_11B &&
+ (status->band == NL80211_BAND_5GHZ || status->band == NL80211_BAND_6GHZ))
+ ath12k_dbg(ab, ATH12K_DBG_WMI,
+ "wmi mgmt rx 11b (CCK) on 5/6GHz, band = %d\n", status->band);
+
+ sband = &ar->mac.sbands[status->band];
+
+ status->freq = ieee80211_channel_to_frequency(rx_ev.channel,
+ status->band);
+ status->signal = rx_ev.snr + ATH12K_DEFAULT_NOISE_FLOOR;
+ status->rate_idx = ath12k_mac_bitrate_to_idx(sband, rx_ev.rate / 100);
+
+ hdr = (struct ieee80211_hdr *)skb->data;
+ fc = le16_to_cpu(hdr->frame_control);
+
+ /* Firmware is guaranteed to report all essential management frames via
+ * WMI while it can deliver some extra via HTT. Since there can be
+ * duplicates split the reporting wrt monitor/sniffing.
+ */
+ status->flag |= RX_FLAG_SKIP_MONITOR;
+
+ /* In case of PMF, FW delivers decrypted frames with Protected Bit set
+ * including group privacy action frames.
+ */
+ if (ieee80211_has_protected(hdr->frame_control)) {
+ status->flag |= RX_FLAG_DECRYPTED;
+
+ if (!ieee80211_is_robust_mgmt_frame(skb)) {
+ status->flag |= RX_FLAG_IV_STRIPPED |
+ RX_FLAG_MMIC_STRIPPED;
+ hdr->frame_control = __cpu_to_le16(fc &
+ ~IEEE80211_FCTL_PROTECTED);
+ }
+ }
+
+ /* TODO: Pending handle beacon implementation
+ *if (ieee80211_is_beacon(hdr->frame_control))
+ * ath12k_mac_handle_beacon(ar, skb);
+ */
+
+ ath12k_dbg(ab, ATH12K_DBG_MGMT,
+ "event mgmt rx skb %pK len %d ftype %02x stype %02x\n",
+ skb, skb->len,
+ fc & IEEE80211_FCTL_FTYPE, fc & IEEE80211_FCTL_STYPE);
+
+ ath12k_dbg(ab, ATH12K_DBG_MGMT,
+ "event mgmt rx freq %d band %d snr %d, rate_idx %d\n",
+ status->freq, status->band, status->signal,
+ status->rate_idx);
+
+ ieee80211_rx_ni(ar->hw, skb);
+
+exit:
+ rcu_read_unlock();
+}
+
+static void ath12k_mgmt_tx_compl_event(struct ath12k_base *ab, struct sk_buff *skb)
+{
+ struct wmi_mgmt_tx_compl_event tx_compl_param = {0};
+ struct ath12k *ar;
+
+ if (ath12k_pull_mgmt_tx_compl_param_tlv(ab, skb, &tx_compl_param) != 0) {
+ ath12k_warn(ab, "failed to extract mgmt tx compl event");
+ return;
+ }
+
+ rcu_read_lock();
+ ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(tx_compl_param.pdev_id));
+ if (!ar) {
+ ath12k_warn(ab, "invalid pdev id %d in mgmt_tx_compl_event\n",
+ tx_compl_param.pdev_id);
+ goto exit;
+ }
+
+ wmi_process_mgmt_tx_comp(ar, le32_to_cpu(tx_compl_param.desc_id),
+ le32_to_cpu(tx_compl_param.status));
+
+ ath12k_dbg(ab, ATH12K_DBG_MGMT,
+ "mgmt tx compl ev pdev_id %d, desc_id %d, status %d",
+ tx_compl_param.pdev_id, tx_compl_param.desc_id,
+ tx_compl_param.status);
+
+exit:
+ rcu_read_unlock();
+}
+
+static struct ath12k *ath12k_get_ar_on_scan_abort(struct ath12k_base *ab,
+ u32 vdev_id)
+{
+ int i;
+ struct ath12k_pdev *pdev;
+ struct ath12k *ar;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = rcu_dereference(ab->pdevs_active[i]);
+ if (pdev && pdev->ar) {
+ ar = pdev->ar;
+
+ spin_lock_bh(&ar->data_lock);
+ if (ar->scan.state == ATH12K_SCAN_ABORTING &&
+ ar->scan.vdev_id == vdev_id) {
+ spin_unlock_bh(&ar->data_lock);
+ return ar;
+ }
+ spin_unlock_bh(&ar->data_lock);
+ }
+ }
+ return NULL;
+}
+
+static void ath12k_scan_event(struct ath12k_base *ab, struct sk_buff *skb)
+{
+ struct ath12k *ar;
+ struct wmi_scan_event scan_ev = {0};
+
+ if (ath12k_pull_scan_ev(ab, skb, &scan_ev) != 0) {
+ ath12k_warn(ab, "failed to extract scan event");
+ return;
+ }
+
+ rcu_read_lock();
+
+ /* In case the scan was cancelled, ex. during interface teardown,
+ * the interface will not be found in active interfaces.
+ * Rather, in such scenarios, iterate over the active pdev's to
+ * search 'ar' if the corresponding 'ar' scan is ABORTING and the
+ * aborting scan's vdev id matches this event info.
+ */
+ if (le32_to_cpu(scan_ev.event_type) == WMI_SCAN_EVENT_COMPLETED &&
+ le32_to_cpu(scan_ev.reason) == WMI_SCAN_REASON_CANCELLED)
+ ar = ath12k_get_ar_on_scan_abort(ab, le32_to_cpu(scan_ev.vdev_id));
+ else
+ ar = ath12k_mac_get_ar_by_vdev_id(ab, le32_to_cpu(scan_ev.vdev_id));
+
+ if (!ar) {
+ ath12k_warn(ab, "Received scan event for unknown vdev");
+ rcu_read_unlock();
+ return;
+ }
+
+ spin_lock_bh(&ar->data_lock);
+
+ ath12k_dbg(ab, ATH12K_DBG_WMI,
+ "scan event %s type %d reason %d freq %d req_id %d scan_id %d vdev_id %d state %s (%d)\n",
+ ath12k_wmi_event_scan_type_str(le32_to_cpu(scan_ev.event_type),
+ le32_to_cpu(scan_ev.reason)),
+ le32_to_cpu(scan_ev.event_type),
+ le32_to_cpu(scan_ev.reason),
+ le32_to_cpu(scan_ev.channel_freq),
+ le32_to_cpu(scan_ev.scan_req_id),
+ le32_to_cpu(scan_ev.scan_id),
+ le32_to_cpu(scan_ev.vdev_id),
+ ath12k_scan_state_str(ar->scan.state), ar->scan.state);
+
+ switch (le32_to_cpu(scan_ev.event_type)) {
+ case WMI_SCAN_EVENT_STARTED:
+ ath12k_wmi_event_scan_started(ar);
+ break;
+ case WMI_SCAN_EVENT_COMPLETED:
+ ath12k_wmi_event_scan_completed(ar);
+ break;
+ case WMI_SCAN_EVENT_BSS_CHANNEL:
+ ath12k_wmi_event_scan_bss_chan(ar);
+ break;
+ case WMI_SCAN_EVENT_FOREIGN_CHAN:
+ ath12k_wmi_event_scan_foreign_chan(ar, le32_to_cpu(scan_ev.channel_freq));
+ break;
+ case WMI_SCAN_EVENT_START_FAILED:
+ ath12k_warn(ab, "received scan start failure event\n");
+ ath12k_wmi_event_scan_start_failed(ar);
+ break;
+ case WMI_SCAN_EVENT_DEQUEUED:
+ case WMI_SCAN_EVENT_PREEMPTED:
+ case WMI_SCAN_EVENT_RESTARTED:
+ case WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT:
+ default:
+ break;
+ }
+
+ spin_unlock_bh(&ar->data_lock);
+
+ rcu_read_unlock();
+}
+
+static void ath12k_peer_sta_kickout_event(struct ath12k_base *ab, struct sk_buff *skb)
+{
+ struct wmi_peer_sta_kickout_arg arg = {};
+ struct ieee80211_sta *sta;
+ struct ath12k_peer *peer;
+ struct ath12k *ar;
+
+ if (ath12k_pull_peer_sta_kickout_ev(ab, skb, &arg) != 0) {
+ ath12k_warn(ab, "failed to extract peer sta kickout event");
+ return;
+ }
+
+ rcu_read_lock();
+
+ spin_lock_bh(&ab->base_lock);
+
+ peer = ath12k_peer_find_by_addr(ab, arg.mac_addr);
+
+ if (!peer) {
+ ath12k_warn(ab, "peer not found %pM\n",
+ arg.mac_addr);
+ goto exit;
+ }
+
+ ar = ath12k_mac_get_ar_by_vdev_id(ab, peer->vdev_id);
+ if (!ar) {
+ ath12k_warn(ab, "invalid vdev id in peer sta kickout ev %d",
+ peer->vdev_id);
+ goto exit;
+ }
+
+ sta = ieee80211_find_sta_by_ifaddr(ar->hw,
+ arg.mac_addr, NULL);
+ if (!sta) {
+ ath12k_warn(ab, "Spurious quick kickout for STA %pM\n",
+ arg.mac_addr);
+ goto exit;
+ }
+
+ ath12k_dbg(ab, ATH12K_DBG_WMI, "peer sta kickout event %pM",
+ arg.mac_addr);
+
+ ieee80211_report_low_ack(sta, 10);
+
+exit:
+ spin_unlock_bh(&ab->base_lock);
+ rcu_read_unlock();
+}
+
+static void ath12k_roam_event(struct ath12k_base *ab, struct sk_buff *skb)
+{
+ struct wmi_roam_event roam_ev = {};
+ struct ath12k *ar;
+
+ if (ath12k_pull_roam_ev(ab, skb, &roam_ev) != 0) {
+ ath12k_warn(ab, "failed to extract roam event");
+ return;
+ }
+
+ ath12k_dbg(ab, ATH12K_DBG_WMI,
+ "wmi roam event vdev %u reason 0x%08x rssi %d\n",
+ roam_ev.vdev_id, roam_ev.reason, roam_ev.rssi);
+
+ rcu_read_lock();
+ ar = ath12k_mac_get_ar_by_vdev_id(ab, le32_to_cpu(roam_ev.vdev_id));
+ if (!ar) {
+ ath12k_warn(ab, "invalid vdev id in roam ev %d",
+ roam_ev.vdev_id);
+ rcu_read_unlock();
+ return;
+ }
+
+ if (le32_to_cpu(roam_ev.reason) >= WMI_ROAM_REASON_MAX)
+ ath12k_warn(ab, "ignoring unknown roam event reason %d on vdev %i\n",
+ roam_ev.reason, roam_ev.vdev_id);
+
+ switch (le32_to_cpu(roam_ev.reason)) {
+ case WMI_ROAM_REASON_BEACON_MISS:
+ /* TODO: Pending beacon miss and connection_loss_work
+ * implementation
+ * ath12k_mac_handle_beacon_miss(ar, vdev_id);
+ */
+ break;
+ case WMI_ROAM_REASON_BETTER_AP:
+ case WMI_ROAM_REASON_LOW_RSSI:
+ case WMI_ROAM_REASON_SUITABLE_AP_FOUND:
+ case WMI_ROAM_REASON_HO_FAILED:
+ ath12k_warn(ab, "ignoring not implemented roam event reason %d on vdev %i\n",
+ roam_ev.reason, roam_ev.vdev_id);
+ break;
+ }
+
+ rcu_read_unlock();
+}
+
+static void ath12k_chan_info_event(struct ath12k_base *ab, struct sk_buff *skb)
+{
+ struct wmi_chan_info_event ch_info_ev = {0};
+ struct ath12k *ar;
+ struct survey_info *survey;
+ int idx;
+ /* HW channel counters frequency value in hertz */
+ u32 cc_freq_hz = ab->cc_freq_hz;
+
+ if (ath12k_pull_chan_info_ev(ab, skb->data, skb->len, &ch_info_ev) != 0) {
+ ath12k_warn(ab, "failed to extract chan info event");
+ return;
+ }
+
+ ath12k_dbg(ab, ATH12K_DBG_WMI,
+ "chan info vdev_id %d err_code %d freq %d cmd_flags %d noise_floor %d rx_clear_count %d cycle_count %d mac_clk_mhz %d\n",
+ ch_info_ev.vdev_id, ch_info_ev.err_code, ch_info_ev.freq,
+ ch_info_ev.cmd_flags, ch_info_ev.noise_floor,
+ ch_info_ev.rx_clear_count, ch_info_ev.cycle_count,
+ ch_info_ev.mac_clk_mhz);
+
+ if (le32_to_cpu(ch_info_ev.cmd_flags) == WMI_CHAN_INFO_END_RESP) {
+ ath12k_dbg(ab, ATH12K_DBG_WMI, "chan info report completed\n");
+ return;
+ }
+
+ rcu_read_lock();
+ ar = ath12k_mac_get_ar_by_vdev_id(ab, le32_to_cpu(ch_info_ev.vdev_id));
+ if (!ar) {
+ ath12k_warn(ab, "invalid vdev id in chan info ev %d",
+ ch_info_ev.vdev_id);
+ rcu_read_unlock();
+ return;
+ }
+ spin_lock_bh(&ar->data_lock);
+
+ switch (ar->scan.state) {
+ case ATH12K_SCAN_IDLE:
+ case ATH12K_SCAN_STARTING:
+ ath12k_warn(ab, "received chan info event without a scan request, ignoring\n");
+ goto exit;
+ case ATH12K_SCAN_RUNNING:
+ case ATH12K_SCAN_ABORTING:
+ break;
+ }
+
+ idx = freq_to_idx(ar, le32_to_cpu(ch_info_ev.freq));
+ if (idx >= ARRAY_SIZE(ar->survey)) {
+ ath12k_warn(ab, "chan info: invalid frequency %d (idx %d out of bounds)\n",
+ ch_info_ev.freq, idx);
+ goto exit;
+ }
+
+ /* If FW provides MAC clock frequency in Mhz, overriding the initialized
+ * HW channel counters frequency value
+ */
+ if (ch_info_ev.mac_clk_mhz)
+ cc_freq_hz = (le32_to_cpu(ch_info_ev.mac_clk_mhz) * 1000);
+
+ if (ch_info_ev.cmd_flags == WMI_CHAN_INFO_START_RESP) {
+ survey = &ar->survey[idx];
+ memset(survey, 0, sizeof(*survey));
+ survey->noise = le32_to_cpu(ch_info_ev.noise_floor);
+ survey->filled = SURVEY_INFO_NOISE_DBM | SURVEY_INFO_TIME |
+ SURVEY_INFO_TIME_BUSY;
+ survey->time = div_u64(le32_to_cpu(ch_info_ev.cycle_count), cc_freq_hz);
+ survey->time_busy = div_u64(le32_to_cpu(ch_info_ev.rx_clear_count),
+ cc_freq_hz);
+ }
+exit:
+ spin_unlock_bh(&ar->data_lock);
+ rcu_read_unlock();
+}
+
+static void
+ath12k_pdev_bss_chan_info_event(struct ath12k_base *ab, struct sk_buff *skb)
+{
+ struct wmi_pdev_bss_chan_info_event bss_ch_info_ev = {};
+ struct survey_info *survey;
+ struct ath12k *ar;
+ u32 cc_freq_hz = ab->cc_freq_hz;
+ u64 busy, total, tx, rx, rx_bss;
+ int idx;
+
+ if (ath12k_pull_pdev_bss_chan_info_ev(ab, skb, &bss_ch_info_ev) != 0) {
+ ath12k_warn(ab, "failed to extract pdev bss chan info event");
+ return;
+ }
+
+ busy = (u64)(le32_to_cpu(bss_ch_info_ev.rx_clear_count_high)) << 32 |
+ le32_to_cpu(bss_ch_info_ev.rx_clear_count_low);
+
+ total = (u64)(le32_to_cpu(bss_ch_info_ev.cycle_count_high)) << 32 |
+ le32_to_cpu(bss_ch_info_ev.cycle_count_low);
+
+ tx = (u64)(le32_to_cpu(bss_ch_info_ev.tx_cycle_count_high)) << 32 |
+ le32_to_cpu(bss_ch_info_ev.tx_cycle_count_low);
+
+ rx = (u64)(le32_to_cpu(bss_ch_info_ev.rx_cycle_count_high)) << 32 |
+ le32_to_cpu(bss_ch_info_ev.rx_cycle_count_low);
+
+ rx_bss = (u64)(le32_to_cpu(bss_ch_info_ev.rx_bss_cycle_count_high)) << 32 |
+ le32_to_cpu(bss_ch_info_ev.rx_bss_cycle_count_low);
+
+ ath12k_dbg(ab, ATH12K_DBG_WMI,
+ "pdev bss chan info:\n pdev_id: %d freq: %d noise: %d cycle: busy %llu total %llu tx %llu rx %llu rx_bss %llu\n",
+ bss_ch_info_ev.pdev_id, bss_ch_info_ev.freq,
+ bss_ch_info_ev.noise_floor, busy, total,
+ tx, rx, rx_bss);
+
+ rcu_read_lock();
+ ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(bss_ch_info_ev.pdev_id));
+
+ if (!ar) {
+ ath12k_warn(ab, "invalid pdev id %d in bss_chan_info event\n",
+ bss_ch_info_ev.pdev_id);
+ rcu_read_unlock();
+ return;
+ }
+
+ spin_lock_bh(&ar->data_lock);
+ idx = freq_to_idx(ar, le32_to_cpu(bss_ch_info_ev.freq));
+ if (idx >= ARRAY_SIZE(ar->survey)) {
+ ath12k_warn(ab, "bss chan info: invalid frequency %d (idx %d out of bounds)\n",
+ bss_ch_info_ev.freq, idx);
+ goto exit;
+ }
+
+ survey = &ar->survey[idx];
+
+ survey->noise = le32_to_cpu(bss_ch_info_ev.noise_floor);
+ survey->time = div_u64(total, cc_freq_hz);
+ survey->time_busy = div_u64(busy, cc_freq_hz);
+ survey->time_rx = div_u64(rx_bss, cc_freq_hz);
+ survey->time_tx = div_u64(tx, cc_freq_hz);
+ survey->filled |= (SURVEY_INFO_NOISE_DBM |
+ SURVEY_INFO_TIME |
+ SURVEY_INFO_TIME_BUSY |
+ SURVEY_INFO_TIME_RX |
+ SURVEY_INFO_TIME_TX);
+exit:
+ spin_unlock_bh(&ar->data_lock);
+ complete(&ar->bss_survey_done);
+
+ rcu_read_unlock();
+}
+
+static void ath12k_vdev_install_key_compl_event(struct ath12k_base *ab,
+ struct sk_buff *skb)
+{
+ struct wmi_vdev_install_key_complete_arg install_key_compl = {0};
+ struct ath12k *ar;
+
+ if (ath12k_pull_vdev_install_key_compl_ev(ab, skb, &install_key_compl) != 0) {
+ ath12k_warn(ab, "failed to extract install key compl event");
+ return;
+ }
+
+ ath12k_dbg(ab, ATH12K_DBG_WMI,
+ "vdev install key ev idx %d flags %08x macaddr %pM status %d\n",
+ install_key_compl.key_idx, install_key_compl.key_flags,
+ install_key_compl.macaddr, install_key_compl.status);
+
+ rcu_read_lock();
+ ar = ath12k_mac_get_ar_by_vdev_id(ab, install_key_compl.vdev_id);
+ if (!ar) {
+ ath12k_warn(ab, "invalid vdev id in install key compl ev %d",
+ install_key_compl.vdev_id);
+ rcu_read_unlock();
+ return;
+ }
+
+ ar->install_key_status = 0;
+
+ if (install_key_compl.status != WMI_VDEV_INSTALL_KEY_COMPL_STATUS_SUCCESS) {
+ ath12k_warn(ab, "install key failed for %pM status %d\n",
+ install_key_compl.macaddr, install_key_compl.status);
+ ar->install_key_status = install_key_compl.status;
+ }
+
+ complete(&ar->install_key_done);
+ rcu_read_unlock();
+}
+
+static void ath12k_service_available_event(struct ath12k_base *ab, struct sk_buff *skb)
+{
+ const void **tb;
+ const struct wmi_service_available_event *ev;
+ int ret;
+ int i, j;
+
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return;
+ }
+
+ ev = tb[WMI_TAG_SERVICE_AVAILABLE_EVENT];
+ if (!ev) {
+ ath12k_warn(ab, "failed to fetch svc available ev");
+ kfree(tb);
+ return;
+ }
+
+ /* TODO: Use wmi_service_segment_offset information to get the service
+ * especially when more services are advertised in multiple sevice
+ * available events.
+ */
+ for (i = 0, j = WMI_MAX_SERVICE;
+ i < WMI_SERVICE_SEGMENT_BM_SIZE32 && j < WMI_MAX_EXT_SERVICE;
+ i++) {
+ do {
+ if (le32_to_cpu(ev->wmi_service_segment_bitmap[i]) &
+ BIT(j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32))
+ set_bit(j, ab->wmi_ab.svc_map);
+ } while (++j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32);
+ }
+
+ ath12k_dbg(ab, ATH12K_DBG_WMI,
+ "wmi_ext_service_bitmap 0:0x%x, 1:0x%x, 2:0x%x, 3:0x%x",
+ ev->wmi_service_segment_bitmap[0], ev->wmi_service_segment_bitmap[1],
+ ev->wmi_service_segment_bitmap[2], ev->wmi_service_segment_bitmap[3]);
+
+ kfree(tb);
+}
+
+static void ath12k_peer_assoc_conf_event(struct ath12k_base *ab, struct sk_buff *skb)
+{
+ struct wmi_peer_assoc_conf_arg peer_assoc_conf = {0};
+ struct ath12k *ar;
+
+ if (ath12k_pull_peer_assoc_conf_ev(ab, skb, &peer_assoc_conf) != 0) {
+ ath12k_warn(ab, "failed to extract peer assoc conf event");
+ return;
+ }
+
+ ath12k_dbg(ab, ATH12K_DBG_WMI,
+ "peer assoc conf ev vdev id %d macaddr %pM\n",
+ peer_assoc_conf.vdev_id, peer_assoc_conf.macaddr);
+
+ rcu_read_lock();
+ ar = ath12k_mac_get_ar_by_vdev_id(ab, peer_assoc_conf.vdev_id);
+
+ if (!ar) {
+ ath12k_warn(ab, "invalid vdev id in peer assoc conf ev %d",
+ peer_assoc_conf.vdev_id);
+ rcu_read_unlock();
+ return;
+ }
+
+ complete(&ar->peer_assoc_done);
+ rcu_read_unlock();
+}
+
+static void ath12k_update_stats_event(struct ath12k_base *ab, struct sk_buff *skb)
+{
+}
+
+/* PDEV_CTL_FAILSAFE_CHECK_EVENT is received from FW when the frequency scanned
+ * is not part of BDF CTL(Conformance test limits) table entries.
+ */
+static void ath12k_pdev_ctl_failsafe_check_event(struct ath12k_base *ab,
+ struct sk_buff *skb)
+{
+ const void **tb;
+ const struct wmi_pdev_ctl_failsafe_chk_event *ev;
+ int ret;
+
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return;
+ }
+
+ ev = tb[WMI_TAG_PDEV_CTL_FAILSAFE_CHECK_EVENT];
+ if (!ev) {
+ ath12k_warn(ab, "failed to fetch pdev ctl failsafe check ev");
+ kfree(tb);
+ return;
+ }
+
+ ath12k_dbg(ab, ATH12K_DBG_WMI,
+ "pdev ctl failsafe check ev status %d\n",
+ ev->ctl_failsafe_status);
+
+ /* If ctl_failsafe_status is set to 1 FW will max out the Transmit power
+ * to 10 dBm else the CTL power entry in the BDF would be picked up.
+ */
+ if (ev->ctl_failsafe_status != 0)
+ ath12k_warn(ab, "pdev ctl failsafe failure status %d",
+ ev->ctl_failsafe_status);
+
+ kfree(tb);
+}
+
+static void
+ath12k_wmi_process_csa_switch_count_event(struct ath12k_base *ab,
+ const struct ath12k_wmi_pdev_csa_event *ev,
+ const u32 *vdev_ids)
+{
+ int i;
+ struct ath12k_vif *arvif;
+
+ /* Finish CSA once the switch count becomes NULL */
+ if (ev->current_switch_count)
+ return;
+
+ rcu_read_lock();
+ for (i = 0; i < le32_to_cpu(ev->num_vdevs); i++) {
+ arvif = ath12k_mac_get_arvif_by_vdev_id(ab, vdev_ids[i]);
+
+ if (!arvif) {
+ ath12k_warn(ab, "Recvd csa status for unknown vdev %d",
+ vdev_ids[i]);
+ continue;
+ }
+
+ if (arvif->is_up && arvif->vif->bss_conf.csa_active)
+ ieee80211_csa_finish(arvif->vif);
+ }
+ rcu_read_unlock();
+}
+
+static void
+ath12k_wmi_pdev_csa_switch_count_status_event(struct ath12k_base *ab,
+ struct sk_buff *skb)
+{
+ const void **tb;
+ const struct ath12k_wmi_pdev_csa_event *ev;
+ const u32 *vdev_ids;
+ int ret;
+
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return;
+ }
+
+ ev = tb[WMI_TAG_PDEV_CSA_SWITCH_COUNT_STATUS_EVENT];
+ vdev_ids = tb[WMI_TAG_ARRAY_UINT32];
+
+ if (!ev || !vdev_ids) {
+ ath12k_warn(ab, "failed to fetch pdev csa switch count ev");
+ kfree(tb);
+ return;
+ }
+
+ ath12k_dbg(ab, ATH12K_DBG_WMI,
+ "pdev csa switch count %d for pdev %d, num_vdevs %d",
+ ev->current_switch_count, ev->pdev_id,
+ ev->num_vdevs);
+
+ ath12k_wmi_process_csa_switch_count_event(ab, ev, vdev_ids);
+
+ kfree(tb);
+}
+
+static void
+ath12k_wmi_pdev_dfs_radar_detected_event(struct ath12k_base *ab, struct sk_buff *skb)
+{
+ const void **tb;
+ const struct ath12k_wmi_pdev_radar_event *ev;
+ struct ath12k *ar;
+ int ret;
+
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return;
+ }
+
+ ev = tb[WMI_TAG_PDEV_DFS_RADAR_DETECTION_EVENT];
+
+ if (!ev) {
+ ath12k_warn(ab, "failed to fetch pdev dfs radar detected ev");
+ kfree(tb);
+ return;
+ }
+
+ ath12k_dbg(ab, ATH12K_DBG_WMI,
+ "pdev dfs radar detected on pdev %d, detection mode %d, chan freq %d, chan_width %d, detector id %d, seg id %d, timestamp %d, chirp %d, freq offset %d, sidx %d",
+ ev->pdev_id, ev->detection_mode, ev->chan_freq, ev->chan_width,
+ ev->detector_id, ev->segment_id, ev->timestamp, ev->is_chirp,
+ ev->freq_offset, ev->sidx);
+
+ ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(ev->pdev_id));
+
+ if (!ar) {
+ ath12k_warn(ab, "radar detected in invalid pdev %d\n",
+ ev->pdev_id);
+ goto exit;
+ }
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_REG, "DFS Radar Detected in pdev %d\n",
+ ev->pdev_id);
+
+ if (ar->dfs_block_radar_events)
+ ath12k_info(ab, "DFS Radar detected, but ignored as requested\n");
+ else
+ ieee80211_radar_detected(ar->hw);
+
+exit:
+ kfree(tb);
+}
+
+static void
+ath12k_wmi_pdev_temperature_event(struct ath12k_base *ab,
+ struct sk_buff *skb)
+{
+ struct ath12k *ar;
+ struct wmi_pdev_temperature_event ev = {0};
+
+ if (ath12k_pull_pdev_temp_ev(ab, skb->data, skb->len, &ev) != 0) {
+ ath12k_warn(ab, "failed to extract pdev temperature event");
+ return;
+ }
+
+ ath12k_dbg(ab, ATH12K_DBG_WMI,
+ "pdev temperature ev temp %d pdev_id %d\n", ev.temp, ev.pdev_id);
+
+ ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(ev.pdev_id));
+ if (!ar) {
+ ath12k_warn(ab, "invalid pdev id in pdev temperature ev %d", ev.pdev_id);
+ return;
+ }
+}
+
+static void ath12k_fils_discovery_event(struct ath12k_base *ab,
+ struct sk_buff *skb)
+{
+ const void **tb;
+ const struct wmi_fils_discovery_event *ev;
+ int ret;
+
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath12k_warn(ab,
+ "failed to parse FILS discovery event tlv %d\n",
+ ret);
+ return;
+ }
+
+ ev = tb[WMI_TAG_HOST_SWFDA_EVENT];
+ if (!ev) {
+ ath12k_warn(ab, "failed to fetch FILS discovery event\n");
+ kfree(tb);
+ return;
+ }
+
+ ath12k_warn(ab,
+ "FILS discovery frame expected from host for vdev_id: %u, transmission scheduled at %u, next TBTT: %u\n",
+ ev->vdev_id, ev->fils_tt, ev->tbtt);
+
+ kfree(tb);
+}
+
+static void ath12k_probe_resp_tx_status_event(struct ath12k_base *ab,
+ struct sk_buff *skb)
+{
+ const void **tb;
+ const struct wmi_probe_resp_tx_status_event *ev;
+ int ret;
+
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath12k_warn(ab,
+ "failed to parse probe response transmission status event tlv: %d\n",
+ ret);
+ return;
+ }
+
+ ev = tb[WMI_TAG_OFFLOAD_PRB_RSP_TX_STATUS_EVENT];
+ if (!ev) {
+ ath12k_warn(ab,
+ "failed to fetch probe response transmission status event");
+ kfree(tb);
+ return;
+ }
+
+ if (ev->tx_status)
+ ath12k_warn(ab,
+ "Probe response transmission failed for vdev_id %u, status %u\n",
+ ev->vdev_id, ev->tx_status);
+
+ kfree(tb);
+}
+
+static void ath12k_wmi_op_rx(struct ath12k_base *ab, struct sk_buff *skb)
+{
+ struct wmi_cmd_hdr *cmd_hdr;
+ enum wmi_tlv_event_id id;
+
+ cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
+ id = le32_get_bits(cmd_hdr->cmd_id, WMI_CMD_HDR_CMD_ID);
+
+ if (!skb_pull(skb, sizeof(struct wmi_cmd_hdr)))
+ goto out;
+
+ switch (id) {
+ /* Process all the WMI events here */
+ case WMI_SERVICE_READY_EVENTID:
+ ath12k_service_ready_event(ab, skb);
+ break;
+ case WMI_SERVICE_READY_EXT_EVENTID:
+ ath12k_service_ready_ext_event(ab, skb);
+ break;
+ case WMI_SERVICE_READY_EXT2_EVENTID:
+ ath12k_service_ready_ext2_event(ab, skb);
+ break;
+ case WMI_REG_CHAN_LIST_CC_EXT_EVENTID:
+ ath12k_reg_chan_list_event(ab, skb);
+ break;
+ case WMI_READY_EVENTID:
+ ath12k_ready_event(ab, skb);
+ break;
+ case WMI_PEER_DELETE_RESP_EVENTID:
+ ath12k_peer_delete_resp_event(ab, skb);
+ break;
+ case WMI_VDEV_START_RESP_EVENTID:
+ ath12k_vdev_start_resp_event(ab, skb);
+ break;
+ case WMI_OFFLOAD_BCN_TX_STATUS_EVENTID:
+ ath12k_bcn_tx_status_event(ab, skb);
+ break;
+ case WMI_VDEV_STOPPED_EVENTID:
+ ath12k_vdev_stopped_event(ab, skb);
+ break;
+ case WMI_MGMT_RX_EVENTID:
+ ath12k_mgmt_rx_event(ab, skb);
+ /* mgmt_rx_event() owns the skb now! */
+ return;
+ case WMI_MGMT_TX_COMPLETION_EVENTID:
+ ath12k_mgmt_tx_compl_event(ab, skb);
+ break;
+ case WMI_SCAN_EVENTID:
+ ath12k_scan_event(ab, skb);
+ break;
+ case WMI_PEER_STA_KICKOUT_EVENTID:
+ ath12k_peer_sta_kickout_event(ab, skb);
+ break;
+ case WMI_ROAM_EVENTID:
+ ath12k_roam_event(ab, skb);
+ break;
+ case WMI_CHAN_INFO_EVENTID:
+ ath12k_chan_info_event(ab, skb);
+ break;
+ case WMI_PDEV_BSS_CHAN_INFO_EVENTID:
+ ath12k_pdev_bss_chan_info_event(ab, skb);
+ break;
+ case WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID:
+ ath12k_vdev_install_key_compl_event(ab, skb);
+ break;
+ case WMI_SERVICE_AVAILABLE_EVENTID:
+ ath12k_service_available_event(ab, skb);
+ break;
+ case WMI_PEER_ASSOC_CONF_EVENTID:
+ ath12k_peer_assoc_conf_event(ab, skb);
+ break;
+ case WMI_UPDATE_STATS_EVENTID:
+ ath12k_update_stats_event(ab, skb);
+ break;
+ case WMI_PDEV_CTL_FAILSAFE_CHECK_EVENTID:
+ ath12k_pdev_ctl_failsafe_check_event(ab, skb);
+ break;
+ case WMI_PDEV_CSA_SWITCH_COUNT_STATUS_EVENTID:
+ ath12k_wmi_pdev_csa_switch_count_status_event(ab, skb);
+ break;
+ case WMI_PDEV_TEMPERATURE_EVENTID:
+ ath12k_wmi_pdev_temperature_event(ab, skb);
+ break;
+ case WMI_PDEV_DMA_RING_BUF_RELEASE_EVENTID:
+ ath12k_wmi_pdev_dma_ring_buf_release_event(ab, skb);
+ break;
+ case WMI_HOST_FILS_DISCOVERY_EVENTID:
+ ath12k_fils_discovery_event(ab, skb);
+ break;
+ case WMI_OFFLOAD_PROB_RESP_TX_STATUS_EVENTID:
+ ath12k_probe_resp_tx_status_event(ab, skb);
+ break;
+ /* add Unsupported events here */
+ case WMI_TBTTOFFSET_EXT_UPDATE_EVENTID:
+ case WMI_PEER_OPER_MODE_CHANGE_EVENTID:
+ case WMI_TWT_ENABLE_EVENTID:
+ case WMI_TWT_DISABLE_EVENTID:
+ case WMI_PDEV_DMA_RING_CFG_RSP_EVENTID:
+ ath12k_dbg(ab, ATH12K_DBG_WMI,
+ "ignoring unsupported event 0x%x\n", id);
+ break;
+ case WMI_PDEV_DFS_RADAR_DETECTION_EVENTID:
+ ath12k_wmi_pdev_dfs_radar_detected_event(ab, skb);
+ break;
+ case WMI_VDEV_DELETE_RESP_EVENTID:
+ ath12k_vdev_delete_resp_event(ab, skb);
+ break;
+ /* TODO: Add remaining events */
+ default:
+ ath12k_dbg(ab, ATH12K_DBG_WMI, "Unknown eventid: 0x%x\n", id);
+ break;
+ }
+
+out:
+ dev_kfree_skb(skb);
+}
+
+static int ath12k_connect_pdev_htc_service(struct ath12k_base *ab,
+ u32 pdev_idx)
+{
+ int status;
+ u32 svc_id[] = { ATH12K_HTC_SVC_ID_WMI_CONTROL,
+ ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC1,
+ ATH12K_HTC_SVC_ID_WMI_CONTROL_MAC2 };
+ struct ath12k_htc_svc_conn_req conn_req = {};
+ struct ath12k_htc_svc_conn_resp conn_resp = {};
+
+ /* these fields are the same for all service endpoints */
+ conn_req.ep_ops.ep_tx_complete = ath12k_wmi_htc_tx_complete;
+ conn_req.ep_ops.ep_rx_complete = ath12k_wmi_op_rx;
+ conn_req.ep_ops.ep_tx_credits = ath12k_wmi_op_ep_tx_credits;
+
+ /* connect to control service */
+ conn_req.service_id = svc_id[pdev_idx];
+
+ status = ath12k_htc_connect_service(&ab->htc, &conn_req, &conn_resp);
+ if (status) {
+ ath12k_warn(ab, "failed to connect to WMI CONTROL service status: %d\n",
+ status);
+ return status;
+ }
+
+ ab->wmi_ab.wmi_endpoint_id[pdev_idx] = conn_resp.eid;
+ ab->wmi_ab.wmi[pdev_idx].eid = conn_resp.eid;
+ ab->wmi_ab.max_msg_len[pdev_idx] = conn_resp.max_msg_len;
+
+ return 0;
+}
+
+static int
+ath12k_wmi_send_unit_test_cmd(struct ath12k *ar,
+ struct wmi_unit_test_cmd ut_cmd,
+ u32 *test_args)
+{
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct wmi_unit_test_cmd *cmd;
+ struct sk_buff *skb;
+ struct wmi_tlv *tlv;
+ void *ptr;
+ u32 *ut_cmd_args;
+ int buf_len, arg_len;
+ int ret;
+ int i;
+
+ arg_len = sizeof(u32) * le32_to_cpu(ut_cmd.num_args);
+ buf_len = sizeof(ut_cmd) + arg_len + TLV_HDR_SIZE;
+
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, buf_len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_unit_test_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_UNIT_TEST_CMD,
+ sizeof(ut_cmd));
+
+ cmd->vdev_id = ut_cmd.vdev_id;
+ cmd->module_id = ut_cmd.module_id;
+ cmd->num_args = ut_cmd.num_args;
+ cmd->diag_token = ut_cmd.diag_token;
+
+ ptr = skb->data + sizeof(ut_cmd);
+
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, arg_len);
+
+ ptr += TLV_HDR_SIZE;
+
+ ut_cmd_args = ptr;
+ for (i = 0; i < le32_to_cpu(ut_cmd.num_args); i++)
+ ut_cmd_args[i] = test_args[i];
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "WMI unit test : module %d vdev %d n_args %d token %d\n",
+ cmd->module_id, cmd->vdev_id, cmd->num_args,
+ cmd->diag_token);
+
+ ret = ath12k_wmi_cmd_send(wmi, skb, WMI_UNIT_TEST_CMDID);
+
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to send WMI_UNIT_TEST CMD :%d\n",
+ ret);
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath12k_wmi_simulate_radar(struct ath12k *ar)
+{
+ struct ath12k_vif *arvif;
+ u32 dfs_args[DFS_MAX_TEST_ARGS];
+ struct wmi_unit_test_cmd wmi_ut;
+ bool arvif_found = false;
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ if (arvif->is_started && arvif->vdev_type == WMI_VDEV_TYPE_AP) {
+ arvif_found = true;
+ break;
+ }
+ }
+
+ if (!arvif_found)
+ return -EINVAL;
+
+ dfs_args[DFS_TEST_CMDID] = 0;
+ dfs_args[DFS_TEST_PDEV_ID] = ar->pdev->pdev_id;
+ /* Currently we could pass segment_id(b0 - b1), chirp(b2)
+ * freq offset (b3 - b10) to unit test. For simulation
+ * purpose this can be set to 0 which is valid.
+ */
+ dfs_args[DFS_TEST_RADAR_PARAM] = 0;
+
+ wmi_ut.vdev_id = cpu_to_le32(arvif->vdev_id);
+ wmi_ut.module_id = cpu_to_le32(DFS_UNIT_TEST_MODULE);
+ wmi_ut.num_args = cpu_to_le32(DFS_MAX_TEST_ARGS);
+ wmi_ut.diag_token = cpu_to_le32(DFS_UNIT_TEST_TOKEN);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_REG, "Triggering Radar Simulation\n");
+
+ return ath12k_wmi_send_unit_test_cmd(ar, wmi_ut, dfs_args);
+}
+
+int ath12k_wmi_connect(struct ath12k_base *ab)
+{
+ u32 i;
+ u8 wmi_ep_count;
+
+ wmi_ep_count = ab->htc.wmi_ep_count;
+ if (wmi_ep_count > ab->hw_params->max_radios)
+ return -1;
+
+ for (i = 0; i < wmi_ep_count; i++)
+ ath12k_connect_pdev_htc_service(ab, i);
+
+ return 0;
+}
+
+static void ath12k_wmi_pdev_detach(struct ath12k_base *ab, u8 pdev_id)
+{
+ if (WARN_ON(pdev_id >= MAX_RADIOS))
+ return;
+
+ /* TODO: Deinit any pdev specific wmi resource */
+}
+
+int ath12k_wmi_pdev_attach(struct ath12k_base *ab,
+ u8 pdev_id)
+{
+ struct ath12k_wmi_pdev *wmi_handle;
+
+ if (pdev_id >= ab->hw_params->max_radios)
+ return -EINVAL;
+
+ wmi_handle = &ab->wmi_ab.wmi[pdev_id];
+
+ wmi_handle->wmi_ab = &ab->wmi_ab;
+
+ ab->wmi_ab.ab = ab;
+ /* TODO: Init remaining resource specific to pdev */
+
+ return 0;
+}
+
+int ath12k_wmi_attach(struct ath12k_base *ab)
+{
+ int ret;
+
+ ret = ath12k_wmi_pdev_attach(ab, 0);
+ if (ret)
+ return ret;
+
+ ab->wmi_ab.ab = ab;
+ ab->wmi_ab.preferred_hw_mode = WMI_HOST_HW_MODE_MAX;
+
+ /* It's overwritten when service_ext_ready is handled */
+ if (ab->hw_params->single_pdev_only)
+ ab->wmi_ab.preferred_hw_mode = WMI_HOST_HW_MODE_SINGLE;
+
+ /* TODO: Init remaining wmi soc resources required */
+ init_completion(&ab->wmi_ab.service_ready);
+ init_completion(&ab->wmi_ab.unified_ready);
+
+ return 0;
+}
+
+void ath12k_wmi_detach(struct ath12k_base *ab)
+{
+ int i;
+
+ /* TODO: Deinit wmi resource specific to SOC as required */
+
+ for (i = 0; i < ab->htc.wmi_ep_count; i++)
+ ath12k_wmi_pdev_detach(ab, i);
+
+ ath12k_wmi_free_dbring_caps(ab);
+}
diff --git a/drivers/net/wireless/ath/ath12k/wmi.h b/drivers/net/wireless/ath/ath12k/wmi.h
new file mode 100644
index 000000000000..84e3fb918e43
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/wmi.h
@@ -0,0 +1,4803 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef ATH12K_WMI_H
+#define ATH12K_WMI_H
+
+#include <net/mac80211.h>
+#include "htc.h"
+
+/* Naming conventions for structures:
+ *
+ * _cmd means that this is a firmware command sent from host to firmware.
+ *
+ * _event means that this is a firmware event sent from firmware to host
+ *
+ * _params is a structure which is embedded either into _cmd or _event (or
+ * both), it is not sent individually.
+ *
+ * _arg is used inside the host, the firmware does not see that at all.
+ */
+
+struct ath12k_base;
+struct ath12k;
+
+/* There is no signed version of __le32, so for a temporary solution come
+ * up with our own version. The idea is from fs/ntfs/endian.h.
+ *
+ * Use a_ prefix so that it doesn't conflict if we get proper support to
+ * linux/types.h.
+ */
+typedef __s32 __bitwise a_sle32;
+
+static inline a_sle32 a_cpu_to_sle32(s32 val)
+{
+ return (__force a_sle32)cpu_to_le32(val);
+}
+
+static inline s32 a_sle32_to_cpu(a_sle32 val)
+{
+ return le32_to_cpu((__force __le32)val);
+}
+
+/* defines to set Packet extension values which can be 0 us, 8 usec or 16 usec */
+#define MAX_HE_NSS 8
+#define MAX_HE_MODULATION 8
+#define MAX_HE_RU 4
+#define HE_MODULATION_NONE 7
+#define HE_PET_0_USEC 0
+#define HE_PET_8_USEC 1
+#define HE_PET_16_USEC 2
+
+#define WMI_MAX_CHAINS 8
+
+#define WMI_MAX_NUM_SS MAX_HE_NSS
+#define WMI_MAX_NUM_RU MAX_HE_RU
+
+#define WMI_TLV_CMD(grp_id) (((grp_id) << 12) | 0x1)
+#define WMI_TLV_EV(grp_id) (((grp_id) << 12) | 0x1)
+#define WMI_TLV_CMD_UNSUPPORTED 0
+#define WMI_TLV_PDEV_PARAM_UNSUPPORTED 0
+#define WMI_TLV_VDEV_PARAM_UNSUPPORTED 0
+
+struct wmi_cmd_hdr {
+ __le32 cmd_id;
+} __packed;
+
+struct wmi_tlv {
+ __le32 header;
+ u8 value[];
+} __packed;
+
+#define WMI_TLV_LEN GENMASK(15, 0)
+#define WMI_TLV_TAG GENMASK(31, 16)
+#define TLV_HDR_SIZE sizeof_field(struct wmi_tlv, header)
+
+#define WMI_CMD_HDR_CMD_ID GENMASK(23, 0)
+#define WMI_MAX_MEM_REQS 32
+#define ATH12K_MAX_HW_LISTEN_INTERVAL 5
+
+#define WMI_HOST_RC_DS_FLAG 0x01
+#define WMI_HOST_RC_CW40_FLAG 0x02
+#define WMI_HOST_RC_SGI_FLAG 0x04
+#define WMI_HOST_RC_HT_FLAG 0x08
+#define WMI_HOST_RC_RTSCTS_FLAG 0x10
+#define WMI_HOST_RC_TX_STBC_FLAG 0x20
+#define WMI_HOST_RC_RX_STBC_FLAG 0xC0
+#define WMI_HOST_RC_RX_STBC_FLAG_S 6
+#define WMI_HOST_RC_WEP_TKIP_FLAG 0x100
+#define WMI_HOST_RC_TS_FLAG 0x200
+#define WMI_HOST_RC_UAPSD_FLAG 0x400
+
+#define WMI_HT_CAP_ENABLED 0x0001
+#define WMI_HT_CAP_HT20_SGI 0x0002
+#define WMI_HT_CAP_DYNAMIC_SMPS 0x0004
+#define WMI_HT_CAP_TX_STBC 0x0008
+#define WMI_HT_CAP_TX_STBC_MASK_SHIFT 3
+#define WMI_HT_CAP_RX_STBC 0x0030
+#define WMI_HT_CAP_RX_STBC_MASK_SHIFT 4
+#define WMI_HT_CAP_LDPC 0x0040
+#define WMI_HT_CAP_L_SIG_TXOP_PROT 0x0080
+#define WMI_HT_CAP_MPDU_DENSITY 0x0700
+#define WMI_HT_CAP_MPDU_DENSITY_MASK_SHIFT 8
+#define WMI_HT_CAP_HT40_SGI 0x0800
+#define WMI_HT_CAP_RX_LDPC 0x1000
+#define WMI_HT_CAP_TX_LDPC 0x2000
+#define WMI_HT_CAP_IBF_BFER 0x4000
+
+/* These macros should be used when we wish to advertise STBC support for
+ * only 1SS or 2SS or 3SS.
+ */
+#define WMI_HT_CAP_RX_STBC_1SS 0x0010
+#define WMI_HT_CAP_RX_STBC_2SS 0x0020
+#define WMI_HT_CAP_RX_STBC_3SS 0x0030
+
+#define WMI_HT_CAP_DEFAULT_ALL (WMI_HT_CAP_ENABLED | \
+ WMI_HT_CAP_HT20_SGI | \
+ WMI_HT_CAP_HT40_SGI | \
+ WMI_HT_CAP_TX_STBC | \
+ WMI_HT_CAP_RX_STBC | \
+ WMI_HT_CAP_LDPC)
+
+#define WMI_VHT_CAP_MAX_MPDU_LEN_MASK 0x00000003
+#define WMI_VHT_CAP_RX_LDPC 0x00000010
+#define WMI_VHT_CAP_SGI_80MHZ 0x00000020
+#define WMI_VHT_CAP_SGI_160MHZ 0x00000040
+#define WMI_VHT_CAP_TX_STBC 0x00000080
+#define WMI_VHT_CAP_RX_STBC_MASK 0x00000300
+#define WMI_VHT_CAP_RX_STBC_MASK_SHIFT 8
+#define WMI_VHT_CAP_SU_BFER 0x00000800
+#define WMI_VHT_CAP_SU_BFEE 0x00001000
+#define WMI_VHT_CAP_MAX_CS_ANT_MASK 0x0000E000
+#define WMI_VHT_CAP_MAX_CS_ANT_MASK_SHIFT 13
+#define WMI_VHT_CAP_MAX_SND_DIM_MASK 0x00070000
+#define WMI_VHT_CAP_MAX_SND_DIM_MASK_SHIFT 16
+#define WMI_VHT_CAP_MU_BFER 0x00080000
+#define WMI_VHT_CAP_MU_BFEE 0x00100000
+#define WMI_VHT_CAP_MAX_AMPDU_LEN_EXP 0x03800000
+#define WMI_VHT_CAP_MAX_AMPDU_LEN_EXP_SHIT 23
+#define WMI_VHT_CAP_RX_FIXED_ANT 0x10000000
+#define WMI_VHT_CAP_TX_FIXED_ANT 0x20000000
+
+#define WMI_VHT_CAP_MAX_MPDU_LEN_11454 0x00000002
+
+/* These macros should be used when we wish to advertise STBC support for
+ * only 1SS or 2SS or 3SS.
+ */
+#define WMI_VHT_CAP_RX_STBC_1SS 0x00000100
+#define WMI_VHT_CAP_RX_STBC_2SS 0x00000200
+#define WMI_VHT_CAP_RX_STBC_3SS 0x00000300
+
+#define WMI_VHT_CAP_DEFAULT_ALL (WMI_VHT_CAP_MAX_MPDU_LEN_11454 | \
+ WMI_VHT_CAP_SGI_80MHZ | \
+ WMI_VHT_CAP_TX_STBC | \
+ WMI_VHT_CAP_RX_STBC_MASK | \
+ WMI_VHT_CAP_RX_LDPC | \
+ WMI_VHT_CAP_MAX_AMPDU_LEN_EXP | \
+ WMI_VHT_CAP_RX_FIXED_ANT | \
+ WMI_VHT_CAP_TX_FIXED_ANT)
+
+#define WLAN_SCAN_MAX_HINT_S_SSID 10
+#define WLAN_SCAN_MAX_HINT_BSSID 10
+#define MAX_RNR_BSS 5
+
+#define WLAN_SCAN_MAX_HINT_S_SSID 10
+#define WLAN_SCAN_MAX_HINT_BSSID 10
+#define MAX_RNR_BSS 5
+
+#define WLAN_SCAN_PARAMS_MAX_SSID 16
+#define WLAN_SCAN_PARAMS_MAX_BSSID 4
+#define WLAN_SCAN_PARAMS_MAX_IE_LEN 256
+
+#define WMI_APPEND_TO_EXISTING_CHAN_LIST_FLAG 1
+
+#define WMI_BA_MODE_BUFFER_SIZE_256 3
+
+/* HW mode config type replicated from FW header
+ * @WMI_HOST_HW_MODE_SINGLE: Only one PHY is active.
+ * @WMI_HOST_HW_MODE_DBS: Both PHYs are active in different bands,
+ * one in 2G and another in 5G.
+ * @WMI_HOST_HW_MODE_SBS_PASSIVE: Both PHYs are in passive mode (only rx) in
+ * same band; no tx allowed.
+ * @WMI_HOST_HW_MODE_SBS: Both PHYs are active in the same band.
+ * Support for both PHYs within one band is planned
+ * for 5G only(as indicated in WMI_MAC_PHY_CAPABILITIES),
+ * but could be extended to other bands in the future.
+ * The separation of the band between the two PHYs needs
+ * to be communicated separately.
+ * @WMI_HOST_HW_MODE_DBS_SBS: 3 PHYs, with 2 on the same band doing SBS
+ * as in WMI_HW_MODE_SBS, and 3rd on the other band
+ * @WMI_HOST_HW_MODE_DBS_OR_SBS: Two PHY with one PHY capabale of both 2G and
+ * 5G. It can support SBS (5G + 5G) OR DBS (5G + 2G).
+ * @WMI_HOST_HW_MODE_MAX: Max hw_mode_id. Used to indicate invalid mode.
+ */
+enum wmi_host_hw_mode_config_type {
+ WMI_HOST_HW_MODE_SINGLE = 0,
+ WMI_HOST_HW_MODE_DBS = 1,
+ WMI_HOST_HW_MODE_SBS_PASSIVE = 2,
+ WMI_HOST_HW_MODE_SBS = 3,
+ WMI_HOST_HW_MODE_DBS_SBS = 4,
+ WMI_HOST_HW_MODE_DBS_OR_SBS = 5,
+
+ /* keep last */
+ WMI_HOST_HW_MODE_MAX
+};
+
+/* HW mode priority values used to detect the preferred HW mode
+ * on the available modes.
+ */
+enum wmi_host_hw_mode_priority {
+ WMI_HOST_HW_MODE_DBS_SBS_PRI,
+ WMI_HOST_HW_MODE_DBS_PRI,
+ WMI_HOST_HW_MODE_DBS_OR_SBS_PRI,
+ WMI_HOST_HW_MODE_SBS_PRI,
+ WMI_HOST_HW_MODE_SBS_PASSIVE_PRI,
+ WMI_HOST_HW_MODE_SINGLE_PRI,
+
+ /* keep last the lowest priority */
+ WMI_HOST_HW_MODE_MAX_PRI
+};
+
+enum WMI_HOST_WLAN_BAND {
+ WMI_HOST_WLAN_2G_CAP = 1,
+ WMI_HOST_WLAN_5G_CAP = 2,
+ WMI_HOST_WLAN_2G_5G_CAP = 3,
+};
+
+enum wmi_cmd_group {
+ /* 0 to 2 are reserved */
+ WMI_GRP_START = 0x3,
+ WMI_GRP_SCAN = WMI_GRP_START,
+ WMI_GRP_PDEV = 0x4,
+ WMI_GRP_VDEV = 0x5,
+ WMI_GRP_PEER = 0x6,
+ WMI_GRP_MGMT = 0x7,
+ WMI_GRP_BA_NEG = 0x8,
+ WMI_GRP_STA_PS = 0x9,
+ WMI_GRP_DFS = 0xa,
+ WMI_GRP_ROAM = 0xb,
+ WMI_GRP_OFL_SCAN = 0xc,
+ WMI_GRP_P2P = 0xd,
+ WMI_GRP_AP_PS = 0xe,
+ WMI_GRP_RATE_CTRL = 0xf,
+ WMI_GRP_PROFILE = 0x10,
+ WMI_GRP_SUSPEND = 0x11,
+ WMI_GRP_BCN_FILTER = 0x12,
+ WMI_GRP_WOW = 0x13,
+ WMI_GRP_RTT = 0x14,
+ WMI_GRP_SPECTRAL = 0x15,
+ WMI_GRP_STATS = 0x16,
+ WMI_GRP_ARP_NS_OFL = 0x17,
+ WMI_GRP_NLO_OFL = 0x18,
+ WMI_GRP_GTK_OFL = 0x19,
+ WMI_GRP_CSA_OFL = 0x1a,
+ WMI_GRP_CHATTER = 0x1b,
+ WMI_GRP_TID_ADDBA = 0x1c,
+ WMI_GRP_MISC = 0x1d,
+ WMI_GRP_GPIO = 0x1e,
+ WMI_GRP_FWTEST = 0x1f,
+ WMI_GRP_TDLS = 0x20,
+ WMI_GRP_RESMGR = 0x21,
+ WMI_GRP_STA_SMPS = 0x22,
+ WMI_GRP_WLAN_HB = 0x23,
+ WMI_GRP_RMC = 0x24,
+ WMI_GRP_MHF_OFL = 0x25,
+ WMI_GRP_LOCATION_SCAN = 0x26,
+ WMI_GRP_OEM = 0x27,
+ WMI_GRP_NAN = 0x28,
+ WMI_GRP_COEX = 0x29,
+ WMI_GRP_OBSS_OFL = 0x2a,
+ WMI_GRP_LPI = 0x2b,
+ WMI_GRP_EXTSCAN = 0x2c,
+ WMI_GRP_DHCP_OFL = 0x2d,
+ WMI_GRP_IPA = 0x2e,
+ WMI_GRP_MDNS_OFL = 0x2f,
+ WMI_GRP_SAP_OFL = 0x30,
+ WMI_GRP_OCB = 0x31,
+ WMI_GRP_SOC = 0x32,
+ WMI_GRP_PKT_FILTER = 0x33,
+ WMI_GRP_MAWC = 0x34,
+ WMI_GRP_PMF_OFFLOAD = 0x35,
+ WMI_GRP_BPF_OFFLOAD = 0x36,
+ WMI_GRP_NAN_DATA = 0x37,
+ WMI_GRP_PROTOTYPE = 0x38,
+ WMI_GRP_MONITOR = 0x39,
+ WMI_GRP_REGULATORY = 0x3a,
+ WMI_GRP_HW_DATA_FILTER = 0x3b,
+ WMI_GRP_WLM = 0x3c,
+ WMI_GRP_11K_OFFLOAD = 0x3d,
+ WMI_GRP_TWT = 0x3e,
+ WMI_GRP_MOTION_DET = 0x3f,
+ WMI_GRP_SPATIAL_REUSE = 0x40,
+};
+
+#define WMI_CMD_GRP(grp_id) (((grp_id) << 12) | 0x1)
+#define WMI_EVT_GRP_START_ID(grp_id) (((grp_id) << 12) | 0x1)
+
+enum wmi_tlv_cmd_id {
+ WMI_CMD_UNSUPPORTED = 0,
+ WMI_INIT_CMDID = 0x1,
+ WMI_START_SCAN_CMDID = WMI_TLV_CMD(WMI_GRP_SCAN),
+ WMI_STOP_SCAN_CMDID,
+ WMI_SCAN_CHAN_LIST_CMDID,
+ WMI_SCAN_SCH_PRIO_TBL_CMDID,
+ WMI_SCAN_UPDATE_REQUEST_CMDID,
+ WMI_SCAN_PROB_REQ_OUI_CMDID,
+ WMI_SCAN_ADAPTIVE_DWELL_CONFIG_CMDID,
+ WMI_PDEV_SET_REGDOMAIN_CMDID = WMI_TLV_CMD(WMI_GRP_PDEV),
+ WMI_PDEV_SET_CHANNEL_CMDID,
+ WMI_PDEV_SET_PARAM_CMDID,
+ WMI_PDEV_PKTLOG_ENABLE_CMDID,
+ WMI_PDEV_PKTLOG_DISABLE_CMDID,
+ WMI_PDEV_SET_WMM_PARAMS_CMDID,
+ WMI_PDEV_SET_HT_CAP_IE_CMDID,
+ WMI_PDEV_SET_VHT_CAP_IE_CMDID,
+ WMI_PDEV_SET_DSCP_TID_MAP_CMDID,
+ WMI_PDEV_SET_QUIET_MODE_CMDID,
+ WMI_PDEV_GREEN_AP_PS_ENABLE_CMDID,
+ WMI_PDEV_GET_TPC_CONFIG_CMDID,
+ WMI_PDEV_SET_BASE_MACADDR_CMDID,
+ WMI_PDEV_DUMP_CMDID,
+ WMI_PDEV_SET_LED_CONFIG_CMDID,
+ WMI_PDEV_GET_TEMPERATURE_CMDID,
+ WMI_PDEV_SET_LED_FLASHING_CMDID,
+ WMI_PDEV_SMART_ANT_ENABLE_CMDID,
+ WMI_PDEV_SMART_ANT_SET_RX_ANTENNA_CMDID,
+ WMI_PDEV_SET_ANTENNA_SWITCH_TABLE_CMDID,
+ WMI_PDEV_SET_CTL_TABLE_CMDID,
+ WMI_PDEV_SET_MIMOGAIN_TABLE_CMDID,
+ WMI_PDEV_FIPS_CMDID,
+ WMI_PDEV_GET_ANI_CCK_CONFIG_CMDID,
+ WMI_PDEV_GET_ANI_OFDM_CONFIG_CMDID,
+ WMI_PDEV_GET_NFCAL_POWER_CMDID,
+ WMI_PDEV_GET_TPC_CMDID,
+ WMI_MIB_STATS_ENABLE_CMDID,
+ WMI_PDEV_SET_PCL_CMDID,
+ WMI_PDEV_SET_HW_MODE_CMDID,
+ WMI_PDEV_SET_MAC_CONFIG_CMDID,
+ WMI_PDEV_SET_ANTENNA_MODE_CMDID,
+ WMI_SET_PERIODIC_CHANNEL_STATS_CONFIG_CMDID,
+ WMI_PDEV_WAL_POWER_DEBUG_CMDID,
+ WMI_PDEV_SET_REORDER_TIMEOUT_VAL_CMDID,
+ WMI_PDEV_SET_WAKEUP_CONFIG_CMDID,
+ WMI_PDEV_GET_ANTDIV_STATUS_CMDID,
+ WMI_PDEV_GET_CHIP_POWER_STATS_CMDID,
+ WMI_PDEV_SET_STATS_THRESHOLD_CMDID,
+ WMI_PDEV_MULTIPLE_VDEV_RESTART_REQUEST_CMDID,
+ WMI_PDEV_UPDATE_PKT_ROUTING_CMDID,
+ WMI_PDEV_CHECK_CAL_VERSION_CMDID,
+ WMI_PDEV_SET_DIVERSITY_GAIN_CMDID,
+ WMI_PDEV_DIV_GET_RSSI_ANTID_CMDID,
+ WMI_PDEV_BSS_CHAN_INFO_REQUEST_CMDID,
+ WMI_PDEV_UPDATE_PMK_CACHE_CMDID,
+ WMI_PDEV_UPDATE_FILS_HLP_PKT_CMDID,
+ WMI_PDEV_UPDATE_CTLTABLE_REQUEST_CMDID,
+ WMI_PDEV_CONFIG_VENDOR_OUI_ACTION_CMDID,
+ WMI_PDEV_SET_AC_TX_QUEUE_OPTIMIZED_CMDID,
+ WMI_PDEV_SET_RX_FILTER_PROMISCUOUS_CMDID,
+ WMI_PDEV_DMA_RING_CFG_REQ_CMDID,
+ WMI_PDEV_HE_TB_ACTION_FRM_CMDID,
+ WMI_PDEV_PKTLOG_FILTER_CMDID,
+ WMI_VDEV_CREATE_CMDID = WMI_TLV_CMD(WMI_GRP_VDEV),
+ WMI_VDEV_DELETE_CMDID,
+ WMI_VDEV_START_REQUEST_CMDID,
+ WMI_VDEV_RESTART_REQUEST_CMDID,
+ WMI_VDEV_UP_CMDID,
+ WMI_VDEV_STOP_CMDID,
+ WMI_VDEV_DOWN_CMDID,
+ WMI_VDEV_SET_PARAM_CMDID,
+ WMI_VDEV_INSTALL_KEY_CMDID,
+ WMI_VDEV_WNM_SLEEPMODE_CMDID,
+ WMI_VDEV_WMM_ADDTS_CMDID,
+ WMI_VDEV_WMM_DELTS_CMDID,
+ WMI_VDEV_SET_WMM_PARAMS_CMDID,
+ WMI_VDEV_SET_GTX_PARAMS_CMDID,
+ WMI_VDEV_IPSEC_NATKEEPALIVE_FILTER_CMDID,
+ WMI_VDEV_PLMREQ_START_CMDID,
+ WMI_VDEV_PLMREQ_STOP_CMDID,
+ WMI_VDEV_TSF_TSTAMP_ACTION_CMDID,
+ WMI_VDEV_SET_IE_CMDID,
+ WMI_VDEV_RATEMASK_CMDID,
+ WMI_VDEV_ATF_REQUEST_CMDID,
+ WMI_VDEV_SET_DSCP_TID_MAP_CMDID,
+ WMI_VDEV_FILTER_NEIGHBOR_RX_PACKETS_CMDID,
+ WMI_VDEV_SET_QUIET_MODE_CMDID,
+ WMI_VDEV_SET_CUSTOM_AGGR_SIZE_CMDID,
+ WMI_VDEV_ENCRYPT_DECRYPT_DATA_REQ_CMDID,
+ WMI_VDEV_ADD_MAC_ADDR_TO_RX_FILTER_CMDID,
+ WMI_PEER_CREATE_CMDID = WMI_TLV_CMD(WMI_GRP_PEER),
+ WMI_PEER_DELETE_CMDID,
+ WMI_PEER_FLUSH_TIDS_CMDID,
+ WMI_PEER_SET_PARAM_CMDID,
+ WMI_PEER_ASSOC_CMDID,
+ WMI_PEER_ADD_WDS_ENTRY_CMDID,
+ WMI_PEER_REMOVE_WDS_ENTRY_CMDID,
+ WMI_PEER_MCAST_GROUP_CMDID,
+ WMI_PEER_INFO_REQ_CMDID,
+ WMI_PEER_GET_ESTIMATED_LINKSPEED_CMDID,
+ WMI_PEER_SET_RATE_REPORT_CONDITION_CMDID,
+ WMI_PEER_UPDATE_WDS_ENTRY_CMDID,
+ WMI_PEER_ADD_PROXY_STA_ENTRY_CMDID,
+ WMI_PEER_SMART_ANT_SET_TX_ANTENNA_CMDID,
+ WMI_PEER_SMART_ANT_SET_TRAIN_INFO_CMDID,
+ WMI_PEER_SMART_ANT_SET_NODE_CONFIG_OPS_CMDID,
+ WMI_PEER_ATF_REQUEST_CMDID,
+ WMI_PEER_BWF_REQUEST_CMDID,
+ WMI_PEER_REORDER_QUEUE_SETUP_CMDID,
+ WMI_PEER_REORDER_QUEUE_REMOVE_CMDID,
+ WMI_PEER_SET_RX_BLOCKSIZE_CMDID,
+ WMI_PEER_ANTDIV_INFO_REQ_CMDID,
+ WMI_BCN_TX_CMDID = WMI_TLV_CMD(WMI_GRP_MGMT),
+ WMI_PDEV_SEND_BCN_CMDID,
+ WMI_BCN_TMPL_CMDID,
+ WMI_BCN_FILTER_RX_CMDID,
+ WMI_PRB_REQ_FILTER_RX_CMDID,
+ WMI_MGMT_TX_CMDID,
+ WMI_PRB_TMPL_CMDID,
+ WMI_MGMT_TX_SEND_CMDID,
+ WMI_OFFCHAN_DATA_TX_SEND_CMDID,
+ WMI_PDEV_SEND_FD_CMDID,
+ WMI_BCN_OFFLOAD_CTRL_CMDID,
+ WMI_BSS_COLOR_CHANGE_ENABLE_CMDID,
+ WMI_VDEV_BCN_OFFLOAD_QUIET_CONFIG_CMDID,
+ WMI_FILS_DISCOVERY_TMPL_CMDID,
+ WMI_ADDBA_CLEAR_RESP_CMDID = WMI_TLV_CMD(WMI_GRP_BA_NEG),
+ WMI_ADDBA_SEND_CMDID,
+ WMI_ADDBA_STATUS_CMDID,
+ WMI_DELBA_SEND_CMDID,
+ WMI_ADDBA_SET_RESP_CMDID,
+ WMI_SEND_SINGLEAMSDU_CMDID,
+ WMI_STA_POWERSAVE_MODE_CMDID = WMI_TLV_CMD(WMI_GRP_STA_PS),
+ WMI_STA_POWERSAVE_PARAM_CMDID,
+ WMI_STA_MIMO_PS_MODE_CMDID,
+ WMI_PDEV_DFS_ENABLE_CMDID = WMI_TLV_CMD(WMI_GRP_DFS),
+ WMI_PDEV_DFS_DISABLE_CMDID,
+ WMI_DFS_PHYERR_FILTER_ENA_CMDID,
+ WMI_DFS_PHYERR_FILTER_DIS_CMDID,
+ WMI_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMDID,
+ WMI_PDEV_DFS_PHYERR_OFFLOAD_DISABLE_CMDID,
+ WMI_VDEV_ADFS_CH_CFG_CMDID,
+ WMI_VDEV_ADFS_OCAC_ABORT_CMDID,
+ WMI_ROAM_SCAN_MODE = WMI_TLV_CMD(WMI_GRP_ROAM),
+ WMI_ROAM_SCAN_RSSI_THRESHOLD,
+ WMI_ROAM_SCAN_PERIOD,
+ WMI_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
+ WMI_ROAM_AP_PROFILE,
+ WMI_ROAM_CHAN_LIST,
+ WMI_ROAM_SCAN_CMD,
+ WMI_ROAM_SYNCH_COMPLETE,
+ WMI_ROAM_SET_RIC_REQUEST_CMDID,
+ WMI_ROAM_INVOKE_CMDID,
+ WMI_ROAM_FILTER_CMDID,
+ WMI_ROAM_SUBNET_CHANGE_CONFIG_CMDID,
+ WMI_ROAM_CONFIGURE_MAWC_CMDID,
+ WMI_ROAM_SET_MBO_PARAM_CMDID,
+ WMI_ROAM_PER_CONFIG_CMDID,
+ WMI_ROAM_BTM_CONFIG_CMDID,
+ WMI_ENABLE_FILS_CMDID,
+ WMI_OFL_SCAN_ADD_AP_PROFILE = WMI_TLV_CMD(WMI_GRP_OFL_SCAN),
+ WMI_OFL_SCAN_REMOVE_AP_PROFILE,
+ WMI_OFL_SCAN_PERIOD,
+ WMI_P2P_DEV_SET_DEVICE_INFO = WMI_TLV_CMD(WMI_GRP_P2P),
+ WMI_P2P_DEV_SET_DISCOVERABILITY,
+ WMI_P2P_GO_SET_BEACON_IE,
+ WMI_P2P_GO_SET_PROBE_RESP_IE,
+ WMI_P2P_SET_VENDOR_IE_DATA_CMDID,
+ WMI_P2P_DISC_OFFLOAD_CONFIG_CMDID,
+ WMI_P2P_DISC_OFFLOAD_APPIE_CMDID,
+ WMI_P2P_DISC_OFFLOAD_PATTERN_CMDID,
+ WMI_P2P_SET_OPPPS_PARAM_CMDID,
+ WMI_P2P_LISTEN_OFFLOAD_START_CMDID,
+ WMI_P2P_LISTEN_OFFLOAD_STOP_CMDID,
+ WMI_AP_PS_PEER_PARAM_CMDID = WMI_TLV_CMD(WMI_GRP_AP_PS),
+ WMI_AP_PS_PEER_UAPSD_COEX_CMDID,
+ WMI_AP_PS_EGAP_PARAM_CMDID,
+ WMI_PEER_RATE_RETRY_SCHED_CMDID = WMI_TLV_CMD(WMI_GRP_RATE_CTRL),
+ WMI_WLAN_PROFILE_TRIGGER_CMDID = WMI_TLV_CMD(WMI_GRP_PROFILE),
+ WMI_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
+ WMI_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
+ WMI_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
+ WMI_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
+ WMI_PDEV_SUSPEND_CMDID = WMI_TLV_CMD(WMI_GRP_SUSPEND),
+ WMI_PDEV_RESUME_CMDID,
+ WMI_ADD_BCN_FILTER_CMDID = WMI_TLV_CMD(WMI_GRP_BCN_FILTER),
+ WMI_RMV_BCN_FILTER_CMDID,
+ WMI_WOW_ADD_WAKE_PATTERN_CMDID = WMI_TLV_CMD(WMI_GRP_WOW),
+ WMI_WOW_DEL_WAKE_PATTERN_CMDID,
+ WMI_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
+ WMI_WOW_ENABLE_CMDID,
+ WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
+ WMI_WOW_IOAC_ADD_KEEPALIVE_CMDID,
+ WMI_WOW_IOAC_DEL_KEEPALIVE_CMDID,
+ WMI_WOW_IOAC_ADD_WAKE_PATTERN_CMDID,
+ WMI_WOW_IOAC_DEL_WAKE_PATTERN_CMDID,
+ WMI_D0_WOW_ENABLE_DISABLE_CMDID,
+ WMI_EXTWOW_ENABLE_CMDID,
+ WMI_EXTWOW_SET_APP_TYPE1_PARAMS_CMDID,
+ WMI_EXTWOW_SET_APP_TYPE2_PARAMS_CMDID,
+ WMI_WOW_ENABLE_ICMPV6_NA_FLT_CMDID,
+ WMI_WOW_UDP_SVC_OFLD_CMDID,
+ WMI_WOW_HOSTWAKEUP_GPIO_PIN_PATTERN_CONFIG_CMDID,
+ WMI_WOW_SET_ACTION_WAKE_UP_CMDID,
+ WMI_RTT_MEASREQ_CMDID = WMI_TLV_CMD(WMI_GRP_RTT),
+ WMI_RTT_TSF_CMDID,
+ WMI_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID = WMI_TLV_CMD(WMI_GRP_SPECTRAL),
+ WMI_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
+ WMI_REQUEST_STATS_CMDID = WMI_TLV_CMD(WMI_GRP_STATS),
+ WMI_MCC_SCHED_TRAFFIC_STATS_CMDID,
+ WMI_REQUEST_STATS_EXT_CMDID,
+ WMI_REQUEST_LINK_STATS_CMDID,
+ WMI_START_LINK_STATS_CMDID,
+ WMI_CLEAR_LINK_STATS_CMDID,
+ WMI_GET_FW_MEM_DUMP_CMDID,
+ WMI_DEBUG_MESG_FLUSH_CMDID,
+ WMI_DIAG_EVENT_LOG_CONFIG_CMDID,
+ WMI_REQUEST_WLAN_STATS_CMDID,
+ WMI_REQUEST_RCPI_CMDID,
+ WMI_REQUEST_PEER_STATS_INFO_CMDID,
+ WMI_REQUEST_RADIO_CHAN_STATS_CMDID,
+ WMI_SET_ARP_NS_OFFLOAD_CMDID = WMI_TLV_CMD(WMI_GRP_ARP_NS_OFL),
+ WMI_ADD_PROACTIVE_ARP_RSP_PATTERN_CMDID,
+ WMI_DEL_PROACTIVE_ARP_RSP_PATTERN_CMDID,
+ WMI_NETWORK_LIST_OFFLOAD_CONFIG_CMDID = WMI_TLV_CMD(WMI_GRP_NLO_OFL),
+ WMI_APFIND_CMDID,
+ WMI_PASSPOINT_LIST_CONFIG_CMDID,
+ WMI_NLO_CONFIGURE_MAWC_CMDID,
+ WMI_GTK_OFFLOAD_CMDID = WMI_TLV_CMD(WMI_GRP_GTK_OFL),
+ WMI_CSA_OFFLOAD_ENABLE_CMDID = WMI_TLV_CMD(WMI_GRP_CSA_OFL),
+ WMI_CSA_OFFLOAD_CHANSWITCH_CMDID,
+ WMI_CHATTER_SET_MODE_CMDID = WMI_TLV_CMD(WMI_GRP_CHATTER),
+ WMI_CHATTER_ADD_COALESCING_FILTER_CMDID,
+ WMI_CHATTER_DELETE_COALESCING_FILTER_CMDID,
+ WMI_CHATTER_COALESCING_QUERY_CMDID,
+ WMI_PEER_TID_ADDBA_CMDID = WMI_TLV_CMD(WMI_GRP_TID_ADDBA),
+ WMI_PEER_TID_DELBA_CMDID,
+ WMI_STA_DTIM_PS_METHOD_CMDID,
+ WMI_STA_UAPSD_AUTO_TRIG_CMDID,
+ WMI_STA_KEEPALIVE_CMDID,
+ WMI_BA_REQ_SSN_CMDID,
+ WMI_ECHO_CMDID = WMI_TLV_CMD(WMI_GRP_MISC),
+ WMI_PDEV_UTF_CMDID,
+ WMI_DBGLOG_CFG_CMDID,
+ WMI_PDEV_QVIT_CMDID,
+ WMI_PDEV_FTM_INTG_CMDID,
+ WMI_VDEV_SET_KEEPALIVE_CMDID,
+ WMI_VDEV_GET_KEEPALIVE_CMDID,
+ WMI_FORCE_FW_HANG_CMDID,
+ WMI_SET_MCASTBCAST_FILTER_CMDID,
+ WMI_THERMAL_MGMT_CMDID,
+ WMI_HOST_AUTO_SHUTDOWN_CFG_CMDID,
+ WMI_TPC_CHAINMASK_CONFIG_CMDID,
+ WMI_SET_ANTENNA_DIVERSITY_CMDID,
+ WMI_OCB_SET_SCHED_CMDID,
+ WMI_RSSI_BREACH_MONITOR_CONFIG_CMDID,
+ WMI_LRO_CONFIG_CMDID,
+ WMI_TRANSFER_DATA_TO_FLASH_CMDID,
+ WMI_CONFIG_ENHANCED_MCAST_FILTER_CMDID,
+ WMI_VDEV_WISA_CMDID,
+ WMI_DBGLOG_TIME_STAMP_SYNC_CMDID,
+ WMI_SET_MULTIPLE_MCAST_FILTER_CMDID,
+ WMI_READ_DATA_FROM_FLASH_CMDID,
+ WMI_THERM_THROT_SET_CONF_CMDID,
+ WMI_RUNTIME_DPD_RECAL_CMDID,
+ WMI_GET_TPC_POWER_CMDID,
+ WMI_IDLE_TRIGGER_MONITOR_CMDID,
+ WMI_GPIO_CONFIG_CMDID = WMI_TLV_CMD(WMI_GRP_GPIO),
+ WMI_GPIO_OUTPUT_CMDID,
+ WMI_TXBF_CMDID,
+ WMI_FWTEST_VDEV_MCC_SET_TBTT_MODE_CMDID = WMI_TLV_CMD(WMI_GRP_FWTEST),
+ WMI_FWTEST_P2P_SET_NOA_PARAM_CMDID,
+ WMI_UNIT_TEST_CMDID,
+ WMI_FWTEST_CMDID,
+ WMI_QBOOST_CFG_CMDID,
+ WMI_TDLS_SET_STATE_CMDID = WMI_TLV_CMD(WMI_GRP_TDLS),
+ WMI_TDLS_PEER_UPDATE_CMDID,
+ WMI_TDLS_SET_OFFCHAN_MODE_CMDID,
+ WMI_RESMGR_ADAPTIVE_OCS_EN_DIS_CMDID = WMI_TLV_CMD(WMI_GRP_RESMGR),
+ WMI_RESMGR_SET_CHAN_TIME_QUOTA_CMDID,
+ WMI_RESMGR_SET_CHAN_LATENCY_CMDID,
+ WMI_STA_SMPS_FORCE_MODE_CMDID = WMI_TLV_CMD(WMI_GRP_STA_SMPS),
+ WMI_STA_SMPS_PARAM_CMDID,
+ WMI_HB_SET_ENABLE_CMDID = WMI_TLV_CMD(WMI_GRP_WLAN_HB),
+ WMI_HB_SET_TCP_PARAMS_CMDID,
+ WMI_HB_SET_TCP_PKT_FILTER_CMDID,
+ WMI_HB_SET_UDP_PARAMS_CMDID,
+ WMI_HB_SET_UDP_PKT_FILTER_CMDID,
+ WMI_RMC_SET_MODE_CMDID = WMI_TLV_CMD(WMI_GRP_RMC),
+ WMI_RMC_SET_ACTION_PERIOD_CMDID,
+ WMI_RMC_CONFIG_CMDID,
+ WMI_RMC_SET_MANUAL_LEADER_CMDID,
+ WMI_MHF_OFFLOAD_SET_MODE_CMDID = WMI_TLV_CMD(WMI_GRP_MHF_OFL),
+ WMI_MHF_OFFLOAD_PLUMB_ROUTING_TBL_CMDID,
+ WMI_BATCH_SCAN_ENABLE_CMDID = WMI_TLV_CMD(WMI_GRP_LOCATION_SCAN),
+ WMI_BATCH_SCAN_DISABLE_CMDID,
+ WMI_BATCH_SCAN_TRIGGER_RESULT_CMDID,
+ WMI_OEM_REQ_CMDID = WMI_TLV_CMD(WMI_GRP_OEM),
+ WMI_OEM_REQUEST_CMDID,
+ WMI_LPI_OEM_REQ_CMDID,
+ WMI_NAN_CMDID = WMI_TLV_CMD(WMI_GRP_NAN),
+ WMI_MODEM_POWER_STATE_CMDID = WMI_TLV_CMD(WMI_GRP_COEX),
+ WMI_CHAN_AVOID_UPDATE_CMDID,
+ WMI_COEX_CONFIG_CMDID,
+ WMI_CHAN_AVOID_RPT_ALLOW_CMDID,
+ WMI_COEX_GET_ANTENNA_ISOLATION_CMDID,
+ WMI_SAR_LIMITS_CMDID,
+ WMI_OBSS_SCAN_ENABLE_CMDID = WMI_TLV_CMD(WMI_GRP_OBSS_OFL),
+ WMI_OBSS_SCAN_DISABLE_CMDID,
+ WMI_OBSS_COLOR_COLLISION_DET_CONFIG_CMDID,
+ WMI_LPI_MGMT_SNOOPING_CONFIG_CMDID = WMI_TLV_CMD(WMI_GRP_LPI),
+ WMI_LPI_START_SCAN_CMDID,
+ WMI_LPI_STOP_SCAN_CMDID,
+ WMI_EXTSCAN_START_CMDID = WMI_TLV_CMD(WMI_GRP_EXTSCAN),
+ WMI_EXTSCAN_STOP_CMDID,
+ WMI_EXTSCAN_CONFIGURE_WLAN_CHANGE_MONITOR_CMDID,
+ WMI_EXTSCAN_CONFIGURE_HOTLIST_MONITOR_CMDID,
+ WMI_EXTSCAN_GET_CACHED_RESULTS_CMDID,
+ WMI_EXTSCAN_GET_WLAN_CHANGE_RESULTS_CMDID,
+ WMI_EXTSCAN_SET_CAPABILITIES_CMDID,
+ WMI_EXTSCAN_GET_CAPABILITIES_CMDID,
+ WMI_EXTSCAN_CONFIGURE_HOTLIST_SSID_MONITOR_CMDID,
+ WMI_EXTSCAN_CONFIGURE_MAWC_CMDID,
+ WMI_SET_DHCP_SERVER_OFFLOAD_CMDID = WMI_TLV_CMD(WMI_GRP_DHCP_OFL),
+ WMI_IPA_OFFLOAD_ENABLE_DISABLE_CMDID = WMI_TLV_CMD(WMI_GRP_IPA),
+ WMI_MDNS_OFFLOAD_ENABLE_CMDID = WMI_TLV_CMD(WMI_GRP_MDNS_OFL),
+ WMI_MDNS_SET_FQDN_CMDID,
+ WMI_MDNS_SET_RESPONSE_CMDID,
+ WMI_MDNS_GET_STATS_CMDID,
+ WMI_SAP_OFL_ENABLE_CMDID = WMI_TLV_CMD(WMI_GRP_SAP_OFL),
+ WMI_SAP_SET_BLACKLIST_PARAM_CMDID,
+ WMI_OCB_SET_CONFIG_CMDID = WMI_TLV_CMD(WMI_GRP_OCB),
+ WMI_OCB_SET_UTC_TIME_CMDID,
+ WMI_OCB_START_TIMING_ADVERT_CMDID,
+ WMI_OCB_STOP_TIMING_ADVERT_CMDID,
+ WMI_OCB_GET_TSF_TIMER_CMDID,
+ WMI_DCC_GET_STATS_CMDID,
+ WMI_DCC_CLEAR_STATS_CMDID,
+ WMI_DCC_UPDATE_NDL_CMDID,
+ WMI_SOC_SET_PCL_CMDID = WMI_TLV_CMD(WMI_GRP_SOC),
+ WMI_SOC_SET_HW_MODE_CMDID,
+ WMI_SOC_SET_DUAL_MAC_CONFIG_CMDID,
+ WMI_SOC_SET_ANTENNA_MODE_CMDID,
+ WMI_PACKET_FILTER_CONFIG_CMDID = WMI_TLV_CMD(WMI_GRP_PKT_FILTER),
+ WMI_PACKET_FILTER_ENABLE_CMDID,
+ WMI_MAWC_SENSOR_REPORT_IND_CMDID = WMI_TLV_CMD(WMI_GRP_MAWC),
+ WMI_PMF_OFFLOAD_SET_SA_QUERY_CMDID = WMI_TLV_CMD(WMI_GRP_PMF_OFFLOAD),
+ WMI_BPF_GET_CAPABILITY_CMDID = WMI_TLV_CMD(WMI_GRP_BPF_OFFLOAD),
+ WMI_BPF_GET_VDEV_STATS_CMDID,
+ WMI_BPF_SET_VDEV_INSTRUCTIONS_CMDID,
+ WMI_BPF_DEL_VDEV_INSTRUCTIONS_CMDID,
+ WMI_BPF_SET_VDEV_ACTIVE_MODE_CMDID,
+ WMI_MNT_FILTER_CMDID = WMI_TLV_CMD(WMI_GRP_MONITOR),
+ WMI_SET_CURRENT_COUNTRY_CMDID = WMI_TLV_CMD(WMI_GRP_REGULATORY),
+ WMI_11D_SCAN_START_CMDID,
+ WMI_11D_SCAN_STOP_CMDID,
+ WMI_SET_INIT_COUNTRY_CMDID,
+ WMI_NDI_GET_CAP_REQ_CMDID = WMI_TLV_CMD(WMI_GRP_PROTOTYPE),
+ WMI_NDP_INITIATOR_REQ_CMDID,
+ WMI_NDP_RESPONDER_REQ_CMDID,
+ WMI_NDP_END_REQ_CMDID,
+ WMI_HW_DATA_FILTER_CMDID = WMI_TLV_CMD(WMI_GRP_HW_DATA_FILTER),
+ WMI_TWT_ENABLE_CMDID = WMI_TLV_CMD(WMI_GRP_TWT),
+ WMI_TWT_DISABLE_CMDID,
+ WMI_TWT_ADD_DIALOG_CMDID,
+ WMI_TWT_DEL_DIALOG_CMDID,
+ WMI_TWT_PAUSE_DIALOG_CMDID,
+ WMI_TWT_RESUME_DIALOG_CMDID,
+ WMI_PDEV_OBSS_PD_SPATIAL_REUSE_CMDID =
+ WMI_TLV_CMD(WMI_GRP_SPATIAL_REUSE),
+ WMI_PDEV_OBSS_PD_SPATIAL_REUSE_SET_DEF_OBSS_THRESH_CMDID,
+};
+
+enum wmi_tlv_event_id {
+ WMI_SERVICE_READY_EVENTID = 0x1,
+ WMI_READY_EVENTID,
+ WMI_SERVICE_AVAILABLE_EVENTID,
+ WMI_SCAN_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_SCAN),
+ WMI_PDEV_TPC_CONFIG_EVENTID = WMI_TLV_CMD(WMI_GRP_PDEV),
+ WMI_CHAN_INFO_EVENTID,
+ WMI_PHYERR_EVENTID,
+ WMI_PDEV_DUMP_EVENTID,
+ WMI_TX_PAUSE_EVENTID,
+ WMI_DFS_RADAR_EVENTID,
+ WMI_PDEV_L1SS_TRACK_EVENTID,
+ WMI_PDEV_TEMPERATURE_EVENTID,
+ WMI_SERVICE_READY_EXT_EVENTID,
+ WMI_PDEV_FIPS_EVENTID,
+ WMI_PDEV_CHANNEL_HOPPING_EVENTID,
+ WMI_PDEV_ANI_CCK_LEVEL_EVENTID,
+ WMI_PDEV_ANI_OFDM_LEVEL_EVENTID,
+ WMI_PDEV_TPC_EVENTID,
+ WMI_PDEV_NFCAL_POWER_ALL_CHANNELS_EVENTID,
+ WMI_PDEV_SET_HW_MODE_RESP_EVENTID,
+ WMI_PDEV_HW_MODE_TRANSITION_EVENTID,
+ WMI_PDEV_SET_MAC_CONFIG_RESP_EVENTID,
+ WMI_PDEV_ANTDIV_STATUS_EVENTID,
+ WMI_PDEV_CHIP_POWER_STATS_EVENTID,
+ WMI_PDEV_CHIP_POWER_SAVE_FAILURE_DETECTED_EVENTID,
+ WMI_PDEV_CSA_SWITCH_COUNT_STATUS_EVENTID,
+ WMI_PDEV_CHECK_CAL_VERSION_EVENTID,
+ WMI_PDEV_DIV_RSSI_ANTID_EVENTID,
+ WMI_PDEV_BSS_CHAN_INFO_EVENTID,
+ WMI_PDEV_UPDATE_CTLTABLE_EVENTID,
+ WMI_PDEV_DMA_RING_CFG_RSP_EVENTID,
+ WMI_PDEV_DMA_RING_BUF_RELEASE_EVENTID,
+ WMI_PDEV_CTL_FAILSAFE_CHECK_EVENTID,
+ WMI_PDEV_CSC_SWITCH_COUNT_STATUS_EVENTID,
+ WMI_PDEV_COLD_BOOT_CAL_DATA_EVENTID,
+ WMI_PDEV_RAP_INFO_EVENTID,
+ WMI_CHAN_RF_CHARACTERIZATION_INFO_EVENTID,
+ WMI_SERVICE_READY_EXT2_EVENTID,
+ WMI_VDEV_START_RESP_EVENTID = WMI_TLV_CMD(WMI_GRP_VDEV),
+ WMI_VDEV_STOPPED_EVENTID,
+ WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID,
+ WMI_VDEV_MCC_BCN_INTERVAL_CHANGE_REQ_EVENTID,
+ WMI_VDEV_TSF_REPORT_EVENTID,
+ WMI_VDEV_DELETE_RESP_EVENTID,
+ WMI_VDEV_ENCRYPT_DECRYPT_DATA_RESP_EVENTID,
+ WMI_VDEV_ADD_MAC_ADDR_TO_RX_FILTER_STATUS_EVENTID,
+ WMI_PEER_STA_KICKOUT_EVENTID = WMI_TLV_CMD(WMI_GRP_PEER),
+ WMI_PEER_INFO_EVENTID,
+ WMI_PEER_TX_FAIL_CNT_THR_EVENTID,
+ WMI_PEER_ESTIMATED_LINKSPEED_EVENTID,
+ WMI_PEER_STATE_EVENTID,
+ WMI_PEER_ASSOC_CONF_EVENTID,
+ WMI_PEER_DELETE_RESP_EVENTID,
+ WMI_PEER_RATECODE_LIST_EVENTID,
+ WMI_WDS_PEER_EVENTID,
+ WMI_PEER_STA_PS_STATECHG_EVENTID,
+ WMI_PEER_ANTDIV_INFO_EVENTID,
+ WMI_PEER_RESERVED0_EVENTID,
+ WMI_PEER_RESERVED1_EVENTID,
+ WMI_PEER_RESERVED2_EVENTID,
+ WMI_PEER_RESERVED3_EVENTID,
+ WMI_PEER_RESERVED4_EVENTID,
+ WMI_PEER_RESERVED5_EVENTID,
+ WMI_PEER_RESERVED6_EVENTID,
+ WMI_PEER_RESERVED7_EVENTID,
+ WMI_PEER_RESERVED8_EVENTID,
+ WMI_PEER_RESERVED9_EVENTID,
+ WMI_PEER_RESERVED10_EVENTID,
+ WMI_PEER_OPER_MODE_CHANGE_EVENTID,
+ WMI_MGMT_RX_EVENTID = WMI_TLV_CMD(WMI_GRP_MGMT),
+ WMI_HOST_SWBA_EVENTID,
+ WMI_TBTTOFFSET_UPDATE_EVENTID,
+ WMI_OFFLOAD_BCN_TX_STATUS_EVENTID,
+ WMI_OFFLOAD_PROB_RESP_TX_STATUS_EVENTID,
+ WMI_MGMT_TX_COMPLETION_EVENTID,
+ WMI_MGMT_TX_BUNDLE_COMPLETION_EVENTID,
+ WMI_TBTTOFFSET_EXT_UPDATE_EVENTID,
+ WMI_OFFCHAN_DATA_TX_COMPLETION_EVENTID,
+ WMI_HOST_FILS_DISCOVERY_EVENTID,
+ WMI_TX_DELBA_COMPLETE_EVENTID = WMI_TLV_CMD(WMI_GRP_BA_NEG),
+ WMI_TX_ADDBA_COMPLETE_EVENTID,
+ WMI_BA_RSP_SSN_EVENTID,
+ WMI_AGGR_STATE_TRIG_EVENTID,
+ WMI_ROAM_EVENTID = WMI_TLV_CMD(WMI_GRP_ROAM),
+ WMI_PROFILE_MATCH,
+ WMI_ROAM_SYNCH_EVENTID,
+ WMI_P2P_DISC_EVENTID = WMI_TLV_CMD(WMI_GRP_P2P),
+ WMI_P2P_NOA_EVENTID,
+ WMI_P2P_LISTEN_OFFLOAD_STOPPED_EVENTID,
+ WMI_AP_PS_EGAP_INFO_EVENTID = WMI_TLV_CMD(WMI_GRP_AP_PS),
+ WMI_PDEV_RESUME_EVENTID = WMI_TLV_CMD(WMI_GRP_SUSPEND),
+ WMI_WOW_WAKEUP_HOST_EVENTID = WMI_TLV_CMD(WMI_GRP_WOW),
+ WMI_D0_WOW_DISABLE_ACK_EVENTID,
+ WMI_WOW_INITIAL_WAKEUP_EVENTID,
+ WMI_RTT_MEASUREMENT_REPORT_EVENTID = WMI_TLV_CMD(WMI_GRP_RTT),
+ WMI_TSF_MEASUREMENT_REPORT_EVENTID,
+ WMI_RTT_ERROR_REPORT_EVENTID,
+ WMI_STATS_EXT_EVENTID = WMI_TLV_CMD(WMI_GRP_STATS),
+ WMI_IFACE_LINK_STATS_EVENTID,
+ WMI_PEER_LINK_STATS_EVENTID,
+ WMI_RADIO_LINK_STATS_EVENTID,
+ WMI_UPDATE_FW_MEM_DUMP_EVENTID,
+ WMI_DIAG_EVENT_LOG_SUPPORTED_EVENTID,
+ WMI_INST_RSSI_STATS_EVENTID,
+ WMI_RADIO_TX_POWER_LEVEL_STATS_EVENTID,
+ WMI_REPORT_STATS_EVENTID,
+ WMI_UPDATE_RCPI_EVENTID,
+ WMI_PEER_STATS_INFO_EVENTID,
+ WMI_RADIO_CHAN_STATS_EVENTID,
+ WMI_NLO_MATCH_EVENTID = WMI_TLV_CMD(WMI_GRP_NLO_OFL),
+ WMI_NLO_SCAN_COMPLETE_EVENTID,
+ WMI_APFIND_EVENTID,
+ WMI_PASSPOINT_MATCH_EVENTID,
+ WMI_GTK_OFFLOAD_STATUS_EVENTID = WMI_TLV_CMD(WMI_GRP_GTK_OFL),
+ WMI_GTK_REKEY_FAIL_EVENTID,
+ WMI_CSA_HANDLING_EVENTID = WMI_TLV_CMD(WMI_GRP_CSA_OFL),
+ WMI_CHATTER_PC_QUERY_EVENTID = WMI_TLV_CMD(WMI_GRP_CHATTER),
+ WMI_PDEV_DFS_RADAR_DETECTION_EVENTID = WMI_TLV_CMD(WMI_GRP_DFS),
+ WMI_VDEV_DFS_CAC_COMPLETE_EVENTID,
+ WMI_VDEV_ADFS_OCAC_COMPLETE_EVENTID,
+ WMI_ECHO_EVENTID = WMI_TLV_CMD(WMI_GRP_MISC),
+ WMI_PDEV_UTF_EVENTID,
+ WMI_DEBUG_MESG_EVENTID,
+ WMI_UPDATE_STATS_EVENTID,
+ WMI_DEBUG_PRINT_EVENTID,
+ WMI_DCS_INTERFERENCE_EVENTID,
+ WMI_PDEV_QVIT_EVENTID,
+ WMI_WLAN_PROFILE_DATA_EVENTID,
+ WMI_PDEV_FTM_INTG_EVENTID,
+ WMI_WLAN_FREQ_AVOID_EVENTID,
+ WMI_VDEV_GET_KEEPALIVE_EVENTID,
+ WMI_THERMAL_MGMT_EVENTID,
+ WMI_DIAG_DATA_CONTAINER_EVENTID,
+ WMI_HOST_AUTO_SHUTDOWN_EVENTID,
+ WMI_UPDATE_WHAL_MIB_STATS_EVENTID,
+ WMI_UPDATE_VDEV_RATE_STATS_EVENTID,
+ WMI_DIAG_EVENTID,
+ WMI_OCB_SET_SCHED_EVENTID,
+ WMI_DEBUG_MESG_FLUSH_COMPLETE_EVENTID,
+ WMI_RSSI_BREACH_EVENTID,
+ WMI_TRANSFER_DATA_TO_FLASH_COMPLETE_EVENTID,
+ WMI_PDEV_UTF_SCPC_EVENTID,
+ WMI_READ_DATA_FROM_FLASH_EVENTID,
+ WMI_REPORT_RX_AGGR_FAILURE_EVENTID,
+ WMI_PKGID_EVENTID,
+ WMI_GPIO_INPUT_EVENTID = WMI_TLV_CMD(WMI_GRP_GPIO),
+ WMI_UPLOADH_EVENTID,
+ WMI_CAPTUREH_EVENTID,
+ WMI_RFKILL_STATE_CHANGE_EVENTID,
+ WMI_TDLS_PEER_EVENTID = WMI_TLV_CMD(WMI_GRP_TDLS),
+ WMI_STA_SMPS_FORCE_MODE_COMPL_EVENTID = WMI_TLV_CMD(WMI_GRP_STA_SMPS),
+ WMI_BATCH_SCAN_ENABLED_EVENTID = WMI_TLV_CMD(WMI_GRP_LOCATION_SCAN),
+ WMI_BATCH_SCAN_RESULT_EVENTID,
+ WMI_OEM_CAPABILITY_EVENTID = WMI_TLV_CMD(WMI_GRP_OEM),
+ WMI_OEM_MEASUREMENT_REPORT_EVENTID,
+ WMI_OEM_ERROR_REPORT_EVENTID,
+ WMI_OEM_RESPONSE_EVENTID,
+ WMI_NAN_EVENTID = WMI_TLV_CMD(WMI_GRP_NAN),
+ WMI_NAN_DISC_IFACE_CREATED_EVENTID,
+ WMI_NAN_DISC_IFACE_DELETED_EVENTID,
+ WMI_NAN_STARTED_CLUSTER_EVENTID,
+ WMI_NAN_JOINED_CLUSTER_EVENTID,
+ WMI_COEX_REPORT_ANTENNA_ISOLATION_EVENTID = WMI_TLV_CMD(WMI_GRP_COEX),
+ WMI_LPI_RESULT_EVENTID = WMI_TLV_CMD(WMI_GRP_LPI),
+ WMI_LPI_STATUS_EVENTID,
+ WMI_LPI_HANDOFF_EVENTID,
+ WMI_EXTSCAN_START_STOP_EVENTID = WMI_TLV_CMD(WMI_GRP_EXTSCAN),
+ WMI_EXTSCAN_OPERATION_EVENTID,
+ WMI_EXTSCAN_TABLE_USAGE_EVENTID,
+ WMI_EXTSCAN_CACHED_RESULTS_EVENTID,
+ WMI_EXTSCAN_WLAN_CHANGE_RESULTS_EVENTID,
+ WMI_EXTSCAN_HOTLIST_MATCH_EVENTID,
+ WMI_EXTSCAN_CAPABILITIES_EVENTID,
+ WMI_EXTSCAN_HOTLIST_SSID_MATCH_EVENTID,
+ WMI_MDNS_STATS_EVENTID = WMI_TLV_CMD(WMI_GRP_MDNS_OFL),
+ WMI_SAP_OFL_ADD_STA_EVENTID = WMI_TLV_CMD(WMI_GRP_SAP_OFL),
+ WMI_SAP_OFL_DEL_STA_EVENTID,
+ WMI_OCB_SET_CONFIG_RESP_EVENTID = WMI_TLV_CMD(WMI_GRP_OCB),
+ WMI_OCB_GET_TSF_TIMER_RESP_EVENTID,
+ WMI_DCC_GET_STATS_RESP_EVENTID,
+ WMI_DCC_UPDATE_NDL_RESP_EVENTID,
+ WMI_DCC_STATS_EVENTID,
+ WMI_SOC_SET_HW_MODE_RESP_EVENTID = WMI_TLV_CMD(WMI_GRP_SOC),
+ WMI_SOC_HW_MODE_TRANSITION_EVENTID,
+ WMI_SOC_SET_DUAL_MAC_CONFIG_RESP_EVENTID,
+ WMI_MAWC_ENABLE_SENSOR_EVENTID = WMI_TLV_CMD(WMI_GRP_MAWC),
+ WMI_BPF_CAPABILIY_INFO_EVENTID = WMI_TLV_CMD(WMI_GRP_BPF_OFFLOAD),
+ WMI_BPF_VDEV_STATS_INFO_EVENTID,
+ WMI_RMC_NEW_LEADER_EVENTID = WMI_TLV_CMD(WMI_GRP_RMC),
+ WMI_REG_CHAN_LIST_CC_EVENTID = WMI_TLV_CMD(WMI_GRP_REGULATORY),
+ WMI_11D_NEW_COUNTRY_EVENTID,
+ WMI_REG_CHAN_LIST_CC_EXT_EVENTID,
+ WMI_NDI_CAP_RSP_EVENTID = WMI_TLV_CMD(WMI_GRP_PROTOTYPE),
+ WMI_NDP_INITIATOR_RSP_EVENTID,
+ WMI_NDP_RESPONDER_RSP_EVENTID,
+ WMI_NDP_END_RSP_EVENTID,
+ WMI_NDP_INDICATION_EVENTID,
+ WMI_NDP_CONFIRM_EVENTID,
+ WMI_NDP_END_INDICATION_EVENTID,
+
+ WMI_TWT_ENABLE_EVENTID = WMI_TLV_CMD(WMI_GRP_TWT),
+ WMI_TWT_DISABLE_EVENTID,
+ WMI_TWT_ADD_DIALOG_EVENTID,
+ WMI_TWT_DEL_DIALOG_EVENTID,
+ WMI_TWT_PAUSE_DIALOG_EVENTID,
+ WMI_TWT_RESUME_DIALOG_EVENTID,
+};
+
+enum wmi_tlv_pdev_param {
+ WMI_PDEV_PARAM_TX_CHAIN_MASK = 0x1,
+ WMI_PDEV_PARAM_RX_CHAIN_MASK,
+ WMI_PDEV_PARAM_TXPOWER_LIMIT2G,
+ WMI_PDEV_PARAM_TXPOWER_LIMIT5G,
+ WMI_PDEV_PARAM_TXPOWER_SCALE,
+ WMI_PDEV_PARAM_BEACON_GEN_MODE,
+ WMI_PDEV_PARAM_BEACON_TX_MODE,
+ WMI_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
+ WMI_PDEV_PARAM_PROTECTION_MODE,
+ WMI_PDEV_PARAM_DYNAMIC_BW,
+ WMI_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
+ WMI_PDEV_PARAM_AGG_SW_RETRY_TH,
+ WMI_PDEV_PARAM_STA_KICKOUT_TH,
+ WMI_PDEV_PARAM_AC_AGGRSIZE_SCALING,
+ WMI_PDEV_PARAM_LTR_ENABLE,
+ WMI_PDEV_PARAM_LTR_AC_LATENCY_BE,
+ WMI_PDEV_PARAM_LTR_AC_LATENCY_BK,
+ WMI_PDEV_PARAM_LTR_AC_LATENCY_VI,
+ WMI_PDEV_PARAM_LTR_AC_LATENCY_VO,
+ WMI_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
+ WMI_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
+ WMI_PDEV_PARAM_LTR_RX_OVERRIDE,
+ WMI_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
+ WMI_PDEV_PARAM_L1SS_ENABLE,
+ WMI_PDEV_PARAM_DSLEEP_ENABLE,
+ WMI_PDEV_PARAM_PCIELP_TXBUF_FLUSH,
+ WMI_PDEV_PARAM_PCIELP_TXBUF_WATERMARK,
+ WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
+ WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE,
+ WMI_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
+ WMI_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
+ WMI_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
+ WMI_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
+ WMI_PDEV_PARAM_PMF_QOS,
+ WMI_PDEV_PARAM_ARP_AC_OVERRIDE,
+ WMI_PDEV_PARAM_DCS,
+ WMI_PDEV_PARAM_ANI_ENABLE,
+ WMI_PDEV_PARAM_ANI_POLL_PERIOD,
+ WMI_PDEV_PARAM_ANI_LISTEN_PERIOD,
+ WMI_PDEV_PARAM_ANI_OFDM_LEVEL,
+ WMI_PDEV_PARAM_ANI_CCK_LEVEL,
+ WMI_PDEV_PARAM_DYNTXCHAIN,
+ WMI_PDEV_PARAM_PROXY_STA,
+ WMI_PDEV_PARAM_IDLE_PS_CONFIG,
+ WMI_PDEV_PARAM_POWER_GATING_SLEEP,
+ WMI_PDEV_PARAM_RFKILL_ENABLE,
+ WMI_PDEV_PARAM_BURST_DUR,
+ WMI_PDEV_PARAM_BURST_ENABLE,
+ WMI_PDEV_PARAM_HW_RFKILL_CONFIG,
+ WMI_PDEV_PARAM_LOW_POWER_RF_ENABLE,
+ WMI_PDEV_PARAM_L1SS_TRACK,
+ WMI_PDEV_PARAM_HYST_EN,
+ WMI_PDEV_PARAM_POWER_COLLAPSE_ENABLE,
+ WMI_PDEV_PARAM_LED_SYS_STATE,
+ WMI_PDEV_PARAM_LED_ENABLE,
+ WMI_PDEV_PARAM_AUDIO_OVER_WLAN_LATENCY,
+ WMI_PDEV_PARAM_AUDIO_OVER_WLAN_ENABLE,
+ WMI_PDEV_PARAM_WHAL_MIB_STATS_UPDATE_ENABLE,
+ WMI_PDEV_PARAM_VDEV_RATE_STATS_UPDATE_PERIOD,
+ WMI_PDEV_PARAM_CTS_CBW,
+ WMI_PDEV_PARAM_WNTS_CONFIG,
+ WMI_PDEV_PARAM_ADAPTIVE_EARLY_RX_ENABLE,
+ WMI_PDEV_PARAM_ADAPTIVE_EARLY_RX_MIN_SLEEP_SLOP,
+ WMI_PDEV_PARAM_ADAPTIVE_EARLY_RX_INC_DEC_STEP,
+ WMI_PDEV_PARAM_EARLY_RX_FIX_SLEEP_SLOP,
+ WMI_PDEV_PARAM_BMISS_BASED_ADAPTIVE_BTO_ENABLE,
+ WMI_PDEV_PARAM_BMISS_BTO_MIN_BCN_TIMEOUT,
+ WMI_PDEV_PARAM_BMISS_BTO_INC_DEC_STEP,
+ WMI_PDEV_PARAM_BTO_FIX_BCN_TIMEOUT,
+ WMI_PDEV_PARAM_CE_BASED_ADAPTIVE_BTO_ENABLE,
+ WMI_PDEV_PARAM_CE_BTO_COMBO_CE_VALUE,
+ WMI_PDEV_PARAM_TX_CHAIN_MASK_2G,
+ WMI_PDEV_PARAM_RX_CHAIN_MASK_2G,
+ WMI_PDEV_PARAM_TX_CHAIN_MASK_5G,
+ WMI_PDEV_PARAM_RX_CHAIN_MASK_5G,
+ WMI_PDEV_PARAM_TX_CHAIN_MASK_CCK,
+ WMI_PDEV_PARAM_TX_CHAIN_MASK_1SS,
+ WMI_PDEV_PARAM_CTS2SELF_FOR_P2P_GO_CONFIG,
+ WMI_PDEV_PARAM_TXPOWER_DECR_DB,
+ WMI_PDEV_PARAM_AGGR_BURST,
+ WMI_PDEV_PARAM_RX_DECAP_MODE,
+ WMI_PDEV_PARAM_FAST_CHANNEL_RESET,
+ WMI_PDEV_PARAM_SMART_ANTENNA_DEFAULT_ANTENNA,
+ WMI_PDEV_PARAM_ANTENNA_GAIN,
+ WMI_PDEV_PARAM_RX_FILTER,
+ WMI_PDEV_SET_MCAST_TO_UCAST_TID,
+ WMI_PDEV_PARAM_PROXY_STA_MODE,
+ WMI_PDEV_PARAM_SET_MCAST2UCAST_MODE,
+ WMI_PDEV_PARAM_SET_MCAST2UCAST_BUFFER,
+ WMI_PDEV_PARAM_REMOVE_MCAST2UCAST_BUFFER,
+ WMI_PDEV_PEER_STA_PS_STATECHG_ENABLE,
+ WMI_PDEV_PARAM_IGMPMLD_AC_OVERRIDE,
+ WMI_PDEV_PARAM_BLOCK_INTERBSS,
+ WMI_PDEV_PARAM_SET_DISABLE_RESET_CMDID,
+ WMI_PDEV_PARAM_SET_MSDU_TTL_CMDID,
+ WMI_PDEV_PARAM_SET_PPDU_DURATION_CMDID,
+ WMI_PDEV_PARAM_TXBF_SOUND_PERIOD_CMDID,
+ WMI_PDEV_PARAM_SET_PROMISC_MODE_CMDID,
+ WMI_PDEV_PARAM_SET_BURST_MODE_CMDID,
+ WMI_PDEV_PARAM_EN_STATS,
+ WMI_PDEV_PARAM_MU_GROUP_POLICY,
+ WMI_PDEV_PARAM_NOISE_DETECTION,
+ WMI_PDEV_PARAM_NOISE_THRESHOLD,
+ WMI_PDEV_PARAM_DPD_ENABLE,
+ WMI_PDEV_PARAM_SET_MCAST_BCAST_ECHO,
+ WMI_PDEV_PARAM_ATF_STRICT_SCH,
+ WMI_PDEV_PARAM_ATF_SCHED_DURATION,
+ WMI_PDEV_PARAM_ANT_PLZN,
+ WMI_PDEV_PARAM_MGMT_RETRY_LIMIT,
+ WMI_PDEV_PARAM_SENSITIVITY_LEVEL,
+ WMI_PDEV_PARAM_SIGNED_TXPOWER_2G,
+ WMI_PDEV_PARAM_SIGNED_TXPOWER_5G,
+ WMI_PDEV_PARAM_ENABLE_PER_TID_AMSDU,
+ WMI_PDEV_PARAM_ENABLE_PER_TID_AMPDU,
+ WMI_PDEV_PARAM_CCA_THRESHOLD,
+ WMI_PDEV_PARAM_RTS_FIXED_RATE,
+ WMI_PDEV_PARAM_PDEV_RESET,
+ WMI_PDEV_PARAM_WAPI_MBSSID_OFFSET,
+ WMI_PDEV_PARAM_ARP_DBG_SRCADDR,
+ WMI_PDEV_PARAM_ARP_DBG_DSTADDR,
+ WMI_PDEV_PARAM_ATF_OBSS_NOISE_SCH,
+ WMI_PDEV_PARAM_ATF_OBSS_NOISE_SCALING_FACTOR,
+ WMI_PDEV_PARAM_CUST_TXPOWER_SCALE,
+ WMI_PDEV_PARAM_ATF_DYNAMIC_ENABLE,
+ WMI_PDEV_PARAM_CTRL_RETRY_LIMIT,
+ WMI_PDEV_PARAM_PROPAGATION_DELAY,
+ WMI_PDEV_PARAM_ENA_ANT_DIV,
+ WMI_PDEV_PARAM_FORCE_CHAIN_ANT,
+ WMI_PDEV_PARAM_ANT_DIV_SELFTEST,
+ WMI_PDEV_PARAM_ANT_DIV_SELFTEST_INTVL,
+ WMI_PDEV_PARAM_STATS_OBSERVATION_PERIOD,
+ WMI_PDEV_PARAM_TX_PPDU_DELAY_BIN_SIZE_MS,
+ WMI_PDEV_PARAM_TX_PPDU_DELAY_ARRAY_LEN,
+ WMI_PDEV_PARAM_TX_MPDU_AGGR_ARRAY_LEN,
+ WMI_PDEV_PARAM_RX_MPDU_AGGR_ARRAY_LEN,
+ WMI_PDEV_PARAM_TX_SCH_DELAY,
+ WMI_PDEV_PARAM_ENABLE_RTS_SIFS_BURSTING,
+ WMI_PDEV_PARAM_MAX_MPDUS_IN_AMPDU,
+ WMI_PDEV_PARAM_PEER_STATS_INFO_ENABLE,
+ WMI_PDEV_PARAM_FAST_PWR_TRANSITION,
+ WMI_PDEV_PARAM_RADIO_CHAN_STATS_ENABLE,
+ WMI_PDEV_PARAM_RADIO_DIAGNOSIS_ENABLE,
+ WMI_PDEV_PARAM_MESH_MCAST_ENABLE,
+};
+
+enum wmi_tlv_vdev_param {
+ WMI_VDEV_PARAM_RTS_THRESHOLD = 0x1,
+ WMI_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
+ WMI_VDEV_PARAM_BEACON_INTERVAL,
+ WMI_VDEV_PARAM_LISTEN_INTERVAL,
+ WMI_VDEV_PARAM_MULTICAST_RATE,
+ WMI_VDEV_PARAM_MGMT_TX_RATE,
+ WMI_VDEV_PARAM_SLOT_TIME,
+ WMI_VDEV_PARAM_PREAMBLE,
+ WMI_VDEV_PARAM_SWBA_TIME,
+ WMI_VDEV_STATS_UPDATE_PERIOD,
+ WMI_VDEV_PWRSAVE_AGEOUT_TIME,
+ WMI_VDEV_HOST_SWBA_INTERVAL,
+ WMI_VDEV_PARAM_DTIM_PERIOD,
+ WMI_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
+ WMI_VDEV_PARAM_WDS,
+ WMI_VDEV_PARAM_ATIM_WINDOW,
+ WMI_VDEV_PARAM_BMISS_COUNT_MAX,
+ WMI_VDEV_PARAM_BMISS_FIRST_BCNT,
+ WMI_VDEV_PARAM_BMISS_FINAL_BCNT,
+ WMI_VDEV_PARAM_FEATURE_WMM,
+ WMI_VDEV_PARAM_CHWIDTH,
+ WMI_VDEV_PARAM_CHEXTOFFSET,
+ WMI_VDEV_PARAM_DISABLE_HTPROTECTION,
+ WMI_VDEV_PARAM_STA_QUICKKICKOUT,
+ WMI_VDEV_PARAM_MGMT_RATE,
+ WMI_VDEV_PARAM_PROTECTION_MODE,
+ WMI_VDEV_PARAM_FIXED_RATE,
+ WMI_VDEV_PARAM_SGI,
+ WMI_VDEV_PARAM_LDPC,
+ WMI_VDEV_PARAM_TX_STBC,
+ WMI_VDEV_PARAM_RX_STBC,
+ WMI_VDEV_PARAM_INTRA_BSS_FWD,
+ WMI_VDEV_PARAM_DEF_KEYID,
+ WMI_VDEV_PARAM_NSS,
+ WMI_VDEV_PARAM_BCAST_DATA_RATE,
+ WMI_VDEV_PARAM_MCAST_DATA_RATE,
+ WMI_VDEV_PARAM_MCAST_INDICATE,
+ WMI_VDEV_PARAM_DHCP_INDICATE,
+ WMI_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
+ WMI_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
+ WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
+ WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
+ WMI_VDEV_PARAM_AP_ENABLE_NAWDS,
+ WMI_VDEV_PARAM_ENABLE_RTSCTS,
+ WMI_VDEV_PARAM_TXBF,
+ WMI_VDEV_PARAM_PACKET_POWERSAVE,
+ WMI_VDEV_PARAM_DROP_UNENCRY,
+ WMI_VDEV_PARAM_TX_ENCAP_TYPE,
+ WMI_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
+ WMI_VDEV_PARAM_EARLY_RX_ADJUST_ENABLE,
+ WMI_VDEV_PARAM_EARLY_RX_TGT_BMISS_NUM,
+ WMI_VDEV_PARAM_EARLY_RX_BMISS_SAMPLE_CYCLE,
+ WMI_VDEV_PARAM_EARLY_RX_SLOP_STEP,
+ WMI_VDEV_PARAM_EARLY_RX_INIT_SLOP,
+ WMI_VDEV_PARAM_EARLY_RX_ADJUST_PAUSE,
+ WMI_VDEV_PARAM_TX_PWRLIMIT,
+ WMI_VDEV_PARAM_SNR_NUM_FOR_CAL,
+ WMI_VDEV_PARAM_ROAM_FW_OFFLOAD,
+ WMI_VDEV_PARAM_ENABLE_RMC,
+ WMI_VDEV_PARAM_IBSS_MAX_BCN_LOST_MS,
+ WMI_VDEV_PARAM_MAX_RATE,
+ WMI_VDEV_PARAM_EARLY_RX_DRIFT_SAMPLE,
+ WMI_VDEV_PARAM_SET_IBSS_TX_FAIL_CNT_THR,
+ WMI_VDEV_PARAM_EBT_RESYNC_TIMEOUT,
+ WMI_VDEV_PARAM_AGGR_TRIG_EVENT_ENABLE,
+ WMI_VDEV_PARAM_IS_IBSS_POWER_SAVE_ALLOWED,
+ WMI_VDEV_PARAM_IS_POWER_COLLAPSE_ALLOWED,
+ WMI_VDEV_PARAM_IS_AWAKE_ON_TXRX_ENABLED,
+ WMI_VDEV_PARAM_INACTIVITY_CNT,
+ WMI_VDEV_PARAM_TXSP_END_INACTIVITY_TIME_MS,
+ WMI_VDEV_PARAM_DTIM_POLICY,
+ WMI_VDEV_PARAM_IBSS_PS_WARMUP_TIME_SECS,
+ WMI_VDEV_PARAM_IBSS_PS_1RX_CHAIN_IN_ATIM_WINDOW_ENABLE,
+ WMI_VDEV_PARAM_RX_LEAK_WINDOW,
+ WMI_VDEV_PARAM_STATS_AVG_FACTOR,
+ WMI_VDEV_PARAM_DISCONNECT_TH,
+ WMI_VDEV_PARAM_RTSCTS_RATE,
+ WMI_VDEV_PARAM_MCC_RTSCTS_PROTECTION_ENABLE,
+ WMI_VDEV_PARAM_MCC_BROADCAST_PROBE_ENABLE,
+ WMI_VDEV_PARAM_TXPOWER_SCALE,
+ WMI_VDEV_PARAM_TXPOWER_SCALE_DECR_DB,
+ WMI_VDEV_PARAM_MCAST2UCAST_SET,
+ WMI_VDEV_PARAM_RC_NUM_RETRIES,
+ WMI_VDEV_PARAM_CABQ_MAXDUR,
+ WMI_VDEV_PARAM_MFPTEST_SET,
+ WMI_VDEV_PARAM_RTS_FIXED_RATE,
+ WMI_VDEV_PARAM_VHT_SGIMASK,
+ WMI_VDEV_PARAM_VHT80_RATEMASK,
+ WMI_VDEV_PARAM_PROXY_STA,
+ WMI_VDEV_PARAM_VIRTUAL_CELL_MODE,
+ WMI_VDEV_PARAM_RX_DECAP_TYPE,
+ WMI_VDEV_PARAM_BW_NSS_RATEMASK,
+ WMI_VDEV_PARAM_SENSOR_AP,
+ WMI_VDEV_PARAM_BEACON_RATE,
+ WMI_VDEV_PARAM_DTIM_ENABLE_CTS,
+ WMI_VDEV_PARAM_STA_KICKOUT,
+ WMI_VDEV_PARAM_CAPABILITIES,
+ WMI_VDEV_PARAM_TSF_INCREMENT,
+ WMI_VDEV_PARAM_AMPDU_PER_AC,
+ WMI_VDEV_PARAM_RX_FILTER,
+ WMI_VDEV_PARAM_MGMT_TX_POWER,
+ WMI_VDEV_PARAM_NON_AGG_SW_RETRY_TH,
+ WMI_VDEV_PARAM_AGG_SW_RETRY_TH,
+ WMI_VDEV_PARAM_DISABLE_DYN_BW_RTS,
+ WMI_VDEV_PARAM_ATF_SSID_SCHED_POLICY,
+ WMI_VDEV_PARAM_HE_DCM,
+ WMI_VDEV_PARAM_HE_RANGE_EXT,
+ WMI_VDEV_PARAM_ENABLE_BCAST_PROBE_RESPONSE,
+ WMI_VDEV_PARAM_FILS_MAX_CHANNEL_GUARD_TIME,
+ WMI_VDEV_PARAM_BA_MODE = 0x7e,
+ WMI_VDEV_PARAM_SET_HE_SOUNDING_MODE = 0x87,
+ WMI_VDEV_PARAM_6GHZ_PARAMS = 0x99,
+ WMI_VDEV_PARAM_PROTOTYPE = 0x8000,
+ WMI_VDEV_PARAM_BSS_COLOR,
+ WMI_VDEV_PARAM_SET_HEMU_MODE,
+ WMI_VDEV_PARAM_HEOPS_0_31 = 0x8003,
+};
+
+enum wmi_tlv_peer_flags {
+ WMI_TLV_PEER_AUTH = 0x00000001,
+ WMI_TLV_PEER_QOS = 0x00000002,
+ WMI_TLV_PEER_NEED_PTK_4_WAY = 0x00000004,
+ WMI_TLV_PEER_NEED_GTK_2_WAY = 0x00000010,
+ WMI_TLV_PEER_APSD = 0x00000800,
+ WMI_TLV_PEER_HT = 0x00001000,
+ WMI_TLV_PEER_40MHZ = 0x00002000,
+ WMI_TLV_PEER_STBC = 0x00008000,
+ WMI_TLV_PEER_LDPC = 0x00010000,
+ WMI_TLV_PEER_DYN_MIMOPS = 0x00020000,
+ WMI_TLV_PEER_STATIC_MIMOPS = 0x00040000,
+ WMI_TLV_PEER_SPATIAL_MUX = 0x00200000,
+ WMI_TLV_PEER_VHT = 0x02000000,
+ WMI_TLV_PEER_80MHZ = 0x04000000,
+ WMI_TLV_PEER_PMF = 0x08000000,
+ WMI_PEER_IS_P2P_CAPABLE = 0x20000000,
+ WMI_PEER_160MHZ = 0x40000000,
+ WMI_PEER_SAFEMODE_EN = 0x80000000,
+
+};
+
+/** Enum list of TLV Tags for each parameter structure type. */
+enum wmi_tlv_tag {
+ WMI_TAG_LAST_RESERVED = 15,
+ WMI_TAG_FIRST_ARRAY_ENUM,
+ WMI_TAG_ARRAY_UINT32 = WMI_TAG_FIRST_ARRAY_ENUM,
+ WMI_TAG_ARRAY_BYTE,
+ WMI_TAG_ARRAY_STRUCT,
+ WMI_TAG_ARRAY_FIXED_STRUCT,
+ WMI_TAG_LAST_ARRAY_ENUM = 31,
+ WMI_TAG_SERVICE_READY_EVENT,
+ WMI_TAG_HAL_REG_CAPABILITIES,
+ WMI_TAG_WLAN_HOST_MEM_REQ,
+ WMI_TAG_READY_EVENT,
+ WMI_TAG_SCAN_EVENT,
+ WMI_TAG_PDEV_TPC_CONFIG_EVENT,
+ WMI_TAG_CHAN_INFO_EVENT,
+ WMI_TAG_COMB_PHYERR_RX_HDR,
+ WMI_TAG_VDEV_START_RESPONSE_EVENT,
+ WMI_TAG_VDEV_STOPPED_EVENT,
+ WMI_TAG_VDEV_INSTALL_KEY_COMPLETE_EVENT,
+ WMI_TAG_PEER_STA_KICKOUT_EVENT,
+ WMI_TAG_MGMT_RX_HDR,
+ WMI_TAG_TBTT_OFFSET_EVENT,
+ WMI_TAG_TX_DELBA_COMPLETE_EVENT,
+ WMI_TAG_TX_ADDBA_COMPLETE_EVENT,
+ WMI_TAG_ROAM_EVENT,
+ WMI_TAG_WOW_EVENT_INFO,
+ WMI_TAG_WOW_EVENT_INFO_SECTION_BITMAP,
+ WMI_TAG_RTT_EVENT_HEADER,
+ WMI_TAG_RTT_ERROR_REPORT_EVENT,
+ WMI_TAG_RTT_MEAS_EVENT,
+ WMI_TAG_ECHO_EVENT,
+ WMI_TAG_FTM_INTG_EVENT,
+ WMI_TAG_VDEV_GET_KEEPALIVE_EVENT,
+ WMI_TAG_GPIO_INPUT_EVENT,
+ WMI_TAG_CSA_EVENT,
+ WMI_TAG_GTK_OFFLOAD_STATUS_EVENT,
+ WMI_TAG_IGTK_INFO,
+ WMI_TAG_DCS_INTERFERENCE_EVENT,
+ WMI_TAG_ATH_DCS_CW_INT,
+ WMI_TAG_WLAN_DCS_CW_INT = /* ALIAS */
+ WMI_TAG_ATH_DCS_CW_INT,
+ WMI_TAG_ATH_DCS_WLAN_INT_STAT,
+ WMI_TAG_WLAN_DCS_IM_TGT_STATS_T = /* ALIAS */
+ WMI_TAG_ATH_DCS_WLAN_INT_STAT,
+ WMI_TAG_WLAN_PROFILE_CTX_T,
+ WMI_TAG_WLAN_PROFILE_T,
+ WMI_TAG_PDEV_QVIT_EVENT,
+ WMI_TAG_HOST_SWBA_EVENT,
+ WMI_TAG_TIM_INFO,
+ WMI_TAG_P2P_NOA_INFO,
+ WMI_TAG_STATS_EVENT,
+ WMI_TAG_AVOID_FREQ_RANGES_EVENT,
+ WMI_TAG_AVOID_FREQ_RANGE_DESC,
+ WMI_TAG_GTK_REKEY_FAIL_EVENT,
+ WMI_TAG_INIT_CMD,
+ WMI_TAG_RESOURCE_CONFIG,
+ WMI_TAG_WLAN_HOST_MEMORY_CHUNK,
+ WMI_TAG_START_SCAN_CMD,
+ WMI_TAG_STOP_SCAN_CMD,
+ WMI_TAG_SCAN_CHAN_LIST_CMD,
+ WMI_TAG_CHANNEL,
+ WMI_TAG_PDEV_SET_REGDOMAIN_CMD,
+ WMI_TAG_PDEV_SET_PARAM_CMD,
+ WMI_TAG_PDEV_SET_WMM_PARAMS_CMD,
+ WMI_TAG_WMM_PARAMS,
+ WMI_TAG_PDEV_SET_QUIET_CMD,
+ WMI_TAG_VDEV_CREATE_CMD,
+ WMI_TAG_VDEV_DELETE_CMD,
+ WMI_TAG_VDEV_START_REQUEST_CMD,
+ WMI_TAG_P2P_NOA_DESCRIPTOR,
+ WMI_TAG_P2P_GO_SET_BEACON_IE,
+ WMI_TAG_GTK_OFFLOAD_CMD,
+ WMI_TAG_VDEV_UP_CMD,
+ WMI_TAG_VDEV_STOP_CMD,
+ WMI_TAG_VDEV_DOWN_CMD,
+ WMI_TAG_VDEV_SET_PARAM_CMD,
+ WMI_TAG_VDEV_INSTALL_KEY_CMD,
+ WMI_TAG_PEER_CREATE_CMD,
+ WMI_TAG_PEER_DELETE_CMD,
+ WMI_TAG_PEER_FLUSH_TIDS_CMD,
+ WMI_TAG_PEER_SET_PARAM_CMD,
+ WMI_TAG_PEER_ASSOC_COMPLETE_CMD,
+ WMI_TAG_VHT_RATE_SET,
+ WMI_TAG_BCN_TMPL_CMD,
+ WMI_TAG_PRB_TMPL_CMD,
+ WMI_TAG_BCN_PRB_INFO,
+ WMI_TAG_PEER_TID_ADDBA_CMD,
+ WMI_TAG_PEER_TID_DELBA_CMD,
+ WMI_TAG_STA_POWERSAVE_MODE_CMD,
+ WMI_TAG_STA_POWERSAVE_PARAM_CMD,
+ WMI_TAG_STA_DTIM_PS_METHOD_CMD,
+ WMI_TAG_ROAM_SCAN_MODE,
+ WMI_TAG_ROAM_SCAN_RSSI_THRESHOLD,
+ WMI_TAG_ROAM_SCAN_PERIOD,
+ WMI_TAG_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
+ WMI_TAG_PDEV_SUSPEND_CMD,
+ WMI_TAG_PDEV_RESUME_CMD,
+ WMI_TAG_ADD_BCN_FILTER_CMD,
+ WMI_TAG_RMV_BCN_FILTER_CMD,
+ WMI_TAG_WOW_ENABLE_CMD,
+ WMI_TAG_WOW_HOSTWAKEUP_FROM_SLEEP_CMD,
+ WMI_TAG_STA_UAPSD_AUTO_TRIG_CMD,
+ WMI_TAG_STA_UAPSD_AUTO_TRIG_PARAM,
+ WMI_TAG_SET_ARP_NS_OFFLOAD_CMD,
+ WMI_TAG_ARP_OFFLOAD_TUPLE,
+ WMI_TAG_NS_OFFLOAD_TUPLE,
+ WMI_TAG_FTM_INTG_CMD,
+ WMI_TAG_STA_KEEPALIVE_CMD,
+ WMI_TAG_STA_KEEPALVE_ARP_RESPONSE,
+ WMI_TAG_P2P_SET_VENDOR_IE_DATA_CMD,
+ WMI_TAG_AP_PS_PEER_CMD,
+ WMI_TAG_PEER_RATE_RETRY_SCHED_CMD,
+ WMI_TAG_WLAN_PROFILE_TRIGGER_CMD,
+ WMI_TAG_WLAN_PROFILE_SET_HIST_INTVL_CMD,
+ WMI_TAG_WLAN_PROFILE_GET_PROF_DATA_CMD,
+ WMI_TAG_WLAN_PROFILE_ENABLE_PROFILE_ID_CMD,
+ WMI_TAG_WOW_DEL_PATTERN_CMD,
+ WMI_TAG_WOW_ADD_DEL_EVT_CMD,
+ WMI_TAG_RTT_MEASREQ_HEAD,
+ WMI_TAG_RTT_MEASREQ_BODY,
+ WMI_TAG_RTT_TSF_CMD,
+ WMI_TAG_VDEV_SPECTRAL_CONFIGURE_CMD,
+ WMI_TAG_VDEV_SPECTRAL_ENABLE_CMD,
+ WMI_TAG_REQUEST_STATS_CMD,
+ WMI_TAG_NLO_CONFIG_CMD,
+ WMI_TAG_NLO_CONFIGURED_PARAMETERS,
+ WMI_TAG_CSA_OFFLOAD_ENABLE_CMD,
+ WMI_TAG_CSA_OFFLOAD_CHANSWITCH_CMD,
+ WMI_TAG_CHATTER_SET_MODE_CMD,
+ WMI_TAG_ECHO_CMD,
+ WMI_TAG_VDEV_SET_KEEPALIVE_CMD,
+ WMI_TAG_VDEV_GET_KEEPALIVE_CMD,
+ WMI_TAG_FORCE_FW_HANG_CMD,
+ WMI_TAG_GPIO_CONFIG_CMD,
+ WMI_TAG_GPIO_OUTPUT_CMD,
+ WMI_TAG_PEER_ADD_WDS_ENTRY_CMD,
+ WMI_TAG_PEER_REMOVE_WDS_ENTRY_CMD,
+ WMI_TAG_BCN_TX_HDR,
+ WMI_TAG_BCN_SEND_FROM_HOST_CMD,
+ WMI_TAG_MGMT_TX_HDR,
+ WMI_TAG_ADDBA_CLEAR_RESP_CMD,
+ WMI_TAG_ADDBA_SEND_CMD,
+ WMI_TAG_DELBA_SEND_CMD,
+ WMI_TAG_ADDBA_SETRESPONSE_CMD,
+ WMI_TAG_SEND_SINGLEAMSDU_CMD,
+ WMI_TAG_PDEV_PKTLOG_ENABLE_CMD,
+ WMI_TAG_PDEV_PKTLOG_DISABLE_CMD,
+ WMI_TAG_PDEV_SET_HT_IE_CMD,
+ WMI_TAG_PDEV_SET_VHT_IE_CMD,
+ WMI_TAG_PDEV_SET_DSCP_TID_MAP_CMD,
+ WMI_TAG_PDEV_GREEN_AP_PS_ENABLE_CMD,
+ WMI_TAG_PDEV_GET_TPC_CONFIG_CMD,
+ WMI_TAG_PDEV_SET_BASE_MACADDR_CMD,
+ WMI_TAG_PEER_MCAST_GROUP_CMD,
+ WMI_TAG_ROAM_AP_PROFILE,
+ WMI_TAG_AP_PROFILE,
+ WMI_TAG_SCAN_SCH_PRIORITY_TABLE_CMD,
+ WMI_TAG_PDEV_DFS_ENABLE_CMD,
+ WMI_TAG_PDEV_DFS_DISABLE_CMD,
+ WMI_TAG_WOW_ADD_PATTERN_CMD,
+ WMI_TAG_WOW_BITMAP_PATTERN_T,
+ WMI_TAG_WOW_IPV4_SYNC_PATTERN_T,
+ WMI_TAG_WOW_IPV6_SYNC_PATTERN_T,
+ WMI_TAG_WOW_MAGIC_PATTERN_CMD,
+ WMI_TAG_SCAN_UPDATE_REQUEST_CMD,
+ WMI_TAG_CHATTER_PKT_COALESCING_FILTER,
+ WMI_TAG_CHATTER_COALESCING_ADD_FILTER_CMD,
+ WMI_TAG_CHATTER_COALESCING_DELETE_FILTER_CMD,
+ WMI_TAG_CHATTER_COALESCING_QUERY_CMD,
+ WMI_TAG_TXBF_CMD,
+ WMI_TAG_DEBUG_LOG_CONFIG_CMD,
+ WMI_TAG_NLO_EVENT,
+ WMI_TAG_CHATTER_QUERY_REPLY_EVENT,
+ WMI_TAG_UPLOAD_H_HDR,
+ WMI_TAG_CAPTURE_H_EVENT_HDR,
+ WMI_TAG_VDEV_WNM_SLEEPMODE_CMD,
+ WMI_TAG_VDEV_IPSEC_NATKEEPALIVE_FILTER_CMD,
+ WMI_TAG_VDEV_WMM_ADDTS_CMD,
+ WMI_TAG_VDEV_WMM_DELTS_CMD,
+ WMI_TAG_VDEV_SET_WMM_PARAMS_CMD,
+ WMI_TAG_TDLS_SET_STATE_CMD,
+ WMI_TAG_TDLS_PEER_UPDATE_CMD,
+ WMI_TAG_TDLS_PEER_EVENT,
+ WMI_TAG_TDLS_PEER_CAPABILITIES,
+ WMI_TAG_VDEV_MCC_SET_TBTT_MODE_CMD,
+ WMI_TAG_ROAM_CHAN_LIST,
+ WMI_TAG_VDEV_MCC_BCN_INTVL_CHANGE_EVENT,
+ WMI_TAG_RESMGR_ADAPTIVE_OCS_ENABLE_DISABLE_CMD,
+ WMI_TAG_RESMGR_SET_CHAN_TIME_QUOTA_CMD,
+ WMI_TAG_RESMGR_SET_CHAN_LATENCY_CMD,
+ WMI_TAG_BA_REQ_SSN_CMD,
+ WMI_TAG_BA_RSP_SSN_EVENT,
+ WMI_TAG_STA_SMPS_FORCE_MODE_CMD,
+ WMI_TAG_SET_MCASTBCAST_FILTER_CMD,
+ WMI_TAG_P2P_SET_OPPPS_CMD,
+ WMI_TAG_P2P_SET_NOA_CMD,
+ WMI_TAG_BA_REQ_SSN_CMD_SUB_STRUCT_PARAM,
+ WMI_TAG_BA_REQ_SSN_EVENT_SUB_STRUCT_PARAM,
+ WMI_TAG_STA_SMPS_PARAM_CMD,
+ WMI_TAG_VDEV_SET_GTX_PARAMS_CMD,
+ WMI_TAG_MCC_SCHED_TRAFFIC_STATS_CMD,
+ WMI_TAG_MCC_SCHED_STA_TRAFFIC_STATS,
+ WMI_TAG_OFFLOAD_BCN_TX_STATUS_EVENT,
+ WMI_TAG_P2P_NOA_EVENT,
+ WMI_TAG_HB_SET_ENABLE_CMD,
+ WMI_TAG_HB_SET_TCP_PARAMS_CMD,
+ WMI_TAG_HB_SET_TCP_PKT_FILTER_CMD,
+ WMI_TAG_HB_SET_UDP_PARAMS_CMD,
+ WMI_TAG_HB_SET_UDP_PKT_FILTER_CMD,
+ WMI_TAG_HB_IND_EVENT,
+ WMI_TAG_TX_PAUSE_EVENT,
+ WMI_TAG_RFKILL_EVENT,
+ WMI_TAG_DFS_RADAR_EVENT,
+ WMI_TAG_DFS_PHYERR_FILTER_ENA_CMD,
+ WMI_TAG_DFS_PHYERR_FILTER_DIS_CMD,
+ WMI_TAG_BATCH_SCAN_RESULT_SCAN_LIST,
+ WMI_TAG_BATCH_SCAN_RESULT_NETWORK_INFO,
+ WMI_TAG_BATCH_SCAN_ENABLE_CMD,
+ WMI_TAG_BATCH_SCAN_DISABLE_CMD,
+ WMI_TAG_BATCH_SCAN_TRIGGER_RESULT_CMD,
+ WMI_TAG_BATCH_SCAN_ENABLED_EVENT,
+ WMI_TAG_BATCH_SCAN_RESULT_EVENT,
+ WMI_TAG_VDEV_PLMREQ_START_CMD,
+ WMI_TAG_VDEV_PLMREQ_STOP_CMD,
+ WMI_TAG_THERMAL_MGMT_CMD,
+ WMI_TAG_THERMAL_MGMT_EVENT,
+ WMI_TAG_PEER_INFO_REQ_CMD,
+ WMI_TAG_PEER_INFO_EVENT,
+ WMI_TAG_PEER_INFO,
+ WMI_TAG_PEER_TX_FAIL_CNT_THR_EVENT,
+ WMI_TAG_RMC_SET_MODE_CMD,
+ WMI_TAG_RMC_SET_ACTION_PERIOD_CMD,
+ WMI_TAG_RMC_CONFIG_CMD,
+ WMI_TAG_MHF_OFFLOAD_SET_MODE_CMD,
+ WMI_TAG_MHF_OFFLOAD_PLUMB_ROUTING_TABLE_CMD,
+ WMI_TAG_ADD_PROACTIVE_ARP_RSP_PATTERN_CMD,
+ WMI_TAG_DEL_PROACTIVE_ARP_RSP_PATTERN_CMD,
+ WMI_TAG_NAN_CMD_PARAM,
+ WMI_TAG_NAN_EVENT_HDR,
+ WMI_TAG_PDEV_L1SS_TRACK_EVENT,
+ WMI_TAG_DIAG_DATA_CONTAINER_EVENT,
+ WMI_TAG_MODEM_POWER_STATE_CMD_PARAM,
+ WMI_TAG_PEER_GET_ESTIMATED_LINKSPEED_CMD,
+ WMI_TAG_PEER_ESTIMATED_LINKSPEED_EVENT,
+ WMI_TAG_AGGR_STATE_TRIG_EVENT,
+ WMI_TAG_MHF_OFFLOAD_ROUTING_TABLE_ENTRY,
+ WMI_TAG_ROAM_SCAN_CMD,
+ WMI_TAG_REQ_STATS_EXT_CMD,
+ WMI_TAG_STATS_EXT_EVENT,
+ WMI_TAG_OBSS_SCAN_ENABLE_CMD,
+ WMI_TAG_OBSS_SCAN_DISABLE_CMD,
+ WMI_TAG_OFFLOAD_PRB_RSP_TX_STATUS_EVENT,
+ WMI_TAG_PDEV_SET_LED_CONFIG_CMD,
+ WMI_TAG_HOST_AUTO_SHUTDOWN_CFG_CMD,
+ WMI_TAG_HOST_AUTO_SHUTDOWN_EVENT,
+ WMI_TAG_UPDATE_WHAL_MIB_STATS_EVENT,
+ WMI_TAG_CHAN_AVOID_UPDATE_CMD_PARAM,
+ WMI_TAG_WOW_IOAC_PKT_PATTERN_T,
+ WMI_TAG_WOW_IOAC_TMR_PATTERN_T,
+ WMI_TAG_WOW_IOAC_ADD_KEEPALIVE_CMD,
+ WMI_TAG_WOW_IOAC_DEL_KEEPALIVE_CMD,
+ WMI_TAG_WOW_IOAC_KEEPALIVE_T,
+ WMI_TAG_WOW_IOAC_ADD_PATTERN_CMD,
+ WMI_TAG_WOW_IOAC_DEL_PATTERN_CMD,
+ WMI_TAG_START_LINK_STATS_CMD,
+ WMI_TAG_CLEAR_LINK_STATS_CMD,
+ WMI_TAG_REQUEST_LINK_STATS_CMD,
+ WMI_TAG_IFACE_LINK_STATS_EVENT,
+ WMI_TAG_RADIO_LINK_STATS_EVENT,
+ WMI_TAG_PEER_STATS_EVENT,
+ WMI_TAG_CHANNEL_STATS,
+ WMI_TAG_RADIO_LINK_STATS,
+ WMI_TAG_RATE_STATS,
+ WMI_TAG_PEER_LINK_STATS,
+ WMI_TAG_WMM_AC_STATS,
+ WMI_TAG_IFACE_LINK_STATS,
+ WMI_TAG_LPI_MGMT_SNOOPING_CONFIG_CMD,
+ WMI_TAG_LPI_START_SCAN_CMD,
+ WMI_TAG_LPI_STOP_SCAN_CMD,
+ WMI_TAG_LPI_RESULT_EVENT,
+ WMI_TAG_PEER_STATE_EVENT,
+ WMI_TAG_EXTSCAN_BUCKET_CMD,
+ WMI_TAG_EXTSCAN_BUCKET_CHANNEL_EVENT,
+ WMI_TAG_EXTSCAN_START_CMD,
+ WMI_TAG_EXTSCAN_STOP_CMD,
+ WMI_TAG_EXTSCAN_CONFIGURE_WLAN_CHANGE_MONITOR_CMD,
+ WMI_TAG_EXTSCAN_WLAN_CHANGE_BSSID_PARAM_CMD,
+ WMI_TAG_EXTSCAN_CONFIGURE_HOTLIST_MONITOR_CMD,
+ WMI_TAG_EXTSCAN_GET_CACHED_RESULTS_CMD,
+ WMI_TAG_EXTSCAN_GET_WLAN_CHANGE_RESULTS_CMD,
+ WMI_TAG_EXTSCAN_SET_CAPABILITIES_CMD,
+ WMI_TAG_EXTSCAN_GET_CAPABILITIES_CMD,
+ WMI_TAG_EXTSCAN_OPERATION_EVENT,
+ WMI_TAG_EXTSCAN_START_STOP_EVENT,
+ WMI_TAG_EXTSCAN_TABLE_USAGE_EVENT,
+ WMI_TAG_EXTSCAN_WLAN_DESCRIPTOR_EVENT,
+ WMI_TAG_EXTSCAN_RSSI_INFO_EVENT,
+ WMI_TAG_EXTSCAN_CACHED_RESULTS_EVENT,
+ WMI_TAG_EXTSCAN_WLAN_CHANGE_RESULTS_EVENT,
+ WMI_TAG_EXTSCAN_WLAN_CHANGE_RESULT_BSSID_EVENT,
+ WMI_TAG_EXTSCAN_HOTLIST_MATCH_EVENT,
+ WMI_TAG_EXTSCAN_CAPABILITIES_EVENT,
+ WMI_TAG_EXTSCAN_CACHE_CAPABILITIES_EVENT,
+ WMI_TAG_EXTSCAN_WLAN_CHANGE_MONITOR_CAPABILITIES_EVENT,
+ WMI_TAG_EXTSCAN_HOTLIST_MONITOR_CAPABILITIES_EVENT,
+ WMI_TAG_D0_WOW_ENABLE_DISABLE_CMD,
+ WMI_TAG_D0_WOW_DISABLE_ACK_EVENT,
+ WMI_TAG_UNIT_TEST_CMD,
+ WMI_TAG_ROAM_OFFLOAD_TLV_PARAM,
+ WMI_TAG_ROAM_11I_OFFLOAD_TLV_PARAM,
+ WMI_TAG_ROAM_11R_OFFLOAD_TLV_PARAM,
+ WMI_TAG_ROAM_ESE_OFFLOAD_TLV_PARAM,
+ WMI_TAG_ROAM_SYNCH_EVENT,
+ WMI_TAG_ROAM_SYNCH_COMPLETE,
+ WMI_TAG_EXTWOW_ENABLE_CMD,
+ WMI_TAG_EXTWOW_SET_APP_TYPE1_PARAMS_CMD,
+ WMI_TAG_EXTWOW_SET_APP_TYPE2_PARAMS_CMD,
+ WMI_TAG_LPI_STATUS_EVENT,
+ WMI_TAG_LPI_HANDOFF_EVENT,
+ WMI_TAG_VDEV_RATE_STATS_EVENT,
+ WMI_TAG_VDEV_RATE_HT_INFO,
+ WMI_TAG_RIC_REQUEST,
+ WMI_TAG_PDEV_GET_TEMPERATURE_CMD,
+ WMI_TAG_PDEV_TEMPERATURE_EVENT,
+ WMI_TAG_SET_DHCP_SERVER_OFFLOAD_CMD,
+ WMI_TAG_TPC_CHAINMASK_CONFIG_CMD,
+ WMI_TAG_RIC_TSPEC,
+ WMI_TAG_TPC_CHAINMASK_CONFIG,
+ WMI_TAG_IPA_OFFLOAD_ENABLE_DISABLE_CMD,
+ WMI_TAG_SCAN_PROB_REQ_OUI_CMD,
+ WMI_TAG_KEY_MATERIAL,
+ WMI_TAG_TDLS_SET_OFFCHAN_MODE_CMD,
+ WMI_TAG_SET_LED_FLASHING_CMD,
+ WMI_TAG_MDNS_OFFLOAD_CMD,
+ WMI_TAG_MDNS_SET_FQDN_CMD,
+ WMI_TAG_MDNS_SET_RESP_CMD,
+ WMI_TAG_MDNS_GET_STATS_CMD,
+ WMI_TAG_MDNS_STATS_EVENT,
+ WMI_TAG_ROAM_INVOKE_CMD,
+ WMI_TAG_PDEV_RESUME_EVENT,
+ WMI_TAG_PDEV_SET_ANTENNA_DIVERSITY_CMD,
+ WMI_TAG_SAP_OFL_ENABLE_CMD,
+ WMI_TAG_SAP_OFL_ADD_STA_EVENT,
+ WMI_TAG_SAP_OFL_DEL_STA_EVENT,
+ WMI_TAG_APFIND_CMD_PARAM,
+ WMI_TAG_APFIND_EVENT_HDR,
+ WMI_TAG_OCB_SET_SCHED_CMD,
+ WMI_TAG_OCB_SET_SCHED_EVENT,
+ WMI_TAG_OCB_SET_CONFIG_CMD,
+ WMI_TAG_OCB_SET_CONFIG_RESP_EVENT,
+ WMI_TAG_OCB_SET_UTC_TIME_CMD,
+ WMI_TAG_OCB_START_TIMING_ADVERT_CMD,
+ WMI_TAG_OCB_STOP_TIMING_ADVERT_CMD,
+ WMI_TAG_OCB_GET_TSF_TIMER_CMD,
+ WMI_TAG_OCB_GET_TSF_TIMER_RESP_EVENT,
+ WMI_TAG_DCC_GET_STATS_CMD,
+ WMI_TAG_DCC_CHANNEL_STATS_REQUEST,
+ WMI_TAG_DCC_GET_STATS_RESP_EVENT,
+ WMI_TAG_DCC_CLEAR_STATS_CMD,
+ WMI_TAG_DCC_UPDATE_NDL_CMD,
+ WMI_TAG_DCC_UPDATE_NDL_RESP_EVENT,
+ WMI_TAG_DCC_STATS_EVENT,
+ WMI_TAG_OCB_CHANNEL,
+ WMI_TAG_OCB_SCHEDULE_ELEMENT,
+ WMI_TAG_DCC_NDL_STATS_PER_CHANNEL,
+ WMI_TAG_DCC_NDL_CHAN,
+ WMI_TAG_QOS_PARAMETER,
+ WMI_TAG_DCC_NDL_ACTIVE_STATE_CONFIG,
+ WMI_TAG_ROAM_SCAN_EXTENDED_THRESHOLD_PARAM,
+ WMI_TAG_ROAM_FILTER,
+ WMI_TAG_PASSPOINT_CONFIG_CMD,
+ WMI_TAG_PASSPOINT_EVENT_HDR,
+ WMI_TAG_EXTSCAN_CONFIGURE_HOTLIST_SSID_MONITOR_CMD,
+ WMI_TAG_EXTSCAN_HOTLIST_SSID_MATCH_EVENT,
+ WMI_TAG_VDEV_TSF_TSTAMP_ACTION_CMD,
+ WMI_TAG_VDEV_TSF_REPORT_EVENT,
+ WMI_TAG_GET_FW_MEM_DUMP,
+ WMI_TAG_UPDATE_FW_MEM_DUMP,
+ WMI_TAG_FW_MEM_DUMP_PARAMS,
+ WMI_TAG_DEBUG_MESG_FLUSH,
+ WMI_TAG_DEBUG_MESG_FLUSH_COMPLETE,
+ WMI_TAG_PEER_SET_RATE_REPORT_CONDITION,
+ WMI_TAG_ROAM_SUBNET_CHANGE_CONFIG,
+ WMI_TAG_VDEV_SET_IE_CMD,
+ WMI_TAG_RSSI_BREACH_MONITOR_CONFIG,
+ WMI_TAG_RSSI_BREACH_EVENT,
+ WMI_TAG_WOW_EVENT_INITIAL_WAKEUP,
+ WMI_TAG_SOC_SET_PCL_CMD,
+ WMI_TAG_SOC_SET_HW_MODE_CMD,
+ WMI_TAG_SOC_SET_HW_MODE_RESPONSE_EVENT,
+ WMI_TAG_SOC_HW_MODE_TRANSITION_EVENT,
+ WMI_TAG_VDEV_TXRX_STREAMS,
+ WMI_TAG_SOC_SET_HW_MODE_RESPONSE_VDEV_MAC_ENTRY,
+ WMI_TAG_SOC_SET_DUAL_MAC_CONFIG_CMD,
+ WMI_TAG_SOC_SET_DUAL_MAC_CONFIG_RESPONSE_EVENT,
+ WMI_TAG_WOW_IOAC_SOCK_PATTERN_T,
+ WMI_TAG_WOW_ENABLE_ICMPV6_NA_FLT_CMD,
+ WMI_TAG_DIAG_EVENT_LOG_CONFIG,
+ WMI_TAG_DIAG_EVENT_LOG_SUPPORTED_EVENT_FIXED_PARAMS,
+ WMI_TAG_PACKET_FILTER_CONFIG,
+ WMI_TAG_PACKET_FILTER_ENABLE,
+ WMI_TAG_SAP_SET_BLACKLIST_PARAM_CMD,
+ WMI_TAG_MGMT_TX_SEND_CMD,
+ WMI_TAG_MGMT_TX_COMPL_EVENT,
+ WMI_TAG_SOC_SET_ANTENNA_MODE_CMD,
+ WMI_TAG_WOW_UDP_SVC_OFLD_CMD,
+ WMI_TAG_LRO_INFO_CMD,
+ WMI_TAG_ROAM_EARLYSTOP_RSSI_THRES_PARAM,
+ WMI_TAG_SERVICE_READY_EXT_EVENT,
+ WMI_TAG_MAWC_SENSOR_REPORT_IND_CMD,
+ WMI_TAG_MAWC_ENABLE_SENSOR_EVENT,
+ WMI_TAG_ROAM_CONFIGURE_MAWC_CMD,
+ WMI_TAG_NLO_CONFIGURE_MAWC_CMD,
+ WMI_TAG_EXTSCAN_CONFIGURE_MAWC_CMD,
+ WMI_TAG_PEER_ASSOC_CONF_EVENT,
+ WMI_TAG_WOW_HOSTWAKEUP_GPIO_PIN_PATTERN_CONFIG_CMD,
+ WMI_TAG_AP_PS_EGAP_PARAM_CMD,
+ WMI_TAG_AP_PS_EGAP_INFO_EVENT,
+ WMI_TAG_PMF_OFFLOAD_SET_SA_QUERY_CMD,
+ WMI_TAG_TRANSFER_DATA_TO_FLASH_CMD,
+ WMI_TAG_TRANSFER_DATA_TO_FLASH_COMPLETE_EVENT,
+ WMI_TAG_SCPC_EVENT,
+ WMI_TAG_AP_PS_EGAP_INFO_CHAINMASK_LIST,
+ WMI_TAG_STA_SMPS_FORCE_MODE_COMPLETE_EVENT,
+ WMI_TAG_BPF_GET_CAPABILITY_CMD,
+ WMI_TAG_BPF_CAPABILITY_INFO_EVT,
+ WMI_TAG_BPF_GET_VDEV_STATS_CMD,
+ WMI_TAG_BPF_VDEV_STATS_INFO_EVT,
+ WMI_TAG_BPF_SET_VDEV_INSTRUCTIONS_CMD,
+ WMI_TAG_BPF_DEL_VDEV_INSTRUCTIONS_CMD,
+ WMI_TAG_VDEV_DELETE_RESP_EVENT,
+ WMI_TAG_PEER_DELETE_RESP_EVENT,
+ WMI_TAG_ROAM_DENSE_THRES_PARAM,
+ WMI_TAG_ENLO_CANDIDATE_SCORE_PARAM,
+ WMI_TAG_PEER_UPDATE_WDS_ENTRY_CMD,
+ WMI_TAG_VDEV_CONFIG_RATEMASK,
+ WMI_TAG_PDEV_FIPS_CMD,
+ WMI_TAG_PDEV_SMART_ANT_ENABLE_CMD,
+ WMI_TAG_PDEV_SMART_ANT_SET_RX_ANTENNA_CMD,
+ WMI_TAG_PEER_SMART_ANT_SET_TX_ANTENNA_CMD,
+ WMI_TAG_PEER_SMART_ANT_SET_TRAIN_ANTENNA_CMD,
+ WMI_TAG_PEER_SMART_ANT_SET_NODE_CONFIG_OPS_CMD,
+ WMI_TAG_PDEV_SET_ANT_SWITCH_TBL_CMD,
+ WMI_TAG_PDEV_SET_CTL_TABLE_CMD,
+ WMI_TAG_PDEV_SET_MIMOGAIN_TABLE_CMD,
+ WMI_TAG_FWTEST_SET_PARAM_CMD,
+ WMI_TAG_PEER_ATF_REQUEST,
+ WMI_TAG_VDEV_ATF_REQUEST,
+ WMI_TAG_PDEV_GET_ANI_CCK_CONFIG_CMD,
+ WMI_TAG_PDEV_GET_ANI_OFDM_CONFIG_CMD,
+ WMI_TAG_INST_RSSI_STATS_RESP,
+ WMI_TAG_MED_UTIL_REPORT_EVENT,
+ WMI_TAG_PEER_STA_PS_STATECHANGE_EVENT,
+ WMI_TAG_WDS_ADDR_EVENT,
+ WMI_TAG_PEER_RATECODE_LIST_EVENT,
+ WMI_TAG_PDEV_NFCAL_POWER_ALL_CHANNELS_EVENT,
+ WMI_TAG_PDEV_TPC_EVENT,
+ WMI_TAG_ANI_OFDM_EVENT,
+ WMI_TAG_ANI_CCK_EVENT,
+ WMI_TAG_PDEV_CHANNEL_HOPPING_EVENT,
+ WMI_TAG_PDEV_FIPS_EVENT,
+ WMI_TAG_ATF_PEER_INFO,
+ WMI_TAG_PDEV_GET_TPC_CMD,
+ WMI_TAG_VDEV_FILTER_NRP_CONFIG_CMD,
+ WMI_TAG_QBOOST_CFG_CMD,
+ WMI_TAG_PDEV_SMART_ANT_GPIO_HANDLE,
+ WMI_TAG_PEER_SMART_ANT_SET_TX_ANTENNA_SERIES,
+ WMI_TAG_PEER_SMART_ANT_SET_TRAIN_ANTENNA_PARAM,
+ WMI_TAG_PDEV_SET_ANT_CTRL_CHAIN,
+ WMI_TAG_PEER_CCK_OFDM_RATE_INFO,
+ WMI_TAG_PEER_MCS_RATE_INFO,
+ WMI_TAG_PDEV_NFCAL_POWER_ALL_CHANNELS_NFDBR,
+ WMI_TAG_PDEV_NFCAL_POWER_ALL_CHANNELS_NFDBM,
+ WMI_TAG_PDEV_NFCAL_POWER_ALL_CHANNELS_FREQNUM,
+ WMI_TAG_MU_REPORT_TOTAL_MU,
+ WMI_TAG_VDEV_SET_DSCP_TID_MAP_CMD,
+ WMI_TAG_ROAM_SET_MBO,
+ WMI_TAG_MIB_STATS_ENABLE_CMD,
+ WMI_TAG_NAN_DISC_IFACE_CREATED_EVENT,
+ WMI_TAG_NAN_DISC_IFACE_DELETED_EVENT,
+ WMI_TAG_NAN_STARTED_CLUSTER_EVENT,
+ WMI_TAG_NAN_JOINED_CLUSTER_EVENT,
+ WMI_TAG_NDI_GET_CAP_REQ,
+ WMI_TAG_NDP_INITIATOR_REQ,
+ WMI_TAG_NDP_RESPONDER_REQ,
+ WMI_TAG_NDP_END_REQ,
+ WMI_TAG_NDI_CAP_RSP_EVENT,
+ WMI_TAG_NDP_INITIATOR_RSP_EVENT,
+ WMI_TAG_NDP_RESPONDER_RSP_EVENT,
+ WMI_TAG_NDP_END_RSP_EVENT,
+ WMI_TAG_NDP_INDICATION_EVENT,
+ WMI_TAG_NDP_CONFIRM_EVENT,
+ WMI_TAG_NDP_END_INDICATION_EVENT,
+ WMI_TAG_VDEV_SET_QUIET_CMD,
+ WMI_TAG_PDEV_SET_PCL_CMD,
+ WMI_TAG_PDEV_SET_HW_MODE_CMD,
+ WMI_TAG_PDEV_SET_MAC_CONFIG_CMD,
+ WMI_TAG_PDEV_SET_ANTENNA_MODE_CMD,
+ WMI_TAG_PDEV_SET_HW_MODE_RESPONSE_EVENT,
+ WMI_TAG_PDEV_HW_MODE_TRANSITION_EVENT,
+ WMI_TAG_PDEV_SET_HW_MODE_RESPONSE_VDEV_MAC_ENTRY,
+ WMI_TAG_PDEV_SET_MAC_CONFIG_RESPONSE_EVENT,
+ WMI_TAG_COEX_CONFIG_CMD,
+ WMI_TAG_CONFIG_ENHANCED_MCAST_FILTER,
+ WMI_TAG_CHAN_AVOID_RPT_ALLOW_CMD,
+ WMI_TAG_SET_PERIODIC_CHANNEL_STATS_CONFIG,
+ WMI_TAG_VDEV_SET_CUSTOM_AGGR_SIZE_CMD,
+ WMI_TAG_PDEV_WAL_POWER_DEBUG_CMD,
+ WMI_TAG_MAC_PHY_CAPABILITIES,
+ WMI_TAG_HW_MODE_CAPABILITIES,
+ WMI_TAG_SOC_MAC_PHY_HW_MODE_CAPS,
+ WMI_TAG_HAL_REG_CAPABILITIES_EXT,
+ WMI_TAG_SOC_HAL_REG_CAPABILITIES,
+ WMI_TAG_VDEV_WISA_CMD,
+ WMI_TAG_TX_POWER_LEVEL_STATS_EVT,
+ WMI_TAG_SCAN_ADAPTIVE_DWELL_PARAMETERS_TLV,
+ WMI_TAG_SCAN_ADAPTIVE_DWELL_CONFIG,
+ WMI_TAG_WOW_SET_ACTION_WAKE_UP_CMD,
+ WMI_TAG_NDP_END_RSP_PER_NDI,
+ WMI_TAG_PEER_BWF_REQUEST,
+ WMI_TAG_BWF_PEER_INFO,
+ WMI_TAG_DBGLOG_TIME_STAMP_SYNC_CMD,
+ WMI_TAG_RMC_SET_LEADER_CMD,
+ WMI_TAG_RMC_MANUAL_LEADER_EVENT,
+ WMI_TAG_PER_CHAIN_RSSI_STATS,
+ WMI_TAG_RSSI_STATS,
+ WMI_TAG_P2P_LO_START_CMD,
+ WMI_TAG_P2P_LO_STOP_CMD,
+ WMI_TAG_P2P_LO_STOPPED_EVENT,
+ WMI_TAG_REORDER_QUEUE_SETUP_CMD,
+ WMI_TAG_REORDER_QUEUE_REMOVE_CMD,
+ WMI_TAG_SET_MULTIPLE_MCAST_FILTER_CMD,
+ WMI_TAG_MGMT_TX_COMPL_BUNDLE_EVENT,
+ WMI_TAG_READ_DATA_FROM_FLASH_CMD,
+ WMI_TAG_READ_DATA_FROM_FLASH_EVENT,
+ WMI_TAG_PDEV_SET_REORDER_TIMEOUT_VAL_CMD,
+ WMI_TAG_PEER_SET_RX_BLOCKSIZE_CMD,
+ WMI_TAG_PDEV_SET_WAKEUP_CONFIG_CMDID,
+ WMI_TAG_TLV_BUF_LEN_PARAM,
+ WMI_TAG_SERVICE_AVAILABLE_EVENT,
+ WMI_TAG_PEER_ANTDIV_INFO_REQ_CMD,
+ WMI_TAG_PEER_ANTDIV_INFO_EVENT,
+ WMI_TAG_PEER_ANTDIV_INFO,
+ WMI_TAG_PDEV_GET_ANTDIV_STATUS_CMD,
+ WMI_TAG_PDEV_ANTDIV_STATUS_EVENT,
+ WMI_TAG_MNT_FILTER_CMD,
+ WMI_TAG_GET_CHIP_POWER_STATS_CMD,
+ WMI_TAG_PDEV_CHIP_POWER_STATS_EVENT,
+ WMI_TAG_COEX_GET_ANTENNA_ISOLATION_CMD,
+ WMI_TAG_COEX_REPORT_ISOLATION_EVENT,
+ WMI_TAG_CHAN_CCA_STATS,
+ WMI_TAG_PEER_SIGNAL_STATS,
+ WMI_TAG_TX_STATS,
+ WMI_TAG_PEER_AC_TX_STATS,
+ WMI_TAG_RX_STATS,
+ WMI_TAG_PEER_AC_RX_STATS,
+ WMI_TAG_REPORT_STATS_EVENT,
+ WMI_TAG_CHAN_CCA_STATS_THRESH,
+ WMI_TAG_PEER_SIGNAL_STATS_THRESH,
+ WMI_TAG_TX_STATS_THRESH,
+ WMI_TAG_RX_STATS_THRESH,
+ WMI_TAG_PDEV_SET_STATS_THRESHOLD_CMD,
+ WMI_TAG_REQUEST_WLAN_STATS_CMD,
+ WMI_TAG_RX_AGGR_FAILURE_EVENT,
+ WMI_TAG_RX_AGGR_FAILURE_INFO,
+ WMI_TAG_VDEV_ENCRYPT_DECRYPT_DATA_REQ_CMD,
+ WMI_TAG_VDEV_ENCRYPT_DECRYPT_DATA_RESP_EVENT,
+ WMI_TAG_PDEV_BAND_TO_MAC,
+ WMI_TAG_TBTT_OFFSET_INFO,
+ WMI_TAG_TBTT_OFFSET_EXT_EVENT,
+ WMI_TAG_SAR_LIMITS_CMD,
+ WMI_TAG_SAR_LIMIT_CMD_ROW,
+ WMI_TAG_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMD,
+ WMI_TAG_PDEV_DFS_PHYERR_OFFLOAD_DISABLE_CMD,
+ WMI_TAG_VDEV_ADFS_CH_CFG_CMD,
+ WMI_TAG_VDEV_ADFS_OCAC_ABORT_CMD,
+ WMI_TAG_PDEV_DFS_RADAR_DETECTION_EVENT,
+ WMI_TAG_VDEV_ADFS_OCAC_COMPLETE_EVENT,
+ WMI_TAG_VDEV_DFS_CAC_COMPLETE_EVENT,
+ WMI_TAG_VENDOR_OUI,
+ WMI_TAG_REQUEST_RCPI_CMD,
+ WMI_TAG_UPDATE_RCPI_EVENT,
+ WMI_TAG_REQUEST_PEER_STATS_INFO_CMD,
+ WMI_TAG_PEER_STATS_INFO,
+ WMI_TAG_PEER_STATS_INFO_EVENT,
+ WMI_TAG_PKGID_EVENT,
+ WMI_TAG_CONNECTED_NLO_RSSI_PARAMS,
+ WMI_TAG_SET_CURRENT_COUNTRY_CMD,
+ WMI_TAG_REGULATORY_RULE_STRUCT,
+ WMI_TAG_REG_CHAN_LIST_CC_EVENT,
+ WMI_TAG_11D_SCAN_START_CMD,
+ WMI_TAG_11D_SCAN_STOP_CMD,
+ WMI_TAG_11D_NEW_COUNTRY_EVENT,
+ WMI_TAG_REQUEST_RADIO_CHAN_STATS_CMD,
+ WMI_TAG_RADIO_CHAN_STATS,
+ WMI_TAG_RADIO_CHAN_STATS_EVENT,
+ WMI_TAG_ROAM_PER_CONFIG,
+ WMI_TAG_VDEV_ADD_MAC_ADDR_TO_RX_FILTER_CMD,
+ WMI_TAG_VDEV_ADD_MAC_ADDR_TO_RX_FILTER_STATUS_EVENT,
+ WMI_TAG_BPF_SET_VDEV_ACTIVE_MODE_CMD,
+ WMI_TAG_HW_DATA_FILTER_CMD,
+ WMI_TAG_CONNECTED_NLO_BSS_BAND_RSSI_PREF,
+ WMI_TAG_PEER_OPER_MODE_CHANGE_EVENT,
+ WMI_TAG_CHIP_POWER_SAVE_FAILURE_DETECTED,
+ WMI_TAG_PDEV_MULTIPLE_VDEV_RESTART_REQUEST_CMD,
+ WMI_TAG_PDEV_CSA_SWITCH_COUNT_STATUS_EVENT,
+ WMI_TAG_PDEV_UPDATE_PKT_ROUTING_CMD,
+ WMI_TAG_PDEV_CHECK_CAL_VERSION_CMD,
+ WMI_TAG_PDEV_CHECK_CAL_VERSION_EVENT,
+ WMI_TAG_PDEV_SET_DIVERSITY_GAIN_CMD,
+ WMI_TAG_MAC_PHY_CHAINMASK_COMBO,
+ WMI_TAG_MAC_PHY_CHAINMASK_CAPABILITY,
+ WMI_TAG_VDEV_SET_ARP_STATS_CMD,
+ WMI_TAG_VDEV_GET_ARP_STATS_CMD,
+ WMI_TAG_VDEV_GET_ARP_STATS_EVENT,
+ WMI_TAG_IFACE_OFFLOAD_STATS,
+ WMI_TAG_REQUEST_STATS_CMD_SUB_STRUCT_PARAM,
+ WMI_TAG_RSSI_CTL_EXT,
+ WMI_TAG_SINGLE_PHYERR_EXT_RX_HDR,
+ WMI_TAG_COEX_BT_ACTIVITY_EVENT,
+ WMI_TAG_VDEV_GET_TX_POWER_CMD,
+ WMI_TAG_VDEV_TX_POWER_EVENT,
+ WMI_TAG_OFFCHAN_DATA_TX_COMPL_EVENT,
+ WMI_TAG_OFFCHAN_DATA_TX_SEND_CMD,
+ WMI_TAG_TX_SEND_PARAMS,
+ WMI_TAG_HE_RATE_SET,
+ WMI_TAG_CONGESTION_STATS,
+ WMI_TAG_SET_INIT_COUNTRY_CMD,
+ WMI_TAG_SCAN_DBS_DUTY_CYCLE,
+ WMI_TAG_SCAN_DBS_DUTY_CYCLE_PARAM_TLV,
+ WMI_TAG_PDEV_DIV_GET_RSSI_ANTID,
+ WMI_TAG_THERM_THROT_CONFIG_REQUEST,
+ WMI_TAG_THERM_THROT_LEVEL_CONFIG_INFO,
+ WMI_TAG_THERM_THROT_STATS_EVENT,
+ WMI_TAG_THERM_THROT_LEVEL_STATS_INFO,
+ WMI_TAG_PDEV_DIV_RSSI_ANTID_EVENT,
+ WMI_TAG_OEM_DMA_RING_CAPABILITIES,
+ WMI_TAG_OEM_DMA_RING_CFG_REQ,
+ WMI_TAG_OEM_DMA_RING_CFG_RSP,
+ WMI_TAG_OEM_INDIRECT_DATA,
+ WMI_TAG_OEM_DMA_BUF_RELEASE,
+ WMI_TAG_OEM_DMA_BUF_RELEASE_ENTRY,
+ WMI_TAG_PDEV_BSS_CHAN_INFO_REQUEST,
+ WMI_TAG_PDEV_BSS_CHAN_INFO_EVENT,
+ WMI_TAG_ROAM_LCA_DISALLOW_CONFIG,
+ WMI_TAG_VDEV_LIMIT_OFFCHAN_CMD,
+ WMI_TAG_ROAM_RSSI_REJECTION_OCE_CONFIG,
+ WMI_TAG_UNIT_TEST_EVENT,
+ WMI_TAG_ROAM_FILS_OFFLOAD,
+ WMI_TAG_PDEV_UPDATE_PMK_CACHE_CMD,
+ WMI_TAG_PMK_CACHE,
+ WMI_TAG_PDEV_UPDATE_FILS_HLP_PKT_CMD,
+ WMI_TAG_ROAM_FILS_SYNCH,
+ WMI_TAG_GTK_OFFLOAD_EXTENDED,
+ WMI_TAG_ROAM_BG_SCAN_ROAMING,
+ WMI_TAG_OIC_PING_OFFLOAD_PARAMS_CMD,
+ WMI_TAG_OIC_PING_OFFLOAD_SET_ENABLE_CMD,
+ WMI_TAG_OIC_PING_HANDOFF_EVENT,
+ WMI_TAG_DHCP_LEASE_RENEW_OFFLOAD_CMD,
+ WMI_TAG_DHCP_LEASE_RENEW_EVENT,
+ WMI_TAG_BTM_CONFIG,
+ WMI_TAG_DEBUG_MESG_FW_DATA_STALL,
+ WMI_TAG_WLM_CONFIG_CMD,
+ WMI_TAG_PDEV_UPDATE_CTLTABLE_REQUEST,
+ WMI_TAG_PDEV_UPDATE_CTLTABLE_EVENT,
+ WMI_TAG_ROAM_CND_SCORING_PARAM,
+ WMI_TAG_PDEV_CONFIG_VENDOR_OUI_ACTION,
+ WMI_TAG_VENDOR_OUI_EXT,
+ WMI_TAG_ROAM_SYNCH_FRAME_EVENT,
+ WMI_TAG_FD_SEND_FROM_HOST_CMD,
+ WMI_TAG_ENABLE_FILS_CMD,
+ WMI_TAG_HOST_SWFDA_EVENT,
+ WMI_TAG_BCN_OFFLOAD_CTRL_CMD,
+ WMI_TAG_PDEV_SET_AC_TX_QUEUE_OPTIMIZED_CMD,
+ WMI_TAG_STATS_PERIOD,
+ WMI_TAG_NDL_SCHEDULE_UPDATE,
+ WMI_TAG_PEER_TID_MSDUQ_QDEPTH_THRESH_UPDATE_CMD,
+ WMI_TAG_MSDUQ_QDEPTH_THRESH_UPDATE,
+ WMI_TAG_PDEV_SET_RX_FILTER_PROMISCUOUS_CMD,
+ WMI_TAG_SAR2_RESULT_EVENT,
+ WMI_TAG_SAR_CAPABILITIES,
+ WMI_TAG_SAP_OBSS_DETECTION_CFG_CMD,
+ WMI_TAG_SAP_OBSS_DETECTION_INFO_EVT,
+ WMI_TAG_DMA_RING_CAPABILITIES,
+ WMI_TAG_DMA_RING_CFG_REQ,
+ WMI_TAG_DMA_RING_CFG_RSP,
+ WMI_TAG_DMA_BUF_RELEASE,
+ WMI_TAG_DMA_BUF_RELEASE_ENTRY,
+ WMI_TAG_SAR_GET_LIMITS_CMD,
+ WMI_TAG_SAR_GET_LIMITS_EVENT,
+ WMI_TAG_SAR_GET_LIMITS_EVENT_ROW,
+ WMI_TAG_OFFLOAD_11K_REPORT,
+ WMI_TAG_INVOKE_NEIGHBOR_REPORT,
+ WMI_TAG_NEIGHBOR_REPORT_OFFLOAD,
+ WMI_TAG_VDEV_SET_CONNECTIVITY_CHECK_STATS,
+ WMI_TAG_VDEV_GET_CONNECTIVITY_CHECK_STATS,
+ WMI_TAG_BPF_SET_VDEV_ENABLE_CMD,
+ WMI_TAG_BPF_SET_VDEV_WORK_MEMORY_CMD,
+ WMI_TAG_BPF_GET_VDEV_WORK_MEMORY_CMD,
+ WMI_TAG_BPF_GET_VDEV_WORK_MEMORY_RESP_EVT,
+ WMI_TAG_PDEV_GET_NFCAL_POWER,
+ WMI_TAG_BSS_COLOR_CHANGE_ENABLE,
+ WMI_TAG_OBSS_COLOR_COLLISION_DET_CONFIG,
+ WMI_TAG_OBSS_COLOR_COLLISION_EVT,
+ WMI_TAG_RUNTIME_DPD_RECAL_CMD,
+ WMI_TAG_TWT_ENABLE_CMD,
+ WMI_TAG_TWT_DISABLE_CMD,
+ WMI_TAG_TWT_ADD_DIALOG_CMD,
+ WMI_TAG_TWT_DEL_DIALOG_CMD,
+ WMI_TAG_TWT_PAUSE_DIALOG_CMD,
+ WMI_TAG_TWT_RESUME_DIALOG_CMD,
+ WMI_TAG_TWT_ENABLE_COMPLETE_EVENT,
+ WMI_TAG_TWT_DISABLE_COMPLETE_EVENT,
+ WMI_TAG_TWT_ADD_DIALOG_COMPLETE_EVENT,
+ WMI_TAG_TWT_DEL_DIALOG_COMPLETE_EVENT,
+ WMI_TAG_TWT_PAUSE_DIALOG_COMPLETE_EVENT,
+ WMI_TAG_TWT_RESUME_DIALOG_COMPLETE_EVENT,
+ WMI_TAG_REQUEST_ROAM_SCAN_STATS_CMD,
+ WMI_TAG_ROAM_SCAN_STATS_EVENT,
+ WMI_TAG_PEER_TID_CONFIGURATIONS_CMD,
+ WMI_TAG_VDEV_SET_CUSTOM_SW_RETRY_TH_CMD,
+ WMI_TAG_GET_TPC_POWER_CMD,
+ WMI_TAG_GET_TPC_POWER_EVENT,
+ WMI_TAG_DMA_BUF_RELEASE_SPECTRAL_META_DATA,
+ WMI_TAG_MOTION_DET_CONFIG_PARAMS_CMD,
+ WMI_TAG_MOTION_DET_BASE_LINE_CONFIG_PARAMS_CMD,
+ WMI_TAG_MOTION_DET_START_STOP_CMD,
+ WMI_TAG_MOTION_DET_BASE_LINE_START_STOP_CMD,
+ WMI_TAG_MOTION_DET_EVENT,
+ WMI_TAG_MOTION_DET_BASE_LINE_EVENT,
+ WMI_TAG_NDP_TRANSPORT_IP,
+ WMI_TAG_OBSS_SPATIAL_REUSE_SET_CMD,
+ WMI_TAG_ESP_ESTIMATE_EVENT,
+ WMI_TAG_NAN_HOST_CONFIG,
+ WMI_TAG_SPECTRAL_BIN_SCALING_PARAMS,
+ WMI_TAG_PEER_CFR_CAPTURE_CMD,
+ WMI_TAG_PEER_CHAN_WIDTH_SWITCH_CMD,
+ WMI_TAG_CHAN_WIDTH_PEER_LIST,
+ WMI_TAG_OBSS_SPATIAL_REUSE_SET_DEF_OBSS_THRESH_CMD,
+ WMI_TAG_PDEV_HE_TB_ACTION_FRM_CMD,
+ WMI_TAG_PEER_EXTD2_STATS,
+ WMI_TAG_HPCS_PULSE_START_CMD,
+ WMI_TAG_PDEV_CTL_FAILSAFE_CHECK_EVENT,
+ WMI_TAG_VDEV_CHAINMASK_CONFIG_CMD,
+ WMI_TAG_VDEV_BCN_OFFLOAD_QUIET_CONFIG_CMD,
+ WMI_TAG_NAN_EVENT_INFO,
+ WMI_TAG_NDP_CHANNEL_INFO,
+ WMI_TAG_NDP_CMD,
+ WMI_TAG_NDP_EVENT,
+ /* TODO add all the missing cmds */
+ WMI_TAG_PDEV_PEER_PKTLOG_FILTER_CMD = 0x301,
+ WMI_TAG_PDEV_PEER_PKTLOG_FILTER_INFO,
+ WMI_TAG_FILS_DISCOVERY_TMPL_CMD = 0x344,
+ WMI_TAG_MAC_PHY_CAPABILITIES_EXT = 0x36F,
+ WMI_TAG_REGULATORY_RULE_EXT_STRUCT = 0x3A9,
+ WMI_TAG_REG_CHAN_LIST_CC_EXT_EVENT,
+ WMI_TAG_MAX
+};
+
+enum wmi_tlv_service {
+ WMI_TLV_SERVICE_BEACON_OFFLOAD = 0,
+ WMI_TLV_SERVICE_SCAN_OFFLOAD = 1,
+ WMI_TLV_SERVICE_ROAM_SCAN_OFFLOAD = 2,
+ WMI_TLV_SERVICE_BCN_MISS_OFFLOAD = 3,
+ WMI_TLV_SERVICE_STA_PWRSAVE = 4,
+ WMI_TLV_SERVICE_STA_ADVANCED_PWRSAVE = 5,
+ WMI_TLV_SERVICE_AP_UAPSD = 6,
+ WMI_TLV_SERVICE_AP_DFS = 7,
+ WMI_TLV_SERVICE_11AC = 8,
+ WMI_TLV_SERVICE_BLOCKACK = 9,
+ WMI_TLV_SERVICE_PHYERR = 10,
+ WMI_TLV_SERVICE_BCN_FILTER = 11,
+ WMI_TLV_SERVICE_RTT = 12,
+ WMI_TLV_SERVICE_WOW = 13,
+ WMI_TLV_SERVICE_RATECTRL_CACHE = 14,
+ WMI_TLV_SERVICE_IRAM_TIDS = 15,
+ WMI_TLV_SERVICE_ARPNS_OFFLOAD = 16,
+ WMI_TLV_SERVICE_NLO = 17,
+ WMI_TLV_SERVICE_GTK_OFFLOAD = 18,
+ WMI_TLV_SERVICE_SCAN_SCH = 19,
+ WMI_TLV_SERVICE_CSA_OFFLOAD = 20,
+ WMI_TLV_SERVICE_CHATTER = 21,
+ WMI_TLV_SERVICE_COEX_FREQAVOID = 22,
+ WMI_TLV_SERVICE_PACKET_POWER_SAVE = 23,
+ WMI_TLV_SERVICE_FORCE_FW_HANG = 24,
+ WMI_TLV_SERVICE_GPIO = 25,
+ WMI_TLV_SERVICE_STA_DTIM_PS_MODULATED_DTIM = 26,
+ WMI_STA_UAPSD_BASIC_AUTO_TRIG = 27,
+ WMI_STA_UAPSD_VAR_AUTO_TRIG = 28,
+ WMI_TLV_SERVICE_STA_KEEP_ALIVE = 29,
+ WMI_TLV_SERVICE_TX_ENCAP = 30,
+ WMI_TLV_SERVICE_AP_PS_DETECT_OUT_OF_SYNC = 31,
+ WMI_TLV_SERVICE_EARLY_RX = 32,
+ WMI_TLV_SERVICE_STA_SMPS = 33,
+ WMI_TLV_SERVICE_FWTEST = 34,
+ WMI_TLV_SERVICE_STA_WMMAC = 35,
+ WMI_TLV_SERVICE_TDLS = 36,
+ WMI_TLV_SERVICE_BURST = 37,
+ WMI_TLV_SERVICE_MCC_BCN_INTERVAL_CHANGE = 38,
+ WMI_TLV_SERVICE_ADAPTIVE_OCS = 39,
+ WMI_TLV_SERVICE_BA_SSN_SUPPORT = 40,
+ WMI_TLV_SERVICE_FILTER_IPSEC_NATKEEPALIVE = 41,
+ WMI_TLV_SERVICE_WLAN_HB = 42,
+ WMI_TLV_SERVICE_LTE_ANT_SHARE_SUPPORT = 43,
+ WMI_TLV_SERVICE_BATCH_SCAN = 44,
+ WMI_TLV_SERVICE_QPOWER = 45,
+ WMI_TLV_SERVICE_PLMREQ = 46,
+ WMI_TLV_SERVICE_THERMAL_MGMT = 47,
+ WMI_TLV_SERVICE_RMC = 48,
+ WMI_TLV_SERVICE_MHF_OFFLOAD = 49,
+ WMI_TLV_SERVICE_COEX_SAR = 50,
+ WMI_TLV_SERVICE_BCN_TXRATE_OVERRIDE = 51,
+ WMI_TLV_SERVICE_NAN = 52,
+ WMI_TLV_SERVICE_L1SS_STAT = 53,
+ WMI_TLV_SERVICE_ESTIMATE_LINKSPEED = 54,
+ WMI_TLV_SERVICE_OBSS_SCAN = 55,
+ WMI_TLV_SERVICE_TDLS_OFFCHAN = 56,
+ WMI_TLV_SERVICE_TDLS_UAPSD_BUFFER_STA = 57,
+ WMI_TLV_SERVICE_TDLS_UAPSD_SLEEP_STA = 58,
+ WMI_TLV_SERVICE_IBSS_PWRSAVE = 59,
+ WMI_TLV_SERVICE_LPASS = 60,
+ WMI_TLV_SERVICE_EXTSCAN = 61,
+ WMI_TLV_SERVICE_D0WOW = 62,
+ WMI_TLV_SERVICE_HSOFFLOAD = 63,
+ WMI_TLV_SERVICE_ROAM_HO_OFFLOAD = 64,
+ WMI_TLV_SERVICE_RX_FULL_REORDER = 65,
+ WMI_TLV_SERVICE_DHCP_OFFLOAD = 66,
+ WMI_TLV_SERVICE_STA_RX_IPA_OFFLOAD_SUPPORT = 67,
+ WMI_TLV_SERVICE_MDNS_OFFLOAD = 68,
+ WMI_TLV_SERVICE_SAP_AUTH_OFFLOAD = 69,
+ WMI_TLV_SERVICE_DUAL_BAND_SIMULTANEOUS_SUPPORT = 70,
+ WMI_TLV_SERVICE_OCB = 71,
+ WMI_TLV_SERVICE_AP_ARPNS_OFFLOAD = 72,
+ WMI_TLV_SERVICE_PER_BAND_CHAINMASK_SUPPORT = 73,
+ WMI_TLV_SERVICE_PACKET_FILTER_OFFLOAD = 74,
+ WMI_TLV_SERVICE_MGMT_TX_HTT = 75,
+ WMI_TLV_SERVICE_MGMT_TX_WMI = 76,
+ WMI_TLV_SERVICE_EXT_MSG = 77,
+ WMI_TLV_SERVICE_MAWC = 78,
+ WMI_TLV_SERVICE_PEER_ASSOC_CONF = 79,
+ WMI_TLV_SERVICE_EGAP = 80,
+ WMI_TLV_SERVICE_STA_PMF_OFFLOAD = 81,
+ WMI_TLV_SERVICE_UNIFIED_WOW_CAPABILITY = 82,
+ WMI_TLV_SERVICE_ENHANCED_PROXY_STA = 83,
+ WMI_TLV_SERVICE_ATF = 84,
+ WMI_TLV_SERVICE_COEX_GPIO = 85,
+ WMI_TLV_SERVICE_AUX_SPECTRAL_INTF = 86,
+ WMI_TLV_SERVICE_AUX_CHAN_LOAD_INTF = 87,
+ WMI_TLV_SERVICE_BSS_CHANNEL_INFO_64 = 88,
+ WMI_TLV_SERVICE_ENTERPRISE_MESH = 89,
+ WMI_TLV_SERVICE_RESTRT_CHNL_SUPPORT = 90,
+ WMI_TLV_SERVICE_BPF_OFFLOAD = 91,
+ WMI_TLV_SERVICE_SYNC_DELETE_CMDS = 92,
+ WMI_TLV_SERVICE_SMART_ANTENNA_SW_SUPPORT = 93,
+ WMI_TLV_SERVICE_SMART_ANTENNA_HW_SUPPORT = 94,
+ WMI_TLV_SERVICE_RATECTRL_LIMIT_MAX_MIN_RATES = 95,
+ WMI_TLV_SERVICE_NAN_DATA = 96,
+ WMI_TLV_SERVICE_NAN_RTT = 97,
+ WMI_TLV_SERVICE_11AX = 98,
+ WMI_TLV_SERVICE_DEPRECATED_REPLACE = 99,
+ WMI_TLV_SERVICE_TDLS_CONN_TRACKER_IN_HOST_MODE = 100,
+ WMI_TLV_SERVICE_ENHANCED_MCAST_FILTER = 101,
+ WMI_TLV_SERVICE_PERIODIC_CHAN_STAT_SUPPORT = 102,
+ WMI_TLV_SERVICE_MESH_11S = 103,
+ WMI_TLV_SERVICE_HALF_RATE_QUARTER_RATE_SUPPORT = 104,
+ WMI_TLV_SERVICE_VDEV_RX_FILTER = 105,
+ WMI_TLV_SERVICE_P2P_LISTEN_OFFLOAD_SUPPORT = 106,
+ WMI_TLV_SERVICE_MARK_FIRST_WAKEUP_PACKET = 107,
+ WMI_TLV_SERVICE_MULTIPLE_MCAST_FILTER_SET = 108,
+ WMI_TLV_SERVICE_HOST_MANAGED_RX_REORDER = 109,
+ WMI_TLV_SERVICE_FLASH_RDWR_SUPPORT = 110,
+ WMI_TLV_SERVICE_WLAN_STATS_REPORT = 111,
+ WMI_TLV_SERVICE_TX_MSDU_ID_NEW_PARTITION_SUPPORT = 112,
+ WMI_TLV_SERVICE_DFS_PHYERR_OFFLOAD = 113,
+ WMI_TLV_SERVICE_RCPI_SUPPORT = 114,
+ WMI_TLV_SERVICE_FW_MEM_DUMP_SUPPORT = 115,
+ WMI_TLV_SERVICE_PEER_STATS_INFO = 116,
+ WMI_TLV_SERVICE_REGULATORY_DB = 117,
+ WMI_TLV_SERVICE_11D_OFFLOAD = 118,
+ WMI_TLV_SERVICE_HW_DATA_FILTERING = 119,
+ WMI_TLV_SERVICE_MULTIPLE_VDEV_RESTART = 120,
+ WMI_TLV_SERVICE_PKT_ROUTING = 121,
+ WMI_TLV_SERVICE_CHECK_CAL_VERSION = 122,
+ WMI_TLV_SERVICE_OFFCHAN_TX_WMI = 123,
+ WMI_TLV_SERVICE_8SS_TX_BFEE = 124,
+ WMI_TLV_SERVICE_EXTENDED_NSS_SUPPORT = 125,
+ WMI_TLV_SERVICE_ACK_TIMEOUT = 126,
+ WMI_TLV_SERVICE_PDEV_BSS_CHANNEL_INFO_64 = 127,
+
+ WMI_MAX_SERVICE = 128,
+
+ WMI_TLV_SERVICE_CHAN_LOAD_INFO = 128,
+ WMI_TLV_SERVICE_TX_PPDU_INFO_STATS_SUPPORT = 129,
+ WMI_TLV_SERVICE_VDEV_LIMIT_OFFCHAN_SUPPORT = 130,
+ WMI_TLV_SERVICE_FILS_SUPPORT = 131,
+ WMI_TLV_SERVICE_WLAN_OIC_PING_OFFLOAD = 132,
+ WMI_TLV_SERVICE_WLAN_DHCP_RENEW = 133,
+ WMI_TLV_SERVICE_MAWC_SUPPORT = 134,
+ WMI_TLV_SERVICE_VDEV_LATENCY_CONFIG = 135,
+ WMI_TLV_SERVICE_PDEV_UPDATE_CTLTABLE_SUPPORT = 136,
+ WMI_TLV_SERVICE_PKTLOG_SUPPORT_OVER_HTT = 137,
+ WMI_TLV_SERVICE_VDEV_MULTI_GROUP_KEY_SUPPORT = 138,
+ WMI_TLV_SERVICE_SCAN_PHYMODE_SUPPORT = 139,
+ WMI_TLV_SERVICE_THERM_THROT = 140,
+ WMI_TLV_SERVICE_BCN_OFFLOAD_START_STOP_SUPPORT = 141,
+ WMI_TLV_SERVICE_WOW_WAKEUP_BY_TIMER_PATTERN = 142,
+ WMI_TLV_SERVICE_PEER_MAP_UNMAP_V2_SUPPORT = 143,
+ WMI_TLV_SERVICE_OFFCHAN_DATA_TID_SUPPORT = 144,
+ WMI_TLV_SERVICE_RX_PROMISC_ENABLE_SUPPORT = 145,
+ WMI_TLV_SERVICE_SUPPORT_DIRECT_DMA = 146,
+ WMI_TLV_SERVICE_AP_OBSS_DETECTION_OFFLOAD = 147,
+ WMI_TLV_SERVICE_11K_NEIGHBOUR_REPORT_SUPPORT = 148,
+ WMI_TLV_SERVICE_LISTEN_INTERVAL_OFFLOAD_SUPPORT = 149,
+ WMI_TLV_SERVICE_BSS_COLOR_OFFLOAD = 150,
+ WMI_TLV_SERVICE_RUNTIME_DPD_RECAL = 151,
+ WMI_TLV_SERVICE_STA_TWT = 152,
+ WMI_TLV_SERVICE_AP_TWT = 153,
+ WMI_TLV_SERVICE_GMAC_OFFLOAD_SUPPORT = 154,
+ WMI_TLV_SERVICE_SPOOF_MAC_SUPPORT = 155,
+ WMI_TLV_SERVICE_PEER_TID_CONFIGS_SUPPORT = 156,
+ WMI_TLV_SERVICE_VDEV_SWRETRY_PER_AC_CONFIG_SUPPORT = 157,
+ WMI_TLV_SERVICE_DUAL_BEACON_ON_SINGLE_MAC_SCC_SUPPORT = 158,
+ WMI_TLV_SERVICE_DUAL_BEACON_ON_SINGLE_MAC_MCC_SUPPORT = 159,
+ WMI_TLV_SERVICE_MOTION_DET = 160,
+ WMI_TLV_SERVICE_INFRA_MBSSID = 161,
+ WMI_TLV_SERVICE_OBSS_SPATIAL_REUSE = 162,
+ WMI_TLV_SERVICE_VDEV_DIFFERENT_BEACON_INTERVAL_SUPPORT = 163,
+ WMI_TLV_SERVICE_NAN_DBS_SUPPORT = 164,
+ WMI_TLV_SERVICE_NDI_DBS_SUPPORT = 165,
+ WMI_TLV_SERVICE_NAN_SAP_SUPPORT = 166,
+ WMI_TLV_SERVICE_NDI_SAP_SUPPORT = 167,
+ WMI_TLV_SERVICE_CFR_CAPTURE_SUPPORT = 168,
+ WMI_TLV_SERVICE_CFR_CAPTURE_IND_MSG_TYPE_1 = 169,
+ WMI_TLV_SERVICE_ESP_SUPPORT = 170,
+ WMI_TLV_SERVICE_PEER_CHWIDTH_CHANGE = 171,
+ WMI_TLV_SERVICE_WLAN_HPCS_PULSE = 172,
+ WMI_TLV_SERVICE_PER_VDEV_CHAINMASK_CONFIG_SUPPORT = 173,
+ WMI_TLV_SERVICE_TX_DATA_MGMT_ACK_RSSI = 174,
+ WMI_TLV_SERVICE_NAN_DISABLE_SUPPORT = 175,
+ WMI_TLV_SERVICE_HTT_H2T_NO_HTC_HDR_LEN_IN_MSG_LEN = 176,
+ WMI_TLV_SERVICE_COEX_SUPPORT_UNEQUAL_ISOLATION = 177,
+ WMI_TLV_SERVICE_HW_DB2DBM_CONVERSION_SUPPORT = 178,
+ WMI_TLV_SERVICE_SUPPORT_EXTEND_ADDRESS = 179,
+ WMI_TLV_SERVICE_BEACON_RECEPTION_STATS = 180,
+ WMI_TLV_SERVICE_FETCH_TX_PN = 181,
+ WMI_TLV_SERVICE_PEER_UNMAP_RESPONSE_SUPPORT = 182,
+ WMI_TLV_SERVICE_TX_PER_PEER_AMPDU_SIZE = 183,
+ WMI_TLV_SERVICE_BSS_COLOR_SWITCH_COUNT = 184,
+ WMI_TLV_SERVICE_HTT_PEER_STATS_SUPPORT = 185,
+ WMI_TLV_SERVICE_UL_RU26_ALLOWED = 186,
+ WMI_TLV_SERVICE_GET_MWS_COEX_STATE = 187,
+ WMI_TLV_SERVICE_GET_MWS_DPWB_STATE = 188,
+ WMI_TLV_SERVICE_GET_MWS_TDM_STATE = 189,
+ WMI_TLV_SERVICE_GET_MWS_IDRX_STATE = 190,
+ WMI_TLV_SERVICE_GET_MWS_ANTENNA_SHARING_STATE = 191,
+ WMI_TLV_SERVICE_ENHANCED_TPC_CONFIG_EVENT = 192,
+ WMI_TLV_SERVICE_WLM_STATS_REQUEST = 193,
+ WMI_TLV_SERVICE_EXT_PEER_TID_CONFIGS_SUPPORT = 194,
+ WMI_TLV_SERVICE_WPA3_FT_SAE_SUPPORT = 195,
+ WMI_TLV_SERVICE_WPA3_FT_SUITE_B_SUPPORT = 196,
+ WMI_TLV_SERVICE_VOW_ENABLE = 197,
+ WMI_TLV_SERVICE_CFR_CAPTURE_IND_EVT_TYPE_1 = 198,
+ WMI_TLV_SERVICE_BROADCAST_TWT = 199,
+ WMI_TLV_SERVICE_RAP_DETECTION_SUPPORT = 200,
+ WMI_TLV_SERVICE_PS_TDCC = 201,
+ WMI_TLV_SERVICE_THREE_WAY_COEX_CONFIG_LEGACY = 202,
+ WMI_TLV_SERVICE_THREE_WAY_COEX_CONFIG_OVERRIDE = 203,
+ WMI_TLV_SERVICE_TX_PWR_PER_PEER = 204,
+ WMI_TLV_SERVICE_STA_PLUS_STA_SUPPORT = 205,
+ WMI_TLV_SERVICE_WPA3_FT_FILS = 206,
+ WMI_TLV_SERVICE_ADAPTIVE_11R_ROAM = 207,
+ WMI_TLV_SERVICE_CHAN_RF_CHARACTERIZATION_INFO = 208,
+ WMI_TLV_SERVICE_FW_IFACE_COMBINATION_SUPPORT = 209,
+ WMI_TLV_SERVICE_TX_COMPL_TSF64 = 210,
+ WMI_TLV_SERVICE_DSM_ROAM_FILTER = 211,
+ WMI_TLV_SERVICE_PACKET_CAPTURE_SUPPORT = 212,
+ WMI_TLV_SERVICE_PER_PEER_HTT_STATS_RESET = 213,
+ WMI_TLV_SERVICE_FREQINFO_IN_METADATA = 219,
+ WMI_TLV_SERVICE_EXT2_MSG = 220,
+
+ WMI_MAX_EXT_SERVICE
+};
+
+enum {
+ WMI_SMPS_FORCED_MODE_NONE = 0,
+ WMI_SMPS_FORCED_MODE_DISABLED,
+ WMI_SMPS_FORCED_MODE_STATIC,
+ WMI_SMPS_FORCED_MODE_DYNAMIC
+};
+
+enum wmi_tpc_chainmask {
+ WMI_TPC_CHAINMASK_CONFIG_BAND_2G = 0,
+ WMI_TPC_CHAINMASK_CONFIG_BAND_5G = 1,
+ WMI_NUM_SUPPORTED_BAND_MAX = 2,
+};
+
+enum wmi_peer_param {
+ WMI_PEER_MIMO_PS_STATE = 1,
+ WMI_PEER_AMPDU = 2,
+ WMI_PEER_AUTHORIZE = 3,
+ WMI_PEER_CHWIDTH = 4,
+ WMI_PEER_NSS = 5,
+ WMI_PEER_USE_4ADDR = 6,
+ WMI_PEER_MEMBERSHIP = 7,
+ WMI_PEER_USERPOS = 8,
+ WMI_PEER_CRIT_PROTO_HINT_ENABLED = 9,
+ WMI_PEER_TX_FAIL_CNT_THR = 10,
+ WMI_PEER_SET_HW_RETRY_CTS2S = 11,
+ WMI_PEER_IBSS_ATIM_WINDOW_LENGTH = 12,
+ WMI_PEER_PHYMODE = 13,
+ WMI_PEER_USE_FIXED_PWR = 14,
+ WMI_PEER_PARAM_FIXED_RATE = 15,
+ WMI_PEER_SET_MU_WHITELIST = 16,
+ WMI_PEER_SET_MAX_TX_RATE = 17,
+ WMI_PEER_SET_MIN_TX_RATE = 18,
+ WMI_PEER_SET_DEFAULT_ROUTING = 19,
+};
+
+enum wmi_slot_time {
+ WMI_VDEV_SLOT_TIME_LONG = 1,
+ WMI_VDEV_SLOT_TIME_SHORT = 2,
+};
+
+enum wmi_preamble {
+ WMI_VDEV_PREAMBLE_LONG = 1,
+ WMI_VDEV_PREAMBLE_SHORT = 2,
+};
+
+enum wmi_peer_smps_state {
+ WMI_PEER_SMPS_PS_NONE = 0,
+ WMI_PEER_SMPS_STATIC = 1,
+ WMI_PEER_SMPS_DYNAMIC = 2
+};
+
+enum wmi_peer_chwidth {
+ WMI_PEER_CHWIDTH_20MHZ = 0,
+ WMI_PEER_CHWIDTH_40MHZ = 1,
+ WMI_PEER_CHWIDTH_80MHZ = 2,
+ WMI_PEER_CHWIDTH_160MHZ = 3,
+};
+
+enum wmi_beacon_gen_mode {
+ WMI_BEACON_STAGGERED_MODE = 0,
+ WMI_BEACON_BURST_MODE = 1
+};
+
+enum wmi_direct_buffer_module {
+ WMI_DIRECT_BUF_SPECTRAL = 0,
+ WMI_DIRECT_BUF_CFR = 1,
+
+ /* keep it last */
+ WMI_DIRECT_BUF_MAX
+};
+
+struct ath12k_wmi_pdev_band_arg {
+ u32 pdev_id;
+ u32 start_freq;
+ u32 end_freq;
+};
+
+struct ath12k_wmi_ppe_threshold_arg {
+ u32 numss_m1;
+ u32 ru_bit_mask;
+ u32 ppet16_ppet8_ru3_ru0[WMI_MAX_NUM_SS];
+};
+
+#define PSOC_HOST_MAX_PHY_SIZE (3)
+#define ATH12K_11B_SUPPORT BIT(0)
+#define ATH12K_11G_SUPPORT BIT(1)
+#define ATH12K_11A_SUPPORT BIT(2)
+#define ATH12K_11N_SUPPORT BIT(3)
+#define ATH12K_11AC_SUPPORT BIT(4)
+#define ATH12K_11AX_SUPPORT BIT(5)
+
+struct ath12k_wmi_hal_reg_capabilities_ext_arg {
+ u32 phy_id;
+ u32 eeprom_reg_domain;
+ u32 eeprom_reg_domain_ext;
+ u32 regcap1;
+ u32 regcap2;
+ u32 wireless_modes;
+ u32 low_2ghz_chan;
+ u32 high_2ghz_chan;
+ u32 low_5ghz_chan;
+ u32 high_5ghz_chan;
+};
+
+#define WMI_HOST_MAX_PDEV 3
+
+struct ath12k_wmi_host_mem_chunk_params {
+ __le32 tlv_header;
+ __le32 req_id;
+ __le32 ptr;
+ __le32 size;
+} __packed;
+
+struct ath12k_wmi_host_mem_chunk_arg {
+ void *vaddr;
+ dma_addr_t paddr;
+ u32 len;
+ u32 req_id;
+};
+
+struct ath12k_wmi_resource_config_arg {
+ u32 num_vdevs;
+ u32 num_peers;
+ u32 num_active_peers;
+ u32 num_offload_peers;
+ u32 num_offload_reorder_buffs;
+ u32 num_peer_keys;
+ u32 num_tids;
+ u32 ast_skid_limit;
+ u32 tx_chain_mask;
+ u32 rx_chain_mask;
+ u32 rx_timeout_pri[4];
+ u32 rx_decap_mode;
+ u32 scan_max_pending_req;
+ u32 bmiss_offload_max_vdev;
+ u32 roam_offload_max_vdev;
+ u32 roam_offload_max_ap_profiles;
+ u32 num_mcast_groups;
+ u32 num_mcast_table_elems;
+ u32 mcast2ucast_mode;
+ u32 tx_dbg_log_size;
+ u32 num_wds_entries;
+ u32 dma_burst_size;
+ u32 mac_aggr_delim;
+ u32 rx_skip_defrag_timeout_dup_detection_check;
+ u32 vow_config;
+ u32 gtk_offload_max_vdev;
+ u32 num_msdu_desc;
+ u32 max_frag_entries;
+ u32 max_peer_ext_stats;
+ u32 smart_ant_cap;
+ u32 bk_minfree;
+ u32 be_minfree;
+ u32 vi_minfree;
+ u32 vo_minfree;
+ u32 rx_batchmode;
+ u32 tt_support;
+ u32 atf_config;
+ u32 iphdr_pad_config;
+ u32 qwrap_config:16,
+ alloc_frag_desc_for_data_pkt:16;
+ u32 num_tdls_vdevs;
+ u32 num_tdls_conn_table_entries;
+ u32 beacon_tx_offload_max_vdev;
+ u32 num_multicast_filter_entries;
+ u32 num_wow_filters;
+ u32 num_keep_alive_pattern;
+ u32 keep_alive_pattern_size;
+ u32 max_tdls_concurrent_sleep_sta;
+ u32 max_tdls_concurrent_buffer_sta;
+ u32 wmi_send_separate;
+ u32 num_ocb_vdevs;
+ u32 num_ocb_channels;
+ u32 num_ocb_schedules;
+ u32 num_ns_ext_tuples_cfg;
+ u32 bpf_instruction_size;
+ u32 max_bssid_rx_filters;
+ u32 use_pdev_id;
+ u32 peer_map_unmap_version;
+ u32 sched_params;
+ u32 twt_ap_pdev_count;
+ u32 twt_ap_sta_count;
+};
+
+struct ath12k_wmi_init_cmd_arg {
+ struct ath12k_wmi_resource_config_arg res_cfg;
+ u8 num_mem_chunks;
+ struct ath12k_wmi_host_mem_chunk_arg *mem_chunks;
+ u32 hw_mode_id;
+ u32 num_band_to_mac;
+ struct ath12k_wmi_pdev_band_arg band_to_mac[WMI_HOST_MAX_PDEV];
+};
+
+struct ath12k_wmi_pdev_band_to_mac_params {
+ __le32 tlv_header;
+ __le32 pdev_id;
+ __le32 start_freq;
+ __le32 end_freq;
+} __packed;
+
+/* This is both individual command WMI_PDEV_SET_HW_MODE_CMDID and also part
+ * of WMI_TAG_INIT_CMD.
+ */
+struct ath12k_wmi_pdev_set_hw_mode_cmd {
+ __le32 tlv_header;
+ __le32 pdev_id;
+ __le32 hw_mode_index;
+ __le32 num_band_to_mac;
+} __packed;
+
+struct ath12k_wmi_ppe_threshold_params {
+ __le32 numss_m1; /** NSS - 1*/
+ __le32 ru_info;
+ __le32 ppet16_ppet8_ru3_ru0[WMI_MAX_NUM_SS];
+} __packed;
+
+#define HW_BD_INFO_SIZE 5
+
+struct ath12k_wmi_abi_version_params {
+ __le32 abi_version_0;
+ __le32 abi_version_1;
+ __le32 abi_version_ns_0;
+ __le32 abi_version_ns_1;
+ __le32 abi_version_ns_2;
+ __le32 abi_version_ns_3;
+} __packed;
+
+struct wmi_init_cmd {
+ __le32 tlv_header;
+ struct ath12k_wmi_abi_version_params host_abi_vers;
+ __le32 num_host_mem_chunks;
+} __packed;
+
+#define WMI_RSRC_CFG_HOST_SVC_FLAG_REG_CC_EXT_SUPPORT_BIT 4
+
+struct ath12k_wmi_resource_config_params {
+ __le32 tlv_header;
+ __le32 num_vdevs;
+ __le32 num_peers;
+ __le32 num_offload_peers;
+ __le32 num_offload_reorder_buffs;
+ __le32 num_peer_keys;
+ __le32 num_tids;
+ __le32 ast_skid_limit;
+ __le32 tx_chain_mask;
+ __le32 rx_chain_mask;
+ __le32 rx_timeout_pri[4];
+ __le32 rx_decap_mode;
+ __le32 scan_max_pending_req;
+ __le32 bmiss_offload_max_vdev;
+ __le32 roam_offload_max_vdev;
+ __le32 roam_offload_max_ap_profiles;
+ __le32 num_mcast_groups;
+ __le32 num_mcast_table_elems;
+ __le32 mcast2ucast_mode;
+ __le32 tx_dbg_log_size;
+ __le32 num_wds_entries;
+ __le32 dma_burst_size;
+ __le32 mac_aggr_delim;
+ __le32 rx_skip_defrag_timeout_dup_detection_check;
+ __le32 vow_config;
+ __le32 gtk_offload_max_vdev;
+ __le32 num_msdu_desc;
+ __le32 max_frag_entries;
+ __le32 num_tdls_vdevs;
+ __le32 num_tdls_conn_table_entries;
+ __le32 beacon_tx_offload_max_vdev;
+ __le32 num_multicast_filter_entries;
+ __le32 num_wow_filters;
+ __le32 num_keep_alive_pattern;
+ __le32 keep_alive_pattern_size;
+ __le32 max_tdls_concurrent_sleep_sta;
+ __le32 max_tdls_concurrent_buffer_sta;
+ __le32 wmi_send_separate;
+ __le32 num_ocb_vdevs;
+ __le32 num_ocb_channels;
+ __le32 num_ocb_schedules;
+ __le32 flag1;
+ __le32 smart_ant_cap;
+ __le32 bk_minfree;
+ __le32 be_minfree;
+ __le32 vi_minfree;
+ __le32 vo_minfree;
+ __le32 alloc_frag_desc_for_data_pkt;
+ __le32 num_ns_ext_tuples_cfg;
+ __le32 bpf_instruction_size;
+ __le32 max_bssid_rx_filters;
+ __le32 use_pdev_id;
+ __le32 max_num_dbs_scan_duty_cycle;
+ __le32 max_num_group_keys;
+ __le32 peer_map_unmap_version;
+ __le32 sched_params;
+ __le32 twt_ap_pdev_count;
+ __le32 twt_ap_sta_count;
+ __le32 max_nlo_ssids;
+ __le32 num_pkt_filters;
+ __le32 num_max_sta_vdevs;
+ __le32 max_bssid_indicator;
+ __le32 ul_resp_config;
+ __le32 msdu_flow_override_config0;
+ __le32 msdu_flow_override_config1;
+ __le32 flags2;
+ __le32 host_service_flags;
+ __le32 max_rnr_neighbours;
+ __le32 ema_max_vap_cnt;
+ __le32 ema_max_profile_period;
+} __packed;
+
+struct wmi_service_ready_event {
+ __le32 fw_build_vers;
+ struct ath12k_wmi_abi_version_params fw_abi_vers;
+ __le32 phy_capability;
+ __le32 max_frag_entry;
+ __le32 num_rf_chains;
+ __le32 ht_cap_info;
+ __le32 vht_cap_info;
+ __le32 vht_supp_mcs;
+ __le32 hw_min_tx_power;
+ __le32 hw_max_tx_power;
+ __le32 sys_cap_info;
+ __le32 min_pkt_size_enable;
+ __le32 max_bcn_ie_size;
+ __le32 num_mem_reqs;
+ __le32 max_num_scan_channels;
+ __le32 hw_bd_id;
+ __le32 hw_bd_info[HW_BD_INFO_SIZE];
+ __le32 max_supported_macs;
+ __le32 wmi_fw_sub_feat_caps;
+ __le32 num_dbs_hw_modes;
+ /* txrx_chainmask
+ * [7:0] - 2G band tx chain mask
+ * [15:8] - 2G band rx chain mask
+ * [23:16] - 5G band tx chain mask
+ * [31:24] - 5G band rx chain mask
+ */
+ __le32 txrx_chainmask;
+ __le32 default_dbs_hw_mode_index;
+ __le32 num_msdu_desc;
+} __packed;
+
+#define WMI_SERVICE_BM_SIZE ((WMI_MAX_SERVICE + sizeof(u32) - 1) / sizeof(u32))
+
+#define WMI_SERVICE_SEGMENT_BM_SIZE32 4 /* 4x u32 = 128 bits */
+#define WMI_SERVICE_EXT_BM_SIZE (WMI_SERVICE_SEGMENT_BM_SIZE32 * sizeof(u32))
+#define WMI_AVAIL_SERVICE_BITS_IN_SIZE32 32
+#define WMI_SERVICE_BITS_IN_SIZE32 4
+
+struct wmi_service_ready_ext_event {
+ __le32 default_conc_scan_config_bits;
+ __le32 default_fw_config_bits;
+ struct ath12k_wmi_ppe_threshold_params ppet;
+ __le32 he_cap_info;
+ __le32 mpdu_density;
+ __le32 max_bssid_rx_filters;
+ __le32 fw_build_vers_ext;
+ __le32 max_nlo_ssids;
+ __le32 max_bssid_indicator;
+ __le32 he_cap_info_ext;
+} __packed;
+
+struct ath12k_wmi_soc_mac_phy_hw_mode_caps_params {
+ __le32 num_hw_modes;
+ __le32 num_chainmask_tables;
+} __packed;
+
+struct ath12k_wmi_hw_mode_cap_params {
+ __le32 tlv_header;
+ __le32 hw_mode_id;
+ __le32 phy_id_map;
+ __le32 hw_mode_config_type;
+} __packed;
+
+#define WMI_MAX_HECAP_PHY_SIZE (3)
+
+struct ath12k_wmi_mac_phy_caps_params {
+ __le32 hw_mode_id;
+ __le32 pdev_id;
+ __le32 phy_id;
+ __le32 supported_flags;
+ __le32 supported_bands;
+ __le32 ampdu_density;
+ __le32 max_bw_supported_2g;
+ __le32 ht_cap_info_2g;
+ __le32 vht_cap_info_2g;
+ __le32 vht_supp_mcs_2g;
+ __le32 he_cap_info_2g;
+ __le32 he_supp_mcs_2g;
+ __le32 tx_chain_mask_2g;
+ __le32 rx_chain_mask_2g;
+ __le32 max_bw_supported_5g;
+ __le32 ht_cap_info_5g;
+ __le32 vht_cap_info_5g;
+ __le32 vht_supp_mcs_5g;
+ __le32 he_cap_info_5g;
+ __le32 he_supp_mcs_5g;
+ __le32 tx_chain_mask_5g;
+ __le32 rx_chain_mask_5g;
+ __le32 he_cap_phy_info_2g[WMI_MAX_HECAP_PHY_SIZE];
+ __le32 he_cap_phy_info_5g[WMI_MAX_HECAP_PHY_SIZE];
+ struct ath12k_wmi_ppe_threshold_params he_ppet2g;
+ struct ath12k_wmi_ppe_threshold_params he_ppet5g;
+ __le32 chainmask_table_id;
+ __le32 lmac_id;
+ __le32 he_cap_info_2g_ext;
+ __le32 he_cap_info_5g_ext;
+ __le32 he_cap_info_internal;
+} __packed;
+
+struct ath12k_wmi_hal_reg_caps_ext_params {
+ __le32 tlv_header;
+ __le32 phy_id;
+ __le32 eeprom_reg_domain;
+ __le32 eeprom_reg_domain_ext;
+ __le32 regcap1;
+ __le32 regcap2;
+ __le32 wireless_modes;
+ __le32 low_2ghz_chan;
+ __le32 high_2ghz_chan;
+ __le32 low_5ghz_chan;
+ __le32 high_5ghz_chan;
+} __packed;
+
+struct ath12k_wmi_soc_hal_reg_caps_params {
+ __le32 num_phy;
+} __packed;
+
+/* 2 word representation of MAC addr */
+struct ath12k_wmi_mac_addr_params {
+ u8 addr[ETH_ALEN];
+ u8 padding[2];
+} __packed;
+
+struct ath12k_wmi_dma_ring_caps_params {
+ __le32 tlv_header;
+ __le32 pdev_id;
+ __le32 module_id;
+ __le32 min_elem;
+ __le32 min_buf_sz;
+ __le32 min_buf_align;
+} __packed;
+
+struct ath12k_wmi_ready_event_min_params {
+ struct ath12k_wmi_abi_version_params fw_abi_vers;
+ struct ath12k_wmi_mac_addr_params mac_addr;
+ __le32 status;
+ __le32 num_dscp_table;
+ __le32 num_extra_mac_addr;
+ __le32 num_total_peers;
+ __le32 num_extra_peers;
+} __packed;
+
+struct wmi_ready_event {
+ struct ath12k_wmi_ready_event_min_params ready_event_min;
+ __le32 max_ast_index;
+ __le32 pktlog_defs_checksum;
+} __packed;
+
+struct wmi_service_available_event {
+ __le32 wmi_service_segment_offset;
+ __le32 wmi_service_segment_bitmap[WMI_SERVICE_SEGMENT_BM_SIZE32];
+} __packed;
+
+struct ath12k_wmi_vdev_create_arg {
+ u8 if_id;
+ u32 type;
+ u32 subtype;
+ struct {
+ u8 tx;
+ u8 rx;
+ } chains[NUM_NL80211_BANDS];
+ u32 pdev_id;
+ u8 if_stats_id;
+};
+
+#define ATH12K_MAX_VDEV_STATS_ID 0x30
+#define ATH12K_INVAL_VDEV_STATS_ID 0xFF
+
+struct wmi_vdev_create_cmd {
+ __le32 tlv_header;
+ __le32 vdev_id;
+ __le32 vdev_type;
+ __le32 vdev_subtype;
+ struct ath12k_wmi_mac_addr_params vdev_macaddr;
+ __le32 num_cfg_txrx_streams;
+ __le32 pdev_id;
+ __le32 vdev_stats_id;
+} __packed;
+
+struct ath12k_wmi_vdev_txrx_streams_params {
+ __le32 tlv_header;
+ u32 band;
+ u32 supported_tx_streams;
+ u32 supported_rx_streams;
+} __packed;
+
+struct wmi_vdev_delete_cmd {
+ __le32 tlv_header;
+ __le32 vdev_id;
+} __packed;
+
+struct wmi_vdev_up_cmd {
+ __le32 tlv_header;
+ __le32 vdev_id;
+ __le32 vdev_assoc_id;
+ struct ath12k_wmi_mac_addr_params vdev_bssid;
+ struct ath12k_wmi_mac_addr_params trans_bssid;
+ __le32 profile_idx;
+ __le32 profile_num;
+} __packed;
+
+struct wmi_vdev_stop_cmd {
+ __le32 tlv_header;
+ __le32 vdev_id;
+} __packed;
+
+struct wmi_vdev_down_cmd {
+ __le32 tlv_header;
+ __le32 vdev_id;
+} __packed;
+
+#define WMI_VDEV_START_HIDDEN_SSID BIT(0)
+#define WMI_VDEV_START_PMF_ENABLED BIT(1)
+#define WMI_VDEV_START_LDPC_RX_ENABLED BIT(3)
+
+#define ATH12K_WMI_SSID_LEN 32
+
+struct ath12k_wmi_ssid_params {
+ __le32 ssid_len;
+ u8 ssid[ATH12K_WMI_SSID_LEN];
+} __packed;
+
+#define ATH12K_VDEV_SETUP_TIMEOUT_HZ (1 * HZ)
+
+struct wmi_vdev_start_request_cmd {
+ __le32 tlv_header;
+ __le32 vdev_id;
+ __le32 requestor_id;
+ __le32 beacon_interval;
+ __le32 dtim_period;
+ __le32 flags;
+ struct ath12k_wmi_ssid_params ssid;
+ __le32 bcn_tx_rate;
+ __le32 bcn_txpower;
+ __le32 num_noa_descriptors;
+ __le32 disable_hw_ack;
+ __le32 preferred_tx_streams;
+ __le32 preferred_rx_streams;
+ __le32 he_ops;
+ __le32 cac_duration_ms;
+ __le32 regdomain;
+} __packed;
+
+#define MGMT_TX_DL_FRM_LEN 64
+
+struct ath12k_wmi_channel_arg {
+ u8 chan_id;
+ u8 pwr;
+ u32 mhz;
+ u32 half_rate:1,
+ quarter_rate:1,
+ dfs_set:1,
+ dfs_set_cfreq2:1,
+ is_chan_passive:1,
+ allow_ht:1,
+ allow_vht:1,
+ allow_he:1,
+ set_agile:1,
+ psc_channel:1;
+ u32 phy_mode;
+ u32 cfreq1;
+ u32 cfreq2;
+ char maxpower;
+ char minpower;
+ char maxregpower;
+ u8 antennamax;
+ u8 reg_class_id;
+};
+
+enum wmi_phy_mode {
+ MODE_11A = 0,
+ MODE_11G = 1, /* 11b/g Mode */
+ MODE_11B = 2, /* 11b Mode */
+ MODE_11GONLY = 3, /* 11g only Mode */
+ MODE_11NA_HT20 = 4,
+ MODE_11NG_HT20 = 5,
+ MODE_11NA_HT40 = 6,
+ MODE_11NG_HT40 = 7,
+ MODE_11AC_VHT20 = 8,
+ MODE_11AC_VHT40 = 9,
+ MODE_11AC_VHT80 = 10,
+ MODE_11AC_VHT20_2G = 11,
+ MODE_11AC_VHT40_2G = 12,
+ MODE_11AC_VHT80_2G = 13,
+ MODE_11AC_VHT80_80 = 14,
+ MODE_11AC_VHT160 = 15,
+ MODE_11AX_HE20 = 16,
+ MODE_11AX_HE40 = 17,
+ MODE_11AX_HE80 = 18,
+ MODE_11AX_HE80_80 = 19,
+ MODE_11AX_HE160 = 20,
+ MODE_11AX_HE20_2G = 21,
+ MODE_11AX_HE40_2G = 22,
+ MODE_11AX_HE80_2G = 23,
+ MODE_UNKNOWN = 24,
+ MODE_MAX = 24
+};
+
+struct wmi_vdev_start_req_arg {
+ u32 vdev_id;
+ u32 freq;
+ u32 band_center_freq1;
+ u32 band_center_freq2;
+ bool passive;
+ bool allow_ibss;
+ bool allow_ht;
+ bool allow_vht;
+ bool ht40plus;
+ bool chan_radar;
+ bool freq2_radar;
+ bool allow_he;
+ u32 min_power;
+ u32 max_power;
+ u32 max_reg_power;
+ u32 max_antenna_gain;
+ enum wmi_phy_mode mode;
+ u32 bcn_intval;
+ u32 dtim_period;
+ u8 *ssid;
+ u32 ssid_len;
+ u32 bcn_tx_rate;
+ u32 bcn_tx_power;
+ bool disable_hw_ack;
+ bool hidden_ssid;
+ bool pmf_enabled;
+ u32 he_ops;
+ u32 cac_duration_ms;
+ u32 regdomain;
+ u32 pref_rx_streams;
+ u32 pref_tx_streams;
+ u32 num_noa_descriptors;
+};
+
+struct ath12k_wmi_peer_create_arg {
+ const u8 *peer_addr;
+ u32 peer_type;
+ u32 vdev_id;
+};
+
+struct ath12k_wmi_pdev_set_regdomain_arg {
+ u16 current_rd_in_use;
+ u16 current_rd_2g;
+ u16 current_rd_5g;
+ u32 ctl_2g;
+ u32 ctl_5g;
+ u8 dfs_domain;
+ u32 pdev_id;
+};
+
+struct ath12k_wmi_rx_reorder_queue_remove_arg {
+ u8 *peer_macaddr;
+ u16 vdev_id;
+ u32 peer_tid_bitmap;
+};
+
+#define WMI_HOST_PDEV_ID_SOC 0xFF
+#define WMI_HOST_PDEV_ID_0 0
+#define WMI_HOST_PDEV_ID_1 1
+#define WMI_HOST_PDEV_ID_2 2
+
+#define WMI_PDEV_ID_SOC 0
+#define WMI_PDEV_ID_1ST 1
+#define WMI_PDEV_ID_2ND 2
+#define WMI_PDEV_ID_3RD 3
+
+/* Freq units in MHz */
+#define REG_RULE_START_FREQ 0x0000ffff
+#define REG_RULE_END_FREQ 0xffff0000
+#define REG_RULE_FLAGS 0x0000ffff
+#define REG_RULE_MAX_BW 0x0000ffff
+#define REG_RULE_REG_PWR 0x00ff0000
+#define REG_RULE_ANT_GAIN 0xff000000
+#define REG_RULE_PSD_INFO BIT(2)
+#define REG_RULE_PSD_EIRP 0xffff0000
+
+#define WMI_VDEV_PARAM_TXBF_SU_TX_BFEE BIT(0)
+#define WMI_VDEV_PARAM_TXBF_MU_TX_BFEE BIT(1)
+#define WMI_VDEV_PARAM_TXBF_SU_TX_BFER BIT(2)
+#define WMI_VDEV_PARAM_TXBF_MU_TX_BFER BIT(3)
+
+#define HECAP_PHYDWORD_0 0
+#define HECAP_PHYDWORD_1 1
+#define HECAP_PHYDWORD_2 2
+
+#define HECAP_PHY_SU_BFER BIT(31)
+#define HECAP_PHY_SU_BFEE BIT(0)
+#define HECAP_PHY_MU_BFER BIT(1)
+#define HECAP_PHY_UL_MUMIMO BIT(22)
+#define HECAP_PHY_UL_MUOFDMA BIT(23)
+
+#define HECAP_PHY_SUBFMR_GET(hecap_phy) \
+ u32_get_bits(hecap_phy[HECAP_PHYDWORD_0], HECAP_PHY_SU_BFER)
+
+#define HECAP_PHY_SUBFME_GET(hecap_phy) \
+ u32_get_bits(hecap_phy[HECAP_PHYDWORD_1], HECAP_PHY_SU_BFEE)
+
+#define HECAP_PHY_MUBFMR_GET(hecap_phy) \
+ u32_get_bits(hecap_phy[HECAP_PHYDWORD_1], HECAP_PHY_MU_BFER)
+
+#define HECAP_PHY_ULMUMIMO_GET(hecap_phy) \
+ u32_get_bits(hecap_phy[HECAP_PHYDWORD_0], HECAP_PHY_UL_MUMIMO)
+
+#define HECAP_PHY_ULOFDMA_GET(hecap_phy) \
+ u32_get_bits(hecap_phy[HECAP_PHYDWORD_0], HECAP_PHY_UL_MUOFDMA)
+
+#define HE_MODE_SU_TX_BFEE BIT(0)
+#define HE_MODE_SU_TX_BFER BIT(1)
+#define HE_MODE_MU_TX_BFEE BIT(2)
+#define HE_MODE_MU_TX_BFER BIT(3)
+#define HE_MODE_DL_OFDMA BIT(4)
+#define HE_MODE_UL_OFDMA BIT(5)
+#define HE_MODE_UL_MUMIMO BIT(6)
+
+#define HE_DL_MUOFDMA_ENABLE 1
+#define HE_UL_MUOFDMA_ENABLE 1
+#define HE_DL_MUMIMO_ENABLE 1
+#define HE_MU_BFEE_ENABLE 1
+#define HE_SU_BFEE_ENABLE 1
+
+#define HE_VHT_SOUNDING_MODE_ENABLE 1
+#define HE_SU_MU_SOUNDING_MODE_ENABLE 1
+#define HE_TRIG_NONTRIG_SOUNDING_MODE_ENABLE 1
+
+/* HE or VHT Sounding */
+#define HE_VHT_SOUNDING_MODE BIT(0)
+/* SU or MU Sounding */
+#define HE_SU_MU_SOUNDING_MODE BIT(2)
+/* Trig or Non-Trig Sounding */
+#define HE_TRIG_NONTRIG_SOUNDING_MODE BIT(3)
+
+#define WMI_TXBF_STS_CAP_OFFSET_LSB 4
+#define WMI_TXBF_STS_CAP_OFFSET_MASK 0x70
+#define WMI_BF_SOUND_DIM_OFFSET_LSB 8
+#define WMI_BF_SOUND_DIM_OFFSET_MASK 0x700
+
+enum wmi_peer_type {
+ WMI_PEER_TYPE_DEFAULT = 0,
+ WMI_PEER_TYPE_BSS = 1,
+ WMI_PEER_TYPE_TDLS = 2,
+};
+
+struct wmi_peer_create_cmd {
+ __le32 tlv_header;
+ __le32 vdev_id;
+ struct ath12k_wmi_mac_addr_params peer_macaddr;
+ __le32 peer_type;
+} __packed;
+
+struct wmi_peer_delete_cmd {
+ __le32 tlv_header;
+ __le32 vdev_id;
+ struct ath12k_wmi_mac_addr_params peer_macaddr;
+} __packed;
+
+struct wmi_peer_reorder_queue_setup_cmd {
+ __le32 tlv_header;
+ __le32 vdev_id;
+ struct ath12k_wmi_mac_addr_params peer_macaddr;
+ __le32 tid;
+ __le32 queue_ptr_lo;
+ __le32 queue_ptr_hi;
+ __le32 queue_no;
+ __le32 ba_window_size_valid;
+ __le32 ba_window_size;
+} __packed;
+
+struct wmi_peer_reorder_queue_remove_cmd {
+ __le32 tlv_header;
+ __le32 vdev_id;
+ struct ath12k_wmi_mac_addr_params peer_macaddr;
+ __le32 tid_mask;
+} __packed;
+
+enum wmi_bss_chan_info_req_type {
+ WMI_BSS_SURVEY_REQ_TYPE_READ = 1,
+ WMI_BSS_SURVEY_REQ_TYPE_READ_CLEAR,
+};
+
+struct wmi_pdev_set_param_cmd {
+ __le32 tlv_header;
+ __le32 pdev_id;
+ __le32 param_id;
+ __le32 param_value;
+} __packed;
+
+struct wmi_pdev_set_ps_mode_cmd {
+ __le32 tlv_header;
+ __le32 vdev_id;
+ __le32 sta_ps_mode;
+} __packed;
+
+struct wmi_pdev_suspend_cmd {
+ __le32 tlv_header;
+ __le32 pdev_id;
+ __le32 suspend_opt;
+} __packed;
+
+struct wmi_pdev_resume_cmd {
+ __le32 tlv_header;
+ __le32 pdev_id;
+} __packed;
+
+struct wmi_pdev_bss_chan_info_req_cmd {
+ __le32 tlv_header;
+ /* ref wmi_bss_chan_info_req_type */
+ __le32 req_type;
+} __packed;
+
+struct wmi_ap_ps_peer_cmd {
+ __le32 tlv_header;
+ __le32 vdev_id;
+ struct ath12k_wmi_mac_addr_params peer_macaddr;
+ __le32 param;
+ __le32 value;
+} __packed;
+
+struct wmi_sta_powersave_param_cmd {
+ __le32 tlv_header;
+ __le32 vdev_id;
+ __le32 param;
+ __le32 value;
+} __packed;
+
+struct wmi_pdev_set_regdomain_cmd {
+ __le32 tlv_header;
+ __le32 pdev_id;
+ __le32 reg_domain;
+ __le32 reg_domain_2g;
+ __le32 reg_domain_5g;
+ __le32 conformance_test_limit_2g;
+ __le32 conformance_test_limit_5g;
+ __le32 dfs_domain;
+} __packed;
+
+struct wmi_peer_set_param_cmd {
+ __le32 tlv_header;
+ __le32 vdev_id;
+ struct ath12k_wmi_mac_addr_params peer_macaddr;
+ __le32 param_id;
+ __le32 param_value;
+} __packed;
+
+struct wmi_peer_flush_tids_cmd {
+ __le32 tlv_header;
+ __le32 vdev_id;
+ struct ath12k_wmi_mac_addr_params peer_macaddr;
+ __le32 peer_tid_bitmap;
+} __packed;
+
+struct wmi_dfs_phyerr_offload_cmd {
+ __le32 tlv_header;
+ __le32 pdev_id;
+} __packed;
+
+struct wmi_bcn_offload_ctrl_cmd {
+ __le32 tlv_header;
+ __le32 vdev_id;
+ __le32 bcn_ctrl_op;
+} __packed;
+
+enum scan_dwelltime_adaptive_mode {
+ SCAN_DWELL_MODE_DEFAULT = 0,
+ SCAN_DWELL_MODE_CONSERVATIVE = 1,
+ SCAN_DWELL_MODE_MODERATE = 2,
+ SCAN_DWELL_MODE_AGGRESSIVE = 3,
+ SCAN_DWELL_MODE_STATIC = 4
+};
+
+#define WLAN_SCAN_MAX_NUM_SSID 10
+#define WLAN_SCAN_MAX_NUM_BSSID 10
+#define WLAN_SCAN_MAX_NUM_CHANNELS 40
+
+struct ath12k_wmi_element_info_arg {
+ u32 len;
+ u8 *ptr;
+};
+
+#define WMI_IE_BITMAP_SIZE 8
+
+#define WMI_SCAN_MAX_NUM_SSID 0x0A
+/* prefix used by scan requestor ids on the host */
+#define WMI_HOST_SCAN_REQUESTOR_ID_PREFIX 0xA000
+
+/* prefix used by scan request ids generated on the host */
+/* host cycles through the lower 12 bits to generate ids */
+#define WMI_HOST_SCAN_REQ_ID_PREFIX 0xA000
+
+#define WLAN_SCAN_PARAMS_MAX_SSID 16
+#define WLAN_SCAN_PARAMS_MAX_BSSID 4
+#define WLAN_SCAN_PARAMS_MAX_IE_LEN 256
+
+/* Values lower than this may be refused by some firmware revisions with a scan
+ * completion with a timedout reason.
+ */
+#define WMI_SCAN_CHAN_MIN_TIME_MSEC 40
+
+/* Scan priority numbers must be sequential, starting with 0 */
+enum wmi_scan_priority {
+ WMI_SCAN_PRIORITY_VERY_LOW = 0,
+ WMI_SCAN_PRIORITY_LOW,
+ WMI_SCAN_PRIORITY_MEDIUM,
+ WMI_SCAN_PRIORITY_HIGH,
+ WMI_SCAN_PRIORITY_VERY_HIGH,
+ WMI_SCAN_PRIORITY_COUNT /* number of priorities supported */
+};
+
+enum wmi_scan_event_type {
+ WMI_SCAN_EVENT_STARTED = BIT(0),
+ WMI_SCAN_EVENT_COMPLETED = BIT(1),
+ WMI_SCAN_EVENT_BSS_CHANNEL = BIT(2),
+ WMI_SCAN_EVENT_FOREIGN_CHAN = BIT(3),
+ WMI_SCAN_EVENT_DEQUEUED = BIT(4),
+ /* possibly by high-prio scan */
+ WMI_SCAN_EVENT_PREEMPTED = BIT(5),
+ WMI_SCAN_EVENT_START_FAILED = BIT(6),
+ WMI_SCAN_EVENT_RESTARTED = BIT(7),
+ WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT = BIT(8),
+ WMI_SCAN_EVENT_SUSPENDED = BIT(9),
+ WMI_SCAN_EVENT_RESUMED = BIT(10),
+ WMI_SCAN_EVENT_MAX = BIT(15),
+};
+
+enum wmi_scan_completion_reason {
+ WMI_SCAN_REASON_COMPLETED,
+ WMI_SCAN_REASON_CANCELLED,
+ WMI_SCAN_REASON_PREEMPTED,
+ WMI_SCAN_REASON_TIMEDOUT,
+ WMI_SCAN_REASON_INTERNAL_FAILURE,
+ WMI_SCAN_REASON_MAX,
+};
+
+struct wmi_start_scan_cmd {
+ __le32 tlv_header;
+ __le32 scan_id;
+ __le32 scan_req_id;
+ __le32 vdev_id;
+ __le32 scan_priority;
+ __le32 notify_scan_events;
+ __le32 dwell_time_active;
+ __le32 dwell_time_passive;
+ __le32 min_rest_time;
+ __le32 max_rest_time;
+ __le32 repeat_probe_time;
+ __le32 probe_spacing_time;
+ __le32 idle_time;
+ __le32 max_scan_time;
+ __le32 probe_delay;
+ __le32 scan_ctrl_flags;
+ __le32 burst_duration;
+ __le32 num_chan;
+ __le32 num_bssid;
+ __le32 num_ssids;
+ __le32 ie_len;
+ __le32 n_probes;
+ struct ath12k_wmi_mac_addr_params mac_addr;
+ struct ath12k_wmi_mac_addr_params mac_mask;
+ u32 ie_bitmap[WMI_IE_BITMAP_SIZE];
+ __le32 num_vendor_oui;
+ __le32 scan_ctrl_flags_ext;
+ __le32 dwell_time_active_2g;
+ __le32 dwell_time_active_6g;
+ __le32 dwell_time_passive_6g;
+ __le32 scan_start_offset;
+} __packed;
+
+#define WMI_SCAN_FLAG_PASSIVE 0x1
+#define WMI_SCAN_ADD_BCAST_PROBE_REQ 0x2
+#define WMI_SCAN_ADD_CCK_RATES 0x4
+#define WMI_SCAN_ADD_OFDM_RATES 0x8
+#define WMI_SCAN_CHAN_STAT_EVENT 0x10
+#define WMI_SCAN_FILTER_PROBE_REQ 0x20
+#define WMI_SCAN_BYPASS_DFS_CHN 0x40
+#define WMI_SCAN_CONTINUE_ON_ERROR 0x80
+#define WMI_SCAN_FILTER_PROMISCUOS 0x100
+#define WMI_SCAN_FLAG_FORCE_ACTIVE_ON_DFS 0x200
+#define WMI_SCAN_ADD_TPC_IE_IN_PROBE_REQ 0x400
+#define WMI_SCAN_ADD_DS_IE_IN_PROBE_REQ 0x800
+#define WMI_SCAN_ADD_SPOOF_MAC_IN_PROBE_REQ 0x1000
+#define WMI_SCAN_OFFCHAN_MGMT_TX 0x2000
+#define WMI_SCAN_OFFCHAN_DATA_TX 0x4000
+#define WMI_SCAN_CAPTURE_PHY_ERROR 0x8000
+#define WMI_SCAN_FLAG_STRICT_PASSIVE_ON_PCHN 0x10000
+#define WMI_SCAN_FLAG_HALF_RATE_SUPPORT 0x20000
+#define WMI_SCAN_FLAG_QUARTER_RATE_SUPPORT 0x40000
+#define WMI_SCAN_RANDOM_SEQ_NO_IN_PROBE_REQ 0x80000
+#define WMI_SCAN_ENABLE_IE_WHTELIST_IN_PROBE_REQ 0x100000
+
+#define WMI_SCAN_DWELL_MODE_MASK GENMASK(23, 21)
+
+enum {
+ WMI_SCAN_DWELL_MODE_DEFAULT = 0,
+ WMI_SCAN_DWELL_MODE_CONSERVATIVE = 1,
+ WMI_SCAN_DWELL_MODE_MODERATE = 2,
+ WMI_SCAN_DWELL_MODE_AGGRESSIVE = 3,
+ WMI_SCAN_DWELL_MODE_STATIC = 4,
+};
+
+struct ath12k_wmi_hint_short_ssid_arg {
+ u32 freq_flags;
+ u32 short_ssid;
+};
+
+struct ath12k_wmi_hint_bssid_arg {
+ u32 freq_flags;
+ struct ath12k_wmi_mac_addr_params bssid;
+};
+
+struct ath12k_wmi_scan_req_arg {
+ u32 scan_id;
+ u32 scan_req_id;
+ u32 vdev_id;
+ u32 pdev_id;
+ enum wmi_scan_priority scan_priority;
+ union {
+ struct {
+ u32 scan_ev_started:1,
+ scan_ev_completed:1,
+ scan_ev_bss_chan:1,
+ scan_ev_foreign_chan:1,
+ scan_ev_dequeued:1,
+ scan_ev_preempted:1,
+ scan_ev_start_failed:1,
+ scan_ev_restarted:1,
+ scan_ev_foreign_chn_exit:1,
+ scan_ev_invalid:1,
+ scan_ev_gpio_timeout:1,
+ scan_ev_suspended:1,
+ scan_ev_resumed:1;
+ };
+ u32 scan_events;
+ };
+ u32 dwell_time_active;
+ u32 dwell_time_active_2g;
+ u32 dwell_time_passive;
+ u32 dwell_time_active_6g;
+ u32 dwell_time_passive_6g;
+ u32 min_rest_time;
+ u32 max_rest_time;
+ u32 repeat_probe_time;
+ u32 probe_spacing_time;
+ u32 idle_time;
+ u32 max_scan_time;
+ u32 probe_delay;
+ union {
+ struct {
+ u32 scan_f_passive:1,
+ scan_f_bcast_probe:1,
+ scan_f_cck_rates:1,
+ scan_f_ofdm_rates:1,
+ scan_f_chan_stat_evnt:1,
+ scan_f_filter_prb_req:1,
+ scan_f_bypass_dfs_chn:1,
+ scan_f_continue_on_err:1,
+ scan_f_offchan_mgmt_tx:1,
+ scan_f_offchan_data_tx:1,
+ scan_f_promisc_mode:1,
+ scan_f_capture_phy_err:1,
+ scan_f_strict_passive_pch:1,
+ scan_f_half_rate:1,
+ scan_f_quarter_rate:1,
+ scan_f_force_active_dfs_chn:1,
+ scan_f_add_tpc_ie_in_probe:1,
+ scan_f_add_ds_ie_in_probe:1,
+ scan_f_add_spoofed_mac_in_probe:1,
+ scan_f_add_rand_seq_in_probe:1,
+ scan_f_en_ie_whitelist_in_probe:1,
+ scan_f_forced:1,
+ scan_f_2ghz:1,
+ scan_f_5ghz:1,
+ scan_f_80mhz:1;
+ };
+ u32 scan_flags;
+ };
+ enum scan_dwelltime_adaptive_mode adaptive_dwell_time_mode;
+ u32 burst_duration;
+ u32 num_chan;
+ u32 num_bssid;
+ u32 num_ssids;
+ u32 n_probes;
+ u32 chan_list[WLAN_SCAN_MAX_NUM_CHANNELS];
+ u32 notify_scan_events;
+ struct cfg80211_ssid ssid[WLAN_SCAN_MAX_NUM_SSID];
+ struct ath12k_wmi_mac_addr_params bssid_list[WLAN_SCAN_MAX_NUM_BSSID];
+ struct ath12k_wmi_element_info_arg extraie;
+ u32 num_hint_s_ssid;
+ u32 num_hint_bssid;
+ struct ath12k_wmi_hint_short_ssid_arg hint_s_ssid[WLAN_SCAN_MAX_HINT_S_SSID];
+ struct ath12k_wmi_hint_bssid_arg hint_bssid[WLAN_SCAN_MAX_HINT_BSSID];
+};
+
+struct wmi_ssid_arg {
+ int len;
+ const u8 *ssid;
+};
+
+struct wmi_bssid_arg {
+ const u8 *bssid;
+};
+
+struct wmi_start_scan_arg {
+ u32 scan_id;
+ u32 scan_req_id;
+ u32 vdev_id;
+ u32 scan_priority;
+ u32 notify_scan_events;
+ u32 dwell_time_active;
+ u32 dwell_time_passive;
+ u32 min_rest_time;
+ u32 max_rest_time;
+ u32 repeat_probe_time;
+ u32 probe_spacing_time;
+ u32 idle_time;
+ u32 max_scan_time;
+ u32 probe_delay;
+ u32 scan_ctrl_flags;
+
+ u32 ie_len;
+ u32 n_channels;
+ u32 n_ssids;
+ u32 n_bssids;
+
+ u8 ie[WLAN_SCAN_PARAMS_MAX_IE_LEN];
+ u32 channels[64];
+ struct wmi_ssid_arg ssids[WLAN_SCAN_PARAMS_MAX_SSID];
+ struct wmi_bssid_arg bssids[WLAN_SCAN_PARAMS_MAX_BSSID];
+};
+
+#define WMI_SCAN_STOP_ONE 0x00000000
+#define WMI_SCAN_STOP_VAP_ALL 0x01000000
+#define WMI_SCAN_STOP_ALL 0x04000000
+
+/* Prefix 0xA000 indicates that the scan request
+ * is trigger by HOST
+ */
+#define ATH12K_SCAN_ID 0xA000
+
+enum scan_cancel_req_type {
+ WLAN_SCAN_CANCEL_SINGLE = 1,
+ WLAN_SCAN_CANCEL_VDEV_ALL,
+ WLAN_SCAN_CANCEL_PDEV_ALL,
+};
+
+struct ath12k_wmi_scan_cancel_arg {
+ u32 requester;
+ u32 scan_id;
+ enum scan_cancel_req_type req_type;
+ u32 vdev_id;
+ u32 pdev_id;
+};
+
+struct wmi_bcn_send_from_host_cmd {
+ __le32 tlv_header;
+ __le32 vdev_id;
+ __le32 data_len;
+ union {
+ __le32 frag_ptr;
+ __le32 frag_ptr_lo;
+ };
+ __le32 frame_ctrl;
+ __le32 dtim_flag;
+ __le32 bcn_antenna;
+ __le32 frag_ptr_hi;
+};
+
+#define WMI_CHAN_INFO_MODE GENMASK(5, 0)
+#define WMI_CHAN_INFO_HT40_PLUS BIT(6)
+#define WMI_CHAN_INFO_PASSIVE BIT(7)
+#define WMI_CHAN_INFO_ADHOC_ALLOWED BIT(8)
+#define WMI_CHAN_INFO_AP_DISABLED BIT(9)
+#define WMI_CHAN_INFO_DFS BIT(10)
+#define WMI_CHAN_INFO_ALLOW_HT BIT(11)
+#define WMI_CHAN_INFO_ALLOW_VHT BIT(12)
+#define WMI_CHAN_INFO_CHAN_CHANGE_CAUSE_CSA BIT(13)
+#define WMI_CHAN_INFO_HALF_RATE BIT(14)
+#define WMI_CHAN_INFO_QUARTER_RATE BIT(15)
+#define WMI_CHAN_INFO_DFS_FREQ2 BIT(16)
+#define WMI_CHAN_INFO_ALLOW_HE BIT(17)
+#define WMI_CHAN_INFO_PSC BIT(18)
+
+#define WMI_CHAN_REG_INFO1_MIN_PWR GENMASK(7, 0)
+#define WMI_CHAN_REG_INFO1_MAX_PWR GENMASK(15, 8)
+#define WMI_CHAN_REG_INFO1_MAX_REG_PWR GENMASK(23, 16)
+#define WMI_CHAN_REG_INFO1_REG_CLS GENMASK(31, 24)
+
+#define WMI_CHAN_REG_INFO2_ANT_MAX GENMASK(7, 0)
+#define WMI_CHAN_REG_INFO2_MAX_TX_PWR GENMASK(15, 8)
+
+struct ath12k_wmi_channel_params {
+ __le32 tlv_header;
+ __le32 mhz;
+ __le32 band_center_freq1;
+ __le32 band_center_freq2;
+ __le32 info;
+ __le32 reg_info_1;
+ __le32 reg_info_2;
+} __packed;
+
+enum wmi_sta_ps_mode {
+ WMI_STA_PS_MODE_DISABLED = 0,
+ WMI_STA_PS_MODE_ENABLED = 1,
+};
+
+#define WMI_SMPS_MASK_LOWER_16BITS 0xFF
+#define WMI_SMPS_MASK_UPPER_3BITS 0x7
+#define WMI_SMPS_PARAM_VALUE_SHIFT 29
+
+#define ATH12K_WMI_FW_HANG_ASSERT_TYPE 1
+#define ATH12K_WMI_FW_HANG_DELAY 0
+
+/* type, 0:unused 1: ASSERT 2: not respond detect command
+ * delay_time_ms, the simulate will delay time
+ */
+
+struct wmi_force_fw_hang_cmd {
+ __le32 tlv_header;
+ __le32 type;
+ __le32 delay_time_ms;
+} __packed;
+
+struct wmi_vdev_set_param_cmd {
+ __le32 tlv_header;
+ __le32 vdev_id;
+ __le32 param_id;
+ __le32 param_value;
+} __packed;
+
+struct wmi_get_pdev_temperature_cmd {
+ __le32 tlv_header;
+ __le32 param;
+ __le32 pdev_id;
+} __packed;
+
+#define WMI_BEACON_TX_BUFFER_SIZE 512
+
+struct wmi_bcn_tmpl_cmd {
+ __le32 tlv_header;
+ __le32 vdev_id;
+ __le32 tim_ie_offset;
+ __le32 buf_len;
+ __le32 csa_switch_count_offset;
+ __le32 ext_csa_switch_count_offset;
+ __le32 csa_event_bitmap;
+ __le32 mbssid_ie_offset;
+ __le32 esp_ie_offset;
+} __packed;
+
+struct wmi_vdev_install_key_cmd {
+ __le32 tlv_header;
+ __le32 vdev_id;
+ struct ath12k_wmi_mac_addr_params peer_macaddr;
+ __le32 key_idx;
+ __le32 key_flags;
+ __le32 key_cipher;
+ __le64 key_rsc_counter;
+ __le64 key_global_rsc_counter;
+ __le64 key_tsc_counter;
+ u8 wpi_key_rsc_counter[16];
+ u8 wpi_key_tsc_counter[16];
+ __le32 key_len;
+ __le32 key_txmic_len;
+ __le32 key_rxmic_len;
+ __le32 is_group_key_id_valid;
+ __le32 group_key_id;
+
+ /* Followed by key_data containing key followed by
+ * tx mic and then rx mic
+ */
+} __packed;
+
+struct wmi_vdev_install_key_arg {
+ u32 vdev_id;
+ const u8 *macaddr;
+ u32 key_idx;
+ u32 key_flags;
+ u32 key_cipher;
+ u32 key_len;
+ u32 key_txmic_len;
+ u32 key_rxmic_len;
+ u64 key_rsc_counter;
+ const void *key_data;
+};
+
+#define WMI_MAX_SUPPORTED_RATES 128
+#define WMI_HOST_MAX_HECAP_PHY_SIZE 3
+#define WMI_HOST_MAX_HE_RATE_SET 3
+#define WMI_HECAP_TXRX_MCS_NSS_IDX_80 0
+#define WMI_HECAP_TXRX_MCS_NSS_IDX_160 1
+#define WMI_HECAP_TXRX_MCS_NSS_IDX_80_80 2
+
+struct wmi_rate_set_arg {
+ u32 num_rates;
+ u8 rates[WMI_MAX_SUPPORTED_RATES];
+};
+
+struct ath12k_wmi_peer_assoc_arg {
+ u32 vdev_id;
+ u32 peer_new_assoc;
+ u32 peer_associd;
+ u32 peer_flags;
+ u32 peer_caps;
+ u32 peer_listen_intval;
+ u32 peer_ht_caps;
+ u32 peer_max_mpdu;
+ u32 peer_mpdu_density;
+ u32 peer_rate_caps;
+ u32 peer_nss;
+ u32 peer_vht_caps;
+ u32 peer_phymode;
+ u32 peer_ht_info[2];
+ struct wmi_rate_set_arg peer_legacy_rates;
+ struct wmi_rate_set_arg peer_ht_rates;
+ u32 rx_max_rate;
+ u32 rx_mcs_set;
+ u32 tx_max_rate;
+ u32 tx_mcs_set;
+ u8 vht_capable;
+ u8 min_data_rate;
+ u32 tx_max_mcs_nss;
+ u32 peer_bw_rxnss_override;
+ bool is_pmf_enabled;
+ bool is_wme_set;
+ bool qos_flag;
+ bool apsd_flag;
+ bool ht_flag;
+ bool bw_40;
+ bool bw_80;
+ bool bw_160;
+ bool stbc_flag;
+ bool ldpc_flag;
+ bool static_mimops_flag;
+ bool dynamic_mimops_flag;
+ bool spatial_mux_flag;
+ bool vht_flag;
+ bool vht_ng_flag;
+ bool need_ptk_4_way;
+ bool need_gtk_2_way;
+ bool auth_flag;
+ bool safe_mode_enabled;
+ bool amsdu_disable;
+ /* Use common structure */
+ u8 peer_mac[ETH_ALEN];
+
+ bool he_flag;
+ u32 peer_he_cap_macinfo[2];
+ u32 peer_he_cap_macinfo_internal;
+ u32 peer_he_caps_6ghz;
+ u32 peer_he_ops;
+ u32 peer_he_cap_phyinfo[WMI_HOST_MAX_HECAP_PHY_SIZE];
+ u32 peer_he_mcs_count;
+ u32 peer_he_rx_mcs_set[WMI_HOST_MAX_HE_RATE_SET];
+ u32 peer_he_tx_mcs_set[WMI_HOST_MAX_HE_RATE_SET];
+ bool twt_responder;
+ bool twt_requester;
+ struct ath12k_wmi_ppe_threshold_arg peer_ppet;
+};
+
+struct wmi_peer_assoc_complete_cmd {
+ __le32 tlv_header;
+ struct ath12k_wmi_mac_addr_params peer_macaddr;
+ __le32 vdev_id;
+ __le32 peer_new_assoc;
+ __le32 peer_associd;
+ __le32 peer_flags;
+ __le32 peer_caps;
+ __le32 peer_listen_intval;
+ __le32 peer_ht_caps;
+ __le32 peer_max_mpdu;
+ __le32 peer_mpdu_density;
+ __le32 peer_rate_caps;
+ __le32 peer_nss;
+ __le32 peer_vht_caps;
+ __le32 peer_phymode;
+ __le32 peer_ht_info[2];
+ __le32 num_peer_legacy_rates;
+ __le32 num_peer_ht_rates;
+ __le32 peer_bw_rxnss_override;
+ struct ath12k_wmi_ppe_threshold_params peer_ppet;
+ __le32 peer_he_cap_info;
+ __le32 peer_he_ops;
+ __le32 peer_he_cap_phy[WMI_MAX_HECAP_PHY_SIZE];
+ __le32 peer_he_mcs;
+ __le32 peer_he_cap_info_ext;
+ __le32 peer_he_cap_info_internal;
+ __le32 min_data_rate;
+ __le32 peer_he_caps_6ghz;
+} __packed;
+
+struct wmi_stop_scan_cmd {
+ __le32 tlv_header;
+ __le32 requestor;
+ __le32 scan_id;
+ __le32 req_type;
+ __le32 vdev_id;
+ __le32 pdev_id;
+} __packed;
+
+struct ath12k_wmi_scan_chan_list_arg {
+ u32 pdev_id;
+ u16 nallchans;
+ struct ath12k_wmi_channel_arg channel[];
+};
+
+struct wmi_scan_chan_list_cmd {
+ __le32 tlv_header;
+ __le32 num_scan_chans;
+ __le32 flags;
+ __le32 pdev_id;
+} __packed;
+
+#define WMI_MGMT_SEND_DOWNLD_LEN 64
+
+#define WMI_TX_PARAMS_DWORD0_POWER GENMASK(7, 0)
+#define WMI_TX_PARAMS_DWORD0_MCS_MASK GENMASK(19, 8)
+#define WMI_TX_PARAMS_DWORD0_NSS_MASK GENMASK(27, 20)
+#define WMI_TX_PARAMS_DWORD0_RETRY_LIMIT GENMASK(31, 28)
+
+#define WMI_TX_PARAMS_DWORD1_CHAIN_MASK GENMASK(7, 0)
+#define WMI_TX_PARAMS_DWORD1_BW_MASK GENMASK(14, 8)
+#define WMI_TX_PARAMS_DWORD1_PREAMBLE_TYPE GENMASK(19, 15)
+#define WMI_TX_PARAMS_DWORD1_FRAME_TYPE BIT(20)
+#define WMI_TX_PARAMS_DWORD1_RSVD GENMASK(31, 21)
+
+struct wmi_mgmt_send_cmd {
+ __le32 tlv_header;
+ __le32 vdev_id;
+ __le32 desc_id;
+ __le32 chanfreq;
+ __le32 paddr_lo;
+ __le32 paddr_hi;
+ __le32 frame_len;
+ __le32 buf_len;
+ __le32 tx_params_valid;
+
+ /* This TLV is followed by struct wmi_mgmt_frame */
+
+ /* Followed by struct wmi_mgmt_send_params */
+} __packed;
+
+struct wmi_sta_powersave_mode_cmd {
+ __le32 tlv_header;
+ __le32 vdev_id;
+ __le32 sta_ps_mode;
+} __packed;
+
+struct wmi_sta_smps_force_mode_cmd {
+ __le32 tlv_header;
+ __le32 vdev_id;
+ __le32 forced_mode;
+} __packed;
+
+struct wmi_sta_smps_param_cmd {
+ __le32 tlv_header;
+ __le32 vdev_id;
+ __le32 param;
+ __le32 value;
+} __packed;
+
+struct ath12k_wmi_bcn_prb_info_params {
+ __le32 tlv_header;
+ __le32 caps;
+ __le32 erp;
+} __packed;
+
+enum {
+ WMI_PDEV_SUSPEND,
+ WMI_PDEV_SUSPEND_AND_DISABLE_INTR,
+};
+
+struct wmi_pdev_green_ap_ps_enable_cmd_param {
+ __le32 tlv_header;
+ __le32 pdev_id;
+ __le32 enable;
+} __packed;
+
+struct ath12k_wmi_ap_ps_arg {
+ u32 vdev_id;
+ u32 param;
+ u32 value;
+};
+
+enum set_init_cc_type {
+ WMI_COUNTRY_INFO_TYPE_ALPHA,
+ WMI_COUNTRY_INFO_TYPE_COUNTRY_CODE,
+ WMI_COUNTRY_INFO_TYPE_REGDOMAIN,
+};
+
+enum set_init_cc_flags {
+ INVALID_CC,
+ CC_IS_SET,
+ REGDMN_IS_SET,
+ ALPHA_IS_SET,
+};
+
+struct ath12k_wmi_init_country_arg {
+ union {
+ u16 country_code;
+ u16 regdom_id;
+ u8 alpha2[3];
+ } cc_info;
+ enum set_init_cc_flags flags;
+};
+
+struct wmi_init_country_cmd {
+ __le32 tlv_header;
+ __le32 pdev_id;
+ __le32 init_cc_type;
+ union {
+ __le32 country_code;
+ __le32 regdom_id;
+ __le32 alpha2;
+ } cc_info;
+} __packed;
+
+struct wmi_delba_send_cmd {
+ __le32 tlv_header;
+ __le32 vdev_id;
+ struct ath12k_wmi_mac_addr_params peer_macaddr;
+ __le32 tid;
+ __le32 initiator;
+ __le32 reasoncode;
+} __packed;
+
+struct wmi_addba_setresponse_cmd {
+ __le32 tlv_header;
+ __le32 vdev_id;
+ struct ath12k_wmi_mac_addr_params peer_macaddr;
+ __le32 tid;
+ __le32 statuscode;
+} __packed;
+
+struct wmi_addba_send_cmd {
+ __le32 tlv_header;
+ __le32 vdev_id;
+ struct ath12k_wmi_mac_addr_params peer_macaddr;
+ __le32 tid;
+ __le32 buffersize;
+} __packed;
+
+struct wmi_addba_clear_resp_cmd {
+ __le32 tlv_header;
+ __le32 vdev_id;
+ struct ath12k_wmi_mac_addr_params peer_macaddr;
+} __packed;
+
+#define DFS_PHYERR_UNIT_TEST_CMD 0
+#define DFS_UNIT_TEST_MODULE 0x2b
+#define DFS_UNIT_TEST_TOKEN 0xAA
+
+enum dfs_test_args_idx {
+ DFS_TEST_CMDID = 0,
+ DFS_TEST_PDEV_ID,
+ DFS_TEST_RADAR_PARAM,
+ DFS_MAX_TEST_ARGS,
+};
+
+struct wmi_dfs_unit_test_arg {
+ u32 cmd_id;
+ u32 pdev_id;
+ u32 radar_param;
+};
+
+struct wmi_unit_test_cmd {
+ __le32 tlv_header;
+ __le32 vdev_id;
+ __le32 module_id;
+ __le32 num_args;
+ __le32 diag_token;
+ /* Followed by test args*/
+} __packed;
+
+#define MAX_SUPPORTED_RATES 128
+
+#define WMI_PEER_AUTH 0x00000001
+#define WMI_PEER_QOS 0x00000002
+#define WMI_PEER_NEED_PTK_4_WAY 0x00000004
+#define WMI_PEER_NEED_GTK_2_WAY 0x00000010
+#define WMI_PEER_HE 0x00000400
+#define WMI_PEER_APSD 0x00000800
+#define WMI_PEER_HT 0x00001000
+#define WMI_PEER_40MHZ 0x00002000
+#define WMI_PEER_STBC 0x00008000
+#define WMI_PEER_LDPC 0x00010000
+#define WMI_PEER_DYN_MIMOPS 0x00020000
+#define WMI_PEER_STATIC_MIMOPS 0x00040000
+#define WMI_PEER_SPATIAL_MUX 0x00200000
+#define WMI_PEER_TWT_REQ 0x00400000
+#define WMI_PEER_TWT_RESP 0x00800000
+#define WMI_PEER_VHT 0x02000000
+#define WMI_PEER_80MHZ 0x04000000
+#define WMI_PEER_PMF 0x08000000
+/* TODO: Place holder for WLAN_PEER_F_PS_PRESEND_REQUIRED = 0x10000000.
+ * Need to be cleaned up
+ */
+#define WMI_PEER_IS_P2P_CAPABLE 0x20000000
+#define WMI_PEER_160MHZ 0x40000000
+#define WMI_PEER_SAFEMODE_EN 0x80000000
+
+struct ath12k_wmi_vht_rate_set_params {
+ __le32 tlv_header;
+ __le32 rx_max_rate;
+ __le32 rx_mcs_set;
+ __le32 tx_max_rate;
+ __le32 tx_mcs_set;
+ __le32 tx_max_mcs_nss;
+} __packed;
+
+struct ath12k_wmi_he_rate_set_params {
+ __le32 tlv_header;
+ __le32 rx_mcs_set;
+ __le32 tx_mcs_set;
+} __packed;
+
+#define MAX_REG_RULES 10
+#define REG_ALPHA2_LEN 2
+#define MAX_6G_REG_RULES 5
+#define REG_US_5G_NUM_REG_RULES 4
+
+enum wmi_start_event_param {
+ WMI_VDEV_START_RESP_EVENT = 0,
+ WMI_VDEV_RESTART_RESP_EVENT,
+};
+
+struct wmi_vdev_start_resp_event {
+ __le32 vdev_id;
+ __le32 requestor_id;
+ /* enum wmi_start_event_param */
+ __le32 resp_type;
+ __le32 status;
+ __le32 chain_mask;
+ __le32 smps_mode;
+ union {
+ __le32 mac_id;
+ __le32 pdev_id;
+ };
+ __le32 cfgd_tx_streams;
+ __le32 cfgd_rx_streams;
+} __packed;
+
+/* VDEV start response status codes */
+enum wmi_vdev_start_resp_status_code {
+ WMI_VDEV_START_RESPONSE_STATUS_SUCCESS = 0,
+ WMI_VDEV_START_RESPONSE_INVALID_VDEVID = 1,
+ WMI_VDEV_START_RESPONSE_NOT_SUPPORTED = 2,
+ WMI_VDEV_START_RESPONSE_DFS_VIOLATION = 3,
+ WMI_VDEV_START_RESPONSE_INVALID_REGDOMAIN = 4,
+};
+
+enum wmi_reg_6g_ap_type {
+ WMI_REG_INDOOR_AP = 0,
+ WMI_REG_STD_POWER_AP = 1,
+ WMI_REG_VLP_AP = 2,
+ WMI_REG_CURRENT_MAX_AP_TYPE,
+ WMI_REG_MAX_SUPP_AP_TYPE = WMI_REG_VLP_AP,
+ WMI_REG_MAX_AP_TYPE = 7,
+};
+
+enum wmi_reg_6g_client_type {
+ WMI_REG_DEFAULT_CLIENT = 0,
+ WMI_REG_SUBORDINATE_CLIENT = 1,
+ WMI_REG_MAX_CLIENT_TYPE = 2,
+};
+
+/* Regulatory Rule Flags Passed by FW */
+#define REGULATORY_CHAN_DISABLED BIT(0)
+#define REGULATORY_CHAN_NO_IR BIT(1)
+#define REGULATORY_CHAN_RADAR BIT(3)
+#define REGULATORY_CHAN_NO_OFDM BIT(6)
+#define REGULATORY_CHAN_INDOOR_ONLY BIT(9)
+
+#define REGULATORY_CHAN_NO_HT40 BIT(4)
+#define REGULATORY_CHAN_NO_80MHZ BIT(7)
+#define REGULATORY_CHAN_NO_160MHZ BIT(8)
+#define REGULATORY_CHAN_NO_20MHZ BIT(11)
+#define REGULATORY_CHAN_NO_10MHZ BIT(12)
+
+enum {
+ WMI_REG_SET_CC_STATUS_PASS = 0,
+ WMI_REG_CURRENT_ALPHA2_NOT_FOUND = 1,
+ WMI_REG_INIT_ALPHA2_NOT_FOUND = 2,
+ WMI_REG_SET_CC_CHANGE_NOT_ALLOWED = 3,
+ WMI_REG_SET_CC_STATUS_NO_MEMORY = 4,
+ WMI_REG_SET_CC_STATUS_FAIL = 5,
+};
+
+#define WMI_REG_CLIENT_MAX 4
+
+struct wmi_reg_chan_list_cc_ext_event {
+ __le32 status_code;
+ __le32 phy_id;
+ __le32 alpha2;
+ __le32 num_phy;
+ __le32 country_id;
+ __le32 domain_code;
+ __le32 dfs_region;
+ __le32 phybitmap;
+ __le32 min_bw_2g;
+ __le32 max_bw_2g;
+ __le32 min_bw_5g;
+ __le32 max_bw_5g;
+ __le32 num_2g_reg_rules;
+ __le32 num_5g_reg_rules;
+ __le32 client_type;
+ __le32 rnr_tpe_usable;
+ __le32 unspecified_ap_usable;
+ __le32 domain_code_6g_ap_lpi;
+ __le32 domain_code_6g_ap_sp;
+ __le32 domain_code_6g_ap_vlp;
+ __le32 domain_code_6g_client_lpi[WMI_REG_CLIENT_MAX];
+ __le32 domain_code_6g_client_sp[WMI_REG_CLIENT_MAX];
+ __le32 domain_code_6g_client_vlp[WMI_REG_CLIENT_MAX];
+ __le32 domain_code_6g_super_id;
+ __le32 min_bw_6g_ap_sp;
+ __le32 max_bw_6g_ap_sp;
+ __le32 min_bw_6g_ap_lpi;
+ __le32 max_bw_6g_ap_lpi;
+ __le32 min_bw_6g_ap_vlp;
+ __le32 max_bw_6g_ap_vlp;
+ __le32 min_bw_6g_client_sp[WMI_REG_CLIENT_MAX];
+ __le32 max_bw_6g_client_sp[WMI_REG_CLIENT_MAX];
+ __le32 min_bw_6g_client_lpi[WMI_REG_CLIENT_MAX];
+ __le32 max_bw_6g_client_lpi[WMI_REG_CLIENT_MAX];
+ __le32 min_bw_6g_client_vlp[WMI_REG_CLIENT_MAX];
+ __le32 max_bw_6g_client_vlp[WMI_REG_CLIENT_MAX];
+ __le32 num_6g_reg_rules_ap_sp;
+ __le32 num_6g_reg_rules_ap_lpi;
+ __le32 num_6g_reg_rules_ap_vlp;
+ __le32 num_6g_reg_rules_cl_sp[WMI_REG_CLIENT_MAX];
+ __le32 num_6g_reg_rules_cl_lpi[WMI_REG_CLIENT_MAX];
+ __le32 num_6g_reg_rules_cl_vlp[WMI_REG_CLIENT_MAX];
+} __packed;
+
+struct ath12k_wmi_reg_rule_ext_params {
+ __le32 tlv_header;
+ __le32 freq_info;
+ __le32 bw_pwr_info;
+ __le32 flag_info;
+ __le32 psd_power_info;
+} __packed;
+
+struct wmi_vdev_delete_resp_event {
+ __le32 vdev_id;
+} __packed;
+
+struct wmi_peer_delete_resp_event {
+ __le32 vdev_id;
+ struct ath12k_wmi_mac_addr_params peer_macaddr;
+} __packed;
+
+struct wmi_bcn_tx_status_event {
+ __le32 vdev_id;
+ __le32 tx_status;
+} __packed;
+
+struct wmi_vdev_stopped_event {
+ __le32 vdev_id;
+} __packed;
+
+struct wmi_pdev_bss_chan_info_event {
+ __le32 pdev_id;
+ __le32 freq; /* Units in MHz */
+ __le32 noise_floor; /* units are dBm */
+ /* rx clear - how often the channel was unused */
+ __le32 rx_clear_count_low;
+ __le32 rx_clear_count_high;
+ /* cycle count - elapsed time during measured period, in clock ticks */
+ __le32 cycle_count_low;
+ __le32 cycle_count_high;
+ /* tx cycle count - elapsed time spent in tx, in clock ticks */
+ __le32 tx_cycle_count_low;
+ __le32 tx_cycle_count_high;
+ /* rx cycle count - elapsed time spent in rx, in clock ticks */
+ __le32 rx_cycle_count_low;
+ __le32 rx_cycle_count_high;
+ /*rx_cycle cnt for my bss in 64bits format */
+ __le32 rx_bss_cycle_count_low;
+ __le32 rx_bss_cycle_count_high;
+} __packed;
+
+#define WMI_VDEV_INSTALL_KEY_COMPL_STATUS_SUCCESS 0
+
+struct wmi_vdev_install_key_compl_event {
+ __le32 vdev_id;
+ struct ath12k_wmi_mac_addr_params peer_macaddr;
+ __le32 key_idx;
+ __le32 key_flags;
+ __le32 status;
+} __packed;
+
+struct wmi_vdev_install_key_complete_arg {
+ u32 vdev_id;
+ const u8 *macaddr;
+ u32 key_idx;
+ u32 key_flags;
+ u32 status;
+};
+
+struct wmi_peer_assoc_conf_event {
+ __le32 vdev_id;
+ struct ath12k_wmi_mac_addr_params peer_macaddr;
+} __packed;
+
+struct wmi_peer_assoc_conf_arg {
+ u32 vdev_id;
+ const u8 *macaddr;
+};
+
+struct wmi_fils_discovery_event {
+ __le32 vdev_id;
+ __le32 fils_tt;
+ __le32 tbtt;
+} __packed;
+
+struct wmi_probe_resp_tx_status_event {
+ __le32 vdev_id;
+ __le32 tx_status;
+} __packed;
+
+struct wmi_pdev_ctl_failsafe_chk_event {
+ __le32 pdev_id;
+ __le32 ctl_failsafe_status;
+} __packed;
+
+struct ath12k_wmi_pdev_csa_event {
+ __le32 pdev_id;
+ __le32 current_switch_count;
+ __le32 num_vdevs;
+} __packed;
+
+struct ath12k_wmi_pdev_radar_event {
+ __le32 pdev_id;
+ __le32 detection_mode;
+ __le32 chan_freq;
+ __le32 chan_width;
+ __le32 detector_id;
+ __le32 segment_id;
+ __le32 timestamp;
+ __le32 is_chirp;
+ a_sle32 freq_offset;
+ a_sle32 sidx;
+} __packed;
+
+struct wmi_pdev_temperature_event {
+ /* temperature value in Celcius degree */
+ a_sle32 temp;
+ __le32 pdev_id;
+} __packed;
+
+#define WMI_RX_STATUS_OK 0x00
+#define WMI_RX_STATUS_ERR_CRC 0x01
+#define WMI_RX_STATUS_ERR_DECRYPT 0x08
+#define WMI_RX_STATUS_ERR_MIC 0x10
+#define WMI_RX_STATUS_ERR_KEY_CACHE_MISS 0x20
+
+#define WLAN_MGMT_TXRX_HOST_MAX_ANTENNA 4
+
+struct ath12k_wmi_mgmt_rx_arg {
+ u32 chan_freq;
+ u32 channel;
+ u32 snr;
+ u8 rssi_ctl[WLAN_MGMT_TXRX_HOST_MAX_ANTENNA];
+ u32 rate;
+ enum wmi_phy_mode phy_mode;
+ u32 buf_len;
+ int status;
+ u32 flags;
+ int rssi;
+ u32 tsf_delta;
+ u8 pdev_id;
+};
+
+#define ATH_MAX_ANTENNA 4
+
+struct ath12k_wmi_mgmt_rx_params {
+ __le32 channel;
+ __le32 snr;
+ __le32 rate;
+ __le32 phy_mode;
+ __le32 buf_len;
+ __le32 status;
+ __le32 rssi_ctl[ATH_MAX_ANTENNA];
+ __le32 flags;
+ a_sle32 rssi;
+ __le32 tsf_delta;
+ __le32 rx_tsf_l32;
+ __le32 rx_tsf_u32;
+ __le32 pdev_id;
+ __le32 chan_freq;
+} __packed;
+
+#define MAX_ANTENNA_EIGHT 8
+
+struct wmi_mgmt_tx_compl_event {
+ __le32 desc_id;
+ __le32 status;
+ __le32 pdev_id;
+} __packed;
+
+struct wmi_scan_event {
+ __le32 event_type; /* %WMI_SCAN_EVENT_ */
+ __le32 reason; /* %WMI_SCAN_REASON_ */
+ __le32 channel_freq; /* only valid for WMI_SCAN_EVENT_FOREIGN_CHANNEL */
+ __le32 scan_req_id;
+ __le32 scan_id;
+ __le32 vdev_id;
+ /* TSF Timestamp when the scan event (%WMI_SCAN_EVENT_) is completed
+ * In case of AP it is TSF of the AP vdev
+ * In case of STA connected state, this is the TSF of the AP
+ * In case of STA not connected, it will be the free running HW timer
+ */
+ __le32 tsf_timestamp;
+} __packed;
+
+struct wmi_peer_sta_kickout_arg {
+ const u8 *mac_addr;
+};
+
+struct wmi_peer_sta_kickout_event {
+ struct ath12k_wmi_mac_addr_params peer_macaddr;
+} __packed;
+
+enum wmi_roam_reason {
+ WMI_ROAM_REASON_BETTER_AP = 1,
+ WMI_ROAM_REASON_BEACON_MISS = 2,
+ WMI_ROAM_REASON_LOW_RSSI = 3,
+ WMI_ROAM_REASON_SUITABLE_AP_FOUND = 4,
+ WMI_ROAM_REASON_HO_FAILED = 5,
+
+ /* keep last */
+ WMI_ROAM_REASON_MAX,
+};
+
+struct wmi_roam_event {
+ __le32 vdev_id;
+ __le32 reason;
+ __le32 rssi;
+} __packed;
+
+#define WMI_CHAN_INFO_START_RESP 0
+#define WMI_CHAN_INFO_END_RESP 1
+
+struct wmi_chan_info_event {
+ __le32 err_code;
+ __le32 freq;
+ __le32 cmd_flags;
+ __le32 noise_floor;
+ __le32 rx_clear_count;
+ __le32 cycle_count;
+ __le32 chan_tx_pwr_range;
+ __le32 chan_tx_pwr_tp;
+ __le32 rx_frame_count;
+ __le32 my_bss_rx_cycle_count;
+ __le32 rx_11b_mode_data_duration;
+ __le32 tx_frame_cnt;
+ __le32 mac_clk_mhz;
+ __le32 vdev_id;
+} __packed;
+
+struct ath12k_wmi_target_cap_arg {
+ u32 phy_capability;
+ u32 max_frag_entry;
+ u32 num_rf_chains;
+ u32 ht_cap_info;
+ u32 vht_cap_info;
+ u32 vht_supp_mcs;
+ u32 hw_min_tx_power;
+ u32 hw_max_tx_power;
+ u32 sys_cap_info;
+ u32 min_pkt_size_enable;
+ u32 max_bcn_ie_size;
+ u32 max_num_scan_channels;
+ u32 max_supported_macs;
+ u32 wmi_fw_sub_feat_caps;
+ u32 txrx_chainmask;
+ u32 default_dbs_hw_mode_index;
+ u32 num_msdu_desc;
+};
+
+enum wmi_vdev_type {
+ WMI_VDEV_TYPE_AP = 1,
+ WMI_VDEV_TYPE_STA = 2,
+ WMI_VDEV_TYPE_IBSS = 3,
+ WMI_VDEV_TYPE_MONITOR = 4,
+};
+
+enum wmi_vdev_subtype {
+ WMI_VDEV_SUBTYPE_NONE,
+ WMI_VDEV_SUBTYPE_P2P_DEVICE,
+ WMI_VDEV_SUBTYPE_P2P_CLIENT,
+ WMI_VDEV_SUBTYPE_P2P_GO,
+ WMI_VDEV_SUBTYPE_PROXY_STA,
+ WMI_VDEV_SUBTYPE_MESH_NON_11S,
+ WMI_VDEV_SUBTYPE_MESH_11S,
+};
+
+enum wmi_sta_powersave_param {
+ WMI_STA_PS_PARAM_RX_WAKE_POLICY = 0,
+ WMI_STA_PS_PARAM_TX_WAKE_THRESHOLD = 1,
+ WMI_STA_PS_PARAM_PSPOLL_COUNT = 2,
+ WMI_STA_PS_PARAM_INACTIVITY_TIME = 3,
+ WMI_STA_PS_PARAM_UAPSD = 4,
+};
+
+enum wmi_sta_ps_param_uapsd {
+ WMI_STA_PS_UAPSD_AC0_DELIVERY_EN = (1 << 0),
+ WMI_STA_PS_UAPSD_AC0_TRIGGER_EN = (1 << 1),
+ WMI_STA_PS_UAPSD_AC1_DELIVERY_EN = (1 << 2),
+ WMI_STA_PS_UAPSD_AC1_TRIGGER_EN = (1 << 3),
+ WMI_STA_PS_UAPSD_AC2_DELIVERY_EN = (1 << 4),
+ WMI_STA_PS_UAPSD_AC2_TRIGGER_EN = (1 << 5),
+ WMI_STA_PS_UAPSD_AC3_DELIVERY_EN = (1 << 6),
+ WMI_STA_PS_UAPSD_AC3_TRIGGER_EN = (1 << 7),
+};
+
+enum wmi_sta_ps_param_tx_wake_threshold {
+ WMI_STA_PS_TX_WAKE_THRESHOLD_NEVER = 0,
+ WMI_STA_PS_TX_WAKE_THRESHOLD_ALWAYS = 1,
+
+ /* Values greater than one indicate that many TX attempts per beacon
+ * interval before the STA will wake up
+ */
+};
+
+/* The maximum number of PS-Poll frames the FW will send in response to
+ * traffic advertised in TIM before waking up (by sending a null frame with PS
+ * = 0). Value 0 has a special meaning: there is no maximum count and the FW
+ * will send as many PS-Poll as are necessary to retrieve buffered BU. This
+ * parameter is used when the RX wake policy is
+ * WMI_STA_PS_RX_WAKE_POLICY_POLL_UAPSD and ignored when the RX wake
+ * policy is WMI_STA_PS_RX_WAKE_POLICY_WAKE.
+ */
+enum wmi_sta_ps_param_pspoll_count {
+ WMI_STA_PS_PSPOLL_COUNT_NO_MAX = 0,
+ /* Values greater than 0 indicate the maximum numer of PS-Poll frames
+ * FW will send before waking up.
+ */
+};
+
+/* U-APSD configuration of peer station from (re)assoc request and TSPECs */
+enum wmi_ap_ps_param_uapsd {
+ WMI_AP_PS_UAPSD_AC0_DELIVERY_EN = (1 << 0),
+ WMI_AP_PS_UAPSD_AC0_TRIGGER_EN = (1 << 1),
+ WMI_AP_PS_UAPSD_AC1_DELIVERY_EN = (1 << 2),
+ WMI_AP_PS_UAPSD_AC1_TRIGGER_EN = (1 << 3),
+ WMI_AP_PS_UAPSD_AC2_DELIVERY_EN = (1 << 4),
+ WMI_AP_PS_UAPSD_AC2_TRIGGER_EN = (1 << 5),
+ WMI_AP_PS_UAPSD_AC3_DELIVERY_EN = (1 << 6),
+ WMI_AP_PS_UAPSD_AC3_TRIGGER_EN = (1 << 7),
+};
+
+/* U-APSD maximum service period of peer station */
+enum wmi_ap_ps_peer_param_max_sp {
+ WMI_AP_PS_PEER_PARAM_MAX_SP_UNLIMITED = 0,
+ WMI_AP_PS_PEER_PARAM_MAX_SP_2 = 1,
+ WMI_AP_PS_PEER_PARAM_MAX_SP_4 = 2,
+ WMI_AP_PS_PEER_PARAM_MAX_SP_6 = 3,
+ MAX_WMI_AP_PS_PEER_PARAM_MAX_SP,
+};
+
+enum wmi_ap_ps_peer_param {
+ /** Set uapsd configuration for a given peer.
+ *
+ * This include the delivery and trigger enabled state for each AC.
+ * The host MLME needs to set this based on AP capability and stations
+ * request Set in the association request received from the station.
+ *
+ * Lower 8 bits of the value specify the UAPSD configuration.
+ *
+ * (see enum wmi_ap_ps_param_uapsd)
+ * The default value is 0.
+ */
+ WMI_AP_PS_PEER_PARAM_UAPSD = 0,
+
+ /**
+ * Set the service period for a UAPSD capable station
+ *
+ * The service period from wme ie in the (re)assoc request frame.
+ *
+ * (see enum wmi_ap_ps_peer_param_max_sp)
+ */
+ WMI_AP_PS_PEER_PARAM_MAX_SP = 1,
+
+ /** Time in seconds for aging out buffered frames
+ * for STA in power save
+ */
+ WMI_AP_PS_PEER_PARAM_AGEOUT_TIME = 2,
+
+ /** Specify frame types that are considered SIFS
+ * RESP trigger frame
+ */
+ WMI_AP_PS_PEER_PARAM_SIFS_RESP_FRMTYPE = 3,
+
+ /** Specifies the trigger state of TID.
+ * Valid only for UAPSD frame type
+ */
+ WMI_AP_PS_PEER_PARAM_SIFS_RESP_UAPSD = 4,
+
+ /* Specifies the WNM sleep state of a STA */
+ WMI_AP_PS_PEER_PARAM_WNM_SLEEP = 5,
+};
+
+#define DISABLE_SIFS_RESPONSE_TRIGGER 0
+
+#define WMI_MAX_KEY_INDEX 3
+#define WMI_MAX_KEY_LEN 32
+
+enum wmi_key_type {
+ WMI_KEY_PAIRWISE = 0,
+ WMI_KEY_GROUP = 1,
+};
+
+enum wmi_cipher_type {
+ WMI_CIPHER_NONE = 0, /* clear key */
+ WMI_CIPHER_WEP = 1,
+ WMI_CIPHER_TKIP = 2,
+ WMI_CIPHER_AES_OCB = 3,
+ WMI_CIPHER_AES_CCM = 4,
+ WMI_CIPHER_WAPI = 5,
+ WMI_CIPHER_CKIP = 6,
+ WMI_CIPHER_AES_CMAC = 7,
+ WMI_CIPHER_ANY = 8,
+ WMI_CIPHER_AES_GCM = 9,
+ WMI_CIPHER_AES_GMAC = 10,
+};
+
+/* Value to disable fixed rate setting */
+#define WMI_FIXED_RATE_NONE (0xffff)
+
+#define ATH12K_RC_VERSION_OFFSET 28
+#define ATH12K_RC_PREAMBLE_OFFSET 8
+#define ATH12K_RC_NSS_OFFSET 5
+
+#define ATH12K_HW_RATE_CODE(rate, nss, preamble) \
+ ((1 << ATH12K_RC_VERSION_OFFSET) | \
+ ((nss) << ATH12K_RC_NSS_OFFSET) | \
+ ((preamble) << ATH12K_RC_PREAMBLE_OFFSET) | \
+ (rate))
+
+/* Preamble types to be used with VDEV fixed rate configuration */
+enum wmi_rate_preamble {
+ WMI_RATE_PREAMBLE_OFDM,
+ WMI_RATE_PREAMBLE_CCK,
+ WMI_RATE_PREAMBLE_HT,
+ WMI_RATE_PREAMBLE_VHT,
+ WMI_RATE_PREAMBLE_HE,
+};
+
+/**
+ * enum wmi_rtscts_prot_mode - Enable/Disable RTS/CTS and CTS2Self Protection.
+ * @WMI_RTS_CTS_DISABLED: RTS/CTS protection is disabled.
+ * @WMI_USE_RTS_CTS: RTS/CTS Enabled.
+ * @WMI_USE_CTS2SELF: CTS to self protection Enabled.
+ */
+enum wmi_rtscts_prot_mode {
+ WMI_RTS_CTS_DISABLED = 0,
+ WMI_USE_RTS_CTS = 1,
+ WMI_USE_CTS2SELF = 2,
+};
+
+/**
+ * enum wmi_rtscts_profile - Selection of RTS CTS profile along with enabling
+ * protection mode.
+ * @WMI_RTSCTS_FOR_NO_RATESERIES: Neither of rate-series should use RTS-CTS
+ * @WMI_RTSCTS_FOR_SECOND_RATESERIES: Only second rate-series will use RTS-CTS
+ * @WMI_RTSCTS_ACROSS_SW_RETRIES: Only the second rate-series will use RTS-CTS,
+ * but if there's a sw retry, both the rate
+ * series will use RTS-CTS.
+ * @WMI_RTSCTS_ERP: RTS/CTS used for ERP protection for every PPDU.
+ * @WMI_RTSCTS_FOR_ALL_RATESERIES: Enable RTS-CTS for all rate series.
+ */
+enum wmi_rtscts_profile {
+ WMI_RTSCTS_FOR_NO_RATESERIES = 0,
+ WMI_RTSCTS_FOR_SECOND_RATESERIES = 1,
+ WMI_RTSCTS_ACROSS_SW_RETRIES = 2,
+ WMI_RTSCTS_ERP = 3,
+ WMI_RTSCTS_FOR_ALL_RATESERIES = 4,
+};
+
+#define WMI_SKB_HEADROOM sizeof(struct wmi_cmd_hdr)
+
+enum wmi_sta_ps_param_rx_wake_policy {
+ WMI_STA_PS_RX_WAKE_POLICY_WAKE = 0,
+ WMI_STA_PS_RX_WAKE_POLICY_POLL_UAPSD = 1,
+};
+
+/* Do not change existing values! Used by ath12k_frame_mode parameter
+ * module parameter.
+ */
+enum ath12k_hw_txrx_mode {
+ ATH12K_HW_TXRX_RAW = 0,
+ ATH12K_HW_TXRX_NATIVE_WIFI = 1,
+ ATH12K_HW_TXRX_ETHERNET = 2,
+};
+
+struct wmi_wmm_params {
+ __le32 tlv_header;
+ __le32 cwmin;
+ __le32 cwmax;
+ __le32 aifs;
+ __le32 txoplimit;
+ __le32 acm;
+ __le32 no_ack;
+} __packed;
+
+struct wmi_wmm_params_arg {
+ u8 acm;
+ u8 aifs;
+ u16 cwmin;
+ u16 cwmax;
+ u16 txop;
+ u8 no_ack;
+};
+
+struct wmi_vdev_set_wmm_params_cmd {
+ __le32 tlv_header;
+ __le32 vdev_id;
+ struct wmi_wmm_params wmm_params[4];
+ __le32 wmm_param_type;
+} __packed;
+
+struct wmi_wmm_params_all_arg {
+ struct wmi_wmm_params_arg ac_be;
+ struct wmi_wmm_params_arg ac_bk;
+ struct wmi_wmm_params_arg ac_vi;
+ struct wmi_wmm_params_arg ac_vo;
+};
+
+#define ATH12K_TWT_DEF_STA_CONG_TIMER_MS 5000
+#define ATH12K_TWT_DEF_DEFAULT_SLOT_SIZE 10
+#define ATH12K_TWT_DEF_CONGESTION_THRESH_SETUP 50
+#define ATH12K_TWT_DEF_CONGESTION_THRESH_TEARDOWN 20
+#define ATH12K_TWT_DEF_CONGESTION_THRESH_CRITICAL 100
+#define ATH12K_TWT_DEF_INTERFERENCE_THRESH_TEARDOWN 80
+#define ATH12K_TWT_DEF_INTERFERENCE_THRESH_SETUP 50
+#define ATH12K_TWT_DEF_MIN_NO_STA_SETUP 10
+#define ATH12K_TWT_DEF_MIN_NO_STA_TEARDOWN 2
+#define ATH12K_TWT_DEF_NO_OF_BCAST_MCAST_SLOTS 2
+#define ATH12K_TWT_DEF_MIN_NO_TWT_SLOTS 2
+#define ATH12K_TWT_DEF_MAX_NO_STA_TWT 500
+#define ATH12K_TWT_DEF_MODE_CHECK_INTERVAL 10000
+#define ATH12K_TWT_DEF_ADD_STA_SLOT_INTERVAL 1000
+#define ATH12K_TWT_DEF_REMOVE_STA_SLOT_INTERVAL 5000
+
+struct wmi_twt_enable_params_cmd {
+ __le32 tlv_header;
+ __le32 pdev_id;
+ __le32 sta_cong_timer_ms;
+ __le32 mbss_support;
+ __le32 default_slot_size;
+ __le32 congestion_thresh_setup;
+ __le32 congestion_thresh_teardown;
+ __le32 congestion_thresh_critical;
+ __le32 interference_thresh_teardown;
+ __le32 interference_thresh_setup;
+ __le32 min_no_sta_setup;
+ __le32 min_no_sta_teardown;
+ __le32 no_of_bcast_mcast_slots;
+ __le32 min_no_twt_slots;
+ __le32 max_no_sta_twt;
+ __le32 mode_check_interval;
+ __le32 add_sta_slot_interval;
+ __le32 remove_sta_slot_interval;
+} __packed;
+
+struct wmi_twt_disable_params_cmd {
+ __le32 tlv_header;
+ __le32 pdev_id;
+} __packed;
+
+struct wmi_obss_spatial_reuse_params_cmd {
+ __le32 tlv_header;
+ __le32 pdev_id;
+ __le32 enable;
+ a_sle32 obss_min;
+ a_sle32 obss_max;
+ __le32 vdev_id;
+} __packed;
+
+#define ATH12K_BSS_COLOR_COLLISION_SCAN_PERIOD_MS 200
+#define ATH12K_OBSS_COLOR_COLLISION_DETECTION_DISABLE 0
+#define ATH12K_OBSS_COLOR_COLLISION_DETECTION 1
+
+#define ATH12K_BSS_COLOR_STA_PERIODS 10000
+#define ATH12K_BSS_COLOR_AP_PERIODS 5000
+
+struct wmi_obss_color_collision_cfg_params_cmd {
+ __le32 tlv_header;
+ __le32 vdev_id;
+ __le32 flags;
+ __le32 evt_type;
+ __le32 current_bss_color;
+ __le32 detection_period_ms;
+ __le32 scan_period_ms;
+ __le32 free_slot_expiry_time_ms;
+} __packed;
+
+struct wmi_bss_color_change_enable_params_cmd {
+ __le32 tlv_header;
+ __le32 vdev_id;
+ __le32 enable;
+} __packed;
+
+#define ATH12K_IPV4_TH_SEED_SIZE 5
+#define ATH12K_IPV6_TH_SEED_SIZE 11
+
+struct ath12k_wmi_pdev_lro_config_cmd {
+ __le32 tlv_header;
+ __le32 lro_enable;
+ __le32 res;
+ u32 th_4[ATH12K_IPV4_TH_SEED_SIZE];
+ u32 th_6[ATH12K_IPV6_TH_SEED_SIZE];
+ __le32 pdev_id;
+} __packed;
+
+#define ATH12K_WMI_SPECTRAL_COUNT_DEFAULT 0
+#define ATH12K_WMI_SPECTRAL_PERIOD_DEFAULT 224
+#define ATH12K_WMI_SPECTRAL_PRIORITY_DEFAULT 1
+#define ATH12K_WMI_SPECTRAL_FFT_SIZE_DEFAULT 7
+#define ATH12K_WMI_SPECTRAL_GC_ENA_DEFAULT 1
+#define ATH12K_WMI_SPECTRAL_RESTART_ENA_DEFAULT 0
+#define ATH12K_WMI_SPECTRAL_NOISE_FLOOR_REF_DEFAULT -96
+#define ATH12K_WMI_SPECTRAL_INIT_DELAY_DEFAULT 80
+#define ATH12K_WMI_SPECTRAL_NB_TONE_THR_DEFAULT 12
+#define ATH12K_WMI_SPECTRAL_STR_BIN_THR_DEFAULT 8
+#define ATH12K_WMI_SPECTRAL_WB_RPT_MODE_DEFAULT 0
+#define ATH12K_WMI_SPECTRAL_RSSI_RPT_MODE_DEFAULT 0
+#define ATH12K_WMI_SPECTRAL_RSSI_THR_DEFAULT 0xf0
+#define ATH12K_WMI_SPECTRAL_PWR_FORMAT_DEFAULT 0
+#define ATH12K_WMI_SPECTRAL_RPT_MODE_DEFAULT 2
+#define ATH12K_WMI_SPECTRAL_BIN_SCALE_DEFAULT 1
+#define ATH12K_WMI_SPECTRAL_DBM_ADJ_DEFAULT 1
+#define ATH12K_WMI_SPECTRAL_CHN_MASK_DEFAULT 1
+
+struct ath12k_wmi_vdev_spectral_conf_arg {
+ u32 vdev_id;
+ u32 scan_count;
+ u32 scan_period;
+ u32 scan_priority;
+ u32 scan_fft_size;
+ u32 scan_gc_ena;
+ u32 scan_restart_ena;
+ u32 scan_noise_floor_ref;
+ u32 scan_init_delay;
+ u32 scan_nb_tone_thr;
+ u32 scan_str_bin_thr;
+ u32 scan_wb_rpt_mode;
+ u32 scan_rssi_rpt_mode;
+ u32 scan_rssi_thr;
+ u32 scan_pwr_format;
+ u32 scan_rpt_mode;
+ u32 scan_bin_scale;
+ u32 scan_dbm_adj;
+ u32 scan_chn_mask;
+};
+
+struct ath12k_wmi_vdev_spectral_conf_cmd {
+ __le32 tlv_header;
+ __le32 vdev_id;
+ __le32 scan_count;
+ __le32 scan_period;
+ __le32 scan_priority;
+ __le32 scan_fft_size;
+ __le32 scan_gc_ena;
+ __le32 scan_restart_ena;
+ __le32 scan_noise_floor_ref;
+ __le32 scan_init_delay;
+ __le32 scan_nb_tone_thr;
+ __le32 scan_str_bin_thr;
+ __le32 scan_wb_rpt_mode;
+ __le32 scan_rssi_rpt_mode;
+ __le32 scan_rssi_thr;
+ __le32 scan_pwr_format;
+ __le32 scan_rpt_mode;
+ __le32 scan_bin_scale;
+ __le32 scan_dbm_adj;
+ __le32 scan_chn_mask;
+} __packed;
+
+#define ATH12K_WMI_SPECTRAL_TRIGGER_CMD_TRIGGER 1
+#define ATH12K_WMI_SPECTRAL_TRIGGER_CMD_CLEAR 2
+#define ATH12K_WMI_SPECTRAL_ENABLE_CMD_ENABLE 1
+#define ATH12K_WMI_SPECTRAL_ENABLE_CMD_DISABLE 2
+
+struct ath12k_wmi_vdev_spectral_enable_cmd {
+ __le32 tlv_header;
+ __le32 vdev_id;
+ __le32 trigger_cmd;
+ __le32 enable_cmd;
+} __packed;
+
+struct ath12k_wmi_pdev_dma_ring_cfg_arg {
+ u32 tlv_header;
+ u32 pdev_id;
+ u32 module_id;
+ u32 base_paddr_lo;
+ u32 base_paddr_hi;
+ u32 head_idx_paddr_lo;
+ u32 head_idx_paddr_hi;
+ u32 tail_idx_paddr_lo;
+ u32 tail_idx_paddr_hi;
+ u32 num_elems;
+ u32 buf_size;
+ u32 num_resp_per_event;
+ u32 event_timeout_ms;
+};
+
+struct ath12k_wmi_pdev_dma_ring_cfg_req_cmd {
+ __le32 tlv_header;
+ __le32 pdev_id;
+ __le32 module_id; /* see enum wmi_direct_buffer_module */
+ __le32 base_paddr_lo;
+ __le32 base_paddr_hi;
+ __le32 head_idx_paddr_lo;
+ __le32 head_idx_paddr_hi;
+ __le32 tail_idx_paddr_lo;
+ __le32 tail_idx_paddr_hi;
+ __le32 num_elems; /* Number of elems in the ring */
+ __le32 buf_size; /* size of allocated buffer in bytes */
+
+ /* Number of wmi_dma_buf_release_entry packed together */
+ __le32 num_resp_per_event;
+
+ /* Target should timeout and send whatever resp
+ * it has if this time expires, units in milliseconds
+ */
+ __le32 event_timeout_ms;
+} __packed;
+
+struct ath12k_wmi_dma_buf_release_fixed_params {
+ __le32 pdev_id;
+ __le32 module_id;
+ __le32 num_buf_release_entry;
+ __le32 num_meta_data_entry;
+} __packed;
+
+struct ath12k_wmi_dma_buf_release_entry_params {
+ __le32 tlv_header;
+ __le32 paddr_lo;
+
+ /* Bits 11:0: address of data
+ * Bits 31:12: host context data
+ */
+ __le32 paddr_hi;
+} __packed;
+
+#define WMI_SPECTRAL_META_INFO1_FREQ1 GENMASK(15, 0)
+#define WMI_SPECTRAL_META_INFO1_FREQ2 GENMASK(31, 16)
+
+#define WMI_SPECTRAL_META_INFO2_CHN_WIDTH GENMASK(7, 0)
+
+struct ath12k_wmi_dma_buf_release_meta_data_params {
+ __le32 tlv_header;
+ a_sle32 noise_floor[WMI_MAX_CHAINS];
+ __le32 reset_delay;
+ __le32 freq1;
+ __le32 freq2;
+ __le32 ch_width;
+} __packed;
+
+enum wmi_fils_discovery_cmd_type {
+ WMI_FILS_DISCOVERY_CMD,
+ WMI_UNSOL_BCAST_PROBE_RESP,
+};
+
+struct wmi_fils_discovery_cmd {
+ __le32 tlv_header;
+ __le32 vdev_id;
+ __le32 interval;
+ __le32 config; /* enum wmi_fils_discovery_cmd_type */
+} __packed;
+
+struct wmi_fils_discovery_tmpl_cmd {
+ __le32 tlv_header;
+ __le32 vdev_id;
+ __le32 buf_len;
+} __packed;
+
+struct wmi_probe_tmpl_cmd {
+ __le32 tlv_header;
+ __le32 vdev_id;
+ __le32 buf_len;
+} __packed;
+
+#define WMI_MAX_MEM_REQS 32
+
+#define MAX_RADIOS 3
+
+#define WMI_SERVICE_READY_TIMEOUT_HZ (5 * HZ)
+#define WMI_SEND_TIMEOUT_HZ (3 * HZ)
+
+struct ath12k_wmi_pdev {
+ struct ath12k_wmi_base *wmi_ab;
+ enum ath12k_htc_ep_id eid;
+ const struct wmi_peer_flags_map *peer_flags;
+ u32 rx_decap_mode;
+};
+
+struct ath12k_wmi_base {
+ struct ath12k_base *ab;
+ struct ath12k_wmi_pdev wmi[MAX_RADIOS];
+ enum ath12k_htc_ep_id wmi_endpoint_id[MAX_RADIOS];
+ u32 max_msg_len[MAX_RADIOS];
+
+ struct completion service_ready;
+ struct completion unified_ready;
+ DECLARE_BITMAP(svc_map, WMI_MAX_EXT_SERVICE);
+ wait_queue_head_t tx_credits_wq;
+ const struct wmi_peer_flags_map *peer_flags;
+ u32 num_mem_chunks;
+ u32 rx_decap_mode;
+ struct ath12k_wmi_host_mem_chunk_arg mem_chunks[WMI_MAX_MEM_REQS];
+
+ enum wmi_host_hw_mode_config_type preferred_hw_mode;
+
+ struct ath12k_wmi_target_cap_arg *targ_cap;
+};
+
+#define ATH12K_FW_STATS_BUF_SIZE (1024 * 1024)
+
+void ath12k_wmi_init_qcn9274(struct ath12k_base *ab,
+ struct ath12k_wmi_resource_config_arg *config);
+void ath12k_wmi_init_wcn7850(struct ath12k_base *ab,
+ struct ath12k_wmi_resource_config_arg *config);
+int ath12k_wmi_cmd_send(struct ath12k_wmi_pdev *wmi, struct sk_buff *skb,
+ u32 cmd_id);
+struct sk_buff *ath12k_wmi_alloc_skb(struct ath12k_wmi_base *wmi_sc, u32 len);
+int ath12k_wmi_mgmt_send(struct ath12k *ar, u32 vdev_id, u32 buf_id,
+ struct sk_buff *frame);
+int ath12k_wmi_bcn_tmpl(struct ath12k *ar, u32 vdev_id,
+ struct ieee80211_mutable_offsets *offs,
+ struct sk_buff *bcn);
+int ath12k_wmi_vdev_down(struct ath12k *ar, u8 vdev_id);
+int ath12k_wmi_vdev_up(struct ath12k *ar, u32 vdev_id, u32 aid,
+ const u8 *bssid);
+int ath12k_wmi_vdev_stop(struct ath12k *ar, u8 vdev_id);
+int ath12k_wmi_vdev_start(struct ath12k *ar, struct wmi_vdev_start_req_arg *arg,
+ bool restart);
+int ath12k_wmi_set_peer_param(struct ath12k *ar, const u8 *peer_addr,
+ u32 vdev_id, u32 param_id, u32 param_val);
+int ath12k_wmi_pdev_set_param(struct ath12k *ar, u32 param_id,
+ u32 param_value, u8 pdev_id);
+int ath12k_wmi_pdev_set_ps_mode(struct ath12k *ar, int vdev_id, u32 enable);
+int ath12k_wmi_wait_for_unified_ready(struct ath12k_base *ab);
+int ath12k_wmi_cmd_init(struct ath12k_base *ab);
+int ath12k_wmi_wait_for_service_ready(struct ath12k_base *ab);
+int ath12k_wmi_connect(struct ath12k_base *ab);
+int ath12k_wmi_pdev_attach(struct ath12k_base *ab,
+ u8 pdev_id);
+int ath12k_wmi_attach(struct ath12k_base *ab);
+void ath12k_wmi_detach(struct ath12k_base *ab);
+int ath12k_wmi_vdev_create(struct ath12k *ar, u8 *macaddr,
+ struct ath12k_wmi_vdev_create_arg *arg);
+int ath12k_wmi_send_peer_create_cmd(struct ath12k *ar,
+ struct ath12k_wmi_peer_create_arg *arg);
+int ath12k_wmi_vdev_set_param_cmd(struct ath12k *ar, u32 vdev_id,
+ u32 param_id, u32 param_value);
+
+int ath12k_wmi_set_sta_ps_param(struct ath12k *ar, u32 vdev_id,
+ u32 param, u32 param_value);
+int ath12k_wmi_force_fw_hang_cmd(struct ath12k *ar, u32 type, u32 delay_time_ms);
+int ath12k_wmi_send_peer_delete_cmd(struct ath12k *ar,
+ const u8 *peer_addr, u8 vdev_id);
+int ath12k_wmi_vdev_delete(struct ath12k *ar, u8 vdev_id);
+void ath12k_wmi_start_scan_init(struct ath12k *ar,
+ struct ath12k_wmi_scan_req_arg *arg);
+int ath12k_wmi_send_scan_start_cmd(struct ath12k *ar,
+ struct ath12k_wmi_scan_req_arg *arg);
+int ath12k_wmi_send_scan_stop_cmd(struct ath12k *ar,
+ struct ath12k_wmi_scan_cancel_arg *arg);
+int ath12k_wmi_send_wmm_update_cmd(struct ath12k *ar, u32 vdev_id,
+ struct wmi_wmm_params_all_arg *param);
+int ath12k_wmi_pdev_suspend(struct ath12k *ar, u32 suspend_opt,
+ u32 pdev_id);
+int ath12k_wmi_pdev_resume(struct ath12k *ar, u32 pdev_id);
+
+int ath12k_wmi_send_peer_assoc_cmd(struct ath12k *ar,
+ struct ath12k_wmi_peer_assoc_arg *arg);
+int ath12k_wmi_vdev_install_key(struct ath12k *ar,
+ struct wmi_vdev_install_key_arg *arg);
+int ath12k_wmi_pdev_bss_chan_info_request(struct ath12k *ar,
+ enum wmi_bss_chan_info_req_type type);
+int ath12k_wmi_send_stats_request_cmd(struct ath12k *ar, u32 stats_id,
+ u32 vdev_id, u32 pdev_id);
+int ath12k_wmi_send_pdev_temperature_cmd(struct ath12k *ar);
+int ath12k_wmi_send_peer_flush_tids_cmd(struct ath12k *ar,
+ u8 peer_addr[ETH_ALEN],
+ u32 peer_tid_bitmap,
+ u8 vdev_id);
+int ath12k_wmi_send_set_ap_ps_param_cmd(struct ath12k *ar, u8 *peer_addr,
+ struct ath12k_wmi_ap_ps_arg *arg);
+int ath12k_wmi_send_scan_chan_list_cmd(struct ath12k *ar,
+ struct ath12k_wmi_scan_chan_list_arg *arg);
+int ath12k_wmi_send_dfs_phyerr_offload_enable_cmd(struct ath12k *ar,
+ u32 pdev_id);
+int ath12k_wmi_addba_clear_resp(struct ath12k *ar, u32 vdev_id, const u8 *mac);
+int ath12k_wmi_addba_send(struct ath12k *ar, u32 vdev_id, const u8 *mac,
+ u32 tid, u32 buf_size);
+int ath12k_wmi_addba_set_resp(struct ath12k *ar, u32 vdev_id, const u8 *mac,
+ u32 tid, u32 status);
+int ath12k_wmi_delba_send(struct ath12k *ar, u32 vdev_id, const u8 *mac,
+ u32 tid, u32 initiator, u32 reason);
+int ath12k_wmi_send_bcn_offload_control_cmd(struct ath12k *ar,
+ u32 vdev_id, u32 bcn_ctrl_op);
+int ath12k_wmi_send_init_country_cmd(struct ath12k *ar,
+ struct ath12k_wmi_init_country_arg *arg);
+int ath12k_wmi_peer_rx_reorder_queue_setup(struct ath12k *ar,
+ int vdev_id, const u8 *addr,
+ dma_addr_t paddr, u8 tid,
+ u8 ba_window_size_valid,
+ u32 ba_window_size);
+int
+ath12k_wmi_rx_reord_queue_remove(struct ath12k *ar,
+ struct ath12k_wmi_rx_reorder_queue_remove_arg *arg);
+int ath12k_wmi_send_pdev_set_regdomain(struct ath12k *ar,
+ struct ath12k_wmi_pdev_set_regdomain_arg *arg);
+int ath12k_wmi_simulate_radar(struct ath12k *ar);
+int ath12k_wmi_send_twt_enable_cmd(struct ath12k *ar, u32 pdev_id);
+int ath12k_wmi_send_twt_disable_cmd(struct ath12k *ar, u32 pdev_id);
+int ath12k_wmi_send_obss_spr_cmd(struct ath12k *ar, u32 vdev_id,
+ struct ieee80211_he_obss_pd *he_obss_pd);
+int ath12k_wmi_obss_color_cfg_cmd(struct ath12k *ar, u32 vdev_id,
+ u8 bss_color, u32 period,
+ bool enable);
+int ath12k_wmi_send_bss_color_change_enable_cmd(struct ath12k *ar, u32 vdev_id,
+ bool enable);
+int ath12k_wmi_pdev_lro_cfg(struct ath12k *ar, int pdev_id);
+int ath12k_wmi_pdev_dma_ring_cfg(struct ath12k *ar,
+ struct ath12k_wmi_pdev_dma_ring_cfg_arg *arg);
+int ath12k_wmi_vdev_spectral_enable(struct ath12k *ar, u32 vdev_id,
+ u32 trigger, u32 enable);
+int ath12k_wmi_vdev_spectral_conf(struct ath12k *ar,
+ struct ath12k_wmi_vdev_spectral_conf_arg *arg);
+int ath12k_wmi_fils_discovery_tmpl(struct ath12k *ar, u32 vdev_id,
+ struct sk_buff *tmpl);
+int ath12k_wmi_fils_discovery(struct ath12k *ar, u32 vdev_id, u32 interval,
+ bool unsol_bcast_probe_resp_enabled);
+int ath12k_wmi_probe_resp_tmpl(struct ath12k *ar, u32 vdev_id,
+ struct sk_buff *tmpl);
+int ath12k_wmi_set_hw_mode(struct ath12k_base *ab,
+ enum wmi_host_hw_mode_config_type mode);
+
+#endif
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c
index a20e0aeae284..0c2b8b1a10d5 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.c
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c
@@ -1119,7 +1119,7 @@ void ath6kl_cfg80211_ch_switch_notify(struct ath6kl_vif *vif, int freq,
NL80211_CHAN_HT20 : NL80211_CHAN_NO_HT);
mutex_lock(&vif->wdev.mtx);
- cfg80211_ch_switch_notify(vif->ndev, &chandef, 0);
+ cfg80211_ch_switch_notify(vif->ndev, &chandef, 0, 0);
mutex_unlock(&vif->wdev.mtx);
}
diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
index 6610d76131fa..7a45f5f62826 100644
--- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
@@ -1277,13 +1277,13 @@ static void ar5008_hw_set_radar_conf(struct ath_hw *ah)
static void ar5008_hw_init_txpower_cck(struct ath_hw *ah, int16_t *rate_array)
{
-#define CCK_DELTA(x) ((OLC_FOR_AR9280_20_LATER) ? max((x) - 2, 0) : (x))
- ah->tx_power[0] = CCK_DELTA(rate_array[rate1l]);
- ah->tx_power[1] = CCK_DELTA(min(rate_array[rate2l],
+#define CCK_DELTA(_ah, x) ((OLC_FOR_AR9280_20_LATER(_ah)) ? max((x) - 2, 0) : (x))
+ ah->tx_power[0] = CCK_DELTA(ah, rate_array[rate1l]);
+ ah->tx_power[1] = CCK_DELTA(ah, min(rate_array[rate2l],
rate_array[rate2s]));
- ah->tx_power[2] = CCK_DELTA(min(rate_array[rate5_5l],
+ ah->tx_power[2] = CCK_DELTA(ah, min(rate_array[rate5_5l],
rate_array[rate5_5s]));
- ah->tx_power[3] = CCK_DELTA(min(rate_array[rate11l],
+ ah->tx_power[3] = CCK_DELTA(ah, min(rate_array[rate11l],
rate_array[rate11s]));
#undef CCK_DELTA
}
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_calib.c b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
index fd53b5f9e9b5..c8b3f3aaa45b 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
@@ -659,9 +659,9 @@ static void ar9002_hw_pa_cal(struct ath_hw *ah, bool is_reset)
static void ar9002_hw_olc_temp_compensation(struct ath_hw *ah)
{
- if (OLC_FOR_AR9287_10_LATER)
+ if (OLC_FOR_AR9287_10_LATER(ah))
ar9287_hw_olc_temp_compensation(ah);
- else if (OLC_FOR_AR9280_20_LATER)
+ else if (OLC_FOR_AR9280_20_LATER(ah))
ar9280_hw_olc_temp_compensation(ah);
}
@@ -672,7 +672,7 @@ static int ar9002_hw_calibrate(struct ath_hw *ah, struct ath9k_channel *chan,
bool nfcal, nfcal_pending = false, percal_pending;
int ret;
- nfcal = !!(REG_READ(ah, AR_PHY_AGC_CONTROL) & AR_PHY_AGC_CONTROL_NF);
+ nfcal = !!(REG_READ(ah, AR_PHY_AGC_CONTROL(ah)) & AR_PHY_AGC_CONTROL_NF);
if (ah->caldata) {
nfcal_pending = test_bit(NFCAL_PENDING, &ah->caldata->cal_flags);
if (longcal) /* Remember to not miss */
@@ -752,11 +752,11 @@ static bool ar9285_hw_cl_cal(struct ath_hw *ah, struct ath9k_channel *chan)
if (IS_CHAN_HT20(chan)) {
REG_SET_BIT(ah, AR_PHY_CL_CAL_CTL, AR_PHY_PARALLEL_CAL_ENABLE);
REG_SET_BIT(ah, AR_PHY_TURBO, AR_PHY_FC_DYN2040_EN);
- REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
+ REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL(ah),
AR_PHY_AGC_CONTROL_FLTR_CAL);
REG_CLR_BIT(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_CAL_ENABLE);
- REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL);
- if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL,
+ REG_SET_BIT(ah, AR_PHY_AGC_CONTROL(ah), AR_PHY_AGC_CONTROL_CAL);
+ if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL(ah),
AR_PHY_AGC_CONTROL_CAL, 0, AH_WAIT_TIMEOUT)) {
ath_dbg(common, CALIBRATE,
"offset calibration failed to complete in %d ms; noisy environment?\n",
@@ -768,10 +768,10 @@ static bool ar9285_hw_cl_cal(struct ath_hw *ah, struct ath9k_channel *chan)
REG_CLR_BIT(ah, AR_PHY_CL_CAL_CTL, AR_PHY_CL_CAL_ENABLE);
}
REG_CLR_BIT(ah, AR_PHY_ADC_CTL, AR_PHY_ADC_CTL_OFF_PWDADC);
- REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_FLTR_CAL);
+ REG_SET_BIT(ah, AR_PHY_AGC_CONTROL(ah), AR_PHY_AGC_CONTROL_FLTR_CAL);
REG_SET_BIT(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_CAL_ENABLE);
- REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL);
- if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL,
+ REG_SET_BIT(ah, AR_PHY_AGC_CONTROL(ah), AR_PHY_AGC_CONTROL_CAL);
+ if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL(ah), AR_PHY_AGC_CONTROL_CAL,
0, AH_WAIT_TIMEOUT)) {
ath_dbg(common, CALIBRATE,
"offset calibration failed to complete in %d ms; noisy environment?\n",
@@ -781,7 +781,7 @@ static bool ar9285_hw_cl_cal(struct ath_hw *ah, struct ath9k_channel *chan)
REG_SET_BIT(ah, AR_PHY_ADC_CTL, AR_PHY_ADC_CTL_OFF_PWDADC);
REG_CLR_BIT(ah, AR_PHY_CL_CAL_CTL, AR_PHY_CL_CAL_ENABLE);
- REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_FLTR_CAL);
+ REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL(ah), AR_PHY_AGC_CONTROL_FLTR_CAL);
return true;
}
@@ -857,17 +857,17 @@ static bool ar9002_hw_init_cal(struct ath_hw *ah, struct ath9k_channel *chan)
if (!AR_SREV_9287_11_OR_LATER(ah))
REG_CLR_BIT(ah, AR_PHY_ADC_CTL,
AR_PHY_ADC_CTL_OFF_PWDADC);
- REG_SET_BIT(ah, AR_PHY_AGC_CONTROL,
+ REG_SET_BIT(ah, AR_PHY_AGC_CONTROL(ah),
AR_PHY_AGC_CONTROL_FLTR_CAL);
}
/* Calibrate the AGC */
- REG_WRITE(ah, AR_PHY_AGC_CONTROL,
- REG_READ(ah, AR_PHY_AGC_CONTROL) |
+ REG_WRITE(ah, AR_PHY_AGC_CONTROL(ah),
+ REG_READ(ah, AR_PHY_AGC_CONTROL(ah)) |
AR_PHY_AGC_CONTROL_CAL);
/* Poll for offset calibration complete */
- if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL,
+ if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL(ah),
AR_PHY_AGC_CONTROL_CAL,
0, AH_WAIT_TIMEOUT)) {
ath_dbg(common, CALIBRATE,
@@ -880,7 +880,7 @@ static bool ar9002_hw_init_cal(struct ath_hw *ah, struct ath9k_channel *chan)
if (!AR_SREV_9287_11_OR_LATER(ah))
REG_SET_BIT(ah, AR_PHY_ADC_CTL,
AR_PHY_ADC_CTL_OFF_PWDADC);
- REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
+ REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL(ah),
AR_PHY_AGC_CONTROL_FLTR_CAL);
}
}
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_hw.c b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
index ae68f674829b..d08ea0b28530 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
@@ -249,9 +249,9 @@ static void ar9002_hw_configpcipowersave(struct ath_hw *ah,
if (power_off) {
/* clear bit 19 to disable L1 */
- REG_CLR_BIT(ah, AR_PCIE_PM_CTRL, AR_PCIE_PM_CTRL_ENA);
+ REG_CLR_BIT(ah, AR_PCIE_PM_CTRL(ah), AR_PCIE_PM_CTRL_ENA);
- val = REG_READ(ah, AR_WA);
+ val = REG_READ(ah, AR_WA(ah));
/*
* Set PCIe workaround bits
@@ -286,7 +286,7 @@ static void ar9002_hw_configpcipowersave(struct ath_hw *ah,
if (AR_SREV_9285E_20(ah))
val |= AR_WA_BIT23;
- REG_WRITE(ah, AR_WA, val);
+ REG_WRITE(ah, AR_WA(ah), val);
} else {
if (ah->config.pcie_waen) {
val = ah->config.pcie_waen;
@@ -314,10 +314,10 @@ static void ar9002_hw_configpcipowersave(struct ath_hw *ah,
if (AR_SREV_9285E_20(ah))
val |= AR_WA_BIT23;
- REG_WRITE(ah, AR_WA, val);
+ REG_WRITE(ah, AR_WA(ah), val);
/* set bit 19 to allow forcing of pcie core into L1 state */
- REG_SET_BIT(ah, AR_PCIE_PM_CTRL, AR_PCIE_PM_CTRL_ENA);
+ REG_SET_BIT(ah, AR_PCIE_PM_CTRL(ah), AR_PCIE_PM_CTRL_ENA);
}
}
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
index a8c0e8e2d78c..b70cd4af1ae0 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
@@ -21,7 +21,7 @@
static void ar9002_hw_rx_enable(struct ath_hw *ah)
{
- REG_WRITE(ah, AR_CR, AR_CR_RXE);
+ REG_WRITE(ah, AR_CR, AR_CR_RXE(ah));
}
static void ar9002_hw_set_desc_link(void *ds, u32 ds_link)
@@ -40,14 +40,14 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked,
struct ath_common *common = ath9k_hw_common(ah);
if (!AR_SREV_9100(ah)) {
- if (REG_READ(ah, AR_INTR_ASYNC_CAUSE) & AR_INTR_MAC_IRQ) {
- if ((REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M)
+ if (REG_READ(ah, AR_INTR_ASYNC_CAUSE(ah)) & AR_INTR_MAC_IRQ) {
+ if ((REG_READ(ah, AR_RTC_STATUS(ah)) & AR_RTC_STATUS_M(ah))
== AR_RTC_STATUS_ON) {
isr = REG_READ(ah, AR_ISR);
}
}
- sync_cause = REG_READ(ah, AR_INTR_SYNC_CAUSE) &
+ sync_cause = REG_READ(ah, AR_INTR_SYNC_CAUSE(ah)) &
AR_INTR_SYNC_DEFAULT;
*masked = 0;
@@ -138,7 +138,7 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked,
u32 s5_s;
if (pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED) {
- s5_s = REG_READ(ah, AR_ISR_S5_S);
+ s5_s = REG_READ(ah, AR_ISR_S5_S(ah));
} else {
s5_s = REG_READ(ah, AR_ISR_S5);
}
@@ -201,8 +201,8 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked,
"AR_INTR_SYNC_LOCAL_TIMEOUT\n");
}
- REG_WRITE(ah, AR_INTR_SYNC_CAUSE_CLR, sync_cause);
- (void) REG_READ(ah, AR_INTR_SYNC_CAUSE_CLR);
+ REG_WRITE(ah, AR_INTR_SYNC_CAUSE_CLR(ah), sync_cause);
+ (void) REG_READ(ah, AR_INTR_SYNC_CAUSE_CLR(ah));
}
return true;
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_phy.c b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
index ebdb97999335..23ac6b7c2cbd 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
@@ -281,10 +281,10 @@ static void ar9002_olc_init(struct ath_hw *ah)
{
u32 i;
- if (!OLC_FOR_AR9280_20_LATER)
+ if (!OLC_FOR_AR9280_20_LATER(ah))
return;
- if (OLC_FOR_AR9287_10_LATER) {
+ if (OLC_FOR_AR9287_10_LATER(ah)) {
REG_SET_BIT(ah, AR_PHY_TX_PWRCTRL9,
AR_PHY_TX_PWRCTRL9_RES_DC_REMOVAL);
ath9k_hw_analog_shift_rmw(ah, AR9287_AN_TXPC0,
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
index 6ca089f15629..2224cb74b1d4 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
@@ -346,14 +346,14 @@ static bool ar9003_hw_dynamic_osdac_selection(struct ath_hw *ah,
/*
* Clear offset and IQ calibration, run AGC cal.
*/
- REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
+ REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL(ah),
AR_PHY_AGC_CONTROL_OFFSET_CAL);
- REG_CLR_BIT(ah, AR_PHY_TX_IQCAL_CONTROL_0,
+ REG_CLR_BIT(ah, AR_PHY_TX_IQCAL_CONTROL_0(ah),
AR_PHY_TX_IQCAL_CONTROL_0_ENABLE_TXIQ_CAL);
- REG_WRITE(ah, AR_PHY_AGC_CONTROL,
- REG_READ(ah, AR_PHY_AGC_CONTROL) | AR_PHY_AGC_CONTROL_CAL);
+ REG_WRITE(ah, AR_PHY_AGC_CONTROL(ah),
+ REG_READ(ah, AR_PHY_AGC_CONTROL(ah)) | AR_PHY_AGC_CONTROL_CAL);
- status = ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL,
+ status = ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL(ah),
AR_PHY_AGC_CONTROL_CAL,
0, AH_WAIT_TIMEOUT);
if (!status) {
@@ -367,13 +367,13 @@ static bool ar9003_hw_dynamic_osdac_selection(struct ath_hw *ah,
* (Carrier Leak calibration, TX Filter calibration and
* Peak Detector offset calibration).
*/
- REG_SET_BIT(ah, AR_PHY_AGC_CONTROL,
+ REG_SET_BIT(ah, AR_PHY_AGC_CONTROL(ah),
AR_PHY_AGC_CONTROL_OFFSET_CAL);
REG_CLR_BIT(ah, AR_PHY_CL_CAL_CTL,
AR_PHY_CL_CAL_ENABLE);
- REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
+ REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL(ah),
AR_PHY_AGC_CONTROL_FLTR_CAL);
- REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
+ REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL(ah),
AR_PHY_AGC_CONTROL_PKDET_CAL);
ch0_done = 0;
@@ -387,10 +387,10 @@ static bool ar9003_hw_dynamic_osdac_selection(struct ath_hw *ah,
REG_SET_BIT(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN);
- REG_WRITE(ah, AR_PHY_AGC_CONTROL,
- REG_READ(ah, AR_PHY_AGC_CONTROL) | AR_PHY_AGC_CONTROL_CAL);
+ REG_WRITE(ah, AR_PHY_AGC_CONTROL(ah),
+ REG_READ(ah, AR_PHY_AGC_CONTROL(ah)) | AR_PHY_AGC_CONTROL_CAL);
- status = ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL,
+ status = ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL(ah),
AR_PHY_AGC_CONTROL_CAL,
0, AH_WAIT_TIMEOUT);
if (!status) {
@@ -531,7 +531,7 @@ static bool ar9003_hw_dynamic_osdac_selection(struct ath_hw *ah,
}
}
- REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
+ REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL(ah),
AR_PHY_AGC_CONTROL_OFFSET_CAL);
REG_SET_BIT(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN);
@@ -539,7 +539,7 @@ static bool ar9003_hw_dynamic_osdac_selection(struct ath_hw *ah,
* We don't need to check txiqcal_done here since it is always
* set for AR9550.
*/
- REG_SET_BIT(ah, AR_PHY_TX_IQCAL_CONTROL_0,
+ REG_SET_BIT(ah, AR_PHY_TX_IQCAL_CONTROL_0(ah),
AR_PHY_TX_IQCAL_CONTROL_0_ENABLE_TXIQ_CAL);
return true;
@@ -897,7 +897,7 @@ static void ar9003_hw_tx_iq_cal_outlier_detection(struct ath_hw *ah,
memset(tx_corr_coeff, 0, sizeof(tx_corr_coeff));
for (i = 0; i < MAX_MEASUREMENT / 2; i++) {
tx_corr_coeff[i * 2][0] = tx_corr_coeff[(i * 2) + 1][0] =
- AR_PHY_TX_IQCAL_CORR_COEFF_B0(i);
+ AR_PHY_TX_IQCAL_CORR_COEFF_B0(ah, i);
if (!AR_SREV_9485(ah)) {
tx_corr_coeff[i * 2][1] =
tx_corr_coeff[(i * 2) + 1][1] =
@@ -914,7 +914,7 @@ static void ar9003_hw_tx_iq_cal_outlier_detection(struct ath_hw *ah,
if (!(ah->txchainmask & (1 << i)))
continue;
nmeasurement = REG_READ_FIELD(ah,
- AR_PHY_TX_IQCAL_STATUS_B0,
+ AR_PHY_TX_IQCAL_STATUS_B0(ah),
AR_PHY_CALIBRATED_GAINS_0);
if (nmeasurement > MAX_MEASUREMENT)
@@ -988,10 +988,10 @@ static bool ar9003_hw_tx_iq_cal_run(struct ath_hw *ah)
REG_RMW_FIELD(ah, AR_PHY_TX_FORCED_GAIN,
AR_PHY_TXGAIN_FORCE, 0);
- REG_RMW_FIELD(ah, AR_PHY_TX_IQCAL_START,
+ REG_RMW_FIELD(ah, AR_PHY_TX_IQCAL_START(ah),
AR_PHY_TX_IQCAL_START_DO_CAL, 1);
- if (!ath9k_hw_wait(ah, AR_PHY_TX_IQCAL_START,
+ if (!ath9k_hw_wait(ah, AR_PHY_TX_IQCAL_START(ah),
AR_PHY_TX_IQCAL_START_DO_CAL, 0,
AH_WAIT_TIMEOUT)) {
ath_dbg(common, CALIBRATE, "Tx IQ Cal is not completed\n");
@@ -1056,7 +1056,7 @@ static void ar9003_hw_tx_iq_cal_post_proc(struct ath_hw *ah,
{
struct ath_common *common = ath9k_hw_common(ah);
const u32 txiqcal_status[AR9300_MAX_CHAINS] = {
- AR_PHY_TX_IQCAL_STATUS_B0,
+ AR_PHY_TX_IQCAL_STATUS_B0(ah),
AR_PHY_TX_IQCAL_STATUS_B1,
AR_PHY_TX_IQCAL_STATUS_B2,
};
@@ -1076,7 +1076,7 @@ static void ar9003_hw_tx_iq_cal_post_proc(struct ath_hw *ah,
continue;
nmeasurement = REG_READ_FIELD(ah,
- AR_PHY_TX_IQCAL_STATUS_B0,
+ AR_PHY_TX_IQCAL_STATUS_B0(ah),
AR_PHY_CALIBRATED_GAINS_0);
if (nmeasurement > MAX_MEASUREMENT)
nmeasurement = MAX_MEASUREMENT;
@@ -1096,7 +1096,7 @@ static void ar9003_hw_tx_iq_cal_post_proc(struct ath_hw *ah,
u32 idx = 2 * j, offset = 4 * (3 * im + j);
REG_RMW_FIELD(ah,
- AR_PHY_CHAN_INFO_MEMORY,
+ AR_PHY_CHAN_INFO_MEMORY(ah),
AR_PHY_CHAN_INFO_TAB_S2_READ,
0);
@@ -1106,7 +1106,7 @@ static void ar9003_hw_tx_iq_cal_post_proc(struct ath_hw *ah,
offset);
REG_RMW_FIELD(ah,
- AR_PHY_CHAN_INFO_MEMORY,
+ AR_PHY_CHAN_INFO_MEMORY(ah),
AR_PHY_CHAN_INFO_TAB_S2_READ,
1);
@@ -1161,7 +1161,7 @@ static void ar9003_hw_tx_iq_cal_reload(struct ath_hw *ah)
memset(tx_corr_coeff, 0, sizeof(tx_corr_coeff));
for (i = 0; i < MAX_MEASUREMENT / 2; i++) {
tx_corr_coeff[i * 2][0] = tx_corr_coeff[(i * 2) + 1][0] =
- AR_PHY_TX_IQCAL_CORR_COEFF_B0(i);
+ AR_PHY_TX_IQCAL_CORR_COEFF_B0(ah, i);
if (!AR_SREV_9485(ah)) {
tx_corr_coeff[i * 2][1] =
tx_corr_coeff[(i * 2) + 1][1] =
@@ -1346,7 +1346,7 @@ static void ar9003_hw_cl_cal_post_proc(struct ath_hw *ah, bool is_reusable)
if (!caldata || !(ah->enabled_cals & TX_CL_CAL))
return;
- txclcal_done = !!(REG_READ(ah, AR_PHY_AGC_CONTROL) &
+ txclcal_done = !!(REG_READ(ah, AR_PHY_AGC_CONTROL(ah)) &
AR_PHY_AGC_CONTROL_CLC_SUCCESS);
if (test_bit(TXCLCAL_DONE, &caldata->cal_flags)) {
@@ -1424,12 +1424,12 @@ static bool ar9003_hw_init_cal_pcoem(struct ath_hw *ah,
if (rtt) {
if (!run_rtt_cal) {
- agc_ctrl = REG_READ(ah, AR_PHY_AGC_CONTROL);
+ agc_ctrl = REG_READ(ah, AR_PHY_AGC_CONTROL(ah));
agc_supp_cals &= agc_ctrl;
agc_ctrl &= ~(AR_PHY_AGC_CONTROL_OFFSET_CAL |
AR_PHY_AGC_CONTROL_FLTR_CAL |
AR_PHY_AGC_CONTROL_PKDET_CAL);
- REG_WRITE(ah, AR_PHY_AGC_CONTROL, agc_ctrl);
+ REG_WRITE(ah, AR_PHY_AGC_CONTROL(ah), agc_ctrl);
} else {
if (ah->ah_flags & AH_FASTCC)
run_agc_cal = true;
@@ -1452,7 +1452,7 @@ static bool ar9003_hw_init_cal_pcoem(struct ath_hw *ah,
goto skip_tx_iqcal;
/* Do Tx IQ Calibration */
- REG_RMW_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_1,
+ REG_RMW_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_1(ah),
AR_PHY_TX_IQCAL_CONTROL_1_IQCORR_I_Q_COFF_DELPT,
DELPT);
@@ -1462,10 +1462,10 @@ static bool ar9003_hw_init_cal_pcoem(struct ath_hw *ah,
*/
if (ah->enabled_cals & TX_IQ_ON_AGC_CAL) {
if (caldata && !test_bit(TXIQCAL_DONE, &caldata->cal_flags))
- REG_SET_BIT(ah, AR_PHY_TX_IQCAL_CONTROL_0,
+ REG_SET_BIT(ah, AR_PHY_TX_IQCAL_CONTROL_0(ah),
AR_PHY_TX_IQCAL_CONTROL_0_ENABLE_TXIQ_CAL);
else
- REG_CLR_BIT(ah, AR_PHY_TX_IQCAL_CONTROL_0,
+ REG_CLR_BIT(ah, AR_PHY_TX_IQCAL_CONTROL_0(ah),
AR_PHY_TX_IQCAL_CONTROL_0_ENABLE_TXIQ_CAL);
txiqcal_done = run_agc_cal = true;
}
@@ -1485,12 +1485,12 @@ skip_tx_iqcal:
if (run_agc_cal || !(ah->ah_flags & AH_FASTCC)) {
/* Calibrate the AGC */
- REG_WRITE(ah, AR_PHY_AGC_CONTROL,
- REG_READ(ah, AR_PHY_AGC_CONTROL) |
+ REG_WRITE(ah, AR_PHY_AGC_CONTROL(ah),
+ REG_READ(ah, AR_PHY_AGC_CONTROL(ah)) |
AR_PHY_AGC_CONTROL_CAL);
/* Poll for offset calibration complete */
- status = ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL,
+ status = ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL(ah),
AR_PHY_AGC_CONTROL_CAL,
0, AH_WAIT_TIMEOUT);
@@ -1507,7 +1507,7 @@ skip_tx_iqcal:
if (rtt && !run_rtt_cal) {
agc_ctrl |= agc_supp_cals;
- REG_WRITE(ah, AR_PHY_AGC_CONTROL, agc_ctrl);
+ REG_WRITE(ah, AR_PHY_AGC_CONTROL(ah), agc_ctrl);
}
if (!status) {
@@ -1558,11 +1558,11 @@ static bool do_ar9003_agc_cal(struct ath_hw *ah)
struct ath_common *common = ath9k_hw_common(ah);
bool status;
- REG_WRITE(ah, AR_PHY_AGC_CONTROL,
- REG_READ(ah, AR_PHY_AGC_CONTROL) |
+ REG_WRITE(ah, AR_PHY_AGC_CONTROL(ah),
+ REG_READ(ah, AR_PHY_AGC_CONTROL(ah)) |
AR_PHY_AGC_CONTROL_CAL);
- status = ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL,
+ status = ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL(ah),
AR_PHY_AGC_CONTROL_CAL,
0, AH_WAIT_TIMEOUT);
if (!status) {
@@ -1596,7 +1596,7 @@ static bool ar9003_hw_init_cal_soc(struct ath_hw *ah,
goto skip_tx_iqcal;
/* Do Tx IQ Calibration */
- REG_RMW_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_1,
+ REG_RMW_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_1(ah),
AR_PHY_TX_IQCAL_CONTROL_1_IQCORR_I_Q_COFF_DELPT,
DELPT);
@@ -1605,7 +1605,7 @@ static bool ar9003_hw_init_cal_soc(struct ath_hw *ah,
* AGC calibration. Specifically, AR9550 in SoC chips.
*/
if (ah->enabled_cals & TX_IQ_ON_AGC_CAL) {
- if (REG_READ_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_0,
+ if (REG_READ_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_0(ah),
AR_PHY_TX_IQCAL_CONTROL_0_ENABLE_TXIQ_CAL)) {
txiqcal_done = true;
} else {
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index 16bfcd0a1f6e..944f46cdf34c 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -3084,13 +3084,13 @@ error:
static bool ar9300_otp_read_word(struct ath_hw *ah, int addr, u32 *data)
{
- REG_READ(ah, AR9300_OTP_BASE + (4 * addr));
+ REG_READ(ah, AR9300_OTP_BASE(ah) + (4 * addr));
- if (!ath9k_hw_wait(ah, AR9300_OTP_STATUS, AR9300_OTP_STATUS_TYPE,
+ if (!ath9k_hw_wait(ah, AR9300_OTP_STATUS(ah), AR9300_OTP_STATUS_TYPE,
AR9300_OTP_STATUS_VALID, 1000))
return false;
- *data = REG_READ(ah, AR9300_OTP_READ_DATA);
+ *data = REG_READ(ah, AR9300_OTP_READ_DATA(ah));
return true;
}
@@ -3607,15 +3607,15 @@ static void ar9003_hw_xpa_bias_level_apply(struct ath_hw *ah, bool is2ghz)
if (AR_SREV_9485(ah) || AR_SREV_9330(ah) || AR_SREV_9340(ah) ||
AR_SREV_9531(ah) || AR_SREV_9561(ah))
- REG_RMW_FIELD(ah, AR_CH0_TOP2, AR_CH0_TOP2_XPABIASLVL, bias);
+ REG_RMW_FIELD(ah, AR_CH0_TOP2(ah), AR_CH0_TOP2_XPABIASLVL, bias);
else if (AR_SREV_9462(ah) || AR_SREV_9550(ah) || AR_SREV_9565(ah))
- REG_RMW_FIELD(ah, AR_CH0_TOP, AR_CH0_TOP_XPABIASLVL, bias);
+ REG_RMW_FIELD(ah, AR_CH0_TOP(ah), AR_CH0_TOP_XPABIASLVL, bias);
else {
- REG_RMW_FIELD(ah, AR_CH0_TOP, AR_CH0_TOP_XPABIASLVL, bias);
- REG_RMW_FIELD(ah, AR_CH0_THERM,
+ REG_RMW_FIELD(ah, AR_CH0_TOP(ah), AR_CH0_TOP_XPABIASLVL, bias);
+ REG_RMW_FIELD(ah, AR_CH0_THERM(ah),
AR_CH0_THERM_XPABIASLVL_MSB,
bias >> 2);
- REG_RMW_FIELD(ah, AR_CH0_THERM,
+ REG_RMW_FIELD(ah, AR_CH0_THERM(ah),
AR_CH0_THERM_XPASHORT2GND, 1);
}
}
@@ -3960,9 +3960,9 @@ void ar9003_hw_internal_regulator_apply(struct ath_hw *ah)
if (AR_SREV_9330(ah) || AR_SREV_9485(ah)) {
int reg_pmu_set;
- reg_pmu_set = REG_READ(ah, AR_PHY_PMU2) & ~AR_PHY_PMU2_PGM;
- REG_WRITE(ah, AR_PHY_PMU2, reg_pmu_set);
- if (!is_pmu_set(ah, AR_PHY_PMU2, reg_pmu_set))
+ reg_pmu_set = REG_READ(ah, AR_PHY_PMU2(ah)) & ~AR_PHY_PMU2_PGM;
+ REG_WRITE(ah, AR_PHY_PMU2(ah), reg_pmu_set);
+ if (!is_pmu_set(ah, AR_PHY_PMU2(ah), reg_pmu_set))
return;
if (AR_SREV_9330(ah)) {
@@ -3984,28 +3984,28 @@ void ar9003_hw_internal_regulator_apply(struct ath_hw *ah)
(3 << 24) | (1 << 28);
}
- REG_WRITE(ah, AR_PHY_PMU1, reg_pmu_set);
- if (!is_pmu_set(ah, AR_PHY_PMU1, reg_pmu_set))
+ REG_WRITE(ah, AR_PHY_PMU1(ah), reg_pmu_set);
+ if (!is_pmu_set(ah, AR_PHY_PMU1(ah), reg_pmu_set))
return;
- reg_pmu_set = (REG_READ(ah, AR_PHY_PMU2) & ~0xFFC00000)
+ reg_pmu_set = (REG_READ(ah, AR_PHY_PMU2(ah)) & ~0xFFC00000)
| (4 << 26);
- REG_WRITE(ah, AR_PHY_PMU2, reg_pmu_set);
- if (!is_pmu_set(ah, AR_PHY_PMU2, reg_pmu_set))
+ REG_WRITE(ah, AR_PHY_PMU2(ah), reg_pmu_set);
+ if (!is_pmu_set(ah, AR_PHY_PMU2(ah), reg_pmu_set))
return;
- reg_pmu_set = (REG_READ(ah, AR_PHY_PMU2) & ~0x00200000)
+ reg_pmu_set = (REG_READ(ah, AR_PHY_PMU2(ah)) & ~0x00200000)
| (1 << 21);
- REG_WRITE(ah, AR_PHY_PMU2, reg_pmu_set);
- if (!is_pmu_set(ah, AR_PHY_PMU2, reg_pmu_set))
+ REG_WRITE(ah, AR_PHY_PMU2(ah), reg_pmu_set);
+ if (!is_pmu_set(ah, AR_PHY_PMU2(ah), reg_pmu_set))
return;
} else if (AR_SREV_9462(ah) || AR_SREV_9565(ah) ||
AR_SREV_9561(ah)) {
reg_val = le32_to_cpu(pBase->swreg);
- REG_WRITE(ah, AR_PHY_PMU1, reg_val);
+ REG_WRITE(ah, AR_PHY_PMU1(ah), reg_val);
if (AR_SREV_9561(ah))
- REG_WRITE(ah, AR_PHY_PMU2, 0x10200000);
+ REG_WRITE(ah, AR_PHY_PMU2(ah), 0x10200000);
} else {
/* Internal regulator is ON. Write swreg register. */
reg_val = le32_to_cpu(pBase->swreg);
@@ -4021,25 +4021,25 @@ void ar9003_hw_internal_regulator_apply(struct ath_hw *ah)
}
} else {
if (AR_SREV_9330(ah) || AR_SREV_9485(ah)) {
- REG_RMW_FIELD(ah, AR_PHY_PMU2, AR_PHY_PMU2_PGM, 0);
- while (REG_READ_FIELD(ah, AR_PHY_PMU2,
+ REG_RMW_FIELD(ah, AR_PHY_PMU2(ah), AR_PHY_PMU2_PGM, 0);
+ while (REG_READ_FIELD(ah, AR_PHY_PMU2(ah),
AR_PHY_PMU2_PGM))
udelay(10);
- REG_RMW_FIELD(ah, AR_PHY_PMU1, AR_PHY_PMU1_PWD, 0x1);
- while (!REG_READ_FIELD(ah, AR_PHY_PMU1,
+ REG_RMW_FIELD(ah, AR_PHY_PMU1(ah), AR_PHY_PMU1_PWD, 0x1);
+ while (!REG_READ_FIELD(ah, AR_PHY_PMU1(ah),
AR_PHY_PMU1_PWD))
udelay(10);
- REG_RMW_FIELD(ah, AR_PHY_PMU2, AR_PHY_PMU2_PGM, 0x1);
- while (!REG_READ_FIELD(ah, AR_PHY_PMU2,
+ REG_RMW_FIELD(ah, AR_PHY_PMU2(ah), AR_PHY_PMU2_PGM, 0x1);
+ while (!REG_READ_FIELD(ah, AR_PHY_PMU2(ah),
AR_PHY_PMU2_PGM))
udelay(10);
} else if (AR_SREV_9462(ah) || AR_SREV_9565(ah))
- REG_RMW_FIELD(ah, AR_PHY_PMU1, AR_PHY_PMU1_PWD, 0x1);
+ REG_RMW_FIELD(ah, AR_PHY_PMU1(ah), AR_PHY_PMU1_PWD, 0x1);
else {
- reg_val = REG_READ(ah, AR_RTC_SLEEP_CLK) |
+ reg_val = REG_READ(ah, AR_RTC_SLEEP_CLK(ah)) |
AR_RTC_FORCE_SWREG_PRD;
- REG_WRITE(ah, AR_RTC_SLEEP_CLK, reg_val);
+ REG_WRITE(ah, AR_RTC_SLEEP_CLK(ah), reg_val);
}
}
@@ -4055,9 +4055,9 @@ static void ar9003_hw_apply_tuning_caps(struct ath_hw *ah)
if (eep->baseEepHeader.featureEnable & 0x40) {
tuning_caps_param &= 0x7f;
- REG_RMW_FIELD(ah, AR_CH0_XTAL, AR_CH0_XTAL_CAPINDAC,
+ REG_RMW_FIELD(ah, AR_CH0_XTAL(ah), AR_CH0_XTAL_CAPINDAC,
tuning_caps_param);
- REG_RMW_FIELD(ah, AR_CH0_XTAL, AR_CH0_XTAL_CAPOUTDAC,
+ REG_RMW_FIELD(ah, AR_CH0_XTAL(ah), AR_CH0_XTAL_CAPOUTDAC,
tuning_caps_param);
}
}
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
index f8ae20318302..b91ef1250ba8 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
@@ -82,16 +82,16 @@
/* AR5416_EEPMISC_BIG_ENDIAN not set indicates little endian */
#define AR9300_EEPMISC_LITTLE_ENDIAN 0
-#define AR9300_OTP_BASE \
- ((AR_SREV_9340(ah) || AR_SREV_9550(ah)) ? 0x30000 : 0x14000)
-#define AR9300_OTP_STATUS \
- ((AR_SREV_9340(ah) || AR_SREV_9550(ah)) ? 0x31018 : 0x15f18)
+#define AR9300_OTP_BASE(_ah) \
+ ((AR_SREV_9340(_ah) || AR_SREV_9550(_ah)) ? 0x30000 : 0x14000)
+#define AR9300_OTP_STATUS(_ah) \
+ ((AR_SREV_9340(_ah) || AR_SREV_9550(_ah)) ? 0x31018 : 0x15f18)
#define AR9300_OTP_STATUS_TYPE 0x7
#define AR9300_OTP_STATUS_VALID 0x4
#define AR9300_OTP_STATUS_ACCESS_BUSY 0x2
#define AR9300_OTP_STATUS_SM_BUSY 0x1
-#define AR9300_OTP_READ_DATA \
- ((AR_SREV_9340(ah) || AR_SREV_9550(ah)) ? 0x3101c : 0x15f1c)
+#define AR9300_OTP_READ_DATA(_ah) \
+ ((AR_SREV_9340(_ah) || AR_SREV_9550(_ah)) ? 0x3101c : 0x15f1c)
enum targetPowerHTRates {
HT_TARGET_RATE_0_8_16,
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
index 42f00a2a8c80..4f27a9fb1482 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
@@ -1032,8 +1032,8 @@ static void ar9003_hw_configpcipowersave(struct ath_hw *ah,
/* Nothing to do on restore for 11N */
if (!power_off /* !restore */) {
/* set bit 19 to allow forcing of pcie core into L1 state */
- REG_SET_BIT(ah, AR_PCIE_PM_CTRL, AR_PCIE_PM_CTRL_ENA);
- REG_WRITE(ah, AR_WA, ah->WARegVal);
+ REG_SET_BIT(ah, AR_PCIE_PM_CTRL(ah), AR_PCIE_PM_CTRL_ENA);
+ REG_WRITE(ah, AR_WA(ah), ah->WARegVal);
}
/*
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
index ff8ab58e67d9..a8bc003077dc 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
@@ -193,16 +193,16 @@ static bool ar9003_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked,
if (ath9k_hw_mci_is_enabled(ah))
async_mask |= AR_INTR_ASYNC_MASK_MCI;
- async_cause = REG_READ(ah, AR_INTR_ASYNC_CAUSE);
+ async_cause = REG_READ(ah, AR_INTR_ASYNC_CAUSE(ah));
if (async_cause & async_mask) {
- if ((REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M)
+ if ((REG_READ(ah, AR_RTC_STATUS(ah)) & AR_RTC_STATUS_M(ah))
== AR_RTC_STATUS_ON)
isr = REG_READ(ah, AR_ISR);
}
- sync_cause = REG_READ(ah, AR_INTR_SYNC_CAUSE) & AR_INTR_SYNC_DEFAULT;
+ sync_cause = REG_READ(ah, AR_INTR_SYNC_CAUSE(ah)) & AR_INTR_SYNC_DEFAULT;
*masked = 0;
@@ -280,7 +280,7 @@ static bool ar9003_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked,
u32 s5;
if (pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)
- s5 = REG_READ(ah, AR_ISR_S5_S);
+ s5 = REG_READ(ah, AR_ISR_S5_S(ah));
else
s5 = REG_READ(ah, AR_ISR_S5);
@@ -345,8 +345,8 @@ static bool ar9003_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked,
ath_dbg(common, INTERRUPT,
"AR_INTR_SYNC_LOCAL_TIMEOUT\n");
- REG_WRITE(ah, AR_INTR_SYNC_CAUSE_CLR, sync_cause);
- (void) REG_READ(ah, AR_INTR_SYNC_CAUSE_CLR);
+ REG_WRITE(ah, AR_INTR_SYNC_CAUSE_CLR(ah), sync_cause);
+ (void) REG_READ(ah, AR_INTR_SYNC_CAUSE_CLR(ah));
}
return true;
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mci.c b/drivers/net/wireless/ath/ath9k/ar9003_mci.c
index 8d7efd80d97a..2b9c07961cd7 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mci.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mci.c
@@ -458,7 +458,7 @@ static void ar9003_mci_observation_set_up(struct ath_hw *ah)
} else
return;
- REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL, AR_GPIO_JTAG_DISABLE);
+ REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL(ah), AR_GPIO_JTAG_DISABLE);
REG_RMW_FIELD(ah, AR_PHY_GLB_CONTROL, AR_GLB_DS_JTAG_DISABLE, 1);
REG_RMW_FIELD(ah, AR_PHY_GLB_CONTROL, AR_GLB_WLAN_UART_INTF_EN, 0);
@@ -466,12 +466,12 @@ static void ar9003_mci_observation_set_up(struct ath_hw *ah)
REG_RMW_FIELD(ah, AR_BTCOEX_CTRL2, AR_BTCOEX_CTRL2_GPIO_OBS_SEL, 0);
REG_RMW_FIELD(ah, AR_BTCOEX_CTRL2, AR_BTCOEX_CTRL2_MAC_BB_OBS_SEL, 1);
- REG_WRITE(ah, AR_OBS, 0x4b);
+ REG_WRITE(ah, AR_OBS(ah), 0x4b);
REG_RMW_FIELD(ah, AR_DIAG_SW, AR_DIAG_OBS_PT_SEL1, 0x03);
REG_RMW_FIELD(ah, AR_DIAG_SW, AR_DIAG_OBS_PT_SEL2, 0x01);
REG_RMW_FIELD(ah, AR_MACMISC, AR_MACMISC_MISC_OBS_BUS_LSB, 0x02);
REG_RMW_FIELD(ah, AR_MACMISC, AR_MACMISC_MISC_OBS_BUS_MSB, 0x03);
- REG_RMW_FIELD(ah, AR_PHY_TEST_CTL_STATUS,
+ REG_RMW_FIELD(ah, AR_PHY_TEST_CTL_STATUS(ah),
AR_PHY_TEST_CTL_DEBUGPORT_SEL, 0x07);
}
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_paprd.c b/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
index b2d53b6c0ffd..83d993fff695 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
@@ -201,19 +201,19 @@ static int ar9003_paprd_setup_single_table(struct ath_hw *ah)
ar9003_paprd_enable(ah, false);
- REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL1,
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL1(ah),
AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_LB_SKIP, 0x30);
- REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL1,
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL1(ah),
AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_LB_ENABLE, 1);
- REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL1,
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL1(ah),
AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_TX_GAIN_FORCE, 1);
- REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL1,
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL1(ah),
AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_RX_BB_GAIN_FORCE, 0);
- REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL1,
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL1(ah),
AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_IQCORR_ENABLE, 0);
- REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL1,
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL1(ah),
AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_AGC2_SETTLING, 28);
- REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL1,
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL1(ah),
AR_PHY_PAPRD_TRAINER_CNTL1_CF_CF_PAPRD_TRAIN_ENABLE, 1);
if (AR_SREV_9485(ah)) {
@@ -229,15 +229,15 @@ static int ar9003_paprd_setup_single_table(struct ath_hw *ah)
}
}
- REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL2,
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL2(ah),
AR_PHY_PAPRD_TRAINER_CNTL2_CF_PAPRD_INIT_RX_BB_GAIN, val);
- REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3,
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3(ah),
AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_FINE_CORR_LEN, 4);
- REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3,
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3(ah),
AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_COARSE_CORR_LEN, 4);
- REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3,
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3(ah),
AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_NUM_CORR_STAGES, 7);
- REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3,
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3(ah),
AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_MIN_LOOPBACK_DEL, 1);
if (AR_SREV_9485(ah) ||
@@ -246,10 +246,10 @@ static int ar9003_paprd_setup_single_table(struct ath_hw *ah)
AR_SREV_9550(ah) ||
AR_SREV_9330(ah) ||
AR_SREV_9340(ah))
- REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3,
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3(ah),
AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_QUICK_DROP, -3);
else
- REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3,
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3(ah),
AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_QUICK_DROP, -6);
val = -10;
@@ -257,16 +257,16 @@ static int ar9003_paprd_setup_single_table(struct ath_hw *ah)
if (IS_CHAN_2GHZ(ah->curchan) && !AR_SREV_9462(ah) && !AR_SREV_9565(ah))
val = -15;
- REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3,
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3(ah),
AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_ADC_DESIRED_SIZE,
val);
- REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3,
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3(ah),
AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_BBTXMIX_DISABLE, 1);
- REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL4,
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL4(ah),
AR_PHY_PAPRD_TRAINER_CNTL4_CF_PAPRD_SAFETY_DELTA, 0);
- REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL4,
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL4(ah),
AR_PHY_PAPRD_TRAINER_CNTL4_CF_PAPRD_MIN_CORR, 400);
- REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL4,
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL4(ah),
AR_PHY_PAPRD_TRAINER_CNTL4_CF_PAPRD_NUM_TRAIN_SAMPLES,
100);
REG_RMW_FIELD(ah, AR_PHY_PAPRD_PRE_POST_SCALE_0_B0,
@@ -313,7 +313,7 @@ static unsigned int ar9003_get_desired_gain(struct ath_hw *ah, int chain,
int desired_scale, desired_gain = 0;
u32 reg_olpc = 0, reg_cl_gain = 0;
- REG_CLR_BIT(ah, AR_PHY_PAPRD_TRAINER_STAT1,
+ REG_CLR_BIT(ah, AR_PHY_PAPRD_TRAINER_STAT1(ah),
AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_DONE);
desired_scale = REG_READ_FIELD(ah, AR_PHY_TPC_12,
AR_PHY_TPC_12_DESIRED_SCALE_HT40_5);
@@ -812,7 +812,7 @@ void ar9003_paprd_setup_gain_table(struct ath_hw *ah, int chain)
ar9003_tx_force_gain(ah, gain_index);
- REG_CLR_BIT(ah, AR_PHY_PAPRD_TRAINER_STAT1,
+ REG_CLR_BIT(ah, AR_PHY_PAPRD_TRAINER_STAT1(ah),
AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_DONE);
}
EXPORT_SYMBOL(ar9003_paprd_setup_gain_table);
@@ -833,7 +833,7 @@ static bool ar9003_paprd_retrain_pa_in(struct ath_hw *ah,
capdiv2g = REG_READ_FIELD(ah, AR_PHY_65NM_CH0_TXRF3,
AR_PHY_65NM_CH0_TXRF3_CAPDIV2G);
- quick_drop = REG_READ_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3,
+ quick_drop = REG_READ_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3(ah),
AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_QUICK_DROP);
if (quick_drop)
@@ -906,7 +906,7 @@ static bool ar9003_paprd_retrain_pa_in(struct ath_hw *ah,
REG_RMW_FIELD(ah, AR_PHY_65NM_CH0_TXRF3,
AR_PHY_65NM_CH0_TXRF3_CAPDIV2G, capdiv2g);
- REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3,
+ REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3(ah),
AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_QUICK_DROP,
quick_drop);
@@ -932,14 +932,14 @@ int ar9003_paprd_create_curve(struct ath_hw *ah,
data_L = &buf[0];
data_U = &buf[48];
- REG_CLR_BIT(ah, AR_PHY_CHAN_INFO_MEMORY,
+ REG_CLR_BIT(ah, AR_PHY_CHAN_INFO_MEMORY(ah),
AR_PHY_CHAN_INFO_MEMORY_CHANINFOMEM_S2_READ);
reg = AR_PHY_CHAN_INFO_TAB_0;
for (i = 0; i < 48; i++)
data_L[i] = REG_READ(ah, reg + (i << 2));
- REG_SET_BIT(ah, AR_PHY_CHAN_INFO_MEMORY,
+ REG_SET_BIT(ah, AR_PHY_CHAN_INFO_MEMORY(ah),
AR_PHY_CHAN_INFO_MEMORY_CHANINFOMEM_S2_READ);
for (i = 0; i < 48; i++)
@@ -951,7 +951,7 @@ int ar9003_paprd_create_curve(struct ath_hw *ah,
if (ar9003_paprd_retrain_pa_in(ah, caldata, chain))
status = -EINPROGRESS;
- REG_CLR_BIT(ah, AR_PHY_PAPRD_TRAINER_STAT1,
+ REG_CLR_BIT(ah, AR_PHY_PAPRD_TRAINER_STAT1(ah),
AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_DONE);
kfree(buf);
@@ -977,14 +977,14 @@ bool ar9003_paprd_is_done(struct ath_hw *ah)
{
int paprd_done, agc2_pwr;
- paprd_done = REG_READ_FIELD(ah, AR_PHY_PAPRD_TRAINER_STAT1,
+ paprd_done = REG_READ_FIELD(ah, AR_PHY_PAPRD_TRAINER_STAT1(ah),
AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_DONE);
if (AR_SREV_9485(ah))
goto exit;
if (paprd_done == 0x1) {
- agc2_pwr = REG_READ_FIELD(ah, AR_PHY_PAPRD_TRAINER_STAT1,
+ agc2_pwr = REG_READ_FIELD(ah, AR_PHY_PAPRD_TRAINER_STAT1(ah),
AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_AGC2_PWR);
ath_dbg(ath9k_hw_common(ah), CALIBRATE,
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
index 090ff0600c81..a29c11f944a5 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
@@ -296,7 +296,7 @@ static void ar9003_hw_spur_mitigate_mrc_cck(struct ath_hw *ah,
cck_spur_freq = cck_spur_freq & 0xfffff;
- REG_RMW_FIELD(ah, AR_PHY_AGC_CONTROL,
+ REG_RMW_FIELD(ah, AR_PHY_AGC_CONTROL(ah),
AR_PHY_AGC_CONTROL_YCOK_MAX, 0x7);
REG_RMW_FIELD(ah, AR_PHY_CCK_SPUR_MIT,
AR_PHY_CCK_SPUR_MIT_SPUR_RSSI_THR, 0x7f);
@@ -314,7 +314,7 @@ static void ar9003_hw_spur_mitigate_mrc_cck(struct ath_hw *ah,
}
}
- REG_RMW_FIELD(ah, AR_PHY_AGC_CONTROL,
+ REG_RMW_FIELD(ah, AR_PHY_AGC_CONTROL(ah),
AR_PHY_AGC_CONTROL_YCOK_MAX, 0x5);
REG_RMW_FIELD(ah, AR_PHY_CCK_SPUR_MIT,
AR_PHY_CCK_SPUR_MIT_USE_CCK_SPUR_MIT, 0x0);
@@ -352,7 +352,7 @@ static void ar9003_hw_spur_ofdm_clear(struct ath_hw *ah)
AR_PHY_TIMING4_ENABLE_CHAN_MASK, 0);
REG_RMW_FIELD(ah, AR_PHY_PILOT_SPUR_MASK,
AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_IDX_A, 0);
- REG_RMW_FIELD(ah, AR_PHY_SPUR_MASK_A,
+ REG_RMW_FIELD(ah, AR_PHY_SPUR_MASK_A(ah),
AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_IDX_A, 0);
REG_RMW_FIELD(ah, AR_PHY_CHAN_SPUR_MASK,
AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_IDX_A, 0);
@@ -360,7 +360,7 @@ static void ar9003_hw_spur_ofdm_clear(struct ath_hw *ah)
AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_A, 0);
REG_RMW_FIELD(ah, AR_PHY_CHAN_SPUR_MASK,
AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_A, 0);
- REG_RMW_FIELD(ah, AR_PHY_SPUR_MASK_A,
+ REG_RMW_FIELD(ah, AR_PHY_SPUR_MASK_A(ah),
AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_A, 0);
REG_RMW_FIELD(ah, AR_PHY_SPUR_REG,
AR_PHY_SPUR_REG_MASK_RATE_CNTL, 0);
@@ -419,7 +419,7 @@ static void ar9003_hw_spur_ofdm(struct ath_hw *ah,
AR_PHY_TIMING4_ENABLE_CHAN_MASK, 0x1);
REG_RMW_FIELD(ah, AR_PHY_PILOT_SPUR_MASK,
AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_IDX_A, mask_index);
- REG_RMW_FIELD(ah, AR_PHY_SPUR_MASK_A,
+ REG_RMW_FIELD(ah, AR_PHY_SPUR_MASK_A(ah),
AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_IDX_A, mask_index);
REG_RMW_FIELD(ah, AR_PHY_CHAN_SPUR_MASK,
AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_IDX_A, mask_index);
@@ -427,7 +427,7 @@ static void ar9003_hw_spur_ofdm(struct ath_hw *ah,
AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_A, 0xc);
REG_RMW_FIELD(ah, AR_PHY_CHAN_SPUR_MASK,
AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_A, 0xc);
- REG_RMW_FIELD(ah, AR_PHY_SPUR_MASK_A,
+ REG_RMW_FIELD(ah, AR_PHY_SPUR_MASK_A(ah),
AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_A, 0xa0);
REG_RMW_FIELD(ah, AR_PHY_SPUR_REG,
AR_PHY_SPUR_REG_MASK_RATE_CNTL, 0xff);
@@ -449,7 +449,7 @@ static void ar9003_hw_spur_ofdm_9565(struct ath_hw *ah,
mask_index);
/* A == B */
- REG_RMW_FIELD(ah, AR_PHY_SPUR_MASK_B,
+ REG_RMW_FIELD(ah, AR_PHY_SPUR_MASK_B(ah),
AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_IDX_A,
mask_index);
@@ -462,7 +462,7 @@ static void ar9003_hw_spur_ofdm_9565(struct ath_hw *ah,
AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_B, 0xe);
/* A == B */
- REG_RMW_FIELD(ah, AR_PHY_SPUR_MASK_B,
+ REG_RMW_FIELD(ah, AR_PHY_SPUR_MASK_B(ah),
AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_A, 0xa0);
}
@@ -710,7 +710,7 @@ static void ar9003_hw_override_ini(struct ath_hw *ah)
REG_WRITE(ah, AR_GLB_SWREG_DISCONT_MODE,
AR_GLB_SWREG_DISCONT_EN_BT_WLAN);
- if (REG_READ_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_0,
+ if (REG_READ_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_0(ah),
AR_PHY_TX_IQCAL_CONTROL_0_ENABLE_TXIQ_CAL))
ah->enabled_cals |= TX_IQ_CAL;
else
@@ -726,11 +726,11 @@ static void ar9003_hw_override_ini(struct ath_hw *ah)
if (AR_SREV_9340(ah) || AR_SREV_9531(ah) || AR_SREV_9550(ah) ||
AR_SREV_9561(ah)) {
if (ah->is_clk_25mhz) {
- REG_WRITE(ah, AR_RTC_DERIVED_CLK, 0x17c << 1);
+ REG_WRITE(ah, AR_RTC_DERIVED_CLK(ah), 0x17c << 1);
REG_WRITE(ah, AR_SLP32_MODE, 0x0010f3d7);
REG_WRITE(ah, AR_SLP32_INC, 0x0001e7ae);
} else {
- REG_WRITE(ah, AR_RTC_DERIVED_CLK, 0x261 << 1);
+ REG_WRITE(ah, AR_RTC_DERIVED_CLK(ah), 0x261 << 1);
REG_WRITE(ah, AR_SLP32_MODE, 0x0010f400);
REG_WRITE(ah, AR_SLP32_INC, 0x0001e800);
}
@@ -1795,7 +1795,7 @@ static void ar9003_hw_spectral_scan_wait(struct ath_hw *ah)
static void ar9003_hw_tx99_start(struct ath_hw *ah, u32 qnum)
{
- REG_SET_BIT(ah, AR_PHY_TEST, PHY_AGC_CLR);
+ REG_SET_BIT(ah, AR_PHY_TEST(ah), PHY_AGC_CLR);
REG_CLR_BIT(ah, AR_DIAG_SW, AR_DIAG_RX_DIS);
REG_WRITE(ah, AR_CR, AR_CR_RXD);
REG_WRITE(ah, AR_DLCL_IFS(qnum), 0);
@@ -1808,7 +1808,7 @@ static void ar9003_hw_tx99_start(struct ath_hw *ah, u32 qnum)
static void ar9003_hw_tx99_stop(struct ath_hw *ah)
{
- REG_CLR_BIT(ah, AR_PHY_TEST, PHY_AGC_CLR);
+ REG_CLR_BIT(ah, AR_PHY_TEST(ah), PHY_AGC_CLR);
REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_RX_DIS);
}
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
index ad949eb02f3d..57e2b4c89125 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
@@ -454,8 +454,8 @@
#define AR_PHY_GEN_CTRL (AR_SM_BASE + 0x4)
#define AR_PHY_MODE (AR_SM_BASE + 0x8)
#define AR_PHY_ACTIVE (AR_SM_BASE + 0xc)
-#define AR_PHY_SPUR_MASK_A (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x18 : 0x20))
-#define AR_PHY_SPUR_MASK_B (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x1c : 0x24))
+#define AR_PHY_SPUR_MASK_A(_ah) (AR_SM_BASE + (AR_SREV_9561(_ah) ? 0x18 : 0x20))
+#define AR_PHY_SPUR_MASK_B(_ah) (AR_SM_BASE + (AR_SREV_9561(_ah) ? 0x1c : 0x24))
#define AR_PHY_SPECTRAL_SCAN (AR_SM_BASE + 0x28)
#define AR_PHY_RADAR_BW_FILTER (AR_SM_BASE + 0x2c)
#define AR_PHY_SEARCH_START_DELAY (AR_SM_BASE + 0x30)
@@ -498,7 +498,7 @@
#define AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_A 0x3FF
#define AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_A_S 0
-#define AR_PHY_TEST (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x15c : 0x160))
+#define AR_PHY_TEST(_ah) (AR_SM_BASE + (AR_SREV_9561(_ah) ? 0x15c : 0x160))
#define AR_PHY_TEST_BBB_OBS_SEL 0x780000
#define AR_PHY_TEST_BBB_OBS_SEL_S 19
@@ -509,7 +509,7 @@
#define AR_PHY_TEST_CHAIN_SEL 0xC0000000
#define AR_PHY_TEST_CHAIN_SEL_S 30
-#define AR_PHY_TEST_CTL_STATUS (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x160 : 0x164))
+#define AR_PHY_TEST_CTL_STATUS(_ah) (AR_SM_BASE + (AR_SREV_9561(_ah) ? 0x160 : 0x164))
#define AR_PHY_TEST_CTL_TSTDAC_EN 0x1
#define AR_PHY_TEST_CTL_TSTDAC_EN_S 0
#define AR_PHY_TEST_CTL_TX_OBS_SEL 0x1C
@@ -524,22 +524,22 @@
#define AR_PHY_TEST_CTL_DEBUGPORT_SEL_S 29
-#define AR_PHY_TSTDAC (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x164 : 0x168))
+#define AR_PHY_TSTDAC(_ah) (AR_SM_BASE + (AR_SREV_9561(_ah) ? 0x164 : 0x168))
-#define AR_PHY_CHAN_STATUS (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x168 : 0x16c))
+#define AR_PHY_CHAN_STATUS(_ah) (AR_SM_BASE + (AR_SREV_9561(_ah) ? 0x168 : 0x16c))
-#define AR_PHY_CHAN_INFO_MEMORY (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x16c : 0x170))
+#define AR_PHY_CHAN_INFO_MEMORY(_ah) (AR_SM_BASE + (AR_SREV_9561(_ah) ? 0x16c : 0x170))
#define AR_PHY_CHAN_INFO_MEMORY_CHANINFOMEM_S2_READ 0x00000008
#define AR_PHY_CHAN_INFO_MEMORY_CHANINFOMEM_S2_READ_S 3
-#define AR_PHY_CHNINFO_NOISEPWR (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x170 : 0x174))
-#define AR_PHY_CHNINFO_GAINDIFF (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x174 : 0x178))
-#define AR_PHY_CHNINFO_FINETIM (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x178 : 0x17c))
-#define AR_PHY_CHAN_INFO_GAIN_0 (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x17c : 0x180))
-#define AR_PHY_SCRAMBLER_SEED (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x184 : 0x190))
-#define AR_PHY_CCK_TX_CTRL (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x188 : 0x194))
+#define AR_PHY_CHNINFO_NOISEPWR(_ah) (AR_SM_BASE + (AR_SREV_9561(_ah) ? 0x170 : 0x174))
+#define AR_PHY_CHNINFO_GAINDIFF(_ah) (AR_SM_BASE + (AR_SREV_9561(_ah) ? 0x174 : 0x178))
+#define AR_PHY_CHNINFO_FINETIM(_ah) (AR_SM_BASE + (AR_SREV_9561(_ah) ? 0x178 : 0x17c))
+#define AR_PHY_CHAN_INFO_GAIN_0(_ah) (AR_SM_BASE + (AR_SREV_9561(_ah) ? 0x17c : 0x180))
+#define AR_PHY_SCRAMBLER_SEED(_ah) (AR_SM_BASE + (AR_SREV_9561(_ah) ? 0x184 : 0x190))
+#define AR_PHY_CCK_TX_CTRL(_ah) (AR_SM_BASE + (AR_SREV_9561(_ah) ? 0x188 : 0x194))
-#define AR_PHY_HEAVYCLIP_CTL (AR_SM_BASE + (AR_SREV_9561(ah) ? 0x198 : 0x1a4))
+#define AR_PHY_HEAVYCLIP_CTL(_ah) (AR_SM_BASE + (AR_SREV_9561(_ah) ? 0x198 : 0x1a4))
#define AR_PHY_HEAVYCLIP_20 (AR_SM_BASE + 0x1a8)
#define AR_PHY_HEAVYCLIP_40 (AR_SM_BASE + 0x1ac)
#define AR_PHY_HEAVYCLIP_1 (AR_SM_BASE + 0x19c)
@@ -611,16 +611,16 @@
#define AR_PHY_TXGAIN_TABLE (AR_SM_BASE + 0x300)
-#define AR_PHY_TX_IQCAL_CONTROL_0 (AR_SM_BASE + (AR_SREV_9485(ah) ? \
+#define AR_PHY_TX_IQCAL_CONTROL_0(_ah) (AR_SM_BASE + (AR_SREV_9485(_ah) ? \
0x3c4 : 0x444))
-#define AR_PHY_TX_IQCAL_CONTROL_1 (AR_SM_BASE + (AR_SREV_9485(ah) ? \
+#define AR_PHY_TX_IQCAL_CONTROL_1(_ah) (AR_SM_BASE + (AR_SREV_9485(_ah) ? \
0x3c8 : 0x448))
-#define AR_PHY_TX_IQCAL_START (AR_SM_BASE + (AR_SREV_9485(ah) ? \
+#define AR_PHY_TX_IQCAL_START(_ah) (AR_SM_BASE + (AR_SREV_9485(_ah) ? \
0x3c4 : 0x440))
-#define AR_PHY_TX_IQCAL_STATUS_B0 (AR_SM_BASE + (AR_SREV_9485(ah) ? \
+#define AR_PHY_TX_IQCAL_STATUS_B0(_ah) (AR_SM_BASE + (AR_SREV_9485(_ah) ? \
0x3f0 : 0x48c))
-#define AR_PHY_TX_IQCAL_CORR_COEFF_B0(_i) (AR_SM_BASE + \
- (AR_SREV_9485(ah) ? \
+#define AR_PHY_TX_IQCAL_CORR_COEFF_B0(_ah, _i) (AR_SM_BASE + \
+ (AR_SREV_9485(_ah) ? \
0x3d0 : 0x450) + ((_i) << 2))
#define AR_PHY_RTT_CTRL (AR_SM_BASE + 0x380)
@@ -684,8 +684,8 @@
#define AR_PHY_65NM_CH0_RXTX2_SYNTHOVR_MASK 0x00000008
#define AR_PHY_65NM_CH0_RXTX2_SYNTHOVR_MASK_S 3
-#define AR_CH0_TOP (AR_SREV_9300(ah) ? 0x16288 : \
- (((AR_SREV_9462(ah) || AR_SREV_9565(ah)) ? 0x1628c : 0x16280)))
+#define AR_CH0_TOP(_ah) (AR_SREV_9300(_ah) ? 0x16288 : \
+ (((AR_SREV_9462(_ah) || AR_SREV_9565(_ah)) ? 0x1628c : 0x16280)))
#define AR_CH0_TOP_XPABIASLVL (AR_SREV_9550(ah) ? 0x3c0 : 0x300)
#define AR_CH0_TOP_XPABIASLVL_S (AR_SREV_9550(ah) ? 6 : 8)
@@ -705,8 +705,8 @@
#define AR_SWITCH_TABLE_ALL (0xfff)
#define AR_SWITCH_TABLE_ALL_S (0)
-#define AR_CH0_THERM (AR_SREV_9300(ah) ? 0x16290 :\
- ((AR_SREV_9462(ah) || AR_SREV_9565(ah)) ? 0x16294 : 0x1628c))
+#define AR_CH0_THERM(_ah) (AR_SREV_9300(_ah) ? 0x16290 :\
+ ((AR_SREV_9462(_ah) || AR_SREV_9565(_ah)) ? 0x16294 : 0x1628c))
#define AR_CH0_THERM_XPABIASLVL_MSB 0x3
#define AR_CH0_THERM_XPABIASLVL_MSB_S 0
#define AR_CH0_THERM_XPASHORT2GND 0x4
@@ -717,26 +717,26 @@
#define AR_CH0_THERM_SAR_ADC_OUT 0x0000ff00
#define AR_CH0_THERM_SAR_ADC_OUT_S 8
-#define AR_CH0_TOP2 (AR_SREV_9300(ah) ? 0x1628c : \
- (AR_SREV_9462(ah) ? 0x16290 : 0x16284))
+#define AR_CH0_TOP2(_ah) (AR_SREV_9300(_ah) ? 0x1628c : \
+ (AR_SREV_9462(_ah) ? 0x16290 : 0x16284))
#define AR_CH0_TOP2_XPABIASLVL (AR_SREV_9561(ah) ? 0x1e00 : 0xf000)
#define AR_CH0_TOP2_XPABIASLVL_S (AR_SREV_9561(ah) ? 9 : 12)
-#define AR_CH0_XTAL (AR_SREV_9300(ah) ? 0x16294 : \
- ((AR_SREV_9462(ah) || AR_SREV_9565(ah)) ? 0x16298 : \
- (AR_SREV_9561(ah) ? 0x162c0 : 0x16290)))
+#define AR_CH0_XTAL(_ah) (AR_SREV_9300(_ah) ? 0x16294 : \
+ ((AR_SREV_9462(_ah) || AR_SREV_9565(_ah)) ? 0x16298 : \
+ (AR_SREV_9561(_ah) ? 0x162c0 : 0x16290)))
#define AR_CH0_XTAL_CAPINDAC 0x7f000000
#define AR_CH0_XTAL_CAPINDAC_S 24
#define AR_CH0_XTAL_CAPOUTDAC 0x00fe0000
#define AR_CH0_XTAL_CAPOUTDAC_S 17
-#define AR_PHY_PMU1 ((AR_SREV_9462(ah) || AR_SREV_9565(ah)) ? 0x16340 : \
- (AR_SREV_9561(ah) ? 0x16cc0 : 0x16c40))
+#define AR_PHY_PMU1(_ah) ((AR_SREV_9462(_ah) || AR_SREV_9565(_ah)) ? 0x16340 : \
+ (AR_SREV_9561(_ah) ? 0x16cc0 : 0x16c40))
#define AR_PHY_PMU1_PWD 0x1
#define AR_PHY_PMU1_PWD_S 0
-#define AR_PHY_PMU2 ((AR_SREV_9462(ah) || AR_SREV_9565(ah)) ? 0x16344 : \
- (AR_SREV_9561(ah) ? 0x16cc4 : 0x16c44))
+#define AR_PHY_PMU2(_ah) ((AR_SREV_9462(_ah) || AR_SREV_9565(_ah)) ? 0x16344 : \
+ (AR_SREV_9561(_ah) ? 0x16cc4 : 0x16c44))
#define AR_PHY_PMU2_PGM 0x00200000
#define AR_PHY_PMU2_PGM_S 21
@@ -974,7 +974,7 @@
#define AR_PHY_TPC_5_B1 (AR_SM1_BASE + 0x208)
#define AR_PHY_TPC_6_B1 (AR_SM1_BASE + 0x20c)
#define AR_PHY_TPC_11_B1 (AR_SM1_BASE + 0x220)
-#define AR_PHY_PDADC_TAB_1 (AR_SM1_BASE + (AR_SREV_9462_20_OR_LATER(ah) ? \
+#define AR_PHY_PDADC_TAB_1(_ah) (AR_SM1_BASE + (AR_SREV_9462_20_OR_LATER(_ah) ? \
0x280 : 0x240))
#define AR_PHY_TPC_19_B1 (AR_SM1_BASE + 0x240)
#define AR_PHY_TPC_19_B1_ALPHA_THERM 0xff
@@ -1152,7 +1152,7 @@
#define AR_PHY_PAPRD_CTRL1_PAPRD_MAG_SCALE_FACT 0x0ffe0000
#define AR_PHY_PAPRD_CTRL1_PAPRD_MAG_SCALE_FACT_S 17
-#define AR_PHY_PAPRD_TRAINER_CNTL1 (AR_SM_BASE + (AR_SREV_9485(ah) ? 0x580 : 0x490))
+#define AR_PHY_PAPRD_TRAINER_CNTL1(_ah) (AR_SM_BASE + (AR_SREV_9485(_ah) ? 0x580 : 0x490))
#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_CF_PAPRD_TRAIN_ENABLE 0x00000001
#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_CF_PAPRD_TRAIN_ENABLE_S 0
@@ -1169,12 +1169,12 @@
#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_LB_SKIP 0x0003f000
#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_LB_SKIP_S 12
-#define AR_PHY_PAPRD_TRAINER_CNTL2 (AR_SM_BASE + (AR_SREV_9485(ah) ? 0x584 : 0x494))
+#define AR_PHY_PAPRD_TRAINER_CNTL2(_ah) (AR_SM_BASE + (AR_SREV_9485(_ah) ? 0x584 : 0x494))
#define AR_PHY_PAPRD_TRAINER_CNTL2_CF_PAPRD_INIT_RX_BB_GAIN 0xFFFFFFFF
#define AR_PHY_PAPRD_TRAINER_CNTL2_CF_PAPRD_INIT_RX_BB_GAIN_S 0
-#define AR_PHY_PAPRD_TRAINER_CNTL3 (AR_SM_BASE + (AR_SREV_9485(ah) ? 0x588 : 0x498))
+#define AR_PHY_PAPRD_TRAINER_CNTL3(_ah) (AR_SM_BASE + (AR_SREV_9485(_ah) ? 0x588 : 0x498))
#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_ADC_DESIRED_SIZE 0x0000003f
#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_ADC_DESIRED_SIZE_S 0
@@ -1191,7 +1191,7 @@
#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_BBTXMIX_DISABLE 0x20000000
#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_BBTXMIX_DISABLE_S 29
-#define AR_PHY_PAPRD_TRAINER_CNTL4 (AR_SM_BASE + (AR_SREV_9485(ah) ? 0x58c : 0x49c))
+#define AR_PHY_PAPRD_TRAINER_CNTL4(_ah) (AR_SM_BASE + (AR_SREV_9485(_ah) ? 0x58c : 0x49c))
#define AR_PHY_PAPRD_TRAINER_CNTL4_CF_PAPRD_NUM_TRAIN_SAMPLES 0x03ff0000
#define AR_PHY_PAPRD_TRAINER_CNTL4_CF_PAPRD_NUM_TRAIN_SAMPLES_S 16
@@ -1211,7 +1211,7 @@
#define AR_PHY_PAPRD_PRE_POST_SCALING 0x3FFFF
#define AR_PHY_PAPRD_PRE_POST_SCALING_S 0
-#define AR_PHY_PAPRD_TRAINER_STAT1 (AR_SM_BASE + (AR_SREV_9485(ah) ? 0x590 : 0x4a0))
+#define AR_PHY_PAPRD_TRAINER_STAT1(_ah) (AR_SM_BASE + (AR_SREV_9485(_ah) ? 0x590 : 0x4a0))
#define AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_DONE 0x00000001
#define AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_DONE_S 0
@@ -1226,7 +1226,7 @@
#define AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_AGC2_PWR 0x0001fe00
#define AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_AGC2_PWR_S 9
-#define AR_PHY_PAPRD_TRAINER_STAT2 (AR_SM_BASE + (AR_SREV_9485(ah) ? 0x594 : 0x4a4))
+#define AR_PHY_PAPRD_TRAINER_STAT2(_ah) (AR_SM_BASE + (AR_SREV_9485(_ah) ? 0x594 : 0x4a4))
#define AR_PHY_PAPRD_TRAINER_STAT2_PAPRD_FINE_VAL 0x0000ffff
#define AR_PHY_PAPRD_TRAINER_STAT2_PAPRD_FINE_VAL_S 0
@@ -1235,7 +1235,7 @@
#define AR_PHY_PAPRD_TRAINER_STAT2_PAPRD_FINE_IDX 0x00600000
#define AR_PHY_PAPRD_TRAINER_STAT2_PAPRD_FINE_IDX_S 21
-#define AR_PHY_PAPRD_TRAINER_STAT3 (AR_SM_BASE + (AR_SREV_9485(ah) ? 0x598 : 0x4a8))
+#define AR_PHY_PAPRD_TRAINER_STAT3(_ah) (AR_SM_BASE + (AR_SREV_9485(_ah) ? 0x598 : 0x4a8))
#define AR_PHY_PAPRD_TRAINER_STAT3_PAPRD_TRAIN_SAMPLES_CNT 0x000fffff
#define AR_PHY_PAPRD_TRAINER_STAT3_PAPRD_TRAIN_SAMPLES_CNT_S 0
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_wow.c b/drivers/net/wireless/ath/ath9k/ar9003_wow.c
index bea41df9fbd7..ac32afbf2c97 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_wow.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_wow.c
@@ -43,7 +43,7 @@ static void ath9k_hw_set_powermode_wow_sleep(struct ath_hw *ah)
/* set rx disable bit */
REG_WRITE(ah, AR_CR, AR_CR_RXD);
- if (!ath9k_hw_wait(ah, AR_CR, AR_CR_RXE, 0, AH_WAIT_TIMEOUT)) {
+ if (!ath9k_hw_wait(ah, AR_CR, AR_CR_RXE(ah), 0, AH_WAIT_TIMEOUT)) {
ath_err(common, "Failed to stop Rx DMA in 10ms AR_CR=0x%08x AR_DIAG_SW=0x%08x\n",
REG_READ(ah, AR_CR), REG_READ(ah, AR_DIAG_SW));
return;
@@ -61,7 +61,7 @@ static void ath9k_hw_set_powermode_wow_sleep(struct ath_hw *ah)
if (ath9k_hw_mci_is_enabled(ah))
REG_WRITE(ah, AR_RTC_KEEP_AWAKE, 0x2);
- REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_ON_INT);
+ REG_WRITE(ah, AR_RTC_FORCE_WAKE(ah), AR_RTC_FORCE_WAKE_ON_INT);
}
static void ath9k_wow_create_keep_alive_pattern(struct ath_hw *ah)
@@ -226,7 +226,7 @@ u32 ath9k_hw_wow_wakeup(struct ath_hw *ah)
*/
/* do we need to check the bit value 0x01000000 (7-10) ?? */
- REG_RMW(ah, AR_PCIE_PM_CTRL, AR_PMCTRL_WOW_PME_CLR,
+ REG_RMW(ah, AR_PCIE_PM_CTRL(ah), AR_PMCTRL_WOW_PME_CLR,
AR_PMCTRL_PWR_STATE_D1D3);
/*
@@ -278,12 +278,12 @@ static void ath9k_hw_wow_set_arwr_reg(struct ath_hw *ah)
* to the external PCI-E reset. We also need to tie
* the PCI-E Phy reset to the PCI-E reset.
*/
- wa_reg = REG_READ(ah, AR_WA);
+ wa_reg = REG_READ(ah, AR_WA(ah));
wa_reg &= ~AR_WA_UNTIE_RESET_EN;
wa_reg |= AR_WA_RESET_EN;
wa_reg |= AR_WA_POR_SHORT;
- REG_WRITE(ah, AR_WA, wa_reg);
+ REG_WRITE(ah, AR_WA(ah), wa_reg);
}
void ath9k_hw_wow_enable(struct ath_hw *ah, u32 pattern_enable)
@@ -309,11 +309,11 @@ void ath9k_hw_wow_enable(struct ath_hw *ah, u32 pattern_enable)
* Set and clear WOW_PME_CLEAR for the chip
* to generate next wow signal.
*/
- REG_SET_BIT(ah, AR_PCIE_PM_CTRL, AR_PMCTRL_HOST_PME_EN |
+ REG_SET_BIT(ah, AR_PCIE_PM_CTRL(ah), AR_PMCTRL_HOST_PME_EN |
AR_PMCTRL_PWR_PM_CTRL_ENA |
AR_PMCTRL_AUX_PWR_DET |
AR_PMCTRL_WOW_PME_CLR);
- REG_CLR_BIT(ah, AR_PCIE_PM_CTRL, AR_PMCTRL_WOW_PME_CLR);
+ REG_CLR_BIT(ah, AR_PCIE_PM_CTRL(ah), AR_PMCTRL_WOW_PME_CLR);
/*
* Random Backoff.
@@ -414,7 +414,7 @@ void ath9k_hw_wow_enable(struct ath_hw *ah, u32 pattern_enable)
/*
* Set the power states appropriately and enable PME.
*/
- host_pm_ctrl = REG_READ(ah, AR_PCIE_PM_CTRL);
+ host_pm_ctrl = REG_READ(ah, AR_PCIE_PM_CTRL(ah));
host_pm_ctrl |= AR_PMCTRL_PWR_STATE_D1D3 |
AR_PMCTRL_HOST_PME_EN |
AR_PMCTRL_PWR_PM_CTRL_ENA;
@@ -430,7 +430,7 @@ void ath9k_hw_wow_enable(struct ath_hw *ah, u32 pattern_enable)
host_pm_ctrl |= AR_PMCTRL_PWR_STATE_D1D3_REAL;
}
- REG_WRITE(ah, AR_PCIE_PM_CTRL, host_pm_ctrl);
+ REG_WRITE(ah, AR_PCIE_PM_CTRL(ah), host_pm_ctrl);
/*
* Enable sequence number generation when asleep.
diff --git a/drivers/net/wireless/ath/ath9k/btcoex.c b/drivers/net/wireless/ath/ath9k/btcoex.c
index 618c9df35fc1..9b393a8f7c3a 100644
--- a/drivers/net/wireless/ath/ath9k/btcoex.c
+++ b/drivers/net/wireless/ath/ath9k/btcoex.c
@@ -173,16 +173,16 @@ void ath9k_hw_btcoex_init_2wire(struct ath_hw *ah)
struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
/* connect bt_active to baseband */
- REG_CLR_BIT(ah, AR_GPIO_INPUT_EN_VAL,
+ REG_CLR_BIT(ah, AR_GPIO_INPUT_EN_VAL(ah),
(AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_DEF |
AR_GPIO_INPUT_EN_VAL_BT_FREQUENCY_DEF));
- REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL,
+ REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL(ah),
AR_GPIO_INPUT_EN_VAL_BT_ACTIVE_BB);
/* Set input mux for bt_active to gpio pin */
if (!AR_SREV_SOC(ah))
- REG_RMW_FIELD(ah, AR_GPIO_INPUT_MUX1,
+ REG_RMW_FIELD(ah, AR_GPIO_INPUT_MUX1(ah),
AR_GPIO_INPUT_MUX1_BT_ACTIVE,
btcoex_hw->btactive_gpio);
@@ -197,17 +197,17 @@ void ath9k_hw_btcoex_init_3wire(struct ath_hw *ah)
struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
/* btcoex 3-wire */
- REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL,
+ REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL(ah),
(AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_BB |
AR_GPIO_INPUT_EN_VAL_BT_ACTIVE_BB));
/* Set input mux for bt_prority_async and
* bt_active_async to GPIO pins */
if (!AR_SREV_SOC(ah)) {
- REG_RMW_FIELD(ah, AR_GPIO_INPUT_MUX1,
+ REG_RMW_FIELD(ah, AR_GPIO_INPUT_MUX1(ah),
AR_GPIO_INPUT_MUX1_BT_ACTIVE,
btcoex_hw->btactive_gpio);
- REG_RMW_FIELD(ah, AR_GPIO_INPUT_MUX1,
+ REG_RMW_FIELD(ah, AR_GPIO_INPUT_MUX1(ah),
AR_GPIO_INPUT_MUX1_BT_PRIORITY,
btcoex_hw->btpriority_gpio);
}
@@ -404,7 +404,7 @@ void ath9k_hw_btcoex_enable(struct ath_hw *ah)
if (ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_MCI &&
!AR_SREV_SOC(ah)) {
- REG_RMW(ah, AR_GPIO_PDPU,
+ REG_RMW(ah, AR_GPIO_PDPU(ah),
(0x2 << (btcoex_hw->btactive_gpio * 2)),
(0x3 << (btcoex_hw->btactive_gpio * 2)));
}
diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c
index 0422a33395b7..fb270df75eb2 100644
--- a/drivers/net/wireless/ath/ath9k/calib.c
+++ b/drivers/net/wireless/ath/ath9k/calib.c
@@ -231,17 +231,17 @@ void ath9k_hw_start_nfcal(struct ath_hw *ah, bool update)
if (ah->caldata)
set_bit(NFCAL_PENDING, &ah->caldata->cal_flags);
- REG_SET_BIT(ah, AR_PHY_AGC_CONTROL,
+ REG_SET_BIT(ah, AR_PHY_AGC_CONTROL(ah),
AR_PHY_AGC_CONTROL_ENABLE_NF);
if (update)
- REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
+ REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL(ah),
AR_PHY_AGC_CONTROL_NO_UPDATE_NF);
else
- REG_SET_BIT(ah, AR_PHY_AGC_CONTROL,
+ REG_SET_BIT(ah, AR_PHY_AGC_CONTROL(ah),
AR_PHY_AGC_CONTROL_NO_UPDATE_NF);
- REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_NF);
+ REG_SET_BIT(ah, AR_PHY_AGC_CONTROL(ah), AR_PHY_AGC_CONTROL_NF);
}
int ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
@@ -251,7 +251,7 @@ int ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
u8 chainmask = (ah->rxchainmask << 3) | ah->rxchainmask;
struct ath_common *common = ath9k_hw_common(ah);
s16 default_nf = ath9k_hw_get_nf_limits(ah, chan)->nominal;
- u32 bb_agc_ctl = REG_READ(ah, AR_PHY_AGC_CONTROL);
+ u32 bb_agc_ctl = REG_READ(ah, AR_PHY_AGC_CONTROL(ah));
if (ah->caldata)
h = ah->caldata->nfCalHist;
@@ -286,7 +286,7 @@ int ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
* (or after end rx/tx frame if ongoing)
*/
if (bb_agc_ctl & AR_PHY_AGC_CONTROL_NF) {
- REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_NF);
+ REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL(ah), AR_PHY_AGC_CONTROL_NF);
REG_RMW_BUFFER_FLUSH(ah);
ENABLE_REG_RMW_BUFFER(ah);
}
@@ -295,11 +295,11 @@ int ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
* Load software filtered NF value into baseband internal minCCApwr
* variable.
*/
- REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
+ REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL(ah),
AR_PHY_AGC_CONTROL_ENABLE_NF);
- REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
+ REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL(ah),
AR_PHY_AGC_CONTROL_NO_UPDATE_NF);
- REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_NF);
+ REG_SET_BIT(ah, AR_PHY_AGC_CONTROL(ah), AR_PHY_AGC_CONTROL_NF);
REG_RMW_BUFFER_FLUSH(ah);
/*
@@ -309,7 +309,7 @@ int ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
* (11n max length 22.1 msec)
*/
for (j = 0; j < 22200; j++) {
- if ((REG_READ(ah, AR_PHY_AGC_CONTROL) &
+ if ((REG_READ(ah, AR_PHY_AGC_CONTROL(ah)) &
AR_PHY_AGC_CONTROL_NF) == 0)
break;
udelay(10);
@@ -321,12 +321,12 @@ int ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
if (bb_agc_ctl & AR_PHY_AGC_CONTROL_NF) {
ENABLE_REG_RMW_BUFFER(ah);
if (bb_agc_ctl & AR_PHY_AGC_CONTROL_ENABLE_NF)
- REG_SET_BIT(ah, AR_PHY_AGC_CONTROL,
+ REG_SET_BIT(ah, AR_PHY_AGC_CONTROL(ah),
AR_PHY_AGC_CONTROL_ENABLE_NF);
if (bb_agc_ctl & AR_PHY_AGC_CONTROL_NO_UPDATE_NF)
- REG_SET_BIT(ah, AR_PHY_AGC_CONTROL,
+ REG_SET_BIT(ah, AR_PHY_AGC_CONTROL(ah),
AR_PHY_AGC_CONTROL_NO_UPDATE_NF);
- REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_NF);
+ REG_SET_BIT(ah, AR_PHY_AGC_CONTROL(ah), AR_PHY_AGC_CONTROL_NF);
REG_RMW_BUFFER_FLUSH(ah);
}
@@ -342,7 +342,7 @@ int ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
if (j == 22200) {
ath_dbg(common, ANY,
"Timeout while waiting for nf to load: AR_PHY_AGC_CONTROL=0x%x\n",
- REG_READ(ah, AR_PHY_AGC_CONTROL));
+ REG_READ(ah, AR_PHY_AGC_CONTROL(ah)));
return -ETIMEDOUT;
}
@@ -410,7 +410,7 @@ bool ath9k_hw_getnf(struct ath_hw *ah, struct ath9k_channel *chan)
struct ieee80211_channel *c = chan->chan;
struct ath9k_hw_cal_data *caldata = ah->caldata;
- if (REG_READ(ah, AR_PHY_AGC_CONTROL) & AR_PHY_AGC_CONTROL_NF) {
+ if (REG_READ(ah, AR_PHY_AGC_CONTROL(ah)) & AR_PHY_AGC_CONTROL_NF) {
ath_dbg(common, CALIBRATE,
"NF did not complete in calibration window\n");
return false;
@@ -478,7 +478,7 @@ void ath9k_hw_bstuck_nfcal(struct ath_hw *ah)
*/
if (!test_bit(NFCAL_PENDING, &caldata->cal_flags))
ath9k_hw_start_nfcal(ah, true);
- else if (!(REG_READ(ah, AR_PHY_AGC_CONTROL) & AR_PHY_AGC_CONTROL_NF))
+ else if (!(REG_READ(ah, AR_PHY_AGC_CONTROL(ah)) & AR_PHY_AGC_CONTROL_NF))
ath9k_hw_getnf(ah, ah->curchan);
set_bit(NFCAL_INTF, &caldata->cal_flags);
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.h b/drivers/net/wireless/ath/ath9k/eeprom.h
index 31390af6c33e..f1cde43fcb55 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/eeprom.h
@@ -68,8 +68,8 @@
#define AR5416_EEPROM_OFFSET 0x2000
#define AR5416_EEPROM_MAX 0xae0
-#define AR5416_EEPROM_START_ADDR \
- (AR_SREV_9100(ah)) ? 0x1fff1000 : 0x503f1200
+#define AR5416_EEPROM_START_ADDR(_ah) \
+ (AR_SREV_9100(_ah)) ? 0x1fff1000 : 0x503f1200
#define SD_NO_CTL 0xE0
#define NO_CTL 0xff
@@ -110,10 +110,10 @@
#define FBIN2FREQ(x, y) ((y) ? (2300 + x) : (4800 + 5 * x))
#define ath9k_hw_use_flash(_ah) (!(_ah->ah_flags & AH_USE_EEPROM))
-#define OLC_FOR_AR9280_20_LATER (AR_SREV_9280_20_OR_LATER(ah) && \
- ah->eep_ops->get_eeprom(ah, EEP_OL_PWRCTRL))
-#define OLC_FOR_AR9287_10_LATER (AR_SREV_9287_11_OR_LATER(ah) && \
- ah->eep_ops->get_eeprom(ah, EEP_OL_PWRCTRL))
+#define OLC_FOR_AR9280_20_LATER(_ah) (AR_SREV_9280_20_OR_LATER(_ah) && \
+ _ah->eep_ops->get_eeprom(_ah, EEP_OL_PWRCTRL))
+#define OLC_FOR_AR9287_10_LATER(_ah) (AR_SREV_9287_11_OR_LATER(_ah) && \
+ _ah->eep_ops->get_eeprom(_ah, EEP_OL_PWRCTRL))
#define EEP_RFSILENT_ENABLED 0x0001
#define EEP_RFSILENT_ENABLED_S 0
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_def.c b/drivers/net/wireless/ath/ath9k/eeprom_def.c
index 9729a69d3e2e..7685f8ab371e 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_def.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_def.c
@@ -800,7 +800,7 @@ static void ath9k_hw_set_def_power_cal_table(struct ath_hw *ah,
numPiers = AR5416_NUM_5G_CAL_PIERS;
}
- if (OLC_FOR_AR9280_20_LATER && IS_CHAN_2GHZ(chan)) {
+ if (OLC_FOR_AR9280_20_LATER(ah) && IS_CHAN_2GHZ(chan)) {
pRawDataset = pEepData->calPierData2G[0];
ah->initPDADC = ((struct calDataPerFreqOpLoop *)
pRawDataset)->vpdPdg[0][0];
@@ -841,7 +841,7 @@ static void ath9k_hw_set_def_power_cal_table(struct ath_hw *ah,
pRawDataset = pEepData->calPierData5G[i];
- if (OLC_FOR_AR9280_20_LATER) {
+ if (OLC_FOR_AR9280_20_LATER(ah)) {
u8 pcdacIdx;
u8 txPower;
@@ -869,7 +869,7 @@ static void ath9k_hw_set_def_power_cal_table(struct ath_hw *ah,
ENABLE_REGWRITE_BUFFER(ah);
- if (OLC_FOR_AR9280_20_LATER) {
+ if (OLC_FOR_AR9280_20_LATER(ah)) {
REG_WRITE(ah,
AR_PHY_TPCRG5 + regChainOffset,
SM(0x6,
@@ -1203,7 +1203,7 @@ static void ath9k_hw_def_set_txpower(struct ath_hw *ah,
| ATH9K_POW_SM(ratesArray[rate24mb], 0));
if (IS_CHAN_2GHZ(chan)) {
- if (OLC_FOR_AR9280_20_LATER) {
+ if (OLC_FOR_AR9280_20_LATER(ah)) {
cck_ofdm_delta = 2;
REG_WRITE(ah, AR_PHY_POWER_TX_RATE3,
ATH9K_POW_SM(RT_AR_DELTA(rate2s), 24)
@@ -1259,7 +1259,7 @@ static void ath9k_hw_def_set_txpower(struct ath_hw *ah,
ht40PowerIncForPdadc, 8)
| ATH9K_POW_SM(ratesArray[rateHt40_4] +
ht40PowerIncForPdadc, 0));
- if (OLC_FOR_AR9280_20_LATER) {
+ if (OLC_FOR_AR9280_20_LATER(ah)) {
REG_WRITE(ah, AR_PHY_POWER_TX_RATE9,
ATH9K_POW_SM(ratesArray[rateExtOfdm], 24)
| ATH9K_POW_SM(RT_AR_DELTA(rateExtCck), 16)
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
index 1a2e0c7eeb02..f521dfa2f194 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -561,11 +561,11 @@ static void ath9k_hif_usb_rx_stream(struct hif_device_usb *hif_dev,
memcpy(ptr, skb->data, rx_remain_len);
rx_pkt_len += rx_remain_len;
- hif_dev->rx_remain_len = 0;
skb_put(remain_skb, rx_pkt_len);
skb_pool[pool_index++] = remain_skb;
-
+ hif_dev->remain_skb = NULL;
+ hif_dev->rx_remain_len = 0;
} else {
index = rx_remain_len;
}
@@ -584,16 +584,21 @@ static void ath9k_hif_usb_rx_stream(struct hif_device_usb *hif_dev,
pkt_len = get_unaligned_le16(ptr + index);
pkt_tag = get_unaligned_le16(ptr + index + 2);
+ /* It is supposed that if we have an invalid pkt_tag or
+ * pkt_len then the whole input SKB is considered invalid
+ * and dropped; the associated packets already in skb_pool
+ * are dropped, too.
+ */
if (pkt_tag != ATH_USB_RX_STREAM_MODE_TAG) {
RX_STAT_INC(hif_dev, skb_dropped);
- return;
+ goto invalid_pkt;
}
if (pkt_len > 2 * MAX_RX_BUF_SIZE) {
dev_err(&hif_dev->udev->dev,
"ath9k_htc: invalid pkt_len (%x)\n", pkt_len);
RX_STAT_INC(hif_dev, skb_dropped);
- return;
+ goto invalid_pkt;
}
pad_len = 4 - (pkt_len & 0x3);
@@ -605,11 +610,6 @@ static void ath9k_hif_usb_rx_stream(struct hif_device_usb *hif_dev,
if (index > MAX_RX_BUF_SIZE) {
spin_lock(&hif_dev->rx_lock);
- hif_dev->rx_remain_len = index - MAX_RX_BUF_SIZE;
- hif_dev->rx_transfer_len =
- MAX_RX_BUF_SIZE - chk_idx - 4;
- hif_dev->rx_pad_len = pad_len;
-
nskb = __dev_alloc_skb(pkt_len + 32, GFP_ATOMIC);
if (!nskb) {
dev_err(&hif_dev->udev->dev,
@@ -617,6 +617,12 @@ static void ath9k_hif_usb_rx_stream(struct hif_device_usb *hif_dev,
spin_unlock(&hif_dev->rx_lock);
goto err;
}
+
+ hif_dev->rx_remain_len = index - MAX_RX_BUF_SIZE;
+ hif_dev->rx_transfer_len =
+ MAX_RX_BUF_SIZE - chk_idx - 4;
+ hif_dev->rx_pad_len = pad_len;
+
skb_reserve(nskb, 32);
RX_STAT_INC(hif_dev, skb_allocated);
@@ -654,6 +660,13 @@ err:
skb_pool[i]->len, USB_WLAN_RX_PIPE);
RX_STAT_INC(hif_dev, skb_completed);
}
+ return;
+invalid_pkt:
+ for (i = 0; i < pool_index; i++) {
+ dev_kfree_skb_any(skb_pool[i]);
+ RX_STAT_INC(hif_dev, skb_dropped);
+ }
+ return;
}
static void ath9k_hif_usb_rx_cb(struct urb *urb)
@@ -1411,8 +1424,6 @@ static void ath9k_hif_usb_disconnect(struct usb_interface *interface)
if (hif_dev->flags & HIF_USB_READY) {
ath9k_htc_hw_deinit(hif_dev->htc_handle, unplugged);
- ath9k_hif_usb_dev_deinit(hif_dev);
- ath9k_destroy_wmi(hif_dev->htc_handle->drv_priv);
ath9k_htc_hw_free(hif_dev->htc_handle);
}
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index 07ac88fb1c57..dae3d9c7b640 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -523,13 +523,13 @@ static bool ath_usb_eeprom_read(struct ath_common *common, u32 off, u16 *data)
(void)REG_READ(ah, AR5416_EEPROM_OFFSET + (off << AR5416_EEPROM_S));
if (!ath9k_hw_wait(ah,
- AR_EEPROM_STATUS_DATA,
+ AR_EEPROM_STATUS_DATA(ah),
AR_EEPROM_STATUS_DATA_BUSY |
AR_EEPROM_STATUS_DATA_PROT_ACCESS, 0,
AH_WAIT_TIMEOUT))
return false;
- *data = MS(REG_READ(ah, AR_EEPROM_STATUS_DATA),
+ *data = MS(REG_READ(ah, AR_EEPROM_STATUS_DATA(ah)),
AR_EEPROM_STATUS_DATA_VAL);
return true;
@@ -988,6 +988,8 @@ void ath9k_htc_disconnect_device(struct htc_target *htc_handle, bool hotunplug)
ath9k_deinit_device(htc_handle->drv_priv);
ath9k_stop_wmi(htc_handle->drv_priv);
+ ath9k_hif_usb_dealloc_urbs((struct hif_device_usb *)htc_handle->hif_dev);
+ ath9k_destroy_wmi(htc_handle->drv_priv);
ieee80211_free_hw(htc_handle->drv_priv->hw);
}
}
diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.c b/drivers/net/wireless/ath/ath9k/htc_hst.c
index ca05b07a45e6..fe62ff668f75 100644
--- a/drivers/net/wireless/ath/ath9k/htc_hst.c
+++ b/drivers/net/wireless/ath/ath9k/htc_hst.c
@@ -391,7 +391,7 @@ static void ath9k_htc_fw_panic_report(struct htc_target *htc_handle,
* HTC Messages are handled directly here and the obtained SKB
* is freed.
*
- * Service messages (Data, WMI) passed to the corresponding
+ * Service messages (Data, WMI) are passed to the corresponding
* endpoint RX handlers, which have to free the SKB.
*/
void ath9k_htc_rx_msg(struct htc_target *htc_handle,
@@ -478,6 +478,8 @@ invalid:
if (endpoint->ep_callbacks.rx)
endpoint->ep_callbacks.rx(endpoint->ep_callbacks.priv,
skb, epid);
+ else
+ goto invalid;
}
}
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 172081ffe477..5982e0db45f9 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -266,7 +266,7 @@ static bool ath9k_hw_read_revisions(struct ath_hw *ah)
case AR9300_DEVID_AR9330:
ah->hw_version.macVersion = AR_SREV_VERSION_9330;
if (!ah->get_mac_revision) {
- val = REG_READ(ah, AR_SREV);
+ val = REG_READ(ah, AR_SREV(ah));
ah->hw_version.macRev = MS(val, AR_SREV_REVISION2);
}
return true;
@@ -284,7 +284,7 @@ static bool ath9k_hw_read_revisions(struct ath_hw *ah)
return true;
}
- srev = REG_READ(ah, AR_SREV);
+ srev = REG_READ(ah, AR_SREV(ah));
if (srev == -1) {
ath_err(ath9k_hw_common(ah),
@@ -292,7 +292,7 @@ static bool ath9k_hw_read_revisions(struct ath_hw *ah)
return false;
}
- val = srev & AR_SREV_ID;
+ val = srev & AR_SREV_ID(ah);
if (val == 0xFF) {
val = srev;
@@ -601,12 +601,12 @@ static int __ath9k_hw_init(struct ath_hw *ah)
}
/*
- * Read back AR_WA into a permanent copy and set bits 14 and 17.
+ * Read back AR_WA(ah) into a permanent copy and set bits 14 and 17.
* We need to do this to avoid RMW of this register. We cannot
* read the reg when chip is asleep.
*/
if (AR_SREV_9300_20_OR_LATER(ah)) {
- ah->WARegVal = REG_READ(ah, AR_WA);
+ ah->WARegVal = REG_READ(ah, AR_WA(ah));
ah->WARegVal |= (AR_WA_D3_L1_DISABLE |
AR_WA_ASPM_TIMER_BASED_DISABLE);
}
@@ -618,7 +618,7 @@ static int __ath9k_hw_init(struct ath_hw *ah)
if (AR_SREV_9565(ah)) {
ah->WARegVal |= AR_WA_BIT22;
- REG_WRITE(ah, AR_WA, ah->WARegVal);
+ REG_WRITE(ah, AR_WA(ah), ah->WARegVal);
}
ath9k_hw_init_defaults(ah);
@@ -814,7 +814,7 @@ static void ath9k_hw_init_pll(struct ath_hw *ah,
REG_RMW_FIELD(ah, AR_CH0_DDR_DPLL3,
AR_CH0_DPLL3_PHASE_SHIFT, 0x1);
- REG_WRITE(ah, AR_RTC_PLL_CONTROL,
+ REG_WRITE(ah, AR_RTC_PLL_CONTROL(ah),
pll | AR_RTC_9300_PLL_BYPASS);
udelay(1000);
@@ -832,7 +832,7 @@ static void ath9k_hw_init_pll(struct ath_hw *ah,
AR_SREV_9561(ah)) {
u32 regval, pll2_divint, pll2_divfrac, refdiv;
- REG_WRITE(ah, AR_RTC_PLL_CONTROL,
+ REG_WRITE(ah, AR_RTC_PLL_CONTROL(ah),
pll | AR_RTC_9300_SOC_PLL_BYPASS);
udelay(1000);
@@ -911,7 +911,7 @@ static void ath9k_hw_init_pll(struct ath_hw *ah,
if (AR_SREV_9565(ah))
pll |= 0x40000;
- REG_WRITE(ah, AR_RTC_PLL_CONTROL, pll);
+ REG_WRITE(ah, AR_RTC_PLL_CONTROL(ah), pll);
if (AR_SREV_9485(ah) || AR_SREV_9340(ah) || AR_SREV_9330(ah) ||
AR_SREV_9550(ah))
@@ -925,7 +925,7 @@ static void ath9k_hw_init_pll(struct ath_hw *ah,
udelay(RTC_PLL_SETTLE_DELAY);
- REG_WRITE(ah, AR_RTC_SLEEP_CLK, AR_RTC_FORCE_DERIVED_CLK);
+ REG_WRITE(ah, AR_RTC_SLEEP_CLK(ah), AR_RTC_FORCE_DERIVED_CLK);
}
static void ath9k_hw_init_interrupt_masks(struct ath_hw *ah,
@@ -977,7 +977,7 @@ static void ath9k_hw_init_interrupt_masks(struct ath_hw *ah,
REG_WRITE(ah, AR_IMR_S2, ah->imrs2_reg);
if (ah->msi_enabled) {
- ah->msi_reg = REG_READ(ah, AR_PCIE_MSI);
+ ah->msi_reg = REG_READ(ah, AR_PCIE_MSI(ah));
ah->msi_reg |= AR_PCIE_MSI_HW_DBI_WR_EN;
ah->msi_reg &= AR_PCIE_MSI_HW_INT_PENDING_ADDR_MSI_64;
REG_WRITE(ah, AR_INTCFG, msi_cfg);
@@ -987,18 +987,18 @@ static void ath9k_hw_init_interrupt_masks(struct ath_hw *ah,
}
if (!AR_SREV_9100(ah)) {
- REG_WRITE(ah, AR_INTR_SYNC_CAUSE, 0xFFFFFFFF);
- REG_WRITE(ah, AR_INTR_SYNC_ENABLE, sync_default);
- REG_WRITE(ah, AR_INTR_SYNC_MASK, 0);
+ REG_WRITE(ah, AR_INTR_SYNC_CAUSE(ah), 0xFFFFFFFF);
+ REG_WRITE(ah, AR_INTR_SYNC_ENABLE(ah), sync_default);
+ REG_WRITE(ah, AR_INTR_SYNC_MASK(ah), 0);
}
REGWRITE_BUFFER_FLUSH(ah);
if (AR_SREV_9300_20_OR_LATER(ah)) {
- REG_WRITE(ah, AR_INTR_PRIO_ASYNC_ENABLE, 0);
- REG_WRITE(ah, AR_INTR_PRIO_ASYNC_MASK, 0);
- REG_WRITE(ah, AR_INTR_PRIO_SYNC_ENABLE, 0);
- REG_WRITE(ah, AR_INTR_PRIO_SYNC_MASK, 0);
+ REG_WRITE(ah, AR_INTR_PRIO_ASYNC_ENABLE(ah), 0);
+ REG_WRITE(ah, AR_INTR_PRIO_ASYNC_MASK(ah), 0);
+ REG_WRITE(ah, AR_INTR_PRIO_SYNC_ENABLE(ah), 0);
+ REG_WRITE(ah, AR_INTR_PRIO_SYNC_MASK(ah), 0);
}
}
@@ -1341,7 +1341,7 @@ static bool ath9k_hw_ar9330_reset_war(struct ath_hw *ah, int type)
return false;
}
- REG_WRITE(ah, AR_RTC_RESET, 1);
+ REG_WRITE(ah, AR_RTC_RESET(ah), 1);
}
return true;
@@ -1353,26 +1353,26 @@ static bool ath9k_hw_set_reset(struct ath_hw *ah, int type)
u32 tmpReg;
if (AR_SREV_9100(ah)) {
- REG_RMW_FIELD(ah, AR_RTC_DERIVED_CLK,
+ REG_RMW_FIELD(ah, AR_RTC_DERIVED_CLK(ah),
AR_RTC_DERIVED_CLK_PERIOD, 1);
- (void)REG_READ(ah, AR_RTC_DERIVED_CLK);
+ (void)REG_READ(ah, AR_RTC_DERIVED_CLK(ah));
}
ENABLE_REGWRITE_BUFFER(ah);
if (AR_SREV_9300_20_OR_LATER(ah)) {
- REG_WRITE(ah, AR_WA, ah->WARegVal);
+ REG_WRITE(ah, AR_WA(ah), ah->WARegVal);
udelay(10);
}
- REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN |
+ REG_WRITE(ah, AR_RTC_FORCE_WAKE(ah), AR_RTC_FORCE_WAKE_EN |
AR_RTC_FORCE_WAKE_ON_INT);
if (AR_SREV_9100(ah)) {
rst_flags = AR_RTC_RC_MAC_WARM | AR_RTC_RC_MAC_COLD |
AR_RTC_RC_COLD_RESET | AR_RTC_RC_WARM_RESET;
} else {
- tmpReg = REG_READ(ah, AR_INTR_SYNC_CAUSE);
+ tmpReg = REG_READ(ah, AR_INTR_SYNC_CAUSE(ah));
if (AR_SREV_9340(ah))
tmpReg &= AR9340_INTR_SYNC_LOCAL_TIMEOUT;
else
@@ -1381,7 +1381,7 @@ static bool ath9k_hw_set_reset(struct ath_hw *ah, int type)
if (tmpReg) {
u32 val;
- REG_WRITE(ah, AR_INTR_SYNC_ENABLE, 0);
+ REG_WRITE(ah, AR_INTR_SYNC_ENABLE(ah), 0);
val = AR_RC_HOSTIF;
if (!AR_SREV_9300_20_OR_LATER(ah))
@@ -1414,7 +1414,7 @@ static bool ath9k_hw_set_reset(struct ath_hw *ah, int type)
REG_CLR_BIT(ah, AR_CFG, AR_CFG_HALT_REQ);
}
- REG_WRITE(ah, AR_RTC_RC, rst_flags);
+ REG_WRITE(ah, AR_RTC_RC(ah), rst_flags);
REGWRITE_BUFFER_FLUSH(ah);
@@ -1425,8 +1425,8 @@ static bool ath9k_hw_set_reset(struct ath_hw *ah, int type)
else
udelay(100);
- REG_WRITE(ah, AR_RTC_RC, 0);
- if (!ath9k_hw_wait(ah, AR_RTC_RC, AR_RTC_RC_M, 0, AH_WAIT_TIMEOUT)) {
+ REG_WRITE(ah, AR_RTC_RC(ah), 0);
+ if (!ath9k_hw_wait(ah, AR_RTC_RC(ah), AR_RTC_RC_M, 0, AH_WAIT_TIMEOUT)) {
ath_dbg(ath9k_hw_common(ah), RESET, "RTC stuck in MAC reset\n");
return false;
}
@@ -1445,17 +1445,17 @@ static bool ath9k_hw_set_reset_power_on(struct ath_hw *ah)
ENABLE_REGWRITE_BUFFER(ah);
if (AR_SREV_9300_20_OR_LATER(ah)) {
- REG_WRITE(ah, AR_WA, ah->WARegVal);
+ REG_WRITE(ah, AR_WA(ah), ah->WARegVal);
udelay(10);
}
- REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN |
+ REG_WRITE(ah, AR_RTC_FORCE_WAKE(ah), AR_RTC_FORCE_WAKE_EN |
AR_RTC_FORCE_WAKE_ON_INT);
if (!AR_SREV_9100(ah) && !AR_SREV_9300_20_OR_LATER(ah))
REG_WRITE(ah, AR_RC, AR_RC_AHB);
- REG_WRITE(ah, AR_RTC_RESET, 0);
+ REG_WRITE(ah, AR_RTC_RESET(ah), 0);
REGWRITE_BUFFER_FLUSH(ah);
@@ -1464,11 +1464,11 @@ static bool ath9k_hw_set_reset_power_on(struct ath_hw *ah)
if (!AR_SREV_9100(ah) && !AR_SREV_9300_20_OR_LATER(ah))
REG_WRITE(ah, AR_RC, 0);
- REG_WRITE(ah, AR_RTC_RESET, 1);
+ REG_WRITE(ah, AR_RTC_RESET(ah), 1);
if (!ath9k_hw_wait(ah,
- AR_RTC_STATUS,
- AR_RTC_STATUS_M,
+ AR_RTC_STATUS(ah),
+ AR_RTC_STATUS_M(ah),
AR_RTC_STATUS_ON,
AH_WAIT_TIMEOUT)) {
ath_dbg(ath9k_hw_common(ah), RESET, "RTC not waking up\n");
@@ -1483,11 +1483,11 @@ static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type)
bool ret = false;
if (AR_SREV_9300_20_OR_LATER(ah)) {
- REG_WRITE(ah, AR_WA, ah->WARegVal);
+ REG_WRITE(ah, AR_WA(ah), ah->WARegVal);
udelay(10);
}
- REG_WRITE(ah, AR_RTC_FORCE_WAKE,
+ REG_WRITE(ah, AR_RTC_FORCE_WAKE(ah),
AR_RTC_FORCE_WAKE_EN | AR_RTC_FORCE_WAKE_ON_INT);
if (!ah->reset_power_on)
@@ -1521,7 +1521,7 @@ static bool ath9k_hw_chip_reset(struct ath_hw *ah,
else
reset_type = ATH9K_RESET_COLD;
} else if (ah->chip_fullsleep || REG_READ(ah, AR_Q_TXE) ||
- (REG_READ(ah, AR_CR) & AR_CR_RXE))
+ (REG_READ(ah, AR_CR) & AR_CR_RXE(ah)))
reset_type = ATH9K_RESET_COLD;
if (!ath9k_hw_set_reset_reg(ah, reset_type))
@@ -1955,7 +1955,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
ath9k_hw_settsf64(ah, tsf + tsf_offset);
if (AR_SREV_9280_20_OR_LATER(ah))
- REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL, AR_GPIO_JTAG_DISABLE);
+ REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL(ah), AR_GPIO_JTAG_DISABLE);
if (!AR_SREV_9300_20_OR_LATER(ah))
ar9002_hw_enable_async_fifo(ah);
@@ -2017,7 +2017,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
ath9k_hw_set_dma(ah);
if (!ath9k_hw_mci_is_enabled(ah))
- REG_WRITE(ah, AR_OBS, 8);
+ REG_WRITE(ah, AR_OBS(ah), 8);
ENABLE_REG_RMW_BUFFER(ah);
if (ah->config.rx_intr_mitigation) {
@@ -2111,7 +2111,7 @@ static void ath9k_set_power_sleep(struct ath_hw *ah)
* Clear the RTC force wake bit to allow the
* mac to go to sleep.
*/
- REG_CLR_BIT(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN);
+ REG_CLR_BIT(ah, AR_RTC_FORCE_WAKE(ah), AR_RTC_FORCE_WAKE_EN);
if (ath9k_hw_mci_is_enabled(ah))
udelay(100);
@@ -2121,13 +2121,13 @@ static void ath9k_set_power_sleep(struct ath_hw *ah)
/* Shutdown chip. Active low */
if (!AR_SREV_5416(ah) && !AR_SREV_9271(ah)) {
- REG_CLR_BIT(ah, AR_RTC_RESET, AR_RTC_RESET_EN);
+ REG_CLR_BIT(ah, AR_RTC_RESET(ah), AR_RTC_RESET_EN);
udelay(2);
}
- /* Clear Bit 14 of AR_WA after putting chip into Full Sleep mode. */
+ /* Clear Bit 14 of AR_WA(ah) after putting chip into Full Sleep mode. */
if (AR_SREV_9300_20_OR_LATER(ah))
- REG_WRITE(ah, AR_WA, ah->WARegVal & ~AR_WA_D3_L1_DISABLE);
+ REG_WRITE(ah, AR_WA(ah), ah->WARegVal & ~AR_WA_D3_L1_DISABLE);
}
/*
@@ -2143,7 +2143,7 @@ static void ath9k_set_power_network_sleep(struct ath_hw *ah)
if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
/* Set WakeOnInterrupt bit; clear ForceWake bit */
- REG_WRITE(ah, AR_RTC_FORCE_WAKE,
+ REG_WRITE(ah, AR_RTC_FORCE_WAKE(ah),
AR_RTC_FORCE_WAKE_ON_INT);
} else {
@@ -2163,15 +2163,15 @@ static void ath9k_set_power_network_sleep(struct ath_hw *ah)
* Clear the RTC force wake bit to allow the
* mac to go to sleep.
*/
- REG_CLR_BIT(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN);
+ REG_CLR_BIT(ah, AR_RTC_FORCE_WAKE(ah), AR_RTC_FORCE_WAKE_EN);
if (ath9k_hw_mci_is_enabled(ah))
udelay(30);
}
- /* Clear Bit 14 of AR_WA after putting chip into Net Sleep mode. */
+ /* Clear Bit 14 of AR_WA(ah) after putting chip into Net Sleep mode. */
if (AR_SREV_9300_20_OR_LATER(ah))
- REG_WRITE(ah, AR_WA, ah->WARegVal & ~AR_WA_D3_L1_DISABLE);
+ REG_WRITE(ah, AR_WA(ah), ah->WARegVal & ~AR_WA_D3_L1_DISABLE);
}
static bool ath9k_hw_set_power_awake(struct ath_hw *ah)
@@ -2179,14 +2179,14 @@ static bool ath9k_hw_set_power_awake(struct ath_hw *ah)
u32 val;
int i;
- /* Set Bits 14 and 17 of AR_WA before powering on the chip. */
+ /* Set Bits 14 and 17 of AR_WA(ah) before powering on the chip. */
if (AR_SREV_9300_20_OR_LATER(ah)) {
- REG_WRITE(ah, AR_WA, ah->WARegVal);
+ REG_WRITE(ah, AR_WA(ah), ah->WARegVal);
udelay(10);
}
- if ((REG_READ(ah, AR_RTC_STATUS) &
- AR_RTC_STATUS_M) == AR_RTC_STATUS_SHUTDOWN) {
+ if ((REG_READ(ah, AR_RTC_STATUS(ah)) &
+ AR_RTC_STATUS_M(ah)) == AR_RTC_STATUS_SHUTDOWN) {
if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_POWER_ON)) {
return false;
}
@@ -2194,10 +2194,10 @@ static bool ath9k_hw_set_power_awake(struct ath_hw *ah)
ath9k_hw_init_pll(ah, NULL);
}
if (AR_SREV_9100(ah))
- REG_SET_BIT(ah, AR_RTC_RESET,
+ REG_SET_BIT(ah, AR_RTC_RESET(ah),
AR_RTC_RESET_EN);
- REG_SET_BIT(ah, AR_RTC_FORCE_WAKE,
+ REG_SET_BIT(ah, AR_RTC_FORCE_WAKE(ah),
AR_RTC_FORCE_WAKE_EN);
if (AR_SREV_9100(ah))
mdelay(10);
@@ -2205,11 +2205,11 @@ static bool ath9k_hw_set_power_awake(struct ath_hw *ah)
udelay(50);
for (i = POWER_UP_TIME / 50; i > 0; i--) {
- val = REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M;
+ val = REG_READ(ah, AR_RTC_STATUS(ah)) & AR_RTC_STATUS_M(ah);
if (val == AR_RTC_STATUS_ON)
break;
udelay(50);
- REG_SET_BIT(ah, AR_RTC_FORCE_WAKE,
+ REG_SET_BIT(ah, AR_RTC_FORCE_WAKE(ah),
AR_RTC_FORCE_WAKE_EN);
}
if (i == 0) {
@@ -2701,16 +2701,16 @@ static void ath9k_hw_gpio_cfg_output_mux(struct ath_hw *ah, u32 gpio, u32 type)
u32 gpio_shift, tmp;
if (gpio > 11)
- addr = AR_GPIO_OUTPUT_MUX3;
+ addr = AR_GPIO_OUTPUT_MUX3(ah);
else if (gpio > 5)
- addr = AR_GPIO_OUTPUT_MUX2;
+ addr = AR_GPIO_OUTPUT_MUX2(ah);
else
- addr = AR_GPIO_OUTPUT_MUX1;
+ addr = AR_GPIO_OUTPUT_MUX1(ah);
gpio_shift = (gpio % 6) * 5;
if (AR_SREV_9280_20_OR_LATER(ah) ||
- (addr != AR_GPIO_OUTPUT_MUX1)) {
+ (addr != AR_GPIO_OUTPUT_MUX1(ah))) {
REG_RMW(ah, addr, (type << gpio_shift),
(0x1f << gpio_shift));
} else {
@@ -2754,13 +2754,13 @@ static void ath9k_hw_gpio_cfg_wmac(struct ath_hw *ah, u32 gpio, bool out,
AR7010_GPIO_OE_MASK << gpio_shift);
} else if (AR_SREV_SOC(ah)) {
gpio_set = out ? 1 : 0;
- REG_RMW(ah, AR_GPIO_OE_OUT, gpio_set << gpio_shift,
+ REG_RMW(ah, AR_GPIO_OE_OUT(ah), gpio_set << gpio_shift,
gpio_set << gpio_shift);
} else {
gpio_shift = gpio << 1;
gpio_set = out ?
AR_GPIO_OE_OUT_DRV_ALL : AR_GPIO_OE_OUT_DRV_NO;
- REG_RMW(ah, AR_GPIO_OE_OUT, gpio_set << gpio_shift,
+ REG_RMW(ah, AR_GPIO_OE_OUT(ah), gpio_set << gpio_shift,
AR_GPIO_OE_OUT_DRV << gpio_shift);
if (out)
@@ -2813,7 +2813,7 @@ u32 ath9k_hw_gpio_get(struct ath_hw *ah, u32 gpio)
u32 val = 0xffffffff;
#define MS_REG_READ(x, y) \
- (MS(REG_READ(ah, AR_GPIO_IN_OUT), x##_GPIO_IN_VAL) & BIT(y))
+ (MS(REG_READ(ah, AR_GPIO_IN_OUT(ah)), x##_GPIO_IN_VAL) & BIT(y))
WARN_ON(gpio >= ah->caps.num_gpio_pins);
@@ -2829,7 +2829,7 @@ u32 ath9k_hw_gpio_get(struct ath_hw *ah, u32 gpio)
else if (AR_DEVID_7010(ah))
val = REG_READ(ah, AR7010_GPIO_IN) & BIT(gpio);
else if (AR_SREV_9300_20_OR_LATER(ah))
- val = REG_READ(ah, AR_GPIO_IN) & BIT(gpio);
+ val = REG_READ(ah, AR_GPIO_IN(ah)) & BIT(gpio);
else
val = MS_REG_READ(AR, gpio);
} else if (BIT(gpio) & ah->caps.gpio_requested) {
@@ -2853,7 +2853,7 @@ void ath9k_hw_set_gpio(struct ath_hw *ah, u32 gpio, u32 val)
if (BIT(gpio) & ah->caps.gpio_mask) {
u32 out_addr = AR_DEVID_7010(ah) ?
- AR7010_GPIO_OUT : AR_GPIO_IN_OUT;
+ AR7010_GPIO_OUT : AR_GPIO_IN_OUT(ah);
REG_RMW(ah, out_addr, val << gpio, BIT(gpio));
} else if (BIT(gpio) & ah->caps.gpio_requested) {
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
index 58d02c19b6d0..b070403e083f 100644
--- a/drivers/net/wireless/ath/ath9k/mac.c
+++ b/drivers/net/wireless/ath/ath9k/mac.c
@@ -707,7 +707,7 @@ bool ath9k_hw_stopdmarecv(struct ath_hw *ah, bool *reset)
/* Wait for rx enable bit to go low */
for (i = AH_RX_STOP_DMA_TIMEOUT / AH_TIME_QUANTUM; i != 0; i--) {
- if ((REG_READ(ah, AR_CR) & AR_CR_RXE) == 0)
+ if ((REG_READ(ah, AR_CR) & AR_CR_RXE(ah)) == 0)
break;
if (!AR_SREV_9300_20_OR_LATER(ah)) {
@@ -762,14 +762,14 @@ bool ath9k_hw_intrpend(struct ath_hw *ah)
if (AR_SREV_9100(ah))
return true;
- host_isr = REG_READ(ah, AR_INTR_ASYNC_CAUSE);
+ host_isr = REG_READ(ah, AR_INTR_ASYNC_CAUSE(ah));
if (((host_isr & AR_INTR_MAC_IRQ) ||
(host_isr & AR_INTR_ASYNC_MASK_MCI)) &&
(host_isr != AR_INTR_SPURIOUS))
return true;
- host_isr = REG_READ(ah, AR_INTR_SYNC_CAUSE);
+ host_isr = REG_READ(ah, AR_INTR_SYNC_CAUSE(ah));
if ((host_isr & AR_INTR_SYNC_DEFAULT)
&& (host_isr != AR_INTR_SPURIOUS))
return true;
@@ -786,11 +786,11 @@ void ath9k_hw_kill_interrupts(struct ath_hw *ah)
REG_WRITE(ah, AR_IER, AR_IER_DISABLE);
(void) REG_READ(ah, AR_IER);
if (!AR_SREV_9100(ah)) {
- REG_WRITE(ah, AR_INTR_ASYNC_ENABLE, 0);
- (void) REG_READ(ah, AR_INTR_ASYNC_ENABLE);
+ REG_WRITE(ah, AR_INTR_ASYNC_ENABLE(ah), 0);
+ (void) REG_READ(ah, AR_INTR_ASYNC_ENABLE(ah));
- REG_WRITE(ah, AR_INTR_SYNC_ENABLE, 0);
- (void) REG_READ(ah, AR_INTR_SYNC_ENABLE);
+ REG_WRITE(ah, AR_INTR_SYNC_ENABLE(ah), 0);
+ (void) REG_READ(ah, AR_INTR_SYNC_ENABLE(ah));
}
}
EXPORT_SYMBOL(ath9k_hw_kill_interrupts);
@@ -824,11 +824,11 @@ static void __ath9k_hw_enable_interrupts(struct ath_hw *ah)
ath_dbg(common, INTERRUPT, "enable IER\n");
REG_WRITE(ah, AR_IER, AR_IER_ENABLE);
if (!AR_SREV_9100(ah)) {
- REG_WRITE(ah, AR_INTR_ASYNC_ENABLE, async_mask);
- REG_WRITE(ah, AR_INTR_ASYNC_MASK, async_mask);
+ REG_WRITE(ah, AR_INTR_ASYNC_ENABLE(ah), async_mask);
+ REG_WRITE(ah, AR_INTR_ASYNC_MASK(ah), async_mask);
- REG_WRITE(ah, AR_INTR_SYNC_ENABLE, sync_default);
- REG_WRITE(ah, AR_INTR_SYNC_MASK, sync_default);
+ REG_WRITE(ah, AR_INTR_SYNC_ENABLE(ah), sync_default);
+ REG_WRITE(ah, AR_INTR_SYNC_MASK(ah), sync_default);
}
ath_dbg(common, INTERRUPT, "AR_IMR 0x%x IER 0x%x\n",
REG_READ(ah, AR_IMR), REG_READ(ah, AR_IER));
@@ -841,26 +841,26 @@ static void __ath9k_hw_enable_interrupts(struct ath_hw *ah)
ath_dbg(ath9k_hw_common(ah), INTERRUPT,
"Enabling MSI, msi_mask=0x%X\n", ah->msi_mask);
- REG_WRITE(ah, AR_INTR_PRIO_ASYNC_ENABLE, ah->msi_mask);
- REG_WRITE(ah, AR_INTR_PRIO_ASYNC_MASK, ah->msi_mask);
+ REG_WRITE(ah, AR_INTR_PRIO_ASYNC_ENABLE(ah), ah->msi_mask);
+ REG_WRITE(ah, AR_INTR_PRIO_ASYNC_MASK(ah), ah->msi_mask);
ath_dbg(ath9k_hw_common(ah), INTERRUPT,
"AR_INTR_PRIO_ASYNC_ENABLE=0x%X, AR_INTR_PRIO_ASYNC_MASK=0x%X\n",
- REG_READ(ah, AR_INTR_PRIO_ASYNC_ENABLE),
- REG_READ(ah, AR_INTR_PRIO_ASYNC_MASK));
+ REG_READ(ah, AR_INTR_PRIO_ASYNC_ENABLE(ah)),
+ REG_READ(ah, AR_INTR_PRIO_ASYNC_MASK(ah)));
if (ah->msi_reg == 0)
- ah->msi_reg = REG_READ(ah, AR_PCIE_MSI);
+ ah->msi_reg = REG_READ(ah, AR_PCIE_MSI(ah));
ath_dbg(ath9k_hw_common(ah), INTERRUPT,
"AR_PCIE_MSI=0x%X, ah->msi_reg = 0x%X\n",
- AR_PCIE_MSI, ah->msi_reg);
+ AR_PCIE_MSI(ah), ah->msi_reg);
i = 0;
do {
- REG_WRITE(ah, AR_PCIE_MSI,
+ REG_WRITE(ah, AR_PCIE_MSI(ah),
(ah->msi_reg | AR_PCIE_MSI_ENABLE)
& msi_pend_addr_mask);
- _msi_reg = REG_READ(ah, AR_PCIE_MSI);
+ _msi_reg = REG_READ(ah, AR_PCIE_MSI(ah));
i++;
} while ((_msi_reg & AR_PCIE_MSI_ENABLE) == 0 && i < 200);
@@ -918,8 +918,8 @@ void ath9k_hw_set_interrupts(struct ath_hw *ah)
if (ah->msi_enabled) {
ath_dbg(common, INTERRUPT, "Clearing AR_INTR_PRIO_ASYNC_ENABLE\n");
- REG_WRITE(ah, AR_INTR_PRIO_ASYNC_ENABLE, 0);
- REG_READ(ah, AR_INTR_PRIO_ASYNC_ENABLE);
+ REG_WRITE(ah, AR_INTR_PRIO_ASYNC_ENABLE(ah), 0);
+ REG_READ(ah, AR_INTR_PRIO_ASYNC_ENABLE(ah));
}
ath_dbg(common, INTERRUPT, "New interrupt mask 0x%x\n", ints);
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index a074e23013c5..a09f9d223f3d 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -804,14 +804,14 @@ static bool ath_pci_eeprom_read(struct ath_common *common, u32 off, u16 *data)
common->ops->read(ah, AR5416_EEPROM_OFFSET + (off << AR5416_EEPROM_S));
if (!ath9k_hw_wait(ah,
- AR_EEPROM_STATUS_DATA,
+ AR_EEPROM_STATUS_DATA(ah),
AR_EEPROM_STATUS_DATA_BUSY |
AR_EEPROM_STATUS_DATA_PROT_ACCESS, 0,
AH_WAIT_TIMEOUT)) {
return false;
}
- *data = MS(common->ops->read(ah, AR_EEPROM_STATUS_DATA),
+ *data = MS(common->ops->read(ah, AR_EEPROM_STATUS_DATA(ah)),
AR_EEPROM_STATUS_DATA_VAL);
return true;
diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h
index 8983ea6fc727..9f5b8a538071 100644
--- a/drivers/net/wireless/ath/ath9k/reg.h
+++ b/drivers/net/wireless/ath/ath9k/reg.h
@@ -20,7 +20,7 @@
#include "../reg.h"
#define AR_CR 0x0008
-#define AR_CR_RXE (AR_SREV_9300_20_OR_LATER(ah) ? 0x0000000c : 0x00000004)
+#define AR_CR_RXE(_ah) (AR_SREV_9300_20_OR_LATER(_ah) ? 0x0000000c : 0x00000004)
#define AR_CR_RXD 0x00000020
#define AR_CR_SWI 0x00000040
@@ -352,10 +352,10 @@
#define AR_ISR_S1_QCU_TXEOL 0x03FF0000
#define AR_ISR_S1_QCU_TXEOL_S 16
-#define AR_ISR_S2_S (AR_SREV_9300_20_OR_LATER(ah) ? 0x00d0 : 0x00cc)
-#define AR_ISR_S3_S (AR_SREV_9300_20_OR_LATER(ah) ? 0x00d4 : 0x00d0)
-#define AR_ISR_S4_S (AR_SREV_9300_20_OR_LATER(ah) ? 0x00d8 : 0x00d4)
-#define AR_ISR_S5_S (AR_SREV_9300_20_OR_LATER(ah) ? 0x00dc : 0x00d8)
+#define AR_ISR_S2_S(_ah) (AR_SREV_9300_20_OR_LATER(_ah) ? 0x00d0 : 0x00cc)
+#define AR_ISR_S3_S(_ah) (AR_SREV_9300_20_OR_LATER(_ah) ? 0x00d4 : 0x00d0)
+#define AR_ISR_S4_S(_ah) (AR_SREV_9300_20_OR_LATER(_ah) ? 0x00d8 : 0x00d4)
+#define AR_ISR_S5_S(_ah) (AR_SREV_9300_20_OR_LATER(_ah) ? 0x00dc : 0x00d8)
#define AR_DMADBG_0 0x00e0
#define AR_DMADBG_1 0x00e4
#define AR_DMADBG_2 0x00e8
@@ -699,7 +699,7 @@
#define AR_RC_APB 0x00000002
#define AR_RC_HOSTIF 0x00000100
-#define AR_WA (AR_SREV_9340(ah) ? 0x40c4 : 0x4004)
+#define AR_WA(_ah) (AR_SREV_9340(_ah) ? 0x40c4 : 0x4004)
#define AR_WA_BIT6 (1 << 6)
#define AR_WA_BIT7 (1 << 7)
#define AR_WA_BIT23 (1 << 23)
@@ -721,7 +721,7 @@
#define AR_PM_STATE 0x4008
#define AR_PM_STATE_PME_D3COLD_VAUX 0x00100000
-#define AR_HOST_TIMEOUT (AR_SREV_9340(ah) ? 0x4008 : 0x4018)
+#define AR_HOST_TIMEOUT(_ah) (AR_SREV_9340(_ah) ? 0x4008 : 0x4018)
#define AR_HOST_TIMEOUT_APB_CNTR 0x0000FFFF
#define AR_HOST_TIMEOUT_APB_CNTR_S 0
#define AR_HOST_TIMEOUT_LCL_CNTR 0xFFFF0000
@@ -750,12 +750,12 @@
#define EEPROM_PROTECT_RP_1024_2047 0x4000
#define EEPROM_PROTECT_WP_1024_2047 0x8000
-#define AR_SREV \
- ((AR_SREV_9100(ah)) ? 0x0600 : (AR_SREV_9340(ah) \
+#define AR_SREV(_ah) \
+ ((AR_SREV_9100(_ah)) ? 0x0600 : (AR_SREV_9340(_ah) \
? 0x400c : 0x4020))
-#define AR_SREV_ID \
- ((AR_SREV_9100(ah)) ? 0x00000FFF : 0x000000FF)
+#define AR_SREV_ID(_ah) \
+ ((AR_SREV_9100(_ah)) ? 0x00000FFF : 0x000000FF)
#define AR_SREV_VERSION 0x000000F0
#define AR_SREV_VERSION_S 4
#define AR_SREV_REVISION 0x00000007
@@ -1038,11 +1038,11 @@ enum ath_usb_dev {
#define AR_INTR_SPURIOUS 0xFFFFFFFF
-#define AR_INTR_SYNC_CAUSE (AR_SREV_9340(ah) ? 0x4010 : 0x4028)
-#define AR_INTR_SYNC_CAUSE_CLR (AR_SREV_9340(ah) ? 0x4010 : 0x4028)
+#define AR_INTR_SYNC_CAUSE(_ah) (AR_SREV_9340(_ah) ? 0x4010 : 0x4028)
+#define AR_INTR_SYNC_CAUSE_CLR(_ah) (AR_SREV_9340(_ah) ? 0x4010 : 0x4028)
-#define AR_INTR_SYNC_ENABLE (AR_SREV_9340(ah) ? 0x4014 : 0x402c)
+#define AR_INTR_SYNC_ENABLE(_ah) (AR_SREV_9340(_ah) ? 0x4014 : 0x402c)
#define AR_INTR_SYNC_ENABLE_GPIO 0xFFFC0000
#define AR_INTR_SYNC_ENABLE_GPIO_S 18
@@ -1084,18 +1084,18 @@ enum {
};
-#define AR_INTR_ASYNC_MASK (AR_SREV_9340(ah) ? 0x4018 : 0x4030)
+#define AR_INTR_ASYNC_MASK(_ah) (AR_SREV_9340(_ah) ? 0x4018 : 0x4030)
#define AR_INTR_ASYNC_MASK_GPIO 0xFFFC0000
#define AR_INTR_ASYNC_MASK_GPIO_S 18
#define AR_INTR_ASYNC_MASK_MCI 0x00000080
#define AR_INTR_ASYNC_MASK_MCI_S 7
-#define AR_INTR_SYNC_MASK (AR_SREV_9340(ah) ? 0x401c : 0x4034)
+#define AR_INTR_SYNC_MASK(_ah) (AR_SREV_9340(_ah) ? 0x401c : 0x4034)
#define AR_INTR_SYNC_MASK_GPIO 0xFFFC0000
#define AR_INTR_SYNC_MASK_GPIO_S 18
-#define AR_INTR_ASYNC_CAUSE_CLR (AR_SREV_9340(ah) ? 0x4020 : 0x4038)
-#define AR_INTR_ASYNC_CAUSE (AR_SREV_9340(ah) ? 0x4020 : 0x4038)
+#define AR_INTR_ASYNC_CAUSE_CLR(_ah) (AR_SREV_9340(_ah) ? 0x4020 : 0x4038)
+#define AR_INTR_ASYNC_CAUSE(_ah) (AR_SREV_9340(_ah) ? 0x4020 : 0x4038)
#define AR_INTR_ASYNC_CAUSE_MCI 0x00000080
#define AR_INTR_ASYNC_USED (AR_INTR_MAC_IRQ | \
AR_INTR_ASYNC_CAUSE_MCI)
@@ -1105,13 +1105,13 @@ enum {
#define AR_INTR_ASYNC_ENABLE_MCI_S 7
-#define AR_INTR_ASYNC_ENABLE (AR_SREV_9340(ah) ? 0x4024 : 0x403c)
+#define AR_INTR_ASYNC_ENABLE(_ah) (AR_SREV_9340(_ah) ? 0x4024 : 0x403c)
#define AR_INTR_ASYNC_ENABLE_GPIO 0xFFFC0000
#define AR_INTR_ASYNC_ENABLE_GPIO_S 18
#define AR_PCIE_SERDES 0x4040
#define AR_PCIE_SERDES2 0x4044
-#define AR_PCIE_PM_CTRL (AR_SREV_9340(ah) ? 0x4004 : 0x4014)
+#define AR_PCIE_PM_CTRL(_ah) (AR_SREV_9340(_ah) ? 0x4004 : 0x4014)
#define AR_PCIE_PM_CTRL_ENA 0x00080000
#define AR_PCIE_PHY_REG3 0x18c08
@@ -1156,7 +1156,7 @@ enum {
#define AR9580_GPIO_MASK 0x0000F4FF
#define AR7010_GPIO_MASK 0x0000FFFF
-#define AR_GPIO_IN_OUT (AR_SREV_9340(ah) ? 0x4028 : 0x4048)
+#define AR_GPIO_IN_OUT(_ah) (AR_SREV_9340(_ah) ? 0x4028 : 0x4048)
#define AR_GPIO_IN_VAL 0x0FFFC000
#define AR_GPIO_IN_VAL_S 14
#define AR928X_GPIO_IN_VAL 0x000FFC00
@@ -1170,12 +1170,12 @@ enum {
#define AR7010_GPIO_IN_VAL 0x0000FFFF
#define AR7010_GPIO_IN_VAL_S 0
-#define AR_GPIO_IN (AR_SREV_9340(ah) ? 0x402c : 0x404c)
+#define AR_GPIO_IN(_ah) (AR_SREV_9340(_ah) ? 0x402c : 0x404c)
#define AR9300_GPIO_IN_VAL 0x0001FFFF
#define AR9300_GPIO_IN_VAL_S 0
-#define AR_GPIO_OE_OUT (AR_SREV_9340(ah) ? 0x4030 : \
- (AR_SREV_9300_20_OR_LATER(ah) ? 0x4050 : 0x404c))
+#define AR_GPIO_OE_OUT(_ah) (AR_SREV_9340(_ah) ? 0x4030 : \
+ (AR_SREV_9300_20_OR_LATER(_ah) ? 0x4050 : 0x404c))
#define AR_GPIO_OE_OUT_DRV 0x3
#define AR_GPIO_OE_OUT_DRV_NO 0x0
#define AR_GPIO_OE_OUT_DRV_LOW 0x1
@@ -1197,13 +1197,13 @@ enum {
#define AR7010_GPIO_INT_MASK 0x52024
#define AR7010_GPIO_FUNCTION 0x52028
-#define AR_GPIO_INTR_POL (AR_SREV_9340(ah) ? 0x4038 : \
- (AR_SREV_9300_20_OR_LATER(ah) ? 0x4058 : 0x4050))
+#define AR_GPIO_INTR_POL(_ah) (AR_SREV_9340(_ah) ? 0x4038 : \
+ (AR_SREV_9300_20_OR_LATER(_ah) ? 0x4058 : 0x4050))
#define AR_GPIO_INTR_POL_VAL 0x0001FFFF
#define AR_GPIO_INTR_POL_VAL_S 0
-#define AR_GPIO_INPUT_EN_VAL (AR_SREV_9340(ah) ? 0x403c : \
- (AR_SREV_9300_20_OR_LATER(ah) ? 0x405c : 0x4054))
+#define AR_GPIO_INPUT_EN_VAL(_ah) (AR_SREV_9340(_ah) ? 0x403c : \
+ (AR_SREV_9300_20_OR_LATER(_ah) ? 0x405c : 0x4054))
#define AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_DEF 0x00000004
#define AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_S 2
#define AR_GPIO_INPUT_EN_VAL_BT_FREQUENCY_DEF 0x00000008
@@ -1221,15 +1221,15 @@ enum {
#define AR_GPIO_RTC_RESET_OVERRIDE_ENABLE 0x00010000
#define AR_GPIO_JTAG_DISABLE 0x00020000
-#define AR_GPIO_INPUT_MUX1 (AR_SREV_9340(ah) ? 0x4040 : \
- (AR_SREV_9300_20_OR_LATER(ah) ? 0x4060 : 0x4058))
+#define AR_GPIO_INPUT_MUX1(_ah) (AR_SREV_9340(_ah) ? 0x4040 : \
+ (AR_SREV_9300_20_OR_LATER(_ah) ? 0x4060 : 0x4058))
#define AR_GPIO_INPUT_MUX1_BT_ACTIVE 0x000f0000
#define AR_GPIO_INPUT_MUX1_BT_ACTIVE_S 16
#define AR_GPIO_INPUT_MUX1_BT_PRIORITY 0x00000f00
#define AR_GPIO_INPUT_MUX1_BT_PRIORITY_S 8
-#define AR_GPIO_INPUT_MUX2 (AR_SREV_9340(ah) ? 0x4044 : \
- (AR_SREV_9300_20_OR_LATER(ah) ? 0x4064 : 0x405c))
+#define AR_GPIO_INPUT_MUX2(_ah) (AR_SREV_9340(_ah) ? 0x4044 : \
+ (AR_SREV_9300_20_OR_LATER(_ah) ? 0x4064 : 0x405c))
#define AR_GPIO_INPUT_MUX2_CLK25 0x0000000f
#define AR_GPIO_INPUT_MUX2_CLK25_S 0
#define AR_GPIO_INPUT_MUX2_RFSILENT 0x000000f0
@@ -1237,18 +1237,18 @@ enum {
#define AR_GPIO_INPUT_MUX2_RTC_RESET 0x00000f00
#define AR_GPIO_INPUT_MUX2_RTC_RESET_S 8
-#define AR_GPIO_OUTPUT_MUX1 (AR_SREV_9340(ah) ? 0x4048 : \
- (AR_SREV_9300_20_OR_LATER(ah) ? 0x4068 : 0x4060))
-#define AR_GPIO_OUTPUT_MUX2 (AR_SREV_9340(ah) ? 0x404c : \
- (AR_SREV_9300_20_OR_LATER(ah) ? 0x406c : 0x4064))
-#define AR_GPIO_OUTPUT_MUX3 (AR_SREV_9340(ah) ? 0x4050 : \
- (AR_SREV_9300_20_OR_LATER(ah) ? 0x4070 : 0x4068))
+#define AR_GPIO_OUTPUT_MUX1(_ah) (AR_SREV_9340(_ah) ? 0x4048 : \
+ (AR_SREV_9300_20_OR_LATER(_ah) ? 0x4068 : 0x4060))
+#define AR_GPIO_OUTPUT_MUX2(_ah) (AR_SREV_9340(_ah) ? 0x404c : \
+ (AR_SREV_9300_20_OR_LATER(_ah) ? 0x406c : 0x4064))
+#define AR_GPIO_OUTPUT_MUX3(_ah) (AR_SREV_9340(_ah) ? 0x4050 : \
+ (AR_SREV_9300_20_OR_LATER(_ah) ? 0x4070 : 0x4068))
-#define AR_INPUT_STATE (AR_SREV_9340(ah) ? 0x4054 : \
- (AR_SREV_9300_20_OR_LATER(ah) ? 0x4074 : 0x406c))
+#define AR_INPUT_STATE(_ah) (AR_SREV_9340(_ah) ? 0x4054 : \
+ (AR_SREV_9300_20_OR_LATER(_ah) ? 0x4074 : 0x406c))
-#define AR_EEPROM_STATUS_DATA (AR_SREV_9340(ah) ? 0x40c8 : \
- (AR_SREV_9300_20_OR_LATER(ah) ? 0x4084 : 0x407c))
+#define AR_EEPROM_STATUS_DATA(_ah) (AR_SREV_9340(_ah) ? 0x40c8 : \
+ (AR_SREV_9300_20_OR_LATER(_ah) ? 0x4084 : 0x407c))
#define AR_EEPROM_STATUS_DATA_VAL 0x0000ffff
#define AR_EEPROM_STATUS_DATA_VAL_S 0
#define AR_EEPROM_STATUS_DATA_BUSY 0x00010000
@@ -1256,13 +1256,13 @@ enum {
#define AR_EEPROM_STATUS_DATA_PROT_ACCESS 0x00040000
#define AR_EEPROM_STATUS_DATA_ABSENT_ACCESS 0x00080000
-#define AR_OBS (AR_SREV_9340(ah) ? 0x405c : \
- (AR_SREV_9300_20_OR_LATER(ah) ? 0x4088 : 0x4080))
+#define AR_OBS(_ah) (AR_SREV_9340(_ah) ? 0x405c : \
+ (AR_SREV_9300_20_OR_LATER(_ah) ? 0x4088 : 0x4080))
-#define AR_GPIO_PDPU (AR_SREV_9300_20_OR_LATER(ah) ? 0x4090 : 0x4088)
+#define AR_GPIO_PDPU(_ah) (AR_SREV_9300_20_OR_LATER(_ah) ? 0x4090 : 0x4088)
-#define AR_PCIE_MSI (AR_SREV_9340(ah) ? 0x40d8 : \
- (AR_SREV_9300_20_OR_LATER(ah) ? 0x40a4 : 0x4094))
+#define AR_PCIE_MSI(_ah) (AR_SREV_9340(_ah) ? 0x40d8 : \
+ (AR_SREV_9300_20_OR_LATER(_ah) ? 0x40a4 : 0x4094))
#define AR_PCIE_MSI_ENABLE 0x00000001
#define AR_PCIE_MSI_HW_DBI_WR_EN 0x02000000
#define AR_PCIE_MSI_HW_INT_PENDING_ADDR 0xFFA0C1FF /* bits 8..11: value must be 0x5060 */
@@ -1272,10 +1272,10 @@ enum {
#define AR_INTR_PRIO_RXLP 0x00000002
#define AR_INTR_PRIO_RXHP 0x00000004
-#define AR_INTR_PRIO_SYNC_ENABLE (AR_SREV_9340(ah) ? 0x4088 : 0x40c4)
-#define AR_INTR_PRIO_ASYNC_MASK (AR_SREV_9340(ah) ? 0x408c : 0x40c8)
-#define AR_INTR_PRIO_SYNC_MASK (AR_SREV_9340(ah) ? 0x4090 : 0x40cc)
-#define AR_INTR_PRIO_ASYNC_ENABLE (AR_SREV_9340(ah) ? 0x4094 : 0x40d4)
+#define AR_INTR_PRIO_SYNC_ENABLE(_ah) (AR_SREV_9340(_ah) ? 0x4088 : 0x40c4)
+#define AR_INTR_PRIO_ASYNC_MASK(_ah) (AR_SREV_9340(_ah) ? 0x408c : 0x40c8)
+#define AR_INTR_PRIO_SYNC_MASK(_ah) (AR_SREV_9340(_ah) ? 0x4090 : 0x40cc)
+#define AR_INTR_PRIO_ASYNC_ENABLE(_ah) (AR_SREV_9340(_ah) ? 0x4094 : 0x40d4)
#define AR_ENT_OTP 0x40d8
#define AR_ENT_OTP_CHAIN2_DISABLE 0x00020000
#define AR_ENT_OTP_49GHZ_DISABLE 0x00100000
@@ -1339,8 +1339,8 @@ enum {
#define AR_RTC_9160_PLL_CLKSEL_S 14
#define AR_RTC_BASE 0x00020000
-#define AR_RTC_RC \
- ((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0000) : 0x7000)
+#define AR_RTC_RC(_ah) \
+ ((AR_SREV_9100(_ah)) ? (AR_RTC_BASE + 0x0000) : 0x7000)
#define AR_RTC_RC_M 0x00000003
#define AR_RTC_RC_MAC_WARM 0x00000001
#define AR_RTC_RC_MAC_COLD 0x00000002
@@ -1357,8 +1357,8 @@ enum {
#define AR_RTC_REG_CONTROL1 0x700c
#define AR_RTC_REG_CONTROL1_SWREG_PROGRAM 0x00000001
-#define AR_RTC_PLL_CONTROL \
- ((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0014) : 0x7014)
+#define AR_RTC_PLL_CONTROL(_ah) \
+ ((AR_SREV_9100(_ah)) ? (AR_RTC_BASE + 0x0014) : 0x7014)
#define AR_RTC_PLL_CONTROL2 0x703c
@@ -1378,15 +1378,15 @@ enum {
#define PLL4_MEAS_DONE 0x8
#define SQSUM_DVC_MASK 0x007ffff8
-#define AR_RTC_RESET \
- ((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0040) : 0x7040)
+#define AR_RTC_RESET(_ah) \
+ ((AR_SREV_9100(_ah)) ? (AR_RTC_BASE + 0x0040) : 0x7040)
#define AR_RTC_RESET_EN (0x00000001)
-#define AR_RTC_STATUS \
- ((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0044) : 0x7044)
+#define AR_RTC_STATUS(_ah) \
+ ((AR_SREV_9100(_ah)) ? (AR_RTC_BASE + 0x0044) : 0x7044)
-#define AR_RTC_STATUS_M \
- ((AR_SREV_9100(ah)) ? 0x0000003f : 0x0000000f)
+#define AR_RTC_STATUS_M(_ah) \
+ ((AR_SREV_9100(_ah)) ? 0x0000003f : 0x0000000f)
#define AR_RTC_PM_STATUS_M 0x0000000f
@@ -1395,32 +1395,32 @@ enum {
#define AR_RTC_STATUS_SLEEP 0x00000004
#define AR_RTC_STATUS_WAKEUP 0x00000008
-#define AR_RTC_SLEEP_CLK \
- ((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0048) : 0x7048)
+#define AR_RTC_SLEEP_CLK(_ah) \
+ ((AR_SREV_9100(_ah)) ? (AR_RTC_BASE + 0x0048) : 0x7048)
#define AR_RTC_FORCE_DERIVED_CLK 0x2
#define AR_RTC_FORCE_SWREG_PRD 0x00000004
-#define AR_RTC_FORCE_WAKE \
- ((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x004c) : 0x704c)
+#define AR_RTC_FORCE_WAKE(_ah) \
+ ((AR_SREV_9100(_ah)) ? (AR_RTC_BASE + 0x004c) : 0x704c)
#define AR_RTC_FORCE_WAKE_EN 0x00000001
#define AR_RTC_FORCE_WAKE_ON_INT 0x00000002
-#define AR_RTC_INTR_CAUSE \
- ((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0050) : 0x7050)
+#define AR_RTC_INTR_CAUSE(_ah) \
+ ((AR_SREV_9100(_ah)) ? (AR_RTC_BASE + 0x0050) : 0x7050)
-#define AR_RTC_INTR_ENABLE \
- ((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0054) : 0x7054)
+#define AR_RTC_INTR_ENABLE(_ah) \
+ ((AR_SREV_9100(_ah)) ? (AR_RTC_BASE + 0x0054) : 0x7054)
-#define AR_RTC_INTR_MASK \
- ((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0058) : 0x7058)
+#define AR_RTC_INTR_MASK(_ah) \
+ ((AR_SREV_9100(_ah)) ? (AR_RTC_BASE + 0x0058) : 0x7058)
#define AR_RTC_KEEP_AWAKE 0x7034
/* RTC_DERIVED_* - only for AR9100 */
-#define AR_RTC_DERIVED_CLK \
- (AR_SREV_9100(ah) ? (AR_RTC_BASE + 0x0038) : 0x7038)
+#define AR_RTC_DERIVED_CLK(_ah) \
+ (AR_SREV_9100(_ah) ? (AR_RTC_BASE + 0x0038) : 0x7038)
#define AR_RTC_DERIVED_CLK_PERIOD 0x0000fffe
#define AR_RTC_DERIVED_CLK_PERIOD_S 1
@@ -2114,7 +2114,7 @@ enum {
#define AR9300_SM_BASE 0xa200
#define AR9002_PHY_AGC_CONTROL 0x9860
#define AR9003_PHY_AGC_CONTROL AR9300_SM_BASE + 0xc4
-#define AR_PHY_AGC_CONTROL (AR_SREV_9300_20_OR_LATER(ah) ? AR9003_PHY_AGC_CONTROL : AR9002_PHY_AGC_CONTROL)
+#define AR_PHY_AGC_CONTROL(_ah) (AR_SREV_9300_20_OR_LATER(_ah) ? AR9003_PHY_AGC_CONTROL : AR9002_PHY_AGC_CONTROL)
#define AR_PHY_AGC_CONTROL_CAL 0x00000001 /* do internal calibration */
#define AR_PHY_AGC_CONTROL_NF 0x00000002 /* do noise-floor calibration */
#define AR_PHY_AGC_CONTROL_OFFSET_CAL 0x00000800 /* allow offset calibration */
diff --git a/drivers/net/wireless/ath/ath9k/rng.c b/drivers/net/wireless/ath/ath9k/rng.c
index 58c0ab01771b..e1def77591c6 100644
--- a/drivers/net/wireless/ath/ath9k/rng.c
+++ b/drivers/net/wireless/ath/ath9k/rng.c
@@ -29,9 +29,9 @@ static int ath9k_rng_data_read(struct ath_softc *sc, u32 *buf, u32 buf_size)
ath9k_ps_wakeup(sc);
- REG_RMW_FIELD(ah, AR_PHY_TEST, AR_PHY_TEST_BBB_OBS_SEL, 1);
- REG_CLR_BIT(ah, AR_PHY_TEST, AR_PHY_TEST_RX_OBS_SEL_BIT5);
- REG_RMW_FIELD(ah, AR_PHY_TEST_CTL_STATUS, AR_PHY_TEST_CTL_RX_OBS_SEL, 0);
+ REG_RMW_FIELD(ah, AR_PHY_TEST(ah), AR_PHY_TEST_BBB_OBS_SEL, 1);
+ REG_CLR_BIT(ah, AR_PHY_TEST(ah), AR_PHY_TEST_RX_OBS_SEL_BIT5);
+ REG_RMW_FIELD(ah, AR_PHY_TEST_CTL_STATUS(ah), AR_PHY_TEST_CTL_RX_OBS_SEL, 0);
for (i = 0, j = 0; i < buf_size; i++) {
v1 = REG_READ(ah, AR_PHY_TST_ADC) & 0xffff;
diff --git a/drivers/net/wireless/ath/ath9k/wmi.c b/drivers/net/wireless/ath/ath9k/wmi.c
index f315c54bd3ac..19345b8f7bfd 100644
--- a/drivers/net/wireless/ath/ath9k/wmi.c
+++ b/drivers/net/wireless/ath/ath9k/wmi.c
@@ -341,6 +341,7 @@ int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id,
if (!time_left) {
ath_dbg(common, WMI, "Timeout waiting for WMI command: %s\n",
wmi_cmd_to_name(cmd_id));
+ wmi->last_seq_id = 0;
mutex_unlock(&wmi->op_mutex);
return -ETIMEDOUT;
}
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 39abb59d8771..ef9a8e0b75e6 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -1216,7 +1216,7 @@ static u8 ath_get_rate_txpower(struct ath_softc *sc, struct ath_buf *bf,
txpower -= 2 * power_offset;
}
- if (OLC_FOR_AR9280_20_LATER && is_cck)
+ if (OLC_FOR_AR9280_20_LATER(ah) && is_cck)
txpower -= 2;
txpower = max(txpower, 0);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index b115902eb475..a9690ec4c850 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -7928,13 +7928,10 @@ exit:
}
static s32
-cfg80211_set_channel(struct wiphy *wiphy, struct net_device *dev,
- struct ieee80211_channel *chan,
- enum nl80211_channel_type channel_type)
+brcmf_set_channel(struct brcmf_cfg80211_info *cfg, struct ieee80211_channel *chan)
{
u16 chspec = 0;
int err = 0;
- struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
struct brcmf_if *ifp = netdev_priv(cfg_to_ndev(cfg));
if (chan->flags & IEEE80211_CHAN_DISABLED)
@@ -7994,7 +7991,7 @@ brcmf_cfg80211_dump_survey(struct wiphy *wiphy, struct net_device *ndev,
/* Setting current channel to the requested channel */
info->filled = 0;
- if (cfg80211_set_channel(wiphy, ndev, info->channel, NL80211_CHAN_HT20))
+ if (brcmf_set_channel(cfg, info->channel))
return 0;
/* Disable mpc */
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
index e90a30808c22..0e1fa3f0dea2 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
@@ -383,7 +383,7 @@ struct brcmf_cfg80211_info {
struct brcmf_tlv {
u8 id;
u8 len;
- u8 data[1];
+ u8 data[];
};
static inline struct wiphy *cfg_to_wiphy(struct brcmf_cfg80211_info *cfg)
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
index 121893bbaa1d..8073f31be27d 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
@@ -726,17 +726,17 @@ static u32 brcmf_chip_tcm_rambase(struct brcmf_chip_priv *ci)
case BRCM_CC_43664_CHIP_ID:
case BRCM_CC_43666_CHIP_ID:
return 0x200000;
+ case BRCM_CC_4355_CHIP_ID:
case BRCM_CC_4359_CHIP_ID:
return (ci->pub.chiprev < 9) ? 0x180000 : 0x160000;
case BRCM_CC_4364_CHIP_ID:
case CY_CC_4373_CHIP_ID:
return 0x160000;
case CY_CC_43752_CHIP_ID:
+ case BRCM_CC_4377_CHIP_ID:
return 0x170000;
case BRCM_CC_4378_CHIP_ID:
return 0x352000;
- case CY_CC_89459_CHIP_ID:
- return ((ci->pub.chiprev < 9) ? 0x180000 : 0x160000);
default:
brcmf_err("unknown chip: %s\n", ci->pub.name);
break;
@@ -1426,8 +1426,8 @@ bool brcmf_chip_sr_capable(struct brcmf_chip *pub)
addr = CORE_CC_REG(base, sr_control1);
reg = chip->ops->read32(chip->ctx, addr);
return reg != 0;
+ case BRCM_CC_4355_CHIP_ID:
case CY_CC_4373_CHIP_ID:
- case CY_CC_89459_CHIP_ID:
/* explicitly check SR engine enable bit */
addr = CORE_CC_REG(base, sr_control0);
reg = chip->ops->read32(chip->ctx, addr);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
index 4a309e5a5707..f235beaddddb 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
@@ -299,6 +299,7 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
err);
goto done;
}
+ buf[sizeof(buf) - 1] = '\0';
ptr = (char *)buf;
strsep(&ptr, "\n");
@@ -319,15 +320,17 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
if (err) {
brcmf_dbg(TRACE, "retrieving clmver failed, %d\n", err);
} else {
+ buf[sizeof(buf) - 1] = '\0';
clmver = (char *)buf;
- /* store CLM version for adding it to revinfo debugfs file */
- memcpy(ifp->drvr->clmver, clmver, sizeof(ifp->drvr->clmver));
/* Replace all newline/linefeed characters with space
* character
*/
strreplace(clmver, '\n', ' ');
+ /* store CLM version for adding it to revinfo debugfs file */
+ memcpy(ifp->drvr->clmver, clmver, sizeof(ifp->drvr->clmver));
+
brcmf_dbg(INFO, "CLM version = %s\n", clmver);
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
index 83ea251cfcec..f599d5f896e8 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
@@ -336,6 +336,7 @@ static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb,
bphy_err(drvr, "%s: failed to expand headroom\n",
brcmf_ifname(ifp));
atomic_inc(&drvr->bus_if->stats.pktcow_failed);
+ dev_kfree_skb(skb);
goto done;
}
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
index cec53f934940..45fbcbdc7d9e 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
@@ -347,8 +347,11 @@ brcmf_msgbuf_alloc_pktid(struct device *dev,
count++;
} while (count < pktids->array_size);
- if (count == pktids->array_size)
+ if (count == pktids->array_size) {
+ dma_unmap_single(dev, *physaddr, skb->len - data_offset,
+ pktids->direction);
return -ENOMEM;
+ }
array[*idx].data_offset = data_offset;
array[*idx].physaddr = *physaddr;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
index e975d10e6009..d4492d02e4ea 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
@@ -1466,8 +1466,8 @@ int brcmf_p2p_notify_action_frame_rx(struct brcmf_if *ifp,
ETH_ALEN);
memcpy(mgmt_frame->sa, e->addr, ETH_ALEN);
mgmt_frame->frame_control = cpu_to_le16(IEEE80211_STYPE_ACTION);
- memcpy(&mgmt_frame->u, frame, mgmt_frame_len);
- mgmt_frame_len += offsetof(struct ieee80211_mgmt, u);
+ memcpy(mgmt_frame->u.body, frame, mgmt_frame_len);
+ mgmt_frame_len += offsetof(struct ieee80211_mgmt, u.body);
freq = ieee80211_channel_to_frequency(ch.control_ch_num,
ch.band == BRCMU_CHAN_BAND_2G ?
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
index b67f6d0810b6..a9b9b2dc62d4 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
@@ -51,18 +51,21 @@ enum brcmf_pcie_state {
BRCMF_FW_DEF(43602, "brcmfmac43602-pcie");
BRCMF_FW_DEF(4350, "brcmfmac4350-pcie");
BRCMF_FW_DEF(4350C, "brcmfmac4350c2-pcie");
+BRCMF_FW_CLM_DEF(4355, "brcmfmac4355-pcie");
+BRCMF_FW_CLM_DEF(4355C1, "brcmfmac4355c1-pcie");
BRCMF_FW_CLM_DEF(4356, "brcmfmac4356-pcie");
BRCMF_FW_CLM_DEF(43570, "brcmfmac43570-pcie");
BRCMF_FW_DEF(4358, "brcmfmac4358-pcie");
BRCMF_FW_DEF(4359, "brcmfmac4359-pcie");
-BRCMF_FW_DEF(4364, "brcmfmac4364-pcie");
+BRCMF_FW_CLM_DEF(4364B2, "brcmfmac4364b2-pcie");
+BRCMF_FW_CLM_DEF(4364B3, "brcmfmac4364b3-pcie");
BRCMF_FW_DEF(4365B, "brcmfmac4365b-pcie");
BRCMF_FW_DEF(4365C, "brcmfmac4365c-pcie");
BRCMF_FW_DEF(4366B, "brcmfmac4366b-pcie");
BRCMF_FW_DEF(4366C, "brcmfmac4366c-pcie");
BRCMF_FW_DEF(4371, "brcmfmac4371-pcie");
+BRCMF_FW_CLM_DEF(4377B3, "brcmfmac4377b3-pcie");
BRCMF_FW_CLM_DEF(4378B1, "brcmfmac4378b1-pcie");
-BRCMF_FW_DEF(4355, "brcmfmac89459-pcie");
/* firmware config files */
MODULE_FIRMWARE(BRCMF_FW_DEFAULT_PATH "brcmfmac*-pcie.txt");
@@ -78,13 +81,16 @@ static const struct brcmf_firmware_mapping brcmf_pcie_fwnames[] = {
BRCMF_FW_ENTRY(BRCM_CC_4350_CHIP_ID, 0x000000FF, 4350C),
BRCMF_FW_ENTRY(BRCM_CC_4350_CHIP_ID, 0xFFFFFF00, 4350),
BRCMF_FW_ENTRY(BRCM_CC_43525_CHIP_ID, 0xFFFFFFF0, 4365C),
+ BRCMF_FW_ENTRY(BRCM_CC_4355_CHIP_ID, 0x000007FF, 4355),
+ BRCMF_FW_ENTRY(BRCM_CC_4355_CHIP_ID, 0xFFFFF800, 4355C1), /* rev ID 12/C2 seen */
BRCMF_FW_ENTRY(BRCM_CC_4356_CHIP_ID, 0xFFFFFFFF, 4356),
BRCMF_FW_ENTRY(BRCM_CC_43567_CHIP_ID, 0xFFFFFFFF, 43570),
BRCMF_FW_ENTRY(BRCM_CC_43569_CHIP_ID, 0xFFFFFFFF, 43570),
BRCMF_FW_ENTRY(BRCM_CC_43570_CHIP_ID, 0xFFFFFFFF, 43570),
BRCMF_FW_ENTRY(BRCM_CC_4358_CHIP_ID, 0xFFFFFFFF, 4358),
BRCMF_FW_ENTRY(BRCM_CC_4359_CHIP_ID, 0xFFFFFFFF, 4359),
- BRCMF_FW_ENTRY(BRCM_CC_4364_CHIP_ID, 0xFFFFFFFF, 4364),
+ BRCMF_FW_ENTRY(BRCM_CC_4364_CHIP_ID, 0x0000000F, 4364B2), /* 3 */
+ BRCMF_FW_ENTRY(BRCM_CC_4364_CHIP_ID, 0xFFFFFFF0, 4364B3), /* 4 */
BRCMF_FW_ENTRY(BRCM_CC_4365_CHIP_ID, 0x0000000F, 4365B),
BRCMF_FW_ENTRY(BRCM_CC_4365_CHIP_ID, 0xFFFFFFF0, 4365C),
BRCMF_FW_ENTRY(BRCM_CC_4366_CHIP_ID, 0x0000000F, 4366B),
@@ -92,8 +98,8 @@ static const struct brcmf_firmware_mapping brcmf_pcie_fwnames[] = {
BRCMF_FW_ENTRY(BRCM_CC_43664_CHIP_ID, 0xFFFFFFF0, 4366C),
BRCMF_FW_ENTRY(BRCM_CC_43666_CHIP_ID, 0xFFFFFFF0, 4366C),
BRCMF_FW_ENTRY(BRCM_CC_4371_CHIP_ID, 0xFFFFFFFF, 4371),
+ BRCMF_FW_ENTRY(BRCM_CC_4377_CHIP_ID, 0xFFFFFFFF, 4377B3), /* revision ID 4 */
BRCMF_FW_ENTRY(BRCM_CC_4378_CHIP_ID, 0xFFFFFFFF, 4378B1), /* revision ID 3 */
- BRCMF_FW_ENTRY(CY_CC_89459_CHIP_ID, 0xFFFFFFFF, 4355),
};
#define BRCMF_PCIE_FW_UP_TIMEOUT 5000 /* msec */
@@ -1994,6 +2000,17 @@ static int brcmf_pcie_read_otp(struct brcmf_pciedev_info *devinfo)
int ret;
switch (devinfo->ci->chip) {
+ case BRCM_CC_4355_CHIP_ID:
+ coreid = BCMA_CORE_CHIPCOMMON;
+ base = 0x8c0;
+ words = 0xb2;
+ break;
+ case BRCM_CC_4364_CHIP_ID:
+ coreid = BCMA_CORE_CHIPCOMMON;
+ base = 0x8c0;
+ words = 0x1a0;
+ break;
+ case BRCM_CC_4377_CHIP_ID:
case BRCM_CC_4378_CHIP_ID:
coreid = BCMA_CORE_GCI;
base = 0x1120;
@@ -2590,6 +2607,7 @@ static const struct pci_device_id brcmf_pcie_devid_table[] = {
BRCMF_PCIE_DEVICE(BRCM_PCIE_4350_DEVICE_ID, WCC),
BRCMF_PCIE_DEVICE_SUB(0x4355, BRCM_PCIE_VENDOR_ID_BROADCOM, 0x4355, WCC),
BRCMF_PCIE_DEVICE(BRCM_PCIE_4354_RAW_DEVICE_ID, WCC),
+ BRCMF_PCIE_DEVICE(BRCM_PCIE_4355_DEVICE_ID, WCC),
BRCMF_PCIE_DEVICE(BRCM_PCIE_4356_DEVICE_ID, WCC),
BRCMF_PCIE_DEVICE(BRCM_PCIE_43567_DEVICE_ID, WCC),
BRCMF_PCIE_DEVICE(BRCM_PCIE_43570_DEVICE_ID, WCC),
@@ -2600,7 +2618,7 @@ static const struct pci_device_id brcmf_pcie_devid_table[] = {
BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_2G_DEVICE_ID, WCC),
BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_5G_DEVICE_ID, WCC),
BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_RAW_DEVICE_ID, WCC),
- BRCMF_PCIE_DEVICE(BRCM_PCIE_4364_DEVICE_ID, BCA),
+ BRCMF_PCIE_DEVICE(BRCM_PCIE_4364_DEVICE_ID, WCC),
BRCMF_PCIE_DEVICE(BRCM_PCIE_4365_DEVICE_ID, BCA),
BRCMF_PCIE_DEVICE(BRCM_PCIE_4365_2G_DEVICE_ID, BCA),
BRCMF_PCIE_DEVICE(BRCM_PCIE_4365_5G_DEVICE_ID, BCA),
@@ -2609,9 +2627,10 @@ static const struct pci_device_id brcmf_pcie_devid_table[] = {
BRCMF_PCIE_DEVICE(BRCM_PCIE_4366_2G_DEVICE_ID, BCA),
BRCMF_PCIE_DEVICE(BRCM_PCIE_4366_5G_DEVICE_ID, BCA),
BRCMF_PCIE_DEVICE(BRCM_PCIE_4371_DEVICE_ID, WCC),
+ BRCMF_PCIE_DEVICE(BRCM_PCIE_43596_DEVICE_ID, CYW),
+ BRCMF_PCIE_DEVICE(BRCM_PCIE_4377_DEVICE_ID, WCC),
BRCMF_PCIE_DEVICE(BRCM_PCIE_4378_DEVICE_ID, WCC),
- BRCMF_PCIE_DEVICE(CY_PCIE_89459_DEVICE_ID, CYW),
- BRCMF_PCIE_DEVICE(CY_PCIE_89459_RAW_DEVICE_ID, CYW),
+
{ /* end: all zeroes */ }
};
diff --git a/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h b/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h
index f4939cf62767..896615f57952 100644
--- a/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h
+++ b/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h
@@ -37,6 +37,7 @@
#define BRCM_CC_4350_CHIP_ID 0x4350
#define BRCM_CC_43525_CHIP_ID 43525
#define BRCM_CC_4354_CHIP_ID 0x4354
+#define BRCM_CC_4355_CHIP_ID 0x4355
#define BRCM_CC_4356_CHIP_ID 0x4356
#define BRCM_CC_43566_CHIP_ID 43566
#define BRCM_CC_43567_CHIP_ID 43567
@@ -51,12 +52,12 @@
#define BRCM_CC_43664_CHIP_ID 43664
#define BRCM_CC_43666_CHIP_ID 43666
#define BRCM_CC_4371_CHIP_ID 0x4371
+#define BRCM_CC_4377_CHIP_ID 0x4377
#define BRCM_CC_4378_CHIP_ID 0x4378
#define CY_CC_4373_CHIP_ID 0x4373
#define CY_CC_43012_CHIP_ID 43012
#define CY_CC_43439_CHIP_ID 43439
#define CY_CC_43752_CHIP_ID 43752
-#define CY_CC_89459_CHIP_ID 0x4355
/* USB Device IDs */
#define BRCM_USB_43143_DEVICE_ID 0xbd1e
@@ -72,6 +73,7 @@
#define BRCM_PCIE_4350_DEVICE_ID 0x43a3
#define BRCM_PCIE_4354_DEVICE_ID 0x43df
#define BRCM_PCIE_4354_RAW_DEVICE_ID 0x4354
+#define BRCM_PCIE_4355_DEVICE_ID 0x43dc
#define BRCM_PCIE_4356_DEVICE_ID 0x43ec
#define BRCM_PCIE_43567_DEVICE_ID 0x43d3
#define BRCM_PCIE_43570_DEVICE_ID 0x43d9
@@ -90,9 +92,9 @@
#define BRCM_PCIE_4366_2G_DEVICE_ID 0x43c4
#define BRCM_PCIE_4366_5G_DEVICE_ID 0x43c5
#define BRCM_PCIE_4371_DEVICE_ID 0x440d
+#define BRCM_PCIE_43596_DEVICE_ID 0x4415
+#define BRCM_PCIE_4377_DEVICE_ID 0x4488
#define BRCM_PCIE_4378_DEVICE_ID 0x4425
-#define CY_PCIE_89459_DEVICE_ID 0x4415
-#define CY_PCIE_89459_RAW_DEVICE_ID 0x4355
/* brcmsmac IDs */
#define BCM4313_D11N2G_ID 0x4727 /* 4313 802.11n 2.4G device */
diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2200.c b/drivers/net/wireless/intel/ipw2x00/ipw2200.c
index ca802af8cddc..d382f2017325 100644
--- a/drivers/net/wireless/intel/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/intel/ipw2x00/ipw2200.c
@@ -3427,7 +3427,7 @@ static void ipw_rx_queue_reset(struct ipw_priv *priv,
dma_unmap_single(&priv->pci_dev->dev,
rxq->pool[i].dma_addr,
IPW_RX_BUF_SIZE, DMA_FROM_DEVICE);
- dev_kfree_skb(rxq->pool[i].skb);
+ dev_kfree_skb_irq(rxq->pool[i].skb);
rxq->pool[i].skb = NULL;
}
list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
@@ -11383,9 +11383,14 @@ static int ipw_wdev_init(struct net_device *dev)
set_wiphy_dev(wdev->wiphy, &priv->pci_dev->dev);
/* With that information in place, we can now register the wiphy... */
- if (wiphy_register(wdev->wiphy))
- rc = -EIO;
+ rc = wiphy_register(wdev->wiphy);
+ if (rc)
+ goto out;
+
+ return 0;
out:
+ kfree(priv->ieee->a_band.channels);
+ kfree(priv->ieee->bg_band.channels);
return rc;
}
diff --git a/drivers/net/wireless/intel/iwlegacy/3945-mac.c b/drivers/net/wireless/intel/iwlegacy/3945-mac.c
index d7e99d50b287..9eaf5ec133f9 100644
--- a/drivers/net/wireless/intel/iwlegacy/3945-mac.c
+++ b/drivers/net/wireless/intel/iwlegacy/3945-mac.c
@@ -3372,10 +3372,12 @@ static DEVICE_ATTR(dump_errors, 0200, NULL, il3945_dump_error_log);
*
*****************************************************************************/
-static void
+static int
il3945_setup_deferred_work(struct il_priv *il)
{
il->workqueue = create_singlethread_workqueue(DRV_NAME);
+ if (!il->workqueue)
+ return -ENOMEM;
init_waitqueue_head(&il->wait_command_queue);
@@ -3392,6 +3394,8 @@ il3945_setup_deferred_work(struct il_priv *il)
timer_setup(&il->watchdog, il_bg_watchdog, 0);
tasklet_setup(&il->irq_tasklet, il3945_irq_tasklet);
+
+ return 0;
}
static void
@@ -3712,7 +3716,10 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
il_set_rxon_channel(il, &il->bands[NL80211_BAND_2GHZ].channels[5]);
- il3945_setup_deferred_work(il);
+ err = il3945_setup_deferred_work(il);
+ if (err)
+ goto out_remove_sysfs;
+
il3945_setup_handlers(il);
il_power_initialize(il);
@@ -3724,7 +3731,7 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err = il3945_setup_mac(il);
if (err)
- goto out_remove_sysfs;
+ goto out_destroy_workqueue;
il_dbgfs_register(il, DRV_NAME);
@@ -3733,9 +3740,10 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
-out_remove_sysfs:
+out_destroy_workqueue:
destroy_workqueue(il->workqueue);
il->workqueue = NULL;
+out_remove_sysfs:
sysfs_remove_group(&pdev->dev.kobj, &il3945_attribute_group);
out_release_irq:
free_irq(il->pci_dev->irq, il);
diff --git a/drivers/net/wireless/intel/iwlegacy/4965-mac.c b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
index 721b4042b4bf..0a4aa3c678c1 100644
--- a/drivers/net/wireless/intel/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
@@ -4020,7 +4020,7 @@ il4965_hdl_alive(struct il_priv *il, struct il_rx_buf *rxb)
if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
D_INFO("Initialization Alive received.\n");
- memcpy(&il->card_alive_init, &pkt->u.alive_frame,
+ memcpy(&il->card_alive_init, &pkt->u.raw,
sizeof(struct il_init_alive_resp));
pwork = &il->init_alive_start;
} else {
@@ -6211,10 +6211,12 @@ out:
mutex_unlock(&il->mutex);
}
-static void
+static int
il4965_setup_deferred_work(struct il_priv *il)
{
il->workqueue = create_singlethread_workqueue(DRV_NAME);
+ if (!il->workqueue)
+ return -ENOMEM;
init_waitqueue_head(&il->wait_command_queue);
@@ -6233,6 +6235,8 @@ il4965_setup_deferred_work(struct il_priv *il)
timer_setup(&il->watchdog, il_bg_watchdog, 0);
tasklet_setup(&il->irq_tasklet, il4965_irq_tasklet);
+
+ return 0;
}
static void
@@ -6618,7 +6622,10 @@ il4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_disable_msi;
}
- il4965_setup_deferred_work(il);
+ err = il4965_setup_deferred_work(il);
+ if (err)
+ goto out_free_irq;
+
il4965_setup_handlers(il);
/*********************************************
@@ -6656,6 +6663,7 @@ il4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
out_destroy_workqueue:
destroy_workqueue(il->workqueue);
il->workqueue = NULL;
+out_free_irq:
free_irq(il->pci_dev->irq, il);
out_disable_msi:
pci_disable_msi(il->pci_dev);
diff --git a/drivers/net/wireless/intel/iwlegacy/common.c b/drivers/net/wireless/intel/iwlegacy/common.c
index 341c17fe2af4..96002121bb8b 100644
--- a/drivers/net/wireless/intel/iwlegacy/common.c
+++ b/drivers/net/wireless/intel/iwlegacy/common.c
@@ -5174,7 +5174,7 @@ il_mac_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
memset(&il->current_ht_config, 0, sizeof(struct il_ht_config));
/* new association get rid of ibss beacon skb */
- dev_kfree_skb(il->beacon_skb);
+ dev_consume_skb_irq(il->beacon_skb);
il->beacon_skb = NULL;
il->timestamp = 0;
@@ -5293,7 +5293,7 @@ il_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
}
spin_lock_irqsave(&il->lock, flags);
- dev_kfree_skb(il->beacon_skb);
+ dev_consume_skb_irq(il->beacon_skb);
il->beacon_skb = skb;
timestamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp;
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
index ec6198f1b38c..3bdd6774716d 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
@@ -10,7 +10,7 @@
#include "fw/api/txq.h"
/* Highest firmware API version supported */
-#define IWL_22000_UCODE_API_MAX 72
+#define IWL_22000_UCODE_API_MAX 74
/* Lowest firmware API version supported */
#define IWL_22000_UCODE_API_MIN 39
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
index 0b052c2e563a..28c87a480246 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
@@ -153,6 +153,7 @@ enum iwl_legacy_cmds {
/**
* @TXPATH_FLUSH: &struct iwl_tx_path_flush_cmd
+ * response in &struct iwl_tx_path_flush_cmd_rsp
*/
TXPATH_FLUSH = 0x1e,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
index 9263413ee06f..8b38a0073077 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
@@ -83,7 +83,7 @@ enum iwl_data_path_subcmd_ids {
MONITOR_NOTIF = 0xF4,
/**
- * @RX_NO_DATA_NOTIF: &struct iwl_rx_no_data
+ * @RX_NO_DATA_NOTIF: &struct iwl_rx_no_data or &struct iwl_rx_no_data_ver_3
*/
RX_NO_DATA_NOTIF = 0xF5,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
index 74a01888715b..1c4e84932058 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
@@ -273,7 +273,7 @@ enum iwl_rx_mpdu_mac_info {
};
/* TSF overload low dword */
-enum iwl_rx_phy_data0 {
+enum iwl_rx_phy_he_data0 {
/* info type: HE any */
IWL_RX_PHY_DATA0_HE_BEAM_CHNG = 0x00000001,
IWL_RX_PHY_DATA0_HE_UPLINK = 0x00000002,
@@ -289,6 +289,25 @@ enum iwl_rx_phy_data0 {
IWL_RX_PHY_DATA0_HE_DELIM_EOF = 0x80000000,
};
+/* TSF overload low dword */
+enum iwl_rx_phy_eht_data0 {
+ /* info type: EHT any */
+ /* 1 bits reserved */
+ IWL_RX_PHY_DATA0_EHT_UPLINK = BIT(1),
+ IWL_RX_PHY_DATA0_EHT_BSS_COLOR_MASK = 0x000000fc,
+ IWL_RX_PHY_DATA0_ETH_SPATIAL_REUSE_MASK = 0x00000f00,
+ IWL_RX_PHY_DATA0_EHT_PS160 = BIT(12),
+ IWL_RX_PHY_DATA0_EHT_TXOP_DUR_MASK = 0x000fe000,
+ IWL_RX_PHY_DATA0_EHT_LDPC_EXT_SYM = BIT(20),
+ IWL_RX_PHY_DATA0_EHT_PRE_FEC_PAD_MASK = 0x00600000,
+ IWL_RX_PHY_DATA0_EHT_PE_DISAMBIG = BIT(23),
+ IWL_RX_PHY_DATA0_EHT_BW320_SLOT = BIT(24),
+ IWL_RX_PHY_DATA0_EHT_SIGA_CRC_OK = BIT(25),
+ IWL_RX_PHY_DATA0_EHT_PHY_VER = 0x1c000000,
+ /* 2 bits reserved */
+ IWL_RX_PHY_DATA0_EHT_DELIM_EOF = BIT(31),
+};
+
enum iwl_rx_phy_info_type {
IWL_RX_PHY_INFO_TYPE_NONE = 0,
IWL_RX_PHY_INFO_TYPE_CCK = 1,
@@ -301,19 +320,26 @@ enum iwl_rx_phy_info_type {
IWL_RX_PHY_INFO_TYPE_HE_TB = 8,
IWL_RX_PHY_INFO_TYPE_HE_MU_EXT = 9,
IWL_RX_PHY_INFO_TYPE_HE_TB_EXT = 10,
+ IWL_RX_PHY_INFO_TYPE_EHT_MU = 11,
+ IWL_RX_PHY_INFO_TYPE_EHT_TB = 12,
+ IWL_RX_PHY_INFO_TYPE_EHT_MU_EXT = 13,
+ IWL_RX_PHY_INFO_TYPE_EHT_TB_EXT = 14,
};
/* TSF overload high dword */
-enum iwl_rx_phy_data1 {
+enum iwl_rx_phy_common_data1 {
/*
* check this first - if TSF overload is set,
* see &enum iwl_rx_phy_info_type
*/
IWL_RX_PHY_DATA1_INFO_TYPE_MASK = 0xf0000000,
- /* info type: HT/VHT/HE any */
+ /* info type: HT/VHT/HE/EHT any */
IWL_RX_PHY_DATA1_LSIG_LEN_MASK = 0x0fff0000,
+};
+/* TSF overload high dword For HE rates*/
+enum iwl_rx_phy_he_data1 {
/* info type: HE MU/MU-EXT */
IWL_RX_PHY_DATA1_HE_MU_SIGB_COMPRESSION = 0x00000001,
IWL_RX_PHY_DATA1_HE_MU_SIBG_SYM_OR_USER_NUM_MASK = 0x0000001e,
@@ -329,8 +355,23 @@ enum iwl_rx_phy_data1 {
IWL_RX_PHY_DATA1_HE_TB_LOW_SS_MASK = 0x0000000e,
};
+/* TSF overload high dword For EHT-MU/TB rates*/
+enum iwl_rx_phy_eht_data1 {
+ /* info type: EHT-MU */
+ IWL_RX_PHY_DATA1_EHT_MU_NUM_SIG_SYM_USIGA2 = 0x0000001f,
+ /* info type: EHT-TB */
+ IWL_RX_PHY_DATA1_EHT_TB_PILOT_TYPE = BIT(0),
+ IWL_RX_PHY_DATA1_EHT_TB_LOW_SS = 0x0000001e,
+
+ /* info type: EHT any */
+ /* number of EHT-LTF symbols 0 - 1 EHT-LTF, 1 - 2 EHT-LTFs, 2 - 4 EHT-LTFs,
+ * 3 - 6 EHT-LTFs, 4 - 8 EHT-LTFs */
+ IWL_RX_PHY_DATA1_EHT_SIG_LTF_NUM = 0x000000e0,
+ IWL_RX_PHY_DATA1_EHT_RU_ALLOC = 0x0000ff00,
+};
+
/* goes into Metadata DW 7 */
-enum iwl_rx_phy_data2 {
+enum iwl_rx_phy_he_data2 {
/* info type: HE MU-EXT */
/* the a1/a2/... is what the PHY/firmware calls the values */
IWL_RX_PHY_DATA2_HE_MU_EXT_CH1_RU0 = 0x000000ff, /* a1 */
@@ -346,7 +387,7 @@ enum iwl_rx_phy_data2 {
};
/* goes into Metadata DW 8 */
-enum iwl_rx_phy_data3 {
+enum iwl_rx_phy_he_data3 {
/* info type: HE MU-EXT */
IWL_RX_PHY_DATA3_HE_MU_EXT_CH1_RU1 = 0x000000ff, /* c1 */
IWL_RX_PHY_DATA3_HE_MU_EXT_CH1_RU3 = 0x0000ff00, /* c2 */
@@ -355,7 +396,7 @@ enum iwl_rx_phy_data3 {
};
/* goes into Metadata DW 4 high 16 bits */
-enum iwl_rx_phy_data4 {
+enum iwl_rx_phy_he_he_data4 {
/* info type: HE MU-EXT */
IWL_RX_PHY_DATA4_HE_MU_EXT_CH1_CTR_RU = 0x0001,
IWL_RX_PHY_DATA4_HE_MU_EXT_CH2_CTR_RU = 0x0002,
@@ -366,6 +407,51 @@ enum iwl_rx_phy_data4 {
IWL_RX_PHY_DATA4_HE_MU_EXT_PREAMBLE_PUNC_TYPE_MASK = 0x0600,
};
+/* goes into Metadata DW 7 */
+enum iwl_rx_phy_eht_data2 {
+ /* info type: EHT-MU-EXT */
+ /* OFDM_RX_VECTOR_COMMON_RU_ALLOC_0_OUT */
+ IWL_RX_PHY_DATA2_EHT_MU_EXT_RU_ALLOC_A1 = 0x000001ff,
+ IWL_RX_PHY_DATA2_EHT_MU_EXT_RU_ALLOC_A2 = 0x0003fe00,
+ IWL_RX_PHY_DATA2_EHT_MU_EXT_RU_ALLOC_A3 = 0x01fc0000,
+
+ /* info type: EHT-TB-EXT */
+ IWL_RX_PHY_DATA2_EHT_TB_EXT_TRIG_SIGA1 = 0xffffffff,
+};
+
+/* goes into Metadata DW 8 */
+enum iwl_rx_phy_eht_data3 {
+ /* info type: EHT-MU-EXT */
+ /* OFDM_RX_VECTOR_COMMON_RU_ALLOC_1_OUT */
+ IWL_RX_PHY_DATA3_EHT_MU_EXT_RU_ALLOC_B1 = 0x000001ff,
+ IWL_RX_PHY_DATA3_EHT_MU_EXT_RU_ALLOC_B2 = 0x0003fe00,
+ IWL_RX_PHY_DATA3_EHT_MU_EXT_RU_ALLOC_B3 = 0x01fc0000,
+};
+
+/* goes into Metadata DW 4 */
+enum iwl_rx_phy_eht_data4 {
+ /* info type: EHT-MU-EXT */
+ /* OFDM_RX_VECTOR_COMMON_RU_ALLOC_2_OUT */
+ IWL_RX_PHY_DATA4_EHT_MU_EXT_RU_ALLOC_C1 = 0x000001ff,
+ IWL_RX_PHY_DATA4_EHT_MU_EXT_RU_ALLOC_C2 = 0x0003fe00,
+ IWL_RX_PHY_DATA4_EHT_MU_EXT_RU_ALLOC_C3 = 0x01fc0000,
+ IWL_RX_PHY_DATA4_EHT_MU_EXT_SIGB_MCS = 0x18000000,
+};
+
+/* goes into Metadata DW 16 */
+enum iwl_rx_phy_data5 {
+ /* info type: EHT any */
+ IWL_RX_PHY_DATA5_EHT_TYPE_AND_COMP = 0x00000003,
+ /* info type: EHT-TB */
+ IWL_RX_PHY_DATA5_EHT_TB_SPATIAL_REUSE1 = 0x0000003c,
+ IWL_RX_PHY_DATA5_EHT_TB_SPATIAL_REUSE2 = 0x000003c0,
+ /* info type: EHT-MU */
+ IWL_RX_PHY_DATA5_EHT_MU_PUNC_CH_CODE = 0x0000007c,
+ IWL_RX_PHY_DATA5_EHT_MU_STA_ID_USR = 0x0003ff80,
+ IWL_RX_PHY_DATA5_EHT_MU_NUM_USR_NON_OFDMA = 0x001c0000,
+ IWL_RX_PHY_DATA5_EHT_MU_SPATIAL_CONF_USR_FIELD = 0x0fe00000,
+};
+
/**
* struct iwl_rx_mpdu_desc_v1 - RX MPDU descriptor
*/
@@ -440,7 +526,9 @@ struct iwl_rx_mpdu_desc_v1 {
/**
* @phy_data1: valid only if
* %IWL_RX_MPDU_PHY_TSF_OVERLOAD is set,
- * see &enum iwl_rx_phy_data1.
+ * see &enum iwl_rx_phy_common_data1 or
+ * &enum iwl_rx_phy_he_data1 or
+ * &enum iwl_rx_phy_eht_data1.
*/
__le32 phy_data1;
};
@@ -540,11 +628,18 @@ struct iwl_rx_mpdu_desc_v3 {
__le32 phy_data1;
};
};
- /* DW16 & DW17 */
+ /* DW16 */
+ /**
+ * @phy_data5: valid only if
+ * %IWL_RX_MPDU_PHY_TSF_OVERLOAD is set,
+ * see &enum iwl_rx_phy_data5.
+ */
+ __le32 phy_data5;
+ /* DW17 */
/**
* @reserved: reserved
*/
- __le32 reserved[2];
+ __le32 reserved[1];
} __packed; /* RX_MPDU_RES_START_API_S_VER_3,
RX_MPDU_RES_START_API_S_VER_5 */
@@ -639,12 +734,14 @@ struct iwl_rx_mpdu_desc {
#define RX_NO_DATA_INFO_ERR_UNSUPPORTED_RATE 2
#define RX_NO_DATA_INFO_ERR_NO_DELIM 3
#define RX_NO_DATA_INFO_ERR_BAD_MAC_HDR 4
+#define RX_NO_DATA_INFO_LOW_ENERGY 5
#define RX_NO_DATA_FRAME_TIME_POS 0
#define RX_NO_DATA_FRAME_TIME_MSK (0xfffff << RX_NO_DATA_FRAME_TIME_POS)
#define RX_NO_DATA_RX_VEC0_HE_NSTS_MSK 0x03800000
#define RX_NO_DATA_RX_VEC0_VHT_NSTS_MSK 0x38000000
+#define RX_NO_DATA_RX_VEC2_EHT_NSTS_MSK 0x00f00000
/**
* struct iwl_rx_no_data - RX no data descriptor
@@ -654,7 +751,8 @@ struct iwl_rx_mpdu_desc {
* @on_air_rise_time: GP2 during on air rise
* @fr_time: frame time
* @rate: rate/mcs of frame
- * @phy_info: &enum iwl_rx_phy_data0 and &enum iwl_rx_phy_info_type
+ * @phy_info: &enum iwl_rx_phy_he_data0 or &enum iwl_rx_phy_eht_data0
+ * based on &enum iwl_rx_phy_info_type
* @rx_vec: DW-12:9 raw RX vectors from DSP according to modulation type.
* for VHT: OFDM_RX_VECTOR_SIGA1_OUT, OFDM_RX_VECTOR_SIGA2_OUT
* for HE: OFDM_RX_VECTOR_HE_SIGA1_OUT, OFDM_RX_VECTOR_HE_SIGA2_OUT
@@ -670,6 +768,33 @@ struct iwl_rx_no_data {
} __packed; /* RX_NO_DATA_NTFY_API_S_VER_1,
RX_NO_DATA_NTFY_API_S_VER_2 */
+/**
+ * struct iwl_rx_no_data_ver_3 - RX no data descriptor
+ * @info: 7:0 frame type, 15:8 RX error type
+ * @rssi: 7:0 energy chain-A,
+ * 15:8 chain-B, measured at FINA time (FINA_ENERGY), 16:23 channel
+ * @on_air_rise_time: GP2 during on air rise
+ * @fr_time: frame time
+ * @rate: rate/mcs of frame
+ * @phy_info: &enum iwl_rx_phy_eht_data0 and &enum iwl_rx_phy_info_type
+ * @rx_vec: DW-12:9 raw RX vectors from DSP according to modulation type.
+ * for VHT: OFDM_RX_VECTOR_SIGA1_OUT, OFDM_RX_VECTOR_SIGA2_OUT
+ * for HE: OFDM_RX_VECTOR_HE_SIGA1_OUT, OFDM_RX_VECTOR_HE_SIGA2_OUT
+ * for EHT: OFDM_RX_VECTOR_USIG_A1_OUT, OFDM_RX_VECTOR_USIG_A2_OUT,
+ * OFDM_RX_VECTOR_EHT_OUT, OFDM_RX_VECTOR_EHT_USER_FIELD_OUT
+ */
+struct iwl_rx_no_data_ver_3 {
+ __le32 info;
+ __le32 rssi;
+ __le32 on_air_rise_time;
+ __le32 fr_time;
+ __le32 rate;
+ __le32 phy_info[2];
+ __le32 rx_vec[4];
+} __packed; /* RX_NO_DATA_NTFY_API_S_VER_1,
+ RX_NO_DATA_NTFY_API_S_VER_2
+ RX_NO_DATA_NTFY_API_S_VER_3 */
+
struct iwl_frame_release {
u8 baid;
u8 reserved;
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/uefi.c b/drivers/net/wireless/intel/iwlwifi/fw/uefi.c
index 6d408cd0f517..0b6f694cf30d 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/uefi.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/uefi.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright(c) 2021 Intel Corporation
+ * Copyright(c) 2021-2022 Intel Corporation
*/
#include "iwl-drv.h"
@@ -246,6 +246,63 @@ void *iwl_uefi_get_reduced_power(struct iwl_trans *trans, size_t *len)
return data;
}
+static int iwl_uefi_step_parse(struct uefi_cnv_common_step_data *common_step_data,
+ struct iwl_trans *trans)
+{
+ if (common_step_data->revision != 1)
+ return -EINVAL;
+
+ trans->mbx_addr_0_step = (u32)common_step_data->revision |
+ (u32)common_step_data->cnvi_eq_channel << 8 |
+ (u32)common_step_data->cnvr_eq_channel << 16 |
+ (u32)common_step_data->radio1 << 24;
+ trans->mbx_addr_1_step = (u32)common_step_data->radio2;
+ return 0;
+}
+
+void iwl_uefi_get_step_table(struct iwl_trans *trans)
+{
+ struct uefi_cnv_common_step_data *data;
+ unsigned long package_size;
+ efi_status_t status;
+ int ret;
+
+ if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
+ return;
+
+ if (!efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE))
+ return;
+
+ /* TODO: we hardcode a maximum length here, because reading
+ * from the UEFI is not working. To implement this properly,
+ * we have to call efivar_entry_size().
+ */
+ package_size = IWL_HARDCODED_STEP_SIZE;
+
+ data = kmalloc(package_size, GFP_KERNEL);
+ if (!data)
+ return;
+
+ status = efi.get_variable(IWL_UEFI_STEP_NAME, &IWL_EFI_VAR_GUID,
+ NULL, &package_size, data);
+ if (status != EFI_SUCCESS) {
+ IWL_DEBUG_FW(trans,
+ "STEP UEFI variable not found 0x%lx\n", status);
+ goto out_free;
+ }
+
+ IWL_DEBUG_FW(trans, "Read STEP from UEFI with size %lu\n",
+ package_size);
+
+ ret = iwl_uefi_step_parse(data, trans);
+ if (ret < 0)
+ IWL_DEBUG_FW(trans, "Cannot read STEP tables. rev is invalid\n");
+
+out_free:
+ kfree(data);
+}
+IWL_EXPORT_SYMBOL(iwl_uefi_get_step_table);
+
#ifdef CONFIG_ACPI
static int iwl_uefi_sgom_parse(struct uefi_cnv_wlan_sgom_data *sgom_data,
struct iwl_fw_runtime *fwrt)
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/uefi.h b/drivers/net/wireless/intel/iwlwifi/fw/uefi.h
index 09d2a971b3a0..17089bc74cf9 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/uefi.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/uefi.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright(c) 2021 Intel Corporation
+ * Copyright(c) 2021-2022 Intel Corporation
*/
#ifndef __iwl_fw_uefi__
#define __iwl_fw_uefi__
@@ -8,6 +8,7 @@
#define IWL_UEFI_OEM_PNVM_NAME L"UefiCnvWlanOemSignedPnvm"
#define IWL_UEFI_REDUCED_POWER_NAME L"UefiCnvWlanReducedPower"
#define IWL_UEFI_SGOM_NAME L"UefiCnvWlanSarGeoOffsetMapping"
+#define IWL_UEFI_STEP_NAME L"UefiCnvCommonSTEP"
/*
* TODO: we have these hardcoded values that the caller must pass,
@@ -18,6 +19,7 @@
#define IWL_HARDCODED_PNVM_SIZE 4096
#define IWL_HARDCODED_REDUCE_POWER_SIZE 32768
#define IWL_HARDCODED_SGOM_SIZE 339
+#define IWL_HARDCODED_STEP_SIZE 6
struct pnvm_sku_package {
u8 rev;
@@ -32,6 +34,15 @@ struct uefi_cnv_wlan_sgom_data {
u8 offset_map[IWL_HARDCODED_SGOM_SIZE - 1];
} __packed;
+struct uefi_cnv_common_step_data {
+ u8 revision;
+ u8 step_mode;
+ u8 cnvi_eq_channel;
+ u8 cnvr_eq_channel;
+ u8 radio1;
+ u8 radio2;
+} __packed;
+
/*
* This is known to be broken on v4.19 and to work on v5.4. Until we
* figure out why this is the case and how to make it work, simply
@@ -40,6 +51,7 @@ struct uefi_cnv_wlan_sgom_data {
#ifdef CONFIG_EFI
void *iwl_uefi_get_pnvm(struct iwl_trans *trans, size_t *len);
void *iwl_uefi_get_reduced_power(struct iwl_trans *trans, size_t *len);
+void iwl_uefi_get_step_table(struct iwl_trans *trans);
#else /* CONFIG_EFI */
static inline
void *iwl_uefi_get_pnvm(struct iwl_trans *trans, size_t *len)
@@ -52,6 +64,11 @@ void *iwl_uefi_get_reduced_power(struct iwl_trans *trans, size_t *len)
{
return ERR_PTR(-EOPNOTSUPP);
}
+
+static inline
+void iwl_uefi_get_step_table(struct iwl_trans *trans)
+{
+}
#endif /* CONFIG_EFI */
#if defined(CONFIG_EFI) && defined(CONFIG_ACPI)
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h b/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h
index b84884034c74..3f7278014009 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h
@@ -141,12 +141,27 @@ struct iwl_prph_scratch_uefi_cfg {
} __packed; /* PERIPH_SCRATCH_UEFI_CFG_S */
/*
+ * struct iwl_prph_scratch_step_cfg - prph scratch step configuration
+ * @mbx_addr_0: [0:7] revision,
+ * [8:15] cnvi_to_cnvr length,
+ * [16:23] cnvr_to_cnvi channel length,
+ * [24:31] radio1 reserved
+ * @mbx_addr_1: [0:7] radio2 reserved
+ */
+
+struct iwl_prph_scratch_step_cfg {
+ __le32 mbx_addr_0;
+ __le32 mbx_addr_1;
+} __packed;
+
+/*
* struct iwl_prph_scratch_ctrl_cfg - prph scratch ctrl and config
* @version: version information of context info and HW
* @control: control flags of FH configurations
* @pnvm_cfg: ror configuration
* @hwm_cfg: hwm configuration
* @rbd_cfg: default RX queue configuration
+ * @step_cfg: step configuration
*/
struct iwl_prph_scratch_ctrl_cfg {
struct iwl_prph_scratch_version version;
@@ -155,6 +170,7 @@ struct iwl_prph_scratch_ctrl_cfg {
struct iwl_prph_scratch_hwm_cfg hwm_cfg;
struct iwl_prph_scratch_rbd_cfg rbd_cfg;
struct iwl_prph_scratch_uefi_cfg reduce_power_cfg;
+ struct iwl_prph_scratch_step_cfg step_cfg;
} __packed; /* PERIPH_SCRATCH_CTRL_CFG_S */
/*
@@ -165,7 +181,7 @@ struct iwl_prph_scratch_ctrl_cfg {
*/
struct iwl_prph_scratch {
struct iwl_prph_scratch_ctrl_cfg ctrl_cfg;
- __le32 reserved[12];
+ __le32 reserved[10];
struct iwl_context_info_dram dram;
} __packed; /* PERIPH_SCRATCH_S */
@@ -265,5 +281,6 @@ int iwl_trans_pcie_ctx_info_gen3_set_pnvm(struct iwl_trans *trans,
const void *data, u32 len);
int iwl_trans_pcie_ctx_info_gen3_set_reduce_power(struct iwl_trans *trans,
const void *data, u32 len);
-
+int iwl_trans_pcie_ctx_info_gen3_set_step(struct iwl_trans *trans,
+ u32 mbx_addr_0_step, u32 mbx_addr_1_step);
#endif /* __iwl_context_info_file_gen3_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
index ab7065c93826..4c977ba9cd85 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
@@ -163,7 +163,6 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw,
static int iwl_request_firmware(struct iwl_drv *drv, bool first)
{
const struct iwl_cfg *cfg = drv->trans->cfg;
- char tag[8];
if (drv->trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_9000 &&
(drv->trans->hw_rev_step != SILICON_B_STEP &&
@@ -174,13 +173,10 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first)
return -EINVAL;
}
- if (first) {
+ if (first)
drv->fw_index = cfg->ucode_api_max;
- sprintf(tag, "%d", drv->fw_index);
- } else {
+ else
drv->fw_index--;
- sprintf(tag, "%d", drv->fw_index);
- }
if (drv->fw_index < cfg->ucode_api_min) {
IWL_ERR(drv, "no suitable firmware found!\n");
@@ -200,8 +196,8 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first)
return -ENOENT;
}
- snprintf(drv->firmware_name, sizeof(drv->firmware_name), "%s%s.ucode",
- cfg->fw_name_pre, tag);
+ snprintf(drv->firmware_name, sizeof(drv->firmware_name), "%s%d.ucode",
+ cfg->fw_name_pre, drv->fw_index);
IWL_DEBUG_FW_INFO(drv, "attempting to load firmware '%s'\n",
drv->firmware_name);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
index 479a518c89a1..9aced3e44bc2 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
@@ -1001,6 +1001,8 @@ struct iwl_trans_txqs {
* This mode is set dynamically, depending on the WoWLAN values
* configured from the userspace at runtime.
* @iwl_trans_txqs: transport tx queues data.
+ * @mbx_addr_0_step: step address data 0
+ * @mbx_addr_1_step: step address data 1
*/
struct iwl_trans {
bool csme_own;
@@ -1057,6 +1059,8 @@ struct iwl_trans {
const char *name;
struct iwl_trans_txqs txqs;
+ u32 mbx_addr_0_step;
+ u32 mbx_addr_1_step;
/* pointer to trans specific struct */
/*Ensure that this pointer will always be aligned to sizeof pointer */
diff --git a/drivers/net/wireless/intel/iwlwifi/mei/main.c b/drivers/net/wireless/intel/iwlwifi/mei/main.c
index f9d11935ed97..67dfb77fedf7 100644
--- a/drivers/net/wireless/intel/iwlwifi/mei/main.c
+++ b/drivers/net/wireless/intel/iwlwifi/mei/main.c
@@ -788,7 +788,7 @@ static void iwl_mei_handle_amt_state(struct mei_cl_device *cldev,
if (mei->amt_enabled)
iwl_mei_set_init_conf(mei);
else if (iwl_mei_cache.ops)
- iwl_mei_cache.ops->rfkill(iwl_mei_cache.priv, false, false);
+ iwl_mei_cache.ops->rfkill(iwl_mei_cache.priv, false);
schedule_work(&mei->netdev_work);
@@ -829,7 +829,7 @@ static void iwl_mei_handle_csme_taking_ownership(struct mei_cl_device *cldev,
*/
mei->csme_taking_ownership = true;
- iwl_mei_cache.ops->rfkill(iwl_mei_cache.priv, true, true);
+ iwl_mei_cache.ops->rfkill(iwl_mei_cache.priv, true);
} else {
iwl_mei_send_sap_msg(cldev,
SAP_MSG_NOTIF_CSME_OWNERSHIP_CONFIRMED);
@@ -1774,7 +1774,7 @@ int iwl_mei_register(void *priv, const struct iwl_mei_ops *ops)
if (mei->amt_enabled)
iwl_mei_send_sap_msg(mei->cldev,
SAP_MSG_NOTIF_WIFIDR_UP);
- ops->rfkill(priv, mei->link_prot_state, false);
+ ops->rfkill(priv, mei->link_prot_state);
}
}
ret = 0;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
index 78d8b37eb71a..3779ac040ba0 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
@@ -438,13 +438,6 @@ static ssize_t iwl_dbgfs_bf_params_read(struct file *file,
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
-static inline char *iwl_dbgfs_is_match(char *name, char *buf)
-{
- int len = strlen(name);
-
- return !strncmp(name, buf, len) ? buf + len : NULL;
-}
-
static ssize_t iwl_dbgfs_os_device_timediff_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
index 1ce9450e5add..85b99316d029 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
@@ -324,12 +324,12 @@ static ssize_t iwl_dbgfs_sar_geo_profile_read(struct file *file,
pos += scnprintf(buf + pos, bufsz - pos,
"Use geographic profile %d\n", tbl_idx);
pos += scnprintf(buf + pos, bufsz - pos,
- "2.4GHz:\n\tChain A offset: %hhu dBm\n\tChain B offset: %hhu dBm\n\tmax tx power: %hhu dBm\n",
+ "2.4GHz:\n\tChain A offset: %u dBm\n\tChain B offset: %u dBm\n\tmax tx power: %u dBm\n",
mvm->fwrt.geo_profiles[tbl_idx - 1].bands[0].chains[0],
mvm->fwrt.geo_profiles[tbl_idx - 1].bands[0].chains[1],
mvm->fwrt.geo_profiles[tbl_idx - 1].bands[0].max);
pos += scnprintf(buf + pos, bufsz - pos,
- "5.2GHz:\n\tChain A offset: %hhu dBm\n\tChain B offset: %hhu dBm\n\tmax tx power: %hhu dBm\n",
+ "5.2GHz:\n\tChain A offset: %u dBm\n\tChain B offset: %u dBm\n\tmax tx power: %u dBm\n",
mvm->fwrt.geo_profiles[tbl_idx - 1].bands[1].chains[0],
mvm->fwrt.geo_profiles[tbl_idx - 1].bands[1].chains[1],
mvm->fwrt.geo_profiles[tbl_idx - 1].bands[1].max);
@@ -1069,7 +1069,7 @@ iwl_dbgfs_scan_ant_rxchain_read(struct file *file,
pos += scnprintf(buf + pos, bufsz - pos, "A");
if (mvm->scan_rx_ant & ANT_B)
pos += scnprintf(buf + pos, bufsz - pos, "B");
- pos += scnprintf(buf + pos, bufsz - pos, " (%hhx)\n", mvm->scan_rx_ant);
+ pos += scnprintf(buf + pos, bufsz - pos, " (%x)\n", mvm->scan_rx_ant);
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
index 6eee3d0b2157..05f3136b1c43 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
@@ -1206,7 +1206,7 @@ void iwl_mvm_ftm_range_resp(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
}
IWL_DEBUG_INFO(mvm, "Range response received\n");
- IWL_DEBUG_INFO(mvm, "request id: %lld, num of entries: %hhu\n",
+ IWL_DEBUG_INFO(mvm, "request id: %lld, num of entries: %u\n",
mvm->ftm_initiator.req->cookie, num_of_aps);
for (i = 0; i < num_of_aps && i < IWL_MVM_TOF_MAX_APS; i++) {
@@ -1298,7 +1298,7 @@ void iwl_mvm_ftm_range_resp(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
if (fw_has_api(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_API_FTM_RTT_ACCURACY))
- IWL_DEBUG_INFO(mvm, "RTT confidence: %hhu\n",
+ IWL_DEBUG_INFO(mvm, "RTT confidence: %u\n",
fw_ap->rttConfidence);
iwl_mvm_debug_range_resp(mvm, i, &result);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 5273ade71117..565522466eba 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -5116,6 +5116,9 @@ static void iwl_mvm_set_sta_rate(u32 rate_n_flags, struct rate_info *rinfo)
case RATE_MCS_CHAN_WIDTH_160:
rinfo->bw = RATE_INFO_BW_160;
break;
+ case RATE_MCS_CHAN_WIDTH_320:
+ rinfo->bw = RATE_INFO_BW_320;
+ break;
}
if (format == RATE_MCS_CCK_MSK ||
@@ -5176,6 +5179,10 @@ static void iwl_mvm_set_sta_rate(u32 rate_n_flags, struct rate_info *rinfo)
rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI;
switch (format) {
+ case RATE_MCS_EHT_MSK:
+ /* TODO: GI/LTF/RU. How does the firmware encode them? */
+ rinfo->flags |= RATE_INFO_FLAGS_EHT_MCS;
+ break;
case RATE_MCS_HE_MSK:
gi_ltf = u32_get_bits(rate_n_flags, RATE_MCS_HE_GI_LTF_MSK);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index ce6b701f3f4c..90bc95d96a78 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -501,7 +501,7 @@ struct iwl_mvm_tt_mgmt {
* @tzone: thermal zone device data
*/
struct iwl_mvm_thermal_device {
- s16 temp_trips[IWL_MAX_DTS_TRIPS];
+ struct thermal_trip trips[IWL_MAX_DTS_TRIPS];
u8 fw_trips_index[IWL_MAX_DTS_TRIPS];
struct thermal_zone_device *tzone;
};
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index ebe6d9c4ccaf..f4e9446d9dc2 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -1128,6 +1128,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
iwl_mvm_get_acpi_tables(mvm);
iwl_uefi_get_sgom_table(trans, &mvm->fwrt);
+ iwl_uefi_get_step_table(trans);
mvm->init_status = 0;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index 97b67270f384..549dbe0be223 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -10,37 +10,11 @@
#include "mvm.h"
#include "fw-api.h"
-static void *iwl_mvm_skb_get_hdr(struct sk_buff *skb)
-{
- struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
- u8 *data = skb->data;
-
- /* Alignment concerns */
- BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_he) % 4);
- BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_he_mu) % 4);
- BUILD_BUG_ON(sizeof(struct ieee80211_radiotap_lsig) % 4);
- BUILD_BUG_ON(sizeof(struct ieee80211_vendor_radiotap) % 4);
-
- if (rx_status->flag & RX_FLAG_RADIOTAP_HE)
- data += sizeof(struct ieee80211_radiotap_he);
- if (rx_status->flag & RX_FLAG_RADIOTAP_HE_MU)
- data += sizeof(struct ieee80211_radiotap_he_mu);
- if (rx_status->flag & RX_FLAG_RADIOTAP_LSIG)
- data += sizeof(struct ieee80211_radiotap_lsig);
- if (rx_status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) {
- struct ieee80211_vendor_radiotap *radiotap = (void *)data;
-
- data += sizeof(*radiotap) + radiotap->len + radiotap->pad;
- }
-
- return data;
-}
-
static inline int iwl_mvm_check_pn(struct iwl_mvm *mvm, struct sk_buff *skb,
int queue, struct ieee80211_sta *sta)
{
struct iwl_mvm_sta *mvmsta;
- struct ieee80211_hdr *hdr = iwl_mvm_skb_get_hdr(skb);
+ struct ieee80211_hdr *hdr = (void *)skb_mac_header(skb);
struct ieee80211_rx_status *stats = IEEE80211_SKB_RXCB(skb);
struct iwl_mvm_key_pn *ptk_pn;
int res;
@@ -179,6 +153,10 @@ static int iwl_mvm_create_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
if (unlikely(headlen < hdrlen))
return -EINVAL;
+ /* Since data doesn't move data while putting data on skb and that is
+ * the only way we use, data + len is the next place that hdr would be put
+ */
+ skb_set_mac_header(skb, skb->len);
skb_put_data(skb, hdr, hdrlen);
skb_put_data(skb, (u8 *)hdr + hdrlen + pad_len, headlen - hdrlen);
@@ -936,7 +914,7 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
struct iwl_rx_mpdu_desc *desc)
{
struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
- struct ieee80211_hdr *hdr = iwl_mvm_skb_get_hdr(skb);
+ struct ieee80211_hdr *hdr = (void *)skb_mac_header(skb);
struct iwl_mvm_sta *mvm_sta;
struct iwl_mvm_baid_data *baid_data;
struct iwl_mvm_reorder_buffer *buffer;
@@ -1346,6 +1324,10 @@ static void iwl_mvm_decode_he_phy_data(struct iwl_mvm *mvm,
case IWL_RX_PHY_INFO_TYPE_HT:
case IWL_RX_PHY_INFO_TYPE_VHT_SU:
case IWL_RX_PHY_INFO_TYPE_VHT_MU:
+ case IWL_RX_PHY_INFO_TYPE_EHT_MU:
+ case IWL_RX_PHY_INFO_TYPE_EHT_TB:
+ case IWL_RX_PHY_INFO_TYPE_EHT_MU_EXT:
+ case IWL_RX_PHY_INFO_TYPE_EHT_TB_EXT:
return;
case IWL_RX_PHY_INFO_TYPE_HE_TB_EXT:
he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE_KNOWN |
@@ -1690,6 +1672,9 @@ static void iwl_mvm_rx_fill_status(struct iwl_mvm *mvm,
case RATE_MCS_CHAN_WIDTH_160:
rx_status->bw = RATE_INFO_BW_160;
break;
+ case RATE_MCS_CHAN_WIDTH_320:
+ rx_status->bw = RATE_INFO_BW_320;
+ break;
}
/* must be before L-SIG data */
@@ -1726,6 +1711,9 @@ static void iwl_mvm_rx_fill_status(struct iwl_mvm *mvm,
rx_status->he_dcm =
!!(rate_n_flags & RATE_HE_DUAL_CARRIER_MODE_MSK);
break;
+ case RATE_MCS_EHT_MSK:
+ rx_status->encoding = RX_ENC_EHT;
+ break;
}
switch (format) {
@@ -1736,6 +1724,7 @@ static void iwl_mvm_rx_fill_status(struct iwl_mvm *mvm,
break;
case RATE_MCS_VHT_MSK:
case RATE_MCS_HE_MSK:
+ case RATE_MCS_EHT_MSK:
rx_status->nss =
u32_get_bits(rate_n_flags, RATE_MCS_NSS_MSK) + 1;
rx_status->rate_idx = rate_n_flags & RATE_MCS_CODE_MSK;
@@ -1747,10 +1736,11 @@ static void iwl_mvm_rx_fill_status(struct iwl_mvm *mvm,
rx_status->rate_idx = rate;
- if ((rate < 0 || rate > 0xFF) && net_ratelimit()) {
- IWL_ERR(mvm, "Invalid rate flags 0x%x, band %d,\n",
- rate_n_flags, rx_status->band);
+ if ((rate < 0 || rate > 0xFF)) {
rx_status->rate_idx = 0;
+ if (net_ratelimit())
+ IWL_ERR(mvm, "Invalid rate flags 0x%x, band %d,\n",
+ rate_n_flags, rx_status->band);
}
break;
@@ -2065,7 +2055,7 @@ void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi,
{
struct ieee80211_rx_status *rx_status;
struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_rx_no_data *desc = (void *)pkt->data;
+ struct iwl_rx_no_data_ver_3 *desc = (void *)pkt->data;
u32 rssi;
u32 info_type;
struct ieee80211_sta *sta = NULL;
@@ -2101,6 +2091,18 @@ void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi,
format = phy_data.rate_n_flags & RATE_MCS_MOD_TYPE_MSK;
+ if (iwl_fw_lookup_notif_ver(mvm->fw, DATA_PATH_GROUP,
+ RX_NO_DATA_NOTIF, 0) >= 3) {
+ if (unlikely(iwl_rx_packet_payload_len(pkt) <
+ sizeof(struct iwl_rx_no_data_ver_3)))
+ /* invalid len for ver 3 */
+ return;
+ } else {
+ if (format == RATE_MCS_EHT_MSK)
+ /* no support for EHT before version 3 API */
+ return;
+ }
+
/* Dont use dev_alloc_skb(), we'll have enough headroom once
* ieee80211_hdr pulled.
*/
@@ -2136,6 +2138,16 @@ void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi,
iwl_mvm_rx_fill_status(mvm, skb, &phy_data, queue);
+ /* no more radio tap info should be put after this point.
+ *
+ * We mark it as mac header, for upper layers to know where
+ * all radio tap header ends.
+ *
+ * Since data doesn't move data while putting data on skb and that is
+ * the only way we use, data + len is the next place that hdr would be put
+ */
+ skb_set_mac_header(skb, skb->len);
+
/*
* Override the nss from the rx_vec since the rate_n_flags has
* only 2 bits for the nss which gives a max of 4 ss but there
@@ -2152,6 +2164,10 @@ void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi,
le32_get_bits(desc->rx_vec[0],
RX_NO_DATA_RX_VEC0_HE_NSTS_MSK) + 1;
break;
+ case RATE_MCS_EHT_MSK:
+ rx_status->nss =
+ le32_get_bits(desc->rx_vec[2],
+ RX_NO_DATA_RX_VEC2_EHT_NSTS_MSK) + 1;
}
rcu_read_lock();
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
index 69cf3a372759..232c200af38f 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
@@ -573,11 +573,11 @@ int iwl_mvm_send_temp_report_ths_cmd(struct iwl_mvm *mvm)
* and uncompressed, the FW should get it compressed and sorted
*/
- /* compress temp_trips to cmd array, remove uninitialized values*/
+ /* compress trips to cmd array, remove uninitialized values*/
for (i = 0; i < IWL_MAX_DTS_TRIPS; i++) {
- if (mvm->tz_device.temp_trips[i] != S16_MIN) {
+ if (mvm->tz_device.trips[i].temperature != INT_MIN) {
cmd.thresholds[idx++] =
- cpu_to_le16(mvm->tz_device.temp_trips[i]);
+ cpu_to_le16((s16)(mvm->tz_device.trips[i].temperature / 1000));
}
}
cmd.num_temps = cpu_to_le32(idx);
@@ -593,8 +593,8 @@ int iwl_mvm_send_temp_report_ths_cmd(struct iwl_mvm *mvm)
*/
for (i = 0; i < idx; i++) {
for (j = 0; j < IWL_MAX_DTS_TRIPS; j++) {
- if (le16_to_cpu(cmd.thresholds[i]) ==
- mvm->tz_device.temp_trips[j])
+ if ((int)(le16_to_cpu(cmd.thresholds[i]) * 1000) ==
+ mvm->tz_device.trips[j].temperature)
mvm->tz_device.fw_trips_index[i] = j;
}
}
@@ -638,37 +638,12 @@ out:
return ret;
}
-static int iwl_mvm_tzone_get_trip_temp(struct thermal_zone_device *device,
- int trip, int *temp)
-{
- struct iwl_mvm *mvm = (struct iwl_mvm *)device->devdata;
-
- if (trip < 0 || trip >= IWL_MAX_DTS_TRIPS)
- return -EINVAL;
-
- *temp = mvm->tz_device.temp_trips[trip] * 1000;
-
- return 0;
-}
-
-static int iwl_mvm_tzone_get_trip_type(struct thermal_zone_device *device,
- int trip, enum thermal_trip_type *type)
-{
- if (trip < 0 || trip >= IWL_MAX_DTS_TRIPS)
- return -EINVAL;
-
- *type = THERMAL_TRIP_PASSIVE;
-
- return 0;
-}
-
static int iwl_mvm_tzone_set_trip_temp(struct thermal_zone_device *device,
int trip, int temp)
{
struct iwl_mvm *mvm = (struct iwl_mvm *)device->devdata;
struct iwl_mvm_thermal_device *tzone;
- int i, ret;
- s16 temperature;
+ int ret;
mutex_lock(&mvm->mutex);
@@ -678,40 +653,17 @@ static int iwl_mvm_tzone_set_trip_temp(struct thermal_zone_device *device,
goto out;
}
- if (trip < 0 || trip >= IWL_MAX_DTS_TRIPS) {
- ret = -EINVAL;
- goto out;
- }
-
if ((temp / 1000) > S16_MAX) {
ret = -EINVAL;
goto out;
}
- temperature = (s16)(temp / 1000);
tzone = &mvm->tz_device;
-
if (!tzone) {
ret = -EIO;
goto out;
}
- /* no updates*/
- if (tzone->temp_trips[trip] == temperature) {
- ret = 0;
- goto out;
- }
-
- /* already existing temperature */
- for (i = 0; i < IWL_MAX_DTS_TRIPS; i++) {
- if (tzone->temp_trips[i] == temperature) {
- ret = -EINVAL;
- goto out;
- }
- }
-
- tzone->temp_trips[trip] = temperature;
-
ret = iwl_mvm_send_temp_report_ths_cmd(mvm);
out:
mutex_unlock(&mvm->mutex);
@@ -720,8 +672,6 @@ out:
static struct thermal_zone_device_ops tzone_ops = {
.get_temp = iwl_mvm_tzone_get_temp,
- .get_trip_temp = iwl_mvm_tzone_get_trip_temp,
- .get_trip_type = iwl_mvm_tzone_get_trip_type,
.set_trip_temp = iwl_mvm_tzone_set_trip_temp,
};
@@ -743,7 +693,8 @@ static void iwl_mvm_thermal_zone_register(struct iwl_mvm *mvm)
BUILD_BUG_ON(ARRAY_SIZE(name) >= THERMAL_NAME_LENGTH);
sprintf(name, "iwlwifi_%u", atomic_inc_return(&counter) & 0xFF);
- mvm->tz_device.tzone = thermal_zone_device_register(name,
+ mvm->tz_device.tzone = thermal_zone_device_register_with_trips(name,
+ mvm->tz_device.trips,
IWL_MAX_DTS_TRIPS,
IWL_WRITABLE_TRIPS_MSK,
mvm, &tzone_ops,
@@ -766,8 +717,10 @@ static void iwl_mvm_thermal_zone_register(struct iwl_mvm *mvm)
/* 0 is a valid temperature,
* so initialize the array with S16_MIN which invalid temperature
*/
- for (i = 0 ; i < IWL_MAX_DTS_TRIPS; i++)
- mvm->tz_device.temp_trips[i] = S16_MIN;
+ for (i = 0 ; i < IWL_MAX_DTS_TRIPS; i++) {
+ mvm->tz_device.trips[i].temperature = INT_MIN;
+ mvm->tz_device.trips[i].type = THERMAL_TRIP_PASSIVE;
+ }
}
static int iwl_mvm_tcool_get_max_state(struct thermal_cooling_device *cdev,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index fadaa683a416..9813d7fa1800 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -1107,8 +1107,8 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
spin_lock(&mvmsta->lock);
/* nullfunc frames should go to the MGMT queue regardless of QOS,
- * the condition of !ieee80211_is_qos_nullfunc(fc) keeps the default
- * assignment of MGMT TID
+ * the conditions of !ieee80211_is_qos_nullfunc(fc) and
+ * !ieee80211_is_data_qos(fc) keep the default assignment of MGMT TID
*/
if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc)) {
tid = ieee80211_get_tid(hdr);
@@ -1133,7 +1133,8 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
/* update the tx_cmd hdr as it was already copied */
tx_cmd->hdr->seq_ctrl = hdr->seq_ctrl;
}
- } else if (ieee80211_is_data(fc) && !ieee80211_is_data_qos(fc)) {
+ } else if (ieee80211_is_data(fc) && !ieee80211_is_data_qos(fc) &&
+ !ieee80211_is_nullfunc(fc)) {
tid = IWL_TID_NON_QOS;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
index 75fd386b048e..cb60ba40fe97 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
@@ -136,6 +136,10 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
&control_flags);
prph_sc_ctrl->control.control_flags = cpu_to_le32(control_flags);
+ /* initialize the Step equalizer data */
+ prph_sc_ctrl->step_cfg.mbx_addr_0 = cpu_to_le32(trans->mbx_addr_0_step);
+ prph_sc_ctrl->step_cfg.mbx_addr_1 = cpu_to_le32(trans->mbx_addr_1_step);
+
/* allocate ucode sections in dram and set addresses */
ret = iwl_pcie_init_fw_sec(trans, fw, &prph_scratch->dram);
if (ret)
@@ -343,3 +347,4 @@ int iwl_trans_pcie_ctx_info_gen3_set_reduce_power(struct iwl_trans *trans,
return 0;
}
+
diff --git a/drivers/net/wireless/intersil/orinoco/hermes.c b/drivers/net/wireless/intersil/orinoco/hermes.c
index 256946552742..4888286727ff 100644
--- a/drivers/net/wireless/intersil/orinoco/hermes.c
+++ b/drivers/net/wireless/intersil/orinoco/hermes.c
@@ -38,6 +38,7 @@
* under either the MPL or the GPL.
*/
+#include <linux/net.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/delay.h>
diff --git a/drivers/net/wireless/intersil/orinoco/hw.c b/drivers/net/wireless/intersil/orinoco/hw.c
index 0aea35c9c11c..4fcca08e50de 100644
--- a/drivers/net/wireless/intersil/orinoco/hw.c
+++ b/drivers/net/wireless/intersil/orinoco/hw.c
@@ -931,6 +931,8 @@ int __orinoco_hw_setup_enc(struct orinoco_private *priv)
err = hermes_write_wordrec(hw, USER_BAP,
HERMES_RID_CNFAUTHENTICATION_AGERE,
auth_flag);
+ if (err)
+ return err;
}
err = hermes_write_wordrec(hw, USER_BAP,
HERMES_RID_CNFWEPENABLED_AGERE,
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index c57c8903b7c0..4cc4eaf80b14 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -2034,7 +2034,7 @@ static void mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
struct ieee80211_channel *chan)
{
struct mac80211_hwsim_data *data = hw->priv;
- u32 _pid = READ_ONCE(data->wmediumd);
+ u32 _portid = READ_ONCE(data->wmediumd);
if (ieee80211_hw_check(hw, SUPPORTS_RC_TABLE)) {
struct ieee80211_tx_info *txi = IEEE80211_SKB_CB(skb);
@@ -2045,8 +2045,8 @@ static void mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
mac80211_hwsim_monitor_rx(hw, skb, chan);
- if (_pid || hwsim_virtio_enabled)
- return mac80211_hwsim_tx_frame_nl(hw, skb, _pid, chan);
+ if (_portid || hwsim_virtio_enabled)
+ return mac80211_hwsim_tx_frame_nl(hw, skb, _portid, chan);
data->tx_pkts++;
data->tx_bytes += skb->len;
diff --git a/drivers/net/wireless/marvell/libertas/cfg.c b/drivers/net/wireless/marvell/libertas/cfg.c
index 3e065cbb0af9..b700c213d10c 100644
--- a/drivers/net/wireless/marvell/libertas/cfg.c
+++ b/drivers/net/wireless/marvell/libertas/cfg.c
@@ -416,10 +416,20 @@ static int lbs_add_cf_param_tlv(u8 *tlv)
static int lbs_add_wpa_tlv(u8 *tlv, const u8 *ie, u8 ie_len)
{
- size_t tlv_len;
+ struct mrvl_ie_data *wpatlv = (struct mrvl_ie_data *)tlv;
+ const struct element *wpaie;
+
+ /* Find the first RSN or WPA IE to use */
+ wpaie = cfg80211_find_elem(WLAN_EID_RSN, ie, ie_len);
+ if (!wpaie)
+ wpaie = cfg80211_find_vendor_elem(WLAN_OUI_MICROSOFT,
+ WLAN_OUI_TYPE_MICROSOFT_WPA,
+ ie, ie_len);
+ if (!wpaie || wpaie->datalen > 128)
+ return 0;
/*
- * We need just convert an IE to an TLV. IEs use u8 for the header,
+ * Convert the found IE to a TLV. IEs use u8 for the header,
* u8 type
* u8 len
* u8[] data
@@ -428,14 +438,47 @@ static int lbs_add_wpa_tlv(u8 *tlv, const u8 *ie, u8 ie_len)
* __le16 len
* u8[] data
*/
- *tlv++ = *ie++;
- *tlv++ = 0;
- tlv_len = *tlv++ = *ie++;
- *tlv++ = 0;
- while (tlv_len--)
- *tlv++ = *ie++;
- /* the TLV is two bytes larger than the IE */
- return ie_len + 2;
+ wpatlv->header.type = cpu_to_le16(wpaie->id);
+ wpatlv->header.len = cpu_to_le16(wpaie->datalen);
+ memcpy(wpatlv->data, wpaie->data, wpaie->datalen);
+
+ /* Return the total number of bytes added to the TLV buffer */
+ return sizeof(struct mrvl_ie_header) + wpaie->datalen;
+}
+
+/* Add WPS enrollee TLV
+ */
+#define LBS_MAX_WPS_ENROLLEE_TLV_SIZE \
+ (sizeof(struct mrvl_ie_header) \
+ + 256)
+
+static int lbs_add_wps_enrollee_tlv(u8 *tlv, const u8 *ie, size_t ie_len)
+{
+ struct mrvl_ie_data *wpstlv = (struct mrvl_ie_data *)tlv;
+ const struct element *wpsie;
+
+ /* Look for a WPS IE and add it to the probe request */
+ wpsie = cfg80211_find_vendor_elem(WLAN_OUI_MICROSOFT,
+ WLAN_OUI_TYPE_MICROSOFT_WPS,
+ ie, ie_len);
+ if (!wpsie)
+ return 0;
+
+ /* Convert the WPS IE to a TLV. The IE looks like this:
+ * u8 type (WLAN_EID_VENDOR_SPECIFIC)
+ * u8 len
+ * u8[] data
+ * but the TLV will look like this instead:
+ * __le16 type (TLV_TYPE_WPS_ENROLLEE)
+ * __le16 len
+ * u8[] data
+ */
+ wpstlv->header.type = cpu_to_le16(TLV_TYPE_WPS_ENROLLEE);
+ wpstlv->header.len = cpu_to_le16(wpsie->datalen);
+ memcpy(wpstlv->data, wpsie->data, wpsie->datalen);
+
+ /* Return the total number of bytes added to the TLV buffer */
+ return sizeof(struct mrvl_ie_header) + wpsie->datalen;
}
/*
@@ -664,14 +707,15 @@ static int lbs_ret_scan(struct lbs_private *priv, unsigned long dummy,
/*
- * Our scan command contains a TLV, consting of a SSID TLV, a channel list
- * TLV and a rates TLV. Determine the maximum size of them:
+ * Our scan command contains a TLV, consisting of a SSID TLV, a channel list
+ * TLV, a rates TLV, and an optional WPS IE. Determine the maximum size of them:
*/
#define LBS_SCAN_MAX_CMD_SIZE \
(sizeof(struct cmd_ds_802_11_scan) \
+ LBS_MAX_SSID_TLV_SIZE \
+ LBS_MAX_CHANNEL_LIST_TLV_SIZE \
- + LBS_MAX_RATES_TLV_SIZE)
+ + LBS_MAX_RATES_TLV_SIZE \
+ + LBS_MAX_WPS_ENROLLEE_TLV_SIZE)
/*
* Assumes priv->scan_req is initialized and valid
@@ -720,6 +764,11 @@ static void lbs_scan_worker(struct work_struct *work)
/* add rates TLV */
tlv += lbs_add_supported_rates_tlv(tlv);
+ /* add optional WPS enrollee TLV */
+ if (priv->scan_req->ie && priv->scan_req->ie_len)
+ tlv += lbs_add_wps_enrollee_tlv(tlv, priv->scan_req->ie,
+ priv->scan_req->ie_len);
+
if (priv->scan_channel < priv->scan_req->n_channels) {
cancel_delayed_work(&priv->scan_work);
if (netif_running(priv->dev))
@@ -2106,6 +2155,7 @@ int lbs_cfg_register(struct lbs_private *priv)
int ret;
wdev->wiphy->max_scan_ssids = 1;
+ wdev->wiphy->max_scan_ie_len = 256;
wdev->wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
wdev->wiphy->interface_modes =
diff --git a/drivers/net/wireless/marvell/libertas/cmdresp.c b/drivers/net/wireless/marvell/libertas/cmdresp.c
index cb515c5584c1..74cb7551f427 100644
--- a/drivers/net/wireless/marvell/libertas/cmdresp.c
+++ b/drivers/net/wireless/marvell/libertas/cmdresp.c
@@ -48,7 +48,7 @@ void lbs_mac_event_disconnected(struct lbs_private *priv,
/* Free Tx and Rx packets */
spin_lock_irqsave(&priv->driver_lock, flags);
- kfree_skb(priv->currenttxskb);
+ dev_kfree_skb_irq(priv->currenttxskb);
priv->currenttxskb = NULL;
priv->tx_pending_len = 0;
spin_unlock_irqrestore(&priv->driver_lock, flags);
diff --git a/drivers/net/wireless/marvell/libertas/if_usb.c b/drivers/net/wireless/marvell/libertas/if_usb.c
index 32fdc4150b60..2240b4db8c03 100644
--- a/drivers/net/wireless/marvell/libertas/if_usb.c
+++ b/drivers/net/wireless/marvell/libertas/if_usb.c
@@ -637,7 +637,7 @@ static inline void process_cmdrequest(int recvlength, uint8_t *recvbuff,
priv->resp_len[i] = (recvlength - MESSAGE_HEADER_LEN);
memcpy(priv->resp_buf[i], recvbuff + MESSAGE_HEADER_LEN,
priv->resp_len[i]);
- kfree_skb(skb);
+ dev_kfree_skb_irq(skb);
lbs_notify_command_response(priv, i);
spin_unlock_irqrestore(&priv->driver_lock, flags);
diff --git a/drivers/net/wireless/marvell/libertas/main.c b/drivers/net/wireless/marvell/libertas/main.c
index 8f5220cee112..78e8b5aecec0 100644
--- a/drivers/net/wireless/marvell/libertas/main.c
+++ b/drivers/net/wireless/marvell/libertas/main.c
@@ -216,7 +216,7 @@ int lbs_stop_iface(struct lbs_private *priv)
spin_lock_irqsave(&priv->driver_lock, flags);
priv->iface_running = false;
- kfree_skb(priv->currenttxskb);
+ dev_kfree_skb_irq(priv->currenttxskb);
priv->currenttxskb = NULL;
priv->tx_pending_len = 0;
spin_unlock_irqrestore(&priv->driver_lock, flags);
@@ -869,6 +869,7 @@ static int lbs_init_adapter(struct lbs_private *priv)
ret = kfifo_alloc(&priv->event_fifo, sizeof(u32) * 16, GFP_KERNEL);
if (ret) {
pr_err("Out of memory allocating event FIFO buffer\n");
+ lbs_free_cmd_buffer(priv);
goto out;
}
diff --git a/drivers/net/wireless/marvell/libertas/types.h b/drivers/net/wireless/marvell/libertas/types.h
index cd4ceb6f885d..bad38d312d0d 100644
--- a/drivers/net/wireless/marvell/libertas/types.h
+++ b/drivers/net/wireless/marvell/libertas/types.h
@@ -93,6 +93,7 @@ union ieee_phy_param_set {
#define TLV_TYPE_TSFTIMESTAMP (PROPRIETARY_TLV_BASE_ID + 19)
#define TLV_TYPE_RSSI_HIGH (PROPRIETARY_TLV_BASE_ID + 22)
#define TLV_TYPE_SNR_HIGH (PROPRIETARY_TLV_BASE_ID + 23)
+#define TLV_TYPE_WPS_ENROLLEE (PROPRIETARY_TLV_BASE_ID + 27)
#define TLV_TYPE_AUTH_TYPE (PROPRIETARY_TLV_BASE_ID + 31)
#define TLV_TYPE_MESH_ID (PROPRIETARY_TLV_BASE_ID + 37)
#define TLV_TYPE_OLD_MESH_ID (PROPRIETARY_TLV_BASE_ID + 291)
@@ -105,23 +106,23 @@ struct mrvl_ie_header {
struct mrvl_ie_data {
struct mrvl_ie_header header;
- u8 Data[1];
+ u8 data[];
} __packed;
struct mrvl_ie_rates_param_set {
struct mrvl_ie_header header;
- u8 rates[1];
+ u8 rates[];
} __packed;
struct mrvl_ie_ssid_param_set {
struct mrvl_ie_header header;
- u8 ssid[1];
+ u8 ssid[];
} __packed;
struct mrvl_ie_wildcard_ssid_param_set {
struct mrvl_ie_header header;
- u8 MaxSsidlength;
- u8 ssid[1];
+ u8 maxssidlength;
+ u8 ssid[];
} __packed;
struct chanscanmode {
@@ -146,7 +147,7 @@ struct chanscanparamset {
struct mrvl_ie_chanlist_param_set {
struct mrvl_ie_header header;
- struct chanscanparamset chanscanparam[1];
+ struct chanscanparamset chanscanparam[];
} __packed;
struct mrvl_ie_cf_param_set {
@@ -164,12 +165,12 @@ struct mrvl_ie_ds_param_set {
struct mrvl_ie_rsn_param_set {
struct mrvl_ie_header header;
- u8 rsnie[1];
+ u8 rsnie[];
} __packed;
struct mrvl_ie_tsf_timestamp {
struct mrvl_ie_header header;
- __le64 tsftable[1];
+ __le64 tsftable[];
} __packed;
/* v9 and later firmware only */
@@ -220,7 +221,7 @@ struct led_pin {
struct mrvl_ie_ledgpio {
struct mrvl_ie_header header;
- struct led_pin ledpin[1];
+ struct led_pin ledpin[];
} __packed;
struct led_bhv {
@@ -233,7 +234,7 @@ struct led_bhv {
struct mrvl_ie_ledbhv {
struct mrvl_ie_header header;
- struct led_bhv ledbhv[1];
+ struct led_bhv ledbhv[];
} __packed;
/*
diff --git a/drivers/net/wireless/marvell/libertas_tf/if_usb.c b/drivers/net/wireless/marvell/libertas_tf/if_usb.c
index 75b5319d033f..1750f5e93de2 100644
--- a/drivers/net/wireless/marvell/libertas_tf/if_usb.c
+++ b/drivers/net/wireless/marvell/libertas_tf/if_usb.c
@@ -613,7 +613,7 @@ static inline void process_cmdrequest(int recvlength, uint8_t *recvbuff,
spin_lock_irqsave(&priv->driver_lock, flags);
memcpy(priv->cmd_resp_buff, recvbuff + MESSAGE_HEADER_LEN,
recvlength - MESSAGE_HEADER_LEN);
- kfree_skb(skb);
+ dev_kfree_skb_irq(skb);
lbtf_cmd_response_rx(priv);
spin_unlock_irqrestore(&priv->driver_lock, flags);
}
diff --git a/drivers/net/wireless/marvell/mwifiex/11h.c b/drivers/net/wireless/marvell/mwifiex/11h.c
index 6a9d7bc1f41e..b0c40a776a2e 100644
--- a/drivers/net/wireless/marvell/mwifiex/11h.c
+++ b/drivers/net/wireless/marvell/mwifiex/11h.c
@@ -292,6 +292,6 @@ void mwifiex_dfs_chan_sw_work_queue(struct work_struct *work)
mwifiex_dbg(priv->adapter, MSG,
"indicating channel switch completion to kernel\n");
mutex_lock(&priv->wdev.mtx);
- cfg80211_ch_switch_notify(priv->netdev, &priv->dfs_chandef, 0);
+ cfg80211_ch_switch_notify(priv->netdev, &priv->dfs_chandef, 0, 0);
mutex_unlock(&priv->wdev.mtx);
}
diff --git a/drivers/net/wireless/marvell/mwifiex/11n.c b/drivers/net/wireless/marvell/mwifiex/11n.c
index 4af57e6d4393..90e401100898 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n.c
+++ b/drivers/net/wireless/marvell/mwifiex/11n.c
@@ -878,7 +878,7 @@ mwifiex_send_delba_txbastream_tbl(struct mwifiex_private *priv, u8 tid)
*/
void mwifiex_update_ampdu_txwinsize(struct mwifiex_adapter *adapter)
{
- u8 i;
+ u8 i, j;
u32 tx_win_size;
struct mwifiex_private *priv;
@@ -909,8 +909,8 @@ void mwifiex_update_ampdu_txwinsize(struct mwifiex_adapter *adapter)
if (tx_win_size != priv->add_ba_param.tx_win_size) {
if (!priv->media_connected)
continue;
- for (i = 0; i < MAX_NUM_TID; i++)
- mwifiex_send_delba_txbastream_tbl(priv, i);
+ for (j = 0; j < MAX_NUM_TID; j++)
+ mwifiex_send_delba_txbastream_tbl(priv, j);
}
}
}
diff --git a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
index a04b66284af4..391793a16adc 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
+++ b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
@@ -33,7 +33,7 @@ static int mwifiex_11n_dispatch_amsdu_pkt(struct mwifiex_private *priv,
skb_trim(skb, le16_to_cpu(local_rx_pd->rx_pkt_length));
ieee80211_amsdu_to_8023s(skb, &list, priv->curr_addr,
- priv->wdev.iftype, 0, NULL, NULL);
+ priv->wdev.iftype, 0, NULL, NULL, false);
while (!skb_queue_empty(&list)) {
struct rx_packet_hdr *rx_hdr;
diff --git a/drivers/net/wireless/marvell/mwifiex/Kconfig b/drivers/net/wireless/marvell/mwifiex/Kconfig
index 2b4ff2b78a7e..b182f7155d66 100644
--- a/drivers/net/wireless/marvell/mwifiex/Kconfig
+++ b/drivers/net/wireless/marvell/mwifiex/Kconfig
@@ -10,13 +10,14 @@ config MWIFIEX
mwifiex.
config MWIFIEX_SDIO
- tristate "Marvell WiFi-Ex Driver for SD8786/SD8787/SD8797/SD8887/SD8897/SD8977/SD8987/SD8997"
+ tristate "Marvell WiFi-Ex Driver for SD8786/SD8787/SD8797/SD8887/SD8897/SD8977/SD8978/SD8987/SD8997"
depends on MWIFIEX && MMC
select FW_LOADER
select WANT_DEV_COREDUMP
help
This adds support for wireless adapters based on Marvell
- 8786/8787/8797/8887/8897/8977/8987/8997 chipsets with SDIO interface.
+ 8786/8787/8797/8887/8897/8977/8978/8987/8997 chipsets with
+ SDIO interface. SD8978 is also known as NXP IW416.
If you choose to build it as a module, it will be called
mwifiex_sdio.
diff --git a/drivers/net/wireless/marvell/mwifiex/cmdevt.c b/drivers/net/wireless/marvell/mwifiex/cmdevt.c
index d3339d67e7a0..3756aa247e77 100644
--- a/drivers/net/wireless/marvell/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/marvell/mwifiex/cmdevt.c
@@ -1607,6 +1607,11 @@ int mwifiex_ret_get_hw_spec(struct mwifiex_private *priv,
api_rev->major_ver,
api_rev->minor_ver);
break;
+ case FW_HOTFIX_VER_ID:
+ mwifiex_dbg(adapter, INFO,
+ "Firmware hotfix version %d\n",
+ api_rev->major_ver);
+ break;
default:
mwifiex_dbg(adapter, FATAL,
"Unknown api_id: %d\n",
diff --git a/drivers/net/wireless/marvell/mwifiex/fw.h b/drivers/net/wireless/marvell/mwifiex/fw.h
index b4f945a549f7..f2168fac95ed 100644
--- a/drivers/net/wireless/marvell/mwifiex/fw.h
+++ b/drivers/net/wireless/marvell/mwifiex/fw.h
@@ -41,7 +41,7 @@ struct mwifiex_fw_header {
struct mwifiex_fw_data {
struct mwifiex_fw_header header;
__le32 seq_num;
- u8 data[1];
+ u8 data[];
} __packed;
struct mwifiex_fw_dump_header {
@@ -641,7 +641,7 @@ struct mwifiex_ie_types_header {
struct mwifiex_ie_types_data {
struct mwifiex_ie_types_header header;
- u8 data[1];
+ u8 data[];
} __packed;
#define MWIFIEX_TxPD_POWER_MGMT_NULL_PACKET 0x01
@@ -794,12 +794,12 @@ struct mwifiex_ie_types_chan_band_list_param_set {
struct mwifiex_ie_types_rates_param_set {
struct mwifiex_ie_types_header header;
- u8 rates[1];
+ u8 rates[];
} __packed;
struct mwifiex_ie_types_ssid_param_set {
struct mwifiex_ie_types_header header;
- u8 ssid[1];
+ u8 ssid[];
} __packed;
struct mwifiex_ie_types_num_probes {
@@ -907,7 +907,7 @@ struct mwifiex_ie_types_tdls_idle_timeout {
struct mwifiex_ie_types_rsn_param_set {
struct mwifiex_ie_types_header header;
- u8 rsn_ie[1];
+ u8 rsn_ie[];
} __packed;
#define KEYPARAMSET_FIXED_LEN 6
@@ -1048,6 +1048,7 @@ enum API_VER_ID {
FW_API_VER_ID = 2,
UAP_FW_API_VER_ID = 3,
CHANRPT_API_VER_ID = 4,
+ FW_HOTFIX_VER_ID = 5,
};
struct hw_spec_api_rev {
@@ -1433,7 +1434,7 @@ struct mwifiex_tdls_stop_cs_params {
struct host_cmd_ds_tdls_config {
__le16 tdls_action;
- u8 tdls_data[1];
+ u8 tdls_data[];
} __packed;
struct mwifiex_chan_desc {
@@ -1574,13 +1575,13 @@ struct ie_body {
struct host_cmd_ds_802_11_scan {
u8 bss_mode;
u8 bssid[ETH_ALEN];
- u8 tlv_buffer[1];
+ u8 tlv_buffer[];
} __packed;
struct host_cmd_ds_802_11_scan_rsp {
__le16 bss_descript_size;
u8 number_of_sets;
- u8 bss_desc_and_tlv_buffer[1];
+ u8 bss_desc_and_tlv_buffer[];
} __packed;
struct host_cmd_ds_802_11_scan_ext {
@@ -1596,7 +1597,7 @@ struct mwifiex_ie_types_bss_mode {
struct mwifiex_ie_types_bss_scan_rsp {
struct mwifiex_ie_types_header header;
u8 bssid[ETH_ALEN];
- u8 frame_body[1];
+ u8 frame_body[];
} __packed;
struct mwifiex_ie_types_bss_scan_info {
@@ -1733,7 +1734,7 @@ struct mwifiex_ie_types_local_pwr_constraint {
struct mwifiex_ie_types_wmm_param_set {
struct mwifiex_ie_types_header header;
- u8 wmm_ie[1];
+ u8 wmm_ie[];
} __packed;
struct mwifiex_ie_types_mgmt_frame {
@@ -1959,7 +1960,7 @@ struct host_cmd_tlv_wep_key {
struct mwifiex_ie_types_header header;
u8 key_index;
u8 is_default;
- u8 key[1];
+ u8 key[];
};
struct host_cmd_tlv_auth_type {
diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c
index b8dc3b5c9ad9..c64e24c10ea6 100644
--- a/drivers/net/wireless/marvell/mwifiex/sdio.c
+++ b/drivers/net/wireless/marvell/mwifiex/sdio.c
@@ -263,7 +263,7 @@ static const struct mwifiex_sdio_card_reg mwifiex_reg_sd8887 = {
0x68, 0x69, 0x6a},
};
-static const struct mwifiex_sdio_card_reg mwifiex_reg_sd8987 = {
+static const struct mwifiex_sdio_card_reg mwifiex_reg_sd89xx = {
.start_rd_port = 0,
.start_wr_port = 0,
.base_0_reg = 0xF8,
@@ -394,6 +394,22 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8977 = {
.can_ext_scan = true,
};
+static const struct mwifiex_sdio_device mwifiex_sdio_sd8978 = {
+ .firmware_sdiouart = SD8978_SDIOUART_FW_NAME,
+ .reg = &mwifiex_reg_sd89xx,
+ .max_ports = 32,
+ .mp_agg_pkt_limit = 16,
+ .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K,
+ .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_MAX,
+ .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_MAX,
+ .supports_sdio_new_mode = true,
+ .has_control_mask = false,
+ .can_dump_fw = true,
+ .fw_dump_enh = true,
+ .can_auto_tdls = false,
+ .can_ext_scan = true,
+};
+
static const struct mwifiex_sdio_device mwifiex_sdio_sd8997 = {
.firmware = SD8997_DEFAULT_FW_NAME,
.firmware_sdiouart = SD8997_SDIOUART_FW_NAME,
@@ -428,7 +444,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8887 = {
static const struct mwifiex_sdio_device mwifiex_sdio_sd8987 = {
.firmware = SD8987_DEFAULT_FW_NAME,
- .reg = &mwifiex_reg_sd8987,
+ .reg = &mwifiex_reg_sd89xx,
.max_ports = 32,
.mp_agg_pkt_limit = 16,
.tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K,
@@ -480,8 +496,11 @@ static struct memory_type_mapping mem_type_mapping_tbl[] = {
};
static const struct of_device_id mwifiex_sdio_of_match_table[] = {
+ { .compatible = "marvell,sd8787" },
{ .compatible = "marvell,sd8897" },
+ { .compatible = "marvell,sd8978" },
{ .compatible = "marvell,sd8997" },
+ { .compatible = "nxp,iw416" },
{ }
};
@@ -919,6 +938,8 @@ static const struct sdio_device_id mwifiex_ids[] = {
.driver_data = (unsigned long)&mwifiex_sdio_sd8801},
{SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8977_WLAN),
.driver_data = (unsigned long)&mwifiex_sdio_sd8977},
+ {SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8978_WLAN),
+ .driver_data = (unsigned long)&mwifiex_sdio_sd8978},
{SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8987_WLAN),
.driver_data = (unsigned long)&mwifiex_sdio_sd8987},
{SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8997_WLAN),
@@ -3163,6 +3184,7 @@ MODULE_FIRMWARE(SD8797_DEFAULT_FW_NAME);
MODULE_FIRMWARE(SD8897_DEFAULT_FW_NAME);
MODULE_FIRMWARE(SD8887_DEFAULT_FW_NAME);
MODULE_FIRMWARE(SD8977_DEFAULT_FW_NAME);
+MODULE_FIRMWARE(SD8978_SDIOUART_FW_NAME);
MODULE_FIRMWARE(SD8987_DEFAULT_FW_NAME);
MODULE_FIRMWARE(SD8997_DEFAULT_FW_NAME);
MODULE_FIRMWARE(SD8997_SDIOUART_FW_NAME);
diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.h b/drivers/net/wireless/marvell/mwifiex/sdio.h
index 3a24bb48b299..ae94c172310f 100644
--- a/drivers/net/wireless/marvell/mwifiex/sdio.h
+++ b/drivers/net/wireless/marvell/mwifiex/sdio.h
@@ -25,6 +25,7 @@
#define SD8887_DEFAULT_FW_NAME "mrvl/sd8887_uapsta.bin"
#define SD8801_DEFAULT_FW_NAME "mrvl/sd8801_uapsta.bin"
#define SD8977_DEFAULT_FW_NAME "mrvl/sdsd8977_combo_v2.bin"
+#define SD8978_SDIOUART_FW_NAME "mrvl/sdiouartiw416_combo_v0.bin"
#define SD8987_DEFAULT_FW_NAME "mrvl/sd8987_uapsta.bin"
#define SD8997_DEFAULT_FW_NAME "mrvl/sdsd8997_combo_v4.bin"
#define SD8997_SDIOUART_FW_NAME "mrvl/sdiouart8997_combo_v4.bin"
diff --git a/drivers/net/wireless/mediatek/mt76/Kconfig b/drivers/net/wireless/mediatek/mt76/Kconfig
index d7f90a0eb21e..18152c16c36f 100644
--- a/drivers/net/wireless/mediatek/mt76/Kconfig
+++ b/drivers/net/wireless/mediatek/mt76/Kconfig
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config MT76_CORE
tristate
+ select PAGE_POOL
config MT76_LEDS
bool
diff --git a/drivers/net/wireless/mediatek/mt76/debugfs.c b/drivers/net/wireless/mediatek/mt76/debugfs.c
index 11b0b3d62f29..57fbcc83e074 100644
--- a/drivers/net/wireless/mediatek/mt76/debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/debugfs.c
@@ -112,7 +112,7 @@ mt76_register_debugfs_fops(struct mt76_phy *phy,
if (!dir)
return NULL;
- debugfs_create_u8("led_pin", 0600, dir, &dev->led_pin);
+ debugfs_create_u8("led_pin", 0600, dir, &phy->leds.pin);
debugfs_create_u32("regidx", 0600, dir, &dev->debugfs_reg);
debugfs_create_file_unsafe("regval", 0600, dir, dev, fops);
debugfs_create_file_unsafe("napi_threaded", 0600, dir, dev,
diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
index 06161815c180..da281cd1d36f 100644
--- a/drivers/net/wireless/mediatek/mt76/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/dma.c
@@ -165,7 +165,7 @@ mt76_free_pending_txwi(struct mt76_dev *dev)
local_bh_enable();
}
-static void
+void
mt76_free_pending_rxwi(struct mt76_dev *dev)
{
struct mt76_txwi_cache *t;
@@ -173,11 +173,12 @@ mt76_free_pending_rxwi(struct mt76_dev *dev)
local_bh_disable();
while ((t = __mt76_get_rxwi(dev)) != NULL) {
if (t->ptr)
- skb_free_frag(t->ptr);
+ mt76_put_page_pool_buf(t->ptr, false);
kfree(t);
}
local_bh_enable();
}
+EXPORT_SYMBOL_GPL(mt76_free_pending_rxwi);
static void
mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
@@ -218,8 +219,7 @@ mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);
- if ((q->flags & MT_QFLAG_WED) &&
- FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX) {
+ if (mt76_queue_is_wed_rx(q)) {
txwi = mt76_get_rxwi(dev);
if (!txwi)
return -ENOMEM;
@@ -401,8 +401,7 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
if (info)
*info = le32_to_cpu(desc->info);
- if ((q->flags & MT_QFLAG_WED) &&
- FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX) {
+ if (mt76_queue_is_wed_rx(q)) {
u32 token = FIELD_GET(MT_DMA_CTL_TOKEN,
le32_to_cpu(desc->buf1));
struct mt76_txwi_cache *t = mt76_rx_token_release(dev, token);
@@ -410,9 +409,9 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
if (!t)
return NULL;
- dma_unmap_single(dev->dma_dev, t->dma_addr,
- SKB_WITH_OVERHEAD(q->buf_size),
- DMA_FROM_DEVICE);
+ dma_sync_single_for_cpu(dev->dma_dev, t->dma_addr,
+ SKB_WITH_OVERHEAD(q->buf_size),
+ page_pool_get_dma_dir(q->page_pool));
buf = t->ptr;
t->dma_addr = 0;
@@ -429,9 +428,9 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
} else {
buf = e->buf;
e->buf = NULL;
- dma_unmap_single(dev->dma_dev, e->dma_addr[0],
- SKB_WITH_OVERHEAD(q->buf_size),
- DMA_FROM_DEVICE);
+ dma_sync_single_for_cpu(dev->dma_dev, e->dma_addr[0],
+ SKB_WITH_OVERHEAD(q->buf_size),
+ page_pool_get_dma_dir(q->page_pool));
}
return buf;
@@ -582,26 +581,12 @@ free_skb:
return ret;
}
-static struct page_frag_cache *
-mt76_dma_rx_get_frag_cache(struct mt76_dev *dev, struct mt76_queue *q)
-{
- struct page_frag_cache *rx_page = &q->rx_page;
-
-#ifdef CONFIG_NET_MEDIATEK_SOC_WED
- if ((q->flags & MT_QFLAG_WED) &&
- FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX)
- rx_page = &dev->mmio.wed.rx_buf_ring.rx_page;
-#endif
- return rx_page;
-}
-
static int
-mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
+mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
+ bool allow_direct)
{
- struct page_frag_cache *rx_page = mt76_dma_rx_get_frag_cache(dev, q);
int len = SKB_WITH_OVERHEAD(q->buf_size);
- int frames = 0, offset = q->buf_offset;
- dma_addr_t addr;
+ int frames = 0;
if (!q->ndesc)
return 0;
@@ -609,26 +594,25 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
spin_lock_bh(&q->lock);
while (q->queued < q->ndesc - 1) {
+ enum dma_data_direction dir;
struct mt76_queue_buf qbuf;
- void *buf = NULL;
+ dma_addr_t addr;
+ int offset;
+ void *buf;
- buf = page_frag_alloc(rx_page, q->buf_size, GFP_ATOMIC);
+ buf = mt76_get_page_pool_buf(q, &offset, q->buf_size);
if (!buf)
break;
- addr = dma_map_single(dev->dma_dev, buf, len, DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(dev->dma_dev, addr))) {
- skb_free_frag(buf);
- break;
- }
+ addr = page_pool_get_dma_addr(virt_to_head_page(buf)) + offset;
+ dir = page_pool_get_dma_dir(q->page_pool);
+ dma_sync_single_for_device(dev->dma_dev, addr, len, dir);
- qbuf.addr = addr + offset;
- qbuf.len = len - offset;
+ qbuf.addr = addr + q->buf_offset;
+ qbuf.len = len - q->buf_offset;
qbuf.skip_unmap = false;
if (mt76_dma_add_rx_buf(dev, q, &qbuf, buf) < 0) {
- dma_unmap_single(dev->dma_dev, addr, len,
- DMA_FROM_DEVICE);
- skb_free_frag(buf);
+ mt76_put_page_pool_buf(buf, allow_direct);
break;
}
frames++;
@@ -642,14 +626,17 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
return frames;
}
-static int
-mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q)
+int mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset)
{
#ifdef CONFIG_NET_MEDIATEK_SOC_WED
struct mtk_wed_device *wed = &dev->mmio.wed;
int ret, type, ring;
- u8 flags = q->flags;
+ u8 flags;
+ if (!q || !q->ndesc)
+ return -EINVAL;
+
+ flags = q->flags;
if (!mtk_wed_device_active(wed))
q->flags &= ~MT_QFLAG_WED;
@@ -661,7 +648,7 @@ mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q)
switch (type) {
case MT76_WED_Q_TX:
- ret = mtk_wed_device_tx_ring_setup(wed, ring, q->regs, false);
+ ret = mtk_wed_device_tx_ring_setup(wed, ring, q->regs, reset);
if (!ret)
q->wed_regs = wed->tx_ring[ring].reg_base;
break;
@@ -669,7 +656,7 @@ mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q)
/* WED txfree queue needs ring to be initialized before setup */
q->flags = 0;
mt76_dma_queue_reset(dev, q);
- mt76_dma_rx_fill(dev, q);
+ mt76_dma_rx_fill(dev, q, false);
q->flags = flags;
ret = mtk_wed_device_txfree_ring_setup(wed, q->regs);
@@ -677,7 +664,7 @@ mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q)
q->wed_regs = wed->txfree_ring.reg_base;
break;
case MT76_WED_Q_RX:
- ret = mtk_wed_device_rx_ring_setup(wed, ring, q->regs, false);
+ ret = mtk_wed_device_rx_ring_setup(wed, ring, q->regs, reset);
if (!ret)
q->wed_regs = wed->rx_ring[ring].reg_base;
break;
@@ -690,6 +677,7 @@ mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q)
return 0;
#endif
}
+EXPORT_SYMBOL_GPL(mt76_dma_wed_setup);
static int
mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
@@ -716,7 +704,11 @@ mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
if (!q->entry)
return -ENOMEM;
- ret = mt76_dma_wed_setup(dev, q);
+ ret = mt76_create_page_pool(dev, q);
+ if (ret)
+ return ret;
+
+ ret = mt76_dma_wed_setup(dev, q, false);
if (ret)
return ret;
@@ -729,7 +721,6 @@ mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
static void
mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
{
- struct page *page;
void *buf;
bool more;
@@ -737,21 +728,21 @@ mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
return;
spin_lock_bh(&q->lock);
+
do {
buf = mt76_dma_dequeue(dev, q, true, NULL, NULL, &more, NULL);
if (!buf)
break;
- skb_free_frag(buf);
+ mt76_put_page_pool_buf(buf, false);
} while (1);
- spin_unlock_bh(&q->lock);
- if (!q->rx_page.va)
- return;
+ if (q->rx_head) {
+ dev_kfree_skb(q->rx_head);
+ q->rx_head = NULL;
+ }
- page = virt_to_page(q->rx_page.va);
- __page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
- memset(&q->rx_page, 0, sizeof(q->rx_page));
+ spin_unlock_bh(&q->lock);
}
static void
@@ -767,14 +758,13 @@ mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);
mt76_dma_rx_cleanup(dev, q);
- mt76_dma_sync_idx(dev, q);
- mt76_dma_rx_fill(dev, q);
-
- if (!q->rx_head)
- return;
- dev_kfree_skb(q->rx_head);
- q->rx_head = NULL;
+ /* reset WED rx queues */
+ mt76_dma_wed_setup(dev, q, true);
+ if (q->flags != MT_WED_Q_TXFREE) {
+ mt76_dma_sync_idx(dev, q);
+ mt76_dma_rx_fill(dev, q, false);
+ }
}
static void
@@ -791,7 +781,7 @@ mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
skb_add_rx_frag(skb, nr_frags, page, offset, len, q->buf_size);
} else {
- skb_free_frag(data);
+ mt76_put_page_pool_buf(data, true);
}
if (more)
@@ -864,6 +854,7 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
goto free_frag;
skb_reserve(skb, q->buf_offset);
+ skb_mark_for_recycle(skb);
*(u32 *)skb->cb = info;
@@ -879,10 +870,10 @@ mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
continue;
free_frag:
- skb_free_frag(data);
+ mt76_put_page_pool_buf(data, true);
}
- mt76_dma_rx_fill(dev, q);
+ mt76_dma_rx_fill(dev, q, true);
return done;
}
@@ -922,10 +913,12 @@ mt76_dma_init(struct mt76_dev *dev,
snprintf(dev->napi_dev.name, sizeof(dev->napi_dev.name), "%s",
wiphy_name(dev->hw->wiphy));
dev->napi_dev.threaded = 1;
+ init_completion(&dev->mmio.wed_reset);
+ init_completion(&dev->mmio.wed_reset_complete);
mt76_for_each_q_rx(dev, i) {
netif_napi_add(&dev->napi_dev, &dev->napi[i], poll);
- mt76_dma_rx_fill(dev, &dev->q_rx[i]);
+ mt76_dma_rx_fill(dev, &dev->q_rx[i], false);
napi_enable(&dev->napi[i]);
}
@@ -975,8 +968,9 @@ void mt76_dma_cleanup(struct mt76_dev *dev)
struct mt76_queue *q = &dev->q_rx[i];
netif_napi_del(&dev->napi[i]);
- if (FIELD_GET(MT_QFLAG_WED_TYPE, q->flags))
- mt76_dma_rx_cleanup(dev, q);
+ mt76_dma_rx_cleanup(dev, q);
+
+ page_pool_destroy(q->page_pool);
}
mt76_free_pending_txwi(dev);
diff --git a/drivers/net/wireless/mediatek/mt76/dma.h b/drivers/net/wireless/mediatek/mt76/dma.h
index 53c6ce2528b2..4b9bc7f462b8 100644
--- a/drivers/net/wireless/mediatek/mt76/dma.h
+++ b/drivers/net/wireless/mediatek/mt76/dma.h
@@ -56,5 +56,6 @@ enum mt76_mcu_evt_type {
int mt76_dma_rx_poll(struct napi_struct *napi, int budget);
void mt76_dma_attach(struct mt76_dev *dev);
void mt76_dma_cleanup(struct mt76_dev *dev);
+int mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset);
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/eeprom.c b/drivers/net/wireless/mediatek/mt76/eeprom.c
index 9bc8758573fc..dce851d42e08 100644
--- a/drivers/net/wireless/mediatek/mt76/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/eeprom.c
@@ -138,6 +138,7 @@ mt76_find_power_limits_node(struct mt76_dev *dev)
{
struct device_node *np = dev->dev->of_node;
const char *const region_names[] = {
+ [NL80211_DFS_UNSET] = "ww",
[NL80211_DFS_ETSI] = "etsi",
[NL80211_DFS_FCC] = "fcc",
[NL80211_DFS_JP] = "jp",
diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c
index fc608b369b3c..b117e4467c87 100644
--- a/drivers/net/wireless/mediatek/mt76/mac80211.c
+++ b/drivers/net/wireless/mediatek/mt76/mac80211.c
@@ -4,6 +4,7 @@
*/
#include <linux/sched.h>
#include <linux/of.h>
+#include <net/page_pool.h>
#include "mt76.h"
#define CHAN2G(_idx, _freq) { \
@@ -192,42 +193,48 @@ static const struct cfg80211_sar_capa mt76_sar_capa = {
.freq_ranges = &mt76_sar_freq_ranges[0],
};
-static int mt76_led_init(struct mt76_dev *dev)
+static int mt76_led_init(struct mt76_phy *phy)
{
- struct device_node *np = dev->dev->of_node;
- struct ieee80211_hw *hw = dev->hw;
- int led_pin;
+ struct mt76_dev *dev = phy->dev;
+ struct ieee80211_hw *hw = phy->hw;
- if (!dev->led_cdev.brightness_set && !dev->led_cdev.blink_set)
+ if (!phy->leds.cdev.brightness_set && !phy->leds.cdev.blink_set)
return 0;
- snprintf(dev->led_name, sizeof(dev->led_name),
- "mt76-%s", wiphy_name(hw->wiphy));
+ snprintf(phy->leds.name, sizeof(phy->leds.name), "mt76-%s",
+ wiphy_name(hw->wiphy));
- dev->led_cdev.name = dev->led_name;
- dev->led_cdev.default_trigger =
+ phy->leds.cdev.name = phy->leds.name;
+ phy->leds.cdev.default_trigger =
ieee80211_create_tpt_led_trigger(hw,
IEEE80211_TPT_LEDTRIG_FL_RADIO,
mt76_tpt_blink,
ARRAY_SIZE(mt76_tpt_blink));
- np = of_get_child_by_name(np, "led");
- if (np) {
- if (!of_property_read_u32(np, "led-sources", &led_pin))
- dev->led_pin = led_pin;
- dev->led_al = of_property_read_bool(np, "led-active-low");
- of_node_put(np);
+ if (phy == &dev->phy) {
+ struct device_node *np = dev->dev->of_node;
+
+ np = of_get_child_by_name(np, "led");
+ if (np) {
+ int led_pin;
+
+ if (!of_property_read_u32(np, "led-sources", &led_pin))
+ phy->leds.pin = led_pin;
+ phy->leds.al = of_property_read_bool(np,
+ "led-active-low");
+ of_node_put(np);
+ }
}
- return led_classdev_register(dev->dev, &dev->led_cdev);
+ return led_classdev_register(dev->dev, &phy->leds.cdev);
}
-static void mt76_led_cleanup(struct mt76_dev *dev)
+static void mt76_led_cleanup(struct mt76_phy *phy)
{
- if (!dev->led_cdev.brightness_set && !dev->led_cdev.blink_set)
+ if (!phy->leds.cdev.brightness_set && !phy->leds.cdev.blink_set)
return;
- led_classdev_unregister(&dev->led_cdev);
+ led_classdev_unregister(&phy->leds.cdev);
}
static void mt76_init_stream_cap(struct mt76_phy *phy,
@@ -517,6 +524,12 @@ int mt76_register_phy(struct mt76_phy *phy, bool vht,
return ret;
}
+ if (IS_ENABLED(CONFIG_MT76_LEDS)) {
+ ret = mt76_led_init(phy);
+ if (ret)
+ return ret;
+ }
+
wiphy_read_of_freq_limits(phy->hw->wiphy);
mt76_check_sband(phy, &phy->sband_2g, NL80211_BAND_2GHZ);
mt76_check_sband(phy, &phy->sband_5g, NL80211_BAND_5GHZ);
@@ -536,12 +549,55 @@ void mt76_unregister_phy(struct mt76_phy *phy)
{
struct mt76_dev *dev = phy->dev;
+ if (IS_ENABLED(CONFIG_MT76_LEDS))
+ mt76_led_cleanup(phy);
mt76_tx_status_check(dev, true);
ieee80211_unregister_hw(phy->hw);
dev->phys[phy->band_idx] = NULL;
}
EXPORT_SYMBOL_GPL(mt76_unregister_phy);
+int mt76_create_page_pool(struct mt76_dev *dev, struct mt76_queue *q)
+{
+ struct page_pool_params pp_params = {
+ .order = 0,
+ .flags = PP_FLAG_PAGE_FRAG,
+ .nid = NUMA_NO_NODE,
+ .dev = dev->dma_dev,
+ };
+ int idx = q - dev->q_rx;
+
+ switch (idx) {
+ case MT_RXQ_MAIN:
+ case MT_RXQ_BAND1:
+ case MT_RXQ_BAND2:
+ pp_params.pool_size = 256;
+ break;
+ default:
+ pp_params.pool_size = 16;
+ break;
+ }
+
+ if (mt76_is_mmio(dev)) {
+ /* rely on page_pool for DMA mapping */
+ pp_params.flags |= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
+ pp_params.dma_dir = DMA_FROM_DEVICE;
+ pp_params.max_len = PAGE_SIZE;
+ pp_params.offset = 0;
+ }
+
+ q->page_pool = page_pool_create(&pp_params);
+ if (IS_ERR(q->page_pool)) {
+ int err = PTR_ERR(q->page_pool);
+
+ q->page_pool = NULL;
+ return err;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76_create_page_pool);
+
struct mt76_dev *
mt76_alloc_device(struct device *pdev, unsigned int size,
const struct ieee80211_ops *ops,
@@ -653,7 +709,7 @@ int mt76_register_device(struct mt76_dev *dev, bool vht,
mt76_check_sband(&dev->phy, &phy->sband_6g, NL80211_BAND_6GHZ);
if (IS_ENABLED(CONFIG_MT76_LEDS)) {
- ret = mt76_led_init(dev);
+ ret = mt76_led_init(phy);
if (ret)
return ret;
}
@@ -674,7 +730,7 @@ void mt76_unregister_device(struct mt76_dev *dev)
struct ieee80211_hw *hw = dev->hw;
if (IS_ENABLED(CONFIG_MT76_LEDS))
- mt76_led_cleanup(dev);
+ mt76_led_cleanup(&dev->phy);
mt76_tx_status_check(dev, true);
ieee80211_unregister_hw(hw);
}
@@ -1644,7 +1700,7 @@ u16 mt76_calculate_default_rate(struct mt76_phy *phy, int rateidx)
EXPORT_SYMBOL_GPL(mt76_calculate_default_rate);
void mt76_ethtool_worker(struct mt76_ethtool_worker_info *wi,
- struct mt76_sta_stats *stats)
+ struct mt76_sta_stats *stats, bool eht)
{
int i, ei = wi->initial_stat_idx;
u64 *data = wi->data;
@@ -1660,17 +1716,37 @@ void mt76_ethtool_worker(struct mt76_ethtool_worker_info *wi,
data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_EXT_SU];
data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_TB];
data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_MU];
+ if (eht) {
+ data[ei++] += stats->tx_mode[MT_PHY_TYPE_EHT_SU];
+ data[ei++] += stats->tx_mode[MT_PHY_TYPE_EHT_TRIG];
+ data[ei++] += stats->tx_mode[MT_PHY_TYPE_EHT_MU];
+ }
- for (i = 0; i < ARRAY_SIZE(stats->tx_bw); i++)
+ for (i = 0; i < (ARRAY_SIZE(stats->tx_bw) - !eht); i++)
data[ei++] += stats->tx_bw[i];
- for (i = 0; i < 12; i++)
+ for (i = 0; i < (eht ? 14 : 12); i++)
data[ei++] += stats->tx_mcs[i];
wi->worker_stat_count = ei - wi->initial_stat_idx;
}
EXPORT_SYMBOL_GPL(mt76_ethtool_worker);
+void mt76_ethtool_page_pool_stats(struct mt76_dev *dev, u64 *data, int *index)
+{
+#ifdef CONFIG_PAGE_POOL_STATS
+ struct page_pool_stats stats = {};
+ int i;
+
+ mt76_for_each_q_rx(dev, i)
+ page_pool_get_stats(dev->q_rx[i].page_pool, &stats);
+
+ page_pool_ethtool_stats_get(data, &stats);
+ *index += page_pool_ethtool_stats_get_count();
+#endif
+}
+EXPORT_SYMBOL_GPL(mt76_ethtool_page_pool_stats);
+
enum mt76_dfs_state mt76_phy_dfs_state(struct mt76_phy *phy)
{
struct ieee80211_hw *hw = phy->hw;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
index 32a77a0ae9da..ccca0162c8f8 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76.h
@@ -202,7 +202,7 @@ struct mt76_queue {
dma_addr_t desc_dma;
struct sk_buff *rx_head;
- struct page_frag_cache rx_page;
+ struct page_pool *page_pool;
};
struct mt76_mcu_ops {
@@ -264,12 +264,15 @@ enum mt76_phy_type {
MT_PHY_TYPE_HE_EXT_SU,
MT_PHY_TYPE_HE_TB,
MT_PHY_TYPE_HE_MU,
- __MT_PHY_TYPE_HE_MAX,
+ MT_PHY_TYPE_EHT_SU = 13,
+ MT_PHY_TYPE_EHT_TRIG,
+ MT_PHY_TYPE_EHT_MU,
+ __MT_PHY_TYPE_MAX,
};
struct mt76_sta_stats {
- u64 tx_mode[__MT_PHY_TYPE_HE_MAX];
- u64 tx_bw[4]; /* 20, 40, 80, 160 */
+ u64 tx_mode[__MT_PHY_TYPE_MAX];
+ u64 tx_bw[5]; /* 20, 40, 80, 160, 320 */
u64 tx_nss[4]; /* 1, 2, 3, 4 */
u64 tx_mcs[16]; /* mcs idx */
u64 tx_bytes;
@@ -291,7 +294,7 @@ enum mt76_wcid_flags {
MT_WCID_FLAG_HDR_TRANS,
};
-#define MT76_N_WCIDS 544
+#define MT76_N_WCIDS 1088
/* stored in ieee80211_tx_info::hw_queue */
#define MT_TX_HW_QUEUE_PHY GENMASK(3, 2)
@@ -413,6 +416,7 @@ enum {
MT76_STATE_SUSPEND,
MT76_STATE_ROC,
MT76_STATE_PM,
+ MT76_STATE_WED_RESET,
};
struct mt76_hw_cap {
@@ -591,6 +595,8 @@ struct mt76_mmio {
u32 irqmask;
struct mtk_wed_device wed;
+ struct completion wed_reset;
+ struct completion wed_reset_complete;
};
struct mt76_rx_status {
@@ -731,6 +737,13 @@ struct mt76_phy {
} rx_amsdu[__MT_RXQ_MAX];
struct mt76_freq_range_power *frp;
+
+ struct {
+ struct led_classdev cdev;
+ char name[32];
+ bool al;
+ u8 pin;
+ } leds;
};
struct mt76_dev {
@@ -812,11 +825,6 @@ struct mt76_dev {
u32 debugfs_reg;
- struct led_classdev led_cdev;
- char led_name[32];
- bool led_al;
- u8 led_pin;
-
u8 csa_complete;
u32 rxfilter;
@@ -886,7 +894,6 @@ extern struct ieee80211_rate mt76_rates[12];
#define mt76_mcu_restart(dev, ...) (dev)->mt76.mcu_ops->mcu_restart(&((dev)->mt76))
-#define __mt76_mcu_restart(dev, ...) (dev)->mcu_ops->mcu_restart((dev))
#define mt76_set(dev, offset, val) mt76_rmw(dev, offset, 0, val)
#define mt76_clear(dev, offset, val) mt76_rmw(dev, offset, val, 0)
@@ -907,10 +914,11 @@ bool __mt76_poll(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
#define mt76_poll(dev, ...) __mt76_poll(&((dev)->mt76), __VA_ARGS__)
-bool __mt76_poll_msec(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
- int timeout);
-
-#define mt76_poll_msec(dev, ...) __mt76_poll_msec(&((dev)->mt76), __VA_ARGS__)
+bool ____mt76_poll_msec(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
+ int timeout, int kick);
+#define __mt76_poll_msec(...) ____mt76_poll_msec(__VA_ARGS__, 10)
+#define mt76_poll_msec(dev, ...) ____mt76_poll_msec(&((dev)->mt76), __VA_ARGS__, 10)
+#define mt76_poll_msec_tick(dev, ...) ____mt76_poll_msec(&((dev)->mt76), __VA_ARGS__)
void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs);
void mt76_pci_disable_aspm(struct pci_dev *pdev);
@@ -1267,6 +1275,7 @@ mt76_tx_status_get_hw(struct mt76_dev *dev, struct sk_buff *skb)
void mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
void mt76_put_rxwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
struct mt76_txwi_cache *mt76_get_rxwi(struct mt76_dev *dev);
+void mt76_free_pending_rxwi(struct mt76_dev *dev);
void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
struct napi_struct *napi);
void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
@@ -1309,8 +1318,9 @@ mt76u_bulk_msg(struct mt76_dev *dev, void *data, int len, int *actual_len,
return usb_bulk_msg(udev, pipe, data, len, actual_len, timeout);
}
+void mt76_ethtool_page_pool_stats(struct mt76_dev *dev, u64 *data, int *index);
void mt76_ethtool_worker(struct mt76_ethtool_worker_info *wi,
- struct mt76_sta_stats *stats);
+ struct mt76_sta_stats *stats, bool eht);
int mt76_skb_adjust_pad(struct sk_buff *skb, int pad);
int __mt76u_vendor_request(struct mt76_dev *dev, u8 req, u8 req_type,
u16 val, u16 offset, void *buf, size_t len);
@@ -1407,6 +1417,12 @@ s8 mt76_get_rate_power_limits(struct mt76_phy *phy,
struct mt76_power_limits *dest,
s8 target_power);
+static inline bool mt76_queue_is_wed_rx(struct mt76_queue *q)
+{
+ return (q->flags & MT_QFLAG_WED) &&
+ FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX;
+}
+
struct mt76_txwi_cache *
mt76_token_release(struct mt76_dev *dev, int token, bool *wake);
int mt76_token_consume(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi);
@@ -1414,6 +1430,25 @@ void __mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked);
struct mt76_txwi_cache *mt76_rx_token_release(struct mt76_dev *dev, int token);
int mt76_rx_token_consume(struct mt76_dev *dev, void *ptr,
struct mt76_txwi_cache *r, dma_addr_t phys);
+int mt76_create_page_pool(struct mt76_dev *dev, struct mt76_queue *q);
+static inline void mt76_put_page_pool_buf(void *buf, bool allow_direct)
+{
+ struct page *page = virt_to_head_page(buf);
+
+ page_pool_put_full_page(page->pp, page, allow_direct);
+}
+
+static inline void *
+mt76_get_page_pool_buf(struct mt76_queue *q, u32 *offset, u32 size)
+{
+ struct page *page;
+
+ page = page_pool_dev_alloc_frag(q->page_pool, offset, size);
+ if (!page)
+ return NULL;
+
+ return page_address(page) + *offset;
+}
static inline void mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked)
{
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/init.c b/drivers/net/wireless/mediatek/mt76/mt7603/init.c
index 031d39a48a55..9a2e632d577a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/init.c
@@ -330,10 +330,10 @@ static const struct ieee80211_iface_combination if_comb[] = {
}
};
-static void mt7603_led_set_config(struct mt76_dev *mt76, u8 delay_on,
+static void mt7603_led_set_config(struct mt76_phy *mphy, u8 delay_on,
u8 delay_off)
{
- struct mt7603_dev *dev = container_of(mt76, struct mt7603_dev,
+ struct mt7603_dev *dev = container_of(mphy->dev, struct mt7603_dev,
mt76);
u32 val, addr;
@@ -341,15 +341,15 @@ static void mt7603_led_set_config(struct mt76_dev *mt76, u8 delay_on,
FIELD_PREP(MT_LED_STATUS_OFF, delay_off) |
FIELD_PREP(MT_LED_STATUS_ON, delay_on);
- addr = mt7603_reg_map(dev, MT_LED_STATUS_0(mt76->led_pin));
+ addr = mt7603_reg_map(dev, MT_LED_STATUS_0(mphy->leds.pin));
mt76_wr(dev, addr, val);
- addr = mt7603_reg_map(dev, MT_LED_STATUS_1(mt76->led_pin));
+ addr = mt7603_reg_map(dev, MT_LED_STATUS_1(mphy->leds.pin));
mt76_wr(dev, addr, val);
- val = MT_LED_CTRL_REPLAY(mt76->led_pin) |
- MT_LED_CTRL_KICK(mt76->led_pin);
- if (mt76->led_al)
- val |= MT_LED_CTRL_POLARITY(mt76->led_pin);
+ val = MT_LED_CTRL_REPLAY(mphy->leds.pin) |
+ MT_LED_CTRL_KICK(mphy->leds.pin);
+ if (mphy->leds.al)
+ val |= MT_LED_CTRL_POLARITY(mphy->leds.pin);
addr = mt7603_reg_map(dev, MT_LED_CTRL);
mt76_wr(dev, addr, val);
}
@@ -358,27 +358,27 @@ static int mt7603_led_set_blink(struct led_classdev *led_cdev,
unsigned long *delay_on,
unsigned long *delay_off)
{
- struct mt76_dev *mt76 = container_of(led_cdev, struct mt76_dev,
- led_cdev);
+ struct mt76_phy *mphy = container_of(led_cdev, struct mt76_phy,
+ leds.cdev);
u8 delta_on, delta_off;
delta_off = max_t(u8, *delay_off / 10, 1);
delta_on = max_t(u8, *delay_on / 10, 1);
- mt7603_led_set_config(mt76, delta_on, delta_off);
+ mt7603_led_set_config(mphy, delta_on, delta_off);
return 0;
}
static void mt7603_led_set_brightness(struct led_classdev *led_cdev,
enum led_brightness brightness)
{
- struct mt76_dev *mt76 = container_of(led_cdev, struct mt76_dev,
- led_cdev);
+ struct mt76_phy *mphy = container_of(led_cdev, struct mt76_phy,
+ leds.cdev);
if (!brightness)
- mt7603_led_set_config(mt76, 0, 0xff);
+ mt7603_led_set_config(mphy, 0, 0xff);
else
- mt7603_led_set_config(mt76, 0xff, 0);
+ mt7603_led_set_config(mphy, 0xff, 0);
}
static u32 __mt7603_reg_addr(struct mt7603_dev *dev, u32 addr)
@@ -535,8 +535,8 @@ int mt7603_register_device(struct mt7603_dev *dev)
/* init led callbacks */
if (IS_ENABLED(CONFIG_MT76_LEDS)) {
- dev->mt76.led_cdev.brightness_set = mt7603_led_set_brightness;
- dev->mt76.led_cdev.blink_set = mt7603_led_set_blink;
+ dev->mphy.leds.cdev.brightness_set = mt7603_led_set_brightness;
+ dev->mphy.leds.cdev.blink_set = mt7603_led_set_blink;
}
wiphy->reg_notifier = mt7603_regd_notifier;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7603/mcu.c
index 7884b952b720..301668c3cc92 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/mcu.c
@@ -221,7 +221,6 @@ int mt7603_mcu_init(struct mt7603_dev *dev)
.headroom = sizeof(struct mt7603_mcu_txd),
.mcu_skb_send_msg = mt7603_mcu_skb_send_msg,
.mcu_parse_response = mt7603_mcu_parse_response,
- .mcu_restart = mt7603_mcu_restart,
};
dev->mt76.mcu_ops = &mt7603_mcu_ops;
@@ -230,7 +229,7 @@ int mt7603_mcu_init(struct mt7603_dev *dev)
void mt7603_mcu_exit(struct mt7603_dev *dev)
{
- __mt76_mcu_restart(&dev->mt76);
+ mt7603_mcu_restart(&dev->mt76);
skb_queue_purge(&dev->mt76.mcu.res_q);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/init.c b/drivers/net/wireless/mediatek/mt76/mt7615/init.c
index 07a1fea94f66..5fa6f097ec30 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/init.c
@@ -443,6 +443,85 @@ mt7615_cap_dbdc_disable(struct mt7615_dev *dev)
mt76_set_stream_caps(&dev->mphy, true);
}
+u32 mt7615_reg_map(struct mt7615_dev *dev, u32 addr)
+{
+ u32 base, offset;
+
+ if (is_mt7663(&dev->mt76)) {
+ base = addr & MT7663_MCU_PCIE_REMAP_2_BASE;
+ offset = addr & MT7663_MCU_PCIE_REMAP_2_OFFSET;
+ } else {
+ base = addr & MT_MCU_PCIE_REMAP_2_BASE;
+ offset = addr & MT_MCU_PCIE_REMAP_2_OFFSET;
+ }
+ mt76_wr(dev, MT_MCU_PCIE_REMAP_2, base);
+
+ return MT_PCIE_REMAP_BASE_2 + offset;
+}
+EXPORT_SYMBOL_GPL(mt7615_reg_map);
+
+static void
+mt7615_led_set_config(struct led_classdev *led_cdev,
+ u8 delay_on, u8 delay_off)
+{
+ struct mt7615_dev *dev;
+ struct mt76_phy *mphy;
+ u32 val, addr;
+ u8 index;
+
+ mphy = container_of(led_cdev, struct mt76_phy, leds.cdev);
+ dev = container_of(mphy->dev, struct mt7615_dev, mt76);
+
+ if (!mt76_connac_pm_ref(mphy, &dev->pm))
+ return;
+
+ val = FIELD_PREP(MT_LED_STATUS_DURATION, 0xffff) |
+ FIELD_PREP(MT_LED_STATUS_OFF, delay_off) |
+ FIELD_PREP(MT_LED_STATUS_ON, delay_on);
+
+ index = dev->dbdc_support ? mphy->band_idx : mphy->leds.pin;
+ addr = mt7615_reg_map(dev, MT_LED_STATUS_0(index));
+ mt76_wr(dev, addr, val);
+ addr = mt7615_reg_map(dev, MT_LED_STATUS_1(index));
+ mt76_wr(dev, addr, val);
+
+ val = MT_LED_CTRL_REPLAY(index) | MT_LED_CTRL_KICK(index);
+ if (dev->mphy.leds.al)
+ val |= MT_LED_CTRL_POLARITY(index);
+ if (mphy->band_idx)
+ val |= MT_LED_CTRL_BAND(index);
+
+ addr = mt7615_reg_map(dev, MT_LED_CTRL);
+ mt76_wr(dev, addr, val);
+
+ mt76_connac_pm_unref(mphy, &dev->pm);
+}
+
+int mt7615_led_set_blink(struct led_classdev *led_cdev,
+ unsigned long *delay_on,
+ unsigned long *delay_off)
+{
+ u8 delta_on, delta_off;
+
+ delta_off = max_t(u8, *delay_off / 10, 1);
+ delta_on = max_t(u8, *delay_on / 10, 1);
+
+ mt7615_led_set_config(led_cdev, delta_on, delta_off);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt7615_led_set_blink);
+
+void mt7615_led_set_brightness(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ if (!brightness)
+ mt7615_led_set_config(led_cdev, 0, 0xff);
+ else
+ mt7615_led_set_config(led_cdev, 0xff, 0);
+}
+EXPORT_SYMBOL_GPL(mt7615_led_set_brightness);
+
int mt7615_register_ext_phy(struct mt7615_dev *dev)
{
struct mt7615_phy *phy = mt7615_ext_phy(dev);
@@ -497,6 +576,12 @@ int mt7615_register_ext_phy(struct mt7615_dev *dev)
for (i = 0; i <= MT_TXQ_PSD ; i++)
mphy->q_tx[i] = dev->mphy.q_tx[i];
+ /* init led callbacks */
+ if (IS_ENABLED(CONFIG_MT76_LEDS)) {
+ mphy->leds.cdev.brightness_set = mt7615_led_set_brightness;
+ mphy->leds.cdev.blink_set = mt7615_led_set_blink;
+ }
+
ret = mt76_register_phy(mphy, true, mt76_rates,
ARRAY_SIZE(mt76_rates));
if (ret)
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
index 83f30305414d..eea398c79a98 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
@@ -1692,7 +1692,6 @@ int mt7615_mcu_init(struct mt7615_dev *dev)
.headroom = sizeof(struct mt7615_mcu_txd),
.mcu_skb_send_msg = mt7615_mcu_send_message,
.mcu_parse_response = mt7615_mcu_parse_response,
- .mcu_restart = mt7615_mcu_restart,
};
int ret;
@@ -1732,7 +1731,7 @@ EXPORT_SYMBOL_GPL(mt7615_mcu_init);
void mt7615_mcu_exit(struct mt7615_dev *dev)
{
- __mt76_mcu_restart(&dev->mt76);
+ mt7615_mcu_restart(&dev->mt76);
mt7615_mcu_set_fw_ctrl(dev);
skb_queue_purge(&dev->mt76.mcu.res_q);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c
index a784f9d9e935..83173efb56dc 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c
@@ -63,22 +63,6 @@ const u32 mt7663e_reg_map[] = {
[MT_EFUSE_ADDR_BASE] = 0x78011000,
};
-u32 mt7615_reg_map(struct mt7615_dev *dev, u32 addr)
-{
- u32 base, offset;
-
- if (is_mt7663(&dev->mt76)) {
- base = addr & MT7663_MCU_PCIE_REMAP_2_BASE;
- offset = addr & MT7663_MCU_PCIE_REMAP_2_OFFSET;
- } else {
- base = addr & MT_MCU_PCIE_REMAP_2_BASE;
- offset = addr & MT_MCU_PCIE_REMAP_2_OFFSET;
- }
- mt76_wr(dev, MT_MCU_PCIE_REMAP_2, base);
-
- return MT_PCIE_REMAP_BASE_2 + offset;
-}
-
static void
mt7615_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
{
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
index 087d4886162e..43591b4c1d9a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
@@ -376,6 +376,12 @@ int mt7615_mmio_probe(struct device *pdev, void __iomem *mem_base,
int irq, const u32 *map);
u32 mt7615_reg_map(struct mt7615_dev *dev, u32 addr);
+u32 mt7615_reg_map(struct mt7615_dev *dev, u32 addr);
+int mt7615_led_set_blink(struct led_classdev *led_cdev,
+ unsigned long *delay_on,
+ unsigned long *delay_off);
+void mt7615_led_set_brightness(struct led_classdev *led_cdev,
+ enum led_brightness brightness);
void mt7615_init_device(struct mt7615_dev *dev);
int mt7615_register_device(struct mt7615_dev *dev);
void mt7615_unregister_device(struct mt7615_dev *dev);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c b/drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c
index 87b4aa52ee0f..0680e002b981 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c
@@ -66,64 +66,6 @@ static int mt7615_init_hardware(struct mt7615_dev *dev)
return 0;
}
-static void
-mt7615_led_set_config(struct led_classdev *led_cdev,
- u8 delay_on, u8 delay_off)
-{
- struct mt7615_dev *dev;
- struct mt76_dev *mt76;
- u32 val, addr;
-
- mt76 = container_of(led_cdev, struct mt76_dev, led_cdev);
- dev = container_of(mt76, struct mt7615_dev, mt76);
-
- if (!mt76_connac_pm_ref(&dev->mphy, &dev->pm))
- return;
-
- val = FIELD_PREP(MT_LED_STATUS_DURATION, 0xffff) |
- FIELD_PREP(MT_LED_STATUS_OFF, delay_off) |
- FIELD_PREP(MT_LED_STATUS_ON, delay_on);
-
- addr = mt7615_reg_map(dev, MT_LED_STATUS_0(mt76->led_pin));
- mt76_wr(dev, addr, val);
- addr = mt7615_reg_map(dev, MT_LED_STATUS_1(mt76->led_pin));
- mt76_wr(dev, addr, val);
-
- val = MT_LED_CTRL_REPLAY(mt76->led_pin) |
- MT_LED_CTRL_KICK(mt76->led_pin);
- if (mt76->led_al)
- val |= MT_LED_CTRL_POLARITY(mt76->led_pin);
- addr = mt7615_reg_map(dev, MT_LED_CTRL);
- mt76_wr(dev, addr, val);
-
- mt76_connac_pm_unref(&dev->mphy, &dev->pm);
-}
-
-static int
-mt7615_led_set_blink(struct led_classdev *led_cdev,
- unsigned long *delay_on,
- unsigned long *delay_off)
-{
- u8 delta_on, delta_off;
-
- delta_off = max_t(u8, *delay_off / 10, 1);
- delta_on = max_t(u8, *delay_on / 10, 1);
-
- mt7615_led_set_config(led_cdev, delta_on, delta_off);
-
- return 0;
-}
-
-static void
-mt7615_led_set_brightness(struct led_classdev *led_cdev,
- enum led_brightness brightness)
-{
- if (!brightness)
- mt7615_led_set_config(led_cdev, 0, 0xff);
- else
- mt7615_led_set_config(led_cdev, 0xff, 0);
-}
-
int mt7615_register_device(struct mt7615_dev *dev)
{
int ret;
@@ -133,8 +75,8 @@ int mt7615_register_device(struct mt7615_dev *dev)
/* init led callbacks */
if (IS_ENABLED(CONFIG_MT76_LEDS)) {
- dev->mt76.led_cdev.brightness_set = mt7615_led_set_brightness;
- dev->mt76.led_cdev.blink_set = mt7615_led_set_blink;
+ dev->mphy.leds.cdev.brightness_set = mt7615_led_set_brightness;
+ dev->mphy.leds.cdev.blink_set = mt7615_led_set_blink;
}
ret = mt7622_wmac_init(dev);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/regs.h b/drivers/net/wireless/mediatek/mt76/mt7615/regs.h
index fa1b9b26b399..7cecb22c569e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/regs.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/regs.h
@@ -544,6 +544,7 @@ enum mt7615_reg_base {
#define MT_LED_CTRL_POLARITY(_n) BIT(1 + (8 * (_n)))
#define MT_LED_CTRL_TX_BLINK_MODE(_n) BIT(2 + (8 * (_n)))
#define MT_LED_CTRL_TX_MANUAL_BLINK(_n) BIT(3 + (8 * (_n)))
+#define MT_LED_CTRL_BAND(_n) BIT(4 + (8 * (_n)))
#define MT_LED_CTRL_TX_OVER_BLINK(_n) BIT(5 + (8 * (_n)))
#define MT_LED_CTRL_KICK(_n) BIT(7 + (8 * (_n)))
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/sdio_mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/sdio_mcu.c
index dc9a2f0b45a5..b0094205ba95 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/sdio_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/sdio_mcu.c
@@ -137,7 +137,6 @@ int mt7663s_mcu_init(struct mt7615_dev *dev)
.tailroom = MT_USB_TAIL_SIZE,
.mcu_skb_send_msg = mt7663s_mcu_send_message,
.mcu_parse_response = mt7615_mcu_parse_response,
- .mcu_restart = mt7615_mcu_restart,
.mcu_rr = mt76_connac_mcu_reg_rr,
.mcu_wr = mt76_connac_mcu_reg_wr,
};
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c
index 98bf2f6ae936..a8b1a0f8b2d7 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c
@@ -69,7 +69,6 @@ int mt7663u_mcu_init(struct mt7615_dev *dev)
.tailroom = MT_USB_TAIL_SIZE,
.mcu_skb_send_msg = mt7663u_mcu_send_message,
.mcu_parse_response = mt7615_mcu_parse_response,
- .mcu_restart = mt7615_mcu_restart,
};
int ret;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac.h b/drivers/net/wireless/mediatek/mt76/mt76_connac.h
index 8ba883b03e50..b339c50bff20 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac.h
@@ -42,6 +42,7 @@ enum {
CMD_CBW_10MHZ,
CMD_CBW_5MHZ,
CMD_CBW_8080MHZ,
+ CMD_CBW_320MHZ,
CMD_HE_MCS_BW80 = 0,
CMD_HE_MCS_BW160,
@@ -239,6 +240,7 @@ static inline u8 mt76_connac_chan_bw(struct cfg80211_chan_def *chandef)
[NL80211_CHAN_WIDTH_10] = CMD_CBW_10MHZ,
[NL80211_CHAN_WIDTH_20] = CMD_CBW_20MHZ,
[NL80211_CHAN_WIDTH_20_NOHT] = CMD_CBW_20MHZ,
+ [NL80211_CHAN_WIDTH_320] = CMD_CBW_320MHZ,
};
if (chandef->width >= ARRAY_SIZE(width_to_bw))
@@ -370,6 +372,9 @@ void mt76_connac2_mac_write_txwi(struct mt76_dev *dev, __le32 *txwi,
struct sk_buff *skb, struct mt76_wcid *wcid,
struct ieee80211_key_conf *key, int pid,
enum mt76_txq_id qid, u32 changed);
+u16 mt76_connac2_mac_tx_rate_val(struct mt76_phy *mphy,
+ struct ieee80211_vif *vif,
+ bool beacon, bool mcast);
bool mt76_connac2_mac_fill_txs(struct mt76_dev *dev, struct mt76_wcid *wcid,
__le32 *txs_data);
bool mt76_connac2_mac_add_txs_skb(struct mt76_dev *dev, struct mt76_wcid *wcid,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
index fd60123fb284..aed4ee95fb2e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
@@ -267,9 +267,9 @@ int mt76_connac_init_tx_queues(struct mt76_phy *phy, int idx, int n_desc,
}
EXPORT_SYMBOL_GPL(mt76_connac_init_tx_queues);
-static u16
-mt76_connac2_mac_tx_rate_val(struct mt76_phy *mphy, struct ieee80211_vif *vif,
- bool beacon, bool mcast)
+u16 mt76_connac2_mac_tx_rate_val(struct mt76_phy *mphy,
+ struct ieee80211_vif *vif,
+ bool beacon, bool mcast)
{
u8 mode = 0, band = mphy->chandef.chan->band;
int rateidx = 0, mcast_rate;
@@ -319,6 +319,7 @@ out:
return FIELD_PREP(MT_TX_RATE_IDX, rateidx) |
FIELD_PREP(MT_TX_RATE_MODE, mode);
}
+EXPORT_SYMBOL_GPL(mt76_connac2_mac_tx_rate_val);
static void
mt76_connac2_mac_write_txwi_8023(__le32 *txwi, struct sk_buff *skb,
@@ -930,7 +931,7 @@ int mt76_connac2_reverse_frag0_hdr_trans(struct ieee80211_vif *vif,
ether_addr_copy(hdr.addr4, eth_hdr->h_source);
break;
default:
- break;
+ return -EINVAL;
}
skb_pull(skb, hdr_offset + sizeof(struct ethhdr) - 2);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
index 5a047e630860..efb9bfaa187f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
@@ -1329,6 +1329,40 @@ u8 mt76_connac_get_phy_mode(struct mt76_phy *phy, struct ieee80211_vif *vif,
}
EXPORT_SYMBOL_GPL(mt76_connac_get_phy_mode);
+u8 mt76_connac_get_phy_mode_ext(struct mt76_phy *phy, struct ieee80211_vif *vif,
+ enum nl80211_band band)
+{
+ const struct ieee80211_sta_eht_cap *eht_cap;
+ struct ieee80211_supported_band *sband;
+ u8 mode = 0;
+
+ if (band == NL80211_BAND_6GHZ)
+ mode |= PHY_MODE_AX_6G;
+
+ sband = phy->hw->wiphy->bands[band];
+ eht_cap = ieee80211_get_eht_iftype_cap(sband, vif->type);
+
+ if (!eht_cap || !eht_cap->has_eht)
+ return mode;
+
+ switch (band) {
+ case NL80211_BAND_6GHZ:
+ mode |= PHY_MODE_BE_6G;
+ break;
+ case NL80211_BAND_5GHZ:
+ mode |= PHY_MODE_BE_5G;
+ break;
+ case NL80211_BAND_2GHZ:
+ mode |= PHY_MODE_BE_24G;
+ break;
+ default:
+ break;
+ }
+
+ return mode;
+}
+EXPORT_SYMBOL_GPL(mt76_connac_get_phy_mode_ext);
+
const struct ieee80211_sta_he_cap *
mt76_connac_get_he_phy_cap(struct mt76_phy *phy, struct ieee80211_vif *vif)
{
@@ -1341,6 +1375,18 @@ mt76_connac_get_he_phy_cap(struct mt76_phy *phy, struct ieee80211_vif *vif)
}
EXPORT_SYMBOL_GPL(mt76_connac_get_he_phy_cap);
+const struct ieee80211_sta_eht_cap *
+mt76_connac_get_eht_phy_cap(struct mt76_phy *phy, struct ieee80211_vif *vif)
+{
+ enum nl80211_band band = phy->chandef.chan->band;
+ struct ieee80211_supported_band *sband;
+
+ sband = phy->hw->wiphy->bands[band];
+
+ return ieee80211_get_eht_iftype_cap(sband, vif->type);
+}
+EXPORT_SYMBOL_GPL(mt76_connac_get_eht_phy_cap);
+
#define DEFAULT_HE_PE_DURATION 4
#define DEFAULT_HE_DURATION_RTS_THRES 1023
static void
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
index f1e942b9a887..a5e6ee4daf92 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
@@ -793,6 +793,7 @@ enum {
STA_REC_PHY = 0x15,
STA_REC_HE_6G = 0x17,
STA_REC_HE_V2 = 0x19,
+ STA_REC_EHT = 0x22,
STA_REC_HDRT = 0x28,
STA_REC_HDR_TRANS = 0x2B,
STA_REC_MAX_NUM
@@ -882,12 +883,16 @@ enum {
#define PHY_MODE_AX_5G BIT(7)
#define PHY_MODE_AX_6G BIT(0) /* phymode_ext */
+#define PHY_MODE_BE_24G BIT(1)
+#define PHY_MODE_BE_5G BIT(2)
+#define PHY_MODE_BE_6G BIT(3)
#define MODE_CCK BIT(0)
#define MODE_OFDM BIT(1)
#define MODE_HT BIT(2)
#define MODE_VHT BIT(3)
#define MODE_HE BIT(4)
+#define MODE_EHT BIT(5)
#define STA_CAP_WMM BIT(0)
#define STA_CAP_SGI_20 BIT(4)
@@ -1171,6 +1176,7 @@ enum {
MCU_EXT_CMD_GET_MIB_INFO = 0x5a,
MCU_EXT_CMD_TXDPD_CAL = 0x60,
MCU_EXT_CMD_CAL_CACHE = 0x67,
+ MCU_EXT_CMD_RED_ENABLE = 0x68,
MCU_EXT_CMD_SET_RADAR_TH = 0x7c,
MCU_EXT_CMD_SET_RDD_PATTERN = 0x7d,
MCU_EXT_CMD_MWDS_SUPPORT = 0x80,
@@ -1198,7 +1204,8 @@ enum {
MCU_UNI_CMD_REPT_MUAR = 0x09,
MCU_UNI_CMD_WSYS_CONFIG = 0x0b,
MCU_UNI_CMD_REG_ACCESS = 0x0d,
- MCU_UNI_CMD_POWER_CREL = 0x0f,
+ MCU_UNI_CMD_CHIP_CONFIG = 0x0e,
+ MCU_UNI_CMD_POWER_CTRL = 0x0f,
MCU_UNI_CMD_RX_HDR_TRANS = 0x12,
MCU_UNI_CMD_SER = 0x13,
MCU_UNI_CMD_TWT = 0x14,
@@ -1238,6 +1245,7 @@ enum {
MCU_CE_CMD_TEST_CTRL = 0x01,
MCU_CE_CMD_START_HW_SCAN = 0x03,
MCU_CE_CMD_SET_PS_PROFILE = 0x05,
+ MCU_CE_CMD_SET_RX_FILTER = 0x0a,
MCU_CE_CMD_SET_CHAN_DOMAIN = 0x0f,
MCU_CE_CMD_SET_BSS_CONNECTED = 0x16,
MCU_CE_CMD_SET_BSS_ABORT = 0x17,
@@ -1730,7 +1738,7 @@ mt76_connac_mcu_gen_dl_mode(struct mt76_dev *dev, u8 feature_set, bool is_wa)
}
#define to_wcid_lo(id) FIELD_GET(GENMASK(7, 0), (u16)id)
-#define to_wcid_hi(id) FIELD_GET(GENMASK(9, 8), (u16)id)
+#define to_wcid_hi(id) FIELD_GET(GENMASK(10, 8), (u16)id)
static inline void
mt76_connac_mcu_get_wlan_idx(struct mt76_dev *dev, struct mt76_wcid *wcid,
@@ -1866,8 +1874,12 @@ void mt76_connac_mcu_reg_wr(struct mt76_dev *dev, u32 offset, u32 val);
const struct ieee80211_sta_he_cap *
mt76_connac_get_he_phy_cap(struct mt76_phy *phy, struct ieee80211_vif *vif);
+const struct ieee80211_sta_eht_cap *
+mt76_connac_get_eht_phy_cap(struct mt76_phy *phy, struct ieee80211_vif *vif);
u8 mt76_connac_get_phy_mode(struct mt76_phy *phy, struct ieee80211_vif *vif,
enum nl80211_band band, struct ieee80211_sta *sta);
+u8 mt76_connac_get_phy_mode_ext(struct mt76_phy *phy, struct ieee80211_vif *vif,
+ enum nl80211_band band);
int mt76_connac_mcu_add_key(struct mt76_dev *dev, struct ieee80211_vif *vif,
struct mt76_connac_sta_key_conf *sta_key_conf,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
index 6c6c8ada7943..d543ef3de65b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
@@ -642,7 +642,12 @@ mt76x0_phy_get_target_power(struct mt76x02_dev *dev, u8 tx_mode,
if (tx_rate > 9)
return -EINVAL;
- *target_power = cur_power + dev->rate_power.vht[tx_rate];
+ *target_power = cur_power;
+ if (tx_rate > 7)
+ *target_power += dev->rate_power.vht[tx_rate - 8];
+ else
+ *target_power += dev->rate_power.ht[tx_rate];
+
*target_pa_power = mt76x0_phy_get_rf_pa_mode(dev, 1, tx_rate);
break;
default:
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb_mcu.c
index 45502fd4693f..6dc1f51f5658 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/usb_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb_mcu.c
@@ -148,6 +148,7 @@ static int mt76x0u_load_firmware(struct mt76x02_dev *dev)
mt76_wr(dev, MT_USB_DMA_CFG, val);
ret = mt76x0u_upload_firmware(dev, hdr);
+ mt76x02_set_ethtool_fwver(dev, hdr);
release_firmware(fw);
mt76_wr(dev, MT_FCE_PSE_CTRL, 1);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
index 604ddcc21123..7451a63206a5 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
@@ -87,10 +87,9 @@ static const struct ieee80211_iface_combination mt76x02u_if_comb[] = {
};
static void
-mt76x02_led_set_config(struct mt76_dev *mdev, u8 delay_on,
- u8 delay_off)
+mt76x02_led_set_config(struct mt76_phy *mphy, u8 delay_on, u8 delay_off)
{
- struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev,
+ struct mt76x02_dev *dev = container_of(mphy->dev, struct mt76x02_dev,
mt76);
u32 val;
@@ -98,13 +97,13 @@ mt76x02_led_set_config(struct mt76_dev *mdev, u8 delay_on,
FIELD_PREP(MT_LED_STATUS_OFF, delay_off) |
FIELD_PREP(MT_LED_STATUS_ON, delay_on);
- mt76_wr(dev, MT_LED_S0(mdev->led_pin), val);
- mt76_wr(dev, MT_LED_S1(mdev->led_pin), val);
+ mt76_wr(dev, MT_LED_S0(mphy->leds.pin), val);
+ mt76_wr(dev, MT_LED_S1(mphy->leds.pin), val);
- val = MT_LED_CTRL_REPLAY(mdev->led_pin) |
- MT_LED_CTRL_KICK(mdev->led_pin);
- if (mdev->led_al)
- val |= MT_LED_CTRL_POLARITY(mdev->led_pin);
+ val = MT_LED_CTRL_REPLAY(mphy->leds.pin) |
+ MT_LED_CTRL_KICK(mphy->leds.pin);
+ if (mphy->leds.al)
+ val |= MT_LED_CTRL_POLARITY(mphy->leds.pin);
mt76_wr(dev, MT_LED_CTRL, val);
}
@@ -113,14 +112,14 @@ mt76x02_led_set_blink(struct led_classdev *led_cdev,
unsigned long *delay_on,
unsigned long *delay_off)
{
- struct mt76_dev *mdev = container_of(led_cdev, struct mt76_dev,
- led_cdev);
+ struct mt76_phy *mphy = container_of(led_cdev, struct mt76_phy,
+ leds.cdev);
u8 delta_on, delta_off;
delta_off = max_t(u8, *delay_off / 10, 1);
delta_on = max_t(u8, *delay_on / 10, 1);
- mt76x02_led_set_config(mdev, delta_on, delta_off);
+ mt76x02_led_set_config(mphy, delta_on, delta_off);
return 0;
}
@@ -129,13 +128,13 @@ static void
mt76x02_led_set_brightness(struct led_classdev *led_cdev,
enum led_brightness brightness)
{
- struct mt76_dev *mdev = container_of(led_cdev, struct mt76_dev,
- led_cdev);
+ struct mt76_phy *mphy = container_of(led_cdev, struct mt76_phy,
+ leds.cdev);
if (!brightness)
- mt76x02_led_set_config(mdev, 0, 0xff);
+ mt76x02_led_set_config(mphy, 0, 0xff);
else
- mt76x02_led_set_config(mdev, 0xff, 0);
+ mt76x02_led_set_config(mphy, 0xff, 0);
}
int mt76x02_init_device(struct mt76x02_dev *dev)
@@ -167,9 +166,9 @@ int mt76x02_init_device(struct mt76x02_dev *dev)
/* init led callbacks */
if (IS_ENABLED(CONFIG_MT76_LEDS)) {
- dev->mt76.led_cdev.brightness_set =
+ dev->mphy.leds.cdev.brightness_set =
mt76x02_led_set_brightness;
- dev->mt76.led_cdev.blink_set = mt76x02_led_set_blink;
+ dev->mphy.leds.cdev.blink_set = mt76x02_led_set_blink;
}
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
index fb46c2c1784f..5a46813a59ea 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
@@ -811,7 +811,7 @@ mt7915_hw_queue_read(struct seq_file *s, u32 size,
if (val & BIT(map[i].index))
continue;
- ctrl = BIT(31) | (map[i].pid << 10) | (map[i].qid << 24);
+ ctrl = BIT(31) | (map[i].pid << 10) | ((u32)map[i].qid << 24);
mt76_wr(dev, MT_FL_Q0_CTRL, ctrl);
head = mt76_get_field(dev, MT_FL_Q2_CTRL,
@@ -996,7 +996,7 @@ mt7915_rate_txpower_get(struct file *file, char __user *user_buf,
ret = mt7915_mcu_get_txpower_sku(phy, txpwr, sizeof(txpwr));
if (ret)
- return ret;
+ goto out;
/* Txpower propagation path: TMAC -> TXV -> BBP */
len += scnprintf(buf + len, sz - len,
@@ -1047,6 +1047,8 @@ mt7915_rate_txpower_get(struct file *file, char __user *user_buf,
mt76_get_field(dev, reg, MT_WF_PHY_TPC_POWER));
ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+
+out:
kfree(buf);
return ret;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/dma.c b/drivers/net/wireless/mediatek/mt76/mt7915/dma.c
index e3fa064918bf..abe17dac9996 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/dma.c
@@ -559,9 +559,32 @@ int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2)
return 0;
}
+static void mt7915_dma_wed_reset(struct mt7915_dev *dev)
+{
+ struct mt76_dev *mdev = &dev->mt76;
+
+ if (!test_bit(MT76_STATE_WED_RESET, &dev->mphy.state))
+ return;
+
+ complete(&mdev->mmio.wed_reset);
+
+ if (!wait_for_completion_timeout(&dev->mt76.mmio.wed_reset_complete,
+ 3 * HZ))
+ dev_err(dev->mt76.dev, "wed reset complete timeout\n");
+}
+
+static void
+mt7915_dma_reset_tx_queue(struct mt7915_dev *dev, struct mt76_queue *q)
+{
+ mt76_queue_reset(dev, q);
+ if (mtk_wed_device_active(&dev->mt76.mmio.wed))
+ mt76_dma_wed_setup(&dev->mt76, q, true);
+}
+
int mt7915_dma_reset(struct mt7915_dev *dev, bool force)
{
struct mt76_phy *mphy_ext = dev->mt76.phys[MT_BAND1];
+ struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
int i;
/* clean up hw queues */
@@ -581,28 +604,40 @@ int mt7915_dma_reset(struct mt7915_dev *dev, bool force)
if (force)
mt7915_wfsys_reset(dev);
+ if (mtk_wed_device_active(wed))
+ mtk_wed_device_dma_reset(wed);
+
mt7915_dma_disable(dev, force);
+ mt7915_dma_wed_reset(dev);
/* reset hw queues */
for (i = 0; i < __MT_TXQ_MAX; i++) {
- mt76_queue_reset(dev, dev->mphy.q_tx[i]);
+ mt7915_dma_reset_tx_queue(dev, dev->mphy.q_tx[i]);
if (mphy_ext)
- mt76_queue_reset(dev, mphy_ext->q_tx[i]);
+ mt7915_dma_reset_tx_queue(dev, mphy_ext->q_tx[i]);
}
for (i = 0; i < __MT_MCUQ_MAX; i++)
mt76_queue_reset(dev, dev->mt76.q_mcu[i]);
- mt76_for_each_q_rx(&dev->mt76, i)
+ mt76_for_each_q_rx(&dev->mt76, i) {
+ if (dev->mt76.q_rx[i].flags == MT_WED_Q_TXFREE)
+ continue;
+
mt76_queue_reset(dev, &dev->mt76.q_rx[i]);
+ }
mt76_tx_status_check(&dev->mt76, true);
- mt7915_dma_enable(dev);
-
mt76_for_each_q_rx(&dev->mt76, i)
mt76_queue_rx_reset(dev, i);
+ if (mtk_wed_device_active(wed) && is_mt7915(&dev->mt76))
+ mt76_rmw(dev, MT_WFDMA0_EXT0_CFG, MT_WFDMA0_EXT0_RXWB_KEEP,
+ MT_WFDMA0_EXT0_RXWB_KEEP);
+
+ mt7915_dma_enable(dev);
+
return 0;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c
index 59069fb86414..a79628933948 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c
@@ -33,11 +33,14 @@ static int mt7915_check_eeprom(struct mt7915_dev *dev)
u8 *eeprom = dev->mt76.eeprom.data;
u16 val = get_unaligned_le16(eeprom);
+#define CHECK_EEPROM_ERR(match) (match ? 0 : -EINVAL)
switch (val) {
case 0x7915:
+ return CHECK_EEPROM_ERR(is_mt7915(&dev->mt76));
case 0x7916:
+ return CHECK_EEPROM_ERR(is_mt7916(&dev->mt76));
case 0x7986:
- return 0;
+ return CHECK_EEPROM_ERR(is_mt7986(&dev->mt76));
default:
return -EINVAL;
}
@@ -110,18 +113,23 @@ static int mt7915_eeprom_load(struct mt7915_dev *dev)
} else {
u8 free_block_num;
u32 block_num, i;
+ u32 eeprom_blk_size = MT7915_EEPROM_BLOCK_SIZE;
+
+ ret = mt7915_mcu_get_eeprom_free_block(dev, &free_block_num);
+ if (ret < 0)
+ return ret;
- mt7915_mcu_get_eeprom_free_block(dev, &free_block_num);
- /* efuse info not enough */
+ /* efuse info isn't enough */
if (free_block_num >= 29)
return -EINVAL;
/* read eeprom data from efuse */
- block_num = DIV_ROUND_UP(eeprom_size,
- MT7915_EEPROM_BLOCK_SIZE);
- for (i = 0; i < block_num; i++)
- mt7915_mcu_get_eeprom(dev,
- i * MT7915_EEPROM_BLOCK_SIZE);
+ block_num = DIV_ROUND_UP(eeprom_size, eeprom_blk_size);
+ for (i = 0; i < block_num; i++) {
+ ret = mt7915_mcu_get_eeprom(dev, i * eeprom_blk_size);
+ if (ret < 0)
+ return ret;
+ }
}
return mt7915_check_eeprom(dev);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/init.c b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
index c810c31fbd6e..1ab768feccaa 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
@@ -38,8 +38,7 @@ static const struct ieee80211_iface_combination if_comb[] = {
BIT(NL80211_CHAN_WIDTH_20) |
BIT(NL80211_CHAN_WIDTH_40) |
BIT(NL80211_CHAN_WIDTH_80) |
- BIT(NL80211_CHAN_WIDTH_160) |
- BIT(NL80211_CHAN_WIDTH_80P80),
+ BIT(NL80211_CHAN_WIDTH_160),
}
};
@@ -83,9 +82,23 @@ static ssize_t mt7915_thermal_temp_store(struct device *dev,
mutex_lock(&phy->dev->mt76.mutex);
val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), 60, 130);
+
+ if ((i - 1 == MT7915_CRIT_TEMP_IDX &&
+ val > phy->throttle_temp[MT7915_MAX_TEMP_IDX]) ||
+ (i - 1 == MT7915_MAX_TEMP_IDX &&
+ val < phy->throttle_temp[MT7915_CRIT_TEMP_IDX])) {
+ dev_err(phy->dev->mt76.dev,
+ "temp1_max shall be greater than temp1_crit.");
+ return -EINVAL;
+ }
+
phy->throttle_temp[i - 1] = val;
mutex_unlock(&phy->dev->mt76.mutex);
+ ret = mt7915_mcu_set_thermal_protect(phy);
+ if (ret)
+ return ret;
+
return count;
}
@@ -131,11 +144,11 @@ mt7915_thermal_set_cur_throttle_state(struct thermal_cooling_device *cdev,
u8 throttling = MT7915_THERMAL_THROTTLE_MAX - state;
int ret;
- if (state > MT7915_CDEV_THROTTLE_MAX)
+ if (state > MT7915_CDEV_THROTTLE_MAX) {
+ dev_err(phy->dev->mt76.dev,
+ "please specify a valid throttling state\n");
return -EINVAL;
-
- if (phy->throttle_temp[0] > phy->throttle_temp[1])
- return 0;
+ }
if (state == phy->cdev_state)
return 0;
@@ -164,7 +177,7 @@ static void mt7915_unregister_thermal(struct mt7915_phy *phy)
struct wiphy *wiphy = phy->mt76->hw->wiphy;
if (!phy->cdev)
- return;
+ return;
sysfs_remove_link(&wiphy->dev.kobj, "cooling_device");
thermal_cooling_device_unregister(phy->cdev);
@@ -198,41 +211,41 @@ static int mt7915_thermal_init(struct mt7915_phy *phy)
return PTR_ERR(hwmon);
/* initialize critical/maximum high temperature */
- phy->throttle_temp[0] = 110;
- phy->throttle_temp[1] = 120;
+ phy->throttle_temp[MT7915_CRIT_TEMP_IDX] = MT7915_CRIT_TEMP;
+ phy->throttle_temp[MT7915_MAX_TEMP_IDX] = MT7915_MAX_TEMP;
- return mt7915_mcu_set_thermal_throttling(phy,
- MT7915_THERMAL_THROTTLE_MAX);
+ return 0;
}
static void mt7915_led_set_config(struct led_classdev *led_cdev,
u8 delay_on, u8 delay_off)
{
struct mt7915_dev *dev;
- struct mt76_dev *mt76;
+ struct mt76_phy *mphy;
u32 val;
- mt76 = container_of(led_cdev, struct mt76_dev, led_cdev);
- dev = container_of(mt76, struct mt7915_dev, mt76);
+ mphy = container_of(led_cdev, struct mt76_phy, leds.cdev);
+ dev = container_of(mphy->dev, struct mt7915_dev, mt76);
- /* select TX blink mode, 2: only data frames */
- mt76_rmw_field(dev, MT_TMAC_TCR0(0), MT_TMAC_TCR0_TX_BLINK, 2);
+ /* set PWM mode */
+ val = FIELD_PREP(MT_LED_STATUS_DURATION, 0xffff) |
+ FIELD_PREP(MT_LED_STATUS_OFF, delay_off) |
+ FIELD_PREP(MT_LED_STATUS_ON, delay_on);
+ mt76_wr(dev, MT_LED_STATUS_0(mphy->band_idx), val);
+ mt76_wr(dev, MT_LED_STATUS_1(mphy->band_idx), val);
/* enable LED */
- mt76_wr(dev, MT_LED_EN(0), 1);
-
- /* set LED Tx blink on/off time */
- val = FIELD_PREP(MT_LED_TX_BLINK_ON_MASK, delay_on) |
- FIELD_PREP(MT_LED_TX_BLINK_OFF_MASK, delay_off);
- mt76_wr(dev, MT_LED_TX_BLINK(0), val);
+ mt76_wr(dev, MT_LED_EN(mphy->band_idx), 1);
/* control LED */
- val = MT_LED_CTRL_BLINK_MODE | MT_LED_CTRL_KICK;
- if (dev->mt76.led_al)
+ val = MT_LED_CTRL_KICK;
+ if (dev->mphy.leds.al)
val |= MT_LED_CTRL_POLARITY;
+ if (mphy->band_idx)
+ val |= MT_LED_CTRL_BAND;
- mt76_wr(dev, MT_LED_CTRL(0), val);
- mt76_clear(dev, MT_LED_CTRL(0), MT_LED_CTRL_KICK);
+ mt76_wr(dev, MT_LED_CTRL(mphy->band_idx), val);
+ mt76_clear(dev, MT_LED_CTRL(mphy->band_idx), MT_LED_CTRL_KICK);
}
static int mt7915_led_set_blink(struct led_classdev *led_cdev,
@@ -319,9 +332,10 @@ mt7915_regd_notifier(struct wiphy *wiphy,
}
static void
-mt7915_init_wiphy(struct ieee80211_hw *hw)
+mt7915_init_wiphy(struct mt7915_phy *phy)
{
- struct mt7915_phy *phy = mt7915_hw_phy(hw);
+ struct mt76_phy *mphy = phy->mt76;
+ struct ieee80211_hw *hw = mphy->hw;
struct mt76_dev *mdev = &phy->dev->mt76;
struct wiphy *wiphy = hw->wiphy;
struct mt7915_dev *dev = phy->dev;
@@ -392,11 +406,6 @@ mt7915_init_wiphy(struct ieee80211_hw *hw)
phy->mt76->sband_5g.sband.vht_cap.cap |=
IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991 |
IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
-
- if (!dev->dbdc_support)
- phy->mt76->sband_5g.sband.vht_cap.cap |=
- IEEE80211_VHT_CAP_SHORT_GI_160 |
- IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ;
} else {
phy->mt76->sband_5g.sband.vht_cap.cap |=
IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 |
@@ -415,6 +424,12 @@ mt7915_init_wiphy(struct ieee80211_hw *hw)
wiphy->available_antennas_rx = phy->mt76->antenna_mask;
wiphy->available_antennas_tx = phy->mt76->antenna_mask;
+
+ /* init led callbacks */
+ if (IS_ENABLED(CONFIG_MT76_LEDS)) {
+ mphy->leds.cdev.brightness_set = mt7915_led_set_brightness;
+ mphy->leds.cdev.blink_set = mt7915_led_set_blink;
+ }
}
static void
@@ -473,6 +488,72 @@ mt7915_mac_init_band(struct mt7915_dev *dev, u8 band)
mt76_rmw(dev, MT_WTBLOFF_TOP_RSCR(band), mask, set);
}
+static void
+mt7915_init_led_mux(struct mt7915_dev *dev)
+{
+ if (!IS_ENABLED(CONFIG_MT76_LEDS))
+ return;
+
+ if (dev->dbdc_support) {
+ switch (mt76_chip(&dev->mt76)) {
+ case 0x7915:
+ mt76_rmw_field(dev, MT_LED_GPIO_MUX2,
+ GENMASK(11, 8), 4);
+ mt76_rmw_field(dev, MT_LED_GPIO_MUX3,
+ GENMASK(11, 8), 4);
+ break;
+ case 0x7986:
+ mt76_rmw_field(dev, MT_LED_GPIO_MUX0,
+ GENMASK(7, 4), 1);
+ mt76_rmw_field(dev, MT_LED_GPIO_MUX0,
+ GENMASK(11, 8), 1);
+ break;
+ case 0x7916:
+ mt76_rmw_field(dev, MT_LED_GPIO_MUX1,
+ GENMASK(27, 24), 3);
+ mt76_rmw_field(dev, MT_LED_GPIO_MUX1,
+ GENMASK(31, 28), 3);
+ break;
+ default:
+ break;
+ }
+ } else if (dev->mphy.leds.pin) {
+ switch (mt76_chip(&dev->mt76)) {
+ case 0x7915:
+ mt76_rmw_field(dev, MT_LED_GPIO_MUX3,
+ GENMASK(11, 8), 4);
+ break;
+ case 0x7986:
+ mt76_rmw_field(dev, MT_LED_GPIO_MUX0,
+ GENMASK(11, 8), 1);
+ break;
+ case 0x7916:
+ mt76_rmw_field(dev, MT_LED_GPIO_MUX1,
+ GENMASK(31, 28), 3);
+ break;
+ default:
+ break;
+ }
+ } else {
+ switch (mt76_chip(&dev->mt76)) {
+ case 0x7915:
+ mt76_rmw_field(dev, MT_LED_GPIO_MUX2,
+ GENMASK(11, 8), 4);
+ break;
+ case 0x7986:
+ mt76_rmw_field(dev, MT_LED_GPIO_MUX0,
+ GENMASK(7, 4), 1);
+ break;
+ case 0x7916:
+ mt76_rmw_field(dev, MT_LED_GPIO_MUX1,
+ GENMASK(27, 24), 3);
+ break;
+ default:
+ break;
+ }
+ }
+}
+
void mt7915_mac_init(struct mt7915_dev *dev)
{
int i;
@@ -497,10 +578,7 @@ void mt7915_mac_init(struct mt7915_dev *dev)
for (i = 0; i < 2; i++)
mt7915_mac_init_band(dev, i);
- if (IS_ENABLED(CONFIG_MT76_LEDS)) {
- i = dev->mt76.led_pin ? MT_LED_GPIO_MUX3 : MT_LED_GPIO_MUX2;
- mt76_rmw_field(dev, i, MT_LED_GPIO_SEL_MASK, 4);
- }
+ mt7915_init_led_mux(dev);
}
int mt7915_txbf_init(struct mt7915_dev *dev)
@@ -569,7 +647,7 @@ mt7915_register_ext_phy(struct mt7915_dev *dev, struct mt7915_phy *phy)
mt76_eeprom_override(mphy);
/* init wiphy according to mphy and phy */
- mt7915_init_wiphy(mphy->hw);
+ mt7915_init_wiphy(phy);
ret = mt76_register_phy(mphy, true, mt76_rates,
ARRAY_SIZE(mt76_rates));
@@ -763,13 +841,9 @@ mt7915_set_stream_he_txbf_caps(struct mt7915_phy *phy,
int sts = hweight8(phy->mt76->chainmask);
u8 c, sts_160 = sts;
- /* Can do 1/2 of STS in 160Mhz mode for mt7915 */
- if (is_mt7915(&dev->mt76)) {
- if (!dev->dbdc_support)
- sts_160 /= 2;
- else
- sts_160 = 0;
- }
+ /* mt7915 doesn't support bw160 */
+ if (is_mt7915(&dev->mt76))
+ sts_160 = 0;
#ifdef CONFIG_MAC80211_MESH
if (vif == NL80211_IFTYPE_MESH_POINT)
@@ -823,9 +897,6 @@ mt7915_set_stream_he_txbf_caps(struct mt7915_phy *phy,
elem->phy_cap_info[3] |= IEEE80211_HE_PHY_CAP3_SU_BEAMFORMER;
elem->phy_cap_info[4] |= IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER;
- /* num_snd_dim
- * for mt7915, max supported sts is 2 for bw > 80MHz and 0 if dbdc
- */
c = FIELD_PREP(IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK,
sts - 1);
if (sts_160)
@@ -873,15 +944,10 @@ mt7915_init_he_caps(struct mt7915_phy *phy, enum nl80211_band band,
int i, idx = 0, nss = hweight8(phy->mt76->antenna_mask);
u16 mcs_map = 0;
u16 mcs_map_160 = 0;
- u8 nss_160;
+ u8 nss_160 = nss;
- if (!is_mt7915(&dev->mt76))
- nss_160 = nss;
- else if (!dev->dbdc_support)
- /* Can do 1/2 of NSS streams in 160Mhz mode for mt7915 */
- nss_160 = nss / 2;
- else
- /* Can't do 160MHz with mt7915 dbdc */
+ /* Can't do 160MHz with mt7915 */
+ if (is_mt7915(&dev->mt76))
nss_160 = 0;
for (i = 0; i < 8; i++) {
@@ -931,8 +997,7 @@ mt7915_init_he_caps(struct mt7915_phy *phy, enum nl80211_band band,
else if (nss_160)
he_cap_elem->phy_cap_info[0] =
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G |
- IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G |
- IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G;
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G;
else
he_cap_elem->phy_cap_info[0] =
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G;
@@ -1004,12 +1069,11 @@ mt7915_init_he_caps(struct mt7915_phy *phy, enum nl80211_band band,
break;
}
+ memset(he_mcs, 0, sizeof(*he_mcs));
he_mcs->rx_mcs_80 = cpu_to_le16(mcs_map);
he_mcs->tx_mcs_80 = cpu_to_le16(mcs_map);
he_mcs->rx_mcs_160 = cpu_to_le16(mcs_map_160);
he_mcs->tx_mcs_160 = cpu_to_le16(mcs_map_160);
- he_mcs->rx_mcs_80p80 = cpu_to_le16(mcs_map_160);
- he_mcs->tx_mcs_80p80 = cpu_to_le16(mcs_map_160);
mt7915_set_stream_he_txbf_caps(phy, he_cap, i);
@@ -1101,10 +1165,8 @@ static void mt7915_stop_hardware(struct mt7915_dev *dev)
mt7986_wmac_disable(dev);
}
-
int mt7915_register_device(struct mt7915_dev *dev)
{
- struct ieee80211_hw *hw = mt76_hw(dev);
struct mt7915_phy *phy2;
int ret;
@@ -1133,18 +1195,12 @@ int mt7915_register_device(struct mt7915_dev *dev)
if (ret)
goto free_phy2;
- mt7915_init_wiphy(hw);
+ mt7915_init_wiphy(&dev->phy);
#ifdef CONFIG_NL80211_TESTMODE
dev->mt76.test_ops = &mt7915_testmode_ops;
#endif
- /* init led callbacks */
- if (IS_ENABLED(CONFIG_MT76_LEDS)) {
- dev->mt76.led_cdev.brightness_set = mt7915_led_set_brightness;
- dev->mt76.led_cdev.blink_set = mt7915_led_set_blink;
- }
-
ret = mt76_register_device(&dev->mt76, true, mt76_rates,
ARRAY_SIZE(mt76_rates));
if (ret)
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
index f0d5a3603902..97ca55d283fb 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
@@ -256,8 +256,7 @@ mt7915_wed_check_ppe(struct mt7915_dev *dev, struct mt76_queue *q,
if (!msta || !msta->vif)
return;
- if (!(q->flags & MT_QFLAG_WED) ||
- FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) != MT76_WED_Q_RX)
+ if (!mt76_queue_is_wed_rx(q))
return;
if (!(info & MT_DMA_INFO_PPE_VLD))
@@ -1061,9 +1060,6 @@ static void mt7915_mac_add_txs(struct mt7915_dev *dev, void *data)
u16 wcidx;
u8 pid;
- if (le32_get_bits(txs_data[0], MT_TXS0_TXS_FORMAT) > 1)
- return;
-
wcidx = le32_get_bits(txs_data[2], MT_TXS2_WCID);
pid = le32_get_bits(txs_data[3], MT_TXS3_PID);
@@ -1582,6 +1578,12 @@ void mt7915_mac_reset_work(struct work_struct *work)
if (!(READ_ONCE(dev->recovery.state) & MT_MCU_CMD_STOP_DMA))
return;
+ if (mtk_wed_device_active(&dev->mt76.mmio.wed)) {
+ mtk_wed_device_stop(&dev->mt76.mmio.wed);
+ if (!is_mt7986(&dev->mt76))
+ mt76_wr(dev, MT_INT_WED_MASK_CSR, 0);
+ }
+
ieee80211_stop_queues(mt76_hw(dev));
if (ext_phy)
ieee80211_stop_queues(ext_phy->hw);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/main.c b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
index 0511d6a505b0..3bbccbdfc5eb 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
@@ -57,6 +57,17 @@ int mt7915_run(struct ieee80211_hw *hw)
mt7915_mac_enable_nf(dev, phy->mt76->band_idx);
}
+ ret = mt7915_mcu_set_thermal_throttling(phy,
+ MT7915_THERMAL_THROTTLE_MAX);
+
+ if (ret)
+ goto out;
+
+ ret = mt7915_mcu_set_thermal_protect(phy);
+
+ if (ret)
+ goto out;
+
ret = mt76_connac_mcu_set_rts_thresh(&dev->mt76, 0x92b,
phy->mt76->band_idx);
if (ret)
@@ -1280,19 +1291,22 @@ void mt7915_get_et_strings(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
u32 sset, u8 *data)
{
- if (sset == ETH_SS_STATS)
- memcpy(data, *mt7915_gstrings_stats,
- sizeof(mt7915_gstrings_stats));
+ if (sset != ETH_SS_STATS)
+ return;
+
+ memcpy(data, *mt7915_gstrings_stats, sizeof(mt7915_gstrings_stats));
+ data += sizeof(mt7915_gstrings_stats);
+ page_pool_ethtool_stats_get_strings(data);
}
static
int mt7915_get_et_sset_count(struct ieee80211_hw *hw,
struct ieee80211_vif *vif, int sset)
{
- if (sset == ETH_SS_STATS)
- return MT7915_SSTATS_LEN;
+ if (sset != ETH_SS_STATS)
+ return 0;
- return 0;
+ return MT7915_SSTATS_LEN + page_pool_ethtool_stats_get_count();
}
static void mt7915_ethtool_worker(void *wi_data, struct ieee80211_sta *sta)
@@ -1303,7 +1317,7 @@ static void mt7915_ethtool_worker(void *wi_data, struct ieee80211_sta *sta)
if (msta->vif->mt76.idx != wi->idx)
return;
- mt76_ethtool_worker(wi, &msta->wcid.stats);
+ mt76_ethtool_worker(wi, &msta->wcid.stats, false);
}
static
@@ -1320,7 +1334,7 @@ void mt7915_get_et_stats(struct ieee80211_hw *hw,
};
struct mib_stats *mib = &phy->mib;
/* See mt7915_ampdu_stat_read_phy, etc */
- int i, ei = 0;
+ int i, ei = 0, stats_size;
mutex_lock(&dev->mt76.mutex);
@@ -1401,9 +1415,12 @@ void mt7915_get_et_stats(struct ieee80211_hw *hw,
return;
ei += wi.worker_stat_count;
- if (ei != MT7915_SSTATS_LEN)
- dev_err(dev->mt76.dev, "ei: %d MT7915_SSTATS_LEN: %d",
- ei, (int)MT7915_SSTATS_LEN);
+
+ mt76_ethtool_page_pool_stats(&dev->mt76, &data[ei], &ei);
+
+ stats_size = MT7915_SSTATS_LEN + page_pool_ethtool_stats_get_count();
+ if (ei != stats_size)
+ dev_err(dev->mt76.dev, "ei: %d size: %d", ei, stats_size);
}
static void
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
index b2652de082ba..5545a8bdf1d0 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
@@ -232,8 +232,11 @@ mt7915_mcu_rx_csa_notify(struct mt7915_dev *dev, struct sk_buff *skb)
c = (struct mt7915_mcu_csa_notify *)skb->data;
+ if (c->band_idx > MT_BAND1)
+ return;
+
if ((c->band_idx && !dev->phy.mt76->band_idx) &&
- dev->mt76.phys[MT_BAND1])
+ dev->mt76.phys[MT_BAND1])
mphy = dev->mt76.phys[MT_BAND1];
ieee80211_iterate_active_interfaces_atomic(mphy->hw,
@@ -252,8 +255,11 @@ mt7915_mcu_rx_thermal_notify(struct mt7915_dev *dev, struct sk_buff *skb)
if (t->ctrl.ctrl_id != THERMAL_PROTECT_ENABLE)
return;
+ if (t->ctrl.band_idx > MT_BAND1)
+ return;
+
if ((t->ctrl.band_idx && !dev->phy.mt76->band_idx) &&
- dev->mt76.phys[MT_BAND1])
+ dev->mt76.phys[MT_BAND1])
mphy = dev->mt76.phys[MT_BAND1];
phy = (struct mt7915_phy *)mphy->priv;
@@ -268,8 +274,11 @@ mt7915_mcu_rx_radar_detected(struct mt7915_dev *dev, struct sk_buff *skb)
r = (struct mt7915_mcu_rdd_report *)skb->data;
+ if (r->band_idx > MT_BAND1)
+ return;
+
if ((r->band_idx && !dev->phy.mt76->band_idx) &&
- dev->mt76.phys[MT_BAND1])
+ dev->mt76.phys[MT_BAND1])
mphy = dev->mt76.phys[MT_BAND1];
if (r->band_idx == MT_RX_SEL2)
@@ -326,7 +335,11 @@ mt7915_mcu_rx_bcc_notify(struct mt7915_dev *dev, struct sk_buff *skb)
b = (struct mt7915_mcu_bcc_notify *)skb->data;
- if ((b->band_idx && !dev->phy.mt76->band_idx) && dev->mt76.phys[MT_BAND1])
+ if (b->band_idx > MT_BAND1)
+ return;
+
+ if ((b->band_idx && !dev->phy.mt76->band_idx) &&
+ dev->mt76.phys[MT_BAND1])
mphy = dev->mt76.phys[MT_BAND1];
ieee80211_iterate_active_interfaces_atomic(mphy->hw,
@@ -2104,7 +2117,7 @@ static int mt7915_load_firmware(struct mt7915_dev *dev)
/* make sure fw is download state */
if (mt7915_firmware_state(dev, false)) {
/* restart firmware once */
- __mt76_mcu_restart(&dev->mt76);
+ mt76_connac_mcu_restart(&dev->mt76);
ret = mt7915_firmware_state(dev, false);
if (ret) {
dev_err(dev->mt76.dev,
@@ -2278,6 +2291,53 @@ mt7915_mcu_init_rx_airtime(struct mt7915_dev *dev)
sizeof(req), true);
}
+static int mt7915_red_set_watermark(struct mt7915_dev *dev)
+{
+#define RED_GLOBAL_TOKEN_WATERMARK 2
+ struct {
+ __le32 args[3];
+ u8 cmd;
+ u8 version;
+ u8 __rsv1[4];
+ __le16 len;
+ __le16 high_mark;
+ __le16 low_mark;
+ u8 __rsv2[12];
+ } __packed req = {
+ .args[0] = cpu_to_le32(MCU_WA_PARAM_RED_SETTING),
+ .cmd = RED_GLOBAL_TOKEN_WATERMARK,
+ .len = cpu_to_le16(sizeof(req) - sizeof(req.args)),
+ .high_mark = cpu_to_le16(MT7915_HW_TOKEN_SIZE - 256),
+ .low_mark = cpu_to_le16(MT7915_HW_TOKEN_SIZE - 256 - 1536),
+ };
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_WA_PARAM_CMD(SET), &req,
+ sizeof(req), false);
+}
+
+static int mt7915_mcu_set_red(struct mt7915_dev *dev, bool enabled)
+{
+#define RED_DISABLE 0
+#define RED_BY_WA_ENABLE 2
+ int ret;
+ u32 red_type = enabled ? RED_BY_WA_ENABLE : RED_DISABLE;
+ __le32 req = cpu_to_le32(red_type);
+
+ if (enabled) {
+ ret = mt7915_red_set_watermark(dev);
+ if (ret < 0)
+ return ret;
+ }
+
+ ret = mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(RED_ENABLE), &req,
+ sizeof(req), false);
+ if (ret < 0)
+ return ret;
+
+ return mt7915_mcu_wa_cmd(dev, MCU_WA_PARAM_CMD(SET),
+ MCU_WA_PARAM_RED, enabled, 0);
+}
+
int mt7915_mcu_init_firmware(struct mt7915_dev *dev)
{
int ret;
@@ -2326,8 +2386,7 @@ int mt7915_mcu_init_firmware(struct mt7915_dev *dev)
if (ret)
return ret;
- return mt7915_mcu_wa_cmd(dev, MCU_WA_PARAM_CMD(SET),
- MCU_WA_PARAM_RED, 0, 0);
+ return mt7915_mcu_set_red(dev, mtk_wed_device_active(&dev->mt76.mmio.wed));
}
int mt7915_mcu_init(struct mt7915_dev *dev)
@@ -2336,7 +2395,6 @@ int mt7915_mcu_init(struct mt7915_dev *dev)
.headroom = sizeof(struct mt76_connac2_mcu_txd),
.mcu_skb_send_msg = mt7915_mcu_send_message,
.mcu_parse_response = mt7915_mcu_parse_response,
- .mcu_restart = mt76_connac_mcu_restart,
};
dev->mt76.mcu_ops = &mt7915_mcu_ops;
@@ -2346,16 +2404,17 @@ int mt7915_mcu_init(struct mt7915_dev *dev)
void mt7915_mcu_exit(struct mt7915_dev *dev)
{
- __mt76_mcu_restart(&dev->mt76);
+ mt76_connac_mcu_restart(&dev->mt76);
if (mt7915_firmware_state(dev, false)) {
dev_err(dev->mt76.dev, "Failed to exit mcu\n");
- return;
+ goto out;
}
mt76_wr(dev, MT_TOP_LPCR_HOST_BAND(0), MT_TOP_LPCR_HOST_FW_OWN);
if (dev->hif2)
mt76_wr(dev, MT_TOP_LPCR_HOST_BAND(1),
MT_TOP_LPCR_HOST_FW_OWN);
+out:
skb_queue_purge(&dev->mt76.mcu.res_q);
}
@@ -2792,8 +2851,9 @@ int mt7915_mcu_get_eeprom(struct mt7915_dev *dev, u32 offset)
int ret;
u8 *buf;
- ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_EXT_QUERY(EFUSE_ACCESS), &req,
- sizeof(req), true, &skb);
+ ret = mt76_mcu_send_and_get_msg(&dev->mt76,
+ MCU_EXT_QUERY(EFUSE_ACCESS),
+ &req, sizeof(req), true, &skb);
if (ret)
return ret;
@@ -2818,8 +2878,9 @@ int mt7915_mcu_get_eeprom_free_block(struct mt7915_dev *dev, u8 *block_num)
struct sk_buff *skb;
int ret;
- ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_EXT_QUERY(EFUSE_FREE_BLOCK), &req,
- sizeof(req), true, &skb);
+ ret = mt76_mcu_send_and_get_msg(&dev->mt76,
+ MCU_EXT_QUERY(EFUSE_FREE_BLOCK),
+ &req, sizeof(req), true, &skb);
if (ret)
return ret;
@@ -2974,38 +3035,42 @@ int mt7915_mcu_apply_tx_dpd(struct mt7915_phy *phy)
int mt7915_mcu_get_chan_mib_info(struct mt7915_phy *phy, bool chan_switch)
{
- /* strict order */
- static const u32 offs[] = {
- MIB_NON_WIFI_TIME,
- MIB_TX_TIME,
- MIB_RX_TIME,
- MIB_OBSS_AIRTIME,
- MIB_TXOP_INIT_COUNT,
- /* v2 */
- MIB_NON_WIFI_TIME_V2,
- MIB_TX_TIME_V2,
- MIB_RX_TIME_V2,
- MIB_OBSS_AIRTIME_V2
- };
struct mt76_channel_state *state = phy->mt76->chan_state;
struct mt76_channel_state *state_ts = &phy->state_ts;
struct mt7915_dev *dev = phy->dev;
struct mt7915_mcu_mib *res, req[5];
struct sk_buff *skb;
- int i, ret, start = 0, ofs = 20;
+ static const u32 *offs;
+ int i, ret, len, offs_cc;
u64 cc_tx;
- if (!is_mt7915(&dev->mt76)) {
- start = 5;
- ofs = 0;
+ /* strict order */
+ if (is_mt7915(&dev->mt76)) {
+ static const u32 chip_offs[] = {
+ MIB_NON_WIFI_TIME,
+ MIB_TX_TIME,
+ MIB_RX_TIME,
+ MIB_OBSS_AIRTIME,
+ MIB_TXOP_INIT_COUNT,
+ };
+ len = ARRAY_SIZE(chip_offs);
+ offs = chip_offs;
+ offs_cc = 20;
+ } else {
+ static const u32 chip_offs[] = {
+ MIB_NON_WIFI_TIME_V2,
+ MIB_TX_TIME_V2,
+ MIB_RX_TIME_V2,
+ MIB_OBSS_AIRTIME_V2
+ };
+ len = ARRAY_SIZE(chip_offs);
+ offs = chip_offs;
+ offs_cc = 0;
}
- for (i = 0; i < 5; i++) {
+ for (i = 0; i < len; i++) {
req[i].band = cpu_to_le32(phy->mt76->band_idx);
- req[i].offs = cpu_to_le32(offs[i + start]);
-
- if (!is_mt7915(&dev->mt76) && i == 3)
- break;
+ req[i].offs = cpu_to_le32(offs[i]);
}
ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_EXT_CMD(GET_MIB_INFO),
@@ -3013,7 +3078,7 @@ int mt7915_mcu_get_chan_mib_info(struct mt7915_phy *phy, bool chan_switch)
if (ret)
return ret;
- res = (struct mt7915_mcu_mib *)(skb->data + ofs);
+ res = (struct mt7915_mcu_mib *)(skb->data + offs_cc);
#define __res_u64(s) le64_to_cpu(res[s].data)
/* subtract Tx backoff time from Tx duration */
@@ -3060,6 +3125,29 @@ int mt7915_mcu_get_temperature(struct mt7915_phy *phy)
int mt7915_mcu_set_thermal_throttling(struct mt7915_phy *phy, u8 state)
{
struct mt7915_dev *dev = phy->dev;
+ struct mt7915_mcu_thermal_ctrl req = {
+ .band_idx = phy->mt76->band_idx,
+ .ctrl_id = THERMAL_PROTECT_DUTY_CONFIG,
+ };
+ int level, ret;
+
+ /* set duty cycle and level */
+ for (level = 0; level < 4; level++) {
+ req.duty.duty_level = level;
+ req.duty.duty_cycle = state;
+ state /= 2;
+
+ ret = mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(THERMAL_PROT),
+ &req, sizeof(req), false);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+int mt7915_mcu_set_thermal_protect(struct mt7915_phy *phy)
+{
+ struct mt7915_dev *dev = phy->dev;
struct {
struct mt7915_mcu_thermal_ctrl ctrl;
@@ -3070,29 +3158,18 @@ int mt7915_mcu_set_thermal_throttling(struct mt7915_phy *phy, u8 state)
} __packed req = {
.ctrl = {
.band_idx = phy->mt76->band_idx,
+ .type.protect_type = 1,
+ .type.trigger_type = 1,
},
};
- int level;
-
- if (!state) {
- req.ctrl.ctrl_id = THERMAL_PROTECT_DISABLE;
- goto out;
- }
-
- /* set duty cycle and level */
- for (level = 0; level < 4; level++) {
- int ret;
+ int ret;
- req.ctrl.ctrl_id = THERMAL_PROTECT_DUTY_CONFIG;
- req.ctrl.duty.duty_level = level;
- req.ctrl.duty.duty_cycle = state;
- state /= 2;
+ req.ctrl.ctrl_id = THERMAL_PROTECT_DISABLE;
+ ret = mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(THERMAL_PROT),
+ &req, sizeof(req.ctrl), false);
- ret = mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(THERMAL_PROT),
- &req, sizeof(req.ctrl), false);
- if (ret)
- return ret;
- }
+ if (ret)
+ return ret;
/* set high-temperature trigger threshold */
req.ctrl.ctrl_id = THERMAL_PROTECT_ENABLE;
@@ -3101,10 +3178,6 @@ int mt7915_mcu_set_thermal_throttling(struct mt7915_phy *phy, u8 state)
req.trigger_temp = cpu_to_le32(phy->throttle_temp[1]);
req.sustain_time = cpu_to_le16(10);
-out:
- req.ctrl.type.protect_type = 1;
- req.ctrl.type.trigger_type = 1;
-
return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(THERMAL_PROT),
&req, sizeof(req), false);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
index 29b5434bfdb8..b9ea297f382c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
@@ -278,6 +278,7 @@ enum {
MCU_WA_PARAM_PDMA_RX = 0x04,
MCU_WA_PARAM_CPU_UTIL = 0x0b,
MCU_WA_PARAM_RED = 0x0e,
+ MCU_WA_PARAM_RED_SETTING = 0x40,
};
enum mcu_mmps_mode {
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
index 8388e2a65853..225a19604d3e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
@@ -4,10 +4,12 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/rtnetlink.h>
#include <linux/pci.h>
#include "mt7915.h"
#include "mac.h"
+#include "mcu.h"
#include "../trace.h"
#include "../dma.h"
@@ -495,7 +497,7 @@ static u32 __mt7915_reg_addr(struct mt7915_dev *dev, u32 addr)
if (dev_is_pci(dev->mt76.dev) &&
((addr >= MT_CBTOP1_PHY_START && addr <= MT_CBTOP1_PHY_END) ||
- (addr >= MT_CBTOP2_PHY_START && addr <= MT_CBTOP2_PHY_END)))
+ addr >= MT_CBTOP2_PHY_START))
return mt7915_reg_map_l1(dev, addr);
/* CONN_INFRA: covert to phyiscal addr and use layer 1 remap */
@@ -594,7 +596,6 @@ static void mt7915_mmio_wed_offload_disable(struct mtk_wed_device *wed)
static void mt7915_mmio_wed_release_rx_buf(struct mtk_wed_device *wed)
{
struct mt7915_dev *dev;
- struct page *page;
int i;
dev = container_of(wed, struct mt7915_dev, mt76.mmio.wed);
@@ -605,58 +606,50 @@ static void mt7915_mmio_wed_release_rx_buf(struct mtk_wed_device *wed)
if (!t || !t->ptr)
continue;
- dma_unmap_single(dev->mt76.dma_dev, t->dma_addr,
- wed->wlan.rx_size, DMA_FROM_DEVICE);
- skb_free_frag(t->ptr);
+ mt76_put_page_pool_buf(t->ptr, false);
t->ptr = NULL;
mt76_put_rxwi(&dev->mt76, t);
}
- if (!wed->rx_buf_ring.rx_page.va)
- return;
-
- page = virt_to_page(wed->rx_buf_ring.rx_page.va);
- __page_frag_cache_drain(page, wed->rx_buf_ring.rx_page.pagecnt_bias);
- memset(&wed->rx_buf_ring.rx_page, 0, sizeof(wed->rx_buf_ring.rx_page));
+ mt76_free_pending_rxwi(&dev->mt76);
}
static u32 mt7915_mmio_wed_init_rx_buf(struct mtk_wed_device *wed, int size)
{
struct mtk_rxbm_desc *desc = wed->rx_buf_ring.desc;
+ struct mt76_txwi_cache *t = NULL;
struct mt7915_dev *dev;
- u32 length;
- int i;
+ struct mt76_queue *q;
+ int i, len;
dev = container_of(wed, struct mt7915_dev, mt76.mmio.wed);
- length = SKB_DATA_ALIGN(NET_SKB_PAD + wed->wlan.rx_size +
- sizeof(struct skb_shared_info));
+ q = &dev->mt76.q_rx[MT_RXQ_MAIN];
+ len = SKB_WITH_OVERHEAD(q->buf_size);
for (i = 0; i < size; i++) {
- struct mt76_txwi_cache *t = mt76_get_rxwi(&dev->mt76);
- dma_addr_t phy_addr;
+ enum dma_data_direction dir;
+ dma_addr_t addr;
+ u32 offset;
int token;
- void *ptr;
+ void *buf;
- ptr = page_frag_alloc(&wed->rx_buf_ring.rx_page, length,
- GFP_KERNEL);
- if (!ptr)
+ t = mt76_get_rxwi(&dev->mt76);
+ if (!t)
goto unmap;
- phy_addr = dma_map_single(dev->mt76.dma_dev, ptr,
- wed->wlan.rx_size,
- DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(dev->mt76.dev, phy_addr))) {
- skb_free_frag(ptr);
+ buf = mt76_get_page_pool_buf(q, &offset, q->buf_size);
+ if (!buf)
goto unmap;
- }
- desc->buf0 = cpu_to_le32(phy_addr);
- token = mt76_rx_token_consume(&dev->mt76, ptr, t, phy_addr);
+ addr = page_pool_get_dma_addr(virt_to_head_page(buf)) + offset;
+ dir = page_pool_get_dma_dir(q->page_pool);
+ dma_sync_single_for_device(dev->mt76.dma_dev, addr, len, dir);
+
+ desc->buf0 = cpu_to_le32(addr);
+ token = mt76_rx_token_consume(&dev->mt76, buf, t, addr);
if (token < 0) {
- dma_unmap_single(dev->mt76.dma_dev, phy_addr,
- wed->wlan.rx_size, DMA_TO_DEVICE);
- skb_free_frag(ptr);
+ mt76_put_page_pool_buf(buf, false);
goto unmap;
}
@@ -668,6 +661,8 @@ static u32 mt7915_mmio_wed_init_rx_buf(struct mtk_wed_device *wed, int size)
return 0;
unmap:
+ if (t)
+ mt76_put_rxwi(&dev->mt76, t);
mt7915_mmio_wed_release_rx_buf(wed);
return -ENOMEM;
}
@@ -696,6 +691,42 @@ static void mt7915_mmio_wed_update_rx_stats(struct mtk_wed_device *wed,
rcu_read_unlock();
}
+
+static int mt7915_mmio_wed_reset(struct mtk_wed_device *wed)
+{
+ struct mt76_dev *mdev = container_of(wed, struct mt76_dev, mmio.wed);
+ struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
+ struct mt76_phy *mphy = &dev->mphy;
+ int ret;
+
+ ASSERT_RTNL();
+
+ if (test_and_set_bit(MT76_STATE_WED_RESET, &mphy->state))
+ return -EBUSY;
+
+ ret = mt7915_mcu_set_ser(dev, SER_RECOVER, SER_SET_RECOVER_L1,
+ mphy->band_idx);
+ if (ret)
+ goto out;
+
+ rtnl_unlock();
+ if (!wait_for_completion_timeout(&mdev->mmio.wed_reset, 20 * HZ)) {
+ dev_err(mdev->dev, "wed reset timeout\n");
+ ret = -ETIMEDOUT;
+ }
+ rtnl_lock();
+out:
+ clear_bit(MT76_STATE_WED_RESET, &mphy->state);
+
+ return ret;
+}
+
+static void mt7915_mmio_wed_reset_complete(struct mtk_wed_device *wed)
+{
+ struct mt76_dev *dev = container_of(wed, struct mt76_dev, mmio.wed);
+
+ complete(&dev->mmio.wed_reset_complete);
+}
#endif
int mt7915_mmio_wed_init(struct mt7915_dev *dev, void *pdev_ptr,
@@ -751,7 +782,7 @@ int mt7915_mmio_wed_init(struct mt7915_dev *dev, void *pdev_ptr,
wed->wlan.wpdma_rx_glo = res->start + MT_WPDMA_GLO_CFG;
wed->wlan.wpdma_rx = res->start + MT_RXQ_WED_DATA_RING_BASE;
}
- wed->wlan.nbuf = 4096;
+ wed->wlan.nbuf = MT7915_HW_TOKEN_SIZE;
wed->wlan.tx_tbit[0] = is_mt7915(&dev->mt76) ? 4 : 30;
wed->wlan.tx_tbit[1] = is_mt7915(&dev->mt76) ? 5 : 31;
wed->wlan.txfree_tbit = is_mt7986(&dev->mt76) ? 2 : 1;
@@ -778,6 +809,8 @@ int mt7915_mmio_wed_init(struct mt7915_dev *dev, void *pdev_ptr,
wed->wlan.init_rx_buf = mt7915_mmio_wed_init_rx_buf;
wed->wlan.release_rx_buf = mt7915_mmio_wed_release_rx_buf;
wed->wlan.update_wo_rx_stats = mt7915_mmio_wed_update_rx_stats;
+ wed->wlan.reset = mt7915_mmio_wed_reset;
+ wed->wlan.reset_complete = mt7915_mmio_wed_reset_complete;
dev->mt76.rx_token_size = wed->wlan.rx_npkt;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
index 6351feba6bdf..3cbfb9b6a305 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
@@ -53,6 +53,7 @@
#define MT7916_EEPROM_SIZE 4096
#define MT7915_EEPROM_BLOCK_SIZE 16
+#define MT7915_HW_TOKEN_SIZE 4096
#define MT7915_TOKEN_SIZE 8192
#define MT7915_CFEND_RATE_DEFAULT 0x49 /* OFDM 24M */
@@ -70,6 +71,11 @@
#define MT7915_WED_RX_TOKEN_SIZE 12288
+#define MT7915_CRIT_TEMP_IDX 0
+#define MT7915_MAX_TEMP_IDX 1
+#define MT7915_CRIT_TEMP 110
+#define MT7915_MAX_TEMP 120
+
struct mt7915_vif;
struct mt7915_sta;
struct mt7915_dfs_pulse;
@@ -543,6 +549,7 @@ int mt7915_mcu_apply_tx_dpd(struct mt7915_phy *phy);
int mt7915_mcu_get_chan_mib_info(struct mt7915_phy *phy, bool chan_switch);
int mt7915_mcu_get_temperature(struct mt7915_phy *phy);
int mt7915_mcu_set_thermal_throttling(struct mt7915_phy *phy, u8 state);
+int mt7915_mcu_set_thermal_protect(struct mt7915_phy *phy);
int mt7915_mcu_get_rx_rate(struct mt7915_phy *phy, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, struct rate_info *rate);
int mt7915_mcu_rdd_background_enable(struct mt7915_phy *phy,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/regs.h b/drivers/net/wireless/mediatek/mt76/mt7915/regs.h
index aca1b2f1e9e3..c8e478a55081 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/regs.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/regs.h
@@ -803,7 +803,6 @@ enum offs_rev {
#define MT_CBTOP1_PHY_START 0x70000000
#define MT_CBTOP1_PHY_END __REG(CBTOP1_PHY_END)
#define MT_CBTOP2_PHY_START 0xf0000000
-#define MT_CBTOP2_PHY_END 0xffffffff
#define MT_INFRA_MCU_START 0x7c000000
#define MT_INFRA_MCU_END __REG(INFRA_MCU_ADDR_END)
#define MT_CONN_INFRA_OFFSET(p) ((p) - MT_INFRA_BASE)
@@ -1055,6 +1054,7 @@ enum offs_rev {
#define MT_LED_CTRL(_n) MT_LED_PHYS(0x00 + ((_n) * 4))
#define MT_LED_CTRL_KICK BIT(7)
+#define MT_LED_CTRL_BAND BIT(4)
#define MT_LED_CTRL_BLINK_MODE BIT(2)
#define MT_LED_CTRL_POLARITY BIT(1)
@@ -1062,11 +1062,18 @@ enum offs_rev {
#define MT_LED_TX_BLINK_ON_MASK GENMASK(7, 0)
#define MT_LED_TX_BLINK_OFF_MASK GENMASK(15, 8)
+#define MT_LED_STATUS_0(_n) MT_LED_PHYS(0x20 + ((_n) * 8))
+#define MT_LED_STATUS_1(_n) MT_LED_PHYS(0x24 + ((_n) * 8))
+#define MT_LED_STATUS_OFF GENMASK(31, 24)
+#define MT_LED_STATUS_ON GENMASK(23, 16)
+#define MT_LED_STATUS_DURATION GENMASK(15, 0)
+
#define MT_LED_EN(_n) MT_LED_PHYS(0x40 + ((_n) * 4))
+#define MT_LED_GPIO_MUX0 0x70005050 /* GPIO 1 and GPIO 2 */
+#define MT_LED_GPIO_MUX1 0x70005054 /* GPIO 14 and 15 */
#define MT_LED_GPIO_MUX2 0x70005058 /* GPIO 18 */
-#define MT_LED_GPIO_MUX3 0x7000505C /* GPIO 26 */
-#define MT_LED_GPIO_SEL_MASK GENMASK(11, 8)
+#define MT_LED_GPIO_MUX3 0x7000505c /* GPIO 26 */
/* MT TOP */
#define MT_TOP_BASE 0x18060000
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/soc.c b/drivers/net/wireless/mediatek/mt76/mt7915/soc.c
index c06c56a0270d..2ac0a0f2859c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/soc.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/soc.c
@@ -278,6 +278,7 @@ static int mt7986_wmac_coninfra_setup(struct mt7915_dev *dev)
return -EINVAL;
rmem = of_reserved_mem_lookup(np);
+ of_node_put(np);
if (!rmem)
return -EINVAL;
@@ -882,6 +883,8 @@ static int mt7986_wmac_wm_enable(struct mt7915_dev *dev, bool enable)
{
u32 cur;
+ mt76_wr(dev, MT_CONNINFRA_SKU_DEC_ADDR, 0);
+
mt76_rmw_field(dev, MT7986_TOP_WM_RESET,
MT7986_TOP_WM_RESET_MASK, enable);
if (!enable)
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/acpi_sar.c b/drivers/net/wireless/mediatek/mt76/mt7921/acpi_sar.c
index 47e034a9b003..48dd0decac5d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/acpi_sar.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/acpi_sar.c
@@ -33,14 +33,17 @@ mt7921_acpi_read(struct mt7921_dev *dev, u8 *method, u8 **tbl, u32 *len)
sar_root->package.elements[0].type != ACPI_TYPE_INTEGER) {
dev_err(mdev->dev, "sar cnt = %d\n",
sar_root->package.count);
+ ret = -EINVAL;
goto free;
}
if (!*tbl) {
*tbl = devm_kzalloc(mdev->dev, sar_root->package.count,
GFP_KERNEL);
- if (!*tbl)
+ if (!*tbl) {
+ ret = -ENOMEM;
goto free;
+ }
}
if (len)
*len = sar_root->package.count;
@@ -52,9 +55,9 @@ mt7921_acpi_read(struct mt7921_dev *dev, u8 *method, u8 **tbl, u32 *len)
break;
*(*tbl + i) = (u8)sar_unit->integer.value;
}
-free:
ret = (i == sar_root->package.count) ? 0 : -EINVAL;
+free:
kfree(sar_root);
return ret;
@@ -135,6 +138,22 @@ mt7921_asar_acpi_read_mtgs(struct mt7921_dev *dev, u8 **table, u8 version)
return ret;
}
+/* MTFG : Flag Table */
+static int
+mt7921_asar_acpi_read_mtfg(struct mt7921_dev *dev, u8 **table)
+{
+ int len, ret;
+
+ ret = mt7921_acpi_read(dev, MT7921_ACPI_MTFG, table, &len);
+ if (ret)
+ return ret;
+
+ if (len < MT7921_ASAR_MIN_FG)
+ ret = -EINVAL;
+
+ return ret;
+}
+
int mt7921_init_acpi_sar(struct mt7921_dev *dev)
{
struct mt7921_acpi_sar *asar;
@@ -162,6 +181,12 @@ int mt7921_init_acpi_sar(struct mt7921_dev *dev)
asar->geo = NULL;
}
+ /* MTFG is optional */
+ ret = mt7921_asar_acpi_read_mtfg(dev, (u8 **)&asar->fg);
+ if (ret) {
+ devm_kfree(dev->mt76.dev, asar->fg);
+ asar->fg = NULL;
+ }
dev->phy.acpisar = asar;
return 0;
@@ -280,3 +305,36 @@ int mt7921_init_acpi_sar_power(struct mt7921_phy *phy, bool set_default)
return 0;
}
+
+u8 mt7921_acpi_get_flags(struct mt7921_phy *phy)
+{
+ struct mt7921_asar_fg *fg;
+ struct {
+ u8 acpi_idx;
+ u8 chip_idx;
+ } map[] = {
+ {1, 1},
+ {4, 2},
+ };
+ u8 flags = BIT(0);
+ int i, j;
+
+ if (!phy->acpisar)
+ return 0;
+
+ fg = phy->acpisar->fg;
+ if (!fg)
+ return flags;
+
+ /* pickup necessary settings per device and
+ * translate the index of bitmap for chip command.
+ */
+ for (i = 0; i < fg->nr_flag; i++)
+ for (j = 0; j < ARRAY_SIZE(map); j++)
+ if (fg->flag[i] == map[j].acpi_idx) {
+ flags |= BIT(map[j].chip_idx);
+ break;
+ }
+
+ return flags;
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/acpi_sar.h b/drivers/net/wireless/mediatek/mt76/mt7921/acpi_sar.h
index 23f86bfae0c0..35268b0890ad 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/acpi_sar.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/acpi_sar.h
@@ -8,10 +8,12 @@
#define MT7921_ASAR_MAX_DYN 8
#define MT7921_ASAR_MIN_GEO 3
#define MT7921_ASAR_MAX_GEO 8
+#define MT7921_ASAR_MIN_FG 8
#define MT7921_ACPI_MTCL "MTCL"
#define MT7921_ACPI_MTDS "MTDS"
#define MT7921_ACPI_MTGS "MTGS"
+#define MT7921_ACPI_MTFG "MTFG"
struct mt7921_asar_dyn_limit {
u8 idx;
@@ -77,6 +79,15 @@ struct mt7921_asar_cl {
u8 cl6g[6];
} __packed;
+struct mt7921_asar_fg {
+ u8 names[4];
+ u8 version;
+ u8 rsvd;
+ u8 nr_flag;
+ u8 rsvd1;
+ u8 flag[0];
+} __packed;
+
struct mt7921_acpi_sar {
u8 ver;
union {
@@ -88,6 +99,7 @@ struct mt7921_acpi_sar {
struct mt7921_asar_geo_v2 *geo_v2;
};
struct mt7921_asar_cl *countrylist;
+ struct mt7921_asar_fg *fg;
};
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/init.c b/drivers/net/wireless/mediatek/mt76/mt7921/init.c
index 542dfd425129..80c71acfe159 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/init.c
@@ -120,6 +120,7 @@ mt7921_init_wiphy(struct ieee80211_hw *hw)
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_BEACON_RATE_HT);
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_BEACON_RATE_VHT);
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_BEACON_RATE_HE);
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_ACK_SIGNAL_SUPPORT);
ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS);
ieee80211_hw_set(hw, HAS_RATE_CONTROL);
@@ -142,6 +143,8 @@ mt7921_init_wiphy(struct ieee80211_hw *hw)
static void
mt7921_mac_init_band(struct mt7921_dev *dev, u8 band)
{
+ u32 mask, set;
+
mt76_rmw_field(dev, MT_TMAC_CTCR0(band),
MT_TMAC_CTCR0_INS_DDLMT_REFTIME, 0x3f);
mt76_set(dev, MT_TMAC_CTCR0(band),
@@ -158,6 +161,12 @@ mt7921_mac_init_band(struct mt7921_dev *dev, u8 band)
mt76_rmw_field(dev, MT_DMA_DCR0(band), MT_DMA_DCR0_MAX_RX_LEN, 1536);
/* disable rx rate report by default due to hw issues */
mt76_clear(dev, MT_DMA_DCR0(band), MT_DMA_DCR0_RXD_G5_EN);
+
+ /* filter out non-resp frames and get instantaneous signal reporting */
+ mask = MT_WTBLOFF_TOP_RSCR_RCPI_MODE | MT_WTBLOFF_TOP_RSCR_RCPI_PARAM;
+ set = FIELD_PREP(MT_WTBLOFF_TOP_RSCR_RCPI_MODE, 0) |
+ FIELD_PREP(MT_WTBLOFF_TOP_RSCR_RCPI_PARAM, 0x3);
+ mt76_rmw(dev, MT_WTBLOFF_TOP_RSCR(band), mask, set);
}
u8 mt7921_check_offload_capability(struct device *dev, const char *fw_wm)
@@ -175,7 +184,7 @@ u8 mt7921_check_offload_capability(struct device *dev, const char *fw_wm)
if (!fw || !fw->data || fw->size < sizeof(*hdr)) {
dev_err(dev, "Invalid firmware\n");
- return -EINVAL;
+ goto out;
}
data = fw->data;
@@ -206,6 +215,7 @@ u8 mt7921_check_offload_capability(struct device *dev, const char *fw_wm)
data += le16_to_cpu(rel_info->len) + rel_info->pad_len;
}
+out:
release_firmware(fw);
return features ? features->data : 0;
@@ -228,8 +238,6 @@ int mt7921_mac_init(struct mt7921_dev *dev)
for (i = 0; i < 2; i++)
mt7921_mac_init_band(dev, i);
- dev->mt76.rxfilter = mt76_rr(dev, MT_WF_RFCR(0));
-
return mt76_connac_mcu_set_rts_thresh(&dev->mt76, 0x92b, 0);
}
EXPORT_SYMBOL_GPL(mt7921_mac_init);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
index 82db3762be33..557c20190c2b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
@@ -59,6 +59,7 @@ void mt7921_mac_sta_poll(struct mt7921_dev *dev)
u32 tx_time[IEEE80211_NUM_ACS], rx_time[IEEE80211_NUM_ACS];
LIST_HEAD(sta_poll_list);
struct rate_info *rate;
+ s8 rssi[4];
int i;
spin_lock_bh(&dev->sta_poll_lock);
@@ -160,6 +161,20 @@ void mt7921_mac_sta_poll(struct mt7921_dev *dev)
else
rate->flags &= ~RATE_INFO_FLAGS_SHORT_GI;
}
+
+ /* get signal strength of resp frames (CTS/BA/ACK) */
+ addr = mt7921_mac_wtbl_lmac_addr(idx, 30);
+ val = mt76_rr(dev, addr);
+
+ rssi[0] = to_rssi(GENMASK(7, 0), val);
+ rssi[1] = to_rssi(GENMASK(15, 8), val);
+ rssi[2] = to_rssi(GENMASK(23, 16), val);
+ rssi[3] = to_rssi(GENMASK(31, 14), val);
+
+ msta->ack_signal =
+ mt76_rx_signal(msta->vif->phy->mt76->antenna_mask, rssi);
+
+ ewma_avg_signal_add(&msta->avg_ack_signal, -msta->ack_signal);
}
}
EXPORT_SYMBOL_GPL(mt7921_mac_sta_poll);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
index 76ac5069638f..75eaf86c6a78 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
@@ -422,15 +422,15 @@ void mt7921_roc_timer(struct timer_list *timer)
static int mt7921_abort_roc(struct mt7921_phy *phy, struct mt7921_vif *vif)
{
- int err;
-
- if (!test_and_clear_bit(MT76_STATE_ROC, &phy->mt76->state))
- return 0;
+ int err = 0;
del_timer_sync(&phy->roc_timer);
cancel_work_sync(&phy->roc_work);
- err = mt7921_mcu_abort_roc(phy, vif, phy->roc_token_id);
- clear_bit(MT76_STATE_ROC, &phy->mt76->state);
+
+ mt7921_mutex_acquire(phy->dev);
+ if (test_and_clear_bit(MT76_STATE_ROC, &phy->mt76->state))
+ err = mt7921_mcu_abort_roc(phy, vif, phy->roc_token_id);
+ mt7921_mutex_release(phy->dev);
return err;
}
@@ -487,13 +487,8 @@ static int mt7921_cancel_remain_on_channel(struct ieee80211_hw *hw,
{
struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
struct mt7921_phy *phy = mt7921_hw_phy(hw);
- int err;
- mt7921_mutex_acquire(phy->dev);
- err = mt7921_abort_roc(phy, mvif);
- mt7921_mutex_release(phy->dev);
-
- return err;
+ return mt7921_abort_roc(phy, mvif);
}
static int mt7921_set_channel(struct mt7921_phy *phy)
@@ -681,7 +676,6 @@ static int mt7921_config(struct ieee80211_hw *hw, u32 changed)
ieee80211_iterate_active_interfaces(hw,
IEEE80211_IFACE_ITER_RESUME_ALL,
mt7921_sniffer_interface_iter, dev);
- dev->mt76.rxfilter = mt76_rr(dev, MT_WF_RFCR(0));
}
out:
@@ -710,53 +704,12 @@ static void mt7921_configure_filter(struct ieee80211_hw *hw,
u64 multicast)
{
struct mt7921_dev *dev = mt7921_hw_dev(hw);
- u32 ctl_flags = MT_WF_RFCR1_DROP_ACK |
- MT_WF_RFCR1_DROP_BF_POLL |
- MT_WF_RFCR1_DROP_BA |
- MT_WF_RFCR1_DROP_CFEND |
- MT_WF_RFCR1_DROP_CFACK;
- u32 flags = 0;
-
-#define MT76_FILTER(_flag, _hw) do { \
- flags |= *total_flags & FIF_##_flag; \
- dev->mt76.rxfilter &= ~(_hw); \
- dev->mt76.rxfilter |= !(flags & FIF_##_flag) * (_hw); \
- } while (0)
mt7921_mutex_acquire(dev);
-
- dev->mt76.rxfilter &= ~(MT_WF_RFCR_DROP_OTHER_BSS |
- MT_WF_RFCR_DROP_OTHER_BEACON |
- MT_WF_RFCR_DROP_FRAME_REPORT |
- MT_WF_RFCR_DROP_PROBEREQ |
- MT_WF_RFCR_DROP_MCAST_FILTERED |
- MT_WF_RFCR_DROP_MCAST |
- MT_WF_RFCR_DROP_BCAST |
- MT_WF_RFCR_DROP_DUPLICATE |
- MT_WF_RFCR_DROP_A2_BSSID |
- MT_WF_RFCR_DROP_UNWANTED_CTL |
- MT_WF_RFCR_DROP_STBC_MULTI);
-
- MT76_FILTER(OTHER_BSS, MT_WF_RFCR_DROP_OTHER_TIM |
- MT_WF_RFCR_DROP_A3_MAC |
- MT_WF_RFCR_DROP_A3_BSSID);
-
- MT76_FILTER(FCSFAIL, MT_WF_RFCR_DROP_FCSFAIL);
-
- MT76_FILTER(CONTROL, MT_WF_RFCR_DROP_CTS |
- MT_WF_RFCR_DROP_RTS |
- MT_WF_RFCR_DROP_CTL_RSV |
- MT_WF_RFCR_DROP_NDPA);
-
- *total_flags = flags;
- mt76_wr(dev, MT_WF_RFCR(0), dev->mt76.rxfilter);
-
- if (*total_flags & FIF_CONTROL)
- mt76_clear(dev, MT_WF_RFCR1(0), ctl_flags);
- else
- mt76_set(dev, MT_WF_RFCR1(0), ctl_flags);
-
+ mt7921_mcu_set_rxfilter(dev, *total_flags, 0, 0);
mt7921_mutex_release(dev);
+
+ *total_flags &= (FIF_OTHER_BSS | FIF_FCSFAIL | FIF_CONTROL);
}
static void mt7921_bss_info_changed(struct ieee80211_hw *hw,
@@ -860,6 +813,8 @@ void mt7921_mac_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif,
mt76_connac_mcu_uni_add_bss(&dev->mphy, vif, &mvif->sta.wcid,
true, mvif->ctx);
+ ewma_avg_signal_init(&msta->avg_ack_signal);
+
mt7921_mac_wtbl_update(dev, msta->wcid.idx,
MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
memset(msta->airtime_ac, 0, sizeof(msta->airtime_ac));
@@ -1135,17 +1090,34 @@ static void
mt7921_get_et_strings(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
u32 sset, u8 *data)
{
+ struct mt7921_dev *dev = mt7921_hw_dev(hw);
+
if (sset != ETH_SS_STATS)
return;
memcpy(data, *mt7921_gstrings_stats, sizeof(mt7921_gstrings_stats));
+
+ if (mt76_is_sdio(&dev->mt76))
+ return;
+
+ data += sizeof(mt7921_gstrings_stats);
+ page_pool_ethtool_stats_get_strings(data);
}
static int
mt7921_get_et_sset_count(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
int sset)
{
- return sset == ETH_SS_STATS ? ARRAY_SIZE(mt7921_gstrings_stats) : 0;
+ struct mt7921_dev *dev = mt7921_hw_dev(hw);
+
+ if (sset != ETH_SS_STATS)
+ return 0;
+
+ if (mt76_is_sdio(&dev->mt76))
+ return ARRAY_SIZE(mt7921_gstrings_stats);
+
+ return ARRAY_SIZE(mt7921_gstrings_stats) +
+ page_pool_ethtool_stats_get_count();
}
static void
@@ -1157,7 +1129,7 @@ mt7921_ethtool_worker(void *wi_data, struct ieee80211_sta *sta)
if (msta->vif->mt76.idx != wi->idx)
return;
- mt76_ethtool_worker(wi, &msta->wcid.stats);
+ mt76_ethtool_worker(wi, &msta->wcid.stats, false);
}
static
@@ -1165,6 +1137,7 @@ void mt7921_get_et_stats(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ethtool_stats *stats, u64 *data)
{
struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
+ int stats_size = ARRAY_SIZE(mt7921_gstrings_stats);
struct mt7921_phy *phy = mt7921_hw_phy(hw);
struct mt7921_dev *dev = phy->dev;
struct mib_stats *mib = &phy->mib;
@@ -1220,9 +1193,14 @@ void mt7921_get_et_stats(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
return;
ei += wi.worker_stat_count;
- if (ei != ARRAY_SIZE(mt7921_gstrings_stats))
- dev_err(dev->mt76.dev, "ei: %d SSTATS_LEN: %zu",
- ei, ARRAY_SIZE(mt7921_gstrings_stats));
+
+ if (!mt76_is_sdio(&dev->mt76)) {
+ mt76_ethtool_page_pool_stats(&dev->mt76, &data[ei], &ei);
+ stats_size += page_pool_ethtool_stats_get_count();
+ }
+
+ if (ei != stats_size)
+ dev_err(dev->mt76.dev, "ei: %d SSTATS_LEN: %d", ei, stats_size);
}
static u64
@@ -1430,6 +1408,12 @@ static void mt7921_sta_statistics(struct ieee80211_hw *hw,
}
sinfo->txrate.flags = txrate->flags;
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
+
+ sinfo->ack_signal = (s8)msta->ack_signal;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL);
+
+ sinfo->avg_ack_signal = -(s8)ewma_avg_signal_read(&msta->avg_ack_signal);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL_AVG);
}
#ifdef CONFIG_PM
@@ -1711,7 +1695,10 @@ static void mt7921_ctx_iter(void *priv, u8 *mac,
if (ctx != mvif->ctx)
return;
- mt76_connac_mcu_uni_set_chctx(mvif->phy->mt76, &mvif->mt76, ctx);
+ if (vif->type & NL80211_IFTYPE_MONITOR)
+ mt7921_mcu_config_sniffer(mvif, ctx);
+ else
+ mt76_connac_mcu_uni_set_chctx(mvif->phy->mt76, &mvif->mt76, ctx);
}
static void
@@ -1778,11 +1765,8 @@ static void mt7921_mgd_complete_tx(struct ieee80211_hw *hw,
struct ieee80211_prep_tx_info *info)
{
struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
- struct mt7921_dev *dev = mt7921_hw_dev(hw);
- mt7921_mutex_acquire(dev);
mt7921_abort_roc(mvif->phy, mvif);
- mt7921_mutex_release(dev);
}
const struct ieee80211_ops mt7921_ops = {
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
index fb9c0f66cb27..c5e7ad06f877 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
@@ -174,7 +174,7 @@ mt7921_mcu_uni_roc_event(struct mt7921_dev *dev, struct sk_buff *skb)
wake_up(&dev->phy.roc_wait);
duration = le32_to_cpu(grant->max_interval);
mod_timer(&dev->phy.roc_timer,
- round_jiffies_up(jiffies + msecs_to_jiffies(duration)));
+ jiffies + msecs_to_jiffies(duration));
}
static void
@@ -1019,6 +1019,8 @@ int mt7921_mcu_set_beacon_filter(struct mt7921_dev *dev,
struct ieee80211_vif *vif,
bool enable)
{
+#define MT7921_FIF_BIT_CLR BIT(1)
+#define MT7921_FIF_BIT_SET BIT(0)
int err;
if (enable) {
@@ -1026,7 +1028,11 @@ int mt7921_mcu_set_beacon_filter(struct mt7921_dev *dev,
if (err)
return err;
- mt76_set(dev, MT_WF_RFCR(0), MT_WF_RFCR_DROP_OTHER_BEACON);
+ err = mt7921_mcu_set_rxfilter(dev, 0,
+ MT7921_FIF_BIT_SET,
+ MT_WF_RFCR_DROP_OTHER_BEACON);
+ if (err)
+ return err;
return 0;
}
@@ -1035,7 +1041,11 @@ int mt7921_mcu_set_beacon_filter(struct mt7921_dev *dev,
if (err)
return err;
- mt76_clear(dev, MT_WF_RFCR(0), MT_WF_RFCR_DROP_OTHER_BEACON);
+ err = mt7921_mcu_set_rxfilter(dev, 0,
+ MT7921_FIF_BIT_CLR,
+ MT_WF_RFCR_DROP_OTHER_BEACON);
+ if (err)
+ return err;
return 0;
}
@@ -1093,6 +1103,74 @@ int mt7921_mcu_set_sniffer(struct mt7921_dev *dev, struct ieee80211_vif *vif,
true);
}
+int mt7921_mcu_config_sniffer(struct mt7921_vif *vif,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct cfg80211_chan_def *chandef = &ctx->def;
+ int freq1 = chandef->center_freq1, freq2 = chandef->center_freq2;
+ const u8 ch_band[] = {
+ [NL80211_BAND_2GHZ] = 1,
+ [NL80211_BAND_5GHZ] = 2,
+ [NL80211_BAND_6GHZ] = 3,
+ };
+ const u8 ch_width[] = {
+ [NL80211_CHAN_WIDTH_20_NOHT] = 0,
+ [NL80211_CHAN_WIDTH_20] = 0,
+ [NL80211_CHAN_WIDTH_40] = 0,
+ [NL80211_CHAN_WIDTH_80] = 1,
+ [NL80211_CHAN_WIDTH_160] = 2,
+ [NL80211_CHAN_WIDTH_80P80] = 3,
+ [NL80211_CHAN_WIDTH_5] = 4,
+ [NL80211_CHAN_WIDTH_10] = 5,
+ [NL80211_CHAN_WIDTH_320] = 6,
+ };
+ struct {
+ struct {
+ u8 band_idx;
+ u8 pad[3];
+ } __packed hdr;
+ struct config_tlv {
+ __le16 tag;
+ __le16 len;
+ u16 aid;
+ u8 ch_band;
+ u8 bw;
+ u8 control_ch;
+ u8 sco;
+ u8 center_ch;
+ u8 center_ch2;
+ u8 drop_err;
+ u8 pad[3];
+ } __packed tlv;
+ } __packed req = {
+ .hdr = {
+ .band_idx = vif->mt76.band_idx,
+ },
+ .tlv = {
+ .tag = cpu_to_le16(1),
+ .len = cpu_to_le16(sizeof(req.tlv)),
+ .control_ch = chandef->chan->hw_value,
+ .center_ch = ieee80211_frequency_to_channel(freq1),
+ .drop_err = 1,
+ },
+ };
+ if (chandef->chan->band < ARRAY_SIZE(ch_band))
+ req.tlv.ch_band = ch_band[chandef->chan->band];
+ if (chandef->width < ARRAY_SIZE(ch_width))
+ req.tlv.bw = ch_width[chandef->width];
+
+ if (freq2)
+ req.tlv.center_ch2 = ieee80211_frequency_to_channel(freq2);
+
+ if (req.tlv.control_ch < req.tlv.center_ch)
+ req.tlv.sco = 1; /* SCA */
+ else if (req.tlv.control_ch > req.tlv.center_ch)
+ req.tlv.sco = 3; /* SCB */
+
+ return mt76_mcu_send_msg(vif->phy->mt76->dev, MCU_UNI_CMD(SNIFFER),
+ &req, sizeof(req), true);
+}
+
int
mt7921_mcu_uni_add_beacon_offload(struct mt7921_dev *dev,
struct ieee80211_hw *hw,
@@ -1184,13 +1262,15 @@ int __mt7921_mcu_set_clc(struct mt7921_dev *dev, u8 *alpha2,
__le16 len;
u8 idx;
u8 env;
- u8 pad1[2];
+ u8 acpi_conf;
+ u8 pad1;
u8 alpha2[2];
u8 type[2];
u8 rsvd[64];
} __packed req = {
.idx = idx,
.env = env_cap,
+ .acpi_conf = mt7921_acpi_get_flags(&dev->phy),
};
int ret, valid_cnt = 0;
u8 i, *pos;
@@ -1253,3 +1333,25 @@ int mt7921_mcu_set_clc(struct mt7921_dev *dev, u8 *alpha2,
}
return 0;
}
+
+int mt7921_mcu_set_rxfilter(struct mt7921_dev *dev, u32 fif,
+ u8 bit_op, u32 bit_map)
+{
+ struct {
+ u8 rsv[4];
+ u8 mode;
+ u8 rsv2[3];
+ __le32 fif;
+ __le32 bit_map; /* bit_* for bitmap update */
+ u8 bit_op;
+ u8 pad[51];
+ } __packed data = {
+ .mode = fif ? 1 : 2,
+ .fif = cpu_to_le32(fif),
+ .bit_map = cpu_to_le32(bit_map),
+ .bit_op = bit_op,
+ };
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_CE_CMD(SET_RX_FILTER),
+ &data, sizeof(data), false);
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
index 15d6b7fe1c6c..1af70dac723b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
@@ -144,6 +144,8 @@ enum mt7921_rxq_id {
MT7921_RXQ_MCU_WM = 0,
};
+DECLARE_EWMA(avg_signal, 10, 8)
+
struct mt7921_sta {
struct mt76_wcid wcid; /* must be first */
@@ -152,6 +154,9 @@ struct mt7921_sta {
struct list_head poll_list;
u32 airtime_ac[8];
+ int ack_signal;
+ struct ewma_avg_signal avg_ack_signal;
+
unsigned long last_txs;
unsigned long ampdu_state;
@@ -383,6 +388,8 @@ int mt7921_mcu_get_rx_rate(struct mt7921_phy *phy, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, struct rate_info *rate);
int mt7921_mcu_fw_log_2_host(struct mt7921_dev *dev, u8 ctrl);
void mt7921_mcu_rx_event(struct mt7921_dev *dev, struct sk_buff *skb);
+int mt7921_mcu_set_rxfilter(struct mt7921_dev *dev, u32 fif,
+ u8 bit_op, u32 bit_map);
static inline void mt7921_irq_enable(struct mt7921_dev *dev, u32 mask)
{
@@ -529,6 +536,8 @@ void mt7921_set_ipv6_ns_work(struct work_struct *work);
int mt7921_mcu_set_sniffer(struct mt7921_dev *dev, struct ieee80211_vif *vif,
bool enable);
+int mt7921_mcu_config_sniffer(struct mt7921_vif *vif,
+ struct ieee80211_chanctx_conf *ctx);
int mt7921_usb_sdio_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
enum mt76_txq_id qid, struct mt76_wcid *wcid,
@@ -554,6 +563,7 @@ int mt7921_mcu_uni_add_beacon_offload(struct mt7921_dev *dev,
#ifdef CONFIG_ACPI
int mt7921_init_acpi_sar(struct mt7921_dev *dev);
int mt7921_init_acpi_sar_power(struct mt7921_phy *phy, bool set_default);
+u8 mt7921_acpi_get_flags(struct mt7921_phy *phy);
#else
static inline int
mt7921_init_acpi_sar(struct mt7921_dev *dev)
@@ -566,6 +576,12 @@ mt7921_init_acpi_sar_power(struct mt7921_phy *phy, bool set_default)
{
return 0;
}
+
+static inline u8
+mt7921_acpi_get_flags(struct mt7921_phy *phy)
+{
+ return 0;
+}
#endif
int mt7921_set_tx_sar_pwr(struct ieee80211_hw *hw,
const struct cfg80211_sar_specs *sar);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci_mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci_mcu.c
index 86340d3205c5..1aefbb6cf0ab 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/pci_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci_mcu.c
@@ -44,7 +44,6 @@ int mt7921e_mcu_init(struct mt7921_dev *dev)
.headroom = sizeof(struct mt76_connac2_mcu_txd),
.mcu_skb_send_msg = mt7921_mcu_send_message,
.mcu_parse_response = mt7921_mcu_parse_response,
- .mcu_restart = mt76_connac_mcu_restart,
};
int err;
@@ -69,8 +68,8 @@ int __mt7921e_mcu_drv_pmctrl(struct mt7921_dev *dev)
for (i = 0; i < MT7921_DRV_OWN_RETRY_COUNT; i++) {
mt76_wr(dev, MT_CONN_ON_LPCTL, PCIE_LPCR_HOST_CLR_OWN);
- if (mt76_poll_msec(dev, MT_CONN_ON_LPCTL,
- PCIE_LPCR_HOST_OWN_SYNC, 0, 50))
+ if (mt76_poll_msec_tick(dev, MT_CONN_ON_LPCTL,
+ PCIE_LPCR_HOST_OWN_SYNC, 0, 50, 1))
break;
}
@@ -110,8 +109,8 @@ int mt7921e_mcu_fw_pmctrl(struct mt7921_dev *dev)
for (i = 0; i < MT7921_DRV_OWN_RETRY_COUNT; i++) {
mt76_wr(dev, MT_CONN_ON_LPCTL, PCIE_LPCR_HOST_SET_OWN);
- if (mt76_poll_msec(dev, MT_CONN_ON_LPCTL,
- PCIE_LPCR_HOST_OWN_SYNC, 4, 50))
+ if (mt76_poll_msec_tick(dev, MT_CONN_ON_LPCTL,
+ PCIE_LPCR_HOST_OWN_SYNC, 4, 50, 1))
break;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/regs.h b/drivers/net/wireless/mediatek/mt76/mt7921/regs.h
index c65582acfa55..e52977ff3349 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/regs.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/regs.h
@@ -80,6 +80,14 @@
#define MT_DMA_DCR0_MAX_RX_LEN GENMASK(15, 3)
#define MT_DMA_DCR0_RXD_G5_EN BIT(23)
+/* WTBLOFF TOP: band 0(0x820e9000),band 1(0x820f9000) */
+#define MT_WTBLOFF_TOP_BASE(_band) ((_band) ? 0x820f9000 : 0x820e9000)
+#define MT_WTBLOFF_TOP(_band, ofs) (MT_WTBLOFF_TOP_BASE(_band) + (ofs))
+
+#define MT_WTBLOFF_TOP_RSCR(_band) MT_WTBLOFF_TOP(_band, 0x008)
+#define MT_WTBLOFF_TOP_RSCR_RCPI_MODE GENMASK(31, 30)
+#define MT_WTBLOFF_TOP_RSCR_RCPI_PARAM GENMASK(25, 24)
+
/* LPON: band 0(0x24200), band 1(0xa4200) */
#define MT_WF_LPON_BASE(_band) ((_band) ? 0x820fb000 : 0x820eb000)
#define MT_WF_LPON(_band, ofs) (MT_WF_LPON_BASE(_band) + (ofs))
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/testmode.c b/drivers/net/wireless/mediatek/mt76/mt7921/testmode.c
index bdec8684ce94..7f408212e716 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/testmode.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/testmode.c
@@ -59,7 +59,6 @@ mt7921_tm_set(struct mt7921_dev *dev, struct mt7921_tm_cmd *req)
cancel_work_sync(&pm->wake_work);
__mt7921_mcu_drv_pmctrl(dev);
- mt76_wr(dev, MT_WF_RFCR(0), dev->mt76.rxfilter);
phy->test.state = MT76_TM_STATE_ON;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/usb.c b/drivers/net/wireless/mediatek/mt76/mt7921/usb.c
index 5321d20dcdcb..8fef09ed29c9 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/usb.c
@@ -15,6 +15,9 @@
static const struct usb_device_id mt7921u_device_table[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(0x0e8d, 0x7961, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)MT7921_FIRMWARE_WM },
+ /* Comfast CF-952AX */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x3574, 0x6211, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)MT7921_FIRMWARE_WM },
{ },
};
@@ -133,7 +136,6 @@ static int mt7921u_mcu_init(struct mt7921_dev *dev)
.tailroom = MT_USB_TAIL_SIZE,
.mcu_skb_send_msg = mt7921u_mcu_send_message,
.mcu_parse_response = mt7921_mcu_parse_response,
- .mcu_restart = mt76_connac_mcu_restart,
};
int ret;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c
index 2e4a8909b9e8..9c5e9ac1c335 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c
@@ -457,7 +457,7 @@ mt7996_hw_queue_read(struct seq_file *s, u32 size,
if (val & BIT(map[i].index))
continue;
- ctrl = BIT(31) | (map[i].pid << 10) | (map[i].qid << 24);
+ ctrl = BIT(31) | (map[i].pid << 10) | ((u32)map[i].qid << 24);
mt76_wr(dev, MT_FL_Q0_CTRL, ctrl);
head = mt76_get_field(dev, MT_FL_Q2_CTRL,
@@ -653,8 +653,9 @@ static int
mt7996_rf_regval_set(void *data, u64 val)
{
struct mt7996_dev *dev = data;
+ u32 val32 = val;
- return mt7996_mcu_rf_regval(dev, dev->mt76.debugfs_reg, (u32 *)&val, true);
+ return mt7996_mcu_rf_regval(dev, dev->mt76.debugfs_reg, &val32, true);
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_rf_regval, mt7996_rf_regval_get,
@@ -790,10 +791,10 @@ static ssize_t mt7996_sta_fixed_rate_set(struct file *file,
else
buf[count] = '\0';
- /* mode - cck: 0, ofdm: 1, ht: 2, gf: 3, vht: 4, he_su: 8, he_er: 9
- * bw - bw20: 0, bw40: 1, bw80: 2, bw160: 3
- * nss - vht: 1~4, he: 1~4, others: ignore
- * mcs - cck: 0~4, ofdm: 0~7, ht: 0~32, vht: 0~9, he_su: 0~11, he_er: 0~2
+ /* mode - cck: 0, ofdm: 1, ht: 2, gf: 3, vht: 4, he_su: 8, he_er: 9 EHT: 15
+ * bw - bw20: 0, bw40: 1, bw80: 2, bw160: 3, BW320: 4
+ * nss - vht: 1~4, he: 1~4, eht: 1~4, others: ignore
+ * mcs - cck: 0~4, ofdm: 0~7, ht: 0~32, vht: 0~9, he_su: 0~11, he_er: 0~2, eht: 0~13
* gi - (ht/vht) lgi: 0, sgi: 1; (he) 0.8us: 0, 1.6us: 1, 3.2us: 2
* preamble - short: 1, long: 0
* ldpc - off: 0, on: 1
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7996/eeprom.c
index b9f62bedbc48..2e48c5a40f81 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/eeprom.c
@@ -65,22 +65,50 @@ static int mt7996_eeprom_load(struct mt7996_dev *dev)
} else {
u8 free_block_num;
u32 block_num, i;
+ u32 eeprom_blk_size = MT7996_EEPROM_BLOCK_SIZE;
- /* TODO: check free block event */
- mt7996_mcu_get_eeprom_free_block(dev, &free_block_num);
- /* efuse info not enough */
+ ret = mt7996_mcu_get_eeprom_free_block(dev, &free_block_num);
+ if (ret < 0)
+ return ret;
+
+ /* efuse info isn't enough */
if (free_block_num >= 59)
return -EINVAL;
/* read eeprom data from efuse */
- block_num = DIV_ROUND_UP(MT7996_EEPROM_SIZE, MT7996_EEPROM_BLOCK_SIZE);
- for (i = 0; i < block_num; i++)
- mt7996_mcu_get_eeprom(dev, i * MT7996_EEPROM_BLOCK_SIZE);
+ block_num = DIV_ROUND_UP(MT7996_EEPROM_SIZE, eeprom_blk_size);
+ for (i = 0; i < block_num; i++) {
+ ret = mt7996_mcu_get_eeprom(dev, i * eeprom_blk_size);
+ if (ret < 0)
+ return ret;
+ }
}
return mt7996_check_eeprom(dev);
}
+static int mt7996_eeprom_parse_efuse_hw_cap(struct mt7996_dev *dev)
+{
+#define MODE_HE_ONLY BIT(0)
+#define WTBL_SIZE_GROUP GENMASK(31, 28)
+ u32 cap = 0;
+ int ret;
+
+ ret = mt7996_mcu_get_chip_config(dev, &cap);
+ if (ret)
+ return ret;
+
+ if (cap) {
+ dev->has_eht = !(cap & MODE_HE_ONLY);
+ dev->wtbl_size_group = u32_get_bits(cap, WTBL_SIZE_GROUP);
+ }
+
+ if (dev->wtbl_size_group < 2 || dev->wtbl_size_group > 4)
+ dev->wtbl_size_group = 2; /* set default */
+
+ return 0;
+}
+
static int mt7996_eeprom_parse_band_config(struct mt7996_phy *phy)
{
u8 *eeprom = phy->dev->mt76.eeprom.data;
@@ -127,6 +155,7 @@ int mt7996_eeprom_parse_hw_cap(struct mt7996_dev *dev, struct mt7996_phy *phy)
u8 path, nss, band_idx = phy->mt76->band_idx;
u8 *eeprom = dev->mt76.eeprom.data;
struct mt76_phy *mphy = phy->mt76;
+ int ret;
switch (band_idx) {
case MT_BAND1:
@@ -161,6 +190,10 @@ int mt7996_eeprom_parse_hw_cap(struct mt7996_dev *dev, struct mt7996_phy *phy)
dev->chainshift[band_idx + 1] = dev->chainshift[band_idx] +
hweight16(mphy->chainmask);
+ ret = mt7996_eeprom_parse_efuse_hw_cap(dev);
+ if (ret)
+ return ret;
+
return mt7996_eeprom_parse_band_config(phy);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/init.c b/drivers/net/wireless/mediatek/mt76/mt7996/init.c
index 46b290526092..946da93eed32 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/init.c
@@ -37,8 +37,7 @@ static const struct ieee80211_iface_combination if_comb[] = {
BIT(NL80211_CHAN_WIDTH_20) |
BIT(NL80211_CHAN_WIDTH_40) |
BIT(NL80211_CHAN_WIDTH_80) |
- BIT(NL80211_CHAN_WIDTH_160) |
- BIT(NL80211_CHAN_WIDTH_80P80),
+ BIT(NL80211_CHAN_WIDTH_160),
}
};
@@ -46,11 +45,11 @@ static void mt7996_led_set_config(struct led_classdev *led_cdev,
u8 delay_on, u8 delay_off)
{
struct mt7996_dev *dev;
- struct mt76_dev *mt76;
+ struct mt76_phy *mphy;
u32 val;
- mt76 = container_of(led_cdev, struct mt76_dev, led_cdev);
- dev = container_of(mt76, struct mt7996_dev, mt76);
+ mphy = container_of(led_cdev, struct mt76_phy, leds.cdev);
+ dev = container_of(mphy->dev, struct mt7996_dev, mt76);
/* select TX blink mode, 2: only data frames */
mt76_rmw_field(dev, MT_TMAC_TCR0(0), MT_TMAC_TCR0_TX_BLINK, 2);
@@ -65,7 +64,7 @@ static void mt7996_led_set_config(struct led_classdev *led_cdev,
/* control LED */
val = MT_LED_CTRL_BLINK_MODE | MT_LED_CTRL_KICK;
- if (dev->mt76.led_al)
+ if (mphy->leds.al)
val |= MT_LED_CTRL_POLARITY;
mt76_wr(dev, MT_LED_CTRL(0), val);
@@ -153,10 +152,12 @@ mt7996_init_wiphy(struct ieee80211_hw *hw)
struct mt7996_phy *phy = mt7996_hw_phy(hw);
struct mt76_dev *mdev = &phy->dev->mt76;
struct wiphy *wiphy = hw->wiphy;
+ u16 max_subframes = phy->dev->has_eht ? IEEE80211_MAX_AMPDU_BUF_EHT :
+ IEEE80211_MAX_AMPDU_BUF_HE;
hw->queues = 4;
- hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_HE;
- hw->max_tx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_HE;
+ hw->max_rx_aggregation_subframes = max_subframes;
+ hw->max_tx_aggregation_subframes = max_subframes;
hw->netdev_features = NETIF_F_RXCSUM;
hw->radiotap_timestamp.units_pos =
@@ -214,7 +215,7 @@ mt7996_init_wiphy(struct ieee80211_hw *hw)
mt76_set_stream_caps(phy->mt76, true);
mt7996_set_stream_vht_txbf_caps(phy);
- mt7996_set_stream_he_caps(phy);
+ mt7996_set_stream_he_eht_caps(phy);
wiphy->available_antennas_rx = phy->mt76->antenna_mask;
wiphy->available_antennas_tx = phy->mt76->antenna_mask;
@@ -256,12 +257,12 @@ static void mt7996_mac_init(struct mt7996_dev *dev)
mt76_clear(dev, MT_MDP_DCR2, MT_MDP_DCR2_RX_TRANS_SHORT);
- for (i = 0; i < MT7996_WTBL_SIZE; i++)
+ for (i = 0; i < mt7996_wtbl_size(dev); i++)
mt7996_mac_wtbl_update(dev, i,
MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
if (IS_ENABLED(CONFIG_MT76_LEDS)) {
- i = dev->mt76.led_pin ? MT_LED_GPIO_MUX3 : MT_LED_GPIO_MUX2;
+ i = dev->mphy.leds.pin ? MT_LED_GPIO_MUX3 : MT_LED_GPIO_MUX2;
mt76_rmw_field(dev, i, MT_LED_GPIO_SEL_MASK, 4);
}
@@ -465,7 +466,7 @@ void mt7996_set_stream_vht_txbf_caps(struct mt7996_phy *phy)
*cap |= IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE |
- (3 << IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT);
+ FIELD_PREP(IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK, sts - 1);
*cap &= ~(IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK |
IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
@@ -572,11 +573,15 @@ mt7996_gen_ppe_thresh(u8 *he_ppet, int nss)
(0xff >> (8 - (ppet_bits - 1) % 8));
}
-static int
+static void
mt7996_init_he_caps(struct mt7996_phy *phy, enum nl80211_band band,
- struct ieee80211_sband_iftype_data *data)
+ struct ieee80211_sband_iftype_data *data,
+ enum nl80211_iftype iftype)
{
- int i, idx = 0, nss = hweight8(phy->mt76->antenna_mask);
+ struct ieee80211_sta_he_cap *he_cap = &data->he_cap;
+ struct ieee80211_he_cap_elem *he_cap_elem = &he_cap->he_cap_elem;
+ struct ieee80211_he_mcs_nss_supp *he_mcs = &he_cap->he_mcs_nss_supp;
+ int i, nss = hweight8(phy->mt76->antenna_mask);
u16 mcs_map = 0;
for (i = 0; i < 8; i++) {
@@ -586,179 +591,254 @@ mt7996_init_he_caps(struct mt7996_phy *phy, enum nl80211_band band,
mcs_map |= (IEEE80211_HE_MCS_NOT_SUPPORTED << (i * 2));
}
- for (i = 0; i < NUM_NL80211_IFTYPES; i++) {
- struct ieee80211_sta_he_cap *he_cap = &data[idx].he_cap;
- struct ieee80211_he_cap_elem *he_cap_elem =
- &he_cap->he_cap_elem;
- struct ieee80211_he_mcs_nss_supp *he_mcs =
- &he_cap->he_mcs_nss_supp;
-
- switch (i) {
- case NL80211_IFTYPE_STATION:
- case NL80211_IFTYPE_AP:
-#ifdef CONFIG_MAC80211_MESH
- case NL80211_IFTYPE_MESH_POINT:
-#endif
- break;
- default:
- continue;
- }
+ he_cap->has_he = true;
- data[idx].types_mask = BIT(i);
- he_cap->has_he = true;
+ he_cap_elem->mac_cap_info[0] = IEEE80211_HE_MAC_CAP0_HTC_HE;
+ he_cap_elem->mac_cap_info[3] = IEEE80211_HE_MAC_CAP3_OMI_CONTROL |
+ IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_3;
+ he_cap_elem->mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_AMSDU_IN_AMPDU;
- he_cap_elem->mac_cap_info[0] =
- IEEE80211_HE_MAC_CAP0_HTC_HE;
- he_cap_elem->mac_cap_info[3] =
- IEEE80211_HE_MAC_CAP3_OMI_CONTROL |
- IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_3;
- he_cap_elem->mac_cap_info[4] =
- IEEE80211_HE_MAC_CAP4_AMSDU_IN_AMPDU;
+ if (band == NL80211_BAND_2GHZ)
+ he_cap_elem->phy_cap_info[0] =
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G;
+ else
+ he_cap_elem->phy_cap_info[0] =
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G |
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G;
+
+ he_cap_elem->phy_cap_info[1] = IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD;
+ he_cap_elem->phy_cap_info[2] = IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ |
+ IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ;
+
+ switch (iftype) {
+ case NL80211_IFTYPE_AP:
+ he_cap_elem->mac_cap_info[0] |= IEEE80211_HE_MAC_CAP0_TWT_RES;
+ he_cap_elem->mac_cap_info[2] |= IEEE80211_HE_MAC_CAP2_BSR;
+ he_cap_elem->mac_cap_info[4] |= IEEE80211_HE_MAC_CAP4_BQR;
+ he_cap_elem->mac_cap_info[5] |=
+ IEEE80211_HE_MAC_CAP5_OM_CTRL_UL_MU_DATA_DIS_RX;
+ he_cap_elem->phy_cap_info[3] |=
+ IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_QPSK |
+ IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_QPSK;
+ he_cap_elem->phy_cap_info[6] |=
+ IEEE80211_HE_PHY_CAP6_PARTIAL_BW_EXT_RANGE |
+ IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT;
+ he_cap_elem->phy_cap_info[9] |=
+ IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU |
+ IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU;
+ break;
+ case NL80211_IFTYPE_STATION:
+ he_cap_elem->mac_cap_info[1] |=
+ IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US;
if (band == NL80211_BAND_2GHZ)
- he_cap_elem->phy_cap_info[0] =
- IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G;
+ he_cap_elem->phy_cap_info[0] |=
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_2G;
else
- he_cap_elem->phy_cap_info[0] =
- IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G |
- IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G |
- IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G;
-
- he_cap_elem->phy_cap_info[1] =
- IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD;
- he_cap_elem->phy_cap_info[2] =
- IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ |
- IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ;
+ he_cap_elem->phy_cap_info[0] |=
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_5G;
+
+ he_cap_elem->phy_cap_info[1] |=
+ IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A |
+ IEEE80211_HE_PHY_CAP1_HE_LTF_AND_GI_FOR_HE_PPDUS_0_8US;
+ he_cap_elem->phy_cap_info[3] |=
+ IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_QPSK |
+ IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_QPSK;
+ he_cap_elem->phy_cap_info[6] |=
+ IEEE80211_HE_PHY_CAP6_TRIG_CQI_FB |
+ IEEE80211_HE_PHY_CAP6_PARTIAL_BW_EXT_RANGE |
+ IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT;
+ he_cap_elem->phy_cap_info[7] |=
+ IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_SUPP |
+ IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI;
+ he_cap_elem->phy_cap_info[8] |=
+ IEEE80211_HE_PHY_CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G |
+ IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU |
+ IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU |
+ IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_484;
+ he_cap_elem->phy_cap_info[9] |=
+ IEEE80211_HE_PHY_CAP9_LONGER_THAN_16_SIGB_OFDM_SYM |
+ IEEE80211_HE_PHY_CAP9_NON_TRIGGERED_CQI_FEEDBACK |
+ IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU |
+ IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU |
+ IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB |
+ IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB;
+ break;
+ default:
+ break;
+ }
- switch (i) {
- case NL80211_IFTYPE_AP:
- he_cap_elem->mac_cap_info[0] |=
- IEEE80211_HE_MAC_CAP0_TWT_RES;
- he_cap_elem->mac_cap_info[2] |=
- IEEE80211_HE_MAC_CAP2_BSR;
- he_cap_elem->mac_cap_info[4] |=
- IEEE80211_HE_MAC_CAP4_BQR;
- he_cap_elem->mac_cap_info[5] |=
- IEEE80211_HE_MAC_CAP5_OM_CTRL_UL_MU_DATA_DIS_RX;
- he_cap_elem->phy_cap_info[3] |=
- IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_QPSK |
- IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_QPSK;
- he_cap_elem->phy_cap_info[6] |=
- IEEE80211_HE_PHY_CAP6_PARTIAL_BW_EXT_RANGE |
- IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT;
- he_cap_elem->phy_cap_info[9] |=
- IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU |
- IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU;
- break;
- case NL80211_IFTYPE_STATION:
- he_cap_elem->mac_cap_info[1] |=
- IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US;
-
- if (band == NL80211_BAND_2GHZ)
- he_cap_elem->phy_cap_info[0] |=
- IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_2G;
- else
- he_cap_elem->phy_cap_info[0] |=
- IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_5G;
-
- he_cap_elem->phy_cap_info[1] |=
- IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A |
- IEEE80211_HE_PHY_CAP1_HE_LTF_AND_GI_FOR_HE_PPDUS_0_8US;
- he_cap_elem->phy_cap_info[3] |=
- IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_QPSK |
- IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_QPSK;
- he_cap_elem->phy_cap_info[6] |=
- IEEE80211_HE_PHY_CAP6_TRIG_CQI_FB |
- IEEE80211_HE_PHY_CAP6_PARTIAL_BW_EXT_RANGE |
- IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT;
- he_cap_elem->phy_cap_info[7] |=
- IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_SUPP |
- IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI;
- he_cap_elem->phy_cap_info[8] |=
- IEEE80211_HE_PHY_CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G |
- IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU |
- IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU |
- IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_484;
- he_cap_elem->phy_cap_info[9] |=
- IEEE80211_HE_PHY_CAP9_LONGER_THAN_16_SIGB_OFDM_SYM |
- IEEE80211_HE_PHY_CAP9_NON_TRIGGERED_CQI_FEEDBACK |
- IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU |
- IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU |
- IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB |
- IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB;
- break;
- }
+ he_mcs->rx_mcs_80 = cpu_to_le16(mcs_map);
+ he_mcs->tx_mcs_80 = cpu_to_le16(mcs_map);
+ he_mcs->rx_mcs_160 = cpu_to_le16(mcs_map);
+ he_mcs->tx_mcs_160 = cpu_to_le16(mcs_map);
+
+ mt7996_set_stream_he_txbf_caps(phy, he_cap, iftype);
+
+ memset(he_cap->ppe_thres, 0, sizeof(he_cap->ppe_thres));
+ if (he_cap_elem->phy_cap_info[6] &
+ IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) {
+ mt7996_gen_ppe_thresh(he_cap->ppe_thres, nss);
+ } else {
+ he_cap_elem->phy_cap_info[9] |=
+ u8_encode_bits(IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_16US,
+ IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK);
+ }
- he_mcs->rx_mcs_80 = cpu_to_le16(mcs_map);
- he_mcs->tx_mcs_80 = cpu_to_le16(mcs_map);
- he_mcs->rx_mcs_160 = cpu_to_le16(mcs_map);
- he_mcs->tx_mcs_160 = cpu_to_le16(mcs_map);
- he_mcs->rx_mcs_80p80 = cpu_to_le16(mcs_map);
- he_mcs->tx_mcs_80p80 = cpu_to_le16(mcs_map);
-
- mt7996_set_stream_he_txbf_caps(phy, he_cap, i);
-
- memset(he_cap->ppe_thres, 0, sizeof(he_cap->ppe_thres));
- if (he_cap_elem->phy_cap_info[6] &
- IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) {
- mt7996_gen_ppe_thresh(he_cap->ppe_thres, nss);
- } else {
- he_cap_elem->phy_cap_info[9] |=
- IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_16US;
- }
+ if (band == NL80211_BAND_6GHZ) {
+ u16 cap = IEEE80211_HE_6GHZ_CAP_TX_ANTPAT_CONS |
+ IEEE80211_HE_6GHZ_CAP_RX_ANTPAT_CONS;
- if (band == NL80211_BAND_6GHZ) {
- u16 cap = IEEE80211_HE_6GHZ_CAP_TX_ANTPAT_CONS |
- IEEE80211_HE_6GHZ_CAP_RX_ANTPAT_CONS;
+ cap |= u16_encode_bits(IEEE80211_HT_MPDU_DENSITY_2,
+ IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START) |
+ u16_encode_bits(IEEE80211_VHT_MAX_AMPDU_1024K,
+ IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP) |
+ u16_encode_bits(IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454,
+ IEEE80211_HE_6GHZ_CAP_MAX_MPDU_LEN);
- cap |= u16_encode_bits(IEEE80211_HT_MPDU_DENSITY_2,
- IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START) |
- u16_encode_bits(IEEE80211_VHT_MAX_AMPDU_1024K,
- IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP) |
- u16_encode_bits(IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454,
- IEEE80211_HE_6GHZ_CAP_MAX_MPDU_LEN);
+ data->he_6ghz_capa.capa = cpu_to_le16(cap);
+ }
+}
- data[idx].he_6ghz_capa.capa = cpu_to_le16(cap);
- }
+static void
+mt7996_init_eht_caps(struct mt7996_phy *phy, enum nl80211_band band,
+ struct ieee80211_sband_iftype_data *data,
+ enum nl80211_iftype iftype)
+{
+ struct ieee80211_sta_eht_cap *eht_cap = &data->eht_cap;
+ struct ieee80211_eht_cap_elem_fixed *eht_cap_elem = &eht_cap->eht_cap_elem;
+ struct ieee80211_eht_mcs_nss_supp *eht_nss = &eht_cap->eht_mcs_nss_supp;
+ enum nl80211_chan_width width = phy->mt76->chandef.width;
+ int nss = hweight8(phy->mt76->antenna_mask);
+ int sts = hweight16(phy->mt76->chainmask);
+ u8 val;
- idx++;
- }
+ if (!phy->dev->has_eht)
+ return;
- return idx;
+ eht_cap->has_eht = true;
+
+ eht_cap_elem->mac_cap_info[0] =
+ IEEE80211_EHT_MAC_CAP0_EPCS_PRIO_ACCESS |
+ IEEE80211_EHT_MAC_CAP0_OM_CONTROL;
+
+ eht_cap_elem->phy_cap_info[0] =
+ IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ |
+ IEEE80211_EHT_PHY_CAP0_NDP_4_EHT_LFT_32_GI |
+ IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMER |
+ IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMEE;
+
+ eht_cap_elem->phy_cap_info[0] |=
+ u8_encode_bits(u8_get_bits(sts - 1, BIT(0)),
+ IEEE80211_EHT_PHY_CAP0_BEAMFORMEE_SS_80MHZ_MASK);
+
+ eht_cap_elem->phy_cap_info[1] =
+ u8_encode_bits(u8_get_bits(sts - 1, GENMASK(2, 1)),
+ IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_80MHZ_MASK) |
+ u8_encode_bits(sts - 1,
+ IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_160MHZ_MASK) |
+ u8_encode_bits(sts - 1,
+ IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_320MHZ_MASK);
+
+ eht_cap_elem->phy_cap_info[2] =
+ u8_encode_bits(sts - 1, IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_80MHZ_MASK) |
+ u8_encode_bits(sts - 1, IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_160MHZ_MASK) |
+ u8_encode_bits(sts - 1, IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_320MHZ_MASK);
+
+ eht_cap_elem->phy_cap_info[3] =
+ IEEE80211_EHT_PHY_CAP3_NG_16_SU_FEEDBACK |
+ IEEE80211_EHT_PHY_CAP3_NG_16_MU_FEEDBACK |
+ IEEE80211_EHT_PHY_CAP3_CODEBOOK_4_2_SU_FDBK |
+ IEEE80211_EHT_PHY_CAP3_CODEBOOK_7_5_MU_FDBK |
+ IEEE80211_EHT_PHY_CAP3_TRIG_SU_BF_FDBK |
+ IEEE80211_EHT_PHY_CAP3_TRIG_MU_BF_PART_BW_FDBK |
+ IEEE80211_EHT_PHY_CAP3_TRIG_CQI_FDBK;
+
+ eht_cap_elem->phy_cap_info[4] =
+ u8_encode_bits(min_t(int, sts - 1, 2),
+ IEEE80211_EHT_PHY_CAP4_MAX_NC_MASK);
+
+ eht_cap_elem->phy_cap_info[5] =
+ IEEE80211_EHT_PHY_CAP5_NON_TRIG_CQI_FEEDBACK |
+ u8_encode_bits(IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_16US,
+ IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK) |
+ u8_encode_bits(u8_get_bits(0x11, GENMASK(1, 0)),
+ IEEE80211_EHT_PHY_CAP5_MAX_NUM_SUPP_EHT_LTF_MASK);
+
+ val = width == NL80211_CHAN_WIDTH_320 ? 0xf :
+ width == NL80211_CHAN_WIDTH_160 ? 0x7 :
+ width == NL80211_CHAN_WIDTH_80 ? 0x3 : 0x1;
+ eht_cap_elem->phy_cap_info[6] =
+ u8_encode_bits(u8_get_bits(0x11, GENMASK(4, 2)),
+ IEEE80211_EHT_PHY_CAP6_MAX_NUM_SUPP_EHT_LTF_MASK) |
+ u8_encode_bits(val, IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_MASK);
+
+ eht_cap_elem->phy_cap_info[7] =
+ IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_80MHZ |
+ IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_160MHZ |
+ IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_320MHZ |
+ IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_80MHZ |
+ IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_160MHZ |
+ IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_320MHZ;
+
+ val = u8_encode_bits(nss, IEEE80211_EHT_MCS_NSS_RX) |
+ u8_encode_bits(nss, IEEE80211_EHT_MCS_NSS_TX);
+#define SET_EHT_MAX_NSS(_bw, _val) do { \
+ eht_nss->bw._##_bw.rx_tx_mcs9_max_nss = _val; \
+ eht_nss->bw._##_bw.rx_tx_mcs11_max_nss = _val; \
+ eht_nss->bw._##_bw.rx_tx_mcs13_max_nss = _val; \
+ } while (0)
+
+ SET_EHT_MAX_NSS(80, val);
+ SET_EHT_MAX_NSS(160, val);
+ SET_EHT_MAX_NSS(320, val);
+#undef SET_EHT_MAX_NSS
}
-void mt7996_set_stream_he_caps(struct mt7996_phy *phy)
+static void
+__mt7996_set_stream_he_eht_caps(struct mt7996_phy *phy,
+ struct ieee80211_supported_band *sband,
+ enum nl80211_band band)
{
- struct ieee80211_sband_iftype_data *data;
- struct ieee80211_supported_band *band;
- int n;
+ struct ieee80211_sband_iftype_data *data = phy->iftype[band];
+ int i, n = 0;
+
+ for (i = 0; i < NUM_NL80211_IFTYPES; i++) {
+ switch (i) {
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_AP:
+#ifdef CONFIG_MAC80211_MESH
+ case NL80211_IFTYPE_MESH_POINT:
+#endif
+ break;
+ default:
+ continue;
+ }
- if (phy->mt76->cap.has_2ghz) {
- data = phy->iftype[NL80211_BAND_2GHZ];
- n = mt7996_init_he_caps(phy, NL80211_BAND_2GHZ, data);
+ data[n].types_mask = BIT(i);
+ mt7996_init_he_caps(phy, band, &data[n], i);
+ mt7996_init_eht_caps(phy, band, &data[n], i);
- band = &phy->mt76->sband_2g.sband;
- band->iftype_data = data;
- band->n_iftype_data = n;
+ n++;
}
- if (phy->mt76->cap.has_5ghz) {
- data = phy->iftype[NL80211_BAND_5GHZ];
- n = mt7996_init_he_caps(phy, NL80211_BAND_5GHZ, data);
+ sband->iftype_data = data;
+ sband->n_iftype_data = n;
+}
- band = &phy->mt76->sband_5g.sband;
- band->iftype_data = data;
- band->n_iftype_data = n;
- }
+void mt7996_set_stream_he_eht_caps(struct mt7996_phy *phy)
+{
+ if (phy->mt76->cap.has_2ghz)
+ __mt7996_set_stream_he_eht_caps(phy, &phy->mt76->sband_2g.sband,
+ NL80211_BAND_2GHZ);
- if (phy->mt76->cap.has_6ghz) {
- data = phy->iftype[NL80211_BAND_6GHZ];
- n = mt7996_init_he_caps(phy, NL80211_BAND_6GHZ, data);
+ if (phy->mt76->cap.has_5ghz)
+ __mt7996_set_stream_he_eht_caps(phy, &phy->mt76->sband_5g.sband,
+ NL80211_BAND_5GHZ);
- band = &phy->mt76->sband_6g.sband;
- band->iftype_data = data;
- band->n_iftype_data = n;
- }
+ if (phy->mt76->cap.has_6ghz)
+ __mt7996_set_stream_he_eht_caps(phy, &phy->mt76->sband_6g.sband,
+ NL80211_BAND_6GHZ);
}
int mt7996_register_device(struct mt7996_dev *dev)
@@ -787,8 +867,8 @@ int mt7996_register_device(struct mt7996_dev *dev)
/* init led callbacks */
if (IS_ENABLED(CONFIG_MT76_LEDS)) {
- dev->mt76.led_cdev.brightness_set = mt7996_led_set_brightness;
- dev->mt76.led_cdev.blink_set = mt7996_led_set_blink;
+ dev->mphy.leds.cdev.brightness_set = mt7996_led_set_brightness;
+ dev->mphy.leds.cdev.blink_set = mt7996_led_set_blink;
}
ret = mt76_register_device(&dev->mt76, true, mt76_rates,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
index 0b3e28748e76..c9a9f0e31771 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
@@ -189,6 +189,9 @@ static void mt7996_mac_sta_poll(struct mt7996_dev *dev)
rate = &msta->wcid.rate;
switch (rate->bw) {
+ case RATE_INFO_BW_320:
+ bw = IEEE80211_STA_RX_BW_320;
+ break;
case RATE_INFO_BW_160:
bw = IEEE80211_STA_RX_BW_160;
break;
@@ -205,7 +208,11 @@ static void mt7996_mac_sta_poll(struct mt7996_dev *dev)
addr = mt7996_mac_wtbl_lmac_addr(dev, idx, 6);
val = mt76_rr(dev, addr);
- if (rate->flags & RATE_INFO_FLAGS_HE_MCS) {
+ if (rate->flags & RATE_INFO_FLAGS_EHT_MCS) {
+ addr = mt7996_mac_wtbl_lmac_addr(dev, idx, 5);
+ val = mt76_rr(dev, addr);
+ rate->eht_gi = FIELD_GET(GENMASK(25, 24), val);
+ } else if (rate->flags & RATE_INFO_FLAGS_HE_MCS) {
u8 offs = 24 + 2 * bw;
rate->he_gi = (val & (0x3 << offs)) >> offs;
@@ -469,7 +476,7 @@ static int mt7996_reverse_frag0_hdr_trans(struct sk_buff *skb, u16 hdr_gap)
ether_addr_copy(hdr.addr4, eth_hdr->h_source);
break;
default:
- break;
+ return -EINVAL;
}
skb_pull(skb, hdr_gap + sizeof(struct ethhdr) - 2);
@@ -560,6 +567,15 @@ mt7996_mac_fill_rx_rate(struct mt7996_dev *dev,
status->he_dcm = dcm;
break;
+ case MT_PHY_TYPE_EHT_SU:
+ case MT_PHY_TYPE_EHT_TRIG:
+ case MT_PHY_TYPE_EHT_MU:
+ /* TODO: currently report rx rate with HE rate */
+ status->nss = nss;
+ status->encoding = RX_ENC_HE;
+ bw = min_t(int, bw, IEEE80211_STA_RX_BW_160);
+ i = min_t(int, i & 0xf, 11);
+ break;
default:
return -EINVAL;
}
@@ -584,6 +600,9 @@ mt7996_mac_fill_rx_rate(struct mt7996_dev *dev,
case IEEE80211_STA_RX_BW_160:
status->bw = RATE_INFO_BW_160;
break;
+ case IEEE80211_STA_RX_BW_320:
+ status->bw = RATE_INFO_BW_320;
+ break;
default:
return -EINVAL;
}
@@ -959,51 +978,6 @@ mt7996_mac_write_txwi_80211(struct mt7996_dev *dev, __le32 *txwi,
}
}
-static u16
-mt7996_mac_tx_rate_val(struct mt76_phy *mphy, struct ieee80211_vif *vif,
- bool beacon, bool mcast)
-{
- u8 mode = 0, band = mphy->chandef.chan->band;
- int rateidx = 0, mcast_rate;
-
- if (beacon) {
- struct cfg80211_bitrate_mask *mask;
-
- mask = &vif->bss_conf.beacon_tx_rate;
- if (hweight16(mask->control[band].he_mcs[0]) == 1) {
- rateidx = ffs(mask->control[band].he_mcs[0]) - 1;
- mode = MT_PHY_TYPE_HE_SU;
- goto out;
- } else if (hweight16(mask->control[band].vht_mcs[0]) == 1) {
- rateidx = ffs(mask->control[band].vht_mcs[0]) - 1;
- mode = MT_PHY_TYPE_VHT;
- goto out;
- } else if (hweight8(mask->control[band].ht_mcs[0]) == 1) {
- rateidx = ffs(mask->control[band].ht_mcs[0]) - 1;
- mode = MT_PHY_TYPE_HT;
- goto out;
- } else if (hweight32(mask->control[band].legacy) == 1) {
- rateidx = ffs(mask->control[band].legacy) - 1;
- goto legacy;
- }
- }
-
- mcast_rate = vif->bss_conf.mcast_rate[band];
- if (mcast && mcast_rate > 0)
- rateidx = mcast_rate - 1;
- else
- rateidx = ffs(vif->bss_conf.basic_rates) - 1;
-
-legacy:
- rateidx = mt76_calculate_default_rate(mphy, rateidx);
- mode = rateidx >> 8;
- rateidx &= GENMASK(7, 0);
-
-out:
- return FIELD_PREP(MT_TX_RATE_IDX, rateidx) |
- FIELD_PREP(MT_TX_RATE_MODE, mode);
-}
-
void mt7996_mac_write_txwi(struct mt7996_dev *dev, __le32 *txwi,
struct sk_buff *skb, struct mt76_wcid *wcid, int pid,
struct ieee80211_key_conf *key, u32 changed)
@@ -1091,7 +1065,8 @@ void mt7996_mac_write_txwi(struct mt7996_dev *dev, __le32 *txwi,
/* Fixed rata is available just for 802.11 txd */
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
bool multicast = is_multicast_ether_addr(hdr->addr1);
- u16 rate = mt7996_mac_tx_rate_val(mphy, vif, beacon, multicast);
+ u16 rate = mt76_connac2_mac_tx_rate_val(mphy, vif, beacon,
+ multicast);
/* fix to bw 20 */
val = MT_TXD6_FIXED_BW |
@@ -1113,8 +1088,8 @@ int mt7996_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
struct ieee80211_key_conf *key = info->control.hw_key;
struct ieee80211_vif *vif = info->control.vif;
+ struct mt76_connac_txp_common *txp;
struct mt76_txwi_cache *t;
- struct mt7996_txp *txp;
int id, i, pid, nbuf = tx_info->nbuf - 1;
bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;
u8 *txwi = (u8 *)txwi_ptr;
@@ -1148,35 +1123,35 @@ int mt7996_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
mt7996_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, pid,
key, 0);
- txp = (struct mt7996_txp *)(txwi + MT_TXD_SIZE);
+ txp = (struct mt76_connac_txp_common *)(txwi + MT_TXD_SIZE);
for (i = 0; i < nbuf; i++) {
- txp->buf[i] = cpu_to_le32(tx_info->buf[i + 1].addr);
- txp->len[i] = cpu_to_le16(tx_info->buf[i + 1].len);
+ txp->fw.buf[i] = cpu_to_le32(tx_info->buf[i + 1].addr);
+ txp->fw.len[i] = cpu_to_le16(tx_info->buf[i + 1].len);
}
- txp->nbuf = nbuf;
+ txp->fw.nbuf = nbuf;
- txp->flags = cpu_to_le16(MT_CT_INFO_FROM_HOST);
+ txp->fw.flags = cpu_to_le16(MT_CT_INFO_FROM_HOST);
if (!is_8023 || pid >= MT_PACKET_ID_FIRST)
- txp->flags |= cpu_to_le16(MT_CT_INFO_APPLY_TXD);
+ txp->fw.flags |= cpu_to_le16(MT_CT_INFO_APPLY_TXD);
if (!key)
- txp->flags |= cpu_to_le16(MT_CT_INFO_NONE_CIPHER_FRAME);
+ txp->fw.flags |= cpu_to_le16(MT_CT_INFO_NONE_CIPHER_FRAME);
if (!is_8023 && ieee80211_is_mgmt(hdr->frame_control))
- txp->flags |= cpu_to_le16(MT_CT_INFO_MGMT_FRAME);
+ txp->fw.flags |= cpu_to_le16(MT_CT_INFO_MGMT_FRAME);
if (vif) {
struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
- txp->bss_idx = mvif->mt76.idx;
+ txp->fw.bss_idx = mvif->mt76.idx;
}
- txp->token = cpu_to_le16(id);
+ txp->fw.token = cpu_to_le16(id);
if (test_bit(MT_WCID_FLAG_4ADDR, &wcid->flags))
- txp->rept_wds_wcid = cpu_to_le16(wcid->idx);
+ txp->fw.rept_wds_wcid = cpu_to_le16(wcid->idx);
else
- txp->rept_wds_wcid = cpu_to_le16(0xfff);
+ txp->fw.rept_wds_wcid = cpu_to_le16(0xfff);
tx_info->skb = DMA_DUMMY_DATA;
/* pass partial skb header to fw */
@@ -1213,18 +1188,6 @@ mt7996_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi)
}
static void
-mt7996_txp_skb_unmap(struct mt76_dev *dev, struct mt76_txwi_cache *t)
-{
- struct mt7996_txp *txp;
- int i;
-
- txp = mt7996_txwi_to_txp(dev, t);
- for (i = 0; i < txp->nbuf; i++)
- dma_unmap_single(dev->dev, le32_to_cpu(txp->buf[i]),
- le16_to_cpu(txp->len[i]), DMA_TO_DEVICE);
-}
-
-static void
mt7996_txwi_free(struct mt7996_dev *dev, struct mt76_txwi_cache *t,
struct ieee80211_sta *sta, struct list_head *free_list)
{
@@ -1233,7 +1196,7 @@ mt7996_txwi_free(struct mt7996_dev *dev, struct mt76_txwi_cache *t,
__le32 *txwi;
u16 wcid_idx;
- mt7996_txp_skb_unmap(mdev, t);
+ mt76_connac_txp_skb_unmap(mdev, t);
if (!t->skb)
goto out;
@@ -1434,6 +1397,15 @@ mt7996_mac_add_txs_skb(struct mt7996_dev *dev, struct mt76_wcid *wcid, int pid,
rate.he_dcm = FIELD_GET(MT_TX_RATE_DCM, txrate);
rate.flags = RATE_INFO_FLAGS_HE_MCS;
break;
+ case MT_PHY_TYPE_EHT_SU:
+ case MT_PHY_TYPE_EHT_TRIG:
+ case MT_PHY_TYPE_EHT_MU:
+ if (rate.mcs > 13)
+ goto out;
+
+ rate.eht_gi = wcid->rate.eht_gi;
+ rate.flags = RATE_INFO_FLAGS_EHT_MCS;
+ break;
default:
goto out;
}
@@ -1441,6 +1413,10 @@ mt7996_mac_add_txs_skb(struct mt7996_dev *dev, struct mt76_wcid *wcid, int pid,
stats->tx_mode[mode]++;
switch (FIELD_GET(MT_TXS0_BW, txs)) {
+ case IEEE80211_STA_RX_BW_320:
+ rate.bw = RATE_INFO_BW_320;
+ stats->tx_bw[4]++;
+ break;
case IEEE80211_STA_RX_BW_160:
rate.bw = RATE_INFO_BW_160;
stats->tx_bw[3]++;
@@ -1486,7 +1462,7 @@ static void mt7996_mac_add_txs(struct mt7996_dev *dev, void *data)
if (pid < MT_PACKET_ID_FIRST)
return;
- if (wcidx >= MT7996_WTBL_SIZE)
+ if (wcidx >= mt7996_wtbl_size(dev))
return;
rcu_read_lock();
@@ -1589,27 +1565,6 @@ void mt7996_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
}
}
-void mt7996_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e)
-{
- if (!e->txwi) {
- dev_kfree_skb_any(e->skb);
- return;
- }
-
- /* error path */
- if (e->skb == DMA_DUMMY_DATA) {
- struct mt76_txwi_cache *t;
- struct mt7996_txp *txp;
-
- txp = mt7996_txwi_to_txp(mdev, e->txwi);
- t = mt76_token_put(mdev, le16_to_cpu(txp->token));
- e->skb = t ? t->skb : NULL;
- }
-
- if (e->skb)
- mt76_tx_complete_skb(mdev, e->wcid, e->skb);
-}
-
void mt7996_mac_cca_stats_reset(struct mt7996_phy *phy)
{
struct mt7996_dev *dev = phy->dev;
@@ -1690,7 +1645,7 @@ void mt7996_mac_set_timing(struct mt7996_phy *phy)
else
val = MT7996_CFEND_RATE_11B;
- mt76_rmw_field(dev, MT_AGG_ACR0(band_idx), MT_AGG_ACR_CFEND_RATE, val);
+ mt76_rmw_field(dev, MT_RATE_HRCR0(band_idx), MT_RATE_HRCR0_CFEND_RATE, val);
mt76_clear(dev, MT_ARB_SCR(band_idx),
MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mac.h b/drivers/net/wireless/mediatek/mt76/mt7996/mac.h
index 9f68852012b9..27184cbac619 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mac.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mac.h
@@ -268,17 +268,6 @@ enum tx_mgnt_type {
/* VHT/HE only use bits 0-3 */
#define MT_TX_RATE_IDX GENMASK(5, 0)
-struct mt7996_txp {
- __le16 flags;
- __le16 token;
- u8 bss_idx;
- __le16 rept_wds_wcid;
- u8 nbuf;
-#define MT_TXP_MAX_BUF_NUM 6
- __le32 buf[MT_TXP_MAX_BUF_NUM];
- __le16 len[MT_TXP_MAX_BUF_NUM];
-} __packed __aligned(4);
-
#define MT_TXFREE0_PKT_TYPE GENMASK(31, 27)
#define MT_TXFREE0_MSDU_CNT GENMASK(25, 16)
#define MT_TXFREE0_RX_BYTE GENMASK(15, 0)
@@ -382,17 +371,4 @@ struct mt7996_dfs_radar_spec {
struct mt7996_dfs_pattern radar_pattern[16];
};
-static inline struct mt7996_txp *
-mt7996_txwi_to_txp(struct mt76_dev *dev, struct mt76_txwi_cache *t)
-{
- u8 *txwi;
-
- if (!t)
- return NULL;
-
- txwi = mt76_get_txwi_ptr(dev, t);
-
- return (struct mt7996_txp *)(txwi + MT_TXD_SIZE);
-}
-
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/main.c b/drivers/net/wireless/mediatek/mt76/mt7996/main.c
index 4421cd54311b..3e4da0350d96 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/main.c
@@ -170,7 +170,7 @@ static int mt7996_add_interface(struct ieee80211_hw *hw,
phy->monitor_vif = vif;
mvif->mt76.idx = __ffs64(~dev->mt76.vif_mask);
- if (mvif->mt76.idx >= (MT7996_MAX_INTERFACES << dev->dbdc_support)) {
+ if (mvif->mt76.idx >= mt7996_max_interface_num(dev)) {
ret = -ENOSPC;
goto out;
}
@@ -880,14 +880,17 @@ mt7996_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
phy->mt76->antenna_mask = tx_ant;
/* restore to the origin chainmask which might have auxiliary path */
- if (hweight8(tx_ant) == max_nss)
+ if (hweight8(tx_ant) == max_nss && band_idx < MT_BAND2)
+ phy->mt76->chainmask = ((dev->chainmask >> shift) &
+ (BIT(dev->chainshift[band_idx + 1] - shift) - 1)) << shift;
+ else if (hweight8(tx_ant) == max_nss)
phy->mt76->chainmask = (dev->chainmask >> shift) << shift;
else
phy->mt76->chainmask = tx_ant << shift;
mt76_set_stream_caps(phy->mt76, true);
mt7996_set_stream_vht_txbf_caps(phy);
- mt7996_set_stream_he_caps(phy);
+ mt7996_set_stream_he_eht_caps(phy);
mutex_unlock(&dev->mt76.mutex);
@@ -1081,10 +1084,14 @@ static const char mt7996_gstrings_stats[][ETH_GSTRING_LEN] = {
"v_tx_mode_he_ext_su",
"v_tx_mode_he_tb",
"v_tx_mode_he_mu",
+ "v_tx_mode_eht_su",
+ "v_tx_mode_eht_trig",
+ "v_tx_mode_eht_mu",
"v_tx_bw_20",
"v_tx_bw_40",
"v_tx_bw_80",
"v_tx_bw_160",
+ "v_tx_bw_320",
"v_tx_mcs_0",
"v_tx_mcs_1",
"v_tx_mcs_2",
@@ -1097,6 +1104,8 @@ static const char mt7996_gstrings_stats[][ETH_GSTRING_LEN] = {
"v_tx_mcs_9",
"v_tx_mcs_10",
"v_tx_mcs_11",
+ "v_tx_mcs_12",
+ "v_tx_mcs_13",
};
#define MT7996_SSTATS_LEN ARRAY_SIZE(mt7996_gstrings_stats)
@@ -1130,7 +1139,7 @@ static void mt7996_ethtool_worker(void *wi_data, struct ieee80211_sta *sta)
if (msta->vif->mt76.idx != wi->idx)
return;
- mt76_ethtool_worker(wi, &msta->stats);
+ mt76_ethtool_worker(wi, &msta->stats, true);
}
static
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
index 04e1d10bbd21..dbe30832fd88 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
@@ -70,6 +70,7 @@ struct mt7996_fw_region {
#define HE_PHY(p, c) u8_get_bits(c, IEEE80211_HE_PHY_##p)
#define HE_MAC(m, c) u8_get_bits(c, IEEE80211_HE_MAC_##m)
+#define EHT_PHY(p, c) u8_get_bits(c, IEEE80211_EHT_PHY_##p)
static bool sr_scene_detect = true;
module_param(sr_scene_detect, bool, 0644);
@@ -335,6 +336,9 @@ mt7996_mcu_rx_radar_detected(struct mt7996_dev *dev, struct sk_buff *skb)
r = (struct mt7996_mcu_rdd_report *)skb->data;
+ if (r->band_idx >= ARRAY_SIZE(dev->mt76.phys))
+ return;
+
mphy = dev->mt76.phys[r->band_idx];
if (!mphy)
return;
@@ -412,6 +416,9 @@ mt7996_mcu_ie_countdown(struct mt7996_dev *dev, struct sk_buff *skb)
struct header *hdr = (struct header *)data;
struct tlv *tlv = (struct tlv *)(data + 4);
+ if (hdr->band >= ARRAY_SIZE(dev->mt76.phys))
+ return;
+
if (hdr->band && dev->mt76.phys[hdr->band])
mphy = dev->mt76.phys[hdr->band];
@@ -765,9 +772,8 @@ mt7996_mcu_bss_basic_tlv(struct sk_buff *skb,
bss->dtim_period = vif->bss_conf.dtim_period;
bss->phymode = mt76_connac_get_phy_mode(phy, vif,
chandef->chan->band, NULL);
-
- if (chandef->chan->band == NL80211_BAND_6GHZ)
- bss->phymode_ext |= PHY_MODE_AX_6G;
+ bss->phymode_ext = mt76_connac_get_phy_mode_ext(phy, vif,
+ chandef->chan->band);
return 0;
}
@@ -903,8 +909,8 @@ mt7996_mcu_sta_he_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
he = (struct sta_rec_he_v2 *)tlv;
for (i = 0; i < 11; i++) {
if (i < 6)
- he->he_mac_cap[i] = cpu_to_le16(elem->mac_cap_info[i]);
- he->he_phy_cap[i] = cpu_to_le16(elem->phy_cap_info[i]);
+ he->he_mac_cap[i] = elem->mac_cap_info[i];
+ he->he_phy_cap[i] = elem->phy_cap_info[i];
}
mcs_map = sta->deflink.he_cap.he_mcs_nss_supp;
@@ -946,6 +952,35 @@ mt7996_mcu_sta_he_6g_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
}
static void
+mt7996_mcu_sta_eht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
+{
+ struct ieee80211_eht_mcs_nss_supp *mcs_map;
+ struct ieee80211_eht_cap_elem_fixed *elem;
+ struct sta_rec_eht *eht;
+ struct tlv *tlv;
+
+ if (!sta->deflink.eht_cap.has_eht)
+ return;
+
+ mcs_map = &sta->deflink.eht_cap.eht_mcs_nss_supp;
+ elem = &sta->deflink.eht_cap.eht_cap_elem;
+
+ tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_EHT, sizeof(*eht));
+
+ eht = (struct sta_rec_eht *)tlv;
+ eht->tid_bitmap = 0xff;
+ eht->mac_cap = cpu_to_le16(*(u16 *)elem->mac_cap_info);
+ eht->phy_cap = cpu_to_le64(*(u64 *)elem->phy_cap_info);
+ eht->phy_cap_ext = cpu_to_le64(elem->phy_cap_info[8]);
+
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_20)
+ memcpy(eht->mcs_map_bw20, &mcs_map->only_20mhz, sizeof(eht->mcs_map_bw20));
+ memcpy(eht->mcs_map_bw80, &mcs_map->bw._80, sizeof(eht->mcs_map_bw80));
+ memcpy(eht->mcs_map_bw160, &mcs_map->bw._160, sizeof(eht->mcs_map_bw160));
+ memcpy(eht->mcs_map_bw320, &mcs_map->bw._320, sizeof(eht->mcs_map_bw320));
+}
+
+static void
mt7996_mcu_sta_ht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
{
struct sta_rec_ht *ht;
@@ -1019,15 +1054,27 @@ mt7996_is_ebf_supported(struct mt7996_phy *phy, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, bool bfee)
{
struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
- int tx_ant = hweight8(phy->mt76->antenna_mask) - 1;
+ int sts = hweight16(phy->mt76->chainmask);
if (vif->type != NL80211_IFTYPE_STATION &&
vif->type != NL80211_IFTYPE_AP)
return false;
- if (!bfee && tx_ant < 2)
+ if (!bfee && sts < 2)
return false;
+ if (sta->deflink.eht_cap.has_eht) {
+ struct ieee80211_sta_eht_cap *pc = &sta->deflink.eht_cap;
+ struct ieee80211_eht_cap_elem_fixed *pe = &pc->eht_cap_elem;
+
+ if (bfee)
+ return mvif->cap.eht_su_ebfee &&
+ EHT_PHY(CAP0_SU_BEAMFORMEE, pe->phy_cap_info[0]);
+ else
+ return mvif->cap.eht_su_ebfer &&
+ EHT_PHY(CAP0_SU_BEAMFORMER, pe->phy_cap_info[0]);
+ }
+
if (sta->deflink.he_cap.has_he) {
struct ieee80211_he_cap_elem *pe = &sta->deflink.he_cap.he_cap_elem;
@@ -1185,12 +1232,68 @@ mt7996_mcu_sta_bfer_he(struct ieee80211_sta *sta, struct ieee80211_vif *vif,
}
static void
+mt7996_mcu_sta_bfer_eht(struct ieee80211_sta *sta, struct ieee80211_vif *vif,
+ struct mt7996_phy *phy, struct sta_rec_bf *bf)
+{
+ struct ieee80211_sta_eht_cap *pc = &sta->deflink.eht_cap;
+ struct ieee80211_eht_cap_elem_fixed *pe = &pc->eht_cap_elem;
+ struct ieee80211_eht_mcs_nss_supp *eht_nss = &pc->eht_mcs_nss_supp;
+ const struct ieee80211_sta_eht_cap *vc =
+ mt76_connac_get_eht_phy_cap(phy->mt76, vif);
+ const struct ieee80211_eht_cap_elem_fixed *ve = &vc->eht_cap_elem;
+ u8 nss_mcs = u8_get_bits(eht_nss->bw._80.rx_tx_mcs9_max_nss,
+ IEEE80211_EHT_MCS_NSS_RX) - 1;
+ u8 snd_dim, sts;
+
+ bf->tx_mode = MT_PHY_TYPE_EHT_MU;
+
+ mt7996_mcu_sta_sounding_rate(bf);
+
+ bf->trigger_su = EHT_PHY(CAP3_TRIG_SU_BF_FDBK, pe->phy_cap_info[3]);
+ bf->trigger_mu = EHT_PHY(CAP3_TRIG_MU_BF_PART_BW_FDBK, pe->phy_cap_info[3]);
+ snd_dim = EHT_PHY(CAP2_SOUNDING_DIM_80MHZ_MASK, ve->phy_cap_info[2]);
+ sts = EHT_PHY(CAP0_BEAMFORMEE_SS_80MHZ_MASK, pe->phy_cap_info[0]) +
+ (EHT_PHY(CAP1_BEAMFORMEE_SS_80MHZ_MASK, pe->phy_cap_info[1]) << 1);
+ bf->nrow = min_t(u8, snd_dim, sts);
+ bf->ncol = min_t(u8, nss_mcs, bf->nrow);
+ bf->ibf_ncol = bf->ncol;
+
+ if (sta->deflink.bandwidth < IEEE80211_STA_RX_BW_160)
+ return;
+
+ switch (sta->deflink.bandwidth) {
+ case IEEE80211_STA_RX_BW_160:
+ snd_dim = EHT_PHY(CAP2_SOUNDING_DIM_160MHZ_MASK, ve->phy_cap_info[2]);
+ sts = EHT_PHY(CAP1_BEAMFORMEE_SS_160MHZ_MASK, pe->phy_cap_info[1]);
+ nss_mcs = u8_get_bits(eht_nss->bw._160.rx_tx_mcs9_max_nss,
+ IEEE80211_EHT_MCS_NSS_RX) - 1;
+
+ bf->nrow_gt_bw80 = min_t(u8, snd_dim, sts);
+ bf->ncol_gt_bw80 = nss_mcs;
+ break;
+ case IEEE80211_STA_RX_BW_320:
+ snd_dim = EHT_PHY(CAP2_SOUNDING_DIM_320MHZ_MASK, ve->phy_cap_info[2]) +
+ (EHT_PHY(CAP3_SOUNDING_DIM_320MHZ_MASK,
+ ve->phy_cap_info[3]) << 1);
+ sts = EHT_PHY(CAP1_BEAMFORMEE_SS_320MHZ_MASK, pe->phy_cap_info[1]);
+ nss_mcs = u8_get_bits(eht_nss->bw._320.rx_tx_mcs9_max_nss,
+ IEEE80211_EHT_MCS_NSS_RX) - 1;
+
+ bf->nrow_gt_bw80 = min_t(u8, snd_dim, sts) << 4;
+ bf->ncol_gt_bw80 = nss_mcs << 4;
+ break;
+ default:
+ break;
+ }
+}
+
+static void
mt7996_mcu_sta_bfer_tlv(struct mt7996_dev *dev, struct sk_buff *skb,
struct ieee80211_vif *vif, struct ieee80211_sta *sta)
{
struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
struct mt7996_phy *phy = mvif->phy;
- int tx_ant = hweight8(phy->mt76->antenna_mask) - 1;
+ int tx_ant = hweight8(phy->mt76->chainmask) - 1;
struct sta_rec_bf *bf;
struct tlv *tlv;
const u8 matrix[4][4] = {
@@ -1211,11 +1314,13 @@ mt7996_mcu_sta_bfer_tlv(struct mt7996_dev *dev, struct sk_buff *skb,
tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_BF, sizeof(*bf));
bf = (struct sta_rec_bf *)tlv;
- /* he: eBF only, in accordance with spec
+ /* he/eht: eBF only, in accordance with spec
* vht: support eBF and iBF
* ht: iBF only, since mac80211 lacks of eBF support
*/
- if (sta->deflink.he_cap.has_he && ebf)
+ if (sta->deflink.eht_cap.has_eht && ebf)
+ mt7996_mcu_sta_bfer_eht(sta, vif, phy, bf);
+ else if (sta->deflink.he_cap.has_he && ebf)
mt7996_mcu_sta_bfer_he(sta, vif, phy, bf);
else if (sta->deflink.vht_cap.vht_supported)
mt7996_mcu_sta_bfer_vht(sta, phy, bf, ebf);
@@ -1430,8 +1535,9 @@ mt7996_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb, struct mt7996_dev *dev,
ra->auto_rate = true;
ra->phy_mode = mt76_connac_get_phy_mode(mphy, vif, band, sta);
ra->channel = chandef->chan->hw_value;
- ra->bw = sta->deflink.bandwidth;
- ra->phy.bw = sta->deflink.bandwidth;
+ ra->bw = (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_320) ?
+ CMD_CBW_320MHZ : sta->deflink.bandwidth;
+ ra->phy.bw = ra->bw;
ra->mmps_mode = mt7996_mcu_get_mmps_mode(sta->deflink.smps_mode);
if (supp_rate) {
@@ -1613,6 +1719,8 @@ int mt7996_mcu_add_sta(struct mt7996_dev *dev, struct ieee80211_vif *vif,
mt7996_mcu_sta_he_tlv(skb, sta);
/* starec he 6g*/
mt7996_mcu_sta_he_6g_tlv(skb, sta);
+ /* starec eht */
+ mt7996_mcu_sta_eht_tlv(skb, sta);
/* TODO: starec muru */
/* starec bfee */
mt7996_mcu_sta_bfee_tlv(dev, skb, vif, sta);
@@ -1809,6 +1917,7 @@ mt7996_mcu_beacon_check_caps(struct mt7996_phy *phy, struct ieee80211_vif *vif,
{
struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
struct mt7996_vif_cap *vc = &mvif->cap;
+ const struct ieee80211_eht_cap_elem_fixed *eht;
const struct ieee80211_he_cap_elem *he;
const struct ieee80211_vht_cap *vht;
const struct ieee80211_ht_cap *ht;
@@ -1879,6 +1988,23 @@ mt7996_mcu_beacon_check_caps(struct mt7996_phy *phy, struct ieee80211_vif *vif,
HE_PHY(CAP4_MU_BEAMFORMER, he->phy_cap_info[4]) &&
HE_PHY(CAP4_MU_BEAMFORMER, pe->phy_cap_info[4]);
}
+
+ ie = cfg80211_find_ext_ie(WLAN_EID_EXT_EHT_CAPABILITY,
+ mgmt->u.beacon.variable, len);
+ if (ie && ie[1] >= sizeof(*eht) + 1) {
+ const struct ieee80211_sta_eht_cap *pc =
+ mt76_connac_get_eht_phy_cap(phy->mt76, vif);
+ const struct ieee80211_eht_cap_elem_fixed *pe = &pc->eht_cap_elem;
+
+ eht = (void *)(ie + 3);
+
+ vc->eht_su_ebfer =
+ EHT_PHY(CAP0_SU_BEAMFORMER, eht->phy_cap_info[0]) &&
+ EHT_PHY(CAP0_SU_BEAMFORMER, pe->phy_cap_info[0]);
+ vc->eht_su_ebfee =
+ EHT_PHY(CAP0_SU_BEAMFORMEE, eht->phy_cap_info[0]) &&
+ EHT_PHY(CAP0_SU_BEAMFORMEE, pe->phy_cap_info[0]);
+ }
}
int mt7996_mcu_add_beacon(struct ieee80211_hw *hw,
@@ -2241,6 +2367,26 @@ mt7996_firmware_state(struct mt7996_dev *dev, bool wa)
return 0;
}
+static int
+mt7996_mcu_restart(struct mt76_dev *dev)
+{
+ struct {
+ u8 __rsv1[4];
+
+ __le16 tag;
+ __le16 len;
+ u8 power_mode;
+ u8 __rsv2[3];
+ } __packed req = {
+ .tag = cpu_to_le16(UNI_POWER_OFF),
+ .len = cpu_to_le16(sizeof(req) - 4),
+ .power_mode = 1,
+ };
+
+ return mt76_mcu_send_msg(dev, MCU_WM_UNI_CMD(POWER_CTRL), &req,
+ sizeof(req), false);
+}
+
static int mt7996_load_firmware(struct mt7996_dev *dev)
{
int ret;
@@ -2248,7 +2394,7 @@ static int mt7996_load_firmware(struct mt7996_dev *dev)
/* make sure fw is download state */
if (mt7996_firmware_state(dev, false)) {
/* restart firmware once */
- __mt76_mcu_restart(&dev->mt76);
+ mt7996_mcu_restart(&dev->mt76);
ret = mt7996_firmware_state(dev, false);
if (ret) {
dev_err(dev->mt76.dev,
@@ -2377,33 +2523,12 @@ mt7996_mcu_init_rx_airtime(struct mt7996_dev *dev)
MCU_WM_UNI_CMD(VOW), true);
}
-static int
-mt7996_mcu_restart(struct mt76_dev *dev)
-{
- struct {
- u8 __rsv1[4];
-
- __le16 tag;
- __le16 len;
- u8 power_mode;
- u8 __rsv2[3];
- } __packed req = {
- .tag = cpu_to_le16(UNI_POWER_OFF),
- .len = cpu_to_le16(sizeof(req) - 4),
- .power_mode = 1,
- };
-
- return mt76_mcu_send_msg(dev, MCU_WM_UNI_CMD(POWER_CREL), &req,
- sizeof(req), false);
-}
-
int mt7996_mcu_init(struct mt7996_dev *dev)
{
static const struct mt76_mcu_ops mt7996_mcu_ops = {
.headroom = sizeof(struct mt76_connac2_mcu_txd), /* reuse */
.mcu_skb_send_msg = mt7996_mcu_send_message,
.mcu_parse_response = mt7996_mcu_parse_response,
- .mcu_restart = mt7996_mcu_restart,
};
int ret;
@@ -2451,16 +2576,17 @@ int mt7996_mcu_init(struct mt7996_dev *dev)
void mt7996_mcu_exit(struct mt7996_dev *dev)
{
- __mt76_mcu_restart(&dev->mt76);
+ mt7996_mcu_restart(&dev->mt76);
if (mt7996_firmware_state(dev, false)) {
dev_err(dev->mt76.dev, "Failed to exit mcu\n");
- return;
+ goto out;
}
mt76_wr(dev, MT_TOP_LPCR_HOST_BAND(0), MT_TOP_LPCR_HOST_FW_OWN);
if (dev->hif2)
mt76_wr(dev, MT_TOP_LPCR_HOST_BAND(1),
MT_TOP_LPCR_HOST_FW_OWN);
+out:
skb_queue_purge(&dev->mt76.mcu.res_q);
}
@@ -2921,8 +3047,9 @@ int mt7996_mcu_get_eeprom(struct mt7996_dev *dev, u32 offset)
bool valid;
int ret;
- ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_WM_UNI_CMD_QUERY(EFUSE_CTRL), &req,
- sizeof(req), true, &skb);
+ ret = mt76_mcu_send_and_get_msg(&dev->mt76,
+ MCU_WM_UNI_CMD_QUERY(EFUSE_CTRL),
+ &req, sizeof(req), true, &skb);
if (ret)
return ret;
@@ -2970,6 +3097,52 @@ int mt7996_mcu_get_eeprom_free_block(struct mt7996_dev *dev, u8 *block_num)
return 0;
}
+int mt7996_mcu_get_chip_config(struct mt7996_dev *dev, u32 *cap)
+{
+#define NIC_CAP 3
+#define UNI_EVENT_CHIP_CONFIG_EFUSE_VERSION 0x21
+ struct {
+ u8 _rsv[4];
+
+ __le16 tag;
+ __le16 len;
+ } __packed req = {
+ .tag = cpu_to_le16(NIC_CAP),
+ .len = cpu_to_le16(sizeof(req) - 4),
+ };
+ struct sk_buff *skb;
+ u8 *buf;
+ int ret;
+
+ ret = mt76_mcu_send_and_get_msg(&dev->mt76,
+ MCU_WM_UNI_CMD_QUERY(CHIP_CONFIG), &req,
+ sizeof(req), true, &skb);
+ if (ret)
+ return ret;
+
+ /* fixed field */
+ skb_pull(skb, 4);
+
+ buf = skb->data;
+ while (buf - skb->data < skb->len) {
+ struct tlv *tlv = (struct tlv *)buf;
+
+ switch (le16_to_cpu(tlv->tag)) {
+ case UNI_EVENT_CHIP_CONFIG_EFUSE_VERSION:
+ *cap = le32_to_cpu(*(__le32 *)(buf + sizeof(*tlv)));
+ break;
+ default:
+ break;
+ };
+
+ buf += le16_to_cpu(tlv->len);
+ }
+
+ dev_kfree_skb(skb);
+
+ return 0;
+}
+
int mt7996_mcu_get_chan_mib_info(struct mt7996_phy *phy, bool chan_switch)
{
struct {
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h
index 6084b2337598..dd0c5ac52703 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.h
@@ -347,6 +347,21 @@ struct sta_rec_ba_uni {
u8 __rsv[3];
} __packed;
+struct sta_rec_eht {
+ __le16 tag;
+ __le16 len;
+ u8 tid_bitmap;
+ u8 _rsv;
+ __le16 mac_cap;
+ __le64 phy_cap;
+ __le64 phy_cap_ext;
+ u8 mcs_map_bw20[4];
+ u8 mcs_map_bw80[3];
+ u8 mcs_map_bw160[3];
+ u8 mcs_map_bw320[3];
+ u8 _rsv2[3];
+} __packed;
+
struct sec_key_uni {
__le16 wlan_idx;
u8 mgmt_prot;
@@ -554,6 +569,7 @@ enum {
sizeof(struct sta_rec_sec) + \
sizeof(struct sta_rec_ra_fixed) + \
sizeof(struct sta_rec_he_6g_capa) + \
+ sizeof(struct sta_rec_eht) + \
sizeof(struct sta_rec_hdrt) + \
sizeof(struct sta_rec_hdr_trans) + \
sizeof(struct tlv))
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
index 521769eb6b0e..902370a2a639 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
@@ -21,6 +21,7 @@ static const struct __base mt7996_reg_base[] = {
[WF_ETBF_BASE] = { { 0x820ea000, 0x820fa000, 0x830ea000 } },
[WF_LPON_BASE] = { { 0x820eb000, 0x820fb000, 0x830eb000 } },
[WF_MIB_BASE] = { { 0x820ed000, 0x820fd000, 0x830ed000 } },
+ [WF_RATE_BASE] = { { 0x820ee000, 0x820fe000, 0x830ee000 } },
};
static const struct __map mt7996_reg_map[] = {
@@ -149,7 +150,7 @@ static u32 __mt7996_reg_addr(struct mt7996_dev *dev, u32 addr)
if (dev_is_pci(dev->mt76.dev) &&
((addr >= MT_CBTOP1_PHY_START && addr <= MT_CBTOP1_PHY_END) ||
- (addr >= MT_CBTOP2_PHY_START && addr <= MT_CBTOP2_PHY_END)))
+ addr >= MT_CBTOP2_PHY_START))
return mt7996_reg_map_l1(dev, addr);
/* CONN_INFRA: covert to phyiscal addr and use layer 1 remap */
@@ -317,7 +318,7 @@ struct mt7996_dev *mt7996_mmio_probe(struct device *pdev,
{
static const struct mt76_driver_ops drv_ops = {
/* txwi_size = txd size + txp size */
- .txwi_size = MT_TXD_SIZE + sizeof(struct mt7996_txp),
+ .txwi_size = MT_TXD_SIZE + sizeof(struct mt76_connac_fw_txp),
.drv_flags = MT_DRV_TXWI_NO_FREE |
MT_DRV_HW_MGMT_TXQ,
.survey_flags = SURVEY_INFO_TIME_TX |
@@ -325,7 +326,7 @@ struct mt7996_dev *mt7996_mmio_probe(struct device *pdev,
SURVEY_INFO_TIME_BSS_RX,
.token_size = MT7996_TOKEN_SIZE,
.tx_prepare_skb = mt7996_tx_prepare_skb,
- .tx_complete_skb = mt7996_tx_complete_skb,
+ .tx_complete_skb = mt76_connac_tx_complete_skb,
.rx_skb = mt7996_queue_rx_skb,
.rx_check = mt7996_rx_check,
.rx_poll_complete = mt7996_rx_poll_complete,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h b/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h
index 725344791b4c..018dfd2b36b0 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h
@@ -11,12 +11,11 @@
#include "../mt76_connac.h"
#include "regs.h"
-#define MT7996_MAX_INTERFACES 19
+#define MT7996_MAX_INTERFACES 19 /* per-band */
#define MT7996_MAX_WMM_SETS 4
-#define MT7996_WTBL_SIZE 544
-#define MT7996_WTBL_RESERVED (MT7996_WTBL_SIZE - 1)
+#define MT7996_WTBL_RESERVED (mt7996_wtbl_size(dev) - 1)
#define MT7996_WTBL_STA (MT7996_WTBL_RESERVED - \
- MT7996_MAX_INTERFACES)
+ mt7996_max_interface_num(dev))
#define MT7996_WATCHDOG_TIME (HZ / 10)
#define MT7996_RESET_TIMEOUT (30 * HZ)
@@ -124,6 +123,8 @@ struct mt7996_vif_cap {
bool he_su_ebfer:1;
bool he_su_ebfee:1;
bool he_mu_ebfer:1;
+ bool eht_su_ebfer:1;
+ bool eht_su_ebfee:1;
};
struct mt7996_vif {
@@ -264,6 +265,7 @@ struct mt7996_dev {
bool dbdc_support:1;
bool tbtc_support:1;
bool flash_mode:1;
+ bool has_eht:1;
bool ibf;
u8 fw_debug_wm;
@@ -281,6 +283,8 @@ struct mt7996_dev {
u32 reg_l1_backup;
u32 reg_l2_backup;
+
+ u8 wtbl_size_group;
};
enum {
@@ -419,6 +423,7 @@ int mt7996_mcu_set_fixed_rate_ctrl(struct mt7996_dev *dev,
int mt7996_mcu_set_eeprom(struct mt7996_dev *dev);
int mt7996_mcu_get_eeprom(struct mt7996_dev *dev, u32 offset);
int mt7996_mcu_get_eeprom_free_block(struct mt7996_dev *dev, u8 *block_num);
+int mt7996_mcu_get_chip_config(struct mt7996_dev *dev, u32 *cap);
int mt7996_mcu_set_ser(struct mt7996_dev *dev, u8 action, u8 set, u8 band);
int mt7996_mcu_set_txbf(struct mt7996_dev *dev, u8 action);
int mt7996_mcu_set_fcc5_lpn(struct mt7996_dev *dev, int val);
@@ -443,6 +448,16 @@ int mt7996_mcu_fw_dbg_ctrl(struct mt7996_dev *dev, u32 module, u8 level);
void mt7996_mcu_rx_event(struct mt7996_dev *dev, struct sk_buff *skb);
void mt7996_mcu_exit(struct mt7996_dev *dev);
+static inline u8 mt7996_max_interface_num(struct mt7996_dev *dev)
+{
+ return MT7996_MAX_INTERFACES * (1 + dev->dbdc_support + dev->tbtc_support);
+}
+
+static inline u16 mt7996_wtbl_size(struct mt7996_dev *dev)
+{
+ return (dev->wtbl_size_group << 8) + 64;
+}
+
void mt7996_dual_hif_set_irq_mask(struct mt7996_dev *dev, bool write_reg,
u32 clear, u32 set);
@@ -493,7 +508,6 @@ int mt7996_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
enum mt76_txq_id qid, struct mt76_wcid *wcid,
struct ieee80211_sta *sta,
struct mt76_tx_info *tx_info);
-void mt7996_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e);
void mt7996_tx_token_put(struct mt7996_dev *dev);
void mt7996_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
struct sk_buff *skb, u32 *info);
@@ -502,7 +516,7 @@ void mt7996_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps);
void mt7996_stats_work(struct work_struct *work);
int mt76_dfs_start_rdd(struct mt7996_dev *dev, bool force);
int mt7996_dfs_init_radar_detector(struct mt7996_phy *phy);
-void mt7996_set_stream_he_caps(struct mt7996_phy *phy);
+void mt7996_set_stream_he_eht_caps(struct mt7996_phy *phy);
void mt7996_set_stream_vht_txbf_caps(struct mt7996_phy *phy);
void mt7996_update_channel(struct mt76_phy *mphy);
int mt7996_init_debugfs(struct mt7996_phy *phy);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/regs.h b/drivers/net/wireless/mediatek/mt76/mt7996/regs.h
index 794f61b93a46..7a28cae34e34 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/regs.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/regs.h
@@ -33,6 +33,7 @@ enum base_rev {
WF_ETBF_BASE,
WF_LPON_BASE,
WF_MIB_BASE,
+ WF_RATE_BASE,
__MT_REG_BASE_MAX,
};
@@ -235,13 +236,6 @@ enum base_rev {
FIELD_PREP(MT_WTBL_LMAC_ID, _id) | \
FIELD_PREP(MT_WTBL_LMAC_DW, _dw))
-/* AGG: band 0(0x820e2000), band 1(0x820f2000), band 2(0x830e2000) */
-#define MT_WF_AGG_BASE(_band) __BASE(WF_AGG_BASE, (_band))
-#define MT_WF_AGG(_band, ofs) (MT_WF_AGG_BASE(_band) + (ofs))
-
-#define MT_AGG_ACR0(_band) MT_WF_AGG(_band, 0x054)
-#define MT_AGG_ACR_CFEND_RATE GENMASK(13, 0)
-
/* ARB: band 0(0x820e3000), band 1(0x820f3000), band 2(0x830e3000) */
#define MT_WF_ARB_BASE(_band) __BASE(WF_ARB_BASE, (_band))
#define MT_WF_ARB(_band, ofs) (MT_WF_ARB_BASE(_band) + (ofs))
@@ -300,6 +294,13 @@ enum base_rev {
#define MT_WF_RMAC_RSVD0(_band) MT_WF_RMAC(_band, 0x03e0)
#define MT_WF_RMAC_RSVD0_EIFS_CLR BIT(21)
+/* RATE: band 0(0x820ee000), band 1(0x820fe000), band 2(0x830ee000) */
+#define MT_WF_RATE_BASE(_band) __BASE(WF_RATE_BASE, (_band))
+#define MT_WF_RATE(_band, ofs) (MT_WF_RATE_BASE(_band) + (ofs))
+
+#define MT_RATE_HRCR0(_band) MT_WF_RATE(_band, 0x050)
+#define MT_RATE_HRCR0_CFEND_RATE GENMASK(14, 0)
+
/* WFDMA0 */
#define MT_WFDMA0_BASE 0xd4000
#define MT_WFDMA0(ofs) (MT_WFDMA0_BASE + (ofs))
@@ -463,7 +464,6 @@ enum base_rev {
#define MT_CBTOP1_PHY_START 0x70000000
#define MT_CBTOP1_PHY_END 0x77ffffff
#define MT_CBTOP2_PHY_START 0xf0000000
-#define MT_CBTOP2_PHY_END 0xffffffff
#define MT_INFRA_MCU_START 0x7c000000
#define MT_INFRA_MCU_END 0x7c3fffff
diff --git a/drivers/net/wireless/mediatek/mt76/sdio.c b/drivers/net/wireless/mediatek/mt76/sdio.c
index 228bc7d45011..419723118ded 100644
--- a/drivers/net/wireless/mediatek/mt76/sdio.c
+++ b/drivers/net/wireless/mediatek/mt76/sdio.c
@@ -562,6 +562,10 @@ mt76s_tx_queue_skb_raw(struct mt76_dev *dev, struct mt76_queue *q,
q->entry[q->head].buf_sz = len;
q->entry[q->head].skb = skb;
+
+ /* ensure the entry fully updated before bus access */
+ smp_wmb();
+
q->head = (q->head + 1) % q->ndesc;
q->queued++;
diff --git a/drivers/net/wireless/mediatek/mt76/sdio_txrx.c b/drivers/net/wireless/mediatek/mt76/sdio_txrx.c
index bfc4de50a4d2..ddd8c0cc744d 100644
--- a/drivers/net/wireless/mediatek/mt76/sdio_txrx.c
+++ b/drivers/net/wireless/mediatek/mt76/sdio_txrx.c
@@ -254,6 +254,10 @@ static int mt76s_tx_run_queue(struct mt76_dev *dev, struct mt76_queue *q)
if (!test_bit(MT76_STATE_MCU_RUNNING, &dev->phy.state)) {
__skb_put_zero(e->skb, 4);
+ err = __skb_grow(e->skb, roundup(e->skb->len,
+ sdio->func->cur_blksize));
+ if (err)
+ return err;
err = __mt76s_xmit_queue(dev, e->skb->data,
e->skb->len);
if (err)
diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c
index 3e281715fcd4..b88959ef38aa 100644
--- a/drivers/net/wireless/mediatek/mt76/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/usb.c
@@ -319,29 +319,27 @@ mt76u_set_endpoints(struct usb_interface *intf,
static int
mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76_queue *q, struct urb *urb,
- int nsgs, gfp_t gfp)
+ int nsgs)
{
int i;
for (i = 0; i < nsgs; i++) {
- struct page *page;
void *data;
int offset;
- data = page_frag_alloc(&q->rx_page, q->buf_size, gfp);
+ data = mt76_get_page_pool_buf(q, &offset, q->buf_size);
if (!data)
break;
- page = virt_to_head_page(data);
- offset = data - page_address(page);
- sg_set_page(&urb->sg[i], page, q->buf_size, offset);
+ sg_set_page(&urb->sg[i], virt_to_head_page(data), q->buf_size,
+ offset);
}
if (i < nsgs) {
int j;
for (j = nsgs; j < urb->num_sgs; j++)
- skb_free_frag(sg_virt(&urb->sg[j]));
+ mt76_put_page_pool_buf(sg_virt(&urb->sg[j]), false);
urb->num_sgs = i;
}
@@ -354,15 +352,16 @@ mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76_queue *q, struct urb *urb,
static int
mt76u_refill_rx(struct mt76_dev *dev, struct mt76_queue *q,
- struct urb *urb, int nsgs, gfp_t gfp)
+ struct urb *urb, int nsgs)
{
enum mt76_rxq_id qid = q - &dev->q_rx[MT_RXQ_MAIN];
+ int offset;
if (qid == MT_RXQ_MAIN && dev->usb.sg_en)
- return mt76u_fill_rx_sg(dev, q, urb, nsgs, gfp);
+ return mt76u_fill_rx_sg(dev, q, urb, nsgs);
urb->transfer_buffer_length = q->buf_size;
- urb->transfer_buffer = page_frag_alloc(&q->rx_page, q->buf_size, gfp);
+ urb->transfer_buffer = mt76_get_page_pool_buf(q, &offset, q->buf_size);
return urb->transfer_buffer ? 0 : -ENOMEM;
}
@@ -400,7 +399,7 @@ mt76u_rx_urb_alloc(struct mt76_dev *dev, struct mt76_queue *q,
if (err)
return err;
- return mt76u_refill_rx(dev, q, e->urb, sg_size, GFP_KERNEL);
+ return mt76u_refill_rx(dev, q, e->urb, sg_size);
}
static void mt76u_urb_free(struct urb *urb)
@@ -408,10 +407,10 @@ static void mt76u_urb_free(struct urb *urb)
int i;
for (i = 0; i < urb->num_sgs; i++)
- skb_free_frag(sg_virt(&urb->sg[i]));
+ mt76_put_page_pool_buf(sg_virt(&urb->sg[i]), false);
if (urb->transfer_buffer)
- skb_free_frag(urb->transfer_buffer);
+ mt76_put_page_pool_buf(urb->transfer_buffer, false);
usb_free_urb(urb);
}
@@ -547,6 +546,8 @@ mt76u_process_rx_entry(struct mt76_dev *dev, struct urb *urb,
len -= data_len;
nsgs++;
}
+
+ skb_mark_for_recycle(skb);
dev->drv->rx_skb(dev, MT_RXQ_MAIN, skb, NULL);
return nsgs;
@@ -612,7 +613,7 @@ mt76u_process_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
count = mt76u_process_rx_entry(dev, urb, q->buf_size);
if (count > 0) {
- err = mt76u_refill_rx(dev, q, urb, count, GFP_ATOMIC);
+ err = mt76u_refill_rx(dev, q, urb, count);
if (err < 0)
break;
}
@@ -663,6 +664,10 @@ mt76u_alloc_rx_queue(struct mt76_dev *dev, enum mt76_rxq_id qid)
struct mt76_queue *q = &dev->q_rx[qid];
int i, err;
+ err = mt76_create_page_pool(dev, q);
+ if (err)
+ return err;
+
spin_lock_init(&q->lock);
q->entry = devm_kcalloc(dev->dev,
MT_NUM_RX_ENTRIES, sizeof(*q->entry),
@@ -691,7 +696,6 @@ EXPORT_SYMBOL_GPL(mt76u_alloc_mcu_queue);
static void
mt76u_free_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
- struct page *page;
int i;
for (i = 0; i < q->ndesc; i++) {
@@ -701,13 +705,7 @@ mt76u_free_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
mt76u_urb_free(q->entry[i].urb);
q->entry[i].urb = NULL;
}
-
- if (!q->rx_page.va)
- return;
-
- page = virt_to_page(q->rx_page.va);
- __page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
- memset(&q->rx_page, 0, sizeof(q->rx_page));
+ page_pool_destroy(q->page_pool);
}
static void mt76u_free_rx(struct mt76_dev *dev)
diff --git a/drivers/net/wireless/mediatek/mt76/util.c b/drivers/net/wireless/mediatek/mt76/util.c
index 581964425468..fc76c66ff1a5 100644
--- a/drivers/net/wireless/mediatek/mt76/util.c
+++ b/drivers/net/wireless/mediatek/mt76/util.c
@@ -24,23 +24,23 @@ bool __mt76_poll(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
}
EXPORT_SYMBOL_GPL(__mt76_poll);
-bool __mt76_poll_msec(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
- int timeout)
+bool ____mt76_poll_msec(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
+ int timeout, int tick)
{
u32 cur;
- timeout /= 10;
+ timeout /= tick;
do {
cur = __mt76_rr(dev, offset) & mask;
if (cur == val)
return true;
- usleep_range(10000, 20000);
+ usleep_range(1000 * tick, 2000 * tick);
} while (timeout-- > 0);
return false;
}
-EXPORT_SYMBOL_GPL(__mt76_poll_msec);
+EXPORT_SYMBOL_GPL(____mt76_poll_msec);
int mt76_wcid_alloc(u32 *mask, int size)
{
diff --git a/drivers/net/wireless/mediatek/mt7601u/dma.c b/drivers/net/wireless/mediatek/mt7601u/dma.c
index 457147394edc..773a1cc2f852 100644
--- a/drivers/net/wireless/mediatek/mt7601u/dma.c
+++ b/drivers/net/wireless/mediatek/mt7601u/dma.c
@@ -123,7 +123,8 @@ static u16 mt7601u_rx_next_seg_len(u8 *data, u32 data_len)
if (data_len < min_seg_len ||
WARN_ON_ONCE(!dma_len) ||
WARN_ON_ONCE(dma_len + MT_DMA_HDRS > data_len) ||
- WARN_ON_ONCE(dma_len & 0x3))
+ WARN_ON_ONCE(dma_len & 0x3) ||
+ WARN_ON_ONCE(dma_len < min_seg_len))
return 0;
return MT_DMA_HDRS + dma_len;
diff --git a/drivers/net/wireless/microchip/wilc1000/netdev.c b/drivers/net/wireless/microchip/wilc1000/netdev.c
index 9b319a455b96..e9f59de31b0b 100644
--- a/drivers/net/wireless/microchip/wilc1000/netdev.c
+++ b/drivers/net/wireless/microchip/wilc1000/netdev.c
@@ -730,6 +730,7 @@ netdev_tx_t wilc_mac_xmit(struct sk_buff *skb, struct net_device *ndev)
if (skb->dev != ndev) {
netdev_err(ndev, "Packet not destined to this device\n");
+ dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
@@ -980,7 +981,7 @@ struct wilc_vif *wilc_netdev_ifc_init(struct wilc *wl, const char *name,
ndev->name);
if (!wl->hif_workqueue) {
ret = -ENOMEM;
- goto error;
+ goto unregister_netdev;
}
ndev->needs_free_netdev = true;
@@ -995,6 +996,11 @@ struct wilc_vif *wilc_netdev_ifc_init(struct wilc *wl, const char *name,
return vif;
+unregister_netdev:
+ if (rtnl_locked)
+ cfg80211_unregister_netdevice(ndev);
+ else
+ unregister_netdev(ndev);
error:
free_netdev(ndev);
return ERR_PTR(ret);
diff --git a/drivers/net/wireless/quantenna/qtnfmac/event.c b/drivers/net/wireless/quantenna/qtnfmac/event.c
index 4fafe370101a..31bc58e96ac0 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/event.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/event.c
@@ -478,7 +478,7 @@ qtnf_event_handle_freq_change(struct qtnf_wmac *mac,
continue;
mutex_lock(&vif->wdev.mtx);
- cfg80211_ch_switch_notify(vif->netdev, &chandef, 0);
+ cfg80211_ch_switch_notify(vif->netdev, &chandef, 0, 0);
mutex_unlock(&vif->wdev.mtx);
}
@@ -662,6 +662,7 @@ qtnf_event_handle_update_owe(struct qtnf_vif *vif,
memcpy(ie, owe_ev->ies, ie_len);
owe_info.ie_len = ie_len;
owe_info.ie = ie;
+ owe_info.assoc_link_id = -1;
pr_info("%s: external OWE processing: peer=%pM\n",
vif->netdev->name, owe_ev->peer);
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
index 12b700c7b9c3..1226a883cd67 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
@@ -8924,8 +8924,6 @@ static void rt2800_rxiq_calibration(struct rt2x00_dev *rt2x00dev)
if (i < 2 && (bbptemp & 0x800000))
result = (bbptemp & 0xffffff) - 0x1000000;
- else if (i == 4)
- result = bbptemp;
else
result = bbptemp;
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/Kconfig b/drivers/net/wireless/realtek/rtl8xxxu/Kconfig
index 631d078278be..2eed20b0988c 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/Kconfig
+++ b/drivers/net/wireless/realtek/rtl8xxxu/Kconfig
@@ -5,12 +5,13 @@
config RTL8XXXU
tristate "Realtek 802.11n USB wireless chips support"
depends on MAC80211 && USB
+ depends on LEDS_CLASS
help
This is an alternative driver for various Realtek RTL8XXX
parts written to utilize the Linux mac80211 stack.
The driver is known to work with a number of RTL8723AU,
RL8188CU, RTL8188RU, RTL8191CU, RTL8192CU, RTL8723BU, RTL8192EU,
- and RTL8188FU devices.
+ RTL8188FU, and RTL8188EU devices.
This driver is under development and has a limited feature
set. In particular it does not yet support 40MHz channels
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/Makefile b/drivers/net/wireless/realtek/rtl8xxxu/Makefile
index c4ad5325f5e7..0cb58fb30228 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/Makefile
+++ b/drivers/net/wireless/realtek/rtl8xxxu/Makefile
@@ -2,4 +2,5 @@
obj-$(CONFIG_RTL8XXXU) += rtl8xxxu.o
rtl8xxxu-y := rtl8xxxu_core.o rtl8xxxu_8192e.o rtl8xxxu_8723b.o \
- rtl8xxxu_8723a.o rtl8xxxu_8192c.o rtl8xxxu_8188f.o
+ rtl8xxxu_8723a.o rtl8xxxu_8192c.o rtl8xxxu_8188f.o \
+ rtl8xxxu_8188e.o
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
index d26df4095da0..c8cee4a24755 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
@@ -36,6 +36,7 @@
#define TX_TOTAL_PAGE_NUM 0xf8
#define TX_TOTAL_PAGE_NUM_8188F 0xf7
+#define TX_TOTAL_PAGE_NUM_8188E 0xa9
#define TX_TOTAL_PAGE_NUM_8192E 0xf3
#define TX_TOTAL_PAGE_NUM_8723B 0xf7
/* (HPQ + LPQ + NPQ + PUBQ) = TX_TOTAL_PAGE_NUM */
@@ -49,6 +50,11 @@
#define TX_PAGE_NUM_LO_PQ_8188F 0x02
#define TX_PAGE_NUM_NORM_PQ_8188F 0x02
+#define TX_PAGE_NUM_PUBQ_8188E 0x47
+#define TX_PAGE_NUM_HI_PQ_8188E 0x29
+#define TX_PAGE_NUM_LO_PQ_8188E 0x1c
+#define TX_PAGE_NUM_NORM_PQ_8188E 0x1c
+
#define TX_PAGE_NUM_PUBQ_8192E 0xe7
#define TX_PAGE_NUM_HI_PQ_8192E 0x08
#define TX_PAGE_NUM_LO_PQ_8192E 0x0c
@@ -153,7 +159,8 @@ struct rtl8xxxu_rxdesc16 {
u32 htc:1;
u32 eosp:1;
u32 bssidfit:2;
- u32 reserved1:16;
+ u32 rpt_sel:2; /* 8188e */
+ u32 reserved1:14;
u32 unicastwake:1;
u32 magicwake:1;
@@ -211,7 +218,8 @@ struct rtl8xxxu_rxdesc16 {
u32 magicwake:1;
u32 unicastwake:1;
- u32 reserved1:16;
+ u32 reserved1:14;
+ u32 rpt_sel:2; /* 8188e */
u32 bssidfit:2;
u32 eosp:1;
u32 htc:1;
@@ -502,6 +510,8 @@ struct rtl8xxxu_txdesc40 {
#define TXDESC_AMPDU_DENSITY_SHIFT 20
#define TXDESC40_BT_INT BIT(23)
#define TXDESC40_GID_SHIFT 24
+#define TXDESC_ANTENNA_SELECT_A BIT(24)
+#define TXDESC_ANTENNA_SELECT_B BIT(25)
/* Word 3 */
#define TXDESC40_USE_DRIVER_RATE BIT(8)
@@ -521,6 +531,7 @@ struct rtl8xxxu_txdesc40 {
#define TXDESC32_CTS_SELF_ENABLE BIT(11)
#define TXDESC32_RTS_CTS_ENABLE BIT(12)
#define TXDESC32_HW_RTS_ENABLE BIT(13)
+#define TXDESC32_PT_STAGE_MASK GENMASK(17, 15)
#define TXDESC_PRIME_CH_OFF_LOWER BIT(20)
#define TXDESC_PRIME_CH_OFF_UPPER BIT(21)
#define TXDESC32_SHORT_PREAMBLE BIT(24)
@@ -546,6 +557,10 @@ struct rtl8xxxu_txdesc40 {
/* Word 6 */
#define TXDESC_MAX_AGG_SHIFT 11
+#define TXDESC_USB_TX_AGG_SHIT 24
+
+/* Word 7 */
+#define TXDESC_ANTENNA_SELECT_C BIT(29)
/* Word 8 */
#define TXDESC40_HW_SEQ_ENABLE BIT(15)
@@ -562,6 +577,9 @@ struct phy_rx_agc_info {
#endif
};
+#define CCK_AGC_RPT_LNA_IDX_MASK GENMASK(7, 5)
+#define CCK_AGC_RPT_VGA_IDX_MASK GENMASK(4, 0)
+
struct rtl8723au_phy_stats {
struct phy_rx_agc_info path_agc[RTL8723A_MAX_RF_PATHS];
u8 ch_corr[RTL8723A_MAX_RF_PATHS];
@@ -909,6 +927,42 @@ struct rtl8188fu_efuse {
u8 res11[0xc3];
};
+struct rtl8188eu_efuse {
+ __le16 rtl_id;
+ u8 res0[0x0e];
+ struct rtl8192eu_efuse_tx_power tx_power_index_A; /* 0x10 */
+ u8 res1[0x7e]; /* 0x3a */
+ u8 channel_plan; /* 0xb8 */
+ u8 xtal_k;
+ u8 thermal_meter;
+ u8 iqk_lck;
+ u8 res2[5];
+ u8 rf_board_option;
+ u8 rf_feature_option;
+ u8 rf_bt_setting;
+ u8 eeprom_version;
+ u8 eeprom_customer_id;
+ u8 res3[3];
+ u8 rf_antenna_option; /* 0xc9 */
+ u8 res4[6];
+ u8 vid; /* 0xd0 */
+ u8 res5[1];
+ u8 pid; /* 0xd2 */
+ u8 res6[1];
+ u8 usb_optional_function;
+ u8 res7[2];
+ u8 mac_addr[ETH_ALEN]; /* 0xd7 */
+ u8 res8[2];
+ u8 vendor_name[7];
+ u8 res9[2];
+ u8 device_name[0x0b]; /* 0xe8 */
+ u8 res10[2];
+ u8 serial[0x0b]; /* 0xf5 */
+ u8 res11[0x30];
+ u8 unknown[0x0d]; /* 0x130 */
+ u8 res12[0xc3];
+} __packed;
+
struct rtl8xxxu_reg8val {
u16 reg;
u8 val;
@@ -1114,6 +1168,26 @@ struct h2c_cmd {
u8 cmd;
u8 data;
} __packed bt_grant;
+ struct {
+ u8 cmd;
+ u8 macid;
+ u8 unknown0;
+ u8 rssi;
+ /*
+ * [0] - is_rx
+ * [1] - stbc_en
+ * [2] - noisy_decision
+ * [6] - bf_en
+ */
+ u8 data;
+ /*
+ * [0:6] - ra_th_offset
+ * [7] - ra_offset_direction
+ */
+ u8 ra_th_offset;
+ u8 unknown1;
+ u8 unknown2;
+ } __packed rssi_report;
};
};
@@ -1323,6 +1397,39 @@ struct rtl8xxxu_ra_report {
u8 desc_rate;
};
+struct rtl8xxxu_ra_info {
+ u8 rate_id;
+ u32 rate_mask;
+ u32 ra_use_rate;
+ u8 rate_sgi;
+ u8 rssi_sta_ra; /* Percentage */
+ u8 pre_rssi_sta_ra;
+ u8 sgi_enable;
+ u8 decision_rate;
+ u8 pre_rate;
+ u8 highest_rate;
+ u8 lowest_rate;
+ u32 nsc_up;
+ u32 nsc_down;
+ u32 total;
+ u16 retry[5];
+ u16 drop;
+ u16 rpt_time;
+ u16 pre_min_rpt_time;
+ u8 dynamic_tx_rpt_timing_counter;
+ u8 ra_waiting_counter;
+ u8 ra_pending_counter;
+ u8 ra_drop_after_down;
+ u8 pt_try_state; /* 0 trying state, 1 for decision state */
+ u8 pt_stage; /* 0~6 */
+ u8 pt_stop_count; /* Stop PT counter */
+ u8 pt_pre_rate; /* if rate change do PT */
+ u8 pt_pre_rssi; /* if RSSI change 5% do PT */
+ u8 pt_mode_ss; /* decide which rate should do PT */
+ u8 ra_stage; /* StageRA, decide how many times RA will be done between PT */
+ u8 pt_smooth_factor;
+};
+
#define CFO_TH_XTAL_HIGH 20 /* kHz */
#define CFO_TH_XTAL_LOW 10 /* kHz */
#define CFO_TH_ATC 80 /* kHz */
@@ -1336,6 +1443,8 @@ struct rtl8xxxu_cfo_tracking {
u32 packet_count_pre;
};
+#define RTL8XXXU_HW_LED_CONTROL 2
+
struct rtl8xxxu_priv {
struct ieee80211_hw *hw;
struct usb_device *udev;
@@ -1432,6 +1541,7 @@ struct rtl8xxxu_priv {
struct rtl8192cu_efuse efuse8192;
struct rtl8192eu_efuse efuse8192eu;
struct rtl8188fu_efuse efuse8188fu;
+ struct rtl8188eu_efuse efuse8188eu;
} efuse_wifi;
u32 adda_backup[RTL8XXXU_ADDA_REGS];
u32 mac_backup[RTL8XXXU_MAC_REGS];
@@ -1455,6 +1565,11 @@ struct rtl8xxxu_priv {
struct rtl8xxxu_btcoex bt_coex;
struct rtl8xxxu_ra_report ra_report;
struct rtl8xxxu_cfo_tracking cfo_tracking;
+ struct rtl8xxxu_ra_info ra_info;
+
+ bool led_registered;
+ char led_name[32];
+ struct led_classdev led_cdev;
};
struct rtl8xxxu_rx_urb {
@@ -1496,6 +1611,7 @@ struct rtl8xxxu_fileops {
u32 ramask, u8 rateid, int sgi, int txbw_40mhz);
void (*report_connect) (struct rtl8xxxu_priv *priv,
u8 macid, bool connect);
+ void (*report_rssi) (struct rtl8xxxu_priv *priv, u8 macid, u8 rssi);
void (*fill_txdesc) (struct ieee80211_hw *hw, struct ieee80211_hdr *hdr,
struct ieee80211_tx_info *tx_info,
struct rtl8xxxu_txdesc32 *tx_desc, bool sgi,
@@ -1503,6 +1619,8 @@ struct rtl8xxxu_fileops {
u32 rts_rate);
void (*set_crystal_cap) (struct rtl8xxxu_priv *priv, u8 crystal_cap);
s8 (*cck_rssi) (struct rtl8xxxu_priv *priv, u8 cck_agc_rpt);
+ int (*led_classdev_brightness_set) (struct led_classdev *led_cdev,
+ enum led_brightness brightness);
int writeN_block_size;
int rx_agg_buf_size;
char tx_desc_size;
@@ -1523,6 +1641,7 @@ struct rtl8xxxu_fileops {
u8 page_num_hi;
u8 page_num_lo;
u8 page_num_norm;
+ u8 last_llt_entry;
};
extern int rtl8xxxu_debug;
@@ -1560,7 +1679,7 @@ int rtl8xxxu_init_phy_rf(struct rtl8xxxu_priv *priv,
enum rtl8xxxu_rfpath path);
int rtl8xxxu_init_phy_regs(struct rtl8xxxu_priv *priv,
const struct rtl8xxxu_reg32val *array);
-int rtl8xxxu_load_firmware(struct rtl8xxxu_priv *priv, char *fw_name);
+int rtl8xxxu_load_firmware(struct rtl8xxxu_priv *priv, const char *fw_name);
void rtl8xxxu_firmware_self_reset(struct rtl8xxxu_priv *priv);
void rtl8xxxu_power_off(struct rtl8xxxu_priv *priv);
void rtl8xxxu_identify_vendor_1bit(struct rtl8xxxu_priv *priv, u32 vendor);
@@ -1582,6 +1701,8 @@ void rtl8xxxu_gen1_phy_iq_calibrate(struct rtl8xxxu_priv *priv);
void rtl8xxxu_gen1_init_phy_bb(struct rtl8xxxu_priv *priv);
void rtl8xxxu_gen1_set_tx_power(struct rtl8xxxu_priv *priv,
int channel, bool ht40);
+void rtl8188f_set_tx_power(struct rtl8xxxu_priv *priv,
+ int channel, bool ht40);
void rtl8xxxu_gen1_config_channel(struct ieee80211_hw *hw);
void rtl8xxxu_gen2_config_channel(struct ieee80211_hw *hw);
void rtl8xxxu_gen1_usb_quirks(struct rtl8xxxu_priv *priv);
@@ -1594,6 +1715,8 @@ void rtl8xxxu_gen1_report_connect(struct rtl8xxxu_priv *priv,
u8 macid, bool connect);
void rtl8xxxu_gen2_report_connect(struct rtl8xxxu_priv *priv,
u8 macid, bool connect);
+void rtl8xxxu_gen1_report_rssi(struct rtl8xxxu_priv *priv, u8 macid, u8 rssi);
+void rtl8xxxu_gen2_report_rssi(struct rtl8xxxu_priv *priv, u8 macid, u8 rssi);
void rtl8xxxu_gen1_init_aggregation(struct rtl8xxxu_priv *priv);
void rtl8xxxu_gen1_enable_rf(struct rtl8xxxu_priv *priv);
void rtl8xxxu_gen1_disable_rf(struct rtl8xxxu_priv *priv);
@@ -1602,6 +1725,8 @@ void rtl8xxxu_init_burst(struct rtl8xxxu_priv *priv);
int rtl8xxxu_parse_rxdesc16(struct rtl8xxxu_priv *priv, struct sk_buff *skb);
int rtl8xxxu_parse_rxdesc24(struct rtl8xxxu_priv *priv, struct sk_buff *skb);
int rtl8xxxu_gen2_channel_to_group(int channel);
+bool rtl8xxxu_simularity_compare(struct rtl8xxxu_priv *priv,
+ int result[][8], int c1, int c2);
bool rtl8xxxu_gen2_simularity_compare(struct rtl8xxxu_priv *priv,
int result[][8], int c1, int c2);
void rtl8xxxu_fill_txdesc_v1(struct ieee80211_hw *hw, struct ieee80211_hdr *hdr,
@@ -1614,13 +1739,24 @@ void rtl8xxxu_fill_txdesc_v2(struct ieee80211_hw *hw, struct ieee80211_hdr *hdr,
struct rtl8xxxu_txdesc32 *tx_desc32, bool sgi,
bool short_preamble, bool ampdu_enable,
u32 rts_rate);
+void rtl8xxxu_fill_txdesc_v3(struct ieee80211_hw *hw, struct ieee80211_hdr *hdr,
+ struct ieee80211_tx_info *tx_info,
+ struct rtl8xxxu_txdesc32 *tx_desc32, bool sgi,
+ bool short_preamble, bool ampdu_enable,
+ u32 rts_rate);
void rtl8723bu_set_ps_tdma(struct rtl8xxxu_priv *priv,
u8 arg1, u8 arg2, u8 arg3, u8 arg4, u8 arg5);
void rtl8723bu_phy_init_antenna_selection(struct rtl8xxxu_priv *priv);
void rtl8723a_set_crystal_cap(struct rtl8xxxu_priv *priv, u8 crystal_cap);
+void rtl8188f_set_crystal_cap(struct rtl8xxxu_priv *priv, u8 crystal_cap);
s8 rtl8723a_cck_rssi(struct rtl8xxxu_priv *priv, u8 cck_agc_rpt);
+void rtl8xxxu_update_ra_report(struct rtl8xxxu_ra_report *rarpt,
+ u8 rate, u8 sgi, u8 bw);
+void rtl8188e_ra_info_init_all(struct rtl8xxxu_ra_info *ra);
+void rtl8188e_handle_ra_tx_report2(struct rtl8xxxu_priv *priv, struct sk_buff *skb);
extern struct rtl8xxxu_fileops rtl8188fu_fops;
+extern struct rtl8xxxu_fileops rtl8188eu_fops;
extern struct rtl8xxxu_fileops rtl8192cu_fops;
extern struct rtl8xxxu_fileops rtl8192eu_fops;
extern struct rtl8xxxu_fileops rtl8723au_fops;
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8188e.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8188e.c
new file mode 100644
index 000000000000..a99ddb41cd24
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8188e.c
@@ -0,0 +1,1899 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * RTL8XXXU mac80211 USB driver - 8188e specific subdriver
+ *
+ * Copyright (c) 2014 - 2016 Jes Sorensen <Jes.Sorensen@gmail.com>
+ *
+ * Portions, notably calibration code:
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This driver was written as a replacement for the vendor provided
+ * rtl8723au driver. As the Realtek 8xxx chips are very similar in
+ * their programming interface, I have started adding support for
+ * additional 8xxx chips like the 8192cu, 8188cus, etc.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <linux/usb.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/wireless.h>
+#include <linux/firmware.h>
+#include <linux/moduleparam.h>
+#include <net/mac80211.h>
+#include "rtl8xxxu.h"
+#include "rtl8xxxu_regs.h"
+
+static const struct rtl8xxxu_reg8val rtl8188e_mac_init_table[] = {
+ {0x026, 0x41}, {0x027, 0x35}, {0x040, 0x00}, {0x421, 0x0f},
+ {0x428, 0x0a}, {0x429, 0x10}, {0x430, 0x00}, {0x431, 0x01},
+ {0x432, 0x02}, {0x433, 0x04}, {0x434, 0x05}, {0x435, 0x06},
+ {0x436, 0x07}, {0x437, 0x08}, {0x438, 0x00}, {0x439, 0x00},
+ {0x43a, 0x01}, {0x43b, 0x02}, {0x43c, 0x04}, {0x43d, 0x05},
+ {0x43e, 0x06}, {0x43f, 0x07}, {0x440, 0x5d}, {0x441, 0x01},
+ {0x442, 0x00}, {0x444, 0x15}, {0x445, 0xf0}, {0x446, 0x0f},
+ {0x447, 0x00}, {0x458, 0x41}, {0x459, 0xa8}, {0x45a, 0x72},
+ {0x45b, 0xb9}, {0x460, 0x66}, {0x461, 0x66}, {0x480, 0x08},
+ {0x4c8, 0xff}, {0x4c9, 0x08}, {0x4cc, 0xff}, {0x4cd, 0xff},
+ {0x4ce, 0x01}, {0x4d3, 0x01}, {0x500, 0x26}, {0x501, 0xa2},
+ {0x502, 0x2f}, {0x503, 0x00}, {0x504, 0x28}, {0x505, 0xa3},
+ {0x506, 0x5e}, {0x507, 0x00}, {0x508, 0x2b}, {0x509, 0xa4},
+ {0x50a, 0x5e}, {0x50b, 0x00}, {0x50c, 0x4f}, {0x50d, 0xa4},
+ {0x50e, 0x00}, {0x50f, 0x00}, {0x512, 0x1c}, {0x514, 0x0a},
+ {0x516, 0x0a}, {0x525, 0x4f}, {0x550, 0x10}, {0x551, 0x10},
+ {0x559, 0x02}, {0x55d, 0xff}, {0x605, 0x30}, {0x608, 0x0e},
+ {0x609, 0x2a}, {0x620, 0xff}, {0x621, 0xff}, {0x622, 0xff},
+ {0x623, 0xff}, {0x624, 0xff}, {0x625, 0xff}, {0x626, 0xff},
+ {0x627, 0xff}, {0x63c, 0x08}, {0x63d, 0x08}, {0x63e, 0x0c},
+ {0x63f, 0x0c}, {0x640, 0x40}, {0x652, 0x20}, {0x66e, 0x05},
+ {0x700, 0x21}, {0x701, 0x43}, {0x702, 0x65}, {0x703, 0x87},
+ {0x708, 0x21}, {0x709, 0x43}, {0x70a, 0x65}, {0x70b, 0x87},
+ {0xffff, 0xff},
+};
+
+static const struct rtl8xxxu_reg32val rtl8188eu_phy_init_table[] = {
+ {0x800, 0x80040000}, {0x804, 0x00000003},
+ {0x808, 0x0000fc00}, {0x80c, 0x0000000a},
+ {0x810, 0x10001331}, {0x814, 0x020c3d10},
+ {0x818, 0x02200385}, {0x81c, 0x00000000},
+ {0x820, 0x01000100}, {0x824, 0x00390204},
+ {0x828, 0x00000000}, {0x82c, 0x00000000},
+ {0x830, 0x00000000}, {0x834, 0x00000000},
+ {0x838, 0x00000000}, {0x83c, 0x00000000},
+ {0x840, 0x00010000}, {0x844, 0x00000000},
+ {0x848, 0x00000000}, {0x84c, 0x00000000},
+ {0x850, 0x00000000}, {0x854, 0x00000000},
+ {0x858, 0x569a11a9}, {0x85c, 0x01000014},
+ {0x860, 0x66f60110}, {0x864, 0x061f0649},
+ {0x868, 0x00000000}, {0x86c, 0x27272700},
+ {0x870, 0x07000760}, {0x874, 0x25004000},
+ {0x878, 0x00000808}, {0x87c, 0x00000000},
+ {0x880, 0xb0000c1c}, {0x884, 0x00000001},
+ {0x888, 0x00000000}, {0x88c, 0xccc000c0},
+ {0x890, 0x00000800}, {0x894, 0xfffffffe},
+ {0x898, 0x40302010}, {0x89c, 0x00706050},
+ {0x900, 0x00000000}, {0x904, 0x00000023},
+ {0x908, 0x00000000}, {0x90c, 0x81121111},
+ {0x910, 0x00000002}, {0x914, 0x00000201},
+ {0xa00, 0x00d047c8}, {0xa04, 0x80ff800c},
+ {0xa08, 0x8c838300}, {0xa0c, 0x2e7f120f},
+ {0xa10, 0x9500bb7e}, {0xa14, 0x1114d028},
+ {0xa18, 0x00881117}, {0xa1c, 0x89140f00},
+ {0xa20, 0x1a1b0000}, {0xa24, 0x090e1317},
+ {0xa28, 0x00000204}, {0xa2c, 0x00d30000},
+ {0xa70, 0x101fbf00}, {0xa74, 0x00000007},
+ {0xa78, 0x00000900}, {0xa7c, 0x225b0606},
+ {0xa80, 0x218075b1}, {0xb2c, 0x80000000},
+ {0xc00, 0x48071d40}, {0xc04, 0x03a05611},
+ {0xc08, 0x000000e4}, {0xc0c, 0x6c6c6c6c},
+ {0xc10, 0x08800000}, {0xc14, 0x40000100},
+ {0xc18, 0x08800000}, {0xc1c, 0x40000100},
+ {0xc20, 0x00000000}, {0xc24, 0x00000000},
+ {0xc28, 0x00000000}, {0xc2c, 0x00000000},
+ {0xc30, 0x69e9ac47}, {0xc34, 0x469652af},
+ {0xc38, 0x49795994}, {0xc3c, 0x0a97971c},
+ {0xc40, 0x1f7c403f}, {0xc44, 0x000100b7},
+ {0xc48, 0xec020107}, {0xc4c, 0x007f037f},
+ {0xc50, 0x69553420}, {0xc54, 0x43bc0094},
+ {0xc58, 0x00013169}, {0xc5c, 0x00250492},
+ {0xc60, 0x00000000}, {0xc64, 0x7112848b},
+ {0xc68, 0x47c00bff}, {0xc6c, 0x00000036},
+ {0xc70, 0x2c7f000d}, {0xc74, 0x020610db},
+ {0xc78, 0x0000001f}, {0xc7c, 0x00b91612},
+ {0xc80, 0x390000e4}, {0xc84, 0x21f60000},
+ {0xc88, 0x40000100}, {0xc8c, 0x20200000},
+ {0xc90, 0x00091521}, {0xc94, 0x00000000},
+ {0xc98, 0x00121820}, {0xc9c, 0x00007f7f},
+ {0xca0, 0x00000000}, {0xca4, 0x000300a0},
+ {0xca8, 0x00000000}, {0xcac, 0x00000000},
+ {0xcb0, 0x00000000}, {0xcb4, 0x00000000},
+ {0xcb8, 0x00000000}, {0xcbc, 0x28000000},
+ {0xcc0, 0x00000000}, {0xcc4, 0x00000000},
+ {0xcc8, 0x00000000}, {0xccc, 0x00000000},
+ {0xcd0, 0x00000000}, {0xcd4, 0x00000000},
+ {0xcd8, 0x64b22427}, {0xcdc, 0x00766932},
+ {0xce0, 0x00222222}, {0xce4, 0x00000000},
+ {0xce8, 0x37644302}, {0xcec, 0x2f97d40c},
+ {0xd00, 0x00000740}, {0xd04, 0x00020401},
+ {0xd08, 0x0000907f}, {0xd0c, 0x20010201},
+ {0xd10, 0xa0633333}, {0xd14, 0x3333bc43},
+ {0xd18, 0x7a8f5b6f}, {0xd2c, 0xcc979975},
+ {0xd30, 0x00000000}, {0xd34, 0x80608000},
+ {0xd38, 0x00000000}, {0xd3c, 0x00127353},
+ {0xd40, 0x00000000}, {0xd44, 0x00000000},
+ {0xd48, 0x00000000}, {0xd4c, 0x00000000},
+ {0xd50, 0x6437140a}, {0xd54, 0x00000000},
+ {0xd58, 0x00000282}, {0xd5c, 0x30032064},
+ {0xd60, 0x4653de68}, {0xd64, 0x04518a3c},
+ {0xd68, 0x00002101}, {0xd6c, 0x2a201c16},
+ {0xd70, 0x1812362e}, {0xd74, 0x322c2220},
+ {0xd78, 0x000e3c24}, {0xe00, 0x2d2d2d2d},
+ {0xe04, 0x2d2d2d2d}, {0xe08, 0x0390272d},
+ {0xe10, 0x2d2d2d2d}, {0xe14, 0x2d2d2d2d},
+ {0xe18, 0x2d2d2d2d}, {0xe1c, 0x2d2d2d2d},
+ {0xe28, 0x00000000}, {0xe30, 0x1000dc1f},
+ {0xe34, 0x10008c1f}, {0xe38, 0x02140102},
+ {0xe3c, 0x681604c2}, {0xe40, 0x01007c00},
+ {0xe44, 0x01004800}, {0xe48, 0xfb000000},
+ {0xe4c, 0x000028d1}, {0xe50, 0x1000dc1f},
+ {0xe54, 0x10008c1f}, {0xe58, 0x02140102},
+ {0xe5c, 0x28160d05}, {0xe60, 0x00000048},
+ {0xe68, 0x001b25a4}, {0xe6c, 0x00c00014},
+ {0xe70, 0x00c00014}, {0xe74, 0x01000014},
+ {0xe78, 0x01000014}, {0xe7c, 0x01000014},
+ {0xe80, 0x01000014}, {0xe84, 0x00c00014},
+ {0xe88, 0x01000014}, {0xe8c, 0x00c00014},
+ {0xed0, 0x00c00014}, {0xed4, 0x00c00014},
+ {0xed8, 0x00c00014}, {0xedc, 0x00000014},
+ {0xee0, 0x00000014}, {0xee8, 0x21555448},
+ {0xeec, 0x01c00014}, {0xf14, 0x00000003},
+ {0xf4c, 0x00000000}, {0xf00, 0x00000300},
+ {0xffff, 0xffffffff},
+};
+
+static const struct rtl8xxxu_reg32val rtl8188e_agc_table[] = {
+ {0xc78, 0xfb000001}, {0xc78, 0xfb010001},
+ {0xc78, 0xfb020001}, {0xc78, 0xfb030001},
+ {0xc78, 0xfb040001}, {0xc78, 0xfb050001},
+ {0xc78, 0xfa060001}, {0xc78, 0xf9070001},
+ {0xc78, 0xf8080001}, {0xc78, 0xf7090001},
+ {0xc78, 0xf60a0001}, {0xc78, 0xf50b0001},
+ {0xc78, 0xf40c0001}, {0xc78, 0xf30d0001},
+ {0xc78, 0xf20e0001}, {0xc78, 0xf10f0001},
+ {0xc78, 0xf0100001}, {0xc78, 0xef110001},
+ {0xc78, 0xee120001}, {0xc78, 0xed130001},
+ {0xc78, 0xec140001}, {0xc78, 0xeb150001},
+ {0xc78, 0xea160001}, {0xc78, 0xe9170001},
+ {0xc78, 0xe8180001}, {0xc78, 0xe7190001},
+ {0xc78, 0xe61a0001}, {0xc78, 0xe51b0001},
+ {0xc78, 0xe41c0001}, {0xc78, 0xe31d0001},
+ {0xc78, 0xe21e0001}, {0xc78, 0xe11f0001},
+ {0xc78, 0x8a200001}, {0xc78, 0x89210001},
+ {0xc78, 0x88220001}, {0xc78, 0x87230001},
+ {0xc78, 0x86240001}, {0xc78, 0x85250001},
+ {0xc78, 0x84260001}, {0xc78, 0x83270001},
+ {0xc78, 0x82280001}, {0xc78, 0x6b290001},
+ {0xc78, 0x6a2a0001}, {0xc78, 0x692b0001},
+ {0xc78, 0x682c0001}, {0xc78, 0x672d0001},
+ {0xc78, 0x662e0001}, {0xc78, 0x652f0001},
+ {0xc78, 0x64300001}, {0xc78, 0x63310001},
+ {0xc78, 0x62320001}, {0xc78, 0x61330001},
+ {0xc78, 0x46340001}, {0xc78, 0x45350001},
+ {0xc78, 0x44360001}, {0xc78, 0x43370001},
+ {0xc78, 0x42380001}, {0xc78, 0x41390001},
+ {0xc78, 0x403a0001}, {0xc78, 0x403b0001},
+ {0xc78, 0x403c0001}, {0xc78, 0x403d0001},
+ {0xc78, 0x403e0001}, {0xc78, 0x403f0001},
+ {0xc78, 0xfb400001}, {0xc78, 0xfb410001},
+ {0xc78, 0xfb420001}, {0xc78, 0xfb430001},
+ {0xc78, 0xfb440001}, {0xc78, 0xfb450001},
+ {0xc78, 0xfb460001}, {0xc78, 0xfb470001},
+ {0xc78, 0xfb480001}, {0xc78, 0xfa490001},
+ {0xc78, 0xf94a0001}, {0xc78, 0xf84b0001},
+ {0xc78, 0xf74c0001}, {0xc78, 0xf64d0001},
+ {0xc78, 0xf54e0001}, {0xc78, 0xf44f0001},
+ {0xc78, 0xf3500001}, {0xc78, 0xf2510001},
+ {0xc78, 0xf1520001}, {0xc78, 0xf0530001},
+ {0xc78, 0xef540001}, {0xc78, 0xee550001},
+ {0xc78, 0xed560001}, {0xc78, 0xec570001},
+ {0xc78, 0xeb580001}, {0xc78, 0xea590001},
+ {0xc78, 0xe95a0001}, {0xc78, 0xe85b0001},
+ {0xc78, 0xe75c0001}, {0xc78, 0xe65d0001},
+ {0xc78, 0xe55e0001}, {0xc78, 0xe45f0001},
+ {0xc78, 0xe3600001}, {0xc78, 0xe2610001},
+ {0xc78, 0xc3620001}, {0xc78, 0xc2630001},
+ {0xc78, 0xc1640001}, {0xc78, 0x8b650001},
+ {0xc78, 0x8a660001}, {0xc78, 0x89670001},
+ {0xc78, 0x88680001}, {0xc78, 0x87690001},
+ {0xc78, 0x866a0001}, {0xc78, 0x856b0001},
+ {0xc78, 0x846c0001}, {0xc78, 0x676d0001},
+ {0xc78, 0x666e0001}, {0xc78, 0x656f0001},
+ {0xc78, 0x64700001}, {0xc78, 0x63710001},
+ {0xc78, 0x62720001}, {0xc78, 0x61730001},
+ {0xc78, 0x60740001}, {0xc78, 0x46750001},
+ {0xc78, 0x45760001}, {0xc78, 0x44770001},
+ {0xc78, 0x43780001}, {0xc78, 0x42790001},
+ {0xc78, 0x417a0001}, {0xc78, 0x407b0001},
+ {0xc78, 0x407c0001}, {0xc78, 0x407d0001},
+ {0xc78, 0x407e0001}, {0xc78, 0x407f0001},
+ {0xc50, 0x69553422}, {0xc50, 0x69553420},
+ {0xffff, 0xffffffff}
+};
+
+static const struct rtl8xxxu_rfregval rtl8188eu_radioa_init_table[] = {
+ {0x00, 0x00030000}, {0x08, 0x00084000},
+ {0x18, 0x00000407}, {0x19, 0x00000012},
+ {0x1e, 0x00080009}, {0x1f, 0x00000880},
+ {0x2f, 0x0001a060}, {0x3f, 0x00000000},
+ {0x42, 0x000060c0}, {0x57, 0x000d0000},
+ {0x58, 0x000be180}, {0x67, 0x00001552},
+ {0x83, 0x00000000}, {0xb0, 0x000ff8fc},
+ {0xb1, 0x00054400}, {0xb2, 0x000ccc19},
+ {0xb4, 0x00043003}, {0xb6, 0x0004953e},
+ {0xb7, 0x0001c718}, {0xb8, 0x000060ff},
+ {0xb9, 0x00080001}, {0xba, 0x00040000},
+ {0xbb, 0x00000400}, {0xbf, 0x000c0000},
+ {0xc2, 0x00002400}, {0xc3, 0x00000009},
+ {0xc4, 0x00040c91}, {0xc5, 0x00099999},
+ {0xc6, 0x000000a3}, {0xc7, 0x00088820},
+ {0xc8, 0x00076c06}, {0xc9, 0x00000000},
+ {0xca, 0x00080000}, {0xdf, 0x00000180},
+ {0xef, 0x000001a0}, {0x51, 0x0006b27d},
+ {0x52, 0x0007e49d}, /* Set to 0x0007e4dd for SDIO */
+ {0x53, 0x00000073}, {0x56, 0x00051ff3},
+ {0x35, 0x00000086}, {0x35, 0x00000186},
+ {0x35, 0x00000286}, {0x36, 0x00001c25},
+ {0x36, 0x00009c25}, {0x36, 0x00011c25},
+ {0x36, 0x00019c25}, {0xb6, 0x00048538},
+ {0x18, 0x00000c07}, {0x5a, 0x0004bd00},
+ {0x19, 0x000739d0}, {0x34, 0x0000adf3},
+ {0x34, 0x00009df0}, {0x34, 0x00008ded},
+ {0x34, 0x00007dea}, {0x34, 0x00006de7},
+ {0x34, 0x000054ee}, {0x34, 0x000044eb},
+ {0x34, 0x000034e8}, {0x34, 0x0000246b},
+ {0x34, 0x00001468}, {0x34, 0x0000006d},
+ {0x00, 0x00030159}, {0x84, 0x00068200},
+ {0x86, 0x000000ce}, {0x87, 0x00048a00},
+ {0x8e, 0x00065540}, {0x8f, 0x00088000},
+ {0xef, 0x000020a0}, {0x3b, 0x000f02b0},
+ {0x3b, 0x000ef7b0}, {0x3b, 0x000d4fb0},
+ {0x3b, 0x000cf060}, {0x3b, 0x000b0090},
+ {0x3b, 0x000a0080}, {0x3b, 0x00090080},
+ {0x3b, 0x0008f780}, {0x3b, 0x000722b0},
+ {0x3b, 0x0006f7b0}, {0x3b, 0x00054fb0},
+ {0x3b, 0x0004f060}, {0x3b, 0x00030090},
+ {0x3b, 0x00020080}, {0x3b, 0x00010080},
+ {0x3b, 0x0000f780}, {0xef, 0x000000a0},
+ {0x00, 0x00010159}, {0x18, 0x0000f407},
+ {0xFE, 0x00000000}, {0xFE, 0x00000000},
+ {0x1F, 0x00080003}, {0xFE, 0x00000000},
+ {0xFE, 0x00000000}, {0x1E, 0x00000001},
+ {0x1F, 0x00080000}, {0x00, 0x00033e60},
+ {0xff, 0xffffffff}
+};
+
+#define PERENTRY 23
+#define RETRYSIZE 5
+#define RATESIZE 28
+#define TX_RPT2_ITEM_SIZE 8
+
+static const u8 retry_penalty[PERENTRY][RETRYSIZE + 1] = {
+ {5, 4, 3, 2, 0, 3}, /* 92 , idx=0 */
+ {6, 5, 4, 3, 0, 4}, /* 86 , idx=1 */
+ {6, 5, 4, 2, 0, 4}, /* 81 , idx=2 */
+ {8, 7, 6, 4, 0, 6}, /* 75 , idx=3 */
+ {10, 9, 8, 6, 0, 8}, /* 71 , idx=4 */
+ {10, 9, 8, 4, 0, 8}, /* 66 , idx=5 */
+ {10, 9, 8, 2, 0, 8}, /* 62 , idx=6 */
+ {10, 9, 8, 0, 0, 8}, /* 59 , idx=7 */
+ {18, 17, 16, 8, 0, 16}, /* 53 , idx=8 */
+ {26, 25, 24, 16, 0, 24}, /* 50 , idx=9 */
+ {34, 33, 32, 24, 0, 32}, /* 47 , idx=0x0a */
+ {34, 31, 28, 20, 0, 32}, /* 43 , idx=0x0b */
+ {34, 31, 27, 18, 0, 32}, /* 40 , idx=0x0c */
+ {34, 31, 26, 16, 0, 32}, /* 37 , idx=0x0d */
+ {34, 30, 22, 16, 0, 32}, /* 32 , idx=0x0e */
+ {34, 30, 24, 16, 0, 32}, /* 26 , idx=0x0f */
+ {49, 46, 40, 16, 0, 48}, /* 20 , idx=0x10 */
+ {49, 45, 32, 0, 0, 48}, /* 17 , idx=0x11 */
+ {49, 45, 22, 18, 0, 48}, /* 15 , idx=0x12 */
+ {49, 40, 24, 16, 0, 48}, /* 12 , idx=0x13 */
+ {49, 32, 18, 12, 0, 48}, /* 9 , idx=0x14 */
+ {49, 22, 18, 14, 0, 48}, /* 6 , idx=0x15 */
+ {49, 16, 16, 0, 0, 48} /* 3, idx=0x16 */
+};
+
+static const u8 pt_penalty[RETRYSIZE + 1] = {34, 31, 30, 24, 0, 32};
+
+static const u8 retry_penalty_idx_normal[2][RATESIZE] = {
+ { /* RSSI>TH */
+ 4, 4, 4, 5,
+ 4, 4, 5, 7, 7, 7, 8, 0x0a,
+ 4, 4, 4, 4, 6, 0x0a, 0x0b, 0x0d,
+ 5, 5, 7, 7, 8, 0x0b, 0x0d, 0x0f
+ },
+ { /* RSSI<TH */
+ 0x0a, 0x0a, 0x0b, 0x0c,
+ 0x0a, 0x0a, 0x0b, 0x0c, 0x0d, 0x10, 0x13, 0x13,
+ 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x11, 0x13, 0x13,
+ 9, 9, 9, 9, 0x0c, 0x0e, 0x11, 0x13
+ }
+};
+
+static const u8 retry_penalty_idx_cut_i[2][RATESIZE] = {
+ { /* RSSI>TH */
+ 4, 4, 4, 5,
+ 4, 4, 5, 7, 7, 7, 8, 0x0a,
+ 4, 4, 4, 4, 6, 0x0a, 0x0b, 0x0d,
+ 5, 5, 7, 7, 8, 0x0b, 0x0d, 0x0f
+ },
+ { /* RSSI<TH */
+ 0x0a, 0x0a, 0x0b, 0x0c,
+ 0x0a, 0x0a, 0x0b, 0x0c, 0x0d, 0x10, 0x13, 0x13,
+ 0x06, 0x07, 0x08, 0x0d, 0x0e, 0x11, 0x11, 0x11,
+ 9, 9, 9, 9, 0x0c, 0x0e, 0x11, 0x13
+ }
+};
+
+static const u8 retry_penalty_up_idx_normal[RATESIZE] = {
+ 0x0c, 0x0d, 0x0d, 0x0f,
+ 0x0d, 0x0e, 0x0f, 0x0f, 0x10, 0x12, 0x13, 0x14,
+ 0x0f, 0x10, 0x10, 0x12, 0x12, 0x13, 0x14, 0x15,
+ 0x11, 0x11, 0x12, 0x13, 0x13, 0x13, 0x14, 0x15
+};
+
+static const u8 retry_penalty_up_idx_cut_i[RATESIZE] = {
+ 0x0c, 0x0d, 0x0d, 0x0f,
+ 0x0d, 0x0e, 0x0f, 0x0f, 0x10, 0x12, 0x13, 0x14,
+ 0x0b, 0x0b, 0x11, 0x11, 0x12, 0x12, 0x12, 0x12,
+ 0x11, 0x11, 0x12, 0x13, 0x13, 0x13, 0x14, 0x15
+};
+
+static const u8 rssi_threshold[RATESIZE] = {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0x24, 0x26, 0x2a,
+ 0x18, 0x1a, 0x1d, 0x1f, 0x21, 0x27, 0x29, 0x2a,
+ 0, 0, 0, 0x1f, 0x23, 0x28, 0x2a, 0x2c
+};
+
+static const u16 n_threshold_high[RATESIZE] = {
+ 4, 4, 8, 16,
+ 24, 36, 48, 72, 96, 144, 192, 216,
+ 60, 80, 100, 160, 240, 400, 600, 800,
+ 300, 320, 480, 720, 1000, 1200, 1600, 2000
+};
+
+static const u16 n_threshold_low[RATESIZE] = {
+ 2, 2, 4, 8,
+ 12, 18, 24, 36, 48, 72, 96, 108,
+ 30, 40, 50, 80, 120, 200, 300, 400,
+ 150, 160, 240, 360, 500, 600, 800, 1000
+};
+
+static const u8 dropping_necessary[RATESIZE] = {
+ 1, 1, 1, 1,
+ 1, 2, 3, 4, 5, 6, 7, 8,
+ 1, 2, 3, 4, 5, 6, 7, 8,
+ 5, 6, 7, 8, 9, 10, 11, 12
+};
+
+static const u8 pending_for_rate_up_fail[5] = {2, 10, 24, 40, 60};
+
+static const u16 dynamic_tx_rpt_timing[6] = {
+ 0x186a, 0x30d4, 0x493e, 0x61a8, 0x7a12, 0x927c /* 200ms-1200ms */
+};
+
+enum rtl8188e_tx_rpt_timing {
+ DEFAULT_TIMING = 0,
+ INCREASE_TIMING,
+ DECREASE_TIMING
+};
+
+static int rtl8188eu_identify_chip(struct rtl8xxxu_priv *priv)
+{
+ struct device *dev = &priv->udev->dev;
+ u32 sys_cfg, vendor;
+ int ret = 0;
+
+ strscpy(priv->chip_name, "8188EU", sizeof(priv->chip_name));
+ priv->rtl_chip = RTL8188E;
+ priv->rf_paths = 1;
+ priv->rx_paths = 1;
+ priv->tx_paths = 1;
+ priv->has_wifi = 1;
+
+ sys_cfg = rtl8xxxu_read32(priv, REG_SYS_CFG);
+ priv->chip_cut = u32_get_bits(sys_cfg, SYS_CFG_CHIP_VERSION_MASK);
+ if (sys_cfg & SYS_CFG_TRP_VAUX_EN) {
+ dev_info(dev, "Unsupported test chip\n");
+ return -EOPNOTSUPP;
+ }
+
+ /*
+ * TODO: At a glance, I cut requires a different firmware,
+ * different initialisation tables, and no software rate
+ * control. The vendor driver is not configured to handle
+ * I cut chips by default. Are there any in the wild?
+ */
+ if (priv->chip_cut == 8) {
+ dev_info(dev, "RTL8188EU cut I is not supported. Please complain about it at linux-wireless@vger.kernel.org.\n");
+ return -EOPNOTSUPP;
+ }
+
+ vendor = sys_cfg & SYS_CFG_VENDOR_ID;
+ rtl8xxxu_identify_vendor_1bit(priv, vendor);
+
+ ret = rtl8xxxu_config_endpoints_no_sie(priv);
+
+ return ret;
+}
+
+static void rtl8188eu_config_channel(struct ieee80211_hw *hw)
+{
+ struct rtl8xxxu_priv *priv = hw->priv;
+ u32 val32, rsr;
+ u8 opmode;
+ int sec_ch_above, channel;
+ int i;
+
+ opmode = rtl8xxxu_read8(priv, REG_BW_OPMODE);
+ rsr = rtl8xxxu_read32(priv, REG_RESPONSE_RATE_SET);
+ channel = hw->conf.chandef.chan->hw_value;
+
+ switch (hw->conf.chandef.width) {
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ case NL80211_CHAN_WIDTH_20:
+ opmode |= BW_OPMODE_20MHZ;
+ rtl8xxxu_write8(priv, REG_BW_OPMODE, opmode);
+
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_RF_MODE);
+ val32 &= ~FPGA_RF_MODE;
+ rtl8xxxu_write32(priv, REG_FPGA0_RF_MODE, val32);
+
+ val32 = rtl8xxxu_read32(priv, REG_FPGA1_RF_MODE);
+ val32 &= ~FPGA_RF_MODE;
+ rtl8xxxu_write32(priv, REG_FPGA1_RF_MODE, val32);
+ break;
+ case NL80211_CHAN_WIDTH_40:
+ if (hw->conf.chandef.center_freq1 >
+ hw->conf.chandef.chan->center_freq) {
+ sec_ch_above = 1;
+ channel += 2;
+ } else {
+ sec_ch_above = 0;
+ channel -= 2;
+ }
+
+ opmode &= ~BW_OPMODE_20MHZ;
+ rtl8xxxu_write8(priv, REG_BW_OPMODE, opmode);
+ rsr &= ~RSR_RSC_BANDWIDTH_40M;
+ if (sec_ch_above)
+ rsr |= RSR_RSC_LOWER_SUB_CHANNEL;
+ else
+ rsr |= RSR_RSC_UPPER_SUB_CHANNEL;
+ rtl8xxxu_write32(priv, REG_RESPONSE_RATE_SET, rsr);
+
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_RF_MODE);
+ val32 |= FPGA_RF_MODE;
+ rtl8xxxu_write32(priv, REG_FPGA0_RF_MODE, val32);
+
+ val32 = rtl8xxxu_read32(priv, REG_FPGA1_RF_MODE);
+ val32 |= FPGA_RF_MODE;
+ rtl8xxxu_write32(priv, REG_FPGA1_RF_MODE, val32);
+
+ /*
+ * Set Control channel to upper or lower. These settings
+ * are required only for 40MHz
+ */
+ val32 = rtl8xxxu_read32(priv, REG_CCK0_SYSTEM);
+ val32 &= ~CCK0_SIDEBAND;
+ if (!sec_ch_above)
+ val32 |= CCK0_SIDEBAND;
+ rtl8xxxu_write32(priv, REG_CCK0_SYSTEM, val32);
+
+ val32 = rtl8xxxu_read32(priv, REG_OFDM1_LSTF);
+ val32 &= ~OFDM_LSTF_PRIME_CH_MASK; /* 0xc00 */
+ if (sec_ch_above)
+ val32 |= OFDM_LSTF_PRIME_CH_LOW;
+ else
+ val32 |= OFDM_LSTF_PRIME_CH_HIGH;
+ rtl8xxxu_write32(priv, REG_OFDM1_LSTF, val32);
+
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_POWER_SAVE);
+ val32 &= ~(FPGA0_PS_LOWER_CHANNEL | FPGA0_PS_UPPER_CHANNEL);
+ if (sec_ch_above)
+ val32 |= FPGA0_PS_UPPER_CHANNEL;
+ else
+ val32 |= FPGA0_PS_LOWER_CHANNEL;
+ rtl8xxxu_write32(priv, REG_FPGA0_POWER_SAVE, val32);
+ break;
+
+ default:
+ break;
+ }
+
+ for (i = RF_A; i < priv->rf_paths; i++) {
+ val32 = rtl8xxxu_read_rfreg(priv, i, RF6052_REG_MODE_AG);
+ u32p_replace_bits(&val32, channel, MODE_AG_CHANNEL_MASK);
+ rtl8xxxu_write_rfreg(priv, i, RF6052_REG_MODE_AG, val32);
+ }
+
+ for (i = RF_A; i < priv->rf_paths; i++) {
+ val32 = rtl8xxxu_read_rfreg(priv, i, RF6052_REG_MODE_AG);
+ val32 &= ~MODE_AG_BW_MASK;
+ if (hw->conf.chandef.width == NL80211_CHAN_WIDTH_40)
+ val32 |= MODE_AG_BW_40MHZ_8723B;
+ else
+ val32 |= MODE_AG_BW_20MHZ_8723B;
+ rtl8xxxu_write_rfreg(priv, i, RF6052_REG_MODE_AG, val32);
+ }
+}
+
+static void rtl8188eu_init_aggregation(struct rtl8xxxu_priv *priv)
+{
+ u8 agg_ctrl, usb_spec;
+
+ usb_spec = rtl8xxxu_read8(priv, REG_USB_SPECIAL_OPTION);
+ usb_spec &= ~USB_SPEC_USB_AGG_ENABLE;
+ rtl8xxxu_write8(priv, REG_USB_SPECIAL_OPTION, usb_spec);
+
+ agg_ctrl = rtl8xxxu_read8(priv, REG_TRXDMA_CTRL);
+ agg_ctrl &= ~TRXDMA_CTRL_RXDMA_AGG_EN;
+ rtl8xxxu_write8(priv, REG_TRXDMA_CTRL, agg_ctrl);
+}
+
+static int rtl8188eu_parse_efuse(struct rtl8xxxu_priv *priv)
+{
+ struct rtl8188eu_efuse *efuse = &priv->efuse_wifi.efuse8188eu;
+
+ if (efuse->rtl_id != cpu_to_le16(0x8129))
+ return -EINVAL;
+
+ ether_addr_copy(priv->mac_addr, efuse->mac_addr);
+
+ memcpy(priv->cck_tx_power_index_A, efuse->tx_power_index_A.cck_base,
+ sizeof(efuse->tx_power_index_A.cck_base));
+
+ memcpy(priv->ht40_1s_tx_power_index_A,
+ efuse->tx_power_index_A.ht40_base,
+ sizeof(efuse->tx_power_index_A.ht40_base));
+
+ priv->default_crystal_cap = efuse->xtal_k & 0x3f;
+
+ dev_info(&priv->udev->dev, "Vendor: %.7s\n", efuse->vendor_name);
+ dev_info(&priv->udev->dev, "Product: %.11s\n", efuse->device_name);
+ dev_info(&priv->udev->dev, "Serial: %.11s\n", efuse->serial);
+
+ return 0;
+}
+
+static void rtl8188eu_reset_8051(struct rtl8xxxu_priv *priv)
+{
+ u16 sys_func;
+
+ sys_func = rtl8xxxu_read16(priv, REG_SYS_FUNC);
+ sys_func &= ~SYS_FUNC_CPU_ENABLE;
+ rtl8xxxu_write16(priv, REG_SYS_FUNC, sys_func);
+
+ sys_func |= SYS_FUNC_CPU_ENABLE;
+ rtl8xxxu_write16(priv, REG_SYS_FUNC, sys_func);
+}
+
+static int rtl8188eu_load_firmware(struct rtl8xxxu_priv *priv)
+{
+ const char *fw_name;
+ int ret;
+
+ fw_name = "rtlwifi/rtl8188eufw.bin";
+
+ ret = rtl8xxxu_load_firmware(priv, fw_name);
+
+ return ret;
+}
+
+static void rtl8188eu_init_phy_bb(struct rtl8xxxu_priv *priv)
+{
+ u8 val8;
+ u16 val16;
+
+ val16 = rtl8xxxu_read16(priv, REG_SYS_FUNC);
+ val16 |= SYS_FUNC_BB_GLB_RSTN | SYS_FUNC_BBRSTB | SYS_FUNC_DIO_RF;
+ rtl8xxxu_write16(priv, REG_SYS_FUNC, val16);
+
+ /*
+ * Per vendor driver, run power sequence before init of RF
+ */
+ val8 = RF_ENABLE | RF_RSTB | RF_SDMRSTB;
+ rtl8xxxu_write8(priv, REG_RF_CTRL, val8);
+
+ val8 = SYS_FUNC_USBA | SYS_FUNC_USBD |
+ SYS_FUNC_BB_GLB_RSTN | SYS_FUNC_BBRSTB;
+ rtl8xxxu_write8(priv, REG_SYS_FUNC, val8);
+
+ rtl8xxxu_init_phy_regs(priv, rtl8188eu_phy_init_table);
+ rtl8xxxu_init_phy_regs(priv, rtl8188e_agc_table);
+}
+
+static int rtl8188eu_init_phy_rf(struct rtl8xxxu_priv *priv)
+{
+ return rtl8xxxu_init_phy_rf(priv, rtl8188eu_radioa_init_table, RF_A);
+}
+
+static int rtl8188eu_iqk_path_a(struct rtl8xxxu_priv *priv)
+{
+ u32 reg_eac, reg_e94, reg_e9c;
+ int result = 0;
+
+ /* Path A IQK setting */
+ rtl8xxxu_write32(priv, REG_TX_IQK_TONE_A, 0x10008c1c);
+ rtl8xxxu_write32(priv, REG_RX_IQK_TONE_A, 0x30008c1c);
+
+ rtl8xxxu_write32(priv, REG_TX_IQK_PI_A, 0x8214032a);
+ rtl8xxxu_write32(priv, REG_RX_IQK_PI_A, 0x28160000);
+
+ /* LO calibration setting */
+ rtl8xxxu_write32(priv, REG_IQK_AGC_RSP, 0x00462911);
+
+ /* One shot, path A LOK & IQK */
+ rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf9000000);
+ rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf8000000);
+
+ mdelay(10);
+
+ /* Check failed */
+ reg_eac = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_A_2);
+ reg_e94 = rtl8xxxu_read32(priv, REG_TX_POWER_BEFORE_IQK_A);
+ reg_e9c = rtl8xxxu_read32(priv, REG_TX_POWER_AFTER_IQK_A);
+
+ if (!(reg_eac & BIT(28)) &&
+ ((reg_e94 & 0x03ff0000) != 0x01420000) &&
+ ((reg_e9c & 0x03ff0000) != 0x00420000))
+ result |= 0x01;
+
+ return result;
+}
+
+static int rtl8188eu_rx_iqk_path_a(struct rtl8xxxu_priv *priv)
+{
+ u32 reg_ea4, reg_eac, reg_e94, reg_e9c, val32;
+ int result = 0;
+
+ /* Leave IQK mode */
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
+ u32p_replace_bits(&val32, 0, 0xffffff00);
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
+
+ /* Enable path A PA in TX IQK mode */
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_WE_LUT, 0x800a0);
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_RCK_OS, 0x30000);
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G1, 0x0000f);
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G2, 0xf117b);
+
+ /* Enter IQK mode */
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
+ u32p_replace_bits(&val32, 0x808000, 0xffffff00);
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
+
+ /* TX IQK setting */
+ rtl8xxxu_write32(priv, REG_TX_IQK, 0x01007c00);
+ rtl8xxxu_write32(priv, REG_RX_IQK, 0x81004800);
+
+ /* path-A IQK setting */
+ rtl8xxxu_write32(priv, REG_TX_IQK_TONE_A, 0x10008c1c);
+ rtl8xxxu_write32(priv, REG_RX_IQK_TONE_A, 0x30008c1c);
+
+ rtl8xxxu_write32(priv, REG_TX_IQK_PI_A, 0x82160804);
+ rtl8xxxu_write32(priv, REG_RX_IQK_PI_A, 0x28160000);
+
+ /* LO calibration setting */
+ rtl8xxxu_write32(priv, REG_IQK_AGC_RSP, 0x0046a911);
+
+ /* One shot, path A LOK & IQK */
+ rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf9000000);
+ rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf8000000);
+
+ mdelay(10);
+
+ /* Check failed */
+ reg_eac = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_A_2);
+ reg_e94 = rtl8xxxu_read32(priv, REG_TX_POWER_BEFORE_IQK_A);
+ reg_e9c = rtl8xxxu_read32(priv, REG_TX_POWER_AFTER_IQK_A);
+
+ if (!(reg_eac & BIT(28)) &&
+ ((reg_e94 & 0x03ff0000) != 0x01420000) &&
+ ((reg_e9c & 0x03ff0000) != 0x00420000))
+ result |= 0x01;
+ else
+ goto out;
+
+ val32 = 0x80007c00 |
+ (reg_e94 & 0x03ff0000) | ((reg_e9c >> 16) & 0x03ff);
+ rtl8xxxu_write32(priv, REG_TX_IQK, val32);
+
+ /* Modify RX IQK mode table */
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
+ u32p_replace_bits(&val32, 0, 0xffffff00);
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
+
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_WE_LUT, 0x800a0);
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_RCK_OS, 0x30000);
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G1, 0x0000f);
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G2, 0xf7ffa);
+
+ /* Enter IQK mode */
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
+ u32p_replace_bits(&val32, 0x808000, 0xffffff00);
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
+
+ /* IQK setting */
+ rtl8xxxu_write32(priv, REG_RX_IQK, 0x01004800);
+
+ /* Path A IQK setting */
+ rtl8xxxu_write32(priv, REG_TX_IQK_TONE_A, 0x30008c1c);
+ rtl8xxxu_write32(priv, REG_RX_IQK_TONE_A, 0x10008c1c);
+
+ rtl8xxxu_write32(priv, REG_TX_IQK_PI_A, 0x82160c05);
+ rtl8xxxu_write32(priv, REG_RX_IQK_PI_A, 0x28160c05);
+
+ /* LO calibration setting */
+ rtl8xxxu_write32(priv, REG_IQK_AGC_RSP, 0x0046a911);
+
+ /* One shot, path A LOK & IQK */
+ rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf9000000);
+ rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf8000000);
+
+ mdelay(10);
+
+ reg_eac = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_A_2);
+ reg_ea4 = rtl8xxxu_read32(priv, REG_RX_POWER_BEFORE_IQK_A_2);
+
+ if (!(reg_eac & BIT(27)) &&
+ ((reg_ea4 & 0x03ff0000) != 0x01320000) &&
+ ((reg_eac & 0x03ff0000) != 0x00360000))
+ result |= 0x02;
+ else
+ dev_warn(&priv->udev->dev, "%s: Path A RX IQK failed!\n",
+ __func__);
+
+out:
+ return result;
+}
+
+static void rtl8188eu_phy_iqcalibrate(struct rtl8xxxu_priv *priv,
+ int result[][8], int t)
+{
+ struct device *dev = &priv->udev->dev;
+ u32 i, val32;
+ int path_a_ok;
+ int retry = 2;
+ static const u32 adda_regs[RTL8XXXU_ADDA_REGS] = {
+ REG_FPGA0_XCD_SWITCH_CTRL, REG_BLUETOOTH,
+ REG_RX_WAIT_CCA, REG_TX_CCK_RFON,
+ REG_TX_CCK_BBON, REG_TX_OFDM_RFON,
+ REG_TX_OFDM_BBON, REG_TX_TO_RX,
+ REG_TX_TO_TX, REG_RX_CCK,
+ REG_RX_OFDM, REG_RX_WAIT_RIFS,
+ REG_RX_TO_RX, REG_STANDBY,
+ REG_SLEEP, REG_PMPD_ANAEN
+ };
+ static const u32 iqk_mac_regs[RTL8XXXU_MAC_REGS] = {
+ REG_TXPAUSE, REG_BEACON_CTRL,
+ REG_BEACON_CTRL_1, REG_GPIO_MUXCFG
+ };
+ static const u32 iqk_bb_regs[RTL8XXXU_BB_REGS] = {
+ REG_OFDM0_TRX_PATH_ENABLE, REG_OFDM0_TR_MUX_PAR,
+ REG_FPGA0_XCD_RF_SW_CTRL, REG_CONFIG_ANT_A, REG_CONFIG_ANT_B,
+ REG_FPGA0_XAB_RF_SW_CTRL, REG_FPGA0_XA_RF_INT_OE,
+ REG_FPGA0_XB_RF_INT_OE, REG_CCK0_AFE_SETTING
+ };
+
+ /*
+ * Note: IQ calibration must be performed after loading
+ * PHY_REG.txt , and radio_a, radio_b.txt
+ */
+
+ if (t == 0) {
+ /* Save ADDA parameters, turn Path A ADDA on */
+ rtl8xxxu_save_regs(priv, adda_regs, priv->adda_backup,
+ RTL8XXXU_ADDA_REGS);
+ rtl8xxxu_save_mac_regs(priv, iqk_mac_regs, priv->mac_backup);
+ rtl8xxxu_save_regs(priv, iqk_bb_regs,
+ priv->bb_backup, RTL8XXXU_BB_REGS);
+ }
+
+ rtl8xxxu_path_adda_on(priv, adda_regs, true);
+
+ if (t == 0) {
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_XA_HSSI_PARM1);
+ priv->pi_enabled = u32_get_bits(val32, FPGA0_HSSI_PARM1_PI);
+ }
+
+ if (!priv->pi_enabled) {
+ /* Switch BB to PI mode to do IQ Calibration. */
+ rtl8xxxu_write32(priv, REG_FPGA0_XA_HSSI_PARM1, 0x01000100);
+ rtl8xxxu_write32(priv, REG_FPGA0_XB_HSSI_PARM1, 0x01000100);
+ }
+
+ /* MAC settings */
+ rtl8xxxu_mac_calibration(priv, iqk_mac_regs, priv->mac_backup);
+
+ val32 = rtl8xxxu_read32(priv, REG_CCK0_AFE_SETTING);
+ u32p_replace_bits(&val32, 0xf, 0x0f000000);
+ rtl8xxxu_write32(priv, REG_CCK0_AFE_SETTING, val32);
+
+ rtl8xxxu_write32(priv, REG_OFDM0_TRX_PATH_ENABLE, 0x03a05600);
+ rtl8xxxu_write32(priv, REG_OFDM0_TR_MUX_PAR, 0x000800e4);
+ rtl8xxxu_write32(priv, REG_FPGA0_XCD_RF_SW_CTRL, 0x22204000);
+
+ if (!priv->no_pape) {
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_XAB_RF_SW_CTRL);
+ val32 |= (FPGA0_RF_PAPE |
+ (FPGA0_RF_PAPE << FPGA0_RF_BD_CTRL_SHIFT));
+ rtl8xxxu_write32(priv, REG_FPGA0_XAB_RF_SW_CTRL, val32);
+ }
+
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_XA_RF_INT_OE);
+ val32 &= ~BIT(10);
+ rtl8xxxu_write32(priv, REG_FPGA0_XA_RF_INT_OE, val32);
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_XB_RF_INT_OE);
+ val32 &= ~BIT(10);
+ rtl8xxxu_write32(priv, REG_FPGA0_XB_RF_INT_OE, val32);
+
+ /* Page B init */
+ rtl8xxxu_write32(priv, REG_CONFIG_ANT_A, 0x0f600000);
+
+ /* IQ calibration setting */
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
+ u32p_replace_bits(&val32, 0x808000, 0xffffff00);
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
+ rtl8xxxu_write32(priv, REG_TX_IQK, 0x01007c00);
+ rtl8xxxu_write32(priv, REG_RX_IQK, 0x81004800);
+
+ for (i = 0; i < retry; i++) {
+ path_a_ok = rtl8188eu_iqk_path_a(priv);
+ if (path_a_ok == 0x01) {
+ val32 = rtl8xxxu_read32(priv,
+ REG_TX_POWER_BEFORE_IQK_A);
+ result[t][0] = (val32 >> 16) & 0x3ff;
+ val32 = rtl8xxxu_read32(priv,
+ REG_TX_POWER_AFTER_IQK_A);
+ result[t][1] = (val32 >> 16) & 0x3ff;
+ break;
+ }
+ }
+
+ if (!path_a_ok)
+ dev_dbg(dev, "%s: Path A TX IQK failed!\n", __func__);
+
+ for (i = 0; i < retry; i++) {
+ path_a_ok = rtl8188eu_rx_iqk_path_a(priv);
+ if (path_a_ok == 0x03) {
+ val32 = rtl8xxxu_read32(priv,
+ REG_RX_POWER_BEFORE_IQK_A_2);
+ result[t][2] = (val32 >> 16) & 0x3ff;
+ val32 = rtl8xxxu_read32(priv,
+ REG_RX_POWER_AFTER_IQK_A_2);
+ result[t][3] = (val32 >> 16) & 0x3ff;
+
+ break;
+ }
+ }
+
+ if (!path_a_ok)
+ dev_dbg(dev, "%s: Path A RX IQK failed!\n", __func__);
+
+ /* Back to BB mode, load original value */
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
+ u32p_replace_bits(&val32, 0, 0xffffff00);
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
+
+ if (t == 0)
+ return;
+
+ if (!priv->pi_enabled) {
+ /* Switch back BB to SI mode after finishing IQ Calibration */
+ rtl8xxxu_write32(priv, REG_FPGA0_XA_HSSI_PARM1, 0x01000000);
+ rtl8xxxu_write32(priv, REG_FPGA0_XB_HSSI_PARM1, 0x01000000);
+ }
+
+ /* Reload ADDA power saving parameters */
+ rtl8xxxu_restore_regs(priv, adda_regs, priv->adda_backup,
+ RTL8XXXU_ADDA_REGS);
+
+ /* Reload MAC parameters */
+ rtl8xxxu_restore_mac_regs(priv, iqk_mac_regs, priv->mac_backup);
+
+ /* Reload BB parameters */
+ rtl8xxxu_restore_regs(priv, iqk_bb_regs,
+ priv->bb_backup, RTL8XXXU_BB_REGS);
+
+ /* Restore RX initial gain */
+ rtl8xxxu_write32(priv, REG_FPGA0_XA_LSSI_PARM, 0x00032ed3);
+
+ /* Load 0xe30 IQC default value */
+ rtl8xxxu_write32(priv, REG_TX_IQK_TONE_A, 0x01008c00);
+ rtl8xxxu_write32(priv, REG_RX_IQK_TONE_A, 0x01008c00);
+}
+
+static void rtl8188eu_phy_iq_calibrate(struct rtl8xxxu_priv *priv)
+{
+ struct device *dev = &priv->udev->dev;
+ int result[4][8]; /* last is final result */
+ int i, candidate;
+ bool path_a_ok;
+ u32 reg_e94, reg_e9c, reg_ea4, reg_eac;
+ u32 reg_eb4, reg_ebc, reg_ec4, reg_ecc;
+ bool simu;
+
+ memset(result, 0, sizeof(result));
+ result[3][0] = 0x100;
+ result[3][2] = 0x100;
+ result[3][4] = 0x100;
+ result[3][6] = 0x100;
+
+ candidate = -1;
+
+ path_a_ok = false;
+
+ for (i = 0; i < 3; i++) {
+ rtl8188eu_phy_iqcalibrate(priv, result, i);
+
+ if (i == 1) {
+ simu = rtl8xxxu_simularity_compare(priv,
+ result, 0, 1);
+ if (simu) {
+ candidate = 0;
+ break;
+ }
+ }
+
+ if (i == 2) {
+ simu = rtl8xxxu_simularity_compare(priv,
+ result, 0, 2);
+ if (simu) {
+ candidate = 0;
+ break;
+ }
+
+ simu = rtl8xxxu_simularity_compare(priv,
+ result, 1, 2);
+ if (simu)
+ candidate = 1;
+ else
+ candidate = 3;
+ }
+ }
+
+ if (candidate >= 0) {
+ reg_e94 = result[candidate][0];
+ priv->rege94 = reg_e94;
+ reg_e9c = result[candidate][1];
+ priv->rege9c = reg_e9c;
+ reg_ea4 = result[candidate][2];
+ reg_eac = result[candidate][3];
+ reg_eb4 = result[candidate][4];
+ priv->regeb4 = reg_eb4;
+ reg_ebc = result[candidate][5];
+ priv->regebc = reg_ebc;
+ reg_ec4 = result[candidate][6];
+ reg_ecc = result[candidate][7];
+ dev_dbg(dev, "%s: candidate is %x\n", __func__, candidate);
+ dev_dbg(dev,
+ "%s: e94=%x e9c=%x ea4=%x eac=%x eb4=%x ebc=%x ec4=%x ecc=%x\n",
+ __func__, reg_e94, reg_e9c, reg_ea4, reg_eac,
+ reg_eb4, reg_ebc, reg_ec4, reg_ecc);
+ path_a_ok = true;
+ } else {
+ reg_e94 = 0x100;
+ reg_eb4 = 0x100;
+ priv->rege94 = 0x100;
+ priv->regeb4 = 0x100;
+ reg_e9c = 0x0;
+ reg_ebc = 0x0;
+ priv->rege9c = 0x0;
+ priv->regebc = 0x0;
+ }
+
+ if (reg_e94 && candidate >= 0)
+ rtl8xxxu_fill_iqk_matrix_a(priv, path_a_ok, result,
+ candidate, (reg_ea4 == 0));
+
+ rtl8xxxu_save_regs(priv, rtl8xxxu_iqk_phy_iq_bb_reg,
+ priv->bb_recovery_backup, RTL8XXXU_BB_REGS);
+}
+
+static void rtl8188e_disabled_to_emu(struct rtl8xxxu_priv *priv)
+{
+ u16 val16;
+
+ val16 = rtl8xxxu_read16(priv, REG_APS_FSMCO);
+ val16 &= ~(APS_FSMCO_HW_SUSPEND | APS_FSMCO_PCIE);
+ rtl8xxxu_write16(priv, REG_APS_FSMCO, val16);
+}
+
+static int rtl8188e_emu_to_active(struct rtl8xxxu_priv *priv)
+{
+ u8 val8;
+ u32 val32;
+ u16 val16;
+ int count, ret = 0;
+
+ /* wait till 0x04[17] = 1 power ready*/
+ for (count = RTL8XXXU_MAX_REG_POLL; count; count--) {
+ val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO);
+ if (val32 & BIT(17))
+ break;
+
+ udelay(10);
+ }
+
+ if (!count) {
+ ret = -EBUSY;
+ goto exit;
+ }
+
+ /* reset baseband */
+ val8 = rtl8xxxu_read8(priv, REG_SYS_FUNC);
+ val8 &= ~(SYS_FUNC_BBRSTB | SYS_FUNC_BB_GLB_RSTN);
+ rtl8xxxu_write8(priv, REG_SYS_FUNC, val8);
+
+ /*0x24[23] = 2b'01 schmit trigger */
+ val32 = rtl8xxxu_read32(priv, REG_AFE_XTAL_CTRL);
+ val32 |= BIT(23);
+ rtl8xxxu_write32(priv, REG_AFE_XTAL_CTRL, val32);
+
+ /* 0x04[15] = 0 disable HWPDN (control by DRV)*/
+ val16 = rtl8xxxu_read16(priv, REG_APS_FSMCO);
+ val16 &= ~APS_FSMCO_HW_POWERDOWN;
+ rtl8xxxu_write16(priv, REG_APS_FSMCO, val16);
+
+ /*0x04[12:11] = 2b'00 disable WL suspend*/
+ val16 = rtl8xxxu_read16(priv, REG_APS_FSMCO);
+ val16 &= ~(APS_FSMCO_HW_SUSPEND | APS_FSMCO_PCIE);
+ rtl8xxxu_write16(priv, REG_APS_FSMCO, val16);
+
+ /* set, then poll until 0 */
+ val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO);
+ val32 |= APS_FSMCO_MAC_ENABLE;
+ rtl8xxxu_write32(priv, REG_APS_FSMCO, val32);
+
+ for (count = RTL8XXXU_MAX_REG_POLL; count; count--) {
+ val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO);
+ if ((val32 & APS_FSMCO_MAC_ENABLE) == 0) {
+ ret = 0;
+ break;
+ }
+ udelay(10);
+ }
+
+ if (!count) {
+ ret = -EBUSY;
+ goto exit;
+ }
+
+ /* LDO normal mode*/
+ val8 = rtl8xxxu_read8(priv, REG_LPLDO_CTRL);
+ val8 &= ~BIT(4);
+ rtl8xxxu_write8(priv, REG_LPLDO_CTRL, val8);
+
+exit:
+ return ret;
+}
+
+static int rtl8188eu_active_to_emu(struct rtl8xxxu_priv *priv)
+{
+ u8 val8;
+
+ /* Turn off RF */
+ val8 = rtl8xxxu_read8(priv, REG_RF_CTRL);
+ val8 &= ~RF_ENABLE;
+ rtl8xxxu_write8(priv, REG_RF_CTRL, val8);
+
+ /* LDO Sleep mode */
+ val8 = rtl8xxxu_read8(priv, REG_LPLDO_CTRL);
+ val8 |= BIT(4);
+ rtl8xxxu_write8(priv, REG_LPLDO_CTRL, val8);
+
+ return 0;
+}
+
+static int rtl8188eu_emu_to_disabled(struct rtl8xxxu_priv *priv)
+{
+ u32 val32;
+ u16 val16;
+ u8 val8;
+
+ val32 = rtl8xxxu_read32(priv, REG_AFE_XTAL_CTRL);
+ val32 |= BIT(23);
+ rtl8xxxu_write32(priv, REG_AFE_XTAL_CTRL, val32);
+
+ val16 = rtl8xxxu_read16(priv, REG_APS_FSMCO);
+ val16 &= ~APS_FSMCO_PCIE;
+ val16 |= APS_FSMCO_HW_SUSPEND;
+ rtl8xxxu_write16(priv, REG_APS_FSMCO, val16);
+
+ rtl8xxxu_write8(priv, REG_APS_FSMCO + 3, 0x00);
+
+ val8 = rtl8xxxu_read8(priv, REG_GPIO_MUXCFG + 1);
+ val8 &= ~BIT(4);
+ rtl8xxxu_write8(priv, REG_GPIO_MUXCFG + 1, val8);
+
+ /* Set USB suspend enable local register 0xfe10[4]=1 */
+ val8 = rtl8xxxu_read8(priv, 0xfe10);
+ val8 |= BIT(4);
+ rtl8xxxu_write8(priv, 0xfe10, val8);
+
+ return 0;
+}
+
+static int rtl8188eu_active_to_lps(struct rtl8xxxu_priv *priv)
+{
+ struct device *dev = &priv->udev->dev;
+ u8 val8;
+ u16 val16;
+ u32 val32;
+ int retry, retval;
+
+ rtl8xxxu_write8(priv, REG_TXPAUSE, 0x7f);
+
+ retry = 100;
+ retval = -EBUSY;
+ /* Poll 32 bit wide REG_SCH_TX_CMD for 0 to ensure no TX is pending. */
+ do {
+ val32 = rtl8xxxu_read32(priv, REG_SCH_TX_CMD);
+ if (!val32) {
+ retval = 0;
+ break;
+ }
+ } while (retry--);
+
+ if (!retry) {
+ dev_warn(dev, "Failed to flush TX queue\n");
+ retval = -EBUSY;
+ goto out;
+ }
+
+ /* Disable CCK and OFDM, clock gated */
+ val8 = rtl8xxxu_read8(priv, REG_SYS_FUNC);
+ val8 &= ~SYS_FUNC_BBRSTB;
+ rtl8xxxu_write8(priv, REG_SYS_FUNC, val8);
+
+ udelay(2);
+
+ /* Reset MAC TRX */
+ val16 = rtl8xxxu_read16(priv, REG_CR);
+ val16 |= 0xff;
+ val16 &= ~(CR_MAC_TX_ENABLE | CR_MAC_RX_ENABLE | CR_SECURITY_ENABLE);
+ rtl8xxxu_write16(priv, REG_CR, val16);
+
+ val8 = rtl8xxxu_read8(priv, REG_DUAL_TSF_RST);
+ val8 |= DUAL_TSF_TX_OK;
+ rtl8xxxu_write8(priv, REG_DUAL_TSF_RST, val8);
+
+out:
+ return retval;
+}
+
+static int rtl8188eu_power_on(struct rtl8xxxu_priv *priv)
+{
+ u16 val16;
+ int ret;
+
+ rtl8188e_disabled_to_emu(priv);
+
+ ret = rtl8188e_emu_to_active(priv);
+ if (ret)
+ goto exit;
+
+ /*
+ * Enable MAC DMA/WMAC/SCHEDULE/SEC block
+ * Set CR bit10 to enable 32k calibration.
+ * We do not set CR_MAC_TX_ENABLE | CR_MAC_RX_ENABLE here
+ * due to a hardware bug in the 88E, requiring those to be
+ * set after REG_TRXFF_BNDY is set. If not the RXFF bundary
+ * will get set to a larger buffer size than the real buffer
+ * size.
+ */
+ val16 = (CR_HCI_TXDMA_ENABLE | CR_HCI_RXDMA_ENABLE |
+ CR_TXDMA_ENABLE | CR_RXDMA_ENABLE |
+ CR_PROTOCOL_ENABLE | CR_SCHEDULE_ENABLE |
+ CR_SECURITY_ENABLE | CR_CALTIMER_ENABLE);
+ rtl8xxxu_write16(priv, REG_CR, val16);
+
+exit:
+ return ret;
+}
+
+static void rtl8188eu_power_off(struct rtl8xxxu_priv *priv)
+{
+ u8 val8;
+ u16 val16;
+
+ rtl8xxxu_flush_fifo(priv);
+
+ val8 = rtl8xxxu_read8(priv, REG_TX_REPORT_CTRL);
+ val8 &= ~TX_REPORT_CTRL_TIMER_ENABLE;
+ rtl8xxxu_write8(priv, REG_TX_REPORT_CTRL, val8);
+
+ /* Turn off RF */
+ rtl8xxxu_write8(priv, REG_RF_CTRL, 0x00);
+
+ rtl8188eu_active_to_lps(priv);
+
+ /* Reset Firmware if running in RAM */
+ if (rtl8xxxu_read8(priv, REG_MCU_FW_DL) & MCU_FW_RAM_SEL)
+ rtl8xxxu_firmware_self_reset(priv);
+
+ /* Reset MCU */
+ val16 = rtl8xxxu_read16(priv, REG_SYS_FUNC);
+ val16 &= ~SYS_FUNC_CPU_ENABLE;
+ rtl8xxxu_write16(priv, REG_SYS_FUNC, val16);
+
+ /* Reset MCU ready status */
+ rtl8xxxu_write8(priv, REG_MCU_FW_DL, 0x00);
+
+ /* 32K_CTRL looks to be very 8188e specific */
+ val8 = rtl8xxxu_read8(priv, REG_32K_CTRL);
+ val8 &= ~BIT(0);
+ rtl8xxxu_write8(priv, REG_32K_CTRL, val8);
+
+ rtl8188eu_active_to_emu(priv);
+ rtl8188eu_emu_to_disabled(priv);
+
+ /* Reset MCU IO Wrapper */
+ val8 = rtl8xxxu_read8(priv, REG_RSV_CTRL + 1);
+ val8 &= ~BIT(3);
+ rtl8xxxu_write8(priv, REG_RSV_CTRL + 1, val8);
+
+ val8 = rtl8xxxu_read8(priv, REG_RSV_CTRL + 1);
+ val8 |= BIT(3);
+ rtl8xxxu_write8(priv, REG_RSV_CTRL + 1, val8);
+
+ /* Vendor driver refers to GPIO_IN */
+ val8 = rtl8xxxu_read8(priv, REG_GPIO_PIN_CTRL);
+ /* Vendor driver refers to GPIO_OUT */
+ rtl8xxxu_write8(priv, REG_GPIO_PIN_CTRL + 1, val8);
+ rtl8xxxu_write8(priv, REG_GPIO_PIN_CTRL + 2, 0xff);
+
+ val8 = rtl8xxxu_read8(priv, REG_GPIO_IO_SEL);
+ rtl8xxxu_write8(priv, REG_GPIO_IO_SEL, val8 << 4);
+ val8 = rtl8xxxu_read8(priv, REG_GPIO_IO_SEL + 1);
+ rtl8xxxu_write8(priv, REG_GPIO_IO_SEL + 1, val8 | 0x0f);
+
+ /*
+ * Set LNA, TRSW, EX_PA Pin to output mode
+ * Referred to as REG_BB_PAD_CTRL in 8188eu vendor driver
+ */
+ rtl8xxxu_write32(priv, REG_PAD_CTRL1, 0x00080808);
+
+ rtl8xxxu_write8(priv, REG_RSV_CTRL, 0x00);
+
+ rtl8xxxu_write32(priv, REG_GPIO_MUXCFG, 0x00000000);
+}
+
+static void rtl8188e_enable_rf(struct rtl8xxxu_priv *priv)
+{
+ u32 val32;
+
+ rtl8xxxu_write8(priv, REG_RF_CTRL, RF_ENABLE | RF_RSTB | RF_SDMRSTB);
+
+ val32 = rtl8xxxu_read32(priv, REG_OFDM0_TRX_PATH_ENABLE);
+ val32 &= ~(OFDM_RF_PATH_RX_MASK | OFDM_RF_PATH_TX_MASK);
+ val32 |= OFDM_RF_PATH_RX_A | OFDM_RF_PATH_TX_A;
+ rtl8xxxu_write32(priv, REG_OFDM0_TRX_PATH_ENABLE, val32);
+
+ rtl8xxxu_write8(priv, REG_TXPAUSE, 0x00);
+}
+
+static void rtl8188e_disable_rf(struct rtl8xxxu_priv *priv)
+{
+ u32 val32;
+
+ val32 = rtl8xxxu_read32(priv, REG_OFDM0_TRX_PATH_ENABLE);
+ val32 &= ~OFDM_RF_PATH_TX_MASK;
+ rtl8xxxu_write32(priv, REG_OFDM0_TRX_PATH_ENABLE, val32);
+
+ /* Power down RF module */
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_AC, 0);
+
+ rtl8188eu_active_to_emu(priv);
+}
+
+static void rtl8188e_usb_quirks(struct rtl8xxxu_priv *priv)
+{
+ u16 val16;
+
+ /*
+ * Technically this is not a USB quirk, but a chip quirk.
+ * This has to be done after REG_TRXFF_BNDY is set, see
+ * rtl8188eu_power_on() for details.
+ */
+ val16 = rtl8xxxu_read16(priv, REG_CR);
+ val16 |= (CR_MAC_TX_ENABLE | CR_MAC_RX_ENABLE);
+ rtl8xxxu_write16(priv, REG_CR, val16);
+
+ rtl8xxxu_gen2_usb_quirks(priv);
+
+ /* Pre-TX enable WEP/TKIP security */
+ rtl8xxxu_write8(priv, REG_EARLY_MODE_CONTROL_8188E + 3, 0x01);
+}
+
+static s8 rtl8188e_cck_rssi(struct rtl8xxxu_priv *priv, u8 cck_agc_rpt)
+{
+ /* only use lna 0/1/2/3/7 */
+ static const s8 lna_gain_table_0[8] = {17, -1, -13, -29, -32, -35, -38, -41};
+ /* only use lna 3/7 */
+ static const s8 lna_gain_table_1[8] = {29, 20, 12, 3, -6, -15, -24, -33};
+
+ s8 rx_pwr_all = 0x00;
+ u8 vga_idx, lna_idx;
+ s8 lna_gain = 0;
+
+ lna_idx = u8_get_bits(cck_agc_rpt, CCK_AGC_RPT_LNA_IDX_MASK);
+ vga_idx = u8_get_bits(cck_agc_rpt, CCK_AGC_RPT_VGA_IDX_MASK);
+
+ if (priv->chip_cut >= 8) /* cut I */ /* SMIC */
+ lna_gain = lna_gain_table_0[lna_idx];
+ else /* TSMC */
+ lna_gain = lna_gain_table_1[lna_idx];
+
+ rx_pwr_all = lna_gain - (2 * vga_idx);
+
+ return rx_pwr_all;
+}
+
+static int rtl8188eu_led_brightness_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct rtl8xxxu_priv *priv = container_of(led_cdev,
+ struct rtl8xxxu_priv,
+ led_cdev);
+ u8 ledcfg = rtl8xxxu_read8(priv, REG_LEDCFG2);
+
+ if (brightness == LED_OFF) {
+ ledcfg &= ~LEDCFG2_HW_LED_CONTROL;
+ ledcfg |= LEDCFG2_SW_LED_CONTROL | LEDCFG2_SW_LED_DISABLE;
+ } else if (brightness == LED_ON) {
+ ledcfg &= ~(LEDCFG2_HW_LED_CONTROL | LEDCFG2_SW_LED_DISABLE);
+ ledcfg |= LEDCFG2_SW_LED_CONTROL;
+ } else if (brightness == RTL8XXXU_HW_LED_CONTROL) {
+ ledcfg &= ~LEDCFG2_SW_LED_DISABLE;
+ ledcfg |= LEDCFG2_HW_LED_CONTROL | LEDCFG2_HW_LED_ENABLE;
+ }
+
+ rtl8xxxu_write8(priv, REG_LEDCFG2, ledcfg);
+
+ return 0;
+}
+
+static void rtl8188e_set_tx_rpt_timing(struct rtl8xxxu_ra_info *ra, u8 timing)
+{
+ u8 idx;
+
+ for (idx = 0; idx < 5; idx++)
+ if (dynamic_tx_rpt_timing[idx] == ra->rpt_time)
+ break;
+
+ if (timing == DEFAULT_TIMING) {
+ idx = 0; /* 200ms */
+ } else if (timing == INCREASE_TIMING) {
+ if (idx < 5)
+ idx++;
+ } else if (timing == DECREASE_TIMING) {
+ if (idx > 0)
+ idx--;
+ }
+
+ ra->rpt_time = dynamic_tx_rpt_timing[idx];
+}
+
+static void rtl8188e_rate_down(struct rtl8xxxu_ra_info *ra)
+{
+ u8 rate_id = ra->pre_rate;
+ u8 lowest_rate = ra->lowest_rate;
+ u8 highest_rate = ra->highest_rate;
+ s8 i;
+
+ if (rate_id > highest_rate) {
+ rate_id = highest_rate;
+ } else if (ra->rate_sgi) {
+ ra->rate_sgi = 0;
+ } else if (rate_id > lowest_rate) {
+ if (rate_id > 0) {
+ for (i = rate_id - 1; i >= lowest_rate; i--) {
+ if (ra->ra_use_rate & BIT(i)) {
+ rate_id = i;
+ goto rate_down_finish;
+ }
+ }
+ }
+ } else if (rate_id <= lowest_rate) {
+ rate_id = lowest_rate;
+ }
+
+rate_down_finish:
+ if (ra->ra_waiting_counter == 1) {
+ ra->ra_waiting_counter++;
+ ra->ra_pending_counter++;
+ } else if (ra->ra_waiting_counter > 1) {
+ ra->ra_waiting_counter = 0;
+ ra->ra_pending_counter = 0;
+ }
+
+ if (ra->ra_pending_counter >= 4)
+ ra->ra_pending_counter = 4;
+
+ ra->ra_drop_after_down = 1;
+
+ ra->decision_rate = rate_id;
+
+ rtl8188e_set_tx_rpt_timing(ra, DECREASE_TIMING);
+}
+
+static void rtl8188e_rate_up(struct rtl8xxxu_ra_info *ra)
+{
+ u8 rate_id = ra->pre_rate;
+ u8 highest_rate = ra->highest_rate;
+ u8 i;
+
+ if (ra->ra_waiting_counter == 1) {
+ ra->ra_waiting_counter = 0;
+ ra->ra_pending_counter = 0;
+ } else if (ra->ra_waiting_counter > 1) {
+ ra->pre_rssi_sta_ra = ra->rssi_sta_ra;
+ goto rate_up_finish;
+ }
+
+ rtl8188e_set_tx_rpt_timing(ra, DEFAULT_TIMING);
+
+ if (rate_id < highest_rate) {
+ for (i = rate_id + 1; i <= highest_rate; i++) {
+ if (ra->ra_use_rate & BIT(i)) {
+ rate_id = i;
+ goto rate_up_finish;
+ }
+ }
+ } else if (rate_id == highest_rate) {
+ if (ra->sgi_enable && !ra->rate_sgi)
+ ra->rate_sgi = 1;
+ else if (!ra->sgi_enable)
+ ra->rate_sgi = 0;
+ } else { /* rate_id > ra->highest_rate */
+ rate_id = highest_rate;
+ }
+
+rate_up_finish:
+ if (ra->ra_waiting_counter == (4 + pending_for_rate_up_fail[ra->ra_pending_counter]))
+ ra->ra_waiting_counter = 0;
+ else
+ ra->ra_waiting_counter++;
+
+ ra->decision_rate = rate_id;
+}
+
+static void rtl8188e_reset_ra_counter(struct rtl8xxxu_ra_info *ra)
+{
+ u8 rate_id = ra->decision_rate;
+
+ ra->nsc_up = (n_threshold_high[rate_id] + n_threshold_low[rate_id]) >> 1;
+ ra->nsc_down = (n_threshold_high[rate_id] + n_threshold_low[rate_id]) >> 1;
+}
+
+static void rtl8188e_rate_decision(struct rtl8xxxu_ra_info *ra)
+{
+ struct rtl8xxxu_priv *priv = container_of(ra, struct rtl8xxxu_priv, ra_info);
+ const u8 *retry_penalty_idx_0;
+ const u8 *retry_penalty_idx_1;
+ const u8 *retry_penalty_up_idx;
+ u8 rate_id, penalty_id1, penalty_id2;
+ int i;
+
+ if (ra->total == 0)
+ return;
+
+ if (ra->ra_drop_after_down) {
+ ra->ra_drop_after_down--;
+
+ rtl8188e_reset_ra_counter(ra);
+
+ return;
+ }
+
+ if (priv->chip_cut == 8) { /* cut I */
+ retry_penalty_idx_0 = retry_penalty_idx_cut_i[0];
+ retry_penalty_idx_1 = retry_penalty_idx_cut_i[1];
+ retry_penalty_up_idx = retry_penalty_up_idx_cut_i;
+ } else {
+ retry_penalty_idx_0 = retry_penalty_idx_normal[0];
+ retry_penalty_idx_1 = retry_penalty_idx_normal[1];
+ retry_penalty_up_idx = retry_penalty_up_idx_normal;
+ }
+
+ if (ra->rssi_sta_ra < (ra->pre_rssi_sta_ra - 3) ||
+ ra->rssi_sta_ra > (ra->pre_rssi_sta_ra + 3)) {
+ ra->pre_rssi_sta_ra = ra->rssi_sta_ra;
+ ra->ra_waiting_counter = 0;
+ ra->ra_pending_counter = 0;
+ }
+
+ /* Start RA decision */
+ if (ra->pre_rate > ra->highest_rate)
+ rate_id = ra->highest_rate;
+ else
+ rate_id = ra->pre_rate;
+
+ /* rate down */
+ if (ra->rssi_sta_ra > rssi_threshold[rate_id])
+ penalty_id1 = retry_penalty_idx_0[rate_id];
+ else
+ penalty_id1 = retry_penalty_idx_1[rate_id];
+
+ for (i = 0; i < 5; i++)
+ ra->nsc_down += ra->retry[i] * retry_penalty[penalty_id1][i];
+
+ if (ra->nsc_down > (ra->total * retry_penalty[penalty_id1][5]))
+ ra->nsc_down -= ra->total * retry_penalty[penalty_id1][5];
+ else
+ ra->nsc_down = 0;
+
+ /* rate up */
+ penalty_id2 = retry_penalty_up_idx[rate_id];
+
+ for (i = 0; i < 5; i++)
+ ra->nsc_up += ra->retry[i] * retry_penalty[penalty_id2][i];
+
+ if (ra->nsc_up > (ra->total * retry_penalty[penalty_id2][5]))
+ ra->nsc_up -= ra->total * retry_penalty[penalty_id2][5];
+ else
+ ra->nsc_up = 0;
+
+ if (ra->nsc_down < n_threshold_low[rate_id] ||
+ ra->drop > dropping_necessary[rate_id]) {
+ rtl8188e_rate_down(ra);
+
+ rtl8xxxu_update_ra_report(&priv->ra_report, ra->decision_rate,
+ ra->rate_sgi, priv->ra_report.txrate.bw);
+ } else if (ra->nsc_up > n_threshold_high[rate_id]) {
+ rtl8188e_rate_up(ra);
+
+ rtl8xxxu_update_ra_report(&priv->ra_report, ra->decision_rate,
+ ra->rate_sgi, priv->ra_report.txrate.bw);
+ }
+
+ if (ra->decision_rate == ra->pre_rate)
+ ra->dynamic_tx_rpt_timing_counter++;
+ else
+ ra->dynamic_tx_rpt_timing_counter = 0;
+
+ if (ra->dynamic_tx_rpt_timing_counter >= 4) {
+ /* Rate didn't change 4 times, extend RPT timing */
+ rtl8188e_set_tx_rpt_timing(ra, INCREASE_TIMING);
+ ra->dynamic_tx_rpt_timing_counter = 0;
+ }
+
+ ra->pre_rate = ra->decision_rate;
+
+ rtl8188e_reset_ra_counter(ra);
+}
+
+static void rtl8188e_power_training_try_state(struct rtl8xxxu_ra_info *ra)
+{
+ ra->pt_try_state = 0;
+ switch (ra->pt_mode_ss) {
+ case 3:
+ if (ra->decision_rate >= DESC_RATE_MCS13)
+ ra->pt_try_state = 1;
+ break;
+ case 2:
+ if (ra->decision_rate >= DESC_RATE_MCS5)
+ ra->pt_try_state = 1;
+ break;
+ case 1:
+ if (ra->decision_rate >= DESC_RATE_48M)
+ ra->pt_try_state = 1;
+ break;
+ case 0:
+ if (ra->decision_rate >= DESC_RATE_11M)
+ ra->pt_try_state = 1;
+ break;
+ default:
+ break;
+ }
+
+ if (ra->rssi_sta_ra < 48) {
+ ra->pt_stage = 0;
+ } else if (ra->pt_try_state == 1) {
+ if ((ra->pt_stop_count >= 10) ||
+ (ra->pt_pre_rssi > ra->rssi_sta_ra + 5) ||
+ (ra->pt_pre_rssi < ra->rssi_sta_ra - 5) ||
+ (ra->decision_rate != ra->pt_pre_rate)) {
+ if (ra->pt_stage == 0)
+ ra->pt_stage = 1;
+ else if (ra->pt_stage == 1)
+ ra->pt_stage = 3;
+ else
+ ra->pt_stage = 5;
+
+ ra->pt_pre_rssi = ra->rssi_sta_ra;
+ ra->pt_stop_count = 0;
+ } else {
+ ra->ra_stage = 0;
+ ra->pt_stop_count++;
+ }
+ } else {
+ ra->pt_stage = 0;
+ ra->ra_stage = 0;
+ }
+
+ ra->pt_pre_rate = ra->decision_rate;
+
+ /* TODO: implement the "false alarm" statistics for this */
+ /* Disable power training when noisy environment */
+ /* if (p_dm_odm->is_disable_power_training) { */
+ if (1) {
+ ra->pt_stage = 0;
+ ra->ra_stage = 0;
+ ra->pt_stop_count = 0;
+ }
+}
+
+static void rtl8188e_power_training_decision(struct rtl8xxxu_ra_info *ra)
+{
+ u8 temp_stage;
+ u32 numsc;
+ u32 num_total;
+ u8 stage_id;
+ u8 j;
+
+ numsc = 0;
+ num_total = ra->total * pt_penalty[5];
+ for (j = 0; j <= 4; j++) {
+ numsc += ra->retry[j] * pt_penalty[j];
+
+ if (numsc > num_total)
+ break;
+ }
+
+ j >>= 1;
+ temp_stage = (ra->pt_stage + 1) >> 1;
+ if (temp_stage > j)
+ stage_id = temp_stage - j;
+ else
+ stage_id = 0;
+
+ ra->pt_smooth_factor = (ra->pt_smooth_factor >> 1) +
+ (ra->pt_smooth_factor >> 2) +
+ stage_id * 16 + 2;
+ if (ra->pt_smooth_factor > 192)
+ ra->pt_smooth_factor = 192;
+ stage_id = ra->pt_smooth_factor >> 6;
+ temp_stage = stage_id * 2;
+ if (temp_stage != 0)
+ temp_stage--;
+ if (ra->drop > 3)
+ temp_stage = 0;
+ ra->pt_stage = temp_stage;
+}
+
+void rtl8188e_handle_ra_tx_report2(struct rtl8xxxu_priv *priv, struct sk_buff *skb)
+{
+ u32 *_rx_desc = (u32 *)(skb->data - sizeof(struct rtl8xxxu_rxdesc16));
+ struct rtl8xxxu_rxdesc16 *rx_desc = (struct rtl8xxxu_rxdesc16 *)_rx_desc;
+ struct device *dev = &priv->udev->dev;
+ struct rtl8xxxu_ra_info *ra = &priv->ra_info;
+ u32 tx_rpt_len = rx_desc->pktlen & 0x3ff;
+ u32 items = tx_rpt_len / TX_RPT2_ITEM_SIZE;
+ u64 macid_valid = ((u64)_rx_desc[5] << 32) | _rx_desc[4];
+ u32 macid;
+ u8 *rpt = skb->data;
+ bool valid;
+ u16 min_rpt_time = 0x927c;
+
+ dev_dbg(dev, "%s: len: %d items: %d\n", __func__, tx_rpt_len, items);
+
+ for (macid = 0; macid < items; macid++) {
+ valid = false;
+
+ if (macid < 64)
+ valid = macid_valid & BIT(macid);
+
+ if (valid) {
+ ra->retry[0] = le16_to_cpu(*(__le16 *)rpt);
+ ra->retry[1] = rpt[2];
+ ra->retry[2] = rpt[3];
+ ra->retry[3] = rpt[4];
+ ra->retry[4] = rpt[5];
+ ra->drop = rpt[6];
+ ra->total = ra->retry[0] + ra->retry[1] + ra->retry[2] +
+ ra->retry[3] + ra->retry[4] + ra->drop;
+
+ if (ra->total > 0) {
+ if (ra->ra_stage < 5)
+ rtl8188e_rate_decision(ra);
+ else if (ra->ra_stage == 5)
+ rtl8188e_power_training_try_state(ra);
+ else /* ra->ra_stage == 6 */
+ rtl8188e_power_training_decision(ra);
+
+ if (ra->ra_stage <= 5)
+ ra->ra_stage++;
+ else
+ ra->ra_stage = 0;
+ }
+ } else if (macid == 0) {
+ dev_warn(dev, "%s: TX report item 0 not valid\n", __func__);
+ }
+
+ dev_dbg(dev, "%s: valid: %d retry: %d %d %d %d %d drop: %d\n",
+ __func__, valid,
+ ra->retry[0], ra->retry[1], ra->retry[2],
+ ra->retry[3], ra->retry[4], ra->drop);
+
+ if (min_rpt_time > ra->rpt_time)
+ min_rpt_time = ra->rpt_time;
+
+ rpt += TX_RPT2_ITEM_SIZE;
+
+ /*
+ * We only use macid 0, so only the first item is relevant.
+ * AP mode will use more of them if it's ever implemented.
+ */
+ break;
+ }
+
+ if (min_rpt_time != ra->pre_min_rpt_time) {
+ rtl8xxxu_write16(priv, REG_TX_REPORT_TIME, min_rpt_time);
+ ra->pre_min_rpt_time = min_rpt_time;
+ }
+}
+
+static void rtl8188e_arfb_refresh(struct rtl8xxxu_ra_info *ra)
+{
+ s8 i;
+
+ ra->ra_use_rate = ra->rate_mask;
+
+ /* Highest rate */
+ if (ra->ra_use_rate) {
+ for (i = RATESIZE; i >= 0; i--) {
+ if (ra->ra_use_rate & BIT(i)) {
+ ra->highest_rate = i;
+ break;
+ }
+ }
+ } else {
+ ra->highest_rate = 0;
+ }
+
+ /* Lowest rate */
+ if (ra->ra_use_rate) {
+ for (i = 0; i < RATESIZE; i++) {
+ if (ra->ra_use_rate & BIT(i)) {
+ ra->lowest_rate = i;
+ break;
+ }
+ }
+ } else {
+ ra->lowest_rate = 0;
+ }
+
+ if (ra->highest_rate > DESC_RATE_MCS7)
+ ra->pt_mode_ss = 3;
+ else if (ra->highest_rate > DESC_RATE_54M)
+ ra->pt_mode_ss = 2;
+ else if (ra->highest_rate > DESC_RATE_11M)
+ ra->pt_mode_ss = 1;
+ else
+ ra->pt_mode_ss = 0;
+}
+
+static void
+rtl8188e_update_rate_mask(struct rtl8xxxu_priv *priv,
+ u32 ramask, u8 rateid, int sgi, int txbw_40mhz)
+{
+ struct rtl8xxxu_ra_info *ra = &priv->ra_info;
+
+ ra->rate_id = rateid;
+ ra->rate_mask = ramask;
+ ra->sgi_enable = sgi;
+
+ rtl8188e_arfb_refresh(ra);
+}
+
+static void rtl8188e_ra_set_rssi(struct rtl8xxxu_priv *priv, u8 macid, u8 rssi)
+{
+ priv->ra_info.rssi_sta_ra = rssi;
+}
+
+void rtl8188e_ra_info_init_all(struct rtl8xxxu_ra_info *ra)
+{
+ ra->decision_rate = DESC_RATE_MCS7;
+ ra->pre_rate = DESC_RATE_MCS7;
+ ra->highest_rate = DESC_RATE_MCS7;
+ ra->lowest_rate = 0;
+ ra->rate_id = 0;
+ ra->rate_mask = 0xfffff;
+ ra->rssi_sta_ra = 0;
+ ra->pre_rssi_sta_ra = 0;
+ ra->sgi_enable = 0;
+ ra->ra_use_rate = 0xfffff;
+ ra->nsc_down = (n_threshold_high[DESC_RATE_MCS7] + n_threshold_low[DESC_RATE_MCS7]) / 2;
+ ra->nsc_up = (n_threshold_high[DESC_RATE_MCS7] + n_threshold_low[DESC_RATE_MCS7]) / 2;
+ ra->rate_sgi = 0;
+ ra->rpt_time = 0x927c;
+ ra->drop = 0;
+ ra->retry[0] = 0;
+ ra->retry[1] = 0;
+ ra->retry[2] = 0;
+ ra->retry[3] = 0;
+ ra->retry[4] = 0;
+ ra->total = 0;
+ ra->ra_waiting_counter = 0;
+ ra->ra_pending_counter = 0;
+ ra->ra_drop_after_down = 0;
+
+ ra->pt_try_state = 0;
+ ra->pt_stage = 5;
+ ra->pt_smooth_factor = 192;
+ ra->pt_stop_count = 0;
+ ra->pt_pre_rate = 0;
+ ra->pt_pre_rssi = 0;
+ ra->pt_mode_ss = 0;
+ ra->ra_stage = 0;
+}
+
+struct rtl8xxxu_fileops rtl8188eu_fops = {
+ .identify_chip = rtl8188eu_identify_chip,
+ .parse_efuse = rtl8188eu_parse_efuse,
+ .load_firmware = rtl8188eu_load_firmware,
+ .power_on = rtl8188eu_power_on,
+ .power_off = rtl8188eu_power_off,
+ .reset_8051 = rtl8188eu_reset_8051,
+ .llt_init = rtl8xxxu_init_llt_table,
+ .init_phy_bb = rtl8188eu_init_phy_bb,
+ .init_phy_rf = rtl8188eu_init_phy_rf,
+ .phy_lc_calibrate = rtl8723a_phy_lc_calibrate,
+ .phy_iq_calibrate = rtl8188eu_phy_iq_calibrate,
+ .config_channel = rtl8188eu_config_channel,
+ .parse_rx_desc = rtl8xxxu_parse_rxdesc16,
+ .init_aggregation = rtl8188eu_init_aggregation,
+ .enable_rf = rtl8188e_enable_rf,
+ .disable_rf = rtl8188e_disable_rf,
+ .usb_quirks = rtl8188e_usb_quirks,
+ .set_tx_power = rtl8188f_set_tx_power,
+ .update_rate_mask = rtl8188e_update_rate_mask,
+ .report_connect = rtl8xxxu_gen2_report_connect,
+ .report_rssi = rtl8188e_ra_set_rssi,
+ .fill_txdesc = rtl8xxxu_fill_txdesc_v3,
+ .set_crystal_cap = rtl8188f_set_crystal_cap,
+ .cck_rssi = rtl8188e_cck_rssi,
+ .led_classdev_brightness_set = rtl8188eu_led_brightness_set,
+ .writeN_block_size = 128,
+ .rx_desc_size = sizeof(struct rtl8xxxu_rxdesc16),
+ .tx_desc_size = sizeof(struct rtl8xxxu_txdesc32),
+ .has_tx_report = 1,
+ .gen2_thermal_meter = 1,
+ .adda_1t_init = 0x0b1b25a0,
+ .adda_1t_path_on = 0x0bdb25a0,
+ /*
+ * Use 9K for 8188e normal chip
+ * Max RX buffer = 10K - max(TxReportSize(64*8), WOLPattern(16*24))
+ */
+ .trxff_boundary = 0x25ff,
+ .pbp_rx = PBP_PAGE_SIZE_128,
+ .pbp_tx = PBP_PAGE_SIZE_128,
+ .mactable = rtl8188e_mac_init_table,
+ .total_page_num = TX_TOTAL_PAGE_NUM_8188E,
+ .page_num_hi = TX_PAGE_NUM_HI_PQ_8188E,
+ .page_num_lo = TX_PAGE_NUM_LO_PQ_8188E,
+ .page_num_norm = TX_PAGE_NUM_NORM_PQ_8188E,
+ .last_llt_entry = 175,
+};
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8188f.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8188f.c
index 2c4f403ba68f..af6e2c8a5025 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8188f.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8188f.c
@@ -370,7 +370,7 @@ static void rtl8188f_channel_to_group(int channel, int *group, int *cck_group)
*cck_group = *group;
}
-static void
+void
rtl8188f_set_tx_power(struct rtl8xxxu_priv *priv, int channel, bool ht40)
{
u32 val32, ofdm, mcs;
@@ -716,7 +716,6 @@ static void rtl8188fu_init_statistics(struct rtl8xxxu_priv *priv)
static int rtl8188fu_parse_efuse(struct rtl8xxxu_priv *priv)
{
struct rtl8188fu_efuse *efuse = &priv->efuse_wifi.efuse8188fu;
- int i;
if (efuse->rtl_id != cpu_to_le16(0x8129))
return -EINVAL;
@@ -738,22 +737,12 @@ static int rtl8188fu_parse_efuse(struct rtl8xxxu_priv *priv)
dev_info(&priv->udev->dev, "Vendor: %.7s\n", efuse->vendor_name);
dev_info(&priv->udev->dev, "Product: %.7s\n", efuse->device_name);
- if (rtl8xxxu_debug & RTL8XXXU_DEBUG_EFUSE) {
- unsigned char *raw = priv->efuse_wifi.raw;
-
- dev_info(&priv->udev->dev,
- "%s: dumping efuse (0x%02zx bytes):\n",
- __func__, sizeof(struct rtl8188fu_efuse));
- for (i = 0; i < sizeof(struct rtl8188fu_efuse); i += 8)
- dev_info(&priv->udev->dev, "%02x: %8ph\n", i, &raw[i]);
- }
-
return 0;
}
static int rtl8188fu_load_firmware(struct rtl8xxxu_priv *priv)
{
- char *fw_name;
+ const char *fw_name;
int ret;
fw_name = "rtlwifi/rtl8188fufw.bin";
@@ -1122,7 +1111,7 @@ static void rtl8188fu_phy_iqcalibrate(struct rtl8xxxu_priv *priv,
if (t == 0) {
val32 = rtl8xxxu_read32(priv, REG_FPGA0_XA_HSSI_PARM1);
- priv->pi_enabled = val32 & FPGA0_HSSI_PARM1_PI;
+ priv->pi_enabled = u32_get_bits(val32, FPGA0_HSSI_PARM1_PI);
}
/* save RF path */
@@ -1662,7 +1651,7 @@ static void rtl8188f_usb_quirks(struct rtl8xxxu_priv *priv)
#define XTAL1 GENMASK(22, 17)
#define XTAL0 GENMASK(16, 11)
-static void rtl8188f_set_crystal_cap(struct rtl8xxxu_priv *priv, u8 crystal_cap)
+void rtl8188f_set_crystal_cap(struct rtl8xxxu_priv *priv, u8 crystal_cap)
{
struct rtl8xxxu_cfo_tracking *cfo = &priv->cfo_tracking;
u32 val32;
@@ -1693,8 +1682,8 @@ static s8 rtl8188f_cck_rssi(struct rtl8xxxu_priv *priv, u8 cck_agc_rpt)
s8 rx_pwr_all = 0x00;
u8 vga_idx, lna_idx;
- lna_idx = (cck_agc_rpt & 0xE0) >> 5;
- vga_idx = cck_agc_rpt & 0x1F;
+ lna_idx = u8_get_bits(cck_agc_rpt, CCK_AGC_RPT_LNA_IDX_MASK);
+ vga_idx = u8_get_bits(cck_agc_rpt, CCK_AGC_RPT_VGA_IDX_MASK);
switch (lna_idx) {
case 7:
@@ -1743,6 +1732,7 @@ struct rtl8xxxu_fileops rtl8188fu_fops = {
.set_tx_power = rtl8188f_set_tx_power,
.update_rate_mask = rtl8xxxu_gen2_update_rate_mask,
.report_connect = rtl8xxxu_gen2_report_connect,
+ .report_rssi = rtl8xxxu_gen2_report_rssi,
.fill_txdesc = rtl8xxxu_fill_txdesc_v2,
.set_crystal_cap = rtl8188f_set_crystal_cap,
.cck_rssi = rtl8188f_cck_rssi,
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192c.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192c.c
index 3bef9ffc8b02..e61d65c3579b 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192c.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192c.c
@@ -386,7 +386,7 @@ out:
static int rtl8192cu_load_firmware(struct rtl8xxxu_priv *priv)
{
- char *fw_name;
+ const char *fw_name;
int ret;
if (!priv->vendor_umc)
@@ -404,7 +404,6 @@ static int rtl8192cu_load_firmware(struct rtl8xxxu_priv *priv)
static int rtl8192cu_parse_efuse(struct rtl8xxxu_priv *priv)
{
struct rtl8192cu_efuse *efuse = &priv->efuse_wifi.efuse8192;
- int i;
if (efuse->rtl_id != cpu_to_le16(0x8129))
return -EINVAL;
@@ -457,15 +456,6 @@ static int rtl8192cu_parse_efuse(struct rtl8xxxu_priv *priv)
priv->power_base = &rtl8188r_power_base;
}
- if (rtl8xxxu_debug & RTL8XXXU_DEBUG_EFUSE) {
- unsigned char *raw = priv->efuse_wifi.raw;
-
- dev_info(&priv->udev->dev,
- "%s: dumping efuse (0x%02zx bytes):\n",
- __func__, sizeof(struct rtl8192cu_efuse));
- for (i = 0; i < sizeof(struct rtl8192cu_efuse); i += 8)
- dev_info(&priv->udev->dev, "%02x: %8ph\n", i, &raw[i]);
- }
return 0;
}
@@ -619,6 +609,7 @@ struct rtl8xxxu_fileops rtl8192cu_fops = {
.set_tx_power = rtl8xxxu_gen1_set_tx_power,
.update_rate_mask = rtl8xxxu_update_rate_mask,
.report_connect = rtl8xxxu_gen1_report_connect,
+ .report_rssi = rtl8xxxu_gen1_report_rssi,
.fill_txdesc = rtl8xxxu_fill_txdesc_v1,
.cck_rssi = rtl8723a_cck_rssi,
.writeN_block_size = 128,
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
index a7d76693c02d..5cfc00237f42 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
@@ -704,21 +704,12 @@ static int rtl8192eu_parse_efuse(struct rtl8xxxu_priv *priv)
rtl8192eu_log_next_device_info(priv, "Product", efuse->device_info, &record_offset);
rtl8192eu_log_next_device_info(priv, "Serial", efuse->device_info, &record_offset);
- if (rtl8xxxu_debug & RTL8XXXU_DEBUG_EFUSE) {
- unsigned char *raw = priv->efuse_wifi.raw;
-
- dev_info(&priv->udev->dev,
- "%s: dumping efuse (0x%02zx bytes):\n",
- __func__, sizeof(struct rtl8192eu_efuse));
- for (i = 0; i < sizeof(struct rtl8192eu_efuse); i += 8)
- dev_info(&priv->udev->dev, "%02x: %8ph\n", i, &raw[i]);
- }
return 0;
}
static int rtl8192eu_load_firmware(struct rtl8xxxu_priv *priv)
{
- char *fw_name;
+ const char *fw_name;
int ret;
fw_name = "rtlwifi/rtl8192eu_nic.bin";
@@ -1744,6 +1735,11 @@ static void rtl8192e_enable_rf(struct rtl8xxxu_priv *priv)
val8 = rtl8xxxu_read8(priv, REG_PAD_CTRL1);
val8 &= ~BIT(0);
rtl8xxxu_write8(priv, REG_PAD_CTRL1, val8);
+
+ /*
+ * Fix transmission failure of rtl8192e.
+ */
+ rtl8xxxu_write8(priv, REG_TXPAUSE, 0x00);
}
static s8 rtl8192e_cck_rssi(struct rtl8xxxu_priv *priv, u8 cck_agc_rpt)
@@ -1755,8 +1751,8 @@ static s8 rtl8192e_cck_rssi(struct rtl8xxxu_priv *priv, u8 cck_agc_rpt)
u8 vga_idx, lna_idx;
s8 lna_gain = 0;
- lna_idx = (cck_agc_rpt & 0xE0) >> 5;
- vga_idx = cck_agc_rpt & 0x1F;
+ lna_idx = u8_get_bits(cck_agc_rpt, CCK_AGC_RPT_LNA_IDX_MASK);
+ vga_idx = u8_get_bits(cck_agc_rpt, CCK_AGC_RPT_VGA_IDX_MASK);
if (priv->cck_agc_report_type == 0)
lna_gain = lna_gain_table_0[lna_idx];
@@ -1768,6 +1764,29 @@ static s8 rtl8192e_cck_rssi(struct rtl8xxxu_priv *priv, u8 cck_agc_rpt)
return rx_pwr_all;
}
+static int rtl8192eu_led_brightness_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct rtl8xxxu_priv *priv = container_of(led_cdev,
+ struct rtl8xxxu_priv,
+ led_cdev);
+ u8 ledcfg = rtl8xxxu_read8(priv, REG_LEDCFG1);
+
+ if (brightness == LED_OFF) {
+ ledcfg &= ~LEDCFG1_HW_LED_CONTROL;
+ ledcfg |= LEDCFG1_LED_DISABLE;
+ } else if (brightness == LED_ON) {
+ ledcfg &= ~(LEDCFG1_HW_LED_CONTROL | LEDCFG1_LED_DISABLE);
+ } else if (brightness == RTL8XXXU_HW_LED_CONTROL) {
+ ledcfg &= ~LEDCFG1_LED_DISABLE;
+ ledcfg |= LEDCFG1_HW_LED_CONTROL;
+ }
+
+ rtl8xxxu_write8(priv, REG_LEDCFG1, ledcfg);
+
+ return 0;
+}
+
struct rtl8xxxu_fileops rtl8192eu_fops = {
.identify_chip = rtl8192eu_identify_chip,
.parse_efuse = rtl8192eu_parse_efuse,
@@ -1788,9 +1807,11 @@ struct rtl8xxxu_fileops rtl8192eu_fops = {
.set_tx_power = rtl8192e_set_tx_power,
.update_rate_mask = rtl8xxxu_gen2_update_rate_mask,
.report_connect = rtl8xxxu_gen2_report_connect,
+ .report_rssi = rtl8xxxu_gen2_report_rssi,
.fill_txdesc = rtl8xxxu_fill_txdesc_v2,
.set_crystal_cap = rtl8723a_set_crystal_cap,
.cck_rssi = rtl8192e_cck_rssi,
+ .led_classdev_brightness_set = rtl8192eu_led_brightness_set,
.writeN_block_size = 128,
.tx_desc_size = sizeof(struct rtl8xxxu_txdesc40),
.rx_desc_size = sizeof(struct rtl8xxxu_rxdesc24),
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723a.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723a.c
index 707ac48ecc83..5e7b58d395ba 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723a.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723a.c
@@ -231,7 +231,7 @@ static int rtl8723au_parse_efuse(struct rtl8xxxu_priv *priv)
static int rtl8723au_load_firmware(struct rtl8xxxu_priv *priv)
{
- char *fw_name;
+ const char *fw_name;
int ret;
switch (priv->chip_cut) {
@@ -457,6 +457,30 @@ s8 rtl8723a_cck_rssi(struct rtl8xxxu_priv *priv, u8 cck_agc_rpt)
return rx_pwr_all;
}
+static int rtl8723au_led_brightness_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct rtl8xxxu_priv *priv = container_of(led_cdev,
+ struct rtl8xxxu_priv,
+ led_cdev);
+ u8 ledcfg = rtl8xxxu_read8(priv, REG_LEDCFG2);
+
+ if (brightness == LED_OFF) {
+ ledcfg &= ~LEDCFG2_HW_LED_CONTROL;
+ ledcfg |= LEDCFG2_SW_LED_CONTROL | LEDCFG2_SW_LED_DISABLE;
+ } else if (brightness == LED_ON) {
+ ledcfg &= ~(LEDCFG2_HW_LED_CONTROL | LEDCFG2_SW_LED_DISABLE);
+ ledcfg |= LEDCFG2_SW_LED_CONTROL;
+ } else if (brightness == RTL8XXXU_HW_LED_CONTROL) {
+ ledcfg &= ~LEDCFG2_SW_LED_DISABLE;
+ ledcfg |= LEDCFG2_HW_LED_CONTROL | LEDCFG2_HW_LED_ENABLE;
+ }
+
+ rtl8xxxu_write8(priv, REG_LEDCFG2, ledcfg);
+
+ return 0;
+}
+
struct rtl8xxxu_fileops rtl8723au_fops = {
.identify_chip = rtl8723au_identify_chip,
.parse_efuse = rtl8723au_parse_efuse,
@@ -478,9 +502,11 @@ struct rtl8xxxu_fileops rtl8723au_fops = {
.set_tx_power = rtl8xxxu_gen1_set_tx_power,
.update_rate_mask = rtl8xxxu_update_rate_mask,
.report_connect = rtl8xxxu_gen1_report_connect,
+ .report_rssi = rtl8xxxu_gen1_report_rssi,
.fill_txdesc = rtl8xxxu_fill_txdesc_v1,
.set_crystal_cap = rtl8723a_set_crystal_cap,
.cck_rssi = rtl8723a_cck_rssi,
+ .led_classdev_brightness_set = rtl8723au_led_brightness_set,
.writeN_block_size = 1024,
.rx_agg_buf_size = 16000,
.tx_desc_size = sizeof(struct rtl8xxxu_txdesc32),
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c
index a0ec895b61a4..21613d60dc22 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c
@@ -497,23 +497,12 @@ static int rtl8723bu_parse_efuse(struct rtl8xxxu_priv *priv)
dev_info(&priv->udev->dev, "Vendor: %.7s\n", efuse->vendor_name);
dev_info(&priv->udev->dev, "Product: %.41s\n", efuse->device_name);
- if (rtl8xxxu_debug & RTL8XXXU_DEBUG_EFUSE) {
- int i;
- unsigned char *raw = priv->efuse_wifi.raw;
-
- dev_info(&priv->udev->dev,
- "%s: dumping efuse (0x%02zx bytes):\n",
- __func__, sizeof(struct rtl8723bu_efuse));
- for (i = 0; i < sizeof(struct rtl8723bu_efuse); i += 8)
- dev_info(&priv->udev->dev, "%02x: %8ph\n", i, &raw[i]);
- }
-
return 0;
}
static int rtl8723bu_load_firmware(struct rtl8xxxu_priv *priv)
{
- char *fw_name;
+ const char *fw_name;
int ret;
if (priv->enable_bluetooth)
@@ -1691,8 +1680,8 @@ static s8 rtl8723b_cck_rssi(struct rtl8xxxu_priv *priv, u8 cck_agc_rpt)
s8 rx_pwr_all = 0x00;
u8 vga_idx, lna_idx;
- lna_idx = (cck_agc_rpt & 0xE0) >> 5;
- vga_idx = cck_agc_rpt & 0x1F;
+ lna_idx = u8_get_bits(cck_agc_rpt, CCK_AGC_RPT_LNA_IDX_MASK);
+ vga_idx = u8_get_bits(cck_agc_rpt, CCK_AGC_RPT_VGA_IDX_MASK);
switch (lna_idx) {
case 6:
@@ -1738,6 +1727,7 @@ struct rtl8xxxu_fileops rtl8723bu_fops = {
.set_tx_power = rtl8723b_set_tx_power,
.update_rate_mask = rtl8xxxu_gen2_update_rate_mask,
.report_connect = rtl8xxxu_gen2_report_connect,
+ .report_rssi = rtl8xxxu_gen2_report_rssi,
.fill_txdesc = rtl8xxxu_fill_txdesc_v2,
.set_crystal_cap = rtl8723a_set_crystal_cap,
.cck_rssi = rtl8723b_cck_rssi,
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
index 3ed435401e57..620a5cc2bfdd 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
@@ -34,7 +34,7 @@
#define DRIVER_NAME "rtl8xxxu"
-int rtl8xxxu_debug = RTL8XXXU_DEBUG_EFUSE;
+int rtl8xxxu_debug;
static bool rtl8xxxu_ht40_2g;
static bool rtl8xxxu_dma_aggregation;
static int rtl8xxxu_dma_agg_timeout = -1;
@@ -46,6 +46,7 @@ MODULE_LICENSE("GPL");
MODULE_FIRMWARE("rtlwifi/rtl8723aufw_A.bin");
MODULE_FIRMWARE("rtlwifi/rtl8723aufw_B.bin");
MODULE_FIRMWARE("rtlwifi/rtl8723aufw_B_NoBT.bin");
+MODULE_FIRMWARE("rtlwifi/rtl8188eufw.bin");
MODULE_FIRMWARE("rtlwifi/rtl8192cufw_A.bin");
MODULE_FIRMWARE("rtlwifi/rtl8192cufw_B.bin");
MODULE_FIRMWARE("rtlwifi/rtl8192cufw_TMSC.bin");
@@ -1581,10 +1582,11 @@ static void rtl8xxxu_print_chipinfo(struct rtl8xxxu_priv *priv)
cut = 'A' + priv->chip_cut;
dev_info(dev,
- "RTL%s rev %c (%s) %iT%iR, TX queues %i, WiFi=%i, BT=%i, GPS=%i, HI PA=%i\n",
- priv->chip_name, cut, priv->chip_vendor, priv->tx_paths,
- priv->rx_paths, priv->ep_tx_count, priv->has_wifi,
- priv->has_bluetooth, priv->has_gps, priv->hi_pa);
+ "RTL%s rev %c (%s) romver %d, %iT%iR, TX queues %i, WiFi=%i, BT=%i, GPS=%i, HI PA=%i\n",
+ priv->chip_name, cut, priv->chip_vendor, priv->rom_rev,
+ priv->tx_paths, priv->rx_paths, priv->ep_tx_count,
+ priv->has_wifi, priv->has_bluetooth, priv->has_gps,
+ priv->hi_pa);
dev_info(dev, "RTL%s MAC: %pM\n", priv->chip_name, priv->mac_addr);
}
@@ -1813,6 +1815,16 @@ exit:
return ret;
}
+static void rtl8xxxu_dump_efuse(struct rtl8xxxu_priv *priv)
+{
+ dev_info(&priv->udev->dev,
+ "Dumping efuse for RTL%s (0x%02x bytes):\n",
+ priv->chip_name, EFUSE_MAP_LEN);
+
+ print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 1,
+ priv->efuse_wifi.raw, EFUSE_MAP_LEN, true);
+}
+
void rtl8xxxu_reset_8051(struct rtl8xxxu_priv *priv)
{
u8 val8;
@@ -1970,7 +1982,7 @@ fw_abort:
return ret;
}
-int rtl8xxxu_load_firmware(struct rtl8xxxu_priv *priv, char *fw_name)
+int rtl8xxxu_load_firmware(struct rtl8xxxu_priv *priv, const char *fw_name)
{
struct device *dev = &priv->udev->dev;
const struct firmware *fw;
@@ -2000,6 +2012,7 @@ int rtl8xxxu_load_firmware(struct rtl8xxxu_priv *priv, char *fw_name)
switch (signature & 0xfff0) {
case 0x92e0:
case 0x92c0:
+ case 0x88e0:
case 0x88c0:
case 0x5300:
case 0x2300:
@@ -2071,10 +2084,20 @@ rtl8xxxu_init_mac(struct rtl8xxxu_priv *priv)
}
}
- if (priv->rtl_chip != RTL8723B &&
- priv->rtl_chip != RTL8192E &&
- priv->rtl_chip != RTL8188F)
+ switch (priv->rtl_chip) {
+ case RTL8188C:
+ case RTL8188R:
+ case RTL8191C:
+ case RTL8192C:
+ case RTL8723A:
rtl8xxxu_write8(priv, REG_MAX_AGGR_NUM, 0x0a);
+ break;
+ case RTL8188E:
+ rtl8xxxu_write16(priv, REG_MAX_AGGR_NUM, 0x0707);
+ break;
+ default:
+ break;
+ }
return 0;
}
@@ -2373,11 +2396,16 @@ static int rtl8xxxu_llt_write(struct rtl8xxxu_priv *priv, u8 address, u8 data)
int rtl8xxxu_init_llt_table(struct rtl8xxxu_priv *priv)
{
int ret;
- int i;
+ int i, last_entry;
u8 last_tx_page;
last_tx_page = priv->fops->total_page_num;
+ if (priv->fops->last_llt_entry)
+ last_entry = priv->fops->last_llt_entry;
+ else
+ last_entry = 255;
+
for (i = 0; i < last_tx_page; i++) {
ret = rtl8xxxu_llt_write(priv, i, i + 1);
if (ret)
@@ -2389,14 +2417,14 @@ int rtl8xxxu_init_llt_table(struct rtl8xxxu_priv *priv)
goto exit;
/* Mark remaining pages as a ring buffer */
- for (i = last_tx_page + 1; i < 0xff; i++) {
+ for (i = last_tx_page + 1; i < last_entry; i++) {
ret = rtl8xxxu_llt_write(priv, i, (i + 1));
if (ret)
goto exit;
}
/* Let last entry point to the start entry of ring buffer */
- ret = rtl8xxxu_llt_write(priv, 0xff, last_tx_page + 1);
+ ret = rtl8xxxu_llt_write(priv, last_entry, last_tx_page + 1);
if (ret)
goto exit;
@@ -2704,8 +2732,8 @@ void rtl8xxxu_fill_iqk_matrix_b(struct rtl8xxxu_priv *priv, bool iqk_ok,
#define MAX_TOLERANCE 5
-static bool rtl8xxxu_simularity_compare(struct rtl8xxxu_priv *priv,
- int result[][8], int c1, int c2)
+bool rtl8xxxu_simularity_compare(struct rtl8xxxu_priv *priv,
+ int result[][8], int c1, int c2)
{
u32 i, j, diff, simubitmap, bound = 0;
int candidate[2] = {-1, -1}; /* for path A and path B */
@@ -3898,7 +3926,8 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
goto exit;
/* RFSW Control - clear bit 14 ?? */
- if (priv->rtl_chip != RTL8723B && priv->rtl_chip != RTL8192E)
+ if (priv->rtl_chip != RTL8723B && priv->rtl_chip != RTL8192E &&
+ priv->rtl_chip != RTL8188E)
rtl8xxxu_write32(priv, REG_FPGA0_TX_INFO, 0x00000003);
val32 = FPGA0_RF_TRSW | FPGA0_RF_TRSWB | FPGA0_RF_ANTSW |
@@ -3911,7 +3940,7 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
rtl8xxxu_write32(priv, REG_FPGA0_XAB_RF_SW_CTRL, val32);
/* 0x860[6:5]= 00 - why? - this sets antenna B */
- if (priv->rtl_chip != RTL8192E)
+ if (priv->rtl_chip != RTL8192E && priv->rtl_chip != RTL8188E)
rtl8xxxu_write32(priv, REG_FPGA0_XA_RF_INT_OE, 0x66f60210);
if (!macpower) {
@@ -3953,7 +3982,25 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
* Enable TX report and TX report timer for 8723bu/8188eu/...
*/
if (fops->has_tx_report) {
+ /*
+ * The RTL8188EU has two types of TX reports:
+ * rpt_sel=1:
+ * One report for one frame. We can use this for frames
+ * with IEEE80211_TX_CTL_REQ_TX_STATUS.
+ * rpt_sel=2:
+ * One report for many frames transmitted over a period
+ * of time. (This is what REG_TX_REPORT_TIME is for.) The
+ * report includes the number of frames transmitted
+ * successfully, and the number of unsuccessful
+ * transmissions. We use this for software rate control.
+ *
+ * Bit 0 of REG_TX_REPORT_CTRL is required for both types.
+ * Bit 1 (TX_REPORT_CTRL_TIMER_ENABLE) is required for
+ * type 2.
+ */
val8 = rtl8xxxu_read8(priv, REG_TX_REPORT_CTRL);
+ if (priv->rtl_chip == RTL8188E)
+ val8 |= BIT(0);
val8 |= TX_REPORT_CTRL_TIMER_ENABLE;
rtl8xxxu_write8(priv, REG_TX_REPORT_CTRL, val8);
/* Set MAX RPT MACID */
@@ -3979,6 +4026,15 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
} else if (priv->rtl_chip == RTL8188F) {
rtl8xxxu_write32(priv, REG_HISR0, 0xffffffff);
rtl8xxxu_write32(priv, REG_HISR1, 0xffffffff);
+ } else if (priv->rtl_chip == RTL8188E) {
+ rtl8xxxu_write32(priv, REG_HISR0, 0xffffffff);
+ val32 = IMR0_PSTIMEOUT | IMR0_TBDER | IMR0_CPWM | IMR0_CPWM2;
+ rtl8xxxu_write32(priv, REG_HIMR0, val32);
+ val32 = IMR1_TXERR | IMR1_RXERR | IMR1_TXFOVW | IMR1_RXFOVW;
+ rtl8xxxu_write32(priv, REG_HIMR1, val32);
+ val8 = rtl8xxxu_read8(priv, REG_USB_SPECIAL_OPTION);
+ val8 |= USB_SPEC_INT_BULK_SELECT;
+ rtl8xxxu_write8(priv, REG_USB_SPECIAL_OPTION, val8);
} else {
/*
* Enable all interrupts - not obvious USB needs to do this
@@ -4084,7 +4140,7 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
if (fops->init_aggregation)
fops->init_aggregation(priv);
- if (priv->rtl_chip == RTL8188F) {
+ if (priv->rtl_chip == RTL8188F || priv->rtl_chip == RTL8188E) {
rtl8xxxu_write16(priv, REG_PKT_VO_VI_LIFE_TIME, 0x0400); /* unit: 256us. 256ms */
rtl8xxxu_write16(priv, REG_PKT_BE_BK_LIFE_TIME, 0x0400); /* unit: 256us. 256ms */
}
@@ -4118,7 +4174,7 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
/* Disable BAR - not sure if this has any effect on USB */
rtl8xxxu_write32(priv, REG_BAR_MODE_CTRL, 0x0201ffff);
- if (priv->rtl_chip != RTL8188F)
+ if (priv->rtl_chip != RTL8188F && priv->rtl_chip != RTL8188E)
rtl8xxxu_write16(priv, REG_FAST_EDCA_CTRL, 0);
if (fops->init_statistics)
@@ -4136,9 +4192,9 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
* Reset USB mode switch setting
*/
rtl8xxxu_write8(priv, REG_ACLK_MON, 0x00);
- } else if (priv->rtl_chip == RTL8188F) {
+ } else if (priv->rtl_chip == RTL8188F || priv->rtl_chip == RTL8188E) {
/*
- * Init GPIO settings for 8188f
+ * Init GPIO settings for 8188f, 8188e
*/
val8 = rtl8xxxu_read8(priv, REG_GPIO_MUXCFG);
val8 &= ~GPIO_MUXCFG_IO_SEL_ENBT;
@@ -4184,7 +4240,7 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
val32 |= FPGA_RF_MODE_CCK;
rtl8xxxu_write32(priv, REG_FPGA0_RF_MODE, val32);
}
- } else if (priv->rtl_chip == RTL8192E) {
+ } else if (priv->rtl_chip == RTL8192E || priv->rtl_chip == RTL8188E) {
rtl8xxxu_write8(priv, REG_USB_HRPWM, 0x00);
}
@@ -4208,10 +4264,12 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
* should be equal or CCK RSSI report may be incorrect
*/
val32 = rtl8xxxu_read32(priv, REG_FPGA0_XA_HSSI_PARM2);
- priv->cck_agc_report_type = val32 & FPGA0_HSSI_PARM2_CCK_HIGH_PWR;
+ priv->cck_agc_report_type =
+ u32_get_bits(val32, FPGA0_HSSI_PARM2_CCK_HIGH_PWR);
val32 = rtl8xxxu_read32(priv, REG_FPGA0_XB_HSSI_PARM2);
- if (priv->cck_agc_report_type != (bool)(val32 & FPGA0_HSSI_PARM2_CCK_HIGH_PWR)) {
+ if (priv->cck_agc_report_type !=
+ u32_get_bits(val32, FPGA0_HSSI_PARM2_CCK_HIGH_PWR)) {
if (priv->cck_agc_report_type)
val32 |= FPGA0_HSSI_PARM2_CCK_HIGH_PWR;
else
@@ -4235,6 +4293,9 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
priv->cfo_tracking.crystal_cap = priv->default_crystal_cap;
}
+ if (priv->rtl_chip == RTL8188E)
+ rtl8188e_ra_info_init_all(&priv->ra_info);
+
exit:
return ret;
}
@@ -4401,6 +4462,37 @@ void rtl8xxxu_gen2_report_connect(struct rtl8xxxu_priv *priv,
rtl8xxxu_gen2_h2c_cmd(priv, &h2c, sizeof(h2c.media_status_rpt));
}
+void rtl8xxxu_gen1_report_rssi(struct rtl8xxxu_priv *priv, u8 macid, u8 rssi)
+{
+ struct h2c_cmd h2c;
+ const int h2c_size = 4;
+
+ memset(&h2c, 0, sizeof(struct h2c_cmd));
+
+ h2c.rssi_report.cmd = H2C_SET_RSSI;
+ h2c.rssi_report.macid = macid;
+ h2c.rssi_report.rssi = rssi;
+
+ rtl8xxxu_gen1_h2c_cmd(priv, &h2c, h2c_size);
+}
+
+void rtl8xxxu_gen2_report_rssi(struct rtl8xxxu_priv *priv, u8 macid, u8 rssi)
+{
+ struct h2c_cmd h2c;
+ int h2c_size = sizeof(h2c.rssi_report);
+
+ if (priv->rtl_chip == RTL8723B)
+ h2c_size = 4;
+
+ memset(&h2c, 0, sizeof(struct h2c_cmd));
+
+ h2c.rssi_report.cmd = H2C_8723B_RSSI_SETTING;
+ h2c.rssi_report.macid = macid;
+ h2c.rssi_report.rssi = rssi;
+
+ rtl8xxxu_gen2_h2c_cmd(priv, &h2c, h2c_size);
+}
+
void rtl8xxxu_gen1_init_aggregation(struct rtl8xxxu_priv *priv)
{
u8 agg_ctrl, usb_spec, page_thresh, timeout;
@@ -4598,8 +4690,8 @@ static void rtl8xxxu_set_aifs(struct rtl8xxxu_priv *priv, u8 slot_time)
}
}
-static void rtl8xxxu_update_ra_report(struct rtl8xxxu_ra_report *rarpt,
- u8 rate, u8 sgi, u8 bw)
+void rtl8xxxu_update_ra_report(struct rtl8xxxu_ra_report *rarpt,
+ u8 rate, u8 sgi, u8 bw)
{
u8 mcs, nss;
@@ -5069,6 +5161,98 @@ rtl8xxxu_fill_txdesc_v2(struct ieee80211_hw *hw, struct ieee80211_hdr *hdr,
}
}
+/*
+ * Fill in v3 (gen1) specific TX descriptor bits.
+ * This format is a hybrid between the v1 and v2 formats, only seen
+ * on 8188eu devices so far.
+ */
+void
+rtl8xxxu_fill_txdesc_v3(struct ieee80211_hw *hw, struct ieee80211_hdr *hdr,
+ struct ieee80211_tx_info *tx_info,
+ struct rtl8xxxu_txdesc32 *tx_desc, bool sgi,
+ bool short_preamble, bool ampdu_enable, u32 rts_rate)
+{
+ struct ieee80211_rate *tx_rate = ieee80211_get_tx_rate(hw, tx_info);
+ struct rtl8xxxu_priv *priv = hw->priv;
+ struct device *dev = &priv->udev->dev;
+ struct rtl8xxxu_ra_info *ra = &priv->ra_info;
+ u8 *qc = ieee80211_get_qos_ctl(hdr);
+ u8 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
+ u32 rate;
+ u16 rate_flags = tx_info->control.rates[0].flags;
+ u16 seq_number;
+
+ if (rate_flags & IEEE80211_TX_RC_MCS &&
+ !ieee80211_is_mgmt(hdr->frame_control))
+ rate = tx_info->control.rates[0].idx + DESC_RATE_MCS0;
+ else
+ rate = tx_rate->hw_value;
+
+ seq_number = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
+
+ if (ieee80211_is_data(hdr->frame_control)) {
+ rate = ra->decision_rate;
+ tx_desc->txdw5 = cpu_to_le32(rate);
+ tx_desc->txdw4 |= cpu_to_le32(TXDESC32_USE_DRIVER_RATE);
+ tx_desc->txdw4 |= le32_encode_bits(ra->pt_stage, TXDESC32_PT_STAGE_MASK);
+ /* Data/RTS rate FB limit */
+ tx_desc->txdw5 |= cpu_to_le32(0x0001ff00);
+ }
+
+ if (rtl8xxxu_debug & RTL8XXXU_DEBUG_TX)
+ dev_info(dev, "%s: TX rate: %d, pkt size %d\n",
+ __func__, rate, le16_to_cpu(tx_desc->pkt_size));
+
+ tx_desc->txdw3 = cpu_to_le32((u32)seq_number << TXDESC32_SEQ_SHIFT);
+
+ if (ampdu_enable && test_bit(tid, priv->tid_tx_operational))
+ tx_desc->txdw2 |= cpu_to_le32(TXDESC40_AGG_ENABLE);
+ else
+ tx_desc->txdw2 |= cpu_to_le32(TXDESC40_AGG_BREAK);
+
+ if (ieee80211_is_mgmt(hdr->frame_control)) {
+ tx_desc->txdw5 = cpu_to_le32(rate);
+ tx_desc->txdw4 |= cpu_to_le32(TXDESC32_USE_DRIVER_RATE);
+ tx_desc->txdw5 |= cpu_to_le32(6 << TXDESC32_RETRY_LIMIT_SHIFT);
+ tx_desc->txdw5 |= cpu_to_le32(TXDESC32_RETRY_LIMIT_ENABLE);
+ }
+
+ if (ieee80211_is_data_qos(hdr->frame_control)) {
+ tx_desc->txdw4 |= cpu_to_le32(TXDESC32_QOS);
+
+ if (conf_is_ht40(&hw->conf)) {
+ tx_desc->txdw4 |= cpu_to_le32(TXDESC_DATA_BW);
+
+ if (conf_is_ht40_minus(&hw->conf))
+ tx_desc->txdw4 |= cpu_to_le32(TXDESC_PRIME_CH_OFF_UPPER);
+ else
+ tx_desc->txdw4 |= cpu_to_le32(TXDESC_PRIME_CH_OFF_LOWER);
+ }
+ }
+
+ if (short_preamble)
+ tx_desc->txdw4 |= cpu_to_le32(TXDESC32_SHORT_PREAMBLE);
+
+ if (sgi && ra->rate_sgi)
+ tx_desc->txdw5 |= cpu_to_le32(TXDESC32_SHORT_GI);
+
+ /*
+ * rts_rate is zero if RTS/CTS or CTS to SELF are not enabled
+ */
+ tx_desc->txdw4 |= cpu_to_le32(rts_rate << TXDESC32_RTS_RATE_SHIFT);
+ if (ampdu_enable || (rate_flags & IEEE80211_TX_RC_USE_RTS_CTS)) {
+ tx_desc->txdw4 |= cpu_to_le32(TXDESC32_RTS_CTS_ENABLE);
+ tx_desc->txdw4 |= cpu_to_le32(TXDESC32_HW_RTS_ENABLE);
+ } else if (rate_flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
+ tx_desc->txdw4 |= cpu_to_le32(TXDESC32_CTS_SELF_ENABLE);
+ tx_desc->txdw4 |= cpu_to_le32(TXDESC32_HW_RTS_ENABLE);
+ }
+
+ tx_desc->txdw2 |= cpu_to_le32(TXDESC_ANTENNA_SELECT_A |
+ TXDESC_ANTENNA_SELECT_B);
+ tx_desc->txdw7 |= cpu_to_le16(TXDESC_ANTENNA_SELECT_C >> 16);
+}
+
static void rtl8xxxu_tx(struct ieee80211_hw *hw,
struct ieee80211_tx_control *control,
struct sk_buff *skb)
@@ -5274,7 +5458,7 @@ static void rtl8xxxu_queue_rx_urb(struct rtl8xxxu_priv *priv,
pending = priv->rx_urb_pending_count;
} else {
skb = (struct sk_buff *)rx_urb->urb.context;
- dev_kfree_skb(skb);
+ dev_kfree_skb_irq(skb);
usb_free_urb(&rx_urb->urb);
}
@@ -5550,9 +5734,6 @@ static void rtl8xxxu_c2hcmd_callback(struct work_struct *work)
btcoex = &priv->bt_coex;
rarpt = &priv->ra_report;
- if (priv->rf_paths > 1)
- goto out;
-
while (!skb_queue_empty(&priv->c2hcmd_queue)) {
skb = skb_dequeue(&priv->c2hcmd_queue);
@@ -5585,10 +5766,9 @@ static void rtl8xxxu_c2hcmd_callback(struct work_struct *work)
default:
break;
}
- }
-out:
- dev_kfree_skb(skb);
+ dev_kfree_skb(skb);
+ }
}
static void rtl8723bu_handle_c2h(struct rtl8xxxu_priv *priv,
@@ -5640,6 +5820,44 @@ static void rtl8723bu_handle_c2h(struct rtl8xxxu_priv *priv,
schedule_work(&priv->c2hcmd_work);
}
+static void rtl8188e_c2hcmd_callback(struct work_struct *work)
+{
+ struct rtl8xxxu_priv *priv = container_of(work, struct rtl8xxxu_priv, c2hcmd_work);
+ struct device *dev = &priv->udev->dev;
+ struct sk_buff *skb = NULL;
+ struct rtl8xxxu_rxdesc16 *rx_desc;
+
+ while (!skb_queue_empty(&priv->c2hcmd_queue)) {
+ skb = skb_dequeue(&priv->c2hcmd_queue);
+
+ rx_desc = (struct rtl8xxxu_rxdesc16 *)(skb->data - sizeof(struct rtl8xxxu_rxdesc16));
+
+ switch (rx_desc->rpt_sel) {
+ case 1:
+ dev_dbg(dev, "C2H TX report type 1\n");
+
+ break;
+ case 2:
+ dev_dbg(dev, "C2H TX report type 2\n");
+
+ rtl8188e_handle_ra_tx_report2(priv, skb);
+
+ break;
+ case 3:
+ dev_dbg(dev, "C2H USB interrupt report\n");
+
+ break;
+ default:
+ dev_warn(dev, "%s: rpt_sel should not be %d\n",
+ __func__, rx_desc->rpt_sel);
+
+ break;
+ }
+
+ dev_kfree_skb(skb);
+ }
+}
+
int rtl8xxxu_parse_rxdesc16(struct rtl8xxxu_priv *priv, struct sk_buff *skb)
{
struct ieee80211_hw *hw = priv->hw;
@@ -5695,38 +5913,45 @@ int rtl8xxxu_parse_rxdesc16(struct rtl8xxxu_priv *priv, struct sk_buff *skb)
skb_pull(skb, sizeof(struct rtl8xxxu_rxdesc16));
- phy_stats = (struct rtl8723au_phy_stats *)skb->data;
+ if (rx_desc->rpt_sel) {
+ skb_queue_tail(&priv->c2hcmd_queue, skb);
+ schedule_work(&priv->c2hcmd_work);
+ } else {
+ phy_stats = (struct rtl8723au_phy_stats *)skb->data;
- skb_pull(skb, drvinfo_sz + desc_shift);
+ skb_pull(skb, drvinfo_sz + desc_shift);
- skb_trim(skb, pkt_len);
+ skb_trim(skb, pkt_len);
- if (rx_desc->phy_stats)
- rtl8xxxu_rx_parse_phystats(priv, rx_status, phy_stats,
- rx_desc->rxmcs, (struct ieee80211_hdr *)skb->data,
- rx_desc->crc32 || rx_desc->icverr);
+ if (rx_desc->phy_stats)
+ rtl8xxxu_rx_parse_phystats(
+ priv, rx_status, phy_stats,
+ rx_desc->rxmcs,
+ (struct ieee80211_hdr *)skb->data,
+ rx_desc->crc32 || rx_desc->icverr);
- rx_status->mactime = rx_desc->tsfl;
- rx_status->flag |= RX_FLAG_MACTIME_START;
+ rx_status->mactime = rx_desc->tsfl;
+ rx_status->flag |= RX_FLAG_MACTIME_START;
- if (!rx_desc->swdec)
- rx_status->flag |= RX_FLAG_DECRYPTED;
- if (rx_desc->crc32)
- rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
- if (rx_desc->bw)
- rx_status->bw = RATE_INFO_BW_40;
+ if (!rx_desc->swdec)
+ rx_status->flag |= RX_FLAG_DECRYPTED;
+ if (rx_desc->crc32)
+ rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
+ if (rx_desc->bw)
+ rx_status->bw = RATE_INFO_BW_40;
- if (rx_desc->rxht) {
- rx_status->encoding = RX_ENC_HT;
- rx_status->rate_idx = rx_desc->rxmcs - DESC_RATE_MCS0;
- } else {
- rx_status->rate_idx = rx_desc->rxmcs;
- }
+ if (rx_desc->rxht) {
+ rx_status->encoding = RX_ENC_HT;
+ rx_status->rate_idx = rx_desc->rxmcs - DESC_RATE_MCS0;
+ } else {
+ rx_status->rate_idx = rx_desc->rxmcs;
+ }
- rx_status->freq = hw->conf.chandef.chan->center_freq;
- rx_status->band = hw->conf.chandef.chan->band;
+ rx_status->freq = hw->conf.chandef.chan->center_freq;
+ rx_status->band = hw->conf.chandef.chan->band;
- ieee80211_rx_irqsafe(hw, skb);
+ ieee80211_rx_irqsafe(hw, skb);
+ }
skb = next_skb;
if (skb)
@@ -5956,7 +6181,6 @@ static int rtl8xxxu_config(struct ieee80211_hw *hw, u32 changed)
{
struct rtl8xxxu_priv *priv = hw->priv;
struct device *dev = &priv->udev->dev;
- u16 val16;
int ret = 0, channel;
bool ht40;
@@ -5966,14 +6190,6 @@ static int rtl8xxxu_config(struct ieee80211_hw *hw, u32 changed)
__func__, hw->conf.chandef.chan->hw_value,
changed, hw->conf.chandef.width);
- if (changed & IEEE80211_CONF_CHANGE_RETRY_LIMITS) {
- val16 = ((hw->conf.long_frame_max_tx_count <<
- RETRY_LIMIT_LONG_SHIFT) & RETRY_LIMIT_LONG_MASK) |
- ((hw->conf.short_frame_max_tx_count <<
- RETRY_LIMIT_SHORT_SHIFT) & RETRY_LIMIT_SHORT_MASK);
- rtl8xxxu_write16(priv, REG_RETRY_LIMIT, val16);
- }
-
if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
switch (hw->conf.chandef.width) {
case NL80211_CHAN_WIDTH_20_NOHT:
@@ -6504,6 +6720,9 @@ static void rtl8xxxu_watchdog_callback(struct work_struct *work)
signal = ieee80211_ave_rssi(vif);
+ priv->fops->report_rssi(priv, 0,
+ rtl8xxxu_signal_to_snr(signal));
+
if (priv->fops->set_crystal_cap)
rtl8xxxu_track_cfo(priv);
@@ -6587,7 +6806,10 @@ exit:
rtl8xxxu_write16(priv, REG_RXFLTMAP2, 0xffff);
rtl8xxxu_write16(priv, REG_RXFLTMAP0, 0xffff);
- rtl8xxxu_write32(priv, REG_OFDM0_XA_AGC_CORE1, 0x6954341e);
+ if (priv->rtl_chip == RTL8188E)
+ rtl8xxxu_write32(priv, REG_OFDM0_XA_AGC_CORE1, 0x6955341e);
+ else
+ rtl8xxxu_write32(priv, REG_OFDM0_XA_AGC_CORE1, 0x6954341e);
return ret;
@@ -6733,6 +6955,40 @@ exit:
return ret;
}
+static void rtl8xxxu_init_led(struct rtl8xxxu_priv *priv)
+{
+ struct led_classdev *led = &priv->led_cdev;
+
+ if (!priv->fops->led_classdev_brightness_set)
+ return;
+
+ led->brightness_set_blocking = priv->fops->led_classdev_brightness_set;
+
+ snprintf(priv->led_name, sizeof(priv->led_name),
+ "rtl8xxxu-usb%s", dev_name(&priv->udev->dev));
+ led->name = priv->led_name;
+ led->max_brightness = RTL8XXXU_HW_LED_CONTROL;
+
+ if (led_classdev_register(&priv->udev->dev, led))
+ return;
+
+ priv->led_registered = true;
+
+ led->brightness = led->max_brightness;
+ priv->fops->led_classdev_brightness_set(led, led->brightness);
+}
+
+static void rtl8xxxu_deinit_led(struct rtl8xxxu_priv *priv)
+{
+ struct led_classdev *led = &priv->led_cdev;
+
+ if (!priv->led_registered)
+ return;
+
+ priv->fops->led_classdev_brightness_set(led, LED_OFF);
+ led_classdev_unregister(led);
+}
+
static int rtl8xxxu_probe(struct usb_interface *interface,
const struct usb_device_id *id)
{
@@ -6754,6 +7010,7 @@ static int rtl8xxxu_probe(struct usb_interface *interface,
case 0x817f:
case 0x818b:
case 0xf179:
+ case 0x8179:
untested = 0;
break;
}
@@ -6809,7 +7066,6 @@ static int rtl8xxxu_probe(struct usb_interface *interface,
spin_lock_init(&priv->rx_urb_lock);
INIT_WORK(&priv->rx_urb_wq, rtl8xxxu_rx_urb_work);
INIT_DELAYED_WORK(&priv->ra_watchdog, rtl8xxxu_watchdog_callback);
- INIT_WORK(&priv->c2hcmd_work, rtl8xxxu_c2hcmd_callback);
skb_queue_head_init(&priv->c2hcmd_queue);
usb_set_intfdata(interface, hw);
@@ -6827,6 +7083,11 @@ static int rtl8xxxu_probe(struct usb_interface *interface,
hw->wiphy->available_antennas_tx = BIT(priv->tx_paths) - 1;
hw->wiphy->available_antennas_rx = BIT(priv->rx_paths) - 1;
+ if (priv->rtl_chip == RTL8188E)
+ INIT_WORK(&priv->c2hcmd_work, rtl8188e_c2hcmd_callback);
+ else
+ INIT_WORK(&priv->c2hcmd_work, rtl8xxxu_c2hcmd_callback);
+
ret = rtl8xxxu_read_efuse(priv);
if (ret) {
dev_err(&udev->dev, "Fatal - failed to read EFuse\n");
@@ -6839,6 +7100,9 @@ static int rtl8xxxu_probe(struct usb_interface *interface,
goto err_set_intfdata;
}
+ if (rtl8xxxu_debug & RTL8XXXU_DEBUG_EFUSE)
+ rtl8xxxu_dump_efuse(priv);
+
rtl8xxxu_print_chipinfo(priv);
ret = priv->fops->load_firmware(priv);
@@ -6887,8 +7151,10 @@ static int rtl8xxxu_probe(struct usb_interface *interface,
hw->extra_tx_headroom = priv->fops->tx_desc_size;
ieee80211_hw_set(hw, SIGNAL_DBM);
+
/*
- * The firmware handles rate control
+ * The firmware handles rate control, except for RTL8188EU,
+ * where we handle the rate control in the driver.
*/
ieee80211_hw_set(hw, HAS_RATE_CONTROL);
ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
@@ -6903,6 +7169,8 @@ static int rtl8xxxu_probe(struct usb_interface *interface,
goto err_set_intfdata;
}
+ rtl8xxxu_init_led(priv);
+
return 0;
err_set_intfdata:
@@ -6927,6 +7195,8 @@ static void rtl8xxxu_disconnect(struct usb_interface *interface)
hw = usb_get_intfdata(interface);
priv = hw->priv;
+ rtl8xxxu_deinit_led(priv);
+
ieee80211_unregister_hw(hw);
priv->fops->power_off(priv);
@@ -6973,6 +7243,50 @@ static const struct usb_device_id dev_table[] = {
/* RTL8188FU */
{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0xf179, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8188fu_fops},
+{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x8179, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8188eu_fops},
+/* Tested by Hans de Goede - rtl8188etv */
+{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x0179, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8188eu_fops},
+/* Sitecom rtl8188eus */
+{USB_DEVICE_AND_INTERFACE_INFO(0x0df6, 0x0076, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8188eu_fops},
+/* D-Link USB-GO-N150 */
+{USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x3311, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8188eu_fops},
+/* D-Link DWA-125 REV D1 */
+{USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x330f, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8188eu_fops},
+/* D-Link DWA-123 REV D1 */
+{USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x3310, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8188eu_fops},
+/* D-Link DWA-121 rev B1 */
+{USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x331b, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8188eu_fops},
+/* Abocom - Abocom */
+{USB_DEVICE_AND_INTERFACE_INFO(0x07b8, 0x8179, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8188eu_fops},
+/* Elecom WDC-150SU2M */
+{USB_DEVICE_AND_INTERFACE_INFO(0x056e, 0x4008, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8188eu_fops},
+/* TP-Link TL-WN722N v2 */
+{USB_DEVICE_AND_INTERFACE_INFO(0x2357, 0x010c, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8188eu_fops},
+/* TP-Link TL-WN727N v5.21 */
+{USB_DEVICE_AND_INTERFACE_INFO(0x2357, 0x0111, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8188eu_fops},
+/* MERCUSYS MW150US v2 */
+{USB_DEVICE_AND_INTERFACE_INFO(0x2c4e, 0x0102, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8188eu_fops},
+/* ASUS USB-N10 Nano B1 */
+{USB_DEVICE_AND_INTERFACE_INFO(0x0b05, 0x18f0, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8188eu_fops},
+ /* Edimax EW-7811Un V2 */
+{USB_DEVICE_AND_INTERFACE_INFO(0x7392, 0xb811, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8188eu_fops},
+/* Rosewill USB-N150 Nano */
+{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0xffef, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8188eu_fops},
#ifdef CONFIG_RTL8XXXU_UNTESTED
/* Still supported by rtlwifi */
{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x8176, 0xff, 0xff, 0xff),
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h
index 3e79efdfb4c2..5849fa4e1566 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h
@@ -147,7 +147,13 @@
#define REG_LEDCFG0 0x004c
#define LEDCFG0_DPDT_SELECT BIT(23)
#define REG_LEDCFG1 0x004d
+#define LEDCFG1_HW_LED_CONTROL BIT(1)
+#define LEDCFG1_LED_DISABLE BIT(7)
#define REG_LEDCFG2 0x004e
+#define LEDCFG2_HW_LED_CONTROL BIT(1)
+#define LEDCFG2_HW_LED_ENABLE BIT(5)
+#define LEDCFG2_SW_LED_DISABLE BIT(3)
+#define LEDCFG2_SW_LED_CONTROL BIT(5)
#define LEDCFG2_DPDT_SELECT BIT(7)
#define REG_LEDCFG3 0x004f
#define REG_LEDCFG REG_LEDCFG2
@@ -371,6 +377,11 @@
#define PBP_PAGE_SIZE_512 0x3
#define PBP_PAGE_SIZE_1024 0x4
+/* 8188eu IOL magic */
+#define REG_PKT_BUF_ACCESS_CTRL 0x0106
+#define PKT_BUF_ACCESS_CTRL_TX 0x69
+#define PKT_BUF_ACCESS_CTRL_RX 0xa5
+
#define REG_TRXDMA_CTRL 0x010c
#define TRXDMA_CTRL_RXDMA_AGG_EN BIT(2)
#define TRXDMA_CTRL_VOQ_SHIFT 4
@@ -407,6 +418,8 @@
#define REG_MBIST_START 0x0174
#define REG_MBIST_DONE 0x0178
#define REG_MBIST_FAIL 0x017c
+/* 8188EU */
+#define REG_32K_CTRL 0x0194
#define REG_C2HEVT_MSG_NORMAL 0x01a0
/* 8192EU/8723BU/8812 */
#define REG_C2HEVT_CMD_ID_8723B 0x01ae
@@ -942,6 +955,16 @@
#define REG_FPGA1_RF_MODE 0x0900
#define REG_FPGA1_TX_INFO 0x090c
+#define FPGA1_TX_ANT_MASK 0x0000000f
+#define FPGA1_TX_ANT_L_MASK 0x000000f0
+#define FPGA1_TX_ANT_NON_HT_MASK 0x00000f00
+#define FPGA1_TX_ANT_HT1_MASK 0x0000f000
+#define FPGA1_TX_ANT_HT2_MASK 0x000f0000
+#define FPGA1_TX_ANT_HT_S1_MASK 0x00f00000
+#define FPGA1_TX_ANT_NON_HT_S1_MASK 0x0f000000
+#define FPGA1_TX_OFDM_TXSC_MASK 0x30000000
+
+#define REG_ANT_MAPPING1 0x0914
#define REG_DPDT_CTRL 0x092c /* 8723BU */
#define REG_RFE_CTRL_ANTA_SRC 0x0930 /* 8723BU */
#define REG_RFE_PATH_SELECT 0x0940 /* 8723BU */
@@ -954,9 +977,25 @@
#define REG_CCK0_AFE_SETTING 0x0a04
#define CCK0_AFE_RX_MASK 0x0f000000
-#define CCK0_AFE_RX_ANT_AB BIT(24)
+#define CCK0_AFE_TX_MASK 0xf0000000
#define CCK0_AFE_RX_ANT_A 0
-#define CCK0_AFE_RX_ANT_B (BIT(24) | BIT(26))
+#define CCK0_AFE_RX_ANT_B BIT(26)
+#define CCK0_AFE_RX_ANT_C BIT(27)
+#define CCK0_AFE_RX_ANT_D (BIT(26) | BIT(27))
+#define CCK0_AFE_RX_ANT_OPTION_A 0
+#define CCK0_AFE_RX_ANT_OPTION_B BIT(24)
+#define CCK0_AFE_RX_ANT_OPTION_C BIT(25)
+#define CCK0_AFE_RX_ANT_OPTION_D (BIT(24) | BIT(25))
+#define CCK0_AFE_TX_ANT_A BIT(31)
+#define CCK0_AFE_TX_ANT_B BIT(30)
+
+#define REG_CCK_ANTDIV_PARA2 0x0a04
+#define REG_BB_POWER_SAVE4 0x0a74
+
+/* 8188eu */
+#define REG_LNA_SWITCH 0x0b2c
+#define LNA_SWITCH_DISABLE_CSCG BIT(22)
+#define LNA_SWITCH_OUTPUT_CG BIT(31)
#define REG_CCK_PD_THRESH 0x0a0a
#define CCK_PD_TYPE1_LV0_TH 0x40
@@ -1020,6 +1059,9 @@
#define REG_OFDM0_RX_IQ_EXT_ANTA 0x0ca0
+/* 8188eu */
+#define REG_ANTDIV_PARA1 0x0ca4
+
/* 8723bu */
#define REG_OFDM0_TX_PSDO_NOISE_WEIGHT 0x0ce4
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c
index 58c2ab3d44be..de61c9c0ddec 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c
@@ -68,8 +68,10 @@ static void _rtl88ee_return_beacon_queue_skb(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[BEACON_QUEUE];
+ struct sk_buff_head free_list;
unsigned long flags;
+ skb_queue_head_init(&free_list);
spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
while (skb_queue_len(&ring->queue)) {
struct rtl_tx_desc *entry = &ring->desc[ring->idx];
@@ -79,10 +81,12 @@ static void _rtl88ee_return_beacon_queue_skb(struct ieee80211_hw *hw)
rtlpriv->cfg->ops->get_desc(hw, (u8 *)entry,
true, HW_DESC_TXBUFF_ADDR),
skb->len, DMA_TO_DEVICE);
- kfree_skb(skb);
+ __skb_queue_tail(&free_list, skb);
ring->idx = (ring->idx + 1) % ring->entries;
}
spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
+
+ __skb_queue_purge(&free_list);
}
static void _rtl88ee_disable_bcn_sub_func(struct ieee80211_hw *hw)
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_bt_coexist.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_bt_coexist.h
index 0455a3712f3e..12cdecdafc32 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_bt_coexist.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_bt_coexist.h
@@ -116,7 +116,7 @@ void rtl8723e_dm_bt_hw_coex_all_off(struct ieee80211_hw *hw);
long rtl8723e_dm_bt_get_rx_ss(struct ieee80211_hw *hw);
void rtl8723e_dm_bt_balance(struct ieee80211_hw *hw,
bool balance_on, u8 ms0, u8 ms1);
-void rtl8723e_dm_bt_agc_table(struct ieee80211_hw *hw, u8 tyep);
+void rtl8723e_dm_bt_agc_table(struct ieee80211_hw *hw, u8 type);
void rtl8723e_dm_bt_bb_back_off_level(struct ieee80211_hw *hw, u8 type);
u8 rtl8723e_dm_bt_check_coex_rssi_state(struct ieee80211_hw *hw,
u8 level_num, u8 rssi_thresh,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
index 189cc6437600..0ba3bbed6ed3 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
@@ -30,8 +30,10 @@ static void _rtl8723be_return_beacon_queue_skb(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[BEACON_QUEUE];
+ struct sk_buff_head free_list;
unsigned long flags;
+ skb_queue_head_init(&free_list);
spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
while (skb_queue_len(&ring->queue)) {
struct rtl_tx_desc *entry = &ring->desc[ring->idx];
@@ -41,10 +43,12 @@ static void _rtl8723be_return_beacon_queue_skb(struct ieee80211_hw *hw)
rtlpriv->cfg->ops->get_desc(hw, (u8 *)entry,
true, HW_DESC_TXBUFF_ADDR),
skb->len, DMA_TO_DEVICE);
- kfree_skb(skb);
+ __skb_queue_tail(&free_list, skb);
ring->idx = (ring->idx + 1) % ring->entries;
}
spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
+
+ __skb_queue_purge(&free_list);
}
static void _rtl8723be_set_bcn_ctrl_reg(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
index 7e0f62d59fe1..a7e3250957dc 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
@@ -26,8 +26,10 @@ static void _rtl8821ae_return_beacon_queue_skb(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[BEACON_QUEUE];
+ struct sk_buff_head free_list;
unsigned long flags;
+ skb_queue_head_init(&free_list);
spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
while (skb_queue_len(&ring->queue)) {
struct rtl_tx_desc *entry = &ring->desc[ring->idx];
@@ -37,10 +39,12 @@ static void _rtl8821ae_return_beacon_queue_skb(struct ieee80211_hw *hw)
rtlpriv->cfg->ops->get_desc(hw, (u8 *)entry,
true, HW_DESC_TXBUFF_ADDR),
skb->len, DMA_TO_DEVICE);
- kfree_skb(skb);
+ __skb_queue_tail(&free_list, skb);
ring->idx = (ring->idx + 1) % ring->entries;
}
spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
+
+ __skb_queue_purge(&free_list);
}
static void _rtl8821ae_set_bcn_ctrl_reg(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
index a29321e2fa72..5323ead30db0 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
@@ -1598,18 +1598,6 @@ static bool _rtl8812ae_get_integer_from_string(const char *str, u8 *pint)
return true;
}
-static bool _rtl8812ae_eq_n_byte(const char *str1, const char *str2, u32 num)
-{
- if (num == 0)
- return false;
- while (num > 0) {
- num--;
- if (str1[num] != str2[num])
- return false;
- }
- return true;
-}
-
static s8 _rtl8812ae_phy_get_chnl_idx_of_txpwr_lmt(struct ieee80211_hw *hw,
u8 band, u8 channel)
{
@@ -1659,42 +1647,42 @@ static void _rtl8812ae_phy_set_txpower_limit(struct ieee80211_hw *hw,
power_limit = power_limit > MAX_POWER_INDEX ?
MAX_POWER_INDEX : power_limit;
- if (_rtl8812ae_eq_n_byte(pregulation, "FCC", 3))
+ if (strcmp(pregulation, "FCC") == 0)
regulation = 0;
- else if (_rtl8812ae_eq_n_byte(pregulation, "MKK", 3))
+ else if (strcmp(pregulation, "MKK") == 0)
regulation = 1;
- else if (_rtl8812ae_eq_n_byte(pregulation, "ETSI", 4))
+ else if (strcmp(pregulation, "ETSI") == 0)
regulation = 2;
- else if (_rtl8812ae_eq_n_byte(pregulation, "WW13", 4))
+ else if (strcmp(pregulation, "WW13") == 0)
regulation = 3;
- if (_rtl8812ae_eq_n_byte(prate_section, "CCK", 3))
+ if (strcmp(prate_section, "CCK") == 0)
rate_section = 0;
- else if (_rtl8812ae_eq_n_byte(prate_section, "OFDM", 4))
+ else if (strcmp(prate_section, "OFDM") == 0)
rate_section = 1;
- else if (_rtl8812ae_eq_n_byte(prate_section, "HT", 2) &&
- _rtl8812ae_eq_n_byte(prf_path, "1T", 2))
+ else if (strcmp(prate_section, "HT") == 0 &&
+ strcmp(prf_path, "1T") == 0)
rate_section = 2;
- else if (_rtl8812ae_eq_n_byte(prate_section, "HT", 2) &&
- _rtl8812ae_eq_n_byte(prf_path, "2T", 2))
+ else if (strcmp(prate_section, "HT") == 0 &&
+ strcmp(prf_path, "2T") == 0)
rate_section = 3;
- else if (_rtl8812ae_eq_n_byte(prate_section, "VHT", 3) &&
- _rtl8812ae_eq_n_byte(prf_path, "1T", 2))
+ else if (strcmp(prate_section, "VHT") == 0 &&
+ strcmp(prf_path, "1T") == 0)
rate_section = 4;
- else if (_rtl8812ae_eq_n_byte(prate_section, "VHT", 3) &&
- _rtl8812ae_eq_n_byte(prf_path, "2T", 2))
+ else if (strcmp(prate_section, "VHT") == 0 &&
+ strcmp(prf_path, "2T") == 0)
rate_section = 5;
- if (_rtl8812ae_eq_n_byte(pbandwidth, "20M", 3))
+ if (strcmp(pbandwidth, "20M") == 0)
bandwidth = 0;
- else if (_rtl8812ae_eq_n_byte(pbandwidth, "40M", 3))
+ else if (strcmp(pbandwidth, "40M") == 0)
bandwidth = 1;
- else if (_rtl8812ae_eq_n_byte(pbandwidth, "80M", 3))
+ else if (strcmp(pbandwidth, "80M") == 0)
bandwidth = 2;
- else if (_rtl8812ae_eq_n_byte(pbandwidth, "160M", 4))
+ else if (strcmp(pbandwidth, "160M") == 0)
bandwidth = 3;
- if (_rtl8812ae_eq_n_byte(pband, "2.4G", 4)) {
+ if (strcmp(pband, "2.4G") == 0) {
ret = _rtl8812ae_phy_get_chnl_idx_of_txpwr_lmt(hw,
BAND_ON_2_4G,
channel);
@@ -1718,7 +1706,7 @@ static void _rtl8812ae_phy_set_txpower_limit(struct ieee80211_hw *hw,
regulation, bandwidth, rate_section, channel_index,
rtlphy->txpwr_limit_2_4g[regulation][bandwidth]
[rate_section][channel_index][RF90_PATH_A]);
- } else if (_rtl8812ae_eq_n_byte(pband, "5G", 2)) {
+ } else if (strcmp(pband, "5G") == 0) {
ret = _rtl8812ae_phy_get_chnl_idx_of_txpwr_lmt(hw,
BAND_ON_5G,
channel);
diff --git a/drivers/net/wireless/realtek/rtw88/bf.c b/drivers/net/wireless/realtek/rtw88/bf.c
index 038a30b170ef..c827c4a2814b 100644
--- a/drivers/net/wireless/realtek/rtw88/bf.c
+++ b/drivers/net/wireless/realtek/rtw88/bf.c
@@ -49,19 +49,23 @@ void rtw_bf_assoc(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
sta = ieee80211_find_sta(vif, bssid);
if (!sta) {
+ rcu_read_unlock();
+
rtw_warn(rtwdev, "failed to find station entry for bss %pM\n",
bssid);
- goto out_unlock;
+ return;
}
ic_vht_cap = &hw->wiphy->bands[NL80211_BAND_5GHZ]->vht_cap;
vht_cap = &sta->deflink.vht_cap;
+ rcu_read_unlock();
+
if ((ic_vht_cap->cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE) &&
(vht_cap->cap & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)) {
if (bfinfo->bfer_mu_cnt >= chip->bfer_mu_max_num) {
rtw_dbg(rtwdev, RTW_DBG_BF, "mu bfer number over limit\n");
- goto out_unlock;
+ return;
}
ether_addr_copy(bfee->mac_addr, bssid);
@@ -75,7 +79,7 @@ void rtw_bf_assoc(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
(vht_cap->cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)) {
if (bfinfo->bfer_su_cnt >= chip->bfer_su_max_num) {
rtw_dbg(rtwdev, RTW_DBG_BF, "su bfer number over limit\n");
- goto out_unlock;
+ return;
}
sound_dim = vht_cap->cap &
@@ -98,9 +102,6 @@ void rtw_bf_assoc(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
rtw_chip_config_bfee(rtwdev, rtwvif, bfee, true);
}
-
-out_unlock:
- rcu_read_unlock();
}
void rtw_bf_init_bfer_entry_mu(struct rtw_dev *rtwdev,
diff --git a/drivers/net/wireless/realtek/rtw88/coex.c b/drivers/net/wireless/realtek/rtw88/coex.c
index 38697237ee5f..86467d2f8888 100644
--- a/drivers/net/wireless/realtek/rtw88/coex.c
+++ b/drivers/net/wireless/realtek/rtw88/coex.c
@@ -4056,7 +4056,7 @@ void rtw_coex_display_coex_info(struct rtw_dev *rtwdev, struct seq_file *m)
rtwdev->stats.tx_throughput, rtwdev->stats.rx_throughput);
seq_printf(m, "%-40s = %u/ %u/ %u\n",
"IPS/ Low Power/ PS mode",
- test_bit(RTW_FLAG_INACTIVE_PS, rtwdev->flags),
+ !test_bit(RTW_FLAG_POWERON, rtwdev->flags),
test_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags),
rtwdev->lps_conf.mode);
diff --git a/drivers/net/wireless/realtek/rtw88/mac.c b/drivers/net/wireless/realtek/rtw88/mac.c
index 98777f294945..dae64901bac5 100644
--- a/drivers/net/wireless/realtek/rtw88/mac.c
+++ b/drivers/net/wireless/realtek/rtw88/mac.c
@@ -217,10 +217,10 @@ static int rtw_pwr_seq_parser(struct rtw_dev *rtwdev,
cut_mask = cut_version_to_mask(cut);
switch (rtw_hci_type(rtwdev)) {
case RTW_HCI_TYPE_PCIE:
- intf_mask = BIT(2);
+ intf_mask = RTW_PWR_INTF_PCI_MSK;
break;
case RTW_HCI_TYPE_USB:
- intf_mask = BIT(1);
+ intf_mask = RTW_PWR_INTF_USB_MSK;
break;
default:
return -EINVAL;
@@ -273,6 +273,11 @@ static int rtw_mac_power_switch(struct rtw_dev *rtwdev, bool pwr_on)
if (rtw_pwr_seq_parser(rtwdev, pwr_seq))
return -EINVAL;
+ if (pwr_on)
+ set_bit(RTW_FLAG_POWERON, rtwdev->flags);
+ else
+ clear_bit(RTW_FLAG_POWERON, rtwdev->flags);
+
return 0;
}
@@ -335,6 +340,11 @@ int rtw_mac_power_on(struct rtw_dev *rtwdev)
ret = rtw_mac_power_switch(rtwdev, true);
if (ret == -EALREADY) {
rtw_mac_power_switch(rtwdev, false);
+
+ ret = rtw_mac_pre_system_cfg(rtwdev);
+ if (ret)
+ goto err;
+
ret = rtw_mac_power_switch(rtwdev, true);
if (ret)
goto err;
diff --git a/drivers/net/wireless/realtek/rtw88/mac80211.c b/drivers/net/wireless/realtek/rtw88/mac80211.c
index 776a9a9884b5..3b92ac611d3f 100644
--- a/drivers/net/wireless/realtek/rtw88/mac80211.c
+++ b/drivers/net/wireless/realtek/rtw88/mac80211.c
@@ -737,7 +737,7 @@ static void rtw_ra_mask_info_update(struct rtw_dev *rtwdev,
br_data.rtwdev = rtwdev;
br_data.vif = vif;
br_data.mask = mask;
- rtw_iterate_stas_atomic(rtwdev, rtw_ra_mask_info_update_iter, &br_data);
+ rtw_iterate_stas(rtwdev, rtw_ra_mask_info_update_iter, &br_data);
}
static int rtw_ops_set_bitrate_mask(struct ieee80211_hw *hw,
@@ -746,7 +746,9 @@ static int rtw_ops_set_bitrate_mask(struct ieee80211_hw *hw,
{
struct rtw_dev *rtwdev = hw->priv;
+ mutex_lock(&rtwdev->mutex);
rtw_ra_mask_info_update(rtwdev, vif, mask);
+ mutex_unlock(&rtwdev->mutex);
return 0;
}
diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c
index 888427cf3bdf..b2e78737bd5d 100644
--- a/drivers/net/wireless/realtek/rtw88/main.c
+++ b/drivers/net/wireless/realtek/rtw88/main.c
@@ -241,8 +241,10 @@ static void rtw_watch_dog_work(struct work_struct *work)
rtw_phy_dynamic_mechanism(rtwdev);
data.rtwdev = rtwdev;
- /* use atomic version to avoid taking local->iflist_mtx mutex */
- rtw_iterate_vifs_atomic(rtwdev, rtw_vif_watch_dog_iter, &data);
+ /* rtw_iterate_vifs internally uses an atomic iterator which is needed
+ * to avoid taking local->iflist_mtx mutex
+ */
+ rtw_iterate_vifs(rtwdev, rtw_vif_watch_dog_iter, &data);
/* fw supports only one station associated to enter lps, if there are
* more than two stations associated to the AP, then we can not enter
diff --git a/drivers/net/wireless/realtek/rtw88/main.h b/drivers/net/wireless/realtek/rtw88/main.h
index 165f299e8e1f..d4a53d556745 100644
--- a/drivers/net/wireless/realtek/rtw88/main.h
+++ b/drivers/net/wireless/realtek/rtw88/main.h
@@ -356,7 +356,7 @@ enum rtw_flags {
RTW_FLAG_RUNNING,
RTW_FLAG_FW_RUNNING,
RTW_FLAG_SCANNING,
- RTW_FLAG_INACTIVE_PS,
+ RTW_FLAG_POWERON,
RTW_FLAG_LEISURE_PS,
RTW_FLAG_LEISURE_PS_DEEP,
RTW_FLAG_DIG_DISABLE,
diff --git a/drivers/net/wireless/realtek/rtw88/pci.c b/drivers/net/wireless/realtek/rtw88/pci.c
index 0975d27240e4..b4bd831c9845 100644
--- a/drivers/net/wireless/realtek/rtw88/pci.c
+++ b/drivers/net/wireless/realtek/rtw88/pci.c
@@ -30,7 +30,8 @@ static u32 rtw_pci_tx_queue_idx_addr[] = {
[RTW_TX_QUEUE_H2C] = RTK_PCI_TXBD_IDX_H2CQ,
};
-static u8 rtw_pci_get_tx_qsel(struct sk_buff *skb, u8 queue)
+static u8 rtw_pci_get_tx_qsel(struct sk_buff *skb,
+ enum rtw_tx_queue_type queue)
{
switch (queue) {
case RTW_TX_QUEUE_BCN:
@@ -542,7 +543,7 @@ static int rtw_pci_setup(struct rtw_dev *rtwdev)
static void rtw_pci_dma_release(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci)
{
struct rtw_pci_tx_ring *tx_ring;
- u8 queue;
+ enum rtw_tx_queue_type queue;
rtw_pci_reset_trx_ring(rtwdev);
for (queue = 0; queue < RTK_MAX_TX_QUEUE_NUM; queue++) {
@@ -608,8 +609,8 @@ static void rtw_pci_deep_ps_enter(struct rtw_dev *rtwdev)
{
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
struct rtw_pci_tx_ring *tx_ring;
+ enum rtw_tx_queue_type queue;
bool tx_empty = true;
- u8 queue;
if (rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_TX_WAKE))
goto enter_deep_ps;
@@ -669,37 +670,6 @@ static void rtw_pci_deep_ps(struct rtw_dev *rtwdev, bool enter)
spin_unlock_bh(&rtwpci->irq_lock);
}
-static u8 ac_to_hwq[] = {
- [IEEE80211_AC_VO] = RTW_TX_QUEUE_VO,
- [IEEE80211_AC_VI] = RTW_TX_QUEUE_VI,
- [IEEE80211_AC_BE] = RTW_TX_QUEUE_BE,
- [IEEE80211_AC_BK] = RTW_TX_QUEUE_BK,
-};
-
-static_assert(ARRAY_SIZE(ac_to_hwq) == IEEE80211_NUM_ACS);
-
-static u8 rtw_hw_queue_mapping(struct sk_buff *skb)
-{
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
- __le16 fc = hdr->frame_control;
- u8 q_mapping = skb_get_queue_mapping(skb);
- u8 queue;
-
- if (unlikely(ieee80211_is_beacon(fc)))
- queue = RTW_TX_QUEUE_BCN;
- else if (unlikely(ieee80211_is_mgmt(fc) || ieee80211_is_ctl(fc)))
- queue = RTW_TX_QUEUE_MGMT;
- else if (is_broadcast_ether_addr(hdr->addr1) ||
- is_multicast_ether_addr(hdr->addr1))
- queue = RTW_TX_QUEUE_HI0;
- else if (WARN_ON_ONCE(q_mapping >= ARRAY_SIZE(ac_to_hwq)))
- queue = ac_to_hwq[IEEE80211_AC_BE];
- else
- queue = ac_to_hwq[q_mapping];
-
- return queue;
-}
-
static void rtw_pci_release_rsvd_page(struct rtw_pci *rtwpci,
struct rtw_pci_tx_ring *ring)
{
@@ -797,13 +767,14 @@ static void rtw_pci_flush_queues(struct rtw_dev *rtwdev, u32 queues, bool drop)
} else {
for (i = 0; i < rtwdev->hw->queues; i++)
if (queues & BIT(i))
- pci_queues |= BIT(ac_to_hwq[i]);
+ pci_queues |= BIT(rtw_tx_ac_to_hwq(i));
}
__rtw_pci_flush_queues(rtwdev, pci_queues, drop);
}
-static void rtw_pci_tx_kick_off_queue(struct rtw_dev *rtwdev, u8 queue)
+static void rtw_pci_tx_kick_off_queue(struct rtw_dev *rtwdev,
+ enum rtw_tx_queue_type queue)
{
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
struct rtw_pci_tx_ring *ring;
@@ -822,7 +793,7 @@ static void rtw_pci_tx_kick_off_queue(struct rtw_dev *rtwdev, u8 queue)
static void rtw_pci_tx_kick_off(struct rtw_dev *rtwdev)
{
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
- u8 queue;
+ enum rtw_tx_queue_type queue;
for (queue = 0; queue < RTK_MAX_TX_QUEUE_NUM; queue++)
if (test_and_clear_bit(queue, rtwpci->tx_queued))
@@ -831,7 +802,8 @@ static void rtw_pci_tx_kick_off(struct rtw_dev *rtwdev)
static int rtw_pci_tx_write_data(struct rtw_dev *rtwdev,
struct rtw_tx_pkt_info *pkt_info,
- struct sk_buff *skb, u8 queue)
+ struct sk_buff *skb,
+ enum rtw_tx_queue_type queue)
{
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
const struct rtw_chip_info *chip = rtwdev->chip;
@@ -949,9 +921,9 @@ static int rtw_pci_tx_write(struct rtw_dev *rtwdev,
struct rtw_tx_pkt_info *pkt_info,
struct sk_buff *skb)
{
+ enum rtw_tx_queue_type queue = rtw_tx_queue_mapping(skb);
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
struct rtw_pci_tx_ring *ring;
- u8 queue = rtw_hw_queue_mapping(skb);
int ret;
ret = rtw_pci_tx_write_data(rtwdev, pkt_info, skb, queue);
diff --git a/drivers/net/wireless/realtek/rtw88/ps.c b/drivers/net/wireless/realtek/rtw88/ps.c
index 11594940d6b0..996365575f44 100644
--- a/drivers/net/wireless/realtek/rtw88/ps.c
+++ b/drivers/net/wireless/realtek/rtw88/ps.c
@@ -25,7 +25,7 @@ static int rtw_ips_pwr_up(struct rtw_dev *rtwdev)
int rtw_enter_ips(struct rtw_dev *rtwdev)
{
- if (test_and_set_bit(RTW_FLAG_INACTIVE_PS, rtwdev->flags))
+ if (!test_bit(RTW_FLAG_POWERON, rtwdev->flags))
return 0;
rtw_coex_ips_notify(rtwdev, COEX_IPS_ENTER);
@@ -50,7 +50,7 @@ int rtw_leave_ips(struct rtw_dev *rtwdev)
{
int ret;
- if (!test_and_clear_bit(RTW_FLAG_INACTIVE_PS, rtwdev->flags))
+ if (test_bit(RTW_FLAG_POWERON, rtwdev->flags))
return 0;
rtw_hci_link_ps(rtwdev, false);
diff --git a/drivers/net/wireless/realtek/rtw88/tx.c b/drivers/net/wireless/realtek/rtw88/tx.c
index ab39245e9c2f..bb5c7492c98b 100644
--- a/drivers/net/wireless/realtek/rtw88/tx.c
+++ b/drivers/net/wireless/realtek/rtw88/tx.c
@@ -682,3 +682,44 @@ void rtw_txq_cleanup(struct rtw_dev *rtwdev, struct ieee80211_txq *txq)
list_del_init(&rtwtxq->list);
spin_unlock_bh(&rtwdev->txq_lock);
}
+
+static const enum rtw_tx_queue_type ac_to_hwq[] = {
+ [IEEE80211_AC_VO] = RTW_TX_QUEUE_VO,
+ [IEEE80211_AC_VI] = RTW_TX_QUEUE_VI,
+ [IEEE80211_AC_BE] = RTW_TX_QUEUE_BE,
+ [IEEE80211_AC_BK] = RTW_TX_QUEUE_BK,
+};
+
+static_assert(ARRAY_SIZE(ac_to_hwq) == IEEE80211_NUM_ACS);
+
+enum rtw_tx_queue_type rtw_tx_ac_to_hwq(enum ieee80211_ac_numbers ac)
+{
+ if (WARN_ON(unlikely(ac >= IEEE80211_NUM_ACS)))
+ return RTW_TX_QUEUE_BE;
+
+ return ac_to_hwq[ac];
+}
+EXPORT_SYMBOL(rtw_tx_ac_to_hwq);
+
+enum rtw_tx_queue_type rtw_tx_queue_mapping(struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ __le16 fc = hdr->frame_control;
+ u8 q_mapping = skb_get_queue_mapping(skb);
+ enum rtw_tx_queue_type queue;
+
+ if (unlikely(ieee80211_is_beacon(fc)))
+ queue = RTW_TX_QUEUE_BCN;
+ else if (unlikely(ieee80211_is_mgmt(fc) || ieee80211_is_ctl(fc)))
+ queue = RTW_TX_QUEUE_MGMT;
+ else if (is_broadcast_ether_addr(hdr->addr1) ||
+ is_multicast_ether_addr(hdr->addr1))
+ queue = RTW_TX_QUEUE_HI0;
+ else if (WARN_ON_ONCE(q_mapping >= ARRAY_SIZE(ac_to_hwq)))
+ queue = ac_to_hwq[IEEE80211_AC_BE];
+ else
+ queue = ac_to_hwq[q_mapping];
+
+ return queue;
+}
+EXPORT_SYMBOL(rtw_tx_queue_mapping);
diff --git a/drivers/net/wireless/realtek/rtw88/tx.h b/drivers/net/wireless/realtek/rtw88/tx.h
index a2f3ac326041..197d5868c8ad 100644
--- a/drivers/net/wireless/realtek/rtw88/tx.h
+++ b/drivers/net/wireless/realtek/rtw88/tx.h
@@ -131,6 +131,9 @@ rtw_tx_write_data_h2c_get(struct rtw_dev *rtwdev,
struct rtw_tx_pkt_info *pkt_info,
u8 *buf, u32 size);
+enum rtw_tx_queue_type rtw_tx_ac_to_hwq(enum ieee80211_ac_numbers ac);
+enum rtw_tx_queue_type rtw_tx_queue_mapping(struct sk_buff *skb);
+
static inline
void fill_txdesc_checksum_common(u8 *txdesc, size_t words)
{
diff --git a/drivers/net/wireless/realtek/rtw88/usb.c b/drivers/net/wireless/realtek/rtw88/usb.c
index 4ef38279b64c..2a8336b1847a 100644
--- a/drivers/net/wireless/realtek/rtw88/usb.c
+++ b/drivers/net/wireless/realtek/rtw88/usb.c
@@ -271,6 +271,7 @@ static int rtw_usb_write_port(struct rtw_dev *rtwdev, u8 qsel, struct sk_buff *s
return -ENOMEM;
usb_fill_bulk_urb(urb, usbd, pipe, skb->data, skb->len, cb, context);
+ urb->transfer_flags |= URB_ZERO_PACKET;
ret = usb_submit_urb(urb, GFP_ATOMIC);
usb_free_urb(urb);
@@ -413,24 +414,11 @@ static int rtw_usb_write_data_rsvd_page(struct rtw_dev *rtwdev, u8 *buf,
u32 size)
{
const struct rtw_chip_info *chip = rtwdev->chip;
- struct rtw_usb *rtwusb;
struct rtw_tx_pkt_info pkt_info = {0};
- u32 len, desclen;
-
- rtwusb = rtw_get_usb_priv(rtwdev);
pkt_info.tx_pkt_size = size;
pkt_info.qsel = TX_DESC_QSEL_BEACON;
-
- desclen = chip->tx_pkt_desc_sz;
- len = desclen + size;
- if (len % rtwusb->bulkout_size == 0) {
- len += RTW_USB_PACKET_OFFSET_SZ;
- pkt_info.offset = desclen + RTW_USB_PACKET_OFFSET_SZ;
- pkt_info.pkt_offset = 1;
- } else {
- pkt_info.offset = desclen;
- }
+ pkt_info.offset = chip->tx_pkt_desc_sz;
return rtw_usb_write_data(rtwdev, &pkt_info, buf);
}
@@ -471,9 +459,9 @@ static int rtw_usb_tx_write(struct rtw_dev *rtwdev,
u8 *pkt_desc;
int ep;
+ pkt_info->qsel = rtw_usb_tx_queue_mapping_to_qsel(skb);
pkt_desc = skb_push(skb, chip->tx_pkt_desc_sz);
memset(pkt_desc, 0, chip->tx_pkt_desc_sz);
- pkt_info->qsel = rtw_usb_tx_queue_mapping_to_qsel(skb);
ep = qsel_to_ep(rtwusb, pkt_info->qsel);
rtw_tx_fill_tx_desc(pkt_info, skb);
rtw_tx_fill_txdesc_checksum(rtwdev, pkt_info, skb->data);
diff --git a/drivers/net/wireless/realtek/rtw88/wow.c b/drivers/net/wireless/realtek/rtw88/wow.c
index 89dc595094d5..16ddee577efe 100644
--- a/drivers/net/wireless/realtek/rtw88/wow.c
+++ b/drivers/net/wireless/realtek/rtw88/wow.c
@@ -592,7 +592,7 @@ static int rtw_wow_leave_no_link_ps(struct rtw_dev *rtwdev)
if (rtw_get_lps_deep_mode(rtwdev) != LPS_DEEP_MODE_NONE)
rtw_leave_lps_deep(rtwdev);
} else {
- if (test_bit(RTW_FLAG_INACTIVE_PS, rtwdev->flags)) {
+ if (!test_bit(RTW_FLAG_POWERON, rtwdev->flags)) {
rtw_wow->ips_enabled = true;
ret = rtw_leave_ips(rtwdev);
if (ret)
diff --git a/drivers/net/wireless/realtek/rtw89/coex.c b/drivers/net/wireless/realtek/rtw89/coex.c
index f21c73310fdb..bcf483cafd20 100644
--- a/drivers/net/wireless/realtek/rtw89/coex.c
+++ b/drivers/net/wireless/realtek/rtw89/coex.c
@@ -9,7 +9,7 @@
#include "ps.h"
#include "reg.h"
-#define RTW89_COEX_VERSION 0x06030013
+#define RTW89_COEX_VERSION 0x07000013
#define FCXDEF_STEP 50 /* MUST <= FCXMAX_STEP and match with wl fw*/
enum btc_fbtc_tdma_template {
@@ -63,7 +63,7 @@ struct btc_fbtc_1slot {
static const struct rtw89_btc_fbtc_tdma t_def[] = {
[CXTD_OFF] = { CXTDMA_OFF, CXFLC_OFF, CXTPS_OFF, 0, 0, 0, 0, 0},
[CXTD_OFF_B2] = { CXTDMA_OFF, CXFLC_OFF, CXTPS_OFF, 0, 0, 1, 0, 0},
- [CXTD_OFF_EXT] = { CXTDMA_OFF, CXFLC_OFF, CXTPS_OFF, 0, 0, 3, 0, 0},
+ [CXTD_OFF_EXT] = { CXTDMA_OFF, CXFLC_OFF, CXTPS_OFF, 0, 0, 2, 0, 0},
[CXTD_FIX] = { CXTDMA_FIX, CXFLC_OFF, CXTPS_OFF, 0, 0, 0, 0, 0},
[CXTD_PFIX] = { CXTDMA_FIX, CXFLC_NULLP, CXTPS_ON, 0, 5, 0, 0, 0},
[CXTD_AUTO] = { CXTDMA_AUTO, CXFLC_OFF, CXTPS_OFF, 0, 0, 0, 0, 0},
@@ -80,21 +80,21 @@ static const struct rtw89_btc_fbtc_slot s_def[] = {
[CXST_OFF] = __DEF_FBTC_SLOT(100, 0x55555555, SLOT_MIX),
[CXST_B2W] = __DEF_FBTC_SLOT(5, 0xea5a5a5a, SLOT_ISO),
[CXST_W1] = __DEF_FBTC_SLOT(70, 0xea5a5a5a, SLOT_ISO),
- [CXST_W2] = __DEF_FBTC_SLOT(70, 0xea5a5aaa, SLOT_ISO),
+ [CXST_W2] = __DEF_FBTC_SLOT(15, 0xea5a5a5a, SLOT_ISO),
[CXST_W2B] = __DEF_FBTC_SLOT(15, 0xea5a5a5a, SLOT_ISO),
- [CXST_B1] = __DEF_FBTC_SLOT(100, 0xe5555555, SLOT_MIX),
+ [CXST_B1] = __DEF_FBTC_SLOT(250, 0xe5555555, SLOT_MIX),
[CXST_B2] = __DEF_FBTC_SLOT(7, 0xea5a5a5a, SLOT_MIX),
[CXST_B3] = __DEF_FBTC_SLOT(5, 0xe5555555, SLOT_MIX),
[CXST_B4] = __DEF_FBTC_SLOT(50, 0xe5555555, SLOT_MIX),
[CXST_LK] = __DEF_FBTC_SLOT(20, 0xea5a5a5a, SLOT_ISO),
- [CXST_BLK] = __DEF_FBTC_SLOT(250, 0x55555555, SLOT_MIX),
- [CXST_E2G] = __DEF_FBTC_SLOT(20, 0xea5a5a5a, SLOT_MIX),
- [CXST_E5G] = __DEF_FBTC_SLOT(20, 0xffffffff, SLOT_MIX),
- [CXST_EBT] = __DEF_FBTC_SLOT(20, 0xe5555555, SLOT_MIX),
- [CXST_ENULL] = __DEF_FBTC_SLOT(7, 0xaaaaaaaa, SLOT_ISO),
+ [CXST_BLK] = __DEF_FBTC_SLOT(500, 0x55555555, SLOT_MIX),
+ [CXST_E2G] = __DEF_FBTC_SLOT(0, 0xea5a5a5a, SLOT_MIX),
+ [CXST_E5G] = __DEF_FBTC_SLOT(0, 0xffffffff, SLOT_ISO),
+ [CXST_EBT] = __DEF_FBTC_SLOT(0, 0xe5555555, SLOT_MIX),
+ [CXST_ENULL] = __DEF_FBTC_SLOT(0, 0xaaaaaaaa, SLOT_ISO),
[CXST_WLK] = __DEF_FBTC_SLOT(250, 0xea5a5a5a, SLOT_MIX),
- [CXST_W1FDD] = __DEF_FBTC_SLOT(35, 0xfafafafa, SLOT_ISO),
- [CXST_B1FDD] = __DEF_FBTC_SLOT(100, 0xffffffff, SLOT_MIX),
+ [CXST_W1FDD] = __DEF_FBTC_SLOT(50, 0xffffffff, SLOT_ISO),
+ [CXST_B1FDD] = __DEF_FBTC_SLOT(50, 0xffffdfff, SLOT_ISO),
};
static const u32 cxtbl[] = {
@@ -117,9 +117,78 @@ static const u32 cxtbl[] = {
0xfafafafa, /* 16 */
0xffffddff, /* 17 */
0xdaffdaff, /* 18 */
- 0xfafadafa /* 19 */
+ 0xfafadafa, /* 19 */
+ 0xea6a6a6a, /* 20 */
+ 0xea55556a, /* 21 */
+ 0xaafafafa, /* 22 */
+ 0xfafaaafa, /* 23 */
+ 0xfafffaff /* 24 */
};
+static const struct rtw89_btc_ver rtw89_btc_ver_defs[] = {
+ /* firmware version must be in decreasing order for each chip */
+ {RTL8852C, RTW89_FW_VER_CODE(0, 27, 57, 0),
+ .fcxbtcrpt = 4, .fcxtdma = 3, .fcxslots = 1, .fcxcysta = 3,
+ .fcxstep = 3, .fcxnullsta = 2, .fcxmreg = 1, .fcxgpiodbg = 1,
+ .fcxbtver = 1, .fcxbtscan = 1, .fcxbtafh = 2, .fcxbtdevinfo = 1,
+ .fwlrole = 1, .frptmap = 3, .fcxctrl = 1,
+ .info_buf = 1280, .max_role_num = 5,
+ },
+ {RTL8852C, RTW89_FW_VER_CODE(0, 27, 42, 0),
+ .fcxbtcrpt = 4, .fcxtdma = 3, .fcxslots = 1, .fcxcysta = 3,
+ .fcxstep = 3, .fcxnullsta = 2, .fcxmreg = 1, .fcxgpiodbg = 1,
+ .fcxbtver = 1, .fcxbtscan = 1, .fcxbtafh = 2, .fcxbtdevinfo = 1,
+ .fwlrole = 1, .frptmap = 2, .fcxctrl = 1,
+ .info_buf = 1280, .max_role_num = 5,
+ },
+ {RTL8852C, RTW89_FW_VER_CODE(0, 27, 0, 0),
+ .fcxbtcrpt = 4, .fcxtdma = 3, .fcxslots = 1, .fcxcysta = 3,
+ .fcxstep = 3, .fcxnullsta = 2, .fcxmreg = 1, .fcxgpiodbg = 1,
+ .fcxbtver = 1, .fcxbtscan = 1, .fcxbtafh = 1, .fcxbtdevinfo = 1,
+ .fwlrole = 1, .frptmap = 2, .fcxctrl = 1,
+ .info_buf = 1280, .max_role_num = 5,
+ },
+ {RTL8852B, RTW89_FW_VER_CODE(0, 29, 14, 0),
+ .fcxbtcrpt = 5, .fcxtdma = 3, .fcxslots = 1, .fcxcysta = 4,
+ .fcxstep = 3, .fcxnullsta = 2, .fcxmreg = 1, .fcxgpiodbg = 1,
+ .fcxbtver = 1, .fcxbtscan = 1, .fcxbtafh = 2, .fcxbtdevinfo = 1,
+ .fwlrole = 1, .frptmap = 3, .fcxctrl = 1,
+ .info_buf = 1800, .max_role_num = 6,
+ },
+ {RTL8852B, RTW89_FW_VER_CODE(0, 27, 0, 0),
+ .fcxbtcrpt = 4, .fcxtdma = 3, .fcxslots = 1, .fcxcysta = 3,
+ .fcxstep = 3, .fcxnullsta = 2, .fcxmreg = 1, .fcxgpiodbg = 1,
+ .fcxbtver = 1, .fcxbtscan = 1, .fcxbtafh = 1, .fcxbtdevinfo = 1,
+ .fwlrole = 1, .frptmap = 1, .fcxctrl = 1,
+ .info_buf = 1280, .max_role_num = 5,
+ },
+ {RTL8852A, RTW89_FW_VER_CODE(0, 13, 37, 0),
+ .fcxbtcrpt = 4, .fcxtdma = 3, .fcxslots = 1, .fcxcysta = 3,
+ .fcxstep = 3, .fcxnullsta = 2, .fcxmreg = 1, .fcxgpiodbg = 1,
+ .fcxbtver = 1, .fcxbtscan = 1, .fcxbtafh = 2, .fcxbtdevinfo = 1,
+ .fwlrole = 1, .frptmap = 3, .fcxctrl = 1,
+ .info_buf = 1280, .max_role_num = 5,
+ },
+ {RTL8852A, RTW89_FW_VER_CODE(0, 13, 0, 0),
+ .fcxbtcrpt = 1, .fcxtdma = 1, .fcxslots = 1, .fcxcysta = 2,
+ .fcxstep = 2, .fcxnullsta = 1, .fcxmreg = 1, .fcxgpiodbg = 1,
+ .fcxbtver = 1, .fcxbtscan = 1, .fcxbtafh = 1, .fcxbtdevinfo = 1,
+ .fwlrole = 0, .frptmap = 0, .fcxctrl = 0,
+ .info_buf = 1024, .max_role_num = 5,
+ },
+
+ /* keep it to be the last as default entry */
+ {0, RTW89_FW_VER_CODE(0, 0, 0, 0),
+ .fcxbtcrpt = 1, .fcxtdma = 1, .fcxslots = 1, .fcxcysta = 2,
+ .fcxstep = 2, .fcxnullsta = 1, .fcxmreg = 1, .fcxgpiodbg = 1,
+ .fcxbtver = 1, .fcxbtscan = 1, .fcxbtafh = 1, .fcxbtdevinfo = 1,
+ .fwlrole = 0, .frptmap = 0, .fcxctrl = 0,
+ .info_buf = 1024, .max_role_num = 5,
+ },
+};
+
+#define RTW89_DEFAULT_BTC_VER_IDX (ARRAY_SIZE(rtw89_btc_ver_defs) - 1)
+
struct rtw89_btc_btf_tlv {
u8 type;
u8 len;
@@ -127,16 +196,20 @@ struct rtw89_btc_btf_tlv {
} __packed;
enum btc_btf_set_report_en {
- RPT_EN_TDMA = BIT(0),
- RPT_EN_CYCLE = BIT(1),
- RPT_EN_MREG = BIT(2),
- RPT_EN_BT_VER_INFO = BIT(3),
- RPT_EN_BT_SCAN_INFO = BIT(4),
- RPT_EN_BT_AFH_MAP = BIT(5),
- RPT_EN_BT_DEVICE_INFO = BIT(6),
- RPT_EN_WL_ALL = GENMASK(2, 0),
- RPT_EN_BT_ALL = GENMASK(6, 3),
- RPT_EN_ALL = GENMASK(6, 0),
+ RPT_EN_TDMA,
+ RPT_EN_CYCLE,
+ RPT_EN_MREG,
+ RPT_EN_BT_VER_INFO,
+ RPT_EN_BT_SCAN_INFO,
+ RPT_EN_BT_DEVICE_INFO,
+ RPT_EN_BT_AFH_MAP,
+ RPT_EN_BT_AFH_MAP_LE,
+ RPT_EN_FW_STEP_INFO,
+ RPT_EN_TEST,
+ RPT_EN_WL_ALL,
+ RPT_EN_BT_ALL,
+ RPT_EN_ALL,
+ RPT_EN_MONITER,
};
#define BTF_SET_REPORT_VER 1
@@ -786,17 +859,18 @@ static void _chk_btc_err(struct rtw89_dev *rtwdev, u8 type, u32 cnt)
static void _update_bt_report(struct rtw89_dev *rtwdev, u8 rpt_type, u8 *pfinfo)
{
struct rtw89_btc *btc = &rtwdev->btc;
+ const struct rtw89_btc_ver *ver = btc->ver;
struct rtw89_btc_bt_info *bt = &btc->cx.bt;
struct rtw89_btc_bt_link_info *bt_linfo = &bt->link_info;
struct rtw89_btc_bt_a2dp_desc *a2dp = &bt_linfo->a2dp_desc;
struct rtw89_btc_fbtc_btver *pver = NULL;
struct rtw89_btc_fbtc_btscan *pscan = NULL;
- struct rtw89_btc_fbtc_btafh *pafh = NULL;
+ struct rtw89_btc_fbtc_btafh *pafh_v1 = NULL;
+ struct rtw89_btc_fbtc_btafh_v2 *pafh_v2 = NULL;
struct rtw89_btc_fbtc_btdevinfo *pdev = NULL;
pver = (struct rtw89_btc_fbtc_btver *)pfinfo;
pscan = (struct rtw89_btc_fbtc_btscan *)pfinfo;
- pafh = (struct rtw89_btc_fbtc_btafh *)pfinfo;
pdev = (struct rtw89_btc_fbtc_btdevinfo *)pfinfo;
rtw89_debug(rtwdev, RTW89_DBG_BTC,
@@ -813,9 +887,23 @@ static void _update_bt_report(struct rtw89_dev *rtwdev, u8 rpt_type, u8 *pfinfo)
memcpy(bt->scan_info, pscan->scan, BTC_SCAN_MAX1);
break;
case BTC_RPT_TYPE_BT_AFH:
- memcpy(&bt_linfo->afh_map[0], pafh->afh_l, 4);
- memcpy(&bt_linfo->afh_map[4], pafh->afh_m, 4);
- memcpy(&bt_linfo->afh_map[8], pafh->afh_h, 2);
+ if (ver->fcxbtafh == 2) {
+ pafh_v2 = (struct rtw89_btc_fbtc_btafh_v2 *)pfinfo;
+ if (pafh_v2->map_type & RPT_BT_AFH_SEQ_LEGACY) {
+ memcpy(&bt_linfo->afh_map[0], pafh_v2->afh_l, 4);
+ memcpy(&bt_linfo->afh_map[4], pafh_v2->afh_m, 4);
+ memcpy(&bt_linfo->afh_map[8], pafh_v2->afh_h, 2);
+ }
+ if (pafh_v2->map_type & RPT_BT_AFH_SEQ_LE) {
+ memcpy(&bt_linfo->afh_map_le[0], pafh_v2->afh_le_a, 4);
+ memcpy(&bt_linfo->afh_map_le[4], pafh_v2->afh_le_b, 1);
+ }
+ } else if (ver->fcxbtafh == 1) {
+ pafh_v1 = (struct rtw89_btc_fbtc_btafh *)pfinfo;
+ memcpy(&bt_linfo->afh_map[0], pafh_v1->afh_l, 4);
+ memcpy(&bt_linfo->afh_map[4], pafh_v1->afh_m, 4);
+ memcpy(&bt_linfo->afh_map[8], pafh_v1->afh_h, 2);
+ }
break;
case BTC_RPT_TYPE_BT_DEVICE:
a2dp->device_name = le32_to_cpu(pdev->dev_name);
@@ -827,76 +915,6 @@ static void _update_bt_report(struct rtw89_dev *rtwdev, u8 rpt_type, u8 *pfinfo)
}
}
-struct rtw89_btc_fbtc_cysta_cpu {
- u8 fver;
- u8 rsvd;
- u16 cycles;
- u16 cycles_a2dp[CXT_FLCTRL_MAX];
- u16 a2dpept;
- u16 a2dpeptto;
- u16 tavg_cycle[CXT_MAX];
- u16 tmax_cycle[CXT_MAX];
- u16 tmaxdiff_cycle[CXT_MAX];
- u16 tavg_a2dp[CXT_FLCTRL_MAX];
- u16 tmax_a2dp[CXT_FLCTRL_MAX];
- u16 tavg_a2dpept;
- u16 tmax_a2dpept;
- u16 tavg_lk;
- u16 tmax_lk;
- u32 slot_cnt[CXST_MAX];
- u32 bcn_cnt[CXBCN_MAX];
- u32 leakrx_cnt;
- u32 collision_cnt;
- u32 skip_cnt;
- u32 exception;
- u32 except_cnt;
- u16 tslot_cycle[BTC_CYCLE_SLOT_MAX];
-};
-
-static void rtw89_btc_fbtc_cysta_to_cpu(const struct rtw89_btc_fbtc_cysta *src,
- struct rtw89_btc_fbtc_cysta_cpu *dst)
-{
- static_assert(sizeof(*src) == sizeof(*dst));
-
-#define __CPY_U8(_x) ({dst->_x = src->_x; })
-#define __CPY_LE16(_x) ({dst->_x = le16_to_cpu(src->_x); })
-#define __CPY_LE16S(_x) ({int _i; for (_i = 0; _i < ARRAY_SIZE(dst->_x); _i++) \
- dst->_x[_i] = le16_to_cpu(src->_x[_i]); })
-#define __CPY_LE32(_x) ({dst->_x = le32_to_cpu(src->_x); })
-#define __CPY_LE32S(_x) ({int _i; for (_i = 0; _i < ARRAY_SIZE(dst->_x); _i++) \
- dst->_x[_i] = le32_to_cpu(src->_x[_i]); })
-
- __CPY_U8(fver);
- __CPY_U8(rsvd);
- __CPY_LE16(cycles);
- __CPY_LE16S(cycles_a2dp);
- __CPY_LE16(a2dpept);
- __CPY_LE16(a2dpeptto);
- __CPY_LE16S(tavg_cycle);
- __CPY_LE16S(tmax_cycle);
- __CPY_LE16S(tmaxdiff_cycle);
- __CPY_LE16S(tavg_a2dp);
- __CPY_LE16S(tmax_a2dp);
- __CPY_LE16(tavg_a2dpept);
- __CPY_LE16(tmax_a2dpept);
- __CPY_LE16(tavg_lk);
- __CPY_LE16(tmax_lk);
- __CPY_LE32S(slot_cnt);
- __CPY_LE32S(bcn_cnt);
- __CPY_LE32(leakrx_cnt);
- __CPY_LE32(collision_cnt);
- __CPY_LE32(skip_cnt);
- __CPY_LE32(exception);
- __CPY_LE32(except_cnt);
- __CPY_LE16S(tslot_cycle);
-
-#undef __CPY_U8
-#undef __CPY_LE16
-#undef __CPY_LE16S
-#undef __CPY_LE32
-#undef __CPY_LE32S
-}
-
#define BTC_LEAK_AP_TH 10
#define BTC_CYSTA_CHK_PERIOD 100
@@ -910,19 +928,15 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
struct rtw89_btc_btf_fwinfo *pfwinfo,
u8 *prptbuf, u32 index)
{
- const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_btc *btc = &rtwdev->btc;
+ const struct rtw89_btc_ver *ver = btc->ver;
struct rtw89_btc_dm *dm = &btc->dm;
struct rtw89_btc_rpt_cmn_info *pcinfo = NULL;
struct rtw89_btc_wl_info *wl = &btc->cx.wl;
struct rtw89_btc_bt_info *bt = &btc->cx.bt;
- struct rtw89_btc_fbtc_rpt_ctrl *prpt;
- struct rtw89_btc_fbtc_rpt_ctrl_v1 *prpt_v1;
- struct rtw89_btc_fbtc_cysta *pcysta_le32 = NULL;
- struct rtw89_btc_fbtc_cysta_v1 *pcysta_v1 = NULL;
- struct rtw89_btc_fbtc_cysta_cpu pcysta[1];
+ union rtw89_btc_fbtc_rpt_ctrl_ver_info *prpt = NULL;
+ union rtw89_btc_fbtc_cysta_info *pcysta = NULL;
struct rtw89_btc_prpt *btc_prpt = NULL;
- struct rtw89_btc_fbtc_slot *rtp_slot = NULL;
void *rpt_content = NULL, *pfinfo = NULL;
u8 rpt_type = 0;
u16 wl_slot_set = 0, wl_slot_real = 0;
@@ -951,137 +965,141 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
switch (rpt_type) {
case BTC_RPT_TYPE_CTRL:
pcinfo = &pfwinfo->rpt_ctrl.cinfo;
- if (chip->chip_id == RTL8852A) {
- pfinfo = &pfwinfo->rpt_ctrl.finfo;
- pcinfo->req_len = sizeof(pfwinfo->rpt_ctrl.finfo);
+ prpt = &pfwinfo->rpt_ctrl.finfo;
+ if (ver->fcxbtcrpt == 1) {
+ pfinfo = &pfwinfo->rpt_ctrl.finfo.v1;
+ pcinfo->req_len = sizeof(pfwinfo->rpt_ctrl.finfo.v1);
+ } else if (ver->fcxbtcrpt == 4) {
+ pfinfo = &pfwinfo->rpt_ctrl.finfo.v4;
+ pcinfo->req_len = sizeof(pfwinfo->rpt_ctrl.finfo.v4);
+ } else if (ver->fcxbtcrpt == 5) {
+ pfinfo = &pfwinfo->rpt_ctrl.finfo.v5;
+ pcinfo->req_len = sizeof(pfwinfo->rpt_ctrl.finfo.v5);
} else {
- pfinfo = &pfwinfo->rpt_ctrl.finfo_v1;
- pcinfo->req_len = sizeof(pfwinfo->rpt_ctrl.finfo_v1);
+ goto err;
}
- pcinfo->req_fver = chip->fcxbtcrpt_ver;
- pcinfo->rx_len = rpt_len;
- pcinfo->rx_cnt++;
+ pcinfo->req_fver = ver->fcxbtcrpt;
break;
case BTC_RPT_TYPE_TDMA:
pcinfo = &pfwinfo->rpt_fbtc_tdma.cinfo;
- if (chip->chip_id == RTL8852A) {
- pfinfo = &pfwinfo->rpt_fbtc_tdma.finfo;
- pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_tdma.finfo);
+ if (ver->fcxtdma == 1) {
+ pfinfo = &pfwinfo->rpt_fbtc_tdma.finfo.v1;
+ pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_tdma.finfo.v1);
+ } else if (ver->fcxtdma == 3) {
+ pfinfo = &pfwinfo->rpt_fbtc_tdma.finfo.v3;
+ pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_tdma.finfo.v3);
} else {
- pfinfo = &pfwinfo->rpt_fbtc_tdma.finfo_v1;
- pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_tdma.finfo_v1);
+ goto err;
}
- pcinfo->req_fver = chip->fcxtdma_ver;
- pcinfo->rx_len = rpt_len;
- pcinfo->rx_cnt++;
+ pcinfo->req_fver = ver->fcxtdma;
break;
case BTC_RPT_TYPE_SLOT:
pcinfo = &pfwinfo->rpt_fbtc_slots.cinfo;
pfinfo = &pfwinfo->rpt_fbtc_slots.finfo;
pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_slots.finfo);
- pcinfo->req_fver = chip->fcxslots_ver;
- pcinfo->rx_len = rpt_len;
- pcinfo->rx_cnt++;
+ pcinfo->req_fver = ver->fcxslots;
break;
case BTC_RPT_TYPE_CYSTA:
pcinfo = &pfwinfo->rpt_fbtc_cysta.cinfo;
- if (chip->chip_id == RTL8852A) {
- pfinfo = &pfwinfo->rpt_fbtc_cysta.finfo;
- pcysta_le32 = &pfwinfo->rpt_fbtc_cysta.finfo;
- rtw89_btc_fbtc_cysta_to_cpu(pcysta_le32, pcysta);
- pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_cysta.finfo);
+ pcysta = &pfwinfo->rpt_fbtc_cysta.finfo;
+ if (ver->fcxcysta == 2) {
+ pfinfo = &pfwinfo->rpt_fbtc_cysta.finfo.v2;
+ pcysta->v2 = pfwinfo->rpt_fbtc_cysta.finfo.v2;
+ pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_cysta.finfo.v2);
+ } else if (ver->fcxcysta == 3) {
+ pfinfo = &pfwinfo->rpt_fbtc_cysta.finfo.v3;
+ pcysta->v3 = pfwinfo->rpt_fbtc_cysta.finfo.v3;
+ pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_cysta.finfo.v3);
+ } else if (ver->fcxcysta == 4) {
+ pfinfo = &pfwinfo->rpt_fbtc_cysta.finfo.v4;
+ pcysta->v4 = pfwinfo->rpt_fbtc_cysta.finfo.v4;
+ pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_cysta.finfo.v4);
} else {
- pfinfo = &pfwinfo->rpt_fbtc_cysta.finfo_v1;
- pcysta_v1 = &pfwinfo->rpt_fbtc_cysta.finfo_v1;
- pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_cysta.finfo_v1);
+ goto err;
}
- pcinfo->req_fver = chip->fcxcysta_ver;
- pcinfo->rx_len = rpt_len;
- pcinfo->rx_cnt++;
+ pcinfo->req_fver = ver->fcxcysta;
break;
case BTC_RPT_TYPE_STEP:
pcinfo = &pfwinfo->rpt_fbtc_step.cinfo;
- if (chip->chip_id == RTL8852A) {
- pfinfo = &pfwinfo->rpt_fbtc_step.finfo;
- pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_step.finfo.step[0]) *
+ if (ver->fcxstep == 2) {
+ pfinfo = &pfwinfo->rpt_fbtc_step.finfo.v2;
+ pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_step.finfo.v2.step[0]) *
trace_step +
- offsetof(struct rtw89_btc_fbtc_steps, step);
- } else {
- pfinfo = &pfwinfo->rpt_fbtc_step.finfo_v1;
- pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_step.finfo_v1.step[0]) *
+ offsetof(struct rtw89_btc_fbtc_steps_v2, step);
+ } else if (ver->fcxstep == 3) {
+ pfinfo = &pfwinfo->rpt_fbtc_step.finfo.v3;
+ pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_step.finfo.v3.step[0]) *
trace_step +
- offsetof(struct rtw89_btc_fbtc_steps_v1, step);
+ offsetof(struct rtw89_btc_fbtc_steps_v3, step);
+ } else {
+ goto err;
}
- pcinfo->req_fver = chip->fcxstep_ver;
- pcinfo->rx_len = rpt_len;
- pcinfo->rx_cnt++;
+ pcinfo->req_fver = ver->fcxstep;
break;
case BTC_RPT_TYPE_NULLSTA:
pcinfo = &pfwinfo->rpt_fbtc_nullsta.cinfo;
- if (chip->chip_id == RTL8852A) {
+ if (ver->fcxnullsta == 1) {
pfinfo = &pfwinfo->rpt_fbtc_nullsta.finfo;
- pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_nullsta.finfo);
+ pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_nullsta.finfo.v1);
+ } else if (ver->fcxnullsta == 2) {
+ pfinfo = &pfwinfo->rpt_fbtc_nullsta.finfo.v2;
+ pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_nullsta.finfo.v2);
} else {
- pfinfo = &pfwinfo->rpt_fbtc_nullsta.finfo_v1;
- pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_nullsta.finfo_v1);
+ goto err;
}
- pcinfo->req_fver = chip->fcxnullsta_ver;
- pcinfo->rx_len = rpt_len;
- pcinfo->rx_cnt++;
+ pcinfo->req_fver = ver->fcxnullsta;
break;
case BTC_RPT_TYPE_MREG:
pcinfo = &pfwinfo->rpt_fbtc_mregval.cinfo;
pfinfo = &pfwinfo->rpt_fbtc_mregval.finfo;
pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_mregval.finfo);
- pcinfo->req_fver = chip->fcxmreg_ver;
- pcinfo->rx_len = rpt_len;
- pcinfo->rx_cnt++;
+ pcinfo->req_fver = ver->fcxmreg;
break;
case BTC_RPT_TYPE_GPIO_DBG:
pcinfo = &pfwinfo->rpt_fbtc_gpio_dbg.cinfo;
pfinfo = &pfwinfo->rpt_fbtc_gpio_dbg.finfo;
pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_gpio_dbg.finfo);
- pcinfo->req_fver = chip->fcxgpiodbg_ver;
- pcinfo->rx_len = rpt_len;
- pcinfo->rx_cnt++;
+ pcinfo->req_fver = ver->fcxgpiodbg;
break;
case BTC_RPT_TYPE_BT_VER:
pcinfo = &pfwinfo->rpt_fbtc_btver.cinfo;
pfinfo = &pfwinfo->rpt_fbtc_btver.finfo;
pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_btver.finfo);
- pcinfo->req_fver = chip->fcxbtver_ver;
- pcinfo->rx_len = rpt_len;
- pcinfo->rx_cnt++;
+ pcinfo->req_fver = ver->fcxbtver;
break;
case BTC_RPT_TYPE_BT_SCAN:
pcinfo = &pfwinfo->rpt_fbtc_btscan.cinfo;
pfinfo = &pfwinfo->rpt_fbtc_btscan.finfo;
pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_btscan.finfo);
- pcinfo->req_fver = chip->fcxbtscan_ver;
- pcinfo->rx_len = rpt_len;
- pcinfo->rx_cnt++;
+ pcinfo->req_fver = ver->fcxbtscan;
break;
case BTC_RPT_TYPE_BT_AFH:
pcinfo = &pfwinfo->rpt_fbtc_btafh.cinfo;
- pfinfo = &pfwinfo->rpt_fbtc_btafh.finfo;
- pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_btafh.finfo);
- pcinfo->req_fver = chip->fcxbtafh_ver;
- pcinfo->rx_len = rpt_len;
- pcinfo->rx_cnt++;
+ if (ver->fcxbtafh == 1) {
+ pfinfo = &pfwinfo->rpt_fbtc_btafh.finfo.v1;
+ pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_btafh.finfo.v1);
+ } else if (ver->fcxbtafh == 2) {
+ pfinfo = &pfwinfo->rpt_fbtc_btafh.finfo.v2;
+ pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_btafh.finfo.v2);
+ } else {
+ goto err;
+ }
+ pcinfo->req_fver = ver->fcxbtafh;
break;
case BTC_RPT_TYPE_BT_DEVICE:
pcinfo = &pfwinfo->rpt_fbtc_btdev.cinfo;
pfinfo = &pfwinfo->rpt_fbtc_btdev.finfo;
pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_btdev.finfo);
- pcinfo->req_fver = chip->fcxbtdevinfo_ver;
- pcinfo->rx_len = rpt_len;
- pcinfo->rx_cnt++;
+ pcinfo->req_fver = ver->fcxbtdevinfo;
break;
default:
pfwinfo->err[BTFRE_UNDEF_TYPE]++;
return 0;
}
+ pcinfo->rx_len = rpt_len;
+ pcinfo->rx_cnt++;
+
if (rpt_len != pcinfo->req_len) {
if (rpt_type < BTC_RPT_TYPE_MAX)
pfwinfo->len_mismch |= (0x1 << rpt_type);
@@ -1102,235 +1120,257 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
memcpy(pfinfo, rpt_content, pcinfo->req_len);
pcinfo->valid = 1;
- if (rpt_type == BTC_RPT_TYPE_TDMA && chip->chip_id == RTL8852A) {
- rtw89_debug(rtwdev, RTW89_DBG_BTC,
- "[BTC], %s(): check %d %zu\n", __func__,
- BTC_DCNT_TDMA_NONSYNC, sizeof(dm->tdma_now));
-
- if (memcmp(&dm->tdma_now, &pfwinfo->rpt_fbtc_tdma.finfo,
- sizeof(dm->tdma_now)) != 0) {
- rtw89_debug(rtwdev, RTW89_DBG_BTC,
- "[BTC], %s(): %d tdma_now %x %x %x %x %x %x %x %x\n",
- __func__, BTC_DCNT_TDMA_NONSYNC,
- dm->tdma_now.type, dm->tdma_now.rxflctrl,
- dm->tdma_now.txpause, dm->tdma_now.wtgle_n,
- dm->tdma_now.leak_n, dm->tdma_now.ext_ctrl,
- dm->tdma_now.rxflctrl_role,
- dm->tdma_now.option_ctrl);
-
- rtw89_debug(rtwdev, RTW89_DBG_BTC,
- "[BTC], %s(): %d rpt_fbtc_tdma %x %x %x %x %x %x %x %x\n",
- __func__, BTC_DCNT_TDMA_NONSYNC,
- pfwinfo->rpt_fbtc_tdma.finfo.type,
- pfwinfo->rpt_fbtc_tdma.finfo.rxflctrl,
- pfwinfo->rpt_fbtc_tdma.finfo.txpause,
- pfwinfo->rpt_fbtc_tdma.finfo.wtgle_n,
- pfwinfo->rpt_fbtc_tdma.finfo.leak_n,
- pfwinfo->rpt_fbtc_tdma.finfo.ext_ctrl,
- pfwinfo->rpt_fbtc_tdma.finfo.rxflctrl_role,
- pfwinfo->rpt_fbtc_tdma.finfo.option_ctrl);
- }
+ switch (rpt_type) {
+ case BTC_RPT_TYPE_CTRL:
+ if (ver->fcxbtcrpt == 1) {
+ prpt->v1 = pfwinfo->rpt_ctrl.finfo.v1;
+ btc->fwinfo.rpt_en_map = prpt->v1.rpt_enable;
+ wl->ver_info.fw_coex = prpt->v1.wl_fw_coex_ver;
+ wl->ver_info.fw = prpt->v1.wl_fw_ver;
+ dm->wl_fw_cx_offload = !!prpt->v1.wl_fw_cx_offload;
+
+ _chk_btc_err(rtwdev, BTC_DCNT_RPT_FREEZE,
+ pfwinfo->event[BTF_EVNT_RPT]);
+
+ /* To avoid I/O if WL LPS or power-off */
+ if (wl->status.map.lps != BTC_LPS_RF_OFF &&
+ !wl->status.map.rf_off) {
+ rtwdev->chip->ops->btc_update_bt_cnt(rtwdev);
+ _chk_btc_err(rtwdev, BTC_DCNT_BTCNT_FREEZE, 0);
+
+ btc->cx.cnt_bt[BTC_BCNT_POLUT] =
+ rtw89_mac_get_plt_cnt(rtwdev,
+ RTW89_MAC_0);
+ }
+ } else if (ver->fcxbtcrpt == 4) {
+ prpt->v4 = pfwinfo->rpt_ctrl.finfo.v4;
+ btc->fwinfo.rpt_en_map = le32_to_cpu(prpt->v4.rpt_info.en);
+ wl->ver_info.fw_coex = le32_to_cpu(prpt->v4.wl_fw_info.cx_ver);
+ wl->ver_info.fw = le32_to_cpu(prpt->v4.wl_fw_info.fw_ver);
+ dm->wl_fw_cx_offload = !!le32_to_cpu(prpt->v4.wl_fw_info.cx_offload);
+
+ for (i = RTW89_PHY_0; i < RTW89_PHY_MAX; i++)
+ memcpy(&dm->gnt.band[i], &prpt->v4.gnt_val[i],
+ sizeof(dm->gnt.band[i]));
+
+ btc->cx.cnt_bt[BTC_BCNT_HIPRI_TX] =
+ le32_to_cpu(prpt->v4.bt_cnt[BTC_BCNT_HI_TX]);
+ btc->cx.cnt_bt[BTC_BCNT_HIPRI_RX] =
+ le32_to_cpu(prpt->v4.bt_cnt[BTC_BCNT_HI_RX]);
+ btc->cx.cnt_bt[BTC_BCNT_LOPRI_TX] =
+ le32_to_cpu(prpt->v4.bt_cnt[BTC_BCNT_LO_TX]);
+ btc->cx.cnt_bt[BTC_BCNT_LOPRI_RX] =
+ le32_to_cpu(prpt->v4.bt_cnt[BTC_BCNT_LO_RX]);
+ btc->cx.cnt_bt[BTC_BCNT_POLUT] =
+ le32_to_cpu(prpt->v4.bt_cnt[BTC_BCNT_POLLUTED]);
- _chk_btc_err(rtwdev, BTC_DCNT_TDMA_NONSYNC,
- memcmp(&dm->tdma_now,
- &pfwinfo->rpt_fbtc_tdma.finfo,
- sizeof(dm->tdma_now)));
- } else if (rpt_type == BTC_RPT_TYPE_TDMA) {
- rtw89_debug(rtwdev, RTW89_DBG_BTC,
- "[BTC], %s(): check %d %zu\n", __func__,
- BTC_DCNT_TDMA_NONSYNC, sizeof(dm->tdma_now));
-
- if (memcmp(&dm->tdma_now, &pfwinfo->rpt_fbtc_tdma.finfo_v1.tdma,
- sizeof(dm->tdma_now)) != 0) {
- rtw89_debug(rtwdev, RTW89_DBG_BTC,
- "[BTC], %s(): %d tdma_now %x %x %x %x %x %x %x %x\n",
- __func__, BTC_DCNT_TDMA_NONSYNC,
- dm->tdma_now.type, dm->tdma_now.rxflctrl,
- dm->tdma_now.txpause, dm->tdma_now.wtgle_n,
- dm->tdma_now.leak_n, dm->tdma_now.ext_ctrl,
- dm->tdma_now.rxflctrl_role,
- dm->tdma_now.option_ctrl);
- rtw89_debug(rtwdev, RTW89_DBG_BTC,
- "[BTC], %s(): %d rpt_fbtc_tdma %x %x %x %x %x %x %x %x\n",
- __func__, BTC_DCNT_TDMA_NONSYNC,
- pfwinfo->rpt_fbtc_tdma.finfo_v1.tdma.type,
- pfwinfo->rpt_fbtc_tdma.finfo_v1.tdma.rxflctrl,
- pfwinfo->rpt_fbtc_tdma.finfo_v1.tdma.txpause,
- pfwinfo->rpt_fbtc_tdma.finfo_v1.tdma.wtgle_n,
- pfwinfo->rpt_fbtc_tdma.finfo_v1.tdma.leak_n,
- pfwinfo->rpt_fbtc_tdma.finfo_v1.tdma.ext_ctrl,
- pfwinfo->rpt_fbtc_tdma.finfo_v1.tdma.rxflctrl_role,
- pfwinfo->rpt_fbtc_tdma.finfo_v1.tdma.option_ctrl);
- }
+ _chk_btc_err(rtwdev, BTC_DCNT_BTCNT_FREEZE, 0);
+ _chk_btc_err(rtwdev, BTC_DCNT_RPT_FREEZE,
+ pfwinfo->event[BTF_EVNT_RPT]);
- _chk_btc_err(rtwdev, BTC_DCNT_TDMA_NONSYNC,
- memcmp(&dm->tdma_now,
- &pfwinfo->rpt_fbtc_tdma.finfo_v1.tdma,
- sizeof(dm->tdma_now)));
- }
+ if (le32_to_cpu(prpt->v4.bt_cnt[BTC_BCNT_RFK_TIMEOUT]) > 0)
+ bt->rfk_info.map.timeout = 1;
+ else
+ bt->rfk_info.map.timeout = 0;
+
+ dm->error.map.bt_rfk_timeout = bt->rfk_info.map.timeout;
+ } else if (ver->fcxbtcrpt == 5) {
+ prpt->v5 = pfwinfo->rpt_ctrl.finfo.v5;
+ pfwinfo->rpt_en_map = le32_to_cpu(prpt->v5.rpt_info.en);
+ wl->ver_info.fw_coex = le32_to_cpu(prpt->v5.rpt_info.cx_ver);
+ wl->ver_info.fw = le32_to_cpu(prpt->v5.rpt_info.fw_ver);
+ dm->wl_fw_cx_offload = 0;
+
+ for (i = RTW89_PHY_0; i < RTW89_PHY_MAX; i++)
+ memcpy(&dm->gnt.band[i], &prpt->v5.gnt_val[i][0],
+ sizeof(dm->gnt.band[i]));
+
+ btc->cx.cnt_bt[BTC_BCNT_HIPRI_TX] =
+ le16_to_cpu(prpt->v5.bt_cnt[BTC_BCNT_HI_TX]);
+ btc->cx.cnt_bt[BTC_BCNT_HIPRI_RX] =
+ le16_to_cpu(prpt->v5.bt_cnt[BTC_BCNT_HI_RX]);
+ btc->cx.cnt_bt[BTC_BCNT_LOPRI_TX] =
+ le16_to_cpu(prpt->v5.bt_cnt[BTC_BCNT_LO_TX]);
+ btc->cx.cnt_bt[BTC_BCNT_LOPRI_RX] =
+ le16_to_cpu(prpt->v5.bt_cnt[BTC_BCNT_LO_RX]);
+ btc->cx.cnt_bt[BTC_BCNT_POLUT] =
+ le16_to_cpu(prpt->v5.bt_cnt[BTC_BCNT_POLLUTED]);
+
+ _chk_btc_err(rtwdev, BTC_DCNT_BTCNT_FREEZE, 0);
+ _chk_btc_err(rtwdev, BTC_DCNT_RPT_FREEZE,
+ pfwinfo->event[BTF_EVNT_RPT]);
- if (rpt_type == BTC_RPT_TYPE_SLOT) {
+ dm->error.map.bt_rfk_timeout = bt->rfk_info.map.timeout;
+ } else {
+ goto err;
+ }
+ break;
+ case BTC_RPT_TYPE_TDMA:
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s(): check %d %zu\n", __func__,
+ BTC_DCNT_TDMA_NONSYNC,
+ sizeof(dm->tdma_now));
+ if (ver->fcxtdma == 1)
+ _chk_btc_err(rtwdev, BTC_DCNT_TDMA_NONSYNC,
+ memcmp(&dm->tdma_now,
+ &pfwinfo->rpt_fbtc_tdma.finfo.v1,
+ sizeof(dm->tdma_now)));
+ else if (ver->fcxtdma == 3)
+ _chk_btc_err(rtwdev, BTC_DCNT_TDMA_NONSYNC,
+ memcmp(&dm->tdma_now,
+ &pfwinfo->rpt_fbtc_tdma.finfo.v3.tdma,
+ sizeof(dm->tdma_now)));
+ else
+ goto err;
+ break;
+ case BTC_RPT_TYPE_SLOT:
rtw89_debug(rtwdev, RTW89_DBG_BTC,
"[BTC], %s(): check %d %zu\n",
__func__, BTC_DCNT_SLOT_NONSYNC,
sizeof(dm->slot_now));
-
- if (memcmp(dm->slot_now, pfwinfo->rpt_fbtc_slots.finfo.slot,
- sizeof(dm->slot_now)) != 0) {
- for (i = 0; i < CXST_MAX; i++) {
- rtp_slot =
- &pfwinfo->rpt_fbtc_slots.finfo.slot[i];
- if (memcmp(&dm->slot_now[i], rtp_slot,
- sizeof(dm->slot_now[i])) != 0) {
- rtw89_debug(rtwdev, RTW89_DBG_BTC,
- "[BTC], %s(): %d slot_now[%d] dur=0x%04x tbl=%08x type=0x%04x\n",
- __func__,
- BTC_DCNT_SLOT_NONSYNC, i,
- dm->slot_now[i].dur,
- dm->slot_now[i].cxtbl,
- dm->slot_now[i].cxtype);
-
- rtw89_debug(rtwdev, RTW89_DBG_BTC,
- "[BTC], %s(): %d rpt_fbtc_slots[%d] dur=0x%04x tbl=%08x type=0x%04x\n",
- __func__,
- BTC_DCNT_SLOT_NONSYNC, i,
- rtp_slot->dur,
- rtp_slot->cxtbl,
- rtp_slot->cxtype);
- }
- }
- }
_chk_btc_err(rtwdev, BTC_DCNT_SLOT_NONSYNC,
memcmp(dm->slot_now,
pfwinfo->rpt_fbtc_slots.finfo.slot,
sizeof(dm->slot_now)));
- }
+ break;
+ case BTC_RPT_TYPE_CYSTA:
+ if (ver->fcxcysta == 2) {
+ if (le16_to_cpu(pcysta->v2.cycles) < BTC_CYSTA_CHK_PERIOD)
+ break;
+ /* Check Leak-AP */
+ if (le32_to_cpu(pcysta->v2.slot_cnt[CXST_LK]) != 0 &&
+ le32_to_cpu(pcysta->v2.leakrx_cnt) != 0 && dm->tdma_now.rxflctrl) {
+ if (le32_to_cpu(pcysta->v2.slot_cnt[CXST_LK]) <
+ BTC_LEAK_AP_TH * le32_to_cpu(pcysta->v2.leakrx_cnt))
+ dm->leak_ap = 1;
+ }
- if (rpt_type == BTC_RPT_TYPE_CYSTA && chip->chip_id == RTL8852A &&
- pcysta->cycles >= BTC_CYSTA_CHK_PERIOD) {
- /* Check Leak-AP */
- if (pcysta->slot_cnt[CXST_LK] != 0 &&
- pcysta->leakrx_cnt != 0 && dm->tdma_now.rxflctrl) {
- if (pcysta->slot_cnt[CXST_LK] <
- BTC_LEAK_AP_TH * pcysta->leakrx_cnt)
- dm->leak_ap = 1;
- }
+ /* Check diff time between WL slot and W1/E2G slot */
+ if (dm->tdma_now.type == CXTDMA_OFF &&
+ dm->tdma_now.ext_ctrl == CXECTL_EXT)
+ wl_slot_set = le16_to_cpu(dm->slot_now[CXST_E2G].dur);
+ else
+ wl_slot_set = le16_to_cpu(dm->slot_now[CXST_W1].dur);
- /* Check diff time between WL slot and W1/E2G slot */
- if (dm->tdma_now.type == CXTDMA_OFF &&
- dm->tdma_now.ext_ctrl == CXECTL_EXT)
- wl_slot_set = le16_to_cpu(dm->slot_now[CXST_E2G].dur);
- else
- wl_slot_set = le16_to_cpu(dm->slot_now[CXST_W1].dur);
+ if (le16_to_cpu(pcysta->v2.tavg_cycle[CXT_WL]) > wl_slot_set) {
+ diff_t = le16_to_cpu(pcysta->v2.tavg_cycle[CXT_WL]) - wl_slot_set;
+ _chk_btc_err(rtwdev,
+ BTC_DCNT_WL_SLOT_DRIFT, diff_t);
+ }
- if (pcysta->tavg_cycle[CXT_WL] > wl_slot_set) {
- diff_t = pcysta->tavg_cycle[CXT_WL] - wl_slot_set;
- _chk_btc_err(rtwdev, BTC_DCNT_WL_SLOT_DRIFT, diff_t);
- }
+ _chk_btc_err(rtwdev, BTC_DCNT_W1_FREEZE,
+ le32_to_cpu(pcysta->v2.slot_cnt[CXST_W1]));
+ _chk_btc_err(rtwdev, BTC_DCNT_W1_FREEZE,
+ le32_to_cpu(pcysta->v2.slot_cnt[CXST_B1]));
+ _chk_btc_err(rtwdev, BTC_DCNT_CYCLE_FREEZE,
+ le16_to_cpu(pcysta->v2.cycles));
+ } else if (ver->fcxcysta == 3) {
+ if (le16_to_cpu(pcysta->v3.cycles) < BTC_CYSTA_CHK_PERIOD)
+ break;
- _chk_btc_err(rtwdev, BTC_DCNT_W1_FREEZE, pcysta->slot_cnt[CXST_W1]);
- _chk_btc_err(rtwdev, BTC_DCNT_W1_FREEZE, pcysta->slot_cnt[CXST_B1]);
- _chk_btc_err(rtwdev, BTC_DCNT_CYCLE_FREEZE, (u32)pcysta->cycles);
- } else if (rpt_type == BTC_RPT_TYPE_CYSTA && pcysta_v1 &&
- le16_to_cpu(pcysta_v1->cycles) >= BTC_CYSTA_CHK_PERIOD) {
- cnt_leak_slot = le32_to_cpu(pcysta_v1->slot_cnt[CXST_LK]);
- cnt_rx_imr = le32_to_cpu(pcysta_v1->leak_slot.cnt_rximr);
- /* Check Leak-AP */
- if (cnt_leak_slot != 0 && cnt_rx_imr != 0 &&
- dm->tdma_now.rxflctrl) {
- if (cnt_leak_slot < BTC_LEAK_AP_TH * cnt_rx_imr)
- dm->leak_ap = 1;
- }
+ cnt_leak_slot = le32_to_cpu(pcysta->v3.slot_cnt[CXST_LK]);
+ cnt_rx_imr = le32_to_cpu(pcysta->v3.leak_slot.cnt_rximr);
- /* Check diff time between real WL slot and W1 slot */
- if (dm->tdma_now.type == CXTDMA_OFF) {
- wl_slot_set = le16_to_cpu(dm->slot_now[CXST_W1].dur);
- wl_slot_real = le16_to_cpu(pcysta_v1->cycle_time.tavg[CXT_WL]);
- if (wl_slot_real > wl_slot_set) {
- diff_t = wl_slot_real - wl_slot_set;
- _chk_btc_err(rtwdev, BTC_DCNT_WL_SLOT_DRIFT, diff_t);
+ /* Check Leak-AP */
+ if (cnt_leak_slot != 0 && cnt_rx_imr != 0 &&
+ dm->tdma_now.rxflctrl) {
+ if (cnt_leak_slot < BTC_LEAK_AP_TH * cnt_rx_imr)
+ dm->leak_ap = 1;
}
- }
- /* Check diff time between real BT slot and EBT/E5G slot */
- if (dm->tdma_now.type == CXTDMA_OFF &&
- dm->tdma_now.ext_ctrl == CXECTL_EXT &&
- btc->bt_req_len != 0) {
- bt_slot_real = le16_to_cpu(pcysta_v1->cycle_time.tavg[CXT_BT]);
+ /* Check diff time between real WL slot and W1 slot */
+ if (dm->tdma_now.type == CXTDMA_OFF) {
+ wl_slot_set = le16_to_cpu(dm->slot_now[CXST_W1].dur);
+ wl_slot_real = le16_to_cpu(pcysta->v3.cycle_time.tavg[CXT_WL]);
+ if (wl_slot_real > wl_slot_set) {
+ diff_t = wl_slot_real - wl_slot_set;
+ _chk_btc_err(rtwdev, BTC_DCNT_WL_SLOT_DRIFT, diff_t);
+ }
+ }
- if (btc->bt_req_len > bt_slot_real) {
- diff_t = btc->bt_req_len - bt_slot_real;
- _chk_btc_err(rtwdev, BTC_DCNT_BT_SLOT_DRIFT, diff_t);
+ /* Check diff time between real BT slot and EBT/E5G slot */
+ if (dm->tdma_now.type == CXTDMA_OFF &&
+ dm->tdma_now.ext_ctrl == CXECTL_EXT &&
+ btc->bt_req_len != 0) {
+ bt_slot_real = le16_to_cpu(pcysta->v3.cycle_time.tavg[CXT_BT]);
+ if (btc->bt_req_len > bt_slot_real) {
+ diff_t = btc->bt_req_len - bt_slot_real;
+ _chk_btc_err(rtwdev, BTC_DCNT_BT_SLOT_DRIFT, diff_t);
+ }
}
- }
- _chk_btc_err(rtwdev, BTC_DCNT_W1_FREEZE,
- le32_to_cpu(pcysta_v1->slot_cnt[CXST_W1]));
- _chk_btc_err(rtwdev, BTC_DCNT_B1_FREEZE,
- le32_to_cpu(pcysta_v1->slot_cnt[CXST_B1]));
- _chk_btc_err(rtwdev, BTC_DCNT_CYCLE_FREEZE,
- (u32)le16_to_cpu(pcysta_v1->cycles));
- }
+ _chk_btc_err(rtwdev, BTC_DCNT_W1_FREEZE,
+ le32_to_cpu(pcysta->v3.slot_cnt[CXST_W1]));
+ _chk_btc_err(rtwdev, BTC_DCNT_B1_FREEZE,
+ le32_to_cpu(pcysta->v3.slot_cnt[CXST_B1]));
+ _chk_btc_err(rtwdev, BTC_DCNT_CYCLE_FREEZE,
+ le16_to_cpu(pcysta->v3.cycles));
+ } else if (ver->fcxcysta == 4) {
+ if (le16_to_cpu(pcysta->v4.cycles) < BTC_CYSTA_CHK_PERIOD)
+ break;
- if (rpt_type == BTC_RPT_TYPE_CTRL && chip->chip_id == RTL8852A) {
- prpt = &pfwinfo->rpt_ctrl.finfo;
- btc->fwinfo.rpt_en_map = prpt->rpt_enable;
- wl->ver_info.fw_coex = prpt->wl_fw_coex_ver;
- wl->ver_info.fw = prpt->wl_fw_ver;
- dm->wl_fw_cx_offload = !!prpt->wl_fw_cx_offload;
+ cnt_leak_slot = le16_to_cpu(pcysta->v4.slot_cnt[CXST_LK]);
+ cnt_rx_imr = le32_to_cpu(pcysta->v4.leak_slot.cnt_rximr);
- _chk_btc_err(rtwdev, BTC_DCNT_RPT_FREEZE,
- pfwinfo->event[BTF_EVNT_RPT]);
+ /* Check Leak-AP */
+ if (cnt_leak_slot != 0 && cnt_rx_imr != 0 &&
+ dm->tdma_now.rxflctrl) {
+ if (cnt_leak_slot < BTC_LEAK_AP_TH * cnt_rx_imr)
+ dm->leak_ap = 1;
+ }
- /* To avoid I/O if WL LPS or power-off */
- if (wl->status.map.lps != BTC_LPS_RF_OFF && !wl->status.map.rf_off) {
- rtwdev->chip->ops->btc_update_bt_cnt(rtwdev);
- _chk_btc_err(rtwdev, BTC_DCNT_BTCNT_FREEZE, 0);
+ /* Check diff time between real WL slot and W1 slot */
+ if (dm->tdma_now.type == CXTDMA_OFF) {
+ wl_slot_set = le16_to_cpu(dm->slot_now[CXST_W1].dur);
+ wl_slot_real = le16_to_cpu(pcysta->v4.cycle_time.tavg[CXT_WL]);
+ if (wl_slot_real > wl_slot_set) {
+ diff_t = wl_slot_real - wl_slot_set;
+ _chk_btc_err(rtwdev, BTC_DCNT_WL_SLOT_DRIFT, diff_t);
+ }
+ }
- btc->cx.cnt_bt[BTC_BCNT_POLUT] =
- rtw89_mac_get_plt_cnt(rtwdev, RTW89_MAC_0);
- }
- } else if (rpt_type == BTC_RPT_TYPE_CTRL) {
- prpt_v1 = &pfwinfo->rpt_ctrl.finfo_v1;
- btc->fwinfo.rpt_en_map = le32_to_cpu(prpt_v1->rpt_info.en);
- wl->ver_info.fw_coex = le32_to_cpu(prpt_v1->wl_fw_info.cx_ver);
- wl->ver_info.fw = le32_to_cpu(prpt_v1->wl_fw_info.fw_ver);
- dm->wl_fw_cx_offload = !!le32_to_cpu(prpt_v1->wl_fw_info.cx_offload);
-
- for (i = RTW89_PHY_0; i < RTW89_PHY_MAX; i++)
- memcpy(&dm->gnt.band[i], &prpt_v1->gnt_val[i],
- sizeof(dm->gnt.band[i]));
-
- btc->cx.cnt_bt[BTC_BCNT_HIPRI_TX] = le32_to_cpu(prpt_v1->bt_cnt[BTC_BCNT_HI_TX]);
- btc->cx.cnt_bt[BTC_BCNT_HIPRI_RX] = le32_to_cpu(prpt_v1->bt_cnt[BTC_BCNT_HI_RX]);
- btc->cx.cnt_bt[BTC_BCNT_LOPRI_TX] = le32_to_cpu(prpt_v1->bt_cnt[BTC_BCNT_LO_TX]);
- btc->cx.cnt_bt[BTC_BCNT_LOPRI_RX] = le32_to_cpu(prpt_v1->bt_cnt[BTC_BCNT_LO_RX]);
- btc->cx.cnt_bt[BTC_BCNT_POLUT] = le32_to_cpu(prpt_v1->bt_cnt[BTC_BCNT_POLLUTED]);
-
- _chk_btc_err(rtwdev, BTC_DCNT_BTCNT_FREEZE, 0);
- _chk_btc_err(rtwdev, BTC_DCNT_RPT_FREEZE,
- pfwinfo->event[BTF_EVNT_RPT]);
-
- if (le32_to_cpu(prpt_v1->bt_cnt[BTC_BCNT_RFK_TIMEOUT]) > 0)
- bt->rfk_info.map.timeout = 1;
- else
- bt->rfk_info.map.timeout = 0;
+ /* Check diff time between real BT slot and EBT/E5G slot */
+ if (dm->tdma_now.type == CXTDMA_OFF &&
+ dm->tdma_now.ext_ctrl == CXECTL_EXT &&
+ btc->bt_req_len != 0) {
+ bt_slot_real = le16_to_cpu(pcysta->v4.cycle_time.tavg[CXT_BT]);
- dm->error.map.bt_rfk_timeout = bt->rfk_info.map.timeout;
- }
+ if (btc->bt_req_len > bt_slot_real) {
+ diff_t = btc->bt_req_len - bt_slot_real;
+ _chk_btc_err(rtwdev, BTC_DCNT_BT_SLOT_DRIFT, diff_t);
+ }
+ }
- if (rpt_type >= BTC_RPT_TYPE_BT_VER &&
- rpt_type <= BTC_RPT_TYPE_BT_DEVICE)
+ _chk_btc_err(rtwdev, BTC_DCNT_W1_FREEZE,
+ le16_to_cpu(pcysta->v4.slot_cnt[CXST_W1]));
+ _chk_btc_err(rtwdev, BTC_DCNT_B1_FREEZE,
+ le16_to_cpu(pcysta->v4.slot_cnt[CXST_B1]));
+ _chk_btc_err(rtwdev, BTC_DCNT_CYCLE_FREEZE,
+ le16_to_cpu(pcysta->v4.cycles));
+ } else {
+ goto err;
+ }
+ break;
+ case BTC_RPT_TYPE_BT_VER:
+ case BTC_RPT_TYPE_BT_SCAN:
+ case BTC_RPT_TYPE_BT_AFH:
+ case BTC_RPT_TYPE_BT_DEVICE:
_update_bt_report(rtwdev, rpt_type, pfinfo);
-
+ break;
+ }
return (rpt_len + BTC_RPT_HDR_SIZE);
+
+err:
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s(): Undefined version for type=%d\n", __func__, rpt_type);
+ return 0;
}
static void _parse_btc_report(struct rtw89_dev *rtwdev,
struct rtw89_btc_btf_fwinfo *pfwinfo,
u8 *pbuf, u32 buf_len)
{
- const struct rtw89_chip_info *chip = rtwdev->chip;
+ const struct rtw89_btc_ver *ver = rtwdev->btc.ver;
struct rtw89_btc_prpt *btc_prpt = NULL;
u32 index = 0, rpt_len = 0;
@@ -1340,7 +1380,7 @@ static void _parse_btc_report(struct rtw89_dev *rtwdev,
while (pbuf) {
btc_prpt = (struct rtw89_btc_prpt *)&pbuf[index];
- if (index + 2 >= chip->btc_fwinfo_buf)
+ if (index + 2 >= ver->info_buf)
break;
/* At least 3 bytes: type(1) & len(2) */
rpt_len = le16_to_cpu(btc_prpt->len);
@@ -1358,12 +1398,12 @@ static void _parse_btc_report(struct rtw89_dev *rtwdev,
static void _append_tdma(struct rtw89_dev *rtwdev)
{
- const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_btc *btc = &rtwdev->btc;
+ const struct rtw89_btc_ver *ver = btc->ver;
struct rtw89_btc_dm *dm = &btc->dm;
struct rtw89_btc_btf_tlv *tlv;
struct rtw89_btc_fbtc_tdma *v;
- struct rtw89_btc_fbtc_tdma_v1 *v1;
+ struct rtw89_btc_fbtc_tdma_v3 *v3;
u16 len = btc->policy_len;
if (!btc->update_policy_force &&
@@ -1376,17 +1416,17 @@ static void _append_tdma(struct rtw89_dev *rtwdev)
tlv = (struct rtw89_btc_btf_tlv *)&btc->policy[len];
tlv->type = CXPOLICY_TDMA;
- if (chip->chip_id == RTL8852A) {
+ if (ver->fcxtdma == 1) {
v = (struct rtw89_btc_fbtc_tdma *)&tlv->val[0];
tlv->len = sizeof(*v);
memcpy(v, &dm->tdma, sizeof(*v));
- btc->policy_len += BTC_TLV_HDR_LEN + sizeof(*v);
+ btc->policy_len += BTC_TLV_HDR_LEN + sizeof(*v);
} else {
- tlv->len = sizeof(*v1);
- v1 = (struct rtw89_btc_fbtc_tdma_v1 *)&tlv->val[0];
- v1->fver = chip->fcxtdma_ver;
- v1->tdma = dm->tdma;
- btc->policy_len += BTC_TLV_HDR_LEN + sizeof(*v1);
+ tlv->len = sizeof(*v3);
+ v3 = (struct rtw89_btc_fbtc_tdma_v3 *)&tlv->val[0];
+ v3->fver = ver->fcxtdma;
+ memcpy(&v3->tdma, &dm->tdma, sizeof(v3->tdma));
+ btc->policy_len += BTC_TLV_HDR_LEN + sizeof(*v3);
}
rtw89_debug(rtwdev, RTW89_DBG_BTC,
@@ -1441,22 +1481,169 @@ static void _append_slot(struct rtw89_dev *rtwdev)
__func__, cnt);
}
+static u32 rtw89_btc_fw_rpt_ver(struct rtw89_dev *rtwdev, u32 rpt_map)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ const struct rtw89_btc_ver *ver = btc->ver;
+ u32 bit_map = 0;
+
+ switch (rpt_map) {
+ case RPT_EN_TDMA:
+ bit_map = BIT(0);
+ break;
+ case RPT_EN_CYCLE:
+ bit_map = BIT(1);
+ break;
+ case RPT_EN_MREG:
+ bit_map = BIT(2);
+ break;
+ case RPT_EN_BT_VER_INFO:
+ bit_map = BIT(3);
+ break;
+ case RPT_EN_BT_SCAN_INFO:
+ bit_map = BIT(4);
+ break;
+ case RPT_EN_BT_DEVICE_INFO:
+ switch (ver->frptmap) {
+ case 0:
+ case 1:
+ case 2:
+ bit_map = BIT(6);
+ break;
+ case 3:
+ bit_map = BIT(5);
+ break;
+ default:
+ break;
+ }
+ break;
+ case RPT_EN_BT_AFH_MAP:
+ switch (ver->frptmap) {
+ case 0:
+ case 1:
+ case 2:
+ bit_map = BIT(5);
+ break;
+ case 3:
+ bit_map = BIT(6);
+ break;
+ default:
+ break;
+ }
+ break;
+ case RPT_EN_BT_AFH_MAP_LE:
+ switch (ver->frptmap) {
+ case 2:
+ bit_map = BIT(8);
+ break;
+ case 3:
+ bit_map = BIT(7);
+ break;
+ default:
+ break;
+ }
+ break;
+ case RPT_EN_FW_STEP_INFO:
+ switch (ver->frptmap) {
+ case 1:
+ case 2:
+ bit_map = BIT(7);
+ break;
+ case 3:
+ bit_map = BIT(8);
+ break;
+ default:
+ break;
+ }
+ break;
+ case RPT_EN_TEST:
+ bit_map = BIT(31);
+ break;
+ case RPT_EN_WL_ALL:
+ switch (ver->frptmap) {
+ case 0:
+ case 1:
+ case 2:
+ bit_map = GENMASK(2, 0);
+ break;
+ case 3:
+ bit_map = GENMASK(2, 0) | BIT(8);
+ break;
+ default:
+ break;
+ }
+ break;
+ case RPT_EN_BT_ALL:
+ switch (ver->frptmap) {
+ case 0:
+ case 1:
+ bit_map = GENMASK(6, 3);
+ break;
+ case 2:
+ bit_map = GENMASK(6, 3) | BIT(8);
+ break;
+ case 3:
+ bit_map = GENMASK(7, 3);
+ break;
+ default:
+ break;
+ }
+ break;
+ case RPT_EN_ALL:
+ switch (ver->frptmap) {
+ case 0:
+ bit_map = GENMASK(6, 0);
+ break;
+ case 1:
+ bit_map = GENMASK(7, 0);
+ break;
+ case 2:
+ case 3:
+ bit_map = GENMASK(8, 0);
+ break;
+ default:
+ break;
+ }
+ break;
+ case RPT_EN_MONITER:
+ switch (ver->frptmap) {
+ case 0:
+ case 1:
+ bit_map = GENMASK(6, 2);
+ break;
+ case 2:
+ bit_map = GENMASK(6, 2) | BIT(8);
+ break;
+ case 3:
+ bit_map = GENMASK(8, 2);
+ break;
+ default:
+ break;
+ }
+ break;
+ }
+
+ return bit_map;
+}
+
static void rtw89_btc_fw_en_rpt(struct rtw89_dev *rtwdev,
u32 rpt_map, bool rpt_state)
{
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_btf_fwinfo *fwinfo = &btc->fwinfo;
struct rtw89_btc_btf_set_report r = {0};
- u32 val = 0;
+ u32 val, bit_map;
+
+ bit_map = rtw89_btc_fw_rpt_ver(rtwdev, rpt_map);
rtw89_debug(rtwdev, RTW89_DBG_BTC,
"[BTC], %s(): rpt_map=%x, rpt_state=%x\n",
__func__, rpt_map, rpt_state);
if (rpt_state)
- val = fwinfo->rpt_en_map | rpt_map;
+ val = fwinfo->rpt_en_map | bit_map;
else
- val = fwinfo->rpt_en_map & ~rpt_map;
+ val = fwinfo->rpt_en_map & ~bit_map;
if (val == fwinfo->rpt_en_map)
return;
@@ -1593,16 +1780,17 @@ static void _fw_set_policy(struct rtw89_dev *rtwdev, u16 policy_type,
static void _fw_set_drv_info(struct rtw89_dev *rtwdev, u8 type)
{
- const struct rtw89_chip_info *chip = rtwdev->chip;
+ struct rtw89_btc *btc = &rtwdev->btc;
+ const struct rtw89_btc_ver *ver = btc->ver;
switch (type) {
case CXDRVINFO_INIT:
rtw89_fw_h2c_cxdrv_init(rtwdev);
break;
case CXDRVINFO_ROLE:
- if (chip->chip_id == RTL8852A)
+ if (ver->fwlrole == 0)
rtw89_fw_h2c_cxdrv_role(rtwdev);
- else
+ else if (ver->fwlrole == 1)
rtw89_fw_h2c_cxdrv_role_v1(rtwdev);
break;
case CXDRVINFO_CTRL:
@@ -1892,6 +2080,7 @@ static void _set_bt_afh_info(struct rtw89_dev *rtwdev)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_btc *btc = &rtwdev->btc;
+ const struct rtw89_btc_ver *ver = btc->ver;
struct rtw89_btc_wl_info *wl = &btc->cx.wl;
struct rtw89_btc_bt_info *bt = &btc->cx.bt;
struct rtw89_btc_bt_link_info *b = &bt->link_info;
@@ -1905,7 +2094,7 @@ static void _set_bt_afh_info(struct rtw89_dev *rtwdev)
if (btc->ctrl.manual || wl->status.map.scan)
return;
- if (chip->chip_id == RTL8852A) {
+ if (ver->fwlrole == 0) {
mode = wl_rinfo->link_mode;
connect_cnt = wl_rinfo->connect_cnt;
} else {
@@ -1924,13 +2113,13 @@ static void _set_bt_afh_info(struct rtw89_dev *rtwdev)
r = &wl_rinfo->active_role[i];
r1 = &wl_rinfo_v1->active_role_v1[i];
- if (chip->chip_id == RTL8852A &&
+ if (ver->fwlrole == 0 &&
(r->role == RTW89_WIFI_ROLE_P2P_GO ||
r->role == RTW89_WIFI_ROLE_P2P_CLIENT)) {
ch = r->ch;
bw = r->bw;
break;
- } else if (chip->chip_id != RTL8852A &&
+ } else if (ver->fwlrole == 1 &&
(r1->role == RTW89_WIFI_ROLE_P2P_GO ||
r1->role == RTW89_WIFI_ROLE_P2P_CLIENT)) {
ch = r1->ch;
@@ -1945,12 +2134,12 @@ static void _set_bt_afh_info(struct rtw89_dev *rtwdev)
r = &wl_rinfo->active_role[i];
r1 = &wl_rinfo_v1->active_role_v1[i];
- if (chip->chip_id == RTL8852A &&
+ if (ver->fwlrole == 0 &&
r->connected && r->band == RTW89_BAND_2G) {
ch = r->ch;
bw = r->bw;
break;
- } else if (chip->chip_id != RTL8852A &&
+ } else if (ver->fwlrole == 1 &&
r1->connected && r1->band == RTW89_BAND_2G) {
ch = r1->ch;
bw = r1->bw;
@@ -2517,15 +2706,16 @@ void rtw89_btc_set_policy_v1(struct rtw89_dev *rtwdev, u16 policy_type)
break;
case BTC_CXP_OFF_EQ0:
_slot_set_tbl(btc, CXST_OFF, cxtbl[0]);
+ _slot_set_type(btc, CXST_OFF, SLOT_ISO);
break;
case BTC_CXP_OFF_EQ1:
_slot_set_tbl(btc, CXST_OFF, cxtbl[16]);
break;
case BTC_CXP_OFF_EQ2:
- _slot_set_tbl(btc, CXST_OFF, cxtbl[17]);
+ _slot_set_tbl(btc, CXST_OFF, cxtbl[0]);
break;
case BTC_CXP_OFF_EQ3:
- _slot_set_tbl(btc, CXST_OFF, cxtbl[18]);
+ _slot_set_tbl(btc, CXST_OFF, cxtbl[24]);
break;
case BTC_CXP_OFF_BWB0:
_slot_set_tbl(btc, CXST_OFF, cxtbl[5]);
@@ -2581,6 +2771,7 @@ void rtw89_btc_set_policy_v1(struct rtw89_dev *rtwdev, u16 policy_type)
default:
break;
}
+ s[CXST_OFF] = s_def[CXST_OFF];
break;
case BTC_CXP_FIX: /* TDMA Fix-Slot */
_write_scbd(rtwdev, BTC_WSCB_TDMA, true);
@@ -2607,6 +2798,10 @@ void rtw89_btc_set_policy_v1(struct rtw89_dev *rtwdev, u16 policy_type)
_slot_set(btc, CXST_W1, 40, cxtbl[1], SLOT_ISO);
_slot_set(btc, CXST_B1, 10, tbl_b1, SLOT_MIX);
break;
+ case BTC_CXP_FIX_TD4020:
+ _slot_set(btc, CXST_W1, 40, cxtbl[1], SLOT_MIX);
+ _slot_set(btc, CXST_B1, 20, tbl_b1, SLOT_MIX);
+ break;
case BTC_CXP_FIX_TD7010:
_slot_set(btc, CXST_W1, 70, tbl_w1, SLOT_ISO);
_slot_set(btc, CXST_B1, 10, tbl_b1, SLOT_MIX);
@@ -3398,8 +3593,8 @@ static void _action_wl_rfk(struct rtw89_dev *rtwdev)
static void _set_btg_ctrl(struct rtw89_dev *rtwdev)
{
- const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_btc *btc = &rtwdev->btc;
+ const struct rtw89_btc_ver *ver = btc->ver;
struct rtw89_btc_wl_info *wl = &btc->cx.wl;
struct rtw89_btc_wl_role_info *wl_rinfo = &wl->role_info;
struct rtw89_btc_wl_role_info_v1 *wl_rinfo_v1 = &wl->role_info_v1;
@@ -3410,7 +3605,7 @@ static void _set_btg_ctrl(struct rtw89_dev *rtwdev)
if (btc->ctrl.manual)
return;
- if (chip->chip_id == RTL8852A)
+ if (ver->fwlrole == 0)
mode = wl_rinfo->link_mode;
else
mode = wl_rinfo_v1->link_mode;
@@ -3503,8 +3698,8 @@ static void rtw89_tx_time_iter(void *data, struct ieee80211_sta *sta)
static void _set_wl_tx_limit(struct rtw89_dev *rtwdev)
{
- const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_btc *btc = &rtwdev->btc;
+ const struct rtw89_btc_ver *ver = btc->ver;
struct rtw89_btc_cx *cx = &btc->cx;
struct rtw89_btc_dm *dm = &btc->dm;
struct rtw89_btc_wl_info *wl = &cx->wl;
@@ -3524,7 +3719,7 @@ static void _set_wl_tx_limit(struct rtw89_dev *rtwdev)
if (btc->ctrl.manual)
return;
- if (chip->chip_id == RTL8852A)
+ if (ver->fwlrole == 0)
mode = wl_rinfo->link_mode;
else
mode = wl_rinfo_v1->link_mode;
@@ -3572,8 +3767,8 @@ static void _set_wl_tx_limit(struct rtw89_dev *rtwdev)
static void _set_bt_rx_agc(struct rtw89_dev *rtwdev)
{
- const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_btc *btc = &rtwdev->btc;
+ const struct rtw89_btc_ver *ver = btc->ver;
struct rtw89_btc_wl_info *wl = &btc->cx.wl;
struct rtw89_btc_wl_role_info *wl_rinfo = &wl->role_info;
struct rtw89_btc_wl_role_info_v1 *wl_rinfo_v1 = &wl->role_info_v1;
@@ -3581,7 +3776,7 @@ static void _set_bt_rx_agc(struct rtw89_dev *rtwdev)
bool bt_hi_lna_rx = false;
u8 mode;
- if (chip->chip_id == RTL8852A)
+ if (ver->fwlrole == 0)
mode = wl_rinfo->link_mode;
else
mode = wl_rinfo_v1->link_mode;
@@ -3623,6 +3818,7 @@ static void _action_common(struct rtw89_dev *rtwdev)
wl->scbd_change = false;
btc->cx.cnt_wl[BTC_WCNT_SCBDUPDATE]++;
}
+ btc->dm.tdma_instant_excute = 0;
}
static void _action_by_bt(struct rtw89_dev *rtwdev)
@@ -3651,7 +3847,7 @@ static void _action_by_bt(struct rtw89_dev *rtwdev)
case BTC_BT_NOPROFILE:
if (_check_freerun(rtwdev))
_action_freerun(rtwdev);
- else if (a2dp.active || pan.active)
+ else if (pan.active)
_action_bt_pan(rtwdev);
else
_action_bt_idle(rtwdev);
@@ -4406,7 +4602,7 @@ static void _update_bt_scbd(struct rtw89_dev *rtwdev, bool only_update)
bt->whql_test = !!(val & BTC_BSCB_WHQL);
bt->btg_type = val & BTC_BSCB_BT_S1 ? BTC_BT_BTG : BTC_BT_ALONE;
- bt->link_info.a2dp_desc.active = !!(val & BTC_BSCB_A2DP_ACT);
+ bt->link_info.a2dp_desc.exist = !!(val & BTC_BSCB_A2DP_ACT);
/* if rfk run 1->0 */
if (bt->rfk_info.map.run && !(val & BTC_BSCB_RFK_RUN))
@@ -4445,8 +4641,8 @@ static bool _chk_wl_rfk_request(struct rtw89_dev *rtwdev)
static
void _run_coex(struct rtw89_dev *rtwdev, enum btc_reason_and_action reason)
{
- const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_btc *btc = &rtwdev->btc;
+ const struct rtw89_btc_ver *ver = btc->ver;
struct rtw89_btc_dm *dm = &rtwdev->btc.dm;
struct rtw89_btc_cx *cx = &btc->cx;
struct rtw89_btc_wl_info *wl = &btc->cx.wl;
@@ -4461,7 +4657,7 @@ void _run_coex(struct rtw89_dev *rtwdev, enum btc_reason_and_action reason)
_update_dm_step(rtwdev, reason);
_update_btc_state_map(rtwdev);
- if (chip->chip_id == RTL8852A)
+ if (ver->fwlrole == 0)
mode = wl_rinfo->link_mode;
else
mode = wl_rinfo_v1->link_mode;
@@ -4567,6 +4763,8 @@ void _run_coex(struct rtw89_dev *rtwdev, enum btc_reason_and_action reason)
_action_wl_nc(rtwdev);
break;
case BTC_WLINK_2G_STA:
+ if (wl->status.map.traffic_dir & BIT(RTW89_TFC_DL))
+ bt->scan_rx_low_pri = true;
_action_wl_2g_sta(rtwdev);
break;
case BTC_WLINK_2G_AP:
@@ -4583,9 +4781,9 @@ void _run_coex(struct rtw89_dev *rtwdev, enum btc_reason_and_action reason)
break;
case BTC_WLINK_2G_SCC:
bt->scan_rx_low_pri = true;
- if (chip->chip_id == RTL8852A)
+ if (ver->fwlrole == 0)
_action_wl_2g_scc(rtwdev);
- else if (chip->chip_id == RTL8852C)
+ else if (ver->fwlrole == 1)
_action_wl_2g_scc_v1(rtwdev);
break;
case BTC_WLINK_2G_MCC:
@@ -4690,7 +4888,7 @@ void rtw89_btc_ntfy_init(struct rtw89_dev *rtwdev, u8 mode)
_write_scbd(rtwdev,
BTC_WSCB_ACTIVE | BTC_WSCB_ON | BTC_WSCB_BTLOG, true);
_update_bt_scbd(rtwdev, true);
- if (rtw89_mac_get_ctrl_path(rtwdev) && chip->chip_id == RTL8852A) {
+ if (rtw89_mac_get_ctrl_path(rtwdev)) {
rtw89_debug(rtwdev, RTW89_DBG_BTC,
"[BTC], %s(): PTA owner warning!!\n",
__func__);
@@ -5000,11 +5198,6 @@ static void _update_bt_info(struct rtw89_dev *rtwdev, u8 *buf, u32 len)
a2dp->sink = btinfo.hb3.a2dp_sink;
- if (b->profile_cnt.now || b->status.map.ble_connect)
- rtw89_btc_fw_en_rpt(rtwdev, RPT_EN_BT_AFH_MAP, 1);
- else
- rtw89_btc_fw_en_rpt(rtwdev, RPT_EN_BT_AFH_MAP, 0);
-
if (!a2dp->exist_last && a2dp->exist) {
a2dp->vendor_id = 0;
a2dp->flush_time = 0;
@@ -5014,12 +5207,6 @@ static void _update_bt_info(struct rtw89_dev *rtwdev, u8 *buf, u32 len)
RTW89_COEX_BT_DEVINFO_WORK_PERIOD);
}
- if (a2dp->exist && (a2dp->flush_time == 0 || a2dp->vendor_id == 0 ||
- a2dp->play_latency == 1))
- rtw89_btc_fw_en_rpt(rtwdev, RPT_EN_BT_DEVICE_INFO, 1);
- else
- rtw89_btc_fw_en_rpt(rtwdev, RPT_EN_BT_DEVICE_INFO, 0);
-
_run_coex(rtwdev, BTC_RSN_UPDATE_BT_INFO);
}
@@ -5034,10 +5221,10 @@ void rtw89_btc_ntfy_role_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif
struct rtw89_sta *rtwsta, enum btc_role_state state)
{
const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
- const struct rtw89_chip_info *chip = rtwdev->chip;
struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
struct ieee80211_sta *sta = rtwsta_to_sta(rtwsta);
struct rtw89_btc *btc = &rtwdev->btc;
+ const struct rtw89_btc_ver *ver = btc->ver;
struct rtw89_btc_wl_info *wl = &btc->cx.wl;
struct rtw89_btc_wl_link_info r = {0};
struct rtw89_btc_wl_link_info *wlinfo = NULL;
@@ -5101,7 +5288,7 @@ void rtw89_btc_ntfy_role_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif
wlinfo = &wl->link_info[r.pid];
memcpy(wlinfo, &r, sizeof(*wlinfo));
- if (chip->chip_id == RTL8852A)
+ if (ver->fwlrole == 0)
_update_wl_info(rtwdev);
else
_update_wl_info_v1(rtwdev);
@@ -5151,9 +5338,7 @@ void rtw89_btc_ntfy_radio_state(struct rtw89_dev *rtwdev, enum btc_rfctrl rf_sta
}
if (rf_state == BTC_RFCTRL_WL_ON) {
- btc->dm.cnt_dm[BTC_DCNT_BTCNT_FREEZE] = 0;
- rtw89_btc_fw_en_rpt(rtwdev,
- RPT_EN_MREG | RPT_EN_BT_VER_INFO, true);
+ rtw89_btc_fw_en_rpt(rtwdev, RPT_EN_MREG, true);
val = BTC_WSCB_ACTIVE | BTC_WSCB_ON | BTC_WSCB_BTLOG;
_write_scbd(rtwdev, val, true);
_update_bt_scbd(rtwdev, true);
@@ -5164,6 +5349,13 @@ void rtw89_btc_ntfy_radio_state(struct rtw89_dev *rtwdev, enum btc_rfctrl rf_sta
_write_scbd(rtwdev, BTC_WSCB_ALL, false);
}
+ btc->dm.cnt_dm[BTC_DCNT_BTCNT_FREEZE] = 0;
+ if (wl->status.map.lps_pre == BTC_LPS_OFF &&
+ wl->status.map.lps_pre != wl->status.map.lps)
+ btc->dm.tdma_instant_excute = 1;
+ else
+ btc->dm.tdma_instant_excute = 0;
+
_run_coex(rtwdev, BTC_RSN_NTFY_RADIO_STATE);
wl->status.map.rf_off_pre = wl->status.map.rf_off;
@@ -5618,8 +5810,8 @@ static void _show_wl_role_info(struct rtw89_dev *rtwdev, struct seq_file *m)
static void _show_wl_info(struct rtw89_dev *rtwdev, struct seq_file *m)
{
- const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_btc *btc = &rtwdev->btc;
+ const struct rtw89_btc_ver *ver = btc->ver;
struct rtw89_btc_cx *cx = &btc->cx;
struct rtw89_btc_wl_info *wl = &cx->wl;
struct rtw89_btc_wl_role_info *wl_rinfo = &wl->role_info;
@@ -5631,7 +5823,7 @@ static void _show_wl_info(struct rtw89_dev *rtwdev, struct seq_file *m)
seq_puts(m, "========== [WL Status] ==========\n");
- if (chip->chip_id == RTL8852A)
+ if (ver->fwlrole == 0)
mode = wl_rinfo->link_mode;
else
mode = wl_rinfo_v1->link_mode;
@@ -5714,12 +5906,14 @@ static void _show_bt_profile_info(struct rtw89_dev *rtwdev, struct seq_file *m)
static void _show_bt_info(struct rtw89_dev *rtwdev, struct seq_file *m)
{
struct rtw89_btc *btc = &rtwdev->btc;
+ const struct rtw89_btc_ver *ver = btc->ver;
struct rtw89_btc_cx *cx = &btc->cx;
struct rtw89_btc_bt_info *bt = &cx->bt;
struct rtw89_btc_wl_info *wl = &cx->wl;
struct rtw89_btc_module *module = &btc->mdinfo;
struct rtw89_btc_bt_link_info *bt_linfo = &bt->link_info;
u8 *afh = bt_linfo->afh_map;
+ u8 *afh_le = bt_linfo->afh_map_le;
if (!(btc->dm.coex_info_map & BTC_COEX_INFO_BT))
return;
@@ -5769,6 +5963,12 @@ static void _show_bt_info(struct rtw89_dev *rtwdev, struct seq_file *m)
afh[0], afh[1], afh[2], afh[3], afh[4],
afh[5], afh[6], afh[7], afh[8], afh[9]);
+ if (ver->fcxbtafh == 2 && bt_linfo->status.map.ble_connect)
+ seq_printf(m,
+ "LE[%02x%02x_%02x_%02x%02x]",
+ afh_le[0], afh_le[1], afh_le[2],
+ afh_le[3], afh_le[4]);
+
seq_printf(m, "wl_ch_map[en:%d/ch:%d/bw:%d]\n",
wl->afh_info.en, wl->afh_info.ch, wl->afh_info.bw);
@@ -5800,6 +6000,29 @@ static void _show_bt_info(struct rtw89_dev *rtwdev, struct seq_file *m)
"[trx_req_cnt]", cx->cnt_bt[BTC_BCNT_HIPRI_RX],
cx->cnt_bt[BTC_BCNT_HIPRI_TX], cx->cnt_bt[BTC_BCNT_LOPRI_RX],
cx->cnt_bt[BTC_BCNT_LOPRI_TX], cx->cnt_bt[BTC_BCNT_POLUT]);
+
+ if (bt->enable.now && bt->ver_info.fw == 0)
+ rtw89_btc_fw_en_rpt(rtwdev, RPT_EN_BT_VER_INFO, true);
+ else
+ rtw89_btc_fw_en_rpt(rtwdev, RPT_EN_BT_VER_INFO, false);
+
+ if (bt_linfo->profile_cnt.now || bt_linfo->status.map.ble_connect)
+ rtw89_btc_fw_en_rpt(rtwdev, RPT_EN_BT_AFH_MAP, true);
+ else
+ rtw89_btc_fw_en_rpt(rtwdev, RPT_EN_BT_AFH_MAP, false);
+
+ if (ver->fcxbtafh == 2 && bt_linfo->status.map.ble_connect)
+ rtw89_btc_fw_en_rpt(rtwdev, RPT_EN_BT_AFH_MAP_LE, true);
+ else
+ rtw89_btc_fw_en_rpt(rtwdev, RPT_EN_BT_AFH_MAP_LE, false);
+
+ if (bt_linfo->a2dp_desc.exist &&
+ (bt_linfo->a2dp_desc.flush_time == 0 ||
+ bt_linfo->a2dp_desc.vendor_id == 0 ||
+ bt_linfo->a2dp_desc.play_latency == 1))
+ rtw89_btc_fw_en_rpt(rtwdev, RPT_EN_BT_DEVICE_INFO, true);
+ else
+ rtw89_btc_fw_en_rpt(rtwdev, RPT_EN_BT_DEVICE_INFO, false);
}
#define CASE_BTC_RSN_STR(e) case BTC_RSN_ ## e: return #e
@@ -5807,6 +6030,7 @@ static void _show_bt_info(struct rtw89_dev *rtwdev, struct seq_file *m)
#define CASE_BTC_POLICY_STR(e) \
case BTC_CXP_ ## e | BTC_POLICY_EXT_BIT: return #e
#define CASE_BTC_SLOT_STR(e) case CXST_ ## e: return #e
+#define CASE_BTC_EVT_STR(e) case CXEVNT_## e: return #e
static const char *steps_to_str(u16 step)
{
@@ -5947,6 +6171,40 @@ static const char *id_to_slot(u32 id)
}
}
+static const char *id_to_evt(u32 id)
+{
+ switch (id) {
+ CASE_BTC_EVT_STR(TDMA_ENTRY);
+ CASE_BTC_EVT_STR(WL_TMR);
+ CASE_BTC_EVT_STR(B1_TMR);
+ CASE_BTC_EVT_STR(B2_TMR);
+ CASE_BTC_EVT_STR(B3_TMR);
+ CASE_BTC_EVT_STR(B4_TMR);
+ CASE_BTC_EVT_STR(W2B_TMR);
+ CASE_BTC_EVT_STR(B2W_TMR);
+ CASE_BTC_EVT_STR(BCN_EARLY);
+ CASE_BTC_EVT_STR(A2DP_EMPTY);
+ CASE_BTC_EVT_STR(LK_END);
+ CASE_BTC_EVT_STR(RX_ISR);
+ CASE_BTC_EVT_STR(RX_FC0);
+ CASE_BTC_EVT_STR(RX_FC1);
+ CASE_BTC_EVT_STR(BT_RELINK);
+ CASE_BTC_EVT_STR(BT_RETRY);
+ CASE_BTC_EVT_STR(E2G);
+ CASE_BTC_EVT_STR(E5G);
+ CASE_BTC_EVT_STR(EBT);
+ CASE_BTC_EVT_STR(ENULL);
+ CASE_BTC_EVT_STR(DRV_WLK);
+ CASE_BTC_EVT_STR(BCN_OK);
+ CASE_BTC_EVT_STR(BT_CHANGE);
+ CASE_BTC_EVT_STR(EBT_EXTEND);
+ CASE_BTC_EVT_STR(E2G_NULL1);
+ CASE_BTC_EVT_STR(B1FDD_TMR);
+ default:
+ return "unknown";
+ }
+}
+
static
void seq_print_segment(struct seq_file *m, const char *prefix, u16 *data,
u8 len, u8 seg_len, u8 start_idx, u8 ring_len)
@@ -6045,21 +6303,27 @@ static void _show_dm_info(struct rtw89_dev *rtwdev, struct seq_file *m)
static void _show_error(struct rtw89_dev *rtwdev, struct seq_file *m)
{
- const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_btc *btc = &rtwdev->btc;
+ const struct rtw89_btc_ver *ver = btc->ver;
struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo;
- struct rtw89_btc_fbtc_cysta *pcysta;
- struct rtw89_btc_fbtc_cysta_v1 *pcysta_v1;
+ union rtw89_btc_fbtc_cysta_info *pcysta;
u32 except_cnt, exception_map;
- if (chip->chip_id == RTL8852A) {
- pcysta = &pfwinfo->rpt_fbtc_cysta.finfo;
- except_cnt = le32_to_cpu(pcysta->except_cnt);
- exception_map = le32_to_cpu(pcysta->exception);
+ pcysta = &pfwinfo->rpt_fbtc_cysta.finfo;
+ if (ver->fcxcysta == 2) {
+ pcysta->v2 = pfwinfo->rpt_fbtc_cysta.finfo.v2;
+ except_cnt = le32_to_cpu(pcysta->v2.except_cnt);
+ exception_map = le32_to_cpu(pcysta->v2.exception);
+ } else if (ver->fcxcysta == 3) {
+ pcysta->v3 = pfwinfo->rpt_fbtc_cysta.finfo.v3;
+ except_cnt = le32_to_cpu(pcysta->v3.except_cnt);
+ exception_map = le32_to_cpu(pcysta->v3.except_map);
+ } else if (ver->fcxcysta == 4) {
+ pcysta->v4 = pfwinfo->rpt_fbtc_cysta.finfo.v4;
+ except_cnt = pcysta->v4.except_cnt;
+ exception_map = le32_to_cpu(pcysta->v4.except_map);
} else {
- pcysta_v1 = &pfwinfo->rpt_fbtc_cysta.finfo_v1;
- except_cnt = le32_to_cpu(pcysta_v1->except_cnt);
- exception_map = le32_to_cpu(pcysta_v1->except_map);
+ return;
}
if (pfwinfo->event[BTF_EVNT_BUF_OVERFLOW] == 0 && except_cnt == 0 &&
@@ -6097,23 +6361,20 @@ static void _show_error(struct rtw89_dev *rtwdev, struct seq_file *m)
static void _show_fbtc_tdma(struct rtw89_dev *rtwdev, struct seq_file *m)
{
- const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_btc *btc = &rtwdev->btc;
+ const struct rtw89_btc_ver *ver = btc->ver;
struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo;
struct rtw89_btc_rpt_cmn_info *pcinfo = NULL;
struct rtw89_btc_fbtc_tdma *t = NULL;
- struct rtw89_btc_fbtc_slot *s = NULL;
- struct rtw89_btc_dm *dm = &btc->dm;
- u8 i, cnt = 0;
pcinfo = &pfwinfo->rpt_fbtc_tdma.cinfo;
if (!pcinfo->valid)
return;
- if (chip->chip_id == RTL8852A)
- t = &pfwinfo->rpt_fbtc_tdma.finfo;
+ if (ver->fcxtdma == 1)
+ t = &pfwinfo->rpt_fbtc_tdma.finfo.v1;
else
- t = &pfwinfo->rpt_fbtc_tdma.finfo_v1.tdma;
+ t = &pfwinfo->rpt_fbtc_tdma.finfo.v3.tdma;
seq_printf(m,
" %-15s : ", "[tdma_policy]");
@@ -6130,75 +6391,43 @@ static void _show_fbtc_tdma(struct rtw89_dev *rtwdev, struct seq_file *m)
"policy_type:%d",
(u32)btc->policy_type);
- s = pfwinfo->rpt_fbtc_slots.finfo.slot;
-
- for (i = 0; i < CXST_MAX; i++) {
- if (dm->update_slot_map == BIT(CXST_MAX) - 1)
- break;
-
- if (!(dm->update_slot_map & BIT(i)))
- continue;
-
- if (cnt % 6 == 0)
- seq_printf(m,
- " %-15s : %d[%d/0x%x/%d]",
- "[slot_policy]",
- (u32)i,
- s[i].dur, s[i].cxtbl, s[i].cxtype);
- else
- seq_printf(m,
- ", %d[%d/0x%x/%d]",
- (u32)i,
- s[i].dur, s[i].cxtbl, s[i].cxtype);
- if (cnt % 6 == 5)
- seq_puts(m, "\n");
- cnt++;
- }
seq_puts(m, "\n");
}
static void _show_fbtc_slots(struct rtw89_dev *rtwdev, struct seq_file *m)
{
struct rtw89_btc *btc = &rtwdev->btc;
- struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo;
- struct rtw89_btc_rpt_cmn_info *pcinfo = NULL;
- struct rtw89_btc_fbtc_slots *pslots = NULL;
- struct rtw89_btc_fbtc_slot s;
+ struct rtw89_btc_dm *dm = &btc->dm;
+ struct rtw89_btc_fbtc_slot *s;
u8 i = 0;
- pcinfo = &pfwinfo->rpt_fbtc_slots.cinfo;
- if (!pcinfo->valid)
- return;
-
- pslots = &pfwinfo->rpt_fbtc_slots.finfo;
-
for (i = 0; i < CXST_MAX; i++) {
- s = pslots->slot[i];
- if (i % 6 == 0)
+ s = &dm->slot_now[i];
+ if (i % 5 == 0)
seq_printf(m,
- " %-15s : %02d[%03d/0x%x/%d]",
+ " %-15s : %5s[%03d/0x%x/%d]",
"[slot_list]",
- (u32)i,
- s.dur, s.cxtbl, s.cxtype);
+ id_to_slot((u32)i),
+ s->dur, s->cxtbl, s->cxtype);
else
seq_printf(m,
- ", %02d[%03d/0x%x/%d]",
- (u32)i,
- s.dur, s.cxtbl, s.cxtype);
- if (i % 6 == 5)
+ ", %5s[%03d/0x%x/%d]",
+ id_to_slot((u32)i),
+ s->dur, s->cxtbl, s->cxtype);
+ if (i % 5 == 4)
seq_puts(m, "\n");
}
+ seq_puts(m, "\n");
}
-static void _show_fbtc_cysta(struct rtw89_dev *rtwdev, struct seq_file *m)
+static void _show_fbtc_cysta_v2(struct rtw89_dev *rtwdev, struct seq_file *m)
{
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo;
struct rtw89_btc_dm *dm = &btc->dm;
struct rtw89_btc_bt_a2dp_desc *a2dp = &btc->cx.bt.link_info.a2dp_desc;
struct rtw89_btc_rpt_cmn_info *pcinfo = NULL;
- struct rtw89_btc_fbtc_cysta *pcysta_le32 = NULL;
- struct rtw89_btc_fbtc_cysta_cpu pcysta[1];
+ struct rtw89_btc_fbtc_cysta_v2 *pcysta_le32 = NULL;
union rtw89_btc_fbtc_rxflct r;
u8 i, cnt = 0, slot_pair;
u16 cycle, c_begin, c_end, store_index;
@@ -6207,65 +6436,66 @@ static void _show_fbtc_cysta(struct rtw89_dev *rtwdev, struct seq_file *m)
if (!pcinfo->valid)
return;
- pcysta_le32 = &pfwinfo->rpt_fbtc_cysta.finfo;
- rtw89_btc_fbtc_cysta_to_cpu(pcysta_le32, pcysta);
+ pcysta_le32 = &pfwinfo->rpt_fbtc_cysta.finfo.v2;
seq_printf(m,
" %-15s : cycle:%d, bcn[all:%d/all_ok:%d/bt:%d/bt_ok:%d]",
- "[cycle_cnt]", pcysta->cycles, pcysta->bcn_cnt[CXBCN_ALL],
- pcysta->bcn_cnt[CXBCN_ALL_OK],
- pcysta->bcn_cnt[CXBCN_BT_SLOT],
- pcysta->bcn_cnt[CXBCN_BT_OK]);
+ "[cycle_cnt]",
+ le16_to_cpu(pcysta_le32->cycles),
+ le32_to_cpu(pcysta_le32->bcn_cnt[CXBCN_ALL]),
+ le32_to_cpu(pcysta_le32->bcn_cnt[CXBCN_ALL_OK]),
+ le32_to_cpu(pcysta_le32->bcn_cnt[CXBCN_BT_SLOT]),
+ le32_to_cpu(pcysta_le32->bcn_cnt[CXBCN_BT_OK]));
for (i = 0; i < CXST_MAX; i++) {
- if (!pcysta->slot_cnt[i])
+ if (!le32_to_cpu(pcysta_le32->slot_cnt[i]))
continue;
- seq_printf(m,
- ", %d:%d", (u32)i, pcysta->slot_cnt[i]);
+ seq_printf(m, ", %s:%d", id_to_slot((u32)i),
+ le32_to_cpu(pcysta_le32->slot_cnt[i]));
}
if (dm->tdma_now.rxflctrl) {
- seq_printf(m,
- ", leak_rx:%d", pcysta->leakrx_cnt);
+ seq_printf(m, ", leak_rx:%d",
+ le32_to_cpu(pcysta_le32->leakrx_cnt));
}
- if (pcysta->collision_cnt) {
- seq_printf(m,
- ", collision:%d", pcysta->collision_cnt);
+ if (le32_to_cpu(pcysta_le32->collision_cnt)) {
+ seq_printf(m, ", collision:%d",
+ le32_to_cpu(pcysta_le32->collision_cnt));
}
- if (pcysta->skip_cnt) {
- seq_printf(m,
- ", skip:%d", pcysta->skip_cnt);
+ if (le32_to_cpu(pcysta_le32->skip_cnt)) {
+ seq_printf(m, ", skip:%d",
+ le32_to_cpu(pcysta_le32->skip_cnt));
}
seq_puts(m, "\n");
seq_printf(m, " %-15s : avg_t[wl:%d/bt:%d/lk:%d.%03d]",
"[cycle_time]",
- pcysta->tavg_cycle[CXT_WL],
- pcysta->tavg_cycle[CXT_BT],
- pcysta->tavg_lk / 1000, pcysta->tavg_lk % 1000);
- seq_printf(m,
- ", max_t[wl:%d/bt:%d/lk:%d.%03d]",
- pcysta->tmax_cycle[CXT_WL],
- pcysta->tmax_cycle[CXT_BT],
- pcysta->tmax_lk / 1000, pcysta->tmax_lk % 1000);
- seq_printf(m,
- ", maxdiff_t[wl:%d/bt:%d]\n",
- pcysta->tmaxdiff_cycle[CXT_WL],
- pcysta->tmaxdiff_cycle[CXT_BT]);
-
- if (pcysta->cycles == 0)
+ le16_to_cpu(pcysta_le32->tavg_cycle[CXT_WL]),
+ le16_to_cpu(pcysta_le32->tavg_cycle[CXT_BT]),
+ le16_to_cpu(pcysta_le32->tavg_lk) / 1000,
+ le16_to_cpu(pcysta_le32->tavg_lk) % 1000);
+ seq_printf(m, ", max_t[wl:%d/bt:%d/lk:%d.%03d]",
+ le16_to_cpu(pcysta_le32->tmax_cycle[CXT_WL]),
+ le16_to_cpu(pcysta_le32->tmax_cycle[CXT_BT]),
+ le16_to_cpu(pcysta_le32->tmax_lk) / 1000,
+ le16_to_cpu(pcysta_le32->tmax_lk) % 1000);
+ seq_printf(m, ", maxdiff_t[wl:%d/bt:%d]\n",
+ le16_to_cpu(pcysta_le32->tmaxdiff_cycle[CXT_WL]),
+ le16_to_cpu(pcysta_le32->tmaxdiff_cycle[CXT_BT]));
+
+ if (le16_to_cpu(pcysta_le32->cycles) <= 1)
return;
/* 1 cycle record 1 wl-slot and 1 bt-slot */
slot_pair = BTC_CYCLE_SLOT_MAX / 2;
- if (pcysta->cycles <= slot_pair)
+ if (le16_to_cpu(pcysta_le32->cycles) <= slot_pair)
c_begin = 1;
else
- c_begin = pcysta->cycles - slot_pair + 1;
+ c_begin = le16_to_cpu(pcysta_le32->cycles) - slot_pair + 1;
- c_end = pcysta->cycles;
+ c_end = le16_to_cpu(pcysta_le32->cycles);
for (cycle = c_begin; cycle <= c_end; cycle++) {
cnt++;
@@ -6274,13 +6504,13 @@ static void _show_fbtc_cysta(struct rtw89_dev *rtwdev, struct seq_file *m)
if (cnt % (BTC_CYCLE_SLOT_MAX / 4) == 1)
seq_printf(m,
" %-15s : ->b%02d->w%02d", "[cycle_step]",
- pcysta->tslot_cycle[store_index],
- pcysta->tslot_cycle[store_index + 1]);
+ le16_to_cpu(pcysta_le32->tslot_cycle[store_index]),
+ le16_to_cpu(pcysta_le32->tslot_cycle[store_index + 1]));
else
seq_printf(m,
"->b%02d->w%02d",
- pcysta->tslot_cycle[store_index],
- pcysta->tslot_cycle[store_index + 1]);
+ le16_to_cpu(pcysta_le32->tslot_cycle[store_index]),
+ le16_to_cpu(pcysta_le32->tslot_cycle[store_index + 1]));
if (cnt % (BTC_CYCLE_SLOT_MAX / 4) == 0 || cnt == c_end)
seq_puts(m, "\n");
}
@@ -6289,41 +6519,43 @@ static void _show_fbtc_cysta(struct rtw89_dev *rtwdev, struct seq_file *m)
seq_printf(m,
" %-15s : a2dp_ept:%d, a2dp_late:%d",
"[a2dp_t_sta]",
- pcysta->a2dpept, pcysta->a2dpeptto);
+ le16_to_cpu(pcysta_le32->a2dpept),
+ le16_to_cpu(pcysta_le32->a2dpeptto));
seq_printf(m,
", avg_t:%d, max_t:%d",
- pcysta->tavg_a2dpept, pcysta->tmax_a2dpept);
+ le16_to_cpu(pcysta_le32->tavg_a2dpept),
+ le16_to_cpu(pcysta_le32->tmax_a2dpept));
r.val = dm->tdma_now.rxflctrl;
if (r.type && r.tgln_n) {
seq_printf(m,
", cycle[PSTDMA:%d/TDMA:%d], ",
- pcysta->cycles_a2dp[CXT_FLCTRL_ON],
- pcysta->cycles_a2dp[CXT_FLCTRL_OFF]);
+ le16_to_cpu(pcysta_le32->cycles_a2dp[CXT_FLCTRL_ON]),
+ le16_to_cpu(pcysta_le32->cycles_a2dp[CXT_FLCTRL_OFF]));
seq_printf(m,
"avg_t[PSTDMA:%d/TDMA:%d], ",
- pcysta->tavg_a2dp[CXT_FLCTRL_ON],
- pcysta->tavg_a2dp[CXT_FLCTRL_OFF]);
+ le16_to_cpu(pcysta_le32->tavg_a2dp[CXT_FLCTRL_ON]),
+ le16_to_cpu(pcysta_le32->tavg_a2dp[CXT_FLCTRL_OFF]));
seq_printf(m,
"max_t[PSTDMA:%d/TDMA:%d]",
- pcysta->tmax_a2dp[CXT_FLCTRL_ON],
- pcysta->tmax_a2dp[CXT_FLCTRL_OFF]);
+ le16_to_cpu(pcysta_le32->tmax_a2dp[CXT_FLCTRL_ON]),
+ le16_to_cpu(pcysta_le32->tmax_a2dp[CXT_FLCTRL_OFF]));
}
seq_puts(m, "\n");
}
}
-static void _show_fbtc_cysta_v1(struct rtw89_dev *rtwdev, struct seq_file *m)
+static void _show_fbtc_cysta_v3(struct rtw89_dev *rtwdev, struct seq_file *m)
{
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_bt_a2dp_desc *a2dp = &btc->cx.bt.link_info.a2dp_desc;
struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo;
struct rtw89_btc_dm *dm = &btc->dm;
struct rtw89_btc_fbtc_a2dp_trx_stat *a2dp_trx;
- struct rtw89_btc_fbtc_cysta_v1 *pcysta;
+ struct rtw89_btc_fbtc_cysta_v3 *pcysta;
struct rtw89_btc_rpt_cmn_info *pcinfo;
u8 i, cnt = 0, slot_pair, divide_cnt;
u16 cycle, c_begin, c_end, store_index;
@@ -6332,7 +6564,7 @@ static void _show_fbtc_cysta_v1(struct rtw89_dev *rtwdev, struct seq_file *m)
if (!pcinfo->valid)
return;
- pcysta = &pfwinfo->rpt_fbtc_cysta.finfo_v1;
+ pcysta = &pfwinfo->rpt_fbtc_cysta.finfo.v3;
seq_printf(m,
" %-15s : cycle:%d, bcn[all:%d/all_ok:%d/bt:%d/bt_ok:%d]",
"[cycle_cnt]",
@@ -6379,7 +6611,7 @@ static void _show_fbtc_cysta_v1(struct rtw89_dev *rtwdev, struct seq_file *m)
le16_to_cpu(pcysta->cycle_time.tmaxdiff[CXT_BT]));
cycle = le16_to_cpu(pcysta->cycles);
- if (cycle == 0)
+ if (cycle <= 1)
return;
/* 1 cycle record 1 wl-slot and 1 bt-slot */
@@ -6401,40 +6633,171 @@ static void _show_fbtc_cysta_v1(struct rtw89_dev *rtwdev, struct seq_file *m)
cnt++;
store_index = ((cycle - 1) % slot_pair) * 2;
- if (cnt % divide_cnt == 1) {
- seq_printf(m, "\n\r %-15s : ", "[cycle_step]");
- } else {
- seq_printf(m, "->b%02d",
- le16_to_cpu(pcysta->slot_step_time[store_index]));
- if (a2dp->exist) {
- a2dp_trx = &pcysta->a2dp_trx[store_index];
- seq_printf(m, "(%d/%d/%dM/%d/%d/%d)",
- a2dp_trx->empty_cnt,
- a2dp_trx->retry_cnt,
- a2dp_trx->tx_rate ? 3 : 2,
- a2dp_trx->tx_cnt,
- a2dp_trx->ack_cnt,
- a2dp_trx->nack_cnt);
- }
- seq_printf(m, "->w%02d",
- le16_to_cpu(pcysta->slot_step_time[store_index + 1]));
- if (a2dp->exist) {
- a2dp_trx = &pcysta->a2dp_trx[store_index + 1];
- seq_printf(m, "(%d/%d/%dM/%d/%d/%d)",
- a2dp_trx->empty_cnt,
- a2dp_trx->retry_cnt,
- a2dp_trx->tx_rate ? 3 : 2,
- a2dp_trx->tx_cnt,
- a2dp_trx->ack_cnt,
- a2dp_trx->nack_cnt);
- }
+ if (cnt % divide_cnt == 1)
+ seq_printf(m, " %-15s : ", "[cycle_step]");
+
+ seq_printf(m, "->b%02d",
+ le16_to_cpu(pcysta->slot_step_time[store_index]));
+ if (a2dp->exist) {
+ a2dp_trx = &pcysta->a2dp_trx[store_index];
+ seq_printf(m, "(%d/%d/%dM/%d/%d/%d)",
+ a2dp_trx->empty_cnt,
+ a2dp_trx->retry_cnt,
+ a2dp_trx->tx_rate ? 3 : 2,
+ a2dp_trx->tx_cnt,
+ a2dp_trx->ack_cnt,
+ a2dp_trx->nack_cnt);
}
- if (cnt % (BTC_CYCLE_SLOT_MAX / 4) == 0 || cnt == c_end)
+ seq_printf(m, "->w%02d",
+ le16_to_cpu(pcysta->slot_step_time[store_index + 1]));
+ if (a2dp->exist) {
+ a2dp_trx = &pcysta->a2dp_trx[store_index + 1];
+ seq_printf(m, "(%d/%d/%dM/%d/%d/%d)",
+ a2dp_trx->empty_cnt,
+ a2dp_trx->retry_cnt,
+ a2dp_trx->tx_rate ? 3 : 2,
+ a2dp_trx->tx_cnt,
+ a2dp_trx->ack_cnt,
+ a2dp_trx->nack_cnt);
+ }
+ if (cnt % divide_cnt == 0 || cnt == c_end)
seq_puts(m, "\n");
}
if (a2dp->exist) {
- seq_printf(m, "%-15s : a2dp_ept:%d, a2dp_late:%d",
+ seq_printf(m, " %-15s : a2dp_ept:%d, a2dp_late:%d",
+ "[a2dp_t_sta]",
+ le16_to_cpu(pcysta->a2dp_ept.cnt),
+ le16_to_cpu(pcysta->a2dp_ept.cnt_timeout));
+
+ seq_printf(m, ", avg_t:%d, max_t:%d",
+ le16_to_cpu(pcysta->a2dp_ept.tavg),
+ le16_to_cpu(pcysta->a2dp_ept.tmax));
+
+ seq_puts(m, "\n");
+ }
+}
+
+static void _show_fbtc_cysta_v4(struct rtw89_dev *rtwdev, struct seq_file *m)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_bt_a2dp_desc *a2dp = &btc->cx.bt.link_info.a2dp_desc;
+ struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo;
+ struct rtw89_btc_dm *dm = &btc->dm;
+ struct rtw89_btc_fbtc_a2dp_trx_stat_v4 *a2dp_trx;
+ struct rtw89_btc_fbtc_cysta_v4 *pcysta;
+ struct rtw89_btc_rpt_cmn_info *pcinfo;
+ u8 i, cnt = 0, slot_pair, divide_cnt;
+ u16 cycle, c_begin, c_end, store_index;
+
+ pcinfo = &pfwinfo->rpt_fbtc_cysta.cinfo;
+ if (!pcinfo->valid)
+ return;
+
+ pcysta = &pfwinfo->rpt_fbtc_cysta.finfo.v4;
+ seq_printf(m,
+ " %-15s : cycle:%d, bcn[all:%d/all_ok:%d/bt:%d/bt_ok:%d]",
+ "[cycle_cnt]",
+ le16_to_cpu(pcysta->cycles),
+ le16_to_cpu(pcysta->bcn_cnt[CXBCN_ALL]),
+ le16_to_cpu(pcysta->bcn_cnt[CXBCN_ALL_OK]),
+ le16_to_cpu(pcysta->bcn_cnt[CXBCN_BT_SLOT]),
+ le16_to_cpu(pcysta->bcn_cnt[CXBCN_BT_OK]));
+
+ for (i = 0; i < CXST_MAX; i++) {
+ if (!le16_to_cpu(pcysta->slot_cnt[i]))
+ continue;
+
+ seq_printf(m, ", %s:%d", id_to_slot(i),
+ le16_to_cpu(pcysta->slot_cnt[i]));
+ }
+
+ if (dm->tdma_now.rxflctrl)
+ seq_printf(m, ", leak_rx:%d",
+ le32_to_cpu(pcysta->leak_slot.cnt_rximr));
+
+ if (pcysta->collision_cnt)
+ seq_printf(m, ", collision:%d", pcysta->collision_cnt);
+
+ if (le16_to_cpu(pcysta->skip_cnt))
+ seq_printf(m, ", skip:%d",
+ le16_to_cpu(pcysta->skip_cnt));
+
+ seq_puts(m, "\n");
+
+ seq_printf(m, " %-15s : avg_t[wl:%d/bt:%d/lk:%d.%03d]",
+ "[cycle_time]",
+ le16_to_cpu(pcysta->cycle_time.tavg[CXT_WL]),
+ le16_to_cpu(pcysta->cycle_time.tavg[CXT_BT]),
+ le16_to_cpu(pcysta->leak_slot.tavg) / 1000,
+ le16_to_cpu(pcysta->leak_slot.tavg) % 1000);
+ seq_printf(m,
+ ", max_t[wl:%d/bt:%d/lk:%d.%03d]",
+ le16_to_cpu(pcysta->cycle_time.tmax[CXT_WL]),
+ le16_to_cpu(pcysta->cycle_time.tmax[CXT_BT]),
+ le16_to_cpu(pcysta->leak_slot.tmax) / 1000,
+ le16_to_cpu(pcysta->leak_slot.tmax) % 1000);
+ seq_printf(m,
+ ", maxdiff_t[wl:%d/bt:%d]\n",
+ le16_to_cpu(pcysta->cycle_time.tmaxdiff[CXT_WL]),
+ le16_to_cpu(pcysta->cycle_time.tmaxdiff[CXT_BT]));
+
+ cycle = le16_to_cpu(pcysta->cycles);
+ if (cycle <= 1)
+ return;
+
+ /* 1 cycle record 1 wl-slot and 1 bt-slot */
+ slot_pair = BTC_CYCLE_SLOT_MAX / 2;
+
+ if (cycle <= slot_pair)
+ c_begin = 1;
+ else
+ c_begin = cycle - slot_pair + 1;
+
+ c_end = cycle;
+
+ if (a2dp->exist)
+ divide_cnt = 3;
+ else
+ divide_cnt = BTC_CYCLE_SLOT_MAX / 4;
+
+ for (cycle = c_begin; cycle <= c_end; cycle++) {
+ cnt++;
+ store_index = ((cycle - 1) % slot_pair) * 2;
+
+ if (cnt % divide_cnt == 1)
+ seq_printf(m, " %-15s : ", "[cycle_step]");
+
+ seq_printf(m, "->b%02d",
+ le16_to_cpu(pcysta->slot_step_time[store_index]));
+ if (a2dp->exist) {
+ a2dp_trx = &pcysta->a2dp_trx[store_index];
+ seq_printf(m, "(%d/%d/%dM/%d/%d/%d)",
+ a2dp_trx->empty_cnt,
+ a2dp_trx->retry_cnt,
+ a2dp_trx->tx_rate ? 3 : 2,
+ a2dp_trx->tx_cnt,
+ a2dp_trx->ack_cnt,
+ a2dp_trx->nack_cnt);
+ }
+ seq_printf(m, "->w%02d",
+ le16_to_cpu(pcysta->slot_step_time[store_index + 1]));
+ if (a2dp->exist) {
+ a2dp_trx = &pcysta->a2dp_trx[store_index + 1];
+ seq_printf(m, "(%d/%d/%dM/%d/%d/%d)",
+ a2dp_trx->empty_cnt,
+ a2dp_trx->retry_cnt,
+ a2dp_trx->tx_rate ? 3 : 2,
+ a2dp_trx->tx_cnt,
+ a2dp_trx->ack_cnt,
+ a2dp_trx->nack_cnt);
+ }
+ if (cnt % divide_cnt == 0 || cnt == c_end)
+ seq_puts(m, "\n");
+ }
+
+ if (a2dp->exist) {
+ seq_printf(m, " %-15s : a2dp_ept:%d, a2dp_late:%d",
"[a2dp_t_sta]",
le16_to_cpu(pcysta->a2dp_ept.cnt),
le16_to_cpu(pcysta->a2dp_ept.cnt_timeout));
@@ -6449,12 +6812,11 @@ static void _show_fbtc_cysta_v1(struct rtw89_dev *rtwdev, struct seq_file *m)
static void _show_fbtc_nullsta(struct rtw89_dev *rtwdev, struct seq_file *m)
{
- const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_btc *btc = &rtwdev->btc;
+ const struct rtw89_btc_ver *ver = btc->ver;
struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo;
struct rtw89_btc_rpt_cmn_info *pcinfo;
- struct rtw89_btc_fbtc_cynullsta *ns;
- struct rtw89_btc_fbtc_cynullsta_v1 *ns_v1;
+ union rtw89_btc_fbtc_cynullsta_info *ns;
u8 i = 0;
if (!btc->dm.tdma_now.rxflctrl)
@@ -6464,68 +6826,56 @@ static void _show_fbtc_nullsta(struct rtw89_dev *rtwdev, struct seq_file *m)
if (!pcinfo->valid)
return;
- if (chip->chip_id == RTL8852A) {
- ns = &pfwinfo->rpt_fbtc_nullsta.finfo;
-
- seq_printf(m, " %-15s : ", "[null_sta]");
-
+ ns = &pfwinfo->rpt_fbtc_nullsta.finfo;
+ if (ver->fcxnullsta == 1) {
for (i = 0; i < 2; i++) {
- if (i != 0)
- seq_printf(m, ", null-%d", i);
- else
- seq_printf(m, "null-%d", i);
+ seq_printf(m, " %-15s : ", "[NULL-STA]");
+ seq_printf(m, "null-%d", i);
seq_printf(m, "[ok:%d/",
- le32_to_cpu(ns->result[i][1]));
+ le32_to_cpu(ns->v1.result[i][1]));
seq_printf(m, "fail:%d/",
- le32_to_cpu(ns->result[i][0]));
+ le32_to_cpu(ns->v1.result[i][0]));
seq_printf(m, "on_time:%d/",
- le32_to_cpu(ns->result[i][2]));
+ le32_to_cpu(ns->v1.result[i][2]));
seq_printf(m, "retry:%d/",
- le32_to_cpu(ns->result[i][3]));
+ le32_to_cpu(ns->v1.result[i][3]));
seq_printf(m, "avg_t:%d.%03d/",
- le32_to_cpu(ns->avg_t[i]) / 1000,
- le32_to_cpu(ns->avg_t[i]) % 1000);
- seq_printf(m, "max_t:%d.%03d]",
- le32_to_cpu(ns->max_t[i]) / 1000,
- le32_to_cpu(ns->max_t[i]) % 1000);
+ le32_to_cpu(ns->v1.avg_t[i]) / 1000,
+ le32_to_cpu(ns->v1.avg_t[i]) % 1000);
+ seq_printf(m, "max_t:%d.%03d]\n",
+ le32_to_cpu(ns->v1.max_t[i]) / 1000,
+ le32_to_cpu(ns->v1.max_t[i]) % 1000);
}
} else {
- ns_v1 = &pfwinfo->rpt_fbtc_nullsta.finfo_v1;
-
- seq_printf(m, " %-15s : ", "[null_sta]");
-
for (i = 0; i < 2; i++) {
- if (i != 0)
- seq_printf(m, ", null-%d", i);
- else
- seq_printf(m, "null-%d", i);
+ seq_printf(m, " %-15s : ", "[NULL-STA]");
+ seq_printf(m, "null-%d", i);
seq_printf(m, "[Tx:%d/",
- le32_to_cpu(ns_v1->result[i][4]));
+ le32_to_cpu(ns->v2.result[i][4]));
seq_printf(m, "[ok:%d/",
- le32_to_cpu(ns_v1->result[i][1]));
+ le32_to_cpu(ns->v2.result[i][1]));
seq_printf(m, "fail:%d/",
- le32_to_cpu(ns_v1->result[i][0]));
+ le32_to_cpu(ns->v2.result[i][0]));
seq_printf(m, "on_time:%d/",
- le32_to_cpu(ns_v1->result[i][2]));
+ le32_to_cpu(ns->v2.result[i][2]));
seq_printf(m, "retry:%d/",
- le32_to_cpu(ns_v1->result[i][3]));
+ le32_to_cpu(ns->v2.result[i][3]));
seq_printf(m, "avg_t:%d.%03d/",
- le32_to_cpu(ns_v1->avg_t[i]) / 1000,
- le32_to_cpu(ns_v1->avg_t[i]) % 1000);
- seq_printf(m, "max_t:%d.%03d]",
- le32_to_cpu(ns_v1->max_t[i]) / 1000,
- le32_to_cpu(ns_v1->max_t[i]) % 1000);
+ le32_to_cpu(ns->v2.avg_t[i]) / 1000,
+ le32_to_cpu(ns->v2.avg_t[i]) % 1000);
+ seq_printf(m, "max_t:%d.%03d]\n",
+ le32_to_cpu(ns->v2.max_t[i]) / 1000,
+ le32_to_cpu(ns->v2.max_t[i]) % 1000);
}
}
- seq_puts(m, "\n");
}
-static void _show_fbtc_step(struct rtw89_dev *rtwdev, struct seq_file *m)
+static void _show_fbtc_step_v2(struct rtw89_dev *rtwdev, struct seq_file *m)
{
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo;
struct rtw89_btc_rpt_cmn_info *pcinfo = NULL;
- struct rtw89_btc_fbtc_steps *pstep = NULL;
+ struct rtw89_btc_fbtc_steps_v2 *pstep = NULL;
u8 type, val, cnt = 0, state = 0;
bool outloop = false;
u16 i, diff_t, n_start = 0, n_stop = 0;
@@ -6535,7 +6885,7 @@ static void _show_fbtc_step(struct rtw89_dev *rtwdev, struct seq_file *m)
if (!pcinfo->valid)
return;
- pstep = &pfwinfo->rpt_fbtc_step.finfo;
+ pstep = &pfwinfo->rpt_fbtc_step.finfo.v2;
pos_old = le16_to_cpu(pstep->pos_old);
pos_new = le16_to_cpu(pstep->pos_new);
@@ -6589,10 +6939,67 @@ static void _show_fbtc_step(struct rtw89_dev *rtwdev, struct seq_file *m)
} while (!outloop);
}
+static void _show_fbtc_step_v3(struct rtw89_dev *rtwdev, struct seq_file *m)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo;
+ struct rtw89_btc_rpt_cmn_info *pcinfo;
+ struct rtw89_btc_fbtc_steps_v3 *pstep;
+ u32 i, n_begin, n_end, array_idx, cnt = 0;
+ u8 type, val;
+ u16 diff_t;
+
+ if ((pfwinfo->rpt_en_map &
+ rtw89_btc_fw_rpt_ver(rtwdev, RPT_EN_FW_STEP_INFO)) == 0)
+ return;
+
+ pcinfo = &pfwinfo->rpt_fbtc_step.cinfo;
+ if (!pcinfo->valid)
+ return;
+
+ pstep = &pfwinfo->rpt_fbtc_step.finfo.v3;
+ if (pcinfo->req_fver != pstep->fver)
+ return;
+
+ if (le32_to_cpu(pstep->cnt) <= FCXDEF_STEP)
+ n_begin = 1;
+ else
+ n_begin = le32_to_cpu(pstep->cnt) - FCXDEF_STEP + 1;
+
+ n_end = le32_to_cpu(pstep->cnt);
+
+ if (n_begin > n_end)
+ return;
+
+ /* restore step info by using ring instead of FIFO */
+ for (i = n_begin; i <= n_end; i++) {
+ array_idx = (i - 1) % FCXDEF_STEP;
+ type = pstep->step[array_idx].type;
+ val = pstep->step[array_idx].val;
+ diff_t = le16_to_cpu(pstep->step[array_idx].difft);
+
+ if (type == CXSTEP_NONE || type >= CXSTEP_MAX)
+ continue;
+
+ if (cnt % 10 == 0)
+ seq_printf(m, " %-15s : ", "[steps]");
+
+ seq_printf(m, "-> %s(%02d)",
+ (type == CXSTEP_SLOT ?
+ id_to_slot((u32)val) :
+ id_to_evt((u32)val)), diff_t);
+
+ if (cnt % 10 == 9)
+ seq_puts(m, "\n");
+
+ cnt++;
+ }
+}
+
static void _show_fw_dm_msg(struct rtw89_dev *rtwdev, struct seq_file *m)
{
- const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_btc *btc = &rtwdev->btc;
+ const struct rtw89_btc_ver *ver = btc->ver;
if (!(btc->dm.coex_info_map & BTC_COEX_INFO_DM))
return;
@@ -6601,13 +7008,20 @@ static void _show_fw_dm_msg(struct rtw89_dev *rtwdev, struct seq_file *m)
_show_fbtc_tdma(rtwdev, m);
_show_fbtc_slots(rtwdev, m);
- if (chip->chip_id == RTL8852A)
- _show_fbtc_cysta(rtwdev, m);
- else
- _show_fbtc_cysta_v1(rtwdev, m);
+ if (ver->fcxcysta == 2)
+ _show_fbtc_cysta_v2(rtwdev, m);
+ else if (ver->fcxcysta == 3)
+ _show_fbtc_cysta_v3(rtwdev, m);
+ else if (ver->fcxcysta == 4)
+ _show_fbtc_cysta_v4(rtwdev, m);
_show_fbtc_nullsta(rtwdev, m);
- _show_fbtc_step(rtwdev, m);
+
+ if (ver->fcxstep == 2)
+ _show_fbtc_step_v2(rtwdev, m);
+ else if (ver->fcxstep == 3)
+ _show_fbtc_step_v3(rtwdev, m);
+
}
static void _get_gnt(struct rtw89_dev *rtwdev, struct rtw89_mac_ax_coex_gnt *gnt_cfg)
@@ -6680,10 +7094,7 @@ static void _show_mreg(struct rtw89_dev *rtwdev, struct seq_file *m)
/* To avoid I/O if WL LPS or power-off */
if (!wl->status.map.lps && !wl->status.map.rf_off) {
- if (chip->chip_id == RTL8852A)
- btc->dm.pta_owner = rtw89_mac_get_ctrl_path(rtwdev);
- else if (chip->chip_id == RTL8852C)
- btc->dm.pta_owner = 0;
+ btc->dm.pta_owner = rtw89_mac_get_ctrl_path(rtwdev);
_get_gnt(rtwdev, &gnt_cfg);
gnt = gnt_cfg.band[0];
@@ -6729,6 +7140,9 @@ static void _show_mreg(struct rtw89_dev *rtwdev, struct seq_file *m)
if (cnt % 6 == 5)
seq_puts(m, "\n");
cnt++;
+
+ if (i >= pmreg->reg_num)
+ seq_puts(m, "\n");
}
pcinfo = &pfwinfo->rpt_fbtc_gpio_dbg.cinfo;
@@ -6754,12 +7168,12 @@ static void _show_mreg(struct rtw89_dev *rtwdev, struct seq_file *m)
seq_puts(m, "\n");
}
-static void _show_summary(struct rtw89_dev *rtwdev, struct seq_file *m)
+static void _show_summary_v1(struct rtw89_dev *rtwdev, struct seq_file *m)
{
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo;
struct rtw89_btc_rpt_cmn_info *pcinfo = NULL;
- struct rtw89_btc_fbtc_rpt_ctrl *prptctrl = NULL;
+ struct rtw89_btc_fbtc_rpt_ctrl_v1 *prptctrl = NULL;
struct rtw89_btc_cx *cx = &btc->cx;
struct rtw89_btc_dm *dm = &btc->dm;
struct rtw89_btc_wl_info *wl = &cx->wl;
@@ -6774,7 +7188,7 @@ static void _show_summary(struct rtw89_dev *rtwdev, struct seq_file *m)
pcinfo = &pfwinfo->rpt_ctrl.cinfo;
if (pcinfo->valid && !wl->status.map.lps && !wl->status.map.rf_off) {
- prptctrl = &pfwinfo->rpt_ctrl.finfo;
+ prptctrl = &pfwinfo->rpt_ctrl.finfo.v1;
seq_printf(m,
" %-15s : h2c_cnt=%d(fail:%d, fw_recv:%d), c2h_cnt=%d(fw_send:%d), ",
@@ -6858,11 +7272,11 @@ static void _show_summary(struct rtw89_dev *rtwdev, struct seq_file *m)
cnt[BTC_NCNT_CUSTOMERIZE]);
}
-static void _show_summary_v1(struct rtw89_dev *rtwdev, struct seq_file *m)
+static void _show_summary_v4(struct rtw89_dev *rtwdev, struct seq_file *m)
{
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo;
- struct rtw89_btc_fbtc_rpt_ctrl_v1 *prptctrl;
+ struct rtw89_btc_fbtc_rpt_ctrl_v4 *prptctrl;
struct rtw89_btc_rpt_cmn_info *pcinfo;
struct rtw89_btc_cx *cx = &btc->cx;
struct rtw89_btc_dm *dm = &btc->dm;
@@ -6878,7 +7292,7 @@ static void _show_summary_v1(struct rtw89_dev *rtwdev, struct seq_file *m)
pcinfo = &pfwinfo->rpt_ctrl.cinfo;
if (pcinfo->valid && !wl->status.map.lps && !wl->status.map.rf_off) {
- prptctrl = &pfwinfo->rpt_ctrl.finfo_v1;
+ prptctrl = &pfwinfo->rpt_ctrl.finfo.v4;
seq_printf(m,
" %-15s : h2c_cnt=%d(fail:%d, fw_recv:%d), c2h_cnt=%d(fw_send:%d), ",
@@ -6970,11 +7384,127 @@ static void _show_summary_v1(struct rtw89_dev *rtwdev, struct seq_file *m)
cnt[BTC_NCNT_CUSTOMERIZE]);
}
+static void _show_summary_v5(struct rtw89_dev *rtwdev, struct seq_file *m)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo;
+ struct rtw89_btc_fbtc_rpt_ctrl_v5 *prptctrl;
+ struct rtw89_btc_rpt_cmn_info *pcinfo;
+ struct rtw89_btc_cx *cx = &btc->cx;
+ struct rtw89_btc_dm *dm = &btc->dm;
+ struct rtw89_btc_wl_info *wl = &cx->wl;
+ u32 cnt_sum = 0, *cnt = btc->dm.cnt_notify;
+ u8 i;
+
+ if (!(dm->coex_info_map & BTC_COEX_INFO_SUMMARY))
+ return;
+
+ seq_puts(m, "========== [Statistics] ==========\n");
+
+ pcinfo = &pfwinfo->rpt_ctrl.cinfo;
+ if (pcinfo->valid && !wl->status.map.lps && !wl->status.map.rf_off) {
+ prptctrl = &pfwinfo->rpt_ctrl.finfo.v5;
+
+ seq_printf(m,
+ " %-15s : h2c_cnt=%d(fail:%d, fw_recv:%d), c2h_cnt=%d(fw_send:%d, len:%d), ",
+ "[summary]", pfwinfo->cnt_h2c, pfwinfo->cnt_h2c_fail,
+ le16_to_cpu(prptctrl->rpt_info.cnt_h2c),
+ pfwinfo->cnt_c2h,
+ le16_to_cpu(prptctrl->rpt_info.cnt_c2h),
+ le16_to_cpu(prptctrl->rpt_info.len_c2h));
+
+ seq_printf(m,
+ "rpt_cnt=%d(fw_send:%d), rpt_map=0x%x",
+ pfwinfo->event[BTF_EVNT_RPT],
+ le16_to_cpu(prptctrl->rpt_info.cnt),
+ le32_to_cpu(prptctrl->rpt_info.en));
+
+ if (dm->error.map.wl_fw_hang)
+ seq_puts(m, " (WL FW Hang!!)");
+ seq_puts(m, "\n");
+ seq_printf(m,
+ " %-15s : send_ok:%d, send_fail:%d, recv:%d, ",
+ "[mailbox]",
+ le32_to_cpu(prptctrl->bt_mbx_info.cnt_send_ok),
+ le32_to_cpu(prptctrl->bt_mbx_info.cnt_send_fail),
+ le32_to_cpu(prptctrl->bt_mbx_info.cnt_recv));
+
+ seq_printf(m,
+ "A2DP_empty:%d(stop:%d, tx:%d, ack:%d, nack:%d)\n",
+ le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_empty),
+ le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_flowctrl),
+ le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_tx),
+ le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_ack),
+ le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_nack));
+
+ seq_printf(m,
+ " %-15s : wl_rfk[req:%d/go:%d/reject:%d/tout:%d]",
+ "[RFK/LPS]", cx->cnt_wl[BTC_WCNT_RFK_REQ],
+ cx->cnt_wl[BTC_WCNT_RFK_GO],
+ cx->cnt_wl[BTC_WCNT_RFK_REJECT],
+ cx->cnt_wl[BTC_WCNT_RFK_TIMEOUT]);
+
+ seq_printf(m,
+ ", bt_rfk[req:%d]",
+ le16_to_cpu(prptctrl->bt_cnt[BTC_BCNT_RFK_REQ]));
+
+ seq_printf(m,
+ ", AOAC[RF_on:%d/RF_off:%d]",
+ le16_to_cpu(prptctrl->rpt_info.cnt_aoac_rf_on),
+ le16_to_cpu(prptctrl->rpt_info.cnt_aoac_rf_off));
+ } else {
+ seq_puts(m, "\n");
+ seq_printf(m,
+ " %-15s : h2c_cnt=%d(fail:%d), c2h_cnt=%d",
+ "[summary]", pfwinfo->cnt_h2c,
+ pfwinfo->cnt_h2c_fail, pfwinfo->cnt_c2h);
+ }
+
+ if (!pcinfo->valid || pfwinfo->len_mismch || pfwinfo->fver_mismch ||
+ pfwinfo->err[BTFRE_EXCEPTION]) {
+ seq_puts(m, "\n");
+ seq_printf(m,
+ " %-15s : WL FW rpt error!![rpt_ctrl_valid:%d/len:"
+ "0x%x/ver:0x%x/ex:%d/lps=%d/rf_off=%d]",
+ "[ERROR]", pcinfo->valid, pfwinfo->len_mismch,
+ pfwinfo->fver_mismch, pfwinfo->err[BTFRE_EXCEPTION],
+ wl->status.map.lps, wl->status.map.rf_off);
+ }
+
+ for (i = 0; i < BTC_NCNT_NUM; i++)
+ cnt_sum += dm->cnt_notify[i];
+
+ seq_puts(m, "\n");
+ seq_printf(m,
+ " %-15s : total=%d, show_coex_info=%d, power_on=%d, init_coex=%d, ",
+ "[notify_cnt]",
+ cnt_sum, cnt[BTC_NCNT_SHOW_COEX_INFO],
+ cnt[BTC_NCNT_POWER_ON], cnt[BTC_NCNT_INIT_COEX]);
+
+ seq_printf(m,
+ "power_off=%d, radio_state=%d, role_info=%d, wl_rfk=%d, wl_sta=%d",
+ cnt[BTC_NCNT_POWER_OFF], cnt[BTC_NCNT_RADIO_STATE],
+ cnt[BTC_NCNT_ROLE_INFO], cnt[BTC_NCNT_WL_RFK],
+ cnt[BTC_NCNT_WL_STA]);
+
+ seq_puts(m, "\n");
+ seq_printf(m,
+ " %-15s : scan_start=%d, scan_finish=%d, switch_band=%d, special_pkt=%d, ",
+ "[notify_cnt]",
+ cnt[BTC_NCNT_SCAN_START], cnt[BTC_NCNT_SCAN_FINISH],
+ cnt[BTC_NCNT_SWITCH_BAND], cnt[BTC_NCNT_SPECIAL_PACKET]);
+
+ seq_printf(m,
+ "timer=%d, control=%d, customerize=%d",
+ cnt[BTC_NCNT_TIMER], cnt[BTC_NCNT_CONTROL],
+ cnt[BTC_NCNT_CUSTOMERIZE]);
+}
+
void rtw89_btc_dump_info(struct rtw89_dev *rtwdev, struct seq_file *m)
{
- const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_fw_suit *fw_suit = &rtwdev->fw.normal;
struct rtw89_btc *btc = &rtwdev->btc;
+ const struct rtw89_btc_ver *ver = btc->ver;
struct rtw89_btc_cx *cx = &btc->cx;
struct rtw89_btc_bt_info *bt = &cx->bt;
@@ -7003,8 +7533,41 @@ void rtw89_btc_dump_info(struct rtw89_dev *rtwdev, struct seq_file *m)
_show_dm_info(rtwdev, m);
_show_fw_dm_msg(rtwdev, m);
_show_mreg(rtwdev, m);
- if (chip->chip_id == RTL8852A)
- _show_summary(rtwdev, m);
- else
+ if (ver->fcxbtcrpt == 1)
_show_summary_v1(rtwdev, m);
+ else if (ver->fcxbtcrpt == 4)
+ _show_summary_v4(rtwdev, m);
+ else if (ver->fcxbtcrpt == 5)
+ _show_summary_v5(rtwdev, m);
+}
+
+void rtw89_coex_recognize_ver(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ struct rtw89_btc *btc = &rtwdev->btc;
+ const struct rtw89_btc_ver *btc_ver_def;
+ const struct rtw89_fw_suit *fw_suit;
+ u32 suit_ver_code;
+ int i;
+
+ fw_suit = rtw89_fw_suit_get(rtwdev, RTW89_FW_NORMAL);
+ suit_ver_code = RTW89_FW_SUIT_VER_CODE(fw_suit);
+
+ for (i = 0; i < ARRAY_SIZE(rtw89_btc_ver_defs); i++) {
+ btc_ver_def = &rtw89_btc_ver_defs[i];
+
+ if (chip->chip_id != btc_ver_def->chip_id)
+ continue;
+
+ if (suit_ver_code >= btc_ver_def->fw_ver_code) {
+ btc->ver = btc_ver_def;
+ goto out;
+ }
+ }
+
+ btc->ver = &rtw89_btc_ver_defs[RTW89_DEFAULT_BTC_VER_IDX];
+
+out:
+ rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC] use version def[%d] = 0x%08x\n",
+ (int)(btc->ver - rtw89_btc_ver_defs), btc->ver->fw_ver_code);
}
diff --git a/drivers/net/wireless/realtek/rtw89/coex.h b/drivers/net/wireless/realtek/rtw89/coex.h
index ca16afa97ec0..401fb55df82b 100644
--- a/drivers/net/wireless/realtek/rtw89/coex.h
+++ b/drivers/net/wireless/realtek/rtw89/coex.h
@@ -164,6 +164,7 @@ void rtw89_coex_rfk_chk_work(struct work_struct *work);
void rtw89_coex_power_on(struct rtw89_dev *rtwdev);
void rtw89_btc_set_policy(struct rtw89_dev *rtwdev, u16 policy_type);
void rtw89_btc_set_policy_v1(struct rtw89_dev *rtwdev, u16 policy_type);
+void rtw89_coex_recognize_ver(struct rtw89_dev *rtwdev);
static inline u8 rtw89_btc_phymap(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx,
diff --git a/drivers/net/wireless/realtek/rtw89/core.c b/drivers/net/wireless/realtek/rtw89/core.c
index 931aff8b5dc9..f09361bc4a4d 100644
--- a/drivers/net/wireless/realtek/rtw89/core.c
+++ b/drivers/net/wireless/realtek/rtw89/core.c
@@ -498,7 +498,8 @@ static u16 rtw89_core_get_mgmt_rate(struct rtw89_dev *rtwdev,
const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
u16 lowest_rate;
- if (tx_info->flags & IEEE80211_TX_CTL_NO_CCK_RATE || vif->p2p)
+ if (tx_info->flags & IEEE80211_TX_CTL_NO_CCK_RATE ||
+ (vif && vif->p2p))
lowest_rate = RTW89_HW_RATE_OFDM6;
else if (chan->band_type == RTW89_BAND_2G)
lowest_rate = RTW89_HW_RATE_CCK1;
@@ -511,6 +512,21 @@ static u16 rtw89_core_get_mgmt_rate(struct rtw89_dev *rtwdev,
return __ffs(vif->bss_conf.basic_rates) + lowest_rate;
}
+static u8 rtw89_core_tx_get_mac_id(struct rtw89_dev *rtwdev,
+ struct rtw89_core_tx_request *tx_req)
+{
+ struct ieee80211_vif *vif = tx_req->vif;
+ struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+ struct ieee80211_sta *sta = tx_req->sta;
+ struct rtw89_sta *rtwsta;
+
+ if (!sta)
+ return rtwvif->mac_id;
+
+ rtwsta = (struct rtw89_sta *)sta->drv_priv;
+ return rtwsta->mac_id;
+}
+
static void
rtw89_core_tx_update_mgmt_info(struct rtw89_dev *rtwdev,
struct rtw89_core_tx_request *tx_req)
@@ -527,6 +543,7 @@ rtw89_core_tx_update_mgmt_info(struct rtw89_dev *rtwdev,
desc_info->qsel = qsel;
desc_info->ch_dma = ch_dma;
desc_info->port = desc_info->hiq ? rtwvif->port : 0;
+ desc_info->mac_id = rtw89_core_tx_get_mac_id(rtwdev, tx_req);
desc_info->hw_ssn_sel = RTW89_MGMT_HW_SSN_SEL;
desc_info->hw_seq_mode = RTW89_MGMT_HW_SEQ_MODE;
@@ -669,27 +686,14 @@ desc_bk:
desc_info->bk = true;
}
-static u8 rtw89_core_tx_get_mac_id(struct rtw89_dev *rtwdev,
- struct rtw89_core_tx_request *tx_req)
-{
- struct ieee80211_vif *vif = tx_req->vif;
- struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
- struct ieee80211_sta *sta = tx_req->sta;
- struct rtw89_sta *rtwsta;
-
- if (!sta)
- return rtwvif->mac_id;
-
- rtwsta = (struct rtw89_sta *)sta->drv_priv;
- return rtwsta->mac_id;
-}
-
static void
rtw89_core_tx_update_data_info(struct rtw89_dev *rtwdev,
struct rtw89_core_tx_request *tx_req)
{
struct ieee80211_vif *vif = tx_req->vif;
+ struct ieee80211_sta *sta = tx_req->sta;
struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+ struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(sta);
struct rtw89_phy_rate_pattern *rate_pattern = &rtwvif->rate_pattern;
const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
@@ -707,6 +711,7 @@ rtw89_core_tx_update_data_info(struct rtw89_dev *rtwdev,
desc_info->qsel = qsel;
desc_info->mac_id = rtw89_core_tx_get_mac_id(rtwdev, tx_req);
desc_info->port = desc_info->hiq ? rtwvif->port : 0;
+ desc_info->er_cap = rtwsta ? rtwsta->er_cap : false;
/* enable wd_info for AMPDU */
desc_info->en_wd_info = true;
@@ -1006,7 +1011,9 @@ static __le32 rtw89_build_txwd_info0(struct rtw89_tx_desc_info *desc_info)
static __le32 rtw89_build_txwd_info0_v1(struct rtw89_tx_desc_info *desc_info)
{
u32 dword = FIELD_PREP(RTW89_TXWD_INFO0_DISDATAFB, desc_info->dis_data_fb) |
- FIELD_PREP(RTW89_TXWD_INFO0_MULTIPORT_ID, desc_info->port);
+ FIELD_PREP(RTW89_TXWD_INFO0_MULTIPORT_ID, desc_info->port) |
+ FIELD_PREP(RTW89_TXWD_INFO0_DATA_ER, desc_info->er_cap) |
+ FIELD_PREP(RTW89_TXWD_INFO0_DATA_BW_ER, 0);
return cpu_to_le32(dword);
}
@@ -1757,7 +1764,8 @@ static enum rtw89_ps_mode rtw89_update_ps_mode(struct rtw89_dev *rtwdev)
RTW89_CHK_FW_FEATURE(NO_DEEP_PS, &rtwdev->fw))
return RTW89_PS_MODE_NONE;
- if (chip->ps_mode_supported & BIT(RTW89_PS_MODE_PWR_GATED))
+ if ((chip->ps_mode_supported & BIT(RTW89_PS_MODE_PWR_GATED)) &&
+ !RTW89_CHK_FW_FEATURE(NO_LPS_PG, &rtwdev->fw))
return RTW89_PS_MODE_PWR_GATED;
if (chip->ps_mode_supported & BIT(RTW89_PS_MODE_CLK_GATED))
@@ -2199,8 +2207,9 @@ static bool rtw89_traffic_stats_track(struct rtw89_dev *rtwdev)
static void rtw89_vif_enter_lps(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
- if (rtwvif->wifi_role != RTW89_WIFI_ROLE_STATION &&
- rtwvif->wifi_role != RTW89_WIFI_ROLE_P2P_CLIENT)
+ if ((rtwvif->wifi_role != RTW89_WIFI_ROLE_STATION &&
+ rtwvif->wifi_role != RTW89_WIFI_ROLE_P2P_CLIENT) ||
+ rtwvif->tdls_peer)
return;
if (rtwvif->stats.tx_tfc_lv == RTW89_TFC_IDLE &&
@@ -2426,6 +2435,7 @@ int rtw89_core_sta_add(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
int i;
+ int ret;
rtwsta->rtwdev = rtwdev;
rtwsta->rtwvif = rtwvif;
@@ -2450,6 +2460,21 @@ int rtw89_core_sta_add(struct rtw89_dev *rtwdev,
RTW89_MAX_MAC_ID_NUM);
if (rtwsta->mac_id == RTW89_MAX_MAC_ID_NUM)
return -ENOSPC;
+
+ ret = rtw89_mac_set_macid_pause(rtwdev, rtwsta->mac_id, false);
+ if (ret) {
+ rtw89_core_release_bit_map(rtwdev->mac_id_map, rtwsta->mac_id);
+ rtw89_warn(rtwdev, "failed to send h2c macid pause\n");
+ return ret;
+ }
+
+ ret = rtw89_fw_h2c_role_maintain(rtwdev, rtwvif, rtwsta,
+ RTW89_ROLE_CREATE);
+ if (ret) {
+ rtw89_core_release_bit_map(rtwdev->mac_id_map, rtwsta->mac_id);
+ rtw89_warn(rtwdev, "failed to send h2c role info\n");
+ return ret;
+ }
}
return 0;
@@ -2459,9 +2484,12 @@ int rtw89_core_sta_disassoc(struct rtw89_dev *rtwdev,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
+ struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
rtwdev->total_sta_assoc--;
+ if (sta->tdls)
+ rtwvif->tdls_peer--;
rtwsta->disassoc = true;
return 0;
@@ -2484,8 +2512,10 @@ int rtw89_core_sta_disconnect(struct rtw89_dev *rtwdev,
if (sta->tdls)
rtw89_cam_deinit_bssid_cam(rtwdev, &rtwsta->bssid_cam);
- if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls)
+ if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) {
rtw89_vif_type_mapping(vif, false);
+ rtw89_fw_release_general_pkt_list_vif(rtwdev, rtwvif, true);
+ }
ret = rtw89_fw_h2c_assoc_cmac_tbl(rtwdev, vif, sta);
if (ret) {
@@ -2499,14 +2529,6 @@ int rtw89_core_sta_disconnect(struct rtw89_dev *rtwdev,
return ret;
}
- if (vif->type == NL80211_IFTYPE_AP || sta->tdls) {
- ret = rtw89_fw_h2c_role_maintain(rtwdev, rtwvif, rtwsta, RTW89_ROLE_REMOVE);
- if (ret) {
- rtw89_warn(rtwdev, "failed to send h2c role info\n");
- return ret;
- }
- }
-
/* update cam aid mac_id net_type */
ret = rtw89_fw_h2c_cam(rtwdev, rtwvif, rtwsta, NULL);
if (ret) {
@@ -2527,18 +2549,6 @@ int rtw89_core_sta_assoc(struct rtw89_dev *rtwdev,
int ret;
if (vif->type == NL80211_IFTYPE_AP || sta->tdls) {
- ret = rtw89_mac_set_macid_pause(rtwdev, rtwsta->mac_id, false);
- if (ret) {
- rtw89_warn(rtwdev, "failed to send h2c macid pause\n");
- return ret;
- }
-
- ret = rtw89_fw_h2c_role_maintain(rtwdev, rtwvif, rtwsta, RTW89_ROLE_CREATE);
- if (ret) {
- rtw89_warn(rtwdev, "failed to send h2c role info\n");
- return ret;
- }
-
if (sta->tdls) {
ret = rtw89_cam_init_bssid_cam(rtwdev, rtwvif, bssid_cam, sta->addr);
if (ret) {
@@ -2573,22 +2583,30 @@ int rtw89_core_sta_assoc(struct rtw89_dev *rtwdev,
return ret;
}
- ret = rtw89_fw_h2c_general_pkt(rtwdev, rtwsta->mac_id);
- if (ret) {
- rtw89_warn(rtwdev, "failed to send h2c general packet\n");
- return ret;
- }
-
rtwdev->total_sta_assoc++;
+ if (sta->tdls)
+ rtwvif->tdls_peer++;
rtw89_phy_ra_assoc(rtwdev, sta);
rtw89_mac_bf_assoc(rtwdev, vif, sta);
rtw89_mac_bf_monitor_calc(rtwdev, sta, false);
if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) {
+ struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
+
+ if (bss_conf->he_support &&
+ !(bss_conf->he_oper.params & IEEE80211_HE_OPERATION_ER_SU_DISABLE))
+ rtwsta->er_cap = true;
+
rtw89_btc_ntfy_role_info(rtwdev, rtwvif, rtwsta,
BTC_ROLE_MSTS_STA_CONN_END);
rtw89_core_get_no_ul_ofdma_htc(rtwdev, &rtwsta->htc_template);
rtw89_phy_ul_tb_assoc(rtwdev, rtwvif);
+
+ ret = rtw89_fw_h2c_general_pkt(rtwdev, rtwvif, rtwsta->mac_id);
+ if (ret) {
+ rtw89_warn(rtwdev, "failed to send h2c general packet\n");
+ return ret;
+ }
}
return ret;
@@ -2600,13 +2618,22 @@ int rtw89_core_sta_remove(struct rtw89_dev *rtwdev,
{
struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
+ int ret;
if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls)
rtw89_btc_ntfy_role_info(rtwdev, rtwvif, rtwsta,
BTC_ROLE_MSTS_STA_DIS_CONN);
- else if (vif->type == NL80211_IFTYPE_AP || sta->tdls)
+ else if (vif->type == NL80211_IFTYPE_AP || sta->tdls) {
rtw89_core_release_bit_map(rtwdev->mac_id_map, rtwsta->mac_id);
+ ret = rtw89_fw_h2c_role_maintain(rtwdev, rtwvif, rtwsta,
+ RTW89_ROLE_REMOVE);
+ if (ret) {
+ rtw89_warn(rtwdev, "failed to send h2c role info\n");
+ return ret;
+ }
+ }
+
return 0;
}
@@ -3113,7 +3140,6 @@ int rtw89_core_init(struct rtw89_dev *rtwdev)
continue;
INIT_LIST_HEAD(&rtwdev->scan_info.pkt_list[band]);
}
- INIT_LIST_HEAD(&rtwdev->wow.pkt_list);
INIT_WORK(&rtwdev->ba_work, rtw89_core_ba_work);
INIT_WORK(&rtwdev->txq_work, rtw89_core_txq_work);
INIT_DELAYED_WORK(&rtwdev->txq_reinvoke_work, rtw89_core_txq_reinvoke_work);
@@ -3124,6 +3150,8 @@ int rtw89_core_init(struct rtw89_dev *rtwdev)
INIT_DELAYED_WORK(&rtwdev->cfo_track_work, rtw89_phy_cfo_track_work);
INIT_DELAYED_WORK(&rtwdev->forbid_ba_work, rtw89_forbid_ba_work);
rtwdev->txq_wq = alloc_workqueue("rtw89_tx_wq", WQ_UNBOUND | WQ_HIGHPRI, 0);
+ if (!rtwdev->txq_wq)
+ return -ENOMEM;
spin_lock_init(&rtwdev->ba_lock);
spin_lock_init(&rtwdev->rpwm_lock);
mutex_init(&rtwdev->mutex);
@@ -3138,7 +3166,6 @@ int rtw89_core_init(struct rtw89_dev *rtwdev)
rtw89_core_ppdu_sts_init(rtwdev);
rtw89_traffic_stats_init(rtwdev, &rtwdev->stats);
- rtwdev->ps_mode = rtw89_update_ps_mode(rtwdev);
rtwdev->hal.rx_fltr = DEFAULT_AX_RX_FLTR;
INIT_WORK(&btc->eapol_notify_work, rtw89_btc_ntfy_eapol_packet_work);
@@ -3149,6 +3176,7 @@ int rtw89_core_init(struct rtw89_dev *rtwdev)
ret = rtw89_load_firmware(rtwdev);
if (ret) {
rtw89_warn(rtwdev, "no firmware loaded\n");
+ destroy_workqueue(rtwdev->txq_wq);
return ret;
}
rtw89_ser_init(rtwdev);
@@ -3293,6 +3321,8 @@ int rtw89_chip_info_setup(struct rtw89_dev *rtwdev)
if (ret)
return ret;
+ rtwdev->ps_mode = rtw89_update_ps_mode(rtwdev);
+
return 0;
}
EXPORT_SYMBOL(rtw89_chip_info_setup);
diff --git a/drivers/net/wireless/realtek/rtw89/core.h b/drivers/net/wireless/realtek/rtw89/core.h
index 2badb96d2ae3..41365ffb7e5e 100644
--- a/drivers/net/wireless/realtek/rtw89/core.h
+++ b/drivers/net/wireless/realtek/rtw89/core.h
@@ -816,6 +816,7 @@ struct rtw89_tx_desc_info {
#define RTW89_MGMT_HW_SEQ_MODE 1
bool hiq;
u8 port;
+ bool er_cap;
};
struct rtw89_core_tx_request {
@@ -1263,6 +1264,7 @@ union rtw89_btc_bt_state_map {
#define BTC_BT_RSSI_THMAX 4
#define BTC_BT_AFH_GROUP 12
+#define BTC_BT_AFH_LE_GROUP 5
struct rtw89_btc_bt_link_info {
struct rtw89_btc_u8_sta_chg profile_cnt;
@@ -1278,6 +1280,7 @@ struct rtw89_btc_bt_link_info {
u8 golden_rx_shift[BTC_PROFILE_MAX];
u8 rssi_state[BTC_BT_RSSI_THMAX];
u8 afh_map[BTC_BT_AFH_GROUP];
+ u8 afh_map_le[BTC_BT_AFH_LE_GROUP];
u32 role_sw: 1;
u32 slave_role: 1;
@@ -1437,7 +1440,7 @@ struct rtw89_btc_cx {
};
struct rtw89_btc_fbtc_tdma {
- u8 type; /* chip_info::fcxtdma_ver */
+ u8 type; /* btc_ver::fcxtdma */
u8 rxflctrl;
u8 txpause;
u8 wtgle_n;
@@ -1447,13 +1450,18 @@ struct rtw89_btc_fbtc_tdma {
u8 option_ctrl;
} __packed;
-struct rtw89_btc_fbtc_tdma_v1 {
- u8 fver; /* chip_info::fcxtdma_ver */
+struct rtw89_btc_fbtc_tdma_v3 {
+ u8 fver; /* btc_ver::fcxtdma */
u8 rsvd;
__le16 rsvd1;
struct rtw89_btc_fbtc_tdma tdma;
} __packed;
+union rtw89_btc_fbtc_tdma_le32 {
+ struct rtw89_btc_fbtc_tdma v1;
+ struct rtw89_btc_fbtc_tdma_v3 v3;
+};
+
#define CXMREG_MAX 30
#define FCXMAX_STEP 255 /*STEP trace record cnt, Max:65535, default:255*/
#define BTC_CYCLE_SLOT_MAX 48 /* must be even number, non-zero */
@@ -1472,8 +1480,8 @@ enum rtw89_btc_bt_sta_counter {
BTC_BCNT_STA_MAX
};
-struct rtw89_btc_fbtc_rpt_ctrl {
- u16 fver; /* chip_info::fcxbtcrpt_ver */
+struct rtw89_btc_fbtc_rpt_ctrl_v1 {
+ u16 fver; /* btc_ver::fcxbtcrpt */
u16 rpt_cnt; /* tmr counters */
u32 wl_fw_coex_ver; /* match which driver's coex version */
u32 wl_fw_cx_offload;
@@ -1504,6 +1512,20 @@ struct rtw89_btc_fbtc_rpt_ctrl_info {
__le32 cnt_aoac_rf_off; /* rf-off counter for aoac switch notify */
} __packed;
+struct rtw89_btc_fbtc_rpt_ctrl_info_v5 {
+ __le32 cx_ver; /* match which driver's coex version */
+ __le32 fw_ver;
+ __le32 en; /* report map */
+
+ __le16 cnt; /* fw report counter */
+ __le16 cnt_c2h; /* fw send c2h counter */
+ __le16 cnt_h2c; /* fw recv h2c counter */
+ __le16 len_c2h; /* The total length of the last C2H */
+
+ __le16 cnt_aoac_rf_on; /* rf-on counter for aoac switch notify */
+ __le16 cnt_aoac_rf_off; /* rf-off counter for aoac switch notify */
+} __packed;
+
struct rtw89_btc_fbtc_rpt_ctrl_wl_fw_info {
__le32 cx_ver; /* match which driver's coex version */
__le32 cx_offload;
@@ -1525,7 +1547,7 @@ struct rtw89_btc_fbtc_rpt_ctrl_bt_mailbox {
struct rtw89_btc_fbtc_rpt_ctrl_a2dp_empty a2dp;
} __packed;
-struct rtw89_btc_fbtc_rpt_ctrl_v1 {
+struct rtw89_btc_fbtc_rpt_ctrl_v4 {
u8 fver;
u8 rsvd;
__le16 rsvd1;
@@ -1536,6 +1558,24 @@ struct rtw89_btc_fbtc_rpt_ctrl_v1 {
struct rtw89_mac_ax_gnt gnt_val[RTW89_PHY_MAX];
} __packed;
+struct rtw89_btc_fbtc_rpt_ctrl_v5 {
+ u8 fver;
+ u8 rsvd;
+ __le16 rsvd1;
+
+ u8 gnt_val[RTW89_PHY_MAX][4];
+ __le16 bt_cnt[BTC_BCNT_STA_MAX];
+
+ struct rtw89_btc_fbtc_rpt_ctrl_info_v5 rpt_info;
+ struct rtw89_btc_fbtc_rpt_ctrl_bt_mailbox bt_mbx_info;
+} __packed;
+
+union rtw89_btc_fbtc_rpt_ctrl_ver_info {
+ struct rtw89_btc_fbtc_rpt_ctrl_v1 v1;
+ struct rtw89_btc_fbtc_rpt_ctrl_v4 v4;
+ struct rtw89_btc_fbtc_rpt_ctrl_v5 v5;
+};
+
enum rtw89_fbtc_ext_ctrl_type {
CXECTL_OFF = 0x0, /* tdma off */
CXECTL_B2 = 0x1, /* allow B2 (beacon-early) */
@@ -1571,6 +1611,36 @@ enum rtw89_btc_cxst_state {
CXST_MAX = 0x12,
};
+enum rtw89_btc_cxevnt {
+ CXEVNT_TDMA_ENTRY = 0x0,
+ CXEVNT_WL_TMR,
+ CXEVNT_B1_TMR,
+ CXEVNT_B2_TMR,
+ CXEVNT_B3_TMR,
+ CXEVNT_B4_TMR,
+ CXEVNT_W2B_TMR,
+ CXEVNT_B2W_TMR,
+ CXEVNT_BCN_EARLY,
+ CXEVNT_A2DP_EMPTY,
+ CXEVNT_LK_END,
+ CXEVNT_RX_ISR,
+ CXEVNT_RX_FC0,
+ CXEVNT_RX_FC1,
+ CXEVNT_BT_RELINK,
+ CXEVNT_BT_RETRY,
+ CXEVNT_E2G,
+ CXEVNT_E5G,
+ CXEVNT_EBT,
+ CXEVNT_ENULL,
+ CXEVNT_DRV_WLK,
+ CXEVNT_BCN_OK,
+ CXEVNT_BT_CHANGE,
+ CXEVNT_EBT_EXTEND,
+ CXEVNT_E2G_NULL1,
+ CXEVNT_B1FDD_TMR,
+ CXEVNT_MAX
+};
+
enum {
CXBCN_ALL = 0x0,
CXBCN_ALL_OK,
@@ -1604,9 +1674,14 @@ enum { /* STEP TYPE */
CXSTEP_MAX,
};
+enum rtw89_btc_afh_map_type { /*AFH MAP TYPE */
+ RPT_BT_AFH_SEQ_LEGACY = 0x10,
+ RPT_BT_AFH_SEQ_LE = 0x20
+};
+
#define BTC_DBG_MAX1 32
struct rtw89_btc_fbtc_gpio_dbg {
- u8 fver; /* chip_info::fcxgpiodbg_ver */
+ u8 fver; /* btc_ver::fcxgpiodbg */
u8 rsvd;
u16 rsvd2;
u32 en_map; /* which debug signal (see btc_wl_gpio_debug) is enable */
@@ -1615,7 +1690,7 @@ struct rtw89_btc_fbtc_gpio_dbg {
} __packed;
struct rtw89_btc_fbtc_mreg_val {
- u8 fver; /* chip_info::fcxmreg_ver */
+ u8 fver; /* btc_ver::fcxmreg */
u8 reg_num;
__le16 rsvd;
__le32 mreg_val[CXMREG_MAX];
@@ -1638,7 +1713,7 @@ struct rtw89_btc_fbtc_slot {
} __packed;
struct rtw89_btc_fbtc_slots {
- u8 fver; /* chip_info::fcxslots_ver */
+ u8 fver; /* btc_ver::fcxslots */
u8 tbl_num;
__le16 rsvd;
__le32 update_map;
@@ -1651,8 +1726,8 @@ struct rtw89_btc_fbtc_step {
__le16 difft;
} __packed;
-struct rtw89_btc_fbtc_steps {
- u8 fver; /* chip_info::fcxstep_ver */
+struct rtw89_btc_fbtc_steps_v2 {
+ u8 fver; /* btc_ver::fcxstep */
u8 rsvd;
__le16 cnt;
__le16 pos_old;
@@ -1660,7 +1735,7 @@ struct rtw89_btc_fbtc_steps {
struct rtw89_btc_fbtc_step step[FCXMAX_STEP];
} __packed;
-struct rtw89_btc_fbtc_steps_v1 {
+struct rtw89_btc_fbtc_steps_v3 {
u8 fver;
u8 en;
__le16 rsvd;
@@ -1668,8 +1743,13 @@ struct rtw89_btc_fbtc_steps_v1 {
struct rtw89_btc_fbtc_step step[FCXMAX_STEP];
} __packed;
-struct rtw89_btc_fbtc_cysta { /* statistics for cycles */
- u8 fver; /* chip_info::fcxcysta_ver */
+union rtw89_btc_fbtc_steps_info {
+ struct rtw89_btc_fbtc_steps_v2 v2;
+ struct rtw89_btc_fbtc_steps_v3 v3;
+};
+
+struct rtw89_btc_fbtc_cysta_v2 { /* statistics for cycles */
+ u8 fver; /* btc_ver::fcxcysta */
u8 rsvd;
__le16 cycles; /* total cycle number */
__le16 cycles_a2dp[CXT_FLCTRL_MAX];
@@ -1717,6 +1797,17 @@ struct rtw89_btc_fbtc_a2dp_trx_stat {
u8 rsvd2;
} __packed;
+struct rtw89_btc_fbtc_a2dp_trx_stat_v4 {
+ u8 empty_cnt;
+ u8 retry_cnt;
+ u8 tx_rate;
+ u8 tx_cnt;
+ u8 ack_cnt;
+ u8 nack_cnt;
+ u8 no_empty_cnt;
+ u8 rsvd;
+} __packed;
+
struct rtw89_btc_fbtc_cycle_a2dp_empty_info {
__le16 cnt; /* a2dp empty cnt */
__le16 cnt_timeout; /* a2dp empty timeout cnt*/
@@ -1730,7 +1821,35 @@ struct rtw89_btc_fbtc_cycle_leak_info {
__le16 tmax; /* max leak-slot time */
} __packed;
-struct rtw89_btc_fbtc_cysta_v1 { /* statistics for cycles */
+#define RTW89_BTC_FDDT_PHASE_CYCLE GENMASK(9, 0)
+#define RTW89_BTC_FDDT_TRAIN_STEP GENMASK(15, 10)
+
+struct rtw89_btc_fbtc_cycle_fddt_info {
+ __le16 train_cycle;
+ __le16 tp;
+
+ s8 tx_power; /* absolute Tx power (dBm), 0xff-> no BTC control */
+ s8 bt_tx_power; /* decrease Tx power (dB) */
+ s8 bt_rx_gain; /* LNA constrain level */
+ u8 no_empty_cnt;
+
+ u8 rssi; /* [7:4] -> bt_rssi_level, [3:0]-> wl_rssi_level */
+ u8 cn; /* condition_num */
+ u8 train_status; /* [7:4]-> train-state, [3:0]-> train-phase */
+ u8 train_result; /* refer to enum btc_fddt_check_map */
+} __packed;
+
+#define RTW89_BTC_FDDT_CELL_TRAIN_STATE GENMASK(3, 0)
+#define RTW89_BTC_FDDT_CELL_TRAIN_PHASE GENMASK(7, 4)
+
+struct rtw89_btc_fbtc_fddt_cell_status {
+ s8 wl_tx_pwr;
+ s8 bt_tx_pwr;
+ s8 bt_rx_gain;
+ u8 state_phase; /* [0:3] train state, [4:7] train phase */
+} __packed;
+
+struct rtw89_btc_fbtc_cysta_v3 { /* statistics for cycles */
u8 fver;
u8 rsvd;
__le16 cycles; /* total cycle number */
@@ -1748,8 +1867,41 @@ struct rtw89_btc_fbtc_cysta_v1 { /* statistics for cycles */
__le32 except_map;
} __packed;
-struct rtw89_btc_fbtc_cynullsta { /* cycle null statistics */
- u8 fver; /* chip_info::fcxnullsta_ver */
+#define FDD_TRAIN_WL_DIRECTION 2
+#define FDD_TRAIN_WL_RSSI_LEVEL 5
+#define FDD_TRAIN_BT_RSSI_LEVEL 5
+
+struct rtw89_btc_fbtc_cysta_v4 { /* statistics for cycles */
+ u8 fver;
+ u8 rsvd;
+ u8 collision_cnt; /* counter for event/timer occur at the same time */
+ u8 except_cnt;
+
+ __le16 skip_cnt;
+ __le16 cycles; /* total cycle number */
+
+ __le16 slot_step_time[BTC_CYCLE_SLOT_MAX]; /* record the wl/bt slot time */
+ __le16 slot_cnt[CXST_MAX]; /* slot count */
+ __le16 bcn_cnt[CXBCN_MAX];
+ struct rtw89_btc_fbtc_cycle_time_info cycle_time;
+ struct rtw89_btc_fbtc_cycle_leak_info leak_slot;
+ struct rtw89_btc_fbtc_cycle_a2dp_empty_info a2dp_ept;
+ struct rtw89_btc_fbtc_a2dp_trx_stat_v4 a2dp_trx[BTC_CYCLE_SLOT_MAX];
+ struct rtw89_btc_fbtc_cycle_fddt_info fddt_trx[BTC_CYCLE_SLOT_MAX];
+ struct rtw89_btc_fbtc_fddt_cell_status fddt_cells[FDD_TRAIN_WL_DIRECTION]
+ [FDD_TRAIN_WL_RSSI_LEVEL]
+ [FDD_TRAIN_BT_RSSI_LEVEL];
+ __le32 except_map;
+} __packed;
+
+union rtw89_btc_fbtc_cysta_info {
+ struct rtw89_btc_fbtc_cysta_v2 v2;
+ struct rtw89_btc_fbtc_cysta_v3 v3;
+ struct rtw89_btc_fbtc_cysta_v4 v4;
+};
+
+struct rtw89_btc_fbtc_cynullsta_v1 { /* cycle null statistics */
+ u8 fver; /* btc_ver::fcxnullsta */
u8 rsvd;
__le16 rsvd2;
__le32 max_t[2]; /* max_t for 0:null0/1:null1 */
@@ -1757,8 +1909,8 @@ struct rtw89_btc_fbtc_cynullsta { /* cycle null statistics */
__le32 result[2][4]; /* 0:fail, 1:ok, 2:on_time, 3:retry */
} __packed;
-struct rtw89_btc_fbtc_cynullsta_v1 { /* cycle null statistics */
- u8 fver; /* chip_info::fcxnullsta_ver */
+struct rtw89_btc_fbtc_cynullsta_v2 { /* cycle null statistics */
+ u8 fver; /* btc_ver::fcxnullsta */
u8 rsvd;
__le16 rsvd2;
__le32 max_t[2]; /* max_t for 0:null0/1:null1 */
@@ -1766,8 +1918,13 @@ struct rtw89_btc_fbtc_cynullsta_v1 { /* cycle null statistics */
__le32 result[2][5]; /* 0:fail, 1:ok, 2:on_time, 3:retry, 4:tx */
} __packed;
+union rtw89_btc_fbtc_cynullsta_info {
+ struct rtw89_btc_fbtc_cynullsta_v1 v1; /* info from fw */
+ struct rtw89_btc_fbtc_cynullsta_v2 v2;
+};
+
struct rtw89_btc_fbtc_btver {
- u8 fver; /* chip_info::fcxbtver_ver */
+ u8 fver; /* btc_ver::fcxbtver */
u8 rsvd;
__le16 rsvd2;
__le32 coex_ver; /*bit[15:8]->shared, bit[7:0]->non-shared */
@@ -1776,14 +1933,14 @@ struct rtw89_btc_fbtc_btver {
} __packed;
struct rtw89_btc_fbtc_btscan {
- u8 fver; /* chip_info::fcxbtscan_ver */
+ u8 fver; /* btc_ver::fcxbtscan */
u8 rsvd;
__le16 rsvd2;
u8 scan[6];
} __packed;
struct rtw89_btc_fbtc_btafh {
- u8 fver; /* chip_info::fcxbtafh_ver */
+ u8 fver; /* btc_ver::fcxbtafh */
u8 rsvd;
__le16 rsvd2;
u8 afh_l[4]; /*bit0:2402, bit1: 2403.... bit31:2433 */
@@ -1791,8 +1948,20 @@ struct rtw89_btc_fbtc_btafh {
u8 afh_h[4]; /*bit0:2466, bit1:2467......bit14:2480 */
} __packed;
+struct rtw89_btc_fbtc_btafh_v2 {
+ u8 fver; /* btc_ver::fcxbtafh */
+ u8 rsvd;
+ u8 rsvd2;
+ u8 map_type;
+ u8 afh_l[4];
+ u8 afh_m[4];
+ u8 afh_h[4];
+ u8 afh_le_a[4];
+ u8 afh_le_b[4];
+} __packed;
+
struct rtw89_btc_fbtc_btdevinfo {
- u8 fver; /* chip_info::fcxbtdevinfo_ver */
+ u8 fver; /* btc_ver::fcxbtdevinfo */
u8 rsvd;
__le16 vendor_id;
__le32 dev_name; /* only 24 bits valid */
@@ -1911,20 +2080,19 @@ struct rtw89_btc_rpt_cmn_info {
u8 valid;
} __packed;
+union rtw89_btc_fbtc_btafh_info {
+ struct rtw89_btc_fbtc_btafh v1;
+ struct rtw89_btc_fbtc_btafh_v2 v2;
+};
+
struct rtw89_btc_report_ctrl_state {
struct rtw89_btc_rpt_cmn_info cinfo; /* common info, by driver */
- union {
- struct rtw89_btc_fbtc_rpt_ctrl finfo; /* info from fw for 52A*/
- struct rtw89_btc_fbtc_rpt_ctrl_v1 finfo_v1; /* info from fw for 52C*/
- };
+ union rtw89_btc_fbtc_rpt_ctrl_ver_info finfo;
};
struct rtw89_btc_rpt_fbtc_tdma {
struct rtw89_btc_rpt_cmn_info cinfo; /* common info, by driver */
- union {
- struct rtw89_btc_fbtc_tdma finfo; /* info from fw */
- struct rtw89_btc_fbtc_tdma_v1 finfo_v1; /* info from fw for 52C*/
- };
+ union rtw89_btc_fbtc_tdma_le32 finfo;
};
struct rtw89_btc_rpt_fbtc_slots {
@@ -1934,26 +2102,17 @@ struct rtw89_btc_rpt_fbtc_slots {
struct rtw89_btc_rpt_fbtc_cysta {
struct rtw89_btc_rpt_cmn_info cinfo; /* common info, by driver */
- union {
- struct rtw89_btc_fbtc_cysta finfo; /* info from fw for 52A*/
- struct rtw89_btc_fbtc_cysta_v1 finfo_v1; /* info from fw for 52C*/
- };
+ union rtw89_btc_fbtc_cysta_info finfo;
};
struct rtw89_btc_rpt_fbtc_step {
struct rtw89_btc_rpt_cmn_info cinfo; /* common info, by driver */
- union {
- struct rtw89_btc_fbtc_steps finfo; /* info from fw */
- struct rtw89_btc_fbtc_steps_v1 finfo_v1; /* info from fw */
- };
+ union rtw89_btc_fbtc_steps_info finfo; /* info from fw */
};
struct rtw89_btc_rpt_fbtc_nullsta {
struct rtw89_btc_rpt_cmn_info cinfo; /* common info, by driver */
- union {
- struct rtw89_btc_fbtc_cynullsta finfo; /* info from fw */
- struct rtw89_btc_fbtc_cynullsta_v1 finfo_v1; /* info from fw */
- };
+ union rtw89_btc_fbtc_cynullsta_info finfo;
};
struct rtw89_btc_rpt_fbtc_mreg {
@@ -1978,7 +2137,7 @@ struct rtw89_btc_rpt_fbtc_btscan {
struct rtw89_btc_rpt_fbtc_btafh {
struct rtw89_btc_rpt_cmn_info cinfo; /* common info, by driver */
- struct rtw89_btc_fbtc_btafh finfo; /* info from fw */
+ union rtw89_btc_fbtc_btafh_info finfo;
};
struct rtw89_btc_rpt_fbtc_btdev {
@@ -2018,9 +2177,35 @@ struct rtw89_btc_btf_fwinfo {
struct rtw89_btc_rpt_fbtc_btdev rpt_fbtc_btdev;
};
+struct rtw89_btc_ver {
+ enum rtw89_core_chip_id chip_id;
+ u32 fw_ver_code;
+
+ u8 fcxbtcrpt;
+ u8 fcxtdma;
+ u8 fcxslots;
+ u8 fcxcysta;
+ u8 fcxstep;
+ u8 fcxnullsta;
+ u8 fcxmreg;
+ u8 fcxgpiodbg;
+ u8 fcxbtver;
+ u8 fcxbtscan;
+ u8 fcxbtafh;
+ u8 fcxbtdevinfo;
+ u8 fwlrole;
+ u8 frptmap;
+ u8 fcxctrl;
+
+ u16 info_buf;
+ u8 max_role_num;
+};
+
#define RTW89_BTC_POLICY_MAXLEN 512
struct rtw89_btc {
+ const struct rtw89_btc_ver *ver;
+
struct rtw89_btc_cx cx;
struct rtw89_btc_dm dm;
struct rtw89_btc_ctrl ctrl;
@@ -2194,6 +2379,7 @@ struct rtw89_sec_cam_entry {
struct rtw89_sta {
u8 mac_id;
bool disassoc;
+ bool er_cap;
struct rtw89_dev *rtwdev;
struct rtw89_vif *rtwvif;
struct rtw89_ra_info ra;
@@ -2266,6 +2452,7 @@ struct rtw89_vif {
bool last_a_ctrl;
bool dyn_tb_bedge_en;
u8 def_tri_idx;
+ u32 tdls_peer;
struct work_struct update_beacon_work;
struct rtw89_addr_cam_entry addr_cam;
struct rtw89_bssid_cam_entry bssid_cam;
@@ -2274,6 +2461,7 @@ struct rtw89_vif {
struct rtw89_phy_rate_pattern rate_pattern;
struct cfg80211_scan_request *scan_req;
struct ieee80211_scan_ies *scan_ies;
+ struct list_head general_pkt_list;
};
enum rtw89_lv1_rcvy_step {
@@ -2660,6 +2848,7 @@ struct rtw89_chip_info {
enum rtw89_core_chip_id chip_id;
const struct rtw89_chip_ops *ops;
const char *fw_name;
+ bool try_ce_fw;
u32 fifo_size;
u32 dle_scc_rsvd_size;
u16 max_amsdu_limit;
@@ -2728,20 +2917,6 @@ struct rtw89_chip_info {
u8 btcx_desired;
u8 scbd;
u8 mailbox;
- u16 btc_fwinfo_buf;
-
- u8 fcxbtcrpt_ver;
- u8 fcxtdma_ver;
- u8 fcxslots_ver;
- u8 fcxcysta_ver;
- u8 fcxstep_ver;
- u8 fcxnullsta_ver;
- u8 fcxmreg_ver;
- u8 fcxgpiodbg_ver;
- u8 fcxbtver_ver;
- u8 fcxbtscan_ver;
- u8 fcxbtafh_ver;
- u8 fcxbtdevinfo_ver;
u8 afh_guard_ch;
const u8 *wl_rssi_thres;
@@ -2771,6 +2946,7 @@ struct rtw89_chip_info {
u8 dcfo_comp_sft;
const struct rtw89_imr_info *imr_info;
const struct rtw89_rrsr_cfgs *rrsr_cfgs;
+ u32 bss_clr_map_reg;
u32 dma_ch_mask;
const struct wiphy_wowlan_support *wowlan_stub;
};
@@ -2839,6 +3015,7 @@ static inline void rtw89_init_wait(struct rtw89_wait_info *wait)
enum rtw89_fw_type {
RTW89_FW_NORMAL = 1,
RTW89_FW_WOWLAN = 3,
+ RTW89_FW_NORMAL_CE = 5,
};
enum rtw89_fw_feature {
@@ -2848,6 +3025,7 @@ enum rtw89_fw_feature {
RTW89_FW_FEATURE_CRASH_TRIGGER,
RTW89_FW_FEATURE_PACKET_DROP,
RTW89_FW_FEATURE_NO_DEEP_PS,
+ RTW89_FW_FEATURE_NO_LPS_PG,
};
struct rtw89_fw_suit {
@@ -3550,7 +3728,6 @@ struct rtw89_wow_param {
DECLARE_BITMAP(flags, RTW89_WOW_FLAG_NUM);
struct rtw89_wow_cam_info patterns[RTW89_MAX_PATTERN_NUM];
u8 pattern_cnt;
- struct list_head pkt_list;
};
struct rtw89_mcc_info {
diff --git a/drivers/net/wireless/realtek/rtw89/debug.c b/drivers/net/wireless/realtek/rtw89/debug.c
index 8297e35bfa52..0e0e1483c099 100644
--- a/drivers/net/wireless/realtek/rtw89/debug.c
+++ b/drivers/net/wireless/realtek/rtw89/debug.c
@@ -615,6 +615,7 @@ rtw89_debug_priv_mac_reg_dump_select(struct file *filp,
struct seq_file *m = (struct seq_file *)filp->private_data;
struct rtw89_debugfs_priv *debugfs_priv = m->private;
struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
+ const struct rtw89_chip_info *chip = rtwdev->chip;
char buf[32];
size_t buf_size;
int sel;
@@ -634,6 +635,12 @@ rtw89_debug_priv_mac_reg_dump_select(struct file *filp,
return -EINVAL;
}
+ if (sel == RTW89_DBG_SEL_MAC_30 && chip->chip_id != RTL8852C) {
+ rtw89_info(rtwdev, "sel %d is address hole on chip %d\n", sel,
+ chip->chip_id);
+ return -EINVAL;
+ }
+
debugfs_priv->cb_data = sel;
rtw89_info(rtwdev, "select mac page dump %d\n", debugfs_priv->cb_data);
@@ -3347,6 +3354,31 @@ static void rtw89_dump_addr_cam(struct seq_file *m,
}
}
+__printf(3, 4)
+static void rtw89_dump_pkt_offload(struct seq_file *m, struct list_head *pkt_list,
+ const char *fmt, ...)
+{
+ struct rtw89_pktofld_info *info;
+ struct va_format vaf;
+ va_list args;
+
+ if (list_empty(pkt_list))
+ return;
+
+ va_start(args, fmt);
+ vaf.va = &args;
+ vaf.fmt = fmt;
+
+ seq_printf(m, "%pV", &vaf);
+
+ va_end(args);
+
+ list_for_each_entry(info, pkt_list, list)
+ seq_printf(m, "%d ", info->id);
+
+ seq_puts(m, "\n");
+}
+
static
void rtw89_vif_ids_get_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
{
@@ -3357,6 +3389,7 @@ void rtw89_vif_ids_get_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
seq_printf(m, "VIF [%d] %pM\n", rtwvif->mac_id, rtwvif->mac_addr);
seq_printf(m, "\tbssid_cam_idx=%u\n", bssid_cam->bssid_cam_idx);
rtw89_dump_addr_cam(m, &rtwvif->addr_cam);
+ rtw89_dump_pkt_offload(m, &rtwvif->general_pkt_list, "\tpkt_ofld[GENERAL]: ");
}
static void rtw89_dump_ba_cam(struct seq_file *m, struct rtw89_sta *rtwsta)
@@ -3395,6 +3428,7 @@ static int rtw89_debug_priv_stations_get(struct seq_file *m, void *v)
struct rtw89_debugfs_priv *debugfs_priv = m->private;
struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
struct rtw89_cam_info *cam_info = &rtwdev->cam_info;
+ u8 idx;
mutex_lock(&rtwdev->mutex);
@@ -3409,6 +3443,15 @@ static int rtw89_debug_priv_stations_get(struct seq_file *m, void *v)
cam_info->sec_cam_map);
seq_printf(m, "\tba_cam: %*ph\n", (int)sizeof(cam_info->ba_cam_map),
cam_info->ba_cam_map);
+ seq_printf(m, "\tpkt_ofld: %*ph\n", (int)sizeof(rtwdev->pkt_offload),
+ rtwdev->pkt_offload);
+
+ for (idx = NL80211_BAND_2GHZ; idx < NUM_NL80211_BANDS; idx++) {
+ if (!(rtwdev->chip->support_bands & BIT(idx)))
+ continue;
+ rtw89_dump_pkt_offload(m, &rtwdev->scan_info.pkt_list[idx],
+ "\t\t[SCAN %u]: ", idx);
+ }
ieee80211_iterate_active_interfaces_atomic(rtwdev->hw,
IEEE80211_IFACE_ITER_NORMAL, rtw89_vif_ids_get_iter, m);
diff --git a/drivers/net/wireless/realtek/rtw89/debug.h b/drivers/net/wireless/realtek/rtw89/debug.h
index d1de5e600836..079269bb5251 100644
--- a/drivers/net/wireless/realtek/rtw89/debug.h
+++ b/drivers/net/wireless/realtek/rtw89/debug.h
@@ -28,6 +28,7 @@ enum rtw89_debug_mask {
RTW89_DBG_STATE = BIT(17),
RTW89_DBG_WOW = BIT(18),
RTW89_DBG_UL_TB = BIT(19),
+ RTW89_DBG_CHAN = BIT(20),
RTW89_DBG_UNEXP = BIT(31),
};
diff --git a/drivers/net/wireless/realtek/rtw89/fw.c b/drivers/net/wireless/realtek/rtw89/fw.c
index de1f23779fc6..0b73dc2e9ad7 100644
--- a/drivers/net/wireless/realtek/rtw89/fw.c
+++ b/drivers/net/wireless/realtek/rtw89/fw.c
@@ -10,6 +10,7 @@
#include "mac.h"
#include "phy.h"
#include "reg.h"
+#include "util.h"
static void rtw89_fw_c2h_cmd_handle(struct rtw89_dev *rtwdev,
struct sk_buff *skb);
@@ -91,6 +92,7 @@ static int rtw89_fw_hdr_parser(struct rtw89_dev *rtwdev, const u8 *fw, u32 len,
const u8 *fwdynhdr;
const u8 *bin;
u32 base_hdr_len;
+ u32 mssc_len = 0;
u32 i;
if (!info)
@@ -120,6 +122,14 @@ static int rtw89_fw_hdr_parser(struct rtw89_dev *rtwdev, const u8 *fw, u32 len,
fw += RTW89_FW_HDR_SIZE;
section_info = info->section_info;
for (i = 0; i < info->section_num; i++) {
+ section_info->type = GET_FWSECTION_HDR_SECTIONTYPE(fw);
+ if (section_info->type == FWDL_SECURITY_SECTION_TYPE) {
+ section_info->mssc = GET_FWSECTION_HDR_MSSC(fw);
+ mssc_len += section_info->mssc * FWDL_SECURITY_SIGLEN;
+ } else {
+ section_info->mssc = 0;
+ }
+
section_info->len = GET_FWSECTION_HDR_SEC_SIZE(fw);
if (GET_FWSECTION_HDR_CHECKSUM(fw))
section_info->len += FWDL_SECTION_CHKSUM_LEN;
@@ -132,7 +142,7 @@ static int rtw89_fw_hdr_parser(struct rtw89_dev *rtwdev, const u8 *fw, u32 len,
section_info++;
}
- if (fw_end != bin) {
+ if (fw_end != bin + mssc_len) {
rtw89_err(rtwdev, "[ERR]fw bin size\n");
return -EINVAL;
}
@@ -142,7 +152,7 @@ static int rtw89_fw_hdr_parser(struct rtw89_dev *rtwdev, const u8 *fw, u32 len,
static
int rtw89_mfw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type,
- struct rtw89_fw_suit *fw_suit)
+ struct rtw89_fw_suit *fw_suit, bool nowarn)
{
struct rtw89_fw_info *fw_info = &rtwdev->fw;
const u8 *mfw = fw_info->firmware->data;
@@ -173,7 +183,8 @@ int rtw89_mfw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type,
return 0;
}
- rtw89_err(rtwdev, "no suitable firmware found\n");
+ if (!nowarn)
+ rtw89_err(rtwdev, "no suitable firmware found\n");
return -ENOENT;
}
@@ -201,12 +212,13 @@ static void rtw89_fw_update_ver(struct rtw89_dev *rtwdev,
}
static
-int __rtw89_fw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type)
+int __rtw89_fw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type,
+ bool nowarn)
{
struct rtw89_fw_suit *fw_suit = rtw89_fw_suit_get(rtwdev, type);
int ret;
- ret = rtw89_mfw_recognize(rtwdev, type, fw_suit);
+ ret = rtw89_mfw_recognize(rtwdev, type, fw_suit, nowarn);
if (ret)
return ret;
@@ -245,6 +257,7 @@ static const struct __fw_feat_cfg fw_feat_tbl[] = {
__CFG_FW_FEAT(RTL8852A, ge, 0, 13, 35, 0, TX_WAKE),
__CFG_FW_FEAT(RTL8852A, ge, 0, 13, 36, 0, CRASH_TRIGGER),
__CFG_FW_FEAT(RTL8852A, ge, 0, 13, 38, 0, PACKET_DROP),
+ __CFG_FW_FEAT(RTL8852B, ge, 0, 29, 26, 0, NO_LPS_PG),
__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 20, 0, PACKET_DROP),
__CFG_FW_FEAT(RTL8852C, le, 0, 27, 33, 0, NO_DEEP_PS),
__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 34, 0, TX_WAKE),
@@ -332,17 +345,27 @@ out:
int rtw89_fw_recognize(struct rtw89_dev *rtwdev)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
int ret;
- ret = __rtw89_fw_recognize(rtwdev, RTW89_FW_NORMAL);
+ if (chip->try_ce_fw) {
+ ret = __rtw89_fw_recognize(rtwdev, RTW89_FW_NORMAL_CE, true);
+ if (!ret)
+ goto normal_done;
+ }
+
+ ret = __rtw89_fw_recognize(rtwdev, RTW89_FW_NORMAL, false);
if (ret)
return ret;
+normal_done:
/* It still works if wowlan firmware isn't existing. */
- __rtw89_fw_recognize(rtwdev, RTW89_FW_WOWLAN);
+ __rtw89_fw_recognize(rtwdev, RTW89_FW_WOWLAN, false);
rtw89_fw_recognize_features(rtwdev);
+ rtw89_coex_recognize_ver(rtwdev);
+
return 0;
}
@@ -902,13 +925,12 @@ fail:
return ret;
}
-static int rtw89_fw_h2c_add_wow_fw_ofld(struct rtw89_dev *rtwdev,
+static int rtw89_fw_h2c_add_general_pkt(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif,
enum rtw89_fw_pkt_ofld_type type,
u8 *id)
{
struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
- struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
struct rtw89_pktofld_info *info;
struct sk_buff *skb;
int ret;
@@ -937,13 +959,13 @@ static int rtw89_fw_h2c_add_wow_fw_ofld(struct rtw89_dev *rtwdev,
if (!skb)
goto err;
- list_add_tail(&info->list, &rtw_wow->pkt_list);
ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, skb);
kfree_skb(skb);
if (ret)
- return ret;
+ goto err;
+ list_add_tail(&info->list, &rtwvif->general_pkt_list);
*id = info->id;
return 0;
@@ -952,13 +974,48 @@ err:
return -ENOMEM;
}
+void rtw89_fw_release_general_pkt_list_vif(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif, bool notify_fw)
+{
+ struct list_head *pkt_list = &rtwvif->general_pkt_list;
+ struct rtw89_pktofld_info *info, *tmp;
+
+ list_for_each_entry_safe(info, tmp, pkt_list, list) {
+ if (notify_fw)
+ rtw89_fw_h2c_del_pkt_offload(rtwdev, info->id);
+ rtw89_core_release_bit_map(rtwdev->pkt_offload,
+ info->id);
+ list_del(&info->list);
+ kfree(info);
+ }
+}
+
+void rtw89_fw_release_general_pkt_list(struct rtw89_dev *rtwdev, bool notify_fw)
+{
+ struct rtw89_vif *rtwvif;
+
+ rtw89_for_each_rtwvif(rtwdev, rtwvif)
+ rtw89_fw_release_general_pkt_list_vif(rtwdev, rtwvif, notify_fw);
+}
+
#define H2C_GENERAL_PKT_LEN 6
#define H2C_GENERAL_PKT_ID_UND 0xff
-int rtw89_fw_h2c_general_pkt(struct rtw89_dev *rtwdev, u8 macid)
+int rtw89_fw_h2c_general_pkt(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif, u8 macid)
{
+ u8 pkt_id_ps_poll = H2C_GENERAL_PKT_ID_UND;
+ u8 pkt_id_null = H2C_GENERAL_PKT_ID_UND;
+ u8 pkt_id_qos_null = H2C_GENERAL_PKT_ID_UND;
struct sk_buff *skb;
int ret;
+ rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif,
+ RTW89_PKT_OFLD_TYPE_PS_POLL, &pkt_id_ps_poll);
+ rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif,
+ RTW89_PKT_OFLD_TYPE_NULL_DATA, &pkt_id_null);
+ rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif,
+ RTW89_PKT_OFLD_TYPE_QOS_NULL, &pkt_id_qos_null);
+
skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_GENERAL_PKT_LEN);
if (!skb) {
rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
@@ -967,9 +1024,9 @@ int rtw89_fw_h2c_general_pkt(struct rtw89_dev *rtwdev, u8 macid)
skb_put(skb, H2C_GENERAL_PKT_LEN);
SET_GENERAL_PKT_MACID(skb->data, macid);
SET_GENERAL_PKT_PROBRSP_ID(skb->data, H2C_GENERAL_PKT_ID_UND);
- SET_GENERAL_PKT_PSPOLL_ID(skb->data, H2C_GENERAL_PKT_ID_UND);
- SET_GENERAL_PKT_NULL_ID(skb->data, H2C_GENERAL_PKT_ID_UND);
- SET_GENERAL_PKT_QOS_NULL_ID(skb->data, H2C_GENERAL_PKT_ID_UND);
+ SET_GENERAL_PKT_PSPOLL_ID(skb->data, pkt_id_ps_poll);
+ SET_GENERAL_PKT_NULL_ID(skb->data, pkt_id_null);
+ SET_GENERAL_PKT_QOS_NULL_ID(skb->data, pkt_id_qos_null);
SET_GENERAL_PKT_CTS2SELF_ID(skb->data, H2C_GENERAL_PKT_ID_UND);
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
@@ -1807,33 +1864,36 @@ fail:
#define PORT_DATA_OFFSET 4
#define H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN 12
-#define H2C_LEN_CXDRVINFO_ROLE (4 + 12 * RTW89_PORT_NUM + H2C_LEN_CXDRVHDR)
-#define H2C_LEN_CXDRVINFO_ROLE_V1 (4 + 16 * RTW89_PORT_NUM + \
- H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN + \
- H2C_LEN_CXDRVHDR)
+#define H2C_LEN_CXDRVINFO_ROLE_SIZE(max_role_num) \
+ (4 + 12 * (max_role_num) + H2C_LEN_CXDRVHDR)
+
int rtw89_fw_h2c_cxdrv_role(struct rtw89_dev *rtwdev)
{
struct rtw89_btc *btc = &rtwdev->btc;
+ const struct rtw89_btc_ver *ver = btc->ver;
struct rtw89_btc_wl_info *wl = &btc->cx.wl;
struct rtw89_btc_wl_role_info *role_info = &wl->role_info;
struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role;
struct rtw89_btc_wl_active_role *active = role_info->active_role;
struct sk_buff *skb;
+ u32 len;
u8 offset = 0;
u8 *cmd;
int ret;
int i;
- skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_ROLE);
+ len = H2C_LEN_CXDRVINFO_ROLE_SIZE(ver->max_role_num);
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
if (!skb) {
rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n");
return -ENOMEM;
}
- skb_put(skb, H2C_LEN_CXDRVINFO_ROLE);
+ skb_put(skb, len);
cmd = skb->data;
RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_ROLE);
- RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_ROLE - H2C_LEN_CXDRVHDR);
+ RTW89_SET_FWCMD_CXHDR_LEN(cmd, len - H2C_LEN_CXDRVHDR);
RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt);
RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode);
@@ -1870,7 +1930,7 @@ int rtw89_fw_h2c_cxdrv_role(struct rtw89_dev *rtwdev)
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
H2C_CAT_OUTSRC, BTFC_SET,
SET_DRV_INFO, 0, 0,
- H2C_LEN_CXDRVINFO_ROLE);
+ len);
ret = rtw89_h2c_tx(rtwdev, skb, false);
if (ret) {
@@ -1885,28 +1945,35 @@ fail:
return ret;
}
+#define H2C_LEN_CXDRVINFO_ROLE_SIZE_V1(max_role_num) \
+ (4 + 16 * (max_role_num) + H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN + H2C_LEN_CXDRVHDR)
+
int rtw89_fw_h2c_cxdrv_role_v1(struct rtw89_dev *rtwdev)
{
struct rtw89_btc *btc = &rtwdev->btc;
+ const struct rtw89_btc_ver *ver = btc->ver;
struct rtw89_btc_wl_info *wl = &btc->cx.wl;
struct rtw89_btc_wl_role_info_v1 *role_info = &wl->role_info_v1;
struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role;
struct rtw89_btc_wl_active_role_v1 *active = role_info->active_role_v1;
struct sk_buff *skb;
+ u32 len;
u8 *cmd, offset;
int ret;
int i;
- skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_CXDRVINFO_ROLE_V1);
+ len = H2C_LEN_CXDRVINFO_ROLE_SIZE_V1(ver->max_role_num);
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
if (!skb) {
rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n");
return -ENOMEM;
}
- skb_put(skb, H2C_LEN_CXDRVINFO_ROLE_V1);
+ skb_put(skb, len);
cmd = skb->data;
RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_ROLE);
- RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_ROLE_V1 - H2C_LEN_CXDRVHDR);
+ RTW89_SET_FWCMD_CXHDR_LEN(cmd, len - H2C_LEN_CXDRVHDR);
RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt);
RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode);
@@ -1942,7 +2009,7 @@ int rtw89_fw_h2c_cxdrv_role_v1(struct rtw89_dev *rtwdev)
RTW89_SET_FWCMD_CXROLE_ACT_NOA_DUR(cmd, active->noa_duration, i, offset);
}
- offset = H2C_LEN_CXDRVINFO_ROLE_V1 - H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN;
+ offset = len - H2C_LEN_CXDRVINFO_ROLE_DBCC_LEN;
RTW89_SET_FWCMD_CXROLE_MROLE_TYPE(cmd, role_info->mrole_type, offset);
RTW89_SET_FWCMD_CXROLE_MROLE_NOA(cmd, role_info->mrole_noa_duration, offset);
RTW89_SET_FWCMD_CXROLE_DBCC_EN(cmd, role_info->dbcc_en, offset);
@@ -1953,7 +2020,7 @@ int rtw89_fw_h2c_cxdrv_role_v1(struct rtw89_dev *rtwdev)
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
H2C_CAT_OUTSRC, BTFC_SET,
SET_DRV_INFO, 0, 0,
- H2C_LEN_CXDRVINFO_ROLE_V1);
+ len);
ret = rtw89_h2c_tx(rtwdev, skb, false);
if (ret) {
@@ -1971,8 +2038,8 @@ fail:
#define H2C_LEN_CXDRVINFO_CTRL (4 + H2C_LEN_CXDRVHDR)
int rtw89_fw_h2c_cxdrv_ctrl(struct rtw89_dev *rtwdev)
{
- const struct rtw89_chip_info *chip = rtwdev->chip;
struct rtw89_btc *btc = &rtwdev->btc;
+ const struct rtw89_btc_ver *ver = btc->ver;
struct rtw89_btc_ctrl *ctrl = &btc->ctrl;
struct sk_buff *skb;
u8 *cmd;
@@ -1992,7 +2059,7 @@ int rtw89_fw_h2c_cxdrv_ctrl(struct rtw89_dev *rtwdev)
RTW89_SET_FWCMD_CXCTRL_MANUAL(cmd, ctrl->manual);
RTW89_SET_FWCMD_CXCTRL_IGNORE_BT(cmd, ctrl->igno_bt);
RTW89_SET_FWCMD_CXCTRL_ALWAYS_FREERUN(cmd, ctrl->always_freerun);
- if (chip->chip_id == RTL8852A)
+ if (ver->fcxctrl == 0)
RTW89_SET_FWCMD_CXCTRL_TRACE_STEP(cmd, ctrl->trace_step);
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
@@ -2112,6 +2179,7 @@ int rtw89_fw_h2c_add_pkt_offload(struct rtw89_dev *rtwdev, u8 *id,
skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_LEN_PKT_OFLD + skb_ofld->len);
if (!skb) {
rtw89_err(rtwdev, "failed to alloc skb for h2c pkt offload\n");
+ rtw89_core_release_bit_map(rtwdev->pkt_offload, alloc_id);
return -ENOMEM;
}
skb_put(skb, H2C_LEN_PKT_OFLD);
@@ -2130,6 +2198,7 @@ int rtw89_fw_h2c_add_pkt_offload(struct rtw89_dev *rtwdev, u8 *id,
ret = rtw89_h2c_tx(rtwdev, skb, false);
if (ret) {
rtw89_err(rtwdev, "failed to send h2c\n");
+ rtw89_core_release_bit_map(rtwdev->pkt_offload, alloc_id);
goto fail;
}
@@ -2663,11 +2732,14 @@ static int rtw89_append_probe_req_ie(struct rtw89_dev *rtwdev,
goto out;
}
- list_add_tail(&info->list, &scan_info->pkt_list[band]);
ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, new);
- if (ret)
+ if (ret) {
+ kfree_skb(new);
+ kfree(info);
goto out;
+ }
+ list_add_tail(&info->list, &scan_info->pkt_list[band]);
kfree_skb(new);
}
out:
@@ -2738,7 +2810,7 @@ static void rtw89_hw_scan_add_chan(struct rtw89_dev *rtwdev, int chan_type,
if (ssid_num == 1 && req->ssids[0].ssid_len == 0) {
ch_info->tx_pkt = false;
if (!req->duration_mandatory)
- ch_info->period -= RTW89_DWELL_TIME;
+ ch_info->period -= RTW89_DWELL_TIME_6G;
}
}
@@ -2791,7 +2863,8 @@ static int rtw89_hw_scan_add_chan_list(struct rtw89_dev *rtwdev,
if (req->duration_mandatory)
ch_info->period = req->duration;
else if (channel->band == NL80211_BAND_6GHZ)
- ch_info->period = RTW89_CHANNEL_TIME_6G + RTW89_DWELL_TIME;
+ ch_info->period = RTW89_CHANNEL_TIME_6G +
+ RTW89_DWELL_TIME_6G;
else
ch_info->period = RTW89_CHANNEL_TIME;
@@ -3072,8 +3145,9 @@ int rtw89_fw_h2c_keep_alive(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
int ret;
if (enable) {
- ret = rtw89_fw_h2c_add_wow_fw_ofld(rtwdev, rtwvif,
- RTW89_PKT_OFLD_TYPE_NULL_DATA, &pkt_id);
+ ret = rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif,
+ RTW89_PKT_OFLD_TYPE_NULL_DATA,
+ &pkt_id);
if (ret)
return -EPERM;
}
diff --git a/drivers/net/wireless/realtek/rtw89/fw.h b/drivers/net/wireless/realtek/rtw89/fw.h
index 4d2f9ea9e002..cae07e325326 100644
--- a/drivers/net/wireless/realtek/rtw89/fw.h
+++ b/drivers/net/wireless/realtek/rtw89/fw.h
@@ -171,6 +171,8 @@ struct rtw89_fw_hdr_section_info {
const u8 *addr;
u32 len;
u32 dladdr;
+ u32 mssc;
+ u8 type;
};
struct rtw89_fw_bin_info {
@@ -203,6 +205,7 @@ struct rtw89_h2creg_sch_tx_en {
#define RTW89_DFS_CHAN_TIME 105
#define RTW89_OFF_CHAN_TIME 100
#define RTW89_DWELL_TIME 20
+#define RTW89_DWELL_TIME_6G 10
#define RTW89_SCAN_WIDTH 0
#define RTW89_SCANOFLD_MAX_SSID 8
#define RTW89_SCANOFLD_MAX_IE_LEN 512
@@ -480,14 +483,21 @@ static inline void RTW89_SET_EDCA_PARAM(void *cmd, u32 val)
#define FW_EDCA_PARAM_CWMIN_MSK GENMASK(11, 8)
#define FW_EDCA_PARAM_AIFS_MSK GENMASK(7, 0)
+#define FWDL_SECURITY_SECTION_TYPE 9
+#define FWDL_SECURITY_SIGLEN 512
+
+#define GET_FWSECTION_HDR_DL_ADDR(fwhdr) \
+ le32_get_bits(*((const __le32 *)(fwhdr)), GENMASK(31, 0))
+#define GET_FWSECTION_HDR_SECTIONTYPE(fwhdr) \
+ le32_get_bits(*((const __le32 *)(fwhdr) + 1), GENMASK(27, 24))
#define GET_FWSECTION_HDR_SEC_SIZE(fwhdr) \
le32_get_bits(*((const __le32 *)(fwhdr) + 1), GENMASK(23, 0))
#define GET_FWSECTION_HDR_CHECKSUM(fwhdr) \
le32_get_bits(*((const __le32 *)(fwhdr) + 1), BIT(28))
#define GET_FWSECTION_HDR_REDL(fwhdr) \
le32_get_bits(*((const __le32 *)(fwhdr) + 1), BIT(29))
-#define GET_FWSECTION_HDR_DL_ADDR(fwhdr) \
- le32_get_bits(*((const __le32 *)(fwhdr)), GENMASK(31, 0))
+#define GET_FWSECTION_HDR_MSSC(fwhdr) \
+ le32_get_bits(*((const __le32 *)(fwhdr) + 2), GENMASK(31, 0))
#define GET_FW_HDR_MAJOR_VERSION(fwhdr) \
le32_get_bits(*((const __le32 *)(fwhdr) + 1), GENMASK(7, 0))
@@ -3209,16 +3219,16 @@ static inline struct rtw89_fw_c2h_attr *RTW89_SKB_C2H_CB(struct sk_buff *skb)
le32_get_bits(*((const __le32 *)(c2h) + 5), GENMASK(25, 24))
#define RTW89_GET_MAC_C2H_MCC_RCV_ACK_GROUP(c2h) \
- le32_get_bits(*((const __le32 *)(c2h)), GENMASK(1, 0))
+ le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(1, 0))
#define RTW89_GET_MAC_C2H_MCC_RCV_ACK_H2C_FUNC(c2h) \
- le32_get_bits(*((const __le32 *)(c2h)), GENMASK(15, 8))
+ le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(15, 8))
#define RTW89_GET_MAC_C2H_MCC_REQ_ACK_GROUP(c2h) \
- le32_get_bits(*((const __le32 *)(c2h)), GENMASK(1, 0))
+ le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(1, 0))
#define RTW89_GET_MAC_C2H_MCC_REQ_ACK_H2C_RETURN(c2h) \
- le32_get_bits(*((const __le32 *)(c2h)), GENMASK(7, 2))
+ le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(7, 2))
#define RTW89_GET_MAC_C2H_MCC_REQ_ACK_H2C_FUNC(c2h) \
- le32_get_bits(*((const __le32 *)(c2h)), GENMASK(15, 8))
+ le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(15, 8))
struct rtw89_mac_mcc_tsf_rpt {
u32 macid_x;
@@ -3232,30 +3242,30 @@ struct rtw89_mac_mcc_tsf_rpt {
static_assert(sizeof(struct rtw89_mac_mcc_tsf_rpt) <= RTW89_COMPLETION_BUF_SIZE);
#define RTW89_GET_MAC_C2H_MCC_TSF_RPT_MACID_X(c2h) \
- le32_get_bits(*((const __le32 *)(c2h)), GENMASK(7, 0))
+ le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(7, 0))
#define RTW89_GET_MAC_C2H_MCC_TSF_RPT_MACID_Y(c2h) \
- le32_get_bits(*((const __le32 *)(c2h)), GENMASK(15, 8))
+ le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(15, 8))
#define RTW89_GET_MAC_C2H_MCC_TSF_RPT_GROUP(c2h) \
- le32_get_bits(*((const __le32 *)(c2h)), GENMASK(17, 16))
+ le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(17, 16))
#define RTW89_GET_MAC_C2H_MCC_TSF_RPT_TSF_LOW_X(c2h) \
- le32_get_bits(*((const __le32 *)(c2h) + 1), GENMASK(31, 0))
+ le32_get_bits(*((const __le32 *)(c2h) + 3), GENMASK(31, 0))
#define RTW89_GET_MAC_C2H_MCC_TSF_RPT_TSF_HIGH_X(c2h) \
- le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(31, 0))
+ le32_get_bits(*((const __le32 *)(c2h) + 4), GENMASK(31, 0))
#define RTW89_GET_MAC_C2H_MCC_TSF_RPT_TSF_LOW_Y(c2h) \
- le32_get_bits(*((const __le32 *)(c2h) + 3), GENMASK(31, 0))
+ le32_get_bits(*((const __le32 *)(c2h) + 5), GENMASK(31, 0))
#define RTW89_GET_MAC_C2H_MCC_TSF_RPT_TSF_HIGH_Y(c2h) \
- le32_get_bits(*((const __le32 *)(c2h) + 4), GENMASK(31, 0))
+ le32_get_bits(*((const __le32 *)(c2h) + 6), GENMASK(31, 0))
#define RTW89_GET_MAC_C2H_MCC_STATUS_RPT_STATUS(c2h) \
- le32_get_bits(*((const __le32 *)(c2h)), GENMASK(5, 0))
+ le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(5, 0))
#define RTW89_GET_MAC_C2H_MCC_STATUS_RPT_GROUP(c2h) \
- le32_get_bits(*((const __le32 *)(c2h)), GENMASK(7, 6))
+ le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(7, 6))
#define RTW89_GET_MAC_C2H_MCC_STATUS_RPT_MACID(c2h) \
- le32_get_bits(*((const __le32 *)(c2h)), GENMASK(15, 8))
+ le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(15, 8))
#define RTW89_GET_MAC_C2H_MCC_STATUS_RPT_TSF_LOW(c2h) \
- le32_get_bits(*((const __le32 *)(c2h) + 1), GENMASK(31, 0))
+ le32_get_bits(*((const __le32 *)(c2h) + 3), GENMASK(31, 0))
#define RTW89_GET_MAC_C2H_MCC_STATUS_RPT_TSF_HIGH(c2h) \
- le32_get_bits(*((const __le32 *)(c2h) + 2), GENMASK(31, 0))
+ le32_get_bits(*((const __le32 *)(c2h) + 4), GENMASK(31, 0))
#define RTW89_FW_HDR_SIZE 32
#define RTW89_FW_SECTION_HDR_SIZE 16
@@ -3508,7 +3518,11 @@ int rtw89_fw_h2c_raw_with_hdr(struct rtw89_dev *rtwdev,
int rtw89_fw_h2c_raw(struct rtw89_dev *rtwdev, const u8 *buf, u16 len);
void rtw89_fw_send_all_early_h2c(struct rtw89_dev *rtwdev);
void rtw89_fw_free_all_early_h2c(struct rtw89_dev *rtwdev);
-int rtw89_fw_h2c_general_pkt(struct rtw89_dev *rtwdev, u8 macid);
+int rtw89_fw_h2c_general_pkt(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
+ u8 macid);
+void rtw89_fw_release_general_pkt_list_vif(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif, bool notify_fw);
+void rtw89_fw_release_general_pkt_list(struct rtw89_dev *rtwdev, bool notify_fw);
int rtw89_fw_h2c_ba_cam(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
bool valid, struct ieee80211_ampdu_params *params);
void rtw89_fw_h2c_init_ba_cam_v1(struct rtw89_dev *rtwdev);
diff --git a/drivers/net/wireless/realtek/rtw89/mac.c b/drivers/net/wireless/realtek/rtw89/mac.c
index cf9a0a3120a7..2e2a2b6eab09 100644
--- a/drivers/net/wireless/realtek/rtw89/mac.c
+++ b/drivers/net/wireless/realtek/rtw89/mac.c
@@ -623,7 +623,8 @@ static void rtw89_mac_dump_err_status(struct rtw89_dev *rtwdev,
if (err != MAC_AX_ERR_L1_ERR_DMAC &&
err != MAC_AX_ERR_L0_PROMOTE_TO_L1 &&
err != MAC_AX_ERR_L0_ERR_CMAC0 &&
- err != MAC_AX_ERR_L0_ERR_CMAC1)
+ err != MAC_AX_ERR_L0_ERR_CMAC1 &&
+ err != MAC_AX_ERR_RXI300)
return;
rtw89_info(rtwdev, "--->\nerr=0x%x\n", err);
@@ -663,6 +664,8 @@ u32 rtw89_mac_get_err_status(struct rtw89_dev *rtwdev)
err = MAC_AX_ERR_CPU_EXCEPTION;
else if (err_scnr == RTW89_WCPU_ASSERTION)
err = MAC_AX_ERR_ASSERTION;
+ else if (err_scnr == RTW89_RXI300_ERROR)
+ err = MAC_AX_ERR_RXI300;
rtw89_fw_st_dbg_dump(rtwdev);
rtw89_mac_dump_err_status(rtwdev, err);
@@ -3411,6 +3414,11 @@ int rtw89_mac_enable_cpu(struct rtw89_dev *rtwdev, u8 boot_reason, bool dlfw)
val |= B_AX_WCPU_FWDL_EN;
rtw89_write32(rtwdev, R_AX_WCPU_FW_CTRL, val);
+
+ if (rtwdev->chip->chip_id == RTL8852B)
+ rtw89_write32_mask(rtwdev, R_AX_SEC_CTRL,
+ B_AX_SEC_IDMEM_SIZE_CONFIG_MASK, 0x2);
+
rtw89_write16_mask(rtwdev, R_AX_BOOT_REASON, B_AX_BOOT_REASON_MASK,
boot_reason);
rtw89_write32_set(rtwdev, R_AX_PLATFORM_ENABLE, B_AX_WCPU_EN);
@@ -3918,19 +3926,14 @@ static void rtw89_mac_port_cfg_tbtt_shift(struct rtw89_dev *rtwdev,
B_AX_TBTT_SHIFT_OFST_MASK, val);
}
-static void rtw89_mac_port_tsf_sync(struct rtw89_dev *rtwdev,
- struct rtw89_vif *rtwvif,
- struct rtw89_vif *rtwvif_src, u8 offset,
- int *n_offset)
+void rtw89_mac_port_tsf_sync(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ struct rtw89_vif *rtwvif_src,
+ u16 offset_tu)
{
u32 val, reg;
- if (rtwvif->net_type != RTW89_NET_TYPE_AP_MODE || rtwvif == rtwvif_src)
- return;
-
- /* adjust offset randomly to avoid beacon conflict */
- offset = offset - offset / 4 + get_random_u32() % (offset / 2);
- val = RTW89_PORT_OFFSET_MS_TO_32US((*n_offset)++, offset);
+ val = RTW89_PORT_OFFSET_TU_TO_32US(offset_tu);
reg = rtw89_mac_reg_by_idx(R_AX_PORT0_TSF_SYNC + rtwvif->port * 4,
rtwvif->mac_idx);
@@ -3939,6 +3942,22 @@ static void rtw89_mac_port_tsf_sync(struct rtw89_dev *rtwdev,
rtw89_write32_set(rtwdev, reg, B_AX_SYNC_NOW);
}
+static void rtw89_mac_port_tsf_sync_rand(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ struct rtw89_vif *rtwvif_src,
+ u8 offset, int *n_offset)
+{
+ if (rtwvif->net_type != RTW89_NET_TYPE_AP_MODE || rtwvif == rtwvif_src)
+ return;
+
+ /* adjust offset randomly to avoid beacon conflict */
+ offset = offset - offset / 4 + get_random_u32() % (offset / 2);
+ rtw89_mac_port_tsf_sync(rtwdev, rtwvif, rtwvif_src,
+ (*n_offset) * offset);
+
+ (*n_offset)++;
+}
+
static void rtw89_mac_port_tsf_resync_all(struct rtw89_dev *rtwdev)
{
struct rtw89_vif *src = NULL, *tmp;
@@ -3958,7 +3977,7 @@ static void rtw89_mac_port_tsf_resync_all(struct rtw89_dev *rtwdev)
offset /= (vif_aps + 1);
rtw89_for_each_rtwvif(rtwdev, tmp)
- rtw89_mac_port_tsf_sync(rtwdev, tmp, src, offset, &n_offset);
+ rtw89_mac_port_tsf_sync_rand(rtwdev, tmp, src, offset, &n_offset);
}
int rtw89_mac_vif_init(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
@@ -4050,6 +4069,24 @@ int rtw89_mac_port_update(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
return 0;
}
+int rtw89_mac_port_get_tsf(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
+ u64 *tsf)
+{
+ const struct rtw89_port_reg *p = &rtw_port_base;
+ u32 tsf_low, tsf_high;
+ int ret;
+
+ ret = rtw89_mac_check_mac_en(rtwdev, rtwvif->mac_idx, RTW89_CMAC_SEL);
+ if (ret)
+ return ret;
+
+ tsf_low = rtw89_read32_port(rtwdev, rtwvif, p->tsftr_l);
+ tsf_high = rtw89_read32_port(rtwdev, rtwvif, p->tsftr_h);
+ *tsf = (u64)tsf_high << 32 | tsf_low;
+
+ return 0;
+}
+
static void rtw89_mac_check_he_obss_narrow_bw_ru_iter(struct wiphy *wiphy,
struct cfg80211_bss *bss,
void *data)
@@ -4263,12 +4300,12 @@ rtw89_mac_c2h_mcc_rcv_ack(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len
case H2C_FUNC_MCC_SET_DURATION:
break;
default:
- rtw89_debug(rtwdev, RTW89_DBG_FW,
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN,
"invalid MCC C2H RCV ACK: func %d\n", func);
return;
}
- rtw89_debug(rtwdev, RTW89_DBG_FW,
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN,
"MCC C2H RCV ACK: group %d, func %d\n", group, func);
}
@@ -4296,12 +4333,12 @@ rtw89_mac_c2h_mcc_req_ack(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len
case H2C_FUNC_DEL_MCC_GROUP:
case H2C_FUNC_RESET_MCC_GROUP:
default:
- rtw89_debug(rtwdev, RTW89_DBG_FW,
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN,
"invalid MCC C2H REQ ACK: func %d\n", func);
return;
}
- rtw89_debug(rtwdev, RTW89_DBG_FW,
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN,
"MCC C2H REQ ACK: group %d, func %d, return code %d\n",
group, func, retcode);
@@ -4329,6 +4366,11 @@ rtw89_mac_c2h_mcc_tsf_rpt(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len
rpt->tsf_y_low = RTW89_GET_MAC_C2H_MCC_TSF_RPT_TSF_LOW_Y(c2h->data);
rpt->tsf_y_high = RTW89_GET_MAC_C2H_MCC_TSF_RPT_TSF_HIGH_Y(c2h->data);
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN,
+ "MCC C2H TSF RPT: macid %d> %llu, macid %d> %llu\n",
+ rpt->macid_x, (u64)rpt->tsf_x_high << 32 | rpt->tsf_x_low,
+ rpt->macid_y, (u64)rpt->tsf_y_high << 32 | rpt->tsf_y_low);
+
cond = RTW89_MCC_WAIT_COND(group, H2C_FUNC_MCC_REQ_TSF);
rtw89_complete_cond(&rtwdev->mcc.wait, cond, &data);
}
@@ -4386,14 +4428,14 @@ rtw89_mac_c2h_mcc_status_rpt(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32
rsp = false;
break;
default:
- rtw89_debug(rtwdev, RTW89_DBG_FW,
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN,
"invalid MCC C2H STS RPT: status %d\n", status);
return;
}
- rtw89_debug(rtwdev, RTW89_DBG_FW,
- "MCC C2H STS RPT: group %d, macid %d, status %d, tsf {%d, %d}\n",
- group, macid, status, tsf_low, tsf_high);
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN,
+ "MCC C2H STS RPT: group %d, macid %d, status %d, tsf %llu\n",
+ group, macid, status, (u64)tsf_high << 32 | tsf_low);
if (!rsp)
return;
@@ -4512,7 +4554,7 @@ EXPORT_SYMBOL(rtw89_mac_get_txpwr_cr);
int rtw89_mac_cfg_ppdu_status(struct rtw89_dev *rtwdev, u8 mac_idx, bool enable)
{
u32 reg = rtw89_mac_reg_by_idx(R_AX_PPDU_STAT, mac_idx);
- int ret = 0;
+ int ret;
ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL);
if (ret)
@@ -4520,7 +4562,7 @@ int rtw89_mac_cfg_ppdu_status(struct rtw89_dev *rtwdev, u8 mac_idx, bool enable)
if (!enable) {
rtw89_write32_clr(rtwdev, reg, B_AX_PPDU_STAT_RPT_EN);
- return ret;
+ return 0;
}
rtw89_write32(rtwdev, reg, B_AX_PPDU_STAT_RPT_EN |
@@ -4530,7 +4572,7 @@ int rtw89_mac_cfg_ppdu_status(struct rtw89_dev *rtwdev, u8 mac_idx, bool enable)
rtw89_write32_mask(rtwdev, R_AX_HW_RPT_FWD, B_AX_FWD_PPDU_STAT_MASK,
RTW89_PRPT_DEST_HOST);
- return ret;
+ return 0;
}
EXPORT_SYMBOL(rtw89_mac_cfg_ppdu_status);
@@ -4865,9 +4907,16 @@ EXPORT_SYMBOL(rtw89_mac_cfg_ctrl_path_v1);
bool rtw89_mac_get_ctrl_path(struct rtw89_dev *rtwdev)
{
- u8 val = rtw89_read8(rtwdev, R_AX_SYS_SDIO_CTRL + 3);
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ u8 val = 0;
+
+ if (chip->chip_id == RTL8852C)
+ return false;
+ else if (chip->chip_id == RTL8852A || chip->chip_id == RTL8852B)
+ val = rtw89_read8_mask(rtwdev, R_AX_SYS_SDIO_CTRL + 3,
+ B_AX_LTE_MUX_CTRL_PATH >> 24);
- return FIELD_GET(B_AX_LTE_MUX_CTRL_PATH >> 24, val);
+ return !!val;
}
u16 rtw89_mac_get_plt_cnt(struct rtw89_dev *rtwdev, u8 band)
diff --git a/drivers/net/wireless/realtek/rtw89/mac.h b/drivers/net/wireless/realtek/rtw89/mac.h
index f0b684b205f1..8064d3953d7f 100644
--- a/drivers/net/wireless/realtek/rtw89/mac.h
+++ b/drivers/net/wireless/realtek/rtw89/mac.h
@@ -168,7 +168,7 @@ enum rtw89_mac_ax_l0_to_l1_event {
MAC_AX_L0_TO_L1_EVENT_MAX = 15,
};
-#define RTW89_PORT_OFFSET_MS_TO_32US(n, shift_ms) ((n) * (shift_ms) * 1000 / 32)
+#define RTW89_PORT_OFFSET_TU_TO_32US(shift_tu) ((shift_tu) * 1024 / 32)
enum rtw89_mac_dbg_port_sel {
/* CMAC 0 related */
@@ -623,6 +623,7 @@ struct rtw89_mac_dle_dfi_qempty {
};
enum rtw89_mac_error_scenario {
+ RTW89_RXI300_ERROR = 1,
RTW89_WCPU_CPU_EXCEPTION = 2,
RTW89_WCPU_ASSERTION = 3,
};
@@ -769,6 +770,7 @@ enum mac_ax_err_info {
MAC_AX_ERR_L2_ERR_WDT_TIMEOUT_INT = 0x2599,
MAC_AX_ERR_CPU_EXCEPTION = 0x3000,
MAC_AX_ERR_ASSERTION = 0x4000,
+ MAC_AX_ERR_RXI300 = 0x5000,
MAC_AX_GET_ERR_MAX,
MAC_AX_DUMP_SHAREBUFF_INDICATOR = 0x80000000,
@@ -828,6 +830,15 @@ static inline u32 rtw89_mac_reg_by_port(u32 base, u8 port, u8 mac_idx)
}
static inline u32
+rtw89_read32_port(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, u32 base)
+{
+ u32 reg;
+
+ reg = rtw89_mac_reg_by_port(base, rtwvif->port, rtwvif->mac_idx);
+ return rtw89_read32(rtwdev, reg);
+}
+
+static inline u32
rtw89_read32_port_mask(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
u32 base, u32 mask)
{
@@ -906,6 +917,12 @@ int rtw89_mac_write_lte(struct rtw89_dev *rtwdev, const u32 offset, u32 val);
int rtw89_mac_read_lte(struct rtw89_dev *rtwdev, const u32 offset, u32 *val);
int rtw89_mac_add_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *vif);
int rtw89_mac_port_update(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif);
+void rtw89_mac_port_tsf_sync(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ struct rtw89_vif *rtwvif_src,
+ u16 offset_tu);
+int rtw89_mac_port_get_tsf(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
+ u64 *tsf);
void rtw89_mac_set_he_obss_narrow_bw_ru(struct rtw89_dev *rtwdev,
struct ieee80211_vif *vif);
void rtw89_mac_stop_ap(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif);
diff --git a/drivers/net/wireless/realtek/rtw89/mac80211.c b/drivers/net/wireless/realtek/rtw89/mac80211.c
index f9b95c52916b..d43281f7335b 100644
--- a/drivers/net/wireless/realtek/rtw89/mac80211.c
+++ b/drivers/net/wireless/realtek/rtw89/mac80211.c
@@ -135,6 +135,7 @@ static int rtw89_ops_add_interface(struct ieee80211_hw *hw,
rtwvif->sub_entity_idx = RTW89_SUB_ENTITY_0;
rtwvif->hit_rule = 0;
ether_addr_copy(rtwvif->mac_addr, vif->addr);
+ INIT_LIST_HEAD(&rtwvif->general_pkt_list);
ret = rtw89_mac_add_vif(rtwdev, rtwvif);
if (ret) {
diff --git a/drivers/net/wireless/realtek/rtw89/pci.c b/drivers/net/wireless/realtek/rtw89/pci.c
index 1c4500ba777c..ec8bb5f10482 100644
--- a/drivers/net/wireless/realtek/rtw89/pci.c
+++ b/drivers/net/wireless/realtek/rtw89/pci.c
@@ -1384,7 +1384,7 @@ static int rtw89_pci_ops_tx_write(struct rtw89_dev *rtwdev, struct rtw89_core_tx
return 0;
}
-static const struct rtw89_pci_bd_ram bd_ram_table[RTW89_TXCH_NUM] = {
+const struct rtw89_pci_bd_ram rtw89_bd_ram_table_dual[RTW89_TXCH_NUM] = {
[RTW89_TXCH_ACH0] = {.start_idx = 0, .max_num = 5, .min_num = 2},
[RTW89_TXCH_ACH1] = {.start_idx = 5, .max_num = 5, .min_num = 2},
[RTW89_TXCH_ACH2] = {.start_idx = 10, .max_num = 5, .min_num = 2},
@@ -1399,11 +1399,24 @@ static const struct rtw89_pci_bd_ram bd_ram_table[RTW89_TXCH_NUM] = {
[RTW89_TXCH_CH11] = {.start_idx = 55, .max_num = 5, .min_num = 1},
[RTW89_TXCH_CH12] = {.start_idx = 60, .max_num = 4, .min_num = 1},
};
+EXPORT_SYMBOL(rtw89_bd_ram_table_dual);
+
+const struct rtw89_pci_bd_ram rtw89_bd_ram_table_single[RTW89_TXCH_NUM] = {
+ [RTW89_TXCH_ACH0] = {.start_idx = 0, .max_num = 5, .min_num = 2},
+ [RTW89_TXCH_ACH1] = {.start_idx = 5, .max_num = 5, .min_num = 2},
+ [RTW89_TXCH_ACH2] = {.start_idx = 10, .max_num = 5, .min_num = 2},
+ [RTW89_TXCH_ACH3] = {.start_idx = 15, .max_num = 5, .min_num = 2},
+ [RTW89_TXCH_CH8] = {.start_idx = 20, .max_num = 4, .min_num = 1},
+ [RTW89_TXCH_CH9] = {.start_idx = 24, .max_num = 4, .min_num = 1},
+ [RTW89_TXCH_CH12] = {.start_idx = 28, .max_num = 4, .min_num = 1},
+};
+EXPORT_SYMBOL(rtw89_bd_ram_table_single);
static void rtw89_pci_reset_trx_rings(struct rtw89_dev *rtwdev)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
const struct rtw89_pci_info *info = rtwdev->pci_info;
+ const struct rtw89_pci_bd_ram *bd_ram_table = *info->bd_ram_table;
struct rtw89_pci_tx_ring *tx_ring;
struct rtw89_pci_rx_ring *rx_ring;
struct rtw89_pci_dma_ring *bd_ring;
@@ -3385,7 +3398,7 @@ static void rtw89_pci_clkreq_set(struct rtw89_dev *rtwdev, bool enable)
if (ret)
rtw89_err(rtwdev, "failed to set CLKREQ Delay\n");
- if (chip_id == RTL8852A) {
+ if (chip_id == RTL8852A || chip_id == RTL8852B) {
if (enable)
ret = rtw89_pci_config_byte_set(rtwdev,
RTW89_PCIE_L1_CTRL,
diff --git a/drivers/net/wireless/realtek/rtw89/pci.h b/drivers/net/wireless/realtek/rtw89/pci.h
index 7d033501d4d9..1e19740db8c5 100644
--- a/drivers/net/wireless/realtek/rtw89/pci.h
+++ b/drivers/net/wireless/realtek/rtw89/pci.h
@@ -750,6 +750,12 @@ struct rtw89_pci_ch_dma_addr_set {
struct rtw89_pci_ch_dma_addr rx[RTW89_RXCH_NUM];
};
+struct rtw89_pci_bd_ram {
+ u8 start_idx;
+ u8 max_num;
+ u8 min_num;
+};
+
struct rtw89_pci_info {
enum mac_ax_bd_trunc_mode txbd_trunc_mode;
enum mac_ax_bd_trunc_mode rxbd_trunc_mode;
@@ -785,6 +791,7 @@ struct rtw89_pci_info {
u32 tx_dma_ch_mask;
const struct rtw89_pci_bd_idx_addr *bd_idx_addr_low_power;
const struct rtw89_pci_ch_dma_addr_set *dma_addr_set;
+ const struct rtw89_pci_bd_ram (*bd_ram_table)[RTW89_TXCH_NUM];
int (*ltr_set)(struct rtw89_dev *rtwdev, bool en);
u32 (*fill_txaddr_info)(struct rtw89_dev *rtwdev,
@@ -798,12 +805,6 @@ struct rtw89_pci_info {
struct rtw89_pci_isrs *isrs);
};
-struct rtw89_pci_bd_ram {
- u8 start_idx;
- u8 max_num;
- u8 min_num;
-};
-
struct rtw89_pci_tx_data {
dma_addr_t dma;
};
@@ -1057,6 +1058,8 @@ static inline bool rtw89_pci_ltr_is_err_reg_val(u32 val)
extern const struct dev_pm_ops rtw89_pm_ops;
extern const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set;
extern const struct rtw89_pci_ch_dma_addr_set rtw89_pci_ch_dma_addr_set_v1;
+extern const struct rtw89_pci_bd_ram rtw89_bd_ram_table_dual[RTW89_TXCH_NUM];
+extern const struct rtw89_pci_bd_ram rtw89_bd_ram_table_single[RTW89_TXCH_NUM];
struct pci_device_id;
diff --git a/drivers/net/wireless/realtek/rtw89/phy.c b/drivers/net/wireless/realtek/rtw89/phy.c
index 017710c580c7..d9f61ba3d176 100644
--- a/drivers/net/wireless/realtek/rtw89/phy.c
+++ b/drivers/net/wireless/realtek/rtw89/phy.c
@@ -367,6 +367,7 @@ static void rtw89_phy_ra_sta_update(struct rtw89_dev *rtwdev,
}
ra->bw_cap = bw_mode;
+ ra->er_cap = rtwsta->er_cap;
ra->mode_ctrl = mode;
ra->macid = rtwsta->mac_id;
ra->stbc_cap = stbc_en;
@@ -2041,6 +2042,7 @@ void rtw89_phy_set_txpwr_byrate(struct rtw89_dev *rtwdev,
const struct rtw89_chan *chan,
enum rtw89_phy_idx phy_idx)
{
+ u8 max_nss_num = rtwdev->chip->rf_path_num;
static const u8 rs[] = {
RTW89_RS_CCK,
RTW89_RS_OFDM,
@@ -2063,7 +2065,7 @@ void rtw89_phy_set_txpwr_byrate(struct rtw89_dev *rtwdev,
BUILD_BUG_ON(rtw89_rs_idx_max[RTW89_RS_HEDCM] % 4);
addr = R_AX_PWR_BY_RATE;
- for (cur.nss = 0; cur.nss <= RTW89_NSS_2; cur.nss++) {
+ for (cur.nss = 0; cur.nss < max_nss_num; cur.nss++) {
for (i = 0; i < ARRAY_SIZE(rs); i++) {
if (cur.nss >= rtw89_rs_nss_max[rs[i]])
continue;
@@ -2126,6 +2128,7 @@ void rtw89_phy_set_txpwr_limit(struct rtw89_dev *rtwdev,
const struct rtw89_chan *chan,
enum rtw89_phy_idx phy_idx)
{
+ u8 max_ntx_num = rtwdev->chip->rf_path_num;
struct rtw89_txpwr_limit lmt;
u8 ch = chan->channel;
u8 bw = chan->band_width;
@@ -2140,7 +2143,7 @@ void rtw89_phy_set_txpwr_limit(struct rtw89_dev *rtwdev,
RTW89_TXPWR_LMT_PAGE_SIZE);
addr = R_AX_PWR_LMT;
- for (i = 0; i < RTW89_NTX_NUM; i++) {
+ for (i = 0; i < max_ntx_num; i++) {
rtw89_phy_fill_txpwr_limit(rtwdev, chan, &lmt, i);
ptr = (s8 *)&lmt;
@@ -2161,6 +2164,7 @@ void rtw89_phy_set_txpwr_limit_ru(struct rtw89_dev *rtwdev,
const struct rtw89_chan *chan,
enum rtw89_phy_idx phy_idx)
{
+ u8 max_ntx_num = rtwdev->chip->rf_path_num;
struct rtw89_txpwr_limit_ru lmt_ru;
u8 ch = chan->channel;
u8 bw = chan->band_width;
@@ -2175,7 +2179,7 @@ void rtw89_phy_set_txpwr_limit_ru(struct rtw89_dev *rtwdev,
RTW89_TXPWR_LMT_RU_PAGE_SIZE);
addr = R_AX_PWR_RU_LMT;
- for (i = 0; i < RTW89_NTX_NUM; i++) {
+ for (i = 0; i < max_ntx_num; i++) {
rtw89_phy_fill_txpwr_limit_ru(rtwdev, chan, &lmt_ru, i);
ptr = (s8 *)&lmt_ru;
@@ -4116,6 +4120,7 @@ void rtw89_phy_dm_init(struct rtw89_dev *rtwdev)
void rtw89_phy_set_bss_color(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif)
{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
enum rtw89_phy_idx phy_idx = RTW89_PHY_0;
u8 bss_color;
@@ -4124,11 +4129,11 @@ void rtw89_phy_set_bss_color(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif
bss_color = vif->bss_conf.he_bss_color.color;
- rtw89_phy_write32_idx(rtwdev, R_BSS_CLR_MAP, B_BSS_CLR_MAP_VLD0, 0x1,
- phy_idx);
- rtw89_phy_write32_idx(rtwdev, R_BSS_CLR_MAP, B_BSS_CLR_MAP_TGT, bss_color,
+ rtw89_phy_write32_idx(rtwdev, chip->bss_clr_map_reg, B_BSS_CLR_MAP_VLD0, 0x1,
phy_idx);
- rtw89_phy_write32_idx(rtwdev, R_BSS_CLR_MAP, B_BSS_CLR_MAP_STAID,
+ rtw89_phy_write32_idx(rtwdev, chip->bss_clr_map_reg, B_BSS_CLR_MAP_TGT,
+ bss_color, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, chip->bss_clr_map_reg, B_BSS_CLR_MAP_STAID,
vif->cfg.aid, phy_idx);
}
diff --git a/drivers/net/wireless/realtek/rtw89/reg.h b/drivers/net/wireless/realtek/rtw89/reg.h
index 5324e645728b..600257909df2 100644
--- a/drivers/net/wireless/realtek/rtw89/reg.h
+++ b/drivers/net/wireless/realtek/rtw89/reg.h
@@ -275,6 +275,9 @@
#define B_AX_S1_LDO2PWRCUT_F BIT(23)
#define B_AX_S0_LDO_VSEL_F_MASK GENMASK(22, 21)
+#define R_AX_SEC_CTRL 0x0C00
+#define B_AX_SEC_IDMEM_SIZE_CONFIG_MASK GENMASK(17, 16)
+
#define R_AX_FILTER_MODEL_ADDR 0x0C04
#define R_AX_HAXI_INIT_CFG1 0x1000
@@ -3559,6 +3562,7 @@
#define RR_MOD_IQK GENMASK(19, 4)
#define RR_MOD_DPK GENMASK(19, 5)
#define RR_MOD_MASK GENMASK(19, 16)
+#define RR_MOD_DCK GENMASK(14, 10)
#define RR_MOD_RGM GENMASK(13, 4)
#define RR_MOD_V_DOWN 0x0
#define RR_MOD_V_STANDBY 0x1
@@ -3572,6 +3576,7 @@
#define RR_MOD_NBW GENMASK(15, 14)
#define RR_MOD_M_RXG GENMASK(13, 4)
#define RR_MOD_M_RXBB GENMASK(9, 5)
+#define RR_MOD_LO_SEL BIT(1)
#define RR_MODOPT 0x01
#define RR_MODOPT_M_TXPWR GENMASK(5, 0)
#define RR_WLSEL 0x02
@@ -3638,6 +3643,7 @@
#define RR_LUTWA_M2 GENMASK(4, 0)
#define RR_LUTWD1 0x3e
#define RR_LUTWD0 0x3f
+#define RR_LUTWD0_MB GENMASK(11, 6)
#define RR_LUTWD0_LB GENMASK(5, 0)
#define RR_TM 0x42
#define RR_TM_TRI BIT(19)
@@ -3671,6 +3677,8 @@
#define RR_TXRSV_GAPK BIT(19)
#define RR_BIAS 0x5e
#define RR_BIAS_GAPK BIT(19)
+#define RR_TXAC 0x5f
+#define RR_TXAC_IQG GENMASK(3, 0)
#define RR_BIASA 0x60
#define RR_BIASA_TXG GENMASK(15, 12)
#define RR_BIASA_TXA GENMASK(19, 16)
@@ -3729,10 +3737,14 @@
#define RR_XALNA2_SW2 GENMASK(9, 8)
#define RR_XALNA2_SW GENMASK(1, 0)
#define RR_DCK 0x92
+#define RR_DCK_S1 GENMASK(19, 16)
+#define RR_DCK_TIA GENMASK(15, 9)
#define RR_DCK_DONE GENMASK(7, 5)
#define RR_DCK_FINE BIT(1)
#define RR_DCK_LV BIT(0)
#define RR_DCK1 0x93
+#define RR_DCK1_S1 GENMASK(19, 16)
+#define RR_DCK1_TIA GENMASK(15, 9)
#define RR_DCK1_DONE BIT(5)
#define RR_DCK1_CLR GENMASK(3, 0)
#define RR_DCK1_SEL BIT(3)
@@ -3781,11 +3793,14 @@
#define RR_LUTDBG 0xdf
#define RR_LUTDBG_TIA BIT(12)
#define RR_LUTDBG_LOK BIT(2)
+#define RR_LUTPLL 0xec
+#define RR_CAL_RW BIT(19)
#define RR_LUTWE2 0xee
#define RR_LUTWE2_RTXBW BIT(2)
#define RR_LUTWE 0xef
#define RR_LUTWE_LOK BIT(2)
#define RR_RFC 0xf0
+#define RR_WCAL BIT(16)
#define RR_RFC_CKEN BIT(1)
#define R_UPD_P0 0x0000
@@ -4090,12 +4105,13 @@
#define R_MUIC 0x40F8
#define B_MUIC_EN BIT(0)
#define R_DCFO 0x4264
-#define B_DCFO GENMASK(1, 0)
+#define B_DCFO GENMASK(7, 0)
#define R_SEG0CSI 0x42AC
-#define B_SEG0CSI_IDX GENMASK(11, 0)
+#define B_SEG0CSI_IDX GENMASK(10, 0)
#define R_SEG0CSI_EN 0x42C4
#define B_SEG0CSI_EN BIT(23)
#define R_BSS_CLR_MAP 0x43ac
+#define R_BSS_CLR_MAP_V1 0x43B0
#define B_BSS_CLR_MAP_VLD0 BIT(28)
#define B_BSS_CLR_MAP_TGT GENMASK(27, 22)
#define B_BSS_CLR_MAP_STAID GENMASK(21, 11)
@@ -4725,6 +4741,7 @@
#define R_DRCK_FH 0xC094
#define B_DRCK_LAT BIT(9)
#define R_DRCK 0xC0C4
+#define B_DRCK_MUL GENMASK(21, 17)
#define B_DRCK_IDLE BIT(9)
#define B_DRCK_EN BIT(6)
#define B_DRCK_VAL GENMASK(4, 0)
@@ -4742,9 +4759,13 @@
#define B_PATH0_SAMPL_DLY_T_MSK_V1 GENMASK(27, 26)
#define R_P0_CFCH_BW0 0xC0D4
#define B_P0_CFCH_BW0 GENMASK(27, 26)
+#define B_P0_CFCH_EN GENMASK(14, 11)
+#define B_P0_CFCH_CTL GENMASK(10, 7)
#define R_P0_CFCH_BW1 0xC0D8
#define B_P0_CFCH_EX BIT(13)
#define B_P0_CFCH_BW1 GENMASK(8, 5)
+#define R_ADCMOD 0xC0E8
+#define B_ADCMOD_LP GENMASK(31, 16)
#define R_ADDCK0D 0xC0F0
#define B_ADDCK0D_VAL2 GENMASK(31, 26)
#define B_ADDCK0D_VAL GENMASK(25, 16)
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a.c b/drivers/net/wireless/realtek/rtw89/rtw8852a.c
index eff6519cf019..9c42b6abd223 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852a.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852a.c
@@ -1035,7 +1035,7 @@ static void rtw8852a_spur_elimination(struct rtw89_dev *rtwdev, u8 central_ch)
0x210);
rtw89_phy_write32_mask(rtwdev, R_P1_NBIIDX, B_P1_NBIIDX_VAL,
0x210);
- rtw89_phy_write32_mask(rtwdev, R_SEG0CSI, 0xfff, 0x7c0);
+ rtw89_phy_write32_mask(rtwdev, R_SEG0CSI, B_SEG0CSI_IDX, 0x7c0);
rtw89_phy_write32_mask(rtwdev, R_P0_NBIIDX,
B_P0_NBIIDX_NOTCH_EN, 0x1);
rtw89_phy_write32_mask(rtwdev, R_P1_NBIIDX,
@@ -1047,7 +1047,7 @@ static void rtw8852a_spur_elimination(struct rtw89_dev *rtwdev, u8 central_ch)
0x210);
rtw89_phy_write32_mask(rtwdev, R_P1_NBIIDX, B_P1_NBIIDX_VAL,
0x210);
- rtw89_phy_write32_mask(rtwdev, R_SEG0CSI, 0xfff, 0x40);
+ rtw89_phy_write32_mask(rtwdev, R_SEG0CSI, B_SEG0CSI_IDX, 0x40);
rtw89_phy_write32_mask(rtwdev, R_P0_NBIIDX,
B_P0_NBIIDX_NOTCH_EN, 0x1);
rtw89_phy_write32_mask(rtwdev, R_P1_NBIIDX,
@@ -1059,7 +1059,7 @@ static void rtw8852a_spur_elimination(struct rtw89_dev *rtwdev, u8 central_ch)
0x2d0);
rtw89_phy_write32_mask(rtwdev, R_P1_NBIIDX, B_P1_NBIIDX_VAL,
0x2d0);
- rtw89_phy_write32_mask(rtwdev, R_SEG0CSI, 0xfff, 0x740);
+ rtw89_phy_write32_mask(rtwdev, R_SEG0CSI, B_SEG0CSI_IDX, 0x740);
rtw89_phy_write32_mask(rtwdev, R_P0_NBIIDX,
B_P0_NBIIDX_NOTCH_EN, 0x1);
rtw89_phy_write32_mask(rtwdev, R_P1_NBIIDX,
@@ -1878,9 +1878,13 @@ static
void rtw8852a_btc_update_bt_cnt(struct rtw89_dev *rtwdev)
{
struct rtw89_btc *btc = &rtwdev->btc;
+ const struct rtw89_btc_ver *ver = btc->ver;
struct rtw89_btc_cx *cx = &btc->cx;
u32 val;
+ if (ver->fcxbtcrpt != 1)
+ return;
+
val = rtw89_read32(rtwdev, R_AX_BT_STAST_HIGH);
cx->cnt_bt[BTC_BCNT_HIPRI_TX] = FIELD_GET(B_AX_STATIS_BT_HI_TX_MASK, val);
cx->cnt_bt[BTC_BCNT_HIPRI_RX] = FIELD_GET(B_AX_STATIS_BT_HI_RX_MASK, val);
@@ -2051,6 +2055,7 @@ const struct rtw89_chip_info rtw8852a_chip_info = {
.chip_id = RTL8852A,
.ops = &rtw8852a_chip_ops,
.fw_name = "rtw89/rtw8852a_fw.bin",
+ .try_ce_fw = false,
.fifo_size = 458752,
.dle_scc_rsvd_size = 0,
.max_amsdu_limit = 3500,
@@ -2106,20 +2111,6 @@ const struct rtw89_chip_info rtw8852a_chip_info = {
.btcx_desired = 0x7,
.scbd = 0x1,
.mailbox = 0x1,
- .btc_fwinfo_buf = 1024,
-
- .fcxbtcrpt_ver = 1,
- .fcxtdma_ver = 1,
- .fcxslots_ver = 1,
- .fcxcysta_ver = 2,
- .fcxstep_ver = 2,
- .fcxnullsta_ver = 1,
- .fcxmreg_ver = 1,
- .fcxgpiodbg_ver = 1,
- .fcxbtver_ver = 1,
- .fcxbtscan_ver = 1,
- .fcxbtafh_ver = 1,
- .fcxbtdevinfo_ver = 1,
.afh_guard_ch = 6,
.wl_rssi_thres = rtw89_btc_8852a_wl_rssi_thres,
@@ -2149,6 +2140,7 @@ const struct rtw89_chip_info rtw8852a_chip_info = {
.dcfo_comp_sft = 3,
.imr_info = &rtw8852a_imr_info,
.rrsr_cfgs = &rtw8852a_rrsr_cfgs,
+ .bss_clr_map_reg = R_BSS_CLR_MAP,
.dma_ch_mask = 0,
#ifdef CONFIG_PM
.wowlan_stub = &rtw_wowlan_stub_8852a,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.c
index 582ff0d3a9ea..cd6c39b7f802 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.c
@@ -1643,7 +1643,7 @@ static void _doiqk(struct rtw89_dev *rtwdev, bool force,
rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_START);
rtw89_debug(rtwdev, RTW89_DBG_RFK,
- "[IQK]==========IQK strat!!!!!==========\n");
+ "[IQK]==========IQK start!!!!!==========\n");
iqk_info->iqk_times++;
iqk_info->kcount = 0;
iqk_info->version = RTW8852A_IQK_VER;
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852ae.c b/drivers/net/wireless/realtek/rtw89/rtw8852ae.c
index 0cd8c0c44d19..d835a44a1d0d 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852ae.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852ae.c
@@ -44,6 +44,7 @@ static const struct rtw89_pci_info rtw8852a_pci_info = {
.tx_dma_ch_mask = 0,
.bd_idx_addr_low_power = NULL,
.dma_addr_set = &rtw89_pci_ch_dma_addr_set,
+ .bd_ram_table = &rtw89_bd_ram_table_dual,
.ltr_set = rtw89_pci_ltr_set,
.fill_txaddr_info = rtw89_pci_fill_txaddr_info,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b.c b/drivers/net/wireless/realtek/rtw89/rtw8852b.c
index b635ac1d1ca2..ee8dba7e0074 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852b.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852b.c
@@ -1618,6 +1618,7 @@ static void rtw8852b_set_txpwr_ref(struct rtw89_dev *rtwdev,
}
static void rtw8852b_bb_set_tx_shape_dfir(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
u8 tx_shape_idx,
enum rtw89_phy_idx phy_idx)
{
@@ -1637,7 +1638,6 @@ static void rtw8852b_bb_set_tx_shape_dfir(struct rtw89_dev *rtwdev,
__DECL_DFIR_PARAM(sharp_14,
0x023B13FF, 0x001C42DE, 0x00FDB0AD, 0x00F60F6E,
0x00FD8F92, 0x0602D011, 0x0001C02C, 0x00FFF00A);
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
u8 ch = chan->channel;
const u32 *param;
u32 addr;
@@ -1678,7 +1678,7 @@ static void rtw8852b_set_tx_shape(struct rtw89_dev *rtwdev,
u8 tx_shape_ofdm = rtw89_8852b_tx_shape[band][RTW89_RS_OFDM][regd];
if (band == RTW89_BAND_2G)
- rtw8852b_bb_set_tx_shape_dfir(rtwdev, tx_shape_cck, phy_idx);
+ rtw8852b_bb_set_tx_shape_dfir(rtwdev, chan, tx_shape_cck, phy_idx);
rtw89_phy_write32_mask(rtwdev, R_DCFO_OPT, B_TXSHAPE_TRIANGULAR_CFG,
tx_shape_ofdm);
@@ -1720,7 +1720,7 @@ void rtw8852b_set_txpwr_ul_tb_offset(struct rtw89_dev *rtwdev,
pw_ofst = max_t(s8, pw_ofst - 3, -16);
reg = rtw89_mac_reg_by_idx(R_AX_PWR_UL_TB_2T, mac_idx);
- rtw89_write32_mask(rtwdev, reg, B_AX_PWR_UL_TB_1T_MASK, pw_ofst);
+ rtw89_write32_mask(rtwdev, reg, B_AX_PWR_UL_TB_2T_MASK, pw_ofst);
}
static int
@@ -2423,13 +2423,14 @@ static const struct rtw89_chip_ops rtw8852b_chip_ops = {
.btc_update_bt_cnt = rtw8852b_btc_update_bt_cnt,
.btc_wl_s1_standby = rtw8852b_btc_wl_s1_standby,
.btc_set_wl_rx_gain = rtw8852b_btc_set_wl_rx_gain,
- .btc_set_policy = rtw89_btc_set_policy,
+ .btc_set_policy = rtw89_btc_set_policy_v1,
};
const struct rtw89_chip_info rtw8852b_chip_info = {
.chip_id = RTL8852B,
.ops = &rtw8852b_chip_ops,
.fw_name = "rtw89/rtw8852b_fw.bin",
+ .try_ce_fw = true,
.fifo_size = 196608,
.dle_scc_rsvd_size = 98304,
.max_amsdu_limit = 3500,
@@ -2437,6 +2438,8 @@ const struct rtw89_chip_info rtw8852b_chip_info = {
.rsvd_ple_ofst = 0x2f800,
.hfc_param_ini = rtw8852b_hfc_param_ini_pcie,
.dle_mem = rtw8852b_dle_mem_pcie,
+ .wde_qempty_acq_num = 4,
+ .wde_qempty_mgq_sel = 4,
.rf_base_addr = {0xe000, 0xf000},
.pwr_on_seq = NULL,
.pwr_off_seq = NULL,
@@ -2483,20 +2486,7 @@ const struct rtw89_chip_info rtw8852b_chip_info = {
.btcx_desired = 0x5,
.scbd = 0x1,
.mailbox = 0x1,
- .btc_fwinfo_buf = 1024,
-
- .fcxbtcrpt_ver = 1,
- .fcxtdma_ver = 1,
- .fcxslots_ver = 1,
- .fcxcysta_ver = 2,
- .fcxstep_ver = 2,
- .fcxnullsta_ver = 1,
- .fcxmreg_ver = 1,
- .fcxgpiodbg_ver = 1,
- .fcxbtver_ver = 1,
- .fcxbtscan_ver = 1,
- .fcxbtafh_ver = 1,
- .fcxbtdevinfo_ver = 1,
+
.afh_guard_ch = 6,
.wl_rssi_thres = rtw89_btc_8852b_wl_rssi_thres,
.bt_rssi_thres = rtw89_btc_8852b_bt_rssi_thres,
@@ -2525,6 +2515,7 @@ const struct rtw89_chip_info rtw8852b_chip_info = {
.dcfo_comp_sft = 3,
.imr_info = &rtw8852b_imr_info,
.rrsr_cfgs = &rtw8852b_rrsr_cfgs,
+ .bss_clr_map_reg = R_BSS_CLR_MAP_V1,
.dma_ch_mask = BIT(RTW89_DMA_ACH4) | BIT(RTW89_DMA_ACH5) |
BIT(RTW89_DMA_ACH6) | BIT(RTW89_DMA_ACH7) |
BIT(RTW89_DMA_B1MG) | BIT(RTW89_DMA_B1HI),
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852be.c b/drivers/net/wireless/realtek/rtw89/rtw8852be.c
index 0ef2ca8efeb0..ecf39d2d9f81 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852be.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852be.c
@@ -46,6 +46,7 @@ static const struct rtw89_pci_info rtw8852b_pci_info = {
BIT(RTW89_TXCH_CH10) | BIT(RTW89_TXCH_CH11),
.bd_idx_addr_low_power = NULL,
.dma_addr_set = &rtw89_pci_ch_dma_addr_set,
+ .bd_ram_table = &rtw89_bd_ram_table_single,
.ltr_set = rtw89_pci_ltr_set,
.fill_txaddr_info = rtw89_pci_fill_txaddr_info,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c.c b/drivers/net/wireless/realtek/rtw89/rtw8852c.c
index a87482cc25f5..d2dde21d3daf 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852c.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852c.c
@@ -1968,6 +1968,7 @@ static void rtw8852c_set_txpwr_ref(struct rtw89_dev *rtwdev,
}
static void rtw8852c_bb_set_tx_shape_dfir(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
u8 tx_shape_idx,
enum rtw89_phy_idx phy_idx)
{
@@ -1991,7 +1992,6 @@ static void rtw8852c_bb_set_tx_shape_dfir(struct rtw89_dev *rtwdev,
__DECL_DFIR_ADDR(filter,
0x45BC, 0x45CC, 0x45D0, 0x45D4, 0x45D8, 0x45C0,
0x45C4, 0x45C8);
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
u8 ch = chan->channel;
const u32 *param;
int i;
@@ -2032,7 +2032,7 @@ static void rtw8852c_set_tx_shape(struct rtw89_dev *rtwdev,
u8 tx_shape_ofdm = rtw89_8852c_tx_shape[band][RTW89_RS_OFDM][regd];
if (band == RTW89_BAND_2G)
- rtw8852c_bb_set_tx_shape_dfir(rtwdev, tx_shape_cck, phy_idx);
+ rtw8852c_bb_set_tx_shape_dfir(rtwdev, chan, tx_shape_cck, phy_idx);
rtw89_phy_tssi_ctrl_set_bandedge_cfg(rtwdev,
(enum rtw89_mac_idx)phy_idx,
@@ -2857,6 +2857,7 @@ const struct rtw89_chip_info rtw8852c_chip_info = {
.chip_id = RTL8852C,
.ops = &rtw8852c_chip_ops,
.fw_name = "rtw89/rtw8852c_fw.bin",
+ .try_ce_fw = false,
.fifo_size = 458752,
.dle_scc_rsvd_size = 0,
.max_amsdu_limit = 8000,
@@ -2915,20 +2916,6 @@ const struct rtw89_chip_info rtw8852c_chip_info = {
.btcx_desired = 0x7,
.scbd = 0x1,
.mailbox = 0x1,
- .btc_fwinfo_buf = 1280,
-
- .fcxbtcrpt_ver = 4,
- .fcxtdma_ver = 3,
- .fcxslots_ver = 1,
- .fcxcysta_ver = 3,
- .fcxstep_ver = 3,
- .fcxnullsta_ver = 2,
- .fcxmreg_ver = 1,
- .fcxgpiodbg_ver = 1,
- .fcxbtver_ver = 1,
- .fcxbtscan_ver = 1,
- .fcxbtafh_ver = 1,
- .fcxbtdevinfo_ver = 1,
.afh_guard_ch = 6,
.wl_rssi_thres = rtw89_btc_8852c_wl_rssi_thres,
@@ -2959,6 +2946,7 @@ const struct rtw89_chip_info rtw8852c_chip_info = {
.dcfo_comp_sft = 5,
.imr_info = &rtw8852c_imr_info,
.rrsr_cfgs = &rtw8852c_rrsr_cfgs,
+ .bss_clr_map_reg = R_BSS_CLR_MAP,
.dma_ch_mask = 0,
#ifdef CONFIG_PM
.wowlan_stub = &rtw_wowlan_stub_8852c,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c
index 60cd676fe22c..2c0bc3a4ab3b 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c
@@ -11,6 +11,15 @@
#include "rtw8852c_rfk_table.h"
#include "rtw8852c_table.h"
+struct rxck_def {
+ u32 ctl;
+ u32 en;
+ u32 bw0;
+ u32 bw1;
+ u32 mul;
+ u32 lp;
+};
+
#define _TSSI_DE_MASK GENMASK(21, 12)
static const u32 _tssi_de_cck_long[RF_PATH_NUM_8852C] = {0x5858, 0x7858};
static const u32 _tssi_de_cck_short[RF_PATH_NUM_8852C] = {0x5860, 0x7860};
@@ -26,7 +35,7 @@ static const u32 rtw8852c_backup_bb_regs[] = {
};
static const u32 rtw8852c_backup_rf_regs[] = {
- 0xdf, 0x8f, 0x97, 0xa3, 0x5, 0x10005
+ 0xdf, 0x5f, 0x8f, 0x97, 0xa3, 0x5, 0x10005
};
#define BACKUP_BB_REGS_NR ARRAY_SIZE(rtw8852c_backup_bb_regs)
@@ -59,6 +68,13 @@ static const u32 dpk_par_regs[RTW89_DPK_RF_PATH][4] = {
{0x81a8, 0x81c4, 0x81c8, 0x81e8},
};
+static const u8 _dck_addr_bs[RF_PATH_NUM_8852C] = {0x0, 0x10};
+static const u8 _dck_addr[RF_PATH_NUM_8852C] = {0xc, 0x1c};
+
+static const struct rxck_def _ck480M = {0x8, 0x2, 0x3, 0xf, 0x0, 0x9};
+static const struct rxck_def _ck960M = {0x8, 0x2, 0x2, 0x8, 0x0, 0x9};
+static const struct rxck_def _ck1920M = {0x8, 0x0, 0x2, 0x4, 0x6, 0x9};
+
static u8 _kpath(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
{
rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]dbcc_en: %x, PHY%d\n",
@@ -337,7 +353,7 @@ static void _dack_reload_by_path(struct rtw89_dev *rtwdev,
(dack->dadck_d[path][index] << 14);
addr = 0xc210 + offset;
rtw89_phy_write32(rtwdev, addr, val32);
- rtw89_phy_write32_set(rtwdev, addr, BIT(1));
+ rtw89_phy_write32_set(rtwdev, addr, BIT(0));
}
static void _dack_reload(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
@@ -437,6 +453,8 @@ static void rtw8852c_txck_force(struct rtw89_dev *rtwdev, u8 path, bool force,
static void rtw8852c_rxck_force(struct rtw89_dev *rtwdev, u8 path, bool force,
enum adc_ck ck)
{
+ const struct rxck_def *def;
+
rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_RXCK_ON, 0x0);
if (!force)
@@ -444,6 +462,26 @@ static void rtw8852c_rxck_force(struct rtw89_dev *rtwdev, u8 path, bool force,
rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_RXCK_VAL, ck);
rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_RXCK_ON, 0x1);
+
+ switch (ck) {
+ case ADC_480M:
+ def = &_ck480M;
+ break;
+ case ADC_960M:
+ def = &_ck960M;
+ break;
+ case ADC_1920M:
+ default:
+ def = &_ck1920M;
+ break;
+ }
+
+ rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0 | (path << 8), B_P0_CFCH_CTL, def->ctl);
+ rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0 | (path << 8), B_P0_CFCH_EN, def->en);
+ rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0 | (path << 8), B_P0_CFCH_BW0, def->bw0);
+ rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1 | (path << 8), B_P0_CFCH_BW1, def->bw1);
+ rtw89_phy_write32_mask(rtwdev, R_DRCK | (path << 8), B_DRCK_MUL, def->mul);
+ rtw89_phy_write32_mask(rtwdev, R_ADCMOD | (path << 8), B_ADCMOD_LP, def->lp);
}
static bool _check_dack_done(struct rtw89_dev *rtwdev, bool s0)
@@ -627,8 +665,6 @@ static void _iqk_rxk_setting(struct rtw89_dev *rtwdev, u8 path)
rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), B_DPD_GDIS, 0x1);
rtw8852c_rxck_force(rtwdev, path, true, ADC_480M);
rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), B_ACK_VAL, 0x0);
- rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0 + (path << 8), B_P0_CFCH_BW0, 0x3);
- rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1 + (path << 8), B_P0_CFCH_BW1, 0xf);
rtw89_write_rf(rtwdev, path, RR_RXBB2, RR_RXBB2_CKT, 0x1);
rtw89_phy_write32_mask(rtwdev, R_P0_NRBW + (path << 13), B_P0_NRBW_DBG, 0x1);
break;
@@ -636,8 +672,6 @@ static void _iqk_rxk_setting(struct rtw89_dev *rtwdev, u8 path)
rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), B_DPD_GDIS, 0x1);
rtw8852c_rxck_force(rtwdev, path, true, ADC_960M);
rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), B_ACK_VAL, 0x1);
- rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0 + (path << 8), B_P0_CFCH_BW0, 0x2);
- rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1 + (path << 8), B_P0_CFCH_BW1, 0xd);
rtw89_write_rf(rtwdev, path, RR_RXBB2, RR_RXBB2_CKT, 0x1);
rtw89_phy_write32_mask(rtwdev, R_P0_NRBW + (path << 13), B_P0_NRBW_DBG, 0x1);
break;
@@ -645,8 +679,6 @@ static void _iqk_rxk_setting(struct rtw89_dev *rtwdev, u8 path)
rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), B_DPD_GDIS, 0x1);
rtw8852c_rxck_force(rtwdev, path, true, ADC_1920M);
rtw89_phy_write32_mask(rtwdev, R_UPD_CLK + (path << 13), B_ACK_VAL, 0x2);
- rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0 + (path << 8), B_P0_CFCH_BW0, 0x1);
- rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1 + (path << 8), B_P0_CFCH_BW1, 0xb);
rtw89_write_rf(rtwdev, path, RR_RXBB2, RR_RXBB2_CKT, 0x1);
rtw89_phy_write32_mask(rtwdev, R_P0_NRBW + (path << 13), B_P0_NRBW_DBG, 0x1);
break;
@@ -1410,8 +1442,6 @@ static void _iqk_macbb_setting(struct rtw89_dev *rtwdev,
rtw8852c_rxck_force(rtwdev, path, true, ADC_1920M);
rtw89_phy_write32_mask(rtwdev, R_UPD_CLK | (path << 13), B_ACK_VAL, 0x2);
- rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0 | (path << 8), B_P0_CFCH_BW0, 0x1);
- rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1 | (path << 8), B_P0_CFCH_BW1, 0xb);
rtw89_phy_write32_mask(rtwdev, R_P0_NRBW | (path << 13), B_P0_NRBW_DBG, 0x1);
rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x1f);
rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x13);
@@ -1536,6 +1566,155 @@ static void _iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, bool forc
}
}
+static void _rx_dck_value_rewrite(struct rtw89_dev *rtwdev, u8 path, u8 addr,
+ u8 val_i, u8 val_q)
+{
+ u32 ofst_val;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[RX_DCK] rewrite val_i = 0x%x, val_q = 0x%x\n", val_i, val_q);
+
+ /* val_i and val_q are 7 bits, and target is 6 bits. */
+ ofst_val = u32_encode_bits(val_q >> 1, RR_LUTWD0_MB) |
+ u32_encode_bits(val_i >> 1, RR_LUTWD0_LB);
+
+ rtw89_write_rf(rtwdev, path, RR_LUTPLL, RR_CAL_RW, 0x1);
+ rtw89_write_rf(rtwdev, path, RR_RFC, RR_WCAL, 0x1);
+ rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_FINE, 0x1);
+ rtw89_write_rf(rtwdev, path, RR_LUTWA, MASKBYTE0, addr);
+ rtw89_write_rf(rtwdev, path, RR_LUTWD0, RFREG_MASK, ofst_val);
+ rtw89_write_rf(rtwdev, path, RR_LUTWD0, RFREG_MASK, ofst_val);
+ rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_FINE, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_RFC, RR_WCAL, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_LUTPLL, RR_CAL_RW, 0x0);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RX_DCK] Final val_i = 0x%x, val_q = 0x%x\n",
+ u32_get_bits(ofst_val, RR_LUTWD0_LB) << 1,
+ u32_get_bits(ofst_val, RR_LUTWD0_MB) << 1);
+}
+
+static bool _rx_dck_rek_check(struct rtw89_dev *rtwdev, u8 path)
+{
+ u8 i_even_bs, q_even_bs;
+ u8 i_odd_bs, q_odd_bs;
+ u8 i_even, q_even;
+ u8 i_odd, q_odd;
+ const u8 th = 10;
+ u8 i;
+
+ for (i = 0; i < RF_PATH_NUM_8852C; i++) {
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_DCK, _dck_addr_bs[i]);
+ i_even_bs = rtw89_read_rf(rtwdev, path, RR_DCK, RR_DCK_TIA);
+ q_even_bs = rtw89_read_rf(rtwdev, path, RR_DCK1, RR_DCK1_TIA);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[RX_DCK] Gain[0x%x] i_even_bs/ q_even_bs = 0x%x/ 0x%x\n",
+ _dck_addr_bs[i], i_even_bs, q_even_bs);
+
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_DCK, _dck_addr[i]);
+ i_even = rtw89_read_rf(rtwdev, path, RR_DCK, RR_DCK_TIA);
+ q_even = rtw89_read_rf(rtwdev, path, RR_DCK1, RR_DCK1_TIA);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[RX_DCK] Gain[0x%x] i_even/ q_even = 0x%x/ 0x%x\n",
+ _dck_addr[i], i_even, q_even);
+
+ if (abs(i_even_bs - i_even) > th || abs(q_even_bs - q_even) > th)
+ return true;
+
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_DCK, _dck_addr_bs[i] + 1);
+ i_odd_bs = rtw89_read_rf(rtwdev, path, RR_DCK, RR_DCK_TIA);
+ q_odd_bs = rtw89_read_rf(rtwdev, path, RR_DCK1, RR_DCK1_TIA);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[RX_DCK] Gain[0x%x] i_odd_bs/ q_odd_bs = 0x%x/ 0x%x\n",
+ _dck_addr_bs[i] + 1, i_odd_bs, q_odd_bs);
+
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_DCK, _dck_addr[i] + 1);
+ i_odd = rtw89_read_rf(rtwdev, path, RR_DCK, RR_DCK_TIA);
+ q_odd = rtw89_read_rf(rtwdev, path, RR_DCK1, RR_DCK1_TIA);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[RX_DCK] Gain[0x%x] i_odd/ q_odd = 0x%x/ 0x%x\n",
+ _dck_addr[i] + 1, i_odd, q_odd);
+
+ if (abs(i_odd_bs - i_odd) > th || abs(q_odd_bs - q_odd) > th)
+ return true;
+ }
+
+ return false;
+}
+
+static void _rx_dck_fix_if_need(struct rtw89_dev *rtwdev, u8 path, u8 addr,
+ u8 val_i_bs, u8 val_q_bs, u8 val_i, u8 val_q)
+{
+ const u8 th = 10;
+
+ if ((abs(val_i_bs - val_i) < th) && (abs(val_q_bs - val_q) <= th)) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RX_DCK] offset check PASS!!\n");
+ return;
+ }
+
+ if (abs(val_i_bs - val_i) > th) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[RX_DCK] val_i over TH (0x%x / 0x%x)\n", val_i_bs, val_i);
+ val_i = val_i_bs;
+ }
+
+ if (abs(val_q_bs - val_q) > th) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[RX_DCK] val_q over TH (0x%x / 0x%x)\n", val_q_bs, val_q);
+ val_q = val_q_bs;
+ }
+
+ _rx_dck_value_rewrite(rtwdev, path, addr, val_i, val_q);
+}
+
+static void _rx_dck_recover(struct rtw89_dev *rtwdev, u8 path)
+{
+ u8 i_even_bs, q_even_bs;
+ u8 i_odd_bs, q_odd_bs;
+ u8 i_even, q_even;
+ u8 i_odd, q_odd;
+ u8 i;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RX_DCK] ===> recovery\n");
+
+ for (i = 0; i < RF_PATH_NUM_8852C; i++) {
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_DCK, _dck_addr_bs[i]);
+ i_even_bs = rtw89_read_rf(rtwdev, path, RR_DCK, RR_DCK_TIA);
+ q_even_bs = rtw89_read_rf(rtwdev, path, RR_DCK1, RR_DCK1_TIA);
+
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_DCK, _dck_addr_bs[i] + 1);
+ i_odd_bs = rtw89_read_rf(rtwdev, path, RR_DCK, RR_DCK_TIA);
+ q_odd_bs = rtw89_read_rf(rtwdev, path, RR_DCK1, RR_DCK1_TIA);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[RX_DCK] Gain[0x%x] i_even_bs/ q_even_bs = 0x%x/ 0x%x\n",
+ _dck_addr_bs[i], i_even_bs, q_even_bs);
+
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_DCK, _dck_addr[i]);
+ i_even = rtw89_read_rf(rtwdev, path, RR_DCK, RR_DCK_TIA);
+ q_even = rtw89_read_rf(rtwdev, path, RR_DCK1, RR_DCK1_TIA);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[RX_DCK] Gain[0x%x] i_even/ q_even = 0x%x/ 0x%x\n",
+ _dck_addr[i], i_even, q_even);
+ _rx_dck_fix_if_need(rtwdev, path, _dck_addr[i],
+ i_even_bs, q_even_bs, i_even, q_even);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[RX_DCK] Gain[0x%x] i_odd_bs/ q_odd_bs = 0x%x/ 0x%x\n",
+ _dck_addr_bs[i] + 1, i_odd_bs, q_odd_bs);
+
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_DCK, _dck_addr[i] + 1);
+ i_odd = rtw89_read_rf(rtwdev, path, RR_DCK, RR_DCK_TIA);
+ q_odd = rtw89_read_rf(rtwdev, path, RR_DCK1, RR_DCK1_TIA);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[RX_DCK] Gain[0x%x] i_odd/ q_odd = 0x%x/ 0x%x\n",
+ _dck_addr[i] + 1, i_odd, q_odd);
+ _rx_dck_fix_if_need(rtwdev, path, _dck_addr[i] + 1,
+ i_odd_bs, q_odd_bs, i_odd, q_odd);
+ }
+}
+
static void _rx_dck_toggle(struct rtw89_dev *rtwdev, u8 path)
{
int ret;
@@ -1573,11 +1752,42 @@ static void _set_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, u8 pat
}
}
+static
+u8 _rx_dck_channel_calc(struct rtw89_dev *rtwdev, const struct rtw89_chan *chan)
+{
+ u8 target_ch = 0;
+
+ if (chan->band_type == RTW89_BAND_5G) {
+ if (chan->channel >= 36 && chan->channel <= 64) {
+ target_ch = 100;
+ } else if (chan->channel >= 100 && chan->channel <= 144) {
+ target_ch = chan->channel + 32;
+ if (target_ch > 144)
+ target_ch = chan->channel + 33;
+ } else if (chan->channel >= 149 && chan->channel <= 177) {
+ target_ch = chan->channel - 33;
+ }
+ } else if (chan->band_type == RTW89_BAND_6G) {
+ if (chan->channel >= 1 && chan->channel <= 125)
+ target_ch = chan->channel + 32;
+ else
+ target_ch = chan->channel - 32;
+ } else {
+ target_ch = chan->channel;
+ }
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[RX_DCK] cur_ch / target_ch = %d / %d\n",
+ chan->channel, target_ch);
+
+ return target_ch;
+}
+
#define RTW8852C_RF_REL_VERSION 34
-#define RTW8852C_DPK_VER 0x10
+#define RTW8852C_DPK_VER 0xf
#define RTW8852C_DPK_TH_AVG_NUM 4
#define RTW8852C_DPK_RF_PATH 2
-#define RTW8852C_DPK_KIP_REG_NUM 5
+#define RTW8852C_DPK_KIP_REG_NUM 7
#define RTW8852C_DPK_RXSRAM_DBG 0
enum rtw8852c_dpk_id {
@@ -1614,6 +1824,12 @@ enum dpk_agc_step {
DPK_AGC_STEP_SET_TX_GAIN,
};
+enum dpk_pas_result {
+ DPK_PAS_NOR,
+ DPK_PAS_GT,
+ DPK_PAS_LT,
+};
+
static void _rf_direct_cntrl(struct rtw89_dev *rtwdev,
enum rtw89_rf_path path, bool is_bybb)
{
@@ -1730,8 +1946,6 @@ static void _dpk_bb_afe_setting(struct rtw89_dev *rtwdev,
/*4. Set ADC clk*/
rtw8852c_rxck_force(rtwdev, path, true, ADC_1920M);
- rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW0 + (path << 8), B_P0_CFCH_BW0, 0x1);
- rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1 + (path << 8), B_P0_CFCH_BW1, 0xb);
rtw89_phy_write32_mask(rtwdev, R_P0_NRBW + (path << 13),
B_P0_NRBW_DBG, 0x1);
rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, MASKBYTE3, 0x1f);
@@ -1872,12 +2086,11 @@ static void _dpk_rf_setting(struct rtw89_dev *rtwdev, u8 gain,
0x50101 | BIT(rtwdev->dbcc_en));
rtw89_write_rf(rtwdev, path, RR_MOD_V1, RR_MOD_MASK, RF_DPK);
- if (dpk->bp[path][kidx].band == RTW89_BAND_6G && dpk->bp[path][kidx].ch >= 161) {
+ if (dpk->bp[path][kidx].band == RTW89_BAND_6G && dpk->bp[path][kidx].ch >= 161)
rtw89_write_rf(rtwdev, path, RR_IQGEN, RR_IQGEN_BIAS, 0x8);
- rtw89_write_rf(rtwdev, path, RR_LOGEN, RR_LOGEN_RPT, 0xd);
- } else {
- rtw89_write_rf(rtwdev, path, RR_LOGEN, RR_LOGEN_RPT, 0xd);
- }
+
+ rtw89_write_rf(rtwdev, path, RR_LOGEN, RR_LOGEN_RPT, 0xd);
+ rtw89_write_rf(rtwdev, path, RR_TXAC, RR_TXAC_IQG, 0x8);
rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_ATT, 0x0);
rtw89_write_rf(rtwdev, path, RR_TXIQK, RR_TXIQK_ATT2, 0x3);
@@ -2024,9 +2237,10 @@ static u8 _dpk_gainloss(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
return _dpk_gainloss_read(rtwdev);
}
-static bool _dpk_pas_read(struct rtw89_dev *rtwdev, bool is_check)
+static enum dpk_pas_result _dpk_pas_read(struct rtw89_dev *rtwdev, bool is_check)
{
u32 val1_i = 0, val1_q = 0, val2_i = 0, val2_q = 0;
+ u32 val1_sqrt_sum, val2_sqrt_sum;
u8 i;
rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKBYTE2, 0x06);
@@ -2057,15 +2271,25 @@ static bool _dpk_pas_read(struct rtw89_dev *rtwdev, bool is_check)
}
}
- if (val1_i * val1_i + val1_q * val1_q >= (val2_i * val2_i + val2_q * val2_q) * 8 / 5)
- return true;
+ val1_sqrt_sum = val1_i * val1_i + val1_q * val1_q;
+ val2_sqrt_sum = val2_i * val2_i + val2_q * val2_q;
+
+ if (val1_sqrt_sum < val2_sqrt_sum)
+ return DPK_PAS_LT;
+ else if (val1_sqrt_sum >= val2_sqrt_sum * 8 / 5)
+ return DPK_PAS_GT;
else
- return false;
+ return DPK_PAS_NOR;
}
static bool _dpk_kip_set_rxagc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
enum rtw89_rf_path path, u8 kidx)
{
+ _dpk_kip_control_rfc(rtwdev, path, false);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_MOD, B_KIP_MOD,
+ rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK));
+ _dpk_kip_control_rfc(rtwdev, path, true);
+
_dpk_one_shot(rtwdev, phy, path, D_RXAGC);
return _dpk_sync_check(rtwdev, path, kidx);
@@ -2103,6 +2327,7 @@ static u8 _dpk_agc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
u8 tmp_dbm = init_xdbm, tmp_gl_idx = 0;
u8 tmp_rxbb;
u8 goout = 0, agc_cnt = 0;
+ enum dpk_pas_result pas;
u16 dgain = 0;
bool is_fail = false;
int limit = 200;
@@ -2138,9 +2363,13 @@ static u8 _dpk_agc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
case DPK_AGC_STEP_GAIN_LOSS_IDX:
tmp_gl_idx = _dpk_gainloss(rtwdev, phy, path, kidx);
+ pas = _dpk_pas_read(rtwdev, true);
- if ((tmp_gl_idx == 0 && _dpk_pas_read(rtwdev, true)) ||
- tmp_gl_idx >= 7)
+ if (pas == DPK_PAS_LT && tmp_gl_idx > 0)
+ step = DPK_AGC_STEP_GL_LT_CRITERION;
+ else if (pas == DPK_PAS_GT && tmp_gl_idx == 0)
+ step = DPK_AGC_STEP_GL_GT_CRITERION;
+ else if (tmp_gl_idx >= 7)
step = DPK_AGC_STEP_GL_GT_CRITERION;
else if (tmp_gl_idx == 0)
step = DPK_AGC_STEP_GL_LT_CRITERION;
@@ -2467,12 +2696,14 @@ static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force,
enum rtw89_phy_idx phy, u8 kpath)
{
struct rtw89_dpk_info *dpk = &rtwdev->dpk;
- static const u32 kip_reg[] = {0x813c, 0x8124, 0x8120, 0xc0d4, 0xc0d8};
+ static const u32 kip_reg[] = {0x813c, 0x8124, 0x8120, 0xc0c4, 0xc0e8, 0xc0d4, 0xc0d8};
u32 backup_rf_val[RTW8852C_DPK_RF_PATH][BACKUP_RF_REGS_NR];
u32 kip_bkup[RTW8852C_DPK_RF_PATH][RTW8852C_DPK_KIP_REG_NUM] = {};
u8 path;
bool is_fail = true, reloaded[RTW8852C_DPK_RF_PATH] = {false};
+ static_assert(ARRAY_SIZE(kip_reg) == RTW8852C_DPK_KIP_REG_NUM);
+
if (dpk->is_dpk_reload_en) {
for (path = 0; path < RTW8852C_DPK_RF_PATH; path++) {
if (!(kpath & BIT(path)))
@@ -3875,11 +4106,14 @@ void rtw8852c_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
#define RXDCK_VER_8852C 0xe
-void rtw8852c_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool is_afe)
+static void _rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ bool is_afe, u8 retry_limit)
{
struct rtw89_rx_dck_info *rx_dck = &rtwdev->rx_dck;
u8 path, kpath;
u32 rf_reg5;
+ bool is_fail;
+ u8 rek_cnt;
kpath = _kpath(rtwdev, phy);
rtw89_debug(rtwdev, RTW89_DBG_RFK,
@@ -3896,7 +4130,27 @@ void rtw8852c_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool is_a
B_P0_TSSI_TRK_EN, 0x1);
rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX);
- _set_rx_dck(rtwdev, phy, path, is_afe);
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_LO_SEL, rtwdev->dbcc_en);
+
+ for (rek_cnt = 0; rek_cnt < retry_limit; rek_cnt++) {
+ _set_rx_dck(rtwdev, phy, path, is_afe);
+
+ /* To reduce IO of dck_rek_check(), the last try is seen
+ * as failure always, and then do recovery procedure.
+ */
+ if (rek_cnt == retry_limit - 1) {
+ _rx_dck_recover(rtwdev, path);
+ break;
+ }
+
+ is_fail = _rx_dck_rek_check(rtwdev, path);
+ if (!is_fail)
+ break;
+ }
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RX_DCK] rek_cnt[%d]=%d",
+ path, rek_cnt);
+
rx_dck->thermal[path] = ewma_thermal_read(&rtwdev->phystat.avg_thermal[path]);
rtw89_write_rf(rtwdev, path, RR_RSV1, RFREG_MASK, rf_reg5);
@@ -3906,15 +4160,31 @@ void rtw8852c_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool is_a
}
}
-#define RTW8852C_RX_DCK_TH 8
+void rtw8852c_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool is_afe)
+{
+ _rx_dck(rtwdev, phy, is_afe, 1);
+}
+
+#define RTW8852C_RX_DCK_TH 12
void rtw8852c_rx_dck_track(struct rtw89_dev *rtwdev)
{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
struct rtw89_rx_dck_info *rx_dck = &rtwdev->rx_dck;
+ enum rtw89_phy_idx phy_idx = RTW89_PHY_0;
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0);
+ u8 dck_channel;
u8 cur_thermal;
+ u32 tx_en;
int delta;
int path;
+ if (chan->band_type == RTW89_BAND_2G)
+ return;
+
+ if (rtwdev->scanning)
+ return;
+
for (path = 0; path < RF_PATH_NUM_8852C; path++) {
cur_thermal =
ewma_thermal_read(&rtwdev->phystat.avg_thermal[path]);
@@ -3924,11 +4194,28 @@ void rtw8852c_rx_dck_track(struct rtw89_dev *rtwdev)
"[RX_DCK] path=%d current thermal=0x%x delta=0x%x\n",
path, cur_thermal, delta);
- if (delta >= RTW8852C_RX_DCK_TH) {
- rtw8852c_rx_dck(rtwdev, RTW89_PHY_0, false);
- return;
- }
+ if (delta >= RTW8852C_RX_DCK_TH)
+ goto trigger_rx_dck;
}
+
+ return;
+
+trigger_rx_dck:
+ rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_START);
+ rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
+
+ for (path = 0; path < RF_PATH_NUM_8852C; path++) {
+ dck_channel = _rx_dck_channel_calc(rtwdev, chan);
+ _ctrl_ch(rtwdev, RTW89_PHY_0, dck_channel, chan->band_type);
+ }
+
+ _rx_dck(rtwdev, RTW89_PHY_0, false, 20);
+
+ for (path = 0; path < RF_PATH_NUM_8852C; path++)
+ _ctrl_ch(rtwdev, RTW89_PHY_0, chan->channel, chan->band_type);
+
+ rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
+ rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_STOP);
}
void rtw8852c_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852ce.c b/drivers/net/wireless/realtek/rtw89/rtw8852ce.c
index 35901f64d17d..80490a5437df 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852ce.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852ce.c
@@ -53,6 +53,7 @@ static const struct rtw89_pci_info rtw8852c_pci_info = {
.tx_dma_ch_mask = 0,
.bd_idx_addr_low_power = &rtw8852c_bd_idx_addr_low_power,
.dma_addr_set = &rtw89_pci_ch_dma_addr_set_v1,
+ .bd_ram_table = &rtw89_bd_ram_table_dual,
.ltr_set = rtw89_pci_ltr_set_v1,
.fill_txaddr_info = rtw89_pci_fill_txaddr_info_v1,
diff --git a/drivers/net/wireless/realtek/rtw89/ser.c b/drivers/net/wireless/realtek/rtw89/ser.c
index c1a4bc1c64d1..61db7189fdab 100644
--- a/drivers/net/wireless/realtek/rtw89/ser.c
+++ b/drivers/net/wireless/realtek/rtw89/ser.c
@@ -611,6 +611,7 @@ bottom:
ser_reset_mac_binding(rtwdev);
rtw89_core_stop(rtwdev);
rtw89_entity_init(rtwdev);
+ rtw89_fw_release_general_pkt_list(rtwdev, false);
INIT_LIST_HEAD(&rtwdev->rtwvifs_list);
}
diff --git a/drivers/net/wireless/realtek/rtw89/txrx.h b/drivers/net/wireless/realtek/rtw89/txrx.h
index 9d4c6b6fa125..98eb9607cd21 100644
--- a/drivers/net/wireless/realtek/rtw89/txrx.h
+++ b/drivers/net/wireless/realtek/rtw89/txrx.h
@@ -75,7 +75,9 @@
#define RTW89_TXWD_INFO0_DATA_BW GENMASK(29, 28)
#define RTW89_TXWD_INFO0_GI_LTF GENMASK(27, 25)
#define RTW89_TXWD_INFO0_DATA_RATE GENMASK(24, 16)
+#define RTW89_TXWD_INFO0_DATA_ER BIT(15)
#define RTW89_TXWD_INFO0_DISDATAFB BIT(10)
+#define RTW89_TXWD_INFO0_DATA_BW_ER BIT(8)
#define RTW89_TXWD_INFO0_MULTIPORT_ID GENMASK(6, 4)
/* TX WD INFO DWORD 1 */
diff --git a/drivers/net/wireless/realtek/rtw89/wow.c b/drivers/net/wireless/realtek/rtw89/wow.c
index b2b826b2e09a..c78ee2ab732c 100644
--- a/drivers/net/wireless/realtek/rtw89/wow.c
+++ b/drivers/net/wireless/realtek/rtw89/wow.c
@@ -490,21 +490,6 @@ static int rtw89_wow_check_fw_status(struct rtw89_dev *rtwdev, bool wow_enable)
return ret;
}
-static void rtw89_wow_release_pkt_list(struct rtw89_dev *rtwdev)
-{
- struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
- struct list_head *pkt_list = &rtw_wow->pkt_list;
- struct rtw89_pktofld_info *info, *tmp;
-
- list_for_each_entry_safe(info, tmp, pkt_list, list) {
- rtw89_fw_h2c_del_pkt_offload(rtwdev, info->id);
- rtw89_core_release_bit_map(rtwdev->pkt_offload,
- info->id);
- list_del(&info->list);
- kfree(info);
- }
-}
-
static int rtw89_wow_swap_fw(struct rtw89_dev *rtwdev, bool wow)
{
enum rtw89_fw_type fw_type = wow ? RTW89_FW_WOWLAN : RTW89_FW_NORMAL;
@@ -561,6 +546,11 @@ static int rtw89_wow_swap_fw(struct rtw89_dev *rtwdev, bool wow)
}
if (is_conn) {
+ ret = rtw89_fw_h2c_general_pkt(rtwdev, rtwvif, rtwsta->mac_id);
+ if (ret) {
+ rtw89_warn(rtwdev, "failed to send h2c general packet\n");
+ return ret;
+ }
rtw89_phy_ra_assoc(rtwdev, wow_sta);
rtw89_phy_set_bss_color(rtwdev, wow_vif);
rtw89_chip_cfg_txpwr_ul_tb_offset(rtwdev, wow_vif);
@@ -708,8 +698,6 @@ static int rtw89_wow_fw_stop(struct rtw89_dev *rtwdev)
goto out;
}
- rtw89_wow_release_pkt_list(rtwdev);
-
ret = rtw89_fw_h2c_disconnect_detect(rtwdev, rtwvif, false);
if (ret) {
rtw89_err(rtwdev, "wow: failed to disable disconnect detect\n");
@@ -722,6 +710,8 @@ static int rtw89_wow_fw_stop(struct rtw89_dev *rtwdev)
goto out;
}
+ rtw89_fw_release_general_pkt_list(rtwdev, true);
+
ret = rtw89_wow_check_fw_status(rtwdev, false);
if (ret) {
rtw89_err(rtwdev, "wow: failed to check disable fw ready\n");
@@ -744,6 +734,8 @@ static int rtw89_wow_enable(struct rtw89_dev *rtwdev)
goto out;
}
+ rtw89_fw_release_general_pkt_list(rtwdev, true);
+
ret = rtw89_wow_swap_fw(rtwdev, true);
if (ret) {
rtw89_err(rtwdev, "wow: failed to swap to wow fw\n");
diff --git a/drivers/net/wireless/rsi/rsi_91x_coex.c b/drivers/net/wireless/rsi/rsi_91x_coex.c
index 8a3d86897ea8..45ac9371f262 100644
--- a/drivers/net/wireless/rsi/rsi_91x_coex.c
+++ b/drivers/net/wireless/rsi/rsi_91x_coex.c
@@ -160,6 +160,7 @@ int rsi_coex_attach(struct rsi_common *common)
rsi_coex_scheduler_thread,
"Coex-Tx-Thread")) {
rsi_dbg(ERR_ZONE, "%s: Unable to init tx thrd\n", __func__);
+ kfree(coex_cb);
return -EINVAL;
}
return 0;
diff --git a/drivers/net/wireless/rsi/rsi_91x_hal.c b/drivers/net/wireless/rsi/rsi_91x_hal.c
index c7460fbba014..d4489b943873 100644
--- a/drivers/net/wireless/rsi/rsi_91x_hal.c
+++ b/drivers/net/wireless/rsi/rsi_91x_hal.c
@@ -894,7 +894,7 @@ static int rsi_load_9113_firmware(struct rsi_hw *adapter)
struct ta_metadata *metadata_p;
int status;
- status = bl_cmd(adapter, CONFIG_AUTO_READ_MODE, CMD_PASS,
+ status = bl_cmd(adapter, AUTO_READ_MODE, CMD_PASS,
"AUTO_READ_CMD");
if (status < 0)
return status;
@@ -984,7 +984,7 @@ fw_upgrade:
}
rsi_dbg(ERR_ZONE, "Firmware upgrade failed\n");
- status = bl_cmd(adapter, CONFIG_AUTO_READ_MODE, CMD_PASS,
+ status = bl_cmd(adapter, AUTO_READ_MODE, CMD_PASS,
"AUTO_READ_MODE");
if (status)
goto fail;
diff --git a/drivers/net/wireless/rsi/rsi_hal.h b/drivers/net/wireless/rsi/rsi_hal.h
index 5b07262a9740..479b1b0b57a6 100644
--- a/drivers/net/wireless/rsi/rsi_hal.h
+++ b/drivers/net/wireless/rsi/rsi_hal.h
@@ -69,7 +69,7 @@
#define EOF_REACHED 'E'
#define CHECK_CRC 'K'
#define POLLING_MODE 'P'
-#define CONFIG_AUTO_READ_MODE 'R'
+#define AUTO_READ_MODE 'R'
#define JUMP_TO_ZERO_PC 'J'
#define FW_LOADING_SUCCESSFUL 'S'
#define LOADING_INITIATED '1'
diff --git a/drivers/net/wireless/ti/wl1251/init.c b/drivers/net/wireless/ti/wl1251/init.c
index a19cce3a7e6f..5663f197ea69 100644
--- a/drivers/net/wireless/ti/wl1251/init.c
+++ b/drivers/net/wireless/ti/wl1251/init.c
@@ -373,7 +373,7 @@ int wl1251_hw_init(struct wl1251 *wl)
if (ret < 0)
goto out_free_data_path;
- /* Beacons and boradcast settings */
+ /* Beacons and broadcast settings */
ret = wl1251_hw_init_beacon_broadcast(wl);
if (ret < 0)
goto out_free_data_path;
diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c
index 1b532e00a56f..7fb2f9513476 100644
--- a/drivers/net/wireless/wl3501_cs.c
+++ b/drivers/net/wireless/wl3501_cs.c
@@ -1328,7 +1328,7 @@ static netdev_tx_t wl3501_hard_start_xmit(struct sk_buff *skb,
} else {
++dev->stats.tx_packets;
dev->stats.tx_bytes += skb->len;
- kfree_skb(skb);
+ dev_kfree_skb_irq(skb);
if (this->tx_buffer_cnt < 2)
netif_stop_queue(dev);
diff --git a/drivers/net/wireless/zydas/zd1211rw/zd_rf.h b/drivers/net/wireless/zydas/zd1211rw/zd_rf.h
index 8bfec9e75125..9ca69df3d288 100644
--- a/drivers/net/wireless/zydas/zd1211rw/zd_rf.h
+++ b/drivers/net/wireless/zydas/zd1211rw/zd_rf.h
@@ -85,9 +85,6 @@ static inline int zd_rf_should_patch_cck_gain(struct zd_rf *rf)
return rf->patch_cck_gain;
}
-int zd_rf_patch_6m_band_edge(struct zd_rf *rf, u8 channel);
-int zd_rf_generic_patch_6m(struct zd_rf *rf, u8 channel);
-
/* Functions for individual RF chips */
int zd_rf_init_rf2959(struct zd_rf *rf);
diff --git a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif.c b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif.c
index 7eff3531b9a5..7ff33c1d6ac7 100644
--- a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif.c
+++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif.c
@@ -152,6 +152,15 @@ static irqreturn_t t7xx_dpmaif_isr_handler(int irq, void *data)
}
t7xx_pcie_mac_clear_int(dpmaif_ctrl->t7xx_dev, isr_para->pcie_int);
+
+ return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t t7xx_dpmaif_isr_thread(int irq, void *data)
+{
+ struct dpmaif_isr_para *isr_para = data;
+ struct dpmaif_ctrl *dpmaif_ctrl = isr_para->dpmaif_ctrl;
+
t7xx_dpmaif_irq_cb(isr_para);
t7xx_pcie_mac_set_int(dpmaif_ctrl->t7xx_dev, isr_para->pcie_int);
return IRQ_HANDLED;
@@ -188,7 +197,7 @@ static void t7xx_dpmaif_register_pcie_irq(struct dpmaif_ctrl *dpmaif_ctrl)
t7xx_pcie_mac_clear_int(t7xx_dev, int_type);
t7xx_dev->intr_handler[int_type] = t7xx_dpmaif_isr_handler;
- t7xx_dev->intr_thread[int_type] = NULL;
+ t7xx_dev->intr_thread[int_type] = t7xx_dpmaif_isr_thread;
t7xx_dev->callback_param[int_type] = isr_para;
t7xx_pcie_mac_clear_int_status(t7xx_dev, int_type);
diff --git a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
index aa2174a10437..f4ff2198b5ef 100644
--- a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
+++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
@@ -840,14 +840,13 @@ int t7xx_dpmaif_napi_rx_poll(struct napi_struct *napi, const int budget)
if (!rxq->que_started) {
atomic_set(&rxq->rx_processing, 0);
+ pm_runtime_put_autosuspend(rxq->dpmaif_ctrl->dev);
dev_err(rxq->dpmaif_ctrl->dev, "Work RXQ: %d has not been started\n", rxq->index);
return work_done;
}
- if (!rxq->sleep_lock_pending) {
- pm_runtime_get_noresume(rxq->dpmaif_ctrl->dev);
+ if (!rxq->sleep_lock_pending)
t7xx_pci_disable_sleep(t7xx_dev);
- }
ret = try_wait_for_completion(&t7xx_dev->sleep_lock_acquire);
if (!ret) {
@@ -876,22 +875,22 @@ int t7xx_dpmaif_napi_rx_poll(struct napi_struct *napi, const int budget)
napi_complete_done(napi, work_done);
t7xx_dpmaif_clr_ip_busy_sts(&rxq->dpmaif_ctrl->hw_info);
t7xx_dpmaif_dlq_unmask_rx_done(&rxq->dpmaif_ctrl->hw_info, rxq->index);
+ t7xx_pci_enable_sleep(rxq->dpmaif_ctrl->t7xx_dev);
+ pm_runtime_mark_last_busy(rxq->dpmaif_ctrl->dev);
+ pm_runtime_put_autosuspend(rxq->dpmaif_ctrl->dev);
+ atomic_set(&rxq->rx_processing, 0);
} else {
t7xx_dpmaif_clr_ip_busy_sts(&rxq->dpmaif_ctrl->hw_info);
}
- t7xx_pci_enable_sleep(rxq->dpmaif_ctrl->t7xx_dev);
- pm_runtime_mark_last_busy(rxq->dpmaif_ctrl->dev);
- pm_runtime_put_noidle(rxq->dpmaif_ctrl->dev);
- atomic_set(&rxq->rx_processing, 0);
-
return work_done;
}
void t7xx_dpmaif_irq_rx_done(struct dpmaif_ctrl *dpmaif_ctrl, const unsigned int que_mask)
{
struct dpmaif_rx_queue *rxq;
- int qno;
+ struct dpmaif_ctrl *ctrl;
+ int qno, ret;
qno = ffs(que_mask) - 1;
if (qno < 0 || qno > DPMAIF_RXQ_NUM - 1) {
@@ -900,6 +899,18 @@ void t7xx_dpmaif_irq_rx_done(struct dpmaif_ctrl *dpmaif_ctrl, const unsigned int
}
rxq = &dpmaif_ctrl->rxq[qno];
+ ctrl = rxq->dpmaif_ctrl;
+ /* We need to make sure that the modem has been resumed before
+ * calling napi. This can't be done inside the polling function
+ * as we could be blocked waiting for device to be resumed,
+ * which can't be done from softirq context the poll function
+ * is running in.
+ */
+ ret = pm_runtime_resume_and_get(ctrl->dev);
+ if (ret < 0 && ret != -EACCES) {
+ dev_err(ctrl->dev, "Failed to resume device: %d\n", ret);
+ return;
+ }
napi_schedule(&rxq->napi);
}
diff --git a/drivers/net/wwan/t7xx/t7xx_netdev.c b/drivers/net/wwan/t7xx/t7xx_netdev.c
index 494a28e386a3..3ef4a8a4f8fd 100644
--- a/drivers/net/wwan/t7xx/t7xx_netdev.c
+++ b/drivers/net/wwan/t7xx/t7xx_netdev.c
@@ -27,6 +27,7 @@
#include <linux/list.h>
#include <linux/netdev_features.h>
#include <linux/netdevice.h>
+#include <linux/pm_runtime.h>
#include <linux/skbuff.h>
#include <linux/types.h>
#include <linux/wwan.h>
@@ -45,12 +46,25 @@
static void t7xx_ccmni_enable_napi(struct t7xx_ccmni_ctrl *ctlb)
{
- int i;
+ struct dpmaif_ctrl *ctrl;
+ int i, ret;
+
+ ctrl = ctlb->hif_ctrl;
if (ctlb->is_napi_en)
return;
for (i = 0; i < RXQ_NUM; i++) {
+ /* The usage count has to be bumped every time before calling
+ * napi_schedule. It will be decresed in the poll routine,
+ * right after napi_complete_done is called.
+ */
+ ret = pm_runtime_resume_and_get(ctrl->dev);
+ if (ret < 0) {
+ dev_err(ctrl->dev, "Failed to resume device: %d\n",
+ ret);
+ return;
+ }
napi_enable(ctlb->napi[i]);
napi_schedule(ctlb->napi[i]);
}
diff --git a/drivers/net/wwan/t7xx/t7xx_pci.c b/drivers/net/wwan/t7xx/t7xx_pci.c
index 871f2a27a398..226fc1703e90 100644
--- a/drivers/net/wwan/t7xx/t7xx_pci.c
+++ b/drivers/net/wwan/t7xx/t7xx_pci.c
@@ -121,6 +121,8 @@ void t7xx_pci_pm_init_late(struct t7xx_pci_dev *t7xx_dev)
iowrite32(T7XX_L1_BIT(0), IREG_BASE(t7xx_dev) + ENABLE_ASPM_LOWPWR);
atomic_set(&t7xx_dev->md_pm_state, MTK_PM_RESUMED);
+ pm_runtime_mark_last_busy(&t7xx_dev->pdev->dev);
+ pm_runtime_allow(&t7xx_dev->pdev->dev);
pm_runtime_put_noidle(&t7xx_dev->pdev->dev);
}
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index 001636901dda..a78a25b87240 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -200,7 +200,7 @@ static void xenvif_debugfs_delif(struct xenvif *vif)
* and vif variables to the environment, for the benefit of the vif-* hotplug
* scripts.
*/
-static int netback_uevent(struct xenbus_device *xdev,
+static int netback_uevent(const struct xenbus_device *xdev,
struct kobj_uevent_env *env)
{
struct backend_info *be = dev_get_drvdata(&xdev->dev);
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 12b074286df9..47d54d8ea59d 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1741,6 +1741,8 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
* negotiate with the backend regarding supported features.
*/
netdev->features |= netdev->hw_features;
+ netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT;
netdev->ethtool_ops = &xennet_ethtool_ops;
netdev->min_mtu = ETH_MIN_MTU;
diff --git a/drivers/nvdimm/Kconfig b/drivers/nvdimm/Kconfig
index 79d93126453d..77b06d54cc62 100644
--- a/drivers/nvdimm/Kconfig
+++ b/drivers/nvdimm/Kconfig
@@ -102,6 +102,25 @@ config NVDIMM_KEYS
depends on ENCRYPTED_KEYS
depends on (LIBNVDIMM=ENCRYPTED_KEYS) || LIBNVDIMM=m
+config NVDIMM_KMSAN
+ bool
+ depends on KMSAN
+ help
+ KMSAN, and other memory debug facilities, increase the size of
+ 'struct page' to contain extra metadata. This collides with
+ the NVDIMM capability to store a potentially
+ larger-than-"System RAM" size 'struct page' array in a
+ reservation of persistent memory rather than limited /
+ precious DRAM. However, that reservation needs to persist for
+ the life of the given NVDIMM namespace. If you are using KMSAN
+ to debug an issue unrelated to NVDIMMs or DAX then say N to this
+ option. Otherwise, say Y but understand that any namespaces
+ (with the page array stored pmem) created with this build of
+ the kernel will permanently reserve and strand excess
+ capacity compared to the CONFIG_KMSAN=n case.
+
+ Select N if unsure.
+
config NVDIMM_TEST_BUILD
tristate "Build the unit test core"
depends on m
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
index 0297b7882e33..d5593b0dc700 100644
--- a/drivers/nvdimm/btt.c
+++ b/drivers/nvdimm/btt.c
@@ -1482,20 +1482,6 @@ static void btt_submit_bio(struct bio *bio)
bio_endio(bio);
}
-static int btt_rw_page(struct block_device *bdev, sector_t sector,
- struct page *page, enum req_op op)
-{
- struct btt *btt = bdev->bd_disk->private_data;
- int rc;
-
- rc = btt_do_bvec(btt, NULL, page, thp_size(page), 0, op, sector);
- if (rc == 0)
- page_endio(page, op_is_write(op), 0);
-
- return rc;
-}
-
-
static int btt_getgeo(struct block_device *bd, struct hd_geometry *geo)
{
/* some standard values */
@@ -1508,7 +1494,6 @@ static int btt_getgeo(struct block_device *bd, struct hd_geometry *geo)
static const struct block_device_operations btt_fops = {
.owner = THIS_MODULE,
.submit_bio = btt_submit_bio,
- .rw_page = btt_rw_page,
.getgeo = btt_getgeo,
};
@@ -1530,6 +1515,7 @@ static int btt_blk_init(struct btt *btt)
blk_queue_logical_block_size(btt->btt_disk->queue, btt->sector_size);
blk_queue_max_hw_sectors(btt->btt_disk->queue, UINT_MAX);
blk_queue_flag_set(QUEUE_FLAG_NONROT, btt->btt_disk->queue);
+ blk_queue_flag_set(QUEUE_FLAG_SYNCHRONOUS, btt->btt_disk->queue);
if (btt_meta_size(btt)) {
rc = nd_integrity_init(btt->btt_disk, btt_meta_size(btt));
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index 5ad49056921b..4976a0069e9c 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -28,7 +28,7 @@ static int nvdimm_bus_major;
struct class *nd_class;
static DEFINE_IDA(nd_ida);
-static int to_nd_device_type(struct device *dev)
+static int to_nd_device_type(const struct device *dev)
{
if (is_nvdimm(dev))
return ND_DEVICE_DIMM;
@@ -42,7 +42,7 @@ static int to_nd_device_type(struct device *dev)
return 0;
}
-static int nvdimm_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
+static int nvdimm_bus_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
return add_uevent_var(env, "MODALIAS=" ND_DEVICE_MODALIAS_FMT,
to_nd_device_type(dev));
diff --git a/drivers/nvdimm/dax_devs.c b/drivers/nvdimm/dax_devs.c
index 7f4a9d28b670..3bd61f245788 100644
--- a/drivers/nvdimm/dax_devs.c
+++ b/drivers/nvdimm/dax_devs.c
@@ -38,7 +38,7 @@ static const struct device_type nd_dax_device_type = {
.groups = nd_pfn_attribute_groups,
};
-bool is_nd_dax(struct device *dev)
+bool is_nd_dax(const struct device *dev)
{
return dev ? dev->type == &nd_dax_device_type : false;
}
diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c
index 6d3b03a9fa02..957f7c3d17ba 100644
--- a/drivers/nvdimm/dimm_devs.c
+++ b/drivers/nvdimm/dimm_devs.c
@@ -572,7 +572,7 @@ static const struct device_type nvdimm_device_type = {
.groups = nvdimm_attribute_groups,
};
-bool is_nvdimm(struct device *dev)
+bool is_nvdimm(const struct device *dev)
{
return dev->type == &nvdimm_device_type;
}
diff --git a/drivers/nvdimm/nd-core.h b/drivers/nvdimm/nd-core.h
index 845408f10655..86976a9e8a15 100644
--- a/drivers/nvdimm/nd-core.h
+++ b/drivers/nvdimm/nd-core.h
@@ -82,14 +82,14 @@ static inline void nvdimm_security_overwrite_query(struct work_struct *work)
}
#endif
-bool is_nvdimm(struct device *dev);
-bool is_nd_pmem(struct device *dev);
-bool is_nd_volatile(struct device *dev);
-static inline bool is_nd_region(struct device *dev)
+bool is_nvdimm(const struct device *dev);
+bool is_nd_pmem(const struct device *dev);
+bool is_nd_volatile(const struct device *dev);
+static inline bool is_nd_region(const struct device *dev)
{
return is_nd_pmem(dev) || is_nd_volatile(dev);
}
-static inline bool is_memory(struct device *dev)
+static inline bool is_memory(const struct device *dev)
{
return is_nd_pmem(dev) || is_nd_volatile(dev);
}
diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h
index 85ca5b4da3cf..e8b9d27dbb3c 100644
--- a/drivers/nvdimm/nd.h
+++ b/drivers/nvdimm/nd.h
@@ -599,7 +599,7 @@ static inline int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
struct nd_dax *to_nd_dax(struct device *dev);
#if IS_ENABLED(CONFIG_NVDIMM_DAX)
int nd_dax_probe(struct device *dev, struct nd_namespace_common *ndns);
-bool is_nd_dax(struct device *dev);
+bool is_nd_dax(const struct device *dev);
struct device *nd_dax_create(struct nd_region *nd_region);
#else
static inline int nd_dax_probe(struct device *dev,
@@ -608,7 +608,7 @@ static inline int nd_dax_probe(struct device *dev,
return -ENODEV;
}
-static inline bool is_nd_dax(struct device *dev)
+static inline bool is_nd_dax(const struct device *dev)
{
return false;
}
@@ -652,7 +652,7 @@ void devm_namespace_disable(struct device *dev,
struct nd_namespace_common *ndns);
#if IS_ENABLED(CONFIG_ND_CLAIM)
/* max struct page size independent of kernel config */
-#define MAX_STRUCT_PAGE_SIZE 128
+#define MAX_STRUCT_PAGE_SIZE 64
int nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap);
#else
static inline int nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index 61af072ac98f..af7d9301520c 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -13,6 +13,8 @@
#include "pfn.h"
#include "nd.h"
+static const bool page_struct_override = IS_ENABLED(CONFIG_NVDIMM_KMSAN);
+
static void nd_pfn_release(struct device *dev)
{
struct nd_region *nd_region = to_nd_region(dev->parent);
@@ -758,12 +760,6 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
return -ENXIO;
}
- /*
- * Note, we use 64 here for the standard size of struct page,
- * debugging options may cause it to be larger in which case the
- * implementation will limit the pfns advertised through
- * ->direct_access() to those that are included in the memmap.
- */
start = nsio->res.start;
size = resource_size(&nsio->res);
npfns = PHYS_PFN(size - SZ_8K);
@@ -782,20 +778,33 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
}
end_trunc = start + size - ALIGN_DOWN(start + size, align);
if (nd_pfn->mode == PFN_MODE_PMEM) {
+ unsigned long page_map_size = MAX_STRUCT_PAGE_SIZE * npfns;
+
/*
* The altmap should be padded out to the block size used
* when populating the vmemmap. This *should* be equal to
* PMD_SIZE for most architectures.
*
- * Also make sure size of struct page is less than 128. We
- * want to make sure we use large enough size here so that
- * we don't have a dynamic reserve space depending on
- * struct page size. But we also want to make sure we notice
- * when we end up adding new elements to struct page.
+ * Also make sure size of struct page is less than
+ * MAX_STRUCT_PAGE_SIZE. The goal here is compatibility in the
+ * face of production kernel configurations that reduce the
+ * 'struct page' size below MAX_STRUCT_PAGE_SIZE. For debug
+ * kernel configurations that increase the 'struct page' size
+ * above MAX_STRUCT_PAGE_SIZE, the page_struct_override allows
+ * for continuing with the capacity that will be wasted when
+ * reverting to a production kernel configuration. Otherwise,
+ * those configurations are blocked by default.
*/
- BUILD_BUG_ON(sizeof(struct page) > MAX_STRUCT_PAGE_SIZE);
- offset = ALIGN(start + SZ_8K + MAX_STRUCT_PAGE_SIZE * npfns, align)
- - start;
+ if (sizeof(struct page) > MAX_STRUCT_PAGE_SIZE) {
+ if (page_struct_override)
+ page_map_size = sizeof(struct page) * npfns;
+ else {
+ dev_err(&nd_pfn->dev,
+ "Memory debug options prevent using pmem for the page map\n");
+ return -EINVAL;
+ }
+ }
+ offset = ALIGN(start + SZ_8K + page_map_size, align) - start;
} else if (nd_pfn->mode == PFN_MODE_RAM)
offset = ALIGN(start + SZ_8K, align) - start;
else
@@ -818,7 +827,10 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
pfn_sb->version_minor = cpu_to_le16(4);
pfn_sb->end_trunc = cpu_to_le32(end_trunc);
pfn_sb->align = cpu_to_le32(nd_pfn->align);
- pfn_sb->page_struct_size = cpu_to_le16(MAX_STRUCT_PAGE_SIZE);
+ if (sizeof(struct page) > MAX_STRUCT_PAGE_SIZE && page_struct_override)
+ pfn_sb->page_struct_size = cpu_to_le16(sizeof(struct page));
+ else
+ pfn_sb->page_struct_size = cpu_to_le16(MAX_STRUCT_PAGE_SIZE);
pfn_sb->page_size = cpu_to_le32(PAGE_SIZE);
checksum = nd_sb_checksum((struct nd_gen_sb *) pfn_sb);
pfn_sb->checksum = cpu_to_le64(checksum);
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 96e6e9a5f235..ceea55f621cc 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -238,28 +238,6 @@ static void pmem_submit_bio(struct bio *bio)
bio_endio(bio);
}
-static int pmem_rw_page(struct block_device *bdev, sector_t sector,
- struct page *page, enum req_op op)
-{
- struct pmem_device *pmem = bdev->bd_disk->private_data;
- blk_status_t rc;
-
- if (op_is_write(op))
- rc = pmem_do_write(pmem, page, 0, sector, thp_size(page));
- else
- rc = pmem_do_read(pmem, page, 0, sector, thp_size(page));
- /*
- * The ->rw_page interface is subtle and tricky. The core
- * retries on any error, so we can only invoke page_endio() in
- * the successful completion case. Otherwise, we'll see crashes
- * caused by double completion.
- */
- if (rc == 0)
- page_endio(page, op_is_write(op), 0);
-
- return blk_status_to_errno(rc);
-}
-
/* see "strong" declaration in tools/testing/nvdimm/pmem-dax.c */
__weak long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
long nr_pages, enum dax_access_mode mode, void **kaddr,
@@ -310,7 +288,6 @@ __weak long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
static const struct block_device_operations pmem_fops = {
.owner = THIS_MODULE,
.submit_bio = pmem_submit_bio,
- .rw_page = pmem_rw_page,
};
static int pmem_dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
@@ -565,6 +542,7 @@ static int pmem_attach_disk(struct device *dev,
blk_queue_logical_block_size(q, pmem_sector_size(ndns));
blk_queue_max_hw_sectors(q, UINT_MAX);
blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
+ blk_queue_flag_set(QUEUE_FLAG_SYNCHRONOUS, q);
if (pmem->pfn_flags & PFN_MAP)
blk_queue_flag_set(QUEUE_FLAG_DAX, q);
diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
index 83dbf398ea84..8f134d63af13 100644
--- a/drivers/nvdimm/region_devs.c
+++ b/drivers/nvdimm/region_devs.c
@@ -839,12 +839,12 @@ static const struct device_type nd_volatile_device_type = {
.groups = nd_region_attribute_groups,
};
-bool is_nd_pmem(struct device *dev)
+bool is_nd_pmem(const struct device *dev)
{
return dev ? dev->type == &nd_pmem_device_type : false;
}
-bool is_nd_volatile(struct device *dev)
+bool is_nd_volatile(const struct device *dev)
{
return dev ? dev->type == &nd_volatile_device_type : false;
}
diff --git a/drivers/nvme/host/auth.c b/drivers/nvme/host/auth.c
index 4424f53a8a0a..901c59145811 100644
--- a/drivers/nvme/host/auth.c
+++ b/drivers/nvme/host/auth.c
@@ -45,6 +45,8 @@ struct nvme_dhchap_queue_context {
int sess_key_len;
};
+static struct workqueue_struct *nvme_auth_wq;
+
#define nvme_auth_flags_from_qid(qid) \
(qid == 0) ? 0 : BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_RESERVED
#define nvme_auth_queue_from_qid(ctrl, qid) \
@@ -158,7 +160,7 @@ static int nvme_auth_process_dhchap_challenge(struct nvme_ctrl *ctrl,
if (size > CHAP_BUF_SIZE) {
chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
- return NVME_SC_INVALID_FIELD;
+ return -EINVAL;
}
hmac_name = nvme_auth_hmac_name(data->hashid);
@@ -167,7 +169,7 @@ static int nvme_auth_process_dhchap_challenge(struct nvme_ctrl *ctrl,
"qid %d: invalid HASH ID %d\n",
chap->qid, data->hashid);
chap->status = NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
- return NVME_SC_INVALID_FIELD;
+ return -EPROTO;
}
if (chap->hash_id == data->hashid && chap->shash_tfm &&
@@ -193,7 +195,7 @@ static int nvme_auth_process_dhchap_challenge(struct nvme_ctrl *ctrl,
chap->qid, hmac_name, PTR_ERR(chap->shash_tfm));
chap->shash_tfm = NULL;
chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
- return NVME_SC_AUTH_REQUIRED;
+ return -ENOMEM;
}
if (crypto_shash_digestsize(chap->shash_tfm) != data->hl) {
@@ -203,7 +205,7 @@ static int nvme_auth_process_dhchap_challenge(struct nvme_ctrl *ctrl,
crypto_free_shash(chap->shash_tfm);
chap->shash_tfm = NULL;
chap->status = NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
- return NVME_SC_AUTH_REQUIRED;
+ return -EPROTO;
}
chap->hash_id = data->hashid;
@@ -219,7 +221,7 @@ select_kpp:
chap->qid, data->dhgid);
chap->status = NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
/* Leave previous dh_tfm intact */
- return NVME_SC_AUTH_REQUIRED;
+ return -EPROTO;
}
if (chap->dhgroup_id == data->dhgid &&
@@ -242,7 +244,7 @@ select_kpp:
"qid %d: empty DH value\n",
chap->qid);
chap->status = NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
- return NVME_SC_INVALID_FIELD;
+ return -EPROTO;
}
chap->dh_tfm = crypto_alloc_kpp(kpp_name, 0, 0);
@@ -254,7 +256,7 @@ select_kpp:
chap->qid, ret, gid_name);
chap->status = NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
chap->dh_tfm = NULL;
- return NVME_SC_AUTH_REQUIRED;
+ return -ret;
}
dev_dbg(ctrl->device, "qid %d: selected DH group %s\n",
chap->qid, gid_name);
@@ -263,7 +265,7 @@ select_kpp:
"qid %d: invalid DH value for NULL DH\n",
chap->qid);
chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
- return NVME_SC_INVALID_FIELD;
+ return -EPROTO;
}
chap->dhgroup_id = data->dhgid;
@@ -274,7 +276,7 @@ skip_kpp:
chap->ctrl_key = kmalloc(dhvlen, GFP_KERNEL);
if (!chap->ctrl_key) {
chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
- return NVME_SC_AUTH_REQUIRED;
+ return -ENOMEM;
}
chap->ctrl_key_len = dhvlen;
memcpy(chap->ctrl_key, data->cval + chap->hash_len,
@@ -344,7 +346,7 @@ static int nvme_auth_process_dhchap_success1(struct nvme_ctrl *ctrl,
if (size > CHAP_BUF_SIZE) {
chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
- return NVME_SC_INVALID_FIELD;
+ return -EINVAL;
}
if (data->hl != chap->hash_len) {
@@ -352,7 +354,7 @@ static int nvme_auth_process_dhchap_success1(struct nvme_ctrl *ctrl,
"qid %d: invalid hash length %u\n",
chap->qid, data->hl);
chap->status = NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
- return NVME_SC_INVALID_FIELD;
+ return -EPROTO;
}
/* Just print out information for the admin queue */
@@ -376,7 +378,7 @@ static int nvme_auth_process_dhchap_success1(struct nvme_ctrl *ctrl,
"qid %d: controller authentication failed\n",
chap->qid);
chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
- return NVME_SC_AUTH_REQUIRED;
+ return -ECONNREFUSED;
}
/* Just print out information for the admin queue */
@@ -730,7 +732,7 @@ static void nvme_queue_auth_work(struct work_struct *work)
NVME_AUTH_DHCHAP_MESSAGE_CHALLENGE);
if (ret) {
chap->status = ret;
- chap->error = NVME_SC_AUTH_REQUIRED;
+ chap->error = -ECONNREFUSED;
return;
}
@@ -798,7 +800,7 @@ static void nvme_queue_auth_work(struct work_struct *work)
NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1);
if (ret) {
chap->status = ret;
- chap->error = NVME_SC_AUTH_REQUIRED;
+ chap->error = -ECONNREFUSED;
return;
}
@@ -819,7 +821,7 @@ static void nvme_queue_auth_work(struct work_struct *work)
ret = nvme_auth_process_dhchap_success1(ctrl, chap);
if (ret) {
/* Controller authentication failed */
- chap->error = NVME_SC_AUTH_REQUIRED;
+ chap->error = -ECONNREFUSED;
goto fail2;
}
@@ -866,7 +868,7 @@ int nvme_auth_negotiate(struct nvme_ctrl *ctrl, int qid)
chap = &ctrl->dhchap_ctxs[qid];
cancel_work_sync(&chap->auth_work);
- queue_work(nvme_wq, &chap->auth_work);
+ queue_work(nvme_auth_wq, &chap->auth_work);
return 0;
}
EXPORT_SYMBOL_GPL(nvme_auth_negotiate);
@@ -1008,10 +1010,15 @@ EXPORT_SYMBOL_GPL(nvme_auth_free);
int __init nvme_init_auth(void)
{
+ nvme_auth_wq = alloc_workqueue("nvme-auth-wq",
+ WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
+ if (!nvme_auth_wq)
+ return -ENOMEM;
+
nvme_chap_buf_cache = kmem_cache_create("nvme-chap-buf-cache",
CHAP_BUF_SIZE, 0, SLAB_HWCACHE_ALIGN, NULL);
if (!nvme_chap_buf_cache)
- return -ENOMEM;
+ goto err_destroy_workqueue;
nvme_chap_buf_pool = mempool_create(16, mempool_alloc_slab,
mempool_free_slab, nvme_chap_buf_cache);
@@ -1021,6 +1028,8 @@ int __init nvme_init_auth(void)
return 0;
err_destroy_chap_buf_cache:
kmem_cache_destroy(nvme_chap_buf_cache);
+err_destroy_workqueue:
+ destroy_workqueue(nvme_auth_wq);
return -ENOMEM;
}
@@ -1028,4 +1037,5 @@ void __exit nvme_exit_auth(void)
{
mempool_destroy(nvme_chap_buf_pool);
kmem_cache_destroy(nvme_chap_buf_cache);
+ destroy_workqueue(nvme_auth_wq);
}
diff --git a/drivers/nvme/host/constants.c b/drivers/nvme/host/constants.c
index e958d5015585..bc523ca02254 100644
--- a/drivers/nvme/host/constants.c
+++ b/drivers/nvme/host/constants.c
@@ -54,6 +54,14 @@ static const char * const nvme_admin_ops[] = {
[nvme_admin_get_lba_status] = "Get LBA Status",
};
+static const char * const nvme_fabrics_ops[] = {
+ [nvme_fabrics_type_property_set] = "Property Set",
+ [nvme_fabrics_type_property_get] = "Property Get",
+ [nvme_fabrics_type_connect] = "Connect",
+ [nvme_fabrics_type_auth_send] = "Authentication Send",
+ [nvme_fabrics_type_auth_receive] = "Authentication Receive",
+};
+
static const char * const nvme_statuses[] = {
[NVME_SC_SUCCESS] = "Success",
[NVME_SC_INVALID_OPCODE] = "Invalid Command Opcode",
@@ -185,3 +193,11 @@ const unsigned char *nvme_get_admin_opcode_str(u8 opcode)
return nvme_admin_ops[opcode];
return "Unknown";
}
+EXPORT_SYMBOL_GPL(nvme_get_admin_opcode_str);
+
+const unsigned char *nvme_get_fabrics_opcode_str(u8 opcode) {
+ if (opcode < ARRAY_SIZE(nvme_fabrics_ops) && nvme_fabrics_ops[opcode])
+ return nvme_fabrics_ops[opcode];
+ return "Unknown";
+}
+EXPORT_SYMBOL_GPL(nvme_get_fabrics_opcode_str);
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 505e16f20e57..8698410aeb84 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -806,9 +806,7 @@ static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
cmnd->dsm.nr = cpu_to_le32(segments - 1);
cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);
- req->special_vec.bv_page = virt_to_page(range);
- req->special_vec.bv_offset = offset_in_page(range);
- req->special_vec.bv_len = alloc_size;
+ bvec_set_virt(&req->special_vec, range, alloc_size);
req->rq_flags |= RQF_SPECIAL_PAYLOAD;
return BLK_STS_OK;
@@ -1004,7 +1002,7 @@ EXPORT_SYMBOL_GPL(nvme_setup_cmd);
* >0: nvme controller's cqe status response
* <0: kernel error in lieu of controller response
*/
-static int nvme_execute_rq(struct request *rq, bool at_head)
+int nvme_execute_rq(struct request *rq, bool at_head)
{
blk_status_t status;
@@ -1015,6 +1013,7 @@ static int nvme_execute_rq(struct request *rq, bool at_head)
return nvme_req(rq)->status;
return blk_status_to_errno(status);
}
+EXPORT_SYMBOL_NS_GPL(nvme_execute_rq, NVME_TARGET_PASSTHRU);
/*
* Returns 0 on success. If the result is negative, it's a Linux error code;
@@ -1060,41 +1059,12 @@ int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
}
EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd);
-static u32 nvme_known_admin_effects(u8 opcode)
-{
- switch (opcode) {
- case nvme_admin_format_nvm:
- return NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_NCC |
- NVME_CMD_EFFECTS_CSE_MASK;
- case nvme_admin_sanitize_nvm:
- return NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK;
- default:
- break;
- }
- return 0;
-}
-
-static u32 nvme_known_nvm_effects(u8 opcode)
-{
- switch (opcode) {
- case nvme_cmd_write:
- case nvme_cmd_write_zeroes:
- case nvme_cmd_write_uncor:
- return NVME_CMD_EFFECTS_LBCC;
- default:
- return 0;
- }
-}
-
u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode)
{
u32 effects = 0;
if (ns) {
- if (ns->head->effects)
- effects = le32_to_cpu(ns->head->effects->iocs[opcode]);
- if (ns->head->ids.csi == NVME_CSI_NVM)
- effects |= nvme_known_nvm_effects(opcode);
+ effects = le32_to_cpu(ns->head->effects->iocs[opcode]);
if (effects & ~(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC))
dev_warn_once(ctrl->device,
"IO command:%02x has unusual effects:%08x\n",
@@ -1107,17 +1077,14 @@ u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode)
*/
effects &= ~NVME_CMD_EFFECTS_CSE_MASK;
} else {
- if (ctrl->effects)
- effects = le32_to_cpu(ctrl->effects->acs[opcode]);
- effects |= nvme_known_admin_effects(opcode);
+ effects = le32_to_cpu(ctrl->effects->acs[opcode]);
}
return effects;
}
EXPORT_SYMBOL_NS_GPL(nvme_command_effects, NVME_TARGET_PASSTHRU);
-static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
- u8 opcode)
+u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode)
{
u32 effects = nvme_command_effects(ctrl, ns, opcode);
@@ -1135,6 +1102,7 @@ static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
}
return effects;
}
+EXPORT_SYMBOL_NS_GPL(nvme_passthru_start, NVME_TARGET_PASSTHRU);
void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects,
struct nvme_command *cmd, int status)
@@ -1176,17 +1144,6 @@ void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects,
}
EXPORT_SYMBOL_NS_GPL(nvme_passthru_end, NVME_TARGET_PASSTHRU);
-int nvme_execute_passthru_rq(struct request *rq, u32 *effects)
-{
- struct nvme_command *cmd = nvme_req(rq)->cmd;
- struct nvme_ctrl *ctrl = nvme_req(rq)->ctrl;
- struct nvme_ns *ns = rq->q->queuedata;
-
- *effects = nvme_passthru_start(ctrl, ns, cmd->common.opcode);
- return nvme_execute_rq(rq, false);
-}
-EXPORT_SYMBOL_NS_GPL(nvme_execute_passthru_rq, NVME_TARGET_PASSTHRU);
-
/*
* Recommended frequency for KATO commands per NVMe 1.4 section 7.12.1:
*
@@ -3122,6 +3079,62 @@ free_data:
return ret;
}
+static void nvme_init_known_nvm_effects(struct nvme_ctrl *ctrl)
+{
+ struct nvme_effects_log *log = ctrl->effects;
+
+ log->acs[nvme_admin_format_nvm] |= cpu_to_le32(NVME_CMD_EFFECTS_LBCC |
+ NVME_CMD_EFFECTS_NCC |
+ NVME_CMD_EFFECTS_CSE_MASK);
+ log->acs[nvme_admin_sanitize_nvm] |= cpu_to_le32(NVME_CMD_EFFECTS_LBCC |
+ NVME_CMD_EFFECTS_CSE_MASK);
+
+ /*
+ * The spec says the result of a security receive command depends on
+ * the previous security send command. As such, many vendors log this
+ * command as one to submitted only when no other commands to the same
+ * namespace are outstanding. The intention is to tell the host to
+ * prevent mixing security send and receive.
+ *
+ * This driver can only enforce such exclusive access against IO
+ * queues, though. We are not readily able to enforce such a rule for
+ * two commands to the admin queue, which is the only queue that
+ * matters for this command.
+ *
+ * Rather than blindly freezing the IO queues for this effect that
+ * doesn't even apply to IO, mask it off.
+ */
+ log->acs[nvme_admin_security_recv] &= ~NVME_CMD_EFFECTS_CSE_MASK;
+
+ log->iocs[nvme_cmd_write] |= cpu_to_le32(NVME_CMD_EFFECTS_LBCC);
+ log->iocs[nvme_cmd_write_zeroes] |= cpu_to_le32(NVME_CMD_EFFECTS_LBCC);
+ log->iocs[nvme_cmd_write_uncor] |= cpu_to_le32(NVME_CMD_EFFECTS_LBCC);
+}
+
+static int nvme_init_effects(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
+{
+ int ret = 0;
+
+ if (ctrl->effects)
+ return 0;
+
+ if (id->lpa & NVME_CTRL_LPA_CMD_EFFECTS_LOG) {
+ ret = nvme_get_effects_log(ctrl, NVME_CSI_NVM, &ctrl->effects);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (!ctrl->effects) {
+ ctrl->effects = kzalloc(sizeof(*ctrl->effects), GFP_KERNEL);
+ if (!ctrl->effects)
+ return -ENOMEM;
+ xa_store(&ctrl->cels, NVME_CSI_NVM, ctrl->effects, GFP_KERNEL);
+ }
+
+ nvme_init_known_nvm_effects(ctrl);
+ return 0;
+}
+
static int nvme_init_identify(struct nvme_ctrl *ctrl)
{
struct nvme_id_ctrl *id;
@@ -3135,12 +3148,6 @@ static int nvme_init_identify(struct nvme_ctrl *ctrl)
return -EIO;
}
- if (id->lpa & NVME_CTRL_LPA_CMD_EFFECTS_LOG) {
- ret = nvme_get_effects_log(ctrl, NVME_CSI_NVM, &ctrl->effects);
- if (ret < 0)
- goto out_free;
- }
-
if (!(ctrl->ops->flags & NVME_F_FABRICS))
ctrl->cntlid = le16_to_cpu(id->cntlid);
@@ -3163,6 +3170,10 @@ static int nvme_init_identify(struct nvme_ctrl *ctrl)
ret = nvme_init_subsystem(ctrl, id);
if (ret)
goto out_free;
+
+ ret = nvme_init_effects(ctrl, id);
+ if (ret)
+ goto out_free;
}
memcpy(ctrl->subsys->firmware_rev, id->fr,
sizeof(ctrl->subsys->firmware_rev));
@@ -4921,7 +4932,9 @@ out_cleanup_admin_q:
blk_mq_destroy_queue(ctrl->admin_q);
blk_put_queue(ctrl->admin_q);
out_free_tagset:
- blk_mq_free_tag_set(ctrl->admin_tagset);
+ blk_mq_free_tag_set(set);
+ ctrl->admin_q = NULL;
+ ctrl->fabrics_q = NULL;
return ret;
}
EXPORT_SYMBOL_GPL(nvme_alloc_admin_tag_set);
@@ -4983,6 +4996,7 @@ int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
out_free_tag_set:
blk_mq_free_tag_set(set);
+ ctrl->connect_q = NULL;
return ret;
}
EXPORT_SYMBOL_GPL(nvme_alloc_io_tag_set);
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index ce27276f552d..bbaa04a0c502 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -410,7 +410,14 @@ int nvmf_connect_admin_queue(struct nvme_ctrl *ctrl)
result = le32_to_cpu(res.u32);
ctrl->cntlid = result & 0xFFFF;
- if ((result >> 16) & 0x3) {
+ if (result & (NVME_CONNECT_AUTHREQ_ATR | NVME_CONNECT_AUTHREQ_ASCR)) {
+ /* Secure concatenation is not implemented */
+ if (result & NVME_CONNECT_AUTHREQ_ASCR) {
+ dev_warn(ctrl->device,
+ "qid 0: secure concatenation is not supported\n");
+ ret = NVME_SC_AUTH_REQUIRED;
+ goto out_free_data;
+ }
/* Authentication required */
ret = nvme_auth_negotiate(ctrl, 0);
if (ret) {
@@ -486,7 +493,14 @@ int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid)
&cmd, data);
}
result = le32_to_cpu(res.u32);
- if ((result >> 16) & 2) {
+ if (result & (NVME_CONNECT_AUTHREQ_ATR | NVME_CONNECT_AUTHREQ_ASCR)) {
+ /* Secure concatenation is not implemented */
+ if (result & NVME_CONNECT_AUTHREQ_ASCR) {
+ dev_warn(ctrl->device,
+ "qid 0: secure concatenation is not supported\n");
+ ret = NVME_SC_AUTH_REQUIRED;
+ goto out_free_data;
+ }
/* Authentication required */
ret = nvme_auth_negotiate(ctrl, qid);
if (ret) {
@@ -500,6 +514,7 @@ int nvmf_connect_io_queue(struct nvme_ctrl *ctrl, u16 qid)
"qid %u: authentication failed\n", qid);
}
}
+out_free_data:
kfree(data);
return ret;
}
diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
index 06f52db34be9..723e7d5b778f 100644
--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c
@@ -219,6 +219,7 @@ static int nvme_submit_user_cmd(struct request_queue *q,
void __user *meta_buffer, unsigned meta_len, u32 meta_seed,
u64 *result, unsigned timeout, unsigned int flags)
{
+ struct nvme_ns *ns = q->queuedata;
struct nvme_ctrl *ctrl;
struct request *req;
void *meta = NULL;
@@ -241,8 +242,8 @@ static int nvme_submit_user_cmd(struct request_queue *q,
bio = req->bio;
ctrl = nvme_req(req)->ctrl;
- ret = nvme_execute_passthru_rq(req, &effects);
-
+ effects = nvme_passthru_start(ctrl, ns, cmd->common.opcode);
+ ret = nvme_execute_rq(req, false);
if (result)
*result = le64_to_cpu(nvme_req(req)->result.u64);
if (meta)
@@ -554,7 +555,7 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
struct nvme_uring_data d;
struct nvme_command c;
struct request *req;
- blk_opf_t rq_flags = 0;
+ blk_opf_t rq_flags = REQ_ALLOC_CACHE;
blk_mq_req_flags_t blk_flags = 0;
void *meta = NULL;
int ret;
@@ -590,7 +591,7 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
d.timeout_ms = READ_ONCE(cmd->timeout_ms);
if (issue_flags & IO_URING_F_NONBLOCK) {
- rq_flags = REQ_NOWAIT;
+ rq_flags |= REQ_NOWAIT;
blk_flags = BLK_MQ_REQ_NOWAIT;
}
if (issue_flags & IO_URING_F_IOPOLL)
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 424c8a467a0c..bf46f122e9e1 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -1070,7 +1070,8 @@ static inline void nvme_auth_free(struct nvme_ctrl *ctrl) {};
u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
u8 opcode);
-int nvme_execute_passthru_rq(struct request *rq, u32 *effects);
+u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode);
+int nvme_execute_rq(struct request *rq, bool at_head);
void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects,
struct nvme_command *cmd, int status);
struct nvme_ctrl *nvme_ctrl_from_file(struct file *file);
@@ -1086,6 +1087,7 @@ static inline bool nvme_multi_css(struct nvme_ctrl *ctrl)
const unsigned char *nvme_get_error_status_str(u16 status);
const unsigned char *nvme_get_opcode_str(u8 opcode);
const unsigned char *nvme_get_admin_opcode_str(u8 opcode);
+const unsigned char *nvme_get_fabrics_opcode_str(u8 opcode);
#else /* CONFIG_NVME_VERBOSE_ERRORS */
static inline const unsigned char *nvme_get_error_status_str(u16 status)
{
@@ -1099,6 +1101,18 @@ static inline const unsigned char *nvme_get_admin_opcode_str(u8 opcode)
{
return "Admin Cmd";
}
+
+static inline const unsigned char *nvme_get_fabrics_opcode_str(u8 opcode)
+{
+ return "Fabrics Cmd";
+}
#endif /* CONFIG_NVME_VERBOSE_ERRORS */
+static inline const unsigned char *nvme_opcode_str(int qid, u8 opcode, u8 fctype)
+{
+ if (opcode == nvme_fabrics_command)
+ return nvme_get_fabrics_opcode_str(fctype);
+ return qid ? nvme_get_opcode_str(opcode) :
+ nvme_get_admin_opcode_str(opcode);
+}
#endif /* _NVME_H */
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index c734934c407c..5b95c94ee40f 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -42,8 +42,9 @@
* These can be higher, but we need to ensure that any command doesn't
* require an sg allocation that needs more than a page of data.
*/
-#define NVME_MAX_KB_SZ 4096
-#define NVME_MAX_SEGS 127
+#define NVME_MAX_KB_SZ 8192
+#define NVME_MAX_SEGS 128
+#define NVME_MAX_NR_ALLOCATIONS 5
static int use_threaded_interrupts;
module_param(use_threaded_interrupts, int, 0444);
@@ -110,6 +111,7 @@ struct nvme_queue;
static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown);
static void nvme_delete_io_queues(struct nvme_dev *dev);
+static void nvme_update_attrs(struct nvme_dev *dev);
/*
* Represents an NVM Express device. Each nvme_dev is a PCI function.
@@ -215,6 +217,11 @@ struct nvme_queue {
struct completion delete_done;
};
+union nvme_descriptor {
+ struct nvme_sgl_desc *sg_list;
+ __le64 *prp_list;
+};
+
/*
* The nvme_iod describes the data in an I/O.
*
@@ -224,7 +231,6 @@ struct nvme_queue {
struct nvme_iod {
struct nvme_request req;
struct nvme_command cmd;
- bool use_sgl;
bool aborted;
s8 nr_allocations; /* PRP list pool allocations. 0 means small
pool in use */
@@ -232,6 +238,7 @@ struct nvme_iod {
dma_addr_t first_dma;
dma_addr_t meta_dma;
struct sg_table sgt;
+ union nvme_descriptor list[NVME_MAX_NR_ALLOCATIONS];
};
static inline unsigned int nvme_dbbuf_size(struct nvme_dev *dev)
@@ -386,16 +393,6 @@ static int nvme_pci_npages_prp(void)
return DIV_ROUND_UP(8 * nprps, NVME_CTRL_PAGE_SIZE - 8);
}
-/*
- * Calculates the number of pages needed for the SGL segments. For example a 4k
- * page can accommodate 256 SGL descriptors.
- */
-static int nvme_pci_npages_sgl(void)
-{
- return DIV_ROUND_UP(NVME_MAX_SEGS * sizeof(struct nvme_sgl_desc),
- NVME_CTRL_PAGE_SIZE);
-}
-
static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
unsigned int hctx_idx)
{
@@ -509,16 +506,10 @@ static void nvme_commit_rqs(struct blk_mq_hw_ctx *hctx)
spin_unlock(&nvmeq->sq_lock);
}
-static void **nvme_pci_iod_list(struct request *req)
-{
- struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
- return (void **)(iod->sgt.sgl + blk_rq_nr_phys_segments(req));
-}
-
-static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req)
+static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req,
+ int nseg)
{
struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
- int nseg = blk_rq_nr_phys_segments(req);
unsigned int avg_seg_size;
avg_seg_size = DIV_ROUND_UP(blk_rq_payload_bytes(req), nseg);
@@ -540,7 +531,7 @@ static void nvme_free_prps(struct nvme_dev *dev, struct request *req)
int i;
for (i = 0; i < iod->nr_allocations; i++) {
- __le64 *prp_list = nvme_pci_iod_list(req)[i];
+ __le64 *prp_list = iod->list[i].prp_list;
dma_addr_t next_dma_addr = le64_to_cpu(prp_list[last_prp]);
dma_pool_free(dev->prp_page_pool, prp_list, dma_addr);
@@ -548,22 +539,6 @@ static void nvme_free_prps(struct nvme_dev *dev, struct request *req)
}
}
-static void nvme_free_sgls(struct nvme_dev *dev, struct request *req)
-{
- const int last_sg = SGES_PER_PAGE - 1;
- struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
- dma_addr_t dma_addr = iod->first_dma;
- int i;
-
- for (i = 0; i < iod->nr_allocations; i++) {
- struct nvme_sgl_desc *sg_list = nvme_pci_iod_list(req)[i];
- dma_addr_t next_dma_addr = le64_to_cpu((sg_list[last_sg]).addr);
-
- dma_pool_free(dev->prp_page_pool, sg_list, dma_addr);
- dma_addr = next_dma_addr;
- }
-}
-
static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
@@ -579,10 +554,11 @@ static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
dma_unmap_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), 0);
if (iod->nr_allocations == 0)
- dma_pool_free(dev->prp_small_pool, nvme_pci_iod_list(req)[0],
+ dma_pool_free(dev->prp_small_pool, iod->list[0].sg_list,
+ iod->first_dma);
+ else if (iod->nr_allocations == 1)
+ dma_pool_free(dev->prp_page_pool, iod->list[0].sg_list,
iod->first_dma);
- else if (iod->use_sgl)
- nvme_free_sgls(dev, req);
else
nvme_free_prps(dev, req);
mempool_free(iod->sgt.sgl, dev->iod_mempool);
@@ -613,7 +589,6 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
u64 dma_addr = sg_dma_address(sg);
int offset = dma_addr & (NVME_CTRL_PAGE_SIZE - 1);
__le64 *prp_list;
- void **list = nvme_pci_iod_list(req);
dma_addr_t prp_dma;
int nprps, i;
@@ -651,7 +626,7 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
iod->nr_allocations = -1;
return BLK_STS_RESOURCE;
}
- list[0] = prp_list;
+ iod->list[0].prp_list = prp_list;
iod->first_dma = prp_dma;
i = 0;
for (;;) {
@@ -660,7 +635,7 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
if (!prp_list)
goto free_prps;
- list[iod->nr_allocations++] = prp_list;
+ iod->list[iod->nr_allocations++].prp_list = prp_list;
prp_list[0] = old_prp_list[i - 1];
old_prp_list[i - 1] = cpu_to_le64(prp_dma);
i = 1;
@@ -705,13 +680,8 @@ static void nvme_pci_sgl_set_seg(struct nvme_sgl_desc *sge,
dma_addr_t dma_addr, int entries)
{
sge->addr = cpu_to_le64(dma_addr);
- if (entries < SGES_PER_PAGE) {
- sge->length = cpu_to_le32(entries * sizeof(*sge));
- sge->type = NVME_SGL_FMT_LAST_SEG_DESC << 4;
- } else {
- sge->length = cpu_to_le32(NVME_CTRL_PAGE_SIZE);
- sge->type = NVME_SGL_FMT_SEG_DESC << 4;
- }
+ sge->length = cpu_to_le32(entries * sizeof(*sge));
+ sge->type = NVME_SGL_FMT_LAST_SEG_DESC << 4;
}
static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
@@ -747,34 +717,16 @@ static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
return BLK_STS_RESOURCE;
}
- nvme_pci_iod_list(req)[0] = sg_list;
+ iod->list[0].sg_list = sg_list;
iod->first_dma = sgl_dma;
nvme_pci_sgl_set_seg(&cmd->dptr.sgl, sgl_dma, entries);
-
do {
- if (i == SGES_PER_PAGE) {
- struct nvme_sgl_desc *old_sg_desc = sg_list;
- struct nvme_sgl_desc *link = &old_sg_desc[i - 1];
-
- sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &sgl_dma);
- if (!sg_list)
- goto free_sgls;
-
- i = 0;
- nvme_pci_iod_list(req)[iod->nr_allocations++] = sg_list;
- sg_list[i++] = *link;
- nvme_pci_sgl_set_seg(link, sgl_dma, entries);
- }
-
nvme_pci_sgl_set_data(&sg_list[i++], sg);
sg = sg_next(sg);
} while (--entries > 0);
return BLK_STS_OK;
-free_sgls:
- nvme_free_sgls(dev, req);
- return BLK_STS_RESOURCE;
}
static blk_status_t nvme_setup_prp_simple(struct nvme_dev *dev,
@@ -856,8 +808,7 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
goto out_free_sg;
}
- iod->use_sgl = nvme_pci_use_sgls(dev, req);
- if (iod->use_sgl)
+ if (nvme_pci_use_sgls(dev, req, iod->sgt.nents))
ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw);
else
ret = nvme_pci_setup_prps(dev, req, &cmnd->rw);
@@ -1923,6 +1874,8 @@ static void nvme_map_cmb(struct nvme_dev *dev)
if ((dev->cmbsz & (NVME_CMBSZ_WDS | NVME_CMBSZ_RDS)) ==
(NVME_CMBSZ_WDS | NVME_CMBSZ_RDS))
pci_p2pmem_publish(pdev, true);
+
+ nvme_update_attrs(dev);
}
static int nvme_set_host_mem(struct nvme_dev *dev, u32 bits)
@@ -2209,6 +2162,11 @@ static const struct attribute_group *nvme_pci_dev_attr_groups[] = {
NULL,
};
+static void nvme_update_attrs(struct nvme_dev *dev)
+{
+ sysfs_update_group(&dev->ctrl.device->kobj, &nvme_pci_dev_attrs_group);
+}
+
/*
* nirqs is the number of interrupts available for write and read
* queues. The core already reserved an interrupt for the admin queue.
@@ -2509,18 +2467,12 @@ static int nvme_pci_enable(struct nvme_dev *dev)
{
int result = -ENOMEM;
struct pci_dev *pdev = to_pci_dev(dev->dev);
- int dma_address_bits = 64;
if (pci_enable_device_mem(pdev))
return result;
pci_set_master(pdev);
- if (dev->ctrl.quirks & NVME_QUIRK_DMA_ADDRESS_BITS_48)
- dma_address_bits = 48;
- if (dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(dma_address_bits)))
- goto disable;
-
if (readl(dev->bar + NVME_REG_CSTS) == -1) {
result = -ENODEV;
goto disable;
@@ -2704,11 +2656,8 @@ static void nvme_release_prp_pools(struct nvme_dev *dev)
static int nvme_pci_alloc_iod_mempool(struct nvme_dev *dev)
{
- size_t npages = max(nvme_pci_npages_prp(), nvme_pci_npages_sgl());
- size_t alloc_size = sizeof(__le64 *) * npages +
- sizeof(struct scatterlist) * NVME_MAX_SEGS;
+ size_t alloc_size = sizeof(struct scatterlist) * NVME_MAX_SEGS;
- WARN_ON_ONCE(alloc_size > PAGE_SIZE);
dev->iod_mempool = mempool_create_node(1,
mempool_kmalloc, mempool_kfree,
(void *)alloc_size, GFP_KERNEL,
@@ -2970,7 +2919,7 @@ static struct nvme_dev *nvme_pci_alloc_dev(struct pci_dev *pdev,
dev = kzalloc_node(sizeof(*dev), GFP_KERNEL, node);
if (!dev)
- return NULL;
+ return ERR_PTR(-ENOMEM);
INIT_WORK(&dev->ctrl.reset_work, nvme_reset_work);
mutex_init(&dev->shutdown_lock);
@@ -2998,7 +2947,11 @@ static struct nvme_dev *nvme_pci_alloc_dev(struct pci_dev *pdev,
quirks);
if (ret)
goto out_put_device;
-
+
+ if (dev->ctrl.quirks & NVME_QUIRK_DMA_ADDRESS_BITS_48)
+ dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
+ else
+ dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
dma_set_min_align_mask(&pdev->dev, NVME_CTRL_PAGE_SIZE - 1);
dma_set_max_seg_size(&pdev->dev, 0xffffffff);
@@ -3031,8 +2984,8 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
int result = -ENOMEM;
dev = nvme_pci_alloc_dev(pdev, id);
- if (!dev)
- return -ENOMEM;
+ if (IS_ERR(dev))
+ return PTR_ERR(dev);
result = nvme_dev_map(dev);
if (result)
@@ -3423,6 +3376,8 @@ static const struct pci_device_id nvme_id_table[] = {
{ PCI_DEVICE(0x10ec, 0x5762), /* ADATA SX6000LNP */
.driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN |
NVME_QUIRK_BOGUS_NID, },
+ { PCI_DEVICE(0x10ec, 0x5763), /* ADATA SX6000PNP */
+ .driver_data = NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(0x1cc1, 0x8201), /* ADATA SX8200PNP 512GB */
.driver_data = NVME_QUIRK_NO_DEEPEST_PS |
NVME_QUIRK_IGNORE_DEV_SUBNQN, },
@@ -3530,8 +3485,9 @@ static int __init nvme_init(void)
BUILD_BUG_ON(sizeof(struct nvme_create_sq) != 64);
BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64);
BUILD_BUG_ON(IRQ_AFFINITY_MAX_SETS < 2);
- BUILD_BUG_ON(DIV_ROUND_UP(nvme_pci_npages_prp(), NVME_CTRL_PAGE_SIZE) >
- S8_MAX);
+ BUILD_BUG_ON(NVME_MAX_SEGS > SGES_PER_PAGE);
+ BUILD_BUG_ON(sizeof(struct scatterlist) * NVME_MAX_SEGS > PAGE_SIZE);
+ BUILD_BUG_ON(nvme_pci_npages_prp() > NVME_MAX_NR_ALLOCATIONS);
return pci_register_driver(&nvme_driver);
}
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 8cedc1ef496c..1955c0ec209e 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -14,6 +14,7 @@
#include <linux/blk-mq.h>
#include <crypto/hash.h>
#include <net/busy_poll.h>
+#include <trace/events/sock.h>
#include "nvme.h"
#include "fabrics.h"
@@ -905,6 +906,8 @@ static void nvme_tcp_data_ready(struct sock *sk)
{
struct nvme_tcp_queue *queue;
+ trace_sk_data_ready(sk);
+
read_lock_bh(&sk->sk_callback_lock);
queue = sk->sk_user_data;
if (likely(queue && queue->rd_enabled) &&
@@ -2282,10 +2285,13 @@ static enum blk_eh_timer_return nvme_tcp_timeout(struct request *rq)
struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl;
struct nvme_tcp_cmd_pdu *pdu = req->pdu;
+ u8 opc = pdu->cmd.common.opcode, fctype = pdu->cmd.fabrics.fctype;
+ int qid = nvme_tcp_queue_id(req->queue);
dev_warn(ctrl->device,
- "queue %d: timeout request %#x type %d\n",
- nvme_tcp_queue_id(req->queue), rq->tag, pdu->hdr.type);
+ "queue %d: timeout cid %#x type %d opcode %#x (%s)\n",
+ nvme_tcp_queue_id(req->queue), nvme_cid(rq), pdu->hdr.type,
+ opc, nvme_opcode_str(qid, opc, fctype));
if (ctrl->state != NVME_CTRL_LIVE) {
/*
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index 6a54ed6fb121..80099df37314 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -840,7 +840,7 @@ void nvmet_execute_set_features(struct nvmet_req *req)
u16 nsqr;
u16 ncqr;
- if (!nvmet_check_transfer_len(req, 0))
+ if (!nvmet_check_data_len_lte(req, 0))
return;
switch (cdw10 & 0xff) {
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index ab2627e17bb9..1ab6601fdd5c 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -1685,8 +1685,10 @@ nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport,
else {
queue = nvmet_fc_alloc_target_queue(iod->assoc, 0,
be16_to_cpu(rqst->assoc_cmd.sqsize));
- if (!queue)
+ if (!queue) {
ret = VERR_QUEUE_ALLOC_FAIL;
+ nvmet_fc_tgt_a_put(iod->assoc);
+ }
}
}
diff --git a/drivers/nvme/target/io-cmd-file.c b/drivers/nvme/target/io-cmd-file.c
index 871c4f32f443..2d068439b129 100644
--- a/drivers/nvme/target/io-cmd-file.c
+++ b/drivers/nvme/target/io-cmd-file.c
@@ -73,13 +73,6 @@ err:
return ret;
}
-static void nvmet_file_init_bvec(struct bio_vec *bv, struct scatterlist *sg)
-{
- bv->bv_page = sg_page(sg);
- bv->bv_offset = sg->offset;
- bv->bv_len = sg->length;
-}
-
static ssize_t nvmet_file_submit_bvec(struct nvmet_req *req, loff_t pos,
unsigned long nr_segs, size_t count, int ki_flags)
{
@@ -146,7 +139,8 @@ static bool nvmet_file_execute_io(struct nvmet_req *req, int ki_flags)
memset(&req->f.iocb, 0, sizeof(struct kiocb));
for_each_sg(req->sg, sg, req->sg_cnt, i) {
- nvmet_file_init_bvec(&req->f.bvec[bv_cnt], sg);
+ bvec_set_page(&req->f.bvec[bv_cnt], sg_page(sg), sg->length,
+ sg->offset);
len += req->f.bvec[bv_cnt].bv_len;
total_len += req->f.bvec[bv_cnt].bv_len;
bv_cnt++;
diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
index adc0958755d6..511c980d538d 100644
--- a/drivers/nvme/target/passthru.c
+++ b/drivers/nvme/target/passthru.c
@@ -216,11 +216,12 @@ static void nvmet_passthru_execute_cmd_work(struct work_struct *w)
struct nvmet_req *req = container_of(w, struct nvmet_req, p.work);
struct request *rq = req->p.rq;
struct nvme_ctrl *ctrl = nvme_req(rq)->ctrl;
+ struct nvme_ns *ns = rq->q->queuedata;
u32 effects;
int status;
- status = nvme_execute_passthru_rq(rq, &effects);
-
+ effects = nvme_passthru_start(ctrl, ns, req->cmd->common.opcode);
+ status = nvme_execute_rq(rq, false);
if (status == NVME_SC_SUCCESS &&
req->cmd->common.opcode == nvme_admin_identify) {
switch (req->cmd->identify.cns) {
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index cc05c094de22..66e8f9fd0ca7 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -14,6 +14,7 @@
#include <linux/inet.h>
#include <linux/llist.h>
#include <crypto/hash.h>
+#include <trace/events/sock.h>
#include "nvmet.h"
@@ -321,9 +322,8 @@ static void nvmet_tcp_build_pdu_iovec(struct nvmet_tcp_cmd *cmd)
while (length) {
u32 iov_len = min_t(u32, length, sg->length - sg_offset);
- iov->bv_page = sg_page(sg);
- iov->bv_len = sg->length;
- iov->bv_offset = sg->offset + sg_offset;
+ bvec_set_page(iov, sg_page(sg), sg->length,
+ sg->offset + sg_offset);
length -= iov_len;
sg = sg_next(sg);
@@ -1470,6 +1470,8 @@ static void nvmet_tcp_data_ready(struct sock *sk)
{
struct nvmet_tcp_queue *queue;
+ trace_sk_data_ready(sk);
+
read_lock_bh(&sk->sk_callback_lock);
queue = sk->sk_user_data;
if (likely(queue))
@@ -1667,6 +1669,8 @@ static void nvmet_tcp_listen_data_ready(struct sock *sk)
{
struct nvmet_tcp_port *port;
+ trace_sk_data_ready(sk);
+
read_lock_bh(&sk->sk_callback_lock);
port = sk->sk_user_data;
if (!port)
diff --git a/drivers/nvme/target/zns.c b/drivers/nvme/target/zns.c
index 1254cf57e008..7e4292d88016 100644
--- a/drivers/nvme/target/zns.c
+++ b/drivers/nvme/target/zns.c
@@ -254,8 +254,7 @@ static unsigned long nvmet_req_nr_zones_from_slba(struct nvmet_req *req)
{
unsigned int sect = nvmet_lba_to_sect(req->ns, req->cmd->zmr.slba);
- return bdev_nr_zones(req->ns->bdev) -
- (sect >> ilog2(bdev_zone_sectors(req->ns->bdev)));
+ return bdev_nr_zones(req->ns->bdev) - bdev_zone_no(req->ns->bdev, sect);
}
static unsigned long get_nr_zones_from_buf(struct nvmet_req *req, u32 bufsize)
diff --git a/drivers/nvmem/Kconfig b/drivers/nvmem/Kconfig
index 755f551426b5..6dec38805041 100644
--- a/drivers/nvmem/Kconfig
+++ b/drivers/nvmem/Kconfig
@@ -290,9 +290,19 @@ config NVMEM_SPRD_EFUSE
This driver can also be built as a module. If so, the module
will be called nvmem-sprd-efuse.
+config NVMEM_STM32_BSEC_OPTEE_TA
+ def_bool NVMEM_STM32_ROMEM && OPTEE
+ help
+ Say y here to enable the accesses to STM32MP SoC OTPs by the OP-TEE
+ trusted application STM32MP BSEC.
+
+ This library is a used by stm32-romem driver or included in the module
+ called nvmem-stm32-romem.
+
config NVMEM_STM32_ROMEM
tristate "STMicroelectronics STM32 factory-programmed memory support"
depends on ARCH_STM32 || COMPILE_TEST
+ depends on OPTEE || !OPTEE
help
Say y here to enable read-only access for STMicroelectronics STM32
factory-programmed memory area.
diff --git a/drivers/nvmem/Makefile b/drivers/nvmem/Makefile
index fa80fe17e567..6a1efffa88f0 100644
--- a/drivers/nvmem/Makefile
+++ b/drivers/nvmem/Makefile
@@ -61,6 +61,7 @@ obj-$(CONFIG_NVMEM_SPRD_EFUSE) += nvmem_sprd_efuse.o
nvmem_sprd_efuse-y := sprd-efuse.o
obj-$(CONFIG_NVMEM_STM32_ROMEM) += nvmem_stm32_romem.o
nvmem_stm32_romem-y := stm32-romem.o
+nvmem_stm32_romem-$(CONFIG_NVMEM_STM32_BSEC_OPTEE_TA) += stm32-bsec-optee-ta.o
obj-$(CONFIG_NVMEM_SUNPLUS_OCOTP) += nvmem_sunplus_ocotp.o
nvmem_sunplus_ocotp-y := sunplus-ocotp.o
obj-$(CONFIG_NVMEM_SUNXI_SID) += nvmem_sunxi_sid.o
diff --git a/drivers/nvmem/brcm_nvram.c b/drivers/nvmem/brcm_nvram.c
index 34130449f2d2..39aa27942f28 100644
--- a/drivers/nvmem/brcm_nvram.c
+++ b/drivers/nvmem/brcm_nvram.c
@@ -98,6 +98,9 @@ static int brcm_nvram_parse(struct brcm_nvram *priv)
len = le32_to_cpu(header.len);
data = kzalloc(len, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
memcpy_fromio(data, priv->base, len);
data[len - 1] = '\0';
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index 321d7d63e068..174ef3574e07 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -60,6 +60,7 @@ struct nvmem_cell_entry {
struct nvmem_cell {
struct nvmem_cell_entry *entry;
const char *id;
+ int index;
};
static DEFINE_MUTEX(nvmem_mutex);
@@ -501,6 +502,36 @@ static int nvmem_cell_info_to_nvmem_cell_entry(struct nvmem_device *nvmem,
}
/**
+ * nvmem_add_one_cell() - Add one cell information to an nvmem device
+ *
+ * @nvmem: nvmem device to add cells to.
+ * @info: nvmem cell info to add to the device
+ *
+ * Return: 0 or negative error code on failure.
+ */
+int nvmem_add_one_cell(struct nvmem_device *nvmem,
+ const struct nvmem_cell_info *info)
+{
+ struct nvmem_cell_entry *cell;
+ int rval;
+
+ cell = kzalloc(sizeof(*cell), GFP_KERNEL);
+ if (!cell)
+ return -ENOMEM;
+
+ rval = nvmem_cell_info_to_nvmem_cell_entry(nvmem, info, cell);
+ if (rval) {
+ kfree(cell);
+ return rval;
+ }
+
+ nvmem_cell_entry_add(cell);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nvmem_add_one_cell);
+
+/**
* nvmem_add_cells() - Add cell information to an nvmem device
*
* @nvmem: nvmem device to add cells to.
@@ -513,40 +544,15 @@ static int nvmem_add_cells(struct nvmem_device *nvmem,
const struct nvmem_cell_info *info,
int ncells)
{
- struct nvmem_cell_entry **cells;
int i, rval;
- cells = kcalloc(ncells, sizeof(*cells), GFP_KERNEL);
- if (!cells)
- return -ENOMEM;
-
for (i = 0; i < ncells; i++) {
- cells[i] = kzalloc(sizeof(**cells), GFP_KERNEL);
- if (!cells[i]) {
- rval = -ENOMEM;
- goto err;
- }
-
- rval = nvmem_cell_info_to_nvmem_cell_entry(nvmem, &info[i], cells[i]);
- if (rval) {
- kfree(cells[i]);
- goto err;
- }
-
- nvmem_cell_entry_add(cells[i]);
+ rval = nvmem_add_one_cell(nvmem, &info[i]);
+ if (rval)
+ return rval;
}
- /* remove tmp array */
- kfree(cells);
-
return 0;
-err:
- while (i--)
- nvmem_cell_entry_drop(cells[i]);
-
- kfree(cells);
-
- return rval;
}
/**
@@ -682,15 +688,14 @@ static int nvmem_validate_keepouts(struct nvmem_device *nvmem)
static int nvmem_add_cells_from_of(struct nvmem_device *nvmem)
{
- struct device_node *parent, *child;
struct device *dev = &nvmem->dev;
- struct nvmem_cell_entry *cell;
+ struct device_node *child;
const __be32 *addr;
- int len;
+ int len, ret;
- parent = dev->of_node;
+ for_each_child_of_node(dev->of_node, child) {
+ struct nvmem_cell_info info = {0};
- for_each_child_of_node(parent, child) {
addr = of_get_property(child, "reg", &len);
if (!addr)
continue;
@@ -700,40 +705,24 @@ static int nvmem_add_cells_from_of(struct nvmem_device *nvmem)
return -EINVAL;
}
- cell = kzalloc(sizeof(*cell), GFP_KERNEL);
- if (!cell) {
- of_node_put(child);
- return -ENOMEM;
- }
-
- cell->nvmem = nvmem;
- cell->offset = be32_to_cpup(addr++);
- cell->bytes = be32_to_cpup(addr);
- cell->name = kasprintf(GFP_KERNEL, "%pOFn", child);
+ info.offset = be32_to_cpup(addr++);
+ info.bytes = be32_to_cpup(addr);
+ info.name = kasprintf(GFP_KERNEL, "%pOFn", child);
addr = of_get_property(child, "bits", &len);
if (addr && len == (2 * sizeof(u32))) {
- cell->bit_offset = be32_to_cpup(addr++);
- cell->nbits = be32_to_cpup(addr);
+ info.bit_offset = be32_to_cpup(addr++);
+ info.nbits = be32_to_cpup(addr);
}
- if (cell->nbits)
- cell->bytes = DIV_ROUND_UP(
- cell->nbits + cell->bit_offset,
- BITS_PER_BYTE);
+ info.np = of_node_get(child);
- if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
- dev_err(dev, "cell %s unaligned to nvmem stride %d\n",
- cell->name, nvmem->stride);
- /* Cells already added will be freed later. */
- kfree_const(cell->name);
- kfree(cell);
+ ret = nvmem_add_one_cell(nvmem, &info);
+ kfree(info.name);
+ if (ret) {
of_node_put(child);
- return -EINVAL;
+ return ret;
}
-
- cell->np = of_node_get(child);
- nvmem_cell_entry_add(cell);
}
return 0;
@@ -764,37 +753,38 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
if (!nvmem)
return ERR_PTR(-ENOMEM);
- rval = ida_alloc(&nvmem_ida, GFP_KERNEL);
+ rval = ida_alloc(&nvmem_ida, GFP_KERNEL);
if (rval < 0) {
kfree(nvmem);
return ERR_PTR(rval);
}
- if (config->wp_gpio)
- nvmem->wp_gpio = config->wp_gpio;
- else if (!config->ignore_wp)
+ nvmem->id = rval;
+
+ nvmem->dev.type = &nvmem_provider_type;
+ nvmem->dev.bus = &nvmem_bus_type;
+ nvmem->dev.parent = config->dev;
+
+ device_initialize(&nvmem->dev);
+
+ if (!config->ignore_wp)
nvmem->wp_gpio = gpiod_get_optional(config->dev, "wp",
GPIOD_OUT_HIGH);
if (IS_ERR(nvmem->wp_gpio)) {
- ida_free(&nvmem_ida, nvmem->id);
rval = PTR_ERR(nvmem->wp_gpio);
- kfree(nvmem);
- return ERR_PTR(rval);
+ nvmem->wp_gpio = NULL;
+ goto err_put_device;
}
kref_init(&nvmem->refcnt);
INIT_LIST_HEAD(&nvmem->cells);
- nvmem->id = rval;
nvmem->owner = config->owner;
if (!nvmem->owner && config->dev->driver)
nvmem->owner = config->dev->driver->owner;
nvmem->stride = config->stride ?: 1;
nvmem->word_size = config->word_size ?: 1;
nvmem->size = config->size;
- nvmem->dev.type = &nvmem_provider_type;
- nvmem->dev.bus = &nvmem_bus_type;
- nvmem->dev.parent = config->dev;
nvmem->root_only = config->root_only;
nvmem->priv = config->priv;
nvmem->type = config->type;
@@ -822,11 +812,8 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
break;
}
- if (rval) {
- ida_free(&nvmem_ida, nvmem->id);
- kfree(nvmem);
- return ERR_PTR(rval);
- }
+ if (rval)
+ goto err_put_device;
nvmem->read_only = device_property_present(config->dev, "read-only") ||
config->read_only || !nvmem->reg_write;
@@ -835,28 +822,22 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
nvmem->dev.groups = nvmem_dev_groups;
#endif
- dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name);
-
- rval = device_register(&nvmem->dev);
- if (rval)
- goto err_put_device;
-
if (nvmem->nkeepout) {
rval = nvmem_validate_keepouts(nvmem);
if (rval)
- goto err_device_del;
+ goto err_put_device;
}
if (config->compat) {
rval = nvmem_sysfs_setup_compat(nvmem, config);
if (rval)
- goto err_device_del;
+ goto err_put_device;
}
if (config->cells) {
rval = nvmem_add_cells(nvmem, config->cells, config->ncells);
if (rval)
- goto err_teardown_compat;
+ goto err_remove_cells;
}
rval = nvmem_add_cells_from_table(nvmem);
@@ -867,17 +848,20 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
if (rval)
goto err_remove_cells;
+ dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name);
+
+ rval = device_add(&nvmem->dev);
+ if (rval)
+ goto err_remove_cells;
+
blocking_notifier_call_chain(&nvmem_notifier, NVMEM_ADD, nvmem);
return nvmem;
err_remove_cells:
nvmem_device_remove_all_cells(nvmem);
-err_teardown_compat:
if (config->compat)
nvmem_sysfs_remove_compat(nvmem, config);
-err_device_del:
- device_del(&nvmem->dev);
err_put_device:
put_device(&nvmem->dev);
@@ -1127,7 +1111,8 @@ struct nvmem_device *devm_nvmem_device_get(struct device *dev, const char *id)
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_get);
-static struct nvmem_cell *nvmem_create_cell(struct nvmem_cell_entry *entry, const char *id)
+static struct nvmem_cell *nvmem_create_cell(struct nvmem_cell_entry *entry,
+ const char *id, int index)
{
struct nvmem_cell *cell;
const char *name = NULL;
@@ -1146,6 +1131,7 @@ static struct nvmem_cell *nvmem_create_cell(struct nvmem_cell_entry *entry, cons
cell->id = name;
cell->entry = entry;
+ cell->index = index;
return cell;
}
@@ -1184,7 +1170,7 @@ nvmem_cell_get_from_lookup(struct device *dev, const char *con_id)
__nvmem_device_put(nvmem);
cell = ERR_PTR(-ENOENT);
} else {
- cell = nvmem_create_cell(cell_entry, con_id);
+ cell = nvmem_create_cell(cell_entry, con_id, 0);
if (IS_ERR(cell))
__nvmem_device_put(nvmem);
}
@@ -1232,32 +1218,49 @@ struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, const char *id)
struct nvmem_device *nvmem;
struct nvmem_cell_entry *cell_entry;
struct nvmem_cell *cell;
+ struct of_phandle_args cell_spec;
int index = 0;
+ int cell_index = 0;
+ int ret;
/* if cell name exists, find index to the name */
if (id)
index = of_property_match_string(np, "nvmem-cell-names", id);
- cell_np = of_parse_phandle(np, "nvmem-cells", index);
- if (!cell_np)
- return ERR_PTR(-ENOENT);
+ ret = of_parse_phandle_with_optional_args(np, "nvmem-cells",
+ "#nvmem-cell-cells",
+ index, &cell_spec);
+ if (ret)
+ return ERR_PTR(ret);
- nvmem_np = of_get_next_parent(cell_np);
- if (!nvmem_np)
+ if (cell_spec.args_count > 1)
+ return ERR_PTR(-EINVAL);
+
+ cell_np = cell_spec.np;
+ if (cell_spec.args_count)
+ cell_index = cell_spec.args[0];
+
+ nvmem_np = of_get_parent(cell_np);
+ if (!nvmem_np) {
+ of_node_put(cell_np);
return ERR_PTR(-EINVAL);
+ }
nvmem = __nvmem_device_get(nvmem_np, device_match_of_node);
of_node_put(nvmem_np);
- if (IS_ERR(nvmem))
+ if (IS_ERR(nvmem)) {
+ of_node_put(cell_np);
return ERR_CAST(nvmem);
+ }
cell_entry = nvmem_find_cell_entry_by_node(nvmem, cell_np);
+ of_node_put(cell_np);
if (!cell_entry) {
__nvmem_device_put(nvmem);
return ERR_PTR(-ENOENT);
}
- cell = nvmem_create_cell(cell_entry, id);
+ cell = nvmem_create_cell(cell_entry, id, cell_index);
if (IS_ERR(cell))
__nvmem_device_put(nvmem);
@@ -1410,8 +1413,8 @@ static void nvmem_shift_read_buffer_in_place(struct nvmem_cell_entry *cell, void
}
static int __nvmem_cell_read(struct nvmem_device *nvmem,
- struct nvmem_cell_entry *cell,
- void *buf, size_t *len, const char *id)
+ struct nvmem_cell_entry *cell,
+ void *buf, size_t *len, const char *id, int index)
{
int rc;
@@ -1425,7 +1428,7 @@ static int __nvmem_cell_read(struct nvmem_device *nvmem,
nvmem_shift_read_buffer_in_place(cell, buf);
if (nvmem->cell_post_process) {
- rc = nvmem->cell_post_process(nvmem->priv, id,
+ rc = nvmem->cell_post_process(nvmem->priv, id, index,
cell->offset, buf, cell->bytes);
if (rc)
return rc;
@@ -1460,7 +1463,7 @@ void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
if (!buf)
return ERR_PTR(-ENOMEM);
- rc = __nvmem_cell_read(nvmem, cell->entry, buf, len, cell->id);
+ rc = __nvmem_cell_read(nvmem, cell->entry, buf, len, cell->id, cell->index);
if (rc) {
kfree(buf);
return ERR_PTR(rc);
@@ -1773,7 +1776,7 @@ ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem,
if (rc)
return rc;
- rc = __nvmem_cell_read(nvmem, &cell, buf, &len, NULL);
+ rc = __nvmem_cell_read(nvmem, &cell, buf, &len, NULL, 0);
if (rc)
return rc;
diff --git a/drivers/nvmem/imx-ocotp.c b/drivers/nvmem/imx-ocotp.c
index 14284e866f26..e9b52ecb3f72 100644
--- a/drivers/nvmem/imx-ocotp.c
+++ b/drivers/nvmem/imx-ocotp.c
@@ -222,8 +222,8 @@ read_end:
return ret;
}
-static int imx_ocotp_cell_pp(void *context, const char *id, unsigned int offset,
- void *data, size_t bytes)
+static int imx_ocotp_cell_pp(void *context, const char *id, int index,
+ unsigned int offset, void *data, size_t bytes)
{
struct ocotp_priv *priv = context;
diff --git a/drivers/nvmem/qcom-spmi-sdam.c b/drivers/nvmem/qcom-spmi-sdam.c
index 4fcb63507ecd..f822790db49e 100644
--- a/drivers/nvmem/qcom-spmi-sdam.c
+++ b/drivers/nvmem/qcom-spmi-sdam.c
@@ -166,6 +166,7 @@ static const struct of_device_id sdam_match_table[] = {
{ .compatible = "qcom,spmi-sdam" },
{},
};
+MODULE_DEVICE_TABLE(of, sdam_match_table);
static struct platform_driver sdam_driver = {
.driver = {
@@ -174,18 +175,7 @@ static struct platform_driver sdam_driver = {
},
.probe = sdam_probe,
};
-
-static int __init sdam_init(void)
-{
- return platform_driver_register(&sdam_driver);
-}
-subsys_initcall(sdam_init);
-
-static void __exit sdam_exit(void)
-{
- return platform_driver_unregister(&sdam_driver);
-}
-module_exit(sdam_exit);
+module_platform_driver(sdam_driver);
MODULE_DESCRIPTION("QCOM SPMI SDAM driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/nvmem/rave-sp-eeprom.c b/drivers/nvmem/rave-sp-eeprom.c
index 66699d44f73d..c456011b75e8 100644
--- a/drivers/nvmem/rave-sp-eeprom.c
+++ b/drivers/nvmem/rave-sp-eeprom.c
@@ -45,7 +45,7 @@ enum rave_sp_eeprom_header_size {
* @type: Access type (see enum rave_sp_eeprom_access_type)
* @success: Success flag (Success = 1, Failure = 0)
* @data: Read data
-
+ *
* Note this structure corresponds to RSP_*_EEPROM payload from RAVE
* SP ICD
*/
diff --git a/drivers/nvmem/stm32-bsec-optee-ta.c b/drivers/nvmem/stm32-bsec-optee-ta.c
new file mode 100644
index 000000000000..f89ce791dd12
--- /dev/null
+++ b/drivers/nvmem/stm32-bsec-optee-ta.c
@@ -0,0 +1,298 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * OP-TEE STM32MP BSEC PTA interface, used by STM32 ROMEM driver
+ *
+ * Copyright (C) 2022, STMicroelectronics - All Rights Reserved
+ */
+
+#include <linux/tee_drv.h>
+
+#include "stm32-bsec-optee-ta.h"
+
+/*
+ * Read OTP memory
+ *
+ * [in] value[0].a OTP start offset in byte
+ * [in] value[0].b Access type (0:shadow, 1:fuse, 2:lock)
+ * [out] memref[1].buffer Output buffer to store read values
+ * [out] memref[1].size Size of OTP to be read
+ *
+ * Return codes:
+ * TEE_SUCCESS - Invoke command success
+ * TEE_ERROR_BAD_PARAMETERS - Incorrect input param
+ * TEE_ERROR_ACCESS_DENIED - OTP not accessible by caller
+ */
+#define PTA_BSEC_READ_MEM 0x0
+
+/*
+ * Write OTP memory
+ *
+ * [in] value[0].a OTP start offset in byte
+ * [in] value[0].b Access type (0:shadow, 1:fuse, 2:lock)
+ * [in] memref[1].buffer Input buffer to read values
+ * [in] memref[1].size Size of OTP to be written
+ *
+ * Return codes:
+ * TEE_SUCCESS - Invoke command success
+ * TEE_ERROR_BAD_PARAMETERS - Incorrect input param
+ * TEE_ERROR_ACCESS_DENIED - OTP not accessible by caller
+ */
+#define PTA_BSEC_WRITE_MEM 0x1
+
+/* value of PTA_BSEC access type = value[in] b */
+#define SHADOW_ACCESS 0
+#define FUSE_ACCESS 1
+#define LOCK_ACCESS 2
+
+/* Bitfield definition for LOCK status */
+#define LOCK_PERM BIT(30)
+
+/* OP-TEE STM32MP BSEC TA UUID */
+static const uuid_t stm32mp_bsec_ta_uuid =
+ UUID_INIT(0x94cf71ad, 0x80e6, 0x40b5,
+ 0xa7, 0xc6, 0x3d, 0xc5, 0x01, 0xeb, 0x28, 0x03);
+
+/*
+ * Check whether this driver supports the BSEC TA in the TEE instance
+ * represented by the params (ver/data) to this function.
+ */
+static int stm32_bsec_optee_ta_match(struct tee_ioctl_version_data *ver,
+ const void *data)
+{
+ /* Currently this driver only supports GP compliant, OP-TEE based TA */
+ if ((ver->impl_id == TEE_IMPL_ID_OPTEE) &&
+ (ver->gen_caps & TEE_GEN_CAP_GP))
+ return 1;
+ else
+ return 0;
+}
+
+/* Open a session to OP-TEE for STM32MP BSEC TA */
+static int stm32_bsec_ta_open_session(struct tee_context *ctx, u32 *id)
+{
+ struct tee_ioctl_open_session_arg sess_arg;
+ int rc;
+
+ memset(&sess_arg, 0, sizeof(sess_arg));
+ export_uuid(sess_arg.uuid, &stm32mp_bsec_ta_uuid);
+ sess_arg.clnt_login = TEE_IOCTL_LOGIN_REE_KERNEL;
+ sess_arg.num_params = 0;
+
+ rc = tee_client_open_session(ctx, &sess_arg, NULL);
+ if ((rc < 0) || (sess_arg.ret != 0)) {
+ pr_err("%s: tee_client_open_session failed err:%#x, ret:%#x\n",
+ __func__, sess_arg.ret, rc);
+ if (!rc)
+ rc = -EINVAL;
+ } else {
+ *id = sess_arg.session;
+ }
+
+ return rc;
+}
+
+/* close a session to OP-TEE for STM32MP BSEC TA */
+static void stm32_bsec_ta_close_session(void *ctx, u32 id)
+{
+ tee_client_close_session(ctx, id);
+}
+
+/* stm32_bsec_optee_ta_open() - initialize the STM32MP BSEC TA */
+int stm32_bsec_optee_ta_open(struct tee_context **ctx)
+{
+ struct tee_context *tee_ctx;
+ u32 session_id;
+ int rc;
+
+ /* Open context with TEE driver */
+ tee_ctx = tee_client_open_context(NULL, stm32_bsec_optee_ta_match, NULL, NULL);
+ if (IS_ERR(tee_ctx)) {
+ rc = PTR_ERR(tee_ctx);
+ if (rc == -ENOENT)
+ return -EPROBE_DEFER;
+ pr_err("%s: tee_client_open_context failed (%d)\n", __func__, rc);
+
+ return rc;
+ }
+
+ /* Check STM32MP BSEC TA presence */
+ rc = stm32_bsec_ta_open_session(tee_ctx, &session_id);
+ if (rc) {
+ tee_client_close_context(tee_ctx);
+ return rc;
+ }
+
+ stm32_bsec_ta_close_session(tee_ctx, session_id);
+
+ *ctx = tee_ctx;
+
+ return 0;
+}
+
+/* stm32_bsec_optee_ta_open() - release the PTA STM32MP BSEC TA */
+void stm32_bsec_optee_ta_close(void *ctx)
+{
+ tee_client_close_context(ctx);
+}
+
+/* stm32_bsec_optee_ta_read() - nvmem read access using PTA client driver */
+int stm32_bsec_optee_ta_read(struct tee_context *ctx, unsigned int offset,
+ void *buf, size_t bytes)
+{
+ struct tee_shm *shm;
+ struct tee_ioctl_invoke_arg arg;
+ struct tee_param param[2];
+ u8 *shm_buf;
+ u32 start, num_bytes;
+ int ret;
+ u32 session_id;
+
+ ret = stm32_bsec_ta_open_session(ctx, &session_id);
+ if (ret)
+ return ret;
+
+ memset(&arg, 0, sizeof(arg));
+ memset(&param, 0, sizeof(param));
+
+ arg.func = PTA_BSEC_READ_MEM;
+ arg.session = session_id;
+ arg.num_params = 2;
+
+ /* align access on 32bits */
+ start = ALIGN_DOWN(offset, 4);
+ num_bytes = round_up(offset + bytes - start, 4);
+ param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT;
+ param[0].u.value.a = start;
+ param[0].u.value.b = SHADOW_ACCESS;
+
+ shm = tee_shm_alloc_kernel_buf(ctx, num_bytes);
+ if (IS_ERR(shm)) {
+ ret = PTR_ERR(shm);
+ goto out_tee_session;
+ }
+
+ param[1].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT;
+ param[1].u.memref.shm = shm;
+ param[1].u.memref.size = num_bytes;
+
+ ret = tee_client_invoke_func(ctx, &arg, param);
+ if (ret < 0 || arg.ret != 0) {
+ pr_err("TA_BSEC invoke failed TEE err:%#x, ret:%#x\n",
+ arg.ret, ret);
+ if (!ret)
+ ret = -EIO;
+ }
+ if (!ret) {
+ shm_buf = tee_shm_get_va(shm, 0);
+ if (IS_ERR(shm_buf)) {
+ ret = PTR_ERR(shm_buf);
+ pr_err("tee_shm_get_va failed for transmit (%d)\n", ret);
+ } else {
+ /* read data from 32 bits aligned buffer */
+ memcpy(buf, &shm_buf[offset % 4], bytes);
+ }
+ }
+
+ tee_shm_free(shm);
+
+out_tee_session:
+ stm32_bsec_ta_close_session(ctx, session_id);
+
+ return ret;
+}
+
+/* stm32_bsec_optee_ta_write() - nvmem write access using PTA client driver */
+int stm32_bsec_optee_ta_write(struct tee_context *ctx, unsigned int lower,
+ unsigned int offset, void *buf, size_t bytes)
+{ struct tee_shm *shm;
+ struct tee_ioctl_invoke_arg arg;
+ struct tee_param param[2];
+ u8 *shm_buf;
+ int ret;
+ u32 session_id;
+
+ ret = stm32_bsec_ta_open_session(ctx, &session_id);
+ if (ret)
+ return ret;
+
+ /* Allow only writing complete 32-bits aligned words */
+ if ((bytes % 4) || (offset % 4))
+ return -EINVAL;
+
+ memset(&arg, 0, sizeof(arg));
+ memset(&param, 0, sizeof(param));
+
+ arg.func = PTA_BSEC_WRITE_MEM;
+ arg.session = session_id;
+ arg.num_params = 2;
+
+ param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT;
+ param[0].u.value.a = offset;
+ param[0].u.value.b = FUSE_ACCESS;
+
+ shm = tee_shm_alloc_kernel_buf(ctx, bytes);
+ if (IS_ERR(shm)) {
+ ret = PTR_ERR(shm);
+ goto out_tee_session;
+ }
+
+ param[1].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
+ param[1].u.memref.shm = shm;
+ param[1].u.memref.size = bytes;
+
+ shm_buf = tee_shm_get_va(shm, 0);
+ if (IS_ERR(shm_buf)) {
+ ret = PTR_ERR(shm_buf);
+ pr_err("tee_shm_get_va failed for transmit (%d)\n", ret);
+ tee_shm_free(shm);
+
+ goto out_tee_session;
+ }
+
+ memcpy(shm_buf, buf, bytes);
+
+ ret = tee_client_invoke_func(ctx, &arg, param);
+ if (ret < 0 || arg.ret != 0) {
+ pr_err("TA_BSEC invoke failed TEE err:%#x, ret:%#x\n", arg.ret, ret);
+ if (!ret)
+ ret = -EIO;
+ }
+ pr_debug("Write OTPs %d to %zu, ret=%d\n", offset / 4, (offset + bytes) / 4, ret);
+
+ /* Lock the upper OTPs with ECC protection, word programming only */
+ if (!ret && ((offset + bytes) >= (lower * 4))) {
+ u32 start, nb_lock;
+ u32 *lock = (u32 *)shm_buf;
+ int i;
+
+ /*
+ * don't lock the lower OTPs, no ECC protection and incremental
+ * bit programming, a second write is allowed
+ */
+ start = max_t(u32, offset, lower * 4);
+ nb_lock = (offset + bytes - start) / 4;
+
+ param[0].u.value.a = start;
+ param[0].u.value.b = LOCK_ACCESS;
+ param[1].u.memref.size = nb_lock * 4;
+
+ for (i = 0; i < nb_lock; i++)
+ lock[i] = LOCK_PERM;
+
+ ret = tee_client_invoke_func(ctx, &arg, param);
+ if (ret < 0 || arg.ret != 0) {
+ pr_err("TA_BSEC invoke failed TEE err:%#x, ret:%#x\n", arg.ret, ret);
+ if (!ret)
+ ret = -EIO;
+ }
+ pr_debug("Lock upper OTPs %d to %d, ret=%d\n",
+ start / 4, start / 4 + nb_lock, ret);
+ }
+
+ tee_shm_free(shm);
+
+out_tee_session:
+ stm32_bsec_ta_close_session(ctx, session_id);
+
+ return ret;
+}
diff --git a/drivers/nvmem/stm32-bsec-optee-ta.h b/drivers/nvmem/stm32-bsec-optee-ta.h
new file mode 100644
index 000000000000..3966a0535179
--- /dev/null
+++ b/drivers/nvmem/stm32-bsec-optee-ta.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * OP-TEE STM32MP BSEC PTA interface, used by STM32 ROMEM driver
+ *
+ * Copyright (C) 2022, STMicroelectronics - All Rights Reserved
+ */
+
+#if IS_ENABLED(CONFIG_NVMEM_STM32_BSEC_OPTEE_TA)
+/**
+ * stm32_bsec_optee_ta_open() - initialize the STM32 BSEC TA
+ * @ctx: the OP-TEE context on success
+ *
+ * Return:
+ * On success, 0. On failure, -errno.
+ */
+int stm32_bsec_optee_ta_open(struct tee_context **ctx);
+
+/**
+ * stm32_bsec_optee_ta_close() - release the STM32 BSEC TA
+ * @ctx: the OP-TEE context
+ *
+ * This function used to clean the OP-TEE resources initialized in
+ * stm32_bsec_optee_ta_open(); it can be used as callback to
+ * devm_add_action_or_reset()
+ */
+void stm32_bsec_optee_ta_close(void *ctx);
+
+/**
+ * stm32_bsec_optee_ta_read() - nvmem read access using TA client driver
+ * @ctx: the OP-TEE context provided by stm32_bsec_optee_ta_open
+ * @offset: nvmem offset
+ * @buf: buffer to fill with nvem values
+ * @bytes: number of bytes to read
+ *
+ * Return:
+ * On success, 0. On failure, -errno.
+ */
+int stm32_bsec_optee_ta_read(struct tee_context *ctx, unsigned int offset,
+ void *buf, size_t bytes);
+
+/**
+ * stm32_bsec_optee_ta_write() - nvmem write access using TA client driver
+ * @ctx: the OP-TEE context provided by stm32_bsec_optee_ta_open
+ * @lower: number of lower OTP, not protected by ECC
+ * @offset: nvmem offset
+ * @buf: buffer with nvem values
+ * @bytes: number of bytes to write
+ *
+ * Return:
+ * On success, 0. On failure, -errno.
+ */
+int stm32_bsec_optee_ta_write(struct tee_context *ctx, unsigned int lower,
+ unsigned int offset, void *buf, size_t bytes);
+
+#else
+
+static inline int stm32_bsec_optee_ta_open(struct tee_context **ctx)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void stm32_bsec_optee_ta_close(void *ctx)
+{
+}
+
+static inline int stm32_bsec_optee_ta_read(struct tee_context *ctx,
+ unsigned int offset, void *buf,
+ size_t bytes)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int stm32_bsec_optee_ta_write(struct tee_context *ctx,
+ unsigned int lower,
+ unsigned int offset, void *buf,
+ size_t bytes)
+{
+ return -EOPNOTSUPP;
+}
+#endif /* CONFIG_NVMEM_STM32_BSEC_OPTEE_TA */
diff --git a/drivers/nvmem/stm32-romem.c b/drivers/nvmem/stm32-romem.c
index d1d03c2ad081..ba779e26937a 100644
--- a/drivers/nvmem/stm32-romem.c
+++ b/drivers/nvmem/stm32-romem.c
@@ -11,6 +11,9 @@
#include <linux/module.h>
#include <linux/nvmem-provider.h>
#include <linux/of_device.h>
+#include <linux/tee_drv.h>
+
+#include "stm32-bsec-optee-ta.h"
/* BSEC secure service access from non-secure */
#define STM32_SMC_BSEC 0x82001003
@@ -25,12 +28,14 @@
struct stm32_romem_cfg {
int size;
u8 lower;
+ bool ta;
};
struct stm32_romem_priv {
void __iomem *base;
struct nvmem_config cfg;
u8 lower;
+ struct tee_context *ctx;
};
static int stm32_romem_read(void *context, unsigned int offset, void *buf,
@@ -138,12 +143,54 @@ static int stm32_bsec_write(void *context, unsigned int offset, void *buf,
return 0;
}
+static int stm32_bsec_pta_read(void *context, unsigned int offset, void *buf,
+ size_t bytes)
+{
+ struct stm32_romem_priv *priv = context;
+
+ return stm32_bsec_optee_ta_read(priv->ctx, offset, buf, bytes);
+}
+
+static int stm32_bsec_pta_write(void *context, unsigned int offset, void *buf,
+ size_t bytes)
+{
+ struct stm32_romem_priv *priv = context;
+
+ return stm32_bsec_optee_ta_write(priv->ctx, priv->lower, offset, buf, bytes);
+}
+
+static bool stm32_bsec_smc_check(void)
+{
+ u32 val;
+ int ret;
+
+ /* check that the OP-TEE support the BSEC SMC (legacy mode) */
+ ret = stm32_bsec_smc(STM32_SMC_READ_SHADOW, 0, 0, &val);
+
+ return !ret;
+}
+
+static bool optee_presence_check(void)
+{
+ struct device_node *np;
+ bool tee_detected = false;
+
+ /* check that the OP-TEE node is present and available. */
+ np = of_find_compatible_node(NULL, NULL, "linaro,optee-tz");
+ if (np && of_device_is_available(np))
+ tee_detected = true;
+ of_node_put(np);
+
+ return tee_detected;
+}
+
static int stm32_romem_probe(struct platform_device *pdev)
{
const struct stm32_romem_cfg *cfg;
struct device *dev = &pdev->dev;
struct stm32_romem_priv *priv;
struct resource *res;
+ int rc;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -173,15 +220,36 @@ static int stm32_romem_probe(struct platform_device *pdev)
} else {
priv->cfg.size = cfg->size;
priv->lower = cfg->lower;
- priv->cfg.reg_read = stm32_bsec_read;
- priv->cfg.reg_write = stm32_bsec_write;
+ if (cfg->ta || optee_presence_check()) {
+ rc = stm32_bsec_optee_ta_open(&priv->ctx);
+ if (rc) {
+ /* wait for OP-TEE client driver to be up and ready */
+ if (rc == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ /* BSEC PTA is required or SMC not supported */
+ if (cfg->ta || !stm32_bsec_smc_check())
+ return rc;
+ }
+ }
+ if (priv->ctx) {
+ rc = devm_add_action_or_reset(dev, stm32_bsec_optee_ta_close, priv->ctx);
+ if (rc) {
+ dev_err(dev, "devm_add_action_or_reset() failed (%d)\n", rc);
+ return rc;
+ }
+ priv->cfg.reg_read = stm32_bsec_pta_read;
+ priv->cfg.reg_write = stm32_bsec_pta_write;
+ } else {
+ priv->cfg.reg_read = stm32_bsec_read;
+ priv->cfg.reg_write = stm32_bsec_write;
+ }
}
return PTR_ERR_OR_ZERO(devm_nvmem_register(dev, &priv->cfg));
}
/*
- * STM32MP15 BSEC OTP regions: 4096 OTP bits (with 3072 effective bits)
+ * STM32MP15/13 BSEC OTP regions: 4096 OTP bits (with 3072 effective bits)
* => 96 x 32-bits data words
* - Lower: 1K bits, 2:1 redundancy, incremental bit programming
* => 32 (x 32-bits) lower shadow registers = words 0 to 31
@@ -191,6 +259,13 @@ static int stm32_romem_probe(struct platform_device *pdev)
static const struct stm32_romem_cfg stm32mp15_bsec_cfg = {
.size = 384,
.lower = 32,
+ .ta = false,
+};
+
+static const struct stm32_romem_cfg stm32mp13_bsec_cfg = {
+ .size = 384,
+ .lower = 32,
+ .ta = true,
};
static const struct of_device_id stm32_romem_of_match[] = {
@@ -198,7 +273,10 @@ static const struct of_device_id stm32_romem_of_match[] = {
.compatible = "st,stm32mp15-bsec",
.data = (void *)&stm32mp15_bsec_cfg,
}, {
+ .compatible = "st,stm32mp13-bsec",
+ .data = (void *)&stm32mp13_bsec_cfg,
},
+ { /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, stm32_romem_of_match);
diff --git a/drivers/nvmem/sunxi_sid.c b/drivers/nvmem/sunxi_sid.c
index 5750e1f4bcdb..a970f1741cc6 100644
--- a/drivers/nvmem/sunxi_sid.c
+++ b/drivers/nvmem/sunxi_sid.c
@@ -41,8 +41,21 @@ static int sunxi_sid_read(void *context, unsigned int offset,
void *val, size_t bytes)
{
struct sunxi_sid *sid = context;
+ u32 word;
+
+ /* .stride = 4 so offset is guaranteed to be aligned */
+ __ioread32_copy(val, sid->base + sid->value_offset + offset, bytes / 4);
+
+ val += round_down(bytes, 4);
+ offset += round_down(bytes, 4);
+ bytes = bytes % 4;
+
+ if (!bytes)
+ return 0;
- memcpy_fromio(val, sid->base + sid->value_offset + offset, bytes);
+ /* Handle any trailing bytes */
+ word = readl_relaxed(sid->base + sid->value_offset + offset);
+ memcpy(val, &word, bytes);
return 0;
}
@@ -184,15 +197,9 @@ static const struct sunxi_sid_cfg sun8i_h3_cfg = {
.need_register_readout = true,
};
-static const struct sunxi_sid_cfg sun20i_d1_cfg = {
- .value_offset = 0x200,
- .size = 0x100,
-};
-
static const struct sunxi_sid_cfg sun50i_a64_cfg = {
.value_offset = 0x200,
.size = 0x100,
- .need_register_readout = true,
};
static const struct sunxi_sid_cfg sun50i_h6_cfg = {
@@ -205,7 +212,7 @@ static const struct of_device_id sunxi_sid_of_match[] = {
{ .compatible = "allwinner,sun7i-a20-sid", .data = &sun7i_a20_cfg },
{ .compatible = "allwinner,sun8i-a83t-sid", .data = &sun50i_a64_cfg },
{ .compatible = "allwinner,sun8i-h3-sid", .data = &sun8i_h3_cfg },
- { .compatible = "allwinner,sun20i-d1-sid", .data = &sun20i_d1_cfg },
+ { .compatible = "allwinner,sun20i-d1-sid", .data = &sun50i_a64_cfg },
{ .compatible = "allwinner,sun50i-a64-sid", .data = &sun50i_a64_cfg },
{ .compatible = "allwinner,sun50i-h5-sid", .data = &sun50i_a64_cfg },
{ .compatible = "allwinner,sun50i-h6-sid", .data = &sun50i_h6_cfg },
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig
index 80b5fd44ab1c..644386833a7b 100644
--- a/drivers/of/Kconfig
+++ b/drivers/of/Kconfig
@@ -23,7 +23,19 @@ config OF_UNITTEST
that are executed once at boot time, and the results dumped to the
console.
- If unsure, say N here, but this option is safe to enable.
+ This option should only be enabled for a development kernel. The tests
+ will taint the kernel with TAINT_TEST. The tests will cause ERROR and
+ WARNING messages to print on the console. The tests will cause stack
+ traces to print on the console. It is possible that the tests will
+ leave the devicetree in a corrupted state.
+
+ The unittest output will be verbose. Copy the output to a file
+ via capturing the console output or via the dmesg command. Process
+ this file with scripts/dtc/of_unittest_expect to reduce the
+ verbosity, test whether expected output is present, and to
+ summarize the results.
+
+ If unsure, say N here. This option is not safe to enable.
config OF_ALL_DTBS
bool "Build all Device Tree Blobs"
diff --git a/drivers/of/address.c b/drivers/of/address.c
index c34ac33b7338..4c0b169ef9bf 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -626,6 +626,47 @@ u64 of_translate_dma_address(struct device_node *dev, const __be32 *in_addr)
}
EXPORT_SYMBOL(of_translate_dma_address);
+/**
+ * of_translate_dma_region - Translate device tree address and size tuple
+ * @dev: device tree node for which to translate
+ * @prop: pointer into array of cells
+ * @start: return value for the start of the DMA range
+ * @length: return value for the length of the DMA range
+ *
+ * Returns a pointer to the cell immediately following the translated DMA region.
+ */
+const __be32 *of_translate_dma_region(struct device_node *dev, const __be32 *prop,
+ phys_addr_t *start, size_t *length)
+{
+ struct device_node *parent;
+ u64 address, size;
+ int na, ns;
+
+ parent = __of_get_dma_parent(dev);
+ if (!parent)
+ return NULL;
+
+ na = of_bus_n_addr_cells(parent);
+ ns = of_bus_n_size_cells(parent);
+
+ of_node_put(parent);
+
+ address = of_translate_dma_address(dev, prop);
+ if (address == OF_BAD_ADDR)
+ return NULL;
+
+ size = of_read_number(prop + na, ns);
+
+ if (start)
+ *start = address;
+
+ if (length)
+ *length = size;
+
+ return prop + na + ns;
+}
+EXPORT_SYMBOL(of_translate_dma_region);
+
const __be32 *__of_get_address(struct device_node *dev, int index, int bar_no,
u64 *size, unsigned int *flags)
{
@@ -965,8 +1006,19 @@ int of_dma_get_range(struct device_node *np, const struct bus_dma_region **map)
}
of_dma_range_parser_init(&parser, node);
- for_each_of_range(&parser, &range)
+ for_each_of_range(&parser, &range) {
+ if (range.cpu_addr == OF_BAD_ADDR) {
+ pr_err("translation of DMA address(%llx) to CPU address failed node(%pOF)\n",
+ range.bus_addr, node);
+ continue;
+ }
num_ranges++;
+ }
+
+ if (!num_ranges) {
+ ret = -EINVAL;
+ goto out;
+ }
r = kcalloc(num_ranges + 1, sizeof(*r), GFP_KERNEL);
if (!r) {
@@ -975,18 +1027,16 @@ int of_dma_get_range(struct device_node *np, const struct bus_dma_region **map)
}
/*
- * Record all info in the generic DMA ranges array for struct device.
+ * Record all info in the generic DMA ranges array for struct device,
+ * returning an error if we don't find any parsable ranges.
*/
*map = r;
of_dma_range_parser_init(&parser, node);
for_each_of_range(&parser, &range) {
pr_debug("dma_addr(%llx) cpu_addr(%llx) size(%llx)\n",
range.bus_addr, range.cpu_addr, range.size);
- if (range.cpu_addr == OF_BAD_ADDR) {
- pr_err("translation of DMA address(%llx) to CPU address failed node(%pOF)\n",
- range.bus_addr, node);
+ if (range.cpu_addr == OF_BAD_ADDR)
continue;
- }
r->cpu_start = range.cpu_addr;
r->dma_start = range.bus_addr;
r->size = range.size;
diff --git a/drivers/of/base.c b/drivers/of/base.c
index d5a5c35eba72..ac6fde53342f 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -1884,8 +1884,7 @@ static void of_alias_add(struct alias_prop *ap, struct device_node *np,
{
ap->np = np;
ap->id = id;
- strncpy(ap->stem, stem, stem_len);
- ap->stem[stem_len] = 0;
+ strscpy(ap->stem, stem, stem_len + 1);
list_add_tail(&ap->link, &aliases_lookup);
pr_debug("adding DT alias:%s: stem=%s id=%i node=%pOF\n",
ap->alias, ap->stem, ap->id, np);
diff --git a/drivers/of/device.c b/drivers/of/device.c
index c674a13c3055..955bfb3d1a83 100644
--- a/drivers/of/device.c
+++ b/drivers/of/device.c
@@ -248,7 +248,7 @@ const void *of_device_get_match_data(const struct device *dev)
}
EXPORT_SYMBOL(of_device_get_match_data);
-static ssize_t of_device_get_modalias(struct device *dev, char *str, ssize_t len)
+static ssize_t of_device_get_modalias(const struct device *dev, char *str, ssize_t len)
{
const char *compat;
char *c;
@@ -256,7 +256,7 @@ static ssize_t of_device_get_modalias(struct device *dev, char *str, ssize_t len
ssize_t csize;
ssize_t tsize;
- if ((!dev) || (!dev->of_node))
+ if ((!dev) || (!dev->of_node) || dev->of_node_reused)
return -ENODEV;
/* Name & Type */
@@ -372,11 +372,11 @@ void of_device_uevent(const struct device *dev, struct kobj_uevent_env *env)
mutex_unlock(&of_mutex);
}
-int of_device_uevent_modalias(struct device *dev, struct kobj_uevent_env *env)
+int of_device_uevent_modalias(const struct device *dev, struct kobj_uevent_env *env)
{
int sl;
- if ((!dev) || (!dev->of_node))
+ if ((!dev) || (!dev->of_node) || dev->of_node_reused)
return -ENODEV;
/* Devicetree modalias is tricky, we add it in 2 steps */
@@ -385,6 +385,8 @@ int of_device_uevent_modalias(struct device *dev, struct kobj_uevent_env *env)
sl = of_device_get_modalias(dev, &env->buf[env->buflen-1],
sizeof(env->buf) - env->buflen);
+ if (sl < 0)
+ return sl;
if (sl >= (sizeof(env->buf) - env->buflen))
return -ENOMEM;
env->buflen += sl;
diff --git a/drivers/of/dynamic.c b/drivers/of/dynamic.c
index cd3821a6444f..07d93753b12f 100644
--- a/drivers/of/dynamic.c
+++ b/drivers/of/dynamic.c
@@ -329,10 +329,30 @@ void of_node_release(struct kobject *kobj)
{
struct device_node *node = kobj_to_device_node(kobj);
+ /*
+ * can not use '"%pOF", node' in pr_err() calls from this function
+ * because an of_node_get(node) when refcount is already zero
+ * will result in an error and a stack dump
+ */
+
/* We should never be releasing nodes that haven't been detached. */
if (!of_node_check_flag(node, OF_DETACHED)) {
- pr_err("ERROR: Bad of_node_put() on %pOF\n", node);
- dump_stack();
+
+ pr_err("ERROR: %s() detected bad of_node_put() on %pOF/%s\n",
+ __func__, node->parent, node->full_name);
+
+ /*
+ * of unittests will test this path. Do not print the stack
+ * trace when the error is caused by unittest so that we do
+ * not display what a normal developer might reasonably
+ * consider a real bug.
+ */
+ if (!IS_ENABLED(CONFIG_OF_UNITTEST) ||
+ strcmp(node->parent->full_name, "testcase-data")) {
+ dump_stack();
+ pr_err("ERROR: next of_node_put() on this node will result in a kobject warning 'refcount_t: underflow; use-after-free.'\n");
+ }
+
return;
}
if (!of_node_check_flag(node, OF_DYNAMIC))
@@ -357,6 +377,10 @@ void of_node_release(struct kobject *kobj)
__func__, node);
}
+ if (node->child)
+ pr_err("ERROR: %s() unexpected children for %pOF/%s\n",
+ __func__, node->parent, node->full_name);
+
property_list_free(node->properties);
property_list_free(node->deadprops);
fwnode_links_purge(of_fwnode_handle(node));
@@ -419,7 +443,8 @@ struct property *__of_prop_dup(const struct property *prop, gfp_t allocflags)
* another node. The node data are dynamically allocated and all the node
* flags have the OF_DYNAMIC & OF_DETACHED bits set.
*
- * Return: The newly allocated node or NULL on out of memory error.
+ * Return: The newly allocated node or NULL on out of memory error. Use
+ * of_node_put() on it when done to free the memory allocated for it.
*/
struct device_node *__of_node_dup(const struct device_node *np,
const char *full_name)
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index f08b25195ae7..d1a68b6d03b3 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -26,7 +26,6 @@
#include <linux/serial_core.h>
#include <linux/sysfs.h>
#include <linux/random.h>
-#include <linux/kmemleak.h>
#include <asm/setup.h> /* for COMMAND_LINE_SIZE */
#include <asm/page.h>
@@ -525,12 +524,9 @@ static int __init __reserved_mem_reserve_reg(unsigned long node,
size = dt_mem_next_cell(dt_root_size_cells, &prop);
if (size &&
- early_init_dt_reserve_memory(base, size, nomap) == 0) {
+ early_init_dt_reserve_memory(base, size, nomap) == 0)
pr_debug("Reserved memory: reserved region for node '%s': base %pa, size %lu MiB\n",
uname, &base, (unsigned long)(size / SZ_1M));
- if (!nomap)
- kmemleak_alloc_phys(base, size, 0);
- }
else
pr_err("Reserved memory: failed to reserve memory for node '%s': base %pa, size %lu MiB\n",
uname, &base, (unsigned long)(size / SZ_1M));
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index e9bf5236ed89..174900072c18 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -438,10 +438,16 @@ int of_irq_get(struct device_node *dev, int index)
return rc;
domain = irq_find_host(oirq.np);
- if (!domain)
- return -EPROBE_DEFER;
+ if (!domain) {
+ rc = -EPROBE_DEFER;
+ goto out;
+ }
- return irq_create_of_mapping(&oirq);
+ rc = irq_create_of_mapping(&oirq);
+out:
+ of_node_put(oirq.np);
+
+ return rc;
}
EXPORT_SYMBOL_GPL(of_irq_get);
diff --git a/drivers/of/kobj.c b/drivers/of/kobj.c
index 7d3853a5a09a..3dbce1e6f184 100644
--- a/drivers/of/kobj.c
+++ b/drivers/of/kobj.c
@@ -24,7 +24,7 @@ static void of_node_release(struct kobject *kobj)
}
#endif /* CONFIG_OF_DYNAMIC */
-struct kobj_type of_node_ktype = {
+const struct kobj_type of_node_ktype = {
.release = of_node_release,
};
diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
index 65f3b02a0e4e..948efa9f99e3 100644
--- a/drivers/of/of_reserved_mem.c
+++ b/drivers/of/of_reserved_mem.c
@@ -48,9 +48,10 @@ static int __init early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
err = memblock_mark_nomap(base, size);
if (err)
memblock_phys_free(base, size);
- kmemleak_ignore_phys(base);
}
+ kmemleak_ignore_phys(base);
+
return err;
}
@@ -284,6 +285,16 @@ void __init fdt_init_reserved_mem(void)
else
memblock_phys_free(rmem->base,
rmem->size);
+ } else {
+ phys_addr_t end = rmem->base + rmem->size - 1;
+ bool reusable =
+ (of_get_flat_dt_prop(node, "reusable", NULL)) != NULL;
+
+ pr_info("%pa..%pa (%lu KiB) %s %s %s\n",
+ &rmem->base, &end, (unsigned long)(rmem->size / SZ_1K),
+ nomap ? "nomap" : "map",
+ reusable ? "reusable" : "non-reusable",
+ rmem->name ? rmem->name : "unknown");
}
}
}
diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c
index ed4e6c144a68..2e01960f1aeb 100644
--- a/drivers/of/overlay.c
+++ b/drivers/of/overlay.c
@@ -1121,7 +1121,7 @@ static int node_overlaps_later_cs(struct overlay_changeset *remove_ovcs,
* The topmost check is done by exploiting this property. For each
* affected device node in the log list we check if this overlay is
* the one closest to the tail. If another overlay has affected this
- * device node and is closest to the tail, then removal is not permited.
+ * device node and is closest to the tail, then removal is not permitted.
*/
static int overlay_removal_is_ok(struct overlay_changeset *remove_ovcs)
{
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index 81c8c227ab6b..b2bd2e783445 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -222,7 +222,6 @@ static struct amba_device *of_amba_device_create(struct device_node *node,
struct device *parent)
{
struct amba_device *dev;
- const void *prop;
int ret;
pr_debug("Creating amba device %pOF\n", node);
@@ -250,9 +249,7 @@ static struct amba_device *of_amba_device_create(struct device_node *node,
of_device_make_bus_id(&dev->dev);
/* Allow the HW Peripheral ID to be overridden */
- prop = of_get_property(node, "arm,primecell-periphid", NULL);
- if (prop)
- dev->periphid = of_read_ulong(prop, 1);
+ of_property_read_u32(node, "arm,primecell-periphid", &dev->periphid);
ret = of_address_to_resource(node, 0, &dev->res);
if (ret) {
@@ -525,10 +522,11 @@ static int __init of_platform_default_populate_init(void)
if (IS_ENABLED(CONFIG_PPC)) {
struct device_node *boot_display = NULL;
struct platform_device *dev;
+ int display_number = 0;
int ret;
/* Check if we have a MacOS display without a node spec */
- if (of_get_property(of_chosen, "linux,bootx-noscreen", NULL)) {
+ if (of_property_present(of_chosen, "linux,bootx-noscreen")) {
/*
* The old code tried to work out which node was the MacOS
* display based on the address. I'm dropping that since the
@@ -555,16 +553,23 @@ static int __init of_platform_default_populate_init(void)
if (!of_get_property(node, "linux,opened", NULL) ||
!of_get_property(node, "linux,boot-display", NULL))
continue;
- dev = of_platform_device_create(node, "of-display", NULL);
+ dev = of_platform_device_create(node, "of-display.0", NULL);
+ of_node_put(node);
if (WARN_ON(!dev))
return -ENOMEM;
boot_display = node;
+ display_number++;
break;
}
for_each_node_by_type(node, "display") {
+ char buf[14];
+ const char *of_display_format = "of-display.%d";
+
if (!of_get_property(node, "linux,opened", NULL) || node == boot_display)
continue;
- of_platform_device_create(node, "of-display", NULL);
+ ret = snprintf(buf, sizeof(buf), of_display_format, display_number++);
+ if (ret < sizeof(buf))
+ of_platform_device_create(node, buf, NULL);
}
} else {
diff --git a/drivers/of/property.c b/drivers/of/property.c
index 134cfc980b70..ddc75cd50825 100644
--- a/drivers/of/property.c
+++ b/drivers/of/property.c
@@ -1062,20 +1062,6 @@ of_fwnode_device_get_match_data(const struct fwnode_handle *fwnode,
return of_device_get_match_data(dev);
}
-static bool of_is_ancestor_of(struct device_node *test_ancestor,
- struct device_node *child)
-{
- of_node_get(child);
- while (child) {
- if (child == test_ancestor) {
- of_node_put(child);
- return true;
- }
- child = of_get_next_parent(child);
- }
- return false;
-}
-
static struct device_node *of_get_compat_node(struct device_node *np)
{
of_node_get(np);
@@ -1086,7 +1072,7 @@ static struct device_node *of_get_compat_node(struct device_node *np)
np = NULL;
}
- if (of_find_property(np, "compatible", NULL))
+ if (of_property_present(np, "compatible"))
break;
np = of_get_next_parent(np);
@@ -1106,71 +1092,27 @@ static struct device_node *of_get_compat_node_parent(struct device_node *np)
return node;
}
-/**
- * of_link_to_phandle - Add fwnode link to supplier from supplier phandle
- * @con_np: consumer device tree node
- * @sup_np: supplier device tree node
- *
- * Given a phandle to a supplier device tree node (@sup_np), this function
- * finds the device that owns the supplier device tree node and creates a
- * device link from @dev consumer device to the supplier device. This function
- * doesn't create device links for invalid scenarios such as trying to create a
- * link with a parent device as the consumer of its child device. In such
- * cases, it returns an error.
- *
- * Returns:
- * - 0 if fwnode link successfully created to supplier
- * - -EINVAL if the supplier link is invalid and should not be created
- * - -ENODEV if struct device will never be create for supplier
- */
-static int of_link_to_phandle(struct device_node *con_np,
+static void of_link_to_phandle(struct device_node *con_np,
struct device_node *sup_np)
{
- struct device *sup_dev;
- struct device_node *tmp_np = sup_np;
+ struct device_node *tmp_np = of_node_get(sup_np);
- /*
- * Find the device node that contains the supplier phandle. It may be
- * @sup_np or it may be an ancestor of @sup_np.
- */
- sup_np = of_get_compat_node(sup_np);
- if (!sup_np) {
- pr_debug("Not linking %pOFP to %pOFP - No device\n",
- con_np, tmp_np);
- return -ENODEV;
- }
+ /* Check that sup_np and its ancestors are available. */
+ while (tmp_np) {
+ if (of_fwnode_handle(tmp_np)->dev) {
+ of_node_put(tmp_np);
+ break;
+ }
- /*
- * Don't allow linking a device node as a consumer of one of its
- * descendant nodes. By definition, a child node can't be a functional
- * dependency for the parent node.
- */
- if (of_is_ancestor_of(con_np, sup_np)) {
- pr_debug("Not linking %pOFP to %pOFP - is descendant\n",
- con_np, sup_np);
- of_node_put(sup_np);
- return -EINVAL;
- }
+ if (!of_device_is_available(tmp_np)) {
+ of_node_put(tmp_np);
+ return;
+ }
- /*
- * Don't create links to "early devices" that won't have struct devices
- * created for them.
- */
- sup_dev = get_dev_from_fwnode(&sup_np->fwnode);
- if (!sup_dev &&
- (of_node_check_flag(sup_np, OF_POPULATED) ||
- sup_np->fwnode.flags & FWNODE_FLAG_NOT_DEVICE)) {
- pr_debug("Not linking %pOFP to %pOFP - No struct device\n",
- con_np, sup_np);
- of_node_put(sup_np);
- return -ENODEV;
+ tmp_np = of_get_next_parent(tmp_np);
}
- put_device(sup_dev);
fwnode_link_add(of_fwnode_handle(con_np), of_fwnode_handle(sup_np));
- of_node_put(sup_np);
-
- return 0;
}
/**
@@ -1202,8 +1144,8 @@ static struct device_node *parse_prop_cells(struct device_node *np,
if (strcmp(prop_name, list_name))
return NULL;
- if (of_parse_phandle_with_args(np, list_name, cells_name, index,
- &sup_args))
+ if (__of_parse_phandle_with_args(np, list_name, cells_name, 0, index,
+ &sup_args))
return NULL;
return sup_args.np;
@@ -1307,7 +1249,7 @@ DEFINE_SIMPLE_PROP(dmas, "dmas", "#dma-cells")
DEFINE_SIMPLE_PROP(power_domains, "power-domains", "#power-domain-cells")
DEFINE_SIMPLE_PROP(hwlocks, "hwlocks", "#hwlock-cells")
DEFINE_SIMPLE_PROP(extcon, "extcon", NULL)
-DEFINE_SIMPLE_PROP(nvmem_cells, "nvmem-cells", NULL)
+DEFINE_SIMPLE_PROP(nvmem_cells, "nvmem-cells", "#nvmem-cell-cells")
DEFINE_SIMPLE_PROP(phys, "phys", "#phy-cells")
DEFINE_SIMPLE_PROP(wakeup_parent, "wakeup-parent", NULL)
DEFINE_SIMPLE_PROP(pinctrl0, "pinctrl-0", NULL)
@@ -1358,7 +1300,7 @@ static struct device_node *parse_gpio_compat(struct device_node *np,
* Ignore node with gpio-hog property since its gpios are all provided
* by its parent.
*/
- if (of_find_property(np, "gpio-hog", NULL))
+ if (of_property_read_bool(np, "gpio-hog"))
return NULL;
if (of_parse_phandle_with_args(np, prop_name, "#gpio-cells", index,
diff --git a/drivers/of/unittest-data/testcases_common.dtsi b/drivers/of/unittest-data/testcases_common.dtsi
index 19292bbb4cbb..e7887f2301c1 100644
--- a/drivers/of/unittest-data/testcases_common.dtsi
+++ b/drivers/of/unittest-data/testcases_common.dtsi
@@ -17,3 +17,4 @@
#include "tests-address.dtsi"
#include "tests-platform.dtsi"
#include "tests-overlay.dtsi"
+#include "tests-lifecycle.dtsi"
diff --git a/drivers/of/unittest-data/tests-lifecycle.dtsi b/drivers/of/unittest-data/tests-lifecycle.dtsi
new file mode 100644
index 000000000000..28509a8783a7
--- /dev/null
+++ b/drivers/of/unittest-data/tests-lifecycle.dtsi
@@ -0,0 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/ {
+ testcase-data {
+ refcount-node {
+ };
+ };
+};
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
index bc0f1e50a4be..b5a7a31d8bd2 100644
--- a/drivers/of/unittest.c
+++ b/drivers/of/unittest.c
@@ -54,8 +54,9 @@ static struct unittest_results {
* Print the expected message only if the current loglevel will allow
* the actual message to print.
*
- * Do not use EXPECT_BEGIN() or EXPECT_END() for messages generated by
- * pr_debug().
+ * Do not use EXPECT_BEGIN(), EXPECT_END(), EXPECT_NOT_BEGIN(), or
+ * EXPECT_NOT_END() to report messages expected to be reported or not
+ * reported by pr_debug().
*/
#define EXPECT_BEGIN(level, fmt, ...) \
printk(level pr_fmt("EXPECT \\ : ") fmt, ##__VA_ARGS__)
@@ -63,6 +64,12 @@ static struct unittest_results {
#define EXPECT_END(level, fmt, ...) \
printk(level pr_fmt("EXPECT / : ") fmt, ##__VA_ARGS__)
+#define EXPECT_NOT_BEGIN(level, fmt, ...) \
+ printk(level pr_fmt("EXPECT_NOT \\ : ") fmt, ##__VA_ARGS__)
+
+#define EXPECT_NOT_END(level, fmt, ...) \
+ printk(level pr_fmt("EXPECT_NOT / : ") fmt, ##__VA_ARGS__)
+
static void __init of_unittest_find_node_by_name(void)
{
struct device_node *np;
@@ -1488,6 +1495,7 @@ static int __init unittest_data_add(void)
struct device_node *next = np->sibling;
np->parent = of_root;
+ /* this will clear OF_DETACHED in np and children */
attach_node_and_children(np);
np = next;
}
@@ -2998,6 +3006,143 @@ out:
static inline void __init of_unittest_overlay(void) { }
#endif
+static void __init of_unittest_lifecycle(void)
+{
+#ifdef CONFIG_OF_DYNAMIC
+ unsigned int refcount;
+ int found_refcount_one = 0;
+ int put_count = 0;
+ struct device_node *np;
+ struct device_node *prev_sibling, *next_sibling;
+ const char *refcount_path = "/testcase-data/refcount-node";
+ const char *refcount_parent_path = "/testcase-data";
+
+ /*
+ * Node lifecycle tests, non-dynamic node:
+ *
+ * - Decrementing refcount to zero via of_node_put() should cause the
+ * attempt to free the node memory by of_node_release() to fail
+ * because the node is not a dynamic node.
+ *
+ * - Decrementing refcount past zero should result in additional
+ * errors reported.
+ */
+
+ np = of_find_node_by_path(refcount_path);
+ unittest(np, "find refcount_path \"%s\"\n", refcount_path);
+ if (np == NULL)
+ goto out_skip_tests;
+
+ while (!found_refcount_one) {
+
+ if (put_count++ > 10) {
+ unittest(0, "guardrail to avoid infinite loop\n");
+ goto out_skip_tests;
+ }
+
+ refcount = kref_read(&np->kobj.kref);
+ if (refcount == 1)
+ found_refcount_one = 1;
+ else
+ of_node_put(np);
+ }
+
+ EXPECT_BEGIN(KERN_INFO, "OF: ERROR: of_node_release() detected bad of_node_put() on /testcase-data/refcount-node");
+
+ /*
+ * refcount is now one, decrementing to zero will result in a call to
+ * of_node_release() to free the node's memory, which should result
+ * in an error
+ */
+ unittest(1, "/testcase-data/refcount-node is one");
+ of_node_put(np);
+
+ EXPECT_END(KERN_INFO, "OF: ERROR: of_node_release() detected bad of_node_put() on /testcase-data/refcount-node");
+
+
+ /*
+ * expect stack trace for subsequent of_node_put():
+ * __refcount_sub_and_test() calls:
+ * refcount_warn_saturate(r, REFCOUNT_SUB_UAF)
+ *
+ * Not capturing entire WARN_ONCE() trace with EXPECT_*(), just
+ * the first three lines, and the last line.
+ */
+ EXPECT_BEGIN(KERN_INFO, "------------[ cut here ]------------");
+ EXPECT_BEGIN(KERN_INFO, "WARNING: <<all>>");
+ EXPECT_BEGIN(KERN_INFO, "refcount_t: underflow; use-after-free.");
+ EXPECT_BEGIN(KERN_INFO, "---[ end trace <<int>> ]---");
+
+ /* refcount is now zero, this should fail */
+ unittest(1, "/testcase-data/refcount-node is zero");
+ of_node_put(np);
+
+ EXPECT_END(KERN_INFO, "---[ end trace <<int>> ]---");
+ EXPECT_END(KERN_INFO, "refcount_t: underflow; use-after-free.");
+ EXPECT_END(KERN_INFO, "WARNING: <<all>>");
+ EXPECT_END(KERN_INFO, "------------[ cut here ]------------");
+
+ /*
+ * Q. do we expect to get yet another warning?
+ * A. no, the WARNING is from WARN_ONCE()
+ */
+ EXPECT_NOT_BEGIN(KERN_INFO, "------------[ cut here ]------------");
+ EXPECT_NOT_BEGIN(KERN_INFO, "WARNING: <<all>>");
+ EXPECT_NOT_BEGIN(KERN_INFO, "refcount_t: underflow; use-after-free.");
+ EXPECT_NOT_BEGIN(KERN_INFO, "---[ end trace <<int>> ]---");
+
+ unittest(1, "/testcase-data/refcount-node is zero, second time");
+ of_node_put(np);
+
+ EXPECT_NOT_END(KERN_INFO, "---[ end trace <<int>> ]---");
+ EXPECT_NOT_END(KERN_INFO, "refcount_t: underflow; use-after-free.");
+ EXPECT_NOT_END(KERN_INFO, "WARNING: <<all>>");
+ EXPECT_NOT_END(KERN_INFO, "------------[ cut here ]------------");
+
+ /*
+ * refcount of zero will trigger stack traces from any further
+ * attempt to of_node_get() node "refcount-node". One example of
+ * this is where of_unittest_check_node_linkage() will recursively
+ * scan the tree, with 'for_each_child_of_node()' doing an
+ * of_node_get() of the children of a node.
+ *
+ * Prevent the stack trace by removing node "refcount-node" from
+ * its parent's child list.
+ *
+ * WARNING: EVIL, EVIL, EVIL:
+ *
+ * Directly manipulate the child list of node /testcase-data to
+ * remove child refcount-node. This is ignoring all proper methods
+ * of removing a child and will leak a small amount of memory.
+ */
+
+ np = of_find_node_by_path(refcount_parent_path);
+ unittest(np, "find refcount_parent_path \"%s\"\n", refcount_parent_path);
+ unittest(np, "ERROR: devicetree live tree left in a 'bad state' if test fail\n");
+ if (np == NULL)
+ return;
+
+ prev_sibling = np->child;
+ next_sibling = prev_sibling->sibling;
+ if (!strcmp(prev_sibling->full_name, "refcount-node")) {
+ np->child = next_sibling;
+ next_sibling = next_sibling->sibling;
+ }
+ while (next_sibling) {
+ if (!strcmp(next_sibling->full_name, "refcount-node"))
+ prev_sibling->sibling = next_sibling->sibling;
+ prev_sibling = next_sibling;
+ next_sibling = next_sibling->sibling;
+ }
+ of_node_put(np);
+
+ return;
+
+out_skip_tests:
+#endif
+ unittest(0, "One or more lifecycle tests skipped\n");
+}
+
#ifdef CONFIG_OF_OVERLAY
/*
@@ -3502,6 +3647,7 @@ static int __init of_unittest(void)
of_unittest_match_node();
of_unittest_platform_populate();
of_unittest_overlay();
+ of_unittest_lifecycle();
/* Double check linkage after removing testcase data */
of_unittest_check_tree_linkage();
diff --git a/drivers/opp/Kconfig b/drivers/opp/Kconfig
index e8ce47b32735..d7c649a1a981 100644
--- a/drivers/opp/Kconfig
+++ b/drivers/opp/Kconfig
@@ -1,7 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
config PM_OPP
bool
- select SRCU
help
SOCs have a standard set of tuples consisting of frequency and
voltage pairs that the device will support per voltage domain. This
diff --git a/drivers/opp/debugfs.c b/drivers/opp/debugfs.c
index 96a30a032c5f..2c7fb683441e 100644
--- a/drivers/opp/debugfs.c
+++ b/drivers/opp/debugfs.c
@@ -235,7 +235,7 @@ static void opp_migrate_dentry(struct opp_device *opp_dev,
dentry = debugfs_rename(rootdir, opp_dev->dentry, rootdir,
opp_table->dentry_name);
- if (!dentry) {
+ if (IS_ERR(dentry)) {
dev_err(dev, "%s: Failed to rename link from: %s to %s\n",
__func__, dev_name(opp_dev->dev), dev_name(dev));
return;
diff --git a/drivers/parisc/pdc_stable.c b/drivers/parisc/pdc_stable.c
index d6af5726ddf3..2a18f7ba2398 100644
--- a/drivers/parisc/pdc_stable.c
+++ b/drivers/parisc/pdc_stable.c
@@ -274,8 +274,7 @@ pdcspath_hwpath_write(struct pdcspath_entry *entry, const char *buf, size_t coun
/* We'll use a local copy of buf */
count = min_t(size_t, count, sizeof(in)-1);
- strncpy(in, buf, count);
- in[count] = '\0';
+ strscpy(in, buf, count + 1);
/* Let's clean up the target. 0xff is a blank pattern */
memset(&hwpath, 0xff, sizeof(hwpath));
@@ -388,8 +387,7 @@ pdcspath_layer_write(struct pdcspath_entry *entry, const char *buf, size_t count
/* We'll use a local copy of buf */
count = min_t(size_t, count, sizeof(in)-1);
- strncpy(in, buf, count);
- in[count] = '\0';
+ strscpy(in, buf, count + 1);
/* Let's clean up the target. 0 is a blank pattern */
memset(&layers, 0, sizeof(layers));
@@ -756,8 +754,7 @@ static ssize_t pdcs_auto_write(struct kobject *kobj,
/* We'll use a local copy of buf */
count = min_t(size_t, count, sizeof(in)-1);
- strncpy(in, buf, count);
- in[count] = '\0';
+ strscpy(in, buf, count + 1);
/* Current flags are stored in primary boot path entry */
pathentry = &pdcspath_entry_primary;
diff --git a/drivers/parport/Kconfig b/drivers/parport/Kconfig
index 68a4fe4cd60b..5561362224e2 100644
--- a/drivers/parport/Kconfig
+++ b/drivers/parport/Kconfig
@@ -140,17 +140,6 @@ config PARPORT_SUNBPP
found on many Sun machines. Note that many of the newer Ultras
actually have pc style hardware instead.
-config PARPORT_AX88796
- tristate "AX88796 Parallel Port"
- select PARPORT_NOT_PC
- help
- Say Y here if you need support for the parallel port hardware on
- the AX88796 network controller chip. This code is also available
- as a module (say M), called parport_ax88796.
-
- The driver is not dependent on the AX88796 network driver, and
- should not interfere with the networking functions of the chip.
-
config PARPORT_1284
bool "IEEE 1284 transfer modes"
help
diff --git a/drivers/parport/Makefile b/drivers/parport/Makefile
index 022c566c0f32..d4a6b890852d 100644
--- a/drivers/parport/Makefile
+++ b/drivers/parport/Makefile
@@ -18,5 +18,4 @@ obj-$(CONFIG_PARPORT_MFC3) += parport_mfc3.o
obj-$(CONFIG_PARPORT_ATARI) += parport_atari.o
obj-$(CONFIG_PARPORT_SUNBPP) += parport_sunbpp.o
obj-$(CONFIG_PARPORT_GSC) += parport_gsc.o
-obj-$(CONFIG_PARPORT_AX88796) += parport_ax88796.o
obj-$(CONFIG_PARPORT_IP32) += parport_ip32.o
diff --git a/drivers/parport/parport_ax88796.c b/drivers/parport/parport_ax88796.c
deleted file mode 100644
index 54b539809673..000000000000
--- a/drivers/parport/parport_ax88796.c
+++ /dev/null
@@ -1,418 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/* linux/drivers/parport/parport_ax88796.c
- *
- * (c) 2005,2006 Simtec Electronics
- * Ben Dooks <ben@simtec.co.uk>
-*/
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/parport.h>
-#include <linux/interrupt.h>
-#include <linux/errno.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-
-#include <asm/io.h>
-#include <asm/irq.h>
-
-#define AX_SPR_BUSY (1<<7)
-#define AX_SPR_ACK (1<<6)
-#define AX_SPR_PE (1<<5)
-#define AX_SPR_SLCT (1<<4)
-#define AX_SPR_ERR (1<<3)
-
-#define AX_CPR_nDOE (1<<5)
-#define AX_CPR_SLCTIN (1<<3)
-#define AX_CPR_nINIT (1<<2)
-#define AX_CPR_ATFD (1<<1)
-#define AX_CPR_STRB (1<<0)
-
-struct ax_drvdata {
- struct parport *parport;
- struct parport_state suspend;
-
- struct device *dev;
- struct resource *io;
-
- unsigned char irq_enabled;
-
- void __iomem *base;
- void __iomem *spp_data;
- void __iomem *spp_spr;
- void __iomem *spp_cpr;
-};
-
-static inline struct ax_drvdata *pp_to_drv(struct parport *p)
-{
- return p->private_data;
-}
-
-static unsigned char
-parport_ax88796_read_data(struct parport *p)
-{
- struct ax_drvdata *dd = pp_to_drv(p);
-
- return readb(dd->spp_data);
-}
-
-static void
-parport_ax88796_write_data(struct parport *p, unsigned char data)
-{
- struct ax_drvdata *dd = pp_to_drv(p);
-
- writeb(data, dd->spp_data);
-}
-
-static unsigned char
-parport_ax88796_read_control(struct parport *p)
-{
- struct ax_drvdata *dd = pp_to_drv(p);
- unsigned int cpr = readb(dd->spp_cpr);
- unsigned int ret = 0;
-
- if (!(cpr & AX_CPR_STRB))
- ret |= PARPORT_CONTROL_STROBE;
-
- if (!(cpr & AX_CPR_ATFD))
- ret |= PARPORT_CONTROL_AUTOFD;
-
- if (cpr & AX_CPR_nINIT)
- ret |= PARPORT_CONTROL_INIT;
-
- if (!(cpr & AX_CPR_SLCTIN))
- ret |= PARPORT_CONTROL_SELECT;
-
- return ret;
-}
-
-static void
-parport_ax88796_write_control(struct parport *p, unsigned char control)
-{
- struct ax_drvdata *dd = pp_to_drv(p);
- unsigned int cpr = readb(dd->spp_cpr);
-
- cpr &= AX_CPR_nDOE;
-
- if (!(control & PARPORT_CONTROL_STROBE))
- cpr |= AX_CPR_STRB;
-
- if (!(control & PARPORT_CONTROL_AUTOFD))
- cpr |= AX_CPR_ATFD;
-
- if (control & PARPORT_CONTROL_INIT)
- cpr |= AX_CPR_nINIT;
-
- if (!(control & PARPORT_CONTROL_SELECT))
- cpr |= AX_CPR_SLCTIN;
-
- dev_dbg(dd->dev, "write_control: ctrl=%02x, cpr=%02x\n", control, cpr);
- writeb(cpr, dd->spp_cpr);
-
- if (parport_ax88796_read_control(p) != control) {
- dev_err(dd->dev, "write_control: read != set (%02x, %02x)\n",
- parport_ax88796_read_control(p), control);
- }
-}
-
-static unsigned char
-parport_ax88796_read_status(struct parport *p)
-{
- struct ax_drvdata *dd = pp_to_drv(p);
- unsigned int status = readb(dd->spp_spr);
- unsigned int ret = 0;
-
- if (status & AX_SPR_BUSY)
- ret |= PARPORT_STATUS_BUSY;
-
- if (status & AX_SPR_ACK)
- ret |= PARPORT_STATUS_ACK;
-
- if (status & AX_SPR_ERR)
- ret |= PARPORT_STATUS_ERROR;
-
- if (status & AX_SPR_SLCT)
- ret |= PARPORT_STATUS_SELECT;
-
- if (status & AX_SPR_PE)
- ret |= PARPORT_STATUS_PAPEROUT;
-
- return ret;
-}
-
-static unsigned char
-parport_ax88796_frob_control(struct parport *p, unsigned char mask,
- unsigned char val)
-{
- struct ax_drvdata *dd = pp_to_drv(p);
- unsigned char old = parport_ax88796_read_control(p);
-
- dev_dbg(dd->dev, "frob: mask=%02x, val=%02x, old=%02x\n",
- mask, val, old);
-
- parport_ax88796_write_control(p, (old & ~mask) | val);
- return old;
-}
-
-static void
-parport_ax88796_enable_irq(struct parport *p)
-{
- struct ax_drvdata *dd = pp_to_drv(p);
- unsigned long flags;
-
- local_irq_save(flags);
- if (!dd->irq_enabled) {
- enable_irq(p->irq);
- dd->irq_enabled = 1;
- }
- local_irq_restore(flags);
-}
-
-static void
-parport_ax88796_disable_irq(struct parport *p)
-{
- struct ax_drvdata *dd = pp_to_drv(p);
- unsigned long flags;
-
- local_irq_save(flags);
- if (dd->irq_enabled) {
- disable_irq(p->irq);
- dd->irq_enabled = 0;
- }
- local_irq_restore(flags);
-}
-
-static void
-parport_ax88796_data_forward(struct parport *p)
-{
- struct ax_drvdata *dd = pp_to_drv(p);
- void __iomem *cpr = dd->spp_cpr;
-
- writeb((readb(cpr) & ~AX_CPR_nDOE), cpr);
-}
-
-static void
-parport_ax88796_data_reverse(struct parport *p)
-{
- struct ax_drvdata *dd = pp_to_drv(p);
- void __iomem *cpr = dd->spp_cpr;
-
- writeb(readb(cpr) | AX_CPR_nDOE, cpr);
-}
-
-static void
-parport_ax88796_init_state(struct pardevice *d, struct parport_state *s)
-{
- struct ax_drvdata *dd = pp_to_drv(d->port);
-
- memset(s, 0, sizeof(struct parport_state));
-
- dev_dbg(dd->dev, "init_state: %p: state=%p\n", d, s);
- s->u.ax88796.cpr = readb(dd->spp_cpr);
-}
-
-static void
-parport_ax88796_save_state(struct parport *p, struct parport_state *s)
-{
- struct ax_drvdata *dd = pp_to_drv(p);
-
- dev_dbg(dd->dev, "save_state: %p: state=%p\n", p, s);
- s->u.ax88796.cpr = readb(dd->spp_cpr);
-}
-
-static void
-parport_ax88796_restore_state(struct parport *p, struct parport_state *s)
-{
- struct ax_drvdata *dd = pp_to_drv(p);
-
- dev_dbg(dd->dev, "restore_state: %p: state=%p\n", p, s);
- writeb(s->u.ax88796.cpr, dd->spp_cpr);
-}
-
-static struct parport_operations parport_ax88796_ops = {
- .write_data = parport_ax88796_write_data,
- .read_data = parport_ax88796_read_data,
-
- .write_control = parport_ax88796_write_control,
- .read_control = parport_ax88796_read_control,
- .frob_control = parport_ax88796_frob_control,
-
- .read_status = parport_ax88796_read_status,
-
- .enable_irq = parport_ax88796_enable_irq,
- .disable_irq = parport_ax88796_disable_irq,
-
- .data_forward = parport_ax88796_data_forward,
- .data_reverse = parport_ax88796_data_reverse,
-
- .init_state = parport_ax88796_init_state,
- .save_state = parport_ax88796_save_state,
- .restore_state = parport_ax88796_restore_state,
-
- .epp_write_data = parport_ieee1284_epp_write_data,
- .epp_read_data = parport_ieee1284_epp_read_data,
- .epp_write_addr = parport_ieee1284_epp_write_addr,
- .epp_read_addr = parport_ieee1284_epp_read_addr,
-
- .ecp_write_data = parport_ieee1284_ecp_write_data,
- .ecp_read_data = parport_ieee1284_ecp_read_data,
- .ecp_write_addr = parport_ieee1284_ecp_write_addr,
-
- .compat_write_data = parport_ieee1284_write_compat,
- .nibble_read_data = parport_ieee1284_read_nibble,
- .byte_read_data = parport_ieee1284_read_byte,
-
- .owner = THIS_MODULE,
-};
-
-static int parport_ax88796_probe(struct platform_device *pdev)
-{
- struct device *_dev = &pdev->dev;
- struct ax_drvdata *dd;
- struct parport *pp;
- struct resource *res;
- unsigned long size;
- int spacing;
- int irq;
- int ret;
-
- dd = kzalloc(sizeof(*dd), GFP_KERNEL);
- if (!dd)
- return -ENOMEM;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (res == NULL) {
- dev_err(_dev, "no MEM specified\n");
- ret = -ENXIO;
- goto exit_mem;
- }
-
- size = resource_size(res);
- spacing = size / 3;
-
- dd->io = request_mem_region(res->start, size, pdev->name);
- if (dd->io == NULL) {
- dev_err(_dev, "cannot reserve memory\n");
- ret = -ENXIO;
- goto exit_mem;
- }
-
- dd->base = ioremap(res->start, size);
- if (dd->base == NULL) {
- dev_err(_dev, "cannot ioremap region\n");
- ret = -ENXIO;
- goto exit_res;
- }
-
- irq = platform_get_irq(pdev, 0);
- if (irq <= 0)
- irq = PARPORT_IRQ_NONE;
-
- pp = parport_register_port((unsigned long)dd->base, irq,
- PARPORT_DMA_NONE,
- &parport_ax88796_ops);
-
- if (pp == NULL) {
- dev_err(_dev, "failed to register parallel port\n");
- ret = -ENOMEM;
- goto exit_unmap;
- }
-
- pp->private_data = dd;
- dd->parport = pp;
- dd->dev = _dev;
-
- dd->spp_data = dd->base;
- dd->spp_spr = dd->base + (spacing * 1);
- dd->spp_cpr = dd->base + (spacing * 2);
-
- /* initialise the port controls */
- writeb(AX_CPR_STRB, dd->spp_cpr);
-
- if (irq >= 0) {
- /* request irq */
- ret = request_irq(irq, parport_irq_handler,
- IRQF_TRIGGER_FALLING, pdev->name, pp);
-
- if (ret < 0)
- goto exit_port;
-
- dd->irq_enabled = 1;
- }
-
- platform_set_drvdata(pdev, pp);
-
- dev_info(_dev, "attached parallel port driver\n");
- parport_announce_port(pp);
-
- return 0;
-
- exit_port:
- parport_remove_port(pp);
- exit_unmap:
- iounmap(dd->base);
- exit_res:
- release_mem_region(dd->io->start, size);
- exit_mem:
- kfree(dd);
- return ret;
-}
-
-static int parport_ax88796_remove(struct platform_device *pdev)
-{
- struct parport *p = platform_get_drvdata(pdev);
- struct ax_drvdata *dd = pp_to_drv(p);
-
- free_irq(p->irq, p);
- parport_remove_port(p);
- iounmap(dd->base);
- release_mem_region(dd->io->start, resource_size(dd->io));
- kfree(dd);
-
- return 0;
-}
-
-#ifdef CONFIG_PM
-
-static int parport_ax88796_suspend(struct platform_device *dev,
- pm_message_t state)
-{
- struct parport *p = platform_get_drvdata(dev);
- struct ax_drvdata *dd = pp_to_drv(p);
-
- parport_ax88796_save_state(p, &dd->suspend);
- writeb(AX_CPR_nDOE | AX_CPR_STRB, dd->spp_cpr);
- return 0;
-}
-
-static int parport_ax88796_resume(struct platform_device *dev)
-{
- struct parport *p = platform_get_drvdata(dev);
- struct ax_drvdata *dd = pp_to_drv(p);
-
- parport_ax88796_restore_state(p, &dd->suspend);
- return 0;
-}
-
-#else
-#define parport_ax88796_suspend NULL
-#define parport_ax88796_resume NULL
-#endif
-
-MODULE_ALIAS("platform:ax88796-pp");
-
-static struct platform_driver axdrv = {
- .driver = {
- .name = "ax88796-pp",
- },
- .probe = parport_ax88796_probe,
- .remove = parport_ax88796_remove,
- .suspend = parport_ax88796_suspend,
- .resume = parport_ax88796_resume,
-};
-
-module_platform_driver(axdrv);
-
-MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
-MODULE_DESCRIPTION("AX88796 Parport parallel port driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c
index 5784dc20fb38..88e125e36230 100644
--- a/drivers/parport/parport_pc.c
+++ b/drivers/parport/parport_pc.c
@@ -106,15 +106,22 @@ static int pnp_registered_parport;
static void frob_econtrol(struct parport *pb, unsigned char m,
unsigned char v)
{
+ const struct parport_pc_private *priv = pb->physport->private_data;
+ unsigned char ecr_writable = priv->ecr_writable;
unsigned char ectr = 0;
+ unsigned char new;
if (m != 0xff)
ectr = inb(ECONTROL(pb));
- pr_debug("frob_econtrol(%02x,%02x): %02x -> %02x\n",
- m, v, ectr, (ectr & ~m) ^ v);
+ new = (ectr & ~m) ^ v;
+ if (ecr_writable)
+ /* All known users of the ECR mask require bit 0 to be set. */
+ new = (new & ecr_writable) | 1;
- outb((ectr & ~m) ^ v, ECONTROL(pb));
+ pr_debug("frob_econtrol(%02x,%02x): %02x -> %02x\n", m, v, ectr, new);
+
+ outb(new, ECONTROL(pb));
}
static inline void frob_set_mode(struct parport *p, int mode)
@@ -1479,21 +1486,24 @@ static int parport_ECR_present(struct parport *pb)
struct parport_pc_private *priv = pb->private_data;
unsigned char r = 0xc;
- outb(r, CONTROL(pb));
- if ((inb(ECONTROL(pb)) & 0x3) == (r & 0x3)) {
- outb(r ^ 0x2, CONTROL(pb)); /* Toggle bit 1 */
+ if (!priv->ecr_writable) {
+ outb(r, CONTROL(pb));
+ if ((inb(ECONTROL(pb)) & 0x3) == (r & 0x3)) {
+ outb(r ^ 0x2, CONTROL(pb)); /* Toggle bit 1 */
- r = inb(CONTROL(pb));
- if ((inb(ECONTROL(pb)) & 0x2) == (r & 0x2))
- goto no_reg; /* Sure that no ECR register exists */
- }
+ r = inb(CONTROL(pb));
+ if ((inb(ECONTROL(pb)) & 0x2) == (r & 0x2))
+ /* Sure that no ECR register exists */
+ goto no_reg;
+ }
- if ((inb(ECONTROL(pb)) & 0x3) != 0x1)
- goto no_reg;
+ if ((inb(ECONTROL(pb)) & 0x3) != 0x1)
+ goto no_reg;
- ECR_WRITE(pb, 0x34);
- if (inb(ECONTROL(pb)) != 0x35)
- goto no_reg;
+ ECR_WRITE(pb, 0x34);
+ if (inb(ECONTROL(pb)) != 0x35)
+ goto no_reg;
+ }
priv->ecr = 1;
outb(0xc, CONTROL(pb));
@@ -2000,11 +2010,13 @@ static int parport_dma_probe(struct parport *p)
static LIST_HEAD(ports_list);
static DEFINE_SPINLOCK(ports_lock);
-struct parport *parport_pc_probe_port(unsigned long int base,
- unsigned long int base_hi,
- int irq, int dma,
- struct device *dev,
- int irqflags)
+static struct parport *__parport_pc_probe_port(unsigned long int base,
+ unsigned long int base_hi,
+ int irq, int dma,
+ struct device *dev,
+ int irqflags,
+ unsigned int mode_mask,
+ unsigned char ecr_writable)
{
struct parport_pc_private *priv;
struct parport_operations *ops;
@@ -2053,6 +2065,7 @@ struct parport *parport_pc_probe_port(unsigned long int base,
priv->ctr = 0xc;
priv->ctr_writable = ~0x10;
priv->ecr = 0;
+ priv->ecr_writable = ecr_writable;
priv->fifo_depth = 0;
priv->dma_buf = NULL;
priv->dma_handle = 0;
@@ -2116,20 +2129,28 @@ struct parport *parport_pc_probe_port(unsigned long int base,
p->dma != PARPORT_DMA_NOFIFO &&
priv->fifo_depth > 0 && p->irq != PARPORT_IRQ_NONE) {
p->modes |= PARPORT_MODE_ECP | PARPORT_MODE_COMPAT;
+ if (p->dma != PARPORT_DMA_NONE)
+ p->modes |= PARPORT_MODE_DMA;
+ } else
+ /* We can't use the DMA channel after all. */
+ p->dma = PARPORT_DMA_NONE;
+#endif /* Allowed to use FIFO/DMA */
+
+ p->modes &= ~mode_mask;
+
+#ifdef CONFIG_PARPORT_PC_FIFO
+ if ((p->modes & PARPORT_MODE_COMPAT) != 0)
p->ops->compat_write_data = parport_pc_compat_write_block_pio;
#ifdef CONFIG_PARPORT_1284
+ if ((p->modes & PARPORT_MODE_ECP) != 0)
p->ops->ecp_write_data = parport_pc_ecp_write_block_pio;
- /* currently broken, but working on it.. (FB) */
- /* p->ops->ecp_read_data = parport_pc_ecp_read_block_pio; */
-#endif /* IEEE 1284 support */
- if (p->dma != PARPORT_DMA_NONE) {
+#endif
+ if ((p->modes & (PARPORT_MODE_ECP | PARPORT_MODE_COMPAT)) != 0) {
+ if ((p->modes & PARPORT_MODE_DMA) != 0)
pr_cont(", dma %d", p->dma);
- p->modes |= PARPORT_MODE_DMA;
- } else
+ else
pr_cont(", using FIFO");
- } else
- /* We can't use the DMA channel after all. */
- p->dma = PARPORT_DMA_NONE;
+ }
#endif /* Allowed to use FIFO/DMA */
pr_cont(" [");
@@ -2239,6 +2260,16 @@ out1:
platform_device_unregister(pdev);
return NULL;
}
+
+struct parport *parport_pc_probe_port(unsigned long int base,
+ unsigned long int base_hi,
+ int irq, int dma,
+ struct device *dev,
+ int irqflags)
+{
+ return __parport_pc_probe_port(base, base_hi, irq, dma,
+ dev, irqflags, 0, 0);
+}
EXPORT_SYMBOL(parport_pc_probe_port);
void parport_pc_unregister_port(struct parport *p)
@@ -2626,7 +2657,14 @@ static struct parport_pc_pci {
int lo;
int hi;
/* -1 if not there, >6 for offset-method (max BAR is 6) */
- } addr[4];
+ } addr[2];
+
+ /* Bit field of parport modes to exclude. */
+ unsigned int mode_mask;
+
+ /* If non-zero, sets the bitmask of writable ECR bits. In that
+ * case additionally bit 0 will be forcibly set on writes. */
+ unsigned char ecr_writable;
/* If set, this is called immediately after pci_enable_device.
* If it returns non-zero, no probing will take place and the
@@ -2658,12 +2696,19 @@ static struct parport_pc_pci {
/* titan_010l */ { 1, { { 3, -1 }, } },
/* avlab_1p */ { 1, { { 0, 1}, } },
/* avlab_2p */ { 2, { { 0, 1}, { 2, 3 },} },
- /* The Oxford Semi cards are unusual: 954 doesn't support ECP,
- * and 840 locks up if you write 1 to bit 2! */
- /* oxsemi_952 */ { 1, { { 0, 1 }, } },
- /* oxsemi_954 */ { 1, { { 0, -1 }, } },
- /* oxsemi_840 */ { 1, { { 0, 1 }, } },
- /* oxsemi_pcie_pport */ { 1, { { 0, 1 }, } },
+ /* The Oxford Semi cards are unusual: older variants of 954 don't
+ * support ECP, and 840 locks up if you write 1 to bit 2! None
+ * implement nFault or service interrupts and all require 00001
+ * bit pattern to be used for bits 4:0 with ECR writes. */
+ /* oxsemi_952 */ { 1, { { 0, 1 }, },
+ PARPORT_MODE_COMPAT, ECR_MODE_MASK },
+ /* oxsemi_954 */ { 1, { { 0, 1 }, },
+ PARPORT_MODE_ECP |
+ PARPORT_MODE_COMPAT, ECR_MODE_MASK },
+ /* oxsemi_840 */ { 1, { { 0, 1 }, },
+ PARPORT_MODE_COMPAT, ECR_MODE_MASK },
+ /* oxsemi_pcie_pport */ { 1, { { 0, 1 }, },
+ PARPORT_MODE_COMPAT, ECR_MODE_MASK },
/* aks_0100 */ { 1, { { 0, -1 }, } },
/* mobility_pp */ { 1, { { 0, 1 }, } },
/* netmos_9900 */ { 1, { { 0, -1 }, } },
@@ -2831,9 +2876,11 @@ static int parport_pc_pci_probe(struct pci_dev *dev,
id->vendor, id->device, io_lo, io_hi, irq);
}
data->ports[count] =
- parport_pc_probe_port(io_lo, io_hi, irq,
- PARPORT_DMA_NONE, &dev->dev,
- IRQF_SHARED);
+ __parport_pc_probe_port(io_lo, io_hi, irq,
+ PARPORT_DMA_NONE, &dev->dev,
+ IRQF_SHARED,
+ cards[i].mode_mask,
+ cards[i].ecr_writable);
if (data->ports[count])
count++;
}
diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig
index 1569d9a3ada0..42654035654a 100644
--- a/drivers/pci/controller/Kconfig
+++ b/drivers/pci/controller/Kconfig
@@ -9,6 +9,7 @@ config PCI_MVEBU
depends on MVEBU_MBUS
depends on ARM
depends on OF
+ depends on BROKEN
select PCI_BRIDGE_EMUL
help
Add support for Marvell EBU PCIe controller. This PCIe controller
@@ -258,7 +259,7 @@ config PCIE_MEDIATEK_GEN3
MediaTek SoCs.
config VMD
- depends on PCI_MSI && X86_64 && SRCU && !UML
+ depends on PCI_MSI && X86_64 && !UML
tristate "Intel Volume Management Device Driver"
help
Adds support for the Intel Volume Management Device (VMD). VMD is a
@@ -285,7 +286,7 @@ config PCIE_BRCMSTB
config PCI_HYPERV_INTERFACE
tristate "Hyper-V PCI Interface"
- depends on ((X86 && X86_64) || ARM64) && HYPERV && PCI_MSI && PCI_MSI
+ depends on ((X86 && X86_64) || ARM64) && HYPERV && PCI_MSI
help
The Hyper-V PCI Interface is a helper driver allows other drivers to
have a common interface with the Hyper-V PCI frontend driver.
diff --git a/drivers/pci/controller/dwc/Kconfig b/drivers/pci/controller/dwc/Kconfig
index 99ec91e2a5cf..434f6a4f4041 100644
--- a/drivers/pci/controller/dwc/Kconfig
+++ b/drivers/pci/controller/dwc/Kconfig
@@ -92,10 +92,31 @@ config PCI_EXYNOS
functions to implement the driver.
config PCI_IMX6
- bool "Freescale i.MX6/7/8 PCIe controller"
+ bool
+
+config PCI_IMX6_HOST
+ bool "Freescale i.MX6/7/8 PCIe controller host mode"
depends on ARCH_MXC || COMPILE_TEST
depends on PCI_MSI
select PCIE_DW_HOST
+ select PCI_IMX6
+ help
+ Enables support for the PCIe controller in the i.MX SoCs to
+ work in Root Complex mode. The PCI controller on i.MX is based
+ on DesignWare hardware and therefore the driver re-uses the
+ DesignWare core functions to implement the driver.
+
+config PCI_IMX6_EP
+ bool "Freescale i.MX6/7/8 PCIe controller endpoint mode"
+ depends on ARCH_MXC || COMPILE_TEST
+ depends on PCI_ENDPOINT
+ select PCIE_DW_EP
+ select PCI_IMX6
+ help
+ Enables support for the PCIe controller in the i.MX SoCs to
+ work in endpoint mode. The PCI controller on i.MX is based
+ on DesignWare hardware and therefore the driver re-uses the
+ DesignWare core functions to implement the driver.
config PCIE_SPEAR13XX
bool "STMicroelectronics SPEAr PCIe controller"
diff --git a/drivers/pci/controller/dwc/pci-dra7xx.c b/drivers/pci/controller/dwc/pci-dra7xx.c
index 38462ed11d07..4ae807e7cf79 100644
--- a/drivers/pci/controller/dwc/pci-dra7xx.c
+++ b/drivers/pci/controller/dwc/pci-dra7xx.c
@@ -840,7 +840,7 @@ static int dra7xx_pcie_probe(struct platform_device *pdev)
}
dra7xx->mode = mode;
- ret = devm_request_irq(dev, irq, dra7xx_pcie_irq_handler,
+ ret = devm_request_threaded_irq(dev, irq, NULL, dra7xx_pcie_irq_handler,
IRQF_SHARED, "dra7xx-pcie-main", dra7xx);
if (ret) {
dev_err(dev, "failed to request irq\n");
diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c
index 1dde5c579edc..55a0405b921d 100644
--- a/drivers/pci/controller/dwc/pci-imx6.c
+++ b/drivers/pci/controller/dwc/pci-imx6.c
@@ -52,6 +52,9 @@ enum imx6_pcie_variants {
IMX8MQ,
IMX8MM,
IMX8MP,
+ IMX8MQ_EP,
+ IMX8MM_EP,
+ IMX8MP_EP,
};
#define IMX6_PCIE_FLAG_IMX6_PHY BIT(0)
@@ -60,6 +63,7 @@ enum imx6_pcie_variants {
struct imx6_pcie_drvdata {
enum imx6_pcie_variants variant;
+ enum dw_pcie_device_mode mode;
u32 flags;
int dbi_length;
const char *gpr;
@@ -152,24 +156,39 @@ struct imx6_pcie {
static unsigned int imx6_pcie_grp_offset(const struct imx6_pcie *imx6_pcie)
{
WARN_ON(imx6_pcie->drvdata->variant != IMX8MQ &&
+ imx6_pcie->drvdata->variant != IMX8MQ_EP &&
imx6_pcie->drvdata->variant != IMX8MM &&
- imx6_pcie->drvdata->variant != IMX8MP);
+ imx6_pcie->drvdata->variant != IMX8MM_EP &&
+ imx6_pcie->drvdata->variant != IMX8MP &&
+ imx6_pcie->drvdata->variant != IMX8MP_EP);
return imx6_pcie->controller_id == 1 ? IOMUXC_GPR16 : IOMUXC_GPR14;
}
static void imx6_pcie_configure_type(struct imx6_pcie *imx6_pcie)
{
- unsigned int mask, val;
+ unsigned int mask, val, mode;
- if (imx6_pcie->drvdata->variant == IMX8MQ &&
- imx6_pcie->controller_id == 1) {
- mask = IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE;
- val = FIELD_PREP(IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE,
- PCI_EXP_TYPE_ROOT_PORT);
- } else {
+ if (imx6_pcie->drvdata->mode == DW_PCIE_EP_TYPE)
+ mode = PCI_EXP_TYPE_ENDPOINT;
+ else
+ mode = PCI_EXP_TYPE_ROOT_PORT;
+
+ switch (imx6_pcie->drvdata->variant) {
+ case IMX8MQ:
+ case IMX8MQ_EP:
+ if (imx6_pcie->controller_id == 1) {
+ mask = IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE;
+ val = FIELD_PREP(IMX8MQ_GPR12_PCIE2_CTRL_DEVICE_TYPE,
+ mode);
+ } else {
+ mask = IMX6Q_GPR12_DEVICE_TYPE;
+ val = FIELD_PREP(IMX6Q_GPR12_DEVICE_TYPE, mode);
+ }
+ break;
+ default:
mask = IMX6Q_GPR12_DEVICE_TYPE;
- val = FIELD_PREP(IMX6Q_GPR12_DEVICE_TYPE,
- PCI_EXP_TYPE_ROOT_PORT);
+ val = FIELD_PREP(IMX6Q_GPR12_DEVICE_TYPE, mode);
+ break;
}
regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, mask, val);
@@ -304,13 +323,16 @@ static void imx6_pcie_init_phy(struct imx6_pcie *imx6_pcie)
{
switch (imx6_pcie->drvdata->variant) {
case IMX8MM:
+ case IMX8MM_EP:
case IMX8MP:
+ case IMX8MP_EP:
/*
* The PHY initialization had been done in the PHY
* driver, break here directly.
*/
break;
case IMX8MQ:
+ case IMX8MQ_EP:
/*
* TODO: Currently this code assumes external
* oscillator is being used
@@ -561,8 +583,11 @@ static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
case IMX7D:
break;
case IMX8MM:
+ case IMX8MM_EP:
case IMX8MQ:
+ case IMX8MQ_EP:
case IMX8MP:
+ case IMX8MP_EP:
ret = clk_prepare_enable(imx6_pcie->pcie_aux);
if (ret) {
dev_err(dev, "unable to enable pcie_aux clock\n");
@@ -606,8 +631,11 @@ static void imx6_pcie_disable_ref_clk(struct imx6_pcie *imx6_pcie)
IMX7D_GPR12_PCIE_PHY_REFCLK_SEL);
break;
case IMX8MM:
+ case IMX8MM_EP:
case IMX8MQ:
+ case IMX8MQ_EP:
case IMX8MP:
+ case IMX8MP_EP:
clk_disable_unprepare(imx6_pcie->pcie_aux);
break;
default:
@@ -672,10 +700,13 @@ static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie)
switch (imx6_pcie->drvdata->variant) {
case IMX7D:
case IMX8MQ:
+ case IMX8MQ_EP:
reset_control_assert(imx6_pcie->pciephy_reset);
fallthrough;
case IMX8MM:
+ case IMX8MM_EP:
case IMX8MP:
+ case IMX8MP_EP:
reset_control_assert(imx6_pcie->apps_reset);
break;
case IMX6SX:
@@ -713,6 +744,7 @@ static int imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
switch (imx6_pcie->drvdata->variant) {
case IMX8MQ:
+ case IMX8MQ_EP:
reset_control_deassert(imx6_pcie->pciephy_reset);
break;
case IMX7D:
@@ -751,7 +783,9 @@ static int imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
break;
case IMX6Q: /* Nothing to do */
case IMX8MM:
+ case IMX8MM_EP:
case IMX8MP:
+ case IMX8MP_EP:
break;
}
@@ -800,8 +834,11 @@ static void imx6_pcie_ltssm_enable(struct device *dev)
break;
case IMX7D:
case IMX8MQ:
+ case IMX8MQ_EP:
case IMX8MM:
+ case IMX8MM_EP:
case IMX8MP:
+ case IMX8MP_EP:
reset_control_deassert(imx6_pcie->apps_reset);
break;
}
@@ -820,8 +857,11 @@ static void imx6_pcie_ltssm_disable(struct device *dev)
break;
case IMX7D:
case IMX8MQ:
+ case IMX8MQ_EP:
case IMX8MM:
+ case IMX8MM_EP:
case IMX8MP:
+ case IMX8MP_EP:
reset_control_assert(imx6_pcie->apps_reset);
break;
}
@@ -1003,8 +1043,104 @@ static const struct dw_pcie_host_ops imx6_pcie_host_ops = {
static const struct dw_pcie_ops dw_pcie_ops = {
.start_link = imx6_pcie_start_link,
+ .stop_link = imx6_pcie_stop_link,
+};
+
+static void imx6_pcie_ep_init(struct dw_pcie_ep *ep)
+{
+ enum pci_barno bar;
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+ for (bar = BAR_0; bar <= BAR_5; bar++)
+ dw_pcie_ep_reset_bar(pci, bar);
+}
+
+static int imx6_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
+ enum pci_epc_irq_type type,
+ u16 interrupt_num)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+ switch (type) {
+ case PCI_EPC_IRQ_LEGACY:
+ return dw_pcie_ep_raise_legacy_irq(ep, func_no);
+ case PCI_EPC_IRQ_MSI:
+ return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
+ case PCI_EPC_IRQ_MSIX:
+ return dw_pcie_ep_raise_msix_irq(ep, func_no, interrupt_num);
+ default:
+ dev_err(pci->dev, "UNKNOWN IRQ type\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct pci_epc_features imx8m_pcie_epc_features = {
+ .linkup_notifier = false,
+ .msi_capable = true,
+ .msix_capable = false,
+ .reserved_bar = 1 << BAR_1 | 1 << BAR_3,
+ .align = SZ_64K,
+};
+
+static const struct pci_epc_features*
+imx6_pcie_ep_get_features(struct dw_pcie_ep *ep)
+{
+ return &imx8m_pcie_epc_features;
+}
+
+static const struct dw_pcie_ep_ops pcie_ep_ops = {
+ .ep_init = imx6_pcie_ep_init,
+ .raise_irq = imx6_pcie_ep_raise_irq,
+ .get_features = imx6_pcie_ep_get_features,
};
+static int imx6_add_pcie_ep(struct imx6_pcie *imx6_pcie,
+ struct platform_device *pdev)
+{
+ int ret;
+ unsigned int pcie_dbi2_offset;
+ struct dw_pcie_ep *ep;
+ struct resource *res;
+ struct dw_pcie *pci = imx6_pcie->pci;
+ struct dw_pcie_rp *pp = &pci->pp;
+ struct device *dev = pci->dev;
+
+ imx6_pcie_host_init(pp);
+ ep = &pci->ep;
+ ep->ops = &pcie_ep_ops;
+
+ switch (imx6_pcie->drvdata->variant) {
+ case IMX8MQ_EP:
+ case IMX8MM_EP:
+ case IMX8MP_EP:
+ pcie_dbi2_offset = SZ_1M;
+ break;
+ default:
+ pcie_dbi2_offset = SZ_4K;
+ break;
+ }
+ pci->dbi_base2 = pci->dbi_base + pcie_dbi2_offset;
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
+ if (!res)
+ return -EINVAL;
+
+ ep->phys_base = res->start;
+ ep->addr_size = resource_size(res);
+ ep->page_size = SZ_64K;
+
+ ret = dw_pcie_ep_init(ep);
+ if (ret) {
+ dev_err(dev, "failed to initialize endpoint\n");
+ return ret;
+ }
+ /* Start LTSSM. */
+ imx6_pcie_ltssm_enable(dev);
+
+ return 0;
+}
+
static void imx6_pcie_pm_turnoff(struct imx6_pcie *imx6_pcie)
{
struct device *dev = imx6_pcie->pci->dev;
@@ -1166,6 +1302,7 @@ static int imx6_pcie_probe(struct platform_device *pdev)
"pcie_inbound_axi clock missing or invalid\n");
break;
case IMX8MQ:
+ case IMX8MQ_EP:
imx6_pcie->pcie_aux = devm_clk_get(dev, "pcie_aux");
if (IS_ERR(imx6_pcie->pcie_aux))
return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie_aux),
@@ -1190,7 +1327,9 @@ static int imx6_pcie_probe(struct platform_device *pdev)
}
break;
case IMX8MM:
+ case IMX8MM_EP:
case IMX8MP:
+ case IMX8MP_EP:
imx6_pcie->pcie_aux = devm_clk_get(dev, "pcie_aux");
if (IS_ERR(imx6_pcie->pcie_aux))
return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie_aux),
@@ -1279,15 +1418,22 @@ static int imx6_pcie_probe(struct platform_device *pdev)
if (ret)
return ret;
- ret = dw_pcie_host_init(&pci->pp);
- if (ret < 0)
- return ret;
+ if (imx6_pcie->drvdata->mode == DW_PCIE_EP_TYPE) {
+ ret = imx6_add_pcie_ep(imx6_pcie, pdev);
+ if (ret < 0)
+ return ret;
+ } else {
+ ret = dw_pcie_host_init(&pci->pp);
+ if (ret < 0)
+ return ret;
+
+ if (pci_msi_enabled()) {
+ u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_MSI);
- if (pci_msi_enabled()) {
- u8 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_MSI);
- val = dw_pcie_readw_dbi(pci, offset + PCI_MSI_FLAGS);
- val |= PCI_MSI_FLAGS_ENABLE;
- dw_pcie_writew_dbi(pci, offset + PCI_MSI_FLAGS, val);
+ val = dw_pcie_readw_dbi(pci, offset + PCI_MSI_FLAGS);
+ val |= PCI_MSI_FLAGS_ENABLE;
+ dw_pcie_writew_dbi(pci, offset + PCI_MSI_FLAGS, val);
+ }
}
return 0;
@@ -1343,6 +1489,21 @@ static const struct imx6_pcie_drvdata drvdata[] = {
.flags = IMX6_PCIE_FLAG_SUPPORTS_SUSPEND,
.gpr = "fsl,imx8mp-iomuxc-gpr",
},
+ [IMX8MQ_EP] = {
+ .variant = IMX8MQ_EP,
+ .mode = DW_PCIE_EP_TYPE,
+ .gpr = "fsl,imx8mq-iomuxc-gpr",
+ },
+ [IMX8MM_EP] = {
+ .variant = IMX8MM_EP,
+ .mode = DW_PCIE_EP_TYPE,
+ .gpr = "fsl,imx8mm-iomuxc-gpr",
+ },
+ [IMX8MP_EP] = {
+ .variant = IMX8MP_EP,
+ .mode = DW_PCIE_EP_TYPE,
+ .gpr = "fsl,imx8mp-iomuxc-gpr",
+ },
};
static const struct of_device_id imx6_pcie_of_match[] = {
@@ -1353,6 +1514,9 @@ static const struct of_device_id imx6_pcie_of_match[] = {
{ .compatible = "fsl,imx8mq-pcie", .data = &drvdata[IMX8MQ], },
{ .compatible = "fsl,imx8mm-pcie", .data = &drvdata[IMX8MM], },
{ .compatible = "fsl,imx8mp-pcie", .data = &drvdata[IMX8MP], },
+ { .compatible = "fsl,imx8mq-pcie-ep", .data = &drvdata[IMX8MQ_EP], },
+ { .compatible = "fsl,imx8mm-pcie-ep", .data = &drvdata[IMX8MM_EP], },
+ { .compatible = "fsl,imx8mp-pcie-ep", .data = &drvdata[IMX8MP_EP], },
{},
};
diff --git a/drivers/pci/controller/dwc/pcie-bt1.c b/drivers/pci/controller/dwc/pcie-bt1.c
index 3346770e6654..95a723a6fd46 100644
--- a/drivers/pci/controller/dwc/pcie-bt1.c
+++ b/drivers/pci/controller/dwc/pcie-bt1.c
@@ -583,6 +583,10 @@ static int bt1_pcie_add_port(struct bt1_pcie *btpci)
struct device *dev = &btpci->pdev->dev;
int ret;
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
+ if (ret)
+ return ret;
+
btpci->dw.version = DW_PCIE_VER_460A;
btpci->dw.dev = dev;
btpci->dw.ops = &bt1_pcie_ops;
diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
index d06654895eba..f9182f8d552f 100644
--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
+++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
@@ -612,8 +612,11 @@ int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
void dw_pcie_ep_exit(struct dw_pcie_ep *ep)
{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
struct pci_epc *epc = ep->epc;
+ dw_pcie_edma_remove(pci);
+
pci_epc_mem_free_addr(epc, ep->msi_mem_phys, ep->msi_mem,
epc->mem->window.page_size);
@@ -785,6 +788,10 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
goto err_exit_epc_mem;
}
+ ret = dw_pcie_edma_detect(pci);
+ if (ret)
+ goto err_free_epc_mem;
+
if (ep->ops->get_features) {
epc_features = ep->ops->get_features(ep);
if (epc_features->core_init_notifier)
@@ -793,10 +800,13 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
ret = dw_pcie_ep_init_complete(ep);
if (ret)
- goto err_free_epc_mem;
+ goto err_remove_edma;
return 0;
+err_remove_edma:
+ dw_pcie_edma_remove(pci);
+
err_free_epc_mem:
pci_epc_mem_free_addr(epc, ep->msi_mem_phys, ep->msi_mem,
epc->mem->window.page_size);
diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
index 3ab6ae3712c4..9952057c8819 100644
--- a/drivers/pci/controller/dwc/pcie-designware-host.c
+++ b/drivers/pci/controller/dwc/pcie-designware-host.c
@@ -366,7 +366,17 @@ static int dw_pcie_msi_host_init(struct dw_pcie_rp *pp)
dw_chained_msi_isr, pp);
}
- ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ /*
+ * Even though the iMSI-RX Module supports 64-bit addresses some
+ * peripheral PCIe devices may lack 64-bit message support. In
+ * order not to miss MSI TLPs from those devices the MSI target
+ * address has to be within the lowest 4GB.
+ *
+ * Note until there is a better alternative found the reservation is
+ * done by allocating from the artificially limited DMA-coherent
+ * memory.
+ */
+ ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
if (ret)
dev_warn(dev, "Failed to set DMA mask to 32-bit. Devices with only 32-bit MSI support may not work properly\n");
@@ -467,14 +477,18 @@ int dw_pcie_host_init(struct dw_pcie_rp *pp)
dw_pcie_iatu_detect(pci);
- ret = dw_pcie_setup_rc(pp);
+ ret = dw_pcie_edma_detect(pci);
if (ret)
goto err_free_msi;
+ ret = dw_pcie_setup_rc(pp);
+ if (ret)
+ goto err_remove_edma;
+
if (!dw_pcie_link_up(pci)) {
ret = dw_pcie_start_link(pci);
if (ret)
- goto err_free_msi;
+ goto err_remove_edma;
}
/* Ignore errors, the link may come up later */
@@ -491,6 +505,9 @@ int dw_pcie_host_init(struct dw_pcie_rp *pp)
err_stop_link:
dw_pcie_stop_link(pci);
+err_remove_edma:
+ dw_pcie_edma_remove(pci);
+
err_free_msi:
if (pp->has_msi_ctrl)
dw_pcie_free_msi(pp);
@@ -512,6 +529,8 @@ void dw_pcie_host_deinit(struct dw_pcie_rp *pp)
dw_pcie_stop_link(pci);
+ dw_pcie_edma_remove(pci);
+
if (pp->has_msi_ctrl)
dw_pcie_free_msi(pp);
diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c
index 6d5d619ab2e9..53a16b8b6ac2 100644
--- a/drivers/pci/controller/dwc/pcie-designware.c
+++ b/drivers/pci/controller/dwc/pcie-designware.c
@@ -12,6 +12,7 @@
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/dma/edma.h>
#include <linux/gpio/consumer.h>
#include <linux/ioport.h>
#include <linux/of.h>
@@ -142,6 +143,18 @@ int dw_pcie_get_resources(struct dw_pcie *pci)
if (!pci->atu_size)
pci->atu_size = SZ_4K;
+ /* eDMA region can be mapped to a custom base address */
+ if (!pci->edma.reg_base) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dma");
+ if (res) {
+ pci->edma.reg_base = devm_ioremap_resource(pci->dev, res);
+ if (IS_ERR(pci->edma.reg_base))
+ return PTR_ERR(pci->edma.reg_base);
+ } else if (pci->atu_size >= 2 * DEFAULT_DBI_DMA_OFFSET) {
+ pci->edma.reg_base = pci->atu_base + DEFAULT_DBI_DMA_OFFSET;
+ }
+ }
+
/* LLDD is supposed to manually switch the clocks and resets state */
if (dw_pcie_cap_is(pci, REQ_RES)) {
ret = dw_pcie_get_clocks(pci);
@@ -782,6 +795,188 @@ void dw_pcie_iatu_detect(struct dw_pcie *pci)
pci->region_align / SZ_1K, (pci->region_limit + 1) / SZ_1G);
}
+static u32 dw_pcie_readl_dma(struct dw_pcie *pci, u32 reg)
+{
+ u32 val = 0;
+ int ret;
+
+ if (pci->ops && pci->ops->read_dbi)
+ return pci->ops->read_dbi(pci, pci->edma.reg_base, reg, 4);
+
+ ret = dw_pcie_read(pci->edma.reg_base + reg, 4, &val);
+ if (ret)
+ dev_err(pci->dev, "Read DMA address failed\n");
+
+ return val;
+}
+
+static int dw_pcie_edma_irq_vector(struct device *dev, unsigned int nr)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ char name[6];
+ int ret;
+
+ if (nr >= EDMA_MAX_WR_CH + EDMA_MAX_RD_CH)
+ return -EINVAL;
+
+ ret = platform_get_irq_byname_optional(pdev, "dma");
+ if (ret > 0)
+ return ret;
+
+ snprintf(name, sizeof(name), "dma%u", nr);
+
+ return platform_get_irq_byname_optional(pdev, name);
+}
+
+static struct dw_edma_core_ops dw_pcie_edma_ops = {
+ .irq_vector = dw_pcie_edma_irq_vector,
+};
+
+static int dw_pcie_edma_find_chip(struct dw_pcie *pci)
+{
+ u32 val;
+
+ /*
+ * Indirect eDMA CSRs access has been completely removed since v5.40a
+ * thus no space is now reserved for the eDMA channels viewport and
+ * former DMA CTRL register is no longer fixed to FFs.
+ */
+ if (dw_pcie_ver_is_ge(pci, 540A))
+ val = 0xFFFFFFFF;
+ else
+ val = dw_pcie_readl_dbi(pci, PCIE_DMA_VIEWPORT_BASE + PCIE_DMA_CTRL);
+
+ if (val == 0xFFFFFFFF && pci->edma.reg_base) {
+ pci->edma.mf = EDMA_MF_EDMA_UNROLL;
+
+ val = dw_pcie_readl_dma(pci, PCIE_DMA_CTRL);
+ } else if (val != 0xFFFFFFFF) {
+ pci->edma.mf = EDMA_MF_EDMA_LEGACY;
+
+ pci->edma.reg_base = pci->dbi_base + PCIE_DMA_VIEWPORT_BASE;
+ } else {
+ return -ENODEV;
+ }
+
+ pci->edma.dev = pci->dev;
+
+ if (!pci->edma.ops)
+ pci->edma.ops = &dw_pcie_edma_ops;
+
+ pci->edma.flags |= DW_EDMA_CHIP_LOCAL;
+
+ pci->edma.ll_wr_cnt = FIELD_GET(PCIE_DMA_NUM_WR_CHAN, val);
+ pci->edma.ll_rd_cnt = FIELD_GET(PCIE_DMA_NUM_RD_CHAN, val);
+
+ /* Sanity check the channels count if the mapping was incorrect */
+ if (!pci->edma.ll_wr_cnt || pci->edma.ll_wr_cnt > EDMA_MAX_WR_CH ||
+ !pci->edma.ll_rd_cnt || pci->edma.ll_rd_cnt > EDMA_MAX_RD_CH)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int dw_pcie_edma_irq_verify(struct dw_pcie *pci)
+{
+ struct platform_device *pdev = to_platform_device(pci->dev);
+ u16 ch_cnt = pci->edma.ll_wr_cnt + pci->edma.ll_rd_cnt;
+ char name[6];
+ int ret;
+
+ if (pci->edma.nr_irqs == 1)
+ return 0;
+ else if (pci->edma.nr_irqs > 1)
+ return pci->edma.nr_irqs != ch_cnt ? -EINVAL : 0;
+
+ ret = platform_get_irq_byname_optional(pdev, "dma");
+ if (ret > 0) {
+ pci->edma.nr_irqs = 1;
+ return 0;
+ }
+
+ for (; pci->edma.nr_irqs < ch_cnt; pci->edma.nr_irqs++) {
+ snprintf(name, sizeof(name), "dma%d", pci->edma.nr_irqs);
+
+ ret = platform_get_irq_byname_optional(pdev, name);
+ if (ret <= 0)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int dw_pcie_edma_ll_alloc(struct dw_pcie *pci)
+{
+ struct dw_edma_region *ll;
+ dma_addr_t paddr;
+ int i;
+
+ for (i = 0; i < pci->edma.ll_wr_cnt; i++) {
+ ll = &pci->edma.ll_region_wr[i];
+ ll->sz = DMA_LLP_MEM_SIZE;
+ ll->vaddr.mem = dmam_alloc_coherent(pci->dev, ll->sz,
+ &paddr, GFP_KERNEL);
+ if (!ll->vaddr.mem)
+ return -ENOMEM;
+
+ ll->paddr = paddr;
+ }
+
+ for (i = 0; i < pci->edma.ll_rd_cnt; i++) {
+ ll = &pci->edma.ll_region_rd[i];
+ ll->sz = DMA_LLP_MEM_SIZE;
+ ll->vaddr.mem = dmam_alloc_coherent(pci->dev, ll->sz,
+ &paddr, GFP_KERNEL);
+ if (!ll->vaddr.mem)
+ return -ENOMEM;
+
+ ll->paddr = paddr;
+ }
+
+ return 0;
+}
+
+int dw_pcie_edma_detect(struct dw_pcie *pci)
+{
+ int ret;
+
+ /* Don't fail if no eDMA was found (for the backward compatibility) */
+ ret = dw_pcie_edma_find_chip(pci);
+ if (ret)
+ return 0;
+
+ /* Don't fail on the IRQs verification (for the backward compatibility) */
+ ret = dw_pcie_edma_irq_verify(pci);
+ if (ret) {
+ dev_err(pci->dev, "Invalid eDMA IRQs found\n");
+ return 0;
+ }
+
+ ret = dw_pcie_edma_ll_alloc(pci);
+ if (ret) {
+ dev_err(pci->dev, "Couldn't allocate LLP memory\n");
+ return ret;
+ }
+
+ /* Don't fail if the DW eDMA driver can't find the device */
+ ret = dw_edma_probe(&pci->edma);
+ if (ret && ret != -ENODEV) {
+ dev_err(pci->dev, "Couldn't register eDMA device\n");
+ return ret;
+ }
+
+ dev_info(pci->dev, "eDMA: unroll %s, %hu wr, %hu rd\n",
+ pci->edma.mf == EDMA_MF_EDMA_UNROLL ? "T" : "F",
+ pci->edma.ll_wr_cnt, pci->edma.ll_rd_cnt);
+
+ return 0;
+}
+
+void dw_pcie_edma_remove(struct dw_pcie *pci)
+{
+ dw_edma_remove(&pci->edma);
+}
+
void dw_pcie_setup(struct dw_pcie *pci)
{
u32 val;
diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h
index 393dfb931df6..79713ce075cc 100644
--- a/drivers/pci/controller/dwc/pcie-designware.h
+++ b/drivers/pci/controller/dwc/pcie-designware.h
@@ -15,6 +15,7 @@
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
+#include <linux/dma/edma.h>
#include <linux/gpio/consumer.h>
#include <linux/irq.h>
#include <linux/msi.h>
@@ -31,6 +32,7 @@
#define DW_PCIE_VER_480A 0x3438302a
#define DW_PCIE_VER_490A 0x3439302a
#define DW_PCIE_VER_520A 0x3532302a
+#define DW_PCIE_VER_540A 0x3534302a
#define __dw_pcie_ver_cmp(_pci, _ver, _op) \
((_pci)->version _op DW_PCIE_VER_ ## _ver)
@@ -167,6 +169,18 @@
#define PCIE_MSIX_DOORBELL 0x948
#define PCIE_MSIX_DOORBELL_PF_SHIFT 24
+/*
+ * eDMA CSRs. DW PCIe IP-core v4.70a and older had the eDMA registers accessible
+ * over the Port Logic registers space. Afterwards the unrolled mapping was
+ * introduced so eDMA and iATU could be accessed via a dedicated registers
+ * space.
+ */
+#define PCIE_DMA_VIEWPORT_BASE 0x970
+#define PCIE_DMA_UNROLL_BASE 0x80000
+#define PCIE_DMA_CTRL 0x008
+#define PCIE_DMA_NUM_WR_CHAN GENMASK(3, 0)
+#define PCIE_DMA_NUM_RD_CHAN GENMASK(19, 16)
+
#define PCIE_PL_CHK_REG_CONTROL_STATUS 0xB20
#define PCIE_PL_CHK_REG_CHK_REG_START BIT(0)
#define PCIE_PL_CHK_REG_CHK_REG_CONTINUOUS BIT(1)
@@ -215,6 +229,7 @@
* this offset, if atu_base not set.
*/
#define DEFAULT_DBI_ATU_OFFSET (0x3 << 20)
+#define DEFAULT_DBI_DMA_OFFSET PCIE_DMA_UNROLL_BASE
#define MAX_MSI_IRQS 256
#define MAX_MSI_IRQS_PER_CTRL 32
@@ -226,6 +241,9 @@
#define MAX_IATU_IN 256
#define MAX_IATU_OUT 256
+/* Default eDMA LLP memory size */
+#define DMA_LLP_MEM_SIZE PAGE_SIZE
+
struct dw_pcie;
struct dw_pcie_rp;
struct dw_pcie_ep;
@@ -369,6 +387,7 @@ struct dw_pcie {
int num_lanes;
int link_gen;
u8 n_fts[2];
+ struct dw_edma_chip edma;
struct clk_bulk_data app_clks[DW_PCIE_NUM_APP_CLKS];
struct clk_bulk_data core_clks[DW_PCIE_NUM_CORE_CLKS];
struct reset_control_bulk_data app_rsts[DW_PCIE_NUM_APP_RSTS];
@@ -408,6 +427,8 @@ int dw_pcie_prog_ep_inbound_atu(struct dw_pcie *pci, u8 func_no, int index,
void dw_pcie_disable_atu(struct dw_pcie *pci, u32 dir, int index);
void dw_pcie_setup(struct dw_pcie *pci);
void dw_pcie_iatu_detect(struct dw_pcie *pci);
+int dw_pcie_edma_detect(struct dw_pcie *pci);
+void dw_pcie_edma_remove(struct dw_pcie *pci);
static inline void dw_pcie_writel_dbi(struct dw_pcie *pci, u32 reg, u32 val)
{
diff --git a/drivers/pci/controller/dwc/pcie-histb.c b/drivers/pci/controller/dwc/pcie-histb.c
index 43c27812dd6d..927ae05dc920 100644
--- a/drivers/pci/controller/dwc/pcie-histb.c
+++ b/drivers/pci/controller/dwc/pcie-histb.c
@@ -450,4 +450,3 @@ static struct platform_driver histb_pcie_platform_driver = {
module_platform_driver(histb_pcie_platform_driver);
MODULE_DESCRIPTION("HiSilicon STB PCIe host controller driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
index 77e5dc7b88ad..a232b04af048 100644
--- a/drivers/pci/controller/dwc/pcie-qcom.c
+++ b/drivers/pci/controller/dwc/pcie-qcom.c
@@ -1534,8 +1534,19 @@ err_deinit:
return ret;
}
+static void qcom_pcie_host_deinit(struct dw_pcie_rp *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct qcom_pcie *pcie = to_qcom_pcie(pci);
+
+ qcom_ep_reset_assert(pcie);
+ phy_power_off(pcie->phy);
+ pcie->cfg->ops->deinit(pcie);
+}
+
static const struct dw_pcie_host_ops qcom_pcie_dw_ops = {
- .host_init = qcom_pcie_host_init,
+ .host_init = qcom_pcie_host_init,
+ .host_deinit = qcom_pcie_host_deinit,
};
/* Qcom IP rev.: 2.1.0 Synopsys IP rev.: 4.01a */
@@ -1817,6 +1828,7 @@ static const struct of_device_id qcom_pcie_match[] = {
{ .compatible = "qcom,pcie-ipq8064", .data = &cfg_2_1_0 },
{ .compatible = "qcom,pcie-ipq8064-v2", .data = &cfg_2_1_0 },
{ .compatible = "qcom,pcie-ipq8074", .data = &cfg_2_3_3 },
+ { .compatible = "qcom,pcie-ipq8074-gen3", .data = &cfg_2_9_0 },
{ .compatible = "qcom,pcie-msm8996", .data = &cfg_2_3_2 },
{ .compatible = "qcom,pcie-qcs404", .data = &cfg_2_4_0 },
{ .compatible = "qcom,pcie-sa8540p", .data = &cfg_1_9_0 },
@@ -1826,6 +1838,7 @@ static const struct of_device_id qcom_pcie_match[] = {
{ .compatible = "qcom,pcie-sdm845", .data = &cfg_2_7_0 },
{ .compatible = "qcom,pcie-sm8150", .data = &cfg_1_9_0 },
{ .compatible = "qcom,pcie-sm8250", .data = &cfg_1_9_0 },
+ { .compatible = "qcom,pcie-sm8350", .data = &cfg_1_9_0 },
{ .compatible = "qcom,pcie-sm8450-pcie0", .data = &cfg_1_9_0 },
{ .compatible = "qcom,pcie-sm8450-pcie1", .data = &cfg_1_9_0 },
{ }
diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c
index 02d78a12b6e7..09825b4a075e 100644
--- a/drivers/pci/controller/dwc/pcie-tegra194.c
+++ b/drivers/pci/controller/dwc/pcie-tegra194.c
@@ -286,6 +286,7 @@ struct tegra_pcie_dw {
struct gpio_desc *pex_refclk_sel_gpiod;
unsigned int pex_rst_irq;
int ep_state;
+ long link_status;
};
static inline struct tegra_pcie_dw *to_tegra_pcie(struct dw_pcie *pci)
@@ -449,9 +450,13 @@ static void pex_ep_event_hot_rst_done(struct tegra_pcie_dw *pcie)
static irqreturn_t tegra_pcie_ep_irq_thread(int irq, void *arg)
{
struct tegra_pcie_dw *pcie = arg;
+ struct dw_pcie_ep *ep = &pcie->pci.ep;
struct dw_pcie *pci = &pcie->pci;
u32 val, speed;
+ if (test_and_clear_bit(0, &pcie->link_status))
+ dw_pcie_ep_linkup(ep);
+
speed = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA) &
PCI_EXP_LNKSTA_CLS;
clk_set_rate(pcie->core_clk, pcie_gen_freq[speed - 1]);
@@ -498,7 +503,6 @@ static irqreturn_t tegra_pcie_ep_irq_thread(int irq, void *arg)
static irqreturn_t tegra_pcie_ep_hard_irq(int irq, void *arg)
{
struct tegra_pcie_dw *pcie = arg;
- struct dw_pcie_ep *ep = &pcie->pci.ep;
int spurious = 1;
u32 status_l0, status_l1, link_status;
@@ -514,7 +518,8 @@ static irqreturn_t tegra_pcie_ep_hard_irq(int irq, void *arg)
link_status = appl_readl(pcie, APPL_LINK_STATUS);
if (link_status & APPL_LINK_STATUS_RDLH_LINK_UP) {
dev_dbg(pcie->dev, "Link is up with Host\n");
- dw_pcie_ep_linkup(ep);
+ set_bit(0, &pcie->link_status);
+ return IRQ_WAKE_THREAD;
}
}
diff --git a/drivers/pci/controller/mobiveil/pcie-mobiveil-plat.c b/drivers/pci/controller/mobiveil/pcie-mobiveil-plat.c
index f6fcd95c2bf5..c5bb87ff6d9a 100644
--- a/drivers/pci/controller/mobiveil/pcie-mobiveil-plat.c
+++ b/drivers/pci/controller/mobiveil/pcie-mobiveil-plat.c
@@ -56,6 +56,5 @@ static struct platform_driver mobiveil_pcie_driver = {
builtin_platform_driver(mobiveil_pcie_driver);
-MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Mobiveil PCIe host controller driver");
MODULE_AUTHOR("Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>");
diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
index 084f5313895c..f33370b75628 100644
--- a/drivers/pci/controller/pci-hyperv.c
+++ b/drivers/pci/controller/pci-hyperv.c
@@ -3800,13 +3800,10 @@ static int hv_pci_bus_exit(struct hv_device *hdev, bool keep_devs)
/**
* hv_pci_remove() - Remove routine for this VMBus channel
* @hdev: VMBus's tracking struct for this root PCI bus
- *
- * Return: 0 on success, -errno on failure
*/
-static int hv_pci_remove(struct hv_device *hdev)
+static void hv_pci_remove(struct hv_device *hdev)
{
struct hv_pcibus_device *hbus;
- int ret;
hbus = hv_get_drvdata(hdev);
if (hbus->state == hv_pcibus_installed) {
@@ -3829,7 +3826,7 @@ static int hv_pci_remove(struct hv_device *hdev)
pci_unlock_rescan_remove();
}
- ret = hv_pci_bus_exit(hdev, false);
+ hv_pci_bus_exit(hdev, false);
vmbus_close(hdev->channel);
@@ -3842,7 +3839,6 @@ static int hv_pci_remove(struct hv_device *hdev)
hv_put_dom_num(hbus->bridge->domain_nr);
kfree(hbus);
- return ret;
}
static int hv_pci_suspend(struct hv_device *hdev)
diff --git a/drivers/pci/controller/pci-loongson.c b/drivers/pci/controller/pci-loongson.c
index 05c50408f13b..fe0f732f6e43 100644
--- a/drivers/pci/controller/pci-loongson.c
+++ b/drivers/pci/controller/pci-loongson.c
@@ -15,9 +15,14 @@
#include "../pci.h"
/* Device IDs */
-#define DEV_PCIE_PORT_0 0x7a09
-#define DEV_PCIE_PORT_1 0x7a19
-#define DEV_PCIE_PORT_2 0x7a29
+#define DEV_LS2K_PCIE_PORT0 0x1a05
+#define DEV_LS7A_PCIE_PORT0 0x7a09
+#define DEV_LS7A_PCIE_PORT1 0x7a19
+#define DEV_LS7A_PCIE_PORT2 0x7a29
+#define DEV_LS7A_PCIE_PORT3 0x7a39
+#define DEV_LS7A_PCIE_PORT4 0x7a49
+#define DEV_LS7A_PCIE_PORT5 0x7a59
+#define DEV_LS7A_PCIE_PORT6 0x7a69
#define DEV_LS2K_APB 0x7a02
#define DEV_LS7A_GMAC 0x7a03
@@ -53,11 +58,11 @@ static void bridge_class_quirk(struct pci_dev *dev)
dev->class = PCI_CLASS_BRIDGE_PCI_NORMAL;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON,
- DEV_PCIE_PORT_0, bridge_class_quirk);
+ DEV_LS7A_PCIE_PORT0, bridge_class_quirk);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON,
- DEV_PCIE_PORT_1, bridge_class_quirk);
+ DEV_LS7A_PCIE_PORT1, bridge_class_quirk);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON,
- DEV_PCIE_PORT_2, bridge_class_quirk);
+ DEV_LS7A_PCIE_PORT2, bridge_class_quirk);
static void system_bus_quirk(struct pci_dev *pdev)
{
@@ -75,37 +80,33 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON,
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON,
DEV_LS7A_LPC, system_bus_quirk);
-static void loongson_mrrs_quirk(struct pci_dev *dev)
+static void loongson_mrrs_quirk(struct pci_dev *pdev)
{
- struct pci_bus *bus = dev->bus;
- struct pci_dev *bridge;
- static const struct pci_device_id bridge_devids[] = {
- { PCI_VDEVICE(LOONGSON, DEV_PCIE_PORT_0) },
- { PCI_VDEVICE(LOONGSON, DEV_PCIE_PORT_1) },
- { PCI_VDEVICE(LOONGSON, DEV_PCIE_PORT_2) },
- { 0, },
- };
-
- /* look for the matching bridge */
- while (!pci_is_root_bus(bus)) {
- bridge = bus->self;
- bus = bus->parent;
- /*
- * Some Loongson PCIe ports have a h/w limitation of
- * 256 bytes maximum read request size. They can't handle
- * anything larger than this. So force this limit on
- * any devices attached under these ports.
- */
- if (pci_match_id(bridge_devids, bridge)) {
- if (pcie_get_readrq(dev) > 256) {
- pci_info(dev, "limiting MRRS to 256\n");
- pcie_set_readrq(dev, 256);
- }
- break;
- }
- }
+ /*
+ * Some Loongson PCIe ports have h/w limitations of maximum read
+ * request size. They can't handle anything larger than this. So
+ * force this limit on any devices attached under these ports.
+ */
+ struct pci_host_bridge *bridge = pci_find_host_bridge(pdev->bus);
+
+ bridge->no_inc_mrrs = 1;
}
-DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, loongson_mrrs_quirk);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON,
+ DEV_LS2K_PCIE_PORT0, loongson_mrrs_quirk);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON,
+ DEV_LS7A_PCIE_PORT0, loongson_mrrs_quirk);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON,
+ DEV_LS7A_PCIE_PORT1, loongson_mrrs_quirk);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON,
+ DEV_LS7A_PCIE_PORT2, loongson_mrrs_quirk);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON,
+ DEV_LS7A_PCIE_PORT3, loongson_mrrs_quirk);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON,
+ DEV_LS7A_PCIE_PORT4, loongson_mrrs_quirk);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON,
+ DEV_LS7A_PCIE_PORT5, loongson_mrrs_quirk);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON,
+ DEV_LS7A_PCIE_PORT6, loongson_mrrs_quirk);
static void loongson_pci_pin_quirk(struct pci_dev *pdev)
{
diff --git a/drivers/pci/controller/pci-tegra.c b/drivers/pci/controller/pci-tegra.c
index 929f9363e94b..74c109f14ff0 100644
--- a/drivers/pci/controller/pci-tegra.c
+++ b/drivers/pci/controller/pci-tegra.c
@@ -1330,12 +1330,9 @@ static struct phy *devm_of_phy_optional_get_index(struct device *dev,
if (!name)
return ERR_PTR(-ENOMEM);
- phy = devm_of_phy_get(dev, np, name);
+ phy = devm_of_phy_optional_get(dev, np, name);
kfree(name);
- if (PTR_ERR(phy) == -ENODEV)
- phy = NULL;
-
return phy;
}
@@ -2814,4 +2811,3 @@ static struct platform_driver tegra_pcie_driver = {
.remove = tegra_pcie_remove,
};
module_platform_driver(tegra_pcie_driver);
-MODULE_LICENSE("GPL");
diff --git a/drivers/pci/controller/pci-versatile.c b/drivers/pci/controller/pci-versatile.c
index 7991d334e0f1..e9a6758fe2c1 100644
--- a/drivers/pci/controller/pci-versatile.c
+++ b/drivers/pci/controller/pci-versatile.c
@@ -169,4 +169,3 @@ static struct platform_driver versatile_pci_driver = {
module_platform_driver(versatile_pci_driver);
MODULE_DESCRIPTION("Versatile PCI driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/controller/pcie-hisi-error.c b/drivers/pci/controller/pcie-hisi-error.c
index 7959c9c8d2bc..7d88eb696b06 100644
--- a/drivers/pci/controller/pcie-hisi-error.c
+++ b/drivers/pci/controller/pcie-hisi-error.c
@@ -324,4 +324,3 @@ static struct platform_driver hisi_pcie_error_handler_driver = {
module_platform_driver(hisi_pcie_error_handler_driver);
MODULE_DESCRIPTION("HiSilicon HIP PCIe controller error handling driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/controller/pcie-microchip-host.c b/drivers/pci/controller/pcie-microchip-host.c
index 0ebf7015e9af..5e710e485464 100644
--- a/drivers/pci/controller/pcie-microchip-host.c
+++ b/drivers/pci/controller/pcie-microchip-host.c
@@ -1135,6 +1135,5 @@ static struct platform_driver mc_pcie_driver = {
};
builtin_platform_driver(mc_pcie_driver);
-MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Microchip PCIe host controller driver");
MODULE_AUTHOR("Daire McNamara <daire.mcnamara@microchip.com>");
diff --git a/drivers/pci/controller/pcie-mt7621.c b/drivers/pci/controller/pcie-mt7621.c
index ee7aad09d627..63a5f4463a9f 100644
--- a/drivers/pci/controller/pcie-mt7621.c
+++ b/drivers/pci/controller/pcie-mt7621.c
@@ -60,6 +60,7 @@
#define PCIE_PORT_LINKUP BIT(0)
#define PCIE_PORT_CNT 3
+#define INIT_PORTS_DELAY_MS 100
#define PERST_DELAY_MS 100
/**
@@ -369,6 +370,7 @@ static int mt7621_pcie_init_ports(struct mt7621_pcie *pcie)
}
}
+ msleep(INIT_PORTS_DELAY_MS);
mt7621_pcie_reset_ep_deassert(pcie);
tmp = NULL;
diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c
index 769eedeb8802..990630ec57c6 100644
--- a/drivers/pci/controller/vmd.c
+++ b/drivers/pci/controller/vmd.c
@@ -66,8 +66,23 @@ enum vmd_features {
* interrupt handling.
*/
VMD_FEAT_CAN_BYPASS_MSI_REMAP = (1 << 4),
+
+ /*
+ * Enable ASPM on the PCIE root ports and set the default LTR of the
+ * storage devices on platforms where these values are not configured by
+ * BIOS. This is needed for laptops, which require these settings for
+ * proper power management of the SoC.
+ */
+ VMD_FEAT_BIOS_PM_QUIRK = (1 << 5),
};
+#define VMD_BIOS_PM_QUIRK_LTR 0x1003 /* 3145728 ns */
+
+#define VMD_FEATS_CLIENT (VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP | \
+ VMD_FEAT_HAS_BUS_RESTRICTIONS | \
+ VMD_FEAT_OFFSET_FIRST_VECTOR | \
+ VMD_FEAT_BIOS_PM_QUIRK)
+
static DEFINE_IDA(vmd_instance_ida);
/*
@@ -709,6 +724,46 @@ static void vmd_copy_host_bridge_flags(struct pci_host_bridge *root_bridge,
vmd_bridge->native_dpc = root_bridge->native_dpc;
}
+/*
+ * Enable ASPM and LTR settings on devices that aren't configured by BIOS.
+ */
+static int vmd_pm_enable_quirk(struct pci_dev *pdev, void *userdata)
+{
+ unsigned long features = *(unsigned long *)userdata;
+ u16 ltr = VMD_BIOS_PM_QUIRK_LTR;
+ u32 ltr_reg;
+ int pos;
+
+ if (!(features & VMD_FEAT_BIOS_PM_QUIRK))
+ return 0;
+
+ pci_enable_link_state(pdev, PCIE_LINK_STATE_ALL);
+
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_LTR);
+ if (!pos)
+ return 0;
+
+ /*
+ * Skip if the max snoop LTR is non-zero, indicating BIOS has set it
+ * so the LTR quirk is not needed.
+ */
+ pci_read_config_dword(pdev, pos + PCI_LTR_MAX_SNOOP_LAT, &ltr_reg);
+ if (!!(ltr_reg & (PCI_LTR_VALUE_MASK | PCI_LTR_SCALE_MASK)))
+ return 0;
+
+ /*
+ * Set the default values to the maximum required by the platform to
+ * allow the deepest power management savings. Write as a DWORD where
+ * the lower word is the max snoop latency and the upper word is the
+ * max non-snoop latency.
+ */
+ ltr_reg = (ltr << 16) | ltr;
+ pci_write_config_dword(pdev, pos + PCI_LTR_MAX_SNOOP_LAT, ltr_reg);
+ pci_info(pdev, "VMD: Default LTR value set by driver\n");
+
+ return 0;
+}
+
static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
{
struct pci_sysdata *sd = &vmd->sysdata;
@@ -881,6 +936,8 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
pci_assign_unassigned_bus_resources(vmd->bus);
+ pci_walk_bus(vmd->bus, vmd_pm_enable_quirk, &features);
+
/*
* VMD root buses are virtual and don't return true on pci_is_pcie()
* and will fail pcie_bus_configure_settings() early. It can instead be
@@ -1017,36 +1074,24 @@ static int vmd_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(vmd_dev_pm_ops, vmd_suspend, vmd_resume);
static const struct pci_device_id vmd_ids[] = {
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_201D),
+ {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_VMD_201D),
.driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP,},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_28C0),
+ {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_VMD_28C0),
.driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW |
VMD_FEAT_HAS_BUS_RESTRICTIONS |
VMD_FEAT_CAN_BYPASS_MSI_REMAP,},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x467f),
- .driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP |
- VMD_FEAT_HAS_BUS_RESTRICTIONS |
- VMD_FEAT_OFFSET_FIRST_VECTOR,},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4c3d),
- .driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP |
- VMD_FEAT_HAS_BUS_RESTRICTIONS |
- VMD_FEAT_OFFSET_FIRST_VECTOR,},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa77f),
- .driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP |
- VMD_FEAT_HAS_BUS_RESTRICTIONS |
- VMD_FEAT_OFFSET_FIRST_VECTOR,},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7d0b),
- .driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP |
- VMD_FEAT_HAS_BUS_RESTRICTIONS |
- VMD_FEAT_OFFSET_FIRST_VECTOR,},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xad0b),
- .driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP |
- VMD_FEAT_HAS_BUS_RESTRICTIONS |
- VMD_FEAT_OFFSET_FIRST_VECTOR,},
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_9A0B),
- .driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP |
- VMD_FEAT_HAS_BUS_RESTRICTIONS |
- VMD_FEAT_OFFSET_FIRST_VECTOR,},
+ {PCI_VDEVICE(INTEL, 0x467f),
+ .driver_data = VMD_FEATS_CLIENT,},
+ {PCI_VDEVICE(INTEL, 0x4c3d),
+ .driver_data = VMD_FEATS_CLIENT,},
+ {PCI_VDEVICE(INTEL, 0xa77f),
+ .driver_data = VMD_FEATS_CLIENT,},
+ {PCI_VDEVICE(INTEL, 0x7d0b),
+ .driver_data = VMD_FEATS_CLIENT,},
+ {PCI_VDEVICE(INTEL, 0xad0b),
+ .driver_data = VMD_FEATS_CLIENT,},
+ {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_VMD_9A0B),
+ .driver_data = VMD_FEATS_CLIENT,},
{0,}
};
MODULE_DEVICE_TABLE(pci, vmd_ids);
diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c
index 55283d2379a6..0f9d2ec822ac 100644
--- a/drivers/pci/endpoint/functions/pci-epf-test.c
+++ b/drivers/pci/endpoint/functions/pci-epf-test.c
@@ -826,33 +826,21 @@ static int pci_epf_test_core_init(struct pci_epf *epf)
return 0;
}
-static int pci_epf_test_notifier(struct notifier_block *nb, unsigned long val,
- void *data)
+static int pci_epf_test_link_up(struct pci_epf *epf)
{
- struct pci_epf *epf = container_of(nb, struct pci_epf, nb);
struct pci_epf_test *epf_test = epf_get_drvdata(epf);
- int ret;
-
- switch (val) {
- case CORE_INIT:
- ret = pci_epf_test_core_init(epf);
- if (ret)
- return NOTIFY_BAD;
- break;
- case LINK_UP:
- queue_delayed_work(kpcitest_workqueue, &epf_test->cmd_handler,
- msecs_to_jiffies(1));
- break;
-
- default:
- dev_err(&epf->dev, "Invalid EPF test notifier event\n");
- return NOTIFY_BAD;
- }
+ queue_delayed_work(kpcitest_workqueue, &epf_test->cmd_handler,
+ msecs_to_jiffies(1));
- return NOTIFY_OK;
+ return 0;
}
+static const struct pci_epc_event_ops pci_epf_test_event_ops = {
+ .core_init = pci_epf_test_core_init,
+ .link_up = pci_epf_test_link_up,
+};
+
static int pci_epf_test_alloc_space(struct pci_epf *epf)
{
struct pci_epf_test *epf_test = epf_get_drvdata(epf);
@@ -979,12 +967,8 @@ static int pci_epf_test_bind(struct pci_epf *epf)
if (ret)
epf_test->dma_supported = false;
- if (linkup_notifier || core_init_notifier) {
- epf->nb.notifier_call = pci_epf_test_notifier;
- pci_epc_register_notifier(epc, &epf->nb);
- } else {
+ if (!linkup_notifier && !core_init_notifier)
queue_work(kpcitest_workqueue, &epf_test->cmd_handler.work);
- }
return 0;
}
@@ -1010,6 +994,8 @@ static int pci_epf_test_probe(struct pci_epf *epf)
INIT_DELAYED_WORK(&epf_test->cmd_handler, pci_epf_test_cmd_handler);
+ epf->event_ops = &pci_epf_test_event_ops;
+
epf_set_drvdata(epf, epf_test);
return 0;
}
diff --git a/drivers/pci/endpoint/functions/pci-epf-vntb.c b/drivers/pci/endpoint/functions/pci-epf-vntb.c
index 04698e7995a5..b7c7a8af99f4 100644
--- a/drivers/pci/endpoint/functions/pci-epf-vntb.c
+++ b/drivers/pci/endpoint/functions/pci-epf-vntb.c
@@ -652,6 +652,7 @@ err_alloc_mem:
/**
* epf_ntb_mw_bar_clear() - Clear Memory window BARs
* @ntb: NTB device that facilitates communication between HOST and VHOST
+ * @num_mws: the number of Memory window BARs that to be cleared
*/
static void epf_ntb_mw_bar_clear(struct epf_ntb *ntb, int num_mws)
{
diff --git a/drivers/pci/endpoint/pci-ep-cfs.c b/drivers/pci/endpoint/pci-ep-cfs.c
index d4850bdd837f..4b8ac0ac84d5 100644
--- a/drivers/pci/endpoint/pci-ep-cfs.c
+++ b/drivers/pci/endpoint/pci-ep-cfs.c
@@ -728,4 +728,3 @@ module_exit(pci_ep_cfs_exit);
MODULE_DESCRIPTION("PCI EP CONFIGFS");
MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/endpoint/pci-epc-core.c b/drivers/pci/endpoint/pci-epc-core.c
index 2542196e8c3d..9440d9811eea 100644
--- a/drivers/pci/endpoint/pci-epc-core.c
+++ b/drivers/pci/endpoint/pci-epc-core.c
@@ -613,7 +613,7 @@ int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf,
if (type == SECONDARY_INTERFACE && epf->sec_epc)
return -EBUSY;
- mutex_lock(&epc->lock);
+ mutex_lock(&epc->list_lock);
func_no = find_first_zero_bit(&epc->function_num_map,
BITS_PER_LONG);
if (func_no >= BITS_PER_LONG) {
@@ -640,7 +640,7 @@ int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf,
list_add_tail(list, &epc->pci_epf);
ret:
- mutex_unlock(&epc->lock);
+ mutex_unlock(&epc->list_lock);
return ret;
}
@@ -672,11 +672,11 @@ void pci_epc_remove_epf(struct pci_epc *epc, struct pci_epf *epf,
list = &epf->sec_epc_list;
}
- mutex_lock(&epc->lock);
+ mutex_lock(&epc->list_lock);
clear_bit(func_no, &epc->function_num_map);
list_del(list);
epf->epc = NULL;
- mutex_unlock(&epc->lock);
+ mutex_unlock(&epc->list_lock);
}
EXPORT_SYMBOL_GPL(pci_epc_remove_epf);
@@ -690,10 +690,19 @@ EXPORT_SYMBOL_GPL(pci_epc_remove_epf);
*/
void pci_epc_linkup(struct pci_epc *epc)
{
+ struct pci_epf *epf;
+
if (!epc || IS_ERR(epc))
return;
- atomic_notifier_call_chain(&epc->notifier, LINK_UP, NULL);
+ mutex_lock(&epc->list_lock);
+ list_for_each_entry(epf, &epc->pci_epf, list) {
+ mutex_lock(&epf->lock);
+ if (epf->event_ops && epf->event_ops->link_up)
+ epf->event_ops->link_up(epf);
+ mutex_unlock(&epf->lock);
+ }
+ mutex_unlock(&epc->list_lock);
}
EXPORT_SYMBOL_GPL(pci_epc_linkup);
@@ -707,10 +716,19 @@ EXPORT_SYMBOL_GPL(pci_epc_linkup);
*/
void pci_epc_init_notify(struct pci_epc *epc)
{
+ struct pci_epf *epf;
+
if (!epc || IS_ERR(epc))
return;
- atomic_notifier_call_chain(&epc->notifier, CORE_INIT, NULL);
+ mutex_lock(&epc->list_lock);
+ list_for_each_entry(epf, &epc->pci_epf, list) {
+ mutex_lock(&epf->lock);
+ if (epf->event_ops && epf->event_ops->core_init)
+ epf->event_ops->core_init(epf);
+ mutex_unlock(&epf->lock);
+ }
+ mutex_unlock(&epc->list_lock);
}
EXPORT_SYMBOL_GPL(pci_epc_init_notify);
@@ -777,8 +795,8 @@ __pci_epc_create(struct device *dev, const struct pci_epc_ops *ops,
}
mutex_init(&epc->lock);
+ mutex_init(&epc->list_lock);
INIT_LIST_HEAD(&epc->pci_epf);
- ATOMIC_INIT_NOTIFIER_HEAD(&epc->notifier);
device_initialize(&epc->dev);
epc->dev.class = pci_epc_class;
@@ -861,4 +879,3 @@ module_exit(pci_epc_exit);
MODULE_DESCRIPTION("PCI EPC Library");
MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/endpoint/pci-epc-mem.c b/drivers/pci/endpoint/pci-epc-mem.c
index a97b56a6d2db..7dcf6f480b82 100644
--- a/drivers/pci/endpoint/pci-epc-mem.c
+++ b/drivers/pci/endpoint/pci-epc-mem.c
@@ -260,4 +260,3 @@ EXPORT_SYMBOL_GPL(pci_epc_mem_free_addr);
MODULE_DESCRIPTION("PCI EPC Address Space Management");
MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/endpoint/pci-epf-core.c b/drivers/pci/endpoint/pci-epf-core.c
index 9ed556936f48..2036e38be093 100644
--- a/drivers/pci/endpoint/pci-epf-core.c
+++ b/drivers/pci/endpoint/pci-epf-core.c
@@ -568,4 +568,3 @@ module_exit(pci_epf_exit);
MODULE_DESCRIPTION("PCI EPF Library");
MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/hotplug/acpiphp_core.c b/drivers/pci/hotplug/acpiphp_core.c
index 853e04ad272c..c02257f4b61c 100644
--- a/drivers/pci/hotplug/acpiphp_core.c
+++ b/drivers/pci/hotplug/acpiphp_core.c
@@ -45,7 +45,6 @@ static struct acpiphp_attention_info *attention_info;
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE("GPL");
MODULE_PARM_DESC(disable, "disable acpiphp driver");
module_param_named(disable, acpiphp_disabled, bool, 0444);
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 10e9670eea0b..f8c70115b691 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -1088,6 +1088,8 @@ static void quirk_cmd_compl(struct pci_dev *pdev)
}
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_QCOM, 0x010e,
+ PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_QCOM, 0x0110,
PCI_CLASS_BRIDGE_PCI, 8, quirk_cmd_compl);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_QCOM, 0x0400,
diff --git a/drivers/pci/hotplug/shpchp_core.c b/drivers/pci/hotplug/shpchp_core.c
index 53692b048301..56c7795ed890 100644
--- a/drivers/pci/hotplug/shpchp_core.c
+++ b/drivers/pci/hotplug/shpchp_core.c
@@ -32,7 +32,6 @@ int shpchp_poll_time;
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE("GPL");
module_param(shpchp_debug, bool, 0644);
module_param(shpchp_poll_mode, bool, 0644);
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index 952217572113..b2e8322755c1 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -14,7 +14,7 @@
#include <linux/delay.h>
#include "pci.h"
-#define VIRTFN_ID_LEN 16
+#define VIRTFN_ID_LEN 17 /* "virtfn%u\0" for 2^32 - 1 */
int pci_iov_virtfn_bus(struct pci_dev *dev, int vf_id)
{
diff --git a/drivers/pci/p2pdma.c b/drivers/pci/p2pdma.c
index 86812d2073ea..9e8205572830 100644
--- a/drivers/pci/p2pdma.c
+++ b/drivers/pci/p2pdma.c
@@ -194,11 +194,13 @@ static const struct attribute_group p2pmem_group = {
static void p2pdma_page_free(struct page *page)
{
struct pci_p2pdma_pagemap *pgmap = to_p2p_pgmap(page->pgmap);
+ /* safe to dereference while a reference is held to the percpu ref */
+ struct pci_p2pdma *p2pdma =
+ rcu_dereference_protected(pgmap->provider->p2pdma, 1);
struct percpu_ref *ref;
- gen_pool_free_owner(pgmap->provider->p2pdma->pool,
- (uintptr_t)page_to_virt(page), PAGE_SIZE,
- (void **)&ref);
+ gen_pool_free_owner(p2pdma->pool, (uintptr_t)page_to_virt(page),
+ PAGE_SIZE, (void **)&ref);
percpu_ref_put(ref);
}
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 068d6745bf98..052a611081ec 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -976,24 +976,41 @@ bool acpi_pci_power_manageable(struct pci_dev *dev)
bool acpi_pci_bridge_d3(struct pci_dev *dev)
{
struct pci_dev *rpdev;
- struct acpi_device *adev;
- acpi_status status;
- unsigned long long state;
+ struct acpi_device *adev, *rpadev;
const union acpi_object *obj;
if (acpi_pci_disabled || !dev->is_hotplug_bridge)
return false;
- /* Assume D3 support if the bridge is power-manageable by ACPI. */
- if (acpi_pci_power_manageable(dev))
- return true;
+ adev = ACPI_COMPANION(&dev->dev);
+ if (adev) {
+ /*
+ * If the bridge has _S0W, whether or not it can go into D3
+ * depends on what is returned by that object. In particular,
+ * if the power state returned by _S0W is D2 or shallower,
+ * entering D3 should not be allowed.
+ */
+ if (acpi_dev_power_state_for_wake(adev) <= ACPI_STATE_D2)
+ return false;
+
+ /*
+ * Otherwise, assume that the bridge can enter D3 so long as it
+ * is power-manageable via ACPI.
+ */
+ if (acpi_device_power_manageable(adev))
+ return true;
+ }
rpdev = pcie_find_root_port(dev);
if (!rpdev)
return false;
- adev = ACPI_COMPANION(&rpdev->dev);
- if (!adev)
+ if (rpdev == dev)
+ rpadev = adev;
+ else
+ rpadev = ACPI_COMPANION(&rpdev->dev);
+
+ if (!rpadev)
return false;
/*
@@ -1001,15 +1018,15 @@ bool acpi_pci_bridge_d3(struct pci_dev *dev)
* doesn't supply a wakeup GPE via _PRW, it cannot signal hotplug
* events from low-power states including D3hot and D3cold.
*/
- if (!adev->wakeup.flags.valid)
+ if (!rpadev->wakeup.flags.valid)
return false;
/*
- * If the Root Port cannot wake itself from D3hot or D3cold, we
- * can't use D3.
+ * In the bridge-below-a-Root-Port case, evaluate _S0W for the Root Port
+ * to verify whether or not it can signal wakeup from D3.
*/
- status = acpi_evaluate_integer(adev->handle, "_S0W", NULL, &state);
- if (ACPI_SUCCESS(status) && state < ACPI_STATE_D3_HOT)
+ if (rpadev != adev &&
+ acpi_dev_power_state_for_wake(rpadev) <= ACPI_STATE_D2)
return false;
/*
@@ -1018,7 +1035,7 @@ bool acpi_pci_bridge_d3(struct pci_dev *dev)
* bridges *below* that Root Port can also signal hotplug events
* while in D3.
*/
- if (!acpi_dev_get_property(adev, "HotPlugSupportInD3",
+ if (!acpi_dev_get_property(rpadev, "HotPlugSupportInD3",
ACPI_TYPE_INTEGER, &obj) &&
obj->integer.value == 1)
return true;
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index a2ceeacc33eb..57ddcc59af30 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -572,7 +572,7 @@ static void pci_pm_default_resume_early(struct pci_dev *pci_dev)
static void pci_pm_bridge_power_up_actions(struct pci_dev *pci_dev)
{
- pci_bridge_wait_for_secondary_bus(pci_dev);
+ pci_bridge_wait_for_secondary_bus(pci_dev, "resume", PCI_RESET_WAIT);
/*
* When powering on a bridge from D3cold, the whole hierarchy may be
* powered on into D0uninitialized state, resume them to give them a
@@ -1545,9 +1545,9 @@ void pci_dev_put(struct pci_dev *dev)
}
EXPORT_SYMBOL(pci_dev_put);
-static int pci_uevent(struct device *dev, struct kobj_uevent_env *env)
+static int pci_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
- struct pci_dev *pdev;
+ const struct pci_dev *pdev;
if (!dev)
return -ENODEV;
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index fba95486caaf..7a67611dc5f4 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -167,9 +167,6 @@ static int __init pcie_port_pm_setup(char *str)
}
__setup("pcie_port_pm=", pcie_port_pm_setup);
-/* Time to wait after a reset for device to become responsive */
-#define PCIE_RESET_READY_POLL_MS 60000
-
/**
* pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
* @bus: pointer to PCI bus structure to search
@@ -1174,7 +1171,7 @@ static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout)
return -ENOTTY;
}
- if (delay > 1000)
+ if (delay > PCI_RESET_WAIT)
pci_info(dev, "not ready %dms after %s; waiting\n",
delay - 1, reset_type);
@@ -1183,7 +1180,7 @@ static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout)
pci_read_config_dword(dev, PCI_COMMAND, &id);
}
- if (delay > 1000)
+ if (delay > PCI_RESET_WAIT)
pci_info(dev, "ready %dms after %s\n", delay - 1,
reset_type);
@@ -1665,7 +1662,6 @@ int pci_save_state(struct pci_dev *dev)
return i;
pci_save_ltr_state(dev);
- pci_save_aspm_l1ss_state(dev);
pci_save_dpc_state(dev);
pci_save_aer_state(dev);
pci_save_ptm_state(dev);
@@ -1772,7 +1768,6 @@ void pci_restore_state(struct pci_dev *dev)
* LTR itself (in the PCIe capability).
*/
pci_restore_ltr_state(dev);
- pci_restore_aspm_l1ss_state(dev);
pci_restore_pcie_state(dev);
pci_restore_pasid_state(dev);
@@ -3465,11 +3460,6 @@ void pci_allocate_cap_save_buffers(struct pci_dev *dev)
if (error)
pci_err(dev, "unable to allocate suspend buffer for LTR\n");
- error = pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_L1SS,
- 2 * sizeof(u32));
- if (error)
- pci_err(dev, "unable to allocate suspend buffer for ASPM-L1SS\n");
-
pci_allocate_vc_save_buffers(dev);
}
@@ -4948,24 +4938,31 @@ static int pci_bus_max_d3cold_delay(const struct pci_bus *bus)
/**
* pci_bridge_wait_for_secondary_bus - Wait for secondary bus to be accessible
* @dev: PCI bridge
+ * @reset_type: reset type in human-readable form
+ * @timeout: maximum time to wait for devices on secondary bus (milliseconds)
*
* Handle necessary delays before access to the devices on the secondary
- * side of the bridge are permitted after D3cold to D0 transition.
+ * side of the bridge are permitted after D3cold to D0 transition
+ * or Conventional Reset.
*
* For PCIe this means the delays in PCIe 5.0 section 6.6.1. For
* conventional PCI it means Tpvrh + Trhfa specified in PCI 3.0 section
* 4.3.2.
+ *
+ * Return 0 on success or -ENOTTY if the first device on the secondary bus
+ * failed to become accessible.
*/
-void pci_bridge_wait_for_secondary_bus(struct pci_dev *dev)
+int pci_bridge_wait_for_secondary_bus(struct pci_dev *dev, char *reset_type,
+ int timeout)
{
struct pci_dev *child;
int delay;
if (pci_dev_is_disconnected(dev))
- return;
+ return 0;
- if (!pci_is_bridge(dev) || !dev->bridge_d3)
- return;
+ if (!pci_is_bridge(dev))
+ return 0;
down_read(&pci_bus_sem);
@@ -4977,14 +4974,14 @@ void pci_bridge_wait_for_secondary_bus(struct pci_dev *dev)
*/
if (!dev->subordinate || list_empty(&dev->subordinate->devices)) {
up_read(&pci_bus_sem);
- return;
+ return 0;
}
/* Take d3cold_delay requirements into account */
delay = pci_bus_max_d3cold_delay(dev->subordinate);
if (!delay) {
up_read(&pci_bus_sem);
- return;
+ return 0;
}
child = list_first_entry(&dev->subordinate->devices, struct pci_dev,
@@ -4993,14 +4990,12 @@ void pci_bridge_wait_for_secondary_bus(struct pci_dev *dev)
/*
* Conventional PCI and PCI-X we need to wait Tpvrh + Trhfa before
- * accessing the device after reset (that is 1000 ms + 100 ms). In
- * practice this should not be needed because we don't do power
- * management for them (see pci_bridge_d3_possible()).
+ * accessing the device after reset (that is 1000 ms + 100 ms).
*/
if (!pci_is_pcie(dev)) {
pci_dbg(dev, "waiting %d ms for secondary bus\n", 1000 + delay);
msleep(1000 + delay);
- return;
+ return 0;
}
/*
@@ -5017,11 +5012,11 @@ void pci_bridge_wait_for_secondary_bus(struct pci_dev *dev)
* configuration requests if we only wait for 100 ms (see
* https://bugzilla.kernel.org/show_bug.cgi?id=203885).
*
- * Therefore we wait for 100 ms and check for the device presence.
- * If it is still not present give it an additional 100 ms.
+ * Therefore we wait for 100 ms and check for the device presence
+ * until the timeout expires.
*/
if (!pcie_downstream_port(dev))
- return;
+ return 0;
if (pcie_get_speed_cap(dev) <= PCIE_SPEED_5_0GT) {
pci_dbg(dev, "waiting %d ms for downstream link\n", delay);
@@ -5032,14 +5027,11 @@ void pci_bridge_wait_for_secondary_bus(struct pci_dev *dev)
if (!pcie_wait_for_link_delay(dev, true, delay)) {
/* Did not train, no need to wait any further */
pci_info(dev, "Data Link Layer Link Active not set in 1000 msec\n");
- return;
+ return -ENOTTY;
}
}
- if (!pci_device_is_present(child)) {
- pci_dbg(child, "waiting additional %d ms to become accessible\n", delay);
- msleep(delay);
- }
+ return pci_dev_wait(child, reset_type, timeout - delay);
}
void pci_reset_secondary_bus(struct pci_dev *dev)
@@ -5058,15 +5050,6 @@ void pci_reset_secondary_bus(struct pci_dev *dev)
ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
-
- /*
- * Trhfa for conventional PCI is 2^25 clock cycles.
- * Assuming a minimum 33MHz clock this results in a 1s
- * delay before we can consider subordinate devices to
- * be re-initialized. PCIe has some ways to shorten this,
- * but we don't make use of them yet.
- */
- ssleep(1);
}
void __weak pcibios_reset_secondary_bus(struct pci_dev *dev)
@@ -5085,7 +5068,8 @@ int pci_bridge_secondary_bus_reset(struct pci_dev *dev)
{
pcibios_reset_secondary_bus(dev);
- return pci_dev_wait(dev, "bus reset", PCIE_RESET_READY_POLL_MS);
+ return pci_bridge_wait_for_secondary_bus(dev, "bus reset",
+ PCIE_RESET_READY_POLL_MS);
}
EXPORT_SYMBOL_GPL(pci_bridge_secondary_bus_reset);
@@ -6033,6 +6017,7 @@ int pcie_set_readrq(struct pci_dev *dev, int rq)
{
u16 v;
int ret;
+ struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);
if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
return -EINVAL;
@@ -6051,6 +6036,15 @@ int pcie_set_readrq(struct pci_dev *dev, int rq)
v = (ffs(rq) - 8) << 12;
+ if (bridge->no_inc_mrrs) {
+ int max_mrrs = pcie_get_readrq(dev);
+
+ if (rq > max_mrrs) {
+ pci_info(dev, "can't set Max_Read_Request_Size to %d; max is %d\n", rq, max_mrrs);
+ return -EINVAL;
+ }
+ }
+
ret = pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
PCI_EXP_DEVCTL_READRQ, v);
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 9ed3b5550043..d2c08670a20e 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -64,6 +64,19 @@ struct pci_cap_saved_state *pci_find_saved_ext_cap(struct pci_dev *dev,
#define PCI_PM_D3HOT_WAIT 10 /* msec */
#define PCI_PM_D3COLD_WAIT 100 /* msec */
+/*
+ * Following exit from Conventional Reset, devices must be ready within 1 sec
+ * (PCIe r6.0 sec 6.6.1). A D3cold to D0 transition implies a Conventional
+ * Reset (PCIe r6.0 sec 5.8).
+ */
+#define PCI_RESET_WAIT 1000 /* msec */
+/*
+ * Devices may extend the 1 sec period through Request Retry Status completions
+ * (PCIe r6.0 sec 2.3.1). The spec does not provide an upper limit, but 60 sec
+ * ought to be enough for any device to become responsive.
+ */
+#define PCIE_RESET_READY_POLL_MS 60000 /* msec */
+
void pci_update_current_state(struct pci_dev *dev, pci_power_t state);
void pci_refresh_power_state(struct pci_dev *dev);
int pci_power_up(struct pci_dev *dev);
@@ -86,8 +99,9 @@ void pci_msi_init(struct pci_dev *dev);
void pci_msix_init(struct pci_dev *dev);
bool pci_bridge_d3_possible(struct pci_dev *dev);
void pci_bridge_d3_update(struct pci_dev *dev);
-void pci_bridge_wait_for_secondary_bus(struct pci_dev *dev);
void pci_bridge_reconfigure_ltr(struct pci_dev *dev);
+int pci_bridge_wait_for_secondary_bus(struct pci_dev *dev, char *reset_type,
+ int timeout);
static inline void pci_wakeup_event(struct pci_dev *dev)
{
@@ -310,53 +324,36 @@ struct pci_sriov {
* @dev: PCI device to set new error_state
* @new: the state we want dev to be in
*
- * Must be called with device_lock held.
+ * If the device is experiencing perm_failure, it has to remain in that state.
+ * Any other transition is allowed.
*
* Returns true if state has been changed to the requested state.
*/
static inline bool pci_dev_set_io_state(struct pci_dev *dev,
pci_channel_state_t new)
{
- bool changed = false;
+ pci_channel_state_t old;
- device_lock_assert(&dev->dev);
switch (new) {
case pci_channel_io_perm_failure:
- switch (dev->error_state) {
- case pci_channel_io_frozen:
- case pci_channel_io_normal:
- case pci_channel_io_perm_failure:
- changed = true;
- break;
- }
- break;
+ xchg(&dev->error_state, pci_channel_io_perm_failure);
+ return true;
case pci_channel_io_frozen:
- switch (dev->error_state) {
- case pci_channel_io_frozen:
- case pci_channel_io_normal:
- changed = true;
- break;
- }
- break;
+ old = cmpxchg(&dev->error_state, pci_channel_io_normal,
+ pci_channel_io_frozen);
+ return old != pci_channel_io_perm_failure;
case pci_channel_io_normal:
- switch (dev->error_state) {
- case pci_channel_io_frozen:
- case pci_channel_io_normal:
- changed = true;
- break;
- }
- break;
+ old = cmpxchg(&dev->error_state, pci_channel_io_frozen,
+ pci_channel_io_normal);
+ return old != pci_channel_io_perm_failure;
+ default:
+ return false;
}
- if (changed)
- dev->error_state = new;
- return changed;
}
static inline int pci_dev_set_disconnected(struct pci_dev *dev, void *unused)
{
- device_lock(&dev->dev);
pci_dev_set_io_state(dev, pci_channel_io_perm_failure);
- device_unlock(&dev->dev);
return 0;
}
@@ -566,14 +563,10 @@ bool pcie_wait_for_link(struct pci_dev *pdev, bool active);
void pcie_aspm_init_link_state(struct pci_dev *pdev);
void pcie_aspm_exit_link_state(struct pci_dev *pdev);
void pcie_aspm_powersave_config_link(struct pci_dev *pdev);
-void pci_save_aspm_l1ss_state(struct pci_dev *dev);
-void pci_restore_aspm_l1ss_state(struct pci_dev *dev);
#else
static inline void pcie_aspm_init_link_state(struct pci_dev *pdev) { }
static inline void pcie_aspm_exit_link_state(struct pci_dev *pdev) { }
static inline void pcie_aspm_powersave_config_link(struct pci_dev *pdev) { }
-static inline void pci_save_aspm_l1ss_state(struct pci_dev *dev) { }
-static inline void pci_restore_aspm_l1ss_state(struct pci_dev *dev) { }
#endif
#ifdef CONFIG_PCIE_ECRC
diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c
index 625f7b2cafe4..f6c24ded134c 100644
--- a/drivers/pci/pcie/aer.c
+++ b/drivers/pci/pcie/aer.c
@@ -184,6 +184,9 @@ static int disable_ecrc_checking(struct pci_dev *dev)
*/
void pcie_set_ecrc_checking(struct pci_dev *dev)
{
+ if (!pcie_aer_is_native(dev))
+ return;
+
switch (ecrc_policy) {
case ECRC_POLICY_DEFAULT:
return;
@@ -1224,42 +1227,6 @@ static irqreturn_t aer_irq(int irq, void *context)
return IRQ_WAKE_THREAD;
}
-static int set_device_error_reporting(struct pci_dev *dev, void *data)
-{
- bool enable = *((bool *)data);
- int type = pci_pcie_type(dev);
-
- if ((type == PCI_EXP_TYPE_ROOT_PORT) ||
- (type == PCI_EXP_TYPE_RC_EC) ||
- (type == PCI_EXP_TYPE_UPSTREAM) ||
- (type == PCI_EXP_TYPE_DOWNSTREAM)) {
- if (enable)
- pci_enable_pcie_error_reporting(dev);
- else
- pci_disable_pcie_error_reporting(dev);
- }
-
- return 0;
-}
-
-/**
- * set_downstream_devices_error_reporting - enable/disable the error reporting bits on the root port and its downstream ports.
- * @dev: pointer to root port's pci_dev data structure
- * @enable: true = enable error reporting, false = disable error reporting.
- */
-static void set_downstream_devices_error_reporting(struct pci_dev *dev,
- bool enable)
-{
- set_device_error_reporting(dev, &enable);
-
- if (pci_pcie_type(dev) == PCI_EXP_TYPE_RC_EC)
- pcie_walk_rcec(dev, set_device_error_reporting, &enable);
- else if (dev->subordinate)
- pci_walk_bus(dev->subordinate, set_device_error_reporting,
- &enable);
-
-}
-
/**
* aer_enable_rootport - enable Root Port's interrupts when receiving messages
* @rpc: pointer to a Root Port data structure
@@ -1289,12 +1256,6 @@ static void aer_enable_rootport(struct aer_rpc *rpc)
pci_read_config_dword(pdev, aer + PCI_ERR_UNCOR_STATUS, &reg32);
pci_write_config_dword(pdev, aer + PCI_ERR_UNCOR_STATUS, reg32);
- /*
- * Enable error reporting for the root port device and downstream port
- * devices.
- */
- set_downstream_devices_error_reporting(pdev, true);
-
/* Enable Root Port's interrupt in response to error messages */
pci_read_config_dword(pdev, aer + PCI_ERR_ROOT_COMMAND, &reg32);
reg32 |= ROOT_PORT_INTR_ON_MESG_MASK;
@@ -1313,12 +1274,6 @@ static void aer_disable_rootport(struct aer_rpc *rpc)
int aer = pdev->aer_cap;
u32 reg32;
- /*
- * Disable error reporting for the root port device and downstream port
- * devices.
- */
- set_downstream_devices_error_reporting(pdev, false);
-
/* Disable Root's interrupt in response to error messages */
pci_read_config_dword(pdev, aer + PCI_ERR_ROOT_COMMAND, &reg32);
reg32 &= ~ROOT_PORT_INTR_ON_MESG_MASK;
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 53a1fa306e1e..66d7514ca111 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -470,31 +470,6 @@ static void pci_clear_and_set_dword(struct pci_dev *pdev, int pos,
pci_write_config_dword(pdev, pos, val);
}
-static void aspm_program_l1ss(struct pci_dev *dev, u32 ctl1, u32 ctl2)
-{
- u16 l1ss = dev->l1ss;
- u32 l1_2_enable;
-
- /*
- * Per PCIe r6.0, sec 5.5.4, T_POWER_ON in PCI_L1SS_CTL2 must be
- * programmed prior to setting the L1.2 enable bits in PCI_L1SS_CTL1.
- */
- pci_write_config_dword(dev, l1ss + PCI_L1SS_CTL2, ctl2);
-
- /*
- * In addition, Common_Mode_Restore_Time and LTR_L1.2_THRESHOLD in
- * PCI_L1SS_CTL1 must be programmed *before* setting the L1.2
- * enable bits, even though they're all in PCI_L1SS_CTL1.
- */
- l1_2_enable = ctl1 & PCI_L1SS_CTL1_L1_2_MASK;
- ctl1 &= ~PCI_L1SS_CTL1_L1_2_MASK;
-
- pci_write_config_dword(dev, l1ss + PCI_L1SS_CTL1, ctl1);
- if (l1_2_enable)
- pci_write_config_dword(dev, l1ss + PCI_L1SS_CTL1,
- ctl1 | l1_2_enable);
-}
-
/* Calculate L1.2 PM substate timing parameters */
static void aspm_calc_l1ss_info(struct pcie_link_state *link,
u32 parent_l1ss_cap, u32 child_l1ss_cap)
@@ -504,6 +479,7 @@ static void aspm_calc_l1ss_info(struct pcie_link_state *link,
u32 t_common_mode, t_power_on, l1_2_threshold, scale, value;
u32 ctl1 = 0, ctl2 = 0;
u32 pctl1, pctl2, cctl1, cctl2;
+ u32 pl1_2_enables, cl1_2_enables;
if (!(link->aspm_support & ASPM_STATE_L1_2_MASK))
return;
@@ -552,21 +528,39 @@ static void aspm_calc_l1ss_info(struct pcie_link_state *link,
ctl2 == pctl2 && ctl2 == cctl2)
return;
- pctl1 &= ~(PCI_L1SS_CTL1_CM_RESTORE_TIME |
- PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
- PCI_L1SS_CTL1_LTR_L12_TH_SCALE);
- pctl1 |= (ctl1 & (PCI_L1SS_CTL1_CM_RESTORE_TIME |
- PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
- PCI_L1SS_CTL1_LTR_L12_TH_SCALE));
- aspm_program_l1ss(parent, pctl1, ctl2);
-
- cctl1 &= ~(PCI_L1SS_CTL1_CM_RESTORE_TIME |
- PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
- PCI_L1SS_CTL1_LTR_L12_TH_SCALE);
- cctl1 |= (ctl1 & (PCI_L1SS_CTL1_CM_RESTORE_TIME |
- PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
- PCI_L1SS_CTL1_LTR_L12_TH_SCALE));
- aspm_program_l1ss(child, cctl1, ctl2);
+ /* Disable L1.2 while updating. See PCIe r5.0, sec 5.5.4, 7.8.3.3 */
+ pl1_2_enables = pctl1 & PCI_L1SS_CTL1_L1_2_MASK;
+ cl1_2_enables = cctl1 & PCI_L1SS_CTL1_L1_2_MASK;
+
+ if (pl1_2_enables || cl1_2_enables) {
+ pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1,
+ PCI_L1SS_CTL1_L1_2_MASK, 0);
+ pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
+ PCI_L1SS_CTL1_L1_2_MASK, 0);
+ }
+
+ /* Program T_POWER_ON times in both ports */
+ pci_write_config_dword(parent, parent->l1ss + PCI_L1SS_CTL2, ctl2);
+ pci_write_config_dword(child, child->l1ss + PCI_L1SS_CTL2, ctl2);
+
+ /* Program Common_Mode_Restore_Time in upstream device */
+ pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
+ PCI_L1SS_CTL1_CM_RESTORE_TIME, ctl1);
+
+ /* Program LTR_L1.2_THRESHOLD time in both ports */
+ pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
+ PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
+ PCI_L1SS_CTL1_LTR_L12_TH_SCALE, ctl1);
+ pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1,
+ PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
+ PCI_L1SS_CTL1_LTR_L12_TH_SCALE, ctl1);
+
+ if (pl1_2_enables || cl1_2_enables) {
+ pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1, 0,
+ pl1_2_enables);
+ pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1, 0,
+ cl1_2_enables);
+ }
}
static void aspm_l1ss_init(struct pcie_link_state *link)
@@ -757,43 +751,6 @@ static void pcie_config_aspm_l1ss(struct pcie_link_state *link, u32 state)
PCI_L1SS_CTL1_L1SS_MASK, val);
}
-void pci_save_aspm_l1ss_state(struct pci_dev *dev)
-{
- struct pci_cap_saved_state *save_state;
- u16 l1ss = dev->l1ss;
- u32 *cap;
-
- if (!l1ss)
- return;
-
- save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_L1SS);
- if (!save_state)
- return;
-
- cap = (u32 *)&save_state->cap.data[0];
- pci_read_config_dword(dev, l1ss + PCI_L1SS_CTL2, cap++);
- pci_read_config_dword(dev, l1ss + PCI_L1SS_CTL1, cap++);
-}
-
-void pci_restore_aspm_l1ss_state(struct pci_dev *dev)
-{
- struct pci_cap_saved_state *save_state;
- u32 *cap, ctl1, ctl2;
- u16 l1ss = dev->l1ss;
-
- if (!l1ss)
- return;
-
- save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_L1SS);
- if (!save_state)
- return;
-
- cap = (u32 *)&save_state->cap.data[0];
- ctl2 = *cap++;
- ctl1 = *cap;
- aspm_program_l1ss(dev, ctl1, ctl2);
-}
-
static void pcie_config_aspm_dev(struct pci_dev *pdev, u32 val)
{
pcie_capability_clear_and_set_word(pdev, PCI_EXP_LNKCTL,
@@ -1181,6 +1138,60 @@ int pci_disable_link_state(struct pci_dev *pdev, int state)
}
EXPORT_SYMBOL(pci_disable_link_state);
+/**
+ * pci_enable_link_state - Clear and set the default device link state so that
+ * the link may be allowed to enter the specified states. Note that if the
+ * BIOS didn't grant ASPM control to the OS, this does nothing because we can't
+ * touch the LNKCTL register. Also note that this does not enable states
+ * disabled by pci_disable_link_state(). Return 0 or a negative errno.
+ *
+ * @pdev: PCI device
+ * @state: Mask of ASPM link states to enable
+ */
+int pci_enable_link_state(struct pci_dev *pdev, int state)
+{
+ struct pcie_link_state *link = pcie_aspm_get_link(pdev);
+
+ if (!link)
+ return -EINVAL;
+ /*
+ * A driver requested that ASPM be enabled on this device, but
+ * if we don't have permission to manage ASPM (e.g., on ACPI
+ * systems we have to observe the FADT ACPI_FADT_NO_ASPM bit and
+ * the _OSC method), we can't honor that request.
+ */
+ if (aspm_disabled) {
+ pci_warn(pdev, "can't override BIOS ASPM; OS doesn't have ASPM control\n");
+ return -EPERM;
+ }
+
+ down_read(&pci_bus_sem);
+ mutex_lock(&aspm_lock);
+ link->aspm_default = 0;
+ if (state & PCIE_LINK_STATE_L0S)
+ link->aspm_default |= ASPM_STATE_L0S;
+ if (state & PCIE_LINK_STATE_L1)
+ /* L1 PM substates require L1 */
+ link->aspm_default |= ASPM_STATE_L1 | ASPM_STATE_L1SS;
+ if (state & PCIE_LINK_STATE_L1_1)
+ link->aspm_default |= ASPM_STATE_L1_1;
+ if (state & PCIE_LINK_STATE_L1_2)
+ link->aspm_default |= ASPM_STATE_L1_2;
+ if (state & PCIE_LINK_STATE_L1_1_PCIPM)
+ link->aspm_default |= ASPM_STATE_L1_1_PCIPM;
+ if (state & PCIE_LINK_STATE_L1_2_PCIPM)
+ link->aspm_default |= ASPM_STATE_L1_2_PCIPM;
+ pcie_config_aspm_link(link, policy_to_aspm_state(link));
+
+ link->clkpm_default = (state & PCIE_LINK_STATE_CLKPM) ? 1 : 0;
+ pcie_set_clkpm(link, policy_to_clkpm_state(link));
+ mutex_unlock(&aspm_lock);
+ up_read(&pci_bus_sem);
+
+ return 0;
+}
+EXPORT_SYMBOL(pci_enable_link_state);
+
static int pcie_aspm_set_policy(const char *val,
const struct kernel_param *kp)
{
diff --git a/drivers/pci/pcie/dpc.c b/drivers/pci/pcie/dpc.c
index f5ffea17c7f8..a5d7c69b764e 100644
--- a/drivers/pci/pcie/dpc.c
+++ b/drivers/pci/pcie/dpc.c
@@ -170,8 +170,8 @@ pci_ers_result_t dpc_reset_link(struct pci_dev *pdev)
pci_write_config_word(pdev, cap + PCI_EXP_DPC_STATUS,
PCI_EXP_DPC_STATUS_TRIGGER);
- if (!pcie_wait_for_link(pdev, true)) {
- pci_info(pdev, "Data Link Layer Link Active not set in 1000 msec\n");
+ if (pci_bridge_wait_for_secondary_bus(pdev, "DPC",
+ PCIE_RESET_READY_POLL_MS)) {
clear_bit(PCI_DPC_RECOVERED, &pdev->priv_flags);
ret = PCI_ERS_RESULT_DISCONNECT;
} else {
diff --git a/drivers/pci/pcie/portdrv.c b/drivers/pci/pcie/portdrv.c
index 2cc2e60bcb39..46fad0d813b2 100644
--- a/drivers/pci/pcie/portdrv.c
+++ b/drivers/pci/pcie/portdrv.c
@@ -501,7 +501,6 @@ static void pcie_port_device_remove(struct pci_dev *dev)
{
device_for_each_child(&dev->dev, NULL, remove_iter);
pci_free_irq_vectors(dev);
- pci_disable_device(dev);
}
/**
@@ -727,6 +726,19 @@ static void pcie_portdrv_remove(struct pci_dev *dev)
}
pcie_port_device_remove(dev);
+
+ pci_disable_device(dev);
+}
+
+static void pcie_portdrv_shutdown(struct pci_dev *dev)
+{
+ if (pci_bridge_d3_possible(dev)) {
+ pm_runtime_forbid(&dev->dev);
+ pm_runtime_get_noresume(&dev->dev);
+ pm_runtime_dont_use_autosuspend(&dev->dev);
+ }
+
+ pcie_port_device_remove(dev);
}
static pci_ers_result_t pcie_portdrv_error_detected(struct pci_dev *dev,
@@ -777,7 +789,7 @@ static struct pci_driver pcie_portdriver = {
.probe = pcie_portdrv_probe,
.remove = pcie_portdrv_remove,
- .shutdown = pcie_portdrv_remove,
+ .shutdown = pcie_portdrv_shutdown,
.err_handler = &pcie_portdrv_err_handler,
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index f796dfb9b14b..a3f68b6ba6ac 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -997,7 +997,7 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge)
resource_list_for_each_entry_safe(window, n, &resources) {
offset = window->offset;
res = window->res;
- if (!res->end)
+ if (!res->flags && !res->start && !res->end)
continue;
list_move_tail(&window->node, &bridge->windows);
@@ -1842,6 +1842,8 @@ int pci_setup_device(struct pci_dev *dev)
pci_set_of_node(dev);
pci_set_acpi_fwnode(dev);
+ if (dev->dev.fwnode && !fwnode_device_is_available(dev->dev.fwnode))
+ return -ENODEV;
pci_dev_assign_slot(dev);
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 285acc4aaccc..494fa46f5767 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -4835,6 +4835,26 @@ static int pci_quirk_brcm_acs(struct pci_dev *dev, u16 acs_flags)
PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
}
+/*
+ * Wangxun 10G/1G NICs have no ACS capability, and on multi-function
+ * devices, peer-to-peer transactions are not be used between the functions.
+ * So add an ACS quirk for below devices to isolate functions.
+ * SFxxx 1G NICs(em).
+ * RP1000/RP2000 10G NICs(sp).
+ */
+static int pci_quirk_wangxun_nic_acs(struct pci_dev *dev, u16 acs_flags)
+{
+ switch (dev->device) {
+ case 0x0100 ... 0x010F:
+ case 0x1001:
+ case 0x2001:
+ return pci_acs_ctrl_enabled(acs_flags,
+ PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
+ }
+
+ return false;
+}
+
static const struct pci_dev_acs_enabled {
u16 vendor;
u16 device;
@@ -4980,6 +5000,8 @@ static const struct pci_dev_acs_enabled {
{ PCI_VENDOR_ID_NXP, 0x8d9b, pci_quirk_nxp_rp_acs },
/* Zhaoxin Root/Downstream Ports */
{ PCI_VENDOR_ID_ZHAOXIN, PCI_ANY_ID, pci_quirk_zhaoxin_pcie_ports_acs },
+ /* Wangxun nics */
+ { PCI_VENDOR_ID_WANGXUN, PCI_ANY_ID, pci_quirk_wangxun_nic_acs },
{ 0 }
};
@@ -5340,6 +5362,7 @@ static void quirk_no_flr(struct pci_dev *dev)
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x1487, quirk_no_flr);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x148c, quirk_no_flr);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x149c, quirk_no_flr);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x7901, quirk_no_flr);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1502, quirk_no_flr);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1503, quirk_no_flr);
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index b4096598dbcb..c690572b10ce 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -1765,12 +1765,70 @@ static void adjust_bridge_window(struct pci_dev *bridge, struct resource *res,
add_size = size - new_size;
pci_dbg(bridge, "bridge window %pR shrunken by %pa\n", res,
&add_size);
+ } else {
+ return;
}
res->end = res->start + new_size - 1;
- remove_from_list(add_list, res);
+
+ /* If the resource is part of the add_list, remove it now */
+ if (add_list)
+ remove_from_list(add_list, res);
+}
+
+static void remove_dev_resource(struct resource *avail, struct pci_dev *dev,
+ struct resource *res)
+{
+ resource_size_t size, align, tmp;
+
+ size = resource_size(res);
+ if (!size)
+ return;
+
+ align = pci_resource_alignment(dev, res);
+ align = align ? ALIGN(avail->start, align) - avail->start : 0;
+ tmp = align + size;
+ avail->start = min(avail->start + tmp, avail->end + 1);
+}
+
+static void remove_dev_resources(struct pci_dev *dev, struct resource *io,
+ struct resource *mmio,
+ struct resource *mmio_pref)
+{
+ int i;
+
+ for (i = 0; i < PCI_NUM_RESOURCES; i++) {
+ struct resource *res = &dev->resource[i];
+
+ if (resource_type(res) == IORESOURCE_IO) {
+ remove_dev_resource(io, dev, res);
+ } else if (resource_type(res) == IORESOURCE_MEM) {
+
+ /*
+ * Make sure prefetchable memory is reduced from
+ * the correct resource. Specifically we put 32-bit
+ * prefetchable memory in non-prefetchable window
+ * if there is an 64-bit pretchable window.
+ *
+ * See comments in __pci_bus_size_bridges() for
+ * more information.
+ */
+ if ((res->flags & IORESOURCE_PREFETCH) &&
+ ((res->flags & IORESOURCE_MEM_64) ==
+ (mmio_pref->flags & IORESOURCE_MEM_64)))
+ remove_dev_resource(mmio_pref, dev, res);
+ else
+ remove_dev_resource(mmio, dev, res);
+ }
+ }
}
+/*
+ * io, mmio and mmio_pref contain the total amount of bridge window space
+ * available. This includes the minimal space needed to cover all the
+ * existing devices on the bus and the possible extra space that can be
+ * shared with the bridges.
+ */
static void pci_bus_distribute_available_resources(struct pci_bus *bus,
struct list_head *add_list,
struct resource io,
@@ -1780,7 +1838,7 @@ static void pci_bus_distribute_available_resources(struct pci_bus *bus,
unsigned int normal_bridges = 0, hotplug_bridges = 0;
struct resource *io_res, *mmio_res, *mmio_pref_res;
struct pci_dev *dev, *bridge = bus->self;
- resource_size_t io_per_hp, mmio_per_hp, mmio_pref_per_hp, align;
+ resource_size_t io_per_b, mmio_per_b, mmio_pref_per_b, align;
io_res = &bridge->resource[PCI_BRIDGE_IO_WINDOW];
mmio_res = &bridge->resource[PCI_BRIDGE_MEM_WINDOW];
@@ -1824,94 +1882,88 @@ static void pci_bus_distribute_available_resources(struct pci_bus *bus,
normal_bridges++;
}
+ if (!(hotplug_bridges + normal_bridges))
+ return;
+
/*
- * There is only one bridge on the bus so it gets all available
- * resources which it can then distribute to the possible hotplug
- * bridges below.
+ * Calculate the amount of space we can forward from "bus" to any
+ * downstream buses, i.e., the space left over after assigning the
+ * BARs and windows on "bus".
*/
- if (hotplug_bridges + normal_bridges == 1) {
- dev = list_first_entry(&bus->devices, struct pci_dev, bus_list);
- if (dev->subordinate)
- pci_bus_distribute_available_resources(dev->subordinate,
- add_list, io, mmio, mmio_pref);
- return;
+ list_for_each_entry(dev, &bus->devices, bus_list) {
+ if (!dev->is_virtfn)
+ remove_dev_resources(dev, &io, &mmio, &mmio_pref);
}
- if (hotplug_bridges == 0)
- return;
-
/*
- * Calculate the total amount of extra resource space we can
- * pass to bridges below this one. This is basically the
- * extra space reduced by the minimal required space for the
- * non-hotplug bridges.
+ * If there is at least one hotplug bridge on this bus it gets all
+ * the extra resource space that was left after the reductions
+ * above.
+ *
+ * If there are no hotplug bridges the extra resource space is
+ * split between non-hotplug bridges. This is to allow possible
+ * hotplug bridges below them to get the extra space as well.
*/
+ if (hotplug_bridges) {
+ io_per_b = div64_ul(resource_size(&io), hotplug_bridges);
+ mmio_per_b = div64_ul(resource_size(&mmio), hotplug_bridges);
+ mmio_pref_per_b = div64_ul(resource_size(&mmio_pref),
+ hotplug_bridges);
+ } else {
+ io_per_b = div64_ul(resource_size(&io), normal_bridges);
+ mmio_per_b = div64_ul(resource_size(&mmio), normal_bridges);
+ mmio_pref_per_b = div64_ul(resource_size(&mmio_pref),
+ normal_bridges);
+ }
+
for_each_pci_bridge(dev, bus) {
- resource_size_t used_size;
struct resource *res;
+ struct pci_bus *b;
- if (dev->is_hotplug_bridge)
+ b = dev->subordinate;
+ if (!b)
+ continue;
+ if (hotplug_bridges && !dev->is_hotplug_bridge)
continue;
+ res = &dev->resource[PCI_BRIDGE_IO_WINDOW];
+
/*
- * Reduce the available resource space by what the
- * bridge and devices below it occupy.
+ * Make sure the split resource space is properly aligned
+ * for bridge windows (align it down to avoid going above
+ * what is available).
*/
- res = &dev->resource[PCI_BRIDGE_IO_WINDOW];
align = pci_resource_alignment(dev, res);
- align = align ? ALIGN(io.start, align) - io.start : 0;
- used_size = align + resource_size(res);
- if (!res->parent)
- io.start = min(io.start + used_size, io.end + 1);
+ io.end = align ? io.start + ALIGN_DOWN(io_per_b, align) - 1
+ : io.start + io_per_b - 1;
+
+ /*
+ * The x_per_b holds the extra resource space that can be
+ * added for each bridge but there is the minimal already
+ * reserved as well so adjust x.start down accordingly to
+ * cover the whole space.
+ */
+ io.start -= resource_size(res);
res = &dev->resource[PCI_BRIDGE_MEM_WINDOW];
align = pci_resource_alignment(dev, res);
- align = align ? ALIGN(mmio.start, align) - mmio.start : 0;
- used_size = align + resource_size(res);
- if (!res->parent)
- mmio.start = min(mmio.start + used_size, mmio.end + 1);
+ mmio.end = align ? mmio.start + ALIGN_DOWN(mmio_per_b, align) - 1
+ : mmio.start + mmio_per_b - 1;
+ mmio.start -= resource_size(res);
res = &dev->resource[PCI_BRIDGE_PREF_MEM_WINDOW];
align = pci_resource_alignment(dev, res);
- align = align ? ALIGN(mmio_pref.start, align) -
- mmio_pref.start : 0;
- used_size = align + resource_size(res);
- if (!res->parent)
- mmio_pref.start = min(mmio_pref.start + used_size,
- mmio_pref.end + 1);
- }
-
- io_per_hp = div64_ul(resource_size(&io), hotplug_bridges);
- mmio_per_hp = div64_ul(resource_size(&mmio), hotplug_bridges);
- mmio_pref_per_hp = div64_ul(resource_size(&mmio_pref),
- hotplug_bridges);
-
- /*
- * Go over devices on this bus and distribute the remaining
- * resource space between hotplug bridges.
- */
- for_each_pci_bridge(dev, bus) {
- struct pci_bus *b;
-
- b = dev->subordinate;
- if (!b || !dev->is_hotplug_bridge)
- continue;
-
- /*
- * Distribute available extra resources equally between
- * hotplug-capable downstream ports taking alignment into
- * account.
- */
- io.end = io.start + io_per_hp - 1;
- mmio.end = mmio.start + mmio_per_hp - 1;
- mmio_pref.end = mmio_pref.start + mmio_pref_per_hp - 1;
+ mmio_pref.end = align ? mmio_pref.start +
+ ALIGN_DOWN(mmio_pref_per_b, align) - 1
+ : mmio_pref.start + mmio_pref_per_b - 1;
+ mmio_pref.start -= resource_size(res);
pci_bus_distribute_available_resources(b, add_list, io, mmio,
mmio_pref);
- io.start += io_per_hp;
- mmio.start += mmio_per_hp;
- mmio_pref.start += mmio_pref_per_hp;
+ io.start += io.end + 1;
+ mmio.start += mmio.end + 1;
+ mmio_pref.start += mmio_pref.end + 1;
}
}
@@ -1923,6 +1975,8 @@ static void pci_bridge_distribute_available_resources(struct pci_dev *bridge,
if (!bridge->is_hotplug_bridge)
return;
+ pci_dbg(bridge, "distributing available resources\n");
+
/* Take the initial extra resources from the hotplug port */
available_io = bridge->resource[PCI_BRIDGE_IO_WINDOW];
available_mmio = bridge->resource[PCI_BRIDGE_MEM_WINDOW];
@@ -1934,6 +1988,54 @@ static void pci_bridge_distribute_available_resources(struct pci_dev *bridge,
available_mmio_pref);
}
+static bool pci_bridge_resources_not_assigned(struct pci_dev *dev)
+{
+ const struct resource *r;
+
+ /*
+ * If the child device's resources are not yet assigned it means we
+ * are configuring them (not the boot firmware), so we should be
+ * able to extend the upstream bridge resources in the same way we
+ * do with the normal hotplug case.
+ */
+ r = &dev->resource[PCI_BRIDGE_IO_WINDOW];
+ if (r->flags && !(r->flags & IORESOURCE_STARTALIGN))
+ return false;
+ r = &dev->resource[PCI_BRIDGE_MEM_WINDOW];
+ if (r->flags && !(r->flags & IORESOURCE_STARTALIGN))
+ return false;
+ r = &dev->resource[PCI_BRIDGE_PREF_MEM_WINDOW];
+ if (r->flags && !(r->flags & IORESOURCE_STARTALIGN))
+ return false;
+
+ return true;
+}
+
+static void
+pci_root_bus_distribute_available_resources(struct pci_bus *bus,
+ struct list_head *add_list)
+{
+ struct pci_dev *dev, *bridge = bus->self;
+
+ for_each_pci_bridge(dev, bus) {
+ struct pci_bus *b;
+
+ b = dev->subordinate;
+ if (!b)
+ continue;
+
+ /*
+ * Need to check "bridge" here too because it is NULL
+ * in case of root bus.
+ */
+ if (bridge && pci_bridge_resources_not_assigned(dev))
+ pci_bridge_distribute_available_resources(bridge,
+ add_list);
+ else
+ pci_root_bus_distribute_available_resources(b, add_list);
+ }
+}
+
/*
* First try will not touch PCI bridge res.
* Second and later try will clear small leaf bridge res.
@@ -1973,6 +2075,8 @@ again:
*/
__pci_bus_size_bridges(bus, add_list);
+ pci_root_bus_distribute_available_resources(bus, add_list);
+
/* Depth last, allocate resources and update the hardware. */
__pci_bus_assign_resources(bus, add_list, &fail_head);
if (add_list)
diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c
index a0c67191a8b9..0f87cade10f7 100644
--- a/drivers/pci/slot.c
+++ b/drivers/pci/slot.c
@@ -98,7 +98,7 @@ static struct attribute *pci_slot_default_attrs[] = {
};
ATTRIBUTE_GROUPS(pci_slot_default);
-static struct kobj_type pci_slot_ktype = {
+static const struct kobj_type pci_slot_ktype = {
.sysfs_ops = &pci_slot_sysfs_ops,
.release = &pci_slot_release,
.default_groups = pci_slot_default_groups,
diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c
index 75be4fe22509..3d6f17ff2429 100644
--- a/drivers/pci/switch/switchtec.c
+++ b/drivers/pci/switch/switchtec.c
@@ -606,21 +606,20 @@ static ssize_t switchtec_dev_read(struct file *filp, char __user *data,
rc = copy_to_user(data, &stuser->return_code,
sizeof(stuser->return_code));
if (rc) {
- rc = -EFAULT;
- goto out;
+ mutex_unlock(&stdev->mrpc_mutex);
+ return -EFAULT;
}
data += sizeof(stuser->return_code);
rc = copy_to_user(data, &stuser->data,
size - sizeof(stuser->return_code));
if (rc) {
- rc = -EFAULT;
- goto out;
+ mutex_unlock(&stdev->mrpc_mutex);
+ return -EFAULT;
}
stuser_set_state(stuser, MRPC_IDLE);
-out:
mutex_unlock(&stdev->mrpc_mutex);
if (stuser->status == SWITCHTEC_MRPC_STATUS_DONE ||
@@ -1480,15 +1479,13 @@ static irqreturn_t switchtec_event_isr(int irq, void *dev)
static irqreturn_t switchtec_dma_mrpc_isr(int irq, void *dev)
{
struct switchtec_dev *stdev = dev;
- irqreturn_t ret = IRQ_NONE;
iowrite32(SWITCHTEC_EVENT_CLEAR |
SWITCHTEC_EVENT_EN_IRQ,
&stdev->mmio_part_cfg->mrpc_comp_hdr);
schedule_work(&stdev->mrpc_work);
- ret = IRQ_HANDLED;
- return ret;
+ return IRQ_HANDLED;
}
static int switchtec_init_isr(struct switchtec_dev *stdev)
diff --git a/drivers/pcmcia/Kconfig b/drivers/pcmcia/Kconfig
index 1525023e49b6..44c16508ef14 100644
--- a/drivers/pcmcia/Kconfig
+++ b/drivers/pcmcia/Kconfig
@@ -194,10 +194,8 @@ config PCMCIA_SA1111
tristate "SA1111 support"
depends on ARM && SA1111 && PCMCIA
select PCMCIA_SOC_COMMON
- select PCMCIA_SA11XX_BASE if ARCH_SA1100
- select PCMCIA_PXA2XX if ARCH_LUBBOCK && SA1111
+ select PCMCIA_SA11XX_BASE
select PCMCIA_MAX1600 if ASSABET_NEPONSET
- select PCMCIA_MAX1600 if ARCH_LUBBOCK && SA1111
help
Say Y here to include support for SA1111-based PCMCIA or CF
sockets, found on the Jornada 720, Graphicsmaster and other
@@ -207,14 +205,8 @@ config PCMCIA_SA1111
config PCMCIA_PXA2XX
tristate "PXA2xx support"
- depends on ARM && ARCH_PXA && PCMCIA
- depends on (ARCH_LUBBOCK || MACH_MAINSTONE || PXA_SHARPSL \
- || ARCH_PXA_PALM || TRIZEPS_PCMCIA \
- || ARCOM_PCMCIA || ARCH_PXA_ESERIES \
- || MACH_VPAC270 || MACH_BALLOON3 || MACH_COLIBRI \
- || MACH_COLIBRI320 || MACH_H4700)
+ depends on ARM && ARCH_PXA && PCMCIA && PXA_SHARPSL
select PCMCIA_SOC_COMMON
- select PCMCIA_MAX1600 if MACH_MAINSTONE
help
Say Y here to include support for the PXA2xx PCMCIA controller
diff --git a/drivers/pcmcia/Makefile b/drivers/pcmcia/Makefile
index b3a2accf47af..c9d51b150682 100644
--- a/drivers/pcmcia/Makefile
+++ b/drivers/pcmcia/Makefile
@@ -36,17 +36,12 @@ obj-$(CONFIG_PCMCIA_MAX1600) += max1600.o
sa1111_cs-y += sa1111_generic.o
sa1111_cs-$(CONFIG_ASSABET_NEPONSET) += sa1111_neponset.o
-sa1111_cs-$(CONFIG_SA1100_BADGE4) += sa1111_badge4.o
sa1111_cs-$(CONFIG_SA1100_JORNADA720) += sa1111_jornada720.o
-sa1111_cs-$(CONFIG_ARCH_LUBBOCK) += sa1111_lubbock.o
sa1100_cs-y += sa1100_generic.o
sa1100_cs-$(CONFIG_SA1100_COLLIE) += pxa2xx_sharpsl.o
-sa1100_cs-$(CONFIG_SA1100_H3100) += sa1100_h3600.o
sa1100_cs-$(CONFIG_SA1100_H3600) += sa1100_h3600.o
-sa1100_cs-$(CONFIG_SA1100_SIMPAD) += sa1100_simpad.o
-pxa2xx-obj-$(CONFIG_MACH_MAINSTONE) += pxa2xx_mainstone.o
pxa2xx-obj-$(CONFIG_PXA_SHARPSL) += pxa2xx_sharpsl.o
obj-$(CONFIG_PCMCIA_PXA2XX) += pxa2xx_base.o $(pxa2xx-obj-y)
obj-$(CONFIG_PCMCIA_XXS1500) += xxs1500_ss.o
diff --git a/drivers/pcmcia/ds.c b/drivers/pcmcia/ds.c
index ace133b9f7d4..c8087efa5e4a 100644
--- a/drivers/pcmcia/ds.c
+++ b/drivers/pcmcia/ds.c
@@ -927,9 +927,9 @@ static int pcmcia_bus_match(struct device *dev, struct device_driver *drv)
return 0;
}
-static int pcmcia_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
+static int pcmcia_bus_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
- struct pcmcia_device *p_dev;
+ const struct pcmcia_device *p_dev;
int i;
u32 hash[4] = { 0, 0, 0, 0};
diff --git a/drivers/pcmcia/pxa2xx_base.c b/drivers/pcmcia/pxa2xx_base.c
index 0ea41f1411e5..5254028354f4 100644
--- a/drivers/pcmcia/pxa2xx_base.c
+++ b/drivers/pcmcia/pxa2xx_base.c
@@ -206,13 +206,7 @@ pxa2xx_pcmcia_frequency_change(struct soc_pcmcia_socket *skt,
void pxa2xx_configure_sockets(struct device *dev, struct pcmcia_low_level *ops)
{
- int nr = 1;
-
- if ((ops->first + ops->nr) > 1 ||
- machine_is_viper() || machine_is_arcom_zeus())
- nr = 2;
-
- pxa_smemc_set_pcmcia_socket(nr);
+ pxa_smemc_set_pcmcia_socket(1);
}
EXPORT_SYMBOL(pxa2xx_configure_sockets);
diff --git a/drivers/pcmcia/pxa2xx_mainstone.c b/drivers/pcmcia/pxa2xx_mainstone.c
deleted file mode 100644
index a076e4108452..000000000000
--- a/drivers/pcmcia/pxa2xx_mainstone.c
+++ /dev/null
@@ -1,122 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * linux/drivers/pcmcia/pxa2xx_mainstone.c
- *
- * Mainstone PCMCIA specific routines.
- *
- * Created: May 12, 2004
- * Author: Nicolas Pitre
- * Copyright: MontaVista Software Inc.
- */
-#include <linux/gpio/consumer.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/platform_device.h>
-
-#include <pcmcia/ss.h>
-
-#include <asm/mach-types.h>
-
-#include "soc_common.h"
-#include "max1600.h"
-
-static int mst_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
-{
- struct device *dev = skt->socket.dev.parent;
- struct max1600 *m;
- int ret;
-
- skt->stat[SOC_STAT_CD].name = skt->nr ? "bdetect" : "adetect";
- skt->stat[SOC_STAT_BVD1].name = skt->nr ? "bbvd1" : "abvd1";
- skt->stat[SOC_STAT_BVD2].name = skt->nr ? "bbvd2" : "abvd2";
- skt->stat[SOC_STAT_RDY].name = skt->nr ? "bready" : "aready";
- skt->stat[SOC_STAT_VS1].name = skt->nr ? "bvs1" : "avs1";
- skt->stat[SOC_STAT_VS2].name = skt->nr ? "bvs2" : "avs2";
-
- skt->gpio_reset = devm_gpiod_get(dev, skt->nr ? "breset" : "areset",
- GPIOD_OUT_HIGH);
- if (IS_ERR(skt->gpio_reset))
- return PTR_ERR(skt->gpio_reset);
-
- ret = max1600_init(dev, &m, skt->nr ? MAX1600_CHAN_B : MAX1600_CHAN_A,
- MAX1600_CODE_HIGH);
- if (ret)
- return ret;
-
- skt->driver_data = m;
-
- return soc_pcmcia_request_gpiods(skt);
-}
-
-static unsigned int mst_pcmcia_bvd1_status[2];
-
-static void mst_pcmcia_socket_state(struct soc_pcmcia_socket *skt,
- struct pcmcia_state *state)
-{
- unsigned int flip = mst_pcmcia_bvd1_status[skt->nr] ^ state->bvd1;
-
- /*
- * Workaround for STSCHG which can't be deasserted:
- * We therefore disable/enable corresponding IRQs
- * as needed to avoid IRQ locks.
- */
- if (flip) {
- mst_pcmcia_bvd1_status[skt->nr] = state->bvd1;
- if (state->bvd1)
- enable_irq(skt->stat[SOC_STAT_BVD1].irq);
- else
- disable_irq(skt->stat[SOC_STAT_BVD2].irq);
- }
-}
-
-static int mst_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
- const socket_state_t *state)
-{
- return max1600_configure(skt->driver_data, state->Vcc, state->Vpp);
-}
-
-static struct pcmcia_low_level mst_pcmcia_ops __initdata = {
- .owner = THIS_MODULE,
- .hw_init = mst_pcmcia_hw_init,
- .socket_state = mst_pcmcia_socket_state,
- .configure_socket = mst_pcmcia_configure_socket,
- .nr = 2,
-};
-
-static struct platform_device *mst_pcmcia_device;
-
-static int __init mst_pcmcia_init(void)
-{
- int ret;
-
- if (!machine_is_mainstone())
- return -ENODEV;
-
- mst_pcmcia_device = platform_device_alloc("pxa2xx-pcmcia", -1);
- if (!mst_pcmcia_device)
- return -ENOMEM;
-
- ret = platform_device_add_data(mst_pcmcia_device, &mst_pcmcia_ops,
- sizeof(mst_pcmcia_ops));
- if (ret == 0)
- ret = platform_device_add(mst_pcmcia_device);
-
- if (ret)
- platform_device_put(mst_pcmcia_device);
-
- return ret;
-}
-
-static void __exit mst_pcmcia_exit(void)
-{
- platform_device_unregister(mst_pcmcia_device);
-}
-
-fs_initcall(mst_pcmcia_init);
-module_exit(mst_pcmcia_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:pxa2xx-pcmcia");
diff --git a/drivers/pcmcia/sa1100_generic.c b/drivers/pcmcia/sa1100_generic.c
index c2b6e828c2c6..89d4ba58c891 100644
--- a/drivers/pcmcia/sa1100_generic.c
+++ b/drivers/pcmcia/sa1100_generic.c
@@ -98,12 +98,9 @@ static struct pcmcia_low_level sa11x0_cf_ops = {
int __init pcmcia_collie_init(struct device *dev);
static int (*sa11x0_pcmcia_legacy_hw_init[])(struct device *dev) = {
-#if defined(CONFIG_SA1100_H3100) || defined(CONFIG_SA1100_H3600)
+#ifdef CONFIG_SA1100_H3600
pcmcia_h3600_init,
#endif
-#ifdef CONFIG_SA1100_SIMPAD
- pcmcia_simpad_init,
-#endif
#ifdef CONFIG_SA1100_COLLIE
pcmcia_collie_init,
#endif
diff --git a/drivers/pcmcia/sa1100_h3600.c b/drivers/pcmcia/sa1100_h3600.c
index a91222bc3824..10cb99c20a7f 100644
--- a/drivers/pcmcia/sa1100_h3600.c
+++ b/drivers/pcmcia/sa1100_h3600.c
@@ -156,7 +156,7 @@ int pcmcia_h3600_init(struct device *dev)
{
int ret = -ENODEV;
- if (machine_is_h3600() || machine_is_h3100())
+ if (machine_is_h3600())
ret = sa11xx_drv_pcmcia_probe(dev, &h3600_pcmcia_ops, 0, 2);
return ret;
diff --git a/drivers/pcmcia/sa1100_simpad.c b/drivers/pcmcia/sa1100_simpad.c
deleted file mode 100644
index 784ada5b8c4f..000000000000
--- a/drivers/pcmcia/sa1100_simpad.c
+++ /dev/null
@@ -1,115 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * drivers/pcmcia/sa1100_simpad.c
- *
- * PCMCIA implementation routines for simpad
- *
- */
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/device.h>
-#include <linux/init.h>
-
-#include <mach/hardware.h>
-#include <asm/mach-types.h>
-#include <mach/simpad.h>
-#include "sa1100_generic.h"
-
-static int simpad_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
-{
-
- simpad_clear_cs3_bit(VCC_3V_EN|VCC_5V_EN|EN0|EN1);
-
- skt->stat[SOC_STAT_CD].name = "cf-detect";
- skt->stat[SOC_STAT_RDY].name = "cf-ready";
-
- return soc_pcmcia_request_gpiods(skt);
-}
-
-static void simpad_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt)
-{
- /* Disable CF bus: */
- /*simpad_set_cs3_bit(PCMCIA_BUFF_DIS);*/
- simpad_clear_cs3_bit(PCMCIA_RESET);
-}
-
-static void
-simpad_pcmcia_socket_state(struct soc_pcmcia_socket *skt,
- struct pcmcia_state *state)
-{
- long cs3reg = simpad_get_cs3_ro();
-
- /* bvd1 might be cs3reg & PCMCIA_BVD1 */
- /* bvd2 might be cs3reg & PCMCIA_BVD2 */
-
- if ((cs3reg & (PCMCIA_VS1|PCMCIA_VS2)) ==
- (PCMCIA_VS1|PCMCIA_VS2)) {
- state->vs_3v=0;
- state->vs_Xv=0;
- } else {
- state->vs_3v=1;
- state->vs_Xv=0;
- }
-}
-
-static int
-simpad_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
- const socket_state_t *state)
-{
- unsigned long flags;
-
- local_irq_save(flags);
-
- /* Murphy: see table of MIC2562a-1 */
- switch (state->Vcc) {
- case 0:
- simpad_clear_cs3_bit(VCC_3V_EN|VCC_5V_EN|EN0|EN1);
- break;
-
- case 33:
- simpad_clear_cs3_bit(VCC_3V_EN|EN1);
- simpad_set_cs3_bit(VCC_5V_EN|EN0);
- break;
-
- case 50:
- simpad_clear_cs3_bit(VCC_5V_EN|EN1);
- simpad_set_cs3_bit(VCC_3V_EN|EN0);
- break;
-
- default:
- printk(KERN_ERR "%s(): unrecognized Vcc %u\n",
- __func__, state->Vcc);
- simpad_clear_cs3_bit(VCC_3V_EN|VCC_5V_EN|EN0|EN1);
- local_irq_restore(flags);
- return -1;
- }
-
-
- local_irq_restore(flags);
-
- return 0;
-}
-
-static void simpad_pcmcia_socket_suspend(struct soc_pcmcia_socket *skt)
-{
- simpad_set_cs3_bit(PCMCIA_RESET);
-}
-
-static struct pcmcia_low_level simpad_pcmcia_ops = {
- .owner = THIS_MODULE,
- .hw_init = simpad_pcmcia_hw_init,
- .hw_shutdown = simpad_pcmcia_hw_shutdown,
- .socket_state = simpad_pcmcia_socket_state,
- .configure_socket = simpad_pcmcia_configure_socket,
- .socket_suspend = simpad_pcmcia_socket_suspend,
-};
-
-int pcmcia_simpad_init(struct device *dev)
-{
- int ret = -ENODEV;
-
- if (machine_is_simpad())
- ret = sa11xx_drv_pcmcia_probe(dev, &simpad_pcmcia_ops, 1, 1);
-
- return ret;
-}
diff --git a/drivers/pcmcia/sa1111_badge4.c b/drivers/pcmcia/sa1111_badge4.c
deleted file mode 100644
index e76d5ba921dd..000000000000
--- a/drivers/pcmcia/sa1111_badge4.c
+++ /dev/null
@@ -1,158 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * linux/drivers/pcmcia/sa1100_badge4.c
- *
- * BadgePAD 4 PCMCIA specific routines
- *
- * Christopher Hoover <ch@hpl.hp.com>
- *
- * Copyright (C) 2002 Hewlett-Packard Company
- */
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/device.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-
-#include <mach/hardware.h>
-#include <asm/mach-types.h>
-#include <mach/badge4.h>
-#include <asm/hardware/sa1111.h>
-
-#include "sa1111_generic.h"
-
-/*
- * BadgePAD 4 Details
- *
- * PCM Vcc:
- *
- * PCM Vcc on BadgePAD 4 can be jumpered for 3v3 (short pins 1 and 3
- * on JP6) or 5v0 (short pins 3 and 5 on JP6).
- *
- * PCM Vpp:
- *
- * PCM Vpp on BadgePAD 4 can be jumpered for 12v0 (short pins 4 and 6
- * on JP6) or tied to PCM Vcc (short pins 2 and 4 on JP6). N.B.,
- * 12v0 operation requires that the power supply actually supply 12v0
- * via pin 7 of JP7.
- *
- * CF Vcc:
- *
- * CF Vcc on BadgePAD 4 can be jumpered either for 3v3 (short pins 1
- * and 2 on JP10) or 5v0 (short pins 2 and 3 on JP10).
- *
- * Unfortunately there's no way programmatically to determine how a
- * given board is jumpered. This code assumes a default jumpering
- * as described below.
- *
- * If the defaults aren't correct, you may override them with a pcmv
- * setup argument: pcmv=<pcm vcc>,<pcm vpp>,<cf vcc>. The units are
- * tenths of volts; e.g. pcmv=33,120,50 indicates 3v3 PCM Vcc, 12v0
- * PCM Vpp, and 5v0 CF Vcc.
- *
- */
-
-static int badge4_pcmvcc = 50; /* pins 3 and 5 jumpered on JP6 */
-static int badge4_pcmvpp = 50; /* pins 2 and 4 jumpered on JP6 */
-static int badge4_cfvcc = 33; /* pins 1 and 2 jumpered on JP10 */
-
-static void complain_about_jumpering(const char *whom,
- const char *supply,
- int given, int wanted)
-{
- printk(KERN_ERR
- "%s: %s %d.%dV wanted but board is jumpered for %s %d.%dV operation"
- "; re-jumper the board and/or use pcmv=xx,xx,xx\n",
- whom, supply,
- wanted / 10, wanted % 10,
- supply,
- given / 10, given % 10);
-}
-
-static int
-badge4_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, const socket_state_t *state)
-{
- int ret;
-
- switch (skt->nr) {
- case 0:
- if ((state->Vcc != 0) &&
- (state->Vcc != badge4_pcmvcc)) {
- complain_about_jumpering(__func__, "pcmvcc",
- badge4_pcmvcc, state->Vcc);
- // Apply power regardless of the jumpering.
- // return -1;
- }
- if ((state->Vpp != 0) &&
- (state->Vpp != badge4_pcmvpp)) {
- complain_about_jumpering(__func__, "pcmvpp",
- badge4_pcmvpp, state->Vpp);
- return -1;
- }
- break;
-
- case 1:
- if ((state->Vcc != 0) &&
- (state->Vcc != badge4_cfvcc)) {
- complain_about_jumpering(__func__, "cfvcc",
- badge4_cfvcc, state->Vcc);
- return -1;
- }
- break;
-
- default:
- return -1;
- }
-
- ret = sa1111_pcmcia_configure_socket(skt, state);
- if (ret == 0) {
- unsigned long flags;
- int need5V;
-
- local_irq_save(flags);
-
- need5V = ((state->Vcc == 50) || (state->Vpp == 50));
-
- badge4_set_5V(BADGE4_5V_PCMCIA_SOCK(skt->nr), need5V);
-
- local_irq_restore(flags);
- }
-
- return ret;
-}
-
-static struct pcmcia_low_level badge4_pcmcia_ops = {
- .owner = THIS_MODULE,
- .configure_socket = badge4_pcmcia_configure_socket,
- .first = 0,
- .nr = 2,
-};
-
-int pcmcia_badge4_init(struct sa1111_dev *dev)
-{
- printk(KERN_INFO
- "%s: badge4_pcmvcc=%d, badge4_pcmvpp=%d, badge4_cfvcc=%d\n",
- __func__,
- badge4_pcmvcc, badge4_pcmvpp, badge4_cfvcc);
-
- sa11xx_drv_pcmcia_ops(&badge4_pcmcia_ops);
- return sa1111_pcmcia_add(dev, &badge4_pcmcia_ops,
- sa11xx_drv_pcmcia_add_one);
-}
-
-#ifndef MODULE
-static int __init pcmv_setup(char *s)
-{
- int v[4];
-
- s = get_options(s, ARRAY_SIZE(v), v);
-
- if (v[0] >= 1) badge4_pcmvcc = v[1];
- if (v[0] >= 2) badge4_pcmvpp = v[2];
- if (v[0] >= 3) badge4_cfvcc = v[3];
-
- return 1;
-}
-
-__setup("pcmv=", pcmv_setup);
-#endif
diff --git a/drivers/pcmcia/sa1111_generic.c b/drivers/pcmcia/sa1111_generic.c
index bce664bbdc98..2a67e33fb5f0 100644
--- a/drivers/pcmcia/sa1111_generic.c
+++ b/drivers/pcmcia/sa1111_generic.c
@@ -212,18 +212,10 @@ static int pcmcia_probe(struct sa1111_dev *dev)
writel_relaxed(PCCR_S0_FLT | PCCR_S1_FLT, base + PCCR);
ret = -ENODEV;
-#ifdef CONFIG_SA1100_BADGE4
- if (machine_is_badge4())
- ret = pcmcia_badge4_init(dev);
-#endif
#ifdef CONFIG_SA1100_JORNADA720
if (machine_is_jornada720())
ret = pcmcia_jornada720_init(dev);
#endif
-#ifdef CONFIG_ARCH_LUBBOCK
- if (machine_is_lubbock())
- ret = pcmcia_lubbock_init(dev);
-#endif
#ifdef CONFIG_ASSABET_NEPONSET
if (machine_is_assabet())
ret = pcmcia_neponset_init(dev);
diff --git a/drivers/pcmcia/sa1111_lubbock.c b/drivers/pcmcia/sa1111_lubbock.c
deleted file mode 100644
index f1b5160cb8fa..000000000000
--- a/drivers/pcmcia/sa1111_lubbock.c
+++ /dev/null
@@ -1,155 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * linux/drivers/pcmcia/pxa2xx_lubbock.c
- *
- * Author: George Davis
- * Created: Jan 10, 2002
- * Copyright: MontaVista Software Inc.
- *
- * Originally based upon linux/drivers/pcmcia/sa1100_neponset.c
- *
- * Lubbock PCMCIA specific routines.
- */
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/device.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-
-#include <asm/hardware/sa1111.h>
-#include <asm/mach-types.h>
-
-#include "sa1111_generic.h"
-#include "max1600.h"
-
-static int lubbock_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
-{
- struct max1600 *m;
- int ret;
-
- ret = max1600_init(skt->socket.dev.parent, &m,
- skt->nr ? MAX1600_CHAN_B : MAX1600_CHAN_A,
- MAX1600_CODE_HIGH);
- if (ret == 0)
- skt->driver_data = m;
-
- return ret;
-}
-
-static int
-lubbock_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
- const socket_state_t *state)
-{
- struct max1600 *m = skt->driver_data;
- int ret = 0;
-
- /* Lubbock uses the Maxim MAX1602, with the following connections:
- *
- * Socket 0 (PCMCIA):
- * MAX1602 Lubbock Register
- * Pin Signal
- * ----- ------- ----------------------
- * A0VPP S0_PWR0 SA-1111 GPIO A<0>
- * A1VPP S0_PWR1 SA-1111 GPIO A<1>
- * A0VCC S0_PWR2 SA-1111 GPIO A<2>
- * A1VCC S0_PWR3 SA-1111 GPIO A<3>
- * VX VCC
- * VY +3.3V
- * 12IN +12V
- * CODE +3.3V Cirrus Code, CODE = High (VY)
- *
- * Socket 1 (CF):
- * MAX1602 Lubbock Register
- * Pin Signal
- * ----- ------- ----------------------
- * A0VPP GND VPP is not connected
- * A1VPP GND VPP is not connected
- * A0VCC S1_PWR0 MISC_WR<14>
- * A1VCC S1_PWR1 MISC_WR<15>
- * VX VCC
- * VY +3.3V
- * 12IN GND VPP is not connected
- * CODE +3.3V Cirrus Code, CODE = High (VY)
- *
- */
-
- again:
- switch (skt->nr) {
- case 0:
- case 1:
- break;
-
- default:
- ret = -1;
- }
-
- if (ret == 0)
- ret = sa1111_pcmcia_configure_socket(skt, state);
- if (ret == 0)
- ret = max1600_configure(m, state->Vcc, state->Vpp);
-
-#if 1
- if (ret == 0 && state->Vcc == 33) {
- struct pcmcia_state new_state;
-
- /*
- * HACK ALERT:
- * We can't sense the voltage properly on Lubbock before
- * actually applying some power to the socket (catch 22).
- * Resense the socket Voltage Sense pins after applying
- * socket power.
- *
- * Note: It takes about 2.5ms for the MAX1602 VCC output
- * to rise.
- */
- mdelay(3);
-
- sa1111_pcmcia_socket_state(skt, &new_state);
-
- if (!new_state.vs_3v && !new_state.vs_Xv) {
- /*
- * Switch to 5V, Configure socket with 5V voltage
- */
- max1600_configure(m, 0, 0);
-
- /*
- * It takes about 100ms to turn off Vcc.
- */
- mdelay(100);
-
- /*
- * We need to hack around the const qualifier as
- * well to keep this ugly workaround localized and
- * not force it to the rest of the code. Barf bags
- * available in the seat pocket in front of you!
- */
- ((socket_state_t *)state)->Vcc = 50;
- ((socket_state_t *)state)->Vpp = 50;
- goto again;
- }
- }
-#endif
-
- return ret;
-}
-
-static struct pcmcia_low_level lubbock_pcmcia_ops = {
- .owner = THIS_MODULE,
- .hw_init = lubbock_pcmcia_hw_init,
- .configure_socket = lubbock_pcmcia_configure_socket,
- .first = 0,
- .nr = 2,
-};
-
-#include "pxa2xx_base.h"
-
-int pcmcia_lubbock_init(struct sa1111_dev *sadev)
-{
- pxa2xx_drv_pcmcia_ops(&lubbock_pcmcia_ops);
- pxa2xx_configure_sockets(&sadev->dev, &lubbock_pcmcia_ops);
- return sa1111_pcmcia_add(sadev, &lubbock_pcmcia_ops,
- pxa2xx_drv_pcmcia_add_one);
-}
-
-MODULE_LICENSE("GPL");
diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c
index 1deb61b22bc7..c9689861be3f 100644
--- a/drivers/perf/arm-cmn.c
+++ b/drivers/perf/arm-cmn.c
@@ -1870,6 +1870,7 @@ static void arm_cmn_init_dtm(struct arm_cmn_dtm *dtm, struct arm_cmn_node *xp, i
dtm->base = xp->pmu_base + CMN_DTM_OFFSET(idx);
dtm->pmu_config_low = CMN_DTM_PMU_CONFIG_PMU_EN;
+ writeq_relaxed(dtm->pmu_config_low, dtm->base + CMN_DTM_PMU_CONFIG);
for (i = 0; i < 4; i++) {
dtm->wp_event[i] = -1;
writeq_relaxed(0, dtm->base + CMN_DTM_WPn_MASK(i));
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index 9b593f985805..15bd1e34a88e 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -550,13 +550,7 @@ static void armpmu_disable(struct pmu *pmu)
static bool armpmu_filter(struct pmu *pmu, int cpu)
{
struct arm_pmu *armpmu = to_arm_pmu(pmu);
- bool ret;
-
- ret = cpumask_test_cpu(cpu, &armpmu->supported_cpus);
- if (ret && armpmu->filter)
- return armpmu->filter(pmu, cpu);
-
- return ret;
+ return !cpumask_test_cpu(cpu, &armpmu->supported_cpus);
}
static ssize_t cpus_show(struct device *dev,
@@ -758,17 +752,8 @@ static void cpu_pm_pmu_setup(struct arm_pmu *armpmu, unsigned long cmd)
case CPU_PM_ENTER_FAILED:
/*
* Restore and enable the counter.
- * armpmu_start() indirectly calls
- *
- * perf_event_update_userpage()
- *
- * that requires RCU read locking to be functional,
- * wrap the call within RCU_NONIDLE to make the
- * RCU subsystem aware this cpu is not idle from
- * an RCU perspective for the armpmu_start() call
- * duration.
*/
- RCU_NONIDLE(armpmu_start(event, PERF_EF_RELOAD));
+ armpmu_start(event, PERF_EF_RELOAD);
break;
default:
break;
diff --git a/drivers/perf/arm_spe_pmu.c b/drivers/perf/arm_spe_pmu.c
index 00e3a637f7b6..b9ba4c4fe5a2 100644
--- a/drivers/perf/arm_spe_pmu.c
+++ b/drivers/perf/arm_spe_pmu.c
@@ -12,6 +12,7 @@
#define DRVNAME PMUNAME "_pmu"
#define pr_fmt(fmt) DRVNAME ": " fmt
+#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/capability.h>
@@ -84,6 +85,7 @@ struct arm_spe_pmu {
#define SPE_PMU_FEAT_ARCH_INST (1UL << 3)
#define SPE_PMU_FEAT_LDS (1UL << 4)
#define SPE_PMU_FEAT_ERND (1UL << 5)
+#define SPE_PMU_FEAT_INV_FILT_EVT (1UL << 6)
#define SPE_PMU_FEAT_DEV_PROBED (1UL << 63)
u64 features;
@@ -201,6 +203,10 @@ static const struct attribute_group arm_spe_pmu_cap_group = {
#define ATTR_CFG_FLD_min_latency_LO 0
#define ATTR_CFG_FLD_min_latency_HI 11
+#define ATTR_CFG_FLD_inv_event_filter_CFG config3 /* PMSNEVFR_EL1 */
+#define ATTR_CFG_FLD_inv_event_filter_LO 0
+#define ATTR_CFG_FLD_inv_event_filter_HI 63
+
/* Why does everything I do descend into this? */
#define __GEN_PMU_FORMAT_ATTR(cfg, lo, hi) \
(lo) == (hi) ? #cfg ":" #lo "\n" : #cfg ":" #lo "-" #hi
@@ -231,6 +237,7 @@ GEN_PMU_FORMAT_ATTR(branch_filter);
GEN_PMU_FORMAT_ATTR(load_filter);
GEN_PMU_FORMAT_ATTR(store_filter);
GEN_PMU_FORMAT_ATTR(event_filter);
+GEN_PMU_FORMAT_ATTR(inv_event_filter);
GEN_PMU_FORMAT_ATTR(min_latency);
static struct attribute *arm_spe_pmu_formats_attr[] = {
@@ -242,12 +249,27 @@ static struct attribute *arm_spe_pmu_formats_attr[] = {
&format_attr_load_filter.attr,
&format_attr_store_filter.attr,
&format_attr_event_filter.attr,
+ &format_attr_inv_event_filter.attr,
&format_attr_min_latency.attr,
NULL,
};
+static umode_t arm_spe_pmu_format_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr,
+ int unused)
+ {
+ struct device *dev = kobj_to_dev(kobj);
+ struct arm_spe_pmu *spe_pmu = dev_get_drvdata(dev);
+
+ if (attr == &format_attr_inv_event_filter.attr && !(spe_pmu->features & SPE_PMU_FEAT_INV_FILT_EVT))
+ return 0;
+
+ return attr->mode;
+}
+
static const struct attribute_group arm_spe_pmu_format_group = {
.name = "format",
+ .is_visible = arm_spe_pmu_format_attr_is_visible,
.attrs = arm_spe_pmu_formats_attr,
};
@@ -282,18 +304,18 @@ static u64 arm_spe_event_to_pmscr(struct perf_event *event)
struct perf_event_attr *attr = &event->attr;
u64 reg = 0;
- reg |= ATTR_CFG_GET_FLD(attr, ts_enable) << SYS_PMSCR_EL1_TS_SHIFT;
- reg |= ATTR_CFG_GET_FLD(attr, pa_enable) << SYS_PMSCR_EL1_PA_SHIFT;
- reg |= ATTR_CFG_GET_FLD(attr, pct_enable) << SYS_PMSCR_EL1_PCT_SHIFT;
+ reg |= FIELD_PREP(PMSCR_EL1_TS, ATTR_CFG_GET_FLD(attr, ts_enable));
+ reg |= FIELD_PREP(PMSCR_EL1_PA, ATTR_CFG_GET_FLD(attr, pa_enable));
+ reg |= FIELD_PREP(PMSCR_EL1_PCT, ATTR_CFG_GET_FLD(attr, pct_enable));
if (!attr->exclude_user)
- reg |= BIT(SYS_PMSCR_EL1_E0SPE_SHIFT);
+ reg |= PMSCR_EL1_E0SPE;
if (!attr->exclude_kernel)
- reg |= BIT(SYS_PMSCR_EL1_E1SPE_SHIFT);
+ reg |= PMSCR_EL1_E1SPE;
if (get_spe_event_has_cx(event))
- reg |= BIT(SYS_PMSCR_EL1_CX_SHIFT);
+ reg |= PMSCR_EL1_CX;
return reg;
}
@@ -302,8 +324,7 @@ static void arm_spe_event_sanitise_period(struct perf_event *event)
{
struct arm_spe_pmu *spe_pmu = to_spe_pmu(event->pmu);
u64 period = event->hw.sample_period;
- u64 max_period = SYS_PMSIRR_EL1_INTERVAL_MASK
- << SYS_PMSIRR_EL1_INTERVAL_SHIFT;
+ u64 max_period = PMSIRR_EL1_INTERVAL_MASK;
if (period < spe_pmu->min_period)
period = spe_pmu->min_period;
@@ -322,7 +343,7 @@ static u64 arm_spe_event_to_pmsirr(struct perf_event *event)
arm_spe_event_sanitise_period(event);
- reg |= ATTR_CFG_GET_FLD(attr, jitter) << SYS_PMSIRR_EL1_RND_SHIFT;
+ reg |= FIELD_PREP(PMSIRR_EL1_RND, ATTR_CFG_GET_FLD(attr, jitter));
reg |= event->hw.sample_period;
return reg;
@@ -333,18 +354,21 @@ static u64 arm_spe_event_to_pmsfcr(struct perf_event *event)
struct perf_event_attr *attr = &event->attr;
u64 reg = 0;
- reg |= ATTR_CFG_GET_FLD(attr, load_filter) << SYS_PMSFCR_EL1_LD_SHIFT;
- reg |= ATTR_CFG_GET_FLD(attr, store_filter) << SYS_PMSFCR_EL1_ST_SHIFT;
- reg |= ATTR_CFG_GET_FLD(attr, branch_filter) << SYS_PMSFCR_EL1_B_SHIFT;
+ reg |= FIELD_PREP(PMSFCR_EL1_LD, ATTR_CFG_GET_FLD(attr, load_filter));
+ reg |= FIELD_PREP(PMSFCR_EL1_ST, ATTR_CFG_GET_FLD(attr, store_filter));
+ reg |= FIELD_PREP(PMSFCR_EL1_B, ATTR_CFG_GET_FLD(attr, branch_filter));
if (reg)
- reg |= BIT(SYS_PMSFCR_EL1_FT_SHIFT);
+ reg |= PMSFCR_EL1_FT;
if (ATTR_CFG_GET_FLD(attr, event_filter))
- reg |= BIT(SYS_PMSFCR_EL1_FE_SHIFT);
+ reg |= PMSFCR_EL1_FE;
+
+ if (ATTR_CFG_GET_FLD(attr, inv_event_filter))
+ reg |= PMSFCR_EL1_FnE;
if (ATTR_CFG_GET_FLD(attr, min_latency))
- reg |= BIT(SYS_PMSFCR_EL1_FL_SHIFT);
+ reg |= PMSFCR_EL1_FL;
return reg;
}
@@ -355,11 +379,16 @@ static u64 arm_spe_event_to_pmsevfr(struct perf_event *event)
return ATTR_CFG_GET_FLD(attr, event_filter);
}
+static u64 arm_spe_event_to_pmsnevfr(struct perf_event *event)
+{
+ struct perf_event_attr *attr = &event->attr;
+ return ATTR_CFG_GET_FLD(attr, inv_event_filter);
+}
+
static u64 arm_spe_event_to_pmslatfr(struct perf_event *event)
{
struct perf_event_attr *attr = &event->attr;
- return ATTR_CFG_GET_FLD(attr, min_latency)
- << SYS_PMSLATFR_EL1_MINLAT_SHIFT;
+ return FIELD_PREP(PMSLATFR_EL1_MINLAT, ATTR_CFG_GET_FLD(attr, min_latency));
}
static void arm_spe_pmu_pad_buf(struct perf_output_handle *handle, int len)
@@ -511,7 +540,7 @@ static void arm_spe_perf_aux_output_begin(struct perf_output_handle *handle,
limit = buf->snapshot ? arm_spe_pmu_next_snapshot_off(handle)
: arm_spe_pmu_next_off(handle);
if (limit)
- limit |= BIT(SYS_PMBLIMITR_EL1_E_SHIFT);
+ limit |= PMBLIMITR_EL1_E;
limit += (u64)buf->base;
base = (u64)buf->base + PERF_IDX2OFF(handle->head, buf);
@@ -570,28 +599,28 @@ arm_spe_pmu_buf_get_fault_act(struct perf_output_handle *handle)
/* Service required? */
pmbsr = read_sysreg_s(SYS_PMBSR_EL1);
- if (!(pmbsr & BIT(SYS_PMBSR_EL1_S_SHIFT)))
+ if (!FIELD_GET(PMBSR_EL1_S, pmbsr))
return SPE_PMU_BUF_FAULT_ACT_SPURIOUS;
/*
* If we've lost data, disable profiling and also set the PARTIAL
* flag to indicate that the last record is corrupted.
*/
- if (pmbsr & BIT(SYS_PMBSR_EL1_DL_SHIFT))
+ if (FIELD_GET(PMBSR_EL1_DL, pmbsr))
perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED |
PERF_AUX_FLAG_PARTIAL);
/* Report collisions to userspace so that it can up the period */
- if (pmbsr & BIT(SYS_PMBSR_EL1_COLL_SHIFT))
+ if (FIELD_GET(PMBSR_EL1_COLL, pmbsr))
perf_aux_output_flag(handle, PERF_AUX_FLAG_COLLISION);
/* We only expect buffer management events */
- switch (pmbsr & (SYS_PMBSR_EL1_EC_MASK << SYS_PMBSR_EL1_EC_SHIFT)) {
- case SYS_PMBSR_EL1_EC_BUF:
+ switch (FIELD_GET(PMBSR_EL1_EC, pmbsr)) {
+ case PMBSR_EL1_EC_BUF:
/* Handled below */
break;
- case SYS_PMBSR_EL1_EC_FAULT_S1:
- case SYS_PMBSR_EL1_EC_FAULT_S2:
+ case PMBSR_EL1_EC_FAULT_S1:
+ case PMBSR_EL1_EC_FAULT_S2:
err_str = "Unexpected buffer fault";
goto out_err;
default:
@@ -600,9 +629,8 @@ arm_spe_pmu_buf_get_fault_act(struct perf_output_handle *handle)
}
/* Buffer management event */
- switch (pmbsr &
- (SYS_PMBSR_EL1_BUF_BSC_MASK << SYS_PMBSR_EL1_BUF_BSC_SHIFT)) {
- case SYS_PMBSR_EL1_BUF_BSC_FULL:
+ switch (FIELD_GET(PMBSR_EL1_BUF_BSC_MASK, pmbsr)) {
+ case PMBSR_EL1_BUF_BSC_FULL:
ret = SPE_PMU_BUF_FAULT_ACT_OK;
goto out_stop;
default:
@@ -677,11 +705,13 @@ static u64 arm_spe_pmsevfr_res0(u16 pmsver)
{
switch (pmsver) {
case ID_AA64DFR0_EL1_PMSVer_IMP:
- return SYS_PMSEVFR_EL1_RES0_8_2;
+ return PMSEVFR_EL1_RES0_IMP;
case ID_AA64DFR0_EL1_PMSVer_V1P1:
+ return PMSEVFR_EL1_RES0_V1P1;
+ case ID_AA64DFR0_EL1_PMSVer_V1P2:
/* Return the highest version we support in default */
default:
- return SYS_PMSEVFR_EL1_RES0_8_3;
+ return PMSEVFR_EL1_RES0_V1P2;
}
}
@@ -703,6 +733,9 @@ static int arm_spe_pmu_event_init(struct perf_event *event)
if (arm_spe_event_to_pmsevfr(event) & arm_spe_pmsevfr_res0(spe_pmu->pmsver))
return -EOPNOTSUPP;
+ if (arm_spe_event_to_pmsnevfr(event) & arm_spe_pmsevfr_res0(spe_pmu->pmsver))
+ return -EOPNOTSUPP;
+
if (attr->exclude_idle)
return -EOPNOTSUPP;
@@ -717,23 +750,26 @@ static int arm_spe_pmu_event_init(struct perf_event *event)
return -EINVAL;
reg = arm_spe_event_to_pmsfcr(event);
- if ((reg & BIT(SYS_PMSFCR_EL1_FE_SHIFT)) &&
+ if ((FIELD_GET(PMSFCR_EL1_FE, reg)) &&
!(spe_pmu->features & SPE_PMU_FEAT_FILT_EVT))
return -EOPNOTSUPP;
- if ((reg & BIT(SYS_PMSFCR_EL1_FT_SHIFT)) &&
+ if ((FIELD_GET(PMSFCR_EL1_FnE, reg)) &&
+ !(spe_pmu->features & SPE_PMU_FEAT_INV_FILT_EVT))
+ return -EOPNOTSUPP;
+
+ if ((FIELD_GET(PMSFCR_EL1_FT, reg)) &&
!(spe_pmu->features & SPE_PMU_FEAT_FILT_TYP))
return -EOPNOTSUPP;
- if ((reg & BIT(SYS_PMSFCR_EL1_FL_SHIFT)) &&
+ if ((FIELD_GET(PMSFCR_EL1_FL, reg)) &&
!(spe_pmu->features & SPE_PMU_FEAT_FILT_LAT))
return -EOPNOTSUPP;
set_spe_event_has_cx(event);
reg = arm_spe_event_to_pmscr(event);
if (!perfmon_capable() &&
- (reg & (BIT(SYS_PMSCR_EL1_PA_SHIFT) |
- BIT(SYS_PMSCR_EL1_PCT_SHIFT))))
+ (reg & (PMSCR_EL1_PA | PMSCR_EL1_PCT)))
return -EACCES;
return 0;
@@ -757,6 +793,11 @@ static void arm_spe_pmu_start(struct perf_event *event, int flags)
reg = arm_spe_event_to_pmsevfr(event);
write_sysreg_s(reg, SYS_PMSEVFR_EL1);
+ if (spe_pmu->features & SPE_PMU_FEAT_INV_FILT_EVT) {
+ reg = arm_spe_event_to_pmsnevfr(event);
+ write_sysreg_s(reg, SYS_PMSNEVFR_EL1);
+ }
+
reg = arm_spe_event_to_pmslatfr(event);
write_sysreg_s(reg, SYS_PMSLATFR_EL1);
@@ -971,14 +1012,14 @@ static void __arm_spe_pmu_dev_probe(void *info)
/* Read PMBIDR first to determine whether or not we have access */
reg = read_sysreg_s(SYS_PMBIDR_EL1);
- if (reg & BIT(SYS_PMBIDR_EL1_P_SHIFT)) {
+ if (FIELD_GET(PMBIDR_EL1_P, reg)) {
dev_err(dev,
"profiling buffer owned by higher exception level\n");
return;
}
/* Minimum alignment. If it's out-of-range, then fail the probe */
- fld = reg >> SYS_PMBIDR_EL1_ALIGN_SHIFT & SYS_PMBIDR_EL1_ALIGN_MASK;
+ fld = FIELD_GET(PMBIDR_EL1_ALIGN, reg);
spe_pmu->align = 1 << fld;
if (spe_pmu->align > SZ_2K) {
dev_err(dev, "unsupported PMBIDR.Align [%d] on CPU %d\n",
@@ -988,58 +1029,61 @@ static void __arm_spe_pmu_dev_probe(void *info)
/* It's now safe to read PMSIDR and figure out what we've got */
reg = read_sysreg_s(SYS_PMSIDR_EL1);
- if (reg & BIT(SYS_PMSIDR_EL1_FE_SHIFT))
+ if (FIELD_GET(PMSIDR_EL1_FE, reg))
spe_pmu->features |= SPE_PMU_FEAT_FILT_EVT;
- if (reg & BIT(SYS_PMSIDR_EL1_FT_SHIFT))
+ if (FIELD_GET(PMSIDR_EL1_FnE, reg))
+ spe_pmu->features |= SPE_PMU_FEAT_INV_FILT_EVT;
+
+ if (FIELD_GET(PMSIDR_EL1_FT, reg))
spe_pmu->features |= SPE_PMU_FEAT_FILT_TYP;
- if (reg & BIT(SYS_PMSIDR_EL1_FL_SHIFT))
+ if (FIELD_GET(PMSIDR_EL1_FL, reg))
spe_pmu->features |= SPE_PMU_FEAT_FILT_LAT;
- if (reg & BIT(SYS_PMSIDR_EL1_ARCHINST_SHIFT))
+ if (FIELD_GET(PMSIDR_EL1_ARCHINST, reg))
spe_pmu->features |= SPE_PMU_FEAT_ARCH_INST;
- if (reg & BIT(SYS_PMSIDR_EL1_LDS_SHIFT))
+ if (FIELD_GET(PMSIDR_EL1_LDS, reg))
spe_pmu->features |= SPE_PMU_FEAT_LDS;
- if (reg & BIT(SYS_PMSIDR_EL1_ERND_SHIFT))
+ if (FIELD_GET(PMSIDR_EL1_ERND, reg))
spe_pmu->features |= SPE_PMU_FEAT_ERND;
/* This field has a spaced out encoding, so just use a look-up */
- fld = reg >> SYS_PMSIDR_EL1_INTERVAL_SHIFT & SYS_PMSIDR_EL1_INTERVAL_MASK;
+ fld = FIELD_GET(PMSIDR_EL1_INTERVAL, reg);
switch (fld) {
- case 0:
+ case PMSIDR_EL1_INTERVAL_256:
spe_pmu->min_period = 256;
break;
- case 2:
+ case PMSIDR_EL1_INTERVAL_512:
spe_pmu->min_period = 512;
break;
- case 3:
+ case PMSIDR_EL1_INTERVAL_768:
spe_pmu->min_period = 768;
break;
- case 4:
+ case PMSIDR_EL1_INTERVAL_1024:
spe_pmu->min_period = 1024;
break;
- case 5:
+ case PMSIDR_EL1_INTERVAL_1536:
spe_pmu->min_period = 1536;
break;
- case 6:
+ case PMSIDR_EL1_INTERVAL_2048:
spe_pmu->min_period = 2048;
break;
- case 7:
+ case PMSIDR_EL1_INTERVAL_3072:
spe_pmu->min_period = 3072;
break;
default:
dev_warn(dev, "unknown PMSIDR_EL1.Interval [%d]; assuming 8\n",
fld);
fallthrough;
- case 8:
+ case PMSIDR_EL1_INTERVAL_4096:
spe_pmu->min_period = 4096;
}
/* Maximum record size. If it's out-of-range, then fail the probe */
- fld = reg >> SYS_PMSIDR_EL1_MAXSIZE_SHIFT & SYS_PMSIDR_EL1_MAXSIZE_MASK;
+ fld = FIELD_GET(PMSIDR_EL1_MAXSIZE, reg);
spe_pmu->max_record_sz = 1 << fld;
if (spe_pmu->max_record_sz > SZ_2K || spe_pmu->max_record_sz < 16) {
dev_err(dev, "unsupported PMSIDR_EL1.MaxSize [%d] on CPU %d\n",
@@ -1047,22 +1091,22 @@ static void __arm_spe_pmu_dev_probe(void *info)
return;
}
- fld = reg >> SYS_PMSIDR_EL1_COUNTSIZE_SHIFT & SYS_PMSIDR_EL1_COUNTSIZE_MASK;
+ fld = FIELD_GET(PMSIDR_EL1_COUNTSIZE, reg);
switch (fld) {
default:
dev_warn(dev, "unknown PMSIDR_EL1.CountSize [%d]; assuming 2\n",
fld);
fallthrough;
- case 2:
+ case PMSIDR_EL1_COUNTSIZE_12_BIT_SAT:
spe_pmu->counter_sz = 12;
break;
- case 3:
+ case PMSIDR_EL1_COUNTSIZE_16_BIT_SAT:
spe_pmu->counter_sz = 16;
}
dev_info(dev,
- "probed for CPUs %*pbl [max_record_sz %u, align %u, features 0x%llx]\n",
- cpumask_pr_args(&spe_pmu->supported_cpus),
+ "probed SPEv1.%d for CPUs %*pbl [max_record_sz %u, align %u, features 0x%llx]\n",
+ spe_pmu->pmsver - 1, cpumask_pr_args(&spe_pmu->supported_cpus),
spe_pmu->max_record_sz, spe_pmu->align, spe_pmu->features);
spe_pmu->features |= SPE_PMU_FEAT_DEV_PROBED;
diff --git a/drivers/perf/fsl_imx8_ddr_perf.c b/drivers/perf/fsl_imx8_ddr_perf.c
index 8e058e08fe81..5222ba1e79d0 100644
--- a/drivers/perf/fsl_imx8_ddr_perf.c
+++ b/drivers/perf/fsl_imx8_ddr_perf.c
@@ -97,7 +97,6 @@ struct ddr_pmu {
struct hlist_node node;
struct device *dev;
struct perf_event *events[NUM_COUNTERS];
- int active_events;
enum cpuhp_state cpuhp_state;
const struct fsl_ddr_devtype_data *devtype_data;
int irq;
@@ -530,7 +529,6 @@ static int ddr_perf_event_add(struct perf_event *event, int flags)
}
pmu->events[counter] = event;
- pmu->active_events++;
hwc->idx = counter;
hwc->state |= PERF_HES_STOPPED;
@@ -562,7 +560,6 @@ static void ddr_perf_event_del(struct perf_event *event, int flags)
ddr_perf_event_stop(event, PERF_EF_UPDATE);
ddr_perf_free_counter(pmu, counter);
- pmu->active_events--;
hwc->idx = -1;
}
diff --git a/drivers/perf/hisilicon/hisi_uncore_cpa_pmu.c b/drivers/perf/hisilicon/hisi_uncore_cpa_pmu.c
index a9bb73f76be4..4c67d57217a7 100644
--- a/drivers/perf/hisilicon/hisi_uncore_cpa_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_cpa_pmu.c
@@ -316,21 +316,7 @@ static int hisi_cpa_pmu_probe(struct platform_device *pdev)
if (!name)
return -ENOMEM;
- cpa_pmu->pmu = (struct pmu) {
- .name = name,
- .module = THIS_MODULE,
- .task_ctx_nr = perf_invalid_context,
- .event_init = hisi_uncore_pmu_event_init,
- .pmu_enable = hisi_uncore_pmu_enable,
- .pmu_disable = hisi_uncore_pmu_disable,
- .add = hisi_uncore_pmu_add,
- .del = hisi_uncore_pmu_del,
- .start = hisi_uncore_pmu_start,
- .stop = hisi_uncore_pmu_stop,
- .read = hisi_uncore_pmu_read,
- .attr_groups = cpa_pmu->pmu_events.attr_groups,
- .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
- };
+ hisi_pmu_init(cpa_pmu, name, THIS_MODULE);
/* Power Management should be disabled before using CPA PMU. */
hisi_cpa_pmu_disable_pm(cpa_pmu);
diff --git a/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c b/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c
index 50d0c0a2f1fe..8c3ffcbfd4c0 100644
--- a/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c
@@ -516,7 +516,7 @@ static int hisi_ddrc_pmu_probe(struct platform_device *pdev)
"hisi_sccl%u_ddrc%u", ddrc_pmu->sccl_id,
ddrc_pmu->index_id);
- hisi_pmu_init(&ddrc_pmu->pmu, name, ddrc_pmu->pmu_events.attr_groups, THIS_MODULE);
+ hisi_pmu_init(ddrc_pmu, name, THIS_MODULE);
ret = perf_pmu_register(&ddrc_pmu->pmu, name, -1);
if (ret) {
diff --git a/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c b/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c
index 13017b3412a5..806698b9eabf 100644
--- a/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c
@@ -519,7 +519,7 @@ static int hisi_hha_pmu_probe(struct platform_device *pdev)
name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%u_hha%u",
hha_pmu->sccl_id, hha_pmu->index_id);
- hisi_pmu_init(&hha_pmu->pmu, name, hha_pmu->pmu_events.attr_groups, THIS_MODULE);
+ hisi_pmu_init(hha_pmu, name, THIS_MODULE);
ret = perf_pmu_register(&hha_pmu->pmu, name, -1);
if (ret) {
diff --git a/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c b/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
index 2995f3630d49..5b2c35f1658a 100644
--- a/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
@@ -557,7 +557,7 @@ static int hisi_l3c_pmu_probe(struct platform_device *pdev)
*/
name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%u_l3c%u",
l3c_pmu->sccl_id, l3c_pmu->ccl_id);
- hisi_pmu_init(&l3c_pmu->pmu, name, l3c_pmu->pmu_events.attr_groups, THIS_MODULE);
+ hisi_pmu_init(l3c_pmu, name, THIS_MODULE);
ret = perf_pmu_register(&l3c_pmu->pmu, name, -1);
if (ret) {
diff --git a/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c b/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c
index 47d3cc9b6eec..afe3419f3f6d 100644
--- a/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c
@@ -412,7 +412,7 @@ static int hisi_pa_pmu_probe(struct platform_device *pdev)
return ret;
}
- hisi_pmu_init(&pa_pmu->pmu, name, pa_pmu->pmu_events.attr_groups, THIS_MODULE);
+ hisi_pmu_init(pa_pmu, name, THIS_MODULE);
ret = perf_pmu_register(&pa_pmu->pmu, name, -1);
if (ret) {
dev_err(pa_pmu->dev, "PMU register failed, ret = %d\n", ret);
diff --git a/drivers/perf/hisilicon/hisi_uncore_pmu.c b/drivers/perf/hisilicon/hisi_uncore_pmu.c
index fbc8a93d5eac..f1b0f5e1a28f 100644
--- a/drivers/perf/hisilicon/hisi_uncore_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_pmu.c
@@ -531,9 +531,11 @@ int hisi_uncore_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
}
EXPORT_SYMBOL_GPL(hisi_uncore_pmu_offline_cpu);
-void hisi_pmu_init(struct pmu *pmu, const char *name,
- const struct attribute_group **attr_groups, struct module *module)
+void hisi_pmu_init(struct hisi_pmu *hisi_pmu, const char *name,
+ struct module *module)
{
+ struct pmu *pmu = &hisi_pmu->pmu;
+
pmu->name = name;
pmu->module = module;
pmu->task_ctx_nr = perf_invalid_context;
@@ -545,7 +547,8 @@ void hisi_pmu_init(struct pmu *pmu, const char *name,
pmu->start = hisi_uncore_pmu_start;
pmu->stop = hisi_uncore_pmu_stop;
pmu->read = hisi_uncore_pmu_read;
- pmu->attr_groups = attr_groups;
+ pmu->attr_groups = hisi_pmu->pmu_events.attr_groups;
+ pmu->capabilities = PERF_PMU_CAP_NO_EXCLUDE;
}
EXPORT_SYMBOL_GPL(hisi_pmu_init);
diff --git a/drivers/perf/hisilicon/hisi_uncore_pmu.h b/drivers/perf/hisilicon/hisi_uncore_pmu.h
index b59de33cd059..f8e3cc6903d7 100644
--- a/drivers/perf/hisilicon/hisi_uncore_pmu.h
+++ b/drivers/perf/hisilicon/hisi_uncore_pmu.h
@@ -121,6 +121,6 @@ ssize_t hisi_uncore_pmu_identifier_attr_show(struct device *dev,
int hisi_uncore_pmu_init_irq(struct hisi_pmu *hisi_pmu,
struct platform_device *pdev);
-void hisi_pmu_init(struct pmu *pmu, const char *name,
- const struct attribute_group **attr_groups, struct module *module);
+void hisi_pmu_init(struct hisi_pmu *hisi_pmu, const char *name,
+ struct module *module);
#endif /* __HISI_UNCORE_PMU_H__ */
diff --git a/drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c b/drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c
index b9c79f17230c..1e354433776a 100644
--- a/drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_sllc_pmu.c
@@ -445,7 +445,7 @@ static int hisi_sllc_pmu_probe(struct platform_device *pdev)
return ret;
}
- hisi_pmu_init(&sllc_pmu->pmu, name, sllc_pmu->pmu_events.attr_groups, THIS_MODULE);
+ hisi_pmu_init(sllc_pmu, name, THIS_MODULE);
ret = perf_pmu_register(&sllc_pmu->pmu, name, -1);
if (ret) {
diff --git a/drivers/perf/marvell_cn10k_ddr_pmu.c b/drivers/perf/marvell_cn10k_ddr_pmu.c
index 665b382a0ee3..b94a5f6cc22b 100644
--- a/drivers/perf/marvell_cn10k_ddr_pmu.c
+++ b/drivers/perf/marvell_cn10k_ddr_pmu.c
@@ -12,6 +12,7 @@
#include <linux/of_device.h>
#include <linux/perf_event.h>
#include <linux/hrtimer.h>
+#include <linux/acpi.h>
/* Performance Counters Operating Mode Control Registers */
#define DDRC_PERF_CNT_OP_MODE_CTRL 0x8020
@@ -717,10 +718,19 @@ static const struct of_device_id cn10k_ddr_pmu_of_match[] = {
MODULE_DEVICE_TABLE(of, cn10k_ddr_pmu_of_match);
#endif
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id cn10k_ddr_pmu_acpi_match[] = {
+ {"MRVL000A", 0},
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, cn10k_ddr_pmu_acpi_match);
+#endif
+
static struct platform_driver cn10k_ddr_pmu_driver = {
.driver = {
.name = "cn10k-ddr-pmu",
.of_match_table = of_match_ptr(cn10k_ddr_pmu_of_match),
+ .acpi_match_table = ACPI_PTR(cn10k_ddr_pmu_acpi_match),
.suppress_bind_attrs = true,
},
.probe = cn10k_ddr_perf_probe,
diff --git a/drivers/perf/marvell_cn10k_tad_pmu.c b/drivers/perf/marvell_cn10k_tad_pmu.c
index a1166afb3702..3972197e2210 100644
--- a/drivers/perf/marvell_cn10k_tad_pmu.c
+++ b/drivers/perf/marvell_cn10k_tad_pmu.c
@@ -13,6 +13,7 @@
#include <linux/cpuhotplug.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
+#include <linux/acpi.h>
#define TAD_PFC_OFFSET 0x800
#define TAD_PFC(counter) (TAD_PFC_OFFSET | (counter << 3))
@@ -254,7 +255,7 @@ static const struct attribute_group *tad_pmu_attr_groups[] = {
static int tad_pmu_probe(struct platform_device *pdev)
{
- struct device_node *node = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
struct tad_region *regions;
struct tad_pmu *tad_pmu;
struct resource *res;
@@ -276,21 +277,21 @@ static int tad_pmu_probe(struct platform_device *pdev)
return -ENODEV;
}
- ret = of_property_read_u32(node, "marvell,tad-page-size",
- &tad_page_size);
+ ret = device_property_read_u32(dev, "marvell,tad-page-size",
+ &tad_page_size);
if (ret) {
dev_err(&pdev->dev, "Can't find tad-page-size property\n");
return ret;
}
- ret = of_property_read_u32(node, "marvell,tad-pmu-page-size",
- &tad_pmu_page_size);
+ ret = device_property_read_u32(dev, "marvell,tad-pmu-page-size",
+ &tad_pmu_page_size);
if (ret) {
dev_err(&pdev->dev, "Can't find tad-pmu-page-size property\n");
return ret;
}
- ret = of_property_read_u32(node, "marvell,tad-cnt", &tad_cnt);
+ ret = device_property_read_u32(dev, "marvell,tad-cnt", &tad_cnt);
if (ret) {
dev_err(&pdev->dev, "Can't find tad-cnt property\n");
return ret;
@@ -369,10 +370,19 @@ static const struct of_device_id tad_pmu_of_match[] = {
};
#endif
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id tad_pmu_acpi_match[] = {
+ {"MRVL000B", 0},
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, tad_pmu_acpi_match);
+#endif
+
static struct platform_driver tad_pmu_driver = {
.driver = {
.name = "cn10k_tad_pmu",
.of_match_table = of_match_ptr(tad_pmu_of_match),
+ .acpi_match_table = ACPI_PTR(tad_pmu_acpi_match),
.suppress_bind_attrs = true,
},
.probe = tad_pmu_probe,
diff --git a/drivers/perf/riscv_pmu_sbi.c b/drivers/perf/riscv_pmu_sbi.c
index f6507efe2a58..7b2288d4b1ec 100644
--- a/drivers/perf/riscv_pmu_sbi.c
+++ b/drivers/perf/riscv_pmu_sbi.c
@@ -771,14 +771,8 @@ static int riscv_pm_pmu_notify(struct notifier_block *b, unsigned long cmd,
case CPU_PM_ENTER_FAILED:
/*
* Restore and enable the counter.
- *
- * Requires RCU read locking to be functional,
- * wrap the call within RCU_NONIDLE to make the
- * RCU subsystem aware this cpu is not idle from
- * an RCU perspective for the riscv_pmu_start() call
- * duration.
*/
- RCU_NONIDLE(riscv_pmu_start(event, PERF_EF_RELOAD));
+ riscv_pmu_start(event, PERF_EF_RELOAD);
break;
default:
break;
diff --git a/drivers/phy/allwinner/phy-sun4i-usb.c b/drivers/phy/allwinner/phy-sun4i-usb.c
index 5472db9e87ef..fbcd7014ab43 100644
--- a/drivers/phy/allwinner/phy-sun4i-usb.c
+++ b/drivers/phy/allwinner/phy-sun4i-usb.c
@@ -99,28 +99,17 @@
#define DEBOUNCE_TIME msecs_to_jiffies(50)
#define POLL_TIME msecs_to_jiffies(250)
-enum sun4i_usb_phy_type {
- sun4i_a10_phy,
- sun6i_a31_phy,
- sun8i_a33_phy,
- sun8i_a83t_phy,
- sun8i_h3_phy,
- sun8i_r40_phy,
- sun8i_v3s_phy,
- sun50i_a64_phy,
- sun50i_h6_phy,
-};
-
struct sun4i_usb_phy_cfg {
int num_phys;
int hsic_index;
- enum sun4i_usb_phy_type type;
u32 disc_thresh;
u32 hci_phy_ctl_clear;
u8 phyctl_offset;
bool dedicated_clocks;
bool phy0_dual_route;
bool needs_phy2_siddq;
+ bool siddq_in_base;
+ bool poll_vbusen;
int missing_phys;
};
@@ -252,7 +241,8 @@ static void sun4i_usb_phy_passby(struct sun4i_usb_phy *phy, int enable)
SUNXI_AHB_INCRX_ALIGN_EN | SUNXI_ULPI_BYPASS_EN;
/* A83T USB2 is HSIC */
- if (phy_data->cfg->type == sun8i_a83t_phy && phy->index == 2)
+ if (phy_data->cfg->hsic_index &&
+ phy->index == phy_data->cfg->hsic_index)
bits |= SUNXI_EHCI_HS_FORCE | SUNXI_HSIC_CONNECT_INT |
SUNXI_HSIC;
@@ -340,8 +330,7 @@ static int sun4i_usb_phy_init(struct phy *_phy)
writel(val, phy->pmu + REG_HCI_PHY_CTL);
}
- if (data->cfg->type == sun8i_a83t_phy ||
- data->cfg->type == sun50i_h6_phy) {
+ if (data->cfg->siddq_in_base) {
if (phy->index == 0) {
val = readl(data->base + data->cfg->phyctl_offset);
val |= PHY_CTL_VBUSVLDEXT;
@@ -385,8 +374,7 @@ static int sun4i_usb_phy_exit(struct phy *_phy)
struct sun4i_usb_phy_data *data = to_sun4i_usb_phy_data(phy);
if (phy->index == 0) {
- if (data->cfg->type == sun8i_a83t_phy ||
- data->cfg->type == sun50i_h6_phy) {
+ if (data->cfg->siddq_in_base) {
void __iomem *phyctl = data->base +
data->cfg->phyctl_offset;
@@ -466,9 +454,8 @@ static bool sun4i_usb_phy0_poll(struct sun4i_usb_phy_data *data)
* vbus using the N_VBUSEN pin on the pmic, so we must poll
* when using the pmic for vbus-det _and_ we're driving vbus.
*/
- if ((data->cfg->type == sun6i_a31_phy ||
- data->cfg->type == sun8i_a33_phy) &&
- data->vbus_power_supply && data->phys[0].regulator_on)
+ if (data->cfg->poll_vbusen && data->vbus_power_supply &&
+ data->phys[0].regulator_on)
return true;
return false;
@@ -918,9 +905,15 @@ static int sun4i_usb_phy_probe(struct platform_device *pdev)
return 0;
}
+static const struct sun4i_usb_phy_cfg suniv_f1c100s_cfg = {
+ .num_phys = 1,
+ .disc_thresh = 3,
+ .phyctl_offset = REG_PHYCTL_A10,
+ .dedicated_clocks = true,
+};
+
static const struct sun4i_usb_phy_cfg sun4i_a10_cfg = {
.num_phys = 3,
- .type = sun4i_a10_phy,
.disc_thresh = 3,
.phyctl_offset = REG_PHYCTL_A10,
.dedicated_clocks = false,
@@ -928,7 +921,6 @@ static const struct sun4i_usb_phy_cfg sun4i_a10_cfg = {
static const struct sun4i_usb_phy_cfg sun5i_a13_cfg = {
.num_phys = 2,
- .type = sun4i_a10_phy,
.disc_thresh = 2,
.phyctl_offset = REG_PHYCTL_A10,
.dedicated_clocks = false,
@@ -936,15 +928,14 @@ static const struct sun4i_usb_phy_cfg sun5i_a13_cfg = {
static const struct sun4i_usb_phy_cfg sun6i_a31_cfg = {
.num_phys = 3,
- .type = sun6i_a31_phy,
.disc_thresh = 3,
.phyctl_offset = REG_PHYCTL_A10,
.dedicated_clocks = true,
+ .poll_vbusen = true,
};
static const struct sun4i_usb_phy_cfg sun7i_a20_cfg = {
.num_phys = 3,
- .type = sun4i_a10_phy,
.disc_thresh = 2,
.phyctl_offset = REG_PHYCTL_A10,
.dedicated_clocks = false,
@@ -952,31 +943,30 @@ static const struct sun4i_usb_phy_cfg sun7i_a20_cfg = {
static const struct sun4i_usb_phy_cfg sun8i_a23_cfg = {
.num_phys = 2,
- .type = sun6i_a31_phy,
.disc_thresh = 3,
.phyctl_offset = REG_PHYCTL_A10,
.dedicated_clocks = true,
+ .poll_vbusen = true,
};
static const struct sun4i_usb_phy_cfg sun8i_a33_cfg = {
.num_phys = 2,
- .type = sun8i_a33_phy,
.disc_thresh = 3,
.phyctl_offset = REG_PHYCTL_A33,
.dedicated_clocks = true,
+ .poll_vbusen = true,
};
static const struct sun4i_usb_phy_cfg sun8i_a83t_cfg = {
.num_phys = 3,
.hsic_index = 2,
- .type = sun8i_a83t_phy,
.phyctl_offset = REG_PHYCTL_A33,
.dedicated_clocks = true,
+ .siddq_in_base = true,
};
static const struct sun4i_usb_phy_cfg sun8i_h3_cfg = {
.num_phys = 4,
- .type = sun8i_h3_phy,
.disc_thresh = 3,
.phyctl_offset = REG_PHYCTL_A33,
.dedicated_clocks = true,
@@ -986,7 +976,6 @@ static const struct sun4i_usb_phy_cfg sun8i_h3_cfg = {
static const struct sun4i_usb_phy_cfg sun8i_r40_cfg = {
.num_phys = 3,
- .type = sun8i_r40_phy,
.disc_thresh = 3,
.phyctl_offset = REG_PHYCTL_A33,
.dedicated_clocks = true,
@@ -996,7 +985,6 @@ static const struct sun4i_usb_phy_cfg sun8i_r40_cfg = {
static const struct sun4i_usb_phy_cfg sun8i_v3s_cfg = {
.num_phys = 1,
- .type = sun8i_v3s_phy,
.disc_thresh = 3,
.phyctl_offset = REG_PHYCTL_A33,
.dedicated_clocks = true,
@@ -1006,16 +994,15 @@ static const struct sun4i_usb_phy_cfg sun8i_v3s_cfg = {
static const struct sun4i_usb_phy_cfg sun20i_d1_cfg = {
.num_phys = 2,
- .type = sun50i_h6_phy,
.phyctl_offset = REG_PHYCTL_A33,
.dedicated_clocks = true,
.hci_phy_ctl_clear = PHY_CTL_SIDDQ,
.phy0_dual_route = true,
+ .siddq_in_base = true,
};
static const struct sun4i_usb_phy_cfg sun50i_a64_cfg = {
.num_phys = 2,
- .type = sun50i_a64_phy,
.disc_thresh = 3,
.phyctl_offset = REG_PHYCTL_A33,
.dedicated_clocks = true,
@@ -1025,22 +1012,22 @@ static const struct sun4i_usb_phy_cfg sun50i_a64_cfg = {
static const struct sun4i_usb_phy_cfg sun50i_h6_cfg = {
.num_phys = 4,
- .type = sun50i_h6_phy,
.phyctl_offset = REG_PHYCTL_A33,
.dedicated_clocks = true,
.phy0_dual_route = true,
.missing_phys = BIT(1) | BIT(2),
+ .siddq_in_base = true,
};
static const struct sun4i_usb_phy_cfg sun50i_h616_cfg = {
.num_phys = 4,
- .type = sun50i_h6_phy,
.disc_thresh = 3,
.phyctl_offset = REG_PHYCTL_A33,
.dedicated_clocks = true,
.phy0_dual_route = true,
.hci_phy_ctl_clear = PHY_CTL_SIDDQ,
.needs_phy2_siddq = true,
+ .siddq_in_base = true,
};
static const struct of_device_id sun4i_usb_phy_of_match[] = {
@@ -1059,6 +1046,8 @@ static const struct of_device_id sun4i_usb_phy_of_match[] = {
.data = &sun50i_a64_cfg},
{ .compatible = "allwinner,sun50i-h6-usb-phy", .data = &sun50i_h6_cfg },
{ .compatible = "allwinner,sun50i-h616-usb-phy", .data = &sun50i_h616_cfg },
+ { .compatible = "allwinner,suniv-f1c100s-usb-phy",
+ .data = &suniv_f1c100s_cfg },
{ },
};
MODULE_DEVICE_TABLE(of, sun4i_usb_phy_of_match);
diff --git a/drivers/phy/mediatek/phy-mtk-io.h b/drivers/phy/mediatek/phy-mtk-io.h
index d20ad5e5be81..58f06db822cb 100644
--- a/drivers/phy/mediatek/phy-mtk-io.h
+++ b/drivers/phy/mediatek/phy-mtk-io.h
@@ -39,8 +39,8 @@ static inline void mtk_phy_update_bits(void __iomem *reg, u32 mask, u32 val)
/* field @mask shall be constant and continuous */
#define mtk_phy_update_field(reg, mask, val) \
({ \
- typeof(mask) mask_ = (mask); \
- mtk_phy_update_bits(reg, mask_, FIELD_PREP(mask_, val)); \
+ BUILD_BUG_ON_MSG(!__builtin_constant_p(mask), "mask is not constant"); \
+ mtk_phy_update_bits(reg, mask, FIELD_PREP(mask, val)); \
})
#endif
diff --git a/drivers/phy/phy-can-transceiver.c b/drivers/phy/phy-can-transceiver.c
index ce511ad5d369..5487b9dd1ead 100644
--- a/drivers/phy/phy-can-transceiver.c
+++ b/drivers/phy/phy-can-transceiver.c
@@ -84,6 +84,10 @@ static const struct of_device_id can_transceiver_phy_ids[] = {
.compatible = "ti,tcan1043",
.data = &tcan1043_drvdata
},
+ {
+ .compatible = "nxp,tjr1443",
+ .data = &tcan1043_drvdata
+ },
{ }
};
MODULE_DEVICE_TABLE(of, can_transceiver_phy_ids);
diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c
index d93ddf1262c5..9951efc03eaa 100644
--- a/drivers/phy/phy-core.c
+++ b/drivers/phy/phy-core.c
@@ -767,27 +767,6 @@ struct phy *phy_get(struct device *dev, const char *string)
EXPORT_SYMBOL_GPL(phy_get);
/**
- * phy_optional_get() - lookup and obtain a reference to an optional phy.
- * @dev: device that requests this phy
- * @string: the phy name as given in the dt data or the name of the controller
- * port for non-dt case
- *
- * Returns the phy driver, after getting a refcount to it; or
- * NULL if there is no such phy. The caller is responsible for
- * calling phy_put() to release that count.
- */
-struct phy *phy_optional_get(struct device *dev, const char *string)
-{
- struct phy *phy = phy_get(dev, string);
-
- if (PTR_ERR(phy) == -ENODEV)
- phy = NULL;
-
- return phy;
-}
-EXPORT_SYMBOL_GPL(phy_optional_get);
-
-/**
* devm_phy_get() - lookup and obtain a reference to a phy.
* @dev: device that requests this phy
* @string: the phy name as given in the dt data or phy device name
@@ -880,6 +859,36 @@ struct phy *devm_of_phy_get(struct device *dev, struct device_node *np,
EXPORT_SYMBOL_GPL(devm_of_phy_get);
/**
+ * devm_of_phy_optional_get() - lookup and obtain a reference to an optional
+ * phy.
+ * @dev: device that requests this phy
+ * @np: node containing the phy
+ * @con_id: name of the phy from device's point of view
+ *
+ * Gets the phy using of_phy_get(), and associates a device with it using
+ * devres. On driver detach, release function is invoked on the devres data,
+ * then, devres data is freed. This differs to devm_of_phy_get() in
+ * that if the phy does not exist, it is not considered an error and
+ * -ENODEV will not be returned. Instead the NULL phy is returned,
+ * which can be passed to all other phy consumer calls.
+ */
+struct phy *devm_of_phy_optional_get(struct device *dev, struct device_node *np,
+ const char *con_id)
+{
+ struct phy *phy = devm_of_phy_get(dev, np, con_id);
+
+ if (PTR_ERR(phy) == -ENODEV)
+ phy = NULL;
+
+ if (IS_ERR(phy))
+ dev_err_probe(dev, PTR_ERR(phy), "failed to get PHY %pOF:%s",
+ np, con_id);
+
+ return phy;
+}
+EXPORT_SYMBOL_GPL(devm_of_phy_optional_get);
+
+/**
* devm_of_phy_get_by_index() - lookup and obtain a reference to a phy by index.
* @dev: device that requests this phy
* @np: node containing the phy
diff --git a/drivers/phy/qualcomm/Kconfig b/drivers/phy/qualcomm/Kconfig
index eb9ddc685b38..4850d48f31fa 100644
--- a/drivers/phy/qualcomm/Kconfig
+++ b/drivers/phy/qualcomm/Kconfig
@@ -50,14 +50,56 @@ config PHY_QCOM_PCIE2
Enable this to support the Qualcomm PCIe PHY, used with the Synopsys
based PCIe controller.
-config PHY_QCOM_QMP
- tristate "Qualcomm QMP PHY Driver"
+menuconfig PHY_QCOM_QMP
+ tristate "Qualcomm QMP PHY Drivers"
depends on OF && COMMON_CLK && (ARCH_QCOM || COMPILE_TEST)
+
+if PHY_QCOM_QMP
+
+config PHY_QCOM_QMP_COMBO
+ tristate "Qualcomm QMP Combo PHY Driver"
+ default PHY_QCOM_QMP
select GENERIC_PHY
select MFD_SYSCON
help
- Enable this to support the QMP PHY transceiver that is used
- with controllers such as PCIe, UFS, and USB on Qualcomm chips.
+ Enable this to support the QMP Combo PHY transceiver that is used
+ with USB3 and DisplayPort controllers on Qualcomm chips.
+
+config PHY_QCOM_QMP_PCIE
+ tristate "Qualcomm QMP PCIe PHY Driver"
+ depends on PCI || COMPILE_TEST
+ select GENERIC_PHY
+ default PHY_QCOM_QMP
+ help
+ Enable this to support the QMP PCIe PHY transceiver that is used
+ with PCIe controllers on Qualcomm chips.
+
+config PHY_QCOM_QMP_PCIE_8996
+ tristate "Qualcomm QMP PCIe 8996 PHY Driver"
+ depends on PCI || COMPILE_TEST
+ select GENERIC_PHY
+ default PHY_QCOM_QMP
+ help
+ Enable this to support the QMP PCIe PHY transceiver that is used
+ with PCIe controllers on Qualcomm msm8996 chips.
+
+config PHY_QCOM_QMP_UFS
+ tristate "Qualcomm QMP UFS PHY Driver"
+ select GENERIC_PHY
+ default PHY_QCOM_QMP
+ help
+ Enable this to support the QMP UFS PHY transceiver that is used
+ with UFS controllers on Qualcomm chips.
+
+config PHY_QCOM_QMP_USB
+ tristate "Qualcomm QMP USB PHY Driver"
+ select GENERIC_PHY
+ default PHY_QCOM_QMP
+ help
+ Enable this to support the QMP USB PHY transceiver that is used
+ with USB3 controllers on Qualcomm chips.
+
+endif # PHY_QCOM_QMP
config PHY_QCOM_QUSB2
tristate "Qualcomm QUSB2 PHY Driver"
@@ -70,6 +112,24 @@ config PHY_QCOM_QUSB2
PHY which is usually paired with either the ChipIdea or Synopsys DWC3
USB IPs on MSM SOCs.
+config PHY_QCOM_SNPS_EUSB2
+ tristate "Qualcomm SNPS eUSB2 PHY Driver"
+ depends on OF && (ARCH_QCOM || COMPILE_TEST)
+ select GENERIC_PHY
+ help
+ Enable support for the USB high-speed SNPS eUSB2 phy on Qualcomm
+ chipsets. The PHY is paired with a Synopsys DWC3 USB controller
+ on Qualcomm SOCs.
+
+config PHY_QCOM_EUSB2_REPEATER
+ tristate "Qualcomm SNPS eUSB2 Repeater Driver"
+ depends on OF && (ARCH_QCOM || COMPILE_TEST)
+ select GENERIC_PHY
+ help
+ Enable support for the USB high-speed SNPS eUSB2 repeater on Qualcomm
+ PMICs. The repeater is paired with a Synopsys eUSB2 Phy
+ on Qualcomm SOCs.
+
config PHY_QCOM_USB_HS
tristate "Qualcomm USB HS PHY module"
depends on USB_ULPI_BUS
diff --git a/drivers/phy/qualcomm/Makefile b/drivers/phy/qualcomm/Makefile
index 65f6c30a3e93..de3dc9ccf067 100644
--- a/drivers/phy/qualcomm/Makefile
+++ b/drivers/phy/qualcomm/Makefile
@@ -5,14 +5,16 @@ obj-$(CONFIG_PHY_QCOM_EDP) += phy-qcom-edp.o
obj-$(CONFIG_PHY_QCOM_IPQ4019_USB) += phy-qcom-ipq4019-usb.o
obj-$(CONFIG_PHY_QCOM_IPQ806X_SATA) += phy-qcom-ipq806x-sata.o
obj-$(CONFIG_PHY_QCOM_PCIE2) += phy-qcom-pcie2.o
-obj-$(CONFIG_PHY_QCOM_QMP) += \
- phy-qcom-qmp-combo.o \
- phy-qcom-qmp-pcie.o \
- phy-qcom-qmp-pcie-msm8996.o \
- phy-qcom-qmp-ufs.o \
- phy-qcom-qmp-usb.o
+
+obj-$(CONFIG_PHY_QCOM_QMP_COMBO) += phy-qcom-qmp-combo.o
+obj-$(CONFIG_PHY_QCOM_QMP_PCIE) += phy-qcom-qmp-pcie.o
+obj-$(CONFIG_PHY_QCOM_QMP_PCIE_8996) += phy-qcom-qmp-pcie-msm8996.o
+obj-$(CONFIG_PHY_QCOM_QMP_UFS) += phy-qcom-qmp-ufs.o
+obj-$(CONFIG_PHY_QCOM_QMP_USB) += phy-qcom-qmp-usb.o
obj-$(CONFIG_PHY_QCOM_QUSB2) += phy-qcom-qusb2.o
+obj-$(CONFIG_PHY_QCOM_SNPS_EUSB2) += phy-qcom-snps-eusb2.o
+obj-$(CONFIG_PHY_QCOM_EUSB2_REPEATER) += phy-qcom-eusb2-repeater.o
obj-$(CONFIG_PHY_QCOM_USB_HS) += phy-qcom-usb-hs.o
obj-$(CONFIG_PHY_QCOM_USB_HSIC) += phy-qcom-usb-hsic.o
obj-$(CONFIG_PHY_QCOM_USB_HS_28NM) += phy-qcom-usb-hs-28nm.o
diff --git a/drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c b/drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c
new file mode 100644
index 000000000000..3f265ac2df20
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-eusb2-repeater.c
@@ -0,0 +1,259 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/regmap.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/phy/phy.h>
+
+/* eUSB2 status registers */
+#define EUSB2_RPTR_STATUS 0x08
+#define RPTR_OK BIT(7)
+
+/* eUSB2 control registers */
+#define EUSB2_EN_CTL1 0x46
+#define EUSB2_RPTR_EN BIT(7)
+
+#define EUSB2_FORCE_EN_5 0xe8
+#define F_CLK_19P2M_EN BIT(6)
+
+#define EUSB2_FORCE_VAL_5 0xeD
+#define V_CLK_19P2M_EN BIT(6)
+
+#define EUSB2_TUNE_IUSB2 0x51
+#define EUSB2_TUNE_SQUELCH_U 0x54
+#define EUSB2_TUNE_USB2_PREEM 0x57
+
+#define QCOM_EUSB2_REPEATER_INIT_CFG(o, v) \
+ { \
+ .offset = o, \
+ .val = v, \
+ }
+
+struct eusb2_repeater_init_tbl {
+ unsigned int offset;
+ unsigned int val;
+};
+
+struct eusb2_repeater_cfg {
+ const struct eusb2_repeater_init_tbl *init_tbl;
+ int init_tbl_num;
+ const char * const *vreg_list;
+ int num_vregs;
+};
+
+struct eusb2_repeater {
+ struct device *dev;
+ struct regmap *regmap;
+ struct phy *phy;
+ struct regulator_bulk_data *vregs;
+ const struct eusb2_repeater_cfg *cfg;
+ u16 base;
+ enum phy_mode mode;
+};
+
+static const char * const pm8550b_vreg_l[] = {
+ "vdd18", "vdd3",
+};
+
+static const struct eusb2_repeater_init_tbl pm8550b_init_tbl[] = {
+ QCOM_EUSB2_REPEATER_INIT_CFG(EUSB2_TUNE_IUSB2, 0x8),
+ QCOM_EUSB2_REPEATER_INIT_CFG(EUSB2_TUNE_SQUELCH_U, 0x3),
+ QCOM_EUSB2_REPEATER_INIT_CFG(EUSB2_TUNE_USB2_PREEM, 0x5),
+};
+
+static const struct eusb2_repeater_cfg pm8550b_eusb2_cfg = {
+ .init_tbl = pm8550b_init_tbl,
+ .init_tbl_num = ARRAY_SIZE(pm8550b_init_tbl),
+ .vreg_list = pm8550b_vreg_l,
+ .num_vregs = ARRAY_SIZE(pm8550b_vreg_l),
+};
+
+static int eusb2_repeater_init_vregs(struct eusb2_repeater *rptr)
+{
+ int num = rptr->cfg->num_vregs;
+ struct device *dev = rptr->dev;
+ int i;
+
+ rptr->vregs = devm_kcalloc(dev, num, sizeof(*rptr->vregs), GFP_KERNEL);
+ if (!rptr->vregs)
+ return -ENOMEM;
+
+ for (i = 0; i < num; i++)
+ rptr->vregs[i].supply = rptr->cfg->vreg_list[i];
+
+ return devm_regulator_bulk_get(dev, num, rptr->vregs);
+}
+
+static int eusb2_repeater_init(struct phy *phy)
+{
+ struct eusb2_repeater *rptr = phy_get_drvdata(phy);
+ const struct eusb2_repeater_init_tbl *init_tbl = rptr->cfg->init_tbl;
+ int num = rptr->cfg->init_tbl_num;
+ u32 val;
+ int ret;
+ int i;
+
+ ret = regulator_bulk_enable(rptr->cfg->num_vregs, rptr->vregs);
+ if (ret)
+ return ret;
+
+ regmap_update_bits(rptr->regmap, rptr->base + EUSB2_EN_CTL1,
+ EUSB2_RPTR_EN, EUSB2_RPTR_EN);
+
+ for (i = 0; i < num; i++)
+ regmap_update_bits(rptr->regmap,
+ rptr->base + init_tbl[i].offset,
+ init_tbl[i].val, init_tbl[i].val);
+
+ ret = regmap_read_poll_timeout(rptr->regmap,
+ rptr->base + EUSB2_RPTR_STATUS, val,
+ val & RPTR_OK, 10, 5);
+ if (ret)
+ dev_err(rptr->dev, "initialization timed-out\n");
+
+ return ret;
+}
+
+static int eusb2_repeater_set_mode(struct phy *phy,
+ enum phy_mode mode, int submode)
+{
+ struct eusb2_repeater *rptr = phy_get_drvdata(phy);
+
+ switch (mode) {
+ case PHY_MODE_USB_HOST:
+ /*
+ * CM.Lx is prohibited when repeater is already into Lx state as
+ * per eUSB 1.2 Spec. Below implement software workaround until
+ * PHY and controller is fixing seen observation.
+ */
+ regmap_update_bits(rptr->regmap, rptr->base + EUSB2_FORCE_EN_5,
+ F_CLK_19P2M_EN, F_CLK_19P2M_EN);
+ regmap_update_bits(rptr->regmap, rptr->base + EUSB2_FORCE_VAL_5,
+ V_CLK_19P2M_EN, V_CLK_19P2M_EN);
+ break;
+ case PHY_MODE_USB_DEVICE:
+ /*
+ * In device mode clear host mode related workaround as there
+ * is no repeater reset available, and enable/disable of
+ * repeater doesn't clear previous value due to shared
+ * regulators (say host <-> device mode switch).
+ */
+ regmap_update_bits(rptr->regmap, rptr->base + EUSB2_FORCE_EN_5,
+ F_CLK_19P2M_EN, 0);
+ regmap_update_bits(rptr->regmap, rptr->base + EUSB2_FORCE_VAL_5,
+ V_CLK_19P2M_EN, 0);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int eusb2_repeater_exit(struct phy *phy)
+{
+ struct eusb2_repeater *rptr = phy_get_drvdata(phy);
+
+ return regulator_bulk_disable(rptr->cfg->num_vregs, rptr->vregs);
+}
+
+static const struct phy_ops eusb2_repeater_ops = {
+ .init = eusb2_repeater_init,
+ .exit = eusb2_repeater_exit,
+ .set_mode = eusb2_repeater_set_mode,
+ .owner = THIS_MODULE,
+};
+
+static int eusb2_repeater_probe(struct platform_device *pdev)
+{
+ struct eusb2_repeater *rptr;
+ struct device *dev = &pdev->dev;
+ struct phy_provider *phy_provider;
+ struct device_node *np = dev->of_node;
+ u32 res;
+ int ret;
+
+ rptr = devm_kzalloc(dev, sizeof(*rptr), GFP_KERNEL);
+ if (!rptr)
+ return -ENOMEM;
+
+ rptr->dev = dev;
+ dev_set_drvdata(dev, rptr);
+
+ rptr->cfg = of_device_get_match_data(dev);
+ if (!rptr->cfg)
+ return -EINVAL;
+
+ rptr->regmap = dev_get_regmap(dev->parent, NULL);
+ if (!rptr->regmap)
+ return -ENODEV;
+
+ ret = of_property_read_u32(np, "reg", &res);
+ if (ret < 0)
+ return ret;
+
+ rptr->base = res;
+
+ ret = eusb2_repeater_init_vregs(rptr);
+ if (ret < 0) {
+ dev_err(dev, "unable to get supplies\n");
+ return ret;
+ }
+
+ rptr->phy = devm_phy_create(dev, np, &eusb2_repeater_ops);
+ if (IS_ERR(rptr->phy)) {
+ dev_err(dev, "failed to create PHY: %d\n", ret);
+ return PTR_ERR(rptr->phy);
+ }
+
+ phy_set_drvdata(rptr->phy, rptr);
+
+ phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+ if (IS_ERR(phy_provider))
+ return PTR_ERR(phy_provider);
+
+ dev_info(dev, "Registered Qcom-eUSB2 repeater\n");
+
+ return 0;
+}
+
+static int eusb2_repeater_remove(struct platform_device *pdev)
+{
+ struct eusb2_repeater *rptr = platform_get_drvdata(pdev);
+
+ if (!rptr)
+ return 0;
+
+ eusb2_repeater_exit(rptr->phy);
+
+ return 0;
+}
+
+static const struct of_device_id eusb2_repeater_of_match_table[] = {
+ {
+ .compatible = "qcom,pm8550b-eusb2-repeater",
+ .data = &pm8550b_eusb2_cfg,
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, eusb2_repeater_of_match_table);
+
+static struct platform_driver eusb2_repeater_driver = {
+ .probe = eusb2_repeater_probe,
+ .remove = eusb2_repeater_remove,
+ .driver = {
+ .name = "qcom-eusb2-repeater",
+ .of_match_table = eusb2_repeater_of_match_table,
+ },
+};
+
+module_platform_driver(eusb2_repeater_driver);
+
+MODULE_DESCRIPTION("Qualcomm PMIC eUSB2 Repeater driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/phy/qualcomm/phy-qcom-pcie2.c b/drivers/phy/qualcomm/phy-qcom-pcie2.c
index 5407e59bb185..11a2bb958681 100644
--- a/drivers/phy/qualcomm/phy-qcom-pcie2.c
+++ b/drivers/phy/qualcomm/phy-qcom-pcie2.c
@@ -243,7 +243,11 @@ static int phy_pipe_clksrc_register(struct qcom_phy *qphy)
fixed->fixed_rate = 250000000;
fixed->hw.init = &init;
- return devm_clk_hw_register(qphy->dev, &fixed->hw);
+ ret = devm_clk_hw_register(qphy->dev, &fixed->hw);
+ if (ret < 0)
+ return ret;
+
+ return devm_of_clk_add_hw_provider(qphy->dev, of_clk_hw_simple_get, &fixed->hw);
}
static int qcom_pcie2_phy_probe(struct platform_device *pdev)
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
index 77052c66cf70..c1483e157af4 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c
@@ -23,6 +23,10 @@
#include <dt-bindings/phy/phy-qcom-qmp.h>
#include "phy-qcom-qmp.h"
+#include "phy-qcom-qmp-pcs-misc-v3.h"
+#include "phy-qcom-qmp-pcs-usb-v4.h"
+#include "phy-qcom-qmp-pcs-usb-v5.h"
+#include "phy-qcom-qmp-pcs-usb-v6.h"
/* QPHY_SW_RESET bit */
#define SW_RESET BIT(0)
@@ -56,9 +60,6 @@
/* QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR register bits */
#define IRQ_CLEAR BIT(0)
-/* QPHY_PCS_LFPS_RXTERM_IRQ_STATUS register bits */
-#define RCVR_DETECT BIT(0)
-
/* QPHY_V3_PCS_MISC_CLAMP_ENABLE register bits */
#define CLAMP_EN BIT(0) /* enables i/o clamp_n */
@@ -96,31 +97,29 @@ enum qphy_reg_layout {
QPHY_PCS_STATUS,
QPHY_PCS_AUTONOMOUS_MODE_CTRL,
QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR,
- QPHY_PCS_LFPS_RXTERM_IRQ_STATUS,
QPHY_PCS_POWER_DOWN_CONTROL,
/* Keep last to ensure regs_layout arrays are properly initialized */
QPHY_LAYOUT_SIZE
};
static const unsigned int qmp_v3_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
- [QPHY_SW_RESET] = 0x00,
- [QPHY_START_CTRL] = 0x08,
- [QPHY_PCS_STATUS] = 0x174,
- [QPHY_PCS_POWER_DOWN_CONTROL] = 0x04,
- [QPHY_PCS_AUTONOMOUS_MODE_CTRL] = 0x0d8,
- [QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR] = 0x0dc,
- [QPHY_PCS_LFPS_RXTERM_IRQ_STATUS] = 0x170,
+ [QPHY_SW_RESET] = QPHY_V3_PCS_SW_RESET,
+ [QPHY_START_CTRL] = QPHY_V3_PCS_START_CONTROL,
+ [QPHY_PCS_STATUS] = QPHY_V3_PCS_PCS_STATUS,
+ [QPHY_PCS_POWER_DOWN_CONTROL] = QPHY_V3_PCS_POWER_DOWN_CONTROL,
+ [QPHY_PCS_AUTONOMOUS_MODE_CTRL] = QPHY_V3_PCS_AUTONOMOUS_MODE_CTRL,
+ [QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR] = QPHY_V3_PCS_LFPS_RXTERM_IRQ_CLEAR,
};
static const unsigned int qmp_v4_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
- [QPHY_SW_RESET] = 0x00,
- [QPHY_START_CTRL] = 0x44,
- [QPHY_PCS_STATUS] = 0x14,
- [QPHY_PCS_POWER_DOWN_CONTROL] = 0x40,
+ [QPHY_SW_RESET] = QPHY_V4_PCS_SW_RESET,
+ [QPHY_START_CTRL] = QPHY_V4_PCS_START_CONTROL,
+ [QPHY_PCS_STATUS] = QPHY_V4_PCS_PCS_STATUS1,
+ [QPHY_PCS_POWER_DOWN_CONTROL] = QPHY_V4_PCS_POWER_DOWN_CONTROL,
/* In PCS_USB */
- [QPHY_PCS_AUTONOMOUS_MODE_CTRL] = 0x008,
- [QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR] = 0x014,
+ [QPHY_PCS_AUTONOMOUS_MODE_CTRL] = QPHY_V4_PCS_USB3_AUTONOMOUS_MODE_CTRL,
+ [QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR] = QPHY_V4_PCS_USB3_LFPS_RXTERM_IRQ_CLEAR,
};
static const struct qmp_phy_init_tbl qmp_v3_usb3_serdes_tbl[] = {
@@ -311,6 +310,70 @@ static const struct qmp_phy_init_tbl qmp_v3_usb3_pcs_tbl[] = {
QMP_PHY_INIT_CFG(QPHY_V3_PCS_RXEQTRAINING_RUN_TIME, 0x13),
};
+static const struct qmp_phy_init_tbl sm6350_usb3_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FASTLOCK_FO_GAIN, 0x0b),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4e),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL4, 0x18),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x77),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_OFFSET_ADAPTOR_CNTRL2, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_CNTRL, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_DEGLITCH_CNTRL, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_MODE_00, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x75),
+};
+
+static const struct qmp_phy_init_tbl sm6350_usb3_pcs_tbl[] = {
+ /* FLL settings */
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNTRL2, 0x83),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNT_VAL_L, 0x09),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNT_VAL_H_TOL, 0xa2),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_MAN_CODE, 0x40),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_FLL_CNTRL1, 0x02),
+
+ /* Lock Det settings */
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG1, 0xd1),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG2, 0x1f),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_LOCK_DETECT_CONFIG3, 0x47),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_POWER_STATE_CONFIG2, 0x1b),
+
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RX_SIGDET_LVL, 0xcc),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V0, 0x9f),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V1, 0x9f),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V2, 0xb7),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V3, 0x4e),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_V4, 0x65),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXMGN_LS, 0x6b),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V0, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V0, 0x0d),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V1, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V1, 0x0d),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V2, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V2, 0x0d),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V3, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V3, 0x1d),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_V4, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_V4, 0x0d),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M6DB_LS, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TXDEEMPH_M3P5DB_LS, 0x0d),
+
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RATE_SLEW_CNTRL, 0x02),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_PWRUP_RESET_DLY_TIME_AUXCLK, 0x04),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TSYNC_RSYNC_TIME, 0x44),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_PWRUP_RESET_DLY_TIME_AUXCLK, 0x04),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_P1U2_L, 0xe7),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_P1U2_H, 0x03),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_U3_L, 0x40),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RCVR_DTCT_DLY_U3_H, 0x00),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RXEQTRAINING_WAIT_TIME, 0x75),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_LFPS_TX_ECSTART_EQTLOCK, 0x86),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RXEQTRAINING_RUN_TIME, 0x13),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_LFPS_DET_HIGH_COUNT_VAL, 0x04),
+
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_REFGEN_REQ_CONFIG1, 0x21),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_REFGEN_REQ_CONFIG2, 0x60),
+};
+
static const struct qmp_phy_init_tbl sm8150_usb3_serdes_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_EN_CENTER, 0x01),
QMP_PHY_INIT_CFG(QSERDES_V4_COM_SSC_PER1, 0x31),
@@ -494,6 +557,223 @@ static const struct qmp_phy_init_tbl sm8250_usb3_pcs_usb_tbl[] = {
QMP_PHY_INIT_CFG(QPHY_V4_PCS_USB3_RXEQTRAINING_DFE_TIME_S2, 0x07),
};
+static const struct qmp_phy_init_tbl sm8350_usb3_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_RES_CODE_LANE_TX, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_RES_CODE_LANE_RX, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_RES_CODE_LANE_OFFSET_TX, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_RES_CODE_LANE_OFFSET_RX, 0x0e),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_1, 0x35),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_3, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_4, 0x7f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_5, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_RCV_DETECT_LVL_2, 0x12),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_PI_QEC_CTRL, 0x21),
+};
+
+static const struct qmp_phy_init_tbl sm8350_usb3_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FO_GAIN, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SO_GAIN, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FASTLOCK_FO_GAIN, 0x2f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x7f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FASTLOCK_COUNT_LOW, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FASTLOCK_COUNT_HIGH, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_PI_CONTROLS, 0x99),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_THRESH1, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_THRESH2, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_GAIN1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_GAIN2, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_VGA_CAL_CNTRL1, 0x54),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_VGA_CAL_CNTRL2, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL4, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_IDAC_TSETTLE_LOW, 0xc0),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_IDAC_TSETTLE_HIGH, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x47),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_SIGDET_CNTRL, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_SIGDET_DEGLITCH_CNTRL, 0x0e),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_LOW, 0xbb),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH, 0x7b),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH2, 0xbb),
+ QMP_PHY_INIT_CFG_LANE(QSERDES_V5_RX_RX_MODE_00_HIGH3, 0x3d, 1),
+ QMP_PHY_INIT_CFG_LANE(QSERDES_V5_RX_RX_MODE_00_HIGH3, 0x3c, 2),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH4, 0xdb),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_LOW, 0x64),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH, 0x24),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH2, 0xd2),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH3, 0x13),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH4, 0xa9),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_DFE_EN_TIMER, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_DFE_CTLE_POST_CAL_OFFSET, 0x38),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_AUX_DATA_TCOARSE_TFINE, 0xa0),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_DCC_CTRL1, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_GM_CAL, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_VTH_CODE, 0x10),
+};
+
+static const struct qmp_phy_init_tbl sm8350_usb3_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_RCVR_DTCT_DLY_P1U2_L, 0xe7),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_RCVR_DTCT_DLY_P1U2_H, 0x03),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG1, 0xd0),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG2, 0x07),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG3, 0x20),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_LOCK_DETECT_CONFIG6, 0x13),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_REFGEN_REQ_CONFIG1, 0x21),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_RX_SIGDET_LVL, 0xaa),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_CDR_RESET_TIME, 0x0a),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_ALIGN_DETECT_CONFIG1, 0x88),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_ALIGN_DETECT_CONFIG2, 0x13),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_PCS_TX_RX_CONFIG, 0x0c),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_EQ_CONFIG1, 0x4b),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_EQ_CONFIG5, 0x10),
+};
+
+static const struct qmp_phy_init_tbl sm8350_usb3_pcs_usb_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_USB3_RCVR_DTCT_DLY_U3_L, 0x40),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_USB3_RCVR_DTCT_DLY_U3_H, 0x00),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_USB3_LFPS_DET_HIGH_COUNT_VAL, 0xf8),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_USB3_RXEQTRAINING_DFE_TIME_S2, 0x07),
+};
+
+static const struct qmp_phy_init_tbl sm8550_usb3_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE1_MODE1, 0xc0),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE2_MODE1, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CP_CTRL_MODE1, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_RCTRL_MODE1, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_CCTRL_MODE1, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CORECLK_DIV_MODE1, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP1_MODE1, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP2_MODE1, 0x41),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MODE1, 0x41),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MSB_MODE1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START1_MODE1, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START2_MODE1, 0x75),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START3_MODE1, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_HSCLK_SEL_1, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_VCO_TUNE1_MODE1, 0x25),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_VCO_TUNE2_MODE1, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE1_MODE1, 0x5c),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE2_MODE1, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE1_MODE0, 0x5c),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE1_MODE0, 0xc0),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE2_MODE0, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CP_CTRL_MODE0, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_CCTRL_MODE0, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP1_MODE0, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP2_MODE0, 0x1a),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MODE0, 0x41),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MSB_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START1_MODE0, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START2_MODE0, 0x75),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START3_MODE0, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_VCO_TUNE1_MODE0, 0x25),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_VCO_TUNE2_MODE0, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_BG_TIMER, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_EN_CENTER, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_PER1, 0x62),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_PER2, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SYSCLK_BUF_ENABLE, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SYSCLK_EN_SEL, 0x1a),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP_CFG, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_VCO_TUNE_MAP, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CORE_CLK_EN, 0x20),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CMN_CONFIG_1, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_AUTO_GAIN_ADJ_CTRL_1, 0xb6),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_AUTO_GAIN_ADJ_CTRL_2, 0x4b),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_AUTO_GAIN_ADJ_CTRL_3, 0x37),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_ADDITIONAL_MISC, 0x0c),
+};
+
+static const struct qmp_phy_init_tbl sm8550_usb3_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V6_TX_RES_CODE_LANE_TX, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V6_TX_RES_CODE_LANE_RX, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V6_TX_RES_CODE_LANE_OFFSET_TX, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V6_TX_RES_CODE_LANE_OFFSET_RX, 0x09),
+ QMP_PHY_INIT_CFG(QSERDES_V6_TX_LANE_MODE_1, 0xf5),
+ QMP_PHY_INIT_CFG(QSERDES_V6_TX_LANE_MODE_3, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V6_TX_LANE_MODE_4, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V6_TX_LANE_MODE_5, 0x5f),
+ QMP_PHY_INIT_CFG(QSERDES_V6_TX_RCV_DETECT_LVL_2, 0x12),
+ QMP_PHY_INIT_CFG_LANE(QSERDES_V6_TX_PI_QEC_CTRL, 0x21, 1),
+ QMP_PHY_INIT_CFG_LANE(QSERDES_V6_TX_PI_QEC_CTRL, 0x05, 2),
+};
+
+static const struct qmp_phy_init_tbl sm8550_usb3_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V6_RX_UCDR_FO_GAIN, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V6_RX_UCDR_SO_GAIN, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V6_RX_UCDR_FASTLOCK_FO_GAIN, 0x2f),
+ QMP_PHY_INIT_CFG(QSERDES_V6_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x7f),
+ QMP_PHY_INIT_CFG(QSERDES_V6_RX_UCDR_FASTLOCK_COUNT_LOW, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V6_RX_UCDR_FASTLOCK_COUNT_HIGH, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V6_RX_UCDR_PI_CONTROLS, 0x99),
+ QMP_PHY_INIT_CFG(QSERDES_V6_RX_UCDR_SB2_THRESH1, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V6_RX_UCDR_SB2_THRESH2, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V6_RX_UCDR_SB2_GAIN1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V6_RX_UCDR_SB2_GAIN2, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V6_RX_AUX_DATA_TCOARSE_TFINE, 0xa0),
+ QMP_PHY_INIT_CFG(QSERDES_V6_RX_VGA_CAL_CNTRL1, 0x54),
+ QMP_PHY_INIT_CFG(QSERDES_V6_RX_VGA_CAL_CNTRL2, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V6_RX_GM_CAL, 0x13),
+ QMP_PHY_INIT_CFG(QSERDES_V6_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V6_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4a),
+ QMP_PHY_INIT_CFG(QSERDES_V6_RX_RX_EQU_ADAPTOR_CNTRL4, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V6_RX_RX_IDAC_TSETTLE_LOW, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V6_RX_RX_IDAC_TSETTLE_HIGH, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V6_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x47),
+ QMP_PHY_INIT_CFG(QSERDES_V6_RX_SIGDET_CNTRL, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V6_RX_SIGDET_DEGLITCH_CNTRL, 0x0e),
+ QMP_PHY_INIT_CFG(QSERDES_V6_RX_RX_MODE_01_LOW, 0xdc),
+ QMP_PHY_INIT_CFG(QSERDES_V6_RX_RX_MODE_01_HIGH, 0x5c),
+ QMP_PHY_INIT_CFG(QSERDES_V6_RX_RX_MODE_01_HIGH2, 0x9c),
+ QMP_PHY_INIT_CFG(QSERDES_V6_RX_RX_MODE_01_HIGH3, 0x1d),
+ QMP_PHY_INIT_CFG(QSERDES_V6_RX_RX_MODE_01_HIGH4, 0x09),
+ QMP_PHY_INIT_CFG(QSERDES_V6_RX_DFE_EN_TIMER, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V6_RX_DFE_CTLE_POST_CAL_OFFSET, 0x38),
+ QMP_PHY_INIT_CFG(QSERDES_V6_RX_DCC_CTRL1, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V6_RX_VTH_CODE, 0x10),
+ QMP_PHY_INIT_CFG(QSERDES_V6_RX_SIGDET_CAL_CTRL1, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V6_RX_SIGDET_CAL_TRIM, 0x08),
+
+ QMP_PHY_INIT_CFG_LANE(QSERDES_V6_RX_RX_MODE_00_LOW, 0x3f, 1),
+ QMP_PHY_INIT_CFG_LANE(QSERDES_V6_RX_RX_MODE_00_HIGH, 0xbf, 1),
+ QMP_PHY_INIT_CFG_LANE(QSERDES_V6_RX_RX_MODE_00_HIGH2, 0xff, 1),
+ QMP_PHY_INIT_CFG_LANE(QSERDES_V6_RX_RX_MODE_00_HIGH3, 0xdf, 1),
+ QMP_PHY_INIT_CFG_LANE(QSERDES_V6_RX_RX_MODE_00_HIGH4, 0xed, 1),
+
+ QMP_PHY_INIT_CFG_LANE(QSERDES_V6_RX_RX_MODE_00_LOW, 0xbf, 2),
+ QMP_PHY_INIT_CFG_LANE(QSERDES_V6_RX_RX_MODE_00_HIGH, 0xbf, 2),
+ QMP_PHY_INIT_CFG_LANE(QSERDES_V6_RX_RX_MODE_00_HIGH2, 0xbf, 2),
+ QMP_PHY_INIT_CFG_LANE(QSERDES_V6_RX_RX_MODE_00_HIGH3, 0xdf, 2),
+ QMP_PHY_INIT_CFG_LANE(QSERDES_V6_RX_RX_MODE_00_HIGH4, 0xfd, 2),
+};
+
+static const struct qmp_phy_init_tbl sm8550_usb3_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_USB_V6_PCS_LOCK_DETECT_CONFIG1, 0xc4),
+ QMP_PHY_INIT_CFG(QPHY_USB_V6_PCS_LOCK_DETECT_CONFIG2, 0x89),
+ QMP_PHY_INIT_CFG(QPHY_USB_V6_PCS_LOCK_DETECT_CONFIG3, 0x20),
+ QMP_PHY_INIT_CFG(QPHY_USB_V6_PCS_LOCK_DETECT_CONFIG6, 0x13),
+ QMP_PHY_INIT_CFG(QPHY_USB_V6_PCS_REFGEN_REQ_CONFIG1, 0x21),
+ QMP_PHY_INIT_CFG(QPHY_USB_V6_PCS_RX_SIGDET_LVL, 0x99),
+ QMP_PHY_INIT_CFG(QPHY_USB_V6_PCS_RCVR_DTCT_DLY_P1U2_L, 0xe7),
+ QMP_PHY_INIT_CFG(QPHY_USB_V6_PCS_RCVR_DTCT_DLY_P1U2_H, 0x03),
+ QMP_PHY_INIT_CFG(QPHY_USB_V6_PCS_CDR_RESET_TIME, 0x0a),
+ QMP_PHY_INIT_CFG(QPHY_USB_V6_PCS_ALIGN_DETECT_CONFIG1, 0x88),
+ QMP_PHY_INIT_CFG(QPHY_USB_V6_PCS_ALIGN_DETECT_CONFIG2, 0x13),
+ QMP_PHY_INIT_CFG(QPHY_USB_V6_PCS_PCS_TX_RX_CONFIG, 0x0c),
+ QMP_PHY_INIT_CFG(QPHY_USB_V6_PCS_EQ_CONFIG1, 0x4b),
+ QMP_PHY_INIT_CFG(QPHY_USB_V6_PCS_EQ_CONFIG5, 0x10),
+ QMP_PHY_INIT_CFG(QPHY_USB_V6_PCS_USB3_POWER_STATE_CONFIG1, 0x68),
+};
+
+static const struct qmp_phy_init_tbl sm8550_usb3_pcs_usb_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_USB_V6_PCS_USB3_LFPS_DET_HIGH_COUNT_VAL, 0xf8),
+ QMP_PHY_INIT_CFG(QPHY_USB_V6_PCS_USB3_RXEQTRAINING_DFE_TIME_S2, 0x07),
+ QMP_PHY_INIT_CFG(QPHY_USB_V6_PCS_USB3_RCVR_DTCT_DLY_U3_L, 0x40),
+ QMP_PHY_INIT_CFG(QPHY_USB_V6_PCS_USB3_RCVR_DTCT_DLY_U3_H, 0x00),
+};
+
static const struct qmp_phy_init_tbl qmp_v4_dp_serdes_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_V4_COM_SVS_MODE_CLK_SEL, 0x05),
QMP_PHY_INIT_CFG(QSERDES_V4_COM_SYSCLK_EN_SEL, 0x3b),
@@ -600,6 +880,20 @@ static const struct qmp_phy_init_tbl qmp_v5_dp_serdes_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_V4_COM_CORE_CLK_EN, 0x1f),
};
+static const struct qmp_phy_init_tbl qmp_v5_dp_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_VMODE_CTRL1, 0x40),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_PRE_STALL_LDO_BOOST_EN, 0x30),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_INTERFACE_SELECT, 0x3b),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_CLKBUF_ENABLE, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_RESET_TSYNC_EN, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_TRAN_DRVR_EMP_EN, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_PARRATE_REC_DETECT_IDLE_EN, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_TX_INTERFACE_MODE, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_RES_CODE_LANE_OFFSET_TX, 0x11),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_RES_CODE_LANE_OFFSET_RX, 0x11),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_TX_BAND, 0x04),
+};
+
static const struct qmp_phy_init_tbl qmp_v5_5nm_dp_tx_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_V5_5NM_TX_LANE_MODE_3, 0x51),
QMP_PHY_INIT_CFG(QSERDES_V5_5NM_TX_TRANSCEIVER_BIAS_EN, 0x1a),
@@ -615,6 +909,91 @@ static const struct qmp_phy_init_tbl qmp_v5_5nm_dp_tx_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_V5_5NM_TX_TX_BAND, 0x01),
};
+static const struct qmp_phy_init_tbl qmp_v6_dp_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SVS_MODE_CLK_SEL, 0x15),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SYSCLK_EN_SEL, 0x3b),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SYS_CLK_CTRL, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CLK_ENABLE1, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SYSCLK_BUF_ENABLE, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CLK_SELECT, 0x30),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_IVCO, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_CCTRL_MODE0, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CP_CTRL_MODE0, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START1_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CMN_CONFIG_1, 0x12),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_INTEGLOOP_GAIN0_MODE0, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_INTEGLOOP_GAIN1_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_VCO_TUNE_MAP, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_BG_TIMER, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_CORE_CLK_DIV_MODE0, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_VCO_TUNE_CTRL, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_BIAS_EN_CLK_BUFLR_EN, 0x17),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CORE_CLK_EN, 0x0f),
+};
+
+static const struct qmp_phy_init_tbl qmp_v6_dp_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V6_TX_VMODE_CTRL1, 0x40),
+ QMP_PHY_INIT_CFG(QSERDES_V6_TX_PRE_STALL_LDO_BOOST_EN, 0x30),
+ QMP_PHY_INIT_CFG(QSERDES_V6_TX_INTERFACE_SELECT, 0x3b),
+ QMP_PHY_INIT_CFG(QSERDES_V6_TX_CLKBUF_ENABLE, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V6_TX_RESET_TSYNC_EN, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_V6_TX_TRAN_DRVR_EMP_EN, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V6_TX_PARRATE_REC_DETECT_IDLE_EN, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V6_TX_TX_INTERFACE_MODE, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V6_TX_RES_CODE_LANE_OFFSET_TX, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V6_TX_RES_CODE_LANE_OFFSET_RX, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V6_TX_TX_BAND, 0x4),
+};
+
+static const struct qmp_phy_init_tbl qmp_v6_dp_serdes_tbl_rbr[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_HSCLK_SEL_1, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MODE0, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START2_MODE0, 0xc0),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START3_MODE0, 0x0b),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP1_MODE0, 0x37),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP2_MODE0, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP_EN, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE1_MODE0, 0x71),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x0c),
+};
+
+static const struct qmp_phy_init_tbl qmp_v6_dp_serdes_tbl_hbr[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_HSCLK_SEL_1, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MODE0, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START2_MODE0, 0xc0),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START3_MODE0, 0x0b),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP1_MODE0, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP2_MODE0, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP_EN, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE1_MODE0, 0x71),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x0c),
+};
+
+static const struct qmp_phy_init_tbl qmp_v6_dp_serdes_tbl_hbr2[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_HSCLK_SEL_1, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MODE0, 0x46),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START2_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START3_MODE0, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP1_MODE0, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP2_MODE0, 0x0e),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP_EN, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE1_MODE0, 0x97),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x10),
+};
+
+static const struct qmp_phy_init_tbl qmp_v6_dp_serdes_tbl_hbr3[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_HSCLK_SEL_1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MODE0, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START2_MODE0, 0xc0),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START3_MODE0, 0x0b),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP1_MODE0, 0x17),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP2_MODE0, 0x15),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP_EN, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE1_MODE0, 0x71),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x0c),
+};
+
static const struct qmp_phy_init_tbl sc8280xp_usb43dp_serdes_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_EN_CENTER, 0x01),
QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_PER1, 0x31),
@@ -768,6 +1147,27 @@ static const u8 qmp_dp_v3_voltage_swing_hbr_rbr[4][4] = {
{ 0x1f, 0xff, 0xff, 0xff }
};
+static const u8 qmp_dp_v4_pre_emphasis_hbr3_hbr2[4][4] = {
+ { 0x00, 0x0c, 0x15, 0x1b },
+ { 0x02, 0x0e, 0x16, 0xff },
+ { 0x02, 0x11, 0xff, 0xff },
+ { 0x04, 0xff, 0xff, 0xff }
+};
+
+static const u8 qmp_dp_v4_pre_emphasis_hbr_rbr[4][4] = {
+ { 0x00, 0x0d, 0x14, 0x1a },
+ { 0x00, 0x0e, 0x15, 0xff },
+ { 0x00, 0x0d, 0xff, 0xff },
+ { 0x03, 0xff, 0xff, 0xff }
+};
+
+static const u8 qmp_dp_v4_voltage_swing_hbr_rbr[4][4] = {
+ { 0x08, 0x0f, 0x16, 0x1f },
+ { 0x11, 0x1e, 0x1f, 0xff },
+ { 0x16, 0x1f, 0xff, 0xff },
+ { 0x1f, 0xff, 0xff, 0xff }
+};
+
static const u8 qmp_dp_v5_pre_emphasis_hbr3_hbr2[4][4] = {
{ 0x20, 0x2c, 0x35, 0x3b },
{ 0x22, 0x2e, 0x36, 0xff },
@@ -796,6 +1196,13 @@ static const u8 qmp_dp_v5_voltage_swing_hbr_rbr[4][4] = {
{ 0x3f, 0xff, 0xff, 0xff }
};
+static const u8 qmp_dp_v6_pre_emphasis_hbr_rbr[4][4] = {
+ { 0x20, 0x2d, 0x34, 0x3a },
+ { 0x20, 0x2e, 0x35, 0xff },
+ { 0x20, 0x2e, 0xff, 0xff },
+ { 0x22, 0xff, 0xff, 0xff }
+};
+
struct qmp_combo;
struct qmp_combo_offsets {
@@ -809,6 +1216,8 @@ struct qmp_combo_offsets {
u16 usb3_pcs;
u16 usb3_pcs_usb;
u16 dp_serdes;
+ u16 dp_txa;
+ u16 dp_txb;
u16 dp_dp_phy;
};
@@ -928,6 +1337,9 @@ static int qmp_v4_calibrate_dp_phy(struct qmp_combo *qmp);
static int qmp_v5_configure_dp_phy(struct qmp_combo *qmp);
+static void qmp_v6_dp_aux_init(struct qmp_combo *qmp);
+static int qmp_v6_configure_dp_phy(struct qmp_combo *qmp);
+
static inline void qphy_setbits(void __iomem *base, u32 offset, u32 val)
{
u32 reg;
@@ -975,6 +1387,21 @@ static const char * const sc7180_usb3phy_reset_l[] = {
"phy",
};
+static const struct qmp_combo_offsets qmp_combo_offsets_v3 = {
+ .com = 0x0000,
+ .txa = 0x1200,
+ .rxa = 0x1400,
+ .txb = 0x1600,
+ .rxb = 0x1800,
+ .usb3_serdes = 0x1000,
+ .usb3_pcs_misc = 0x1a00,
+ .usb3_pcs = 0x1c00,
+ .dp_serdes = 0x2000,
+ .dp_txa = 0x2200,
+ .dp_txb = 0x2600,
+ .dp_dp_phy = 0x2a00,
+};
+
static const struct qmp_combo_offsets qmp_combo_offsets_v5 = {
.com = 0x0000,
.txa = 0x0400,
@@ -989,6 +1416,22 @@ static const struct qmp_combo_offsets qmp_combo_offsets_v5 = {
.dp_dp_phy = 0x2200,
};
+static const struct qmp_combo_offsets qmp_combo_offsets_v6 = {
+ .com = 0x0000,
+ .txa = 0x1200,
+ .rxa = 0x1400,
+ .txb = 0x1600,
+ .rxb = 0x1800,
+ .usb3_serdes = 0x1000,
+ .usb3_pcs_misc = 0x1a00,
+ .usb3_pcs = 0x1c00,
+ .usb3_pcs_usb = 0x1f00,
+ .dp_serdes = 0x2000,
+ .dp_txa = 0x2200,
+ .dp_txb = 0x2600,
+ .dp_dp_phy = 0x2a00,
+};
+
static const struct qmp_phy_cfg sc7180_usb3dpphy_cfg = {
.serdes_tbl = qmp_v3_usb3_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(qmp_v3_usb3_serdes_tbl),
@@ -1172,6 +1615,51 @@ static const struct qmp_phy_cfg sc8280xp_usb43dpphy_cfg = {
.regs = qmp_v4_usb3phy_regs_layout,
};
+static const struct qmp_phy_cfg sm6350_usb3dpphy_cfg = {
+ .offsets = &qmp_combo_offsets_v3,
+
+ .serdes_tbl = qmp_v3_usb3_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(qmp_v3_usb3_serdes_tbl),
+ .tx_tbl = qmp_v3_usb3_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(qmp_v3_usb3_tx_tbl),
+ .rx_tbl = sm6350_usb3_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(sm6350_usb3_rx_tbl),
+ .pcs_tbl = sm6350_usb3_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(sm6350_usb3_pcs_tbl),
+
+ .dp_serdes_tbl = qmp_v3_dp_serdes_tbl,
+ .dp_serdes_tbl_num = ARRAY_SIZE(qmp_v3_dp_serdes_tbl),
+ .dp_tx_tbl = qmp_v3_dp_tx_tbl,
+ .dp_tx_tbl_num = ARRAY_SIZE(qmp_v3_dp_tx_tbl),
+
+ .serdes_tbl_rbr = qmp_v3_dp_serdes_tbl_rbr,
+ .serdes_tbl_rbr_num = ARRAY_SIZE(qmp_v3_dp_serdes_tbl_rbr),
+ .serdes_tbl_hbr = qmp_v3_dp_serdes_tbl_hbr,
+ .serdes_tbl_hbr_num = ARRAY_SIZE(qmp_v3_dp_serdes_tbl_hbr),
+ .serdes_tbl_hbr2 = qmp_v3_dp_serdes_tbl_hbr2,
+ .serdes_tbl_hbr2_num = ARRAY_SIZE(qmp_v3_dp_serdes_tbl_hbr2),
+ .serdes_tbl_hbr3 = qmp_v3_dp_serdes_tbl_hbr3,
+ .serdes_tbl_hbr3_num = ARRAY_SIZE(qmp_v3_dp_serdes_tbl_hbr3),
+
+ .swing_hbr_rbr = &qmp_dp_v3_voltage_swing_hbr_rbr,
+ .pre_emphasis_hbr_rbr = &qmp_dp_v3_pre_emphasis_hbr_rbr,
+ .swing_hbr3_hbr2 = &qmp_dp_v3_voltage_swing_hbr3_hbr2,
+ .pre_emphasis_hbr3_hbr2 = &qmp_dp_v3_pre_emphasis_hbr3_hbr2,
+
+ .dp_aux_init = qmp_v3_dp_aux_init,
+ .configure_dp_tx = qmp_v3_configure_dp_tx,
+ .configure_dp_phy = qmp_v3_configure_dp_phy,
+ .calibrate_dp_phy = qmp_v3_calibrate_dp_phy,
+
+ .clk_list = qmp_v4_phy_clk_l,
+ .num_clks = ARRAY_SIZE(qmp_v4_phy_clk_l),
+ .reset_list = msm8996_usb3phy_reset_l,
+ .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = qmp_v3_usb3phy_regs_layout,
+};
+
static const struct qmp_phy_cfg sm8250_usb3dpphy_cfg = {
.serdes_tbl = sm8150_usb3_serdes_tbl,
.serdes_tbl_num = ARRAY_SIZE(sm8150_usb3_serdes_tbl),
@@ -1220,6 +1708,102 @@ static const struct qmp_phy_cfg sm8250_usb3dpphy_cfg = {
.has_pwrdn_delay = true,
};
+static const struct qmp_phy_cfg sm8350_usb3dpphy_cfg = {
+ .offsets = &qmp_combo_offsets_v3,
+
+ .serdes_tbl = sm8150_usb3_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(sm8150_usb3_serdes_tbl),
+ .tx_tbl = sm8350_usb3_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(sm8350_usb3_tx_tbl),
+ .rx_tbl = sm8350_usb3_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(sm8350_usb3_rx_tbl),
+ .pcs_tbl = sm8350_usb3_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(sm8350_usb3_pcs_tbl),
+ .pcs_usb_tbl = sm8350_usb3_pcs_usb_tbl,
+ .pcs_usb_tbl_num = ARRAY_SIZE(sm8350_usb3_pcs_usb_tbl),
+
+ .dp_serdes_tbl = qmp_v4_dp_serdes_tbl,
+ .dp_serdes_tbl_num = ARRAY_SIZE(qmp_v4_dp_serdes_tbl),
+ .dp_tx_tbl = qmp_v5_dp_tx_tbl,
+ .dp_tx_tbl_num = ARRAY_SIZE(qmp_v5_dp_tx_tbl),
+
+ .serdes_tbl_rbr = qmp_v4_dp_serdes_tbl_rbr,
+ .serdes_tbl_rbr_num = ARRAY_SIZE(qmp_v4_dp_serdes_tbl_rbr),
+ .serdes_tbl_hbr = qmp_v4_dp_serdes_tbl_hbr,
+ .serdes_tbl_hbr_num = ARRAY_SIZE(qmp_v4_dp_serdes_tbl_hbr),
+ .serdes_tbl_hbr2 = qmp_v4_dp_serdes_tbl_hbr2,
+ .serdes_tbl_hbr2_num = ARRAY_SIZE(qmp_v4_dp_serdes_tbl_hbr2),
+ .serdes_tbl_hbr3 = qmp_v4_dp_serdes_tbl_hbr3,
+ .serdes_tbl_hbr3_num = ARRAY_SIZE(qmp_v4_dp_serdes_tbl_hbr3),
+
+ .swing_hbr_rbr = &qmp_dp_v4_voltage_swing_hbr_rbr,
+ .pre_emphasis_hbr_rbr = &qmp_dp_v4_pre_emphasis_hbr_rbr,
+ .swing_hbr3_hbr2 = &qmp_dp_v3_voltage_swing_hbr3_hbr2,
+ .pre_emphasis_hbr3_hbr2 = &qmp_dp_v4_pre_emphasis_hbr3_hbr2,
+
+ .dp_aux_init = qmp_v4_dp_aux_init,
+ .configure_dp_tx = qmp_v4_configure_dp_tx,
+ .configure_dp_phy = qmp_v4_configure_dp_phy,
+ .calibrate_dp_phy = qmp_v4_calibrate_dp_phy,
+
+ .clk_list = qmp_v4_phy_clk_l,
+ .num_clks = ARRAY_SIZE(qmp_v4_phy_clk_l),
+ .reset_list = msm8996_usb3phy_reset_l,
+ .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = qmp_v4_usb3phy_regs_layout,
+
+ .has_pwrdn_delay = true,
+};
+
+static const struct qmp_phy_cfg sm8550_usb3dpphy_cfg = {
+ .offsets = &qmp_combo_offsets_v6,
+
+ .serdes_tbl = sm8550_usb3_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(sm8550_usb3_serdes_tbl),
+ .tx_tbl = sm8550_usb3_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(sm8550_usb3_tx_tbl),
+ .rx_tbl = sm8550_usb3_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(sm8550_usb3_rx_tbl),
+ .pcs_tbl = sm8550_usb3_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(sm8550_usb3_pcs_tbl),
+ .pcs_usb_tbl = sm8550_usb3_pcs_usb_tbl,
+ .pcs_usb_tbl_num = ARRAY_SIZE(sm8550_usb3_pcs_usb_tbl),
+
+ .dp_serdes_tbl = qmp_v6_dp_serdes_tbl,
+ .dp_serdes_tbl_num = ARRAY_SIZE(qmp_v6_dp_serdes_tbl),
+ .dp_tx_tbl = qmp_v6_dp_tx_tbl,
+ .dp_tx_tbl_num = ARRAY_SIZE(qmp_v6_dp_tx_tbl),
+
+ .serdes_tbl_rbr = qmp_v6_dp_serdes_tbl_rbr,
+ .serdes_tbl_rbr_num = ARRAY_SIZE(qmp_v6_dp_serdes_tbl_rbr),
+ .serdes_tbl_hbr = qmp_v6_dp_serdes_tbl_hbr,
+ .serdes_tbl_hbr_num = ARRAY_SIZE(qmp_v6_dp_serdes_tbl_hbr),
+ .serdes_tbl_hbr2 = qmp_v6_dp_serdes_tbl_hbr2,
+ .serdes_tbl_hbr2_num = ARRAY_SIZE(qmp_v6_dp_serdes_tbl_hbr2),
+ .serdes_tbl_hbr3 = qmp_v6_dp_serdes_tbl_hbr3,
+ .serdes_tbl_hbr3_num = ARRAY_SIZE(qmp_v6_dp_serdes_tbl_hbr3),
+
+ .swing_hbr_rbr = &qmp_dp_v5_voltage_swing_hbr_rbr,
+ .pre_emphasis_hbr_rbr = &qmp_dp_v6_pre_emphasis_hbr_rbr,
+ .swing_hbr3_hbr2 = &qmp_dp_v5_voltage_swing_hbr3_hbr2,
+ .pre_emphasis_hbr3_hbr2 = &qmp_dp_v5_pre_emphasis_hbr3_hbr2,
+
+ .dp_aux_init = qmp_v6_dp_aux_init,
+ .configure_dp_tx = qmp_v4_configure_dp_tx,
+ .configure_dp_phy = qmp_v6_configure_dp_phy,
+ .calibrate_dp_phy = qmp_v4_calibrate_dp_phy,
+
+ .regs = qmp_v4_usb3phy_regs_layout,
+ .clk_list = qmp_v4_phy_clk_l,
+ .num_clks = ARRAY_SIZE(qmp_v4_phy_clk_l),
+ .reset_list = msm8996_usb3phy_reset_l,
+ .num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+};
+
static void qmp_combo_configure_lane(void __iomem *base,
const struct qmp_phy_init_tbl tbl[],
int num,
@@ -1530,6 +2114,33 @@ static void qmp_v4_dp_aux_init(struct qmp_combo *qmp)
qmp->dp_dp_phy + QSERDES_V4_DP_PHY_AUX_INTERRUPT_MASK);
}
+static void qmp_v6_dp_aux_init(struct qmp_combo *qmp)
+{
+ writel(DP_PHY_PD_CTL_PWRDN | DP_PHY_PD_CTL_PSR_PWRDN | DP_PHY_PD_CTL_AUX_PWRDN |
+ DP_PHY_PD_CTL_PLL_PWRDN | DP_PHY_PD_CTL_DP_CLAMP_EN,
+ qmp->dp_dp_phy + QSERDES_DP_PHY_PD_CTL);
+
+ /* Turn on BIAS current for PHY/PLL */
+ writel(0x17, qmp->dp_serdes + QSERDES_V6_COM_PLL_BIAS_EN_CLK_BUFLR_EN);
+
+ writel(0x00, qmp->dp_dp_phy + QSERDES_DP_PHY_AUX_CFG0);
+ writel(0x13, qmp->dp_dp_phy + QSERDES_DP_PHY_AUX_CFG1);
+ writel(0xa4, qmp->dp_dp_phy + QSERDES_DP_PHY_AUX_CFG2);
+ writel(0x00, qmp->dp_dp_phy + QSERDES_DP_PHY_AUX_CFG3);
+ writel(0x0a, qmp->dp_dp_phy + QSERDES_DP_PHY_AUX_CFG4);
+ writel(0x26, qmp->dp_dp_phy + QSERDES_DP_PHY_AUX_CFG5);
+ writel(0x0a, qmp->dp_dp_phy + QSERDES_DP_PHY_AUX_CFG6);
+ writel(0x03, qmp->dp_dp_phy + QSERDES_DP_PHY_AUX_CFG7);
+ writel(0xb7, qmp->dp_dp_phy + QSERDES_DP_PHY_AUX_CFG8);
+ writel(0x03, qmp->dp_dp_phy + QSERDES_DP_PHY_AUX_CFG9);
+ qmp->dp_aux_cfg = 0;
+
+ writel(PHY_AUX_STOP_ERR_MASK | PHY_AUX_DEC_ERR_MASK |
+ PHY_AUX_SYNC_ERR_MASK | PHY_AUX_ALIGN_ERR_MASK |
+ PHY_AUX_REQ_ERR_MASK,
+ qmp->dp_dp_phy + QSERDES_V4_DP_PHY_AUX_INTERRUPT_MASK);
+}
+
static void qmp_v4_configure_dp_tx(struct qmp_combo *qmp)
{
/* Program default values before writing proper values */
@@ -1543,7 +2154,10 @@ static void qmp_v4_configure_dp_tx(struct qmp_combo *qmp)
QSERDES_V4_TX_TX_EMP_POST1_LVL);
}
-static int qmp_v45_configure_dp_phy(struct qmp_combo *qmp)
+static int qmp_v456_configure_dp_phy(struct qmp_combo *qmp,
+ unsigned int com_resetm_ctrl_reg,
+ unsigned int com_c_ready_status_reg,
+ unsigned int dp_phy_status_reg)
{
const struct phy_configure_opts_dp *dp_opts = &qmp->dp_opts;
u32 phy_vco_div, status;
@@ -1590,9 +2204,9 @@ static int qmp_v45_configure_dp_phy(struct qmp_combo *qmp)
writel(0x01, qmp->dp_dp_phy + QSERDES_DP_PHY_CFG);
writel(0x09, qmp->dp_dp_phy + QSERDES_DP_PHY_CFG);
- writel(0x20, qmp->dp_serdes + QSERDES_V4_COM_RESETSM_CNTRL);
+ writel(0x20, qmp->dp_serdes + com_resetm_ctrl_reg);
- if (readl_poll_timeout(qmp->dp_serdes + QSERDES_V4_COM_C_READY_STATUS,
+ if (readl_poll_timeout(qmp->dp_serdes + com_c_ready_status_reg,
status,
((status & BIT(0)) > 0),
500,
@@ -1615,14 +2229,14 @@ static int qmp_v45_configure_dp_phy(struct qmp_combo *qmp)
writel(0x19, qmp->dp_dp_phy + QSERDES_DP_PHY_CFG);
- if (readl_poll_timeout(qmp->dp_dp_phy + QSERDES_V4_DP_PHY_STATUS,
+ if (readl_poll_timeout(qmp->dp_dp_phy + dp_phy_status_reg,
status,
((status & BIT(0)) > 0),
500,
10000))
return -ETIMEDOUT;
- if (readl_poll_timeout(qmp->dp_dp_phy + QSERDES_V4_DP_PHY_STATUS,
+ if (readl_poll_timeout(qmp->dp_dp_phy + dp_phy_status_reg,
status,
((status & BIT(1)) > 0),
500,
@@ -1640,7 +2254,9 @@ static int qmp_v4_configure_dp_phy(struct qmp_combo *qmp)
u32 status;
int ret;
- ret = qmp_v45_configure_dp_phy(qmp);
+ ret = qmp_v456_configure_dp_phy(qmp, QSERDES_V4_COM_RESETSM_CNTRL,
+ QSERDES_V4_COM_C_READY_STATUS,
+ QSERDES_V4_DP_PHY_STATUS);
if (ret < 0)
return ret;
@@ -1702,7 +2318,9 @@ static int qmp_v5_configure_dp_phy(struct qmp_combo *qmp)
u32 status;
int ret;
- ret = qmp_v45_configure_dp_phy(qmp);
+ ret = qmp_v456_configure_dp_phy(qmp, QSERDES_V4_COM_RESETSM_CNTRL,
+ QSERDES_V4_COM_C_READY_STATUS,
+ QSERDES_V4_DP_PHY_STATUS);
if (ret < 0)
return ret;
@@ -1751,6 +2369,65 @@ static int qmp_v5_configure_dp_phy(struct qmp_combo *qmp)
return 0;
}
+static int qmp_v6_configure_dp_phy(struct qmp_combo *qmp)
+{
+ const struct phy_configure_opts_dp *dp_opts = &qmp->dp_opts;
+ u32 bias0_en, drvr0_en, bias1_en, drvr1_en;
+ bool reverse = false;
+ u32 status;
+ int ret;
+
+ ret = qmp_v456_configure_dp_phy(qmp, QSERDES_V6_COM_RESETSM_CNTRL,
+ QSERDES_V6_COM_C_READY_STATUS,
+ QSERDES_V6_DP_PHY_STATUS);
+ if (ret < 0)
+ return ret;
+
+ if (dp_opts->lanes == 1) {
+ bias0_en = reverse ? 0x3e : 0x1a;
+ drvr0_en = reverse ? 0x13 : 0x10;
+ bias1_en = reverse ? 0x15 : 0x3e;
+ drvr1_en = reverse ? 0x10 : 0x13;
+ } else if (dp_opts->lanes == 2) {
+ bias0_en = reverse ? 0x3f : 0x15;
+ drvr0_en = 0x10;
+ bias1_en = reverse ? 0x15 : 0x3f;
+ drvr1_en = 0x10;
+ } else {
+ bias0_en = 0x3f;
+ bias1_en = 0x3f;
+ drvr0_en = 0x10;
+ drvr1_en = 0x10;
+ }
+
+ writel(drvr0_en, qmp->dp_tx + QSERDES_V4_TX_HIGHZ_DRVR_EN);
+ writel(bias0_en, qmp->dp_tx + QSERDES_V4_TX_TRANSCEIVER_BIAS_EN);
+ writel(drvr1_en, qmp->dp_tx2 + QSERDES_V4_TX_HIGHZ_DRVR_EN);
+ writel(bias1_en, qmp->dp_tx2 + QSERDES_V4_TX_TRANSCEIVER_BIAS_EN);
+
+ writel(0x18, qmp->dp_dp_phy + QSERDES_DP_PHY_CFG);
+ udelay(2000);
+ writel(0x19, qmp->dp_dp_phy + QSERDES_DP_PHY_CFG);
+
+ if (readl_poll_timeout(qmp->dp_dp_phy + QSERDES_V6_DP_PHY_STATUS,
+ status,
+ ((status & BIT(1)) > 0),
+ 500,
+ 10000))
+ return -ETIMEDOUT;
+
+ writel(0x0a, qmp->dp_tx + QSERDES_V4_TX_TX_POL_INV);
+ writel(0x0a, qmp->dp_tx2 + QSERDES_V4_TX_TX_POL_INV);
+
+ writel(0x27, qmp->dp_tx + QSERDES_V4_TX_TX_DRV_LVL);
+ writel(0x27, qmp->dp_tx2 + QSERDES_V4_TX_TX_DRV_LVL);
+
+ writel(0x20, qmp->dp_tx + QSERDES_V4_TX_TX_EMP_POST1_LVL);
+ writel(0x20, qmp->dp_tx2 + QSERDES_V4_TX_TX_EMP_POST1_LVL);
+
+ return 0;
+}
+
/*
* We need to calibrate the aux setting here as many times
* as the caller tries
@@ -2641,8 +3318,13 @@ static int qmp_combo_parse_dt(struct qmp_combo *qmp)
qmp->pcs_usb = base + offs->usb3_pcs_usb;
qmp->dp_serdes = base + offs->dp_serdes;
- qmp->dp_tx = base + offs->txa;
- qmp->dp_tx2 = base + offs->txb;
+ if (offs->dp_txa) {
+ qmp->dp_tx = base + offs->dp_txa;
+ qmp->dp_tx2 = base + offs->dp_txb;
+ } else {
+ qmp->dp_tx = base + offs->txa;
+ qmp->dp_tx2 = base + offs->txb;
+ }
qmp->dp_dp_phy = base + offs->dp_dp_phy;
qmp->pipe_clk = devm_clk_get(dev, "usb3_pipe");
@@ -2790,9 +3472,25 @@ static const struct of_device_id qmp_combo_of_match_table[] = {
.data = &sdm845_usb3dpphy_cfg,
},
{
+ .compatible = "qcom,sm6350-qmp-usb3-dp-phy",
+ .data = &sm6350_usb3dpphy_cfg,
+ },
+ {
.compatible = "qcom,sm8250-qmp-usb3-dp-phy",
.data = &sm8250_usb3dpphy_cfg,
},
+ {
+ .compatible = "qcom,sm8350-qmp-usb3-dp-phy",
+ .data = &sm8350_usb3dpphy_cfg,
+ },
+ {
+ .compatible = "qcom,sm8450-qmp-usb3-dp-phy",
+ .data = &sm8350_usb3dpphy_cfg,
+ },
+ {
+ .compatible = "qcom,sm8550-qmp-usb3-dp-phy",
+ .data = &sm8550_usb3dpphy_cfg,
+ },
{ }
};
MODULE_DEVICE_TABLE(of, qmp_combo_of_match_table);
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcie-msm8996.c b/drivers/phy/qualcomm/phy-qcom-qmp-pcie-msm8996.c
index a088477e274f..09824be088c9 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-pcie-msm8996.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcie-msm8996.c
@@ -84,9 +84,9 @@ static const unsigned int pciephy_regs_layout[QPHY_LAYOUT_SIZE] = {
[QPHY_COM_POWER_DOWN_CONTROL] = 0x404,
[QPHY_COM_START_CONTROL] = 0x408,
[QPHY_COM_PCS_READY_STATUS] = 0x448,
- [QPHY_SW_RESET] = 0x00,
- [QPHY_START_CTRL] = 0x08,
- [QPHY_PCS_STATUS] = 0x174,
+ [QPHY_SW_RESET] = QPHY_V2_PCS_SW_RESET,
+ [QPHY_START_CTRL] = QPHY_V2_PCS_START_CONTROL,
+ [QPHY_PCS_STATUS] = QPHY_V2_PCS_PCI_PCS_STATUS,
};
static const struct qmp_phy_init_tbl msm8996_pcie_serdes_tbl[] = {
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c b/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c
index 1b136a87053f..5182aeac43ee 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcie.c
@@ -24,6 +24,14 @@
#include <linux/slab.h>
#include "phy-qcom-qmp.h"
+#include "phy-qcom-qmp-pcs-misc-v3.h"
+#include "phy-qcom-qmp-pcs-pcie-v4.h"
+#include "phy-qcom-qmp-pcs-pcie-v4_20.h"
+#include "phy-qcom-qmp-pcs-pcie-v5.h"
+#include "phy-qcom-qmp-pcs-pcie-v5_20.h"
+#include "phy-qcom-qmp-pcs-pcie-v6.h"
+#include "phy-qcom-qmp-pcs-pcie-v6_20.h"
+#include "phy-qcom-qmp-pcie-qhp.h"
/* QPHY_SW_RESET bit */
#define SW_RESET BIT(0)
@@ -74,25 +82,18 @@ enum qphy_reg_layout {
QPHY_LAYOUT_SIZE
};
-static const unsigned int ipq_pciephy_gen3_regs_layout[QPHY_LAYOUT_SIZE] = {
- [QPHY_SW_RESET] = 0x00,
- [QPHY_START_CTRL] = 0x44,
- [QPHY_PCS_STATUS] = 0x14,
- [QPHY_PCS_POWER_DOWN_CONTROL] = 0x40,
+static const unsigned int pciephy_v2_regs_layout[QPHY_LAYOUT_SIZE] = {
+ [QPHY_SW_RESET] = QPHY_V2_PCS_SW_RESET,
+ [QPHY_START_CTRL] = QPHY_V2_PCS_START_CONTROL,
+ [QPHY_PCS_STATUS] = QPHY_V2_PCS_PCI_PCS_STATUS,
+ [QPHY_PCS_POWER_DOWN_CONTROL] = QPHY_V2_PCS_POWER_DOWN_CONTROL,
};
-static const unsigned int pciephy_regs_layout[QPHY_LAYOUT_SIZE] = {
- [QPHY_SW_RESET] = 0x00,
- [QPHY_START_CTRL] = 0x08,
- [QPHY_PCS_STATUS] = 0x174,
- [QPHY_PCS_POWER_DOWN_CONTROL] = 0x04,
-};
-
-static const unsigned int sdm845_qmp_pciephy_regs_layout[QPHY_LAYOUT_SIZE] = {
- [QPHY_SW_RESET] = 0x00,
- [QPHY_START_CTRL] = 0x08,
- [QPHY_PCS_STATUS] = 0x174,
- [QPHY_PCS_POWER_DOWN_CONTROL] = 0x04,
+static const unsigned int pciephy_v3_regs_layout[QPHY_LAYOUT_SIZE] = {
+ [QPHY_SW_RESET] = QPHY_V3_PCS_SW_RESET,
+ [QPHY_START_CTRL] = QPHY_V3_PCS_START_CONTROL,
+ [QPHY_PCS_STATUS] = QPHY_V3_PCS_PCS_STATUS,
+ [QPHY_PCS_POWER_DOWN_CONTROL] = QPHY_V3_PCS_POWER_DOWN_CONTROL,
};
static const unsigned int sdm845_qhp_pciephy_regs_layout[QPHY_LAYOUT_SIZE] = {
@@ -102,11 +103,18 @@ static const unsigned int sdm845_qhp_pciephy_regs_layout[QPHY_LAYOUT_SIZE] = {
[QPHY_PCS_POWER_DOWN_CONTROL] = 0x04,
};
-static const unsigned int sm8250_pcie_regs_layout[QPHY_LAYOUT_SIZE] = {
- [QPHY_SW_RESET] = 0x00,
- [QPHY_START_CTRL] = 0x44,
- [QPHY_PCS_STATUS] = 0x14,
- [QPHY_PCS_POWER_DOWN_CONTROL] = 0x40,
+static const unsigned int pciephy_v4_regs_layout[QPHY_LAYOUT_SIZE] = {
+ [QPHY_SW_RESET] = QPHY_V4_PCS_SW_RESET,
+ [QPHY_START_CTRL] = QPHY_V4_PCS_START_CONTROL,
+ [QPHY_PCS_STATUS] = QPHY_V4_PCS_PCS_STATUS1,
+ [QPHY_PCS_POWER_DOWN_CONTROL] = QPHY_V4_PCS_POWER_DOWN_CONTROL,
+};
+
+static const unsigned int pciephy_v5_regs_layout[QPHY_LAYOUT_SIZE] = {
+ [QPHY_SW_RESET] = QPHY_V5_PCS_SW_RESET,
+ [QPHY_START_CTRL] = QPHY_V5_PCS_START_CONTROL,
+ [QPHY_PCS_STATUS] = QPHY_V5_PCS_PCS_STATUS1,
+ [QPHY_PCS_POWER_DOWN_CONTROL] = QPHY_V5_PCS_POWER_DOWN_CONTROL,
};
static const struct qmp_phy_init_tbl msm8998_pcie_serdes_tbl[] = {
@@ -1216,7 +1224,7 @@ static const struct qmp_phy_init_tbl sdx55_qmp_pcie_pcs_misc_tbl[] = {
QMP_PHY_INIT_CFG(QPHY_V4_20_PCS_LANE1_INSIG_MX_CTRL2, 0x00),
};
-static const struct qmp_phy_init_tbl sm8450_qmp_gen3x1_pcie_serdes_tbl[] = {
+static const struct qmp_phy_init_tbl sm8450_qmp_gen3_pcie_serdes_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_V5_COM_SYSCLK_EN_SEL, 0x08),
QMP_PHY_INIT_CFG(QSERDES_V5_COM_CLK_SELECT, 0x34),
QMP_PHY_INIT_CFG(QSERDES_V5_COM_CORECLK_DIV_MODE1, 0x08),
@@ -1250,7 +1258,6 @@ static const struct qmp_phy_init_tbl sm8450_qmp_gen3x1_pcie_serdes_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_V5_COM_BIN_VCOCAL_CMP_CODE1_MODE0, 0xca),
QMP_PHY_INIT_CFG(QSERDES_V5_COM_BIN_VCOCAL_CMP_CODE2_MODE1, 0x18),
QMP_PHY_INIT_CFG(QSERDES_V5_COM_BIN_VCOCAL_CMP_CODE1_MODE1, 0xa2),
- QMP_PHY_INIT_CFG(QSERDES_V5_COM_SYSCLK_BUF_ENABLE, 0x07),
QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_EN_CENTER, 0x01),
QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_PER1, 0x31),
QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_PER2, 0x01),
@@ -1261,6 +1268,10 @@ static const struct qmp_phy_init_tbl sm8450_qmp_gen3x1_pcie_serdes_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_V5_COM_CLK_ENABLE1, 0x90),
};
+static const struct qmp_phy_init_tbl sm8450_qmp_gen3x1_pcie_rc_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SYSCLK_BUF_ENABLE, 0x07),
+};
+
static const struct qmp_phy_init_tbl sm8450_qmp_gen3x1_pcie_tx_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_V5_TX_PI_QEC_CTRL, 0x20),
QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_1, 0x75),
@@ -1269,11 +1280,9 @@ static const struct qmp_phy_init_tbl sm8450_qmp_gen3x1_pcie_tx_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_V5_TX_RES_CODE_LANE_OFFSET_RX, 0x04),
};
-static const struct qmp_phy_init_tbl sm8450_qmp_gen3x1_pcie_rx_tbl[] = {
+static const struct qmp_phy_init_tbl sm8450_qmp_gen3_pcie_rx_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_LOW, 0x7f),
QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH, 0xff),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH2, 0xbf),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH3, 0x3f),
QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH4, 0xd8),
QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_LOW, 0xdc),
QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH, 0xdc),
@@ -1281,20 +1290,25 @@ static const struct qmp_phy_init_tbl sm8450_qmp_gen3x1_pcie_rx_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH3, 0x34),
QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH4, 0xa6),
QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_10_HIGH3, 0x34),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_10_HIGH4, 0x38),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_VGA_CAL_CNTRL2, 0x07),
QMP_PHY_INIT_CFG(QSERDES_V5_RX_GM_CAL, 0x00),
QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_THRESH1, 0x08),
QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_THRESH2, 0x08),
- QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_PI_CONTROLS, 0xf0),
QMP_PHY_INIT_CFG(QSERDES_V5_RX_DFE_CTLE_POST_CAL_OFFSET, 0x38),
QMP_PHY_INIT_CFG(QSERDES_V5_RX_TX_ADAPT_POST_THRESH, 0xf0),
+};
+
+static const struct qmp_phy_init_tbl sm8450_qmp_gen3x1_pcie_rc_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH2, 0xbf),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH3, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_10_HIGH4, 0x38),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_VGA_CAL_CNTRL2, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_PI_CONTROLS, 0xf0),
QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL4, 0x07),
QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FO_GAIN, 0x09),
QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SO_GAIN, 0x05),
};
-static const struct qmp_phy_init_tbl sm8450_qmp_gen3x1_pcie_pcs_tbl[] = {
+static const struct qmp_phy_init_tbl sm8450_qmp_gen3_pcie_pcs_tbl[] = {
QMP_PHY_INIT_CFG(QPHY_V5_PCS_RX_SIGDET_LVL, 0x77),
QMP_PHY_INIT_CFG(QPHY_V5_PCS_RATE_SLEW_CNTRL1, 0x0b),
QMP_PHY_INIT_CFG(QPHY_V5_PCS_REFGEN_REQ_CONFIG1, 0x05),
@@ -1307,6 +1321,40 @@ static const struct qmp_phy_init_tbl sm8450_qmp_gen3x1_pcie_pcs_misc_tbl[] = {
QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_ENDPOINT_REFCLK_DRIVE, 0xc1),
};
+static const struct qmp_phy_init_tbl sm8350_qmp_gen3x1_pcie_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_PI_QEC_CTRL, 0x20),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_1, 0x75),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_4, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_RES_CODE_LANE_OFFSET_TX, 0x1d),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_RES_CODE_LANE_OFFSET_RX, 0x0c),
+};
+
+static const struct qmp_phy_init_tbl sm8350_qmp_gen3x1_pcie_rc_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH2, 0xbf),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH3, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_VGA_CAL_CNTRL2, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_PI_CONTROLS, 0xf0),
+};
+
+static const struct qmp_phy_init_tbl sm8350_qmp_gen3x2_pcie_rc_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH2, 0x7f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH3, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_VGA_CAL_CNTRL2, 0x0f),
+};
+
+static const struct qmp_phy_init_tbl sm8350_qmp_gen3x2_pcie_tx_tbl[] = {
+ QMP_PHY_INIT_CFG_LANE(QSERDES_V5_TX_PI_QEC_CTRL, 0x02, 1),
+ QMP_PHY_INIT_CFG_LANE(QSERDES_V5_TX_PI_QEC_CTRL, 0x04, 2),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_1, 0xd5),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_4, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_RES_CODE_LANE_OFFSET_TX, 0x1d),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_RES_CODE_LANE_OFFSET_RX, 0x0c),
+};
+
+static const struct qmp_phy_init_tbl sm8350_qmp_gen3x2_pcie_rc_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_EQ_CONFIG2, 0x0f),
+};
+
static const struct qmp_phy_init_tbl sm8450_qmp_gen4x2_pcie_serdes_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_V5_COM_BIAS_EN_CLKBUFLR_EN, 0x14),
QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_IVCO, 0x0f),
@@ -1458,6 +1506,234 @@ static const struct qmp_phy_init_tbl sm8450_qmp_gen4x2_pcie_ep_pcs_misc_tbl[] =
QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_PCIE_OSC_DTCT_MODE2_CONFIG5, 0x08),
};
+static const struct qmp_phy_init_tbl sm8550_qmp_gen3x2_pcie_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_EN_CENTER, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_PER1, 0x62),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_PER2, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE1_MODE0, 0xf8),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE2_MODE0, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE1_MODE1, 0x93),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE2_MODE1, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CLK_ENABLE1, 0x90),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SYS_CLK_CTRL, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_IVCO, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CP_CTRL_MODE0, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CP_CTRL_MODE1, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_RCTRL_MODE1, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_CCTRL_MODE0, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_CCTRL_MODE1, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SYSCLK_EN_SEL, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_BG_TIMER, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP_EN, 0x42),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP1_MODE0, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP2_MODE0, 0x0d),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP1_MODE1, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP2_MODE1, 0x1a),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MODE0, 0x41),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MODE1, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START1_MODE0, 0xab),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START2_MODE0, 0xaa),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START3_MODE0, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START1_MODE1, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START2_MODE1, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START3_MODE1, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_VCO_TUNE_MAP, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CLK_SELECT, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_HSCLK_SEL_1, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CORECLK_DIV_MODE1, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CMN_CONFIG_1, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_ADDITIONAL_MISC_3, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CORE_CLK_EN, 0xa0),
+};
+
+static const struct qmp_phy_init_tbl sm8550_qmp_gen3x2_pcie_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V6_TX_LANE_MODE_1, 0x15),
+ QMP_PHY_INIT_CFG(QSERDES_V6_TX_LANE_MODE_4, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V6_TX_PI_QEC_CTRL, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V6_TX_RES_CODE_LANE_OFFSET_RX, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V6_TX_RES_CODE_LANE_OFFSET_TX, 0x18),
+};
+
+static const struct qmp_phy_init_tbl sm8550_qmp_gen3x2_pcie_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V6_RX_DFE_CTLE_POST_CAL_OFFSET, 0x38),
+ QMP_PHY_INIT_CFG(QSERDES_V6_RX_GM_CAL, 0x11),
+ QMP_PHY_INIT_CFG(QSERDES_V6_RX_RX_MODE_00_HIGH, 0xbf),
+ QMP_PHY_INIT_CFG(QSERDES_V6_RX_RX_MODE_00_HIGH2, 0xbf),
+ QMP_PHY_INIT_CFG(QSERDES_V6_RX_RX_MODE_00_HIGH3, 0xb7),
+ QMP_PHY_INIT_CFG(QSERDES_V6_RX_RX_MODE_00_HIGH4, 0xea),
+ QMP_PHY_INIT_CFG(QSERDES_V6_RX_RX_MODE_00_LOW, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V6_RX_RX_MODE_01_HIGH, 0x5c),
+ QMP_PHY_INIT_CFG(QSERDES_V6_RX_RX_MODE_01_HIGH2, 0x9c),
+ QMP_PHY_INIT_CFG(QSERDES_V6_RX_RX_MODE_01_HIGH3, 0x1a),
+ QMP_PHY_INIT_CFG(QSERDES_V6_RX_RX_MODE_01_HIGH4, 0x89),
+ QMP_PHY_INIT_CFG(QSERDES_V6_RX_RX_MODE_01_LOW, 0xdc),
+ QMP_PHY_INIT_CFG(QSERDES_V6_RX_RX_MODE_10_HIGH, 0x94),
+ QMP_PHY_INIT_CFG(QSERDES_V6_RX_RX_MODE_10_HIGH2, 0x5b),
+ QMP_PHY_INIT_CFG(QSERDES_V6_RX_RX_MODE_10_HIGH3, 0x1a),
+ QMP_PHY_INIT_CFG(QSERDES_V6_RX_RX_MODE_10_HIGH4, 0x89),
+ QMP_PHY_INIT_CFG(QSERDES_V6_RX_TX_ADAPT_POST_THRESH, 0xf0),
+ QMP_PHY_INIT_CFG(QSERDES_V6_RX_UCDR_FO_GAIN, 0x09),
+ QMP_PHY_INIT_CFG(QSERDES_V6_RX_UCDR_SO_GAIN, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V6_RX_UCDR_SB2_THRESH1, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V6_RX_UCDR_SB2_THRESH2, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V6_RX_VGA_CAL_CNTRL2, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V6_RX_SIDGET_ENABLES, 0x1c),
+ QMP_PHY_INIT_CFG(QSERDES_V6_RX_RX_IDAC_TSETTLE_LOW, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V6_RX_SIGDET_CAL_TRIM, 0x08),
+};
+
+static const struct qmp_phy_init_tbl sm8550_qmp_gen3x2_pcie_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_REFGEN_REQ_CONFIG1, 0x05),
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_RX_SIGDET_LVL, 0x77),
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_RATE_SLEW_CNTRL1, 0x0b),
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_EQ_CONFIG2, 0x0f),
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_PCS_TX_RX_CONFIG, 0x8c),
+};
+
+static const struct qmp_phy_init_tbl sm8550_qmp_gen3x2_pcie_pcs_misc_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_PCIE_V6_PCS_PCIE_POWER_STATE_CONFIG2, 0x1d),
+ QMP_PHY_INIT_CFG(QPHY_PCIE_V6_PCS_PCIE_POWER_STATE_CONFIG4, 0x07),
+ QMP_PHY_INIT_CFG(QPHY_PCIE_V6_PCS_PCIE_ENDPOINT_REFCLK_DRIVE, 0xc1),
+ QMP_PHY_INIT_CFG(QPHY_PCIE_V6_PCS_PCIE_OSC_DTCT_ACTIONS, 0x00),
+};
+
+static const struct qmp_phy_init_tbl sm8550_qmp_gen4x2_pcie_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE1_MODE1, 0x26),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE2_MODE1, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CP_CTRL_MODE1, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_RCTRL_MODE1, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_CCTRL_MODE1, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CORECLK_DIV_MODE1, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP1_MODE1, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP2_MODE1, 0x1a),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MODE1, 0x68),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START1_MODE1, 0xab),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START2_MODE1, 0xaa),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START3_MODE1, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_HSCLK_SEL_1, 0x12),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE1_MODE0, 0xf8),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_STEP_SIZE2_MODE0, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CP_CTRL_MODE0, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_CCTRL_MODE0, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_CORE_CLK_DIV_MODE0, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP1_MODE0, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP2_MODE0, 0x0d),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MODE0, 0x41),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START1_MODE0, 0xab),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START2_MODE0, 0xaa),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DIV_FRAC_START3_MODE0, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_HSCLK_HS_SWITCH_SEL_1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_BG_TIMER, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_EN_CENTER, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_PER1, 0x62),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SSC_PER2, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_POST_DIV_MUX, 0x40),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_BIAS_EN_CLK_BUFLR_EN, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CLK_ENABLE1, 0x90),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SYS_CLK_CTRL, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_IVCO, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SYSCLK_EN_SEL, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP_EN, 0x46),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP_CFG, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_VCO_TUNE_MAP, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CLK_SELECT, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CORE_CLK_EN, 0xa0),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CMN_CONFIG_1, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CMN_MISC_1, 0x88),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CMN_MODE, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_VCO_DC_LEVEL_CTRL, 0x0f),
+};
+
+static const struct qmp_phy_init_tbl sm8550_qmp_gen4x2_pcie_ln_shrd_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_RXCLK_DIV2_CTRL, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_RX_Q_EN_RATES, 0xe),
+ QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_DFE_DAC_ENABLE1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_TX_ADAPT_POST_THRESH1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_TX_ADAPT_POST_THRESH2, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_RX_MODE_RATE_0_1_B0, 0x12),
+ QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_RX_MODE_RATE_0_1_B1, 0x12),
+ QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_RX_MODE_RATE_0_1_B2, 0xdb),
+ QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_RX_MODE_RATE_0_1_B3, 0x9a),
+ QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_RX_MODE_RATE_0_1_B4, 0x38),
+ QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_RX_MODE_RATE_0_1_B5, 0xb6),
+ QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_RX_MODE_RATE_0_1_B6, 0x64),
+ QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_RX_MARG_COARSE_THRESH1_RATE210, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_RX_MARG_COARSE_THRESH1_RATE3, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_RX_MARG_COARSE_THRESH2_RATE210, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_RX_MARG_COARSE_THRESH2_RATE3, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_RX_MARG_COARSE_THRESH3_RATE210, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_RX_MARG_COARSE_THRESH3_RATE3, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_RX_MARG_COARSE_THRESH4_RATE3, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_RX_MARG_COARSE_THRESH5_RATE3, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V6_LN_SHRD_RX_MARG_COARSE_THRESH6_RATE3, 0x1f),
+};
+
+static const struct qmp_phy_init_tbl sm8550_qmp_gen4x2_pcie_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V6_20_TX_RES_CODE_LANE_OFFSET_TX, 0x1d),
+ QMP_PHY_INIT_CFG(QSERDES_V6_20_TX_RES_CODE_LANE_OFFSET_RX, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_V6_20_TX_LANE_MODE_1, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V6_20_TX_LANE_MODE_2, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V6_20_TX_LANE_MODE_3, 0x51),
+ QMP_PHY_INIT_CFG(QSERDES_V6_20_TX_TRAN_DRVR_EMP_EN, 0x34),
+};
+
+static const struct qmp_phy_init_tbl sm8550_qmp_gen4x2_pcie_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_UCDR_FO_GAIN_RATE_2, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_UCDR_FO_GAIN_RATE_3, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_UCDR_PI_CONTROLS, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_UCDR_SO_ACC_DEFAULT_VAL_RATE3, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_IVCM_CAL_CTRL2, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_IVCM_POSTCAL_OFFSET, 0x7c),
+ QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_DFE_3, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_VGA_CAL_MAN_VAL, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_GM_CAL, 0x0d),
+ QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_EQU_ADAPTOR_CNTRL4, 0x0b),
+ QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_SIGDET_ENABLES, 0x1c),
+ QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_PHPRE_CTRL, 0x20),
+ QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_DFE_CTLE_POST_CAL_OFFSET, 0x30),
+ QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_Q_PI_INTRINSIC_BIAS_RATE32, 0x09),
+ QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_MODE_RATE2_B0, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_MODE_RATE2_B1, 0xb3),
+ QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_MODE_RATE2_B2, 0x58),
+ QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_MODE_RATE2_B3, 0x9a),
+ QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_MODE_RATE2_B4, 0x26),
+ QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_MODE_RATE2_B5, 0xb6),
+ QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_MODE_RATE2_B6, 0xee),
+ QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_MODE_RATE3_B0, 0xdb),
+ QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_MODE_RATE3_B1, 0xdb),
+ QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_MODE_RATE3_B2, 0xa0),
+ QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_MODE_RATE3_B3, 0xdf),
+ QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_MODE_RATE3_B4, 0x78),
+ QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_MODE_RATE3_B5, 0x76),
+ QMP_PHY_INIT_CFG(QSERDES_V6_20_RX_MODE_RATE3_B6, 0xff),
+};
+
+static const struct qmp_phy_init_tbl sm8550_qmp_gen4x2_pcie_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V6_20_PCS_G3S2_PRE_GAIN, 0x2e),
+ QMP_PHY_INIT_CFG(QPHY_V6_20_PCS_COM_ELECIDLE_DLY_SEL, 0x25),
+ QMP_PHY_INIT_CFG(QPHY_V6_20_PCS_EQ_CONFIG4, 0x00),
+ QMP_PHY_INIT_CFG(QPHY_V6_20_PCS_EQ_CONFIG5, 0x22),
+ QMP_PHY_INIT_CFG(QPHY_V6_20_PCS_TX_RX_CONFIG1, 0x04),
+ QMP_PHY_INIT_CFG(QPHY_V6_20_PCS_TX_RX_CONFIG2, 0x02),
+};
+
+static const struct qmp_phy_init_tbl sm8550_qmp_gen4x2_pcie_pcs_misc_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_PCIE_V6_20_PCS_ENDPOINT_REFCLK_DRIVE, 0xc1),
+ QMP_PHY_INIT_CFG(QPHY_PCIE_V6_20_PCS_OSC_DTCT_ATCIONS, 0x00),
+ QMP_PHY_INIT_CFG(QPHY_PCIE_V6_20_PCS_EQ_CONFIG1, 0x16),
+ QMP_PHY_INIT_CFG(QPHY_PCIE_V6_20_PCS_EQ_CONFIG5, 0x02),
+ QMP_PHY_INIT_CFG(QPHY_PCIE_V6_20_PCS_G4_PRE_GAIN, 0x2e),
+ QMP_PHY_INIT_CFG(QPHY_PCIE_V6_20_PCS_RX_MARGINING_CONFIG1, 0x03),
+ QMP_PHY_INIT_CFG(QPHY_PCIE_V6_20_PCS_RX_MARGINING_CONFIG3, 0x28),
+ QMP_PHY_INIT_CFG(QPHY_PCIE_V6_20_PCS_TX_RX_CONFIG, 0xc0),
+ QMP_PHY_INIT_CFG(QPHY_PCIE_V6_20_PCS_POWER_STATE_CONFIG2, 0x1d),
+ QMP_PHY_INIT_CFG(QPHY_PCIE_V6_20_PCS_RX_MARGINING_CONFIG5, 0x0f),
+ QMP_PHY_INIT_CFG(QPHY_PCIE_V6_20_PCS_G3_FOM_EQ_CONFIG5, 0xf2),
+ QMP_PHY_INIT_CFG(QPHY_PCIE_V6_20_PCS_G4_FOM_EQ_CONFIG5, 0xf2),
+};
+
struct qmp_pcie_offsets {
u16 serdes;
u16 pcs;
@@ -1466,6 +1742,7 @@ struct qmp_pcie_offsets {
u16 rx;
u16 tx2;
u16 rx2;
+ u16 ln_shrd;
};
struct qmp_phy_cfg_tbls {
@@ -1479,6 +1756,8 @@ struct qmp_phy_cfg_tbls {
int pcs_num;
const struct qmp_phy_init_tbl *pcs_misc;
int pcs_misc_num;
+ const struct qmp_phy_init_tbl *ln_shrd;
+ int ln_shrd_num;
};
/* struct qmp_phy_cfg - per-PHY initialization config */
@@ -1521,6 +1800,8 @@ struct qmp_phy_cfg {
bool skip_start_delay;
+ bool has_nocsr_reset;
+
/* QMP PHY pipe clock interface rate */
unsigned long pipe_clock_rate;
};
@@ -1538,6 +1819,7 @@ struct qmp_pcie {
void __iomem *rx;
void __iomem *tx2;
void __iomem *rx2;
+ void __iomem *ln_shrd;
void __iomem *port_b;
@@ -1546,6 +1828,7 @@ struct qmp_pcie {
int num_pipe_clks;
struct reset_control_bulk_data *resets;
+ struct reset_control *nocsr_reset;
struct regulator_bulk_data *vregs;
struct phy *phy;
@@ -1600,6 +1883,10 @@ static const char * const qmp_phy_vreg_l[] = {
"vdda-phy", "vdda-pll",
};
+static const char * const sm8550_qmp_phy_vreg_l[] = {
+ "vdda-phy", "vdda-pll", "vdda-qref",
+};
+
/* list of resets */
static const char * const ipq8074_pciephy_reset_l[] = {
"phy", "common",
@@ -1619,6 +1906,17 @@ static const struct qmp_pcie_offsets qmp_pcie_offsets_v5 = {
.rx2 = 0x1800,
};
+static const struct qmp_pcie_offsets qmp_pcie_offsets_v6_20 = {
+ .serdes = 0x1000,
+ .pcs = 0x1200,
+ .pcs_misc = 0x1400,
+ .tx = 0x0000,
+ .rx = 0x0200,
+ .tx2 = 0x0800,
+ .rx2 = 0x0a00,
+ .ln_shrd = 0x0e00,
+};
+
static const struct qmp_phy_cfg ipq8074_pciephy_cfg = {
.lanes = 1,
@@ -1638,7 +1936,7 @@ static const struct qmp_phy_cfg ipq8074_pciephy_cfg = {
.num_resets = ARRAY_SIZE(ipq8074_pciephy_reset_l),
.vreg_list = NULL,
.num_vregs = 0,
- .regs = pciephy_regs_layout,
+ .regs = pciephy_v2_regs_layout,
.pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
.phy_status = PHYSTATUS,
@@ -1665,7 +1963,7 @@ static const struct qmp_phy_cfg ipq8074_pciephy_gen3_cfg = {
.num_resets = ARRAY_SIZE(ipq8074_pciephy_reset_l),
.vreg_list = NULL,
.num_vregs = 0,
- .regs = ipq_pciephy_gen3_regs_layout,
+ .regs = pciephy_v4_regs_layout,
.pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
.phy_status = PHYSTATUS,
@@ -1694,7 +1992,7 @@ static const struct qmp_phy_cfg ipq6018_pciephy_cfg = {
.num_resets = ARRAY_SIZE(ipq8074_pciephy_reset_l),
.vreg_list = NULL,
.num_vregs = 0,
- .regs = ipq_pciephy_gen3_regs_layout,
+ .regs = pciephy_v4_regs_layout,
.pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
.phy_status = PHYSTATUS,
@@ -1721,7 +2019,7 @@ static const struct qmp_phy_cfg sdm845_qmp_pciephy_cfg = {
.num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l),
.vreg_list = qmp_phy_vreg_l,
.num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
- .regs = sdm845_qmp_pciephy_regs_layout,
+ .regs = pciephy_v3_regs_layout,
.pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
.phy_status = PHYSTATUS,
@@ -1783,7 +2081,7 @@ static const struct qmp_phy_cfg sm8250_qmp_gen3x1_pciephy_cfg = {
.num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l),
.vreg_list = qmp_phy_vreg_l,
.num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
- .regs = sm8250_pcie_regs_layout,
+ .regs = pciephy_v4_regs_layout,
.pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
.phy_status = PHYSTATUS,
@@ -1820,7 +2118,7 @@ static const struct qmp_phy_cfg sm8250_qmp_gen3x2_pciephy_cfg = {
.num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l),
.vreg_list = qmp_phy_vreg_l,
.num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
- .regs = sm8250_pcie_regs_layout,
+ .regs = pciephy_v4_regs_layout,
.pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
.phy_status = PHYSTATUS,
@@ -1845,7 +2143,7 @@ static const struct qmp_phy_cfg msm8998_pciephy_cfg = {
.num_resets = ARRAY_SIZE(ipq8074_pciephy_reset_l),
.vreg_list = qmp_phy_vreg_l,
.num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
- .regs = pciephy_regs_layout,
+ .regs = pciephy_v3_regs_layout,
.pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
.phy_status = PHYSTATUS,
@@ -1874,7 +2172,7 @@ static const struct qmp_phy_cfg sc8180x_pciephy_cfg = {
.num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l),
.vreg_list = qmp_phy_vreg_l,
.num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
- .regs = sm8250_pcie_regs_layout,
+ .regs = pciephy_v4_regs_layout,
.pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
.phy_status = PHYSTATUS,
@@ -1909,7 +2207,7 @@ static const struct qmp_phy_cfg sc8280xp_qmp_gen3x1_pciephy_cfg = {
.num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l),
.vreg_list = qmp_phy_vreg_l,
.num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
- .regs = sm8250_pcie_regs_layout,
+ .regs = pciephy_v5_regs_layout,
.pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
.phy_status = PHYSTATUS,
@@ -1944,7 +2242,7 @@ static const struct qmp_phy_cfg sc8280xp_qmp_gen3x2_pciephy_cfg = {
.num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l),
.vreg_list = qmp_phy_vreg_l,
.num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
- .regs = sm8250_pcie_regs_layout,
+ .regs = pciephy_v5_regs_layout,
.pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
.phy_status = PHYSTATUS,
@@ -1982,7 +2280,7 @@ static const struct qmp_phy_cfg sc8280xp_qmp_gen3x4_pciephy_cfg = {
.num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l),
.vreg_list = qmp_phy_vreg_l,
.num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
- .regs = sm8250_pcie_regs_layout,
+ .regs = pciephy_v5_regs_layout,
.pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
.phy_status = PHYSTATUS,
@@ -2009,34 +2307,116 @@ static const struct qmp_phy_cfg sdx55_qmp_pciephy_cfg = {
.num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l),
.vreg_list = qmp_phy_vreg_l,
.num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
- .regs = sm8250_pcie_regs_layout,
+ .regs = pciephy_v4_regs_layout,
.pwrdn_ctrl = SW_PWRDN,
.phy_status = PHYSTATUS_4_20,
};
+static const struct qmp_phy_cfg sm8350_qmp_gen3x1_pciephy_cfg = {
+ .lanes = 1,
+
+ .offsets = &qmp_pcie_offsets_v5,
+
+ .tbls = {
+ .serdes = sm8450_qmp_gen3_pcie_serdes_tbl,
+ .serdes_num = ARRAY_SIZE(sm8450_qmp_gen3_pcie_serdes_tbl),
+ .tx = sm8350_qmp_gen3x1_pcie_tx_tbl,
+ .tx_num = ARRAY_SIZE(sm8350_qmp_gen3x1_pcie_tx_tbl),
+ .rx = sm8450_qmp_gen3_pcie_rx_tbl,
+ .rx_num = ARRAY_SIZE(sm8450_qmp_gen3_pcie_rx_tbl),
+ .pcs = sm8450_qmp_gen3_pcie_pcs_tbl,
+ .pcs_num = ARRAY_SIZE(sm8450_qmp_gen3_pcie_pcs_tbl),
+ .pcs_misc = sm8450_qmp_gen3x1_pcie_pcs_misc_tbl,
+ .pcs_misc_num = ARRAY_SIZE(sm8450_qmp_gen3x1_pcie_pcs_misc_tbl),
+ },
+
+ .tbls_rc = &(const struct qmp_phy_cfg_tbls) {
+ .serdes = sm8450_qmp_gen3x1_pcie_rc_serdes_tbl,
+ .serdes_num = ARRAY_SIZE(sm8450_qmp_gen3x1_pcie_rc_serdes_tbl),
+ .rx = sm8350_qmp_gen3x1_pcie_rc_rx_tbl,
+ .rx_num = ARRAY_SIZE(sm8350_qmp_gen3x1_pcie_rc_rx_tbl),
+ },
+
+ .clk_list = sc8280xp_pciephy_clk_l,
+ .num_clks = ARRAY_SIZE(sc8280xp_pciephy_clk_l),
+ .reset_list = sdm845_pciephy_reset_l,
+ .num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = pciephy_v5_regs_layout,
+
+ .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
+ .phy_status = PHYSTATUS,
+};
+
+static const struct qmp_phy_cfg sm8350_qmp_gen3x2_pciephy_cfg = {
+ .lanes = 2,
+
+ .offsets = &qmp_pcie_offsets_v5,
+
+ .tbls = {
+ .serdes = sm8450_qmp_gen3_pcie_serdes_tbl,
+ .serdes_num = ARRAY_SIZE(sm8450_qmp_gen3_pcie_serdes_tbl),
+ .tx = sm8350_qmp_gen3x2_pcie_tx_tbl,
+ .tx_num = ARRAY_SIZE(sm8350_qmp_gen3x2_pcie_tx_tbl),
+ .rx = sm8450_qmp_gen3_pcie_rx_tbl,
+ .rx_num = ARRAY_SIZE(sm8450_qmp_gen3_pcie_rx_tbl),
+ .pcs = sm8450_qmp_gen3_pcie_pcs_tbl,
+ .pcs_num = ARRAY_SIZE(sm8450_qmp_gen3_pcie_pcs_tbl),
+ .pcs_misc = sc8280xp_qmp_gen3x2_pcie_pcs_misc_tbl,
+ .pcs_misc_num = ARRAY_SIZE(sc8280xp_qmp_gen3x2_pcie_pcs_misc_tbl),
+ },
+
+ .tbls_rc = &(const struct qmp_phy_cfg_tbls) {
+ .rx = sm8350_qmp_gen3x2_pcie_rc_rx_tbl,
+ .rx_num = ARRAY_SIZE(sm8350_qmp_gen3x2_pcie_rc_rx_tbl),
+ .pcs = sm8350_qmp_gen3x2_pcie_rc_pcs_tbl,
+ .pcs_num = ARRAY_SIZE(sm8350_qmp_gen3x2_pcie_rc_pcs_tbl),
+ },
+
+ .clk_list = sc8280xp_pciephy_clk_l,
+ .num_clks = ARRAY_SIZE(sc8280xp_pciephy_clk_l),
+ .reset_list = sdm845_pciephy_reset_l,
+ .num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = pciephy_v5_regs_layout,
+
+ .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
+ .phy_status = PHYSTATUS,
+};
+
static const struct qmp_phy_cfg sm8450_qmp_gen3x1_pciephy_cfg = {
.lanes = 1,
.tbls = {
- .serdes = sm8450_qmp_gen3x1_pcie_serdes_tbl,
- .serdes_num = ARRAY_SIZE(sm8450_qmp_gen3x1_pcie_serdes_tbl),
+ .serdes = sm8450_qmp_gen3_pcie_serdes_tbl,
+ .serdes_num = ARRAY_SIZE(sm8450_qmp_gen3_pcie_serdes_tbl),
.tx = sm8450_qmp_gen3x1_pcie_tx_tbl,
.tx_num = ARRAY_SIZE(sm8450_qmp_gen3x1_pcie_tx_tbl),
- .rx = sm8450_qmp_gen3x1_pcie_rx_tbl,
- .rx_num = ARRAY_SIZE(sm8450_qmp_gen3x1_pcie_rx_tbl),
- .pcs = sm8450_qmp_gen3x1_pcie_pcs_tbl,
- .pcs_num = ARRAY_SIZE(sm8450_qmp_gen3x1_pcie_pcs_tbl),
+ .rx = sm8450_qmp_gen3_pcie_rx_tbl,
+ .rx_num = ARRAY_SIZE(sm8450_qmp_gen3_pcie_rx_tbl),
+ .pcs = sm8450_qmp_gen3_pcie_pcs_tbl,
+ .pcs_num = ARRAY_SIZE(sm8450_qmp_gen3_pcie_pcs_tbl),
.pcs_misc = sm8450_qmp_gen3x1_pcie_pcs_misc_tbl,
.pcs_misc_num = ARRAY_SIZE(sm8450_qmp_gen3x1_pcie_pcs_misc_tbl),
},
+
+ .tbls_rc = &(const struct qmp_phy_cfg_tbls) {
+ .serdes = sm8450_qmp_gen3x1_pcie_rc_serdes_tbl,
+ .serdes_num = ARRAY_SIZE(sm8450_qmp_gen3x1_pcie_rc_serdes_tbl),
+ .rx = sm8450_qmp_gen3x1_pcie_rc_rx_tbl,
+ .rx_num = ARRAY_SIZE(sm8450_qmp_gen3x1_pcie_rc_rx_tbl),
+ },
+
.clk_list = sdm845_pciephy_clk_l,
.num_clks = ARRAY_SIZE(sdm845_pciephy_clk_l),
.reset_list = sdm845_pciephy_reset_l,
.num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l),
.vreg_list = qmp_phy_vreg_l,
.num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
- .regs = sm8250_pcie_regs_layout,
+ .regs = pciephy_v5_regs_layout,
.pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
.phy_status = PHYSTATUS,
@@ -2078,12 +2458,73 @@ static const struct qmp_phy_cfg sm8450_qmp_gen4x2_pciephy_cfg = {
.num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l),
.vreg_list = qmp_phy_vreg_l,
.num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
- .regs = sm8250_pcie_regs_layout,
+ .regs = pciephy_v5_regs_layout,
.pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
.phy_status = PHYSTATUS_4_20,
};
+static const struct qmp_phy_cfg sm8550_qmp_gen3x2_pciephy_cfg = {
+ .lanes = 2,
+
+ .offsets = &qmp_pcie_offsets_v5,
+
+ .tbls = {
+ .serdes = sm8550_qmp_gen3x2_pcie_serdes_tbl,
+ .serdes_num = ARRAY_SIZE(sm8550_qmp_gen3x2_pcie_serdes_tbl),
+ .tx = sm8550_qmp_gen3x2_pcie_tx_tbl,
+ .tx_num = ARRAY_SIZE(sm8550_qmp_gen3x2_pcie_tx_tbl),
+ .rx = sm8550_qmp_gen3x2_pcie_rx_tbl,
+ .rx_num = ARRAY_SIZE(sm8550_qmp_gen3x2_pcie_rx_tbl),
+ .pcs = sm8550_qmp_gen3x2_pcie_pcs_tbl,
+ .pcs_num = ARRAY_SIZE(sm8550_qmp_gen3x2_pcie_pcs_tbl),
+ .pcs_misc = sm8550_qmp_gen3x2_pcie_pcs_misc_tbl,
+ .pcs_misc_num = ARRAY_SIZE(sm8550_qmp_gen3x2_pcie_pcs_misc_tbl),
+ },
+ .clk_list = sc8280xp_pciephy_clk_l,
+ .num_clks = ARRAY_SIZE(sc8280xp_pciephy_clk_l),
+ .reset_list = sdm845_pciephy_reset_l,
+ .num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = pciephy_v5_regs_layout,
+
+ .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
+ .phy_status = PHYSTATUS,
+};
+
+static const struct qmp_phy_cfg sm8550_qmp_gen4x2_pciephy_cfg = {
+ .lanes = 2,
+
+ .offsets = &qmp_pcie_offsets_v6_20,
+
+ .tbls = {
+ .serdes = sm8550_qmp_gen4x2_pcie_serdes_tbl,
+ .serdes_num = ARRAY_SIZE(sm8550_qmp_gen4x2_pcie_serdes_tbl),
+ .tx = sm8550_qmp_gen4x2_pcie_tx_tbl,
+ .tx_num = ARRAY_SIZE(sm8550_qmp_gen4x2_pcie_tx_tbl),
+ .rx = sm8550_qmp_gen4x2_pcie_rx_tbl,
+ .rx_num = ARRAY_SIZE(sm8550_qmp_gen4x2_pcie_rx_tbl),
+ .pcs = sm8550_qmp_gen4x2_pcie_pcs_tbl,
+ .pcs_num = ARRAY_SIZE(sm8550_qmp_gen4x2_pcie_pcs_tbl),
+ .pcs_misc = sm8550_qmp_gen4x2_pcie_pcs_misc_tbl,
+ .pcs_misc_num = ARRAY_SIZE(sm8550_qmp_gen4x2_pcie_pcs_misc_tbl),
+ .ln_shrd = sm8550_qmp_gen4x2_pcie_ln_shrd_tbl,
+ .ln_shrd_num = ARRAY_SIZE(sm8550_qmp_gen4x2_pcie_ln_shrd_tbl),
+ },
+ .clk_list = sc8280xp_pciephy_clk_l,
+ .num_clks = ARRAY_SIZE(sc8280xp_pciephy_clk_l),
+ .reset_list = sdm845_pciephy_reset_l,
+ .num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l),
+ .vreg_list = sm8550_qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(sm8550_qmp_phy_vreg_l),
+ .regs = pciephy_v5_regs_layout,
+
+ .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
+ .phy_status = PHYSTATUS_4_20,
+ .has_nocsr_reset = true,
+};
+
static void qmp_pcie_configure_lane(void __iomem *base,
const struct qmp_phy_init_tbl tbl[],
int num,
@@ -2138,6 +2579,7 @@ static void qmp_pcie_init_registers(struct qmp_pcie *qmp, const struct qmp_phy_c
void __iomem *rx2 = qmp->rx2;
void __iomem *pcs = qmp->pcs;
void __iomem *pcs_misc = qmp->pcs_misc;
+ void __iomem *ln_shrd = qmp->ln_shrd;
if (!tbls)
return;
@@ -2159,6 +2601,8 @@ static void qmp_pcie_init_registers(struct qmp_pcie *qmp, const struct qmp_phy_c
qmp_pcie_configure(serdes, cfg->serdes_4ln_tbl, cfg->serdes_4ln_num);
qmp_pcie_init_port_b(qmp, tbls);
}
+
+ qmp_pcie_configure(ln_shrd, tbls->ln_shrd, tbls->ln_shrd_num);
}
static int qmp_pcie_init(struct phy *phy)
@@ -2179,12 +2623,18 @@ static int qmp_pcie_init(struct phy *phy)
goto err_disable_regulators;
}
+ ret = reset_control_assert(qmp->nocsr_reset);
+ if (ret) {
+ dev_err(qmp->dev, "no-csr reset assert failed\n");
+ goto err_assert_reset;
+ }
+
usleep_range(200, 300);
ret = reset_control_bulk_deassert(cfg->num_resets, qmp->resets);
if (ret) {
dev_err(qmp->dev, "reset deassert failed\n");
- goto err_disable_regulators;
+ goto err_assert_reset;
}
ret = clk_bulk_prepare_enable(cfg->num_clks, qmp->clks);
@@ -2240,6 +2690,12 @@ static int qmp_pcie_power_on(struct phy *phy)
if (ret)
return ret;
+ ret = reset_control_deassert(qmp->nocsr_reset);
+ if (ret) {
+ dev_err(qmp->dev, "no-csr reset deassert failed\n");
+ goto err_disable_pipe_clk;
+ }
+
/* Pull PHY out of reset state */
qphy_clrbits(pcs, cfg->regs[QPHY_SW_RESET], SW_RESET);
@@ -2373,6 +2829,13 @@ static int qmp_pcie_reset_init(struct qmp_pcie *qmp)
if (ret)
return dev_err_probe(dev, ret, "failed to get resets\n");
+ if (cfg->has_nocsr_reset) {
+ qmp->nocsr_reset = devm_reset_control_get_exclusive(dev, "phy_nocsr");
+ if (IS_ERR(qmp->nocsr_reset))
+ return dev_err_probe(dev, PTR_ERR(qmp->nocsr_reset),
+ "failed to get no-csr reset\n");
+ }
+
return 0;
}
@@ -2595,11 +3058,18 @@ static int qmp_pcie_parse_dt(struct qmp_pcie *qmp)
return PTR_ERR(qmp->port_b);
}
+ if (cfg->tbls.ln_shrd)
+ qmp->ln_shrd = base + offs->ln_shrd;
+
qmp->num_pipe_clks = 2;
qmp->pipe_clks[0].id = "pipe";
qmp->pipe_clks[1].id = "pipediv2";
- ret = devm_clk_bulk_get(dev, qmp->num_pipe_clks, qmp->pipe_clks);
+ ret = devm_clk_bulk_get(dev, 1, qmp->pipe_clks);
+ if (ret)
+ return ret;
+
+ ret = devm_clk_bulk_get_optional(dev, qmp->num_pipe_clks - 1, qmp->pipe_clks + 1);
if (ret)
return ret;
@@ -2720,11 +3190,23 @@ static const struct of_device_id qmp_pcie_of_match_table[] = {
.compatible = "qcom,sm8250-qmp-modem-pcie-phy",
.data = &sm8250_qmp_gen3x2_pciephy_cfg,
}, {
+ .compatible = "qcom,sm8350-qmp-gen3x1-pcie-phy",
+ .data = &sm8350_qmp_gen3x1_pciephy_cfg,
+ }, {
+ .compatible = "qcom,sm8350-qmp-gen3x2-pcie-phy",
+ .data = &sm8350_qmp_gen3x2_pciephy_cfg,
+ }, {
.compatible = "qcom,sm8450-qmp-gen3x1-pcie-phy",
.data = &sm8450_qmp_gen3x1_pciephy_cfg,
}, {
.compatible = "qcom,sm8450-qmp-gen4x2-pcie-phy",
.data = &sm8450_qmp_gen4x2_pciephy_cfg,
+ }, {
+ .compatible = "qcom,sm8550-qmp-gen3x2-pcie-phy",
+ .data = &sm8550_qmp_gen3x2_pciephy_cfg,
+ }, {
+ .compatible = "qcom,sm8550-qmp-gen4x2-pcie-phy",
+ .data = &sm8550_qmp_gen4x2_pciephy_cfg,
},
{ },
};
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v6.h b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v6.h
new file mode 100644
index 000000000000..91e70002eb47
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v6.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#ifndef QCOM_PHY_QMP_PCS_PCIE_V6_H_
+#define QCOM_PHY_QMP_PCS_PCIE_V6_H_
+
+/* Only for QMP V6 PHY - PCIE have different offsets than V5 */
+#define QPHY_PCIE_V6_PCS_PCIE_POWER_STATE_CONFIG2 0x0c
+#define QPHY_PCIE_V6_PCS_PCIE_POWER_STATE_CONFIG4 0x14
+#define QPHY_PCIE_V6_PCS_PCIE_ENDPOINT_REFCLK_DRIVE 0x20
+#define QPHY_PCIE_V6_PCS_PCIE_OSC_DTCT_ACTIONS 0x94
+
+#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v6_20.h b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v6_20.h
new file mode 100644
index 000000000000..e3eb08776339
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-pcie-v6_20.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#ifndef QCOM_PHY_QMP_PCS_PCIE_V6_20_H_
+#define QCOM_PHY_QMP_PCS_PCIE_V6_20_H_
+
+/* Only for QMP V6_20 PHY - PCIE have different offsets than V5 */
+#define QPHY_PCIE_V6_20_PCS_POWER_STATE_CONFIG2 0x00c
+#define QPHY_PCIE_V6_20_PCS_TX_RX_CONFIG 0x018
+#define QPHY_PCIE_V6_20_PCS_ENDPOINT_REFCLK_DRIVE 0x01c
+#define QPHY_PCIE_V6_20_PCS_OSC_DTCT_ATCIONS 0x090
+#define QPHY_PCIE_V6_20_PCS_EQ_CONFIG1 0x0a0
+#define QPHY_PCIE_V6_20_PCS_EQ_CONFIG5 0x108
+#define QPHY_PCIE_V6_20_PCS_G4_PRE_GAIN 0x15c
+#define QPHY_PCIE_V6_20_PCS_RX_MARGINING_CONFIG1 0x17c
+#define QPHY_PCIE_V6_20_PCS_RX_MARGINING_CONFIG3 0x184
+#define QPHY_PCIE_V6_20_PCS_RX_MARGINING_CONFIG5 0x18c
+#define QPHY_PCIE_V6_20_PCS_G3_FOM_EQ_CONFIG5 0x1ac
+#define QPHY_PCIE_V6_20_PCS_G4_FOM_EQ_CONFIG5 0x1c0
+
+#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-ufs-v2.h b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-ufs-v2.h
new file mode 100644
index 000000000000..a0803a8783d2
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-ufs-v2.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef QCOM_PHY_QMP_PCS_UFS_V2_H_
+#define QCOM_PHY_QMP_PCS_UFS_V2_H_
+
+#define QPHY_V2_PCS_UFS_PHY_START 0x000
+#define QPHY_V2_PCS_UFS_POWER_DOWN_CONTROL 0x004
+
+#define QPHY_V2_PCS_UFS_TX_LARGE_AMP_DRV_LVL 0x034
+#define QPHY_V2_PCS_UFS_TX_LARGE_AMP_POST_EMP_LVL 0x038
+#define QPHY_V2_PCS_UFS_TX_SMALL_AMP_DRV_LVL 0x03c
+#define QPHY_V2_PCS_UFS_TX_SMALL_AMP_POST_EMP_LVL 0x040
+
+#define QPHY_V2_PCS_UFS_RX_MIN_STALL_NOCONFIG_TIME_CAP 0x0cc
+#define QPHY_V2_PCS_UFS_RX_SYM_RESYNC_CTRL 0x13c
+#define QPHY_V2_PCS_UFS_RX_MIN_HIBERN8_TIME 0x140
+#define QPHY_V2_PCS_UFS_RX_SIGDET_CTRL2 0x148
+#define QPHY_V2_PCS_UFS_RX_PWM_GEAR_BAND 0x154
+
+#define QPHY_V2_PCS_UFS_READY_STATUS 0x168
+
+#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-ufs-v3.h b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-ufs-v3.h
index ba1ea29d2884..adea13c3a9e6 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-ufs-v3.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-ufs-v3.h
@@ -6,12 +6,15 @@
#ifndef QCOM_PHY_QMP_PCS_UFS_V3_H_
#define QCOM_PHY_QMP_PCS_UFS_V3_H_
+#define QPHY_V3_PCS_UFS_PHY_START 0x000
+#define QPHY_V3_PCS_UFS_POWER_DOWN_CONTROL 0x004
#define QPHY_V3_PCS_UFS_TX_LARGE_AMP_DRV_LVL 0x02c
#define QPHY_V3_PCS_UFS_TX_SMALL_AMP_DRV_LVL 0x034
#define QPHY_V3_PCS_UFS_RX_SYM_RESYNC_CTRL 0x134
#define QPHY_V3_PCS_UFS_RX_MIN_HIBERN8_TIME 0x138
#define QPHY_V3_PCS_UFS_RX_SIGDET_CTRL1 0x13c
#define QPHY_V3_PCS_UFS_RX_SIGDET_CTRL2 0x140
+#define QPHY_V3_PCS_UFS_READY_STATUS 0x160
#define QPHY_V3_PCS_UFS_TX_MID_TERM_CTRL1 0x1bc
#define QPHY_V3_PCS_UFS_MULTI_LANE_CTRL1 0x1c4
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-ufs-v5.h b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-ufs-v5.h
index bcca23493b7e..07959964fcf6 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-ufs-v5.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-ufs-v5.h
@@ -8,11 +8,15 @@
#define QCOM_PHY_QMP_PCS_UFS_V5_H_
/* Only for QMP V5 PHY - UFS PCS registers */
+#define QPHY_V5_PCS_UFS_PHY_START 0x000
+#define QPHY_V5_PCS_UFS_POWER_DOWN_CONTROL 0x004
+#define QPHY_V5_PCS_UFS_SW_RESET 0x008
#define QPHY_V5_PCS_UFS_TIMER_20US_CORECLK_STEPS_MSB 0x00c
#define QPHY_V5_PCS_UFS_TIMER_20US_CORECLK_STEPS_LSB 0x010
#define QPHY_V5_PCS_UFS_PLL_CNTL 0x02c
#define QPHY_V5_PCS_UFS_TX_LARGE_AMP_DRV_LVL 0x030
#define QPHY_V5_PCS_UFS_TX_SMALL_AMP_DRV_LVL 0x038
+#define QPHY_V5_PCS_UFS_BIST_FIXED_PAT_CTRL 0x060
#define QPHY_V5_PCS_UFS_TX_HSGEAR_CAPABILITY 0x074
#define QPHY_V5_PCS_UFS_RX_HSGEAR_CAPABILITY 0x0b4
#define QPHY_V5_PCS_UFS_DEBUG_BUS_CLKSEL 0x124
@@ -21,6 +25,7 @@
#define QPHY_V5_PCS_UFS_RX_SIGDET_CTRL2 0x158
#define QPHY_V5_PCS_UFS_TX_PWM_GEAR_BAND 0x160
#define QPHY_V5_PCS_UFS_TX_HS_GEAR_BAND 0x168
+#define QPHY_V5_PCS_UFS_READY_STATUS 0x180
#define QPHY_V5_PCS_UFS_TX_MID_TERM_CTRL1 0x1d8
#define QPHY_V5_PCS_UFS_MULTI_LANE_CTRL1 0x1e0
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-ufs-v6.h b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-ufs-v6.h
new file mode 100644
index 000000000000..c23d5e41e25b
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-ufs-v6.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#ifndef QCOM_PHY_QMP_PCS_UFS_V6_H_
+#define QCOM_PHY_QMP_PCS_UFS_V6_H_
+
+/* Only for QMP V6 PHY - UFS PCS registers */
+#define QPHY_V6_PCS_UFS_PHY_START 0x000
+#define QPHY_V6_PCS_UFS_POWER_DOWN_CONTROL 0x004
+#define QPHY_V6_PCS_UFS_SW_RESET 0x008
+#define QPHY_V6_PCS_UFS_TIMER_20US_CORECLK_STEPS_MSB 0x00c
+#define QPHY_V6_PCS_UFS_TIMER_20US_CORECLK_STEPS_LSB 0x010
+#define QPHY_V6_PCS_UFS_PLL_CNTL 0x02c
+#define QPHY_V6_PCS_UFS_TX_LARGE_AMP_DRV_LVL 0x030
+#define QPHY_V6_PCS_UFS_TX_SMALL_AMP_DRV_LVL 0x038
+#define QPHY_V6_PCS_UFS_BIST_FIXED_PAT_CTRL 0x060
+#define QPHY_V6_PCS_UFS_TX_HSGEAR_CAPABILITY 0x074
+#define QPHY_V6_PCS_UFS_RX_HSGEAR_CAPABILITY 0x0bc
+#define QPHY_V6_PCS_UFS_DEBUG_BUS_CLKSEL 0x158
+#define QPHY_V6_PCS_UFS_LINECFG_DISABLE 0x17c
+#define QPHY_V6_PCS_UFS_RX_MIN_HIBERN8_TIME 0x184
+#define QPHY_V6_PCS_UFS_RX_SIGDET_CTRL2 0x18c
+#define QPHY_V6_PCS_UFS_TX_PWM_GEAR_BAND 0x178
+#define QPHY_V6_PCS_UFS_TX_HS_GEAR_BAND 0x174
+#define QPHY_V6_PCS_UFS_READY_STATUS 0x1a8
+#define QPHY_V6_PCS_UFS_TX_MID_TERM_CTRL1 0x1f4
+#define QPHY_V6_PCS_UFS_MULTI_LANE_CTRL1 0x1fc
+
+#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-usb-v6.h b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-usb-v6.h
new file mode 100644
index 000000000000..9510e63ba9d8
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-usb-v6.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2022, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef QCOM_PHY_QMP_PCS_USB_V6_H_
+#define QCOM_PHY_QMP_PCS_USB_V6_H_
+
+/* Only for QMP V6 PHY - USB3 have different offsets than V5 */
+#define QPHY_USB_V6_PCS_LOCK_DETECT_CONFIG1 0xc4
+#define QPHY_USB_V6_PCS_LOCK_DETECT_CONFIG2 0xc8
+#define QPHY_USB_V6_PCS_LOCK_DETECT_CONFIG3 0xcc
+#define QPHY_USB_V6_PCS_LOCK_DETECT_CONFIG6 0xd8
+#define QPHY_USB_V6_PCS_REFGEN_REQ_CONFIG1 0xdc
+#define QPHY_USB_V6_PCS_USB3_POWER_STATE_CONFIG1 0x90
+#define QPHY_USB_V6_PCS_RX_SIGDET_LVL 0x188
+#define QPHY_USB_V6_PCS_RCVR_DTCT_DLY_P1U2_L 0x190
+#define QPHY_USB_V6_PCS_RCVR_DTCT_DLY_P1U2_H 0x194
+#define QPHY_USB_V6_PCS_CDR_RESET_TIME 0x1b0
+#define QPHY_USB_V6_PCS_ALIGN_DETECT_CONFIG1 0x1c0
+#define QPHY_USB_V6_PCS_ALIGN_DETECT_CONFIG2 0x1c4
+#define QPHY_USB_V6_PCS_PCS_TX_RX_CONFIG 0x1d0
+#define QPHY_USB_V6_PCS_EQ_CONFIG1 0x1dc
+#define QPHY_USB_V6_PCS_EQ_CONFIG5 0x1ec
+
+#define QPHY_USB_V6_PCS_USB3_LFPS_DET_HIGH_COUNT_VAL 0x18
+#define QPHY_USB_V6_PCS_USB3_RXEQTRAINING_DFE_TIME_S2 0x3c
+#define QPHY_USB_V6_PCS_USB3_RCVR_DTCT_DLY_U3_L 0x40
+#define QPHY_USB_V6_PCS_USB3_RCVR_DTCT_DLY_U3_H 0x44
+
+#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v2.h b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v2.h
index c8515f506872..bf36399d0057 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v2.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v2.h
@@ -7,13 +7,11 @@
#define QCOM_PHY_QMP_PCS_V2_H_
/* Only for QMP V2 PHY - PCS registers */
+#define QPHY_V2_PCS_SW_RESET 0x000
#define QPHY_V2_PCS_POWER_DOWN_CONTROL 0x004
+#define QPHY_V2_PCS_START_CONTROL 0x008
#define QPHY_V2_PCS_TXDEEMPH_M6DB_V0 0x024
#define QPHY_V2_PCS_TXDEEMPH_M3P5DB_V0 0x028
-#define QPHY_V2_PCS_TX_LARGE_AMP_DRV_LVL 0x034
-#define QPHY_V2_PCS_TX_LARGE_AMP_POST_EMP_LVL 0x038
-#define QPHY_V2_PCS_TX_SMALL_AMP_DRV_LVL 0x03c
-#define QPHY_V2_PCS_TX_SMALL_AMP_POST_EMP_LVL 0x040
#define QPHY_V2_PCS_ENDPOINT_REFCLK_DRIVE 0x054
#define QPHY_V2_PCS_RX_IDLE_DTCT_CNTRL 0x058
#define QPHY_V2_PCS_POWER_STATE_CONFIG1 0x060
@@ -30,17 +28,16 @@
#define QPHY_V2_PCS_FLL_CNT_VAL_L 0x0c8
#define QPHY_V2_PCS_FLL_CNT_VAL_H_TOL 0x0cc
#define QPHY_V2_PCS_FLL_MAN_CODE 0x0d0
-
-/* UFS only ? */
-#define QPHY_V2_PCS_RX_MIN_STALL_NOCONFIG_TIME_CAP 0x0cc
-#define QPHY_V2_PCS_RX_SYM_RESYNC_CTRL 0x13c
-#define QPHY_V2_PCS_RX_MIN_HIBERN8_TIME 0x140
-#define QPHY_V2_PCS_RX_SIGDET_CTRL2 0x148
-#define QPHY_V2_PCS_RX_PWM_GEAR_BAND 0x154
+#define QPHY_V2_PCS_AUTONOMOUS_MODE_CTRL 0x0d4
+#define QPHY_V2_PCS_LFPS_RXTERM_IRQ_CLEAR 0x0d8
+#define QPHY_V2_PCS_LFPS_RXTERM_IRQ_STATUS 0x178
+#define QPHY_V2_PCS_USB_PCS_STATUS 0x17c /* USB */
#define QPHY_V2_PCS_PLL_LOCK_CHK_DLY_TIME_AUXCLK_LSB 0x1a8
#define QPHY_V2_PCS_OSC_DTCT_ACTIONS 0x1ac
#define QPHY_V2_PCS_RX_SIGDET_LVL 0x1d8
#define QPHY_V2_PCS_L1SS_WAKEUP_DLY_TIME_AUXCLK_LSB 0x1dc
#define QPHY_V2_PCS_L1SS_WAKEUP_DLY_TIME_AUXCLK_MSB 0x1e0
+#define QPHY_V2_PCS_PCI_PCS_STATUS 0x174 /* PCI */
+
#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v5.h b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v5.h
index 04f260711ea1..36cc80bb9059 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v5.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v5.h
@@ -7,6 +7,10 @@
#define QCOM_PHY_QMP_PCS_V5_H_
/* Only for QMP V5 PHY - USB/PCIe PCS registers */
+#define QPHY_V5_PCS_SW_RESET 0x000
+#define QPHY_V5_PCS_PCS_STATUS1 0x014
+#define QPHY_V5_PCS_POWER_DOWN_CONTROL 0x040
+#define QPHY_V5_PCS_START_CONTROL 0x044
#define QPHY_V5_PCS_LOCK_DETECT_CONFIG1 0x0c4
#define QPHY_V5_PCS_LOCK_DETECT_CONFIG2 0x0c8
#define QPHY_V5_PCS_LOCK_DETECT_CONFIG3 0x0cc
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v6.h b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v6.h
new file mode 100644
index 000000000000..18c4a3abe590
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v6.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#ifndef QCOM_PHY_QMP_PCS_V6_H_
+#define QCOM_PHY_QMP_PCS_V6_H_
+
+/* Only for QMP V6 PHY - USB/PCIe PCS registers */
+#define QPHY_V6_PCS_REFGEN_REQ_CONFIG1 0xdc
+#define QPHY_V6_PCS_RX_SIGDET_LVL 0x188
+#define QPHY_V6_PCS_RATE_SLEW_CNTRL1 0x198
+#define QPHY_V6_PCS_EQ_CONFIG2 0x1e0
+#define QPHY_V6_PCS_PCS_TX_RX_CONFIG 0x1d0
+
+#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v6_20.h b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v6_20.h
new file mode 100644
index 000000000000..9c3f1e4950e6
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-pcs-v6_20.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#ifndef QCOM_PHY_QMP_PCS_V6_20_H_
+#define QCOM_PHY_QMP_PCS_V6_20_H_
+
+/* Only for QMP V6_20 PHY - USB/PCIe PCS registers */
+#define QPHY_V6_20_PCS_G3S2_PRE_GAIN 0x178
+#define QPHY_V6_20_PCS_RX_SIGDET_LVL 0x190
+#define QPHY_V6_20_PCS_COM_ELECIDLE_DLY_SEL 0x1b8
+#define QPHY_V6_20_PCS_TX_RX_CONFIG1 0x1dc
+#define QPHY_V6_20_PCS_TX_RX_CONFIG2 0x1e0
+#define QPHY_V6_20_PCS_EQ_CONFIG4 0x1f8
+#define QPHY_V6_20_PCS_EQ_CONFIG5 0x1fc
+
+#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-com-v6.h b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-com-v6.h
new file mode 100644
index 000000000000..f420f8faf16a
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-com-v6.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#ifndef QCOM_PHY_QMP_QSERDES_COM_V6_H_
+#define QCOM_PHY_QMP_QSERDES_COM_V6_H_
+
+/* Only for QMP V6 PHY - QSERDES COM registers */
+
+#define QSERDES_V6_COM_SSC_STEP_SIZE1_MODE1 0x00
+#define QSERDES_V6_COM_SSC_STEP_SIZE2_MODE1 0x04
+#define QSERDES_V6_COM_CP_CTRL_MODE1 0x10
+#define QSERDES_V6_COM_PLL_RCTRL_MODE1 0x14
+#define QSERDES_V6_COM_PLL_CCTRL_MODE1 0x18
+#define QSERDES_V6_COM_CORECLK_DIV_MODE1 0x1c
+#define QSERDES_V6_COM_LOCK_CMP1_MODE1 0x20
+#define QSERDES_V6_COM_LOCK_CMP2_MODE1 0x24
+#define QSERDES_V6_COM_DEC_START_MODE1 0x28
+#define QSERDES_V6_COM_DEC_START_MSB_MODE1 0x2c
+#define QSERDES_V6_COM_DIV_FRAC_START1_MODE1 0x30
+#define QSERDES_V6_COM_DIV_FRAC_START2_MODE1 0x34
+#define QSERDES_V6_COM_DIV_FRAC_START3_MODE1 0x38
+#define QSERDES_V6_COM_HSCLK_SEL_1 0x3c
+#define QSERDES_V6_COM_VCO_TUNE1_MODE1 0x48
+#define QSERDES_V6_COM_VCO_TUNE2_MODE1 0x4c
+#define QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE1_MODE1 0x50
+#define QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE2_MODE1 0x54
+#define QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE1_MODE0 0x58
+#define QSERDES_V6_COM_BIN_VCOCAL_CMP_CODE2_MODE0 0x5c
+#define QSERDES_V6_COM_SSC_STEP_SIZE1_MODE0 0x60
+#define QSERDES_V6_COM_SSC_STEP_SIZE2_MODE0 0x64
+#define QSERDES_V6_COM_CP_CTRL_MODE0 0x70
+#define QSERDES_V6_COM_PLL_RCTRL_MODE0 0x74
+#define QSERDES_V6_COM_PLL_CCTRL_MODE0 0x78
+#define QSERDES_V6_COM_PLL_CORE_CLK_DIV_MODE0 0x7c
+#define QSERDES_V6_COM_LOCK_CMP1_MODE0 0x80
+#define QSERDES_V6_COM_LOCK_CMP2_MODE0 0x84
+#define QSERDES_V6_COM_DEC_START_MODE0 0x88
+#define QSERDES_V6_COM_DEC_START_MSB_MODE0 0x8c
+#define QSERDES_V6_COM_DIV_FRAC_START1_MODE0 0x90
+#define QSERDES_V6_COM_DIV_FRAC_START2_MODE0 0x94
+#define QSERDES_V6_COM_DIV_FRAC_START3_MODE0 0x98
+#define QSERDES_V6_COM_HSCLK_HS_SWITCH_SEL_1 0x9c
+#define QSERDES_V6_COM_INTEGLOOP_GAIN0_MODE0 0xa0
+#define QSERDES_V6_COM_INTEGLOOP_GAIN1_MODE0 0xa4
+#define QSERDES_V6_COM_VCO_TUNE1_MODE0 0xa8
+#define QSERDES_V6_COM_VCO_TUNE2_MODE0 0xac
+#define QSERDES_V6_COM_BG_TIMER 0xbc
+#define QSERDES_V6_COM_SSC_EN_CENTER 0xc0
+#define QSERDES_V6_COM_SSC_PER1 0xcc
+#define QSERDES_V6_COM_SSC_PER2 0xd0
+#define QSERDES_V6_COM_PLL_POST_DIV_MUX 0xd8
+#define QSERDES_V6_COM_PLL_BIAS_EN_CLK_BUFLR_EN 0xdc
+#define QSERDES_V6_COM_CLK_ENABLE1 0xe0
+#define QSERDES_V6_COM_SYS_CLK_CTRL 0xe4
+#define QSERDES_V6_COM_SYSCLK_BUF_ENABLE 0xe8
+#define QSERDES_V6_COM_PLL_IVCO 0xf4
+#define QSERDES_V6_COM_SYSCLK_EN_SEL 0x110
+#define QSERDES_V6_COM_RESETSM_CNTRL 0x118
+#define QSERDES_V6_COM_LOCK_CMP_EN 0x120
+#define QSERDES_V6_COM_LOCK_CMP_CFG 0x124
+#define QSERDES_V6_COM_VCO_TUNE_CTRL 0x13c
+#define QSERDES_V6_COM_VCO_TUNE_MAP 0x140
+#define QSERDES_V6_COM_VCO_TUNE_INITVAL2 0x148
+#define QSERDES_V6_COM_CLK_SELECT 0x164
+#define QSERDES_V6_COM_CORE_CLK_EN 0x170
+#define QSERDES_V6_COM_CMN_CONFIG_1 0x174
+#define QSERDES_V6_COM_SVS_MODE_CLK_SEL 0x17c
+#define QSERDES_V6_COM_CMN_MISC_1 0x184
+#define QSERDES_V6_COM_CMN_MODE 0x188
+#define QSERDES_V6_COM_PLL_VCO_DC_LEVEL_CTRL 0x198
+#define QSERDES_V6_COM_AUTO_GAIN_ADJ_CTRL_1 0x1a4
+#define QSERDES_V6_COM_AUTO_GAIN_ADJ_CTRL_2 0x1a8
+#define QSERDES_V6_COM_AUTO_GAIN_ADJ_CTRL_3 0x1ac
+#define QSERDES_V6_COM_ADDITIONAL_MISC 0x1b4
+#define QSERDES_V6_COM_ADDITIONAL_MISC_2 0x1b8
+#define QSERDES_V6_COM_ADDITIONAL_MISC_3 0x1bc
+#define QSERDES_V6_COM_CMN_STATUS 0x1d0
+#define QSERDES_V6_COM_C_READY_STATUS 0x1f8
+
+#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-com.h b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-com.h
index fbaf6ef467f8..7fa5363feeb9 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-com.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-com.h
@@ -135,6 +135,6 @@
#define QSERDES_COM_CMN_MISC2 0x1b8
#define QSERDES_COM_CORECLK_DIV_MODE1 0x1bc
#define QSERDES_COM_CORECLK_DIV_MODE2 0x1c0
-#define QSERDES_COM_CMN_RSVD5 0x1c0
+#define QSERDES_COM_CMN_RSVD5 0x1c4
#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-ln-shrd-v6.h b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-ln-shrd-v6.h
new file mode 100644
index 000000000000..86d7d796d5d7
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-ln-shrd-v6.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#ifndef QCOM_PHY_QMP_QSERDES_LN_SHRD_V6_H_
+#define QCOM_PHY_QMP_QSERDES_LN_SHRD_V6_H_
+
+#define QSERDES_V6_LN_SHRD_RXCLK_DIV2_CTRL 0xa0
+#define QSERDES_V6_LN_SHRD_RX_Q_EN_RATES 0xb0
+#define QSERDES_V6_LN_SHRD_DFE_DAC_ENABLE1 0xb4
+#define QSERDES_V6_LN_SHRD_TX_ADAPT_POST_THRESH1 0xc4
+#define QSERDES_V6_LN_SHRD_TX_ADAPT_POST_THRESH2 0xc8
+#define QSERDES_V6_LN_SHRD_RX_MODE_RATE_0_1_B0 0xd4
+#define QSERDES_V6_LN_SHRD_RX_MODE_RATE_0_1_B1 0xd8
+#define QSERDES_V6_LN_SHRD_RX_MODE_RATE_0_1_B2 0xdc
+#define QSERDES_V6_LN_SHRD_RX_MODE_RATE_0_1_B3 0xe0
+#define QSERDES_V6_LN_SHRD_RX_MODE_RATE_0_1_B4 0xe4
+#define QSERDES_V6_LN_SHRD_RX_MODE_RATE_0_1_B5 0xe8
+#define QSERDES_V6_LN_SHRD_RX_MODE_RATE_0_1_B6 0xec
+#define QSERDES_V6_LN_SHRD_RX_MARG_COARSE_THRESH1_RATE210 0xf0
+#define QSERDES_V6_LN_SHRD_RX_MARG_COARSE_THRESH1_RATE3 0xf4
+#define QSERDES_V6_LN_SHRD_RX_MARG_COARSE_THRESH2_RATE210 0xf8
+#define QSERDES_V6_LN_SHRD_RX_MARG_COARSE_THRESH2_RATE3 0xfc
+#define QSERDES_V6_LN_SHRD_RX_MARG_COARSE_THRESH3_RATE210 0x100
+#define QSERDES_V6_LN_SHRD_RX_MARG_COARSE_THRESH3_RATE3 0x104
+#define QSERDES_V6_LN_SHRD_RX_MARG_COARSE_THRESH4_RATE3 0x10c
+#define QSERDES_V6_LN_SHRD_RX_MARG_COARSE_THRESH5_RATE3 0x114
+#define QSERDES_V6_LN_SHRD_RX_MARG_COARSE_THRESH6_RATE3 0x11c
+#define QSERDES_V6_LN_SHRD_RX_SUMMER_CAL_SPD_MODE 0x128
+
+#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-ufs-v6.h b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-ufs-v6.h
new file mode 100644
index 000000000000..15bcb4ba9139
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-ufs-v6.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#ifndef QCOM_PHY_QMP_QSERDES_TXRX_UFS_V6_H_
+#define QCOM_PHY_QMP_QSERDES_TXRX_UFS_V6_H_
+
+#define QSERDES_UFS_V6_TX_RES_CODE_LANE_TX 0x28
+#define QSERDES_UFS_V6_TX_RES_CODE_LANE_RX 0x2c
+#define QSERDES_UFS_V6_TX_RES_CODE_LANE_OFFSET_TX 0x30
+#define QSERDES_UFS_V6_TX_RES_CODE_LANE_OFFSET_RX 0x34
+
+#define QSERDES_UFS_V6_RX_UCDR_FASTLOCK_FO_GAIN_RATE2 0x08
+#define QSERDES_UFS_V6_RX_UCDR_FASTLOCK_FO_GAIN_RATE4 0x10
+#define QSERDES_UFS_V6_RX_VGA_CAL_MAN_VAL 0x178
+#define QSERDES_UFS_V6_RX_MODE_RATE_0_1_B0 0x208
+#define QSERDES_UFS_V6_RX_MODE_RATE_0_1_B1 0x20c
+#define QSERDES_UFS_V6_RX_MODE_RATE_0_1_B3 0x214
+#define QSERDES_UFS_V6_RX_MODE_RATE_0_1_B6 0x220
+#define QSERDES_UFS_V6_RX_MODE_RATE2_B3 0x238
+#define QSERDES_UFS_V6_RX_MODE_RATE2_B6 0x244
+#define QSERDES_UFS_V6_RX_MODE_RATE3_B3 0x25c
+#define QSERDES_UFS_V6_RX_MODE_RATE3_B4 0x260
+#define QSERDES_UFS_V6_RX_MODE_RATE3_B5 0x264
+#define QSERDES_UFS_V6_RX_MODE_RATE3_B8 0x270
+#define QSERDES_UFS_V6_RX_MODE_RATE4_B3 0x280
+#define QSERDES_UFS_V6_RX_MODE_RATE4_B6 0x28c
+
+#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v5_5nm.h b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v5_5nm.h
index a1c088bd5158..a4a4e251348d 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v5_5nm.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v5_5nm.h
@@ -7,11 +7,6 @@
#define QCOM_PHY_QMP_QSERDES_TXRX_V5_5NM_H_
/* Only for QMP V5 5NM PHY - TX registers */
-#define QSERDES_V5_5NM_TX_RES_CODE_LANE_OFFSET_TX 0x30
-#define QSERDES_V5_5NM_TX_RES_CODE_LANE_OFFSET_RX 0x34
-#define QSERDES_V5_5NM_TX_LANE_MODE_1 0x78
-#define QSERDES_V5_5NM_TX_LANE_MODE_2 0x7c
-#define QSERDES_V5_5NM_TX_LANE_MODE_3 0x80
#define QSERDES_V5_5NM_TX_BIST_MODE_LANENO 0x00
#define QSERDES_V5_5NM_TX_BIST_INVERT 0x04
#define QSERDES_V5_5NM_TX_CLKBUF_ENABLE 0x08
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v6.h b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v6.h
new file mode 100644
index 000000000000..a69233e68f9a
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v6.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#ifndef QCOM_PHY_QMP_QSERDES_TXRX_USB_V6_H_
+#define QCOM_PHY_QMP_QSERDES_TXRX_USB_V6_H_
+
+#define QSERDES_V6_TX_CLKBUF_ENABLE 0x08
+#define QSERDES_V6_TX_RESET_TSYNC_EN 0x1c
+#define QSERDES_V6_TX_PRE_STALL_LDO_BOOST_EN 0x20
+#define QSERDES_V6_TX_TX_BAND 0x24
+#define QSERDES_V6_TX_INTERFACE_SELECT 0x2c
+#define QSERDES_V6_TX_RES_CODE_LANE_TX 0x34
+#define QSERDES_V6_TX_RES_CODE_LANE_RX 0x38
+#define QSERDES_V6_TX_RES_CODE_LANE_OFFSET_TX 0x3c
+#define QSERDES_V6_TX_RES_CODE_LANE_OFFSET_RX 0x40
+#define QSERDES_V6_TX_PARRATE_REC_DETECT_IDLE_EN 0x60
+#define QSERDES_V6_TX_BIST_PATTERN7 0x7c
+#define QSERDES_V6_TX_LANE_MODE_1 0x84
+#define QSERDES_V6_TX_LANE_MODE_3 0x8c
+#define QSERDES_V6_TX_LANE_MODE_4 0x90
+#define QSERDES_V6_TX_LANE_MODE_5 0x94
+#define QSERDES_V6_TX_RCV_DETECT_LVL_2 0xa4
+#define QSERDES_V6_TX_TRAN_DRVR_EMP_EN 0xc0
+#define QSERDES_V6_TX_TX_INTERFACE_MODE 0xc4
+#define QSERDES_V6_TX_VMODE_CTRL1 0xc8
+#define QSERDES_V6_TX_PI_QEC_CTRL 0xe4
+
+#define QSERDES_V6_RX_UCDR_FO_GAIN 0x08
+#define QSERDES_V6_RX_UCDR_SO_GAIN 0x14
+#define QSERDES_V6_RX_UCDR_FASTLOCK_FO_GAIN 0x30
+#define QSERDES_V6_RX_UCDR_SO_SATURATION_AND_ENABLE 0x34
+#define QSERDES_V6_RX_UCDR_FASTLOCK_COUNT_LOW 0x3c
+#define QSERDES_V6_RX_UCDR_FASTLOCK_COUNT_HIGH 0x40
+#define QSERDES_V6_RX_UCDR_PI_CONTROLS 0x44
+#define QSERDES_V6_RX_UCDR_SB2_THRESH1 0x4c
+#define QSERDES_V6_RX_UCDR_SB2_THRESH2 0x50
+#define QSERDES_V6_RX_UCDR_SB2_GAIN1 0x54
+#define QSERDES_V6_RX_UCDR_SB2_GAIN2 0x58
+#define QSERDES_V6_RX_AUX_DATA_TCOARSE_TFINE 0x60
+#define QSERDES_V6_RX_TX_ADAPT_POST_THRESH 0xcc
+#define QSERDES_V6_RX_VGA_CAL_CNTRL1 0xd4
+#define QSERDES_V6_RX_VGA_CAL_CNTRL2 0xd8
+#define QSERDES_V6_RX_GM_CAL 0xdc
+#define QSERDES_V6_RX_RX_EQU_ADAPTOR_CNTRL2 0xec
+#define QSERDES_V6_RX_RX_EQU_ADAPTOR_CNTRL3 0xf0
+#define QSERDES_V6_RX_RX_EQU_ADAPTOR_CNTRL4 0xf4
+#define QSERDES_V6_RX_RX_IDAC_TSETTLE_LOW 0xf8
+#define QSERDES_V6_RX_RX_IDAC_TSETTLE_HIGH 0xfc
+#define QSERDES_V6_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1 0x110
+#define QSERDES_V6_RX_SIDGET_ENABLES 0x118
+#define QSERDES_V6_RX_SIGDET_CNTRL 0x11c
+#define QSERDES_V6_RX_SIGDET_DEGLITCH_CNTRL 0x124
+#define QSERDES_V6_RX_RX_MODE_00_LOW 0x15c
+#define QSERDES_V6_RX_RX_MODE_00_HIGH 0x160
+#define QSERDES_V6_RX_RX_MODE_00_HIGH2 0x164
+#define QSERDES_V6_RX_RX_MODE_00_HIGH3 0x168
+#define QSERDES_V6_RX_RX_MODE_00_HIGH4 0x16c
+#define QSERDES_V6_RX_RX_MODE_01_LOW 0x170
+#define QSERDES_V6_RX_RX_MODE_01_HIGH 0x174
+#define QSERDES_V6_RX_RX_MODE_01_HIGH2 0x178
+#define QSERDES_V6_RX_RX_MODE_01_HIGH3 0x17c
+#define QSERDES_V6_RX_RX_MODE_01_HIGH4 0x180
+#define QSERDES_V6_RX_RX_MODE_10_LOW 0x184
+#define QSERDES_V6_RX_RX_MODE_10_HIGH 0x188
+#define QSERDES_V6_RX_RX_MODE_10_HIGH2 0x18c
+#define QSERDES_V6_RX_RX_MODE_10_HIGH3 0x190
+#define QSERDES_V6_RX_RX_MODE_10_HIGH4 0x194
+#define QSERDES_V6_RX_DFE_EN_TIMER 0x1a0
+#define QSERDES_V6_RX_DFE_CTLE_POST_CAL_OFFSET 0x1a4
+#define QSERDES_V6_RX_DCC_CTRL1 0x1a8
+#define QSERDES_V6_RX_VTH_CODE 0x1b0
+#define QSERDES_V6_RX_SIGDET_CAL_CTRL1 0x1e4
+#define QSERDES_V6_RX_SIGDET_CAL_TRIM 0x1f8
+
+#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v6_20.h b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v6_20.h
new file mode 100644
index 000000000000..5385a8b60970
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-qserdes-txrx-v6_20.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#ifndef QCOM_PHY_QMP_QSERDES_TXRX_PCIE_V6_20_H_
+#define QCOM_PHY_QMP_QSERDES_TXRX_PCIE_V6_20_H_
+
+#define QSERDES_V6_20_TX_RES_CODE_LANE_OFFSET_TX 0x30
+#define QSERDES_V6_20_TX_RES_CODE_LANE_OFFSET_RX 0x34
+#define QSERDES_V6_20_TX_TRAN_DRVR_EMP_EN 0xac
+#define QSERDES_V6_20_TX_LANE_MODE_1 0x78
+#define QSERDES_V6_20_TX_LANE_MODE_2 0x7c
+#define QSERDES_V6_20_TX_LANE_MODE_3 0x80
+
+#define QSERDES_V6_20_RX_UCDR_FO_GAIN_RATE_2 0x08
+#define QSERDES_V6_20_RX_UCDR_FO_GAIN_RATE_3 0x0c
+#define QSERDES_V6_20_RX_UCDR_PI_CONTROLS 0x20
+#define QSERDES_V6_20_RX_UCDR_SO_ACC_DEFAULT_VAL_RATE3 0x34
+#define QSERDES_V6_20_RX_IVCM_CAL_CTRL2 0x9c
+#define QSERDES_V6_20_RX_IVCM_POSTCAL_OFFSET 0xa0
+#define QSERDES_V6_20_RX_DFE_3 0xb4
+#define QSERDES_V6_20_RX_VGA_CAL_MAN_VAL 0xe8
+#define QSERDES_V6_20_RX_GM_CAL 0x10c
+#define QSERDES_V6_20_RX_EQU_ADAPTOR_CNTRL4 0x120
+#define QSERDES_V6_20_RX_SIGDET_ENABLES 0x148
+#define QSERDES_V6_20_RX_PHPRE_CTRL 0x188
+#define QSERDES_V6_20_RX_DFE_CTLE_POST_CAL_OFFSET 0x194
+#define QSERDES_V6_20_RX_Q_PI_INTRINSIC_BIAS_RATE32 0x1dc
+#define QSERDES_V6_20_RX_MODE_RATE2_B0 0x1f4
+#define QSERDES_V6_20_RX_MODE_RATE2_B1 0x1f8
+#define QSERDES_V6_20_RX_MODE_RATE2_B2 0x1fc
+#define QSERDES_V6_20_RX_MODE_RATE2_B3 0x200
+#define QSERDES_V6_20_RX_MODE_RATE2_B4 0x204
+#define QSERDES_V6_20_RX_MODE_RATE2_B5 0x208
+#define QSERDES_V6_20_RX_MODE_RATE2_B6 0x20c
+#define QSERDES_V6_20_RX_MODE_RATE3_B0 0x210
+#define QSERDES_V6_20_RX_MODE_RATE3_B1 0x214
+#define QSERDES_V6_20_RX_MODE_RATE3_B2 0x218
+#define QSERDES_V6_20_RX_MODE_RATE3_B3 0x21c
+#define QSERDES_V6_20_RX_MODE_RATE3_B4 0x220
+#define QSERDES_V6_20_RX_MODE_RATE3_B5 0x224
+#define QSERDES_V6_20_RX_MODE_RATE3_B6 0x228
+
+#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-ufs.c b/drivers/phy/qualcomm/phy-qcom-qmp-ufs.c
index 318eea35b972..994ddd5d4a81 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-ufs.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-ufs.c
@@ -20,7 +20,15 @@
#include <linux/reset.h>
#include <linux/slab.h>
+#include <ufs/unipro.h>
#include "phy-qcom-qmp.h"
+#include "phy-qcom-qmp-pcs-ufs-v2.h"
+#include "phy-qcom-qmp-pcs-ufs-v3.h"
+#include "phy-qcom-qmp-pcs-ufs-v4.h"
+#include "phy-qcom-qmp-pcs-ufs-v5.h"
+#include "phy-qcom-qmp-pcs-ufs-v6.h"
+
+#include "phy-qcom-qmp-qserdes-txrx-ufs-v6.h"
/* QPHY_SW_RESET bit */
#define SW_RESET BIT(0)
@@ -69,32 +77,40 @@ enum qphy_reg_layout {
QPHY_LAYOUT_SIZE
};
-static const unsigned int msm8996_ufsphy_regs_layout[QPHY_LAYOUT_SIZE] = {
- [QPHY_START_CTRL] = 0x00,
- [QPHY_PCS_READY_STATUS] = 0x168,
- [QPHY_PCS_POWER_DOWN_CONTROL] = 0x04,
-};
-
-static const unsigned int sdm845_ufsphy_regs_layout[QPHY_LAYOUT_SIZE] = {
- [QPHY_START_CTRL] = 0x00,
- [QPHY_PCS_READY_STATUS] = 0x160,
- [QPHY_PCS_POWER_DOWN_CONTROL] = 0x04,
+static const unsigned int ufsphy_v2_regs_layout[QPHY_LAYOUT_SIZE] = {
+ [QPHY_START_CTRL] = QPHY_V2_PCS_UFS_PHY_START,
+ [QPHY_PCS_READY_STATUS] = QPHY_V2_PCS_UFS_READY_STATUS,
+ [QPHY_PCS_POWER_DOWN_CONTROL] = QPHY_V2_PCS_UFS_POWER_DOWN_CONTROL,
};
-static const unsigned int sm6115_ufsphy_regs_layout[QPHY_LAYOUT_SIZE] = {
- [QPHY_START_CTRL] = 0x00,
- [QPHY_PCS_READY_STATUS] = 0x168,
- [QPHY_PCS_POWER_DOWN_CONTROL] = 0x04,
+static const unsigned int ufsphy_v3_regs_layout[QPHY_LAYOUT_SIZE] = {
+ [QPHY_START_CTRL] = QPHY_V3_PCS_UFS_PHY_START,
+ [QPHY_PCS_READY_STATUS] = QPHY_V3_PCS_UFS_READY_STATUS,
+ [QPHY_PCS_POWER_DOWN_CONTROL] = QPHY_V3_PCS_UFS_POWER_DOWN_CONTROL,
};
-static const unsigned int sm8150_ufsphy_regs_layout[QPHY_LAYOUT_SIZE] = {
+static const unsigned int ufsphy_v4_regs_layout[QPHY_LAYOUT_SIZE] = {
[QPHY_START_CTRL] = QPHY_V4_PCS_UFS_PHY_START,
[QPHY_PCS_READY_STATUS] = QPHY_V4_PCS_UFS_READY_STATUS,
[QPHY_SW_RESET] = QPHY_V4_PCS_UFS_SW_RESET,
[QPHY_PCS_POWER_DOWN_CONTROL] = QPHY_V4_PCS_UFS_POWER_DOWN_CONTROL,
};
-static const struct qmp_phy_init_tbl msm8996_ufs_serdes_tbl[] = {
+static const unsigned int ufsphy_v5_regs_layout[QPHY_LAYOUT_SIZE] = {
+ [QPHY_START_CTRL] = QPHY_V5_PCS_UFS_PHY_START,
+ [QPHY_PCS_READY_STATUS] = QPHY_V5_PCS_UFS_READY_STATUS,
+ [QPHY_SW_RESET] = QPHY_V5_PCS_UFS_SW_RESET,
+ [QPHY_PCS_POWER_DOWN_CONTROL] = QPHY_V5_PCS_UFS_POWER_DOWN_CONTROL,
+};
+
+static const unsigned int ufsphy_v6_regs_layout[QPHY_LAYOUT_SIZE] = {
+ [QPHY_START_CTRL] = QPHY_V6_PCS_UFS_PHY_START,
+ [QPHY_PCS_READY_STATUS] = QPHY_V6_PCS_UFS_READY_STATUS,
+ [QPHY_SW_RESET] = QPHY_V6_PCS_UFS_SW_RESET,
+ [QPHY_PCS_POWER_DOWN_CONTROL] = QPHY_V6_PCS_UFS_POWER_DOWN_CONTROL,
+};
+
+static const struct qmp_phy_init_tbl msm8996_ufsphy_serdes[] = {
QMP_PHY_INIT_CFG(QSERDES_COM_CMN_CONFIG, 0x0e),
QMP_PHY_INIT_CFG(QSERDES_COM_SYSCLK_EN_SEL, 0xd7),
QMP_PHY_INIT_CFG(QSERDES_COM_CLK_SELECT, 0x30),
@@ -143,12 +159,12 @@ static const struct qmp_phy_init_tbl msm8996_ufs_serdes_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_COM_LOCK_CMP3_MODE1, 0x00),
};
-static const struct qmp_phy_init_tbl msm8996_ufs_tx_tbl[] = {
+static const struct qmp_phy_init_tbl msm8996_ufsphy_tx[] = {
QMP_PHY_INIT_CFG(QSERDES_TX_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN, 0x45),
QMP_PHY_INIT_CFG(QSERDES_TX_LANE_MODE, 0x02),
};
-static const struct qmp_phy_init_tbl msm8996_ufs_rx_tbl[] = {
+static const struct qmp_phy_init_tbl msm8996_ufsphy_rx[] = {
QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_LVL, 0x24),
QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_CNTRL, 0x02),
QMP_PHY_INIT_CFG(QSERDES_RX_RX_INTERFACE_MODE, 0x00),
@@ -162,7 +178,7 @@ static const struct qmp_phy_init_tbl msm8996_ufs_rx_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0E),
};
-static const struct qmp_phy_init_tbl sm6115_ufsphy_serdes_tbl[] = {
+static const struct qmp_phy_init_tbl sm6115_ufsphy_serdes[] = {
QMP_PHY_INIT_CFG(QSERDES_COM_CMN_CONFIG, 0x0e),
QMP_PHY_INIT_CFG(QSERDES_COM_SYSCLK_EN_SEL, 0x14),
QMP_PHY_INIT_CFG(QSERDES_COM_CLK_SELECT, 0x30),
@@ -213,17 +229,18 @@ static const struct qmp_phy_init_tbl sm6115_ufsphy_serdes_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_COM_BG_TRIM, 0x0f),
QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_INITVAL1, 0xff),
QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_INITVAL2, 0x00),
+};
- /* Rate B */
+static const struct qmp_phy_init_tbl sm6115_ufsphy_hs_b_serdes[] = {
QMP_PHY_INIT_CFG(QSERDES_COM_VCO_TUNE_MAP, 0x44),
};
-static const struct qmp_phy_init_tbl sm6115_ufsphy_tx_tbl[] = {
+static const struct qmp_phy_init_tbl sm6115_ufsphy_tx[] = {
QMP_PHY_INIT_CFG(QSERDES_TX_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN, 0x45),
QMP_PHY_INIT_CFG(QSERDES_TX_LANE_MODE, 0x06),
};
-static const struct qmp_phy_init_tbl sm6115_ufsphy_rx_tbl[] = {
+static const struct qmp_phy_init_tbl sm6115_ufsphy_rx[] = {
QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_LVL, 0x24),
QMP_PHY_INIT_CFG(QSERDES_RX_SIGDET_CNTRL, 0x0F),
QMP_PHY_INIT_CFG(QSERDES_RX_RX_INTERFACE_MODE, 0x40),
@@ -241,19 +258,19 @@ static const struct qmp_phy_init_tbl sm6115_ufsphy_rx_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x5B),
};
-static const struct qmp_phy_init_tbl sm6115_ufsphy_pcs_tbl[] = {
- QMP_PHY_INIT_CFG(QPHY_V2_PCS_RX_PWM_GEAR_BAND, 0x15),
- QMP_PHY_INIT_CFG(QPHY_V2_PCS_RX_SIGDET_CTRL2, 0x6d),
- QMP_PHY_INIT_CFG(QPHY_V2_PCS_TX_LARGE_AMP_DRV_LVL, 0x0f),
- QMP_PHY_INIT_CFG(QPHY_V2_PCS_TX_SMALL_AMP_DRV_LVL, 0x02),
- QMP_PHY_INIT_CFG(QPHY_V2_PCS_RX_MIN_STALL_NOCONFIG_TIME_CAP, 0x28),
- QMP_PHY_INIT_CFG(QPHY_V2_PCS_RX_SYM_RESYNC_CTRL, 0x03),
- QMP_PHY_INIT_CFG(QPHY_V2_PCS_TX_LARGE_AMP_POST_EMP_LVL, 0x12),
- QMP_PHY_INIT_CFG(QPHY_V2_PCS_TX_SMALL_AMP_POST_EMP_LVL, 0x0f),
- QMP_PHY_INIT_CFG(QPHY_V2_PCS_RX_MIN_HIBERN8_TIME, 0x9a), /* 8 us */
+static const struct qmp_phy_init_tbl sm6115_ufsphy_pcs[] = {
+ QMP_PHY_INIT_CFG(QPHY_V2_PCS_UFS_RX_PWM_GEAR_BAND, 0x15),
+ QMP_PHY_INIT_CFG(QPHY_V2_PCS_UFS_RX_SIGDET_CTRL2, 0x6d),
+ QMP_PHY_INIT_CFG(QPHY_V2_PCS_UFS_TX_LARGE_AMP_DRV_LVL, 0x0f),
+ QMP_PHY_INIT_CFG(QPHY_V2_PCS_UFS_TX_SMALL_AMP_DRV_LVL, 0x02),
+ QMP_PHY_INIT_CFG(QPHY_V2_PCS_UFS_RX_MIN_STALL_NOCONFIG_TIME_CAP, 0x28),
+ QMP_PHY_INIT_CFG(QPHY_V2_PCS_UFS_RX_SYM_RESYNC_CTRL, 0x03),
+ QMP_PHY_INIT_CFG(QPHY_V2_PCS_UFS_TX_LARGE_AMP_POST_EMP_LVL, 0x12),
+ QMP_PHY_INIT_CFG(QPHY_V2_PCS_UFS_TX_SMALL_AMP_POST_EMP_LVL, 0x0f),
+ QMP_PHY_INIT_CFG(QPHY_V2_PCS_UFS_RX_MIN_HIBERN8_TIME, 0x9a), /* 8 us */
};
-static const struct qmp_phy_init_tbl sdm845_ufsphy_serdes_tbl[] = {
+static const struct qmp_phy_init_tbl sdm845_ufsphy_serdes[] = {
QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYS_CLK_CTRL, 0x02),
QMP_PHY_INIT_CFG(QSERDES_V3_COM_BIAS_EN_CLKBUFLR_EN, 0x04),
QMP_PHY_INIT_CFG(QSERDES_V3_COM_BG_TIMER, 0x0a),
@@ -290,18 +307,19 @@ static const struct qmp_phy_init_tbl sdm845_ufsphy_serdes_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE2_MODE1, 0x00),
QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP1_MODE1, 0x32),
QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP2_MODE1, 0x0f),
+};
- /* Rate B */
+static const struct qmp_phy_init_tbl sdm845_ufsphy_hs_b_serdes[] = {
QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_MAP, 0x44),
};
-static const struct qmp_phy_init_tbl sdm845_ufsphy_tx_tbl[] = {
+static const struct qmp_phy_init_tbl sdm845_ufsphy_tx[] = {
QMP_PHY_INIT_CFG(QSERDES_V3_TX_LANE_MODE_1, 0x06),
QMP_PHY_INIT_CFG(QSERDES_V3_TX_RES_CODE_LANE_OFFSET_TX, 0x04),
QMP_PHY_INIT_CFG(QSERDES_V3_TX_RES_CODE_LANE_OFFSET_RX, 0x07),
};
-static const struct qmp_phy_init_tbl sdm845_ufsphy_rx_tbl[] = {
+static const struct qmp_phy_init_tbl sdm845_ufsphy_rx[] = {
QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_LVL, 0x24),
QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_CNTRL, 0x0f),
QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_DEGLITCH_CNTRL, 0x1e),
@@ -320,7 +338,7 @@ static const struct qmp_phy_init_tbl sdm845_ufsphy_rx_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_MODE_00, 0x59),
};
-static const struct qmp_phy_init_tbl sdm845_ufsphy_pcs_tbl[] = {
+static const struct qmp_phy_init_tbl sdm845_ufsphy_pcs[] = {
QMP_PHY_INIT_CFG(QPHY_V3_PCS_UFS_RX_SIGDET_CTRL2, 0x6e),
QMP_PHY_INIT_CFG(QPHY_V3_PCS_UFS_TX_LARGE_AMP_DRV_LVL, 0x0a),
QMP_PHY_INIT_CFG(QPHY_V3_PCS_UFS_TX_SMALL_AMP_DRV_LVL, 0x02),
@@ -331,7 +349,7 @@ static const struct qmp_phy_init_tbl sdm845_ufsphy_pcs_tbl[] = {
QMP_PHY_INIT_CFG(QPHY_V3_PCS_UFS_MULTI_LANE_CTRL1, 0x02),
};
-static const struct qmp_phy_init_tbl sm8150_ufsphy_serdes_tbl[] = {
+static const struct qmp_phy_init_tbl sm8150_ufsphy_serdes[] = {
QMP_PHY_INIT_CFG(QSERDES_V4_COM_SYSCLK_EN_SEL, 0xd9),
QMP_PHY_INIT_CFG(QSERDES_V4_COM_HSCLK_SEL, 0x11),
QMP_PHY_INIT_CFG(QSERDES_V4_COM_HSCLK_HS_SWITCH_SEL, 0x00),
@@ -356,12 +374,13 @@ static const struct qmp_phy_init_tbl sm8150_ufsphy_serdes_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_V4_COM_LOCK_CMP2_MODE1, 0x0f),
QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE1_MODE1, 0xdd),
QMP_PHY_INIT_CFG(QSERDES_V4_COM_BIN_VCOCAL_CMP_CODE2_MODE1, 0x23),
+};
- /* Rate B */
+static const struct qmp_phy_init_tbl sm8150_ufsphy_hs_b_serdes[] = {
QMP_PHY_INIT_CFG(QSERDES_V4_COM_VCO_TUNE_MAP, 0x06),
};
-static const struct qmp_phy_init_tbl sm8150_ufsphy_tx_tbl[] = {
+static const struct qmp_phy_init_tbl sm8150_ufsphy_tx[] = {
QMP_PHY_INIT_CFG(QSERDES_V4_TX_PWM_GEAR_1_DIVIDER_BAND0_1, 0x06),
QMP_PHY_INIT_CFG(QSERDES_V4_TX_PWM_GEAR_2_DIVIDER_BAND0_1, 0x03),
QMP_PHY_INIT_CFG(QSERDES_V4_TX_PWM_GEAR_3_DIVIDER_BAND0_1, 0x01),
@@ -370,7 +389,11 @@ static const struct qmp_phy_init_tbl sm8150_ufsphy_tx_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_V4_TX_TRAN_DRVR_EMP_EN, 0x0c),
};
-static const struct qmp_phy_init_tbl sm8150_ufsphy_rx_tbl[] = {
+static const struct qmp_phy_init_tbl sm8150_ufsphy_hs_g4_tx[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_LANE_MODE_1, 0x75),
+};
+
+static const struct qmp_phy_init_tbl sm8150_ufsphy_rx[] = {
QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_LVL, 0x24),
QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_CNTRL, 0x0f),
QMP_PHY_INIT_CFG(QSERDES_V4_RX_SIGDET_DEGLITCH_CNTRL, 0x1e),
@@ -405,10 +428,28 @@ static const struct qmp_phy_init_tbl sm8150_ufsphy_rx_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH2, 0xc8),
QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH3, 0x3b),
QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_10_HIGH4, 0xb1),
+};
+static const struct qmp_phy_init_tbl sm8150_ufsphy_hs_g4_rx[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x5a),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_PI_CTRL2, 0x81),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FO_GAIN, 0x0e),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_TERM_BW, 0x6f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_MEASURE_TIME, 0x20),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_LOW, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_HIGH, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_LOW, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH2, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH3, 0x7f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH4, 0x6c),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_LOW, 0x6d),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH, 0x6d),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH2, 0xed),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH4, 0x3c),
};
-static const struct qmp_phy_init_tbl sm8150_ufsphy_pcs_tbl[] = {
+static const struct qmp_phy_init_tbl sm8150_ufsphy_pcs[] = {
QMP_PHY_INIT_CFG(QPHY_V4_PCS_UFS_RX_SIGDET_CTRL2, 0x6d),
QMP_PHY_INIT_CFG(QPHY_V4_PCS_UFS_TX_LARGE_AMP_DRV_LVL, 0x0a),
QMP_PHY_INIT_CFG(QPHY_V4_PCS_UFS_TX_SMALL_AMP_DRV_LVL, 0x02),
@@ -418,7 +459,40 @@ static const struct qmp_phy_init_tbl sm8150_ufsphy_pcs_tbl[] = {
QMP_PHY_INIT_CFG(QPHY_V4_PCS_UFS_MULTI_LANE_CTRL1, 0x02),
};
-static const struct qmp_phy_init_tbl sm8350_ufsphy_serdes_tbl[] = {
+static const struct qmp_phy_init_tbl sm8150_ufsphy_hs_g4_pcs[] = {
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_UFS_TX_LARGE_AMP_DRV_LVL, 0x10),
+ QMP_PHY_INIT_CFG(QPHY_V4_PCS_UFS_BIST_FIXED_PAT_CTRL, 0x0a),
+};
+
+static const struct qmp_phy_init_tbl sm8250_ufsphy_hs_g4_tx[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V4_TX_LANE_MODE_1, 0xe5),
+};
+
+static const struct qmp_phy_init_tbl sm8250_ufsphy_hs_g4_rx[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x5a),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_PI_CTRL2, 0x81),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_UCDR_FO_GAIN, 0x0e),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_TERM_BW, 0x6f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL1, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL2, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL3, 0x09),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQU_ADAPTOR_CNTRL4, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1, 0x17),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_MEASURE_TIME, 0x20),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_LOW, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_IDAC_TSETTLE_HIGH, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_LOW, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH2, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH3, 0x7f),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_00_HIGH4, 0x2c),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_LOW, 0x6d),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH, 0x6d),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH2, 0xed),
+ QMP_PHY_INIT_CFG(QSERDES_V4_RX_RX_MODE_01_HIGH4, 0x3c),
+};
+
+static const struct qmp_phy_init_tbl sm8350_ufsphy_serdes[] = {
QMP_PHY_INIT_CFG(QSERDES_V5_COM_SYSCLK_EN_SEL, 0xd9),
QMP_PHY_INIT_CFG(QSERDES_V5_COM_HSCLK_SEL, 0x11),
QMP_PHY_INIT_CFG(QSERDES_V5_COM_HSCLK_HS_SWITCH_SEL, 0x00),
@@ -443,12 +517,13 @@ static const struct qmp_phy_init_tbl sm8350_ufsphy_serdes_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP2_MODE1, 0x1e),
QMP_PHY_INIT_CFG(QSERDES_V5_COM_BIN_VCOCAL_CMP_CODE1_MODE1, 0xdd),
QMP_PHY_INIT_CFG(QSERDES_V5_COM_BIN_VCOCAL_CMP_CODE2_MODE1, 0x23),
+};
- /* Rate B */
+static const struct qmp_phy_init_tbl sm8350_ufsphy_hs_b_serdes[] = {
QMP_PHY_INIT_CFG(QSERDES_V5_COM_VCO_TUNE_MAP, 0x06),
};
-static const struct qmp_phy_init_tbl sm8350_ufsphy_tx_tbl[] = {
+static const struct qmp_phy_init_tbl sm8350_ufsphy_tx[] = {
QMP_PHY_INIT_CFG(QSERDES_V5_TX_PWM_GEAR_1_DIVIDER_BAND0_1, 0x06),
QMP_PHY_INIT_CFG(QSERDES_V5_TX_PWM_GEAR_2_DIVIDER_BAND0_1, 0x03),
QMP_PHY_INIT_CFG(QSERDES_V5_TX_PWM_GEAR_3_DIVIDER_BAND0_1, 0x01),
@@ -460,7 +535,7 @@ static const struct qmp_phy_init_tbl sm8350_ufsphy_tx_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_V5_TX_TRAN_DRVR_EMP_EN, 0x0c),
};
-static const struct qmp_phy_init_tbl sm8350_ufsphy_rx_tbl[] = {
+static const struct qmp_phy_init_tbl sm8350_ufsphy_rx[] = {
QMP_PHY_INIT_CFG(QSERDES_V5_RX_SIGDET_LVL, 0x24),
QMP_PHY_INIT_CFG(QSERDES_V5_RX_SIGDET_CNTRL, 0x0f),
QMP_PHY_INIT_CFG(QSERDES_V5_RX_SIGDET_DEGLITCH_CNTRL, 0x1e),
@@ -500,24 +575,100 @@ static const struct qmp_phy_init_tbl sm8350_ufsphy_rx_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_V5_RX_DCC_CTRL1, 0x0c),
};
-static const struct qmp_phy_init_tbl sm8350_ufsphy_pcs_tbl[] = {
+static const struct qmp_phy_init_tbl sm8350_ufsphy_pcs[] = {
QMP_PHY_INIT_CFG(QPHY_V5_PCS_UFS_RX_SIGDET_CTRL2, 0x6d),
QMP_PHY_INIT_CFG(QPHY_V5_PCS_UFS_TX_LARGE_AMP_DRV_LVL, 0x0a),
QMP_PHY_INIT_CFG(QPHY_V5_PCS_UFS_TX_SMALL_AMP_DRV_LVL, 0x02),
QMP_PHY_INIT_CFG(QPHY_V5_PCS_UFS_TX_MID_TERM_CTRL1, 0x43),
QMP_PHY_INIT_CFG(QPHY_V5_PCS_UFS_DEBUG_BUS_CLKSEL, 0x1f),
QMP_PHY_INIT_CFG(QPHY_V5_PCS_UFS_RX_MIN_HIBERN8_TIME, 0xff),
- QMP_PHY_INIT_CFG(QPHY_V5_PCS_UFS_PLL_CNTL, 0x03),
- QMP_PHY_INIT_CFG(QPHY_V5_PCS_UFS_TIMER_20US_CORECLK_STEPS_MSB, 0x16),
- QMP_PHY_INIT_CFG(QPHY_V5_PCS_UFS_TIMER_20US_CORECLK_STEPS_LSB, 0xd8),
- QMP_PHY_INIT_CFG(QPHY_V5_PCS_UFS_TX_PWM_GEAR_BAND, 0xaa),
- QMP_PHY_INIT_CFG(QPHY_V5_PCS_UFS_TX_HS_GEAR_BAND, 0x06),
- QMP_PHY_INIT_CFG(QPHY_V5_PCS_UFS_TX_HSGEAR_CAPABILITY, 0x03),
- QMP_PHY_INIT_CFG(QPHY_V5_PCS_UFS_RX_HSGEAR_CAPABILITY, 0x03),
QMP_PHY_INIT_CFG(QPHY_V5_PCS_UFS_RX_SIGDET_CTRL1, 0x0e),
QMP_PHY_INIT_CFG(QPHY_V5_PCS_UFS_MULTI_LANE_CTRL1, 0x02),
};
+static const struct qmp_phy_init_tbl sm8350_ufsphy_g4_tx[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_1, 0xe5),
+};
+
+static const struct qmp_phy_init_tbl sm8350_ufsphy_g4_rx[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_PI_CTRL2, 0x81),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_TERM_BW, 0x6f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL2, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL3, 0x4a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL4, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_IDAC_MEASURE_TIME, 0x20),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_IDAC_TSETTLE_LOW, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_IDAC_TSETTLE_HIGH, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_LOW, 0xbf),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH, 0xbf),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH2, 0x7f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH3, 0x7f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH4, 0x2d),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_LOW, 0x6d),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH, 0x6d),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH2, 0xed),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH4, 0x3c),
+};
+
+static const struct qmp_phy_init_tbl sm8350_ufsphy_g4_pcs[] = {
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_UFS_BIST_FIXED_PAT_CTRL, 0x0a),
+};
+
+static const struct qmp_phy_init_tbl sm8550_ufsphy_serdes[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_SYSCLK_EN_SEL, 0xd9),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CMN_CONFIG_1, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_HSCLK_SEL_1, 0x11),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_HSCLK_HS_SWITCH_SEL_1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP_EN, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_VCO_TUNE_MAP, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_IVCO, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_VCO_TUNE_INITVAL2, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MODE0, 0x41),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CP_CTRL_MODE0, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_RCTRL_MODE0, 0x18),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_CCTRL_MODE0, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP1_MODE0, 0x7f),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP2_MODE0, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_DEC_START_MODE0, 0x4c),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_CP_CTRL_MODE0, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_RCTRL_MODE0, 0x18),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_PLL_CCTRL_MODE0, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP1_MODE0, 0x99),
+ QMP_PHY_INIT_CFG(QSERDES_V6_COM_LOCK_CMP2_MODE0, 0x07),
+};
+
+static const struct qmp_phy_init_tbl sm8550_ufsphy_tx[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V6_TX_LANE_MODE_1, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V6_TX_RES_CODE_LANE_OFFSET_TX, 0x07),
+};
+
+static const struct qmp_phy_init_tbl sm8550_ufsphy_rx[] = {
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_UCDR_FASTLOCK_FO_GAIN_RATE2, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_UCDR_FASTLOCK_FO_GAIN_RATE4, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_VGA_CAL_MAN_VAL, 0x0e),
+
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_MODE_RATE_0_1_B0, 0xc2),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_MODE_RATE_0_1_B1, 0xc2),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_MODE_RATE_0_1_B3, 0x1a),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_MODE_RATE_0_1_B6, 0x60),
+
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_MODE_RATE2_B3, 0x9e),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_MODE_RATE2_B6, 0x60),
+
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_MODE_RATE3_B3, 0x9e),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_MODE_RATE3_B4, 0x0e),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_MODE_RATE3_B5, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_UFS_V6_RX_MODE_RATE3_B8, 0x02),
+};
+
+static const struct qmp_phy_init_tbl sm8550_ufsphy_pcs[] = {
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_UFS_RX_SIGDET_CTRL2, 0x69),
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_UFS_TX_LARGE_AMP_DRV_LVL, 0x0f),
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_UFS_TX_MID_TERM_CTRL1, 0x43),
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_UFS_PLL_CNTL, 0x2b),
+ QMP_PHY_INIT_CFG(QPHY_V6_PCS_UFS_MULTI_LANE_CTRL1, 0x02),
+};
+
struct qmp_ufs_offsets {
u16 serdes;
u16 pcs;
@@ -527,21 +678,30 @@ struct qmp_ufs_offsets {
u16 rx2;
};
+struct qmp_phy_cfg_tbls {
+ /* Init sequence for PHY blocks - serdes, tx, rx, pcs */
+ const struct qmp_phy_init_tbl *serdes;
+ int serdes_num;
+ const struct qmp_phy_init_tbl *tx;
+ int tx_num;
+ const struct qmp_phy_init_tbl *rx;
+ int rx_num;
+ const struct qmp_phy_init_tbl *pcs;
+ int pcs_num;
+};
+
/* struct qmp_phy_cfg - per-PHY initialization config */
struct qmp_phy_cfg {
int lanes;
const struct qmp_ufs_offsets *offsets;
- /* Init sequence for PHY blocks - serdes, tx, rx, pcs */
- const struct qmp_phy_init_tbl *serdes_tbl;
- int serdes_tbl_num;
- const struct qmp_phy_init_tbl *tx_tbl;
- int tx_tbl_num;
- const struct qmp_phy_init_tbl *rx_tbl;
- int rx_tbl_num;
- const struct qmp_phy_init_tbl *pcs_tbl;
- int pcs_tbl_num;
+ /* Main init sequence for PHY blocks - serdes, tx, rx, pcs */
+ const struct qmp_phy_cfg_tbls tbls;
+ /* Additional sequence for HS Series B */
+ const struct qmp_phy_cfg_tbls tbls_hs_b;
+ /* Additional sequence for HS G4 */
+ const struct qmp_phy_cfg_tbls tbls_hs_g4;
/* clock ids to be requested */
const char * const *clk_list;
@@ -575,6 +735,8 @@ struct qmp_ufs {
struct reset_control *ufs_reset;
struct phy *phy;
+ u32 mode;
+ u32 submode;
};
static inline void qphy_setbits(void __iomem *base, u32 offset, u32 val)
@@ -620,7 +782,7 @@ static const char * const qmp_phy_vreg_l[] = {
"vdda-phy", "vdda-pll",
};
-static const struct qmp_ufs_offsets qmp_ufs_offsets_v5 = {
+static const struct qmp_ufs_offsets qmp_ufs_offsets = {
.serdes = 0,
.pcs = 0xc00,
.tx = 0x400,
@@ -629,15 +791,26 @@ static const struct qmp_ufs_offsets qmp_ufs_offsets_v5 = {
.rx2 = 0xa00,
};
-static const struct qmp_phy_cfg msm8996_ufs_cfg = {
+static const struct qmp_ufs_offsets qmp_ufs_offsets_v6 = {
+ .serdes = 0,
+ .pcs = 0x0400,
+ .tx = 0x1000,
+ .rx = 0x1200,
+ .tx2 = 0x1800,
+ .rx2 = 0x1a00,
+};
+
+static const struct qmp_phy_cfg msm8996_ufsphy_cfg = {
.lanes = 1,
- .serdes_tbl = msm8996_ufs_serdes_tbl,
- .serdes_tbl_num = ARRAY_SIZE(msm8996_ufs_serdes_tbl),
- .tx_tbl = msm8996_ufs_tx_tbl,
- .tx_tbl_num = ARRAY_SIZE(msm8996_ufs_tx_tbl),
- .rx_tbl = msm8996_ufs_rx_tbl,
- .rx_tbl_num = ARRAY_SIZE(msm8996_ufs_rx_tbl),
+ .tbls = {
+ .serdes = msm8996_ufsphy_serdes,
+ .serdes_num = ARRAY_SIZE(msm8996_ufsphy_serdes),
+ .tx = msm8996_ufsphy_tx,
+ .tx_num = ARRAY_SIZE(msm8996_ufsphy_tx),
+ .rx = msm8996_ufsphy_rx,
+ .rx_num = ARRAY_SIZE(msm8996_ufsphy_rx),
+ },
.clk_list = msm8996_ufs_phy_clk_l,
.num_clks = ARRAY_SIZE(msm8996_ufs_phy_clk_l),
@@ -645,7 +818,7 @@ static const struct qmp_phy_cfg msm8996_ufs_cfg = {
.vreg_list = qmp_phy_vreg_l,
.num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
- .regs = msm8996_ufsphy_regs_layout,
+ .regs = ufsphy_v2_regs_layout,
.no_pcs_sw_reset = true,
};
@@ -653,39 +826,59 @@ static const struct qmp_phy_cfg msm8996_ufs_cfg = {
static const struct qmp_phy_cfg sc8280xp_ufsphy_cfg = {
.lanes = 2,
- .offsets = &qmp_ufs_offsets_v5,
-
- .serdes_tbl = sm8350_ufsphy_serdes_tbl,
- .serdes_tbl_num = ARRAY_SIZE(sm8350_ufsphy_serdes_tbl),
- .tx_tbl = sm8350_ufsphy_tx_tbl,
- .tx_tbl_num = ARRAY_SIZE(sm8350_ufsphy_tx_tbl),
- .rx_tbl = sm8350_ufsphy_rx_tbl,
- .rx_tbl_num = ARRAY_SIZE(sm8350_ufsphy_rx_tbl),
- .pcs_tbl = sm8350_ufsphy_pcs_tbl,
- .pcs_tbl_num = ARRAY_SIZE(sm8350_ufsphy_pcs_tbl),
+ .offsets = &qmp_ufs_offsets,
+
+ .tbls = {
+ .serdes = sm8350_ufsphy_serdes,
+ .serdes_num = ARRAY_SIZE(sm8350_ufsphy_serdes),
+ .tx = sm8350_ufsphy_tx,
+ .tx_num = ARRAY_SIZE(sm8350_ufsphy_tx),
+ .rx = sm8350_ufsphy_rx,
+ .rx_num = ARRAY_SIZE(sm8350_ufsphy_rx),
+ .pcs = sm8350_ufsphy_pcs,
+ .pcs_num = ARRAY_SIZE(sm8350_ufsphy_pcs),
+ },
+ .tbls_hs_b = {
+ .serdes = sm8350_ufsphy_hs_b_serdes,
+ .serdes_num = ARRAY_SIZE(sm8350_ufsphy_hs_b_serdes),
+ },
+ .tbls_hs_g4 = {
+ .tx = sm8350_ufsphy_g4_tx,
+ .tx_num = ARRAY_SIZE(sm8350_ufsphy_g4_tx),
+ .rx = sm8350_ufsphy_g4_rx,
+ .rx_num = ARRAY_SIZE(sm8350_ufsphy_g4_rx),
+ .pcs = sm8350_ufsphy_g4_pcs,
+ .pcs_num = ARRAY_SIZE(sm8350_ufsphy_g4_pcs),
+ },
.clk_list = sdm845_ufs_phy_clk_l,
.num_clks = ARRAY_SIZE(sdm845_ufs_phy_clk_l),
.vreg_list = qmp_phy_vreg_l,
.num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
- .regs = sm8150_ufsphy_regs_layout,
+ .regs = ufsphy_v5_regs_layout,
};
static const struct qmp_phy_cfg sdm845_ufsphy_cfg = {
.lanes = 2,
- .serdes_tbl = sdm845_ufsphy_serdes_tbl,
- .serdes_tbl_num = ARRAY_SIZE(sdm845_ufsphy_serdes_tbl),
- .tx_tbl = sdm845_ufsphy_tx_tbl,
- .tx_tbl_num = ARRAY_SIZE(sdm845_ufsphy_tx_tbl),
- .rx_tbl = sdm845_ufsphy_rx_tbl,
- .rx_tbl_num = ARRAY_SIZE(sdm845_ufsphy_rx_tbl),
- .pcs_tbl = sdm845_ufsphy_pcs_tbl,
- .pcs_tbl_num = ARRAY_SIZE(sdm845_ufsphy_pcs_tbl),
+ .tbls = {
+ .serdes = sdm845_ufsphy_serdes,
+ .serdes_num = ARRAY_SIZE(sdm845_ufsphy_serdes),
+ .tx = sdm845_ufsphy_tx,
+ .tx_num = ARRAY_SIZE(sdm845_ufsphy_tx),
+ .rx = sdm845_ufsphy_rx,
+ .rx_num = ARRAY_SIZE(sdm845_ufsphy_rx),
+ .pcs = sdm845_ufsphy_pcs,
+ .pcs_num = ARRAY_SIZE(sdm845_ufsphy_pcs),
+ },
+ .tbls_hs_b = {
+ .serdes = sdm845_ufsphy_hs_b_serdes,
+ .serdes_num = ARRAY_SIZE(sdm845_ufsphy_hs_b_serdes),
+ },
.clk_list = sdm845_ufs_phy_clk_l,
.num_clks = ARRAY_SIZE(sdm845_ufs_phy_clk_l),
.vreg_list = qmp_phy_vreg_l,
.num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
- .regs = sdm845_ufsphy_regs_layout,
+ .regs = ufsphy_v3_regs_layout,
.no_pcs_sw_reset = true,
};
@@ -693,19 +886,27 @@ static const struct qmp_phy_cfg sdm845_ufsphy_cfg = {
static const struct qmp_phy_cfg sm6115_ufsphy_cfg = {
.lanes = 1,
- .serdes_tbl = sm6115_ufsphy_serdes_tbl,
- .serdes_tbl_num = ARRAY_SIZE(sm6115_ufsphy_serdes_tbl),
- .tx_tbl = sm6115_ufsphy_tx_tbl,
- .tx_tbl_num = ARRAY_SIZE(sm6115_ufsphy_tx_tbl),
- .rx_tbl = sm6115_ufsphy_rx_tbl,
- .rx_tbl_num = ARRAY_SIZE(sm6115_ufsphy_rx_tbl),
- .pcs_tbl = sm6115_ufsphy_pcs_tbl,
- .pcs_tbl_num = ARRAY_SIZE(sm6115_ufsphy_pcs_tbl),
+ .offsets = &qmp_ufs_offsets,
+
+ .tbls = {
+ .serdes = sm6115_ufsphy_serdes,
+ .serdes_num = ARRAY_SIZE(sm6115_ufsphy_serdes),
+ .tx = sm6115_ufsphy_tx,
+ .tx_num = ARRAY_SIZE(sm6115_ufsphy_tx),
+ .rx = sm6115_ufsphy_rx,
+ .rx_num = ARRAY_SIZE(sm6115_ufsphy_rx),
+ .pcs = sm6115_ufsphy_pcs,
+ .pcs_num = ARRAY_SIZE(sm6115_ufsphy_pcs),
+ },
+ .tbls_hs_b = {
+ .serdes = sm6115_ufsphy_hs_b_serdes,
+ .serdes_num = ARRAY_SIZE(sm6115_ufsphy_hs_b_serdes),
+ },
.clk_list = sdm845_ufs_phy_clk_l,
.num_clks = ARRAY_SIZE(sdm845_ufs_phy_clk_l),
.vreg_list = qmp_phy_vreg_l,
.num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
- .regs = sm6115_ufsphy_regs_layout,
+ .regs = ufsphy_v2_regs_layout,
.no_pcs_sw_reset = true,
};
@@ -713,55 +914,151 @@ static const struct qmp_phy_cfg sm6115_ufsphy_cfg = {
static const struct qmp_phy_cfg sm8150_ufsphy_cfg = {
.lanes = 2,
- .serdes_tbl = sm8150_ufsphy_serdes_tbl,
- .serdes_tbl_num = ARRAY_SIZE(sm8150_ufsphy_serdes_tbl),
- .tx_tbl = sm8150_ufsphy_tx_tbl,
- .tx_tbl_num = ARRAY_SIZE(sm8150_ufsphy_tx_tbl),
- .rx_tbl = sm8150_ufsphy_rx_tbl,
- .rx_tbl_num = ARRAY_SIZE(sm8150_ufsphy_rx_tbl),
- .pcs_tbl = sm8150_ufsphy_pcs_tbl,
- .pcs_tbl_num = ARRAY_SIZE(sm8150_ufsphy_pcs_tbl),
+ .tbls = {
+ .serdes = sm8150_ufsphy_serdes,
+ .serdes_num = ARRAY_SIZE(sm8150_ufsphy_serdes),
+ .tx = sm8150_ufsphy_tx,
+ .tx_num = ARRAY_SIZE(sm8150_ufsphy_tx),
+ .rx = sm8150_ufsphy_rx,
+ .rx_num = ARRAY_SIZE(sm8150_ufsphy_rx),
+ .pcs = sm8150_ufsphy_pcs,
+ .pcs_num = ARRAY_SIZE(sm8150_ufsphy_pcs),
+ },
+ .tbls_hs_b = {
+ .serdes = sm8150_ufsphy_hs_b_serdes,
+ .serdes_num = ARRAY_SIZE(sm8150_ufsphy_hs_b_serdes),
+ },
+ .tbls_hs_g4 = {
+ .tx = sm8150_ufsphy_hs_g4_tx,
+ .tx_num = ARRAY_SIZE(sm8150_ufsphy_hs_g4_tx),
+ .rx = sm8150_ufsphy_hs_g4_rx,
+ .rx_num = ARRAY_SIZE(sm8150_ufsphy_hs_g4_rx),
+ .pcs = sm8150_ufsphy_hs_g4_pcs,
+ .pcs_num = ARRAY_SIZE(sm8150_ufsphy_hs_g4_pcs),
+ },
+ .clk_list = sdm845_ufs_phy_clk_l,
+ .num_clks = ARRAY_SIZE(sdm845_ufs_phy_clk_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = ufsphy_v4_regs_layout,
+};
+
+static const struct qmp_phy_cfg sm8250_ufsphy_cfg = {
+ .lanes = 2,
+
+ .tbls = {
+ .serdes = sm8150_ufsphy_serdes,
+ .serdes_num = ARRAY_SIZE(sm8150_ufsphy_serdes),
+ .tx = sm8150_ufsphy_tx,
+ .tx_num = ARRAY_SIZE(sm8150_ufsphy_tx),
+ .rx = sm8150_ufsphy_rx,
+ .rx_num = ARRAY_SIZE(sm8150_ufsphy_rx),
+ .pcs = sm8150_ufsphy_pcs,
+ .pcs_num = ARRAY_SIZE(sm8150_ufsphy_pcs),
+ },
+ .tbls_hs_b = {
+ .serdes = sm8150_ufsphy_hs_b_serdes,
+ .serdes_num = ARRAY_SIZE(sm8150_ufsphy_hs_b_serdes),
+ },
+ .tbls_hs_g4 = {
+ .tx = sm8250_ufsphy_hs_g4_tx,
+ .tx_num = ARRAY_SIZE(sm8250_ufsphy_hs_g4_tx),
+ .rx = sm8250_ufsphy_hs_g4_rx,
+ .rx_num = ARRAY_SIZE(sm8250_ufsphy_hs_g4_rx),
+ .pcs = sm8150_ufsphy_hs_g4_pcs,
+ .pcs_num = ARRAY_SIZE(sm8150_ufsphy_hs_g4_pcs),
+ },
.clk_list = sdm845_ufs_phy_clk_l,
.num_clks = ARRAY_SIZE(sdm845_ufs_phy_clk_l),
.vreg_list = qmp_phy_vreg_l,
.num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
- .regs = sm8150_ufsphy_regs_layout,
+ .regs = ufsphy_v4_regs_layout,
};
static const struct qmp_phy_cfg sm8350_ufsphy_cfg = {
.lanes = 2,
- .serdes_tbl = sm8350_ufsphy_serdes_tbl,
- .serdes_tbl_num = ARRAY_SIZE(sm8350_ufsphy_serdes_tbl),
- .tx_tbl = sm8350_ufsphy_tx_tbl,
- .tx_tbl_num = ARRAY_SIZE(sm8350_ufsphy_tx_tbl),
- .rx_tbl = sm8350_ufsphy_rx_tbl,
- .rx_tbl_num = ARRAY_SIZE(sm8350_ufsphy_rx_tbl),
- .pcs_tbl = sm8350_ufsphy_pcs_tbl,
- .pcs_tbl_num = ARRAY_SIZE(sm8350_ufsphy_pcs_tbl),
+ .tbls = {
+ .serdes = sm8350_ufsphy_serdes,
+ .serdes_num = ARRAY_SIZE(sm8350_ufsphy_serdes),
+ .tx = sm8350_ufsphy_tx,
+ .tx_num = ARRAY_SIZE(sm8350_ufsphy_tx),
+ .rx = sm8350_ufsphy_rx,
+ .rx_num = ARRAY_SIZE(sm8350_ufsphy_rx),
+ .pcs = sm8350_ufsphy_pcs,
+ .pcs_num = ARRAY_SIZE(sm8350_ufsphy_pcs),
+ },
+ .tbls_hs_b = {
+ .serdes = sm8350_ufsphy_hs_b_serdes,
+ .serdes_num = ARRAY_SIZE(sm8350_ufsphy_hs_b_serdes),
+ },
+ .tbls_hs_g4 = {
+ .tx = sm8350_ufsphy_g4_tx,
+ .tx_num = ARRAY_SIZE(sm8350_ufsphy_g4_tx),
+ .rx = sm8350_ufsphy_g4_rx,
+ .rx_num = ARRAY_SIZE(sm8350_ufsphy_g4_rx),
+ .pcs = sm8350_ufsphy_g4_pcs,
+ .pcs_num = ARRAY_SIZE(sm8350_ufsphy_g4_pcs),
+ },
.clk_list = sdm845_ufs_phy_clk_l,
.num_clks = ARRAY_SIZE(sdm845_ufs_phy_clk_l),
.vreg_list = qmp_phy_vreg_l,
.num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
- .regs = sm8150_ufsphy_regs_layout,
+ .regs = ufsphy_v5_regs_layout,
};
static const struct qmp_phy_cfg sm8450_ufsphy_cfg = {
.lanes = 2,
- .serdes_tbl = sm8350_ufsphy_serdes_tbl,
- .serdes_tbl_num = ARRAY_SIZE(sm8350_ufsphy_serdes_tbl),
- .tx_tbl = sm8350_ufsphy_tx_tbl,
- .tx_tbl_num = ARRAY_SIZE(sm8350_ufsphy_tx_tbl),
- .rx_tbl = sm8350_ufsphy_rx_tbl,
- .rx_tbl_num = ARRAY_SIZE(sm8350_ufsphy_rx_tbl),
- .pcs_tbl = sm8350_ufsphy_pcs_tbl,
- .pcs_tbl_num = ARRAY_SIZE(sm8350_ufsphy_pcs_tbl),
+ .tbls = {
+ .serdes = sm8350_ufsphy_serdes,
+ .serdes_num = ARRAY_SIZE(sm8350_ufsphy_serdes),
+ .tx = sm8350_ufsphy_tx,
+ .tx_num = ARRAY_SIZE(sm8350_ufsphy_tx),
+ .rx = sm8350_ufsphy_rx,
+ .rx_num = ARRAY_SIZE(sm8350_ufsphy_rx),
+ .pcs = sm8350_ufsphy_pcs,
+ .pcs_num = ARRAY_SIZE(sm8350_ufsphy_pcs),
+ },
+ .tbls_hs_b = {
+ .serdes = sm8350_ufsphy_hs_b_serdes,
+ .serdes_num = ARRAY_SIZE(sm8350_ufsphy_hs_b_serdes),
+ },
+ .tbls_hs_g4 = {
+ .tx = sm8350_ufsphy_g4_tx,
+ .tx_num = ARRAY_SIZE(sm8350_ufsphy_g4_tx),
+ .rx = sm8350_ufsphy_g4_rx,
+ .rx_num = ARRAY_SIZE(sm8350_ufsphy_g4_rx),
+ .pcs = sm8350_ufsphy_g4_pcs,
+ .pcs_num = ARRAY_SIZE(sm8350_ufsphy_g4_pcs),
+ },
.clk_list = sm8450_ufs_phy_clk_l,
.num_clks = ARRAY_SIZE(sm8450_ufs_phy_clk_l),
.vreg_list = qmp_phy_vreg_l,
.num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
- .regs = sm8150_ufsphy_regs_layout,
+ .regs = ufsphy_v5_regs_layout,
+};
+
+static const struct qmp_phy_cfg sm8550_ufsphy_cfg = {
+ .lanes = 2,
+
+ .offsets = &qmp_ufs_offsets_v6,
+
+ .tbls = {
+ .serdes = sm8550_ufsphy_serdes,
+ .serdes_num = ARRAY_SIZE(sm8550_ufsphy_serdes),
+ .tx = sm8550_ufsphy_tx,
+ .tx_num = ARRAY_SIZE(sm8550_ufsphy_tx),
+ .rx = sm8550_ufsphy_rx,
+ .rx_num = ARRAY_SIZE(sm8550_ufsphy_rx),
+ .pcs = sm8550_ufsphy_pcs,
+ .pcs_num = ARRAY_SIZE(sm8550_ufsphy_pcs),
+ },
+ .clk_list = sdm845_ufs_phy_clk_l,
+ .num_clks = ARRAY_SIZE(sdm845_ufs_phy_clk_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = ufsphy_v6_regs_layout,
};
static void qmp_ufs_configure_lane(void __iomem *base,
@@ -790,16 +1087,46 @@ static void qmp_ufs_configure(void __iomem *base,
qmp_ufs_configure_lane(base, tbl, num, 0xff);
}
-static int qmp_ufs_serdes_init(struct qmp_ufs *qmp)
+static void qmp_ufs_serdes_init(struct qmp_ufs *qmp, const struct qmp_phy_cfg_tbls *tbls)
{
- const struct qmp_phy_cfg *cfg = qmp->cfg;
void __iomem *serdes = qmp->serdes;
- const struct qmp_phy_init_tbl *serdes_tbl = cfg->serdes_tbl;
- int serdes_tbl_num = cfg->serdes_tbl_num;
- qmp_ufs_configure(serdes, serdes_tbl, serdes_tbl_num);
+ qmp_ufs_configure(serdes, tbls->serdes, tbls->serdes_num);
+}
- return 0;
+static void qmp_ufs_lanes_init(struct qmp_ufs *qmp, const struct qmp_phy_cfg_tbls *tbls)
+{
+ const struct qmp_phy_cfg *cfg = qmp->cfg;
+ void __iomem *tx = qmp->tx;
+ void __iomem *rx = qmp->rx;
+
+ qmp_ufs_configure_lane(tx, tbls->tx, tbls->tx_num, 1);
+ qmp_ufs_configure_lane(rx, tbls->rx, tbls->rx_num, 1);
+
+ if (cfg->lanes >= 2) {
+ qmp_ufs_configure_lane(qmp->tx2, tbls->tx, tbls->tx_num, 2);
+ qmp_ufs_configure_lane(qmp->rx2, tbls->rx, tbls->rx_num, 2);
+ }
+}
+
+static void qmp_ufs_pcs_init(struct qmp_ufs *qmp, const struct qmp_phy_cfg_tbls *tbls)
+{
+ void __iomem *pcs = qmp->pcs;
+
+ qmp_ufs_configure(pcs, tbls->pcs, tbls->pcs_num);
+}
+
+static void qmp_ufs_init_registers(struct qmp_ufs *qmp, const struct qmp_phy_cfg *cfg)
+{
+ qmp_ufs_serdes_init(qmp, &cfg->tbls);
+ if (qmp->mode == PHY_MODE_UFS_HS_B)
+ qmp_ufs_serdes_init(qmp, &cfg->tbls_hs_b);
+ qmp_ufs_lanes_init(qmp, &cfg->tbls);
+ if (qmp->submode == UFS_HS_G4)
+ qmp_ufs_lanes_init(qmp, &cfg->tbls_hs_g4);
+ qmp_ufs_pcs_init(qmp, &cfg->tbls);
+ if (qmp->submode == UFS_HS_G4)
+ qmp_ufs_pcs_init(qmp, &cfg->tbls_hs_g4);
}
static int qmp_ufs_com_init(struct qmp_ufs *qmp)
@@ -886,25 +1213,12 @@ static int qmp_ufs_power_on(struct phy *phy)
{
struct qmp_ufs *qmp = phy_get_drvdata(phy);
const struct qmp_phy_cfg *cfg = qmp->cfg;
- void __iomem *tx = qmp->tx;
- void __iomem *rx = qmp->rx;
void __iomem *pcs = qmp->pcs;
void __iomem *status;
unsigned int val;
int ret;
- qmp_ufs_serdes_init(qmp);
-
- /* Tx, Rx, and PCS configurations */
- qmp_ufs_configure_lane(tx, cfg->tx_tbl, cfg->tx_tbl_num, 1);
- qmp_ufs_configure_lane(rx, cfg->rx_tbl, cfg->rx_tbl_num, 1);
-
- if (cfg->lanes >= 2) {
- qmp_ufs_configure_lane(qmp->tx2, cfg->tx_tbl, cfg->tx_tbl_num, 2);
- qmp_ufs_configure_lane(qmp->rx2, cfg->rx_tbl, cfg->rx_tbl_num, 2);
- }
-
- qmp_ufs_configure(pcs, cfg->pcs_tbl, cfg->pcs_tbl_num);
+ qmp_ufs_init_registers(qmp, cfg);
ret = reset_control_deassert(qmp->ufs_reset);
if (ret)
@@ -981,9 +1295,20 @@ static int qmp_ufs_disable(struct phy *phy)
return qmp_ufs_exit(phy);
}
+static int qmp_ufs_set_mode(struct phy *phy, enum phy_mode mode, int submode)
+{
+ struct qmp_ufs *qmp = phy_get_drvdata(phy);
+
+ qmp->mode = mode;
+ qmp->submode = submode;
+
+ return 0;
+}
+
static const struct phy_ops qcom_qmp_ufs_phy_ops = {
.power_on = qmp_ufs_enable,
.power_off = qmp_ufs_disable,
+ .set_mode = qmp_ufs_set_mode,
.owner = THIS_MODULE,
};
@@ -1021,6 +1346,59 @@ static int qmp_ufs_clk_init(struct qmp_ufs *qmp)
return devm_clk_bulk_get(dev, num, qmp->clks);
}
+static void qmp_ufs_clk_release_provider(void *res)
+{
+ of_clk_del_provider(res);
+}
+
+#define UFS_SYMBOL_CLOCKS 3
+
+static int qmp_ufs_register_clocks(struct qmp_ufs *qmp, struct device_node *np)
+{
+ struct clk_hw_onecell_data *clk_data;
+ struct clk_hw *hw;
+ char name[64];
+ int ret;
+
+ clk_data = devm_kzalloc(qmp->dev,
+ struct_size(clk_data, hws, UFS_SYMBOL_CLOCKS),
+ GFP_KERNEL);
+ if (!clk_data)
+ return -ENOMEM;
+
+ clk_data->num = UFS_SYMBOL_CLOCKS;
+
+ snprintf(name, sizeof(name), "%s::rx_symbol_0", dev_name(qmp->dev));
+ hw = devm_clk_hw_register_fixed_rate(qmp->dev, name, NULL, 0, 0);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+
+ clk_data->hws[0] = hw;
+
+ snprintf(name, sizeof(name), "%s::rx_symbol_1", dev_name(qmp->dev));
+ hw = devm_clk_hw_register_fixed_rate(qmp->dev, name, NULL, 0, 0);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+
+ clk_data->hws[1] = hw;
+
+ snprintf(name, sizeof(name), "%s::tx_symbol_0", dev_name(qmp->dev));
+ hw = devm_clk_hw_register_fixed_rate(qmp->dev, name, NULL, 0, 0);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+
+ clk_data->hws[2] = hw;
+
+ ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data);
+ if (ret)
+ return ret;
+
+ /*
+ * Roll a devm action because the clock provider can be a child node.
+ */
+ return devm_add_action_or_reset(qmp->dev, qmp_ufs_clk_release_provider, np);
+}
+
static int qmp_ufs_parse_dt_legacy(struct qmp_ufs *qmp, struct device_node *np)
{
struct platform_device *pdev = to_platform_device(qmp->dev);
@@ -1133,6 +1511,10 @@ static int qmp_ufs_probe(struct platform_device *pdev)
if (ret)
goto err_node_put;
+ ret = qmp_ufs_register_clocks(qmp, np);
+ if (ret)
+ goto err_node_put;
+
qmp->phy = devm_phy_create(dev, np, &qcom_qmp_ufs_phy_ops);
if (IS_ERR(qmp->phy)) {
ret = PTR_ERR(qmp->phy);
@@ -1156,7 +1538,7 @@ err_node_put:
static const struct of_device_id qmp_ufs_of_match_table[] = {
{
.compatible = "qcom,msm8996-qmp-ufs-phy",
- .data = &msm8996_ufs_cfg,
+ .data = &msm8996_ufsphy_cfg,
}, {
.compatible = "qcom,msm8998-qmp-ufs-phy",
.data = &sdm845_ufsphy_cfg,
@@ -1173,6 +1555,9 @@ static const struct of_device_id qmp_ufs_of_match_table[] = {
.compatible = "qcom,sm6115-qmp-ufs-phy",
.data = &sm6115_ufsphy_cfg,
}, {
+ .compatible = "qcom,sm6125-qmp-ufs-phy",
+ .data = &sm6115_ufsphy_cfg,
+ }, {
.compatible = "qcom,sm6350-qmp-ufs-phy",
.data = &sdm845_ufsphy_cfg,
}, {
@@ -1180,13 +1565,16 @@ static const struct of_device_id qmp_ufs_of_match_table[] = {
.data = &sm8150_ufsphy_cfg,
}, {
.compatible = "qcom,sm8250-qmp-ufs-phy",
- .data = &sm8150_ufsphy_cfg,
+ .data = &sm8250_ufsphy_cfg,
}, {
.compatible = "qcom,sm8350-qmp-ufs-phy",
.data = &sm8350_ufsphy_cfg,
}, {
.compatible = "qcom,sm8450-qmp-ufs-phy",
.data = &sm8450_ufsphy_cfg,
+ }, {
+ .compatible = "qcom,sm8550-qmp-ufs-phy",
+ .data = &sm8550_ufsphy_cfg,
},
{ },
};
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-usb.c b/drivers/phy/qualcomm/phy-qcom-qmp-usb.c
index 4aa338fc4643..a49711c5a63d 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp-usb.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp-usb.c
@@ -21,6 +21,9 @@
#include <linux/slab.h>
#include "phy-qcom-qmp.h"
+#include "phy-qcom-qmp-pcs-misc-v3.h"
+#include "phy-qcom-qmp-pcs-usb-v4.h"
+#include "phy-qcom-qmp-pcs-usb-v5.h"
/* QPHY_SW_RESET bit */
#define SW_RESET BIT(0)
@@ -54,9 +57,6 @@
/* QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR register bits */
#define IRQ_CLEAR BIT(0)
-/* QPHY_PCS_LFPS_RXTERM_IRQ_STATUS register bits */
-#define RCVR_DETECT BIT(0)
-
/* QPHY_V3_PCS_MISC_CLAMP_ENABLE register bits */
#define CLAMP_EN BIT(0) /* enables i/o clamp_n */
@@ -94,53 +94,49 @@ enum qphy_reg_layout {
QPHY_PCS_STATUS,
QPHY_PCS_AUTONOMOUS_MODE_CTRL,
QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR,
- QPHY_PCS_LFPS_RXTERM_IRQ_STATUS,
QPHY_PCS_POWER_DOWN_CONTROL,
- /* PCS_MISC registers */
- QPHY_PCS_MISC_TYPEC_CTRL,
/* Keep last to ensure regs_layout arrays are properly initialized */
QPHY_LAYOUT_SIZE
};
-static const unsigned int usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
- [QPHY_SW_RESET] = 0x00,
- [QPHY_START_CTRL] = 0x08,
- [QPHY_PCS_STATUS] = 0x17c,
- [QPHY_PCS_AUTONOMOUS_MODE_CTRL] = 0x0d4,
- [QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR] = 0x0d8,
- [QPHY_PCS_LFPS_RXTERM_IRQ_STATUS] = 0x178,
- [QPHY_PCS_POWER_DOWN_CONTROL] = 0x04,
+static const unsigned int qmp_v2_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
+ [QPHY_SW_RESET] = QPHY_V2_PCS_SW_RESET,
+ [QPHY_START_CTRL] = QPHY_V2_PCS_START_CONTROL,
+ [QPHY_PCS_STATUS] = QPHY_V2_PCS_USB_PCS_STATUS,
+ [QPHY_PCS_AUTONOMOUS_MODE_CTRL] = QPHY_V2_PCS_AUTONOMOUS_MODE_CTRL,
+ [QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR] = QPHY_V2_PCS_LFPS_RXTERM_IRQ_CLEAR,
+ [QPHY_PCS_POWER_DOWN_CONTROL] = QPHY_V2_PCS_POWER_DOWN_CONTROL,
};
static const unsigned int qmp_v3_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
- [QPHY_SW_RESET] = 0x00,
- [QPHY_START_CTRL] = 0x08,
- [QPHY_PCS_STATUS] = 0x174,
- [QPHY_PCS_AUTONOMOUS_MODE_CTRL] = 0x0d8,
- [QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR] = 0x0dc,
- [QPHY_PCS_LFPS_RXTERM_IRQ_STATUS] = 0x170,
- [QPHY_PCS_POWER_DOWN_CONTROL] = 0x04,
+ [QPHY_SW_RESET] = QPHY_V3_PCS_SW_RESET,
+ [QPHY_START_CTRL] = QPHY_V3_PCS_START_CONTROL,
+ [QPHY_PCS_STATUS] = QPHY_V3_PCS_PCS_STATUS,
+ [QPHY_PCS_AUTONOMOUS_MODE_CTRL] = QPHY_V3_PCS_AUTONOMOUS_MODE_CTRL,
+ [QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR] = QPHY_V3_PCS_LFPS_RXTERM_IRQ_CLEAR,
+ [QPHY_PCS_POWER_DOWN_CONTROL] = QPHY_V3_PCS_POWER_DOWN_CONTROL,
};
static const unsigned int qmp_v4_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
- [QPHY_SW_RESET] = 0x00,
- [QPHY_START_CTRL] = 0x44,
- [QPHY_PCS_STATUS] = 0x14,
- [QPHY_PCS_POWER_DOWN_CONTROL] = 0x40,
+ [QPHY_SW_RESET] = QPHY_V4_PCS_SW_RESET,
+ [QPHY_START_CTRL] = QPHY_V4_PCS_START_CONTROL,
+ [QPHY_PCS_STATUS] = QPHY_V4_PCS_PCS_STATUS1,
+ [QPHY_PCS_POWER_DOWN_CONTROL] = QPHY_V4_PCS_POWER_DOWN_CONTROL,
/* In PCS_USB */
- [QPHY_PCS_AUTONOMOUS_MODE_CTRL] = 0x008,
- [QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR] = 0x014,
+ [QPHY_PCS_AUTONOMOUS_MODE_CTRL] = QPHY_V4_PCS_USB3_AUTONOMOUS_MODE_CTRL,
+ [QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR] = QPHY_V4_PCS_USB3_LFPS_RXTERM_IRQ_CLEAR,
};
-static const unsigned int qcm2290_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
- [QPHY_SW_RESET] = 0x00,
- [QPHY_PCS_POWER_DOWN_CONTROL] = 0x04,
- [QPHY_START_CTRL] = 0x08,
- [QPHY_PCS_AUTONOMOUS_MODE_CTRL] = 0xd8,
- [QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR] = 0xdc,
- [QPHY_PCS_STATUS] = 0x174,
- [QPHY_PCS_MISC_TYPEC_CTRL] = 0x00,
+static const unsigned int qmp_v5_usb3phy_regs_layout[QPHY_LAYOUT_SIZE] = {
+ [QPHY_SW_RESET] = QPHY_V5_PCS_SW_RESET,
+ [QPHY_START_CTRL] = QPHY_V5_PCS_START_CONTROL,
+ [QPHY_PCS_STATUS] = QPHY_V5_PCS_PCS_STATUS1,
+ [QPHY_PCS_POWER_DOWN_CONTROL] = QPHY_V5_PCS_POWER_DOWN_CONTROL,
+
+ /* In PCS_USB */
+ [QPHY_PCS_AUTONOMOUS_MODE_CTRL] = QPHY_V5_PCS_USB3_AUTONOMOUS_MODE_CTRL,
+ [QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR] = QPHY_V5_PCS_USB3_LFPS_RXTERM_IRQ_CLEAR,
};
static const struct qmp_phy_init_tbl ipq8074_usb3_serdes_tbl[] = {
@@ -1265,7 +1261,7 @@ static const struct qmp_phy_init_tbl qcm2290_usb3_tx_tbl[] = {
static const struct qmp_phy_init_tbl qcm2290_usb3_rx_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FASTLOCK_FO_GAIN, 0x0b),
- QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_PI_CONTROLS, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_PI_CONTROLS, 0x80),
QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FASTLOCK_COUNT_LOW, 0x00),
QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FASTLOCK_COUNT_HIGH, 0x00),
QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FO_GAIN, 0x0a),
@@ -1607,7 +1603,7 @@ static const struct qmp_phy_cfg msm8996_usb3phy_cfg = {
.num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
.vreg_list = qmp_phy_vreg_l,
.num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
- .regs = usb3phy_regs_layout,
+ .regs = qmp_v2_usb3phy_regs_layout,
};
static const struct qmp_phy_cfg qmp_v3_usb3phy_cfg = {
@@ -1675,7 +1671,7 @@ static const struct qmp_phy_cfg sc8280xp_usb3_uniphy_cfg = {
.num_resets = ARRAY_SIZE(qcm2290_usb3phy_reset_l),
.vreg_list = qmp_phy_vreg_l,
.num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
- .regs = qmp_v4_usb3phy_regs_layout,
+ .regs = qmp_v5_usb3phy_regs_layout,
};
static const struct qmp_phy_cfg qmp_v3_usb3_uniphy_cfg = {
@@ -1866,7 +1862,7 @@ static const struct qmp_phy_cfg sdx65_usb3_uniphy_cfg = {
.num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
.vreg_list = qmp_phy_vreg_l,
.num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
- .regs = qmp_v4_usb3phy_regs_layout,
+ .regs = qmp_v5_usb3phy_regs_layout,
.pcs_usb_offset = 0x1000,
.has_pwrdn_delay = true,
@@ -1891,7 +1887,7 @@ static const struct qmp_phy_cfg sm8350_usb3phy_cfg = {
.num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
.vreg_list = qmp_phy_vreg_l,
.num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
- .regs = qmp_v4_usb3phy_regs_layout,
+ .regs = qmp_v5_usb3phy_regs_layout,
.pcs_usb_offset = 0x300,
.has_pwrdn_delay = true,
@@ -1917,7 +1913,7 @@ static const struct qmp_phy_cfg sm8350_usb3_uniphy_cfg = {
.num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
.vreg_list = qmp_phy_vreg_l,
.num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
- .regs = qmp_v4_usb3phy_regs_layout,
+ .regs = qmp_v5_usb3phy_regs_layout,
.pcs_usb_offset = 0x1000,
.has_pwrdn_delay = true,
@@ -1940,7 +1936,7 @@ static const struct qmp_phy_cfg qcm2290_usb3phy_cfg = {
.num_resets = ARRAY_SIZE(qcm2290_usb3phy_reset_l),
.vreg_list = qmp_phy_vreg_l,
.num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
- .regs = qcm2290_usb3phy_regs_layout,
+ .regs = qmp_v3_usb3phy_regs_layout,
};
static void qmp_usb_configure_lane(void __iomem *base,
@@ -2623,6 +2619,9 @@ static const struct of_device_id qmp_usb_of_match_table[] = {
.compatible = "qcom,sdx65-qmp-usb3-uni-phy",
.data = &sdx65_usb3_uniphy_cfg,
}, {
+ .compatible = "qcom,sm6115-qmp-usb3-phy",
+ .data = &qcm2290_usb3phy_cfg,
+ }, {
.compatible = "qcom,sm8150-qmp-usb3-phy",
.data = &sm8150_usb3phy_cfg,
}, {
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.h b/drivers/phy/qualcomm/phy-qcom-qmp.h
index 29a48f0436d2..7ee4b0e07d11 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp.h
@@ -21,31 +21,28 @@
#include "phy-qcom-qmp-qserdes-txrx-v5_20.h"
#include "phy-qcom-qmp-qserdes-txrx-v5_5nm.h"
+#include "phy-qcom-qmp-qserdes-com-v6.h"
+#include "phy-qcom-qmp-qserdes-txrx-v6.h"
+#include "phy-qcom-qmp-qserdes-txrx-v6_20.h"
+#include "phy-qcom-qmp-qserdes-ln-shrd-v6.h"
+
#include "phy-qcom-qmp-qserdes-pll.h"
#include "phy-qcom-qmp-pcs-v2.h"
#include "phy-qcom-qmp-pcs-v3.h"
-#include "phy-qcom-qmp-pcs-misc-v3.h"
-#include "phy-qcom-qmp-pcs-ufs-v3.h"
#include "phy-qcom-qmp-pcs-v4.h"
-#include "phy-qcom-qmp-pcs-pcie-v4.h"
-#include "phy-qcom-qmp-pcs-usb-v4.h"
-#include "phy-qcom-qmp-pcs-ufs-v4.h"
#include "phy-qcom-qmp-pcs-v4_20.h"
-#include "phy-qcom-qmp-pcs-pcie-v4_20.h"
#include "phy-qcom-qmp-pcs-v5.h"
+
#include "phy-qcom-qmp-pcs-v5_20.h"
-#include "phy-qcom-qmp-pcs-pcie-v5.h"
-#include "phy-qcom-qmp-pcs-usb-v5.h"
-#include "phy-qcom-qmp-pcs-ufs-v5.h"
-#include "phy-qcom-qmp-pcs-pcie-v5_20.h"
+#include "phy-qcom-qmp-pcs-v6.h"
-#include "phy-qcom-qmp-pcie-qhp.h"
+#include "phy-qcom-qmp-pcs-v6_20.h"
/* Only for QMP V3 & V4 PHY - DP COM registers */
#define QPHY_V3_DP_COM_PHY_MODE_CTRL 0x00
@@ -137,4 +134,8 @@
#define QPHY_V4_PCS_MISC_TYPEC_STATUS 0x10
#define QPHY_V4_PCS_MISC_PLACEHOLDER_STATUS 0x14
+/* Only for QMP V6 PHY - DP PHY registers */
+#define QSERDES_V6_DP_PHY_AUX_INTERRUPT_STATUS 0x0e0
+#define QSERDES_V6_DP_PHY_STATUS 0x0e4
+
#endif
diff --git a/drivers/phy/qualcomm/phy-qcom-snps-eusb2.c b/drivers/phy/qualcomm/phy-qcom-snps-eusb2.c
new file mode 100644
index 000000000000..eeaa1eb0e24b
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-snps-eusb2.c
@@ -0,0 +1,441 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/iopoll.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
+
+#define USB_PHY_UTMI_CTRL0 (0x3c)
+#define SLEEPM BIT(0)
+#define OPMODE_MASK GENMASK(4, 3)
+#define OPMODE_NONDRIVING BIT(3)
+
+#define USB_PHY_UTMI_CTRL5 (0x50)
+#define POR BIT(1)
+
+#define USB_PHY_HS_PHY_CTRL_COMMON0 (0x54)
+#define PHY_ENABLE BIT(0)
+#define SIDDQ_SEL BIT(1)
+#define SIDDQ BIT(2)
+#define RETENABLEN BIT(3)
+#define FSEL_MASK GENMASK(6, 4)
+#define FSEL_19_2_MHZ_VAL (0x0)
+#define FSEL_38_4_MHZ_VAL (0x4)
+
+#define USB_PHY_CFG_CTRL_1 (0x58)
+#define PHY_CFG_PLL_CPBIAS_CNTRL_MASK GENMASK(7, 1)
+
+#define USB_PHY_CFG_CTRL_2 (0x5c)
+#define PHY_CFG_PLL_FB_DIV_7_0_MASK GENMASK(7, 0)
+#define DIV_7_0_19_2_MHZ_VAL (0x90)
+#define DIV_7_0_38_4_MHZ_VAL (0xc8)
+
+#define USB_PHY_CFG_CTRL_3 (0x60)
+#define PHY_CFG_PLL_FB_DIV_11_8_MASK GENMASK(3, 0)
+#define DIV_11_8_19_2_MHZ_VAL (0x1)
+#define DIV_11_8_38_4_MHZ_VAL (0x0)
+
+#define PHY_CFG_PLL_REF_DIV GENMASK(7, 4)
+#define PLL_REF_DIV_VAL (0x0)
+
+#define USB_PHY_HS_PHY_CTRL2 (0x64)
+#define VBUSVLDEXT0 BIT(0)
+#define USB2_SUSPEND_N BIT(2)
+#define USB2_SUSPEND_N_SEL BIT(3)
+#define VBUS_DET_EXT_SEL BIT(4)
+
+#define USB_PHY_CFG_CTRL_4 (0x68)
+#define PHY_CFG_PLL_GMP_CNTRL_MASK GENMASK(1, 0)
+#define PHY_CFG_PLL_INT_CNTRL_MASK GENMASK(7, 2)
+
+#define USB_PHY_CFG_CTRL_5 (0x6c)
+#define PHY_CFG_PLL_PROP_CNTRL_MASK GENMASK(4, 0)
+#define PHY_CFG_PLL_VREF_TUNE_MASK GENMASK(7, 6)
+
+#define USB_PHY_CFG_CTRL_6 (0x70)
+#define PHY_CFG_PLL_VCO_CNTRL_MASK GENMASK(2, 0)
+
+#define USB_PHY_CFG_CTRL_7 (0x74)
+
+#define USB_PHY_CFG_CTRL_8 (0x78)
+#define PHY_CFG_TX_FSLS_VREF_TUNE_MASK GENMASK(1, 0)
+#define PHY_CFG_TX_FSLS_VREG_BYPASS BIT(2)
+#define PHY_CFG_TX_HS_VREF_TUNE_MASK GENMASK(5, 3)
+#define PHY_CFG_TX_HS_XV_TUNE_MASK GENMASK(7, 6)
+
+#define USB_PHY_CFG_CTRL_9 (0x7c)
+#define PHY_CFG_TX_PREEMP_TUNE_MASK GENMASK(2, 0)
+#define PHY_CFG_TX_RES_TUNE_MASK GENMASK(4, 3)
+#define PHY_CFG_TX_RISE_TUNE_MASK GENMASK(6, 5)
+#define PHY_CFG_RCAL_BYPASS BIT(7)
+
+#define USB_PHY_CFG_CTRL_10 (0x80)
+
+#define USB_PHY_CFG0 (0x94)
+#define DATAPATH_CTRL_OVERRIDE_EN BIT(0)
+#define CMN_CTRL_OVERRIDE_EN BIT(1)
+
+#define UTMI_PHY_CMN_CTRL0 (0x98)
+#define TESTBURNIN BIT(6)
+
+#define USB_PHY_FSEL_SEL (0xb8)
+#define FSEL_SEL BIT(0)
+
+#define USB_PHY_APB_ACCESS_CMD (0x130)
+#define RW_ACCESS BIT(0)
+#define APB_START_CMD BIT(1)
+#define APB_LOGIC_RESET BIT(2)
+
+#define USB_PHY_APB_ACCESS_STATUS (0x134)
+#define ACCESS_DONE BIT(0)
+#define TIMED_OUT BIT(1)
+#define ACCESS_ERROR BIT(2)
+#define ACCESS_IN_PROGRESS BIT(3)
+
+#define USB_PHY_APB_ADDRESS (0x138)
+#define APB_REG_ADDR_MASK GENMASK(7, 0)
+
+#define USB_PHY_APB_WRDATA_LSB (0x13c)
+#define APB_REG_WRDATA_7_0_MASK GENMASK(3, 0)
+
+#define USB_PHY_APB_WRDATA_MSB (0x140)
+#define APB_REG_WRDATA_15_8_MASK GENMASK(7, 4)
+
+#define USB_PHY_APB_RDDATA_LSB (0x144)
+#define APB_REG_RDDATA_7_0_MASK GENMASK(3, 0)
+
+#define USB_PHY_APB_RDDATA_MSB (0x148)
+#define APB_REG_RDDATA_15_8_MASK GENMASK(7, 4)
+
+static const char * const eusb2_hsphy_vreg_names[] = {
+ "vdd", "vdda12",
+};
+
+#define EUSB2_NUM_VREGS ARRAY_SIZE(eusb2_hsphy_vreg_names)
+
+struct qcom_snps_eusb2_hsphy {
+ struct phy *phy;
+ void __iomem *base;
+
+ struct clk *ref_clk;
+ struct reset_control *phy_reset;
+
+ struct regulator_bulk_data vregs[EUSB2_NUM_VREGS];
+
+ enum phy_mode mode;
+
+ struct phy *repeater;
+};
+
+static int qcom_snps_eusb2_hsphy_set_mode(struct phy *p, enum phy_mode mode, int submode)
+{
+ struct qcom_snps_eusb2_hsphy *phy = phy_get_drvdata(p);
+
+ phy->mode = mode;
+
+ return phy_set_mode_ext(phy->repeater, mode, submode);
+}
+
+static void qcom_snps_eusb2_hsphy_write_mask(void __iomem *base, u32 offset,
+ u32 mask, u32 val)
+{
+ u32 reg;
+
+ reg = readl_relaxed(base + offset);
+ reg &= ~mask;
+ reg |= val & mask;
+ writel_relaxed(reg, base + offset);
+
+ /* Ensure above write is completed */
+ readl_relaxed(base + offset);
+}
+
+static void qcom_eusb2_default_parameters(struct qcom_snps_eusb2_hsphy *phy)
+{
+ /* default parameters: tx pre-emphasis */
+ qcom_snps_eusb2_hsphy_write_mask(phy->base, USB_PHY_CFG_CTRL_9,
+ PHY_CFG_TX_PREEMP_TUNE_MASK,
+ FIELD_PREP(PHY_CFG_TX_PREEMP_TUNE_MASK, 0));
+
+ /* tx rise/fall time */
+ qcom_snps_eusb2_hsphy_write_mask(phy->base, USB_PHY_CFG_CTRL_9,
+ PHY_CFG_TX_RISE_TUNE_MASK,
+ FIELD_PREP(PHY_CFG_TX_RISE_TUNE_MASK, 0x2));
+
+ /* source impedance adjustment */
+ qcom_snps_eusb2_hsphy_write_mask(phy->base, USB_PHY_CFG_CTRL_9,
+ PHY_CFG_TX_RES_TUNE_MASK,
+ FIELD_PREP(PHY_CFG_TX_RES_TUNE_MASK, 0x1));
+
+ /* dc voltage level adjustement */
+ qcom_snps_eusb2_hsphy_write_mask(phy->base, USB_PHY_CFG_CTRL_8,
+ PHY_CFG_TX_HS_VREF_TUNE_MASK,
+ FIELD_PREP(PHY_CFG_TX_HS_VREF_TUNE_MASK, 0x3));
+
+ /* transmitter HS crossover adjustement */
+ qcom_snps_eusb2_hsphy_write_mask(phy->base, USB_PHY_CFG_CTRL_8,
+ PHY_CFG_TX_HS_XV_TUNE_MASK,
+ FIELD_PREP(PHY_CFG_TX_HS_XV_TUNE_MASK, 0x0));
+}
+
+static int qcom_eusb2_ref_clk_init(struct qcom_snps_eusb2_hsphy *phy)
+{
+ unsigned long ref_clk_freq = clk_get_rate(phy->ref_clk);
+
+ switch (ref_clk_freq) {
+ case 19200000:
+ qcom_snps_eusb2_hsphy_write_mask(phy->base, USB_PHY_HS_PHY_CTRL_COMMON0,
+ FSEL_MASK,
+ FIELD_PREP(FSEL_MASK, FSEL_19_2_MHZ_VAL));
+
+ qcom_snps_eusb2_hsphy_write_mask(phy->base, USB_PHY_CFG_CTRL_2,
+ PHY_CFG_PLL_FB_DIV_7_0_MASK,
+ DIV_7_0_19_2_MHZ_VAL);
+
+ qcom_snps_eusb2_hsphy_write_mask(phy->base, USB_PHY_CFG_CTRL_3,
+ PHY_CFG_PLL_FB_DIV_11_8_MASK,
+ DIV_11_8_19_2_MHZ_VAL);
+ break;
+
+ case 38400000:
+ qcom_snps_eusb2_hsphy_write_mask(phy->base, USB_PHY_HS_PHY_CTRL_COMMON0,
+ FSEL_MASK,
+ FIELD_PREP(FSEL_MASK, FSEL_38_4_MHZ_VAL));
+
+ qcom_snps_eusb2_hsphy_write_mask(phy->base, USB_PHY_CFG_CTRL_2,
+ PHY_CFG_PLL_FB_DIV_7_0_MASK,
+ DIV_7_0_38_4_MHZ_VAL);
+
+ qcom_snps_eusb2_hsphy_write_mask(phy->base, USB_PHY_CFG_CTRL_3,
+ PHY_CFG_PLL_FB_DIV_11_8_MASK,
+ DIV_11_8_38_4_MHZ_VAL);
+ break;
+
+ default:
+ dev_err(&phy->phy->dev, "unsupported ref_clk_freq:%lu\n", ref_clk_freq);
+ return -EINVAL;
+ }
+
+ qcom_snps_eusb2_hsphy_write_mask(phy->base, USB_PHY_CFG_CTRL_3,
+ PHY_CFG_PLL_REF_DIV, PLL_REF_DIV_VAL);
+
+ return 0;
+}
+
+static int qcom_snps_eusb2_hsphy_init(struct phy *p)
+{
+ struct qcom_snps_eusb2_hsphy *phy = phy_get_drvdata(p);
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(phy->vregs), phy->vregs);
+ if (ret)
+ return ret;
+
+ ret = phy_init(phy->repeater);
+ if (ret) {
+ dev_err(&p->dev, "repeater init failed. %d\n", ret);
+ goto disable_vreg;
+ }
+
+ ret = clk_prepare_enable(phy->ref_clk);
+ if (ret) {
+ dev_err(&p->dev, "failed to enable ref clock, %d\n", ret);
+ goto disable_vreg;
+ }
+
+ ret = reset_control_assert(phy->phy_reset);
+ if (ret) {
+ dev_err(&p->dev, "failed to assert phy_reset, %d\n", ret);
+ goto disable_ref_clk;
+ }
+
+ usleep_range(100, 150);
+
+ ret = reset_control_deassert(phy->phy_reset);
+ if (ret) {
+ dev_err(&p->dev, "failed to de-assert phy_reset, %d\n", ret);
+ goto disable_ref_clk;
+ }
+
+ qcom_snps_eusb2_hsphy_write_mask(phy->base, USB_PHY_CFG0,
+ CMN_CTRL_OVERRIDE_EN, CMN_CTRL_OVERRIDE_EN);
+
+ qcom_snps_eusb2_hsphy_write_mask(phy->base, USB_PHY_UTMI_CTRL5, POR, POR);
+
+ qcom_snps_eusb2_hsphy_write_mask(phy->base, USB_PHY_HS_PHY_CTRL_COMMON0,
+ PHY_ENABLE | RETENABLEN, PHY_ENABLE | RETENABLEN);
+
+ qcom_snps_eusb2_hsphy_write_mask(phy->base, USB_PHY_APB_ACCESS_CMD,
+ APB_LOGIC_RESET, APB_LOGIC_RESET);
+
+ qcom_snps_eusb2_hsphy_write_mask(phy->base, UTMI_PHY_CMN_CTRL0, TESTBURNIN, 0);
+
+ qcom_snps_eusb2_hsphy_write_mask(phy->base, USB_PHY_FSEL_SEL,
+ FSEL_SEL, FSEL_SEL);
+
+ /* update ref_clk related registers */
+ ret = qcom_eusb2_ref_clk_init(phy);
+ if (ret)
+ goto disable_ref_clk;
+
+ qcom_snps_eusb2_hsphy_write_mask(phy->base, USB_PHY_CFG_CTRL_1,
+ PHY_CFG_PLL_CPBIAS_CNTRL_MASK,
+ FIELD_PREP(PHY_CFG_PLL_CPBIAS_CNTRL_MASK, 0x1));
+
+ qcom_snps_eusb2_hsphy_write_mask(phy->base, USB_PHY_CFG_CTRL_4,
+ PHY_CFG_PLL_INT_CNTRL_MASK,
+ FIELD_PREP(PHY_CFG_PLL_INT_CNTRL_MASK, 0x8));
+
+ qcom_snps_eusb2_hsphy_write_mask(phy->base, USB_PHY_CFG_CTRL_4,
+ PHY_CFG_PLL_GMP_CNTRL_MASK,
+ FIELD_PREP(PHY_CFG_PLL_GMP_CNTRL_MASK, 0x1));
+
+ qcom_snps_eusb2_hsphy_write_mask(phy->base, USB_PHY_CFG_CTRL_5,
+ PHY_CFG_PLL_PROP_CNTRL_MASK,
+ FIELD_PREP(PHY_CFG_PLL_PROP_CNTRL_MASK, 0x10));
+
+ qcom_snps_eusb2_hsphy_write_mask(phy->base, USB_PHY_CFG_CTRL_6,
+ PHY_CFG_PLL_VCO_CNTRL_MASK,
+ FIELD_PREP(PHY_CFG_PLL_VCO_CNTRL_MASK, 0x0));
+
+ qcom_snps_eusb2_hsphy_write_mask(phy->base, USB_PHY_CFG_CTRL_5,
+ PHY_CFG_PLL_VREF_TUNE_MASK,
+ FIELD_PREP(PHY_CFG_PLL_VREF_TUNE_MASK, 0x1));
+
+ qcom_snps_eusb2_hsphy_write_mask(phy->base, USB_PHY_HS_PHY_CTRL2,
+ VBUS_DET_EXT_SEL, VBUS_DET_EXT_SEL);
+
+ /* set default parameters */
+ qcom_eusb2_default_parameters(phy);
+
+ qcom_snps_eusb2_hsphy_write_mask(phy->base, USB_PHY_HS_PHY_CTRL2,
+ USB2_SUSPEND_N_SEL | USB2_SUSPEND_N,
+ USB2_SUSPEND_N_SEL | USB2_SUSPEND_N);
+
+ qcom_snps_eusb2_hsphy_write_mask(phy->base, USB_PHY_UTMI_CTRL0, SLEEPM, SLEEPM);
+
+ qcom_snps_eusb2_hsphy_write_mask(phy->base, USB_PHY_HS_PHY_CTRL_COMMON0,
+ SIDDQ_SEL, SIDDQ_SEL);
+
+ qcom_snps_eusb2_hsphy_write_mask(phy->base, USB_PHY_HS_PHY_CTRL_COMMON0,
+ SIDDQ, 0);
+
+ qcom_snps_eusb2_hsphy_write_mask(phy->base, USB_PHY_UTMI_CTRL5, POR, 0);
+
+ qcom_snps_eusb2_hsphy_write_mask(phy->base, USB_PHY_HS_PHY_CTRL2,
+ USB2_SUSPEND_N_SEL, 0);
+
+ return 0;
+
+disable_ref_clk:
+ clk_disable_unprepare(phy->ref_clk);
+
+disable_vreg:
+ regulator_bulk_disable(ARRAY_SIZE(phy->vregs), phy->vregs);
+
+ return ret;
+}
+
+static int qcom_snps_eusb2_hsphy_exit(struct phy *p)
+{
+ struct qcom_snps_eusb2_hsphy *phy = phy_get_drvdata(p);
+
+ clk_disable_unprepare(phy->ref_clk);
+
+ regulator_bulk_disable(ARRAY_SIZE(phy->vregs), phy->vregs);
+
+ phy_exit(phy->repeater);
+
+ return 0;
+}
+
+static const struct phy_ops qcom_snps_eusb2_hsphy_ops = {
+ .init = qcom_snps_eusb2_hsphy_init,
+ .exit = qcom_snps_eusb2_hsphy_exit,
+ .set_mode = qcom_snps_eusb2_hsphy_set_mode,
+ .owner = THIS_MODULE,
+};
+
+static int qcom_snps_eusb2_hsphy_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct qcom_snps_eusb2_hsphy *phy;
+ struct phy_provider *phy_provider;
+ struct phy *generic_phy;
+ int ret, i;
+ int num;
+
+ phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
+ if (!phy)
+ return -ENOMEM;
+
+ phy->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(phy->base))
+ return PTR_ERR(phy->base);
+
+ phy->phy_reset = devm_reset_control_get_exclusive(dev, NULL);
+ if (IS_ERR(phy->phy_reset))
+ return PTR_ERR(phy->phy_reset);
+
+ phy->ref_clk = devm_clk_get(dev, "ref");
+ if (IS_ERR(phy->ref_clk))
+ return dev_err_probe(dev, PTR_ERR(phy->ref_clk),
+ "failed to get ref clk\n");
+
+ num = ARRAY_SIZE(phy->vregs);
+ for (i = 0; i < num; i++)
+ phy->vregs[i].supply = eusb2_hsphy_vreg_names[i];
+
+ ret = devm_regulator_bulk_get(dev, num, phy->vregs);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "failed to get regulator supplies\n");
+
+ phy->repeater = devm_of_phy_get_by_index(dev, np, 0);
+ if (IS_ERR(phy->repeater))
+ return dev_err_probe(dev, PTR_ERR(phy->repeater),
+ "failed to get repeater\n");
+
+ generic_phy = devm_phy_create(dev, NULL, &qcom_snps_eusb2_hsphy_ops);
+ if (IS_ERR(generic_phy)) {
+ dev_err(dev, "failed to create phy %d\n", ret);
+ return PTR_ERR(generic_phy);
+ }
+
+ dev_set_drvdata(dev, phy);
+ phy_set_drvdata(generic_phy, phy);
+
+ phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+ if (IS_ERR(phy_provider))
+ return PTR_ERR(phy_provider);
+
+ dev_info(dev, "Registered Qcom-eUSB2 phy\n");
+
+ return 0;
+}
+
+static const struct of_device_id qcom_snps_eusb2_hsphy_of_match_table[] = {
+ { .compatible = "qcom,sm8550-snps-eusb2-phy", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, qcom_snps_eusb2_hsphy_of_match_table);
+
+static struct platform_driver qcom_snps_eusb2_hsphy_driver = {
+ .probe = qcom_snps_eusb2_hsphy_probe,
+ .driver = {
+ .name = "qcom-snps-eusb2-hsphy",
+ .of_match_table = qcom_snps_eusb2_hsphy_of_match_table,
+ },
+};
+
+module_platform_driver(qcom_snps_eusb2_hsphy_driver);
+MODULE_DESCRIPTION("Qualcomm SNPS eUSB2 HS PHY driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/phy/renesas/r8a779f0-ether-serdes.c b/drivers/phy/renesas/r8a779f0-ether-serdes.c
index e7588a940d69..c5206ef9195b 100644
--- a/drivers/phy/renesas/r8a779f0-ether-serdes.c
+++ b/drivers/phy/renesas/r8a779f0-ether-serdes.c
@@ -18,7 +18,6 @@
#define R8A779F0_ETH_SERDES_BANK_SELECT 0x03fc
#define R8A779F0_ETH_SERDES_TIMEOUT_US 100000
#define R8A779F0_ETH_SERDES_NUM_RETRY_LINKUP 3
-#define R8A779F0_ETH_SERDES_NUM_RETRY_INIT 3
struct r8a779f0_eth_serdes_drv_data;
struct r8a779f0_eth_serdes_channel {
@@ -242,51 +241,46 @@ static int r8a779f0_eth_serdes_hw_init(struct r8a779f0_eth_serdes_channel *chann
if (ret)
return ret;
- ret = r8a779f0_eth_serdes_reg_wait(&dd->channel[0], 0x0000, 0x380, BIT(15), 0);
+ return r8a779f0_eth_serdes_reg_wait(&dd->channel[0], 0x0000, 0x380, BIT(15), 0);
+}
+
+static int r8a779f0_eth_serdes_init(struct phy *p)
+{
+ struct r8a779f0_eth_serdes_channel *channel = phy_get_drvdata(p);
+ int ret;
+
+ ret = r8a779f0_eth_serdes_hw_init(channel);
+ if (!ret)
+ channel->dd->initialized = true;
+
+ return ret;
+}
+
+static int r8a779f0_eth_serdes_hw_init_late(struct r8a779f0_eth_serdes_channel
+*channel)
+{
+ int ret;
+
+ ret = r8a779f0_eth_serdes_chan_setting(channel);
if (ret)
return ret;
- for (i = 0; i < R8A779F0_ETH_SERDES_NUM; i++) {
- ret = r8a779f0_eth_serdes_chan_setting(&dd->channel[i]);
- if (ret)
- return ret;
- }
-
- for (i = 0; i < R8A779F0_ETH_SERDES_NUM; i++) {
- ret = r8a779f0_eth_serdes_chan_speed(&dd->channel[i]);
- if (ret)
- return ret;
- }
+ ret = r8a779f0_eth_serdes_chan_speed(channel);
+ if (ret)
+ return ret;
- for (i = 0; i < R8A779F0_ETH_SERDES_NUM; i++)
- r8a779f0_eth_serdes_write32(dd->channel[i].addr, 0x03c0, 0x380, 0x0000);
- for (i = 0; i < R8A779F0_ETH_SERDES_NUM; i++)
- r8a779f0_eth_serdes_write32(dd->channel[i].addr, 0x03d0, 0x380, 0x0000);
+ r8a779f0_eth_serdes_write32(channel->addr, 0x03c0, 0x380, 0x0000);
- for (i = 0; i < R8A779F0_ETH_SERDES_NUM; i++) {
- ret = r8a779f0_eth_serdes_monitor_linkup(&dd->channel[i]);
- if (ret)
- return ret;
- }
+ r8a779f0_eth_serdes_write32(channel->addr, 0x03d0, 0x380, 0x0000);
- return 0;
+ return r8a779f0_eth_serdes_monitor_linkup(channel);
}
-static int r8a779f0_eth_serdes_init(struct phy *p)
+static int r8a779f0_eth_serdes_power_on(struct phy *p)
{
struct r8a779f0_eth_serdes_channel *channel = phy_get_drvdata(p);
- int i, ret;
- for (i = 0; i < R8A779F0_ETH_SERDES_NUM_RETRY_INIT; i++) {
- ret = r8a779f0_eth_serdes_hw_init(channel);
- if (!ret) {
- channel->dd->initialized = true;
- break;
- }
- usleep_range(1000, 2000);
- }
-
- return ret;
+ return r8a779f0_eth_serdes_hw_init_late(channel);
}
static int r8a779f0_eth_serdes_set_mode(struct phy *p, enum phy_mode mode,
@@ -319,6 +313,7 @@ static int r8a779f0_eth_serdes_set_speed(struct phy *p, int speed)
static const struct phy_ops r8a779f0_eth_serdes_ops = {
.init = r8a779f0_eth_serdes_init,
+ .power_on = r8a779f0_eth_serdes_power_on,
.set_mode = r8a779f0_eth_serdes_set_mode,
.set_speed = r8a779f0_eth_serdes_set_speed,
};
diff --git a/drivers/phy/rockchip/phy-rockchip-typec.c b/drivers/phy/rockchip/phy-rockchip-typec.c
index d76440ae10ff..39db8acde61a 100644
--- a/drivers/phy/rockchip/phy-rockchip-typec.c
+++ b/drivers/phy/rockchip/phy-rockchip-typec.c
@@ -808,9 +808,8 @@ static int tcphy_get_mode(struct rockchip_typec_phy *tcphy)
struct extcon_dev *edev = tcphy->extcon;
union extcon_property_value property;
unsigned int id;
- bool ufp, dp;
u8 mode;
- int ret;
+ int ret, ufp, dp;
if (!edev)
return MODE_DFP_USB;
@@ -821,10 +820,10 @@ static int tcphy_get_mode(struct rockchip_typec_phy *tcphy)
mode = MODE_DFP_USB;
id = EXTCON_USB_HOST;
- if (ufp) {
+ if (ufp > 0) {
mode = MODE_UFP_USB;
id = EXTCON_USB;
- } else if (dp) {
+ } else if (dp > 0) {
mode = MODE_DFP_DP;
id = EXTCON_DISP_DP;
diff --git a/drivers/phy/tegra/Makefile b/drivers/phy/tegra/Makefile
index 89b84067cb4c..eeeea72de117 100644
--- a/drivers/phy/tegra/Makefile
+++ b/drivers/phy/tegra/Makefile
@@ -7,4 +7,5 @@ phy-tegra-xusb-$(CONFIG_ARCH_TEGRA_132_SOC) += xusb-tegra124.o
phy-tegra-xusb-$(CONFIG_ARCH_TEGRA_210_SOC) += xusb-tegra210.o
phy-tegra-xusb-$(CONFIG_ARCH_TEGRA_186_SOC) += xusb-tegra186.o
phy-tegra-xusb-$(CONFIG_ARCH_TEGRA_194_SOC) += xusb-tegra186.o
+phy-tegra-xusb-$(CONFIG_ARCH_TEGRA_234_SOC) += xusb-tegra186.o
obj-$(CONFIG_PHY_TEGRA194_P2U) += phy-tegra194-p2u.o
diff --git a/drivers/phy/tegra/xusb-tegra186.c b/drivers/phy/tegra/xusb-tegra186.c
index 6a8bd87cfdbd..1aae8535f452 100644
--- a/drivers/phy/tegra/xusb-tegra186.c
+++ b/drivers/phy/tegra/xusb-tegra186.c
@@ -89,6 +89,11 @@
#define USB2_TRK_START_TIMER(x) (((x) & 0x7f) << 12)
#define USB2_TRK_DONE_RESET_TIMER(x) (((x) & 0x7f) << 19)
#define USB2_PD_TRK BIT(26)
+#define USB2_TRK_COMPLETED BIT(31)
+
+#define XUSB_PADCTL_USB2_BIAS_PAD_CTL2 0x28c
+#define USB2_TRK_HW_MODE BIT(0)
+#define CYA_TRK_CODE_UPDATE_ON_IDLE BIT(31)
#define XUSB_PADCTL_HSIC_PADX_CTL0(x) (0x300 + (x) * 0x20)
#define HSIC_PD_TX_DATA0 BIT(1)
@@ -609,6 +614,32 @@ static void tegra186_utmi_bias_pad_power_on(struct tegra_xusb_padctl *padctl)
value &= ~USB2_PD_TRK;
padctl_writel(padctl, value, XUSB_PADCTL_USB2_BIAS_PAD_CTL1);
+ if (padctl->soc->poll_trk_completed) {
+ err = padctl_readl_poll(padctl, XUSB_PADCTL_USB2_BIAS_PAD_CTL1,
+ USB2_TRK_COMPLETED, USB2_TRK_COMPLETED, 100);
+ if (err) {
+ /* The failure with polling on trk complete will not
+ * cause the failure of powering on the bias pad.
+ */
+ dev_warn(dev, "failed to poll USB2 trk completed: %d\n", err);
+ }
+
+ value = padctl_readl(padctl, XUSB_PADCTL_USB2_BIAS_PAD_CTL1);
+ value |= USB2_TRK_COMPLETED;
+ padctl_writel(padctl, value, XUSB_PADCTL_USB2_BIAS_PAD_CTL1);
+ } else {
+ udelay(100);
+ }
+
+ if (padctl->soc->trk_hw_mode) {
+ value = padctl_readl(padctl, XUSB_PADCTL_USB2_BIAS_PAD_CTL2);
+ value |= USB2_TRK_HW_MODE;
+ value &= ~CYA_TRK_CODE_UPDATE_ON_IDLE;
+ padctl_writel(padctl, value, XUSB_PADCTL_USB2_BIAS_PAD_CTL2);
+ } else {
+ clk_disable_unprepare(priv->usb2_trk_clk);
+ }
+
mutex_unlock(&padctl->lock);
}
@@ -633,7 +664,12 @@ static void tegra186_utmi_bias_pad_power_off(struct tegra_xusb_padctl *padctl)
value |= USB2_PD_TRK;
padctl_writel(padctl, value, XUSB_PADCTL_USB2_BIAS_PAD_CTL1);
- clk_disable_unprepare(priv->usb2_trk_clk);
+ if (padctl->soc->trk_hw_mode) {
+ value = padctl_readl(padctl, XUSB_PADCTL_USB2_BIAS_PAD_CTL2);
+ value &= ~USB2_TRK_HW_MODE;
+ padctl_writel(padctl, value, XUSB_PADCTL_USB2_BIAS_PAD_CTL2);
+ clk_disable_unprepare(priv->usb2_trk_clk);
+ }
mutex_unlock(&padctl->lock);
}
@@ -1557,7 +1593,8 @@ const struct tegra_xusb_padctl_soc tegra186_xusb_padctl_soc = {
EXPORT_SYMBOL_GPL(tegra186_xusb_padctl_soc);
#endif
-#if IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC)
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC) || \
+ IS_ENABLED(CONFIG_ARCH_TEGRA_234_SOC)
static const char * const tegra194_xusb_padctl_supply_names[] = {
"avdd-usb",
"vclamp-usb",
@@ -1613,8 +1650,31 @@ const struct tegra_xusb_padctl_soc tegra194_xusb_padctl_soc = {
.supply_names = tegra194_xusb_padctl_supply_names,
.num_supplies = ARRAY_SIZE(tegra194_xusb_padctl_supply_names),
.supports_gen2 = true,
+ .poll_trk_completed = true,
};
EXPORT_SYMBOL_GPL(tegra194_xusb_padctl_soc);
+
+const struct tegra_xusb_padctl_soc tegra234_xusb_padctl_soc = {
+ .num_pads = ARRAY_SIZE(tegra194_pads),
+ .pads = tegra194_pads,
+ .ports = {
+ .usb2 = {
+ .ops = &tegra186_usb2_port_ops,
+ .count = 4,
+ },
+ .usb3 = {
+ .ops = &tegra186_usb3_port_ops,
+ .count = 4,
+ },
+ },
+ .ops = &tegra186_xusb_padctl_ops,
+ .supply_names = tegra194_xusb_padctl_supply_names,
+ .num_supplies = ARRAY_SIZE(tegra194_xusb_padctl_supply_names),
+ .supports_gen2 = true,
+ .poll_trk_completed = true,
+ .trk_hw_mode = true,
+};
+EXPORT_SYMBOL_GPL(tegra234_xusb_padctl_soc);
#endif
MODULE_AUTHOR("JC Kuo <jckuo@nvidia.com>");
diff --git a/drivers/phy/tegra/xusb.c b/drivers/phy/tegra/xusb.c
index ff4b930879f3..78045bd6c214 100644
--- a/drivers/phy/tegra/xusb.c
+++ b/drivers/phy/tegra/xusb.c
@@ -72,6 +72,12 @@ static const struct of_device_id tegra_xusb_padctl_of_match[] = {
.data = &tegra194_xusb_padctl_soc,
},
#endif
+#if defined(CONFIG_ARCH_TEGRA_234_SOC)
+ {
+ .compatible = "nvidia,tegra234-xusb-padctl",
+ .data = &tegra234_xusb_padctl_soc,
+ },
+#endif
{ }
};
MODULE_DEVICE_TABLE(of, tegra_xusb_padctl_of_match);
@@ -712,6 +718,22 @@ static int tegra_xusb_setup_usb_role_switch(struct tegra_xusb_port *port)
return err;
}
+static void tegra_xusb_parse_usb_role_default_mode(struct tegra_xusb_port *port)
+{
+ enum usb_role role = USB_ROLE_NONE;
+ enum usb_dr_mode mode = usb_get_role_switch_default_mode(&port->dev);
+
+ if (mode == USB_DR_MODE_HOST)
+ role = USB_ROLE_HOST;
+ else if (mode == USB_DR_MODE_PERIPHERAL)
+ role = USB_ROLE_DEVICE;
+
+ if (role != USB_ROLE_NONE) {
+ usb_role_switch_set_role(port->usb_role_sw, role);
+ dev_dbg(&port->dev, "usb role default mode is %s", modes[mode]);
+ }
+}
+
static int tegra_xusb_usb2_port_parse_dt(struct tegra_xusb_usb2_port *usb2)
{
struct tegra_xusb_port *port = &usb2->base;
@@ -741,6 +763,7 @@ static int tegra_xusb_usb2_port_parse_dt(struct tegra_xusb_usb2_port *usb2)
err = tegra_xusb_setup_usb_role_switch(port);
if (err < 0)
return err;
+ tegra_xusb_parse_usb_role_default_mode(port);
} else {
dev_err(&port->dev, "usb-role-switch not found for %s mode",
modes[usb2->mode]);
diff --git a/drivers/phy/tegra/xusb.h b/drivers/phy/tegra/xusb.h
index c384734a61c2..8bd6cd281119 100644
--- a/drivers/phy/tegra/xusb.h
+++ b/drivers/phy/tegra/xusb.h
@@ -8,6 +8,7 @@
#define __PHY_TEGRA_XUSB_H
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
@@ -431,6 +432,8 @@ struct tegra_xusb_padctl_soc {
unsigned int num_supplies;
bool supports_gen2;
bool need_fake_usb3_port;
+ bool poll_trk_completed;
+ bool trk_hw_mode;
};
struct tegra_xusb_padctl {
@@ -473,6 +476,23 @@ static inline u32 padctl_readl(struct tegra_xusb_padctl *padctl,
return value;
}
+static inline u32 padctl_readl_poll(struct tegra_xusb_padctl *padctl,
+ unsigned long offset, u32 val, u32 mask,
+ int us)
+{
+ u32 regval;
+ int err;
+
+ err = readl_poll_timeout(padctl->regs + offset, regval,
+ (regval & mask) == val, 1, us);
+ if (err) {
+ dev_err(padctl->dev, "%08lx poll timeout > %08x\n", offset,
+ regval);
+ }
+
+ return err;
+}
+
struct tegra_xusb_lane *tegra_xusb_find_lane(struct tegra_xusb_padctl *padctl,
const char *name,
unsigned int index);
@@ -489,5 +509,8 @@ extern const struct tegra_xusb_padctl_soc tegra186_xusb_padctl_soc;
#if defined(CONFIG_ARCH_TEGRA_194_SOC)
extern const struct tegra_xusb_padctl_soc tegra194_xusb_padctl_soc;
#endif
+#if defined(CONFIG_ARCH_TEGRA_234_SOC)
+extern const struct tegra_xusb_padctl_soc tegra234_xusb_padctl_soc;
+#endif
#endif /* __PHY_TEGRA_XUSB_H */
diff --git a/drivers/phy/ti/phy-j721e-wiz.c b/drivers/phy/ti/phy-j721e-wiz.c
index ddce5ef7711c..1b83c98a78f0 100644
--- a/drivers/phy/ti/phy-j721e-wiz.c
+++ b/drivers/phy/ti/phy-j721e-wiz.c
@@ -58,6 +58,14 @@ enum wiz_lane_standard_mode {
LANE_MODE_GEN4,
};
+/*
+ * List of master lanes used for lane swapping
+ */
+enum wiz_typec_master_lane {
+ LANE0 = 0,
+ LANE2 = 2,
+};
+
enum wiz_refclk_mux_sel {
PLL0_REFCLK,
PLL1_REFCLK,
@@ -194,6 +202,9 @@ static const struct reg_field p_mac_div_sel1[WIZ_MAX_LANES] = {
static const struct reg_field typec_ln10_swap =
REG_FIELD(WIZ_SERDES_TYPEC, 30, 30);
+static const struct reg_field typec_ln23_swap =
+ REG_FIELD(WIZ_SERDES_TYPEC, 31, 31);
+
struct wiz_clk_mux {
struct clk_hw hw;
struct regmap_field *field;
@@ -367,6 +378,7 @@ struct wiz {
struct regmap_field *mux_sel_field[WIZ_MUX_NUM_CLOCKS];
struct regmap_field *div_sel_field[WIZ_DIV_NUM_CLOCKS_16G];
struct regmap_field *typec_ln10_swap;
+ struct regmap_field *typec_ln23_swap;
struct regmap_field *sup_legacy_clk_override;
struct device *dev;
@@ -376,6 +388,7 @@ struct wiz {
struct gpio_desc *gpio_typec_dir;
int typec_dir_delay;
u32 lane_phy_type[WIZ_MAX_LANES];
+ u32 master_lane_num[WIZ_MAX_LANES];
struct clk *input_clks[WIZ_MAX_INPUT_CLOCKS];
struct clk *output_clks[WIZ_MAX_OUTPUT_CLOCKS];
struct clk_onecell_data clk_data;
@@ -675,6 +688,13 @@ static int wiz_regfield_init(struct wiz *wiz)
return PTR_ERR(wiz->typec_ln10_swap);
}
+ wiz->typec_ln23_swap = devm_regmap_field_alloc(dev, regmap,
+ typec_ln23_swap);
+ if (IS_ERR(wiz->typec_ln23_swap)) {
+ dev_err(dev, "LN23_SWAP reg field init failed\n");
+ return PTR_ERR(wiz->typec_ln23_swap);
+ }
+
wiz->phy_en_refclk = devm_regmap_field_alloc(dev, regmap, phy_en_refclk);
if (IS_ERR(wiz->phy_en_refclk)) {
dev_err(dev, "PHY_EN_REFCLK reg field init failed\n");
@@ -1234,15 +1254,39 @@ static int wiz_phy_reset_deassert(struct reset_controller_dev *rcdev,
struct wiz *wiz = dev_get_drvdata(dev);
int ret;
- /* if typec-dir gpio was specified, set LN10 SWAP bit based on that */
- if (id == 0 && wiz->gpio_typec_dir) {
- if (wiz->typec_dir_delay)
- msleep_interruptible(wiz->typec_dir_delay);
-
- if (gpiod_get_value_cansleep(wiz->gpio_typec_dir))
- regmap_field_write(wiz->typec_ln10_swap, 1);
- else
- regmap_field_write(wiz->typec_ln10_swap, 0);
+ if (id == 0) {
+ /* if typec-dir gpio was specified, set LN10 SWAP bit based on that */
+ if (wiz->gpio_typec_dir) {
+ if (wiz->typec_dir_delay)
+ msleep_interruptible(wiz->typec_dir_delay);
+
+ if (gpiod_get_value_cansleep(wiz->gpio_typec_dir))
+ regmap_field_write(wiz->typec_ln10_swap, 1);
+ else
+ regmap_field_write(wiz->typec_ln10_swap, 0);
+ } else {
+ /* if no typec-dir gpio is specified and PHY type is USB3
+ * with master lane number is '0' or '2', then set LN10 or
+ * LN23 SWAP bit to '1' respectively.
+ */
+ u32 num_lanes = wiz->num_lanes;
+ int i;
+
+ for (i = 0; i < num_lanes; i++) {
+ if (wiz->lane_phy_type[i] == PHY_TYPE_USB3) {
+ switch (wiz->master_lane_num[i]) {
+ case LANE0:
+ regmap_field_write(wiz->typec_ln10_swap, 1);
+ break;
+ case LANE2:
+ regmap_field_write(wiz->typec_ln23_swap, 1);
+ break;
+ default:
+ break;
+ }
+ }
+ }
+ }
}
if (id == 0) {
@@ -1386,8 +1430,10 @@ static int wiz_get_lane_phy_types(struct device *dev, struct wiz *wiz)
dev_dbg(dev, "%s: Lanes %u-%u have phy-type %u\n", __func__,
reg, reg + num_lanes - 1, phy_type);
- for (i = reg; i < reg + num_lanes; i++)
+ for (i = reg; i < reg + num_lanes; i++) {
+ wiz->master_lane_num[i] = reg;
wiz->lane_phy_type[i] = phy_type;
+ }
}
return 0;
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index 7d5f5458c72e..dcb53c4a9584 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -172,7 +172,7 @@ config PINCTRL_DA9062
config PINCTRL_DIGICOLOR
bool
- depends on OF && (ARCH_DIGICOLOR || COMPILE_TEST)
+ depends on ARCH_DIGICOLOR || COMPILE_TEST
select PINMUX
select GENERIC_PINCONF
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed.c b/drivers/pinctrl/aspeed/pinctrl-aspeed.c
index 3945612900e6..9c6ee46ac7a0 100644
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed.c
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed.c
@@ -93,10 +93,19 @@ static int aspeed_sig_expr_enable(struct aspeed_pinmux_data *ctx,
static int aspeed_sig_expr_disable(struct aspeed_pinmux_data *ctx,
const struct aspeed_sig_expr *expr)
{
+ int ret;
+
pr_debug("Disabling signal %s for %s\n", expr->signal,
expr->function);
- return aspeed_sig_expr_set(ctx, expr, false);
+ ret = aspeed_sig_expr_eval(ctx, expr, true);
+ if (ret < 0)
+ return ret;
+
+ if (ret)
+ return aspeed_sig_expr_set(ctx, expr, false);
+
+ return 0;
}
/**
@@ -114,7 +123,7 @@ static int aspeed_disable_sig(struct aspeed_pinmux_data *ctx,
int ret = 0;
if (!exprs)
- return true;
+ return -EINVAL;
while (*exprs && !ret) {
ret = aspeed_sig_expr_disable(ctx, *exprs);
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
index 7857e612a100..8e2551a08c37 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
@@ -358,13 +358,11 @@ static int bcm2835_gpio_direction_output(struct gpio_chip *chip,
return 0;
}
-static int bcm2835_of_gpio_ranges_fallback(struct gpio_chip *gc,
- struct device_node *np)
+static int bcm2835_add_pin_ranges_fallback(struct gpio_chip *gc)
{
+ struct device_node *np = dev_of_node(gc->parent);
struct pinctrl_dev *pctldev = of_pinctrl_get(np);
- of_node_put(np);
-
if (!pctldev)
return 0;
@@ -388,7 +386,7 @@ static const struct gpio_chip bcm2835_gpio_chip = {
.base = -1,
.ngpio = BCM2835_NUM_GPIOS,
.can_sleep = false,
- .of_gpio_ranges_fallback = bcm2835_of_gpio_ranges_fallback,
+ .add_pin_ranges = bcm2835_add_pin_ranges_fallback,
};
static const struct gpio_chip bcm2711_gpio_chip = {
@@ -405,7 +403,7 @@ static const struct gpio_chip bcm2711_gpio_chip = {
.base = -1,
.ngpio = BCM2711_NUM_GPIOS,
.can_sleep = false,
- .of_gpio_ranges_fallback = bcm2835_of_gpio_ranges_fallback,
+ .add_pin_ranges = bcm2835_add_pin_ranges_fallback,
};
static void bcm2835_gpio_irq_handle_bank(struct bcm2835_pinctrl *pc,
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index 41fd84738707..d6e6c751255f 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -325,7 +325,12 @@ static bool pinctrl_ready_for_gpio_range(unsigned gpio)
{
struct pinctrl_dev *pctldev;
struct pinctrl_gpio_range *range = NULL;
- struct gpio_chip *chip = gpio_to_chip(gpio);
+ /*
+ * FIXME: "gpio" here is a number in the global GPIO numberspace.
+ * get rid of this from the ranges eventually and get the GPIO
+ * descriptor from the gpio_chip.
+ */
+ struct gpio_chip *chip = gpiod_to_chip(gpio_to_desc(gpio));
if (WARN(!chip, "no gpio_chip for gpio%i?", gpio))
return false;
@@ -1657,7 +1662,12 @@ static int pinctrl_pins_show(struct seq_file *s, void *what)
}
}
if (gpio_num >= 0)
- chip = gpio_to_chip(gpio_num);
+ /*
+ * FIXME: gpio_num comes from the global GPIO numberspace.
+ * we need to get rid of the range->base eventually and
+ * get the descriptor directly from the gpio_chip.
+ */
+ chip = gpiod_to_chip(gpio_to_desc(gpio_num));
else
chip = NULL;
if (chip)
diff --git a/drivers/pinctrl/freescale/pinctrl-mxs.c b/drivers/pinctrl/freescale/pinctrl-mxs.c
index 9f78c9b29ddd..cf3f4d2e0c16 100644
--- a/drivers/pinctrl/freescale/pinctrl-mxs.c
+++ b/drivers/pinctrl/freescale/pinctrl-mxs.c
@@ -269,9 +269,9 @@ static int mxs_pinconf_group_set(struct pinctrl_dev *pctldev,
for (n = 0; n < num_configs; n++) {
config = configs[n];
- ma = CONFIG_TO_MA(config);
- vol = CONFIG_TO_VOL(config);
- pull = CONFIG_TO_PULL(config);
+ ma = PIN_CONFIG_TO_MA(config);
+ vol = PIN_CONFIG_TO_VOL(config);
+ pull = PIN_CONFIG_TO_PULL(config);
for (i = 0; i < g->npins; i++) {
bank = PINID_TO_BANK(g->pins[i]);
diff --git a/drivers/pinctrl/freescale/pinctrl-mxs.h b/drivers/pinctrl/freescale/pinctrl-mxs.h
index ab9f834b03e6..5b26511d56aa 100644
--- a/drivers/pinctrl/freescale/pinctrl-mxs.h
+++ b/drivers/pinctrl/freescale/pinctrl-mxs.h
@@ -44,9 +44,9 @@
#define VOL_SHIFT 3
#define MA_PRESENT (1 << 2)
#define MA_SHIFT 0
-#define CONFIG_TO_PULL(c) ((c) >> PULL_SHIFT & 0x1)
-#define CONFIG_TO_VOL(c) ((c) >> VOL_SHIFT & 0x1)
-#define CONFIG_TO_MA(c) ((c) >> MA_SHIFT & 0x3)
+#define PIN_CONFIG_TO_PULL(c) ((c) >> PULL_SHIFT & 0x1)
+#define PIN_CONFIG_TO_VOL(c) ((c) >> VOL_SHIFT & 0x1)
+#define PIN_CONFIG_TO_MA(c) ((c) >> MA_SHIFT & 0x3)
struct mxs_function {
const char *name;
diff --git a/drivers/pinctrl/intel/pinctrl-alderlake.c b/drivers/pinctrl/intel/pinctrl-alderlake.c
index 427febe09b69..55bbfd647ba4 100644
--- a/drivers/pinctrl/intel/pinctrl-alderlake.c
+++ b/drivers/pinctrl/intel/pinctrl-alderlake.c
@@ -34,25 +34,11 @@
.gpio_base = (g), \
}
-#define ADL_COMMUNITY(b, s, e, g, v) \
- { \
- .barno = (b), \
- .padown_offset = ADL_##v##_PAD_OWN, \
- .padcfglock_offset = ADL_##v##_PADCFGLOCK, \
- .hostown_offset = ADL_##v##_HOSTSW_OWN, \
- .is_offset = ADL_##v##_GPI_IS, \
- .ie_offset = ADL_##v##_GPI_IE, \
- .pin_base = (s), \
- .npins = ((e) - (s) + 1), \
- .gpps = (g), \
- .ngpps = ARRAY_SIZE(g), \
- }
-
#define ADL_N_COMMUNITY(b, s, e, g) \
- ADL_COMMUNITY(b, s, e, g, N)
+ INTEL_COMMUNITY_GPPS(b, s, e, g, ADL_N)
#define ADL_S_COMMUNITY(b, s, e, g) \
- ADL_COMMUNITY(b, s, e, g, S)
+ INTEL_COMMUNITY_GPPS(b, s, e, g, ADL_S)
/* Alder Lake-N */
static const struct pinctrl_pin_desc adln_pins[] = {
diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
index 67db79f38051..770a2723ef81 100644
--- a/drivers/pinctrl/intel/pinctrl-baytrail.c
+++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
@@ -637,18 +637,18 @@ static const char *byt_get_function_name(struct pinctrl_dev *pctldev,
{
struct intel_pinctrl *vg = pinctrl_dev_get_drvdata(pctldev);
- return vg->soc->functions[selector].name;
+ return vg->soc->functions[selector].func.name;
}
static int byt_get_function_groups(struct pinctrl_dev *pctldev,
unsigned int selector,
const char * const **groups,
- unsigned int *num_groups)
+ unsigned int *ngroups)
{
struct intel_pinctrl *vg = pinctrl_dev_get_drvdata(pctldev);
- *groups = vg->soc->functions[selector].groups;
- *num_groups = vg->soc->functions[selector].ngroups;
+ *groups = vg->soc->functions[selector].func.groups;
+ *ngroups = vg->soc->functions[selector].func.ngroups;
return 0;
}
@@ -722,7 +722,7 @@ static int byt_set_mux(struct pinctrl_dev *pctldev, unsigned int func_selector,
if (group.modes)
byt_set_group_mixed_mux(vg, group, group.modes);
- else if (!strcmp(func.name, "gpio"))
+ else if (!strcmp(func.func.name, "gpio"))
byt_set_group_simple_mux(vg, group, BYT_DEFAULT_GPIO_MUX);
else
byt_set_group_simple_mux(vg, group, group.mode);
diff --git a/drivers/pinctrl/intel/pinctrl-broxton.c b/drivers/pinctrl/intel/pinctrl-broxton.c
index fb15cd10a32f..77e921b2178d 100644
--- a/drivers/pinctrl/intel/pinctrl-broxton.c
+++ b/drivers/pinctrl/intel/pinctrl-broxton.c
@@ -20,17 +20,8 @@
#define BXT_GPI_IS 0x100
#define BXT_GPI_IE 0x110
-#define BXT_COMMUNITY(s, e) \
- { \
- .padown_offset = BXT_PAD_OWN, \
- .padcfglock_offset = BXT_PADCFGLOCK, \
- .hostown_offset = BXT_HOSTSW_OWN, \
- .is_offset = BXT_GPI_IS, \
- .ie_offset = BXT_GPI_IE, \
- .gpp_size = 32, \
- .pin_base = (s), \
- .npins = ((e) - (s) + 1), \
- }
+#define BXT_COMMUNITY(b, s, e) \
+ INTEL_COMMUNITY_SIZE(b, s, e, 32, 4, BXT)
/* BXT */
static const struct pinctrl_pin_desc bxt_north_pins[] = {
@@ -172,7 +163,7 @@ static const struct intel_function bxt_north_functions[] = {
};
static const struct intel_community bxt_north_communities[] = {
- BXT_COMMUNITY(0, 82),
+ BXT_COMMUNITY(0, 0, 82),
};
static const struct intel_pinctrl_soc_data bxt_north_soc_data = {
@@ -289,7 +280,7 @@ static const struct intel_function bxt_northwest_functions[] = {
};
static const struct intel_community bxt_northwest_communities[] = {
- BXT_COMMUNITY(0, 71),
+ BXT_COMMUNITY(0, 0, 71),
};
static const struct intel_pinctrl_soc_data bxt_northwest_soc_data = {
@@ -396,7 +387,7 @@ static const struct intel_function bxt_west_functions[] = {
};
static const struct intel_community bxt_west_communities[] = {
- BXT_COMMUNITY(0, 41),
+ BXT_COMMUNITY(0, 0, 41),
};
static const struct intel_pinctrl_soc_data bxt_west_soc_data = {
@@ -472,7 +463,7 @@ static const struct intel_function bxt_southwest_functions[] = {
};
static const struct intel_community bxt_southwest_communities[] = {
- BXT_COMMUNITY(0, 30),
+ BXT_COMMUNITY(0, 0, 30),
};
static const struct intel_pinctrl_soc_data bxt_southwest_soc_data = {
@@ -511,7 +502,7 @@ static const struct pinctrl_pin_desc bxt_south_pins[] = {
};
static const struct intel_community bxt_south_communities[] = {
- BXT_COMMUNITY(0, 19),
+ BXT_COMMUNITY(0, 0, 19),
};
static const struct intel_pinctrl_soc_data bxt_south_soc_data = {
@@ -650,7 +641,7 @@ static const struct intel_function apl_north_functions[] = {
};
static const struct intel_community apl_north_communities[] = {
- BXT_COMMUNITY(0, 77),
+ BXT_COMMUNITY(0, 0, 77),
};
static const struct intel_pinctrl_soc_data apl_north_soc_data = {
@@ -770,7 +761,7 @@ static const struct intel_function apl_northwest_functions[] = {
};
static const struct intel_community apl_northwest_communities[] = {
- BXT_COMMUNITY(0, 76),
+ BXT_COMMUNITY(0, 0, 76),
};
static const struct intel_pinctrl_soc_data apl_northwest_soc_data = {
@@ -880,7 +871,7 @@ static const struct intel_function apl_west_functions[] = {
};
static const struct intel_community apl_west_communities[] = {
- BXT_COMMUNITY(0, 46),
+ BXT_COMMUNITY(0, 0, 46),
};
static const struct intel_pinctrl_soc_data apl_west_soc_data = {
@@ -972,7 +963,7 @@ static const struct intel_function apl_southwest_functions[] = {
};
static const struct intel_community apl_southwest_communities[] = {
- BXT_COMMUNITY(0, 42),
+ BXT_COMMUNITY(0, 0, 42),
};
static const struct intel_pinctrl_soc_data apl_southwest_soc_data = {
diff --git a/drivers/pinctrl/intel/pinctrl-cannonlake.c b/drivers/pinctrl/intel/pinctrl-cannonlake.c
index f8a8b9b14de9..88142ec57b25 100644
--- a/drivers/pinctrl/intel/pinctrl-cannonlake.c
+++ b/drivers/pinctrl/intel/pinctrl-cannonlake.c
@@ -15,12 +15,17 @@
#include "pinctrl-intel.h"
-#define CNL_PAD_OWN 0x020
-#define CNL_PADCFGLOCK 0x080
+#define CNL_LP_PAD_OWN 0x020
+#define CNL_LP_PADCFGLOCK 0x080
#define CNL_LP_HOSTSW_OWN 0x0b0
+#define CNL_LP_GPI_IS 0x100
+#define CNL_LP_GPI_IE 0x120
+
+#define CNL_H_PAD_OWN 0x020
+#define CNL_H_PADCFGLOCK 0x080
#define CNL_H_HOSTSW_OWN 0x0c0
-#define CNL_GPI_IS 0x100
-#define CNL_GPI_IE 0x120
+#define CNL_H_GPI_IS 0x100
+#define CNL_H_GPI_IE 0x120
#define CNL_GPP(r, s, e, g) \
{ \
@@ -30,25 +35,11 @@
.gpio_base = (g), \
}
-#define CNL_COMMUNITY(b, s, e, g, v) \
- { \
- .barno = (b), \
- .padown_offset = CNL_PAD_OWN, \
- .padcfglock_offset = CNL_PADCFGLOCK, \
- .hostown_offset = CNL_##v##_HOSTSW_OWN, \
- .is_offset = CNL_GPI_IS, \
- .ie_offset = CNL_GPI_IE, \
- .pin_base = (s), \
- .npins = ((e) - (s) + 1), \
- .gpps = (g), \
- .ngpps = ARRAY_SIZE(g), \
- }
-
#define CNL_LP_COMMUNITY(b, s, e, g) \
- CNL_COMMUNITY(b, s, e, g, LP)
+ INTEL_COMMUNITY_GPPS(b, s, e, g, CNL_LP)
#define CNL_H_COMMUNITY(b, s, e, g) \
- CNL_COMMUNITY(b, s, e, g, H)
+ INTEL_COMMUNITY_GPPS(b, s, e, g, CNL_H)
/* Cannon Lake-H */
static const struct pinctrl_pin_desc cnlh_pins[] = {
diff --git a/drivers/pinctrl/intel/pinctrl-cedarfork.c b/drivers/pinctrl/intel/pinctrl-cedarfork.c
index aa6f9040d3d8..2ab52b1fbc59 100644
--- a/drivers/pinctrl/intel/pinctrl-cedarfork.c
+++ b/drivers/pinctrl/intel/pinctrl-cedarfork.c
@@ -28,18 +28,7 @@
}
#define CDF_COMMUNITY(b, s, e, g) \
- { \
- .barno = (b), \
- .padown_offset = CDF_PAD_OWN, \
- .padcfglock_offset = CDF_PADCFGLOCK, \
- .hostown_offset = CDF_HOSTSW_OWN, \
- .is_offset = CDF_GPI_IS, \
- .ie_offset = CDF_GPI_IE, \
- .pin_base = (s), \
- .npins = ((e) - (s) + 1), \
- .gpps = (g), \
- .ngpps = ARRAY_SIZE(g), \
- }
+ INTEL_COMMUNITY_GPPS(b, s, e, g, CDF)
/* Cedar Fork PCH */
static const struct pinctrl_pin_desc cdf_pins[] = {
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index 11b81213922d..722990e27836 100644
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -694,7 +694,7 @@ static const char *chv_get_function_name(struct pinctrl_dev *pctldev,
{
struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
- return pctrl->soc->functions[function].name;
+ return pctrl->soc->functions[function].func.name;
}
static int chv_get_function_groups(struct pinctrl_dev *pctldev,
@@ -704,8 +704,8 @@ static int chv_get_function_groups(struct pinctrl_dev *pctldev,
{
struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
- *groups = pctrl->soc->functions[function].groups;
- *ngroups = pctrl->soc->functions[function].ngroups;
+ *groups = pctrl->soc->functions[function].func.groups;
+ *ngroups = pctrl->soc->functions[function].func.ngroups;
return 0;
}
diff --git a/drivers/pinctrl/intel/pinctrl-denverton.c b/drivers/pinctrl/intel/pinctrl-denverton.c
index f26d030b9b41..c1a9db091c6e 100644
--- a/drivers/pinctrl/intel/pinctrl-denverton.c
+++ b/drivers/pinctrl/intel/pinctrl-denverton.c
@@ -28,18 +28,7 @@
}
#define DNV_COMMUNITY(b, s, e, g) \
- { \
- .barno = (b), \
- .padown_offset = DNV_PAD_OWN, \
- .padcfglock_offset = DNV_PADCFGLOCK, \
- .hostown_offset = DNV_HOSTSW_OWN, \
- .is_offset = DNV_GPI_IS, \
- .ie_offset = DNV_GPI_IE, \
- .pin_base = (s), \
- .npins = ((e) - (s) + 1), \
- .gpps = (g), \
- .ngpps = ARRAY_SIZE(g), \
- }
+ INTEL_COMMUNITY_GPPS(b, s, e, g, DNV)
/* Denverton */
static const struct pinctrl_pin_desc dnv_pins[] = {
diff --git a/drivers/pinctrl/intel/pinctrl-elkhartlake.c b/drivers/pinctrl/intel/pinctrl-elkhartlake.c
index 4702bdfa10e3..64b1997df0be 100644
--- a/drivers/pinctrl/intel/pinctrl-elkhartlake.c
+++ b/drivers/pinctrl/intel/pinctrl-elkhartlake.c
@@ -27,18 +27,8 @@
.size = ((e) - (s) + 1), \
}
-#define EHL_COMMUNITY(s, e, g) \
- { \
- .padown_offset = EHL_PAD_OWN, \
- .padcfglock_offset = EHL_PADCFGLOCK, \
- .hostown_offset = EHL_HOSTSW_OWN, \
- .is_offset = EHL_GPI_IS, \
- .ie_offset = EHL_GPI_IE, \
- .pin_base = (s), \
- .npins = ((e) - (s) + 1), \
- .gpps = (g), \
- .ngpps = ARRAY_SIZE(g), \
- }
+#define EHL_COMMUNITY(b, s, e, g) \
+ INTEL_COMMUNITY_GPPS(b, s, e, g, EHL)
/* Elkhart Lake */
static const struct pinctrl_pin_desc ehl_community0_pins[] = {
@@ -121,7 +111,7 @@ static const struct intel_padgroup ehl_community0_gpps[] = {
};
static const struct intel_community ehl_community0[] = {
- EHL_COMMUNITY(0, 66, ehl_community0_gpps),
+ EHL_COMMUNITY(0, 0, 66, ehl_community0_gpps),
};
static const struct intel_pinctrl_soc_data ehl_community0_soc_data = {
@@ -262,7 +252,7 @@ static const struct intel_padgroup ehl_community1_gpps[] = {
};
static const struct intel_community ehl_community1[] = {
- EHL_COMMUNITY(0, 112, ehl_community1_gpps),
+ EHL_COMMUNITY(0, 0, 112, ehl_community1_gpps),
};
static const struct intel_pinctrl_soc_data ehl_community1_soc_data = {
@@ -335,7 +325,7 @@ static const struct intel_padgroup ehl_community3_gpps[] = {
};
static const struct intel_community ehl_community3[] = {
- EHL_COMMUNITY(0, 46, ehl_community3_gpps),
+ EHL_COMMUNITY(0, 0, 46, ehl_community3_gpps),
};
static const struct intel_pinctrl_soc_data ehl_community3_soc_data = {
@@ -441,7 +431,7 @@ static const struct intel_padgroup ehl_community4_gpps[] = {
};
static const struct intel_community ehl_community4[] = {
- EHL_COMMUNITY(0, 79, ehl_community4_gpps),
+ EHL_COMMUNITY(0, 0, 79, ehl_community4_gpps),
};
static const struct intel_pinctrl_soc_data ehl_community4_soc_data = {
@@ -469,7 +459,7 @@ static const struct intel_padgroup ehl_community5_gpps[] = {
};
static const struct intel_community ehl_community5[] = {
- EHL_COMMUNITY(0, 7, ehl_community5_gpps),
+ EHL_COMMUNITY(0, 0, 7, ehl_community5_gpps),
};
static const struct intel_pinctrl_soc_data ehl_community5_soc_data = {
diff --git a/drivers/pinctrl/intel/pinctrl-emmitsburg.c b/drivers/pinctrl/intel/pinctrl-emmitsburg.c
index f6114dbf7520..cc8f0baabc91 100644
--- a/drivers/pinctrl/intel/pinctrl-emmitsburg.c
+++ b/drivers/pinctrl/intel/pinctrl-emmitsburg.c
@@ -28,18 +28,7 @@
}
#define EBG_COMMUNITY(b, s, e, g) \
- { \
- .barno = (b), \
- .padown_offset = EBG_PAD_OWN, \
- .padcfglock_offset = EBG_PADCFGLOCK, \
- .hostown_offset = EBG_HOSTSW_OWN, \
- .is_offset = EBG_GPI_IS, \
- .ie_offset = EBG_GPI_IE, \
- .pin_base = (s), \
- .npins = ((e) - (s) + 1), \
- .gpps = (g), \
- .ngpps = ARRAY_SIZE(g), \
- }
+ INTEL_COMMUNITY_GPPS(b, s, e, g, EBG)
/* Emmitsburg */
static const struct pinctrl_pin_desc ebg_pins[] = {
diff --git a/drivers/pinctrl/intel/pinctrl-geminilake.c b/drivers/pinctrl/intel/pinctrl-geminilake.c
index df02028b40f3..918cc9f261cf 100644
--- a/drivers/pinctrl/intel/pinctrl-geminilake.c
+++ b/drivers/pinctrl/intel/pinctrl-geminilake.c
@@ -20,17 +20,8 @@
#define GLK_GPI_IS 0x100
#define GLK_GPI_IE 0x110
-#define GLK_COMMUNITY(s, e) \
- { \
- .padown_offset = GLK_PAD_OWN, \
- .padcfglock_offset = GLK_PADCFGLOCK, \
- .hostown_offset = GLK_HOSTSW_OWN, \
- .is_offset = GLK_GPI_IS, \
- .ie_offset = GLK_GPI_IE, \
- .gpp_size = 32, \
- .pin_base = (s), \
- .npins = ((e) - (s) + 1), \
- }
+#define GLK_COMMUNITY(b, s, e) \
+ INTEL_COMMUNITY_SIZE(b, s, e, 32, 4, GLK)
/* GLK */
static const struct pinctrl_pin_desc glk_northwest_pins[] = {
@@ -173,7 +164,7 @@ static const struct intel_function glk_northwest_functions[] = {
};
static const struct intel_community glk_northwest_communities[] = {
- GLK_COMMUNITY(0, 79),
+ GLK_COMMUNITY(0, 0, 79),
};
static const struct intel_pinctrl_soc_data glk_northwest_soc_data = {
@@ -306,7 +297,7 @@ static const struct intel_function glk_north_functions[] = {
};
static const struct intel_community glk_north_communities[] = {
- GLK_COMMUNITY(0, 79),
+ GLK_COMMUNITY(0, 0, 79),
};
static const struct intel_pinctrl_soc_data glk_north_soc_data = {
@@ -345,7 +336,7 @@ static const struct pinctrl_pin_desc glk_audio_pins[] = {
};
static const struct intel_community glk_audio_communities[] = {
- GLK_COMMUNITY(0, 19),
+ GLK_COMMUNITY(0, 0, 19),
};
static const struct intel_pinctrl_soc_data glk_audio_soc_data = {
@@ -427,7 +418,7 @@ static const struct intel_function glk_scc_functions[] = {
};
static const struct intel_community glk_scc_communities[] = {
- GLK_COMMUNITY(0, 34),
+ GLK_COMMUNITY(0, 0, 34),
};
static const struct intel_pinctrl_soc_data glk_scc_soc_data = {
diff --git a/drivers/pinctrl/intel/pinctrl-icelake.c b/drivers/pinctrl/intel/pinctrl-icelake.c
index 84a56d9ae47e..1c64b4a1c491 100644
--- a/drivers/pinctrl/intel/pinctrl-icelake.c
+++ b/drivers/pinctrl/intel/pinctrl-icelake.c
@@ -15,12 +15,17 @@
#include "pinctrl-intel.h"
-#define ICL_PAD_OWN 0x020
-#define ICL_PADCFGLOCK 0x080
-#define ICL_HOSTSW_OWN 0x0b0
-#define ICL_GPI_IS 0x100
-#define ICL_LP_GPI_IE 0x110
-#define ICL_N_GPI_IE 0x120
+#define ICL_LP_PAD_OWN 0x020
+#define ICL_LP_PADCFGLOCK 0x080
+#define ICL_LP_HOSTSW_OWN 0x0b0
+#define ICL_LP_GPI_IS 0x100
+#define ICL_LP_GPI_IE 0x110
+
+#define ICL_N_PAD_OWN 0x020
+#define ICL_N_PADCFGLOCK 0x080
+#define ICL_N_HOSTSW_OWN 0x0b0
+#define ICL_N_GPI_IS 0x100
+#define ICL_N_GPI_IE 0x120
#define ICL_GPP(r, s, e, g) \
{ \
@@ -30,25 +35,11 @@
.gpio_base = (g), \
}
-#define ICL_COMMUNITY(b, s, e, g, v) \
- { \
- .barno = (b), \
- .padown_offset = ICL_PAD_OWN, \
- .padcfglock_offset = ICL_PADCFGLOCK, \
- .hostown_offset = ICL_HOSTSW_OWN, \
- .is_offset = ICL_GPI_IS, \
- .ie_offset = ICL_##v##_GPI_IE, \
- .pin_base = (s), \
- .npins = ((e) - (s) + 1), \
- .gpps = (g), \
- .ngpps = ARRAY_SIZE(g), \
- }
-
#define ICL_LP_COMMUNITY(b, s, e, g) \
- ICL_COMMUNITY(b, s, e, g, LP)
+ INTEL_COMMUNITY_GPPS(b, s, e, g, ICL_LP)
#define ICL_N_COMMUNITY(b, s, e, g) \
- ICL_COMMUNITY(b, s, e, g, N)
+ INTEL_COMMUNITY_GPPS(b, s, e, g, ICL_N)
/* Ice Lake-LP */
static const struct pinctrl_pin_desc icllp_pins[] = {
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
index cc3aaba24188..c7a71c49df0a 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.c
+++ b/drivers/pinctrl/intel/pinctrl-intel.c
@@ -81,13 +81,16 @@
#define PADCFG1_TERM_MASK GENMASK(12, 10)
#define PADCFG1_TERM_20K BIT(2)
#define PADCFG1_TERM_5K BIT(1)
+#define PADCFG1_TERM_4K (BIT(2) | BIT(1))
#define PADCFG1_TERM_1K BIT(0)
+#define PADCFG1_TERM_952 (BIT(2) | BIT(0))
#define PADCFG1_TERM_833 (BIT(1) | BIT(0))
+#define PADCFG1_TERM_800 (BIT(2) | BIT(1) | BIT(0))
#define PADCFG2 0x008
-#define PADCFG2_DEBEN BIT(0)
#define PADCFG2_DEBOUNCE_SHIFT 1
#define PADCFG2_DEBOUNCE_MASK GENMASK(4, 1)
+#define PADCFG2_DEBEN BIT(0)
#define DEBOUNCE_PERIOD_NSEC 31250
@@ -369,7 +372,7 @@ static const char *intel_get_function_name(struct pinctrl_dev *pctldev,
{
struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
- return pctrl->soc->functions[function].name;
+ return pctrl->soc->functions[function].func.name;
}
static int intel_get_function_groups(struct pinctrl_dev *pctldev,
@@ -379,8 +382,8 @@ static int intel_get_function_groups(struct pinctrl_dev *pctldev,
{
struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
- *groups = pctrl->soc->functions[function].groups;
- *ngroups = pctrl->soc->functions[function].ngroups;
+ *groups = pctrl->soc->functions[function].func.groups;
+ *ngroups = pctrl->soc->functions[function].func.ngroups;
return 0;
}
@@ -574,6 +577,9 @@ static int intel_config_get_pull(struct intel_pinctrl *pctrl, unsigned int pin,
case PADCFG1_TERM_1K:
*arg = 1000;
break;
+ case PADCFG1_TERM_4K:
+ *arg = 4000;
+ break;
case PADCFG1_TERM_5K:
*arg = 5000;
break;
@@ -599,6 +605,9 @@ static int intel_config_get_pull(struct intel_pinctrl *pctrl, unsigned int pin,
return -EINVAL;
*arg = 1000;
break;
+ case PADCFG1_TERM_4K:
+ *arg = 4000;
+ break;
case PADCFG1_TERM_5K:
*arg = 5000;
break;
@@ -691,21 +700,17 @@ static int intel_config_set_pull(struct intel_pinctrl *pctrl, unsigned int pin,
raw_spin_lock_irqsave(&pctrl->lock, flags);
value = readl(padcfg1);
+ value &= ~(PADCFG1_TERM_MASK | PADCFG1_TERM_UP);
+
+ /* Set default strength value in case none is given */
+ if (arg == 1)
+ arg = 5000;
switch (param) {
case PIN_CONFIG_BIAS_DISABLE:
- value &= ~(PADCFG1_TERM_MASK | PADCFG1_TERM_UP);
break;
case PIN_CONFIG_BIAS_PULL_UP:
- value &= ~PADCFG1_TERM_MASK;
-
- value |= PADCFG1_TERM_UP;
-
- /* Set default strength value in case none is given */
- if (arg == 1)
- arg = 5000;
-
switch (arg) {
case 20000:
value |= PADCFG1_TERM_20K << PADCFG1_TERM_SHIFT;
@@ -713,6 +718,9 @@ static int intel_config_set_pull(struct intel_pinctrl *pctrl, unsigned int pin,
case 5000:
value |= PADCFG1_TERM_5K << PADCFG1_TERM_SHIFT;
break;
+ case 4000:
+ value |= PADCFG1_TERM_4K << PADCFG1_TERM_SHIFT;
+ break;
case 1000:
value |= PADCFG1_TERM_1K << PADCFG1_TERM_SHIFT;
break;
@@ -721,17 +729,13 @@ static int intel_config_set_pull(struct intel_pinctrl *pctrl, unsigned int pin,
break;
default:
ret = -EINVAL;
+ break;
}
+ value |= PADCFG1_TERM_UP;
break;
case PIN_CONFIG_BIAS_PULL_DOWN:
- value &= ~(PADCFG1_TERM_UP | PADCFG1_TERM_MASK);
-
- /* Set default strength value in case none is given */
- if (arg == 1)
- arg = 5000;
-
switch (arg) {
case 20000:
value |= PADCFG1_TERM_20K << PADCFG1_TERM_SHIFT;
@@ -739,6 +743,9 @@ static int intel_config_set_pull(struct intel_pinctrl *pctrl, unsigned int pin,
case 5000:
value |= PADCFG1_TERM_5K << PADCFG1_TERM_SHIFT;
break;
+ case 4000:
+ value |= PADCFG1_TERM_4K << PADCFG1_TERM_SHIFT;
+ break;
case 1000:
if (!(community->features & PINCTRL_FEATURE_1K_PD)) {
ret = -EINVAL;
@@ -755,9 +762,14 @@ static int intel_config_set_pull(struct intel_pinctrl *pctrl, unsigned int pin,
break;
default:
ret = -EINVAL;
+ break;
}
break;
+
+ default:
+ ret = -EINVAL;
+ break;
}
if (!ret)
@@ -1215,13 +1227,8 @@ static int intel_gpio_community_irq_handler(struct intel_pinctrl *pctrl,
/* Only interrupts that are enabled */
pending &= enabled;
- for_each_set_bit(gpp_offset, &pending, padgrp->size) {
- unsigned int irq;
-
- irq = irq_find_mapping(gc->irq.domain,
- padgrp->gpio_base + gpp_offset);
- generic_handle_irq(irq);
- }
+ for_each_set_bit(gpp_offset, &pending, padgrp->size)
+ generic_handle_domain_irq(gc->irq.domain, padgrp->gpio_base + gpp_offset);
ret += pending ? 1 : 0;
}
@@ -1399,7 +1406,7 @@ static int intel_pinctrl_add_padgroups_by_gpps(struct intel_pinctrl *pctrl,
for (i = 0; i < ngpps; i++) {
gpps[i] = community->gpps[i];
- if (gpps[i].size > 32)
+ if (gpps[i].size > INTEL_PINCTRL_MAX_GPP_SIZE)
return -EINVAL;
/* Special treatment for GPIO base */
@@ -1417,7 +1424,7 @@ static int intel_pinctrl_add_padgroups_by_gpps(struct intel_pinctrl *pctrl,
}
gpps[i].padown_num = padown_num;
- padown_num += DIV_ROUND_UP(gpps[i].size * 4, 32);
+ padown_num += DIV_ROUND_UP(gpps[i].size * 4, INTEL_PINCTRL_MAX_GPP_SIZE);
}
community->gpps = gpps;
@@ -1433,7 +1440,7 @@ static int intel_pinctrl_add_padgroups_by_size(struct intel_pinctrl *pctrl,
unsigned int padown_num = 0;
size_t i, ngpps = DIV_ROUND_UP(npins, community->gpp_size);
- if (community->gpp_size > 32)
+ if (community->gpp_size > INTEL_PINCTRL_MAX_GPP_SIZE)
return -EINVAL;
gpps = devm_kcalloc(pctrl->dev, ngpps, sizeof(*gpps), GFP_KERNEL);
@@ -1451,14 +1458,7 @@ static int intel_pinctrl_add_padgroups_by_size(struct intel_pinctrl *pctrl,
gpps[i].gpio_base = gpps[i].base;
gpps[i].padown_num = padown_num;
- /*
- * In older hardware the number of padown registers per
- * group is fixed regardless of the group size.
- */
- if (community->gpp_num_padown_regs)
- padown_num += community->gpp_num_padown_regs;
- else
- padown_num += DIV_ROUND_UP(gpps[i].size * 4, 32);
+ padown_num += community->gpp_num_padown_regs;
}
community->ngpps = ngpps;
@@ -1709,6 +1709,12 @@ const struct intel_pinctrl_soc_data *intel_pinctrl_get_soc_data(struct platform_
EXPORT_SYMBOL_GPL(intel_pinctrl_get_soc_data);
#ifdef CONFIG_PM_SLEEP
+static bool __intel_gpio_is_direct_irq(u32 value)
+{
+ return (value & PADCFG0_GPIROUTIOXAPIC) && (value & PADCFG0_GPIOTXDIS) &&
+ (__intel_gpio_get_gpio_mode(value) == PADCFG0_PMODE_GPIO);
+}
+
static bool intel_pinctrl_should_save(struct intel_pinctrl *pctrl, unsigned int pin)
{
const struct pin_desc *pd = pin_desc_get(pctrl->pctldev, pin);
@@ -1742,8 +1748,7 @@ static bool intel_pinctrl_should_save(struct intel_pinctrl *pctrl, unsigned int
* See https://bugzilla.kernel.org/show_bug.cgi?id=214749.
*/
value = readl(intel_get_padcfg(pctrl, pin, PADCFG0));
- if ((value & PADCFG0_GPIROUTIOXAPIC) && (value & PADCFG0_GPIOTXDIS) &&
- (__intel_gpio_get_gpio_mode(value) == PADCFG0_PMODE_GPIO))
+ if (__intel_gpio_is_direct_irq(value))
return true;
return false;
@@ -1873,7 +1878,12 @@ int intel_pinctrl_resume_noirq(struct device *dev)
for (i = 0; i < pctrl->soc->npins; i++) {
const struct pinctrl_pin_desc *desc = &pctrl->soc->pins[i];
- if (!intel_pinctrl_should_save(pctrl, desc->number))
+ if (!(intel_pinctrl_should_save(pctrl, desc->number) ||
+ /*
+ * If the firmware mangled the register contents too much,
+ * check the saved value for the Direct IRQ mode.
+ */
+ __intel_gpio_is_direct_irq(pads[i].padcfg0)))
continue;
intel_restore_padcfg(pctrl, desc->number, PADCFG0, pads[i].padcfg0);
diff --git a/drivers/pinctrl/intel/pinctrl-intel.h b/drivers/pinctrl/intel/pinctrl-intel.h
index 65628423bf63..1faf2ada480a 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.h
+++ b/drivers/pinctrl/intel/pinctrl-intel.h
@@ -36,21 +36,19 @@ struct intel_pingroup {
/**
* struct intel_function - Description about a function
- * @name: Name of the function
- * @groups: An array of groups for this function
- * @ngroups: Number of groups in @groups
+ * @func: Generic data of the pin function (name and groups of pins)
*/
struct intel_function {
- const char *name;
- const char * const *groups;
- size_t ngroups;
+ struct pinfunction func;
};
+#define INTEL_PINCTRL_MAX_GPP_SIZE 32
+
/**
* struct intel_padgroup - Hardware pad group information
* @reg_num: GPI_IS register number
* @base: Starting pin of this group
- * @size: Size of this group (maximum is 32).
+ * @size: Size of this group (maximum is %INTEL_PINCTRL_MAX_GPP_SIZE).
* @gpio_base: Starting GPIO base of this group
* @padown_num: PAD_OWN register number (assigned by the core driver)
*
@@ -96,8 +94,7 @@ enum {
* @gpp_size: Maximum number of pads in each group, such as PADCFGLOCK,
* HOSTSW_OWN, GPI_IS, GPI_IE. Used when @gpps is %NULL.
* @gpp_num_padown_regs: Number of pad registers each pad group consumes at
- * minimum. Use %0 if the number of registers can be
- * determined by the size of the group.
+ * minimum. Used when @gpps is %NULL.
* @gpps: Pad groups if the controller has variable size pad groups
* @ngpps: Number of pad groups in this community
* @pad_map: Optional non-linear mapping of the pads
@@ -106,11 +103,13 @@ enum {
* @regs: Community specific common registers (reserved for core driver)
* @pad_regs: Community specific pad registers (reserved for core driver)
*
- * In some of Intel GPIO host controllers this driver supports each pad group
+ * In older Intel GPIO host controllers, this driver supports, each pad group
* is of equal size (except the last one). In that case the driver can just
- * fill in @gpp_size field and let the core driver to handle the rest. If
- * the controller has pad groups of variable size the client driver can
- * pass custom @gpps and @ngpps instead.
+ * fill in @gpp_size and @gpp_num_padown_regs fields and let the core driver
+ * to handle the rest.
+ *
+ * In newer Intel GPIO host controllers each pad group is of variable size,
+ * so the client driver can pass custom @gpps and @ngpps instead.
*/
struct intel_community {
unsigned int barno;
@@ -143,6 +142,28 @@ struct intel_community {
#define PINCTRL_FEATURE_BLINK BIT(4)
#define PINCTRL_FEATURE_EXP BIT(5)
+#define __INTEL_COMMUNITY(b, s, e, g, n, gs, gn, soc) \
+ { \
+ .barno = (b), \
+ .padown_offset = soc ## _PAD_OWN, \
+ .padcfglock_offset = soc ## _PADCFGLOCK, \
+ .hostown_offset = soc ## _HOSTSW_OWN, \
+ .is_offset = soc ## _GPI_IS, \
+ .ie_offset = soc ## _GPI_IE, \
+ .gpp_size = (gs), \
+ .gpp_num_padown_regs = (gn), \
+ .pin_base = (s), \
+ .npins = ((e) - (s) + 1), \
+ .gpps = (g), \
+ .ngpps = (n), \
+ }
+
+#define INTEL_COMMUNITY_GPPS(b, s, e, g, soc) \
+ __INTEL_COMMUNITY(b, s, e, g, ARRAY_SIZE(g), 0, 0, soc)
+
+#define INTEL_COMMUNITY_SIZE(b, s, e, gs, gn, soc) \
+ __INTEL_COMMUNITY(b, s, e, NULL, 0, gs, gn, soc)
+
/**
* PIN_GROUP - Declare a pin group
* @n: Name of the group
@@ -158,11 +179,9 @@ struct intel_community {
.modes = __builtin_choose_expr(__builtin_constant_p((m)), NULL, (m)), \
}
-#define FUNCTION(n, g) \
- { \
- .name = (n), \
- .groups = (g), \
- .ngroups = ARRAY_SIZE((g)), \
+#define FUNCTION(n, g) \
+ { \
+ .func = PINCTRL_PINFUNCTION((n), (g), ARRAY_SIZE(g)), \
}
/**
diff --git a/drivers/pinctrl/intel/pinctrl-jasperlake.c b/drivers/pinctrl/intel/pinctrl-jasperlake.c
index ec435b7ab392..086ab7fe08dd 100644
--- a/drivers/pinctrl/intel/pinctrl-jasperlake.c
+++ b/drivers/pinctrl/intel/pinctrl-jasperlake.c
@@ -29,18 +29,7 @@
}
#define JSL_COMMUNITY(b, s, e, g) \
- { \
- .barno = (b), \
- .padown_offset = JSL_PAD_OWN, \
- .padcfglock_offset = JSL_PADCFGLOCK, \
- .hostown_offset = JSL_HOSTSW_OWN, \
- .is_offset = JSL_GPI_IS, \
- .ie_offset = JSL_GPI_IE, \
- .pin_base = (s), \
- .npins = ((e) - (s) + 1), \
- .gpps = (g), \
- .ngpps = ARRAY_SIZE(g), \
- }
+ INTEL_COMMUNITY_GPPS(b, s, e, g, JSL)
/* Jasper Lake */
static const struct pinctrl_pin_desc jsl_pins[] = {
diff --git a/drivers/pinctrl/intel/pinctrl-lakefield.c b/drivers/pinctrl/intel/pinctrl-lakefield.c
index 3c6283c4827f..8dac2d6012b1 100644
--- a/drivers/pinctrl/intel/pinctrl-lakefield.c
+++ b/drivers/pinctrl/intel/pinctrl-lakefield.c
@@ -29,18 +29,7 @@
}
#define LKF_COMMUNITY(b, s, e, g) \
- { \
- .barno = (b), \
- .padown_offset = LKF_PAD_OWN, \
- .padcfglock_offset = LKF_PADCFGLOCK, \
- .hostown_offset = LKF_HOSTSW_OWN, \
- .is_offset = LKF_GPI_IS, \
- .ie_offset = LKF_GPI_IE, \
- .pin_base = (s), \
- .npins = ((e) - (s) + 1), \
- .gpps = (g), \
- .ngpps = ARRAY_SIZE(g), \
- }
+ INTEL_COMMUNITY_GPPS(b, s, e, g, LKF)
/* Lakefield */
static const struct pinctrl_pin_desc lkf_pins[] = {
diff --git a/drivers/pinctrl/intel/pinctrl-lewisburg.c b/drivers/pinctrl/intel/pinctrl-lewisburg.c
index ad4b446d588e..7aac1bbde2e9 100644
--- a/drivers/pinctrl/intel/pinctrl-lewisburg.c
+++ b/drivers/pinctrl/intel/pinctrl-lewisburg.c
@@ -21,17 +21,7 @@
#define LBG_GPI_IE 0x110
#define LBG_COMMUNITY(b, s, e) \
- { \
- .barno = (b), \
- .padown_offset = LBG_PAD_OWN, \
- .padcfglock_offset = LBG_PADCFGLOCK, \
- .hostown_offset = LBG_HOSTSW_OWN, \
- .is_offset = LBG_GPI_IS, \
- .ie_offset = LBG_GPI_IE, \
- .gpp_size = 24, \
- .pin_base = (s), \
- .npins = ((e) - (s) + 1), \
- }
+ INTEL_COMMUNITY_SIZE(b, s, e, 24, 3, LBG)
/* Lewisburg */
static const struct pinctrl_pin_desc lbg_pins[] = {
diff --git a/drivers/pinctrl/intel/pinctrl-lynxpoint.c b/drivers/pinctrl/intel/pinctrl-lynxpoint.c
index 8d05dad38556..cdace55aaeac 100644
--- a/drivers/pinctrl/intel/pinctrl-lynxpoint.c
+++ b/drivers/pinctrl/intel/pinctrl-lynxpoint.c
@@ -341,18 +341,18 @@ static const char *lp_get_function_name(struct pinctrl_dev *pctldev,
{
struct intel_pinctrl *lg = pinctrl_dev_get_drvdata(pctldev);
- return lg->soc->functions[selector].name;
+ return lg->soc->functions[selector].func.name;
}
static int lp_get_function_groups(struct pinctrl_dev *pctldev,
unsigned int selector,
const char * const **groups,
- unsigned int *num_groups)
+ unsigned int *ngroups)
{
struct intel_pinctrl *lg = pinctrl_dev_get_drvdata(pctldev);
- *groups = lg->soc->functions[selector].groups;
- *num_groups = lg->soc->functions[selector].ngroups;
+ *groups = lg->soc->functions[selector].func.groups;
+ *ngroups = lg->soc->functions[selector].func.ngroups;
return 0;
}
diff --git a/drivers/pinctrl/intel/pinctrl-merrifield.c b/drivers/pinctrl/intel/pinctrl-merrifield.c
index c0845bb1e9e3..365c391c97a3 100644
--- a/drivers/pinctrl/intel/pinctrl-merrifield.c
+++ b/drivers/pinctrl/intel/pinctrl-merrifield.c
@@ -576,7 +576,7 @@ static const char *mrfld_get_function_name(struct pinctrl_dev *pctldev,
{
struct mrfld_pinctrl *mp = pinctrl_dev_get_drvdata(pctldev);
- return mp->functions[function].name;
+ return mp->functions[function].func.name;
}
static int mrfld_get_function_groups(struct pinctrl_dev *pctldev,
@@ -586,8 +586,8 @@ static int mrfld_get_function_groups(struct pinctrl_dev *pctldev,
{
struct mrfld_pinctrl *mp = pinctrl_dev_get_drvdata(pctldev);
- *groups = mp->functions[function].groups;
- *ngroups = mp->functions[function].ngroups;
+ *groups = mp->functions[function].func.groups;
+ *ngroups = mp->functions[function].func.ngroups;
return 0;
}
diff --git a/drivers/pinctrl/intel/pinctrl-meteorlake.c b/drivers/pinctrl/intel/pinctrl-meteorlake.c
index 9576dcd1cb29..a82f6754c45b 100644
--- a/drivers/pinctrl/intel/pinctrl-meteorlake.c
+++ b/drivers/pinctrl/intel/pinctrl-meteorlake.c
@@ -14,11 +14,11 @@
#include "pinctrl-intel.h"
-#define MTL_PAD_OWN 0x0b0
-#define MTL_PADCFGLOCK 0x110
-#define MTL_HOSTSW_OWN 0x140
-#define MTL_GPI_IS 0x200
-#define MTL_GPI_IE 0x210
+#define MTL_P_PAD_OWN 0x0b0
+#define MTL_P_PADCFGLOCK 0x110
+#define MTL_P_HOSTSW_OWN 0x140
+#define MTL_P_GPI_IS 0x200
+#define MTL_P_GPI_IE 0x210
#define MTL_GPP(r, s, e, g) \
{ \
@@ -29,18 +29,7 @@
}
#define MTL_COMMUNITY(b, s, e, g) \
- { \
- .barno = (b), \
- .padown_offset = MTL_PAD_OWN, \
- .padcfglock_offset = MTL_PADCFGLOCK, \
- .hostown_offset = MTL_HOSTSW_OWN, \
- .is_offset = MTL_GPI_IS, \
- .ie_offset = MTL_GPI_IE, \
- .pin_base = (s), \
- .npins = ((e) - (s) + 1), \
- .gpps = (g), \
- .ngpps = ARRAY_SIZE(g), \
- }
+ INTEL_COMMUNITY_GPPS(b, s, e, g, MTL_P)
/* Meteor Lake-P */
static const struct pinctrl_pin_desc mtlp_pins[] = {
diff --git a/drivers/pinctrl/intel/pinctrl-moorefield.c b/drivers/pinctrl/intel/pinctrl-moorefield.c
index e3eec671e15d..3c9a8484b442 100644
--- a/drivers/pinctrl/intel/pinctrl-moorefield.c
+++ b/drivers/pinctrl/intel/pinctrl-moorefield.c
@@ -530,7 +530,7 @@ static const char *mofld_get_function_name(struct pinctrl_dev *pctldev, unsigned
{
struct mofld_pinctrl *mp = pinctrl_dev_get_drvdata(pctldev);
- return mp->functions[function].name;
+ return mp->functions[function].func.name;
}
static int mofld_get_function_groups(struct pinctrl_dev *pctldev, unsigned int function,
@@ -538,8 +538,8 @@ static int mofld_get_function_groups(struct pinctrl_dev *pctldev, unsigned int f
{
struct mofld_pinctrl *mp = pinctrl_dev_get_drvdata(pctldev);
- *groups = mp->functions[function].groups;
- *ngroups = mp->functions[function].ngroups;
+ *groups = mp->functions[function].func.groups;
+ *ngroups = mp->functions[function].func.ngroups;
return 0;
}
diff --git a/drivers/pinctrl/intel/pinctrl-sunrisepoint.c b/drivers/pinctrl/intel/pinctrl-sunrisepoint.c
index 292b660067e9..f91e27feb7c3 100644
--- a/drivers/pinctrl/intel/pinctrl-sunrisepoint.c
+++ b/drivers/pinctrl/intel/pinctrl-sunrisepoint.c
@@ -15,31 +15,17 @@
#include "pinctrl-intel.h"
-#define SPT_PAD_OWN 0x020
+#define SPT_H_PAD_OWN 0x020
#define SPT_H_PADCFGLOCK 0x090
-#define SPT_LP_PADCFGLOCK 0x0a0
-#define SPT_HOSTSW_OWN 0x0d0
-#define SPT_GPI_IS 0x100
-#define SPT_GPI_IE 0x120
-
-#define SPT_COMMUNITY(b, s, e, g, n, v, gs, gn) \
- { \
- .barno = (b), \
- .padown_offset = SPT_PAD_OWN, \
- .padcfglock_offset = SPT_##v##_PADCFGLOCK, \
- .hostown_offset = SPT_HOSTSW_OWN, \
- .is_offset = SPT_GPI_IS, \
- .ie_offset = SPT_GPI_IE, \
- .gpp_size = (gs), \
- .gpp_num_padown_regs = (gn), \
- .pin_base = (s), \
- .npins = ((e) - (s) + 1), \
- .gpps = (g), \
- .ngpps = (n), \
- }
+#define SPT_H_HOSTSW_OWN 0x0d0
+#define SPT_H_GPI_IS 0x100
+#define SPT_H_GPI_IE 0x120
-#define SPT_LP_COMMUNITY(b, s, e) \
- SPT_COMMUNITY(b, s, e, NULL, 0, LP, 24, 4)
+#define SPT_LP_PAD_OWN 0x020
+#define SPT_LP_PADCFGLOCK 0x0a0
+#define SPT_LP_HOSTSW_OWN 0x0d0
+#define SPT_LP_GPI_IS 0x100
+#define SPT_LP_GPI_IE 0x120
#define SPT_H_GPP(r, s, e, g) \
{ \
@@ -50,7 +36,10 @@
}
#define SPT_H_COMMUNITY(b, s, e, g) \
- SPT_COMMUNITY(b, s, e, g, ARRAY_SIZE(g), H, 0, 0)
+ INTEL_COMMUNITY_GPPS(b, s, e, g, SPT_H)
+
+#define SPT_LP_COMMUNITY(b, s, e) \
+ INTEL_COMMUNITY_SIZE(b, s, e, 24, 4, SPT_LP)
/* Sunrisepoint-LP */
static const struct pinctrl_pin_desc sptlp_pins[] = {
diff --git a/drivers/pinctrl/intel/pinctrl-tigerlake.c b/drivers/pinctrl/intel/pinctrl-tigerlake.c
index 431352fa2ab5..6e3a651d1241 100644
--- a/drivers/pinctrl/intel/pinctrl-tigerlake.c
+++ b/drivers/pinctrl/intel/pinctrl-tigerlake.c
@@ -15,13 +15,17 @@
#include "pinctrl-intel.h"
-#define TGL_PAD_OWN 0x020
+#define TGL_LP_PAD_OWN 0x020
#define TGL_LP_PADCFGLOCK 0x080
-#define TGL_H_PADCFGLOCK 0x090
#define TGL_LP_HOSTSW_OWN 0x0b0
+#define TGL_LP_GPI_IS 0x100
+#define TGL_LP_GPI_IE 0x120
+
+#define TGL_H_PAD_OWN 0x020
+#define TGL_H_PADCFGLOCK 0x090
#define TGL_H_HOSTSW_OWN 0x0c0
-#define TGL_GPI_IS 0x100
-#define TGL_GPI_IE 0x120
+#define TGL_H_GPI_IS 0x100
+#define TGL_H_GPI_IE 0x120
#define TGL_GPP(r, s, e, g) \
{ \
@@ -31,25 +35,11 @@
.gpio_base = (g), \
}
-#define TGL_COMMUNITY(b, s, e, g, v) \
- { \
- .barno = (b), \
- .padown_offset = TGL_PAD_OWN, \
- .padcfglock_offset = TGL_##v##_PADCFGLOCK, \
- .hostown_offset = TGL_##v##_HOSTSW_OWN, \
- .is_offset = TGL_GPI_IS, \
- .ie_offset = TGL_GPI_IE, \
- .pin_base = (s), \
- .npins = ((e) - (s) + 1), \
- .gpps = (g), \
- .ngpps = ARRAY_SIZE(g), \
- }
-
#define TGL_LP_COMMUNITY(b, s, e, g) \
- TGL_COMMUNITY(b, s, e, g, LP)
+ INTEL_COMMUNITY_GPPS(b, s, e, g, TGL_LP)
#define TGL_H_COMMUNITY(b, s, e, g) \
- TGL_COMMUNITY(b, s, e, g, H)
+ INTEL_COMMUNITY_GPPS(b, s, e, g, TGL_H)
/* Tiger Lake-LP */
static const struct pinctrl_pin_desc tgllp_pins[] = {
diff --git a/drivers/pinctrl/mediatek/Kconfig b/drivers/pinctrl/mediatek/Kconfig
index fed02c6fea06..f20c28334bcb 100644
--- a/drivers/pinctrl/mediatek/Kconfig
+++ b/drivers/pinctrl/mediatek/Kconfig
@@ -127,6 +127,11 @@ config PINCTRL_MT7622
default ARM64 && ARCH_MEDIATEK
select PINCTRL_MTK_MOORE
+config PINCTRL_MT7981
+ bool "Mediatek MT7981 pin control"
+ depends on OF
+ select PINCTRL_MTK_MOORE
+
config PINCTRL_MT7986
bool "Mediatek MT7986 pin control"
depends on OF
diff --git a/drivers/pinctrl/mediatek/Makefile b/drivers/pinctrl/mediatek/Makefile
index 53265404a39d..44d197af516a 100644
--- a/drivers/pinctrl/mediatek/Makefile
+++ b/drivers/pinctrl/mediatek/Makefile
@@ -18,6 +18,7 @@ obj-$(CONFIG_PINCTRL_MT6797) += pinctrl-mt6797.o
obj-$(CONFIG_PINCTRL_MT7622) += pinctrl-mt7622.o
obj-$(CONFIG_PINCTRL_MT7623) += pinctrl-mt7623.o
obj-$(CONFIG_PINCTRL_MT7629) += pinctrl-mt7629.o
+obj-$(CONFIG_PINCTRL_MT7981) += pinctrl-mt7981.o
obj-$(CONFIG_PINCTRL_MT7986) += pinctrl-mt7986.o
obj-$(CONFIG_PINCTRL_MT8167) += pinctrl-mt8167.o
obj-$(CONFIG_PINCTRL_MT8173) += pinctrl-mt8173.o
diff --git a/drivers/pinctrl/mediatek/pinctrl-moore.c b/drivers/pinctrl/mediatek/pinctrl-moore.c
index 1ec0413959e1..007b98ce5631 100644
--- a/drivers/pinctrl/mediatek/pinctrl-moore.c
+++ b/drivers/pinctrl/mediatek/pinctrl-moore.c
@@ -574,7 +574,6 @@ static int mtk_build_gpiochip(struct mtk_pinctrl *hw)
chip->set_config = mtk_gpio_set_config;
chip->base = -1;
chip->ngpio = hw->soc->npins;
- chip->of_gpio_n_cells = 2;
ret = gpiochip_add_data(chip, hw);
if (ret < 0)
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt7981.c b/drivers/pinctrl/mediatek/pinctrl-mt7981.c
new file mode 100644
index 000000000000..18abc5780011
--- /dev/null
+++ b/drivers/pinctrl/mediatek/pinctrl-mt7981.c
@@ -0,0 +1,1048 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * The MT7981 driver based on Linux generic pinctrl binding.
+ *
+ * Copyright (C) 2020 MediaTek Inc.
+ * Author: Sam Shih <sam.shih@mediatek.com>
+ */
+
+#include "pinctrl-moore.h"
+
+#define MT7981_PIN(_number, _name) \
+ MTK_PIN(_number, _name, 0, _number, DRV_GRP4)
+
+#define PIN_FIELD_BASE(_s_pin, _e_pin, _i_base, _s_addr, _x_addrs, _s_bit, _x_bits) \
+ PIN_FIELD_CALC(_s_pin, _e_pin, _i_base, _s_addr, _x_addrs, _s_bit, \
+ _x_bits, 32, 0)
+
+#define PINS_FIELD_BASE(_s_pin, _e_pin, _i_base, _s_addr, _x_addrs, _s_bit, _x_bits) \
+ PIN_FIELD_CALC(_s_pin, _e_pin, _i_base, _s_addr, _x_addrs, _s_bit, \
+ _x_bits, 32, 1)
+
+static const struct mtk_pin_field_calc mt7981_pin_mode_range[] = {
+ PIN_FIELD(0, 56, 0x300, 0x10, 0, 4),
+};
+
+static const struct mtk_pin_field_calc mt7981_pin_dir_range[] = {
+ PIN_FIELD(0, 56, 0x0, 0x10, 0, 1),
+};
+
+static const struct mtk_pin_field_calc mt7981_pin_di_range[] = {
+ PIN_FIELD(0, 56, 0x200, 0x10, 0, 1),
+};
+
+static const struct mtk_pin_field_calc mt7981_pin_do_range[] = {
+ PIN_FIELD(0, 56, 0x100, 0x10, 0, 1),
+};
+
+static const struct mtk_pin_field_calc mt7981_pin_ies_range[] = {
+ PIN_FIELD_BASE(0, 0, 1, 0x10, 0x10, 1, 1),
+ PIN_FIELD_BASE(1, 1, 1, 0x10, 0x10, 0, 1),
+ PIN_FIELD_BASE(2, 2, 5, 0x20, 0x10, 6, 1),
+ PIN_FIELD_BASE(3, 3, 4, 0x20, 0x10, 6, 1),
+ PIN_FIELD_BASE(4, 4, 4, 0x20, 0x10, 2, 1),
+ PIN_FIELD_BASE(5, 5, 4, 0x20, 0x10, 1, 1),
+ PIN_FIELD_BASE(6, 6, 4, 0x20, 0x10, 3, 1),
+ PIN_FIELD_BASE(7, 7, 4, 0x20, 0x10, 0, 1),
+ PIN_FIELD_BASE(8, 8, 4, 0x20, 0x10, 4, 1),
+
+ PIN_FIELD_BASE(9, 9, 5, 0x20, 0x10, 9, 1),
+ PIN_FIELD_BASE(10, 10, 5, 0x20, 0x10, 8, 1),
+ PIN_FIELD_BASE(11, 11, 5, 0x40, 0x10, 10, 1),
+ PIN_FIELD_BASE(12, 12, 5, 0x20, 0x10, 7, 1),
+ PIN_FIELD_BASE(13, 13, 5, 0x20, 0x10, 11, 1),
+
+ PIN_FIELD_BASE(14, 14, 4, 0x20, 0x10, 8, 1),
+
+ PIN_FIELD_BASE(15, 15, 2, 0x20, 0x10, 0, 1),
+ PIN_FIELD_BASE(16, 16, 2, 0x20, 0x10, 1, 1),
+ PIN_FIELD_BASE(17, 17, 2, 0x20, 0x10, 5, 1),
+ PIN_FIELD_BASE(18, 18, 2, 0x20, 0x10, 4, 1),
+ PIN_FIELD_BASE(19, 19, 2, 0x20, 0x10, 2, 1),
+ PIN_FIELD_BASE(20, 20, 2, 0x20, 0x10, 3, 1),
+ PIN_FIELD_BASE(21, 21, 2, 0x20, 0x10, 6, 1),
+ PIN_FIELD_BASE(22, 22, 2, 0x20, 0x10, 7, 1),
+ PIN_FIELD_BASE(23, 23, 2, 0x20, 0x10, 10, 1),
+ PIN_FIELD_BASE(24, 24, 2, 0x20, 0x10, 9, 1),
+ PIN_FIELD_BASE(25, 25, 2, 0x20, 0x10, 8, 1),
+
+ PIN_FIELD_BASE(26, 26, 5, 0x20, 0x10, 0, 1),
+ PIN_FIELD_BASE(27, 27, 5, 0x20, 0x10, 4, 1),
+ PIN_FIELD_BASE(28, 28, 5, 0x20, 0x10, 3, 1),
+ PIN_FIELD_BASE(29, 29, 5, 0x20, 0x10, 1, 1),
+ PIN_FIELD_BASE(30, 30, 5, 0x20, 0x10, 2, 1),
+ PIN_FIELD_BASE(31, 31, 5, 0x20, 0x10, 5, 1),
+
+ PIN_FIELD_BASE(32, 32, 1, 0x10, 0x10, 2, 1),
+ PIN_FIELD_BASE(33, 33, 1, 0x10, 0x10, 3, 1),
+
+ PIN_FIELD_BASE(34, 34, 4, 0x20, 0x10, 5, 1),
+ PIN_FIELD_BASE(35, 35, 4, 0x20, 0x10, 7, 1),
+
+ PIN_FIELD_BASE(36, 36, 3, 0x10, 0x10, 2, 1),
+ PIN_FIELD_BASE(37, 37, 3, 0x10, 0x10, 3, 1),
+ PIN_FIELD_BASE(38, 38, 3, 0x10, 0x10, 0, 1),
+ PIN_FIELD_BASE(39, 39, 3, 0x10, 0x10, 1, 1),
+
+ PIN_FIELD_BASE(40, 40, 7, 0x30, 0x10, 1, 1),
+ PIN_FIELD_BASE(41, 41, 7, 0x30, 0x10, 0, 1),
+ PIN_FIELD_BASE(42, 42, 7, 0x30, 0x10, 9, 1),
+ PIN_FIELD_BASE(43, 43, 7, 0x30, 0x10, 7, 1),
+ PIN_FIELD_BASE(44, 44, 7, 0x30, 0x10, 8, 1),
+ PIN_FIELD_BASE(45, 45, 7, 0x30, 0x10, 3, 1),
+ PIN_FIELD_BASE(46, 46, 7, 0x30, 0x10, 4, 1),
+ PIN_FIELD_BASE(47, 47, 7, 0x30, 0x10, 5, 1),
+ PIN_FIELD_BASE(48, 48, 7, 0x30, 0x10, 6, 1),
+ PIN_FIELD_BASE(49, 49, 7, 0x30, 0x10, 2, 1),
+
+ PIN_FIELD_BASE(50, 50, 6, 0x10, 0x10, 0, 1),
+ PIN_FIELD_BASE(51, 51, 6, 0x10, 0x10, 2, 1),
+ PIN_FIELD_BASE(52, 52, 6, 0x10, 0x10, 3, 1),
+ PIN_FIELD_BASE(53, 53, 6, 0x10, 0x10, 4, 1),
+ PIN_FIELD_BASE(54, 54, 6, 0x10, 0x10, 5, 1),
+ PIN_FIELD_BASE(55, 55, 6, 0x10, 0x10, 6, 1),
+ PIN_FIELD_BASE(56, 56, 6, 0x10, 0x10, 1, 1),
+};
+
+static const struct mtk_pin_field_calc mt7981_pin_smt_range[] = {
+ PIN_FIELD_BASE(0, 0, 1, 0x60, 0x10, 1, 1),
+ PIN_FIELD_BASE(1, 1, 1, 0x60, 0x10, 0, 1),
+ PIN_FIELD_BASE(2, 2, 5, 0x90, 0x10, 6, 1),
+ PIN_FIELD_BASE(3, 3, 4, 0x80, 0x10, 6, 1),
+ PIN_FIELD_BASE(4, 4, 4, 0x80, 0x10, 2, 1),
+ PIN_FIELD_BASE(5, 5, 4, 0x80, 0x10, 1, 1),
+ PIN_FIELD_BASE(6, 6, 4, 0x80, 0x10, 3, 1),
+ PIN_FIELD_BASE(7, 7, 4, 0x80, 0x10, 0, 1),
+ PIN_FIELD_BASE(8, 8, 4, 0x80, 0x10, 4, 1),
+
+ PIN_FIELD_BASE(9, 9, 5, 0x90, 0x10, 9, 1),
+ PIN_FIELD_BASE(10, 10, 5, 0x90, 0x10, 8, 1),
+ PIN_FIELD_BASE(11, 11, 5, 0x90, 0x10, 10, 1),
+ PIN_FIELD_BASE(12, 12, 5, 0x90, 0x10, 7, 1),
+ PIN_FIELD_BASE(13, 13, 5, 0x90, 0x10, 11, 1),
+
+ PIN_FIELD_BASE(14, 14, 4, 0x80, 0x10, 8, 1),
+
+ PIN_FIELD_BASE(15, 15, 2, 0x90, 0x10, 0, 1),
+ PIN_FIELD_BASE(16, 16, 2, 0x90, 0x10, 1, 1),
+ PIN_FIELD_BASE(17, 17, 2, 0x90, 0x10, 5, 1),
+ PIN_FIELD_BASE(18, 18, 2, 0x90, 0x10, 4, 1),
+ PIN_FIELD_BASE(19, 19, 2, 0x90, 0x10, 2, 1),
+ PIN_FIELD_BASE(20, 20, 2, 0x90, 0x10, 3, 1),
+ PIN_FIELD_BASE(21, 21, 2, 0x90, 0x10, 6, 1),
+ PIN_FIELD_BASE(22, 22, 2, 0x90, 0x10, 7, 1),
+ PIN_FIELD_BASE(23, 23, 2, 0x90, 0x10, 10, 1),
+ PIN_FIELD_BASE(24, 24, 2, 0x90, 0x10, 9, 1),
+ PIN_FIELD_BASE(25, 25, 2, 0x90, 0x10, 8, 1),
+
+ PIN_FIELD_BASE(26, 26, 5, 0x90, 0x10, 0, 1),
+ PIN_FIELD_BASE(27, 27, 5, 0x90, 0x10, 4, 1),
+ PIN_FIELD_BASE(28, 28, 5, 0x90, 0x10, 3, 1),
+ PIN_FIELD_BASE(29, 29, 5, 0x90, 0x10, 1, 1),
+ PIN_FIELD_BASE(30, 30, 5, 0x90, 0x10, 2, 1),
+ PIN_FIELD_BASE(31, 31, 5, 0x90, 0x10, 5, 1),
+
+ PIN_FIELD_BASE(32, 32, 1, 0x60, 0x10, 2, 1),
+ PIN_FIELD_BASE(33, 33, 1, 0x60, 0x10, 3, 1),
+
+ PIN_FIELD_BASE(34, 34, 4, 0x80, 0x10, 5, 1),
+ PIN_FIELD_BASE(35, 35, 4, 0x80, 0x10, 7, 1),
+
+ PIN_FIELD_BASE(36, 36, 3, 0x60, 0x10, 2, 1),
+ PIN_FIELD_BASE(37, 37, 3, 0x60, 0x10, 3, 1),
+ PIN_FIELD_BASE(38, 38, 3, 0x60, 0x10, 0, 1),
+ PIN_FIELD_BASE(39, 39, 3, 0x60, 0x10, 1, 1),
+
+ PIN_FIELD_BASE(40, 40, 7, 0x70, 0x10, 1, 1),
+ PIN_FIELD_BASE(41, 41, 7, 0x70, 0x10, 0, 1),
+ PIN_FIELD_BASE(42, 42, 7, 0x70, 0x10, 9, 1),
+ PIN_FIELD_BASE(43, 43, 7, 0x70, 0x10, 7, 1),
+ PIN_FIELD_BASE(44, 44, 7, 0x30, 0x10, 8, 1),
+ PIN_FIELD_BASE(45, 45, 7, 0x70, 0x10, 3, 1),
+ PIN_FIELD_BASE(46, 46, 7, 0x70, 0x10, 4, 1),
+ PIN_FIELD_BASE(47, 47, 7, 0x70, 0x10, 5, 1),
+ PIN_FIELD_BASE(48, 48, 7, 0x70, 0x10, 6, 1),
+ PIN_FIELD_BASE(49, 49, 7, 0x70, 0x10, 2, 1),
+
+ PIN_FIELD_BASE(50, 50, 6, 0x50, 0x10, 0, 1),
+ PIN_FIELD_BASE(51, 51, 6, 0x50, 0x10, 2, 1),
+ PIN_FIELD_BASE(52, 52, 6, 0x50, 0x10, 3, 1),
+ PIN_FIELD_BASE(53, 53, 6, 0x50, 0x10, 4, 1),
+ PIN_FIELD_BASE(54, 54, 6, 0x50, 0x10, 5, 1),
+ PIN_FIELD_BASE(55, 55, 6, 0x50, 0x10, 6, 1),
+ PIN_FIELD_BASE(56, 56, 6, 0x50, 0x10, 1, 1),
+};
+
+static const struct mtk_pin_field_calc mt7981_pin_pu_range[] = {
+ PIN_FIELD_BASE(40, 40, 7, 0x50, 0x10, 1, 1),
+ PIN_FIELD_BASE(41, 41, 7, 0x50, 0x10, 0, 1),
+ PIN_FIELD_BASE(42, 42, 7, 0x50, 0x10, 9, 1),
+ PIN_FIELD_BASE(43, 43, 7, 0x50, 0x10, 7, 1),
+ PIN_FIELD_BASE(44, 44, 7, 0x50, 0x10, 8, 1),
+ PIN_FIELD_BASE(45, 45, 7, 0x50, 0x10, 3, 1),
+ PIN_FIELD_BASE(46, 46, 7, 0x50, 0x10, 4, 1),
+ PIN_FIELD_BASE(47, 47, 7, 0x50, 0x10, 5, 1),
+ PIN_FIELD_BASE(48, 48, 7, 0x50, 0x10, 6, 1),
+ PIN_FIELD_BASE(49, 49, 7, 0x50, 0x10, 2, 1),
+
+ PIN_FIELD_BASE(50, 50, 6, 0x30, 0x10, 0, 1),
+ PIN_FIELD_BASE(51, 51, 6, 0x30, 0x10, 2, 1),
+ PIN_FIELD_BASE(52, 52, 6, 0x30, 0x10, 3, 1),
+ PIN_FIELD_BASE(53, 53, 6, 0x30, 0x10, 4, 1),
+ PIN_FIELD_BASE(54, 54, 6, 0x30, 0x10, 5, 1),
+ PIN_FIELD_BASE(55, 55, 6, 0x30, 0x10, 6, 1),
+ PIN_FIELD_BASE(56, 56, 6, 0x30, 0x10, 1, 1),
+};
+
+static const struct mtk_pin_field_calc mt7981_pin_pd_range[] = {
+ PIN_FIELD_BASE(40, 40, 7, 0x40, 0x10, 1, 1),
+ PIN_FIELD_BASE(41, 41, 7, 0x40, 0x10, 0, 1),
+ PIN_FIELD_BASE(42, 42, 7, 0x40, 0x10, 9, 1),
+ PIN_FIELD_BASE(43, 43, 7, 0x40, 0x10, 7, 1),
+ PIN_FIELD_BASE(44, 44, 7, 0x40, 0x10, 8, 1),
+ PIN_FIELD_BASE(45, 45, 7, 0x40, 0x10, 3, 1),
+ PIN_FIELD_BASE(46, 46, 7, 0x40, 0x10, 4, 1),
+ PIN_FIELD_BASE(47, 47, 7, 0x40, 0x10, 5, 1),
+ PIN_FIELD_BASE(48, 48, 7, 0x40, 0x10, 6, 1),
+ PIN_FIELD_BASE(49, 49, 7, 0x40, 0x10, 2, 1),
+
+ PIN_FIELD_BASE(50, 50, 6, 0x20, 0x10, 0, 1),
+ PIN_FIELD_BASE(51, 51, 6, 0x20, 0x10, 2, 1),
+ PIN_FIELD_BASE(52, 52, 6, 0x20, 0x10, 3, 1),
+ PIN_FIELD_BASE(53, 53, 6, 0x20, 0x10, 4, 1),
+ PIN_FIELD_BASE(54, 54, 6, 0x20, 0x10, 5, 1),
+ PIN_FIELD_BASE(55, 55, 6, 0x20, 0x10, 6, 1),
+ PIN_FIELD_BASE(56, 56, 6, 0x20, 0x10, 1, 1),
+};
+
+static const struct mtk_pin_field_calc mt7981_pin_drv_range[] = {
+ PIN_FIELD_BASE(0, 0, 1, 0x00, 0x10, 3, 3),
+ PIN_FIELD_BASE(1, 1, 1, 0x00, 0x10, 0, 3),
+
+ PIN_FIELD_BASE(2, 2, 5, 0x00, 0x10, 18, 3),
+
+ PIN_FIELD_BASE(3, 3, 4, 0x00, 0x10, 18, 1),
+ PIN_FIELD_BASE(4, 4, 4, 0x00, 0x10, 6, 1),
+ PIN_FIELD_BASE(5, 5, 4, 0x00, 0x10, 3, 3),
+ PIN_FIELD_BASE(6, 6, 4, 0x00, 0x10, 9, 3),
+ PIN_FIELD_BASE(7, 7, 4, 0x00, 0x10, 0, 3),
+ PIN_FIELD_BASE(8, 8, 4, 0x00, 0x10, 12, 3),
+
+ PIN_FIELD_BASE(9, 9, 5, 0x00, 0x10, 27, 3),
+ PIN_FIELD_BASE(10, 10, 5, 0x00, 0x10, 24, 3),
+ PIN_FIELD_BASE(11, 11, 5, 0x00, 0x10, 0, 3),
+ PIN_FIELD_BASE(12, 12, 5, 0x00, 0x10, 21, 3),
+ PIN_FIELD_BASE(13, 13, 5, 0x00, 0x10, 3, 3),
+
+ PIN_FIELD_BASE(14, 14, 4, 0x00, 0x10, 27, 3),
+
+ PIN_FIELD_BASE(15, 15, 2, 0x00, 0x10, 0, 3),
+ PIN_FIELD_BASE(16, 16, 2, 0x00, 0x10, 3, 3),
+ PIN_FIELD_BASE(17, 17, 2, 0x00, 0x10, 15, 3),
+ PIN_FIELD_BASE(18, 18, 2, 0x00, 0x10, 12, 3),
+ PIN_FIELD_BASE(19, 19, 2, 0x00, 0x10, 6, 3),
+ PIN_FIELD_BASE(20, 20, 2, 0x00, 0x10, 9, 3),
+ PIN_FIELD_BASE(21, 21, 2, 0x00, 0x10, 18, 3),
+ PIN_FIELD_BASE(22, 22, 2, 0x00, 0x10, 21, 3),
+ PIN_FIELD_BASE(23, 23, 2, 0x00, 0x10, 0, 3),
+ PIN_FIELD_BASE(24, 24, 2, 0x00, 0x10, 27, 3),
+ PIN_FIELD_BASE(25, 25, 2, 0x00, 0x10, 24, 3),
+
+ PIN_FIELD_BASE(26, 26, 5, 0x00, 0x10, 0, 3),
+ PIN_FIELD_BASE(27, 27, 5, 0x00, 0x10, 12, 3),
+ PIN_FIELD_BASE(28, 28, 5, 0x00, 0x10, 9, 3),
+ PIN_FIELD_BASE(29, 29, 5, 0x00, 0x10, 3, 3),
+ PIN_FIELD_BASE(30, 30, 5, 0x00, 0x10, 6, 3),
+ PIN_FIELD_BASE(31, 31, 5, 0x00, 0x10, 15, 3),
+
+ PIN_FIELD_BASE(32, 32, 1, 0x00, 0x10, 9, 3),
+ PIN_FIELD_BASE(33, 33, 1, 0x00, 0x10, 12, 3),
+
+ PIN_FIELD_BASE(34, 34, 4, 0x00, 0x10, 15, 3),
+ PIN_FIELD_BASE(35, 35, 4, 0x00, 0x10, 21, 3),
+
+ PIN_FIELD_BASE(36, 36, 3, 0x00, 0x10, 6, 3),
+ PIN_FIELD_BASE(37, 37, 3, 0x00, 0x10, 9, 3),
+ PIN_FIELD_BASE(38, 38, 3, 0x00, 0x10, 0, 3),
+ PIN_FIELD_BASE(39, 39, 3, 0x00, 0x10, 3, 3),
+
+ PIN_FIELD_BASE(40, 40, 7, 0x00, 0x10, 3, 3),
+ PIN_FIELD_BASE(41, 41, 7, 0x00, 0x10, 0, 3),
+ PIN_FIELD_BASE(42, 42, 7, 0x00, 0x10, 27, 3),
+ PIN_FIELD_BASE(43, 43, 7, 0x00, 0x10, 21, 3),
+ PIN_FIELD_BASE(44, 44, 7, 0x00, 0x10, 24, 3),
+ PIN_FIELD_BASE(45, 45, 7, 0x00, 0x10, 9, 3),
+ PIN_FIELD_BASE(46, 46, 7, 0x00, 0x10, 12, 3),
+ PIN_FIELD_BASE(47, 47, 7, 0x00, 0x10, 15, 3),
+ PIN_FIELD_BASE(48, 48, 7, 0x00, 0x10, 18, 3),
+ PIN_FIELD_BASE(49, 49, 7, 0x00, 0x10, 6, 3),
+
+ PIN_FIELD_BASE(50, 50, 6, 0x00, 0x10, 0, 3),
+ PIN_FIELD_BASE(51, 51, 6, 0x00, 0x10, 6, 3),
+ PIN_FIELD_BASE(52, 52, 6, 0x00, 0x10, 9, 3),
+ PIN_FIELD_BASE(53, 53, 6, 0x00, 0x10, 12, 3),
+ PIN_FIELD_BASE(54, 54, 6, 0x00, 0x10, 15, 3),
+ PIN_FIELD_BASE(55, 55, 6, 0x00, 0x10, 18, 3),
+ PIN_FIELD_BASE(56, 56, 6, 0x00, 0x10, 3, 3),
+};
+
+static const struct mtk_pin_field_calc mt7981_pin_pupd_range[] = {
+ PIN_FIELD_BASE(0, 0, 1, 0x20, 0x10, 1, 1),
+ PIN_FIELD_BASE(1, 1, 1, 0x20, 0x10, 0, 1),
+ PIN_FIELD_BASE(2, 2, 5, 0x30, 0x10, 6, 1),
+ PIN_FIELD_BASE(3, 3, 4, 0x30, 0x10, 6, 1),
+ PIN_FIELD_BASE(4, 4, 4, 0x30, 0x10, 2, 1),
+ PIN_FIELD_BASE(5, 5, 4, 0x30, 0x10, 1, 1),
+ PIN_FIELD_BASE(6, 6, 4, 0x30, 0x10, 3, 1),
+ PIN_FIELD_BASE(7, 7, 4, 0x30, 0x10, 0, 1),
+ PIN_FIELD_BASE(8, 8, 4, 0x30, 0x10, 4, 1),
+
+ PIN_FIELD_BASE(9, 9, 5, 0x30, 0x10, 9, 1),
+ PIN_FIELD_BASE(10, 10, 5, 0x30, 0x10, 8, 1),
+ PIN_FIELD_BASE(11, 11, 5, 0x30, 0x10, 10, 1),
+ PIN_FIELD_BASE(12, 12, 5, 0x30, 0x10, 7, 1),
+ PIN_FIELD_BASE(13, 13, 5, 0x30, 0x10, 11, 1),
+
+ PIN_FIELD_BASE(14, 14, 4, 0x30, 0x10, 8, 1),
+
+ PIN_FIELD_BASE(15, 15, 2, 0x30, 0x10, 0, 1),
+ PIN_FIELD_BASE(16, 16, 2, 0x30, 0x10, 1, 1),
+ PIN_FIELD_BASE(17, 17, 2, 0x30, 0x10, 5, 1),
+ PIN_FIELD_BASE(18, 18, 2, 0x30, 0x10, 4, 1),
+ PIN_FIELD_BASE(19, 19, 2, 0x30, 0x10, 2, 1),
+ PIN_FIELD_BASE(20, 20, 2, 0x90, 0x10, 3, 1),
+ PIN_FIELD_BASE(21, 21, 2, 0x30, 0x10, 6, 1),
+ PIN_FIELD_BASE(22, 22, 2, 0x30, 0x10, 7, 1),
+ PIN_FIELD_BASE(23, 23, 2, 0x30, 0x10, 10, 1),
+ PIN_FIELD_BASE(24, 24, 2, 0x30, 0x10, 9, 1),
+ PIN_FIELD_BASE(25, 25, 2, 0x30, 0x10, 8, 1),
+
+ PIN_FIELD_BASE(26, 26, 5, 0x30, 0x10, 0, 1),
+ PIN_FIELD_BASE(27, 27, 5, 0x30, 0x10, 4, 1),
+ PIN_FIELD_BASE(28, 28, 5, 0x30, 0x10, 3, 1),
+ PIN_FIELD_BASE(29, 29, 5, 0x30, 0x10, 1, 1),
+ PIN_FIELD_BASE(30, 30, 5, 0x30, 0x10, 2, 1),
+ PIN_FIELD_BASE(31, 31, 5, 0x30, 0x10, 5, 1),
+
+ PIN_FIELD_BASE(32, 32, 1, 0x20, 0x10, 2, 1),
+ PIN_FIELD_BASE(33, 33, 1, 0x20, 0x10, 3, 1),
+
+ PIN_FIELD_BASE(34, 34, 4, 0x30, 0x10, 5, 1),
+ PIN_FIELD_BASE(35, 35, 4, 0x30, 0x10, 7, 1),
+
+ PIN_FIELD_BASE(36, 36, 3, 0x20, 0x10, 2, 1),
+ PIN_FIELD_BASE(37, 37, 3, 0x20, 0x10, 3, 1),
+ PIN_FIELD_BASE(38, 38, 3, 0x20, 0x10, 0, 1),
+ PIN_FIELD_BASE(39, 39, 3, 0x20, 0x10, 1, 1),
+};
+
+static const struct mtk_pin_field_calc mt7981_pin_r0_range[] = {
+ PIN_FIELD_BASE(0, 0, 1, 0x30, 0x10, 1, 1),
+ PIN_FIELD_BASE(1, 1, 1, 0x30, 0x10, 0, 1),
+ PIN_FIELD_BASE(2, 2, 5, 0x40, 0x10, 6, 1),
+ PIN_FIELD_BASE(3, 3, 4, 0x40, 0x10, 6, 1),
+ PIN_FIELD_BASE(4, 4, 4, 0x40, 0x10, 2, 1),
+ PIN_FIELD_BASE(5, 5, 4, 0x40, 0x10, 1, 1),
+ PIN_FIELD_BASE(6, 6, 4, 0x40, 0x10, 3, 1),
+ PIN_FIELD_BASE(7, 7, 4, 0x40, 0x10, 0, 1),
+ PIN_FIELD_BASE(8, 8, 4, 0x40, 0x10, 4, 1),
+
+ PIN_FIELD_BASE(9, 9, 5, 0x40, 0x10, 9, 1),
+ PIN_FIELD_BASE(10, 10, 5, 0x40, 0x10, 8, 1),
+ PIN_FIELD_BASE(11, 11, 5, 0x40, 0x10, 10, 1),
+ PIN_FIELD_BASE(12, 12, 5, 0x40, 0x10, 7, 1),
+ PIN_FIELD_BASE(13, 13, 5, 0x40, 0x10, 11, 1),
+
+ PIN_FIELD_BASE(14, 14, 4, 0x40, 0x10, 8, 1),
+
+ PIN_FIELD_BASE(15, 15, 2, 0x40, 0x10, 0, 1),
+ PIN_FIELD_BASE(16, 16, 2, 0x40, 0x10, 1, 1),
+ PIN_FIELD_BASE(17, 17, 2, 0x40, 0x10, 5, 1),
+ PIN_FIELD_BASE(18, 18, 2, 0x40, 0x10, 4, 1),
+ PIN_FIELD_BASE(19, 19, 2, 0x40, 0x10, 2, 1),
+ PIN_FIELD_BASE(20, 20, 2, 0x40, 0x10, 3, 1),
+ PIN_FIELD_BASE(21, 21, 2, 0x40, 0x10, 6, 1),
+ PIN_FIELD_BASE(22, 22, 2, 0x40, 0x10, 7, 1),
+ PIN_FIELD_BASE(23, 23, 2, 0x40, 0x10, 10, 1),
+ PIN_FIELD_BASE(24, 24, 2, 0x40, 0x10, 9, 1),
+ PIN_FIELD_BASE(25, 25, 2, 0x40, 0x10, 8, 1),
+
+ PIN_FIELD_BASE(26, 26, 5, 0x40, 0x10, 0, 1),
+ PIN_FIELD_BASE(27, 27, 5, 0x40, 0x10, 4, 1),
+ PIN_FIELD_BASE(28, 28, 5, 0x40, 0x10, 3, 1),
+ PIN_FIELD_BASE(29, 29, 5, 0x40, 0x10, 1, 1),
+ PIN_FIELD_BASE(30, 30, 5, 0x40, 0x10, 2, 1),
+ PIN_FIELD_BASE(31, 31, 5, 0x40, 0x10, 5, 1),
+
+ PIN_FIELD_BASE(32, 32, 1, 0x30, 0x10, 2, 1),
+ PIN_FIELD_BASE(33, 33, 1, 0x30, 0x10, 3, 1),
+
+ PIN_FIELD_BASE(34, 34, 4, 0x40, 0x10, 5, 1),
+ PIN_FIELD_BASE(35, 35, 4, 0x40, 0x10, 7, 1),
+
+ PIN_FIELD_BASE(36, 36, 3, 0x30, 0x10, 2, 1),
+ PIN_FIELD_BASE(37, 37, 3, 0x30, 0x10, 3, 1),
+ PIN_FIELD_BASE(38, 38, 3, 0x30, 0x10, 0, 1),
+ PIN_FIELD_BASE(39, 39, 3, 0x30, 0x10, 1, 1),
+};
+
+static const struct mtk_pin_field_calc mt7981_pin_r1_range[] = {
+ PIN_FIELD_BASE(0, 0, 1, 0x40, 0x10, 1, 1),
+ PIN_FIELD_BASE(1, 1, 1, 0x40, 0x10, 0, 1),
+ PIN_FIELD_BASE(2, 2, 5, 0x50, 0x10, 6, 1),
+ PIN_FIELD_BASE(3, 3, 4, 0x50, 0x10, 6, 1),
+ PIN_FIELD_BASE(4, 4, 4, 0x50, 0x10, 2, 1),
+ PIN_FIELD_BASE(5, 5, 4, 0x50, 0x10, 1, 1),
+ PIN_FIELD_BASE(6, 6, 4, 0x50, 0x10, 3, 1),
+ PIN_FIELD_BASE(7, 7, 4, 0x50, 0x10, 0, 1),
+ PIN_FIELD_BASE(8, 8, 4, 0x50, 0x10, 4, 1),
+
+ PIN_FIELD_BASE(9, 9, 5, 0x50, 0x10, 9, 1),
+ PIN_FIELD_BASE(10, 10, 5, 0x50, 0x10, 8, 1),
+ PIN_FIELD_BASE(11, 11, 5, 0x50, 0x10, 10, 1),
+ PIN_FIELD_BASE(12, 12, 5, 0x50, 0x10, 7, 1),
+ PIN_FIELD_BASE(13, 13, 5, 0x50, 0x10, 11, 1),
+
+ PIN_FIELD_BASE(14, 14, 4, 0x50, 0x10, 8, 1),
+
+ PIN_FIELD_BASE(15, 15, 2, 0x50, 0x10, 0, 1),
+ PIN_FIELD_BASE(16, 16, 2, 0x50, 0x10, 1, 1),
+ PIN_FIELD_BASE(17, 17, 2, 0x50, 0x10, 5, 1),
+ PIN_FIELD_BASE(18, 18, 2, 0x50, 0x10, 4, 1),
+ PIN_FIELD_BASE(19, 19, 2, 0x50, 0x10, 2, 1),
+ PIN_FIELD_BASE(20, 20, 2, 0x50, 0x10, 3, 1),
+ PIN_FIELD_BASE(21, 21, 2, 0x50, 0x10, 6, 1),
+ PIN_FIELD_BASE(22, 22, 2, 0x50, 0x10, 7, 1),
+ PIN_FIELD_BASE(23, 23, 2, 0x50, 0x10, 10, 1),
+ PIN_FIELD_BASE(24, 24, 2, 0x50, 0x10, 9, 1),
+ PIN_FIELD_BASE(25, 25, 2, 0x50, 0x10, 8, 1),
+
+ PIN_FIELD_BASE(26, 26, 5, 0x50, 0x10, 0, 1),
+ PIN_FIELD_BASE(27, 27, 5, 0x50, 0x10, 4, 1),
+ PIN_FIELD_BASE(28, 28, 5, 0x50, 0x10, 3, 1),
+ PIN_FIELD_BASE(29, 29, 5, 0x50, 0x10, 1, 1),
+ PIN_FIELD_BASE(30, 30, 5, 0x50, 0x10, 2, 1),
+ PIN_FIELD_BASE(31, 31, 5, 0x50, 0x10, 5, 1),
+
+ PIN_FIELD_BASE(32, 32, 1, 0x40, 0x10, 2, 1),
+ PIN_FIELD_BASE(33, 33, 1, 0x40, 0x10, 3, 1),
+
+ PIN_FIELD_BASE(34, 34, 4, 0x50, 0x10, 5, 1),
+ PIN_FIELD_BASE(35, 35, 4, 0x50, 0x10, 7, 1),
+
+ PIN_FIELD_BASE(36, 36, 3, 0x40, 0x10, 2, 1),
+ PIN_FIELD_BASE(37, 37, 3, 0x40, 0x10, 3, 1),
+ PIN_FIELD_BASE(38, 38, 3, 0x40, 0x10, 0, 1),
+ PIN_FIELD_BASE(39, 39, 3, 0x40, 0x10, 1, 1),
+};
+
+static const unsigned int mt7981_pull_type[] = {
+ MTK_PULL_PUPD_R1R0_TYPE,/*0*/ MTK_PULL_PUPD_R1R0_TYPE,/*1*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*2*/ MTK_PULL_PUPD_R1R0_TYPE,/*3*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*4*/ MTK_PULL_PUPD_R1R0_TYPE,/*5*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*6*/ MTK_PULL_PUPD_R1R0_TYPE,/*7*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*8*/ MTK_PULL_PUPD_R1R0_TYPE,/*9*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*10*/ MTK_PULL_PUPD_R1R0_TYPE,/*11*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*12*/ MTK_PULL_PUPD_R1R0_TYPE,/*13*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*14*/ MTK_PULL_PUPD_R1R0_TYPE,/*15*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*16*/ MTK_PULL_PUPD_R1R0_TYPE,/*17*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*18*/ MTK_PULL_PUPD_R1R0_TYPE,/*19*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*20*/ MTK_PULL_PUPD_R1R0_TYPE,/*21*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*22*/ MTK_PULL_PUPD_R1R0_TYPE,/*23*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*24*/ MTK_PULL_PUPD_R1R0_TYPE,/*25*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*26*/ MTK_PULL_PUPD_R1R0_TYPE,/*27*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*28*/ MTK_PULL_PUPD_R1R0_TYPE,/*29*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*30*/ MTK_PULL_PUPD_R1R0_TYPE,/*31*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*32*/ MTK_PULL_PUPD_R1R0_TYPE,/*33*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*34*/ MTK_PULL_PUPD_R1R0_TYPE,/*35*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*36*/ MTK_PULL_PUPD_R1R0_TYPE,/*37*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*38*/ MTK_PULL_PUPD_R1R0_TYPE,/*39*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*40*/ MTK_PULL_PUPD_R1R0_TYPE,/*41*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*42*/ MTK_PULL_PUPD_R1R0_TYPE,/*43*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*44*/ MTK_PULL_PUPD_R1R0_TYPE,/*45*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*46*/ MTK_PULL_PUPD_R1R0_TYPE,/*47*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*48*/ MTK_PULL_PUPD_R1R0_TYPE,/*49*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*50*/ MTK_PULL_PUPD_R1R0_TYPE,/*51*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*52*/ MTK_PULL_PUPD_R1R0_TYPE,/*53*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*54*/ MTK_PULL_PUPD_R1R0_TYPE,/*55*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*56*/ MTK_PULL_PUPD_R1R0_TYPE,/*57*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*58*/ MTK_PULL_PUPD_R1R0_TYPE,/*59*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*60*/ MTK_PULL_PUPD_R1R0_TYPE,/*61*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*62*/ MTK_PULL_PUPD_R1R0_TYPE,/*63*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*64*/ MTK_PULL_PUPD_R1R0_TYPE,/*65*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*66*/ MTK_PULL_PUPD_R1R0_TYPE,/*67*/
+ MTK_PULL_PUPD_R1R0_TYPE,/*68*/ MTK_PULL_PU_PD_TYPE,/*69*/
+ MTK_PULL_PU_PD_TYPE,/*70*/ MTK_PULL_PU_PD_TYPE,/*71*/
+ MTK_PULL_PU_PD_TYPE,/*72*/ MTK_PULL_PU_PD_TYPE,/*73*/
+ MTK_PULL_PU_PD_TYPE,/*74*/ MTK_PULL_PU_PD_TYPE,/*75*/
+ MTK_PULL_PU_PD_TYPE,/*76*/ MTK_PULL_PU_PD_TYPE,/*77*/
+ MTK_PULL_PU_PD_TYPE,/*78*/ MTK_PULL_PU_PD_TYPE,/*79*/
+ MTK_PULL_PU_PD_TYPE,/*80*/ MTK_PULL_PU_PD_TYPE,/*81*/
+ MTK_PULL_PU_PD_TYPE,/*82*/ MTK_PULL_PU_PD_TYPE,/*83*/
+ MTK_PULL_PU_PD_TYPE,/*84*/ MTK_PULL_PU_PD_TYPE,/*85*/
+ MTK_PULL_PU_PD_TYPE,/*86*/ MTK_PULL_PU_PD_TYPE,/*87*/
+ MTK_PULL_PU_PD_TYPE,/*88*/ MTK_PULL_PU_PD_TYPE,/*89*/
+ MTK_PULL_PU_PD_TYPE,/*90*/ MTK_PULL_PU_PD_TYPE,/*91*/
+ MTK_PULL_PU_PD_TYPE,/*92*/ MTK_PULL_PU_PD_TYPE,/*93*/
+ MTK_PULL_PU_PD_TYPE,/*94*/ MTK_PULL_PU_PD_TYPE,/*95*/
+ MTK_PULL_PU_PD_TYPE,/*96*/ MTK_PULL_PU_PD_TYPE,/*97*/
+ MTK_PULL_PU_PD_TYPE,/*98*/ MTK_PULL_PU_PD_TYPE,/*99*/
+ MTK_PULL_PU_PD_TYPE,/*100*/
+};
+
+static const struct mtk_pin_reg_calc mt7981_reg_cals[] = {
+ [PINCTRL_PIN_REG_MODE] = MTK_RANGE(mt7981_pin_mode_range),
+ [PINCTRL_PIN_REG_DIR] = MTK_RANGE(mt7981_pin_dir_range),
+ [PINCTRL_PIN_REG_DI] = MTK_RANGE(mt7981_pin_di_range),
+ [PINCTRL_PIN_REG_DO] = MTK_RANGE(mt7981_pin_do_range),
+ [PINCTRL_PIN_REG_SMT] = MTK_RANGE(mt7981_pin_smt_range),
+ [PINCTRL_PIN_REG_IES] = MTK_RANGE(mt7981_pin_ies_range),
+ [PINCTRL_PIN_REG_PU] = MTK_RANGE(mt7981_pin_pu_range),
+ [PINCTRL_PIN_REG_PD] = MTK_RANGE(mt7981_pin_pd_range),
+ [PINCTRL_PIN_REG_DRV] = MTK_RANGE(mt7981_pin_drv_range),
+ [PINCTRL_PIN_REG_PUPD] = MTK_RANGE(mt7981_pin_pupd_range),
+ [PINCTRL_PIN_REG_R0] = MTK_RANGE(mt7981_pin_r0_range),
+ [PINCTRL_PIN_REG_R1] = MTK_RANGE(mt7981_pin_r1_range),
+};
+
+static const struct mtk_pin_desc mt7981_pins[] = {
+ MT7981_PIN(0, "GPIO_WPS"),
+ MT7981_PIN(1, "GPIO_RESET"),
+ MT7981_PIN(2, "SYS_WATCHDOG"),
+ MT7981_PIN(3, "PCIE_PERESET_N"),
+ MT7981_PIN(4, "JTAG_JTDO"),
+ MT7981_PIN(5, "JTAG_JTDI"),
+ MT7981_PIN(6, "JTAG_JTMS"),
+ MT7981_PIN(7, "JTAG_JTCLK"),
+ MT7981_PIN(8, "JTAG_JTRST_N"),
+ MT7981_PIN(9, "WO_JTAG_JTDO"),
+ MT7981_PIN(10, "WO_JTAG_JTDI"),
+ MT7981_PIN(11, "WO_JTAG_JTMS"),
+ MT7981_PIN(12, "WO_JTAG_JTCLK"),
+ MT7981_PIN(13, "WO_JTAG_JTRST_N"),
+ MT7981_PIN(14, "USB_VBUS"),
+ MT7981_PIN(15, "PWM0"),
+ MT7981_PIN(16, "SPI0_CLK"),
+ MT7981_PIN(17, "SPI0_MOSI"),
+ MT7981_PIN(18, "SPI0_MISO"),
+ MT7981_PIN(19, "SPI0_CS"),
+ MT7981_PIN(20, "SPI0_HOLD"),
+ MT7981_PIN(21, "SPI0_WP"),
+ MT7981_PIN(22, "SPI1_CLK"),
+ MT7981_PIN(23, "SPI1_MOSI"),
+ MT7981_PIN(24, "SPI1_MISO"),
+ MT7981_PIN(25, "SPI1_CS"),
+ MT7981_PIN(26, "SPI2_CLK"),
+ MT7981_PIN(27, "SPI2_MOSI"),
+ MT7981_PIN(28, "SPI2_MISO"),
+ MT7981_PIN(29, "SPI2_CS"),
+ MT7981_PIN(30, "SPI2_HOLD"),
+ MT7981_PIN(31, "SPI2_WP"),
+ MT7981_PIN(32, "UART0_RXD"),
+ MT7981_PIN(33, "UART0_TXD"),
+ MT7981_PIN(34, "PCIE_CLK_REQ"),
+ MT7981_PIN(35, "PCIE_WAKE_N"),
+ MT7981_PIN(36, "SMI_MDC"),
+ MT7981_PIN(37, "SMI_MDIO"),
+ MT7981_PIN(38, "GBE_INT"),
+ MT7981_PIN(39, "GBE_RESET"),
+ MT7981_PIN(40, "WF_DIG_RESETB"),
+ MT7981_PIN(41, "WF_CBA_RESETB"),
+ MT7981_PIN(42, "WF_XO_REQ"),
+ MT7981_PIN(43, "WF_TOP_CLK"),
+ MT7981_PIN(44, "WF_TOP_DATA"),
+ MT7981_PIN(45, "WF_HB1"),
+ MT7981_PIN(46, "WF_HB2"),
+ MT7981_PIN(47, "WF_HB3"),
+ MT7981_PIN(48, "WF_HB4"),
+ MT7981_PIN(49, "WF_HB0"),
+ MT7981_PIN(50, "WF_HB0_B"),
+ MT7981_PIN(51, "WF_HB5"),
+ MT7981_PIN(52, "WF_HB6"),
+ MT7981_PIN(53, "WF_HB7"),
+ MT7981_PIN(54, "WF_HB8"),
+ MT7981_PIN(55, "WF_HB9"),
+ MT7981_PIN(56, "WF_HB10"),
+};
+
+/* List all groups consisting of these pins dedicated to the enablement of
+ * certain hardware block and the corresponding mode for all of the pins.
+ * The hardware probably has multiple combinations of these pinouts.
+ */
+
+/* WA_AICE */
+static int mt7981_wa_aice1_pins[] = { 0, 1, };
+static int mt7981_wa_aice1_funcs[] = { 2, 2, };
+
+static int mt7981_wa_aice2_pins[] = { 0, 1, };
+static int mt7981_wa_aice2_funcs[] = { 3, 3, };
+
+static int mt7981_wa_aice3_pins[] = { 28, 29, };
+static int mt7981_wa_aice3_funcs[] = { 3, 3, };
+
+static int mt7981_wm_aice1_pins[] = { 9, 10, };
+static int mt7981_wm_aice1_funcs[] = { 2, 2, };
+
+static int mt7981_wm_aice2_pins[] = { 30, 31, };
+static int mt7981_wm_aice2_funcs[] = { 5, 5, };
+
+/* WM_UART */
+static int mt7981_wm_uart_0_pins[] = { 0, 1, };
+static int mt7981_wm_uart_0_funcs[] = { 5, 5, };
+
+static int mt7981_wm_uart_1_pins[] = { 20, 21, };
+static int mt7981_wm_uart_1_funcs[] = { 4, 4, };
+
+static int mt7981_wm_uart_2_pins[] = { 30, 31, };
+static int mt7981_wm_uart_2_funcs[] = { 3, 3, };
+
+/* DFD */
+static int mt7981_dfd_pins[] = { 0, 1, 4, 5, };
+static int mt7981_dfd_funcs[] = { 5, 5, 6, 6, };
+
+/* SYS_WATCHDOG */
+static int mt7981_watchdog_pins[] = { 2, };
+static int mt7981_watchdog_funcs[] = { 1, };
+
+static int mt7981_watchdog1_pins[] = { 13, };
+static int mt7981_watchdog1_funcs[] = { 5, };
+
+/* PCIE_PERESET_N */
+static int mt7981_pcie_pereset_pins[] = { 3, };
+static int mt7981_pcie_pereset_funcs[] = { 1, };
+
+/* JTAG */
+static int mt7981_jtag_pins[] = { 4, 5, 6, 7, 8, };
+static int mt7981_jtag_funcs[] = { 1, 1, 1, 1, 1, };
+
+/* WM_JTAG */
+static int mt7981_wm_jtag_0_pins[] = { 4, 5, 6, 7, 8, };
+static int mt7981_wm_jtag_0_funcs[] = { 2, 2, 2, 2, 2, };
+
+static int mt7981_wm_jtag_1_pins[] = { 20, 21, 22, 23, 24, };
+static int mt7981_wm_jtag_1_funcs[] = { 5, 5, 5, 5, 5, };
+
+/* WO0_JTAG */
+static int mt7981_wo0_jtag_0_pins[] = { 9, 10, 11, 12, 13, };
+static int mt7981_wo0_jtag_0_funcs[] = { 1, 1, 1, 1, 1, };
+
+static int mt7981_wo0_jtag_1_pins[] = { 25, 26, 27, 28, 29, };
+static int mt7981_wo0_jtag_1_funcs[] = { 5, 5, 5, 5, 5, };
+
+/* UART2 */
+static int mt7981_uart2_0_pins[] = { 4, 5, 6, 7, };
+static int mt7981_uart2_0_funcs[] = { 3, 3, 3, 3, };
+
+/* GBE_LED0 */
+static int mt7981_gbe_led0_pins[] = { 8, };
+static int mt7981_gbe_led0_funcs[] = { 3, };
+
+/* PTA_EXT */
+static int mt7981_pta_ext_0_pins[] = { 4, 5, 6, };
+static int mt7981_pta_ext_0_funcs[] = { 4, 4, 4, };
+
+static int mt7981_pta_ext_1_pins[] = { 22, 23, 24, };
+static int mt7981_pta_ext_1_funcs[] = { 4, 4, 4, };
+
+/* PWM2 */
+static int mt7981_pwm2_pins[] = { 7, };
+static int mt7981_pwm2_funcs[] = { 4, };
+
+/* NET_WO0_UART_TXD */
+static int mt7981_net_wo0_uart_txd_0_pins[] = { 8, };
+static int mt7981_net_wo0_uart_txd_0_funcs[] = { 4, };
+
+static int mt7981_net_wo0_uart_txd_1_pins[] = { 14, };
+static int mt7981_net_wo0_uart_txd_1_funcs[] = { 3, };
+
+static int mt7981_net_wo0_uart_txd_2_pins[] = { 15, };
+static int mt7981_net_wo0_uart_txd_2_funcs[] = { 4, };
+
+/* SPI1 */
+static int mt7981_spi1_0_pins[] = { 4, 5, 6, 7, };
+static int mt7981_spi1_0_funcs[] = { 5, 5, 5, 5, };
+
+/* I2C */
+static int mt7981_i2c0_0_pins[] = { 6, 7, };
+static int mt7981_i2c0_0_funcs[] = { 6, 6, };
+
+static int mt7981_i2c0_1_pins[] = { 30, 31, };
+static int mt7981_i2c0_1_funcs[] = { 4, 4, };
+
+static int mt7981_i2c0_2_pins[] = { 36, 37, };
+static int mt7981_i2c0_2_funcs[] = { 2, 2, };
+
+static int mt7981_u2_phy_i2c_pins[] = { 30, 31, };
+static int mt7981_u2_phy_i2c_funcs[] = { 6, 6, };
+
+static int mt7981_u3_phy_i2c_pins[] = { 32, 33, };
+static int mt7981_u3_phy_i2c_funcs[] = { 3, 3, };
+
+static int mt7981_sgmii1_phy_i2c_pins[] = { 32, 33, };
+static int mt7981_sgmii1_phy_i2c_funcs[] = { 2, 2, };
+
+static int mt7981_sgmii0_phy_i2c_pins[] = { 32, 33, };
+static int mt7981_sgmii0_phy_i2c_funcs[] = { 5, 5, };
+
+/* DFD_NTRST */
+static int mt7981_dfd_ntrst_pins[] = { 8, };
+static int mt7981_dfd_ntrst_funcs[] = { 6, };
+
+/* PWM0 */
+static int mt7981_pwm0_0_pins[] = { 13, };
+static int mt7981_pwm0_0_funcs[] = { 2, };
+
+static int mt7981_pwm0_1_pins[] = { 15, };
+static int mt7981_pwm0_1_funcs[] = { 1, };
+
+/* PWM1 */
+static int mt7981_pwm1_0_pins[] = { 14, };
+static int mt7981_pwm1_0_funcs[] = { 2, };
+
+static int mt7981_pwm1_1_pins[] = { 15, };
+static int mt7981_pwm1_1_funcs[] = { 3, };
+
+/* GBE_LED1 */
+static int mt7981_gbe_led1_pins[] = { 13, };
+static int mt7981_gbe_led1_funcs[] = { 3, };
+
+/* PCM */
+static int mt7981_pcm_pins[] = { 9, 10, 11, 12, 13, 25 };
+static int mt7981_pcm_funcs[] = { 4, 4, 4, 4, 4, 4, };
+
+/* UDI */
+static int mt7981_udi_pins[] = { 9, 10, 11, 12, 13, };
+static int mt7981_udi_funcs[] = { 6, 6, 6, 6, 6, };
+
+/* DRV_VBUS */
+static int mt7981_drv_vbus_pins[] = { 14, };
+static int mt7981_drv_vbus_funcs[] = { 1, };
+
+/* EMMC */
+static int mt7981_emmc_45_pins[] = { 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, };
+static int mt7981_emmc_45_funcs[] = { 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, };
+
+/* SNFI */
+static int mt7981_snfi_pins[] = { 16, 17, 18, 19, 20, 21, };
+static int mt7981_snfi_funcs[] = { 3, 3, 3, 3, 3, 3, };
+
+/* SPI0 */
+static int mt7981_spi0_pins[] = { 16, 17, 18, 19, };
+static int mt7981_spi0_funcs[] = { 1, 1, 1, 1, };
+
+/* SPI0 */
+static int mt7981_spi0_wp_hold_pins[] = { 20, 21, };
+static int mt7981_spi0_wp_hold_funcs[] = { 1, 1, };
+
+/* SPI1 */
+static int mt7981_spi1_1_pins[] = { 22, 23, 24, 25, };
+static int mt7981_spi1_1_funcs[] = { 1, 1, 1, 1, };
+
+/* SPI2 */
+static int mt7981_spi2_pins[] = { 26, 27, 28, 29, };
+static int mt7981_spi2_funcs[] = { 1, 1, 1, 1, };
+
+/* SPI2 */
+static int mt7981_spi2_wp_hold_pins[] = { 30, 31, };
+static int mt7981_spi2_wp_hold_funcs[] = { 1, 1, };
+
+/* UART1 */
+static int mt7981_uart1_0_pins[] = { 16, 17, 18, 19, };
+static int mt7981_uart1_0_funcs[] = { 4, 4, 4, 4, };
+
+static int mt7981_uart1_1_pins[] = { 26, 27, 28, 29, };
+static int mt7981_uart1_1_funcs[] = { 2, 2, 2, 2, };
+
+/* UART2 */
+static int mt7981_uart2_1_pins[] = { 22, 23, 24, 25, };
+static int mt7981_uart2_1_funcs[] = { 3, 3, 3, 3, };
+
+/* UART0 */
+static int mt7981_uart0_pins[] = { 32, 33, };
+static int mt7981_uart0_funcs[] = { 1, 1, };
+
+/* PCIE_CLK_REQ */
+static int mt7981_pcie_clk_pins[] = { 34, };
+static int mt7981_pcie_clk_funcs[] = { 2, };
+
+/* PCIE_WAKE_N */
+static int mt7981_pcie_wake_pins[] = { 35, };
+static int mt7981_pcie_wake_funcs[] = { 2, };
+
+/* MDC_MDIO */
+static int mt7981_smi_mdc_mdio_pins[] = { 36, 37, };
+static int mt7981_smi_mdc_mdio_funcs[] = { 1, 1, };
+
+static int mt7981_gbe_ext_mdc_mdio_pins[] = { 36, 37, };
+static int mt7981_gbe_ext_mdc_mdio_funcs[] = { 3, 3, };
+
+/* WF0_MODE1 */
+static int mt7981_wf0_mode1_pins[] = { 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56 };
+static int mt7981_wf0_mode1_funcs[] = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 };
+
+/* WF0_MODE3 */
+static int mt7981_wf0_mode3_pins[] = { 45, 46, 47, 48, 49, 51 };
+static int mt7981_wf0_mode3_funcs[] = { 2, 2, 2, 2, 2, 2 };
+
+/* WF2G_LED */
+static int mt7981_wf2g_led0_pins[] = { 30, };
+static int mt7981_wf2g_led0_funcs[] = { 2, };
+
+static int mt7981_wf2g_led1_pins[] = { 34, };
+static int mt7981_wf2g_led1_funcs[] = { 1, };
+
+/* WF5G_LED */
+static int mt7981_wf5g_led0_pins[] = { 31, };
+static int mt7981_wf5g_led0_funcs[] = { 2, };
+
+static int mt7981_wf5g_led1_pins[] = { 35, };
+static int mt7981_wf5g_led1_funcs[] = { 1, };
+
+/* MT7531_INT */
+static int mt7981_mt7531_int_pins[] = { 38, };
+static int mt7981_mt7531_int_funcs[] = { 1, };
+
+/* ANT_SEL */
+static int mt7981_ant_sel_pins[] = { 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 34, 35 };
+static int mt7981_ant_sel_funcs[] = { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6 };
+
+static const struct group_desc mt7981_groups[] = {
+ /* @GPIO(0,1): WA_AICE(2) */
+ PINCTRL_PIN_GROUP("wa_aice1", mt7981_wa_aice1),
+ /* @GPIO(0,1): WA_AICE(3) */
+ PINCTRL_PIN_GROUP("wa_aice2", mt7981_wa_aice2),
+ /* @GPIO(0,1): WM_UART(5) */
+ PINCTRL_PIN_GROUP("wm_uart_0", mt7981_wm_uart_0),
+ /* @GPIO(0,1,4,5): DFD(6) */
+ PINCTRL_PIN_GROUP("dfd", mt7981_dfd),
+ /* @GPIO(2): SYS_WATCHDOG(1) */
+ PINCTRL_PIN_GROUP("watchdog", mt7981_watchdog),
+ /* @GPIO(3): PCIE_PERESET_N(1) */
+ PINCTRL_PIN_GROUP("pcie_pereset", mt7981_pcie_pereset),
+ /* @GPIO(4,8) JTAG(1) */
+ PINCTRL_PIN_GROUP("jtag", mt7981_jtag),
+ /* @GPIO(4,8) WM_JTAG(2) */
+ PINCTRL_PIN_GROUP("wm_jtag_0", mt7981_wm_jtag_0),
+ /* @GPIO(9,13) WO0_JTAG(1) */
+ PINCTRL_PIN_GROUP("wo0_jtag_0", mt7981_wo0_jtag_0),
+ /* @GPIO(4,7) WM_JTAG(3) */
+ PINCTRL_PIN_GROUP("uart2_0", mt7981_uart2_0),
+ /* @GPIO(8) GBE_LED0(3) */
+ PINCTRL_PIN_GROUP("gbe_led0", mt7981_gbe_led0),
+ /* @GPIO(4,6) PTA_EXT(4) */
+ PINCTRL_PIN_GROUP("pta_ext_0", mt7981_pta_ext_0),
+ /* @GPIO(7) PWM2(4) */
+ PINCTRL_PIN_GROUP("pwm2", mt7981_pwm2),
+ /* @GPIO(8) NET_WO0_UART_TXD(4) */
+ PINCTRL_PIN_GROUP("net_wo0_uart_txd_0", mt7981_net_wo0_uart_txd_0),
+ /* @GPIO(4,7) SPI1(5) */
+ PINCTRL_PIN_GROUP("spi1_0", mt7981_spi1_0),
+ /* @GPIO(6,7) I2C(5) */
+ PINCTRL_PIN_GROUP("i2c0_0", mt7981_i2c0_0),
+ /* @GPIO(0,1,4,5): DFD_NTRST(6) */
+ PINCTRL_PIN_GROUP("dfd_ntrst", mt7981_dfd_ntrst),
+ /* @GPIO(9,10): WM_AICE(2) */
+ PINCTRL_PIN_GROUP("wm_aice1", mt7981_wm_aice1),
+ /* @GPIO(13): PWM0(2) */
+ PINCTRL_PIN_GROUP("pwm0_0", mt7981_pwm0_0),
+ /* @GPIO(15): PWM0(1) */
+ PINCTRL_PIN_GROUP("pwm0_1", mt7981_pwm0_1),
+ /* @GPIO(14): PWM1(2) */
+ PINCTRL_PIN_GROUP("pwm1_0", mt7981_pwm1_0),
+ /* @GPIO(15): PWM1(3) */
+ PINCTRL_PIN_GROUP("pwm1_1", mt7981_pwm1_1),
+ /* @GPIO(14) NET_WO0_UART_TXD(3) */
+ PINCTRL_PIN_GROUP("net_wo0_uart_txd_1", mt7981_net_wo0_uart_txd_1),
+ /* @GPIO(15) NET_WO0_UART_TXD(4) */
+ PINCTRL_PIN_GROUP("net_wo0_uart_txd_2", mt7981_net_wo0_uart_txd_2),
+ /* @GPIO(13) GBE_LED0(3) */
+ PINCTRL_PIN_GROUP("gbe_led1", mt7981_gbe_led1),
+ /* @GPIO(9,13) PCM(4) */
+ PINCTRL_PIN_GROUP("pcm", mt7981_pcm),
+ /* @GPIO(13): SYS_WATCHDOG1(5) */
+ PINCTRL_PIN_GROUP("watchdog1", mt7981_watchdog1),
+ /* @GPIO(9,13) UDI(4) */
+ PINCTRL_PIN_GROUP("udi", mt7981_udi),
+ /* @GPIO(14) DRV_VBUS(1) */
+ PINCTRL_PIN_GROUP("drv_vbus", mt7981_drv_vbus),
+ /* @GPIO(15,25): EMMC(2) */
+ PINCTRL_PIN_GROUP("emmc_45", mt7981_emmc_45),
+ /* @GPIO(16,21): SNFI(3) */
+ PINCTRL_PIN_GROUP("snfi", mt7981_snfi),
+ /* @GPIO(16,19): SPI0(1) */
+ PINCTRL_PIN_GROUP("spi0", mt7981_spi0),
+ /* @GPIO(20,21): SPI0(1) */
+ PINCTRL_PIN_GROUP("spi0_wp_hold", mt7981_spi0_wp_hold),
+ /* @GPIO(22,25) SPI1(1) */
+ PINCTRL_PIN_GROUP("spi1_1", mt7981_spi1_1),
+ /* @GPIO(26,29): SPI2(1) */
+ PINCTRL_PIN_GROUP("spi2", mt7981_spi2),
+ /* @GPIO(30,31): SPI0(1) */
+ PINCTRL_PIN_GROUP("spi2_wp_hold", mt7981_spi2_wp_hold),
+ /* @GPIO(16,19): UART1(4) */
+ PINCTRL_PIN_GROUP("uart1_0", mt7981_uart1_0),
+ /* @GPIO(26,29): UART1(2) */
+ PINCTRL_PIN_GROUP("uart1_1", mt7981_uart1_1),
+ /* @GPIO(22,25): UART1(3) */
+ PINCTRL_PIN_GROUP("uart2_1", mt7981_uart2_1),
+ /* @GPIO(22,24) PTA_EXT(4) */
+ PINCTRL_PIN_GROUP("pta_ext_1", mt7981_pta_ext_1),
+ /* @GPIO(20,21): WM_UART(4) */
+ PINCTRL_PIN_GROUP("wm_aurt_1", mt7981_wm_uart_1),
+ /* @GPIO(30,31): WM_UART(3) */
+ PINCTRL_PIN_GROUP("wm_aurt_2", mt7981_wm_uart_2),
+ /* @GPIO(20,24) WM_JTAG(5) */
+ PINCTRL_PIN_GROUP("wm_jtag_1", mt7981_wm_jtag_1),
+ /* @GPIO(25,29) WO0_JTAG(5) */
+ PINCTRL_PIN_GROUP("wo0_jtag_1", mt7981_wo0_jtag_1),
+ /* @GPIO(28,29): WA_AICE(3) */
+ PINCTRL_PIN_GROUP("wa_aice3", mt7981_wa_aice3),
+ /* @GPIO(30,31): WM_AICE(5) */
+ PINCTRL_PIN_GROUP("wm_aice2", mt7981_wm_aice2),
+ /* @GPIO(30,31): I2C(4) */
+ PINCTRL_PIN_GROUP("i2c0_1", mt7981_i2c0_1),
+ /* @GPIO(30,31): I2C(6) */
+ PINCTRL_PIN_GROUP("u2_phy_i2c", mt7981_u2_phy_i2c),
+ /* @GPIO(32,33): I2C(1) */
+ PINCTRL_PIN_GROUP("uart0", mt7981_uart0),
+ /* @GPIO(32,33): I2C(2) */
+ PINCTRL_PIN_GROUP("sgmii1_phy_i2c", mt7981_sgmii1_phy_i2c),
+ /* @GPIO(32,33): I2C(3) */
+ PINCTRL_PIN_GROUP("u3_phy_i2c", mt7981_u3_phy_i2c),
+ /* @GPIO(32,33): I2C(5) */
+ PINCTRL_PIN_GROUP("sgmii0_phy_i2c", mt7981_sgmii0_phy_i2c),
+ /* @GPIO(34): PCIE_CLK_REQ(2) */
+ PINCTRL_PIN_GROUP("pcie_clk", mt7981_pcie_clk),
+ /* @GPIO(35): PCIE_WAKE_N(2) */
+ PINCTRL_PIN_GROUP("pcie_wake", mt7981_pcie_wake),
+ /* @GPIO(36,37): I2C(2) */
+ PINCTRL_PIN_GROUP("i2c0_2", mt7981_i2c0_2),
+ /* @GPIO(36,37): MDC_MDIO(1) */
+ PINCTRL_PIN_GROUP("smi_mdc_mdio", mt7981_smi_mdc_mdio),
+ /* @GPIO(36,37): MDC_MDIO(3) */
+ PINCTRL_PIN_GROUP("gbe_ext_mdc_mdio", mt7981_gbe_ext_mdc_mdio),
+ /* @GPIO(69,85): WF0_MODE1(1) */
+ PINCTRL_PIN_GROUP("wf0_mode1", mt7981_wf0_mode1),
+ /* @GPIO(74,80): WF0_MODE3(3) */
+ PINCTRL_PIN_GROUP("wf0_mode3", mt7981_wf0_mode3),
+ /* @GPIO(30): WF2G_LED(2) */
+ PINCTRL_PIN_GROUP("wf2g_led0", mt7981_wf2g_led0),
+ /* @GPIO(34): WF2G_LED(1) */
+ PINCTRL_PIN_GROUP("wf2g_led1", mt7981_wf2g_led1),
+ /* @GPIO(31): WF5G_LED(2) */
+ PINCTRL_PIN_GROUP("wf5g_led0", mt7981_wf5g_led0),
+ /* @GPIO(35): WF5G_LED(1) */
+ PINCTRL_PIN_GROUP("wf5g_led1", mt7981_wf5g_led1),
+ /* @GPIO(38): MT7531_INT(1) */
+ PINCTRL_PIN_GROUP("mt7531_int", mt7981_mt7531_int),
+ /* @GPIO(14,15,26,17,18,19,20,21,22,23,24,25,34,35): ANT_SEL(1) */
+ PINCTRL_PIN_GROUP("ant_sel", mt7981_ant_sel),
+};
+
+/* Joint those groups owning the same capability in user point of view which
+ * allows that people tend to use through the device tree.
+ */
+static const char *mt7981_wa_aice_groups[] = { "wa_aice1", "wa_aice2", "wm_aice1_1",
+ "wa_aice3", "wm_aice1_2", };
+static const char *mt7981_uart_groups[] = { "wm_uart_0", "uart2_0",
+ "net_wo0_uart_txd_0", "net_wo0_uart_txd_1", "net_wo0_uart_txd_2",
+ "uart1_0", "uart1_1", "uart2_1", "wm_aurt_1", "wm_aurt_2", "uart0", };
+static const char *mt7981_dfd_groups[] = { "dfd", "dfd_ntrst", };
+static const char *mt7981_wdt_groups[] = { "watchdog", "watchdog1", };
+static const char *mt7981_pcie_groups[] = { "pcie_pereset", "pcie_clk", "pcie_wake", };
+static const char *mt7981_jtag_groups[] = { "jtag", "wm_jtag_0", "wo0_jtag_0",
+ "wo0_jtag_1", "wm_jtag_1", };
+static const char *mt7981_led_groups[] = { "gbe_led0", "gbe_led1", "wf2g_led0",
+ "wf2g_led1", "wf5g_led0", "wf5g_led1", };
+static const char *mt7981_pta_groups[] = { "pta_ext_0", "pta_ext_1", };
+static const char *mt7981_pwm_groups[] = { "pwm2", "pwm0_0", "pwm0_1",
+ "pwm1_0", "pwm1_1", };
+static const char *mt7981_spi_groups[] = { "spi1_0", "spi0", "spi0_wp_hold", "spi1_1", "spi2",
+ "spi2_wp_hold", };
+static const char *mt7981_i2c_groups[] = { "i2c0_0", "i2c0_1", "u2_phy_i2c",
+ "sgmii1_phy_i2c", "u3_phy_i2c", "sgmii0_phy_i2c", "i2c0_2", };
+static const char *mt7981_pcm_groups[] = { "pcm", };
+static const char *mt7981_udi_groups[] = { "udi", };
+static const char *mt7981_usb_groups[] = { "drv_vbus", };
+static const char *mt7981_flash_groups[] = { "emmc_45", "snfi", };
+static const char *mt7981_ethernet_groups[] = { "smi_mdc_mdio", "gbe_ext_mdc_mdio",
+ "wf0_mode1", "wf0_mode3", "mt7531_int", };
+static const char *mt7981_ant_groups[] = { "ant_sel", };
+
+static const struct function_desc mt7981_functions[] = {
+ {"wa_aice", mt7981_wa_aice_groups, ARRAY_SIZE(mt7981_wa_aice_groups)},
+ {"dfd", mt7981_dfd_groups, ARRAY_SIZE(mt7981_dfd_groups)},
+ {"jtag", mt7981_jtag_groups, ARRAY_SIZE(mt7981_jtag_groups)},
+ {"pta", mt7981_pta_groups, ARRAY_SIZE(mt7981_pta_groups)},
+ {"pcm", mt7981_pcm_groups, ARRAY_SIZE(mt7981_pcm_groups)},
+ {"udi", mt7981_udi_groups, ARRAY_SIZE(mt7981_udi_groups)},
+ {"usb", mt7981_usb_groups, ARRAY_SIZE(mt7981_usb_groups)},
+ {"ant", mt7981_ant_groups, ARRAY_SIZE(mt7981_ant_groups)},
+ {"eth", mt7981_ethernet_groups, ARRAY_SIZE(mt7981_ethernet_groups)},
+ {"i2c", mt7981_i2c_groups, ARRAY_SIZE(mt7981_i2c_groups)},
+ {"led", mt7981_led_groups, ARRAY_SIZE(mt7981_led_groups)},
+ {"pwm", mt7981_pwm_groups, ARRAY_SIZE(mt7981_pwm_groups)},
+ {"spi", mt7981_spi_groups, ARRAY_SIZE(mt7981_spi_groups)},
+ {"uart", mt7981_uart_groups, ARRAY_SIZE(mt7981_uart_groups)},
+ {"watchdog", mt7981_wdt_groups, ARRAY_SIZE(mt7981_wdt_groups)},
+ {"flash", mt7981_flash_groups, ARRAY_SIZE(mt7981_flash_groups)},
+ {"pcie", mt7981_pcie_groups, ARRAY_SIZE(mt7981_pcie_groups)},
+};
+
+static const struct mtk_eint_hw mt7981_eint_hw = {
+ .port_mask = 7,
+ .ports = 7,
+ .ap_num = ARRAY_SIZE(mt7981_pins),
+ .db_cnt = 16,
+};
+
+static const char * const mt7981_pinctrl_register_base_names[] = {
+ "gpio", "iocfg_rt", "iocfg_rm", "iocfg_rb",
+ "iocfg_lb", "iocfg_bl", "iocfg_tm", "iocfg_tl",
+};
+
+static struct mtk_pin_soc mt7981_data = {
+ .reg_cal = mt7981_reg_cals,
+ .pins = mt7981_pins,
+ .npins = ARRAY_SIZE(mt7981_pins),
+ .grps = mt7981_groups,
+ .ngrps = ARRAY_SIZE(mt7981_groups),
+ .funcs = mt7981_functions,
+ .nfuncs = ARRAY_SIZE(mt7981_functions),
+ .eint_hw = &mt7981_eint_hw,
+ .gpio_m = 0,
+ .ies_present = false,
+ .base_names = mt7981_pinctrl_register_base_names,
+ .nbase_names = ARRAY_SIZE(mt7981_pinctrl_register_base_names),
+ .pull_type = mt7981_pull_type,
+ .bias_set_combo = mtk_pinconf_bias_set_combo,
+ .bias_get_combo = mtk_pinconf_bias_get_combo,
+ .drive_set = mtk_pinconf_drive_set_rev1,
+ .drive_get = mtk_pinconf_drive_get_rev1,
+ .adv_pull_get = mtk_pinconf_adv_pull_get,
+ .adv_pull_set = mtk_pinconf_adv_pull_set,
+};
+
+static const struct of_device_id mt7981_pinctrl_of_match[] = {
+ { .compatible = "mediatek,mt7981-pinctrl", },
+ {}
+};
+
+static int mt7981_pinctrl_probe(struct platform_device *pdev)
+{
+ return mtk_moore_pinctrl_probe(pdev, &mt7981_data);
+}
+
+static struct platform_driver mt7981_pinctrl_driver = {
+ .driver = {
+ .name = "mt7981-pinctrl",
+ .of_match_table = mt7981_pinctrl_of_match,
+ },
+ .probe = mt7981_pinctrl_probe,
+};
+
+static int __init mt7981_pinctrl_init(void)
+{
+ return platform_driver_register(&mt7981_pinctrl_driver);
+}
+arch_initcall(mt7981_pinctrl_init);
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt8195.c b/drivers/pinctrl/mediatek/pinctrl-mt8195.c
index 89557c7ed2ab..09c4dcef9338 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt8195.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt8195.c
@@ -659,7 +659,7 @@ static const struct mtk_pin_field_calc mt8195_pin_drv_range[] = {
PIN_FIELD_BASE(10, 10, 4, 0x010, 0x10, 9, 3),
PIN_FIELD_BASE(11, 11, 4, 0x000, 0x10, 24, 3),
PIN_FIELD_BASE(12, 12, 4, 0x010, 0x10, 12, 3),
- PIN_FIELD_BASE(13, 13, 4, 0x010, 0x10, 27, 3),
+ PIN_FIELD_BASE(13, 13, 4, 0x000, 0x10, 27, 3),
PIN_FIELD_BASE(14, 14, 4, 0x010, 0x10, 15, 3),
PIN_FIELD_BASE(15, 15, 4, 0x010, 0x10, 0, 3),
PIN_FIELD_BASE(16, 16, 4, 0x010, 0x10, 18, 3),
@@ -708,7 +708,7 @@ static const struct mtk_pin_field_calc mt8195_pin_drv_range[] = {
PIN_FIELD_BASE(78, 78, 3, 0x000, 0x10, 15, 3),
PIN_FIELD_BASE(79, 79, 3, 0x000, 0x10, 18, 3),
PIN_FIELD_BASE(80, 80, 3, 0x000, 0x10, 21, 3),
- PIN_FIELD_BASE(81, 81, 3, 0x000, 0x10, 28, 3),
+ PIN_FIELD_BASE(81, 81, 3, 0x000, 0x10, 24, 3),
PIN_FIELD_BASE(82, 82, 3, 0x000, 0x10, 27, 3),
PIN_FIELD_BASE(83, 83, 3, 0x010, 0x10, 0, 3),
PIN_FIELD_BASE(84, 84, 3, 0x010, 0x10, 3, 3),
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
index 553d16703475..665dec419e7c 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
@@ -906,7 +906,6 @@ static const struct gpio_chip mtk_gpio_chip = {
.set = mtk_gpio_set,
.to_irq = mtk_gpio_to_irq,
.set_config = mtk_gpio_set_config,
- .of_gpio_n_cells = 2,
};
static int mtk_eint_suspend(struct device *device)
diff --git a/drivers/pinctrl/mediatek/pinctrl-paris.c b/drivers/pinctrl/mediatek/pinctrl-paris.c
index 475f4172d508..33d6c3fb7908 100644
--- a/drivers/pinctrl/mediatek/pinctrl-paris.c
+++ b/drivers/pinctrl/mediatek/pinctrl-paris.c
@@ -640,7 +640,7 @@ static int mtk_hw_get_value_wrap(struct mtk_pinctrl *hw, unsigned int gpio, int
ssize_t mtk_pctrl_show_one_pin(struct mtk_pinctrl *hw,
unsigned int gpio, char *buf, unsigned int buf_len)
{
- int pinmux, pullup, pullen, len = 0, r1 = -1, r0 = -1, rsel = -1;
+ int pinmux, pullup = 0, pullen = 0, len = 0, r1 = -1, r0 = -1, rsel = -1;
const struct mtk_pin_desc *desc;
u32 try_all_type = 0;
@@ -717,7 +717,7 @@ static void mtk_pctrl_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s,
unsigned int gpio)
{
struct mtk_pinctrl *hw = pinctrl_dev_get_drvdata(pctldev);
- char buf[PIN_DBG_BUF_SZ];
+ char buf[PIN_DBG_BUF_SZ] = { 0 };
(void)mtk_pctrl_show_one_pin(hw, gpio, buf, PIN_DBG_BUF_SZ);
@@ -987,7 +987,6 @@ static int mtk_build_gpiochip(struct mtk_pinctrl *hw)
chip->set_config = mtk_gpio_set_config;
chip->base = -1;
chip->ngpio = hw->soc->npins;
- chip->of_gpio_n_cells = 2;
ret = gpiochip_add_data(chip, hw);
if (ret < 0)
diff --git a/drivers/pinctrl/nomadik/pinctrl-abx500.c b/drivers/pinctrl/nomadik/pinctrl-abx500.c
index 28c3403df1b0..6b90051af206 100644
--- a/drivers/pinctrl/nomadik/pinctrl-abx500.c
+++ b/drivers/pinctrl/nomadik/pinctrl-abx500.c
@@ -446,8 +446,6 @@ out:
return ret;
}
-#include <linux/seq_file.h>
-
static void abx500_gpio_dbg_show_one(struct seq_file *s,
struct pinctrl_dev *pctldev,
struct gpio_chip *chip,
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index 9bc6e3922e78..9236a132c7ba 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -218,6 +218,7 @@ static void amd_gpio_dbg_show(struct seq_file *s, struct gpio_chip *gc)
char *orientation;
char debounce_value[40];
char *debounce_enable;
+ char *wake_cntrlz;
for (bank = 0; bank < gpio_dev->hwbank_num; bank++) {
unsigned int time = 0;
@@ -305,6 +306,12 @@ static void amd_gpio_dbg_show(struct seq_file *s, struct gpio_chip *gc)
wake_cntrl2 = " ∅";
seq_printf(s, "S4/S5 %s| ", wake_cntrl2);
+ if (pin_reg & BIT(WAKECNTRL_Z_OFF))
+ wake_cntrlz = "⏰";
+ else
+ wake_cntrlz = " ∅";
+ seq_printf(s, "Z %s| ", wake_cntrlz);
+
if (pin_reg & BIT(PULL_UP_ENABLE_OFF)) {
pull_up_enable = "+";
if (pin_reg & BIT(PULL_UP_SEL_OFF))
@@ -365,6 +372,7 @@ static void amd_gpio_dbg_show(struct seq_file *s, struct gpio_chip *gc)
} else {
debounce_enable = " ∅";
+ time = 0;
}
snprintf(debounce_value, sizeof(debounce_value), "%u", time * unit);
seq_printf(s, "debounce %s (🕑 %sus)| ", debounce_enable, debounce_value);
diff --git a/drivers/pinctrl/pinctrl-amd.h b/drivers/pinctrl/pinctrl-amd.h
index c8635998465d..81ae8319a1f0 100644
--- a/drivers/pinctrl/pinctrl-amd.h
+++ b/drivers/pinctrl/pinctrl-amd.h
@@ -42,6 +42,7 @@
#define OUTPUT_ENABLE_OFF 23
#define SW_CNTRL_IN_OFF 24
#define SW_CNTRL_EN_OFF 25
+#define WAKECNTRL_Z_OFF 27
#define INTERRUPT_STS_OFF 28
#define WAKE_STS_OFF 29
diff --git a/drivers/pinctrl/pinctrl-at91-pio4.c b/drivers/pinctrl/pinctrl-at91-pio4.c
index 39b233f73e13..373eed8bc4be 100644
--- a/drivers/pinctrl/pinctrl-at91-pio4.c
+++ b/drivers/pinctrl/pinctrl-at91-pio4.c
@@ -1149,8 +1149,8 @@ static int atmel_pinctrl_probe(struct platform_device *pdev)
pin_desc[i].number = i;
/* Pin naming convention: P(bank_name)(bank_pin_number). */
- pin_desc[i].name = kasprintf(GFP_KERNEL, "P%c%d",
- bank + 'A', line);
+ pin_desc[i].name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "P%c%d",
+ bank + 'A', line);
group->name = group_names[i] = pin_desc[i].name;
group->pin = pin_desc[i].number;
diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c
index 1e1813d7c550..735c501e7a06 100644
--- a/drivers/pinctrl/pinctrl-at91.c
+++ b/drivers/pinctrl/pinctrl-at91.c
@@ -1294,7 +1294,7 @@ static int at91_pinctrl_probe_dt(struct platform_device *pdev,
struct at91_pinctrl *info)
{
int ret = 0;
- int i, j;
+ int i, j, ngpio_chips_enabled = 0;
uint32_t *tmp;
struct device_node *np = pdev->dev.of_node;
struct device_node *child;
@@ -1307,10 +1307,17 @@ static int at91_pinctrl_probe_dt(struct platform_device *pdev,
of_match_device(at91_pinctrl_of_match, &pdev->dev)->data;
at91_pinctrl_child_count(info, np);
- if (gpio_banks < 1) {
- dev_err(&pdev->dev, "you need to specify at least one gpio-controller\n");
- return -EINVAL;
- }
+ /*
+ * We need all the GPIO drivers to probe FIRST, or we will not be able
+ * to obtain references to the struct gpio_chip * for them, and we
+ * need this to proceed.
+ */
+ for (i = 0; i < MAX_GPIO_BANKS; i++)
+ if (gpio_chips[i])
+ ngpio_chips_enabled++;
+
+ if (ngpio_chips_enabled < info->nactive_banks)
+ return -EPROBE_DEFER;
ret = at91_pinctrl_mux_mask(info, np);
if (ret)
@@ -1366,7 +1373,7 @@ static int at91_pinctrl_probe(struct platform_device *pdev)
{
struct at91_pinctrl *info;
struct pinctrl_pin_desc *pdesc;
- int ret, i, j, k, ngpio_chips_enabled = 0;
+ int ret, i, j, k;
info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
if (!info)
@@ -1376,23 +1383,6 @@ static int at91_pinctrl_probe(struct platform_device *pdev)
if (ret)
return ret;
- /*
- * We need all the GPIO drivers to probe FIRST, or we will not be able
- * to obtain references to the struct gpio_chip * for them, and we
- * need this to proceed.
- */
- for (i = 0; i < gpio_banks; i++)
- if (gpio_chips[i])
- ngpio_chips_enabled++;
-
- if (ngpio_chips_enabled < info->nactive_banks) {
- dev_warn(&pdev->dev,
- "All GPIO chips are not registered yet (%d/%d)\n",
- ngpio_chips_enabled, info->nactive_banks);
- devm_kfree(&pdev->dev, info);
- return -EPROBE_DEFER;
- }
-
at91_pinctrl_desc.name = dev_name(&pdev->dev);
at91_pinctrl_desc.npins = gpio_banks * MAX_NB_GPIO_PER_BANK;
at91_pinctrl_desc.pins = pdesc =
@@ -1649,7 +1639,7 @@ static int gpio_irq_set_wake(struct irq_data *d, unsigned state)
return 0;
}
-static int at91_gpio_suspend(struct device *dev)
+static int __maybe_unused at91_gpio_suspend(struct device *dev)
{
struct at91_gpio_chip *at91_chip = dev_get_drvdata(dev);
void __iomem *pio = at91_chip->regbase;
@@ -1667,7 +1657,7 @@ static int at91_gpio_suspend(struct device *dev)
return 0;
}
-static int at91_gpio_resume(struct device *dev)
+static int __maybe_unused at91_gpio_resume(struct device *dev)
{
struct at91_gpio_chip *at91_chip = dev_get_drvdata(dev);
void __iomem *pio = at91_chip->regbase;
@@ -1885,7 +1875,7 @@ static int at91_gpio_probe(struct platform_device *pdev)
}
for (i = 0; i < chip->ngpio; i++)
- names[i] = kasprintf(GFP_KERNEL, "pio%c%d", alias_idx + 'A', i);
+ names[i] = devm_kasprintf(&pdev->dev, GFP_KERNEL, "pio%c%d", alias_idx + 'A', i);
chip->names = (const char *const *)names;
@@ -1923,7 +1913,7 @@ err:
}
static const struct dev_pm_ops at91_gpio_pm_ops = {
- SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(at91_gpio_suspend, at91_gpio_resume)
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(at91_gpio_suspend, at91_gpio_resume)
};
static struct platform_driver at91_gpio_driver = {
diff --git a/drivers/pinctrl/pinctrl-da850-pupd.c b/drivers/pinctrl/pinctrl-da850-pupd.c
index 5a0a1f20c843..5eb248663e17 100644
--- a/drivers/pinctrl/pinctrl-da850-pupd.c
+++ b/drivers/pinctrl/pinctrl-da850-pupd.c
@@ -173,11 +173,6 @@ static int da850_pupd_probe(struct platform_device *pdev)
return 0;
}
-static int da850_pupd_remove(struct platform_device *pdev)
-{
- return 0;
-}
-
static const struct of_device_id da850_pupd_of_match[] = {
{ .compatible = "ti,da850-pupd" },
{ }
@@ -190,7 +185,6 @@ static struct platform_driver da850_pupd_driver = {
.of_match_table = da850_pupd_of_match,
},
.probe = da850_pupd_probe,
- .remove = da850_pupd_remove,
};
module_platform_driver(da850_pupd_driver);
diff --git a/drivers/pinctrl/pinctrl-digicolor.c b/drivers/pinctrl/pinctrl-digicolor.c
index cc3546fc4610..a0423172bdd6 100644
--- a/drivers/pinctrl/pinctrl-digicolor.c
+++ b/drivers/pinctrl/pinctrl-digicolor.c
@@ -11,18 +11,19 @@
* - Pin pad configuration (pull up/down, strength)
*/
+#include <linux/gpio/driver.h>
#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/io.h>
-#include <linux/gpio/driver.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
#include <linux/spinlock.h>
+
#include <linux/pinctrl/machine.h>
#include <linux/pinctrl/pinconf.h>
#include <linux/pinctrl/pinconf-generic.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/pinctrl/pinmux.h>
+
#include "pinctrl-utils.h"
#define DRIVER_NAME "pinctrl-digicolor"
@@ -248,7 +249,6 @@ static int dc_gpiochip_add(struct dc_pinmap *pmap)
chip->set = dc_gpio_set;
chip->base = -1;
chip->ngpio = PINS_COUNT;
- chip->of_gpio_n_cells = 2;
spin_lock_init(&pmap->lock);
diff --git a/drivers/pinctrl/pinctrl-mcp23s08_i2c.c b/drivers/pinctrl/pinctrl-mcp23s08_i2c.c
index e0b001c8c08c..b635c5737e0c 100644
--- a/drivers/pinctrl/pinctrl-mcp23s08_i2c.c
+++ b/drivers/pinctrl/pinctrl-mcp23s08_i2c.c
@@ -8,8 +8,9 @@
#include "pinctrl-mcp23s08.h"
-static int mcp230xx_probe(struct i2c_client *client, const struct i2c_device_id *id)
+static int mcp230xx_probe(struct i2c_client *client)
{
+ const struct i2c_device_id *id = i2c_client_get_device_id(client);
struct device *dev = &client->dev;
unsigned int type = id->driver_data;
struct mcp23s08 *mcp;
@@ -100,7 +101,7 @@ static struct i2c_driver mcp230xx_driver = {
.name = "mcp230xx",
.of_match_table = mcp23s08_i2c_of_match,
},
- .probe = mcp230xx_probe,
+ .probe_new = mcp230xx_probe,
.id_table = mcp230xx_id,
};
diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
index 5eeac92f610a..0276b52f3716 100644
--- a/drivers/pinctrl/pinctrl-rockchip.c
+++ b/drivers/pinctrl/pinctrl-rockchip.c
@@ -3045,6 +3045,7 @@ static int rockchip_pinctrl_parse_groups(struct device_node *np,
np_config = of_find_node_by_phandle(be32_to_cpup(phandle));
ret = pinconf_generic_parse_dt_config(np_config, NULL,
&grp->data[j].configs, &grp->data[j].nconfigs);
+ of_node_put(np_config);
if (ret)
return ret;
}
diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
index 99c3745da456..190923757cda 100644
--- a/drivers/pinctrl/pinctrl-single.c
+++ b/drivers/pinctrl/pinctrl-single.c
@@ -372,6 +372,8 @@ static int pcs_set_mux(struct pinctrl_dev *pctldev, unsigned fselector,
if (!pcs->fmask)
return 0;
function = pinmux_generic_get_function(pctldev, fselector);
+ if (!function)
+ return -EINVAL;
func = function->data;
if (!func)
return -EINVAL;
diff --git a/drivers/pinctrl/pinctrl-sx150x.c b/drivers/pinctrl/pinctrl-sx150x.c
index a87ea3b95cf4..0b5ff99641e1 100644
--- a/drivers/pinctrl/pinctrl-sx150x.c
+++ b/drivers/pinctrl/pinctrl-sx150x.c
@@ -1094,9 +1094,9 @@ static const struct regmap_config sx150x_regmap_config = {
.volatile_reg = sx150x_reg_volatile,
};
-static int sx150x_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int sx150x_probe(struct i2c_client *client)
{
+ const struct i2c_device_id *id = i2c_client_get_device_id(client);
static const u32 i2c_funcs = I2C_FUNC_SMBUS_BYTE_DATA |
I2C_FUNC_SMBUS_WRITE_WORD_DATA;
struct device *dev = &client->dev;
@@ -1252,7 +1252,7 @@ static struct i2c_driver sx150x_driver = {
.name = "sx150x-pinctrl",
.of_match_table = of_match_ptr(sx150x_of_match),
},
- .probe = sx150x_probe,
+ .probe_new = sx150x_probe,
.id_table = sx150x_id,
};
diff --git a/drivers/pinctrl/pinctrl-thunderbay.c b/drivers/pinctrl/pinctrl-thunderbay.c
index 590bbbf619af..7a5ff955877c 100644
--- a/drivers/pinctrl/pinctrl-thunderbay.c
+++ b/drivers/pinctrl/pinctrl-thunderbay.c
@@ -1278,19 +1278,12 @@ static int thunderbay_pinctrl_probe(struct platform_device *pdev)
return 0;
}
-static int thunderbay_pinctrl_remove(struct platform_device *pdev)
-{
- /* thunderbay_pinctrl_remove function to clear the assigned memory */
- return 0;
-}
-
static struct platform_driver thunderbay_pinctrl_driver = {
.driver = {
.name = "thunderbay-pinctrl",
.of_match_table = thunderbay_pinctrl_match,
},
.probe = thunderbay_pinctrl_probe,
- .remove = thunderbay_pinctrl_remove,
};
builtin_platform_driver(thunderbay_pinctrl_driver);
diff --git a/drivers/pinctrl/pinmux.c b/drivers/pinctrl/pinmux.c
index 6bd7ac37a0e0..021382632608 100644
--- a/drivers/pinctrl/pinmux.c
+++ b/drivers/pinctrl/pinmux.c
@@ -744,10 +744,8 @@ static ssize_t pinmux_select(struct file *file, const char __user *user_buf,
}
ret = pinctrl_get_group_selector(pctldev, gname);
- if (ret < 0) {
- dev_err(pctldev->dev, "failed to get group selector for %s", gname);
+ if (ret < 0)
goto exit_free_buf;
- }
gsel = ret;
ret = pmxops->set_mux(pctldev, fsel, gsel);
diff --git a/drivers/pinctrl/qcom/Kconfig b/drivers/pinctrl/qcom/Kconfig
index 1378ddca084f..62d4810cfee1 100644
--- a/drivers/pinctrl/qcom/Kconfig
+++ b/drivers/pinctrl/qcom/Kconfig
@@ -48,6 +48,16 @@ config PINCTRL_IPQ8064
This is the pinctrl, pinmux, pinconf and gpiolib driver for the
Qualcomm TLMM block found in the Qualcomm IPQ8064 platform.
+config PINCTRL_IPQ5332
+ tristate "Qualcomm Technologies Inc IPQ5332 pin controller driver"
+ depends on OF
+ depends on ARM64 || COMPILE_TEST
+ depends on PINCTRL_MSM
+ help
+ This is the pinctrl, pinmux, pinconf and gpiolib driver for the
+ Qualcomm Technologies Inc TLMM block found on the Qualcomm
+ Technologies Inc IPQ5332 platform.
+
config PINCTRL_IPQ8074
tristate "Qualcomm Technologies, Inc. IPQ8074 pin controller driver"
depends on OF
@@ -248,6 +258,25 @@ config PINCTRL_QCOM_SSBI_PMIC
which are using SSBI for communication with SoC. Example PMIC's
devices are pm8058 and pm8921.
+config PINCTRL_QDU1000
+ tristate "Qualcomm Tehcnologies Inc QDU1000/QRU1000 pin controller driver"
+ depends on GPIOLIB && OF
+ depends on ARM64 || COMPILE_TEST
+ depends on PINCTRL_MSM
+ help
+ This is the pinctrl, pinmux, pinconf, and gpiolib driver for the
+ Qualcomm Technologies Inc TLMM block found on the Qualcomm
+ Technologies Inc QDU1000 and QRU1000 platforms.
+
+config PINCTRL_SA8775P
+ tristate "Qualcomm Technologies Inc SA8775P pin controller driver"
+ depends on OF
+ depends on ARM64 || COMPILE_TEST
+ depends on PINCTRL_MSM
+ help
+ This is the pinctrl, pinmux and pinconf driver for the Qualcomm
+ TLMM block found on the Qualcomm SA8775P platforms.
+
config PINCTRL_SC7180
tristate "Qualcomm Technologies Inc SC7180 pin controller driver"
depends on OF
@@ -457,6 +486,27 @@ config PINCTRL_SC8280XP_LPASS_LPI
Qualcomm Technologies Inc LPASS (Low Power Audio SubSystem) LPI
(Low Power Island) found on the Qualcomm Technologies Inc SC8280XP platform.
+config PINCTRL_SM8550
+ tristate "Qualcomm Technologies Inc SM8550 pin controller driver"
+ depends on GPIOLIB
+ depends on ARM64 || COMPILE_TEST
+ depends on PINCTRL_MSM
+ help
+ This is the pinctrl, pinmux, pinconf and gpiolib driver for the
+ Qualcomm Technologies Inc TLMM block found on the Qualcomm
+ Technologies Inc SM8550 platform.
+
+config PINCTRL_SM8550_LPASS_LPI
+ tristate "Qualcomm Technologies Inc SM8550 LPASS LPI pin controller driver"
+ depends on GPIOLIB
+ depends on ARM64 || COMPILE_TEST
+ depends on PINCTRL_LPASS_LPI
+ help
+ This is the pinctrl, pinmux, pinconf and gpiolib driver for the
+ Qualcomm Technologies Inc LPASS (Low Power Audio SubSystem) LPI
+ (Low Power Island) found on the Qualcomm Technologies Inc SM8550
+ platform.
+
config PINCTRL_LPASS_LPI
tristate "Qualcomm Technologies Inc LPASS LPI pin controller driver"
select PINMUX
diff --git a/drivers/pinctrl/qcom/Makefile b/drivers/pinctrl/qcom/Makefile
index a5c40f552e5c..bea53b52275b 100644
--- a/drivers/pinctrl/qcom/Makefile
+++ b/drivers/pinctrl/qcom/Makefile
@@ -5,6 +5,7 @@ obj-$(CONFIG_PINCTRL_APQ8064) += pinctrl-apq8064.o
obj-$(CONFIG_PINCTRL_APQ8084) += pinctrl-apq8084.o
obj-$(CONFIG_PINCTRL_IPQ4019) += pinctrl-ipq4019.o
obj-$(CONFIG_PINCTRL_IPQ8064) += pinctrl-ipq8064.o
+obj-$(CONFIG_PINCTRL_IPQ5332) += pinctrl-ipq5332.o
obj-$(CONFIG_PINCTRL_IPQ8074) += pinctrl-ipq8074.o
obj-$(CONFIG_PINCTRL_IPQ6018) += pinctrl-ipq6018.o
obj-$(CONFIG_PINCTRL_MSM8226) += pinctrl-msm8226.o
@@ -27,6 +28,8 @@ obj-$(CONFIG_PINCTRL_QCOM_SPMI_PMIC) += pinctrl-spmi-gpio.o
obj-$(CONFIG_PINCTRL_QCOM_SPMI_PMIC) += pinctrl-spmi-mpp.o
obj-$(CONFIG_PINCTRL_QCOM_SSBI_PMIC) += pinctrl-ssbi-gpio.o
obj-$(CONFIG_PINCTRL_QCOM_SSBI_PMIC) += pinctrl-ssbi-mpp.o
+obj-$(CONFIG_PINCTRL_QDU1000) += pinctrl-qdu1000.o
+obj-$(CONFIG_PINCTRL_SA8775P) += pinctrl-sa8775p.o
obj-$(CONFIG_PINCTRL_SC7180) += pinctrl-sc7180.o
obj-$(CONFIG_PINCTRL_SC7280) += pinctrl-sc7280.o
obj-$(CONFIG_PINCTRL_SC7280_LPASS_LPI) += pinctrl-sc7280-lpass-lpi.o
@@ -47,5 +50,7 @@ obj-$(CONFIG_PINCTRL_SM8250_LPASS_LPI) += pinctrl-sm8250-lpass-lpi.o
obj-$(CONFIG_PINCTRL_SM8350) += pinctrl-sm8350.o
obj-$(CONFIG_PINCTRL_SM8450) += pinctrl-sm8450.o
obj-$(CONFIG_PINCTRL_SM8450_LPASS_LPI) += pinctrl-sm8450-lpass-lpi.o
+obj-$(CONFIG_PINCTRL_SM8550) += pinctrl-sm8550.o
+obj-$(CONFIG_PINCTRL_SM8550_LPASS_LPI) += pinctrl-sm8550-lpass-lpi.o
obj-$(CONFIG_PINCTRL_SC8280XP_LPASS_LPI) += pinctrl-sc8280xp-lpass-lpi.o
obj-$(CONFIG_PINCTRL_LPASS_LPI) += pinctrl-lpass-lpi.o
diff --git a/drivers/pinctrl/qcom/pinctrl-ipq5332.c b/drivers/pinctrl/qcom/pinctrl-ipq5332.c
new file mode 100644
index 000000000000..e78d11292f42
--- /dev/null
+++ b/drivers/pinctrl/qcom/pinctrl-ipq5332.c
@@ -0,0 +1,861 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/*
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-msm.h"
+
+#define FUNCTION(fname) \
+ [msm_mux_##fname] = { \
+ .name = #fname, \
+ .groups = fname##_groups, \
+ .ngroups = ARRAY_SIZE(fname##_groups), \
+ }
+
+#define REG_SIZE 0x1000
+#define PINGROUP(id, f1, f2, f3, f4, f5, f6, f7, f8, f9) \
+ { \
+ .name = "gpio" #id, \
+ .pins = gpio##id##_pins, \
+ .npins = (unsigned int)ARRAY_SIZE(gpio##id##_pins), \
+ .funcs = (int[]){ \
+ msm_mux_gpio, /* gpio mode */ \
+ msm_mux_##f1, \
+ msm_mux_##f2, \
+ msm_mux_##f3, \
+ msm_mux_##f4, \
+ msm_mux_##f5, \
+ msm_mux_##f6, \
+ msm_mux_##f7, \
+ msm_mux_##f8, \
+ msm_mux_##f9 \
+ }, \
+ .nfuncs = 10, \
+ .ctl_reg = REG_SIZE * id, \
+ .io_reg = 0x4 + REG_SIZE * id, \
+ .intr_cfg_reg = 0x8 + REG_SIZE * id, \
+ .intr_status_reg = 0xc + REG_SIZE * id, \
+ .intr_target_reg = 0x8 + REG_SIZE * id, \
+ .mux_bit = 2, \
+ .pull_bit = 0, \
+ .drv_bit = 6, \
+ .oe_bit = 9, \
+ .in_bit = 0, \
+ .out_bit = 1, \
+ .intr_enable_bit = 0, \
+ .intr_status_bit = 0, \
+ .intr_target_bit = 5, \
+ .intr_target_kpss_val = 3, \
+ .intr_raw_status_bit = 4, \
+ .intr_polarity_bit = 1, \
+ .intr_detection_bit = 2, \
+ .intr_detection_width = 2, \
+ }
+
+static const struct pinctrl_pin_desc ipq5332_pins[] = {
+ PINCTRL_PIN(0, "GPIO_0"),
+ PINCTRL_PIN(1, "GPIO_1"),
+ PINCTRL_PIN(2, "GPIO_2"),
+ PINCTRL_PIN(3, "GPIO_3"),
+ PINCTRL_PIN(4, "GPIO_4"),
+ PINCTRL_PIN(5, "GPIO_5"),
+ PINCTRL_PIN(6, "GPIO_6"),
+ PINCTRL_PIN(7, "GPIO_7"),
+ PINCTRL_PIN(8, "GPIO_8"),
+ PINCTRL_PIN(9, "GPIO_9"),
+ PINCTRL_PIN(10, "GPIO_10"),
+ PINCTRL_PIN(11, "GPIO_11"),
+ PINCTRL_PIN(12, "GPIO_12"),
+ PINCTRL_PIN(13, "GPIO_13"),
+ PINCTRL_PIN(14, "GPIO_14"),
+ PINCTRL_PIN(15, "GPIO_15"),
+ PINCTRL_PIN(16, "GPIO_16"),
+ PINCTRL_PIN(17, "GPIO_17"),
+ PINCTRL_PIN(18, "GPIO_18"),
+ PINCTRL_PIN(19, "GPIO_19"),
+ PINCTRL_PIN(20, "GPIO_20"),
+ PINCTRL_PIN(21, "GPIO_21"),
+ PINCTRL_PIN(22, "GPIO_22"),
+ PINCTRL_PIN(23, "GPIO_23"),
+ PINCTRL_PIN(24, "GPIO_24"),
+ PINCTRL_PIN(25, "GPIO_25"),
+ PINCTRL_PIN(26, "GPIO_26"),
+ PINCTRL_PIN(27, "GPIO_27"),
+ PINCTRL_PIN(28, "GPIO_28"),
+ PINCTRL_PIN(29, "GPIO_29"),
+ PINCTRL_PIN(30, "GPIO_30"),
+ PINCTRL_PIN(31, "GPIO_31"),
+ PINCTRL_PIN(32, "GPIO_32"),
+ PINCTRL_PIN(33, "GPIO_33"),
+ PINCTRL_PIN(34, "GPIO_34"),
+ PINCTRL_PIN(35, "GPIO_35"),
+ PINCTRL_PIN(36, "GPIO_36"),
+ PINCTRL_PIN(37, "GPIO_37"),
+ PINCTRL_PIN(38, "GPIO_38"),
+ PINCTRL_PIN(39, "GPIO_39"),
+ PINCTRL_PIN(40, "GPIO_40"),
+ PINCTRL_PIN(41, "GPIO_41"),
+ PINCTRL_PIN(42, "GPIO_42"),
+ PINCTRL_PIN(43, "GPIO_43"),
+ PINCTRL_PIN(44, "GPIO_44"),
+ PINCTRL_PIN(45, "GPIO_45"),
+ PINCTRL_PIN(46, "GPIO_46"),
+ PINCTRL_PIN(47, "GPIO_47"),
+ PINCTRL_PIN(48, "GPIO_48"),
+ PINCTRL_PIN(49, "GPIO_49"),
+ PINCTRL_PIN(50, "GPIO_50"),
+ PINCTRL_PIN(51, "GPIO_51"),
+ PINCTRL_PIN(52, "GPIO_52"),
+};
+
+#define DECLARE_MSM_GPIO_PINS(pin) \
+ static const unsigned int gpio##pin##_pins[] = { pin }
+DECLARE_MSM_GPIO_PINS(0);
+DECLARE_MSM_GPIO_PINS(1);
+DECLARE_MSM_GPIO_PINS(2);
+DECLARE_MSM_GPIO_PINS(3);
+DECLARE_MSM_GPIO_PINS(4);
+DECLARE_MSM_GPIO_PINS(5);
+DECLARE_MSM_GPIO_PINS(6);
+DECLARE_MSM_GPIO_PINS(7);
+DECLARE_MSM_GPIO_PINS(8);
+DECLARE_MSM_GPIO_PINS(9);
+DECLARE_MSM_GPIO_PINS(10);
+DECLARE_MSM_GPIO_PINS(11);
+DECLARE_MSM_GPIO_PINS(12);
+DECLARE_MSM_GPIO_PINS(13);
+DECLARE_MSM_GPIO_PINS(14);
+DECLARE_MSM_GPIO_PINS(15);
+DECLARE_MSM_GPIO_PINS(16);
+DECLARE_MSM_GPIO_PINS(17);
+DECLARE_MSM_GPIO_PINS(18);
+DECLARE_MSM_GPIO_PINS(19);
+DECLARE_MSM_GPIO_PINS(20);
+DECLARE_MSM_GPIO_PINS(21);
+DECLARE_MSM_GPIO_PINS(22);
+DECLARE_MSM_GPIO_PINS(23);
+DECLARE_MSM_GPIO_PINS(24);
+DECLARE_MSM_GPIO_PINS(25);
+DECLARE_MSM_GPIO_PINS(26);
+DECLARE_MSM_GPIO_PINS(27);
+DECLARE_MSM_GPIO_PINS(28);
+DECLARE_MSM_GPIO_PINS(29);
+DECLARE_MSM_GPIO_PINS(30);
+DECLARE_MSM_GPIO_PINS(31);
+DECLARE_MSM_GPIO_PINS(32);
+DECLARE_MSM_GPIO_PINS(33);
+DECLARE_MSM_GPIO_PINS(34);
+DECLARE_MSM_GPIO_PINS(35);
+DECLARE_MSM_GPIO_PINS(36);
+DECLARE_MSM_GPIO_PINS(37);
+DECLARE_MSM_GPIO_PINS(38);
+DECLARE_MSM_GPIO_PINS(39);
+DECLARE_MSM_GPIO_PINS(40);
+DECLARE_MSM_GPIO_PINS(41);
+DECLARE_MSM_GPIO_PINS(42);
+DECLARE_MSM_GPIO_PINS(43);
+DECLARE_MSM_GPIO_PINS(44);
+DECLARE_MSM_GPIO_PINS(45);
+DECLARE_MSM_GPIO_PINS(46);
+DECLARE_MSM_GPIO_PINS(47);
+DECLARE_MSM_GPIO_PINS(48);
+DECLARE_MSM_GPIO_PINS(49);
+DECLARE_MSM_GPIO_PINS(50);
+DECLARE_MSM_GPIO_PINS(51);
+DECLARE_MSM_GPIO_PINS(52);
+
+enum ipq5332_functions {
+ msm_mux_atest_char,
+ msm_mux_atest_char0,
+ msm_mux_atest_char1,
+ msm_mux_atest_char2,
+ msm_mux_atest_char3,
+ msm_mux_atest_tic,
+ msm_mux_audio_pri,
+ msm_mux_audio_pri0,
+ msm_mux_audio_pri1,
+ msm_mux_audio_sec,
+ msm_mux_audio_sec0,
+ msm_mux_audio_sec1,
+ msm_mux_blsp0_i2c,
+ msm_mux_blsp0_spi,
+ msm_mux_blsp0_uart0,
+ msm_mux_blsp0_uart1,
+ msm_mux_blsp1_i2c0,
+ msm_mux_blsp1_i2c1,
+ msm_mux_blsp1_spi0,
+ msm_mux_blsp1_spi1,
+ msm_mux_blsp1_uart0,
+ msm_mux_blsp1_uart1,
+ msm_mux_blsp1_uart2,
+ msm_mux_blsp2_i2c0,
+ msm_mux_blsp2_i2c1,
+ msm_mux_blsp2_spi,
+ msm_mux_blsp2_spi0,
+ msm_mux_blsp2_spi1,
+ msm_mux_core_voltage,
+ msm_mux_cri_trng0,
+ msm_mux_cri_trng1,
+ msm_mux_cri_trng2,
+ msm_mux_cri_trng3,
+ msm_mux_cxc_clk,
+ msm_mux_cxc_data,
+ msm_mux_dbg_out,
+ msm_mux_gcc_plltest,
+ msm_mux_gcc_tlmm,
+ msm_mux_gpio,
+ msm_mux_lock_det,
+ msm_mux_mac0,
+ msm_mux_mac1,
+ msm_mux_mdc0,
+ msm_mux_mdc1,
+ msm_mux_mdio0,
+ msm_mux_mdio1,
+ msm_mux_pc,
+ msm_mux_pcie0_clk,
+ msm_mux_pcie0_wake,
+ msm_mux_pcie1_clk,
+ msm_mux_pcie1_wake,
+ msm_mux_pcie2_clk,
+ msm_mux_pcie2_wake,
+ msm_mux_pll_test,
+ msm_mux_prng_rosc0,
+ msm_mux_prng_rosc1,
+ msm_mux_prng_rosc2,
+ msm_mux_prng_rosc3,
+ msm_mux_pta,
+ msm_mux_pwm0,
+ msm_mux_pwm1,
+ msm_mux_pwm2,
+ msm_mux_pwm3,
+ msm_mux_qdss_cti_trig_in_a0,
+ msm_mux_qdss_cti_trig_in_a1,
+ msm_mux_qdss_cti_trig_in_b0,
+ msm_mux_qdss_cti_trig_in_b1,
+ msm_mux_qdss_cti_trig_out_a0,
+ msm_mux_qdss_cti_trig_out_a1,
+ msm_mux_qdss_cti_trig_out_b0,
+ msm_mux_qdss_cti_trig_out_b1,
+ msm_mux_qdss_traceclk_a,
+ msm_mux_qdss_traceclk_b,
+ msm_mux_qdss_tracectl_a,
+ msm_mux_qdss_tracectl_b,
+ msm_mux_qdss_tracedata_a,
+ msm_mux_qdss_tracedata_b,
+ msm_mux_qspi_data,
+ msm_mux_qspi_clk,
+ msm_mux_qspi_cs,
+ msm_mux_resout,
+ msm_mux_rx0,
+ msm_mux_rx1,
+ msm_mux_sdc_data,
+ msm_mux_sdc_clk,
+ msm_mux_sdc_cmd,
+ msm_mux_tsens_max,
+ msm_mux_wci_txd,
+ msm_mux_wci_rxd,
+ msm_mux_wsi_clk,
+ msm_mux_wsi_clk3,
+ msm_mux_wsi_data,
+ msm_mux_wsi_data3,
+ msm_mux_wsis_reset,
+ msm_mux_xfem,
+ msm_mux__,
+};
+
+static const char * const gpio_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7",
+ "gpio8", "gpio9", "gpio10", "gpio11", "gpio12", "gpio13", "gpio14",
+ "gpio15", "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21",
+ "gpio22", "gpio23", "gpio24", "gpio25", "gpio26", "gpio27", "gpio28",
+ "gpio29", "gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35",
+ "gpio36", "gpio37", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42",
+ "gpio43", "gpio44", "gpio45", "gpio46", "gpio47", "gpio48", "gpio49",
+ "gpio50", "gpio51", "gpio52",
+};
+
+static const char * const atest_char_groups[] = {
+ "gpio46",
+};
+
+static const char * const atest_char0_groups[] = {
+ "gpio0",
+};
+
+static const char * const atest_char1_groups[] = {
+ "gpio1",
+};
+
+static const char * const atest_char2_groups[] = {
+ "gpio2",
+};
+
+static const char * const atest_char3_groups[] = {
+ "gpio3",
+};
+
+static const char * const atest_tic_groups[] = {
+ "gpio9",
+};
+
+static const char * const audio_pri_groups[] = {
+ "gpio29", "gpio30", "gpio31", "gpio32",
+};
+
+static const char * const audio_pri0_groups[] = {
+ "gpio34", "gpio34",
+};
+
+static const char * const audio_pri1_groups[] = {
+ "gpio43", "gpio43",
+};
+
+static const char * const audio_sec_groups[] = {
+ "gpio33", "gpio34", "gpio35", "gpio36",
+};
+
+static const char * const audio_sec0_groups[] = {
+ "gpio30", "gpio30",
+};
+
+static const char * const audio_sec1_groups[] = {
+ "gpio45", "gpio45",
+};
+
+static const char * const blsp0_i2c_groups[] = {
+ "gpio16", "gpio17",
+};
+
+static const char * const blsp0_spi_groups[] = {
+ "gpio14", "gpio15", "gpio16", "gpio17",
+};
+
+static const char * const blsp0_uart0_groups[] = {
+ "gpio18", "gpio19",
+};
+
+static const char * const blsp0_uart1_groups[] = {
+ "gpio27", "gpio28",
+};
+
+static const char * const blsp1_i2c0_groups[] = {
+ "gpio29", "gpio30",
+};
+
+static const char * const blsp1_i2c1_groups[] = {
+ "gpio40", "gpio41",
+};
+
+static const char * const blsp1_spi0_groups[] = {
+ "gpio29", "gpio30", "gpio31", "gpio32",
+};
+
+static const char * const blsp1_spi1_groups[] = {
+ "gpio25", "gpio26", "gpio27", "gpio28",
+};
+
+static const char * const blsp1_uart0_groups[] = {
+ "gpio14", "gpio15", "gpio16", "gpio17",
+};
+
+static const char * const blsp1_uart1_groups[] = {
+ "gpio25", "gpio26", "gpio27", "gpio28",
+};
+
+static const char * const blsp1_uart2_groups[] = {
+ "gpio33", "gpio34", "gpio35", "gpio36",
+};
+
+static const char * const blsp2_i2c0_groups[] = {
+ "gpio43", "gpio45",
+};
+
+static const char * const blsp2_i2c1_groups[] = {
+ "gpio33", "gpio34",
+};
+
+static const char * const blsp2_spi_groups[] = {
+ "gpio37",
+};
+
+static const char * const blsp2_spi0_groups[] = {
+ "gpio33", "gpio34", "gpio35", "gpio36",
+};
+
+static const char * const blsp2_spi1_groups[] = {
+ "gpio40", "gpio41", "gpio42", "gpio52",
+};
+
+static const char * const core_voltage_groups[] = {
+ "gpio21", "gpio23",
+};
+
+static const char * const cri_trng0_groups[] = {
+ "gpio17",
+};
+
+static const char * const cri_trng1_groups[] = {
+ "gpio18",
+};
+
+static const char * const cri_trng2_groups[] = {
+ "gpio19",
+};
+
+static const char * const cri_trng3_groups[] = {
+ "gpio20",
+};
+
+static const char * const cxc_clk_groups[] = {
+ "gpio49",
+};
+
+static const char * const cxc_data_groups[] = {
+ "gpio50",
+};
+
+static const char * const dbg_out_groups[] = {
+ "gpio48",
+};
+
+static const char * const gcc_plltest_groups[] = {
+ "gpio43", "gpio45",
+};
+
+static const char * const gcc_tlmm_groups[] = {
+ "gpio44",
+};
+
+static const char * const lock_det_groups[] = {
+ "gpio51",
+};
+
+static const char * const mac0_groups[] = {
+ "gpio18",
+};
+
+static const char * const mac1_groups[] = {
+ "gpio19",
+};
+
+static const char * const mdc0_groups[] = {
+ "gpio25",
+};
+
+static const char * const mdc1_groups[] = {
+ "gpio27",
+};
+
+static const char * const mdio0_groups[] = {
+ "gpio26",
+};
+
+static const char * const mdio1_groups[] = {
+ "gpio28",
+};
+
+static const char * const pc_groups[] = {
+ "gpio35",
+};
+
+static const char * const pcie0_clk_groups[] = {
+ "gpio37",
+};
+
+static const char * const pcie0_wake_groups[] = {
+ "gpio39",
+};
+
+static const char * const pcie1_clk_groups[] = {
+ "gpio46",
+};
+
+static const char * const pcie1_wake_groups[] = {
+ "gpio48",
+};
+
+static const char * const pcie2_clk_groups[] = {
+ "gpio43",
+};
+
+static const char * const pcie2_wake_groups[] = {
+ "gpio45",
+};
+
+static const char * const pll_test_groups[] = {
+ "gpio49",
+};
+
+static const char * const prng_rosc0_groups[] = {
+ "gpio22",
+};
+
+static const char * const prng_rosc1_groups[] = {
+ "gpio24",
+};
+
+static const char * const prng_rosc2_groups[] = {
+ "gpio25",
+};
+
+static const char * const prng_rosc3_groups[] = {
+ "gpio26",
+};
+
+static const char * const pta_groups[] = {
+ "gpio49", "gpio50", "gpio51",
+};
+
+static const char * const pwm0_groups[] = {
+ "gpio43", "gpio44", "gpio45", "gpio46",
+};
+
+static const char * const pwm1_groups[] = {
+ "gpio29", "gpio30", "gpio31", "gpio32",
+};
+
+static const char * const pwm2_groups[] = {
+ "gpio25", "gpio26", "gpio27", "gpio28",
+};
+
+static const char * const pwm3_groups[] = {
+ "gpio8", "gpio9", "gpio10", "gpio11",
+};
+
+static const char * const qdss_cti_trig_in_a0_groups[] = {
+ "gpio5",
+};
+
+static const char * const qdss_cti_trig_in_a1_groups[] = {
+ "gpio7",
+};
+
+static const char * const qdss_cti_trig_in_b0_groups[] = {
+ "gpio47",
+};
+
+static const char * const qdss_cti_trig_in_b1_groups[] = {
+ "gpio49",
+};
+
+static const char * const qdss_cti_trig_out_a0_groups[] = {
+ "gpio4",
+};
+
+static const char * const qdss_cti_trig_out_a1_groups[] = {
+ "gpio6",
+};
+
+static const char * const qdss_cti_trig_out_b0_groups[] = {
+ "gpio46",
+};
+
+static const char * const qdss_cti_trig_out_b1_groups[] = {
+ "gpio48",
+};
+
+static const char * const qdss_traceclk_a_groups[] = {
+ "gpio8",
+};
+
+static const char * const qdss_traceclk_b_groups[] = {
+ "gpio45",
+};
+
+static const char * const qdss_tracectl_a_groups[] = {
+ "gpio9",
+};
+
+static const char * const qdss_tracectl_b_groups[] = {
+ "gpio44",
+};
+
+static const char * const qdss_tracedata_a_groups[] = {
+ "gpio10", "gpio11", "gpio12", "gpio13", "gpio14", "gpio15", "gpio16",
+ "gpio17", "gpio18", "gpio19", "gpio20", "gpio22", "gpio24", "gpio25",
+ "gpio26", "gpio27",
+};
+
+static const char * const qdss_tracedata_b_groups[] = {
+ "gpio29", "gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35",
+ "gpio36", "gpio37", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42",
+ "gpio43", "gpio52",
+};
+
+static const char * const qspi_clk_groups[] = {
+ "gpio13",
+};
+
+static const char * const qspi_cs_groups[] = {
+ "gpio12",
+};
+
+static const char * const qspi_data_groups[] = {
+ "gpio8", "gpio9", "gpio10", "gpio11",
+};
+
+static const char * const resout_groups[] = {
+ "gpio20",
+};
+
+static const char * const rx0_groups[] = {
+ "gpio48",
+};
+
+static const char * const rx1_groups[] = {
+ "gpio45",
+};
+
+static const char * const sdc_clk_groups[] = {
+ "gpio13",
+};
+
+static const char * const sdc_cmd_groups[] = {
+ "gpio12",
+};
+
+static const char * const sdc_data_groups[] = {
+ "gpio8", "gpio9", "gpio10", "gpio11",
+};
+
+static const char * const tsens_max_groups[] = {
+ "gpio28",
+};
+
+static const char * const wci_txd_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7",
+ "gpio36", "gpio43", "gpio45",
+};
+
+static const char * const wci_rxd_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7",
+ "gpio35", "gpio36", "gpio43", "gpio45",
+};
+
+static const char * const wsi_clk_groups[] = {
+ "gpio40", "gpio42",
+};
+
+static const char * const wsi_clk3_groups[] = {
+ "gpio43",
+};
+
+static const char * const wsi_data_groups[] = {
+ "gpio41", "gpio52",
+};
+
+static const char * const wsi_data3_groups[] = {
+ "gpio44",
+};
+
+static const char * const wsis_reset_groups[] = {
+ "gpio41",
+};
+
+static const char * const xfem_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7",
+};
+
+static const struct msm_function ipq5332_functions[] = {
+ FUNCTION(atest_char),
+ FUNCTION(atest_char0),
+ FUNCTION(atest_char1),
+ FUNCTION(atest_char2),
+ FUNCTION(atest_char3),
+ FUNCTION(atest_tic),
+ FUNCTION(audio_pri),
+ FUNCTION(audio_pri0),
+ FUNCTION(audio_pri1),
+ FUNCTION(audio_sec),
+ FUNCTION(audio_sec0),
+ FUNCTION(audio_sec1),
+ FUNCTION(blsp0_i2c),
+ FUNCTION(blsp0_spi),
+ FUNCTION(blsp0_uart0),
+ FUNCTION(blsp0_uart1),
+ FUNCTION(blsp1_i2c0),
+ FUNCTION(blsp1_i2c1),
+ FUNCTION(blsp1_spi0),
+ FUNCTION(blsp1_spi1),
+ FUNCTION(blsp1_uart0),
+ FUNCTION(blsp1_uart1),
+ FUNCTION(blsp1_uart2),
+ FUNCTION(blsp2_i2c0),
+ FUNCTION(blsp2_i2c1),
+ FUNCTION(blsp2_spi),
+ FUNCTION(blsp2_spi0),
+ FUNCTION(blsp2_spi1),
+ FUNCTION(core_voltage),
+ FUNCTION(cri_trng0),
+ FUNCTION(cri_trng1),
+ FUNCTION(cri_trng2),
+ FUNCTION(cri_trng3),
+ FUNCTION(cxc_clk),
+ FUNCTION(cxc_data),
+ FUNCTION(dbg_out),
+ FUNCTION(gcc_plltest),
+ FUNCTION(gcc_tlmm),
+ FUNCTION(gpio),
+ FUNCTION(lock_det),
+ FUNCTION(mac0),
+ FUNCTION(mac1),
+ FUNCTION(mdc0),
+ FUNCTION(mdc1),
+ FUNCTION(mdio0),
+ FUNCTION(mdio1),
+ FUNCTION(pc),
+ FUNCTION(pcie0_clk),
+ FUNCTION(pcie0_wake),
+ FUNCTION(pcie1_clk),
+ FUNCTION(pcie1_wake),
+ FUNCTION(pcie2_clk),
+ FUNCTION(pcie2_wake),
+ FUNCTION(pll_test),
+ FUNCTION(prng_rosc0),
+ FUNCTION(prng_rosc1),
+ FUNCTION(prng_rosc2),
+ FUNCTION(prng_rosc3),
+ FUNCTION(pta),
+ FUNCTION(pwm0),
+ FUNCTION(pwm1),
+ FUNCTION(pwm2),
+ FUNCTION(pwm3),
+ FUNCTION(qdss_cti_trig_in_a0),
+ FUNCTION(qdss_cti_trig_in_a1),
+ FUNCTION(qdss_cti_trig_in_b0),
+ FUNCTION(qdss_cti_trig_in_b1),
+ FUNCTION(qdss_cti_trig_out_a0),
+ FUNCTION(qdss_cti_trig_out_a1),
+ FUNCTION(qdss_cti_trig_out_b0),
+ FUNCTION(qdss_cti_trig_out_b1),
+ FUNCTION(qdss_traceclk_a),
+ FUNCTION(qdss_traceclk_b),
+ FUNCTION(qdss_tracectl_a),
+ FUNCTION(qdss_tracectl_b),
+ FUNCTION(qdss_tracedata_a),
+ FUNCTION(qdss_tracedata_b),
+ FUNCTION(qspi_data),
+ FUNCTION(qspi_clk),
+ FUNCTION(qspi_cs),
+ FUNCTION(resout),
+ FUNCTION(rx0),
+ FUNCTION(rx1),
+ FUNCTION(sdc_data),
+ FUNCTION(sdc_clk),
+ FUNCTION(sdc_cmd),
+ FUNCTION(tsens_max),
+ FUNCTION(wci_txd),
+ FUNCTION(wci_rxd),
+ FUNCTION(wsi_clk),
+ FUNCTION(wsi_clk3),
+ FUNCTION(wsi_data),
+ FUNCTION(wsi_data3),
+ FUNCTION(wsis_reset),
+ FUNCTION(xfem),
+};
+
+static const struct msm_pingroup ipq5332_groups[] = {
+ PINGROUP(0, atest_char0, wci_txd, wci_rxd, xfem, _, _, _, _, _),
+ PINGROUP(1, atest_char1, wci_txd, wci_rxd, xfem, _, _, _, _, _),
+ PINGROUP(2, atest_char2, wci_txd, wci_rxd, xfem, _, _, _, _, _),
+ PINGROUP(3, atest_char3, wci_txd, wci_rxd, xfem, _, _, _, _, _),
+ PINGROUP(4, qdss_cti_trig_out_a0, wci_txd, wci_rxd, xfem, _, _, _, _, _),
+ PINGROUP(5, qdss_cti_trig_in_a0, wci_txd, wci_rxd, xfem, _, _, _, _, _),
+ PINGROUP(6, qdss_cti_trig_out_a1, wci_txd, wci_rxd, xfem, _, _, _, _, _),
+ PINGROUP(7, qdss_cti_trig_in_a1, wci_txd, wci_rxd, xfem, _, _, _, _, _),
+ PINGROUP(8, sdc_data, qspi_data, pwm3, qdss_traceclk_a, _, _, _, _, _),
+ PINGROUP(9, sdc_data, qspi_data, pwm3, qdss_tracectl_a, _, atest_tic, _, _, _),
+ PINGROUP(10, sdc_data, qspi_data, pwm3, qdss_tracedata_a, _, _, _, _, _),
+ PINGROUP(11, sdc_data, qspi_data, pwm3, qdss_tracedata_a, _, _, _, _, _),
+ PINGROUP(12, sdc_cmd, qspi_cs, qdss_tracedata_a, _, _, _, _, _, _),
+ PINGROUP(13, sdc_clk, qspi_clk, qdss_tracedata_a, _, _, _, _, _, _),
+ PINGROUP(14, blsp0_spi, blsp1_uart0, qdss_tracedata_a, _, _, _, _, _, _),
+ PINGROUP(15, blsp0_spi, blsp1_uart0, qdss_tracedata_a, _, _, _, _, _, _),
+ PINGROUP(16, blsp0_spi, blsp0_i2c, blsp1_uart0, _, qdss_tracedata_a, _, _, _, _),
+ PINGROUP(17, blsp0_spi, blsp0_i2c, blsp1_uart0, _, cri_trng0, qdss_tracedata_a, _, _, _),
+ PINGROUP(18, blsp0_uart0, mac0, _, cri_trng1, qdss_tracedata_a, _, _, _, _),
+ PINGROUP(19, blsp0_uart0, mac1, _, cri_trng2, qdss_tracedata_a, _, _, _, _),
+ PINGROUP(20, resout, _, cri_trng3, qdss_tracedata_a, _, _, _, _, _),
+ PINGROUP(21, core_voltage, _, _, _, _, _, _, _, _),
+ PINGROUP(22, _, prng_rosc0, qdss_tracedata_a, _, _, _, _, _, _),
+ PINGROUP(23, core_voltage, _, _, _, _, _, _, _, _),
+ PINGROUP(24, _, prng_rosc1, qdss_tracedata_a, _, _, _, _, _, _),
+ PINGROUP(25, mdc0, blsp1_uart1, blsp1_spi1, pwm2, _, _, prng_rosc2, qdss_tracedata_a, _),
+ PINGROUP(26, mdio0, blsp1_uart1, blsp1_spi1, pwm2, _, _, prng_rosc3, qdss_tracedata_a, _),
+ PINGROUP(27, mdc1, blsp0_uart1, blsp1_uart1, blsp1_spi1, pwm2, _, _, qdss_tracedata_a, _),
+ PINGROUP(28, mdio1, blsp0_uart1, blsp1_uart1, blsp1_spi1, pwm2, _, tsens_max, _, _),
+ PINGROUP(29, audio_pri, blsp1_spi0, blsp1_i2c0, pwm1, _, qdss_tracedata_b, _, _, _),
+ PINGROUP(30, audio_pri, blsp1_spi0, blsp1_i2c0, pwm1, audio_sec0, audio_sec0, _, qdss_tracedata_b, _),
+ PINGROUP(31, audio_pri, blsp1_spi0, pwm1, _, qdss_tracedata_b, _, _, _, _),
+ PINGROUP(32, audio_pri, blsp1_spi0, pwm1, _, qdss_tracedata_b, _, _, _, _),
+ PINGROUP(33, audio_sec, blsp1_uart2, blsp2_i2c1, blsp2_spi0, _, qdss_tracedata_b, _, _, _),
+ PINGROUP(34, audio_sec, blsp1_uart2, blsp2_i2c1, blsp2_spi0, audio_pri0, audio_pri0, _, qdss_tracedata_b, _),
+ PINGROUP(35, audio_sec, blsp1_uart2, pc, wci_rxd, blsp2_spi0, _, qdss_tracedata_b, _, _),
+ PINGROUP(36, audio_sec, blsp1_uart2, wci_txd, wci_rxd, blsp2_spi0, _, qdss_tracedata_b, _, _),
+ PINGROUP(37, pcie0_clk, blsp2_spi, _, qdss_tracedata_b, _, _, _, _, _),
+ PINGROUP(38, _, qdss_tracedata_b, _, _, _, _, _, _, _),
+ PINGROUP(39, pcie0_wake, _, qdss_tracedata_b, _, _, _, _, _, _),
+ PINGROUP(40, wsi_clk, blsp1_i2c1, blsp2_spi1, _, _, qdss_tracedata_b, _, _, _),
+ PINGROUP(41, wsi_data, blsp1_i2c1, blsp2_spi1, _, _, qdss_tracedata_b, _, wsis_reset, _),
+ PINGROUP(42, wsi_clk, blsp2_spi1, _, qdss_tracedata_b, _, _, _, _, _),
+ PINGROUP(43, pcie2_clk, wci_txd, wci_rxd, blsp2_i2c0, pwm0, audio_pri1, audio_pri1, _, gcc_plltest),
+ PINGROUP(44, pwm0, _, gcc_tlmm, qdss_tracectl_b, _, wsi_data3, _, _, _),
+ PINGROUP(45, pcie2_wake, wci_txd, wci_rxd, blsp2_i2c0, rx1, pwm0, audio_sec1, audio_sec1, _),
+ PINGROUP(46, pcie1_clk, atest_char, pwm0, _, qdss_cti_trig_out_b0, _, _, _, _),
+ PINGROUP(47, _, qdss_cti_trig_in_b0, _, _, _, _, _, _, _),
+ PINGROUP(48, pcie1_wake, rx0, dbg_out, qdss_cti_trig_out_b1, _, _, _, _, _),
+ PINGROUP(49, pta, cxc_clk, pll_test, _, qdss_cti_trig_in_b1, _, _, _, _),
+ PINGROUP(50, pta, cxc_data, _, _, _, _, _, _, _),
+ PINGROUP(51, pta, lock_det, _, _, _, _, _, _, _),
+ PINGROUP(52, wsi_data, blsp2_spi1, _, qdss_tracedata_b, _, _, _, _, _),
+};
+
+static const struct msm_pinctrl_soc_data ipq5332_pinctrl = {
+ .pins = ipq5332_pins,
+ .npins = ARRAY_SIZE(ipq5332_pins),
+ .functions = ipq5332_functions,
+ .nfunctions = ARRAY_SIZE(ipq5332_functions),
+ .groups = ipq5332_groups,
+ .ngroups = ARRAY_SIZE(ipq5332_groups),
+ .ngpios = 53,
+};
+
+static int ipq5332_pinctrl_probe(struct platform_device *pdev)
+{
+ return msm_pinctrl_probe(pdev, &ipq5332_pinctrl);
+}
+
+static const struct of_device_id ipq5332_pinctrl_of_match[] = {
+ { .compatible = "qcom,ipq5332-tlmm", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, ipq5332_pinctrl_of_match);
+
+static struct platform_driver ipq5332_pinctrl_driver = {
+ .driver = {
+ .name = "ipq5332-tlmm",
+ .of_match_table = ipq5332_pinctrl_of_match,
+ },
+ .probe = ipq5332_pinctrl_probe,
+ .remove = msm_pinctrl_remove,
+};
+
+static int __init ipq5332_pinctrl_init(void)
+{
+ return platform_driver_register(&ipq5332_pinctrl_driver);
+}
+arch_initcall(ipq5332_pinctrl_init);
+
+static void __exit ipq5332_pinctrl_exit(void)
+{
+ platform_driver_unregister(&ipq5332_pinctrl_driver);
+}
+module_exit(ipq5332_pinctrl_exit);
+
+MODULE_DESCRIPTION("QTI IPQ5332 TLMM driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c b/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c
index 3dc670faa59e..87920257bb73 100644
--- a/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c
+++ b/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c
@@ -430,7 +430,6 @@ int lpi_pinctrl_probe(struct platform_device *pdev)
pctrl->chip.base = -1;
pctrl->chip.ngpio = data->npins;
pctrl->chip.label = dev_name(dev);
- pctrl->chip.of_gpio_n_cells = 2;
pctrl->chip.can_sleep = false;
mutex_init(&pctrl->slew_access_lock);
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index 47e9a8b0d474..a69f93e74435 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -310,6 +310,8 @@ static int msm_config_reg(struct msm_pinctrl *pctrl,
case PIN_CONFIG_BIAS_PULL_UP:
*bit = g->pull_bit;
*mask = 3;
+ if (g->i2c_pull_bit)
+ *mask |= BIT(g->i2c_pull_bit) >> *bit;
break;
case PIN_CONFIG_DRIVE_OPEN_DRAIN:
*bit = g->od_bit;
@@ -336,6 +338,7 @@ static int msm_config_reg(struct msm_pinctrl *pctrl,
#define MSM_KEEPER 2
#define MSM_PULL_UP_NO_KEEPER 2
#define MSM_PULL_UP 3
+#define MSM_I2C_STRONG_PULL_UP 2200
static unsigned msm_regval_to_drive(u32 val)
{
@@ -387,6 +390,8 @@ static int msm_config_group_get(struct pinctrl_dev *pctldev,
case PIN_CONFIG_BIAS_PULL_UP:
if (pctrl->soc->pull_no_keeper)
arg = arg == MSM_PULL_UP_NO_KEEPER;
+ else if (arg & BIT(g->i2c_pull_bit))
+ arg = MSM_I2C_STRONG_PULL_UP;
else
arg = arg == MSM_PULL_UP;
if (!arg)
@@ -467,6 +472,8 @@ static int msm_config_group_set(struct pinctrl_dev *pctldev,
case PIN_CONFIG_BIAS_PULL_UP:
if (pctrl->soc->pull_no_keeper)
arg = MSM_PULL_UP_NO_KEEPER;
+ else if (g->i2c_pull_bit && arg == MSM_I2C_STRONG_PULL_UP)
+ arg = BIT(g->i2c_pull_bit) | MSM_PULL_UP;
else
arg = MSM_PULL_UP;
break;
@@ -1350,7 +1357,7 @@ static int msm_gpio_init(struct msm_pinctrl *pctrl)
girq = &chip->irq;
gpio_irq_chip_set_chip(girq, &msm_gpio_irq_chip);
girq->parent_handler = msm_gpio_irq_handler;
- girq->fwnode = pctrl->dev->fwnode;
+ girq->fwnode = dev_fwnode(pctrl->dev);
girq->num_parents = 1;
girq->parents = devm_kcalloc(pctrl->dev, 1, sizeof(*girq->parents),
GFP_KERNEL);
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.h b/drivers/pinctrl/qcom/pinctrl-msm.h
index 05a1209bf9ae..985eceda2517 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.h
+++ b/drivers/pinctrl/qcom/pinctrl-msm.h
@@ -80,6 +80,7 @@ struct msm_pingroup {
unsigned pull_bit:5;
unsigned drv_bit:5;
+ unsigned i2c_pull_bit:5;
unsigned od_bit:5;
unsigned egpio_enable:5;
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8226.c b/drivers/pinctrl/qcom/pinctrl-msm8226.c
index fca0645e8008..0f05725e0a21 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm8226.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm8226.c
@@ -362,6 +362,8 @@ enum msm8226_functions {
MSM_MUX_cam_mclk0,
MSM_MUX_cam_mclk1,
MSM_MUX_cci_i2c0,
+ MSM_MUX_gp0_clk,
+ MSM_MUX_gp1_clk,
MSM_MUX_gpio,
MSM_MUX_sdc3,
MSM_MUX_wlan,
@@ -447,6 +449,9 @@ static const char * const cci_i2c0_groups[] = { "gpio29", "gpio30" };
static const char * const cam_mclk0_groups[] = { "gpio26" };
static const char * const cam_mclk1_groups[] = { "gpio27" };
+static const char * const gp0_clk_groups[] = { "gpio33" };
+static const char * const gp1_clk_groups[] = { "gpio34" };
+
static const char * const sdc3_groups[] = {
"gpio39", "gpio40", "gpio41", "gpio42", "gpio43", "gpio44"
};
@@ -480,6 +485,8 @@ static const struct msm_function msm8226_functions[] = {
FUNCTION(cam_mclk0),
FUNCTION(cam_mclk1),
FUNCTION(cci_i2c0),
+ FUNCTION(gp0_clk),
+ FUNCTION(gp1_clk),
FUNCTION(gpio),
FUNCTION(sdc3),
FUNCTION(wlan),
@@ -519,8 +526,8 @@ static const struct msm_pingroup msm8226_groups[] = {
PINGROUP(30, cci_i2c0, NA, NA, NA, NA, NA, NA),
PINGROUP(31, NA, NA, NA, NA, NA, NA, NA),
PINGROUP(32, NA, NA, NA, NA, NA, NA, NA),
- PINGROUP(33, NA, NA, NA, NA, NA, NA, NA),
- PINGROUP(34, NA, NA, NA, NA, NA, NA, NA),
+ PINGROUP(33, NA, NA, gp0_clk, NA, NA, NA, NA),
+ PINGROUP(34, NA, NA, gp1_clk, NA, NA, NA, NA),
PINGROUP(35, NA, NA, NA, NA, NA, NA, NA),
PINGROUP(36, NA, NA, NA, NA, NA, NA, NA),
PINGROUP(37, NA, NA, NA, NA, NA, NA, NA),
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8976.c b/drivers/pinctrl/qcom/pinctrl-msm8976.c
index ec43edf9b660..e11d84584719 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm8976.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm8976.c
@@ -733,7 +733,7 @@ static const char * const codec_int2_groups[] = {
"gpio74",
};
static const char * const wcss_bt_groups[] = {
- "gpio39", "gpio47", "gpio88",
+ "gpio39", "gpio47", "gpio48",
};
static const char * const sdc3_groups[] = {
"gpio39", "gpio40", "gpio41",
@@ -958,9 +958,9 @@ static const struct msm_pingroup msm8976_groups[] = {
PINGROUP(37, NA, NA, NA, qdss_tracedata_b, NA, NA, NA, NA, NA),
PINGROUP(38, NA, NA, NA, NA, NA, NA, NA, qdss_tracedata_b, NA),
PINGROUP(39, wcss_bt, sdc3, NA, qdss_tracedata_a, NA, NA, NA, NA, NA),
- PINGROUP(40, wcss_wlan, sdc3, NA, qdss_tracedata_a, NA, NA, NA, NA, NA),
- PINGROUP(41, wcss_wlan, sdc3, NA, qdss_tracedata_a, NA, NA, NA, NA, NA),
- PINGROUP(42, wcss_wlan, sdc3, NA, qdss_tracedata_a, NA, NA, NA, NA, NA),
+ PINGROUP(40, wcss_wlan2, sdc3, NA, qdss_tracedata_a, NA, NA, NA, NA, NA),
+ PINGROUP(41, wcss_wlan1, sdc3, NA, qdss_tracedata_a, NA, NA, NA, NA, NA),
+ PINGROUP(42, wcss_wlan0, sdc3, NA, qdss_tracedata_a, NA, NA, NA, NA, NA),
PINGROUP(43, wcss_wlan, sdc3, NA, NA, qdss_tracedata_a, NA, NA, NA, NA),
PINGROUP(44, wcss_wlan, sdc3, NA, NA, NA, NA, NA, NA, NA),
PINGROUP(45, wcss_fm, NA, qdss_tracectl_a, NA, NA, NA, NA, NA, NA),
diff --git a/drivers/pinctrl/qcom/pinctrl-qdu1000.c b/drivers/pinctrl/qcom/pinctrl-qdu1000.c
new file mode 100644
index 000000000000..b1d7674a2bec
--- /dev/null
+++ b/drivers/pinctrl/qcom/pinctrl-qdu1000.c
@@ -0,0 +1,1274 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-msm.h"
+
+#define FUNCTION(fname) \
+ [msm_mux_##fname] = { \
+ .name = #fname, \
+ .groups = fname##_groups, \
+ .ngroups = ARRAY_SIZE(fname##_groups), \
+ }
+
+#define REG_BASE 0x100000
+#define REG_SIZE 0x1000
+#define PINGROUP(id, f1, f2, f3, f4, f5, f6, f7, f8, f9) \
+ { \
+ .name = "gpio" #id, \
+ .pins = gpio##id##_pins, \
+ .npins = (unsigned int)ARRAY_SIZE(gpio##id##_pins), \
+ .funcs = (int[]){ \
+ msm_mux_gpio, /* gpio mode */ \
+ msm_mux_##f1, \
+ msm_mux_##f2, \
+ msm_mux_##f3, \
+ msm_mux_##f4, \
+ msm_mux_##f5, \
+ msm_mux_##f6, \
+ msm_mux_##f7, \
+ msm_mux_##f8, \
+ msm_mux_##f9 \
+ }, \
+ .nfuncs = 10, \
+ .ctl_reg = REG_BASE + REG_SIZE * id, \
+ .io_reg = REG_BASE + 0x4 + REG_SIZE * id, \
+ .intr_cfg_reg = REG_BASE + 0x8 + REG_SIZE * id, \
+ .intr_status_reg = REG_BASE + 0xc + REG_SIZE * id, \
+ .intr_target_reg = REG_BASE + 0x8 + REG_SIZE * id, \
+ .mux_bit = 2, \
+ .pull_bit = 0, \
+ .drv_bit = 6, \
+ .oe_bit = 9, \
+ .in_bit = 0, \
+ .out_bit = 1, \
+ .intr_enable_bit = 0, \
+ .intr_status_bit = 0, \
+ .intr_target_bit = 5, \
+ .intr_target_kpss_val = 3, \
+ .intr_raw_status_bit = 4, \
+ .intr_polarity_bit = 1, \
+ .intr_detection_bit = 2, \
+ .intr_detection_width = 2, \
+ }
+
+#define SDC_QDSD_PINGROUP(pg_name, ctl, pull, drv) \
+ { \
+ .name = #pg_name, \
+ .pins = pg_name##_pins, \
+ .npins = (unsigned int)ARRAY_SIZE(pg_name##_pins), \
+ .ctl_reg = REG_BASE + ctl, \
+ .io_reg = 0, \
+ .intr_cfg_reg = 0, \
+ .intr_status_reg = 0, \
+ .intr_target_reg = 0, \
+ .mux_bit = -1, \
+ .pull_bit = pull, \
+ .drv_bit = drv, \
+ .oe_bit = -1, \
+ .in_bit = -1, \
+ .out_bit = -1, \
+ .intr_enable_bit = -1, \
+ .intr_status_bit = -1, \
+ .intr_target_bit = -1, \
+ .intr_raw_status_bit = -1, \
+ .intr_polarity_bit = -1, \
+ .intr_detection_bit = -1, \
+ .intr_detection_width = -1, \
+ }
+
+#define UFS_RESET(pg_name, offset) \
+ { \
+ .name = #pg_name, \
+ .pins = pg_name##_pins, \
+ .npins = (unsigned int)ARRAY_SIZE(pg_name##_pins), \
+ .ctl_reg = offset, \
+ .io_reg = offset + 0x4, \
+ .intr_cfg_reg = 0, \
+ .intr_status_reg = 0, \
+ .intr_target_reg = 0, \
+ .mux_bit = -1, \
+ .pull_bit = 3, \
+ .drv_bit = 0, \
+ .oe_bit = -1, \
+ .in_bit = -1, \
+ .out_bit = 0, \
+ .intr_enable_bit = -1, \
+ .intr_status_bit = -1, \
+ .intr_target_bit = -1, \
+ .intr_raw_status_bit = -1, \
+ .intr_polarity_bit = -1, \
+ .intr_detection_bit = -1, \
+ .intr_detection_width = -1, \
+ }
+
+#define QUP_I3C(qup_mode, qup_offset) \
+ { \
+ .mode = qup_mode, \
+ .offset = qup_offset, \
+ }
+
+static const struct pinctrl_pin_desc qdu1000_pins[] = {
+ PINCTRL_PIN(0, "GPIO_0"),
+ PINCTRL_PIN(1, "GPIO_1"),
+ PINCTRL_PIN(2, "GPIO_2"),
+ PINCTRL_PIN(3, "GPIO_3"),
+ PINCTRL_PIN(4, "GPIO_4"),
+ PINCTRL_PIN(5, "GPIO_5"),
+ PINCTRL_PIN(6, "GPIO_6"),
+ PINCTRL_PIN(7, "GPIO_7"),
+ PINCTRL_PIN(8, "GPIO_8"),
+ PINCTRL_PIN(9, "GPIO_9"),
+ PINCTRL_PIN(10, "GPIO_10"),
+ PINCTRL_PIN(11, "GPIO_11"),
+ PINCTRL_PIN(12, "GPIO_12"),
+ PINCTRL_PIN(13, "GPIO_13"),
+ PINCTRL_PIN(14, "GPIO_14"),
+ PINCTRL_PIN(15, "GPIO_15"),
+ PINCTRL_PIN(16, "GPIO_16"),
+ PINCTRL_PIN(17, "GPIO_17"),
+ PINCTRL_PIN(18, "GPIO_18"),
+ PINCTRL_PIN(19, "GPIO_19"),
+ PINCTRL_PIN(20, "GPIO_20"),
+ PINCTRL_PIN(21, "GPIO_21"),
+ PINCTRL_PIN(22, "GPIO_22"),
+ PINCTRL_PIN(23, "GPIO_23"),
+ PINCTRL_PIN(24, "GPIO_24"),
+ PINCTRL_PIN(25, "GPIO_25"),
+ PINCTRL_PIN(26, "GPIO_26"),
+ PINCTRL_PIN(27, "GPIO_27"),
+ PINCTRL_PIN(28, "GPIO_28"),
+ PINCTRL_PIN(29, "GPIO_29"),
+ PINCTRL_PIN(30, "GPIO_30"),
+ PINCTRL_PIN(31, "GPIO_31"),
+ PINCTRL_PIN(32, "GPIO_32"),
+ PINCTRL_PIN(33, "GPIO_33"),
+ PINCTRL_PIN(34, "GPIO_34"),
+ PINCTRL_PIN(35, "GPIO_35"),
+ PINCTRL_PIN(36, "GPIO_36"),
+ PINCTRL_PIN(37, "GPIO_37"),
+ PINCTRL_PIN(38, "GPIO_38"),
+ PINCTRL_PIN(39, "GPIO_39"),
+ PINCTRL_PIN(40, "GPIO_40"),
+ PINCTRL_PIN(41, "GPIO_41"),
+ PINCTRL_PIN(42, "GPIO_42"),
+ PINCTRL_PIN(43, "GPIO_43"),
+ PINCTRL_PIN(44, "GPIO_44"),
+ PINCTRL_PIN(45, "GPIO_45"),
+ PINCTRL_PIN(46, "GPIO_46"),
+ PINCTRL_PIN(47, "GPIO_47"),
+ PINCTRL_PIN(48, "GPIO_48"),
+ PINCTRL_PIN(49, "GPIO_49"),
+ PINCTRL_PIN(50, "GPIO_50"),
+ PINCTRL_PIN(51, "GPIO_51"),
+ PINCTRL_PIN(52, "GPIO_52"),
+ PINCTRL_PIN(53, "GPIO_53"),
+ PINCTRL_PIN(54, "GPIO_54"),
+ PINCTRL_PIN(55, "GPIO_55"),
+ PINCTRL_PIN(56, "GPIO_56"),
+ PINCTRL_PIN(57, "GPIO_57"),
+ PINCTRL_PIN(58, "GPIO_58"),
+ PINCTRL_PIN(59, "GPIO_59"),
+ PINCTRL_PIN(60, "GPIO_60"),
+ PINCTRL_PIN(61, "GPIO_61"),
+ PINCTRL_PIN(62, "GPIO_62"),
+ PINCTRL_PIN(63, "GPIO_63"),
+ PINCTRL_PIN(64, "GPIO_64"),
+ PINCTRL_PIN(65, "GPIO_65"),
+ PINCTRL_PIN(66, "GPIO_66"),
+ PINCTRL_PIN(67, "GPIO_67"),
+ PINCTRL_PIN(68, "GPIO_68"),
+ PINCTRL_PIN(69, "GPIO_69"),
+ PINCTRL_PIN(70, "GPIO_70"),
+ PINCTRL_PIN(71, "GPIO_71"),
+ PINCTRL_PIN(72, "GPIO_72"),
+ PINCTRL_PIN(73, "GPIO_73"),
+ PINCTRL_PIN(74, "GPIO_74"),
+ PINCTRL_PIN(75, "GPIO_75"),
+ PINCTRL_PIN(76, "GPIO_76"),
+ PINCTRL_PIN(77, "GPIO_77"),
+ PINCTRL_PIN(78, "GPIO_78"),
+ PINCTRL_PIN(79, "GPIO_79"),
+ PINCTRL_PIN(80, "GPIO_80"),
+ PINCTRL_PIN(81, "GPIO_81"),
+ PINCTRL_PIN(82, "GPIO_82"),
+ PINCTRL_PIN(83, "GPIO_83"),
+ PINCTRL_PIN(84, "GPIO_84"),
+ PINCTRL_PIN(85, "GPIO_85"),
+ PINCTRL_PIN(86, "GPIO_86"),
+ PINCTRL_PIN(87, "GPIO_87"),
+ PINCTRL_PIN(88, "GPIO_88"),
+ PINCTRL_PIN(89, "GPIO_89"),
+ PINCTRL_PIN(90, "GPIO_90"),
+ PINCTRL_PIN(91, "GPIO_91"),
+ PINCTRL_PIN(92, "GPIO_92"),
+ PINCTRL_PIN(93, "GPIO_93"),
+ PINCTRL_PIN(94, "GPIO_94"),
+ PINCTRL_PIN(95, "GPIO_95"),
+ PINCTRL_PIN(96, "GPIO_96"),
+ PINCTRL_PIN(97, "GPIO_97"),
+ PINCTRL_PIN(98, "GPIO_98"),
+ PINCTRL_PIN(99, "GPIO_99"),
+ PINCTRL_PIN(100, "GPIO_100"),
+ PINCTRL_PIN(101, "GPIO_101"),
+ PINCTRL_PIN(102, "GPIO_102"),
+ PINCTRL_PIN(103, "GPIO_103"),
+ PINCTRL_PIN(104, "GPIO_104"),
+ PINCTRL_PIN(105, "GPIO_105"),
+ PINCTRL_PIN(106, "GPIO_106"),
+ PINCTRL_PIN(107, "GPIO_107"),
+ PINCTRL_PIN(108, "GPIO_108"),
+ PINCTRL_PIN(109, "GPIO_109"),
+ PINCTRL_PIN(110, "GPIO_110"),
+ PINCTRL_PIN(111, "GPIO_111"),
+ PINCTRL_PIN(112, "GPIO_112"),
+ PINCTRL_PIN(113, "GPIO_113"),
+ PINCTRL_PIN(114, "GPIO_114"),
+ PINCTRL_PIN(115, "GPIO_115"),
+ PINCTRL_PIN(116, "GPIO_116"),
+ PINCTRL_PIN(117, "GPIO_117"),
+ PINCTRL_PIN(118, "GPIO_118"),
+ PINCTRL_PIN(119, "GPIO_119"),
+ PINCTRL_PIN(120, "GPIO_120"),
+ PINCTRL_PIN(121, "GPIO_121"),
+ PINCTRL_PIN(122, "GPIO_122"),
+ PINCTRL_PIN(123, "GPIO_123"),
+ PINCTRL_PIN(124, "GPIO_124"),
+ PINCTRL_PIN(125, "GPIO_125"),
+ PINCTRL_PIN(126, "GPIO_126"),
+ PINCTRL_PIN(127, "GPIO_127"),
+ PINCTRL_PIN(128, "GPIO_128"),
+ PINCTRL_PIN(129, "GPIO_129"),
+ PINCTRL_PIN(130, "GPIO_130"),
+ PINCTRL_PIN(131, "GPIO_131"),
+ PINCTRL_PIN(132, "GPIO_132"),
+ PINCTRL_PIN(133, "GPIO_133"),
+ PINCTRL_PIN(134, "GPIO_134"),
+ PINCTRL_PIN(135, "GPIO_135"),
+ PINCTRL_PIN(136, "GPIO_136"),
+ PINCTRL_PIN(137, "GPIO_137"),
+ PINCTRL_PIN(138, "GPIO_138"),
+ PINCTRL_PIN(139, "GPIO_139"),
+ PINCTRL_PIN(140, "GPIO_140"),
+ PINCTRL_PIN(141, "GPIO_141"),
+ PINCTRL_PIN(142, "GPIO_142"),
+ PINCTRL_PIN(143, "GPIO_143"),
+ PINCTRL_PIN(144, "GPIO_144"),
+ PINCTRL_PIN(145, "GPIO_145"),
+ PINCTRL_PIN(146, "GPIO_146"),
+ PINCTRL_PIN(147, "GPIO_147"),
+ PINCTRL_PIN(148, "GPIO_148"),
+ PINCTRL_PIN(149, "GPIO_149"),
+ PINCTRL_PIN(150, "GPIO_150"),
+ PINCTRL_PIN(151, "SDC1_RCLK"),
+ PINCTRL_PIN(152, "SDC1_CLK"),
+ PINCTRL_PIN(153, "SDC1_CMD"),
+ PINCTRL_PIN(154, "SDC1_DATA"),
+};
+
+#define DECLARE_MSM_GPIO_PINS(pin) \
+ static const unsigned int gpio##pin##_pins[] = { pin }
+DECLARE_MSM_GPIO_PINS(0);
+DECLARE_MSM_GPIO_PINS(1);
+DECLARE_MSM_GPIO_PINS(2);
+DECLARE_MSM_GPIO_PINS(3);
+DECLARE_MSM_GPIO_PINS(4);
+DECLARE_MSM_GPIO_PINS(5);
+DECLARE_MSM_GPIO_PINS(6);
+DECLARE_MSM_GPIO_PINS(7);
+DECLARE_MSM_GPIO_PINS(8);
+DECLARE_MSM_GPIO_PINS(9);
+DECLARE_MSM_GPIO_PINS(10);
+DECLARE_MSM_GPIO_PINS(11);
+DECLARE_MSM_GPIO_PINS(12);
+DECLARE_MSM_GPIO_PINS(13);
+DECLARE_MSM_GPIO_PINS(14);
+DECLARE_MSM_GPIO_PINS(15);
+DECLARE_MSM_GPIO_PINS(16);
+DECLARE_MSM_GPIO_PINS(17);
+DECLARE_MSM_GPIO_PINS(18);
+DECLARE_MSM_GPIO_PINS(19);
+DECLARE_MSM_GPIO_PINS(20);
+DECLARE_MSM_GPIO_PINS(21);
+DECLARE_MSM_GPIO_PINS(22);
+DECLARE_MSM_GPIO_PINS(23);
+DECLARE_MSM_GPIO_PINS(24);
+DECLARE_MSM_GPIO_PINS(25);
+DECLARE_MSM_GPIO_PINS(26);
+DECLARE_MSM_GPIO_PINS(27);
+DECLARE_MSM_GPIO_PINS(28);
+DECLARE_MSM_GPIO_PINS(29);
+DECLARE_MSM_GPIO_PINS(30);
+DECLARE_MSM_GPIO_PINS(31);
+DECLARE_MSM_GPIO_PINS(32);
+DECLARE_MSM_GPIO_PINS(33);
+DECLARE_MSM_GPIO_PINS(34);
+DECLARE_MSM_GPIO_PINS(35);
+DECLARE_MSM_GPIO_PINS(36);
+DECLARE_MSM_GPIO_PINS(37);
+DECLARE_MSM_GPIO_PINS(38);
+DECLARE_MSM_GPIO_PINS(39);
+DECLARE_MSM_GPIO_PINS(40);
+DECLARE_MSM_GPIO_PINS(41);
+DECLARE_MSM_GPIO_PINS(42);
+DECLARE_MSM_GPIO_PINS(43);
+DECLARE_MSM_GPIO_PINS(44);
+DECLARE_MSM_GPIO_PINS(45);
+DECLARE_MSM_GPIO_PINS(46);
+DECLARE_MSM_GPIO_PINS(47);
+DECLARE_MSM_GPIO_PINS(48);
+DECLARE_MSM_GPIO_PINS(49);
+DECLARE_MSM_GPIO_PINS(50);
+DECLARE_MSM_GPIO_PINS(51);
+DECLARE_MSM_GPIO_PINS(52);
+DECLARE_MSM_GPIO_PINS(53);
+DECLARE_MSM_GPIO_PINS(54);
+DECLARE_MSM_GPIO_PINS(55);
+DECLARE_MSM_GPIO_PINS(56);
+DECLARE_MSM_GPIO_PINS(57);
+DECLARE_MSM_GPIO_PINS(58);
+DECLARE_MSM_GPIO_PINS(59);
+DECLARE_MSM_GPIO_PINS(60);
+DECLARE_MSM_GPIO_PINS(61);
+DECLARE_MSM_GPIO_PINS(62);
+DECLARE_MSM_GPIO_PINS(63);
+DECLARE_MSM_GPIO_PINS(64);
+DECLARE_MSM_GPIO_PINS(65);
+DECLARE_MSM_GPIO_PINS(66);
+DECLARE_MSM_GPIO_PINS(67);
+DECLARE_MSM_GPIO_PINS(68);
+DECLARE_MSM_GPIO_PINS(69);
+DECLARE_MSM_GPIO_PINS(70);
+DECLARE_MSM_GPIO_PINS(71);
+DECLARE_MSM_GPIO_PINS(72);
+DECLARE_MSM_GPIO_PINS(73);
+DECLARE_MSM_GPIO_PINS(74);
+DECLARE_MSM_GPIO_PINS(75);
+DECLARE_MSM_GPIO_PINS(76);
+DECLARE_MSM_GPIO_PINS(77);
+DECLARE_MSM_GPIO_PINS(78);
+DECLARE_MSM_GPIO_PINS(79);
+DECLARE_MSM_GPIO_PINS(80);
+DECLARE_MSM_GPIO_PINS(81);
+DECLARE_MSM_GPIO_PINS(82);
+DECLARE_MSM_GPIO_PINS(83);
+DECLARE_MSM_GPIO_PINS(84);
+DECLARE_MSM_GPIO_PINS(85);
+DECLARE_MSM_GPIO_PINS(86);
+DECLARE_MSM_GPIO_PINS(87);
+DECLARE_MSM_GPIO_PINS(88);
+DECLARE_MSM_GPIO_PINS(89);
+DECLARE_MSM_GPIO_PINS(90);
+DECLARE_MSM_GPIO_PINS(91);
+DECLARE_MSM_GPIO_PINS(92);
+DECLARE_MSM_GPIO_PINS(93);
+DECLARE_MSM_GPIO_PINS(94);
+DECLARE_MSM_GPIO_PINS(95);
+DECLARE_MSM_GPIO_PINS(96);
+DECLARE_MSM_GPIO_PINS(97);
+DECLARE_MSM_GPIO_PINS(98);
+DECLARE_MSM_GPIO_PINS(99);
+DECLARE_MSM_GPIO_PINS(100);
+DECLARE_MSM_GPIO_PINS(101);
+DECLARE_MSM_GPIO_PINS(102);
+DECLARE_MSM_GPIO_PINS(103);
+DECLARE_MSM_GPIO_PINS(104);
+DECLARE_MSM_GPIO_PINS(105);
+DECLARE_MSM_GPIO_PINS(106);
+DECLARE_MSM_GPIO_PINS(107);
+DECLARE_MSM_GPIO_PINS(108);
+DECLARE_MSM_GPIO_PINS(109);
+DECLARE_MSM_GPIO_PINS(110);
+DECLARE_MSM_GPIO_PINS(111);
+DECLARE_MSM_GPIO_PINS(112);
+DECLARE_MSM_GPIO_PINS(113);
+DECLARE_MSM_GPIO_PINS(114);
+DECLARE_MSM_GPIO_PINS(115);
+DECLARE_MSM_GPIO_PINS(116);
+DECLARE_MSM_GPIO_PINS(117);
+DECLARE_MSM_GPIO_PINS(118);
+DECLARE_MSM_GPIO_PINS(119);
+DECLARE_MSM_GPIO_PINS(120);
+DECLARE_MSM_GPIO_PINS(121);
+DECLARE_MSM_GPIO_PINS(122);
+DECLARE_MSM_GPIO_PINS(123);
+DECLARE_MSM_GPIO_PINS(124);
+DECLARE_MSM_GPIO_PINS(125);
+DECLARE_MSM_GPIO_PINS(126);
+DECLARE_MSM_GPIO_PINS(127);
+DECLARE_MSM_GPIO_PINS(128);
+DECLARE_MSM_GPIO_PINS(129);
+DECLARE_MSM_GPIO_PINS(130);
+DECLARE_MSM_GPIO_PINS(131);
+DECLARE_MSM_GPIO_PINS(132);
+DECLARE_MSM_GPIO_PINS(133);
+DECLARE_MSM_GPIO_PINS(134);
+DECLARE_MSM_GPIO_PINS(135);
+DECLARE_MSM_GPIO_PINS(136);
+DECLARE_MSM_GPIO_PINS(137);
+DECLARE_MSM_GPIO_PINS(138);
+DECLARE_MSM_GPIO_PINS(139);
+DECLARE_MSM_GPIO_PINS(140);
+DECLARE_MSM_GPIO_PINS(141);
+DECLARE_MSM_GPIO_PINS(142);
+DECLARE_MSM_GPIO_PINS(143);
+DECLARE_MSM_GPIO_PINS(144);
+DECLARE_MSM_GPIO_PINS(145);
+DECLARE_MSM_GPIO_PINS(146);
+DECLARE_MSM_GPIO_PINS(147);
+DECLARE_MSM_GPIO_PINS(148);
+DECLARE_MSM_GPIO_PINS(149);
+DECLARE_MSM_GPIO_PINS(150);
+
+static const unsigned int sdc1_rclk_pins[] = { 151 };
+static const unsigned int sdc1_clk_pins[] = { 152 };
+static const unsigned int sdc1_cmd_pins[] = { 153 };
+static const unsigned int sdc1_data_pins[] = { 154 };
+
+enum qdu1000_functions {
+ msm_mux_gpio,
+ msm_mux_cmo_pri,
+ msm_mux_si5518_int,
+ msm_mux_atest_char,
+ msm_mux_atest_usb,
+ msm_mux_char_exec,
+ msm_mux_cmu_rng,
+ msm_mux_dbg_out_clk,
+ msm_mux_ddr_bist,
+ msm_mux_ddr_pxi0,
+ msm_mux_ddr_pxi1,
+ msm_mux_ddr_pxi2,
+ msm_mux_ddr_pxi3,
+ msm_mux_ddr_pxi4,
+ msm_mux_ddr_pxi5,
+ msm_mux_ddr_pxi6,
+ msm_mux_ddr_pxi7,
+ msm_mux_eth012_int_n,
+ msm_mux_eth345_int_n,
+ msm_mux_eth6_int_n,
+ msm_mux_gcc_gp1,
+ msm_mux_gcc_gp2,
+ msm_mux_gcc_gp3,
+ msm_mux_gps_pps_in,
+ msm_mux_hardsync_pps_in,
+ msm_mux_intr_c,
+ msm_mux_jitter_bist_ref,
+ msm_mux_pcie_clkreqn,
+ msm_mux_phase_flag,
+ msm_mux_pll_bist,
+ msm_mux_pll_clk,
+ msm_mux_prng_rosc,
+ msm_mux_qdss_cti,
+ msm_mux_qdss_gpio,
+ msm_mux_qlink0_enable,
+ msm_mux_qlink0_request,
+ msm_mux_qlink0_wmss,
+ msm_mux_qlink1_enable,
+ msm_mux_qlink1_request,
+ msm_mux_qlink1_wmss,
+ msm_mux_qlink2_enable,
+ msm_mux_qlink2_request,
+ msm_mux_qlink2_wmss,
+ msm_mux_qlink3_enable,
+ msm_mux_qlink3_request,
+ msm_mux_qlink3_wmss,
+ msm_mux_qlink4_enable,
+ msm_mux_qlink4_request,
+ msm_mux_qlink4_wmss,
+ msm_mux_qlink5_enable,
+ msm_mux_qlink5_request,
+ msm_mux_qlink5_wmss,
+ msm_mux_qlink6_enable,
+ msm_mux_qlink6_request,
+ msm_mux_qlink6_wmss,
+ msm_mux_qlink7_enable,
+ msm_mux_qlink7_request,
+ msm_mux_qlink7_wmss,
+ msm_mux_qspi_clk,
+ msm_mux_qspi_cs,
+ msm_mux_qspi0,
+ msm_mux_qspi1,
+ msm_mux_qspi2,
+ msm_mux_qspi3,
+ msm_mux_qup00,
+ msm_mux_qup01,
+ msm_mux_qup02,
+ msm_mux_qup03,
+ msm_mux_qup04,
+ msm_mux_qup05,
+ msm_mux_qup06,
+ msm_mux_qup07,
+ msm_mux_qup08,
+ msm_mux_qup10,
+ msm_mux_qup11,
+ msm_mux_qup12,
+ msm_mux_qup13,
+ msm_mux_qup14,
+ msm_mux_qup15,
+ msm_mux_qup16,
+ msm_mux_qup17,
+ msm_mux_qup20,
+ msm_mux_qup21,
+ msm_mux_qup22,
+ msm_mux_smb_alert,
+ msm_mux_smb_clk,
+ msm_mux_smb_dat,
+ msm_mux_tb_trig,
+ msm_mux_tgu_ch0,
+ msm_mux_tgu_ch1,
+ msm_mux_tgu_ch2,
+ msm_mux_tgu_ch3,
+ msm_mux_tgu_ch4,
+ msm_mux_tgu_ch5,
+ msm_mux_tgu_ch6,
+ msm_mux_tgu_ch7,
+ msm_mux_tmess_prng0,
+ msm_mux_tmess_prng1,
+ msm_mux_tmess_prng2,
+ msm_mux_tmess_prng3,
+ msm_mux_tod_pps_in,
+ msm_mux_tsense_pwm1,
+ msm_mux_tsense_pwm2,
+ msm_mux_usb2phy_ac,
+ msm_mux_usb_con_det,
+ msm_mux_usb_dfp_en,
+ msm_mux_usb_phy,
+ msm_mux_vfr_0,
+ msm_mux_vfr_1,
+ msm_mux_vsense_trigger,
+ msm_mux__,
+};
+
+static const char * const gpio_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7",
+ "gpio8", "gpio9", "gpio10", "gpio11", "gpio12", "gpio13", "gpio14",
+ "gpio15", "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21",
+ "gpio22", "gpio23", "gpio24", "gpio25", "gpio26", "gpio27", "gpio28",
+ "gpio29", "gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35",
+ "gpio36", "gpio37", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42",
+ "gpio43", "gpio44", "gpio45", "gpio46", "gpio47", "gpio48", "gpio49",
+ "gpio50", "gpio51", "gpio52", "gpio53", "gpio54", "gpio55", "gpio56",
+ "gpio57", "gpio58", "gpio59", "gpio60", "gpio61", "gpio62", "gpio63",
+ "gpio64", "gpio65", "gpio66", "gpio67", "gpio68", "gpio69", "gpio70",
+ "gpio71", "gpio72", "gpio73", "gpio74", "gpio75", "gpio76", "gpio77",
+ "gpio78", "gpio79", "gpio80", "gpio81", "gpio82", "gpio83", "gpio84",
+ "gpio85", "gpio86", "gpio87", "gpio88", "gpio89", "gpio90", "gpio91",
+ "gpio92", "gpio93", "gpio94", "gpio95", "gpio96", "gpio97", "gpio98",
+ "gpio99", "gpio100", "gpio101", "gpio102", "gpio103", "gpio104",
+ "gpio105", "gpio106", "gpio107", "gpio108", "gpio109", "gpio110",
+ "gpio111", "gpio112", "gpio113", "gpio114", "gpio115", "gpio116",
+ "gpio117", "gpio118", "gpio119", "gpio120", "gpio121", "gpio122",
+ "gpio123", "gpio124", "gpio125", "gpio126", "gpio127", "gpio128",
+ "gpio129", "gpio130", "gpio131", "gpio132", "gpio133", "gpio134",
+ "gpio135", "gpio136", "gpio137", "gpio138", "gpio139", "gpio140",
+ "gpio141", "gpio142", "gpio143", "gpio144", "gpio145", "gpio146",
+ "gpio147", "gpio148", "gpio149", "gpio150",
+};
+static const char * const cmo_pri_groups[] = {
+ "gpio103",
+};
+static const char * const si5518_int_groups[] = {
+ "gpio44",
+};
+static const char * const atest_char_groups[] = {
+ "gpio89", "gpio90", "gpio91", "gpio92", "gpio95",
+};
+static const char * const atest_usb_groups[] = {
+ "gpio114", "gpio115", "gpio116", "gpio117", "gpio118",
+};
+static const char * const char_exec_groups[] = {
+ "gpio99", "gpio100",
+};
+static const char * const cmu_rng_groups[] = {
+ "gpio89", "gpio90", "gpio91", "gpio92",
+};
+static const char * const dbg_out_clk_groups[] = {
+ "gpio136",
+};
+static const char * const ddr_bist_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3",
+};
+static const char * const ddr_pxi0_groups[] = {
+ "gpio114", "gpio115",
+};
+static const char * const ddr_pxi1_groups[] = {
+ "gpio116", "gpio117",
+};
+static const char * const ddr_pxi2_groups[] = {
+ "gpio118", "gpio119",
+};
+static const char * const ddr_pxi3_groups[] = {
+ "gpio120", "gpio121",
+};
+static const char * const ddr_pxi4_groups[] = {
+ "gpio122", "gpio123",
+};
+static const char * const ddr_pxi5_groups[] = {
+ "gpio124", "gpio125",
+};
+static const char * const ddr_pxi6_groups[] = {
+ "gpio126", "gpio127",
+};
+static const char * const ddr_pxi7_groups[] = {
+ "gpio128", "gpio129",
+};
+static const char * const eth012_int_n_groups[] = {
+ "gpio86",
+};
+static const char * const eth345_int_n_groups[] = {
+ "gpio87",
+};
+static const char * const eth6_int_n_groups[] = {
+ "gpio88",
+};
+static const char * const gcc_gp1_groups[] = {
+ "gpio86", "gpio134",
+};
+static const char * const gcc_gp2_groups[] = {
+ "gpio87", "gpio135",
+};
+static const char * const gcc_gp3_groups[] = {
+ "gpio88", "gpio136",
+};
+static const char * const gps_pps_in_groups[] = {
+ "gpio49",
+};
+static const char * const hardsync_pps_in_groups[] = {
+ "gpio47",
+};
+static const char * const intr_c_groups[] = {
+ "gpio26", "gpio27", "gpio28", "gpio141", "gpio142", "gpio143",
+};
+static const char * const jitter_bist_ref_groups[] = {
+ "gpio130",
+};
+static const char * const pcie_clkreqn_groups[] = {
+ "gpio98", "gpio99", "gpio100",
+};
+static const char * const phase_flag_groups[] = {
+ "gpio6", "gpio7", "gpio8", "gpio9", "gpio16", "gpio17", "gpio18",
+ "gpio19", "gpio20", "gpio22", "gpio21", "gpio23", "gpio24", "gpio25",
+ "gpio26", "gpio27", "gpio28", "gpio29", "gpio30", "gpio31", "gpio32",
+ "gpio33", "gpio42", "gpio43", "gpio89", "gpio90", "gpio91", "gpio92",
+ "gpio95", "gpio96", "gpio97", "gpio102",
+};
+static const char * const pll_bist_groups[] = {
+ "gpio20",
+};
+static const char * const pll_clk_groups[] = {
+ "gpio98",
+};
+static const char * const prng_rosc_groups[] = {
+ "gpio18", "gpio19", "gpio20", "gpio21",
+};
+static const char * const qdss_cti_groups[] = {
+ "gpio36", "gpio37", "gpio38", "gpio39", "gpio40", "gpio41", "gpio48",
+ "gpio49", "gpio86", "gpio87", "gpio93", "gpio94", "gpio130", "gpio131",
+ "gpio132", "gpio133", "gpio134", "gpio135", "gpio144", "gpio145",
+};
+static const char * const qdss_gpio_groups[] = {
+ "gpio6", "gpio7", "gpio8", "gpio9", "gpio16", "gpio17", "gpio18",
+ "gpio19", "gpio20", "gpio21", "gpio22", "gpio23", "gpio25", "gpio26",
+ "gpio27", "gpio28", "gpio24", "gpio29", "gpio30", "gpio31", "gpio32",
+ "gpio33", "gpio34", "gpio35", "gpio42", "gpio43", "gpio88", "gpio89",
+ "gpio90", "gpio91", "gpio92", "gpio95", "gpio96", "gpio97", "gpio102",
+ "gpio103",
+};
+static const char * const qlink0_enable_groups[] = {
+ "gpio67",
+};
+static const char * const qlink0_request_groups[] = {
+ "gpio66",
+};
+static const char * const qlink0_wmss_groups[] = {
+ "gpio82",
+};
+static const char * const qlink1_enable_groups[] = {
+ "gpio69",
+};
+static const char * const qlink1_request_groups[] = {
+ "gpio68",
+};
+static const char * const qlink1_wmss_groups[] = {
+ "gpio83",
+};
+static const char * const qlink2_enable_groups[] = {
+ "gpio71",
+};
+static const char * const qlink2_request_groups[] = {
+ "gpio70",
+};
+static const char * const qlink2_wmss_groups[] = {
+ "gpio138",
+};
+static const char * const qlink3_enable_groups[] = {
+ "gpio73",
+};
+static const char * const qlink3_request_groups[] = {
+ "gpio72",
+};
+static const char * const qlink3_wmss_groups[] = {
+ "gpio139",
+};
+static const char * const qlink4_enable_groups[] = {
+ "gpio75",
+};
+static const char * const qlink4_request_groups[] = {
+ "gpio74",
+};
+static const char * const qlink4_wmss_groups[] = {
+ "gpio84",
+};
+static const char * const qlink5_enable_groups[] = {
+ "gpio77",
+};
+static const char * const qlink5_request_groups[] = {
+ "gpio76",
+};
+static const char * const qlink5_wmss_groups[] = {
+ "gpio85",
+};
+static const char * const qlink6_enable_groups[] = {
+ "gpio79",
+};
+static const char * const qlink6_request_groups[] = {
+ "gpio78",
+};
+static const char * const qlink6_wmss_groups[] = {
+ "gpio56",
+};
+static const char * const qlink7_enable_groups[] = {
+ "gpio81",
+};
+static const char * const qlink7_request_groups[] = {
+ "gpio80",
+};
+static const char * const qlink7_wmss_groups[] = {
+ "gpio57",
+};
+static const char * const qspi0_groups[] = {
+ "gpio114",
+};
+static const char * const qspi1_groups[] = {
+ "gpio115",
+};
+static const char * const qspi2_groups[] = {
+ "gpio116",
+};
+static const char * const qspi3_groups[] = {
+ "gpio117",
+};
+static const char * const qspi_clk_groups[] = {
+ "gpio126",
+};
+static const char * const qspi_cs_groups[] = {
+ "gpio125",
+};
+static const char * const qup00_groups[] = {
+ "gpio6", "gpio7", "gpio8", "gpio9",
+};
+static const char * const qup01_groups[] = {
+ "gpio10", "gpio11", "gpio12", "gpio13",
+};
+static const char * const qup02_groups[] = {
+ "gpio10", "gpio11", "gpio12", "gpio13",
+};
+static const char * const qup03_groups[] = {
+ "gpio14", "gpio15", "gpio16", "gpio17",
+};
+static const char * const qup04_groups[] = {
+ "gpio14", "gpio15", "gpio16", "gpio17",
+};
+static const char * const qup05_groups[] = {
+ "gpio130", "gpio131", "gpio132", "gpio133",
+};
+static const char * const qup06_groups[] = {
+ "gpio130", "gpio131", "gpio132", "gpio133",
+};
+static const char * const qup07_groups[] = {
+ "gpio134", "gpio135",
+};
+static const char * const qup08_groups[] = {
+ "gpio134", "gpio135",
+};
+static const char * const qup10_groups[] = {
+ "gpio18", "gpio19", "gpio20", "gpio21",
+};
+static const char * const qup11_groups[] = {
+ "gpio22", "gpio23", "gpio24", "gpio25",
+};
+static const char * const qup12_groups[] = {
+ "gpio22", "gpio23", "gpio24", "gpio25",
+};
+static const char * const qup13_groups[] = {
+ "gpio26", "gpio27", "gpio28", "gpio29",
+};
+static const char * const qup14_groups[] = {
+ "gpio26", "gpio27", "gpio28", "gpio29",
+};
+static const char * const qup15_groups[] = {
+ "gpio30", "gpio31", "gpio32", "gpio33",
+};
+static const char * const qup16_groups[] = {
+ "gpio29", "gpio34", "gpio35", "gpio36", "gpio37", "gpio38", "gpio39",
+};
+static const char * const qup17_groups[] = {
+ "gpio12", "gpio13", "gpio14", "gpio30", "gpio31", "gpio40", "gpio41",
+};
+static const char * const qup20_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3",
+};
+static const char * const qup21_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3",
+};
+static const char * const qup22_groups[] = {
+ "gpio4", "gpio5", "gpio128", "gpio129",
+};
+static const char * const smb_alert_groups[] = {
+ "gpio88", "gpio101",
+};
+static const char * const smb_clk_groups[] = {
+ "gpio133",
+};
+static const char * const smb_dat_groups[] = {
+ "gpio132",
+};
+static const char * const tb_trig_groups[] = {
+ "gpio114",
+};
+static const char * const tgu_ch0_groups[] = {
+ "gpio6",
+};
+static const char * const tgu_ch1_groups[] = {
+ "gpio7",
+};
+static const char * const tgu_ch2_groups[] = {
+ "gpio8",
+};
+static const char * const tgu_ch3_groups[] = {
+ "gpio9",
+};
+static const char * const tgu_ch4_groups[] = {
+ "gpio44",
+};
+static const char * const tgu_ch5_groups[] = {
+ "gpio45",
+};
+static const char * const tgu_ch6_groups[] = {
+ "gpio46",
+};
+static const char * const tgu_ch7_groups[] = {
+ "gpio47",
+};
+static const char * const tmess_prng0_groups[] = {
+ "gpio33",
+};
+static const char * const tmess_prng1_groups[] = {
+ "gpio32",
+};
+static const char * const tmess_prng2_groups[] = {
+ "gpio31",
+};
+static const char * const tmess_prng3_groups[] = {
+ "gpio30",
+};
+static const char * const tod_pps_in_groups[] = {
+ "gpio48",
+};
+static const char * const tsense_pwm1_groups[] = {
+ "gpio2",
+};
+static const char * const tsense_pwm2_groups[] = {
+ "gpio3",
+};
+static const char * const usb2phy_ac_groups[] = {
+ "gpio90",
+};
+static const char * const usb_con_det_groups[] = {
+ "gpio42",
+};
+static const char * const usb_dfp_en_groups[] = {
+ "gpio43",
+};
+static const char * const usb_phy_groups[] = {
+ "gpio91",
+};
+static const char * const vfr_0_groups[] = {
+ "gpio93",
+};
+static const char * const vfr_1_groups[] = {
+ "gpio94",
+};
+static const char * const vsense_trigger_groups[] = {
+ "gpio135",
+};
+
+static const struct msm_function qdu1000_functions[] = {
+ FUNCTION(gpio),
+ FUNCTION(cmo_pri),
+ FUNCTION(si5518_int),
+ FUNCTION(atest_char),
+ FUNCTION(atest_usb),
+ FUNCTION(char_exec),
+ FUNCTION(cmu_rng),
+ FUNCTION(dbg_out_clk),
+ FUNCTION(ddr_bist),
+ FUNCTION(ddr_pxi0),
+ FUNCTION(ddr_pxi1),
+ FUNCTION(ddr_pxi2),
+ FUNCTION(ddr_pxi3),
+ FUNCTION(ddr_pxi4),
+ FUNCTION(ddr_pxi5),
+ FUNCTION(ddr_pxi6),
+ FUNCTION(ddr_pxi7),
+ FUNCTION(eth012_int_n),
+ FUNCTION(eth345_int_n),
+ FUNCTION(eth6_int_n),
+ FUNCTION(gcc_gp1),
+ FUNCTION(gcc_gp2),
+ FUNCTION(gcc_gp3),
+ FUNCTION(gps_pps_in),
+ FUNCTION(hardsync_pps_in),
+ FUNCTION(intr_c),
+ FUNCTION(jitter_bist_ref),
+ FUNCTION(pcie_clkreqn),
+ FUNCTION(phase_flag),
+ FUNCTION(pll_bist),
+ FUNCTION(pll_clk),
+ FUNCTION(prng_rosc),
+ FUNCTION(qdss_cti),
+ FUNCTION(qdss_gpio),
+ FUNCTION(qlink0_enable),
+ FUNCTION(qlink0_request),
+ FUNCTION(qlink0_wmss),
+ FUNCTION(qlink1_enable),
+ FUNCTION(qlink1_request),
+ FUNCTION(qlink1_wmss),
+ FUNCTION(qlink2_enable),
+ FUNCTION(qlink2_request),
+ FUNCTION(qlink2_wmss),
+ FUNCTION(qlink3_enable),
+ FUNCTION(qlink3_request),
+ FUNCTION(qlink3_wmss),
+ FUNCTION(qlink4_enable),
+ FUNCTION(qlink4_request),
+ FUNCTION(qlink4_wmss),
+ FUNCTION(qlink5_enable),
+ FUNCTION(qlink5_request),
+ FUNCTION(qlink5_wmss),
+ FUNCTION(qlink6_enable),
+ FUNCTION(qlink6_request),
+ FUNCTION(qlink6_wmss),
+ FUNCTION(qlink7_enable),
+ FUNCTION(qlink7_request),
+ FUNCTION(qlink7_wmss),
+ FUNCTION(qspi0),
+ FUNCTION(qspi1),
+ FUNCTION(qspi2),
+ FUNCTION(qspi3),
+ FUNCTION(qspi_clk),
+ FUNCTION(qspi_cs),
+ FUNCTION(qup00),
+ FUNCTION(qup01),
+ FUNCTION(qup02),
+ FUNCTION(qup03),
+ FUNCTION(qup04),
+ FUNCTION(qup05),
+ FUNCTION(qup06),
+ FUNCTION(qup07),
+ FUNCTION(qup08),
+ FUNCTION(qup10),
+ FUNCTION(qup11),
+ FUNCTION(qup12),
+ FUNCTION(qup13),
+ FUNCTION(qup14),
+ FUNCTION(qup15),
+ FUNCTION(qup16),
+ FUNCTION(qup17),
+ FUNCTION(qup20),
+ FUNCTION(qup21),
+ FUNCTION(qup22),
+ FUNCTION(smb_alert),
+ FUNCTION(smb_clk),
+ FUNCTION(smb_dat),
+ FUNCTION(tb_trig),
+ FUNCTION(tgu_ch0),
+ FUNCTION(tgu_ch1),
+ FUNCTION(tgu_ch2),
+ FUNCTION(tgu_ch3),
+ FUNCTION(tgu_ch4),
+ FUNCTION(tgu_ch5),
+ FUNCTION(tgu_ch6),
+ FUNCTION(tgu_ch7),
+ FUNCTION(tmess_prng0),
+ FUNCTION(tmess_prng1),
+ FUNCTION(tmess_prng2),
+ FUNCTION(tmess_prng3),
+ FUNCTION(tod_pps_in),
+ FUNCTION(tsense_pwm1),
+ FUNCTION(tsense_pwm2),
+ FUNCTION(usb2phy_ac),
+ FUNCTION(usb_con_det),
+ FUNCTION(usb_dfp_en),
+ FUNCTION(usb_phy),
+ FUNCTION(vfr_0),
+ FUNCTION(vfr_1),
+ FUNCTION(vsense_trigger),
+};
+
+/*
+ * Every pin is maintained as a single group, and missing or non-existing pin
+ * would be maintained as dummy group to synchronize pin group index with
+ * pin descriptor registered with pinctrl core.
+ * Clients would not be able to request these dummy pin groups.
+ */
+static const struct msm_pingroup qdu1000_groups[] = {
+ [0] = PINGROUP(0, qup20, qup21, ddr_bist, _, _, _, _, _, _),
+ [1] = PINGROUP(1, qup20, qup21, ddr_bist, _, _, _, _, _, _),
+ [2] = PINGROUP(2, qup21, qup20, ddr_bist, _,
+ tsense_pwm1, _, _, _, _),
+ [3] = PINGROUP(3, qup21, qup20, ddr_bist, _,
+ tsense_pwm2, _, _, _, _),
+ [4] = PINGROUP(4, qup22, _, _, _, _, _, _, _, _),
+ [5] = PINGROUP(5, qup22, _, _, _, _, _, _, _, _),
+ [6] = PINGROUP(6, qup00, tgu_ch0, phase_flag, _,
+ qdss_gpio, _, _, _, _),
+ [7] = PINGROUP(7, qup00, tgu_ch1, phase_flag, _,
+ qdss_gpio, _, _, _, _),
+ [8] = PINGROUP(8, qup00, tgu_ch2, phase_flag, _,
+ qdss_gpio, _, _, _, _),
+ [9] = PINGROUP(9, qup00, tgu_ch3, phase_flag, _,
+ qdss_gpio, _, _, _, _),
+ [10] = PINGROUP(10, qup01, qup02, _, _, _, _, _, _, _),
+ [11] = PINGROUP(11, qup01, qup02, _, _, _, _, _, _, _),
+ [12] = PINGROUP(12, qup02, qup01, qup17, _, _, _, _, _, _),
+ [13] = PINGROUP(13, qup02, qup01, qup17, _, _, _, _, _, _),
+ [14] = PINGROUP(14, qup03, qup04, qup17, _, _, _, _, _, _),
+ [15] = PINGROUP(15, qup03, qup04, _, _, _, _, _, _, _),
+ [16] = PINGROUP(16, qup04, qup03, phase_flag, _,
+ qdss_gpio, _, _, _, _),
+ [17] = PINGROUP(17, qup04, qup03, phase_flag, _,
+ qdss_gpio, _, _, _, _),
+ [18] = PINGROUP(18, qup10, prng_rosc, phase_flag,
+ _, qdss_gpio, _, _, _, _),
+ [19] = PINGROUP(19, qup10, prng_rosc, phase_flag,
+ _, qdss_gpio, _, _, _, _),
+ [20] = PINGROUP(20, qup10, prng_rosc, pll_bist,
+ phase_flag, _, qdss_gpio, _, _, _),
+ [21] = PINGROUP(21, qup10, prng_rosc, phase_flag,
+ _, qdss_gpio, _, _, _, _),
+ [22] = PINGROUP(22, qup11, qup12, phase_flag, _,
+ qdss_gpio, _, _, _, _),
+ [23] = PINGROUP(23, qup11, qup12, phase_flag, _,
+ qdss_gpio, _, _, _, _),
+ [24] = PINGROUP(24, qup12, qup11, phase_flag, _,
+ qdss_gpio, _, _, _, _),
+ [25] = PINGROUP(25, qup12, qup11, phase_flag, _,
+ qdss_gpio, _, _, _, _),
+ [26] = PINGROUP(26, qup13, qup14, intr_c,
+ phase_flag, _, qdss_gpio, _, _, _),
+ [27] = PINGROUP(27, qup13, qup14, intr_c,
+ phase_flag, _, qdss_gpio, _, _, _),
+ [28] = PINGROUP(28, qup14, qup13, intr_c,
+ phase_flag, _, qdss_gpio, _, _, _),
+ [29] = PINGROUP(29, qup14, qup13, qup16,
+ phase_flag, _, qdss_gpio, _, _, _),
+ [30] = PINGROUP(30, qup17, qup15, tmess_prng3,
+ phase_flag, _, qdss_gpio, _, _, _),
+ [31] = PINGROUP(31, qup17, qup15, tmess_prng2,
+ phase_flag, _, qdss_gpio, _, _, _),
+ [32] = PINGROUP(32, qup15, tmess_prng1, phase_flag,
+ _, qdss_gpio, _, _, _, _),
+ [33] = PINGROUP(33, qup15, tmess_prng0, phase_flag,
+ _, qdss_gpio, _, _, _, _),
+ [34] = PINGROUP(34, qup16, qdss_gpio, _, _, _, _, _, _, _),
+ [35] = PINGROUP(35, qup16, qdss_gpio, _, _, _, _, _, _, _),
+ [36] = PINGROUP(36, qup16, qdss_cti, _, _, _, _, _, _, _),
+ [37] = PINGROUP(37, qup16, qdss_cti, _, _, _, _, _, _, _),
+ [38] = PINGROUP(38, qup16, qdss_cti, _, _, _, _, _, _, _),
+ [39] = PINGROUP(39, qup16, qdss_cti, _, _, _, _, _, _, _),
+ [40] = PINGROUP(40, qup17, qdss_cti, _, _, _, _, _, _, _),
+ [41] = PINGROUP(41, qup17, qdss_cti, _, _, _, _, _, _, _),
+ [42] = PINGROUP(42, usb_con_det, phase_flag, _,
+ qdss_gpio, _, _, _, _, _),
+ [43] = PINGROUP(43, usb_dfp_en, phase_flag, _,
+ qdss_gpio, _, _, _, _, _),
+ [44] = PINGROUP(44, si5518_int, tgu_ch4, _, _, _, _, _, _, _),
+ [45] = PINGROUP(45, tgu_ch5, _, _, _, _, _, _, _, _),
+ [46] = PINGROUP(46, tgu_ch6, _, _, _, _, _, _, _, _),
+ [47] = PINGROUP(47, hardsync_pps_in, tgu_ch7, _, _, _, _, _, _, _),
+ [48] = PINGROUP(48, tod_pps_in, qdss_cti, _, _, _, _, _, _, _),
+ [49] = PINGROUP(49, gps_pps_in, qdss_cti, _, _, _, _, _, _, _),
+ [50] = PINGROUP(50, _, _, _, _, _, _, _, _, _),
+ [51] = PINGROUP(51, _, _, _, _, _, _, _, _, _),
+ [52] = PINGROUP(52, _, _, _, _, _, _, _, _, _),
+ [53] = PINGROUP(53, _, _, _, _, _, _, _, _, _),
+ [54] = PINGROUP(54, _, _, _, _, _, _, _, _, _),
+ [55] = PINGROUP(55, _, _, _, _, _, _, _, _, _),
+ [56] = PINGROUP(56, _, qlink6_wmss, _, _, _, _, _, _, _),
+ [57] = PINGROUP(57, _, qlink7_wmss, _, _, _, _, _, _, _),
+ [58] = PINGROUP(58, _, _, _, _, _, _, _, _, _),
+ [59] = PINGROUP(59, _, _, _, _, _, _, _, _, _),
+ [60] = PINGROUP(60, _, _, _, _, _, _, _, _, _),
+ [61] = PINGROUP(61, _, _, _, _, _, _, _, _, _),
+ [62] = PINGROUP(62, _, _, _, _, _, _, _, _, _),
+ [63] = PINGROUP(63, _, _, _, _, _, _, _, _, _),
+ [64] = PINGROUP(64, _, _, _, _, _, _, _, _, _),
+ [65] = PINGROUP(65, _, _, _, _, _, _, _, _, _),
+ [66] = PINGROUP(66, qlink0_request, _, _, _, _, _, _, _, _),
+ [67] = PINGROUP(67, qlink0_enable, _, _, _, _, _, _, _, _),
+ [68] = PINGROUP(68, qlink1_request, _, _, _, _, _, _, _, _),
+ [69] = PINGROUP(69, qlink1_enable, _, _, _, _, _, _, _, _),
+ [70] = PINGROUP(70, qlink2_request, _, _, _, _, _, _, _, _),
+ [71] = PINGROUP(71, qlink2_enable, _, _, _, _, _, _, _, _),
+ [72] = PINGROUP(72, qlink3_request, _, _, _, _, _, _, _, _),
+ [73] = PINGROUP(73, qlink3_enable, _, _, _, _, _, _, _, _),
+ [74] = PINGROUP(74, qlink4_request, _, _, _, _, _, _, _, _),
+ [75] = PINGROUP(75, qlink4_enable, _, _, _, _, _, _, _, _),
+ [76] = PINGROUP(76, qlink5_request, _, _, _, _, _, _, _, _),
+ [77] = PINGROUP(77, qlink5_enable, _, _, _, _, _, _, _, _),
+ [78] = PINGROUP(78, qlink6_request, _, _, _, _, _, _, _, _),
+ [79] = PINGROUP(79, qlink6_enable, _, _, _, _, _, _, _, _),
+ [80] = PINGROUP(80, qlink7_request, _, _, _, _, _, _, _, _),
+ [81] = PINGROUP(81, qlink7_enable, _, _, _, _, _, _, _, _),
+ [82] = PINGROUP(82, qlink0_wmss, _, _, _, _, _, _, _, _),
+ [83] = PINGROUP(83, qlink1_wmss, _, _, _, _, _, _, _, _),
+ [84] = PINGROUP(84, qlink4_wmss, _, _, _, _, _, _, _, _),
+ [85] = PINGROUP(85, qlink5_wmss, _, _, _, _, _, _, _, _),
+ [86] = PINGROUP(86, eth012_int_n, gcc_gp1, _, qdss_cti, _, _, _, _, _),
+ [87] = PINGROUP(87, eth345_int_n, gcc_gp2, _, qdss_cti, _, _, _, _, _),
+ [88] = PINGROUP(88, eth6_int_n, smb_alert, gcc_gp3, _,
+ qdss_gpio, _, _, _, _),
+ [89] = PINGROUP(89, phase_flag, cmu_rng, _,
+ qdss_gpio, atest_char, _, _, _, _),
+ [90] = PINGROUP(90, usb2phy_ac, phase_flag,
+ cmu_rng, _, qdss_gpio,
+ atest_char, _, _, _),
+ [91] = PINGROUP(91, usb_phy, phase_flag, cmu_rng,
+ _, qdss_gpio, atest_char, _, _, _),
+ [92] = PINGROUP(92, phase_flag, cmu_rng, _,
+ qdss_gpio, atest_char, _, _, _, _),
+ [93] = PINGROUP(93, vfr_0, qdss_cti, _, _, _, _, _, _, _),
+ [94] = PINGROUP(94, vfr_1, qdss_cti, _, _, _, _, _, _, _),
+ [95] = PINGROUP(95, phase_flag, _, qdss_gpio,
+ atest_char, _, _, _, _, _),
+ [96] = PINGROUP(96, phase_flag, _, qdss_gpio, _, _, _, _, _, _),
+ [97] = PINGROUP(97, phase_flag, _, qdss_gpio, _, _, _, _, _, _),
+ [98] = PINGROUP(98, pll_clk, _, _, _, _, _, _, _, _),
+ [99] = PINGROUP(99, pcie_clkreqn, char_exec, _, _, _, _, _, _, _),
+ [100] = PINGROUP(100, char_exec, _, _, _, _, _, _, _, _),
+ [101] = PINGROUP(101, smb_alert, _, _, _, _, _, _, _, _),
+ [102] = PINGROUP(102, phase_flag, _, qdss_gpio, _, _, _, _, _, _),
+ [103] = PINGROUP(103, cmo_pri, qdss_gpio, _, _, _, _, _, _, _),
+ [104] = PINGROUP(104, _, _, _, _, _, _, _, _, _),
+ [105] = PINGROUP(105, _, _, _, _, _, _, _, _, _),
+ [106] = PINGROUP(106, _, _, _, _, _, _, _, _, _),
+ [107] = PINGROUP(107, _, _, _, _, _, _, _, _, _),
+ [108] = PINGROUP(108, _, _, _, _, _, _, _, _, _),
+ [109] = PINGROUP(109, _, _, _, _, _, _, _, _, _),
+ [110] = PINGROUP(110, _, _, _, _, _, _, _, _, _),
+ [111] = PINGROUP(111, _, _, _, _, _, _, _, _, _),
+ [112] = PINGROUP(112, _, _, _, _, _, _, _, _, _),
+ [113] = PINGROUP(113, _, _, _, _, _, _, _, _, _),
+ [114] = PINGROUP(114, qspi0, tb_trig, _,
+ atest_usb, ddr_pxi0, _, _, _, _),
+ [115] = PINGROUP(115, qspi1, _, atest_usb,
+ ddr_pxi0, _, _, _, _, _),
+ [116] = PINGROUP(116, qspi2, _, atest_usb,
+ ddr_pxi1, _, _, _, _, _),
+ [117] = PINGROUP(117, qspi3, _, atest_usb,
+ ddr_pxi1, _, _, _, _, _),
+ [118] = PINGROUP(118, _, atest_usb, ddr_pxi2, _, _, _, _, _, _),
+ [119] = PINGROUP(119, _, _, ddr_pxi2, _, _, _, _, _, _),
+ [120] = PINGROUP(120, _, _, ddr_pxi3, _, _, _, _, _, _),
+ [121] = PINGROUP(121, _, ddr_pxi3, _, _, _, _, _, _, _),
+ [122] = PINGROUP(122, _, ddr_pxi4, _, _, _, _, _, _, _),
+ [123] = PINGROUP(123, _, ddr_pxi4, _, _, _, _, _, _, _),
+ [124] = PINGROUP(124, _, ddr_pxi5, _, _, _, _, _, _, _),
+ [125] = PINGROUP(125, qspi_cs, _, ddr_pxi5, _, _, _, _, _, _),
+ [126] = PINGROUP(126, qspi_clk, _, ddr_pxi6, _, _, _, _, _, _),
+ [127] = PINGROUP(127, _, ddr_pxi6, _, _, _, _, _, _, _),
+ [128] = PINGROUP(128, qup22, _, ddr_pxi7, _, _, _, _, _, _),
+ [129] = PINGROUP(129, qup22, ddr_pxi7, _, _, _, _, _, _, _),
+ [130] = PINGROUP(130, qup05, qup06, jitter_bist_ref,
+ qdss_cti, _, _, _, _, _),
+ [131] = PINGROUP(131, qup05, qup06, qdss_cti, _, _, _, _, _, _),
+ [132] = PINGROUP(132, qup06, qup05, smb_dat,
+ qdss_cti, _, _, _, _, _),
+ [133] = PINGROUP(133, qup06, qup05, smb_clk,
+ qdss_cti, _, _, _, _, _),
+ [134] = PINGROUP(134, qup08, qup07, gcc_gp1, _,
+ qdss_cti, _, _, _, _),
+ [135] = PINGROUP(135, qup08, qup07, gcc_gp2, _,
+ qdss_cti, vsense_trigger, _, _, _),
+ [136] = PINGROUP(136, gcc_gp3, dbg_out_clk, _, _, _, _, _, _, _),
+ [137] = PINGROUP(137, _, _, _, _, _, _, _, _, _),
+ [138] = PINGROUP(138, qlink2_wmss, _, _, _, _, _, _, _, _),
+ [139] = PINGROUP(139, qlink3_wmss, _, _, _, _, _, _, _, _),
+ [140] = PINGROUP(140, _, _, _, _, _, _, _, _, _),
+ [141] = PINGROUP(141, intr_c, _, _, _, _, _, _, _, _),
+ [142] = PINGROUP(142, intr_c, _, _, _, _, _, _, _, _),
+ [143] = PINGROUP(143, intr_c, _, _, _, _, _, _, _, _),
+ [144] = PINGROUP(144, qdss_cti, _, _, _, _, _, _, _, _),
+ [145] = PINGROUP(145, qdss_cti, _, _, _, _, _, _, _, _),
+ [146] = PINGROUP(146, _, _, _, _, _, _, _, _, _),
+ [147] = PINGROUP(147, _, _, _, _, _, _, _, _, _),
+ [148] = PINGROUP(148, _, _, _, _, _, _, _, _, _),
+ [149] = PINGROUP(149, _, _, _, _, _, _, _, _, _),
+ [150] = PINGROUP(150, _, _, _, _, _, _, _, _, _),
+ [151] = SDC_QDSD_PINGROUP(sdc1_rclk, 0x9e000, 0, 0),
+ [152] = SDC_QDSD_PINGROUP(sdc1_clk, 0x9d000, 13, 6),
+ [153] = SDC_QDSD_PINGROUP(sdc1_cmd, 0x9d000, 11, 3),
+ [154] = SDC_QDSD_PINGROUP(sdc1_data, 0x9d000, 9, 0),
+};
+static const struct msm_pinctrl_soc_data qdu1000_tlmm = {
+ .pins = qdu1000_pins,
+ .npins = ARRAY_SIZE(qdu1000_pins),
+ .functions = qdu1000_functions,
+ .nfunctions = ARRAY_SIZE(qdu1000_functions),
+ .groups = qdu1000_groups,
+ .ngroups = ARRAY_SIZE(qdu1000_groups),
+ .ngpios = 151,
+};
+
+static int qdu1000_tlmm_probe(struct platform_device *pdev)
+{
+ return msm_pinctrl_probe(pdev, &qdu1000_tlmm);
+}
+
+static const struct of_device_id qdu1000_tlmm_of_match[] = {
+ { .compatible = "qcom,qdu1000-tlmm", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, qdu1000_tlmm_of_match);
+
+static struct platform_driver qdu1000_tlmm_driver = {
+ .driver = {
+ .name = "qdu1000-tlmm",
+ .of_match_table = qdu1000_tlmm_of_match,
+ },
+ .probe = qdu1000_tlmm_probe,
+ .remove = msm_pinctrl_remove,
+};
+
+static int __init qdu1000_tlmm_init(void)
+{
+ return platform_driver_register(&qdu1000_tlmm_driver);
+}
+arch_initcall(qdu1000_tlmm_init);
+
+static void __exit qdu1000_tlmm_exit(void)
+{
+ platform_driver_unregister(&qdu1000_tlmm_driver);
+}
+module_exit(qdu1000_tlmm_exit);
+
+MODULE_DESCRIPTION("QTI QDU1000 TLMM driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/qcom/pinctrl-sa8775p.c b/drivers/pinctrl/qcom/pinctrl-sa8775p.c
new file mode 100644
index 000000000000..2ae7cdca65d3
--- /dev/null
+++ b/drivers/pinctrl/qcom/pinctrl-sa8775p.c
@@ -0,0 +1,1537 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-msm.h"
+
+#define FUNCTION(fname) \
+ [msm_mux_##fname] = { \
+ .name = #fname, \
+ .groups = fname##_groups, \
+ .ngroups = ARRAY_SIZE(fname##_groups), \
+ }
+
+#define REG_BASE 0x100000
+#define REG_SIZE 0x1000
+#define PINGROUP(id, f1, f2, f3, f4, f5, f6, f7, f8, f9)\
+ { \
+ .name = "gpio" #id, \
+ .pins = gpio##id##_pins, \
+ .npins = (unsigned int)ARRAY_SIZE(gpio##id##_pins), \
+ .funcs = (int[]){ \
+ msm_mux_gpio, /* gpio mode */ \
+ msm_mux_##f1, \
+ msm_mux_##f2, \
+ msm_mux_##f3, \
+ msm_mux_##f4, \
+ msm_mux_##f5, \
+ msm_mux_##f6, \
+ msm_mux_##f7, \
+ msm_mux_##f8, \
+ msm_mux_##f9 \
+ }, \
+ .nfuncs = 10, \
+ .ctl_reg = REG_BASE + REG_SIZE * id, \
+ .io_reg = REG_BASE + 0x4 + REG_SIZE * id, \
+ .intr_cfg_reg = REG_BASE + 0x8 + REG_SIZE * id, \
+ .intr_status_reg = REG_BASE + 0xc + REG_SIZE * id, \
+ .intr_target_reg = REG_BASE + 0x8 + REG_SIZE * id, \
+ .mux_bit = 2, \
+ .pull_bit = 0, \
+ .drv_bit = 6, \
+ .egpio_enable = 12, \
+ .egpio_present = 11, \
+ .oe_bit = 9, \
+ .in_bit = 0, \
+ .out_bit = 1, \
+ .intr_enable_bit = 0, \
+ .intr_status_bit = 0, \
+ .intr_target_bit = 5, \
+ .intr_target_kpss_val = 3, \
+ .intr_raw_status_bit = 4, \
+ .intr_polarity_bit = 1, \
+ .intr_detection_bit = 2, \
+ .intr_detection_width = 2, \
+ }
+
+#define SDC_QDSD_PINGROUP(pg_name, ctl, pull, drv) \
+ { \
+ .name = #pg_name, \
+ .pins = pg_name##_pins, \
+ .npins = (unsigned int)ARRAY_SIZE(pg_name##_pins), \
+ .ctl_reg = ctl, \
+ .io_reg = 0, \
+ .intr_cfg_reg = 0, \
+ .intr_status_reg = 0, \
+ .intr_target_reg = 0, \
+ .mux_bit = -1, \
+ .pull_bit = pull, \
+ .drv_bit = drv, \
+ .oe_bit = -1, \
+ .in_bit = -1, \
+ .out_bit = -1, \
+ .intr_enable_bit = -1, \
+ .intr_status_bit = -1, \
+ .intr_target_bit = -1, \
+ .intr_raw_status_bit = -1, \
+ .intr_polarity_bit = -1, \
+ .intr_detection_bit = -1, \
+ .intr_detection_width = -1, \
+ }
+
+#define UFS_RESET(pg_name, offset) \
+ { \
+ .name = #pg_name, \
+ .pins = pg_name##_pins, \
+ .npins = (unsigned int)ARRAY_SIZE(pg_name##_pins), \
+ .ctl_reg = offset, \
+ .io_reg = offset + 0x4, \
+ .intr_cfg_reg = 0, \
+ .intr_status_reg = 0, \
+ .intr_target_reg = 0, \
+ .mux_bit = -1, \
+ .pull_bit = 3, \
+ .drv_bit = 0, \
+ .oe_bit = -1, \
+ .in_bit = -1, \
+ .out_bit = 0, \
+ .intr_enable_bit = -1, \
+ .intr_status_bit = -1, \
+ .intr_target_bit = -1, \
+ .intr_raw_status_bit = -1, \
+ .intr_polarity_bit = -1, \
+ .intr_detection_bit = -1, \
+ .intr_detection_width = -1, \
+ }
+
+#define QUP_I3C(qup_mode, qup_offset) \
+ { \
+ .mode = qup_mode, \
+ .offset = qup_offset, \
+ }
+
+#define QUP_I3C_6_MODE_OFFSET 0xAF000
+#define QUP_I3C_7_MODE_OFFSET 0xB0000
+#define QUP_I3C_13_MODE_OFFSET 0xB1000
+#define QUP_I3C_14_MODE_OFFSET 0xB2000
+
+static const struct pinctrl_pin_desc sa8775p_pins[] = {
+ PINCTRL_PIN(0, "GPIO_0"),
+ PINCTRL_PIN(1, "GPIO_1"),
+ PINCTRL_PIN(2, "GPIO_2"),
+ PINCTRL_PIN(3, "GPIO_3"),
+ PINCTRL_PIN(4, "GPIO_4"),
+ PINCTRL_PIN(5, "GPIO_5"),
+ PINCTRL_PIN(6, "GPIO_6"),
+ PINCTRL_PIN(7, "GPIO_7"),
+ PINCTRL_PIN(8, "GPIO_8"),
+ PINCTRL_PIN(9, "GPIO_9"),
+ PINCTRL_PIN(10, "GPIO_10"),
+ PINCTRL_PIN(11, "GPIO_11"),
+ PINCTRL_PIN(12, "GPIO_12"),
+ PINCTRL_PIN(13, "GPIO_13"),
+ PINCTRL_PIN(14, "GPIO_14"),
+ PINCTRL_PIN(15, "GPIO_15"),
+ PINCTRL_PIN(16, "GPIO_16"),
+ PINCTRL_PIN(17, "GPIO_17"),
+ PINCTRL_PIN(18, "GPIO_18"),
+ PINCTRL_PIN(19, "GPIO_19"),
+ PINCTRL_PIN(20, "GPIO_20"),
+ PINCTRL_PIN(21, "GPIO_21"),
+ PINCTRL_PIN(22, "GPIO_22"),
+ PINCTRL_PIN(23, "GPIO_23"),
+ PINCTRL_PIN(24, "GPIO_24"),
+ PINCTRL_PIN(25, "GPIO_25"),
+ PINCTRL_PIN(26, "GPIO_26"),
+ PINCTRL_PIN(27, "GPIO_27"),
+ PINCTRL_PIN(28, "GPIO_28"),
+ PINCTRL_PIN(29, "GPIO_29"),
+ PINCTRL_PIN(30, "GPIO_30"),
+ PINCTRL_PIN(31, "GPIO_31"),
+ PINCTRL_PIN(32, "GPIO_32"),
+ PINCTRL_PIN(33, "GPIO_33"),
+ PINCTRL_PIN(34, "GPIO_34"),
+ PINCTRL_PIN(35, "GPIO_35"),
+ PINCTRL_PIN(36, "GPIO_36"),
+ PINCTRL_PIN(37, "GPIO_37"),
+ PINCTRL_PIN(38, "GPIO_38"),
+ PINCTRL_PIN(39, "GPIO_39"),
+ PINCTRL_PIN(40, "GPIO_40"),
+ PINCTRL_PIN(41, "GPIO_41"),
+ PINCTRL_PIN(42, "GPIO_42"),
+ PINCTRL_PIN(43, "GPIO_43"),
+ PINCTRL_PIN(44, "GPIO_44"),
+ PINCTRL_PIN(45, "GPIO_45"),
+ PINCTRL_PIN(46, "GPIO_46"),
+ PINCTRL_PIN(47, "GPIO_47"),
+ PINCTRL_PIN(48, "GPIO_48"),
+ PINCTRL_PIN(49, "GPIO_49"),
+ PINCTRL_PIN(50, "GPIO_50"),
+ PINCTRL_PIN(51, "GPIO_51"),
+ PINCTRL_PIN(52, "GPIO_52"),
+ PINCTRL_PIN(53, "GPIO_53"),
+ PINCTRL_PIN(54, "GPIO_54"),
+ PINCTRL_PIN(55, "GPIO_55"),
+ PINCTRL_PIN(56, "GPIO_56"),
+ PINCTRL_PIN(57, "GPIO_57"),
+ PINCTRL_PIN(58, "GPIO_58"),
+ PINCTRL_PIN(59, "GPIO_59"),
+ PINCTRL_PIN(60, "GPIO_60"),
+ PINCTRL_PIN(61, "GPIO_61"),
+ PINCTRL_PIN(62, "GPIO_62"),
+ PINCTRL_PIN(63, "GPIO_63"),
+ PINCTRL_PIN(64, "GPIO_64"),
+ PINCTRL_PIN(65, "GPIO_65"),
+ PINCTRL_PIN(66, "GPIO_66"),
+ PINCTRL_PIN(67, "GPIO_67"),
+ PINCTRL_PIN(68, "GPIO_68"),
+ PINCTRL_PIN(69, "GPIO_69"),
+ PINCTRL_PIN(70, "GPIO_70"),
+ PINCTRL_PIN(71, "GPIO_71"),
+ PINCTRL_PIN(72, "GPIO_72"),
+ PINCTRL_PIN(73, "GPIO_73"),
+ PINCTRL_PIN(74, "GPIO_74"),
+ PINCTRL_PIN(75, "GPIO_75"),
+ PINCTRL_PIN(76, "GPIO_76"),
+ PINCTRL_PIN(77, "GPIO_77"),
+ PINCTRL_PIN(78, "GPIO_78"),
+ PINCTRL_PIN(79, "GPIO_79"),
+ PINCTRL_PIN(80, "GPIO_80"),
+ PINCTRL_PIN(81, "GPIO_81"),
+ PINCTRL_PIN(82, "GPIO_82"),
+ PINCTRL_PIN(83, "GPIO_83"),
+ PINCTRL_PIN(84, "GPIO_84"),
+ PINCTRL_PIN(85, "GPIO_85"),
+ PINCTRL_PIN(86, "GPIO_86"),
+ PINCTRL_PIN(87, "GPIO_87"),
+ PINCTRL_PIN(88, "GPIO_88"),
+ PINCTRL_PIN(89, "GPIO_89"),
+ PINCTRL_PIN(90, "GPIO_90"),
+ PINCTRL_PIN(91, "GPIO_91"),
+ PINCTRL_PIN(92, "GPIO_92"),
+ PINCTRL_PIN(93, "GPIO_93"),
+ PINCTRL_PIN(94, "GPIO_94"),
+ PINCTRL_PIN(95, "GPIO_95"),
+ PINCTRL_PIN(96, "GPIO_96"),
+ PINCTRL_PIN(97, "GPIO_97"),
+ PINCTRL_PIN(98, "GPIO_98"),
+ PINCTRL_PIN(99, "GPIO_99"),
+ PINCTRL_PIN(100, "GPIO_100"),
+ PINCTRL_PIN(101, "GPIO_101"),
+ PINCTRL_PIN(102, "GPIO_102"),
+ PINCTRL_PIN(103, "GPIO_103"),
+ PINCTRL_PIN(104, "GPIO_104"),
+ PINCTRL_PIN(105, "GPIO_105"),
+ PINCTRL_PIN(106, "GPIO_106"),
+ PINCTRL_PIN(107, "GPIO_107"),
+ PINCTRL_PIN(108, "GPIO_108"),
+ PINCTRL_PIN(109, "GPIO_109"),
+ PINCTRL_PIN(110, "GPIO_110"),
+ PINCTRL_PIN(111, "GPIO_111"),
+ PINCTRL_PIN(112, "GPIO_112"),
+ PINCTRL_PIN(113, "GPIO_113"),
+ PINCTRL_PIN(114, "GPIO_114"),
+ PINCTRL_PIN(115, "GPIO_115"),
+ PINCTRL_PIN(116, "GPIO_116"),
+ PINCTRL_PIN(117, "GPIO_117"),
+ PINCTRL_PIN(118, "GPIO_118"),
+ PINCTRL_PIN(119, "GPIO_119"),
+ PINCTRL_PIN(120, "GPIO_120"),
+ PINCTRL_PIN(121, "GPIO_121"),
+ PINCTRL_PIN(122, "GPIO_122"),
+ PINCTRL_PIN(123, "GPIO_123"),
+ PINCTRL_PIN(124, "GPIO_124"),
+ PINCTRL_PIN(125, "GPIO_125"),
+ PINCTRL_PIN(126, "GPIO_126"),
+ PINCTRL_PIN(127, "GPIO_127"),
+ PINCTRL_PIN(128, "GPIO_128"),
+ PINCTRL_PIN(129, "GPIO_129"),
+ PINCTRL_PIN(130, "GPIO_130"),
+ PINCTRL_PIN(131, "GPIO_131"),
+ PINCTRL_PIN(132, "GPIO_132"),
+ PINCTRL_PIN(133, "GPIO_133"),
+ PINCTRL_PIN(134, "GPIO_134"),
+ PINCTRL_PIN(135, "GPIO_135"),
+ PINCTRL_PIN(136, "GPIO_136"),
+ PINCTRL_PIN(137, "GPIO_137"),
+ PINCTRL_PIN(138, "GPIO_138"),
+ PINCTRL_PIN(139, "GPIO_139"),
+ PINCTRL_PIN(140, "GPIO_140"),
+ PINCTRL_PIN(141, "GPIO_141"),
+ PINCTRL_PIN(142, "GPIO_142"),
+ PINCTRL_PIN(143, "GPIO_143"),
+ PINCTRL_PIN(144, "GPIO_144"),
+ PINCTRL_PIN(145, "GPIO_145"),
+ PINCTRL_PIN(146, "GPIO_146"),
+ PINCTRL_PIN(147, "GPIO_147"),
+ PINCTRL_PIN(148, "GPIO_148"),
+ PINCTRL_PIN(149, "UFS_RESET"),
+ PINCTRL_PIN(150, "SDC1_RCLK"),
+ PINCTRL_PIN(151, "SDC1_CLK"),
+ PINCTRL_PIN(152, "SDC1_CMD"),
+ PINCTRL_PIN(153, "SDC1_DATA"),
+};
+
+#define DECLARE_MSM_GPIO_PINS(pin) \
+ static const unsigned int gpio##pin##_pins[] = { pin }
+DECLARE_MSM_GPIO_PINS(0);
+DECLARE_MSM_GPIO_PINS(1);
+DECLARE_MSM_GPIO_PINS(2);
+DECLARE_MSM_GPIO_PINS(3);
+DECLARE_MSM_GPIO_PINS(4);
+DECLARE_MSM_GPIO_PINS(5);
+DECLARE_MSM_GPIO_PINS(6);
+DECLARE_MSM_GPIO_PINS(7);
+DECLARE_MSM_GPIO_PINS(8);
+DECLARE_MSM_GPIO_PINS(9);
+DECLARE_MSM_GPIO_PINS(10);
+DECLARE_MSM_GPIO_PINS(11);
+DECLARE_MSM_GPIO_PINS(12);
+DECLARE_MSM_GPIO_PINS(13);
+DECLARE_MSM_GPIO_PINS(14);
+DECLARE_MSM_GPIO_PINS(15);
+DECLARE_MSM_GPIO_PINS(16);
+DECLARE_MSM_GPIO_PINS(17);
+DECLARE_MSM_GPIO_PINS(18);
+DECLARE_MSM_GPIO_PINS(19);
+DECLARE_MSM_GPIO_PINS(20);
+DECLARE_MSM_GPIO_PINS(21);
+DECLARE_MSM_GPIO_PINS(22);
+DECLARE_MSM_GPIO_PINS(23);
+DECLARE_MSM_GPIO_PINS(24);
+DECLARE_MSM_GPIO_PINS(25);
+DECLARE_MSM_GPIO_PINS(26);
+DECLARE_MSM_GPIO_PINS(27);
+DECLARE_MSM_GPIO_PINS(28);
+DECLARE_MSM_GPIO_PINS(29);
+DECLARE_MSM_GPIO_PINS(30);
+DECLARE_MSM_GPIO_PINS(31);
+DECLARE_MSM_GPIO_PINS(32);
+DECLARE_MSM_GPIO_PINS(33);
+DECLARE_MSM_GPIO_PINS(34);
+DECLARE_MSM_GPIO_PINS(35);
+DECLARE_MSM_GPIO_PINS(36);
+DECLARE_MSM_GPIO_PINS(37);
+DECLARE_MSM_GPIO_PINS(38);
+DECLARE_MSM_GPIO_PINS(39);
+DECLARE_MSM_GPIO_PINS(40);
+DECLARE_MSM_GPIO_PINS(41);
+DECLARE_MSM_GPIO_PINS(42);
+DECLARE_MSM_GPIO_PINS(43);
+DECLARE_MSM_GPIO_PINS(44);
+DECLARE_MSM_GPIO_PINS(45);
+DECLARE_MSM_GPIO_PINS(46);
+DECLARE_MSM_GPIO_PINS(47);
+DECLARE_MSM_GPIO_PINS(48);
+DECLARE_MSM_GPIO_PINS(49);
+DECLARE_MSM_GPIO_PINS(50);
+DECLARE_MSM_GPIO_PINS(51);
+DECLARE_MSM_GPIO_PINS(52);
+DECLARE_MSM_GPIO_PINS(53);
+DECLARE_MSM_GPIO_PINS(54);
+DECLARE_MSM_GPIO_PINS(55);
+DECLARE_MSM_GPIO_PINS(56);
+DECLARE_MSM_GPIO_PINS(57);
+DECLARE_MSM_GPIO_PINS(58);
+DECLARE_MSM_GPIO_PINS(59);
+DECLARE_MSM_GPIO_PINS(60);
+DECLARE_MSM_GPIO_PINS(61);
+DECLARE_MSM_GPIO_PINS(62);
+DECLARE_MSM_GPIO_PINS(63);
+DECLARE_MSM_GPIO_PINS(64);
+DECLARE_MSM_GPIO_PINS(65);
+DECLARE_MSM_GPIO_PINS(66);
+DECLARE_MSM_GPIO_PINS(67);
+DECLARE_MSM_GPIO_PINS(68);
+DECLARE_MSM_GPIO_PINS(69);
+DECLARE_MSM_GPIO_PINS(70);
+DECLARE_MSM_GPIO_PINS(71);
+DECLARE_MSM_GPIO_PINS(72);
+DECLARE_MSM_GPIO_PINS(73);
+DECLARE_MSM_GPIO_PINS(74);
+DECLARE_MSM_GPIO_PINS(75);
+DECLARE_MSM_GPIO_PINS(76);
+DECLARE_MSM_GPIO_PINS(77);
+DECLARE_MSM_GPIO_PINS(78);
+DECLARE_MSM_GPIO_PINS(79);
+DECLARE_MSM_GPIO_PINS(80);
+DECLARE_MSM_GPIO_PINS(81);
+DECLARE_MSM_GPIO_PINS(82);
+DECLARE_MSM_GPIO_PINS(83);
+DECLARE_MSM_GPIO_PINS(84);
+DECLARE_MSM_GPIO_PINS(85);
+DECLARE_MSM_GPIO_PINS(86);
+DECLARE_MSM_GPIO_PINS(87);
+DECLARE_MSM_GPIO_PINS(88);
+DECLARE_MSM_GPIO_PINS(89);
+DECLARE_MSM_GPIO_PINS(90);
+DECLARE_MSM_GPIO_PINS(91);
+DECLARE_MSM_GPIO_PINS(92);
+DECLARE_MSM_GPIO_PINS(93);
+DECLARE_MSM_GPIO_PINS(94);
+DECLARE_MSM_GPIO_PINS(95);
+DECLARE_MSM_GPIO_PINS(96);
+DECLARE_MSM_GPIO_PINS(97);
+DECLARE_MSM_GPIO_PINS(98);
+DECLARE_MSM_GPIO_PINS(99);
+DECLARE_MSM_GPIO_PINS(100);
+DECLARE_MSM_GPIO_PINS(101);
+DECLARE_MSM_GPIO_PINS(102);
+DECLARE_MSM_GPIO_PINS(103);
+DECLARE_MSM_GPIO_PINS(104);
+DECLARE_MSM_GPIO_PINS(105);
+DECLARE_MSM_GPIO_PINS(106);
+DECLARE_MSM_GPIO_PINS(107);
+DECLARE_MSM_GPIO_PINS(108);
+DECLARE_MSM_GPIO_PINS(109);
+DECLARE_MSM_GPIO_PINS(110);
+DECLARE_MSM_GPIO_PINS(111);
+DECLARE_MSM_GPIO_PINS(112);
+DECLARE_MSM_GPIO_PINS(113);
+DECLARE_MSM_GPIO_PINS(114);
+DECLARE_MSM_GPIO_PINS(115);
+DECLARE_MSM_GPIO_PINS(116);
+DECLARE_MSM_GPIO_PINS(117);
+DECLARE_MSM_GPIO_PINS(118);
+DECLARE_MSM_GPIO_PINS(119);
+DECLARE_MSM_GPIO_PINS(120);
+DECLARE_MSM_GPIO_PINS(121);
+DECLARE_MSM_GPIO_PINS(122);
+DECLARE_MSM_GPIO_PINS(123);
+DECLARE_MSM_GPIO_PINS(124);
+DECLARE_MSM_GPIO_PINS(125);
+DECLARE_MSM_GPIO_PINS(126);
+DECLARE_MSM_GPIO_PINS(127);
+DECLARE_MSM_GPIO_PINS(128);
+DECLARE_MSM_GPIO_PINS(129);
+DECLARE_MSM_GPIO_PINS(130);
+DECLARE_MSM_GPIO_PINS(131);
+DECLARE_MSM_GPIO_PINS(132);
+DECLARE_MSM_GPIO_PINS(133);
+DECLARE_MSM_GPIO_PINS(134);
+DECLARE_MSM_GPIO_PINS(135);
+DECLARE_MSM_GPIO_PINS(136);
+DECLARE_MSM_GPIO_PINS(137);
+DECLARE_MSM_GPIO_PINS(138);
+DECLARE_MSM_GPIO_PINS(139);
+DECLARE_MSM_GPIO_PINS(140);
+DECLARE_MSM_GPIO_PINS(141);
+DECLARE_MSM_GPIO_PINS(142);
+DECLARE_MSM_GPIO_PINS(143);
+DECLARE_MSM_GPIO_PINS(144);
+DECLARE_MSM_GPIO_PINS(145);
+DECLARE_MSM_GPIO_PINS(146);
+DECLARE_MSM_GPIO_PINS(147);
+DECLARE_MSM_GPIO_PINS(148);
+
+static const unsigned int ufs_reset_pins[] = { 149 };
+static const unsigned int sdc1_rclk_pins[] = { 150 };
+static const unsigned int sdc1_clk_pins[] = { 151 };
+static const unsigned int sdc1_cmd_pins[] = { 152 };
+static const unsigned int sdc1_data_pins[] = { 153 };
+
+enum sa8775p_functions {
+ msm_mux_gpio,
+ msm_mux_atest_char,
+ msm_mux_atest_usb2,
+ msm_mux_audio_ref,
+ msm_mux_cam_mclk,
+ msm_mux_cci_async,
+ msm_mux_cci_i2c,
+ msm_mux_cci_timer0,
+ msm_mux_cci_timer1,
+ msm_mux_cci_timer2,
+ msm_mux_cci_timer3,
+ msm_mux_cci_timer4,
+ msm_mux_cci_timer5,
+ msm_mux_cci_timer6,
+ msm_mux_cci_timer7,
+ msm_mux_cci_timer8,
+ msm_mux_cci_timer9,
+ msm_mux_cri_trng,
+ msm_mux_cri_trng0,
+ msm_mux_cri_trng1,
+ msm_mux_dbg_out,
+ msm_mux_ddr_bist,
+ msm_mux_ddr_pxi0,
+ msm_mux_ddr_pxi1,
+ msm_mux_ddr_pxi2,
+ msm_mux_ddr_pxi3,
+ msm_mux_ddr_pxi4,
+ msm_mux_ddr_pxi5,
+ msm_mux_edp0_hot,
+ msm_mux_edp0_lcd,
+ msm_mux_edp1_hot,
+ msm_mux_edp1_lcd,
+ msm_mux_edp2_hot,
+ msm_mux_edp2_lcd,
+ msm_mux_edp3_hot,
+ msm_mux_edp3_lcd,
+ msm_mux_emac0_mcg0,
+ msm_mux_emac0_mcg1,
+ msm_mux_emac0_mcg2,
+ msm_mux_emac0_mcg3,
+ msm_mux_emac0_mdc,
+ msm_mux_emac0_mdio,
+ msm_mux_emac0_ptp_aux,
+ msm_mux_emac0_ptp_pps,
+ msm_mux_emac1_mcg0,
+ msm_mux_emac1_mcg1,
+ msm_mux_emac1_mcg2,
+ msm_mux_emac1_mcg3,
+ msm_mux_emac1_mdc,
+ msm_mux_emac1_mdio,
+ msm_mux_emac1_ptp_aux,
+ msm_mux_emac1_ptp_pps,
+ msm_mux_gcc_gp1,
+ msm_mux_gcc_gp2,
+ msm_mux_gcc_gp3,
+ msm_mux_gcc_gp4,
+ msm_mux_gcc_gp5,
+ msm_mux_hs0_mi2s,
+ msm_mux_hs1_mi2s,
+ msm_mux_hs2_mi2s,
+ msm_mux_ibi_i3c,
+ msm_mux_jitter_bist,
+ msm_mux_mdp0_vsync0,
+ msm_mux_mdp0_vsync1,
+ msm_mux_mdp0_vsync2,
+ msm_mux_mdp0_vsync3,
+ msm_mux_mdp0_vsync4,
+ msm_mux_mdp0_vsync5,
+ msm_mux_mdp0_vsync6,
+ msm_mux_mdp0_vsync7,
+ msm_mux_mdp0_vsync8,
+ msm_mux_mdp1_vsync0,
+ msm_mux_mdp1_vsync1,
+ msm_mux_mdp1_vsync2,
+ msm_mux_mdp1_vsync3,
+ msm_mux_mdp1_vsync4,
+ msm_mux_mdp1_vsync5,
+ msm_mux_mdp1_vsync6,
+ msm_mux_mdp1_vsync7,
+ msm_mux_mdp1_vsync8,
+ msm_mux_mdp_vsync,
+ msm_mux_mi2s1_data0,
+ msm_mux_mi2s1_data1,
+ msm_mux_mi2s1_sck,
+ msm_mux_mi2s1_ws,
+ msm_mux_mi2s2_data0,
+ msm_mux_mi2s2_data1,
+ msm_mux_mi2s2_sck,
+ msm_mux_mi2s2_ws,
+ msm_mux_mi2s_mclk0,
+ msm_mux_mi2s_mclk1,
+ msm_mux_pcie0_clkreq,
+ msm_mux_pcie1_clkreq,
+ msm_mux_phase_flag,
+ msm_mux_pll_bist,
+ msm_mux_pll_clk,
+ msm_mux_prng_rosc0,
+ msm_mux_prng_rosc1,
+ msm_mux_prng_rosc2,
+ msm_mux_prng_rosc3,
+ msm_mux_qdss_cti,
+ msm_mux_qdss_gpio,
+ msm_mux_qup0_se0,
+ msm_mux_qup0_se1,
+ msm_mux_qup0_se2,
+ msm_mux_qup0_se3,
+ msm_mux_qup0_se4,
+ msm_mux_qup0_se5,
+ msm_mux_qup1_se0,
+ msm_mux_qup1_se1,
+ msm_mux_qup1_se2,
+ msm_mux_qup1_se3,
+ msm_mux_qup1_se4,
+ msm_mux_qup1_se5,
+ msm_mux_qup1_se6,
+ msm_mux_qup2_se0,
+ msm_mux_qup2_se1,
+ msm_mux_qup2_se2,
+ msm_mux_qup2_se3,
+ msm_mux_qup2_se4,
+ msm_mux_qup2_se5,
+ msm_mux_qup2_se6,
+ msm_mux_qup3_se0,
+ msm_mux_sail_top,
+ msm_mux_sailss_emac0,
+ msm_mux_sailss_ospi,
+ msm_mux_sgmii_phy,
+ msm_mux_tb_trig,
+ msm_mux_tgu_ch0,
+ msm_mux_tgu_ch1,
+ msm_mux_tgu_ch2,
+ msm_mux_tgu_ch3,
+ msm_mux_tgu_ch4,
+ msm_mux_tgu_ch5,
+ msm_mux_tsense_pwm1,
+ msm_mux_tsense_pwm2,
+ msm_mux_tsense_pwm3,
+ msm_mux_tsense_pwm4,
+ msm_mux_usb2phy_ac,
+ msm_mux_vsense_trigger,
+ msm_mux__,
+};
+
+static const char * const gpio_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7",
+ "gpio8", "gpio9", "gpio10", "gpio11", "gpio12", "gpio13", "gpio14",
+ "gpio15", "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21",
+ "gpio22", "gpio23", "gpio24", "gpio25", "gpio26", "gpio27", "gpio28",
+ "gpio29", "gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35",
+ "gpio36", "gpio37", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42",
+ "gpio43", "gpio44", "gpio45", "gpio46", "gpio47", "gpio48", "gpio49",
+ "gpio50", "gpio51", "gpio52", "gpio53", "gpio54", "gpio55", "gpio56",
+ "gpio57", "gpio58", "gpio59", "gpio60", "gpio61", "gpio62", "gpio63",
+ "gpio64", "gpio65", "gpio66", "gpio67", "gpio68", "gpio69", "gpio70",
+ "gpio71", "gpio72", "gpio73", "gpio74", "gpio75", "gpio76", "gpio77",
+ "gpio78", "gpio79", "gpio80", "gpio81", "gpio82", "gpio83", "gpio84",
+ "gpio85", "gpio86", "gpio87", "gpio88", "gpio89", "gpio90", "gpio91",
+ "gpio92", "gpio93", "gpio94", "gpio95", "gpio96", "gpio97", "gpio98",
+ "gpio99", "gpio100", "gpio101", "gpio102", "gpio103", "gpio104",
+ "gpio105", "gpio106", "gpio107", "gpio108", "gpio109", "gpio110",
+ "gpio111", "gpio112", "gpio113", "gpio114", "gpio115", "gpio116",
+ "gpio117", "gpio118", "gpio119", "gpio120", "gpio121", "gpio122",
+ "gpio123", "gpio124", "gpio125", "gpio126", "gpio127", "gpio128",
+ "gpio129", "gpio130", "gpio131", "gpio132", "gpio133", "gpio134",
+ "gpio135", "gpio136", "gpio137", "gpio138", "gpio139", "gpio140",
+ "gpio141", "gpio142", "gpio143", "gpio144", "gpio145", "gpio146",
+ "gpio147", "gpio148",
+};
+
+static const char * const atest_char_groups[] = {
+ "gpio27", "gpio58", "gpio59", "gpio89", "gpio90",
+};
+
+static const char * const atest_usb2_groups[] = {
+ "gpio58", "gpio59", "gpio86", "gpio87", "gpio88", "gpio89", "gpio90",
+ "gpio91", "gpio92", "gpio93", "gpio94", "gpio95", "gpio96", "gpio97",
+ "gpio105",
+};
+
+static const char * const audio_ref_groups[] = {
+ "gpio113",
+};
+
+static const char * const cam_mclk_groups[] = {
+ "gpio72", "gpio73", "gpio74", "gpio75",
+};
+
+static const char * const cci_async_groups[] = {
+ "gpio50", "gpio66", "gpio68", "gpio69", "gpio70", "gpio71",
+};
+
+static const char * const cci_i2c_groups[] = {
+ "gpio52", "gpio53", "gpio54", "gpio55", "gpio56", "gpio57", "gpio58",
+ "gpio59", "gpio60", "gpio61", "gpio62", "gpio63", "gpio64", "gpio65",
+ "gpio66", "gpio67",
+};
+
+static const char * const cci_timer0_groups[] = {
+ "gpio68",
+};
+
+static const char * const cci_timer1_groups[] = {
+ "gpio69",
+};
+
+static const char * const cci_timer2_groups[] = {
+ "gpio70",
+};
+
+static const char * const cci_timer3_groups[] = {
+ "gpio71",
+};
+
+static const char * const cci_timer4_groups[] = {
+ "gpio52",
+};
+
+static const char * const cci_timer5_groups[] = {
+ "gpio53",
+};
+
+static const char * const cci_timer6_groups[] = {
+ "gpio54",
+};
+
+static const char * const cci_timer7_groups[] = {
+ "gpio55",
+};
+
+static const char * const cci_timer8_groups[] = {
+ "gpio56",
+};
+
+static const char * const cci_timer9_groups[] = {
+ "gpio57",
+};
+
+static const char * const cri_trng_groups[] = {
+ "gpio99",
+};
+
+static const char * const cri_trng0_groups[] = {
+ "gpio97",
+};
+
+static const char * const cri_trng1_groups[] = {
+ "gpio98",
+};
+
+static const char * const dbg_out_groups[] = {
+ "gpio144",
+};
+
+static const char * const ddr_bist_groups[] = {
+ "gpio56", "gpio57", "gpio58", "gpio59",
+};
+
+static const char * const ddr_pxi0_groups[] = {
+ "gpio33", "gpio34",
+};
+
+static const char * const ddr_pxi1_groups[] = {
+ "gpio52", "gpio53",
+};
+
+static const char * const ddr_pxi2_groups[] = {
+ "gpio55", "gpio86",
+};
+
+static const char * const ddr_pxi3_groups[] = {
+ "gpio87", "gpio88",
+};
+
+static const char * const ddr_pxi4_groups[] = {
+ "gpio89", "gpio90",
+};
+
+static const char * const ddr_pxi5_groups[] = {
+ "gpio118", "gpio119",
+};
+
+static const char * const edp0_hot_groups[] = {
+ "gpio101",
+};
+
+static const char * const edp0_lcd_groups[] = {
+ "gpio44",
+};
+
+static const char * const edp1_hot_groups[] = {
+ "gpio102",
+};
+
+static const char * const edp1_lcd_groups[] = {
+ "gpio45",
+};
+
+static const char * const edp2_hot_groups[] = {
+ "gpio104",
+};
+
+static const char * const edp2_lcd_groups[] = {
+ "gpio48",
+};
+
+static const char * const edp3_hot_groups[] = {
+ "gpio103",
+};
+
+static const char * const edp3_lcd_groups[] = {
+ "gpio49",
+};
+
+static const char * const emac0_mcg0_groups[] = {
+ "gpio12",
+};
+
+static const char * const emac0_mcg1_groups[] = {
+ "gpio13",
+};
+
+static const char * const emac0_mcg2_groups[] = {
+ "gpio14",
+};
+
+static const char * const emac0_mcg3_groups[] = {
+ "gpio15",
+};
+
+static const char * const emac0_mdc_groups[] = {
+ "gpio8",
+};
+
+static const char * const emac0_mdio_groups[] = {
+ "gpio9",
+};
+
+static const char * const emac0_ptp_aux_groups[] = {
+ "gpio6", "gpio10", "gpio11", "gpio12",
+};
+
+static const char * const emac0_ptp_pps_groups[] = {
+ "gpio6", "gpio10", "gpio11", "gpio12",
+};
+
+static const char * const emac1_mcg0_groups[] = {
+ "gpio16",
+
+};
+
+static const char * const emac1_mcg1_groups[] = {
+ "gpio17",
+};
+
+static const char * const emac1_mcg2_groups[] = {
+ "gpio18",
+};
+
+static const char * const emac1_mcg3_groups[] = {
+ "gpio19",
+};
+
+static const char * const emac1_mdc_groups[] = {
+ "gpio20",
+};
+
+static const char * const emac1_mdio_groups[] = {
+ "gpio21",
+};
+
+static const char * const emac1_ptp_aux_groups[] = {
+ "gpio6", "gpio10", "gpio11", "gpio12",
+};
+
+static const char * const emac1_ptp_pps_groups[] = {
+ "gpio6", "gpio10", "gpio11", "gpio12",
+};
+
+static const char * const gcc_gp1_groups[] = {
+ "gpio51", "gpio82",
+};
+
+static const char * const gcc_gp2_groups[] = {
+ "gpio52", "gpio83",
+};
+
+static const char * const gcc_gp3_groups[] = {
+ "gpio53", "gpio84",
+};
+
+static const char * const gcc_gp4_groups[] = {
+ "gpio33", "gpio55",
+};
+
+static const char * const gcc_gp5_groups[] = {
+ "gpio34", "gpio42",
+};
+
+static const char * const hs0_mi2s_groups[] = {
+ "gpio114", "gpio115", "gpio116", "gpio117",
+};
+
+static const char * const hs1_mi2s_groups[] = {
+ "gpio118", "gpio119", "gpio120", "gpio121",
+};
+
+static const char * const hs2_mi2s_groups[] = {
+ "gpio122", "gpio123", "gpio124", "gpio125",
+};
+
+static const char * const ibi_i3c_groups[] = {
+ "gpio40", "gpio41", "gpio42", "gpio43", "gpio80", "gpio81", "gpio84",
+ "gpio85",
+};
+
+static const char * const jitter_bist_groups[] = {
+ "gpio86",
+};
+
+static const char * const mdp0_vsync0_groups[] = {
+ "gpio57",
+};
+
+static const char * const mdp0_vsync1_groups[] = {
+ "gpio58",
+};
+
+static const char * const mdp0_vsync2_groups[] = {
+ "gpio59",
+};
+
+static const char * const mdp0_vsync3_groups[] = {
+ "gpio80",
+};
+
+static const char * const mdp0_vsync4_groups[] = {
+ "gpio81",
+};
+
+static const char * const mdp0_vsync5_groups[] = {
+ "gpio91",
+};
+
+static const char * const mdp0_vsync6_groups[] = {
+ "gpio92",
+};
+
+static const char * const mdp0_vsync7_groups[] = {
+ "gpio93",
+};
+
+static const char * const mdp0_vsync8_groups[] = {
+ "gpio94",
+};
+
+static const char * const mdp1_vsync0_groups[] = {
+ "gpio40",
+};
+
+static const char * const mdp1_vsync1_groups[] = {
+ "gpio41",
+};
+
+static const char * const mdp1_vsync2_groups[] = {
+ "gpio42",
+};
+
+static const char * const mdp1_vsync3_groups[] = {
+ "gpio43",
+};
+
+static const char * const mdp1_vsync4_groups[] = {
+ "gpio46",
+};
+
+static const char * const mdp1_vsync5_groups[] = {
+ "gpio47",
+};
+
+static const char * const mdp1_vsync6_groups[] = {
+ "gpio51",
+};
+
+static const char * const mdp1_vsync7_groups[] = {
+ "gpio52",
+};
+
+static const char * const mdp1_vsync8_groups[] = {
+ "gpio50",
+};
+
+static const char * const mdp_vsync_groups[] = {
+ "gpio82", "gpio83", "gpio84",
+};
+
+static const char * const mi2s1_data0_groups[] = {
+ "gpio108",
+};
+
+static const char * const mi2s1_data1_groups[] = {
+ "gpio109",
+};
+
+static const char * const mi2s1_sck_groups[] = {
+ "gpio106",
+};
+
+static const char * const mi2s1_ws_groups[] = {
+ "gpio107",
+};
+
+static const char * const mi2s2_data0_groups[] = {
+ "gpio112",
+};
+
+static const char * const mi2s2_data1_groups[] = {
+ "gpio113",
+};
+
+static const char * const mi2s2_sck_groups[] = {
+ "gpio110",
+};
+
+static const char * const mi2s2_ws_groups[] = {
+ "gpio111",
+};
+
+static const char * const mi2s_mclk0_groups[] = {
+ "gpio105",
+};
+
+static const char * const mi2s_mclk1_groups[] = {
+ "gpio117",
+};
+
+static const char * const pcie0_clkreq_groups[] = {
+ "gpio1",
+};
+
+static const char * const pcie1_clkreq_groups[] = {
+ "gpio3",
+};
+
+static const char * const phase_flag_groups[] = {
+ "gpio25", "gpio26", "gpio27", "gpio28", "gpio29", "gpio30", "gpio31",
+ "gpio32", "gpio35", "gpio36", "gpio37", "gpio38", "gpio39", "gpio56",
+ "gpio57", "gpio98", "gpio99", "gpio106", "gpio107", "gpio108",
+ "gpio109", "gpio110", "gpio111", "gpio112", "gpio113", "gpio114",
+ "gpio120", "gpio121", "gpio122", "gpio123", "gpio124", "gpio125",
+};
+
+static const char * const pll_bist_groups[] = {
+ "gpio114",
+};
+
+static const char * const pll_clk_groups[] = {
+ "gpio87",
+};
+
+static const char * const prng_rosc0_groups[] = {
+ "gpio101",
+};
+
+static const char * const prng_rosc1_groups[] = {
+ "gpio102",
+};
+
+static const char * const prng_rosc2_groups[] = {
+ "gpio103",
+};
+
+static const char * const prng_rosc3_groups[] = {
+ "gpio104",
+};
+
+static const char * const qdss_cti_groups[] = {
+ "gpio26", "gpio27", "gpio38", "gpio39", "gpio48", "gpio49", "gpio50",
+ "gpio51",
+};
+
+static const char * const qdss_gpio_groups[] = {
+ "gpio20", "gpio21", "gpio22", "gpio23", "gpio24", "gpio25", "gpio28",
+ "gpio29", "gpio30", "gpio31", "gpio60", "gpio61", "gpio62", "gpio63",
+ "gpio64", "gpio65", "gpio66", "gpio67", "gpio105", "gpio106", "gpio107",
+ "gpio108", "gpio109", "gpio110", "gpio111", "gpio112", "gpio113",
+ "gpio114", "gpio115", "gpio116", "gpio117", "gpio118", "gpio119",
+ "gpio120", "gpio121", "gpio122",
+};
+
+static const char * const qup0_se0_groups[] = {
+ "gpio20", "gpio21", "gpio22", "gpio23",
+};
+
+static const char * const qup0_se1_groups[] = {
+ "gpio24", "gpio25", "gpio26", "gpio27",
+};
+
+static const char * const qup0_se2_groups[] = {
+ "gpio36", "gpio37", "gpio38", "gpio39",
+};
+
+static const char * const qup0_se3_groups[] = {
+ "gpio28", "gpio29", "gpio30", "gpio31",
+};
+
+static const char * const qup0_se4_groups[] = {
+ "gpio32", "gpio33", "gpio34", "gpio35",
+};
+
+static const char * const qup0_se5_groups[] = {
+ "gpio36", "gpio37", "gpio38", "gpio39",
+};
+
+static const char * const qup1_se0_groups[] = {
+ "gpio40", "gpio41", "gpio42", "gpio43",
+};
+
+static const char * const qup1_se1_groups[] = {
+ "gpio40", "gpio41", "gpio42", "gpio43",
+};
+
+static const char * const qup1_se2_groups[] = {
+ "gpio44", "gpio45", "gpio46", "gpio47",
+};
+
+static const char * const qup1_se3_groups[] = {
+ "gpio44", "gpio45", "gpio46", "gpio47",
+};
+
+static const char * const qup1_se4_groups[] = {
+ "gpio48", "gpio49", "gpio50", "gpio51",
+};
+
+static const char * const qup1_se5_groups[] = {
+ "gpio52", "gpio53", "gpio54", "gpio55",
+};
+
+static const char * const qup1_se6_groups[] = {
+ "gpio56", "gpio56", "gpio57", "gpio57",
+};
+
+static const char * const qup2_se0_groups[] = {
+ "gpio80", "gpio81", "gpio82", "gpio83",
+};
+
+static const char * const qup2_se1_groups[] = {
+ "gpio84", "gpio85", "gpio99", "gpio100",
+};
+
+static const char * const qup2_se2_groups[] = {
+ "gpio86", "gpio87", "gpio88", "gpio89", "gpio90",
+};
+
+static const char * const qup2_se3_groups[] = {
+ "gpio91", "gpio92", "gpio93", "gpio94",
+};
+
+static const char * const qup2_se4_groups[] = {
+ "gpio95", "gpio96", "gpio97", "gpio98",
+};
+
+static const char * const qup2_se5_groups[] = {
+ "gpio84", "gpio85", "gpio99", "gpio100",
+};
+
+static const char * const qup2_se6_groups[] = {
+ "gpio95", "gpio96", "gpio97", "gpio98",
+};
+
+static const char * const qup3_se0_groups[] = {
+ "gpio13", "gpio14", "gpio15", "gpio16", "gpio17", "gpio18", "gpio19",
+};
+
+static const char * const sail_top_groups[] = {
+ "gpio13", "gpio14", "gpio15", "gpio16",
+};
+
+static const char * const sailss_emac0_groups[] = {
+ "gpio18", "gpio19",
+};
+
+static const char * const sailss_ospi_groups[] = {
+ "gpio18", "gpio19",
+};
+
+static const char * const sgmii_phy_groups[] = {
+ "gpio7", "gpio26",
+};
+
+static const char * const tb_trig_groups[] = {
+ "gpio17", "gpio17",
+};
+
+static const char * const tgu_ch0_groups[] = {
+ "gpio46",
+};
+
+static const char * const tgu_ch1_groups[] = {
+ "gpio47",
+};
+
+static const char * const tgu_ch2_groups[] = {
+ "gpio36",
+};
+
+static const char * const tgu_ch3_groups[] = {
+ "gpio37",
+};
+
+static const char * const tgu_ch4_groups[] = {
+ "gpio38",
+};
+
+static const char * const tgu_ch5_groups[] = {
+ "gpio39",
+};
+
+static const char * const tsense_pwm1_groups[] = {
+ "gpio104",
+};
+
+static const char * const tsense_pwm2_groups[] = {
+ "gpio103",
+};
+
+static const char * const tsense_pwm3_groups[] = {
+ "gpio102",
+};
+
+static const char * const tsense_pwm4_groups[] = {
+ "gpio101",
+};
+
+static const char * const usb2phy_ac_groups[] = {
+ "gpio10", "gpio11", "gpio12",
+};
+
+static const char * const vsense_trigger_groups[] = {
+ "gpio111",
+};
+
+static const struct msm_function sa8775p_functions[] = {
+ FUNCTION(gpio),
+ FUNCTION(atest_char),
+ FUNCTION(atest_usb2),
+ FUNCTION(audio_ref),
+ FUNCTION(cam_mclk),
+ FUNCTION(cci_async),
+ FUNCTION(cci_i2c),
+ FUNCTION(cci_timer0),
+ FUNCTION(cci_timer1),
+ FUNCTION(cci_timer2),
+ FUNCTION(cci_timer3),
+ FUNCTION(cci_timer4),
+ FUNCTION(cci_timer5),
+ FUNCTION(cci_timer6),
+ FUNCTION(cci_timer7),
+ FUNCTION(cci_timer8),
+ FUNCTION(cci_timer9),
+ FUNCTION(cri_trng),
+ FUNCTION(cri_trng0),
+ FUNCTION(cri_trng1),
+ FUNCTION(dbg_out),
+ FUNCTION(ddr_bist),
+ FUNCTION(ddr_pxi0),
+ FUNCTION(ddr_pxi1),
+ FUNCTION(ddr_pxi2),
+ FUNCTION(ddr_pxi3),
+ FUNCTION(ddr_pxi4),
+ FUNCTION(ddr_pxi5),
+ FUNCTION(edp0_hot),
+ FUNCTION(edp0_lcd),
+ FUNCTION(edp1_hot),
+ FUNCTION(edp1_lcd),
+ FUNCTION(edp2_hot),
+ FUNCTION(edp2_lcd),
+ FUNCTION(edp3_hot),
+ FUNCTION(edp3_lcd),
+ FUNCTION(emac0_mcg0),
+ FUNCTION(emac0_mcg1),
+ FUNCTION(emac0_mcg2),
+ FUNCTION(emac0_mcg3),
+ FUNCTION(emac0_mdc),
+ FUNCTION(emac0_mdio),
+ FUNCTION(emac0_ptp_aux),
+ FUNCTION(emac0_ptp_pps),
+ FUNCTION(emac1_mcg0),
+ FUNCTION(emac1_mcg1),
+ FUNCTION(emac1_mcg2),
+ FUNCTION(emac1_mcg3),
+ FUNCTION(emac1_mdc),
+ FUNCTION(emac1_mdio),
+ FUNCTION(emac1_ptp_aux),
+ FUNCTION(emac1_ptp_pps),
+ FUNCTION(gcc_gp1),
+ FUNCTION(gcc_gp2),
+ FUNCTION(gcc_gp3),
+ FUNCTION(gcc_gp4),
+ FUNCTION(gcc_gp5),
+ FUNCTION(hs0_mi2s),
+ FUNCTION(hs1_mi2s),
+ FUNCTION(hs2_mi2s),
+ FUNCTION(ibi_i3c),
+ FUNCTION(jitter_bist),
+ FUNCTION(mdp0_vsync0),
+ FUNCTION(mdp0_vsync1),
+ FUNCTION(mdp0_vsync2),
+ FUNCTION(mdp0_vsync3),
+ FUNCTION(mdp0_vsync4),
+ FUNCTION(mdp0_vsync5),
+ FUNCTION(mdp0_vsync6),
+ FUNCTION(mdp0_vsync7),
+ FUNCTION(mdp0_vsync8),
+ FUNCTION(mdp1_vsync0),
+ FUNCTION(mdp1_vsync1),
+ FUNCTION(mdp1_vsync2),
+ FUNCTION(mdp1_vsync3),
+ FUNCTION(mdp1_vsync4),
+ FUNCTION(mdp1_vsync5),
+ FUNCTION(mdp1_vsync6),
+ FUNCTION(mdp1_vsync7),
+ FUNCTION(mdp1_vsync8),
+ FUNCTION(mdp_vsync),
+ FUNCTION(mi2s1_data0),
+ FUNCTION(mi2s1_data1),
+ FUNCTION(mi2s1_sck),
+ FUNCTION(mi2s1_ws),
+ FUNCTION(mi2s2_data0),
+ FUNCTION(mi2s2_data1),
+ FUNCTION(mi2s2_sck),
+ FUNCTION(mi2s2_ws),
+ FUNCTION(mi2s_mclk0),
+ FUNCTION(mi2s_mclk1),
+ FUNCTION(pcie0_clkreq),
+ FUNCTION(pcie1_clkreq),
+ FUNCTION(phase_flag),
+ FUNCTION(pll_bist),
+ FUNCTION(pll_clk),
+ FUNCTION(prng_rosc0),
+ FUNCTION(prng_rosc1),
+ FUNCTION(prng_rosc2),
+ FUNCTION(prng_rosc3),
+ FUNCTION(qdss_cti),
+ FUNCTION(qdss_gpio),
+ FUNCTION(qup0_se0),
+ FUNCTION(qup0_se1),
+ FUNCTION(qup0_se2),
+ FUNCTION(qup0_se3),
+ FUNCTION(qup0_se4),
+ FUNCTION(qup0_se5),
+ FUNCTION(qup1_se0),
+ FUNCTION(qup1_se1),
+ FUNCTION(qup1_se2),
+ FUNCTION(qup1_se3),
+ FUNCTION(qup1_se4),
+ FUNCTION(qup1_se5),
+ FUNCTION(qup1_se6),
+ FUNCTION(qup2_se0),
+ FUNCTION(qup2_se1),
+ FUNCTION(qup2_se2),
+ FUNCTION(qup2_se3),
+ FUNCTION(qup2_se4),
+ FUNCTION(qup2_se5),
+ FUNCTION(qup2_se6),
+ FUNCTION(qup3_se0),
+ FUNCTION(sail_top),
+ FUNCTION(sailss_emac0),
+ FUNCTION(sailss_ospi),
+ FUNCTION(sgmii_phy),
+ FUNCTION(tb_trig),
+ FUNCTION(tgu_ch0),
+ FUNCTION(tgu_ch1),
+ FUNCTION(tgu_ch2),
+ FUNCTION(tgu_ch3),
+ FUNCTION(tgu_ch4),
+ FUNCTION(tgu_ch5),
+ FUNCTION(tsense_pwm1),
+ FUNCTION(tsense_pwm2),
+ FUNCTION(tsense_pwm3),
+ FUNCTION(tsense_pwm4),
+ FUNCTION(usb2phy_ac),
+ FUNCTION(vsense_trigger),
+};
+
+/*
+ * Every pin is maintained as a single group, and missing or non-existing pin
+ * would be maintained as dummy group to synchronize pin group index with
+ * pin descriptor registered with pinctrl core.
+ * Clients would not be able to request these dummy pin groups.
+ */
+static const struct msm_pingroup sa8775p_groups[] = {
+ [0] = PINGROUP(0, _, _, _, _, _, _, _, _, _),
+ [1] = PINGROUP(1, pcie0_clkreq, _, _, _, _, _, _, _, _),
+ [2] = PINGROUP(2, _, _, _, _, _, _, _, _, _),
+ [3] = PINGROUP(3, pcie1_clkreq, _, _, _, _, _, _, _, _),
+ [4] = PINGROUP(4, _, _, _, _, _, _, _, _, _),
+ [5] = PINGROUP(5, _, _, _, _, _, _, _, _, _),
+ [6] = PINGROUP(6, emac0_ptp_aux, emac0_ptp_pps, emac1_ptp_aux, emac1_ptp_pps,
+ _, _, _, _, _),
+ [7] = PINGROUP(7, sgmii_phy, _, _, _, _, _, _, _, _),
+ [8] = PINGROUP(8, emac0_mdc, _, _, _, _, _, _, _, _),
+ [9] = PINGROUP(9, emac0_mdio, _, _, _, _, _, _, _, _),
+ [10] = PINGROUP(10, usb2phy_ac, emac0_ptp_aux, emac0_ptp_pps, emac1_ptp_aux, emac1_ptp_pps,
+ _, _, _, _),
+ [11] = PINGROUP(11, usb2phy_ac, emac0_ptp_aux, emac0_ptp_pps, emac1_ptp_aux, emac1_ptp_pps,
+ _, _, _, _),
+ [12] = PINGROUP(12, usb2phy_ac, emac0_ptp_aux, emac0_ptp_pps, emac1_ptp_aux, emac1_ptp_pps,
+ emac0_mcg0, _, _, _),
+ [13] = PINGROUP(13, qup3_se0, emac0_mcg1, _, _, sail_top, _, _, _, _),
+ [14] = PINGROUP(14, qup3_se0, emac0_mcg2, _, _, sail_top, _, _, _, _),
+ [15] = PINGROUP(15, qup3_se0, emac0_mcg3, _, _, sail_top, _, _, _, _),
+ [16] = PINGROUP(16, qup3_se0, emac1_mcg0, _, _, sail_top, _, _, _, _),
+ [17] = PINGROUP(17, qup3_se0, tb_trig, tb_trig, emac1_mcg1, _, _, _, _, _),
+ [18] = PINGROUP(18, qup3_se0, emac1_mcg2, _, _, sailss_ospi, sailss_emac0, _, _, _),
+ [19] = PINGROUP(19, qup3_se0, emac1_mcg3, _, _, sailss_ospi, sailss_emac0, _, _, _),
+ [20] = PINGROUP(20, qup0_se0, emac1_mdc, qdss_gpio, _, _, _, _, _, _),
+ [21] = PINGROUP(21, qup0_se0, emac1_mdio, qdss_gpio, _, _, _, _, _, _),
+ [22] = PINGROUP(22, qup0_se0, qdss_gpio, _, _, _, _, _, _, _),
+ [23] = PINGROUP(23, qup0_se0, qdss_gpio, _, _, _, _, _, _, _),
+ [24] = PINGROUP(24, qup0_se1, qdss_gpio, _, _, _, _, _, _, _),
+ [25] = PINGROUP(25, qup0_se1, phase_flag, _, qdss_gpio, _, _, _, _, _),
+ [26] = PINGROUP(26, sgmii_phy, qup0_se1, qdss_cti, phase_flag, _, _, _, _, _),
+ [27] = PINGROUP(27, qup0_se1, qdss_cti, phase_flag, _, atest_char, _, _, _, _),
+ [28] = PINGROUP(28, qup0_se3, phase_flag, _, qdss_gpio, _, _, _, _, _),
+ [29] = PINGROUP(29, qup0_se3, phase_flag, _, qdss_gpio, _, _, _, _, _),
+ [30] = PINGROUP(30, qup0_se3, phase_flag, _, qdss_gpio, _, _, _, _, _),
+ [31] = PINGROUP(31, qup0_se3, phase_flag, _, qdss_gpio, _, _, _, _, _),
+ [32] = PINGROUP(32, qup0_se4, phase_flag, _, _, _, _, _, _, _),
+ [33] = PINGROUP(33, qup0_se4, gcc_gp4, _, ddr_pxi0, _, _, _, _, _),
+ [34] = PINGROUP(34, qup0_se4, gcc_gp5, _, ddr_pxi0, _, _, _, _, _),
+ [35] = PINGROUP(35, qup0_se4, phase_flag, _, _, _, _, _, _, _),
+ [36] = PINGROUP(36, qup0_se2, qup0_se5, phase_flag, tgu_ch2, _, _, _, _, _),
+ [37] = PINGROUP(37, qup0_se2, qup0_se5, phase_flag, tgu_ch3, _, _, _, _, _),
+ [38] = PINGROUP(38, qup0_se5, qup0_se2, qdss_cti, phase_flag, tgu_ch4, _, _, _, _),
+ [39] = PINGROUP(39, qup0_se5, qup0_se2, qdss_cti, phase_flag, tgu_ch5, _, _, _, _),
+ [40] = PINGROUP(40, qup1_se0, qup1_se1, ibi_i3c, mdp1_vsync0, _, _, _, _, _),
+ [41] = PINGROUP(41, qup1_se0, qup1_se1, ibi_i3c, mdp1_vsync1, _, _, _, _, _),
+ [42] = PINGROUP(42, qup1_se1, qup1_se0, ibi_i3c, mdp1_vsync2, gcc_gp5, _, _, _, _),
+ [43] = PINGROUP(43, qup1_se1, qup1_se0, ibi_i3c, mdp1_vsync3, _, _, _, _, _),
+ [44] = PINGROUP(44, qup1_se2, qup1_se3, edp0_lcd, _, _, _, _, _, _),
+ [45] = PINGROUP(45, qup1_se2, qup1_se3, edp1_lcd, _, _, _, _, _, _),
+ [46] = PINGROUP(46, qup1_se3, qup1_se2, mdp1_vsync4, tgu_ch0, _, _, _, _, _),
+ [47] = PINGROUP(47, qup1_se3, qup1_se2, mdp1_vsync5, tgu_ch1, _, _, _, _, _),
+ [48] = PINGROUP(48, qup1_se4, qdss_cti, edp2_lcd, _, _, _, _, _, _),
+ [49] = PINGROUP(49, qup1_se4, qdss_cti, edp3_lcd, _, _, _, _, _, _),
+ [50] = PINGROUP(50, qup1_se4, cci_async, qdss_cti, mdp1_vsync8, _, _, _, _, _),
+ [51] = PINGROUP(51, qup1_se4, qdss_cti, mdp1_vsync6, gcc_gp1, _, _, _, _, _),
+ [52] = PINGROUP(52, qup1_se5, cci_timer4, cci_i2c, mdp1_vsync7, gcc_gp2, _, ddr_pxi1, _, _),
+ [53] = PINGROUP(53, qup1_se5, cci_timer5, cci_i2c, gcc_gp3, _, ddr_pxi1, _, _, _),
+ [54] = PINGROUP(54, qup1_se5, cci_timer6, cci_i2c, _, _, _, _, _, _),
+ [55] = PINGROUP(55, qup1_se5, cci_timer7, cci_i2c, gcc_gp4, _, ddr_pxi2, _, _, _),
+ [56] = PINGROUP(56, qup1_se6, qup1_se6, cci_timer8, cci_i2c, phase_flag,
+ ddr_bist, _, _, _),
+ [57] = PINGROUP(57, qup1_se6, qup1_se6, cci_timer9, cci_i2c, mdp0_vsync0,
+ phase_flag, ddr_bist, _, _),
+ [58] = PINGROUP(58, cci_i2c, mdp0_vsync1, ddr_bist, _, atest_usb2, atest_char, _, _, _),
+ [59] = PINGROUP(59, cci_i2c, mdp0_vsync2, ddr_bist, _, atest_usb2, atest_char, _, _, _),
+ [60] = PINGROUP(60, cci_i2c, qdss_gpio, _, _, _, _, _, _, _),
+ [61] = PINGROUP(61, cci_i2c, qdss_gpio, _, _, _, _, _, _, _),
+ [62] = PINGROUP(62, cci_i2c, qdss_gpio, _, _, _, _, _, _, _),
+ [63] = PINGROUP(63, cci_i2c, qdss_gpio, _, _, _, _, _, _, _),
+ [64] = PINGROUP(64, cci_i2c, qdss_gpio, _, _, _, _, _, _, _),
+ [65] = PINGROUP(65, cci_i2c, qdss_gpio, _, _, _, _, _, _, _),
+ [66] = PINGROUP(66, cci_i2c, cci_async, qdss_gpio, _, _, _, _, _, _),
+ [67] = PINGROUP(67, cci_i2c, qdss_gpio, _, _, _, _, _, _, _),
+ [68] = PINGROUP(68, cci_timer0, cci_async, _, _, _, _, _, _, _),
+ [69] = PINGROUP(69, cci_timer1, cci_async, _, _, _, _, _, _, _),
+ [70] = PINGROUP(70, cci_timer2, cci_async, _, _, _, _, _, _, _),
+ [71] = PINGROUP(71, cci_timer3, cci_async, _, _, _, _, _, _, _),
+ [72] = PINGROUP(72, cam_mclk, _, _, _, _, _, _, _, _),
+ [73] = PINGROUP(73, cam_mclk, _, _, _, _, _, _, _, _),
+ [74] = PINGROUP(74, cam_mclk, _, _, _, _, _, _, _, _),
+ [75] = PINGROUP(75, cam_mclk, _, _, _, _, _, _, _, _),
+ [76] = PINGROUP(76, _, _, _, _, _, _, _, _, _),
+ [77] = PINGROUP(77, _, _, _, _, _, _, _, _, _),
+ [78] = PINGROUP(78, _, _, _, _, _, _, _, _, _),
+ [79] = PINGROUP(79, _, _, _, _, _, _, _, _, _),
+ [80] = PINGROUP(80, qup2_se0, ibi_i3c, mdp0_vsync3, _, _, _, _, _, _),
+ [81] = PINGROUP(81, qup2_se0, ibi_i3c, mdp0_vsync4, _, _, _, _, _, _),
+ [82] = PINGROUP(82, qup2_se0, mdp_vsync, gcc_gp1, _, _, _, _, _, _),
+ [83] = PINGROUP(83, qup2_se0, mdp_vsync, gcc_gp2, _, _, _, _, _, _),
+ [84] = PINGROUP(84, qup2_se1, qup2_se5, ibi_i3c, mdp_vsync, gcc_gp3, _, _, _, _),
+ [85] = PINGROUP(85, qup2_se1, qup2_se5, ibi_i3c, _, _, _, _, _, _),
+ [86] = PINGROUP(86, qup2_se2, jitter_bist, atest_usb2, ddr_pxi2, _, _, _, _, _),
+ [87] = PINGROUP(87, qup2_se2, pll_clk, atest_usb2, ddr_pxi3, _, _, _, _, _),
+ [88] = PINGROUP(88, qup2_se2, _, atest_usb2, ddr_pxi3, _, _, _, _, _),
+ [89] = PINGROUP(89, qup2_se2, _, atest_usb2, ddr_pxi4, atest_char, _, _, _, _),
+ [90] = PINGROUP(90, qup2_se2, _, atest_usb2, ddr_pxi4, atest_char, _, _, _, _),
+ [91] = PINGROUP(91, qup2_se3, mdp0_vsync5, _, atest_usb2, _, _, _, _, _),
+ [92] = PINGROUP(92, qup2_se3, mdp0_vsync6, _, atest_usb2, _, _, _, _, _),
+ [93] = PINGROUP(93, qup2_se3, mdp0_vsync7, _, atest_usb2, _, _, _, _, _),
+ [94] = PINGROUP(94, qup2_se3, mdp0_vsync8, _, atest_usb2, _, _, _, _, _),
+ [95] = PINGROUP(95, qup2_se4, qup2_se6, _, atest_usb2, _, _, _, _, _),
+ [96] = PINGROUP(96, qup2_se4, qup2_se6, _, atest_usb2, _, _, _, _, _),
+ [97] = PINGROUP(97, qup2_se6, qup2_se4, cri_trng0, _, atest_usb2, _, _, _, _),
+ [98] = PINGROUP(98, qup2_se6, qup2_se4, phase_flag, cri_trng1, _, _, _, _, _),
+ [99] = PINGROUP(99, qup2_se5, qup2_se1, phase_flag, cri_trng, _, _, _, _, _),
+ [100] = PINGROUP(100, qup2_se5, qup2_se1, _, _, _, _, _, _, _),
+ [101] = PINGROUP(101, edp0_hot, prng_rosc0, tsense_pwm4, _, _, _, _, _, _),
+ [102] = PINGROUP(102, edp1_hot, prng_rosc1, tsense_pwm3, _, _, _, _, _, _),
+ [103] = PINGROUP(103, edp3_hot, prng_rosc2, tsense_pwm2, _, _, _, _, _, _),
+ [104] = PINGROUP(104, edp2_hot, prng_rosc3, tsense_pwm1, _, _, _, _, _, _),
+ [105] = PINGROUP(105, mi2s_mclk0, _, qdss_gpio, atest_usb2, _, _, _, _, _),
+ [106] = PINGROUP(106, mi2s1_sck, phase_flag, _, qdss_gpio, _, _, _, _, _),
+ [107] = PINGROUP(107, mi2s1_ws, phase_flag, _, qdss_gpio, _, _, _, _, _),
+ [108] = PINGROUP(108, mi2s1_data0, phase_flag, _, qdss_gpio, _, _, _, _, _),
+ [109] = PINGROUP(109, mi2s1_data1, phase_flag, _, qdss_gpio, _, _, _, _, _),
+ [110] = PINGROUP(110, mi2s2_sck, phase_flag, _, qdss_gpio, _, _, _, _, _),
+ [111] = PINGROUP(111, mi2s2_ws, phase_flag, _, qdss_gpio, vsense_trigger, _, _, _, _),
+ [112] = PINGROUP(112, mi2s2_data0, phase_flag, _, qdss_gpio, _, _, _, _, _),
+ [113] = PINGROUP(113, mi2s2_data1, audio_ref, phase_flag, _, qdss_gpio, _, _, _, _),
+ [114] = PINGROUP(114, hs0_mi2s, pll_bist, phase_flag, _, qdss_gpio, _, _, _, _),
+ [115] = PINGROUP(115, hs0_mi2s, _, qdss_gpio, _, _, _, _, _, _),
+ [116] = PINGROUP(116, hs0_mi2s, _, qdss_gpio, _, _, _, _, _, _),
+ [117] = PINGROUP(117, hs0_mi2s, mi2s_mclk1, _, qdss_gpio, _, _, _, _, _),
+ [118] = PINGROUP(118, hs1_mi2s, _, qdss_gpio, ddr_pxi5, _, _, _, _, _),
+ [119] = PINGROUP(119, hs1_mi2s, _, qdss_gpio, ddr_pxi5, _, _, _, _, _),
+ [120] = PINGROUP(120, hs1_mi2s, phase_flag, _, qdss_gpio, _, _, _, _, _),
+ [121] = PINGROUP(121, hs1_mi2s, phase_flag, _, qdss_gpio, _, _, _, _, _),
+ [122] = PINGROUP(122, hs2_mi2s, phase_flag, _, qdss_gpio, _, _, _, _, _),
+ [123] = PINGROUP(123, hs2_mi2s, phase_flag, _, _, _, _, _, _, _),
+ [124] = PINGROUP(124, hs2_mi2s, phase_flag, _, _, _, _, _, _, _),
+ [125] = PINGROUP(125, hs2_mi2s, phase_flag, _, _, _, _, _, _, _),
+ [126] = PINGROUP(126, _, _, _, _, _, _, _, _, _),
+ [127] = PINGROUP(127, _, _, _, _, _, _, _, _, _),
+ [128] = PINGROUP(128, _, _, _, _, _, _, _, _, _),
+ [129] = PINGROUP(129, _, _, _, _, _, _, _, _, _),
+ [130] = PINGROUP(130, _, _, _, _, _, _, _, _, _),
+ [131] = PINGROUP(131, _, _, _, _, _, _, _, _, _),
+ [132] = PINGROUP(132, _, _, _, _, _, _, _, _, _),
+ [133] = PINGROUP(133, _, _, _, _, _, _, _, _, _),
+ [134] = PINGROUP(134, _, _, _, _, _, _, _, _, _),
+ [135] = PINGROUP(135, _, _, _, _, _, _, _, _, _),
+ [136] = PINGROUP(136, _, _, _, _, _, _, _, _, _),
+ [137] = PINGROUP(137, _, _, _, _, _, _, _, _, _),
+ [138] = PINGROUP(138, _, _, _, _, _, _, _, _, _),
+ [139] = PINGROUP(139, _, _, _, _, _, _, _, _, _),
+ [140] = PINGROUP(140, _, _, _, _, _, _, _, _, _),
+ [141] = PINGROUP(141, _, _, _, _, _, _, _, _, _),
+ [142] = PINGROUP(142, _, _, _, _, _, _, _, _, _),
+ [143] = PINGROUP(143, _, _, _, _, _, _, _, _, _),
+ [144] = PINGROUP(144, dbg_out, _, _, _, _, _, _, _, _),
+ [145] = PINGROUP(145, _, _, _, _, _, _, _, _, _),
+ [146] = PINGROUP(146, _, _, _, _, _, _, _, _, _),
+ [147] = PINGROUP(147, _, _, _, _, _, _, _, _, _),
+ [148] = PINGROUP(148, _, _, _, _, _, _, _, _, _),
+ [149] = UFS_RESET(ufs_reset, 0x1a2000),
+ [150] = SDC_QDSD_PINGROUP(sdc1_rclk, 0x199000, 15, 0),
+ [151] = SDC_QDSD_PINGROUP(sdc1_clk, 0x199000, 13, 6),
+ [152] = SDC_QDSD_PINGROUP(sdc1_cmd, 0x199000, 11, 3),
+ [153] = SDC_QDSD_PINGROUP(sdc1_data, 0x199000, 9, 0),
+};
+
+static const struct msm_pinctrl_soc_data sa8775p_pinctrl = {
+ .pins = sa8775p_pins,
+ .npins = ARRAY_SIZE(sa8775p_pins),
+ .functions = sa8775p_functions,
+ .nfunctions = ARRAY_SIZE(sa8775p_functions),
+ .groups = sa8775p_groups,
+ .ngroups = ARRAY_SIZE(sa8775p_groups),
+ .ngpios = 150,
+};
+
+static int sa8775p_pinctrl_probe(struct platform_device *pdev)
+{
+ return msm_pinctrl_probe(pdev, &sa8775p_pinctrl);
+}
+
+static const struct of_device_id sa8775p_pinctrl_of_match[] = {
+ { .compatible = "qcom,sa8775p-tlmm", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, sa8775p_pinctrl_of_match);
+
+static struct platform_driver sa8775p_pinctrl_driver = {
+ .driver = {
+ .name = "sa8775p-tlmm",
+ .of_match_table = sa8775p_pinctrl_of_match,
+ },
+ .probe = sa8775p_pinctrl_probe,
+ .remove = msm_pinctrl_remove,
+};
+
+static int __init sa8775p_pinctrl_init(void)
+{
+ return platform_driver_register(&sa8775p_pinctrl_driver);
+}
+arch_initcall(sa8775p_pinctrl_init);
+
+static void __exit sa8775p_pinctrl_exit(void)
+{
+ platform_driver_unregister(&sa8775p_pinctrl_driver);
+}
+module_exit(sa8775p_pinctrl_exit);
+
+MODULE_DESCRIPTION("QTI SA8775P pinctrl driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/qcom/pinctrl-sm8450-lpass-lpi.c b/drivers/pinctrl/qcom/pinctrl-sm8450-lpass-lpi.c
index c3c8c34148f1..e22d03ce292e 100644
--- a/drivers/pinctrl/qcom/pinctrl-sm8450-lpass-lpi.c
+++ b/drivers/pinctrl/qcom/pinctrl-sm8450-lpass-lpi.c
@@ -105,7 +105,7 @@ static const struct pinctrl_pin_desc sm8450_lpi_pins[] = {
static const char * const swr_tx_clk_groups[] = { "gpio0" };
static const char * const swr_tx_data_groups[] = { "gpio1", "gpio2", "gpio14" };
static const char * const swr_rx_clk_groups[] = { "gpio3" };
-static const char * const swr_rx_data_groups[] = { "gpio4", "gpio5", "gpio15" };
+static const char * const swr_rx_data_groups[] = { "gpio4", "gpio5" };
static const char * const dmic1_clk_groups[] = { "gpio6" };
static const char * const dmic1_data_groups[] = { "gpio7" };
static const char * const dmic2_clk_groups[] = { "gpio8" };
diff --git a/drivers/pinctrl/qcom/pinctrl-sm8550-lpass-lpi.c b/drivers/pinctrl/qcom/pinctrl-sm8550-lpass-lpi.c
new file mode 100644
index 000000000000..c2bdd936d27f
--- /dev/null
+++ b/drivers/pinctrl/qcom/pinctrl-sm8550-lpass-lpi.c
@@ -0,0 +1,240 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022-2023 Linaro Ltd.
+ */
+
+#include <linux/gpio/driver.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include "pinctrl-lpass-lpi.h"
+
+enum lpass_lpi_functions {
+ LPI_MUX_dmic1_clk,
+ LPI_MUX_dmic1_data,
+ LPI_MUX_dmic2_clk,
+ LPI_MUX_dmic2_data,
+ LPI_MUX_dmic3_clk,
+ LPI_MUX_dmic3_data,
+ LPI_MUX_dmic4_clk,
+ LPI_MUX_dmic4_data,
+ LPI_MUX_i2s0_clk,
+ LPI_MUX_i2s0_data,
+ LPI_MUX_i2s0_ws,
+ LPI_MUX_i2s1_clk,
+ LPI_MUX_i2s1_data,
+ LPI_MUX_i2s1_ws,
+ LPI_MUX_i2s2_clk,
+ LPI_MUX_i2s2_data,
+ LPI_MUX_i2s2_ws,
+ LPI_MUX_i2s3_clk,
+ LPI_MUX_i2s3_data,
+ LPI_MUX_i2s3_ws,
+ LPI_MUX_i2s4_clk,
+ LPI_MUX_i2s4_data,
+ LPI_MUX_i2s4_ws,
+ LPI_MUX_slimbus_clk,
+ LPI_MUX_slimbus_data,
+ LPI_MUX_swr_rx_clk,
+ LPI_MUX_swr_rx_data,
+ LPI_MUX_swr_tx_clk,
+ LPI_MUX_swr_tx_data,
+ LPI_MUX_wsa_swr_clk,
+ LPI_MUX_wsa_swr_data,
+ LPI_MUX_wsa2_swr_clk,
+ LPI_MUX_wsa2_swr_data,
+ LPI_MUX_ext_mclk1_a,
+ LPI_MUX_ext_mclk1_b,
+ LPI_MUX_ext_mclk1_c,
+ LPI_MUX_ext_mclk1_d,
+ LPI_MUX_ext_mclk1_e,
+ LPI_MUX_gpio,
+ LPI_MUX__,
+};
+
+static int gpio0_pins[] = { 0 };
+static int gpio1_pins[] = { 1 };
+static int gpio2_pins[] = { 2 };
+static int gpio3_pins[] = { 3 };
+static int gpio4_pins[] = { 4 };
+static int gpio5_pins[] = { 5 };
+static int gpio6_pins[] = { 6 };
+static int gpio7_pins[] = { 7 };
+static int gpio8_pins[] = { 8 };
+static int gpio9_pins[] = { 9 };
+static int gpio10_pins[] = { 10 };
+static int gpio11_pins[] = { 11 };
+static int gpio12_pins[] = { 12 };
+static int gpio13_pins[] = { 13 };
+static int gpio14_pins[] = { 14 };
+static int gpio15_pins[] = { 15 };
+static int gpio16_pins[] = { 16 };
+static int gpio17_pins[] = { 17 };
+static int gpio18_pins[] = { 18 };
+static int gpio19_pins[] = { 19 };
+static int gpio20_pins[] = { 20 };
+static int gpio21_pins[] = { 21 };
+static int gpio22_pins[] = { 22 };
+
+static const struct pinctrl_pin_desc sm8550_lpi_pins[] = {
+ PINCTRL_PIN(0, "gpio0"),
+ PINCTRL_PIN(1, "gpio1"),
+ PINCTRL_PIN(2, "gpio2"),
+ PINCTRL_PIN(3, "gpio3"),
+ PINCTRL_PIN(4, "gpio4"),
+ PINCTRL_PIN(5, "gpio5"),
+ PINCTRL_PIN(6, "gpio6"),
+ PINCTRL_PIN(7, "gpio7"),
+ PINCTRL_PIN(8, "gpio8"),
+ PINCTRL_PIN(9, "gpio9"),
+ PINCTRL_PIN(10, "gpio10"),
+ PINCTRL_PIN(11, "gpio11"),
+ PINCTRL_PIN(12, "gpio12"),
+ PINCTRL_PIN(13, "gpio13"),
+ PINCTRL_PIN(14, "gpio14"),
+ PINCTRL_PIN(15, "gpio15"),
+ PINCTRL_PIN(16, "gpio16"),
+ PINCTRL_PIN(17, "gpio17"),
+ PINCTRL_PIN(18, "gpio18"),
+ PINCTRL_PIN(19, "gpio19"),
+ PINCTRL_PIN(20, "gpio20"),
+ PINCTRL_PIN(21, "gpio21"),
+ PINCTRL_PIN(22, "gpio22"),
+};
+
+static const char * const dmic1_clk_groups[] = { "gpio6" };
+static const char * const dmic1_data_groups[] = { "gpio7" };
+static const char * const dmic2_clk_groups[] = { "gpio8" };
+static const char * const dmic2_data_groups[] = { "gpio9" };
+static const char * const dmic3_clk_groups[] = { "gpio12" };
+static const char * const dmic3_data_groups[] = { "gpio13" };
+static const char * const dmic4_clk_groups[] = { "gpio17" };
+static const char * const dmic4_data_groups[] = { "gpio18" };
+static const char * const i2s0_clk_groups[] = { "gpio0" };
+static const char * const i2s0_ws_groups[] = { "gpio1" };
+static const char * const i2s0_data_groups[] = { "gpio2", "gpio3", "gpio4", "gpio5" };
+static const char * const i2s1_clk_groups[] = { "gpio6" };
+static const char * const i2s1_ws_groups[] = { "gpio7" };
+static const char * const i2s1_data_groups[] = { "gpio8", "gpio9" };
+static const char * const i2s2_clk_groups[] = { "gpio10" };
+static const char * const i2s2_ws_groups[] = { "gpio11" };
+static const char * const i2s2_data_groups[] = { "gpio15", "gpio16" };
+static const char * const i2s3_clk_groups[] = { "gpio12" };
+static const char * const i2s3_ws_groups[] = { "gpio13" };
+static const char * const i2s3_data_groups[] = { "gpio17", "gpio18" };
+static const char * const i2s4_clk_groups[] = { "gpio19"};
+static const char * const i2s4_ws_groups[] = { "gpio20"};
+static const char * const i2s4_data_groups[] = { "gpio21", "gpio22"};
+static const char * const slimbus_clk_groups[] = { "gpio19"};
+static const char * const slimbus_data_groups[] = { "gpio20"};
+static const char * const swr_tx_clk_groups[] = { "gpio0" };
+static const char * const swr_tx_data_groups[] = { "gpio1", "gpio2", "gpio14" };
+static const char * const swr_rx_clk_groups[] = { "gpio3" };
+static const char * const swr_rx_data_groups[] = { "gpio4", "gpio5", "gpio15" };
+static const char * const wsa_swr_clk_groups[] = { "gpio10" };
+static const char * const wsa_swr_data_groups[] = { "gpio11" };
+static const char * const wsa2_swr_clk_groups[] = { "gpio15" };
+static const char * const wsa2_swr_data_groups[] = { "gpio16" };
+static const char * const ext_mclk1_c_groups[] = { "gpio5" };
+static const char * const ext_mclk1_b_groups[] = { "gpio9" };
+static const char * const ext_mclk1_a_groups[] = { "gpio13" };
+static const char * const ext_mclk1_d_groups[] = { "gpio14" };
+static const char * const ext_mclk1_e_groups[] = { "gpio22" };
+
+static const struct lpi_pingroup sm8550_groups[] = {
+ LPI_PINGROUP(0, 0, swr_tx_clk, i2s0_clk, _, _),
+ LPI_PINGROUP(1, 2, swr_tx_data, i2s0_ws, _, _),
+ LPI_PINGROUP(2, 4, swr_tx_data, i2s0_data, _, _),
+ LPI_PINGROUP(3, 8, swr_rx_clk, i2s0_data, _, _),
+ LPI_PINGROUP(4, 10, swr_rx_data, i2s0_data, _, _),
+ LPI_PINGROUP(5, 12, swr_rx_data, ext_mclk1_c, i2s0_data, _),
+ LPI_PINGROUP(6, LPI_NO_SLEW, dmic1_clk, i2s1_clk, _, _),
+ LPI_PINGROUP(7, LPI_NO_SLEW, dmic1_data, i2s1_ws, _, _),
+ LPI_PINGROUP(8, LPI_NO_SLEW, dmic2_clk, i2s1_data, _, _),
+ LPI_PINGROUP(9, LPI_NO_SLEW, dmic2_data, i2s1_data, ext_mclk1_b, _),
+ LPI_PINGROUP(10, 16, i2s2_clk, wsa_swr_clk, _, _),
+ LPI_PINGROUP(11, 18, i2s2_ws, wsa_swr_data, _, _),
+ LPI_PINGROUP(12, LPI_NO_SLEW, dmic3_clk, i2s3_clk, _, _),
+ LPI_PINGROUP(13, LPI_NO_SLEW, dmic3_data, i2s3_ws, ext_mclk1_a, _),
+ LPI_PINGROUP(14, 6, swr_tx_data, ext_mclk1_d, _, _),
+ LPI_PINGROUP(15, 20, i2s2_data, wsa2_swr_clk, _, _),
+ LPI_PINGROUP(16, 22, i2s2_data, wsa2_swr_data, _, _),
+ LPI_PINGROUP(17, LPI_NO_SLEW, dmic4_clk, i2s3_data, _, _),
+ LPI_PINGROUP(18, LPI_NO_SLEW, dmic4_data, i2s3_data, _, _),
+ LPI_PINGROUP(19, LPI_NO_SLEW, i2s4_clk, slimbus_clk, _, _),
+ LPI_PINGROUP(20, LPI_NO_SLEW, i2s4_ws, slimbus_data, _, _),
+ LPI_PINGROUP(21, LPI_NO_SLEW, i2s4_data, _, _, _),
+ LPI_PINGROUP(22, LPI_NO_SLEW, i2s4_data, ext_mclk1_e, _, _),
+};
+
+static const struct lpi_function sm8550_functions[] = {
+ LPI_FUNCTION(dmic1_clk),
+ LPI_FUNCTION(dmic1_data),
+ LPI_FUNCTION(dmic2_clk),
+ LPI_FUNCTION(dmic2_data),
+ LPI_FUNCTION(dmic3_clk),
+ LPI_FUNCTION(dmic3_data),
+ LPI_FUNCTION(dmic4_clk),
+ LPI_FUNCTION(dmic4_data),
+ LPI_FUNCTION(i2s0_clk),
+ LPI_FUNCTION(i2s0_data),
+ LPI_FUNCTION(i2s0_ws),
+ LPI_FUNCTION(i2s1_clk),
+ LPI_FUNCTION(i2s1_data),
+ LPI_FUNCTION(i2s1_ws),
+ LPI_FUNCTION(i2s2_clk),
+ LPI_FUNCTION(i2s2_data),
+ LPI_FUNCTION(i2s2_ws),
+ LPI_FUNCTION(i2s3_clk),
+ LPI_FUNCTION(i2s3_data),
+ LPI_FUNCTION(i2s3_ws),
+ LPI_FUNCTION(i2s4_clk),
+ LPI_FUNCTION(i2s4_data),
+ LPI_FUNCTION(i2s4_ws),
+ LPI_FUNCTION(slimbus_clk),
+ LPI_FUNCTION(slimbus_data),
+ LPI_FUNCTION(swr_rx_clk),
+ LPI_FUNCTION(swr_rx_data),
+ LPI_FUNCTION(swr_tx_clk),
+ LPI_FUNCTION(swr_tx_data),
+ LPI_FUNCTION(wsa_swr_clk),
+ LPI_FUNCTION(wsa_swr_data),
+ LPI_FUNCTION(wsa2_swr_clk),
+ LPI_FUNCTION(wsa2_swr_data),
+ LPI_FUNCTION(ext_mclk1_a),
+ LPI_FUNCTION(ext_mclk1_b),
+ LPI_FUNCTION(ext_mclk1_c),
+ LPI_FUNCTION(ext_mclk1_d),
+ LPI_FUNCTION(ext_mclk1_e),
+};
+
+static const struct lpi_pinctrl_variant_data sm8550_lpi_data = {
+ .pins = sm8550_lpi_pins,
+ .npins = ARRAY_SIZE(sm8550_lpi_pins),
+ .groups = sm8550_groups,
+ .ngroups = ARRAY_SIZE(sm8550_groups),
+ .functions = sm8550_functions,
+ .nfunctions = ARRAY_SIZE(sm8550_functions),
+};
+
+static const struct of_device_id lpi_pinctrl_of_match[] = {
+ {
+ .compatible = "qcom,sm8550-lpass-lpi-pinctrl",
+ .data = &sm8550_lpi_data,
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, lpi_pinctrl_of_match);
+
+static struct platform_driver lpi_pinctrl_driver = {
+ .driver = {
+ .name = "qcom-sm8550-lpass-lpi-pinctrl",
+ .of_match_table = lpi_pinctrl_of_match,
+ },
+ .probe = lpi_pinctrl_probe,
+ .remove = lpi_pinctrl_remove,
+};
+
+module_platform_driver(lpi_pinctrl_driver);
+MODULE_DESCRIPTION("Qualcomm SM8550 LPI GPIO pin control driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/qcom/pinctrl-sm8550.c b/drivers/pinctrl/qcom/pinctrl-sm8550.c
new file mode 100644
index 000000000000..c9d038098f2c
--- /dev/null
+++ b/drivers/pinctrl/qcom/pinctrl-sm8550.c
@@ -0,0 +1,1790 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022, Linaro Limited
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-msm.h"
+
+#define FUNCTION(fname) \
+ [msm_mux_##fname] = { \
+ .name = #fname, \
+ .groups = fname##_groups, \
+ .ngroups = ARRAY_SIZE(fname##_groups), \
+ }
+
+#define REG_SIZE 0x1000
+
+#define PINGROUP(id, f1, f2, f3, f4, f5, f6, f7, f8, f9) \
+ { \
+ .name = "gpio" #id, \
+ .pins = gpio##id##_pins, \
+ .npins = (unsigned int)ARRAY_SIZE(gpio##id##_pins), \
+ .funcs = (int[]){ \
+ msm_mux_gpio, /* gpio mode */ \
+ msm_mux_##f1, \
+ msm_mux_##f2, \
+ msm_mux_##f3, \
+ msm_mux_##f4, \
+ msm_mux_##f5, \
+ msm_mux_##f6, \
+ msm_mux_##f7, \
+ msm_mux_##f8, \
+ msm_mux_##f9 \
+ }, \
+ .nfuncs = 10, \
+ .ctl_reg = REG_SIZE * id, \
+ .io_reg = 0x4 + REG_SIZE * id, \
+ .intr_cfg_reg = 0x8 + REG_SIZE * id, \
+ .intr_status_reg = 0xc + REG_SIZE * id, \
+ .intr_target_reg = 0x8 + REG_SIZE * id, \
+ .mux_bit = 2, \
+ .pull_bit = 0, \
+ .drv_bit = 6, \
+ .i2c_pull_bit = 13, \
+ .egpio_enable = 12, \
+ .egpio_present = 11, \
+ .oe_bit = 9, \
+ .in_bit = 0, \
+ .out_bit = 1, \
+ .intr_enable_bit = 0, \
+ .intr_status_bit = 0, \
+ .intr_target_bit = 5, \
+ .intr_target_kpss_val = 3, \
+ .intr_raw_status_bit = 4, \
+ .intr_polarity_bit = 1, \
+ .intr_detection_bit = 2, \
+ .intr_detection_width = 2, \
+ }
+
+#define SDC_QDSD_PINGROUP(pg_name, ctl, pull, drv) \
+ { \
+ .name = #pg_name, \
+ .pins = pg_name##_pins, \
+ .npins = (unsigned int)ARRAY_SIZE(pg_name##_pins), \
+ .ctl_reg = ctl, \
+ .io_reg = 0, \
+ .intr_cfg_reg = 0, \
+ .intr_status_reg = 0, \
+ .intr_target_reg = 0, \
+ .mux_bit = -1, \
+ .pull_bit = pull, \
+ .drv_bit = drv, \
+ .oe_bit = -1, \
+ .in_bit = -1, \
+ .out_bit = -1, \
+ .intr_enable_bit = -1, \
+ .intr_status_bit = -1, \
+ .intr_target_bit = -1, \
+ .intr_raw_status_bit = -1, \
+ .intr_polarity_bit = -1, \
+ .intr_detection_bit = -1, \
+ .intr_detection_width = -1, \
+ }
+
+#define UFS_RESET(pg_name, offset) \
+ { \
+ .name = #pg_name, \
+ .pins = pg_name##_pins, \
+ .npins = (unsigned int)ARRAY_SIZE(pg_name##_pins), \
+ .ctl_reg = offset, \
+ .io_reg = offset + 0x4, \
+ .intr_cfg_reg = 0, \
+ .intr_status_reg = 0, \
+ .intr_target_reg = 0, \
+ .mux_bit = -1, \
+ .pull_bit = 3, \
+ .drv_bit = 0, \
+ .oe_bit = -1, \
+ .in_bit = -1, \
+ .out_bit = 0, \
+ .intr_enable_bit = -1, \
+ .intr_status_bit = -1, \
+ .intr_target_bit = -1, \
+ .intr_raw_status_bit = -1, \
+ .intr_polarity_bit = -1, \
+ .intr_detection_bit = -1, \
+ .intr_detection_width = -1, \
+ }
+
+static const struct pinctrl_pin_desc sm8550_pins[] = {
+ PINCTRL_PIN(0, "GPIO_0"),
+ PINCTRL_PIN(1, "GPIO_1"),
+ PINCTRL_PIN(2, "GPIO_2"),
+ PINCTRL_PIN(3, "GPIO_3"),
+ PINCTRL_PIN(4, "GPIO_4"),
+ PINCTRL_PIN(5, "GPIO_5"),
+ PINCTRL_PIN(6, "GPIO_6"),
+ PINCTRL_PIN(7, "GPIO_7"),
+ PINCTRL_PIN(8, "GPIO_8"),
+ PINCTRL_PIN(9, "GPIO_9"),
+ PINCTRL_PIN(10, "GPIO_10"),
+ PINCTRL_PIN(11, "GPIO_11"),
+ PINCTRL_PIN(12, "GPIO_12"),
+ PINCTRL_PIN(13, "GPIO_13"),
+ PINCTRL_PIN(14, "GPIO_14"),
+ PINCTRL_PIN(15, "GPIO_15"),
+ PINCTRL_PIN(16, "GPIO_16"),
+ PINCTRL_PIN(17, "GPIO_17"),
+ PINCTRL_PIN(18, "GPIO_18"),
+ PINCTRL_PIN(19, "GPIO_19"),
+ PINCTRL_PIN(20, "GPIO_20"),
+ PINCTRL_PIN(21, "GPIO_21"),
+ PINCTRL_PIN(22, "GPIO_22"),
+ PINCTRL_PIN(23, "GPIO_23"),
+ PINCTRL_PIN(24, "GPIO_24"),
+ PINCTRL_PIN(25, "GPIO_25"),
+ PINCTRL_PIN(26, "GPIO_26"),
+ PINCTRL_PIN(27, "GPIO_27"),
+ PINCTRL_PIN(28, "GPIO_28"),
+ PINCTRL_PIN(29, "GPIO_29"),
+ PINCTRL_PIN(30, "GPIO_30"),
+ PINCTRL_PIN(31, "GPIO_31"),
+ PINCTRL_PIN(32, "GPIO_32"),
+ PINCTRL_PIN(33, "GPIO_33"),
+ PINCTRL_PIN(34, "GPIO_34"),
+ PINCTRL_PIN(35, "GPIO_35"),
+ PINCTRL_PIN(36, "GPIO_36"),
+ PINCTRL_PIN(37, "GPIO_37"),
+ PINCTRL_PIN(38, "GPIO_38"),
+ PINCTRL_PIN(39, "GPIO_39"),
+ PINCTRL_PIN(40, "GPIO_40"),
+ PINCTRL_PIN(41, "GPIO_41"),
+ PINCTRL_PIN(42, "GPIO_42"),
+ PINCTRL_PIN(43, "GPIO_43"),
+ PINCTRL_PIN(44, "GPIO_44"),
+ PINCTRL_PIN(45, "GPIO_45"),
+ PINCTRL_PIN(46, "GPIO_46"),
+ PINCTRL_PIN(47, "GPIO_47"),
+ PINCTRL_PIN(48, "GPIO_48"),
+ PINCTRL_PIN(49, "GPIO_49"),
+ PINCTRL_PIN(50, "GPIO_50"),
+ PINCTRL_PIN(51, "GPIO_51"),
+ PINCTRL_PIN(52, "GPIO_52"),
+ PINCTRL_PIN(53, "GPIO_53"),
+ PINCTRL_PIN(54, "GPIO_54"),
+ PINCTRL_PIN(55, "GPIO_55"),
+ PINCTRL_PIN(56, "GPIO_56"),
+ PINCTRL_PIN(57, "GPIO_57"),
+ PINCTRL_PIN(58, "GPIO_58"),
+ PINCTRL_PIN(59, "GPIO_59"),
+ PINCTRL_PIN(60, "GPIO_60"),
+ PINCTRL_PIN(61, "GPIO_61"),
+ PINCTRL_PIN(62, "GPIO_62"),
+ PINCTRL_PIN(63, "GPIO_63"),
+ PINCTRL_PIN(64, "GPIO_64"),
+ PINCTRL_PIN(65, "GPIO_65"),
+ PINCTRL_PIN(66, "GPIO_66"),
+ PINCTRL_PIN(67, "GPIO_67"),
+ PINCTRL_PIN(68, "GPIO_68"),
+ PINCTRL_PIN(69, "GPIO_69"),
+ PINCTRL_PIN(70, "GPIO_70"),
+ PINCTRL_PIN(71, "GPIO_71"),
+ PINCTRL_PIN(72, "GPIO_72"),
+ PINCTRL_PIN(73, "GPIO_73"),
+ PINCTRL_PIN(74, "GPIO_74"),
+ PINCTRL_PIN(75, "GPIO_75"),
+ PINCTRL_PIN(76, "GPIO_76"),
+ PINCTRL_PIN(77, "GPIO_77"),
+ PINCTRL_PIN(78, "GPIO_78"),
+ PINCTRL_PIN(79, "GPIO_79"),
+ PINCTRL_PIN(80, "GPIO_80"),
+ PINCTRL_PIN(81, "GPIO_81"),
+ PINCTRL_PIN(82, "GPIO_82"),
+ PINCTRL_PIN(83, "GPIO_83"),
+ PINCTRL_PIN(84, "GPIO_84"),
+ PINCTRL_PIN(85, "GPIO_85"),
+ PINCTRL_PIN(86, "GPIO_86"),
+ PINCTRL_PIN(87, "GPIO_87"),
+ PINCTRL_PIN(88, "GPIO_88"),
+ PINCTRL_PIN(89, "GPIO_89"),
+ PINCTRL_PIN(90, "GPIO_90"),
+ PINCTRL_PIN(91, "GPIO_91"),
+ PINCTRL_PIN(92, "GPIO_92"),
+ PINCTRL_PIN(93, "GPIO_93"),
+ PINCTRL_PIN(94, "GPIO_94"),
+ PINCTRL_PIN(95, "GPIO_95"),
+ PINCTRL_PIN(96, "GPIO_96"),
+ PINCTRL_PIN(97, "GPIO_97"),
+ PINCTRL_PIN(98, "GPIO_98"),
+ PINCTRL_PIN(99, "GPIO_99"),
+ PINCTRL_PIN(100, "GPIO_100"),
+ PINCTRL_PIN(101, "GPIO_101"),
+ PINCTRL_PIN(102, "GPIO_102"),
+ PINCTRL_PIN(103, "GPIO_103"),
+ PINCTRL_PIN(104, "GPIO_104"),
+ PINCTRL_PIN(105, "GPIO_105"),
+ PINCTRL_PIN(106, "GPIO_106"),
+ PINCTRL_PIN(107, "GPIO_107"),
+ PINCTRL_PIN(108, "GPIO_108"),
+ PINCTRL_PIN(109, "GPIO_109"),
+ PINCTRL_PIN(110, "GPIO_110"),
+ PINCTRL_PIN(111, "GPIO_111"),
+ PINCTRL_PIN(112, "GPIO_112"),
+ PINCTRL_PIN(113, "GPIO_113"),
+ PINCTRL_PIN(114, "GPIO_114"),
+ PINCTRL_PIN(115, "GPIO_115"),
+ PINCTRL_PIN(116, "GPIO_116"),
+ PINCTRL_PIN(117, "GPIO_117"),
+ PINCTRL_PIN(118, "GPIO_118"),
+ PINCTRL_PIN(119, "GPIO_119"),
+ PINCTRL_PIN(120, "GPIO_120"),
+ PINCTRL_PIN(121, "GPIO_121"),
+ PINCTRL_PIN(122, "GPIO_122"),
+ PINCTRL_PIN(123, "GPIO_123"),
+ PINCTRL_PIN(124, "GPIO_124"),
+ PINCTRL_PIN(125, "GPIO_125"),
+ PINCTRL_PIN(126, "GPIO_126"),
+ PINCTRL_PIN(127, "GPIO_127"),
+ PINCTRL_PIN(128, "GPIO_128"),
+ PINCTRL_PIN(129, "GPIO_129"),
+ PINCTRL_PIN(130, "GPIO_130"),
+ PINCTRL_PIN(131, "GPIO_131"),
+ PINCTRL_PIN(132, "GPIO_132"),
+ PINCTRL_PIN(133, "GPIO_133"),
+ PINCTRL_PIN(134, "GPIO_134"),
+ PINCTRL_PIN(135, "GPIO_135"),
+ PINCTRL_PIN(136, "GPIO_136"),
+ PINCTRL_PIN(137, "GPIO_137"),
+ PINCTRL_PIN(138, "GPIO_138"),
+ PINCTRL_PIN(139, "GPIO_139"),
+ PINCTRL_PIN(140, "GPIO_140"),
+ PINCTRL_PIN(141, "GPIO_141"),
+ PINCTRL_PIN(142, "GPIO_142"),
+ PINCTRL_PIN(143, "GPIO_143"),
+ PINCTRL_PIN(144, "GPIO_144"),
+ PINCTRL_PIN(145, "GPIO_145"),
+ PINCTRL_PIN(146, "GPIO_146"),
+ PINCTRL_PIN(147, "GPIO_147"),
+ PINCTRL_PIN(148, "GPIO_148"),
+ PINCTRL_PIN(149, "GPIO_149"),
+ PINCTRL_PIN(150, "GPIO_150"),
+ PINCTRL_PIN(151, "GPIO_151"),
+ PINCTRL_PIN(152, "GPIO_152"),
+ PINCTRL_PIN(153, "GPIO_153"),
+ PINCTRL_PIN(154, "GPIO_154"),
+ PINCTRL_PIN(155, "GPIO_155"),
+ PINCTRL_PIN(156, "GPIO_156"),
+ PINCTRL_PIN(157, "GPIO_157"),
+ PINCTRL_PIN(158, "GPIO_158"),
+ PINCTRL_PIN(159, "GPIO_159"),
+ PINCTRL_PIN(160, "GPIO_160"),
+ PINCTRL_PIN(161, "GPIO_161"),
+ PINCTRL_PIN(162, "GPIO_162"),
+ PINCTRL_PIN(163, "GPIO_163"),
+ PINCTRL_PIN(164, "GPIO_164"),
+ PINCTRL_PIN(165, "GPIO_165"),
+ PINCTRL_PIN(166, "GPIO_166"),
+ PINCTRL_PIN(167, "GPIO_167"),
+ PINCTRL_PIN(168, "GPIO_168"),
+ PINCTRL_PIN(169, "GPIO_169"),
+ PINCTRL_PIN(170, "GPIO_170"),
+ PINCTRL_PIN(171, "GPIO_171"),
+ PINCTRL_PIN(172, "GPIO_172"),
+ PINCTRL_PIN(173, "GPIO_173"),
+ PINCTRL_PIN(174, "GPIO_174"),
+ PINCTRL_PIN(175, "GPIO_175"),
+ PINCTRL_PIN(176, "GPIO_176"),
+ PINCTRL_PIN(177, "GPIO_177"),
+ PINCTRL_PIN(178, "GPIO_178"),
+ PINCTRL_PIN(179, "GPIO_179"),
+ PINCTRL_PIN(180, "GPIO_180"),
+ PINCTRL_PIN(181, "GPIO_181"),
+ PINCTRL_PIN(182, "GPIO_182"),
+ PINCTRL_PIN(183, "GPIO_183"),
+ PINCTRL_PIN(184, "GPIO_184"),
+ PINCTRL_PIN(185, "GPIO_185"),
+ PINCTRL_PIN(186, "GPIO_186"),
+ PINCTRL_PIN(187, "GPIO_187"),
+ PINCTRL_PIN(188, "GPIO_188"),
+ PINCTRL_PIN(189, "GPIO_189"),
+ PINCTRL_PIN(190, "GPIO_190"),
+ PINCTRL_PIN(191, "GPIO_191"),
+ PINCTRL_PIN(192, "GPIO_192"),
+ PINCTRL_PIN(193, "GPIO_193"),
+ PINCTRL_PIN(194, "GPIO_194"),
+ PINCTRL_PIN(195, "GPIO_195"),
+ PINCTRL_PIN(196, "GPIO_196"),
+ PINCTRL_PIN(197, "GPIO_197"),
+ PINCTRL_PIN(198, "GPIO_198"),
+ PINCTRL_PIN(199, "GPIO_199"),
+ PINCTRL_PIN(200, "GPIO_200"),
+ PINCTRL_PIN(201, "GPIO_201"),
+ PINCTRL_PIN(202, "GPIO_202"),
+ PINCTRL_PIN(203, "GPIO_203"),
+ PINCTRL_PIN(204, "GPIO_204"),
+ PINCTRL_PIN(205, "GPIO_205"),
+ PINCTRL_PIN(206, "GPIO_206"),
+ PINCTRL_PIN(207, "GPIO_207"),
+ PINCTRL_PIN(208, "GPIO_208"),
+ PINCTRL_PIN(209, "GPIO_209"),
+ PINCTRL_PIN(210, "UFS_RESET"),
+ PINCTRL_PIN(211, "SDC2_CLK"),
+ PINCTRL_PIN(212, "SDC2_CMD"),
+ PINCTRL_PIN(213, "SDC2_DATA"),
+};
+
+#define DECLARE_MSM_GPIO_PINS(pin) \
+ static const unsigned int gpio##pin##_pins[] = { pin }
+DECLARE_MSM_GPIO_PINS(0);
+DECLARE_MSM_GPIO_PINS(1);
+DECLARE_MSM_GPIO_PINS(2);
+DECLARE_MSM_GPIO_PINS(3);
+DECLARE_MSM_GPIO_PINS(4);
+DECLARE_MSM_GPIO_PINS(5);
+DECLARE_MSM_GPIO_PINS(6);
+DECLARE_MSM_GPIO_PINS(7);
+DECLARE_MSM_GPIO_PINS(8);
+DECLARE_MSM_GPIO_PINS(9);
+DECLARE_MSM_GPIO_PINS(10);
+DECLARE_MSM_GPIO_PINS(11);
+DECLARE_MSM_GPIO_PINS(12);
+DECLARE_MSM_GPIO_PINS(13);
+DECLARE_MSM_GPIO_PINS(14);
+DECLARE_MSM_GPIO_PINS(15);
+DECLARE_MSM_GPIO_PINS(16);
+DECLARE_MSM_GPIO_PINS(17);
+DECLARE_MSM_GPIO_PINS(18);
+DECLARE_MSM_GPIO_PINS(19);
+DECLARE_MSM_GPIO_PINS(20);
+DECLARE_MSM_GPIO_PINS(21);
+DECLARE_MSM_GPIO_PINS(22);
+DECLARE_MSM_GPIO_PINS(23);
+DECLARE_MSM_GPIO_PINS(24);
+DECLARE_MSM_GPIO_PINS(25);
+DECLARE_MSM_GPIO_PINS(26);
+DECLARE_MSM_GPIO_PINS(27);
+DECLARE_MSM_GPIO_PINS(28);
+DECLARE_MSM_GPIO_PINS(29);
+DECLARE_MSM_GPIO_PINS(30);
+DECLARE_MSM_GPIO_PINS(31);
+DECLARE_MSM_GPIO_PINS(32);
+DECLARE_MSM_GPIO_PINS(33);
+DECLARE_MSM_GPIO_PINS(34);
+DECLARE_MSM_GPIO_PINS(35);
+DECLARE_MSM_GPIO_PINS(36);
+DECLARE_MSM_GPIO_PINS(37);
+DECLARE_MSM_GPIO_PINS(38);
+DECLARE_MSM_GPIO_PINS(39);
+DECLARE_MSM_GPIO_PINS(40);
+DECLARE_MSM_GPIO_PINS(41);
+DECLARE_MSM_GPIO_PINS(42);
+DECLARE_MSM_GPIO_PINS(43);
+DECLARE_MSM_GPIO_PINS(44);
+DECLARE_MSM_GPIO_PINS(45);
+DECLARE_MSM_GPIO_PINS(46);
+DECLARE_MSM_GPIO_PINS(47);
+DECLARE_MSM_GPIO_PINS(48);
+DECLARE_MSM_GPIO_PINS(49);
+DECLARE_MSM_GPIO_PINS(50);
+DECLARE_MSM_GPIO_PINS(51);
+DECLARE_MSM_GPIO_PINS(52);
+DECLARE_MSM_GPIO_PINS(53);
+DECLARE_MSM_GPIO_PINS(54);
+DECLARE_MSM_GPIO_PINS(55);
+DECLARE_MSM_GPIO_PINS(56);
+DECLARE_MSM_GPIO_PINS(57);
+DECLARE_MSM_GPIO_PINS(58);
+DECLARE_MSM_GPIO_PINS(59);
+DECLARE_MSM_GPIO_PINS(60);
+DECLARE_MSM_GPIO_PINS(61);
+DECLARE_MSM_GPIO_PINS(62);
+DECLARE_MSM_GPIO_PINS(63);
+DECLARE_MSM_GPIO_PINS(64);
+DECLARE_MSM_GPIO_PINS(65);
+DECLARE_MSM_GPIO_PINS(66);
+DECLARE_MSM_GPIO_PINS(67);
+DECLARE_MSM_GPIO_PINS(68);
+DECLARE_MSM_GPIO_PINS(69);
+DECLARE_MSM_GPIO_PINS(70);
+DECLARE_MSM_GPIO_PINS(71);
+DECLARE_MSM_GPIO_PINS(72);
+DECLARE_MSM_GPIO_PINS(73);
+DECLARE_MSM_GPIO_PINS(74);
+DECLARE_MSM_GPIO_PINS(75);
+DECLARE_MSM_GPIO_PINS(76);
+DECLARE_MSM_GPIO_PINS(77);
+DECLARE_MSM_GPIO_PINS(78);
+DECLARE_MSM_GPIO_PINS(79);
+DECLARE_MSM_GPIO_PINS(80);
+DECLARE_MSM_GPIO_PINS(81);
+DECLARE_MSM_GPIO_PINS(82);
+DECLARE_MSM_GPIO_PINS(83);
+DECLARE_MSM_GPIO_PINS(84);
+DECLARE_MSM_GPIO_PINS(85);
+DECLARE_MSM_GPIO_PINS(86);
+DECLARE_MSM_GPIO_PINS(87);
+DECLARE_MSM_GPIO_PINS(88);
+DECLARE_MSM_GPIO_PINS(89);
+DECLARE_MSM_GPIO_PINS(90);
+DECLARE_MSM_GPIO_PINS(91);
+DECLARE_MSM_GPIO_PINS(92);
+DECLARE_MSM_GPIO_PINS(93);
+DECLARE_MSM_GPIO_PINS(94);
+DECLARE_MSM_GPIO_PINS(95);
+DECLARE_MSM_GPIO_PINS(96);
+DECLARE_MSM_GPIO_PINS(97);
+DECLARE_MSM_GPIO_PINS(98);
+DECLARE_MSM_GPIO_PINS(99);
+DECLARE_MSM_GPIO_PINS(100);
+DECLARE_MSM_GPIO_PINS(101);
+DECLARE_MSM_GPIO_PINS(102);
+DECLARE_MSM_GPIO_PINS(103);
+DECLARE_MSM_GPIO_PINS(104);
+DECLARE_MSM_GPIO_PINS(105);
+DECLARE_MSM_GPIO_PINS(106);
+DECLARE_MSM_GPIO_PINS(107);
+DECLARE_MSM_GPIO_PINS(108);
+DECLARE_MSM_GPIO_PINS(109);
+DECLARE_MSM_GPIO_PINS(110);
+DECLARE_MSM_GPIO_PINS(111);
+DECLARE_MSM_GPIO_PINS(112);
+DECLARE_MSM_GPIO_PINS(113);
+DECLARE_MSM_GPIO_PINS(114);
+DECLARE_MSM_GPIO_PINS(115);
+DECLARE_MSM_GPIO_PINS(116);
+DECLARE_MSM_GPIO_PINS(117);
+DECLARE_MSM_GPIO_PINS(118);
+DECLARE_MSM_GPIO_PINS(119);
+DECLARE_MSM_GPIO_PINS(120);
+DECLARE_MSM_GPIO_PINS(121);
+DECLARE_MSM_GPIO_PINS(122);
+DECLARE_MSM_GPIO_PINS(123);
+DECLARE_MSM_GPIO_PINS(124);
+DECLARE_MSM_GPIO_PINS(125);
+DECLARE_MSM_GPIO_PINS(126);
+DECLARE_MSM_GPIO_PINS(127);
+DECLARE_MSM_GPIO_PINS(128);
+DECLARE_MSM_GPIO_PINS(129);
+DECLARE_MSM_GPIO_PINS(130);
+DECLARE_MSM_GPIO_PINS(131);
+DECLARE_MSM_GPIO_PINS(132);
+DECLARE_MSM_GPIO_PINS(133);
+DECLARE_MSM_GPIO_PINS(134);
+DECLARE_MSM_GPIO_PINS(135);
+DECLARE_MSM_GPIO_PINS(136);
+DECLARE_MSM_GPIO_PINS(137);
+DECLARE_MSM_GPIO_PINS(138);
+DECLARE_MSM_GPIO_PINS(139);
+DECLARE_MSM_GPIO_PINS(140);
+DECLARE_MSM_GPIO_PINS(141);
+DECLARE_MSM_GPIO_PINS(142);
+DECLARE_MSM_GPIO_PINS(143);
+DECLARE_MSM_GPIO_PINS(144);
+DECLARE_MSM_GPIO_PINS(145);
+DECLARE_MSM_GPIO_PINS(146);
+DECLARE_MSM_GPIO_PINS(147);
+DECLARE_MSM_GPIO_PINS(148);
+DECLARE_MSM_GPIO_PINS(149);
+DECLARE_MSM_GPIO_PINS(150);
+DECLARE_MSM_GPIO_PINS(151);
+DECLARE_MSM_GPIO_PINS(152);
+DECLARE_MSM_GPIO_PINS(153);
+DECLARE_MSM_GPIO_PINS(154);
+DECLARE_MSM_GPIO_PINS(155);
+DECLARE_MSM_GPIO_PINS(156);
+DECLARE_MSM_GPIO_PINS(157);
+DECLARE_MSM_GPIO_PINS(158);
+DECLARE_MSM_GPIO_PINS(159);
+DECLARE_MSM_GPIO_PINS(160);
+DECLARE_MSM_GPIO_PINS(161);
+DECLARE_MSM_GPIO_PINS(162);
+DECLARE_MSM_GPIO_PINS(163);
+DECLARE_MSM_GPIO_PINS(164);
+DECLARE_MSM_GPIO_PINS(165);
+DECLARE_MSM_GPIO_PINS(166);
+DECLARE_MSM_GPIO_PINS(167);
+DECLARE_MSM_GPIO_PINS(168);
+DECLARE_MSM_GPIO_PINS(169);
+DECLARE_MSM_GPIO_PINS(170);
+DECLARE_MSM_GPIO_PINS(171);
+DECLARE_MSM_GPIO_PINS(172);
+DECLARE_MSM_GPIO_PINS(173);
+DECLARE_MSM_GPIO_PINS(174);
+DECLARE_MSM_GPIO_PINS(175);
+DECLARE_MSM_GPIO_PINS(176);
+DECLARE_MSM_GPIO_PINS(177);
+DECLARE_MSM_GPIO_PINS(178);
+DECLARE_MSM_GPIO_PINS(179);
+DECLARE_MSM_GPIO_PINS(180);
+DECLARE_MSM_GPIO_PINS(181);
+DECLARE_MSM_GPIO_PINS(182);
+DECLARE_MSM_GPIO_PINS(183);
+DECLARE_MSM_GPIO_PINS(184);
+DECLARE_MSM_GPIO_PINS(185);
+DECLARE_MSM_GPIO_PINS(186);
+DECLARE_MSM_GPIO_PINS(187);
+DECLARE_MSM_GPIO_PINS(188);
+DECLARE_MSM_GPIO_PINS(189);
+DECLARE_MSM_GPIO_PINS(190);
+DECLARE_MSM_GPIO_PINS(191);
+DECLARE_MSM_GPIO_PINS(192);
+DECLARE_MSM_GPIO_PINS(193);
+DECLARE_MSM_GPIO_PINS(194);
+DECLARE_MSM_GPIO_PINS(195);
+DECLARE_MSM_GPIO_PINS(196);
+DECLARE_MSM_GPIO_PINS(197);
+DECLARE_MSM_GPIO_PINS(198);
+DECLARE_MSM_GPIO_PINS(199);
+DECLARE_MSM_GPIO_PINS(200);
+DECLARE_MSM_GPIO_PINS(201);
+DECLARE_MSM_GPIO_PINS(202);
+DECLARE_MSM_GPIO_PINS(203);
+DECLARE_MSM_GPIO_PINS(204);
+DECLARE_MSM_GPIO_PINS(205);
+DECLARE_MSM_GPIO_PINS(206);
+DECLARE_MSM_GPIO_PINS(207);
+DECLARE_MSM_GPIO_PINS(208);
+DECLARE_MSM_GPIO_PINS(209);
+
+static const unsigned int ufs_reset_pins[] = { 210 };
+static const unsigned int sdc2_clk_pins[] = { 211 };
+static const unsigned int sdc2_cmd_pins[] = { 212 };
+static const unsigned int sdc2_data_pins[] = { 213 };
+
+enum sm8550_functions {
+ msm_mux_gpio,
+ msm_mux_aon_cci,
+ msm_mux_aoss_cti,
+ msm_mux_atest_char,
+ msm_mux_atest_usb,
+ msm_mux_audio_ext_mclk0,
+ msm_mux_audio_ext_mclk1,
+ msm_mux_audio_ref_clk,
+ msm_mux_cam_aon_mclk4,
+ msm_mux_cam_mclk,
+ msm_mux_cci_async_in,
+ msm_mux_cci_i2c_scl,
+ msm_mux_cci_i2c_sda,
+ msm_mux_cci_timer,
+ msm_mux_cmu_rng,
+ msm_mux_coex_uart1_rx,
+ msm_mux_coex_uart1_tx,
+ msm_mux_coex_uart2_rx,
+ msm_mux_coex_uart2_tx,
+ msm_mux_cri_trng,
+ msm_mux_dbg_out_clk,
+ msm_mux_ddr_bist_complete,
+ msm_mux_ddr_bist_fail,
+ msm_mux_ddr_bist_start,
+ msm_mux_ddr_bist_stop,
+ msm_mux_ddr_pxi0,
+ msm_mux_ddr_pxi1,
+ msm_mux_ddr_pxi2,
+ msm_mux_ddr_pxi3,
+ msm_mux_dp_hot,
+ msm_mux_gcc_gp1,
+ msm_mux_gcc_gp2,
+ msm_mux_gcc_gp3,
+ msm_mux_i2chub0_se0,
+ msm_mux_i2chub0_se1,
+ msm_mux_i2chub0_se2,
+ msm_mux_i2chub0_se3,
+ msm_mux_i2chub0_se4,
+ msm_mux_i2chub0_se5,
+ msm_mux_i2chub0_se6,
+ msm_mux_i2chub0_se7,
+ msm_mux_i2chub0_se8,
+ msm_mux_i2chub0_se9,
+ msm_mux_i2s0_data0,
+ msm_mux_i2s0_data1,
+ msm_mux_i2s0_sck,
+ msm_mux_i2s0_ws,
+ msm_mux_i2s1_data0,
+ msm_mux_i2s1_data1,
+ msm_mux_i2s1_sck,
+ msm_mux_i2s1_ws,
+ msm_mux_ibi_i3c,
+ msm_mux_jitter_bist,
+ msm_mux_mdp_vsync,
+ msm_mux_mdp_vsync0_out,
+ msm_mux_mdp_vsync1_out,
+ msm_mux_mdp_vsync2_out,
+ msm_mux_mdp_vsync3_out,
+ msm_mux_mdp_vsync_e,
+ msm_mux_nav_gpio0,
+ msm_mux_nav_gpio1,
+ msm_mux_nav_gpio2,
+ msm_mux_pcie0_clk_req_n,
+ msm_mux_pcie1_clk_req_n,
+ msm_mux_phase_flag,
+ msm_mux_pll_bist_sync,
+ msm_mux_pll_clk_aux,
+ msm_mux_prng_rosc0,
+ msm_mux_prng_rosc1,
+ msm_mux_prng_rosc2,
+ msm_mux_prng_rosc3,
+ msm_mux_qdss_cti,
+ msm_mux_qdss_gpio,
+ msm_mux_qlink0_enable,
+ msm_mux_qlink0_request,
+ msm_mux_qlink0_wmss,
+ msm_mux_qlink1_enable,
+ msm_mux_qlink1_request,
+ msm_mux_qlink1_wmss,
+ msm_mux_qlink2_enable,
+ msm_mux_qlink2_request,
+ msm_mux_qlink2_wmss,
+ msm_mux_qspi0,
+ msm_mux_qspi1,
+ msm_mux_qspi2,
+ msm_mux_qspi3,
+ msm_mux_qspi_clk,
+ msm_mux_qspi_cs,
+ msm_mux_qup1_se0,
+ msm_mux_qup1_se1,
+ msm_mux_qup1_se2,
+ msm_mux_qup1_se3,
+ msm_mux_qup1_se4,
+ msm_mux_qup1_se5,
+ msm_mux_qup1_se6,
+ msm_mux_qup1_se7,
+ msm_mux_qup2_se0,
+ msm_mux_qup2_se0_l0_mira,
+ msm_mux_qup2_se0_l0_mirb,
+ msm_mux_qup2_se0_l1_mira,
+ msm_mux_qup2_se0_l1_mirb,
+ msm_mux_qup2_se0_l2_mira,
+ msm_mux_qup2_se0_l2_mirb,
+ msm_mux_qup2_se0_l3_mira,
+ msm_mux_qup2_se0_l3_mirb,
+ msm_mux_qup2_se1,
+ msm_mux_qup2_se2,
+ msm_mux_qup2_se3,
+ msm_mux_qup2_se4,
+ msm_mux_qup2_se5,
+ msm_mux_qup2_se6,
+ msm_mux_qup2_se7,
+ msm_mux_resout_n,
+ msm_mux_sd_write_protect,
+ msm_mux_sdc40,
+ msm_mux_sdc41,
+ msm_mux_sdc42,
+ msm_mux_sdc43,
+ msm_mux_sdc4_clk,
+ msm_mux_sdc4_cmd,
+ msm_mux_tb_trig_sdc2,
+ msm_mux_tb_trig_sdc4,
+ msm_mux_tgu_ch0_trigout,
+ msm_mux_tgu_ch1_trigout,
+ msm_mux_tgu_ch2_trigout,
+ msm_mux_tgu_ch3_trigout,
+ msm_mux_tmess_prng0,
+ msm_mux_tmess_prng1,
+ msm_mux_tmess_prng2,
+ msm_mux_tmess_prng3,
+ msm_mux_tsense_pwm1,
+ msm_mux_tsense_pwm2,
+ msm_mux_tsense_pwm3,
+ msm_mux_uim0_clk,
+ msm_mux_uim0_data,
+ msm_mux_uim0_present,
+ msm_mux_uim0_reset,
+ msm_mux_uim1_clk,
+ msm_mux_uim1_data,
+ msm_mux_uim1_present,
+ msm_mux_uim1_reset,
+ msm_mux_usb1_hs,
+ msm_mux_usb_phy,
+ msm_mux_vfr_0,
+ msm_mux_vfr_1,
+ msm_mux_vsense_trigger_mirnat,
+ msm_mux__,
+};
+
+static const char * const gpio_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7",
+ "gpio8", "gpio9", "gpio10", "gpio11", "gpio12", "gpio13", "gpio14",
+ "gpio15", "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21",
+ "gpio22", "gpio23", "gpio24", "gpio25", "gpio26", "gpio27", "gpio28",
+ "gpio29", "gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35",
+ "gpio36", "gpio37", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42",
+ "gpio43", "gpio44", "gpio45", "gpio46", "gpio47", "gpio48", "gpio49",
+ "gpio50", "gpio51", "gpio52", "gpio53", "gpio54", "gpio55", "gpio56",
+ "gpio57", "gpio58", "gpio59", "gpio60", "gpio61", "gpio62", "gpio63",
+ "gpio64", "gpio65", "gpio66", "gpio67", "gpio68", "gpio69", "gpio70",
+ "gpio71", "gpio72", "gpio73", "gpio74", "gpio75", "gpio76", "gpio77",
+ "gpio78", "gpio79", "gpio80", "gpio81", "gpio82", "gpio83", "gpio84",
+ "gpio85", "gpio86", "gpio87", "gpio88", "gpio89", "gpio90", "gpio91",
+ "gpio92", "gpio93", "gpio94", "gpio95", "gpio96", "gpio97", "gpio98",
+ "gpio99", "gpio100", "gpio101", "gpio102", "gpio103", "gpio104",
+ "gpio105", "gpio106", "gpio107", "gpio108", "gpio109", "gpio110",
+ "gpio111", "gpio112", "gpio113", "gpio114", "gpio115", "gpio116",
+ "gpio117", "gpio118", "gpio119", "gpio120", "gpio121", "gpio122",
+ "gpio123", "gpio124", "gpio125", "gpio126", "gpio127", "gpio128",
+ "gpio129", "gpio130", "gpio131", "gpio132", "gpio133", "gpio134",
+ "gpio135", "gpio136", "gpio137", "gpio138", "gpio139", "gpio140",
+ "gpio141", "gpio142", "gpio143", "gpio144", "gpio145", "gpio146",
+ "gpio147", "gpio148", "gpio149", "gpio150", "gpio151", "gpio152",
+ "gpio153", "gpio154", "gpio155", "gpio156", "gpio157", "gpio158",
+ "gpio159", "gpio160", "gpio161", "gpio162", "gpio163", "gpio164",
+ "gpio165", "gpio166", "gpio167", "gpio168", "gpio169", "gpio170",
+ "gpio171", "gpio172", "gpio173", "gpio174", "gpio175", "gpio176",
+ "gpio177", "gpio178", "gpio179", "gpio180", "gpio181", "gpio182",
+ "gpio183", "gpio184", "gpio185", "gpio186", "gpio187", "gpio188",
+ "gpio189", "gpio190", "gpio191", "gpio192", "gpio193", "gpio194",
+ "gpio195", "gpio196", "gpio197", "gpio198", "gpio199", "gpio200",
+ "gpio201", "gpio202", "gpio203", "gpio204", "gpio205", "gpio206",
+ "gpio207", "gpio208", "gpio209",
+};
+
+static const char * const aon_cci_groups[] = {
+ "gpio208", "gpio209",
+};
+
+static const char * const aoss_cti_groups[] = {
+ "gpio44", "gpio45", "gpio46", "gpio47",
+};
+
+static const char *const atest_char_groups[] = {
+ "gpio130", "gpio132", "gpio133", "gpio134", "gpio135",
+};
+
+static const char *const atest_usb_groups[] = {
+ "gpio37", "gpio39", "gpio55", "gpio149", "gpio148",
+};
+
+static const char *const audio_ext_mclk0_groups[] = {
+ "gpio125",
+};
+
+static const char *const audio_ext_mclk1_groups[] = {
+ "gpio124",
+};
+
+static const char *const audio_ref_clk_groups[] = {
+ "gpio124",
+};
+
+static const char *const cam_aon_mclk4_groups[] = {
+ "gpio104",
+};
+
+static const char *const cam_mclk_groups[] = {
+ "gpio100", "gpio101", "gpio102", "gpio103",
+ "gpio105", "gpio106", "gpio107",
+};
+
+static const char *const cci_async_in_groups[] = {
+ "gpio71", "gpio72", "gpio109",
+};
+
+static const char *const cci_i2c_scl_groups[] = {
+ "gpio111", "gpio113", "gpio115", "gpio75", "gpio1",
+};
+
+static const char *const cci_i2c_sda_groups[] = {
+ "gpio110", "gpio112", "gpio114", "gpio74", "gpio0",
+};
+
+static const char *const cci_timer_groups[] = {
+ "gpio116", "gpio117", "gpio118", "gpio119", "gpio120",
+};
+
+static const char *const cmu_rng_groups[] = {
+ "gpio129", "gpio128", "gpio127", "gpio122",
+};
+
+static const char *const coex_uart1_rx_groups[] = {
+ "gpio148",
+};
+
+static const char *const coex_uart1_tx_groups[] = {
+ "gpio149",
+};
+
+static const char *const coex_uart2_rx_groups[] = {
+ "gpio150",
+};
+
+static const char *const coex_uart2_tx_groups[] = {
+ "gpio151",
+};
+
+static const char *const cri_trng_groups[] = {
+ "gpio187",
+};
+
+static const char *const dbg_out_clk_groups[] = {
+ "gpio89",
+};
+
+static const char *const ddr_bist_complete_groups[] = {
+ "gpio40",
+};
+
+static const char *const ddr_bist_fail_groups[] = {
+ "gpio36",
+};
+
+static const char *const ddr_bist_start_groups[] = {
+ "gpio37",
+};
+
+static const char *const ddr_bist_stop_groups[] = {
+ "gpio41",
+};
+
+static const char *const ddr_pxi0_groups[] = {
+ "gpio51",
+ "gpio52",
+};
+
+static const char *const ddr_pxi1_groups[] = {
+ "gpio40",
+ "gpio41",
+};
+
+static const char *const ddr_pxi2_groups[] = {
+ "gpio45",
+ "gpio47",
+};
+
+static const char *const ddr_pxi3_groups[] = {
+ "gpio43",
+ "gpio44",
+};
+
+static const char *const dp_hot_groups[] = {
+ "gpio47",
+};
+
+static const char *const gcc_gp1_groups[] = {
+ "gpio86",
+ "gpio134",
+};
+
+static const char *const gcc_gp2_groups[] = {
+ "gpio87",
+ "gpio135",
+};
+
+static const char *const gcc_gp3_groups[] = {
+ "gpio88",
+ "gpio136",
+};
+
+static const char *const i2chub0_se0_groups[] = {
+ "gpio16",
+ "gpio17",
+};
+
+static const char *const i2chub0_se1_groups[] = {
+ "gpio18",
+ "gpio19",
+};
+
+static const char *const i2chub0_se2_groups[] = {
+ "gpio20",
+ "gpio21",
+};
+
+static const char *const i2chub0_se3_groups[] = {
+ "gpio22",
+ "gpio23",
+};
+
+static const char *const i2chub0_se4_groups[] = {
+ "gpio4",
+ "gpio5",
+};
+
+static const char *const i2chub0_se5_groups[] = {
+ "gpio6",
+ "gpio7",
+};
+
+static const char *const i2chub0_se6_groups[] = {
+ "gpio8",
+ "gpio9",
+};
+
+static const char *const i2chub0_se7_groups[] = {
+ "gpio10",
+ "gpio11",
+};
+
+static const char *const i2chub0_se8_groups[] = {
+ "gpio206",
+ "gpio207",
+};
+
+static const char *const i2chub0_se9_groups[] = {
+ "gpio84",
+ "gpio85",
+};
+
+static const char *const i2s0_data0_groups[] = {
+ "gpio127",
+};
+
+static const char *const i2s0_data1_groups[] = {
+ "gpio128",
+};
+
+static const char *const i2s0_sck_groups[] = {
+ "gpio126",
+};
+
+static const char *const i2s0_ws_groups[] = {
+ "gpio129",
+};
+
+static const char *const i2s1_data0_groups[] = {
+ "gpio122",
+};
+
+static const char *const i2s1_data1_groups[] = {
+ "gpio124",
+};
+
+static const char *const i2s1_sck_groups[] = {
+ "gpio121",
+};
+
+static const char *const i2s1_ws_groups[] = {
+ "gpio123",
+};
+
+static const char *const ibi_i3c_groups[] = {
+ "gpio0", "gpio1", "gpio28", "gpio29", "gpio32",
+ "gpio33", "gpio56", "gpio57", "gpio60", "gpio61",
+};
+
+static const char *const jitter_bist_groups[] = {
+ "gpio43",
+};
+
+static const char *const mdp_vsync_groups[] = {
+ "gpio86",
+ "gpio87",
+ "gpio133",
+ "gpio137",
+};
+
+static const char *const mdp_vsync0_out_groups[] = {
+ "gpio86",
+};
+
+static const char *const mdp_vsync1_out_groups[] = {
+ "gpio86",
+};
+
+static const char *const mdp_vsync2_out_groups[] = {
+ "gpio87",
+};
+
+static const char *const mdp_vsync3_out_groups[] = {
+ "gpio87",
+};
+
+static const char *const mdp_vsync_e_groups[] = {
+ "gpio88",
+};
+
+static const char *const nav_gpio0_groups[] = {
+ "gpio154",
+};
+
+static const char *const nav_gpio1_groups[] = {
+ "gpio155",
+};
+
+static const char *const nav_gpio2_groups[] = {
+ "gpio153",
+};
+
+static const char *const pcie0_clk_req_n_groups[] = {
+ "gpio95",
+};
+
+static const char *const pcie1_clk_req_n_groups[] = {
+ "gpio98",
+};
+
+static const char *const phase_flag_groups[] = {
+ "gpio0", "gpio2", "gpio3", "gpio10", "gpio11", "gpio12", "gpio13", "gpio59",
+ "gpio63", "gpio64", "gpio65", "gpio67", "gpio68", "gpio69", "gpio75", "gpio76",
+ "gpio77", "gpio79", "gpio80", "gpio81", "gpio92", "gpio83", "gpio94", "gpio95",
+ "gpio96", "gpio97", "gpio98", "gpio99", "gpio116", "gpio117", "gpio119", "gpio120",
+};
+
+static const char *const pll_bist_sync_groups[] = {
+ "gpio20",
+};
+
+static const char *const pll_clk_aux_groups[] = {
+ "gpio107",
+};
+
+static const char *const prng_rosc0_groups[] = {
+ "gpio186",
+};
+
+static const char *const prng_rosc1_groups[] = {
+ "gpio183",
+};
+
+static const char *const prng_rosc2_groups[] = {
+ "gpio182",
+};
+
+static const char *const prng_rosc3_groups[] = {
+ "gpio181",
+};
+
+static const char *const qdss_cti_groups[] = {
+ "gpio10", "gpio11", "gpio75", "gpio79",
+ "gpio159", "gpio160", "gpio161", "gpio162",
+};
+
+static const char *const qdss_gpio_groups[] = {
+ "gpio59", "gpio64", "gpio73", "gpio100", "gpio101", "gpio102", "gpio103",
+ "gpio104", "gpio105", "gpio110", "gpio111", "gpio112", "gpio113", "gpio114",
+ "gpio115", "gpio116", "gpio117", "gpio120", "gpio138", "gpio139", "gpio140",
+ "gpio141", "gpio142", "gpio143", "gpio144", "gpio145", "gpio148", "gpio149",
+ "gpio150", "gpio151", "gpio152", "gpio153", "gpio154", "gpio155", "gpio156",
+ "gpio157",
+};
+
+static const char *const qlink0_enable_groups[] = {
+ "gpio157",
+};
+
+static const char *const qlink0_request_groups[] = {
+ "gpio156",
+};
+
+static const char *const qlink0_wmss_groups[] = {
+ "gpio158",
+};
+
+static const char *const qlink1_enable_groups[] = {
+ "gpio160",
+};
+
+static const char *const qlink1_request_groups[] = {
+ "gpio159",
+};
+
+static const char *const qlink1_wmss_groups[] = {
+ "gpio161",
+};
+
+static const char *const qlink2_enable_groups[] = {
+ "gpio163",
+};
+
+static const char *const qlink2_request_groups[] = {
+ "gpio162",
+};
+
+static const char *const qlink2_wmss_groups[] = {
+ "gpio164",
+};
+
+static const char *const qspi0_groups[] = {
+ "gpio89",
+};
+
+static const char *const qspi1_groups[] = {
+ "gpio90",
+};
+
+static const char *const qspi2_groups[] = {
+ "gpio48",
+};
+
+static const char *const qspi3_groups[] = {
+ "gpio49",
+};
+
+static const char *const qspi_clk_groups[] = {
+ "gpio50",
+};
+
+static const char *const qspi_cs_groups[] = {
+ "gpio51", "gpio91",
+};
+
+static const char *const qup1_se0_groups[] = {
+ "gpio28", "gpio29", "gpio30", "gpio31",
+};
+
+static const char *const qup1_se1_groups[] = {
+ "gpio32", "gpio33", "gpio34", "gpio35",
+};
+
+static const char *const qup1_se2_groups[] = {
+ "gpio40", "gpio41", "gpio42", "gpio36",
+ "gpio37", "gpio38", "gpio39",
+};
+
+static const char *const qup1_se3_groups[] = {
+ "gpio40", "gpio41", "gpio42", "gpio43",
+};
+
+static const char *const qup1_se4_groups[] = {
+ "gpio44", "gpio45", "gpio46", "gpio47",
+};
+
+static const char *const qup1_se5_groups[] = {
+ "gpio52", "gpio53", "gpio54", "gpio55",
+};
+
+static const char *const qup1_se6_groups[] = {
+ "gpio48", "gpio49", "gpio50", "gpio51",
+};
+
+static const char *const qup1_se7_groups[] = {
+ "gpio24", "gpio25", "gpio26", "gpio27",
+};
+
+static const char *const qup2_se0_groups[] = {
+ "gpio63", "gpio66", "gpio67",
+};
+
+static const char *const qup2_se0_l0_mira_groups[] = {
+ "gpio56",
+};
+
+static const char *const qup2_se0_l0_mirb_groups[] = {
+ "gpio0",
+};
+
+static const char *const qup2_se0_l1_mira_groups[] = {
+ "gpio57",
+};
+
+static const char *const qup2_se0_l1_mirb_groups[] = {
+ "gpio1",
+};
+
+static const char *const qup2_se0_l2_mira_groups[] = {
+ "gpio58",
+};
+
+static const char *const qup2_se0_l2_mirb_groups[] = {
+ "gpio109",
+};
+
+static const char *const qup2_se0_l3_mira_groups[] = {
+ "gpio59",
+};
+
+static const char *const qup2_se0_l3_mirb_groups[] = {
+ "gpio107",
+};
+
+static const char *const qup2_se1_groups[] = {
+ "gpio60", "gpio61", "gpio62", "gpio63",
+};
+
+static const char *const qup2_se2_groups[] = {
+ "gpio64", "gpio65", "gpio66", "gpio67",
+};
+
+static const char *const qup2_se3_groups[] = {
+ "gpio68", "gpio69", "gpio70", "gpio71",
+};
+
+static const char *const qup2_se4_groups[] = {
+ "gpio2", "gpio3", "gpio118", "gpio119",
+};
+
+static const char *const qup2_se5_groups[] = {
+ "gpio80", "gpio81", "gpio82", "gpio83",
+};
+
+static const char *const qup2_se6_groups[] = {
+ "gpio76", "gpio77", "gpio78", "gpio79",
+};
+
+static const char *const qup2_se7_groups[] = {
+ "gpio72", "gpio106", "gpio74", "gpio75",
+};
+
+static const char * const resout_n_groups[] = {
+ "gpio92",
+};
+
+static const char *const sd_write_protect_groups[] = {
+ "gpio93",
+};
+
+static const char *const sdc40_groups[] = {
+ "gpio89",
+};
+
+static const char *const sdc41_groups[] = {
+ "gpio90",
+};
+
+static const char *const sdc42_groups[] = {
+ "gpio48",
+};
+
+static const char *const sdc43_groups[] = {
+ "gpio49",
+};
+
+static const char *const sdc4_clk_groups[] = {
+ "gpio50",
+};
+
+static const char *const sdc4_cmd_groups[] = {
+ "gpio51",
+};
+
+static const char * const tb_trig_sdc2_groups[] = {
+ "gpio64",
+};
+
+static const char * const tb_trig_sdc4_groups[] = {
+ "gpio91",
+};
+
+static const char * const tgu_ch0_trigout_groups[] = {
+ "gpio64",
+};
+
+static const char * const tgu_ch1_trigout_groups[] = {
+ "gpio65",
+};
+
+static const char * const tgu_ch2_trigout_groups[] = {
+ "gpio66",
+};
+
+static const char * const tgu_ch3_trigout_groups[] = {
+ "gpio67",
+};
+
+static const char *const tmess_prng0_groups[] = {
+ "gpio92",
+};
+
+static const char *const tmess_prng1_groups[] = {
+ "gpio94",
+};
+
+static const char *const tmess_prng2_groups[] = {
+ "gpio95",
+};
+
+static const char *const tmess_prng3_groups[] = {
+ "gpio96",
+};
+
+static const char *const tsense_pwm1_groups[] = {
+ "gpio50",
+};
+
+static const char *const tsense_pwm2_groups[] = {
+ "gpio50",
+};
+
+static const char *const tsense_pwm3_groups[] = {
+ "gpio50",
+};
+
+static const char *const uim0_clk_groups[] = {
+ "gpio131",
+};
+
+static const char *const uim0_data_groups[] = {
+ "gpio130",
+};
+
+static const char *const uim0_present_groups[] = {
+ "gpio27",
+};
+
+static const char *const uim0_reset_groups[] = {
+ "gpio132",
+};
+
+static const char *const uim1_clk_groups[] = {
+ "gpio135",
+};
+
+static const char *const uim1_data_groups[] = {
+ "gpio134",
+};
+
+static const char *const uim1_present_groups[] = {
+ "gpio26",
+};
+
+static const char *const uim1_reset_groups[] = {
+ "gpio136",
+};
+
+static const char *const usb1_hs_groups[] = {
+ "gpio90",
+};
+
+static const char *const usb_phy_groups[] = {
+ "gpio11",
+ "gpio48",
+};
+
+static const char *const vfr_0_groups[] = {
+ "gpio150",
+};
+
+static const char *const vfr_1_groups[] = {
+ "gpio155",
+};
+
+static const char *const vsense_trigger_mirnat_groups[] = {
+ "gpio24",
+};
+
+static const struct msm_function sm8550_functions[] = {
+ FUNCTION(gpio),
+ FUNCTION(aon_cci),
+ FUNCTION(aoss_cti),
+ FUNCTION(atest_char),
+ FUNCTION(atest_usb),
+ FUNCTION(audio_ext_mclk0),
+ FUNCTION(audio_ext_mclk1),
+ FUNCTION(audio_ref_clk),
+ FUNCTION(cam_aon_mclk4),
+ FUNCTION(cam_mclk),
+ FUNCTION(cci_async_in),
+ FUNCTION(cci_i2c_scl),
+ FUNCTION(cci_i2c_sda),
+ FUNCTION(cci_timer),
+ FUNCTION(cmu_rng),
+ FUNCTION(coex_uart1_rx),
+ FUNCTION(coex_uart1_tx),
+ FUNCTION(coex_uart2_rx),
+ FUNCTION(coex_uart2_tx),
+ FUNCTION(cri_trng),
+ FUNCTION(dbg_out_clk),
+ FUNCTION(ddr_bist_complete),
+ FUNCTION(ddr_bist_fail),
+ FUNCTION(ddr_bist_start),
+ FUNCTION(ddr_bist_stop),
+ FUNCTION(ddr_pxi0),
+ FUNCTION(ddr_pxi1),
+ FUNCTION(ddr_pxi2),
+ FUNCTION(ddr_pxi3),
+ FUNCTION(dp_hot),
+ FUNCTION(gcc_gp1),
+ FUNCTION(gcc_gp2),
+ FUNCTION(gcc_gp3),
+ FUNCTION(i2chub0_se0),
+ FUNCTION(i2chub0_se1),
+ FUNCTION(i2chub0_se2),
+ FUNCTION(i2chub0_se3),
+ FUNCTION(i2chub0_se4),
+ FUNCTION(i2chub0_se5),
+ FUNCTION(i2chub0_se6),
+ FUNCTION(i2chub0_se7),
+ FUNCTION(i2chub0_se8),
+ FUNCTION(i2chub0_se9),
+ FUNCTION(i2s0_data0),
+ FUNCTION(i2s0_data1),
+ FUNCTION(i2s0_sck),
+ FUNCTION(i2s0_ws),
+ FUNCTION(i2s1_data0),
+ FUNCTION(i2s1_data1),
+ FUNCTION(i2s1_sck),
+ FUNCTION(i2s1_ws),
+ FUNCTION(ibi_i3c),
+ FUNCTION(jitter_bist),
+ FUNCTION(mdp_vsync),
+ FUNCTION(mdp_vsync0_out),
+ FUNCTION(mdp_vsync1_out),
+ FUNCTION(mdp_vsync2_out),
+ FUNCTION(mdp_vsync3_out),
+ FUNCTION(mdp_vsync_e),
+ FUNCTION(nav_gpio0),
+ FUNCTION(nav_gpio1),
+ FUNCTION(nav_gpio2),
+ FUNCTION(pcie0_clk_req_n),
+ FUNCTION(pcie1_clk_req_n),
+ FUNCTION(phase_flag),
+ FUNCTION(pll_bist_sync),
+ FUNCTION(pll_clk_aux),
+ FUNCTION(prng_rosc0),
+ FUNCTION(prng_rosc1),
+ FUNCTION(prng_rosc2),
+ FUNCTION(prng_rosc3),
+ FUNCTION(qdss_cti),
+ FUNCTION(qdss_gpio),
+ FUNCTION(qlink0_enable),
+ FUNCTION(qlink0_request),
+ FUNCTION(qlink0_wmss),
+ FUNCTION(qlink1_enable),
+ FUNCTION(qlink1_request),
+ FUNCTION(qlink1_wmss),
+ FUNCTION(qlink2_enable),
+ FUNCTION(qlink2_request),
+ FUNCTION(qlink2_wmss),
+ FUNCTION(qspi0),
+ FUNCTION(qspi1),
+ FUNCTION(qspi2),
+ FUNCTION(qspi3),
+ FUNCTION(qspi_clk),
+ FUNCTION(qspi_cs),
+ FUNCTION(qup1_se0),
+ FUNCTION(qup1_se1),
+ FUNCTION(qup1_se2),
+ FUNCTION(qup1_se3),
+ FUNCTION(qup1_se4),
+ FUNCTION(qup1_se5),
+ FUNCTION(qup1_se6),
+ FUNCTION(qup1_se7),
+ FUNCTION(qup2_se0),
+ FUNCTION(qup2_se0_l0_mira),
+ FUNCTION(qup2_se0_l0_mirb),
+ FUNCTION(qup2_se0_l1_mira),
+ FUNCTION(qup2_se0_l1_mirb),
+ FUNCTION(qup2_se0_l2_mira),
+ FUNCTION(qup2_se0_l2_mirb),
+ FUNCTION(qup2_se0_l3_mira),
+ FUNCTION(qup2_se0_l3_mirb),
+ FUNCTION(qup2_se1),
+ FUNCTION(qup2_se2),
+ FUNCTION(qup2_se3),
+ FUNCTION(qup2_se4),
+ FUNCTION(qup2_se5),
+ FUNCTION(qup2_se6),
+ FUNCTION(qup2_se7),
+ FUNCTION(resout_n),
+ FUNCTION(sd_write_protect),
+ FUNCTION(sdc40),
+ FUNCTION(sdc41),
+ FUNCTION(sdc42),
+ FUNCTION(sdc43),
+ FUNCTION(sdc4_clk),
+ FUNCTION(sdc4_cmd),
+ FUNCTION(tb_trig_sdc2),
+ FUNCTION(tb_trig_sdc4),
+ FUNCTION(tgu_ch0_trigout),
+ FUNCTION(tgu_ch1_trigout),
+ FUNCTION(tgu_ch2_trigout),
+ FUNCTION(tgu_ch3_trigout),
+ FUNCTION(tmess_prng0),
+ FUNCTION(tmess_prng1),
+ FUNCTION(tmess_prng2),
+ FUNCTION(tmess_prng3),
+ FUNCTION(tsense_pwm1),
+ FUNCTION(tsense_pwm2),
+ FUNCTION(tsense_pwm3),
+ FUNCTION(uim0_clk),
+ FUNCTION(uim0_data),
+ FUNCTION(uim0_present),
+ FUNCTION(uim0_reset),
+ FUNCTION(uim1_clk),
+ FUNCTION(uim1_data),
+ FUNCTION(uim1_present),
+ FUNCTION(uim1_reset),
+ FUNCTION(usb1_hs),
+ FUNCTION(usb_phy),
+ FUNCTION(vfr_0),
+ FUNCTION(vfr_1),
+ FUNCTION(vsense_trigger_mirnat),
+};
+
+/*
+ * Every pin is maintained as a single group, and missing or non-existing pin
+ * would be maintained as dummy group to synchronize pin group index with
+ * pin descriptor registered with pinctrl core.
+ * Clients would not be able to request these dummy pin groups.
+ */
+static const struct msm_pingroup sm8550_groups[] = {
+ [0] = PINGROUP(0, cci_i2c_sda, qup2_se0_l0_mirb, ibi_i3c, phase_flag, _, _, _, _, _),
+ [1] = PINGROUP(1, cci_i2c_scl, qup2_se0_l1_mirb, ibi_i3c, _, _, _, _, _, _),
+ [2] = PINGROUP(2, qup2_se4, phase_flag, _, _, _, _, _, _, _),
+ [3] = PINGROUP(3, qup2_se4, phase_flag, _, _, _, _, _, _, _),
+ [4] = PINGROUP(4, i2chub0_se4, _, _, _, _, _, _, _, _),
+ [5] = PINGROUP(5, i2chub0_se4, _, _, _, _, _, _, _, _),
+ [6] = PINGROUP(6, i2chub0_se5, _, _, _, _, _, _, _, _),
+ [7] = PINGROUP(7, i2chub0_se5, _, _, _, _, _, _, _, _),
+ [8] = PINGROUP(8, i2chub0_se6, _, _, _, _, _, _, _, _),
+ [9] = PINGROUP(9, i2chub0_se6, _, _, _, _, _, _, _, _),
+ [10] = PINGROUP(10, i2chub0_se7, qdss_cti, phase_flag, _, _, _, _, _, _),
+ [11] = PINGROUP(11, i2chub0_se7, usb_phy, qdss_cti, phase_flag, _, _, _, _, _),
+ [12] = PINGROUP(12, phase_flag, _, _, _, _, _, _, _, _),
+ [13] = PINGROUP(13, phase_flag, _, _, _, _, _, _, _, _),
+ [14] = PINGROUP(14, _, _, _, _, _, _, _, _, _),
+ [15] = PINGROUP(15, _, _, _, _, _, _, _, _, _),
+ [16] = PINGROUP(16, i2chub0_se0, _, _, _, _, _, _, _, _),
+ [17] = PINGROUP(17, i2chub0_se0, _, _, _, _, _, _, _, _),
+ [18] = PINGROUP(18, i2chub0_se1, _, _, _, _, _, _, _, _),
+ [19] = PINGROUP(19, i2chub0_se1, _, _, _, _, _, _, _, _),
+ [20] = PINGROUP(20, i2chub0_se2, pll_bist_sync, _, _, _, _, _, _, _),
+ [21] = PINGROUP(21, i2chub0_se2, _, _, _, _, _, _, _, _),
+ [22] = PINGROUP(22, i2chub0_se3, _, _, _, _, _, _, _, _),
+ [23] = PINGROUP(23, i2chub0_se3, _, _, _, _, _, _, _, _),
+ [24] = PINGROUP(24, qup1_se7, vsense_trigger_mirnat, _, _, _, _, _, _, _),
+ [25] = PINGROUP(25, qup1_se7, _, _, _, _, _, _, _, _),
+ [26] = PINGROUP(26, qup1_se7, uim1_present, _, _, _, _, _, _, _),
+ [27] = PINGROUP(27, qup1_se7, uim0_present, _, _, _, _, _, _, _),
+ [28] = PINGROUP(28, qup1_se0, ibi_i3c, _, _, _, _, _, _, _),
+ [29] = PINGROUP(29, qup1_se0, ibi_i3c, _, _, _, _, _, _, _),
+ [30] = PINGROUP(30, qup1_se0, _, _, _, _, _, _, _, _),
+ [31] = PINGROUP(31, qup1_se0, _, _, _, _, _, _, _, _),
+ [32] = PINGROUP(32, qup1_se1, ibi_i3c, _, _, _, _, _, _, _),
+ [33] = PINGROUP(33, qup1_se1, ibi_i3c, _, _, _, _, _, _, _),
+ [34] = PINGROUP(34, qup1_se1, _, _, _, _, _, _, _, _),
+ [35] = PINGROUP(35, qup1_se1, _, _, _, _, _, _, _, _),
+ [36] = PINGROUP(36, qup1_se2, ddr_bist_fail, _, _, _, _, _, _, _),
+ [37] = PINGROUP(37, qup1_se2, ddr_bist_start, _, atest_usb, _, _, _, _, _),
+ [38] = PINGROUP(38, qup1_se2, _, _, _, _, _, _, _, _),
+ [39] = PINGROUP(39, qup1_se2, _, atest_usb, _, _, _, _, _, _),
+ [40] = PINGROUP(40, qup1_se3, qup1_se2, ddr_bist_complete, _, ddr_pxi1, _, _, _, _),
+ [41] = PINGROUP(41, qup1_se3, qup1_se2, ddr_bist_stop, _, ddr_pxi1, _, _, _, _),
+ [42] = PINGROUP(42, qup1_se3, qup1_se2, _, _, _, _, _, _, _),
+ [43] = PINGROUP(43, qup1_se3, jitter_bist, ddr_pxi3, _, _, _, _, _, _),
+ [44] = PINGROUP(44, qup1_se4, aoss_cti, ddr_pxi3, _, _, _, _, _, _),
+ [45] = PINGROUP(45, qup1_se4, aoss_cti, ddr_pxi2, _, _, _, _, _, _),
+ [46] = PINGROUP(46, qup1_se4, aoss_cti, _, _, _, _, _, _, _),
+ [47] = PINGROUP(47, qup1_se4, aoss_cti, dp_hot, ddr_pxi2, _, _, _, _, _),
+ [48] = PINGROUP(48, usb_phy, qup1_se6, qspi2, sdc42, _, _, _, _, _),
+ [49] = PINGROUP(49, qup1_se6, qspi3, sdc43, _, _, _, _, _, _),
+ [50] = PINGROUP(50, qup1_se6, qspi_clk, sdc4_clk, tsense_pwm1, tsense_pwm2, tsense_pwm3, _, _, _),
+ [51] = PINGROUP(51, qup1_se6, qspi_cs, sdc4_cmd, ddr_pxi0, _, _, _, _, _),
+ [52] = PINGROUP(52, _, qup1_se5, ddr_pxi0, _, _, _, _, _, _),
+ [53] = PINGROUP(53, _, qup1_se5, _, _, _, _, _, _, _),
+ [54] = PINGROUP(54, _, qup1_se5, _, _, _, _, _, _, _),
+ [55] = PINGROUP(55, qup1_se5, atest_usb, _, _, _, _, _, _, _),
+ [56] = PINGROUP(56, qup2_se0_l0_mira, ibi_i3c, _, _, _, _, _, _, _),
+ [57] = PINGROUP(57, qup2_se0_l1_mira, ibi_i3c, _, _, _, _, _, _, _),
+ [58] = PINGROUP(58, qup2_se0_l2_mira, _, _, _, _, _, _, _, _),
+ [59] = PINGROUP(59, qup2_se0_l3_mira, phase_flag, _, qdss_gpio, _, _, _, _, _),
+ [60] = PINGROUP(60, qup2_se1, ibi_i3c, _, _, _, _, _, _, _),
+ [61] = PINGROUP(61, qup2_se1, ibi_i3c, _, _, _, _, _, _, _),
+ [62] = PINGROUP(62, qup2_se1, _, _, _, _, _, _, _, _),
+ [63] = PINGROUP(63, qup2_se1, qup2_se0, phase_flag, _, _, _, _, _, _),
+ [64] = PINGROUP(64, qup2_se2, tb_trig_sdc2, phase_flag, tgu_ch0_trigout, _, qdss_gpio, _, _, _),
+ [65] = PINGROUP(65, qup2_se2, phase_flag, tgu_ch1_trigout, _, _, _, _, _, _),
+ [66] = PINGROUP(66, qup2_se2, qup2_se0, tgu_ch2_trigout, _, _, _, _, _, _),
+ [67] = PINGROUP(67, qup2_se2, qup2_se0, phase_flag, tgu_ch3_trigout, _, _, _, _, _),
+ [68] = PINGROUP(68, qup2_se3, phase_flag, _, _, _, _, _, _, _),
+ [69] = PINGROUP(69, qup2_se3, phase_flag, _, _, _, _, _, _, _),
+ [70] = PINGROUP(70, qup2_se3, _, _, _, _, _, _, _, _),
+ [71] = PINGROUP(71, cci_async_in, qup2_se3, _, _, _, _, _, _, _),
+ [72] = PINGROUP(72, cci_async_in, qup2_se7, _, _, _, _, _, _, _),
+ [73] = PINGROUP(73, qdss_gpio, _, _, _, _, _, _, _, _),
+ [74] = PINGROUP(74, cci_i2c_sda, qup2_se7, _, _, _, _, _, _, _),
+ [75] = PINGROUP(75, cci_i2c_scl, qup2_se7, qdss_cti, phase_flag, _, _, _, _, _),
+ [76] = PINGROUP(76, qup2_se6, phase_flag, _, _, _, _, _, _, _),
+ [77] = PINGROUP(77, qup2_se6, phase_flag, _, _, _, _, _, _, _),
+ [78] = PINGROUP(78, qup2_se6, _, _, _, _, _, _, _, _),
+ [79] = PINGROUP(79, qup2_se6, qdss_cti, phase_flag, _, _, _, _, _, _),
+ [80] = PINGROUP(80, qup2_se5, phase_flag, _, _, _, _, _, _, _),
+ [81] = PINGROUP(81, qup2_se5, phase_flag, _, _, _, _, _, _, _),
+ [82] = PINGROUP(82, qup2_se5, _, _, _, _, _, _, _, _),
+ [83] = PINGROUP(83, qup2_se5, phase_flag, _, _, _, _, _, _, _),
+ [84] = PINGROUP(84, i2chub0_se9, _, _, _, _, _, _, _, _),
+ [85] = PINGROUP(85, i2chub0_se9, _, _, _, _, _, _, _, _),
+ [86] = PINGROUP(86, mdp_vsync, mdp_vsync0_out, mdp_vsync1_out, gcc_gp1, _, _, _, _, _),
+ [87] = PINGROUP(87, mdp_vsync, mdp_vsync2_out, mdp_vsync3_out, gcc_gp2, _, _, _, _, _),
+ [88] = PINGROUP(88, mdp_vsync_e, gcc_gp3, _, _, _, _, _, _, _),
+ [89] = PINGROUP(89, qspi0, sdc40, dbg_out_clk, _, _, _, _, _, _),
+ [90] = PINGROUP(90, usb1_hs, qspi1, sdc41, _, _, _, _, _, _),
+ [91] = PINGROUP(91, qspi_cs, tb_trig_sdc4, _, _, _, _, _, _, _),
+ [92] = PINGROUP(92, resout_n, phase_flag, tmess_prng0, _, _, _, _, _, _),
+ [93] = PINGROUP(93, sd_write_protect, _, _, _, _, _, _, _, _),
+ [94] = PINGROUP(94, phase_flag, tmess_prng1, _, _, _, _, _, _, _),
+ [95] = PINGROUP(95, pcie0_clk_req_n, phase_flag, tmess_prng2, _, _, _, _, _, _),
+ [96] = PINGROUP(96, phase_flag, tmess_prng3, _, _, _, _, _, _, _),
+ [97] = PINGROUP(97, phase_flag, _, _, _, _, _, _, _, _),
+ [98] = PINGROUP(98, pcie1_clk_req_n, phase_flag, _, _, _, _, _, _, _),
+ [99] = PINGROUP(99, phase_flag, _, _, _, _, _, _, _, _),
+ [100] = PINGROUP(100, cam_mclk, qdss_gpio, _, _, _, _, _, _, _),
+ [101] = PINGROUP(101, cam_mclk, qdss_gpio, _, _, _, _, _, _, _),
+ [102] = PINGROUP(102, cam_mclk, qdss_gpio, _, _, _, _, _, _, _),
+ [103] = PINGROUP(103, cam_mclk, qdss_gpio, _, _, _, _, _, _, _),
+ [104] = PINGROUP(104, cam_aon_mclk4, qdss_gpio, _, _, _, _, _, _, _),
+ [105] = PINGROUP(105, cam_mclk, qdss_gpio, _, _, _, _, _, _, _),
+ [106] = PINGROUP(106, cam_mclk, qup2_se7, _, _, _, _, _, _, _),
+ [107] = PINGROUP(107, cam_mclk, qup2_se0_l3_mirb, pll_clk_aux, _, _, _, _, _, _),
+ [108] = PINGROUP(108, _, _, _, _, _, _, _, _, _),
+ [109] = PINGROUP(109, cci_async_in, qup2_se0_l2_mirb, _, _, _, _, _, _, _),
+ [110] = PINGROUP(110, cci_i2c_sda, qdss_gpio, _, _, _, _, _, _, _),
+ [111] = PINGROUP(111, cci_i2c_scl, qdss_gpio, _, _, _, _, _, _, _),
+ [112] = PINGROUP(112, cci_i2c_sda, qdss_gpio, _, _, _, _, _, _, _),
+ [113] = PINGROUP(113, cci_i2c_scl, qdss_gpio, _, _, _, _, _, _, _),
+ [114] = PINGROUP(114, cci_i2c_sda, qdss_gpio, _, _, _, _, _, _, _),
+ [115] = PINGROUP(115, cci_i2c_scl, qdss_gpio, _, _, _, _, _, _, _),
+ [116] = PINGROUP(116, cci_timer, phase_flag, _, qdss_gpio, _, _, _, _, _),
+ [117] = PINGROUP(117, cci_timer, phase_flag, _, qdss_gpio, _, _, _, _, _),
+ [118] = PINGROUP(118, qup2_se4, cci_timer, _, _, _, _, _, _, _),
+ [119] = PINGROUP(119, qup2_se4, cci_timer, phase_flag, _, _, _, _, _, _),
+ [120] = PINGROUP(120, cci_timer, phase_flag, _, qdss_gpio, _, _, _, _, _),
+ [121] = PINGROUP(121, i2s1_sck, _, _, _, _, _, _, _, _),
+ [122] = PINGROUP(122, i2s1_data0, cmu_rng, _, _, _, _, _, _, _),
+ [123] = PINGROUP(123, i2s1_ws, _, _, _, _, _, _, _, _),
+ [124] = PINGROUP(124, i2s1_data1, audio_ext_mclk1, audio_ref_clk, _, _, _, _, _, _),
+ [125] = PINGROUP(125, audio_ext_mclk0, _, _, _, _, _, _, _, _),
+ [126] = PINGROUP(126, i2s0_sck, _, _, _, _, _, _, _, _),
+ [127] = PINGROUP(127, i2s0_data0, cmu_rng, _, _, _, _, _, _, _),
+ [128] = PINGROUP(128, i2s0_data1, cmu_rng, _, _, _, _, _, _, _),
+ [129] = PINGROUP(129, i2s0_ws, cmu_rng, _, _, _, _, _, _, _),
+ [130] = PINGROUP(130, uim0_data, atest_char, _, _, _, _, _, _, _),
+ [131] = PINGROUP(131, uim0_clk, _, _, _, _, _, _, _, _),
+ [132] = PINGROUP(132, uim0_reset, atest_char, _, _, _, _, _, _, _),
+ [133] = PINGROUP(133, mdp_vsync, atest_char, _, _, _, _, _, _, _),
+ [134] = PINGROUP(134, uim1_data, gcc_gp1, atest_char, _, _, _, _, _, _),
+ [135] = PINGROUP(135, uim1_clk, gcc_gp2, atest_char, _, _, _, _, _, _),
+ [136] = PINGROUP(136, uim1_reset, gcc_gp3, _, _, _, _, _, _, _),
+ [137] = PINGROUP(137, mdp_vsync, _, _, _, _, _, _, _, _),
+ [138] = PINGROUP(138, _, _, qdss_gpio, _, _, _, _, _, _),
+ [139] = PINGROUP(139, _, _, qdss_gpio, _, _, _, _, _, _),
+ [140] = PINGROUP(140, _, _, qdss_gpio, _, _, _, _, _, _),
+ [141] = PINGROUP(141, _, _, qdss_gpio, _, _, _, _, _, _),
+ [142] = PINGROUP(142, _, _, qdss_gpio, _, _, _, _, _, _),
+ [143] = PINGROUP(143, _, _, qdss_gpio, _, _, _, _, _, _),
+ [144] = PINGROUP(144, _, _, qdss_gpio, _, _, _, _, _, _),
+ [145] = PINGROUP(145, _, _, qdss_gpio, _, _, _, _, _, _),
+ [146] = PINGROUP(146, _, _, _, _, _, _, _, _, _),
+ [147] = PINGROUP(147, _, _, _, _, _, _, _, _, _),
+ [148] = PINGROUP(148, coex_uart1_rx, qdss_gpio, atest_usb, _, _, _, _, _, _),
+ [149] = PINGROUP(149, coex_uart1_tx, qdss_gpio, atest_usb, _, _, _, _, _, _),
+ [150] = PINGROUP(150, coex_uart2_rx, _, vfr_0, qdss_gpio, _, _, _, _, _),
+ [151] = PINGROUP(151, coex_uart2_tx, _, qdss_gpio, _, _, _, _, _, _),
+ [152] = PINGROUP(152, _, qdss_gpio, _, _, _, _, _, _, _),
+ [153] = PINGROUP(153, _, nav_gpio2, qdss_gpio, _, _, _, _, _, _),
+ [154] = PINGROUP(154, nav_gpio0, qdss_gpio, _, _, _, _, _, _, _),
+ [155] = PINGROUP(155, nav_gpio1, vfr_1, qdss_gpio, _, _, _, _, _, _),
+ [156] = PINGROUP(156, qlink0_request, qdss_gpio, _, _, _, _, _, _, _),
+ [157] = PINGROUP(157, qlink0_enable, qdss_gpio, _, _, _, _, _, _, _),
+ [158] = PINGROUP(158, qlink0_wmss, _, _, _, _, _, _, _, _),
+ [159] = PINGROUP(159, qlink1_request, qdss_cti, _, _, _, _, _, _, _),
+ [160] = PINGROUP(160, qlink1_enable, qdss_cti, _, _, _, _, _, _, _),
+ [161] = PINGROUP(161, qlink1_wmss, qdss_cti, _, _, _, _, _, _, _),
+ [162] = PINGROUP(162, qlink2_request, qdss_cti, _, _, _, _, _, _, _),
+ [163] = PINGROUP(163, qlink2_enable, _, _, _, _, _, _, _, _),
+ [164] = PINGROUP(164, qlink2_wmss, _, _, _, _, _, _, _, _),
+ [165] = PINGROUP(165, _, _, _, _, _, _, _, _, _),
+ [166] = PINGROUP(166, _, _, _, _, _, _, _, _, _),
+ [167] = PINGROUP(167, _, _, _, _, _, _, _, _, _),
+ [168] = PINGROUP(168, _, _, _, _, _, _, _, _, _),
+ [169] = PINGROUP(169, _, _, _, _, _, _, _, _, _),
+ [170] = PINGROUP(170, _, _, _, _, _, _, _, _, _),
+ [171] = PINGROUP(171, _, _, _, _, _, _, _, _, _),
+ [172] = PINGROUP(172, _, _, _, _, _, _, _, _, _),
+ [173] = PINGROUP(173, _, _, _, _, _, _, _, _, _),
+ [174] = PINGROUP(174, _, _, _, _, _, _, _, _, _),
+ [175] = PINGROUP(175, _, _, _, _, _, _, _, _, _),
+ [176] = PINGROUP(176, _, _, _, _, _, _, _, _, _),
+ [177] = PINGROUP(177, _, _, _, _, _, _, _, _, _),
+ [178] = PINGROUP(178, _, _, _, _, _, _, _, _, _),
+ [179] = PINGROUP(179, _, _, _, _, _, _, _, _, _),
+ [180] = PINGROUP(180, _, _, _, _, _, _, _, _, _),
+ [181] = PINGROUP(181, prng_rosc3, _, _, _, _, _, _, _, _),
+ [182] = PINGROUP(182, prng_rosc2, _, _, _, _, _, _, _, _),
+ [183] = PINGROUP(183, prng_rosc1, _, _, _, _, _, _, _, _),
+ [184] = PINGROUP(184, _, _, _, _, _, _, _, _, _),
+ [185] = PINGROUP(185, _, _, _, _, _, _, _, _, _),
+ [186] = PINGROUP(186, prng_rosc0, _, _, _, _, _, _, _, _),
+ [187] = PINGROUP(187, cri_trng, _, _, _, _, _, _, _, _),
+ [188] = PINGROUP(188, _, _, _, _, _, _, _, _, _),
+ [189] = PINGROUP(189, _, _, _, _, _, _, _, _, _),
+ [190] = PINGROUP(190, _, _, _, _, _, _, _, _, _),
+ [191] = PINGROUP(191, _, _, _, _, _, _, _, _, _),
+ [192] = PINGROUP(192, _, _, _, _, _, _, _, _, _),
+ [193] = PINGROUP(193, _, _, _, _, _, _, _, _, _),
+ [194] = PINGROUP(194, _, _, _, _, _, _, _, _, _),
+ [195] = PINGROUP(195, _, _, _, _, _, _, _, _, _),
+ [196] = PINGROUP(196, _, _, _, _, _, _, _, _, _),
+ [197] = PINGROUP(197, _, _, _, _, _, _, _, _, _),
+ [198] = PINGROUP(198, _, _, _, _, _, _, _, _, _),
+ [199] = PINGROUP(199, _, _, _, _, _, _, _, _, _),
+ [200] = PINGROUP(200, _, _, _, _, _, _, _, _, _),
+ [201] = PINGROUP(201, _, _, _, _, _, _, _, _, _),
+ [202] = PINGROUP(202, _, _, _, _, _, _, _, _, _),
+ [203] = PINGROUP(203, _, _, _, _, _, _, _, _, _),
+ [204] = PINGROUP(204, _, _, _, _, _, _, _, _, _),
+ [205] = PINGROUP(205, _, _, _, _, _, _, _, _, _),
+ [206] = PINGROUP(206, i2chub0_se8, _, _, _, _, _, _, _, _),
+ [207] = PINGROUP(207, i2chub0_se8, _, _, _, _, _, _, _, _),
+ [208] = PINGROUP(208, aon_cci, _, _, _, _, _, _, _, _),
+ [209] = PINGROUP(209, aon_cci, _, _, _, _, _, _, _, _),
+ [210] = UFS_RESET(ufs_reset, 0xde000),
+ [211] = SDC_QDSD_PINGROUP(sdc2_clk, 0xd6000, 14, 6),
+ [212] = SDC_QDSD_PINGROUP(sdc2_cmd, 0xd6000, 11, 3),
+ [213] = SDC_QDSD_PINGROUP(sdc2_data, 0xd6000, 9, 0),
+};
+
+static const struct msm_gpio_wakeirq_map sm8550_pdc_map[] = {
+ { 0, 118 }, { 2, 90 }, { 3, 101 }, { 8, 60 }, { 9, 67 },
+ { 11, 103 }, { 14, 136 }, { 15, 78 }, { 16, 138 }, { 17, 80 },
+ { 18, 71 }, { 19, 59 }, { 25, 57 }, { 26, 74 }, { 27, 76 },
+ { 28, 62 }, { 31, 88 }, { 32, 63 }, { 35, 124 }, { 39, 92 },
+ { 40, 77 }, { 41, 83 }, { 43, 86 }, { 44, 75 }, { 45, 93 },
+ { 46, 96 }, { 47, 64 }, { 48, 110 }, { 51, 89 }, { 55, 95 },
+ { 56, 68 }, { 59, 87 }, { 60, 65 }, { 62, 100 }, { 63, 81 },
+ { 67, 79 }, { 71, 102 }, { 73, 82 }, { 75, 72 }, { 79, 140 },
+ { 82, 105 }, { 83, 104 }, { 84, 126 }, { 85, 142 }, { 86, 106 },
+ { 87, 107 }, { 88, 61 }, { 89, 111 }, { 95, 108 }, { 96, 109 },
+ { 98, 97 }, { 99, 58 }, { 107, 139 }, { 119, 94 }, { 120, 135 },
+ { 133, 52 }, { 137, 84 }, { 148, 66 }, { 150, 73 }, { 153, 70 },
+ { 154, 53 }, { 155, 69 }, { 156, 54 }, { 159, 55 }, { 162, 56 },
+ { 166, 116 }, { 169, 119 }, { 171, 120 }, { 172, 85 }, { 174, 98 },
+ { 176, 112 }, { 177, 51 }, { 181, 114 }, { 182, 115 }, { 185, 117 },
+ { 187, 91 }, { 188, 123 }, { 190, 127 }, { 191, 113 }, { 192, 128 },
+ { 193, 129 }, { 196, 133 }, { 197, 134 }, { 198, 50 }, { 199, 99 },
+ { 200, 49 }, { 201, 48 }, { 203, 125 }, { 205, 141 }, { 206, 137 },
+ { 207, 47 }, { 208, 121 }, { 209, 122 },
+};
+
+static const struct msm_pinctrl_soc_data sm8550_tlmm = {
+ .pins = sm8550_pins,
+ .npins = ARRAY_SIZE(sm8550_pins),
+ .functions = sm8550_functions,
+ .nfunctions = ARRAY_SIZE(sm8550_functions),
+ .groups = sm8550_groups,
+ .ngroups = ARRAY_SIZE(sm8550_groups),
+ .ngpios = 211,
+ .wakeirq_map = sm8550_pdc_map,
+ .nwakeirq_map = ARRAY_SIZE(sm8550_pdc_map),
+ .egpio_func = 9,
+};
+
+static int sm8550_tlmm_probe(struct platform_device *pdev)
+{
+ return msm_pinctrl_probe(pdev, &sm8550_tlmm);
+}
+
+static const struct of_device_id sm8550_tlmm_of_match[] = {
+ { .compatible = "qcom,sm8550-tlmm", },
+ {},
+};
+
+static struct platform_driver sm8550_tlmm_driver = {
+ .driver = {
+ .name = "sm8550-tlmm",
+ .of_match_table = sm8550_tlmm_of_match,
+ },
+ .probe = sm8550_tlmm_probe,
+ .remove = msm_pinctrl_remove,
+};
+
+static int __init sm8550_tlmm_init(void)
+{
+ return platform_driver_register(&sm8550_tlmm_driver);
+}
+arch_initcall(sm8550_tlmm_init);
+
+static void __exit sm8550_tlmm_exit(void)
+{
+ platform_driver_unregister(&sm8550_tlmm_driver);
+}
+module_exit(sm8550_tlmm_exit);
+
+MODULE_DESCRIPTION("QTI SM8550 TLMM driver");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(of, sm8550_tlmm_of_match);
diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
index 89695b5a2ce7..ea3485344f06 100644
--- a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
+++ b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
@@ -1146,7 +1146,7 @@ static int pmic_gpio_probe(struct platform_device *pdev)
gpio_irq_chip_set_chip(girq, &spmi_gpio_irq_chip);
girq->default_type = IRQ_TYPE_NONE;
girq->handler = handle_level_irq;
- girq->fwnode = of_node_to_fwnode(state->dev->of_node);
+ girq->fwnode = dev_fwnode(state->dev);
girq->parent_domain = parent_domain;
girq->child_to_parent_hwirq = pmic_gpio_child_to_parent_hwirq;
girq->populate_parent_alloc_arg = pmic_gpio_populate_parent_fwspec;
@@ -1221,6 +1221,10 @@ static const struct of_device_id pmic_gpio_of_match[] = {
{ .compatible = "qcom,pm8350b-gpio", .data = (void *) 8 },
{ .compatible = "qcom,pm8350c-gpio", .data = (void *) 9 },
{ .compatible = "qcom,pm8450-gpio", .data = (void *) 4 },
+ { .compatible = "qcom,pm8550-gpio", .data = (void *) 12 },
+ { .compatible = "qcom,pm8550b-gpio", .data = (void *) 12 },
+ { .compatible = "qcom,pm8550ve-gpio", .data = (void *) 8 },
+ { .compatible = "qcom,pm8550vs-gpio", .data = (void *) 6 },
{ .compatible = "qcom,pm8916-gpio", .data = (void *) 4 },
{ .compatible = "qcom,pm8941-gpio", .data = (void *) 36 },
/* pm8950 has 8 GPIOs with holes on 3 */
@@ -1232,11 +1236,13 @@ static const struct of_device_id pmic_gpio_of_match[] = {
{ .compatible = "qcom,pmi8994-gpio", .data = (void *) 10 },
{ .compatible = "qcom,pmi8998-gpio", .data = (void *) 14 },
{ .compatible = "qcom,pmk8350-gpio", .data = (void *) 4 },
+ { .compatible = "qcom,pmk8550-gpio", .data = (void *) 6 },
{ .compatible = "qcom,pmm8155au-gpio", .data = (void *) 10 },
/* pmp8074 has 12 GPIOs with holes on 1 and 12 */
{ .compatible = "qcom,pmp8074-gpio", .data = (void *) 12 },
{ .compatible = "qcom,pmr735a-gpio", .data = (void *) 4 },
{ .compatible = "qcom,pmr735b-gpio", .data = (void *) 4 },
+ { .compatible = "qcom,pmr735d-gpio", .data = (void *) 2 },
/* pms405 has 12 GPIOs with holes on 1, 9, and 10 */
{ .compatible = "qcom,pms405-gpio", .data = (void *) 12 },
/* pmx55 has 11 GPIOs with holes on 3, 7, 10, 11 */
diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c b/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
index 063177b79927..644fb4a0e72a 100644
--- a/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
+++ b/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
@@ -927,7 +927,7 @@ static int pmic_mpp_probe(struct platform_device *pdev)
girq->chip = &state->irq;
girq->default_type = IRQ_TYPE_NONE;
girq->handler = handle_level_irq;
- girq->fwnode = of_node_to_fwnode(state->dev->of_node);
+ girq->fwnode = dev_fwnode(state->dev);
girq->parent_domain = parent_domain;
girq->child_to_parent_hwirq = pmic_mpp_child_to_parent_hwirq;
girq->populate_parent_alloc_arg = gpiochip_populate_parent_fwspec_fourcell;
diff --git a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
index 99314925bb13..e973001e5c88 100644
--- a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
+++ b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
@@ -791,7 +791,7 @@ static int pm8xxx_gpio_probe(struct platform_device *pdev)
girq->chip = &pm8xxx_irq_chip;
girq->default_type = IRQ_TYPE_NONE;
girq->handler = handle_level_irq;
- girq->fwnode = of_node_to_fwnode(pctrl->dev->of_node);
+ girq->fwnode = dev_fwnode(pctrl->dev);
girq->parent_domain = parent_domain;
girq->child_to_parent_hwirq = pm8xxx_child_to_parent_hwirq;
girq->populate_parent_alloc_arg = gpiochip_populate_parent_fwspec_twocell;
diff --git a/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c b/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c
index a46650db678a..86f66cb8bf30 100644
--- a/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c
+++ b/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c
@@ -881,7 +881,7 @@ static int pm8xxx_mpp_probe(struct platform_device *pdev)
girq->chip = &pctrl->irq;
girq->default_type = IRQ_TYPE_NONE;
girq->handler = handle_level_irq;
- girq->fwnode = of_node_to_fwnode(pctrl->dev->of_node);
+ girq->fwnode = dev_fwnode(pctrl->dev);
girq->parent_domain = parent_domain;
if (of_device_is_compatible(pdev->dev.of_node, "qcom,pm8821-mpp"))
girq->child_to_parent_hwirq = pm8821_mpp_child_to_parent_hwirq;
diff --git a/drivers/pinctrl/ralink/pinctrl-mt7620.c b/drivers/pinctrl/ralink/pinctrl-mt7620.c
index 22ff16eff02f..4e8d26bb3430 100644
--- a/drivers/pinctrl/ralink/pinctrl-mt7620.c
+++ b/drivers/pinctrl/ralink/pinctrl-mt7620.c
@@ -54,20 +54,20 @@
#define MT7620_GPIO_MODE_EPHY 15
#define MT7620_GPIO_MODE_PA 20
-static struct ralink_pmx_func i2c_func[] = { FUNC("i2c", 0, 1, 2) };
-static struct ralink_pmx_func spi_func[] = { FUNC("spi", 0, 3, 4) };
-static struct ralink_pmx_func uartlite_func[] = { FUNC("uartlite", 0, 15, 2) };
-static struct ralink_pmx_func mdio_func[] = {
+static struct ralink_pmx_func i2c_grp[] = { FUNC("i2c", 0, 1, 2) };
+static struct ralink_pmx_func spi_grp[] = { FUNC("spi", 0, 3, 4) };
+static struct ralink_pmx_func uartlite_grp[] = { FUNC("uartlite", 0, 15, 2) };
+static struct ralink_pmx_func mdio_grp[] = {
FUNC("mdio", MT7620_GPIO_MODE_MDIO, 22, 2),
FUNC("refclk", MT7620_GPIO_MODE_MDIO_REFCLK, 22, 2),
};
-static struct ralink_pmx_func rgmii1_func[] = { FUNC("rgmii1", 0, 24, 12) };
-static struct ralink_pmx_func refclk_func[] = { FUNC("spi refclk", 0, 37, 3) };
-static struct ralink_pmx_func ephy_func[] = { FUNC("ephy", 0, 40, 5) };
-static struct ralink_pmx_func rgmii2_func[] = { FUNC("rgmii2", 0, 60, 12) };
-static struct ralink_pmx_func wled_func[] = { FUNC("wled", 0, 72, 1) };
-static struct ralink_pmx_func pa_func[] = { FUNC("pa", 0, 18, 4) };
-static struct ralink_pmx_func uartf_func[] = {
+static struct ralink_pmx_func rgmii1_grp[] = { FUNC("rgmii1", 0, 24, 12) };
+static struct ralink_pmx_func refclk_grp[] = { FUNC("spi refclk", 0, 37, 3) };
+static struct ralink_pmx_func ephy_grp[] = { FUNC("ephy", 0, 40, 5) };
+static struct ralink_pmx_func rgmii2_grp[] = { FUNC("rgmii2", 0, 60, 12) };
+static struct ralink_pmx_func wled_grp[] = { FUNC("wled", 0, 72, 1) };
+static struct ralink_pmx_func pa_grp[] = { FUNC("pa", 0, 18, 4) };
+static struct ralink_pmx_func uartf_grp[] = {
FUNC("uartf", MT7620_GPIO_MODE_UARTF, 7, 8),
FUNC("pcm uartf", MT7620_GPIO_MODE_PCM_UARTF, 7, 8),
FUNC("pcm i2s", MT7620_GPIO_MODE_PCM_I2S, 7, 8),
@@ -76,202 +76,202 @@ static struct ralink_pmx_func uartf_func[] = {
FUNC("gpio uartf", MT7620_GPIO_MODE_GPIO_UARTF, 7, 4),
FUNC("gpio i2s", MT7620_GPIO_MODE_GPIO_I2S, 7, 4),
};
-static struct ralink_pmx_func wdt_func[] = {
+static struct ralink_pmx_func wdt_grp[] = {
FUNC("wdt rst", 0, 17, 1),
FUNC("wdt refclk", 0, 17, 1),
};
-static struct ralink_pmx_func pcie_rst_func[] = {
+static struct ralink_pmx_func pcie_rst_grp[] = {
FUNC("pcie rst", MT7620_GPIO_MODE_PCIE_RST, 36, 1),
FUNC("pcie refclk", MT7620_GPIO_MODE_PCIE_REF, 36, 1)
};
-static struct ralink_pmx_func nd_sd_func[] = {
+static struct ralink_pmx_func nd_sd_grp[] = {
FUNC("nand", MT7620_GPIO_MODE_NAND, 45, 15),
FUNC("sd", MT7620_GPIO_MODE_SD, 47, 13)
};
static struct ralink_pmx_group mt7620a_pinmux_data[] = {
- GRP("i2c", i2c_func, 1, MT7620_GPIO_MODE_I2C),
- GRP("uartf", uartf_func, MT7620_GPIO_MODE_UART0_MASK,
+ GRP("i2c", i2c_grp, 1, MT7620_GPIO_MODE_I2C),
+ GRP("uartf", uartf_grp, MT7620_GPIO_MODE_UART0_MASK,
MT7620_GPIO_MODE_UART0_SHIFT),
- GRP("spi", spi_func, 1, MT7620_GPIO_MODE_SPI),
- GRP("uartlite", uartlite_func, 1, MT7620_GPIO_MODE_UART1),
- GRP_G("wdt", wdt_func, MT7620_GPIO_MODE_WDT_MASK,
+ GRP("spi", spi_grp, 1, MT7620_GPIO_MODE_SPI),
+ GRP("uartlite", uartlite_grp, 1, MT7620_GPIO_MODE_UART1),
+ GRP_G("wdt", wdt_grp, MT7620_GPIO_MODE_WDT_MASK,
MT7620_GPIO_MODE_WDT_GPIO, MT7620_GPIO_MODE_WDT_SHIFT),
- GRP_G("mdio", mdio_func, MT7620_GPIO_MODE_MDIO_MASK,
+ GRP_G("mdio", mdio_grp, MT7620_GPIO_MODE_MDIO_MASK,
MT7620_GPIO_MODE_MDIO_GPIO, MT7620_GPIO_MODE_MDIO_SHIFT),
- GRP("rgmii1", rgmii1_func, 1, MT7620_GPIO_MODE_RGMII1),
- GRP("spi refclk", refclk_func, 1, MT7620_GPIO_MODE_SPI_REF_CLK),
- GRP_G("pcie", pcie_rst_func, MT7620_GPIO_MODE_PCIE_MASK,
+ GRP("rgmii1", rgmii1_grp, 1, MT7620_GPIO_MODE_RGMII1),
+ GRP("spi refclk", refclk_grp, 1, MT7620_GPIO_MODE_SPI_REF_CLK),
+ GRP_G("pcie", pcie_rst_grp, MT7620_GPIO_MODE_PCIE_MASK,
MT7620_GPIO_MODE_PCIE_GPIO, MT7620_GPIO_MODE_PCIE_SHIFT),
- GRP_G("nd_sd", nd_sd_func, MT7620_GPIO_MODE_ND_SD_MASK,
+ GRP_G("nd_sd", nd_sd_grp, MT7620_GPIO_MODE_ND_SD_MASK,
MT7620_GPIO_MODE_ND_SD_GPIO, MT7620_GPIO_MODE_ND_SD_SHIFT),
- GRP("rgmii2", rgmii2_func, 1, MT7620_GPIO_MODE_RGMII2),
- GRP("wled", wled_func, 1, MT7620_GPIO_MODE_WLED),
- GRP("ephy", ephy_func, 1, MT7620_GPIO_MODE_EPHY),
- GRP("pa", pa_func, 1, MT7620_GPIO_MODE_PA),
+ GRP("rgmii2", rgmii2_grp, 1, MT7620_GPIO_MODE_RGMII2),
+ GRP("wled", wled_grp, 1, MT7620_GPIO_MODE_WLED),
+ GRP("ephy", ephy_grp, 1, MT7620_GPIO_MODE_EPHY),
+ GRP("pa", pa_grp, 1, MT7620_GPIO_MODE_PA),
{ 0 }
};
-static struct ralink_pmx_func pwm1_func_mt76x8[] = {
+static struct ralink_pmx_func pwm1_grp_mt76x8[] = {
FUNC("sdxc d6", 3, 19, 1),
FUNC("utif", 2, 19, 1),
FUNC("gpio", 1, 19, 1),
FUNC("pwm1", 0, 19, 1),
};
-static struct ralink_pmx_func pwm0_func_mt76x8[] = {
+static struct ralink_pmx_func pwm0_grp_mt76x8[] = {
FUNC("sdxc d7", 3, 18, 1),
FUNC("utif", 2, 18, 1),
FUNC("gpio", 1, 18, 1),
FUNC("pwm0", 0, 18, 1),
};
-static struct ralink_pmx_func uart2_func_mt76x8[] = {
+static struct ralink_pmx_func uart2_grp_mt76x8[] = {
FUNC("sdxc d5 d4", 3, 20, 2),
FUNC("pwm", 2, 20, 2),
FUNC("gpio", 1, 20, 2),
FUNC("uart2", 0, 20, 2),
};
-static struct ralink_pmx_func uart1_func_mt76x8[] = {
+static struct ralink_pmx_func uart1_grp_mt76x8[] = {
FUNC("sw_r", 3, 45, 2),
FUNC("pwm", 2, 45, 2),
FUNC("gpio", 1, 45, 2),
FUNC("uart1", 0, 45, 2),
};
-static struct ralink_pmx_func i2c_func_mt76x8[] = {
+static struct ralink_pmx_func i2c_grp_mt76x8[] = {
FUNC("-", 3, 4, 2),
FUNC("debug", 2, 4, 2),
FUNC("gpio", 1, 4, 2),
FUNC("i2c", 0, 4, 2),
};
-static struct ralink_pmx_func refclk_func_mt76x8[] = { FUNC("refclk", 0, 37, 1) };
-static struct ralink_pmx_func perst_func_mt76x8[] = { FUNC("perst", 0, 36, 1) };
-static struct ralink_pmx_func wdt_func_mt76x8[] = { FUNC("wdt", 0, 38, 1) };
-static struct ralink_pmx_func spi_func_mt76x8[] = { FUNC("spi", 0, 7, 4) };
+static struct ralink_pmx_func refclk_grp_mt76x8[] = { FUNC("refclk", 0, 37, 1) };
+static struct ralink_pmx_func perst_grp_mt76x8[] = { FUNC("perst", 0, 36, 1) };
+static struct ralink_pmx_func wdt_grp_mt76x8[] = { FUNC("wdt", 0, 38, 1) };
+static struct ralink_pmx_func spi_grp_mt76x8[] = { FUNC("spi", 0, 7, 4) };
-static struct ralink_pmx_func sd_mode_func_mt76x8[] = {
+static struct ralink_pmx_func sd_mode_grp_mt76x8[] = {
FUNC("jtag", 3, 22, 8),
FUNC("utif", 2, 22, 8),
FUNC("gpio", 1, 22, 8),
FUNC("sdxc", 0, 22, 8),
};
-static struct ralink_pmx_func uart0_func_mt76x8[] = {
+static struct ralink_pmx_func uart0_grp_mt76x8[] = {
FUNC("-", 3, 12, 2),
FUNC("-", 2, 12, 2),
FUNC("gpio", 1, 12, 2),
FUNC("uart0", 0, 12, 2),
};
-static struct ralink_pmx_func i2s_func_mt76x8[] = {
+static struct ralink_pmx_func i2s_grp_mt76x8[] = {
FUNC("antenna", 3, 0, 4),
FUNC("pcm", 2, 0, 4),
FUNC("gpio", 1, 0, 4),
FUNC("i2s", 0, 0, 4),
};
-static struct ralink_pmx_func spi_cs1_func_mt76x8[] = {
+static struct ralink_pmx_func spi_cs1_grp_mt76x8[] = {
FUNC("-", 3, 6, 1),
FUNC("refclk", 2, 6, 1),
FUNC("gpio", 1, 6, 1),
FUNC("spi cs1", 0, 6, 1),
};
-static struct ralink_pmx_func spis_func_mt76x8[] = {
+static struct ralink_pmx_func spis_grp_mt76x8[] = {
FUNC("pwm_uart2", 3, 14, 4),
FUNC("utif", 2, 14, 4),
FUNC("gpio", 1, 14, 4),
FUNC("spis", 0, 14, 4),
};
-static struct ralink_pmx_func gpio_func_mt76x8[] = {
+static struct ralink_pmx_func gpio_grp_mt76x8[] = {
FUNC("pcie", 3, 11, 1),
FUNC("refclk", 2, 11, 1),
FUNC("gpio", 1, 11, 1),
FUNC("gpio", 0, 11, 1),
};
-static struct ralink_pmx_func p4led_kn_func_mt76x8[] = {
+static struct ralink_pmx_func p4led_kn_grp_mt76x8[] = {
FUNC("jtag", 3, 30, 1),
FUNC("utif", 2, 30, 1),
FUNC("gpio", 1, 30, 1),
FUNC("p4led_kn", 0, 30, 1),
};
-static struct ralink_pmx_func p3led_kn_func_mt76x8[] = {
+static struct ralink_pmx_func p3led_kn_grp_mt76x8[] = {
FUNC("jtag", 3, 31, 1),
FUNC("utif", 2, 31, 1),
FUNC("gpio", 1, 31, 1),
FUNC("p3led_kn", 0, 31, 1),
};
-static struct ralink_pmx_func p2led_kn_func_mt76x8[] = {
+static struct ralink_pmx_func p2led_kn_grp_mt76x8[] = {
FUNC("jtag", 3, 32, 1),
FUNC("utif", 2, 32, 1),
FUNC("gpio", 1, 32, 1),
FUNC("p2led_kn", 0, 32, 1),
};
-static struct ralink_pmx_func p1led_kn_func_mt76x8[] = {
+static struct ralink_pmx_func p1led_kn_grp_mt76x8[] = {
FUNC("jtag", 3, 33, 1),
FUNC("utif", 2, 33, 1),
FUNC("gpio", 1, 33, 1),
FUNC("p1led_kn", 0, 33, 1),
};
-static struct ralink_pmx_func p0led_kn_func_mt76x8[] = {
+static struct ralink_pmx_func p0led_kn_grp_mt76x8[] = {
FUNC("jtag", 3, 34, 1),
FUNC("rsvd", 2, 34, 1),
FUNC("gpio", 1, 34, 1),
FUNC("p0led_kn", 0, 34, 1),
};
-static struct ralink_pmx_func wled_kn_func_mt76x8[] = {
+static struct ralink_pmx_func wled_kn_grp_mt76x8[] = {
FUNC("rsvd", 3, 35, 1),
FUNC("rsvd", 2, 35, 1),
FUNC("gpio", 1, 35, 1),
FUNC("wled_kn", 0, 35, 1),
};
-static struct ralink_pmx_func p4led_an_func_mt76x8[] = {
+static struct ralink_pmx_func p4led_an_grp_mt76x8[] = {
FUNC("jtag", 3, 39, 1),
FUNC("utif", 2, 39, 1),
FUNC("gpio", 1, 39, 1),
FUNC("p4led_an", 0, 39, 1),
};
-static struct ralink_pmx_func p3led_an_func_mt76x8[] = {
+static struct ralink_pmx_func p3led_an_grp_mt76x8[] = {
FUNC("jtag", 3, 40, 1),
FUNC("utif", 2, 40, 1),
FUNC("gpio", 1, 40, 1),
FUNC("p3led_an", 0, 40, 1),
};
-static struct ralink_pmx_func p2led_an_func_mt76x8[] = {
+static struct ralink_pmx_func p2led_an_grp_mt76x8[] = {
FUNC("jtag", 3, 41, 1),
FUNC("utif", 2, 41, 1),
FUNC("gpio", 1, 41, 1),
FUNC("p2led_an", 0, 41, 1),
};
-static struct ralink_pmx_func p1led_an_func_mt76x8[] = {
+static struct ralink_pmx_func p1led_an_grp_mt76x8[] = {
FUNC("jtag", 3, 42, 1),
FUNC("utif", 2, 42, 1),
FUNC("gpio", 1, 42, 1),
FUNC("p1led_an", 0, 42, 1),
};
-static struct ralink_pmx_func p0led_an_func_mt76x8[] = {
+static struct ralink_pmx_func p0led_an_grp_mt76x8[] = {
FUNC("jtag", 3, 43, 1),
FUNC("rsvd", 2, 43, 1),
FUNC("gpio", 1, 43, 1),
FUNC("p0led_an", 0, 43, 1),
};
-static struct ralink_pmx_func wled_an_func_mt76x8[] = {
+static struct ralink_pmx_func wled_an_grp_mt76x8[] = {
FUNC("rsvd", 3, 44, 1),
FUNC("rsvd", 2, 44, 1),
FUNC("gpio", 1, 44, 1),
@@ -309,55 +309,55 @@ static struct ralink_pmx_func wled_an_func_mt76x8[] = {
#define MT76X8_GPIO_MODE_GPIO 0
static struct ralink_pmx_group mt76x8_pinmux_data[] = {
- GRP_G("pwm1", pwm1_func_mt76x8, MT76X8_GPIO_MODE_MASK,
+ GRP_G("pwm1", pwm1_grp_mt76x8, MT76X8_GPIO_MODE_MASK,
1, MT76X8_GPIO_MODE_PWM1),
- GRP_G("pwm0", pwm0_func_mt76x8, MT76X8_GPIO_MODE_MASK,
+ GRP_G("pwm0", pwm0_grp_mt76x8, MT76X8_GPIO_MODE_MASK,
1, MT76X8_GPIO_MODE_PWM0),
- GRP_G("uart2", uart2_func_mt76x8, MT76X8_GPIO_MODE_MASK,
+ GRP_G("uart2", uart2_grp_mt76x8, MT76X8_GPIO_MODE_MASK,
1, MT76X8_GPIO_MODE_UART2),
- GRP_G("uart1", uart1_func_mt76x8, MT76X8_GPIO_MODE_MASK,
+ GRP_G("uart1", uart1_grp_mt76x8, MT76X8_GPIO_MODE_MASK,
1, MT76X8_GPIO_MODE_UART1),
- GRP_G("i2c", i2c_func_mt76x8, MT76X8_GPIO_MODE_MASK,
+ GRP_G("i2c", i2c_grp_mt76x8, MT76X8_GPIO_MODE_MASK,
1, MT76X8_GPIO_MODE_I2C),
- GRP("refclk", refclk_func_mt76x8, 1, MT76X8_GPIO_MODE_REFCLK),
- GRP("perst", perst_func_mt76x8, 1, MT76X8_GPIO_MODE_PERST),
- GRP("wdt", wdt_func_mt76x8, 1, MT76X8_GPIO_MODE_WDT),
- GRP("spi", spi_func_mt76x8, 1, MT76X8_GPIO_MODE_SPI),
- GRP_G("sdmode", sd_mode_func_mt76x8, MT76X8_GPIO_MODE_MASK,
+ GRP("refclk", refclk_grp_mt76x8, 1, MT76X8_GPIO_MODE_REFCLK),
+ GRP("perst", perst_grp_mt76x8, 1, MT76X8_GPIO_MODE_PERST),
+ GRP("wdt", wdt_grp_mt76x8, 1, MT76X8_GPIO_MODE_WDT),
+ GRP("spi", spi_grp_mt76x8, 1, MT76X8_GPIO_MODE_SPI),
+ GRP_G("sdmode", sd_mode_grp_mt76x8, MT76X8_GPIO_MODE_MASK,
1, MT76X8_GPIO_MODE_SDMODE),
- GRP_G("uart0", uart0_func_mt76x8, MT76X8_GPIO_MODE_MASK,
+ GRP_G("uart0", uart0_grp_mt76x8, MT76X8_GPIO_MODE_MASK,
1, MT76X8_GPIO_MODE_UART0),
- GRP_G("i2s", i2s_func_mt76x8, MT76X8_GPIO_MODE_MASK,
+ GRP_G("i2s", i2s_grp_mt76x8, MT76X8_GPIO_MODE_MASK,
1, MT76X8_GPIO_MODE_I2S),
- GRP_G("spi cs1", spi_cs1_func_mt76x8, MT76X8_GPIO_MODE_MASK,
+ GRP_G("spi cs1", spi_cs1_grp_mt76x8, MT76X8_GPIO_MODE_MASK,
1, MT76X8_GPIO_MODE_CS1),
- GRP_G("spis", spis_func_mt76x8, MT76X8_GPIO_MODE_MASK,
+ GRP_G("spis", spis_grp_mt76x8, MT76X8_GPIO_MODE_MASK,
1, MT76X8_GPIO_MODE_SPIS),
- GRP_G("gpio", gpio_func_mt76x8, MT76X8_GPIO_MODE_MASK,
+ GRP_G("gpio", gpio_grp_mt76x8, MT76X8_GPIO_MODE_MASK,
1, MT76X8_GPIO_MODE_GPIO),
- GRP_G("wled_an", wled_an_func_mt76x8, MT76X8_GPIO_MODE_MASK,
+ GRP_G("wled_an", wled_an_grp_mt76x8, MT76X8_GPIO_MODE_MASK,
1, MT76X8_GPIO_MODE_WLED_AN),
- GRP_G("p0led_an", p0led_an_func_mt76x8, MT76X8_GPIO_MODE_MASK,
+ GRP_G("p0led_an", p0led_an_grp_mt76x8, MT76X8_GPIO_MODE_MASK,
1, MT76X8_GPIO_MODE_P0LED_AN),
- GRP_G("p1led_an", p1led_an_func_mt76x8, MT76X8_GPIO_MODE_MASK,
+ GRP_G("p1led_an", p1led_an_grp_mt76x8, MT76X8_GPIO_MODE_MASK,
1, MT76X8_GPIO_MODE_P1LED_AN),
- GRP_G("p2led_an", p2led_an_func_mt76x8, MT76X8_GPIO_MODE_MASK,
+ GRP_G("p2led_an", p2led_an_grp_mt76x8, MT76X8_GPIO_MODE_MASK,
1, MT76X8_GPIO_MODE_P2LED_AN),
- GRP_G("p3led_an", p3led_an_func_mt76x8, MT76X8_GPIO_MODE_MASK,
+ GRP_G("p3led_an", p3led_an_grp_mt76x8, MT76X8_GPIO_MODE_MASK,
1, MT76X8_GPIO_MODE_P3LED_AN),
- GRP_G("p4led_an", p4led_an_func_mt76x8, MT76X8_GPIO_MODE_MASK,
+ GRP_G("p4led_an", p4led_an_grp_mt76x8, MT76X8_GPIO_MODE_MASK,
1, MT76X8_GPIO_MODE_P4LED_AN),
- GRP_G("wled_kn", wled_kn_func_mt76x8, MT76X8_GPIO_MODE_MASK,
+ GRP_G("wled_kn", wled_kn_grp_mt76x8, MT76X8_GPIO_MODE_MASK,
1, MT76X8_GPIO_MODE_WLED_KN),
- GRP_G("p0led_kn", p0led_kn_func_mt76x8, MT76X8_GPIO_MODE_MASK,
+ GRP_G("p0led_kn", p0led_kn_grp_mt76x8, MT76X8_GPIO_MODE_MASK,
1, MT76X8_GPIO_MODE_P0LED_KN),
- GRP_G("p1led_kn", p1led_kn_func_mt76x8, MT76X8_GPIO_MODE_MASK,
+ GRP_G("p1led_kn", p1led_kn_grp_mt76x8, MT76X8_GPIO_MODE_MASK,
1, MT76X8_GPIO_MODE_P1LED_KN),
- GRP_G("p2led_kn", p2led_kn_func_mt76x8, MT76X8_GPIO_MODE_MASK,
+ GRP_G("p2led_kn", p2led_kn_grp_mt76x8, MT76X8_GPIO_MODE_MASK,
1, MT76X8_GPIO_MODE_P2LED_KN),
- GRP_G("p3led_kn", p3led_kn_func_mt76x8, MT76X8_GPIO_MODE_MASK,
+ GRP_G("p3led_kn", p3led_kn_grp_mt76x8, MT76X8_GPIO_MODE_MASK,
1, MT76X8_GPIO_MODE_P3LED_KN),
- GRP_G("p4led_kn", p4led_kn_func_mt76x8, MT76X8_GPIO_MODE_MASK,
+ GRP_G("p4led_kn", p4led_kn_grp_mt76x8, MT76X8_GPIO_MODE_MASK,
1, MT76X8_GPIO_MODE_P4LED_KN),
{ 0 }
};
diff --git a/drivers/pinctrl/ralink/pinctrl-mt7621.c b/drivers/pinctrl/ralink/pinctrl-mt7621.c
index b47968f40e0c..eddc0ba6d468 100644
--- a/drivers/pinctrl/ralink/pinctrl-mt7621.c
+++ b/drivers/pinctrl/ralink/pinctrl-mt7621.c
@@ -34,59 +34,59 @@
#define MT7621_GPIO_MODE_SDHCI_SHIFT 18
#define MT7621_GPIO_MODE_SDHCI_GPIO 1
-static struct ralink_pmx_func uart1_func[] = { FUNC("uart1", 0, 1, 2) };
-static struct ralink_pmx_func i2c_func[] = { FUNC("i2c", 0, 3, 2) };
-static struct ralink_pmx_func uart3_func[] = {
+static struct ralink_pmx_func uart1_grp[] = { FUNC("uart1", 0, 1, 2) };
+static struct ralink_pmx_func i2c_grp[] = { FUNC("i2c", 0, 3, 2) };
+static struct ralink_pmx_func uart3_grp[] = {
FUNC("uart3", 0, 5, 4),
FUNC("i2s", 2, 5, 4),
FUNC("spdif3", 3, 5, 4),
};
-static struct ralink_pmx_func uart2_func[] = {
+static struct ralink_pmx_func uart2_grp[] = {
FUNC("uart2", 0, 9, 4),
FUNC("pcm", 2, 9, 4),
FUNC("spdif2", 3, 9, 4),
};
-static struct ralink_pmx_func jtag_func[] = { FUNC("jtag", 0, 13, 5) };
-static struct ralink_pmx_func wdt_func[] = {
+static struct ralink_pmx_func jtag_grp[] = { FUNC("jtag", 0, 13, 5) };
+static struct ralink_pmx_func wdt_grp[] = {
FUNC("wdt rst", 0, 18, 1),
FUNC("wdt refclk", 2, 18, 1),
};
-static struct ralink_pmx_func pcie_rst_func[] = {
+static struct ralink_pmx_func pcie_rst_grp[] = {
FUNC("pcie rst", MT7621_GPIO_MODE_PCIE_RST, 19, 1),
FUNC("pcie refclk", MT7621_GPIO_MODE_PCIE_REF, 19, 1)
};
-static struct ralink_pmx_func mdio_func[] = { FUNC("mdio", 0, 20, 2) };
-static struct ralink_pmx_func rgmii2_func[] = { FUNC("rgmii2", 0, 22, 12) };
-static struct ralink_pmx_func spi_func[] = {
+static struct ralink_pmx_func mdio_grp[] = { FUNC("mdio", 0, 20, 2) };
+static struct ralink_pmx_func rgmii2_grp[] = { FUNC("rgmii2", 0, 22, 12) };
+static struct ralink_pmx_func spi_grp[] = {
FUNC("spi", 0, 34, 7),
FUNC("nand1", 2, 34, 7),
};
-static struct ralink_pmx_func sdhci_func[] = {
+static struct ralink_pmx_func sdhci_grp[] = {
FUNC("sdhci", 0, 41, 8),
FUNC("nand2", 2, 41, 8),
};
-static struct ralink_pmx_func rgmii1_func[] = { FUNC("rgmii1", 0, 49, 12) };
+static struct ralink_pmx_func rgmii1_grp[] = { FUNC("rgmii1", 0, 49, 12) };
static struct ralink_pmx_group mt7621_pinmux_data[] = {
- GRP("uart1", uart1_func, 1, MT7621_GPIO_MODE_UART1),
- GRP("i2c", i2c_func, 1, MT7621_GPIO_MODE_I2C),
- GRP_G("uart3", uart3_func, MT7621_GPIO_MODE_UART3_MASK,
+ GRP("uart1", uart1_grp, 1, MT7621_GPIO_MODE_UART1),
+ GRP("i2c", i2c_grp, 1, MT7621_GPIO_MODE_I2C),
+ GRP_G("uart3", uart3_grp, MT7621_GPIO_MODE_UART3_MASK,
MT7621_GPIO_MODE_UART3_GPIO, MT7621_GPIO_MODE_UART3_SHIFT),
- GRP_G("uart2", uart2_func, MT7621_GPIO_MODE_UART2_MASK,
+ GRP_G("uart2", uart2_grp, MT7621_GPIO_MODE_UART2_MASK,
MT7621_GPIO_MODE_UART2_GPIO, MT7621_GPIO_MODE_UART2_SHIFT),
- GRP("jtag", jtag_func, 1, MT7621_GPIO_MODE_JTAG),
- GRP_G("wdt", wdt_func, MT7621_GPIO_MODE_WDT_MASK,
+ GRP("jtag", jtag_grp, 1, MT7621_GPIO_MODE_JTAG),
+ GRP_G("wdt", wdt_grp, MT7621_GPIO_MODE_WDT_MASK,
MT7621_GPIO_MODE_WDT_GPIO, MT7621_GPIO_MODE_WDT_SHIFT),
- GRP_G("pcie", pcie_rst_func, MT7621_GPIO_MODE_PCIE_MASK,
+ GRP_G("pcie", pcie_rst_grp, MT7621_GPIO_MODE_PCIE_MASK,
MT7621_GPIO_MODE_PCIE_GPIO, MT7621_GPIO_MODE_PCIE_SHIFT),
- GRP_G("mdio", mdio_func, MT7621_GPIO_MODE_MDIO_MASK,
+ GRP_G("mdio", mdio_grp, MT7621_GPIO_MODE_MDIO_MASK,
MT7621_GPIO_MODE_MDIO_GPIO, MT7621_GPIO_MODE_MDIO_SHIFT),
- GRP("rgmii2", rgmii2_func, 1, MT7621_GPIO_MODE_RGMII2),
- GRP_G("spi", spi_func, MT7621_GPIO_MODE_SPI_MASK,
+ GRP("rgmii2", rgmii2_grp, 1, MT7621_GPIO_MODE_RGMII2),
+ GRP_G("spi", spi_grp, MT7621_GPIO_MODE_SPI_MASK,
MT7621_GPIO_MODE_SPI_GPIO, MT7621_GPIO_MODE_SPI_SHIFT),
- GRP_G("sdhci", sdhci_func, MT7621_GPIO_MODE_SDHCI_MASK,
+ GRP_G("sdhci", sdhci_grp, MT7621_GPIO_MODE_SDHCI_MASK,
MT7621_GPIO_MODE_SDHCI_GPIO, MT7621_GPIO_MODE_SDHCI_SHIFT),
- GRP("rgmii1", rgmii1_func, 1, MT7621_GPIO_MODE_RGMII1),
+ GRP("rgmii1", rgmii1_grp, 1, MT7621_GPIO_MODE_RGMII1),
{ 0 }
};
diff --git a/drivers/pinctrl/ralink/pinctrl-rt2880.c b/drivers/pinctrl/ralink/pinctrl-rt2880.c
index 811e12df1133..3e2f1aaaf095 100644
--- a/drivers/pinctrl/ralink/pinctrl-rt2880.c
+++ b/drivers/pinctrl/ralink/pinctrl-rt2880.c
@@ -15,22 +15,22 @@
#define RT2880_GPIO_MODE_SDRAM BIT(6)
#define RT2880_GPIO_MODE_PCI BIT(7)
-static struct ralink_pmx_func i2c_func[] = { FUNC("i2c", 0, 1, 2) };
-static struct ralink_pmx_func spi_func[] = { FUNC("spi", 0, 3, 4) };
-static struct ralink_pmx_func uartlite_func[] = { FUNC("uartlite", 0, 7, 8) };
-static struct ralink_pmx_func jtag_func[] = { FUNC("jtag", 0, 17, 5) };
-static struct ralink_pmx_func mdio_func[] = { FUNC("mdio", 0, 22, 2) };
-static struct ralink_pmx_func sdram_func[] = { FUNC("sdram", 0, 24, 16) };
-static struct ralink_pmx_func pci_func[] = { FUNC("pci", 0, 40, 32) };
+static struct ralink_pmx_func i2c_grp[] = { FUNC("i2c", 0, 1, 2) };
+static struct ralink_pmx_func spi_grp[] = { FUNC("spi", 0, 3, 4) };
+static struct ralink_pmx_func uartlite_grp[] = { FUNC("uartlite", 0, 7, 8) };
+static struct ralink_pmx_func jtag_grp[] = { FUNC("jtag", 0, 17, 5) };
+static struct ralink_pmx_func mdio_grp[] = { FUNC("mdio", 0, 22, 2) };
+static struct ralink_pmx_func sdram_grp[] = { FUNC("sdram", 0, 24, 16) };
+static struct ralink_pmx_func pci_grp[] = { FUNC("pci", 0, 40, 32) };
static struct ralink_pmx_group rt2880_pinmux_data_act[] = {
- GRP("i2c", i2c_func, 1, RT2880_GPIO_MODE_I2C),
- GRP("spi", spi_func, 1, RT2880_GPIO_MODE_SPI),
- GRP("uartlite", uartlite_func, 1, RT2880_GPIO_MODE_UART0),
- GRP("jtag", jtag_func, 1, RT2880_GPIO_MODE_JTAG),
- GRP("mdio", mdio_func, 1, RT2880_GPIO_MODE_MDIO),
- GRP("sdram", sdram_func, 1, RT2880_GPIO_MODE_SDRAM),
- GRP("pci", pci_func, 1, RT2880_GPIO_MODE_PCI),
+ GRP("i2c", i2c_grp, 1, RT2880_GPIO_MODE_I2C),
+ GRP("spi", spi_grp, 1, RT2880_GPIO_MODE_SPI),
+ GRP("uartlite", uartlite_grp, 1, RT2880_GPIO_MODE_UART0),
+ GRP("jtag", jtag_grp, 1, RT2880_GPIO_MODE_JTAG),
+ GRP("mdio", mdio_grp, 1, RT2880_GPIO_MODE_MDIO),
+ GRP("sdram", sdram_grp, 1, RT2880_GPIO_MODE_SDRAM),
+ GRP("pci", pci_grp, 1, RT2880_GPIO_MODE_PCI),
{ 0 }
};
diff --git a/drivers/pinctrl/ralink/pinctrl-rt305x.c b/drivers/pinctrl/ralink/pinctrl-rt305x.c
index 5b204b7ca1f3..bdaee5ce1ee0 100644
--- a/drivers/pinctrl/ralink/pinctrl-rt305x.c
+++ b/drivers/pinctrl/ralink/pinctrl-rt305x.c
@@ -31,9 +31,9 @@
#define RT3352_GPIO_MODE_LNA 18
#define RT3352_GPIO_MODE_PA 20
-static struct ralink_pmx_func i2c_func[] = { FUNC("i2c", 0, 1, 2) };
-static struct ralink_pmx_func spi_func[] = { FUNC("spi", 0, 3, 4) };
-static struct ralink_pmx_func uartf_func[] = {
+static struct ralink_pmx_func i2c_grp[] = { FUNC("i2c", 0, 1, 2) };
+static struct ralink_pmx_func spi_grp[] = { FUNC("spi", 0, 3, 4) };
+static struct ralink_pmx_func uartf_grp[] = {
FUNC("uartf", RT305X_GPIO_MODE_UARTF, 7, 8),
FUNC("pcm uartf", RT305X_GPIO_MODE_PCM_UARTF, 7, 8),
FUNC("pcm i2s", RT305X_GPIO_MODE_PCM_I2S, 7, 8),
@@ -42,65 +42,65 @@ static struct ralink_pmx_func uartf_func[] = {
FUNC("gpio uartf", RT305X_GPIO_MODE_GPIO_UARTF, 7, 4),
FUNC("gpio i2s", RT305X_GPIO_MODE_GPIO_I2S, 7, 4),
};
-static struct ralink_pmx_func uartlite_func[] = { FUNC("uartlite", 0, 15, 2) };
-static struct ralink_pmx_func jtag_func[] = { FUNC("jtag", 0, 17, 5) };
-static struct ralink_pmx_func mdio_func[] = { FUNC("mdio", 0, 22, 2) };
-static struct ralink_pmx_func rt5350_led_func[] = { FUNC("led", 0, 22, 5) };
-static struct ralink_pmx_func rt5350_cs1_func[] = {
+static struct ralink_pmx_func uartlite_grp[] = { FUNC("uartlite", 0, 15, 2) };
+static struct ralink_pmx_func jtag_grp[] = { FUNC("jtag", 0, 17, 5) };
+static struct ralink_pmx_func mdio_grp[] = { FUNC("mdio", 0, 22, 2) };
+static struct ralink_pmx_func rt5350_led_grp[] = { FUNC("led", 0, 22, 5) };
+static struct ralink_pmx_func rt5350_cs1_grp[] = {
FUNC("spi_cs1", 0, 27, 1),
FUNC("wdg_cs1", 1, 27, 1),
};
-static struct ralink_pmx_func sdram_func[] = { FUNC("sdram", 0, 24, 16) };
-static struct ralink_pmx_func rt3352_rgmii_func[] = {
+static struct ralink_pmx_func sdram_grp[] = { FUNC("sdram", 0, 24, 16) };
+static struct ralink_pmx_func rt3352_rgmii_grp[] = {
FUNC("rgmii", 0, 24, 12)
};
-static struct ralink_pmx_func rgmii_func[] = { FUNC("rgmii", 0, 40, 12) };
-static struct ralink_pmx_func rt3352_lna_func[] = { FUNC("lna", 0, 36, 2) };
-static struct ralink_pmx_func rt3352_pa_func[] = { FUNC("pa", 0, 38, 2) };
-static struct ralink_pmx_func rt3352_led_func[] = { FUNC("led", 0, 40, 5) };
-static struct ralink_pmx_func rt3352_cs1_func[] = {
+static struct ralink_pmx_func rgmii_grp[] = { FUNC("rgmii", 0, 40, 12) };
+static struct ralink_pmx_func rt3352_lna_grp[] = { FUNC("lna", 0, 36, 2) };
+static struct ralink_pmx_func rt3352_pa_grp[] = { FUNC("pa", 0, 38, 2) };
+static struct ralink_pmx_func rt3352_led_grp[] = { FUNC("led", 0, 40, 5) };
+static struct ralink_pmx_func rt3352_cs1_grp[] = {
FUNC("spi_cs1", 0, 45, 1),
FUNC("wdg_cs1", 1, 45, 1),
};
static struct ralink_pmx_group rt3050_pinmux_data[] = {
- GRP("i2c", i2c_func, 1, RT305X_GPIO_MODE_I2C),
- GRP("spi", spi_func, 1, RT305X_GPIO_MODE_SPI),
- GRP("uartf", uartf_func, RT305X_GPIO_MODE_UART0_MASK,
+ GRP("i2c", i2c_grp, 1, RT305X_GPIO_MODE_I2C),
+ GRP("spi", spi_grp, 1, RT305X_GPIO_MODE_SPI),
+ GRP("uartf", uartf_grp, RT305X_GPIO_MODE_UART0_MASK,
RT305X_GPIO_MODE_UART0_SHIFT),
- GRP("uartlite", uartlite_func, 1, RT305X_GPIO_MODE_UART1),
- GRP("jtag", jtag_func, 1, RT305X_GPIO_MODE_JTAG),
- GRP("mdio", mdio_func, 1, RT305X_GPIO_MODE_MDIO),
- GRP("rgmii", rgmii_func, 1, RT305X_GPIO_MODE_RGMII),
- GRP("sdram", sdram_func, 1, RT305X_GPIO_MODE_SDRAM),
+ GRP("uartlite", uartlite_grp, 1, RT305X_GPIO_MODE_UART1),
+ GRP("jtag", jtag_grp, 1, RT305X_GPIO_MODE_JTAG),
+ GRP("mdio", mdio_grp, 1, RT305X_GPIO_MODE_MDIO),
+ GRP("rgmii", rgmii_grp, 1, RT305X_GPIO_MODE_RGMII),
+ GRP("sdram", sdram_grp, 1, RT305X_GPIO_MODE_SDRAM),
{ 0 }
};
static struct ralink_pmx_group rt3352_pinmux_data[] = {
- GRP("i2c", i2c_func, 1, RT305X_GPIO_MODE_I2C),
- GRP("spi", spi_func, 1, RT305X_GPIO_MODE_SPI),
- GRP("uartf", uartf_func, RT305X_GPIO_MODE_UART0_MASK,
+ GRP("i2c", i2c_grp, 1, RT305X_GPIO_MODE_I2C),
+ GRP("spi", spi_grp, 1, RT305X_GPIO_MODE_SPI),
+ GRP("uartf", uartf_grp, RT305X_GPIO_MODE_UART0_MASK,
RT305X_GPIO_MODE_UART0_SHIFT),
- GRP("uartlite", uartlite_func, 1, RT305X_GPIO_MODE_UART1),
- GRP("jtag", jtag_func, 1, RT305X_GPIO_MODE_JTAG),
- GRP("mdio", mdio_func, 1, RT305X_GPIO_MODE_MDIO),
- GRP("rgmii", rt3352_rgmii_func, 1, RT305X_GPIO_MODE_RGMII),
- GRP("lna", rt3352_lna_func, 1, RT3352_GPIO_MODE_LNA),
- GRP("pa", rt3352_pa_func, 1, RT3352_GPIO_MODE_PA),
- GRP("led", rt3352_led_func, 1, RT5350_GPIO_MODE_PHY_LED),
- GRP("spi_cs1", rt3352_cs1_func, 2, RT5350_GPIO_MODE_SPI_CS1),
+ GRP("uartlite", uartlite_grp, 1, RT305X_GPIO_MODE_UART1),
+ GRP("jtag", jtag_grp, 1, RT305X_GPIO_MODE_JTAG),
+ GRP("mdio", mdio_grp, 1, RT305X_GPIO_MODE_MDIO),
+ GRP("rgmii", rt3352_rgmii_grp, 1, RT305X_GPIO_MODE_RGMII),
+ GRP("lna", rt3352_lna_grp, 1, RT3352_GPIO_MODE_LNA),
+ GRP("pa", rt3352_pa_grp, 1, RT3352_GPIO_MODE_PA),
+ GRP("led", rt3352_led_grp, 1, RT5350_GPIO_MODE_PHY_LED),
+ GRP("spi_cs1", rt3352_cs1_grp, 2, RT5350_GPIO_MODE_SPI_CS1),
{ 0 }
};
static struct ralink_pmx_group rt5350_pinmux_data[] = {
- GRP("i2c", i2c_func, 1, RT305X_GPIO_MODE_I2C),
- GRP("spi", spi_func, 1, RT305X_GPIO_MODE_SPI),
- GRP("uartf", uartf_func, RT305X_GPIO_MODE_UART0_MASK,
+ GRP("i2c", i2c_grp, 1, RT305X_GPIO_MODE_I2C),
+ GRP("spi", spi_grp, 1, RT305X_GPIO_MODE_SPI),
+ GRP("uartf", uartf_grp, RT305X_GPIO_MODE_UART0_MASK,
RT305X_GPIO_MODE_UART0_SHIFT),
- GRP("uartlite", uartlite_func, 1, RT305X_GPIO_MODE_UART1),
- GRP("jtag", jtag_func, 1, RT305X_GPIO_MODE_JTAG),
- GRP("led", rt5350_led_func, 1, RT5350_GPIO_MODE_PHY_LED),
- GRP("spi_cs1", rt5350_cs1_func, 2, RT5350_GPIO_MODE_SPI_CS1),
+ GRP("uartlite", uartlite_grp, 1, RT305X_GPIO_MODE_UART1),
+ GRP("jtag", jtag_grp, 1, RT305X_GPIO_MODE_JTAG),
+ GRP("led", rt5350_led_grp, 1, RT5350_GPIO_MODE_PHY_LED),
+ GRP("spi_cs1", rt5350_cs1_grp, 2, RT5350_GPIO_MODE_SPI_CS1),
{ 0 }
};
diff --git a/drivers/pinctrl/ralink/pinctrl-rt3883.c b/drivers/pinctrl/ralink/pinctrl-rt3883.c
index 44a66c3d2d2a..392208662355 100644
--- a/drivers/pinctrl/ralink/pinctrl-rt3883.c
+++ b/drivers/pinctrl/ralink/pinctrl-rt3883.c
@@ -39,9 +39,9 @@
#define RT3883_GPIO_MODE_LNA_G_GPIO 0x3
#define RT3883_GPIO_MODE_LNA_G _RT3883_GPIO_MODE_LNA_G(RT3883_GPIO_MODE_LNA_G_MASK)
-static struct ralink_pmx_func i2c_func[] = { FUNC("i2c", 0, 1, 2) };
-static struct ralink_pmx_func spi_func[] = { FUNC("spi", 0, 3, 4) };
-static struct ralink_pmx_func uartf_func[] = {
+static struct ralink_pmx_func i2c_grp[] = { FUNC("i2c", 0, 1, 2) };
+static struct ralink_pmx_func spi_grp[] = { FUNC("spi", 0, 3, 4) };
+static struct ralink_pmx_func uartf_grp[] = {
FUNC("uartf", RT3883_GPIO_MODE_UARTF, 7, 8),
FUNC("pcm uartf", RT3883_GPIO_MODE_PCM_UARTF, 7, 8),
FUNC("pcm i2s", RT3883_GPIO_MODE_PCM_I2S, 7, 8),
@@ -50,34 +50,34 @@ static struct ralink_pmx_func uartf_func[] = {
FUNC("gpio uartf", RT3883_GPIO_MODE_GPIO_UARTF, 7, 4),
FUNC("gpio i2s", RT3883_GPIO_MODE_GPIO_I2S, 7, 4),
};
-static struct ralink_pmx_func uartlite_func[] = { FUNC("uartlite", 0, 15, 2) };
-static struct ralink_pmx_func jtag_func[] = { FUNC("jtag", 0, 17, 5) };
-static struct ralink_pmx_func mdio_func[] = { FUNC("mdio", 0, 22, 2) };
-static struct ralink_pmx_func lna_a_func[] = { FUNC("lna a", 0, 32, 3) };
-static struct ralink_pmx_func lna_g_func[] = { FUNC("lna g", 0, 35, 3) };
-static struct ralink_pmx_func pci_func[] = {
+static struct ralink_pmx_func uartlite_grp[] = { FUNC("uartlite", 0, 15, 2) };
+static struct ralink_pmx_func jtag_grp[] = { FUNC("jtag", 0, 17, 5) };
+static struct ralink_pmx_func mdio_grp[] = { FUNC("mdio", 0, 22, 2) };
+static struct ralink_pmx_func lna_a_grp[] = { FUNC("lna a", 0, 32, 3) };
+static struct ralink_pmx_func lna_g_grp[] = { FUNC("lna g", 0, 35, 3) };
+static struct ralink_pmx_func pci_grp[] = {
FUNC("pci-dev", 0, 40, 32),
FUNC("pci-host2", 1, 40, 32),
FUNC("pci-host1", 2, 40, 32),
FUNC("pci-fnc", 3, 40, 32)
};
-static struct ralink_pmx_func ge1_func[] = { FUNC("ge1", 0, 72, 12) };
-static struct ralink_pmx_func ge2_func[] = { FUNC("ge2", 0, 84, 12) };
+static struct ralink_pmx_func ge1_grp[] = { FUNC("ge1", 0, 72, 12) };
+static struct ralink_pmx_func ge2_grp[] = { FUNC("ge2", 0, 84, 12) };
static struct ralink_pmx_group rt3883_pinmux_data[] = {
- GRP("i2c", i2c_func, 1, RT3883_GPIO_MODE_I2C),
- GRP("spi", spi_func, 1, RT3883_GPIO_MODE_SPI),
- GRP("uartf", uartf_func, RT3883_GPIO_MODE_UART0_MASK,
+ GRP("i2c", i2c_grp, 1, RT3883_GPIO_MODE_I2C),
+ GRP("spi", spi_grp, 1, RT3883_GPIO_MODE_SPI),
+ GRP("uartf", uartf_grp, RT3883_GPIO_MODE_UART0_MASK,
RT3883_GPIO_MODE_UART0_SHIFT),
- GRP("uartlite", uartlite_func, 1, RT3883_GPIO_MODE_UART1),
- GRP("jtag", jtag_func, 1, RT3883_GPIO_MODE_JTAG),
- GRP("mdio", mdio_func, 1, RT3883_GPIO_MODE_MDIO),
- GRP("lna a", lna_a_func, 1, RT3883_GPIO_MODE_LNA_A),
- GRP("lna g", lna_g_func, 1, RT3883_GPIO_MODE_LNA_G),
- GRP("pci", pci_func, RT3883_GPIO_MODE_PCI_MASK,
+ GRP("uartlite", uartlite_grp, 1, RT3883_GPIO_MODE_UART1),
+ GRP("jtag", jtag_grp, 1, RT3883_GPIO_MODE_JTAG),
+ GRP("mdio", mdio_grp, 1, RT3883_GPIO_MODE_MDIO),
+ GRP("lna a", lna_a_grp, 1, RT3883_GPIO_MODE_LNA_A),
+ GRP("lna g", lna_g_grp, 1, RT3883_GPIO_MODE_LNA_G),
+ GRP("pci", pci_grp, RT3883_GPIO_MODE_PCI_MASK,
RT3883_GPIO_MODE_PCI_SHIFT),
- GRP("ge1", ge1_func, 1, RT3883_GPIO_MODE_GE1),
- GRP("ge2", ge2_func, 1, RT3883_GPIO_MODE_GE2),
+ GRP("ge1", ge1_grp, 1, RT3883_GPIO_MODE_GE1),
+ GRP("ge2", ge2_grp, 1, RT3883_GPIO_MODE_GE2),
{ 0 }
};
diff --git a/drivers/pinctrl/renesas/pfc-r8a77950.c b/drivers/pinctrl/renesas/pfc-r8a77950.c
index 4c543ec3a863..cc66c6de045c 100644
--- a/drivers/pinctrl/renesas/pfc-r8a77950.c
+++ b/drivers/pinctrl/renesas/pfc-r8a77950.c
@@ -3820,6 +3820,186 @@ static const unsigned int usb31_mux[] = {
USB31_PWEN_MARK, USB31_OVC_MARK,
};
+/* - VIN4 ------------------------------------------------------------------- */
+static const unsigned int vin4_data18_a_pins[] = {
+ RCAR_GP_PIN(0, 10), RCAR_GP_PIN(0, 11),
+ RCAR_GP_PIN(0, 12), RCAR_GP_PIN(0, 13),
+ RCAR_GP_PIN(0, 14), RCAR_GP_PIN(0, 15),
+ RCAR_GP_PIN(1, 2), RCAR_GP_PIN(1, 3),
+ RCAR_GP_PIN(1, 4), RCAR_GP_PIN(1, 5),
+ RCAR_GP_PIN(1, 6), RCAR_GP_PIN(1, 7),
+ RCAR_GP_PIN(0, 2), RCAR_GP_PIN(0, 3),
+ RCAR_GP_PIN(0, 4), RCAR_GP_PIN(0, 5),
+ RCAR_GP_PIN(0, 6), RCAR_GP_PIN(0, 7),
+};
+static const unsigned int vin4_data18_a_mux[] = {
+ VI4_DATA2_A_MARK, VI4_DATA3_A_MARK,
+ VI4_DATA4_A_MARK, VI4_DATA5_A_MARK,
+ VI4_DATA6_A_MARK, VI4_DATA7_A_MARK,
+ VI4_DATA10_MARK, VI4_DATA11_MARK,
+ VI4_DATA12_MARK, VI4_DATA13_MARK,
+ VI4_DATA14_MARK, VI4_DATA15_MARK,
+ VI4_DATA18_MARK, VI4_DATA19_MARK,
+ VI4_DATA20_MARK, VI4_DATA21_MARK,
+ VI4_DATA22_MARK, VI4_DATA23_MARK,
+};
+static const unsigned int vin4_data18_b_pins[] = {
+ RCAR_GP_PIN(2, 2), RCAR_GP_PIN(2, 3),
+ RCAR_GP_PIN(2, 4), RCAR_GP_PIN(2, 5),
+ RCAR_GP_PIN(2, 6), RCAR_GP_PIN(2, 7),
+ RCAR_GP_PIN(1, 2), RCAR_GP_PIN(1, 3),
+ RCAR_GP_PIN(1, 4), RCAR_GP_PIN(1, 5),
+ RCAR_GP_PIN(1, 6), RCAR_GP_PIN(1, 7),
+ RCAR_GP_PIN(0, 2), RCAR_GP_PIN(0, 3),
+ RCAR_GP_PIN(0, 4), RCAR_GP_PIN(0, 5),
+ RCAR_GP_PIN(0, 6), RCAR_GP_PIN(0, 7),
+};
+static const unsigned int vin4_data18_b_mux[] = {
+ VI4_DATA2_B_MARK, VI4_DATA3_B_MARK,
+ VI4_DATA4_B_MARK, VI4_DATA5_B_MARK,
+ VI4_DATA6_B_MARK, VI4_DATA7_B_MARK,
+ VI4_DATA10_MARK, VI4_DATA11_MARK,
+ VI4_DATA12_MARK, VI4_DATA13_MARK,
+ VI4_DATA14_MARK, VI4_DATA15_MARK,
+ VI4_DATA18_MARK, VI4_DATA19_MARK,
+ VI4_DATA20_MARK, VI4_DATA21_MARK,
+ VI4_DATA22_MARK, VI4_DATA23_MARK,
+};
+static const unsigned int vin4_data_a_pins[] = {
+ RCAR_GP_PIN(0, 8), RCAR_GP_PIN(0, 9),
+ RCAR_GP_PIN(0, 10), RCAR_GP_PIN(0, 11),
+ RCAR_GP_PIN(0, 12), RCAR_GP_PIN(0, 13),
+ RCAR_GP_PIN(0, 14), RCAR_GP_PIN(0, 15),
+ RCAR_GP_PIN(1, 0), RCAR_GP_PIN(1, 1),
+ RCAR_GP_PIN(1, 2), RCAR_GP_PIN(1, 3),
+ RCAR_GP_PIN(1, 4), RCAR_GP_PIN(1, 5),
+ RCAR_GP_PIN(1, 6), RCAR_GP_PIN(1, 7),
+ RCAR_GP_PIN(0, 0), RCAR_GP_PIN(0, 1),
+ RCAR_GP_PIN(0, 2), RCAR_GP_PIN(0, 3),
+ RCAR_GP_PIN(0, 4), RCAR_GP_PIN(0, 5),
+ RCAR_GP_PIN(0, 6), RCAR_GP_PIN(0, 7),
+};
+static const unsigned int vin4_data_a_mux[] = {
+ VI4_DATA0_A_MARK, VI4_DATA1_A_MARK,
+ VI4_DATA2_A_MARK, VI4_DATA3_A_MARK,
+ VI4_DATA4_A_MARK, VI4_DATA5_A_MARK,
+ VI4_DATA6_A_MARK, VI4_DATA7_A_MARK,
+ VI4_DATA8_MARK, VI4_DATA9_MARK,
+ VI4_DATA10_MARK, VI4_DATA11_MARK,
+ VI4_DATA12_MARK, VI4_DATA13_MARK,
+ VI4_DATA14_MARK, VI4_DATA15_MARK,
+ VI4_DATA16_MARK, VI4_DATA17_MARK,
+ VI4_DATA18_MARK, VI4_DATA19_MARK,
+ VI4_DATA20_MARK, VI4_DATA21_MARK,
+ VI4_DATA22_MARK, VI4_DATA23_MARK,
+};
+static const unsigned int vin4_data_b_pins[] = {
+ RCAR_GP_PIN(2, 0), RCAR_GP_PIN(2, 1),
+ RCAR_GP_PIN(2, 2), RCAR_GP_PIN(2, 3),
+ RCAR_GP_PIN(2, 4), RCAR_GP_PIN(2, 5),
+ RCAR_GP_PIN(2, 6), RCAR_GP_PIN(2, 7),
+ RCAR_GP_PIN(1, 0), RCAR_GP_PIN(1, 1),
+ RCAR_GP_PIN(1, 2), RCAR_GP_PIN(1, 3),
+ RCAR_GP_PIN(1, 4), RCAR_GP_PIN(1, 5),
+ RCAR_GP_PIN(1, 6), RCAR_GP_PIN(1, 7),
+ RCAR_GP_PIN(0, 0), RCAR_GP_PIN(0, 1),
+ RCAR_GP_PIN(0, 2), RCAR_GP_PIN(0, 3),
+ RCAR_GP_PIN(0, 4), RCAR_GP_PIN(0, 5),
+ RCAR_GP_PIN(0, 6), RCAR_GP_PIN(0, 7),
+};
+static const unsigned int vin4_data_b_mux[] = {
+ VI4_DATA0_B_MARK, VI4_DATA1_B_MARK,
+ VI4_DATA2_B_MARK, VI4_DATA3_B_MARK,
+ VI4_DATA4_B_MARK, VI4_DATA5_B_MARK,
+ VI4_DATA6_B_MARK, VI4_DATA7_B_MARK,
+ VI4_DATA8_MARK, VI4_DATA9_MARK,
+ VI4_DATA10_MARK, VI4_DATA11_MARK,
+ VI4_DATA12_MARK, VI4_DATA13_MARK,
+ VI4_DATA14_MARK, VI4_DATA15_MARK,
+ VI4_DATA16_MARK, VI4_DATA17_MARK,
+ VI4_DATA18_MARK, VI4_DATA19_MARK,
+ VI4_DATA20_MARK, VI4_DATA21_MARK,
+ VI4_DATA22_MARK, VI4_DATA23_MARK,
+};
+static const unsigned int vin4_sync_pins[] = {
+ /* HSYNC#, VSYNC# */
+ RCAR_GP_PIN(1, 18), RCAR_GP_PIN(1, 17),
+};
+static const unsigned int vin4_sync_mux[] = {
+ VI4_HSYNC_N_MARK, VI4_VSYNC_N_MARK,
+};
+static const unsigned int vin4_field_pins[] = {
+ /* FIELD */
+ RCAR_GP_PIN(1, 16),
+};
+static const unsigned int vin4_field_mux[] = {
+ VI4_FIELD_MARK,
+};
+static const unsigned int vin4_clkenb_pins[] = {
+ /* CLKENB */
+ RCAR_GP_PIN(1, 19),
+};
+static const unsigned int vin4_clkenb_mux[] = {
+ VI4_CLKENB_MARK,
+};
+static const unsigned int vin4_clk_pins[] = {
+ /* CLK */
+ RCAR_GP_PIN(1, 27),
+};
+static const unsigned int vin4_clk_mux[] = {
+ VI4_CLK_MARK,
+};
+
+/* - VIN5 ------------------------------------------------------------------- */
+static const unsigned int vin5_data_pins[] = {
+ RCAR_GP_PIN(0, 0), RCAR_GP_PIN(0, 1),
+ RCAR_GP_PIN(0, 2), RCAR_GP_PIN(0, 3),
+ RCAR_GP_PIN(0, 4), RCAR_GP_PIN(0, 5),
+ RCAR_GP_PIN(0, 6), RCAR_GP_PIN(0, 7),
+ RCAR_GP_PIN(1, 12), RCAR_GP_PIN(1, 13),
+ RCAR_GP_PIN(1, 14), RCAR_GP_PIN(1, 15),
+ RCAR_GP_PIN(1, 4), RCAR_GP_PIN(1, 5),
+ RCAR_GP_PIN(1, 6), RCAR_GP_PIN(1, 7),
+};
+static const unsigned int vin5_data_mux[] = {
+ VI5_DATA0_MARK, VI5_DATA1_MARK,
+ VI5_DATA2_MARK, VI5_DATA3_MARK,
+ VI5_DATA4_MARK, VI5_DATA5_MARK,
+ VI5_DATA6_MARK, VI5_DATA7_MARK,
+ VI5_DATA8_MARK, VI5_DATA9_MARK,
+ VI5_DATA10_MARK, VI5_DATA11_MARK,
+ VI5_DATA12_MARK, VI5_DATA13_MARK,
+ VI5_DATA14_MARK, VI5_DATA15_MARK,
+};
+static const unsigned int vin5_sync_pins[] = {
+ /* HSYNC#, VSYNC# */
+ RCAR_GP_PIN(1, 10), RCAR_GP_PIN(1, 9),
+};
+static const unsigned int vin5_sync_mux[] = {
+ VI5_HSYNC_N_MARK, VI5_VSYNC_N_MARK,
+};
+static const unsigned int vin5_field_pins[] = {
+ RCAR_GP_PIN(1, 11),
+};
+static const unsigned int vin5_field_mux[] = {
+ /* FIELD */
+ VI5_FIELD_MARK,
+};
+static const unsigned int vin5_clkenb_pins[] = {
+ RCAR_GP_PIN(1, 20),
+};
+static const unsigned int vin5_clkenb_mux[] = {
+ /* CLKENB */
+ VI5_CLKENB_MARK,
+};
+static const unsigned int vin5_clk_pins[] = {
+ RCAR_GP_PIN(1, 21),
+};
+static const unsigned int vin5_clk_mux[] = {
+ /* CLK */
+ VI5_CLK_MARK,
+};
+
static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(audio_clk_a_a),
SH_PFC_PIN_GROUP(audio_clk_a_b),
@@ -4141,6 +4321,34 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(usb2),
SH_PFC_PIN_GROUP(usb30),
SH_PFC_PIN_GROUP(usb31),
+ BUS_DATA_PIN_GROUP(vin4_data, 8, _a),
+ BUS_DATA_PIN_GROUP(vin4_data, 10, _a),
+ BUS_DATA_PIN_GROUP(vin4_data, 12, _a),
+ BUS_DATA_PIN_GROUP(vin4_data, 16, _a),
+ SH_PFC_PIN_GROUP(vin4_data18_a),
+ BUS_DATA_PIN_GROUP(vin4_data, 20, _a),
+ BUS_DATA_PIN_GROUP(vin4_data, 24, _a),
+ BUS_DATA_PIN_GROUP(vin4_data, 8, _b),
+ BUS_DATA_PIN_GROUP(vin4_data, 10, _b),
+ BUS_DATA_PIN_GROUP(vin4_data, 12, _b),
+ BUS_DATA_PIN_GROUP(vin4_data, 16, _b),
+ SH_PFC_PIN_GROUP(vin4_data18_b),
+ BUS_DATA_PIN_GROUP(vin4_data, 20, _b),
+ BUS_DATA_PIN_GROUP(vin4_data, 24, _b),
+ SH_PFC_PIN_GROUP_SUBSET(vin4_g8, vin4_data_a, 8, 8),
+ SH_PFC_PIN_GROUP(vin4_sync),
+ SH_PFC_PIN_GROUP(vin4_field),
+ SH_PFC_PIN_GROUP(vin4_clkenb),
+ SH_PFC_PIN_GROUP(vin4_clk),
+ BUS_DATA_PIN_GROUP(vin5_data, 8),
+ BUS_DATA_PIN_GROUP(vin5_data, 10),
+ BUS_DATA_PIN_GROUP(vin5_data, 12),
+ BUS_DATA_PIN_GROUP(vin5_data, 16),
+ SH_PFC_PIN_GROUP_SUBSET(vin5_high8, vin5_data, 8, 8),
+ SH_PFC_PIN_GROUP(vin5_sync),
+ SH_PFC_PIN_GROUP(vin5_field),
+ SH_PFC_PIN_GROUP(vin5_clkenb),
+ SH_PFC_PIN_GROUP(vin5_clk),
};
static const char * const audio_clk_groups[] = {
@@ -4637,6 +4845,40 @@ static const char * const usb31_groups[] = {
"usb31",
};
+static const char * const vin4_groups[] = {
+ "vin4_data8_a",
+ "vin4_data10_a",
+ "vin4_data12_a",
+ "vin4_data16_a",
+ "vin4_data18_a",
+ "vin4_data20_a",
+ "vin4_data24_a",
+ "vin4_data8_b",
+ "vin4_data10_b",
+ "vin4_data12_b",
+ "vin4_data16_b",
+ "vin4_data18_b",
+ "vin4_data20_b",
+ "vin4_data24_b",
+ "vin4_g8",
+ "vin4_sync",
+ "vin4_field",
+ "vin4_clkenb",
+ "vin4_clk",
+};
+
+static const char * const vin5_groups[] = {
+ "vin5_data8",
+ "vin5_data10",
+ "vin5_data12",
+ "vin5_data16",
+ "vin5_high8",
+ "vin5_sync",
+ "vin5_field",
+ "vin5_clkenb",
+ "vin5_clk",
+};
+
static const struct sh_pfc_function pinmux_functions[] = {
SH_PFC_FUNCTION(audio_clk),
SH_PFC_FUNCTION(avb),
@@ -4696,6 +4938,8 @@ static const struct sh_pfc_function pinmux_functions[] = {
SH_PFC_FUNCTION(usb2),
SH_PFC_FUNCTION(usb30),
SH_PFC_FUNCTION(usb31),
+ SH_PFC_FUNCTION(vin4),
+ SH_PFC_FUNCTION(vin5),
};
static const struct pinmux_cfg_reg pinmux_config_regs[] = {
diff --git a/drivers/pinctrl/renesas/pfc-r8a779g0.c b/drivers/pinctrl/renesas/pfc-r8a779g0.c
index 5dd1c2c7708a..bf7fcce2d9c6 100644
--- a/drivers/pinctrl/renesas/pfc-r8a779g0.c
+++ b/drivers/pinctrl/renesas/pfc-r8a779g0.c
@@ -206,66 +206,66 @@
#define GPSR5_0 FM(AVB2_AVTP_PPS)
/* GPSR 6 */
-#define GPSR6_20 F_(AVB1_TXCREFCLK, IP2SR6_19_16)
-#define GPSR6_19 F_(AVB1_RD3, IP2SR6_15_12)
-#define GPSR6_18 F_(AVB1_TD3, IP2SR6_11_8)
-#define GPSR6_17 F_(AVB1_RD2, IP2SR6_7_4)
-#define GPSR6_16 F_(AVB1_TD2, IP2SR6_3_0)
-#define GPSR6_15 F_(AVB1_RD0, IP1SR6_31_28)
-#define GPSR6_14 F_(AVB1_RD1, IP1SR6_27_24)
-#define GPSR6_13 F_(AVB1_TD0, IP1SR6_23_20)
-#define GPSR6_12 F_(AVB1_TD1, IP1SR6_19_16)
-#define GPSR6_11 F_(AVB1_AVTP_CAPTURE, IP1SR6_15_12)
-#define GPSR6_10 F_(AVB1_AVTP_PPS, IP1SR6_11_8)
-#define GPSR6_9 F_(AVB1_RX_CTL, IP1SR6_7_4)
-#define GPSR6_8 F_(AVB1_RXC, IP1SR6_3_0)
-#define GPSR6_7 F_(AVB1_TX_CTL, IP0SR6_31_28)
-#define GPSR6_6 F_(AVB1_TXC, IP0SR6_27_24)
-#define GPSR6_5 F_(AVB1_AVTP_MATCH, IP0SR6_23_20)
-#define GPSR6_4 F_(AVB1_LINK, IP0SR6_19_16)
-#define GPSR6_3 F_(AVB1_PHY_INT, IP0SR6_15_12)
-#define GPSR6_2 F_(AVB1_MDC, IP0SR6_11_8)
-#define GPSR6_1 F_(AVB1_MAGIC, IP0SR6_7_4)
-#define GPSR6_0 F_(AVB1_MDIO, IP0SR6_3_0)
+#define GPSR6_20 F_(AVB1_TXCREFCLK, IP2SR6_19_16)
+#define GPSR6_19 F_(AVB1_RD3, IP2SR6_15_12)
+#define GPSR6_18 F_(AVB1_TD3, IP2SR6_11_8)
+#define GPSR6_17 F_(AVB1_RD2, IP2SR6_7_4)
+#define GPSR6_16 F_(AVB1_TD2, IP2SR6_3_0)
+#define GPSR6_15 F_(AVB1_RD0, IP1SR6_31_28)
+#define GPSR6_14 F_(AVB1_RD1, IP1SR6_27_24)
+#define GPSR6_13 F_(AVB1_TD0, IP1SR6_23_20)
+#define GPSR6_12 F_(AVB1_TD1, IP1SR6_19_16)
+#define GPSR6_11 F_(AVB1_AVTP_CAPTURE, IP1SR6_15_12)
+#define GPSR6_10 F_(AVB1_AVTP_PPS, IP1SR6_11_8)
+#define GPSR6_9 F_(AVB1_RX_CTL, IP1SR6_7_4)
+#define GPSR6_8 F_(AVB1_RXC, IP1SR6_3_0)
+#define GPSR6_7 F_(AVB1_TX_CTL, IP0SR6_31_28)
+#define GPSR6_6 F_(AVB1_TXC, IP0SR6_27_24)
+#define GPSR6_5 F_(AVB1_AVTP_MATCH, IP0SR6_23_20)
+#define GPSR6_4 F_(AVB1_LINK, IP0SR6_19_16)
+#define GPSR6_3 F_(AVB1_PHY_INT, IP0SR6_15_12)
+#define GPSR6_2 F_(AVB1_MDC, IP0SR6_11_8)
+#define GPSR6_1 F_(AVB1_MAGIC, IP0SR6_7_4)
+#define GPSR6_0 F_(AVB1_MDIO, IP0SR6_3_0)
/* GPSR7 */
-#define GPSR7_20 F_(AVB0_RX_CTL, IP2SR7_19_16)
-#define GPSR7_19 F_(AVB0_RXC, IP2SR7_15_12)
-#define GPSR7_18 F_(AVB0_RD0, IP2SR7_11_8)
-#define GPSR7_17 F_(AVB0_RD1, IP2SR7_7_4)
-#define GPSR7_16 F_(AVB0_TX_CTL, IP2SR7_3_0)
-#define GPSR7_15 F_(AVB0_TXC, IP1SR7_31_28)
-#define GPSR7_14 F_(AVB0_MDIO, IP1SR7_27_24)
-#define GPSR7_13 F_(AVB0_MDC, IP1SR7_23_20)
-#define GPSR7_12 F_(AVB0_RD2, IP1SR7_19_16)
-#define GPSR7_11 F_(AVB0_TD0, IP1SR7_15_12)
-#define GPSR7_10 F_(AVB0_MAGIC, IP1SR7_11_8)
-#define GPSR7_9 F_(AVB0_TXCREFCLK, IP1SR7_7_4)
-#define GPSR7_8 F_(AVB0_RD3, IP1SR7_3_0)
-#define GPSR7_7 F_(AVB0_TD1, IP0SR7_31_28)
-#define GPSR7_6 F_(AVB0_TD2, IP0SR7_27_24)
-#define GPSR7_5 F_(AVB0_PHY_INT, IP0SR7_23_20)
-#define GPSR7_4 F_(AVB0_LINK, IP0SR7_19_16)
-#define GPSR7_3 F_(AVB0_TD3, IP0SR7_15_12)
-#define GPSR7_2 F_(AVB0_AVTP_MATCH, IP0SR7_11_8)
-#define GPSR7_1 F_(AVB0_AVTP_CAPTURE, IP0SR7_7_4)
-#define GPSR7_0 F_(AVB0_AVTP_PPS, IP0SR7_3_0)
+#define GPSR7_20 F_(AVB0_RX_CTL, IP2SR7_19_16)
+#define GPSR7_19 F_(AVB0_RXC, IP2SR7_15_12)
+#define GPSR7_18 F_(AVB0_RD0, IP2SR7_11_8)
+#define GPSR7_17 F_(AVB0_RD1, IP2SR7_7_4)
+#define GPSR7_16 F_(AVB0_TX_CTL, IP2SR7_3_0)
+#define GPSR7_15 F_(AVB0_TXC, IP1SR7_31_28)
+#define GPSR7_14 F_(AVB0_MDIO, IP1SR7_27_24)
+#define GPSR7_13 F_(AVB0_MDC, IP1SR7_23_20)
+#define GPSR7_12 F_(AVB0_RD2, IP1SR7_19_16)
+#define GPSR7_11 F_(AVB0_TD0, IP1SR7_15_12)
+#define GPSR7_10 F_(AVB0_MAGIC, IP1SR7_11_8)
+#define GPSR7_9 F_(AVB0_TXCREFCLK, IP1SR7_7_4)
+#define GPSR7_8 F_(AVB0_RD3, IP1SR7_3_0)
+#define GPSR7_7 F_(AVB0_TD1, IP0SR7_31_28)
+#define GPSR7_6 F_(AVB0_TD2, IP0SR7_27_24)
+#define GPSR7_5 F_(AVB0_PHY_INT, IP0SR7_23_20)
+#define GPSR7_4 F_(AVB0_LINK, IP0SR7_19_16)
+#define GPSR7_3 F_(AVB0_TD3, IP0SR7_15_12)
+#define GPSR7_2 F_(AVB0_AVTP_MATCH, IP0SR7_11_8)
+#define GPSR7_1 F_(AVB0_AVTP_CAPTURE, IP0SR7_7_4)
+#define GPSR7_0 F_(AVB0_AVTP_PPS, IP0SR7_3_0)
/* GPSR8 */
-#define GPSR8_13 F_(GP8_13, IP1SR8_23_20)
-#define GPSR8_12 F_(GP8_12, IP1SR8_19_16)
-#define GPSR8_11 F_(SDA5, IP1SR8_15_12)
-#define GPSR8_10 F_(SCL5, IP1SR8_11_8)
-#define GPSR8_9 F_(SDA4, IP1SR8_7_4)
-#define GPSR8_8 F_(SCL4, IP1SR8_3_0)
-#define GPSR8_7 F_(SDA3, IP0SR8_31_28)
-#define GPSR8_6 F_(SCL3, IP0SR8_27_24)
-#define GPSR8_5 F_(SDA2, IP0SR8_23_20)
-#define GPSR8_4 F_(SCL2, IP0SR8_19_16)
-#define GPSR8_3 F_(SDA1, IP0SR8_15_12)
-#define GPSR8_2 F_(SCL1, IP0SR8_11_8)
-#define GPSR8_1 F_(SDA0, IP0SR8_7_4)
-#define GPSR8_0 F_(SCL0, IP0SR8_3_0)
+#define GPSR8_13 F_(GP8_13, IP1SR8_23_20)
+#define GPSR8_12 F_(GP8_12, IP1SR8_19_16)
+#define GPSR8_11 F_(SDA5, IP1SR8_15_12)
+#define GPSR8_10 F_(SCL5, IP1SR8_11_8)
+#define GPSR8_9 F_(SDA4, IP1SR8_7_4)
+#define GPSR8_8 F_(SCL4, IP1SR8_3_0)
+#define GPSR8_7 F_(SDA3, IP0SR8_31_28)
+#define GPSR8_6 F_(SCL3, IP0SR8_27_24)
+#define GPSR8_5 F_(SDA2, IP0SR8_23_20)
+#define GPSR8_4 F_(SCL2, IP0SR8_19_16)
+#define GPSR8_3 F_(SDA1, IP0SR8_15_12)
+#define GPSR8_2 F_(SCL1, IP0SR8_11_8)
+#define GPSR8_1 F_(SDA0, IP0SR8_7_4)
+#define GPSR8_0 F_(SCL0, IP0SR8_3_0)
/* SR0 */
/* IP0SR0 */ /* 0 */ /* 1 */ /* 2 */ /* 3 4 5 6 7 8 9 A B C D E F */
diff --git a/drivers/pinctrl/renesas/pinctrl-rzg2l.c b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
index 5aa3836dbc22..04b31f0c6b34 100644
--- a/drivers/pinctrl/renesas/pinctrl-rzg2l.c
+++ b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
@@ -130,6 +130,7 @@ struct rzg2l_dedicated_configs {
struct rzg2l_pinctrl_data {
const char * const *port_pins;
const u32 *port_pin_configs;
+ unsigned int n_ports;
struct rzg2l_dedicated_configs *dedicated_pins;
unsigned int n_port_pins;
unsigned int n_dedicated_pins;
@@ -1124,7 +1125,7 @@ static struct {
}
};
-static int rzg2l_gpio_get_gpioint(unsigned int virq)
+static int rzg2l_gpio_get_gpioint(unsigned int virq, const struct rzg2l_pinctrl_data *data)
{
unsigned int gpioint;
unsigned int i;
@@ -1133,13 +1134,13 @@ static int rzg2l_gpio_get_gpioint(unsigned int virq)
port = virq / 8;
bit = virq % 8;
- if (port >= ARRAY_SIZE(rzg2l_gpio_configs) ||
- bit >= RZG2L_GPIO_PORT_GET_PINCNT(rzg2l_gpio_configs[port]))
+ if (port >= data->n_ports ||
+ bit >= RZG2L_GPIO_PORT_GET_PINCNT(data->port_pin_configs[port]))
return -EINVAL;
gpioint = bit;
for (i = 0; i < port; i++)
- gpioint += RZG2L_GPIO_PORT_GET_PINCNT(rzg2l_gpio_configs[i]);
+ gpioint += RZG2L_GPIO_PORT_GET_PINCNT(data->port_pin_configs[i]);
return gpioint;
}
@@ -1239,7 +1240,7 @@ static int rzg2l_gpio_child_to_parent_hwirq(struct gpio_chip *gc,
unsigned long flags;
int gpioint, irq;
- gpioint = rzg2l_gpio_get_gpioint(child);
+ gpioint = rzg2l_gpio_get_gpioint(child, pctrl->data);
if (gpioint < 0)
return gpioint;
@@ -1313,8 +1314,8 @@ static void rzg2l_init_irq_valid_mask(struct gpio_chip *gc,
port = offset / 8;
bit = offset % 8;
- if (port >= ARRAY_SIZE(rzg2l_gpio_configs) ||
- bit >= RZG2L_GPIO_PORT_GET_PINCNT(rzg2l_gpio_configs[port]))
+ if (port >= pctrl->data->n_ports ||
+ bit >= RZG2L_GPIO_PORT_GET_PINCNT(pctrl->data->port_pin_configs[port]))
clear_bit(offset, valid_mask);
}
}
@@ -1467,6 +1468,12 @@ static int rzg2l_pinctrl_probe(struct platform_device *pdev)
struct rzg2l_pinctrl *pctrl;
int ret;
+ BUILD_BUG_ON(ARRAY_SIZE(rzg2l_gpio_configs) * RZG2L_PINS_PER_PORT >
+ ARRAY_SIZE(rzg2l_gpio_names));
+
+ BUILD_BUG_ON(ARRAY_SIZE(r9a07g043_gpio_configs) * RZG2L_PINS_PER_PORT >
+ ARRAY_SIZE(rzg2l_gpio_names));
+
pctrl = devm_kzalloc(&pdev->dev, sizeof(*pctrl), GFP_KERNEL);
if (!pctrl)
return -ENOMEM;
@@ -1519,6 +1526,7 @@ static int rzg2l_pinctrl_probe(struct platform_device *pdev)
static struct rzg2l_pinctrl_data r9a07g043_data = {
.port_pins = rzg2l_gpio_names,
.port_pin_configs = r9a07g043_gpio_configs,
+ .n_ports = ARRAY_SIZE(r9a07g043_gpio_configs),
.dedicated_pins = rzg2l_dedicated_pins.common,
.n_port_pins = ARRAY_SIZE(r9a07g043_gpio_configs) * RZG2L_PINS_PER_PORT,
.n_dedicated_pins = ARRAY_SIZE(rzg2l_dedicated_pins.common),
@@ -1527,8 +1535,9 @@ static struct rzg2l_pinctrl_data r9a07g043_data = {
static struct rzg2l_pinctrl_data r9a07g044_data = {
.port_pins = rzg2l_gpio_names,
.port_pin_configs = rzg2l_gpio_configs,
+ .n_ports = ARRAY_SIZE(rzg2l_gpio_configs),
.dedicated_pins = rzg2l_dedicated_pins.common,
- .n_port_pins = ARRAY_SIZE(rzg2l_gpio_names),
+ .n_port_pins = ARRAY_SIZE(rzg2l_gpio_configs) * RZG2L_PINS_PER_PORT,
.n_dedicated_pins = ARRAY_SIZE(rzg2l_dedicated_pins.common) +
ARRAY_SIZE(rzg2l_dedicated_pins.rzg2l_pins),
};
diff --git a/drivers/pinctrl/samsung/Kconfig b/drivers/pinctrl/samsung/Kconfig
index 7b0576f71376..697ff7812d8e 100644
--- a/drivers/pinctrl/samsung/Kconfig
+++ b/drivers/pinctrl/samsung/Kconfig
@@ -23,11 +23,6 @@ config PINCTRL_EXYNOS_ARM64
bool "ARMv8-specific pinctrl driver for Samsung Exynos SoCs" if COMPILE_TEST
depends on PINCTRL_EXYNOS
-config PINCTRL_S3C24XX
- bool "Samsung S3C24XX SoC pinctrl driver"
- depends on ARCH_S3C24XX || (COMPILE_TEST && OF)
- select PINCTRL_SAMSUNG
-
config PINCTRL_S3C64XX
bool "Samsung S3C64XX SoC pinctrl driver"
depends on ARCH_S3C64XX || (COMPILE_TEST && OF)
diff --git a/drivers/pinctrl/samsung/Makefile b/drivers/pinctrl/samsung/Makefile
index ed951df6a112..4dd4a92ab6cc 100644
--- a/drivers/pinctrl/samsung/Makefile
+++ b/drivers/pinctrl/samsung/Makefile
@@ -5,5 +5,4 @@ obj-$(CONFIG_PINCTRL_SAMSUNG) += pinctrl-samsung.o
obj-$(CONFIG_PINCTRL_EXYNOS) += pinctrl-exynos.o
obj-$(CONFIG_PINCTRL_EXYNOS_ARM) += pinctrl-exynos-arm.o
obj-$(CONFIG_PINCTRL_EXYNOS_ARM64) += pinctrl-exynos-arm64.o
-obj-$(CONFIG_PINCTRL_S3C24XX) += pinctrl-s3c24xx.o
obj-$(CONFIG_PINCTRL_S3C64XX) += pinctrl-s3c64xx.o
diff --git a/drivers/pinctrl/samsung/pinctrl-s3c24xx.c b/drivers/pinctrl/samsung/pinctrl-s3c24xx.c
deleted file mode 100644
index 625cb1065eaf..000000000000
--- a/drivers/pinctrl/samsung/pinctrl-s3c24xx.c
+++ /dev/null
@@ -1,653 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-//
-// S3C24XX specific support for Samsung pinctrl/gpiolib driver.
-//
-// Copyright (c) 2013 Heiko Stuebner <heiko@sntech.de>
-//
-// This file contains the SamsungS3C24XX specific information required by the
-// Samsung pinctrl/gpiolib driver. It also includes the implementation of
-// external gpio and wakeup interrupt support.
-
-#include <linux/init.h>
-#include <linux/device.h>
-#include <linux/interrupt.h>
-#include <linux/irqdomain.h>
-#include <linux/irq.h>
-#include <linux/of_irq.h>
-#include <linux/irqchip/chained_irq.h>
-#include <linux/io.h>
-#include <linux/slab.h>
-#include <linux/err.h>
-
-#include "pinctrl-samsung.h"
-
-#define NUM_EINT 24
-#define NUM_EINT_IRQ 6
-#define EINT_MAX_PER_GROUP 8
-
-#define EINTPEND_REG 0xa8
-#define EINTMASK_REG 0xa4
-
-#define EINT_GROUP(i) ((int)((i) / EINT_MAX_PER_GROUP))
-#define EINT_REG(i) ((EINT_GROUP(i) * 4) + 0x88)
-#define EINT_OFFS(i) ((i) % EINT_MAX_PER_GROUP * 4)
-
-#define EINT_LEVEL_LOW 0
-#define EINT_LEVEL_HIGH 1
-#define EINT_EDGE_FALLING 2
-#define EINT_EDGE_RISING 4
-#define EINT_EDGE_BOTH 6
-#define EINT_MASK 0xf
-
-static const struct samsung_pin_bank_type bank_type_1bit = {
- .fld_width = { 1, 1, },
- .reg_offset = { 0x00, 0x04, },
-};
-
-static const struct samsung_pin_bank_type bank_type_2bit = {
- .fld_width = { 2, 1, 2, },
- .reg_offset = { 0x00, 0x04, 0x08, },
-};
-
-#define PIN_BANK_A(pins, reg, id) \
- { \
- .type = &bank_type_1bit, \
- .pctl_offset = reg, \
- .nr_pins = pins, \
- .eint_type = EINT_TYPE_NONE, \
- .name = id \
- }
-
-#define PIN_BANK_2BIT(pins, reg, id) \
- { \
- .type = &bank_type_2bit, \
- .pctl_offset = reg, \
- .nr_pins = pins, \
- .eint_type = EINT_TYPE_NONE, \
- .name = id \
- }
-
-#define PIN_BANK_2BIT_EINTW(pins, reg, id, eoffs, emask)\
- { \
- .type = &bank_type_2bit, \
- .pctl_offset = reg, \
- .nr_pins = pins, \
- .eint_type = EINT_TYPE_WKUP, \
- .eint_func = 2, \
- .eint_mask = emask, \
- .eint_offset = eoffs, \
- .name = id \
- }
-
-/**
- * struct s3c24xx_eint_data - EINT common data
- * @drvdata: pin controller driver data
- * @domains: IRQ domains of particular EINT interrupts
- * @parents: mapped parent irqs in the main interrupt controller
- */
-struct s3c24xx_eint_data {
- struct samsung_pinctrl_drv_data *drvdata;
- struct irq_domain *domains[NUM_EINT];
- int parents[NUM_EINT_IRQ];
-};
-
-/**
- * struct s3c24xx_eint_domain_data - per irq-domain data
- * @bank: pin bank related to the domain
- * @eint_data: common data
- * @eint0_3_parent_only: live eints 0-3 only in the main intc
- */
-struct s3c24xx_eint_domain_data {
- struct samsung_pin_bank *bank;
- struct s3c24xx_eint_data *eint_data;
- bool eint0_3_parent_only;
-};
-
-static int s3c24xx_eint_get_trigger(unsigned int type)
-{
- switch (type) {
- case IRQ_TYPE_EDGE_RISING:
- return EINT_EDGE_RISING;
- case IRQ_TYPE_EDGE_FALLING:
- return EINT_EDGE_FALLING;
- case IRQ_TYPE_EDGE_BOTH:
- return EINT_EDGE_BOTH;
- case IRQ_TYPE_LEVEL_HIGH:
- return EINT_LEVEL_HIGH;
- case IRQ_TYPE_LEVEL_LOW:
- return EINT_LEVEL_LOW;
- default:
- return -EINVAL;
- }
-}
-
-static void s3c24xx_eint_set_handler(struct irq_data *d, unsigned int type)
-{
- /* Edge- and level-triggered interrupts need different handlers */
- if (type & IRQ_TYPE_EDGE_BOTH)
- irq_set_handler_locked(d, handle_edge_irq);
- else
- irq_set_handler_locked(d, handle_level_irq);
-}
-
-static void s3c24xx_eint_set_function(struct samsung_pinctrl_drv_data *d,
- struct samsung_pin_bank *bank, int pin)
-{
- const struct samsung_pin_bank_type *bank_type = bank->type;
- unsigned long flags;
- void __iomem *reg;
- u8 shift;
- u32 mask;
- u32 val;
-
- /* Make sure that pin is configured as interrupt */
- reg = d->virt_base + bank->pctl_offset;
- shift = pin * bank_type->fld_width[PINCFG_TYPE_FUNC];
- mask = (1 << bank_type->fld_width[PINCFG_TYPE_FUNC]) - 1;
-
- raw_spin_lock_irqsave(&bank->slock, flags);
-
- val = readl(reg);
- val &= ~(mask << shift);
- val |= bank->eint_func << shift;
- writel(val, reg);
-
- raw_spin_unlock_irqrestore(&bank->slock, flags);
-}
-
-static int s3c24xx_eint_type(struct irq_data *data, unsigned int type)
-{
- struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(data);
- struct samsung_pinctrl_drv_data *d = bank->drvdata;
- int index = bank->eint_offset + data->hwirq;
- void __iomem *reg;
- int trigger;
- u8 shift;
- u32 val;
-
- trigger = s3c24xx_eint_get_trigger(type);
- if (trigger < 0) {
- dev_err(d->dev, "unsupported external interrupt type\n");
- return -EINVAL;
- }
-
- s3c24xx_eint_set_handler(data, type);
-
- /* Set up interrupt trigger */
- reg = d->virt_base + EINT_REG(index);
- shift = EINT_OFFS(index);
-
- val = readl(reg);
- val &= ~(EINT_MASK << shift);
- val |= trigger << shift;
- writel(val, reg);
-
- s3c24xx_eint_set_function(d, bank, data->hwirq);
-
- return 0;
-}
-
-/* Handling of EINTs 0-3 on all except S3C2412 and S3C2413 */
-
-static void s3c2410_eint0_3_ack(struct irq_data *data)
-{
- struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(data);
- struct s3c24xx_eint_domain_data *ddata = bank->irq_domain->host_data;
- struct s3c24xx_eint_data *eint_data = ddata->eint_data;
- int parent_irq = eint_data->parents[data->hwirq];
- struct irq_chip *parent_chip = irq_get_chip(parent_irq);
-
- parent_chip->irq_ack(irq_get_irq_data(parent_irq));
-}
-
-static void s3c2410_eint0_3_mask(struct irq_data *data)
-{
- struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(data);
- struct s3c24xx_eint_domain_data *ddata = bank->irq_domain->host_data;
- struct s3c24xx_eint_data *eint_data = ddata->eint_data;
- int parent_irq = eint_data->parents[data->hwirq];
- struct irq_chip *parent_chip = irq_get_chip(parent_irq);
-
- parent_chip->irq_mask(irq_get_irq_data(parent_irq));
-}
-
-static void s3c2410_eint0_3_unmask(struct irq_data *data)
-{
- struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(data);
- struct s3c24xx_eint_domain_data *ddata = bank->irq_domain->host_data;
- struct s3c24xx_eint_data *eint_data = ddata->eint_data;
- int parent_irq = eint_data->parents[data->hwirq];
- struct irq_chip *parent_chip = irq_get_chip(parent_irq);
-
- parent_chip->irq_unmask(irq_get_irq_data(parent_irq));
-}
-
-static struct irq_chip s3c2410_eint0_3_chip = {
- .name = "s3c2410-eint0_3",
- .irq_ack = s3c2410_eint0_3_ack,
- .irq_mask = s3c2410_eint0_3_mask,
- .irq_unmask = s3c2410_eint0_3_unmask,
- .irq_set_type = s3c24xx_eint_type,
-};
-
-static void s3c2410_demux_eint0_3(struct irq_desc *desc)
-{
- struct irq_data *data = irq_desc_get_irq_data(desc);
- struct s3c24xx_eint_data *eint_data = irq_desc_get_handler_data(desc);
- int ret;
-
- /* the first 4 eints have a simple 1 to 1 mapping */
- ret = generic_handle_domain_irq(eint_data->domains[data->hwirq], data->hwirq);
- /* Something must be really wrong if an unmapped EINT is unmasked */
- BUG_ON(ret);
-}
-
-/* Handling of EINTs 0-3 on S3C2412 and S3C2413 */
-
-static void s3c2412_eint0_3_ack(struct irq_data *data)
-{
- struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(data);
- struct samsung_pinctrl_drv_data *d = bank->drvdata;
-
- unsigned long bitval = 1UL << data->hwirq;
- writel(bitval, d->virt_base + EINTPEND_REG);
-}
-
-static void s3c2412_eint0_3_mask(struct irq_data *data)
-{
- struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(data);
- struct samsung_pinctrl_drv_data *d = bank->drvdata;
- unsigned long mask;
-
- mask = readl(d->virt_base + EINTMASK_REG);
- mask |= (1UL << data->hwirq);
- writel(mask, d->virt_base + EINTMASK_REG);
-}
-
-static void s3c2412_eint0_3_unmask(struct irq_data *data)
-{
- struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(data);
- struct samsung_pinctrl_drv_data *d = bank->drvdata;
- unsigned long mask;
-
- mask = readl(d->virt_base + EINTMASK_REG);
- mask &= ~(1UL << data->hwirq);
- writel(mask, d->virt_base + EINTMASK_REG);
-}
-
-static struct irq_chip s3c2412_eint0_3_chip = {
- .name = "s3c2412-eint0_3",
- .irq_ack = s3c2412_eint0_3_ack,
- .irq_mask = s3c2412_eint0_3_mask,
- .irq_unmask = s3c2412_eint0_3_unmask,
- .irq_set_type = s3c24xx_eint_type,
-};
-
-static void s3c2412_demux_eint0_3(struct irq_desc *desc)
-{
- struct s3c24xx_eint_data *eint_data = irq_desc_get_handler_data(desc);
- struct irq_data *data = irq_desc_get_irq_data(desc);
- struct irq_chip *chip = irq_data_get_irq_chip(data);
- int ret;
-
- chained_irq_enter(chip, desc);
-
- /* the first 4 eints have a simple 1 to 1 mapping */
- ret = generic_handle_domain_irq(eint_data->domains[data->hwirq], data->hwirq);
- /* Something must be really wrong if an unmapped EINT is unmasked */
- BUG_ON(ret);
-
- chained_irq_exit(chip, desc);
-}
-
-/* Handling of all other eints */
-
-static void s3c24xx_eint_ack(struct irq_data *data)
-{
- struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(data);
- struct samsung_pinctrl_drv_data *d = bank->drvdata;
- unsigned char index = bank->eint_offset + data->hwirq;
-
- writel(1UL << index, d->virt_base + EINTPEND_REG);
-}
-
-static void s3c24xx_eint_mask(struct irq_data *data)
-{
- struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(data);
- struct samsung_pinctrl_drv_data *d = bank->drvdata;
- unsigned char index = bank->eint_offset + data->hwirq;
- unsigned long mask;
-
- mask = readl(d->virt_base + EINTMASK_REG);
- mask |= (1UL << index);
- writel(mask, d->virt_base + EINTMASK_REG);
-}
-
-static void s3c24xx_eint_unmask(struct irq_data *data)
-{
- struct samsung_pin_bank *bank = irq_data_get_irq_chip_data(data);
- struct samsung_pinctrl_drv_data *d = bank->drvdata;
- unsigned char index = bank->eint_offset + data->hwirq;
- unsigned long mask;
-
- mask = readl(d->virt_base + EINTMASK_REG);
- mask &= ~(1UL << index);
- writel(mask, d->virt_base + EINTMASK_REG);
-}
-
-static struct irq_chip s3c24xx_eint_chip = {
- .name = "s3c-eint",
- .irq_ack = s3c24xx_eint_ack,
- .irq_mask = s3c24xx_eint_mask,
- .irq_unmask = s3c24xx_eint_unmask,
- .irq_set_type = s3c24xx_eint_type,
-};
-
-static inline void s3c24xx_demux_eint(struct irq_desc *desc,
- u32 offset, u32 range)
-{
- struct s3c24xx_eint_data *data = irq_desc_get_handler_data(desc);
- struct irq_chip *chip = irq_desc_get_chip(desc);
- struct samsung_pinctrl_drv_data *d = data->drvdata;
- unsigned int pend, mask;
-
- chained_irq_enter(chip, desc);
-
- pend = readl(d->virt_base + EINTPEND_REG);
- mask = readl(d->virt_base + EINTMASK_REG);
-
- pend &= ~mask;
- pend &= range;
-
- while (pend) {
- unsigned int irq;
- int ret;
-
- irq = __ffs(pend);
- pend &= ~(1 << irq);
- ret = generic_handle_domain_irq(data->domains[irq], irq - offset);
- /* Something is really wrong if an unmapped EINT is unmasked */
- BUG_ON(ret);
- }
-
- chained_irq_exit(chip, desc);
-}
-
-static void s3c24xx_demux_eint4_7(struct irq_desc *desc)
-{
- s3c24xx_demux_eint(desc, 0, 0xf0);
-}
-
-static void s3c24xx_demux_eint8_23(struct irq_desc *desc)
-{
- s3c24xx_demux_eint(desc, 8, 0xffff00);
-}
-
-static irq_flow_handler_t s3c2410_eint_handlers[NUM_EINT_IRQ] = {
- s3c2410_demux_eint0_3,
- s3c2410_demux_eint0_3,
- s3c2410_demux_eint0_3,
- s3c2410_demux_eint0_3,
- s3c24xx_demux_eint4_7,
- s3c24xx_demux_eint8_23,
-};
-
-static irq_flow_handler_t s3c2412_eint_handlers[NUM_EINT_IRQ] = {
- s3c2412_demux_eint0_3,
- s3c2412_demux_eint0_3,
- s3c2412_demux_eint0_3,
- s3c2412_demux_eint0_3,
- s3c24xx_demux_eint4_7,
- s3c24xx_demux_eint8_23,
-};
-
-static int s3c24xx_gpf_irq_map(struct irq_domain *h, unsigned int virq,
- irq_hw_number_t hw)
-{
- struct s3c24xx_eint_domain_data *ddata = h->host_data;
- struct samsung_pin_bank *bank = ddata->bank;
-
- if (!(bank->eint_mask & (1 << (bank->eint_offset + hw))))
- return -EINVAL;
-
- if (hw <= 3) {
- if (ddata->eint0_3_parent_only)
- irq_set_chip_and_handler(virq, &s3c2410_eint0_3_chip,
- handle_edge_irq);
- else
- irq_set_chip_and_handler(virq, &s3c2412_eint0_3_chip,
- handle_edge_irq);
- } else {
- irq_set_chip_and_handler(virq, &s3c24xx_eint_chip,
- handle_edge_irq);
- }
- irq_set_chip_data(virq, bank);
- return 0;
-}
-
-static const struct irq_domain_ops s3c24xx_gpf_irq_ops = {
- .map = s3c24xx_gpf_irq_map,
- .xlate = irq_domain_xlate_twocell,
-};
-
-static int s3c24xx_gpg_irq_map(struct irq_domain *h, unsigned int virq,
- irq_hw_number_t hw)
-{
- struct s3c24xx_eint_domain_data *ddata = h->host_data;
- struct samsung_pin_bank *bank = ddata->bank;
-
- if (!(bank->eint_mask & (1 << (bank->eint_offset + hw))))
- return -EINVAL;
-
- irq_set_chip_and_handler(virq, &s3c24xx_eint_chip, handle_edge_irq);
- irq_set_chip_data(virq, bank);
- return 0;
-}
-
-static const struct irq_domain_ops s3c24xx_gpg_irq_ops = {
- .map = s3c24xx_gpg_irq_map,
- .xlate = irq_domain_xlate_twocell,
-};
-
-static const struct of_device_id s3c24xx_eint_irq_ids[] = {
- { .compatible = "samsung,s3c2410-wakeup-eint", .data = (void *)1 },
- { .compatible = "samsung,s3c2412-wakeup-eint", .data = (void *)0 },
- { }
-};
-
-static int s3c24xx_eint_init(struct samsung_pinctrl_drv_data *d)
-{
- struct device *dev = d->dev;
- const struct of_device_id *match;
- struct device_node *eint_np = NULL;
- struct device_node *np;
- struct samsung_pin_bank *bank;
- struct s3c24xx_eint_data *eint_data;
- const struct irq_domain_ops *ops;
- unsigned int i;
- bool eint0_3_parent_only;
- irq_flow_handler_t *handlers;
-
- for_each_child_of_node(dev->of_node, np) {
- match = of_match_node(s3c24xx_eint_irq_ids, np);
- if (match) {
- eint_np = np;
- eint0_3_parent_only = (bool)match->data;
- break;
- }
- }
- if (!eint_np)
- return -ENODEV;
-
- eint_data = devm_kzalloc(dev, sizeof(*eint_data), GFP_KERNEL);
- if (!eint_data) {
- of_node_put(eint_np);
- return -ENOMEM;
- }
-
- eint_data->drvdata = d;
-
- handlers = eint0_3_parent_only ? s3c2410_eint_handlers
- : s3c2412_eint_handlers;
- for (i = 0; i < NUM_EINT_IRQ; ++i) {
- unsigned int irq;
-
- irq = irq_of_parse_and_map(eint_np, i);
- if (!irq) {
- dev_err(dev, "failed to get wakeup EINT IRQ %d\n", i);
- of_node_put(eint_np);
- return -ENXIO;
- }
-
- eint_data->parents[i] = irq;
- irq_set_chained_handler_and_data(irq, handlers[i], eint_data);
- }
- of_node_put(eint_np);
-
- bank = d->pin_banks;
- for (i = 0; i < d->nr_banks; ++i, ++bank) {
- struct s3c24xx_eint_domain_data *ddata;
- unsigned int mask;
- unsigned int irq;
- unsigned int pin;
-
- if (bank->eint_type != EINT_TYPE_WKUP)
- continue;
-
- ddata = devm_kzalloc(dev, sizeof(*ddata), GFP_KERNEL);
- if (!ddata)
- return -ENOMEM;
-
- ddata->bank = bank;
- ddata->eint_data = eint_data;
- ddata->eint0_3_parent_only = eint0_3_parent_only;
-
- ops = (bank->eint_offset == 0) ? &s3c24xx_gpf_irq_ops
- : &s3c24xx_gpg_irq_ops;
-
- bank->irq_domain = irq_domain_create_linear(bank->fwnode,
- bank->nr_pins, ops, ddata);
- if (!bank->irq_domain) {
- dev_err(dev, "wkup irq domain add failed\n");
- return -ENXIO;
- }
-
- irq = bank->eint_offset;
- mask = bank->eint_mask;
- for (pin = 0; mask; ++pin, mask >>= 1) {
- if (irq >= NUM_EINT)
- break;
- if (!(mask & 1))
- continue;
- eint_data->domains[irq] = bank->irq_domain;
- ++irq;
- }
- }
-
- return 0;
-}
-
-static const struct samsung_pin_bank_data s3c2412_pin_banks[] __initconst = {
- PIN_BANK_A(23, 0x000, "gpa"),
- PIN_BANK_2BIT(11, 0x010, "gpb"),
- PIN_BANK_2BIT(16, 0x020, "gpc"),
- PIN_BANK_2BIT(16, 0x030, "gpd"),
- PIN_BANK_2BIT(16, 0x040, "gpe"),
- PIN_BANK_2BIT_EINTW(8, 0x050, "gpf", 0, 0xff),
- PIN_BANK_2BIT_EINTW(16, 0x060, "gpg", 8, 0xffff00),
- PIN_BANK_2BIT(11, 0x070, "gph"),
- PIN_BANK_2BIT(13, 0x080, "gpj"),
-};
-
-static const struct samsung_pin_ctrl s3c2412_pin_ctrl[] __initconst = {
- {
- .pin_banks = s3c2412_pin_banks,
- .nr_banks = ARRAY_SIZE(s3c2412_pin_banks),
- .eint_wkup_init = s3c24xx_eint_init,
- },
-};
-
-const struct samsung_pinctrl_of_match_data s3c2412_of_data __initconst = {
- .ctrl = s3c2412_pin_ctrl,
- .num_ctrl = ARRAY_SIZE(s3c2412_pin_ctrl),
-};
-
-static const struct samsung_pin_bank_data s3c2416_pin_banks[] __initconst = {
- PIN_BANK_A(27, 0x000, "gpa"),
- PIN_BANK_2BIT(11, 0x010, "gpb"),
- PIN_BANK_2BIT(16, 0x020, "gpc"),
- PIN_BANK_2BIT(16, 0x030, "gpd"),
- PIN_BANK_2BIT(16, 0x040, "gpe"),
- PIN_BANK_2BIT_EINTW(8, 0x050, "gpf", 0, 0xff),
- PIN_BANK_2BIT_EINTW(8, 0x060, "gpg", 8, 0xff00),
- PIN_BANK_2BIT(15, 0x070, "gph"),
- PIN_BANK_2BIT(16, 0x0e0, "gpk"),
- PIN_BANK_2BIT(14, 0x0f0, "gpl"),
- PIN_BANK_2BIT(2, 0x100, "gpm"),
-};
-
-static const struct samsung_pin_ctrl s3c2416_pin_ctrl[] __initconst = {
- {
- .pin_banks = s3c2416_pin_banks,
- .nr_banks = ARRAY_SIZE(s3c2416_pin_banks),
- .eint_wkup_init = s3c24xx_eint_init,
- },
-};
-
-const struct samsung_pinctrl_of_match_data s3c2416_of_data __initconst = {
- .ctrl = s3c2416_pin_ctrl,
- .num_ctrl = ARRAY_SIZE(s3c2416_pin_ctrl),
-};
-
-static const struct samsung_pin_bank_data s3c2440_pin_banks[] __initconst = {
- PIN_BANK_A(25, 0x000, "gpa"),
- PIN_BANK_2BIT(11, 0x010, "gpb"),
- PIN_BANK_2BIT(16, 0x020, "gpc"),
- PIN_BANK_2BIT(16, 0x030, "gpd"),
- PIN_BANK_2BIT(16, 0x040, "gpe"),
- PIN_BANK_2BIT_EINTW(8, 0x050, "gpf", 0, 0xff),
- PIN_BANK_2BIT_EINTW(16, 0x060, "gpg", 8, 0xffff00),
- PIN_BANK_2BIT(11, 0x070, "gph"),
- PIN_BANK_2BIT(13, 0x0d0, "gpj"),
-};
-
-static const struct samsung_pin_ctrl s3c2440_pin_ctrl[] __initconst = {
- {
- .pin_banks = s3c2440_pin_banks,
- .nr_banks = ARRAY_SIZE(s3c2440_pin_banks),
- .eint_wkup_init = s3c24xx_eint_init,
- },
-};
-
-const struct samsung_pinctrl_of_match_data s3c2440_of_data __initconst = {
- .ctrl = s3c2440_pin_ctrl,
- .num_ctrl = ARRAY_SIZE(s3c2440_pin_ctrl),
-};
-
-static const struct samsung_pin_bank_data s3c2450_pin_banks[] __initconst = {
- PIN_BANK_A(28, 0x000, "gpa"),
- PIN_BANK_2BIT(11, 0x010, "gpb"),
- PIN_BANK_2BIT(16, 0x020, "gpc"),
- PIN_BANK_2BIT(16, 0x030, "gpd"),
- PIN_BANK_2BIT(16, 0x040, "gpe"),
- PIN_BANK_2BIT_EINTW(8, 0x050, "gpf", 0, 0xff),
- PIN_BANK_2BIT_EINTW(16, 0x060, "gpg", 8, 0xffff00),
- PIN_BANK_2BIT(15, 0x070, "gph"),
- PIN_BANK_2BIT(16, 0x0d0, "gpj"),
- PIN_BANK_2BIT(16, 0x0e0, "gpk"),
- PIN_BANK_2BIT(15, 0x0f0, "gpl"),
- PIN_BANK_2BIT(2, 0x100, "gpm"),
-};
-
-static const struct samsung_pin_ctrl s3c2450_pin_ctrl[] __initconst = {
- {
- .pin_banks = s3c2450_pin_banks,
- .nr_banks = ARRAY_SIZE(s3c2450_pin_banks),
- .eint_wkup_init = s3c24xx_eint_init,
- },
-};
-
-const struct samsung_pinctrl_of_match_data s3c2450_of_data __initconst = {
- .ctrl = s3c2450_pin_ctrl,
- .num_ctrl = ARRAY_SIZE(s3c2450_pin_ctrl),
-};
diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c
index 5736761927cb..833e170e3d99 100644
--- a/drivers/pinctrl/samsung/pinctrl-samsung.c
+++ b/drivers/pinctrl/samsung/pinctrl-samsung.c
@@ -649,7 +649,7 @@ static int samsung_gpio_direction_output(struct gpio_chip *gc, unsigned offset,
}
/*
- * gpiolib gpio_to_irq callback function. Creates a mapping between a GPIO pin
+ * gpiod_to_irq() callback function. Creates a mapping between a GPIO pin
* and a virtual IRQ, if not already present.
*/
static int samsung_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
@@ -1324,16 +1324,6 @@ static const struct of_device_id samsung_pinctrl_dt_match[] = {
{ .compatible = "samsung,s3c64xx-pinctrl",
.data = &s3c64xx_of_data },
#endif
-#ifdef CONFIG_PINCTRL_S3C24XX
- { .compatible = "samsung,s3c2412-pinctrl",
- .data = &s3c2412_of_data },
- { .compatible = "samsung,s3c2416-pinctrl",
- .data = &s3c2416_of_data },
- { .compatible = "samsung,s3c2440-pinctrl",
- .data = &s3c2440_of_data },
- { .compatible = "samsung,s3c2450-pinctrl",
- .data = &s3c2450_of_data },
-#endif
{},
};
diff --git a/drivers/pinctrl/starfive/Kconfig b/drivers/pinctrl/starfive/Kconfig
index 55c514e622f9..8192ac2087fc 100644
--- a/drivers/pinctrl/starfive/Kconfig
+++ b/drivers/pinctrl/starfive/Kconfig
@@ -16,3 +16,36 @@ config PINCTRL_STARFIVE_JH7100
This also provides an interface to the GPIO pins not used by other
peripherals supporting inputs, outputs, configuring pull-up/pull-down
and interrupts on input changes.
+
+config PINCTRL_STARFIVE_JH7110
+ bool
+ select GENERIC_PINCTRL_GROUPS
+ select GENERIC_PINMUX_FUNCTIONS
+ select GENERIC_PINCONF
+ select GPIOLIB
+ select GPIOLIB_IRQCHIP
+ select OF_GPIO
+
+config PINCTRL_STARFIVE_JH7110_SYS
+ tristate "System pinctrl and GPIO driver for the StarFive JH7110 SoC"
+ depends on SOC_STARFIVE || COMPILE_TEST
+ depends on OF
+ select PINCTRL_STARFIVE_JH7110
+ default SOC_STARFIVE
+ help
+ Say yes here to support system pin control on the StarFive JH7110 SoC.
+ This also provides an interface to the GPIO pins not used by other
+ peripherals supporting inputs, outputs, configuring pull-up/pull-down
+ and interrupts on input changes.
+
+config PINCTRL_STARFIVE_JH7110_AON
+ tristate "Always-on pinctrl and GPIO driver for the StarFive JH7110 SoC"
+ depends on SOC_STARFIVE || COMPILE_TEST
+ depends on OF
+ select PINCTRL_STARFIVE_JH7110
+ default SOC_STARFIVE
+ help
+ Say yes here to support always-on pin control on the StarFive JH7110 SoC.
+ This also provides an interface to the GPIO pins not used by other
+ peripherals supporting inputs, outputs, configuring pull-up/pull-down
+ and interrupts on input changes.
diff --git a/drivers/pinctrl/starfive/Makefile b/drivers/pinctrl/starfive/Makefile
index 0293f26a0a99..ee0d32d085cb 100644
--- a/drivers/pinctrl/starfive/Makefile
+++ b/drivers/pinctrl/starfive/Makefile
@@ -1,3 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_PINCTRL_STARFIVE_JH7100) += pinctrl-starfive-jh7100.o
+
+obj-$(CONFIG_PINCTRL_STARFIVE_JH7110) += pinctrl-starfive-jh7110.o
+obj-$(CONFIG_PINCTRL_STARFIVE_JH7110_SYS) += pinctrl-starfive-jh7110-sys.o
+obj-$(CONFIG_PINCTRL_STARFIVE_JH7110_AON) += pinctrl-starfive-jh7110-aon.o
diff --git a/drivers/pinctrl/starfive/pinctrl-starfive-jh7110-aon.c b/drivers/pinctrl/starfive/pinctrl-starfive-jh7110-aon.c
new file mode 100644
index 000000000000..8cf28aaed254
--- /dev/null
+++ b/drivers/pinctrl/starfive/pinctrl-starfive-jh7110-aon.c
@@ -0,0 +1,177 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Pinctrl / GPIO driver for StarFive JH7110 SoC aon controller
+ *
+ * Copyright (C) 2022 StarFive Technology Co., Ltd.
+ */
+
+#include <linux/err.h>
+#include <linux/gpio/driver.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#include <dt-bindings/pinctrl/starfive,jh7110-pinctrl.h>
+
+#include "../core.h"
+#include "../pinconf.h"
+#include "../pinmux.h"
+#include "pinctrl-starfive-jh7110.h"
+
+#define JH7110_AON_NGPIO 4
+#define JH7110_AON_GC_BASE 64
+
+/* registers */
+#define JH7110_AON_DOEN 0x0
+#define JH7110_AON_DOUT 0x4
+#define JH7110_AON_GPI 0x8
+#define JH7110_AON_GPIOIN 0x2c
+
+#define JH7110_AON_GPIOEN 0xc
+#define JH7110_AON_GPIOIS 0x10
+#define JH7110_AON_GPIOIC 0x14
+#define JH7110_AON_GPIOIBE 0x18
+#define JH7110_AON_GPIOIEV 0x1c
+#define JH7110_AON_GPIOIE 0x20
+#define JH7110_AON_GPIORIS 0x28
+#define JH7110_AON_GPIOMIS 0x28
+
+#define JH7110_AON_GPO_PDA_0_5_CFG 0x30
+
+static const struct pinctrl_pin_desc jh7110_aon_pins[] = {
+ PINCTRL_PIN(PAD_TESTEN, "TESTEN"),
+ PINCTRL_PIN(PAD_RGPIO0, "RGPIO0"),
+ PINCTRL_PIN(PAD_RGPIO1, "RGPIO1"),
+ PINCTRL_PIN(PAD_RGPIO2, "RGPIO2"),
+ PINCTRL_PIN(PAD_RGPIO3, "RGPIO3"),
+ PINCTRL_PIN(PAD_RSTN, "RSTN"),
+ PINCTRL_PIN(PAD_GMAC0_MDC, "GMAC0_MDC"),
+ PINCTRL_PIN(PAD_GMAC0_MDIO, "GMAC0_MDIO"),
+ PINCTRL_PIN(PAD_GMAC0_RXD0, "GMAC0_RXD0"),
+ PINCTRL_PIN(PAD_GMAC0_RXD1, "GMAC0_RXD1"),
+ PINCTRL_PIN(PAD_GMAC0_RXD2, "GMAC0_RXD2"),
+ PINCTRL_PIN(PAD_GMAC0_RXD3, "GMAC0_RXD3"),
+ PINCTRL_PIN(PAD_GMAC0_RXDV, "GMAC0_RXDV"),
+ PINCTRL_PIN(PAD_GMAC0_RXC, "GMAC0_RXC"),
+ PINCTRL_PIN(PAD_GMAC0_TXD0, "GMAC0_TXD0"),
+ PINCTRL_PIN(PAD_GMAC0_TXD1, "GMAC0_TXD1"),
+ PINCTRL_PIN(PAD_GMAC0_TXD2, "GMAC0_TXD2"),
+ PINCTRL_PIN(PAD_GMAC0_TXD3, "GMAC0_TXD3"),
+ PINCTRL_PIN(PAD_GMAC0_TXEN, "GMAC0_TXEN"),
+ PINCTRL_PIN(PAD_GMAC0_TXC, "GMAC0_TXC"),
+};
+
+static int jh7110_aon_set_one_pin_mux(struct jh7110_pinctrl *sfp,
+ unsigned int pin,
+ unsigned int din, u32 dout,
+ u32 doen, u32 func)
+{
+ if (pin < sfp->gc.ngpio && func == 0)
+ jh7110_set_gpiomux(sfp, pin, din, dout, doen);
+
+ return 0;
+}
+
+static int jh7110_aon_get_padcfg_base(struct jh7110_pinctrl *sfp,
+ unsigned int pin)
+{
+ if (pin < PAD_GMAC0_MDC)
+ return JH7110_AON_GPO_PDA_0_5_CFG;
+
+ return -1;
+}
+
+static void jh7110_aon_irq_handler(struct irq_desc *desc)
+{
+ struct jh7110_pinctrl *sfp = jh7110_from_irq_desc(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ unsigned long mis;
+ unsigned int pin;
+
+ chained_irq_enter(chip, desc);
+
+ mis = readl_relaxed(sfp->base + JH7110_AON_GPIOMIS);
+ for_each_set_bit(pin, &mis, JH7110_AON_NGPIO)
+ generic_handle_domain_irq(sfp->gc.irq.domain, pin);
+
+ chained_irq_exit(chip, desc);
+}
+
+static int jh7110_aon_init_hw(struct gpio_chip *gc)
+{
+ struct jh7110_pinctrl *sfp = container_of(gc,
+ struct jh7110_pinctrl, gc);
+
+ /* mask all GPIO interrupts */
+ writel_relaxed(0, sfp->base + JH7110_AON_GPIOIE);
+ /* clear edge interrupt flags */
+ writel_relaxed(0, sfp->base + JH7110_AON_GPIOIC);
+ writel_relaxed(0x0f, sfp->base + JH7110_AON_GPIOIC);
+ /* enable GPIO interrupts */
+ writel_relaxed(1, sfp->base + JH7110_AON_GPIOEN);
+ return 0;
+}
+
+static const struct jh7110_gpio_irq_reg jh7110_aon_irq_reg = {
+ .is_reg_base = JH7110_AON_GPIOIS,
+ .ic_reg_base = JH7110_AON_GPIOIC,
+ .ibe_reg_base = JH7110_AON_GPIOIBE,
+ .iev_reg_base = JH7110_AON_GPIOIEV,
+ .ie_reg_base = JH7110_AON_GPIOIE,
+ .ris_reg_base = JH7110_AON_GPIORIS,
+ .mis_reg_base = JH7110_AON_GPIOMIS,
+};
+
+static const struct jh7110_pinctrl_soc_info jh7110_aon_pinctrl_info = {
+ .pins = jh7110_aon_pins,
+ .npins = ARRAY_SIZE(jh7110_aon_pins),
+ .ngpios = JH7110_AON_NGPIO,
+ .gc_base = JH7110_AON_GC_BASE,
+ .dout_reg_base = JH7110_AON_DOUT,
+ .dout_mask = GENMASK(3, 0),
+ .doen_reg_base = JH7110_AON_DOEN,
+ .doen_mask = GENMASK(2, 0),
+ .gpi_reg_base = JH7110_AON_GPI,
+ .gpi_mask = GENMASK(3, 0),
+ .gpioin_reg_base = JH7110_AON_GPIOIN,
+ .irq_reg = &jh7110_aon_irq_reg,
+ .jh7110_set_one_pin_mux = jh7110_aon_set_one_pin_mux,
+ .jh7110_get_padcfg_base = jh7110_aon_get_padcfg_base,
+ .jh7110_gpio_irq_handler = jh7110_aon_irq_handler,
+ .jh7110_gpio_init_hw = jh7110_aon_init_hw,
+};
+
+static const struct of_device_id jh7110_aon_pinctrl_of_match[] = {
+ {
+ .compatible = "starfive,jh7110-aon-pinctrl",
+ .data = &jh7110_aon_pinctrl_info,
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, jh7110_aon_pinctrl_of_match);
+
+static struct platform_driver jh7110_aon_pinctrl_driver = {
+ .probe = jh7110_pinctrl_probe,
+ .driver = {
+ .name = "starfive-jh7110-aon-pinctrl",
+ .of_match_table = jh7110_aon_pinctrl_of_match,
+ },
+};
+module_platform_driver(jh7110_aon_pinctrl_driver);
+
+MODULE_DESCRIPTION("Pinctrl driver for the StarFive JH7110 SoC aon controller");
+MODULE_AUTHOR("Jianlong Huang <jianlong.huang@starfivetech.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/starfive/pinctrl-starfive-jh7110-sys.c b/drivers/pinctrl/starfive/pinctrl-starfive-jh7110-sys.c
new file mode 100644
index 000000000000..bc279a39613f
--- /dev/null
+++ b/drivers/pinctrl/starfive/pinctrl-starfive-jh7110-sys.c
@@ -0,0 +1,449 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Pinctrl / GPIO driver for StarFive JH7110 SoC sys controller
+ *
+ * Copyright (C) 2022 Emil Renner Berthing <kernel@esmil.dk>
+ * Copyright (C) 2022 StarFive Technology Co., Ltd.
+ */
+
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/gpio/driver.h>
+#include <linux/io.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <linux/spinlock.h>
+
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+
+#include <dt-bindings/pinctrl/starfive,jh7110-pinctrl.h>
+
+#include "../core.h"
+#include "../pinctrl-utils.h"
+#include "../pinmux.h"
+#include "../pinconf.h"
+#include "pinctrl-starfive-jh7110.h"
+
+#define JH7110_SYS_NGPIO 64
+#define JH7110_SYS_GC_BASE 0
+
+/* registers */
+#define JH7110_SYS_DOEN 0x000
+#define JH7110_SYS_DOUT 0x040
+#define JH7110_SYS_GPI 0x080
+#define JH7110_SYS_GPIOIN 0x118
+
+#define JH7110_SYS_GPIOEN 0x0dc
+#define JH7110_SYS_GPIOIS0 0x0e0
+#define JH7110_SYS_GPIOIS1 0x0e4
+#define JH7110_SYS_GPIOIC0 0x0e8
+#define JH7110_SYS_GPIOIC1 0x0ec
+#define JH7110_SYS_GPIOIBE0 0x0f0
+#define JH7110_SYS_GPIOIBE1 0x0f4
+#define JH7110_SYS_GPIOIEV0 0x0f8
+#define JH7110_SYS_GPIOIEV1 0x0fc
+#define JH7110_SYS_GPIOIE0 0x100
+#define JH7110_SYS_GPIOIE1 0x104
+#define JH7110_SYS_GPIORIS0 0x108
+#define JH7110_SYS_GPIORIS1 0x10c
+#define JH7110_SYS_GPIOMIS0 0x110
+#define JH7110_SYS_GPIOMIS1 0x114
+
+#define JH7110_SYS_GPO_PDA_0_74_CFG 0x120
+#define JH7110_SYS_GPO_PDA_89_94_CFG 0x284
+
+static const struct pinctrl_pin_desc jh7110_sys_pins[] = {
+ PINCTRL_PIN(PAD_GPIO0, "GPIO0"),
+ PINCTRL_PIN(PAD_GPIO1, "GPIO1"),
+ PINCTRL_PIN(PAD_GPIO2, "GPIO2"),
+ PINCTRL_PIN(PAD_GPIO3, "GPIO3"),
+ PINCTRL_PIN(PAD_GPIO4, "GPIO4"),
+ PINCTRL_PIN(PAD_GPIO5, "GPIO5"),
+ PINCTRL_PIN(PAD_GPIO6, "GPIO6"),
+ PINCTRL_PIN(PAD_GPIO7, "GPIO7"),
+ PINCTRL_PIN(PAD_GPIO8, "GPIO8"),
+ PINCTRL_PIN(PAD_GPIO9, "GPIO9"),
+ PINCTRL_PIN(PAD_GPIO10, "GPIO10"),
+ PINCTRL_PIN(PAD_GPIO11, "GPIO11"),
+ PINCTRL_PIN(PAD_GPIO12, "GPIO12"),
+ PINCTRL_PIN(PAD_GPIO13, "GPIO13"),
+ PINCTRL_PIN(PAD_GPIO14, "GPIO14"),
+ PINCTRL_PIN(PAD_GPIO15, "GPIO15"),
+ PINCTRL_PIN(PAD_GPIO16, "GPIO16"),
+ PINCTRL_PIN(PAD_GPIO17, "GPIO17"),
+ PINCTRL_PIN(PAD_GPIO18, "GPIO18"),
+ PINCTRL_PIN(PAD_GPIO19, "GPIO19"),
+ PINCTRL_PIN(PAD_GPIO20, "GPIO20"),
+ PINCTRL_PIN(PAD_GPIO21, "GPIO21"),
+ PINCTRL_PIN(PAD_GPIO22, "GPIO22"),
+ PINCTRL_PIN(PAD_GPIO23, "GPIO23"),
+ PINCTRL_PIN(PAD_GPIO24, "GPIO24"),
+ PINCTRL_PIN(PAD_GPIO25, "GPIO25"),
+ PINCTRL_PIN(PAD_GPIO26, "GPIO26"),
+ PINCTRL_PIN(PAD_GPIO27, "GPIO27"),
+ PINCTRL_PIN(PAD_GPIO28, "GPIO28"),
+ PINCTRL_PIN(PAD_GPIO29, "GPIO29"),
+ PINCTRL_PIN(PAD_GPIO30, "GPIO30"),
+ PINCTRL_PIN(PAD_GPIO31, "GPIO31"),
+ PINCTRL_PIN(PAD_GPIO32, "GPIO32"),
+ PINCTRL_PIN(PAD_GPIO33, "GPIO33"),
+ PINCTRL_PIN(PAD_GPIO34, "GPIO34"),
+ PINCTRL_PIN(PAD_GPIO35, "GPIO35"),
+ PINCTRL_PIN(PAD_GPIO36, "GPIO36"),
+ PINCTRL_PIN(PAD_GPIO37, "GPIO37"),
+ PINCTRL_PIN(PAD_GPIO38, "GPIO38"),
+ PINCTRL_PIN(PAD_GPIO39, "GPIO39"),
+ PINCTRL_PIN(PAD_GPIO40, "GPIO40"),
+ PINCTRL_PIN(PAD_GPIO41, "GPIO41"),
+ PINCTRL_PIN(PAD_GPIO42, "GPIO42"),
+ PINCTRL_PIN(PAD_GPIO43, "GPIO43"),
+ PINCTRL_PIN(PAD_GPIO44, "GPIO44"),
+ PINCTRL_PIN(PAD_GPIO45, "GPIO45"),
+ PINCTRL_PIN(PAD_GPIO46, "GPIO46"),
+ PINCTRL_PIN(PAD_GPIO47, "GPIO47"),
+ PINCTRL_PIN(PAD_GPIO48, "GPIO48"),
+ PINCTRL_PIN(PAD_GPIO49, "GPIO49"),
+ PINCTRL_PIN(PAD_GPIO50, "GPIO50"),
+ PINCTRL_PIN(PAD_GPIO51, "GPIO51"),
+ PINCTRL_PIN(PAD_GPIO52, "GPIO52"),
+ PINCTRL_PIN(PAD_GPIO53, "GPIO53"),
+ PINCTRL_PIN(PAD_GPIO54, "GPIO54"),
+ PINCTRL_PIN(PAD_GPIO55, "GPIO55"),
+ PINCTRL_PIN(PAD_GPIO56, "GPIO56"),
+ PINCTRL_PIN(PAD_GPIO57, "GPIO57"),
+ PINCTRL_PIN(PAD_GPIO58, "GPIO58"),
+ PINCTRL_PIN(PAD_GPIO59, "GPIO59"),
+ PINCTRL_PIN(PAD_GPIO60, "GPIO60"),
+ PINCTRL_PIN(PAD_GPIO61, "GPIO61"),
+ PINCTRL_PIN(PAD_GPIO62, "GPIO62"),
+ PINCTRL_PIN(PAD_GPIO63, "GPIO63"),
+ PINCTRL_PIN(PAD_SD0_CLK, "SD0_CLK"),
+ PINCTRL_PIN(PAD_SD0_CMD, "SD0_CMD"),
+ PINCTRL_PIN(PAD_SD0_DATA0, "SD0_DATA0"),
+ PINCTRL_PIN(PAD_SD0_DATA1, "SD0_DATA1"),
+ PINCTRL_PIN(PAD_SD0_DATA2, "SD0_DATA2"),
+ PINCTRL_PIN(PAD_SD0_DATA3, "SD0_DATA3"),
+ PINCTRL_PIN(PAD_SD0_DATA4, "SD0_DATA4"),
+ PINCTRL_PIN(PAD_SD0_DATA5, "SD0_DATA5"),
+ PINCTRL_PIN(PAD_SD0_DATA6, "SD0_DATA6"),
+ PINCTRL_PIN(PAD_SD0_DATA7, "SD0_DATA7"),
+ PINCTRL_PIN(PAD_SD0_STRB, "SD0_STRB"),
+ PINCTRL_PIN(PAD_GMAC1_MDC, "GMAC1_MDC"),
+ PINCTRL_PIN(PAD_GMAC1_MDIO, "GMAC1_MDIO"),
+ PINCTRL_PIN(PAD_GMAC1_RXD0, "GMAC1_RXD0"),
+ PINCTRL_PIN(PAD_GMAC1_RXD1, "GMAC1_RXD1"),
+ PINCTRL_PIN(PAD_GMAC1_RXD2, "GMAC1_RXD2"),
+ PINCTRL_PIN(PAD_GMAC1_RXD3, "GMAC1_RXD3"),
+ PINCTRL_PIN(PAD_GMAC1_RXDV, "GMAC1_RXDV"),
+ PINCTRL_PIN(PAD_GMAC1_RXC, "GMAC1_RXC"),
+ PINCTRL_PIN(PAD_GMAC1_TXD0, "GMAC1_TXD0"),
+ PINCTRL_PIN(PAD_GMAC1_TXD1, "GMAC1_TXD1"),
+ PINCTRL_PIN(PAD_GMAC1_TXD2, "GMAC1_TXD2"),
+ PINCTRL_PIN(PAD_GMAC1_TXD3, "GMAC1_TXD3"),
+ PINCTRL_PIN(PAD_GMAC1_TXEN, "GMAC1_TXEN"),
+ PINCTRL_PIN(PAD_GMAC1_TXC, "GMAC1_TXC"),
+ PINCTRL_PIN(PAD_QSPI_SCLK, "QSPI_SCLK"),
+ PINCTRL_PIN(PAD_QSPI_CS0, "QSPI_CS0"),
+ PINCTRL_PIN(PAD_QSPI_DATA0, "QSPI_DATA0"),
+ PINCTRL_PIN(PAD_QSPI_DATA1, "QSPI_DATA1"),
+ PINCTRL_PIN(PAD_QSPI_DATA2, "QSPI_DATA2"),
+ PINCTRL_PIN(PAD_QSPI_DATA3, "QSPI_DATA3"),
+};
+
+struct jh7110_func_sel {
+ u16 offset;
+ u8 shift;
+ u8 max;
+};
+
+static const struct jh7110_func_sel
+ jh7110_sys_func_sel[ARRAY_SIZE(jh7110_sys_pins)] = {
+ [PAD_GMAC1_RXC] = { 0x29c, 0, 1 },
+ [PAD_GPIO10] = { 0x29c, 2, 3 },
+ [PAD_GPIO11] = { 0x29c, 5, 3 },
+ [PAD_GPIO12] = { 0x29c, 8, 3 },
+ [PAD_GPIO13] = { 0x29c, 11, 3 },
+ [PAD_GPIO14] = { 0x29c, 14, 3 },
+ [PAD_GPIO15] = { 0x29c, 17, 3 },
+ [PAD_GPIO16] = { 0x29c, 20, 3 },
+ [PAD_GPIO17] = { 0x29c, 23, 3 },
+ [PAD_GPIO18] = { 0x29c, 26, 3 },
+ [PAD_GPIO19] = { 0x29c, 29, 3 },
+
+ [PAD_GPIO20] = { 0x2a0, 0, 3 },
+ [PAD_GPIO21] = { 0x2a0, 3, 3 },
+ [PAD_GPIO22] = { 0x2a0, 6, 3 },
+ [PAD_GPIO23] = { 0x2a0, 9, 3 },
+ [PAD_GPIO24] = { 0x2a0, 12, 3 },
+ [PAD_GPIO25] = { 0x2a0, 15, 3 },
+ [PAD_GPIO26] = { 0x2a0, 18, 3 },
+ [PAD_GPIO27] = { 0x2a0, 21, 3 },
+ [PAD_GPIO28] = { 0x2a0, 24, 3 },
+ [PAD_GPIO29] = { 0x2a0, 27, 3 },
+
+ [PAD_GPIO30] = { 0x2a4, 0, 3 },
+ [PAD_GPIO31] = { 0x2a4, 3, 3 },
+ [PAD_GPIO32] = { 0x2a4, 6, 3 },
+ [PAD_GPIO33] = { 0x2a4, 9, 3 },
+ [PAD_GPIO34] = { 0x2a4, 12, 3 },
+ [PAD_GPIO35] = { 0x2a4, 15, 3 },
+ [PAD_GPIO36] = { 0x2a4, 17, 3 },
+ [PAD_GPIO37] = { 0x2a4, 20, 3 },
+ [PAD_GPIO38] = { 0x2a4, 23, 3 },
+ [PAD_GPIO39] = { 0x2a4, 26, 3 },
+ [PAD_GPIO40] = { 0x2a4, 29, 3 },
+
+ [PAD_GPIO41] = { 0x2a8, 0, 3 },
+ [PAD_GPIO42] = { 0x2a8, 3, 3 },
+ [PAD_GPIO43] = { 0x2a8, 6, 3 },
+ [PAD_GPIO44] = { 0x2a8, 9, 3 },
+ [PAD_GPIO45] = { 0x2a8, 12, 3 },
+ [PAD_GPIO46] = { 0x2a8, 15, 3 },
+ [PAD_GPIO47] = { 0x2a8, 18, 3 },
+ [PAD_GPIO48] = { 0x2a8, 21, 3 },
+ [PAD_GPIO49] = { 0x2a8, 24, 3 },
+ [PAD_GPIO50] = { 0x2a8, 27, 3 },
+ [PAD_GPIO51] = { 0x2a8, 30, 3 },
+
+ [PAD_GPIO52] = { 0x2ac, 0, 3 },
+ [PAD_GPIO53] = { 0x2ac, 2, 3 },
+ [PAD_GPIO54] = { 0x2ac, 4, 3 },
+ [PAD_GPIO55] = { 0x2ac, 6, 3 },
+ [PAD_GPIO56] = { 0x2ac, 9, 3 },
+ [PAD_GPIO57] = { 0x2ac, 12, 3 },
+ [PAD_GPIO58] = { 0x2ac, 15, 3 },
+ [PAD_GPIO59] = { 0x2ac, 18, 3 },
+ [PAD_GPIO60] = { 0x2ac, 21, 3 },
+ [PAD_GPIO61] = { 0x2ac, 24, 3 },
+ [PAD_GPIO62] = { 0x2ac, 27, 3 },
+ [PAD_GPIO63] = { 0x2ac, 30, 3 },
+
+ [PAD_GPIO6] = { 0x2b0, 0, 3 },
+ [PAD_GPIO7] = { 0x2b0, 2, 3 },
+ [PAD_GPIO8] = { 0x2b0, 5, 3 },
+ [PAD_GPIO9] = { 0x2b0, 8, 3 },
+};
+
+struct jh7110_vin_group_sel {
+ u16 offset;
+ u8 shift;
+ u8 group;
+};
+
+static const struct jh7110_vin_group_sel
+ jh7110_sys_vin_group_sel[ARRAY_SIZE(jh7110_sys_pins)] = {
+ [PAD_GPIO6] = { 0x2b4, 21, 0 },
+ [PAD_GPIO7] = { 0x2b4, 18, 0 },
+ [PAD_GPIO8] = { 0x2b4, 15, 0 },
+ [PAD_GPIO9] = { 0x2b0, 11, 0 },
+ [PAD_GPIO10] = { 0x2b0, 20, 0 },
+ [PAD_GPIO11] = { 0x2b0, 23, 0 },
+ [PAD_GPIO12] = { 0x2b0, 26, 0 },
+ [PAD_GPIO13] = { 0x2b0, 29, 0 },
+ [PAD_GPIO14] = { 0x2b4, 0, 0 },
+ [PAD_GPIO15] = { 0x2b4, 3, 0 },
+ [PAD_GPIO16] = { 0x2b4, 6, 0 },
+ [PAD_GPIO17] = { 0x2b4, 9, 0 },
+ [PAD_GPIO18] = { 0x2b4, 12, 0 },
+ [PAD_GPIO19] = { 0x2b0, 14, 0 },
+ [PAD_GPIO20] = { 0x2b0, 17, 0 },
+
+ [PAD_GPIO21] = { 0x2b4, 21, 1 },
+ [PAD_GPIO22] = { 0x2b4, 18, 1 },
+ [PAD_GPIO23] = { 0x2b4, 15, 1 },
+ [PAD_GPIO24] = { 0x2b0, 11, 1 },
+ [PAD_GPIO25] = { 0x2b0, 20, 1 },
+ [PAD_GPIO26] = { 0x2b0, 23, 1 },
+ [PAD_GPIO27] = { 0x2b0, 26, 1 },
+ [PAD_GPIO28] = { 0x2b0, 29, 1 },
+ [PAD_GPIO29] = { 0x2b4, 0, 1 },
+ [PAD_GPIO30] = { 0x2b4, 3, 1 },
+ [PAD_GPIO31] = { 0x2b4, 6, 1 },
+ [PAD_GPIO32] = { 0x2b4, 9, 1 },
+ [PAD_GPIO33] = { 0x2b4, 12, 1 },
+ [PAD_GPIO34] = { 0x2b0, 14, 1 },
+ [PAD_GPIO35] = { 0x2b0, 17, 1 },
+
+ [PAD_GPIO36] = { 0x2b4, 21, 2 },
+ [PAD_GPIO37] = { 0x2b4, 18, 2 },
+ [PAD_GPIO38] = { 0x2b4, 15, 2 },
+ [PAD_GPIO39] = { 0x2b0, 11, 2 },
+ [PAD_GPIO40] = { 0x2b0, 20, 2 },
+ [PAD_GPIO41] = { 0x2b0, 23, 2 },
+ [PAD_GPIO42] = { 0x2b0, 26, 2 },
+ [PAD_GPIO43] = { 0x2b0, 29, 2 },
+ [PAD_GPIO44] = { 0x2b4, 0, 2 },
+ [PAD_GPIO45] = { 0x2b4, 3, 2 },
+ [PAD_GPIO46] = { 0x2b4, 6, 2 },
+ [PAD_GPIO47] = { 0x2b4, 9, 2 },
+ [PAD_GPIO48] = { 0x2b4, 12, 2 },
+ [PAD_GPIO49] = { 0x2b0, 14, 2 },
+ [PAD_GPIO50] = { 0x2b0, 17, 2 },
+};
+
+static void jh7110_set_function(struct jh7110_pinctrl *sfp,
+ unsigned int pin, u32 func)
+{
+ const struct jh7110_func_sel *fs = &jh7110_sys_func_sel[pin];
+ unsigned long flags;
+ void __iomem *reg;
+ u32 mask;
+
+ if (!fs->offset)
+ return;
+
+ if (func > fs->max)
+ return;
+
+ reg = sfp->base + fs->offset;
+ func = func << fs->shift;
+ mask = 0x3U << fs->shift;
+
+ raw_spin_lock_irqsave(&sfp->lock, flags);
+ func |= readl_relaxed(reg) & ~mask;
+ writel_relaxed(func, reg);
+ raw_spin_unlock_irqrestore(&sfp->lock, flags);
+}
+
+static void jh7110_set_vin_group(struct jh7110_pinctrl *sfp,
+ unsigned int pin)
+{
+ const struct jh7110_vin_group_sel *gs = &jh7110_sys_vin_group_sel[pin];
+ unsigned long flags;
+ void __iomem *reg;
+ u32 mask;
+ u32 grp;
+
+ if (!gs->offset)
+ return;
+
+ reg = sfp->base + gs->offset;
+ grp = gs->group << gs->shift;
+ mask = 0x3U << gs->shift;
+
+ raw_spin_lock_irqsave(&sfp->lock, flags);
+ grp |= readl_relaxed(reg) & ~mask;
+ writel_relaxed(grp, reg);
+ raw_spin_unlock_irqrestore(&sfp->lock, flags);
+}
+
+static int jh7110_sys_set_one_pin_mux(struct jh7110_pinctrl *sfp,
+ unsigned int pin,
+ unsigned int din, u32 dout,
+ u32 doen, u32 func)
+{
+ if (pin < sfp->gc.ngpio && func == 0)
+ jh7110_set_gpiomux(sfp, pin, din, dout, doen);
+
+ jh7110_set_function(sfp, pin, func);
+
+ if (pin < sfp->gc.ngpio && func == 2)
+ jh7110_set_vin_group(sfp, pin);
+
+ return 0;
+}
+
+static int jh7110_sys_get_padcfg_base(struct jh7110_pinctrl *sfp,
+ unsigned int pin)
+{
+ if (pin < PAD_GMAC1_MDC)
+ return JH7110_SYS_GPO_PDA_0_74_CFG;
+ else if (pin > PAD_GMAC1_TXC && pin <= PAD_QSPI_DATA3)
+ return JH7110_SYS_GPO_PDA_89_94_CFG;
+ else
+ return -1;
+}
+
+static void jh7110_sys_irq_handler(struct irq_desc *desc)
+{
+ struct jh7110_pinctrl *sfp = jh7110_from_irq_desc(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ unsigned long mis;
+ unsigned int pin;
+
+ chained_irq_enter(chip, desc);
+
+ mis = readl_relaxed(sfp->base + JH7110_SYS_GPIOMIS0);
+ for_each_set_bit(pin, &mis, 32)
+ generic_handle_domain_irq(sfp->gc.irq.domain, pin);
+
+ mis = readl_relaxed(sfp->base + JH7110_SYS_GPIOMIS1);
+ for_each_set_bit(pin, &mis, 32)
+ generic_handle_domain_irq(sfp->gc.irq.domain, pin + 32);
+
+ chained_irq_exit(chip, desc);
+}
+
+static int jh7110_sys_init_hw(struct gpio_chip *gc)
+{
+ struct jh7110_pinctrl *sfp = container_of(gc,
+ struct jh7110_pinctrl, gc);
+
+ /* mask all GPIO interrupts */
+ writel(0U, sfp->base + JH7110_SYS_GPIOIE0);
+ writel(0U, sfp->base + JH7110_SYS_GPIOIE1);
+ /* clear edge interrupt flags */
+ writel(~0U, sfp->base + JH7110_SYS_GPIOIC0);
+ writel(~0U, sfp->base + JH7110_SYS_GPIOIC1);
+ /* enable GPIO interrupts */
+ writel(1U, sfp->base + JH7110_SYS_GPIOEN);
+ return 0;
+}
+
+static const struct jh7110_gpio_irq_reg jh7110_sys_irq_reg = {
+ .is_reg_base = JH7110_SYS_GPIOIS0,
+ .ic_reg_base = JH7110_SYS_GPIOIC0,
+ .ibe_reg_base = JH7110_SYS_GPIOIBE0,
+ .iev_reg_base = JH7110_SYS_GPIOIEV0,
+ .ie_reg_base = JH7110_SYS_GPIOIE0,
+ .ris_reg_base = JH7110_SYS_GPIORIS0,
+ .mis_reg_base = JH7110_SYS_GPIOMIS0,
+};
+
+static const struct jh7110_pinctrl_soc_info jh7110_sys_pinctrl_info = {
+ .pins = jh7110_sys_pins,
+ .npins = ARRAY_SIZE(jh7110_sys_pins),
+ .ngpios = JH7110_SYS_NGPIO,
+ .gc_base = JH7110_SYS_GC_BASE,
+ .dout_reg_base = JH7110_SYS_DOUT,
+ .dout_mask = GENMASK(6, 0),
+ .doen_reg_base = JH7110_SYS_DOEN,
+ .doen_mask = GENMASK(5, 0),
+ .gpi_reg_base = JH7110_SYS_GPI,
+ .gpi_mask = GENMASK(6, 0),
+ .gpioin_reg_base = JH7110_SYS_GPIOIN,
+ .irq_reg = &jh7110_sys_irq_reg,
+ .jh7110_set_one_pin_mux = jh7110_sys_set_one_pin_mux,
+ .jh7110_get_padcfg_base = jh7110_sys_get_padcfg_base,
+ .jh7110_gpio_irq_handler = jh7110_sys_irq_handler,
+ .jh7110_gpio_init_hw = jh7110_sys_init_hw,
+};
+
+static const struct of_device_id jh7110_sys_pinctrl_of_match[] = {
+ {
+ .compatible = "starfive,jh7110-sys-pinctrl",
+ .data = &jh7110_sys_pinctrl_info,
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, jh7110_sys_pinctrl_of_match);
+
+static struct platform_driver jh7110_sys_pinctrl_driver = {
+ .probe = jh7110_pinctrl_probe,
+ .driver = {
+ .name = "starfive-jh7110-sys-pinctrl",
+ .of_match_table = jh7110_sys_pinctrl_of_match,
+ },
+};
+module_platform_driver(jh7110_sys_pinctrl_driver);
+
+MODULE_DESCRIPTION("Pinctrl driver for the StarFive JH7110 SoC sys controller");
+MODULE_AUTHOR("Emil Renner Berthing <kernel@esmil.dk>");
+MODULE_AUTHOR("Jianlong Huang <jianlong.huang@starfivetech.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/starfive/pinctrl-starfive-jh7110.c b/drivers/pinctrl/starfive/pinctrl-starfive-jh7110.c
new file mode 100644
index 000000000000..5fe729b4a03d
--- /dev/null
+++ b/drivers/pinctrl/starfive/pinctrl-starfive-jh7110.c
@@ -0,0 +1,982 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Pinctrl / GPIO driver for StarFive JH7110 SoC
+ *
+ * Copyright (C) 2022 Emil Renner Berthing <kernel@esmil.dk>
+ * Copyright (C) 2022 StarFive Technology Co., Ltd.
+ */
+
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/gpio/driver.h>
+#include <linux/io.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <linux/seq_file.h>
+#include <linux/spinlock.h>
+
+#include <linux/pinctrl/consumer.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+
+#include <dt-bindings/pinctrl/starfive,jh7110-pinctrl.h>
+
+#include "../core.h"
+#include "../pinctrl-utils.h"
+#include "../pinmux.h"
+#include "../pinconf.h"
+#include "pinctrl-starfive-jh7110.h"
+
+/* pad control bits */
+#define JH7110_PADCFG_POS BIT(7)
+#define JH7110_PADCFG_SMT BIT(6)
+#define JH7110_PADCFG_SLEW BIT(5)
+#define JH7110_PADCFG_PD BIT(4)
+#define JH7110_PADCFG_PU BIT(3)
+#define JH7110_PADCFG_BIAS (JH7110_PADCFG_PD | JH7110_PADCFG_PU)
+#define JH7110_PADCFG_DS_MASK GENMASK(2, 1)
+#define JH7110_PADCFG_DS_2MA (0U << 1)
+#define JH7110_PADCFG_DS_4MA BIT(1)
+#define JH7110_PADCFG_DS_8MA (2U << 1)
+#define JH7110_PADCFG_DS_12MA (3U << 1)
+#define JH7110_PADCFG_IE BIT(0)
+
+/*
+ * The packed pinmux values from the device tree look like this:
+ *
+ * | 31 - 24 | 23 - 16 | 15 - 10 | 9 - 8 | 7 - 0 |
+ * | din | dout | doen | function | pin |
+ */
+static unsigned int jh7110_pinmux_din(u32 v)
+{
+ return (v & GENMASK(31, 24)) >> 24;
+}
+
+static u32 jh7110_pinmux_dout(u32 v)
+{
+ return (v & GENMASK(23, 16)) >> 16;
+}
+
+static u32 jh7110_pinmux_doen(u32 v)
+{
+ return (v & GENMASK(15, 10)) >> 10;
+}
+
+static u32 jh7110_pinmux_function(u32 v)
+{
+ return (v & GENMASK(9, 8)) >> 8;
+}
+
+static unsigned int jh7110_pinmux_pin(u32 v)
+{
+ return v & GENMASK(7, 0);
+}
+
+static struct jh7110_pinctrl *jh7110_from_irq_data(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+
+ return container_of(gc, struct jh7110_pinctrl, gc);
+}
+
+struct jh7110_pinctrl *jh7110_from_irq_desc(struct irq_desc *desc)
+{
+ struct gpio_chip *gc = irq_desc_get_handler_data(desc);
+
+ return container_of(gc, struct jh7110_pinctrl, gc);
+}
+EXPORT_SYMBOL_GPL(jh7110_from_irq_desc);
+
+#ifdef CONFIG_DEBUG_FS
+static void jh7110_pin_dbg_show(struct pinctrl_dev *pctldev,
+ struct seq_file *s, unsigned int pin)
+{
+ struct jh7110_pinctrl *sfp = pinctrl_dev_get_drvdata(pctldev);
+ const struct jh7110_pinctrl_soc_info *info = sfp->info;
+
+ seq_printf(s, "%s", dev_name(pctldev->dev));
+
+ if (pin < sfp->gc.ngpio) {
+ unsigned int offset = 4 * (pin / 4);
+ unsigned int shift = 8 * (pin % 4);
+ u32 dout = readl_relaxed(sfp->base + info->dout_reg_base + offset);
+ u32 doen = readl_relaxed(sfp->base + info->doen_reg_base + offset);
+ u32 gpi = readl_relaxed(sfp->base + info->gpi_reg_base + offset);
+
+ dout = (dout >> shift) & info->dout_mask;
+ doen = (doen >> shift) & info->doen_mask;
+ gpi = ((gpi >> shift) - 2) & info->gpi_mask;
+
+ seq_printf(s, " dout=%u doen=%u din=%u", dout, doen, gpi);
+ }
+}
+#else
+#define jh7110_pin_dbg_show NULL
+#endif
+
+static int jh7110_dt_node_to_map(struct pinctrl_dev *pctldev,
+ struct device_node *np,
+ struct pinctrl_map **maps,
+ unsigned int *num_maps)
+{
+ struct jh7110_pinctrl *sfp = pinctrl_dev_get_drvdata(pctldev);
+ struct device *dev = sfp->gc.parent;
+ struct device_node *child;
+ struct pinctrl_map *map;
+ const char **pgnames;
+ const char *grpname;
+ int ngroups;
+ int nmaps;
+ int ret;
+
+ ngroups = 0;
+ for_each_child_of_node(np, child)
+ ngroups += 1;
+ nmaps = 2 * ngroups;
+
+ pgnames = devm_kcalloc(dev, ngroups, sizeof(*pgnames), GFP_KERNEL);
+ if (!pgnames)
+ return -ENOMEM;
+
+ map = kcalloc(nmaps, sizeof(*map), GFP_KERNEL);
+ if (!map)
+ return -ENOMEM;
+
+ nmaps = 0;
+ ngroups = 0;
+ mutex_lock(&sfp->mutex);
+ for_each_child_of_node(np, child) {
+ int npins = of_property_count_u32_elems(child, "pinmux");
+ int *pins;
+ u32 *pinmux;
+ int i;
+
+ if (npins < 1) {
+ dev_err(dev,
+ "invalid pinctrl group %pOFn.%pOFn: pinmux not set\n",
+ np, child);
+ ret = -EINVAL;
+ goto put_child;
+ }
+
+ grpname = devm_kasprintf(dev, GFP_KERNEL, "%pOFn.%pOFn", np, child);
+ if (!grpname) {
+ ret = -ENOMEM;
+ goto put_child;
+ }
+
+ pgnames[ngroups++] = grpname;
+
+ pins = devm_kcalloc(dev, npins, sizeof(*pins), GFP_KERNEL);
+ if (!pins) {
+ ret = -ENOMEM;
+ goto put_child;
+ }
+
+ pinmux = devm_kcalloc(dev, npins, sizeof(*pinmux), GFP_KERNEL);
+ if (!pinmux) {
+ ret = -ENOMEM;
+ goto put_child;
+ }
+
+ ret = of_property_read_u32_array(child, "pinmux", pinmux, npins);
+ if (ret)
+ goto put_child;
+
+ for (i = 0; i < npins; i++)
+ pins[i] = jh7110_pinmux_pin(pinmux[i]);
+
+ map[nmaps].type = PIN_MAP_TYPE_MUX_GROUP;
+ map[nmaps].data.mux.function = np->name;
+ map[nmaps].data.mux.group = grpname;
+ nmaps += 1;
+
+ ret = pinctrl_generic_add_group(pctldev, grpname,
+ pins, npins, pinmux);
+ if (ret < 0) {
+ dev_err(dev, "error adding group %s: %d\n", grpname, ret);
+ goto put_child;
+ }
+
+ ret = pinconf_generic_parse_dt_config(child, pctldev,
+ &map[nmaps].data.configs.configs,
+ &map[nmaps].data.configs.num_configs);
+ if (ret) {
+ dev_err(dev, "error parsing pin config of group %s: %d\n",
+ grpname, ret);
+ goto put_child;
+ }
+
+ /* don't create a map if there are no pinconf settings */
+ if (map[nmaps].data.configs.num_configs == 0)
+ continue;
+
+ map[nmaps].type = PIN_MAP_TYPE_CONFIGS_GROUP;
+ map[nmaps].data.configs.group_or_pin = grpname;
+ nmaps += 1;
+ }
+
+ ret = pinmux_generic_add_function(pctldev, np->name,
+ pgnames, ngroups, NULL);
+ if (ret < 0) {
+ dev_err(dev, "error adding function %s: %d\n", np->name, ret);
+ goto free_map;
+ }
+ mutex_unlock(&sfp->mutex);
+
+ *maps = map;
+ *num_maps = nmaps;
+ return 0;
+
+put_child:
+ of_node_put(child);
+free_map:
+ pinctrl_utils_free_map(pctldev, map, nmaps);
+ mutex_unlock(&sfp->mutex);
+ return ret;
+}
+
+static const struct pinctrl_ops jh7110_pinctrl_ops = {
+ .get_groups_count = pinctrl_generic_get_group_count,
+ .get_group_name = pinctrl_generic_get_group_name,
+ .get_group_pins = pinctrl_generic_get_group_pins,
+ .pin_dbg_show = jh7110_pin_dbg_show,
+ .dt_node_to_map = jh7110_dt_node_to_map,
+ .dt_free_map = pinctrl_utils_free_map,
+};
+
+void jh7110_set_gpiomux(struct jh7110_pinctrl *sfp, unsigned int pin,
+ unsigned int din, u32 dout, u32 doen)
+{
+ const struct jh7110_pinctrl_soc_info *info = sfp->info;
+
+ unsigned int offset = 4 * (pin / 4);
+ unsigned int shift = 8 * (pin % 4);
+ u32 dout_mask = info->dout_mask << shift;
+ u32 done_mask = info->doen_mask << shift;
+ u32 ival, imask;
+ void __iomem *reg_dout;
+ void __iomem *reg_doen;
+ void __iomem *reg_din;
+ unsigned long flags;
+
+ reg_dout = sfp->base + info->dout_reg_base + offset;
+ reg_doen = sfp->base + info->doen_reg_base + offset;
+ dout <<= shift;
+ doen <<= shift;
+ if (din != GPI_NONE) {
+ unsigned int ioffset = 4 * (din / 4);
+ unsigned int ishift = 8 * (din % 4);
+
+ reg_din = sfp->base + info->gpi_reg_base + ioffset;
+ ival = (pin + 2) << ishift;
+ imask = info->gpi_mask << ishift;
+ } else {
+ reg_din = NULL;
+ }
+
+ raw_spin_lock_irqsave(&sfp->lock, flags);
+ dout |= readl_relaxed(reg_dout) & ~dout_mask;
+ writel_relaxed(dout, reg_dout);
+ doen |= readl_relaxed(reg_doen) & ~done_mask;
+ writel_relaxed(doen, reg_doen);
+ if (reg_din) {
+ ival |= readl_relaxed(reg_din) & ~imask;
+ writel_relaxed(ival, reg_din);
+ }
+ raw_spin_unlock_irqrestore(&sfp->lock, flags);
+}
+EXPORT_SYMBOL_GPL(jh7110_set_gpiomux);
+
+static int jh7110_set_mux(struct pinctrl_dev *pctldev,
+ unsigned int fsel, unsigned int gsel)
+{
+ struct jh7110_pinctrl *sfp = pinctrl_dev_get_drvdata(pctldev);
+ const struct jh7110_pinctrl_soc_info *info = sfp->info;
+ const struct group_desc *group;
+ const u32 *pinmux;
+ unsigned int i;
+
+ group = pinctrl_generic_get_group(pctldev, gsel);
+ if (!group)
+ return -EINVAL;
+
+ pinmux = group->data;
+ for (i = 0; i < group->num_pins; i++) {
+ u32 v = pinmux[i];
+
+ if (info->jh7110_set_one_pin_mux)
+ info->jh7110_set_one_pin_mux(sfp,
+ jh7110_pinmux_pin(v),
+ jh7110_pinmux_din(v),
+ jh7110_pinmux_dout(v),
+ jh7110_pinmux_doen(v),
+ jh7110_pinmux_function(v));
+ }
+
+ return 0;
+}
+
+static const struct pinmux_ops jh7110_pinmux_ops = {
+ .get_functions_count = pinmux_generic_get_function_count,
+ .get_function_name = pinmux_generic_get_function_name,
+ .get_function_groups = pinmux_generic_get_function_groups,
+ .set_mux = jh7110_set_mux,
+ .strict = true,
+};
+
+static const u8 jh7110_drive_strength_mA[4] = { 2, 4, 8, 12 };
+
+static u32 jh7110_padcfg_ds_to_mA(u32 padcfg)
+{
+ return jh7110_drive_strength_mA[(padcfg >> 1) & 3U];
+}
+
+static u32 jh7110_padcfg_ds_from_mA(u32 v)
+{
+ int i;
+
+ for (i = 0; i < 3; i++) {
+ if (v <= jh7110_drive_strength_mA[i])
+ break;
+ }
+ return i << 1;
+}
+
+static void jh7110_padcfg_rmw(struct jh7110_pinctrl *sfp,
+ unsigned int pin, u32 mask, u32 value)
+{
+ const struct jh7110_pinctrl_soc_info *info = sfp->info;
+ void __iomem *reg;
+ unsigned long flags;
+ int padcfg_base;
+
+ if (!info->jh7110_get_padcfg_base)
+ return;
+
+ padcfg_base = info->jh7110_get_padcfg_base(sfp, pin);
+ if (padcfg_base < 0)
+ return;
+
+ reg = sfp->base + padcfg_base + 4 * pin;
+ value &= mask;
+
+ raw_spin_lock_irqsave(&sfp->lock, flags);
+ value |= readl_relaxed(reg) & ~mask;
+ writel_relaxed(value, reg);
+ raw_spin_unlock_irqrestore(&sfp->lock, flags);
+}
+
+static int jh7110_pinconf_get(struct pinctrl_dev *pctldev,
+ unsigned int pin, unsigned long *config)
+{
+ struct jh7110_pinctrl *sfp = pinctrl_dev_get_drvdata(pctldev);
+ const struct jh7110_pinctrl_soc_info *info = sfp->info;
+ int param = pinconf_to_config_param(*config);
+ u32 padcfg, arg;
+ bool enabled;
+ int padcfg_base;
+
+ if (!info->jh7110_get_padcfg_base)
+ return 0;
+
+ padcfg_base = info->jh7110_get_padcfg_base(sfp, pin);
+ if (padcfg_base < 0)
+ return 0;
+
+ padcfg = readl_relaxed(sfp->base + padcfg_base + 4 * pin);
+ switch (param) {
+ case PIN_CONFIG_BIAS_DISABLE:
+ enabled = !(padcfg & JH7110_PADCFG_BIAS);
+ arg = 0;
+ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ enabled = padcfg & JH7110_PADCFG_PD;
+ arg = 1;
+ break;
+ case PIN_CONFIG_BIAS_PULL_UP:
+ enabled = padcfg & JH7110_PADCFG_PU;
+ arg = 1;
+ break;
+ case PIN_CONFIG_DRIVE_STRENGTH:
+ enabled = true;
+ arg = jh7110_padcfg_ds_to_mA(padcfg);
+ break;
+ case PIN_CONFIG_INPUT_ENABLE:
+ enabled = padcfg & JH7110_PADCFG_IE;
+ arg = enabled;
+ break;
+ case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
+ enabled = padcfg & JH7110_PADCFG_SMT;
+ arg = enabled;
+ break;
+ case PIN_CONFIG_SLEW_RATE:
+ enabled = true;
+ arg = !!(padcfg & JH7110_PADCFG_SLEW);
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ *config = pinconf_to_config_packed(param, arg);
+ return enabled ? 0 : -EINVAL;
+}
+
+static int jh7110_pinconf_group_get(struct pinctrl_dev *pctldev,
+ unsigned int gsel,
+ unsigned long *config)
+{
+ const struct group_desc *group;
+
+ group = pinctrl_generic_get_group(pctldev, gsel);
+ if (!group)
+ return -EINVAL;
+
+ return jh7110_pinconf_get(pctldev, group->pins[0], config);
+}
+
+static int jh7110_pinconf_group_set(struct pinctrl_dev *pctldev,
+ unsigned int gsel,
+ unsigned long *configs,
+ unsigned int num_configs)
+{
+ struct jh7110_pinctrl *sfp = pinctrl_dev_get_drvdata(pctldev);
+ const struct group_desc *group;
+ u16 mask, value;
+ int i;
+
+ group = pinctrl_generic_get_group(pctldev, gsel);
+ if (!group)
+ return -EINVAL;
+
+ mask = 0;
+ value = 0;
+ for (i = 0; i < num_configs; i++) {
+ int param = pinconf_to_config_param(configs[i]);
+ u32 arg = pinconf_to_config_argument(configs[i]);
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_DISABLE:
+ mask |= JH7110_PADCFG_BIAS;
+ value &= ~JH7110_PADCFG_BIAS;
+ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ if (arg == 0)
+ return -ENOTSUPP;
+ mask |= JH7110_PADCFG_BIAS;
+ value = (value & ~JH7110_PADCFG_BIAS) | JH7110_PADCFG_PD;
+ break;
+ case PIN_CONFIG_BIAS_PULL_UP:
+ if (arg == 0)
+ return -ENOTSUPP;
+ mask |= JH7110_PADCFG_BIAS;
+ value = (value & ~JH7110_PADCFG_BIAS) | JH7110_PADCFG_PU;
+ break;
+ case PIN_CONFIG_DRIVE_STRENGTH:
+ mask |= JH7110_PADCFG_DS_MASK;
+ value = (value & ~JH7110_PADCFG_DS_MASK) |
+ jh7110_padcfg_ds_from_mA(arg);
+ break;
+ case PIN_CONFIG_INPUT_ENABLE:
+ mask |= JH7110_PADCFG_IE;
+ if (arg)
+ value |= JH7110_PADCFG_IE;
+ else
+ value &= ~JH7110_PADCFG_IE;
+ break;
+ case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
+ mask |= JH7110_PADCFG_SMT;
+ if (arg)
+ value |= JH7110_PADCFG_SMT;
+ else
+ value &= ~JH7110_PADCFG_SMT;
+ break;
+ case PIN_CONFIG_SLEW_RATE:
+ mask |= JH7110_PADCFG_SLEW;
+ if (arg)
+ value |= JH7110_PADCFG_SLEW;
+ else
+ value &= ~JH7110_PADCFG_SLEW;
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+ }
+
+ for (i = 0; i < group->num_pins; i++)
+ jh7110_padcfg_rmw(sfp, group->pins[i], mask, value);
+
+ return 0;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static void jh7110_pinconf_dbg_show(struct pinctrl_dev *pctldev,
+ struct seq_file *s, unsigned int pin)
+{
+ struct jh7110_pinctrl *sfp = pinctrl_dev_get_drvdata(pctldev);
+ const struct jh7110_pinctrl_soc_info *info = sfp->info;
+ u32 value;
+ int padcfg_base;
+
+ if (!info->jh7110_get_padcfg_base)
+ return;
+
+ padcfg_base = info->jh7110_get_padcfg_base(sfp, pin);
+ if (padcfg_base < 0)
+ return;
+
+ value = readl_relaxed(sfp->base + padcfg_base + 4 * pin);
+ seq_printf(s, " (0x%02x)", value);
+}
+#else
+#define jh7110_pinconf_dbg_show NULL
+#endif
+
+static const struct pinconf_ops jh7110_pinconf_ops = {
+ .pin_config_get = jh7110_pinconf_get,
+ .pin_config_group_get = jh7110_pinconf_group_get,
+ .pin_config_group_set = jh7110_pinconf_group_set,
+ .pin_config_dbg_show = jh7110_pinconf_dbg_show,
+ .is_generic = true,
+};
+
+static int jh7110_gpio_request(struct gpio_chip *gc, unsigned int gpio)
+{
+ return pinctrl_gpio_request(gc->base + gpio);
+}
+
+static void jh7110_gpio_free(struct gpio_chip *gc, unsigned int gpio)
+{
+ pinctrl_gpio_free(gc->base + gpio);
+}
+
+static int jh7110_gpio_get_direction(struct gpio_chip *gc,
+ unsigned int gpio)
+{
+ struct jh7110_pinctrl *sfp = container_of(gc,
+ struct jh7110_pinctrl, gc);
+ const struct jh7110_pinctrl_soc_info *info = sfp->info;
+ unsigned int offset = 4 * (gpio / 4);
+ unsigned int shift = 8 * (gpio % 4);
+ u32 doen = readl_relaxed(sfp->base + info->doen_reg_base + offset);
+
+ doen = (doen >> shift) & info->doen_mask;
+
+ return doen == GPOEN_ENABLE ?
+ GPIO_LINE_DIRECTION_OUT : GPIO_LINE_DIRECTION_IN;
+}
+
+static int jh7110_gpio_direction_input(struct gpio_chip *gc,
+ unsigned int gpio)
+{
+ struct jh7110_pinctrl *sfp = container_of(gc,
+ struct jh7110_pinctrl, gc);
+ const struct jh7110_pinctrl_soc_info *info = sfp->info;
+
+ /* enable input and schmitt trigger */
+ jh7110_padcfg_rmw(sfp, gpio,
+ JH7110_PADCFG_IE | JH7110_PADCFG_SMT,
+ JH7110_PADCFG_IE | JH7110_PADCFG_SMT);
+
+ if (info->jh7110_set_one_pin_mux)
+ info->jh7110_set_one_pin_mux(sfp, gpio,
+ GPI_NONE, GPOUT_LOW, GPOEN_DISABLE, 0);
+
+ return 0;
+}
+
+static int jh7110_gpio_direction_output(struct gpio_chip *gc,
+ unsigned int gpio, int value)
+{
+ struct jh7110_pinctrl *sfp = container_of(gc,
+ struct jh7110_pinctrl, gc);
+ const struct jh7110_pinctrl_soc_info *info = sfp->info;
+
+ if (info->jh7110_set_one_pin_mux)
+ info->jh7110_set_one_pin_mux(sfp, gpio,
+ GPI_NONE, value ? GPOUT_HIGH : GPOUT_LOW,
+ GPOEN_ENABLE, 0);
+
+ /* disable input, schmitt trigger and bias */
+ jh7110_padcfg_rmw(sfp, gpio,
+ JH7110_PADCFG_IE | JH7110_PADCFG_SMT |
+ JH7110_PADCFG_BIAS, 0);
+ return 0;
+}
+
+static int jh7110_gpio_get(struct gpio_chip *gc, unsigned int gpio)
+{
+ struct jh7110_pinctrl *sfp = container_of(gc,
+ struct jh7110_pinctrl, gc);
+ const struct jh7110_pinctrl_soc_info *info = sfp->info;
+ void __iomem *reg = sfp->base + info->gpioin_reg_base
+ + 4 * (gpio / 32);
+
+ return !!(readl_relaxed(reg) & BIT(gpio % 32));
+}
+
+static void jh7110_gpio_set(struct gpio_chip *gc,
+ unsigned int gpio, int value)
+{
+ struct jh7110_pinctrl *sfp = container_of(gc,
+ struct jh7110_pinctrl, gc);
+ const struct jh7110_pinctrl_soc_info *info = sfp->info;
+ unsigned int offset = 4 * (gpio / 4);
+ unsigned int shift = 8 * (gpio % 4);
+ void __iomem *reg_dout = sfp->base + info->dout_reg_base + offset;
+ u32 dout = (value ? GPOUT_HIGH : GPOUT_LOW) << shift;
+ u32 mask = info->dout_mask << shift;
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&sfp->lock, flags);
+ dout |= readl_relaxed(reg_dout) & ~mask;
+ writel_relaxed(dout, reg_dout);
+ raw_spin_unlock_irqrestore(&sfp->lock, flags);
+}
+
+static int jh7110_gpio_set_config(struct gpio_chip *gc,
+ unsigned int gpio, unsigned long config)
+{
+ struct jh7110_pinctrl *sfp = container_of(gc,
+ struct jh7110_pinctrl, gc);
+ u32 arg = pinconf_to_config_argument(config);
+ u32 value;
+ u32 mask;
+
+ switch (pinconf_to_config_param(config)) {
+ case PIN_CONFIG_BIAS_DISABLE:
+ mask = JH7110_PADCFG_BIAS;
+ value = 0;
+ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ if (arg == 0)
+ return -ENOTSUPP;
+ mask = JH7110_PADCFG_BIAS;
+ value = JH7110_PADCFG_PD;
+ break;
+ case PIN_CONFIG_BIAS_PULL_UP:
+ if (arg == 0)
+ return -ENOTSUPP;
+ mask = JH7110_PADCFG_BIAS;
+ value = JH7110_PADCFG_PU;
+ break;
+ case PIN_CONFIG_DRIVE_PUSH_PULL:
+ return 0;
+ case PIN_CONFIG_INPUT_ENABLE:
+ mask = JH7110_PADCFG_IE;
+ value = arg ? JH7110_PADCFG_IE : 0;
+ break;
+ case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
+ mask = JH7110_PADCFG_SMT;
+ value = arg ? JH7110_PADCFG_SMT : 0;
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ jh7110_padcfg_rmw(sfp, gpio, mask, value);
+ return 0;
+}
+
+static int jh7110_gpio_add_pin_ranges(struct gpio_chip *gc)
+{
+ struct jh7110_pinctrl *sfp = container_of(gc,
+ struct jh7110_pinctrl, gc);
+
+ sfp->gpios.name = sfp->gc.label;
+ sfp->gpios.base = sfp->gc.base;
+ sfp->gpios.pin_base = 0;
+ sfp->gpios.npins = sfp->gc.ngpio;
+ sfp->gpios.gc = &sfp->gc;
+ pinctrl_add_gpio_range(sfp->pctl, &sfp->gpios);
+ return 0;
+}
+
+static void jh7110_irq_ack(struct irq_data *d)
+{
+ struct jh7110_pinctrl *sfp = jh7110_from_irq_data(d);
+ const struct jh7110_gpio_irq_reg *irq_reg = sfp->info->irq_reg;
+ irq_hw_number_t gpio = irqd_to_hwirq(d);
+ void __iomem *ic = sfp->base + irq_reg->ic_reg_base
+ + 4 * (gpio / 32);
+ u32 mask = BIT(gpio % 32);
+ unsigned long flags;
+ u32 value;
+
+ raw_spin_lock_irqsave(&sfp->lock, flags);
+ value = readl_relaxed(ic) & ~mask;
+ writel_relaxed(value, ic);
+ writel_relaxed(value | mask, ic);
+ raw_spin_unlock_irqrestore(&sfp->lock, flags);
+}
+
+static void jh7110_irq_mask(struct irq_data *d)
+{
+ struct jh7110_pinctrl *sfp = jh7110_from_irq_data(d);
+ const struct jh7110_gpio_irq_reg *irq_reg = sfp->info->irq_reg;
+ irq_hw_number_t gpio = irqd_to_hwirq(d);
+ void __iomem *ie = sfp->base + irq_reg->ie_reg_base
+ + 4 * (gpio / 32);
+ u32 mask = BIT(gpio % 32);
+ unsigned long flags;
+ u32 value;
+
+ raw_spin_lock_irqsave(&sfp->lock, flags);
+ value = readl_relaxed(ie) & ~mask;
+ writel_relaxed(value, ie);
+ raw_spin_unlock_irqrestore(&sfp->lock, flags);
+
+ gpiochip_disable_irq(&sfp->gc, d->hwirq);
+}
+
+static void jh7110_irq_mask_ack(struct irq_data *d)
+{
+ struct jh7110_pinctrl *sfp = jh7110_from_irq_data(d);
+ const struct jh7110_gpio_irq_reg *irq_reg = sfp->info->irq_reg;
+ irq_hw_number_t gpio = irqd_to_hwirq(d);
+ void __iomem *ie = sfp->base + irq_reg->ie_reg_base
+ + 4 * (gpio / 32);
+ void __iomem *ic = sfp->base + irq_reg->ic_reg_base
+ + 4 * (gpio / 32);
+ u32 mask = BIT(gpio % 32);
+ unsigned long flags;
+ u32 value;
+
+ raw_spin_lock_irqsave(&sfp->lock, flags);
+ value = readl_relaxed(ie) & ~mask;
+ writel_relaxed(value, ie);
+
+ value = readl_relaxed(ic) & ~mask;
+ writel_relaxed(value, ic);
+ writel_relaxed(value | mask, ic);
+ raw_spin_unlock_irqrestore(&sfp->lock, flags);
+}
+
+static void jh7110_irq_unmask(struct irq_data *d)
+{
+ struct jh7110_pinctrl *sfp = jh7110_from_irq_data(d);
+ const struct jh7110_gpio_irq_reg *irq_reg = sfp->info->irq_reg;
+ irq_hw_number_t gpio = irqd_to_hwirq(d);
+ void __iomem *ie = sfp->base + irq_reg->ie_reg_base
+ + 4 * (gpio / 32);
+ u32 mask = BIT(gpio % 32);
+ unsigned long flags;
+ u32 value;
+
+ gpiochip_enable_irq(&sfp->gc, d->hwirq);
+
+ raw_spin_lock_irqsave(&sfp->lock, flags);
+ value = readl_relaxed(ie) | mask;
+ writel_relaxed(value, ie);
+ raw_spin_unlock_irqrestore(&sfp->lock, flags);
+}
+
+static int jh7110_irq_set_type(struct irq_data *d, unsigned int trigger)
+{
+ struct jh7110_pinctrl *sfp = jh7110_from_irq_data(d);
+ const struct jh7110_gpio_irq_reg *irq_reg = sfp->info->irq_reg;
+ irq_hw_number_t gpio = irqd_to_hwirq(d);
+ void __iomem *base = sfp->base + 4 * (gpio / 32);
+ u32 mask = BIT(gpio % 32);
+ u32 irq_type, edge_both, polarity;
+ unsigned long flags;
+
+ switch (trigger) {
+ case IRQ_TYPE_EDGE_RISING:
+ irq_type = mask; /* 1: edge triggered */
+ edge_both = 0; /* 0: single edge */
+ polarity = mask; /* 1: rising edge */
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ irq_type = mask; /* 1: edge triggered */
+ edge_both = 0; /* 0: single edge */
+ polarity = 0; /* 0: falling edge */
+ break;
+ case IRQ_TYPE_EDGE_BOTH:
+ irq_type = mask; /* 1: edge triggered */
+ edge_both = mask; /* 1: both edges */
+ polarity = 0; /* 0: ignored */
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ irq_type = 0; /* 0: level triggered */
+ edge_both = 0; /* 0: ignored */
+ polarity = mask; /* 1: high level */
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ irq_type = 0; /* 0: level triggered */
+ edge_both = 0; /* 0: ignored */
+ polarity = 0; /* 0: low level */
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (trigger & IRQ_TYPE_EDGE_BOTH)
+ irq_set_handler_locked(d, handle_edge_irq);
+ else
+ irq_set_handler_locked(d, handle_level_irq);
+
+ raw_spin_lock_irqsave(&sfp->lock, flags);
+ irq_type |= readl_relaxed(base + irq_reg->is_reg_base) & ~mask;
+ writel_relaxed(irq_type, base + irq_reg->is_reg_base);
+
+ edge_both |= readl_relaxed(base + irq_reg->ibe_reg_base) & ~mask;
+ writel_relaxed(edge_both, base + irq_reg->ibe_reg_base);
+
+ polarity |= readl_relaxed(base + irq_reg->iev_reg_base) & ~mask;
+ writel_relaxed(polarity, base + irq_reg->iev_reg_base);
+ raw_spin_unlock_irqrestore(&sfp->lock, flags);
+ return 0;
+}
+
+static struct irq_chip jh7110_irq_chip = {
+ .irq_ack = jh7110_irq_ack,
+ .irq_mask = jh7110_irq_mask,
+ .irq_mask_ack = jh7110_irq_mask_ack,
+ .irq_unmask = jh7110_irq_unmask,
+ .irq_set_type = jh7110_irq_set_type,
+ .flags = IRQCHIP_IMMUTABLE | IRQCHIP_SET_TYPE_MASKED,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
+};
+
+static void jh7110_disable_clock(void *data)
+{
+ clk_disable_unprepare(data);
+}
+
+int jh7110_pinctrl_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ const struct jh7110_pinctrl_soc_info *info;
+ struct jh7110_pinctrl *sfp;
+ struct pinctrl_desc *jh7110_pinctrl_desc;
+ struct reset_control *rst;
+ struct clk *clk;
+ int ret;
+
+ info = of_device_get_match_data(&pdev->dev);
+ if (!info)
+ return -ENODEV;
+
+ if (!info->pins || !info->npins) {
+ dev_err(dev, "wrong pinctrl info\n");
+ return -EINVAL;
+ }
+
+ sfp = devm_kzalloc(dev, sizeof(*sfp), GFP_KERNEL);
+ if (!sfp)
+ return -ENOMEM;
+
+ sfp->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(sfp->base))
+ return PTR_ERR(sfp->base);
+
+ clk = devm_clk_get_optional(dev, NULL);
+ if (IS_ERR(clk))
+ return dev_err_probe(dev, PTR_ERR(clk), "could not get clock\n");
+
+ rst = devm_reset_control_get_exclusive(dev, NULL);
+ if (IS_ERR(rst))
+ return dev_err_probe(dev, PTR_ERR(rst), "could not get reset\n");
+
+ /*
+ * we don't want to assert reset and risk undoing pin muxing for the
+ * early boot serial console, but let's make sure the reset line is
+ * deasserted in case someone runs a really minimal bootloader.
+ */
+ ret = reset_control_deassert(rst);
+ if (ret)
+ return dev_err_probe(dev, ret, "could not deassert reset\n");
+
+ if (clk) {
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ return dev_err_probe(dev, ret, "could not enable clock\n");
+
+ ret = devm_add_action_or_reset(dev, jh7110_disable_clock, clk);
+ if (ret)
+ return ret;
+ }
+
+ jh7110_pinctrl_desc = devm_kzalloc(&pdev->dev,
+ sizeof(*jh7110_pinctrl_desc),
+ GFP_KERNEL);
+ if (!jh7110_pinctrl_desc)
+ return -ENOMEM;
+
+ jh7110_pinctrl_desc->name = dev_name(dev);
+ jh7110_pinctrl_desc->pins = info->pins;
+ jh7110_pinctrl_desc->npins = info->npins;
+ jh7110_pinctrl_desc->pctlops = &jh7110_pinctrl_ops;
+ jh7110_pinctrl_desc->pmxops = &jh7110_pinmux_ops;
+ jh7110_pinctrl_desc->confops = &jh7110_pinconf_ops;
+ jh7110_pinctrl_desc->owner = THIS_MODULE;
+
+ sfp->info = info;
+ sfp->dev = dev;
+ platform_set_drvdata(pdev, sfp);
+ sfp->gc.parent = dev;
+ raw_spin_lock_init(&sfp->lock);
+ mutex_init(&sfp->mutex);
+
+ ret = devm_pinctrl_register_and_init(dev,
+ jh7110_pinctrl_desc,
+ sfp, &sfp->pctl);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "could not register pinctrl driver\n");
+
+ sfp->gc.label = dev_name(dev);
+ sfp->gc.owner = THIS_MODULE;
+ sfp->gc.request = jh7110_gpio_request;
+ sfp->gc.free = jh7110_gpio_free;
+ sfp->gc.get_direction = jh7110_gpio_get_direction;
+ sfp->gc.direction_input = jh7110_gpio_direction_input;
+ sfp->gc.direction_output = jh7110_gpio_direction_output;
+ sfp->gc.get = jh7110_gpio_get;
+ sfp->gc.set = jh7110_gpio_set;
+ sfp->gc.set_config = jh7110_gpio_set_config;
+ sfp->gc.add_pin_ranges = jh7110_gpio_add_pin_ranges;
+ sfp->gc.base = info->gc_base;
+ sfp->gc.ngpio = info->ngpios;
+
+ jh7110_irq_chip.name = sfp->gc.label;
+ gpio_irq_chip_set_chip(&sfp->gc.irq, &jh7110_irq_chip);
+ sfp->gc.irq.parent_handler = info->jh7110_gpio_irq_handler;
+ sfp->gc.irq.num_parents = 1;
+ sfp->gc.irq.parents = devm_kcalloc(dev, sfp->gc.irq.num_parents,
+ sizeof(*sfp->gc.irq.parents),
+ GFP_KERNEL);
+ if (!sfp->gc.irq.parents)
+ return -ENOMEM;
+ sfp->gc.irq.default_type = IRQ_TYPE_NONE;
+ sfp->gc.irq.handler = handle_bad_irq;
+ sfp->gc.irq.init_hw = info->jh7110_gpio_init_hw;
+
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0)
+ return ret;
+ sfp->gc.irq.parents[0] = ret;
+
+ ret = devm_gpiochip_add_data(dev, &sfp->gc, sfp);
+ if (ret)
+ return dev_err_probe(dev, ret, "could not register gpiochip\n");
+
+ irq_domain_set_pm_device(sfp->gc.irq.domain, dev);
+
+ dev_info(dev, "StarFive GPIO chip registered %d GPIOs\n", sfp->gc.ngpio);
+
+ return pinctrl_enable(sfp->pctl);
+}
+EXPORT_SYMBOL_GPL(jh7110_pinctrl_probe);
+
+MODULE_DESCRIPTION("Pinctrl driver for the StarFive JH7110 SoC");
+MODULE_AUTHOR("Emil Renner Berthing <kernel@esmil.dk>");
+MODULE_AUTHOR("Jianlong Huang <jianlong.huang@starfivetech.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pinctrl/starfive/pinctrl-starfive-jh7110.h b/drivers/pinctrl/starfive/pinctrl-starfive-jh7110.h
new file mode 100644
index 000000000000..3f20b7ff96dd
--- /dev/null
+++ b/drivers/pinctrl/starfive/pinctrl-starfive-jh7110.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Pinctrl / GPIO driver for StarFive JH7110 SoC
+ *
+ * Copyright (C) 2022 StarFive Technology Co., Ltd.
+ */
+
+#ifndef __PINCTRL_STARFIVE_JH7110_H__
+#define __PINCTRL_STARFIVE_JH7110_H__
+
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinmux.h>
+
+struct jh7110_pinctrl {
+ struct device *dev;
+ struct gpio_chip gc;
+ struct pinctrl_gpio_range gpios;
+ raw_spinlock_t lock;
+ void __iomem *base;
+ struct pinctrl_dev *pctl;
+ /* register read/write mutex */
+ struct mutex mutex;
+ const struct jh7110_pinctrl_soc_info *info;
+};
+
+struct jh7110_gpio_irq_reg {
+ unsigned int is_reg_base;
+ unsigned int ic_reg_base;
+ unsigned int ibe_reg_base;
+ unsigned int iev_reg_base;
+ unsigned int ie_reg_base;
+ unsigned int ris_reg_base;
+ unsigned int mis_reg_base;
+};
+
+struct jh7110_pinctrl_soc_info {
+ const struct pinctrl_pin_desc *pins;
+ unsigned int npins;
+ unsigned int ngpios;
+ unsigned int gc_base;
+
+ /* gpio dout/doen/din/gpioinput register */
+ unsigned int dout_reg_base;
+ unsigned int dout_mask;
+ unsigned int doen_reg_base;
+ unsigned int doen_mask;
+ unsigned int gpi_reg_base;
+ unsigned int gpi_mask;
+ unsigned int gpioin_reg_base;
+
+ const struct jh7110_gpio_irq_reg *irq_reg;
+
+ /* generic pinmux */
+ int (*jh7110_set_one_pin_mux)(struct jh7110_pinctrl *sfp,
+ unsigned int pin,
+ unsigned int din, u32 dout,
+ u32 doen, u32 func);
+ /* gpio chip */
+ int (*jh7110_get_padcfg_base)(struct jh7110_pinctrl *sfp,
+ unsigned int pin);
+ void (*jh7110_gpio_irq_handler)(struct irq_desc *desc);
+ int (*jh7110_gpio_init_hw)(struct gpio_chip *gc);
+};
+
+void jh7110_set_gpiomux(struct jh7110_pinctrl *sfp, unsigned int pin,
+ unsigned int din, u32 dout, u32 doen);
+int jh7110_pinctrl_probe(struct platform_device *pdev);
+struct jh7110_pinctrl *jh7110_from_irq_desc(struct irq_desc *desc);
+
+#endif /* __PINCTRL_STARFIVE_JH7110_H__ */
diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c
index 1cddca506ad7..cb33a23ab0c1 100644
--- a/drivers/pinctrl/stm32/pinctrl-stm32.c
+++ b/drivers/pinctrl/stm32/pinctrl-stm32.c
@@ -1382,6 +1382,7 @@ static struct irq_domain *stm32_pctrl_get_irq_domain(struct platform_device *pde
return ERR_PTR(-ENXIO);
domain = irq_find_host(parent);
+ of_node_put(parent);
if (!domain)
/* domain not registered yet */
return ERR_PTR(-EPROBE_DEFER);
diff --git a/drivers/pinctrl/sunplus/sppctl.c b/drivers/pinctrl/sunplus/sppctl.c
index 25101293268f..6bbbab3a6fdf 100644
--- a/drivers/pinctrl/sunplus/sppctl.c
+++ b/drivers/pinctrl/sunplus/sppctl.c
@@ -553,7 +553,6 @@ static int sppctl_gpio_new(struct platform_device *pdev, struct sppctl_pdata *pc
gchip->base = -1;
gchip->ngpio = sppctl_gpio_list_sz;
gchip->names = sppctl_gpio_list_s;
- gchip->of_gpio_n_cells = 2;
pctl->pctl_grange.npins = gchip->ngpio;
pctl->pctl_grange.name = gchip->label;
diff --git a/drivers/platform/chrome/Kconfig b/drivers/platform/chrome/Kconfig
index c1ca247987d2..7d82a0946e1c 100644
--- a/drivers/platform/chrome/Kconfig
+++ b/drivers/platform/chrome/Kconfig
@@ -119,6 +119,16 @@ config CROS_EC_SPI
response time cannot be guaranteed, we support ignoring
'pre-amble' bytes before the response actually starts.
+config CROS_EC_UART
+ tristate "ChromeOS Embedded Controller (UART)"
+ depends on CROS_EC && ACPI && SERIAL_DEV_BUS
+ help
+ If you say Y here, you get support for talking to the ChromeOS EC
+ through a UART, using a byte-level protocol.
+
+ To compile this driver as a module, choose M here: the
+ module will be called cros_ec_uart.
+
config CROS_EC_LPC
tristate "ChromeOS Embedded Controller (LPC)"
depends on CROS_EC && ACPI && (X86 || COMPILE_TEST)
@@ -226,7 +236,7 @@ config CROS_EC_TYPEC
information from the Chrome OS EC.
To compile this driver as a module, choose M here: the module will be
- called cros_ec_typec.
+ called cros-ec-typec.
config CROS_HPS_I2C
tristate "ChromeOS HPS device"
diff --git a/drivers/platform/chrome/Makefile b/drivers/platform/chrome/Makefile
index f6068d077a40..9e26e45c4a37 100644
--- a/drivers/platform/chrome/Makefile
+++ b/drivers/platform/chrome/Makefile
@@ -15,8 +15,10 @@ obj-$(CONFIG_CROS_EC_ISHTP) += cros_ec_ishtp.o
obj-$(CONFIG_CROS_TYPEC_SWITCH) += cros_typec_switch.o
obj-$(CONFIG_CROS_EC_RPMSG) += cros_ec_rpmsg.o
obj-$(CONFIG_CROS_EC_SPI) += cros_ec_spi.o
+obj-$(CONFIG_CROS_EC_UART) += cros_ec_uart.o
cros_ec_lpcs-objs := cros_ec_lpc.o cros_ec_lpc_mec.o
-obj-$(CONFIG_CROS_EC_TYPEC) += cros_ec_typec.o
+cros-ec-typec-objs := cros_ec_typec.o cros_typec_vdm.o
+obj-$(CONFIG_CROS_EC_TYPEC) += cros-ec-typec.o
obj-$(CONFIG_CROS_EC_LPC) += cros_ec_lpcs.o
obj-$(CONFIG_CROS_EC_PROTO) += cros_ec_proto.o cros_ec_trace.o
obj-$(CONFIG_CROS_KBD_LED_BACKLIGHT) += cros_kbd_led_backlight.o
diff --git a/drivers/platform/chrome/cros_ec.c b/drivers/platform/chrome/cros_ec.c
index ec733f683f34..b895c8130bba 100644
--- a/drivers/platform/chrome/cros_ec.c
+++ b/drivers/platform/chrome/cros_ec.c
@@ -182,6 +182,7 @@ int cros_ec_register(struct cros_ec_device *ec_dev)
int err = 0;
BLOCKING_INIT_NOTIFIER_HEAD(&ec_dev->event_notifier);
+ BLOCKING_INIT_NOTIFIER_HEAD(&ec_dev->panic_notifier);
ec_dev->max_request = sizeof(struct ec_params_hello);
ec_dev->max_response = sizeof(struct ec_response_get_protocol_info);
@@ -198,12 +199,14 @@ int cros_ec_register(struct cros_ec_device *ec_dev)
if (!ec_dev->dout)
return -ENOMEM;
+ lockdep_register_key(&ec_dev->lockdep_key);
mutex_init(&ec_dev->lock);
+ lockdep_set_class(&ec_dev->lock, &ec_dev->lockdep_key);
err = cros_ec_query_all(ec_dev);
if (err) {
dev_err(dev, "Cannot identify the EC: error %d\n", err);
- return err;
+ goto destroy_mutex;
}
if (ec_dev->irq > 0) {
@@ -215,7 +218,7 @@ int cros_ec_register(struct cros_ec_device *ec_dev)
if (err) {
dev_err(dev, "Failed to request IRQ %d: %d\n",
ec_dev->irq, err);
- return err;
+ goto destroy_mutex;
}
}
@@ -226,7 +229,8 @@ int cros_ec_register(struct cros_ec_device *ec_dev)
if (IS_ERR(ec_dev->ec)) {
dev_err(ec_dev->dev,
"Failed to create CrOS EC platform device\n");
- return PTR_ERR(ec_dev->ec);
+ err = PTR_ERR(ec_dev->ec);
+ goto destroy_mutex;
}
if (ec_dev->max_passthru) {
@@ -292,6 +296,9 @@ int cros_ec_register(struct cros_ec_device *ec_dev)
exit:
platform_device_unregister(ec_dev->ec);
platform_device_unregister(ec_dev->pd);
+destroy_mutex:
+ mutex_destroy(&ec_dev->lock);
+ lockdep_unregister_key(&ec_dev->lockdep_key);
return err;
}
EXPORT_SYMBOL(cros_ec_register);
@@ -309,6 +316,8 @@ void cros_ec_unregister(struct cros_ec_device *ec_dev)
if (ec_dev->pd)
platform_device_unregister(ec_dev->pd);
platform_device_unregister(ec_dev->ec);
+ mutex_destroy(&ec_dev->lock);
+ lockdep_unregister_key(&ec_dev->lockdep_key);
}
EXPORT_SYMBOL(cros_ec_unregister);
diff --git a/drivers/platform/chrome/cros_ec_debugfs.c b/drivers/platform/chrome/cros_ec_debugfs.c
index 21d973fc6be2..a98c529d8c69 100644
--- a/drivers/platform/chrome/cros_ec_debugfs.c
+++ b/drivers/platform/chrome/cros_ec_debugfs.c
@@ -38,6 +38,8 @@ static DECLARE_WAIT_QUEUE_HEAD(cros_ec_debugfs_log_wq);
* @log_mutex: mutex to protect circular buffer
* @log_poll_work: recurring task to poll EC for new console log data
* @panicinfo_blob: panicinfo debugfs blob
+ * @notifier_panic: notifier_block to let kernel to flush buffered log
+ * when EC panic
*/
struct cros_ec_debugfs {
struct cros_ec_dev *ec;
@@ -49,6 +51,7 @@ struct cros_ec_debugfs {
struct delayed_work log_poll_work;
/* EC panicinfo */
struct debugfs_blob_wrapper panicinfo_blob;
+ struct notifier_block notifier_panic;
};
/*
@@ -437,6 +440,22 @@ free:
return ret;
}
+static int cros_ec_debugfs_panic_event(struct notifier_block *nb,
+ unsigned long queued_during_suspend, void *_notify)
+{
+ struct cros_ec_debugfs *debug_info =
+ container_of(nb, struct cros_ec_debugfs, notifier_panic);
+
+ if (debug_info->log_buffer.buf) {
+ /* Force log poll work to run immediately */
+ mod_delayed_work(debug_info->log_poll_work.wq, &debug_info->log_poll_work, 0);
+ /* Block until log poll work finishes */
+ flush_delayed_work(&debug_info->log_poll_work);
+ }
+
+ return NOTIFY_DONE;
+}
+
static int cros_ec_debugfs_probe(struct platform_device *pd)
{
struct cros_ec_dev *ec = dev_get_drvdata(pd->dev.parent);
@@ -473,6 +492,12 @@ static int cros_ec_debugfs_probe(struct platform_device *pd)
debugfs_create_u16("suspend_timeout_ms", 0664, debug_info->dir,
&ec->ec_dev->suspend_timeout_ms);
+ debug_info->notifier_panic.notifier_call = cros_ec_debugfs_panic_event;
+ ret = blocking_notifier_chain_register(&ec->ec_dev->panic_notifier,
+ &debug_info->notifier_panic);
+ if (ret)
+ goto remove_debugfs;
+
ec->debug_info = debug_info;
dev_set_drvdata(&pd->dev, ec);
diff --git a/drivers/platform/chrome/cros_ec_lightbar.c b/drivers/platform/chrome/cros_ec_lightbar.c
index 1674105decfb..376425bbd8ff 100644
--- a/drivers/platform/chrome/cros_ec_lightbar.c
+++ b/drivers/platform/chrome/cros_ec_lightbar.c
@@ -34,7 +34,7 @@ static ssize_t interval_msec_show(struct device *dev,
{
unsigned long msec = lb_interval_jiffies * 1000 / HZ;
- return scnprintf(buf, PAGE_SIZE, "%lu\n", msec);
+ return sysfs_emit(buf, "%lu\n", msec);
}
static ssize_t interval_msec_store(struct device *dev,
@@ -169,7 +169,7 @@ static ssize_t version_show(struct device *dev,
if (!get_lightbar_version(ec, &version, &flags))
return -EIO;
- return scnprintf(buf, PAGE_SIZE, "%d %d\n", version, flags);
+ return sysfs_emit(buf, "%d %d\n", version, flags);
}
static ssize_t brightness_store(struct device *dev,
@@ -302,17 +302,15 @@ static ssize_t sequence_show(struct device *dev,
ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg);
if (ret < 0) {
- ret = scnprintf(buf, PAGE_SIZE, "XFER / EC ERROR %d / %d\n",
- ret, msg->result);
+ ret = sysfs_emit(buf, "XFER / EC ERROR %d / %d\n", ret, msg->result);
goto exit;
}
resp = (struct ec_response_lightbar *)msg->data;
if (resp->get_seq.num >= ARRAY_SIZE(seqname))
- ret = scnprintf(buf, PAGE_SIZE, "%d\n", resp->get_seq.num);
+ ret = sysfs_emit(buf, "%d\n", resp->get_seq.num);
else
- ret = scnprintf(buf, PAGE_SIZE, "%s\n",
- seqname[resp->get_seq.num]);
+ ret = sysfs_emit(buf, "%s\n", seqname[resp->get_seq.num]);
exit:
kfree(msg);
@@ -483,7 +481,7 @@ static ssize_t userspace_control_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- return scnprintf(buf, PAGE_SIZE, "%d\n", userspace_control);
+ return sysfs_emit(buf, "%d\n", userspace_control);
}
static ssize_t userspace_control_store(struct device *dev,
diff --git a/drivers/platform/chrome/cros_ec_lpc.c b/drivers/platform/chrome/cros_ec_lpc.c
index 7fc8f82280ac..68bba0fcafab 100644
--- a/drivers/platform/chrome/cros_ec_lpc.c
+++ b/drivers/platform/chrome/cros_ec_lpc.c
@@ -21,6 +21,7 @@
#include <linux/platform_data/cros_ec_proto.h>
#include <linux/platform_device.h>
#include <linux/printk.h>
+#include <linux/reboot.h>
#include <linux/suspend.h>
#include "cros_ec.h"
@@ -320,6 +321,15 @@ static void cros_ec_lpc_acpi_notify(acpi_handle device, u32 value, void *data)
ec_dev->last_event_time = cros_ec_get_time_ns();
+ if (value == ACPI_NOTIFY_CROS_EC_PANIC) {
+ dev_emerg(ec_dev->dev, "CrOS EC Panic Reported. Shutdown is imminent!");
+ blocking_notifier_call_chain(&ec_dev->panic_notifier, 0, ec_dev);
+ /* Begin orderly shutdown. Force shutdown after 1 second. */
+ hw_protection_shutdown("CrOS EC Panic", 1000);
+ /* Do not query for other events after a panic is reported */
+ return;
+ }
+
if (ec_dev->mkbp_event_supported)
do {
ret = cros_ec_get_next_event(ec_dev, NULL,
@@ -340,7 +350,7 @@ static int cros_ec_lpc_probe(struct platform_device *pdev)
struct acpi_device *adev;
acpi_status status;
struct cros_ec_device *ec_dev;
- u8 buf[2];
+ u8 buf[2] = {};
int irq, ret;
/*
diff --git a/drivers/platform/chrome/cros_ec_proto_test.c b/drivers/platform/chrome/cros_ec_proto_test.c
index c6a83df91ae1..5b9748e0463b 100644
--- a/drivers/platform/chrome/cros_ec_proto_test.c
+++ b/drivers/platform/chrome/cros_ec_proto_test.c
@@ -5,7 +5,8 @@
#include <kunit/test.h>
-#include <asm-generic/unaligned.h>
+#include <asm/unaligned.h>
+
#include <linux/platform_data/cros_ec_commands.h>
#include <linux/platform_data/cros_ec_proto.h>
@@ -2370,7 +2371,7 @@ static void cros_ec_proto_test_get_host_event_normal(struct kunit *test)
static void cros_ec_proto_test_check_features_cached(struct kunit *test)
{
int ret, i;
- struct cros_ec_dev ec;
+ static struct cros_ec_dev ec;
ec.features.flags[0] = EC_FEATURE_MASK_0(EC_FEATURE_FINGERPRINT);
ec.features.flags[1] = EC_FEATURE_MASK_0(EC_FEATURE_SCP);
@@ -2395,7 +2396,7 @@ static void cros_ec_proto_test_check_features_not_cached(struct kunit *test)
struct cros_ec_device *ec_dev = &priv->ec_dev;
struct ec_xfer_mock *mock;
int ret, i;
- struct cros_ec_dev ec;
+ static struct cros_ec_dev ec;
ec_dev->max_request = 0xff;
ec_dev->max_response = 0xee;
@@ -2448,7 +2449,7 @@ static void cros_ec_proto_test_get_sensor_count_normal(struct kunit *test)
struct cros_ec_device *ec_dev = &priv->ec_dev;
struct ec_xfer_mock *mock;
int ret;
- struct cros_ec_dev ec;
+ static struct cros_ec_dev ec;
ec_dev->max_request = 0xff;
ec_dev->max_response = 0xee;
@@ -2493,7 +2494,7 @@ static void cros_ec_proto_test_get_sensor_count_xfer_error(struct kunit *test)
struct cros_ec_device *ec_dev = &priv->ec_dev;
struct ec_xfer_mock *mock;
int ret;
- struct cros_ec_dev ec;
+ static struct cros_ec_dev ec;
ec_dev->max_request = 0xff;
ec_dev->max_response = 0xee;
@@ -2533,7 +2534,7 @@ static void cros_ec_proto_test_get_sensor_count_legacy(struct kunit *test)
struct cros_ec_device *ec_dev = &priv->ec_dev;
struct ec_xfer_mock *mock;
int ret, i;
- struct cros_ec_dev ec;
+ static struct cros_ec_dev ec;
struct {
u8 readmem_data;
int expected_result;
diff --git a/drivers/platform/chrome/cros_ec_sysfs.c b/drivers/platform/chrome/cros_ec_sysfs.c
index f07eabcf9494..09e3bf5e8ec6 100644
--- a/drivers/platform/chrome/cros_ec_sysfs.c
+++ b/drivers/platform/chrome/cros_ec_sysfs.c
@@ -27,10 +27,9 @@ static ssize_t reboot_show(struct device *dev,
{
int count = 0;
- count += scnprintf(buf + count, PAGE_SIZE - count,
- "ro|rw|cancel|cold|disable-jump|hibernate|cold-ap-off");
- count += scnprintf(buf + count, PAGE_SIZE - count,
- " [at-shutdown]\n");
+ count += sysfs_emit_at(buf, count,
+ "ro|rw|cancel|cold|disable-jump|hibernate|cold-ap-off");
+ count += sysfs_emit_at(buf, count, " [at-shutdown]\n");
return count;
}
@@ -138,12 +137,9 @@ static ssize_t version_show(struct device *dev,
/* Strings should be null-terminated, but let's be sure. */
r_ver->version_string_ro[sizeof(r_ver->version_string_ro) - 1] = '\0';
r_ver->version_string_rw[sizeof(r_ver->version_string_rw) - 1] = '\0';
- count += scnprintf(buf + count, PAGE_SIZE - count,
- "RO version: %s\n", r_ver->version_string_ro);
- count += scnprintf(buf + count, PAGE_SIZE - count,
- "RW version: %s\n", r_ver->version_string_rw);
- count += scnprintf(buf + count, PAGE_SIZE - count,
- "Firmware copy: %s\n",
+ count += sysfs_emit_at(buf, count, "RO version: %s\n", r_ver->version_string_ro);
+ count += sysfs_emit_at(buf, count, "RW version: %s\n", r_ver->version_string_rw);
+ count += sysfs_emit_at(buf, count, "Firmware copy: %s\n",
(r_ver->current_image < ARRAY_SIZE(image_names) ?
image_names[r_ver->current_image] : "?"));
@@ -152,13 +148,12 @@ static ssize_t version_show(struct device *dev,
msg->insize = EC_HOST_PARAM_SIZE;
ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg);
if (ret < 0) {
- count += scnprintf(buf + count, PAGE_SIZE - count,
+ count += sysfs_emit_at(buf, count,
"Build info: XFER / EC ERROR %d / %d\n",
ret, msg->result);
} else {
msg->data[EC_HOST_PARAM_SIZE - 1] = '\0';
- count += scnprintf(buf + count, PAGE_SIZE - count,
- "Build info: %s\n", msg->data);
+ count += sysfs_emit_at(buf, count, "Build info: %s\n", msg->data);
}
/* Get chip info. */
@@ -166,7 +161,7 @@ static ssize_t version_show(struct device *dev,
msg->insize = sizeof(*r_chip);
ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg);
if (ret < 0) {
- count += scnprintf(buf + count, PAGE_SIZE - count,
+ count += sysfs_emit_at(buf, count,
"Chip info: XFER / EC ERROR %d / %d\n",
ret, msg->result);
} else {
@@ -175,12 +170,9 @@ static ssize_t version_show(struct device *dev,
r_chip->vendor[sizeof(r_chip->vendor) - 1] = '\0';
r_chip->name[sizeof(r_chip->name) - 1] = '\0';
r_chip->revision[sizeof(r_chip->revision) - 1] = '\0';
- count += scnprintf(buf + count, PAGE_SIZE - count,
- "Chip vendor: %s\n", r_chip->vendor);
- count += scnprintf(buf + count, PAGE_SIZE - count,
- "Chip name: %s\n", r_chip->name);
- count += scnprintf(buf + count, PAGE_SIZE - count,
- "Chip revision: %s\n", r_chip->revision);
+ count += sysfs_emit_at(buf, count, "Chip vendor: %s\n", r_chip->vendor);
+ count += sysfs_emit_at(buf, count, "Chip name: %s\n", r_chip->name);
+ count += sysfs_emit_at(buf, count, "Chip revision: %s\n", r_chip->revision);
}
/* Get board version */
@@ -188,13 +180,13 @@ static ssize_t version_show(struct device *dev,
msg->insize = sizeof(*r_board);
ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg);
if (ret < 0) {
- count += scnprintf(buf + count, PAGE_SIZE - count,
+ count += sysfs_emit_at(buf, count,
"Board version: XFER / EC ERROR %d / %d\n",
ret, msg->result);
} else {
r_board = (struct ec_response_board_version *)msg->data;
- count += scnprintf(buf + count, PAGE_SIZE - count,
+ count += sysfs_emit_at(buf, count,
"Board version: %d\n",
r_board->board_version);
}
@@ -227,7 +219,7 @@ static ssize_t flashinfo_show(struct device *dev,
resp = (struct ec_response_flash_info *)msg->data;
- ret = scnprintf(buf, PAGE_SIZE,
+ ret = sysfs_emit(buf,
"FlashSize %d\nWriteSize %d\n"
"EraseSize %d\nProtectSize %d\n",
resp->flash_size, resp->write_block_size,
@@ -264,7 +256,7 @@ static ssize_t kb_wake_angle_show(struct device *dev,
goto exit;
resp = (struct ec_response_motion_sense *)msg->data;
- ret = scnprintf(buf, PAGE_SIZE, "%d\n", resp->kb_wake_angle.ret);
+ ret = sysfs_emit(buf, "%d\n", resp->kb_wake_angle.ret);
exit:
kfree(msg);
return ret;
diff --git a/drivers/platform/chrome/cros_ec_typec.c b/drivers/platform/chrome/cros_ec_typec.c
index 001b0de95a46..a673c3342470 100644
--- a/drivers/platform/chrome/cros_ec_typec.c
+++ b/drivers/platform/chrome/cros_ec_typec.c
@@ -7,92 +7,22 @@
*/
#include <linux/acpi.h>
-#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_data/cros_ec_commands.h>
-#include <linux/platform_data/cros_ec_proto.h>
#include <linux/platform_data/cros_usbpd_notify.h>
#include <linux/platform_device.h>
-#include <linux/usb/pd.h>
#include <linux/usb/pd_vdo.h>
-#include <linux/usb/typec.h>
-#include <linux/usb/typec_altmode.h>
#include <linux/usb/typec_dp.h>
-#include <linux/usb/typec_mux.h>
-#include <linux/usb/typec_retimer.h>
#include <linux/usb/typec_tbt.h>
-#include <linux/usb/role.h>
+
+#include "cros_ec_typec.h"
+#include "cros_typec_vdm.h"
#define DRV_NAME "cros-ec-typec"
#define DP_PORT_VDO (DP_CONF_SET_PIN_ASSIGN(BIT(DP_PIN_ASSIGN_C) | BIT(DP_PIN_ASSIGN_D)) | \
- DP_CAP_DFP_D)
-
-/* Supported alt modes. */
-enum {
- CROS_EC_ALTMODE_DP = 0,
- CROS_EC_ALTMODE_TBT,
- CROS_EC_ALTMODE_MAX,
-};
-
-/* Container for altmode pointer nodes. */
-struct cros_typec_altmode_node {
- struct typec_altmode *amode;
- struct list_head list;
-};
-
-/* Per port data. */
-struct cros_typec_port {
- struct typec_port *port;
- /* Initial capabilities for the port. */
- struct typec_capability caps;
- struct typec_partner *partner;
- struct typec_cable *cable;
- /* SOP' plug. */
- struct typec_plug *plug;
- /* Port partner PD identity info. */
- struct usb_pd_identity p_identity;
- /* Port cable PD identity info. */
- struct usb_pd_identity c_identity;
- struct typec_switch *ori_sw;
- struct typec_mux *mux;
- struct typec_retimer *retimer;
- struct usb_role_switch *role_sw;
-
- /* Variables keeping track of switch state. */
- struct typec_mux_state state;
- uint8_t mux_flags;
- uint8_t role;
-
- struct typec_altmode *port_altmode[CROS_EC_ALTMODE_MAX];
-
- /* Flag indicating that PD partner discovery data parsing is completed. */
- bool sop_disc_done;
- bool sop_prime_disc_done;
- struct ec_response_typec_discovery *disc_data;
- struct list_head partner_mode_list;
- struct list_head plug_mode_list;
-
- /* PDO-related structs */
- struct usb_power_delivery *partner_pd;
- struct usb_power_delivery_capabilities *partner_src_caps;
- struct usb_power_delivery_capabilities *partner_sink_caps;
-};
-
-/* Platform-specific data for the Chrome OS EC Type C controller. */
-struct cros_typec_data {
- struct device *dev;
- struct cros_ec_device *ec;
- int num_ports;
- unsigned int pd_ctrl_ver;
- /* Array of ports, indexed by port number. */
- struct cros_typec_port *ports[EC_USB_PD_MAX_PORTS];
- struct notifier_block nb;
- struct work_struct port_work;
- bool typec_cmd_supported;
- bool needs_mux_ack;
-};
+ DP_CAP_DFP_D | DP_CAP_RECEPTACLE)
static int cros_typec_parse_port_props(struct typec_capability *cap,
struct fwnode_handle *fwnode,
@@ -145,27 +75,33 @@ static int cros_typec_get_switch_handles(struct cros_typec_port *port,
struct fwnode_handle *fwnode,
struct device *dev)
{
+ int ret = 0;
+
port->mux = fwnode_typec_mux_get(fwnode, NULL);
if (IS_ERR(port->mux)) {
- dev_dbg(dev, "Mux handle not found.\n");
+ ret = PTR_ERR(port->mux);
+ dev_dbg(dev, "Mux handle not found: %d.\n", ret);
goto mux_err;
}
port->retimer = fwnode_typec_retimer_get(fwnode);
if (IS_ERR(port->retimer)) {
- dev_dbg(dev, "Retimer handle not found.\n");
+ ret = PTR_ERR(port->retimer);
+ dev_dbg(dev, "Retimer handle not found: %d.\n", ret);
goto retimer_sw_err;
}
port->ori_sw = fwnode_typec_switch_get(fwnode);
if (IS_ERR(port->ori_sw)) {
- dev_dbg(dev, "Orientation switch handle not found.\n");
+ ret = PTR_ERR(port->ori_sw);
+ dev_dbg(dev, "Orientation switch handle not found: %d\n", ret);
goto ori_sw_err;
}
port->role_sw = fwnode_usb_role_switch_get(fwnode);
if (IS_ERR(port->role_sw)) {
- dev_dbg(dev, "USB role switch handle not found.\n");
+ ret = PTR_ERR(port->role_sw);
+ dev_dbg(dev, "USB role switch handle not found: %d\n", ret);
goto role_sw_err;
}
@@ -181,7 +117,7 @@ retimer_sw_err:
typec_mux_put(port->mux);
port->mux = NULL;
mux_err:
- return -ENODEV;
+ return ret;
}
static int cros_typec_add_partner(struct cros_typec_data *typec, int port_num,
@@ -342,6 +278,8 @@ static int cros_typec_register_port_altmodes(struct cros_typec_data *typec,
if (IS_ERR(amode))
return PTR_ERR(amode);
port->port_altmode[CROS_EC_ALTMODE_DP] = amode;
+ typec_altmode_set_drvdata(amode, port);
+ amode->ops = &port_amode_ops;
/*
* Register TBT compatibility alt mode. The EC will not enter the mode
@@ -355,6 +293,8 @@ static int cros_typec_register_port_altmodes(struct cros_typec_data *typec,
if (IS_ERR(amode))
return PTR_ERR(amode);
port->port_altmode[CROS_EC_ALTMODE_TBT] = amode;
+ typec_altmode_set_drvdata(amode, port);
+ amode->ops = &port_amode_ops;
port->state.alt = NULL;
port->state.mode = TYPEC_STATE_USB;
@@ -408,6 +348,8 @@ static int cros_typec_init_ports(struct cros_typec_data *typec)
goto unregister_ports;
}
+ cros_port->port_num = port_num;
+ cros_port->typec_data = typec;
typec->ports[port_num] = cros_port;
cap = &cros_port->caps;
@@ -423,9 +365,11 @@ static int cros_typec_init_ports(struct cros_typec_data *typec)
}
ret = cros_typec_get_switch_handles(cros_port, fwnode, dev);
- if (ret)
- dev_dbg(dev, "No switch control for port %d\n",
- port_num);
+ if (ret) {
+ dev_dbg(dev, "No switch control for port %d, err: %d\n", port_num, ret);
+ if (ret == -EPROBE_DEFER)
+ goto unregister_ports;
+ }
ret = cros_typec_register_port_altmodes(typec, port_num);
if (ret) {
@@ -1064,6 +1008,21 @@ static void cros_typec_handle_status(struct cros_typec_data *typec, int port_num
"Failed SOP Disc event clear, port: %d\n", port_num);
}
}
+
+ if (resp.events & PD_STATUS_EVENT_VDM_REQ_REPLY) {
+ cros_typec_handle_vdm_response(typec, port_num);
+ ret = cros_typec_send_clear_event(typec, port_num, PD_STATUS_EVENT_VDM_REQ_REPLY);
+ if (ret < 0)
+ dev_warn(typec->dev, "Failed VDM Reply event clear, port: %d\n", port_num);
+ }
+
+ if (resp.events & PD_STATUS_EVENT_VDM_ATTENTION) {
+ cros_typec_handle_vdm_attention(typec, port_num);
+ ret = cros_typec_send_clear_event(typec, port_num, PD_STATUS_EVENT_VDM_ATTENTION);
+ if (ret < 0)
+ dev_warn(typec->dev, "Failed VDM attention event clear, port: %d\n",
+ port_num);
+ }
}
static int cros_typec_port_update(struct cros_typec_data *typec, int port_num)
diff --git a/drivers/platform/chrome/cros_ec_typec.h b/drivers/platform/chrome/cros_ec_typec.h
new file mode 100644
index 000000000000..deda180a646f
--- /dev/null
+++ b/drivers/platform/chrome/cros_ec_typec.h
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __CROS_EC_TYPEC__
+#define __CROS_EC_TYPEC__
+
+#include <linux/list.h>
+#include <linux/notifier.h>
+#include <linux/platform_data/cros_ec_proto.h>
+#include <linux/usb/pd.h>
+#include <linux/usb/role.h>
+#include <linux/usb/typec.h>
+#include <linux/usb/typec_altmode.h>
+#include <linux/usb/typec_mux.h>
+#include <linux/usb/typec_retimer.h>
+#include <linux/workqueue.h>
+
+/* Supported alt modes. */
+enum {
+ CROS_EC_ALTMODE_DP = 0,
+ CROS_EC_ALTMODE_TBT,
+ CROS_EC_ALTMODE_MAX,
+};
+
+/* Container for altmode pointer nodes. */
+struct cros_typec_altmode_node {
+ struct typec_altmode *amode;
+ struct list_head list;
+};
+
+/* Platform-specific data for the Chrome OS EC Type C controller. */
+struct cros_typec_data {
+ struct device *dev;
+ struct cros_ec_device *ec;
+ int num_ports;
+ unsigned int pd_ctrl_ver;
+ /* Array of ports, indexed by port number. */
+ struct cros_typec_port *ports[EC_USB_PD_MAX_PORTS];
+ struct notifier_block nb;
+ struct work_struct port_work;
+ bool typec_cmd_supported;
+ bool needs_mux_ack;
+};
+
+/* Per port data. */
+struct cros_typec_port {
+ struct typec_port *port;
+ int port_num;
+ /* Initial capabilities for the port. */
+ struct typec_capability caps;
+ struct typec_partner *partner;
+ struct typec_cable *cable;
+ /* SOP' plug. */
+ struct typec_plug *plug;
+ /* Port partner PD identity info. */
+ struct usb_pd_identity p_identity;
+ /* Port cable PD identity info. */
+ struct usb_pd_identity c_identity;
+ struct typec_switch *ori_sw;
+ struct typec_mux *mux;
+ struct typec_retimer *retimer;
+ struct usb_role_switch *role_sw;
+
+ /* Variables keeping track of switch state. */
+ struct typec_mux_state state;
+ uint8_t mux_flags;
+ uint8_t role;
+
+ struct typec_altmode *port_altmode[CROS_EC_ALTMODE_MAX];
+
+ /* Flag indicating that PD partner discovery data parsing is completed. */
+ bool sop_disc_done;
+ bool sop_prime_disc_done;
+ struct ec_response_typec_discovery *disc_data;
+ struct list_head partner_mode_list;
+ struct list_head plug_mode_list;
+
+ /* PDO-related structs */
+ struct usb_power_delivery *partner_pd;
+ struct usb_power_delivery_capabilities *partner_src_caps;
+ struct usb_power_delivery_capabilities *partner_sink_caps;
+
+ struct cros_typec_data *typec_data;
+};
+
+#endif /* __CROS_EC_TYPEC__ */
diff --git a/drivers/platform/chrome/cros_ec_uart.c b/drivers/platform/chrome/cros_ec_uart.c
new file mode 100644
index 000000000000..788246559bbb
--- /dev/null
+++ b/drivers/platform/chrome/cros_ec_uart.c
@@ -0,0 +1,362 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * UART interface for ChromeOS Embedded Controller
+ *
+ * Copyright 2020-2022 Google LLC.
+ */
+
+#include <linux/acpi.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_data/cros_ec_proto.h>
+#include <linux/serdev.h>
+#include <linux/slab.h>
+#include <uapi/linux/sched/types.h>
+
+#include "cros_ec.h"
+
+/*
+ * EC sends contiguous bytes of response packet on UART AP RX.
+ * TTY driver in AP accumulates incoming bytes and calls the registered callback
+ * function. Byte count can range from 1 to MAX bytes supported by EC.
+ * This driver should wait for long time for all callbacks to be processed.
+ * Considering the worst case scenario, wait for 500 msec. This timeout should
+ * account for max latency and some additional guard time.
+ * Best case: Entire packet is received in ~200 ms, wait queue will be released
+ * and packet will be processed.
+ * Worst case: TTY driver sends bytes in multiple callbacks. In this case this
+ * driver will wait for ~1 sec beyond which it will timeout.
+ * This timeout value should not exceed ~500 msec because in case if
+ * EC_CMD_REBOOT_EC sent, high level driver should be able to intercept EC
+ * in RO.
+ */
+#define EC_MSG_DEADLINE_MS 500
+
+/**
+ * struct response_info - Encapsulate EC response related
+ * information for passing between function
+ * cros_ec_uart_pkt_xfer() and cros_ec_uart_rx_bytes()
+ * callback.
+ * @data: Copy the data received from EC here.
+ * @max_size: Max size allocated for the @data buffer. If the
+ * received data exceeds this value, we log an error.
+ * @size: Actual size of data received from EC. This is also
+ * used to accumulate byte count with response is received
+ * in dma chunks.
+ * @exp_len: Expected bytes of response from EC including header.
+ * @status: Re-init to 0 before sending a cmd. Updated to 1 when
+ * a response is successfully received, or an error number
+ * on failure.
+ * @wait_queue: Wait queue EC response where the cros_ec sends request
+ * to EC and waits
+ */
+struct response_info {
+ void *data;
+ size_t max_size;
+ size_t size;
+ size_t exp_len;
+ int status;
+ wait_queue_head_t wait_queue;
+};
+
+/**
+ * struct cros_ec_uart - information about a uart-connected EC
+ *
+ * @serdev: serdev uart device we are connected to.
+ * @baudrate: UART baudrate of attached EC device.
+ * @flowcontrol: UART flowcontrol of attached device.
+ * @irq: Linux IRQ number of associated serial device.
+ * @response: Response info passing between cros_ec_uart_pkt_xfer()
+ * and cros_ec_uart_rx_bytes()
+ */
+struct cros_ec_uart {
+ struct serdev_device *serdev;
+ u32 baudrate;
+ u8 flowcontrol;
+ u32 irq;
+ struct response_info response;
+};
+
+static int cros_ec_uart_rx_bytes(struct serdev_device *serdev,
+ const u8 *data,
+ size_t count)
+{
+ struct ec_host_response *host_response;
+ struct cros_ec_device *ec_dev = serdev_device_get_drvdata(serdev);
+ struct cros_ec_uart *ec_uart = ec_dev->priv;
+ struct response_info *resp = &ec_uart->response;
+
+ /* Check if bytes were sent out of band */
+ if (!resp->data) {
+ /* Discard all bytes */
+ dev_warn(ec_dev->dev, "Bytes received out of band, dropping them.\n");
+ return count;
+ }
+
+ /*
+ * Check if incoming bytes + resp->size is greater than allocated
+ * buffer in din by cros_ec. This will ensure that if EC sends more
+ * bytes than max_size, waiting process will be notified with an error.
+ */
+ if (resp->size + count > resp->max_size) {
+ resp->status = -EMSGSIZE;
+ wake_up(&resp->wait_queue);
+ return count;
+ }
+
+ memcpy(resp->data + resp->size, data, count);
+
+ resp->size += count;
+
+ /* Read data_len if we received response header and if exp_len was not read before. */
+ if (resp->size >= sizeof(*host_response) && resp->exp_len == 0) {
+ host_response = (struct ec_host_response *)resp->data;
+ resp->exp_len = host_response->data_len + sizeof(*host_response);
+ }
+
+ /* If driver received response header and payload from EC, wake up the wait queue. */
+ if (resp->size >= sizeof(*host_response) && resp->size == resp->exp_len) {
+ resp->status = 1;
+ wake_up(&resp->wait_queue);
+ }
+
+ return count;
+}
+
+static int cros_ec_uart_pkt_xfer(struct cros_ec_device *ec_dev,
+ struct cros_ec_command *ec_msg)
+{
+ struct cros_ec_uart *ec_uart = ec_dev->priv;
+ struct serdev_device *serdev = ec_uart->serdev;
+ struct response_info *resp = &ec_uart->response;
+ struct ec_host_response *host_response;
+ unsigned int len;
+ int ret, i;
+ u8 sum;
+
+ len = cros_ec_prepare_tx(ec_dev, ec_msg);
+ dev_dbg(ec_dev->dev, "Prepared len=%d\n", len);
+
+ /* Setup for incoming response */
+ resp->data = ec_dev->din;
+ resp->max_size = ec_dev->din_size;
+ resp->size = 0;
+ resp->exp_len = 0;
+ resp->status = 0;
+
+ ret = serdev_device_write_buf(serdev, ec_dev->dout, len);
+ if (ret < 0 || ret < len) {
+ dev_err(ec_dev->dev, "Unable to write data\n");
+ if (ret >= 0)
+ ret = -EIO;
+ goto exit;
+ }
+
+ ret = wait_event_timeout(resp->wait_queue, resp->status,
+ msecs_to_jiffies(EC_MSG_DEADLINE_MS));
+ if (ret == 0) {
+ dev_warn(ec_dev->dev, "Timed out waiting for response.\n");
+ ret = -ETIMEDOUT;
+ goto exit;
+ }
+
+ if (resp->status < 0) {
+ ret = resp->status;
+ dev_warn(ec_dev->dev, "Error response received: %d\n", ret);
+ goto exit;
+ }
+
+ host_response = (struct ec_host_response *)ec_dev->din;
+ ec_msg->result = host_response->result;
+
+ if (host_response->data_len > ec_msg->insize) {
+ dev_err(ec_dev->dev, "Resp too long (%d bytes, expected %d)\n",
+ host_response->data_len, ec_msg->insize);
+ ret = -ENOSPC;
+ goto exit;
+ }
+
+ /* Validate checksum */
+ sum = 0;
+ for (i = 0; i < sizeof(*host_response) + host_response->data_len; i++)
+ sum += ec_dev->din[i];
+
+ if (sum) {
+ dev_err(ec_dev->dev, "Bad packet checksum calculated %x\n", sum);
+ ret = -EBADMSG;
+ goto exit;
+ }
+
+ memcpy(ec_msg->data, ec_dev->din + sizeof(*host_response), host_response->data_len);
+
+ ret = host_response->data_len;
+
+exit:
+ /* Invalidate response buffer to guard against out of band rx data */
+ resp->data = NULL;
+
+ if (ec_msg->command == EC_CMD_REBOOT_EC)
+ msleep(EC_REBOOT_DELAY_MS);
+
+ return ret;
+}
+
+static int cros_ec_uart_resource(struct acpi_resource *ares, void *data)
+{
+ struct cros_ec_uart *ec_uart = data;
+ struct acpi_resource_uart_serialbus *sb = &ares->data.uart_serial_bus;
+
+ if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS &&
+ sb->type == ACPI_RESOURCE_SERIAL_TYPE_UART) {
+ ec_uart->baudrate = sb->default_baud_rate;
+ dev_dbg(&ec_uart->serdev->dev, "Baudrate %d\n", ec_uart->baudrate);
+
+ ec_uart->flowcontrol = sb->flow_control;
+ dev_dbg(&ec_uart->serdev->dev, "Flow control %d\n", ec_uart->flowcontrol);
+ }
+
+ return 0;
+}
+
+static int cros_ec_uart_acpi_probe(struct cros_ec_uart *ec_uart)
+{
+ int ret;
+ LIST_HEAD(resources);
+ struct acpi_device *adev = ACPI_COMPANION(&ec_uart->serdev->dev);
+
+ ret = acpi_dev_get_resources(adev, &resources, cros_ec_uart_resource, ec_uart);
+ if (ret < 0)
+ return ret;
+
+ acpi_dev_free_resource_list(&resources);
+
+ /* Retrieve GpioInt and translate it to Linux IRQ number */
+ ret = acpi_dev_gpio_irq_get(adev, 0);
+ if (ret < 0)
+ return ret;
+
+ ec_uart->irq = ret;
+ dev_dbg(&ec_uart->serdev->dev, "IRQ number %d\n", ec_uart->irq);
+
+ return 0;
+}
+
+static const struct serdev_device_ops cros_ec_uart_client_ops = {
+ .receive_buf = cros_ec_uart_rx_bytes,
+};
+
+static int cros_ec_uart_probe(struct serdev_device *serdev)
+{
+ struct device *dev = &serdev->dev;
+ struct cros_ec_device *ec_dev;
+ struct cros_ec_uart *ec_uart;
+ int ret;
+
+ ec_uart = devm_kzalloc(dev, sizeof(*ec_uart), GFP_KERNEL);
+ if (!ec_uart)
+ return -ENOMEM;
+
+ ec_dev = devm_kzalloc(dev, sizeof(*ec_dev), GFP_KERNEL);
+ if (!ec_dev)
+ return -ENOMEM;
+
+ ret = devm_serdev_device_open(dev, serdev);
+ if (ret) {
+ dev_err(dev, "Unable to open UART device");
+ return ret;
+ }
+
+ serdev_device_set_drvdata(serdev, ec_dev);
+ init_waitqueue_head(&ec_uart->response.wait_queue);
+
+ ec_uart->serdev = serdev;
+
+ ret = cros_ec_uart_acpi_probe(ec_uart);
+ if (ret < 0) {
+ dev_err(dev, "Failed to get ACPI info (%d)", ret);
+ return ret;
+ }
+
+ ret = serdev_device_set_baudrate(serdev, ec_uart->baudrate);
+ if (ret < 0) {
+ dev_err(dev, "Failed to set up host baud rate (%d)", ret);
+ return ret;
+ }
+
+ serdev_device_set_flow_control(serdev, ec_uart->flowcontrol);
+
+ /* Initialize ec_dev for cros_ec */
+ ec_dev->phys_name = dev_name(dev);
+ ec_dev->dev = dev;
+ ec_dev->priv = ec_uart;
+ ec_dev->irq = ec_uart->irq;
+ ec_dev->cmd_xfer = NULL;
+ ec_dev->pkt_xfer = cros_ec_uart_pkt_xfer;
+ ec_dev->din_size = sizeof(struct ec_host_response) +
+ sizeof(struct ec_response_get_protocol_info);
+ ec_dev->dout_size = sizeof(struct ec_host_request);
+
+ serdev_device_set_client_ops(serdev, &cros_ec_uart_client_ops);
+
+ return cros_ec_register(ec_dev);
+}
+
+static void cros_ec_uart_remove(struct serdev_device *serdev)
+{
+ struct cros_ec_device *ec_dev = serdev_device_get_drvdata(serdev);
+
+ cros_ec_unregister(ec_dev);
+};
+
+static int __maybe_unused cros_ec_uart_suspend(struct device *dev)
+{
+ struct cros_ec_device *ec_dev = dev_get_drvdata(dev);
+
+ return cros_ec_suspend(ec_dev);
+}
+
+static int __maybe_unused cros_ec_uart_resume(struct device *dev)
+{
+ struct cros_ec_device *ec_dev = dev_get_drvdata(dev);
+
+ return cros_ec_resume(ec_dev);
+}
+
+static SIMPLE_DEV_PM_OPS(cros_ec_uart_pm_ops, cros_ec_uart_suspend,
+ cros_ec_uart_resume);
+
+static const struct of_device_id cros_ec_uart_of_match[] = {
+ { .compatible = "google,cros-ec-uart" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, cros_ec_uart_of_match);
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id cros_ec_uart_acpi_id[] = {
+ { "GOOG0019", 0 },
+ {}
+};
+
+MODULE_DEVICE_TABLE(acpi, cros_ec_uart_acpi_id);
+#endif
+
+static struct serdev_device_driver cros_ec_uart_driver = {
+ .driver = {
+ .name = "cros-ec-uart",
+ .acpi_match_table = ACPI_PTR(cros_ec_uart_acpi_id),
+ .of_match_table = cros_ec_uart_of_match,
+ .pm = &cros_ec_uart_pm_ops,
+ },
+ .probe = cros_ec_uart_probe,
+ .remove = cros_ec_uart_remove,
+};
+
+module_serdev_device_driver(cros_ec_uart_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("UART interface for ChromeOS Embedded Controller");
+MODULE_AUTHOR("Bhanu Prakash Maiya <bhanumaiya@chromium.org>");
diff --git a/drivers/platform/chrome/cros_typec_switch.c b/drivers/platform/chrome/cros_typec_switch.c
index a26219e97c93..9ed1605f4071 100644
--- a/drivers/platform/chrome/cros_typec_switch.c
+++ b/drivers/platform/chrome/cros_typec_switch.c
@@ -246,15 +246,17 @@ static int cros_typec_register_switches(struct cros_typec_switch_data *sdata)
port->port_num = index;
sdata->ports[index] = port;
- ret = cros_typec_register_retimer(port, fwnode);
- if (ret) {
- dev_err(dev, "Retimer switch register failed\n");
- goto err_switch;
+ if (fwnode_property_present(fwnode, "retimer-switch")) {
+ ret = cros_typec_register_retimer(port, fwnode);
+ if (ret) {
+ dev_err(dev, "Retimer switch register failed\n");
+ goto err_switch;
+ }
+
+ dev_dbg(dev, "Retimer switch registered for index %llu\n", index);
}
- dev_dbg(dev, "Retimer switch registered for index %llu\n", index);
-
- if (!device_property_present(fwnode->dev, "mode-switch"))
+ if (!fwnode_property_present(fwnode, "mode-switch"))
continue;
ret = cros_typec_register_mode_switch(port, fwnode);
diff --git a/drivers/platform/chrome/cros_typec_vdm.c b/drivers/platform/chrome/cros_typec_vdm.c
new file mode 100644
index 000000000000..20515ee0a20e
--- /dev/null
+++ b/drivers/platform/chrome/cros_typec_vdm.c
@@ -0,0 +1,148 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * USB Power Delivery Vendor Defined Message (VDM) support code.
+ *
+ * Copyright 2023 Google LLC
+ * Author: Prashant Malani <pmalani@chromium.org>
+ */
+
+#include <linux/module.h>
+#include <linux/platform_data/cros_ec_commands.h>
+#include <linux/usb/pd_vdo.h>
+
+#include "cros_ec_typec.h"
+#include "cros_typec_vdm.h"
+
+/*
+ * Retrieves pending VDM attention messages from the EC and forwards them to the altmode driver
+ * based on SVID.
+ */
+void cros_typec_handle_vdm_attention(struct cros_typec_data *typec, int port_num)
+{
+ struct ec_response_typec_vdm_response resp;
+ struct ec_params_typec_vdm_response req = {
+ .port = port_num,
+ };
+ struct typec_altmode *amode;
+ u16 svid;
+ u32 hdr;
+ int ret;
+
+ do {
+ ret = cros_ec_cmd(typec->ec, 0, EC_CMD_TYPEC_VDM_RESPONSE, &req,
+ sizeof(req), &resp, sizeof(resp));
+ if (ret < 0) {
+ dev_warn(typec->dev, "Failed VDM response fetch, port: %d\n", port_num);
+ return;
+ }
+
+ hdr = resp.vdm_response[0];
+ svid = PD_VDO_VID(hdr);
+ dev_dbg(typec->dev, "Received VDM Attention header: %x, port: %d\n", hdr, port_num);
+
+ amode = typec_match_altmode(typec->ports[port_num]->port_altmode,
+ CROS_EC_ALTMODE_MAX, svid, PD_VDO_OPOS(hdr));
+ if (!amode) {
+ dev_err(typec->dev,
+ "Received VDM for unregistered altmode (SVID:%x), port: %d\n",
+ svid, port_num);
+ return;
+ }
+
+ typec_altmode_attention(amode, resp.vdm_attention[1]);
+ } while (resp.vdm_attention_left);
+}
+
+/*
+ * Retrieves a VDM response from the EC and forwards it to the altmode driver based on SVID.
+ */
+void cros_typec_handle_vdm_response(struct cros_typec_data *typec, int port_num)
+{
+ struct ec_response_typec_vdm_response resp;
+ struct ec_params_typec_vdm_response req = {
+ .port = port_num,
+ };
+ struct typec_altmode *amode;
+ u16 svid;
+ u32 hdr;
+ int ret;
+
+ ret = cros_ec_cmd(typec->ec, 0, EC_CMD_TYPEC_VDM_RESPONSE, &req,
+ sizeof(req), &resp, sizeof(resp));
+ if (ret < 0) {
+ dev_warn(typec->dev, "Failed VDM response fetch, port: %d\n", port_num);
+ return;
+ }
+
+ hdr = resp.vdm_response[0];
+ svid = PD_VDO_VID(hdr);
+ dev_dbg(typec->dev, "Received VDM header: %x, port: %d\n", hdr, port_num);
+
+ amode = typec_match_altmode(typec->ports[port_num]->port_altmode, CROS_EC_ALTMODE_MAX,
+ svid, PD_VDO_OPOS(hdr));
+ if (!amode) {
+ dev_err(typec->dev, "Received VDM for unregistered altmode (SVID:%x), port: %d\n",
+ svid, port_num);
+ return;
+ }
+
+ ret = typec_altmode_vdm(amode, hdr, &resp.vdm_response[1], resp.vdm_data_objects);
+ if (ret)
+ dev_err(typec->dev, "Failed to forward VDM to altmode (SVID:%x), port: %d\n",
+ svid, port_num);
+}
+
+static int cros_typec_port_amode_enter(struct typec_altmode *amode, u32 *vdo)
+{
+ struct cros_typec_port *port = typec_altmode_get_drvdata(amode);
+ struct ec_params_typec_control req = {
+ .port = port->port_num,
+ .command = TYPEC_CONTROL_COMMAND_SEND_VDM_REQ,
+ };
+ struct typec_vdm_req vdm_req = {};
+ u32 hdr;
+
+ hdr = VDO(amode->svid, 1, SVDM_VER_2_0, CMD_ENTER_MODE);
+ hdr |= VDO_OPOS(amode->mode);
+
+ vdm_req.vdm_data[0] = hdr;
+ vdm_req.vdm_data_objects = 1;
+ vdm_req.partner_type = TYPEC_PARTNER_SOP;
+ req.vdm_req_params = vdm_req;
+
+ dev_dbg(port->typec_data->dev, "Sending EnterMode VDM, hdr: %x, port: %d\n",
+ hdr, port->port_num);
+
+ return cros_ec_cmd(port->typec_data->ec, 0, EC_CMD_TYPEC_CONTROL, &req,
+ sizeof(req), NULL, 0);
+}
+
+static int cros_typec_port_amode_vdm(struct typec_altmode *amode, const u32 hdr,
+ const u32 *vdo, int cnt)
+{
+ struct cros_typec_port *port = typec_altmode_get_drvdata(amode);
+ struct ec_params_typec_control req = {
+ .port = port->port_num,
+ .command = TYPEC_CONTROL_COMMAND_SEND_VDM_REQ,
+ };
+ struct typec_vdm_req vdm_req = {};
+ int i;
+
+ vdm_req.vdm_data[0] = hdr;
+ vdm_req.vdm_data_objects = cnt;
+ for (i = 1; i < cnt; i++)
+ vdm_req.vdm_data[i] = vdo[i-1];
+ vdm_req.partner_type = TYPEC_PARTNER_SOP;
+ req.vdm_req_params = vdm_req;
+
+ dev_dbg(port->typec_data->dev, "Sending VDM, hdr: %x, num_objects: %d, port: %d\n",
+ hdr, cnt, port->port_num);
+
+ return cros_ec_cmd(port->typec_data->ec, 0, EC_CMD_TYPEC_CONTROL, &req,
+ sizeof(req), NULL, 0);
+}
+
+struct typec_altmode_ops port_amode_ops = {
+ .enter = cros_typec_port_amode_enter,
+ .vdm = cros_typec_port_amode_vdm,
+};
diff --git a/drivers/platform/chrome/cros_typec_vdm.h b/drivers/platform/chrome/cros_typec_vdm.h
new file mode 100644
index 000000000000..95a6a75d32b6
--- /dev/null
+++ b/drivers/platform/chrome/cros_typec_vdm.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __CROS_TYPEC_VDM__
+#define __CROS_TYPEC_VDM__
+
+#include <linux/usb/typec_altmode.h>
+
+extern struct typec_altmode_ops port_amode_ops;
+
+void cros_typec_handle_vdm_attention(struct cros_typec_data *typec, int port_num);
+void cros_typec_handle_vdm_response(struct cros_typec_data *typec, int port_num);
+
+#endif /* __CROS_TYPEC_VDM__ */
diff --git a/drivers/platform/chrome/wilco_ec/sysfs.c b/drivers/platform/chrome/wilco_ec/sysfs.c
index 79a5e8fa680f..893c59dde32a 100644
--- a/drivers/platform/chrome/wilco_ec/sysfs.c
+++ b/drivers/platform/chrome/wilco_ec/sysfs.c
@@ -119,8 +119,7 @@ static ssize_t get_info(struct device *dev, char *buf, enum get_ec_info_op op)
if (ret < 0)
return ret;
- return scnprintf(buf, PAGE_SIZE, "%.*s\n", (int)sizeof(resp.value),
- (char *)&resp.value);
+ return sysfs_emit(buf, "%.*s\n", (int)sizeof(resp.value), (char *)&resp.value);
}
static ssize_t version_show(struct device *dev, struct device_attribute *attr,
diff --git a/drivers/platform/mellanox/mlxreg-hotplug.c b/drivers/platform/mellanox/mlxreg-hotplug.c
index 117bc3f395fd..b7dcc64cd238 100644
--- a/drivers/platform/mellanox/mlxreg-hotplug.c
+++ b/drivers/platform/mellanox/mlxreg-hotplug.c
@@ -239,6 +239,17 @@ static ssize_t mlxreg_hotplug_attr_show(struct device *dev,
#define PRIV_ATTR(i) priv->mlxreg_hotplug_attr[i]
#define PRIV_DEV_ATTR(i) priv->mlxreg_hotplug_dev_attr[i]
+static int mlxreg_hotplug_item_label_index_get(u32 mask, u32 bit)
+{
+ int i, j;
+
+ for (i = 0, j = -1; i <= bit; i++) {
+ if (mask & BIT(i))
+ j++;
+ }
+ return j;
+}
+
static int mlxreg_hotplug_attr_init(struct mlxreg_hotplug_priv_data *priv)
{
struct mlxreg_core_hotplug_platform_data *pdata;
@@ -246,7 +257,7 @@ static int mlxreg_hotplug_attr_init(struct mlxreg_hotplug_priv_data *priv)
struct mlxreg_core_data *data;
unsigned long mask;
u32 regval;
- int num_attrs = 0, id = 0, i, j, k, ret;
+ int num_attrs = 0, id = 0, i, j, k, count, ret;
pdata = dev_get_platdata(&priv->pdev->dev);
item = pdata->items;
@@ -272,7 +283,8 @@ static int mlxreg_hotplug_attr_init(struct mlxreg_hotplug_priv_data *priv)
/* Go over all unmasked units within item. */
mask = item->mask;
k = 0;
- for_each_set_bit(j, &mask, item->count) {
+ count = item->ind ? item->ind : item->count;
+ for_each_set_bit(j, &mask, count) {
if (data->capability) {
/*
* Read capability register and skip non
@@ -282,16 +294,17 @@ static int mlxreg_hotplug_attr_init(struct mlxreg_hotplug_priv_data *priv)
data->capability, &regval);
if (ret)
return ret;
+
if (!(regval & data->bit)) {
data++;
continue;
}
}
+
PRIV_ATTR(id) = &PRIV_DEV_ATTR(id).dev_attr.attr;
PRIV_ATTR(id)->name = devm_kasprintf(&priv->pdev->dev,
GFP_KERNEL,
data->label);
-
if (!PRIV_ATTR(id)->name) {
dev_err(priv->dev, "Memory allocation failed for attr %d.\n",
id);
@@ -365,9 +378,14 @@ mlxreg_hotplug_work_helper(struct mlxreg_hotplug_priv_data *priv,
regval &= item->mask;
asserted = item->cache ^ regval;
item->cache = regval;
-
for_each_set_bit(bit, &asserted, 8) {
- data = item->data + bit;
+ int pos;
+
+ pos = mlxreg_hotplug_item_label_index_get(item->mask, bit);
+ if (pos < 0)
+ goto out;
+
+ data = item->data + pos;
if (regval & BIT(bit)) {
if (item->inversed)
mlxreg_hotplug_device_destroy(priv, data, item->kind);
diff --git a/drivers/platform/surface/aggregator/bus.c b/drivers/platform/surface/aggregator/bus.c
index de539938896e..aaad41294200 100644
--- a/drivers/platform/surface/aggregator/bus.c
+++ b/drivers/platform/surface/aggregator/bus.c
@@ -35,9 +35,9 @@ static struct attribute *ssam_device_attrs[] = {
};
ATTRIBUTE_GROUPS(ssam_device);
-static int ssam_device_uevent(struct device *dev, struct kobj_uevent_env *env)
+static int ssam_device_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
- struct ssam_device *sdev = to_ssam_device(dev);
+ const struct ssam_device *sdev = to_ssam_device(dev);
return add_uevent_var(env, "MODALIAS=ssam:d%02Xc%02Xt%02Xi%02Xf%02X",
sdev->uid.domain, sdev->uid.category,
@@ -136,9 +136,9 @@ int ssam_device_add(struct ssam_device *sdev)
* is always valid and can be used for requests as long as the client
* device we add here is registered as child under it. This essentially
* guarantees that the client driver can always expect the preconditions
- * for functions like ssam_request_sync (controller has to be started
- * and is not suspended) to hold and thus does not have to check for
- * them.
+ * for functions like ssam_request_do_sync() (controller has to be
+ * started and is not suspended) to hold and thus does not have to check
+ * for them.
*
* Note that for this to work, the controller has to be a parent device.
* If it is not a direct parent, care has to be taken that the device is
diff --git a/drivers/platform/surface/aggregator/controller.c b/drivers/platform/surface/aggregator/controller.c
index c6537a1b3a2e..535581c0471c 100644
--- a/drivers/platform/surface/aggregator/controller.c
+++ b/drivers/platform/surface/aggregator/controller.c
@@ -994,7 +994,7 @@ static void ssam_handle_event(struct ssh_rtl *rtl,
item->rqid = get_unaligned_le16(&cmd->rqid);
item->event.target_category = cmd->tc;
- item->event.target_id = cmd->tid_in;
+ item->event.target_id = cmd->sid;
item->event.command_id = cmd->cid;
item->event.instance_id = cmd->iid;
memcpy(&item->event.data[0], data->ptr, data->len);
@@ -1674,7 +1674,7 @@ int ssam_request_sync_submit(struct ssam_controller *ctrl,
EXPORT_SYMBOL_GPL(ssam_request_sync_submit);
/**
- * ssam_request_sync() - Execute a synchronous request.
+ * ssam_request_do_sync() - Execute a synchronous request.
* @ctrl: The controller via which the request will be submitted.
* @spec: The request specification and payload.
* @rsp: The response buffer.
@@ -1686,9 +1686,9 @@ EXPORT_SYMBOL_GPL(ssam_request_sync_submit);
*
* Return: Returns the status of the request or any failure during setup.
*/
-int ssam_request_sync(struct ssam_controller *ctrl,
- const struct ssam_request *spec,
- struct ssam_response *rsp)
+int ssam_request_do_sync(struct ssam_controller *ctrl,
+ const struct ssam_request *spec,
+ struct ssam_response *rsp)
{
struct ssam_request_sync *rqst;
struct ssam_span buf;
@@ -1722,10 +1722,10 @@ int ssam_request_sync(struct ssam_controller *ctrl,
ssam_request_sync_free(rqst);
return status;
}
-EXPORT_SYMBOL_GPL(ssam_request_sync);
+EXPORT_SYMBOL_GPL(ssam_request_do_sync);
/**
- * ssam_request_sync_with_buffer() - Execute a synchronous request with the
+ * ssam_request_do_sync_with_buffer() - Execute a synchronous request with the
* provided buffer as back-end for the message buffer.
* @ctrl: The controller via which the request will be submitted.
* @spec: The request specification and payload.
@@ -1738,17 +1738,17 @@ EXPORT_SYMBOL_GPL(ssam_request_sync);
* SSH_COMMAND_MESSAGE_LENGTH() macro can be used to compute the required
* message buffer size.
*
- * This function does essentially the same as ssam_request_sync(), but instead
- * of dynamically allocating the request and message data buffer, it uses the
- * provided message data buffer and stores the (small) request struct on the
- * heap.
+ * This function does essentially the same as ssam_request_do_sync(), but
+ * instead of dynamically allocating the request and message data buffer, it
+ * uses the provided message data buffer and stores the (small) request struct
+ * on the heap.
*
* Return: Returns the status of the request or any failure during setup.
*/
-int ssam_request_sync_with_buffer(struct ssam_controller *ctrl,
- const struct ssam_request *spec,
- struct ssam_response *rsp,
- struct ssam_span *buf)
+int ssam_request_do_sync_with_buffer(struct ssam_controller *ctrl,
+ const struct ssam_request *spec,
+ struct ssam_response *rsp,
+ struct ssam_span *buf)
{
struct ssam_request_sync rqst;
ssize_t len;
@@ -1772,42 +1772,42 @@ int ssam_request_sync_with_buffer(struct ssam_controller *ctrl,
return status;
}
-EXPORT_SYMBOL_GPL(ssam_request_sync_with_buffer);
+EXPORT_SYMBOL_GPL(ssam_request_do_sync_with_buffer);
/* -- Internal SAM requests. ------------------------------------------------ */
SSAM_DEFINE_SYNC_REQUEST_R(ssam_ssh_get_firmware_version, __le32, {
.target_category = SSAM_SSH_TC_SAM,
- .target_id = 0x01,
+ .target_id = SSAM_SSH_TID_SAM,
.command_id = 0x13,
.instance_id = 0x00,
});
SSAM_DEFINE_SYNC_REQUEST_R(ssam_ssh_notif_display_off, u8, {
.target_category = SSAM_SSH_TC_SAM,
- .target_id = 0x01,
+ .target_id = SSAM_SSH_TID_SAM,
.command_id = 0x15,
.instance_id = 0x00,
});
SSAM_DEFINE_SYNC_REQUEST_R(ssam_ssh_notif_display_on, u8, {
.target_category = SSAM_SSH_TC_SAM,
- .target_id = 0x01,
+ .target_id = SSAM_SSH_TID_SAM,
.command_id = 0x16,
.instance_id = 0x00,
});
SSAM_DEFINE_SYNC_REQUEST_R(ssam_ssh_notif_d0_exit, u8, {
.target_category = SSAM_SSH_TC_SAM,
- .target_id = 0x01,
+ .target_id = SSAM_SSH_TID_SAM,
.command_id = 0x33,
.instance_id = 0x00,
});
SSAM_DEFINE_SYNC_REQUEST_R(ssam_ssh_notif_d0_entry, u8, {
.target_category = SSAM_SSH_TC_SAM,
- .target_id = 0x01,
+ .target_id = SSAM_SSH_TID_SAM,
.command_id = 0x34,
.instance_id = 0x00,
});
@@ -1864,7 +1864,7 @@ static int __ssam_ssh_event_request(struct ssam_controller *ctrl,
result.length = 0;
result.pointer = &buf;
- status = ssam_retry(ssam_request_sync_onstack, ctrl, &rqst, &result,
+ status = ssam_retry(ssam_request_do_sync_onstack, ctrl, &rqst, &result,
sizeof(params));
return status < 0 ? status : buf;
diff --git a/drivers/platform/surface/aggregator/ssh_msgb.h b/drivers/platform/surface/aggregator/ssh_msgb.h
index f3ecad92eefd..438873e06098 100644
--- a/drivers/platform/surface/aggregator/ssh_msgb.h
+++ b/drivers/platform/surface/aggregator/ssh_msgb.h
@@ -189,8 +189,8 @@ static inline void msgb_push_cmd(struct msgbuf *msgb, u8 seq, u16 rqid,
__msgb_push_u8(msgb, SSH_PLD_TYPE_CMD); /* Payload type. */
__msgb_push_u8(msgb, rqst->target_category); /* Target category. */
- __msgb_push_u8(msgb, rqst->target_id); /* Target ID (out). */
- __msgb_push_u8(msgb, 0x00); /* Target ID (in). */
+ __msgb_push_u8(msgb, rqst->target_id); /* Target ID. */
+ __msgb_push_u8(msgb, SSAM_SSH_TID_HOST); /* Source ID. */
__msgb_push_u8(msgb, rqst->instance_id); /* Instance ID. */
__msgb_push_u16(msgb, rqid); /* Request ID. */
__msgb_push_u8(msgb, rqst->command_id); /* Command ID. */
diff --git a/drivers/platform/surface/aggregator/ssh_request_layer.c b/drivers/platform/surface/aggregator/ssh_request_layer.c
index 69132976d297..90634dcacabf 100644
--- a/drivers/platform/surface/aggregator/ssh_request_layer.c
+++ b/drivers/platform/surface/aggregator/ssh_request_layer.c
@@ -920,13 +920,14 @@ static void ssh_rtl_rx_command(struct ssh_ptl *p, const struct ssam_span *data)
* Check if the message was intended for us. If not, drop it.
*
* Note: We will need to change this to handle debug messages. On newer
- * generation devices, these seem to be sent to tid_out=0x03. We as
- * host can still receive them as they can be forwarded via an override
- * option on SAM, but doing so does not change tid_out=0x00.
+ * generation devices, these seem to be sent to SSAM_SSH_TID_DEBUG. We
+ * as host can still receive them as they can be forwarded via an
+ * override option on SAM, but doing so does not change the target ID
+ * to SSAM_SSH_TID_HOST.
*/
- if (command->tid_out != 0x00) {
+ if (command->tid != SSAM_SSH_TID_HOST) {
rtl_warn(rtl, "rtl: dropping message not intended for us (tid = %#04x)\n",
- command->tid_out);
+ command->tid);
return;
}
diff --git a/drivers/platform/surface/aggregator/trace.h b/drivers/platform/surface/aggregator/trace.h
index 2a2c17771d01..55cc61bba1da 100644
--- a/drivers/platform/surface/aggregator/trace.h
+++ b/drivers/platform/surface/aggregator/trace.h
@@ -96,6 +96,7 @@ TRACE_DEFINE_ENUM(SSAM_SSH_TC_POS);
#define SSAM_SEQ_NOT_APPLICABLE ((u16)-1)
#define SSAM_RQID_NOT_APPLICABLE ((u32)-1)
#define SSAM_SSH_TC_NOT_APPLICABLE 0
+#define SSAM_SSH_TID_NOT_APPLICABLE ((u8)-1)
#ifndef _SURFACE_AGGREGATOR_TRACE_HELPERS
#define _SURFACE_AGGREGATOR_TRACE_HELPERS
@@ -151,11 +152,43 @@ static inline u32 ssam_trace_get_request_id(const struct ssh_packet *p)
}
/**
+ * ssam_trace_get_request_tid() - Read the packet's request target ID.
+ * @p: The packet.
+ *
+ * Return: Returns the packet's request target ID (TID) field if the packet
+ * represents a request with command data, or %SSAM_SSH_TID_NOT_APPLICABLE
+ * if not (e.g. flush request, control packet).
+ */
+static inline u32 ssam_trace_get_request_tid(const struct ssh_packet *p)
+{
+ if (!p->data.ptr || p->data.len < SSH_COMMAND_MESSAGE_LENGTH(0))
+ return SSAM_SSH_TID_NOT_APPLICABLE;
+
+ return get_unaligned_le16(&p->data.ptr[SSH_MSGOFFSET_COMMAND(tid)]);
+}
+
+/**
+ * ssam_trace_get_request_sid() - Read the packet's request source ID.
+ * @p: The packet.
+ *
+ * Return: Returns the packet's request source ID (SID) field if the packet
+ * represents a request with command data, or %SSAM_SSH_TID_NOT_APPLICABLE
+ * if not (e.g. flush request, control packet).
+ */
+static inline u32 ssam_trace_get_request_sid(const struct ssh_packet *p)
+{
+ if (!p->data.ptr || p->data.len < SSH_COMMAND_MESSAGE_LENGTH(0))
+ return SSAM_SSH_TID_NOT_APPLICABLE;
+
+ return get_unaligned_le16(&p->data.ptr[SSH_MSGOFFSET_COMMAND(sid)]);
+}
+
+/**
* ssam_trace_get_request_tc() - Read the packet's request target category.
* @p: The packet.
*
* Return: Returns the packet's request target category (TC) field if the
- * packet represents a request with command data, or %SSAM_TC_NOT_APPLICABLE
+ * packet represents a request with command data, or %SSAM_SSH_TC_NOT_APPLICABLE
* if not (e.g. flush request, control packet).
*/
static inline u32 ssam_trace_get_request_tc(const struct ssh_packet *p)
@@ -232,8 +265,18 @@ static inline u32 ssam_trace_get_request_tc(const struct ssh_packet *p)
{ SSAM_RQID_NOT_APPLICABLE, "N/A" } \
)
-#define ssam_show_ssh_tc(rqid) \
- __print_symbolic(rqid, \
+#define ssam_show_ssh_tid(tid) \
+ __print_symbolic(tid, \
+ { SSAM_SSH_TID_NOT_APPLICABLE, "N/A" }, \
+ { SSAM_SSH_TID_HOST, "Host" }, \
+ { SSAM_SSH_TID_SAM, "SAM" }, \
+ { SSAM_SSH_TID_KIP, "KIP" }, \
+ { SSAM_SSH_TID_DEBUG, "Debug" }, \
+ { SSAM_SSH_TID_SURFLINK, "SurfLink" } \
+ )
+
+#define ssam_show_ssh_tc(tc) \
+ __print_symbolic(tc, \
{ SSAM_SSH_TC_NOT_APPLICABLE, "N/A" }, \
{ SSAM_SSH_TC_SAM, "SAM" }, \
{ SSAM_SSH_TC_BAT, "BAT" }, \
@@ -313,6 +356,8 @@ DECLARE_EVENT_CLASS(ssam_command_class,
TP_STRUCT__entry(
__field(u16, rqid)
__field(u16, len)
+ __field(u8, tid)
+ __field(u8, sid)
__field(u8, tc)
__field(u8, cid)
__field(u8, iid)
@@ -320,14 +365,18 @@ DECLARE_EVENT_CLASS(ssam_command_class,
TP_fast_assign(
__entry->rqid = get_unaligned_le16(&cmd->rqid);
+ __entry->tid = cmd->tid;
+ __entry->sid = cmd->sid;
__entry->tc = cmd->tc;
__entry->cid = cmd->cid;
__entry->iid = cmd->iid;
__entry->len = len;
),
- TP_printk("rqid=%#06x, tc=%s, cid=%#04x, iid=%#04x, len=%u",
+ TP_printk("rqid=%#06x, tid=%s, sid=%s, tc=%s, cid=%#04x, iid=%#04x, len=%u",
__entry->rqid,
+ ssam_show_ssh_tid(__entry->tid),
+ ssam_show_ssh_tid(__entry->sid),
ssam_show_ssh_tc(__entry->tc),
__entry->cid,
__entry->iid,
@@ -430,6 +479,8 @@ DECLARE_EVENT_CLASS(ssam_request_class,
__field(u8, tc)
__field(u16, cid)
__field(u16, iid)
+ __field(u8, tid)
+ __field(u8, sid)
),
TP_fast_assign(
@@ -439,16 +490,20 @@ DECLARE_EVENT_CLASS(ssam_request_class,
__entry->state = READ_ONCE(request->state);
__entry->rqid = ssam_trace_get_request_id(p);
ssam_trace_ptr_uid(p, __entry->uid);
+ __entry->tid = ssam_trace_get_request_tid(p);
+ __entry->sid = ssam_trace_get_request_sid(p);
__entry->tc = ssam_trace_get_request_tc(p);
__entry->cid = ssam_trace_get_command_field_u8(p, cid);
__entry->iid = ssam_trace_get_command_field_u8(p, iid);
),
- TP_printk("uid=%s, rqid=%s, ty=%s, sta=%s, tc=%s, cid=%s, iid=%s",
+ TP_printk("uid=%s, rqid=%s, ty=%s, sta=%s, tid=%s, sid=%s, tc=%s, cid=%s, iid=%s",
__entry->uid,
ssam_show_request_id(__entry->rqid),
ssam_show_request_type(__entry->state),
ssam_show_request_state(__entry->state),
+ ssam_show_ssh_tid(__entry->tid),
+ ssam_show_ssh_tid(__entry->sid),
ssam_show_ssh_tc(__entry->tc),
ssam_show_generic_u8_field(__entry->cid),
ssam_show_generic_u8_field(__entry->iid)
@@ -474,6 +529,8 @@ DECLARE_EVENT_CLASS(ssam_request_status_class,
__field(u8, tc)
__field(u16, cid)
__field(u16, iid)
+ __field(u8, tid)
+ __field(u8, sid)
),
TP_fast_assign(
@@ -484,16 +541,20 @@ DECLARE_EVENT_CLASS(ssam_request_status_class,
__entry->rqid = ssam_trace_get_request_id(p);
__entry->status = status;
ssam_trace_ptr_uid(p, __entry->uid);
+ __entry->tid = ssam_trace_get_request_tid(p);
+ __entry->sid = ssam_trace_get_request_sid(p);
__entry->tc = ssam_trace_get_request_tc(p);
__entry->cid = ssam_trace_get_command_field_u8(p, cid);
__entry->iid = ssam_trace_get_command_field_u8(p, iid);
),
- TP_printk("uid=%s, rqid=%s, ty=%s, sta=%s, tc=%s, cid=%s, iid=%s, status=%d",
+ TP_printk("uid=%s, rqid=%s, ty=%s, sta=%s, tid=%s, sid=%s, tc=%s, cid=%s, iid=%s, status=%d",
__entry->uid,
ssam_show_request_id(__entry->rqid),
ssam_show_request_type(__entry->state),
ssam_show_request_state(__entry->state),
+ ssam_show_ssh_tid(__entry->tid),
+ ssam_show_ssh_tid(__entry->sid),
ssam_show_ssh_tc(__entry->tc),
ssam_show_generic_u8_field(__entry->cid),
ssam_show_generic_u8_field(__entry->iid),
diff --git a/drivers/platform/surface/surface_acpi_notify.c b/drivers/platform/surface/surface_acpi_notify.c
index 50500e562963..897cdd9c3aae 100644
--- a/drivers/platform/surface/surface_acpi_notify.c
+++ b/drivers/platform/surface/surface_acpi_notify.c
@@ -590,7 +590,7 @@ static acpi_status san_rqst(struct san_data *d, struct gsb_buffer *buffer)
return san_rqst_fixup_suspended(d, &rqst, buffer);
}
- status = __ssam_retry(ssam_request_sync_onstack, SAN_REQUEST_NUM_TRIES,
+ status = __ssam_retry(ssam_request_do_sync_onstack, SAN_REQUEST_NUM_TRIES,
d->ctrl, &rqst, &rsp, SAN_GSB_MAX_RQSX_PAYLOAD);
if (!status) {
diff --git a/drivers/platform/surface/surface_aggregator_cdev.c b/drivers/platform/surface/surface_aggregator_cdev.c
index 492c82e69182..07f0ed658369 100644
--- a/drivers/platform/surface/surface_aggregator_cdev.c
+++ b/drivers/platform/surface/surface_aggregator_cdev.c
@@ -302,8 +302,8 @@ static long ssam_cdev_request(struct ssam_cdev_client *client, struct ssam_cdev_
* theoretical maximum (SSH_COMMAND_MAX_PAYLOAD_SIZE) of the
* underlying protocol (note that nothing remotely this size
* should ever be allocated in any normal case). This size is
- * validated later in ssam_request_sync(), for allocation the
- * bound imposed by u16 should be enough.
+ * validated later in ssam_request_do_sync(), for allocation
+ * the bound imposed by u16 should be enough.
*/
spec.payload = kzalloc(spec.length, GFP_KERNEL);
if (!spec.payload) {
@@ -342,7 +342,7 @@ static long ssam_cdev_request(struct ssam_cdev_client *client, struct ssam_cdev_
}
/* Perform request. */
- status = ssam_request_sync(client->cdev->ctrl, &spec, &rsp);
+ status = ssam_request_do_sync(client->cdev->ctrl, &spec, &rsp);
if (status)
goto out;
diff --git a/drivers/platform/surface/surface_aggregator_hub.c b/drivers/platform/surface/surface_aggregator_hub.c
index 43061514be38..8b8b80228c14 100644
--- a/drivers/platform/surface/surface_aggregator_hub.c
+++ b/drivers/platform/surface/surface_aggregator_hub.c
@@ -214,7 +214,7 @@ static void ssam_hub_remove(struct ssam_device *sdev)
SSAM_DEFINE_SYNC_REQUEST_R(ssam_bas_query_opmode, u8, {
.target_category = SSAM_SSH_TC_BAS,
- .target_id = 0x01,
+ .target_id = SSAM_SSH_TID_SAM,
.command_id = 0x0d,
.instance_id = 0x00,
});
@@ -292,7 +292,7 @@ static const struct ssam_hub_desc base_hub = {
SSAM_DEFINE_SYNC_REQUEST_R(__ssam_kip_query_state, u8, {
.target_category = SSAM_SSH_TC_KIP,
- .target_id = 0x01,
+ .target_id = SSAM_SSH_TID_SAM,
.command_id = 0x2c,
.instance_id = 0x00,
});
@@ -348,8 +348,8 @@ static const struct ssam_hub_desc kip_hub = {
/* -- Driver registration. -------------------------------------------------- */
static const struct ssam_device_id ssam_hub_match[] = {
- { SSAM_VDEV(HUB, 0x01, SSAM_SSH_TC_KIP, 0x00), (unsigned long)&kip_hub },
- { SSAM_VDEV(HUB, 0x02, SSAM_SSH_TC_BAS, 0x00), (unsigned long)&base_hub },
+ { SSAM_VDEV(HUB, SAM, SSAM_SSH_TC_KIP, 0x00), (unsigned long)&kip_hub },
+ { SSAM_VDEV(HUB, SAM, SSAM_SSH_TC_BAS, 0x00), (unsigned long)&base_hub },
{ }
};
MODULE_DEVICE_TABLE(ssam, ssam_hub_match);
diff --git a/drivers/platform/surface/surface_aggregator_registry.c b/drivers/platform/surface/surface_aggregator_registry.c
index 023f126121d7..296f72d52e6a 100644
--- a/drivers/platform/surface/surface_aggregator_registry.c
+++ b/drivers/platform/surface/surface_aggregator_registry.c
@@ -46,7 +46,7 @@ static const struct software_node ssam_node_hub_kip = {
/* Base device hub (devices attached to Surface Book 3 base). */
static const struct software_node ssam_node_hub_base = {
- .name = "ssam:00:00:02:11:00",
+ .name = "ssam:00:00:01:11:00",
.parent = &ssam_node_root,
};
diff --git a/drivers/platform/surface/surface_aggregator_tabletsw.c b/drivers/platform/surface/surface_aggregator_tabletsw.c
index 27d95a6a7851..9fed800c7cc0 100644
--- a/drivers/platform/surface/surface_aggregator_tabletsw.c
+++ b/drivers/platform/surface/surface_aggregator_tabletsw.c
@@ -247,7 +247,7 @@ static bool ssam_kip_cover_state_is_tablet_mode(struct ssam_tablet_sw *sw, u32 s
SSAM_DEFINE_SYNC_REQUEST_R(__ssam_kip_get_cover_state, u8, {
.target_category = SSAM_SSH_TC_KIP,
- .target_id = 0x01,
+ .target_id = SSAM_SSH_TID_SAM,
.command_id = 0x1d,
.instance_id = 0x00,
});
@@ -371,7 +371,7 @@ static int ssam_pos_get_sources_list(struct ssam_tablet_sw *sw, struct ssam_sour
int status;
rqst.target_category = SSAM_SSH_TC_POS;
- rqst.target_id = 0x01;
+ rqst.target_id = SSAM_SSH_TID_SAM;
rqst.command_id = 0x01;
rqst.instance_id = 0x00;
rqst.flags = SSAM_REQUEST_HAS_RESPONSE;
@@ -382,7 +382,7 @@ static int ssam_pos_get_sources_list(struct ssam_tablet_sw *sw, struct ssam_sour
rsp.length = 0;
rsp.pointer = (u8 *)sources;
- status = ssam_retry(ssam_request_sync_onstack, sw->sdev->ctrl, &rqst, &rsp, 0);
+ status = ssam_retry(ssam_request_do_sync_onstack, sw->sdev->ctrl, &rqst, &rsp, 0);
if (status)
return status;
@@ -430,7 +430,7 @@ static int ssam_pos_get_source(struct ssam_tablet_sw *sw, u32 *source_id)
SSAM_DEFINE_SYNC_REQUEST_WR(__ssam_pos_get_posture_for_source, __le32, __le32, {
.target_category = SSAM_SSH_TC_POS,
- .target_id = 0x01,
+ .target_id = SSAM_SSH_TID_SAM,
.command_id = 0x02,
.instance_id = 0x00,
});
@@ -510,8 +510,8 @@ static const struct ssam_tablet_sw_desc ssam_pos_sw_desc = {
/* -- Driver registration. -------------------------------------------------- */
static const struct ssam_device_id ssam_tablet_sw_match[] = {
- { SSAM_SDEV(KIP, 0x01, 0x00, 0x01), (unsigned long)&ssam_kip_sw_desc },
- { SSAM_SDEV(POS, 0x01, 0x00, 0x01), (unsigned long)&ssam_pos_sw_desc },
+ { SSAM_SDEV(KIP, SAM, 0x00, 0x01), (unsigned long)&ssam_kip_sw_desc },
+ { SSAM_SDEV(POS, SAM, 0x00, 0x01), (unsigned long)&ssam_pos_sw_desc },
{ },
};
MODULE_DEVICE_TABLE(ssam, ssam_tablet_sw_match);
diff --git a/drivers/platform/surface/surface_dtx.c b/drivers/platform/surface/surface_dtx.c
index ed36944467f9..30cbde278c59 100644
--- a/drivers/platform/surface/surface_dtx.c
+++ b/drivers/platform/surface/surface_dtx.c
@@ -71,63 +71,63 @@ static_assert(sizeof(struct ssam_bas_base_info) == 2);
SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_lock, {
.target_category = SSAM_SSH_TC_BAS,
- .target_id = 0x01,
+ .target_id = SSAM_SSH_TID_SAM,
.command_id = 0x06,
.instance_id = 0x00,
});
SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_unlock, {
.target_category = SSAM_SSH_TC_BAS,
- .target_id = 0x01,
+ .target_id = SSAM_SSH_TID_SAM,
.command_id = 0x07,
.instance_id = 0x00,
});
SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_request, {
.target_category = SSAM_SSH_TC_BAS,
- .target_id = 0x01,
+ .target_id = SSAM_SSH_TID_SAM,
.command_id = 0x08,
.instance_id = 0x00,
});
SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_confirm, {
.target_category = SSAM_SSH_TC_BAS,
- .target_id = 0x01,
+ .target_id = SSAM_SSH_TID_SAM,
.command_id = 0x09,
.instance_id = 0x00,
});
SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_heartbeat, {
.target_category = SSAM_SSH_TC_BAS,
- .target_id = 0x01,
+ .target_id = SSAM_SSH_TID_SAM,
.command_id = 0x0a,
.instance_id = 0x00,
});
SSAM_DEFINE_SYNC_REQUEST_N(ssam_bas_latch_cancel, {
.target_category = SSAM_SSH_TC_BAS,
- .target_id = 0x01,
+ .target_id = SSAM_SSH_TID_SAM,
.command_id = 0x0b,
.instance_id = 0x00,
});
SSAM_DEFINE_SYNC_REQUEST_R(ssam_bas_get_base, struct ssam_bas_base_info, {
.target_category = SSAM_SSH_TC_BAS,
- .target_id = 0x01,
+ .target_id = SSAM_SSH_TID_SAM,
.command_id = 0x0c,
.instance_id = 0x00,
});
SSAM_DEFINE_SYNC_REQUEST_R(ssam_bas_get_device_mode, u8, {
.target_category = SSAM_SSH_TC_BAS,
- .target_id = 0x01,
+ .target_id = SSAM_SSH_TID_SAM,
.command_id = 0x0d,
.instance_id = 0x00,
});
SSAM_DEFINE_SYNC_REQUEST_R(ssam_bas_get_latch_status, u8, {
.target_category = SSAM_SSH_TC_BAS,
- .target_id = 0x01,
+ .target_id = SSAM_SSH_TID_SAM,
.command_id = 0x11,
.instance_id = 0x00,
});
@@ -1214,7 +1214,7 @@ static void surface_dtx_ssam_remove(struct ssam_device *sdev)
}
static const struct ssam_device_id surface_dtx_ssam_match[] = {
- { SSAM_SDEV(BAS, 0x01, 0x00, 0x00) },
+ { SSAM_SDEV(BAS, SAM, 0x00, 0x00) },
{ },
};
MODULE_DEVICE_TABLE(ssam, surface_dtx_ssam_match);
diff --git a/drivers/platform/surface/surface_hotplug.c b/drivers/platform/surface/surface_hotplug.c
index f004a2495201..7b6d887dccdb 100644
--- a/drivers/platform/surface/surface_hotplug.c
+++ b/drivers/platform/surface/surface_hotplug.c
@@ -101,18 +101,12 @@ static void shps_dsm_notify_irq(struct platform_device *pdev, enum shps_irq_type
param.type = ACPI_TYPE_INTEGER;
param.integer.value = value;
- result = acpi_evaluate_dsm(handle, &shps_dsm_guid, SHPS_DSM_REVISION,
- shps_dsm_fn_for_irq(type), &param);
-
+ result = acpi_evaluate_dsm_typed(handle, &shps_dsm_guid, SHPS_DSM_REVISION,
+ shps_dsm_fn_for_irq(type), &param, ACPI_TYPE_BUFFER);
if (!result) {
dev_err(&pdev->dev, "IRQ notification via DSM failed (irq=%d, gpio=%d)\n",
type, value);
- } else if (result->type != ACPI_TYPE_BUFFER) {
- dev_err(&pdev->dev,
- "IRQ notification via DSM failed: unexpected result type (irq=%d, gpio=%d)\n",
- type, value);
-
} else if (result->buffer.length != 1 || result->buffer.pointer[0] != 0) {
dev_err(&pdev->dev,
"IRQ notification via DSM failed: unexpected result value (irq=%d, gpio=%d)\n",
@@ -121,8 +115,7 @@ static void shps_dsm_notify_irq(struct platform_device *pdev, enum shps_irq_type
mutex_unlock(&sdev->lock[type]);
- if (result)
- ACPI_FREE(result);
+ ACPI_FREE(result);
}
static irqreturn_t shps_handle_irq(int irq, void *data)
diff --git a/drivers/platform/surface/surface_platform_profile.c b/drivers/platform/surface/surface_platform_profile.c
index fbf2e11fd6ce..f433a13c3689 100644
--- a/drivers/platform/surface/surface_platform_profile.c
+++ b/drivers/platform/surface/surface_platform_profile.c
@@ -169,7 +169,7 @@ static void surface_platform_profile_remove(struct ssam_device *sdev)
}
static const struct ssam_device_id ssam_platform_profile_match[] = {
- { SSAM_SDEV(TMP, 0x01, 0x00, 0x01) },
+ { SSAM_SDEV(TMP, SAM, 0x00, 0x01) },
{ },
};
MODULE_DEVICE_TABLE(ssam, ssam_platform_profile_match);
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 5692385e2d26..ec7c2b4e1721 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -214,7 +214,6 @@ config APPLE_GMUX
depends on PNP
depends on BACKLIGHT_CLASS_DEVICE
depends on BACKLIGHT_APPLE=n || BACKLIGHT_APPLE
- depends on ACPI_VIDEO=n || ACPI_VIDEO
help
This driver provides support for the gmux device found on many
Apple laptops, which controls the display mux for the hybrid
diff --git a/drivers/platform/x86/acerhdf.c b/drivers/platform/x86/acerhdf.c
index d2c0fc38c201..a48638ad2a8a 100644
--- a/drivers/platform/x86/acerhdf.c
+++ b/drivers/platform/x86/acerhdf.c
@@ -46,6 +46,8 @@
* measured by the on-die thermal monitor are within 0 <= Tj <= 90. So,
* assume 89°C is critical temperature.
*/
+#define ACERHDF_DEFAULT_TEMP_FANON 60000
+#define ACERHDF_DEFAULT_TEMP_FANOFF 53000
#define ACERHDF_TEMP_CRIT 89000
#define ACERHDF_FAN_OFF 0
#define ACERHDF_FAN_AUTO 1
@@ -70,8 +72,8 @@ static int kernelmode;
#endif
static unsigned int interval = 10;
-static unsigned int fanon = 60000;
-static unsigned int fanoff = 53000;
+static unsigned int fanon = ACERHDF_DEFAULT_TEMP_FANON;
+static unsigned int fanoff = ACERHDF_DEFAULT_TEMP_FANOFF;
static unsigned int verbose;
static unsigned int list_supported;
static unsigned int fanstate = ACERHDF_FAN_AUTO;
@@ -137,6 +139,15 @@ struct ctrl_settings {
int mcmd_enable;
};
+static struct thermal_trip trips[] = {
+ [0] = { .temperature = ACERHDF_DEFAULT_TEMP_FANON,
+ .hysteresis = ACERHDF_DEFAULT_TEMP_FANON - ACERHDF_DEFAULT_TEMP_FANOFF,
+ .type = THERMAL_TRIP_ACTIVE },
+
+ [1] = { .temperature = ACERHDF_TEMP_CRIT,
+ .type = THERMAL_TRIP_CRITICAL }
+};
+
static struct ctrl_settings ctrl_cfg __read_mostly;
/* Register addresses and values for different BIOS versions */
@@ -326,6 +337,15 @@ static void acerhdf_check_param(struct thermal_zone_device *thermal)
fanon = ACERHDF_MAX_FANON;
}
+ if (fanon < fanoff) {
+ pr_err("fanoff temperature (%d) is above fanon temperature (%d), clamping to %d\n",
+ fanoff, fanon, fanon);
+ fanoff = fanon;
+ };
+
+ trips[0].temperature = fanon;
+ trips[0].hysteresis = fanon - fanoff;
+
if (kernelmode && prev_interval != interval) {
if (interval > ACERHDF_MAX_INTERVAL) {
pr_err("interval too high, set to %d\n",
@@ -424,43 +444,6 @@ static int acerhdf_change_mode(struct thermal_zone_device *thermal,
return 0;
}
-static int acerhdf_get_trip_type(struct thermal_zone_device *thermal, int trip,
- enum thermal_trip_type *type)
-{
- if (trip == 0)
- *type = THERMAL_TRIP_ACTIVE;
- else if (trip == 1)
- *type = THERMAL_TRIP_CRITICAL;
- else
- return -EINVAL;
-
- return 0;
-}
-
-static int acerhdf_get_trip_hyst(struct thermal_zone_device *thermal, int trip,
- int *temp)
-{
- if (trip != 0)
- return -EINVAL;
-
- *temp = fanon - fanoff;
-
- return 0;
-}
-
-static int acerhdf_get_trip_temp(struct thermal_zone_device *thermal, int trip,
- int *temp)
-{
- if (trip == 0)
- *temp = fanon;
- else if (trip == 1)
- *temp = ACERHDF_TEMP_CRIT;
- else
- return -EINVAL;
-
- return 0;
-}
-
static int acerhdf_get_crit_temp(struct thermal_zone_device *thermal,
int *temperature)
{
@@ -474,13 +457,9 @@ static struct thermal_zone_device_ops acerhdf_dev_ops = {
.unbind = acerhdf_unbind,
.get_temp = acerhdf_get_ec_temp,
.change_mode = acerhdf_change_mode,
- .get_trip_type = acerhdf_get_trip_type,
- .get_trip_hyst = acerhdf_get_trip_hyst,
- .get_trip_temp = acerhdf_get_trip_temp,
.get_crit_temp = acerhdf_get_crit_temp,
};
-
/*
* cooling device callback functions
* get maximal fan cooling state
@@ -565,11 +544,6 @@ static int acerhdf_probe(struct platform_device *device)
return 0;
}
-static int acerhdf_remove(struct platform_device *device)
-{
- return 0;
-}
-
static const struct dev_pm_ops acerhdf_pm_ops = {
.suspend = acerhdf_suspend,
.freeze = acerhdf_suspend,
@@ -581,7 +555,6 @@ static struct platform_driver acerhdf_driver = {
.pm = &acerhdf_pm_ops,
},
.probe = acerhdf_probe,
- .remove = acerhdf_remove,
};
/* check hardware */
@@ -710,10 +683,10 @@ static int __init acerhdf_register_thermal(void)
if (IS_ERR(cl_dev))
return -EINVAL;
- thz_dev = thermal_zone_device_register("acerhdf", 2, 0, NULL,
- &acerhdf_dev_ops,
- &acerhdf_zone_params, 0,
- (kernelmode) ? interval*1000 : 0);
+ thz_dev = thermal_zone_device_register_with_trips("acerhdf", trips, ARRAY_SIZE(trips),
+ 0, NULL, &acerhdf_dev_ops,
+ &acerhdf_zone_params, 0,
+ (kernelmode) ? interval*1000 : 0);
if (IS_ERR(thz_dev))
return -EINVAL;
diff --git a/drivers/platform/x86/amd/Kconfig b/drivers/platform/x86/amd/Kconfig
index a825af8126c8..2ce8cb2170df 100644
--- a/drivers/platform/x86/amd/Kconfig
+++ b/drivers/platform/x86/amd/Kconfig
@@ -8,6 +8,7 @@ source "drivers/platform/x86/amd/pmf/Kconfig"
config AMD_PMC
tristate "AMD SoC PMC driver"
depends on ACPI && PCI && RTC_CLASS
+ select SERIO
help
The driver provides support for AMD Power Management Controller
primarily responsible for S2Idle transactions that are driven from
diff --git a/drivers/platform/x86/amd/pmc.c b/drivers/platform/x86/amd/pmc.c
index 3cbb01ec10e3..ab05b9ee6655 100644
--- a/drivers/platform/x86/amd/pmc.c
+++ b/drivers/platform/x86/amd/pmc.c
@@ -43,6 +43,7 @@
#define AMD_PMC_STB_S2IDLE_PREPARE 0xC6000001
#define AMD_PMC_STB_S2IDLE_RESTORE 0xC6000002
#define AMD_PMC_STB_S2IDLE_CHECK 0xC6000003
+#define AMD_PMC_STB_DUMMY_PC 0xC6000007
/* STB S2D(Spill to DRAM) has different message port offset */
#define STB_SPILL_TO_DRAM 0xBE
@@ -104,6 +105,7 @@
#define DELAY_MIN_US 2000
#define DELAY_MAX_US 3000
#define FIFO_SIZE 4096
+
enum amd_pmc_def {
MSG_TEST = 0x01,
MSG_OS_HINT_PCO,
@@ -114,6 +116,7 @@ enum s2d_arg {
S2D_TELEMETRY_SIZE = 0x01,
S2D_PHYS_ADDR_LOW,
S2D_PHYS_ADDR_HIGH,
+ S2D_NUM_SAMPLES,
};
struct amd_pmc_bit_map {
@@ -246,13 +249,40 @@ static const struct file_operations amd_pmc_stb_debugfs_fops = {
static int amd_pmc_stb_debugfs_open_v2(struct inode *inode, struct file *filp)
{
struct amd_pmc_dev *dev = filp->f_inode->i_private;
- u32 *buf;
+ u32 *buf, fsize, num_samples, stb_rdptr_offset = 0;
+ int ret;
+
+ /* Write dummy postcode while reading the STB buffer */
+ ret = amd_pmc_write_stb(dev, AMD_PMC_STB_DUMMY_PC);
+ if (ret)
+ dev_err(dev->dev, "error writing to STB: %d\n", ret);
buf = kzalloc(S2D_TELEMETRY_BYTES_MAX, GFP_KERNEL);
if (!buf)
return -ENOMEM;
- memcpy_fromio(buf, dev->stb_virt_addr, S2D_TELEMETRY_BYTES_MAX);
+ /* Spill to DRAM num_samples uses separate SMU message port */
+ dev->msg_port = 1;
+
+ /* Get the num_samples to calculate the last push location */
+ ret = amd_pmc_send_cmd(dev, S2D_NUM_SAMPLES, &num_samples, STB_SPILL_TO_DRAM, 1);
+ /* Clear msg_port for other SMU operation */
+ dev->msg_port = 0;
+ if (ret) {
+ dev_err(dev->dev, "error: S2D_NUM_SAMPLES not supported : %d\n", ret);
+ return ret;
+ }
+
+ /* Start capturing data from the last push location */
+ if (num_samples > S2D_TELEMETRY_BYTES_MAX) {
+ fsize = S2D_TELEMETRY_BYTES_MAX;
+ stb_rdptr_offset = num_samples - fsize;
+ } else {
+ fsize = num_samples;
+ stb_rdptr_offset = 0;
+ }
+
+ memcpy_fromio(buf, dev->stb_virt_addr + stb_rdptr_offset, fsize);
filp->private_data = buf;
return 0;
@@ -560,13 +590,13 @@ static void amd_pmc_dump_registers(struct amd_pmc_dev *dev)
}
value = amd_pmc_reg_read(dev, response);
- dev_dbg(dev->dev, "AMD_PMC_REGISTER_RESPONSE:%x\n", value);
+ dev_dbg(dev->dev, "AMD_%s_REGISTER_RESPONSE:%x\n", dev->msg_port ? "S2D" : "PMC", value);
value = amd_pmc_reg_read(dev, argument);
- dev_dbg(dev->dev, "AMD_PMC_REGISTER_ARGUMENT:%x\n", value);
+ dev_dbg(dev->dev, "AMD_%s_REGISTER_ARGUMENT:%x\n", dev->msg_port ? "S2D" : "PMC", value);
value = amd_pmc_reg_read(dev, message);
- dev_dbg(dev->dev, "AMD_PMC_REGISTER_MESSAGE:%x\n", value);
+ dev_dbg(dev->dev, "AMD_%s_REGISTER_MESSAGE:%x\n", dev->msg_port ? "S2D" : "PMC", value);
}
static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, u32 arg, u32 *data, u8 msg, bool ret)
diff --git a/drivers/platform/x86/amd/pmf/Kconfig b/drivers/platform/x86/amd/pmf/Kconfig
index c375498c4071..6d89528c3177 100644
--- a/drivers/platform/x86/amd/pmf/Kconfig
+++ b/drivers/platform/x86/amd/pmf/Kconfig
@@ -6,6 +6,7 @@
config AMD_PMF
tristate "AMD Platform Management Framework"
depends on ACPI && PCI
+ depends on POWER_SUPPLY
select ACPI_PLATFORM_PROFILE
help
This driver provides support for the AMD Platform Management Framework.
diff --git a/drivers/platform/x86/amd/pmf/auto-mode.c b/drivers/platform/x86/amd/pmf/auto-mode.c
index 644af42e07cf..96a8e1832c05 100644
--- a/drivers/platform/x86/amd/pmf/auto-mode.c
+++ b/drivers/platform/x86/amd/pmf/auto-mode.c
@@ -275,13 +275,8 @@ int amd_pmf_reset_amt(struct amd_pmf_dev *dev)
*/
if (is_apmf_func_supported(dev, APMF_FUNC_STATIC_SLIDER_GRANULAR)) {
- int mode = amd_pmf_get_pprof_modes(dev);
-
- if (mode < 0)
- return mode;
-
dev_dbg(dev->dev, "resetting AMT thermals\n");
- amd_pmf_update_slider(dev, SLIDER_OP_SET, mode, NULL);
+ amd_pmf_set_sps_power_limits(dev);
}
return 0;
}
@@ -299,7 +294,5 @@ void amd_pmf_deinit_auto_mode(struct amd_pmf_dev *dev)
void amd_pmf_init_auto_mode(struct amd_pmf_dev *dev)
{
amd_pmf_load_defaults_auto_mode(dev);
- /* update the thermal limits for Automode */
- amd_pmf_set_automode(dev, config_store.current_mode, NULL);
amd_pmf_init_metrics_table(dev);
}
diff --git a/drivers/platform/x86/amd/pmf/cnqf.c b/drivers/platform/x86/amd/pmf/cnqf.c
index 3f9731a2ac28..4beb22a19466 100644
--- a/drivers/platform/x86/amd/pmf/cnqf.c
+++ b/drivers/platform/x86/amd/pmf/cnqf.c
@@ -103,7 +103,7 @@ int amd_pmf_trans_cnqf(struct amd_pmf_dev *dev, int socket_power, ktime_t time_l
src = amd_pmf_cnqf_get_power_source(dev);
- if (dev->current_profile == PLATFORM_PROFILE_BALANCED) {
+ if (is_pprof_balanced(dev)) {
amd_pmf_set_cnqf(dev, src, config_store.current_mode, NULL);
} else {
/*
@@ -307,13 +307,9 @@ static ssize_t cnqf_enable_store(struct device *dev,
const char *buf, size_t count)
{
struct amd_pmf_dev *pdev = dev_get_drvdata(dev);
- int mode, result, src;
+ int result, src;
bool input;
- mode = amd_pmf_get_pprof_modes(pdev);
- if (mode < 0)
- return mode;
-
result = kstrtobool(buf, &input);
if (result)
return result;
@@ -321,11 +317,11 @@ static ssize_t cnqf_enable_store(struct device *dev,
src = amd_pmf_cnqf_get_power_source(pdev);
pdev->cnqf_enabled = input;
- if (pdev->cnqf_enabled && pdev->current_profile == PLATFORM_PROFILE_BALANCED) {
+ if (pdev->cnqf_enabled && is_pprof_balanced(pdev)) {
amd_pmf_set_cnqf(pdev, src, config_store.current_mode, NULL);
} else {
if (is_apmf_func_supported(pdev, APMF_FUNC_STATIC_SLIDER_GRANULAR))
- amd_pmf_update_slider(pdev, SLIDER_OP_SET, mode, NULL);
+ amd_pmf_set_sps_power_limits(pdev);
}
dev_dbg(pdev->dev, "Received CnQF %s\n", input ? "on" : "off");
@@ -386,7 +382,7 @@ int amd_pmf_init_cnqf(struct amd_pmf_dev *dev)
dev->cnqf_enabled = amd_pmf_check_flags(dev);
/* update the thermal for CnQF */
- if (dev->cnqf_enabled && dev->current_profile == PLATFORM_PROFILE_BALANCED) {
+ if (dev->cnqf_enabled && is_pprof_balanced(dev)) {
src = amd_pmf_cnqf_get_power_source(dev);
amd_pmf_set_cnqf(dev, src, config_store.current_mode, NULL);
}
diff --git a/drivers/platform/x86/amd/pmf/core.c b/drivers/platform/x86/amd/pmf/core.c
index a5f5a4bcff6d..da23639071d7 100644
--- a/drivers/platform/x86/amd/pmf/core.c
+++ b/drivers/platform/x86/amd/pmf/core.c
@@ -58,6 +58,25 @@ static bool force_load;
module_param(force_load, bool, 0444);
MODULE_PARM_DESC(force_load, "Force load this driver on supported older platforms (experimental)");
+static int amd_pmf_pwr_src_notify_call(struct notifier_block *nb, unsigned long event, void *data)
+{
+ struct amd_pmf_dev *pmf = container_of(nb, struct amd_pmf_dev, pwr_src_notifier);
+
+ if (event != PSY_EVENT_PROP_CHANGED)
+ return NOTIFY_OK;
+
+ if (is_apmf_func_supported(pmf, APMF_FUNC_AUTO_MODE) ||
+ is_apmf_func_supported(pmf, APMF_FUNC_DYN_SLIDER_DC) ||
+ is_apmf_func_supported(pmf, APMF_FUNC_DYN_SLIDER_AC)) {
+ if ((pmf->amt_enabled || pmf->cnqf_enabled) && is_pprof_balanced(pmf))
+ return NOTIFY_DONE;
+ }
+
+ amd_pmf_set_sps_power_limits(pmf);
+
+ return NOTIFY_OK;
+}
+
static int current_power_limits_show(struct seq_file *seq, void *unused)
{
struct amd_pmf_dev *dev = seq->private;
@@ -366,14 +385,18 @@ static int amd_pmf_probe(struct platform_device *pdev)
if (!dev->regbase)
return -ENOMEM;
+ mutex_init(&dev->lock);
+ mutex_init(&dev->update_mutex);
+
apmf_acpi_init(dev);
platform_set_drvdata(pdev, dev);
amd_pmf_init_features(dev);
apmf_install_handler(dev);
amd_pmf_dbgfs_register(dev);
- mutex_init(&dev->lock);
- mutex_init(&dev->update_mutex);
+ dev->pwr_src_notifier.notifier_call = amd_pmf_pwr_src_notify_call;
+ power_supply_reg_notifier(&dev->pwr_src_notifier);
+
dev_info(dev->dev, "registered PMF device successfully\n");
return 0;
@@ -383,11 +406,12 @@ static int amd_pmf_remove(struct platform_device *pdev)
{
struct amd_pmf_dev *dev = platform_get_drvdata(pdev);
- mutex_destroy(&dev->lock);
- mutex_destroy(&dev->update_mutex);
+ power_supply_unreg_notifier(&dev->pwr_src_notifier);
amd_pmf_deinit_features(dev);
apmf_acpi_deinit(dev);
amd_pmf_dbgfs_unregister(dev);
+ mutex_destroy(&dev->lock);
+ mutex_destroy(&dev->update_mutex);
kfree(dev->buf);
return 0;
}
diff --git a/drivers/platform/x86/amd/pmf/pmf.h b/drivers/platform/x86/amd/pmf/pmf.h
index 84bbe2c6ea61..06c30cdc0573 100644
--- a/drivers/platform/x86/amd/pmf/pmf.h
+++ b/drivers/platform/x86/amd/pmf/pmf.h
@@ -169,6 +169,7 @@ struct amd_pmf_dev {
struct mutex update_mutex; /* protects race between ACPI handler and metrics thread */
bool cnqf_enabled;
bool cnqf_supported;
+ struct notifier_block pwr_src_notifier;
};
struct apmf_sps_prop_granular {
@@ -391,9 +392,11 @@ int amd_pmf_init_sps(struct amd_pmf_dev *dev);
void amd_pmf_deinit_sps(struct amd_pmf_dev *dev);
int apmf_get_static_slider_granular(struct amd_pmf_dev *pdev,
struct apmf_static_slider_granular_output *output);
+bool is_pprof_balanced(struct amd_pmf_dev *pmf);
int apmf_update_fan_idx(struct amd_pmf_dev *pdev, bool manual, u32 idx);
+int amd_pmf_set_sps_power_limits(struct amd_pmf_dev *pmf);
/* Auto Mode Layer */
int apmf_get_auto_mode_def(struct amd_pmf_dev *pdev, struct apmf_auto_mode *data);
diff --git a/drivers/platform/x86/amd/pmf/sps.c b/drivers/platform/x86/amd/pmf/sps.c
index dba7e36962dc..bed762d47a14 100644
--- a/drivers/platform/x86/amd/pmf/sps.c
+++ b/drivers/platform/x86/amd/pmf/sps.c
@@ -70,6 +70,24 @@ void amd_pmf_update_slider(struct amd_pmf_dev *dev, bool op, int idx,
}
}
+int amd_pmf_set_sps_power_limits(struct amd_pmf_dev *pmf)
+{
+ int mode;
+
+ mode = amd_pmf_get_pprof_modes(pmf);
+ if (mode < 0)
+ return mode;
+
+ amd_pmf_update_slider(pmf, SLIDER_OP_SET, mode, NULL);
+
+ return 0;
+}
+
+bool is_pprof_balanced(struct amd_pmf_dev *pmf)
+{
+ return (pmf->current_profile == PLATFORM_PROFILE_BALANCED) ? true : false;
+}
+
static int amd_pmf_profile_get(struct platform_profile_handler *pprof,
enum platform_profile_option *profile)
{
@@ -105,15 +123,10 @@ static int amd_pmf_profile_set(struct platform_profile_handler *pprof,
enum platform_profile_option profile)
{
struct amd_pmf_dev *pmf = container_of(pprof, struct amd_pmf_dev, pprof);
- int mode;
pmf->current_profile = profile;
- mode = amd_pmf_get_pprof_modes(pmf);
- if (mode < 0)
- return mode;
- amd_pmf_update_slider(pmf, SLIDER_OP_SET, mode, NULL);
- return 0;
+ return amd_pmf_set_sps_power_limits(pmf);
}
int amd_pmf_init_sps(struct amd_pmf_dev *dev)
@@ -123,6 +136,9 @@ int amd_pmf_init_sps(struct amd_pmf_dev *dev)
dev->current_profile = PLATFORM_PROFILE_BALANCED;
amd_pmf_load_defaults_sps(dev);
+ /* update SPS balanced power mode thermals */
+ amd_pmf_set_sps_power_limits(dev);
+
dev->pprof.profile_get = amd_pmf_profile_get;
dev->pprof.profile_set = amd_pmf_profile_set;
diff --git a/drivers/platform/x86/asus-tf103c-dock.c b/drivers/platform/x86/asus-tf103c-dock.c
index 62310e06282b..aeb1138464df 100644
--- a/drivers/platform/x86/asus-tf103c-dock.c
+++ b/drivers/platform/x86/asus-tf103c-dock.c
@@ -250,7 +250,7 @@ static int tf103c_dock_hid_raw_request(struct hid_device *hid, u8 reportnum,
return 0;
}
-static struct hid_ll_driver tf103c_dock_hid_ll_driver = {
+static const struct hid_ll_driver tf103c_dock_hid_ll_driver = {
.parse = tf103c_dock_hid_parse,
.start = tf103c_dock_hid_start,
.stop = tf103c_dock_hid_stop,
@@ -259,7 +259,7 @@ static struct hid_ll_driver tf103c_dock_hid_ll_driver = {
.raw_request = tf103c_dock_hid_raw_request,
};
-static int tf103c_dock_toprow_codes[13][2] = {
+static const int tf103c_dock_toprow_codes[13][2] = {
/* Normal, AltGr pressed */
{ KEY_POWER, KEY_F1 },
{ KEY_RFKILL, KEY_F2 },
diff --git a/drivers/platform/x86/dell/Kconfig b/drivers/platform/x86/dell/Kconfig
index d319de8f2132..bdd78076b1d7 100644
--- a/drivers/platform/x86/dell/Kconfig
+++ b/drivers/platform/x86/dell/Kconfig
@@ -192,12 +192,12 @@ config DELL_WMI_DESCRIPTOR
config DELL_WMI_DDV
tristate "Dell WMI sensors Support"
default m
- depends on ACPI_BATTERY
depends on ACPI_WMI
+ depends on ACPI_BATTERY || HWMON
help
- This option adds support for WMI-based sensors like
- battery temperature sensors found on some Dell notebooks.
- It also supports reading of the battery ePPID.
+ This option adds support for WMI-based fan and thermal sensors
+ found on some Dell notebooks. It also supports various WMI-based battery
+ extras like reading of the battery temperature and ePPID.
To compile this drivers as a module, choose M here: the module will
be called dell-wmi-ddv.
diff --git a/drivers/platform/x86/dell/dell-smo8800.c b/drivers/platform/x86/dell/dell-smo8800.c
index 3385e852104c..8d6b7a83cf24 100644
--- a/drivers/platform/x86/dell/dell-smo8800.c
+++ b/drivers/platform/x86/dell/dell-smo8800.c
@@ -67,10 +67,7 @@ static ssize_t smo8800_misc_read(struct file *file, char __user *buf,
retval = 1;
- if (data < 255)
- byte_data = data;
- else
- byte_data = 255;
+ byte_data = min_t(u32, data, 255);
if (put_user(byte_data, buf))
retval = -EFAULT;
diff --git a/drivers/platform/x86/dell/dell-wmi-ddv.c b/drivers/platform/x86/dell/dell-wmi-ddv.c
index 2bb449845d14..d547c9d09725 100644
--- a/drivers/platform/x86/dell/dell-wmi-ddv.c
+++ b/drivers/platform/x86/dell/dell-wmi-ddv.c
@@ -10,28 +10,43 @@
#include <linux/acpi.h>
#include <linux/debugfs.h>
#include <linux/device.h>
+#include <linux/device/driver.h>
#include <linux/dev_printk.h>
+#include <linux/errno.h>
+#include <linux/kconfig.h>
#include <linux/kernel.h>
+#include <linux/hwmon.h>
#include <linux/kstrtox.h>
#include <linux/math.h>
+#include <linux/math64.h>
#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/limits.h>
+#include <linux/pm.h>
#include <linux/power_supply.h>
#include <linux/printk.h>
#include <linux/seq_file.h>
#include <linux/sysfs.h>
+#include <linux/types.h>
#include <linux/wmi.h>
#include <acpi/battery.h>
+#include <asm/unaligned.h>
+
#define DRIVER_NAME "dell-wmi-ddv"
-#define DELL_DDV_SUPPORTED_INTERFACE 2
+#define DELL_DDV_SUPPORTED_VERSION_MIN 2
+#define DELL_DDV_SUPPORTED_VERSION_MAX 3
#define DELL_DDV_GUID "8A42EA14-4F2A-FD45-6422-0087F7A7E608"
#define DELL_EPPID_LENGTH 20
#define DELL_EPPID_EXT_LENGTH 23
+static bool force;
+module_param_unsafe(force, bool, 0);
+MODULE_PARM_DESC(force, "Force loading without checking for supported WMI interface versions");
+
enum dell_ddv_method {
DELL_DDV_BATTERY_DESIGN_CAPACITY = 0x01,
DELL_DDV_BATTERY_FULL_CHARGE_CAPACITY = 0x02,
@@ -49,6 +64,7 @@ enum dell_ddv_method {
DELL_DDV_BATTERY_RAW_ANALYTICS_START = 0x0E,
DELL_DDV_BATTERY_RAW_ANALYTICS = 0x0F,
DELL_DDV_BATTERY_DESIGN_VOLTAGE = 0x10,
+ DELL_DDV_BATTERY_RAW_ANALYTICS_A_BLOCK = 0x11, /* version 3 */
DELL_DDV_INTERFACE_VERSION = 0x12,
@@ -56,13 +72,63 @@ enum dell_ddv_method {
DELL_DDV_THERMAL_SENSOR_INFORMATION = 0x22,
};
+struct fan_sensor_entry {
+ u8 type;
+ __le16 rpm;
+} __packed;
+
+struct thermal_sensor_entry {
+ u8 type;
+ s8 now;
+ s8 min;
+ s8 max;
+ u8 unknown;
+} __packed;
+
+struct combined_channel_info {
+ struct hwmon_channel_info info;
+ u32 config[];
+};
+
+struct combined_chip_info {
+ struct hwmon_chip_info chip;
+ const struct hwmon_channel_info *info[];
+};
+
+struct dell_wmi_ddv_sensors {
+ struct mutex lock; /* protect caching */
+ unsigned long timestamp;
+ union acpi_object *obj;
+ u64 entries;
+};
+
struct dell_wmi_ddv_data {
struct acpi_battery_hook hook;
struct device_attribute temp_attr;
struct device_attribute eppid_attr;
+ struct dell_wmi_ddv_sensors fans;
+ struct dell_wmi_ddv_sensors temps;
struct wmi_device *wdev;
};
+static const char * const fan_labels[] = {
+ "CPU Fan",
+ "Chassis Motherboard Fan",
+ "Video Fan",
+ "Power Supply Fan",
+ "Chipset Fan",
+ "Memory Fan",
+ "PCI Fan",
+ "HDD Fan",
+};
+
+static const char * const fan_dock_labels[] = {
+ "Docking Chassis/Motherboard Fan",
+ "Docking Video Fan",
+ "Docking Power Supply Fan",
+ "Docking Chipset Fan",
+};
+
static int dell_wmi_ddv_query_type(struct wmi_device *wdev, enum dell_ddv_method method, u32 arg,
union acpi_object **result, acpi_object_type type)
{
@@ -84,7 +150,7 @@ static int dell_wmi_ddv_query_type(struct wmi_device *wdev, enum dell_ddv_method
if (obj->type != type) {
kfree(obj);
- return -EIO;
+ return -ENOMSG;
}
*result = obj;
@@ -123,21 +189,27 @@ static int dell_wmi_ddv_query_buffer(struct wmi_device *wdev, enum dell_ddv_meth
if (ret < 0)
return ret;
- if (obj->package.count != 2)
- goto err_free;
+ if (obj->package.count != 2 ||
+ obj->package.elements[0].type != ACPI_TYPE_INTEGER ||
+ obj->package.elements[1].type != ACPI_TYPE_BUFFER) {
+ ret = -ENOMSG;
- if (obj->package.elements[0].type != ACPI_TYPE_INTEGER)
goto err_free;
+ }
buffer_size = obj->package.elements[0].integer.value;
- if (obj->package.elements[1].type != ACPI_TYPE_BUFFER)
+ if (!buffer_size) {
+ ret = -ENODATA;
+
goto err_free;
+ }
if (buffer_size > obj->package.elements[1].buffer.length) {
dev_warn(&wdev->dev,
FW_WARN "WMI buffer size (%llu) exceeds ACPI buffer size (%d)\n",
buffer_size, obj->package.elements[1].buffer.length);
+ ret = -EMSGSIZE;
goto err_free;
}
@@ -149,7 +221,7 @@ static int dell_wmi_ddv_query_buffer(struct wmi_device *wdev, enum dell_ddv_meth
err_free:
kfree(obj);
- return -EIO;
+ return ret;
}
static int dell_wmi_ddv_query_string(struct wmi_device *wdev, enum dell_ddv_method method,
@@ -158,6 +230,410 @@ static int dell_wmi_ddv_query_string(struct wmi_device *wdev, enum dell_ddv_meth
return dell_wmi_ddv_query_type(wdev, method, arg, result, ACPI_TYPE_STRING);
}
+/*
+ * Needs to be called with lock held, except during initialization.
+ */
+static int dell_wmi_ddv_update_sensors(struct wmi_device *wdev, enum dell_ddv_method method,
+ struct dell_wmi_ddv_sensors *sensors, size_t entry_size)
+{
+ u64 buffer_size, rem, entries;
+ union acpi_object *obj;
+ u8 *buffer;
+ int ret;
+
+ if (sensors->obj) {
+ if (time_before(jiffies, sensors->timestamp + HZ))
+ return 0;
+
+ kfree(sensors->obj);
+ sensors->obj = NULL;
+ }
+
+ ret = dell_wmi_ddv_query_buffer(wdev, method, 0, &obj);
+ if (ret < 0)
+ return ret;
+
+ /* buffer format sanity check */
+ buffer_size = obj->package.elements[0].integer.value;
+ buffer = obj->package.elements[1].buffer.pointer;
+ entries = div64_u64_rem(buffer_size, entry_size, &rem);
+ if (rem != 1 || buffer[buffer_size - 1] != 0xff) {
+ ret = -ENOMSG;
+ goto err_free;
+ }
+
+ if (!entries) {
+ ret = -ENODATA;
+ goto err_free;
+ }
+
+ sensors->obj = obj;
+ sensors->entries = entries;
+ sensors->timestamp = jiffies;
+
+ return 0;
+
+err_free:
+ kfree(obj);
+
+ return ret;
+}
+
+static umode_t dell_wmi_ddv_is_visible(const void *drvdata, enum hwmon_sensor_types type, u32 attr,
+ int channel)
+{
+ return 0444;
+}
+
+static int dell_wmi_ddv_fan_read_channel(struct dell_wmi_ddv_data *data, u32 attr, int channel,
+ long *val)
+{
+ struct fan_sensor_entry *entry;
+ int ret;
+
+ ret = dell_wmi_ddv_update_sensors(data->wdev, DELL_DDV_FAN_SENSOR_INFORMATION,
+ &data->fans, sizeof(*entry));
+ if (ret < 0)
+ return ret;
+
+ if (channel >= data->fans.entries)
+ return -ENXIO;
+
+ entry = (struct fan_sensor_entry *)data->fans.obj->package.elements[1].buffer.pointer;
+ switch (attr) {
+ case hwmon_fan_input:
+ *val = get_unaligned_le16(&entry[channel].rpm);
+ return 0;
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int dell_wmi_ddv_temp_read_channel(struct dell_wmi_ddv_data *data, u32 attr, int channel,
+ long *val)
+{
+ struct thermal_sensor_entry *entry;
+ int ret;
+
+ ret = dell_wmi_ddv_update_sensors(data->wdev, DELL_DDV_THERMAL_SENSOR_INFORMATION,
+ &data->temps, sizeof(*entry));
+ if (ret < 0)
+ return ret;
+
+ if (channel >= data->temps.entries)
+ return -ENXIO;
+
+ entry = (struct thermal_sensor_entry *)data->temps.obj->package.elements[1].buffer.pointer;
+ switch (attr) {
+ case hwmon_temp_input:
+ *val = entry[channel].now * 1000;
+ return 0;
+ case hwmon_temp_min:
+ *val = entry[channel].min * 1000;
+ return 0;
+ case hwmon_temp_max:
+ *val = entry[channel].max * 1000;
+ return 0;
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int dell_wmi_ddv_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
+ int channel, long *val)
+{
+ struct dell_wmi_ddv_data *data = dev_get_drvdata(dev);
+ int ret;
+
+ switch (type) {
+ case hwmon_fan:
+ mutex_lock(&data->fans.lock);
+ ret = dell_wmi_ddv_fan_read_channel(data, attr, channel, val);
+ mutex_unlock(&data->fans.lock);
+ return ret;
+ case hwmon_temp:
+ mutex_lock(&data->temps.lock);
+ ret = dell_wmi_ddv_temp_read_channel(data, attr, channel, val);
+ mutex_unlock(&data->temps.lock);
+ return ret;
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int dell_wmi_ddv_fan_read_string(struct dell_wmi_ddv_data *data, int channel,
+ const char **str)
+{
+ struct fan_sensor_entry *entry;
+ int ret;
+ u8 type;
+
+ ret = dell_wmi_ddv_update_sensors(data->wdev, DELL_DDV_FAN_SENSOR_INFORMATION,
+ &data->fans, sizeof(*entry));
+ if (ret < 0)
+ return ret;
+
+ if (channel >= data->fans.entries)
+ return -ENXIO;
+
+ entry = (struct fan_sensor_entry *)data->fans.obj->package.elements[1].buffer.pointer;
+ type = entry[channel].type;
+ switch (type) {
+ case 0x00 ... 0x07:
+ *str = fan_labels[type];
+ break;
+ case 0x11 ... 0x14:
+ *str = fan_dock_labels[type - 0x11];
+ break;
+ default:
+ *str = "Unknown Fan";
+ break;
+ }
+
+ return 0;
+}
+
+static int dell_wmi_ddv_temp_read_string(struct dell_wmi_ddv_data *data, int channel,
+ const char **str)
+{
+ struct thermal_sensor_entry *entry;
+ int ret;
+
+ ret = dell_wmi_ddv_update_sensors(data->wdev, DELL_DDV_THERMAL_SENSOR_INFORMATION,
+ &data->temps, sizeof(*entry));
+ if (ret < 0)
+ return ret;
+
+ if (channel >= data->temps.entries)
+ return -ENXIO;
+
+ entry = (struct thermal_sensor_entry *)data->temps.obj->package.elements[1].buffer.pointer;
+ switch (entry[channel].type) {
+ case 0x00:
+ *str = "CPU";
+ break;
+ case 0x11:
+ *str = "Video";
+ break;
+ case 0x22:
+ *str = "Memory"; /* sometimes called DIMM */
+ break;
+ case 0x33:
+ *str = "Other";
+ break;
+ case 0x44:
+ *str = "Ambient"; /* sometimes called SKIN */
+ break;
+ case 0x52:
+ *str = "SODIMM";
+ break;
+ case 0x55:
+ *str = "HDD";
+ break;
+ case 0x62:
+ *str = "SODIMM 2";
+ break;
+ case 0x73:
+ *str = "NB";
+ break;
+ case 0x83:
+ *str = "Charger";
+ break;
+ case 0xbb:
+ *str = "Memory 3";
+ break;
+ default:
+ *str = "Unknown";
+ break;
+ }
+
+ return 0;
+}
+
+static int dell_wmi_ddv_read_string(struct device *dev, enum hwmon_sensor_types type, u32 attr,
+ int channel, const char **str)
+{
+ struct dell_wmi_ddv_data *data = dev_get_drvdata(dev);
+ int ret;
+
+ switch (type) {
+ case hwmon_fan:
+ switch (attr) {
+ case hwmon_fan_label:
+ mutex_lock(&data->fans.lock);
+ ret = dell_wmi_ddv_fan_read_string(data, channel, str);
+ mutex_unlock(&data->fans.lock);
+ return ret;
+ default:
+ break;
+ }
+ break;
+ case hwmon_temp:
+ switch (attr) {
+ case hwmon_temp_label:
+ mutex_lock(&data->temps.lock);
+ ret = dell_wmi_ddv_temp_read_string(data, channel, str);
+ mutex_unlock(&data->temps.lock);
+ return ret;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static const struct hwmon_ops dell_wmi_ddv_ops = {
+ .is_visible = dell_wmi_ddv_is_visible,
+ .read = dell_wmi_ddv_read,
+ .read_string = dell_wmi_ddv_read_string,
+};
+
+static struct hwmon_channel_info *dell_wmi_ddv_channel_create(struct device *dev, u64 count,
+ enum hwmon_sensor_types type,
+ u32 config)
+{
+ struct combined_channel_info *cinfo;
+ int i;
+
+ cinfo = devm_kzalloc(dev, struct_size(cinfo, config, count + 1), GFP_KERNEL);
+ if (!cinfo)
+ return ERR_PTR(-ENOMEM);
+
+ cinfo->info.type = type;
+ cinfo->info.config = cinfo->config;
+
+ for (i = 0; i < count; i++)
+ cinfo->config[i] = config;
+
+ return &cinfo->info;
+}
+
+static void dell_wmi_ddv_hwmon_cache_invalidate(struct dell_wmi_ddv_sensors *sensors)
+{
+ mutex_lock(&sensors->lock);
+ kfree(sensors->obj);
+ sensors->obj = NULL;
+ mutex_unlock(&sensors->lock);
+}
+
+static void dell_wmi_ddv_hwmon_cache_destroy(void *data)
+{
+ struct dell_wmi_ddv_sensors *sensors = data;
+
+ mutex_destroy(&sensors->lock);
+ kfree(sensors->obj);
+}
+
+static struct hwmon_channel_info *dell_wmi_ddv_channel_init(struct wmi_device *wdev,
+ enum dell_ddv_method method,
+ struct dell_wmi_ddv_sensors *sensors,
+ size_t entry_size,
+ enum hwmon_sensor_types type,
+ u32 config)
+{
+ struct hwmon_channel_info *info;
+ int ret;
+
+ ret = dell_wmi_ddv_update_sensors(wdev, method, sensors, entry_size);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ mutex_init(&sensors->lock);
+
+ ret = devm_add_action_or_reset(&wdev->dev, dell_wmi_ddv_hwmon_cache_destroy, sensors);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ info = dell_wmi_ddv_channel_create(&wdev->dev, sensors->entries, type, config);
+ if (IS_ERR(info))
+ devm_release_action(&wdev->dev, dell_wmi_ddv_hwmon_cache_destroy, sensors);
+
+ return info;
+}
+
+static int dell_wmi_ddv_hwmon_add(struct dell_wmi_ddv_data *data)
+{
+ struct wmi_device *wdev = data->wdev;
+ struct combined_chip_info *cinfo;
+ struct hwmon_channel_info *info;
+ struct device *hdev;
+ int index = 0;
+ int ret;
+
+ if (!devres_open_group(&wdev->dev, dell_wmi_ddv_hwmon_add, GFP_KERNEL))
+ return -ENOMEM;
+
+ cinfo = devm_kzalloc(&wdev->dev, struct_size(cinfo, info, 4), GFP_KERNEL);
+ if (!cinfo) {
+ ret = -ENOMEM;
+
+ goto err_release;
+ }
+
+ cinfo->chip.ops = &dell_wmi_ddv_ops;
+ cinfo->chip.info = cinfo->info;
+
+ info = dell_wmi_ddv_channel_create(&wdev->dev, 1, hwmon_chip, HWMON_C_REGISTER_TZ);
+ if (IS_ERR(info)) {
+ ret = PTR_ERR(info);
+
+ goto err_release;
+ }
+
+ cinfo->info[index] = info;
+ index++;
+
+ info = dell_wmi_ddv_channel_init(wdev, DELL_DDV_FAN_SENSOR_INFORMATION, &data->fans,
+ sizeof(struct fan_sensor_entry), hwmon_fan,
+ (HWMON_F_INPUT | HWMON_F_LABEL));
+ if (!IS_ERR(info)) {
+ cinfo->info[index] = info;
+ index++;
+ }
+
+ info = dell_wmi_ddv_channel_init(wdev, DELL_DDV_THERMAL_SENSOR_INFORMATION, &data->temps,
+ sizeof(struct thermal_sensor_entry), hwmon_temp,
+ (HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MAX |
+ HWMON_T_LABEL));
+ if (!IS_ERR(info)) {
+ cinfo->info[index] = info;
+ index++;
+ }
+
+ if (index < 2) {
+ ret = -ENODEV;
+
+ goto err_release;
+ }
+
+ hdev = devm_hwmon_device_register_with_info(&wdev->dev, "dell_ddv", data, &cinfo->chip,
+ NULL);
+ if (IS_ERR(hdev)) {
+ ret = PTR_ERR(hdev);
+
+ goto err_release;
+ }
+
+ devres_close_group(&wdev->dev, dell_wmi_ddv_hwmon_add);
+
+ return 0;
+
+err_release:
+ devres_release_group(&wdev->dev, dell_wmi_ddv_hwmon_add);
+
+ return ret;
+}
+
static int dell_wmi_ddv_battery_index(struct acpi_device *acpi_dev, u32 *index)
{
const char *uid_str;
@@ -340,8 +816,13 @@ static int dell_wmi_ddv_probe(struct wmi_device *wdev, const void *context)
return ret;
dev_dbg(&wdev->dev, "WMI interface version: %d\n", version);
- if (version != DELL_DDV_SUPPORTED_INTERFACE)
- return -ENODEV;
+ if (version < DELL_DDV_SUPPORTED_VERSION_MIN || version > DELL_DDV_SUPPORTED_VERSION_MAX) {
+ if (!force)
+ return -ENODEV;
+
+ dev_warn(&wdev->dev, "Loading despite unsupported WMI interface version (%u)\n",
+ version);
+ }
data = devm_kzalloc(&wdev->dev, sizeof(*data), GFP_KERNEL);
if (!data)
@@ -352,9 +833,34 @@ static int dell_wmi_ddv_probe(struct wmi_device *wdev, const void *context)
dell_wmi_ddv_debugfs_init(wdev);
- return dell_wmi_ddv_battery_add(data);
+ if (IS_REACHABLE(CONFIG_ACPI_BATTERY)) {
+ ret = dell_wmi_ddv_battery_add(data);
+ if (ret < 0 && ret != -ENODEV)
+ dev_warn(&wdev->dev, "Unable to register ACPI battery hook: %d\n", ret);
+ }
+
+ if (IS_REACHABLE(CONFIG_HWMON)) {
+ ret = dell_wmi_ddv_hwmon_add(data);
+ if (ret < 0 && ret != -ENODEV)
+ dev_warn(&wdev->dev, "Unable to register hwmon interface: %d\n", ret);
+ }
+
+ return 0;
}
+static int dell_wmi_ddv_resume(struct device *dev)
+{
+ struct dell_wmi_ddv_data *data = dev_get_drvdata(dev);
+
+ /* Force re-reading of all sensors */
+ dell_wmi_ddv_hwmon_cache_invalidate(&data->fans);
+ dell_wmi_ddv_hwmon_cache_invalidate(&data->temps);
+
+ return 0;
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(dell_wmi_ddv_dev_pm_ops, NULL, dell_wmi_ddv_resume);
+
static const struct wmi_device_id dell_wmi_ddv_id_table[] = {
{ DELL_DDV_GUID, NULL },
{ }
@@ -364,6 +870,8 @@ MODULE_DEVICE_TABLE(wmi, dell_wmi_ddv_id_table);
static struct wmi_driver dell_wmi_ddv_driver = {
.driver = {
.name = DRIVER_NAME,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ .pm = pm_sleep_ptr(&dell_wmi_ddv_dev_pm_ops),
},
.id_table = dell_wmi_ddv_id_table,
.probe = dell_wmi_ddv_probe,
diff --git a/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c b/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c
index 0a6411a8a104..0285b47d99d1 100644
--- a/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c
+++ b/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c
@@ -255,7 +255,7 @@ static void attr_name_release(struct kobject *kobj)
kfree(kobj);
}
-static struct kobj_type attr_name_ktype = {
+static const struct kobj_type attr_name_ktype = {
.release = attr_name_release,
.sysfs_ops = &wmi_sysman_kobj_sysfs_ops,
};
diff --git a/drivers/platform/x86/hp/hp-wmi.c b/drivers/platform/x86/hp/hp-wmi.c
index 2ef201b625b3..873f59c3e280 100644
--- a/drivers/platform/x86/hp/hp-wmi.c
+++ b/drivers/platform/x86/hp/hp-wmi.c
@@ -217,6 +217,8 @@ static const struct key_entry hp_wmi_keymap[] = {
{ KE_KEY, 0x213b, { KEY_INFO } },
{ KE_KEY, 0x2169, { KEY_ROTATE_DISPLAY } },
{ KE_KEY, 0x216a, { KEY_SETUP } },
+ { KE_IGNORE, 0x21a4, }, /* Win Lock On */
+ { KE_IGNORE, 0x121a4, }, /* Win Lock Off */
{ KE_KEY, 0x21a5, { KEY_PROG2 } }, /* HP Omen Key */
{ KE_KEY, 0x21a7, { KEY_FN_ESC } },
{ KE_KEY, 0x21a9, { KEY_TOUCHPAD_OFF } },
diff --git a/drivers/platform/x86/intel/Kconfig b/drivers/platform/x86/intel/Kconfig
index d5a33473e838..bbbd9e54e9ee 100644
--- a/drivers/platform/x86/intel/Kconfig
+++ b/drivers/platform/x86/intel/Kconfig
@@ -182,6 +182,19 @@ config INTEL_SMARTCONNECT
This driver checks to determine whether the device has Intel Smart
Connect enabled, and if so disables it.
+config INTEL_TPMI
+ tristate "Intel Topology Aware Register and PM Capsule Interface (TPMI)"
+ depends on INTEL_VSEC
+ depends on X86_64
+ help
+ The Intel Topology Aware Register and PM Capsule Interface (TPMI),
+ provides enumerable MMIO interface for power management features.
+ This driver creates devices, so that other PM feature driver can
+ be loaded for PM specific feature operation.
+
+ To compile this driver as a module, choose M here: the module will
+ be called intel_vsec_tpmi.
+
config INTEL_TURBO_MAX_3
bool "Intel Turbo Boost Max Technology 3.0 enumeration driver"
depends on X86_64 && SCHED_MC_PRIO
diff --git a/drivers/platform/x86/intel/Makefile b/drivers/platform/x86/intel/Makefile
index 717933dd0cfd..411df4040427 100644
--- a/drivers/platform/x86/intel/Makefile
+++ b/drivers/platform/x86/intel/Makefile
@@ -47,6 +47,10 @@ obj-$(CONFIG_INTEL_MRFLD_PWRBTN) += intel_mrfld_pwrbtn.o
intel_punit_ipc-y := punit_ipc.o
obj-$(CONFIG_INTEL_PUNIT_IPC) += intel_punit_ipc.o
+# TPMI drivers
+intel_vsec_tpmi-y := tpmi.o
+obj-$(CONFIG_INTEL_TPMI) += intel_vsec_tpmi.o
+
# Intel Uncore drivers
intel-rst-y := rst.o
obj-$(CONFIG_INTEL_RST) += intel-rst.o
diff --git a/drivers/platform/x86/intel/int1092/intel_sar.c b/drivers/platform/x86/intel/int1092/intel_sar.c
index e03943e6380a..352fc4596494 100644
--- a/drivers/platform/x86/intel/int1092/intel_sar.c
+++ b/drivers/platform/x86/intel/int1092/intel_sar.c
@@ -131,16 +131,15 @@ static acpi_status sar_get_device_mode(struct platform_device *device)
acpi_status status = AE_OK;
union acpi_object *out;
u32 rev = 0;
- int value;
- out = acpi_evaluate_dsm(context->handle, &context->guid, rev,
- COMMAND_ID_DEV_MODE, NULL);
- if (get_int_value(out, &value)) {
+ out = acpi_evaluate_dsm_typed(context->handle, &context->guid, rev,
+ COMMAND_ID_DEV_MODE, NULL, ACPI_TYPE_INTEGER);
+ if (!out) {
dev_err(&device->dev, "DSM cmd:%d Failed to retrieve value\n", COMMAND_ID_DEV_MODE);
status = AE_ERROR;
goto dev_mode_error;
}
- context->sar_data.device_mode = value;
+ context->sar_data.device_mode = out->integer.value;
update_sar_data(context);
sysfs_notify(&device->dev.kobj, NULL, SYSFS_DATANAME);
@@ -221,11 +220,11 @@ static void sar_get_data(int reg, struct wwan_sar_context *context)
req.type = ACPI_TYPE_INTEGER;
req.integer.value = reg;
- out = acpi_evaluate_dsm(context->handle, &context->guid, rev,
- COMMAND_ID_CONFIG_TABLE, &req);
+ out = acpi_evaluate_dsm_typed(context->handle, &context->guid, rev,
+ COMMAND_ID_CONFIG_TABLE, &req, ACPI_TYPE_PACKAGE);
if (!out)
return;
- if (out->type == ACPI_TYPE_PACKAGE && out->package.count >= 3 &&
+ if (out->package.count >= 3 &&
out->package.elements[0].type == ACPI_TYPE_INTEGER &&
out->package.elements[1].type == ACPI_TYPE_INTEGER &&
out->package.elements[2].type == ACPI_TYPE_PACKAGE &&
diff --git a/drivers/platform/x86/intel/int3472/Kconfig b/drivers/platform/x86/intel/int3472/Kconfig
index 62e5d4cf9ee5..17ae997f93ea 100644
--- a/drivers/platform/x86/intel/int3472/Kconfig
+++ b/drivers/platform/x86/intel/int3472/Kconfig
@@ -4,6 +4,7 @@ config INTEL_SKL_INT3472
depends on COMMON_CLK
depends on I2C
depends on GPIOLIB
+ depends on LEDS_CLASS
depends on REGULATOR
select MFD_CORE
select REGMAP_I2C
diff --git a/drivers/platform/x86/intel/int3472/Makefile b/drivers/platform/x86/intel/int3472/Makefile
index cfec7784c5c9..9f16cb514397 100644
--- a/drivers/platform/x86/intel/int3472/Makefile
+++ b/drivers/platform/x86/intel/int3472/Makefile
@@ -1,4 +1,4 @@
obj-$(CONFIG_INTEL_SKL_INT3472) += intel_skl_int3472_discrete.o \
intel_skl_int3472_tps68470.o
-intel_skl_int3472_discrete-y := discrete.o clk_and_regulator.o common.o
+intel_skl_int3472_discrete-y := discrete.o clk_and_regulator.o led.o common.o
intel_skl_int3472_tps68470-y := tps68470.o tps68470_board_data.o common.o
diff --git a/drivers/platform/x86/intel/int3472/clk_and_regulator.c b/drivers/platform/x86/intel/int3472/clk_and_regulator.c
index 74dc2cff799e..1086c3d83494 100644
--- a/drivers/platform/x86/intel/int3472/clk_and_regulator.c
+++ b/drivers/platform/x86/intel/int3472/clk_and_regulator.c
@@ -23,8 +23,6 @@ static int skl_int3472_clk_prepare(struct clk_hw *hw)
struct int3472_gpio_clock *clk = to_int3472_clk(hw);
gpiod_set_value_cansleep(clk->ena_gpio, 1);
- gpiod_set_value_cansleep(clk->led_gpio, 1);
-
return 0;
}
@@ -33,7 +31,6 @@ static void skl_int3472_clk_unprepare(struct clk_hw *hw)
struct int3472_gpio_clock *clk = to_int3472_clk(hw);
gpiod_set_value_cansleep(clk->ena_gpio, 0);
- gpiod_set_value_cansleep(clk->led_gpio, 0);
}
static int skl_int3472_clk_enable(struct clk_hw *hw)
@@ -89,18 +86,37 @@ static const struct clk_ops skl_int3472_clock_ops = {
.recalc_rate = skl_int3472_clk_recalc_rate,
};
-int skl_int3472_register_clock(struct int3472_discrete_device *int3472)
+int skl_int3472_register_clock(struct int3472_discrete_device *int3472,
+ struct acpi_resource_gpio *agpio, u32 polarity)
{
+ char *path = agpio->resource_source.string_ptr;
struct clk_init_data init = {
.ops = &skl_int3472_clock_ops,
.flags = CLK_GET_RATE_NOCACHE,
};
int ret;
+ if (int3472->clock.cl)
+ return -EBUSY;
+
+ int3472->clock.ena_gpio = acpi_get_and_request_gpiod(path, agpio->pin_table[0],
+ "int3472,clk-enable");
+ if (IS_ERR(int3472->clock.ena_gpio))
+ return dev_err_probe(int3472->dev, PTR_ERR(int3472->clock.ena_gpio),
+ "getting clk-enable GPIO\n");
+
+ if (polarity == GPIO_ACTIVE_LOW)
+ gpiod_toggle_active_low(int3472->clock.ena_gpio);
+
+ /* Ensure the pin is in output mode and non-active state */
+ gpiod_direction_output(int3472->clock.ena_gpio, 0);
+
init.name = kasprintf(GFP_KERNEL, "%s-clk",
acpi_dev_name(int3472->adev));
- if (!init.name)
- return -ENOMEM;
+ if (!init.name) {
+ ret = -ENOMEM;
+ goto out_put_gpio;
+ }
int3472->clock.frequency = skl_int3472_get_clk_frequency(int3472);
@@ -126,14 +142,20 @@ err_unregister_clk:
clk_unregister(int3472->clock.clk);
out_free_init_name:
kfree(init.name);
+out_put_gpio:
+ gpiod_put(int3472->clock.ena_gpio);
return ret;
}
void skl_int3472_unregister_clock(struct int3472_discrete_device *int3472)
{
+ if (!int3472->clock.cl)
+ return;
+
clkdev_drop(int3472->clock.cl);
clk_unregister(int3472->clock.clk);
+ gpiod_put(int3472->clock.ena_gpio);
}
int skl_int3472_register_regulator(struct int3472_discrete_device *int3472,
diff --git a/drivers/platform/x86/intel/int3472/common.h b/drivers/platform/x86/intel/int3472/common.h
index 53270d19c73a..61688e450ce5 100644
--- a/drivers/platform/x86/intel/int3472/common.h
+++ b/drivers/platform/x86/intel/int3472/common.h
@@ -6,6 +6,7 @@
#include <linux/clk-provider.h>
#include <linux/gpio/machine.h>
+#include <linux/leds.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
#include <linux/types.h>
@@ -28,6 +29,8 @@
#define GPIO_REGULATOR_NAME_LENGTH 21
#define GPIO_REGULATOR_SUPPLY_NAME_LENGTH 9
+#define INT3472_LED_MAX_NAME_LEN 32
+
#define CIO2_SENSOR_SSDB_MCLKSPEED_OFFSET 86
#define INT3472_REGULATOR(_name, _supply, _ops) \
@@ -96,10 +99,16 @@ struct int3472_discrete_device {
struct clk_hw clk_hw;
struct clk_lookup *cl;
struct gpio_desc *ena_gpio;
- struct gpio_desc *led_gpio;
u32 frequency;
} clock;
+ struct int3472_pled {
+ struct led_classdev classdev;
+ struct led_lookup_data lookup;
+ char name[INT3472_LED_MAX_NAME_LEN];
+ struct gpio_desc *gpio;
+ } pled;
+
unsigned int ngpios; /* how many GPIOs have we seen */
unsigned int n_sensor_gpios; /* how many have we mapped to sensor */
struct gpiod_lookup_table gpios;
@@ -112,11 +121,16 @@ int skl_int3472_get_sensor_adev_and_name(struct device *dev,
struct acpi_device **sensor_adev_ret,
const char **name_ret);
-int skl_int3472_register_clock(struct int3472_discrete_device *int3472);
+int skl_int3472_register_clock(struct int3472_discrete_device *int3472,
+ struct acpi_resource_gpio *agpio, u32 polarity);
void skl_int3472_unregister_clock(struct int3472_discrete_device *int3472);
int skl_int3472_register_regulator(struct int3472_discrete_device *int3472,
struct acpi_resource_gpio *agpio);
void skl_int3472_unregister_regulator(struct int3472_discrete_device *int3472);
+int skl_int3472_register_pled(struct int3472_discrete_device *int3472,
+ struct acpi_resource_gpio *agpio, u32 polarity);
+void skl_int3472_unregister_pled(struct int3472_discrete_device *int3472);
+
#endif
diff --git a/drivers/platform/x86/intel/int3472/discrete.c b/drivers/platform/x86/intel/int3472/discrete.c
index c42c3faa2c32..f064da74f50a 100644
--- a/drivers/platform/x86/intel/int3472/discrete.c
+++ b/drivers/platform/x86/intel/int3472/discrete.c
@@ -2,8 +2,6 @@
/* Author: Dan Scally <djrscally@gmail.com> */
#include <linux/acpi.h>
-#include <linux/clkdev.h>
-#include <linux/clk-provider.h>
#include <linux/device.h>
#include <linux/gpio/consumer.h>
#include <linux/gpio/machine.h>
@@ -80,14 +78,6 @@ skl_int3472_get_sensor_module_config(struct int3472_discrete_device *int3472)
return ERR_PTR(-ENODEV);
}
- if (obj->string.type != ACPI_TYPE_STRING) {
- dev_err(int3472->dev,
- "Sensor _DSM returned a non-string value\n");
-
- ACPI_FREE(obj);
- return ERR_PTR(-EINVAL);
- }
-
for (i = 0; i < ARRAY_SIZE(int3472_sensor_configs); i++) {
if (!strcmp(int3472_sensor_configs[i].sensor_module_name,
obj->string.pointer))
@@ -154,38 +144,34 @@ static int skl_int3472_map_gpio_to_sensor(struct int3472_discrete_device *int347
return 0;
}
-static int skl_int3472_map_gpio_to_clk(struct int3472_discrete_device *int3472,
- struct acpi_resource_gpio *agpio, u8 type)
+static void int3472_get_func_and_polarity(u8 type, const char **func, u32 *polarity)
{
- char *path = agpio->resource_source.string_ptr;
- u16 pin = agpio->pin_table[0];
- struct gpio_desc *gpio;
-
switch (type) {
+ case INT3472_GPIO_TYPE_RESET:
+ *func = "reset";
+ *polarity = GPIO_ACTIVE_LOW;
+ break;
+ case INT3472_GPIO_TYPE_POWERDOWN:
+ *func = "powerdown";
+ *polarity = GPIO_ACTIVE_LOW;
+ break;
case INT3472_GPIO_TYPE_CLK_ENABLE:
- gpio = acpi_get_and_request_gpiod(path, pin, "int3472,clk-enable");
- if (IS_ERR(gpio))
- return (PTR_ERR(gpio));
-
- int3472->clock.ena_gpio = gpio;
- /* Ensure the pin is in output mode and non-active state */
- gpiod_direction_output(int3472->clock.ena_gpio, 0);
+ *func = "clk-enable";
+ *polarity = GPIO_ACTIVE_HIGH;
break;
case INT3472_GPIO_TYPE_PRIVACY_LED:
- gpio = acpi_get_and_request_gpiod(path, pin, "int3472,privacy-led");
- if (IS_ERR(gpio))
- return (PTR_ERR(gpio));
-
- int3472->clock.led_gpio = gpio;
- /* Ensure the pin is in output mode and non-active state */
- gpiod_direction_output(int3472->clock.led_gpio, 0);
+ *func = "privacy-led";
+ *polarity = GPIO_ACTIVE_HIGH;
+ break;
+ case INT3472_GPIO_TYPE_POWER_ENABLE:
+ *func = "power-enable";
+ *polarity = GPIO_ACTIVE_HIGH;
break;
default:
- dev_err(int3472->dev, "Invalid GPIO type 0x%02x for clock\n", type);
+ *func = "unknown";
+ *polarity = GPIO_ACTIVE_HIGH;
break;
}
-
- return 0;
}
/**
@@ -226,9 +212,11 @@ static int skl_int3472_handle_gpio_resources(struct acpi_resource *ares,
struct int3472_discrete_device *int3472 = data;
struct acpi_resource_gpio *agpio;
union acpi_object *obj;
+ u8 active_value, type;
const char *err_msg;
+ const char *func;
+ u32 polarity;
int ret;
- u8 type;
if (!acpi_gpio_get_io_resource(ares, &agpio))
return 1;
@@ -250,26 +238,35 @@ static int skl_int3472_handle_gpio_resources(struct acpi_resource *ares,
type = obj->integer.value & 0xff;
+ int3472_get_func_and_polarity(type, &func, &polarity);
+
+ /* If bits 31-24 of the _DSM entry are all 0 then the signal is inverted */
+ active_value = obj->integer.value >> 24;
+ if (!active_value)
+ polarity ^= GPIO_ACTIVE_LOW;
+
+ dev_dbg(int3472->dev, "%s %s pin %d active-%s\n", func,
+ agpio->resource_source.string_ptr, agpio->pin_table[0],
+ (polarity == GPIO_ACTIVE_HIGH) ? "high" : "low");
+
switch (type) {
case INT3472_GPIO_TYPE_RESET:
- ret = skl_int3472_map_gpio_to_sensor(int3472, agpio, "reset",
- GPIO_ACTIVE_LOW);
+ case INT3472_GPIO_TYPE_POWERDOWN:
+ ret = skl_int3472_map_gpio_to_sensor(int3472, agpio, func, polarity);
if (ret)
- err_msg = "Failed to map reset pin to sensor\n";
+ err_msg = "Failed to map GPIO pin to sensor\n";
break;
- case INT3472_GPIO_TYPE_POWERDOWN:
- ret = skl_int3472_map_gpio_to_sensor(int3472, agpio, "powerdown",
- GPIO_ACTIVE_LOW);
+ case INT3472_GPIO_TYPE_CLK_ENABLE:
+ ret = skl_int3472_register_clock(int3472, agpio, polarity);
if (ret)
- err_msg = "Failed to map powerdown pin to sensor\n";
+ err_msg = "Failed to register clock\n";
break;
- case INT3472_GPIO_TYPE_CLK_ENABLE:
case INT3472_GPIO_TYPE_PRIVACY_LED:
- ret = skl_int3472_map_gpio_to_clk(int3472, agpio, type);
+ ret = skl_int3472_register_pled(int3472, agpio, polarity);
if (ret)
- err_msg = "Failed to map GPIO to clock\n";
+ err_msg = "Failed to register LED\n";
break;
case INT3472_GPIO_TYPE_POWER_ENABLE:
@@ -314,21 +311,6 @@ static int skl_int3472_parse_crs(struct int3472_discrete_device *int3472)
acpi_dev_free_resource_list(&resource_list);
- /*
- * If we find no clock enable GPIO pin then the privacy LED won't work.
- * We've never seen that situation, but it's possible. Warn the user so
- * it's clear what's happened.
- */
- if (int3472->clock.ena_gpio) {
- ret = skl_int3472_register_clock(int3472);
- if (ret)
- return ret;
- } else {
- if (int3472->clock.led_gpio)
- dev_warn(int3472->dev,
- "No clk GPIO. The privacy LED won't work\n");
- }
-
int3472->gpios.dev_id = int3472->sensor_name;
gpiod_add_lookup_table(&int3472->gpios);
@@ -341,12 +323,8 @@ static int skl_int3472_discrete_remove(struct platform_device *pdev)
gpiod_remove_lookup_table(&int3472->gpios);
- if (int3472->clock.cl)
- skl_int3472_unregister_clock(int3472);
-
- gpiod_put(int3472->clock.ena_gpio);
- gpiod_put(int3472->clock.led_gpio);
-
+ skl_int3472_unregister_clock(int3472);
+ skl_int3472_unregister_pled(int3472);
skl_int3472_unregister_regulator(int3472);
return 0;
diff --git a/drivers/platform/x86/intel/int3472/led.c b/drivers/platform/x86/intel/int3472/led.c
new file mode 100644
index 000000000000..bca1ce7d0d0c
--- /dev/null
+++ b/drivers/platform/x86/intel/int3472/led.c
@@ -0,0 +1,75 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Author: Hans de Goede <hdegoede@redhat.com> */
+
+#include <linux/acpi.h>
+#include <linux/gpio/consumer.h>
+#include <linux/leds.h>
+#include "common.h"
+
+static int int3472_pled_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct int3472_discrete_device *int3472 =
+ container_of(led_cdev, struct int3472_discrete_device, pled.classdev);
+
+ gpiod_set_value_cansleep(int3472->pled.gpio, brightness);
+ return 0;
+}
+
+int skl_int3472_register_pled(struct int3472_discrete_device *int3472,
+ struct acpi_resource_gpio *agpio, u32 polarity)
+{
+ char *p, *path = agpio->resource_source.string_ptr;
+ int ret;
+
+ if (int3472->pled.classdev.dev)
+ return -EBUSY;
+
+ int3472->pled.gpio = acpi_get_and_request_gpiod(path, agpio->pin_table[0],
+ "int3472,privacy-led");
+ if (IS_ERR(int3472->pled.gpio))
+ return dev_err_probe(int3472->dev, PTR_ERR(int3472->pled.gpio),
+ "getting privacy LED GPIO\n");
+
+ if (polarity == GPIO_ACTIVE_LOW)
+ gpiod_toggle_active_low(int3472->pled.gpio);
+
+ /* Ensure the pin is in output mode and non-active state */
+ gpiod_direction_output(int3472->pled.gpio, 0);
+
+ /* Generate the name, replacing the ':' in the ACPI devname with '_' */
+ snprintf(int3472->pled.name, sizeof(int3472->pled.name),
+ "%s::privacy_led", acpi_dev_name(int3472->sensor));
+ p = strchr(int3472->pled.name, ':');
+ if (p)
+ *p = '_';
+
+ int3472->pled.classdev.name = int3472->pled.name;
+ int3472->pled.classdev.max_brightness = 1;
+ int3472->pled.classdev.brightness_set_blocking = int3472_pled_set;
+
+ ret = led_classdev_register(int3472->dev, &int3472->pled.classdev);
+ if (ret)
+ goto err_free_gpio;
+
+ int3472->pled.lookup.provider = int3472->pled.name;
+ int3472->pled.lookup.dev_id = int3472->sensor_name;
+ int3472->pled.lookup.con_id = "privacy-led";
+ led_add_lookup(&int3472->pled.lookup);
+
+ return 0;
+
+err_free_gpio:
+ gpiod_put(int3472->pled.gpio);
+ return ret;
+}
+
+void skl_int3472_unregister_pled(struct int3472_discrete_device *int3472)
+{
+ if (IS_ERR_OR_NULL(int3472->pled.classdev.dev))
+ return;
+
+ led_remove_lookup(&int3472->pled.lookup);
+ led_classdev_unregister(&int3472->pled.classdev);
+ gpiod_put(int3472->pled.gpio);
+}
diff --git a/drivers/platform/x86/intel/oaktrail.c b/drivers/platform/x86/intel/oaktrail.c
index 7c5c623630c1..fa720967e69b 100644
--- a/drivers/platform/x86/intel/oaktrail.c
+++ b/drivers/platform/x86/intel/oaktrail.c
@@ -266,17 +266,11 @@ static int oaktrail_probe(struct platform_device *pdev)
return 0;
}
-static int oaktrail_remove(struct platform_device *pdev)
-{
- return 0;
-}
-
static struct platform_driver oaktrail_driver = {
.driver = {
.name = DRIVER_NAME,
},
.probe = oaktrail_probe,
- .remove = oaktrail_remove,
};
static int dmi_check_cb(const struct dmi_system_id *id)
diff --git a/drivers/platform/x86/intel/pmc/tgl.c b/drivers/platform/x86/intel/pmc/tgl.c
index e3e50538465d..c245ada849d0 100644
--- a/drivers/platform/x86/intel/pmc/tgl.c
+++ b/drivers/platform/x86/intel/pmc/tgl.c
@@ -221,9 +221,9 @@ void pmc_core_get_tgl_lpm_reqs(struct platform_device *pdev)
guid_parse(ACPI_S0IX_DSM_UUID, &s0ix_dsm_guid);
- out_obj = acpi_evaluate_dsm(adev->handle, &s0ix_dsm_guid, 0,
- ACPI_GET_LOW_MODE_REGISTERS, NULL);
- if (out_obj && out_obj->type == ACPI_TYPE_BUFFER) {
+ out_obj = acpi_evaluate_dsm_typed(adev->handle, &s0ix_dsm_guid, 0,
+ ACPI_GET_LOW_MODE_REGISTERS, NULL, ACPI_TYPE_BUFFER);
+ if (out_obj) {
u32 size = out_obj->buffer.length;
if (size != lpm_size) {
diff --git a/drivers/platform/x86/intel/punit_ipc.c b/drivers/platform/x86/intel/punit_ipc.c
index 66bb39fd0ef9..cd0ba84cc8e4 100644
--- a/drivers/platform/x86/intel/punit_ipc.c
+++ b/drivers/platform/x86/intel/punit_ipc.c
@@ -302,11 +302,6 @@ static int intel_punit_ipc_probe(struct platform_device *pdev)
return 0;
}
-static int intel_punit_ipc_remove(struct platform_device *pdev)
-{
- return 0;
-}
-
static const struct acpi_device_id punit_ipc_acpi_ids[] = {
{ "INT34D4", 0 },
{ }
@@ -315,7 +310,6 @@ MODULE_DEVICE_TABLE(acpi, punit_ipc_acpi_ids);
static struct platform_driver intel_punit_ipc_driver = {
.probe = intel_punit_ipc_probe,
- .remove = intel_punit_ipc_remove,
.driver = {
.name = "intel_punit_ipc",
.acpi_match_table = punit_ipc_acpi_ids,
diff --git a/drivers/platform/x86/intel/tpmi.c b/drivers/platform/x86/intel/tpmi.c
new file mode 100644
index 000000000000..c60733261c89
--- /dev/null
+++ b/drivers/platform/x86/intel/tpmi.c
@@ -0,0 +1,415 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * intel-tpmi : Driver to enumerate TPMI features and create devices
+ *
+ * Copyright (c) 2023, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * The TPMI (Topology Aware Register and PM Capsule Interface) provides a
+ * flexible, extendable and PCIe enumerable MMIO interface for PM features.
+ *
+ * For example Intel RAPL (Running Average Power Limit) provides a MMIO
+ * interface using TPMI. This has advantage over traditional MSR
+ * (Model Specific Register) interface, where a thread needs to be scheduled
+ * on the target CPU to read or write. Also the RAPL features vary between
+ * CPU models, and hence lot of model specific code. Here TPMI provides an
+ * architectural interface by providing hierarchical tables and fields,
+ * which will not need any model specific implementation.
+ *
+ * The TPMI interface uses a PCI VSEC structure to expose the location of
+ * MMIO region.
+ *
+ * This VSEC structure is present in the PCI configuration space of the
+ * Intel Out-of-Band (OOB) device, which is handled by the Intel VSEC
+ * driver. The Intel VSEC driver parses VSEC structures present in the PCI
+ * configuration space of the given device and creates an auxiliary device
+ * object for each of them. In particular, it creates an auxiliary device
+ * object representing TPMI that can be bound by an auxiliary driver.
+ *
+ * This TPMI driver will bind to the TPMI auxiliary device object created
+ * by the Intel VSEC driver.
+ *
+ * The TPMI specification defines a PFS (PM Feature Structure) table.
+ * This table is present in the TPMI MMIO region. The starting address
+ * of PFS is derived from the tBIR (Bar Indicator Register) and "Address"
+ * field from the VSEC header.
+ *
+ * Each TPMI PM feature has one entry in the PFS with a unique TPMI
+ * ID and its access details. The TPMI driver creates device nodes
+ * for the supported PM features.
+ *
+ * The names of the devices created by the TPMI driver start with the
+ * "intel_vsec.tpmi-" prefix which is followed by a specific name of the
+ * given PM feature (for example, "intel_vsec.tpmi-rapl.0").
+ *
+ * The device nodes are create by using interface "intel_vsec_add_aux()"
+ * provided by the Intel VSEC driver.
+ */
+
+#include <linux/auxiliary_bus.h>
+#include <linux/intel_tpmi.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "vsec.h"
+
+/**
+ * struct intel_tpmi_pfs_entry - TPMI PM Feature Structure (PFS) entry
+ * @tpmi_id: TPMI feature identifier (what the feature is and its data format).
+ * @num_entries: Number of feature interface instances present in the PFS.
+ * This represents the maximum number of Power domains in the SoC.
+ * @entry_size: Interface instance entry size in 32-bit words.
+ * @cap_offset: Offset from the PM_Features base address to the base of the PM VSEC
+ * register bank in KB.
+ * @attribute: Feature attribute: 0=BIOS. 1=OS. 2-3=Reserved.
+ * @reserved: Bits for use in the future.
+ *
+ * Represents one TPMI feature entry data in the PFS retrieved as is
+ * from the hardware.
+ */
+struct intel_tpmi_pfs_entry {
+ u64 tpmi_id:8;
+ u64 num_entries:8;
+ u64 entry_size:16;
+ u64 cap_offset:16;
+ u64 attribute:2;
+ u64 reserved:14;
+} __packed;
+
+/**
+ * struct intel_tpmi_pm_feature - TPMI PM Feature information for a TPMI ID
+ * @pfs_header: PFS header retireved from the hardware.
+ * @vsec_offset: Starting MMIO address for this feature in bytes. Essentially
+ * this offset = "Address" from VSEC header + PFS Capability
+ * offset for this feature entry.
+ *
+ * Represents TPMI instance information for one TPMI ID.
+ */
+struct intel_tpmi_pm_feature {
+ struct intel_tpmi_pfs_entry pfs_header;
+ unsigned int vsec_offset;
+};
+
+/**
+ * struct intel_tpmi_info - TPMI information for all IDs in an instance
+ * @tpmi_features: Pointer to a list of TPMI feature instances
+ * @vsec_dev: Pointer to intel_vsec_device structure for this TPMI device
+ * @feature_count: Number of TPMI of TPMI instances pointed by tpmi_features
+ * @pfs_start: Start of PFS offset for the TPMI instances in this device
+ * @plat_info: Stores platform info which can be used by the client drivers
+ *
+ * Stores the information for all TPMI devices enumerated from a single PCI device.
+ */
+struct intel_tpmi_info {
+ struct intel_tpmi_pm_feature *tpmi_features;
+ struct intel_vsec_device *vsec_dev;
+ int feature_count;
+ u64 pfs_start;
+ struct intel_tpmi_plat_info plat_info;
+};
+
+/**
+ * struct tpmi_info_header - CPU package ID to PCI device mapping information
+ * @fn: PCI function number
+ * @dev: PCI device number
+ * @bus: PCI bus number
+ * @pkg: CPU Package id
+ * @reserved: Reserved for future use
+ * @lock: When set to 1 the register is locked and becomes read-only
+ * until next reset. Not for use by the OS driver.
+ *
+ * The structure to read hardware provided mapping information.
+ */
+struct tpmi_info_header {
+ u64 fn:3;
+ u64 dev:5;
+ u64 bus:8;
+ u64 pkg:8;
+ u64 reserved:39;
+ u64 lock:1;
+} __packed;
+
+/*
+ * List of supported TMPI IDs.
+ * Some TMPI IDs are not used by Linux, so the numbers are not consecutive.
+ */
+enum intel_tpmi_id {
+ TPMI_ID_RAPL = 0, /* Running Average Power Limit */
+ TPMI_ID_PEM = 1, /* Power and Perf excursion Monitor */
+ TPMI_ID_UNCORE = 2, /* Uncore Frequency Scaling */
+ TPMI_ID_SST = 5, /* Speed Select Technology */
+ TPMI_INFO_ID = 0x81, /* Special ID for PCI BDF and Package ID information */
+};
+
+/* Used during auxbus device creation */
+static DEFINE_IDA(intel_vsec_tpmi_ida);
+
+struct intel_tpmi_plat_info *tpmi_get_platform_data(struct auxiliary_device *auxdev)
+{
+ struct intel_vsec_device *vsec_dev = auxdev_to_ivdev(auxdev);
+
+ return vsec_dev->priv_data;
+}
+EXPORT_SYMBOL_NS_GPL(tpmi_get_platform_data, INTEL_TPMI);
+
+int tpmi_get_resource_count(struct auxiliary_device *auxdev)
+{
+ struct intel_vsec_device *vsec_dev = auxdev_to_ivdev(auxdev);
+
+ if (vsec_dev)
+ return vsec_dev->num_resources;
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(tpmi_get_resource_count, INTEL_TPMI);
+
+struct resource *tpmi_get_resource_at_index(struct auxiliary_device *auxdev, int index)
+{
+ struct intel_vsec_device *vsec_dev = auxdev_to_ivdev(auxdev);
+
+ if (vsec_dev && index < vsec_dev->num_resources)
+ return &vsec_dev->resource[index];
+
+ return NULL;
+}
+EXPORT_SYMBOL_NS_GPL(tpmi_get_resource_at_index, INTEL_TPMI);
+
+static const char *intel_tpmi_name(enum intel_tpmi_id id)
+{
+ switch (id) {
+ case TPMI_ID_RAPL:
+ return "rapl";
+ case TPMI_ID_PEM:
+ return "pem";
+ case TPMI_ID_UNCORE:
+ return "uncore";
+ case TPMI_ID_SST:
+ return "sst";
+ default:
+ return NULL;
+ }
+}
+
+/* String Length for tpmi-"feature_name(upto 8 bytes)" */
+#define TPMI_FEATURE_NAME_LEN 14
+
+static int tpmi_create_device(struct intel_tpmi_info *tpmi_info,
+ struct intel_tpmi_pm_feature *pfs,
+ u64 pfs_start)
+{
+ struct intel_vsec_device *vsec_dev = tpmi_info->vsec_dev;
+ char feature_id_name[TPMI_FEATURE_NAME_LEN];
+ struct intel_vsec_device *feature_vsec_dev;
+ struct resource *res, *tmp;
+ const char *name;
+ int ret, i;
+
+ name = intel_tpmi_name(pfs->pfs_header.tpmi_id);
+ if (!name)
+ return -EOPNOTSUPP;
+
+ feature_vsec_dev = kzalloc(sizeof(*feature_vsec_dev), GFP_KERNEL);
+ if (!feature_vsec_dev)
+ return -ENOMEM;
+
+ res = kcalloc(pfs->pfs_header.num_entries, sizeof(*res), GFP_KERNEL);
+ if (!res) {
+ ret = -ENOMEM;
+ goto free_vsec;
+ }
+
+ snprintf(feature_id_name, sizeof(feature_id_name), "tpmi-%s", name);
+
+ for (i = 0, tmp = res; i < pfs->pfs_header.num_entries; i++, tmp++) {
+ u64 entry_size_bytes = pfs->pfs_header.entry_size * 4;
+
+ tmp->start = pfs->vsec_offset + entry_size_bytes * i;
+ tmp->end = tmp->start + entry_size_bytes - 1;
+ tmp->flags = IORESOURCE_MEM;
+ }
+
+ feature_vsec_dev->pcidev = vsec_dev->pcidev;
+ feature_vsec_dev->resource = res;
+ feature_vsec_dev->num_resources = pfs->pfs_header.num_entries;
+ feature_vsec_dev->priv_data = &tpmi_info->plat_info;
+ feature_vsec_dev->priv_data_size = sizeof(tpmi_info->plat_info);
+ feature_vsec_dev->ida = &intel_vsec_tpmi_ida;
+
+ /*
+ * intel_vsec_add_aux() is resource managed, no explicit
+ * delete is required on error or on module unload.
+ */
+ ret = intel_vsec_add_aux(vsec_dev->pcidev, &vsec_dev->auxdev.dev,
+ feature_vsec_dev, feature_id_name);
+ if (ret)
+ goto free_res;
+
+ return 0;
+
+free_res:
+ kfree(res);
+free_vsec:
+ kfree(feature_vsec_dev);
+
+ return ret;
+}
+
+static int tpmi_create_devices(struct intel_tpmi_info *tpmi_info)
+{
+ struct intel_vsec_device *vsec_dev = tpmi_info->vsec_dev;
+ int ret, i;
+
+ for (i = 0; i < vsec_dev->num_resources; i++) {
+ ret = tpmi_create_device(tpmi_info, &tpmi_info->tpmi_features[i],
+ tpmi_info->pfs_start);
+ /*
+ * Fail, if the supported features fails to create device,
+ * otherwise, continue. Even if one device failed to create,
+ * fail the loading of driver. Since intel_vsec_add_aux()
+ * is resource managed, no clean up is required for the
+ * successfully created devices.
+ */
+ if (ret && ret != -EOPNOTSUPP)
+ return ret;
+ }
+
+ return 0;
+}
+
+#define TPMI_INFO_BUS_INFO_OFFSET 0x08
+
+static int tpmi_process_info(struct intel_tpmi_info *tpmi_info,
+ struct intel_tpmi_pm_feature *pfs)
+{
+ struct tpmi_info_header header;
+ void __iomem *info_mem;
+
+ info_mem = ioremap(pfs->vsec_offset + TPMI_INFO_BUS_INFO_OFFSET,
+ pfs->pfs_header.entry_size * 4 - TPMI_INFO_BUS_INFO_OFFSET);
+ if (!info_mem)
+ return -ENOMEM;
+
+ memcpy_fromio(&header, info_mem, sizeof(header));
+
+ tpmi_info->plat_info.package_id = header.pkg;
+ tpmi_info->plat_info.bus_number = header.bus;
+ tpmi_info->plat_info.device_number = header.dev;
+ tpmi_info->plat_info.function_number = header.fn;
+
+ iounmap(info_mem);
+
+ return 0;
+}
+
+static int tpmi_fetch_pfs_header(struct intel_tpmi_pm_feature *pfs, u64 start, int size)
+{
+ void __iomem *pfs_mem;
+
+ pfs_mem = ioremap(start, size);
+ if (!pfs_mem)
+ return -ENOMEM;
+
+ memcpy_fromio(&pfs->pfs_header, pfs_mem, sizeof(pfs->pfs_header));
+
+ iounmap(pfs_mem);
+
+ return 0;
+}
+
+static int intel_vsec_tpmi_init(struct auxiliary_device *auxdev)
+{
+ struct intel_vsec_device *vsec_dev = auxdev_to_ivdev(auxdev);
+ struct pci_dev *pci_dev = vsec_dev->pcidev;
+ struct intel_tpmi_info *tpmi_info;
+ u64 pfs_start = 0;
+ int i;
+
+ tpmi_info = devm_kzalloc(&auxdev->dev, sizeof(*tpmi_info), GFP_KERNEL);
+ if (!tpmi_info)
+ return -ENOMEM;
+
+ tpmi_info->vsec_dev = vsec_dev;
+ tpmi_info->feature_count = vsec_dev->num_resources;
+ tpmi_info->plat_info.bus_number = pci_dev->bus->number;
+
+ tpmi_info->tpmi_features = devm_kcalloc(&auxdev->dev, vsec_dev->num_resources,
+ sizeof(*tpmi_info->tpmi_features),
+ GFP_KERNEL);
+ if (!tpmi_info->tpmi_features)
+ return -ENOMEM;
+
+ for (i = 0; i < vsec_dev->num_resources; i++) {
+ struct intel_tpmi_pm_feature *pfs;
+ struct resource *res;
+ u64 res_start;
+ int size, ret;
+
+ pfs = &tpmi_info->tpmi_features[i];
+
+ res = &vsec_dev->resource[i];
+ if (!res)
+ continue;
+
+ res_start = res->start;
+ size = resource_size(res);
+ if (size < 0)
+ continue;
+
+ ret = tpmi_fetch_pfs_header(pfs, res_start, size);
+ if (ret)
+ continue;
+
+ if (!pfs_start)
+ pfs_start = res_start;
+
+ pfs->pfs_header.cap_offset *= 1024;
+
+ pfs->vsec_offset = pfs_start + pfs->pfs_header.cap_offset;
+
+ /*
+ * Process TPMI_INFO to get PCI device to CPU package ID.
+ * Device nodes for TPMI features are not created in this
+ * for loop. So, the mapping information will be available
+ * when actual device nodes created outside this
+ * loop via tpmi_create_devices().
+ */
+ if (pfs->pfs_header.tpmi_id == TPMI_INFO_ID)
+ tpmi_process_info(tpmi_info, pfs);
+ }
+
+ tpmi_info->pfs_start = pfs_start;
+
+ auxiliary_set_drvdata(auxdev, tpmi_info);
+
+ return tpmi_create_devices(tpmi_info);
+}
+
+static int tpmi_probe(struct auxiliary_device *auxdev,
+ const struct auxiliary_device_id *id)
+{
+ return intel_vsec_tpmi_init(auxdev);
+}
+
+/*
+ * Remove callback is not needed currently as there is no
+ * cleanup required. All memory allocs are device managed. All
+ * devices created by this modules are also device managed.
+ */
+
+static const struct auxiliary_device_id tpmi_id_table[] = {
+ { .name = "intel_vsec.tpmi" },
+ {}
+};
+MODULE_DEVICE_TABLE(auxiliary, tpmi_id_table);
+
+static struct auxiliary_driver tpmi_aux_driver = {
+ .id_table = tpmi_id_table,
+ .probe = tpmi_probe,
+};
+
+module_auxiliary_driver(tpmi_aux_driver);
+
+MODULE_IMPORT_NS(INTEL_VSEC);
+MODULE_DESCRIPTION("Intel TPMI enumeration module");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/intel/vsec.c b/drivers/platform/x86/intel/vsec.c
index bb81b8b1f7e9..13decf36c6de 100644
--- a/drivers/platform/x86/intel/vsec.c
+++ b/drivers/platform/x86/intel/vsec.c
@@ -64,6 +64,7 @@ enum intel_vsec_id {
VSEC_ID_WATCHER = 3,
VSEC_ID_CRASHLOG = 4,
VSEC_ID_SDSI = 65,
+ VSEC_ID_TPMI = 66,
};
static enum intel_vsec_id intel_vsec_allow_list[] = {
@@ -71,6 +72,7 @@ static enum intel_vsec_id intel_vsec_allow_list[] = {
VSEC_ID_WATCHER,
VSEC_ID_CRASHLOG,
VSEC_ID_SDSI,
+ VSEC_ID_TPMI,
};
static const char *intel_vsec_name(enum intel_vsec_id id)
@@ -88,6 +90,9 @@ static const char *intel_vsec_name(enum intel_vsec_id id)
case VSEC_ID_SDSI:
return "sdsi";
+ case VSEC_ID_TPMI:
+ return "tpmi";
+
default:
return NULL;
}
@@ -124,35 +129,48 @@ static void intel_vsec_remove_aux(void *data)
auxiliary_device_uninit(data);
}
+static DEFINE_MUTEX(vsec_ida_lock);
+
static void intel_vsec_dev_release(struct device *dev)
{
struct intel_vsec_device *intel_vsec_dev = dev_to_ivdev(dev);
+ mutex_lock(&vsec_ida_lock);
ida_free(intel_vsec_dev->ida, intel_vsec_dev->auxdev.id);
+ mutex_unlock(&vsec_ida_lock);
+
kfree(intel_vsec_dev->resource);
kfree(intel_vsec_dev);
}
-static int intel_vsec_add_aux(struct pci_dev *pdev, struct intel_vsec_device *intel_vsec_dev,
- const char *name)
+int intel_vsec_add_aux(struct pci_dev *pdev, struct device *parent,
+ struct intel_vsec_device *intel_vsec_dev,
+ const char *name)
{
struct auxiliary_device *auxdev = &intel_vsec_dev->auxdev;
int ret, id;
+ mutex_lock(&vsec_ida_lock);
ret = ida_alloc(intel_vsec_dev->ida, GFP_KERNEL);
+ mutex_unlock(&vsec_ida_lock);
if (ret < 0) {
kfree(intel_vsec_dev);
return ret;
}
+ if (!parent)
+ parent = &pdev->dev;
+
auxdev->id = ret;
auxdev->name = name;
- auxdev->dev.parent = &pdev->dev;
+ auxdev->dev.parent = parent;
auxdev->dev.release = intel_vsec_dev_release;
ret = auxiliary_device_init(auxdev);
if (ret < 0) {
+ mutex_lock(&vsec_ida_lock);
ida_free(intel_vsec_dev->ida, auxdev->id);
+ mutex_unlock(&vsec_ida_lock);
kfree(intel_vsec_dev->resource);
kfree(intel_vsec_dev);
return ret;
@@ -164,7 +182,7 @@ static int intel_vsec_add_aux(struct pci_dev *pdev, struct intel_vsec_device *in
return ret;
}
- ret = devm_add_action_or_reset(&pdev->dev, intel_vsec_remove_aux,
+ ret = devm_add_action_or_reset(parent, intel_vsec_remove_aux,
auxdev);
if (ret < 0)
return ret;
@@ -177,6 +195,7 @@ static int intel_vsec_add_aux(struct pci_dev *pdev, struct intel_vsec_device *in
return 0;
}
+EXPORT_SYMBOL_NS_GPL(intel_vsec_add_aux, INTEL_VSEC);
static int intel_vsec_add_dev(struct pci_dev *pdev, struct intel_vsec_header *header,
struct intel_vsec_platform_info *info)
@@ -234,7 +253,8 @@ static int intel_vsec_add_dev(struct pci_dev *pdev, struct intel_vsec_header *he
else
intel_vsec_dev->ida = &intel_vsec_ida;
- return intel_vsec_add_aux(pdev, intel_vsec_dev, intel_vsec_name(header->id));
+ return intel_vsec_add_aux(pdev, NULL, intel_vsec_dev,
+ intel_vsec_name(header->id));
}
static bool intel_vsec_walk_header(struct pci_dev *pdev,
@@ -408,14 +428,23 @@ static const struct intel_vsec_platform_info dg1_info = {
.quirks = VSEC_QUIRK_NO_DVSEC | VSEC_QUIRK_EARLY_HW,
};
+/* MTL info */
+static const struct intel_vsec_platform_info mtl_info = {
+ .quirks = VSEC_QUIRK_NO_WATCHER | VSEC_QUIRK_NO_CRASHLOG,
+};
+
#define PCI_DEVICE_ID_INTEL_VSEC_ADL 0x467d
#define PCI_DEVICE_ID_INTEL_VSEC_DG1 0x490e
+#define PCI_DEVICE_ID_INTEL_VSEC_MTL_M 0x7d0d
+#define PCI_DEVICE_ID_INTEL_VSEC_MTL_S 0xad0d
#define PCI_DEVICE_ID_INTEL_VSEC_OOBMSM 0x09a7
#define PCI_DEVICE_ID_INTEL_VSEC_RPL 0xa77d
#define PCI_DEVICE_ID_INTEL_VSEC_TGL 0x9a0d
static const struct pci_device_id intel_vsec_pci_ids[] = {
{ PCI_DEVICE_DATA(INTEL, VSEC_ADL, &tgl_info) },
{ PCI_DEVICE_DATA(INTEL, VSEC_DG1, &dg1_info) },
+ { PCI_DEVICE_DATA(INTEL, VSEC_MTL_M, &mtl_info) },
+ { PCI_DEVICE_DATA(INTEL, VSEC_MTL_S, &mtl_info) },
{ PCI_DEVICE_DATA(INTEL, VSEC_OOBMSM, &(struct intel_vsec_platform_info) {}) },
{ PCI_DEVICE_DATA(INTEL, VSEC_RPL, &tgl_info) },
{ PCI_DEVICE_DATA(INTEL, VSEC_TGL, &tgl_info) },
diff --git a/drivers/platform/x86/intel/vsec.h b/drivers/platform/x86/intel/vsec.h
index 3deeb05cf394..ae8fe92c5595 100644
--- a/drivers/platform/x86/intel/vsec.h
+++ b/drivers/platform/x86/intel/vsec.h
@@ -38,8 +38,14 @@ struct intel_vsec_device {
struct ida *ida;
struct intel_vsec_platform_info *info;
int num_resources;
+ void *priv_data;
+ size_t priv_data_size;
};
+int intel_vsec_add_aux(struct pci_dev *pdev, struct device *parent,
+ struct intel_vsec_device *intel_vsec_dev,
+ const char *name);
+
static inline struct intel_vsec_device *dev_to_ivdev(struct device *dev)
{
return container_of(dev, struct intel_vsec_device, auxdev.dev);
diff --git a/drivers/platform/x86/mlx-platform.c b/drivers/platform/x86/mlx-platform.c
index 2fac05a17a5c..7b6779cdb134 100644
--- a/drivers/platform/x86/mlx-platform.c
+++ b/drivers/platform/x86/mlx-platform.c
@@ -15,6 +15,7 @@
#include <linux/platform_device.h>
#include <linux/platform_data/i2c-mux-reg.h>
#include <linux/platform_data/mlxreg.h>
+#include <linux/reboot.h>
#include <linux/regmap.h>
#define MLX_PLAT_DEVICE_NAME "mlxplat"
@@ -61,12 +62,19 @@
#define MLXPLAT_CPLD_LPC_REG_PWM_CONTROL_OFFSET 0x37
#define MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET 0x3a
#define MLXPLAT_CPLD_LPC_REG_AGGR_MASK_OFFSET 0x3b
+#define MLXPLAT_CPLD_LPC_REG_DBG1_OFFSET 0x3c
+#define MLXPLAT_CPLD_LPC_REG_DBG2_OFFSET 0x3d
+#define MLXPLAT_CPLD_LPC_REG_DBG3_OFFSET 0x3e
+#define MLXPLAT_CPLD_LPC_REG_DBG4_OFFSET 0x3f
#define MLXPLAT_CPLD_LPC_REG_AGGRLO_OFFSET 0x40
#define MLXPLAT_CPLD_LPC_REG_AGGRLO_MASK_OFFSET 0x41
#define MLXPLAT_CPLD_LPC_REG_AGGRCO_OFFSET 0x42
#define MLXPLAT_CPLD_LPC_REG_AGGRCO_MASK_OFFSET 0x43
#define MLXPLAT_CPLD_LPC_REG_AGGRCX_OFFSET 0x44
#define MLXPLAT_CPLD_LPC_REG_AGGRCX_MASK_OFFSET 0x45
+#define MLXPLAT_CPLD_LPC_REG_BRD_OFFSET 0x47
+#define MLXPLAT_CPLD_LPC_REG_BRD_EVENT_OFFSET 0x48
+#define MLXPLAT_CPLD_LPC_REG_BRD_MASK_OFFSET 0x49
#define MLXPLAT_CPLD_LPC_REG_GWP_OFFSET 0x4a
#define MLXPLAT_CPLD_LPC_REG_GWP_EVENT_OFFSET 0x4b
#define MLXPLAT_CPLD_LPC_REG_GWP_MASK_OFFSET 0x4c
@@ -90,6 +98,15 @@
#define MLXPLAT_CPLD_LPC_REG_FAN_OFFSET 0x88
#define MLXPLAT_CPLD_LPC_REG_FAN_EVENT_OFFSET 0x89
#define MLXPLAT_CPLD_LPC_REG_FAN_MASK_OFFSET 0x8a
+#define MLXPLAT_CPLD_LPC_REG_EROT_OFFSET 0x91
+#define MLXPLAT_CPLD_LPC_REG_EROT_EVENT_OFFSET 0x92
+#define MLXPLAT_CPLD_LPC_REG_EROT_MASK_OFFSET 0x93
+#define MLXPLAT_CPLD_LPC_REG_EROTE_OFFSET 0x94
+#define MLXPLAT_CPLD_LPC_REG_EROTE_EVENT_OFFSET 0x95
+#define MLXPLAT_CPLD_LPC_REG_EROTE_MASK_OFFSET 0x96
+#define MLXPLAT_CPLD_LPC_REG_PWRB_OFFSET 0x97
+#define MLXPLAT_CPLD_LPC_REG_PWRB_EVENT_OFFSET 0x98
+#define MLXPLAT_CPLD_LPC_REG_PWRB_MASK_OFFSET 0x99
#define MLXPLAT_CPLD_LPC_REG_LC_VR_OFFSET 0x9a
#define MLXPLAT_CPLD_LPC_REG_LC_VR_EVENT_OFFSET 0x9b
#define MLXPLAT_CPLD_LPC_REG_LC_VR_MASK_OFFSET 0x9c
@@ -109,6 +126,8 @@
#define MLXPLAT_CPLD_LPC_REG_LC_SD_EVENT_OFFSET 0xaa
#define MLXPLAT_CPLD_LPC_REG_LC_SD_MASK_OFFSET 0xab
#define MLXPLAT_CPLD_LPC_REG_LC_PWR_ON 0xb2
+#define MLXPLAT_CPLD_LPC_REG_GP4_RO_OFFSET 0xc2
+#define MLXPLAT_CPLD_LPC_REG_SPI_CHNL_SELECT 0xc3
#define MLXPLAT_CPLD_LPC_REG_WD_CLEAR_OFFSET 0xc7
#define MLXPLAT_CPLD_LPC_REG_WD_CLEAR_WP_OFFSET 0xc8
#define MLXPLAT_CPLD_LPC_REG_WD1_TMR_OFFSET 0xc9
@@ -119,6 +138,11 @@
#define MLXPLAT_CPLD_LPC_REG_WD3_TMR_OFFSET 0xd1
#define MLXPLAT_CPLD_LPC_REG_WD3_TLEFT_OFFSET 0xd2
#define MLXPLAT_CPLD_LPC_REG_WD3_ACT_OFFSET 0xd3
+#define MLXPLAT_CPLD_LPC_REG_DBG_CTRL_OFFSET 0xd9
+#define MLXPLAT_CPLD_LPC_REG_I2C_CH1_OFFSET 0xdb
+#define MLXPLAT_CPLD_LPC_REG_I2C_CH2_OFFSET 0xda
+#define MLXPLAT_CPLD_LPC_REG_I2C_CH3_OFFSET 0xdc
+#define MLXPLAT_CPLD_LPC_REG_I2C_CH4_OFFSET 0xdd
#define MLXPLAT_CPLD_LPC_REG_CPLD1_MVER_OFFSET 0xde
#define MLXPLAT_CPLD_LPC_REG_CPLD2_MVER_OFFSET 0xdf
#define MLXPLAT_CPLD_LPC_REG_CPLD3_MVER_OFFSET 0xe0
@@ -152,23 +176,19 @@
#define MLXPLAT_CPLD_LPC_REG_CONFIG2_OFFSET 0xfc
#define MLXPLAT_CPLD_LPC_REG_CONFIG3_OFFSET 0xfd
#define MLXPLAT_CPLD_LPC_IO_RANGE 0x100
-#define MLXPLAT_CPLD_LPC_I2C_CH1_OFF 0xdb
-#define MLXPLAT_CPLD_LPC_I2C_CH2_OFF 0xda
-#define MLXPLAT_CPLD_LPC_I2C_CH3_OFF 0xdc
-#define MLXPLAT_CPLD_LPC_I2C_CH4_OFF 0xdd
#define MLXPLAT_CPLD_LPC_PIO_OFFSET 0x10000UL
#define MLXPLAT_CPLD_LPC_REG1 ((MLXPLAT_CPLD_LPC_REG_BASE_ADRR + \
- MLXPLAT_CPLD_LPC_I2C_CH1_OFF) | \
+ MLXPLAT_CPLD_LPC_REG_I2C_CH1_OFFSET) | \
MLXPLAT_CPLD_LPC_PIO_OFFSET)
#define MLXPLAT_CPLD_LPC_REG2 ((MLXPLAT_CPLD_LPC_REG_BASE_ADRR + \
- MLXPLAT_CPLD_LPC_I2C_CH2_OFF) | \
+ MLXPLAT_CPLD_LPC_REG_I2C_CH2_OFFSET) | \
MLXPLAT_CPLD_LPC_PIO_OFFSET)
#define MLXPLAT_CPLD_LPC_REG3 ((MLXPLAT_CPLD_LPC_REG_BASE_ADRR + \
- MLXPLAT_CPLD_LPC_I2C_CH3_OFF) | \
+ MLXPLAT_CPLD_LPC_REG_I2C_CH3_OFFSET) | \
MLXPLAT_CPLD_LPC_PIO_OFFSET)
#define MLXPLAT_CPLD_LPC_REG4 ((MLXPLAT_CPLD_LPC_REG_BASE_ADRR + \
- MLXPLAT_CPLD_LPC_I2C_CH4_OFF) | \
+ MLXPLAT_CPLD_LPC_REG_I2C_CH4_OFFSET) | \
MLXPLAT_CPLD_LPC_PIO_OFFSET)
/* Masks for aggregation, psu, pwr and fan event in CPLD related registers. */
@@ -202,6 +222,7 @@
MLXPLAT_CPLD_AGGR_MASK_LC_SDWN)
#define MLXPLAT_CPLD_LOW_AGGR_MASK_LOW 0xc1
#define MLXPLAT_CPLD_LOW_AGGR_MASK_ASIC2 BIT(2)
+#define MLXPLAT_CPLD_LOW_AGGR_MASK_PWR_BUT BIT(4)
#define MLXPLAT_CPLD_LOW_AGGR_MASK_I2C BIT(6)
#define MLXPLAT_CPLD_PSU_MASK GENMASK(1, 0)
#define MLXPLAT_CPLD_PWR_MASK GENMASK(1, 0)
@@ -214,6 +235,17 @@
#define MLXPLAT_CPLD_LED_HI_NIBBLE_MASK GENMASK(3, 0)
#define MLXPLAT_CPLD_VOLTREG_UPD_MASK GENMASK(5, 4)
#define MLXPLAT_CPLD_GWP_MASK GENMASK(0, 0)
+#define MLXPLAT_CPLD_EROT_MASK GENMASK(1, 0)
+#define MLXPLAT_CPLD_PWR_BUTTON_MASK BIT(0)
+#define MLXPLAT_CPLD_LATCH_RST_MASK BIT(5)
+#define MLXPLAT_CPLD_THERMAL1_PDB_MASK BIT(3)
+#define MLXPLAT_CPLD_THERMAL2_PDB_MASK BIT(4)
+#define MLXPLAT_CPLD_INTRUSION_MASK BIT(6)
+#define MLXPLAT_CPLD_PWM_PG_MASK BIT(7)
+#define MLXPLAT_CPLD_L1_CHA_HEALTH_MASK (MLXPLAT_CPLD_THERMAL1_PDB_MASK | \
+ MLXPLAT_CPLD_THERMAL2_PDB_MASK | \
+ MLXPLAT_CPLD_INTRUSION_MASK |\
+ MLXPLAT_CPLD_PWM_PG_MASK)
#define MLXPLAT_CPLD_I2C_CAP_BIT 0x04
#define MLXPLAT_CPLD_I2C_CAP_MASK GENMASK(5, MLXPLAT_CPLD_I2C_CAP_BIT)
@@ -226,6 +258,8 @@
/* Masks for aggregation for modular systems */
#define MLXPLAT_CPLD_LPC_LC_MASK GENMASK(7, 0)
+#define MLXPLAT_CPLD_HALT_MASK BIT(3)
+
/* Default I2C parent bus number */
#define MLXPLAT_CPLD_PHYS_ADAPTER_DEF_NR 1
@@ -243,6 +277,8 @@
#define MLXPLAT_CPLD_CH2_ETH_MODULAR 3
#define MLXPLAT_CPLD_CH3_ETH_MODULAR 43
#define MLXPLAT_CPLD_CH4_ETH_MODULAR 51
+#define MLXPLAT_CPLD_CH2_RACK_SWITCH 18
+#define MLXPLAT_CPLD_CH2_NG800 34
/* Number of LPC attached MUX platform devices */
#define MLXPLAT_CPLD_LPC_MUX_DEVS 4
@@ -280,6 +316,12 @@
/* Minimum power required for turning on Ethernet modular system (WATT) */
#define MLXPLAT_CPLD_ETH_MODULAR_PWR_MIN 50
+/* Default value for PWM control register for rack switch system */
+#define MLXPLAT_REGMAP_NVSWITCH_PWM_DEFAULT 0xf4
+
+#define MLXPLAT_I2C_MAIN_BUS_NOTIFIED 0x01
+#define MLXPLAT_I2C_MAIN_BUS_HANDLE_CREATED 0x02
+
/* mlxplat_priv - platform private data
* @pdev_i2c - i2c controller platform device
* @pdev_mux - array of mux platform devices
@@ -289,6 +331,9 @@
* @pdev_fan - FAN platform devices
* @pdev_wd - array of watchdog platform devices
* @regmap: device register map
+ * @hotplug_resources: system hotplug resources
+ * @hotplug_resources_size: size of system hotplug resources
+ * @hi2c_main_init_status: init status of I2C main bus
*/
struct mlxplat_priv {
struct platform_device *pdev_i2c;
@@ -299,8 +344,14 @@ struct mlxplat_priv {
struct platform_device *pdev_fan;
struct platform_device *pdev_wd[MLXPLAT_CPLD_WD_MAX_DEVS];
void *regmap;
+ struct resource *hotplug_resources;
+ unsigned int hotplug_resources_size;
+ u8 i2c_main_init_status;
};
+static struct platform_device *mlxplat_dev;
+static int mlxplat_i2c_main_complition_notify(void *handle, int id);
+
/* Regions for LPC I2C controller and LPC base register space */
static const struct resource mlxplat_lpc_resources[] = {
[0] = DEFINE_RES_NAMED(MLXPLAT_CPLD_LPC_I2C_BASE_ADRR,
@@ -312,6 +363,11 @@ static const struct resource mlxplat_lpc_resources[] = {
IORESOURCE_IO),
};
+/* Platform systems default i2c data */
+static struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_i2c_default_data = {
+ .completion_notify = mlxplat_i2c_main_complition_notify,
+};
+
/* Platform i2c next generation systems data */
static struct mlxreg_core_data mlxplat_mlxcpld_i2c_ng_items_data[] = {
{
@@ -334,6 +390,7 @@ static struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_i2c_ng_data = {
.mask = MLXPLAT_CPLD_AGGR_MASK_COMEX,
.cell_low = MLXPLAT_CPLD_LPC_REG_AGGRCO_OFFSET,
.mask_low = MLXPLAT_CPLD_LOW_AGGR_MASK_I2C,
+ .completion_notify = mlxplat_i2c_main_complition_notify,
};
/* Platform default channels */
@@ -460,6 +517,67 @@ static struct i2c_mux_reg_platform_data mlxplat_modular_mux_data[] = {
},
};
+/* Platform channels for rack switch system family */
+static const int mlxplat_rack_switch_channels[] = {
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+};
+
+/* Platform rack switch mux data */
+static struct i2c_mux_reg_platform_data mlxplat_rack_switch_mux_data[] = {
+ {
+ .parent = 1,
+ .base_nr = MLXPLAT_CPLD_CH1,
+ .write_only = 1,
+ .reg = (void __iomem *)MLXPLAT_CPLD_LPC_REG1,
+ .reg_size = 1,
+ .idle_in_use = 1,
+ .values = mlxplat_rack_switch_channels,
+ .n_values = ARRAY_SIZE(mlxplat_rack_switch_channels),
+ },
+ {
+ .parent = 1,
+ .base_nr = MLXPLAT_CPLD_CH2_RACK_SWITCH,
+ .write_only = 1,
+ .reg = (void __iomem *)MLXPLAT_CPLD_LPC_REG2,
+ .reg_size = 1,
+ .idle_in_use = 1,
+ .values = mlxplat_msn21xx_channels,
+ .n_values = ARRAY_SIZE(mlxplat_msn21xx_channels),
+ },
+
+};
+
+/* Platform channels for ng800 system family */
+static const int mlxplat_ng800_channels[] = {
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
+ 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32
+};
+
+/* Platform ng800 mux data */
+static struct i2c_mux_reg_platform_data mlxplat_ng800_mux_data[] = {
+ {
+ .parent = 1,
+ .base_nr = MLXPLAT_CPLD_CH1,
+ .write_only = 1,
+ .reg = (void __iomem *)MLXPLAT_CPLD_LPC_REG1,
+ .reg_size = 1,
+ .idle_in_use = 1,
+ .values = mlxplat_ng800_channels,
+ .n_values = ARRAY_SIZE(mlxplat_ng800_channels),
+ },
+ {
+ .parent = 1,
+ .base_nr = MLXPLAT_CPLD_CH2_NG800,
+ .write_only = 1,
+ .reg = (void __iomem *)MLXPLAT_CPLD_LPC_REG2,
+ .reg_size = 1,
+ .idle_in_use = 1,
+ .values = mlxplat_msn21xx_channels,
+ .n_values = ARRAY_SIZE(mlxplat_msn21xx_channels),
+ },
+
+};
+
/* Platform hotplug devices */
static struct i2c_board_info mlxplat_mlxcpld_pwr[] = {
{
@@ -479,6 +597,15 @@ static struct i2c_board_info mlxplat_mlxcpld_ext_pwr[] = {
},
};
+static struct i2c_board_info mlxplat_mlxcpld_pwr_ng800[] = {
+ {
+ I2C_BOARD_INFO("dps460", 0x59),
+ },
+ {
+ I2C_BOARD_INFO("dps460", 0x5a),
+ },
+};
+
static struct i2c_board_info mlxplat_mlxcpld_fan[] = {
{
I2C_BOARD_INFO("24c32", 0x50),
@@ -558,6 +685,23 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_pwr_wc_items_data[] = {
},
};
+static struct mlxreg_core_data mlxplat_mlxcpld_default_pwr_ng800_items_data[] = {
+ {
+ .label = "pwr1",
+ .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
+ .mask = BIT(0),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_pwr_ng800[0],
+ .hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR,
+ },
+ {
+ .label = "pwr2",
+ .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
+ .mask = BIT(1),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_pwr_ng800[1],
+ .hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR,
+ },
+};
+
static struct mlxreg_core_data mlxplat_mlxcpld_default_fan_items_data[] = {
{
.label = "fan1",
@@ -1181,6 +1325,47 @@ static struct mlxreg_core_item mlxplat_mlxcpld_ext_items[] = {
}
};
+static struct mlxreg_core_item mlxplat_mlxcpld_ng800_items[] = {
+ {
+ .data = mlxplat_mlxcpld_default_ng_psu_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
+ .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
+ .mask = MLXPLAT_CPLD_PSU_EXT_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_default_ng_psu_items_data),
+ .inversed = 1,
+ .health = false,
+ },
+ {
+ .data = mlxplat_mlxcpld_default_pwr_ng800_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
+ .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
+ .mask = MLXPLAT_CPLD_PWR_EXT_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_default_pwr_ng800_items_data),
+ .inversed = 0,
+ .health = false,
+ },
+ {
+ .data = mlxplat_mlxcpld_default_ng_fan_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
+ .reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
+ .mask = MLXPLAT_CPLD_FAN_NG_MASK,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_default_ng_fan_items_data),
+ .inversed = 1,
+ .health = false,
+ },
+ {
+ .data = mlxplat_mlxcpld_default_asic_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
+ .reg = MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET,
+ .mask = MLXPLAT_CPLD_ASIC_MASK,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_default_asic_items_data),
+ .inversed = 0,
+ .health = true,
+ },
+};
+
static
struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_ext_data = {
.items = mlxplat_mlxcpld_ext_items,
@@ -1191,6 +1376,16 @@ struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_ext_data = {
.mask_low = MLXPLAT_CPLD_LOW_AGGR_MASK_LOW | MLXPLAT_CPLD_LOW_AGGR_MASK_ASIC2,
};
+static
+struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_ng800_data = {
+ .items = mlxplat_mlxcpld_ng800_items,
+ .counter = ARRAY_SIZE(mlxplat_mlxcpld_ng800_items),
+ .cell = MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET,
+ .mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF | MLXPLAT_CPLD_AGGR_MASK_COMEX,
+ .cell_low = MLXPLAT_CPLD_LPC_REG_AGGRLO_OFFSET,
+ .mask_low = MLXPLAT_CPLD_LOW_AGGR_MASK_LOW | MLXPLAT_CPLD_LOW_AGGR_MASK_ASIC2,
+};
+
static struct mlxreg_core_data mlxplat_mlxcpld_modular_pwr_items_data[] = {
{
.label = "pwr1",
@@ -2042,7 +2237,7 @@ static struct mlxreg_core_data mlxplat_mlxcpld_global_wp_items_data[] = {
},
};
-static struct mlxreg_core_item mlxplat_mlxcpld_nvlink_blade_items[] = {
+static struct mlxreg_core_item mlxplat_mlxcpld_chassis_blade_items[] = {
{
.data = mlxplat_mlxcpld_global_wp_items_data,
.aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
@@ -2055,15 +2250,256 @@ static struct mlxreg_core_item mlxplat_mlxcpld_nvlink_blade_items[] = {
};
static
-struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_nvlink_blade_data = {
- .items = mlxplat_mlxcpld_nvlink_blade_items,
- .counter = ARRAY_SIZE(mlxplat_mlxcpld_nvlink_blade_items),
+struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_chassis_blade_data = {
+ .items = mlxplat_mlxcpld_chassis_blade_items,
+ .counter = ARRAY_SIZE(mlxplat_mlxcpld_chassis_blade_items),
.cell = MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET,
.mask = MLXPLAT_CPLD_AGGR_MASK_COMEX,
.cell_low = MLXPLAT_CPLD_LPC_REG_AGGRLO_OFFSET,
.mask_low = MLXPLAT_CPLD_LOW_AGGR_MASK_LOW,
};
+/* Platform hotplug for switch systems family data */
+static struct mlxreg_core_data mlxplat_mlxcpld_erot_ap_items_data[] = {
+ {
+ .label = "erot1_ap",
+ .reg = MLXPLAT_CPLD_LPC_REG_EROT_OFFSET,
+ .mask = BIT(0),
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+ {
+ .label = "erot2_ap",
+ .reg = MLXPLAT_CPLD_LPC_REG_EROT_OFFSET,
+ .mask = BIT(1),
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+};
+
+static struct mlxreg_core_data mlxplat_mlxcpld_erot_error_items_data[] = {
+ {
+ .label = "erot1_error",
+ .reg = MLXPLAT_CPLD_LPC_REG_EROTE_OFFSET,
+ .mask = BIT(0),
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+ {
+ .label = "erot2_error",
+ .reg = MLXPLAT_CPLD_LPC_REG_EROTE_OFFSET,
+ .mask = BIT(1),
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+};
+
+static struct mlxreg_core_item mlxplat_mlxcpld_rack_switch_items[] = {
+ {
+ .data = mlxplat_mlxcpld_ext_psu_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
+ .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
+ .mask = MLXPLAT_CPLD_PSU_EXT_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_ext_psu_items_data),
+ .inversed = 1,
+ .health = false,
+ },
+ {
+ .data = mlxplat_mlxcpld_ext_pwr_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
+ .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
+ .mask = MLXPLAT_CPLD_PWR_EXT_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_ext_pwr_items_data),
+ .inversed = 0,
+ .health = false,
+ },
+ {
+ .data = mlxplat_mlxcpld_default_ng_fan_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
+ .reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
+ .mask = MLXPLAT_CPLD_FAN_NG_MASK,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_default_ng_fan_items_data),
+ .inversed = 1,
+ .health = false,
+ },
+ {
+ .data = mlxplat_mlxcpld_erot_ap_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
+ .reg = MLXPLAT_CPLD_LPC_REG_EROT_OFFSET,
+ .mask = MLXPLAT_CPLD_EROT_MASK,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_erot_ap_items_data),
+ .inversed = 1,
+ .health = false,
+ },
+ {
+ .data = mlxplat_mlxcpld_erot_error_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
+ .reg = MLXPLAT_CPLD_LPC_REG_EROTE_OFFSET,
+ .mask = MLXPLAT_CPLD_EROT_MASK,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_erot_error_items_data),
+ .inversed = 1,
+ .health = false,
+ },
+};
+
+static
+struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_rack_switch_data = {
+ .items = mlxplat_mlxcpld_rack_switch_items,
+ .counter = ARRAY_SIZE(mlxplat_mlxcpld_rack_switch_items),
+ .cell = MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET,
+ .mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF | MLXPLAT_CPLD_AGGR_MASK_COMEX,
+ .cell_low = MLXPLAT_CPLD_LPC_REG_AGGRLO_OFFSET,
+ .mask_low = MLXPLAT_CPLD_LOW_AGGR_MASK_LOW,
+};
+
+/* Callback performs graceful shutdown after notification about power button event */
+static int
+mlxplat_mlxcpld_l1_switch_pwr_events_handler(void *handle, enum mlxreg_hotplug_kind kind,
+ u8 action)
+{
+ dev_info(&mlxplat_dev->dev, "System shutdown due to short press of power button");
+ kernel_halt();
+ return 0;
+}
+
+static struct mlxreg_core_hotplug_notifier mlxplat_mlxcpld_l1_switch_pwr_events_notifier = {
+ .user_handler = mlxplat_mlxcpld_l1_switch_pwr_events_handler,
+};
+
+/* Platform hotplug for l1 switch systems family data */
+static struct mlxreg_core_data mlxplat_mlxcpld_l1_switch_pwr_events_items_data[] = {
+ {
+ .label = "power_button",
+ .reg = MLXPLAT_CPLD_LPC_REG_PWRB_OFFSET,
+ .mask = MLXPLAT_CPLD_PWR_BUTTON_MASK,
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ .hpdev.notifier = &mlxplat_mlxcpld_l1_switch_pwr_events_notifier,
+ },
+};
+
+/* Callback activates latch reset flow after notification about intrusion event */
+static int
+mlxplat_mlxcpld_l1_switch_intrusion_events_handler(void *handle, enum mlxreg_hotplug_kind kind,
+ u8 action)
+{
+ struct mlxplat_priv *priv = platform_get_drvdata(mlxplat_dev);
+ u32 regval;
+ int err;
+
+ err = regmap_read(priv->regmap, MLXPLAT_CPLD_LPC_REG_GP1_OFFSET, &regval);
+ if (err)
+ goto fail_regmap_read;
+
+ if (action) {
+ dev_info(&mlxplat_dev->dev, "Detected intrusion - system latch is opened");
+ err = regmap_write(priv->regmap, MLXPLAT_CPLD_LPC_REG_GP1_OFFSET,
+ regval | MLXPLAT_CPLD_LATCH_RST_MASK);
+ } else {
+ dev_info(&mlxplat_dev->dev, "System latch is properly closed");
+ err = regmap_write(priv->regmap, MLXPLAT_CPLD_LPC_REG_GP1_OFFSET,
+ regval & ~MLXPLAT_CPLD_LATCH_RST_MASK);
+ }
+
+ if (err)
+ goto fail_regmap_write;
+
+ return 0;
+
+fail_regmap_read:
+fail_regmap_write:
+ dev_err(&mlxplat_dev->dev, "Register access failed");
+ return err;
+}
+
+static struct mlxreg_core_hotplug_notifier mlxplat_mlxcpld_l1_switch_intrusion_events_notifier = {
+ .user_handler = mlxplat_mlxcpld_l1_switch_intrusion_events_handler,
+};
+
+static struct mlxreg_core_data mlxplat_mlxcpld_l1_switch_health_events_items_data[] = {
+ {
+ .label = "thermal1_pdb",
+ .reg = MLXPLAT_CPLD_LPC_REG_BRD_OFFSET,
+ .mask = MLXPLAT_CPLD_THERMAL1_PDB_MASK,
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+ {
+ .label = "thermal2_pdb",
+ .reg = MLXPLAT_CPLD_LPC_REG_BRD_OFFSET,
+ .mask = MLXPLAT_CPLD_THERMAL2_PDB_MASK,
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+ {
+ .label = "intrusion",
+ .reg = MLXPLAT_CPLD_LPC_REG_BRD_OFFSET,
+ .mask = MLXPLAT_CPLD_INTRUSION_MASK,
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ .hpdev.notifier = &mlxplat_mlxcpld_l1_switch_intrusion_events_notifier,
+ },
+ {
+ .label = "pwm_pg",
+ .reg = MLXPLAT_CPLD_LPC_REG_BRD_OFFSET,
+ .mask = MLXPLAT_CPLD_PWM_PG_MASK,
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+};
+
+static struct mlxreg_core_item mlxplat_mlxcpld_l1_switch_events_items[] = {
+ {
+ .data = mlxplat_mlxcpld_default_ng_fan_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
+ .reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
+ .mask = MLXPLAT_CPLD_FAN_NG_MASK,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_default_ng_fan_items_data),
+ .inversed = 1,
+ .health = false,
+ },
+ {
+ .data = mlxplat_mlxcpld_erot_ap_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
+ .reg = MLXPLAT_CPLD_LPC_REG_EROT_OFFSET,
+ .mask = MLXPLAT_CPLD_EROT_MASK,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_erot_ap_items_data),
+ .inversed = 1,
+ .health = false,
+ },
+ {
+ .data = mlxplat_mlxcpld_erot_error_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
+ .reg = MLXPLAT_CPLD_LPC_REG_EROTE_OFFSET,
+ .mask = MLXPLAT_CPLD_EROT_MASK,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_erot_error_items_data),
+ .inversed = 1,
+ .health = false,
+ },
+ {
+ .data = mlxplat_mlxcpld_l1_switch_pwr_events_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
+ .reg = MLXPLAT_CPLD_LPC_REG_PWRB_OFFSET,
+ .mask = MLXPLAT_CPLD_PWR_BUTTON_MASK,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_l1_switch_pwr_events_items_data),
+ .inversed = 0,
+ .health = false,
+ },
+ {
+ .data = mlxplat_mlxcpld_l1_switch_health_events_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
+ .reg = MLXPLAT_CPLD_LPC_REG_BRD_OFFSET,
+ .mask = MLXPLAT_CPLD_L1_CHA_HEALTH_MASK,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_l1_switch_health_events_items_data),
+ .inversed = 0,
+ .health = false,
+ .ind = 8,
+ },
+};
+
+static
+struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_l1_switch_data = {
+ .items = mlxplat_mlxcpld_l1_switch_events_items,
+ .counter = ARRAY_SIZE(mlxplat_mlxcpld_l1_switch_events_items),
+ .cell = MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET,
+ .mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF | MLXPLAT_CPLD_AGGR_MASK_COMEX,
+ .cell_low = MLXPLAT_CPLD_LPC_REG_AGGRLO_OFFSET,
+ .mask_low = MLXPLAT_CPLD_LOW_AGGR_MASK_LOW | MLXPLAT_CPLD_LOW_AGGR_MASK_PWR_BUT,
+};
+
/* Platform led default data */
static struct mlxreg_core_data mlxplat_mlxcpld_default_led_data[] = {
{
@@ -2593,6 +3029,114 @@ static struct mlxreg_core_platform_data mlxplat_modular_led_data = {
.counter = ARRAY_SIZE(mlxplat_mlxcpld_modular_led_data),
};
+/* Platform led data for chassis system */
+static struct mlxreg_core_data mlxplat_mlxcpld_l1_switch_led_data[] = {
+ {
+ .label = "status:green",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ },
+ {
+ .label = "status:orange",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK
+ },
+ {
+ .label = "fan1:green",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED2_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .bit = BIT(0),
+ },
+ {
+ .label = "fan1:orange",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED2_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .bit = BIT(0),
+ },
+ {
+ .label = "fan2:green",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED2_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .bit = BIT(1),
+ },
+ {
+ .label = "fan2:orange",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED2_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .bit = BIT(1),
+ },
+ {
+ .label = "fan3:green",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED3_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .bit = BIT(2),
+ },
+ {
+ .label = "fan3:orange",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED3_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .bit = BIT(2),
+ },
+ {
+ .label = "fan4:green",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED3_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .bit = BIT(3),
+ },
+ {
+ .label = "fan4:orange",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED3_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .bit = BIT(3),
+ },
+ {
+ .label = "fan5:green",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED4_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .bit = BIT(4),
+ },
+ {
+ .label = "fan5:orange",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED4_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .bit = BIT(4),
+ },
+ {
+ .label = "fan6:green",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED4_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .bit = BIT(5),
+ },
+ {
+ .label = "fan6:orange",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED4_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .bit = BIT(5),
+ },
+ {
+ .label = "uid:blue",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED5_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ },
+};
+
+static struct mlxreg_core_platform_data mlxplat_l1_switch_led_data = {
+ .data = mlxplat_mlxcpld_l1_switch_led_data,
+ .counter = ARRAY_SIZE(mlxplat_mlxcpld_l1_switch_led_data),
+};
+
/* Platform register access default */
static struct mlxreg_core_data mlxplat_mlxcpld_default_regs_io_data[] = {
{
@@ -2948,6 +3492,51 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_ng_regs_io_data[] = {
.mode = 0200,
},
{
+ .label = "erot1_reset",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_GP2_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(6),
+ .mode = 0644,
+ },
+ {
+ .label = "erot2_reset",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_GP2_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(7),
+ .mode = 0644,
+ },
+ {
+ .label = "clk_brd_prog_en",
+ .reg = MLXPLAT_CPLD_LPC_REG_PWM_CONTROL_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(1),
+ .mode = 0644,
+ .secured = 1,
+ },
+ {
+ .label = "erot1_recovery",
+ .reg = MLXPLAT_CPLD_LPC_REG_PWM_CONTROL_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(6),
+ .mode = 0644,
+ },
+ {
+ .label = "erot2_recovery",
+ .reg = MLXPLAT_CPLD_LPC_REG_PWM_CONTROL_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(7),
+ .mode = 0644,
+ },
+ {
+ .label = "erot1_wp",
+ .reg = MLXPLAT_CPLD_LPC_REG_PWM_CONTROL_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(4),
+ .mode = 0644,
+ .secured = 1,
+ },
+ {
+ .label = "erot2_wp",
+ .reg = MLXPLAT_CPLD_LPC_REG_PWM_CONTROL_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(5),
+ .mode = 0644,
+ .secured = 1,
+ },
+ {
.label = "reset_long_pb",
.reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(0),
@@ -3014,7 +3603,7 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_ng_regs_io_data[] = {
.mode = 0444,
},
{
- .label = "reset_voltmon_upgrade_fail",
+ .label = "reset_pwr_converter_fail",
.reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(0),
.mode = 0444,
@@ -3050,6 +3639,12 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_ng_regs_io_data[] = {
.mode = 0444,
},
{
+ .label = "reset_ac_ok_fail",
+ .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(7),
+ .mode = 0444,
+ },
+ {
.label = "psu1_on",
.reg = MLXPLAT_CPLD_LPC_REG_GP1_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(0),
@@ -3074,12 +3669,48 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_ng_regs_io_data[] = {
.mode = 0200,
},
{
+ .label = "deep_pwr_cycle",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(5),
+ .mode = 0200,
+ },
+ {
+ .label = "latch_reset",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(5),
+ .mode = 0200,
+ },
+ {
.label = "jtag_enable",
.reg = MLXPLAT_CPLD_LPC_REG_GP2_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(4),
.mode = 0644,
},
{
+ .label = "dbg1",
+ .reg = MLXPLAT_CPLD_LPC_REG_DBG1_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0644,
+ },
+ {
+ .label = "dbg2",
+ .reg = MLXPLAT_CPLD_LPC_REG_DBG2_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0644,
+ },
+ {
+ .label = "dbg3",
+ .reg = MLXPLAT_CPLD_LPC_REG_DBG3_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0644,
+ },
+ {
+ .label = "dbg4",
+ .reg = MLXPLAT_CPLD_LPC_REG_DBG4_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0644,
+ },
+ {
.label = "asic_health",
.reg = MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET,
.mask = MLXPLAT_CPLD_ASIC_MASK,
@@ -3131,6 +3762,13 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_ng_regs_io_data[] = {
.mode = 0444,
},
{
+ .label = "pwr_converter_prog_en",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP0_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(0),
+ .mode = 0644,
+ .secured = 1,
+ },
+ {
.label = "vpd_wp",
.reg = MLXPLAT_CPLD_LPC_REG_GP0_OFFSET,
.mask = GENMASK(7, 0) & ~BIT(3),
@@ -3143,6 +3781,49 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_ng_regs_io_data[] = {
.mode = 0644,
},
{
+ .label = "erot1_ap_reset",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP4_RO_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(0),
+ .mode = 0444,
+ },
+ {
+ .label = "erot2_ap_reset",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP4_RO_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(1),
+ .mode = 0444,
+ },
+ {
+ .label = "clk_brd1_boot_fail",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP4_RO_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(4),
+ .mode = 0444,
+ },
+ {
+ .label = "clk_brd2_boot_fail",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP4_RO_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(5),
+ .mode = 0444,
+ },
+ {
+ .label = "clk_brd_fail",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP4_RO_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(6),
+ .mode = 0444,
+ },
+ {
+ .label = "asic_pg_fail",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP4_RO_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(7),
+ .mode = 0444,
+ },
+ {
+ .label = "spi_chnl_select",
+ .reg = MLXPLAT_CPLD_LPC_REG_SPI_CHNL_SELECT,
+ .mask = GENMASK(7, 0),
+ .bit = 1,
+ .mode = 0644,
+ },
+ {
.label = "config1",
.reg = MLXPLAT_CPLD_LPC_REG_CONFIG1_OFFSET,
.bit = GENMASK(7, 0),
@@ -3657,8 +4338,8 @@ static struct mlxreg_core_platform_data mlxplat_modular_regs_io_data = {
.counter = ARRAY_SIZE(mlxplat_mlxcpld_modular_regs_io_data),
};
-/* Platform register access for NVLink blade systems family data */
-static struct mlxreg_core_data mlxplat_mlxcpld_nvlink_blade_regs_io_data[] = {
+/* Platform register access for chassis blade systems family data */
+static struct mlxreg_core_data mlxplat_mlxcpld_chassis_blade_regs_io_data[] = {
{
.label = "cpld1_version",
.reg = MLXPLAT_CPLD_LPC_REG_CPLD1_VER_OFFSET,
@@ -3855,9 +4536,9 @@ static struct mlxreg_core_data mlxplat_mlxcpld_nvlink_blade_regs_io_data[] = {
},
};
-static struct mlxreg_core_platform_data mlxplat_nvlink_blade_regs_io_data = {
- .data = mlxplat_mlxcpld_nvlink_blade_regs_io_data,
- .counter = ARRAY_SIZE(mlxplat_mlxcpld_nvlink_blade_regs_io_data),
+static struct mlxreg_core_platform_data mlxplat_chassis_blade_regs_io_data = {
+ .data = mlxplat_mlxcpld_chassis_blade_regs_io_data,
+ .counter = ARRAY_SIZE(mlxplat_mlxcpld_chassis_blade_regs_io_data),
};
/* Platform FAN default */
@@ -4242,11 +4923,18 @@ static bool mlxplat_mlxcpld_writeable_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_SAFE_BIOS_OFFSET:
case MLXPLAT_CPLD_LPC_SAFE_BIOS_WP_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGR_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_DBG1_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_DBG2_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_DBG3_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_DBG4_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGRLO_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGRCO_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGRCX_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_GWP_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_GWP_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_BRD_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_BRD_EVENT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_BRD_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_ASIC_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_ASIC_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_ASIC2_EVENT_OFFSET:
@@ -4257,6 +4945,12 @@ static bool mlxplat_mlxcpld_writeable_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_PWR_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FAN_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FAN_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_EROT_EVENT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_EROT_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_EROTE_EVENT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_EROTE_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_PWRB_EVENT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_PWRB_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGRLC_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LC_IN_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LC_IN_MASK_OFFSET:
@@ -4274,6 +4968,7 @@ static bool mlxplat_mlxcpld_writeable_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_LC_SD_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LC_SD_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LC_PWR_ON:
+ case MLXPLAT_CPLD_LPC_REG_SPI_CHNL_SELECT:
case MLXPLAT_CPLD_LPC_REG_WD_CLEAR_OFFSET:
case MLXPLAT_CPLD_LPC_REG_WD_CLEAR_WP_OFFSET:
case MLXPLAT_CPLD_LPC_REG_WD1_TMR_OFFSET:
@@ -4284,6 +4979,11 @@ static bool mlxplat_mlxcpld_writeable_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_WD3_TMR_OFFSET:
case MLXPLAT_CPLD_LPC_REG_WD3_TLEFT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_WD3_ACT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_DBG_CTRL_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_I2C_CH1_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_I2C_CH2_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_I2C_CH3_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_I2C_CH4_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PWM1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PWM2_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PWM3_OFFSET:
@@ -4334,6 +5034,10 @@ static bool mlxplat_mlxcpld_readable_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_SAFE_BIOS_WP_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGR_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_DBG1_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_DBG2_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_DBG3_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_DBG4_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGRLO_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGRLO_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGRCO_OFFSET:
@@ -4343,6 +5047,9 @@ static bool mlxplat_mlxcpld_readable_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_GWP_OFFSET:
case MLXPLAT_CPLD_LPC_REG_GWP_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_GWP_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_BRD_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_BRD_EVENT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_BRD_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET:
case MLXPLAT_CPLD_LPC_REG_ASIC_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_ASIC_MASK_OFFSET:
@@ -4358,6 +5065,15 @@ static bool mlxplat_mlxcpld_readable_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_FAN_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FAN_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FAN_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_EROT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_EROT_EVENT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_EROT_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_EROTE_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_EROTE_EVENT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_EROTE_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_PWRB_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_PWRB_EVENT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_PWRB_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGRLC_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGRLC_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LC_IN_OFFSET:
@@ -4382,6 +5098,8 @@ static bool mlxplat_mlxcpld_readable_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_LC_SD_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LC_SD_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LC_PWR_ON:
+ case MLXPLAT_CPLD_LPC_REG_GP4_RO_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_SPI_CHNL_SELECT:
case MLXPLAT_CPLD_LPC_REG_WD_CLEAR_OFFSET:
case MLXPLAT_CPLD_LPC_REG_WD_CLEAR_WP_OFFSET:
case MLXPLAT_CPLD_LPC_REG_WD1_TMR_OFFSET:
@@ -4392,6 +5110,11 @@ static bool mlxplat_mlxcpld_readable_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_WD3_TMR_OFFSET:
case MLXPLAT_CPLD_LPC_REG_WD3_TLEFT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_WD3_ACT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_DBG_CTRL_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_I2C_CH1_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_I2C_CH2_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_I2C_CH3_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_I2C_CH4_OFFSET:
case MLXPLAT_CPLD_LPC_REG_CPLD1_MVER_OFFSET:
case MLXPLAT_CPLD_LPC_REG_CPLD2_MVER_OFFSET:
case MLXPLAT_CPLD_LPC_REG_CPLD3_MVER_OFFSET:
@@ -4468,6 +5191,10 @@ static bool mlxplat_mlxcpld_volatile_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_SAFE_BIOS_WP_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGR_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_DBG1_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_DBG2_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_DBG3_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_DBG4_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGRLO_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGRLO_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGRCO_OFFSET:
@@ -4477,6 +5204,9 @@ static bool mlxplat_mlxcpld_volatile_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_GWP_OFFSET:
case MLXPLAT_CPLD_LPC_REG_GWP_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_GWP_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_BRD_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_BRD_EVENT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_BRD_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET:
case MLXPLAT_CPLD_LPC_REG_ASIC_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_ASIC_MASK_OFFSET:
@@ -4492,6 +5222,15 @@ static bool mlxplat_mlxcpld_volatile_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_FAN_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FAN_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FAN_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_EROT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_EROT_EVENT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_EROT_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_EROTE_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_EROTE_EVENT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_EROTE_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_PWRB_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_PWRB_EVENT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_PWRB_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGRLC_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGRLC_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LC_IN_OFFSET:
@@ -4516,10 +5255,17 @@ static bool mlxplat_mlxcpld_volatile_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_LC_SD_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LC_SD_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LC_PWR_ON:
+ case MLXPLAT_CPLD_LPC_REG_GP4_RO_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_SPI_CHNL_SELECT:
case MLXPLAT_CPLD_LPC_REG_WD2_TMR_OFFSET:
case MLXPLAT_CPLD_LPC_REG_WD2_TLEFT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_WD3_TMR_OFFSET:
case MLXPLAT_CPLD_LPC_REG_WD3_TLEFT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_DBG_CTRL_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_I2C_CH1_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_I2C_CH2_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_I2C_CH3_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_I2C_CH4_OFFSET:
case MLXPLAT_CPLD_LPC_REG_CPLD1_MVER_OFFSET:
case MLXPLAT_CPLD_LPC_REG_CPLD2_MVER_OFFSET:
case MLXPLAT_CPLD_LPC_REG_CPLD3_MVER_OFFSET:
@@ -4583,6 +5329,13 @@ static const struct reg_default mlxplat_mlxcpld_regmap_ng400[] = {
{ MLXPLAT_CPLD_LPC_REG_WD3_ACT_OFFSET, 0x00 },
};
+static const struct reg_default mlxplat_mlxcpld_regmap_rack_switch[] = {
+ { MLXPLAT_CPLD_LPC_REG_PWM_CONTROL_OFFSET, MLXPLAT_REGMAP_NVSWITCH_PWM_DEFAULT },
+ { MLXPLAT_CPLD_LPC_REG_WD1_ACT_OFFSET, 0x00 },
+ { MLXPLAT_CPLD_LPC_REG_WD2_ACT_OFFSET, 0x00 },
+ { MLXPLAT_CPLD_LPC_REG_WD3_ACT_OFFSET, 0x00 },
+};
+
static const struct reg_default mlxplat_mlxcpld_regmap_eth_modular[] = {
{ MLXPLAT_CPLD_LPC_REG_GP2_OFFSET, 0x61 },
{ MLXPLAT_CPLD_LPC_REG_PWM_CONTROL_OFFSET, 0x00 },
@@ -4676,6 +5429,20 @@ static const struct regmap_config mlxplat_mlxcpld_regmap_config_ng400 = {
.reg_write = mlxplat_mlxcpld_reg_write,
};
+static const struct regmap_config mlxplat_mlxcpld_regmap_config_rack_switch = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 255,
+ .cache_type = REGCACHE_FLAT,
+ .writeable_reg = mlxplat_mlxcpld_writeable_reg,
+ .readable_reg = mlxplat_mlxcpld_readable_reg,
+ .volatile_reg = mlxplat_mlxcpld_volatile_reg,
+ .reg_defaults = mlxplat_mlxcpld_regmap_rack_switch,
+ .num_reg_defaults = ARRAY_SIZE(mlxplat_mlxcpld_regmap_rack_switch),
+ .reg_read = mlxplat_mlxcpld_reg_read,
+ .reg_write = mlxplat_mlxcpld_reg_write,
+};
+
static const struct regmap_config mlxplat_mlxcpld_regmap_config_eth_modular = {
.reg_bits = 8,
.val_bits = 8,
@@ -4694,7 +5461,6 @@ static struct resource mlxplat_mlxcpld_resources[] = {
[0] = DEFINE_RES_IRQ_NAMED(MLXPLAT_CPLD_LPC_SYSIRQ, "mlxreg-hotplug"),
};
-static struct platform_device *mlxplat_dev;
static struct mlxreg_core_hotplug_platform_data *mlxplat_i2c;
static struct mlxreg_core_hotplug_platform_data *mlxplat_hotplug;
static struct mlxreg_core_platform_data *mlxplat_led;
@@ -4704,6 +5470,14 @@ static struct mlxreg_core_platform_data
*mlxplat_wd_data[MLXPLAT_CPLD_WD_MAX_DEVS];
static const struct regmap_config *mlxplat_regmap_config;
+/* Platform default poweroff function */
+static void mlxplat_poweroff(void)
+{
+ struct mlxplat_priv *priv = platform_get_drvdata(mlxplat_dev);
+
+ regmap_write(priv->regmap, MLXPLAT_CPLD_LPC_REG_GP1_OFFSET, MLXPLAT_CPLD_HALT_MASK);
+}
+
static int __init mlxplat_dmi_default_matched(const struct dmi_system_id *dmi)
{
int i;
@@ -4722,6 +5496,7 @@ static int __init mlxplat_dmi_default_matched(const struct dmi_system_id *dmi)
mlxplat_led = &mlxplat_default_led_data;
mlxplat_regs_io = &mlxplat_default_regs_io_data;
mlxplat_wd_data[0] = &mlxplat_mlxcpld_wd_set_type1[0];
+ mlxplat_i2c = &mlxplat_mlxcpld_i2c_default_data;
return 1;
}
@@ -4744,6 +5519,7 @@ static int __init mlxplat_dmi_default_wc_matched(const struct dmi_system_id *dmi
mlxplat_led = &mlxplat_default_led_wc_data;
mlxplat_regs_io = &mlxplat_default_regs_io_data;
mlxplat_wd_data[0] = &mlxplat_mlxcpld_wd_set_type1[0];
+ mlxplat_i2c = &mlxplat_mlxcpld_i2c_default_data;
return 1;
}
@@ -4791,6 +5567,7 @@ static int __init mlxplat_dmi_msn21xx_matched(const struct dmi_system_id *dmi)
mlxplat_led = &mlxplat_msn21xx_led_data;
mlxplat_regs_io = &mlxplat_msn21xx_regs_io_data;
mlxplat_wd_data[0] = &mlxplat_mlxcpld_wd_set_type1[0];
+ mlxplat_i2c = &mlxplat_mlxcpld_i2c_default_data;
return 1;
}
@@ -4813,6 +5590,7 @@ static int __init mlxplat_dmi_msn274x_matched(const struct dmi_system_id *dmi)
mlxplat_led = &mlxplat_default_led_data;
mlxplat_regs_io = &mlxplat_msn21xx_regs_io_data;
mlxplat_wd_data[0] = &mlxplat_mlxcpld_wd_set_type1[0];
+ mlxplat_i2c = &mlxplat_mlxcpld_i2c_default_data;
return 1;
}
@@ -4835,6 +5613,7 @@ static int __init mlxplat_dmi_msn201x_matched(const struct dmi_system_id *dmi)
mlxplat_led = &mlxplat_msn21xx_led_data;
mlxplat_regs_io = &mlxplat_msn21xx_regs_io_data;
mlxplat_wd_data[0] = &mlxplat_mlxcpld_wd_set_type1[0];
+ mlxplat_i2c = &mlxplat_mlxcpld_i2c_default_data;
return 1;
}
@@ -4884,6 +5663,7 @@ static int __init mlxplat_dmi_comex_matched(const struct dmi_system_id *dmi)
mlxplat_fan = &mlxplat_default_fan_data;
for (i = 0; i < ARRAY_SIZE(mlxplat_mlxcpld_wd_set_type2); i++)
mlxplat_wd_data[i] = &mlxplat_mlxcpld_wd_set_type2[i];
+ mlxplat_i2c = &mlxplat_mlxcpld_i2c_default_data;
mlxplat_regmap_config = &mlxplat_mlxcpld_regmap_config_comex;
return 1;
@@ -4935,14 +5715,14 @@ static int __init mlxplat_dmi_modular_matched(const struct dmi_system_id *dmi)
return 1;
}
-static int __init mlxplat_dmi_nvlink_blade_matched(const struct dmi_system_id *dmi)
+static int __init mlxplat_dmi_chassis_blade_matched(const struct dmi_system_id *dmi)
{
int i;
mlxplat_max_adap_num = MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM;
mlxplat_mux_num = ARRAY_SIZE(mlxplat_default_mux_data);
mlxplat_mux_data = mlxplat_default_mux_data;
- mlxplat_hotplug = &mlxplat_mlxcpld_nvlink_blade_data;
+ mlxplat_hotplug = &mlxplat_mlxcpld_chassis_blade_data;
mlxplat_hotplug->deferred_nr =
mlxplat_msn21xx_channels[MLXPLAT_CPLD_GRP_CHNL_NUM - 1];
for (i = 0; i < mlxplat_mux_num; i++) {
@@ -4950,13 +5730,77 @@ static int __init mlxplat_dmi_nvlink_blade_matched(const struct dmi_system_id *d
mlxplat_mux_data[i].n_values =
ARRAY_SIZE(mlxplat_msn21xx_channels);
}
- mlxplat_regs_io = &mlxplat_nvlink_blade_regs_io_data;
+ mlxplat_regs_io = &mlxplat_chassis_blade_regs_io_data;
+ mlxplat_i2c = &mlxplat_mlxcpld_i2c_ng_data;
+ mlxplat_regmap_config = &mlxplat_mlxcpld_regmap_config_ng400;
+
+ return 1;
+}
+
+static int __init mlxplat_dmi_rack_switch_matched(const struct dmi_system_id *dmi)
+{
+ int i;
+
+ mlxplat_max_adap_num = MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM;
+ mlxplat_mux_num = ARRAY_SIZE(mlxplat_rack_switch_mux_data);
+ mlxplat_mux_data = mlxplat_rack_switch_mux_data;
+ mlxplat_hotplug = &mlxplat_mlxcpld_rack_switch_data;
+ mlxplat_hotplug->deferred_nr =
+ mlxplat_msn21xx_channels[MLXPLAT_CPLD_GRP_CHNL_NUM - 1];
+ mlxplat_led = &mlxplat_default_ng_led_data;
+ mlxplat_regs_io = &mlxplat_default_ng_regs_io_data;
+ mlxplat_fan = &mlxplat_default_fan_data;
+ for (i = 0; i < ARRAY_SIZE(mlxplat_mlxcpld_wd_set_type2); i++)
+ mlxplat_wd_data[i] = &mlxplat_mlxcpld_wd_set_type2[i];
+ mlxplat_i2c = &mlxplat_mlxcpld_i2c_ng_data;
+ mlxplat_regmap_config = &mlxplat_mlxcpld_regmap_config_rack_switch;
+
+ return 1;
+}
+
+static int __init mlxplat_dmi_ng800_matched(const struct dmi_system_id *dmi)
+{
+ int i;
+
+ mlxplat_max_adap_num = MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM;
+ mlxplat_mux_num = ARRAY_SIZE(mlxplat_ng800_mux_data);
+ mlxplat_mux_data = mlxplat_ng800_mux_data;
+ mlxplat_hotplug = &mlxplat_mlxcpld_ng800_data;
+ mlxplat_hotplug->deferred_nr =
+ mlxplat_msn21xx_channels[MLXPLAT_CPLD_GRP_CHNL_NUM - 1];
+ mlxplat_led = &mlxplat_default_ng_led_data;
+ mlxplat_regs_io = &mlxplat_default_ng_regs_io_data;
+ mlxplat_fan = &mlxplat_default_fan_data;
+ for (i = 0; i < ARRAY_SIZE(mlxplat_mlxcpld_wd_set_type2); i++)
+ mlxplat_wd_data[i] = &mlxplat_mlxcpld_wd_set_type2[i];
mlxplat_i2c = &mlxplat_mlxcpld_i2c_ng_data;
mlxplat_regmap_config = &mlxplat_mlxcpld_regmap_config_ng400;
return 1;
}
+static int __init mlxplat_dmi_l1_switch_matched(const struct dmi_system_id *dmi)
+{
+ int i;
+
+ mlxplat_max_adap_num = MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM;
+ mlxplat_mux_num = ARRAY_SIZE(mlxplat_rack_switch_mux_data);
+ mlxplat_mux_data = mlxplat_rack_switch_mux_data;
+ mlxplat_hotplug = &mlxplat_mlxcpld_l1_switch_data;
+ mlxplat_hotplug->deferred_nr =
+ mlxplat_msn21xx_channels[MLXPLAT_CPLD_GRP_CHNL_NUM - 1];
+ mlxplat_led = &mlxplat_l1_switch_led_data;
+ mlxplat_regs_io = &mlxplat_default_ng_regs_io_data;
+ mlxplat_fan = &mlxplat_default_fan_data;
+ for (i = 0; i < ARRAY_SIZE(mlxplat_mlxcpld_wd_set_type2); i++)
+ mlxplat_wd_data[i] = &mlxplat_mlxcpld_wd_set_type2[i];
+ mlxplat_i2c = &mlxplat_mlxcpld_i2c_ng_data;
+ mlxplat_regmap_config = &mlxplat_mlxcpld_regmap_config_rack_switch;
+ pm_power_off = mlxplat_poweroff;
+
+ return 1;
+}
+
static const struct dmi_system_id mlxplat_dmi_table[] __initconst = {
{
.callback = mlxplat_dmi_default_wc_matched,
@@ -5015,6 +5859,13 @@ static const struct dmi_system_id mlxplat_dmi_table[] __initconst = {
},
},
{
+ .callback = mlxplat_dmi_rack_switch_matched,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "VMOD0010"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "HI142"),
+ },
+ },
+ {
.callback = mlxplat_dmi_ng400_matched,
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "VMOD0010"),
@@ -5027,12 +5878,24 @@ static const struct dmi_system_id mlxplat_dmi_table[] __initconst = {
},
},
{
- .callback = mlxplat_dmi_nvlink_blade_matched,
+ .callback = mlxplat_dmi_ng800_matched,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "VMOD0013"),
+ },
+ },
+ {
+ .callback = mlxplat_dmi_chassis_blade_matched,
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "VMOD0015"),
},
},
{
+ .callback = mlxplat_dmi_l1_switch_matched,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "VMOD0017"),
+ },
+ },
+ {
.callback = mlxplat_dmi_msn274x_matched,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Mellanox Technologies"),
@@ -5145,10 +6008,11 @@ static int mlxplat_mlxcpld_verify_bus_topology(int *nr)
shift = *nr - mlxplat_mux_data[i].parent;
mlxplat_mux_data[i].parent = *nr;
mlxplat_mux_data[i].base_nr += shift;
- if (shift > 0)
- mlxplat_hotplug->shift_nr = shift;
}
+ if (shift > 0)
+ mlxplat_hotplug->shift_nr = shift;
+
return 0;
}
@@ -5173,73 +6037,53 @@ static int mlxplat_mlxcpld_check_wd_capability(void *regmap)
return 0;
}
-static int __init mlxplat_init(void)
+static int mlxplat_lpc_cpld_device_init(struct resource **hotplug_resources,
+ unsigned int *hotplug_resources_size)
{
- struct mlxplat_priv *priv;
- int i, j, nr, err;
-
- if (!dmi_check_system(mlxplat_dmi_table))
- return -ENODEV;
+ int err;
mlxplat_dev = platform_device_register_simple(MLX_PLAT_DEVICE_NAME, PLATFORM_DEVID_NONE,
- mlxplat_lpc_resources,
- ARRAY_SIZE(mlxplat_lpc_resources));
-
+ mlxplat_lpc_resources,
+ ARRAY_SIZE(mlxplat_lpc_resources));
if (IS_ERR(mlxplat_dev))
return PTR_ERR(mlxplat_dev);
- priv = devm_kzalloc(&mlxplat_dev->dev, sizeof(struct mlxplat_priv),
- GFP_KERNEL);
- if (!priv) {
- err = -ENOMEM;
- goto fail_alloc;
- }
- platform_set_drvdata(mlxplat_dev, priv);
-
mlxplat_mlxcpld_regmap_ctx.base = devm_ioport_map(&mlxplat_dev->dev,
- mlxplat_lpc_resources[1].start, 1);
+ mlxplat_lpc_resources[1].start, 1);
if (!mlxplat_mlxcpld_regmap_ctx.base) {
err = -ENOMEM;
- goto fail_alloc;
+ goto fail_devm_ioport_map;
}
- if (!mlxplat_regmap_config)
- mlxplat_regmap_config = &mlxplat_mlxcpld_regmap_config;
+ *hotplug_resources = mlxplat_mlxcpld_resources;
+ *hotplug_resources_size = ARRAY_SIZE(mlxplat_mlxcpld_resources);
- priv->regmap = devm_regmap_init(&mlxplat_dev->dev, NULL,
- &mlxplat_mlxcpld_regmap_ctx,
- mlxplat_regmap_config);
- if (IS_ERR(priv->regmap)) {
- err = PTR_ERR(priv->regmap);
- goto fail_alloc;
- }
+ return 0;
- err = mlxplat_mlxcpld_verify_bus_topology(&nr);
- if (nr < 0)
- goto fail_alloc;
+fail_devm_ioport_map:
+ platform_device_unregister(mlxplat_dev);
+ return err;
+}
- nr = (nr == mlxplat_max_adap_num) ? -1 : nr;
- if (mlxplat_i2c)
- mlxplat_i2c->regmap = priv->regmap;
- priv->pdev_i2c = platform_device_register_resndata(&mlxplat_dev->dev, "i2c_mlxcpld",
- nr, mlxplat_mlxcpld_resources,
- ARRAY_SIZE(mlxplat_mlxcpld_resources),
- mlxplat_i2c, sizeof(*mlxplat_i2c));
- if (IS_ERR(priv->pdev_i2c)) {
- err = PTR_ERR(priv->pdev_i2c);
- goto fail_alloc;
- }
+static void mlxplat_lpc_cpld_device_exit(void)
+{
+ platform_device_unregister(mlxplat_dev);
+}
- for (i = 0; i < mlxplat_mux_num; i++) {
- priv->pdev_mux[i] = platform_device_register_resndata(&priv->pdev_i2c->dev,
- "i2c-mux-reg", i, NULL, 0,
- &mlxplat_mux_data[i],
- sizeof(mlxplat_mux_data[i]));
- if (IS_ERR(priv->pdev_mux[i])) {
- err = PTR_ERR(priv->pdev_mux[i]);
- goto fail_platform_mux_register;
- }
- }
+static int
+mlxplat_pre_init(struct resource **hotplug_resources, unsigned int *hotplug_resources_size)
+{
+ return mlxplat_lpc_cpld_device_init(hotplug_resources, hotplug_resources_size);
+}
+
+static void mlxplat_post_exit(void)
+{
+ mlxplat_lpc_cpld_device_exit();
+}
+
+static int mlxplat_post_init(struct mlxplat_priv *priv)
+{
+ int i = 0, err;
/* Add hotplug driver */
if (mlxplat_hotplug) {
@@ -5247,24 +6091,15 @@ static int __init mlxplat_init(void)
priv->pdev_hotplug =
platform_device_register_resndata(&mlxplat_dev->dev,
"mlxreg-hotplug", PLATFORM_DEVID_NONE,
- mlxplat_mlxcpld_resources,
- ARRAY_SIZE(mlxplat_mlxcpld_resources),
+ priv->hotplug_resources,
+ priv->hotplug_resources_size,
mlxplat_hotplug, sizeof(*mlxplat_hotplug));
if (IS_ERR(priv->pdev_hotplug)) {
err = PTR_ERR(priv->pdev_hotplug);
- goto fail_platform_mux_register;
+ goto fail_platform_hotplug_register;
}
}
- /* Set default registers. */
- for (j = 0; j < mlxplat_regmap_config->num_reg_defaults; j++) {
- err = regmap_write(priv->regmap,
- mlxplat_regmap_config->reg_defaults[j].reg,
- mlxplat_regmap_config->reg_defaults[j].def);
- if (err)
- goto fail_platform_mux_register;
- }
-
/* Add LED driver. */
if (mlxplat_led) {
mlxplat_led->regmap = priv->regmap;
@@ -5274,7 +6109,7 @@ static int __init mlxplat_init(void)
sizeof(*mlxplat_led));
if (IS_ERR(priv->pdev_led)) {
err = PTR_ERR(priv->pdev_led);
- goto fail_platform_hotplug_register;
+ goto fail_platform_leds_register;
}
}
@@ -5288,7 +6123,7 @@ static int __init mlxplat_init(void)
sizeof(*mlxplat_regs_io));
if (IS_ERR(priv->pdev_io_regs)) {
err = PTR_ERR(priv->pdev_io_regs);
- goto fail_platform_led_register;
+ goto fail_platform_io_register;
}
}
@@ -5301,7 +6136,7 @@ static int __init mlxplat_init(void)
sizeof(*mlxplat_fan));
if (IS_ERR(priv->pdev_fan)) {
err = PTR_ERR(priv->pdev_fan);
- goto fail_platform_io_regs_register;
+ goto fail_platform_fan_register;
}
}
@@ -5309,56 +6144,40 @@ static int __init mlxplat_init(void)
err = mlxplat_mlxcpld_check_wd_capability(priv->regmap);
if (err)
goto fail_platform_wd_register;
- for (j = 0; j < MLXPLAT_CPLD_WD_MAX_DEVS; j++) {
- if (mlxplat_wd_data[j]) {
- mlxplat_wd_data[j]->regmap = priv->regmap;
- priv->pdev_wd[j] =
- platform_device_register_resndata(&mlxplat_dev->dev, "mlx-wdt", j,
- NULL, 0, mlxplat_wd_data[j],
- sizeof(*mlxplat_wd_data[j]));
- if (IS_ERR(priv->pdev_wd[j])) {
- err = PTR_ERR(priv->pdev_wd[j]);
+ for (i = 0; i < MLXPLAT_CPLD_WD_MAX_DEVS; i++) {
+ if (mlxplat_wd_data[i]) {
+ mlxplat_wd_data[i]->regmap = priv->regmap;
+ priv->pdev_wd[i] =
+ platform_device_register_resndata(&mlxplat_dev->dev, "mlx-wdt", i,
+ NULL, 0, mlxplat_wd_data[i],
+ sizeof(*mlxplat_wd_data[i]));
+ if (IS_ERR(priv->pdev_wd[i])) {
+ err = PTR_ERR(priv->pdev_wd[i]);
goto fail_platform_wd_register;
}
}
}
- /* Sync registers with hardware. */
- regcache_mark_dirty(priv->regmap);
- err = regcache_sync(priv->regmap);
- if (err)
- goto fail_platform_wd_register;
-
return 0;
fail_platform_wd_register:
- while (--j >= 0)
- platform_device_unregister(priv->pdev_wd[j]);
- if (mlxplat_fan)
- platform_device_unregister(priv->pdev_fan);
-fail_platform_io_regs_register:
+ while (--i >= 0)
+ platform_device_unregister(priv->pdev_wd[i]);
+fail_platform_fan_register:
if (mlxplat_regs_io)
platform_device_unregister(priv->pdev_io_regs);
-fail_platform_led_register:
+fail_platform_io_register:
if (mlxplat_led)
platform_device_unregister(priv->pdev_led);
-fail_platform_hotplug_register:
+fail_platform_leds_register:
if (mlxplat_hotplug)
platform_device_unregister(priv->pdev_hotplug);
-fail_platform_mux_register:
- while (--i >= 0)
- platform_device_unregister(priv->pdev_mux[i]);
- platform_device_unregister(priv->pdev_i2c);
-fail_alloc:
- platform_device_unregister(mlxplat_dev);
-
+fail_platform_hotplug_register:
return err;
}
-module_init(mlxplat_init);
-static void __exit mlxplat_exit(void)
+static void mlxplat_pre_exit(struct mlxplat_priv *priv)
{
- struct mlxplat_priv *priv = platform_get_drvdata(mlxplat_dev);
int i;
for (i = MLXPLAT_CPLD_WD_MAX_DEVS - 1; i >= 0 ; i--)
@@ -5371,12 +6190,185 @@ static void __exit mlxplat_exit(void)
platform_device_unregister(priv->pdev_led);
if (priv->pdev_hotplug)
platform_device_unregister(priv->pdev_hotplug);
+}
+
+static int
+mlxplat_i2c_mux_complition_notify(void *handle, struct i2c_adapter *parent,
+ struct i2c_adapter *adapters[])
+{
+ struct mlxplat_priv *priv = handle;
+
+ return mlxplat_post_init(priv);
+}
+
+static int mlxplat_i2c_mux_topolgy_init(struct mlxplat_priv *priv)
+{
+ int i, err;
+
+ if (!priv->pdev_i2c) {
+ priv->i2c_main_init_status = MLXPLAT_I2C_MAIN_BUS_NOTIFIED;
+ return 0;
+ }
+
+ priv->i2c_main_init_status = MLXPLAT_I2C_MAIN_BUS_HANDLE_CREATED;
+ for (i = 0; i < mlxplat_mux_num; i++) {
+ priv->pdev_mux[i] = platform_device_register_resndata(&priv->pdev_i2c->dev,
+ "i2c-mux-reg", i, NULL, 0,
+ &mlxplat_mux_data[i],
+ sizeof(mlxplat_mux_data[i]));
+ if (IS_ERR(priv->pdev_mux[i])) {
+ err = PTR_ERR(priv->pdev_mux[i]);
+ goto fail_platform_mux_register;
+ }
+ }
- for (i = mlxplat_mux_num - 1; i >= 0 ; i--)
+ return mlxplat_i2c_mux_complition_notify(priv, NULL, NULL);
+
+fail_platform_mux_register:
+ while (--i >= 0)
platform_device_unregister(priv->pdev_mux[i]);
+ return err;
+}
- platform_device_unregister(priv->pdev_i2c);
- platform_device_unregister(mlxplat_dev);
+static void mlxplat_i2c_mux_topolgy_exit(struct mlxplat_priv *priv)
+{
+ int i;
+
+ for (i = mlxplat_mux_num - 1; i >= 0 ; i--) {
+ if (priv->pdev_mux[i])
+ platform_device_unregister(priv->pdev_mux[i]);
+ }
+
+ mlxplat_post_exit();
+}
+
+static int mlxplat_i2c_main_complition_notify(void *handle, int id)
+{
+ struct mlxplat_priv *priv = handle;
+
+ return mlxplat_i2c_mux_topolgy_init(priv);
+}
+
+static int mlxplat_i2c_main_init(struct mlxplat_priv *priv)
+{
+ int nr, err;
+
+ if (!mlxplat_i2c)
+ return 0;
+
+ err = mlxplat_mlxcpld_verify_bus_topology(&nr);
+ if (nr < 0)
+ goto fail_mlxplat_mlxcpld_verify_bus_topology;
+
+ nr = (nr == mlxplat_max_adap_num) ? -1 : nr;
+ mlxplat_i2c->regmap = priv->regmap;
+ mlxplat_i2c->handle = priv;
+
+ priv->pdev_i2c = platform_device_register_resndata(&mlxplat_dev->dev, "i2c_mlxcpld",
+ nr, priv->hotplug_resources,
+ priv->hotplug_resources_size,
+ mlxplat_i2c, sizeof(*mlxplat_i2c));
+ if (IS_ERR(priv->pdev_i2c)) {
+ err = PTR_ERR(priv->pdev_i2c);
+ goto fail_platform_i2c_register;
+ }
+
+ if (priv->i2c_main_init_status == MLXPLAT_I2C_MAIN_BUS_NOTIFIED) {
+ err = mlxplat_i2c_mux_topolgy_init(priv);
+ if (err)
+ goto fail_mlxplat_i2c_mux_topolgy_init;
+ }
+
+ return 0;
+
+fail_mlxplat_i2c_mux_topolgy_init:
+fail_platform_i2c_register:
+fail_mlxplat_mlxcpld_verify_bus_topology:
+ return err;
+}
+
+static void mlxplat_i2c_main_exit(struct mlxplat_priv *priv)
+{
+ mlxplat_i2c_mux_topolgy_exit(priv);
+ if (priv->pdev_i2c)
+ platform_device_unregister(priv->pdev_i2c);
+}
+
+static int __init mlxplat_init(void)
+{
+ unsigned int hotplug_resources_size;
+ struct resource *hotplug_resources;
+ struct mlxplat_priv *priv;
+ int i, err;
+
+ if (!dmi_check_system(mlxplat_dmi_table))
+ return -ENODEV;
+
+ err = mlxplat_pre_init(&hotplug_resources, &hotplug_resources_size);
+ if (err)
+ return err;
+
+ priv = devm_kzalloc(&mlxplat_dev->dev, sizeof(struct mlxplat_priv),
+ GFP_KERNEL);
+ if (!priv) {
+ err = -ENOMEM;
+ goto fail_alloc;
+ }
+ platform_set_drvdata(mlxplat_dev, priv);
+ priv->hotplug_resources = hotplug_resources;
+ priv->hotplug_resources_size = hotplug_resources_size;
+
+ if (!mlxplat_regmap_config)
+ mlxplat_regmap_config = &mlxplat_mlxcpld_regmap_config;
+
+ priv->regmap = devm_regmap_init(&mlxplat_dev->dev, NULL,
+ &mlxplat_mlxcpld_regmap_ctx,
+ mlxplat_regmap_config);
+ if (IS_ERR(priv->regmap)) {
+ err = PTR_ERR(priv->regmap);
+ goto fail_alloc;
+ }
+
+ /* Set default registers. */
+ for (i = 0; i < mlxplat_regmap_config->num_reg_defaults; i++) {
+ err = regmap_write(priv->regmap,
+ mlxplat_regmap_config->reg_defaults[i].reg,
+ mlxplat_regmap_config->reg_defaults[i].def);
+ if (err)
+ goto fail_regmap_write;
+ }
+
+ err = mlxplat_i2c_main_init(priv);
+ if (err)
+ goto fail_mlxplat_i2c_main_init;
+
+ /* Sync registers with hardware. */
+ regcache_mark_dirty(priv->regmap);
+ err = regcache_sync(priv->regmap);
+ if (err)
+ goto fail_regcache_sync;
+
+ return 0;
+
+fail_regcache_sync:
+ mlxplat_pre_exit(priv);
+fail_mlxplat_i2c_main_init:
+fail_regmap_write:
+fail_alloc:
+ mlxplat_post_exit();
+
+ return err;
+}
+module_init(mlxplat_init);
+
+static void __exit mlxplat_exit(void)
+{
+ struct mlxplat_priv *priv = platform_get_drvdata(mlxplat_dev);
+
+ if (pm_power_off)
+ pm_power_off = NULL;
+ mlxplat_pre_exit(priv);
+ mlxplat_i2c_main_exit(priv);
}
module_exit(mlxplat_exit);
diff --git a/drivers/platform/x86/nvidia-wmi-ec-backlight.c b/drivers/platform/x86/nvidia-wmi-ec-backlight.c
index baccdf658538..1b572c90c76e 100644
--- a/drivers/platform/x86/nvidia-wmi-ec-backlight.c
+++ b/drivers/platform/x86/nvidia-wmi-ec-backlight.c
@@ -12,6 +12,10 @@
#include <linux/wmi.h>
#include <acpi/video.h>
+static bool force;
+module_param(force, bool, 0444);
+MODULE_PARM_DESC(force, "Force loading (disable acpi_backlight=xxx checks");
+
/**
* wmi_brightness_notify() - helper function for calling WMI-wrapped ACPI method
* @w: Pointer to the struct wmi_device identified by %WMI_BRIGHTNESS_GUID
@@ -91,7 +95,7 @@ static int nvidia_wmi_ec_backlight_probe(struct wmi_device *wdev, const void *ct
int ret;
/* drivers/acpi/video_detect.c also checks that SOURCE == EC */
- if (acpi_video_get_backlight_type() != acpi_backlight_nvidia_wmi_ec)
+ if (!force && acpi_video_get_backlight_type() != acpi_backlight_nvidia_wmi_ec)
return -ENODEV;
/*
diff --git a/drivers/platform/x86/think-lmi.c b/drivers/platform/x86/think-lmi.c
index a01a92769c1a..86b33b74519b 100644
--- a/drivers/platform/x86/think-lmi.c
+++ b/drivers/platform/x86/think-lmi.c
@@ -317,8 +317,8 @@ static int tlmi_get_pwd_settings(struct tlmi_pwdcfg *pwdcfg)
return -EIO;
}
- copy_size = obj->buffer.length < sizeof(struct tlmi_pwdcfg) ?
- obj->buffer.length : sizeof(struct tlmi_pwdcfg);
+ copy_size = min_t(size_t, obj->buffer.length, sizeof(struct tlmi_pwdcfg));
+
memcpy(pwdcfg, obj->buffer.pointer, copy_size);
kfree(obj);
@@ -1089,12 +1089,12 @@ static void tlmi_pwd_setting_release(struct kobject *kobj)
kfree(setting);
}
-static struct kobj_type tlmi_attr_setting_ktype = {
+static const struct kobj_type tlmi_attr_setting_ktype = {
.release = &tlmi_attr_setting_release,
.sysfs_ops = &tlmi_kobj_sysfs_ops,
};
-static struct kobj_type tlmi_pwd_setting_ktype = {
+static const struct kobj_type tlmi_pwd_setting_ktype = {
.release = &tlmi_pwd_setting_release,
.sysfs_ops = &tlmi_kobj_sysfs_ops,
};
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 02860c32625e..32c10457399e 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -5563,7 +5563,7 @@ static int light_sysfs_set(struct led_classdev *led_cdev,
static enum led_brightness light_sysfs_get(struct led_classdev *led_cdev)
{
- return (light_get_status() == 1) ? LED_FULL : LED_OFF;
+ return (light_get_status() == 1) ? LED_ON : LED_OFF;
}
static struct tpacpi_led_classdev tpacpi_led_thinklight = {
diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c
index f00995390fdf..13802a3c3591 100644
--- a/drivers/platform/x86/touchscreen_dmi.c
+++ b/drivers/platform/x86/touchscreen_dmi.c
@@ -1098,6 +1098,15 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
},
},
{
+ /* Chuwi Vi8 (CWI501) */
+ .driver_data = (void *)&chuwi_vi8_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "i86"),
+ DMI_MATCH(DMI_BIOS_VERSION, "CHUWI.W86JLBNR01"),
+ },
+ },
+ {
/* Chuwi Vi8 (CWI506) */
.driver_data = (void *)&chuwi_vi8_data,
.matches = {
diff --git a/drivers/platform/x86/uv_sysfs.c b/drivers/platform/x86/uv_sysfs.c
index 73fc38ee7430..38d1b692d3c0 100644
--- a/drivers/platform/x86/uv_sysfs.c
+++ b/drivers/platform/x86/uv_sysfs.c
@@ -203,7 +203,7 @@ static const struct sysfs_ops hub_sysfs_ops = {
.show = hub_type_show,
};
-static struct kobj_type hub_attr_type = {
+static const struct kobj_type hub_attr_type = {
.release = hub_release,
.sysfs_ops = &hub_sysfs_ops,
.default_groups = uv_hub_groups,
@@ -356,7 +356,7 @@ static const struct sysfs_ops uv_port_sysfs_ops = {
.show = uv_port_type_show,
};
-static struct kobj_type uv_port_attr_type = {
+static const struct kobj_type uv_port_attr_type = {
.release = uv_port_release,
.sysfs_ops = &uv_port_sysfs_ops,
.default_groups = uv_port_groups,
@@ -528,7 +528,7 @@ static const struct sysfs_ops uv_pci_top_sysfs_ops = {
.show = pci_top_type_show,
};
-static struct kobj_type uv_pci_top_attr_type = {
+static const struct kobj_type uv_pci_top_attr_type = {
.release = uv_pci_top_release,
.sysfs_ops = &uv_pci_top_sysfs_ops,
};
diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
index 5ffc00480aef..4fe7650dd014 100644
--- a/drivers/platform/x86/wmi.c
+++ b/drivers/platform/x86/wmi.c
@@ -693,15 +693,8 @@ char *wmi_get_acpi_device_uid(const char *guid_string)
}
EXPORT_SYMBOL_GPL(wmi_get_acpi_device_uid);
-static struct wmi_block *dev_to_wblock(struct device *dev)
-{
- return container_of(dev, struct wmi_block, dev.dev);
-}
-
-static struct wmi_device *dev_to_wdev(struct device *dev)
-{
- return container_of(dev, struct wmi_device, dev);
-}
+#define dev_to_wblock(__dev) container_of_const(__dev, struct wmi_block, dev.dev)
+#define dev_to_wdev(__dev) container_of_const(__dev, struct wmi_device, dev)
static inline struct wmi_driver *drv_to_wdrv(struct device_driver *drv)
{
@@ -804,9 +797,9 @@ static struct attribute *wmi_method_attrs[] = {
};
ATTRIBUTE_GROUPS(wmi_method);
-static int wmi_dev_uevent(struct device *dev, struct kobj_uevent_env *env)
+static int wmi_dev_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
- struct wmi_block *wblock = dev_to_wblock(dev);
+ const struct wmi_block *wblock = dev_to_wblock(dev);
if (add_uevent_var(env, "MODALIAS=wmi:%pUL", &wblock->gblock.guid))
return -ENOMEM;
diff --git a/drivers/platform/x86/x86-android-tablets.c b/drivers/platform/x86/x86-android-tablets.c
index 123a4618db55..111b007656fc 100644
--- a/drivers/platform/x86/x86-android-tablets.c
+++ b/drivers/platform/x86/x86-android-tablets.c
@@ -187,7 +187,7 @@ struct x86_dev_info {
/* Generic / shared charger / battery settings */
static const char * const tusb1211_chg_det_psy[] = { "tusb1211-charger-detect" };
static const char * const bq24190_psy[] = { "bq24190-charger" };
-static const char * const bq25890_psy[] = { "bq25890-charger" };
+static const char * const bq25890_psy[] = { "bq25890-charger-0" };
static const struct property_entry fg_bq24190_supply_props[] = {
PROPERTY_ENTRY_STRING_ARRAY("supplied-from", bq24190_psy),
diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig
index a8c46ba5878f..8c87eeda0fec 100644
--- a/drivers/power/reset/Kconfig
+++ b/drivers/power/reset/Kconfig
@@ -141,6 +141,13 @@ config POWER_RESET_OCELOT_RESET
help
This driver supports restart for Microsemi Ocelot SoC and similar.
+config POWER_RESET_ODROID_GO_ULTRA_POWEROFF
+ bool "Odroid Go Ultra power-off driver"
+ depends on ARCH_MESON || COMPILE_TEST
+ depends on I2C=y && OF
+ help
+ This driver supports Power off for Odroid Go Ultra device.
+
config POWER_RESET_OXNAS
bool "OXNAS SoC restart driver"
depends on ARCH_OXNAS
diff --git a/drivers/power/reset/Makefile b/drivers/power/reset/Makefile
index 0a39424fc558..d763e6735ee3 100644
--- a/drivers/power/reset/Makefile
+++ b/drivers/power/reset/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_POWER_RESET_MT6323) += mt6323-poweroff.o
obj-$(CONFIG_POWER_RESET_OXNAS) += oxnas-restart.o
obj-$(CONFIG_POWER_RESET_QCOM_PON) += qcom-pon.o
obj-$(CONFIG_POWER_RESET_OCELOT_RESET) += ocelot-reset.o
+obj-$(CONFIG_POWER_RESET_ODROID_GO_ULTRA_POWEROFF) += odroid-go-ultra-poweroff.o
obj-$(CONFIG_POWER_RESET_PIIX4_POWEROFF) += piix4-poweroff.o
obj-$(CONFIG_POWER_RESET_LTC2952) += ltc2952-poweroff.o
obj-$(CONFIG_POWER_RESET_QNAP) += qnap-poweroff.o
diff --git a/drivers/power/reset/odroid-go-ultra-poweroff.c b/drivers/power/reset/odroid-go-ultra-poweroff.c
new file mode 100644
index 000000000000..f46271da4e8e
--- /dev/null
+++ b/drivers/power/reset/odroid-go-ultra-poweroff.c
@@ -0,0 +1,177 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) 2023 Neil Armstrong <neil.armstrong@linaro.org>
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/of_platform.h>
+#include <linux/mfd/rk808.h>
+#include <linux/regmap.h>
+#include <linux/module.h>
+#include <linux/reboot.h>
+#include <linux/i2c.h>
+
+/*
+ * The Odroid Go Ultra has 2 PMICs:
+ * - RK818 (manages the battery and USB-C power supply)
+ * - RK817
+ * Both PMICs feeds power to the S922X SoC, so they must be powered-off in sequence.
+ * Vendor does power-off the RK817 first, then the RK818 so here we follow this sequence.
+ */
+
+struct odroid_go_ultra_poweroff_data {
+ struct device *dev;
+ struct device *rk817;
+ struct device *rk818;
+};
+
+static int odroid_go_ultra_poweroff_prepare(struct sys_off_data *data)
+{
+ struct odroid_go_ultra_poweroff_data *poweroff_data = data->cb_data;
+ struct regmap *rk817, *rk818;
+ int ret;
+
+ /* RK817 Regmap */
+ rk817 = dev_get_regmap(poweroff_data->rk817, NULL);
+ if (!rk817) {
+ dev_err(poweroff_data->dev, "failed to get rk817 regmap\n");
+ return notifier_from_errno(-EINVAL);
+ }
+
+ /* RK818 Regmap */
+ rk818 = dev_get_regmap(poweroff_data->rk818, NULL);
+ if (!rk818) {
+ dev_err(poweroff_data->dev, "failed to get rk818 regmap\n");
+ return notifier_from_errno(-EINVAL);
+ }
+
+ dev_info(poweroff_data->dev, "Setting PMICs for power off");
+
+ /* RK817 */
+ ret = regmap_update_bits(rk817, RK817_SYS_CFG(3), DEV_OFF, DEV_OFF);
+ if (ret) {
+ dev_err(poweroff_data->dev, "failed to poweroff rk817\n");
+ return notifier_from_errno(ret);
+ }
+
+ /* RK818 */
+ ret = regmap_update_bits(rk818, RK818_DEVCTRL_REG, DEV_OFF, DEV_OFF);
+ if (ret) {
+ dev_err(poweroff_data->dev, "failed to poweroff rk818\n");
+ return notifier_from_errno(ret);
+ }
+
+ return NOTIFY_OK;
+}
+
+static void odroid_go_ultra_poweroff_put_pmic_device(void *data)
+{
+ struct device *dev = data;
+
+ put_device(dev);
+}
+
+static int odroid_go_ultra_poweroff_get_pmic_device(struct device *dev, const char *compatible,
+ struct device **pmic)
+{
+ struct device_node *pmic_node;
+ struct i2c_client *pmic_client;
+
+ pmic_node = of_find_compatible_node(NULL, NULL, compatible);
+ if (!pmic_node)
+ return -ENODEV;
+
+ pmic_client = of_find_i2c_device_by_node(pmic_node);
+ of_node_put(pmic_node);
+ if (!pmic_client)
+ return -EPROBE_DEFER;
+
+ *pmic = &pmic_client->dev;
+
+ return devm_add_action_or_reset(dev, odroid_go_ultra_poweroff_put_pmic_device, *pmic);
+}
+
+static int odroid_go_ultra_poweroff_probe(struct platform_device *pdev)
+{
+ struct odroid_go_ultra_poweroff_data *poweroff_data;
+ int ret;
+
+ poweroff_data = devm_kzalloc(&pdev->dev, sizeof(*poweroff_data), GFP_KERNEL);
+ if (!poweroff_data)
+ return -ENOMEM;
+
+ dev_set_drvdata(&pdev->dev, poweroff_data);
+
+ /* RK818 PMIC Device */
+ ret = odroid_go_ultra_poweroff_get_pmic_device(&pdev->dev, "rockchip,rk818",
+ &poweroff_data->rk818);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "failed to get rk818 mfd data\n");
+
+ /* RK817 PMIC Device */
+ ret = odroid_go_ultra_poweroff_get_pmic_device(&pdev->dev, "rockchip,rk817",
+ &poweroff_data->rk817);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "failed to get rk817 mfd data\n");
+
+ /* Register as SYS_OFF_MODE_POWER_OFF_PREPARE because regmap_update_bits may sleep */
+ ret = devm_register_sys_off_handler(&pdev->dev,
+ SYS_OFF_MODE_POWER_OFF_PREPARE,
+ SYS_OFF_PRIO_DEFAULT,
+ odroid_go_ultra_poweroff_prepare,
+ poweroff_data);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "failed to register sys-off handler\n");
+
+ dev_info(&pdev->dev, "Registered Power-Off handler\n");
+
+ return 0;
+}
+static struct platform_device *pdev;
+
+static struct platform_driver odroid_go_ultra_poweroff_driver = {
+ .driver = {
+ .name = "odroid-go-ultra-poweroff",
+ },
+ .probe = odroid_go_ultra_poweroff_probe,
+};
+
+static int __init odroid_go_ultra_poweroff_init(void)
+{
+ int ret;
+
+ /* Only create when running on the Odroid Go Ultra device */
+ if (!of_device_is_compatible(of_root, "hardkernel,odroid-go-ultra"))
+ return -ENODEV;
+
+ ret = platform_driver_register(&odroid_go_ultra_poweroff_driver);
+ if (ret)
+ return ret;
+
+ pdev = platform_device_register_resndata(NULL, "odroid-go-ultra-poweroff", -1,
+ NULL, 0, NULL, 0);
+
+ if (IS_ERR(pdev)) {
+ platform_driver_unregister(&odroid_go_ultra_poweroff_driver);
+ return PTR_ERR(pdev);
+ }
+
+ return 0;
+}
+
+static void __exit odroid_go_ultra_poweroff_exit(void)
+{
+ /* Only delete when running on the Odroid Go Ultra device */
+ if (!of_device_is_compatible(of_root, "hardkernel,odroid-go-ultra"))
+ return;
+
+ platform_device_unregister(pdev);
+ platform_driver_unregister(&odroid_go_ultra_poweroff_driver);
+}
+
+module_init(odroid_go_ultra_poweroff_init);
+module_exit(odroid_go_ultra_poweroff_exit);
+
+MODULE_AUTHOR("Neil Armstrong <neil.armstrong@linaro.org>");
+MODULE_DESCRIPTION("Odroid Go Ultra poweroff driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/power/reset/syscon-reboot.c b/drivers/power/reset/syscon-reboot.c
index 510e363381ca..45e34e6885f7 100644
--- a/drivers/power/reset/syscon-reboot.c
+++ b/drivers/power/reset/syscon-reboot.c
@@ -44,6 +44,7 @@ static int syscon_reboot_probe(struct platform_device *pdev)
struct syscon_reboot_context *ctx;
struct device *dev = &pdev->dev;
int mask_err, value_err;
+ int priority;
int err;
ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
@@ -57,6 +58,9 @@ static int syscon_reboot_probe(struct platform_device *pdev)
return PTR_ERR(ctx->map);
}
+ if (of_property_read_s32(pdev->dev.of_node, "priority", &priority))
+ priority = 192;
+
if (of_property_read_u32(pdev->dev.of_node, "offset", &ctx->offset))
return -EINVAL;
@@ -77,7 +81,7 @@ static int syscon_reboot_probe(struct platform_device *pdev)
}
ctx->restart_handler.notifier_call = syscon_restart_handle;
- ctx->restart_handler.priority = 192;
+ ctx->restart_handler.priority = priority;
err = register_restart_handler(&ctx->restart_handler);
if (err)
dev_err(dev, "can't register restart notifier (err=%d)\n", err);
diff --git a/drivers/power/supply/Kconfig b/drivers/power/supply/Kconfig
index 0bbfe6a7ce4d..6a32f022943b 100644
--- a/drivers/power/supply/Kconfig
+++ b/drivers/power/supply/Kconfig
@@ -28,15 +28,6 @@ config POWER_SUPPLY_HWMON
Say 'Y' here if you want power supplies to
have hwmon sysfs interface too.
-
-config PDA_POWER
- tristate "Generic PDA/phone power driver"
- depends on !S390
- help
- Say Y here to enable generic power driver for PDAs and phones with
- one or two external power supplies (AC/USB) connected to main and
- backup batteries, and optional builtin charger.
-
config APM_POWER
tristate "APM emulation for class batteries"
depends on APM_EMULATION
@@ -195,13 +186,6 @@ config BATTERY_SAMSUNG_SDI
Say Y to enable support for Samsung SDI battery data.
These batteries are used in Samsung mobile phones.
-config BATTERY_TOSA
- tristate "Sharp SL-6000 (tosa) battery"
- depends on MACH_TOSA && MFD_TC6393XB && TOUCHSCREEN_WM97XX
- help
- Say Y to enable support for the battery on the Sharp Zaurus
- SL-6000 (tosa) models.
-
config BATTERY_COLLIE
tristate "Sharp SL-5500 (collie) battery"
depends on SA1100_COLLIE && MCP_UCB1200
@@ -422,18 +406,6 @@ config BATTERY_MAX1721X
Say Y here to enable support for the MAX17211/MAX17215 standalone
battery gas-gauge.
-config BATTERY_Z2
- tristate "Z2 battery driver"
- depends on I2C && MACH_ZIPIT2
- help
- Say Y to include support for the battery on the Zipit Z2.
-
-config BATTERY_S3C_ADC
- tristate "Battery driver for Samsung ADC based monitoring"
- depends on S3C_ADC
- help
- Say Y here to enable support for iPAQ h1930/h1940/rx1950 battery
-
config BATTERY_TWL4030_MADC
tristate "TWL4030 MADC battery driver"
depends on TWL4030_MADC
@@ -793,6 +765,41 @@ config CHARGER_RT9455
help
Say Y to enable support for Richtek RT9455 battery charger.
+config CHARGER_RT9467
+ tristate "Richtek RT9467 Battery Charger Driver"
+ depends on I2C && GPIOLIB && REGULATOR
+ select REGMAP_I2C
+ select REGMAP_IRQ
+ select LINEAR_RANGES
+ help
+ Say Y here to enable RT9467 Battery Charger.
+ RT9467 is a switch-mode single cell Li-Ion/Li-Polymer battery charger
+ for portable applications. It integrates a synchronous PWM controller,
+ power MOSFETs, input current sensing and regulation, high-accuracy
+ voltage regulation, and charge termination. The charge current is
+ regulated through integrated sensing resistors. It also features
+ USB On-The-Go (OTG) support and integrates D+/D- pin for USB
+ host/charging port detection.
+
+ This driver can also be built as a module. If so, the module
+ will be called "rt9467-charger".
+
+config CHARGER_RT9471
+ tristate "Richtek RT9471 battery charger driver"
+ depends on I2C && GPIOLIB && REGULATOR
+ select REGMAP_I2C
+ select REGMAP_IRQ
+ select LINEAR_RANGES
+ help
+ This adds support for Richtek RT9471 battery charger. RT9471 is
+ highly-integrated switch mode battery charger which is system power
+ patch manageable device for single cell Li-Ion and Li-polymer battery.
+ It can support BC12 detection on DPDM, and current and voltage
+ regulation on both charging and boost mode.
+
+ This driver can also be built as a module. If so, the module will be
+ called rt9471.
+
config CHARGER_CROS_USBPD
tristate "ChromeOS EC based USBPD charger"
depends on CROS_USBPD_NOTIFY
diff --git a/drivers/power/supply/Makefile b/drivers/power/supply/Makefile
index 0ee8653e882e..3aec9bc10f3c 100644
--- a/drivers/power/supply/Makefile
+++ b/drivers/power/supply/Makefile
@@ -9,7 +9,6 @@ obj-$(CONFIG_POWER_SUPPLY) += power_supply.o
obj-$(CONFIG_POWER_SUPPLY_HWMON) += power_supply_hwmon.o
obj-$(CONFIG_GENERIC_ADC_BATTERY) += generic-adc-battery.o
-obj-$(CONFIG_PDA_POWER) += pda_power.o
obj-$(CONFIG_APM_POWER) += apm_power.o
obj-$(CONFIG_AXP20X_POWER) += axp20x_usb_power.o
obj-$(CONFIG_IP5XXX_POWER) += ip5xxx_power.o
@@ -36,7 +35,6 @@ obj-$(CONFIG_BATTERY_LEGO_EV3) += lego_ev3_battery.o
obj-$(CONFIG_BATTERY_PMU) += pmu_battery.o
obj-$(CONFIG_BATTERY_OLPC) += olpc_battery.o
obj-$(CONFIG_BATTERY_SAMSUNG_SDI) += samsung-sdi-battery.o
-obj-$(CONFIG_BATTERY_TOSA) += tosa_battery.o
obj-$(CONFIG_BATTERY_COLLIE) += collie_battery.o
obj-$(CONFIG_BATTERY_INGENIC) += ingenic-battery.o
obj-$(CONFIG_BATTERY_IPAQ_MICRO) += ipaq_micro_battery.o
@@ -54,10 +52,10 @@ obj-$(CONFIG_BATTERY_DA9150) += da9150-fg.o
obj-$(CONFIG_BATTERY_MAX17040) += max17040_battery.o
obj-$(CONFIG_BATTERY_MAX17042) += max17042_battery.o
obj-$(CONFIG_BATTERY_MAX1721X) += max1721x_battery.o
-obj-$(CONFIG_BATTERY_Z2) += z2_battery.o
obj-$(CONFIG_BATTERY_RT5033) += rt5033_battery.o
obj-$(CONFIG_CHARGER_RT9455) += rt9455_charger.o
-obj-$(CONFIG_BATTERY_S3C_ADC) += s3c_adc_battery.o
+obj-$(CONFIG_CHARGER_RT9467) += rt9467-charger.o
+obj-$(CONFIG_CHARGER_RT9471) += rt9471.o
obj-$(CONFIG_BATTERY_TWL4030_MADC) += twl4030_madc_battery.o
obj-$(CONFIG_CHARGER_88PM860X) += 88pm860x_charger.o
obj-$(CONFIG_CHARGER_PCF50633) += pcf50633-charger.o
diff --git a/drivers/power/supply/ab8500_fg.c b/drivers/power/supply/ab8500_fg.c
index c6c9804280db..41a7bff9ac37 100644
--- a/drivers/power/supply/ab8500_fg.c
+++ b/drivers/power/supply/ab8500_fg.c
@@ -2453,7 +2453,7 @@ struct ab8500_fg_sysfs_entry {
static ssize_t charge_full_show(struct ab8500_fg *di, char *buf)
{
- return sprintf(buf, "%d\n", di->bat_cap.max_mah);
+ return sysfs_emit(buf, "%d\n", di->bat_cap.max_mah);
}
static ssize_t charge_full_store(struct ab8500_fg *di, const char *buf,
@@ -2472,7 +2472,7 @@ static ssize_t charge_full_store(struct ab8500_fg *di, const char *buf,
static ssize_t charge_now_show(struct ab8500_fg *di, char *buf)
{
- return sprintf(buf, "%d\n", di->bat_cap.prev_mah);
+ return sysfs_emit(buf, "%d\n", di->bat_cap.prev_mah);
}
static ssize_t charge_now_store(struct ab8500_fg *di, const char *buf,
@@ -2594,7 +2594,7 @@ static ssize_t ab8505_powercut_flagtime_read(struct device *dev,
goto fail;
}
- return scnprintf(buf, PAGE_SIZE, "%d\n", (reg_value & 0x7F));
+ return sysfs_emit(buf, "%d\n", (reg_value & 0x7F));
fail:
return ret;
@@ -2644,7 +2644,7 @@ static ssize_t ab8505_powercut_maxtime_read(struct device *dev,
goto fail;
}
- return scnprintf(buf, PAGE_SIZE, "%d\n", (reg_value & 0x7F));
+ return sysfs_emit(buf, "%d\n", (reg_value & 0x7F));
fail:
return ret;
@@ -2695,7 +2695,7 @@ static ssize_t ab8505_powercut_restart_read(struct device *dev,
goto fail;
}
- return scnprintf(buf, PAGE_SIZE, "%d\n", (reg_value & 0xF));
+ return sysfs_emit(buf, "%d\n", (reg_value & 0xF));
fail:
return ret;
@@ -2746,7 +2746,7 @@ static ssize_t ab8505_powercut_timer_read(struct device *dev,
goto fail;
}
- return scnprintf(buf, PAGE_SIZE, "%d\n", (reg_value & 0x7F));
+ return sysfs_emit(buf, "%d\n", (reg_value & 0x7F));
fail:
return ret;
@@ -2769,7 +2769,7 @@ static ssize_t ab8505_powercut_restart_counter_read(struct device *dev,
goto fail;
}
- return scnprintf(buf, PAGE_SIZE, "%d\n", (reg_value & 0xF0) >> 4);
+ return sysfs_emit(buf, "%d\n", (reg_value & 0xF0) >> 4);
fail:
return ret;
@@ -2790,7 +2790,7 @@ static ssize_t ab8505_powercut_read(struct device *dev,
if (ret < 0)
goto fail;
- return scnprintf(buf, PAGE_SIZE, "%d\n", (reg_value & 0x1));
+ return sysfs_emit(buf, "%d\n", (reg_value & 0x1));
fail:
return ret;
@@ -2841,7 +2841,7 @@ static ssize_t ab8505_powercut_flag_read(struct device *dev,
goto fail;
}
- return scnprintf(buf, PAGE_SIZE, "%d\n", ((reg_value & 0x10) >> 4));
+ return sysfs_emit(buf, "%d\n", ((reg_value & 0x10) >> 4));
fail:
return ret;
@@ -2864,7 +2864,7 @@ static ssize_t ab8505_powercut_debounce_read(struct device *dev,
goto fail;
}
- return scnprintf(buf, PAGE_SIZE, "%d\n", (reg_value & 0x7));
+ return sysfs_emit(buf, "%d\n", (reg_value & 0x7));
fail:
return ret;
@@ -2914,7 +2914,7 @@ static ssize_t ab8505_powercut_enable_status_read(struct device *dev,
goto fail;
}
- return scnprintf(buf, PAGE_SIZE, "%d\n", ((reg_value & 0x20) >> 5));
+ return sysfs_emit(buf, "%d\n", ((reg_value & 0x20) >> 5));
fail:
return ret;
diff --git a/drivers/power/supply/bq2415x_charger.c b/drivers/power/supply/bq2415x_charger.c
index d2cb7431dced..349b69d634e6 100644
--- a/drivers/power/supply/bq2415x_charger.c
+++ b/drivers/power/supply/bq2415x_charger.c
@@ -1059,7 +1059,7 @@ static ssize_t bq2415x_sysfs_show_status(struct device *dev,
ret = bq2415x_exec_command(bq, command);
if (ret < 0)
return ret;
- return sprintf(buf, "%d\n", ret);
+ return sysfs_emit(buf, "%d\n", ret);
}
/*
@@ -1098,11 +1098,11 @@ static ssize_t bq2415x_sysfs_show_timer(struct device *dev,
struct bq2415x_device *bq = power_supply_get_drvdata(psy);
if (bq->timer_error)
- return sprintf(buf, "%s\n", bq->timer_error);
+ return sysfs_emit(buf, "%s\n", bq->timer_error);
if (bq->autotimer)
- return sprintf(buf, "auto\n");
- return sprintf(buf, "off\n");
+ return sysfs_emit(buf, "auto\n");
+ return sysfs_emit(buf, "off\n");
}
/*
@@ -1175,30 +1175,30 @@ static ssize_t bq2415x_sysfs_show_mode(struct device *dev,
ssize_t ret = 0;
if (bq->automode > 0)
- ret += sprintf(buf+ret, "auto (");
+ ret += sysfs_emit_at(buf, ret, "auto (");
switch (bq->mode) {
case BQ2415X_MODE_OFF:
- ret += sprintf(buf+ret, "off");
+ ret += sysfs_emit_at(buf, ret, "off");
break;
case BQ2415X_MODE_NONE:
- ret += sprintf(buf+ret, "none");
+ ret += sysfs_emit_at(buf, ret, "none");
break;
case BQ2415X_MODE_HOST_CHARGER:
- ret += sprintf(buf+ret, "host");
+ ret += sysfs_emit_at(buf, ret, "host");
break;
case BQ2415X_MODE_DEDICATED_CHARGER:
- ret += sprintf(buf+ret, "dedicated");
+ ret += sysfs_emit_at(buf, ret, "dedicated");
break;
case BQ2415X_MODE_BOOST:
- ret += sprintf(buf+ret, "boost");
+ ret += sysfs_emit_at(buf, ret, "boost");
break;
}
if (bq->automode > 0)
- ret += sprintf(buf+ret, ")");
+ ret += sysfs_emit_at(buf, ret, ")");
- ret += sprintf(buf+ret, "\n");
+ ret += sysfs_emit_at(buf, ret, "\n");
return ret;
}
@@ -1215,15 +1215,15 @@ static ssize_t bq2415x_sysfs_show_reported_mode(struct device *dev,
switch (bq->reported_mode) {
case BQ2415X_MODE_OFF:
- return sprintf(buf, "off\n");
+ return sysfs_emit(buf, "off\n");
case BQ2415X_MODE_NONE:
- return sprintf(buf, "none\n");
+ return sysfs_emit(buf, "none\n");
case BQ2415X_MODE_HOST_CHARGER:
- return sprintf(buf, "host\n");
+ return sysfs_emit(buf, "host\n");
case BQ2415X_MODE_DEDICATED_CHARGER:
- return sprintf(buf, "dedicated\n");
+ return sysfs_emit(buf, "dedicated\n");
case BQ2415X_MODE_BOOST:
- return sprintf(buf, "boost\n");
+ return sysfs_emit(buf, "boost\n");
}
return -EINVAL;
@@ -1261,8 +1261,8 @@ static ssize_t bq2415x_sysfs_print_reg(struct bq2415x_device *bq,
int ret = bq2415x_i2c_read(bq, reg);
if (ret < 0)
- return sprintf(buf, "%#.2x=error %d\n", reg, ret);
- return sprintf(buf, "%#.2x=%#.2x\n", reg, ret);
+ return sysfs_emit(buf, "%#.2x=error %d\n", reg, ret);
+ return sysfs_emit(buf, "%#.2x=%#.2x\n", reg, ret);
}
/* show all raw values of chip register, format per line: 'register=value' */
@@ -1338,7 +1338,7 @@ static ssize_t bq2415x_sysfs_show_limit(struct device *dev,
if (ret < 0)
return ret;
- return sprintf(buf, "%d\n", ret);
+ return sysfs_emit(buf, "%d\n", ret);
}
/* set *_enable entries */
@@ -1401,7 +1401,7 @@ static ssize_t bq2415x_sysfs_show_enable(struct device *dev,
ret = bq2415x_exec_command(bq, command);
if (ret < 0)
return ret;
- return sprintf(buf, "%d\n", ret);
+ return sysfs_emit(buf, "%d\n", ret);
}
static DEVICE_ATTR(current_limit, S_IWUSR | S_IRUGO,
diff --git a/drivers/power/supply/bq24190_charger.c b/drivers/power/supply/bq24190_charger.c
index 2b2c3a4391c1..be34b9848450 100644
--- a/drivers/power/supply/bq24190_charger.c
+++ b/drivers/power/supply/bq24190_charger.c
@@ -463,7 +463,7 @@ static ssize_t bq24190_sysfs_show(struct device *dev,
if (ret)
count = ret;
else
- count = scnprintf(buf, PAGE_SIZE, "%hhx\n", v);
+ count = sysfs_emit(buf, "%hhx\n", v);
pm_runtime_mark_last_busy(bdi->dev);
pm_runtime_put_autosuspend(bdi->dev);
diff --git a/drivers/power/supply/bq24257_charger.c b/drivers/power/supply/bq24257_charger.c
index ab4c49788c58..103ddc2b3def 100644
--- a/drivers/power/supply/bq24257_charger.c
+++ b/drivers/power/supply/bq24257_charger.c
@@ -767,8 +767,7 @@ static ssize_t bq24257_show_ovp_voltage(struct device *dev,
struct power_supply *psy = dev_get_drvdata(dev);
struct bq24257_device *bq = power_supply_get_drvdata(psy);
- return scnprintf(buf, PAGE_SIZE, "%u\n",
- bq24257_vovp_map[bq->init_data.vovp]);
+ return sysfs_emit(buf, "%u\n", bq24257_vovp_map[bq->init_data.vovp]);
}
static ssize_t bq24257_show_in_dpm_voltage(struct device *dev,
@@ -778,8 +777,7 @@ static ssize_t bq24257_show_in_dpm_voltage(struct device *dev,
struct power_supply *psy = dev_get_drvdata(dev);
struct bq24257_device *bq = power_supply_get_drvdata(psy);
- return scnprintf(buf, PAGE_SIZE, "%u\n",
- bq24257_vindpm_map[bq->init_data.vindpm]);
+ return sysfs_emit(buf, "%u\n", bq24257_vindpm_map[bq->init_data.vindpm]);
}
static ssize_t bq24257_sysfs_show_enable(struct device *dev,
@@ -800,7 +798,7 @@ static ssize_t bq24257_sysfs_show_enable(struct device *dev,
if (ret < 0)
return ret;
- return scnprintf(buf, PAGE_SIZE, "%d\n", ret);
+ return sysfs_emit(buf, "%d\n", ret);
}
static ssize_t bq24257_sysfs_set_enable(struct device *dev,
diff --git a/drivers/power/supply/bq256xx_charger.c b/drivers/power/supply/bq256xx_charger.c
index db13e288e439..9cf4936440c9 100644
--- a/drivers/power/supply/bq256xx_charger.c
+++ b/drivers/power/supply/bq256xx_charger.c
@@ -1563,7 +1563,7 @@ static int bq256xx_hw_init(struct bq256xx_device *bq)
return ret;
ret = bq->chip_info->bq256xx_set_ichg(bq,
- bat_info->constant_charge_current_max_ua);
+ bq->chip_info->bq256xx_def_ichg);
if (ret)
return ret;
@@ -1573,7 +1573,7 @@ static int bq256xx_hw_init(struct bq256xx_device *bq)
return ret;
ret = bq->chip_info->bq256xx_set_vbatreg(bq,
- bat_info->constant_charge_voltage_max_uv);
+ bq->chip_info->bq256xx_def_vbatreg);
if (ret)
return ret;
diff --git a/drivers/power/supply/bq25890_charger.c b/drivers/power/supply/bq25890_charger.c
index 2d731ea58323..bfe08d7bfaf3 100644
--- a/drivers/power/supply/bq25890_charger.c
+++ b/drivers/power/supply/bq25890_charger.c
@@ -95,6 +95,7 @@ struct bq25890_init_data {
struct bq25890_state {
u8 online;
+ u8 hiz;
u8 chrg_status;
u8 chrg_fault;
u8 vsys_status;
@@ -107,6 +108,10 @@ struct bq25890_device {
struct i2c_client *client;
struct device *dev;
struct power_supply *charger;
+ struct power_supply *secondary_chrg;
+ struct power_supply_desc desc;
+ char name[28]; /* "bq25890-charger-%d" */
+ int id;
struct usb_phy *usb_phy;
struct notifier_block usb_nb;
@@ -119,7 +124,9 @@ struct bq25890_device {
bool skip_reset;
bool read_back_init_data;
+ bool force_hiz;
u32 pump_express_vbus_max;
+ u32 iinlim_percentage;
enum bq25890_chip_version chip_version;
struct bq25890_init_data init_data;
struct bq25890_state state;
@@ -127,6 +134,9 @@ struct bq25890_device {
struct mutex lock; /* protect state data */
};
+static DEFINE_IDR(bq25890_id);
+static DEFINE_MUTEX(bq25890_id_mutex);
+
static const struct regmap_range bq25890_readonly_reg_ranges[] = {
regmap_reg_range(0x0b, 0x0c),
regmap_reg_range(0x0e, 0x13),
@@ -454,20 +464,18 @@ static int bq25890_get_vbus_voltage(struct bq25890_device *bq)
return bq25890_find_val(ret, TBL_VBUSV);
}
-static int bq25890_power_supply_get_property(struct power_supply *psy,
- enum power_supply_property psp,
- union power_supply_propval *val)
+static void bq25890_update_state(struct bq25890_device *bq,
+ enum power_supply_property psp,
+ struct bq25890_state *state)
{
- struct bq25890_device *bq = power_supply_get_drvdata(psy);
- struct bq25890_state state;
bool do_adc_conv;
int ret;
mutex_lock(&bq->lock);
/* update state in case we lost an interrupt */
__bq25890_handle_irq(bq);
- state = bq->state;
- do_adc_conv = !state.online && bq25890_is_adc_property(psp);
+ *state = bq->state;
+ do_adc_conv = (!state->online || state->hiz) && bq25890_is_adc_property(psp);
if (do_adc_conv)
bq25890_field_write(bq, F_CONV_START, 1);
mutex_unlock(&bq->lock);
@@ -475,10 +483,21 @@ static int bq25890_power_supply_get_property(struct power_supply *psy,
if (do_adc_conv)
regmap_field_read_poll_timeout(bq->rmap_fields[F_CONV_START],
ret, !ret, 25000, 1000000);
+}
+
+static int bq25890_power_supply_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct bq25890_device *bq = power_supply_get_drvdata(psy);
+ struct bq25890_state state;
+ int ret;
+
+ bq25890_update_state(bq, psp, &state);
switch (psp) {
case POWER_SUPPLY_PROP_STATUS:
- if (!state.online)
+ if (!state.online || state.hiz)
val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
else if (state.chrg_status == STATUS_NOT_CHARGING)
val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
@@ -493,7 +512,8 @@ static int bq25890_power_supply_get_property(struct power_supply *psy,
break;
case POWER_SUPPLY_PROP_CHARGE_TYPE:
- if (!state.online || state.chrg_status == STATUS_NOT_CHARGING ||
+ if (!state.online || state.hiz ||
+ state.chrg_status == STATUS_NOT_CHARGING ||
state.chrg_status == STATUS_TERMINATION_DONE)
val->intval = POWER_SUPPLY_CHARGE_TYPE_NONE;
else if (state.chrg_status == STATUS_PRE_CHARGING)
@@ -513,7 +533,7 @@ static int bq25890_power_supply_get_property(struct power_supply *psy,
break;
case POWER_SUPPLY_PROP_ONLINE:
- val->intval = state.online;
+ val->intval = state.online && !state.hiz;
break;
case POWER_SUPPLY_PROP_HEALTH:
@@ -667,7 +687,8 @@ static int bq25890_power_supply_set_property(struct power_supply *psy,
const union power_supply_propval *val)
{
struct bq25890_device *bq = power_supply_get_drvdata(psy);
- int maxval;
+ struct bq25890_state state;
+ int maxval, ret;
u8 lval;
switch (psp) {
@@ -682,6 +703,12 @@ static int bq25890_power_supply_set_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
lval = bq25890_find_idx(val->intval, TBL_IINLIM);
return bq25890_field_write(bq, F_IINLIM, lval);
+ case POWER_SUPPLY_PROP_ONLINE:
+ ret = bq25890_field_write(bq, F_EN_HIZ, !val->intval);
+ if (!ret)
+ bq->force_hiz = !val->intval;
+ bq25890_update_state(bq, psp, &state);
+ return ret;
default:
return -EINVAL;
}
@@ -694,12 +721,25 @@ static int bq25890_power_supply_property_is_writeable(struct power_supply *psy,
case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
+ case POWER_SUPPLY_PROP_ONLINE:
return true;
default:
return false;
}
}
+/*
+ * If there are multiple chargers the maximum current the external power-supply
+ * can deliver needs to be divided over the chargers. This is done according
+ * to the bq->iinlim_percentage setting.
+ */
+static int bq25890_charger_get_scaled_iinlim_regval(struct bq25890_device *bq,
+ int iinlim_ua)
+{
+ iinlim_ua = iinlim_ua * bq->iinlim_percentage / 100;
+ return bq25890_find_idx(iinlim_ua, TBL_IINLIM);
+}
+
/* On the BQ25892 try to get charger-type info from our supplier */
static void bq25890_charger_external_power_changed(struct power_supply *psy)
{
@@ -718,7 +758,7 @@ static void bq25890_charger_external_power_changed(struct power_supply *psy)
switch (val.intval) {
case POWER_SUPPLY_USB_TYPE_DCP:
- input_current_limit = bq25890_find_idx(2000000, TBL_IINLIM);
+ input_current_limit = bq25890_charger_get_scaled_iinlim_regval(bq, 2000000);
if (bq->pump_express_vbus_max) {
queue_delayed_work(system_power_efficient_wq,
&bq->pump_express_work,
@@ -727,11 +767,11 @@ static void bq25890_charger_external_power_changed(struct power_supply *psy)
break;
case POWER_SUPPLY_USB_TYPE_CDP:
case POWER_SUPPLY_USB_TYPE_ACA:
- input_current_limit = bq25890_find_idx(1500000, TBL_IINLIM);
+ input_current_limit = bq25890_charger_get_scaled_iinlim_regval(bq, 1500000);
break;
case POWER_SUPPLY_USB_TYPE_SDP:
default:
- input_current_limit = bq25890_find_idx(500000, TBL_IINLIM);
+ input_current_limit = bq25890_charger_get_scaled_iinlim_regval(bq, 500000);
}
bq25890_field_write(bq, F_IINLIM, input_current_limit);
@@ -748,6 +788,7 @@ static int bq25890_get_chip_state(struct bq25890_device *bq,
} state_fields[] = {
{F_CHG_STAT, &state->chrg_status},
{F_PG_STAT, &state->online},
+ {F_EN_HIZ, &state->hiz},
{F_VSYS_STAT, &state->vsys_status},
{F_BOOST_FAULT, &state->boost_fault},
{F_BAT_FAULT, &state->bat_fault},
@@ -763,16 +804,18 @@ static int bq25890_get_chip_state(struct bq25890_device *bq,
*state_fields[i].data = ret;
}
- dev_dbg(bq->dev, "S:CHG/PG/VSYS=%d/%d/%d, F:CHG/BOOST/BAT/NTC=%d/%d/%d/%d\n",
- state->chrg_status, state->online, state->vsys_status,
- state->chrg_fault, state->boost_fault, state->bat_fault,
- state->ntc_fault);
+ dev_dbg(bq->dev, "S:CHG/PG/HIZ/VSYS=%d/%d/%d/%d, F:CHG/BOOST/BAT/NTC=%d/%d/%d/%d\n",
+ state->chrg_status, state->online,
+ state->hiz, state->vsys_status,
+ state->chrg_fault, state->boost_fault,
+ state->bat_fault, state->ntc_fault);
return 0;
}
static irqreturn_t __bq25890_handle_irq(struct bq25890_device *bq)
{
+ bool adc_conv_rate, new_adc_conv_rate;
struct bq25890_state new_state;
int ret;
@@ -783,14 +826,23 @@ static irqreturn_t __bq25890_handle_irq(struct bq25890_device *bq)
if (!memcmp(&bq->state, &new_state, sizeof(new_state)))
return IRQ_NONE;
- if (!new_state.online && bq->state.online) { /* power removed */
- /* disable ADC */
- ret = bq25890_field_write(bq, F_CONV_RATE, 0);
+ /*
+ * Restore HiZ bit in case it was set by user. The chip does not retain
+ * this bit on cable replug, hence the bit must be reset manually here.
+ */
+ if (new_state.online && !bq->state.online && bq->force_hiz) {
+ ret = bq25890_field_write(bq, F_EN_HIZ, bq->force_hiz);
if (ret < 0)
goto error;
- } else if (new_state.online && !bq->state.online) { /* power inserted */
- /* enable ADC, to have control of charge current/voltage */
- ret = bq25890_field_write(bq, F_CONV_RATE, 1);
+ new_state.hiz = 1;
+ }
+
+ /* Should period ADC sampling be enabled? */
+ adc_conv_rate = bq->state.online && !bq->state.hiz;
+ new_adc_conv_rate = new_state.online && !new_state.hiz;
+
+ if (new_adc_conv_rate != adc_conv_rate) {
+ ret = bq25890_field_write(bq, F_CONV_RATE, new_adc_conv_rate);
if (ret < 0)
goto error;
}
@@ -924,7 +976,7 @@ static int bq25890_hw_init(struct bq25890_device *bq)
}
/* Configure ADC for continuous conversions when charging */
- ret = bq25890_field_write(bq, F_CONV_RATE, !!bq->state.online);
+ ret = bq25890_field_write(bq, F_CONV_RATE, bq->state.online && !bq->state.hiz);
if (ret < 0) {
dev_dbg(bq->dev, "Config ADC failed %d\n", ret);
return ret;
@@ -957,7 +1009,6 @@ static char *bq25890_charger_supplied_to[] = {
};
static const struct power_supply_desc bq25890_power_supply_desc = {
- .name = "bq25890-charger",
.type = POWER_SUPPLY_TYPE_USB,
.properties = bq25890_power_supply_props,
.num_properties = ARRAY_SIZE(bq25890_power_supply_props),
@@ -971,12 +1022,21 @@ static int bq25890_power_supply_init(struct bq25890_device *bq)
{
struct power_supply_config psy_cfg = { .drv_data = bq, };
+ /* Get ID for the device */
+ mutex_lock(&bq25890_id_mutex);
+ bq->id = idr_alloc(&bq25890_id, bq, 0, 0, GFP_KERNEL);
+ mutex_unlock(&bq25890_id_mutex);
+ if (bq->id < 0)
+ return bq->id;
+
+ snprintf(bq->name, sizeof(bq->name), "bq25890-charger-%d", bq->id);
+ bq->desc = bq25890_power_supply_desc;
+ bq->desc.name = bq->name;
+
psy_cfg.supplied_to = bq25890_charger_supplied_to;
psy_cfg.num_supplicants = ARRAY_SIZE(bq25890_charger_supplied_to);
- bq->charger = devm_power_supply_register(bq->dev,
- &bq25890_power_supply_desc,
- &psy_cfg);
+ bq->charger = devm_power_supply_register(bq->dev, &bq->desc, &psy_cfg);
return PTR_ERR_OR_ZERO(bq->charger);
}
@@ -996,10 +1056,17 @@ static void bq25890_pump_express_work(struct work_struct *data)
{
struct bq25890_device *bq =
container_of(data, struct bq25890_device, pump_express_work.work);
+ union power_supply_propval value;
int voltage, i, ret;
dev_dbg(bq->dev, "Start to request input voltage increasing\n");
+ /* If there is a second charger put in Hi-Z mode */
+ if (bq->secondary_chrg) {
+ value.intval = 0;
+ power_supply_set_property(bq->secondary_chrg, POWER_SUPPLY_PROP_ONLINE, &value);
+ }
+
/* Enable current pulse voltage control protocol */
ret = bq25890_field_write(bq, F_PUMPX_EN, 1);
if (ret < 0)
@@ -1031,6 +1098,11 @@ static void bq25890_pump_express_work(struct work_struct *data)
bq25890_field_write(bq, F_PUMPX_EN, 0);
+ if (bq->secondary_chrg) {
+ value.intval = 1;
+ power_supply_set_property(bq->secondary_chrg, POWER_SUPPLY_PROP_ONLINE, &value);
+ }
+
dev_info(bq->dev, "Hi-voltage charging requested, input voltage is %d mV\n",
voltage);
@@ -1077,6 +1149,17 @@ static int bq25890_usb_notifier(struct notifier_block *nb, unsigned long val,
static int bq25890_vbus_enable(struct regulator_dev *rdev)
{
struct bq25890_device *bq = rdev_get_drvdata(rdev);
+ union power_supply_propval val = {
+ .intval = 0,
+ };
+
+ /*
+ * When enabling 5V boost / Vbus output, we need to put the secondary
+ * charger in Hi-Z mode to avoid it trying to charge the secondary
+ * battery from the 5V boost output.
+ */
+ if (bq->secondary_chrg)
+ power_supply_set_property(bq->secondary_chrg, POWER_SUPPLY_PROP_ONLINE, &val);
return bq25890_set_otg_cfg(bq, 1);
}
@@ -1084,8 +1167,19 @@ static int bq25890_vbus_enable(struct regulator_dev *rdev)
static int bq25890_vbus_disable(struct regulator_dev *rdev)
{
struct bq25890_device *bq = rdev_get_drvdata(rdev);
+ union power_supply_propval val = {
+ .intval = 1,
+ };
+ int ret;
+
+ ret = bq25890_set_otg_cfg(bq, 0);
+ if (ret)
+ return ret;
- return bq25890_set_otg_cfg(bq, 0);
+ if (bq->secondary_chrg)
+ power_supply_set_property(bq->secondary_chrg, POWER_SUPPLY_PROP_ONLINE, &val);
+
+ return 0;
}
static int bq25890_vbus_is_enabled(struct regulator_dev *rdev)
@@ -1296,11 +1390,31 @@ static int bq25890_fw_probe(struct bq25890_device *bq)
{
int ret;
struct bq25890_init_data *init = &bq->init_data;
+ const char *str;
+ u32 val;
+
+ ret = device_property_read_string(bq->dev, "linux,secondary-charger-name", &str);
+ if (ret == 0) {
+ bq->secondary_chrg = power_supply_get_by_name(str);
+ if (!bq->secondary_chrg)
+ return -EPROBE_DEFER;
+ }
/* Optional, left at 0 if property is not present */
device_property_read_u32(bq->dev, "linux,pump-express-vbus-max",
&bq->pump_express_vbus_max);
+ ret = device_property_read_u32(bq->dev, "linux,iinlim-percentage", &val);
+ if (ret == 0) {
+ if (val > 100) {
+ dev_err(bq->dev, "Error linux,iinlim-percentage %u > 100\n", val);
+ return -EINVAL;
+ }
+ bq->iinlim_percentage = val;
+ } else {
+ bq->iinlim_percentage = 100;
+ }
+
bq->skip_reset = device_property_read_bool(bq->dev, "linux,skip-reset");
bq->read_back_init_data = device_property_read_bool(bq->dev,
"linux,read-back-settings");
@@ -1322,6 +1436,12 @@ static void bq25890_non_devm_cleanup(void *data)
struct bq25890_device *bq = data;
cancel_delayed_work_sync(&bq->pump_express_work);
+
+ if (bq->id >= 0) {
+ mutex_lock(&bq25890_id_mutex);
+ idr_remove(&bq25890_id, bq->id);
+ mutex_unlock(&bq25890_id_mutex);
+ }
}
static int bq25890_probe(struct i2c_client *client)
@@ -1336,6 +1456,7 @@ static int bq25890_probe(struct i2c_client *client)
bq->client = client;
bq->dev = dev;
+ bq->id = -1;
mutex_init(&bq->lock);
INIT_DELAYED_WORK(&bq->pump_express_work, bq25890_pump_express_work);
diff --git a/drivers/power/supply/bq27xxx_battery.c b/drivers/power/supply/bq27xxx_battery.c
index 8bf048fbd36a..5ff6f44fd47b 100644
--- a/drivers/power/supply/bq27xxx_battery.c
+++ b/drivers/power/supply/bq27xxx_battery.c
@@ -1917,10 +1917,10 @@ static int bq27xxx_battery_capacity_level(struct bq27xxx_device_info *di,
if (di->opts & BQ27XXX_O_ZERO) {
if (di->cache.flags & BQ27000_FLAG_FC)
level = POWER_SUPPLY_CAPACITY_LEVEL_FULL;
- else if (di->cache.flags & BQ27000_FLAG_EDV1)
- level = POWER_SUPPLY_CAPACITY_LEVEL_LOW;
else if (di->cache.flags & BQ27000_FLAG_EDVF)
level = POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL;
+ else if (di->cache.flags & BQ27000_FLAG_EDV1)
+ level = POWER_SUPPLY_CAPACITY_LEVEL_LOW;
else
level = POWER_SUPPLY_CAPACITY_LEVEL_NORMAL;
} else if (di->opts & BQ27Z561_O_BITS) {
@@ -1933,10 +1933,10 @@ static int bq27xxx_battery_capacity_level(struct bq27xxx_device_info *di,
} else {
if (di->cache.flags & BQ27XXX_FLAG_FC)
level = POWER_SUPPLY_CAPACITY_LEVEL_FULL;
- else if (di->cache.flags & BQ27XXX_FLAG_SOC1)
- level = POWER_SUPPLY_CAPACITY_LEVEL_LOW;
else if (di->cache.flags & BQ27XXX_FLAG_SOCF)
level = POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL;
+ else if (di->cache.flags & BQ27XXX_FLAG_SOC1)
+ level = POWER_SUPPLY_CAPACITY_LEVEL_LOW;
else
level = POWER_SUPPLY_CAPACITY_LEVEL_NORMAL;
}
diff --git a/drivers/power/supply/charger-manager.c b/drivers/power/supply/charger-manager.c
index 92db79400a6a..c9e8450c646f 100644
--- a/drivers/power/supply/charger-manager.c
+++ b/drivers/power/supply/charger-manager.c
@@ -1075,7 +1075,7 @@ static ssize_t charger_name_show(struct device *dev,
struct charger_regulator *charger
= container_of(attr, struct charger_regulator, attr_name);
- return sprintf(buf, "%s\n", charger->regulator_name);
+ return sysfs_emit(buf, "%s\n", charger->regulator_name);
}
static ssize_t charger_state_show(struct device *dev,
@@ -1088,7 +1088,7 @@ static ssize_t charger_state_show(struct device *dev,
if (!charger->externally_control)
state = regulator_is_enabled(charger->consumer);
- return sprintf(buf, "%s\n", state ? "enabled" : "disabled");
+ return sysfs_emit(buf, "%s\n", state ? "enabled" : "disabled");
}
static ssize_t charger_externally_control_show(struct device *dev,
@@ -1097,7 +1097,7 @@ static ssize_t charger_externally_control_show(struct device *dev,
struct charger_regulator *charger = container_of(attr,
struct charger_regulator, attr_externally_control);
- return sprintf(buf, "%d\n", charger->externally_control);
+ return sysfs_emit(buf, "%d\n", charger->externally_control);
}
static ssize_t charger_externally_control_store(struct device *dev,
diff --git a/drivers/power/supply/collie_battery.c b/drivers/power/supply/collie_battery.c
index 7fb9b549f2de..68390bd1004f 100644
--- a/drivers/power/supply/collie_battery.c
+++ b/drivers/power/supply/collie_battery.c
@@ -404,7 +404,7 @@ static int collie_bat_probe(struct ucb1x00_dev *dev)
goto err_psy_reg_bu;
}
- ret = request_irq(gpio_to_irq(COLLIE_GPIO_CO),
+ ret = request_irq(gpiod_to_irq(collie_bat_main.gpio_full),
collie_bat_gpio_isr,
IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
"main full", &collie_bat_main);
@@ -440,7 +440,7 @@ err_put_gpio_full:
static void collie_bat_remove(struct ucb1x00_dev *dev)
{
- free_irq(gpio_to_irq(COLLIE_GPIO_CO), &collie_bat_main);
+ free_irq(gpiod_to_irq(collie_bat_main.gpio_full), &collie_bat_main);
power_supply_unregister(collie_bat_bu.psy);
power_supply_unregister(collie_bat_main.psy);
diff --git a/drivers/power/supply/da9150-charger.c b/drivers/power/supply/da9150-charger.c
index f9314cc0cd75..14da5c595dd9 100644
--- a/drivers/power/supply/da9150-charger.c
+++ b/drivers/power/supply/da9150-charger.c
@@ -466,10 +466,8 @@ static int da9150_charger_register_irq(struct platform_device *pdev,
int irq, ret;
irq = platform_get_irq_byname(pdev, irq_name);
- if (irq < 0) {
- dev_err(dev, "Failed to get IRQ CHG_STATUS: %d\n", irq);
+ if (irq < 0)
return irq;
- }
ret = request_threaded_irq(irq, NULL, handler, IRQF_ONESHOT, irq_name,
charger);
@@ -482,15 +480,12 @@ static int da9150_charger_register_irq(struct platform_device *pdev,
static void da9150_charger_unregister_irq(struct platform_device *pdev,
const char *irq_name)
{
- struct device *dev = &pdev->dev;
struct da9150_charger *charger = platform_get_drvdata(pdev);
int irq;
irq = platform_get_irq_byname(pdev, irq_name);
- if (irq < 0) {
- dev_err(dev, "Failed to get IRQ CHG_STATUS: %d\n", irq);
+ if (irq < 0)
return;
- }
free_irq(irq, charger);
}
diff --git a/drivers/power/supply/ds2760_battery.c b/drivers/power/supply/ds2760_battery.c
index 5f50da524f41..40fba31be174 100644
--- a/drivers/power/supply/ds2760_battery.c
+++ b/drivers/power/supply/ds2760_battery.c
@@ -227,20 +227,12 @@ static int rated_capacities[] = {
920, /* NEC */
1440, /* Samsung */
1440, /* BYD */
-#ifdef CONFIG_MACH_H4700
- 1800, /* HP iPAQ hx4700 3.7V 1800mAh (359113-001) */
-#else
1440, /* Lishen */
-#endif
1440, /* NEC */
2880, /* Samsung */
2880, /* BYD */
2880, /* Lishen */
2880, /* NEC */
-#ifdef CONFIG_MACH_H4700
- 0,
- 3600, /* HP iPAQ hx4700 3.7V 3600mAh (359114-001) */
-#endif
};
/* array is level at temps 0°C, 10°C, 20°C, 30°C, 40°C
diff --git a/drivers/power/supply/ds2780_battery.c b/drivers/power/supply/ds2780_battery.c
index 2b8c90d84325..1e7f297f6cb1 100644
--- a/drivers/power/supply/ds2780_battery.c
+++ b/drivers/power/supply/ds2780_battery.c
@@ -454,7 +454,7 @@ static ssize_t ds2780_get_pmod_enabled(struct device *dev,
if (ret < 0)
return ret;
- return sprintf(buf, "%d\n",
+ return sysfs_emit(buf, "%d\n",
!!(control_reg & DS2780_CONTROL_REG_PMOD));
}
@@ -507,7 +507,7 @@ static ssize_t ds2780_get_sense_resistor_value(struct device *dev,
if (ret < 0)
return ret;
- ret = sprintf(buf, "%d\n", sense_resistor);
+ ret = sysfs_emit(buf, "%d\n", sense_resistor);
return ret;
}
@@ -545,7 +545,7 @@ static ssize_t ds2780_get_rsgain_setting(struct device *dev,
if (ret < 0)
return ret;
- return sprintf(buf, "%d\n", rsgain);
+ return sysfs_emit(buf, "%d\n", rsgain);
}
static ssize_t ds2780_set_rsgain_setting(struct device *dev,
@@ -588,7 +588,7 @@ static ssize_t ds2780_get_pio_pin(struct device *dev,
if (ret < 0)
return ret;
- ret = sprintf(buf, "%d\n", sfr & DS2780_SFR_REG_PIOSC);
+ ret = sysfs_emit(buf, "%d\n", sfr & DS2780_SFR_REG_PIOSC);
return ret;
}
diff --git a/drivers/power/supply/ds2781_battery.c b/drivers/power/supply/ds2781_battery.c
index 05b859bf2dc0..c4f8ccc687f9 100644
--- a/drivers/power/supply/ds2781_battery.c
+++ b/drivers/power/supply/ds2781_battery.c
@@ -456,7 +456,7 @@ static ssize_t ds2781_get_pmod_enabled(struct device *dev,
if (ret < 0)
return ret;
- return sprintf(buf, "%d\n",
+ return sysfs_emit(buf, "%d\n",
!!(control_reg & DS2781_CONTROL_PMOD));
}
@@ -509,7 +509,7 @@ static ssize_t ds2781_get_sense_resistor_value(struct device *dev,
if (ret < 0)
return ret;
- ret = sprintf(buf, "%d\n", sense_resistor);
+ ret = sysfs_emit(buf, "%d\n", sense_resistor);
return ret;
}
@@ -547,7 +547,7 @@ static ssize_t ds2781_get_rsgain_setting(struct device *dev,
if (ret < 0)
return ret;
- return sprintf(buf, "%d\n", rsgain);
+ return sysfs_emit(buf, "%d\n", rsgain);
}
static ssize_t ds2781_set_rsgain_setting(struct device *dev,
@@ -590,7 +590,7 @@ static ssize_t ds2781_get_pio_pin(struct device *dev,
if (ret < 0)
return ret;
- ret = sprintf(buf, "%d\n", sfr & DS2781_SFR_PIOSC);
+ ret = sysfs_emit(buf, "%d\n", sfr & DS2781_SFR_PIOSC);
return ret;
}
diff --git a/drivers/power/supply/lp8788-charger.c b/drivers/power/supply/lp8788-charger.c
index f5f47a0aa1e3..755b6a4379b8 100644
--- a/drivers/power/supply/lp8788-charger.c
+++ b/drivers/power/supply/lp8788-charger.c
@@ -602,7 +602,7 @@ static ssize_t lp8788_show_charger_status(struct device *dev,
lp8788_read_byte(pchg->lp, LP8788_CHG_STATUS, &data);
state = (data & LP8788_CHG_STATE_M) >> LP8788_CHG_STATE_S;
- return scnprintf(buf, PAGE_SIZE, "%s\n", desc[state]);
+ return sysfs_emit(buf, "%s\n", desc[state]);
}
static ssize_t lp8788_show_eoc_time(struct device *dev,
@@ -618,8 +618,7 @@ static ssize_t lp8788_show_eoc_time(struct device *dev,
lp8788_read_byte(pchg->lp, LP8788_CHG_EOC, &val);
val = (val & LP8788_CHG_EOC_TIME_M) >> LP8788_CHG_EOC_TIME_S;
- return scnprintf(buf, PAGE_SIZE, "End Of Charge Time: %s\n",
- stime[val]);
+ return sysfs_emit(buf, "End Of Charge Time: %s\n", stime[val]);
}
static ssize_t lp8788_show_eoc_level(struct device *dev,
@@ -642,7 +641,7 @@ static ssize_t lp8788_show_eoc_level(struct device *dev,
val = (val & LP8788_CHG_EOC_LEVEL_M) >> LP8788_CHG_EOC_LEVEL_S;
level = mode ? abs_level[val] : relative_level[val];
- return scnprintf(buf, PAGE_SIZE, "End Of Charge Level: %s\n", level);
+ return sysfs_emit(buf, "End Of Charge Level: %s\n", level);
}
static DEVICE_ATTR(charger_status, S_IRUSR, lp8788_show_charger_status, NULL);
diff --git a/drivers/power/supply/ltc4162-l-charger.c b/drivers/power/supply/ltc4162-l-charger.c
index db2bb5233570..0e95c65369b8 100644
--- a/drivers/power/supply/ltc4162-l-charger.c
+++ b/drivers/power/supply/ltc4162-l-charger.c
@@ -525,7 +525,7 @@ static ssize_t charge_status_show(struct device *dev,
}
}
- return sprintf(buf, "%s\n", result);
+ return sysfs_emit(buf, "%s\n", result);
}
static DEVICE_ATTR_RO(charge_status);
@@ -541,7 +541,7 @@ static ssize_t vbat_show(struct device *dev,
if (ret)
return ret;
- return sprintf(buf, "%d\n", val.intval);
+ return sysfs_emit(buf, "%d\n", val.intval);
}
static DEVICE_ATTR_RO(vbat);
@@ -557,7 +557,7 @@ static ssize_t vbat_avg_show(struct device *dev,
if (ret)
return ret;
- return sprintf(buf, "%d\n", val.intval);
+ return sysfs_emit(buf, "%d\n", val.intval);
}
static DEVICE_ATTR_RO(vbat_avg);
@@ -573,7 +573,7 @@ static ssize_t ibat_show(struct device *dev,
if (ret)
return ret;
- return sprintf(buf, "%d\n", val.intval);
+ return sysfs_emit(buf, "%d\n", val.intval);
}
static DEVICE_ATTR_RO(ibat);
@@ -589,7 +589,7 @@ static ssize_t force_telemetry_show(struct device *dev,
if (ret)
return ret;
- return sprintf(buf, "%u\n", regval & BIT(2) ? 1 : 0);
+ return sysfs_emit(buf, "%u\n", regval & BIT(2) ? 1 : 0);
}
static ssize_t force_telemetry_store(struct device *dev,
@@ -628,7 +628,7 @@ static ssize_t arm_ship_mode_show(struct device *dev,
if (ret)
return ret;
- return sprintf(buf, "%u\n",
+ return sysfs_emit(buf, "%u\n",
regval == LTC4162L_ARM_SHIP_MODE_MAGIC ? 1 : 0);
}
diff --git a/drivers/power/supply/max14577_charger.c b/drivers/power/supply/max14577_charger.c
index f244cd902eb9..96f9de775043 100644
--- a/drivers/power/supply/max14577_charger.c
+++ b/drivers/power/supply/max14577_charger.c
@@ -532,7 +532,7 @@ static ssize_t show_fast_charge_timer(struct device *dev,
break;
}
- return scnprintf(buf, PAGE_SIZE, "%u\n", val);
+ return sysfs_emit(buf, "%u\n", val);
}
static ssize_t store_fast_charge_timer(struct device *dev,
diff --git a/drivers/power/supply/max1721x_battery.c b/drivers/power/supply/max1721x_battery.c
index d8d52e09da7b..bac43ab9e97c 100644
--- a/drivers/power/supply/max1721x_battery.c
+++ b/drivers/power/supply/max1721x_battery.c
@@ -384,7 +384,7 @@ static int devm_w1_max1721x_add_device(struct w1_slave *sl)
}
if (!info->ManufacturerName[0])
- strncpy(info->ManufacturerName, DEF_MFG_NAME,
+ strscpy(info->ManufacturerName, DEF_MFG_NAME,
2 * MAX1721X_REG_MFG_NUMB);
if (get_string(info, MAX1721X_REG_DEV_STR,
@@ -403,15 +403,15 @@ static int devm_w1_max1721x_add_device(struct w1_slave *sl)
switch (dev_name & MAX172XX_DEV_MASK) {
case MAX172X1_DEV:
- strncpy(info->DeviceName, DEF_DEV_NAME_MAX17211,
+ strscpy(info->DeviceName, DEF_DEV_NAME_MAX17211,
2 * MAX1721X_REG_DEV_NUMB);
break;
case MAX172X5_DEV:
- strncpy(info->DeviceName, DEF_DEV_NAME_MAX17215,
+ strscpy(info->DeviceName, DEF_DEV_NAME_MAX17215,
2 * MAX1721X_REG_DEV_NUMB);
break;
default:
- strncpy(info->DeviceName, DEF_DEV_NAME_UNKNOWN,
+ strscpy(info->DeviceName, DEF_DEV_NAME_UNKNOWN,
2 * MAX1721X_REG_DEV_NUMB);
}
}
diff --git a/drivers/power/supply/max77650-charger.c b/drivers/power/supply/max77650-charger.c
index d913428bedc0..e8c25da40ab2 100644
--- a/drivers/power/supply/max77650-charger.c
+++ b/drivers/power/supply/max77650-charger.c
@@ -141,7 +141,7 @@ static int max77650_charger_enable(struct max77650_charger_data *chg)
return rv;
}
-static int max77650_charger_disable(struct max77650_charger_data *chg)
+static void max77650_charger_disable(struct max77650_charger_data *chg)
{
int rv;
@@ -151,8 +151,6 @@ static int max77650_charger_disable(struct max77650_charger_data *chg)
MAX77650_CHARGER_DISABLED);
if (rv)
dev_err(chg->dev, "unable to disable the charger: %d\n", rv);
-
- return rv;
}
static irqreturn_t max77650_charger_check_status(int irq, void *data)
@@ -351,7 +349,9 @@ static int max77650_charger_remove(struct platform_device *pdev)
{
struct max77650_charger_data *chg = platform_get_drvdata(pdev);
- return max77650_charger_disable(chg);
+ max77650_charger_disable(chg);
+
+ return 0;
}
static const struct of_device_id max77650_charger_of_match[] = {
diff --git a/drivers/power/supply/max77693_charger.c b/drivers/power/supply/max77693_charger.c
index a2c5c9858639..794c8c054450 100644
--- a/drivers/power/supply/max77693_charger.c
+++ b/drivers/power/supply/max77693_charger.c
@@ -296,7 +296,7 @@ static ssize_t fast_charge_timer_show(struct device *dev,
break;
}
- return scnprintf(buf, PAGE_SIZE, "%u\n", val);
+ return sysfs_emit(buf, "%u\n", val);
}
static int max77693_set_fast_charge_timer(struct max77693_charger *chg,
@@ -357,7 +357,7 @@ static ssize_t top_off_threshold_current_show(struct device *dev,
else
val = data * 50000;
- return scnprintf(buf, PAGE_SIZE, "%u\n", val);
+ return sysfs_emit(buf, "%u\n", val);
}
static int max77693_set_top_off_threshold_current(struct max77693_charger *chg,
@@ -405,7 +405,7 @@ static ssize_t top_off_timer_show(struct device *dev,
val = data * 10;
- return scnprintf(buf, PAGE_SIZE, "%u\n", val);
+ return sysfs_emit(buf, "%u\n", val);
}
static int max77693_set_top_off_timer(struct max77693_charger *chg,
diff --git a/drivers/power/supply/mp2629_charger.c b/drivers/power/supply/mp2629_charger.c
index bf9c27b463a8..3a2a28fbba73 100644
--- a/drivers/power/supply/mp2629_charger.c
+++ b/drivers/power/supply/mp2629_charger.c
@@ -519,7 +519,7 @@ static ssize_t batt_impedance_compensation_show(struct device *dev,
return ret;
rval = (rval >> 4) * 10;
- return sprintf(buf, "%d mohm\n", rval);
+ return sysfs_emit(buf, "%d mohm\n", rval);
}
static ssize_t batt_impedance_compensation_store(struct device *dev,
diff --git a/drivers/power/supply/olpc_battery.c b/drivers/power/supply/olpc_battery.c
index a5da20ffd685..9f60094a5599 100644
--- a/drivers/power/supply/olpc_battery.c
+++ b/drivers/power/supply/olpc_battery.c
@@ -568,7 +568,7 @@ static ssize_t olpc_bat_error_read(struct device *dev,
if (ret < 0)
return ret;
- return sprintf(buf, "%d\n", ec_byte);
+ return sysfs_emit(buf, "%d\n", ec_byte);
}
static struct device_attribute olpc_bat_error = {
diff --git a/drivers/power/supply/pcf50633-charger.c b/drivers/power/supply/pcf50633-charger.c
index 8c5d892f6350..fd44cb8ac0e2 100644
--- a/drivers/power/supply/pcf50633-charger.c
+++ b/drivers/power/supply/pcf50633-charger.c
@@ -153,7 +153,7 @@ show_chgmode(struct device *dev, struct device_attribute *attr, char *buf)
u8 mbcs2 = pcf50633_reg_read(mbc->pcf, PCF50633_REG_MBCS2);
u8 chgmod = (mbcs2 & PCF50633_MBCS2_MBC_MASK);
- return sprintf(buf, "%d\n", chgmod);
+ return sysfs_emit(buf, "%d\n", chgmod);
}
static DEVICE_ATTR(chgmode, S_IRUGO, show_chgmode, NULL);
@@ -174,7 +174,7 @@ show_usblim(struct device *dev, struct device_attribute *attr, char *buf)
else
ma = 0;
- return sprintf(buf, "%u\n", ma);
+ return sysfs_emit(buf, "%u\n", ma);
}
static ssize_t set_usblim(struct device *dev,
@@ -207,7 +207,7 @@ show_chglim(struct device *dev, struct device_attribute *attr, char *buf)
ma = (mbc->pcf->pdata->charger_reference_current_ma * mbcc5) >> 8;
- return sprintf(buf, "%u\n", ma);
+ return sysfs_emit(buf, "%u\n", ma);
}
static ssize_t set_chglim(struct device *dev,
diff --git a/drivers/power/supply/pda_power.c b/drivers/power/supply/pda_power.c
deleted file mode 100644
index 03a37fd6be27..000000000000
--- a/drivers/power/supply/pda_power.c
+++ /dev/null
@@ -1,520 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Common power driver for PDAs and phones with one or two external
- * power supplies (AC/USB) connected to main and backup batteries,
- * and optional builtin charger.
- *
- * Copyright © 2007 Anton Vorontsov <cbou@mail.ru>
- */
-
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/err.h>
-#include <linux/interrupt.h>
-#include <linux/notifier.h>
-#include <linux/power_supply.h>
-#include <linux/pda_power.h>
-#include <linux/regulator/consumer.h>
-#include <linux/timer.h>
-#include <linux/jiffies.h>
-#include <linux/usb/otg.h>
-
-static inline unsigned int get_irq_flags(struct resource *res)
-{
- return IRQF_SHARED | (res->flags & IRQF_TRIGGER_MASK);
-}
-
-static struct device *dev;
-static struct pda_power_pdata *pdata;
-static struct resource *ac_irq, *usb_irq;
-static struct delayed_work charger_work;
-static struct delayed_work polling_work;
-static struct delayed_work supply_work;
-static int polling;
-static struct power_supply *pda_psy_ac, *pda_psy_usb;
-
-#if IS_ENABLED(CONFIG_USB_PHY)
-static struct usb_phy *transceiver;
-static struct notifier_block otg_nb;
-#endif
-
-static struct regulator *ac_draw;
-
-enum {
- PDA_PSY_OFFLINE = 0,
- PDA_PSY_ONLINE = 1,
- PDA_PSY_TO_CHANGE,
-};
-static int new_ac_status = -1;
-static int new_usb_status = -1;
-static int ac_status = -1;
-static int usb_status = -1;
-
-static int pda_power_get_property(struct power_supply *psy,
- enum power_supply_property psp,
- union power_supply_propval *val)
-{
- switch (psp) {
- case POWER_SUPPLY_PROP_ONLINE:
- if (psy->desc->type == POWER_SUPPLY_TYPE_MAINS)
- val->intval = pdata->is_ac_online ?
- pdata->is_ac_online() : 0;
- else
- val->intval = pdata->is_usb_online ?
- pdata->is_usb_online() : 0;
- break;
- default:
- return -EINVAL;
- }
- return 0;
-}
-
-static enum power_supply_property pda_power_props[] = {
- POWER_SUPPLY_PROP_ONLINE,
-};
-
-static char *pda_power_supplied_to[] = {
- "main-battery",
- "backup-battery",
-};
-
-static const struct power_supply_desc pda_psy_ac_desc = {
- .name = "ac",
- .type = POWER_SUPPLY_TYPE_MAINS,
- .properties = pda_power_props,
- .num_properties = ARRAY_SIZE(pda_power_props),
- .get_property = pda_power_get_property,
-};
-
-static const struct power_supply_desc pda_psy_usb_desc = {
- .name = "usb",
- .type = POWER_SUPPLY_TYPE_USB,
- .properties = pda_power_props,
- .num_properties = ARRAY_SIZE(pda_power_props),
- .get_property = pda_power_get_property,
-};
-
-static void update_status(void)
-{
- if (pdata->is_ac_online)
- new_ac_status = !!pdata->is_ac_online();
-
- if (pdata->is_usb_online)
- new_usb_status = !!pdata->is_usb_online();
-}
-
-static void update_charger(void)
-{
- static int regulator_enabled;
- int max_uA = pdata->ac_max_uA;
-
- if (pdata->set_charge) {
- if (new_ac_status > 0) {
- dev_dbg(dev, "charger on (AC)\n");
- pdata->set_charge(PDA_POWER_CHARGE_AC);
- } else if (new_usb_status > 0) {
- dev_dbg(dev, "charger on (USB)\n");
- pdata->set_charge(PDA_POWER_CHARGE_USB);
- } else {
- dev_dbg(dev, "charger off\n");
- pdata->set_charge(0);
- }
- } else if (ac_draw) {
- if (new_ac_status > 0) {
- regulator_set_current_limit(ac_draw, max_uA, max_uA);
- if (!regulator_enabled) {
- dev_dbg(dev, "charger on (AC)\n");
- WARN_ON(regulator_enable(ac_draw));
- regulator_enabled = 1;
- }
- } else {
- if (regulator_enabled) {
- dev_dbg(dev, "charger off\n");
- WARN_ON(regulator_disable(ac_draw));
- regulator_enabled = 0;
- }
- }
- }
-}
-
-static void supply_work_func(struct work_struct *work)
-{
- if (ac_status == PDA_PSY_TO_CHANGE) {
- ac_status = new_ac_status;
- power_supply_changed(pda_psy_ac);
- }
-
- if (usb_status == PDA_PSY_TO_CHANGE) {
- usb_status = new_usb_status;
- power_supply_changed(pda_psy_usb);
- }
-}
-
-static void psy_changed(void)
-{
- update_charger();
-
- /*
- * Okay, charger set. Now wait a bit before notifying supplicants,
- * charge power should stabilize.
- */
- cancel_delayed_work(&supply_work);
- schedule_delayed_work(&supply_work,
- msecs_to_jiffies(pdata->wait_for_charger));
-}
-
-static void charger_work_func(struct work_struct *work)
-{
- update_status();
- psy_changed();
-}
-
-static irqreturn_t power_changed_isr(int irq, void *power_supply)
-{
- if (power_supply == pda_psy_ac)
- ac_status = PDA_PSY_TO_CHANGE;
- else if (power_supply == pda_psy_usb)
- usb_status = PDA_PSY_TO_CHANGE;
- else
- return IRQ_NONE;
-
- /*
- * Wait a bit before reading ac/usb line status and setting charger,
- * because ac/usb status readings may lag from irq.
- */
- cancel_delayed_work(&charger_work);
- schedule_delayed_work(&charger_work,
- msecs_to_jiffies(pdata->wait_for_status));
-
- return IRQ_HANDLED;
-}
-
-static void polling_work_func(struct work_struct *work)
-{
- int changed = 0;
-
- dev_dbg(dev, "polling...\n");
-
- update_status();
-
- if (!ac_irq && new_ac_status != ac_status) {
- ac_status = PDA_PSY_TO_CHANGE;
- changed = 1;
- }
-
- if (!usb_irq && new_usb_status != usb_status) {
- usb_status = PDA_PSY_TO_CHANGE;
- changed = 1;
- }
-
- if (changed)
- psy_changed();
-
- cancel_delayed_work(&polling_work);
- schedule_delayed_work(&polling_work,
- msecs_to_jiffies(pdata->polling_interval));
-}
-
-#if IS_ENABLED(CONFIG_USB_PHY)
-static int otg_is_usb_online(void)
-{
- return (transceiver->last_event == USB_EVENT_VBUS ||
- transceiver->last_event == USB_EVENT_ENUMERATED);
-}
-
-static int otg_is_ac_online(void)
-{
- return (transceiver->last_event == USB_EVENT_CHARGER);
-}
-
-static int otg_handle_notification(struct notifier_block *nb,
- unsigned long event, void *unused)
-{
- switch (event) {
- case USB_EVENT_CHARGER:
- ac_status = PDA_PSY_TO_CHANGE;
- break;
- case USB_EVENT_VBUS:
- case USB_EVENT_ENUMERATED:
- usb_status = PDA_PSY_TO_CHANGE;
- break;
- case USB_EVENT_NONE:
- ac_status = PDA_PSY_TO_CHANGE;
- usb_status = PDA_PSY_TO_CHANGE;
- break;
- default:
- return NOTIFY_OK;
- }
-
- /*
- * Wait a bit before reading ac/usb line status and setting charger,
- * because ac/usb status readings may lag from irq.
- */
- cancel_delayed_work(&charger_work);
- schedule_delayed_work(&charger_work,
- msecs_to_jiffies(pdata->wait_for_status));
-
- return NOTIFY_OK;
-}
-#endif
-
-static int pda_power_probe(struct platform_device *pdev)
-{
- struct power_supply_config psy_cfg = {};
- int ret = 0;
-
- dev = &pdev->dev;
-
- if (pdev->id != -1) {
- dev_err(dev, "it's meaningless to register several "
- "pda_powers; use id = -1\n");
- ret = -EINVAL;
- goto wrongid;
- }
-
- pdata = pdev->dev.platform_data;
-
- if (pdata->init) {
- ret = pdata->init(dev);
- if (ret < 0)
- goto init_failed;
- }
-
- ac_draw = regulator_get(dev, "ac_draw");
- if (IS_ERR(ac_draw)) {
- dev_dbg(dev, "couldn't get ac_draw regulator\n");
- ac_draw = NULL;
- }
-
- update_status();
- update_charger();
-
- if (!pdata->wait_for_status)
- pdata->wait_for_status = 500;
-
- if (!pdata->wait_for_charger)
- pdata->wait_for_charger = 500;
-
- if (!pdata->polling_interval)
- pdata->polling_interval = 2000;
-
- if (!pdata->ac_max_uA)
- pdata->ac_max_uA = 500000;
-
- INIT_DELAYED_WORK(&charger_work, charger_work_func);
- INIT_DELAYED_WORK(&supply_work, supply_work_func);
-
- ac_irq = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "ac");
- usb_irq = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "usb");
-
- if (pdata->supplied_to) {
- psy_cfg.supplied_to = pdata->supplied_to;
- psy_cfg.num_supplicants = pdata->num_supplicants;
- } else {
- psy_cfg.supplied_to = pda_power_supplied_to;
- psy_cfg.num_supplicants = ARRAY_SIZE(pda_power_supplied_to);
- }
-
-#if IS_ENABLED(CONFIG_USB_PHY)
- transceiver = usb_get_phy(USB_PHY_TYPE_USB2);
- if (!IS_ERR_OR_NULL(transceiver)) {
- if (!pdata->is_usb_online)
- pdata->is_usb_online = otg_is_usb_online;
- if (!pdata->is_ac_online)
- pdata->is_ac_online = otg_is_ac_online;
- }
-#endif
-
- if (pdata->is_ac_online) {
- pda_psy_ac = power_supply_register(&pdev->dev,
- &pda_psy_ac_desc, &psy_cfg);
- if (IS_ERR(pda_psy_ac)) {
- dev_err(dev, "failed to register %s power supply\n",
- pda_psy_ac_desc.name);
- ret = PTR_ERR(pda_psy_ac);
- goto ac_supply_failed;
- }
-
- if (ac_irq) {
- ret = request_irq(ac_irq->start, power_changed_isr,
- get_irq_flags(ac_irq), ac_irq->name,
- pda_psy_ac);
- if (ret) {
- dev_err(dev, "request ac irq failed\n");
- goto ac_irq_failed;
- }
- } else {
- polling = 1;
- }
- }
-
- if (pdata->is_usb_online) {
- pda_psy_usb = power_supply_register(&pdev->dev,
- &pda_psy_usb_desc,
- &psy_cfg);
- if (IS_ERR(pda_psy_usb)) {
- dev_err(dev, "failed to register %s power supply\n",
- pda_psy_usb_desc.name);
- ret = PTR_ERR(pda_psy_usb);
- goto usb_supply_failed;
- }
-
- if (usb_irq) {
- ret = request_irq(usb_irq->start, power_changed_isr,
- get_irq_flags(usb_irq),
- usb_irq->name, pda_psy_usb);
- if (ret) {
- dev_err(dev, "request usb irq failed\n");
- goto usb_irq_failed;
- }
- } else {
- polling = 1;
- }
- }
-
-#if IS_ENABLED(CONFIG_USB_PHY)
- if (!IS_ERR_OR_NULL(transceiver) && pdata->use_otg_notifier) {
- otg_nb.notifier_call = otg_handle_notification;
- ret = usb_register_notifier(transceiver, &otg_nb);
- if (ret) {
- dev_err(dev, "failure to register otg notifier\n");
- goto otg_reg_notifier_failed;
- }
- polling = 0;
- }
-#endif
-
- if (polling) {
- dev_dbg(dev, "will poll for status\n");
- INIT_DELAYED_WORK(&polling_work, polling_work_func);
- cancel_delayed_work(&polling_work);
- schedule_delayed_work(&polling_work,
- msecs_to_jiffies(pdata->polling_interval));
- }
-
- if (ac_irq || usb_irq)
- device_init_wakeup(&pdev->dev, 1);
-
- return 0;
-
-#if IS_ENABLED(CONFIG_USB_PHY)
-otg_reg_notifier_failed:
- if (pdata->is_usb_online && usb_irq)
- free_irq(usb_irq->start, pda_psy_usb);
-#endif
-usb_irq_failed:
- if (pdata->is_usb_online)
- power_supply_unregister(pda_psy_usb);
-usb_supply_failed:
- if (pdata->is_ac_online && ac_irq)
- free_irq(ac_irq->start, pda_psy_ac);
-#if IS_ENABLED(CONFIG_USB_PHY)
- if (!IS_ERR_OR_NULL(transceiver))
- usb_put_phy(transceiver);
-#endif
-ac_irq_failed:
- if (pdata->is_ac_online)
- power_supply_unregister(pda_psy_ac);
-ac_supply_failed:
- if (ac_draw) {
- regulator_put(ac_draw);
- ac_draw = NULL;
- }
- if (pdata->exit)
- pdata->exit(dev);
-init_failed:
-wrongid:
- return ret;
-}
-
-static int pda_power_remove(struct platform_device *pdev)
-{
-#if IS_ENABLED(CONFIG_USB_PHY)
- if (!IS_ERR_OR_NULL(transceiver) && pdata->use_otg_notifier)
- usb_unregister_notifier(transceiver, &otg_nb);
-#endif
- if (pdata->is_usb_online && usb_irq)
- free_irq(usb_irq->start, pda_psy_usb);
- if (pdata->is_ac_online && ac_irq)
- free_irq(ac_irq->start, pda_psy_ac);
-
- if (polling)
- cancel_delayed_work_sync(&polling_work);
- cancel_delayed_work_sync(&charger_work);
- cancel_delayed_work_sync(&supply_work);
-
- if (pdata->is_usb_online)
- power_supply_unregister(pda_psy_usb);
- if (pdata->is_ac_online)
- power_supply_unregister(pda_psy_ac);
-#if IS_ENABLED(CONFIG_USB_PHY)
- if (!IS_ERR_OR_NULL(transceiver))
- usb_put_phy(transceiver);
-#endif
- if (ac_draw) {
- regulator_put(ac_draw);
- ac_draw = NULL;
- }
- if (pdata->exit)
- pdata->exit(dev);
-
- return 0;
-}
-
-#ifdef CONFIG_PM
-static int ac_wakeup_enabled;
-static int usb_wakeup_enabled;
-
-static int pda_power_suspend(struct platform_device *pdev, pm_message_t state)
-{
- if (pdata->suspend) {
- int ret = pdata->suspend(state);
-
- if (ret)
- return ret;
- }
-
- if (device_may_wakeup(&pdev->dev)) {
- if (ac_irq)
- ac_wakeup_enabled = !enable_irq_wake(ac_irq->start);
- if (usb_irq)
- usb_wakeup_enabled = !enable_irq_wake(usb_irq->start);
- }
-
- return 0;
-}
-
-static int pda_power_resume(struct platform_device *pdev)
-{
- if (device_may_wakeup(&pdev->dev)) {
- if (usb_irq && usb_wakeup_enabled)
- disable_irq_wake(usb_irq->start);
- if (ac_irq && ac_wakeup_enabled)
- disable_irq_wake(ac_irq->start);
- }
-
- if (pdata->resume)
- return pdata->resume();
-
- return 0;
-}
-#else
-#define pda_power_suspend NULL
-#define pda_power_resume NULL
-#endif /* CONFIG_PM */
-
-static struct platform_driver pda_power_pdrv = {
- .driver = {
- .name = "pda-power",
- },
- .probe = pda_power_probe,
- .remove = pda_power_remove,
- .suspend = pda_power_suspend,
- .resume = pda_power_resume,
-};
-
-module_platform_driver(pda_power_pdrv);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Anton Vorontsov <cbou@mail.ru>");
-MODULE_ALIAS("platform:pda-power");
diff --git a/drivers/power/supply/power_supply_core.c b/drivers/power/supply/power_supply_core.c
index 7c790c41e2fe..cc5b2e22b42a 100644
--- a/drivers/power/supply/power_supply_core.c
+++ b/drivers/power/supply/power_supply_core.c
@@ -1186,83 +1186,6 @@ static void psy_unregister_thermal(struct power_supply *psy)
thermal_zone_device_unregister(psy->tzd);
}
-/* thermal cooling device callbacks */
-static int ps_get_max_charge_cntl_limit(struct thermal_cooling_device *tcd,
- unsigned long *state)
-{
- struct power_supply *psy;
- union power_supply_propval val;
- int ret;
-
- psy = tcd->devdata;
- ret = power_supply_get_property(psy,
- POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX, &val);
- if (ret)
- return ret;
-
- *state = val.intval;
-
- return ret;
-}
-
-static int ps_get_cur_charge_cntl_limit(struct thermal_cooling_device *tcd,
- unsigned long *state)
-{
- struct power_supply *psy;
- union power_supply_propval val;
- int ret;
-
- psy = tcd->devdata;
- ret = power_supply_get_property(psy,
- POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT, &val);
- if (ret)
- return ret;
-
- *state = val.intval;
-
- return ret;
-}
-
-static int ps_set_cur_charge_cntl_limit(struct thermal_cooling_device *tcd,
- unsigned long state)
-{
- struct power_supply *psy;
- union power_supply_propval val;
- int ret;
-
- psy = tcd->devdata;
- val.intval = state;
- ret = psy->desc->set_property(psy,
- POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT, &val);
-
- return ret;
-}
-
-static const struct thermal_cooling_device_ops psy_tcd_ops = {
- .get_max_state = ps_get_max_charge_cntl_limit,
- .get_cur_state = ps_get_cur_charge_cntl_limit,
- .set_cur_state = ps_set_cur_charge_cntl_limit,
-};
-
-static int psy_register_cooler(struct power_supply *psy)
-{
- /* Register for cooling device if psy can control charging */
- if (psy_has_property(psy->desc, POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT)) {
- psy->tcd = thermal_cooling_device_register(
- (char *)psy->desc->name,
- psy, &psy_tcd_ops);
- return PTR_ERR_OR_ZERO(psy->tcd);
- }
-
- return 0;
-}
-
-static void psy_unregister_cooler(struct power_supply *psy)
-{
- if (IS_ERR_OR_NULL(psy->tcd))
- return;
- thermal_cooling_device_unregister(psy->tcd);
-}
#else
static int psy_register_thermal(struct power_supply *psy)
{
@@ -1272,15 +1195,6 @@ static int psy_register_thermal(struct power_supply *psy)
static void psy_unregister_thermal(struct power_supply *psy)
{
}
-
-static int psy_register_cooler(struct power_supply *psy)
-{
- return 0;
-}
-
-static void psy_unregister_cooler(struct power_supply *psy)
-{
-}
#endif
static struct power_supply *__must_check
@@ -1354,10 +1268,6 @@ __power_supply_register(struct device *parent,
if (rc)
goto register_thermal_failed;
- rc = psy_register_cooler(psy);
- if (rc)
- goto register_cooler_failed;
-
rc = power_supply_create_triggers(psy);
if (rc)
goto create_triggers_failed;
@@ -1387,8 +1297,6 @@ __power_supply_register(struct device *parent,
add_hwmon_sysfs_failed:
power_supply_remove_triggers(psy);
create_triggers_failed:
- psy_unregister_cooler(psy);
-register_cooler_failed:
psy_unregister_thermal(psy);
register_thermal_failed:
wakeup_init_failed:
@@ -1540,7 +1448,6 @@ void power_supply_unregister(struct power_supply *psy)
sysfs_remove_link(&psy->dev.kobj, "powers");
power_supply_remove_hwmon_sysfs(psy);
power_supply_remove_triggers(psy);
- psy_unregister_cooler(psy);
psy_unregister_thermal(psy);
device_init_wakeup(&psy->dev, false);
device_unregister(&psy->dev);
diff --git a/drivers/power/supply/power_supply_leds.c b/drivers/power/supply/power_supply_leds.c
index d69880cc3593..702bf83f6e6d 100644
--- a/drivers/power/supply/power_supply_leds.c
+++ b/drivers/power/supply/power_supply_leds.c
@@ -13,6 +13,7 @@
#include <linux/device.h>
#include <linux/power_supply.h>
#include <linux/slab.h>
+#include <linux/leds.h>
#include "power_supply.h"
diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c
index 6ca7d3985a40..c228205e0953 100644
--- a/drivers/power/supply/power_supply_sysfs.c
+++ b/drivers/power/supply/power_supply_sysfs.c
@@ -249,11 +249,11 @@ static ssize_t power_supply_show_usb_type(struct device *dev,
usb_type = desc->usb_types[i];
if (value->intval == usb_type) {
- count += sprintf(buf + count, "[%s] ",
+ count += sysfs_emit_at(buf, count, "[%s] ",
POWER_SUPPLY_USB_TYPE_TEXT[usb_type]);
match = true;
} else {
- count += sprintf(buf + count, "%s ",
+ count += sysfs_emit_at(buf, count, "%s ",
POWER_SUPPLY_USB_TYPE_TEXT[usb_type]);
}
}
@@ -297,7 +297,7 @@ static ssize_t power_supply_show_property(struct device *dev,
if (ps_attr->text_values_len > 0 &&
value.intval < ps_attr->text_values_len && value.intval >= 0) {
- return sprintf(buf, "%s\n", ps_attr->text_values[value.intval]);
+ return sysfs_emit(buf, "%s\n", ps_attr->text_values[value.intval]);
}
switch (psp) {
@@ -306,10 +306,10 @@ static ssize_t power_supply_show_property(struct device *dev,
&value, buf);
break;
case POWER_SUPPLY_PROP_MODEL_NAME ... POWER_SUPPLY_PROP_SERIAL_NUMBER:
- ret = sprintf(buf, "%s\n", value.strval);
+ ret = sysfs_emit(buf, "%s\n", value.strval);
break;
default:
- ret = sprintf(buf, "%d\n", value.intval);
+ ret = sysfs_emit(buf, "%d\n", value.intval);
}
return ret;
diff --git a/drivers/power/supply/rt9467-charger.c b/drivers/power/supply/rt9467-charger.c
new file mode 100644
index 000000000000..73f744a3155d
--- /dev/null
+++ b/drivers/power/supply/rt9467-charger.c
@@ -0,0 +1,1282 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2022 Richtek Technology Corp.
+ *
+ * Author: ChiYuan Huang <cy_huang@richtek.com>
+ * ChiaEn Wu <chiaen_wu@richtek.com>
+ */
+
+#include <linux/bits.h>
+#include <linux/bitfield.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/kstrtox.h>
+#include <linux/linear_range.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/power_supply.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+#include <linux/units.h>
+#include <linux/sysfs.h>
+
+#define RT9467_REG_CORE_CTRL0 0x00
+#define RT9467_REG_CHG_CTRL1 0x01
+#define RT9467_REG_CHG_CTRL2 0x02
+#define RT9467_REG_CHG_CTRL3 0x03
+#define RT9467_REG_CHG_CTRL4 0x04
+#define RT9467_REG_CHG_CTRL5 0x05
+#define RT9467_REG_CHG_CTRL6 0x06
+#define RT9467_REG_CHG_CTRL7 0x07
+#define RT9467_REG_CHG_CTRL8 0x08
+#define RT9467_REG_CHG_CTRL9 0x09
+#define RT9467_REG_CHG_CTRL10 0x0A
+#define RT9467_REG_CHG_CTRL12 0x0C
+#define RT9467_REG_CHG_CTRL13 0x0D
+#define RT9467_REG_CHG_CTRL14 0x0E
+#define RT9467_REG_CHG_ADC 0x11
+#define RT9467_REG_CHG_DPDM1 0x12
+#define RT9467_REG_CHG_DPDM2 0x13
+#define RT9467_REG_DEVICE_ID 0x40
+#define RT9467_REG_CHG_STAT 0x42
+#define RT9467_REG_ADC_DATA_H 0x44
+#define RT9467_REG_CHG_STATC 0x50
+#define RT9467_REG_CHG_IRQ1 0x53
+#define RT9467_REG_CHG_STATC_CTRL 0x60
+#define RT9467_REG_CHG_IRQ1_CTRL 0x63
+
+#define RT9467_MASK_PWR_RDY BIT(7)
+#define RT9467_MASK_MIVR_STAT BIT(6)
+#define RT9467_MASK_OTG_CSEL GENMASK(2, 0)
+#define RT9467_MASK_OTG_VSEL GENMASK(7, 2)
+#define RT9467_MASK_OTG_EN BIT(0)
+#define RT9467_MASK_ADC_IN_SEL GENMASK(7, 4)
+#define RT9467_MASK_ADC_START BIT(0)
+
+#define RT9467_NUM_IRQ_REGS 4
+#define RT9467_ICHG_MIN_uA 100000
+#define RT9467_ICHG_MAX_uA 5000000
+#define RT9467_CV_MAX_uV 4710000
+#define RT9467_OTG_MIN_uV 4425000
+#define RT9467_OTG_MAX_uV 5825000
+#define RT9467_OTG_STEP_uV 25000
+#define RT9467_NUM_VOTG (RT9467_OTG_MAX_uV - RT9467_OTG_MIN_uV + 1)
+#define RT9467_AICLVTH_GAP_uV 200000
+#define RT9467_ADCCONV_TIME_MS 35
+
+#define RT9466_VID 0x8
+#define RT9467_VID 0x9
+
+/* IRQ number */
+#define RT9467_IRQ_TS_STATC 0
+#define RT9467_IRQ_CHG_FAULT 1
+#define RT9467_IRQ_CHG_STATC 2
+#define RT9467_IRQ_CHG_TMR 3
+#define RT9467_IRQ_CHG_BATABS 4
+#define RT9467_IRQ_CHG_ADPBAD 5
+#define RT9467_IRQ_CHG_RVP 6
+#define RT9467_IRQ_OTP 7
+
+#define RT9467_IRQ_CHG_AICLM 8
+#define RT9467_IRQ_CHG_ICHGM 9
+#define RT9467_IRQ_WDTMR 11
+#define RT9467_IRQ_SSFINISH 12
+#define RT9467_IRQ_CHG_RECHG 13
+#define RT9467_IRQ_CHG_TERM 14
+#define RT9467_IRQ_CHG_IEOC 15
+
+#define RT9467_IRQ_ADC_DONE 16
+#define RT9467_IRQ_PUMPX_DONE 17
+#define RT9467_IRQ_BST_BATUV 21
+#define RT9467_IRQ_BST_MIDOV 22
+#define RT9467_IRQ_BST_OLP 23
+
+#define RT9467_IRQ_ATTACH 24
+#define RT9467_IRQ_DETACH 25
+#define RT9467_IRQ_HVDCP_DET 29
+#define RT9467_IRQ_CHGDET 30
+#define RT9467_IRQ_DCDT 31
+
+enum rt9467_fields {
+ /* RT9467_REG_CORE_CTRL0 */
+ F_RST = 0,
+ /* RT9467_REG_CHG_CTRL1 */
+ F_HZ, F_OTG_PIN_EN, F_OPA_MODE,
+ /* RT9467_REG_CHG_CTRL2 */
+ F_SHIP_MODE, F_TE, F_IINLMTSEL, F_CFO_EN, F_CHG_EN,
+ /* RT9467_REG_CHG_CTRL3 */
+ F_IAICR, F_ILIM_EN,
+ /* RT9467_REG_CHG_CTRL4 */
+ F_VOREG,
+ /* RT9467_REG_CHG_CTRL6 */
+ F_VMIVR,
+ /* RT9467_REG_CHG_CTRL7 */
+ F_ICHG,
+ /* RT9467_REG_CHG_CTRL8 */
+ F_IPREC,
+ /* RT9467_REG_CHG_CTRL9 */
+ F_IEOC,
+ /* RT9467_REG_CHG_CTRL12 */
+ F_WT_FC,
+ /* RT9467_REG_CHG_CTRL13 */
+ F_OCP,
+ /* RT9467_REG_CHG_CTRL14 */
+ F_AICL_MEAS, F_AICL_VTH,
+ /* RT9467_REG_CHG_DPDM1 */
+ F_USBCHGEN,
+ /* RT9467_REG_CHG_DPDM2 */
+ F_USB_STATUS,
+ /* RT9467_REG_DEVICE_ID */
+ F_VENDOR,
+ /* RT9467_REG_CHG_STAT */
+ F_CHG_STAT,
+ /* RT9467_REG_CHG_STATC */
+ F_PWR_RDY, F_CHG_MIVR,
+ F_MAX_FIELDS
+};
+
+static const struct regmap_irq rt9467_irqs[] = {
+ REGMAP_IRQ_REG_LINE(RT9467_IRQ_TS_STATC, 8),
+ REGMAP_IRQ_REG_LINE(RT9467_IRQ_CHG_FAULT, 8),
+ REGMAP_IRQ_REG_LINE(RT9467_IRQ_CHG_STATC, 8),
+ REGMAP_IRQ_REG_LINE(RT9467_IRQ_CHG_TMR, 8),
+ REGMAP_IRQ_REG_LINE(RT9467_IRQ_CHG_BATABS, 8),
+ REGMAP_IRQ_REG_LINE(RT9467_IRQ_CHG_ADPBAD, 8),
+ REGMAP_IRQ_REG_LINE(RT9467_IRQ_CHG_RVP, 8),
+ REGMAP_IRQ_REG_LINE(RT9467_IRQ_OTP, 8),
+ REGMAP_IRQ_REG_LINE(RT9467_IRQ_CHG_AICLM, 8),
+ REGMAP_IRQ_REG_LINE(RT9467_IRQ_CHG_ICHGM, 8),
+ REGMAP_IRQ_REG_LINE(RT9467_IRQ_WDTMR, 8),
+ REGMAP_IRQ_REG_LINE(RT9467_IRQ_SSFINISH, 8),
+ REGMAP_IRQ_REG_LINE(RT9467_IRQ_CHG_RECHG, 8),
+ REGMAP_IRQ_REG_LINE(RT9467_IRQ_CHG_TERM, 8),
+ REGMAP_IRQ_REG_LINE(RT9467_IRQ_CHG_IEOC, 8),
+ REGMAP_IRQ_REG_LINE(RT9467_IRQ_ADC_DONE, 8),
+ REGMAP_IRQ_REG_LINE(RT9467_IRQ_PUMPX_DONE, 8),
+ REGMAP_IRQ_REG_LINE(RT9467_IRQ_BST_BATUV, 8),
+ REGMAP_IRQ_REG_LINE(RT9467_IRQ_BST_MIDOV, 8),
+ REGMAP_IRQ_REG_LINE(RT9467_IRQ_BST_OLP, 8),
+ REGMAP_IRQ_REG_LINE(RT9467_IRQ_ATTACH, 8),
+ REGMAP_IRQ_REG_LINE(RT9467_IRQ_DETACH, 8),
+ REGMAP_IRQ_REG_LINE(RT9467_IRQ_HVDCP_DET, 8),
+ REGMAP_IRQ_REG_LINE(RT9467_IRQ_CHGDET, 8),
+ REGMAP_IRQ_REG_LINE(RT9467_IRQ_DCDT, 8)
+};
+
+static const struct regmap_irq_chip rt9467_irq_chip = {
+ .name = "rt9467-irqs",
+ .status_base = RT9467_REG_CHG_IRQ1,
+ .mask_base = RT9467_REG_CHG_IRQ1_CTRL,
+ .num_regs = RT9467_NUM_IRQ_REGS,
+ .irqs = rt9467_irqs,
+ .num_irqs = ARRAY_SIZE(rt9467_irqs),
+};
+
+enum rt9467_ranges {
+ RT9467_RANGE_IAICR = 0,
+ RT9467_RANGE_VOREG,
+ RT9467_RANGE_VMIVR,
+ RT9467_RANGE_ICHG,
+ RT9467_RANGE_IPREC,
+ RT9467_RANGE_IEOC,
+ RT9467_RANGE_AICL_VTH,
+ RT9467_RANGES_MAX
+};
+
+static const struct linear_range rt9467_ranges[RT9467_RANGES_MAX] = {
+ LINEAR_RANGE_IDX(RT9467_RANGE_IAICR, 100000, 0x0, 0x3F, 50000),
+ LINEAR_RANGE_IDX(RT9467_RANGE_VOREG, 3900000, 0x0, 0x51, 10000),
+ LINEAR_RANGE_IDX(RT9467_RANGE_VMIVR, 3900000, 0x0, 0x5F, 100000),
+ LINEAR_RANGE_IDX(RT9467_RANGE_ICHG, 900000, 0x08, 0x31, 100000),
+ LINEAR_RANGE_IDX(RT9467_RANGE_IPREC, 100000, 0x0, 0x0F, 50000),
+ LINEAR_RANGE_IDX(RT9467_RANGE_IEOC, 100000, 0x0, 0x0F, 50000),
+ LINEAR_RANGE_IDX(RT9467_RANGE_AICL_VTH, 4100000, 0x0, 0x7, 100000),
+};
+
+static const struct reg_field rt9467_chg_fields[] = {
+ [F_RST] = REG_FIELD(RT9467_REG_CORE_CTRL0, 7, 7),
+ [F_HZ] = REG_FIELD(RT9467_REG_CHG_CTRL1, 2, 2),
+ [F_OTG_PIN_EN] = REG_FIELD(RT9467_REG_CHG_CTRL1, 1, 1),
+ [F_OPA_MODE] = REG_FIELD(RT9467_REG_CHG_CTRL1, 0, 0),
+ [F_SHIP_MODE] = REG_FIELD(RT9467_REG_CHG_CTRL2, 7, 7),
+ [F_TE] = REG_FIELD(RT9467_REG_CHG_CTRL2, 4, 4),
+ [F_IINLMTSEL] = REG_FIELD(RT9467_REG_CHG_CTRL2, 2, 3),
+ [F_CFO_EN] = REG_FIELD(RT9467_REG_CHG_CTRL2, 1, 1),
+ [F_CHG_EN] = REG_FIELD(RT9467_REG_CHG_CTRL2, 0, 0),
+ [F_IAICR] = REG_FIELD(RT9467_REG_CHG_CTRL3, 2, 7),
+ [F_ILIM_EN] = REG_FIELD(RT9467_REG_CHG_CTRL3, 0, 0),
+ [F_VOREG] = REG_FIELD(RT9467_REG_CHG_CTRL4, 1, 7),
+ [F_VMIVR] = REG_FIELD(RT9467_REG_CHG_CTRL6, 1, 7),
+ [F_ICHG] = REG_FIELD(RT9467_REG_CHG_CTRL7, 2, 7),
+ [F_IPREC] = REG_FIELD(RT9467_REG_CHG_CTRL8, 0, 3),
+ [F_IEOC] = REG_FIELD(RT9467_REG_CHG_CTRL9, 4, 7),
+ [F_WT_FC] = REG_FIELD(RT9467_REG_CHG_CTRL12, 5, 7),
+ [F_OCP] = REG_FIELD(RT9467_REG_CHG_CTRL13, 2, 2),
+ [F_AICL_MEAS] = REG_FIELD(RT9467_REG_CHG_CTRL14, 7, 7),
+ [F_AICL_VTH] = REG_FIELD(RT9467_REG_CHG_CTRL14, 0, 2),
+ [F_USBCHGEN] = REG_FIELD(RT9467_REG_CHG_DPDM1, 7, 7),
+ [F_USB_STATUS] = REG_FIELD(RT9467_REG_CHG_DPDM2, 0, 2),
+ [F_VENDOR] = REG_FIELD(RT9467_REG_DEVICE_ID, 4, 7),
+ [F_CHG_STAT] = REG_FIELD(RT9467_REG_CHG_STAT, 6, 7),
+ [F_PWR_RDY] = REG_FIELD(RT9467_REG_CHG_STATC, 7, 7),
+ [F_CHG_MIVR] = REG_FIELD(RT9467_REG_CHG_STATC, 6, 6),
+};
+
+enum {
+ RT9467_STAT_READY = 0,
+ RT9467_STAT_PROGRESS,
+ RT9467_STAT_CHARGE_DONE,
+ RT9467_STAT_FAULT
+};
+
+enum rt9467_adc_chan {
+ RT9467_ADC_VBUS_DIV5 = 0,
+ RT9467_ADC_VBUS_DIV2,
+ RT9467_ADC_VSYS,
+ RT9467_ADC_VBAT,
+ RT9467_ADC_TS_BAT,
+ RT9467_ADC_IBUS,
+ RT9467_ADC_IBAT,
+ RT9467_ADC_REGN,
+ RT9467_ADC_TEMP_JC
+};
+
+enum rt9467_chg_type {
+ RT9467_CHG_TYPE_NOVBUS = 0,
+ RT9467_CHG_TYPE_UNDER_GOING,
+ RT9467_CHG_TYPE_SDP,
+ RT9467_CHG_TYPE_SDPNSTD,
+ RT9467_CHG_TYPE_DCP,
+ RT9467_CHG_TYPE_CDP,
+ RT9467_CHG_TYPE_MAX
+};
+
+enum rt9467_iin_limit_sel {
+ RT9467_IINLMTSEL_3_2A = 0,
+ RT9467_IINLMTSEL_CHG_TYP,
+ RT9467_IINLMTSEL_AICR,
+ RT9467_IINLMTSEL_LOWER_LEVEL, /* lower of above three */
+};
+
+struct rt9467_chg_data {
+ struct device *dev;
+ struct regmap *regmap;
+ struct regmap_field *rm_field[F_MAX_FIELDS];
+ struct regmap_irq_chip_data *irq_chip_data;
+ struct power_supply *psy;
+ struct mutex adc_lock;
+ struct mutex attach_lock;
+ struct mutex ichg_ieoc_lock;
+ struct regulator_dev *rdev;
+ struct completion aicl_done;
+ enum power_supply_usb_type psy_usb_type;
+ unsigned int old_stat;
+ unsigned int vid;
+ int ichg_ua;
+ int ieoc_ua;
+};
+
+static int rt9467_otg_of_parse_cb(struct device_node *of,
+ const struct regulator_desc *desc,
+ struct regulator_config *cfg)
+{
+ struct rt9467_chg_data *data = cfg->driver_data;
+
+ cfg->ena_gpiod = fwnode_gpiod_get_index(of_fwnode_handle(of),
+ "enable", 0, GPIOD_OUT_LOW |
+ GPIOD_FLAGS_BIT_NONEXCLUSIVE,
+ desc->name);
+ if (IS_ERR(cfg->ena_gpiod)) {
+ cfg->ena_gpiod = NULL;
+ return 0;
+ }
+
+ return regmap_field_write(data->rm_field[F_OTG_PIN_EN], 1);
+}
+
+static const struct regulator_ops rt9467_otg_regulator_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .list_voltage = regulator_list_voltage_linear,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_current_limit = regulator_set_current_limit_regmap,
+ .get_current_limit = regulator_get_current_limit_regmap,
+};
+
+static const u32 rt9467_otg_microamp[] = {
+ 500000, 700000, 1100000, 1300000, 1800000, 2100000, 2400000, 3000000
+};
+
+static const struct regulator_desc rt9467_otg_desc = {
+ .name = "rt9476-usb-otg-vbus",
+ .of_match = "usb-otg-vbus-regulator",
+ .of_parse_cb = rt9467_otg_of_parse_cb,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .min_uV = RT9467_OTG_MIN_uV,
+ .uV_step = RT9467_OTG_STEP_uV,
+ .n_voltages = RT9467_NUM_VOTG,
+ .curr_table = rt9467_otg_microamp,
+ .n_current_limits = ARRAY_SIZE(rt9467_otg_microamp),
+ .csel_reg = RT9467_REG_CHG_CTRL10,
+ .csel_mask = RT9467_MASK_OTG_CSEL,
+ .vsel_reg = RT9467_REG_CHG_CTRL5,
+ .vsel_mask = RT9467_MASK_OTG_VSEL,
+ .enable_reg = RT9467_REG_CHG_CTRL1,
+ .enable_mask = RT9467_MASK_OTG_EN,
+ .ops = &rt9467_otg_regulator_ops,
+};
+
+static int rt9467_register_otg_regulator(struct rt9467_chg_data *data)
+{
+ struct regulator_config cfg = {
+ .dev = data->dev,
+ .regmap = data->regmap,
+ .driver_data = data,
+ };
+
+ data->rdev = devm_regulator_register(data->dev, &rt9467_otg_desc, &cfg);
+ return PTR_ERR_OR_ZERO(data->rdev);
+}
+
+static int rt9467_get_value_from_ranges(struct rt9467_chg_data *data,
+ enum rt9467_fields field,
+ enum rt9467_ranges rsel,
+ int *value)
+{
+ const struct linear_range *range = rt9467_ranges + rsel;
+ unsigned int sel;
+ int ret;
+
+ ret = regmap_field_read(data->rm_field[field], &sel);
+ if (ret)
+ return ret;
+
+ return linear_range_get_value(range, sel, value);
+}
+
+static int rt9467_set_value_from_ranges(struct rt9467_chg_data *data,
+ enum rt9467_fields field,
+ enum rt9467_ranges rsel,
+ int value)
+{
+ const struct linear_range *range = rt9467_ranges + rsel;
+ unsigned int sel;
+ bool found;
+ int ret;
+
+ if (rsel == RT9467_RANGE_VMIVR) {
+ ret = linear_range_get_selector_high(range, value, &sel, &found);
+ if (ret)
+ value = range->max_sel;
+ } else {
+ linear_range_get_selector_within(range, value, &sel);
+ }
+
+ return regmap_field_write(data->rm_field[field], sel);
+}
+
+static int rt9467_get_adc_sel(enum rt9467_adc_chan chan, int *sel)
+{
+ switch (chan) {
+ case RT9467_ADC_VBUS_DIV5:
+ case RT9467_ADC_VBUS_DIV2:
+ case RT9467_ADC_VSYS:
+ case RT9467_ADC_VBAT:
+ *sel = chan + 1;
+ return 0;
+ case RT9467_ADC_TS_BAT:
+ *sel = chan + 2;
+ return 0;
+ case RT9467_ADC_IBUS:
+ case RT9467_ADC_IBAT:
+ *sel = chan + 3;
+ return 0;
+ case RT9467_ADC_REGN:
+ case RT9467_ADC_TEMP_JC:
+ *sel = chan + 4;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int rt9467_get_adc_raw_data(struct rt9467_chg_data *data,
+ enum rt9467_adc_chan chan, int *val)
+{
+ unsigned int adc_stat, reg_val, adc_sel;
+ __be16 chan_raw_data;
+ int ret;
+
+ mutex_lock(&data->adc_lock);
+
+ ret = rt9467_get_adc_sel(chan, &adc_sel);
+ if (ret)
+ goto adc_unlock;
+
+ ret = regmap_write(data->regmap, RT9467_REG_CHG_ADC, 0);
+ if (ret) {
+ dev_err(data->dev, "Failed to clear ADC enable\n");
+ goto adc_unlock;
+ }
+
+ reg_val = RT9467_MASK_ADC_START | FIELD_PREP(RT9467_MASK_ADC_IN_SEL, adc_sel);
+ ret = regmap_write(data->regmap, RT9467_REG_CHG_ADC, reg_val);
+ if (ret)
+ goto adc_unlock;
+
+ /* Minimum wait time for one channel processing */
+ msleep(RT9467_ADCCONV_TIME_MS);
+
+ ret = regmap_read_poll_timeout(data->regmap, RT9467_REG_CHG_ADC,
+ adc_stat,
+ !(adc_stat & RT9467_MASK_ADC_START),
+ MILLI, RT9467_ADCCONV_TIME_MS * MILLI);
+ if (ret) {
+ dev_err(data->dev, "Failed to wait ADC conversion, chan = %d\n", chan);
+ goto adc_unlock;
+ }
+
+ ret = regmap_raw_read(data->regmap, RT9467_REG_ADC_DATA_H,
+ &chan_raw_data, sizeof(chan_raw_data));
+ if (ret)
+ goto adc_unlock;
+
+ *val = be16_to_cpu(chan_raw_data);
+
+adc_unlock:
+ mutex_unlock(&data->adc_lock);
+ return ret;
+}
+
+static int rt9467_get_adc(struct rt9467_chg_data *data,
+ enum rt9467_adc_chan chan, int *val)
+{
+ unsigned int aicr_ua, ichg_ua;
+ int ret;
+
+ ret = rt9467_get_adc_raw_data(data, chan, val);
+ if (ret)
+ return ret;
+
+ switch (chan) {
+ case RT9467_ADC_VBUS_DIV5:
+ *val *= 25000;
+ return 0;
+ case RT9467_ADC_VBUS_DIV2:
+ *val *= 10000;
+ return 0;
+ case RT9467_ADC_VBAT:
+ case RT9467_ADC_VSYS:
+ case RT9467_ADC_REGN:
+ *val *= 5000;
+ return 0;
+ case RT9467_ADC_TS_BAT:
+ *val /= 400;
+ return 0;
+ case RT9467_ADC_IBUS:
+ /* UUG MOS turn-on ratio will affect the IBUS adc scale */
+ ret = rt9467_get_value_from_ranges(data, F_IAICR,
+ RT9467_RANGE_IAICR, &aicr_ua);
+ if (ret)
+ return ret;
+
+ *val *= aicr_ua < 400000 ? 29480 : 50000;
+ return 0;
+ case RT9467_ADC_IBAT:
+ /* PP MOS turn-on ratio will affect the ICHG adc scale */
+ ret = rt9467_get_value_from_ranges(data, F_ICHG,
+ RT9467_RANGE_ICHG, &ichg_ua);
+ if (ret)
+ return ret;
+
+ *val *= ichg_ua <= 400000 ? 28500 :
+ ichg_ua <= 800000 ? 31500 : 500000;
+ return 0;
+ case RT9467_ADC_TEMP_JC:
+ *val = ((*val * 2) - 40) * 10;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int rt9467_psy_get_status(struct rt9467_chg_data *data, int *state)
+{
+ unsigned int status;
+ int ret;
+
+ ret = regmap_field_read(data->rm_field[F_CHG_STAT], &status);
+ if (ret)
+ return ret;
+
+ switch (status) {
+ case RT9467_STAT_READY:
+ *state = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ return 0;
+ case RT9467_STAT_PROGRESS:
+ *state = POWER_SUPPLY_STATUS_CHARGING;
+ return 0;
+ case RT9467_STAT_CHARGE_DONE:
+ *state = POWER_SUPPLY_STATUS_FULL;
+ return 0;
+ default:
+ *state = POWER_SUPPLY_STATUS_UNKNOWN;
+ return 0;
+ }
+}
+
+static int rt9467_psy_set_ichg(struct rt9467_chg_data *data, int microamp)
+{
+ int ret;
+
+ mutex_lock(&data->ichg_ieoc_lock);
+
+ if (microamp < 500000) {
+ dev_err(data->dev, "Minimum value must be 500mA\n");
+ microamp = 500000;
+ }
+
+ ret = rt9467_set_value_from_ranges(data, F_ICHG, RT9467_RANGE_ICHG, microamp);
+ if (ret)
+ goto out;
+
+ ret = rt9467_get_value_from_ranges(data, F_ICHG, RT9467_RANGE_ICHG,
+ &data->ichg_ua);
+ if (ret)
+ goto out;
+
+out:
+ mutex_unlock(&data->ichg_ieoc_lock);
+ return ret;
+}
+
+static int rt9467_run_aicl(struct rt9467_chg_data *data)
+{
+ unsigned int statc, aicl_vth;
+ int mivr_vth, aicr_get;
+ int ret = 0;
+
+
+ ret = regmap_read(data->regmap, RT9467_REG_CHG_STATC, &statc);
+ if (ret) {
+ dev_err(data->dev, "Failed to read status\n");
+ return ret;
+ }
+
+ if (!(statc & RT9467_MASK_PWR_RDY) || !(statc & RT9467_MASK_MIVR_STAT)) {
+ dev_info(data->dev, "Condition not matched %d\n", statc);
+ return 0;
+ }
+
+ ret = rt9467_get_value_from_ranges(data, F_VMIVR, RT9467_RANGE_VMIVR,
+ &mivr_vth);
+ if (ret) {
+ dev_err(data->dev, "Failed to get mivr\n");
+ return ret;
+ }
+
+ /* AICL_VTH = MIVR_VTH + 200mV */
+ aicl_vth = mivr_vth + RT9467_AICLVTH_GAP_uV;
+ ret = rt9467_set_value_from_ranges(data, F_AICL_VTH,
+ RT9467_RANGE_AICL_VTH, aicl_vth);
+
+ /* Trigger AICL function */
+ ret = regmap_field_write(data->rm_field[F_AICL_MEAS], 1);
+ if (ret) {
+ dev_err(data->dev, "Failed to set aicl measurement\n");
+ return ret;
+ }
+
+ reinit_completion(&data->aicl_done);
+ ret = wait_for_completion_timeout(&data->aicl_done, msecs_to_jiffies(3500));
+ if (ret)
+ return ret;
+
+ ret = rt9467_get_value_from_ranges(data, F_IAICR, RT9467_RANGE_IAICR, &aicr_get);
+ if (ret) {
+ dev_err(data->dev, "Failed to get aicr\n");
+ return ret;
+ }
+
+ dev_info(data->dev, "aicr get = %d uA\n", aicr_get);
+ return 0;
+}
+
+static int rt9467_psy_set_ieoc(struct rt9467_chg_data *data, int microamp)
+{
+ int ret;
+
+ mutex_lock(&data->ichg_ieoc_lock);
+
+ ret = rt9467_set_value_from_ranges(data, F_IEOC, RT9467_RANGE_IEOC, microamp);
+ if (ret)
+ goto out;
+
+ ret = rt9467_get_value_from_ranges(data, F_IEOC, RT9467_RANGE_IEOC, &data->ieoc_ua);
+ if (ret)
+ goto out;
+
+out:
+ mutex_unlock(&data->ichg_ieoc_lock);
+ return ret;
+}
+
+static const enum power_supply_usb_type rt9467_chg_usb_types[] = {
+ POWER_SUPPLY_USB_TYPE_UNKNOWN,
+ POWER_SUPPLY_USB_TYPE_SDP,
+ POWER_SUPPLY_USB_TYPE_DCP,
+ POWER_SUPPLY_USB_TYPE_CDP,
+};
+
+static const enum power_supply_property rt9467_chg_properties[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_CURRENT_MAX,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX,
+ POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT,
+ POWER_SUPPLY_PROP_INPUT_VOLTAGE_LIMIT,
+ POWER_SUPPLY_PROP_USB_TYPE,
+ POWER_SUPPLY_PROP_PRECHARGE_CURRENT,
+ POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT,
+};
+
+static int rt9467_psy_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct rt9467_chg_data *data = power_supply_get_drvdata(psy);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_STATUS:
+ return rt9467_psy_get_status(data, &val->intval);
+ case POWER_SUPPLY_PROP_ONLINE:
+ return regmap_field_read(data->rm_field[F_PWR_RDY], &val->intval);
+ case POWER_SUPPLY_PROP_CURRENT_MAX:
+ mutex_lock(&data->attach_lock);
+ if (data->psy_usb_type == POWER_SUPPLY_USB_TYPE_UNKNOWN ||
+ data->psy_usb_type == POWER_SUPPLY_USB_TYPE_SDP)
+ val->intval = 500000;
+ else
+ val->intval = 1500000;
+ mutex_unlock(&data->attach_lock);
+ return 0;
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
+ mutex_lock(&data->ichg_ieoc_lock);
+ val->intval = data->ichg_ua;
+ mutex_unlock(&data->ichg_ieoc_lock);
+ return 0;
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
+ val->intval = RT9467_ICHG_MAX_uA;
+ return 0;
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
+ return rt9467_get_value_from_ranges(data, F_VOREG,
+ RT9467_RANGE_VOREG,
+ &val->intval);
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX:
+ val->intval = RT9467_CV_MAX_uV;
+ return 0;
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
+ return rt9467_get_value_from_ranges(data, F_IAICR,
+ RT9467_RANGE_IAICR,
+ &val->intval);
+ case POWER_SUPPLY_PROP_INPUT_VOLTAGE_LIMIT:
+ return rt9467_get_value_from_ranges(data, F_VMIVR,
+ RT9467_RANGE_VMIVR,
+ &val->intval);
+ case POWER_SUPPLY_PROP_USB_TYPE:
+ mutex_lock(&data->attach_lock);
+ val->intval = data->psy_usb_type;
+ mutex_unlock(&data->attach_lock);
+ return 0;
+ case POWER_SUPPLY_PROP_PRECHARGE_CURRENT:
+ return rt9467_get_value_from_ranges(data, F_IPREC,
+ RT9467_RANGE_IPREC,
+ &val->intval);
+ case POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT:
+ mutex_lock(&data->ichg_ieoc_lock);
+ val->intval = data->ieoc_ua;
+ mutex_unlock(&data->ichg_ieoc_lock);
+ return 0;
+ default:
+ return -ENODATA;
+ }
+}
+
+static int rt9467_psy_set_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ const union power_supply_propval *val)
+{
+ struct rt9467_chg_data *data = power_supply_get_drvdata(psy);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_STATUS:
+ return regmap_field_write(data->rm_field[F_CHG_EN], val->intval);
+ case POWER_SUPPLY_PROP_ONLINE:
+ return regmap_field_write(data->rm_field[F_HZ], val->intval);
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
+ return rt9467_psy_set_ichg(data, val->intval);
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
+ return rt9467_set_value_from_ranges(data, F_VOREG,
+ RT9467_RANGE_VOREG, val->intval);
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
+ if (val->intval == -1)
+ return rt9467_run_aicl(data);
+ else
+ return rt9467_set_value_from_ranges(data, F_IAICR,
+ RT9467_RANGE_IAICR,
+ val->intval);
+ case POWER_SUPPLY_PROP_INPUT_VOLTAGE_LIMIT:
+ return rt9467_set_value_from_ranges(data, F_VMIVR,
+ RT9467_RANGE_VMIVR, val->intval);
+ case POWER_SUPPLY_PROP_PRECHARGE_CURRENT:
+ return rt9467_set_value_from_ranges(data, F_IPREC,
+ RT9467_RANGE_IPREC, val->intval);
+ case POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT:
+ return rt9467_psy_set_ieoc(data, val->intval);
+ case POWER_SUPPLY_PROP_USB_TYPE:
+ return regmap_field_write(data->rm_field[F_USBCHGEN], val->intval);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int rt9467_chg_prop_is_writeable(struct power_supply *psy,
+ enum power_supply_property psp)
+{
+ switch (psp) {
+ case POWER_SUPPLY_PROP_STATUS:
+ case POWER_SUPPLY_PROP_ONLINE:
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
+ case POWER_SUPPLY_PROP_INPUT_VOLTAGE_LIMIT:
+ case POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT:
+ case POWER_SUPPLY_PROP_PRECHARGE_CURRENT:
+ case POWER_SUPPLY_PROP_USB_TYPE:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+static const struct power_supply_desc rt9467_chg_psy_desc = {
+ .name = "rt9467-charger",
+ .type = POWER_SUPPLY_TYPE_USB,
+ .usb_types = rt9467_chg_usb_types,
+ .num_usb_types = ARRAY_SIZE(rt9467_chg_usb_types),
+ .properties = rt9467_chg_properties,
+ .num_properties = ARRAY_SIZE(rt9467_chg_properties),
+ .property_is_writeable = rt9467_chg_prop_is_writeable,
+ .get_property = rt9467_psy_get_property,
+ .set_property = rt9467_psy_set_property,
+};
+
+static inline struct rt9467_chg_data *psy_device_to_chip(struct device *dev)
+{
+ return power_supply_get_drvdata(to_power_supply(dev));
+}
+
+static ssize_t sysoff_enable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct rt9467_chg_data *data = psy_device_to_chip(dev);
+ unsigned int sysoff_enable;
+ int ret;
+
+ ret = regmap_field_read(data->rm_field[F_SHIP_MODE], &sysoff_enable);
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "%d\n", sysoff_enable);
+}
+
+static ssize_t sysoff_enable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct rt9467_chg_data *data = psy_device_to_chip(dev);
+ unsigned int tmp;
+ int ret;
+
+ ret = kstrtouint(buf, 10, &tmp);
+ if (ret)
+ return ret;
+
+ ret = regmap_field_write(data->rm_field[F_SHIP_MODE], !!tmp);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(sysoff_enable);
+
+static struct attribute *rt9467_sysfs_attrs[] = {
+ &dev_attr_sysoff_enable.attr,
+ NULL
+};
+
+ATTRIBUTE_GROUPS(rt9467_sysfs);
+
+static int rt9467_register_psy(struct rt9467_chg_data *data)
+{
+ struct power_supply_config cfg = {
+ .drv_data = data,
+ .of_node = dev_of_node(data->dev),
+ .attr_grp = rt9467_sysfs_groups,
+ };
+
+ data->psy = devm_power_supply_register(data->dev, &rt9467_chg_psy_desc,
+ &cfg);
+ return PTR_ERR_OR_ZERO(data->psy);
+}
+
+static int rt9467_mivr_handler(struct rt9467_chg_data *data)
+{
+ unsigned int mivr_act;
+ int ret, ibus_ma;
+
+ /*
+ * back-boost workaround
+ * If (mivr_active & ibus < 100mA), toggle cfo bit
+ */
+ ret = regmap_field_read(data->rm_field[F_CHG_MIVR], &mivr_act);
+ if (ret) {
+ dev_err(data->dev, "Failed to read MIVR stat\n");
+ return ret;
+ }
+
+ if (!mivr_act)
+ return 0;
+
+ ret = rt9467_get_adc(data, RT9467_ADC_IBUS, &ibus_ma);
+ if (ret) {
+ dev_err(data->dev, "Failed to get IBUS\n");
+ return ret;
+ }
+
+ if (ibus_ma < 100000) {
+ ret = regmap_field_write(data->rm_field[F_CFO_EN], 0);
+ ret |= regmap_field_write(data->rm_field[F_CFO_EN], 1);
+ if (ret)
+ dev_err(data->dev, "Failed to toggle cfo\n");
+ }
+
+ return ret;
+}
+
+static irqreturn_t rt9467_statc_handler(int irq, void *priv)
+{
+ struct rt9467_chg_data *data = priv;
+ unsigned int new_stat, evts = 0;
+ int ret;
+
+ ret = regmap_read(data->regmap, RT9467_REG_CHG_STATC, &new_stat);
+ if (ret) {
+ dev_err(data->dev, "Failed to read chg_statc\n");
+ return IRQ_NONE;
+ }
+
+ evts = data->old_stat ^ new_stat;
+ data->old_stat = new_stat;
+
+ if ((evts & new_stat) & RT9467_MASK_MIVR_STAT) {
+ ret = rt9467_mivr_handler(data);
+ if (ret)
+ dev_err(data->dev, "Failed to handle mivr stat\n");
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t rt9467_wdt_handler(int irq, void *priv)
+{
+ struct rt9467_chg_data *data = priv;
+ unsigned int dev_id;
+ int ret;
+
+ /* Any i2c communication can kick watchdog timer */
+ ret = regmap_read(data->regmap, RT9467_REG_DEVICE_ID, &dev_id);
+ if (ret) {
+ dev_err(data->dev, "Failed to kick wdt (%d)\n", ret);
+ return IRQ_NONE;
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int rt9467_report_usb_state(struct rt9467_chg_data *data)
+{
+ unsigned int usb_stat, power_ready;
+ bool psy_changed = true;
+ int ret;
+
+ ret = regmap_field_read(data->rm_field[F_USB_STATUS], &usb_stat);
+ ret |= regmap_field_read(data->rm_field[F_PWR_RDY], &power_ready);
+ if (ret)
+ return ret;
+
+ if (!power_ready)
+ usb_stat = RT9467_CHG_TYPE_NOVBUS;
+
+ mutex_lock(&data->attach_lock);
+
+ switch (usb_stat) {
+ case RT9467_CHG_TYPE_NOVBUS:
+ data->psy_usb_type = POWER_SUPPLY_USB_TYPE_UNKNOWN;
+ break;
+ case RT9467_CHG_TYPE_SDP:
+ data->psy_usb_type = POWER_SUPPLY_USB_TYPE_SDP;
+ break;
+ case RT9467_CHG_TYPE_SDPNSTD:
+ data->psy_usb_type = POWER_SUPPLY_USB_TYPE_DCP;
+ break;
+ case RT9467_CHG_TYPE_DCP:
+ data->psy_usb_type = POWER_SUPPLY_USB_TYPE_DCP;
+ break;
+ case RT9467_CHG_TYPE_CDP:
+ data->psy_usb_type = POWER_SUPPLY_USB_TYPE_CDP;
+ break;
+ case RT9467_CHG_TYPE_UNDER_GOING:
+ default:
+ psy_changed = false;
+ break;
+ }
+
+ mutex_unlock(&data->attach_lock);
+
+ if (psy_changed)
+ power_supply_changed(data->psy);
+
+ return 0;
+}
+
+static irqreturn_t rt9467_usb_state_handler(int irq, void *priv)
+{
+ struct rt9467_chg_data *data = priv;
+ int ret;
+
+ ret = rt9467_report_usb_state(data);
+ if (ret) {
+ dev_err(data->dev, "Failed to report attach type (%d)\n", ret);
+ return IRQ_NONE;
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t rt9467_aiclmeas_handler(int irq, void *priv)
+{
+ struct rt9467_chg_data *data = priv;
+
+ complete(&data->aicl_done);
+ return IRQ_HANDLED;
+}
+
+#define RT9467_IRQ_DESC(_name, _handler_func, _hwirq) \
+{ \
+ .name = #_name, \
+ .handler = rt9467_##_handler_func##_handler, \
+ .hwirq = _hwirq, \
+}
+
+static int rt9467_request_interrupt(struct rt9467_chg_data *data)
+{
+ struct device *dev = data->dev;
+ static const struct {
+ const char *name;
+ int hwirq;
+ irq_handler_t handler;
+ } rt9467_exclusive_irqs[] = {
+ RT9467_IRQ_DESC(statc, statc, RT9467_IRQ_TS_STATC),
+ RT9467_IRQ_DESC(wdt, wdt, RT9467_IRQ_WDTMR),
+ RT9467_IRQ_DESC(attach, usb_state, RT9467_IRQ_ATTACH),
+ RT9467_IRQ_DESC(detach, usb_state, RT9467_IRQ_DETACH),
+ RT9467_IRQ_DESC(aiclmeas, aiclmeas, RT9467_IRQ_CHG_AICLM),
+ }, rt9466_exclusive_irqs[] = {
+ RT9467_IRQ_DESC(statc, statc, RT9467_IRQ_TS_STATC),
+ RT9467_IRQ_DESC(wdt, wdt, RT9467_IRQ_WDTMR),
+ RT9467_IRQ_DESC(aiclmeas, aiclmeas, RT9467_IRQ_CHG_AICLM),
+ }, *chg_irqs;
+ int num_chg_irqs, i, virq, ret;
+
+ if (data->vid == RT9466_VID) {
+ chg_irqs = rt9466_exclusive_irqs;
+ num_chg_irqs = ARRAY_SIZE(rt9466_exclusive_irqs);
+ } else {
+ chg_irqs = rt9467_exclusive_irqs;
+ num_chg_irqs = ARRAY_SIZE(rt9467_exclusive_irqs);
+ }
+
+ for (i = 0; i < num_chg_irqs; i++) {
+ virq = regmap_irq_get_virq(data->irq_chip_data, chg_irqs[i].hwirq);
+ if (virq <= 0)
+ return dev_err_probe(dev, virq, "Failed to get (%s) irq\n",
+ chg_irqs[i].name);
+
+ ret = devm_request_threaded_irq(dev, virq, NULL, chg_irqs[i].handler,
+ IRQF_ONESHOT, chg_irqs[i].name, data);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to request (%s) irq\n",
+ chg_irqs[i].name);
+ }
+
+ return 0;
+}
+
+static int rt9467_do_charger_init(struct rt9467_chg_data *data)
+{
+ struct device *dev = data->dev;
+ int ret;
+
+ ret = regmap_write(data->regmap, RT9467_REG_CHG_ADC, 0);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to reset ADC\n");
+
+ ret = rt9467_get_value_from_ranges(data, F_ICHG, RT9467_RANGE_ICHG,
+ &data->ichg_ua);
+ ret |= rt9467_get_value_from_ranges(data, F_IEOC, RT9467_RANGE_IEOC,
+ &data->ieoc_ua);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to init ichg/ieoc value\n");
+
+ ret = regmap_update_bits(data->regmap, RT9467_REG_CHG_STATC_CTRL,
+ RT9467_MASK_PWR_RDY | RT9467_MASK_MIVR_STAT, 0);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to make statc unmask\n");
+
+ /* Select IINLMTSEL to use AICR */
+ ret = regmap_field_write(data->rm_field[F_IINLMTSEL],
+ RT9467_IINLMTSEL_AICR);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to set iinlmtsel to AICR\n");
+
+ /* Wait for AICR Rampping */
+ msleep(150);
+
+ /* Disable hardware ILIM */
+ ret = regmap_field_write(data->rm_field[F_ILIM_EN], 0);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to disable hardware ILIM\n");
+
+ /* Set inductor OCP to high level */
+ ret = regmap_field_write(data->rm_field[F_OCP], 1);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to set higher inductor OCP level\n");
+
+ /* Set charge termination default enable */
+ ret = regmap_field_write(data->rm_field[F_TE], 1);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to set TE=1\n");
+
+ /* Set 12hrs fast charger timer */
+ ret = regmap_field_write(data->rm_field[F_WT_FC], 4);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to set WT_FC\n");
+
+ /* Toggle BC12 function */
+ ret = regmap_field_write(data->rm_field[F_USBCHGEN], 0);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to disable BC12\n");
+
+ return regmap_field_write(data->rm_field[F_USBCHGEN], 1);
+}
+
+static bool rt9467_is_accessible_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case 0x00 ... 0x1A:
+ case 0x20 ... 0x38:
+ case 0x40 ... 0x49:
+ case 0x50 ... 0x57:
+ case 0x60 ... 0x67:
+ case 0x70 ... 0x79:
+ case 0x82 ... 0x85:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static const struct regmap_config rt9467_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0x85,
+ .writeable_reg = rt9467_is_accessible_reg,
+ .readable_reg = rt9467_is_accessible_reg,
+};
+
+static int rt9467_check_vendor_info(struct rt9467_chg_data *data)
+{
+ unsigned int vid;
+ int ret;
+
+ ret = regmap_field_read(data->rm_field[F_VENDOR], &vid);
+ if (ret) {
+ dev_err(data->dev, "Failed to get vid\n");
+ return ret;
+ }
+
+ if ((vid != RT9466_VID) && (vid != RT9467_VID))
+ return dev_err_probe(data->dev, -ENODEV,
+ "VID not correct [0x%02X]\n", vid);
+
+ data->vid = vid;
+ return 0;
+}
+
+static int rt9467_reset_chip(struct rt9467_chg_data *data)
+{
+ int ret;
+
+ /* Disable HZ before reset chip */
+ ret = regmap_field_write(data->rm_field[F_HZ], 0);
+ if (ret)
+ return ret;
+
+ return regmap_field_write(data->rm_field[F_RST], 1);
+}
+
+static void rt9467_chg_destroy_adc_lock(void *data)
+{
+ struct mutex *adc_lock = data;
+
+ mutex_destroy(adc_lock);
+}
+
+static void rt9467_chg_destroy_attach_lock(void *data)
+{
+ struct mutex *attach_lock = data;
+
+ mutex_destroy(attach_lock);
+}
+
+static void rt9467_chg_destroy_ichg_ieoc_lock(void *data)
+{
+ struct mutex *ichg_ieoc_lock = data;
+
+ mutex_destroy(ichg_ieoc_lock);
+}
+
+static void rt9467_chg_complete_aicl_done(void *data)
+{
+ struct completion *aicl_done = data;
+
+ complete(aicl_done);
+}
+
+static int rt9467_charger_probe(struct i2c_client *i2c)
+{
+ struct device *dev = &i2c->dev;
+ struct rt9467_chg_data *data;
+ struct gpio_desc *ceb_gpio;
+ int ret;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->dev = &i2c->dev;
+ i2c_set_clientdata(i2c, data);
+
+ /* Default pull charge enable gpio to make 'CHG_EN' by SW control only */
+ ceb_gpio = devm_gpiod_get_optional(dev, "charge-enable", GPIOD_OUT_LOW);
+ if (IS_ERR(ceb_gpio))
+ return dev_err_probe(dev, PTR_ERR(ceb_gpio),
+ "Failed to config charge enable gpio\n");
+
+ data->regmap = devm_regmap_init_i2c(i2c, &rt9467_regmap_config);
+ if (IS_ERR(data->regmap))
+ return dev_err_probe(dev, PTR_ERR(data->regmap),
+ "Failed to init regmap\n");
+
+ ret = devm_regmap_field_bulk_alloc(dev, data->regmap,
+ data->rm_field, rt9467_chg_fields,
+ ARRAY_SIZE(rt9467_chg_fields));
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to alloc regmap fields\n");
+
+ ret = rt9467_check_vendor_info(data);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to check vendor info");
+
+ ret = rt9467_reset_chip(data);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to reset chip\n");
+
+ ret = devm_regmap_add_irq_chip(dev, data->regmap, i2c->irq,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT, 0,
+ &rt9467_irq_chip, &data->irq_chip_data);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to add irq chip\n");
+
+ mutex_init(&data->adc_lock);
+ ret = devm_add_action_or_reset(dev, rt9467_chg_destroy_adc_lock,
+ &data->adc_lock);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to init ADC lock\n");
+
+ mutex_init(&data->attach_lock);
+ ret = devm_add_action_or_reset(dev, rt9467_chg_destroy_attach_lock,
+ &data->attach_lock);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to init attach lock\n");
+
+ mutex_init(&data->ichg_ieoc_lock);
+ ret = devm_add_action_or_reset(dev, rt9467_chg_destroy_ichg_ieoc_lock,
+ &data->ichg_ieoc_lock);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to init ICHG/IEOC lock\n");
+
+ init_completion(&data->aicl_done);
+ ret = devm_add_action_or_reset(dev, rt9467_chg_complete_aicl_done,
+ &data->aicl_done);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to init AICL done completion\n");
+
+ ret = rt9467_do_charger_init(data);
+ if (ret)
+ return ret;
+
+ ret = rt9467_register_otg_regulator(data);
+ if (ret)
+ return ret;
+
+ ret = rt9467_register_psy(data);
+ if (ret)
+ return ret;
+
+ return rt9467_request_interrupt(data);
+}
+
+static const struct of_device_id rt9467_charger_of_match_table[] = {
+ { .compatible = "richtek,rt9467", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, rt9467_charger_of_match_table);
+
+static struct i2c_driver rt9467_charger_driver = {
+ .driver = {
+ .name = "rt9467-charger",
+ .of_match_table = rt9467_charger_of_match_table,
+ },
+ .probe_new = rt9467_charger_probe,
+};
+module_i2c_driver(rt9467_charger_driver);
+
+MODULE_DESCRIPTION("Richtek RT9467 Charger Driver");
+MODULE_AUTHOR("ChiYuan Huang <cy_huang@richtek.com>");
+MODULE_AUTHOR("ChiaEn Wu <chiaen_wu@richtek.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/power/supply/rt9471.c b/drivers/power/supply/rt9471.c
new file mode 100644
index 000000000000..1ea40876494b
--- /dev/null
+++ b/drivers/power/supply/rt9471.c
@@ -0,0 +1,930 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2022 Richtek Technology Corp.
+ *
+ * Authors: Alina Yu <alina_yu@richtek.com>
+ * ChiYuan Huang <cy_huang@richtek.com>
+ */
+
+#include <linux/bits.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/kstrtox.h>
+#include <linux/linear_range.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/power_supply.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+#include <linux/sysfs.h>
+
+#define RT9471_REG_OTGCFG 0x00
+#define RT9471_REG_TOP 0x01
+#define RT9471_REG_FUNC 0x02
+#define RT9471_REG_IBUS 0x03
+#define RT9471_REG_VBUS 0x04
+#define RT9471_REG_PRECHG 0x05
+#define RT9471_REG_VCHG 0x07
+#define RT9471_REG_ICHG 0x08
+#define RT9471_REG_CHGTMR 0x09
+#define RT9471_REG_EOC 0x0A
+#define RT9471_REG_INFO 0x0B
+#define RT9471_REG_JEITA 0x0C
+#define RT9471_REG_PUMP_EXP 0x0D
+#define RT9471_REG_DPDMDET 0x0E
+#define RT9471_REG_ICSTAT 0x0F
+#define RT9471_REG_STAT0 0x10
+#define RT9471_REG_STAT1 0x11
+#define RT9471_REG_STAT2 0x12
+#define RT9471_REG_IRQ0 0x20
+#define RT9471_REG_MASK0 0x30
+
+#define RT9471_OTGCV_MASK GENMASK(7, 6)
+#define RT9471_OTGCC_MASK BIT(0)
+#define RT9471_OTGEN_MASK BIT(1)
+#define RT9471_CHGFAULT_MASK GENMASK(4, 1)
+
+#define RT9471_NUM_IRQ_REGS 4
+#define RT9471_OTGCV_MINUV 4850000
+#define RT9471_OTGCV_STEPUV 150000
+#define RT9471_NUM_VOTG 4
+#define RT9471_VCHG_MAXUV 4700000
+#define RT9471_ICHG_MAXUA 3150000
+
+/* Device ID */
+#define RT9470_DEVID 0x09
+#define RT9470D_DEVID 0x0A
+#define RT9471_DEVID 0x0D
+#define RT9471D_DEVID 0x0E
+
+/* IRQ number */
+#define RT9471_IRQ_BC12_DONE 0
+#define RT9471_IRQ_DETACH 1
+#define RT9471_IRQ_RECHG 2
+#define RT9471_IRQ_CHG_DONE 3
+#define RT9471_IRQ_BG_CHG 4
+#define RT9471_IRQ_IE0C 5
+#define RT9471_IRQ_CHG_RDY 6
+#define RT9471_IRQ_VBUS_GD 7
+#define RT9471_IRQ_CHG_BATOV 9
+#define RT9471_IRQ_CHG_SYSOV 10
+#define RT9471_IRQ_CHG_TOUT 11
+#define RT9471_IRQ_CHG_BUSUV 12
+#define RT9471_IRQ_CHG_THREG 13
+#define RT9471_IRQ_CHG_AICR 14
+#define RT9471_IRQ_CHG_MIVR 15
+#define RT9471_IRQ_SYS_SHORT 16
+#define RT9471_IRQ_SYS_MIN 17
+#define RT9471_IRQ_AICC_DONE 18
+#define RT9471_IRQ_PE_DONE 19
+#define RT9471_IRQ_JEITA_COLD 20
+#define RT9471_IRQ_JEITA_COOL 21
+#define RT9471_IRQ_JEITA_WARM 22
+#define RT9471_IRQ_JEITA_HOT 23
+#define RT9471_IRQ_OTG_FAULT 24
+#define RT9471_IRQ_OTG_LBP 25
+#define RT9471_IRQ_OTG_CC 26
+#define RT9471_IRQ_WDT 29
+#define RT9471_IRQ_VAC_OV 30
+#define RT9471_IRQ_OTP 31
+
+enum rt9471_fields {
+ F_WDT = 0,
+ F_WDT_RST,
+ F_CHG_EN,
+ F_HZ,
+ F_BATFET_DIS,
+ F_AICR,
+ F_AICC_EN,
+ F_MIVR,
+ F_IPRE_CHG,
+ F_VPRE_CHG,
+ F_VBAT_REG,
+ F_ICHG_REG,
+ F_EOC_RST,
+ F_TE,
+ F_IEOC_CHG,
+ F_DEVICE_ID,
+ F_REG_RST,
+ F_BC12_EN,
+ F_IC_STAT,
+ F_PORT_STAT,
+ F_ST_CHG_DONE,
+ F_ST_CHG_RDY,
+ F_ST_VBUS_GD,
+ F_MAX_FIELDS
+};
+
+enum rt9471_ranges {
+ RT9471_RANGE_AICR = 0,
+ RT9471_RANGE_MIVR,
+ RT9471_RANGE_IPRE,
+ RT9471_RANGE_VCHG,
+ RT9471_RANGE_ICHG,
+ RT9471_RANGE_IEOC,
+ RT9471_MAX_RANGES
+};
+
+enum {
+ RT9471_PORTSTAT_APPLE_10W = 8,
+ RT9471_PORTSTAT_SAMSUNG_10W,
+ RT9471_PORTSTAT_APPLE_5W,
+ RT9471_PORTSTAT_APPLE_12W,
+ RT9471_PORTSTAT_NSTD,
+ RT9471_PORTSTAT_SDP,
+ RT9471_PORTSTAT_CDP,
+ RT9471_PORTSTAT_DCP,
+};
+
+struct rt9471_chip {
+ struct device *dev;
+ struct regmap *regmap;
+ struct regmap_field *rm_fields[F_MAX_FIELDS];
+ struct regmap_irq_chip_data *irq_chip_data;
+ struct regulator_dev *otg_rdev;
+ struct power_supply *psy;
+ struct power_supply_desc psy_desc;
+ struct mutex var_lock;
+ enum power_supply_usb_type psy_usb_type;
+ int psy_usb_curr;
+};
+
+static const struct reg_field rt9471_reg_fields[F_MAX_FIELDS] = {
+ [F_WDT] = REG_FIELD(RT9471_REG_TOP, 0, 0),
+ [F_WDT_RST] = REG_FIELD(RT9471_REG_TOP, 1, 1),
+ [F_CHG_EN] = REG_FIELD(RT9471_REG_FUNC, 0, 0),
+ [F_HZ] = REG_FIELD(RT9471_REG_FUNC, 5, 5),
+ [F_BATFET_DIS] = REG_FIELD(RT9471_REG_FUNC, 7, 7),
+ [F_AICR] = REG_FIELD(RT9471_REG_IBUS, 0, 5),
+ [F_AICC_EN] = REG_FIELD(RT9471_REG_IBUS, 7, 7),
+ [F_MIVR] = REG_FIELD(RT9471_REG_VBUS, 0, 3),
+ [F_IPRE_CHG] = REG_FIELD(RT9471_REG_PRECHG, 0, 3),
+ [F_VPRE_CHG] = REG_FIELD(RT9471_REG_PRECHG, 4, 6),
+ [F_VBAT_REG] = REG_FIELD(RT9471_REG_VCHG, 0, 6),
+ [F_ICHG_REG] = REG_FIELD(RT9471_REG_ICHG, 0, 5),
+ [F_EOC_RST] = REG_FIELD(RT9471_REG_EOC, 0, 0),
+ [F_TE] = REG_FIELD(RT9471_REG_EOC, 1, 1),
+ [F_IEOC_CHG] = REG_FIELD(RT9471_REG_EOC, 4, 7),
+ [F_DEVICE_ID] = REG_FIELD(RT9471_REG_INFO, 3, 6),
+ [F_REG_RST] = REG_FIELD(RT9471_REG_INFO, 7, 7),
+ [F_BC12_EN] = REG_FIELD(RT9471_REG_DPDMDET, 7, 7),
+ [F_IC_STAT] = REG_FIELD(RT9471_REG_ICSTAT, 0, 3),
+ [F_PORT_STAT] = REG_FIELD(RT9471_REG_ICSTAT, 4, 7),
+ [F_ST_CHG_DONE] = REG_FIELD(RT9471_REG_STAT0, 3, 3),
+ [F_ST_CHG_RDY] = REG_FIELD(RT9471_REG_STAT0, 6, 6),
+ [F_ST_VBUS_GD] = REG_FIELD(RT9471_REG_STAT0, 7, 7),
+};
+
+static const struct linear_range rt9471_chg_ranges[RT9471_MAX_RANGES] = {
+ [RT9471_RANGE_AICR] = { .min = 50000, .min_sel = 1, .max_sel = 63, .step = 50000 },
+ [RT9471_RANGE_MIVR] = { .min = 3900000, .min_sel = 0, .max_sel = 15, .step = 100000 },
+ [RT9471_RANGE_IPRE] = { .min = 50000, .min_sel = 0, .max_sel = 15, .step = 50000 },
+ [RT9471_RANGE_VCHG] = { .min = 3900000, .min_sel = 0, .max_sel = 80, .step = 10000 },
+ [RT9471_RANGE_ICHG] = { .min = 0, .min_sel = 0, .max_sel = 63, .step = 50000 },
+ [RT9471_RANGE_IEOC] = { .min = 50000, .min_sel = 0, .max_sel = 15, .step = 50000 },
+};
+
+static int rt9471_set_value_by_field_range(struct rt9471_chip *chip,
+ enum rt9471_fields field,
+ enum rt9471_ranges range, int val)
+{
+ unsigned int sel;
+
+ if (val < 0)
+ return -EINVAL;
+
+ linear_range_get_selector_within(rt9471_chg_ranges + range, val, &sel);
+
+ return regmap_field_write(chip->rm_fields[field], sel);
+}
+
+
+static int rt9471_get_value_by_field_range(struct rt9471_chip *chip,
+ enum rt9471_fields field,
+ enum rt9471_ranges range, int *val)
+{
+ unsigned int sel, rvalue;
+ int ret;
+
+ ret = regmap_field_read(chip->rm_fields[field], &sel);
+ if (ret)
+ return ret;
+
+ ret = linear_range_get_value(rt9471_chg_ranges + range, sel, &rvalue);
+ if (ret)
+ return ret;
+
+ *val = rvalue;
+ return 0;
+}
+
+static int rt9471_set_ieoc(struct rt9471_chip *chip, int microamp)
+{
+ int ret;
+
+ if (microamp == 0)
+ return regmap_field_write(chip->rm_fields[F_TE], 0);
+
+ ret = rt9471_set_value_by_field_range(chip, F_IEOC_CHG, RT9471_RANGE_IEOC, microamp);
+ if (ret)
+ return ret;
+
+ /* After applying the new IEOC value, enable charge termination */
+ return regmap_field_write(chip->rm_fields[F_TE], 1);
+}
+
+static int rt9471_get_ieoc(struct rt9471_chip *chip, int *microamp)
+{
+ unsigned int chg_term_enable;
+ int ret;
+
+ ret = regmap_field_read(chip->rm_fields[F_TE], &chg_term_enable);
+ if (ret)
+ return ret;
+
+ if (!chg_term_enable) {
+ *microamp = 0;
+ return 0;
+ }
+
+ return rt9471_get_value_by_field_range(chip, F_IEOC_CHG, RT9471_RANGE_IEOC, microamp);
+}
+
+static int rt9471_get_status(struct rt9471_chip *chip, int *status)
+{
+ unsigned int chg_ready, chg_done, fault_stat;
+ int ret;
+
+ ret = regmap_field_read(chip->rm_fields[F_ST_CHG_RDY], &chg_ready);
+ if (ret)
+ return ret;
+
+ ret = regmap_field_read(chip->rm_fields[F_ST_CHG_DONE], &chg_done);
+ if (ret)
+ return ret;
+
+ ret = regmap_read(chip->regmap, RT9471_REG_STAT1, &fault_stat);
+ if (ret)
+ return ret;
+
+ fault_stat &= RT9471_CHGFAULT_MASK;
+
+ if (chg_ready && chg_done)
+ *status = POWER_SUPPLY_STATUS_FULL;
+ else if (chg_ready && fault_stat)
+ *status = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ else if (chg_ready && !fault_stat)
+ *status = POWER_SUPPLY_STATUS_CHARGING;
+ else
+ *status = POWER_SUPPLY_STATUS_DISCHARGING;
+
+ return 0;
+}
+
+static int rt9471_get_vbus_good(struct rt9471_chip *chip, int *stat)
+{
+ unsigned int vbus_gd;
+ int ret;
+
+ ret = regmap_field_read(chip->rm_fields[F_ST_VBUS_GD], &vbus_gd);
+ if (ret)
+ return ret;
+
+ *stat = vbus_gd;
+ return 0;
+}
+
+static int rt9471_get_usb_type(struct rt9471_chip *chip, int *usb_type)
+{
+ mutex_lock(&chip->var_lock);
+ *usb_type = chip->psy_usb_type;
+ mutex_unlock(&chip->var_lock);
+
+ return 0;
+}
+
+static int rt9471_get_usb_type_current(struct rt9471_chip *chip,
+ int *microamp)
+{
+ mutex_lock(&chip->var_lock);
+ *microamp = chip->psy_usb_curr;
+ mutex_unlock(&chip->var_lock);
+
+ return 0;
+}
+
+static enum power_supply_property rt9471_charger_properties[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_CURRENT_MAX,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX,
+ POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT,
+ POWER_SUPPLY_PROP_INPUT_VOLTAGE_LIMIT,
+ POWER_SUPPLY_PROP_USB_TYPE,
+ POWER_SUPPLY_PROP_PRECHARGE_CURRENT,
+ POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT,
+ POWER_SUPPLY_PROP_MODEL_NAME,
+ POWER_SUPPLY_PROP_MANUFACTURER,
+};
+
+static enum power_supply_usb_type rt9471_charger_usb_types[] = {
+ POWER_SUPPLY_USB_TYPE_UNKNOWN,
+ POWER_SUPPLY_USB_TYPE_SDP,
+ POWER_SUPPLY_USB_TYPE_DCP,
+ POWER_SUPPLY_USB_TYPE_CDP,
+ POWER_SUPPLY_USB_TYPE_APPLE_BRICK_ID,
+};
+
+static int rt9471_charger_property_is_writeable(struct power_supply *psy,
+ enum power_supply_property psp)
+{
+ switch (psp) {
+ case POWER_SUPPLY_PROP_STATUS:
+ case POWER_SUPPLY_PROP_ONLINE:
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
+ case POWER_SUPPLY_PROP_INPUT_VOLTAGE_LIMIT:
+ case POWER_SUPPLY_PROP_PRECHARGE_CURRENT:
+ case POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+static int rt9471_charger_set_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ const union power_supply_propval *val)
+{
+ struct rt9471_chip *chip = power_supply_get_drvdata(psy);
+ int value = val->intval;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_STATUS:
+ return regmap_field_write(chip->rm_fields[F_CHG_EN], !!value);
+ case POWER_SUPPLY_PROP_ONLINE:
+ return regmap_field_write(chip->rm_fields[F_HZ], !value);
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
+ return rt9471_set_value_by_field_range(chip, F_ICHG_REG, RT9471_RANGE_ICHG, value);
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
+ return rt9471_set_value_by_field_range(chip, F_VBAT_REG, RT9471_RANGE_VCHG, value);
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
+ return rt9471_set_value_by_field_range(chip, F_AICR, RT9471_RANGE_AICR, value);
+ case POWER_SUPPLY_PROP_INPUT_VOLTAGE_LIMIT:
+ return rt9471_set_value_by_field_range(chip, F_MIVR, RT9471_RANGE_MIVR, value);
+ case POWER_SUPPLY_PROP_PRECHARGE_CURRENT:
+ return rt9471_set_value_by_field_range(chip, F_IPRE_CHG, RT9471_RANGE_IPRE, value);
+ case POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT:
+ return rt9471_set_ieoc(chip, val->intval);
+ default:
+ return -EINVAL;
+ }
+}
+
+static const char * const rt9471_manufacturer = "Richtek Technology Corp.";
+static const char * const rt9471_model = "RT9471";
+
+static int rt9471_charger_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct rt9471_chip *chip = power_supply_get_drvdata(psy);
+ int *pvalue = &val->intval;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_STATUS:
+ return rt9471_get_status(chip, pvalue);
+ case POWER_SUPPLY_PROP_ONLINE:
+ return rt9471_get_vbus_good(chip, pvalue);
+ case POWER_SUPPLY_PROP_CURRENT_MAX:
+ return rt9471_get_usb_type_current(chip, pvalue);
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
+ return rt9471_get_value_by_field_range(chip, F_ICHG_REG, RT9471_RANGE_ICHG, pvalue);
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
+ *pvalue = RT9471_ICHG_MAXUA;
+ return 0;
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
+ return rt9471_get_value_by_field_range(chip, F_VBAT_REG, RT9471_RANGE_VCHG, pvalue);
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX:
+ val->intval = RT9471_VCHG_MAXUV;
+ return 0;
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
+ return rt9471_get_value_by_field_range(chip, F_AICR, RT9471_RANGE_AICR, pvalue);
+ case POWER_SUPPLY_PROP_INPUT_VOLTAGE_LIMIT:
+ return rt9471_get_value_by_field_range(chip, F_MIVR, RT9471_RANGE_MIVR, pvalue);
+ case POWER_SUPPLY_PROP_USB_TYPE:
+ return rt9471_get_usb_type(chip, pvalue);
+ case POWER_SUPPLY_PROP_PRECHARGE_CURRENT:
+ return rt9471_get_value_by_field_range(chip, F_IPRE_CHG, RT9471_RANGE_IPRE, pvalue);
+ case POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT:
+ return rt9471_get_ieoc(chip, pvalue);
+ case POWER_SUPPLY_PROP_MODEL_NAME:
+ val->strval = rt9471_model;
+ return 0;
+ case POWER_SUPPLY_PROP_MANUFACTURER:
+ val->strval = rt9471_manufacturer;
+ return 0;
+ default:
+ return -ENODATA;
+ }
+}
+
+static irqreturn_t rt9471_vbus_gd_handler(int irqno, void *devid)
+{
+ struct rt9471_chip *chip = devid;
+
+ power_supply_changed(chip->psy);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t rt9471_detach_handler(int irqno, void *devid)
+{
+ struct rt9471_chip *chip = devid;
+ unsigned int vbus_gd;
+ int ret;
+
+ ret = regmap_field_read(chip->rm_fields[F_ST_VBUS_GD], &vbus_gd);
+ if (ret)
+ return IRQ_NONE;
+
+ /* Only focus on really detached */
+ if (vbus_gd)
+ return IRQ_HANDLED;
+
+ mutex_lock(&chip->var_lock);
+ chip->psy_usb_type = POWER_SUPPLY_USB_TYPE_UNKNOWN;
+ chip->psy_usb_curr = 0;
+ mutex_unlock(&chip->var_lock);
+
+ power_supply_changed(chip->psy);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t rt9471_bc12_done_handler(int irqno, void *devid)
+{
+ struct rt9471_chip *chip = devid;
+ enum power_supply_usb_type usb_type;
+ unsigned int port_stat;
+ int usb_curr, ret;
+
+ ret = regmap_field_read(chip->rm_fields[F_PORT_STAT], &port_stat);
+ if (ret)
+ return IRQ_NONE;
+
+ switch (port_stat) {
+ case RT9471_PORTSTAT_APPLE_10W:
+ usb_type = POWER_SUPPLY_USB_TYPE_APPLE_BRICK_ID;
+ usb_curr = 2000000;
+ break;
+ case RT9471_PORTSTAT_APPLE_5W:
+ usb_type = POWER_SUPPLY_USB_TYPE_APPLE_BRICK_ID;
+ usb_curr = 1000000;
+ break;
+ case RT9471_PORTSTAT_APPLE_12W:
+ usb_type = POWER_SUPPLY_USB_TYPE_APPLE_BRICK_ID;
+ usb_curr = 2400000;
+ break;
+ case RT9471_PORTSTAT_SAMSUNG_10W:
+ usb_type = POWER_SUPPLY_USB_TYPE_DCP;
+ usb_curr = 2000000;
+ break;
+ case RT9471_PORTSTAT_DCP:
+ usb_type = POWER_SUPPLY_USB_TYPE_DCP;
+ usb_curr = 1500000;
+ break;
+ case RT9471_PORTSTAT_NSTD:
+ case RT9471_PORTSTAT_SDP:
+ usb_type = POWER_SUPPLY_USB_TYPE_SDP;
+ usb_curr = 500000;
+ break;
+ case RT9471_PORTSTAT_CDP:
+ usb_type = POWER_SUPPLY_USB_TYPE_CDP;
+ usb_curr = 1500000;
+ break;
+ default:
+ usb_type = POWER_SUPPLY_USB_TYPE_UNKNOWN;
+ usb_curr = 0;
+ break;
+ }
+
+ mutex_lock(&chip->var_lock);
+ chip->psy_usb_type = usb_type;
+ chip->psy_usb_curr = usb_curr;
+ mutex_unlock(&chip->var_lock);
+
+ power_supply_changed(chip->psy);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t rt9471_wdt_handler(int irqno, void *devid)
+{
+ struct rt9471_chip *chip = devid;
+ int ret;
+
+ ret = regmap_field_write(chip->rm_fields[F_WDT_RST], 1);
+
+ return ret ? IRQ_NONE : IRQ_HANDLED;
+}
+
+static irqreturn_t rt9471_otg_fault_handler(int irqno, void *devid)
+{
+ struct rt9471_chip *chip = devid;
+
+ regulator_notifier_call_chain(chip->otg_rdev, REGULATOR_EVENT_FAIL, NULL);
+
+ return IRQ_HANDLED;
+}
+
+#define RT9471_IRQ_DESC(_name, _hwirq) \
+{ \
+ .name = #_name, \
+ .hwirq = _hwirq, \
+ .handler = rt9471_##_name##_handler, \
+}
+
+static int rt9471_register_interrupts(struct rt9471_chip *chip)
+{
+ struct device *dev = chip->dev;
+ static const struct {
+ char *name;
+ int hwirq;
+ irq_handler_t handler;
+ } chg_irqs[] = {
+ RT9471_IRQ_DESC(vbus_gd, RT9471_IRQ_VBUS_GD),
+ RT9471_IRQ_DESC(detach, RT9471_IRQ_DETACH),
+ RT9471_IRQ_DESC(bc12_done, RT9471_IRQ_BC12_DONE),
+ RT9471_IRQ_DESC(wdt, RT9471_IRQ_WDT),
+ RT9471_IRQ_DESC(otg_fault, RT9471_IRQ_OTG_FAULT),
+ }, *curr;
+ int i, virq, ret;
+
+ for (i = 0; i < ARRAY_SIZE(chg_irqs); i++) {
+ curr = chg_irqs + i;
+
+ virq = regmap_irq_get_virq(chip->irq_chip_data, curr->hwirq);
+ if (virq <= 0)
+ return virq;
+
+ ret = devm_request_threaded_irq(dev, virq, NULL, curr->handler,
+ IRQF_ONESHOT, curr->name, chip);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to register IRQ (%s)\n",
+ curr->name);
+ }
+
+ return 0;
+}
+
+static const struct regulator_ops rt9471_otg_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .list_voltage = regulator_list_voltage_linear,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .set_current_limit = regulator_set_current_limit_regmap,
+ .get_current_limit = regulator_get_current_limit_regmap,
+};
+
+static const unsigned int rt9471_otg_microamp[] = { 500000, 1200000, };
+
+static const struct regulator_desc rt9471_otg_rdesc = {
+ .of_match = of_match_ptr("usb-otg-vbus-regulator"),
+ .name = "rt9471-otg-vbus",
+ .owner = THIS_MODULE,
+ .type = REGULATOR_VOLTAGE,
+ .ops = &rt9471_otg_ops,
+ .min_uV = RT9471_OTGCV_MINUV,
+ .uV_step = RT9471_OTGCV_STEPUV,
+ .n_voltages = RT9471_NUM_VOTG,
+ .curr_table = rt9471_otg_microamp,
+ .n_current_limits = ARRAY_SIZE(rt9471_otg_microamp),
+ .enable_mask = RT9471_OTGEN_MASK,
+ .enable_reg = RT9471_REG_FUNC,
+ .vsel_reg = RT9471_REG_OTGCFG,
+ .vsel_mask = RT9471_OTGCV_MASK,
+ .csel_reg = RT9471_REG_OTGCFG,
+ .csel_mask = RT9471_OTGCC_MASK,
+};
+
+static int rt9471_register_otg_regulator(struct rt9471_chip *chip)
+{
+ struct device *dev = chip->dev;
+ struct regulator_config cfg = { .dev = dev, .driver_data = chip };
+
+ chip->otg_rdev = devm_regulator_register(dev, &rt9471_otg_rdesc, &cfg);
+
+ return PTR_ERR_OR_ZERO(chip->otg_rdev);
+}
+
+static inline struct rt9471_chip *psy_device_to_chip(struct device *dev)
+{
+ return power_supply_get_drvdata(to_power_supply(dev));
+}
+
+static ssize_t sysoff_enable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct rt9471_chip *chip = psy_device_to_chip(dev);
+ unsigned int sysoff_enable;
+ int ret;
+
+ ret = regmap_field_read(chip->rm_fields[F_BATFET_DIS], &sysoff_enable);
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "%d\n", sysoff_enable);
+}
+
+static ssize_t sysoff_enable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct rt9471_chip *chip = psy_device_to_chip(dev);
+ unsigned int tmp;
+ int ret;
+
+ ret = kstrtouint(buf, 10, &tmp);
+ if (ret)
+ return ret;
+
+ ret = regmap_field_write(chip->rm_fields[F_BATFET_DIS], !!tmp);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static ssize_t port_detect_enable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct rt9471_chip *chip = psy_device_to_chip(dev);
+ unsigned int bc12_enable;
+ int ret;
+
+ ret = regmap_field_read(chip->rm_fields[F_BC12_EN], &bc12_enable);
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "%d\n", bc12_enable);
+}
+
+static ssize_t port_detect_enable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct rt9471_chip *chip = psy_device_to_chip(dev);
+ unsigned int tmp;
+ int ret;
+
+ ret = kstrtouint(buf, 10, &tmp);
+ if (ret)
+ return ret;
+
+ ret = regmap_field_write(chip->rm_fields[F_BC12_EN], !!tmp);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(sysoff_enable);
+static DEVICE_ATTR_RW(port_detect_enable);
+
+static struct attribute *rt9471_sysfs_attrs[] = {
+ &dev_attr_sysoff_enable.attr,
+ &dev_attr_port_detect_enable.attr,
+ NULL
+};
+
+ATTRIBUTE_GROUPS(rt9471_sysfs);
+
+static int rt9471_register_psy(struct rt9471_chip *chip)
+{
+ struct device *dev = chip->dev;
+ struct power_supply_desc *desc = &chip->psy_desc;
+ struct power_supply_config cfg = {};
+ char *psy_name;
+
+ cfg.drv_data = chip;
+ cfg.of_node = dev->of_node;
+ cfg.attr_grp = rt9471_sysfs_groups;
+
+ psy_name = devm_kasprintf(dev, GFP_KERNEL, "rt9471-%s", dev_name(dev));
+ if (!psy_name)
+ return -ENOMEM;
+
+ desc->name = psy_name;
+ desc->type = POWER_SUPPLY_TYPE_USB;
+ desc->usb_types = rt9471_charger_usb_types;
+ desc->num_usb_types = ARRAY_SIZE(rt9471_charger_usb_types);
+ desc->properties = rt9471_charger_properties;
+ desc->num_properties = ARRAY_SIZE(rt9471_charger_properties);
+ desc->get_property = rt9471_charger_get_property;
+ desc->set_property = rt9471_charger_set_property;
+ desc->property_is_writeable = rt9471_charger_property_is_writeable;
+
+ chip->psy = devm_power_supply_register(dev, desc, &cfg);
+
+ return PTR_ERR_OR_ZERO(chip->psy);
+}
+
+static const struct regmap_irq rt9471_regmap_irqs[] = {
+ REGMAP_IRQ_REG_LINE(RT9471_IRQ_BC12_DONE, 8),
+ REGMAP_IRQ_REG_LINE(RT9471_IRQ_DETACH, 8),
+ REGMAP_IRQ_REG_LINE(RT9471_IRQ_RECHG, 8),
+ REGMAP_IRQ_REG_LINE(RT9471_IRQ_CHG_DONE, 8),
+ REGMAP_IRQ_REG_LINE(RT9471_IRQ_BG_CHG, 8),
+ REGMAP_IRQ_REG_LINE(RT9471_IRQ_IE0C, 8),
+ REGMAP_IRQ_REG_LINE(RT9471_IRQ_CHG_RDY, 8),
+ REGMAP_IRQ_REG_LINE(RT9471_IRQ_VBUS_GD, 8),
+ REGMAP_IRQ_REG_LINE(RT9471_IRQ_CHG_BATOV, 8),
+ REGMAP_IRQ_REG_LINE(RT9471_IRQ_CHG_SYSOV, 8),
+ REGMAP_IRQ_REG_LINE(RT9471_IRQ_CHG_TOUT, 8),
+ REGMAP_IRQ_REG_LINE(RT9471_IRQ_CHG_BUSUV, 8),
+ REGMAP_IRQ_REG_LINE(RT9471_IRQ_CHG_THREG, 8),
+ REGMAP_IRQ_REG_LINE(RT9471_IRQ_CHG_AICR, 8),
+ REGMAP_IRQ_REG_LINE(RT9471_IRQ_CHG_MIVR, 8),
+ REGMAP_IRQ_REG_LINE(RT9471_IRQ_SYS_SHORT, 8),
+ REGMAP_IRQ_REG_LINE(RT9471_IRQ_SYS_MIN, 8),
+ REGMAP_IRQ_REG_LINE(RT9471_IRQ_AICC_DONE, 8),
+ REGMAP_IRQ_REG_LINE(RT9471_IRQ_PE_DONE, 8),
+ REGMAP_IRQ_REG_LINE(RT9471_IRQ_JEITA_COLD, 8),
+ REGMAP_IRQ_REG_LINE(RT9471_IRQ_JEITA_COOL, 8),
+ REGMAP_IRQ_REG_LINE(RT9471_IRQ_JEITA_WARM, 8),
+ REGMAP_IRQ_REG_LINE(RT9471_IRQ_JEITA_HOT, 8),
+ REGMAP_IRQ_REG_LINE(RT9471_IRQ_OTG_FAULT, 8),
+ REGMAP_IRQ_REG_LINE(RT9471_IRQ_OTG_LBP, 8),
+ REGMAP_IRQ_REG_LINE(RT9471_IRQ_OTG_CC, 8),
+ REGMAP_IRQ_REG_LINE(RT9471_IRQ_WDT, 8),
+ REGMAP_IRQ_REG_LINE(RT9471_IRQ_VAC_OV, 8),
+ REGMAP_IRQ_REG_LINE(RT9471_IRQ_OTP, 8),
+};
+
+static const struct regmap_irq_chip rt9471_irq_chip = {
+ .name = "rt9471-irqs",
+ .status_base = RT9471_REG_IRQ0,
+ .mask_base = RT9471_REG_MASK0,
+ .num_regs = RT9471_NUM_IRQ_REGS,
+ .irqs = rt9471_regmap_irqs,
+ .num_irqs = ARRAY_SIZE(rt9471_regmap_irqs),
+};
+
+static const struct reg_sequence rt9471_init_regs[] = {
+ REG_SEQ0(RT9471_REG_INFO, 0x80), /* REG_RST */
+ REG_SEQ0(RT9471_REG_TOP, 0xC0), /* WDT = 0 */
+ REG_SEQ0(RT9471_REG_FUNC, 0x01), /* BATFET_DIS_DLY = 0 */
+ REG_SEQ0(RT9471_REG_IBUS, 0x0A), /* AUTO_AICR = 0 */
+ REG_SEQ0(RT9471_REG_VBUS, 0xC6), /* VAC_OVP = 14V */
+ REG_SEQ0(RT9471_REG_JEITA, 0x38), /* JEITA = 0 */
+ REG_SEQ0(RT9471_REG_DPDMDET, 0x31), /* BC12_EN = 0, DCP_DP_OPT = 1 */
+};
+
+static int rt9471_check_devinfo(struct rt9471_chip *chip)
+{
+ struct device *dev = chip->dev;
+ unsigned int dev_id;
+ int ret;
+
+ ret = regmap_field_read(chip->rm_fields[F_DEVICE_ID], &dev_id);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to read device_id\n");
+
+ switch (dev_id) {
+ case RT9470_DEVID:
+ case RT9470D_DEVID:
+ case RT9471_DEVID:
+ case RT9471D_DEVID:
+ return 0;
+ default:
+ return dev_err_probe(dev, -ENODEV, "Incorrect device id\n");
+ }
+}
+
+static bool rt9471_accessible_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case 0x00 ... 0x0F:
+ case 0x10 ... 0x13:
+ case 0x20 ... 0x33:
+ case 0x40 ... 0xA1:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static const struct regmap_config rt9471_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0xA1,
+ .writeable_reg = rt9471_accessible_reg,
+ .readable_reg = rt9471_accessible_reg,
+};
+
+static int rt9471_probe(struct i2c_client *i2c)
+{
+ struct device *dev = &i2c->dev;
+ struct rt9471_chip *chip;
+ struct gpio_desc *ce_gpio;
+ struct regmap *regmap;
+ int ret;
+
+ chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ chip->dev = dev;
+ mutex_init(&chip->var_lock);
+ i2c_set_clientdata(i2c, chip);
+
+ /* Default pull charge enable gpio to make 'CHG_EN' by SW control only */
+ ce_gpio = devm_gpiod_get_optional(dev, "charge-enable", GPIOD_OUT_HIGH);
+ if (IS_ERR(ce_gpio))
+ return dev_err_probe(dev, PTR_ERR(ce_gpio),
+ "Failed to config charge enable gpio\n");
+
+ regmap = devm_regmap_init_i2c(i2c, &rt9471_regmap_config);
+ if (IS_ERR(regmap))
+ return dev_err_probe(dev, PTR_ERR(regmap), "Failed to init regmap\n");
+
+ chip->regmap = regmap;
+
+ ret = devm_regmap_field_bulk_alloc(dev, regmap, chip->rm_fields,
+ rt9471_reg_fields,
+ ARRAY_SIZE(rt9471_reg_fields));
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to alloc regmap field\n");
+
+ ret = rt9471_check_devinfo(chip);
+ if (ret)
+ return ret;
+
+ ret = regmap_register_patch(regmap, rt9471_init_regs,
+ ARRAY_SIZE(rt9471_init_regs));
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to init registers\n");
+
+ ret = devm_regmap_add_irq_chip(dev, regmap, i2c->irq,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT, 0,
+ &rt9471_irq_chip, &chip->irq_chip_data);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to add IRQ chip\n");
+
+ ret = rt9471_register_psy(chip);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to register psy\n");
+
+ ret = rt9471_register_otg_regulator(chip);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to register otg\n");
+
+ ret = rt9471_register_interrupts(chip);
+ if (ret)
+ return ret;
+
+ /* After IRQs are all initialized, enable port detection by default */
+ return regmap_field_write(chip->rm_fields[F_BC12_EN], 1);
+}
+
+static void rt9471_shutdown(struct i2c_client *i2c)
+{
+ struct rt9471_chip *chip = i2c_get_clientdata(i2c);
+
+ /*
+ * There's no external reset pin. Do register reset to guarantee charger
+ * function is normal after shutdown
+ */
+ regmap_field_write(chip->rm_fields[F_REG_RST], 1);
+}
+
+static const struct of_device_id rt9471_of_device_id[] = {
+ { .compatible = "richtek,rt9471" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, rt9471_of_device_id);
+
+static struct i2c_driver rt9471_driver = {
+ .driver = {
+ .name = "rt9471",
+ .of_match_table = rt9471_of_device_id,
+ },
+ .probe_new = rt9471_probe,
+ .shutdown = rt9471_shutdown,
+};
+module_i2c_driver(rt9471_driver);
+
+MODULE_DESCRIPTION("Richtek RT9471 charger driver");
+MODULE_AUTHOR("Alina Yu <alina_yu@richtek.com>");
+MODULE_AUTHOR("ChiYuan Huang <cy_huang@richtek.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/power/supply/s3c_adc_battery.c b/drivers/power/supply/s3c_adc_battery.c
deleted file mode 100644
index 68d31a3bee48..000000000000
--- a/drivers/power/supply/s3c_adc_battery.c
+++ /dev/null
@@ -1,453 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-//
-// iPAQ h1930/h1940/rx1950 battery controller driver
-// Copyright (c) Vasily Khoruzhick
-// Based on h1940_battery.c by Arnaud Patard
-
-#include <linux/interrupt.h>
-#include <linux/platform_device.h>
-#include <linux/power_supply.h>
-#include <linux/leds.h>
-#include <linux/gpio/consumer.h>
-#include <linux/err.h>
-#include <linux/timer.h>
-#include <linux/jiffies.h>
-#include <linux/s3c_adc_battery.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/module.h>
-
-#include <linux/soc/samsung/s3c-adc.h>
-
-#define BAT_POLL_INTERVAL 10000 /* ms */
-#define JITTER_DELAY 500 /* ms */
-
-struct s3c_adc_bat {
- struct power_supply *psy;
- struct s3c_adc_client *client;
- struct s3c_adc_bat_pdata *pdata;
- struct gpio_desc *charge_finished;
- int volt_value;
- int cur_value;
- unsigned int timestamp;
- int level;
- int status;
- int cable_plugged:1;
-};
-
-static struct delayed_work bat_work;
-
-static void s3c_adc_bat_ext_power_changed(struct power_supply *psy)
-{
- schedule_delayed_work(&bat_work,
- msecs_to_jiffies(JITTER_DELAY));
-}
-
-static int gather_samples(struct s3c_adc_client *client, int num, int channel)
-{
- int value, i;
-
- /* default to 1 if nothing is set */
- if (num < 1)
- num = 1;
-
- value = 0;
- for (i = 0; i < num; i++)
- value += s3c_adc_read(client, channel);
- value /= num;
-
- return value;
-}
-
-static enum power_supply_property s3c_adc_backup_bat_props[] = {
- POWER_SUPPLY_PROP_VOLTAGE_NOW,
- POWER_SUPPLY_PROP_VOLTAGE_MIN,
- POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN,
-};
-
-static int s3c_adc_backup_bat_get_property(struct power_supply *psy,
- enum power_supply_property psp,
- union power_supply_propval *val)
-{
- struct s3c_adc_bat *bat = power_supply_get_drvdata(psy);
-
- if (!bat) {
- dev_err(&psy->dev, "%s: no battery infos ?!\n", __func__);
- return -EINVAL;
- }
-
- if (bat->volt_value < 0 ||
- jiffies_to_msecs(jiffies - bat->timestamp) >
- BAT_POLL_INTERVAL) {
- bat->volt_value = gather_samples(bat->client,
- bat->pdata->backup_volt_samples,
- bat->pdata->backup_volt_channel);
- bat->volt_value *= bat->pdata->backup_volt_mult;
- bat->timestamp = jiffies;
- }
-
- switch (psp) {
- case POWER_SUPPLY_PROP_VOLTAGE_NOW:
- val->intval = bat->volt_value;
- return 0;
- case POWER_SUPPLY_PROP_VOLTAGE_MIN:
- val->intval = bat->pdata->backup_volt_min;
- return 0;
- case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
- val->intval = bat->pdata->backup_volt_max;
- return 0;
- default:
- return -EINVAL;
- }
-}
-
-static const struct power_supply_desc backup_bat_desc = {
- .name = "backup-battery",
- .type = POWER_SUPPLY_TYPE_BATTERY,
- .properties = s3c_adc_backup_bat_props,
- .num_properties = ARRAY_SIZE(s3c_adc_backup_bat_props),
- .get_property = s3c_adc_backup_bat_get_property,
- .use_for_apm = 1,
-};
-
-static struct s3c_adc_bat backup_bat;
-
-static enum power_supply_property s3c_adc_main_bat_props[] = {
- POWER_SUPPLY_PROP_STATUS,
- POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
- POWER_SUPPLY_PROP_CHARGE_EMPTY_DESIGN,
- POWER_SUPPLY_PROP_CHARGE_NOW,
- POWER_SUPPLY_PROP_VOLTAGE_NOW,
- POWER_SUPPLY_PROP_CURRENT_NOW,
-};
-
-static int calc_full_volt(int volt_val, int cur_val, int impedance)
-{
- return volt_val + cur_val * impedance / 1000;
-}
-
-static int charge_finished(struct s3c_adc_bat *bat)
-{
- return gpiod_get_value(bat->charge_finished);
-}
-
-static int s3c_adc_bat_get_property(struct power_supply *psy,
- enum power_supply_property psp,
- union power_supply_propval *val)
-{
- struct s3c_adc_bat *bat = power_supply_get_drvdata(psy);
-
- int new_level;
- int full_volt;
- const struct s3c_adc_bat_thresh *lut;
- unsigned int lut_size;
-
- if (!bat) {
- dev_err(&psy->dev, "no battery infos ?!\n");
- return -EINVAL;
- }
-
- lut = bat->pdata->lut_noac;
- lut_size = bat->pdata->lut_noac_cnt;
-
- if (bat->volt_value < 0 || bat->cur_value < 0 ||
- jiffies_to_msecs(jiffies - bat->timestamp) >
- BAT_POLL_INTERVAL) {
- bat->volt_value = gather_samples(bat->client,
- bat->pdata->volt_samples,
- bat->pdata->volt_channel) * bat->pdata->volt_mult;
- bat->cur_value = gather_samples(bat->client,
- bat->pdata->current_samples,
- bat->pdata->current_channel) * bat->pdata->current_mult;
- bat->timestamp = jiffies;
- }
-
- if (bat->cable_plugged &&
- (!bat->charge_finished ||
- !charge_finished(bat))) {
- lut = bat->pdata->lut_acin;
- lut_size = bat->pdata->lut_acin_cnt;
- }
-
- new_level = 100000;
- full_volt = calc_full_volt((bat->volt_value / 1000),
- (bat->cur_value / 1000), bat->pdata->internal_impedance);
-
- if (full_volt < calc_full_volt(lut->volt, lut->cur,
- bat->pdata->internal_impedance)) {
- lut_size--;
- while (lut_size--) {
- int lut_volt1;
- int lut_volt2;
-
- lut_volt1 = calc_full_volt(lut[0].volt, lut[0].cur,
- bat->pdata->internal_impedance);
- lut_volt2 = calc_full_volt(lut[1].volt, lut[1].cur,
- bat->pdata->internal_impedance);
- if (full_volt < lut_volt1 && full_volt >= lut_volt2) {
- new_level = (lut[1].level +
- (lut[0].level - lut[1].level) *
- (full_volt - lut_volt2) /
- (lut_volt1 - lut_volt2)) * 1000;
- break;
- }
- new_level = lut[1].level * 1000;
- lut++;
- }
- }
-
- bat->level = new_level;
-
- switch (psp) {
- case POWER_SUPPLY_PROP_STATUS:
- if (!bat->charge_finished)
- val->intval = bat->level == 100000 ?
- POWER_SUPPLY_STATUS_FULL : bat->status;
- else
- val->intval = bat->status;
- return 0;
- case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
- val->intval = 100000;
- return 0;
- case POWER_SUPPLY_PROP_CHARGE_EMPTY_DESIGN:
- val->intval = 0;
- return 0;
- case POWER_SUPPLY_PROP_CHARGE_NOW:
- val->intval = bat->level;
- return 0;
- case POWER_SUPPLY_PROP_VOLTAGE_NOW:
- val->intval = bat->volt_value;
- return 0;
- case POWER_SUPPLY_PROP_CURRENT_NOW:
- val->intval = bat->cur_value;
- return 0;
- default:
- return -EINVAL;
- }
-}
-
-static const struct power_supply_desc main_bat_desc = {
- .name = "main-battery",
- .type = POWER_SUPPLY_TYPE_BATTERY,
- .properties = s3c_adc_main_bat_props,
- .num_properties = ARRAY_SIZE(s3c_adc_main_bat_props),
- .get_property = s3c_adc_bat_get_property,
- .external_power_changed = s3c_adc_bat_ext_power_changed,
- .use_for_apm = 1,
-};
-
-static struct s3c_adc_bat main_bat;
-
-static void s3c_adc_bat_work(struct work_struct *work)
-{
- struct s3c_adc_bat *bat = &main_bat;
- int is_charged;
- int is_plugged;
- static int was_plugged;
-
- is_plugged = power_supply_am_i_supplied(bat->psy);
- bat->cable_plugged = is_plugged;
- if (is_plugged != was_plugged) {
- was_plugged = is_plugged;
- if (is_plugged) {
- if (bat->pdata->enable_charger)
- bat->pdata->enable_charger();
- bat->status = POWER_SUPPLY_STATUS_CHARGING;
- } else {
- if (bat->pdata->disable_charger)
- bat->pdata->disable_charger();
- bat->status = POWER_SUPPLY_STATUS_DISCHARGING;
- }
- } else {
- if (bat->charge_finished && is_plugged) {
- is_charged = charge_finished(&main_bat);
- if (is_charged) {
- if (bat->pdata->disable_charger)
- bat->pdata->disable_charger();
- bat->status = POWER_SUPPLY_STATUS_FULL;
- } else {
- if (bat->pdata->enable_charger)
- bat->pdata->enable_charger();
- bat->status = POWER_SUPPLY_STATUS_CHARGING;
- }
- }
- }
-
- power_supply_changed(bat->psy);
-}
-
-static irqreturn_t s3c_adc_bat_charged(int irq, void *dev_id)
-{
- schedule_delayed_work(&bat_work,
- msecs_to_jiffies(JITTER_DELAY));
- return IRQ_HANDLED;
-}
-
-static int s3c_adc_bat_probe(struct platform_device *pdev)
-{
- struct s3c_adc_client *client;
- struct s3c_adc_bat_pdata *pdata = pdev->dev.platform_data;
- struct power_supply_config psy_cfg = {};
- struct gpio_desc *gpiod;
- int ret;
-
- client = s3c_adc_register(pdev, NULL, NULL, 0);
- if (IS_ERR(client)) {
- dev_err(&pdev->dev, "cannot register adc\n");
- return PTR_ERR(client);
- }
-
- platform_set_drvdata(pdev, client);
-
- gpiod = devm_gpiod_get_optional(&pdev->dev, "charge-status", GPIOD_IN);
- if (IS_ERR(gpiod)) {
- /* Could be probe deferral etc */
- ret = PTR_ERR(gpiod);
- dev_err(&pdev->dev, "no GPIO %d\n", ret);
- return ret;
- }
-
- main_bat.client = client;
- main_bat.pdata = pdata;
- main_bat.charge_finished = gpiod;
- main_bat.volt_value = -1;
- main_bat.cur_value = -1;
- main_bat.cable_plugged = 0;
- main_bat.status = POWER_SUPPLY_STATUS_DISCHARGING;
- psy_cfg.drv_data = &main_bat;
-
- main_bat.psy = power_supply_register(&pdev->dev, &main_bat_desc, &psy_cfg);
- if (IS_ERR(main_bat.psy)) {
- ret = PTR_ERR(main_bat.psy);
- goto err_reg_main;
- }
- if (pdata->backup_volt_mult) {
- const struct power_supply_config backup_psy_cfg
- = { .drv_data = &backup_bat, };
-
- backup_bat.client = client;
- backup_bat.pdata = pdev->dev.platform_data;
- backup_bat.charge_finished = gpiod;
- backup_bat.volt_value = -1;
- backup_bat.psy = power_supply_register(&pdev->dev,
- &backup_bat_desc,
- &backup_psy_cfg);
- if (IS_ERR(backup_bat.psy)) {
- ret = PTR_ERR(backup_bat.psy);
- goto err_reg_backup;
- }
- }
-
- INIT_DELAYED_WORK(&bat_work, s3c_adc_bat_work);
-
- if (gpiod) {
- ret = request_irq(gpiod_to_irq(gpiod),
- s3c_adc_bat_charged,
- IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
- "battery charged", NULL);
- if (ret)
- goto err_irq;
- }
-
- if (pdata->init) {
- ret = pdata->init();
- if (ret)
- goto err_platform;
- }
-
- dev_info(&pdev->dev, "successfully loaded\n");
- device_init_wakeup(&pdev->dev, 1);
-
- /* Schedule timer to check current status */
- schedule_delayed_work(&bat_work,
- msecs_to_jiffies(JITTER_DELAY));
-
- return 0;
-
-err_platform:
- if (gpiod)
- free_irq(gpiod_to_irq(gpiod), NULL);
-err_irq:
- if (pdata->backup_volt_mult)
- power_supply_unregister(backup_bat.psy);
-err_reg_backup:
- power_supply_unregister(main_bat.psy);
-err_reg_main:
- return ret;
-}
-
-static int s3c_adc_bat_remove(struct platform_device *pdev)
-{
- struct s3c_adc_client *client = platform_get_drvdata(pdev);
- struct s3c_adc_bat_pdata *pdata = pdev->dev.platform_data;
-
- power_supply_unregister(main_bat.psy);
- if (pdata->backup_volt_mult)
- power_supply_unregister(backup_bat.psy);
-
- s3c_adc_release(client);
-
- if (main_bat.charge_finished)
- free_irq(gpiod_to_irq(main_bat.charge_finished), NULL);
-
- cancel_delayed_work_sync(&bat_work);
-
- if (pdata->exit)
- pdata->exit();
-
- return 0;
-}
-
-#ifdef CONFIG_PM
-static int s3c_adc_bat_suspend(struct platform_device *pdev,
- pm_message_t state)
-{
- if (main_bat.charge_finished) {
- if (device_may_wakeup(&pdev->dev))
- enable_irq_wake(
- gpiod_to_irq(main_bat.charge_finished));
- else {
- disable_irq(gpiod_to_irq(main_bat.charge_finished));
- main_bat.pdata->disable_charger();
- }
- }
-
- return 0;
-}
-
-static int s3c_adc_bat_resume(struct platform_device *pdev)
-{
- if (main_bat.charge_finished) {
- if (device_may_wakeup(&pdev->dev))
- disable_irq_wake(
- gpiod_to_irq(main_bat.charge_finished));
- else
- enable_irq(gpiod_to_irq(main_bat.charge_finished));
- }
-
- /* Schedule timer to check current status */
- schedule_delayed_work(&bat_work,
- msecs_to_jiffies(JITTER_DELAY));
-
- return 0;
-}
-#else
-#define s3c_adc_bat_suspend NULL
-#define s3c_adc_bat_resume NULL
-#endif
-
-static struct platform_driver s3c_adc_bat_driver = {
- .driver = {
- .name = "s3c-adc-battery",
- },
- .probe = s3c_adc_bat_probe,
- .remove = s3c_adc_bat_remove,
- .suspend = s3c_adc_bat_suspend,
- .resume = s3c_adc_bat_resume,
-};
-
-module_platform_driver(s3c_adc_bat_driver);
-
-MODULE_AUTHOR("Vasily Khoruzhick <anarsoul@gmail.com>");
-MODULE_DESCRIPTION("iPAQ H1930/H1940/RX1950 battery controller driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/power/supply/surface_battery.c b/drivers/power/supply/surface_battery.c
index 540707882bb0..19d2f8834e56 100644
--- a/drivers/power/supply/surface_battery.c
+++ b/drivers/power/supply/surface_battery.c
@@ -852,8 +852,8 @@ static const struct spwr_psy_properties spwr_psy_props_bat2_sb3 = {
};
static const struct ssam_device_id surface_battery_match[] = {
- { SSAM_SDEV(BAT, 0x01, 0x01, 0x00), (unsigned long)&spwr_psy_props_bat1 },
- { SSAM_SDEV(BAT, 0x02, 0x01, 0x00), (unsigned long)&spwr_psy_props_bat2_sb3 },
+ { SSAM_SDEV(BAT, SAM, 0x01, 0x00), (unsigned long)&spwr_psy_props_bat1 },
+ { SSAM_SDEV(BAT, KIP, 0x01, 0x00), (unsigned long)&spwr_psy_props_bat2_sb3 },
{ },
};
MODULE_DEVICE_TABLE(ssam, surface_battery_match);
diff --git a/drivers/power/supply/surface_charger.c b/drivers/power/supply/surface_charger.c
index 59182d55742d..cabdd8da12d0 100644
--- a/drivers/power/supply/surface_charger.c
+++ b/drivers/power/supply/surface_charger.c
@@ -260,7 +260,7 @@ static const struct spwr_psy_properties spwr_psy_props_adp1 = {
};
static const struct ssam_device_id surface_ac_match[] = {
- { SSAM_SDEV(BAT, 0x01, 0x01, 0x01), (unsigned long)&spwr_psy_props_adp1 },
+ { SSAM_SDEV(BAT, SAM, 0x01, 0x01), (unsigned long)&spwr_psy_props_adp1 },
{ },
};
MODULE_DEVICE_TABLE(ssam, surface_ac_match);
diff --git a/drivers/power/supply/test_power.c b/drivers/power/supply/test_power.c
index 5f510ddc946d..0d0a77584c5d 100644
--- a/drivers/power/supply/test_power.c
+++ b/drivers/power/supply/test_power.c
@@ -306,8 +306,7 @@ static int map_get_value(struct battery_property_map *map, const char *key,
char buf[MAX_KEYLENGTH];
int cr;
- strncpy(buf, key, MAX_KEYLENGTH);
- buf[MAX_KEYLENGTH-1] = '\0';
+ strscpy(buf, key, MAX_KEYLENGTH);
cr = strnlen(buf, MAX_KEYLENGTH) - 1;
if (cr < 0)
diff --git a/drivers/power/supply/tosa_battery.c b/drivers/power/supply/tosa_battery.c
deleted file mode 100644
index 73d4aca4c386..000000000000
--- a/drivers/power/supply/tosa_battery.c
+++ /dev/null
@@ -1,512 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Battery and Power Management code for the Sharp SL-6000x
- *
- * Copyright (c) 2005 Dirk Opfer
- * Copyright (c) 2008 Dmitry Baryshkov
- */
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/power_supply.h>
-#include <linux/wm97xx.h>
-#include <linux/delay.h>
-#include <linux/spinlock.h>
-#include <linux/interrupt.h>
-#include <linux/gpio/consumer.h>
-
-#include <asm/mach-types.h>
-
-static DEFINE_MUTEX(bat_lock); /* protects gpio pins */
-static struct work_struct bat_work;
-
-struct tosa_bat {
- int status;
- struct power_supply *psy;
- int full_chrg;
-
- struct mutex work_lock; /* protects data */
-
- bool (*is_present)(struct tosa_bat *bat);
- struct gpio_desc *gpiod_full;
- struct gpio_desc *gpiod_charge_off;
-
- int technology;
-
- struct gpio_desc *gpiod_bat;
- int adc_bat;
- int adc_bat_divider;
- int bat_max;
- int bat_min;
-
- struct gpio_desc *gpiod_temp;
- int adc_temp;
- int adc_temp_divider;
-};
-
-static struct gpio_desc *jacket_detect;
-static struct tosa_bat tosa_bat_main;
-static struct tosa_bat tosa_bat_jacket;
-
-static unsigned long tosa_read_bat(struct tosa_bat *bat)
-{
- unsigned long value = 0;
-
- if (!bat->gpiod_bat || bat->adc_bat < 0)
- return 0;
-
- mutex_lock(&bat_lock);
- gpiod_set_value(bat->gpiod_bat, 1);
- msleep(5);
- value = wm97xx_read_aux_adc(dev_get_drvdata(bat->psy->dev.parent),
- bat->adc_bat);
- gpiod_set_value(bat->gpiod_bat, 0);
- mutex_unlock(&bat_lock);
-
- value = value * 1000000 / bat->adc_bat_divider;
-
- return value;
-}
-
-static unsigned long tosa_read_temp(struct tosa_bat *bat)
-{
- unsigned long value = 0;
-
- if (!bat->gpiod_temp || bat->adc_temp < 0)
- return 0;
-
- mutex_lock(&bat_lock);
- gpiod_set_value(bat->gpiod_temp, 1);
- msleep(5);
- value = wm97xx_read_aux_adc(dev_get_drvdata(bat->psy->dev.parent),
- bat->adc_temp);
- gpiod_set_value(bat->gpiod_temp, 0);
- mutex_unlock(&bat_lock);
-
- value = value * 10000 / bat->adc_temp_divider;
-
- return value;
-}
-
-static int tosa_bat_get_property(struct power_supply *psy,
- enum power_supply_property psp,
- union power_supply_propval *val)
-{
- int ret = 0;
- struct tosa_bat *bat = power_supply_get_drvdata(psy);
-
- if (bat->is_present && !bat->is_present(bat)
- && psp != POWER_SUPPLY_PROP_PRESENT) {
- return -ENODEV;
- }
-
- switch (psp) {
- case POWER_SUPPLY_PROP_STATUS:
- val->intval = bat->status;
- break;
- case POWER_SUPPLY_PROP_TECHNOLOGY:
- val->intval = bat->technology;
- break;
- case POWER_SUPPLY_PROP_VOLTAGE_NOW:
- val->intval = tosa_read_bat(bat);
- break;
- case POWER_SUPPLY_PROP_VOLTAGE_MAX:
- if (bat->full_chrg == -1)
- val->intval = bat->bat_max;
- else
- val->intval = bat->full_chrg;
- break;
- case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
- val->intval = bat->bat_max;
- break;
- case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
- val->intval = bat->bat_min;
- break;
- case POWER_SUPPLY_PROP_TEMP:
- val->intval = tosa_read_temp(bat);
- break;
- case POWER_SUPPLY_PROP_PRESENT:
- val->intval = bat->is_present ? bat->is_present(bat) : 1;
- break;
- default:
- ret = -EINVAL;
- break;
- }
- return ret;
-}
-
-static bool tosa_jacket_bat_is_present(struct tosa_bat *bat)
-{
- return gpiod_get_value(jacket_detect) == 0;
-}
-
-static void tosa_bat_external_power_changed(struct power_supply *psy)
-{
- schedule_work(&bat_work);
-}
-
-static irqreturn_t tosa_bat_gpio_isr(int irq, void *data)
-{
- pr_info("tosa_bat_gpio irq\n");
- schedule_work(&bat_work);
- return IRQ_HANDLED;
-}
-
-static void tosa_bat_update(struct tosa_bat *bat)
-{
- int old;
- struct power_supply *psy = bat->psy;
-
- mutex_lock(&bat->work_lock);
-
- old = bat->status;
-
- if (bat->is_present && !bat->is_present(bat)) {
- printk(KERN_NOTICE "%s not present\n", psy->desc->name);
- bat->status = POWER_SUPPLY_STATUS_UNKNOWN;
- bat->full_chrg = -1;
- } else if (power_supply_am_i_supplied(psy)) {
- if (bat->status == POWER_SUPPLY_STATUS_DISCHARGING) {
- gpiod_set_value(bat->gpiod_charge_off, 0);
- mdelay(15);
- }
-
- if (gpiod_get_value(bat->gpiod_full)) {
- if (old == POWER_SUPPLY_STATUS_CHARGING ||
- bat->full_chrg == -1)
- bat->full_chrg = tosa_read_bat(bat);
-
- gpiod_set_value(bat->gpiod_charge_off, 1);
- bat->status = POWER_SUPPLY_STATUS_FULL;
- } else {
- gpiod_set_value(bat->gpiod_charge_off, 0);
- bat->status = POWER_SUPPLY_STATUS_CHARGING;
- }
- } else {
- gpiod_set_value(bat->gpiod_charge_off, 1);
- bat->status = POWER_SUPPLY_STATUS_DISCHARGING;
- }
-
- if (old != bat->status)
- power_supply_changed(psy);
-
- mutex_unlock(&bat->work_lock);
-}
-
-static void tosa_bat_work(struct work_struct *work)
-{
- tosa_bat_update(&tosa_bat_main);
- tosa_bat_update(&tosa_bat_jacket);
-}
-
-
-static enum power_supply_property tosa_bat_main_props[] = {
- POWER_SUPPLY_PROP_STATUS,
- POWER_SUPPLY_PROP_TECHNOLOGY,
- POWER_SUPPLY_PROP_VOLTAGE_NOW,
- POWER_SUPPLY_PROP_VOLTAGE_MAX,
- POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
- POWER_SUPPLY_PROP_TEMP,
- POWER_SUPPLY_PROP_PRESENT,
-};
-
-static enum power_supply_property tosa_bat_bu_props[] = {
- POWER_SUPPLY_PROP_STATUS,
- POWER_SUPPLY_PROP_TECHNOLOGY,
- POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
- POWER_SUPPLY_PROP_VOLTAGE_NOW,
- POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN,
- POWER_SUPPLY_PROP_PRESENT,
-};
-
-static const struct power_supply_desc tosa_bat_main_desc = {
- .name = "main-battery",
- .type = POWER_SUPPLY_TYPE_BATTERY,
- .properties = tosa_bat_main_props,
- .num_properties = ARRAY_SIZE(tosa_bat_main_props),
- .get_property = tosa_bat_get_property,
- .external_power_changed = tosa_bat_external_power_changed,
- .use_for_apm = 1,
-};
-
-static const struct power_supply_desc tosa_bat_jacket_desc = {
- .name = "jacket-battery",
- .type = POWER_SUPPLY_TYPE_BATTERY,
- .properties = tosa_bat_main_props,
- .num_properties = ARRAY_SIZE(tosa_bat_main_props),
- .get_property = tosa_bat_get_property,
- .external_power_changed = tosa_bat_external_power_changed,
-};
-
-static const struct power_supply_desc tosa_bat_bu_desc = {
- .name = "backup-battery",
- .type = POWER_SUPPLY_TYPE_BATTERY,
- .properties = tosa_bat_bu_props,
- .num_properties = ARRAY_SIZE(tosa_bat_bu_props),
- .get_property = tosa_bat_get_property,
- .external_power_changed = tosa_bat_external_power_changed,
-};
-
-static struct tosa_bat tosa_bat_main = {
- .status = POWER_SUPPLY_STATUS_DISCHARGING,
- .full_chrg = -1,
- .psy = NULL,
-
- .gpiod_full = NULL,
- .gpiod_charge_off = NULL,
-
- .technology = POWER_SUPPLY_TECHNOLOGY_LIPO,
-
- .gpiod_bat = NULL,
- .adc_bat = WM97XX_AUX_ID3,
- .adc_bat_divider = 414,
- .bat_max = 4310000,
- .bat_min = 1551 * 1000000 / 414,
-
- .gpiod_temp = NULL,
- .adc_temp = WM97XX_AUX_ID2,
- .adc_temp_divider = 10000,
-};
-
-static struct tosa_bat tosa_bat_jacket = {
- .status = POWER_SUPPLY_STATUS_DISCHARGING,
- .full_chrg = -1,
- .psy = NULL,
-
- .is_present = tosa_jacket_bat_is_present,
- .gpiod_full = NULL,
- .gpiod_charge_off = NULL,
-
- .technology = POWER_SUPPLY_TECHNOLOGY_LIPO,
-
- .gpiod_bat = NULL,
- .adc_bat = WM97XX_AUX_ID3,
- .adc_bat_divider = 414,
- .bat_max = 4310000,
- .bat_min = 1551 * 1000000 / 414,
-
- .gpiod_temp = NULL,
- .adc_temp = WM97XX_AUX_ID2,
- .adc_temp_divider = 10000,
-};
-
-static struct tosa_bat tosa_bat_bu = {
- .status = POWER_SUPPLY_STATUS_UNKNOWN,
- .full_chrg = -1,
- .psy = NULL,
-
- .gpiod_full = NULL,
- .gpiod_charge_off = NULL,
-
- .technology = POWER_SUPPLY_TECHNOLOGY_LiMn,
-
- .gpiod_bat = NULL,
- .adc_bat = WM97XX_AUX_ID4,
- .adc_bat_divider = 1266,
-
- .gpiod_temp = NULL,
- .adc_temp = -1,
- .adc_temp_divider = -1,
-};
-
-#ifdef CONFIG_PM
-static int tosa_bat_suspend(struct platform_device *dev, pm_message_t state)
-{
- /* flush all pending status updates */
- flush_work(&bat_work);
- return 0;
-}
-
-static int tosa_bat_resume(struct platform_device *dev)
-{
- /* things may have changed while we were away */
- schedule_work(&bat_work);
- return 0;
-}
-#else
-#define tosa_bat_suspend NULL
-#define tosa_bat_resume NULL
-#endif
-
-static int tosa_bat_probe(struct platform_device *pdev)
-{
- int ret;
- struct power_supply_config main_psy_cfg = {},
- jacket_psy_cfg = {},
- bu_psy_cfg = {};
- struct device *dev = &pdev->dev;
- struct gpio_desc *dummy;
-
- if (!machine_is_tosa())
- return -ENODEV;
-
- /* Main charging control GPIOs */
- tosa_bat_main.gpiod_charge_off = devm_gpiod_get(dev, "main charge off", GPIOD_OUT_HIGH);
- if (IS_ERR(tosa_bat_main.gpiod_charge_off))
- return dev_err_probe(dev, PTR_ERR(tosa_bat_main.gpiod_charge_off),
- "no main charger GPIO\n");
- tosa_bat_jacket.gpiod_charge_off = devm_gpiod_get(dev, "jacket charge off", GPIOD_OUT_HIGH);
- if (IS_ERR(tosa_bat_jacket.gpiod_charge_off))
- return dev_err_probe(dev, PTR_ERR(tosa_bat_jacket.gpiod_charge_off),
- "no jacket charger GPIO\n");
-
- /* Per-battery output check (routes battery voltage to ADC) */
- tosa_bat_main.gpiod_bat = devm_gpiod_get(dev, "main battery", GPIOD_OUT_LOW);
- if (IS_ERR(tosa_bat_main.gpiod_bat))
- return dev_err_probe(dev, PTR_ERR(tosa_bat_main.gpiod_bat),
- "no main battery GPIO\n");
- tosa_bat_jacket.gpiod_bat = devm_gpiod_get(dev, "jacket battery", GPIOD_OUT_LOW);
- if (IS_ERR(tosa_bat_jacket.gpiod_bat))
- return dev_err_probe(dev, PTR_ERR(tosa_bat_jacket.gpiod_bat),
- "no jacket battery GPIO\n");
- tosa_bat_bu.gpiod_bat = devm_gpiod_get(dev, "backup battery", GPIOD_OUT_LOW);
- if (IS_ERR(tosa_bat_bu.gpiod_bat))
- return dev_err_probe(dev, PTR_ERR(tosa_bat_bu.gpiod_bat),
- "no backup battery GPIO\n");
-
- /* Battery full detect GPIOs (using PXA SoC GPIOs) */
- tosa_bat_main.gpiod_full = devm_gpiod_get(dev, "main battery full", GPIOD_IN);
- if (IS_ERR(tosa_bat_main.gpiod_full))
- return dev_err_probe(dev, PTR_ERR(tosa_bat_main.gpiod_full),
- "no main battery full GPIO\n");
- tosa_bat_jacket.gpiod_full = devm_gpiod_get(dev, "jacket battery full", GPIOD_IN);
- if (IS_ERR(tosa_bat_jacket.gpiod_full))
- return dev_err_probe(dev, PTR_ERR(tosa_bat_jacket.gpiod_full),
- "no jacket battery full GPIO\n");
-
- /* Battery temperature GPIOs (routes thermistor voltage to ADC) */
- tosa_bat_main.gpiod_temp = devm_gpiod_get(dev, "main battery temp", GPIOD_OUT_LOW);
- if (IS_ERR(tosa_bat_main.gpiod_temp))
- return dev_err_probe(dev, PTR_ERR(tosa_bat_main.gpiod_temp),
- "no main battery temp GPIO\n");
- tosa_bat_jacket.gpiod_temp = devm_gpiod_get(dev, "jacket battery temp", GPIOD_OUT_LOW);
- if (IS_ERR(tosa_bat_jacket.gpiod_temp))
- return dev_err_probe(dev, PTR_ERR(tosa_bat_jacket.gpiod_temp),
- "no jacket battery temp GPIO\n");
-
- /* Jacket detect GPIO */
- jacket_detect = devm_gpiod_get(dev, "jacket detect", GPIOD_IN);
- if (IS_ERR(jacket_detect))
- return dev_err_probe(dev, PTR_ERR(jacket_detect),
- "no jacket detect GPIO\n");
-
- /* Battery low indication GPIOs (not used, we just request them) */
- dummy = devm_gpiod_get(dev, "main battery low", GPIOD_IN);
- if (IS_ERR(dummy))
- return dev_err_probe(dev, PTR_ERR(dummy),
- "no main battery low GPIO\n");
- dummy = devm_gpiod_get(dev, "jacket battery low", GPIOD_IN);
- if (IS_ERR(dummy))
- return dev_err_probe(dev, PTR_ERR(dummy),
- "no jacket battery low GPIO\n");
-
- /* Battery switch GPIO (not used just requested) */
- dummy = devm_gpiod_get(dev, "battery switch", GPIOD_OUT_LOW);
- if (IS_ERR(dummy))
- return dev_err_probe(dev, PTR_ERR(dummy),
- "no battery switch GPIO\n");
-
- mutex_init(&tosa_bat_main.work_lock);
- mutex_init(&tosa_bat_jacket.work_lock);
-
- INIT_WORK(&bat_work, tosa_bat_work);
-
- main_psy_cfg.drv_data = &tosa_bat_main;
- tosa_bat_main.psy = power_supply_register(dev,
- &tosa_bat_main_desc,
- &main_psy_cfg);
- if (IS_ERR(tosa_bat_main.psy)) {
- ret = PTR_ERR(tosa_bat_main.psy);
- goto err_psy_reg_main;
- }
-
- jacket_psy_cfg.drv_data = &tosa_bat_jacket;
- tosa_bat_jacket.psy = power_supply_register(dev,
- &tosa_bat_jacket_desc,
- &jacket_psy_cfg);
- if (IS_ERR(tosa_bat_jacket.psy)) {
- ret = PTR_ERR(tosa_bat_jacket.psy);
- goto err_psy_reg_jacket;
- }
-
- bu_psy_cfg.drv_data = &tosa_bat_bu;
- tosa_bat_bu.psy = power_supply_register(dev, &tosa_bat_bu_desc,
- &bu_psy_cfg);
- if (IS_ERR(tosa_bat_bu.psy)) {
- ret = PTR_ERR(tosa_bat_bu.psy);
- goto err_psy_reg_bu;
- }
-
- ret = request_irq(gpiod_to_irq(tosa_bat_main.gpiod_full),
- tosa_bat_gpio_isr,
- IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
- "main full", &tosa_bat_main);
- if (ret)
- goto err_req_main;
-
- ret = request_irq(gpiod_to_irq(tosa_bat_jacket.gpiod_full),
- tosa_bat_gpio_isr,
- IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
- "jacket full", &tosa_bat_jacket);
- if (ret)
- goto err_req_jacket;
-
- ret = request_irq(gpiod_to_irq(jacket_detect),
- tosa_bat_gpio_isr,
- IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
- "jacket detect", &tosa_bat_jacket);
- if (!ret) {
- schedule_work(&bat_work);
- return 0;
- }
-
- free_irq(gpiod_to_irq(tosa_bat_jacket.gpiod_full), &tosa_bat_jacket);
-err_req_jacket:
- free_irq(gpiod_to_irq(tosa_bat_main.gpiod_full), &tosa_bat_main);
-err_req_main:
- power_supply_unregister(tosa_bat_bu.psy);
-err_psy_reg_bu:
- power_supply_unregister(tosa_bat_jacket.psy);
-err_psy_reg_jacket:
- power_supply_unregister(tosa_bat_main.psy);
-err_psy_reg_main:
-
- /* see comment in tosa_bat_remove */
- cancel_work_sync(&bat_work);
-
- return ret;
-}
-
-static int tosa_bat_remove(struct platform_device *dev)
-{
- free_irq(gpiod_to_irq(jacket_detect), &tosa_bat_jacket);
- free_irq(gpiod_to_irq(tosa_bat_jacket.gpiod_full), &tosa_bat_jacket);
- free_irq(gpiod_to_irq(tosa_bat_main.gpiod_full), &tosa_bat_main);
-
- power_supply_unregister(tosa_bat_bu.psy);
- power_supply_unregister(tosa_bat_jacket.psy);
- power_supply_unregister(tosa_bat_main.psy);
-
- /*
- * Now cancel the bat_work. We won't get any more schedules,
- * since all sources (isr and external_power_changed) are
- * unregistered now.
- */
- cancel_work_sync(&bat_work);
- return 0;
-}
-
-static struct platform_driver tosa_bat_driver = {
- .driver.name = "wm97xx-battery",
- .driver.owner = THIS_MODULE,
- .probe = tosa_bat_probe,
- .remove = tosa_bat_remove,
- .suspend = tosa_bat_suspend,
- .resume = tosa_bat_resume,
-};
-
-module_platform_driver(tosa_bat_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Dmitry Baryshkov");
-MODULE_DESCRIPTION("Tosa battery driver");
-MODULE_ALIAS("platform:wm97xx-battery");
diff --git a/drivers/power/supply/twl4030_charger.c b/drivers/power/supply/twl4030_charger.c
index 1bc49b2e12e8..53a0ea5a61da 100644
--- a/drivers/power/supply/twl4030_charger.c
+++ b/drivers/power/supply/twl4030_charger.c
@@ -726,11 +726,9 @@ twl4030_bci_mode_show(struct device *dev,
for (i = 0; i < ARRAY_SIZE(modes); i++)
if (mode == i)
- len += scnprintf(buf+len, PAGE_SIZE-len,
- "[%s] ", modes[i]);
+ len += sysfs_emit_at(buf, len, "[%s] ", modes[i]);
else
- len += scnprintf(buf+len, PAGE_SIZE-len,
- "%s ", modes[i]);
+ len += sysfs_emit_at(buf, len, "%s ", modes[i]);
buf[len-1] = '\n';
return len;
}
diff --git a/drivers/power/supply/wm8350_power.c b/drivers/power/supply/wm8350_power.c
index 908cfd45d262..f2786761299c 100644
--- a/drivers/power/supply/wm8350_power.c
+++ b/drivers/power/supply/wm8350_power.c
@@ -176,7 +176,7 @@ static ssize_t charger_state_show(struct device *dev,
return 0;
}
- return sprintf(buf, "%s\n", charge);
+ return sysfs_emit(buf, "%s\n", charge);
}
static DEVICE_ATTR_RO(charger_state);
diff --git a/drivers/power/supply/z2_battery.c b/drivers/power/supply/z2_battery.c
deleted file mode 100644
index 0ba4a590a0a5..000000000000
--- a/drivers/power/supply/z2_battery.c
+++ /dev/null
@@ -1,318 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Battery measurement code for Zipit Z2
- *
- * Copyright (C) 2009 Peter Edwards <sweetlilmre@gmail.com>
- */
-
-#include <linux/module.h>
-#include <linux/gpio/consumer.h>
-#include <linux/i2c.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/power_supply.h>
-#include <linux/slab.h>
-#include <linux/z2_battery.h>
-
-#define Z2_DEFAULT_NAME "Z2"
-
-struct z2_charger {
- struct z2_battery_info *info;
- struct gpio_desc *charge_gpiod;
- int bat_status;
- struct i2c_client *client;
- struct power_supply *batt_ps;
- struct power_supply_desc batt_ps_desc;
- struct mutex work_lock;
- struct work_struct bat_work;
-};
-
-static unsigned long z2_read_bat(struct z2_charger *charger)
-{
- int data;
- data = i2c_smbus_read_byte_data(charger->client,
- charger->info->batt_I2C_reg);
- if (data < 0)
- return 0;
-
- return data * charger->info->batt_mult / charger->info->batt_div;
-}
-
-static int z2_batt_get_property(struct power_supply *batt_ps,
- enum power_supply_property psp,
- union power_supply_propval *val)
-{
- struct z2_charger *charger = power_supply_get_drvdata(batt_ps);
- struct z2_battery_info *info = charger->info;
-
- switch (psp) {
- case POWER_SUPPLY_PROP_STATUS:
- val->intval = charger->bat_status;
- break;
- case POWER_SUPPLY_PROP_TECHNOLOGY:
- val->intval = info->batt_tech;
- break;
- case POWER_SUPPLY_PROP_VOLTAGE_NOW:
- if (info->batt_I2C_reg >= 0)
- val->intval = z2_read_bat(charger);
- else
- return -EINVAL;
- break;
- case POWER_SUPPLY_PROP_VOLTAGE_MAX:
- if (info->max_voltage >= 0)
- val->intval = info->max_voltage;
- else
- return -EINVAL;
- break;
- case POWER_SUPPLY_PROP_VOLTAGE_MIN:
- if (info->min_voltage >= 0)
- val->intval = info->min_voltage;
- else
- return -EINVAL;
- break;
- case POWER_SUPPLY_PROP_PRESENT:
- val->intval = 1;
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-static void z2_batt_ext_power_changed(struct power_supply *batt_ps)
-{
- struct z2_charger *charger = power_supply_get_drvdata(batt_ps);
-
- schedule_work(&charger->bat_work);
-}
-
-static void z2_batt_update(struct z2_charger *charger)
-{
- int old_status = charger->bat_status;
-
- mutex_lock(&charger->work_lock);
-
- charger->bat_status = charger->charge_gpiod ?
- (gpiod_get_value(charger->charge_gpiod) ?
- POWER_SUPPLY_STATUS_CHARGING :
- POWER_SUPPLY_STATUS_DISCHARGING) :
- POWER_SUPPLY_STATUS_UNKNOWN;
-
- if (old_status != charger->bat_status) {
- pr_debug("%s: %i -> %i\n", charger->batt_ps->desc->name,
- old_status,
- charger->bat_status);
- power_supply_changed(charger->batt_ps);
- }
-
- mutex_unlock(&charger->work_lock);
-}
-
-static void z2_batt_work(struct work_struct *work)
-{
- struct z2_charger *charger;
- charger = container_of(work, struct z2_charger, bat_work);
- z2_batt_update(charger);
-}
-
-static irqreturn_t z2_charge_switch_irq(int irq, void *devid)
-{
- struct z2_charger *charger = devid;
- schedule_work(&charger->bat_work);
- return IRQ_HANDLED;
-}
-
-static int z2_batt_ps_init(struct z2_charger *charger, int props)
-{
- int i = 0;
- enum power_supply_property *prop;
- struct z2_battery_info *info = charger->info;
-
- if (charger->charge_gpiod)
- props++; /* POWER_SUPPLY_PROP_STATUS */
- if (info->batt_tech >= 0)
- props++; /* POWER_SUPPLY_PROP_TECHNOLOGY */
- if (info->batt_I2C_reg >= 0)
- props++; /* POWER_SUPPLY_PROP_VOLTAGE_NOW */
- if (info->max_voltage >= 0)
- props++; /* POWER_SUPPLY_PROP_VOLTAGE_MAX */
- if (info->min_voltage >= 0)
- props++; /* POWER_SUPPLY_PROP_VOLTAGE_MIN */
-
- prop = kcalloc(props, sizeof(*prop), GFP_KERNEL);
- if (!prop)
- return -ENOMEM;
-
- prop[i++] = POWER_SUPPLY_PROP_PRESENT;
- if (charger->charge_gpiod)
- prop[i++] = POWER_SUPPLY_PROP_STATUS;
- if (info->batt_tech >= 0)
- prop[i++] = POWER_SUPPLY_PROP_TECHNOLOGY;
- if (info->batt_I2C_reg >= 0)
- prop[i++] = POWER_SUPPLY_PROP_VOLTAGE_NOW;
- if (info->max_voltage >= 0)
- prop[i++] = POWER_SUPPLY_PROP_VOLTAGE_MAX;
- if (info->min_voltage >= 0)
- prop[i++] = POWER_SUPPLY_PROP_VOLTAGE_MIN;
-
- if (!info->batt_name) {
- dev_info(&charger->client->dev,
- "Please consider setting proper battery "
- "name in platform definition file, falling "
- "back to name \" Z2_DEFAULT_NAME \"\n");
- charger->batt_ps_desc.name = Z2_DEFAULT_NAME;
- } else
- charger->batt_ps_desc.name = info->batt_name;
-
- charger->batt_ps_desc.properties = prop;
- charger->batt_ps_desc.num_properties = props;
- charger->batt_ps_desc.type = POWER_SUPPLY_TYPE_BATTERY;
- charger->batt_ps_desc.get_property = z2_batt_get_property;
- charger->batt_ps_desc.external_power_changed =
- z2_batt_ext_power_changed;
- charger->batt_ps_desc.use_for_apm = 1;
-
- return 0;
-}
-
-static int z2_batt_probe(struct i2c_client *client)
-{
- int ret = 0;
- int props = 1; /* POWER_SUPPLY_PROP_PRESENT */
- struct z2_charger *charger;
- struct z2_battery_info *info = client->dev.platform_data;
- struct power_supply_config psy_cfg = {};
-
- if (info == NULL) {
- dev_err(&client->dev,
- "Please set platform device platform_data"
- " to a valid z2_battery_info pointer!\n");
- return -EINVAL;
- }
-
- charger = kzalloc(sizeof(*charger), GFP_KERNEL);
- if (charger == NULL)
- return -ENOMEM;
-
- charger->bat_status = POWER_SUPPLY_STATUS_UNKNOWN;
- charger->info = info;
- charger->client = client;
- i2c_set_clientdata(client, charger);
- psy_cfg.drv_data = charger;
-
- mutex_init(&charger->work_lock);
-
- charger->charge_gpiod = devm_gpiod_get_optional(&client->dev,
- NULL, GPIOD_IN);
- if (IS_ERR(charger->charge_gpiod)) {
- ret = dev_err_probe(&client->dev,
- PTR_ERR(charger->charge_gpiod),
- "failed to get charge GPIO\n");
- goto err;
- }
-
- if (charger->charge_gpiod) {
- gpiod_set_consumer_name(charger->charge_gpiod, "BATT CHRG");
-
- irq_set_irq_type(gpiod_to_irq(charger->charge_gpiod),
- IRQ_TYPE_EDGE_BOTH);
- ret = request_irq(gpiod_to_irq(charger->charge_gpiod),
- z2_charge_switch_irq, 0,
- "AC Detect", charger);
- if (ret)
- goto err;
- }
-
- ret = z2_batt_ps_init(charger, props);
- if (ret)
- goto err3;
-
- INIT_WORK(&charger->bat_work, z2_batt_work);
-
- charger->batt_ps = power_supply_register(&client->dev,
- &charger->batt_ps_desc,
- &psy_cfg);
- if (IS_ERR(charger->batt_ps)) {
- ret = PTR_ERR(charger->batt_ps);
- goto err4;
- }
-
- schedule_work(&charger->bat_work);
-
- return 0;
-
-err4:
- kfree(charger->batt_ps_desc.properties);
-err3:
- if (charger->charge_gpiod)
- free_irq(gpiod_to_irq(charger->charge_gpiod), charger);
-err:
- kfree(charger);
- return ret;
-}
-
-static void z2_batt_remove(struct i2c_client *client)
-{
- struct z2_charger *charger = i2c_get_clientdata(client);
-
- cancel_work_sync(&charger->bat_work);
- power_supply_unregister(charger->batt_ps);
-
- kfree(charger->batt_ps_desc.properties);
- if (charger->charge_gpiod)
- free_irq(gpiod_to_irq(charger->charge_gpiod), charger);
-
- kfree(charger);
-}
-
-#ifdef CONFIG_PM
-static int z2_batt_suspend(struct device *dev)
-{
- struct i2c_client *client = to_i2c_client(dev);
- struct z2_charger *charger = i2c_get_clientdata(client);
-
- flush_work(&charger->bat_work);
- return 0;
-}
-
-static int z2_batt_resume(struct device *dev)
-{
- struct i2c_client *client = to_i2c_client(dev);
- struct z2_charger *charger = i2c_get_clientdata(client);
-
- schedule_work(&charger->bat_work);
- return 0;
-}
-
-static const struct dev_pm_ops z2_battery_pm_ops = {
- .suspend = z2_batt_suspend,
- .resume = z2_batt_resume,
-};
-
-#define Z2_BATTERY_PM_OPS (&z2_battery_pm_ops)
-
-#else
-#define Z2_BATTERY_PM_OPS (NULL)
-#endif
-
-static const struct i2c_device_id z2_batt_id[] = {
- { "aer915", 0 },
- { }
-};
-MODULE_DEVICE_TABLE(i2c, z2_batt_id);
-
-static struct i2c_driver z2_batt_driver = {
- .driver = {
- .name = "z2-battery",
- .pm = Z2_BATTERY_PM_OPS
- },
- .probe_new = z2_batt_probe,
- .remove = z2_batt_remove,
- .id_table = z2_batt_id,
-};
-module_i2c_driver(z2_batt_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Peter Edwards <sweetlilmre@gmail.com>");
-MODULE_DESCRIPTION("Zipit Z2 battery driver");
diff --git a/drivers/powercap/idle_inject.c b/drivers/powercap/idle_inject.c
index fe86a09e3b67..e18a2cc4e46a 100644
--- a/drivers/powercap/idle_inject.c
+++ b/drivers/powercap/idle_inject.c
@@ -63,13 +63,29 @@ struct idle_inject_thread {
* @idle_duration_us: duration of CPU idle time to inject
* @run_duration_us: duration of CPU run time to allow
* @latency_us: max allowed latency
+ * @update: Optional callback deciding whether or not to skip idle
+ * injection in the given cycle.
* @cpumask: mask of CPUs affected by idle injection
+ *
+ * This structure is used to define per instance idle inject device data. Each
+ * instance has an idle duration, a run duration and mask of CPUs to inject
+ * idle.
+ *
+ * Actual CPU idle time is injected by calling kernel scheduler interface
+ * play_idle_precise(). There is one optional callback that can be registered
+ * by calling idle_inject_register_full():
+ *
+ * update() - This callback is invoked just before waking up CPUs to inject
+ * idle. If it returns false, CPUs are not woken up to inject idle in the given
+ * cycle. It also allows the caller to readjust the idle and run duration by
+ * calling idle_inject_set_duration() for the next cycle.
*/
struct idle_inject_device {
struct hrtimer timer;
unsigned int idle_duration_us;
unsigned int run_duration_us;
unsigned int latency_us;
+ bool (*update)(void);
unsigned long cpumask[];
};
@@ -111,11 +127,12 @@ static enum hrtimer_restart idle_inject_timer_fn(struct hrtimer *timer)
struct idle_inject_device *ii_dev =
container_of(timer, struct idle_inject_device, timer);
+ if (!ii_dev->update || (ii_dev->update && ii_dev->update()))
+ idle_inject_wakeup(ii_dev);
+
duration_us = READ_ONCE(ii_dev->run_duration_us);
duration_us += READ_ONCE(ii_dev->idle_duration_us);
- idle_inject_wakeup(ii_dev);
-
hrtimer_forward_now(timer, ns_to_ktime(duration_us * NSEC_PER_USEC));
return HRTIMER_RESTART;
@@ -155,11 +172,14 @@ void idle_inject_set_duration(struct idle_inject_device *ii_dev,
unsigned int run_duration_us,
unsigned int idle_duration_us)
{
- if (run_duration_us && idle_duration_us) {
+ if (run_duration_us + idle_duration_us) {
WRITE_ONCE(ii_dev->run_duration_us, run_duration_us);
WRITE_ONCE(ii_dev->idle_duration_us, idle_duration_us);
}
+ if (!run_duration_us)
+ pr_debug("CPU is forced to 100 percent idle\n");
}
+EXPORT_SYMBOL_NS_GPL(idle_inject_set_duration, IDLE_INJECT);
/**
* idle_inject_get_duration - idle and run duration retrieval helper
@@ -174,6 +194,7 @@ void idle_inject_get_duration(struct idle_inject_device *ii_dev,
*run_duration_us = READ_ONCE(ii_dev->run_duration_us);
*idle_duration_us = READ_ONCE(ii_dev->idle_duration_us);
}
+EXPORT_SYMBOL_NS_GPL(idle_inject_get_duration, IDLE_INJECT);
/**
* idle_inject_set_latency - set the maximum latency allowed
@@ -185,6 +206,7 @@ void idle_inject_set_latency(struct idle_inject_device *ii_dev,
{
WRITE_ONCE(ii_dev->latency_us, latency_us);
}
+EXPORT_SYMBOL_NS_GPL(idle_inject_set_latency, IDLE_INJECT);
/**
* idle_inject_start - start idle injections
@@ -201,7 +223,7 @@ int idle_inject_start(struct idle_inject_device *ii_dev)
unsigned int idle_duration_us = READ_ONCE(ii_dev->idle_duration_us);
unsigned int run_duration_us = READ_ONCE(ii_dev->run_duration_us);
- if (!idle_duration_us || !run_duration_us)
+ if (!(idle_duration_us + run_duration_us))
return -EINVAL;
pr_debug("Starting injecting idle cycles on CPUs '%*pbl'\n",
@@ -216,6 +238,7 @@ int idle_inject_start(struct idle_inject_device *ii_dev)
return 0;
}
+EXPORT_SYMBOL_NS_GPL(idle_inject_start, IDLE_INJECT);
/**
* idle_inject_stop - stops idle injections
@@ -262,6 +285,7 @@ void idle_inject_stop(struct idle_inject_device *ii_dev)
cpu_hotplug_enable();
}
+EXPORT_SYMBOL_NS_GPL(idle_inject_stop, IDLE_INJECT);
/**
* idle_inject_setup - prepare the current task for idle injection
@@ -290,17 +314,22 @@ static int idle_inject_should_run(unsigned int cpu)
}
/**
- * idle_inject_register - initialize idle injection on a set of CPUs
+ * idle_inject_register_full - initialize idle injection on a set of CPUs
* @cpumask: CPUs to be affected by idle injection
+ * @update: This callback is called just before waking up CPUs to inject
+ * idle
*
* This function creates an idle injection control device structure for the
- * given set of CPUs and initializes the timer associated with it. It does not
- * start any injection cycles.
+ * given set of CPUs and initializes the timer associated with it. This
+ * function also allows to register update()callback.
+ * It does not start any injection cycles.
*
* Return: NULL if memory allocation fails, idle injection control device
* pointer on success.
*/
-struct idle_inject_device *idle_inject_register(struct cpumask *cpumask)
+
+struct idle_inject_device *idle_inject_register_full(struct cpumask *cpumask,
+ bool (*update)(void))
{
struct idle_inject_device *ii_dev;
int cpu, cpu_rb;
@@ -313,6 +342,7 @@ struct idle_inject_device *idle_inject_register(struct cpumask *cpumask)
hrtimer_init(&ii_dev->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
ii_dev->timer.function = idle_inject_timer_fn;
ii_dev->latency_us = UINT_MAX;
+ ii_dev->update = update;
for_each_cpu(cpu, to_cpumask(ii_dev->cpumask)) {
@@ -337,6 +367,24 @@ out_rollback:
return NULL;
}
+EXPORT_SYMBOL_NS_GPL(idle_inject_register_full, IDLE_INJECT);
+
+/**
+ * idle_inject_register - initialize idle injection on a set of CPUs
+ * @cpumask: CPUs to be affected by idle injection
+ *
+ * This function creates an idle injection control device structure for the
+ * given set of CPUs and initializes the timer associated with it. It does not
+ * start any injection cycles.
+ *
+ * Return: NULL if memory allocation fails, idle injection control device
+ * pointer on success.
+ */
+struct idle_inject_device *idle_inject_register(struct cpumask *cpumask)
+{
+ return idle_inject_register_full(cpumask, NULL);
+}
+EXPORT_SYMBOL_NS_GPL(idle_inject_register, IDLE_INJECT);
/**
* idle_inject_unregister - unregister idle injection control device
@@ -357,6 +405,7 @@ void idle_inject_unregister(struct idle_inject_device *ii_dev)
kfree(ii_dev);
}
+EXPORT_SYMBOL_NS_GPL(idle_inject_unregister, IDLE_INJECT);
static struct smp_hotplug_thread idle_inject_threads = {
.store = &idle_inject_thread.tsk,
diff --git a/drivers/powercap/intel_rapl_common.c b/drivers/powercap/intel_rapl_common.c
index 26d00b1853b4..8970c7b80884 100644
--- a/drivers/powercap/intel_rapl_common.c
+++ b/drivers/powercap/intel_rapl_common.c
@@ -999,7 +999,15 @@ static u64 rapl_compute_time_window_core(struct rapl_package *rp, u64 value,
do_div(value, rp->time_unit);
y = ilog2(value);
- f = div64_u64(4 * (value - (1 << y)), 1 << y);
+
+ /*
+ * The target hardware field is 7 bits wide, so return all ones
+ * if the exponent is too large.
+ */
+ if (y > 0x1f)
+ return 0x7f;
+
+ f = div64_u64(4 * (value - (1ULL << y)), 1ULL << y);
value = (y & 0x1f) | ((f & 0x3) << 5);
}
return value;
@@ -1113,7 +1121,10 @@ static const struct x86_cpu_id rapl_ids[] __initconst = {
X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE, &rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P, &rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_S, &rapl_defaults_core),
+ X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE, &rapl_defaults_core),
+ X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE_L, &rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, &rapl_defaults_spr_server),
+ X86_MATCH_INTEL_FAM6_MODEL(EMERALDRAPIDS_X, &rapl_defaults_spr_server),
X86_MATCH_INTEL_FAM6_MODEL(LAKEFIELD, &rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT, &rapl_defaults_byt),
diff --git a/drivers/powercap/powercap_sys.c b/drivers/powercap/powercap_sys.c
index 1f968353d479..e180dee0f83d 100644
--- a/drivers/powercap/powercap_sys.c
+++ b/drivers/powercap/powercap_sys.c
@@ -530,9 +530,6 @@ struct powercap_zone *powercap_register_zone(
power_zone->name = kstrdup(name, GFP_KERNEL);
if (!power_zone->name)
goto err_name_alloc;
- dev_set_name(&power_zone->dev, "%s:%x",
- dev_name(power_zone->dev.parent),
- power_zone->id);
power_zone->constraints = kcalloc(nr_constraints,
sizeof(*power_zone->constraints),
GFP_KERNEL);
@@ -555,9 +552,16 @@ struct powercap_zone *powercap_register_zone(
power_zone->dev_attr_groups[0] = &power_zone->dev_zone_attr_group;
power_zone->dev_attr_groups[1] = NULL;
power_zone->dev.groups = power_zone->dev_attr_groups;
+ dev_set_name(&power_zone->dev, "%s:%x",
+ dev_name(power_zone->dev.parent),
+ power_zone->id);
result = device_register(&power_zone->dev);
- if (result)
- goto err_dev_ret;
+ if (result) {
+ put_device(&power_zone->dev);
+ mutex_unlock(&control_type->lock);
+
+ return ERR_PTR(result);
+ }
control_type->nr_zones++;
mutex_unlock(&control_type->lock);
diff --git a/drivers/pps/clients/pps-ldisc.c b/drivers/pps/clients/pps-ldisc.c
index d73c4c2ed4e1..443d6bae19d1 100644
--- a/drivers/pps/clients/pps-ldisc.c
+++ b/drivers/pps/clients/pps-ldisc.c
@@ -13,7 +13,7 @@
#include <linux/pps_kernel.h>
#include <linux/bug.h>
-static void pps_tty_dcd_change(struct tty_struct *tty, unsigned int status)
+static void pps_tty_dcd_change(struct tty_struct *tty, bool active)
{
struct pps_device *pps;
struct pps_event_time ts;
@@ -29,11 +29,11 @@ static void pps_tty_dcd_change(struct tty_struct *tty, unsigned int status)
return;
/* Now do the PPS event report */
- pps_event(pps, &ts, status ? PPS_CAPTUREASSERT :
+ pps_event(pps, &ts, active ? PPS_CAPTUREASSERT :
PPS_CAPTURECLEAR, NULL);
dev_dbg(pps->dev, "PPS %s at %lu\n",
- status ? "assert" : "clear", jiffies);
+ active ? "assert" : "clear", jiffies);
}
static int (*alias_n_tty_open)(struct tty_struct *tty);
diff --git a/drivers/ptp/ptp_qoriq.c b/drivers/ptp/ptp_qoriq.c
index 08f4cf0ad9e3..61530167efe4 100644
--- a/drivers/ptp/ptp_qoriq.c
+++ b/drivers/ptp/ptp_qoriq.c
@@ -48,6 +48,29 @@ static void tmr_cnt_write(struct ptp_qoriq *ptp_qoriq, u64 ns)
ptp_qoriq->write(&regs->ctrl_regs->tmr_cnt_h, hi);
}
+static u64 tmr_offset_read(struct ptp_qoriq *ptp_qoriq)
+{
+ struct ptp_qoriq_registers *regs = &ptp_qoriq->regs;
+ u32 lo, hi;
+ u64 ns;
+
+ lo = ptp_qoriq->read(&regs->ctrl_regs->tmroff_l);
+ hi = ptp_qoriq->read(&regs->ctrl_regs->tmroff_h);
+ ns = ((u64) hi) << 32;
+ ns |= lo;
+ return ns;
+}
+
+static void tmr_offset_write(struct ptp_qoriq *ptp_qoriq, u64 delta_ns)
+{
+ struct ptp_qoriq_registers *regs = &ptp_qoriq->regs;
+ u32 lo = delta_ns & 0xffffffff;
+ u32 hi = delta_ns >> 32;
+
+ ptp_qoriq->write(&regs->ctrl_regs->tmroff_l, lo);
+ ptp_qoriq->write(&regs->ctrl_regs->tmroff_h, hi);
+}
+
/* Caller must hold ptp_qoriq->lock. */
static void set_alarm(struct ptp_qoriq *ptp_qoriq)
{
@@ -55,7 +78,9 @@ static void set_alarm(struct ptp_qoriq *ptp_qoriq)
u64 ns;
u32 lo, hi;
- ns = tmr_cnt_read(ptp_qoriq) + 1500000000ULL;
+ ns = tmr_cnt_read(ptp_qoriq) + tmr_offset_read(ptp_qoriq)
+ + 1500000000ULL;
+
ns = div_u64(ns, 1000000000UL) * 1000000000ULL;
ns -= ptp_qoriq->tclk_period;
hi = ns >> 32;
@@ -207,15 +232,24 @@ EXPORT_SYMBOL_GPL(ptp_qoriq_adjfine);
int ptp_qoriq_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
- s64 now;
- unsigned long flags;
struct ptp_qoriq *ptp_qoriq = container_of(ptp, struct ptp_qoriq, caps);
+ s64 now, curr_delta;
+ unsigned long flags;
spin_lock_irqsave(&ptp_qoriq->lock, flags);
- now = tmr_cnt_read(ptp_qoriq);
- now += delta;
- tmr_cnt_write(ptp_qoriq, now);
+ /* On LS1021A, eTSEC2 and eTSEC3 do not take into account the TMR_OFF
+ * adjustment
+ */
+ if (ptp_qoriq->etsec) {
+ now = tmr_cnt_read(ptp_qoriq);
+ now += delta;
+ tmr_cnt_write(ptp_qoriq, now);
+ } else {
+ curr_delta = tmr_offset_read(ptp_qoriq);
+ curr_delta += delta;
+ tmr_offset_write(ptp_qoriq, curr_delta);
+ }
set_fipers(ptp_qoriq);
spin_unlock_irqrestore(&ptp_qoriq->lock, flags);
@@ -232,7 +266,7 @@ int ptp_qoriq_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
spin_lock_irqsave(&ptp_qoriq->lock, flags);
- ns = tmr_cnt_read(ptp_qoriq);
+ ns = tmr_cnt_read(ptp_qoriq) + tmr_offset_read(ptp_qoriq);
spin_unlock_irqrestore(&ptp_qoriq->lock, flags);
@@ -253,6 +287,7 @@ int ptp_qoriq_settime(struct ptp_clock_info *ptp,
spin_lock_irqsave(&ptp_qoriq->lock, flags);
+ tmr_offset_write(ptp_qoriq, 0);
tmr_cnt_write(ptp_qoriq, ns);
set_fipers(ptp_qoriq);
@@ -488,6 +523,7 @@ int ptp_qoriq_init(struct ptp_qoriq *ptp_qoriq, void __iomem *base,
/* The eTSEC uses differnt memory map with DPAA/ENETC */
if (of_device_is_compatible(node, "fsl,etsec-ptp")) {
+ ptp_qoriq->etsec = true;
ptp_qoriq->regs.ctrl_regs = base + ETSEC_CTRL_REGS_OFFSET;
ptp_qoriq->regs.alarm_regs = base + ETSEC_ALARM_REGS_OFFSET;
ptp_qoriq->regs.fiper_regs = base + ETSEC_FIPER_REGS_OFFSET;
diff --git a/drivers/rapidio/rio-driver.c b/drivers/rapidio/rio-driver.c
index a72bb0a40fcf..e60e49769bed 100644
--- a/drivers/rapidio/rio-driver.c
+++ b/drivers/rapidio/rio-driver.c
@@ -204,9 +204,9 @@ static int rio_match_bus(struct device *dev, struct device_driver *drv)
out:return 0;
}
-static int rio_uevent(struct device *dev, struct kobj_uevent_env *env)
+static int rio_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
- struct rio_dev *rdev;
+ const struct rio_dev *rdev;
if (!dev)
return -ENODEV;
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index 820c9a0788e5..aae28d0a489c 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -655,6 +655,14 @@ config REGULATOR_MAX20086
protectorvia I2C bus. The regulator has 2 or 4 outputs depending on
the device model. This driver is only capable to turn on/off them.
+config REGULATOR_MAX20411
+ tristate "Maxim MAX20411 High-Efficiency Single Step-Down Converter"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ This driver controls the Maxim MAX20411 family of high-efficiency,
+ syncrhonous step-down converters.
+
config REGULATOR_MAX77686
tristate "Maxim 77686 regulator"
depends on MFD_MAX77686 || COMPILE_TEST
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index b9f5eb35bf5f..ee383d8fc835 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -80,6 +80,7 @@ obj-$(CONFIG_REGULATOR_MAX8973) += max8973-regulator.o
obj-$(CONFIG_REGULATOR_MAX8997) += max8997-regulator.o
obj-$(CONFIG_REGULATOR_MAX8998) += max8998.o
obj-$(CONFIG_REGULATOR_MAX20086) += max20086-regulator.o
+obj-$(CONFIG_REGULATOR_MAX20411) += max20411-regulator.o
obj-$(CONFIG_REGULATOR_MAX77686) += max77686-regulator.o
obj-$(CONFIG_REGULATOR_MAX77693) += max77693-regulator.o
obj-$(CONFIG_REGULATOR_MAX77802) += max77802-regulator.o
diff --git a/drivers/regulator/act8945a-regulator.c b/drivers/regulator/act8945a-regulator.c
index 6a62f946ccae..1db1c6423779 100644
--- a/drivers/regulator/act8945a-regulator.c
+++ b/drivers/regulator/act8945a-regulator.c
@@ -15,7 +15,7 @@
#include <linux/regulator/machine.h>
#include <dt-bindings/regulator/active-semi,8945a-regulator.h>
-/**
+/*
* ACT8945A Global Register Map.
*/
#define ACT8945A_SYS_MODE 0x00
@@ -46,13 +46,13 @@
#define ACT8945A_LDO4_CTRL 0x65
#define ACT8945A_LDO4_SUS 0x66
-/**
+/*
* Field Definitions.
*/
#define ACT8945A_ENA 0x80 /* ON - [7] */
#define ACT8945A_VSEL_MASK 0x3F /* VSET - [5:0] */
-/**
+/*
* ACT8945A Voltage Number
*/
#define ACT8945A_VOLTAGE_NUM 64
diff --git a/drivers/regulator/fixed-helper.c b/drivers/regulator/fixed-helper.c
index 2c6098e6f4bc..0eb2442456f0 100644
--- a/drivers/regulator/fixed-helper.c
+++ b/drivers/regulator/fixed-helper.c
@@ -20,7 +20,7 @@ static void regulator_fixed_release(struct device *dev)
}
/**
- * regulator_register_fixed_name - register a no-op fixed regulator
+ * regulator_register_always_on - register an always-on regulator with a fixed name
* @id: platform device id
* @name: name to be used for the regulator
* @supplies: consumers for this regulator
diff --git a/drivers/regulator/max20411-regulator.c b/drivers/regulator/max20411-regulator.c
new file mode 100644
index 000000000000..83dacb4ff173
--- /dev/null
+++ b/drivers/regulator/max20411-regulator.c
@@ -0,0 +1,163 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022, Linaro Ltd.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+
+#define MAX20411_UV_STEP 6250
+#define MAX20411_BASE_UV 243750
+#define MAX20411_MIN_SEL 41 /* 0.5V */
+#define MAX20411_MAX_SEL 165 /* 1.275V */
+#define MAX20411_VID_OFFSET 0x7
+#define MAX20411_VID_MASK 0xff
+#define MAX20411_SLEW_OFFSET 0x6
+#define MAX20411_SLEW_DVS_MASK 0xc
+#define MAX20411_SLEW_SR_MASK 0x3
+
+struct max20411 {
+ struct device *dev;
+ struct device_node *of_node;
+ struct regulator_desc desc;
+ struct regulator_dev *rdev;
+ struct regmap *regmap;
+};
+
+static const unsigned int max20411_slew_rates[] = { 13100, 6600, 3300, 1600 };
+
+static int max20411_enable_time(struct regulator_dev *rdev)
+{
+ int voltage, rate, ret;
+ unsigned int val;
+
+ /* get voltage */
+ ret = regmap_read(rdev->regmap, rdev->desc->vsel_reg, &val);
+ if (ret)
+ return ret;
+
+ val &= rdev->desc->vsel_mask;
+ voltage = regulator_list_voltage_linear(rdev, val);
+
+ /* get rate */
+ ret = regmap_read(rdev->regmap, MAX20411_SLEW_OFFSET, &val);
+ if (ret)
+ return ret;
+
+ val = FIELD_GET(MAX20411_SLEW_SR_MASK, val);
+ rate = max20411_slew_rates[val];
+
+ return DIV_ROUND_UP(voltage, rate);
+}
+
+static const struct regmap_config max20411_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0xe,
+};
+
+static const struct regulator_ops max20411_ops = {
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .list_voltage = regulator_list_voltage_linear,
+ .enable_time = max20411_enable_time,
+};
+
+static const struct regulator_desc max20411_desc = {
+ .ops = &max20411_ops,
+ .owner = THIS_MODULE,
+ .type = REGULATOR_VOLTAGE,
+ .supply_name = "vin",
+ .name = "max20411",
+
+ /*
+ * voltage = 0.24375V + selector * 6.25mV
+ * with valid selector between 41 to 165 (0.5V to 1.275V)
+ */
+ .min_uV = MAX20411_BASE_UV,
+ .uV_step = MAX20411_UV_STEP,
+ .linear_min_sel = MAX20411_MIN_SEL,
+ .n_voltages = MAX20411_MAX_SEL + 1,
+
+ .vsel_reg = MAX20411_VID_OFFSET,
+ .vsel_mask = MAX20411_VID_MASK,
+
+ .ramp_reg = MAX20411_SLEW_OFFSET,
+ .ramp_mask = MAX20411_SLEW_DVS_MASK,
+ .ramp_delay_table = max20411_slew_rates,
+ .n_ramp_values = ARRAY_SIZE(max20411_slew_rates),
+};
+
+static int max20411_probe(struct i2c_client *client)
+{
+ struct regulator_init_data *init_data;
+ struct device *dev = &client->dev;
+ struct regulator_config cfg = {};
+ struct max20411 *max20411;
+
+ max20411 = devm_kzalloc(dev, sizeof(*max20411), GFP_KERNEL);
+ if (!max20411)
+ return -ENOMEM;
+
+ max20411->regmap = devm_regmap_init_i2c(client, &max20411_regmap_config);
+ if (IS_ERR(max20411->regmap)) {
+ dev_err(dev, "Failed to allocate regmap!\n");
+ return PTR_ERR(max20411->regmap);
+ }
+
+ max20411->dev = dev;
+ max20411->of_node = dev->of_node;
+
+ max20411->desc = max20411_desc;
+ init_data = of_get_regulator_init_data(max20411->dev, max20411->of_node, &max20411->desc);
+ if (!init_data)
+ return -ENODATA;
+
+ cfg.dev = max20411->dev;
+ cfg.init_data = init_data;
+ cfg.of_node = max20411->of_node;
+ cfg.driver_data = max20411;
+
+ cfg.ena_gpiod = gpiod_get(max20411->dev, "enable", GPIOD_ASIS);
+ if (IS_ERR(cfg.ena_gpiod))
+ return dev_err_probe(dev, PTR_ERR(cfg.ena_gpiod),
+ "unable to acquire enable gpio\n");
+
+ max20411->rdev = devm_regulator_register(max20411->dev, &max20411->desc, &cfg);
+ if (IS_ERR(max20411->rdev))
+ dev_err(max20411->dev, "Failed to register regulator\n");
+
+ return PTR_ERR_OR_ZERO(max20411->rdev);
+}
+
+static const struct of_device_id of_max20411_match_tbl[] = {
+ { .compatible = "maxim,max20411", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, of_max20411_match_tbl);
+
+static const struct i2c_device_id max20411_id[] = {
+ { "max20411", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, max20411_id);
+
+static struct i2c_driver max20411_i2c_driver = {
+ .driver = {
+ .name = "max20411",
+ .of_match_table = of_max20411_match_tbl,
+ },
+ .probe_new = max20411_probe,
+ .id_table = max20411_id,
+};
+module_i2c_driver(max20411_i2c_driver);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/regulator/max597x-regulator.c b/drivers/regulator/max597x-regulator.c
index 39f803ff0a90..f0fb0f56e420 100644
--- a/drivers/regulator/max597x-regulator.c
+++ b/drivers/regulator/max597x-regulator.c
@@ -357,12 +357,6 @@ static int max597x_irq_handler(int irq, struct regulator_irq_data *rid,
return 0;
}
-static const struct regmap_config max597x_regmap_config = {
- .reg_bits = 8,
- .val_bits = 8,
- .max_register = MAX_REGISTERS,
-};
-
static int max597x_adc_range(struct regmap *regmap, const int ch,
u32 *irng, u32 *mon_rng)
{
@@ -431,41 +425,59 @@ static int max597x_setup_irq(struct device *dev,
static int max597x_regulator_probe(struct platform_device *pdev)
{
-
-
- struct max597x_data *max597x = dev_get_drvdata(pdev->dev.parent);
+ struct max597x_data *max597x;
+ struct regmap *regmap = dev_get_regmap(pdev->dev.parent, NULL);
struct max597x_regulator *data;
-
+ struct i2c_client *i2c = to_i2c_client(pdev->dev.parent);
struct regulator_config config = { };
struct regulator_dev *rdev;
struct regulator_dev *rdevs[MAX5970_NUM_SWITCHES];
- int num_switches = max597x->num_switches;
+ int num_switches;
int ret, i;
+ if (!regmap)
+ return -EPROBE_DEFER;
+
+ max597x = devm_kzalloc(&i2c->dev, sizeof(struct max597x_data), GFP_KERNEL);
+ if (!max597x)
+ return -ENOMEM;
+
+ i2c_set_clientdata(i2c, max597x);
+
+ if (of_device_is_compatible(i2c->dev.of_node, "maxim,max5978"))
+ max597x->num_switches = MAX597x_TYPE_MAX5978;
+ else if (of_device_is_compatible(i2c->dev.of_node, "maxim,max5970"))
+ max597x->num_switches = MAX597x_TYPE_MAX5970;
+ else
+ return -ENODEV;
+
+ i2c_set_clientdata(i2c, max597x);
+ num_switches = max597x->num_switches;
+
for (i = 0; i < num_switches; i++) {
data =
- devm_kzalloc(max597x->dev, sizeof(struct max597x_regulator),
+ devm_kzalloc(&i2c->dev, sizeof(struct max597x_regulator),
GFP_KERNEL);
if (!data)
return -ENOMEM;
data->num_switches = num_switches;
- data->regmap = max597x->regmap;
+ data->regmap = regmap;
- ret = max597x_adc_range(data->regmap, i, &max597x->irng[i], &max597x->mon_rng[i]);
+ ret = max597x_adc_range(regmap, i, &max597x->irng[i], &max597x->mon_rng[i]);
if (ret < 0)
return ret;
data->irng = max597x->irng[i];
data->mon_rng = max597x->mon_rng[i];
- config.dev = max597x->dev;
+ config.dev = &i2c->dev;
config.driver_data = (void *)data;
config.regmap = data->regmap;
- rdev = devm_regulator_register(max597x->dev,
+ rdev = devm_regulator_register(&i2c->dev,
&regulators[i], &config);
if (IS_ERR(rdev)) {
- dev_err(max597x->dev, "failed to register regulator %s\n",
+ dev_err(&i2c->dev, "failed to register regulator %s\n",
regulators[i].name);
return PTR_ERR(rdev);
}
@@ -473,12 +485,12 @@ static int max597x_regulator_probe(struct platform_device *pdev)
max597x->shunt_micro_ohms[i] = data->shunt_micro_ohms;
}
- if (max597x->irq) {
+ if (i2c->irq) {
ret =
- max597x_setup_irq(max597x->dev, max597x->irq, rdevs, num_switches,
+ max597x_setup_irq(&i2c->dev, i2c->irq, rdevs, num_switches,
data);
if (ret) {
- dev_err(max597x->dev, "IRQ setup failed");
+ dev_err(&i2c->dev, "IRQ setup failed");
return ret;
}
}
diff --git a/drivers/regulator/max77802-regulator.c b/drivers/regulator/max77802-regulator.c
index 21e0eb0f43f9..befe5f319819 100644
--- a/drivers/regulator/max77802-regulator.c
+++ b/drivers/regulator/max77802-regulator.c
@@ -94,9 +94,11 @@ static int max77802_set_suspend_disable(struct regulator_dev *rdev)
{
unsigned int val = MAX77802_OFF_PWRREQ;
struct max77802_regulator_prv *max77802 = rdev_get_drvdata(rdev);
- int id = rdev_get_id(rdev);
+ unsigned int id = rdev_get_id(rdev);
int shift = max77802_get_opmode_shift(id);
+ if (WARN_ON_ONCE(id >= ARRAY_SIZE(max77802->opmode)))
+ return -EINVAL;
max77802->opmode[id] = val;
return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
rdev->desc->enable_mask, val << shift);
@@ -110,7 +112,7 @@ static int max77802_set_suspend_disable(struct regulator_dev *rdev)
static int max77802_set_mode(struct regulator_dev *rdev, unsigned int mode)
{
struct max77802_regulator_prv *max77802 = rdev_get_drvdata(rdev);
- int id = rdev_get_id(rdev);
+ unsigned int id = rdev_get_id(rdev);
unsigned int val;
int shift = max77802_get_opmode_shift(id);
@@ -127,6 +129,9 @@ static int max77802_set_mode(struct regulator_dev *rdev, unsigned int mode)
return -EINVAL;
}
+ if (WARN_ON_ONCE(id >= ARRAY_SIZE(max77802->opmode)))
+ return -EINVAL;
+
max77802->opmode[id] = val;
return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
rdev->desc->enable_mask, val << shift);
@@ -135,8 +140,10 @@ static int max77802_set_mode(struct regulator_dev *rdev, unsigned int mode)
static unsigned max77802_get_mode(struct regulator_dev *rdev)
{
struct max77802_regulator_prv *max77802 = rdev_get_drvdata(rdev);
- int id = rdev_get_id(rdev);
+ unsigned int id = rdev_get_id(rdev);
+ if (WARN_ON_ONCE(id >= ARRAY_SIZE(max77802->opmode)))
+ return -EINVAL;
return max77802_map_mode(max77802->opmode[id]);
}
@@ -160,10 +167,13 @@ static int max77802_set_suspend_mode(struct regulator_dev *rdev,
unsigned int mode)
{
struct max77802_regulator_prv *max77802 = rdev_get_drvdata(rdev);
- int id = rdev_get_id(rdev);
+ unsigned int id = rdev_get_id(rdev);
unsigned int val;
int shift = max77802_get_opmode_shift(id);
+ if (WARN_ON_ONCE(id >= ARRAY_SIZE(max77802->opmode)))
+ return -EINVAL;
+
/*
* If the regulator has been disabled for suspend
* then is invalid to try setting a suspend mode.
@@ -209,9 +219,11 @@ static int max77802_set_suspend_mode(struct regulator_dev *rdev,
static int max77802_enable(struct regulator_dev *rdev)
{
struct max77802_regulator_prv *max77802 = rdev_get_drvdata(rdev);
- int id = rdev_get_id(rdev);
+ unsigned int id = rdev_get_id(rdev);
int shift = max77802_get_opmode_shift(id);
+ if (WARN_ON_ONCE(id >= ARRAY_SIZE(max77802->opmode)))
+ return -EINVAL;
if (max77802->opmode[id] == MAX77802_OFF_PWRREQ)
max77802->opmode[id] = MAX77802_OPMODE_NORMAL;
@@ -495,7 +507,7 @@ static int max77802_pmic_probe(struct platform_device *pdev)
for (i = 0; i < MAX77802_REG_MAX; i++) {
struct regulator_dev *rdev;
- int id = regulators[i].id;
+ unsigned int id = regulators[i].id;
int shift = max77802_get_opmode_shift(id);
int ret;
@@ -513,10 +525,12 @@ static int max77802_pmic_probe(struct platform_device *pdev)
* the hardware reports OFF as the regulator operating mode.
* Default to operating mode NORMAL in that case.
*/
- if (val == MAX77802_STATUS_OFF)
- max77802->opmode[id] = MAX77802_OPMODE_NORMAL;
- else
- max77802->opmode[id] = val;
+ if (id < ARRAY_SIZE(max77802->opmode)) {
+ if (val == MAX77802_STATUS_OFF)
+ max77802->opmode[id] = MAX77802_OPMODE_NORMAL;
+ else
+ max77802->opmode[id] = val;
+ }
rdev = devm_regulator_register(&pdev->dev,
&regulators[i], &config);
diff --git a/drivers/regulator/mcp16502.c b/drivers/regulator/mcp16502.c
index 042668385678..abee1b09008d 100644
--- a/drivers/regulator/mcp16502.c
+++ b/drivers/regulator/mcp16502.c
@@ -77,6 +77,7 @@
* @MCP16502_REG_A: active state register
* @MCP16502_REG_LPM: low power mode state register
* @MCP16502_REG_HIB: hibernate state register
+ * @MCP16502_REG_HPM: high-performance mode register
* @MCP16502_REG_SEQ: startup sequence register
* @MCP16502_REG_CFG: configuration register
*/
diff --git a/drivers/regulator/s5m8767.c b/drivers/regulator/s5m8767.c
index 35269f998210..754c6fcc6e64 100644
--- a/drivers/regulator/s5m8767.c
+++ b/drivers/regulator/s5m8767.c
@@ -923,10 +923,14 @@ static int s5m8767_pmic_probe(struct platform_device *pdev)
for (i = 0; i < pdata->num_regulators; i++) {
const struct sec_voltage_desc *desc;
- int id = pdata->regulators[i].id;
+ unsigned int id = pdata->regulators[i].id;
int enable_reg, enable_val;
struct regulator_dev *rdev;
+ BUILD_BUG_ON(ARRAY_SIZE(regulators) != ARRAY_SIZE(reg_voltage_map));
+ if (WARN_ON_ONCE(id >= ARRAY_SIZE(regulators)))
+ continue;
+
desc = reg_voltage_map[id];
if (desc) {
regulators[id].n_voltages =
diff --git a/drivers/regulator/scmi-regulator.c b/drivers/regulator/scmi-regulator.c
index b9918f4fd241..29ab217297d6 100644
--- a/drivers/regulator/scmi-regulator.c
+++ b/drivers/regulator/scmi-regulator.c
@@ -311,16 +311,12 @@ static int scmi_regulator_probe(struct scmi_device *sdev)
return PTR_ERR(voltage_ops);
num_doms = voltage_ops->num_domains_get(ph);
- if (num_doms <= 0) {
- if (!num_doms) {
- dev_err(&sdev->dev,
- "number of voltage domains invalid\n");
- num_doms = -EINVAL;
- } else {
- dev_err(&sdev->dev,
- "failed to get voltage domains - err:%d\n",
- num_doms);
- }
+ if (!num_doms)
+ return 0;
+
+ if (num_doms < 0) {
+ dev_err(&sdev->dev, "failed to get voltage domains - err:%d\n",
+ num_doms);
return num_doms;
}
diff --git a/drivers/regulator/tps65219-regulator.c b/drivers/regulator/tps65219-regulator.c
index c484c943e467..4b5acaa45049 100644
--- a/drivers/regulator/tps65219-regulator.c
+++ b/drivers/regulator/tps65219-regulator.c
@@ -173,24 +173,6 @@ static unsigned int tps65219_get_mode(struct regulator_dev *dev)
return REGULATOR_MODE_NORMAL;
}
-/*
- * generic regulator_set_bypass_regmap does not fully match requirements
- * TPS65219 Requires explicitly that regulator is disabled before switch
- */
-static int tps65219_set_bypass(struct regulator_dev *dev, bool enable)
-{
- struct tps65219 *tps = rdev_get_drvdata(dev);
- unsigned int rid = rdev_get_id(dev);
-
- if (dev->desc->ops->is_enabled(dev)) {
- dev_err(tps->dev,
- "%s LDO%d enabled, must be shut down to set bypass ",
- __func__, rid);
- return -EBUSY;
- }
- return regulator_set_bypass_regmap(dev, enable);
-}
-
/* Operations permitted on BUCK1/2/3 */
static const struct regulator_ops tps65219_bucks_ops = {
.is_enabled = regulator_is_enabled_regmap,
@@ -217,7 +199,7 @@ static const struct regulator_ops tps65219_ldos_1_2_ops = {
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.list_voltage = regulator_list_voltage_linear_range,
.map_voltage = regulator_map_voltage_linear_range,
- .set_bypass = tps65219_set_bypass,
+ .set_bypass = regulator_set_bypass_regmap,
.get_bypass = regulator_get_bypass_regmap,
};
@@ -342,7 +324,7 @@ static int tps65219_regulator_probe(struct platform_device *pdev)
&config);
if (IS_ERR(rdev)) {
dev_err(tps->dev, "failed to register %s regulator\n",
- pdev->name);
+ regulators[i].name);
return PTR_ERR(rdev);
}
rdevtbl[i] = rdev;
@@ -367,7 +349,7 @@ static int tps65219_regulator_probe(struct platform_device *pdev)
irq_data[i].type = irq_type;
tps65219_get_rdev_by_name(irq_type->regulator_name, rdevtbl, rdev);
- if (rdev < 0) {
+ if (IS_ERR(rdev)) {
dev_err(tps->dev, "Failed to get rdev for %s\n",
irq_type->regulator_name);
return -EINVAL;
diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
index 1cd4815a6dd1..80072b6b6283 100644
--- a/drivers/remoteproc/remoteproc_core.c
+++ b/drivers/remoteproc/remoteproc_core.c
@@ -643,7 +643,8 @@ static int rproc_handle_devmem(struct rproc *rproc, void *ptr,
if (!mapping)
return -ENOMEM;
- ret = iommu_map(rproc->domain, rsc->da, rsc->pa, rsc->len, rsc->flags);
+ ret = iommu_map(rproc->domain, rsc->da, rsc->pa, rsc->len, rsc->flags,
+ GFP_KERNEL);
if (ret) {
dev_err(dev, "failed to map devmem: %d\n", ret);
goto out;
@@ -737,7 +738,7 @@ static int rproc_alloc_carveout(struct rproc *rproc,
}
ret = iommu_map(rproc->domain, mem->da, dma, mem->len,
- mem->flags);
+ mem->flags, GFP_KERNEL);
if (ret) {
dev_err(dev, "iommu_map failed: %d\n", ret);
goto free_mapping;
diff --git a/drivers/rpmsg/rpmsg_core.c b/drivers/rpmsg/rpmsg_core.c
index d6dde00efdae..a2207c0cf432 100644
--- a/drivers/rpmsg/rpmsg_core.c
+++ b/drivers/rpmsg/rpmsg_core.c
@@ -492,9 +492,9 @@ static int rpmsg_dev_match(struct device *dev, struct device_driver *drv)
return of_driver_match_device(dev, drv);
}
-static int rpmsg_uevent(struct device *dev, struct kobj_uevent_env *env)
+static int rpmsg_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
- struct rpmsg_device *rpdev = to_rpmsg_device(dev);
+ const struct rpmsg_device *rpdev = to_rpmsg_device(dev);
int ret;
ret = of_device_uevent_modalias(dev, env);
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 677d2601d305..2ba72de0fa47 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -1212,15 +1212,6 @@ config RTC_DRV_RP5C01
This driver can also be built as a module. If so, the module
will be called rtc-rp5c01.
-config RTC_DRV_V3020
- tristate "EM Microelectronic V3020"
- help
- If you say yes here you will get support for the
- EM Microelectronic v3020 RTC chip.
-
- This driver can also be built as a module. If so, the module
- will be called rtc-v3020.
-
config RTC_DRV_GAMECUBE
tristate "Nintendo GameCube, Wii and Wii U RTC"
depends on GAMECUBE || WII || COMPILE_TEST
@@ -1415,18 +1406,14 @@ config RTC_DRV_OMAP
config RTC_DRV_S3C
tristate "Samsung S3C series SoC RTC"
- depends on ARCH_EXYNOS || ARCH_S3C64XX || ARCH_S3C24XX || ARCH_S5PV210 || \
+ depends on ARCH_EXYNOS || ARCH_S3C64XX || ARCH_S5PV210 || \
COMPILE_TEST
help
RTC (Realtime Clock) driver for the clock inbuilt into the
- Samsung S3C24XX series of SoCs. This can provide periodic
+ Samsung S3C64XX series of SoCs. This can provide periodic
interrupt rates from 1Hz to 64Hz for user programs, and
wakeup from Alarm.
- The driver currently supports the common features on all the
- S3C24XX range, such as the S3C2410, S3C2412, S3C2413, S3C2440
- and S3C2442.
-
This driver can also be build as a module. If so, the module
will be called rtc-s3c.
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index d3c042dcbc73..59eb30289335 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -176,7 +176,6 @@ obj-$(CONFIG_RTC_DRV_TI_K3) += rtc-ti-k3.o
obj-$(CONFIG_RTC_DRV_TPS6586X) += rtc-tps6586x.o
obj-$(CONFIG_RTC_DRV_TPS65910) += rtc-tps65910.o
obj-$(CONFIG_RTC_DRV_TWL4030) += rtc-twl.o
-obj-$(CONFIG_RTC_DRV_V3020) += rtc-v3020.o
obj-$(CONFIG_RTC_DRV_VT8500) += rtc-vt8500.o
obj-$(CONFIG_RTC_DRV_WILCO_EC) += rtc-wilco-ec.o
obj-$(CONFIG_RTC_DRV_WM831X) += rtc-wm831x.o
diff --git a/drivers/rtc/rtc-efi.c b/drivers/rtc/rtc-efi.c
index e991cccdb6e9..1e8bc6cc1e12 100644
--- a/drivers/rtc/rtc-efi.c
+++ b/drivers/rtc/rtc-efi.c
@@ -188,9 +188,10 @@ static int efi_set_time(struct device *dev, struct rtc_time *tm)
static int efi_procfs(struct device *dev, struct seq_file *seq)
{
- efi_time_t eft, alm;
- efi_time_cap_t cap;
- efi_bool_t enabled, pending;
+ efi_time_t eft, alm;
+ efi_time_cap_t cap;
+ efi_bool_t enabled, pending;
+ struct rtc_device *rtc = dev_get_drvdata(dev);
memset(&eft, 0, sizeof(eft));
memset(&alm, 0, sizeof(alm));
@@ -213,23 +214,25 @@ static int efi_procfs(struct device *dev, struct seq_file *seq)
/* XXX fixme: convert to string? */
seq_printf(seq, "Timezone\t: %u\n", eft.timezone);
- seq_printf(seq,
- "Alarm Time\t: %u:%u:%u.%09u\n"
- "Alarm Date\t: %u-%u-%u\n"
- "Alarm Daylight\t: %u\n"
- "Enabled\t\t: %s\n"
- "Pending\t\t: %s\n",
- alm.hour, alm.minute, alm.second, alm.nanosecond,
- alm.year, alm.month, alm.day,
- alm.daylight,
- enabled == 1 ? "yes" : "no",
- pending == 1 ? "yes" : "no");
-
- if (eft.timezone == EFI_UNSPECIFIED_TIMEZONE)
- seq_puts(seq, "Timezone\t: unspecified\n");
- else
- /* XXX fixme: convert to string? */
- seq_printf(seq, "Timezone\t: %u\n", alm.timezone);
+ if (test_bit(RTC_FEATURE_ALARM, rtc->features)) {
+ seq_printf(seq,
+ "Alarm Time\t: %u:%u:%u.%09u\n"
+ "Alarm Date\t: %u-%u-%u\n"
+ "Alarm Daylight\t: %u\n"
+ "Enabled\t\t: %s\n"
+ "Pending\t\t: %s\n",
+ alm.hour, alm.minute, alm.second, alm.nanosecond,
+ alm.year, alm.month, alm.day,
+ alm.daylight,
+ enabled == 1 ? "yes" : "no",
+ pending == 1 ? "yes" : "no");
+
+ if (eft.timezone == EFI_UNSPECIFIED_TIMEZONE)
+ seq_puts(seq, "Timezone\t: unspecified\n");
+ else
+ /* XXX fixme: convert to string? */
+ seq_printf(seq, "Timezone\t: %u\n", alm.timezone);
+ }
/*
* now prints the capabilities
@@ -269,7 +272,10 @@ static int __init efi_rtc_probe(struct platform_device *dev)
rtc->ops = &efi_rtc_ops;
clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, rtc->features);
- set_bit(RTC_FEATURE_ALARM_WAKEUP_ONLY, rtc->features);
+ if (efi_rt_services_supported(EFI_RT_SUPPORTED_WAKEUP_SERVICES))
+ set_bit(RTC_FEATURE_ALARM_WAKEUP_ONLY, rtc->features);
+ else
+ clear_bit(RTC_FEATURE_ALARM, rtc->features);
device_init_wakeup(&dev->dev, true);
diff --git a/drivers/rtc/rtc-sunplus.c b/drivers/rtc/rtc-sunplus.c
index e8e2ab1103fc..4b578e4d44f6 100644
--- a/drivers/rtc/rtc-sunplus.c
+++ b/drivers/rtc/rtc-sunplus.c
@@ -240,8 +240,8 @@ static int sp_rtc_probe(struct platform_device *plat_dev)
if (IS_ERR(sp_rtc->reg_base))
return dev_err_probe(&plat_dev->dev, PTR_ERR(sp_rtc->reg_base),
"%s devm_ioremap_resource fail\n", RTC_REG_NAME);
- dev_dbg(&plat_dev->dev, "res = 0x%x, reg_base = 0x%lx\n",
- sp_rtc->res->start, (unsigned long)sp_rtc->reg_base);
+ dev_dbg(&plat_dev->dev, "res = %pR, reg_base = %p\n",
+ sp_rtc->res, sp_rtc->reg_base);
sp_rtc->irq = platform_get_irq(plat_dev, 0);
if (sp_rtc->irq < 0)
diff --git a/drivers/rtc/rtc-v3020.c b/drivers/rtc/rtc-v3020.c
deleted file mode 100644
index 4e8341c49f51..000000000000
--- a/drivers/rtc/rtc-v3020.c
+++ /dev/null
@@ -1,369 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/* drivers/rtc/rtc-v3020.c
- *
- * Copyright (C) 2006 8D Technologies inc.
- * Copyright (C) 2004 Compulab Ltd.
- *
- * Driver for the V3020 RTC
- *
- * Changelog:
- *
- * 10-May-2006: Raphael Assenat <raph@8d.com>
- * - Converted to platform driver
- * - Use the generic rtc class
- *
- * ??-???-2004: Someone at Compulab
- * - Initial driver creation.
- */
-#include <linux/platform_device.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/rtc.h>
-#include <linux/types.h>
-#include <linux/bcd.h>
-#include <linux/platform_data/rtc-v3020.h>
-#include <linux/delay.h>
-#include <linux/gpio.h>
-#include <linux/slab.h>
-
-#include <linux/io.h>
-
-#undef DEBUG
-
-struct v3020;
-
-struct v3020_chip_ops {
- int (*map_io)(struct v3020 *chip, struct platform_device *pdev,
- struct v3020_platform_data *pdata);
- void (*unmap_io)(struct v3020 *chip);
- unsigned char (*read_bit)(struct v3020 *chip);
- void (*write_bit)(struct v3020 *chip, unsigned char bit);
-};
-
-#define V3020_CS 0
-#define V3020_WR 1
-#define V3020_RD 2
-#define V3020_IO 3
-
-struct v3020 {
- /* MMIO access */
- void __iomem *ioaddress;
- int leftshift;
-
- /* GPIO access */
- struct gpio *gpio;
-
- const struct v3020_chip_ops *ops;
-
- struct rtc_device *rtc;
-};
-
-
-static int v3020_mmio_map(struct v3020 *chip, struct platform_device *pdev,
- struct v3020_platform_data *pdata)
-{
- if (pdev->num_resources != 1)
- return -EBUSY;
-
- if (pdev->resource[0].flags != IORESOURCE_MEM)
- return -EBUSY;
-
- chip->leftshift = pdata->leftshift;
- chip->ioaddress = ioremap(pdev->resource[0].start, 1);
- if (chip->ioaddress == NULL)
- return -EBUSY;
-
- return 0;
-}
-
-static void v3020_mmio_unmap(struct v3020 *chip)
-{
- iounmap(chip->ioaddress);
-}
-
-static void v3020_mmio_write_bit(struct v3020 *chip, unsigned char bit)
-{
- writel(bit << chip->leftshift, chip->ioaddress);
-}
-
-static unsigned char v3020_mmio_read_bit(struct v3020 *chip)
-{
- return !!(readl(chip->ioaddress) & (1 << chip->leftshift));
-}
-
-static const struct v3020_chip_ops v3020_mmio_ops = {
- .map_io = v3020_mmio_map,
- .unmap_io = v3020_mmio_unmap,
- .read_bit = v3020_mmio_read_bit,
- .write_bit = v3020_mmio_write_bit,
-};
-
-static struct gpio v3020_gpio[] = {
- { 0, GPIOF_OUT_INIT_HIGH, "RTC CS"},
- { 0, GPIOF_OUT_INIT_HIGH, "RTC WR"},
- { 0, GPIOF_OUT_INIT_HIGH, "RTC RD"},
- { 0, GPIOF_OUT_INIT_HIGH, "RTC IO"},
-};
-
-static int v3020_gpio_map(struct v3020 *chip, struct platform_device *pdev,
- struct v3020_platform_data *pdata)
-{
- int err;
-
- v3020_gpio[V3020_CS].gpio = pdata->gpio_cs;
- v3020_gpio[V3020_WR].gpio = pdata->gpio_wr;
- v3020_gpio[V3020_RD].gpio = pdata->gpio_rd;
- v3020_gpio[V3020_IO].gpio = pdata->gpio_io;
-
- err = gpio_request_array(v3020_gpio, ARRAY_SIZE(v3020_gpio));
-
- if (!err)
- chip->gpio = v3020_gpio;
-
- return err;
-}
-
-static void v3020_gpio_unmap(struct v3020 *chip)
-{
- gpio_free_array(v3020_gpio, ARRAY_SIZE(v3020_gpio));
-}
-
-static void v3020_gpio_write_bit(struct v3020 *chip, unsigned char bit)
-{
- gpio_direction_output(chip->gpio[V3020_IO].gpio, bit);
- gpio_set_value(chip->gpio[V3020_CS].gpio, 0);
- gpio_set_value(chip->gpio[V3020_WR].gpio, 0);
- udelay(1);
- gpio_set_value(chip->gpio[V3020_WR].gpio, 1);
- gpio_set_value(chip->gpio[V3020_CS].gpio, 1);
-}
-
-static unsigned char v3020_gpio_read_bit(struct v3020 *chip)
-{
- int bit;
-
- gpio_direction_input(chip->gpio[V3020_IO].gpio);
- gpio_set_value(chip->gpio[V3020_CS].gpio, 0);
- gpio_set_value(chip->gpio[V3020_RD].gpio, 0);
- udelay(1);
- bit = !!gpio_get_value(chip->gpio[V3020_IO].gpio);
- udelay(1);
- gpio_set_value(chip->gpio[V3020_RD].gpio, 1);
- gpio_set_value(chip->gpio[V3020_CS].gpio, 1);
-
- return bit;
-}
-
-static const struct v3020_chip_ops v3020_gpio_ops = {
- .map_io = v3020_gpio_map,
- .unmap_io = v3020_gpio_unmap,
- .read_bit = v3020_gpio_read_bit,
- .write_bit = v3020_gpio_write_bit,
-};
-
-static void v3020_set_reg(struct v3020 *chip, unsigned char address,
- unsigned char data)
-{
- int i;
- unsigned char tmp;
-
- tmp = address;
- for (i = 0; i < 4; i++) {
- chip->ops->write_bit(chip, (tmp & 1));
- tmp >>= 1;
- udelay(1);
- }
-
- /* Commands dont have data */
- if (!V3020_IS_COMMAND(address)) {
- for (i = 0; i < 8; i++) {
- chip->ops->write_bit(chip, (data & 1));
- data >>= 1;
- udelay(1);
- }
- }
-}
-
-static unsigned char v3020_get_reg(struct v3020 *chip, unsigned char address)
-{
- unsigned int data = 0;
- int i;
-
- for (i = 0; i < 4; i++) {
- chip->ops->write_bit(chip, (address & 1));
- address >>= 1;
- udelay(1);
- }
-
- for (i = 0; i < 8; i++) {
- data >>= 1;
- if (chip->ops->read_bit(chip))
- data |= 0x80;
- udelay(1);
- }
-
- return data;
-}
-
-static int v3020_read_time(struct device *dev, struct rtc_time *dt)
-{
- struct v3020 *chip = dev_get_drvdata(dev);
- int tmp;
-
- /* Copy the current time to ram... */
- v3020_set_reg(chip, V3020_CMD_CLOCK2RAM, 0);
-
- /* ...and then read constant values. */
- tmp = v3020_get_reg(chip, V3020_SECONDS);
- dt->tm_sec = bcd2bin(tmp);
- tmp = v3020_get_reg(chip, V3020_MINUTES);
- dt->tm_min = bcd2bin(tmp);
- tmp = v3020_get_reg(chip, V3020_HOURS);
- dt->tm_hour = bcd2bin(tmp);
- tmp = v3020_get_reg(chip, V3020_MONTH_DAY);
- dt->tm_mday = bcd2bin(tmp);
- tmp = v3020_get_reg(chip, V3020_MONTH);
- dt->tm_mon = bcd2bin(tmp) - 1;
- tmp = v3020_get_reg(chip, V3020_WEEK_DAY);
- dt->tm_wday = bcd2bin(tmp);
- tmp = v3020_get_reg(chip, V3020_YEAR);
- dt->tm_year = bcd2bin(tmp)+100;
-
- dev_dbg(dev, "\n%s : Read RTC values\n", __func__);
- dev_dbg(dev, "tm_hour: %i\n", dt->tm_hour);
- dev_dbg(dev, "tm_min : %i\n", dt->tm_min);
- dev_dbg(dev, "tm_sec : %i\n", dt->tm_sec);
- dev_dbg(dev, "tm_year: %i\n", dt->tm_year);
- dev_dbg(dev, "tm_mon : %i\n", dt->tm_mon);
- dev_dbg(dev, "tm_mday: %i\n", dt->tm_mday);
- dev_dbg(dev, "tm_wday: %i\n", dt->tm_wday);
-
- return 0;
-}
-
-
-static int v3020_set_time(struct device *dev, struct rtc_time *dt)
-{
- struct v3020 *chip = dev_get_drvdata(dev);
-
- dev_dbg(dev, "\n%s : Setting RTC values\n", __func__);
- dev_dbg(dev, "tm_sec : %i\n", dt->tm_sec);
- dev_dbg(dev, "tm_min : %i\n", dt->tm_min);
- dev_dbg(dev, "tm_hour: %i\n", dt->tm_hour);
- dev_dbg(dev, "tm_mday: %i\n", dt->tm_mday);
- dev_dbg(dev, "tm_wday: %i\n", dt->tm_wday);
- dev_dbg(dev, "tm_year: %i\n", dt->tm_year);
-
- /* Write all the values to ram... */
- v3020_set_reg(chip, V3020_SECONDS, bin2bcd(dt->tm_sec));
- v3020_set_reg(chip, V3020_MINUTES, bin2bcd(dt->tm_min));
- v3020_set_reg(chip, V3020_HOURS, bin2bcd(dt->tm_hour));
- v3020_set_reg(chip, V3020_MONTH_DAY, bin2bcd(dt->tm_mday));
- v3020_set_reg(chip, V3020_MONTH, bin2bcd(dt->tm_mon + 1));
- v3020_set_reg(chip, V3020_WEEK_DAY, bin2bcd(dt->tm_wday));
- v3020_set_reg(chip, V3020_YEAR, bin2bcd(dt->tm_year % 100));
-
- /* ...and set the clock. */
- v3020_set_reg(chip, V3020_CMD_RAM2CLOCK, 0);
-
- /* Compulab used this delay here. I dont know why,
- * the datasheet does not specify a delay. */
- /*mdelay(5);*/
-
- return 0;
-}
-
-static const struct rtc_class_ops v3020_rtc_ops = {
- .read_time = v3020_read_time,
- .set_time = v3020_set_time,
-};
-
-static int rtc_probe(struct platform_device *pdev)
-{
- struct v3020_platform_data *pdata = dev_get_platdata(&pdev->dev);
- struct v3020 *chip;
- int retval;
- int i;
-
- chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
- if (!chip)
- return -ENOMEM;
-
- if (pdata->use_gpio)
- chip->ops = &v3020_gpio_ops;
- else
- chip->ops = &v3020_mmio_ops;
-
- retval = chip->ops->map_io(chip, pdev, pdata);
- if (retval)
- return retval;
-
- /* Make sure the v3020 expects a communication cycle
- * by reading 8 times */
- for (i = 0; i < 8; i++)
- chip->ops->read_bit(chip);
-
- /* Test chip by doing a write/read sequence
- * to the chip ram */
- v3020_set_reg(chip, V3020_SECONDS, 0x33);
- if (v3020_get_reg(chip, V3020_SECONDS) != 0x33) {
- retval = -ENODEV;
- goto err_io;
- }
-
- /* Make sure frequency measurement mode, test modes, and lock
- * are all disabled */
- v3020_set_reg(chip, V3020_STATUS_0, 0x0);
-
- if (pdata->use_gpio)
- dev_info(&pdev->dev, "Chip available at GPIOs "
- "%d, %d, %d, %d\n",
- chip->gpio[V3020_CS].gpio, chip->gpio[V3020_WR].gpio,
- chip->gpio[V3020_RD].gpio, chip->gpio[V3020_IO].gpio);
- else
- dev_info(&pdev->dev, "Chip available at "
- "physical address 0x%llx,"
- "data connected to D%d\n",
- (unsigned long long)pdev->resource[0].start,
- chip->leftshift);
-
- platform_set_drvdata(pdev, chip);
-
- chip->rtc = devm_rtc_device_register(&pdev->dev, "v3020",
- &v3020_rtc_ops, THIS_MODULE);
- if (IS_ERR(chip->rtc)) {
- retval = PTR_ERR(chip->rtc);
- goto err_io;
- }
-
- return 0;
-
-err_io:
- chip->ops->unmap_io(chip);
-
- return retval;
-}
-
-static int rtc_remove(struct platform_device *dev)
-{
- struct v3020 *chip = platform_get_drvdata(dev);
-
- chip->ops->unmap_io(chip);
-
- return 0;
-}
-
-static struct platform_driver rtc_device_driver = {
- .probe = rtc_probe,
- .remove = rtc_remove,
- .driver = {
- .name = "v3020",
- },
-};
-
-module_platform_driver(rtc_device_driver);
-
-MODULE_DESCRIPTION("V3020 RTC");
-MODULE_AUTHOR("Raphael Assenat");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:v3020");
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 5a6d9c15395f..a9c2a8d76c45 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -3978,7 +3978,7 @@ static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
ccw = cqr->cpaddr;
ccw->cmd_code = CCW_CMD_RDC;
- ccw->cda = (__u32)(addr_t) cqr->data;
+ ccw->cda = (__u32)virt_to_phys(cqr->data);
ccw->flags = 0;
ccw->count = rdc_buffer_size;
cqr->startdev = device;
@@ -4022,8 +4022,7 @@ char *dasd_get_sense(struct irb *irb)
if (scsw_is_tm(&irb->scsw) && (irb->scsw.tm.fcxs == 0x01)) {
if (irb->scsw.tm.tcw)
- tsb = tcw_get_tsb((struct tcw *)(unsigned long)
- irb->scsw.tm.tcw);
+ tsb = tcw_get_tsb(phys_to_virt(irb->scsw.tm.tcw));
if (tsb && tsb->length == 64 && tsb->flags)
switch (tsb->flags & 0x07) {
case 1: /* tsa_iostat */
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
index 81d283b3cd3b..9fd36c468706 100644
--- a/drivers/s390/block/dasd_3990_erp.c
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -220,7 +220,7 @@ dasd_3990_erp_DCTL(struct dasd_ccw_req * erp, char modifier)
memset(ccw, 0, sizeof(struct ccw1));
ccw->cmd_code = CCW_CMD_DCTL;
ccw->count = 4;
- ccw->cda = (__u32)(addr_t) DCTL_data;
+ ccw->cda = (__u32)virt_to_phys(DCTL_data);
dctl_cqr->flags = erp->flags;
dctl_cqr->function = dasd_3990_erp_DCTL;
dctl_cqr->refers = erp;
@@ -1714,7 +1714,7 @@ dasd_3990_erp_action_1B_32(struct dasd_ccw_req * default_erp, char *sense)
ccw->cmd_code = DASD_ECKD_CCW_DEFINE_EXTENT;
ccw->flags = CCW_FLAG_CC;
ccw->count = 16;
- ccw->cda = (__u32)(addr_t) DE_data;
+ ccw->cda = (__u32)virt_to_phys(DE_data);
/* create LO ccw */
ccw++;
@@ -1722,7 +1722,7 @@ dasd_3990_erp_action_1B_32(struct dasd_ccw_req * default_erp, char *sense)
ccw->cmd_code = DASD_ECKD_CCW_LOCATE_RECORD;
ccw->flags = CCW_FLAG_CC;
ccw->count = 16;
- ccw->cda = (__u32)(addr_t) LO_data;
+ ccw->cda = (__u32)virt_to_phys(LO_data);
/* TIC to the failed ccw */
ccw++;
@@ -2419,7 +2419,7 @@ static struct dasd_ccw_req *dasd_3990_erp_add_erp(struct dasd_ccw_req *cqr)
tcw = erp->cpaddr;
tsb = (struct tsb *) &tcw[1];
*tcw = *((struct tcw *)cqr->cpaddr);
- tcw->tsb = (long)tsb;
+ tcw->tsb = virt_to_phys(tsb);
} else if (ccw->cmd_code == DASD_ECKD_CCW_PSF) {
/* PSF cannot be chained from NOOP/TIC */
erp->cpaddr = cqr->cpaddr;
@@ -2430,7 +2430,7 @@ static struct dasd_ccw_req *dasd_3990_erp_add_erp(struct dasd_ccw_req *cqr)
ccw->flags = CCW_FLAG_CC;
ccw++;
ccw->cmd_code = CCW_CMD_TIC;
- ccw->cda = (long)(cqr->cpaddr);
+ ccw->cda = (__u32)virt_to_phys(cqr->cpaddr);
}
erp->flags = cqr->flags;
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
index b6b938aa6615..c9740ae88d1a 100644
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -443,7 +443,7 @@ static int read_unit_address_configuration(struct dasd_device *device,
ccw->cmd_code = DASD_ECKD_CCW_PSF;
ccw->count = sizeof(struct dasd_psf_prssd_data);
ccw->flags |= CCW_FLAG_CC;
- ccw->cda = (__u32)(addr_t) prssdp;
+ ccw->cda = (__u32)virt_to_phys(prssdp);
/* Read Subsystem Data - feature codes */
memset(lcu->uac, 0, sizeof(*(lcu->uac)));
@@ -451,7 +451,7 @@ static int read_unit_address_configuration(struct dasd_device *device,
ccw++;
ccw->cmd_code = DASD_ECKD_CCW_RSSD;
ccw->count = sizeof(*(lcu->uac));
- ccw->cda = (__u32)(addr_t) lcu->uac;
+ ccw->cda = (__u32)virt_to_phys(lcu->uac);
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
@@ -747,7 +747,7 @@ static int reset_summary_unit_check(struct alias_lcu *lcu,
ccw->cmd_code = DASD_ECKD_CCW_RSCK;
ccw->flags = CCW_FLAG_SLI;
ccw->count = 16;
- ccw->cda = (__u32)(addr_t) cqr->data;
+ ccw->cda = (__u32)virt_to_phys(cqr->data);
((char *)cqr->data)[0] = reason;
clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 5d0b9991e91a..1a69f97e88fb 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -288,7 +288,7 @@ define_extent(struct ccw1 *ccw, struct DE_eckd_data *data, unsigned int trk,
ccw->cmd_code = DASD_ECKD_CCW_DEFINE_EXTENT;
ccw->flags = 0;
ccw->count = 16;
- ccw->cda = (__u32)__pa(data);
+ ccw->cda = (__u32)virt_to_phys(data);
}
memset(data, 0, sizeof(struct DE_eckd_data));
@@ -398,7 +398,7 @@ static void locate_record_ext(struct ccw1 *ccw, struct LRE_eckd_data *data,
ccw->count = 22;
else
ccw->count = 20;
- ccw->cda = (__u32)__pa(data);
+ ccw->cda = (__u32)virt_to_phys(data);
}
memset(data, 0, sizeof(*data));
@@ -544,11 +544,11 @@ static int prefix_LRE(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
ccw->flags = 0;
if (cmd == DASD_ECKD_CCW_WRITE_FULL_TRACK) {
ccw->count = sizeof(*pfxdata) + 2;
- ccw->cda = (__u32) __pa(pfxdata);
+ ccw->cda = (__u32)virt_to_phys(pfxdata);
memset(pfxdata, 0, sizeof(*pfxdata) + 2);
} else {
ccw->count = sizeof(*pfxdata);
- ccw->cda = (__u32) __pa(pfxdata);
+ ccw->cda = (__u32)virt_to_phys(pfxdata);
memset(pfxdata, 0, sizeof(*pfxdata));
}
@@ -615,7 +615,7 @@ locate_record(struct ccw1 *ccw, struct LO_eckd_data *data, unsigned int trk,
ccw->cmd_code = DASD_ECKD_CCW_LOCATE_RECORD;
ccw->flags = 0;
ccw->count = 16;
- ccw->cda = (__u32) __pa(data);
+ ccw->cda = (__u32)virt_to_phys(data);
memset(data, 0, sizeof(struct LO_eckd_data));
sector = 0;
@@ -830,7 +830,7 @@ static void dasd_eckd_fill_rcd_cqr(struct dasd_device *device,
ccw = cqr->cpaddr;
ccw->cmd_code = DASD_ECKD_CCW_RCD;
ccw->flags = 0;
- ccw->cda = (__u32)(addr_t)rcd_buffer;
+ ccw->cda = (__u32)virt_to_phys(rcd_buffer);
ccw->count = DASD_ECKD_RCD_DATA_SIZE;
cqr->magic = DASD_ECKD_MAGIC;
@@ -858,7 +858,7 @@ static void read_conf_cb(struct dasd_ccw_req *cqr, void *data)
if (cqr->status != DASD_CQR_DONE) {
ccw = cqr->cpaddr;
- rcd_buffer = (__u8 *)((addr_t) ccw->cda);
+ rcd_buffer = phys_to_virt(ccw->cda);
memset(rcd_buffer, 0, sizeof(*rcd_buffer));
rcd_buffer[0] = 0xE5;
@@ -1547,7 +1547,7 @@ static int dasd_eckd_read_features(struct dasd_device *device)
ccw->cmd_code = DASD_ECKD_CCW_PSF;
ccw->count = sizeof(struct dasd_psf_prssd_data);
ccw->flags |= CCW_FLAG_CC;
- ccw->cda = (__u32)(addr_t) prssdp;
+ ccw->cda = (__u32)virt_to_phys(prssdp);
/* Read Subsystem Data - feature codes */
features = (struct dasd_rssd_features *) (prssdp + 1);
@@ -1556,7 +1556,7 @@ static int dasd_eckd_read_features(struct dasd_device *device)
ccw++;
ccw->cmd_code = DASD_ECKD_CCW_RSSD;
ccw->count = sizeof(struct dasd_rssd_features);
- ccw->cda = (__u32)(addr_t) features;
+ ccw->cda = (__u32)virt_to_phys(features);
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
@@ -1616,7 +1616,7 @@ static int dasd_eckd_read_vol_info(struct dasd_device *device)
ccw->cmd_code = DASD_ECKD_CCW_PSF;
ccw->count = sizeof(*prssdp);
ccw->flags |= CCW_FLAG_CC;
- ccw->cda = (__u32)(addr_t)prssdp;
+ ccw->cda = (__u32)virt_to_phys(prssdp);
/* Read Subsystem Data - Volume Storage Query */
vsq = (struct dasd_rssd_vsq *)(prssdp + 1);
@@ -1626,7 +1626,7 @@ static int dasd_eckd_read_vol_info(struct dasd_device *device)
ccw->cmd_code = DASD_ECKD_CCW_RSSD;
ccw->count = sizeof(*vsq);
ccw->flags |= CCW_FLAG_SLI;
- ccw->cda = (__u32)(addr_t)vsq;
+ ccw->cda = (__u32)virt_to_phys(vsq);
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
@@ -1801,7 +1801,7 @@ static int dasd_eckd_read_ext_pool_info(struct dasd_device *device)
ccw->cmd_code = DASD_ECKD_CCW_PSF;
ccw->count = sizeof(*prssdp);
ccw->flags |= CCW_FLAG_CC;
- ccw->cda = (__u32)(addr_t)prssdp;
+ ccw->cda = (__u32)virt_to_phys(prssdp);
lcq = (struct dasd_rssd_lcq *)(prssdp + 1);
memset(lcq, 0, sizeof(*lcq));
@@ -1810,7 +1810,7 @@ static int dasd_eckd_read_ext_pool_info(struct dasd_device *device)
ccw->cmd_code = DASD_ECKD_CCW_RSSD;
ccw->count = sizeof(*lcq);
ccw->flags |= CCW_FLAG_SLI;
- ccw->cda = (__u32)(addr_t)lcq;
+ ccw->cda = (__u32)virt_to_phys(lcq);
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
@@ -1907,7 +1907,7 @@ static struct dasd_ccw_req *dasd_eckd_build_psf_ssc(struct dasd_device *device,
}
ccw = cqr->cpaddr;
ccw->cmd_code = DASD_ECKD_CCW_PSF;
- ccw->cda = (__u32)(addr_t)psf_ssc_data;
+ ccw->cda = (__u32)virt_to_phys(psf_ssc_data);
ccw->count = 66;
cqr->startdev = device;
@@ -2262,7 +2262,7 @@ dasd_eckd_analysis_ccw(struct dasd_device *device)
ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
ccw->flags = 0;
ccw->count = 8;
- ccw->cda = (__u32)(addr_t) count_data;
+ ccw->cda = (__u32)virt_to_phys(count_data);
ccw++;
count_data++;
}
@@ -2276,7 +2276,7 @@ dasd_eckd_analysis_ccw(struct dasd_device *device)
ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
ccw->flags = 0;
ccw->count = 8;
- ccw->cda = (__u32)(addr_t) count_data;
+ ccw->cda = (__u32)virt_to_phys(count_data);
cqr->block = NULL;
cqr->startdev = device;
@@ -2647,7 +2647,7 @@ dasd_eckd_build_check(struct dasd_device *base, struct format_data_t *fdata,
ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
ccw->flags = CCW_FLAG_SLI;
ccw->count = 8;
- ccw->cda = (__u32)(addr_t) fmt_buffer;
+ ccw->cda = (__u32)virt_to_phys(fmt_buffer);
ccw++;
fmt_buffer++;
}
@@ -2857,7 +2857,7 @@ dasd_eckd_build_format(struct dasd_device *base, struct dasd_device *startdev,
ccw->cmd_code = DASD_ECKD_CCW_WRITE_RECORD_ZERO;
ccw->flags = CCW_FLAG_SLI;
ccw->count = 8;
- ccw->cda = (__u32)(addr_t) ect;
+ ccw->cda = (__u32)virt_to_phys(ect);
ccw++;
}
if ((intensity & ~0x08) & 0x04) { /* erase track */
@@ -2872,7 +2872,7 @@ dasd_eckd_build_format(struct dasd_device *base, struct dasd_device *startdev,
ccw->cmd_code = DASD_ECKD_CCW_WRITE_CKD;
ccw->flags = CCW_FLAG_SLI;
ccw->count = 8;
- ccw->cda = (__u32)(addr_t) ect;
+ ccw->cda = (__u32)virt_to_phys(ect);
} else { /* write remaining records */
for (i = 0; i < rpt; i++) {
ect = (struct eckd_count *) data;
@@ -2907,7 +2907,7 @@ dasd_eckd_build_format(struct dasd_device *base, struct dasd_device *startdev,
DASD_ECKD_CCW_WRITE_CKD_MT;
ccw->flags = CCW_FLAG_SLI;
ccw->count = 8;
- ccw->cda = (__u32)(addr_t) ect;
+ ccw->cda = (__u32)virt_to_phys(ect);
ccw++;
}
}
@@ -3821,7 +3821,7 @@ dasd_eckd_dso_ras(struct dasd_device *device, struct dasd_block *block,
}
ccw = cqr->cpaddr;
- ccw->cda = (__u32)(addr_t)cqr->data;
+ ccw->cda = (__u32)virt_to_phys(cqr->data);
ccw->cmd_code = DASD_ECKD_CCW_DSO;
ccw->count = size;
@@ -4090,11 +4090,11 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
ccw->cmd_code = rcmd;
ccw->count = count;
if (idal_is_needed(dst, blksize)) {
- ccw->cda = (__u32)(addr_t) idaws;
+ ccw->cda = (__u32)virt_to_phys(idaws);
ccw->flags = CCW_FLAG_IDA;
idaws = idal_create_words(idaws, dst, blksize);
} else {
- ccw->cda = (__u32)(addr_t) dst;
+ ccw->cda = (__u32)virt_to_phys(dst);
ccw->flags = 0;
}
ccw++;
@@ -4228,7 +4228,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
ccw[-1].flags |= CCW_FLAG_CC;
ccw->cmd_code = cmd;
ccw->count = len_to_track_end;
- ccw->cda = (__u32)(addr_t)idaws;
+ ccw->cda = (__u32)virt_to_phys(idaws);
ccw->flags = CCW_FLAG_IDA;
ccw++;
recid += count;
@@ -4244,7 +4244,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
* idaw ends
*/
if (!idaw_dst) {
- if (__pa(dst) & (IDA_BLOCK_SIZE-1)) {
+ if ((__u32)virt_to_phys(dst) & (IDA_BLOCK_SIZE - 1)) {
dasd_sfree_request(cqr, startdev);
return ERR_PTR(-ERANGE);
} else
@@ -4264,7 +4264,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
* idal_create_words will handle cases where idaw_len
* is larger then IDA_BLOCK_SIZE
*/
- if (!(__pa(idaw_dst + idaw_len) & (IDA_BLOCK_SIZE-1)))
+ if (!((__u32)virt_to_phys(idaw_dst + idaw_len) & (IDA_BLOCK_SIZE - 1)))
end_idaw = 1;
/* We also need to end the idaw at track end */
if (!len_to_track_end) {
@@ -4817,7 +4817,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_raw(struct dasd_device *startdev,
ccw->count = 57326;
/* 64k map to one track */
len_to_track_end = 65536 - start_padding_sectors * 512;
- ccw->cda = (__u32)(addr_t)idaws;
+ ccw->cda = (__u32)virt_to_phys(idaws);
ccw->flags |= CCW_FLAG_IDA;
ccw->flags |= CCW_FLAG_SLI;
ccw++;
@@ -4836,7 +4836,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_raw(struct dasd_device *startdev,
ccw->count = 57326;
/* 64k map to one track */
len_to_track_end = 65536;
- ccw->cda = (__u32)(addr_t)idaws;
+ ccw->cda = (__u32)virt_to_phys(idaws);
ccw->flags |= CCW_FLAG_IDA;
ccw->flags |= CCW_FLAG_SLI;
ccw++;
@@ -4893,9 +4893,9 @@ dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
ccw++;
if (dst) {
if (ccw->flags & CCW_FLAG_IDA)
- cda = *((char **)((addr_t) ccw->cda));
+ cda = *((char **)phys_to_virt(ccw->cda));
else
- cda = (char *)((addr_t) ccw->cda);
+ cda = phys_to_virt(ccw->cda);
if (dst != cda) {
if (rq_data_dir(req) == READ)
memcpy(dst, cda, bv.bv_len);
@@ -5045,7 +5045,7 @@ dasd_eckd_release(struct dasd_device *device)
ccw->cmd_code = DASD_ECKD_CCW_RELEASE;
ccw->flags |= CCW_FLAG_SLI;
ccw->count = 32;
- ccw->cda = (__u32)(addr_t) cqr->data;
+ ccw->cda = (__u32)virt_to_phys(cqr->data);
cqr->startdev = device;
cqr->memdev = device;
clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
@@ -5100,7 +5100,7 @@ dasd_eckd_reserve(struct dasd_device *device)
ccw->cmd_code = DASD_ECKD_CCW_RESERVE;
ccw->flags |= CCW_FLAG_SLI;
ccw->count = 32;
- ccw->cda = (__u32)(addr_t) cqr->data;
+ ccw->cda = (__u32)virt_to_phys(cqr->data);
cqr->startdev = device;
cqr->memdev = device;
clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
@@ -5154,7 +5154,7 @@ dasd_eckd_steal_lock(struct dasd_device *device)
ccw->cmd_code = DASD_ECKD_CCW_SLCK;
ccw->flags |= CCW_FLAG_SLI;
ccw->count = 32;
- ccw->cda = (__u32)(addr_t) cqr->data;
+ ccw->cda = (__u32)virt_to_phys(cqr->data);
cqr->startdev = device;
cqr->memdev = device;
clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
@@ -5215,7 +5215,7 @@ static int dasd_eckd_snid(struct dasd_device *device,
ccw->cmd_code = DASD_ECKD_CCW_SNID;
ccw->flags |= CCW_FLAG_SLI;
ccw->count = 12;
- ccw->cda = (__u32)(addr_t) cqr->data;
+ ccw->cda = (__u32)virt_to_phys(cqr->data);
cqr->startdev = device;
cqr->memdev = device;
clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
@@ -5282,7 +5282,7 @@ dasd_eckd_performance(struct dasd_device *device, void __user *argp)
ccw->cmd_code = DASD_ECKD_CCW_PSF;
ccw->count = sizeof(struct dasd_psf_prssd_data);
ccw->flags |= CCW_FLAG_CC;
- ccw->cda = (__u32)(addr_t) prssdp;
+ ccw->cda = (__u32)virt_to_phys(prssdp);
/* Read Subsystem Data - Performance Statistics */
stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1);
@@ -5291,7 +5291,7 @@ dasd_eckd_performance(struct dasd_device *device, void __user *argp)
ccw++;
ccw->cmd_code = DASD_ECKD_CCW_RSSD;
ccw->count = sizeof(struct dasd_rssd_perf_stats_t);
- ccw->cda = (__u32)(addr_t) stats;
+ ccw->cda = (__u32)virt_to_phys(stats);
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
@@ -5435,7 +5435,7 @@ static int dasd_symm_io(struct dasd_device *device, void __user *argp)
ccw->cmd_code = DASD_ECKD_CCW_PSF;
ccw->count = usrparm.psf_data_len;
ccw->flags |= CCW_FLAG_CC;
- ccw->cda = (__u32)(addr_t) psf_data;
+ ccw->cda = (__u32)virt_to_phys(psf_data);
ccw++;
@@ -5443,7 +5443,7 @@ static int dasd_symm_io(struct dasd_device *device, void __user *argp)
ccw->cmd_code = DASD_ECKD_CCW_RSSD;
ccw->count = usrparm.rssd_result_len;
ccw->flags = CCW_FLAG_SLI ;
- ccw->cda = (__u32)(addr_t) rssd_result;
+ ccw->cda = (__u32)virt_to_phys(rssd_result);
rc = dasd_sleep_on(cqr);
if (rc)
@@ -5512,9 +5512,9 @@ dasd_eckd_dump_ccw_range(struct ccw1 *from, struct ccw1 *to, char *page)
/* get pointer to data (consider IDALs) */
if (from->flags & CCW_FLAG_IDA)
- datap = (char *) *((addr_t *) (addr_t) from->cda);
+ datap = (char *)*((addr_t *)phys_to_virt(from->cda));
else
- datap = (char *) ((addr_t) from->cda);
+ datap = phys_to_virt(from->cda);
/* dump data (max 128 bytes) */
for (count = 0; count < from->count && count < 128; count++) {
@@ -5585,7 +5585,7 @@ static void dasd_eckd_dump_sense_ccw(struct dasd_device *device,
len += sprintf(page + len, PRINTK_HEADER
" device %s: Failing CCW: %p\n",
dev_name(&device->cdev->dev),
- (void *) (addr_t) irb->scsw.cmd.cpa);
+ phys_to_virt(irb->scsw.cmd.cpa));
if (irb->esw.esw0.erw.cons) {
for (sl = 0; sl < 4; sl++) {
len += sprintf(page + len, PRINTK_HEADER
@@ -5632,8 +5632,7 @@ static void dasd_eckd_dump_sense_ccw(struct dasd_device *device,
/* print failing CCW area (maximum 4) */
/* scsw->cda is either valid or zero */
from = ++to;
- fail = (struct ccw1 *)(addr_t)
- irb->scsw.cmd.cpa; /* failing CCW */
+ fail = phys_to_virt(irb->scsw.cmd.cpa); /* failing CCW */
if (from < fail - 2) {
from = fail - 2; /* there is a gap - print header */
printk(KERN_ERR PRINTK_HEADER "......\n");
@@ -5687,13 +5686,12 @@ static void dasd_eckd_dump_sense_tcw(struct dasd_device *device,
len += sprintf(page + len, PRINTK_HEADER
" device %s: Failing TCW: %p\n",
dev_name(&device->cdev->dev),
- (void *) (addr_t) irb->scsw.tm.tcw);
+ phys_to_virt(irb->scsw.tm.tcw));
tsb = NULL;
sense = NULL;
if (irb->scsw.tm.tcw && (irb->scsw.tm.fcxs & 0x01))
- tsb = tcw_get_tsb(
- (struct tcw *)(unsigned long)irb->scsw.tm.tcw);
+ tsb = tcw_get_tsb(phys_to_virt(irb->scsw.tm.tcw));
if (tsb) {
len += sprintf(page + len, PRINTK_HEADER
@@ -5917,7 +5915,7 @@ retry:
ccw->count = sizeof(struct dasd_psf_prssd_data);
ccw->flags |= CCW_FLAG_CC;
ccw->flags |= CCW_FLAG_SLI;
- ccw->cda = (__u32)(addr_t) prssdp;
+ ccw->cda = (__u32)virt_to_phys(prssdp);
/* Read Subsystem Data - message buffer */
message_buf = (struct dasd_rssd_messages *) (prssdp + 1);
@@ -5927,7 +5925,7 @@ retry:
ccw->cmd_code = DASD_ECKD_CCW_RSSD;
ccw->count = sizeof(struct dasd_rssd_messages);
ccw->flags |= CCW_FLAG_SLI;
- ccw->cda = (__u32)(addr_t) message_buf;
+ ccw->cda = (__u32)virt_to_phys(message_buf);
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
@@ -6008,14 +6006,14 @@ static int dasd_eckd_query_host_access(struct dasd_device *device,
ccw->count = sizeof(struct dasd_psf_prssd_data);
ccw->flags |= CCW_FLAG_CC;
ccw->flags |= CCW_FLAG_SLI;
- ccw->cda = (__u32)(addr_t) prssdp;
+ ccw->cda = (__u32)virt_to_phys(prssdp);
/* Read Subsystem Data - query host access */
ccw++;
ccw->cmd_code = DASD_ECKD_CCW_RSSD;
ccw->count = sizeof(struct dasd_psf_query_host_access);
ccw->flags |= CCW_FLAG_SLI;
- ccw->cda = (__u32)(addr_t) host_access;
+ ccw->cda = (__u32)virt_to_phys(host_access);
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
@@ -6351,7 +6349,7 @@ dasd_eckd_psf_cuir_response(struct dasd_device *device, int response,
psf_cuir->ssid = device->path[pos].ssid;
ccw = cqr->cpaddr;
ccw->cmd_code = DASD_ECKD_CCW_PSF;
- ccw->cda = (__u32)(addr_t)psf_cuir;
+ ccw->cda = (__u32)virt_to_phys(psf_cuir);
ccw->flags = CCW_FLAG_SLI;
ccw->count = sizeof(struct dasd_psf_cuir_response);
@@ -6956,8 +6954,10 @@ dasd_eckd_init(void)
return -ENOMEM;
dasd_vol_info_req = kmalloc(sizeof(*dasd_vol_info_req),
GFP_KERNEL | GFP_DMA);
- if (!dasd_vol_info_req)
+ if (!dasd_vol_info_req) {
+ kfree(dasd_reserve_req);
return -ENOMEM;
+ }
pe_handler_worker = kmalloc(sizeof(*pe_handler_worker),
GFP_KERNEL | GFP_DMA);
if (!pe_handler_worker) {
diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c
index d4d31cd11d26..a4cc772208a6 100644
--- a/drivers/s390/block/dasd_eer.c
+++ b/drivers/s390/block/dasd_eer.c
@@ -491,7 +491,7 @@ int dasd_eer_enable(struct dasd_device *device)
ccw->cmd_code = DASD_ECKD_CCW_SNSS;
ccw->count = SNSS_DATA_SIZE;
ccw->flags = 0;
- ccw->cda = (__u32)(addr_t) cqr->data;
+ ccw->cda = (__u32)virt_to_phys(cqr->data);
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index cddfb01a3dca..bcb67fa747a7 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -83,7 +83,7 @@ define_extent(struct ccw1 * ccw, struct DE_fba_data *data, int rw,
ccw->cmd_code = DASD_FBA_CCW_DEFINE_EXTENT;
ccw->flags = 0;
ccw->count = 16;
- ccw->cda = (__u32) __pa(data);
+ ccw->cda = (__u32)virt_to_phys(data);
memset(data, 0, sizeof (struct DE_fba_data));
if (rw == WRITE)
(data->mask).perm = 0x0;
@@ -103,7 +103,7 @@ locate_record(struct ccw1 * ccw, struct LO_fba_data *data, int rw,
ccw->cmd_code = DASD_FBA_CCW_LOCATE;
ccw->flags = 0;
ccw->count = 8;
- ccw->cda = (__u32) __pa(data);
+ ccw->cda = (__u32)virt_to_phys(data);
memset(data, 0, sizeof (struct LO_fba_data));
if (rw == WRITE)
data->operation.cmd = 0x5;
@@ -262,7 +262,7 @@ static void ccw_write_zero(struct ccw1 *ccw, int count)
ccw->cmd_code = DASD_FBA_CCW_WRITE;
ccw->flags |= CCW_FLAG_SLI;
ccw->count = count;
- ccw->cda = (__u32) (addr_t) dasd_fba_zero_page;
+ ccw->cda = (__u32)virt_to_phys(dasd_fba_zero_page);
}
/*
@@ -528,11 +528,11 @@ static struct dasd_ccw_req *dasd_fba_build_cp_regular(
ccw->cmd_code = cmd;
ccw->count = block->bp_block;
if (idal_is_needed(dst, blksize)) {
- ccw->cda = (__u32)(addr_t) idaws;
+ ccw->cda = (__u32)virt_to_phys(idaws);
ccw->flags = CCW_FLAG_IDA;
idaws = idal_create_words(idaws, dst, blksize);
} else {
- ccw->cda = (__u32)(addr_t) dst;
+ ccw->cda = (__u32)virt_to_phys(dst);
ccw->flags = 0;
}
ccw++;
@@ -590,9 +590,9 @@ dasd_fba_free_cp(struct dasd_ccw_req *cqr, struct request *req)
ccw++;
if (dst) {
if (ccw->flags & CCW_FLAG_IDA)
- cda = *((char **)((addr_t) ccw->cda));
+ cda = *((char **)phys_to_virt(ccw->cda));
else
- cda = (char *)((addr_t) ccw->cda);
+ cda = phys_to_virt(ccw->cda);
if (dst != cda) {
if (rq_data_dir(req) == READ)
memcpy(dst, cda, bv.bv_len);
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index c0f85ffb2b62..c09f2e053bf8 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -864,10 +864,6 @@ dcssblk_submit_bio(struct bio *bio)
unsigned long source_addr;
unsigned long bytes_done;
- bio = bio_split_to_limits(bio);
- if (!bio)
- return;
-
bytes_done = 0;
dev_info = bio->bi_bdev->bd_disk->private_data;
if (dev_info == NULL)
diff --git a/drivers/s390/char/Kconfig b/drivers/s390/char/Kconfig
index 7d1749b0d378..80c4e5101c97 100644
--- a/drivers/s390/char/Kconfig
+++ b/drivers/s390/char/Kconfig
@@ -5,17 +5,10 @@ comment "S/390 character device drivers"
config TN3270
def_tristate y
prompt "Support for locally attached 3270 terminals"
- depends on CCW
+ depends on CCW && TTY
help
Include support for IBM 3270 terminals.
-config TN3270_TTY
- def_tristate y
- prompt "Support for tty input/output on 3270 terminals"
- depends on TN3270 && TTY
- help
- Include support for using an IBM 3270 terminal as a Linux tty.
-
config TN3270_FS
def_tristate m
prompt "Support for fullscreen applications on 3270 terminals"
@@ -26,7 +19,7 @@ config TN3270_FS
config TN3270_CONSOLE
def_bool y
prompt "Support for console on 3270 terminal"
- depends on TN3270=y && TN3270_TTY=y
+ depends on TN3270=y
help
Include support for using an IBM 3270 terminal as a Linux system
console. Available only if 3270 support is compiled in statically.
diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile
index ce32270082f5..b0f6b3201636 100644
--- a/drivers/s390/char/Makefile
+++ b/drivers/s390/char/Makefile
@@ -21,9 +21,7 @@ obj-y += ctrlchar.o keyboard.o defkeymap.o sclp.o sclp_rw.o sclp_quiesce.o \
sclp_cmd.o sclp_config.o sclp_cpi_sys.o sclp_ocf.o sclp_ctl.o \
sclp_early.o sclp_early_core.o sclp_sd.o
-obj-$(CONFIG_TN3270) += raw3270.o
-obj-$(CONFIG_TN3270_CONSOLE) += con3270.o
-obj-$(CONFIG_TN3270_TTY) += tty3270.o
+obj-$(CONFIG_TN3270) += raw3270.o con3270.o
obj-$(CONFIG_TN3270_FS) += fs3270.o
obj-$(CONFIG_TN3215) += con3215.o
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c
index 72ba83c1bc79..0b05cd76b7d0 100644
--- a/drivers/s390/char/con3215.c
+++ b/drivers/s390/char/con3215.c
@@ -629,7 +629,7 @@ static int raw3215_startup(struct raw3215_info *raw)
if (tty_port_initialized(&raw->port))
return 0;
raw->line_pos = 0;
- tty_port_set_initialized(&raw->port, 1);
+ tty_port_set_initialized(&raw->port, true);
spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
raw3215_try_io(raw);
spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
@@ -659,7 +659,7 @@ static void raw3215_shutdown(struct raw3215_info *raw)
spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
remove_wait_queue(&raw->empty_wait, &wait);
set_current_state(TASK_RUNNING);
- tty_port_set_initialized(&raw->port, 1);
+ tty_port_set_initialized(&raw->port, true);
}
spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
}
diff --git a/drivers/s390/char/con3270.c b/drivers/s390/char/con3270.c
index 10f6a37fb153..d9983550062d 100644
--- a/drivers/s390/char/con3270.c
+++ b/drivers/s390/char/con3270.c
@@ -1,518 +1,2075 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * IBM/3270 Driver - console view.
+ * IBM/3270 Driver - tty functions.
*
- * Author(s):
- * Original 3270 Code for 2.4 written by Richard Hitt (UTS Global)
- * Rewritten for 2.5 by Martin Schwidefsky <schwidefsky@de.ibm.com>
- * Copyright IBM Corp. 2003, 2009
+ * Author(s):
+ * Original 3270 Code for 2.4 written by Richard Hitt (UTS Global)
+ * Rewritten for 2.5 by Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * -- Copyright IBM Corp. 2003
*/
#include <linux/module.h>
-#include <linux/console.h>
+#include <linux/types.h>
+#include <linux/kdev_t.h>
+#include <linux/tty.h>
+#include <linux/vt_kern.h>
#include <linux/init.h>
+#include <linux/console.h>
#include <linux/interrupt.h>
-#include <linux/list.h>
+#include <linux/workqueue.h>
#include <linux/panic_notifier.h>
-#include <linux/types.h>
-#include <linux/slab.h>
-#include <linux/err.h>
#include <linux/reboot.h>
+#include <linux/slab.h>
+#include <linux/memblock.h>
+#include <linux/compat.h>
#include <asm/ccwdev.h>
#include <asm/cio.h>
-#include <asm/cpcmd.h>
#include <asm/ebcdic.h>
+#include <asm/cpcmd.h>
+#include <linux/uaccess.h>
#include "raw3270.h"
-#include "tty3270.h"
-#include "ctrlchar.h"
+#include "keyboard.h"
+
+#define TTY3270_CHAR_BUF_SIZE 256
+#define TTY3270_OUTPUT_BUFFER_SIZE 4096
+#define TTY3270_SCREEN_PAGES 8 /* has to be power-of-two */
+#define TTY3270_RECALL_SIZE 16 /* has to be power-of-two */
+#define TTY3270_STATUS_AREA_SIZE 40
+
+static struct tty_driver *tty3270_driver;
+static int tty3270_max_index;
+static struct raw3270_fn tty3270_fn;
+
+#define TTY3270_HIGHLIGHT_BLINK 1
+#define TTY3270_HIGHLIGHT_REVERSE 2
+#define TTY3270_HIGHLIGHT_UNDERSCORE 4
+
+struct tty3270_attribute {
+ unsigned char alternate_charset:1; /* Graphics charset */
+ unsigned char highlight:3; /* Blink/reverse/underscore */
+ unsigned char f_color:4; /* Foreground color */
+ unsigned char b_color:4; /* Background color */
+};
-#define CON3270_OUTPUT_BUFFER_SIZE 1024
-#define CON3270_STRING_PAGES 4
+struct tty3270_cell {
+ unsigned char character;
+ struct tty3270_attribute attributes;
+};
-static struct raw3270_fn con3270_fn;
+struct tty3270_line {
+ struct tty3270_cell *cells;
+ int len;
+ int dirty;
+};
-static bool auto_update = true;
-module_param(auto_update, bool, 0);
+static const unsigned char sfq_read_partition[] = {
+ 0x00, 0x07, 0x01, 0xff, 0x03, 0x00, 0x81
+};
+
+#define ESCAPE_NPAR 8
/*
- * Main 3270 console view data structure.
+ * The main tty view data structure.
+ * FIXME:
+ * 1) describe line orientation & lines list concept against screen
+ * 2) describe conversion of screen to lines
+ * 3) describe line format.
*/
-struct con3270 {
+struct tty3270 {
struct raw3270_view view;
- struct list_head freemem; /* list of free memory for strings. */
+ struct tty_port port;
/* Output stuff. */
- struct list_head lines; /* list of lines. */
- struct list_head update; /* list of lines to update. */
- int line_nr; /* line number for next update. */
- int nr_lines; /* # lines in list. */
+ unsigned char wcc; /* Write control character. */
int nr_up; /* # lines up in history. */
unsigned long update_flags; /* Update indication bits. */
- struct string *cline; /* current output line. */
- struct string *status; /* last line of display. */
- struct raw3270_request *write; /* single write request. */
- struct timer_list timer;
+ struct raw3270_request *write; /* Single write request. */
+ struct timer_list timer; /* Output delay timer. */
+ char *converted_line; /* RAW 3270 data stream */
+ unsigned int line_view_start; /* Start of visible area */
+ unsigned int line_write_start; /* current write position */
+ unsigned int oops_line; /* line counter used when print oops */
+
+ /* Current tty screen. */
+ unsigned int cx, cy; /* Current output position. */
+ struct tty3270_attribute attributes;
+ struct tty3270_attribute saved_attributes;
+ int allocated_lines;
+ struct tty3270_line *screen;
/* Input stuff. */
- struct string *input; /* input string for read request. */
- struct raw3270_request *read; /* single read request. */
- struct raw3270_request *kreset; /* single keyboard reset request. */
- struct tasklet_struct readlet; /* tasklet to issue read request. */
+ char *prompt; /* Output string for input area. */
+ char *input; /* Input string for read request. */
+ struct raw3270_request *read; /* Single read request. */
+ struct raw3270_request *kreset; /* Single keyboard reset request. */
+ struct raw3270_request *readpartreq;
+ unsigned char inattr; /* Visible/invisible input. */
+ int throttle, attn; /* tty throttle/unthrottle. */
+ struct tasklet_struct readlet; /* Tasklet to issue read request. */
+ struct tasklet_struct hanglet; /* Tasklet to hang up the tty. */
+ struct kbd_data *kbd; /* key_maps stuff. */
+
+ /* Escape sequence parsing. */
+ int esc_state, esc_ques, esc_npar;
+ int esc_par[ESCAPE_NPAR];
+ unsigned int saved_cx, saved_cy;
+
+ /* Command recalling. */
+ char **rcl_lines; /* Array of recallable lines */
+ int rcl_write_index; /* Write index of recallable items */
+ int rcl_read_index; /* Read index of recallable items */
+
+ /* Character array for put_char/flush_chars. */
+ unsigned int char_count;
+ char char_buf[TTY3270_CHAR_BUF_SIZE];
};
-static struct con3270 *condev;
-
-/* con3270->update_flags. See con3270_update for details. */
-#define CON_UPDATE_ERASE 1 /* Use EWRITEA instead of WRITE. */
-#define CON_UPDATE_LIST 2 /* Update lines in tty3270->update. */
-#define CON_UPDATE_STATUS 4 /* Update status line. */
-#define CON_UPDATE_ALL 8 /* Recreate screen. */
+/* tty3270->update_flags. See tty3270_update for details. */
+#define TTY_UPDATE_INPUT 0x1 /* Update input line. */
+#define TTY_UPDATE_STATUS 0x2 /* Update status line. */
+#define TTY_UPDATE_LINES 0x4 /* Update visible screen lines */
+#define TTY_UPDATE_ALL 0x7 /* Recreate screen. */
-static void con3270_update(struct timer_list *);
+#define TTY3270_INPUT_AREA_ROWS 2
/*
* Setup timeout for a device. On timeout trigger an update.
*/
-static void con3270_set_timer(struct con3270 *cp, int expires)
+static void tty3270_set_timer(struct tty3270 *tp, int expires)
{
- if (expires == 0)
- del_timer(&cp->timer);
- else
- mod_timer(&cp->timer, jiffies + expires);
+ mod_timer(&tp->timer, jiffies + expires);
}
-/*
- * The status line is the last line of the screen. It shows the string
- * "console view" in the lower left corner and "Running"/"More..."/"Holding"
- * in the lower right corner of the screen.
- */
-static void
-con3270_update_status(struct con3270 *cp)
+static int tty3270_tty_rows(struct tty3270 *tp)
{
- char *str;
+ return tp->view.rows - TTY3270_INPUT_AREA_ROWS;
+}
- str = (cp->nr_up != 0) ? "History" : "Running";
- memcpy(cp->status->string + 24, str, 7);
- codepage_convert(cp->view.ascebc, cp->status->string + 24, 7);
- cp->update_flags |= CON_UPDATE_STATUS;
+static char *tty3270_add_ba(struct tty3270 *tp, char *cp, char order, int x, int y)
+{
+ *cp++ = order;
+ raw3270_buffer_address(tp->view.dev, cp, x, y);
+ return cp + 2;
}
-static void
-con3270_create_status(struct con3270 *cp)
+static char *tty3270_add_ra(struct tty3270 *tp, char *cp, int x, int y, char c)
+{
+ cp = tty3270_add_ba(tp, cp, TO_RA, x, y);
+ *cp++ = c;
+ return cp;
+}
+
+static char *tty3270_add_sa(struct tty3270 *tp, char *cp, char attr, char value)
+{
+ *cp++ = TO_SA;
+ *cp++ = attr;
+ *cp++ = value;
+ return cp;
+}
+
+static char *tty3270_add_ge(struct tty3270 *tp, char *cp, char c)
+{
+ *cp++ = TO_GE;
+ *cp++ = c;
+ return cp;
+}
+
+static char *tty3270_add_sf(struct tty3270 *tp, char *cp, char type)
+{
+ *cp++ = TO_SF;
+ *cp++ = type;
+ return cp;
+}
+
+static int tty3270_line_increment(struct tty3270 *tp, unsigned int line, unsigned int incr)
+{
+ return (line + incr) & (tp->allocated_lines - 1);
+}
+
+static struct tty3270_line *tty3270_get_write_line(struct tty3270 *tp, unsigned int num)
{
- static const unsigned char blueprint[] =
- { TO_SBA, 0, 0, TO_SF,TF_LOG,TO_SA,TAT_COLOR, TAC_GREEN,
- 'c','o','n','s','o','l','e',' ','v','i','e','w',
- TO_RA,0,0,0,'R','u','n','n','i','n','g',TO_SF,TF_LOG };
+ return tp->screen + tty3270_line_increment(tp, tp->line_write_start, num);
+}
+
+static struct tty3270_line *tty3270_get_view_line(struct tty3270 *tp, unsigned int num)
+{
+ return tp->screen + tty3270_line_increment(tp, tp->line_view_start, num - tp->nr_up);
+}
- cp->status = alloc_string(&cp->freemem, sizeof(blueprint));
- /* Copy blueprint to status line */
- memcpy(cp->status->string, blueprint, sizeof(blueprint));
- /* Set TO_RA addresses. */
- raw3270_buffer_address(cp->view.dev, cp->status->string + 1,
- cp->view.cols * (cp->view.rows - 1));
- raw3270_buffer_address(cp->view.dev, cp->status->string + 21,
- cp->view.cols * cp->view.rows - 8);
- /* Convert strings to ebcdic. */
- codepage_convert(cp->view.ascebc, cp->status->string + 8, 12);
- codepage_convert(cp->view.ascebc, cp->status->string + 24, 7);
+static int tty3270_input_size(int cols)
+{
+ return cols * 2 - 11;
+}
+
+static void tty3270_update_prompt(struct tty3270 *tp, char *input)
+{
+ strcpy(tp->prompt, input);
+ tp->update_flags |= TTY_UPDATE_INPUT;
+ tty3270_set_timer(tp, 1);
}
/*
- * Set output offsets to 3270 datastream fragment of a console string.
+ * The input line are the two last lines of the screen.
*/
-static void
-con3270_update_string(struct con3270 *cp, struct string *s, int nr)
+static int tty3270_add_prompt(struct tty3270 *tp)
{
- if (s->len < 4) {
- /* This indicates a bug, but printing a warning would
- * cause a deadlock. */
- return;
+ int count = 0;
+ char *cp;
+
+ cp = tp->converted_line;
+ cp = tty3270_add_ba(tp, cp, TO_SBA, 0, -2);
+ *cp++ = tp->view.ascebc['>'];
+
+ if (*tp->prompt) {
+ cp = tty3270_add_sf(tp, cp, TF_INMDT);
+ count = min_t(int, strlen(tp->prompt),
+ tp->view.cols * 2 - TTY3270_STATUS_AREA_SIZE - 2);
+ memcpy(cp, tp->prompt, count);
+ cp += count;
+ } else {
+ cp = tty3270_add_sf(tp, cp, tp->inattr);
}
- if (s->string[s->len - 4] != TO_RA)
- return;
- raw3270_buffer_address(cp->view.dev, s->string + s->len - 3,
- cp->view.cols * (nr + 1));
+ *cp++ = TO_IC;
+ /* Clear to end of input line. */
+ if (count < tp->view.cols * 2 - 11)
+ cp = tty3270_add_ra(tp, cp, -TTY3270_STATUS_AREA_SIZE, -1, 0);
+ return cp - tp->converted_line;
+}
+
+static char *tty3270_ebcdic_convert(struct tty3270 *tp, char *d, char *s)
+{
+ while (*s)
+ *d++ = tp->view.ascebc[(int)*s++];
+ return d;
}
/*
- * Rebuild update list to print all lines.
+ * The status line is the last line of the screen. It shows the string
+ * "Running"/"History X" in the lower right corner of the screen.
*/
-static void
-con3270_rebuild_update(struct con3270 *cp)
+static int tty3270_add_status(struct tty3270 *tp)
{
- struct string *s, *n;
- int nr;
+ char *cp = tp->converted_line;
+ int len;
+
+ cp = tty3270_add_ba(tp, cp, TO_SBA, -TTY3270_STATUS_AREA_SIZE, -1);
+ cp = tty3270_add_sf(tp, cp, TF_LOG);
+ cp = tty3270_add_sa(tp, cp, TAT_FGCOLOR, TAC_GREEN);
+ cp = tty3270_ebcdic_convert(tp, cp, " 7");
+ cp = tty3270_add_sa(tp, cp, TAT_EXTHI, TAX_REVER);
+ cp = tty3270_ebcdic_convert(tp, cp, "PrevPg");
+ cp = tty3270_add_sa(tp, cp, TAT_EXTHI, TAX_RESET);
+ cp = tty3270_ebcdic_convert(tp, cp, " 8");
+ cp = tty3270_add_sa(tp, cp, TAT_EXTHI, TAX_REVER);
+ cp = tty3270_ebcdic_convert(tp, cp, "NextPg");
+ cp = tty3270_add_sa(tp, cp, TAT_EXTHI, TAX_RESET);
+ cp = tty3270_ebcdic_convert(tp, cp, " 12");
+ cp = tty3270_add_sa(tp, cp, TAT_EXTHI, TAX_REVER);
+ cp = tty3270_ebcdic_convert(tp, cp, "Recall");
+ cp = tty3270_add_sa(tp, cp, TAT_EXTHI, TAX_RESET);
+ cp = tty3270_ebcdic_convert(tp, cp, " ");
+ if (tp->nr_up) {
+ len = sprintf(cp, "History %d", -tp->nr_up);
+ codepage_convert(tp->view.ascebc, cp, len);
+ cp += len;
+ } else {
+ cp = tty3270_ebcdic_convert(tp, cp, oops_in_progress ? "Crashed" : "Running");
+ }
+ cp = tty3270_add_sf(tp, cp, TF_LOG);
+ cp = tty3270_add_sa(tp, cp, TAT_FGCOLOR, TAC_RESET);
+ return cp - (char *)tp->converted_line;
+}
- /*
- * Throw away update list and create a new one,
- * containing all lines that will fit on the screen.
- */
- list_for_each_entry_safe(s, n, &cp->update, update)
- list_del_init(&s->update);
- nr = cp->view.rows - 2 + cp->nr_up;
- list_for_each_entry_reverse(s, &cp->lines, list) {
- if (nr < cp->view.rows - 1)
- list_add(&s->update, &cp->update);
- if (--nr < 0)
- break;
+static void tty3270_blank_screen(struct tty3270 *tp)
+{
+ struct tty3270_line *line;
+ int i;
+
+ for (i = 0; i < tty3270_tty_rows(tp); i++) {
+ line = tty3270_get_write_line(tp, i);
+ line->len = 0;
+ line->dirty = 1;
}
- cp->line_nr = 0;
- cp->update_flags |= CON_UPDATE_LIST;
+ tp->nr_up = 0;
}
/*
- * Alloc string for size bytes. Free strings from history if necessary.
+ * Write request completion callback.
*/
-static struct string *
-con3270_alloc_string(struct con3270 *cp, size_t size)
+static void tty3270_write_callback(struct raw3270_request *rq, void *data)
{
- struct string *s, *n;
+ struct tty3270 *tp = container_of(rq->view, struct tty3270, view);
- s = alloc_string(&cp->freemem, size);
- if (s)
- return s;
- list_for_each_entry_safe(s, n, &cp->lines, list) {
- list_del(&s->list);
- if (!list_empty(&s->update))
- list_del(&s->update);
- cp->nr_lines--;
- if (free_string(&cp->freemem, s) >= size)
- break;
+ if (rq->rc != 0) {
+ /* Write wasn't successful. Refresh all. */
+ tp->update_flags = TTY_UPDATE_ALL;
+ tty3270_set_timer(tp, 1);
}
- s = alloc_string(&cp->freemem, size);
- BUG_ON(!s);
- if (cp->nr_up != 0 && cp->nr_up + cp->view.rows > cp->nr_lines) {
- cp->nr_up = cp->nr_lines - cp->view.rows + 1;
- con3270_rebuild_update(cp);
- con3270_update_status(cp);
+ raw3270_request_reset(rq);
+ xchg(&tp->write, rq);
+}
+
+static int tty3270_required_length(struct tty3270 *tp, struct tty3270_line *line)
+{
+ unsigned char f_color, b_color, highlight;
+ struct tty3270_cell *cell;
+ int i, flen = 3; /* Prefix (TO_SBA). */
+
+ flen += line->len;
+ highlight = 0;
+ f_color = TAC_RESET;
+ b_color = TAC_RESET;
+
+ for (i = 0, cell = line->cells; i < line->len; i++, cell++) {
+ if (cell->attributes.highlight != highlight) {
+ flen += 3; /* TO_SA to switch highlight. */
+ highlight = cell->attributes.highlight;
+ }
+ if (cell->attributes.f_color != f_color) {
+ flen += 3; /* TO_SA to switch color. */
+ f_color = cell->attributes.f_color;
+ }
+ if (cell->attributes.b_color != b_color) {
+ flen += 3; /* TO_SA to switch color. */
+ b_color = cell->attributes.b_color;
+ }
+ if (cell->attributes.alternate_charset)
+ flen += 1; /* TO_GE to switch to graphics extensions */
}
- return s;
+ if (highlight)
+ flen += 3; /* TO_SA to reset hightlight. */
+ if (f_color != TAC_RESET)
+ flen += 3; /* TO_SA to reset color. */
+ if (b_color != TAC_RESET)
+ flen += 3; /* TO_SA to reset color. */
+ if (line->len < tp->view.cols)
+ flen += 4; /* Postfix (TO_RA). */
+
+ return flen;
+}
+
+static char *tty3270_add_reset_attributes(struct tty3270 *tp, struct tty3270_line *line,
+ char *cp, struct tty3270_attribute *attr, int lineno)
+{
+ if (attr->highlight)
+ cp = tty3270_add_sa(tp, cp, TAT_EXTHI, TAX_RESET);
+ if (attr->f_color != TAC_RESET)
+ cp = tty3270_add_sa(tp, cp, TAT_FGCOLOR, TAX_RESET);
+ if (attr->b_color != TAC_RESET)
+ cp = tty3270_add_sa(tp, cp, TAT_BGCOLOR, TAX_RESET);
+ if (line->len < tp->view.cols)
+ cp = tty3270_add_ra(tp, cp, 0, lineno + 1, 0);
+ return cp;
+}
+
+static char tty3270_graphics_translate(struct tty3270 *tp, char ch)
+{
+ switch (ch) {
+ case 'q': /* - */
+ return 0xa2;
+ case 'x': /* '|' */
+ return 0x85;
+ case 'l': /* |- */
+ return 0xc5;
+ case 't': /* |_ */
+ return 0xc6;
+ case 'u': /* _| */
+ return 0xd6;
+ case 'k': /* -| */
+ return 0xd5;
+ case 'j':
+ return 0xd4;
+ case 'm':
+ return 0xc4;
+ case 'n': /* + */
+ return 0xd3;
+ case 'v':
+ return 0xc7;
+ case 'w':
+ return 0xd7;
+ default:
+ return ch;
+ }
+}
+
+static char *tty3270_add_attributes(struct tty3270 *tp, struct tty3270_line *line,
+ struct tty3270_attribute *attr, char *cp, int lineno)
+{
+ const unsigned char colors[16] = {
+ [0] = TAC_DEFAULT,
+ [1] = TAC_RED,
+ [2] = TAC_GREEN,
+ [3] = TAC_YELLOW,
+ [4] = TAC_BLUE,
+ [5] = TAC_PINK,
+ [6] = TAC_TURQ,
+ [7] = TAC_WHITE,
+ [9] = TAC_DEFAULT
+ };
+
+ const unsigned char highlights[8] = {
+ [TTY3270_HIGHLIGHT_BLINK] = TAX_BLINK,
+ [TTY3270_HIGHLIGHT_REVERSE] = TAX_REVER,
+ [TTY3270_HIGHLIGHT_UNDERSCORE] = TAX_UNDER,
+ };
+
+ struct tty3270_cell *cell;
+ int c, i;
+
+ cp = tty3270_add_ba(tp, cp, TO_SBA, 0, lineno);
+
+ for (i = 0, cell = line->cells; i < line->len; i++, cell++) {
+ if (cell->attributes.highlight != attr->highlight) {
+ attr->highlight = cell->attributes.highlight;
+ cp = tty3270_add_sa(tp, cp, TAT_EXTHI, highlights[attr->highlight]);
+ }
+ if (cell->attributes.f_color != attr->f_color) {
+ attr->f_color = cell->attributes.f_color;
+ cp = tty3270_add_sa(tp, cp, TAT_FGCOLOR, colors[attr->f_color]);
+ }
+ if (cell->attributes.b_color != attr->b_color) {
+ attr->b_color = cell->attributes.b_color;
+ cp = tty3270_add_sa(tp, cp, TAT_BGCOLOR, colors[attr->b_color]);
+ }
+ c = cell->character;
+ if (cell->attributes.alternate_charset)
+ cp = tty3270_add_ge(tp, cp, tty3270_graphics_translate(tp, c));
+ else
+ *cp++ = tp->view.ascebc[c];
+ }
+ return cp;
+}
+
+static void tty3270_reset_attributes(struct tty3270_attribute *attr)
+{
+ attr->highlight = TAX_RESET;
+ attr->f_color = TAC_RESET;
+ attr->b_color = TAC_RESET;
}
/*
- * Write completion callback.
+ * Convert a tty3270_line to a 3270 data fragment usable for output.
*/
-static void
-con3270_write_callback(struct raw3270_request *rq, void *data)
+static unsigned int tty3270_convert_line(struct tty3270 *tp, struct tty3270_line *line, int lineno)
{
- raw3270_request_reset(rq);
- xchg(&((struct con3270 *) rq->view)->write, rq);
+ struct tty3270_attribute attr;
+ int flen;
+ char *cp;
+
+ /* Determine how long the fragment will be. */
+ flen = tty3270_required_length(tp, line);
+ if (flen > PAGE_SIZE)
+ return 0;
+ /* Write 3270 data fragment. */
+ tty3270_reset_attributes(&attr);
+ cp = tty3270_add_attributes(tp, line, &attr, tp->converted_line, lineno);
+ cp = tty3270_add_reset_attributes(tp, line, cp, &attr, lineno);
+ return cp - (char *)tp->converted_line;
+}
+
+static void tty3270_update_lines_visible(struct tty3270 *tp, struct raw3270_request *rq)
+{
+ struct tty3270_line *line;
+ int len, i;
+
+ for (i = 0; i < tty3270_tty_rows(tp); i++) {
+ line = tty3270_get_view_line(tp, i);
+ if (!line->dirty)
+ continue;
+ len = tty3270_convert_line(tp, line, i);
+ if (raw3270_request_add_data(rq, tp->converted_line, len))
+ break;
+ line->dirty = 0;
+ }
+ if (i == tty3270_tty_rows(tp)) {
+ for (i = 0; i < tp->allocated_lines; i++)
+ tp->screen[i].dirty = 0;
+ tp->update_flags &= ~TTY_UPDATE_LINES;
+ }
+}
+
+static void tty3270_update_lines_all(struct tty3270 *tp, struct raw3270_request *rq)
+{
+ struct tty3270_line *line;
+ char buf[4];
+ int len, i;
+
+ for (i = 0; i < tp->allocated_lines; i++) {
+ line = tty3270_get_write_line(tp, i + tp->cy + 1);
+ if (!line->dirty)
+ continue;
+ len = tty3270_convert_line(tp, line, tp->oops_line);
+ if (raw3270_request_add_data(rq, tp->converted_line, len))
+ break;
+ line->dirty = 0;
+ if (++tp->oops_line >= tty3270_tty_rows(tp))
+ tp->oops_line = 0;
+ }
+
+ if (i == tp->allocated_lines) {
+ if (tp->oops_line < tty3270_tty_rows(tp)) {
+ tty3270_add_ra(tp, buf, 0, tty3270_tty_rows(tp), 0);
+ if (raw3270_request_add_data(rq, buf, sizeof(buf)))
+ return;
+ }
+ tp->update_flags &= ~TTY_UPDATE_LINES;
+ }
}
/*
- * Update console display.
+ * Update 3270 display.
*/
-static void
-con3270_update(struct timer_list *t)
+static void tty3270_update(struct timer_list *t)
{
- struct con3270 *cp = from_timer(cp, t, timer);
+ struct tty3270 *tp = from_timer(tp, t, timer);
struct raw3270_request *wrq;
- char wcc, prolog[6];
- unsigned long flags;
- unsigned long updated;
- struct string *s, *n;
- int rc;
+ u8 cmd = TC_WRITE;
+ int rc, len;
- if (!auto_update && !raw3270_view_active(&cp->view))
- return;
- if (cp->view.dev)
- raw3270_activate_view(&cp->view);
-
- wrq = xchg(&cp->write, 0);
+ wrq = xchg(&tp->write, 0);
if (!wrq) {
- con3270_set_timer(cp, 1);
+ tty3270_set_timer(tp, 1);
return;
}
- spin_lock_irqsave(&cp->view.lock, flags);
- updated = 0;
- if (cp->update_flags & CON_UPDATE_ALL) {
- con3270_rebuild_update(cp);
- con3270_update_status(cp);
- cp->update_flags = CON_UPDATE_ERASE | CON_UPDATE_LIST |
- CON_UPDATE_STATUS;
- }
- if (cp->update_flags & CON_UPDATE_ERASE) {
- /* Use erase write alternate to initialize display. */
- raw3270_request_set_cmd(wrq, TC_EWRITEA);
- updated |= CON_UPDATE_ERASE;
- } else
- raw3270_request_set_cmd(wrq, TC_WRITE);
+ spin_lock_irq(&tp->view.lock);
+ if (tp->update_flags == TTY_UPDATE_ALL)
+ cmd = TC_EWRITEA;
- wcc = TW_NONE;
- raw3270_request_add_data(wrq, &wcc, 1);
+ raw3270_request_set_cmd(wrq, cmd);
+ raw3270_request_add_data(wrq, &tp->wcc, 1);
+ tp->wcc = TW_NONE;
/*
* Update status line.
*/
- if (cp->update_flags & CON_UPDATE_STATUS)
- if (raw3270_request_add_data(wrq, cp->status->string,
- cp->status->len) == 0)
- updated |= CON_UPDATE_STATUS;
-
- if (cp->update_flags & CON_UPDATE_LIST) {
- prolog[0] = TO_SBA;
- prolog[3] = TO_SA;
- prolog[4] = TAT_COLOR;
- prolog[5] = TAC_TURQ;
- raw3270_buffer_address(cp->view.dev, prolog + 1,
- cp->view.cols * cp->line_nr);
- raw3270_request_add_data(wrq, prolog, 6);
- /* Write strings in the update list to the screen. */
- list_for_each_entry_safe(s, n, &cp->update, update) {
- if (s != cp->cline)
- con3270_update_string(cp, s, cp->line_nr);
- if (raw3270_request_add_data(wrq, s->string,
- s->len) != 0)
- break;
- list_del_init(&s->update);
- if (s != cp->cline)
- cp->line_nr++;
- }
- if (list_empty(&cp->update))
- updated |= CON_UPDATE_LIST;
+ if (tp->update_flags & TTY_UPDATE_STATUS) {
+ len = tty3270_add_status(tp);
+ if (raw3270_request_add_data(wrq, tp->converted_line, len) == 0)
+ tp->update_flags &= ~TTY_UPDATE_STATUS;
+ }
+
+ /*
+ * Write input line.
+ */
+ if (tp->update_flags & TTY_UPDATE_INPUT) {
+ len = tty3270_add_prompt(tp);
+ if (raw3270_request_add_data(wrq, tp->converted_line, len) == 0)
+ tp->update_flags &= ~TTY_UPDATE_INPUT;
+ }
+
+ if (tp->update_flags & TTY_UPDATE_LINES) {
+ if (oops_in_progress)
+ tty3270_update_lines_all(tp, wrq);
+ else
+ tty3270_update_lines_visible(tp, wrq);
}
- wrq->callback = con3270_write_callback;
- rc = raw3270_start(&cp->view, wrq);
+
+ wrq->callback = tty3270_write_callback;
+ rc = raw3270_start(&tp->view, wrq);
if (rc == 0) {
- cp->update_flags &= ~updated;
- if (cp->update_flags)
- con3270_set_timer(cp, 1);
+ if (tp->update_flags)
+ tty3270_set_timer(tp, 1);
} else {
raw3270_request_reset(wrq);
- xchg(&cp->write, wrq);
+ xchg(&tp->write, wrq);
}
- spin_unlock_irqrestore(&cp->view.lock, flags);
+ spin_unlock_irq(&tp->view.lock);
}
/*
- * Read tasklet.
+ * Command recalling.
*/
-static void
-con3270_read_tasklet(unsigned long data)
+static void tty3270_rcl_add(struct tty3270 *tp, char *input, int len)
+{
+ char *p;
+
+ if (len <= 0)
+ return;
+ p = tp->rcl_lines[tp->rcl_write_index++];
+ tp->rcl_write_index &= TTY3270_RECALL_SIZE - 1;
+ memcpy(p, input, len);
+ p[len] = '\0';
+ tp->rcl_read_index = tp->rcl_write_index;
+}
+
+static void tty3270_rcl_backward(struct kbd_data *kbd)
+{
+ struct tty3270 *tp = container_of(kbd->port, struct tty3270, port);
+ int i = 0;
+
+ spin_lock_irq(&tp->view.lock);
+ if (tp->inattr == TF_INPUT) {
+ do {
+ tp->rcl_read_index--;
+ tp->rcl_read_index &= TTY3270_RECALL_SIZE - 1;
+ } while (!*tp->rcl_lines[tp->rcl_read_index] &&
+ i++ < TTY3270_RECALL_SIZE - 1);
+ tty3270_update_prompt(tp, tp->rcl_lines[tp->rcl_read_index]);
+ }
+ spin_unlock_irq(&tp->view.lock);
+}
+
+/*
+ * Deactivate tty view.
+ */
+static void tty3270_exit_tty(struct kbd_data *kbd)
+{
+ struct tty3270 *tp = container_of(kbd->port, struct tty3270, port);
+
+ raw3270_deactivate_view(&tp->view);
+}
+
+static void tty3270_redraw(struct tty3270 *tp)
+{
+ int i;
+
+ for (i = 0; i < tty3270_tty_rows(tp); i++)
+ tty3270_get_view_line(tp, i)->dirty = 1;
+ tp->update_flags = TTY_UPDATE_ALL;
+ tty3270_set_timer(tp, 1);
+}
+
+/*
+ * Scroll forward in history.
+ */
+static void tty3270_scroll_forward(struct kbd_data *kbd)
+{
+ struct tty3270 *tp = container_of(kbd->port, struct tty3270, port);
+
+ spin_lock_irq(&tp->view.lock);
+
+ if (tp->nr_up >= tty3270_tty_rows(tp))
+ tp->nr_up -= tty3270_tty_rows(tp) / 2;
+ else
+ tp->nr_up = 0;
+ tty3270_redraw(tp);
+ spin_unlock_irq(&tp->view.lock);
+}
+
+/*
+ * Scroll backward in history.
+ */
+static void tty3270_scroll_backward(struct kbd_data *kbd)
+{
+ struct tty3270 *tp = container_of(kbd->port, struct tty3270, port);
+
+ spin_lock_irq(&tp->view.lock);
+ tp->nr_up += tty3270_tty_rows(tp) / 2;
+ if (tp->nr_up > tp->allocated_lines - tty3270_tty_rows(tp))
+ tp->nr_up = tp->allocated_lines - tty3270_tty_rows(tp);
+ tty3270_redraw(tp);
+ spin_unlock_irq(&tp->view.lock);
+}
+
+/*
+ * Pass input line to tty.
+ */
+static void tty3270_read_tasklet(unsigned long data)
{
+ struct raw3270_request *rrq = (struct raw3270_request *)data;
static char kreset_data = TW_KR;
- struct raw3270_request *rrq;
- struct con3270 *cp;
- unsigned long flags;
- int nr_up, deactivate;
-
- rrq = (struct raw3270_request *)data;
- cp = (struct con3270 *) rrq->view;
- spin_lock_irqsave(&cp->view.lock, flags);
- nr_up = cp->nr_up;
- deactivate = 0;
- /* Check aid byte. */
- switch (cp->input->string[0]) {
- case 0x7d: /* enter: jump to bottom. */
- nr_up = 0;
- break;
- case 0xf3: /* PF3: deactivate the console view. */
- deactivate = 1;
+ struct tty3270 *tp = container_of(rrq->view, struct tty3270, view);
+ char *input;
+ int len;
+
+ spin_lock_irq(&tp->view.lock);
+ /*
+ * Two AID keys are special: For 0x7d (enter) the input line
+ * has to be emitted to the tty and for 0x6d the screen
+ * needs to be redrawn.
+ */
+ input = NULL;
+ len = 0;
+ switch (tp->input[0]) {
+ case AID_ENTER:
+ /* Enter: write input to tty. */
+ input = tp->input + 6;
+ len = tty3270_input_size(tp->view.cols) - 6 - rrq->rescnt;
+ if (tp->inattr != TF_INPUTN)
+ tty3270_rcl_add(tp, input, len);
+ if (tp->nr_up > 0)
+ tp->nr_up = 0;
+ /* Clear input area. */
+ tty3270_update_prompt(tp, "");
+ tty3270_set_timer(tp, 1);
break;
- case 0x6d: /* clear: start from scratch. */
- cp->update_flags = CON_UPDATE_ALL;
- con3270_set_timer(cp, 1);
+ case AID_CLEAR:
+ /* Display has been cleared. Redraw. */
+ tp->update_flags = TTY_UPDATE_ALL;
+ tty3270_set_timer(tp, 1);
+ if (!list_empty(&tp->readpartreq->list))
+ break;
+ raw3270_start_request(&tp->view, tp->readpartreq, TC_WRITESF,
+ (char *)sfq_read_partition, sizeof(sfq_read_partition));
break;
- case 0xf7: /* PF7: do a page up in the console log. */
- nr_up += cp->view.rows - 2;
- if (nr_up + cp->view.rows - 1 > cp->nr_lines) {
- nr_up = cp->nr_lines - cp->view.rows + 1;
- if (nr_up < 0)
- nr_up = 0;
- }
+ case AID_READ_PARTITION:
+ raw3270_read_modified_cb(tp->readpartreq, tp->input);
break;
- case 0xf8: /* PF8: do a page down in the console log. */
- nr_up -= cp->view.rows - 2;
- if (nr_up < 0)
- nr_up = 0;
+ default:
break;
}
- if (nr_up != cp->nr_up) {
- cp->nr_up = nr_up;
- con3270_rebuild_update(cp);
- con3270_update_status(cp);
- con3270_set_timer(cp, 1);
- }
- spin_unlock_irqrestore(&cp->view.lock, flags);
+ spin_unlock_irq(&tp->view.lock);
/* Start keyboard reset command. */
- raw3270_request_reset(cp->kreset);
- raw3270_request_set_cmd(cp->kreset, TC_WRITE);
- raw3270_request_add_data(cp->kreset, &kreset_data, 1);
- raw3270_start(&cp->view, cp->kreset);
+ raw3270_start_request(&tp->view, tp->kreset, TC_WRITE, &kreset_data, 1);
- if (deactivate)
- raw3270_deactivate_view(&cp->view);
+ while (len-- > 0)
+ kbd_keycode(tp->kbd, *input++);
+ /* Emit keycode for AID byte. */
+ kbd_keycode(tp->kbd, 256 + tp->input[0]);
raw3270_request_reset(rrq);
- xchg(&cp->read, rrq);
- raw3270_put_view(&cp->view);
+ xchg(&tp->read, rrq);
+ raw3270_put_view(&tp->view);
}
/*
* Read request completion callback.
*/
-static void
-con3270_read_callback(struct raw3270_request *rq, void *data)
+static void tty3270_read_callback(struct raw3270_request *rq, void *data)
{
+ struct tty3270 *tp = container_of(rq->view, struct tty3270, view);
+
raw3270_get_view(rq->view);
/* Schedule tasklet to pass input to tty. */
- tasklet_schedule(&((struct con3270 *) rq->view)->readlet);
+ tasklet_schedule(&tp->readlet);
}
/*
- * Issue a read request. Called only from interrupt function.
+ * Issue a read request. Call with device lock.
*/
-static void
-con3270_issue_read(struct con3270 *cp)
+static void tty3270_issue_read(struct tty3270 *tp, int lock)
{
struct raw3270_request *rrq;
int rc;
- rrq = xchg(&cp->read, 0);
+ rrq = xchg(&tp->read, 0);
if (!rrq)
/* Read already scheduled. */
return;
- rrq->callback = con3270_read_callback;
- rrq->callback_data = cp;
+ rrq->callback = tty3270_read_callback;
+ rrq->callback_data = tp;
raw3270_request_set_cmd(rrq, TC_READMOD);
- raw3270_request_set_data(rrq, cp->input->string, cp->input->len);
+ raw3270_request_set_data(rrq, tp->input, tty3270_input_size(tp->view.cols));
/* Issue the read modified request. */
- rc = raw3270_start_irq(&cp->view, rrq);
- if (rc)
+ if (lock)
+ rc = raw3270_start(&tp->view, rrq);
+ else
+ rc = raw3270_start_irq(&tp->view, rrq);
+ if (rc) {
raw3270_request_reset(rrq);
+ xchg(&tp->read, rrq);
+ }
}
/*
- * Switch to the console view.
+ * Hang up the tty
*/
-static int
-con3270_activate(struct raw3270_view *view)
+static void tty3270_hangup_tasklet(unsigned long data)
{
- struct con3270 *cp;
+ struct tty3270 *tp = (struct tty3270 *)data;
+
+ tty_port_tty_hangup(&tp->port, true);
+ raw3270_put_view(&tp->view);
+}
- cp = (struct con3270 *) view;
- cp->update_flags = CON_UPDATE_ALL;
- con3270_set_timer(cp, 1);
+/*
+ * Switch to the tty view.
+ */
+static int tty3270_activate(struct raw3270_view *view)
+{
+ struct tty3270 *tp = container_of(view, struct tty3270, view);
+
+ tp->update_flags = TTY_UPDATE_ALL;
+ tty3270_set_timer(tp, 1);
return 0;
}
-static void
-con3270_deactivate(struct raw3270_view *view)
+static void tty3270_deactivate(struct raw3270_view *view)
{
- struct con3270 *cp;
+ struct tty3270 *tp = container_of(view, struct tty3270, view);
- cp = (struct con3270 *) view;
- del_timer(&cp->timer);
+ del_timer(&tp->timer);
}
-static void
-con3270_irq(struct con3270 *cp, struct raw3270_request *rq, struct irb *irb)
+static void tty3270_irq(struct tty3270 *tp, struct raw3270_request *rq, struct irb *irb)
{
/* Handle ATTN. Schedule tasklet to read aid. */
- if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION)
- con3270_issue_read(cp);
+ if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) {
+ if (!tp->throttle)
+ tty3270_issue_read(tp, 0);
+ else
+ tp->attn = 1;
+ }
if (rq) {
- if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)
+ if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) {
rq->rc = -EIO;
- else
+ raw3270_get_view(&tp->view);
+ tasklet_schedule(&tp->hanglet);
+ } else {
/* Normal end. Copy residual count. */
rq->rescnt = irb->scsw.cmd.count;
+ }
} else if (irb->scsw.cmd.dstat & DEV_STAT_DEV_END) {
/* Interrupt without an outstanding request -> update all */
- cp->update_flags = CON_UPDATE_ALL;
- con3270_set_timer(cp, 1);
+ tp->update_flags = TTY_UPDATE_ALL;
+ tty3270_set_timer(tp, 1);
+ }
+}
+
+/*
+ * Allocate tty3270 structure.
+ */
+static struct tty3270 *tty3270_alloc_view(void)
+{
+ struct tty3270 *tp;
+
+ tp = kzalloc(sizeof(*tp), GFP_KERNEL);
+ if (!tp)
+ goto out_err;
+
+ tp->write = raw3270_request_alloc(TTY3270_OUTPUT_BUFFER_SIZE);
+ if (IS_ERR(tp->write))
+ goto out_tp;
+ tp->read = raw3270_request_alloc(0);
+ if (IS_ERR(tp->read))
+ goto out_write;
+ tp->kreset = raw3270_request_alloc(1);
+ if (IS_ERR(tp->kreset))
+ goto out_read;
+ tp->readpartreq = raw3270_request_alloc(sizeof(sfq_read_partition));
+ if (IS_ERR(tp->readpartreq))
+ goto out_reset;
+ tp->kbd = kbd_alloc();
+ if (!tp->kbd)
+ goto out_readpartreq;
+
+ tty_port_init(&tp->port);
+ timer_setup(&tp->timer, tty3270_update, 0);
+ tasklet_init(&tp->readlet, tty3270_read_tasklet,
+ (unsigned long)tp->read);
+ tasklet_init(&tp->hanglet, tty3270_hangup_tasklet,
+ (unsigned long)tp);
+ return tp;
+
+out_readpartreq:
+ raw3270_request_free(tp->readpartreq);
+out_reset:
+ raw3270_request_free(tp->kreset);
+out_read:
+ raw3270_request_free(tp->read);
+out_write:
+ raw3270_request_free(tp->write);
+out_tp:
+ kfree(tp);
+out_err:
+ return ERR_PTR(-ENOMEM);
+}
+
+/*
+ * Free tty3270 structure.
+ */
+static void tty3270_free_view(struct tty3270 *tp)
+{
+ kbd_free(tp->kbd);
+ raw3270_request_free(tp->kreset);
+ raw3270_request_free(tp->read);
+ raw3270_request_free(tp->write);
+ free_page((unsigned long)tp->converted_line);
+ tty_port_destroy(&tp->port);
+ kfree(tp);
+}
+
+/*
+ * Allocate tty3270 screen.
+ */
+static struct tty3270_line *tty3270_alloc_screen(struct tty3270 *tp, unsigned int rows,
+ unsigned int cols, int *allocated_out)
+{
+ struct tty3270_line *screen;
+ int allocated, lines;
+
+ allocated = __roundup_pow_of_two(rows) * TTY3270_SCREEN_PAGES;
+ screen = kcalloc(allocated, sizeof(struct tty3270_line), GFP_KERNEL);
+ if (!screen)
+ goto out_err;
+ for (lines = 0; lines < allocated; lines++) {
+ screen[lines].cells = kcalloc(cols, sizeof(struct tty3270_cell), GFP_KERNEL);
+ if (!screen[lines].cells)
+ goto out_screen;
+ }
+ *allocated_out = allocated;
+ return screen;
+out_screen:
+ while (lines--)
+ kfree(screen[lines].cells);
+ kfree(screen);
+out_err:
+ return ERR_PTR(-ENOMEM);
+}
+
+static char **tty3270_alloc_recall(int cols)
+{
+ char **lines;
+ int i;
+
+ lines = kmalloc_array(TTY3270_RECALL_SIZE, sizeof(char *), GFP_KERNEL);
+ if (!lines)
+ return NULL;
+ for (i = 0; i < TTY3270_RECALL_SIZE; i++) {
+ lines[i] = kcalloc(1, tty3270_input_size(cols) + 1, GFP_KERNEL);
+ if (!lines[i])
+ break;
}
+
+ if (i == TTY3270_RECALL_SIZE)
+ return lines;
+
+ while (i--)
+ kfree(lines[i]);
+ kfree(lines);
+ return NULL;
}
-/* Console view to a 3270 device. */
-static struct raw3270_fn con3270_fn = {
- .activate = con3270_activate,
- .deactivate = con3270_deactivate,
- .intv = (void *) con3270_irq
+static void tty3270_free_recall(char **lines)
+{
+ int i;
+
+ for (i = 0; i < TTY3270_RECALL_SIZE; i++)
+ kfree(lines[i]);
+ kfree(lines);
+}
+
+/*
+ * Free tty3270 screen.
+ */
+static void tty3270_free_screen(struct tty3270_line *screen, int old_lines)
+{
+ int lines;
+
+ for (lines = 0; lines < old_lines; lines++)
+ kfree(screen[lines].cells);
+ kfree(screen);
+}
+
+/*
+ * Resize tty3270 screen
+ */
+static void tty3270_resize(struct raw3270_view *view,
+ int new_model, int new_rows, int new_cols,
+ int old_model, int old_rows, int old_cols)
+{
+ struct tty3270 *tp = container_of(view, struct tty3270, view);
+ struct tty3270_line *screen, *oscreen;
+ char **old_rcl_lines, **new_rcl_lines;
+ char *old_prompt, *new_prompt;
+ char *old_input, *new_input;
+ struct tty_struct *tty;
+ struct winsize ws;
+ int new_allocated, old_allocated = tp->allocated_lines;
+
+ if (old_model == new_model &&
+ old_cols == new_cols &&
+ old_rows == new_rows) {
+ spin_lock_irq(&tp->view.lock);
+ tty3270_redraw(tp);
+ spin_unlock_irq(&tp->view.lock);
+ return;
+ }
+
+ new_input = kzalloc(tty3270_input_size(new_cols), GFP_KERNEL | GFP_DMA);
+ if (!new_input)
+ return;
+ new_prompt = kzalloc(tty3270_input_size(new_cols), GFP_KERNEL);
+ if (!new_prompt)
+ goto out_input;
+ screen = tty3270_alloc_screen(tp, new_rows, new_cols, &new_allocated);
+ if (IS_ERR(screen))
+ goto out_prompt;
+ new_rcl_lines = tty3270_alloc_recall(new_cols);
+ if (!new_rcl_lines)
+ goto out_screen;
+
+ /* Switch to new output size */
+ spin_lock_irq(&tp->view.lock);
+ tty3270_blank_screen(tp);
+ oscreen = tp->screen;
+ tp->screen = screen;
+ tp->allocated_lines = new_allocated;
+ tp->view.rows = new_rows;
+ tp->view.cols = new_cols;
+ tp->view.model = new_model;
+ tp->update_flags = TTY_UPDATE_ALL;
+ old_input = tp->input;
+ old_prompt = tp->prompt;
+ old_rcl_lines = tp->rcl_lines;
+ tp->input = new_input;
+ tp->prompt = new_prompt;
+ tp->rcl_lines = new_rcl_lines;
+ tp->rcl_read_index = 0;
+ tp->rcl_write_index = 0;
+ spin_unlock_irq(&tp->view.lock);
+ tty3270_free_screen(oscreen, old_allocated);
+ kfree(old_input);
+ kfree(old_prompt);
+ tty3270_free_recall(old_rcl_lines);
+ tty3270_set_timer(tp, 1);
+ /* Informat tty layer about new size */
+ tty = tty_port_tty_get(&tp->port);
+ if (!tty)
+ return;
+ ws.ws_row = tty3270_tty_rows(tp);
+ ws.ws_col = tp->view.cols;
+ tty_do_resize(tty, &ws);
+ tty_kref_put(tty);
+ return;
+out_screen:
+ tty3270_free_screen(screen, new_rows);
+out_prompt:
+ kfree(new_prompt);
+out_input:
+ kfree(new_input);
+}
+
+/*
+ * Unlink tty3270 data structure from tty.
+ */
+static void tty3270_release(struct raw3270_view *view)
+{
+ struct tty3270 *tp = container_of(view, struct tty3270, view);
+ struct tty_struct *tty = tty_port_tty_get(&tp->port);
+
+ if (tty) {
+ tty->driver_data = NULL;
+ tty_port_tty_set(&tp->port, NULL);
+ tty_hangup(tty);
+ raw3270_put_view(&tp->view);
+ tty_kref_put(tty);
+ }
+}
+
+/*
+ * Free tty3270 data structure
+ */
+static void tty3270_free(struct raw3270_view *view)
+{
+ struct tty3270 *tp = container_of(view, struct tty3270, view);
+
+ del_timer_sync(&tp->timer);
+ tty3270_free_screen(tp->screen, tp->allocated_lines);
+ free_page((unsigned long)tp->converted_line);
+ kfree(tp->input);
+ kfree(tp->prompt);
+ tty3270_free_view(tp);
+}
+
+/*
+ * Delayed freeing of tty3270 views.
+ */
+static void tty3270_del_views(void)
+{
+ int i;
+
+ for (i = RAW3270_FIRSTMINOR; i <= tty3270_max_index; i++) {
+ struct raw3270_view *view = raw3270_find_view(&tty3270_fn, i);
+
+ if (!IS_ERR(view))
+ raw3270_del_view(view);
+ }
+}
+
+static struct raw3270_fn tty3270_fn = {
+ .activate = tty3270_activate,
+ .deactivate = tty3270_deactivate,
+ .intv = (void *)tty3270_irq,
+ .release = tty3270_release,
+ .free = tty3270_free,
+ .resize = tty3270_resize
};
-static inline void
-con3270_cline_add(struct con3270 *cp)
+static int
+tty3270_create_view(int index, struct tty3270 **newtp)
+{
+ struct tty3270 *tp;
+ int rc;
+
+ if (tty3270_max_index < index + 1)
+ tty3270_max_index = index + 1;
+
+ /* Allocate tty3270 structure on first open. */
+ tp = tty3270_alloc_view();
+ if (IS_ERR(tp))
+ return PTR_ERR(tp);
+
+ rc = raw3270_add_view(&tp->view, &tty3270_fn,
+ index + RAW3270_FIRSTMINOR,
+ RAW3270_VIEW_LOCK_IRQ);
+ if (rc)
+ goto out_free_view;
+
+ tp->screen = tty3270_alloc_screen(tp, tp->view.rows, tp->view.cols,
+ &tp->allocated_lines);
+ if (IS_ERR(tp->screen)) {
+ rc = PTR_ERR(tp->screen);
+ goto out_put_view;
+ }
+
+ tp->converted_line = (void *)__get_free_page(GFP_KERNEL);
+ if (!tp->converted_line) {
+ rc = -ENOMEM;
+ goto out_free_screen;
+ }
+
+ tp->input = kzalloc(tty3270_input_size(tp->view.cols), GFP_KERNEL | GFP_DMA);
+ if (!tp->input) {
+ rc = -ENOMEM;
+ goto out_free_converted_line;
+ }
+
+ tp->prompt = kzalloc(tty3270_input_size(tp->view.cols), GFP_KERNEL);
+ if (!tp->prompt) {
+ rc = -ENOMEM;
+ goto out_free_input;
+ }
+
+ tp->rcl_lines = tty3270_alloc_recall(tp->view.cols);
+ if (!tp->rcl_lines) {
+ rc = -ENOMEM;
+ goto out_free_prompt;
+ }
+
+ /* Create blank line for every line in the tty output area. */
+ tty3270_blank_screen(tp);
+
+ tp->kbd->port = &tp->port;
+ tp->kbd->fn_handler[KVAL(K_INCRCONSOLE)] = tty3270_exit_tty;
+ tp->kbd->fn_handler[KVAL(K_SCROLLBACK)] = tty3270_scroll_backward;
+ tp->kbd->fn_handler[KVAL(K_SCROLLFORW)] = tty3270_scroll_forward;
+ tp->kbd->fn_handler[KVAL(K_CONS)] = tty3270_rcl_backward;
+ kbd_ascebc(tp->kbd, tp->view.ascebc);
+
+ raw3270_activate_view(&tp->view);
+ raw3270_put_view(&tp->view);
+ *newtp = tp;
+ return 0;
+
+out_free_prompt:
+ kfree(tp->prompt);
+out_free_input:
+ kfree(tp->input);
+out_free_converted_line:
+ free_page((unsigned long)tp->converted_line);
+out_free_screen:
+ tty3270_free_screen(tp->screen, tp->view.rows);
+out_put_view:
+ raw3270_put_view(&tp->view);
+ raw3270_del_view(&tp->view);
+out_free_view:
+ tty3270_free_view(tp);
+ return rc;
+}
+
+/*
+ * This routine is called whenever a 3270 tty is opened first time.
+ */
+static int
+tty3270_install(struct tty_driver *driver, struct tty_struct *tty)
+{
+ struct raw3270_view *view;
+ struct tty3270 *tp;
+ int rc;
+
+ /* Check if the tty3270 is already there. */
+ view = raw3270_find_view(&tty3270_fn, tty->index + RAW3270_FIRSTMINOR);
+ if (IS_ERR(view)) {
+ rc = tty3270_create_view(tty->index, &tp);
+ if (rc)
+ return rc;
+ } else {
+ tp = container_of(view, struct tty3270, view);
+ tty->driver_data = tp;
+ tp->inattr = TF_INPUT;
+ }
+
+ tty->winsize.ws_row = tty3270_tty_rows(tp);
+ tty->winsize.ws_col = tp->view.cols;
+ rc = tty_port_install(&tp->port, driver, tty);
+ if (rc) {
+ raw3270_put_view(&tp->view);
+ return rc;
+ }
+ tty->driver_data = tp;
+ return 0;
+}
+
+/*
+ * This routine is called whenever a 3270 tty is opened.
+ */
+static int tty3270_open(struct tty_struct *tty, struct file *filp)
+{
+ struct tty3270 *tp = tty->driver_data;
+ struct tty_port *port = &tp->port;
+
+ port->count++;
+ tty_port_tty_set(port, tty);
+ return 0;
+}
+
+/*
+ * This routine is called when the 3270 tty is closed. We wait
+ * for the remaining request to be completed. Then we clean up.
+ */
+static void tty3270_close(struct tty_struct *tty, struct file *filp)
{
- if (!list_empty(&cp->cline->list))
- /* Already added. */
+ struct tty3270 *tp = tty->driver_data;
+
+ if (tty->count > 1)
return;
- list_add_tail(&cp->cline->list, &cp->lines);
- cp->nr_lines++;
- con3270_rebuild_update(cp);
+ if (tp)
+ tty_port_tty_set(&tp->port, NULL);
+}
+
+static void tty3270_cleanup(struct tty_struct *tty)
+{
+ struct tty3270 *tp = tty->driver_data;
+
+ if (tp) {
+ tty->driver_data = NULL;
+ raw3270_put_view(&tp->view);
+ }
+}
+
+/*
+ * We always have room.
+ */
+static unsigned int tty3270_write_room(struct tty_struct *tty)
+{
+ return INT_MAX;
}
-static inline void
-con3270_cline_insert(struct con3270 *cp, unsigned char c)
+/*
+ * Insert character into the screen at the current position with the
+ * current color and highlight. This function does NOT do cursor movement.
+ */
+static void tty3270_put_character(struct tty3270 *tp, char ch)
{
- cp->cline->string[cp->cline->len++] =
- cp->view.ascebc[(c < ' ') ? ' ' : c];
- if (list_empty(&cp->cline->update)) {
- list_add_tail(&cp->cline->update, &cp->update);
- cp->update_flags |= CON_UPDATE_LIST;
+ struct tty3270_line *line;
+ struct tty3270_cell *cell;
+
+ line = tty3270_get_write_line(tp, tp->cy);
+ if (line->len <= tp->cx) {
+ while (line->len < tp->cx) {
+ cell = line->cells + line->len;
+ cell->character = ' ';
+ cell->attributes = tp->attributes;
+ line->len++;
+ }
+ line->len++;
}
+ cell = line->cells + tp->cx;
+ cell->character = ch;
+ cell->attributes = tp->attributes;
+ line->dirty = 1;
+}
+
+/*
+ * Do carriage return.
+ */
+static void tty3270_cr(struct tty3270 *tp)
+{
+ tp->cx = 0;
}
-static inline void
-con3270_cline_end(struct con3270 *cp)
+/*
+ * Do line feed.
+ */
+static void tty3270_lf(struct tty3270 *tp)
{
- struct string *s;
- unsigned int size;
+ struct tty3270_line *line;
+ int i;
- /* Copy cline. */
- size = (cp->cline->len < cp->view.cols - 5) ?
- cp->cline->len + 4 : cp->view.cols;
- s = con3270_alloc_string(cp, size);
- memcpy(s->string, cp->cline->string, cp->cline->len);
- if (cp->cline->len < cp->view.cols - 5) {
- s->string[s->len - 4] = TO_RA;
- s->string[s->len - 1] = 0;
+ if (tp->cy < tty3270_tty_rows(tp) - 1) {
+ tp->cy++;
} else {
- while (--size >= cp->cline->len)
- s->string[size] = cp->view.ascebc[' '];
+ tp->line_view_start = tty3270_line_increment(tp, tp->line_view_start, 1);
+ tp->line_write_start = tty3270_line_increment(tp, tp->line_write_start, 1);
+ for (i = 0; i < tty3270_tty_rows(tp); i++)
+ tty3270_get_view_line(tp, i)->dirty = 1;
+ }
+
+ line = tty3270_get_write_line(tp, tp->cy);
+ line->len = 0;
+ line->dirty = 1;
+}
+
+static void tty3270_ri(struct tty3270 *tp)
+{
+ if (tp->cy > 0)
+ tp->cy--;
+}
+
+static void tty3270_reset_cell(struct tty3270 *tp, struct tty3270_cell *cell)
+{
+ cell->character = ' ';
+ tty3270_reset_attributes(&cell->attributes);
+}
+
+/*
+ * Insert characters at current position.
+ */
+static void tty3270_insert_characters(struct tty3270 *tp, int n)
+{
+ struct tty3270_line *line;
+ int k;
+
+ line = tty3270_get_write_line(tp, tp->cy);
+ while (line->len < tp->cx)
+ tty3270_reset_cell(tp, &line->cells[line->len++]);
+ if (n > tp->view.cols - tp->cx)
+ n = tp->view.cols - tp->cx;
+ k = min_t(int, line->len - tp->cx, tp->view.cols - tp->cx - n);
+ while (k--)
+ line->cells[tp->cx + n + k] = line->cells[tp->cx + k];
+ line->len += n;
+ if (line->len > tp->view.cols)
+ line->len = tp->view.cols;
+ while (n-- > 0) {
+ line->cells[tp->cx + n].character = ' ';
+ line->cells[tp->cx + n].attributes = tp->attributes;
+ }
+}
+
+/*
+ * Delete characters at current position.
+ */
+static void tty3270_delete_characters(struct tty3270 *tp, int n)
+{
+ struct tty3270_line *line;
+ int i;
+
+ line = tty3270_get_write_line(tp, tp->cy);
+ if (line->len <= tp->cx)
+ return;
+ if (line->len - tp->cx <= n) {
+ line->len = tp->cx;
+ return;
+ }
+ for (i = tp->cx; i + n < line->len; i++)
+ line->cells[i] = line->cells[i + n];
+ line->len -= n;
+}
+
+/*
+ * Erase characters at current position.
+ */
+static void tty3270_erase_characters(struct tty3270 *tp, int n)
+{
+ struct tty3270_line *line;
+ struct tty3270_cell *cell;
+
+ line = tty3270_get_write_line(tp, tp->cy);
+ while (line->len > tp->cx && n-- > 0) {
+ cell = line->cells + tp->cx++;
+ tty3270_reset_cell(tp, cell);
+ }
+ tp->cx += n;
+ tp->cx = min_t(int, tp->cx, tp->view.cols - 1);
+}
+
+/*
+ * Erase line, 3 different cases:
+ * Esc [ 0 K Erase from current position to end of line inclusive
+ * Esc [ 1 K Erase from beginning of line to current position inclusive
+ * Esc [ 2 K Erase entire line (without moving cursor)
+ */
+static void tty3270_erase_line(struct tty3270 *tp, int mode)
+{
+ struct tty3270_line *line;
+ struct tty3270_cell *cell;
+ int i, start, end;
+
+ line = tty3270_get_write_line(tp, tp->cy);
+
+ switch (mode) {
+ case 0:
+ start = tp->cx;
+ end = tp->view.cols;
+ break;
+ case 1:
+ start = 0;
+ end = tp->cx;
+ break;
+ case 2:
+ start = 0;
+ end = tp->view.cols;
+ break;
+ default:
+ return;
+ }
+
+ for (i = start; i < end; i++) {
+ cell = line->cells + i;
+ tty3270_reset_cell(tp, cell);
+ cell->attributes.b_color = tp->attributes.b_color;
+ }
+
+ if (line->len <= end)
+ line->len = end;
+}
+
+/*
+ * Erase display, 3 different cases:
+ * Esc [ 0 J Erase from current position to bottom of screen inclusive
+ * Esc [ 1 J Erase from top of screen to current position inclusive
+ * Esc [ 2 J Erase entire screen (without moving the cursor)
+ */
+static void tty3270_erase_display(struct tty3270 *tp, int mode)
+{
+ struct tty3270_line *line;
+ int i, start, end;
+
+ switch (mode) {
+ case 0:
+ tty3270_erase_line(tp, 0);
+ start = tp->cy + 1;
+ end = tty3270_tty_rows(tp);
+ break;
+ case 1:
+ start = 0;
+ end = tp->cy;
+ tty3270_erase_line(tp, 1);
+ break;
+ case 2:
+ start = 0;
+ end = tty3270_tty_rows(tp);
+ break;
+ default:
+ return;
+ }
+ for (i = start; i < end; i++) {
+ line = tty3270_get_write_line(tp, i);
+ line->len = 0;
+ line->dirty = 1;
+ }
+}
+
+/*
+ * Set attributes found in an escape sequence.
+ * Esc [ <attr> ; <attr> ; ... m
+ */
+static void tty3270_set_attributes(struct tty3270 *tp)
+{
+ int i, attr;
+
+ for (i = 0; i <= tp->esc_npar; i++) {
+ attr = tp->esc_par[i];
+ switch (attr) {
+ case 0: /* Reset */
+ tty3270_reset_attributes(&tp->attributes);
+ break;
+ /* Highlight. */
+ case 4: /* Start underlining. */
+ tp->attributes.highlight = TTY3270_HIGHLIGHT_UNDERSCORE;
+ break;
+ case 5: /* Start blink. */
+ tp->attributes.highlight = TTY3270_HIGHLIGHT_BLINK;
+ break;
+ case 7: /* Start reverse. */
+ tp->attributes.highlight = TTY3270_HIGHLIGHT_REVERSE;
+ break;
+ case 24: /* End underlining */
+ tp->attributes.highlight &= ~TTY3270_HIGHLIGHT_UNDERSCORE;
+ break;
+ case 25: /* End blink. */
+ tp->attributes.highlight &= ~TTY3270_HIGHLIGHT_BLINK;
+ break;
+ case 27: /* End reverse. */
+ tp->attributes.highlight &= ~TTY3270_HIGHLIGHT_REVERSE;
+ break;
+ /* Foreground color. */
+ case 30: /* Black */
+ case 31: /* Red */
+ case 32: /* Green */
+ case 33: /* Yellow */
+ case 34: /* Blue */
+ case 35: /* Magenta */
+ case 36: /* Cyan */
+ case 37: /* White */
+ case 39: /* Black */
+ tp->attributes.f_color = attr - 30;
+ break;
+ /* Background color. */
+ case 40: /* Black */
+ case 41: /* Red */
+ case 42: /* Green */
+ case 43: /* Yellow */
+ case 44: /* Blue */
+ case 45: /* Magenta */
+ case 46: /* Cyan */
+ case 47: /* White */
+ case 49: /* Black */
+ tp->attributes.b_color = attr - 40;
+ break;
+ }
+ }
+}
+
+static inline int tty3270_getpar(struct tty3270 *tp, int ix)
+{
+ return (tp->esc_par[ix] > 0) ? tp->esc_par[ix] : 1;
+}
+
+static void tty3270_goto_xy(struct tty3270 *tp, int cx, int cy)
+{
+ struct tty3270_line *line;
+ struct tty3270_cell *cell;
+ int max_cx = max(0, cx);
+ int max_cy = max(0, cy);
+
+ tp->cx = min_t(int, tp->view.cols - 1, max_cx);
+ line = tty3270_get_write_line(tp, tp->cy);
+ while (line->len < tp->cx) {
+ cell = line->cells + line->len;
+ cell->character = ' ';
+ cell->attributes = tp->attributes;
+ line->len++;
+ }
+ tp->cy = min_t(int, tty3270_tty_rows(tp) - 1, max_cy);
+}
+
+/*
+ * Process escape sequences. Known sequences:
+ * Esc 7 Save Cursor Position
+ * Esc 8 Restore Cursor Position
+ * Esc [ Pn ; Pn ; .. m Set attributes
+ * Esc [ Pn ; Pn H Cursor Position
+ * Esc [ Pn ; Pn f Cursor Position
+ * Esc [ Pn A Cursor Up
+ * Esc [ Pn B Cursor Down
+ * Esc [ Pn C Cursor Forward
+ * Esc [ Pn D Cursor Backward
+ * Esc [ Pn G Cursor Horizontal Absolute
+ * Esc [ Pn X Erase Characters
+ * Esc [ Ps J Erase in Display
+ * Esc [ Ps K Erase in Line
+ * // FIXME: add all the new ones.
+ *
+ * Pn is a numeric parameter, a string of zero or more decimal digits.
+ * Ps is a selective parameter.
+ */
+static void tty3270_escape_sequence(struct tty3270 *tp, char ch)
+{
+ enum { ES_NORMAL, ES_ESC, ES_SQUARE, ES_PAREN, ES_GETPARS };
+
+ if (tp->esc_state == ES_NORMAL) {
+ if (ch == 0x1b)
+ /* Starting new escape sequence. */
+ tp->esc_state = ES_ESC;
+ return;
+ }
+ if (tp->esc_state == ES_ESC) {
+ tp->esc_state = ES_NORMAL;
+ switch (ch) {
+ case '[':
+ tp->esc_state = ES_SQUARE;
+ break;
+ case '(':
+ tp->esc_state = ES_PAREN;
+ break;
+ case 'E':
+ tty3270_cr(tp);
+ tty3270_lf(tp);
+ break;
+ case 'M':
+ tty3270_ri(tp);
+ break;
+ case 'D':
+ tty3270_lf(tp);
+ break;
+ case 'Z': /* Respond ID. */
+ kbd_puts_queue(&tp->port, "\033[?6c");
+ break;
+ case '7': /* Save cursor position. */
+ tp->saved_cx = tp->cx;
+ tp->saved_cy = tp->cy;
+ tp->saved_attributes = tp->attributes;
+ break;
+ case '8': /* Restore cursor position. */
+ tty3270_goto_xy(tp, tp->saved_cx, tp->saved_cy);
+ tp->attributes = tp->saved_attributes;
+ break;
+ case 'c': /* Reset terminal. */
+ tp->cx = 0;
+ tp->cy = 0;
+ tp->saved_cx = 0;
+ tp->saved_cy = 0;
+ tty3270_reset_attributes(&tp->attributes);
+ tty3270_reset_attributes(&tp->saved_attributes);
+ tty3270_erase_display(tp, 2);
+ break;
+ }
+ return;
+ }
+
+ switch (tp->esc_state) {
+ case ES_PAREN:
+ tp->esc_state = ES_NORMAL;
+ switch (ch) {
+ case 'B':
+ tp->attributes.alternate_charset = 0;
+ break;
+ case '0':
+ tp->attributes.alternate_charset = 1;
+ break;
+ }
+ return;
+ case ES_SQUARE:
+ tp->esc_state = ES_GETPARS;
+ memset(tp->esc_par, 0, sizeof(tp->esc_par));
+ tp->esc_npar = 0;
+ tp->esc_ques = (ch == '?');
+ if (tp->esc_ques)
+ return;
+ fallthrough;
+ case ES_GETPARS:
+ if (ch == ';' && tp->esc_npar < ESCAPE_NPAR - 1) {
+ tp->esc_npar++;
+ return;
+ }
+ if (ch >= '0' && ch <= '9') {
+ tp->esc_par[tp->esc_npar] *= 10;
+ tp->esc_par[tp->esc_npar] += ch - '0';
+ return;
+ }
+ break;
+ default:
+ break;
+ }
+ tp->esc_state = ES_NORMAL;
+ if (ch == 'n' && !tp->esc_ques) {
+ if (tp->esc_par[0] == 5) /* Status report. */
+ kbd_puts_queue(&tp->port, "\033[0n");
+ else if (tp->esc_par[0] == 6) { /* Cursor report. */
+ char buf[40];
+
+ sprintf(buf, "\033[%d;%dR", tp->cy + 1, tp->cx + 1);
+ kbd_puts_queue(&tp->port, buf);
+ }
+ return;
+ }
+ if (tp->esc_ques)
+ return;
+ switch (ch) {
+ case 'm':
+ tty3270_set_attributes(tp);
+ break;
+ case 'H': /* Set cursor position. */
+ case 'f':
+ tty3270_goto_xy(tp, tty3270_getpar(tp, 1) - 1,
+ tty3270_getpar(tp, 0) - 1);
+ break;
+ case 'd': /* Set y position. */
+ tty3270_goto_xy(tp, tp->cx, tty3270_getpar(tp, 0) - 1);
+ break;
+ case 'A': /* Cursor up. */
+ case 'F':
+ tty3270_goto_xy(tp, tp->cx, tp->cy - tty3270_getpar(tp, 0));
+ break;
+ case 'B': /* Cursor down. */
+ case 'e':
+ case 'E':
+ tty3270_goto_xy(tp, tp->cx, tp->cy + tty3270_getpar(tp, 0));
+ break;
+ case 'C': /* Cursor forward. */
+ case 'a':
+ tty3270_goto_xy(tp, tp->cx + tty3270_getpar(tp, 0), tp->cy);
+ break;
+ case 'D': /* Cursor backward. */
+ tty3270_goto_xy(tp, tp->cx - tty3270_getpar(tp, 0), tp->cy);
+ break;
+ case 'G': /* Set x position. */
+ case '`':
+ tty3270_goto_xy(tp, tty3270_getpar(tp, 0), tp->cy);
+ break;
+ case 'X': /* Erase Characters. */
+ tty3270_erase_characters(tp, tty3270_getpar(tp, 0));
+ break;
+ case 'J': /* Erase display. */
+ tty3270_erase_display(tp, tp->esc_par[0]);
+ break;
+ case 'K': /* Erase line. */
+ tty3270_erase_line(tp, tp->esc_par[0]);
+ break;
+ case 'P': /* Delete characters. */
+ tty3270_delete_characters(tp, tty3270_getpar(tp, 0));
+ break;
+ case '@': /* Insert characters. */
+ tty3270_insert_characters(tp, tty3270_getpar(tp, 0));
+ break;
+ case 's': /* Save cursor position. */
+ tp->saved_cx = tp->cx;
+ tp->saved_cy = tp->cy;
+ tp->saved_attributes = tp->attributes;
+ break;
+ case 'u': /* Restore cursor position. */
+ tty3270_goto_xy(tp, tp->saved_cx, tp->saved_cy);
+ tp->attributes = tp->saved_attributes;
+ break;
+ }
+}
+
+/*
+ * String write routine for 3270 ttys
+ */
+static void tty3270_do_write(struct tty3270 *tp, struct tty_struct *tty,
+ const unsigned char *buf, int count)
+{
+ int i_msg, i;
+
+ spin_lock_irq(&tp->view.lock);
+ for (i_msg = 0; !tty->flow.stopped && i_msg < count; i_msg++) {
+ if (tp->esc_state != 0) {
+ /* Continue escape sequence. */
+ tty3270_escape_sequence(tp, buf[i_msg]);
+ continue;
+ }
+
+ switch (buf[i_msg]) {
+ case 0x00:
+ break;
+ case 0x07: /* '\a' -- Alarm */
+ tp->wcc |= TW_PLUSALARM;
+ break;
+ case 0x08: /* Backspace. */
+ if (tp->cx > 0) {
+ tp->cx--;
+ tty3270_put_character(tp, ' ');
+ }
+ break;
+ case 0x09: /* '\t' -- Tabulate */
+ for (i = tp->cx % 8; i < 8; i++) {
+ if (tp->cx >= tp->view.cols) {
+ tty3270_cr(tp);
+ tty3270_lf(tp);
+ break;
+ }
+ tty3270_put_character(tp, ' ');
+ tp->cx++;
+ }
+ break;
+ case 0x0a: /* '\n' -- New Line */
+ tty3270_cr(tp);
+ tty3270_lf(tp);
+ break;
+ case 0x0c: /* '\f' -- Form Feed */
+ tty3270_erase_display(tp, 2);
+ tp->cx = 0;
+ tp->cy = 0;
+ break;
+ case 0x0d: /* '\r' -- Carriage Return */
+ tp->cx = 0;
+ break;
+ case 0x0e:
+ tp->attributes.alternate_charset = 1;
+ break;
+ case 0x0f: /* SuSE "exit alternate mode" */
+ tp->attributes.alternate_charset = 0;
+ break;
+ case 0x1b: /* Start escape sequence. */
+ tty3270_escape_sequence(tp, buf[i_msg]);
+ break;
+ default: /* Insert normal character. */
+ if (tp->cx >= tp->view.cols) {
+ tty3270_cr(tp);
+ tty3270_lf(tp);
+ }
+ tty3270_put_character(tp, buf[i_msg]);
+ tp->cx++;
+ break;
+ }
+ }
+ /* Setup timer to update display after 1/10 second */
+ tp->update_flags |= TTY_UPDATE_LINES;
+ if (!timer_pending(&tp->timer))
+ tty3270_set_timer(tp, msecs_to_jiffies(100));
+
+ spin_unlock_irq(&tp->view.lock);
+}
+
+/*
+ * String write routine for 3270 ttys
+ */
+static int tty3270_write(struct tty_struct *tty,
+ const unsigned char *buf, int count)
+{
+ struct tty3270 *tp;
+
+ tp = tty->driver_data;
+ if (!tp)
+ return 0;
+ if (tp->char_count > 0) {
+ tty3270_do_write(tp, tty, tp->char_buf, tp->char_count);
+ tp->char_count = 0;
+ }
+ tty3270_do_write(tp, tty, buf, count);
+ return count;
+}
+
+/*
+ * Put single characters to the ttys character buffer
+ */
+static int tty3270_put_char(struct tty_struct *tty, unsigned char ch)
+{
+ struct tty3270 *tp;
+
+ tp = tty->driver_data;
+ if (!tp || tp->char_count >= TTY3270_CHAR_BUF_SIZE)
+ return 0;
+ tp->char_buf[tp->char_count++] = ch;
+ return 1;
+}
+
+/*
+ * Flush all characters from the ttys characeter buffer put there
+ * by tty3270_put_char.
+ */
+static void tty3270_flush_chars(struct tty_struct *tty)
+{
+ struct tty3270 *tp;
+
+ tp = tty->driver_data;
+ if (!tp)
+ return;
+ if (tp->char_count > 0) {
+ tty3270_do_write(tp, tty, tp->char_buf, tp->char_count);
+ tp->char_count = 0;
}
- /* Replace cline with allocated line s and reset cline. */
- list_add(&s->list, &cp->cline->list);
- list_del_init(&cp->cline->list);
- if (!list_empty(&cp->cline->update)) {
- list_add(&s->update, &cp->cline->update);
- list_del_init(&cp->cline->update);
+}
+
+/*
+ * Check for visible/invisible input switches
+ */
+static void tty3270_set_termios(struct tty_struct *tty, const struct ktermios *old)
+{
+ struct tty3270 *tp;
+ int new;
+
+ tp = tty->driver_data;
+ if (!tp)
+ return;
+ spin_lock_irq(&tp->view.lock);
+ if (L_ICANON(tty)) {
+ new = L_ECHO(tty) ? TF_INPUT : TF_INPUTN;
+ if (new != tp->inattr) {
+ tp->inattr = new;
+ tty3270_update_prompt(tp, "");
+ tty3270_set_timer(tp, 1);
+ }
}
- cp->cline->len = 0;
+ spin_unlock_irq(&tp->view.lock);
}
/*
- * Write a string to the 3270 console
+ * Disable reading from a 3270 tty
*/
+static void tty3270_throttle(struct tty_struct *tty)
+{
+ struct tty3270 *tp;
+
+ tp = tty->driver_data;
+ if (!tp)
+ return;
+ tp->throttle = 1;
+}
+
+/*
+ * Enable reading from a 3270 tty
+ */
+static void tty3270_unthrottle(struct tty_struct *tty)
+{
+ struct tty3270 *tp;
+
+ tp = tty->driver_data;
+ if (!tp)
+ return;
+ tp->throttle = 0;
+ if (tp->attn)
+ tty3270_issue_read(tp, 1);
+}
+
+/*
+ * Hang up the tty device.
+ */
+static void tty3270_hangup(struct tty_struct *tty)
+{
+ struct tty3270 *tp;
+
+ tp = tty->driver_data;
+ if (!tp)
+ return;
+ spin_lock_irq(&tp->view.lock);
+ tp->cx = 0;
+ tp->cy = 0;
+ tp->saved_cx = 0;
+ tp->saved_cy = 0;
+ tty3270_reset_attributes(&tp->attributes);
+ tty3270_reset_attributes(&tp->saved_attributes);
+ tty3270_blank_screen(tp);
+ tp->update_flags = TTY_UPDATE_ALL;
+ spin_unlock_irq(&tp->view.lock);
+ tty3270_set_timer(tp, 1);
+}
+
+static void tty3270_wait_until_sent(struct tty_struct *tty, int timeout)
+{
+}
+
+static int tty3270_ioctl(struct tty_struct *tty, unsigned int cmd,
+ unsigned long arg)
+{
+ struct tty3270 *tp;
+
+ tp = tty->driver_data;
+ if (!tp)
+ return -ENODEV;
+ if (tty_io_error(tty))
+ return -EIO;
+ return kbd_ioctl(tp->kbd, cmd, arg);
+}
+
+#ifdef CONFIG_COMPAT
+static long tty3270_compat_ioctl(struct tty_struct *tty,
+ unsigned int cmd, unsigned long arg)
+{
+ struct tty3270 *tp;
+
+ tp = tty->driver_data;
+ if (!tp)
+ return -ENODEV;
+ if (tty_io_error(tty))
+ return -EIO;
+ return kbd_ioctl(tp->kbd, cmd, (unsigned long)compat_ptr(arg));
+}
+#endif
+
+static const struct tty_operations tty3270_ops = {
+ .install = tty3270_install,
+ .cleanup = tty3270_cleanup,
+ .open = tty3270_open,
+ .close = tty3270_close,
+ .write = tty3270_write,
+ .put_char = tty3270_put_char,
+ .flush_chars = tty3270_flush_chars,
+ .write_room = tty3270_write_room,
+ .throttle = tty3270_throttle,
+ .unthrottle = tty3270_unthrottle,
+ .hangup = tty3270_hangup,
+ .wait_until_sent = tty3270_wait_until_sent,
+ .ioctl = tty3270_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = tty3270_compat_ioctl,
+#endif
+ .set_termios = tty3270_set_termios
+};
+
+static void tty3270_create_cb(int minor)
+{
+ tty_register_device(tty3270_driver, minor - RAW3270_FIRSTMINOR, NULL);
+}
+
+static void tty3270_destroy_cb(int minor)
+{
+ tty_unregister_device(tty3270_driver, minor - RAW3270_FIRSTMINOR);
+}
+
+static struct raw3270_notifier tty3270_notifier = {
+ .create = tty3270_create_cb,
+ .destroy = tty3270_destroy_cb,
+};
+
+/*
+ * 3270 tty registration code called from tty_init().
+ * Most kernel services (incl. kmalloc) are available at this poimt.
+ */
+static int __init tty3270_init(void)
+{
+ struct tty_driver *driver;
+ int ret;
+
+ driver = tty_alloc_driver(RAW3270_MAXDEVS,
+ TTY_DRIVER_REAL_RAW |
+ TTY_DRIVER_DYNAMIC_DEV |
+ TTY_DRIVER_RESET_TERMIOS);
+ if (IS_ERR(driver))
+ return PTR_ERR(driver);
+
+ /*
+ * Initialize the tty_driver structure
+ * Entries in tty3270_driver that are NOT initialized:
+ * proc_entry, set_termios, flush_buffer, set_ldisc, write_proc
+ */
+ driver->driver_name = "tty3270";
+ driver->name = "3270/tty";
+ driver->major = IBM_TTY3270_MAJOR;
+ driver->minor_start = RAW3270_FIRSTMINOR;
+ driver->name_base = RAW3270_FIRSTMINOR;
+ driver->type = TTY_DRIVER_TYPE_SYSTEM;
+ driver->subtype = SYSTEM_TYPE_TTY;
+ driver->init_termios = tty_std_termios;
+ tty_set_operations(driver, &tty3270_ops);
+ ret = tty_register_driver(driver);
+ if (ret) {
+ tty_driver_kref_put(driver);
+ return ret;
+ }
+ tty3270_driver = driver;
+ raw3270_register_notifier(&tty3270_notifier);
+ return 0;
+}
+
+static void __exit tty3270_exit(void)
+{
+ struct tty_driver *driver;
+
+ raw3270_unregister_notifier(&tty3270_notifier);
+ driver = tty3270_driver;
+ tty3270_driver = NULL;
+ tty_unregister_driver(driver);
+ tty_driver_kref_put(driver);
+ tty3270_del_views();
+}
+
+#if IS_ENABLED(CONFIG_TN3270_CONSOLE)
+
+static struct tty3270 *condev;
+
static void
con3270_write(struct console *co, const char *str, unsigned int count)
{
- struct con3270 *cp;
+ struct tty3270 *tp = co->data;
unsigned long flags;
- unsigned char c;
+ char c;
- cp = condev;
- spin_lock_irqsave(&cp->view.lock, flags);
- while (count-- > 0) {
+ spin_lock_irqsave(&tp->view.lock, flags);
+ while (count--) {
c = *str++;
- if (cp->cline->len == 0)
- con3270_cline_add(cp);
- if (c != '\n')
- con3270_cline_insert(cp, c);
- if (c == '\n' || cp->cline->len >= cp->view.cols)
- con3270_cline_end(cp);
+ if (c == 0x0a) {
+ tty3270_cr(tp);
+ tty3270_lf(tp);
+ } else {
+ if (tp->cx >= tp->view.cols) {
+ tty3270_cr(tp);
+ tty3270_lf(tp);
+ }
+ tty3270_put_character(tp, c);
+ tp->cx++;
+ }
}
- /* Setup timer to output current console buffer after 1/10 second */
- cp->nr_up = 0;
- if (cp->view.dev && !timer_pending(&cp->timer))
- con3270_set_timer(cp, HZ/10);
- spin_unlock_irqrestore(&cp->view.lock,flags);
+ spin_unlock_irqrestore(&tp->view.lock, flags);
}
static struct tty_driver *
@@ -522,14 +2079,11 @@ con3270_device(struct console *c, int *index)
return tty3270_driver;
}
-/*
- * Wait for end of write request.
- */
static void
-con3270_wait_write(struct con3270 *cp)
+con3270_wait_write(struct tty3270 *tp)
{
- while (!cp->write) {
- raw3270_wait_cons_dev(cp->view.dev);
+ while (!tp->write) {
+ raw3270_wait_cons_dev(tp->view.dev);
barrier();
}
}
@@ -545,28 +2099,30 @@ con3270_wait_write(struct con3270 *cp)
static int con3270_notify(struct notifier_block *self,
unsigned long event, void *data)
{
- struct con3270 *cp;
+ struct tty3270 *tp;
unsigned long flags;
+ int rc;
- cp = condev;
- if (!cp->view.dev)
+ tp = condev;
+ if (!tp->view.dev)
return NOTIFY_DONE;
- if (!raw3270_view_lock_unavailable(&cp->view))
- raw3270_activate_view(&cp->view);
- if (!spin_trylock_irqsave(&cp->view.lock, flags))
+ if (!raw3270_view_lock_unavailable(&tp->view)) {
+ rc = raw3270_activate_view(&tp->view);
+ if (rc)
+ return NOTIFY_DONE;
+ }
+ if (!spin_trylock_irqsave(&tp->view.lock, flags))
return NOTIFY_DONE;
- con3270_wait_write(cp);
- cp->nr_up = 0;
- con3270_rebuild_update(cp);
- con3270_update_status(cp);
- while (cp->update_flags != 0) {
- spin_unlock_irqrestore(&cp->view.lock, flags);
- con3270_update(&cp->timer);
- spin_lock_irqsave(&cp->view.lock, flags);
- con3270_wait_write(cp);
- }
- spin_unlock_irqrestore(&cp->view.lock, flags);
-
+ con3270_wait_write(tp);
+ tp->nr_up = 0;
+ tp->update_flags = TTY_UPDATE_ALL;
+ while (tp->update_flags != 0) {
+ spin_unlock_irqrestore(&tp->view.lock, flags);
+ tty3270_update(&tp->timer);
+ spin_lock_irqsave(&tp->view.lock, flags);
+ con3270_wait_write(tp);
+ }
+ spin_unlock_irqrestore(&tp->view.lock, flags);
return NOTIFY_DONE;
}
@@ -580,9 +2136,6 @@ static struct notifier_block on_reboot_nb = {
.priority = INT_MIN + 1, /* run the callback late */
};
-/*
- * The console structure for the 3270 console
- */
static struct console con3270 = {
.name = "tty3270",
.write = con3270_write,
@@ -590,15 +2143,13 @@ static struct console con3270 = {
.flags = CON_PRINTBUFFER,
};
-/*
- * 3270 console initialization code called from console_init().
- */
static int __init
con3270_init(void)
{
+ struct raw3270_view *view;
struct raw3270 *rp;
- void *cbuf;
- int i;
+ struct tty3270 *tp;
+ int rc;
/* Check if 3270 is to be the console */
if (!CONSOLE_IS_3270)
@@ -614,38 +2165,28 @@ con3270_init(void)
if (IS_ERR(rp))
return PTR_ERR(rp);
- condev = kzalloc(sizeof(struct con3270), GFP_KERNEL | GFP_DMA);
- if (!condev)
- return -ENOMEM;
- condev->view.dev = rp;
-
- condev->read = raw3270_request_alloc(0);
- condev->read->callback = con3270_read_callback;
- condev->read->callback_data = condev;
- condev->write = raw3270_request_alloc(CON3270_OUTPUT_BUFFER_SIZE);
- condev->kreset = raw3270_request_alloc(1);
-
- INIT_LIST_HEAD(&condev->lines);
- INIT_LIST_HEAD(&condev->update);
- timer_setup(&condev->timer, con3270_update, 0);
- tasklet_init(&condev->readlet, con3270_read_tasklet,
- (unsigned long) condev->read);
-
- raw3270_add_view(&condev->view, &con3270_fn, 1, RAW3270_VIEW_LOCK_IRQ);
-
- INIT_LIST_HEAD(&condev->freemem);
- for (i = 0; i < CON3270_STRING_PAGES; i++) {
- cbuf = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
- add_string_memory(&condev->freemem, cbuf, PAGE_SIZE);
- }
- condev->cline = alloc_string(&condev->freemem, condev->view.cols);
- condev->cline->len = 0;
- con3270_create_status(condev);
- condev->input = alloc_string(&condev->freemem, 80);
+ /* Check if the tty3270 is already there. */
+ view = raw3270_find_view(&tty3270_fn, RAW3270_FIRSTMINOR);
+ if (IS_ERR(view)) {
+ rc = tty3270_create_view(0, &tp);
+ if (rc)
+ return rc;
+ } else {
+ tp = container_of(view, struct tty3270, view);
+ tp->inattr = TF_INPUT;
+ }
+ con3270.data = tp;
+ condev = tp;
atomic_notifier_chain_register(&panic_notifier_list, &on_panic_nb);
register_reboot_notifier(&on_reboot_nb);
register_console(&con3270);
return 0;
}
-
console_initcall(con3270_init);
+#endif
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_CHARDEV_MAJOR(IBM_TTY3270_MAJOR);
+
+module_init(tty3270_init);
+module_exit(tty3270_exit);
diff --git a/drivers/s390/char/diag_ftp.c b/drivers/s390/char/diag_ftp.c
index 36bbd6b6e210..65c7f2d565d8 100644
--- a/drivers/s390/char/diag_ftp.c
+++ b/drivers/s390/char/diag_ftp.c
@@ -159,8 +159,8 @@ ssize_t diag_ftp_cmd(const struct hmcdrv_ftp_cmdspec *ftp, size_t *fsize)
goto out;
}
- len = strlcpy(ldfpl->fident, ftp->fname, sizeof(ldfpl->fident));
- if (len >= HMCDRV_FTP_FIDENT_MAX) {
+ len = strscpy(ldfpl->fident, ftp->fname, sizeof(ldfpl->fident));
+ if (len < 0) {
len = -EINVAL;
goto out_free;
}
diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c
index 4c4683d8784a..4f26b0a55620 100644
--- a/drivers/s390/char/fs3270.c
+++ b/drivers/s390/char/fs3270.c
@@ -19,6 +19,7 @@
#include <linux/slab.h>
#include <linux/types.h>
+#include <uapi/asm/fs3270.h>
#include <asm/ccwdev.h>
#include <asm/cio.h>
#include <asm/ebcdic.h>
@@ -44,14 +45,12 @@ struct fs3270 {
static DEFINE_MUTEX(fs3270_mutex);
-static void
-fs3270_wake_up(struct raw3270_request *rq, void *data)
+static void fs3270_wake_up(struct raw3270_request *rq, void *data)
{
- wake_up((wait_queue_head_t *) data);
+ wake_up((wait_queue_head_t *)data);
}
-static inline int
-fs3270_working(struct fs3270 *fp)
+static inline int fs3270_working(struct fs3270 *fp)
{
/*
* The fullscreen view is in working order if the view
@@ -60,13 +59,12 @@ fs3270_working(struct fs3270 *fp)
return fp->active && raw3270_request_final(fp->init);
}
-static int
-fs3270_do_io(struct raw3270_view *view, struct raw3270_request *rq)
+static int fs3270_do_io(struct raw3270_view *view, struct raw3270_request *rq)
{
struct fs3270 *fp;
int rc;
- fp = (struct fs3270 *) view;
+ fp = (struct fs3270 *)view;
rq->callback = fs3270_wake_up;
rq->callback_data = &fp->wait;
@@ -90,22 +88,20 @@ fs3270_do_io(struct raw3270_view *view, struct raw3270_request *rq)
/*
* Switch to the fullscreen view.
*/
-static void
-fs3270_reset_callback(struct raw3270_request *rq, void *data)
+static void fs3270_reset_callback(struct raw3270_request *rq, void *data)
{
struct fs3270 *fp;
- fp = (struct fs3270 *) rq->view;
+ fp = (struct fs3270 *)rq->view;
raw3270_request_reset(rq);
wake_up(&fp->wait);
}
-static void
-fs3270_restore_callback(struct raw3270_request *rq, void *data)
+static void fs3270_restore_callback(struct raw3270_request *rq, void *data)
{
struct fs3270 *fp;
- fp = (struct fs3270 *) rq->view;
+ fp = (struct fs3270 *)rq->view;
if (rq->rc != 0 || rq->rescnt != 0) {
if (fp->fs_pid)
kill_pid(fp->fs_pid, SIGHUP, 1);
@@ -115,29 +111,31 @@ fs3270_restore_callback(struct raw3270_request *rq, void *data)
wake_up(&fp->wait);
}
-static int
-fs3270_activate(struct raw3270_view *view)
+static int fs3270_activate(struct raw3270_view *view)
{
struct fs3270 *fp;
char *cp;
int rc;
- fp = (struct fs3270 *) view;
+ fp = (struct fs3270 *)view;
/* If an old init command is still running just return. */
if (!raw3270_request_final(fp->init))
return 0;
+ raw3270_request_set_cmd(fp->init, TC_EWRITEA);
+ raw3270_request_set_idal(fp->init, fp->rdbuf);
+ fp->init->rescnt = 0;
+ cp = fp->rdbuf->data[0];
if (fp->rdbuf_size == 0) {
/* No saved buffer. Just clear the screen. */
- raw3270_request_set_cmd(fp->init, TC_EWRITEA);
+ fp->init->ccw.count = 1;
fp->init->callback = fs3270_reset_callback;
+ cp[0] = 0;
} else {
/* Restore fullscreen buffer saved by fs3270_deactivate. */
- raw3270_request_set_cmd(fp->init, TC_EWRITEA);
- raw3270_request_set_idal(fp->init, fp->rdbuf);
fp->init->ccw.count = fp->rdbuf_size;
- cp = fp->rdbuf->data[0];
+ fp->init->callback = fs3270_restore_callback;
cp[0] = TW_KR;
cp[1] = TO_SBA;
cp[2] = cp[6];
@@ -146,10 +144,9 @@ fs3270_activate(struct raw3270_view *view)
cp[5] = TO_SBA;
cp[6] = 0x40;
cp[7] = 0x40;
- fp->init->rescnt = 0;
- fp->init->callback = fs3270_restore_callback;
}
- rc = fp->init->rc = raw3270_start_locked(view, fp->init);
+ rc = raw3270_start_locked(view, fp->init);
+ fp->init->rc = rc;
if (rc)
fp->init->callback(fp->init, NULL);
else
@@ -160,12 +157,11 @@ fs3270_activate(struct raw3270_view *view)
/*
* Shutdown fullscreen view.
*/
-static void
-fs3270_save_callback(struct raw3270_request *rq, void *data)
+static void fs3270_save_callback(struct raw3270_request *rq, void *data)
{
struct fs3270 *fp;
- fp = (struct fs3270 *) rq->view;
+ fp = (struct fs3270 *)rq->view;
/* Correct idal buffer element 0 address. */
fp->rdbuf->data[0] -= 5;
@@ -181,18 +177,18 @@ fs3270_save_callback(struct raw3270_request *rq, void *data)
if (fp->fs_pid)
kill_pid(fp->fs_pid, SIGHUP, 1);
fp->rdbuf_size = 0;
- } else
+ } else {
fp->rdbuf_size = fp->rdbuf->size - rq->rescnt;
+ }
raw3270_request_reset(rq);
wake_up(&fp->wait);
}
-static void
-fs3270_deactivate(struct raw3270_view *view)
+static void fs3270_deactivate(struct raw3270_view *view)
{
struct fs3270 *fp;
- fp = (struct fs3270 *) view;
+ fp = (struct fs3270 *)view;
fp->active = 0;
/* If an old init command is still running just return. */
@@ -218,8 +214,8 @@ fs3270_deactivate(struct raw3270_view *view)
fp->init->callback(fp->init, NULL);
}
-static void
-fs3270_irq(struct fs3270 *fp, struct raw3270_request *rq, struct irb *irb)
+static void fs3270_irq(struct fs3270 *fp, struct raw3270_request *rq,
+ struct irb *irb)
{
/* Handle ATTN. Set indication and wake waiters for attention. */
if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) {
@@ -239,14 +235,14 @@ fs3270_irq(struct fs3270 *fp, struct raw3270_request *rq, struct irb *irb)
/*
* Process reads from fullscreen 3270.
*/
-static ssize_t
-fs3270_read(struct file *filp, char __user *data, size_t count, loff_t *off)
+static ssize_t fs3270_read(struct file *filp, char __user *data,
+ size_t count, loff_t *off)
{
struct fs3270 *fp;
struct raw3270_request *rq;
struct idal_buffer *ib;
ssize_t rc;
-
+
if (count == 0 || count > 65535)
return -EINVAL;
fp = filp->private_data;
@@ -271,12 +267,12 @@ fs3270_read(struct file *filp, char __user *data, size_t count, loff_t *off)
rc = -EFAULT;
else
rc = count;
-
}
}
raw3270_request_free(rq);
- } else
+ } else {
rc = PTR_ERR(rq);
+ }
idal_buffer_free(ib);
return rc;
}
@@ -284,8 +280,8 @@ fs3270_read(struct file *filp, char __user *data, size_t count, loff_t *off)
/*
* Process writes to fullscreen 3270.
*/
-static ssize_t
-fs3270_write(struct file *filp, const char __user *data, size_t count, loff_t *off)
+static ssize_t fs3270_write(struct file *filp, const char __user *data,
+ size_t count, loff_t *off)
{
struct fs3270 *fp;
struct raw3270_request *rq;
@@ -310,11 +306,13 @@ fs3270_write(struct file *filp, const char __user *data, size_t count, loff_t *o
rc = fs3270_do_io(&fp->view, rq);
if (rc == 0)
rc = count - rq->rescnt;
- } else
+ } else {
rc = -EFAULT;
+ }
raw3270_request_free(rq);
- } else
+ } else {
rc = PTR_ERR(rq);
+ }
idal_buffer_free(ib);
return rc;
}
@@ -322,8 +320,7 @@ fs3270_write(struct file *filp, const char __user *data, size_t count, loff_t *o
/*
* process ioctl commands for the tube driver
*/
-static long
-fs3270_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+static long fs3270_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
char __user *argp;
struct fs3270 *fp;
@@ -370,12 +367,11 @@ fs3270_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
/*
* Allocate fs3270 structure.
*/
-static struct fs3270 *
-fs3270_alloc_view(void)
+static struct fs3270 *fs3270_alloc_view(void)
{
struct fs3270 *fp;
- fp = kzalloc(sizeof(struct fs3270),GFP_KERNEL);
+ fp = kzalloc(sizeof(*fp), GFP_KERNEL);
if (!fp)
return ERR_PTR(-ENOMEM);
fp->init = raw3270_request_alloc(0);
@@ -389,27 +385,25 @@ fs3270_alloc_view(void)
/*
* Free fs3270 structure.
*/
-static void
-fs3270_free_view(struct raw3270_view *view)
+static void fs3270_free_view(struct raw3270_view *view)
{
struct fs3270 *fp;
- fp = (struct fs3270 *) view;
+ fp = (struct fs3270 *)view;
if (fp->rdbuf)
idal_buffer_free(fp->rdbuf);
- raw3270_request_free(((struct fs3270 *) view)->init);
+ raw3270_request_free(((struct fs3270 *)view)->init);
kfree(view);
}
/*
* Unlink fs3270 data structure from filp.
*/
-static void
-fs3270_release(struct raw3270_view *view)
+static void fs3270_release(struct raw3270_view *view)
{
struct fs3270 *fp;
- fp = (struct fs3270 *) view;
+ fp = (struct fs3270 *)view;
if (fp->fs_pid)
kill_pid(fp->fs_pid, SIGHUP, 1);
}
@@ -418,7 +412,7 @@ fs3270_release(struct raw3270_view *view)
static struct raw3270_fn fs3270_fn = {
.activate = fs3270_activate,
.deactivate = fs3270_deactivate,
- .intv = (void *) fs3270_irq,
+ .intv = (void *)fs3270_irq,
.release = fs3270_release,
.free = fs3270_free_view
};
@@ -426,8 +420,7 @@ static struct raw3270_fn fs3270_fn = {
/*
* This routine is called whenever a 3270 fullscreen device is opened.
*/
-static int
-fs3270_open(struct inode *inode, struct file *filp)
+static int fs3270_open(struct inode *inode, struct file *filp)
{
struct fs3270 *fp;
struct idal_buffer *ib;
@@ -439,6 +432,7 @@ fs3270_open(struct inode *inode, struct file *filp)
/* Check for minor 0 multiplexer. */
if (minor == 0) {
struct tty_struct *tty = get_current_tty();
+
if (!tty || tty->driver->major != IBM_TTY3270_MAJOR) {
tty_kref_put(tty);
return -ENODEV;
@@ -448,7 +442,7 @@ fs3270_open(struct inode *inode, struct file *filp)
}
mutex_lock(&fs3270_mutex);
/* Check if some other program is already using fullscreen mode. */
- fp = (struct fs3270 *) raw3270_find_view(&fs3270_fn, minor);
+ fp = (struct fs3270 *)raw3270_find_view(&fs3270_fn, minor);
if (!IS_ERR(fp)) {
raw3270_put_view(&fp->view);
rc = -EBUSY;
@@ -471,7 +465,7 @@ fs3270_open(struct inode *inode, struct file *filp)
}
/* Allocate idal-buffer. */
- ib = idal_buffer_alloc(2*fp->view.rows*fp->view.cols + 5, 0);
+ ib = idal_buffer_alloc(2 * fp->view.rows * fp->view.cols + 5, 0);
if (IS_ERR(ib)) {
raw3270_put_view(&fp->view);
raw3270_del_view(&fp->view);
@@ -497,8 +491,7 @@ out:
* This routine is called when the 3270 tty is closed. We wait
* for the remaining request to be completed. Then we clean up.
*/
-static int
-fs3270_close(struct inode *inode, struct file *filp)
+static int fs3270_close(struct inode *inode, struct file *filp)
{
struct fs3270 *fp;
@@ -538,8 +531,7 @@ static void fs3270_destroy_cb(int minor)
__unregister_chrdev(IBM_FS3270_MAJOR, minor, 1, "tub");
}
-static struct raw3270_notifier fs3270_notifier =
-{
+static struct raw3270_notifier fs3270_notifier = {
.create = fs3270_create_cb,
.destroy = fs3270_destroy_cb,
};
@@ -547,8 +539,7 @@ static struct raw3270_notifier fs3270_notifier =
/*
* 3270 fullscreen driver initialization.
*/
-static int __init
-fs3270_init(void)
+static int __init fs3270_init(void)
{
int rc;
@@ -561,8 +552,7 @@ fs3270_init(void)
return 0;
}
-static void __exit
-fs3270_exit(void)
+static void __exit fs3270_exit(void)
{
raw3270_unregister_notifier(&fs3270_notifier);
device_destroy(class3270, MKDEV(IBM_FS3270_MAJOR, 0));
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c
index fb3f62ac8be4..09d7570d3b7d 100644
--- a/drivers/s390/char/raw3270.c
+++ b/drivers/s390/char/raw3270.c
@@ -30,6 +30,7 @@
#include <linux/mutex.h>
struct class *class3270;
+EXPORT_SYMBOL(class3270);
/* The main 3270 data structure. */
struct raw3270 {
@@ -37,7 +38,8 @@ struct raw3270 {
struct ccw_device *cdev;
int minor;
- short model, rows, cols;
+ int model, rows, cols;
+ int old_model, old_rows, old_cols;
unsigned int state;
unsigned long flags;
@@ -54,6 +56,7 @@ struct raw3270 {
struct raw3270_request init_readpart;
struct raw3270_request init_readmod;
unsigned char init_data[256];
+ struct work_struct resize_work;
};
/* raw3270->state */
@@ -89,6 +92,7 @@ module_param(tubxcorrect, bool, 0);
* Wait queue for device init/delete, view delete.
*/
DECLARE_WAIT_QUEUE_HEAD(raw3270_wait_queue);
+EXPORT_SYMBOL(raw3270_wait_queue);
static void __raw3270_disconnect(struct raw3270 *rp);
@@ -111,9 +115,15 @@ static inline int raw3270_state_ready(struct raw3270 *rp)
return rp->state == RAW3270_STATE_READY;
}
-void
-raw3270_buffer_address(struct raw3270 *rp, char *cp, unsigned short addr)
+void raw3270_buffer_address(struct raw3270 *rp, char *cp, int x, int y)
{
+ int addr;
+
+ if (x < 0)
+ x = max_t(int, 0, rp->view->cols + x);
+ if (y < 0)
+ y = max_t(int, 0, rp->view->rows + y);
+ addr = (y * rp->view->cols) + x;
if (test_bit(RAW3270_FLAGS_14BITADDR, &rp->flags)) {
cp[0] = (addr >> 8) & 0x3f;
cp[1] = addr & 0xff;
@@ -122,17 +132,17 @@ raw3270_buffer_address(struct raw3270 *rp, char *cp, unsigned short addr)
cp[1] = raw3270_ebcgraf[addr & 0x3f];
}
}
+EXPORT_SYMBOL(raw3270_buffer_address);
/*
* Allocate a new 3270 ccw request
*/
-struct raw3270_request *
-raw3270_request_alloc(size_t size)
+struct raw3270_request *raw3270_request_alloc(size_t size)
{
struct raw3270_request *rq;
/* Allocate request structure */
- rq = kzalloc(sizeof(struct raw3270_request), GFP_KERNEL | GFP_DMA);
+ rq = kzalloc(sizeof(*rq), GFP_KERNEL | GFP_DMA);
if (!rq)
return ERR_PTR(-ENOMEM);
@@ -155,46 +165,48 @@ raw3270_request_alloc(size_t size)
return rq;
}
+EXPORT_SYMBOL(raw3270_request_alloc);
/*
* Free 3270 ccw request
*/
-void
-raw3270_request_free (struct raw3270_request *rq)
+void raw3270_request_free(struct raw3270_request *rq)
{
kfree(rq->buffer);
kfree(rq);
}
+EXPORT_SYMBOL(raw3270_request_free);
/*
* Reset request to initial state.
*/
-void
-raw3270_request_reset(struct raw3270_request *rq)
+int raw3270_request_reset(struct raw3270_request *rq)
{
- BUG_ON(!list_empty(&rq->list));
+ if (WARN_ON_ONCE(!list_empty(&rq->list)))
+ return -EBUSY;
rq->ccw.cmd_code = 0;
rq->ccw.count = 0;
rq->ccw.cda = __pa(rq->buffer);
rq->ccw.flags = CCW_FLAG_SLI;
rq->rescnt = 0;
rq->rc = 0;
+ return 0;
}
+EXPORT_SYMBOL(raw3270_request_reset);
/*
* Set command code to ccw of a request.
*/
-void
-raw3270_request_set_cmd(struct raw3270_request *rq, u8 cmd)
+void raw3270_request_set_cmd(struct raw3270_request *rq, u8 cmd)
{
rq->ccw.cmd_code = cmd;
}
+EXPORT_SYMBOL(raw3270_request_set_cmd);
/*
* Add data fragment to output buffer.
*/
-int
-raw3270_request_add_data(struct raw3270_request *rq, void *data, size_t size)
+int raw3270_request_add_data(struct raw3270_request *rq, void *data, size_t size)
{
if (size + rq->ccw.count > rq->size)
return -E2BIG;
@@ -202,35 +214,35 @@ raw3270_request_add_data(struct raw3270_request *rq, void *data, size_t size)
rq->ccw.count += size;
return 0;
}
+EXPORT_SYMBOL(raw3270_request_add_data);
/*
* Set address/length pair to ccw of a request.
*/
-void
-raw3270_request_set_data(struct raw3270_request *rq, void *data, size_t size)
+void raw3270_request_set_data(struct raw3270_request *rq, void *data, size_t size)
{
rq->ccw.cda = __pa(data);
rq->ccw.count = size;
}
+EXPORT_SYMBOL(raw3270_request_set_data);
/*
* Set idal buffer to ccw of a request.
*/
-void
-raw3270_request_set_idal(struct raw3270_request *rq, struct idal_buffer *ib)
+void raw3270_request_set_idal(struct raw3270_request *rq, struct idal_buffer *ib)
{
rq->ccw.cda = __pa(ib->data);
rq->ccw.count = ib->size;
rq->ccw.flags |= CCW_FLAG_IDA;
}
+EXPORT_SYMBOL(raw3270_request_set_idal);
/*
* Add the request to the request queue, try to start it if the
* 3270 device is idle. Return without waiting for end of i/o.
*/
-static int
-__raw3270_start(struct raw3270 *rp, struct raw3270_view *view,
- struct raw3270_request *rq)
+static int __raw3270_start(struct raw3270 *rp, struct raw3270_view *view,
+ struct raw3270_request *rq)
{
rq->view = view;
raw3270_get_view(view);
@@ -238,7 +250,7 @@ __raw3270_start(struct raw3270 *rp, struct raw3270_view *view,
!test_bit(RAW3270_FLAGS_BUSY, &rp->flags)) {
/* No other requests are on the queue. Start this one. */
rq->rc = ccw_device_start(rp->cdev, &rq->ccw,
- (unsigned long) rq, 0, 0);
+ (unsigned long)rq, 0, 0);
if (rq->rc) {
raw3270_put_view(view);
return rq->rc;
@@ -248,16 +260,14 @@ __raw3270_start(struct raw3270 *rp, struct raw3270_view *view,
return 0;
}
-int
-raw3270_view_active(struct raw3270_view *view)
+int raw3270_view_active(struct raw3270_view *view)
{
struct raw3270 *rp = view->dev;
return rp && rp->view == view;
}
-int
-raw3270_start(struct raw3270_view *view, struct raw3270_request *rq)
+int raw3270_start(struct raw3270_view *view, struct raw3270_request *rq)
{
unsigned long flags;
struct raw3270 *rp;
@@ -274,9 +284,25 @@ raw3270_start(struct raw3270_view *view, struct raw3270_request *rq)
spin_unlock_irqrestore(get_ccwdev_lock(view->dev->cdev), flags);
return rc;
}
+EXPORT_SYMBOL(raw3270_start);
-int
-raw3270_start_locked(struct raw3270_view *view, struct raw3270_request *rq)
+int raw3270_start_request(struct raw3270_view *view, struct raw3270_request *rq,
+ int cmd, void *data, size_t len)
+{
+ int rc;
+
+ rc = raw3270_request_reset(rq);
+ if (rc)
+ return rc;
+ raw3270_request_set_cmd(rq, cmd);
+ rc = raw3270_request_add_data(rq, data, len);
+ if (rc)
+ return rc;
+ return raw3270_start(view, rq);
+}
+EXPORT_SYMBOL(raw3270_start_request);
+
+int raw3270_start_locked(struct raw3270_view *view, struct raw3270_request *rq)
{
struct raw3270 *rp;
int rc;
@@ -290,9 +316,9 @@ raw3270_start_locked(struct raw3270_view *view, struct raw3270_request *rq)
rc = __raw3270_start(rp, view, rq);
return rc;
}
+EXPORT_SYMBOL(raw3270_start_locked);
-int
-raw3270_start_irq(struct raw3270_view *view, struct raw3270_request *rq)
+int raw3270_start_irq(struct raw3270_view *view, struct raw3270_request *rq)
{
struct raw3270 *rp;
@@ -302,12 +328,12 @@ raw3270_start_irq(struct raw3270_view *view, struct raw3270_request *rq)
list_add_tail(&rq->list, &rp->req_queue);
return 0;
}
+EXPORT_SYMBOL(raw3270_start_irq);
/*
* 3270 interrupt routine, called from the ccw_device layer
*/
-static void
-raw3270_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
+static void raw3270_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
{
struct raw3270 *rp;
struct raw3270_view *view;
@@ -316,7 +342,7 @@ raw3270_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
rp = dev_get_drvdata(&cdev->dev);
if (!rp)
return;
- rq = (struct raw3270_request *) intparm;
+ rq = (struct raw3270_request *)intparm;
view = rq ? rq->view : rp->view;
if (!IS_ERR(irb)) {
@@ -357,9 +383,9 @@ raw3270_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
* started successful.
*/
while (!list_empty(&rp->req_queue)) {
- rq = list_entry(rp->req_queue.next,struct raw3270_request,list);
+ rq = list_entry(rp->req_queue.next, struct raw3270_request, list);
rq->rc = ccw_device_start(rp->cdev, &rq->ccw,
- (unsigned long) rq, 0, 0);
+ (unsigned long)rq, 0, 0);
if (rq->rc == 0)
break;
/* Start failed. Remove request and do callback. */
@@ -399,7 +425,7 @@ struct raw3270_ua { /* Query Reply structure for Usable Area */
char ymin;
char xmax;
char ymax;
- } __attribute__ ((packed)) uab;
+ } __packed uab;
struct { /* Alternate Usable Area Self-Defining Parameter */
char l; /* Length of this Self-Defining Parm */
char sdpid; /* 0x02 if Alternate Usable Area */
@@ -412,17 +438,27 @@ struct raw3270_ua { /* Query Reply structure for Usable Area */
int auayr;
char awauai;
char ahauai;
- } __attribute__ ((packed)) aua;
-} __attribute__ ((packed));
+ } __packed aua;
+} __packed;
-static void
-raw3270_size_device_vm(struct raw3270 *rp)
+static void raw3270_size_device_vm(struct raw3270 *rp)
{
int rc, model;
struct ccw_dev_id dev_id;
struct diag210 diag_data;
+ struct diag8c diag8c_data;
ccw_device_get_id(rp->cdev, &dev_id);
+ rc = diag8c(&diag8c_data, &dev_id);
+ if (!rc) {
+ rp->model = 2;
+ rp->rows = diag8c_data.height;
+ rp->cols = diag8c_data.width;
+ if (diag8c_data.flags & 1)
+ set_bit(RAW3270_FLAGS_14BITADDR, &rp->flags);
+ return;
+ }
+
diag_data.vrdcdvno = dev_id.devno;
diag_data.vrdclen = sizeof(struct diag210);
rc = diag210(&diag_data);
@@ -454,16 +490,14 @@ raw3270_size_device_vm(struct raw3270 *rp)
}
}
-static void
-raw3270_size_device(struct raw3270 *rp)
+static void raw3270_size_device(struct raw3270 *rp, char *init_data)
{
struct raw3270_ua *uap;
/* Got a Query Reply */
- uap = (struct raw3270_ua *) (rp->init_data + 1);
+ uap = (struct raw3270_ua *)(init_data + 1);
/* Paranoia check. */
- if (rp->init_readmod.rc || rp->init_data[0] != 0x88 ||
- uap->uab.qcode != 0x81) {
+ if (init_data[0] != 0x88 || uap->uab.qcode != 0x81) {
/* Couldn't detect size. Use default model 2. */
rp->model = 2;
rp->rows = 24;
@@ -494,17 +528,20 @@ raw3270_size_device(struct raw3270 *rp)
rp->model = 5;
}
-static void
-raw3270_size_device_done(struct raw3270 *rp)
+static void raw3270_resize_work(struct work_struct *work)
{
+ struct raw3270 *rp = container_of(work, struct raw3270, resize_work);
struct raw3270_view *view;
- rp->view = NULL;
- rp->state = RAW3270_STATE_READY;
/* Notify views about new size */
- list_for_each_entry(view, &rp->view_list, list)
+ list_for_each_entry(view, &rp->view_list, list) {
if (view->fn->resize)
- view->fn->resize(view, rp->model, rp->rows, rp->cols);
+ view->fn->resize(view, rp->model, rp->rows, rp->cols,
+ rp->old_model, rp->old_rows, rp->old_cols);
+ }
+ rp->old_cols = rp->cols;
+ rp->old_rows = rp->rows;
+ rp->old_model = rp->model;
/* Setup processing done, now activate a view */
list_for_each_entry(view, &rp->view_list, list) {
rp->view = view;
@@ -514,17 +551,23 @@ raw3270_size_device_done(struct raw3270 *rp)
}
}
-static void
-raw3270_read_modified_cb(struct raw3270_request *rq, void *data)
+static void raw3270_size_device_done(struct raw3270 *rp)
+{
+ rp->view = NULL;
+ rp->state = RAW3270_STATE_READY;
+ schedule_work(&rp->resize_work);
+}
+
+void raw3270_read_modified_cb(struct raw3270_request *rq, void *data)
{
struct raw3270 *rp = rq->view->dev;
- raw3270_size_device(rp);
+ raw3270_size_device(rp, data);
raw3270_size_device_done(rp);
}
+EXPORT_SYMBOL(raw3270_read_modified_cb);
-static void
-raw3270_read_modified(struct raw3270 *rp)
+static void raw3270_read_modified(struct raw3270 *rp)
{
if (rp->state != RAW3270_STATE_W4ATTN)
return;
@@ -534,17 +577,18 @@ raw3270_read_modified(struct raw3270 *rp)
rp->init_readmod.ccw.cmd_code = TC_READMOD;
rp->init_readmod.ccw.flags = CCW_FLAG_SLI;
rp->init_readmod.ccw.count = sizeof(rp->init_data);
- rp->init_readmod.ccw.cda = (__u32) __pa(rp->init_data);
+ rp->init_readmod.ccw.cda = (__u32)__pa(rp->init_data);
rp->init_readmod.callback = raw3270_read_modified_cb;
+ rp->init_readmod.callback_data = rp->init_data;
rp->state = RAW3270_STATE_READMOD;
raw3270_start_irq(&rp->init_view, &rp->init_readmod);
}
-static void
-raw3270_writesf_readpart(struct raw3270 *rp)
+static void raw3270_writesf_readpart(struct raw3270 *rp)
{
- static const unsigned char wbuf[] =
- { 0x00, 0x07, 0x01, 0xff, 0x03, 0x00, 0x81 };
+ static const unsigned char wbuf[] = {
+ 0x00, 0x07, 0x01, 0xff, 0x03, 0x00, 0x81
+ };
/* Store 'read partition' data stream to init_data */
memset(&rp->init_readpart, 0, sizeof(rp->init_readpart));
@@ -553,7 +597,7 @@ raw3270_writesf_readpart(struct raw3270 *rp)
rp->init_readpart.ccw.cmd_code = TC_WRITESF;
rp->init_readpart.ccw.flags = CCW_FLAG_SLI;
rp->init_readpart.ccw.count = sizeof(wbuf);
- rp->init_readpart.ccw.cda = (__u32) __pa(&rp->init_data);
+ rp->init_readpart.ccw.cda = (__u32)__pa(&rp->init_data);
rp->state = RAW3270_STATE_W4ATTN;
raw3270_start_irq(&rp->init_view, &rp->init_readpart);
}
@@ -561,8 +605,7 @@ raw3270_writesf_readpart(struct raw3270 *rp)
/*
* Device reset
*/
-static void
-raw3270_reset_device_cb(struct raw3270_request *rq, void *data)
+static void raw3270_reset_device_cb(struct raw3270_request *rq, void *data)
{
struct raw3270 *rp = rq->view->dev;
@@ -574,13 +617,13 @@ raw3270_reset_device_cb(struct raw3270_request *rq, void *data)
} else if (MACHINE_IS_VM) {
raw3270_size_device_vm(rp);
raw3270_size_device_done(rp);
- } else
+ } else {
raw3270_writesf_readpart(rp);
+ }
memset(&rp->init_reset, 0, sizeof(rp->init_reset));
}
-static int
-__raw3270_reset_device(struct raw3270 *rp)
+static int __raw3270_reset_device(struct raw3270 *rp)
{
int rc;
@@ -592,7 +635,7 @@ __raw3270_reset_device(struct raw3270 *rp)
rp->init_reset.ccw.cmd_code = TC_EWRITEA;
rp->init_reset.ccw.flags = CCW_FLAG_SLI;
rp->init_reset.ccw.count = 1;
- rp->init_reset.ccw.cda = (__u32) __pa(rp->init_data);
+ rp->init_reset.ccw.cda = (__u32)__pa(rp->init_data);
rp->init_reset.callback = raw3270_reset_device_cb;
rc = __raw3270_start(rp, &rp->init_view, &rp->init_reset);
if (rc == 0 && rp->state == RAW3270_STATE_INIT)
@@ -600,8 +643,7 @@ __raw3270_reset_device(struct raw3270 *rp)
return rc;
}
-static int
-raw3270_reset_device(struct raw3270 *rp)
+static int raw3270_reset_device(struct raw3270 *rp)
{
unsigned long flags;
int rc;
@@ -612,8 +654,7 @@ raw3270_reset_device(struct raw3270 *rp)
return rc;
}
-int
-raw3270_reset(struct raw3270_view *view)
+int raw3270_reset(struct raw3270_view *view)
{
struct raw3270 *rp;
int rc;
@@ -627,9 +668,9 @@ raw3270_reset(struct raw3270_view *view)
rc = raw3270_reset_device(view->dev);
return rc;
}
+EXPORT_SYMBOL(raw3270_reset);
-static void
-__raw3270_disconnect(struct raw3270 *rp)
+static void __raw3270_disconnect(struct raw3270 *rp)
{
struct raw3270_request *rq;
struct raw3270_view *view;
@@ -638,7 +679,7 @@ __raw3270_disconnect(struct raw3270 *rp)
rp->view = &rp->init_view;
/* Cancel all queued requests */
while (!list_empty(&rp->req_queue)) {
- rq = list_entry(rp->req_queue.next,struct raw3270_request,list);
+ rq = list_entry(rp->req_queue.next, struct raw3270_request, list);
view = rq->view;
rq->rc = -EACCES;
list_del_init(&rq->list);
@@ -650,9 +691,8 @@ __raw3270_disconnect(struct raw3270 *rp)
__raw3270_reset_device(rp);
}
-static void
-raw3270_init_irq(struct raw3270_view *view, struct raw3270_request *rq,
- struct irb *irb)
+static void raw3270_init_irq(struct raw3270_view *view, struct raw3270_request *rq,
+ struct irb *irb)
{
struct raw3270 *rp;
@@ -678,8 +718,8 @@ static struct raw3270_fn raw3270_init_fn = {
/*
* Setup new 3270 device.
*/
-static int
-raw3270_setup_device(struct ccw_device *cdev, struct raw3270 *rp, char *ascebc)
+static int raw3270_setup_device(struct ccw_device *cdev, struct raw3270 *rp,
+ char *ascebc)
{
struct list_head *l;
struct raw3270 *tmp;
@@ -699,6 +739,8 @@ raw3270_setup_device(struct ccw_device *cdev, struct raw3270 *rp, char *ascebc)
/* Set defaults. */
rp->rows = 24;
rp->cols = 80;
+ rp->old_rows = rp->rows;
+ rp->old_cols = rp->cols;
INIT_LIST_HEAD(&rp->req_queue);
INIT_LIST_HEAD(&rp->view_list);
@@ -706,6 +748,7 @@ raw3270_setup_device(struct ccw_device *cdev, struct raw3270 *rp, char *ascebc)
rp->init_view.dev = rp;
rp->init_view.fn = &raw3270_init_fn;
rp->view = &rp->init_view;
+ INIT_WORK(&rp->resize_work, raw3270_resize_work);
/*
* Add device to list and find the smallest unused minor
@@ -764,7 +807,7 @@ struct raw3270 __init *raw3270_setup_console(void)
if (IS_ERR(cdev))
return ERR_CAST(cdev);
- rp = kzalloc(sizeof(struct raw3270), GFP_KERNEL | GFP_DMA);
+ rp = kzalloc(sizeof(*rp), GFP_KERNEL | GFP_DMA);
ascebc = kzalloc(256, GFP_KERNEL);
rc = raw3270_setup_device(cdev, rp, ascebc);
if (rc)
@@ -789,8 +832,7 @@ struct raw3270 __init *raw3270_setup_console(void)
return rp;
}
-void
-raw3270_wait_cons_dev(struct raw3270 *rp)
+void raw3270_wait_cons_dev(struct raw3270 *rp)
{
unsigned long flags;
@@ -804,14 +846,13 @@ raw3270_wait_cons_dev(struct raw3270 *rp)
/*
* Create a 3270 device structure.
*/
-static struct raw3270 *
-raw3270_create_device(struct ccw_device *cdev)
+static struct raw3270 *raw3270_create_device(struct ccw_device *cdev)
{
struct raw3270 *rp;
char *ascebc;
int rc;
- rp = kzalloc(sizeof(struct raw3270), GFP_KERNEL | GFP_DMA);
+ rp = kzalloc(sizeof(*rp), GFP_KERNEL | GFP_DMA);
if (!rp)
return ERR_PTR(-ENOMEM);
ascebc = kmalloc(256, GFP_KERNEL);
@@ -845,14 +886,57 @@ int raw3270_view_lock_unavailable(struct raw3270_view *view)
return 0;
}
+static int raw3270_assign_activate_view(struct raw3270 *rp, struct raw3270_view *view)
+{
+ rp->view = view;
+ return view->fn->activate(view);
+}
+
+static int __raw3270_activate_view(struct raw3270 *rp, struct raw3270_view *view)
+{
+ struct raw3270_view *oldview = NULL, *nv;
+ int rc;
+
+ if (rp->view == view)
+ return 0;
+
+ if (!raw3270_state_ready(rp))
+ return -EBUSY;
+
+ if (rp->view && rp->view->fn->deactivate) {
+ oldview = rp->view;
+ oldview->fn->deactivate(oldview);
+ }
+
+ rc = raw3270_assign_activate_view(rp, view);
+ if (!rc)
+ return 0;
+
+ /* Didn't work. Try to reactivate the old view. */
+ if (oldview) {
+ rc = raw3270_assign_activate_view(rp, oldview);
+ if (!rc)
+ return 0;
+ }
+
+ /* Didn't work as well. Try any other view. */
+ list_for_each_entry(nv, &rp->view_list, list) {
+ if (nv == view || nv == oldview)
+ continue;
+ rc = raw3270_assign_activate_view(rp, nv);
+ if (!rc)
+ break;
+ rp->view = NULL;
+ }
+ return rc;
+}
+
/*
* Activate a view.
*/
-int
-raw3270_activate_view(struct raw3270_view *view)
+int raw3270_activate_view(struct raw3270_view *view)
{
struct raw3270 *rp;
- struct raw3270_view *oldview, *nv;
unsigned long flags;
int rc;
@@ -860,42 +944,16 @@ raw3270_activate_view(struct raw3270_view *view)
if (!rp)
return -ENODEV;
spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
- if (rp->view == view)
- rc = 0;
- else if (!raw3270_state_ready(rp))
- rc = -EBUSY;
- else {
- oldview = NULL;
- if (rp->view && rp->view->fn->deactivate) {
- oldview = rp->view;
- oldview->fn->deactivate(oldview);
- }
- rp->view = view;
- rc = view->fn->activate(view);
- if (rc) {
- /* Didn't work. Try to reactivate the old view. */
- rp->view = oldview;
- if (!oldview || oldview->fn->activate(oldview) != 0) {
- /* Didn't work as well. Try any other view. */
- list_for_each_entry(nv, &rp->view_list, list)
- if (nv != view && nv != oldview) {
- rp->view = nv;
- if (nv->fn->activate(nv) == 0)
- break;
- rp->view = NULL;
- }
- }
- }
- }
+ rc = __raw3270_activate_view(rp, view);
spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
return rc;
}
+EXPORT_SYMBOL(raw3270_activate_view);
/*
* Deactivate current view.
*/
-void
-raw3270_deactivate_view(struct raw3270_view *view)
+void raw3270_deactivate_view(struct raw3270_view *view)
{
unsigned long flags;
struct raw3270 *rp;
@@ -922,12 +980,13 @@ raw3270_deactivate_view(struct raw3270_view *view)
}
spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags);
}
+EXPORT_SYMBOL(raw3270_deactivate_view);
/*
* Add view to device with minor "minor".
*/
-int
-raw3270_add_view(struct raw3270_view *view, struct raw3270_fn *fn, int minor, int subclass)
+int raw3270_add_view(struct raw3270_view *view, struct raw3270_fn *fn,
+ int minor, int subclass)
{
unsigned long flags;
struct raw3270 *rp;
@@ -958,12 +1017,12 @@ raw3270_add_view(struct raw3270_view *view, struct raw3270_fn *fn, int minor, in
mutex_unlock(&raw3270_mutex);
return rc;
}
+EXPORT_SYMBOL(raw3270_add_view);
/*
* Find specific view of device with minor "minor".
*/
-struct raw3270_view *
-raw3270_find_view(struct raw3270_fn *fn, int minor)
+struct raw3270_view *raw3270_find_view(struct raw3270_fn *fn, int minor)
{
struct raw3270 *rp;
struct raw3270_view *view, *tmp;
@@ -988,12 +1047,12 @@ raw3270_find_view(struct raw3270_fn *fn, int minor)
mutex_unlock(&raw3270_mutex);
return view;
}
+EXPORT_SYMBOL(raw3270_find_view);
/*
* Remove view from device and free view structure via call to view->fn->free.
*/
-void
-raw3270_del_view(struct raw3270_view *view)
+void raw3270_del_view(struct raw3270_view *view)
{
unsigned long flags;
struct raw3270 *rp;
@@ -1022,12 +1081,12 @@ raw3270_del_view(struct raw3270_view *view)
if (view->fn->free)
view->fn->free(view);
}
+EXPORT_SYMBOL(raw3270_del_view);
/*
* Remove a 3270 device structure.
*/
-static void
-raw3270_delete_device(struct raw3270 *rp)
+static void raw3270_delete_device(struct raw3270 *rp)
{
struct ccw_device *cdev;
@@ -1050,8 +1109,7 @@ raw3270_delete_device(struct raw3270 *rp)
kfree(rp);
}
-static int
-raw3270_probe (struct ccw_device *cdev)
+static int raw3270_probe(struct ccw_device *cdev)
{
return 0;
}
@@ -1059,31 +1117,32 @@ raw3270_probe (struct ccw_device *cdev)
/*
* Additional attributes for a 3270 device
*/
-static ssize_t
-raw3270_model_show(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t model_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
return sysfs_emit(buf, "%i\n",
((struct raw3270 *)dev_get_drvdata(dev))->model);
}
-static DEVICE_ATTR(model, 0444, raw3270_model_show, NULL);
+static DEVICE_ATTR_RO(model);
-static ssize_t
-raw3270_rows_show(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t rows_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
return sysfs_emit(buf, "%i\n",
((struct raw3270 *)dev_get_drvdata(dev))->rows);
}
-static DEVICE_ATTR(rows, 0444, raw3270_rows_show, NULL);
+static DEVICE_ATTR_RO(rows);
static ssize_t
-raw3270_columns_show(struct device *dev, struct device_attribute *attr, char *buf)
+columns_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
return sysfs_emit(buf, "%i\n",
((struct raw3270 *)dev_get_drvdata(dev))->cols);
}
-static DEVICE_ATTR(columns, 0444, raw3270_columns_show, NULL);
+static DEVICE_ATTR_RO(columns);
-static struct attribute * raw3270_attrs[] = {
+static struct attribute *raw3270_attrs[] = {
&dev_attr_model.attr,
&dev_attr_rows.attr,
&dev_attr_columns.attr,
@@ -1115,6 +1174,7 @@ int raw3270_register_notifier(struct raw3270_notifier *notifier)
mutex_unlock(&raw3270_mutex);
return 0;
}
+EXPORT_SYMBOL(raw3270_register_notifier);
void raw3270_unregister_notifier(struct raw3270_notifier *notifier)
{
@@ -1126,12 +1186,12 @@ void raw3270_unregister_notifier(struct raw3270_notifier *notifier)
list_del(&notifier->list);
mutex_unlock(&raw3270_mutex);
}
+EXPORT_SYMBOL(raw3270_unregister_notifier);
/*
* Set 3270 device online.
*/
-static int
-raw3270_set_online (struct ccw_device *cdev)
+static int raw3270_set_online(struct ccw_device *cdev)
{
struct raw3270_notifier *np;
struct raw3270 *rp;
@@ -1158,8 +1218,7 @@ failure:
/*
* Remove 3270 device structure.
*/
-static void
-raw3270_remove (struct ccw_device *cdev)
+static void raw3270_remove(struct ccw_device *cdev)
{
unsigned long flags;
struct raw3270 *rp;
@@ -1173,7 +1232,7 @@ raw3270_remove (struct ccw_device *cdev)
* devices even if they haven't been varied online.
* Thus, rp may validly be NULL here.
*/
- if (rp == NULL)
+ if (!rp)
return;
sysfs_remove_group(&cdev->dev.kobj, &raw3270_attr_group);
@@ -1209,8 +1268,7 @@ raw3270_remove (struct ccw_device *cdev)
/*
* Set 3270 device offline.
*/
-static int
-raw3270_set_offline (struct ccw_device *cdev)
+static int raw3270_set_offline(struct ccw_device *cdev)
{
struct raw3270 *rp;
@@ -1249,8 +1307,7 @@ static struct ccw_driver raw3270_ccw_driver = {
.int_class = IRQIO_C70,
};
-static int
-raw3270_init(void)
+static int raw3270_init(void)
{
struct raw3270 *rp;
int rc;
@@ -1272,8 +1329,7 @@ raw3270_init(void)
return rc;
}
-static void
-raw3270_exit(void)
+static void raw3270_exit(void)
{
ccw_driver_unregister(&raw3270_ccw_driver);
class_destroy(class3270);
@@ -1283,25 +1339,3 @@ MODULE_LICENSE("GPL");
module_init(raw3270_init);
module_exit(raw3270_exit);
-
-EXPORT_SYMBOL(class3270);
-EXPORT_SYMBOL(raw3270_request_alloc);
-EXPORT_SYMBOL(raw3270_request_free);
-EXPORT_SYMBOL(raw3270_request_reset);
-EXPORT_SYMBOL(raw3270_request_set_cmd);
-EXPORT_SYMBOL(raw3270_request_add_data);
-EXPORT_SYMBOL(raw3270_request_set_data);
-EXPORT_SYMBOL(raw3270_request_set_idal);
-EXPORT_SYMBOL(raw3270_buffer_address);
-EXPORT_SYMBOL(raw3270_add_view);
-EXPORT_SYMBOL(raw3270_del_view);
-EXPORT_SYMBOL(raw3270_find_view);
-EXPORT_SYMBOL(raw3270_activate_view);
-EXPORT_SYMBOL(raw3270_deactivate_view);
-EXPORT_SYMBOL(raw3270_start);
-EXPORT_SYMBOL(raw3270_start_locked);
-EXPORT_SYMBOL(raw3270_start_irq);
-EXPORT_SYMBOL(raw3270_reset);
-EXPORT_SYMBOL(raw3270_register_notifier);
-EXPORT_SYMBOL(raw3270_unregister_notifier);
-EXPORT_SYMBOL(raw3270_wait_queue);
diff --git a/drivers/s390/char/raw3270.h b/drivers/s390/char/raw3270.h
index 4cb6b5ee44ca..b1beecc7a0a9 100644
--- a/drivers/s390/char/raw3270.h
+++ b/drivers/s390/char/raw3270.h
@@ -8,88 +8,10 @@
* Copyright IBM Corp. 2003, 2009
*/
+#include <uapi/asm/raw3270.h>
#include <asm/idals.h>
#include <asm/ioctl.h>
-/* ioctls for fullscreen 3270 */
-#define TUBICMD _IO('3', 3) /* set ccw command for fs reads. */
-#define TUBOCMD _IO('3', 4) /* set ccw command for fs writes. */
-#define TUBGETI _IO('3', 7) /* get ccw command for fs reads. */
-#define TUBGETO _IO('3', 8) /* get ccw command for fs writes. */
-#define TUBSETMOD _IO('3',12) /* FIXME: what does it do ?*/
-#define TUBGETMOD _IO('3',13) /* FIXME: what does it do ?*/
-
-/* Local Channel Commands */
-#define TC_WRITE 0x01 /* Write */
-#define TC_RDBUF 0x02 /* Read Buffer */
-#define TC_EWRITE 0x05 /* Erase write */
-#define TC_READMOD 0x06 /* Read modified */
-#define TC_EWRITEA 0x0d /* Erase write alternate */
-#define TC_WRITESF 0x11 /* Write structured field */
-
-/* Buffer Control Orders */
-#define TO_SF 0x1d /* Start field */
-#define TO_SBA 0x11 /* Set buffer address */
-#define TO_IC 0x13 /* Insert cursor */
-#define TO_PT 0x05 /* Program tab */
-#define TO_RA 0x3c /* Repeat to address */
-#define TO_SFE 0x29 /* Start field extended */
-#define TO_EUA 0x12 /* Erase unprotected to address */
-#define TO_MF 0x2c /* Modify field */
-#define TO_SA 0x28 /* Set attribute */
-
-/* Field Attribute Bytes */
-#define TF_INPUT 0x40 /* Visible input */
-#define TF_INPUTN 0x4c /* Invisible input */
-#define TF_INMDT 0xc1 /* Visible, Set-MDT */
-#define TF_LOG 0x60
-
-/* Character Attribute Bytes */
-#define TAT_RESET 0x00
-#define TAT_FIELD 0xc0
-#define TAT_EXTHI 0x41
-#define TAT_COLOR 0x42
-#define TAT_CHARS 0x43
-#define TAT_TRANS 0x46
-
-/* Extended-Highlighting Bytes */
-#define TAX_RESET 0x00
-#define TAX_BLINK 0xf1
-#define TAX_REVER 0xf2
-#define TAX_UNDER 0xf4
-
-/* Reset value */
-#define TAR_RESET 0x00
-
-/* Color values */
-#define TAC_RESET 0x00
-#define TAC_BLUE 0xf1
-#define TAC_RED 0xf2
-#define TAC_PINK 0xf3
-#define TAC_GREEN 0xf4
-#define TAC_TURQ 0xf5
-#define TAC_YELLOW 0xf6
-#define TAC_WHITE 0xf7
-#define TAC_DEFAULT 0x00
-
-/* Write Control Characters */
-#define TW_NONE 0x40 /* No particular action */
-#define TW_KR 0xc2 /* Keyboard restore */
-#define TW_PLUSALARM 0x04 /* Add this bit for alarm */
-
-#define RAW3270_FIRSTMINOR 1 /* First minor number */
-#define RAW3270_MAXDEVS 255 /* Max number of 3270 devices */
-
-/* For TUBGETMOD and TUBSETMOD. Should include. */
-struct raw3270_iocb {
- short model;
- short line_cnt;
- short col_cnt;
- short pf_cnt;
- short re_cnt;
- short map;
-};
-
struct raw3270;
struct raw3270_view;
extern struct class *class3270;
@@ -105,17 +27,17 @@ struct raw3270_request {
int rc; /* return code for this request. */
/* Callback for delivering final status. */
- void (*callback)(struct raw3270_request *, void *);
+ void (*callback)(struct raw3270_request *rq, void *data);
void *callback_data;
};
struct raw3270_request *raw3270_request_alloc(size_t size);
-void raw3270_request_free(struct raw3270_request *);
-void raw3270_request_reset(struct raw3270_request *);
-void raw3270_request_set_cmd(struct raw3270_request *, u8 cmd);
-int raw3270_request_add_data(struct raw3270_request *, void *, size_t);
-void raw3270_request_set_data(struct raw3270_request *, void *, size_t);
-void raw3270_request_set_idal(struct raw3270_request *, struct idal_buffer *);
+void raw3270_request_free(struct raw3270_request *rq);
+int raw3270_request_reset(struct raw3270_request *rq);
+void raw3270_request_set_cmd(struct raw3270_request *rq, u8 cmd);
+int raw3270_request_add_data(struct raw3270_request *rq, void *data, size_t size);
+void raw3270_request_set_data(struct raw3270_request *rq, void *data, size_t size);
+void raw3270_request_set_idal(struct raw3270_request *rq, struct idal_buffer *ib);
static inline int
raw3270_request_final(struct raw3270_request *rq)
@@ -123,19 +45,21 @@ raw3270_request_final(struct raw3270_request *rq)
return list_empty(&rq->list);
}
-void raw3270_buffer_address(struct raw3270 *, char *, unsigned short);
+void raw3270_buffer_address(struct raw3270 *, char *, int, int);
/*
* Functions of a 3270 view.
*/
struct raw3270_fn {
- int (*activate)(struct raw3270_view *);
- void (*deactivate)(struct raw3270_view *);
- void (*intv)(struct raw3270_view *,
- struct raw3270_request *, struct irb *);
- void (*release)(struct raw3270_view *);
- void (*free)(struct raw3270_view *);
- void (*resize)(struct raw3270_view *, int, int, int);
+ int (*activate)(struct raw3270_view *rq);
+ void (*deactivate)(struct raw3270_view *rq);
+ void (*intv)(struct raw3270_view *view,
+ struct raw3270_request *rq, struct irb *ib);
+ void (*release)(struct raw3270_view *view);
+ void (*free)(struct raw3270_view *view);
+ void (*resize)(struct raw3270_view *view,
+ int new_model, int new_cols, int new_rows,
+ int old_model, int old_cols, int old_rows);
};
/*
@@ -148,7 +72,7 @@ struct raw3270_fn {
*/
struct raw3270_view {
struct list_head list;
- spinlock_t lock;
+ spinlock_t lock; /* protects members of view */
#define RAW3270_VIEW_LOCK_IRQ 0
#define RAW3270_VIEW_LOCK_BH 1
atomic_t ref_count;
@@ -159,18 +83,21 @@ struct raw3270_view {
unsigned char *ascebc; /* ascii -> ebcdic table */
};
-int raw3270_add_view(struct raw3270_view *, struct raw3270_fn *, int, int);
+int raw3270_add_view(struct raw3270_view *view, struct raw3270_fn *fn, int minor, int subclass);
int raw3270_view_lock_unavailable(struct raw3270_view *view);
-int raw3270_activate_view(struct raw3270_view *);
-void raw3270_del_view(struct raw3270_view *);
-void raw3270_deactivate_view(struct raw3270_view *);
-struct raw3270_view *raw3270_find_view(struct raw3270_fn *, int);
-int raw3270_start(struct raw3270_view *, struct raw3270_request *);
-int raw3270_start_locked(struct raw3270_view *, struct raw3270_request *);
-int raw3270_start_irq(struct raw3270_view *, struct raw3270_request *);
-int raw3270_reset(struct raw3270_view *);
-struct raw3270_view *raw3270_view(struct raw3270_view *);
-int raw3270_view_active(struct raw3270_view *);
+int raw3270_activate_view(struct raw3270_view *view);
+void raw3270_del_view(struct raw3270_view *view);
+void raw3270_deactivate_view(struct raw3270_view *view);
+struct raw3270_view *raw3270_find_view(struct raw3270_fn *fn, int minor);
+int raw3270_start(struct raw3270_view *view, struct raw3270_request *rq);
+int raw3270_start_locked(struct raw3270_view *view, struct raw3270_request *rq);
+int raw3270_start_irq(struct raw3270_view *view, struct raw3270_request *rq);
+int raw3270_reset(struct raw3270_view *view);
+struct raw3270_view *raw3270_view(struct raw3270_view *view);
+int raw3270_view_active(struct raw3270_view *view);
+int raw3270_start_request(struct raw3270_view *view, struct raw3270_request *rq,
+ int cmd, void *data, size_t len);
+void raw3270_read_modified_cb(struct raw3270_request *rq, void *data);
/* Reference count inliner for view structures. */
static inline void
@@ -189,7 +116,7 @@ raw3270_put_view(struct raw3270_view *view)
}
struct raw3270 *raw3270_setup_console(void);
-void raw3270_wait_cons_dev(struct raw3270 *);
+void raw3270_wait_cons_dev(struct raw3270 *rp);
/* Notifier for device addition/removal */
struct raw3270_notifier {
@@ -198,87 +125,5 @@ struct raw3270_notifier {
void (*destroy)(int minor);
};
-int raw3270_register_notifier(struct raw3270_notifier *);
-void raw3270_unregister_notifier(struct raw3270_notifier *);
-
-/*
- * Little memory allocator for string objects.
- */
-struct string
-{
- struct list_head list;
- struct list_head update;
- unsigned long size;
- unsigned long len;
- char string[];
-} __attribute__ ((aligned(8)));
-
-static inline struct string *
-alloc_string(struct list_head *free_list, unsigned long len)
-{
- struct string *cs, *tmp;
- unsigned long size;
-
- size = (len + 7L) & -8L;
- list_for_each_entry(cs, free_list, list) {
- if (cs->size < size)
- continue;
- if (cs->size > size + sizeof(struct string)) {
- char *endaddr = (char *) (cs + 1) + cs->size;
- tmp = (struct string *) (endaddr - size) - 1;
- tmp->size = size;
- cs->size -= size + sizeof(struct string);
- cs = tmp;
- } else
- list_del(&cs->list);
- cs->len = len;
- INIT_LIST_HEAD(&cs->list);
- INIT_LIST_HEAD(&cs->update);
- return cs;
- }
- return NULL;
-}
-
-static inline unsigned long
-free_string(struct list_head *free_list, struct string *cs)
-{
- struct string *tmp;
- struct list_head *p, *left;
-
- /* Find out the left neighbour in free memory list. */
- left = free_list;
- list_for_each(p, free_list) {
- if (list_entry(p, struct string, list) > cs)
- break;
- left = p;
- }
- /* Try to merge with right neighbour = next element from left. */
- if (left->next != free_list) {
- tmp = list_entry(left->next, struct string, list);
- if ((char *) (cs + 1) + cs->size == (char *) tmp) {
- list_del(&tmp->list);
- cs->size += tmp->size + sizeof(struct string);
- }
- }
- /* Try to merge with left neighbour. */
- if (left != free_list) {
- tmp = list_entry(left, struct string, list);
- if ((char *) (tmp + 1) + tmp->size == (char *) cs) {
- tmp->size += cs->size + sizeof(struct string);
- return tmp->size;
- }
- }
- __list_add(&cs->list, left, left->next);
- return cs->size;
-}
-
-static inline void
-add_string_memory(struct list_head *free_list, void *mem, unsigned long size)
-{
- struct string *cs;
-
- cs = (struct string *) mem;
- cs->size = size - sizeof(struct string);
- free_string(free_list, cs);
-}
-
+int raw3270_register_notifier(struct raw3270_notifier *notifier);
+void raw3270_unregister_notifier(struct raw3270_notifier *notifier);
diff --git a/drivers/s390/char/sclp_early.c b/drivers/s390/char/sclp_early.c
index c1c70a161c0e..f480d6c7fd39 100644
--- a/drivers/s390/char/sclp_early.c
+++ b/drivers/s390/char/sclp_early.c
@@ -163,7 +163,7 @@ static void __init sclp_early_console_detect(struct init_sccb *sccb)
sclp.has_linemode = 1;
}
-void __init sclp_early_adjust_va(void)
+void __init __no_sanitize_address sclp_early_adjust_va(void)
{
sclp_early_sccb = __va((unsigned long)sclp_early_sccb);
}
diff --git a/drivers/s390/char/sclp_ftp.c b/drivers/s390/char/sclp_ftp.c
index ec5a0e2b9255..d27e2cbfbccb 100644
--- a/drivers/s390/char/sclp_ftp.c
+++ b/drivers/s390/char/sclp_ftp.c
@@ -90,7 +90,7 @@ static int sclp_ftp_et7(const struct hmcdrv_ftp_cmdspec *ftp)
struct completion completion;
struct sclp_diag_sccb *sccb;
struct sclp_req *req;
- size_t len;
+ ssize_t len;
int rc;
req = kzalloc(sizeof(*req), GFP_KERNEL);
@@ -117,9 +117,9 @@ static int sclp_ftp_et7(const struct hmcdrv_ftp_cmdspec *ftp)
sccb->evbuf.mdd.ftp.length = ftp->len;
sccb->evbuf.mdd.ftp.bufaddr = virt_to_phys(ftp->buf);
- len = strlcpy(sccb->evbuf.mdd.ftp.fident, ftp->fname,
+ len = strscpy(sccb->evbuf.mdd.ftp.fident, ftp->fname,
HMCDRV_FTP_FIDENT_MAX);
- if (len >= HMCDRV_FTP_FIDENT_MAX) {
+ if (len < 0) {
rc = -EINVAL;
goto out_free;
}
diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c
deleted file mode 100644
index 26e3995ac062..000000000000
--- a/drivers/s390/char/tty3270.c
+++ /dev/null
@@ -1,1963 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * IBM/3270 Driver - tty functions.
- *
- * Author(s):
- * Original 3270 Code for 2.4 written by Richard Hitt (UTS Global)
- * Rewritten for 2.5 by Martin Schwidefsky <schwidefsky@de.ibm.com>
- * -- Copyright IBM Corp. 2003
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kdev_t.h>
-#include <linux/tty.h>
-#include <linux/vt_kern.h>
-#include <linux/init.h>
-#include <linux/console.h>
-#include <linux/interrupt.h>
-#include <linux/workqueue.h>
-
-#include <linux/slab.h>
-#include <linux/memblock.h>
-#include <linux/compat.h>
-
-#include <asm/ccwdev.h>
-#include <asm/cio.h>
-#include <asm/ebcdic.h>
-#include <linux/uaccess.h>
-
-#include "raw3270.h"
-#include "tty3270.h"
-#include "keyboard.h"
-
-#define TTY3270_CHAR_BUF_SIZE 256
-#define TTY3270_OUTPUT_BUFFER_SIZE 1024
-#define TTY3270_STRING_PAGES 5
-
-struct tty_driver *tty3270_driver;
-static int tty3270_max_index;
-
-static struct raw3270_fn tty3270_fn;
-
-struct tty3270_cell {
- unsigned char character;
- unsigned char highlight;
- unsigned char f_color;
-};
-
-struct tty3270_line {
- struct tty3270_cell *cells;
- int len;
-};
-
-#define ESCAPE_NPAR 8
-
-/*
- * The main tty view data structure.
- * FIXME:
- * 1) describe line orientation & lines list concept against screen
- * 2) describe conversion of screen to lines
- * 3) describe line format.
- */
-struct tty3270 {
- struct raw3270_view view;
- struct tty_port port;
- void **freemem_pages; /* Array of pages used for freemem. */
- struct list_head freemem; /* List of free memory for strings. */
-
- /* Output stuff. */
- struct list_head lines; /* List of lines. */
- struct list_head update; /* List of lines to update. */
- unsigned char wcc; /* Write control character. */
- int nr_lines; /* # lines in list. */
- int nr_up; /* # lines up in history. */
- unsigned long update_flags; /* Update indication bits. */
- struct string *status; /* Lower right of display. */
- struct raw3270_request *write; /* Single write request. */
- struct timer_list timer; /* Output delay timer. */
-
- /* Current tty screen. */
- unsigned int cx, cy; /* Current output position. */
- unsigned int highlight; /* Blink/reverse/underscore */
- unsigned int f_color; /* Foreground color */
- struct tty3270_line *screen;
- unsigned int n_model, n_cols, n_rows; /* New model & size */
- struct work_struct resize_work;
-
- /* Input stuff. */
- struct string *prompt; /* Output string for input area. */
- struct string *input; /* Input string for read request. */
- struct raw3270_request *read; /* Single read request. */
- struct raw3270_request *kreset; /* Single keyboard reset request. */
- unsigned char inattr; /* Visible/invisible input. */
- int throttle, attn; /* tty throttle/unthrottle. */
- struct tasklet_struct readlet; /* Tasklet to issue read request. */
- struct tasklet_struct hanglet; /* Tasklet to hang up the tty. */
- struct kbd_data *kbd; /* key_maps stuff. */
-
- /* Escape sequence parsing. */
- int esc_state, esc_ques, esc_npar;
- int esc_par[ESCAPE_NPAR];
- unsigned int saved_cx, saved_cy;
- unsigned int saved_highlight, saved_f_color;
-
- /* Command recalling. */
- struct list_head rcl_lines; /* List of recallable lines. */
- struct list_head *rcl_walk; /* Point in rcl_lines list. */
- int rcl_nr, rcl_max; /* Number/max number of rcl_lines. */
-
- /* Character array for put_char/flush_chars. */
- unsigned int char_count;
- char char_buf[TTY3270_CHAR_BUF_SIZE];
-};
-
-/* tty3270->update_flags. See tty3270_update for details. */
-#define TTY_UPDATE_ERASE 1 /* Use EWRITEA instead of WRITE. */
-#define TTY_UPDATE_LIST 2 /* Update lines in tty3270->update. */
-#define TTY_UPDATE_INPUT 4 /* Update input line. */
-#define TTY_UPDATE_STATUS 8 /* Update status line. */
-#define TTY_UPDATE_ALL 16 /* Recreate screen. */
-
-static void tty3270_update(struct timer_list *);
-static void tty3270_resize_work(struct work_struct *work);
-
-/*
- * Setup timeout for a device. On timeout trigger an update.
- */
-static void tty3270_set_timer(struct tty3270 *tp, int expires)
-{
- mod_timer(&tp->timer, jiffies + expires);
-}
-
-/*
- * The input line are the two last lines of the screen.
- */
-static void
-tty3270_update_prompt(struct tty3270 *tp, char *input, int count)
-{
- struct string *line;
- unsigned int off;
-
- line = tp->prompt;
- if (count != 0)
- line->string[5] = TF_INMDT;
- else
- line->string[5] = tp->inattr;
- if (count > tp->view.cols * 2 - 11)
- count = tp->view.cols * 2 - 11;
- memcpy(line->string + 6, input, count);
- line->string[6 + count] = TO_IC;
- /* Clear to end of input line. */
- if (count < tp->view.cols * 2 - 11) {
- line->string[7 + count] = TO_RA;
- line->string[10 + count] = 0;
- off = tp->view.cols * tp->view.rows - 9;
- raw3270_buffer_address(tp->view.dev, line->string+count+8, off);
- line->len = 11 + count;
- } else
- line->len = 7 + count;
- tp->update_flags |= TTY_UPDATE_INPUT;
-}
-
-static void
-tty3270_create_prompt(struct tty3270 *tp)
-{
- static const unsigned char blueprint[] =
- { TO_SBA, 0, 0, 0x6e, TO_SF, TF_INPUT,
- /* empty input string */
- TO_IC, TO_RA, 0, 0, 0 };
- struct string *line;
- unsigned int offset;
-
- line = alloc_string(&tp->freemem,
- sizeof(blueprint) + tp->view.cols * 2 - 9);
- tp->prompt = line;
- tp->inattr = TF_INPUT;
- /* Copy blueprint to status line */
- memcpy(line->string, blueprint, sizeof(blueprint));
- line->len = sizeof(blueprint);
- /* Set output offsets. */
- offset = tp->view.cols * (tp->view.rows - 2);
- raw3270_buffer_address(tp->view.dev, line->string + 1, offset);
- offset = tp->view.cols * tp->view.rows - 9;
- raw3270_buffer_address(tp->view.dev, line->string + 8, offset);
-
- /* Allocate input string for reading. */
- tp->input = alloc_string(&tp->freemem, tp->view.cols * 2 - 9 + 6);
-}
-
-/*
- * The status line is the last line of the screen. It shows the string
- * "Running"/"Holding" in the lower right corner of the screen.
- */
-static void
-tty3270_update_status(struct tty3270 * tp)
-{
- char *str;
-
- str = (tp->nr_up != 0) ? "History" : "Running";
- memcpy(tp->status->string + 8, str, 7);
- codepage_convert(tp->view.ascebc, tp->status->string + 8, 7);
- tp->update_flags |= TTY_UPDATE_STATUS;
-}
-
-static void
-tty3270_create_status(struct tty3270 * tp)
-{
- static const unsigned char blueprint[] =
- { TO_SBA, 0, 0, TO_SF, TF_LOG, TO_SA, TAT_COLOR, TAC_GREEN,
- 0, 0, 0, 0, 0, 0, 0, TO_SF, TF_LOG, TO_SA, TAT_COLOR,
- TAC_RESET };
- struct string *line;
- unsigned int offset;
-
- line = alloc_string(&tp->freemem,sizeof(blueprint));
- tp->status = line;
- /* Copy blueprint to status line */
- memcpy(line->string, blueprint, sizeof(blueprint));
- /* Set address to start of status string (= last 9 characters). */
- offset = tp->view.cols * tp->view.rows - 9;
- raw3270_buffer_address(tp->view.dev, line->string + 1, offset);
-}
-
-/*
- * Set output offsets to 3270 datastream fragment of a tty string.
- * (TO_SBA offset at the start and TO_RA offset at the end of the string)
- */
-static void
-tty3270_update_string(struct tty3270 *tp, struct string *line, int nr)
-{
- unsigned char *cp;
-
- raw3270_buffer_address(tp->view.dev, line->string + 1,
- tp->view.cols * nr);
- cp = line->string + line->len - 4;
- if (*cp == TO_RA)
- raw3270_buffer_address(tp->view.dev, cp + 1,
- tp->view.cols * (nr + 1));
-}
-
-/*
- * Rebuild update list to print all lines.
- */
-static void
-tty3270_rebuild_update(struct tty3270 *tp)
-{
- struct string *s, *n;
- int line, nr_up;
-
- /*
- * Throw away update list and create a new one,
- * containing all lines that will fit on the screen.
- */
- list_for_each_entry_safe(s, n, &tp->update, update)
- list_del_init(&s->update);
- line = tp->view.rows - 3;
- nr_up = tp->nr_up;
- list_for_each_entry_reverse(s, &tp->lines, list) {
- if (nr_up > 0) {
- nr_up--;
- continue;
- }
- tty3270_update_string(tp, s, line);
- list_add(&s->update, &tp->update);
- if (--line < 0)
- break;
- }
- tp->update_flags |= TTY_UPDATE_LIST;
-}
-
-/*
- * Alloc string for size bytes. If there is not enough room in
- * freemem, free strings until there is room.
- */
-static struct string *
-tty3270_alloc_string(struct tty3270 *tp, size_t size)
-{
- struct string *s, *n;
-
- s = alloc_string(&tp->freemem, size);
- if (s)
- return s;
- list_for_each_entry_safe(s, n, &tp->lines, list) {
- BUG_ON(tp->nr_lines <= tp->view.rows - 2);
- list_del(&s->list);
- if (!list_empty(&s->update))
- list_del(&s->update);
- tp->nr_lines--;
- if (free_string(&tp->freemem, s) >= size)
- break;
- }
- s = alloc_string(&tp->freemem, size);
- BUG_ON(!s);
- if (tp->nr_up != 0 &&
- tp->nr_up + tp->view.rows - 2 >= tp->nr_lines) {
- tp->nr_up = tp->nr_lines - tp->view.rows + 2;
- tty3270_rebuild_update(tp);
- tty3270_update_status(tp);
- }
- return s;
-}
-
-/*
- * Add an empty line to the list.
- */
-static void
-tty3270_blank_line(struct tty3270 *tp)
-{
- static const unsigned char blueprint[] =
- { TO_SBA, 0, 0, TO_SA, TAT_EXTHI, TAX_RESET,
- TO_SA, TAT_COLOR, TAC_RESET, TO_RA, 0, 0, 0 };
- struct string *s;
-
- s = tty3270_alloc_string(tp, sizeof(blueprint));
- memcpy(s->string, blueprint, sizeof(blueprint));
- s->len = sizeof(blueprint);
- list_add_tail(&s->list, &tp->lines);
- tp->nr_lines++;
- if (tp->nr_up != 0)
- tp->nr_up++;
-}
-
-/*
- * Create a blank screen and remove all lines from the history.
- */
-static void
-tty3270_blank_screen(struct tty3270 *tp)
-{
- struct string *s, *n;
- int i;
-
- for (i = 0; i < tp->view.rows - 2; i++)
- tp->screen[i].len = 0;
- tp->nr_up = 0;
- list_for_each_entry_safe(s, n, &tp->lines, list) {
- list_del(&s->list);
- if (!list_empty(&s->update))
- list_del(&s->update);
- tp->nr_lines--;
- free_string(&tp->freemem, s);
- }
-}
-
-/*
- * Write request completion callback.
- */
-static void
-tty3270_write_callback(struct raw3270_request *rq, void *data)
-{
- struct tty3270 *tp = container_of(rq->view, struct tty3270, view);
-
- if (rq->rc != 0) {
- /* Write wasn't successful. Refresh all. */
- tp->update_flags = TTY_UPDATE_ALL;
- tty3270_set_timer(tp, 1);
- }
- raw3270_request_reset(rq);
- xchg(&tp->write, rq);
-}
-
-/*
- * Update 3270 display.
- */
-static void
-tty3270_update(struct timer_list *t)
-{
- struct tty3270 *tp = from_timer(tp, t, timer);
- static char invalid_sba[2] = { 0xff, 0xff };
- struct raw3270_request *wrq;
- unsigned long updated;
- struct string *s, *n;
- char *sba, *str;
- int rc, len;
-
- wrq = xchg(&tp->write, 0);
- if (!wrq) {
- tty3270_set_timer(tp, 1);
- return;
- }
-
- spin_lock(&tp->view.lock);
- updated = 0;
- if (tp->update_flags & TTY_UPDATE_ALL) {
- tty3270_rebuild_update(tp);
- tty3270_update_status(tp);
- tp->update_flags = TTY_UPDATE_ERASE | TTY_UPDATE_LIST |
- TTY_UPDATE_INPUT | TTY_UPDATE_STATUS;
- }
- if (tp->update_flags & TTY_UPDATE_ERASE) {
- /* Use erase write alternate to erase display. */
- raw3270_request_set_cmd(wrq, TC_EWRITEA);
- updated |= TTY_UPDATE_ERASE;
- } else
- raw3270_request_set_cmd(wrq, TC_WRITE);
-
- raw3270_request_add_data(wrq, &tp->wcc, 1);
- tp->wcc = TW_NONE;
-
- /*
- * Update status line.
- */
- if (tp->update_flags & TTY_UPDATE_STATUS)
- if (raw3270_request_add_data(wrq, tp->status->string,
- tp->status->len) == 0)
- updated |= TTY_UPDATE_STATUS;
-
- /*
- * Write input line.
- */
- if (tp->update_flags & TTY_UPDATE_INPUT)
- if (raw3270_request_add_data(wrq, tp->prompt->string,
- tp->prompt->len) == 0)
- updated |= TTY_UPDATE_INPUT;
-
- sba = invalid_sba;
-
- if (tp->update_flags & TTY_UPDATE_LIST) {
- /* Write strings in the update list to the screen. */
- list_for_each_entry_safe(s, n, &tp->update, update) {
- str = s->string;
- len = s->len;
- /*
- * Skip TO_SBA at the start of the string if the
- * last output position matches the start address
- * of this line.
- */
- if (s->string[1] == sba[0] && s->string[2] == sba[1]) {
- str += 3;
- len -= 3;
- }
- if (raw3270_request_add_data(wrq, str, len) != 0)
- break;
- list_del_init(&s->update);
- if (s->string[s->len - 4] == TO_RA)
- sba = s->string + s->len - 3;
- else
- sba = invalid_sba;
- }
- if (list_empty(&tp->update))
- updated |= TTY_UPDATE_LIST;
- }
- wrq->callback = tty3270_write_callback;
- rc = raw3270_start(&tp->view, wrq);
- if (rc == 0) {
- tp->update_flags &= ~updated;
- if (tp->update_flags)
- tty3270_set_timer(tp, 1);
- } else {
- raw3270_request_reset(wrq);
- xchg(&tp->write, wrq);
- }
- spin_unlock(&tp->view.lock);
-}
-
-/*
- * Command recalling.
- */
-static void
-tty3270_rcl_add(struct tty3270 *tp, char *input, int len)
-{
- struct string *s;
-
- tp->rcl_walk = NULL;
- if (len <= 0)
- return;
- if (tp->rcl_nr >= tp->rcl_max) {
- s = list_entry(tp->rcl_lines.next, struct string, list);
- list_del(&s->list);
- free_string(&tp->freemem, s);
- tp->rcl_nr--;
- }
- s = tty3270_alloc_string(tp, len);
- memcpy(s->string, input, len);
- list_add_tail(&s->list, &tp->rcl_lines);
- tp->rcl_nr++;
-}
-
-static void
-tty3270_rcl_backward(struct kbd_data *kbd)
-{
- struct tty3270 *tp = container_of(kbd->port, struct tty3270, port);
- struct string *s;
-
- spin_lock_bh(&tp->view.lock);
- if (tp->inattr == TF_INPUT) {
- if (tp->rcl_walk && tp->rcl_walk->prev != &tp->rcl_lines)
- tp->rcl_walk = tp->rcl_walk->prev;
- else if (!list_empty(&tp->rcl_lines))
- tp->rcl_walk = tp->rcl_lines.prev;
- s = tp->rcl_walk ?
- list_entry(tp->rcl_walk, struct string, list) : NULL;
- if (tp->rcl_walk) {
- s = list_entry(tp->rcl_walk, struct string, list);
- tty3270_update_prompt(tp, s->string, s->len);
- } else
- tty3270_update_prompt(tp, NULL, 0);
- tty3270_set_timer(tp, 1);
- }
- spin_unlock_bh(&tp->view.lock);
-}
-
-/*
- * Deactivate tty view.
- */
-static void
-tty3270_exit_tty(struct kbd_data *kbd)
-{
- struct tty3270 *tp = container_of(kbd->port, struct tty3270, port);
-
- raw3270_deactivate_view(&tp->view);
-}
-
-/*
- * Scroll forward in history.
- */
-static void
-tty3270_scroll_forward(struct kbd_data *kbd)
-{
- struct tty3270 *tp = container_of(kbd->port, struct tty3270, port);
- int nr_up;
-
- spin_lock_bh(&tp->view.lock);
- nr_up = tp->nr_up - tp->view.rows + 2;
- if (nr_up < 0)
- nr_up = 0;
- if (nr_up != tp->nr_up) {
- tp->nr_up = nr_up;
- tty3270_rebuild_update(tp);
- tty3270_update_status(tp);
- tty3270_set_timer(tp, 1);
- }
- spin_unlock_bh(&tp->view.lock);
-}
-
-/*
- * Scroll backward in history.
- */
-static void
-tty3270_scroll_backward(struct kbd_data *kbd)
-{
- struct tty3270 *tp = container_of(kbd->port, struct tty3270, port);
- int nr_up;
-
- spin_lock_bh(&tp->view.lock);
- nr_up = tp->nr_up + tp->view.rows - 2;
- if (nr_up + tp->view.rows - 2 > tp->nr_lines)
- nr_up = tp->nr_lines - tp->view.rows + 2;
- if (nr_up != tp->nr_up) {
- tp->nr_up = nr_up;
- tty3270_rebuild_update(tp);
- tty3270_update_status(tp);
- tty3270_set_timer(tp, 1);
- }
- spin_unlock_bh(&tp->view.lock);
-}
-
-/*
- * Pass input line to tty.
- */
-static void
-tty3270_read_tasklet(unsigned long data)
-{
- struct raw3270_request *rrq = (struct raw3270_request *)data;
- static char kreset_data = TW_KR;
- struct tty3270 *tp = container_of(rrq->view, struct tty3270, view);
- char *input;
- int len;
-
- spin_lock_bh(&tp->view.lock);
- /*
- * Two AID keys are special: For 0x7d (enter) the input line
- * has to be emitted to the tty and for 0x6d the screen
- * needs to be redrawn.
- */
- input = NULL;
- len = 0;
- if (tp->input->string[0] == 0x7d) {
- /* Enter: write input to tty. */
- input = tp->input->string + 6;
- len = tp->input->len - 6 - rrq->rescnt;
- if (tp->inattr != TF_INPUTN)
- tty3270_rcl_add(tp, input, len);
- if (tp->nr_up > 0) {
- tp->nr_up = 0;
- tty3270_rebuild_update(tp);
- tty3270_update_status(tp);
- }
- /* Clear input area. */
- tty3270_update_prompt(tp, NULL, 0);
- tty3270_set_timer(tp, 1);
- } else if (tp->input->string[0] == 0x6d) {
- /* Display has been cleared. Redraw. */
- tp->update_flags = TTY_UPDATE_ALL;
- tty3270_set_timer(tp, 1);
- }
- spin_unlock_bh(&tp->view.lock);
-
- /* Start keyboard reset command. */
- raw3270_request_reset(tp->kreset);
- raw3270_request_set_cmd(tp->kreset, TC_WRITE);
- raw3270_request_add_data(tp->kreset, &kreset_data, 1);
- raw3270_start(&tp->view, tp->kreset);
-
- while (len-- > 0)
- kbd_keycode(tp->kbd, *input++);
- /* Emit keycode for AID byte. */
- kbd_keycode(tp->kbd, 256 + tp->input->string[0]);
-
- raw3270_request_reset(rrq);
- xchg(&tp->read, rrq);
- raw3270_put_view(&tp->view);
-}
-
-/*
- * Read request completion callback.
- */
-static void
-tty3270_read_callback(struct raw3270_request *rq, void *data)
-{
- struct tty3270 *tp = container_of(rq->view, struct tty3270, view);
- raw3270_get_view(rq->view);
- /* Schedule tasklet to pass input to tty. */
- tasklet_schedule(&tp->readlet);
-}
-
-/*
- * Issue a read request. Call with device lock.
- */
-static void
-tty3270_issue_read(struct tty3270 *tp, int lock)
-{
- struct raw3270_request *rrq;
- int rc;
-
- rrq = xchg(&tp->read, 0);
- if (!rrq)
- /* Read already scheduled. */
- return;
- rrq->callback = tty3270_read_callback;
- rrq->callback_data = tp;
- raw3270_request_set_cmd(rrq, TC_READMOD);
- raw3270_request_set_data(rrq, tp->input->string, tp->input->len);
- /* Issue the read modified request. */
- if (lock) {
- rc = raw3270_start(&tp->view, rrq);
- } else
- rc = raw3270_start_irq(&tp->view, rrq);
- if (rc) {
- raw3270_request_reset(rrq);
- xchg(&tp->read, rrq);
- }
-}
-
-/*
- * Hang up the tty
- */
-static void
-tty3270_hangup_tasklet(unsigned long data)
-{
- struct tty3270 *tp = (struct tty3270 *)data;
- tty_port_tty_hangup(&tp->port, true);
- raw3270_put_view(&tp->view);
-}
-
-/*
- * Switch to the tty view.
- */
-static int
-tty3270_activate(struct raw3270_view *view)
-{
- struct tty3270 *tp = container_of(view, struct tty3270, view);
-
- tp->update_flags = TTY_UPDATE_ALL;
- tty3270_set_timer(tp, 1);
- return 0;
-}
-
-static void
-tty3270_deactivate(struct raw3270_view *view)
-{
- struct tty3270 *tp = container_of(view, struct tty3270, view);
-
- del_timer(&tp->timer);
-}
-
-static void
-tty3270_irq(struct tty3270 *tp, struct raw3270_request *rq, struct irb *irb)
-{
- /* Handle ATTN. Schedule tasklet to read aid. */
- if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) {
- if (!tp->throttle)
- tty3270_issue_read(tp, 0);
- else
- tp->attn = 1;
- }
-
- if (rq) {
- if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) {
- rq->rc = -EIO;
- raw3270_get_view(&tp->view);
- tasklet_schedule(&tp->hanglet);
- } else {
- /* Normal end. Copy residual count. */
- rq->rescnt = irb->scsw.cmd.count;
- }
- } else if (irb->scsw.cmd.dstat & DEV_STAT_DEV_END) {
- /* Interrupt without an outstanding request -> update all */
- tp->update_flags = TTY_UPDATE_ALL;
- tty3270_set_timer(tp, 1);
- }
-}
-
-/*
- * Allocate tty3270 structure.
- */
-static struct tty3270 *
-tty3270_alloc_view(void)
-{
- struct tty3270 *tp;
- int pages;
-
- tp = kzalloc(sizeof(struct tty3270), GFP_KERNEL);
- if (!tp)
- goto out_err;
- tp->freemem_pages =
- kmalloc_array(TTY3270_STRING_PAGES, sizeof(void *),
- GFP_KERNEL);
- if (!tp->freemem_pages)
- goto out_tp;
- INIT_LIST_HEAD(&tp->freemem);
- INIT_LIST_HEAD(&tp->lines);
- INIT_LIST_HEAD(&tp->update);
- INIT_LIST_HEAD(&tp->rcl_lines);
- tp->rcl_max = 20;
-
- for (pages = 0; pages < TTY3270_STRING_PAGES; pages++) {
- tp->freemem_pages[pages] = (void *)
- __get_free_pages(GFP_KERNEL|GFP_DMA, 0);
- if (!tp->freemem_pages[pages])
- goto out_pages;
- add_string_memory(&tp->freemem,
- tp->freemem_pages[pages], PAGE_SIZE);
- }
- tp->write = raw3270_request_alloc(TTY3270_OUTPUT_BUFFER_SIZE);
- if (IS_ERR(tp->write))
- goto out_pages;
- tp->read = raw3270_request_alloc(0);
- if (IS_ERR(tp->read))
- goto out_write;
- tp->kreset = raw3270_request_alloc(1);
- if (IS_ERR(tp->kreset))
- goto out_read;
- tp->kbd = kbd_alloc();
- if (!tp->kbd)
- goto out_reset;
-
- tty_port_init(&tp->port);
- timer_setup(&tp->timer, tty3270_update, 0);
- tasklet_init(&tp->readlet, tty3270_read_tasklet,
- (unsigned long) tp->read);
- tasklet_init(&tp->hanglet, tty3270_hangup_tasklet,
- (unsigned long) tp);
- INIT_WORK(&tp->resize_work, tty3270_resize_work);
-
- return tp;
-
-out_reset:
- raw3270_request_free(tp->kreset);
-out_read:
- raw3270_request_free(tp->read);
-out_write:
- raw3270_request_free(tp->write);
-out_pages:
- while (pages--)
- free_pages((unsigned long) tp->freemem_pages[pages], 0);
- kfree(tp->freemem_pages);
- tty_port_destroy(&tp->port);
-out_tp:
- kfree(tp);
-out_err:
- return ERR_PTR(-ENOMEM);
-}
-
-/*
- * Free tty3270 structure.
- */
-static void
-tty3270_free_view(struct tty3270 *tp)
-{
- int pages;
-
- kbd_free(tp->kbd);
- raw3270_request_free(tp->kreset);
- raw3270_request_free(tp->read);
- raw3270_request_free(tp->write);
- for (pages = 0; pages < TTY3270_STRING_PAGES; pages++)
- free_pages((unsigned long) tp->freemem_pages[pages], 0);
- kfree(tp->freemem_pages);
- tty_port_destroy(&tp->port);
- kfree(tp);
-}
-
-/*
- * Allocate tty3270 screen.
- */
-static struct tty3270_line *
-tty3270_alloc_screen(unsigned int rows, unsigned int cols)
-{
- struct tty3270_line *screen;
- unsigned long size;
- int lines;
-
- size = sizeof(struct tty3270_line) * (rows - 2);
- screen = kzalloc(size, GFP_KERNEL);
- if (!screen)
- goto out_err;
- for (lines = 0; lines < rows - 2; lines++) {
- size = sizeof(struct tty3270_cell) * cols;
- screen[lines].cells = kzalloc(size, GFP_KERNEL);
- if (!screen[lines].cells)
- goto out_screen;
- }
- return screen;
-out_screen:
- while (lines--)
- kfree(screen[lines].cells);
- kfree(screen);
-out_err:
- return ERR_PTR(-ENOMEM);
-}
-
-/*
- * Free tty3270 screen.
- */
-static void
-tty3270_free_screen(struct tty3270_line *screen, unsigned int rows)
-{
- int lines;
-
- for (lines = 0; lines < rows - 2; lines++)
- kfree(screen[lines].cells);
- kfree(screen);
-}
-
-/*
- * Resize tty3270 screen
- */
-static void tty3270_resize_work(struct work_struct *work)
-{
- struct tty3270 *tp = container_of(work, struct tty3270, resize_work);
- struct tty3270_line *screen, *oscreen;
- struct tty_struct *tty;
- unsigned int orows;
- struct winsize ws;
-
- screen = tty3270_alloc_screen(tp->n_rows, tp->n_cols);
- if (IS_ERR(screen))
- return;
- /* Switch to new output size */
- spin_lock_bh(&tp->view.lock);
- tty3270_blank_screen(tp);
- oscreen = tp->screen;
- orows = tp->view.rows;
- tp->view.model = tp->n_model;
- tp->view.rows = tp->n_rows;
- tp->view.cols = tp->n_cols;
- tp->screen = screen;
- free_string(&tp->freemem, tp->prompt);
- free_string(&tp->freemem, tp->status);
- tty3270_create_prompt(tp);
- tty3270_create_status(tp);
- while (tp->nr_lines < tp->view.rows - 2)
- tty3270_blank_line(tp);
- tp->update_flags = TTY_UPDATE_ALL;
- spin_unlock_bh(&tp->view.lock);
- tty3270_free_screen(oscreen, orows);
- tty3270_set_timer(tp, 1);
- /* Informat tty layer about new size */
- tty = tty_port_tty_get(&tp->port);
- if (!tty)
- return;
- ws.ws_row = tp->view.rows - 2;
- ws.ws_col = tp->view.cols;
- tty_do_resize(tty, &ws);
- tty_kref_put(tty);
-}
-
-static void
-tty3270_resize(struct raw3270_view *view, int model, int rows, int cols)
-{
- struct tty3270 *tp = container_of(view, struct tty3270, view);
-
- if (tp->n_model == model && tp->n_rows == rows && tp->n_cols == cols)
- return;
- tp->n_model = model;
- tp->n_rows = rows;
- tp->n_cols = cols;
- schedule_work(&tp->resize_work);
-}
-
-/*
- * Unlink tty3270 data structure from tty.
- */
-static void
-tty3270_release(struct raw3270_view *view)
-{
- struct tty3270 *tp = container_of(view, struct tty3270, view);
- struct tty_struct *tty = tty_port_tty_get(&tp->port);
-
- if (tty) {
- tty->driver_data = NULL;
- tty_port_tty_set(&tp->port, NULL);
- tty_hangup(tty);
- raw3270_put_view(&tp->view);
- tty_kref_put(tty);
- }
-}
-
-/*
- * Free tty3270 data structure
- */
-static void
-tty3270_free(struct raw3270_view *view)
-{
- struct tty3270 *tp = container_of(view, struct tty3270, view);
-
- del_timer_sync(&tp->timer);
- tty3270_free_screen(tp->screen, tp->view.rows);
- tty3270_free_view(tp);
-}
-
-/*
- * Delayed freeing of tty3270 views.
- */
-static void
-tty3270_del_views(void)
-{
- int i;
-
- for (i = RAW3270_FIRSTMINOR; i <= tty3270_max_index; i++) {
- struct raw3270_view *view = raw3270_find_view(&tty3270_fn, i);
- if (!IS_ERR(view))
- raw3270_del_view(view);
- }
-}
-
-static struct raw3270_fn tty3270_fn = {
- .activate = tty3270_activate,
- .deactivate = tty3270_deactivate,
- .intv = (void *) tty3270_irq,
- .release = tty3270_release,
- .free = tty3270_free,
- .resize = tty3270_resize
-};
-
-/*
- * This routine is called whenever a 3270 tty is opened first time.
- */
-static int tty3270_install(struct tty_driver *driver, struct tty_struct *tty)
-{
- struct raw3270_view *view;
- struct tty3270 *tp;
- int i, rc;
-
- /* Check if the tty3270 is already there. */
- view = raw3270_find_view(&tty3270_fn, tty->index + RAW3270_FIRSTMINOR);
- if (!IS_ERR(view)) {
- tp = container_of(view, struct tty3270, view);
- tty->driver_data = tp;
- tty->winsize.ws_row = tp->view.rows - 2;
- tty->winsize.ws_col = tp->view.cols;
- tp->inattr = TF_INPUT;
- goto port_install;
- }
- if (tty3270_max_index < tty->index + 1)
- tty3270_max_index = tty->index + 1;
-
- /* Allocate tty3270 structure on first open. */
- tp = tty3270_alloc_view();
- if (IS_ERR(tp))
- return PTR_ERR(tp);
-
- rc = raw3270_add_view(&tp->view, &tty3270_fn,
- tty->index + RAW3270_FIRSTMINOR,
- RAW3270_VIEW_LOCK_BH);
- if (rc) {
- tty3270_free_view(tp);
- return rc;
- }
-
- tp->screen = tty3270_alloc_screen(tp->view.rows, tp->view.cols);
- if (IS_ERR(tp->screen)) {
- rc = PTR_ERR(tp->screen);
- raw3270_put_view(&tp->view);
- raw3270_del_view(&tp->view);
- tty3270_free_view(tp);
- return rc;
- }
-
- tty->winsize.ws_row = tp->view.rows - 2;
- tty->winsize.ws_col = tp->view.cols;
-
- tty3270_create_prompt(tp);
- tty3270_create_status(tp);
- tty3270_update_status(tp);
-
- /* Create blank line for every line in the tty output area. */
- for (i = 0; i < tp->view.rows - 2; i++)
- tty3270_blank_line(tp);
-
- tp->kbd->port = &tp->port;
- tp->kbd->fn_handler[KVAL(K_INCRCONSOLE)] = tty3270_exit_tty;
- tp->kbd->fn_handler[KVAL(K_SCROLLBACK)] = tty3270_scroll_backward;
- tp->kbd->fn_handler[KVAL(K_SCROLLFORW)] = tty3270_scroll_forward;
- tp->kbd->fn_handler[KVAL(K_CONS)] = tty3270_rcl_backward;
- kbd_ascebc(tp->kbd, tp->view.ascebc);
-
- raw3270_activate_view(&tp->view);
-
-port_install:
- rc = tty_port_install(&tp->port, driver, tty);
- if (rc) {
- raw3270_put_view(&tp->view);
- return rc;
- }
-
- tty->driver_data = tp;
-
- return 0;
-}
-
-/*
- * This routine is called whenever a 3270 tty is opened.
- */
-static int
-tty3270_open(struct tty_struct *tty, struct file *filp)
-{
- struct tty3270 *tp = tty->driver_data;
- struct tty_port *port = &tp->port;
-
- port->count++;
- tty_port_tty_set(port, tty);
- return 0;
-}
-
-/*
- * This routine is called when the 3270 tty is closed. We wait
- * for the remaining request to be completed. Then we clean up.
- */
-static void
-tty3270_close(struct tty_struct *tty, struct file * filp)
-{
- struct tty3270 *tp = tty->driver_data;
-
- if (tty->count > 1)
- return;
- if (tp)
- tty_port_tty_set(&tp->port, NULL);
-}
-
-static void tty3270_cleanup(struct tty_struct *tty)
-{
- struct tty3270 *tp = tty->driver_data;
-
- if (tp) {
- tty->driver_data = NULL;
- raw3270_put_view(&tp->view);
- }
-}
-
-/*
- * We always have room.
- */
-static unsigned int
-tty3270_write_room(struct tty_struct *tty)
-{
- return INT_MAX;
-}
-
-/*
- * Insert character into the screen at the current position with the
- * current color and highlight. This function does NOT do cursor movement.
- */
-static void tty3270_put_character(struct tty3270 *tp, char ch)
-{
- struct tty3270_line *line;
- struct tty3270_cell *cell;
-
- line = tp->screen + tp->cy;
- if (line->len <= tp->cx) {
- while (line->len < tp->cx) {
- cell = line->cells + line->len;
- cell->character = tp->view.ascebc[' '];
- cell->highlight = tp->highlight;
- cell->f_color = tp->f_color;
- line->len++;
- }
- line->len++;
- }
- cell = line->cells + tp->cx;
- cell->character = tp->view.ascebc[(unsigned int) ch];
- cell->highlight = tp->highlight;
- cell->f_color = tp->f_color;
-}
-
-/*
- * Convert a tty3270_line to a 3270 data fragment usable for output.
- */
-static void
-tty3270_convert_line(struct tty3270 *tp, int line_nr)
-{
- struct tty3270_line *line;
- struct tty3270_cell *cell;
- struct string *s, *n;
- unsigned char highlight;
- unsigned char f_color;
- char *cp;
- int flen, i;
-
- /* Determine how long the fragment will be. */
- flen = 3; /* Prefix (TO_SBA). */
- line = tp->screen + line_nr;
- flen += line->len;
- highlight = TAX_RESET;
- f_color = TAC_RESET;
- for (i = 0, cell = line->cells; i < line->len; i++, cell++) {
- if (cell->highlight != highlight) {
- flen += 3; /* TO_SA to switch highlight. */
- highlight = cell->highlight;
- }
- if (cell->f_color != f_color) {
- flen += 3; /* TO_SA to switch color. */
- f_color = cell->f_color;
- }
- }
- if (highlight != TAX_RESET)
- flen += 3; /* TO_SA to reset hightlight. */
- if (f_color != TAC_RESET)
- flen += 3; /* TO_SA to reset color. */
- if (line->len < tp->view.cols)
- flen += 4; /* Postfix (TO_RA). */
-
- /* Find the line in the list. */
- i = tp->view.rows - 2 - line_nr;
- list_for_each_entry_reverse(s, &tp->lines, list)
- if (--i <= 0)
- break;
- /*
- * Check if the line needs to get reallocated.
- */
- if (s->len != flen) {
- /* Reallocate string. */
- n = tty3270_alloc_string(tp, flen);
- list_add(&n->list, &s->list);
- list_del_init(&s->list);
- if (!list_empty(&s->update))
- list_del_init(&s->update);
- free_string(&tp->freemem, s);
- s = n;
- }
-
- /* Write 3270 data fragment. */
- cp = s->string;
- *cp++ = TO_SBA;
- *cp++ = 0;
- *cp++ = 0;
-
- highlight = TAX_RESET;
- f_color = TAC_RESET;
- for (i = 0, cell = line->cells; i < line->len; i++, cell++) {
- if (cell->highlight != highlight) {
- *cp++ = TO_SA;
- *cp++ = TAT_EXTHI;
- *cp++ = cell->highlight;
- highlight = cell->highlight;
- }
- if (cell->f_color != f_color) {
- *cp++ = TO_SA;
- *cp++ = TAT_COLOR;
- *cp++ = cell->f_color;
- f_color = cell->f_color;
- }
- *cp++ = cell->character;
- }
- if (highlight != TAX_RESET) {
- *cp++ = TO_SA;
- *cp++ = TAT_EXTHI;
- *cp++ = TAX_RESET;
- }
- if (f_color != TAC_RESET) {
- *cp++ = TO_SA;
- *cp++ = TAT_COLOR;
- *cp++ = TAC_RESET;
- }
- if (line->len < tp->view.cols) {
- *cp++ = TO_RA;
- *cp++ = 0;
- *cp++ = 0;
- *cp++ = 0;
- }
-
- if (tp->nr_up + line_nr < tp->view.rows - 2) {
- /* Line is currently visible on screen. */
- tty3270_update_string(tp, s, line_nr);
- /* Add line to update list. */
- if (list_empty(&s->update)) {
- list_add_tail(&s->update, &tp->update);
- tp->update_flags |= TTY_UPDATE_LIST;
- }
- }
-}
-
-/*
- * Do carriage return.
- */
-static void
-tty3270_cr(struct tty3270 *tp)
-{
- tp->cx = 0;
-}
-
-/*
- * Do line feed.
- */
-static void
-tty3270_lf(struct tty3270 *tp)
-{
- struct tty3270_line temp;
- int i;
-
- tty3270_convert_line(tp, tp->cy);
- if (tp->cy < tp->view.rows - 3) {
- tp->cy++;
- return;
- }
- /* Last line just filled up. Add new, blank line. */
- tty3270_blank_line(tp);
- temp = tp->screen[0];
- temp.len = 0;
- for (i = 0; i < tp->view.rows - 3; i++)
- tp->screen[i] = tp->screen[i+1];
- tp->screen[tp->view.rows - 3] = temp;
- tty3270_rebuild_update(tp);
-}
-
-static void
-tty3270_ri(struct tty3270 *tp)
-{
- if (tp->cy > 0) {
- tty3270_convert_line(tp, tp->cy);
- tp->cy--;
- }
-}
-
-/*
- * Insert characters at current position.
- */
-static void
-tty3270_insert_characters(struct tty3270 *tp, int n)
-{
- struct tty3270_line *line;
- int k;
-
- line = tp->screen + tp->cy;
- while (line->len < tp->cx) {
- line->cells[line->len].character = tp->view.ascebc[' '];
- line->cells[line->len].highlight = TAX_RESET;
- line->cells[line->len].f_color = TAC_RESET;
- line->len++;
- }
- if (n > tp->view.cols - tp->cx)
- n = tp->view.cols - tp->cx;
- k = min_t(int, line->len - tp->cx, tp->view.cols - tp->cx - n);
- while (k--)
- line->cells[tp->cx + n + k] = line->cells[tp->cx + k];
- line->len += n;
- if (line->len > tp->view.cols)
- line->len = tp->view.cols;
- while (n-- > 0) {
- line->cells[tp->cx + n].character = tp->view.ascebc[' '];
- line->cells[tp->cx + n].highlight = tp->highlight;
- line->cells[tp->cx + n].f_color = tp->f_color;
- }
-}
-
-/*
- * Delete characters at current position.
- */
-static void
-tty3270_delete_characters(struct tty3270 *tp, int n)
-{
- struct tty3270_line *line;
- int i;
-
- line = tp->screen + tp->cy;
- if (line->len <= tp->cx)
- return;
- if (line->len - tp->cx <= n) {
- line->len = tp->cx;
- return;
- }
- for (i = tp->cx; i + n < line->len; i++)
- line->cells[i] = line->cells[i + n];
- line->len -= n;
-}
-
-/*
- * Erase characters at current position.
- */
-static void
-tty3270_erase_characters(struct tty3270 *tp, int n)
-{
- struct tty3270_line *line;
- struct tty3270_cell *cell;
-
- line = tp->screen + tp->cy;
- while (line->len > tp->cx && n-- > 0) {
- cell = line->cells + tp->cx++;
- cell->character = ' ';
- cell->highlight = TAX_RESET;
- cell->f_color = TAC_RESET;
- }
- tp->cx += n;
- tp->cx = min_t(int, tp->cx, tp->view.cols - 1);
-}
-
-/*
- * Erase line, 3 different cases:
- * Esc [ 0 K Erase from current position to end of line inclusive
- * Esc [ 1 K Erase from beginning of line to current position inclusive
- * Esc [ 2 K Erase entire line (without moving cursor)
- */
-static void
-tty3270_erase_line(struct tty3270 *tp, int mode)
-{
- struct tty3270_line *line;
- struct tty3270_cell *cell;
- int i;
-
- line = tp->screen + tp->cy;
- if (mode == 0)
- line->len = tp->cx;
- else if (mode == 1) {
- for (i = 0; i < tp->cx; i++) {
- cell = line->cells + i;
- cell->character = ' ';
- cell->highlight = TAX_RESET;
- cell->f_color = TAC_RESET;
- }
- if (line->len <= tp->cx)
- line->len = tp->cx + 1;
- } else if (mode == 2)
- line->len = 0;
- tty3270_convert_line(tp, tp->cy);
-}
-
-/*
- * Erase display, 3 different cases:
- * Esc [ 0 J Erase from current position to bottom of screen inclusive
- * Esc [ 1 J Erase from top of screen to current position inclusive
- * Esc [ 2 J Erase entire screen (without moving the cursor)
- */
-static void
-tty3270_erase_display(struct tty3270 *tp, int mode)
-{
- int i;
-
- if (mode == 0) {
- tty3270_erase_line(tp, 0);
- for (i = tp->cy + 1; i < tp->view.rows - 2; i++) {
- tp->screen[i].len = 0;
- tty3270_convert_line(tp, i);
- }
- } else if (mode == 1) {
- for (i = 0; i < tp->cy; i++) {
- tp->screen[i].len = 0;
- tty3270_convert_line(tp, i);
- }
- tty3270_erase_line(tp, 1);
- } else if (mode == 2) {
- for (i = 0; i < tp->view.rows - 2; i++) {
- tp->screen[i].len = 0;
- tty3270_convert_line(tp, i);
- }
- }
- tty3270_rebuild_update(tp);
-}
-
-/*
- * Set attributes found in an escape sequence.
- * Esc [ <attr> ; <attr> ; ... m
- */
-static void
-tty3270_set_attributes(struct tty3270 *tp)
-{
- static unsigned char f_colors[] = {
- TAC_DEFAULT, TAC_RED, TAC_GREEN, TAC_YELLOW, TAC_BLUE,
- TAC_PINK, TAC_TURQ, TAC_WHITE, 0, TAC_DEFAULT
- };
- int i, attr;
-
- for (i = 0; i <= tp->esc_npar; i++) {
- attr = tp->esc_par[i];
- switch (attr) {
- case 0: /* Reset */
- tp->highlight = TAX_RESET;
- tp->f_color = TAC_RESET;
- break;
- /* Highlight. */
- case 4: /* Start underlining. */
- tp->highlight = TAX_UNDER;
- break;
- case 5: /* Start blink. */
- tp->highlight = TAX_BLINK;
- break;
- case 7: /* Start reverse. */
- tp->highlight = TAX_REVER;
- break;
- case 24: /* End underlining */
- if (tp->highlight == TAX_UNDER)
- tp->highlight = TAX_RESET;
- break;
- case 25: /* End blink. */
- if (tp->highlight == TAX_BLINK)
- tp->highlight = TAX_RESET;
- break;
- case 27: /* End reverse. */
- if (tp->highlight == TAX_REVER)
- tp->highlight = TAX_RESET;
- break;
- /* Foreground color. */
- case 30: /* Black */
- case 31: /* Red */
- case 32: /* Green */
- case 33: /* Yellow */
- case 34: /* Blue */
- case 35: /* Magenta */
- case 36: /* Cyan */
- case 37: /* White */
- case 39: /* Black */
- tp->f_color = f_colors[attr - 30];
- break;
- }
- }
-}
-
-static inline int
-tty3270_getpar(struct tty3270 *tp, int ix)
-{
- return (tp->esc_par[ix] > 0) ? tp->esc_par[ix] : 1;
-}
-
-static void
-tty3270_goto_xy(struct tty3270 *tp, int cx, int cy)
-{
- int max_cx = max(0, cx);
- int max_cy = max(0, cy);
-
- tp->cx = min_t(int, tp->view.cols - 1, max_cx);
- cy = min_t(int, tp->view.rows - 3, max_cy);
- if (cy != tp->cy) {
- tty3270_convert_line(tp, tp->cy);
- tp->cy = cy;
- }
-}
-
-/*
- * Process escape sequences. Known sequences:
- * Esc 7 Save Cursor Position
- * Esc 8 Restore Cursor Position
- * Esc [ Pn ; Pn ; .. m Set attributes
- * Esc [ Pn ; Pn H Cursor Position
- * Esc [ Pn ; Pn f Cursor Position
- * Esc [ Pn A Cursor Up
- * Esc [ Pn B Cursor Down
- * Esc [ Pn C Cursor Forward
- * Esc [ Pn D Cursor Backward
- * Esc [ Pn G Cursor Horizontal Absolute
- * Esc [ Pn X Erase Characters
- * Esc [ Ps J Erase in Display
- * Esc [ Ps K Erase in Line
- * // FIXME: add all the new ones.
- *
- * Pn is a numeric parameter, a string of zero or more decimal digits.
- * Ps is a selective parameter.
- */
-static void
-tty3270_escape_sequence(struct tty3270 *tp, char ch)
-{
- enum { ESnormal, ESesc, ESsquare, ESgetpars };
-
- if (tp->esc_state == ESnormal) {
- if (ch == 0x1b)
- /* Starting new escape sequence. */
- tp->esc_state = ESesc;
- return;
- }
- if (tp->esc_state == ESesc) {
- tp->esc_state = ESnormal;
- switch (ch) {
- case '[':
- tp->esc_state = ESsquare;
- break;
- case 'E':
- tty3270_cr(tp);
- tty3270_lf(tp);
- break;
- case 'M':
- tty3270_ri(tp);
- break;
- case 'D':
- tty3270_lf(tp);
- break;
- case 'Z': /* Respond ID. */
- kbd_puts_queue(&tp->port, "\033[?6c");
- break;
- case '7': /* Save cursor position. */
- tp->saved_cx = tp->cx;
- tp->saved_cy = tp->cy;
- tp->saved_highlight = tp->highlight;
- tp->saved_f_color = tp->f_color;
- break;
- case '8': /* Restore cursor position. */
- tty3270_convert_line(tp, tp->cy);
- tty3270_goto_xy(tp, tp->saved_cx, tp->saved_cy);
- tp->highlight = tp->saved_highlight;
- tp->f_color = tp->saved_f_color;
- break;
- case 'c': /* Reset terminal. */
- tp->cx = tp->saved_cx = 0;
- tp->cy = tp->saved_cy = 0;
- tp->highlight = tp->saved_highlight = TAX_RESET;
- tp->f_color = tp->saved_f_color = TAC_RESET;
- tty3270_erase_display(tp, 2);
- break;
- }
- return;
- }
- if (tp->esc_state == ESsquare) {
- tp->esc_state = ESgetpars;
- memset(tp->esc_par, 0, sizeof(tp->esc_par));
- tp->esc_npar = 0;
- tp->esc_ques = (ch == '?');
- if (tp->esc_ques)
- return;
- }
- if (tp->esc_state == ESgetpars) {
- if (ch == ';' && tp->esc_npar < ESCAPE_NPAR - 1) {
- tp->esc_npar++;
- return;
- }
- if (ch >= '0' && ch <= '9') {
- tp->esc_par[tp->esc_npar] *= 10;
- tp->esc_par[tp->esc_npar] += ch - '0';
- return;
- }
- }
- tp->esc_state = ESnormal;
- if (ch == 'n' && !tp->esc_ques) {
- if (tp->esc_par[0] == 5) /* Status report. */
- kbd_puts_queue(&tp->port, "\033[0n");
- else if (tp->esc_par[0] == 6) { /* Cursor report. */
- char buf[40];
- sprintf(buf, "\033[%d;%dR", tp->cy + 1, tp->cx + 1);
- kbd_puts_queue(&tp->port, buf);
- }
- return;
- }
- if (tp->esc_ques)
- return;
- switch (ch) {
- case 'm':
- tty3270_set_attributes(tp);
- break;
- case 'H': /* Set cursor position. */
- case 'f':
- tty3270_goto_xy(tp, tty3270_getpar(tp, 1) - 1,
- tty3270_getpar(tp, 0) - 1);
- break;
- case 'd': /* Set y position. */
- tty3270_goto_xy(tp, tp->cx, tty3270_getpar(tp, 0) - 1);
- break;
- case 'A': /* Cursor up. */
- case 'F':
- tty3270_goto_xy(tp, tp->cx, tp->cy - tty3270_getpar(tp, 0));
- break;
- case 'B': /* Cursor down. */
- case 'e':
- case 'E':
- tty3270_goto_xy(tp, tp->cx, tp->cy + tty3270_getpar(tp, 0));
- break;
- case 'C': /* Cursor forward. */
- case 'a':
- tty3270_goto_xy(tp, tp->cx + tty3270_getpar(tp, 0), tp->cy);
- break;
- case 'D': /* Cursor backward. */
- tty3270_goto_xy(tp, tp->cx - tty3270_getpar(tp, 0), tp->cy);
- break;
- case 'G': /* Set x position. */
- case '`':
- tty3270_goto_xy(tp, tty3270_getpar(tp, 0), tp->cy);
- break;
- case 'X': /* Erase Characters. */
- tty3270_erase_characters(tp, tty3270_getpar(tp, 0));
- break;
- case 'J': /* Erase display. */
- tty3270_erase_display(tp, tp->esc_par[0]);
- break;
- case 'K': /* Erase line. */
- tty3270_erase_line(tp, tp->esc_par[0]);
- break;
- case 'P': /* Delete characters. */
- tty3270_delete_characters(tp, tty3270_getpar(tp, 0));
- break;
- case '@': /* Insert characters. */
- tty3270_insert_characters(tp, tty3270_getpar(tp, 0));
- break;
- case 's': /* Save cursor position. */
- tp->saved_cx = tp->cx;
- tp->saved_cy = tp->cy;
- tp->saved_highlight = tp->highlight;
- tp->saved_f_color = tp->f_color;
- break;
- case 'u': /* Restore cursor position. */
- tty3270_convert_line(tp, tp->cy);
- tty3270_goto_xy(tp, tp->saved_cx, tp->saved_cy);
- tp->highlight = tp->saved_highlight;
- tp->f_color = tp->saved_f_color;
- break;
- }
-}
-
-/*
- * String write routine for 3270 ttys
- */
-static void
-tty3270_do_write(struct tty3270 *tp, struct tty_struct *tty,
- const unsigned char *buf, int count)
-{
- int i_msg, i;
-
- spin_lock_bh(&tp->view.lock);
- for (i_msg = 0; !tty->flow.stopped && i_msg < count; i_msg++) {
- if (tp->esc_state != 0) {
- /* Continue escape sequence. */
- tty3270_escape_sequence(tp, buf[i_msg]);
- continue;
- }
-
- switch (buf[i_msg]) {
- case 0x07: /* '\a' -- Alarm */
- tp->wcc |= TW_PLUSALARM;
- break;
- case 0x08: /* Backspace. */
- if (tp->cx > 0) {
- tp->cx--;
- tty3270_put_character(tp, ' ');
- }
- break;
- case 0x09: /* '\t' -- Tabulate */
- for (i = tp->cx % 8; i < 8; i++) {
- if (tp->cx >= tp->view.cols) {
- tty3270_cr(tp);
- tty3270_lf(tp);
- break;
- }
- tty3270_put_character(tp, ' ');
- tp->cx++;
- }
- break;
- case 0x0a: /* '\n' -- New Line */
- tty3270_cr(tp);
- tty3270_lf(tp);
- break;
- case 0x0c: /* '\f' -- Form Feed */
- tty3270_erase_display(tp, 2);
- tp->cx = tp->cy = 0;
- break;
- case 0x0d: /* '\r' -- Carriage Return */
- tp->cx = 0;
- break;
- case 0x0f: /* SuSE "exit alternate mode" */
- break;
- case 0x1b: /* Start escape sequence. */
- tty3270_escape_sequence(tp, buf[i_msg]);
- break;
- default: /* Insert normal character. */
- if (tp->cx >= tp->view.cols) {
- tty3270_cr(tp);
- tty3270_lf(tp);
- }
- tty3270_put_character(tp, buf[i_msg]);
- tp->cx++;
- break;
- }
- }
- /* Convert current line to 3270 data fragment. */
- tty3270_convert_line(tp, tp->cy);
-
- /* Setup timer to update display after 1/10 second */
- if (!timer_pending(&tp->timer))
- tty3270_set_timer(tp, HZ/10);
-
- spin_unlock_bh(&tp->view.lock);
-}
-
-/*
- * String write routine for 3270 ttys
- */
-static int
-tty3270_write(struct tty_struct * tty,
- const unsigned char *buf, int count)
-{
- struct tty3270 *tp;
-
- tp = tty->driver_data;
- if (!tp)
- return 0;
- if (tp->char_count > 0) {
- tty3270_do_write(tp, tty, tp->char_buf, tp->char_count);
- tp->char_count = 0;
- }
- tty3270_do_write(tp, tty, buf, count);
- return count;
-}
-
-/*
- * Put single characters to the ttys character buffer
- */
-static int tty3270_put_char(struct tty_struct *tty, unsigned char ch)
-{
- struct tty3270 *tp;
-
- tp = tty->driver_data;
- if (!tp || tp->char_count >= TTY3270_CHAR_BUF_SIZE)
- return 0;
- tp->char_buf[tp->char_count++] = ch;
- return 1;
-}
-
-/*
- * Flush all characters from the ttys characeter buffer put there
- * by tty3270_put_char.
- */
-static void
-tty3270_flush_chars(struct tty_struct *tty)
-{
- struct tty3270 *tp;
-
- tp = tty->driver_data;
- if (!tp)
- return;
- if (tp->char_count > 0) {
- tty3270_do_write(tp, tty, tp->char_buf, tp->char_count);
- tp->char_count = 0;
- }
-}
-
-/*
- * Check for visible/invisible input switches
- */
-static void
-tty3270_set_termios(struct tty_struct *tty, const struct ktermios *old)
-{
- struct tty3270 *tp;
- int new;
-
- tp = tty->driver_data;
- if (!tp)
- return;
- spin_lock_bh(&tp->view.lock);
- if (L_ICANON(tty)) {
- new = L_ECHO(tty) ? TF_INPUT: TF_INPUTN;
- if (new != tp->inattr) {
- tp->inattr = new;
- tty3270_update_prompt(tp, NULL, 0);
- tty3270_set_timer(tp, 1);
- }
- }
- spin_unlock_bh(&tp->view.lock);
-}
-
-/*
- * Disable reading from a 3270 tty
- */
-static void
-tty3270_throttle(struct tty_struct * tty)
-{
- struct tty3270 *tp;
-
- tp = tty->driver_data;
- if (!tp)
- return;
- tp->throttle = 1;
-}
-
-/*
- * Enable reading from a 3270 tty
- */
-static void
-tty3270_unthrottle(struct tty_struct * tty)
-{
- struct tty3270 *tp;
-
- tp = tty->driver_data;
- if (!tp)
- return;
- tp->throttle = 0;
- if (tp->attn)
- tty3270_issue_read(tp, 1);
-}
-
-/*
- * Hang up the tty device.
- */
-static void
-tty3270_hangup(struct tty_struct *tty)
-{
- struct tty3270 *tp;
-
- tp = tty->driver_data;
- if (!tp)
- return;
- spin_lock_bh(&tp->view.lock);
- tp->cx = tp->saved_cx = 0;
- tp->cy = tp->saved_cy = 0;
- tp->highlight = tp->saved_highlight = TAX_RESET;
- tp->f_color = tp->saved_f_color = TAC_RESET;
- tty3270_blank_screen(tp);
- while (tp->nr_lines < tp->view.rows - 2)
- tty3270_blank_line(tp);
- tp->update_flags = TTY_UPDATE_ALL;
- spin_unlock_bh(&tp->view.lock);
- tty3270_set_timer(tp, 1);
-}
-
-static void
-tty3270_wait_until_sent(struct tty_struct *tty, int timeout)
-{
-}
-
-static int tty3270_ioctl(struct tty_struct *tty, unsigned int cmd,
- unsigned long arg)
-{
- struct tty3270 *tp;
-
- tp = tty->driver_data;
- if (!tp)
- return -ENODEV;
- if (tty_io_error(tty))
- return -EIO;
- return kbd_ioctl(tp->kbd, cmd, arg);
-}
-
-#ifdef CONFIG_COMPAT
-static long tty3270_compat_ioctl(struct tty_struct *tty,
- unsigned int cmd, unsigned long arg)
-{
- struct tty3270 *tp;
-
- tp = tty->driver_data;
- if (!tp)
- return -ENODEV;
- if (tty_io_error(tty))
- return -EIO;
- return kbd_ioctl(tp->kbd, cmd, (unsigned long)compat_ptr(arg));
-}
-#endif
-
-static const struct tty_operations tty3270_ops = {
- .install = tty3270_install,
- .cleanup = tty3270_cleanup,
- .open = tty3270_open,
- .close = tty3270_close,
- .write = tty3270_write,
- .put_char = tty3270_put_char,
- .flush_chars = tty3270_flush_chars,
- .write_room = tty3270_write_room,
- .throttle = tty3270_throttle,
- .unthrottle = tty3270_unthrottle,
- .hangup = tty3270_hangup,
- .wait_until_sent = tty3270_wait_until_sent,
- .ioctl = tty3270_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = tty3270_compat_ioctl,
-#endif
- .set_termios = tty3270_set_termios
-};
-
-static void tty3270_create_cb(int minor)
-{
- tty_register_device(tty3270_driver, minor - RAW3270_FIRSTMINOR, NULL);
-}
-
-static void tty3270_destroy_cb(int minor)
-{
- tty_unregister_device(tty3270_driver, minor - RAW3270_FIRSTMINOR);
-}
-
-static struct raw3270_notifier tty3270_notifier =
-{
- .create = tty3270_create_cb,
- .destroy = tty3270_destroy_cb,
-};
-
-/*
- * 3270 tty registration code called from tty_init().
- * Most kernel services (incl. kmalloc) are available at this poimt.
- */
-static int __init tty3270_init(void)
-{
- struct tty_driver *driver;
- int ret;
-
- driver = tty_alloc_driver(RAW3270_MAXDEVS,
- TTY_DRIVER_REAL_RAW |
- TTY_DRIVER_DYNAMIC_DEV |
- TTY_DRIVER_RESET_TERMIOS);
- if (IS_ERR(driver))
- return PTR_ERR(driver);
-
- /*
- * Initialize the tty_driver structure
- * Entries in tty3270_driver that are NOT initialized:
- * proc_entry, set_termios, flush_buffer, set_ldisc, write_proc
- */
- driver->driver_name = "tty3270";
- driver->name = "3270/tty";
- driver->major = IBM_TTY3270_MAJOR;
- driver->minor_start = RAW3270_FIRSTMINOR;
- driver->name_base = RAW3270_FIRSTMINOR;
- driver->type = TTY_DRIVER_TYPE_SYSTEM;
- driver->subtype = SYSTEM_TYPE_TTY;
- driver->init_termios = tty_std_termios;
- tty_set_operations(driver, &tty3270_ops);
- ret = tty_register_driver(driver);
- if (ret) {
- tty_driver_kref_put(driver);
- return ret;
- }
- tty3270_driver = driver;
- raw3270_register_notifier(&tty3270_notifier);
- return 0;
-}
-
-static void __exit
-tty3270_exit(void)
-{
- struct tty_driver *driver;
-
- raw3270_unregister_notifier(&tty3270_notifier);
- driver = tty3270_driver;
- tty3270_driver = NULL;
- tty_unregister_driver(driver);
- tty_driver_kref_put(driver);
- tty3270_del_views();
-}
-
-MODULE_LICENSE("GPL");
-MODULE_ALIAS_CHARDEV_MAJOR(IBM_TTY3270_MAJOR);
-
-module_init(tty3270_init);
-module_exit(tty3270_exit);
diff --git a/drivers/s390/char/tty3270.h b/drivers/s390/char/tty3270.h
deleted file mode 100644
index 52ceed6f8408..000000000000
--- a/drivers/s390/char/tty3270.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright IBM Corp. 2007
- *
- */
-
-#ifndef __DRIVERS_S390_CHAR_TTY3270_H
-#define __DRIVERS_S390_CHAR_TTY3270_H
-
-#include <linux/tty.h>
-#include <linux/tty_driver.h>
-
-extern struct tty_driver *tty3270_driver;
-
-#endif /* __DRIVERS_S390_CHAR_TTY3270_H */
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index c7db95398500..3ef636935a54 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -740,12 +740,21 @@ void css_schedule_eval_all(void)
spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}
-static int __unset_registered(struct device *dev, void *data)
+static int __unset_validpath(struct device *dev, void *data)
{
struct idset *set = data;
struct subchannel *sch = to_subchannel(dev);
+ struct pmcw *pmcw = &sch->schib.pmcw;
+
+ /* Here we want to make sure that we are considering only those subchannels
+ * which do not have an operational device attached to it. This can be found
+ * with the help of PAM and POM values of pmcw. OPM provides the information
+ * about any path which is currently vary-off, so that we should not consider.
+ */
+ if (sch->st == SUBCHANNEL_TYPE_IO &&
+ (sch->opm & pmcw->pam & pmcw->pom))
+ idset_sch_del(set, sch->schid);
- idset_sch_del(set, sch->schid);
return 0;
}
@@ -774,8 +783,8 @@ void css_schedule_eval_cond(enum css_eval_cond cond, unsigned long delay)
}
idset_fill(set);
switch (cond) {
- case CSS_EVAL_UNREG:
- bus_for_each_dev(&css_bus_type, NULL, set, __unset_registered);
+ case CSS_EVAL_NO_PATH:
+ bus_for_each_dev(&css_bus_type, NULL, set, __unset_validpath);
break;
case CSS_EVAL_NOT_ONLINE:
bus_for_each_dev(&css_bus_type, NULL, set, __unset_online);
@@ -798,11 +807,11 @@ void css_wait_for_slow_path(void)
flush_workqueue(cio_work_q);
}
-/* Schedule reprobing of all unregistered subchannels. */
+/* Schedule reprobing of all subchannels with no valid operational path. */
void css_schedule_reprobe(void)
{
/* Schedule with a delay to allow merging of subsequent calls. */
- css_schedule_eval_cond(CSS_EVAL_UNREG, 1 * HZ);
+ css_schedule_eval_cond(CSS_EVAL_NO_PATH, 1 * HZ);
}
EXPORT_SYMBOL_GPL(css_schedule_reprobe);
@@ -1402,9 +1411,9 @@ static void css_shutdown(struct device *dev)
sch->driver->shutdown(sch);
}
-static int css_uevent(struct device *dev, struct kobj_uevent_env *env)
+static int css_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
- struct subchannel *sch = to_subchannel(dev);
+ const struct subchannel *sch = to_subchannel(dev);
int ret;
ret = add_uevent_var(env, "ST=%01X", sch->st);
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h
index ede0b905bc6f..ea5550554297 100644
--- a/drivers/s390/cio/css.h
+++ b/drivers/s390/cio/css.h
@@ -38,7 +38,7 @@
* Conditions used to specify which subchannels need evaluation
*/
enum css_eval_cond {
- CSS_EVAL_UNREG, /* unregistered subchannels */
+ CSS_EVAL_NO_PATH, /* Subchannels with no operational paths */
CSS_EVAL_NOT_ONLINE /* sch without an online-device */
};
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 9e0cf44ff9d4..8eb089b99cde 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -80,7 +80,7 @@ ccw_bus_match (struct device * dev, struct device_driver * drv)
* specified size. Return length of resulting string (excluding trailing '\0')
* even if string doesn't fit buffer (snprintf semantics). */
static int snprint_alias(char *buf, size_t size,
- struct ccw_device_id *id, const char *suffix)
+ const struct ccw_device_id *id, const char *suffix)
{
int len;
@@ -101,10 +101,10 @@ static int snprint_alias(char *buf, size_t size,
/* Set up environment variables for ccw device uevent. Return 0 on success,
* non-zero otherwise. */
-static int ccw_uevent(struct device *dev, struct kobj_uevent_env *env)
+static int ccw_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
- struct ccw_device *cdev = to_ccwdev(dev);
- struct ccw_device_id *id = &(cdev->id);
+ const struct ccw_device *cdev = to_ccwdev(dev);
+ const struct ccw_device_id *id = &(cdev->id);
int ret;
char modalias_buf[30];
@@ -244,10 +244,13 @@ int ccw_device_is_orphan(struct ccw_device *cdev)
static void ccw_device_unregister(struct ccw_device *cdev)
{
+ mutex_lock(&cdev->reg_mutex);
if (device_is_registered(&cdev->dev)) {
/* Undo device_add(). */
device_del(&cdev->dev);
}
+ mutex_unlock(&cdev->reg_mutex);
+
if (cdev->private->flags.initialized) {
cdev->private->flags.initialized = 0;
/* Release reference from device_initialize(). */
@@ -653,11 +656,13 @@ static void ccw_device_do_unbind_bind(struct ccw_device *cdev)
{
int ret;
+ mutex_lock(&cdev->reg_mutex);
if (device_is_registered(&cdev->dev)) {
device_release_driver(&cdev->dev);
ret = device_attach(&cdev->dev);
WARN_ON(ret == -ENODEV);
}
+ mutex_unlock(&cdev->reg_mutex);
}
static void
@@ -740,6 +745,7 @@ static int io_subchannel_initialize_dev(struct subchannel *sch,
INIT_LIST_HEAD(&priv->cmb_list);
init_waitqueue_head(&priv->wait_q);
timer_setup(&priv->timer, ccw_device_timeout, 0);
+ mutex_init(&cdev->reg_mutex);
atomic_set(&priv->onoff, 0);
cdev->ccwlock = sch->lock;
@@ -825,6 +831,7 @@ static void io_subchannel_register(struct ccw_device *cdev)
* be registered). We need to reprobe since we may now have sense id
* information.
*/
+ mutex_lock(&cdev->reg_mutex);
if (device_is_registered(&cdev->dev)) {
if (!cdev->drv) {
ret = device_reprobe(&cdev->dev);
@@ -847,12 +854,14 @@ static void io_subchannel_register(struct ccw_device *cdev)
spin_lock_irqsave(sch->lock, flags);
sch_set_cdev(sch, NULL);
spin_unlock_irqrestore(sch->lock, flags);
+ mutex_unlock(&cdev->reg_mutex);
/* Release initial device reference. */
put_device(&cdev->dev);
goto out_err;
}
out:
cdev->private->flags.recog_done = 1;
+ mutex_unlock(&cdev->reg_mutex);
wake_up(&cdev->private->wait_q);
out_err:
if (adjust_init_count && atomic_dec_and_test(&ccw_device_init_count))
diff --git a/drivers/s390/cio/scm.c b/drivers/s390/cio/scm.c
index b6b4589c70bd..6b21ba68c1fe 100644
--- a/drivers/s390/cio/scm.c
+++ b/drivers/s390/cio/scm.c
@@ -37,7 +37,7 @@ static void scmdev_remove(struct device *dev)
scmdrv->remove(scmdev);
}
-static int scmdev_uevent(struct device *dev, struct kobj_uevent_env *env)
+static int scmdev_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
return add_uevent_var(env, "MODALIAS=scm:scmdev");
}
diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c
index c0a09fa8991a..1c31e81ca8de 100644
--- a/drivers/s390/cio/vfio_ccw_cp.c
+++ b/drivers/s390/cio/vfio_ccw_cp.c
@@ -42,8 +42,7 @@ struct ccwchain {
/*
* page_array_alloc() - alloc memory for page array
* @pa: page_array on which to perform the operation
- * @iova: target guest physical address
- * @len: number of bytes that should be pinned from @iova
+ * @len: number of pages that should be pinned from @iova
*
* Attempt to allocate memory for page array.
*
@@ -56,31 +55,24 @@ struct ccwchain {
* -EINVAL if pa->pa_nr is not initially zero, or pa->pa_iova is not NULL
* -ENOMEM if alloc failed
*/
-static int page_array_alloc(struct page_array *pa, u64 iova, unsigned int len)
+static int page_array_alloc(struct page_array *pa, unsigned int len)
{
- int i;
-
if (pa->pa_nr || pa->pa_iova)
return -EINVAL;
- pa->pa_nr = ((iova & ~PAGE_MASK) + len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
- if (!pa->pa_nr)
+ if (len == 0)
return -EINVAL;
- pa->pa_iova = kcalloc(pa->pa_nr,
- sizeof(*pa->pa_iova) + sizeof(*pa->pa_page),
- GFP_KERNEL);
- if (unlikely(!pa->pa_iova)) {
- pa->pa_nr = 0;
+ pa->pa_nr = len;
+
+ pa->pa_iova = kcalloc(len, sizeof(*pa->pa_iova), GFP_KERNEL);
+ if (!pa->pa_iova)
return -ENOMEM;
- }
- pa->pa_page = (struct page **)&pa->pa_iova[pa->pa_nr];
- pa->pa_iova[0] = iova;
- pa->pa_page[0] = NULL;
- for (i = 1; i < pa->pa_nr; i++) {
- pa->pa_iova[i] = pa->pa_iova[i - 1] + PAGE_SIZE;
- pa->pa_page[i] = NULL;
+ pa->pa_page = kcalloc(len, sizeof(*pa->pa_page), GFP_KERNEL);
+ if (!pa->pa_page) {
+ kfree(pa->pa_iova);
+ return -ENOMEM;
}
return 0;
@@ -91,12 +83,13 @@ static int page_array_alloc(struct page_array *pa, u64 iova, unsigned int len)
* @pa: page_array on which to perform the operation
* @vdev: the vfio device to perform the operation
* @pa_nr: number of user pages to unpin
+ * @unaligned: were pages unaligned on the pin request
*
* Only unpin if any pages were pinned to begin with, i.e. pa_nr > 0,
* otherwise only clear pa->pa_nr
*/
static void page_array_unpin(struct page_array *pa,
- struct vfio_device *vdev, int pa_nr)
+ struct vfio_device *vdev, int pa_nr, bool unaligned)
{
int unpinned = 0, npage = 1;
@@ -105,7 +98,8 @@ static void page_array_unpin(struct page_array *pa,
dma_addr_t *last = &first[npage];
if (unpinned + npage < pa_nr &&
- *first + npage * PAGE_SIZE == *last) {
+ *first + npage * PAGE_SIZE == *last &&
+ !unaligned) {
npage++;
continue;
}
@@ -121,13 +115,20 @@ static void page_array_unpin(struct page_array *pa,
/*
* page_array_pin() - Pin user pages in memory
* @pa: page_array on which to perform the operation
- * @mdev: the mediated device to perform pin operations
+ * @vdev: the vfio device to perform pin operations
+ * @unaligned: are pages aligned to 4K boundary?
*
* Returns number of pages pinned upon success.
* If the pin request partially succeeds, or fails completely,
* all pages are left unpinned and a negative error value is returned.
+ *
+ * Requests to pin "aligned" pages can be coalesced into a single
+ * vfio_pin_pages request for the sake of efficiency, based on the
+ * expectation of 4K page requests. Unaligned requests are probably
+ * dealing with 2K "pages", and cannot be coalesced without
+ * reworking this logic to incorporate that math.
*/
-static int page_array_pin(struct page_array *pa, struct vfio_device *vdev)
+static int page_array_pin(struct page_array *pa, struct vfio_device *vdev, bool unaligned)
{
int pinned = 0, npage = 1;
int ret = 0;
@@ -137,7 +138,8 @@ static int page_array_pin(struct page_array *pa, struct vfio_device *vdev)
dma_addr_t *last = &first[npage];
if (pinned + npage < pa->pa_nr &&
- *first + npage * PAGE_SIZE == *last) {
+ *first + npage * PAGE_SIZE == *last &&
+ !unaligned) {
npage++;
continue;
}
@@ -159,14 +161,15 @@ static int page_array_pin(struct page_array *pa, struct vfio_device *vdev)
return ret;
err_out:
- page_array_unpin(pa, vdev, pinned);
+ page_array_unpin(pa, vdev, pinned, unaligned);
return ret;
}
/* Unpin the pages before releasing the memory. */
-static void page_array_unpin_free(struct page_array *pa, struct vfio_device *vdev)
+static void page_array_unpin_free(struct page_array *pa, struct vfio_device *vdev, bool unaligned)
{
- page_array_unpin(pa, vdev, pa->pa_nr);
+ page_array_unpin(pa, vdev, pa->pa_nr, unaligned);
+ kfree(pa->pa_page);
kfree(pa->pa_iova);
}
@@ -199,11 +202,12 @@ static inline void page_array_idal_create_words(struct page_array *pa,
* idaw.
*/
- for (i = 0; i < pa->pa_nr; i++)
+ for (i = 0; i < pa->pa_nr; i++) {
idaws[i] = page_to_phys(pa->pa_page[i]);
- /* Adjust the first IDAW, since it may not start on a page boundary */
- idaws[0] += pa->pa_iova[0] & (PAGE_SIZE - 1);
+ /* Incorporate any offset from each starting address */
+ idaws[i] += pa->pa_iova[i] & (PAGE_SIZE - 1);
+ }
}
static void convert_ccw0_to_ccw1(struct ccw1 *source, unsigned long len)
@@ -228,50 +232,7 @@ static void convert_ccw0_to_ccw1(struct ccw1 *source, unsigned long len)
}
}
-/*
- * Within the domain (@mdev), copy @n bytes from a guest physical
- * address (@iova) to a host physical address (@to).
- */
-static long copy_from_iova(struct vfio_device *vdev, void *to, u64 iova,
- unsigned long n)
-{
- struct page_array pa = {0};
- int i, ret;
- unsigned long l, m;
-
- ret = page_array_alloc(&pa, iova, n);
- if (ret < 0)
- return ret;
-
- ret = page_array_pin(&pa, vdev);
- if (ret < 0) {
- page_array_unpin_free(&pa, vdev);
- return ret;
- }
-
- l = n;
- for (i = 0; i < pa.pa_nr; i++) {
- void *from = kmap_local_page(pa.pa_page[i]);
-
- m = PAGE_SIZE;
- if (i == 0) {
- from += iova & (PAGE_SIZE - 1);
- m -= iova & (PAGE_SIZE - 1);
- }
-
- m = min(l, m);
- memcpy(to + (n - l), from, m);
- kunmap_local(from);
-
- l -= m;
- if (l == 0)
- break;
- }
-
- page_array_unpin_free(&pa, vdev);
-
- return l;
-}
+#define idal_is_2k(_cp) (!(_cp)->orb.cmd.c64 || (_cp)->orb.cmd.i2k)
/*
* Helpers to operate ccwchain.
@@ -356,40 +317,41 @@ static inline int is_tic_within_range(struct ccw1 *ccw, u32 head, int len)
static struct ccwchain *ccwchain_alloc(struct channel_program *cp, int len)
{
struct ccwchain *chain;
- void *data;
- size_t size;
-
- /* Make ccw address aligned to 8. */
- size = ((sizeof(*chain) + 7L) & -8L) +
- sizeof(*chain->ch_ccw) * len +
- sizeof(*chain->ch_pa) * len;
- chain = kzalloc(size, GFP_DMA | GFP_KERNEL);
+
+ chain = kzalloc(sizeof(*chain), GFP_KERNEL);
if (!chain)
return NULL;
- data = (u8 *)chain + ((sizeof(*chain) + 7L) & -8L);
- chain->ch_ccw = (struct ccw1 *)data;
+ chain->ch_ccw = kcalloc(len, sizeof(*chain->ch_ccw), GFP_DMA | GFP_KERNEL);
+ if (!chain->ch_ccw)
+ goto out_err;
- data = (u8 *)(chain->ch_ccw) + sizeof(*chain->ch_ccw) * len;
- chain->ch_pa = (struct page_array *)data;
-
- chain->ch_len = len;
+ chain->ch_pa = kcalloc(len, sizeof(*chain->ch_pa), GFP_KERNEL);
+ if (!chain->ch_pa)
+ goto out_err;
list_add_tail(&chain->next, &cp->ccwchain_list);
return chain;
+
+out_err:
+ kfree(chain->ch_ccw);
+ kfree(chain);
+ return NULL;
}
static void ccwchain_free(struct ccwchain *chain)
{
list_del(&chain->next);
+ kfree(chain->ch_pa);
+ kfree(chain->ch_ccw);
kfree(chain);
}
/* Free resource for a ccw that allocated memory for its cda. */
static void ccwchain_cda_free(struct ccwchain *chain, int idx)
{
- struct ccw1 *ccw = chain->ch_ccw + idx;
+ struct ccw1 *ccw = &chain->ch_ccw[idx];
if (ccw_is_tic(ccw))
return;
@@ -419,14 +381,6 @@ static int ccwchain_calc_length(u64 iova, struct channel_program *cp)
cnt++;
/*
- * As we don't want to fail direct addressing even if the
- * orb specified one of the unsupported formats, we defer
- * checking for IDAWs in unsupported formats to here.
- */
- if ((!cp->orb.cmd.c64 || cp->orb.cmd.i2k) && ccw_is_idal(ccw))
- return -EOPNOTSUPP;
-
- /*
* We want to keep counting if the current CCW has the
* command-chaining flag enabled, or if it is a TIC CCW
* that loops back into the current chain. The latter
@@ -471,10 +425,9 @@ static int ccwchain_handle_ccw(u32 cda, struct channel_program *cp)
int len, ret;
/* Copy 2K (the most we support today) of possible CCWs */
- len = copy_from_iova(vdev, cp->guest_cp, cda,
- CCWCHAIN_LEN_MAX * sizeof(struct ccw1));
- if (len)
- return len;
+ ret = vfio_dma_rw(vdev, cda, cp->guest_cp, CCWCHAIN_LEN_MAX * sizeof(struct ccw1), false);
+ if (ret)
+ return ret;
/* Convert any Format-0 CCWs to Format-1 */
if (!cp->orb.cmd.fmt)
@@ -489,6 +442,8 @@ static int ccwchain_handle_ccw(u32 cda, struct channel_program *cp)
chain = ccwchain_alloc(cp, len);
if (!chain)
return -ENOMEM;
+
+ chain->ch_len = len;
chain->ch_iova = cda;
/* Copy the actual CCWs into the new chain */
@@ -510,7 +465,7 @@ static int ccwchain_loop_tic(struct ccwchain *chain, struct channel_program *cp)
int i, ret;
for (i = 0; i < chain->ch_len; i++) {
- tic = chain->ch_ccw + i;
+ tic = &chain->ch_ccw[i];
if (!ccw_is_tic(tic))
continue;
@@ -528,11 +483,9 @@ static int ccwchain_loop_tic(struct ccwchain *chain, struct channel_program *cp)
return 0;
}
-static int ccwchain_fetch_tic(struct ccwchain *chain,
- int idx,
+static int ccwchain_fetch_tic(struct ccw1 *ccw,
struct channel_program *cp)
{
- struct ccw1 *ccw = chain->ch_ccw + idx;
struct ccwchain *iter;
u32 ccw_head;
@@ -548,43 +501,124 @@ static int ccwchain_fetch_tic(struct ccwchain *chain,
return -EFAULT;
}
-static int ccwchain_fetch_direct(struct ccwchain *chain,
- int idx,
- struct channel_program *cp)
+static unsigned long *get_guest_idal(struct ccw1 *ccw,
+ struct channel_program *cp,
+ int idaw_nr)
{
struct vfio_device *vdev =
&container_of(cp, struct vfio_ccw_private, cp)->vdev;
- struct ccw1 *ccw;
- struct page_array *pa;
- u64 iova;
unsigned long *idaws;
+ unsigned int *idaws_f1;
+ int idal_len = idaw_nr * sizeof(*idaws);
+ int idaw_size = idal_is_2k(cp) ? PAGE_SIZE / 2 : PAGE_SIZE;
+ int idaw_mask = ~(idaw_size - 1);
+ int i, ret;
+
+ idaws = kcalloc(idaw_nr, sizeof(*idaws), GFP_DMA | GFP_KERNEL);
+ if (!idaws)
+ return ERR_PTR(-ENOMEM);
+
+ if (ccw_is_idal(ccw)) {
+ /* Copy IDAL from guest */
+ ret = vfio_dma_rw(vdev, ccw->cda, idaws, idal_len, false);
+ if (ret) {
+ kfree(idaws);
+ return ERR_PTR(ret);
+ }
+ } else {
+ /* Fabricate an IDAL based off CCW data address */
+ if (cp->orb.cmd.c64) {
+ idaws[0] = ccw->cda;
+ for (i = 1; i < idaw_nr; i++)
+ idaws[i] = (idaws[i - 1] + idaw_size) & idaw_mask;
+ } else {
+ idaws_f1 = (unsigned int *)idaws;
+ idaws_f1[0] = ccw->cda;
+ for (i = 1; i < idaw_nr; i++)
+ idaws_f1[i] = (idaws_f1[i - 1] + idaw_size) & idaw_mask;
+ }
+ }
+
+ return idaws;
+}
+
+/*
+ * ccw_count_idaws() - Calculate the number of IDAWs needed to transfer
+ * a specified amount of data
+ *
+ * @ccw: The Channel Command Word being translated
+ * @cp: Channel Program being processed
+ *
+ * The ORB is examined, since it specifies what IDAWs could actually be
+ * used by any CCW in the channel program, regardless of whether or not
+ * the CCW actually does. An ORB that does not specify Format-2-IDAW
+ * Control could still contain a CCW with an IDAL, which would be
+ * Format-1 and thus only move 2K with each IDAW. Thus all CCWs within
+ * the channel program must follow the same size requirements.
+ */
+static int ccw_count_idaws(struct ccw1 *ccw,
+ struct channel_program *cp)
+{
+ struct vfio_device *vdev =
+ &container_of(cp, struct vfio_ccw_private, cp)->vdev;
+ u64 iova;
+ int size = cp->orb.cmd.c64 ? sizeof(u64) : sizeof(u32);
int ret;
int bytes = 1;
- int idaw_nr, idal_len;
- int i;
-
- ccw = chain->ch_ccw + idx;
if (ccw->count)
bytes = ccw->count;
- /* Calculate size of IDAL */
if (ccw_is_idal(ccw)) {
- /* Read first IDAW to see if it's 4K-aligned or not. */
- /* All subsequent IDAws will be 4K-aligned. */
- ret = copy_from_iova(vdev, &iova, ccw->cda, sizeof(iova));
+ /* Read first IDAW to check its starting address. */
+ /* All subsequent IDAWs will be 2K- or 4K-aligned. */
+ ret = vfio_dma_rw(vdev, ccw->cda, &iova, size, false);
if (ret)
return ret;
+
+ /*
+ * Format-1 IDAWs only occupy the first 32 bits,
+ * and bit 0 is always off.
+ */
+ if (!cp->orb.cmd.c64)
+ iova = iova >> 32;
} else {
iova = ccw->cda;
}
- idaw_nr = idal_nr_words((void *)iova, bytes);
- idal_len = idaw_nr * sizeof(*idaws);
+
+ /* Format-1 IDAWs operate on 2K each */
+ if (!cp->orb.cmd.c64)
+ return idal_2k_nr_words((void *)iova, bytes);
+
+ /* Using the 2K variant of Format-2 IDAWs? */
+ if (cp->orb.cmd.i2k)
+ return idal_2k_nr_words((void *)iova, bytes);
+
+ /* The 'usual' case is 4K Format-2 IDAWs */
+ return idal_nr_words((void *)iova, bytes);
+}
+
+static int ccwchain_fetch_ccw(struct ccw1 *ccw,
+ struct page_array *pa,
+ struct channel_program *cp)
+{
+ struct vfio_device *vdev =
+ &container_of(cp, struct vfio_ccw_private, cp)->vdev;
+ unsigned long *idaws;
+ unsigned int *idaws_f1;
+ int ret;
+ int idaw_nr;
+ int i;
+
+ /* Calculate size of IDAL */
+ idaw_nr = ccw_count_idaws(ccw, cp);
+ if (idaw_nr < 0)
+ return idaw_nr;
/* Allocate an IDAL from host storage */
- idaws = kcalloc(idaw_nr, sizeof(*idaws), GFP_DMA | GFP_KERNEL);
- if (!idaws) {
- ret = -ENOMEM;
+ idaws = get_guest_idal(ccw, cp, idaw_nr);
+ if (IS_ERR(idaws)) {
+ ret = PTR_ERR(idaws);
goto out_init;
}
@@ -594,33 +628,24 @@ static int ccwchain_fetch_direct(struct ccwchain *chain,
* required for the data transfer, since we only only support
* 4K IDAWs today.
*/
- pa = chain->ch_pa + idx;
- ret = page_array_alloc(pa, iova, bytes);
+ ret = page_array_alloc(pa, idaw_nr);
if (ret < 0)
goto out_free_idaws;
- if (ccw_is_idal(ccw)) {
- /* Copy guest IDAL into host IDAL */
- ret = copy_from_iova(vdev, idaws, ccw->cda, idal_len);
- if (ret)
- goto out_unpin;
-
- /*
- * Copy guest IDAWs into page_array, in case the memory they
- * occupy is not contiguous.
- */
- for (i = 0; i < idaw_nr; i++)
+ /*
+ * Copy guest IDAWs into page_array, in case the memory they
+ * occupy is not contiguous.
+ */
+ idaws_f1 = (unsigned int *)idaws;
+ for (i = 0; i < idaw_nr; i++) {
+ if (cp->orb.cmd.c64)
pa->pa_iova[i] = idaws[i];
- } else {
- /*
- * No action is required here; the iova addresses in page_array
- * were initialized sequentially in page_array_alloc() beginning
- * with the contents of ccw->cda.
- */
+ else
+ pa->pa_iova[i] = idaws_f1[i];
}
if (ccw_does_data_transfer(ccw)) {
- ret = page_array_pin(pa, vdev);
+ ret = page_array_pin(pa, vdev, idal_is_2k(cp));
if (ret < 0)
goto out_unpin;
} else {
@@ -636,7 +661,7 @@ static int ccwchain_fetch_direct(struct ccwchain *chain,
return 0;
out_unpin:
- page_array_unpin_free(pa, vdev);
+ page_array_unpin_free(pa, vdev, idal_is_2k(cp));
out_free_idaws:
kfree(idaws);
out_init:
@@ -650,22 +675,20 @@ out_init:
* and to get rid of the cda 2G limitiaion of ccw1, we'll translate
* direct ccws to idal ccws.
*/
-static int ccwchain_fetch_one(struct ccwchain *chain,
- int idx,
+static int ccwchain_fetch_one(struct ccw1 *ccw,
+ struct page_array *pa,
struct channel_program *cp)
-{
- struct ccw1 *ccw = chain->ch_ccw + idx;
+{
if (ccw_is_tic(ccw))
- return ccwchain_fetch_tic(chain, idx, cp);
+ return ccwchain_fetch_tic(ccw, cp);
- return ccwchain_fetch_direct(chain, idx, cp);
+ return ccwchain_fetch_ccw(ccw, pa, cp);
}
/**
* cp_init() - allocate ccwchains for a channel program.
* @cp: channel_program on which to perform the operation
- * @mdev: the mediated device to perform pin/unpin operations
* @orb: control block for the channel program from the guest
*
* This creates one or more ccwchain(s), and copies the raw data of
@@ -708,15 +731,9 @@ int cp_init(struct channel_program *cp, union orb *orb)
/* Build a ccwchain for the first CCW segment */
ret = ccwchain_handle_ccw(orb->cmd.cpa, cp);
- if (!ret) {
+ if (!ret)
cp->initialized = true;
- /* It is safe to force: if it was not set but idals used
- * ccwchain_calc_length would have returned an error.
- */
- cp->orb.cmd.c64 = 1;
- }
-
return ret;
}
@@ -742,7 +759,7 @@ void cp_free(struct channel_program *cp)
cp->initialized = false;
list_for_each_entry_safe(chain, temp, &cp->ccwchain_list, next) {
for (i = 0; i < chain->ch_len; i++) {
- page_array_unpin_free(chain->ch_pa + i, vdev);
+ page_array_unpin_free(&chain->ch_pa[i], vdev, idal_is_2k(cp));
ccwchain_cda_free(chain, i);
}
ccwchain_free(chain);
@@ -789,6 +806,8 @@ void cp_free(struct channel_program *cp)
int cp_prefetch(struct channel_program *cp)
{
struct ccwchain *chain;
+ struct ccw1 *ccw;
+ struct page_array *pa;
int len, idx, ret;
/* this is an error in the caller */
@@ -798,7 +817,10 @@ int cp_prefetch(struct channel_program *cp)
list_for_each_entry(chain, &cp->ccwchain_list, next) {
len = chain->ch_len;
for (idx = 0; idx < len; idx++) {
- ret = ccwchain_fetch_one(chain, idx, cp);
+ ccw = &chain->ch_ccw[idx];
+ pa = &chain->ch_pa[idx];
+
+ ret = ccwchain_fetch_one(ccw, pa, cp);
if (ret)
goto out_err;
}
@@ -817,14 +839,13 @@ out_err:
/**
* cp_get_orb() - get the orb of the channel program
* @cp: channel_program on which to perform the operation
- * @intparm: new intparm for the returned orb
- * @lpm: candidate value of the logical-path mask for the returned orb
+ * @sch: subchannel the operation will be performed against
*
* This function returns the address of the updated orb of the channel
* program. Channel I/O device drivers could use this orb to issue a
* ssch.
*/
-union orb *cp_get_orb(struct channel_program *cp, u32 intparm, u8 lpm)
+union orb *cp_get_orb(struct channel_program *cp, struct subchannel *sch)
{
union orb *orb;
struct ccwchain *chain;
@@ -836,12 +857,20 @@ union orb *cp_get_orb(struct channel_program *cp, u32 intparm, u8 lpm)
orb = &cp->orb;
- orb->cmd.intparm = intparm;
+ orb->cmd.intparm = (u32)virt_to_phys(sch);
orb->cmd.fmt = 1;
- orb->cmd.key = PAGE_DEFAULT_KEY >> 4;
+
+ /*
+ * Everything built by vfio-ccw is a Format-2 IDAL.
+ * If the input was a Format-1 IDAL, indicate that
+ * 2K Format-2 IDAWs were created here.
+ */
+ if (!orb->cmd.c64)
+ orb->cmd.i2k = 1;
+ orb->cmd.c64 = 1;
if (orb->cmd.lpm == 0)
- orb->cmd.lpm = lpm;
+ orb->cmd.lpm = sch->lpm;
chain = list_first_entry(&cp->ccwchain_list, struct ccwchain, next);
cpa = chain->ch_ccw;
@@ -919,7 +948,7 @@ bool cp_iova_pinned(struct channel_program *cp, u64 iova, u64 length)
list_for_each_entry(chain, &cp->ccwchain_list, next) {
for (i = 0; i < chain->ch_len; i++)
- if (page_array_iova_pinned(chain->ch_pa + i, iova, length))
+ if (page_array_iova_pinned(&chain->ch_pa[i], iova, length))
return true;
}
diff --git a/drivers/s390/cio/vfio_ccw_cp.h b/drivers/s390/cio/vfio_ccw_cp.h
index 54d26e242533..fc31eb699807 100644
--- a/drivers/s390/cio/vfio_ccw_cp.h
+++ b/drivers/s390/cio/vfio_ccw_cp.h
@@ -27,7 +27,6 @@
* struct channel_program - manage information for channel program
* @ccwchain_list: list head of ccwchains
* @orb: orb for the currently processed ssch request
- * @mdev: the mediated device to perform page pinning/unpinning
* @initialized: whether this instance is actually initialized
*
* @ccwchain_list is the head of a ccwchain list, that contents the
@@ -44,7 +43,7 @@ struct channel_program {
int cp_init(struct channel_program *cp, union orb *orb);
void cp_free(struct channel_program *cp);
int cp_prefetch(struct channel_program *cp);
-union orb *cp_get_orb(struct channel_program *cp, u32 intparm, u8 lpm);
+union orb *cp_get_orb(struct channel_program *cp, struct subchannel *sch);
void cp_update_scsw(struct channel_program *cp, union scsw *scsw);
bool cp_iova_pinned(struct channel_program *cp, u64 iova, u64 length);
diff --git a/drivers/s390/cio/vfio_ccw_drv.c b/drivers/s390/cio/vfio_ccw_drv.c
index 54aba7cceb33..ff538a086fc7 100644
--- a/drivers/s390/cio/vfio_ccw_drv.c
+++ b/drivers/s390/cio/vfio_ccw_drv.c
@@ -225,7 +225,7 @@ static void vfio_ccw_sch_shutdown(struct subchannel *sch)
struct vfio_ccw_parent *parent = dev_get_drvdata(&sch->dev);
struct vfio_ccw_private *private = dev_get_drvdata(&parent->dev);
- if (WARN_ON(!private))
+ if (!private)
return;
vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_CLOSE);
diff --git a/drivers/s390/cio/vfio_ccw_fsm.c b/drivers/s390/cio/vfio_ccw_fsm.c
index 2784a4e4d2be..757b73141246 100644
--- a/drivers/s390/cio/vfio_ccw_fsm.c
+++ b/drivers/s390/cio/vfio_ccw_fsm.c
@@ -27,7 +27,7 @@ static int fsm_io_helper(struct vfio_ccw_private *private)
spin_lock_irqsave(sch->lock, flags);
- orb = cp_get_orb(&private->cp, (u32)virt_to_phys(sch), sch->lpm);
+ orb = cp_get_orb(&private->cp, sch);
if (!orb) {
ret = -EIO;
goto out;
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index b02c631f3b71..f4cc1720156f 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -613,10 +613,10 @@ static int ap_bus_match(struct device *dev, struct device_driver *drv)
* It sets up a single environment variable DEV_TYPE which contains the
* hardware device type.
*/
-static int ap_uevent(struct device *dev, struct kobj_uevent_env *env)
+static int ap_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
int rc = 0;
- struct ap_device *ap_dev = to_ap_dev(dev);
+ const struct ap_device *ap_dev = to_ap_dev(dev);
/* Uevents from ap bus core don't need extensions to the env */
if (dev == ap_root_device)
diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c
index 9c01957e56b3..28a36e016ea9 100644
--- a/drivers/s390/crypto/vfio_ap_ops.c
+++ b/drivers/s390/crypto/vfio_ap_ops.c
@@ -30,10 +30,13 @@
#define AP_QUEUE_UNASSIGNED "unassigned"
#define AP_QUEUE_IN_USE "in use"
+#define MAX_RESET_CHECK_WAIT 200 /* Sleep max 200ms for reset check */
+#define AP_RESET_INTERVAL 20 /* Reset sleep interval (20ms) */
+
static int vfio_ap_mdev_reset_queues(struct ap_queue_table *qtable);
static struct vfio_ap_queue *vfio_ap_find_queue(int apqn);
static const struct vfio_device_ops vfio_ap_matrix_dev_ops;
-static int vfio_ap_mdev_reset_queue(struct vfio_ap_queue *q, unsigned int retry);
+static int vfio_ap_mdev_reset_queue(struct vfio_ap_queue *q);
/**
* get_update_locks_for_kvm: Acquire the locks required to dynamically update a
@@ -349,6 +352,8 @@ static int vfio_ap_validate_nib(struct kvm_vcpu *vcpu, dma_addr_t *nib)
{
*nib = vcpu->run->s.regs.gprs[2];
+ if (!*nib)
+ return -EINVAL;
if (kvm_is_error_hva(gfn_to_hva(vcpu->kvm, *nib >> PAGE_SHIFT)))
return -EINVAL;
@@ -1598,12 +1603,56 @@ static struct vfio_ap_queue *vfio_ap_find_queue(int apqn)
return q;
}
-static int vfio_ap_mdev_reset_queue(struct vfio_ap_queue *q,
- unsigned int retry)
+static int apq_status_check(int apqn, struct ap_queue_status *status)
+{
+ switch (status->response_code) {
+ case AP_RESPONSE_NORMAL:
+ case AP_RESPONSE_RESET_IN_PROGRESS:
+ if (status->queue_empty && !status->irq_enabled)
+ return 0;
+ return -EBUSY;
+ case AP_RESPONSE_DECONFIGURED:
+ /*
+ * If the AP queue is deconfigured, any subsequent AP command
+ * targeting the queue will fail with the same response code. On the
+ * other hand, when an AP adapter is deconfigured, the associated
+ * queues are reset, so let's return a value indicating the reset
+ * for which we're waiting completed successfully.
+ */
+ return 0;
+ default:
+ WARN(true,
+ "failed to verify reset of queue %02x.%04x: TAPQ rc=%u\n",
+ AP_QID_CARD(apqn), AP_QID_QUEUE(apqn),
+ status->response_code);
+ return -EIO;
+ }
+}
+
+static int apq_reset_check(struct vfio_ap_queue *q)
+{
+ int ret;
+ int iters = MAX_RESET_CHECK_WAIT / AP_RESET_INTERVAL;
+ struct ap_queue_status status;
+
+ for (; iters > 0; iters--) {
+ msleep(AP_RESET_INTERVAL);
+ status = ap_tapq(q->apqn, NULL);
+ ret = apq_status_check(q->apqn, &status);
+ if (ret != -EBUSY)
+ return ret;
+ }
+ WARN_ONCE(iters <= 0,
+ "timeout verifying reset of queue %02x.%04x (%u, %u, %u)",
+ AP_QID_CARD(q->apqn), AP_QID_QUEUE(q->apqn),
+ status.queue_empty, status.irq_enabled, status.response_code);
+ return ret;
+}
+
+static int vfio_ap_mdev_reset_queue(struct vfio_ap_queue *q)
{
struct ap_queue_status status;
int ret;
- int retry2 = 2;
if (!q)
return 0;
@@ -1613,25 +1662,29 @@ retry_zapq:
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
ret = 0;
+ /* if the reset has not completed, wait for it to take effect */
+ if (!status.queue_empty || status.irq_enabled)
+ ret = apq_reset_check(q);
break;
case AP_RESPONSE_RESET_IN_PROGRESS:
- if (retry--) {
- msleep(20);
- goto retry_zapq;
- }
- ret = -EBUSY;
- break;
- case AP_RESPONSE_Q_NOT_AVAIL:
+ /*
+ * There is a reset issued by another process in progress. Let's wait
+ * for that to complete. Since we have no idea whether it was a RAPQ or
+ * ZAPQ, then if it completes successfully, let's issue the ZAPQ.
+ */
+ ret = apq_reset_check(q);
+ if (ret)
+ break;
+ goto retry_zapq;
case AP_RESPONSE_DECONFIGURED:
- case AP_RESPONSE_CHECKSTOPPED:
- WARN_ONCE(status.irq_enabled,
- "PQAP/ZAPQ for %02x.%04x failed with rc=%u while IRQ enabled",
- AP_QID_CARD(q->apqn), AP_QID_QUEUE(q->apqn),
- status.response_code);
- ret = -EBUSY;
- goto free_resources;
+ /*
+ * When an AP adapter is deconfigured, the associated
+ * queues are reset, so let's return a value indicating the reset
+ * completed successfully.
+ */
+ ret = 0;
+ break;
default:
- /* things are really broken, give up */
WARN(true,
"PQAP/ZAPQ for %02x.%04x failed with invalid rc=%u\n",
AP_QID_CARD(q->apqn), AP_QID_QUEUE(q->apqn),
@@ -1639,17 +1692,6 @@ retry_zapq:
return -EIO;
}
- /* wait for the reset to take effect */
- while (retry2--) {
- if (status.queue_empty && !status.irq_enabled)
- break;
- msleep(20);
- status = ap_tapq(q->apqn, NULL);
- }
- WARN_ONCE(retry2 <= 0, "unable to verify reset of queue %02x.%04x",
- AP_QID_CARD(q->apqn), AP_QID_QUEUE(q->apqn));
-
-free_resources:
vfio_ap_free_aqic_resources(q);
return ret;
@@ -1661,7 +1703,7 @@ static int vfio_ap_mdev_reset_queues(struct ap_queue_table *qtable)
struct vfio_ap_queue *q;
hash_for_each(qtable->queues, loop_cursor, q, mdev_qnode) {
- ret = vfio_ap_mdev_reset_queue(q, 1);
+ ret = vfio_ap_mdev_reset_queue(q);
/*
* Regardless whether a queue turns out to be busy, or
* is not operational, we need to continue resetting
@@ -1857,8 +1899,10 @@ int vfio_ap_mdev_probe_queue(struct ap_device *apdev)
return ret;
q = kzalloc(sizeof(*q), GFP_KERNEL);
- if (!q)
- return -ENOMEM;
+ if (!q) {
+ ret = -ENOMEM;
+ goto err_remove_group;
+ }
q->apqn = to_ap_queue(&apdev->device)->qid;
q->saved_isc = VFIO_AP_ISC_INVALID;
@@ -1876,6 +1920,10 @@ int vfio_ap_mdev_probe_queue(struct ap_device *apdev)
release_update_locks_for_mdev(matrix_mdev);
return 0;
+
+err_remove_group:
+ sysfs_remove_group(&apdev->device.kobj, &vfio_queue_attr_group);
+ return ret;
}
void vfio_ap_mdev_remove_queue(struct ap_device *apdev)
@@ -1906,7 +1954,7 @@ void vfio_ap_mdev_remove_queue(struct ap_device *apdev)
}
}
- vfio_ap_mdev_reset_queue(q, 1);
+ vfio_ap_mdev_reset_queue(q);
dev_set_drvdata(&apdev->device, NULL);
kfree(q);
release_update_locks_for_mdev(matrix_mdev);
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index 4bf36e53fe3e..6fe05bb82c77 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -347,8 +347,7 @@ static ssize_t zcdn_create_store(struct class *class,
int rc;
char name[ZCDN_MAX_NAME];
- strncpy(name, skip_spaces(buf), sizeof(name));
- name[sizeof(name) - 1] = '\0';
+ strscpy(name, skip_spaces(buf), sizeof(name));
rc = zcdn_create(strim(name));
@@ -365,8 +364,7 @@ static ssize_t zcdn_destroy_store(struct class *class,
int rc;
char name[ZCDN_MAX_NAME];
- strncpy(name, skip_spaces(buf), sizeof(name));
- name[sizeof(name) - 1] = '\0';
+ strscpy(name, skip_spaces(buf), sizeof(name));
rc = zcdn_destroy(strim(name));
diff --git a/drivers/s390/net/ctcm_fsms.c b/drivers/s390/net/ctcm_fsms.c
index dfb84bb03d32..90ec477386a8 100644
--- a/drivers/s390/net/ctcm_fsms.c
+++ b/drivers/s390/net/ctcm_fsms.c
@@ -370,7 +370,7 @@ static void chx_rx(fsm_instance *fi, int event, void *arg)
CTCM_FUNTAIL, dev->name, len);
priv->stats.rx_dropped++;
priv->stats.rx_length_errors++;
- goto again;
+ goto again;
}
if (len > ch->max_bufsize) {
CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
@@ -378,7 +378,7 @@ static void chx_rx(fsm_instance *fi, int event, void *arg)
CTCM_FUNTAIL, dev->name, len, ch->max_bufsize);
priv->stats.rx_dropped++;
priv->stats.rx_length_errors++;
- goto again;
+ goto again;
}
/*
@@ -403,7 +403,7 @@ static void chx_rx(fsm_instance *fi, int event, void *arg)
*((__u16 *)skb->data) = len;
priv->stats.rx_dropped++;
priv->stats.rx_length_errors++;
- goto again;
+ goto again;
}
if (block_len > 2) {
*((__u16 *)skb->data) = block_len - 2;
@@ -1006,7 +1006,7 @@ static void ctcm_chx_txretry(fsm_instance *fi, int event, void *arg)
use gptr as mpc indicator */
if (!(gptr && (fsm_getstate(gptr->fsm) != MPCG_STATE_READY)))
ctcm_chx_restart(fi, event, arg);
- goto done;
+ goto done;
}
CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG,
@@ -1024,7 +1024,7 @@ static void ctcm_chx_txretry(fsm_instance *fi, int event, void *arg)
CTCM_FUNTAIL, ch->id);
fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
ctcm_chx_restart(fi, event, arg);
- goto done;
+ goto done;
}
fsm_addtimer(&ch->timer, 1000, CTC_EVENT_TIMER, ch);
if (event == CTC_EVENT_TIMER) /* for TIMER not yet locked */
@@ -1251,12 +1251,12 @@ static void ctcmpc_chx_txdone(fsm_instance *fi, int event, void *arg)
if ((ch->collect_len <= 0) || (grp->in_sweep != 0)) {
spin_unlock(&ch->collect_lock);
fsm_newstate(fi, CTC_STATE_TXIDLE);
- goto done;
+ goto done;
}
if (ctcm_checkalloc_buffer(ch)) {
spin_unlock(&ch->collect_lock);
- goto done;
+ goto done;
}
ch->trans_skb->data = ch->trans_skb_data;
skb_reset_tail_pointer(ch->trans_skb);
@@ -1389,7 +1389,7 @@ static void ctcmpc_chx_rx(fsm_instance *fi, int event, void *arg)
CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
"%s(%s): TRANS_SKB = NULL",
CTCM_FUNTAIL, dev->name);
- goto again;
+ goto again;
}
if (len < TH_HEADER_LENGTH) {
@@ -1409,7 +1409,7 @@ static void ctcmpc_chx_rx(fsm_instance *fi, int event, void *arg)
"%s(%s): skb allocation failed",
CTCM_FUNTAIL, dev->name);
fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev);
- goto again;
+ goto again;
}
switch (fsm_getstate(grp->fsm)) {
case MPCG_STATE_RESET:
@@ -1441,9 +1441,9 @@ again:
skb_reset_tail_pointer(ch->trans_skb);
ch->trans_skb->len = 0;
ch->ccw[1].count = ch->max_bufsize;
- if (do_debug_ccw)
+ if (do_debug_ccw)
ctcmpc_dumpit((char *)&ch->ccw[0],
- sizeof(struct ccw1) * 3);
+ sizeof(struct ccw1) * 3);
dolock = !in_hardirq();
if (dolock)
spin_lock_irqsave(
@@ -1562,7 +1562,7 @@ void ctcmpc_chx_rxidle(fsm_instance *fi, int event, void *arg)
if (rc != 0) {
fsm_newstate(fi, CTC_STATE_RXINIT);
ctcm_ccw_check_rc(ch, rc, "initial RX");
- goto done;
+ goto done;
}
break;
default:
@@ -1677,10 +1677,10 @@ static void ctcmpc_chx_attnbusy(fsm_instance *fsm, int event, void *arg)
if (fsm_getstate(ch->fsm) == CH_XID0_INPROGRESS) {
fsm_newstate(ch->fsm, CH_XID0_PENDING) ;
fsm_deltimer(&grp->timer);
- goto done;
+ goto done;
}
fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
- goto done;
+ goto done;
case MPCG_STATE_XID2INITX:
/* XID2 was received before ATTN Busy for second
channel.Send yside xid for second channel.
@@ -1768,7 +1768,7 @@ static void ctcmpc_chx_send_sweep(fsm_instance *fsm, int event, void *arg)
/* give the previous IO time to complete */
fsm_addtimer(&wch->sweep_timer,
200, CTC_EVENT_RSWEEP_TIMER, wch);
- goto done;
+ goto done;
}
skb = skb_dequeue(&wch->sweep_queue);
@@ -1780,7 +1780,7 @@ static void ctcmpc_chx_send_sweep(fsm_instance *fsm, int event, void *arg)
ctcm_clear_busy_do(dev);
dev_kfree_skb_any(skb);
fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
- goto done;
+ goto done;
} else {
refcount_inc(&skb->users);
skb_queue_tail(&wch->io_queue, skb);
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c
index bdfab9ea0046..28db69d91f17 100644
--- a/drivers/s390/net/ctcm_main.c
+++ b/drivers/s390/net/ctcm_main.c
@@ -494,7 +494,7 @@ static int ctcm_transmit_skb(struct channel *ch, struct sk_buff *skb)
ch->collect_len += l;
}
spin_unlock_irqrestore(&ch->collect_lock, saveflags);
- goto done;
+ goto done;
}
spin_unlock_irqrestore(&ch->collect_lock, saveflags);
/*
@@ -685,7 +685,7 @@ static int ctcmpc_transmit_skb(struct channel *ch, struct sk_buff *skb)
ch->collect_len += skb->len;
spin_unlock_irqrestore(&ch->collect_lock, saveflags);
- goto done;
+ goto done;
}
/*
@@ -885,7 +885,7 @@ static netdev_tx_t ctcmpc_tx(struct sk_buff *skb, struct net_device *dev)
"%s(%s): NULL sk_buff passed",
CTCM_FUNTAIL, dev->name);
priv->stats.tx_dropped++;
- goto done;
+ goto done;
}
if (skb_headroom(skb) < (TH_HEADER_LENGTH + PDU_HEADER_LENGTH)) {
CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_ERROR,
@@ -908,7 +908,7 @@ static netdev_tx_t ctcmpc_tx(struct sk_buff *skb, struct net_device *dev)
priv->stats.tx_errors++;
priv->stats.tx_carrier_errors++;
fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
- goto done;
+ goto done;
}
newskb->protocol = skb->protocol;
skb_reserve(newskb, TH_HEADER_LENGTH + PDU_HEADER_LENGTH);
@@ -931,7 +931,7 @@ static netdev_tx_t ctcmpc_tx(struct sk_buff *skb, struct net_device *dev)
priv->stats.tx_dropped++;
priv->stats.tx_errors++;
priv->stats.tx_carrier_errors++;
- goto done;
+ goto done;
}
if (ctcm_test_and_set_busy(dev)) {
@@ -943,7 +943,7 @@ static netdev_tx_t ctcmpc_tx(struct sk_buff *skb, struct net_device *dev)
priv->stats.tx_errors++;
priv->stats.tx_carrier_errors++;
fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
- goto done;
+ goto done;
}
netif_trans_update(dev);
@@ -957,7 +957,7 @@ static netdev_tx_t ctcmpc_tx(struct sk_buff *skb, struct net_device *dev)
priv->stats.tx_carrier_errors++;
ctcm_clear_busy(dev);
fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
- goto done;
+ goto done;
}
ctcm_clear_busy(dev);
done:
@@ -1421,7 +1421,7 @@ static int add_channel(struct ccw_device *cdev, enum ctcm_channel_types type,
"%s (%s) already in list, using old entry",
__func__, (*c)->id);
- goto free_return;
+ goto free_return;
}
spin_lock_init(&ch->collect_lock);
diff --git a/drivers/s390/net/ctcm_mpc.c b/drivers/s390/net/ctcm_mpc.c
index 8ac213a55141..b8a226c6e1a9 100644
--- a/drivers/s390/net/ctcm_mpc.c
+++ b/drivers/s390/net/ctcm_mpc.c
@@ -481,7 +481,7 @@ void ctc_mpc_establish_connectivity(int port_num,
grp->estconnfunc = NULL;
}
fsm_deltimer(&grp->timer);
- goto done;
+ goto done;
}
if ((wch->in_mpcgroup) &&
(fsm_getstate(wch->fsm) == CH_XID0_PENDING))
@@ -495,7 +495,7 @@ void ctc_mpc_establish_connectivity(int port_num,
grp->estconnfunc = NULL;
}
fsm_deltimer(&grp->timer);
- goto done;
+ goto done;
}
break;
case MPCG_STATE_XID0IOWAIT:
@@ -896,8 +896,9 @@ void mpc_group_ready(unsigned long adev)
grp->estconnfunc(grp->port_num, 0,
grp->group_max_buflen);
grp->estconnfunc = NULL;
- } else if (grp->allochanfunc)
+ } else if (grp->allochanfunc) {
grp->allochanfunc(grp->port_num, grp->group_max_buflen);
+ }
grp->send_qllc_disc = 1;
grp->changed_side = 0;
@@ -1109,7 +1110,7 @@ static void ctcmpc_unpack_skb(struct channel *ch, struct sk_buff *pskb)
priv->stats.rx_dropped++;
priv->stats.rx_length_errors++;
- goto done;
+ goto done;
}
skb_reset_mac_header(pskb);
new_len = curr_pdu->pdu_offset;
@@ -1132,7 +1133,7 @@ static void ctcmpc_unpack_skb(struct channel *ch, struct sk_buff *pskb)
CTCM_FUNTAIL, dev->name);
priv->stats.rx_dropped++;
fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
- goto done;
+ goto done;
}
skb_put_data(skb, pskb->data, new_len);
@@ -1543,7 +1544,7 @@ static int mpc_validate_xid(struct mpcg_info *mpcginfo)
CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
"%s(%s): xid = NULL",
CTCM_FUNTAIL, ch->id);
- goto done;
+ goto done;
}
CTCM_D3_DUMP((char *)xid, XID2_LENGTH);
@@ -1556,7 +1557,7 @@ static int mpc_validate_xid(struct mpcg_info *mpcginfo)
CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
"%s(%s): r/w channel pairing mismatch",
CTCM_FUNTAIL, ch->id);
- goto done;
+ goto done;
}
if (xid->xid2_dlc_type == XID2_READ_SIDE) {
diff --git a/drivers/s390/net/ism.h b/drivers/s390/net/ism.h
index 38fe90c2597d..70c5bbda0fea 100644
--- a/drivers/s390/net/ism.h
+++ b/drivers/s390/net/ism.h
@@ -5,6 +5,7 @@
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/pci.h>
+#include <linux/ism.h>
#include <net/smc.h>
#include <asm/pci_insn.h>
@@ -15,7 +16,6 @@
*/
#define ISM_DMB_WORD_OFFSET 1
#define ISM_DMB_BIT_OFFSET (ISM_DMB_WORD_OFFSET * 32)
-#define ISM_NR_DMBS 1920
#define ISM_IDENT_MASK 0x00FFFF
#define ISM_REG_SBA 0x1
@@ -177,7 +177,7 @@ struct ism_eq_header {
struct ism_eq {
struct ism_eq_header header;
- struct smcd_event entry[15];
+ struct ism_event entry[15];
};
struct ism_sba {
@@ -189,21 +189,6 @@ struct ism_sba {
u16 dmbe_mask[ISM_NR_DMBS];
};
-struct ism_dev {
- spinlock_t lock;
- struct pci_dev *pdev;
- struct smcd_dev *smcd;
-
- struct ism_sba *sba;
- dma_addr_t sba_dma_addr;
- DECLARE_BITMAP(sba_bitmap, ISM_NR_DMBS);
-
- struct ism_eq *ieq;
- dma_addr_t ieq_dma_addr;
-
- int ieq_idx;
-};
-
#define ISM_CREATE_REQ(dmb, idx, sf, offset) \
((dmb) | (idx) << 24 | (sf) << 23 | (offset))
diff --git a/drivers/s390/net/ism_drv.c b/drivers/s390/net/ism_drv.c
index dfd401d9e362..eb7e13486087 100644
--- a/drivers/s390/net/ism_drv.c
+++ b/drivers/s390/net/ism_drv.c
@@ -15,9 +15,6 @@
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/processor.h>
-#include <net/smc.h>
-
-#include <asm/debug.h>
#include "ism.h"
@@ -34,6 +31,84 @@ static const struct pci_device_id ism_device_table[] = {
MODULE_DEVICE_TABLE(pci, ism_device_table);
static debug_info_t *ism_debug_info;
+static const struct smcd_ops ism_ops;
+
+#define NO_CLIENT 0xff /* must be >= MAX_CLIENTS */
+static struct ism_client *clients[MAX_CLIENTS]; /* use an array rather than */
+ /* a list for fast mapping */
+static u8 max_client;
+static DEFINE_SPINLOCK(clients_lock);
+struct ism_dev_list {
+ struct list_head list;
+ struct mutex mutex; /* protects ism device list */
+};
+
+static struct ism_dev_list ism_dev_list = {
+ .list = LIST_HEAD_INIT(ism_dev_list.list),
+ .mutex = __MUTEX_INITIALIZER(ism_dev_list.mutex),
+};
+
+int ism_register_client(struct ism_client *client)
+{
+ struct ism_dev *ism;
+ unsigned long flags;
+ int i, rc = -ENOSPC;
+
+ mutex_lock(&ism_dev_list.mutex);
+ spin_lock_irqsave(&clients_lock, flags);
+ for (i = 0; i < MAX_CLIENTS; ++i) {
+ if (!clients[i]) {
+ clients[i] = client;
+ client->id = i;
+ if (i == max_client)
+ max_client++;
+ rc = 0;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&clients_lock, flags);
+ if (i < MAX_CLIENTS) {
+ /* initialize with all devices that we got so far */
+ list_for_each_entry(ism, &ism_dev_list.list, list) {
+ ism->priv[i] = NULL;
+ client->add(ism);
+ }
+ }
+ mutex_unlock(&ism_dev_list.mutex);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(ism_register_client);
+
+int ism_unregister_client(struct ism_client *client)
+{
+ struct ism_dev *ism;
+ unsigned long flags;
+ int rc = 0;
+
+ mutex_lock(&ism_dev_list.mutex);
+ spin_lock_irqsave(&clients_lock, flags);
+ clients[client->id] = NULL;
+ if (client->id + 1 == max_client)
+ max_client--;
+ spin_unlock_irqrestore(&clients_lock, flags);
+ list_for_each_entry(ism, &ism_dev_list.list, list) {
+ for (int i = 0; i < ISM_NR_DMBS; ++i) {
+ if (ism->sba_client_arr[i] == client->id) {
+ pr_err("%s: attempt to unregister client '%s'"
+ "with registered dmb(s)\n", __func__,
+ client->name);
+ rc = -EBUSY;
+ goto out;
+ }
+ }
+ }
+out:
+ mutex_unlock(&ism_dev_list.mutex);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(ism_unregister_client);
static int ism_cmd(struct ism_dev *ism, void *cmd)
{
@@ -193,15 +268,14 @@ static int ism_read_local_gid(struct ism_dev *ism)
if (ret)
goto out;
- ism->smcd->local_gid = cmd.response.gid;
+ ism->local_gid = cmd.response.gid;
out:
return ret;
}
-static int ism_query_rgid(struct smcd_dev *smcd, u64 rgid, u32 vid_valid,
+static int ism_query_rgid(struct ism_dev *ism, u64 rgid, u32 vid_valid,
u32 vid)
{
- struct ism_dev *ism = smcd->priv;
union ism_query_rgid cmd;
memset(&cmd, 0, sizeof(cmd));
@@ -215,14 +289,14 @@ static int ism_query_rgid(struct smcd_dev *smcd, u64 rgid, u32 vid_valid,
return ism_cmd(ism, &cmd);
}
-static void ism_free_dmb(struct ism_dev *ism, struct smcd_dmb *dmb)
+static void ism_free_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
{
clear_bit(dmb->sba_idx, ism->sba_bitmap);
dma_free_coherent(&ism->pdev->dev, dmb->dmb_len,
dmb->cpu_addr, dmb->dma_addr);
}
-static int ism_alloc_dmb(struct ism_dev *ism, struct smcd_dmb *dmb)
+static int ism_alloc_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
{
unsigned long bit;
@@ -251,9 +325,9 @@ static int ism_alloc_dmb(struct ism_dev *ism, struct smcd_dmb *dmb)
return dmb->cpu_addr ? 0 : -ENOMEM;
}
-static int ism_register_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb)
+int ism_register_dmb(struct ism_dev *ism, struct ism_dmb *dmb,
+ struct ism_client *client)
{
- struct ism_dev *ism = smcd->priv;
union ism_reg_dmb cmd;
int ret;
@@ -278,13 +352,14 @@ static int ism_register_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb)
goto out;
}
dmb->dmb_tok = cmd.response.dmb_tok;
+ ism->sba_client_arr[dmb->sba_idx - ISM_DMB_BIT_OFFSET] = client->id;
out:
return ret;
}
+EXPORT_SYMBOL_GPL(ism_register_dmb);
-static int ism_unregister_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb)
+int ism_unregister_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
{
- struct ism_dev *ism = smcd->priv;
union ism_unreg_dmb cmd;
int ret;
@@ -294,6 +369,8 @@ static int ism_unregister_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb)
cmd.request.dmb_tok = dmb->dmb_tok;
+ ism->sba_client_arr[dmb->sba_idx - ISM_DMB_BIT_OFFSET] = NO_CLIENT;
+
ret = ism_cmd(ism, &cmd);
if (ret && ret != ISM_ERROR)
goto out;
@@ -302,10 +379,10 @@ static int ism_unregister_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb)
out:
return ret;
}
+EXPORT_SYMBOL_GPL(ism_unregister_dmb);
-static int ism_add_vlan_id(struct smcd_dev *smcd, u64 vlan_id)
+static int ism_add_vlan_id(struct ism_dev *ism, u64 vlan_id)
{
- struct ism_dev *ism = smcd->priv;
union ism_set_vlan_id cmd;
memset(&cmd, 0, sizeof(cmd));
@@ -317,9 +394,8 @@ static int ism_add_vlan_id(struct smcd_dev *smcd, u64 vlan_id)
return ism_cmd(ism, &cmd);
}
-static int ism_del_vlan_id(struct smcd_dev *smcd, u64 vlan_id)
+static int ism_del_vlan_id(struct ism_dev *ism, u64 vlan_id)
{
- struct ism_dev *ism = smcd->priv;
union ism_set_vlan_id cmd;
memset(&cmd, 0, sizeof(cmd));
@@ -331,20 +407,9 @@ static int ism_del_vlan_id(struct smcd_dev *smcd, u64 vlan_id)
return ism_cmd(ism, &cmd);
}
-static int ism_set_vlan_required(struct smcd_dev *smcd)
-{
- return ism_cmd_simple(smcd->priv, ISM_SET_VLAN);
-}
-
-static int ism_reset_vlan_required(struct smcd_dev *smcd)
-{
- return ism_cmd_simple(smcd->priv, ISM_RESET_VLAN);
-}
-
-static int ism_signal_ieq(struct smcd_dev *smcd, u64 rgid, u32 trigger_irq,
+static int ism_signal_ieq(struct ism_dev *ism, u64 rgid, u32 trigger_irq,
u32 event_code, u64 info)
{
- struct ism_dev *ism = smcd->priv;
union ism_sig_ieq cmd;
memset(&cmd, 0, sizeof(cmd));
@@ -365,10 +430,9 @@ static unsigned int max_bytes(unsigned int start, unsigned int len,
return min(boundary - (start & (boundary - 1)), len);
}
-static int ism_move(struct smcd_dev *smcd, u64 dmb_tok, unsigned int idx,
- bool sf, unsigned int offset, void *data, unsigned int size)
+int ism_move(struct ism_dev *ism, u64 dmb_tok, unsigned int idx, bool sf,
+ unsigned int offset, void *data, unsigned int size)
{
- struct ism_dev *ism = smcd->priv;
unsigned int bytes;
u64 dmb_req;
int ret;
@@ -389,6 +453,7 @@ static int ism_move(struct smcd_dev *smcd, u64 dmb_tok, unsigned int idx,
return 0;
}
+EXPORT_SYMBOL_GPL(ism_move);
static struct ism_systemeid SYSTEM_EID = {
.seid_string = "IBM-SYSZ-ISMSEID00000000",
@@ -410,15 +475,14 @@ static void ism_create_system_eid(void)
memcpy(&SYSTEM_EID.type, tmp, 4);
}
-static u8 *ism_get_system_eid(void)
+u8 *ism_get_seid(void)
{
return SYSTEM_EID.seid_string;
}
+EXPORT_SYMBOL_GPL(ism_get_seid);
-static u16 ism_get_chid(struct smcd_dev *smcd)
+static u16 ism_get_chid(struct ism_dev *ism)
{
- struct ism_dev *ism = (struct ism_dev *)smcd->priv;
-
if (!ism || !ism->pdev)
return 0;
@@ -427,7 +491,8 @@ static u16 ism_get_chid(struct smcd_dev *smcd)
static void ism_handle_event(struct ism_dev *ism)
{
- struct smcd_event *entry;
+ struct ism_event *entry;
+ int i;
while ((ism->ieq_idx + 1) != READ_ONCE(ism->ieq->header.idx)) {
if (++(ism->ieq_idx) == ARRAY_SIZE(ism->ieq->entry))
@@ -435,13 +500,18 @@ static void ism_handle_event(struct ism_dev *ism)
entry = &ism->ieq->entry[ism->ieq_idx];
debug_event(ism_debug_info, 2, entry, sizeof(*entry));
- smcd_handle_event(ism->smcd, entry);
+ spin_lock(&clients_lock);
+ for (i = 0; i < max_client; ++i)
+ if (clients[i])
+ clients[i]->handle_event(ism, entry);
+ spin_unlock(&clients_lock);
}
}
static irqreturn_t ism_handle_irq(int irq, void *data)
{
struct ism_dev *ism = data;
+ struct ism_client *clt;
unsigned long bit, end;
unsigned long *bv;
u16 dmbemask;
@@ -461,7 +531,8 @@ static irqreturn_t ism_handle_irq(int irq, void *data)
dmbemask = ism->sba->dmbe_mask[bit + ISM_DMB_BIT_OFFSET];
ism->sba->dmbe_mask[bit + ISM_DMB_BIT_OFFSET] = 0;
barrier();
- smcd_handle_irq(ism->smcd, bit + ISM_DMB_BIT_OFFSET, dmbemask);
+ clt = clients[ism->sba_client_arr[bit]];
+ clt->handle_irq(ism, bit + ISM_DMB_BIT_OFFSET, dmbemask);
}
if (ism->sba->e) {
@@ -473,33 +544,40 @@ static irqreturn_t ism_handle_irq(int irq, void *data)
return IRQ_HANDLED;
}
-static const struct smcd_ops ism_ops = {
- .query_remote_gid = ism_query_rgid,
- .register_dmb = ism_register_dmb,
- .unregister_dmb = ism_unregister_dmb,
- .add_vlan_id = ism_add_vlan_id,
- .del_vlan_id = ism_del_vlan_id,
- .set_vlan_required = ism_set_vlan_required,
- .reset_vlan_required = ism_reset_vlan_required,
- .signal_event = ism_signal_ieq,
- .move_data = ism_move,
- .get_system_eid = ism_get_system_eid,
- .get_chid = ism_get_chid,
-};
+static u64 ism_get_local_gid(struct ism_dev *ism)
+{
+ return ism->local_gid;
+}
+
+static void ism_dev_add_work_func(struct work_struct *work)
+{
+ struct ism_client *client = container_of(work, struct ism_client,
+ add_work);
+
+ client->add(client->tgt_ism);
+ atomic_dec(&client->tgt_ism->add_dev_cnt);
+ wake_up(&client->tgt_ism->waitq);
+}
static int ism_dev_init(struct ism_dev *ism)
{
struct pci_dev *pdev = ism->pdev;
- int ret;
+ unsigned long flags;
+ int i, ret;
ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
if (ret <= 0)
goto out;
+ ism->sba_client_arr = kzalloc(ISM_NR_DMBS, GFP_KERNEL);
+ if (!ism->sba_client_arr)
+ goto free_vectors;
+ memset(ism->sba_client_arr, NO_CLIENT, ISM_NR_DMBS);
+
ret = request_irq(pci_irq_vector(pdev, 0), ism_handle_irq, 0,
pci_name(pdev), ism);
if (ret)
- goto free_vectors;
+ goto free_client_arr;
ret = register_sba(ism);
if (ret)
@@ -513,13 +591,31 @@ static int ism_dev_init(struct ism_dev *ism)
if (ret)
goto unreg_ieq;
- if (!ism_add_vlan_id(ism->smcd, ISM_RESERVED_VLANID))
+ if (!ism_add_vlan_id(ism, ISM_RESERVED_VLANID))
/* hardware is V2 capable */
ism_create_system_eid();
- ret = smcd_register_dev(ism->smcd);
- if (ret)
- goto unreg_ieq;
+ init_waitqueue_head(&ism->waitq);
+ atomic_set(&ism->free_clients_cnt, 0);
+ atomic_set(&ism->add_dev_cnt, 0);
+
+ wait_event(ism->waitq, !atomic_read(&ism->add_dev_cnt));
+ spin_lock_irqsave(&clients_lock, flags);
+ for (i = 0; i < max_client; ++i)
+ if (clients[i]) {
+ INIT_WORK(&clients[i]->add_work,
+ ism_dev_add_work_func);
+ clients[i]->tgt_ism = ism;
+ atomic_inc(&ism->add_dev_cnt);
+ schedule_work(&clients[i]->add_work);
+ }
+ spin_unlock_irqrestore(&clients_lock, flags);
+
+ wait_event(ism->waitq, !atomic_read(&ism->add_dev_cnt));
+
+ mutex_lock(&ism_dev_list.mutex);
+ list_add(&ism->list, &ism_dev_list.list);
+ mutex_unlock(&ism_dev_list.mutex);
query_info(ism);
return 0;
@@ -530,6 +626,8 @@ unreg_sba:
unregister_sba(ism);
free_irq:
free_irq(pci_irq_vector(pdev, 0), ism);
+free_client_arr:
+ kfree(ism->sba_client_arr);
free_vectors:
pci_free_irq_vectors(pdev);
out:
@@ -548,6 +646,12 @@ static int ism_probe(struct pci_dev *pdev, const struct pci_device_id *id)
spin_lock_init(&ism->lock);
dev_set_drvdata(&pdev->dev, ism);
ism->pdev = pdev;
+ ism->dev.parent = &pdev->dev;
+ device_initialize(&ism->dev);
+ dev_set_name(&ism->dev, dev_name(&pdev->dev));
+ ret = device_add(&ism->dev);
+ if (ret)
+ goto err_dev;
ret = pci_enable_device_mem(pdev);
if (ret)
@@ -565,55 +669,80 @@ static int ism_probe(struct pci_dev *pdev, const struct pci_device_id *id)
dma_set_max_seg_size(&pdev->dev, SZ_1M);
pci_set_master(pdev);
- ism->smcd = smcd_alloc_dev(&pdev->dev, dev_name(&pdev->dev), &ism_ops,
- ISM_NR_DMBS);
- if (!ism->smcd) {
- ret = -ENOMEM;
- goto err_resource;
- }
-
- ism->smcd->priv = ism;
ret = ism_dev_init(ism);
if (ret)
- goto err_free;
+ goto err_resource;
return 0;
-err_free:
- smcd_free_dev(ism->smcd);
err_resource:
+ pci_clear_master(pdev);
pci_release_mem_regions(pdev);
err_disable:
pci_disable_device(pdev);
err:
- kfree(ism);
+ device_del(&ism->dev);
+err_dev:
dev_set_drvdata(&pdev->dev, NULL);
+ kfree(ism);
+
return ret;
}
+static void ism_dev_remove_work_func(struct work_struct *work)
+{
+ struct ism_client *client = container_of(work, struct ism_client,
+ remove_work);
+
+ client->remove(client->tgt_ism);
+ atomic_dec(&client->tgt_ism->free_clients_cnt);
+ wake_up(&client->tgt_ism->waitq);
+}
+
+/* Callers must hold ism_dev_list.mutex */
static void ism_dev_exit(struct ism_dev *ism)
{
struct pci_dev *pdev = ism->pdev;
+ unsigned long flags;
+ int i;
+
+ wait_event(ism->waitq, !atomic_read(&ism->free_clients_cnt));
+ spin_lock_irqsave(&clients_lock, flags);
+ for (i = 0; i < max_client; ++i)
+ if (clients[i]) {
+ INIT_WORK(&clients[i]->remove_work,
+ ism_dev_remove_work_func);
+ clients[i]->tgt_ism = ism;
+ atomic_inc(&ism->free_clients_cnt);
+ schedule_work(&clients[i]->remove_work);
+ }
+ spin_unlock_irqrestore(&clients_lock, flags);
+
+ wait_event(ism->waitq, !atomic_read(&ism->free_clients_cnt));
- smcd_unregister_dev(ism->smcd);
if (SYSTEM_EID.serial_number[0] != '0' ||
SYSTEM_EID.type[0] != '0')
- ism_del_vlan_id(ism->smcd, ISM_RESERVED_VLANID);
+ ism_del_vlan_id(ism, ISM_RESERVED_VLANID);
unregister_ieq(ism);
unregister_sba(ism);
free_irq(pci_irq_vector(pdev, 0), ism);
+ kfree(ism->sba_client_arr);
pci_free_irq_vectors(pdev);
+ list_del_init(&ism->list);
}
static void ism_remove(struct pci_dev *pdev)
{
struct ism_dev *ism = dev_get_drvdata(&pdev->dev);
+ mutex_lock(&ism_dev_list.mutex);
ism_dev_exit(ism);
+ mutex_unlock(&ism_dev_list.mutex);
- smcd_free_dev(ism->smcd);
+ pci_clear_master(pdev);
pci_release_mem_regions(pdev);
pci_disable_device(pdev);
+ device_del(&ism->dev);
dev_set_drvdata(&pdev->dev, NULL);
kfree(ism);
}
@@ -633,6 +762,8 @@ static int __init ism_init(void)
if (!ism_debug_info)
return -ENODEV;
+ memset(clients, 0, sizeof(clients));
+ max_client = 0;
debug_register_view(ism_debug_info, &debug_hex_ascii_view);
ret = pci_register_driver(&ism_driver);
if (ret)
@@ -643,9 +774,110 @@ static int __init ism_init(void)
static void __exit ism_exit(void)
{
+ struct ism_dev *ism;
+
+ mutex_lock(&ism_dev_list.mutex);
+ list_for_each_entry(ism, &ism_dev_list.list, list) {
+ ism_dev_exit(ism);
+ }
+ mutex_unlock(&ism_dev_list.mutex);
+
pci_unregister_driver(&ism_driver);
debug_unregister(ism_debug_info);
}
module_init(ism_init);
module_exit(ism_exit);
+
+/*************************** SMC-D Implementation *****************************/
+
+#if IS_ENABLED(CONFIG_SMC)
+static int smcd_query_rgid(struct smcd_dev *smcd, u64 rgid, u32 vid_valid,
+ u32 vid)
+{
+ return ism_query_rgid(smcd->priv, rgid, vid_valid, vid);
+}
+
+static int smcd_register_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb,
+ struct ism_client *client)
+{
+ return ism_register_dmb(smcd->priv, (struct ism_dmb *)dmb, client);
+}
+
+static int smcd_unregister_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb)
+{
+ return ism_unregister_dmb(smcd->priv, (struct ism_dmb *)dmb);
+}
+
+static int smcd_add_vlan_id(struct smcd_dev *smcd, u64 vlan_id)
+{
+ return ism_add_vlan_id(smcd->priv, vlan_id);
+}
+
+static int smcd_del_vlan_id(struct smcd_dev *smcd, u64 vlan_id)
+{
+ return ism_del_vlan_id(smcd->priv, vlan_id);
+}
+
+static int smcd_set_vlan_required(struct smcd_dev *smcd)
+{
+ return ism_cmd_simple(smcd->priv, ISM_SET_VLAN);
+}
+
+static int smcd_reset_vlan_required(struct smcd_dev *smcd)
+{
+ return ism_cmd_simple(smcd->priv, ISM_RESET_VLAN);
+}
+
+static int smcd_signal_ieq(struct smcd_dev *smcd, u64 rgid, u32 trigger_irq,
+ u32 event_code, u64 info)
+{
+ return ism_signal_ieq(smcd->priv, rgid, trigger_irq, event_code, info);
+}
+
+static int smcd_move(struct smcd_dev *smcd, u64 dmb_tok, unsigned int idx,
+ bool sf, unsigned int offset, void *data,
+ unsigned int size)
+{
+ return ism_move(smcd->priv, dmb_tok, idx, sf, offset, data, size);
+}
+
+static u64 smcd_get_local_gid(struct smcd_dev *smcd)
+{
+ return ism_get_local_gid(smcd->priv);
+}
+
+static u16 smcd_get_chid(struct smcd_dev *smcd)
+{
+ return ism_get_chid(smcd->priv);
+}
+
+static inline struct device *smcd_get_dev(struct smcd_dev *dev)
+{
+ struct ism_dev *ism = dev->priv;
+
+ return &ism->dev;
+}
+
+static const struct smcd_ops ism_ops = {
+ .query_remote_gid = smcd_query_rgid,
+ .register_dmb = smcd_register_dmb,
+ .unregister_dmb = smcd_unregister_dmb,
+ .add_vlan_id = smcd_add_vlan_id,
+ .del_vlan_id = smcd_del_vlan_id,
+ .set_vlan_required = smcd_set_vlan_required,
+ .reset_vlan_required = smcd_reset_vlan_required,
+ .signal_event = smcd_signal_ieq,
+ .move_data = smcd_move,
+ .get_system_eid = ism_get_seid,
+ .get_local_gid = smcd_get_local_gid,
+ .get_chid = smcd_get_chid,
+ .get_dev = smcd_get_dev,
+};
+
+const struct smcd_ops *ism_get_smcd_ops(void)
+{
+ return &ism_ops;
+}
+EXPORT_SYMBOL_GPL(ism_get_smcd_ops);
+#endif
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 8bd9fd51208c..1d5b207c2b9e 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -2801,9 +2801,11 @@ static void qeth_print_status_message(struct qeth_card *card)
* of the level OSA sets the first character to zero
* */
if (!card->info.mcl_level[0]) {
- sprintf(card->info.mcl_level, "%02x%02x",
- card->info.mcl_level[2],
- card->info.mcl_level[3]);
+ scnprintf(card->info.mcl_level,
+ sizeof(card->info.mcl_level),
+ "%02x%02x",
+ card->info.mcl_level[2],
+ card->info.mcl_level[3]);
break;
}
fallthrough;
@@ -6090,7 +6092,7 @@ void qeth_dbf_longtext(debug_info_t *id, int level, char *fmt, ...)
if (!debug_level_enabled(id, level))
return;
va_start(args, fmt);
- vsnprintf(dbf_txt_buf, sizeof(dbf_txt_buf), fmt, args);
+ vscnprintf(dbf_txt_buf, sizeof(dbf_txt_buf), fmt, args);
va_end(args);
debug_text_event(id, level, dbf_txt_buf);
}
@@ -6330,8 +6332,8 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
goto err_dev;
}
- snprintf(dbf_name, sizeof(dbf_name), "qeth_card_%s",
- dev_name(&gdev->dev));
+ scnprintf(dbf_name, sizeof(dbf_name), "qeth_card_%s",
+ dev_name(&gdev->dev));
card->debug = qeth_get_dbf_entry(dbf_name);
if (!card->debug) {
rc = qeth_add_dbf_entry(card, dbf_name);
diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c
index d1adc4b83193..eea93f8f106f 100644
--- a/drivers/s390/net/qeth_core_sys.c
+++ b/drivers/s390/net/qeth_core_sys.c
@@ -23,15 +23,15 @@ static ssize_t qeth_dev_state_show(struct device *dev,
switch (card->state) {
case CARD_STATE_DOWN:
- return sprintf(buf, "DOWN\n");
+ return sysfs_emit(buf, "DOWN\n");
case CARD_STATE_SOFTSETUP:
if (card->dev->flags & IFF_UP)
- return sprintf(buf, "UP (LAN %s)\n",
- netif_carrier_ok(card->dev) ? "ONLINE" :
- "OFFLINE");
- return sprintf(buf, "SOFTSETUP\n");
+ return sysfs_emit(buf, "UP (LAN %s)\n",
+ netif_carrier_ok(card->dev) ?
+ "ONLINE" : "OFFLINE");
+ return sysfs_emit(buf, "SOFTSETUP\n");
default:
- return sprintf(buf, "UNKNOWN\n");
+ return sysfs_emit(buf, "UNKNOWN\n");
}
}
@@ -42,7 +42,7 @@ static ssize_t qeth_dev_chpid_show(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- return sprintf(buf, "%02X\n", card->info.chpid);
+ return sysfs_emit(buf, "%02X\n", card->info.chpid);
}
static DEVICE_ATTR(chpid, 0444, qeth_dev_chpid_show, NULL);
@@ -52,7 +52,7 @@ static ssize_t qeth_dev_if_name_show(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- return sprintf(buf, "%s\n", netdev_name(card->dev));
+ return sysfs_emit(buf, "%s\n", netdev_name(card->dev));
}
static DEVICE_ATTR(if_name, 0444, qeth_dev_if_name_show, NULL);
@@ -62,7 +62,7 @@ static ssize_t qeth_dev_card_type_show(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- return sprintf(buf, "%s\n", qeth_get_cardname_short(card));
+ return sysfs_emit(buf, "%s\n", qeth_get_cardname_short(card));
}
static DEVICE_ATTR(card_type, 0444, qeth_dev_card_type_show, NULL);
@@ -86,7 +86,7 @@ static ssize_t qeth_dev_inbuf_size_show(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- return sprintf(buf, "%s\n", qeth_get_bufsize_str(card));
+ return sysfs_emit(buf, "%s\n", qeth_get_bufsize_str(card));
}
static DEVICE_ATTR(inbuf_size, 0444, qeth_dev_inbuf_size_show, NULL);
@@ -96,7 +96,7 @@ static ssize_t qeth_dev_portno_show(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- return sprintf(buf, "%i\n", card->dev->dev_port);
+ return sysfs_emit(buf, "%i\n", card->dev->dev_port);
}
static ssize_t qeth_dev_portno_store(struct device *dev,
@@ -134,7 +134,7 @@ static DEVICE_ATTR(portno, 0644, qeth_dev_portno_show, qeth_dev_portno_store);
static ssize_t qeth_dev_portname_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return sprintf(buf, "no portname required\n");
+ return sysfs_emit(buf, "no portname required\n");
}
static ssize_t qeth_dev_portname_store(struct device *dev,
@@ -157,18 +157,18 @@ static ssize_t qeth_dev_prioqing_show(struct device *dev,
switch (card->qdio.do_prio_queueing) {
case QETH_PRIO_Q_ING_PREC:
- return sprintf(buf, "%s\n", "by precedence");
+ return sysfs_emit(buf, "%s\n", "by precedence");
case QETH_PRIO_Q_ING_TOS:
- return sprintf(buf, "%s\n", "by type of service");
+ return sysfs_emit(buf, "%s\n", "by type of service");
case QETH_PRIO_Q_ING_SKB:
- return sprintf(buf, "%s\n", "by skb-priority");
+ return sysfs_emit(buf, "%s\n", "by skb-priority");
case QETH_PRIO_Q_ING_VLAN:
- return sprintf(buf, "%s\n", "by VLAN headers");
+ return sysfs_emit(buf, "%s\n", "by VLAN headers");
case QETH_PRIO_Q_ING_FIXED:
- return sprintf(buf, "always queue %i\n",
+ return sysfs_emit(buf, "always queue %i\n",
card->qdio.default_out_queue);
default:
- return sprintf(buf, "disabled\n");
+ return sysfs_emit(buf, "disabled\n");
}
}
@@ -242,7 +242,7 @@ static ssize_t qeth_dev_bufcnt_show(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- return sprintf(buf, "%i\n", card->qdio.in_buf_pool.buf_count);
+ return sysfs_emit(buf, "%i\n", card->qdio.in_buf_pool.buf_count);
}
static ssize_t qeth_dev_bufcnt_store(struct device *dev,
@@ -298,7 +298,7 @@ static DEVICE_ATTR(recover, 0200, NULL, qeth_dev_recover_store);
static ssize_t qeth_dev_performance_stats_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return sprintf(buf, "1\n");
+ return sysfs_emit(buf, "1\n");
}
static ssize_t qeth_dev_performance_stats_store(struct device *dev,
@@ -335,7 +335,7 @@ static ssize_t qeth_dev_layer2_show(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- return sprintf(buf, "%i\n", card->options.layer);
+ return sysfs_emit(buf, "%i\n", card->options.layer);
}
static ssize_t qeth_dev_layer2_store(struct device *dev,
@@ -470,23 +470,25 @@ static ssize_t qeth_dev_switch_attrs_show(struct device *dev,
int rc = 0;
if (!qeth_card_hw_is_reachable(card))
- return sprintf(buf, "n/a\n");
+ return sysfs_emit(buf, "n/a\n");
rc = qeth_query_switch_attributes(card, &sw_info);
if (rc)
return rc;
if (!sw_info.capabilities)
- rc = sprintf(buf, "unknown");
+ rc = sysfs_emit(buf, "unknown");
if (sw_info.capabilities & QETH_SWITCH_FORW_802_1)
- rc = sprintf(buf, (sw_info.settings & QETH_SWITCH_FORW_802_1 ?
- "[802.1]" : "802.1"));
+ rc = sysfs_emit(buf,
+ (sw_info.settings & QETH_SWITCH_FORW_802_1 ?
+ "[802.1]" : "802.1"));
if (sw_info.capabilities & QETH_SWITCH_FORW_REFL_RELAY)
- rc += sprintf(buf + rc,
- (sw_info.settings & QETH_SWITCH_FORW_REFL_RELAY ?
- " [rr]" : " rr"));
- rc += sprintf(buf + rc, "\n");
+ rc += sysfs_emit_at(buf, rc,
+ (sw_info.settings &
+ QETH_SWITCH_FORW_REFL_RELAY ?
+ " [rr]" : " rr"));
+ rc += sysfs_emit_at(buf, rc, "\n");
return rc;
}
@@ -573,7 +575,7 @@ static ssize_t qeth_dev_blkt_total_show(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- return sprintf(buf, "%i\n", card->info.blkt.time_total);
+ return sysfs_emit(buf, "%i\n", card->info.blkt.time_total);
}
static ssize_t qeth_dev_blkt_total_store(struct device *dev,
@@ -593,7 +595,7 @@ static ssize_t qeth_dev_blkt_inter_show(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- return sprintf(buf, "%i\n", card->info.blkt.inter_packet);
+ return sysfs_emit(buf, "%i\n", card->info.blkt.inter_packet);
}
static ssize_t qeth_dev_blkt_inter_store(struct device *dev,
@@ -613,7 +615,7 @@ static ssize_t qeth_dev_blkt_inter_jumbo_show(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- return sprintf(buf, "%i\n", card->info.blkt.inter_packet_jumbo);
+ return sysfs_emit(buf, "%i\n", card->info.blkt.inter_packet_jumbo);
}
static ssize_t qeth_dev_blkt_inter_jumbo_store(struct device *dev,
diff --git a/drivers/s390/net/qeth_ethtool.c b/drivers/s390/net/qeth_ethtool.c
index e250f49535fa..c1caf7734c3e 100644
--- a/drivers/s390/net/qeth_ethtool.c
+++ b/drivers/s390/net/qeth_ethtool.c
@@ -172,7 +172,7 @@ static void qeth_get_strings(struct net_device *dev, u32 stringset, u8 *data)
qeth_add_stat_strings(&data, prefix, card_stats,
CARD_STATS_LEN);
for (i = 0; i < card->qdio.no_out_queues; i++) {
- snprintf(prefix, ETH_GSTRING_LEN, "tx%u ", i);
+ scnprintf(prefix, ETH_GSTRING_LEN, "tx%u ", i);
qeth_add_stat_strings(&data, prefix, txq_stats,
TXQ_STATS_LEN);
}
@@ -192,8 +192,8 @@ static void qeth_get_drvinfo(struct net_device *dev,
sizeof(info->driver));
strscpy(info->fw_version, card->info.mcl_level,
sizeof(info->fw_version));
- snprintf(info->bus_info, sizeof(info->bus_info), "%s/%s/%s",
- CARD_RDEV_ID(card), CARD_WDEV_ID(card), CARD_DDEV_ID(card));
+ scnprintf(info->bus_info, sizeof(info->bus_info), "%s/%s/%s",
+ CARD_RDEV_ID(card), CARD_WDEV_ID(card), CARD_DDEV_ID(card));
}
static void qeth_get_channels(struct net_device *dev,
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index c6ded3fdd715..9f13ed170a43 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -1255,37 +1255,38 @@ static void qeth_bridge_emit_host_event(struct qeth_card *card,
switch (evtype) {
case anev_reg_unreg:
- snprintf(str[i], sizeof(str[i]), "BRIDGEDHOST=%s",
- (code & IPA_ADDR_CHANGE_CODE_REMOVAL)
- ? "deregister" : "register");
+ scnprintf(str[i], sizeof(str[i]), "BRIDGEDHOST=%s",
+ (code & IPA_ADDR_CHANGE_CODE_REMOVAL)
+ ? "deregister" : "register");
env[i] = str[i]; i++;
if (code & IPA_ADDR_CHANGE_CODE_VLANID) {
- snprintf(str[i], sizeof(str[i]), "VLAN=%d",
- addr_lnid->lnid);
+ scnprintf(str[i], sizeof(str[i]), "VLAN=%d",
+ addr_lnid->lnid);
env[i] = str[i]; i++;
}
if (code & IPA_ADDR_CHANGE_CODE_MACADDR) {
- snprintf(str[i], sizeof(str[i]), "MAC=%pM",
- addr_lnid->mac);
+ scnprintf(str[i], sizeof(str[i]), "MAC=%pM",
+ addr_lnid->mac);
env[i] = str[i]; i++;
}
- snprintf(str[i], sizeof(str[i]), "NTOK_BUSID=%x.%x.%04x",
- token->cssid, token->ssid, token->devnum);
+ scnprintf(str[i], sizeof(str[i]), "NTOK_BUSID=%x.%x.%04x",
+ token->cssid, token->ssid, token->devnum);
env[i] = str[i]; i++;
- snprintf(str[i], sizeof(str[i]), "NTOK_IID=%02x", token->iid);
+ scnprintf(str[i], sizeof(str[i]), "NTOK_IID=%02x", token->iid);
env[i] = str[i]; i++;
- snprintf(str[i], sizeof(str[i]), "NTOK_CHPID=%02x",
- token->chpid);
+ scnprintf(str[i], sizeof(str[i]), "NTOK_CHPID=%02x",
+ token->chpid);
env[i] = str[i]; i++;
- snprintf(str[i], sizeof(str[i]), "NTOK_CHID=%04x", token->chid);
+ scnprintf(str[i], sizeof(str[i]), "NTOK_CHID=%04x",
+ token->chid);
env[i] = str[i]; i++;
break;
case anev_abort:
- snprintf(str[i], sizeof(str[i]), "BRIDGEDHOST=abort");
+ scnprintf(str[i], sizeof(str[i]), "BRIDGEDHOST=abort");
env[i] = str[i]; i++;
break;
case anev_reset:
- snprintf(str[i], sizeof(str[i]), "BRIDGEDHOST=reset");
+ scnprintf(str[i], sizeof(str[i]), "BRIDGEDHOST=reset");
env[i] = str[i]; i++;
break;
}
@@ -1314,17 +1315,17 @@ static void qeth_bridge_state_change_worker(struct work_struct *work)
NULL
};
- snprintf(env_locrem, sizeof(env_locrem), "BRIDGEPORT=statechange");
- snprintf(env_role, sizeof(env_role), "ROLE=%s",
- (data->role == QETH_SBP_ROLE_NONE) ? "none" :
- (data->role == QETH_SBP_ROLE_PRIMARY) ? "primary" :
- (data->role == QETH_SBP_ROLE_SECONDARY) ? "secondary" :
- "<INVALID>");
- snprintf(env_state, sizeof(env_state), "STATE=%s",
- (data->state == QETH_SBP_STATE_INACTIVE) ? "inactive" :
- (data->state == QETH_SBP_STATE_STANDBY) ? "standby" :
- (data->state == QETH_SBP_STATE_ACTIVE) ? "active" :
- "<INVALID>");
+ scnprintf(env_locrem, sizeof(env_locrem), "BRIDGEPORT=statechange");
+ scnprintf(env_role, sizeof(env_role), "ROLE=%s",
+ (data->role == QETH_SBP_ROLE_NONE) ? "none" :
+ (data->role == QETH_SBP_ROLE_PRIMARY) ? "primary" :
+ (data->role == QETH_SBP_ROLE_SECONDARY) ? "secondary" :
+ "<INVALID>");
+ scnprintf(env_state, sizeof(env_state), "STATE=%s",
+ (data->state == QETH_SBP_STATE_INACTIVE) ? "inactive" :
+ (data->state == QETH_SBP_STATE_STANDBY) ? "standby" :
+ (data->state == QETH_SBP_STATE_ACTIVE) ? "active" :
+ "<INVALID>");
kobject_uevent_env(&data->card->gdev->dev.kobj,
KOBJ_CHANGE, env);
kfree(data);
diff --git a/drivers/s390/net/qeth_l2_sys.c b/drivers/s390/net/qeth_l2_sys.c
index a617351fff57..7f592f912517 100644
--- a/drivers/s390/net/qeth_l2_sys.c
+++ b/drivers/s390/net/qeth_l2_sys.c
@@ -19,7 +19,7 @@ static ssize_t qeth_bridge_port_role_state_show(struct device *dev,
char *word;
if (!qeth_bridgeport_allowed(card))
- return sprintf(buf, "n/a (VNIC characteristics)\n");
+ return sysfs_emit(buf, "n/a (VNIC characteristics)\n");
mutex_lock(&card->sbp_lock);
if (qeth_card_hw_is_reachable(card) &&
@@ -53,7 +53,7 @@ static ssize_t qeth_bridge_port_role_state_show(struct device *dev,
QETH_CARD_TEXT_(card, 2, "SBP%02x:%02x",
card->options.sbp.role, state);
else
- rc = sprintf(buf, "%s\n", word);
+ rc = sysfs_emit(buf, "%s\n", word);
}
mutex_unlock(&card->sbp_lock);
@@ -66,7 +66,7 @@ static ssize_t qeth_bridge_port_role_show(struct device *dev,
struct qeth_card *card = dev_get_drvdata(dev);
if (!qeth_bridgeport_allowed(card))
- return sprintf(buf, "n/a (VNIC characteristics)\n");
+ return sysfs_emit(buf, "n/a (VNIC characteristics)\n");
return qeth_bridge_port_role_state_show(dev, attr, buf, 0);
}
@@ -117,7 +117,7 @@ static ssize_t qeth_bridge_port_state_show(struct device *dev,
struct qeth_card *card = dev_get_drvdata(dev);
if (!qeth_bridgeport_allowed(card))
- return sprintf(buf, "n/a (VNIC characteristics)\n");
+ return sysfs_emit(buf, "n/a (VNIC characteristics)\n");
return qeth_bridge_port_role_state_show(dev, attr, buf, 1);
}
@@ -132,11 +132,11 @@ static ssize_t qeth_bridgeport_hostnotification_show(struct device *dev,
int enabled;
if (!qeth_bridgeport_allowed(card))
- return sprintf(buf, "n/a (VNIC characteristics)\n");
+ return sysfs_emit(buf, "n/a (VNIC characteristics)\n");
enabled = card->options.sbp.hostnotification;
- return sprintf(buf, "%d\n", enabled);
+ return sysfs_emit(buf, "%d\n", enabled);
}
static ssize_t qeth_bridgeport_hostnotification_store(struct device *dev,
@@ -180,7 +180,7 @@ static ssize_t qeth_bridgeport_reflect_show(struct device *dev,
char *state;
if (!qeth_bridgeport_allowed(card))
- return sprintf(buf, "n/a (VNIC characteristics)\n");
+ return sysfs_emit(buf, "n/a (VNIC characteristics)\n");
if (card->options.sbp.reflect_promisc) {
if (card->options.sbp.reflect_promisc_primary)
@@ -190,7 +190,7 @@ static ssize_t qeth_bridgeport_reflect_show(struct device *dev,
} else
state = "none";
- return sprintf(buf, "%s\n", state);
+ return sysfs_emit(buf, "%s\n", state);
}
static ssize_t qeth_bridgeport_reflect_store(struct device *dev,
@@ -280,10 +280,10 @@ static ssize_t qeth_vnicc_timeout_show(struct device *dev,
rc = qeth_l2_vnicc_get_timeout(card, &timeout);
if (rc == -EBUSY)
- return sprintf(buf, "n/a (BridgePort)\n");
+ return sysfs_emit(buf, "n/a (BridgePort)\n");
if (rc == -EOPNOTSUPP)
- return sprintf(buf, "n/a\n");
- return rc ? rc : sprintf(buf, "%d\n", timeout);
+ return sysfs_emit(buf, "n/a\n");
+ return rc ? rc : sysfs_emit(buf, "%d\n", timeout);
}
/* change timeout setting */
@@ -318,10 +318,10 @@ static ssize_t qeth_vnicc_char_show(struct device *dev,
rc = qeth_l2_vnicc_get_state(card, vnicc, &state);
if (rc == -EBUSY)
- return sprintf(buf, "n/a (BridgePort)\n");
+ return sysfs_emit(buf, "n/a (BridgePort)\n");
if (rc == -EOPNOTSUPP)
- return sprintf(buf, "n/a\n");
- return rc ? rc : sprintf(buf, "%d\n", state);
+ return sysfs_emit(buf, "n/a\n");
+ return rc ? rc : sysfs_emit(buf, "%d\n", state);
}
/* change setting of characteristic */
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index d8487a10cd55..af4e60d2917e 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -19,6 +19,7 @@
#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/in.h>
+#include <linux/inet.h>
#include <linux/ipv6.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
@@ -46,9 +47,9 @@ int qeth_l3_ipaddr_to_string(enum qeth_prot_versions proto, const u8 *addr,
char *buf)
{
if (proto == QETH_PROT_IPV4)
- return sprintf(buf, "%pI4", addr);
+ return scnprintf(buf, INET_ADDRSTRLEN, "%pI4", addr);
else
- return sprintf(buf, "%pI6", addr);
+ return scnprintf(buf, INET6_ADDRSTRLEN, "%pI6", addr);
}
static struct qeth_ipaddr *qeth_l3_find_addr_by_ip(struct qeth_card *card,
@@ -158,7 +159,7 @@ static int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
{
int rc = 0;
struct qeth_ipaddr *addr;
- char buf[40];
+ char buf[INET6_ADDRSTRLEN];
if (tmp_addr->type == QETH_IP_TYPE_RXIP)
QETH_CARD_TEXT(card, 2, "addrxip");
diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c
index 1082380b21f8..9f90a860ca2c 100644
--- a/drivers/s390/net/qeth_l3_sys.c
+++ b/drivers/s390/net/qeth_l3_sys.c
@@ -32,26 +32,26 @@ static ssize_t qeth_l3_dev_route_show(struct qeth_card *card,
{
switch (route->type) {
case PRIMARY_ROUTER:
- return sprintf(buf, "%s\n", "primary router");
+ return sysfs_emit(buf, "%s\n", "primary router");
case SECONDARY_ROUTER:
- return sprintf(buf, "%s\n", "secondary router");
+ return sysfs_emit(buf, "%s\n", "secondary router");
case MULTICAST_ROUTER:
if (card->info.broadcast_capable == QETH_BROADCAST_WITHOUT_ECHO)
- return sprintf(buf, "%s\n", "multicast router+");
+ return sysfs_emit(buf, "%s\n", "multicast router+");
else
- return sprintf(buf, "%s\n", "multicast router");
+ return sysfs_emit(buf, "%s\n", "multicast router");
case PRIMARY_CONNECTOR:
if (card->info.broadcast_capable == QETH_BROADCAST_WITHOUT_ECHO)
- return sprintf(buf, "%s\n", "primary connector+");
+ return sysfs_emit(buf, "%s\n", "primary connector+");
else
- return sprintf(buf, "%s\n", "primary connector");
+ return sysfs_emit(buf, "%s\n", "primary connector");
case SECONDARY_CONNECTOR:
if (card->info.broadcast_capable == QETH_BROADCAST_WITHOUT_ECHO)
- return sprintf(buf, "%s\n", "secondary connector+");
+ return sysfs_emit(buf, "%s\n", "secondary connector+");
else
- return sprintf(buf, "%s\n", "secondary connector");
+ return sysfs_emit(buf, "%s\n", "secondary connector");
default:
- return sprintf(buf, "%s\n", "no");
+ return sysfs_emit(buf, "%s\n", "no");
}
}
@@ -138,7 +138,7 @@ static ssize_t qeth_l3_dev_sniffer_show(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- return sprintf(buf, "%i\n", card->options.sniffer ? 1 : 0);
+ return sysfs_emit(buf, "%i\n", card->options.sniffer ? 1 : 0);
}
static ssize_t qeth_l3_dev_sniffer_store(struct device *dev,
@@ -200,7 +200,7 @@ static ssize_t qeth_l3_dev_hsuid_show(struct device *dev,
memcpy(tmp_hsuid, card->options.hsuid, sizeof(tmp_hsuid));
EBCASC(tmp_hsuid, 8);
- return sprintf(buf, "%s\n", tmp_hsuid);
+ return sysfs_emit(buf, "%s\n", tmp_hsuid);
}
static ssize_t qeth_l3_dev_hsuid_store(struct device *dev,
@@ -252,8 +252,8 @@ static ssize_t qeth_l3_dev_hsuid_store(struct device *dev,
goto out;
}
- snprintf(card->options.hsuid, sizeof(card->options.hsuid),
- "%-8s", tmp);
+ scnprintf(card->options.hsuid, sizeof(card->options.hsuid),
+ "%-8s", tmp);
ASCEBC(card->options.hsuid, 8);
memcpy(card->dev->perm_addr, card->options.hsuid, 9);
@@ -285,7 +285,7 @@ static ssize_t qeth_l3_dev_ipato_enable_show(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- return sprintf(buf, "%u\n", card->ipato.enabled ? 1 : 0);
+ return sysfs_emit(buf, "%u\n", card->ipato.enabled ? 1 : 0);
}
static ssize_t qeth_l3_dev_ipato_enable_store(struct device *dev,
@@ -330,7 +330,7 @@ static ssize_t qeth_l3_dev_ipato_invert4_show(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- return sprintf(buf, "%u\n", card->ipato.invert4 ? 1 : 0);
+ return sysfs_emit(buf, "%u\n", card->ipato.invert4 ? 1 : 0);
}
static ssize_t qeth_l3_dev_ipato_invert4_store(struct device *dev,
@@ -367,35 +367,21 @@ static ssize_t qeth_l3_dev_ipato_add_show(char *buf, struct qeth_card *card,
enum qeth_prot_versions proto)
{
struct qeth_ipato_entry *ipatoe;
- int str_len = 0;
+ char addr_str[INET6_ADDRSTRLEN];
+ int offset = 0;
mutex_lock(&card->ip_lock);
list_for_each_entry(ipatoe, &card->ipato.entries, entry) {
- char addr_str[40];
- int entry_len;
-
if (ipatoe->proto != proto)
continue;
- entry_len = qeth_l3_ipaddr_to_string(proto, ipatoe->addr,
- addr_str);
- if (entry_len < 0)
- continue;
-
- /* Append /%mask to the entry: */
- entry_len += 1 + ((proto == QETH_PROT_IPV4) ? 2 : 3);
- /* Enough room to format %entry\n into null terminated page? */
- if (entry_len + 1 > PAGE_SIZE - str_len - 1)
- break;
-
- entry_len = scnprintf(buf, PAGE_SIZE - str_len,
- "%s/%i\n", addr_str, ipatoe->mask_bits);
- str_len += entry_len;
- buf += entry_len;
+ qeth_l3_ipaddr_to_string(proto, ipatoe->addr, addr_str);
+ offset += sysfs_emit_at(buf, offset, "%s/%i\n",
+ addr_str, ipatoe->mask_bits);
}
mutex_unlock(&card->ip_lock);
- return str_len ? str_len : scnprintf(buf, PAGE_SIZE, "\n");
+ return offset ? offset : sysfs_emit(buf, "\n");
}
static ssize_t qeth_l3_dev_ipato_add4_show(struct device *dev,
@@ -413,7 +399,7 @@ static int qeth_l3_parse_ipatoe(const char *buf, enum qeth_prot_versions proto,
int rc;
/* Expected input pattern: %addr/%mask */
- sep = strnchr(buf, 40, '/');
+ sep = strnchr(buf, INET6_ADDRSTRLEN, '/');
if (!sep)
return -EINVAL;
@@ -501,7 +487,7 @@ static ssize_t qeth_l3_dev_ipato_invert6_show(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- return sprintf(buf, "%u\n", card->ipato.invert6 ? 1 : 0);
+ return sysfs_emit(buf, "%u\n", card->ipato.invert6 ? 1 : 0);
}
static ssize_t qeth_l3_dev_ipato_invert6_store(struct device *dev,
@@ -586,35 +572,22 @@ static ssize_t qeth_l3_dev_ip_add_show(struct device *dev, char *buf,
enum qeth_ip_types type)
{
struct qeth_card *card = dev_get_drvdata(dev);
+ char addr_str[INET6_ADDRSTRLEN];
struct qeth_ipaddr *ipaddr;
- int str_len = 0;
+ int offset = 0;
int i;
mutex_lock(&card->ip_lock);
hash_for_each(card->ip_htable, i, ipaddr, hnode) {
- char addr_str[40];
- int entry_len;
-
if (ipaddr->proto != proto || ipaddr->type != type)
continue;
- entry_len = qeth_l3_ipaddr_to_string(proto, (u8 *)&ipaddr->u,
- addr_str);
- if (entry_len < 0)
- continue;
-
- /* Enough room to format %addr\n into null terminated page? */
- if (entry_len + 1 > PAGE_SIZE - str_len - 1)
- break;
-
- entry_len = scnprintf(buf, PAGE_SIZE - str_len, "%s\n",
- addr_str);
- str_len += entry_len;
- buf += entry_len;
+ qeth_l3_ipaddr_to_string(proto, (u8 *)&ipaddr->u, addr_str);
+ offset += sysfs_emit_at(buf, offset, "%s\n", addr_str);
}
mutex_unlock(&card->ip_lock);
- return str_len ? str_len : scnprintf(buf, PAGE_SIZE, "\n");
+ return offset ? offset : sysfs_emit(buf, "\n");
}
static ssize_t qeth_l3_dev_vipa_add4_show(struct device *dev,
diff --git a/drivers/sbus/char/oradax.c b/drivers/sbus/char/oradax.c
index 21b7cb6e7e70..e300cf26bc2a 100644
--- a/drivers/sbus/char/oradax.c
+++ b/drivers/sbus/char/oradax.c
@@ -389,7 +389,7 @@ static int dax_devmap(struct file *f, struct vm_area_struct *vma)
/* completion area is mapped read-only for user */
if (vma->vm_flags & VM_WRITE)
return -EPERM;
- vma->vm_flags &= ~VM_MAYWRITE;
+ vm_flags_clear(vma, VM_MAYWRITE);
if (remap_pfn_range(vma, vma->vm_start, ctx->ca_buf_ra >> PAGE_SHIFT,
len, vma->vm_page_prot))
diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c
index 3ebe66151dcb..f41c93454f0c 100644
--- a/drivers/scsi/3w-sas.c
+++ b/drivers/scsi/3w-sas.c
@@ -690,7 +690,7 @@ static void twl_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_comm
newcommand->request_id__lunl =
cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->request_id__lunl), request_id));
if (length) {
- newcommand->sg_list[0].address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1);
+ newcommand->sg_list[0].address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache));
newcommand->sg_list[0].length = TW_CPU_TO_SGL(length);
}
newcommand->sgl_entries__lunh =
@@ -702,7 +702,7 @@ static void twl_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_comm
if (TW_SGL_OUT(oldcommand->opcode__sgloffset)) {
/* Load the sg list */
sgl = (TW_SG_Entry_ISO *)((u32 *)oldcommand+oldcommand->size - (sizeof(TW_SG_Entry_ISO)/4) + pae + (sizeof(dma_addr_t) > 4 ? 1 : 0));
- sgl->address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1);
+ sgl->address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache));
sgl->length = TW_CPU_TO_SGL(length);
oldcommand->size += pae;
oldcommand->size += sizeof(dma_addr_t) > 4 ? 1 : 0;
@@ -748,7 +748,7 @@ static long twl_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long
data_buffer_length_adjusted = (driver_command.buffer_length + 511) & ~511;
/* Now allocate ioctl buf memory */
- cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted+sizeof(TW_Ioctl_Buf_Apache) - 1, &dma_handle, GFP_KERNEL);
+ cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted + sizeof(TW_Ioctl_Buf_Apache), &dma_handle, GFP_KERNEL);
if (!cpu_addr) {
retval = -ENOMEM;
goto out2;
@@ -757,7 +757,7 @@ static long twl_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long
tw_ioctl = (TW_Ioctl_Buf_Apache *)cpu_addr;
/* Now copy down the entire ioctl */
- if (copy_from_user(tw_ioctl, argp, driver_command.buffer_length + sizeof(TW_Ioctl_Buf_Apache) - 1))
+ if (copy_from_user(tw_ioctl, argp, driver_command.buffer_length + sizeof(TW_Ioctl_Buf_Apache)))
goto out3;
/* See which ioctl we are doing */
@@ -815,11 +815,11 @@ static long twl_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long
}
/* Now copy the entire response to userspace */
- if (copy_to_user(argp, tw_ioctl, sizeof(TW_Ioctl_Buf_Apache) + driver_command.buffer_length - 1) == 0)
+ if (copy_to_user(argp, tw_ioctl, sizeof(TW_Ioctl_Buf_Apache) + driver_command.buffer_length) == 0)
retval = 0;
out3:
/* Now free ioctl buf memory */
- dma_free_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted+sizeof(TW_Ioctl_Buf_Apache) - 1, cpu_addr, dma_handle);
+ dma_free_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted + sizeof(TW_Ioctl_Buf_Apache), cpu_addr, dma_handle);
out2:
mutex_unlock(&tw_dev->ioctl_lock);
out:
diff --git a/drivers/scsi/3w-sas.h b/drivers/scsi/3w-sas.h
index b0508039a280..096dec29e2ac 100644
--- a/drivers/scsi/3w-sas.h
+++ b/drivers/scsi/3w-sas.h
@@ -335,7 +335,7 @@ typedef struct TAG_TW_Ioctl_Apache {
TW_Ioctl_Driver_Command driver_command;
char padding[488];
TW_Command_Full firmware_command;
- char data_buffer[1];
+ char data_buffer[];
} TW_Ioctl_Buf_Apache;
/* GetParam descriptor */
@@ -344,7 +344,7 @@ typedef struct {
unsigned short parameter_id;
unsigned short parameter_size_bytes;
unsigned short actual_parameter_size_bytes;
- unsigned char data[1];
+ unsigned char data[];
} TW_Param_Apache;
/* Compatibility information structure */
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index 4d4cb47b3846..24c049eff157 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -818,8 +818,8 @@ static void aac_probe_container_scsi_done(struct scsi_cmnd *scsi_cmnd)
int aac_probe_container(struct aac_dev *dev, int cid)
{
- struct scsi_cmnd *scsicmd = kzalloc(sizeof(*scsicmd), GFP_KERNEL);
- struct aac_cmd_priv *cmd_priv = aac_priv(scsicmd);
+ struct aac_cmd_priv *cmd_priv;
+ struct scsi_cmnd *scsicmd = kzalloc(sizeof(*scsicmd) + sizeof(*cmd_priv), GFP_KERNEL);
struct scsi_device *scsidev = kzalloc(sizeof(*scsidev), GFP_KERNEL);
int status;
@@ -838,6 +838,7 @@ int aac_probe_container(struct aac_dev *dev, int cid)
while (scsicmd->device == scsidev)
schedule();
kfree(scsidev);
+ cmd_priv = aac_priv(scsicmd);
status = cmd_priv->status;
kfree(scsicmd);
return status;
diff --git a/drivers/scsi/aic94xx/aic94xx_task.c b/drivers/scsi/aic94xx/aic94xx_task.c
index ed119a3f6f2e..7f0208300110 100644
--- a/drivers/scsi/aic94xx/aic94xx_task.c
+++ b/drivers/scsi/aic94xx/aic94xx_task.c
@@ -50,6 +50,9 @@ static int asd_map_scatterlist(struct sas_task *task,
dma_addr_t dma = dma_map_single(&asd_ha->pcidev->dev, p,
task->total_xfer_len,
task->data_dir);
+ if (dma_mapping_error(&asd_ha->pcidev->dev, dma))
+ return -ENOMEM;
+
sg_arr[0].bus_addr = cpu_to_le64((u64)dma);
sg_arr[0].size = cpu_to_le32(task->total_xfer_len);
sg_arr[0].flags |= ASD_SG_EL_LIST_EOL;
diff --git a/drivers/scsi/ch.c b/drivers/scsi/ch.c
index 7ab29eaec6f3..72fe6df78bc5 100644
--- a/drivers/scsi/ch.c
+++ b/drivers/scsi/ch.c
@@ -184,20 +184,21 @@ static int ch_find_errno(struct scsi_sense_hdr *sshdr)
static int
ch_do_scsi(scsi_changer *ch, unsigned char *cmd, int cmd_len,
- void *buffer, unsigned buflength,
- enum dma_data_direction direction)
+ void *buffer, unsigned int buflength, enum req_op op)
{
int errno, retries = 0, timeout, result;
struct scsi_sense_hdr sshdr;
+ const struct scsi_exec_args exec_args = {
+ .sshdr = &sshdr,
+ };
timeout = (cmd[0] == INITIALIZE_ELEMENT_STATUS)
? timeout_init : timeout_move;
retry:
errno = 0;
- result = scsi_execute_req(ch->device, cmd, direction, buffer,
- buflength, &sshdr, timeout * HZ,
- MAX_RETRIES, NULL);
+ result = scsi_execute_cmd(ch->device, cmd, op, buffer, buflength,
+ timeout * HZ, MAX_RETRIES, &exec_args);
if (result < 0)
return result;
if (scsi_sense_valid(&sshdr)) {
@@ -254,7 +255,7 @@ ch_read_element_status(scsi_changer *ch, u_int elem, char *data)
cmd[5] = 1;
cmd[9] = 255;
if (0 == (result = ch_do_scsi(ch, cmd, 12,
- buffer, 256, DMA_FROM_DEVICE))) {
+ buffer, 256, REQ_OP_DRV_IN))) {
if (((buffer[16] << 8) | buffer[17]) != elem) {
DPRINTK("asked for element 0x%02x, got 0x%02x\n",
elem,(buffer[16] << 8) | buffer[17]);
@@ -284,7 +285,7 @@ ch_init_elem(scsi_changer *ch)
memset(cmd,0,sizeof(cmd));
cmd[0] = INITIALIZE_ELEMENT_STATUS;
cmd[1] = (ch->device->lun & 0x7) << 5;
- err = ch_do_scsi(ch, cmd, 6, NULL, 0, DMA_NONE);
+ err = ch_do_scsi(ch, cmd, 6, NULL, 0, REQ_OP_DRV_IN);
VPRINTK(KERN_INFO, "... finished\n");
return err;
}
@@ -306,10 +307,10 @@ ch_readconfig(scsi_changer *ch)
cmd[1] = (ch->device->lun & 0x7) << 5;
cmd[2] = 0x1d;
cmd[4] = 255;
- result = ch_do_scsi(ch, cmd, 10, buffer, 255, DMA_FROM_DEVICE);
+ result = ch_do_scsi(ch, cmd, 10, buffer, 255, REQ_OP_DRV_IN);
if (0 != result) {
cmd[1] |= (1<<3);
- result = ch_do_scsi(ch, cmd, 10, buffer, 255, DMA_FROM_DEVICE);
+ result = ch_do_scsi(ch, cmd, 10, buffer, 255, REQ_OP_DRV_IN);
}
if (0 == result) {
ch->firsts[CHET_MT] =
@@ -434,7 +435,7 @@ ch_position(scsi_changer *ch, u_int trans, u_int elem, int rotate)
cmd[4] = (elem >> 8) & 0xff;
cmd[5] = elem & 0xff;
cmd[8] = rotate ? 1 : 0;
- return ch_do_scsi(ch, cmd, 10, NULL, 0, DMA_NONE);
+ return ch_do_scsi(ch, cmd, 10, NULL, 0, REQ_OP_DRV_IN);
}
static int
@@ -455,7 +456,7 @@ ch_move(scsi_changer *ch, u_int trans, u_int src, u_int dest, int rotate)
cmd[6] = (dest >> 8) & 0xff;
cmd[7] = dest & 0xff;
cmd[10] = rotate ? 1 : 0;
- return ch_do_scsi(ch, cmd, 12, NULL,0, DMA_NONE);
+ return ch_do_scsi(ch, cmd, 12, NULL, 0, REQ_OP_DRV_IN);
}
static int
@@ -481,7 +482,7 @@ ch_exchange(scsi_changer *ch, u_int trans, u_int src,
cmd[9] = dest2 & 0xff;
cmd[10] = (rotate1 ? 1 : 0) | (rotate2 ? 2 : 0);
- return ch_do_scsi(ch, cmd, 12, NULL, 0, DMA_NONE);
+ return ch_do_scsi(ch, cmd, 12, NULL, 0, REQ_OP_DRV_IN);
}
static void
@@ -531,7 +532,7 @@ ch_set_voltag(scsi_changer *ch, u_int elem,
memcpy(buffer,tag,32);
ch_check_voltag(buffer);
- result = ch_do_scsi(ch, cmd, 12, buffer, 256, DMA_TO_DEVICE);
+ result = ch_do_scsi(ch, cmd, 12, buffer, 256, REQ_OP_DRV_OUT);
kfree(buffer);
return result;
}
@@ -799,8 +800,7 @@ static long ch_ioctl(struct file *file,
ch_cmd[5] = 1;
ch_cmd[9] = 255;
- result = ch_do_scsi(ch, ch_cmd, 12,
- buffer, 256, DMA_FROM_DEVICE);
+ result = ch_do_scsi(ch, ch_cmd, 12, buffer, 256, REQ_OP_DRV_IN);
if (!result) {
cge.cge_status = buffer[18];
cge.cge_flags = 0;
diff --git a/drivers/scsi/cxlflash/ocxl_hw.c b/drivers/scsi/cxlflash/ocxl_hw.c
index 631eda2d467e..6542818e595a 100644
--- a/drivers/scsi/cxlflash/ocxl_hw.c
+++ b/drivers/scsi/cxlflash/ocxl_hw.c
@@ -1167,7 +1167,7 @@ static int afu_mmap(struct file *file, struct vm_area_struct *vma)
(ctx->psn_size >> PAGE_SHIFT))
return -EINVAL;
- vma->vm_flags |= VM_IO | VM_PFNMAP;
+ vm_flags_set(vma, VM_IO | VM_PFNMAP);
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma->vm_ops = &ocxlflash_vmops;
return 0;
diff --git a/drivers/scsi/cxlflash/superpipe.c b/drivers/scsi/cxlflash/superpipe.c
index df0ebabbf387..22cfc2e1dfb9 100644
--- a/drivers/scsi/cxlflash/superpipe.c
+++ b/drivers/scsi/cxlflash/superpipe.c
@@ -308,19 +308,19 @@ out:
* @lli: LUN destined for capacity request.
*
* The READ_CAP16 can take quite a while to complete. Should an EEH occur while
- * in scsi_execute(), the EEH handler will attempt to recover. As part of the
- * recovery, the handler drains all currently running ioctls, waiting until they
- * have completed before proceeding with a reset. As this routine is used on the
- * ioctl path, this can create a condition where the EEH handler becomes stuck,
- * infinitely waiting for this ioctl thread. To avoid this behavior, temporarily
- * unmark this thread as an ioctl thread by releasing the ioctl read semaphore.
- * This will allow the EEH handler to proceed with a recovery while this thread
- * is still running. Once the scsi_execute() returns, reacquire the ioctl read
- * semaphore and check the adapter state in case it changed while inside of
- * scsi_execute(). The state check will wait if the adapter is still being
- * recovered or return a failure if the recovery failed. In the event that the
- * adapter reset failed, simply return the failure as the ioctl would be unable
- * to continue.
+ * in scsi_execute_cmd(), the EEH handler will attempt to recover. As part of
+ * the recovery, the handler drains all currently running ioctls, waiting until
+ * they have completed before proceeding with a reset. As this routine is used
+ * on the ioctl path, this can create a condition where the EEH handler becomes
+ * stuck, infinitely waiting for this ioctl thread. To avoid this behavior,
+ * temporarily unmark this thread as an ioctl thread by releasing the ioctl
+ * read semaphore. This will allow the EEH handler to proceed with a recovery
+ * while this thread is still running. Once the scsi_execute_cmd() returns,
+ * reacquire the ioctl read semaphore and check the adapter state in case it
+ * changed while inside of scsi_execute_cmd(). The state check will wait if the
+ * adapter is still being recovered or return a failure if the recovery failed.
+ * In the event that the adapter reset failed, simply return the failure as the
+ * ioctl would be unable to continue.
*
* Note that the above puts a requirement on this routine to only be called on
* an ioctl thread.
@@ -333,6 +333,9 @@ static int read_cap16(struct scsi_device *sdev, struct llun_info *lli)
struct device *dev = &cfg->dev->dev;
struct glun_info *gli = lli->parent;
struct scsi_sense_hdr sshdr;
+ const struct scsi_exec_args exec_args = {
+ .sshdr = &sshdr,
+ };
u8 *cmd_buf = NULL;
u8 *scsi_cmd = NULL;
int rc = 0;
@@ -357,9 +360,8 @@ retry:
/* Drop the ioctl read semahpore across lengthy call */
up_read(&cfg->ioctl_rwsem);
- result = scsi_execute(sdev, scsi_cmd, DMA_FROM_DEVICE, cmd_buf,
- CMD_BUFSIZE, NULL, &sshdr, to, CMD_RETRIES,
- 0, 0, NULL);
+ result = scsi_execute_cmd(sdev, scsi_cmd, REQ_OP_DRV_IN, cmd_buf,
+ CMD_BUFSIZE, to, CMD_RETRIES, &exec_args);
down_read(&cfg->ioctl_rwsem);
rc = check_state(cfg);
if (rc) {
diff --git a/drivers/scsi/cxlflash/vlun.c b/drivers/scsi/cxlflash/vlun.c
index 5c74dc7c2288..9caabf550436 100644
--- a/drivers/scsi/cxlflash/vlun.c
+++ b/drivers/scsi/cxlflash/vlun.c
@@ -397,19 +397,19 @@ static int init_vlun(struct llun_info *lli)
* @nblks: Number of logical blocks to write same.
*
* The SCSI WRITE_SAME16 can take quite a while to complete. Should an EEH occur
- * while in scsi_execute(), the EEH handler will attempt to recover. As part of
- * the recovery, the handler drains all currently running ioctls, waiting until
- * they have completed before proceeding with a reset. As this routine is used
- * on the ioctl path, this can create a condition where the EEH handler becomes
- * stuck, infinitely waiting for this ioctl thread. To avoid this behavior,
- * temporarily unmark this thread as an ioctl thread by releasing the ioctl read
- * semaphore. This will allow the EEH handler to proceed with a recovery while
- * this thread is still running. Once the scsi_execute() returns, reacquire the
- * ioctl read semaphore and check the adapter state in case it changed while
- * inside of scsi_execute(). The state check will wait if the adapter is still
- * being recovered or return a failure if the recovery failed. In the event that
- * the adapter reset failed, simply return the failure as the ioctl would be
- * unable to continue.
+ * while in scsi_execute_cmd(), the EEH handler will attempt to recover. As
+ * part of the recovery, the handler drains all currently running ioctls,
+ * waiting until they have completed before proceeding with a reset. As this
+ * routine is used on the ioctl path, this can create a condition where the
+ * EEH handler becomes stuck, infinitely waiting for this ioctl thread. To
+ * avoid this behavior, temporarily unmark this thread as an ioctl thread by
+ * releasing the ioctl read semaphore. This will allow the EEH handler to
+ * proceed with a recovery while this thread is still running. Once the
+ * scsi_execute_cmd() returns, reacquire the ioctl read semaphore and check the
+ * adapter state in case it changed while inside of scsi_execute_cmd(). The
+ * state check will wait if the adapter is still being recovered or return a
+ * failure if the recovery failed. In the event that the adapter reset failed,
+ * simply return the failure as the ioctl would be unable to continue.
*
* Note that the above puts a requirement on this routine to only be called on
* an ioctl thread.
@@ -450,9 +450,9 @@ static int write_same16(struct scsi_device *sdev,
/* Drop the ioctl read semahpore across lengthy call */
up_read(&cfg->ioctl_rwsem);
- result = scsi_execute(sdev, scsi_cmd, DMA_TO_DEVICE, cmd_buf,
- CMD_BUFSIZE, NULL, NULL, to,
- CMD_RETRIES, 0, 0, NULL);
+ result = scsi_execute_cmd(sdev, scsi_cmd, REQ_OP_DRV_OUT,
+ cmd_buf, CMD_BUFSIZE, to,
+ CMD_RETRIES, NULL);
down_read(&cfg->ioctl_rwsem);
rc = check_state(cfg);
if (rc) {
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index 29a2865b8e2e..362fa631f39b 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -127,8 +127,11 @@ static int submit_rtpg(struct scsi_device *sdev, unsigned char *buff,
int bufflen, struct scsi_sense_hdr *sshdr, int flags)
{
u8 cdb[MAX_COMMAND_SIZE];
- blk_opf_t req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
- REQ_FAILFAST_DRIVER;
+ blk_opf_t opf = REQ_OP_DRV_IN | REQ_FAILFAST_DEV |
+ REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER;
+ const struct scsi_exec_args exec_args = {
+ .sshdr = sshdr,
+ };
/* Prepare the command. */
memset(cdb, 0x0, MAX_COMMAND_SIZE);
@@ -139,9 +142,9 @@ static int submit_rtpg(struct scsi_device *sdev, unsigned char *buff,
cdb[1] = MI_REPORT_TARGET_PGS;
put_unaligned_be32(bufflen, &cdb[6]);
- return scsi_execute(sdev, cdb, DMA_FROM_DEVICE, buff, bufflen, NULL,
- sshdr, ALUA_FAILOVER_TIMEOUT * HZ,
- ALUA_FAILOVER_RETRIES, req_flags, 0, NULL);
+ return scsi_execute_cmd(sdev, cdb, opf, buff, bufflen,
+ ALUA_FAILOVER_TIMEOUT * HZ,
+ ALUA_FAILOVER_RETRIES, &exec_args);
}
/*
@@ -157,8 +160,11 @@ static int submit_stpg(struct scsi_device *sdev, int group_id,
u8 cdb[MAX_COMMAND_SIZE];
unsigned char stpg_data[8];
int stpg_len = 8;
- blk_opf_t req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
- REQ_FAILFAST_DRIVER;
+ blk_opf_t opf = REQ_OP_DRV_OUT | REQ_FAILFAST_DEV |
+ REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER;
+ const struct scsi_exec_args exec_args = {
+ .sshdr = sshdr,
+ };
/* Prepare the data buffer */
memset(stpg_data, 0, stpg_len);
@@ -171,9 +177,9 @@ static int submit_stpg(struct scsi_device *sdev, int group_id,
cdb[1] = MO_SET_TARGET_PGS;
put_unaligned_be32(stpg_len, &cdb[6]);
- return scsi_execute(sdev, cdb, DMA_TO_DEVICE, stpg_data, stpg_len, NULL,
- sshdr, ALUA_FAILOVER_TIMEOUT * HZ,
- ALUA_FAILOVER_RETRIES, req_flags, 0, NULL);
+ return scsi_execute_cmd(sdev, cdb, opf, stpg_data,
+ stpg_len, ALUA_FAILOVER_TIMEOUT * HZ,
+ ALUA_FAILOVER_RETRIES, &exec_args);
}
static struct alua_port_group *alua_find_get_pg(char *id_str, size_t id_size,
diff --git a/drivers/scsi/device_handler/scsi_dh_emc.c b/drivers/scsi/device_handler/scsi_dh_emc.c
index 2e21ab447873..3cf88db2d5b2 100644
--- a/drivers/scsi/device_handler/scsi_dh_emc.c
+++ b/drivers/scsi/device_handler/scsi_dh_emc.c
@@ -239,8 +239,11 @@ static int send_trespass_cmd(struct scsi_device *sdev,
unsigned char cdb[MAX_COMMAND_SIZE];
int err, res = SCSI_DH_OK, len;
struct scsi_sense_hdr sshdr;
- blk_opf_t req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
- REQ_FAILFAST_DRIVER;
+ blk_opf_t opf = REQ_OP_DRV_OUT | REQ_FAILFAST_DEV |
+ REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER;
+ const struct scsi_exec_args exec_args = {
+ .sshdr = &sshdr,
+ };
if (csdev->flags & CLARIION_SHORT_TRESPASS) {
page22 = short_trespass;
@@ -263,9 +266,9 @@ static int send_trespass_cmd(struct scsi_device *sdev,
BUG_ON((len > CLARIION_BUFFER_SIZE));
memcpy(csdev->buffer, page22, len);
- err = scsi_execute(sdev, cdb, DMA_TO_DEVICE, csdev->buffer, len, NULL,
- &sshdr, CLARIION_TIMEOUT * HZ, CLARIION_RETRIES,
- req_flags, 0, NULL);
+ err = scsi_execute_cmd(sdev, cdb, opf, csdev->buffer, len,
+ CLARIION_TIMEOUT * HZ, CLARIION_RETRIES,
+ &exec_args);
if (err) {
if (scsi_sense_valid(&sshdr))
res = trespass_endio(sdev, &sshdr);
diff --git a/drivers/scsi/device_handler/scsi_dh_hp_sw.c b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
index 0d2cfa60aa06..5f2f943d926c 100644
--- a/drivers/scsi/device_handler/scsi_dh_hp_sw.c
+++ b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
@@ -83,12 +83,15 @@ static int hp_sw_tur(struct scsi_device *sdev, struct hp_sw_dh_data *h)
unsigned char cmd[6] = { TEST_UNIT_READY };
struct scsi_sense_hdr sshdr;
int ret = SCSI_DH_OK, res;
- blk_opf_t req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
- REQ_FAILFAST_DRIVER;
+ blk_opf_t opf = REQ_OP_DRV_IN | REQ_FAILFAST_DEV |
+ REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER;
+ const struct scsi_exec_args exec_args = {
+ .sshdr = &sshdr,
+ };
retry:
- res = scsi_execute(sdev, cmd, DMA_NONE, NULL, 0, NULL, &sshdr,
- HP_SW_TIMEOUT, HP_SW_RETRIES, req_flags, 0, NULL);
+ res = scsi_execute_cmd(sdev, cmd, opf, NULL, 0, HP_SW_TIMEOUT,
+ HP_SW_RETRIES, &exec_args);
if (res) {
if (scsi_sense_valid(&sshdr))
ret = tur_done(sdev, h, &sshdr);
@@ -121,12 +124,15 @@ static int hp_sw_start_stop(struct hp_sw_dh_data *h)
struct scsi_device *sdev = h->sdev;
int res, rc = SCSI_DH_OK;
int retry_cnt = HP_SW_RETRIES;
- blk_opf_t req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
- REQ_FAILFAST_DRIVER;
+ blk_opf_t opf = REQ_OP_DRV_IN | REQ_FAILFAST_DEV |
+ REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER;
+ const struct scsi_exec_args exec_args = {
+ .sshdr = &sshdr,
+ };
retry:
- res = scsi_execute(sdev, cmd, DMA_NONE, NULL, 0, NULL, &sshdr,
- HP_SW_TIMEOUT, HP_SW_RETRIES, req_flags, 0, NULL);
+ res = scsi_execute_cmd(sdev, cmd, opf, NULL, 0, HP_SW_TIMEOUT,
+ HP_SW_RETRIES, &exec_args);
if (res) {
if (!scsi_sense_valid(&sshdr)) {
sdev_printk(KERN_WARNING, sdev,
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index bf8754741f85..c5538645057a 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -536,8 +536,11 @@ static void send_mode_select(struct work_struct *work)
unsigned char cdb[MAX_COMMAND_SIZE];
struct scsi_sense_hdr sshdr;
unsigned int data_size;
- blk_opf_t req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
- REQ_FAILFAST_DRIVER;
+ blk_opf_t opf = REQ_OP_DRV_OUT | REQ_FAILFAST_DEV |
+ REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER;
+ const struct scsi_exec_args exec_args = {
+ .sshdr = &sshdr,
+ };
spin_lock(&ctlr->ms_lock);
list_splice_init(&ctlr->ms_head, &list);
@@ -555,9 +558,8 @@ static void send_mode_select(struct work_struct *work)
(char *) h->ctlr->array_name, h->ctlr->index,
(retry_cnt == RDAC_RETRY_COUNT) ? "queueing" : "retrying");
- if (scsi_execute(sdev, cdb, DMA_TO_DEVICE, &h->ctlr->mode_select,
- data_size, NULL, &sshdr, RDAC_TIMEOUT * HZ,
- RDAC_RETRIES, req_flags, 0, NULL)) {
+ if (scsi_execute_cmd(sdev, cdb, opf, &h->ctlr->mode_select, data_size,
+ RDAC_TIMEOUT * HZ, RDAC_RETRIES, &exec_args)) {
err = mode_select_handle_sense(sdev, &sshdr);
if (err == SCSI_DH_RETRY && retry_cnt--)
goto retry;
diff --git a/drivers/scsi/elx/libefc_sli/sli4.c b/drivers/scsi/elx/libefc_sli/sli4.c
index b8c048cdb17f..8f96049f62dd 100644
--- a/drivers/scsi/elx/libefc_sli/sli4.c
+++ b/drivers/scsi/elx/libefc_sli/sli4.c
@@ -4,7 +4,7 @@
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
*/
-/**
+/*
* All common (i.e. transport-independent) SLI-4 functions are implemented
* in this file.
*/
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 2022ffb45041..198d3f20d682 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -3912,7 +3912,6 @@ static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
{
int bsize_elem, i, result = 0;
struct scatterlist *sg;
- void *kaddr;
/* Determine the actual number of bytes per element */
bsize_elem = PAGE_SIZE * (1 << sglist->order);
@@ -3923,9 +3922,7 @@ static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
buffer += bsize_elem) {
struct page *page = sg_page(sg);
- kaddr = kmap(page);
- memcpy(kaddr, buffer, bsize_elem);
- kunmap(page);
+ memcpy_to_page(page, 0, buffer, bsize_elem);
sg->length = bsize_elem;
@@ -3938,9 +3935,7 @@ static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
if (len % bsize_elem) {
struct page *page = sg_page(sg);
- kaddr = kmap(page);
- memcpy(kaddr, buffer, len % bsize_elem);
- kunmap(page);
+ memcpy_to_page(page, 0, buffer, len % bsize_elem);
sg->length = len % bsize_elem;
}
@@ -5370,9 +5365,9 @@ static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
continue;
ipr_cmd->done = ipr_sata_eh_done;
- if (!(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
+ if (!(ipr_cmd->qc->flags & ATA_QCFLAG_EH)) {
ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
- ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
+ ipr_cmd->qc->flags |= ATA_QCFLAG_EH;
}
}
}
@@ -7142,11 +7137,8 @@ static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
/**
* ipr_qc_fill_rtf - Read result TF
* @qc: ATA queued command
- *
- * Return value:
- * true
**/
-static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
+static void ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
{
struct ipr_sata_port *sata_port = qc->ap->private_data;
struct ipr_ioasa_gata *g = &sata_port->ioasa;
@@ -7163,8 +7155,6 @@ static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
tf->hob_lbal = g->hob_lbal;
tf->hob_lbam = g->hob_lbam;
tf->hob_lbah = g->hob_lbah;
-
- return true;
}
static struct ata_port_operations ipr_sata_ops = {
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index 16419aeec02d..bb206509265e 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -1499,17 +1499,16 @@ static int ips_is_passthru(struct scsi_cmnd *SC)
struct scatterlist *sg = scsi_sglist(SC);
char *buffer;
- /* kmap_atomic() ensures addressability of the user buffer.*/
/* local_irq_save() protects the KM_IRQ0 address slot. */
local_irq_save(flags);
- buffer = kmap_atomic(sg_page(sg)) + sg->offset;
- if (buffer && buffer[0] == 'C' && buffer[1] == 'O' &&
- buffer[2] == 'P' && buffer[3] == 'P') {
- kunmap_atomic(buffer - sg->offset);
+ buffer = kmap_local_page(sg_page(sg)) + sg->offset;
+ if (buffer && buffer[0] == 'C' && buffer[1] == 'O' &&
+ buffer[2] == 'P' && buffer[3] == 'P') {
+ kunmap_local(buffer);
local_irq_restore(flags);
return 1;
}
- kunmap_atomic(buffer - sg->offset);
+ kunmap_local(buffer);
local_irq_restore(flags);
}
return 0;
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 0454d94e8cf0..c76f82fb8b63 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -36,6 +36,7 @@
#include <scsi/scsi.h>
#include <scsi/scsi_transport_iscsi.h>
#include <trace/events/iscsi.h>
+#include <trace/events/sock.h>
#include "iscsi_tcp.h"
@@ -170,6 +171,8 @@ static void iscsi_sw_tcp_data_ready(struct sock *sk)
struct iscsi_tcp_conn *tcp_conn;
struct iscsi_conn *conn;
+ trace_sk_data_ready(sk);
+
read_lock_bh(&sk->sk_callback_lock);
conn = sk->sk_user_data;
if (!conn) {
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 5e80225b5308..77714a495cbb 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -125,7 +125,7 @@ static void sas_ata_task_done(struct sas_task *task)
} else {
link->eh_info.err_mask |= ac_err_mask(dev->sata_dev.fis[2]);
if (unlikely(link->eh_info.err_mask))
- qc->flags |= ATA_QCFLAG_FAILED;
+ qc->flags |= ATA_QCFLAG_EH;
}
} else {
ac = sas_to_ata_err(stat);
@@ -136,7 +136,7 @@ static void sas_ata_task_done(struct sas_task *task)
qc->err_mask = ac;
} else {
link->eh_info.err_mask |= AC_ERR_DEV;
- qc->flags |= ATA_QCFLAG_FAILED;
+ qc->flags |= ATA_QCFLAG_EH;
}
dev->sata_dev.fis[2] = ATA_ERR | ATA_DRDY; /* tf status */
@@ -226,12 +226,11 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
return ret;
}
-static bool sas_ata_qc_fill_rtf(struct ata_queued_cmd *qc)
+static void sas_ata_qc_fill_rtf(struct ata_queued_cmd *qc)
{
struct domain_device *dev = qc->ap->private_data;
ata_tf_from_fis(dev->sata_dev.fis, &qc->result_tf);
- return true;
}
static struct sas_internal *dev_to_sas_internal(struct domain_device *dev)
@@ -239,7 +238,17 @@ static struct sas_internal *dev_to_sas_internal(struct domain_device *dev)
return to_sas_internal(dev->port->ha->core.shost->transportt);
}
-static int sas_get_ata_command_set(struct domain_device *dev);
+static int sas_get_ata_command_set(struct domain_device *dev)
+{
+ struct ata_taskfile tf;
+
+ if (dev->dev_type == SAS_SATA_PENDING)
+ return ATA_DEV_UNKNOWN;
+
+ ata_tf_from_fis(dev->frame_rcvd, &tf);
+
+ return ata_dev_classify(&tf);
+}
int sas_get_ata_info(struct domain_device *dev, struct ex_phy *phy)
{
@@ -476,7 +485,7 @@ static void sas_ata_internal_abort(struct sas_task *task)
static void sas_ata_post_internal(struct ata_queued_cmd *qc)
{
- if (qc->flags & ATA_QCFLAG_FAILED)
+ if (qc->flags & ATA_QCFLAG_EH)
qc->err_mask |= AC_ERR_OTHER;
if (qc->err_mask) {
@@ -631,26 +640,12 @@ void sas_ata_task_abort(struct sas_task *task)
/* Internal command, fake a timeout and complete. */
qc->flags &= ~ATA_QCFLAG_ACTIVE;
- qc->flags |= ATA_QCFLAG_FAILED;
+ qc->flags |= ATA_QCFLAG_EH;
qc->err_mask |= AC_ERR_TIMEOUT;
waiting = qc->private_data;
complete(waiting);
}
-static int sas_get_ata_command_set(struct domain_device *dev)
-{
- struct dev_to_host_fis *fis =
- (struct dev_to_host_fis *) dev->frame_rcvd;
- struct ata_taskfile tf;
-
- if (dev->dev_type == SAS_SATA_PENDING)
- return ATA_DEV_UNKNOWN;
-
- ata_tf_from_fis((const u8 *)fis, &tf);
-
- return ata_dev_classify(&tf);
-}
-
void sas_probe_sata(struct asd_sas_port *port)
{
struct domain_device *dev, *n;
@@ -679,6 +674,68 @@ void sas_probe_sata(struct asd_sas_port *port)
}
+int sas_ata_add_dev(struct domain_device *parent, struct ex_phy *phy,
+ struct domain_device *child, int phy_id)
+{
+ struct sas_rphy *rphy;
+ int ret;
+
+ if (child->linkrate > parent->min_linkrate) {
+ struct sas_phy *cphy = child->phy;
+ enum sas_linkrate min_prate = cphy->minimum_linkrate,
+ parent_min_lrate = parent->min_linkrate,
+ min_linkrate = (min_prate > parent_min_lrate) ?
+ parent_min_lrate : 0;
+ struct sas_phy_linkrates rates = {
+ .maximum_linkrate = parent->min_linkrate,
+ .minimum_linkrate = min_linkrate,
+ };
+
+ pr_notice("ex %016llx phy%02d SATA device linkrate > min pathway connection rate, attempting to lower device linkrate\n",
+ SAS_ADDR(child->sas_addr), phy_id);
+ ret = sas_smp_phy_control(parent, phy_id,
+ PHY_FUNC_LINK_RESET, &rates);
+ if (ret) {
+ pr_err("ex %016llx phy%02d SATA device could not set linkrate (%d)\n",
+ SAS_ADDR(child->sas_addr), phy_id, ret);
+ return ret;
+ }
+ pr_notice("ex %016llx phy%02d SATA device set linkrate successfully\n",
+ SAS_ADDR(child->sas_addr), phy_id);
+ child->linkrate = child->min_linkrate;
+ }
+ ret = sas_get_ata_info(child, phy);
+ if (ret)
+ return ret;
+
+ sas_init_dev(child);
+ ret = sas_ata_init(child);
+ if (ret)
+ return ret;
+
+ rphy = sas_end_device_alloc(phy->port);
+ if (!rphy)
+ return -ENOMEM;
+
+ rphy->identify.phy_identifier = phy_id;
+ child->rphy = rphy;
+ get_device(&rphy->dev);
+
+ list_add_tail(&child->disco_list_node, &parent->port->disco_list);
+
+ ret = sas_discover_sata(child);
+ if (ret) {
+ pr_notice("sas_discover_sata() for device %16llx at %016llx:%02d returned 0x%x\n",
+ SAS_ADDR(child->sas_addr),
+ SAS_ADDR(parent->sas_addr), phy_id, ret);
+ sas_rphy_free(child->rphy);
+ list_del(&child->disco_list_node);
+ return ret;
+ }
+
+ return 0;
+}
+
static void sas_ata_flush_pm_eh(struct asd_sas_port *port, const char *func)
{
struct domain_device *dev, *n;
diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c
index d5bc1314c341..72fdb2e5d047 100644
--- a/drivers/scsi/libsas/sas_discover.c
+++ b/drivers/scsi/libsas/sas_discover.c
@@ -455,14 +455,8 @@ static void sas_discover_domain(struct work_struct *work)
break;
case SAS_SATA_DEV:
case SAS_SATA_PM:
-#ifdef CONFIG_SCSI_SAS_ATA
error = sas_discover_sata(dev);
break;
-#else
- pr_notice("ATA device seen but CONFIG_SCSI_SAS_ATA=N so cannot attach\n");
- fallthrough;
-#endif
- /* Fall through - only for the #else condition above. */
default:
error = -ENXIO;
pr_err("unhandled device %d\n", dev->dev_type);
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index a04cad620e93..dc670304f181 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -751,13 +751,46 @@ static void sas_ex_get_linkrate(struct domain_device *parent,
child->pathways = min(child->pathways, parent->pathways);
}
+static int sas_ex_add_dev(struct domain_device *parent, struct ex_phy *phy,
+ struct domain_device *child, int phy_id)
+{
+ struct sas_rphy *rphy;
+ int res;
+
+ child->dev_type = SAS_END_DEVICE;
+ rphy = sas_end_device_alloc(phy->port);
+ if (!rphy)
+ return -ENOMEM;
+
+ child->tproto = phy->attached_tproto;
+ sas_init_dev(child);
+
+ child->rphy = rphy;
+ get_device(&rphy->dev);
+ rphy->identify.phy_identifier = phy_id;
+ sas_fill_in_rphy(child, rphy);
+
+ list_add_tail(&child->disco_list_node, &parent->port->disco_list);
+
+ res = sas_notify_lldd_dev_found(child);
+ if (res) {
+ pr_notice("notify lldd for device %016llx at %016llx:%02d returned 0x%x\n",
+ SAS_ADDR(child->sas_addr),
+ SAS_ADDR(parent->sas_addr), phy_id, res);
+ sas_rphy_free(child->rphy);
+ list_del(&child->disco_list_node);
+ return res;
+ }
+
+ return 0;
+}
+
static struct domain_device *sas_ex_discover_end_dev(
struct domain_device *parent, int phy_id)
{
struct expander_device *parent_ex = &parent->ex_dev;
struct ex_phy *phy = &parent_ex->ex_phy[phy_id];
struct domain_device *child = NULL;
- struct sas_rphy *rphy;
int res;
if (phy->attached_sata_host || phy->attached_sata_ps)
@@ -785,99 +818,23 @@ static struct domain_device *sas_ex_discover_end_dev(
sas_ex_get_linkrate(parent, child, phy);
sas_device_set_phy(child, phy->port);
-#ifdef CONFIG_SCSI_SAS_ATA
if ((phy->attached_tproto & SAS_PROTOCOL_STP) || phy->attached_sata_dev) {
- if (child->linkrate > parent->min_linkrate) {
- struct sas_phy *cphy = child->phy;
- enum sas_linkrate min_prate = cphy->minimum_linkrate,
- parent_min_lrate = parent->min_linkrate,
- min_linkrate = (min_prate > parent_min_lrate) ?
- parent_min_lrate : 0;
- struct sas_phy_linkrates rates = {
- .maximum_linkrate = parent->min_linkrate,
- .minimum_linkrate = min_linkrate,
- };
- int ret;
-
- pr_notice("ex %016llx phy%02d SATA device linkrate > min pathway connection rate, attempting to lower device linkrate\n",
- SAS_ADDR(child->sas_addr), phy_id);
- ret = sas_smp_phy_control(parent, phy_id,
- PHY_FUNC_LINK_RESET, &rates);
- if (ret) {
- pr_err("ex %016llx phy%02d SATA device could not set linkrate (%d)\n",
- SAS_ADDR(child->sas_addr), phy_id, ret);
- goto out_free;
- }
- pr_notice("ex %016llx phy%02d SATA device set linkrate successfully\n",
- SAS_ADDR(child->sas_addr), phy_id);
- child->linkrate = child->min_linkrate;
- }
- res = sas_get_ata_info(child, phy);
- if (res)
- goto out_free;
-
- sas_init_dev(child);
- res = sas_ata_init(child);
- if (res)
- goto out_free;
- rphy = sas_end_device_alloc(phy->port);
- if (!rphy)
- goto out_free;
- rphy->identify.phy_identifier = phy_id;
-
- child->rphy = rphy;
- get_device(&rphy->dev);
-
- list_add_tail(&child->disco_list_node, &parent->port->disco_list);
-
- res = sas_discover_sata(child);
- if (res) {
- pr_notice("sas_discover_sata() for device %16llx at %016llx:%02d returned 0x%x\n",
- SAS_ADDR(child->sas_addr),
- SAS_ADDR(parent->sas_addr), phy_id, res);
- goto out_list_del;
- }
- } else
-#endif
- if (phy->attached_tproto & SAS_PROTOCOL_SSP) {
- child->dev_type = SAS_END_DEVICE;
- rphy = sas_end_device_alloc(phy->port);
- /* FIXME: error handling */
- if (unlikely(!rphy))
- goto out_free;
- child->tproto = phy->attached_tproto;
- sas_init_dev(child);
-
- child->rphy = rphy;
- get_device(&rphy->dev);
- rphy->identify.phy_identifier = phy_id;
- sas_fill_in_rphy(child, rphy);
-
- list_add_tail(&child->disco_list_node, &parent->port->disco_list);
-
- res = sas_discover_end_dev(child);
- if (res) {
- pr_notice("sas_discover_end_dev() for device %016llx at %016llx:%02d returned 0x%x\n",
- SAS_ADDR(child->sas_addr),
- SAS_ADDR(parent->sas_addr), phy_id, res);
- goto out_list_del;
- }
+ res = sas_ata_add_dev(parent, phy, child, phy_id);
+ } else if (phy->attached_tproto & SAS_PROTOCOL_SSP) {
+ res = sas_ex_add_dev(parent, phy, child, phy_id);
} else {
pr_notice("target proto 0x%x at %016llx:0x%x not handled\n",
phy->attached_tproto, SAS_ADDR(parent->sas_addr),
phy_id);
- goto out_free;
+ res = -ENODEV;
}
+ if (res)
+ goto out_free;
+
list_add_tail(&child->siblings, &parent_ex->children);
return child;
- out_list_del:
- sas_rphy_free(child->rphy);
- list_del(&child->disco_list_node);
- spin_lock_irq(&parent->port->dev_list_lock);
- list_del(&child->dev_list_node);
- spin_unlock_irq(&parent->port->dev_list_lock);
out_free:
sas_port_delete(phy->port);
out_err:
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 9ad233b40a9e..cf55f8e3bd9f 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -1592,8 +1592,6 @@ struct lpfc_hba {
struct timer_list cpuhp_poll_timer;
struct list_head poll_list; /* slowpath eq polling list */
#define LPFC_POLL_HB 1 /* slowpath heartbeat */
-#define LPFC_POLL_FASTPATH 0 /* called from fastpath */
-#define LPFC_POLL_SLOWPATH 1 /* called from slowpath */
char os_host_name[MAXHOSTNAMELEN];
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 77e1b2911cb4..76c3434f8976 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -1905,8 +1905,7 @@ lpfc_xcvr_data_show(struct device *dev, struct device_attribute *attr,
goto out_free_rdp;
}
- strncpy(chbuf, &rdp_context->page_a0[SSF_VENDOR_NAME], 16);
- chbuf[16] = 0;
+ strscpy(chbuf, &rdp_context->page_a0[SSF_VENDOR_NAME], 16);
len = scnprintf(buf, PAGE_SIZE - len, "VendorName:\t%s\n", chbuf);
len += scnprintf(buf + len, PAGE_SIZE - len,
@@ -1914,17 +1913,13 @@ lpfc_xcvr_data_show(struct device *dev, struct device_attribute *attr,
(uint8_t)rdp_context->page_a0[SSF_VENDOR_OUI],
(uint8_t)rdp_context->page_a0[SSF_VENDOR_OUI + 1],
(uint8_t)rdp_context->page_a0[SSF_VENDOR_OUI + 2]);
- strncpy(chbuf, &rdp_context->page_a0[SSF_VENDOR_PN], 16);
- chbuf[16] = 0;
+ strscpy(chbuf, &rdp_context->page_a0[SSF_VENDOR_PN], 16);
len += scnprintf(buf + len, PAGE_SIZE - len, "VendorPN:\t%s\n", chbuf);
- strncpy(chbuf, &rdp_context->page_a0[SSF_VENDOR_SN], 16);
- chbuf[16] = 0;
+ strscpy(chbuf, &rdp_context->page_a0[SSF_VENDOR_SN], 16);
len += scnprintf(buf + len, PAGE_SIZE - len, "VendorSN:\t%s\n", chbuf);
- strncpy(chbuf, &rdp_context->page_a0[SSF_VENDOR_REV], 4);
- chbuf[4] = 0;
+ strscpy(chbuf, &rdp_context->page_a0[SSF_VENDOR_REV], 4);
len += scnprintf(buf + len, PAGE_SIZE - len, "VendorRev:\t%s\n", chbuf);
- strncpy(chbuf, &rdp_context->page_a0[SSF_DATE_CODE], 8);
- chbuf[8] = 0;
+ strscpy(chbuf, &rdp_context->page_a0[SSF_DATE_CODE], 8);
len += scnprintf(buf + len, PAGE_SIZE - len, "DateCode:\t%s\n", chbuf);
len += scnprintf(buf + len, PAGE_SIZE - len, "Identifier:\t%xh\n",
(uint8_t)rdp_context->page_a0[SSF_IDENTIFIER]);
@@ -1941,33 +1936,25 @@ lpfc_xcvr_data_show(struct device *dev, struct device_attribute *attr,
&rdp_context->page_a0[SSF_TRANSCEIVER_CODE_B7];
len += scnprintf(buf + len, PAGE_SIZE - len, "Speeds: \t");
- if (*(uint8_t *)trasn_code_byte7 == 0) {
- len += scnprintf(buf + len, PAGE_SIZE - len,
- "Unknown\n");
- } else {
- if (trasn_code_byte7->fc_sp_100MB)
- len += scnprintf(buf + len, PAGE_SIZE - len,
- "1 ");
- if (trasn_code_byte7->fc_sp_200mb)
- len += scnprintf(buf + len, PAGE_SIZE - len,
- "2 ");
- if (trasn_code_byte7->fc_sp_400MB)
- len += scnprintf(buf + len, PAGE_SIZE - len,
- "4 ");
- if (trasn_code_byte7->fc_sp_800MB)
- len += scnprintf(buf + len, PAGE_SIZE - len,
- "8 ");
- if (trasn_code_byte7->fc_sp_1600MB)
- len += scnprintf(buf + len, PAGE_SIZE - len,
- "16 ");
- if (trasn_code_byte7->fc_sp_3200MB)
- len += scnprintf(buf + len, PAGE_SIZE - len,
- "32 ");
- if (trasn_code_byte7->speed_chk_ecc)
- len += scnprintf(buf + len, PAGE_SIZE - len,
- "64 ");
- len += scnprintf(buf + len, PAGE_SIZE - len, "GB\n");
- }
+ if (*(uint8_t *)trasn_code_byte7 == 0) {
+ len += scnprintf(buf + len, PAGE_SIZE - len, "Unknown\n");
+ } else {
+ if (trasn_code_byte7->fc_sp_100MB)
+ len += scnprintf(buf + len, PAGE_SIZE - len, "1 ");
+ if (trasn_code_byte7->fc_sp_200mb)
+ len += scnprintf(buf + len, PAGE_SIZE - len, "2 ");
+ if (trasn_code_byte7->fc_sp_400MB)
+ len += scnprintf(buf + len, PAGE_SIZE - len, "4 ");
+ if (trasn_code_byte7->fc_sp_800MB)
+ len += scnprintf(buf + len, PAGE_SIZE - len, "8 ");
+ if (trasn_code_byte7->fc_sp_1600MB)
+ len += scnprintf(buf + len, PAGE_SIZE - len, "16 ");
+ if (trasn_code_byte7->fc_sp_3200MB)
+ len += scnprintf(buf + len, PAGE_SIZE - len, "32 ");
+ if (trasn_code_byte7->speed_chk_ecc)
+ len += scnprintf(buf + len, PAGE_SIZE - len, "64 ");
+ len += scnprintf(buf + len, PAGE_SIZE - len, "GB\n");
+ }
temperature = (rdp_context->page_a2[SFF_TEMPERATURE_B1] << 8 |
rdp_context->page_a2[SFF_TEMPERATURE_B0]);
vcc = (rdp_context->page_a2[SFF_VCC_B1] << 8 |
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 8928f016d09e..976fd5ee7f7e 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -253,7 +253,6 @@ int lpfc_read_object(struct lpfc_hba *phba, char *s, uint32_t *datap,
uint32_t len);
void lpfc_sli4_cleanup_poll_list(struct lpfc_hba *phba);
-int lpfc_sli4_poll_eq(struct lpfc_queue *q, uint8_t path);
void lpfc_sli4_poll_hbtimer(struct timer_list *t);
void lpfc_sli4_start_polling(struct lpfc_queue *q);
void lpfc_sli4_stop_polling(struct lpfc_queue *q);
@@ -684,6 +683,7 @@ int lpfc_vmid_get_appid(struct lpfc_vport *vport, char *uuid,
union lpfc_vmid_io_tag *tag);
void lpfc_vmid_vport_cleanup(struct lpfc_vport *vport);
int lpfc_issue_els_qfpa(struct lpfc_vport *vport);
+void lpfc_reinit_vmid(struct lpfc_vport *vport);
void lpfc_sli_rpi_release(struct lpfc_vport *vport,
struct lpfc_nodelist *ndlp);
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 919741bbe267..569639dc8b2c 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -1123,6 +1123,9 @@ stop_rr_fcf_flogi:
if (sp->cmn.priority_tagging)
vport->phba->pport->vmid_flag |= (LPFC_VMID_ISSUE_QFPA |
LPFC_VMID_TYPE_PRIO);
+ /* reinitialize the VMID datastructure before returning */
+ if (lpfc_is_vmid_enabled(phba))
+ lpfc_reinit_vmid(vport);
/*
* Address a timing race with dev_loss. If dev_loss is active on
@@ -2373,15 +2376,30 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* PRLI failed */
lpfc_printf_vlog(vport, mode, loglevel,
"2754 PRLI failure DID:%06X Status:x%x/x%x, "
- "data: x%x\n",
+ "data: x%x x%x\n",
ndlp->nlp_DID, ulp_status,
- ulp_word4, ndlp->fc4_prli_sent);
+ ulp_word4, ndlp->nlp_state,
+ ndlp->fc4_prli_sent);
/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
if (!lpfc_error_lost_link(ulp_status, ulp_word4))
lpfc_disc_state_machine(vport, ndlp, cmdiocb,
NLP_EVT_CMPL_PRLI);
+ /* The following condition catches an inflight transition
+ * mismatch typically caused by an RSCN. Skip any
+ * processing to allow recovery.
+ */
+ if (ndlp->nlp_state >= NLP_STE_PLOGI_ISSUE &&
+ ndlp->nlp_state <= NLP_STE_REG_LOGIN_ISSUE) {
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
+ "2784 PRLI cmpl: state mismatch "
+ "DID x%06x nstate x%x nflag x%x\n",
+ ndlp->nlp_DID, ndlp->nlp_state,
+ ndlp->nlp_flag);
+ goto out;
+ }
+
/*
* For P2P topology, retain the node so that PLOGI can be
* attempted on it again.
@@ -4673,6 +4691,15 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* the nameserver fails */
maxretry = 0;
delay = 100;
+ } else if (cmd == ELS_CMD_PRLI &&
+ ndlp->nlp_state != NLP_STE_PRLI_ISSUE) {
+ /* State-command disagreement. The PRLI was
+ * failed with an invalid rpi meaning there
+ * some unexpected state change. Don't retry.
+ */
+ maxretry = 0;
+ retry = 0;
+ break;
}
retry = 1;
break;
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 80375d73b732..a6df0a5b4006 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -1129,21 +1129,6 @@ lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
struct lpfc_nodelist *ndlp, *next_ndlp;
list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
- if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
- /* It's possible the FLOGI to the fabric node never
- * successfully completed and never registered with the
- * transport. In this case there is no way to clean up
- * the node.
- */
- if (ndlp->nlp_DID == Fabric_DID) {
- if (ndlp->nlp_prev_state ==
- NLP_STE_UNUSED_NODE &&
- !ndlp->fc4_xpt_flags)
- lpfc_nlp_put(ndlp);
- }
- continue;
- }
-
if ((phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) ||
((vport->port_type == LPFC_NPIV_PORT) &&
((ndlp->nlp_DID == NameServer_DID) ||
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index fb3504dbb899..58fa39c403a0 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2009-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -4201,6 +4201,8 @@ struct lpfc_acqe_fc_la {
#define LPFC_FC_LA_TYPE_MDS_LOOPBACK 0x5
#define LPFC_FC_LA_TYPE_UNEXP_WWPN 0x6
#define LPFC_FC_LA_TYPE_TRUNKING_EVENT 0x7
+#define LPFC_FC_LA_TYPE_ACTIVATE_FAIL 0x8
+#define LPFC_FC_LA_TYPE_LINK_RESET_PRTCL_EVT 0x9
#define lpfc_acqe_fc_la_port_type_SHIFT 6
#define lpfc_acqe_fc_la_port_type_MASK 0x00000003
#define lpfc_acqe_fc_la_port_type_WORD word0
@@ -4242,6 +4244,9 @@ struct lpfc_acqe_fc_la {
#define lpfc_acqe_fc_la_fault_SHIFT 0
#define lpfc_acqe_fc_la_fault_MASK 0x000000FF
#define lpfc_acqe_fc_la_fault_WORD word1
+#define lpfc_acqe_fc_la_link_status_SHIFT 8
+#define lpfc_acqe_fc_la_link_status_MASK 0x0000007F
+#define lpfc_acqe_fc_la_link_status_WORD word1
#define lpfc_acqe_fc_la_trunk_fault_SHIFT 0
#define lpfc_acqe_fc_la_trunk_fault_MASK 0x0000000F
#define lpfc_acqe_fc_la_trunk_fault_WORD word1
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 25ba20e42825..6eb4085a3a22 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -30,6 +30,7 @@
#include <linux/kthread.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
+#include <linux/sched/clock.h>
#include <linux/ctype.h>
#include <linux/aer.h>
#include <linux/slab.h>
@@ -5189,16 +5190,25 @@ static void
lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba,
struct lpfc_acqe_link *acqe_link)
{
- switch (bf_get(lpfc_acqe_link_fault, acqe_link)) {
- case LPFC_ASYNC_LINK_FAULT_NONE:
- case LPFC_ASYNC_LINK_FAULT_LOCAL:
- case LPFC_ASYNC_LINK_FAULT_REMOTE:
- case LPFC_ASYNC_LINK_FAULT_LR_LRR:
+ switch (bf_get(lpfc_acqe_fc_la_att_type, acqe_link)) {
+ case LPFC_FC_LA_TYPE_LINK_DOWN:
+ case LPFC_FC_LA_TYPE_TRUNKING_EVENT:
+ case LPFC_FC_LA_TYPE_ACTIVATE_FAIL:
+ case LPFC_FC_LA_TYPE_LINK_RESET_PRTCL_EVT:
break;
default:
- lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
- "0398 Unknown link fault code: x%x\n",
- bf_get(lpfc_acqe_link_fault, acqe_link));
+ switch (bf_get(lpfc_acqe_link_fault, acqe_link)) {
+ case LPFC_ASYNC_LINK_FAULT_NONE:
+ case LPFC_ASYNC_LINK_FAULT_LOCAL:
+ case LPFC_ASYNC_LINK_FAULT_REMOTE:
+ case LPFC_ASYNC_LINK_FAULT_LR_LRR:
+ break;
+ default:
+ lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ "0398 Unknown link fault code: x%x\n",
+ bf_get(lpfc_acqe_link_fault, acqe_link));
+ break;
+ }
break;
}
}
@@ -6281,6 +6291,7 @@ lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
LPFC_MBOXQ_t *pmb;
MAILBOX_t *mb;
struct lpfc_mbx_read_top *la;
+ char *log_level;
int rc;
if (bf_get(lpfc_trailer_type, acqe_fc) !=
@@ -6312,25 +6323,70 @@ lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
bf_get(lpfc_acqe_fc_la_port_number, acqe_fc);
phba->sli4_hba.link_state.fault =
bf_get(lpfc_acqe_link_fault, acqe_fc);
+ phba->sli4_hba.link_state.link_status =
+ bf_get(lpfc_acqe_fc_la_link_status, acqe_fc);
- if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) ==
- LPFC_FC_LA_TYPE_LINK_DOWN)
- phba->sli4_hba.link_state.logical_speed = 0;
- else if (!phba->sli4_hba.conf_trunk)
- phba->sli4_hba.link_state.logical_speed =
+ /*
+ * Only select attention types need logical speed modification to what
+ * was previously set.
+ */
+ if (phba->sli4_hba.link_state.status >= LPFC_FC_LA_TYPE_LINK_UP &&
+ phba->sli4_hba.link_state.status < LPFC_FC_LA_TYPE_ACTIVATE_FAIL) {
+ if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) ==
+ LPFC_FC_LA_TYPE_LINK_DOWN)
+ phba->sli4_hba.link_state.logical_speed = 0;
+ else if (!phba->sli4_hba.conf_trunk)
+ phba->sli4_hba.link_state.logical_speed =
bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10;
+ }
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
"2896 Async FC event - Speed:%dGBaud Topology:x%x "
"LA Type:x%x Port Type:%d Port Number:%d Logical speed:"
- "%dMbps Fault:%d\n",
+ "%dMbps Fault:x%x Link Status:x%x\n",
phba->sli4_hba.link_state.speed,
phba->sli4_hba.link_state.topology,
phba->sli4_hba.link_state.status,
phba->sli4_hba.link_state.type,
phba->sli4_hba.link_state.number,
phba->sli4_hba.link_state.logical_speed,
- phba->sli4_hba.link_state.fault);
+ phba->sli4_hba.link_state.fault,
+ phba->sli4_hba.link_state.link_status);
+
+ /*
+ * The following attention types are informational only, providing
+ * further details about link status. Overwrite the value of
+ * link_state.status appropriately. No further action is required.
+ */
+ if (phba->sli4_hba.link_state.status >= LPFC_FC_LA_TYPE_ACTIVATE_FAIL) {
+ switch (phba->sli4_hba.link_state.status) {
+ case LPFC_FC_LA_TYPE_ACTIVATE_FAIL:
+ log_level = KERN_WARNING;
+ phba->sli4_hba.link_state.status =
+ LPFC_FC_LA_TYPE_LINK_DOWN;
+ break;
+ case LPFC_FC_LA_TYPE_LINK_RESET_PRTCL_EVT:
+ /*
+ * During bb credit recovery establishment, receiving
+ * this attention type is normal. Link Up attention
+ * type is expected to occur before this informational
+ * attention type so keep the Link Up status.
+ */
+ log_level = KERN_INFO;
+ phba->sli4_hba.link_state.status =
+ LPFC_FC_LA_TYPE_LINK_UP;
+ break;
+ default:
+ log_level = KERN_INFO;
+ break;
+ }
+ lpfc_log_msg(phba, log_level, LOG_SLI,
+ "2992 Async FC event - Informational Link "
+ "Attention Type x%x\n",
+ bf_get(lpfc_acqe_fc_la_att_type, acqe_fc));
+ return;
+ }
+
pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!pmb) {
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
@@ -13917,6 +13973,13 @@ fcponly:
if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
+ rc = dma_set_max_seg_size(&phba->pcidev->dev, sli4_params->sge_supp_len);
+ if (unlikely(rc)) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "6400 Can't set dma maximum segment size\n");
+ return rc;
+ }
+
/*
* Check whether the adapter supports an embedded copy of the
* FCP CMD IU within the WQE for FCP_Ixxx commands. In order
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 7a1563564df7..e989f130434e 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -1689,7 +1689,7 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
struct lpfc_pde6 *pde6 = NULL;
struct lpfc_pde7 *pde7 = NULL;
dma_addr_t dataphysaddr, protphysaddr;
- unsigned short curr_data = 0, curr_prot = 0;
+ unsigned short curr_prot = 0;
unsigned int split_offset;
unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder;
unsigned int protgrp_blks, protgrp_bytes;
@@ -1858,7 +1858,6 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
bpl->tus.w = le32_to_cpu(bpl->tus.w);
num_bde++;
- curr_data++;
if (split_offset)
break;
@@ -2119,7 +2118,7 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
struct scatterlist *sgpe = NULL; /* s/g prot entry */
struct sli4_sge_diseed *diseed = NULL;
dma_addr_t dataphysaddr, protphysaddr;
- unsigned short curr_data = 0, curr_prot = 0;
+ unsigned short curr_prot = 0;
unsigned int split_offset;
unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder;
unsigned int protgrp_blks, protgrp_bytes;
@@ -2364,7 +2363,6 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
dma_offset += dma_len;
num_sge++;
- curr_data++;
if (split_offset) {
sgl++;
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 182aaae60386..edbd81c3b643 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -11270,6 +11270,30 @@ lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
}
}
+inline void lpfc_sli4_poll_eq(struct lpfc_queue *eq)
+{
+ struct lpfc_hba *phba = eq->phba;
+
+ /*
+ * Unlocking an irq is one of the entry point to check
+ * for re-schedule, but we are good for io submission
+ * path as midlayer does a get_cpu to glue us in. Flush
+ * out the invalidate queue so we can see the updated
+ * value for flag.
+ */
+ smp_rmb();
+
+ if (READ_ONCE(eq->mode) == LPFC_EQ_POLL)
+ /* We will not likely get the completion for the caller
+ * during this iteration but i guess that's fine.
+ * Future io's coming on this eq should be able to
+ * pick it up. As for the case of single io's, they
+ * will be handled through a sched from polling timer
+ * function which is currently triggered every 1msec.
+ */
+ lpfc_sli4_process_eq(phba, eq, LPFC_QUEUE_NOARM);
+}
+
/**
* lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb
* @phba: Pointer to HBA context object.
@@ -11309,7 +11333,7 @@ lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
spin_unlock_irqrestore(&pring->ring_lock, iflags);
- lpfc_sli4_poll_eq(eq, LPFC_POLL_FASTPATH);
+ lpfc_sli4_poll_eq(eq);
} else {
/* For now, SLI2/3 will still use hbalock */
spin_lock_irqsave(&phba->hbalock, iflags);
@@ -15625,12 +15649,11 @@ void lpfc_sli4_poll_hbtimer(struct timer_list *t)
{
struct lpfc_hba *phba = from_timer(phba, t, cpuhp_poll_timer);
struct lpfc_queue *eq;
- int i = 0;
rcu_read_lock();
list_for_each_entry_rcu(eq, &phba->poll_list, _poll_list)
- i += lpfc_sli4_poll_eq(eq, LPFC_POLL_SLOWPATH);
+ lpfc_sli4_poll_eq(eq);
if (!list_empty(&phba->poll_list))
mod_timer(&phba->cpuhp_poll_timer,
jiffies + msecs_to_jiffies(LPFC_POLL_HB));
@@ -15638,33 +15661,6 @@ void lpfc_sli4_poll_hbtimer(struct timer_list *t)
rcu_read_unlock();
}
-inline int lpfc_sli4_poll_eq(struct lpfc_queue *eq, uint8_t path)
-{
- struct lpfc_hba *phba = eq->phba;
- int i = 0;
-
- /*
- * Unlocking an irq is one of the entry point to check
- * for re-schedule, but we are good for io submission
- * path as midlayer does a get_cpu to glue us in. Flush
- * out the invalidate queue so we can see the updated
- * value for flag.
- */
- smp_rmb();
-
- if (READ_ONCE(eq->mode) == LPFC_EQ_POLL)
- /* We will not likely get the completion for the caller
- * during this iteration but i guess that's fine.
- * Future io's coming on this eq should be able to
- * pick it up. As for the case of single io's, they
- * will be handled through a sched from polling timer
- * function which is currently triggered every 1msec.
- */
- i = lpfc_sli4_process_eq(phba, eq, LPFC_QUEUE_NOARM);
-
- return i;
-}
-
static inline void lpfc_sli4_add_to_poll_list(struct lpfc_queue *eq)
{
struct lpfc_hba *phba = eq->phba;
@@ -20819,6 +20815,7 @@ lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
struct lpfc_mbx_wr_object *wr_object;
LPFC_MBOXQ_t *mbox;
int rc = 0, i = 0;
+ int mbox_status = 0;
uint32_t shdr_status, shdr_add_status, shdr_add_status_2;
uint32_t shdr_change_status = 0, shdr_csf = 0;
uint32_t mbox_tmo;
@@ -20864,11 +20861,15 @@ lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
wr_object->u.request.bde_count = i;
bf_set(lpfc_wr_object_write_length, &wr_object->u.request, written);
if (!phba->sli4_hba.intr_enable)
- rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+ mbox_status = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
else {
mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
- rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
+ mbox_status = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
}
+
+ /* The mbox status needs to be maintained to detect MBOX_TIMEOUT. */
+ rc = mbox_status;
+
/* The IOCTL status is embedded in the mailbox subheader. */
shdr_status = bf_get(lpfc_mbox_hdr_status,
&wr_object->header.cfg_shdr.response);
@@ -20883,10 +20884,6 @@ lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
&wr_object->u.response);
}
- if (!phba->sli4_hba.intr_enable)
- mempool_free(mbox, phba->mbox_mem_pool);
- else if (rc != MBX_TIMEOUT)
- mempool_free(mbox, phba->mbox_mem_pool);
if (shdr_status || shdr_add_status || shdr_add_status_2 || rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"3025 Write Object mailbox failed with "
@@ -20904,6 +20901,12 @@ lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
lpfc_log_fw_write_cmpl(phba, shdr_status, shdr_add_status,
shdr_add_status_2, shdr_change_status,
shdr_csf);
+
+ if (!phba->sli4_hba.intr_enable)
+ mempool_free(mbox, phba->mbox_mem_pool);
+ else if (mbox_status != MBX_TIMEOUT)
+ mempool_free(mbox, phba->mbox_mem_pool);
+
return rc;
}
@@ -21276,7 +21279,7 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
spin_unlock_irqrestore(&pring->ring_lock, iflags);
- lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
+ lpfc_sli4_poll_eq(qp->hba_eq);
return 0;
}
@@ -21298,7 +21301,7 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
spin_unlock_irqrestore(&pring->ring_lock, iflags);
- lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
+ lpfc_sli4_poll_eq(qp->hba_eq);
return 0;
}
@@ -21328,7 +21331,7 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
spin_unlock_irqrestore(&pring->ring_lock, iflags);
- lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH);
+ lpfc_sli4_poll_eq(qp->hba_eq);
return 0;
}
return WQE_ERROR;
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index f927c2a25d54..3b62c4032c31 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2009-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -291,8 +291,9 @@ struct lpfc_sli4_link {
uint8_t type;
uint8_t number;
uint8_t fault;
- uint32_t logical_speed;
+ uint8_t link_status;
uint16_t topology;
+ uint32_t logical_speed;
};
struct lpfc_fcf_rec {
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 41a1128f8651..0238208cdd11 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -20,7 +20,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "14.2.0.9"
+#define LPFC_DRIVER_VERSION "14.2.0.10"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
@@ -32,6 +32,6 @@
#define LPFC_MODULE_DESC "Emulex LightPulse Fibre Channel SCSI driver " \
LPFC_DRIVER_VERSION
-#define LPFC_COPYRIGHT "Copyright (C) 2017-2022 Broadcom. All Rights " \
+#define LPFC_COPYRIGHT "Copyright (C) 2017-2023 Broadcom. All Rights " \
"Reserved. The term \"Broadcom\" refers to Broadcom Inc. " \
"and/or its subsidiaries."
diff --git a/drivers/scsi/lpfc/lpfc_vmid.c b/drivers/scsi/lpfc/lpfc_vmid.c
index ed1d7f7b88a3..cf8ba840d0ea 100644
--- a/drivers/scsi/lpfc/lpfc_vmid.c
+++ b/drivers/scsi/lpfc/lpfc_vmid.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -284,3 +284,42 @@ int lpfc_vmid_get_appid(struct lpfc_vport *vport, char *uuid,
}
return rc;
}
+
+/*
+ * lpfc_reinit_vmid - reinitializes the vmid data structure
+ * @vport: pointer to vport data structure
+ *
+ * This routine reinitializes the vmid post flogi completion
+ *
+ * Return codes
+ * None
+ */
+void
+lpfc_reinit_vmid(struct lpfc_vport *vport)
+{
+ u32 bucket, i, cpu;
+ struct lpfc_vmid *cur;
+ struct lpfc_vmid *vmp = NULL;
+ struct hlist_node *tmp;
+
+ write_lock(&vport->vmid_lock);
+ vport->cur_vmid_cnt = 0;
+
+ for (i = 0; i < vport->max_vmid; i++) {
+ vmp = &vport->vmid[i];
+ vmp->flag = LPFC_VMID_SLOT_FREE;
+ memset(vmp->host_vmid, 0, sizeof(vmp->host_vmid));
+ vmp->io_rd_cnt = 0;
+ vmp->io_wr_cnt = 0;
+
+ if (vmp->last_io_time)
+ for_each_possible_cpu(cpu)
+ *per_cpu_ptr(vmp->last_io_time, cpu) = 0;
+ }
+
+ /* for all elements in the hash table */
+ if (!hash_empty(vport->hash_table))
+ hash_for_each_safe(vport->hash_table, bucket, tmp, cur, hnode)
+ hash_del(&cur->hnode);
+ write_unlock(&vport->vmid_lock);
+}
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index 4d171f5c213f..6c7559cf1a4b 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
+ * Copyright (C) 2017-2023 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@@ -534,7 +534,7 @@ disable_vport(struct fc_vport *fc_vport)
{
struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
struct lpfc_hba *phba = vport->phba;
- struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL;
+ struct lpfc_nodelist *ndlp = NULL;
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
/* Can't disable during an outstanding delete. */
@@ -546,17 +546,7 @@ disable_vport(struct fc_vport *fc_vport)
(void)lpfc_send_npiv_logo(vport, ndlp);
lpfc_sli_host_down(vport);
-
- /* Mark all nodes for discovery so we can remove them by
- * calling lpfc_cleanup_rpis(vport, 1)
- */
- list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
- if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
- continue;
- lpfc_disc_state_machine(vport, ndlp, NULL,
- NLP_EVT_DEVICE_RECOVERY);
- }
- lpfc_cleanup_rpis(vport, 1);
+ lpfc_cleanup_rpis(vport, 0);
lpfc_stop_vport_timers(vport);
lpfc_unreg_all_rpis(vport);
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index fe70f8f11435..6597e118c805 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -3323,7 +3323,7 @@ static void megasas_prepare_secondRaid1_IO(struct megasas_instance *instance,
/* copy the io request frame as well as 8 SGEs data for r1 command*/
memcpy(r1_cmd->io_request, cmd->io_request,
(sizeof(struct MPI2_RAID_SCSI_IO_REQUEST)));
- memcpy(&r1_cmd->io_request->SGL, &cmd->io_request->SGL,
+ memcpy(r1_cmd->io_request->SGLs, cmd->io_request->SGLs,
(fusion->max_sge_in_main_msg * sizeof(union MPI2_SGE_IO_UNION)));
/*sense buffer is different for r1 command*/
r1_cmd->io_request->SenseBufferLowAddress =
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h
index 49e9a9048ee7..b677d80e5874 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.h
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h
@@ -526,7 +526,10 @@ struct MPI2_RAID_SCSI_IO_REQUEST {
__le32 Control; /* 0x3C */
union MPI2_SCSI_IO_CDB_UNION CDB; /* 0x40 */
union RAID_CONTEXT_UNION RaidContext; /* 0x60 */
- union MPI2_SGE_IO_UNION SGL; /* 0x80 */
+ union {
+ union MPI2_SGE_IO_UNION SGL; /* 0x80 */
+ DECLARE_FLEX_ARRAY(union MPI2_SGE_IO_UNION, SGLs);
+ };
};
/*
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 69061545d9d2..2ee9ea57554d 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -5849,6 +5849,9 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
}
dma_pool_destroy(ioc->pcie_sgl_dma_pool);
}
+ kfree(ioc->pcie_sg_lookup);
+ ioc->pcie_sg_lookup = NULL;
+
if (ioc->config_page) {
dexitprintk(ioc,
ioc_info(ioc, "config_page(0x%p): free\n",
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
index 0d8b1e942ded..efdb8178db32 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
@@ -1884,7 +1884,7 @@ mpt3sas_enable_diag_buffer(struct MPT3SAS_ADAPTER *ioc, u8 bits_to_register)
diag_register.requested_buffer_size>>10);
else if (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE]
& MPT3_DIAG_BUFFER_IS_REGISTERED) {
- ioc_err(ioc, "Trace buffer memory %d KB allocated\n",
+ ioc_info(ioc, "Trace buffer memory %d KB allocated\n",
diag_register.requested_buffer_size>>10);
if (ioc->hba_mpi_version_belonged != MPI2_VERSION)
ioc->diag_buffer_status[
diff --git a/drivers/scsi/mvumi.c b/drivers/scsi/mvumi.c
index b3dcb8918618..60c65586f30e 100644
--- a/drivers/scsi/mvumi.c
+++ b/drivers/scsi/mvumi.c
@@ -1841,7 +1841,7 @@ static enum mvumi_qc_result mvumi_send_command(struct mvumi_hba *mhba,
cmd->frame->request_id = mhba->io_seq++;
cmd->request_id = cmd->frame->request_id;
mhba->tag_cmd[cmd->frame->tag] = cmd;
- frame_len = sizeof(*ib_frame) - 4 +
+ frame_len = sizeof(*ib_frame) +
ib_frame->sg_counts * sizeof(struct mvumi_sgl);
if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) {
struct mvumi_dyn_list_entry *dle;
@@ -2387,7 +2387,7 @@ static int mvumi_io_attach(struct mvumi_hba *mhba)
struct Scsi_Host *host = mhba->shost;
struct scsi_device *sdev = NULL;
int ret;
- unsigned int max_sg = (mhba->ib_max_size + 4 -
+ unsigned int max_sg = (mhba->ib_max_size -
sizeof(struct mvumi_msg_frame)) / sizeof(struct mvumi_sgl);
host->irq = mhba->pdev->irq;
diff --git a/drivers/scsi/mvumi.h b/drivers/scsi/mvumi.h
index a88c58787b68..1306a4abf19a 100644
--- a/drivers/scsi/mvumi.h
+++ b/drivers/scsi/mvumi.h
@@ -279,7 +279,7 @@ struct mvumi_msg_frame {
u16 request_id;
u16 reserved1;
u8 cdb[MAX_COMMAND_SIZE];
- u32 payload[1];
+ u32 payload[];
};
/*
@@ -294,7 +294,7 @@ struct mvumi_rsp_frame {
u8 req_status;
u8 rsp_flag; /* Indicates the type of Data_Payload.*/
u16 request_id;
- u32 payload[1];
+ u32 payload[];
};
struct mvumi_ob_data {
@@ -380,7 +380,7 @@ struct mvumi_hs_header {
u8 page_code;
u8 checksum;
u16 frame_length;
- u32 frame_content[1];
+ u32 frame_content[];
};
/*
diff --git a/drivers/scsi/pm8001/pm8001_ctl.c b/drivers/scsi/pm8001/pm8001_ctl.c
index 73f036bed128..5c26a13ffbd2 100644
--- a/drivers/scsi/pm8001/pm8001_ctl.c
+++ b/drivers/scsi/pm8001/pm8001_ctl.c
@@ -61,10 +61,10 @@ static ssize_t pm8001_ctl_mpi_interface_rev_show(struct device *cdev,
struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
if (pm8001_ha->chip_id == chip_8001) {
- return snprintf(buf, PAGE_SIZE, "%d\n",
+ return sysfs_emit(buf, "%d\n",
pm8001_ha->main_cfg_tbl.pm8001_tbl.interface_rev);
} else {
- return snprintf(buf, PAGE_SIZE, "%d\n",
+ return sysfs_emit(buf, "%d\n",
pm8001_ha->main_cfg_tbl.pm80xx_tbl.interface_rev);
}
}
@@ -86,7 +86,7 @@ static ssize_t controller_fatal_error_show(struct device *cdev,
struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
- return snprintf(buf, PAGE_SIZE, "%d\n",
+ return sysfs_emit(buf, "%d\n",
pm8001_ha->controller_fatal_error);
}
static DEVICE_ATTR_RO(controller_fatal_error);
@@ -107,13 +107,13 @@ static ssize_t pm8001_ctl_fw_version_show(struct device *cdev,
struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
if (pm8001_ha->chip_id == chip_8001) {
- return snprintf(buf, PAGE_SIZE, "%02x.%02x.%02x.%02x\n",
+ return sysfs_emit(buf, "%02x.%02x.%02x.%02x\n",
(u8)(pm8001_ha->main_cfg_tbl.pm8001_tbl.firmware_rev >> 24),
(u8)(pm8001_ha->main_cfg_tbl.pm8001_tbl.firmware_rev >> 16),
(u8)(pm8001_ha->main_cfg_tbl.pm8001_tbl.firmware_rev >> 8),
(u8)(pm8001_ha->main_cfg_tbl.pm8001_tbl.firmware_rev));
} else {
- return snprintf(buf, PAGE_SIZE, "%02x.%02x.%02x.%02x\n",
+ return sysfs_emit(buf, "%02x.%02x.%02x.%02x\n",
(u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev >> 24),
(u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev >> 16),
(u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev >> 8),
@@ -138,7 +138,7 @@ static ssize_t pm8001_ctl_ila_version_show(struct device *cdev,
struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
if (pm8001_ha->chip_id != chip_8001) {
- return snprintf(buf, PAGE_SIZE, "%02x.%02x.%02x.%02x\n",
+ return sysfs_emit(buf, "%02x.%02x.%02x.%02x\n",
(u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.ila_version >> 24),
(u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.ila_version >> 16),
(u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.ila_version >> 8),
@@ -164,7 +164,7 @@ static ssize_t pm8001_ctl_inactive_fw_version_show(struct device *cdev,
struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
if (pm8001_ha->chip_id != chip_8001) {
- return snprintf(buf, PAGE_SIZE, "%02x.%02x.%02x.%02x\n",
+ return sysfs_emit(buf, "%02x.%02x.%02x.%02x\n",
(u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.inc_fw_version >> 24),
(u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.inc_fw_version >> 16),
(u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.inc_fw_version >> 8),
@@ -191,10 +191,10 @@ static ssize_t pm8001_ctl_max_out_io_show(struct device *cdev,
struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
if (pm8001_ha->chip_id == chip_8001) {
- return snprintf(buf, PAGE_SIZE, "%d\n",
+ return sysfs_emit(buf, "%d\n",
pm8001_ha->main_cfg_tbl.pm8001_tbl.max_out_io);
} else {
- return snprintf(buf, PAGE_SIZE, "%d\n",
+ return sysfs_emit(buf, "%d\n",
pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_out_io);
}
}
@@ -215,13 +215,11 @@ static ssize_t pm8001_ctl_max_devices_show(struct device *cdev,
struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
if (pm8001_ha->chip_id == chip_8001) {
- return snprintf(buf, PAGE_SIZE, "%04d\n",
- (u16)(pm8001_ha->main_cfg_tbl.pm8001_tbl.max_sgl >> 16)
- );
+ return sysfs_emit(buf, "%04d\n",
+ (u16)(pm8001_ha->main_cfg_tbl.pm8001_tbl.max_sgl >> 16));
} else {
- return snprintf(buf, PAGE_SIZE, "%04d\n",
- (u16)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_sgl >> 16)
- );
+ return sysfs_emit(buf, "%04d\n",
+ (u16)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_sgl >> 16));
}
}
static DEVICE_ATTR(max_devices, S_IRUGO, pm8001_ctl_max_devices_show, NULL);
@@ -242,13 +240,11 @@ static ssize_t pm8001_ctl_max_sg_list_show(struct device *cdev,
struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
if (pm8001_ha->chip_id == chip_8001) {
- return snprintf(buf, PAGE_SIZE, "%04d\n",
- pm8001_ha->main_cfg_tbl.pm8001_tbl.max_sgl & 0x0000FFFF
- );
+ return sysfs_emit(buf, "%04d\n",
+ pm8001_ha->main_cfg_tbl.pm8001_tbl.max_sgl & 0x0000FFFF);
} else {
- return snprintf(buf, PAGE_SIZE, "%04d\n",
- pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_sgl & 0x0000FFFF
- );
+ return sysfs_emit(buf, "%04d\n",
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_sgl & 0x0000FFFF);
}
}
static DEVICE_ATTR(max_sg_list, S_IRUGO, pm8001_ctl_max_sg_list_show, NULL);
@@ -315,7 +311,7 @@ static ssize_t pm8001_ctl_host_sas_address_show(struct device *cdev,
struct Scsi_Host *shost = class_to_shost(cdev);
struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
- return snprintf(buf, PAGE_SIZE, "0x%016llx\n",
+ return sysfs_emit(buf, "0x%016llx\n",
be64_to_cpu(*(__be64 *)pm8001_ha->sas_addr));
}
static DEVICE_ATTR(host_sas_address, S_IRUGO,
@@ -336,7 +332,7 @@ static ssize_t pm8001_ctl_logging_level_show(struct device *cdev,
struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
- return snprintf(buf, PAGE_SIZE, "%08xh\n", pm8001_ha->logging_level);
+ return sysfs_emit(buf, "%08xh\n", pm8001_ha->logging_level);
}
static ssize_t pm8001_ctl_logging_level_store(struct device *cdev,
@@ -517,7 +513,7 @@ static ssize_t event_log_size_show(struct device *cdev,
struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
- return snprintf(buf, PAGE_SIZE, "%d\n",
+ return sysfs_emit(buf, "%d\n",
pm8001_ha->main_cfg_tbl.pm80xx_tbl.event_log_size);
}
static DEVICE_ATTR_RO(event_log_size);
@@ -604,7 +600,7 @@ static ssize_t non_fatal_count_show(struct device *cdev,
struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
- return snprintf(buf, PAGE_SIZE, "%08x",
+ return sysfs_emit(buf, "%08x\n",
pm8001_ha->non_fatal_count);
}
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index b67ad30d56e6..70cfc94c3d43 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -2732,7 +2732,7 @@ qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport)
spin_lock_irqsave(host->host_lock, flags);
/* Confirm port has not reappeared before clearing pointers. */
if (rport->port_state != FC_PORTSTATE_ONLINE) {
- fcport->rport = fcport->drport = NULL;
+ fcport->rport = NULL;
*((fc_port_t **)rport->dd_data) = NULL;
}
spin_unlock_irqrestore(host->host_lock, flags);
@@ -3171,8 +3171,7 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
set_bit(VPORT_DELETE, &vha->dpc_flags);
- while (test_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags) ||
- test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags))
+ while (test_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))
msleep(1000);
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index cd75b179410d..dba7bba788d7 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -278,8 +278,8 @@ qla2x00_process_els(struct bsg_job *bsg_job)
const char *type;
int req_sg_cnt, rsp_sg_cnt;
int rval = (DID_ERROR << 16);
- uint16_t nextlid = 0;
uint32_t els_cmd = 0;
+ int qla_port_allocated = 0;
if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
rport = fc_bsg_to_rport(bsg_job);
@@ -329,9 +329,9 @@ qla2x00_process_els(struct bsg_job *bsg_job)
/* make sure the rport is logged in,
* if not perform fabric login
*/
- if (qla2x00_fabric_login(vha, fcport, &nextlid)) {
+ if (atomic_read(&fcport->state) != FCS_ONLINE) {
ql_dbg(ql_dbg_user, vha, 0x7003,
- "Failed to login port %06X for ELS passthru.\n",
+ "Port %06X is not online for ELS passthru.\n",
fcport->d_id.b24);
rval = -EIO;
goto done;
@@ -348,6 +348,7 @@ qla2x00_process_els(struct bsg_job *bsg_job)
goto done;
}
+ qla_port_allocated = 1;
/* Initialize all required fields of fcport */
fcport->vha = vha;
fcport->d_id.b.al_pa =
@@ -432,7 +433,7 @@ done_unmap_sg:
goto done_free_fcport;
done_free_fcport:
- if (bsg_request->msgcode != FC_BSG_RPT_ELS)
+ if (qla_port_allocated)
qla2x00_free_fcport(fcport);
done:
return rval;
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index a26a373be9da..ec0e987b71fa 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -384,6 +384,13 @@ struct els_reject {
struct req_que;
struct qla_tgt_sess;
+struct qla_buf_dsc {
+ u16 tag;
+#define TAG_FREED 0xffff
+ void *buf;
+ dma_addr_t buf_dma;
+};
+
/*
* SCSI Request Block
*/
@@ -392,14 +399,16 @@ struct srb_cmd {
uint32_t request_sense_length;
uint32_t fw_sense_length;
uint8_t *request_sense_ptr;
- struct ct6_dsd *ct6_ctx;
struct crc_context *crc_ctx;
+ struct ct6_dsd ct6_ctx;
+ struct qla_buf_dsc buf_dsc;
};
/*
* SRB flag definitions
*/
#define SRB_DMA_VALID BIT_0 /* Command sent to ISP */
+#define SRB_GOT_BUF BIT_1
#define SRB_FCP_CMND_DMA_VALID BIT_12 /* DIF: DSD List valid */
#define SRB_CRC_CTX_DMA_VALID BIT_2 /* DIF: context DMA valid */
#define SRB_CRC_PROT_DMA_VALID BIT_4 /* DIF: prot DMA valid */
@@ -660,7 +669,7 @@ enum {
struct iocb_resource {
u8 res_type;
- u8 pad;
+ u8 exch_cnt;
u16 iocb_cnt;
};
@@ -2485,7 +2494,6 @@ struct ct_sns_desc {
enum discovery_state {
DSC_DELETED,
- DSC_GNN_ID,
DSC_GNL,
DSC_LOGIN_PEND,
DSC_LOGIN_FAILED,
@@ -2596,7 +2604,7 @@ typedef struct fc_port {
int login_retry;
- struct fc_rport *rport, *drport;
+ struct fc_rport *rport;
u32 supported_classes;
uint8_t fc4_type;
@@ -2699,7 +2707,6 @@ extern const char *const port_state_str[5];
static const char *const port_dstate_str[] = {
[DSC_DELETED] = "DELETED",
- [DSC_GNN_ID] = "GNN_ID",
[DSC_GNL] = "GNL",
[DSC_LOGIN_PEND] = "LOGIN_PEND",
[DSC_LOGIN_FAILED] = "LOGIN_FAILED",
@@ -3462,6 +3469,7 @@ struct qla_msix_entry {
int have_irq;
int in_use;
uint32_t vector;
+ uint32_t vector_base0;
uint16_t entry;
char name[30];
void *handle;
@@ -3479,7 +3487,6 @@ enum qla_work_type {
QLA_EVT_ASYNC_ADISC,
QLA_EVT_UEVENT,
QLA_EVT_AENFX,
- QLA_EVT_GPNID,
QLA_EVT_UNMAP,
QLA_EVT_NEW_SESS,
QLA_EVT_GPDB,
@@ -3493,7 +3500,6 @@ enum qla_work_type {
QLA_EVT_GPNFT,
QLA_EVT_GPNFT_DONE,
QLA_EVT_GNNFT_DONE,
- QLA_EVT_GNNID,
QLA_EVT_GFPNID,
QLA_EVT_SP_RETRY,
QLA_EVT_IIDMA,
@@ -3536,15 +3542,12 @@ struct qla_work_evt {
} iosb;
struct {
port_id_t id;
- } gpnid;
- struct {
- port_id_t id;
u8 port_name[8];
u8 node_name[8];
void *pla;
u8 fc4_type;
} new_sess;
- struct { /*Get PDB, Get Speed, update fcport, gnl, gidpn */
+ struct { /*Get PDB, Get Speed, update fcport, gnl */
fc_port_t *fcport;
u8 opt;
} fcport;
@@ -3721,10 +3724,27 @@ struct qla_fw_resources {
u16 iocbs_limit;
u16 iocbs_qp_limit;
u16 iocbs_used;
+ u16 exch_total;
+ u16 exch_limit;
+ u16 exch_used;
+ u16 pad;
};
#define QLA_IOCB_PCT_LIMIT 95
+struct qla_buf_pool {
+ u16 num_bufs;
+ u16 num_active;
+ u16 max_used;
+ u16 num_alloc;
+ u16 prev_max;
+ u16 pad;
+ uint32_t take_snapshot:1;
+ unsigned long *buf_map;
+ void **buf_array;
+ dma_addr_t *dma_array;
+};
+
/*Queue pair data structure */
struct qla_qpair {
spinlock_t qp_lock;
@@ -3778,6 +3798,7 @@ struct qla_qpair {
struct qla_tgt_counters tgt_counters;
uint16_t cpuid;
struct qla_fw_resources fwres ____cacheline_aligned;
+ struct qla_buf_pool buf_pool;
u32 cmd_cnt;
u32 cmd_completion_cnt;
u32 prev_completion_cnt;
@@ -3938,7 +3959,6 @@ struct qlt_hw_data {
__le32 __iomem *atio_q_out;
const struct qla_tgt_func_tmpl *tgt_ops;
- struct qla_tgt_vp_map *tgt_vp_map;
int saved_set;
__le16 saved_exchange_count;
@@ -4106,6 +4126,7 @@ struct qla_hw_data {
struct req_que **req_q_map;
struct rsp_que **rsp_q_map;
struct qla_qpair **queue_pair_map;
+ struct qla_qpair **qp_cpu_map;
unsigned long req_qid_map[(QLA_MAX_QUEUES / 8) / sizeof(unsigned long)];
unsigned long rsp_qid_map[(QLA_MAX_QUEUES / 8) / sizeof(unsigned long)];
unsigned long qpair_qid_map[(QLA_MAX_QUEUES / 8)
@@ -4762,6 +4783,7 @@ struct qla_hw_data {
spinlock_t sadb_lock; /* protects list */
struct els_reject elsrej;
u8 edif_post_stop_cnt_down;
+ struct qla_vp_map *vp_map;
};
#define RX_ELS_SIZE (roundup(sizeof(struct enode) + ELS_MAX_PAYLOAD, SMP_CACHE_BYTES))
@@ -4857,6 +4879,7 @@ typedef struct scsi_qla_host {
#define LOOP_READY 5
#define LOOP_DEAD 6
+ unsigned long buf_expired;
unsigned long relogin_jif;
unsigned long dpc_flags;
#define RESET_MARKER_NEEDED 0 /* Send marker to ISP. */
@@ -4872,7 +4895,6 @@ typedef struct scsi_qla_host {
#define ISP_ABORT_RETRY 10 /* ISP aborted. */
#define BEACON_BLINK_NEEDED 11
#define REGISTER_FDMI_NEEDED 12
-#define FCPORT_UPDATE_NEEDED 13
#define VP_DPC_NEEDED 14 /* wake up for VP dpc handling */
#define UNLOADING 15
#define NPIV_CONFIG_NEEDED 16
@@ -5022,7 +5044,6 @@ typedef struct scsi_qla_host {
uint8_t n2n_port_name[WWN_SIZE];
uint16_t n2n_id;
__le16 dport_data[4];
- struct list_head gpnid_list;
struct fab_scan scan;
uint8_t scm_fabric_connection_flags;
@@ -5064,7 +5085,7 @@ struct qla27xx_image_status {
#define SET_AL_PA 2
#define RESET_VP_IDX 3
#define RESET_AL_PA 4
-struct qla_tgt_vp_map {
+struct qla_vp_map {
uint8_t idx;
scsi_qla_host_t *vha;
};
diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c
index 777808af5634..1925cc6897b6 100644
--- a/drivers/scsi/qla2xxx/qla_dfs.c
+++ b/drivers/scsi/qla2xxx/qla_dfs.c
@@ -235,7 +235,7 @@ qla_dfs_fw_resource_cnt_show(struct seq_file *s, void *unused)
uint16_t mb[MAX_IOCB_MB_REG];
int rc;
struct qla_hw_data *ha = vha->hw;
- u16 iocbs_used, i;
+ u16 iocbs_used, i, exch_used;
rc = qla24xx_res_count_wait(vha, mb, SIZEOF_IOCB_MB_REG);
if (rc != QLA_SUCCESS) {
@@ -263,13 +263,19 @@ qla_dfs_fw_resource_cnt_show(struct seq_file *s, void *unused)
if (ql2xenforce_iocb_limit) {
/* lock is not require. It's an estimate. */
iocbs_used = ha->base_qpair->fwres.iocbs_used;
+ exch_used = ha->base_qpair->fwres.exch_used;
for (i = 0; i < ha->max_qpairs; i++) {
- if (ha->queue_pair_map[i])
+ if (ha->queue_pair_map[i]) {
iocbs_used += ha->queue_pair_map[i]->fwres.iocbs_used;
+ exch_used += ha->queue_pair_map[i]->fwres.exch_used;
+ }
}
seq_printf(s, "Driver: estimate iocb used [%d] high water limit [%d]\n",
iocbs_used, ha->base_qpair->fwres.iocbs_limit);
+
+ seq_printf(s, "estimate exchange used[%d] high water limit [%d] n",
+ exch_used, ha->base_qpair->fwres.exch_limit);
}
return 0;
diff --git a/drivers/scsi/qla2xxx/qla_edif.c b/drivers/scsi/qla2xxx/qla_edif.c
index e4240aae5f9e..ec0e20255bd3 100644
--- a/drivers/scsi/qla2xxx/qla_edif.c
+++ b/drivers/scsi/qla2xxx/qla_edif.c
@@ -480,6 +480,49 @@ void qla2x00_release_all_sadb(struct scsi_qla_host *vha, struct fc_port *fcport)
}
/**
+ * qla_delete_n2n_sess_and_wait: search for N2N session, tear it down and
+ * wait for tear down to complete. In N2N topology, there is only one
+ * session being active in tracking the remote device.
+ * @vha: host adapter pointer
+ * return code: 0 - found the session and completed the tear down.
+ * 1 - timeout occurred. Caller to use link bounce to reset.
+ */
+static int qla_delete_n2n_sess_and_wait(scsi_qla_host_t *vha)
+{
+ struct fc_port *fcport;
+ int rc = -EIO;
+ ulong expire = jiffies + 23 * HZ;
+
+ if (!N2N_TOPO(vha->hw))
+ return 0;
+
+ fcport = NULL;
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ if (!fcport->n2n_flag)
+ continue;
+
+ ql_dbg(ql_dbg_disc, fcport->vha, 0x2016,
+ "%s reset sess at app start \n", __func__);
+
+ qla_edif_sa_ctl_init(vha, fcport);
+ qlt_schedule_sess_for_deletion(fcport);
+
+ while (time_before_eq(jiffies, expire)) {
+ if (fcport->disc_state != DSC_DELETE_PEND) {
+ rc = 0;
+ break;
+ }
+ msleep(1);
+ }
+
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+ break;
+ }
+
+ return rc;
+}
+
+/**
* qla_edif_app_start: application has announce its present
* @vha: host adapter pointer
* @bsg_job: user request
@@ -518,18 +561,17 @@ qla_edif_app_start(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
fcport->n2n_link_reset_cnt = 0;
if (vha->hw->flags.n2n_fw_acc_sec) {
- list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list)
- qla_edif_sa_ctl_init(vha, fcport);
-
+ bool link_bounce = false;
/*
* While authentication app was not running, remote device
* could still try to login with this local port. Let's
- * clear the state and try again.
+ * reset the session, reconnect and re-authenticate.
*/
- qla2x00_wait_for_sess_deletion(vha);
+ if (qla_delete_n2n_sess_and_wait(vha))
+ link_bounce = true;
- /* bounce the link to get the other guy to relogin */
- if (!vha->hw->flags.n2n_bigger) {
+ /* bounce the link to start login */
+ if (!vha->hw->flags.n2n_bigger || link_bounce) {
set_bit(N2N_LINK_RESET, &vha->dpc_flags);
qla2xxx_wake_dpc(vha);
}
@@ -925,7 +967,9 @@ qla_edif_app_getfcinfo(scsi_qla_host_t *vha, struct bsg_job *bsg_job)
if (!(fcport->flags & FCF_FCSP_DEVICE))
continue;
- tdid = app_req.remote_pid;
+ tdid.b.domain = app_req.remote_pid.domain;
+ tdid.b.area = app_req.remote_pid.area;
+ tdid.b.al_pa = app_req.remote_pid.al_pa;
ql_dbg(ql_dbg_edif, vha, 0x2058,
"APP request entry - portid=%06x.\n", tdid.b24);
@@ -2989,9 +3033,10 @@ qla28xx_start_scsi_edif(srb_t *sp)
tot_dsds = nseg;
req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
- sp->iores.res_type = RESOURCE_INI;
+ sp->iores.res_type = RESOURCE_IOCB | RESOURCE_EXCH;
+ sp->iores.exch_cnt = 1;
sp->iores.iocb_cnt = req_cnt;
- if (qla_get_iocbs(sp->qpair, &sp->iores))
+ if (qla_get_fw_resources(sp->qpair, &sp->iores))
goto queuing_error;
if (req->cnt < (req_cnt + 2)) {
@@ -3006,26 +3051,16 @@ qla28xx_start_scsi_edif(srb_t *sp)
goto queuing_error;
}
- ctx = sp->u.scmd.ct6_ctx =
- mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
- if (!ctx) {
- ql_log(ql_log_fatal, vha, 0x3010,
- "Failed to allocate ctx for cmd=%p.\n", cmd);
- goto queuing_error;
- }
-
- memset(ctx, 0, sizeof(struct ct6_dsd));
- ctx->fcp_cmnd = dma_pool_zalloc(ha->fcp_cmnd_dma_pool,
- GFP_ATOMIC, &ctx->fcp_cmnd_dma);
- if (!ctx->fcp_cmnd) {
+ if (qla_get_buf(vha, sp->qpair, &sp->u.scmd.buf_dsc)) {
ql_log(ql_log_fatal, vha, 0x3011,
- "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
+ "Failed to allocate buf for fcp_cmnd for cmd=%p.\n", cmd);
goto queuing_error;
}
- /* Initialize the DSD list and dma handle */
- INIT_LIST_HEAD(&ctx->dsd_list);
- ctx->dsd_use_cnt = 0;
+ sp->flags |= SRB_GOT_BUF;
+ ctx = &sp->u.scmd.ct6_ctx;
+ ctx->fcp_cmnd = sp->u.scmd.buf_dsc.buf;
+ ctx->fcp_cmnd_dma = sp->u.scmd.buf_dsc.buf_dma;
if (cmd->cmd_len > 16) {
additional_cdb_len = cmd->cmd_len - 16;
@@ -3144,7 +3179,6 @@ no_dsds:
cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
put_unaligned_le64(ctx->fcp_cmnd_dma, &cmd_pkt->fcp_cmnd_dseg_address);
- sp->flags |= SRB_FCP_CMND_DMA_VALID;
cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
/* Set total data segment count. */
cmd_pkt->entry_count = (uint8_t)req_cnt;
@@ -3176,16 +3210,12 @@ no_dsds:
return QLA_SUCCESS;
queuing_error_fcp_cmnd:
- dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
queuing_error:
if (tot_dsds)
scsi_dma_unmap(cmd);
- if (sp->u.scmd.ct6_ctx) {
- mempool_free(sp->u.scmd.ct6_ctx, ha->ctx_mempool);
- sp->u.scmd.ct6_ctx = NULL;
- }
- qla_put_iocbs(sp->qpair, &sp->iores);
+ qla_put_buf(sp->qpair, &sp->u.scmd.buf_dsc);
+ qla_put_fw_resources(sp->qpair, &sp->iores);
spin_unlock_irqrestore(lock, flags);
return QLA_FUNCTION_FAILED;
diff --git a/drivers/scsi/qla2xxx/qla_edif.h b/drivers/scsi/qla2xxx/qla_edif.h
index 7cdb89ccdc6e..aa566cdb77e5 100644
--- a/drivers/scsi/qla2xxx/qla_edif.h
+++ b/drivers/scsi/qla2xxx/qla_edif.h
@@ -145,4 +145,6 @@ struct enode {
(qla_ini_mode_enabled(_s->vha) && (_s->disc_state == DSC_DELETE_PEND || \
_s->disc_state == DSC_DELETED))
+#define EDIF_CAP(_ha) (ql2xsecenable && IS_QLA28XX(_ha))
+
#endif /* __QLA_EDIF_H */
diff --git a/drivers/scsi/qla2xxx/qla_edif_bsg.h b/drivers/scsi/qla2xxx/qla_edif_bsg.h
index 0931f4e4e127..514c265ba86e 100644
--- a/drivers/scsi/qla2xxx/qla_edif_bsg.h
+++ b/drivers/scsi/qla2xxx/qla_edif_bsg.h
@@ -89,7 +89,20 @@ struct app_plogi_reply {
struct app_pinfo_req {
struct app_id app_info;
uint8_t num_ports;
- port_id_t remote_pid;
+ struct {
+#ifdef __BIG_ENDIAN
+ uint8_t domain;
+ uint8_t area;
+ uint8_t al_pa;
+#elif defined(__LITTLE_ENDIAN)
+ uint8_t al_pa;
+ uint8_t area;
+ uint8_t domain;
+#else
+#error "__BIG_ENDIAN or __LITTLE_ENDIAN must be defined!"
+#endif
+ uint8_t rsvd_1;
+ } remote_pid;
uint8_t version;
uint8_t pad[VND_CMD_PAD_SIZE];
uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index e3256e721be1..9142df876c73 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -257,6 +257,7 @@ struct edif_sa_ctl *qla_edif_find_sa_ctl_by_index(fc_port_t *fcport,
/*
* Global Functions in qla_mid.c source file.
*/
+extern void qla_update_vp_map(struct scsi_qla_host *, int);
extern struct scsi_host_template qla2xxx_driver_template;
extern struct scsi_transport_template *qla2xxx_transport_vport_template;
extern void qla2x00_timer(struct timer_list *);
@@ -292,6 +293,7 @@ extern void qla2x00_alert_all_vps(struct rsp_que *, uint16_t *);
extern void qla2x00_async_event(scsi_qla_host_t *, struct rsp_que *,
uint16_t *);
extern int qla2x00_vp_abort_isp(scsi_qla_host_t *);
+void qla_adjust_buf(struct scsi_qla_host *);
/*
* Global Function Prototypes in qla_iocb.c source file.
@@ -721,10 +723,6 @@ extern int qla2x00_chk_ms_status(scsi_qla_host_t *, ms_iocb_entry_t *,
struct ct_sns_rsp *, const char *);
extern void qla2x00_async_iocb_timeout(void *data);
-extern int qla24xx_post_gpnid_work(struct scsi_qla_host *, port_id_t *);
-extern int qla24xx_async_gpnid(scsi_qla_host_t *, port_id_t *);
-void qla24xx_handle_gpnid_event(scsi_qla_host_t *, struct event_arg *);
-
int qla24xx_post_gpsc_work(struct scsi_qla_host *, fc_port_t *);
int qla24xx_async_gpsc(scsi_qla_host_t *, fc_port_t *);
void qla24xx_handle_gpsc_event(scsi_qla_host_t *, struct event_arg *);
@@ -734,9 +732,6 @@ int qla24xx_async_gffid(scsi_qla_host_t *vha, fc_port_t *fcport, bool);
int qla24xx_async_gpnft(scsi_qla_host_t *, u8, srb_t *);
void qla24xx_async_gpnft_done(scsi_qla_host_t *, srb_t *);
void qla24xx_async_gnnft_done(scsi_qla_host_t *, srb_t *);
-int qla24xx_async_gnnid(scsi_qla_host_t *, fc_port_t *);
-void qla24xx_handle_gnnid_event(scsi_qla_host_t *, struct event_arg *);
-int qla24xx_post_gnnid_work(struct scsi_qla_host *, fc_port_t *);
int qla24xx_post_gfpnid_work(struct scsi_qla_host *, fc_port_t *);
int qla24xx_async_gfpnid(scsi_qla_host_t *, fc_port_t *);
void qla24xx_handle_gfpnid_event(scsi_qla_host_t *, struct event_arg *);
@@ -962,7 +957,7 @@ extern struct fc_port *qlt_find_sess_invalidate_other(scsi_qla_host_t *,
uint64_t wwn, port_id_t port_id, uint16_t loop_id, struct fc_port **);
void qla24xx_delete_sess_fn(struct work_struct *);
void qlt_unknown_atio_work_fn(struct work_struct *);
-void qlt_update_host_map(struct scsi_qla_host *, port_id_t);
+void qla_update_host_map(struct scsi_qla_host *, port_id_t);
void qla_remove_hostmap(struct qla_hw_data *ha);
void qlt_clr_qp_table(struct scsi_qla_host *vha);
void qlt_set_mode(struct scsi_qla_host *);
@@ -975,6 +970,8 @@ extern void qla_nvme_abort_set_option
(struct abort_entry_24xx *abt, srb_t *sp);
extern void qla_nvme_abort_process_comp_status
(struct abort_entry_24xx *abt, srb_t *sp);
+struct scsi_qla_host *qla_find_host_by_vp_idx(struct scsi_qla_host *vha,
+ uint16_t vp_idx);
/* nvme.c */
void qla_nvme_unregister_remote_port(struct fc_port *fcport);
@@ -1019,5 +1016,8 @@ int qla2xxx_enable_port(struct Scsi_Host *shost);
uint64_t qla2x00_get_num_tgts(scsi_qla_host_t *vha);
uint64_t qla2x00_count_set_bits(u32 num);
-
+int qla_create_buf_pool(struct scsi_qla_host *, struct qla_qpair *);
+void qla_free_buf_pool(struct qla_qpair *);
+int qla_get_buf(struct scsi_qla_host *, struct qla_qpair *, struct qla_buf_dsc *);
+void qla_put_buf(struct qla_qpair *, struct qla_buf_dsc *);
#endif /* _QLA_GBL_H */
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 64ab070b8716..4738f8935f7f 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -2949,22 +2949,6 @@ done:
return rval;
}
-int qla24xx_post_gpnid_work(struct scsi_qla_host *vha, port_id_t *id)
-{
- struct qla_work_evt *e;
-
- if (test_bit(UNLOADING, &vha->dpc_flags) ||
- (vha->vp_idx && test_bit(VPORT_DELETE, &vha->dpc_flags)))
- return 0;
-
- e = qla2x00_alloc_work(vha, QLA_EVT_GPNID);
- if (!e)
- return QLA_FUNCTION_FAILED;
-
- e->u.gpnid.id = *id;
- return qla2x00_post_work(vha, e);
-}
-
void qla24xx_sp_unmap(scsi_qla_host_t *vha, srb_t *sp)
{
struct srb_iocb *c = &sp->u.iocb_cmd;
@@ -2997,287 +2981,6 @@ void qla24xx_sp_unmap(scsi_qla_host_t *vha, srb_t *sp)
kref_put(&sp->cmd_kref, qla2x00_sp_release);
}
-void qla24xx_handle_gpnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
-{
- fc_port_t *fcport, *conflict, *t;
- u16 data[2];
-
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "%s %d port_id: %06x\n",
- __func__, __LINE__, ea->id.b24);
-
- if (ea->rc) {
- /* cable is disconnected */
- list_for_each_entry_safe(fcport, t, &vha->vp_fcports, list) {
- if (fcport->d_id.b24 == ea->id.b24)
- fcport->scan_state = QLA_FCPORT_SCAN;
-
- qlt_schedule_sess_for_deletion(fcport);
- }
- } else {
- /* cable is connected */
- fcport = qla2x00_find_fcport_by_wwpn(vha, ea->port_name, 1);
- if (fcport) {
- list_for_each_entry_safe(conflict, t, &vha->vp_fcports,
- list) {
- if ((conflict->d_id.b24 == ea->id.b24) &&
- (fcport != conflict))
- /*
- * 2 fcports with conflict Nport ID or
- * an existing fcport is having nport ID
- * conflict with new fcport.
- */
-
- conflict->scan_state = QLA_FCPORT_SCAN;
-
- qlt_schedule_sess_for_deletion(conflict);
- }
-
- fcport->scan_needed = 0;
- fcport->rscn_gen++;
- fcport->scan_state = QLA_FCPORT_FOUND;
- fcport->flags |= FCF_FABRIC_DEVICE;
- if (fcport->login_retry == 0) {
- fcport->login_retry =
- vha->hw->login_retry_count;
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "Port login retry %8phN, lid 0x%04x cnt=%d.\n",
- fcport->port_name, fcport->loop_id,
- fcport->login_retry);
- }
- switch (fcport->disc_state) {
- case DSC_LOGIN_COMPLETE:
- /* recheck session is still intact. */
- ql_dbg(ql_dbg_disc, vha, 0x210d,
- "%s %d %8phC revalidate session with ADISC\n",
- __func__, __LINE__, fcport->port_name);
- data[0] = data[1] = 0;
- qla2x00_post_async_adisc_work(vha, fcport,
- data);
- break;
- case DSC_DELETED:
- ql_dbg(ql_dbg_disc, vha, 0x210d,
- "%s %d %8phC login\n", __func__, __LINE__,
- fcport->port_name);
- fcport->d_id = ea->id;
- qla24xx_fcport_handle_login(vha, fcport);
- break;
- case DSC_DELETE_PEND:
- fcport->d_id = ea->id;
- break;
- default:
- fcport->d_id = ea->id;
- break;
- }
- } else {
- list_for_each_entry_safe(conflict, t, &vha->vp_fcports,
- list) {
- if (conflict->d_id.b24 == ea->id.b24) {
- /* 2 fcports with conflict Nport ID or
- * an existing fcport is having nport ID
- * conflict with new fcport.
- */
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "%s %d %8phC DS %d\n",
- __func__, __LINE__,
- conflict->port_name,
- conflict->disc_state);
-
- conflict->scan_state = QLA_FCPORT_SCAN;
- qlt_schedule_sess_for_deletion(conflict);
- }
- }
-
- /* create new fcport */
- ql_dbg(ql_dbg_disc, vha, 0x2065,
- "%s %d %8phC post new sess\n",
- __func__, __LINE__, ea->port_name);
- qla24xx_post_newsess_work(vha, &ea->id,
- ea->port_name, NULL, NULL, 0);
- }
- }
-}
-
-static void qla2x00_async_gpnid_sp_done(srb_t *sp, int res)
-{
- struct scsi_qla_host *vha = sp->vha;
- struct ct_sns_req *ct_req =
- (struct ct_sns_req *)sp->u.iocb_cmd.u.ctarg.req;
- struct ct_sns_rsp *ct_rsp =
- (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp;
- struct event_arg ea;
- struct qla_work_evt *e;
- unsigned long flags;
-
- if (res)
- ql_dbg(ql_dbg_disc, vha, 0x2066,
- "Async done-%s fail res %x rscn gen %d ID %3phC. %8phC\n",
- sp->name, res, sp->gen1, &ct_req->req.port_id.port_id,
- ct_rsp->rsp.gpn_id.port_name);
- else
- ql_dbg(ql_dbg_disc, vha, 0x2066,
- "Async done-%s good rscn gen %d ID %3phC. %8phC\n",
- sp->name, sp->gen1, &ct_req->req.port_id.port_id,
- ct_rsp->rsp.gpn_id.port_name);
-
- memset(&ea, 0, sizeof(ea));
- memcpy(ea.port_name, ct_rsp->rsp.gpn_id.port_name, WWN_SIZE);
- ea.sp = sp;
- ea.id = be_to_port_id(ct_req->req.port_id.port_id);
- ea.rc = res;
-
- spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
- list_del(&sp->elem);
- spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
-
- if (res) {
- if (res == QLA_FUNCTION_TIMEOUT) {
- qla24xx_post_gpnid_work(sp->vha, &ea.id);
- /* ref: INIT */
- kref_put(&sp->cmd_kref, qla2x00_sp_release);
- return;
- }
- } else if (sp->gen1) {
- /* There was another RSCN for this Nport ID */
- qla24xx_post_gpnid_work(sp->vha, &ea.id);
- /* ref: INIT */
- kref_put(&sp->cmd_kref, qla2x00_sp_release);
- return;
- }
-
- qla24xx_handle_gpnid_event(vha, &ea);
-
- e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP);
- if (!e) {
- /* please ignore kernel warning. otherwise, we have mem leak. */
- dma_free_coherent(&vha->hw->pdev->dev,
- sp->u.iocb_cmd.u.ctarg.req_allocated_size,
- sp->u.iocb_cmd.u.ctarg.req,
- sp->u.iocb_cmd.u.ctarg.req_dma);
- sp->u.iocb_cmd.u.ctarg.req = NULL;
-
- dma_free_coherent(&vha->hw->pdev->dev,
- sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
- sp->u.iocb_cmd.u.ctarg.rsp,
- sp->u.iocb_cmd.u.ctarg.rsp_dma);
- sp->u.iocb_cmd.u.ctarg.rsp = NULL;
-
- /* ref: INIT */
- kref_put(&sp->cmd_kref, qla2x00_sp_release);
- return;
- }
-
- e->u.iosb.sp = sp;
- qla2x00_post_work(vha, e);
-}
-
-/* Get WWPN with Nport ID. */
-int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id)
-{
- int rval = QLA_FUNCTION_FAILED;
- struct ct_sns_req *ct_req;
- srb_t *sp, *tsp;
- struct ct_sns_pkt *ct_sns;
- unsigned long flags;
-
- if (!vha->flags.online)
- goto done;
-
- /* ref: INIT */
- sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
- if (!sp)
- goto done;
-
- sp->type = SRB_CT_PTHRU_CMD;
- sp->name = "gpnid";
- sp->u.iocb_cmd.u.ctarg.id = *id;
- sp->gen1 = 0;
- qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
- qla2x00_async_gpnid_sp_done);
-
- spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
- list_for_each_entry(tsp, &vha->gpnid_list, elem) {
- if (tsp->u.iocb_cmd.u.ctarg.id.b24 == id->b24) {
- tsp->gen1++;
- spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
- /* ref: INIT */
- kref_put(&sp->cmd_kref, qla2x00_sp_release);
- goto done;
- }
- }
- list_add_tail(&sp->elem, &vha->gpnid_list);
- spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
-
- sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
- sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
- GFP_KERNEL);
- sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
- if (!sp->u.iocb_cmd.u.ctarg.req) {
- ql_log(ql_log_warn, vha, 0xd041,
- "Failed to allocate ct_sns request.\n");
- goto done_free_sp;
- }
-
- sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
- sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
- GFP_KERNEL);
- sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
- if (!sp->u.iocb_cmd.u.ctarg.rsp) {
- ql_log(ql_log_warn, vha, 0xd042,
- "Failed to allocate ct_sns request.\n");
- goto done_free_sp;
- }
-
- ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
- memset(ct_sns, 0, sizeof(*ct_sns));
-
- ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
- /* CT_IU preamble */
- ct_req = qla2x00_prep_ct_req(ct_sns, GPN_ID_CMD, GPN_ID_RSP_SIZE);
-
- /* GPN_ID req */
- ct_req->req.port_id.port_id = port_id_to_be_id(*id);
-
- sp->u.iocb_cmd.u.ctarg.req_size = GPN_ID_REQ_SIZE;
- sp->u.iocb_cmd.u.ctarg.rsp_size = GPN_ID_RSP_SIZE;
- sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
-
- ql_dbg(ql_dbg_disc, vha, 0x2067,
- "Async-%s hdl=%x ID %3phC.\n", sp->name,
- sp->handle, &ct_req->req.port_id.port_id);
-
- rval = qla2x00_start_sp(sp);
- if (rval != QLA_SUCCESS)
- goto done_free_sp;
-
- return rval;
-
-done_free_sp:
- spin_lock_irqsave(&vha->hw->vport_slock, flags);
- list_del(&sp->elem);
- spin_unlock_irqrestore(&vha->hw->vport_slock, flags);
-
- if (sp->u.iocb_cmd.u.ctarg.req) {
- dma_free_coherent(&vha->hw->pdev->dev,
- sizeof(struct ct_sns_pkt),
- sp->u.iocb_cmd.u.ctarg.req,
- sp->u.iocb_cmd.u.ctarg.req_dma);
- sp->u.iocb_cmd.u.ctarg.req = NULL;
- }
- if (sp->u.iocb_cmd.u.ctarg.rsp) {
- dma_free_coherent(&vha->hw->pdev->dev,
- sizeof(struct ct_sns_pkt),
- sp->u.iocb_cmd.u.ctarg.rsp,
- sp->u.iocb_cmd.u.ctarg.rsp_dma);
- sp->u.iocb_cmd.u.ctarg.rsp = NULL;
- }
- /* ref: INIT */
- kref_put(&sp->cmd_kref, qla2x00_sp_release);
-done:
- return rval;
-}
-
-
void qla24xx_async_gffid_sp_done(srb_t *sp, int res)
{
struct scsi_qla_host *vha = sp->vha;
@@ -4190,116 +3893,6 @@ void qla_scan_work_fn(struct work_struct *work)
spin_unlock_irqrestore(&vha->work_lock, flags);
}
-/* GNN_ID */
-void qla24xx_handle_gnnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
-{
- qla24xx_post_gnl_work(vha, ea->fcport);
-}
-
-static void qla2x00_async_gnnid_sp_done(srb_t *sp, int res)
-{
- struct scsi_qla_host *vha = sp->vha;
- fc_port_t *fcport = sp->fcport;
- u8 *node_name = fcport->ct_desc.ct_sns->p.rsp.rsp.gnn_id.node_name;
- struct event_arg ea;
- u64 wwnn;
-
- fcport->flags &= ~FCF_ASYNC_SENT;
- wwnn = wwn_to_u64(node_name);
- if (wwnn)
- memcpy(fcport->node_name, node_name, WWN_SIZE);
-
- memset(&ea, 0, sizeof(ea));
- ea.fcport = fcport;
- ea.sp = sp;
- ea.rc = res;
-
- ql_dbg(ql_dbg_disc, vha, 0x204f,
- "Async done-%s res %x, WWPN %8phC %8phC\n",
- sp->name, res, fcport->port_name, fcport->node_name);
-
- qla24xx_handle_gnnid_event(vha, &ea);
-
- /* ref: INIT */
- kref_put(&sp->cmd_kref, qla2x00_sp_release);
-}
-
-int qla24xx_async_gnnid(scsi_qla_host_t *vha, fc_port_t *fcport)
-{
- int rval = QLA_FUNCTION_FAILED;
- struct ct_sns_req *ct_req;
- srb_t *sp;
-
- if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
- return rval;
-
- qla2x00_set_fcport_disc_state(fcport, DSC_GNN_ID);
- /* ref: INIT */
- sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
- if (!sp)
- goto done;
-
- fcport->flags |= FCF_ASYNC_SENT;
- sp->type = SRB_CT_PTHRU_CMD;
- sp->name = "gnnid";
- sp->gen1 = fcport->rscn_gen;
- sp->gen2 = fcport->login_gen;
- qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
- qla2x00_async_gnnid_sp_done);
-
- /* CT_IU preamble */
- ct_req = qla2x00_prep_ct_req(fcport->ct_desc.ct_sns, GNN_ID_CMD,
- GNN_ID_RSP_SIZE);
-
- /* GNN_ID req */
- ct_req->req.port_id.port_id = port_id_to_be_id(fcport->d_id);
-
-
- /* req & rsp use the same buffer */
- sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns;
- sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma;
- sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns;
- sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma;
- sp->u.iocb_cmd.u.ctarg.req_size = GNN_ID_REQ_SIZE;
- sp->u.iocb_cmd.u.ctarg.rsp_size = GNN_ID_RSP_SIZE;
- sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
-
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "Async-%s - %8phC hdl=%x loopid=%x portid %06x.\n",
- sp->name, fcport->port_name,
- sp->handle, fcport->loop_id, fcport->d_id.b24);
-
- rval = qla2x00_start_sp(sp);
- if (rval != QLA_SUCCESS)
- goto done_free_sp;
- return rval;
-
-done_free_sp:
- /* ref: INIT */
- kref_put(&sp->cmd_kref, qla2x00_sp_release);
- fcport->flags &= ~FCF_ASYNC_SENT;
-done:
- return rval;
-}
-
-int qla24xx_post_gnnid_work(struct scsi_qla_host *vha, fc_port_t *fcport)
-{
- struct qla_work_evt *e;
- int ls;
-
- ls = atomic_read(&vha->loop_state);
- if (((ls != LOOP_READY) && (ls != LOOP_UP)) ||
- test_bit(UNLOADING, &vha->dpc_flags))
- return 0;
-
- e = qla2x00_alloc_work(vha, QLA_EVT_GNNID);
- if (!e)
- return QLA_FUNCTION_FAILED;
-
- e->u.fcport.fcport = fcport;
- return qla2x00_post_work(vha, e);
-}
-
/* GPFN_ID */
void qla24xx_handle_gfpnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
{
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 8d9ecabb1aac..1dbc1496ebed 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -128,12 +128,14 @@ static void qla24xx_abort_iocb_timeout(void *data)
sp->cmd_sp)) {
qpair->req->outstanding_cmds[handle] = NULL;
cmdsp_found = 1;
+ qla_put_fw_resources(qpair, &sp->cmd_sp->iores);
}
/* removing the abort */
if (qpair->req->outstanding_cmds[handle] == sp) {
qpair->req->outstanding_cmds[handle] = NULL;
sp_found = 1;
+ qla_put_fw_resources(qpair, &sp->iores);
break;
}
}
@@ -388,6 +390,12 @@ done_free_sp:
fcport->flags &= ~FCF_ASYNC_SENT;
done:
fcport->flags &= ~FCF_ASYNC_ACTIVE;
+
+ /*
+ * async login failed. Could be due to iocb/exchange resource
+ * being low. Set state DELETED for re-login process to start again.
+ */
+ qla2x00_set_fcport_disc_state(fcport, DSC_DELETED);
return rval;
}
@@ -1646,7 +1654,6 @@ static void qla_chk_n2n_b4_login(struct scsi_qla_host *vha, fc_port_t *fcport)
int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
{
u16 data[2];
- u64 wwn;
u16 sec;
ql_dbg(ql_dbg_disc, vha, 0x20d8,
@@ -1686,7 +1693,6 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
switch (fcport->disc_state) {
case DSC_DELETED:
- wwn = wwn_to_u64(fcport->node_name);
switch (vha->hw->current_topology) {
case ISP_CFG_N:
if (fcport_is_smaller(fcport)) {
@@ -1710,12 +1716,7 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
}
break;
default:
- if (wwn == 0) {
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "%s %d %8phC post GNNID\n",
- __func__, __LINE__, fcport->port_name);
- qla24xx_post_gnnid_work(vha, fcport);
- } else if (fcport->loop_id == FC_NO_LOOP_ID) {
+ if (fcport->loop_id == FC_NO_LOOP_ID) {
ql_dbg(ql_dbg_disc, vha, 0x20bd,
"%s %d %8phC post gnl\n",
__func__, __LINE__, fcport->port_name);
@@ -2000,6 +2001,7 @@ qla2x00_tmf_iocb_timeout(void *data)
for (h = 1; h < sp->qpair->req->num_outstanding_cmds; h++) {
if (sp->qpair->req->outstanding_cmds[h] == sp) {
sp->qpair->req->outstanding_cmds[h] = NULL;
+ qla_put_fw_resources(sp->qpair, &sp->iores);
break;
}
}
@@ -2073,7 +2075,6 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
done_free_sp:
/* ref: INIT */
kref_put(&sp->cmd_kref, qla2x00_sp_release);
- fcport->flags &= ~FCF_ASYNC_SENT;
done:
return rval;
}
@@ -2315,7 +2316,7 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
ea->fcport->login_pause = 1;
ql_dbg(ql_dbg_disc, vha, 0x20ed,
- "%s %d %8phC NPortId %06x inuse with loopid 0x%x. post gidpn\n",
+ "%s %d %8phC NPortId %06x inuse with loopid 0x%x.\n",
__func__, __LINE__, ea->fcport->port_name,
ea->fcport->d_id.b24, lid);
} else {
@@ -3943,6 +3944,12 @@ void qla_init_iocb_limit(scsi_qla_host_t *vha)
ha->base_qpair->fwres.iocbs_limit = limit;
ha->base_qpair->fwres.iocbs_qp_limit = limit / num_qps;
ha->base_qpair->fwres.iocbs_used = 0;
+
+ ha->base_qpair->fwres.exch_total = ha->orig_fw_xcb_count;
+ ha->base_qpair->fwres.exch_limit = (ha->orig_fw_xcb_count *
+ QLA_IOCB_PCT_LIMIT) / 100;
+ ha->base_qpair->fwres.exch_used = 0;
+
for (i = 0; i < ha->max_qpairs; i++) {
if (ha->queue_pair_map[i]) {
ha->queue_pair_map[i]->fwres.iocbs_total =
@@ -3951,6 +3958,10 @@ void qla_init_iocb_limit(scsi_qla_host_t *vha)
ha->queue_pair_map[i]->fwres.iocbs_qp_limit =
limit / num_qps;
ha->queue_pair_map[i]->fwres.iocbs_used = 0;
+ ha->queue_pair_map[i]->fwres.exch_total = ha->orig_fw_xcb_count;
+ ha->queue_pair_map[i]->fwres.exch_limit =
+ (ha->orig_fw_xcb_count * QLA_IOCB_PCT_LIMIT) / 100;
+ ha->queue_pair_map[i]->fwres.exch_used = 0;
}
}
}
@@ -4809,9 +4820,9 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
spin_lock_irqsave(&ha->hardware_lock, flags);
if (vha->hw->flags.edif_enabled) {
if (topo != 2)
- qlt_update_host_map(vha, id);
+ qla_update_host_map(vha, id);
} else if (!(topo == 2 && ha->flags.n2n_bigger))
- qlt_update_host_map(vha, id);
+ qla_update_host_map(vha, id);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
if (!vha->flags.init_done)
@@ -5206,27 +5217,6 @@ qla2x00_nvram_config(scsi_qla_host_t *vha)
return (rval);
}
-static void
-qla2x00_rport_del(void *data)
-{
- fc_port_t *fcport = data;
- struct fc_rport *rport;
- unsigned long flags;
-
- spin_lock_irqsave(fcport->vha->host->host_lock, flags);
- rport = fcport->drport ? fcport->drport : fcport->rport;
- fcport->drport = NULL;
- spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
- if (rport) {
- ql_dbg(ql_dbg_disc, fcport->vha, 0x210b,
- "%s %8phN. rport %p roles %x\n",
- __func__, fcport->port_name, rport,
- rport->roles);
-
- fc_remote_port_delete(rport);
- }
-}
-
void qla2x00_set_fcport_state(fc_port_t *fcport, int state)
{
int old_state;
@@ -6743,33 +6733,6 @@ int qla2x00_perform_loop_resync(scsi_qla_host_t *ha)
return rval;
}
-void
-qla2x00_update_fcports(scsi_qla_host_t *base_vha)
-{
- fc_port_t *fcport;
- struct scsi_qla_host *vha, *tvp;
- struct qla_hw_data *ha = base_vha->hw;
- unsigned long flags;
-
- spin_lock_irqsave(&ha->vport_slock, flags);
- /* Go with deferred removal of rport references. */
- list_for_each_entry_safe(vha, tvp, &base_vha->hw->vp_list, list) {
- atomic_inc(&vha->vref_count);
- list_for_each_entry(fcport, &vha->vp_fcports, list) {
- if (fcport->drport &&
- atomic_read(&fcport->state) != FCS_UNCONFIGURED) {
- spin_unlock_irqrestore(&ha->vport_slock, flags);
- qla2x00_rport_del(fcport);
-
- spin_lock_irqsave(&ha->vport_slock, flags);
- }
- }
- atomic_dec(&vha->vref_count);
- wake_up(&vha->vref_waitq);
- }
- spin_unlock_irqrestore(&ha->vport_slock, flags);
-}
-
/* Assumes idc_lock always held on entry */
void
qla83xx_reset_ownership(scsi_qla_host_t *vha)
@@ -9461,8 +9424,6 @@ struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos,
qpair->req = ha->req_q_map[req_id];
qpair->rsp->req = qpair->req;
qpair->rsp->qpair = qpair;
- /* init qpair to this cpu. Will adjust at run time. */
- qla_cpu_update(qpair, raw_smp_processor_id());
if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
if (ha->fw_attributes & BIT_4)
@@ -9477,6 +9438,13 @@ struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos,
goto fail_mempool;
}
+ if (qla_create_buf_pool(vha, qpair)) {
+ ql_log(ql_log_warn, vha, 0xd036,
+ "Failed to initialize buf pool for qpair %d\n",
+ qpair->id);
+ goto fail_bufpool;
+ }
+
/* Mark as online */
qpair->online = 1;
@@ -9492,7 +9460,10 @@ struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos,
}
return qpair;
+fail_bufpool:
+ mempool_destroy(qpair->srb_mempool);
fail_mempool:
+ qla25xx_delete_req_que(vha, qpair->req);
fail_req:
qla25xx_delete_rsp_que(vha, qpair->rsp);
fail_rsp:
@@ -9518,6 +9489,8 @@ int qla2xxx_delete_qpair(struct scsi_qla_host *vha, struct qla_qpair *qpair)
qpair->delete_in_progress = 1;
+ qla_free_buf_pool(qpair);
+
ret = qla25xx_delete_req_que(vha, qpair->req);
if (ret != QLA_SUCCESS)
goto fail;
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index 5185dc5daf80..cce6e425c121 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -380,24 +380,26 @@ qla2xxx_get_fc4_priority(struct scsi_qla_host *vha)
enum {
RESOURCE_NONE,
- RESOURCE_INI,
+ RESOURCE_IOCB = BIT_0,
+ RESOURCE_EXCH = BIT_1, /* exchange */
+ RESOURCE_FORCE = BIT_2,
};
static inline int
-qla_get_iocbs(struct qla_qpair *qp, struct iocb_resource *iores)
+qla_get_fw_resources(struct qla_qpair *qp, struct iocb_resource *iores)
{
u16 iocbs_used, i;
+ u16 exch_used;
struct qla_hw_data *ha = qp->vha->hw;
if (!ql2xenforce_iocb_limit) {
iores->res_type = RESOURCE_NONE;
return 0;
}
+ if (iores->res_type & RESOURCE_FORCE)
+ goto force;
- if ((iores->iocb_cnt + qp->fwres.iocbs_used) < qp->fwres.iocbs_qp_limit) {
- qp->fwres.iocbs_used += iores->iocb_cnt;
- return 0;
- } else {
+ if ((iores->iocb_cnt + qp->fwres.iocbs_used) >= qp->fwres.iocbs_qp_limit) {
/* no need to acquire qpair lock. It's just rough calculation */
iocbs_used = ha->base_qpair->fwres.iocbs_used;
for (i = 0; i < ha->max_qpairs; i++) {
@@ -405,30 +407,49 @@ qla_get_iocbs(struct qla_qpair *qp, struct iocb_resource *iores)
iocbs_used += ha->queue_pair_map[i]->fwres.iocbs_used;
}
- if ((iores->iocb_cnt + iocbs_used) < qp->fwres.iocbs_limit) {
- qp->fwres.iocbs_used += iores->iocb_cnt;
- return 0;
- } else {
+ if ((iores->iocb_cnt + iocbs_used) >= qp->fwres.iocbs_limit) {
+ iores->res_type = RESOURCE_NONE;
+ return -ENOSPC;
+ }
+ }
+
+ if (iores->res_type & RESOURCE_EXCH) {
+ exch_used = ha->base_qpair->fwres.exch_used;
+ for (i = 0; i < ha->max_qpairs; i++) {
+ if (ha->queue_pair_map[i])
+ exch_used += ha->queue_pair_map[i]->fwres.exch_used;
+ }
+
+ if ((exch_used + iores->exch_cnt) >= qp->fwres.exch_limit) {
iores->res_type = RESOURCE_NONE;
return -ENOSPC;
}
}
+force:
+ qp->fwres.iocbs_used += iores->iocb_cnt;
+ qp->fwres.exch_used += iores->exch_cnt;
+ return 0;
}
static inline void
-qla_put_iocbs(struct qla_qpair *qp, struct iocb_resource *iores)
+qla_put_fw_resources(struct qla_qpair *qp, struct iocb_resource *iores)
{
- switch (iores->res_type) {
- case RESOURCE_NONE:
- break;
- default:
+ if (iores->res_type & RESOURCE_IOCB) {
if (qp->fwres.iocbs_used >= iores->iocb_cnt) {
qp->fwres.iocbs_used -= iores->iocb_cnt;
} else {
- // should not happen
+ /* should not happen */
qp->fwres.iocbs_used = 0;
}
- break;
+ }
+
+ if (iores->res_type & RESOURCE_EXCH) {
+ if (qp->fwres.exch_used >= iores->exch_cnt) {
+ qp->fwres.exch_used -= iores->exch_cnt;
+ } else {
+ /* should not happen */
+ qp->fwres.exch_used = 0;
+ }
}
iores->res_type = RESOURCE_NONE;
}
@@ -494,3 +515,58 @@ fcport_is_bigger(fc_port_t *fcport)
{
return !fcport_is_smaller(fcport);
}
+
+static inline struct qla_qpair *
+qla_mapq_nvme_select_qpair(struct qla_hw_data *ha, struct qla_qpair *qpair)
+{
+ int cpuid = smp_processor_id();
+
+ if (qpair->cpuid != cpuid &&
+ ha->qp_cpu_map[cpuid]) {
+ qpair = ha->qp_cpu_map[cpuid];
+ }
+ return qpair;
+}
+
+static inline void
+qla_mapq_init_qp_cpu_map(struct qla_hw_data *ha,
+ struct qla_msix_entry *msix,
+ struct qla_qpair *qpair)
+{
+ const struct cpumask *mask;
+ unsigned int cpu;
+
+ if (!ha->qp_cpu_map)
+ return;
+ mask = pci_irq_get_affinity(ha->pdev, msix->vector_base0);
+ qpair->cpuid = cpumask_first(mask);
+ for_each_cpu(cpu, mask) {
+ ha->qp_cpu_map[cpu] = qpair;
+ }
+ msix->cpuid = qpair->cpuid;
+}
+
+static inline void
+qla_mapq_free_qp_cpu_map(struct qla_hw_data *ha)
+{
+ if (ha->qp_cpu_map) {
+ kfree(ha->qp_cpu_map);
+ ha->qp_cpu_map = NULL;
+ }
+}
+
+static inline int qla_mapq_alloc_qp_cpu_map(struct qla_hw_data *ha)
+{
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+
+ if (!ha->qp_cpu_map) {
+ ha->qp_cpu_map = kcalloc(NR_CPUS, sizeof(struct qla_qpair *),
+ GFP_KERNEL);
+ if (!ha->qp_cpu_map) {
+ ql_log(ql_log_fatal, vha, 0x0180,
+ "Unable to allocate memory for qp_cpu_map ptrs.\n");
+ return -1;
+ }
+ }
+ return 0;
+}
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 42ce4e1fe744..b9b3e6f80ea9 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -623,7 +623,7 @@ qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
}
cur_seg = scsi_sglist(cmd);
- ctx = sp->u.scmd.ct6_ctx;
+ ctx = &sp->u.scmd.ct6_ctx;
while (tot_dsds) {
avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
@@ -1589,9 +1589,10 @@ qla24xx_start_scsi(srb_t *sp)
tot_dsds = nseg;
req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
- sp->iores.res_type = RESOURCE_INI;
+ sp->iores.res_type = RESOURCE_IOCB | RESOURCE_EXCH;
+ sp->iores.exch_cnt = 1;
sp->iores.iocb_cnt = req_cnt;
- if (qla_get_iocbs(sp->qpair, &sp->iores))
+ if (qla_get_fw_resources(sp->qpair, &sp->iores))
goto queuing_error;
if (req->cnt < (req_cnt + 2)) {
@@ -1678,7 +1679,7 @@ queuing_error:
if (tot_dsds)
scsi_dma_unmap(cmd);
- qla_put_iocbs(sp->qpair, &sp->iores);
+ qla_put_fw_resources(sp->qpair, &sp->iores);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
return QLA_FUNCTION_FAILED;
@@ -1793,9 +1794,10 @@ qla24xx_dif_start_scsi(srb_t *sp)
tot_prot_dsds = nseg;
tot_dsds += nseg;
- sp->iores.res_type = RESOURCE_INI;
+ sp->iores.res_type = RESOURCE_IOCB | RESOURCE_EXCH;
+ sp->iores.exch_cnt = 1;
sp->iores.iocb_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
- if (qla_get_iocbs(sp->qpair, &sp->iores))
+ if (qla_get_fw_resources(sp->qpair, &sp->iores))
goto queuing_error;
if (req->cnt < (req_cnt + 2)) {
@@ -1883,7 +1885,7 @@ queuing_error:
}
/* Cleanup will be performed by the caller (queuecommand) */
- qla_put_iocbs(sp->qpair, &sp->iores);
+ qla_put_fw_resources(sp->qpair, &sp->iores);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
return QLA_FUNCTION_FAILED;
@@ -1952,9 +1954,10 @@ qla2xxx_start_scsi_mq(srb_t *sp)
tot_dsds = nseg;
req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
- sp->iores.res_type = RESOURCE_INI;
+ sp->iores.res_type = RESOURCE_IOCB | RESOURCE_EXCH;
+ sp->iores.exch_cnt = 1;
sp->iores.iocb_cnt = req_cnt;
- if (qla_get_iocbs(sp->qpair, &sp->iores))
+ if (qla_get_fw_resources(sp->qpair, &sp->iores))
goto queuing_error;
if (req->cnt < (req_cnt + 2)) {
@@ -2041,7 +2044,7 @@ queuing_error:
if (tot_dsds)
scsi_dma_unmap(cmd);
- qla_put_iocbs(sp->qpair, &sp->iores);
+ qla_put_fw_resources(sp->qpair, &sp->iores);
spin_unlock_irqrestore(&qpair->qp_lock, flags);
return QLA_FUNCTION_FAILED;
@@ -2171,9 +2174,10 @@ qla2xxx_dif_start_scsi_mq(srb_t *sp)
tot_prot_dsds = nseg;
tot_dsds += nseg;
- sp->iores.res_type = RESOURCE_INI;
+ sp->iores.res_type = RESOURCE_IOCB | RESOURCE_EXCH;
+ sp->iores.exch_cnt = 1;
sp->iores.iocb_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
- if (qla_get_iocbs(sp->qpair, &sp->iores))
+ if (qla_get_fw_resources(sp->qpair, &sp->iores))
goto queuing_error;
if (req->cnt < (req_cnt + 2)) {
@@ -2260,7 +2264,7 @@ queuing_error:
}
/* Cleanup will be performed by the caller (queuecommand) */
- qla_put_iocbs(sp->qpair, &sp->iores);
+ qla_put_fw_resources(sp->qpair, &sp->iores);
spin_unlock_irqrestore(&qpair->qp_lock, flags);
return QLA_FUNCTION_FAILED;
@@ -2916,7 +2920,7 @@ static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res)
conflict_fcport->conflict = fcport;
fcport->login_pause = 1;
ql_dbg(ql_dbg_disc, vha, 0x20ed,
- "%s %d %8phC pid %06x inuse with lid %#x post gidpn\n",
+ "%s %d %8phC pid %06x inuse with lid %#x.\n",
__func__, __LINE__,
fcport->port_name,
fcport->d_id.b24, lid);
@@ -3455,13 +3459,7 @@ sufficient_dsds:
goto queuing_error;
}
- ctx = sp->u.scmd.ct6_ctx =
- mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
- if (!ctx) {
- ql_log(ql_log_fatal, vha, 0x3010,
- "Failed to allocate ctx for cmd=%p.\n", cmd);
- goto queuing_error;
- }
+ ctx = &sp->u.scmd.ct6_ctx;
memset(ctx, 0, sizeof(struct ct6_dsd));
ctx->fcp_cmnd = dma_pool_zalloc(ha->fcp_cmnd_dma_pool,
@@ -3813,6 +3811,65 @@ qla24xx_prlo_iocb(srb_t *sp, struct logio_entry_24xx *logio)
logio->vp_index = sp->fcport->vha->vp_idx;
}
+static int qla_get_iocbs_resource(struct srb *sp)
+{
+ bool get_exch;
+ bool push_it_through = false;
+
+ if (!ql2xenforce_iocb_limit) {
+ sp->iores.res_type = RESOURCE_NONE;
+ return 0;
+ }
+ sp->iores.res_type = RESOURCE_NONE;
+
+ switch (sp->type) {
+ case SRB_TM_CMD:
+ case SRB_PRLI_CMD:
+ case SRB_ADISC_CMD:
+ push_it_through = true;
+ fallthrough;
+ case SRB_LOGIN_CMD:
+ case SRB_ELS_CMD_RPT:
+ case SRB_ELS_CMD_HST:
+ case SRB_ELS_CMD_HST_NOLOGIN:
+ case SRB_CT_CMD:
+ case SRB_NVME_LS:
+ case SRB_ELS_DCMD:
+ get_exch = true;
+ break;
+
+ case SRB_FXIOCB_DCMD:
+ case SRB_FXIOCB_BCMD:
+ sp->iores.res_type = RESOURCE_NONE;
+ return 0;
+
+ case SRB_SA_UPDATE:
+ case SRB_SA_REPLACE:
+ case SRB_MB_IOCB:
+ case SRB_ABT_CMD:
+ case SRB_NACK_PLOGI:
+ case SRB_NACK_PRLI:
+ case SRB_NACK_LOGO:
+ case SRB_LOGOUT_CMD:
+ case SRB_CTRL_VP:
+ push_it_through = true;
+ fallthrough;
+ default:
+ get_exch = false;
+ }
+
+ sp->iores.res_type |= RESOURCE_IOCB;
+ sp->iores.iocb_cnt = 1;
+ if (get_exch) {
+ sp->iores.res_type |= RESOURCE_EXCH;
+ sp->iores.exch_cnt = 1;
+ }
+ if (push_it_through)
+ sp->iores.res_type |= RESOURCE_FORCE;
+
+ return qla_get_fw_resources(sp->qpair, &sp->iores);
+}
+
int
qla2x00_start_sp(srb_t *sp)
{
@@ -3827,6 +3884,12 @@ qla2x00_start_sp(srb_t *sp)
return -EIO;
spin_lock_irqsave(qp->qp_lock_ptr, flags);
+ rval = qla_get_iocbs_resource(sp);
+ if (rval) {
+ spin_unlock_irqrestore(qp->qp_lock_ptr, flags);
+ return -EAGAIN;
+ }
+
pkt = __qla2x00_alloc_iocbs(sp->qpair, sp);
if (!pkt) {
rval = EAGAIN;
@@ -3927,6 +3990,8 @@ qla2x00_start_sp(srb_t *sp)
wmb();
qla2x00_start_iocbs(vha, qp->req);
done:
+ if (rval)
+ qla_put_fw_resources(sp->qpair, &sp->iores);
spin_unlock_irqrestore(qp->qp_lock_ptr, flags);
return rval;
}
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index e19fde304e5c..46e8b38603f0 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -3112,6 +3112,7 @@ qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
}
bsg_reply->reply_payload_rcv_len = 0;
+ qla_put_fw_resources(sp->qpair, &sp->iores);
done:
/* Return the vendor specific reply to API */
bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
@@ -3197,7 +3198,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
}
return;
}
- qla_put_iocbs(sp->qpair, &sp->iores);
+ qla_put_fw_resources(sp->qpair, &sp->iores);
if (sp->cmd_type != TYPE_SRB) {
req->outstanding_cmds[handle] = NULL;
@@ -3362,8 +3363,6 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
"Dropped frame(s) detected (0x%x of 0x%x bytes).\n",
resid, scsi_bufflen(cp));
- vha->interface_err_cnt++;
-
res = DID_ERROR << 16 | lscsi_status;
goto check_scsi_status;
}
@@ -3618,7 +3617,6 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
default:
sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
if (sp) {
- qla_put_iocbs(sp->qpair, &sp->iores);
sp->done(sp, res);
return 0;
}
@@ -3771,7 +3769,6 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
if (rsp->qpair->cpuid != smp_processor_id() || !rsp->qpair->rcv_intr) {
rsp->qpair->rcv_intr = 1;
- qla_cpu_update(rsp->qpair, smp_processor_id());
}
#define __update_rsp_in(_is_shadow_hba, _rsp, _rsp_in) \
@@ -4379,6 +4376,7 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
for (i = 0; i < ha->msix_count; i++) {
qentry = &ha->msix_entries[i];
qentry->vector = pci_irq_vector(ha->pdev, i);
+ qentry->vector_base0 = i;
qentry->entry = i;
qentry->have_irq = 0;
qentry->in_use = 0;
@@ -4606,5 +4604,6 @@ int qla25xx_request_irq(struct qla_hw_data *ha, struct qla_qpair *qpair,
}
msix->have_irq = 1;
msix->handle = qpair;
+ qla_mapq_init_qp_cpu_map(ha, msix, qpair);
return ret;
}
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 359595a64664..254fd4c64262 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -4010,7 +4010,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
rptid_entry->port_id[2], rptid_entry->port_id[1],
rptid_entry->port_id[0]);
ha->current_topology = ISP_CFG_NL;
- qlt_update_host_map(vha, id);
+ qla_update_host_map(vha, id);
} else if (rptid_entry->format == 1) {
/* fabric */
@@ -4126,7 +4126,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
WWN_SIZE);
}
- qlt_update_host_map(vha, id);
+ qla_update_host_map(vha, id);
}
set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
@@ -4153,7 +4153,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
if (!found)
return;
- qlt_update_host_map(vp, id);
+ qla_update_host_map(vp, id);
/*
* Cannot configure here as we are still sitting on the
@@ -4184,7 +4184,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
ha->flags.n2n_ae = 1;
spin_lock_irqsave(&ha->vport_slock, flags);
- qlt_update_vp_map(vha, SET_AL_PA);
+ qla_update_vp_map(vha, SET_AL_PA);
spin_unlock_irqrestore(&ha->vport_slock, flags);
list_for_each_entry(fcport, &vha->vp_fcports, list) {
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index 16a9f22bb860..78661b658dcd 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -52,7 +52,7 @@ qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
spin_unlock_irqrestore(&ha->vport_slock, flags);
spin_lock_irqsave(&ha->hardware_lock, flags);
- qlt_update_vp_map(vha, SET_VP_IDX);
+ qla_update_vp_map(vha, SET_VP_IDX);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
mutex_unlock(&ha->vport_lock);
@@ -80,7 +80,7 @@ qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
spin_lock_irqsave(&ha->vport_slock, flags);
if (atomic_read(&vha->vref_count) == 0) {
list_del(&vha->list);
- qlt_update_vp_map(vha, RESET_VP_IDX);
+ qla_update_vp_map(vha, RESET_VP_IDX);
bailout = 1;
}
spin_unlock_irqrestore(&ha->vport_slock, flags);
@@ -95,7 +95,7 @@ qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
"vha->vref_count=%u timeout\n", vha->vref_count.counter);
spin_lock_irqsave(&ha->vport_slock, flags);
list_del(&vha->list);
- qlt_update_vp_map(vha, RESET_VP_IDX);
+ qla_update_vp_map(vha, RESET_VP_IDX);
spin_unlock_irqrestore(&ha->vport_slock, flags);
}
@@ -187,7 +187,7 @@ qla24xx_disable_vp(scsi_qla_host_t *vha)
/* Remove port id from vp target map */
spin_lock_irqsave(&vha->hw->hardware_lock, flags);
- qlt_update_vp_map(vha, RESET_AL_PA);
+ qla_update_vp_map(vha, RESET_AL_PA);
spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
qla2x00_mark_vp_devices_dead(vha);
@@ -384,15 +384,6 @@ qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
}
}
- if (test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags)) {
- ql_dbg(ql_dbg_dpc, vha, 0x4016,
- "FCPort update scheduled.\n");
- qla2x00_update_fcports(vha);
- clear_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags);
- ql_dbg(ql_dbg_dpc, vha, 0x4017,
- "FCPort update end.\n");
- }
-
if (test_bit(RELOGIN_NEEDED, &vha->dpc_flags) &&
!test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) &&
atomic_read(&vha->loop_state) != LOOP_DOWN) {
@@ -1014,3 +1005,288 @@ done:
kref_put(&sp->cmd_kref, qla2x00_sp_release);
return rval;
}
+
+struct scsi_qla_host *qla_find_host_by_vp_idx(struct scsi_qla_host *vha, uint16_t vp_idx)
+{
+ struct qla_hw_data *ha = vha->hw;
+
+ if (vha->vp_idx == vp_idx)
+ return vha;
+
+ BUG_ON(ha->vp_map == NULL);
+ if (likely(test_bit(vp_idx, ha->vp_idx_map)))
+ return ha->vp_map[vp_idx].vha;
+
+ return NULL;
+}
+
+/* vport_slock to be held by the caller */
+void
+qla_update_vp_map(struct scsi_qla_host *vha, int cmd)
+{
+ void *slot;
+ u32 key;
+ int rc;
+
+ if (!vha->hw->vp_map)
+ return;
+
+ key = vha->d_id.b24;
+
+ switch (cmd) {
+ case SET_VP_IDX:
+ vha->hw->vp_map[vha->vp_idx].vha = vha;
+ break;
+ case SET_AL_PA:
+ slot = btree_lookup32(&vha->hw->host_map, key);
+ if (!slot) {
+ ql_dbg(ql_dbg_disc, vha, 0xf018,
+ "Save vha in host_map %p %06x\n", vha, key);
+ rc = btree_insert32(&vha->hw->host_map,
+ key, vha, GFP_ATOMIC);
+ if (rc)
+ ql_log(ql_log_info, vha, 0xd03e,
+ "Unable to insert s_id into host_map: %06x\n",
+ key);
+ return;
+ }
+ ql_dbg(ql_dbg_disc, vha, 0xf019,
+ "replace existing vha in host_map %p %06x\n", vha, key);
+ btree_update32(&vha->hw->host_map, key, vha);
+ break;
+ case RESET_VP_IDX:
+ vha->hw->vp_map[vha->vp_idx].vha = NULL;
+ break;
+ case RESET_AL_PA:
+ ql_dbg(ql_dbg_disc, vha, 0xf01a,
+ "clear vha in host_map %p %06x\n", vha, key);
+ slot = btree_lookup32(&vha->hw->host_map, key);
+ if (slot)
+ btree_remove32(&vha->hw->host_map, key);
+ vha->d_id.b24 = 0;
+ break;
+ }
+}
+
+void qla_update_host_map(struct scsi_qla_host *vha, port_id_t id)
+{
+
+ if (!vha->d_id.b24) {
+ vha->d_id = id;
+ qla_update_vp_map(vha, SET_AL_PA);
+ } else if (vha->d_id.b24 != id.b24) {
+ qla_update_vp_map(vha, RESET_AL_PA);
+ vha->d_id = id;
+ qla_update_vp_map(vha, SET_AL_PA);
+ }
+}
+
+int qla_create_buf_pool(struct scsi_qla_host *vha, struct qla_qpair *qp)
+{
+ int sz;
+
+ qp->buf_pool.num_bufs = qp->req->length;
+
+ sz = BITS_TO_LONGS(qp->req->length);
+ qp->buf_pool.buf_map = kcalloc(sz, sizeof(long), GFP_KERNEL);
+ if (!qp->buf_pool.buf_map) {
+ ql_log(ql_log_warn, vha, 0x0186,
+ "Failed to allocate buf_map(%zd).\n", sz * sizeof(unsigned long));
+ return -ENOMEM;
+ }
+ sz = qp->req->length * sizeof(void *);
+ qp->buf_pool.buf_array = kcalloc(qp->req->length, sizeof(void *), GFP_KERNEL);
+ if (!qp->buf_pool.buf_array) {
+ ql_log(ql_log_warn, vha, 0x0186,
+ "Failed to allocate buf_array(%d).\n", sz);
+ kfree(qp->buf_pool.buf_map);
+ return -ENOMEM;
+ }
+ sz = qp->req->length * sizeof(dma_addr_t);
+ qp->buf_pool.dma_array = kcalloc(qp->req->length, sizeof(dma_addr_t), GFP_KERNEL);
+ if (!qp->buf_pool.dma_array) {
+ ql_log(ql_log_warn, vha, 0x0186,
+ "Failed to allocate dma_array(%d).\n", sz);
+ kfree(qp->buf_pool.buf_map);
+ kfree(qp->buf_pool.buf_array);
+ return -ENOMEM;
+ }
+ set_bit(0, qp->buf_pool.buf_map);
+ return 0;
+}
+
+void qla_free_buf_pool(struct qla_qpair *qp)
+{
+ int i;
+ struct qla_hw_data *ha = qp->vha->hw;
+
+ for (i = 0; i < qp->buf_pool.num_bufs; i++) {
+ if (qp->buf_pool.buf_array[i] && qp->buf_pool.dma_array[i])
+ dma_pool_free(ha->fcp_cmnd_dma_pool, qp->buf_pool.buf_array[i],
+ qp->buf_pool.dma_array[i]);
+ qp->buf_pool.buf_array[i] = NULL;
+ qp->buf_pool.dma_array[i] = 0;
+ }
+
+ kfree(qp->buf_pool.dma_array);
+ kfree(qp->buf_pool.buf_array);
+ kfree(qp->buf_pool.buf_map);
+}
+
+/* it is assume qp->qp_lock is held at this point */
+int qla_get_buf(struct scsi_qla_host *vha, struct qla_qpair *qp, struct qla_buf_dsc *dsc)
+{
+ u16 tag, i = 0;
+ void *buf;
+ dma_addr_t buf_dma;
+ struct qla_hw_data *ha = vha->hw;
+
+ dsc->tag = TAG_FREED;
+again:
+ tag = find_first_zero_bit(qp->buf_pool.buf_map, qp->buf_pool.num_bufs);
+ if (tag >= qp->buf_pool.num_bufs) {
+ ql_dbg(ql_dbg_io, vha, 0x00e2,
+ "qp(%d) ran out of buf resource.\n", qp->id);
+ return -EIO;
+ }
+ if (tag == 0) {
+ set_bit(0, qp->buf_pool.buf_map);
+ i++;
+ if (i == 5) {
+ ql_dbg(ql_dbg_io, vha, 0x00e3,
+ "qp(%d) unable to get tag.\n", qp->id);
+ return -EIO;
+ }
+ goto again;
+ }
+
+ if (!qp->buf_pool.buf_array[tag]) {
+ buf = dma_pool_zalloc(ha->fcp_cmnd_dma_pool, GFP_ATOMIC, &buf_dma);
+ if (!buf) {
+ ql_log(ql_log_fatal, vha, 0x13b1,
+ "Failed to allocate buf.\n");
+ return -ENOMEM;
+ }
+
+ dsc->buf = qp->buf_pool.buf_array[tag] = buf;
+ dsc->buf_dma = qp->buf_pool.dma_array[tag] = buf_dma;
+ qp->buf_pool.num_alloc++;
+ } else {
+ dsc->buf = qp->buf_pool.buf_array[tag];
+ dsc->buf_dma = qp->buf_pool.dma_array[tag];
+ memset(dsc->buf, 0, FCP_CMND_DMA_POOL_SIZE);
+ }
+
+ qp->buf_pool.num_active++;
+ if (qp->buf_pool.num_active > qp->buf_pool.max_used)
+ qp->buf_pool.max_used = qp->buf_pool.num_active;
+
+ dsc->tag = tag;
+ set_bit(tag, qp->buf_pool.buf_map);
+ return 0;
+}
+
+static void qla_trim_buf(struct qla_qpair *qp, u16 trim)
+{
+ int i, j;
+ struct qla_hw_data *ha = qp->vha->hw;
+
+ if (!trim)
+ return;
+
+ for (i = 0; i < trim; i++) {
+ j = qp->buf_pool.num_alloc - 1;
+ if (test_bit(j, qp->buf_pool.buf_map)) {
+ ql_dbg(ql_dbg_io + ql_dbg_verbose, qp->vha, 0x300b,
+ "QP id(%d): trim active buf[%d]. Remain %d bufs\n",
+ qp->id, j, qp->buf_pool.num_alloc);
+ return;
+ }
+
+ if (qp->buf_pool.buf_array[j]) {
+ dma_pool_free(ha->fcp_cmnd_dma_pool, qp->buf_pool.buf_array[j],
+ qp->buf_pool.dma_array[j]);
+ qp->buf_pool.buf_array[j] = NULL;
+ qp->buf_pool.dma_array[j] = 0;
+ }
+ qp->buf_pool.num_alloc--;
+ if (!qp->buf_pool.num_alloc)
+ break;
+ }
+ ql_dbg(ql_dbg_io + ql_dbg_verbose, qp->vha, 0x3010,
+ "QP id(%d): trimmed %d bufs. Remain %d bufs\n",
+ qp->id, trim, qp->buf_pool.num_alloc);
+}
+
+static void __qla_adjust_buf(struct qla_qpair *qp)
+{
+ u32 trim;
+
+ qp->buf_pool.take_snapshot = 0;
+ qp->buf_pool.prev_max = qp->buf_pool.max_used;
+ qp->buf_pool.max_used = qp->buf_pool.num_active;
+
+ if (qp->buf_pool.prev_max > qp->buf_pool.max_used &&
+ qp->buf_pool.num_alloc > qp->buf_pool.max_used) {
+ /* down trend */
+ trim = qp->buf_pool.num_alloc - qp->buf_pool.max_used;
+ trim = (trim * 10) / 100;
+ trim = trim ? trim : 1;
+ qla_trim_buf(qp, trim);
+ } else if (!qp->buf_pool.prev_max && !qp->buf_pool.max_used) {
+ /* 2 periods of no io */
+ qla_trim_buf(qp, qp->buf_pool.num_alloc);
+ }
+}
+
+/* it is assume qp->qp_lock is held at this point */
+void qla_put_buf(struct qla_qpair *qp, struct qla_buf_dsc *dsc)
+{
+ if (dsc->tag == TAG_FREED)
+ return;
+ lockdep_assert_held(qp->qp_lock_ptr);
+
+ clear_bit(dsc->tag, qp->buf_pool.buf_map);
+ qp->buf_pool.num_active--;
+ dsc->tag = TAG_FREED;
+
+ if (qp->buf_pool.take_snapshot)
+ __qla_adjust_buf(qp);
+}
+
+#define EXPIRE (60 * HZ)
+void qla_adjust_buf(struct scsi_qla_host *vha)
+{
+ unsigned long flags;
+ int i;
+ struct qla_qpair *qp;
+
+ if (vha->vp_idx)
+ return;
+
+ if (!vha->buf_expired) {
+ vha->buf_expired = jiffies + EXPIRE;
+ return;
+ }
+ if (time_before(jiffies, vha->buf_expired))
+ return;
+
+ vha->buf_expired = jiffies + EXPIRE;
+
+ for (i = 0; i < vha->hw->num_qpairs; i++) {
+ qp = vha->hw->queue_pair_map[i];
+ if (!qp)
+ continue;
+ if (!qp->buf_pool.num_alloc)
+ continue;
+
+ if (qp->buf_pool.take_snapshot) {
+ /* no io has gone through in the last EXPIRE period */
+ spin_lock_irqsave(qp->qp_lock_ptr, flags);
+ __qla_adjust_buf(qp);
+ spin_unlock_irqrestore(qp->qp_lock_ptr, flags);
+ } else {
+ qp->buf_pool.take_snapshot = 1;
+ }
+ }
+}
diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
index 02fdeb0d31ec..648e8f798606 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.c
+++ b/drivers/scsi/qla2xxx/qla_nvme.c
@@ -170,18 +170,6 @@ out:
qla2xxx_rel_qpair_sp(sp->qpair, sp);
}
-static void qla_nvme_ls_unmap(struct srb *sp, struct nvmefc_ls_req *fd)
-{
- if (sp->flags & SRB_DMA_VALID) {
- struct srb_iocb *nvme = &sp->u.iocb_cmd;
- struct qla_hw_data *ha = sp->fcport->vha->hw;
-
- dma_unmap_single(&ha->pdev->dev, nvme->u.nvme.cmd_dma,
- fd->rqstlen, DMA_TO_DEVICE);
- sp->flags &= ~SRB_DMA_VALID;
- }
-}
-
static void qla_nvme_release_ls_cmd_kref(struct kref *kref)
{
struct srb *sp = container_of(kref, struct srb, cmd_kref);
@@ -199,7 +187,6 @@ static void qla_nvme_release_ls_cmd_kref(struct kref *kref)
fd = priv->fd;
- qla_nvme_ls_unmap(sp, fd);
fd->done(fd, priv->comp_status);
out:
qla2x00_rel_sp(sp);
@@ -365,13 +352,10 @@ static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
nvme->u.nvme.rsp_len = fd->rsplen;
nvme->u.nvme.rsp_dma = fd->rspdma;
nvme->u.nvme.timeout_sec = fd->timeout;
- nvme->u.nvme.cmd_dma = dma_map_single(&ha->pdev->dev, fd->rqstaddr,
- fd->rqstlen, DMA_TO_DEVICE);
+ nvme->u.nvme.cmd_dma = fd->rqstdma;
dma_sync_single_for_device(&ha->pdev->dev, nvme->u.nvme.cmd_dma,
fd->rqstlen, DMA_TO_DEVICE);
- sp->flags |= SRB_DMA_VALID;
-
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0x700e,
@@ -379,7 +363,6 @@ static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
wake_up(&sp->nvme_ls_waitq);
sp->priv = NULL;
priv->sp = NULL;
- qla_nvme_ls_unmap(sp, fd);
qla2x00_rel_sp(sp);
return rval;
}
@@ -445,13 +428,24 @@ static inline int qla2x00_start_nvme_mq(srb_t *sp)
goto queuing_error;
}
req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
+
+ sp->iores.res_type = RESOURCE_IOCB | RESOURCE_EXCH;
+ sp->iores.exch_cnt = 1;
+ sp->iores.iocb_cnt = req_cnt;
+ if (qla_get_fw_resources(sp->qpair, &sp->iores)) {
+ rval = -EBUSY;
+ goto queuing_error;
+ }
+
if (req->cnt < (req_cnt + 2)) {
if (IS_SHADOW_REG_CAPABLE(ha)) {
cnt = *req->out_ptr;
} else {
cnt = rd_reg_dword_relaxed(req->req_q_out);
- if (qla2x00_check_reg16_for_disconnect(vha, cnt))
+ if (qla2x00_check_reg16_for_disconnect(vha, cnt)) {
+ rval = -EBUSY;
goto queuing_error;
+ }
}
if (req->ring_index < cnt)
@@ -600,6 +594,8 @@ static inline int qla2x00_start_nvme_mq(srb_t *sp)
qla24xx_process_response_queue(vha, rsp);
queuing_error:
+ if (rval)
+ qla_put_fw_resources(sp->qpair, &sp->iores);
spin_unlock_irqrestore(&qpair->qp_lock, flags);
return rval;
@@ -613,6 +609,7 @@ static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
fc_port_t *fcport;
struct srb_iocb *nvme;
struct scsi_qla_host *vha;
+ struct qla_hw_data *ha;
int rval;
srb_t *sp;
struct qla_qpair *qpair = hw_queue_handle;
@@ -633,6 +630,7 @@ static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
return -ENODEV;
vha = fcport->vha;
+ ha = vha->hw;
if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
return -EBUSY;
@@ -647,6 +645,8 @@ static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
if (fcport->nvme_flag & NVME_FLAG_RESETTING)
return -EBUSY;
+ qpair = qla_mapq_nvme_select_qpair(ha, qpair);
+
/* Alloc SRB structure */
sp = qla2xxx_get_qpair_sp(vha, qpair, fcport, GFP_ATOMIC);
if (!sp)
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 7fb28c207ee5..545167627e48 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -472,6 +472,11 @@ static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req,
"Unable to allocate memory for queue pair ptrs.\n");
goto fail_qpair_map;
}
+ if (qla_mapq_alloc_qp_cpu_map(ha) != 0) {
+ kfree(ha->queue_pair_map);
+ ha->queue_pair_map = NULL;
+ goto fail_qpair_map;
+ }
}
/*
@@ -546,6 +551,7 @@ static void qla2x00_free_queues(struct qla_hw_data *ha)
ha->base_qpair = NULL;
}
+ qla_mapq_free_qp_cpu_map(ha);
spin_lock_irqsave(&ha->hardware_lock, flags);
for (cnt = 0; cnt < ha->max_req_queues; cnt++) {
if (!test_bit(cnt, ha->req_qid_map))
@@ -733,15 +739,17 @@ void qla2x00_sp_free_dma(srb_t *sp)
}
if (sp->flags & SRB_FCP_CMND_DMA_VALID) {
- struct ct6_dsd *ctx1 = sp->u.scmd.ct6_ctx;
+ struct ct6_dsd *ctx1 = &sp->u.scmd.ct6_ctx;
dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd,
ctx1->fcp_cmnd_dma);
list_splice(&ctx1->dsd_list, &ha->gbl_dsd_list);
ha->gbl_dsd_inuse -= ctx1->dsd_use_cnt;
ha->gbl_dsd_avail += ctx1->dsd_use_cnt;
- mempool_free(ctx1, ha->ctx_mempool);
}
+
+ if (sp->flags & SRB_GOT_BUF)
+ qla_put_buf(sp->qpair, &sp->u.scmd.buf_dsc);
}
void qla2x00_sp_compl(srb_t *sp, int res)
@@ -817,14 +825,13 @@ void qla2xxx_qpair_sp_free_dma(srb_t *sp)
}
if (sp->flags & SRB_FCP_CMND_DMA_VALID) {
- struct ct6_dsd *ctx1 = sp->u.scmd.ct6_ctx;
+ struct ct6_dsd *ctx1 = &sp->u.scmd.ct6_ctx;
dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd,
ctx1->fcp_cmnd_dma);
list_splice(&ctx1->dsd_list, &ha->gbl_dsd_list);
ha->gbl_dsd_inuse -= ctx1->dsd_use_cnt;
ha->gbl_dsd_avail += ctx1->dsd_use_cnt;
- mempool_free(ctx1, ha->ctx_mempool);
sp->flags &= ~SRB_FCP_CMND_DMA_VALID;
}
@@ -834,6 +841,9 @@ void qla2xxx_qpair_sp_free_dma(srb_t *sp)
dma_pool_free(ha->dl_dma_pool, ctx0, ctx0->crc_ctx_dma);
sp->flags &= ~SRB_CRC_CTX_DMA_VALID;
}
+
+ if (sp->flags & SRB_GOT_BUF)
+ qla_put_buf(sp->qpair, &sp->u.scmd.buf_dsc);
}
void qla2xxx_qpair_sp_compl(srb_t *sp, int res)
@@ -4118,10 +4128,16 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
char name[16];
int rc;
+ if (QLA_TGT_MODE_ENABLED() || EDIF_CAP(ha)) {
+ ha->vp_map = kcalloc(MAX_MULTI_ID_FABRIC, sizeof(struct qla_vp_map), GFP_KERNEL);
+ if (!ha->vp_map)
+ goto fail;
+ }
+
ha->init_cb = dma_alloc_coherent(&ha->pdev->dev, ha->init_cb_size,
&ha->init_cb_dma, GFP_KERNEL);
if (!ha->init_cb)
- goto fail;
+ goto fail_free_vp_map;
rc = btree_init32(&ha->host_map);
if (rc)
@@ -4540,6 +4556,8 @@ fail_free_init_cb:
ha->init_cb_dma);
ha->init_cb = NULL;
ha->init_cb_dma = 0;
+fail_free_vp_map:
+ kfree(ha->vp_map);
fail:
ql_log(ql_log_fatal, NULL, 0x0030,
"Memory allocation failure.\n");
@@ -4981,6 +4999,9 @@ qla2x00_mem_free(struct qla_hw_data *ha)
ha->sf_init_cb = NULL;
ha->sf_init_cb_dma = 0;
ha->loop_id_map = NULL;
+
+ kfree(ha->vp_map);
+ ha->vp_map = NULL;
}
struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
@@ -5016,7 +5037,6 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
INIT_LIST_HEAD(&vha->plogi_ack_list);
INIT_LIST_HEAD(&vha->qp_list);
INIT_LIST_HEAD(&vha->gnl.fcports);
- INIT_LIST_HEAD(&vha->gpnid_list);
INIT_WORK(&vha->iocb_work, qla2x00_iocb_work_fn);
INIT_LIST_HEAD(&vha->purex_list.head);
@@ -5461,9 +5481,6 @@ qla2x00_do_work(struct scsi_qla_host *vha)
case QLA_EVT_AENFX:
qlafx00_process_aen(vha, e);
break;
- case QLA_EVT_GPNID:
- qla24xx_async_gpnid(vha, &e->u.gpnid.id);
- break;
case QLA_EVT_UNMAP:
qla24xx_sp_unmap(vha, e->u.iosb.sp);
break;
@@ -5506,9 +5523,6 @@ qla2x00_do_work(struct scsi_qla_host *vha)
case QLA_EVT_GNNFT_DONE:
qla24xx_async_gnnft_done(vha, e->u.iosb.sp);
break;
- case QLA_EVT_GNNID:
- qla24xx_async_gnnid(vha, e->u.fcport.fcport);
- break;
case QLA_EVT_GFPNID:
qla24xx_async_gfpnid(vha, e->u.fcport.fcport);
break;
@@ -7025,11 +7039,6 @@ qla2x00_do_dpc(void *data)
}
}
- if (test_and_clear_bit(FCPORT_UPDATE_NEEDED,
- &base_vha->dpc_flags)) {
- qla2x00_update_fcports(base_vha);
- }
-
if (IS_QLAFX00(ha))
goto loop_resync_check;
@@ -7094,9 +7103,12 @@ qla2x00_do_dpc(void *data)
}
}
loop_resync_check:
- if (test_and_clear_bit(LOOP_RESYNC_NEEDED,
+ if (!qla2x00_reset_active(base_vha) &&
+ test_and_clear_bit(LOOP_RESYNC_NEEDED,
&base_vha->dpc_flags)) {
-
+ /*
+ * Allow abort_isp to complete before moving on to scanning.
+ */
ql_dbg(ql_dbg_dpc, base_vha, 0x400f,
"Loop resync scheduled.\n");
@@ -7447,7 +7459,7 @@ qla2x00_timer(struct timer_list *t)
/* if the loop has been down for 4 minutes, reinit adapter */
if (atomic_dec_and_test(&vha->loop_down_timer) != 0) {
- if (!(vha->device_flags & DFLG_NO_CABLE)) {
+ if (!(vha->device_flags & DFLG_NO_CABLE) && !vha->vp_idx) {
ql_log(ql_log_warn, vha, 0x6009,
"Loop down - aborting ISP.\n");
@@ -7516,13 +7528,13 @@ qla2x00_timer(struct timer_list *t)
set_bit(SET_ZIO_THRESHOLD_NEEDED, &vha->dpc_flags);
start_dpc++;
}
+ qla_adjust_buf(vha);
/* borrowing w to signify dpc will run */
w = 0;
/* Schedule the DPC routine if needed */
if ((test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) ||
- test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags) ||
start_dpc ||
test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) ||
test_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags) ||
@@ -7533,13 +7545,10 @@ qla2x00_timer(struct timer_list *t)
test_bit(PROCESS_PUREX_IOCB, &vha->dpc_flags))) {
ql_dbg(ql_dbg_timer, vha, 0x600b,
"isp_abort_needed=%d loop_resync_needed=%d "
- "fcport_update_needed=%d start_dpc=%d "
- "reset_marker_needed=%d",
+ "start_dpc=%d reset_marker_needed=%d",
test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags),
test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags),
- test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags),
- start_dpc,
- test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags));
+ start_dpc, test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags));
ql_dbg(ql_dbg_timer, vha, 0x600c,
"beacon_blink_needed=%d isp_unrecoverable=%d "
"fcoe_ctx_reset_needed=%d vp_dpc_needed=%d "
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 548f22705ddc..aa0cf5ca6c1c 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -198,22 +198,6 @@ struct scsi_qla_host *qla_find_host_by_d_id(struct scsi_qla_host *vha,
return host;
}
-static inline
-struct scsi_qla_host *qlt_find_host_by_vp_idx(struct scsi_qla_host *vha,
- uint16_t vp_idx)
-{
- struct qla_hw_data *ha = vha->hw;
-
- if (vha->vp_idx == vp_idx)
- return vha;
-
- BUG_ON(ha->tgt.tgt_vp_map == NULL);
- if (likely(test_bit(vp_idx, ha->vp_idx_map)))
- return ha->tgt.tgt_vp_map[vp_idx].vha;
-
- return NULL;
-}
-
static inline void qlt_incr_num_pend_cmds(struct scsi_qla_host *vha)
{
unsigned long flags;
@@ -371,7 +355,7 @@ static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
if ((entry->u.isp24.vp_index != 0xFF) &&
(entry->u.isp24.nport_handle != cpu_to_le16(0xFFFF))) {
- host = qlt_find_host_by_vp_idx(vha,
+ host = qla_find_host_by_vp_idx(vha,
entry->u.isp24.vp_index);
if (unlikely(!host)) {
ql_dbg(ql_dbg_tgt, vha, 0xe03f,
@@ -395,7 +379,7 @@ static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
{
struct abts_recv_from_24xx *entry =
(struct abts_recv_from_24xx *)atio;
- struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
+ struct scsi_qla_host *host = qla_find_host_by_vp_idx(vha,
entry->vp_index);
unsigned long flags;
@@ -438,7 +422,7 @@ void qlt_response_pkt_all_vps(struct scsi_qla_host *vha,
case CTIO_TYPE7:
{
struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
- struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
+ struct scsi_qla_host *host = qla_find_host_by_vp_idx(vha,
entry->vp_index);
if (unlikely(!host)) {
ql_dbg(ql_dbg_tgt, vha, 0xe041,
@@ -457,7 +441,7 @@ void qlt_response_pkt_all_vps(struct scsi_qla_host *vha,
struct imm_ntfy_from_isp *entry =
(struct imm_ntfy_from_isp *)pkt;
- host = qlt_find_host_by_vp_idx(vha, entry->u.isp24.vp_index);
+ host = qla_find_host_by_vp_idx(vha, entry->u.isp24.vp_index);
if (unlikely(!host)) {
ql_dbg(ql_dbg_tgt, vha, 0xe042,
"qla_target(%d): Response pkt (IMMED_NOTIFY_TYPE) "
@@ -475,7 +459,7 @@ void qlt_response_pkt_all_vps(struct scsi_qla_host *vha,
struct nack_to_isp *entry = (struct nack_to_isp *)pkt;
if (0xFF != entry->u.isp24.vp_index) {
- host = qlt_find_host_by_vp_idx(vha,
+ host = qla_find_host_by_vp_idx(vha,
entry->u.isp24.vp_index);
if (unlikely(!host)) {
ql_dbg(ql_dbg_tgt, vha, 0xe043,
@@ -495,7 +479,7 @@ void qlt_response_pkt_all_vps(struct scsi_qla_host *vha,
{
struct abts_recv_from_24xx *entry =
(struct abts_recv_from_24xx *)pkt;
- struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
+ struct scsi_qla_host *host = qla_find_host_by_vp_idx(vha,
entry->vp_index);
if (unlikely(!host)) {
ql_dbg(ql_dbg_tgt, vha, 0xe044,
@@ -512,7 +496,7 @@ void qlt_response_pkt_all_vps(struct scsi_qla_host *vha,
{
struct abts_resp_to_24xx *entry =
(struct abts_resp_to_24xx *)pkt;
- struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
+ struct scsi_qla_host *host = qla_find_host_by_vp_idx(vha,
entry->vp_index);
if (unlikely(!host)) {
ql_dbg(ql_dbg_tgt, vha, 0xe045,
@@ -1028,8 +1012,7 @@ void qlt_free_session_done(struct work_struct *work)
}
if (ha->flags.edif_enabled &&
- (!own || (own &&
- own->iocb.u.isp24.status_subcode == ELS_PLOGI))) {
+ (!own || own->iocb.u.isp24.status_subcode == ELS_PLOGI)) {
sess->edif.authok = 0;
if (!ha->flags.host_shutting_down) {
ql_dbg(ql_dbg_edif, vha, 0x911e,
@@ -7145,7 +7128,7 @@ qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
qlt_clear_mode(base_vha);
- qlt_update_vp_map(base_vha, SET_VP_IDX);
+ qla_update_vp_map(base_vha, SET_VP_IDX);
}
irqreturn_t
@@ -7224,17 +7207,10 @@ qlt_mem_alloc(struct qla_hw_data *ha)
if (!QLA_TGT_MODE_ENABLED())
return 0;
- ha->tgt.tgt_vp_map = kcalloc(MAX_MULTI_ID_FABRIC,
- sizeof(struct qla_tgt_vp_map),
- GFP_KERNEL);
- if (!ha->tgt.tgt_vp_map)
- return -ENOMEM;
-
ha->tgt.atio_ring = dma_alloc_coherent(&ha->pdev->dev,
(ha->tgt.atio_q_length + 1) * sizeof(struct atio_from_isp),
&ha->tgt.atio_dma, GFP_KERNEL);
if (!ha->tgt.atio_ring) {
- kfree(ha->tgt.tgt_vp_map);
return -ENOMEM;
}
return 0;
@@ -7253,70 +7229,6 @@ qlt_mem_free(struct qla_hw_data *ha)
}
ha->tgt.atio_ring = NULL;
ha->tgt.atio_dma = 0;
- kfree(ha->tgt.tgt_vp_map);
- ha->tgt.tgt_vp_map = NULL;
-}
-
-/* vport_slock to be held by the caller */
-void
-qlt_update_vp_map(struct scsi_qla_host *vha, int cmd)
-{
- void *slot;
- u32 key;
- int rc;
-
- key = vha->d_id.b24;
-
- switch (cmd) {
- case SET_VP_IDX:
- if (!QLA_TGT_MODE_ENABLED())
- return;
- vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = vha;
- break;
- case SET_AL_PA:
- slot = btree_lookup32(&vha->hw->host_map, key);
- if (!slot) {
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf018,
- "Save vha in host_map %p %06x\n", vha, key);
- rc = btree_insert32(&vha->hw->host_map,
- key, vha, GFP_ATOMIC);
- if (rc)
- ql_log(ql_log_info, vha, 0xd03e,
- "Unable to insert s_id into host_map: %06x\n",
- key);
- return;
- }
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf019,
- "replace existing vha in host_map %p %06x\n", vha, key);
- btree_update32(&vha->hw->host_map, key, vha);
- break;
- case RESET_VP_IDX:
- if (!QLA_TGT_MODE_ENABLED())
- return;
- vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = NULL;
- break;
- case RESET_AL_PA:
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01a,
- "clear vha in host_map %p %06x\n", vha, key);
- slot = btree_lookup32(&vha->hw->host_map, key);
- if (slot)
- btree_remove32(&vha->hw->host_map, key);
- vha->d_id.b24 = 0;
- break;
- }
-}
-
-void qlt_update_host_map(struct scsi_qla_host *vha, port_id_t id)
-{
-
- if (!vha->d_id.b24) {
- vha->d_id = id;
- qlt_update_vp_map(vha, SET_AL_PA);
- } else if (vha->d_id.b24 != id.b24) {
- qlt_update_vp_map(vha, RESET_AL_PA);
- vha->d_id = id;
- qlt_update_vp_map(vha, SET_AL_PA);
- }
}
static int __init qlt_parse_ini_mode(void)
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index 7df86578214f..354fca2e7feb 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -1017,7 +1017,6 @@ extern void qlt_fc_port_added(struct scsi_qla_host *, fc_port_t *);
extern void qlt_fc_port_deleted(struct scsi_qla_host *, fc_port_t *, int);
extern int __init qlt_init(void);
extern void qlt_exit(void);
-extern void qlt_update_vp_map(struct scsi_qla_host *, int);
extern void qlt_free_session_done(struct work_struct *);
/*
* This macro is used during early initializations when host->active_mode
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 03f3e2cd62b5..42d69d89834f 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -6,9 +6,9 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "10.02.07.900-k"
+#define QLA2XXX_VERSION "10.02.08.200-k"
#define QLA_DRIVER_MAJOR_VER 10
#define QLA_DRIVER_MINOR_VER 2
-#define QLA_DRIVER_PATCH_VER 7
-#define QLA_DRIVER_BETA_VER 900
+#define QLA_DRIVER_PATCH_VER 8
+#define QLA_DRIVER_BETA_VER 200
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index 8fa0056b56dd..8024322c9c5a 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -1552,6 +1552,7 @@ static const struct qla_tgt_func_tmpl tcm_qla2xxx_template = {
static int tcm_qla2xxx_init_lport(struct tcm_qla2xxx_lport *lport)
{
int rc;
+ size_t map_sz;
rc = btree_init32(&lport->lport_fcport_map);
if (rc) {
@@ -1559,17 +1560,15 @@ static int tcm_qla2xxx_init_lport(struct tcm_qla2xxx_lport *lport)
return rc;
}
- lport->lport_loopid_map =
- vzalloc(array_size(65536,
- sizeof(struct tcm_qla2xxx_fc_loopid)));
+ map_sz = array_size(65536, sizeof(struct tcm_qla2xxx_fc_loopid));
+
+ lport->lport_loopid_map = vzalloc(map_sz);
if (!lport->lport_loopid_map) {
- pr_err("Unable to allocate lport->lport_loopid_map of %zu bytes\n",
- sizeof(struct tcm_qla2xxx_fc_loopid) * 65536);
+ pr_err("Unable to allocate lport->lport_loopid_map of %zu bytes\n", map_sz);
btree_destroy32(&lport->lport_fcport_map);
return -ENOMEM;
}
- pr_debug("qla2xxx: Allocated lport_loopid_map of %zu bytes\n",
- sizeof(struct tcm_qla2xxx_fc_loopid) * 65536);
+ pr_debug("qla2xxx: Allocated lport_loopid_map of %zu bytes\n", map_sz);
return 0;
}
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 1426b9b03612..7d2210a006f0 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -309,8 +309,8 @@ static int scsi_vpd_inquiry(struct scsi_device *sdev, unsigned char *buffer,
* I'm not convinced we need to try quite this hard to get VPD, but
* all the existing users tried this hard.
*/
- result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer,
- len, NULL, 30 * HZ, 3, NULL);
+ result = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_IN, buffer, len,
+ 30 * HZ, 3, NULL);
if (result)
return -EIO;
@@ -510,6 +510,9 @@ int scsi_report_opcode(struct scsi_device *sdev, unsigned char *buffer,
unsigned char cmd[16];
struct scsi_sense_hdr sshdr;
int result, request_len;
+ const struct scsi_exec_args exec_args = {
+ .sshdr = &sshdr,
+ };
if (sdev->no_report_opcodes || sdev->scsi_level < SCSI_SPC_3)
return -EINVAL;
@@ -531,9 +534,8 @@ int scsi_report_opcode(struct scsi_device *sdev, unsigned char *buffer,
put_unaligned_be32(request_len, &cmd[6]);
memset(buffer, 0, len);
- result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer,
- request_len, &sshdr, 30 * HZ, 3, NULL);
-
+ result = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_IN, buffer,
+ request_len, 30 * HZ, 3, &exec_args);
if (result < 0)
return result;
if (result && scsi_sense_valid(&sshdr) &&
@@ -588,8 +590,6 @@ void scsi_device_put(struct scsi_device *sdev)
{
struct module *mod = sdev->host->hostt->module;
- might_sleep();
-
put_device(&sdev->sdev_gendev);
module_put(mod);
}
diff --git a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c
index 1126a265d5ee..e3b31d32b6a9 100644
--- a/drivers/scsi/scsi_ioctl.c
+++ b/drivers/scsi/scsi_ioctl.c
@@ -69,12 +69,15 @@ static int ioctl_internal_command(struct scsi_device *sdev, char *cmd,
{
int result;
struct scsi_sense_hdr sshdr;
+ const struct scsi_exec_args exec_args = {
+ .sshdr = &sshdr,
+ };
SCSI_LOG_IOCTL(1, sdev_printk(KERN_INFO, sdev,
"Trying ioctl with scsi command %d\n", *cmd));
- result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0,
- &sshdr, timeout, retries, NULL);
+ result = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_IN, NULL, 0, timeout,
+ retries, &exec_args);
SCSI_LOG_IOCTL(2, sdev_printk(KERN_INFO, sdev,
"Ioctl returned 0x%x\n", result));
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 9ed1ebcb7443..abe93ec8b7d0 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -185,39 +185,37 @@ void scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
__scsi_queue_insert(cmd, reason, true);
}
-
/**
- * __scsi_execute - insert request and wait for the result
- * @sdev: scsi device
+ * scsi_execute_cmd - insert request and wait for the result
+ * @sdev: scsi_device
* @cmd: scsi command
- * @data_direction: data direction
+ * @opf: block layer request cmd_flags
* @buffer: data buffer
* @bufflen: len of buffer
- * @sense: optional sense buffer
- * @sshdr: optional decoded sense header
* @timeout: request timeout in HZ
* @retries: number of times to retry request
- * @flags: flags for ->cmd_flags
- * @rq_flags: flags for ->rq_flags
- * @resid: optional residual length
+ * @args: Optional args. See struct definition for field descriptions
*
* Returns the scsi_cmnd result field if a command was executed, or a negative
* Linux error code if we didn't get that far.
*/
-int __scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
- int data_direction, void *buffer, unsigned bufflen,
- unsigned char *sense, struct scsi_sense_hdr *sshdr,
- int timeout, int retries, blk_opf_t flags,
- req_flags_t rq_flags, int *resid)
+int scsi_execute_cmd(struct scsi_device *sdev, const unsigned char *cmd,
+ blk_opf_t opf, void *buffer, unsigned int bufflen,
+ int timeout, int retries,
+ const struct scsi_exec_args *args)
{
+ static const struct scsi_exec_args default_args;
struct request *req;
struct scsi_cmnd *scmd;
int ret;
- req = scsi_alloc_request(sdev->request_queue,
- data_direction == DMA_TO_DEVICE ?
- REQ_OP_DRV_OUT : REQ_OP_DRV_IN,
- rq_flags & RQF_PM ? BLK_MQ_REQ_PM : 0);
+ if (!args)
+ args = &default_args;
+ else if (WARN_ON_ONCE(args->sense &&
+ args->sense_len != SCSI_SENSE_BUFFERSIZE))
+ return -EINVAL;
+
+ req = scsi_alloc_request(sdev->request_queue, opf, args->req_flags);
if (IS_ERR(req))
return PTR_ERR(req);
@@ -232,8 +230,7 @@ int __scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
memcpy(scmd->cmnd, cmd, scmd->cmd_len);
scmd->allowed = retries;
req->timeout = timeout;
- req->cmd_flags |= flags;
- req->rq_flags |= rq_flags | RQF_QUIET;
+ req->rq_flags |= RQF_QUIET;
/*
* head injection *required* here otherwise quiesce won't work
@@ -249,20 +246,21 @@ int __scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
if (unlikely(scmd->resid_len > 0 && scmd->resid_len <= bufflen))
memset(buffer + bufflen - scmd->resid_len, 0, scmd->resid_len);
- if (resid)
- *resid = scmd->resid_len;
- if (sense && scmd->sense_len)
- memcpy(sense, scmd->sense_buffer, SCSI_SENSE_BUFFERSIZE);
- if (sshdr)
+ if (args->resid)
+ *args->resid = scmd->resid_len;
+ if (args->sense)
+ memcpy(args->sense, scmd->sense_buffer, SCSI_SENSE_BUFFERSIZE);
+ if (args->sshdr)
scsi_normalize_sense(scmd->sense_buffer, scmd->sense_len,
- sshdr);
+ args->sshdr);
+
ret = scmd->result;
out:
blk_mq_free_request(req);
return ret;
}
-EXPORT_SYMBOL(__scsi_execute);
+EXPORT_SYMBOL(scsi_execute_cmd);
/*
* Wake up the error handler if necessary. Avoid as follows that the error
@@ -2086,6 +2084,9 @@ int scsi_mode_select(struct scsi_device *sdev, int pf, int sp,
{
unsigned char cmd[10];
unsigned char *real_buffer;
+ const struct scsi_exec_args exec_args = {
+ .sshdr = sshdr,
+ };
int ret;
memset(cmd, 0, sizeof(cmd));
@@ -2135,8 +2136,8 @@ int scsi_mode_select(struct scsi_device *sdev, int pf, int sp,
cmd[4] = len;
}
- ret = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, real_buffer, len,
- sshdr, timeout, retries, NULL);
+ ret = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_OUT, real_buffer, len,
+ timeout, retries, &exec_args);
kfree(real_buffer);
return ret;
}
@@ -2167,6 +2168,10 @@ scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
int header_length;
int result, retry_count = retries;
struct scsi_sense_hdr my_sshdr;
+ const struct scsi_exec_args exec_args = {
+ /* caller might not be interested in sense, but we need it */
+ .sshdr = sshdr ? : &my_sshdr,
+ };
memset(data, 0, sizeof(*data));
memset(&cmd[0], 0, 12);
@@ -2175,9 +2180,7 @@ scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
cmd[1] = dbd & 0x18; /* allows DBD and LLBA bits */
cmd[2] = modepage;
- /* caller might not be interested in sense, but we need it */
- if (!sshdr)
- sshdr = &my_sshdr;
+ sshdr = exec_args.sshdr;
retry:
use_10_for_ms = sdev->use_10_for_ms || len > 255;
@@ -2200,8 +2203,8 @@ scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
memset(buffer, 0, len);
- result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len,
- sshdr, timeout, retries, NULL);
+ result = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_IN, buffer, len,
+ timeout, retries, &exec_args);
if (result < 0)
return result;
@@ -2281,12 +2284,15 @@ scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries,
char cmd[] = {
TEST_UNIT_READY, 0, 0, 0, 0, 0,
};
+ const struct scsi_exec_args exec_args = {
+ .sshdr = sshdr,
+ };
int result;
/* try to eat the UNIT_ATTENTION if there are enough retries */
do {
- result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, sshdr,
- timeout, 1, NULL);
+ result = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_IN, NULL, 0,
+ timeout, 1, &exec_args);
if (sdev->removable && scsi_sense_valid(sshdr) &&
sshdr->sense_key == UNIT_ATTENTION)
sdev->changed = 1;
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 7a6904a3928e..4e842d79de31 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -210,7 +210,7 @@ static void scsi_unlock_floptical(struct scsi_device *sdev,
scsi_cmd[3] = 0;
scsi_cmd[4] = 0x2a; /* size */
scsi_cmd[5] = 0;
- scsi_execute_req(sdev, scsi_cmd, DMA_FROM_DEVICE, result, 0x2a, NULL,
+ scsi_execute_cmd(sdev, scsi_cmd, REQ_OP_DRV_IN, result, 0x2a,
SCSI_TIMEOUT, 3, NULL);
}
@@ -646,8 +646,12 @@ static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
unsigned char scsi_cmd[MAX_COMMAND_SIZE];
int first_inquiry_len, try_inquiry_len, next_inquiry_len;
int response_len = 0;
- int pass, count, result;
+ int pass, count, result, resid;
struct scsi_sense_hdr sshdr;
+ const struct scsi_exec_args exec_args = {
+ .sshdr = &sshdr,
+ .resid = &resid,
+ };
*bflags = 0;
@@ -665,18 +669,16 @@ static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
/* Each pass gets up to three chances to ignore Unit Attention */
for (count = 0; count < 3; ++count) {
- int resid;
-
memset(scsi_cmd, 0, 6);
scsi_cmd[0] = INQUIRY;
scsi_cmd[4] = (unsigned char) try_inquiry_len;
memset(inq_result, 0, try_inquiry_len);
- result = scsi_execute_req(sdev, scsi_cmd, DMA_FROM_DEVICE,
- inq_result, try_inquiry_len, &sshdr,
+ result = scsi_execute_cmd(sdev, scsi_cmd, REQ_OP_DRV_IN,
+ inq_result, try_inquiry_len,
HZ / 2 + HZ * scsi_inq_timeout, 3,
- &resid);
+ &exec_args);
SCSI_LOG_SCAN_BUS(3, sdev_printk(KERN_INFO, sdev,
"scsi scan: INQUIRY %s with code 0x%x\n",
@@ -1232,8 +1234,7 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
* that no LUN is present, so don't add sdev in these cases.
* Two specific examples are:
* 1) NetApp targets: return PQ=1, PDT=0x1f
- * 2) IBM/2145 targets: return PQ=1, PDT=0
- * 3) USB UFI: returns PDT=0x1f, with the PQ bits being "reserved"
+ * 2) USB UFI: returns PDT=0x1f, with the PQ bits being "reserved"
* in the UFI 1.0 spec (we cannot rely on reserved bits).
*
* References:
@@ -1247,8 +1248,8 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
* PDT=00h Direct-access device (floppy)
* PDT=1Fh none (no FDD connected to the requested logical unit)
*/
- if (((result[0] >> 5) == 1 ||
- (starget->pdt_1f_for_no_lun && (result[0] & 0x1f) == 0x1f)) &&
+ if (((result[0] >> 5) == 1 || starget->pdt_1f_for_no_lun) &&
+ (result[0] & 0x1f) == 0x1f &&
!scsi_is_wlun(lun)) {
SCSI_LOG_SCAN_BUS(3, sdev_printk(KERN_INFO, sdev,
"scsi scan: peripheral device type"
@@ -1402,6 +1403,9 @@ static int scsi_report_lun_scan(struct scsi_target *starget, blist_flags_t bflag
struct scsi_sense_hdr sshdr;
struct scsi_device *sdev;
struct Scsi_Host *shost = dev_to_shost(&starget->dev);
+ const struct scsi_exec_args exec_args = {
+ .sshdr = &sshdr,
+ };
int ret = 0;
/*
@@ -1476,9 +1480,10 @@ retry:
"scsi scan: Sending REPORT LUNS to (try %d)\n",
retries));
- result = scsi_execute_req(sdev, scsi_cmd, DMA_FROM_DEVICE,
- lun_data, length, &sshdr,
- SCSI_REPORT_LUNS_TIMEOUT, 3, NULL);
+ result = scsi_execute_cmd(sdev, scsi_cmd, REQ_OP_DRV_IN,
+ lun_data, length,
+ SCSI_REPORT_LUNS_TIMEOUT, 3,
+ &exec_args);
SCSI_LOG_SCAN_BUS(3, sdev_printk (KERN_INFO, sdev,
"scsi scan: REPORT LUNS"
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 981d1bab2120..ee28f73af4d4 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -451,6 +451,8 @@ static void scsi_device_dev_release(struct device *dev)
struct scsi_vpd *vpd_pgb0 = NULL, *vpd_pgb1 = NULL, *vpd_pgb2 = NULL;
unsigned long flags;
+ might_sleep();
+
scsi_dh_release_device(sdev);
parent = sdev->sdev_gendev.parent;
@@ -534,9 +536,9 @@ static int scsi_bus_match(struct device *dev, struct device_driver *gendrv)
return (sdp->inq_periph_qual == SCSI_INQ_PQ_CON)? 1: 0;
}
-static int scsi_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
+static int scsi_bus_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
- struct scsi_device *sdev;
+ const struct scsi_device *sdev;
if (dev->type != &scsi_dev_type)
return 0;
diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c
index f569cf0095c2..2442d4d2e3f3 100644
--- a/drivers/scsi/scsi_transport_spi.c
+++ b/drivers/scsi/scsi_transport_spi.c
@@ -105,28 +105,27 @@ static int sprint_frac(char *dest, int value, int denom)
}
static int spi_execute(struct scsi_device *sdev, const void *cmd,
- enum dma_data_direction dir,
- void *buffer, unsigned bufflen,
+ enum req_op op, void *buffer, unsigned int bufflen,
struct scsi_sense_hdr *sshdr)
{
int i, result;
- unsigned char sense[SCSI_SENSE_BUFFERSIZE];
struct scsi_sense_hdr sshdr_tmp;
+ blk_opf_t opf = op | REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
+ REQ_FAILFAST_DRIVER;
+ const struct scsi_exec_args exec_args = {
+ .req_flags = BLK_MQ_REQ_PM,
+ .sshdr = sshdr ? : &sshdr_tmp,
+ };
- if (!sshdr)
- sshdr = &sshdr_tmp;
+ sshdr = exec_args.sshdr;
for(i = 0; i < DV_RETRIES; i++) {
/*
* The purpose of the RQF_PM flag below is to bypass the
* SDEV_QUIESCE state.
*/
- result = scsi_execute(sdev, cmd, dir, buffer, bufflen, sense,
- sshdr, DV_TIMEOUT, /* retries */ 1,
- REQ_FAILFAST_DEV |
- REQ_FAILFAST_TRANSPORT |
- REQ_FAILFAST_DRIVER,
- RQF_PM, NULL);
+ result = scsi_execute_cmd(sdev, cmd, opf, buffer, bufflen,
+ DV_TIMEOUT, 1, &exec_args);
if (result < 0 || !scsi_sense_valid(sshdr) ||
sshdr->sense_key != UNIT_ATTENTION)
break;
@@ -675,7 +674,7 @@ spi_dv_device_echo_buffer(struct scsi_device *sdev, u8 *buffer,
}
for (r = 0; r < retries; r++) {
- result = spi_execute(sdev, spi_write_buffer, DMA_TO_DEVICE,
+ result = spi_execute(sdev, spi_write_buffer, REQ_OP_DRV_OUT,
buffer, len, &sshdr);
if(result || !scsi_device_online(sdev)) {
@@ -697,7 +696,7 @@ spi_dv_device_echo_buffer(struct scsi_device *sdev, u8 *buffer,
}
memset(ptr, 0, len);
- spi_execute(sdev, spi_read_buffer, DMA_FROM_DEVICE,
+ spi_execute(sdev, spi_read_buffer, REQ_OP_DRV_IN,
ptr, len, NULL);
scsi_device_set_state(sdev, SDEV_QUIESCE);
@@ -722,7 +721,7 @@ spi_dv_device_compare_inquiry(struct scsi_device *sdev, u8 *buffer,
for (r = 0; r < retries; r++) {
memset(ptr, 0, len);
- result = spi_execute(sdev, spi_inquiry, DMA_FROM_DEVICE,
+ result = spi_execute(sdev, spi_inquiry, REQ_OP_DRV_IN,
ptr, len, NULL);
if(result || !scsi_device_online(sdev)) {
@@ -828,7 +827,7 @@ spi_dv_device_get_echo_buffer(struct scsi_device *sdev, u8 *buffer)
* (reservation conflict, device not ready, etc) just
* skip the write tests */
for (l = 0; ; l++) {
- result = spi_execute(sdev, spi_test_unit_ready, DMA_NONE,
+ result = spi_execute(sdev, spi_test_unit_ready, REQ_OP_DRV_IN,
NULL, 0, NULL);
if(result) {
@@ -841,7 +840,7 @@ spi_dv_device_get_echo_buffer(struct scsi_device *sdev, u8 *buffer)
}
result = spi_execute(sdev, spi_read_buffer_descriptor,
- DMA_FROM_DEVICE, buffer, 4, NULL);
+ REQ_OP_DRV_IN, buffer, 4, NULL);
if (result)
/* Device has no echo buffer */
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 47dafe6b8a66..a38c71511bc9 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -664,6 +664,9 @@ static int sd_sec_submit(void *data, u16 spsp, u8 secp, void *buffer,
struct scsi_disk *sdkp = data;
struct scsi_device *sdev = sdkp->device;
u8 cdb[12] = { 0, };
+ const struct scsi_exec_args exec_args = {
+ .req_flags = BLK_MQ_REQ_PM,
+ };
int ret;
cdb[0] = send ? SECURITY_PROTOCOL_OUT : SECURITY_PROTOCOL_IN;
@@ -671,9 +674,9 @@ static int sd_sec_submit(void *data, u16 spsp, u8 secp, void *buffer,
put_unaligned_be16(spsp, &cdb[2]);
put_unaligned_be32(len, &cdb[6]);
- ret = scsi_execute(sdev, cdb, send ? DMA_TO_DEVICE : DMA_FROM_DEVICE,
- buffer, len, NULL, NULL, SD_TIMEOUT, sdkp->max_retries, 0,
- RQF_PM, NULL);
+ ret = scsi_execute_cmd(sdev, cdb, send ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN,
+ buffer, len, SD_TIMEOUT, sdkp->max_retries,
+ &exec_args);
return ret <= 0 ? ret : -EIO;
}
#endif /* CONFIG_BLK_SED_OPAL */
@@ -831,6 +834,19 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
blk_queue_max_discard_sectors(q, max_blocks * (logical_block_size >> 9));
}
+static void *sd_set_special_bvec(struct request *rq, unsigned int data_len)
+{
+ struct page *page;
+
+ page = mempool_alloc(sd_page_pool, GFP_ATOMIC);
+ if (!page)
+ return NULL;
+ clear_highpage(page);
+ bvec_set_page(&rq->special_vec, page, data_len, 0);
+ rq->rq_flags |= RQF_SPECIAL_PAYLOAD;
+ return bvec_virt(&rq->special_vec);
+}
+
static blk_status_t sd_setup_unmap_cmnd(struct scsi_cmnd *cmd)
{
struct scsi_device *sdp = cmd->device;
@@ -841,19 +857,14 @@ static blk_status_t sd_setup_unmap_cmnd(struct scsi_cmnd *cmd)
unsigned int data_len = 24;
char *buf;
- rq->special_vec.bv_page = mempool_alloc(sd_page_pool, GFP_ATOMIC);
- if (!rq->special_vec.bv_page)
+ buf = sd_set_special_bvec(rq, data_len);
+ if (!buf)
return BLK_STS_RESOURCE;
- clear_highpage(rq->special_vec.bv_page);
- rq->special_vec.bv_offset = 0;
- rq->special_vec.bv_len = data_len;
- rq->rq_flags |= RQF_SPECIAL_PAYLOAD;
cmd->cmd_len = 10;
cmd->cmnd[0] = UNMAP;
cmd->cmnd[8] = 24;
- buf = bvec_virt(&rq->special_vec);
put_unaligned_be16(6 + 16, &buf[0]);
put_unaligned_be16(16, &buf[2]);
put_unaligned_be64(lba, &buf[8]);
@@ -876,13 +887,8 @@ static blk_status_t sd_setup_write_same16_cmnd(struct scsi_cmnd *cmd,
u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
u32 data_len = sdp->sector_size;
- rq->special_vec.bv_page = mempool_alloc(sd_page_pool, GFP_ATOMIC);
- if (!rq->special_vec.bv_page)
+ if (!sd_set_special_bvec(rq, data_len))
return BLK_STS_RESOURCE;
- clear_highpage(rq->special_vec.bv_page);
- rq->special_vec.bv_offset = 0;
- rq->special_vec.bv_len = data_len;
- rq->rq_flags |= RQF_SPECIAL_PAYLOAD;
cmd->cmd_len = 16;
cmd->cmnd[0] = WRITE_SAME_16;
@@ -908,13 +914,8 @@ static blk_status_t sd_setup_write_same10_cmnd(struct scsi_cmnd *cmd,
u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
u32 data_len = sdp->sector_size;
- rq->special_vec.bv_page = mempool_alloc(sd_page_pool, GFP_ATOMIC);
- if (!rq->special_vec.bv_page)
+ if (!sd_set_special_bvec(rq, data_len))
return BLK_STS_RESOURCE;
- clear_highpage(rq->special_vec.bv_page);
- rq->special_vec.bv_offset = 0;
- rq->special_vec.bv_len = data_len;
- rq->rq_flags |= RQF_SPECIAL_PAYLOAD;
cmd->cmd_len = 10;
cmd->cmnd[0] = WRITE_SAME;
@@ -1583,13 +1584,16 @@ static int sd_sync_cache(struct scsi_disk *sdkp, struct scsi_sense_hdr *sshdr)
const int timeout = sdp->request_queue->rq_timeout
* SD_FLUSH_TIMEOUT_MULTIPLIER;
struct scsi_sense_hdr my_sshdr;
+ const struct scsi_exec_args exec_args = {
+ .req_flags = BLK_MQ_REQ_PM,
+ /* caller might not be interested in sense, but we need it */
+ .sshdr = sshdr ? : &my_sshdr,
+ };
if (!scsi_device_online(sdp))
return -ENODEV;
- /* caller might not be interested in sense, but we need it */
- if (!sshdr)
- sshdr = &my_sshdr;
+ sshdr = exec_args.sshdr;
for (retries = 3; retries > 0; --retries) {
unsigned char cmd[16] = { 0 };
@@ -1602,8 +1606,8 @@ static int sd_sync_cache(struct scsi_disk *sdkp, struct scsi_sense_hdr *sshdr)
* Leave the rest of the command zero to indicate
* flush everything.
*/
- res = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, sshdr,
- timeout, sdkp->max_retries, 0, RQF_PM, NULL);
+ res = scsi_execute_cmd(sdp, cmd, REQ_OP_DRV_IN, NULL, 0,
+ timeout, sdkp->max_retries, &exec_args);
if (res == 0)
break;
}
@@ -1745,6 +1749,9 @@ static int sd_pr_command(struct block_device *bdev, u8 sa,
struct scsi_disk *sdkp = scsi_disk(bdev->bd_disk);
struct scsi_device *sdev = sdkp->device;
struct scsi_sense_hdr sshdr;
+ const struct scsi_exec_args exec_args = {
+ .sshdr = &sshdr,
+ };
int result;
u8 cmd[16] = { 0, };
u8 data[24] = { 0, };
@@ -1758,8 +1765,9 @@ static int sd_pr_command(struct block_device *bdev, u8 sa,
put_unaligned_be64(sa_key, &data[8]);
data[20] = flags;
- result = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, &data, sizeof(data),
- &sshdr, SD_TIMEOUT, sdkp->max_retries, NULL);
+ result = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_OUT, &data,
+ sizeof(data), SD_TIMEOUT, sdkp->max_retries,
+ &exec_args);
if (scsi_status_is_check_condition(result) &&
scsi_sense_valid(&sshdr)) {
@@ -2088,6 +2096,9 @@ sd_spinup_disk(struct scsi_disk *sdkp)
int retries, spintime;
unsigned int the_result;
struct scsi_sense_hdr sshdr;
+ const struct scsi_exec_args exec_args = {
+ .sshdr = &sshdr,
+ };
int sense_valid = 0;
spintime = 0;
@@ -2103,10 +2114,11 @@ sd_spinup_disk(struct scsi_disk *sdkp)
cmd[0] = TEST_UNIT_READY;
memset((void *) &cmd[1], 0, 9);
- the_result = scsi_execute_req(sdkp->device, cmd,
- DMA_NONE, NULL, 0,
- &sshdr, SD_TIMEOUT,
- sdkp->max_retries, NULL);
+ the_result = scsi_execute_cmd(sdkp->device, cmd,
+ REQ_OP_DRV_IN, NULL, 0,
+ SD_TIMEOUT,
+ sdkp->max_retries,
+ &exec_args);
/*
* If the drive has indicated to us that it
@@ -2163,10 +2175,10 @@ sd_spinup_disk(struct scsi_disk *sdkp)
cmd[4] = 1; /* Start spin cycle */
if (sdkp->device->start_stop_pwr_cond)
cmd[4] |= 1 << 4;
- scsi_execute_req(sdkp->device, cmd, DMA_NONE,
- NULL, 0, &sshdr,
+ scsi_execute_cmd(sdkp->device, cmd,
+ REQ_OP_DRV_IN, NULL, 0,
SD_TIMEOUT, sdkp->max_retries,
- NULL);
+ &exec_args);
spintime_expire = jiffies + 100 * HZ;
spintime = 1;
}
@@ -2296,6 +2308,9 @@ static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
{
unsigned char cmd[16];
struct scsi_sense_hdr sshdr;
+ const struct scsi_exec_args exec_args = {
+ .sshdr = &sshdr,
+ };
int sense_valid = 0;
int the_result;
int retries = 3, reset_retries = READ_CAPACITY_RETRIES_ON_RESET;
@@ -2313,9 +2328,9 @@ static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
cmd[13] = RC16_LEN;
memset(buffer, 0, RC16_LEN);
- the_result = scsi_execute_req(sdp, cmd, DMA_FROM_DEVICE,
- buffer, RC16_LEN, &sshdr,
- SD_TIMEOUT, sdkp->max_retries, NULL);
+ the_result = scsi_execute_cmd(sdp, cmd, REQ_OP_DRV_IN,
+ buffer, RC16_LEN, SD_TIMEOUT,
+ sdkp->max_retries, &exec_args);
if (media_not_present(sdkp, &sshdr))
return -ENODEV;
@@ -2387,6 +2402,9 @@ static int read_capacity_10(struct scsi_disk *sdkp, struct scsi_device *sdp,
{
unsigned char cmd[16];
struct scsi_sense_hdr sshdr;
+ const struct scsi_exec_args exec_args = {
+ .sshdr = &sshdr,
+ };
int sense_valid = 0;
int the_result;
int retries = 3, reset_retries = READ_CAPACITY_RETRIES_ON_RESET;
@@ -2398,9 +2416,9 @@ static int read_capacity_10(struct scsi_disk *sdkp, struct scsi_device *sdp,
memset(&cmd[1], 0, 9);
memset(buffer, 0, 8);
- the_result = scsi_execute_req(sdp, cmd, DMA_FROM_DEVICE,
- buffer, 8, &sshdr,
- SD_TIMEOUT, sdkp->max_retries, NULL);
+ the_result = scsi_execute_cmd(sdp, cmd, REQ_OP_DRV_IN, buffer,
+ 8, SD_TIMEOUT, sdkp->max_retries,
+ &exec_args);
if (media_not_present(sdkp, &sshdr))
return -ENODEV;
@@ -3637,6 +3655,10 @@ static int sd_start_stop_device(struct scsi_disk *sdkp, int start)
{
unsigned char cmd[6] = { START_STOP }; /* START_VALID */
struct scsi_sense_hdr sshdr;
+ const struct scsi_exec_args exec_args = {
+ .sshdr = &sshdr,
+ .req_flags = BLK_MQ_REQ_PM,
+ };
struct scsi_device *sdp = sdkp->device;
int res;
@@ -3649,8 +3671,8 @@ static int sd_start_stop_device(struct scsi_disk *sdkp, int start)
if (!scsi_device_online(sdp))
return -ENODEV;
- res = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, &sshdr,
- SD_TIMEOUT, sdkp->max_retries, 0, RQF_PM, NULL);
+ res = scsi_execute_cmd(sdp, cmd, REQ_OP_DRV_IN, NULL, 0, SD_TIMEOUT,
+ sdkp->max_retries, &exec_args);
if (res) {
sd_print_result(sdkp, "Start/Stop Unit failed", res);
if (res > 0 && scsi_sense_valid(&sshdr)) {
@@ -3790,10 +3812,13 @@ static int sd_resume_runtime(struct device *dev)
if (sdp->ignore_media_change) {
/* clear the device's sense data */
static const u8 cmd[10] = { REQUEST_SENSE };
+ const struct scsi_exec_args exec_args = {
+ .req_flags = BLK_MQ_REQ_PM,
+ };
- if (scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL,
- NULL, sdp->request_queue->rq_timeout, 1, 0,
- RQF_PM, NULL))
+ if (scsi_execute_cmd(sdp, cmd, REQ_OP_DRV_IN, NULL, 0,
+ sdp->request_queue->rq_timeout, 1,
+ &exec_args))
sd_printk(KERN_NOTICE, sdkp,
"Failed to clear sense data\n");
}
diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
index 62abebbaf2e7..6b3a02d4406c 100644
--- a/drivers/scsi/sd_zbc.c
+++ b/drivers/scsi/sd_zbc.c
@@ -148,6 +148,9 @@ static int sd_zbc_do_report_zones(struct scsi_disk *sdkp, unsigned char *buf,
struct scsi_device *sdp = sdkp->device;
const int timeout = sdp->request_queue->rq_timeout;
struct scsi_sense_hdr sshdr;
+ const struct scsi_exec_args exec_args = {
+ .sshdr = &sshdr,
+ };
unsigned char cmd[16];
unsigned int rep_len;
int result;
@@ -160,9 +163,8 @@ static int sd_zbc_do_report_zones(struct scsi_disk *sdkp, unsigned char *buf,
if (partial)
cmd[14] = ZBC_REPORT_ZONE_PARTIAL;
- result = scsi_execute_req(sdp, cmd, DMA_FROM_DEVICE,
- buf, buflen, &sshdr,
- timeout, SD_MAX_RETRIES, NULL);
+ result = scsi_execute_cmd(sdp, cmd, REQ_OP_DRV_IN, buf, buflen,
+ timeout, SD_MAX_RETRIES, &exec_args);
if (result) {
sd_printk(KERN_ERR, sdkp,
"REPORT ZONES start lba %llu failed\n", lba);
diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c
index 0a1734f34587..869ca9c7f23f 100644
--- a/drivers/scsi/ses.c
+++ b/drivers/scsi/ses.c
@@ -89,10 +89,13 @@ static int ses_recv_diag(struct scsi_device *sdev, int page_code,
unsigned char recv_page_code;
unsigned int retries = SES_RETRIES;
struct scsi_sense_hdr sshdr;
+ const struct scsi_exec_args exec_args = {
+ .sshdr = &sshdr,
+ };
do {
- ret = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf, bufflen,
- &sshdr, SES_TIMEOUT, 1, NULL);
+ ret = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_IN, buf, bufflen,
+ SES_TIMEOUT, 1, &exec_args);
} while (ret > 0 && --retries && scsi_sense_valid(&sshdr) &&
(sshdr.sense_key == NOT_READY ||
(sshdr.sense_key == UNIT_ATTENTION && sshdr.asc == 0x29)));
@@ -130,10 +133,13 @@ static int ses_send_diag(struct scsi_device *sdev, int page_code,
};
struct scsi_sense_hdr sshdr;
unsigned int retries = SES_RETRIES;
+ const struct scsi_exec_args exec_args = {
+ .sshdr = &sshdr,
+ };
do {
- result = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, buf, bufflen,
- &sshdr, SES_TIMEOUT, 1, NULL);
+ result = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_OUT, buf,
+ bufflen, SES_TIMEOUT, 1, &exec_args);
} while (result > 0 && --retries && scsi_sense_valid(&sshdr) &&
(sshdr.sense_key == NOT_READY ||
(sshdr.sense_key == UNIT_ATTENTION && sshdr.asc == 0x29)));
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index ff9854f59964..a91049213203 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -1288,7 +1288,7 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma)
}
sfp->mmap_called = 1;
- vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+ vm_flags_set(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP);
vma->vm_private_data = sfp;
vma->vm_ops = &sg_mmap_vm_ops;
out:
diff --git a/drivers/scsi/smartpqi/smartpqi.h b/drivers/scsi/smartpqi/smartpqi.h
index af27bb0f3133..228838eb3686 100644
--- a/drivers/scsi/smartpqi/smartpqi.h
+++ b/drivers/scsi/smartpqi/smartpqi.h
@@ -954,7 +954,7 @@ struct report_log_lun {
struct report_log_lun_list {
struct report_lun_header header;
- struct report_log_lun lun_entries[1];
+ struct report_log_lun lun_entries[];
};
struct report_phys_lun_8byte_wwid {
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index d0446d4d4465..49a8f91810b6 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -1259,7 +1259,8 @@ static int pqi_get_device_lists(struct pqi_ctrl_info *ctrl_info,
"report logical LUNs failed\n");
/*
- * Tack the controller itself onto the end of the logical device list.
+ * Tack the controller itself onto the end of the logical device list
+ * by adding a list entry that is all zeros.
*/
logdev_data = *logdev_list;
diff --git a/drivers/scsi/snic/snic_debugfs.c b/drivers/scsi/snic/snic_debugfs.c
index 57bdc3ba49d9..9dd975b36b5b 100644
--- a/drivers/scsi/snic/snic_debugfs.c
+++ b/drivers/scsi/snic/snic_debugfs.c
@@ -437,6 +437,6 @@ void snic_trc_debugfs_init(void)
void
snic_trc_debugfs_term(void)
{
- debugfs_remove(debugfs_lookup(TRC_FILE, snic_glob->trc_root));
- debugfs_remove(debugfs_lookup(TRC_ENABLE_FILE, snic_glob->trc_root));
+ debugfs_lookup_and_remove(TRC_FILE, snic_glob->trc_root);
+ debugfs_lookup_and_remove(TRC_ENABLE_FILE, snic_glob->trc_root);
}
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index a278b739d0c5..9e51dcd30bfd 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -170,10 +170,13 @@ static unsigned int sr_get_events(struct scsi_device *sdev)
struct event_header *eh = (void *)buf;
struct media_event_desc *med = (void *)(buf + 4);
struct scsi_sense_hdr sshdr;
+ const struct scsi_exec_args exec_args = {
+ .sshdr = &sshdr,
+ };
int result;
- result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf, sizeof(buf),
- &sshdr, SR_TIMEOUT, MAX_RETRIES, NULL);
+ result = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_IN, buf, sizeof(buf),
+ SR_TIMEOUT, MAX_RETRIES, &exec_args);
if (scsi_sense_valid(&sshdr) && sshdr.sense_key == UNIT_ATTENTION)
return DISK_EVENT_MEDIA_CHANGE;
@@ -730,8 +733,8 @@ static void get_sectorsize(struct scsi_cd *cd)
memset(buffer, 0, sizeof(buffer));
/* Do the command and wait.. */
- the_result = scsi_execute_req(cd->device, cmd, DMA_FROM_DEVICE,
- buffer, sizeof(buffer), NULL,
+ the_result = scsi_execute_cmd(cd->device, cmd, REQ_OP_DRV_IN,
+ buffer, sizeof(buffer),
SR_TIMEOUT, MAX_RETRIES, NULL);
retries--;
diff --git a/drivers/scsi/sr_ioctl.c b/drivers/scsi/sr_ioctl.c
index fbdb5124d7f7..5b0b35e60e61 100644
--- a/drivers/scsi/sr_ioctl.c
+++ b/drivers/scsi/sr_ioctl.c
@@ -188,13 +188,15 @@ static int sr_play_trkind(struct cdrom_device_info *cdi,
int sr_do_ioctl(Scsi_CD *cd, struct packet_command *cgc)
{
struct scsi_device *SDev;
- struct scsi_sense_hdr local_sshdr, *sshdr = &local_sshdr;
+ struct scsi_sense_hdr local_sshdr, *sshdr;
int result, err = 0, retries = 0;
+ const struct scsi_exec_args exec_args = {
+ .sshdr = cgc->sshdr ? : &local_sshdr,
+ };
SDev = cd->device;
- if (cgc->sshdr)
- sshdr = cgc->sshdr;
+ sshdr = exec_args.sshdr;
retry:
if (!scsi_block_when_processing_errors(SDev)) {
@@ -202,10 +204,11 @@ int sr_do_ioctl(Scsi_CD *cd, struct packet_command *cgc)
goto out;
}
- result = scsi_execute(SDev, cgc->cmd, cgc->data_direction,
- cgc->buffer, cgc->buflen, NULL, sshdr,
- cgc->timeout, IOCTL_RETRIES, 0, 0, NULL);
-
+ result = scsi_execute_cmd(SDev, cgc->cmd,
+ cgc->data_direction == DMA_TO_DEVICE ?
+ REQ_OP_DRV_OUT : REQ_OP_DRV_IN, cgc->buffer,
+ cgc->buflen, cgc->timeout, IOCTL_RETRIES,
+ &exec_args);
/* Minimal error checking. Ignore cases we know about, and report the rest. */
if (result < 0) {
err = result;
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 22705eb781b0..33f568b7f54d 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -2095,7 +2095,7 @@ static int storvsc_change_queue_depth(struct scsi_device *sdev, int queue_depth)
return scsi_change_queue_depth(sdev, queue_depth);
}
-static int storvsc_remove(struct hv_device *dev)
+static void storvsc_remove(struct hv_device *dev)
{
struct storvsc_device *stor_device = hv_get_drvdata(dev);
struct Scsi_Host *host = stor_device->host;
@@ -2111,8 +2111,6 @@ static int storvsc_remove(struct hv_device *dev)
scsi_remove_host(host);
storvsc_dev_remove(dev);
scsi_host_put(host);
-
- return 0;
}
static int storvsc_suspend(struct hv_device *hv_dev)
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index d07d24c06b54..b221c3c99320 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -347,8 +347,8 @@ static void virtscsi_rescan_hotunplug(struct virtio_scsi *vscsi)
memset(inq_result, 0, inq_result_len);
- result = scsi_execute_req(sdev, scsi_cmd, DMA_FROM_DEVICE,
- inq_result, inquiry_len, NULL,
+ result = scsi_execute_cmd(sdev, scsi_cmd, REQ_OP_DRV_IN,
+ inq_result, inquiry_len,
SD_TIMEOUT, SD_MAX_RETRIES, NULL);
if (result == 0 && inq_result[0] >> 5) {
diff --git a/drivers/sh/maple/maple.c b/drivers/sh/maple/maple.c
index e24e220e56ee..e05473c5c267 100644
--- a/drivers/sh/maple/maple.c
+++ b/drivers/sh/maple/maple.c
@@ -760,12 +760,6 @@ static int maple_match_bus_driver(struct device *devptr,
return 0;
}
-static int maple_bus_uevent(struct device *dev,
- struct kobj_uevent_env *env)
-{
- return 0;
-}
-
static void maple_bus_release(struct device *dev)
{
}
@@ -782,7 +776,6 @@ static struct maple_driver maple_unsupported_device = {
struct bus_type maple_bus_type = {
.name = "maple",
.match = maple_match_bus_driver,
- .uevent = maple_bus_uevent,
};
EXPORT_SYMBOL_GPL(maple_bus_type);
diff --git a/drivers/slimbus/core.c b/drivers/slimbus/core.c
index 219483b79c09..d43873bb5fe6 100644
--- a/drivers/slimbus/core.c
+++ b/drivers/slimbus/core.c
@@ -93,9 +93,9 @@ static void slim_device_remove(struct device *dev)
}
}
-static int slim_device_uevent(struct device *dev, struct kobj_uevent_env *env)
+static int slim_device_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
- struct slim_device *sbdev = to_slim_device(dev);
+ const struct slim_device *sbdev = to_slim_device(dev);
return add_uevent_var(env, "MODALIAS=slim:%s", dev_name(&sbdev->dev));
}
diff --git a/drivers/soc/atmel/soc.c b/drivers/soc/atmel/soc.c
index dae8a2e0f745..cc9a3e107479 100644
--- a/drivers/soc/atmel/soc.c
+++ b/drivers/soc/atmel/soc.c
@@ -235,6 +235,15 @@ static const struct at91_soc socs[] __initconst = {
AT91_SOC(SAMA7G5_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
AT91_CIDR_VERSION_MASK_SAMA7G5, SAMA7G54_EXID_MATCH,
"sama7g54", "sama7g5"),
+ AT91_SOC(SAMA7G5_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK_SAMA7G5, SAMA7G54_D1G_EXID_MATCH,
+ "SAMA7G54 1Gb DDR3L SiP", "sama7g5"),
+ AT91_SOC(SAMA7G5_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK_SAMA7G5, SAMA7G54_D2G_EXID_MATCH,
+ "SAMA7G54 2Gb DDR3L SiP", "sama7g5"),
+ AT91_SOC(SAMA7G5_CIDR_MATCH, AT91_CIDR_MATCH_MASK,
+ AT91_CIDR_VERSION_MASK_SAMA7G5, SAMA7G54_D4G_EXID_MATCH,
+ "SAMA7G54 4Gb DDR3L SiP", "sama7g5"),
#endif
{ /* sentinel */ },
};
diff --git a/drivers/soc/atmel/soc.h b/drivers/soc/atmel/soc.h
index 2ecaa75b00f0..7a9f47ce85fb 100644
--- a/drivers/soc/atmel/soc.h
+++ b/drivers/soc/atmel/soc.h
@@ -70,6 +70,9 @@ at91_soc_init(const struct at91_soc *socs);
#define SAMA7G52_EXID_MATCH 0x2
#define SAMA7G53_EXID_MATCH 0x1
#define SAMA7G54_EXID_MATCH 0x0
+#define SAMA7G54_D1G_EXID_MATCH 0x00000018
+#define SAMA7G54_D2G_EXID_MATCH 0x00000020
+#define SAMA7G54_D4G_EXID_MATCH 0x00000028
#define AT91SAM9XE128_CIDR_MATCH 0x329973a0
#define AT91SAM9XE256_CIDR_MATCH 0x329a93a0
diff --git a/drivers/soc/imx/gpcv2.c b/drivers/soc/imx/gpcv2.c
index 7a47d14fde44..4b3300b090a8 100644
--- a/drivers/soc/imx/gpcv2.c
+++ b/drivers/soc/imx/gpcv2.c
@@ -1518,7 +1518,7 @@ static int imx_gpcv2_probe(struct platform_device *pdev)
domain->genpd.power_off = imx_pgc_power_down;
pd_pdev->dev.parent = dev;
- pd_pdev->dev.of_node = np;
+ device_set_node(&pd_pdev->dev, of_fwnode_handle(np));
ret = platform_device_add(pd_pdev);
if (ret) {
diff --git a/drivers/soc/ixp4xx/ixp4xx-npe.c b/drivers/soc/ixp4xx/ixp4xx-npe.c
index 58240e320c13..5be9988f30ce 100644
--- a/drivers/soc/ixp4xx/ixp4xx-npe.c
+++ b/drivers/soc/ixp4xx/ixp4xx-npe.c
@@ -519,15 +519,15 @@ int npe_load_firmware(struct npe *npe, const char *name, struct device *dev)
u32 id;
u32 size;
union {
- u32 data[0];
- struct dl_block blocks[0];
+ DECLARE_FLEX_ARRAY(u32, data);
+ DECLARE_FLEX_ARRAY(struct dl_block, blocks);
};
} *image;
struct dl_codeblock {
u32 npe_addr;
u32 size;
- u32 data[0];
+ u32 data[];
} *cb;
int i, j, err, data_size, instr_size, blocks, table_end;
diff --git a/drivers/soc/qcom/apr.c b/drivers/soc/qcom/apr.c
index d51abb462ae5..30f81d6d9d9d 100644
--- a/drivers/soc/qcom/apr.c
+++ b/drivers/soc/qcom/apr.c
@@ -387,9 +387,9 @@ static void apr_device_remove(struct device *dev)
spin_unlock(&apr->svcs_lock);
}
-static int apr_uevent(struct device *dev, struct kobj_uevent_env *env)
+static int apr_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
- struct apr_device *adev = to_apr_device(dev);
+ const struct apr_device *adev = to_apr_device(dev);
int ret;
ret = of_device_uevent_modalias(dev, env);
diff --git a/drivers/soc/qcom/qcom-geni-se.c b/drivers/soc/qcom/qcom-geni-se.c
index f0475b93ca73..795a2e1d59b3 100644
--- a/drivers/soc/qcom/qcom-geni-se.c
+++ b/drivers/soc/qcom/qcom-geni-se.c
@@ -14,7 +14,7 @@
#include <linux/of_platform.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
-#include <linux/qcom-geni-se.h>
+#include <linux/soc/qcom/geni-se.h>
/**
* DOC: Overview
diff --git a/drivers/soc/qcom/qmi_interface.c b/drivers/soc/qcom/qmi_interface.c
index 57052726299d..820bdd9f8e46 100644
--- a/drivers/soc/qcom/qmi_interface.c
+++ b/drivers/soc/qcom/qmi_interface.c
@@ -12,6 +12,7 @@
#include <linux/string.h>
#include <net/sock.h>
#include <linux/workqueue.h>
+#include <trace/events/sock.h>
#include <linux/soc/qcom/qmi.h>
static struct socket *qmi_sock_create(struct qmi_handle *qmi,
@@ -569,6 +570,8 @@ static void qmi_data_ready(struct sock *sk)
{
struct qmi_handle *qmi = sk->sk_user_data;
+ trace_sk_data_ready(sk);
+
/*
* This will be NULL if we receive data while being in
* qmi_handle_release()
diff --git a/drivers/soc/renesas/rcar-sysc.c b/drivers/soc/renesas/rcar-sysc.c
index b0a80de34c98..eed47696e825 100644
--- a/drivers/soc/renesas/rcar-sysc.c
+++ b/drivers/soc/renesas/rcar-sysc.c
@@ -437,7 +437,7 @@ static int __init rcar_sysc_pd_init(void)
error = of_genpd_add_provider_onecell(np, &domains->onecell_data);
if (!error)
- of_node_set_flag(np, OF_POPULATED);
+ fwnode_dev_initialized(of_fwnode_handle(np), true);
out_put:
of_node_put(np);
diff --git a/drivers/soc/samsung/Kconfig b/drivers/soc/samsung/Kconfig
index 02e319508cc6..7a8f291e7704 100644
--- a/drivers/soc/samsung/Kconfig
+++ b/drivers/soc/samsung/Kconfig
@@ -52,29 +52,9 @@ config EXYNOS_PM_DOMAINS
bool "Exynos PM domains" if COMPILE_TEST
depends on (ARCH_EXYNOS && PM_GENERIC_DOMAINS) || COMPILE_TEST
-config SAMSUNG_PM_DEBUG
- bool "Samsung PM Suspend debug"
- depends on PM && DEBUG_KERNEL
- depends on PLAT_S3C24XX || ARCH_S3C64XX || ARCH_S5PV210
- depends on DEBUG_S3C24XX_UART || DEBUG_S3C2410_UART
- depends on DEBUG_LL && MMU
- help
- Say Y here if you want verbose debugging from the PM Suspend and
- Resume code. See <file:Documentation/arm/samsung-s3c24xx/suspend.rst>
- for more information.
-
-config S3C_PM_DEBUG_LED_SMDK
- bool "SMDK LED suspend/resume debugging"
- depends on PM && (MACH_SMDK6410)
- help
- Say Y here to enable the use of the SMDK LEDs on the baseboard
- for debugging of the state of the suspend and resume process.
-
- Note, this currently only works for S3C64XX based SMDK boards.
-
config SAMSUNG_PM_CHECK
bool "S3C2410 PM Suspend Memory CRC"
- depends on PM && (PLAT_S3C24XX || ARCH_S3C64XX || ARCH_S5PV210)
+ depends on PM && (ARCH_S3C64XX || ARCH_S5PV210)
select CRC32
help
Enable the PM code's memory area checksum over sleep. This option
@@ -85,8 +65,6 @@ config SAMSUNG_PM_CHECK
Note, this can take several seconds depending on memory size
and CPU speed.
- See <file:Documentation/arm/samsung-s3c24xx/suspend.rst>
-
config SAMSUNG_PM_CHECK_CHUNKSIZE
int "S3C2410 PM Suspend CRC Chunksize (KiB)"
depends on PM && SAMSUNG_PM_CHECK
@@ -97,8 +75,6 @@ config SAMSUNG_PM_CHECK_CHUNKSIZE
the CRC data block will take more memory, but will identify any
faults with better precision.
- See <file:Documentation/arm/samsung-s3c24xx/suspend.rst>
-
config EXYNOS_REGULATOR_COUPLER
bool "Exynos SoC Regulator Coupler" if COMPILE_TEST
depends on ARCH_EXYNOS || COMPILE_TEST
diff --git a/drivers/soc/samsung/Makefile b/drivers/soc/samsung/Makefile
index 9f59d1905ab0..d35270fc6b2b 100644
--- a/drivers/soc/samsung/Makefile
+++ b/drivers/soc/samsung/Makefile
@@ -14,4 +14,3 @@ obj-$(CONFIG_EXYNOS_PM_DOMAINS) += pm_domains.o
obj-$(CONFIG_EXYNOS_REGULATOR_COUPLER) += exynos-regulator-coupler.o
obj-$(CONFIG_SAMSUNG_PM_CHECK) += s3c-pm-check.o
-obj-$(CONFIG_SAMSUNG_PM_DEBUG) += s3c-pm-debug.o
diff --git a/drivers/soc/samsung/s3c-pm-debug.c b/drivers/soc/samsung/s3c-pm-debug.c
deleted file mode 100644
index b5ce0e9a41e5..000000000000
--- a/drivers/soc/samsung/s3c-pm-debug.c
+++ /dev/null
@@ -1,79 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-//
-// Copyright (C) 2013 Samsung Electronics Co., Ltd.
-// Tomasz Figa <t.figa@samsung.com>
-// Copyright (C) 2008 Openmoko, Inc.
-// Copyright (C) 2004-2008 Simtec Electronics
-// Ben Dooks <ben@simtec.co.uk>
-// http://armlinux.simtec.co.uk/
-//
-// Samsung common power management (suspend to RAM) debug support
-
-#include <linux/serial_core.h>
-#include <linux/serial_s3c.h>
-#include <linux/io.h>
-
-#include <asm/mach/map.h>
-
-#include <linux/soc/samsung/s3c-pm.h>
-
-static struct pm_uart_save uart_save;
-
-extern void printascii(const char *);
-
-void s3c_pm_dbg(const char *fmt, ...)
-{
- va_list va;
- char buff[256];
-
- va_start(va, fmt);
- vsnprintf(buff, sizeof(buff), fmt, va);
- va_end(va);
-
- printascii(buff);
-}
-
-static inline void __iomem *s3c_pm_uart_base(void)
-{
- unsigned long paddr;
- unsigned long vaddr;
-
- debug_ll_addr(&paddr, &vaddr);
-
- return (void __iomem *)vaddr;
-}
-
-void s3c_pm_save_uarts(bool is_s3c2410)
-{
- void __iomem *regs = s3c_pm_uart_base();
- struct pm_uart_save *save = &uart_save;
-
- save->ulcon = __raw_readl(regs + S3C2410_ULCON);
- save->ucon = __raw_readl(regs + S3C2410_UCON);
- save->ufcon = __raw_readl(regs + S3C2410_UFCON);
- save->umcon = __raw_readl(regs + S3C2410_UMCON);
- save->ubrdiv = __raw_readl(regs + S3C2410_UBRDIV);
-
- if (!is_s3c2410)
- save->udivslot = __raw_readl(regs + S3C2443_DIVSLOT);
-
- S3C_PMDBG("UART[%p]: ULCON=%04x, UCON=%04x, UFCON=%04x, UBRDIV=%04x\n",
- regs, save->ulcon, save->ucon, save->ufcon, save->ubrdiv);
-}
-
-void s3c_pm_restore_uarts(bool is_s3c2410)
-{
- void __iomem *regs = s3c_pm_uart_base();
- struct pm_uart_save *save = &uart_save;
-
- s3c_pm_arch_update_uart(regs, save);
-
- __raw_writel(save->ulcon, regs + S3C2410_ULCON);
- __raw_writel(save->ucon, regs + S3C2410_UCON);
- __raw_writel(save->ufcon, regs + S3C2410_UFCON);
- __raw_writel(save->umcon, regs + S3C2410_UMCON);
- __raw_writel(save->ubrdiv, regs + S3C2410_UBRDIV);
-
- if (!is_s3c2410)
- __raw_writel(save->udivslot, regs + S3C2443_DIVSLOT);
-}
diff --git a/drivers/soc/ti/smartreflex.c b/drivers/soc/ti/smartreflex.c
index 6a389a6444f3..9d9496e0a94c 100644
--- a/drivers/soc/ti/smartreflex.c
+++ b/drivers/soc/ti/smartreflex.c
@@ -198,7 +198,6 @@ static void sr_stop_vddautocomp(struct omap_sr *sr)
*/
static int sr_late_init(struct omap_sr *sr_info)
{
- struct omap_sr_data *pdata = sr_info->pdev->dev.platform_data;
int ret = 0;
if (sr_class->notify && sr_class->notify_flags && sr_info->irq) {
@@ -209,9 +208,6 @@ static int sr_late_init(struct omap_sr *sr_info)
disable_irq(sr_info->irq);
}
- if (pdata && pdata->enable_on_init)
- sr_start_vddautocomp(sr_info);
-
return ret;
error:
diff --git a/drivers/soundwire/bus.c b/drivers/soundwire/bus.c
index 76515c33e639..b6aca59c3130 100644
--- a/drivers/soundwire/bus.c
+++ b/drivers/soundwire/bus.c
@@ -225,9 +225,9 @@ static inline int do_transfer(struct sdw_bus *bus, struct sdw_msg *msg)
}
static inline int do_transfer_defer(struct sdw_bus *bus,
- struct sdw_msg *msg,
- struct sdw_defer *defer)
+ struct sdw_msg *msg)
{
+ struct sdw_defer *defer = &bus->defer_msg;
int retry = bus->prop.err_threshold;
enum sdw_command_response resp;
int ret = 0, i;
@@ -237,24 +237,7 @@ static inline int do_transfer_defer(struct sdw_bus *bus,
init_completion(&defer->complete);
for (i = 0; i <= retry; i++) {
- resp = bus->ops->xfer_msg_defer(bus, msg, defer);
- ret = find_response_code(resp);
- /* if cmd is ok or ignored return */
- if (ret == 0 || ret == -ENODATA)
- return ret;
- }
-
- return ret;
-}
-
-static int sdw_reset_page(struct sdw_bus *bus, u16 dev_num)
-{
- int retry = bus->prop.err_threshold;
- enum sdw_command_response resp;
- int ret = 0, i;
-
- for (i = 0; i <= retry; i++) {
- resp = bus->ops->reset_page_addr(bus, dev_num);
+ resp = bus->ops->xfer_msg_defer(bus);
ret = find_response_code(resp);
/* if cmd is ok or ignored return */
if (ret == 0 || ret == -ENODATA)
@@ -275,9 +258,6 @@ static int sdw_transfer_unlocked(struct sdw_bus *bus, struct sdw_msg *msg)
(msg->flags & SDW_MSG_FLAG_WRITE) ? "write" : "read",
msg->addr, msg->len);
- if (msg->page)
- sdw_reset_page(bus, msg->dev_num);
-
return ret;
}
@@ -335,26 +315,21 @@ EXPORT_SYMBOL(sdw_show_ping_status);
* sdw_transfer_defer() - Asynchronously transfer message to a SDW Slave device
* @bus: SDW bus
* @msg: SDW message to be xfered
- * @defer: Defer block for signal completion
*
* Caller needs to hold the msg_lock lock while calling this
*/
-int sdw_transfer_defer(struct sdw_bus *bus, struct sdw_msg *msg,
- struct sdw_defer *defer)
+int sdw_transfer_defer(struct sdw_bus *bus, struct sdw_msg *msg)
{
int ret;
if (!bus->ops->xfer_msg_defer)
return -ENOTSUPP;
- ret = do_transfer_defer(bus, msg, defer);
+ ret = do_transfer_defer(bus, msg);
if (ret != 0 && ret != -ENODATA)
dev_err(bus->dev, "Defer trf on Slave %d failed:%d\n",
msg->dev_num, ret);
- if (msg->page)
- sdw_reset_page(bus, msg->dev_num);
-
return ret;
}
@@ -414,8 +389,7 @@ int sdw_fill_msg(struct sdw_msg *msg, struct sdw_slave *slave,
* all clients need to use the pm versions
*/
-static int
-sdw_nread_no_pm(struct sdw_slave *slave, u32 addr, size_t count, u8 *val)
+int sdw_nread_no_pm(struct sdw_slave *slave, u32 addr, size_t count, u8 *val)
{
struct sdw_msg msg;
int ret;
@@ -430,9 +404,9 @@ sdw_nread_no_pm(struct sdw_slave *slave, u32 addr, size_t count, u8 *val)
ret = 0;
return ret;
}
+EXPORT_SYMBOL(sdw_nread_no_pm);
-static int
-sdw_nwrite_no_pm(struct sdw_slave *slave, u32 addr, size_t count, const u8 *val)
+int sdw_nwrite_no_pm(struct sdw_slave *slave, u32 addr, size_t count, const u8 *val)
{
struct sdw_msg msg;
int ret;
@@ -447,6 +421,7 @@ sdw_nwrite_no_pm(struct sdw_slave *slave, u32 addr, size_t count, const u8 *val)
ret = 0;
return ret;
}
+EXPORT_SYMBOL(sdw_nwrite_no_pm);
int sdw_write_no_pm(struct sdw_slave *slave, u32 addr, u8 value)
{
@@ -1214,7 +1189,7 @@ int sdw_configure_dpn_intr(struct sdw_slave *slave,
val &= ~SDW_DPN_INT_PORT_READY;
}
- ret = sdw_update(slave, addr, (mask | SDW_DPN_INT_PORT_READY), val);
+ ret = sdw_update_no_pm(slave, addr, (mask | SDW_DPN_INT_PORT_READY), val);
if (ret < 0)
dev_err(&slave->dev,
"SDW_DPN_INTMASK write failed:%d\n", val);
@@ -1233,10 +1208,11 @@ static int sdw_slave_set_frequency(struct sdw_slave *slave)
/*
* frequency base and scale registers are required for SDCA
- * devices. They may also be used for 1.2+/non-SDCA devices,
- * but we will need a DisCo property to cover this case
+ * devices. They may also be used for 1.2+/non-SDCA devices.
+ * Driver can set the property, we will need a DisCo property
+ * to discover this case from platform firmware.
*/
- if (!slave->id.class_id)
+ if (!slave->id.class_id && !slave->prop.clock_reg_supported)
return 0;
if (!mclk_freq) {
@@ -1587,7 +1563,7 @@ static int sdw_handle_slave_alerts(struct sdw_slave *slave)
goto io_err;
}
- if (slave->prop.is_sdca) {
+ if (slave->id.class_id) {
ret = sdw_read_no_pm(slave, SDW_DP0_INT);
if (ret < 0) {
dev_err(&slave->dev,
@@ -1724,7 +1700,7 @@ static int sdw_handle_slave_alerts(struct sdw_slave *slave)
goto io_err;
}
- if (slave->prop.is_sdca) {
+ if (slave->id.class_id) {
ret = sdw_read_no_pm(slave, SDW_DP0_INT);
if (ret < 0) {
dev_err(&slave->dev,
diff --git a/drivers/soundwire/bus.h b/drivers/soundwire/bus.h
index 7631ef5e71fb..96927a143796 100644
--- a/drivers/soundwire/bus.h
+++ b/drivers/soundwire/bus.h
@@ -151,8 +151,7 @@ int sdw_configure_dpn_intr(struct sdw_slave *slave, int port,
bool enable, int mask);
int sdw_transfer(struct sdw_bus *bus, struct sdw_msg *msg);
-int sdw_transfer_defer(struct sdw_bus *bus, struct sdw_msg *msg,
- struct sdw_defer *defer);
+int sdw_transfer_defer(struct sdw_bus *bus, struct sdw_msg *msg);
#define SDW_READ_INTR_CLEAR_RETRY 10
diff --git a/drivers/soundwire/bus_type.c b/drivers/soundwire/bus_type.c
index 04b3529f8929..1f43ee848eac 100644
--- a/drivers/soundwire/bus_type.c
+++ b/drivers/soundwire/bus_type.c
@@ -58,9 +58,9 @@ int sdw_slave_modalias(const struct sdw_slave *slave, char *buf, size_t size)
slave->id.sdw_version, slave->id.class_id);
}
-int sdw_slave_uevent(struct device *dev, struct kobj_uevent_env *env)
+int sdw_slave_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
- struct sdw_slave *slave = dev_to_sdw_dev(dev);
+ const struct sdw_slave *slave = dev_to_sdw_dev(dev);
char modalias[32];
sdw_slave_modalias(slave, modalias, sizeof(modalias));
@@ -105,20 +105,19 @@ static int sdw_drv_probe(struct device *dev)
if (ret)
return ret;
- mutex_lock(&slave->sdw_dev_lock);
-
ret = drv->probe(slave, id);
if (ret) {
name = drv->name;
if (!name)
name = drv->driver.name;
- mutex_unlock(&slave->sdw_dev_lock);
dev_err(dev, "Probe of %s failed: %d\n", name, ret);
dev_pm_domain_detach(dev, false);
return ret;
}
+ mutex_lock(&slave->sdw_dev_lock);
+
/* device is probed so let's read the properties now */
if (drv->ops && drv->ops->read_prop)
drv->ops->read_prop(slave);
@@ -167,14 +166,12 @@ static int sdw_drv_remove(struct device *dev)
int ret = 0;
mutex_lock(&slave->sdw_dev_lock);
-
slave->probed = false;
+ mutex_unlock(&slave->sdw_dev_lock);
if (drv->remove)
ret = drv->remove(slave);
- mutex_unlock(&slave->sdw_dev_lock);
-
dev_pm_domain_detach(dev, false);
return ret;
diff --git a/drivers/soundwire/cadence_master.c b/drivers/soundwire/cadence_master.c
index a1de363eba3f..e835dabb516c 100644
--- a/drivers/soundwire/cadence_master.c
+++ b/drivers/soundwire/cadence_master.c
@@ -127,7 +127,8 @@ MODULE_PARM_DESC(cdns_mcp_int_mask, "Cadence MCP IntMask");
#define CDNS_MCP_CMD_BASE 0x80
#define CDNS_MCP_RESP_BASE 0x80
-#define CDNS_MCP_CMD_LEN 0x20
+/* FIFO can hold 8 commands */
+#define CDNS_MCP_CMD_LEN 8
#define CDNS_MCP_CMD_WORD_LEN 0x4
#define CDNS_MCP_CMD_SSP_TAG BIT(31)
@@ -554,6 +555,29 @@ cdns_fill_msg_resp(struct sdw_cdns *cdns,
return SDW_CMD_OK;
}
+static void cdns_read_response(struct sdw_cdns *cdns)
+{
+ u32 num_resp, cmd_base;
+ int i;
+
+ /* RX_FIFO_AVAIL can be 2 entries more than the FIFO size */
+ BUILD_BUG_ON(ARRAY_SIZE(cdns->response_buf) < CDNS_MCP_CMD_LEN + 2);
+
+ num_resp = cdns_readl(cdns, CDNS_MCP_FIFOSTAT);
+ num_resp &= CDNS_MCP_RX_FIFO_AVAIL;
+ if (num_resp > ARRAY_SIZE(cdns->response_buf)) {
+ dev_warn(cdns->dev, "RX AVAIL %d too long\n", num_resp);
+ num_resp = ARRAY_SIZE(cdns->response_buf);
+ }
+
+ cmd_base = CDNS_MCP_CMD_BASE;
+
+ for (i = 0; i < num_resp; i++) {
+ cdns->response_buf[i] = cdns_readl(cdns, cmd_base);
+ cmd_base += CDNS_MCP_CMD_WORD_LEN;
+ }
+}
+
static enum sdw_command_response
_cdns_xfer_msg(struct sdw_cdns *cdns, struct sdw_msg *msg, int cmd,
int offset, int count, bool defer)
@@ -595,6 +619,10 @@ _cdns_xfer_msg(struct sdw_cdns *cdns, struct sdw_msg *msg, int cmd,
dev_err(cdns->dev, "IO transfer timed out, cmd %d device %d addr %x len %d\n",
cmd, msg->dev_num, msg->addr, msg->len);
msg->len = 0;
+
+ /* Drain anything in the RX_FIFO */
+ cdns_read_response(cdns);
+
return SDW_CMD_TIMEOUT;
}
@@ -721,10 +749,11 @@ cdns_xfer_msg(struct sdw_bus *bus, struct sdw_msg *msg)
EXPORT_SYMBOL(cdns_xfer_msg);
enum sdw_command_response
-cdns_xfer_msg_defer(struct sdw_bus *bus,
- struct sdw_msg *msg, struct sdw_defer *defer)
+cdns_xfer_msg_defer(struct sdw_bus *bus)
{
struct sdw_cdns *cdns = bus_to_cdns(bus);
+ struct sdw_defer *defer = &bus->defer_msg;
+ struct sdw_msg *msg = defer->msg;
int cmd = 0, ret;
/* for defer only 1 message is supported */
@@ -735,27 +764,10 @@ cdns_xfer_msg_defer(struct sdw_bus *bus,
if (ret)
return SDW_CMD_FAIL_OTHER;
- cdns->defer = defer;
- cdns->defer->length = msg->len;
-
return _cdns_xfer_msg(cdns, msg, cmd, 0, msg->len, true);
}
EXPORT_SYMBOL(cdns_xfer_msg_defer);
-enum sdw_command_response
-cdns_reset_page_addr(struct sdw_bus *bus, unsigned int dev_num)
-{
- struct sdw_cdns *cdns = bus_to_cdns(bus);
- struct sdw_msg msg;
-
- /* Create dummy message with valid device number */
- memset(&msg, 0, sizeof(msg));
- msg.dev_num = dev_num;
-
- return cdns_program_scp_addr(cdns, &msg);
-}
-EXPORT_SYMBOL(cdns_reset_page_addr);
-
u32 cdns_read_ping_status(struct sdw_bus *bus)
{
struct sdw_cdns *cdns = bus_to_cdns(bus);
@@ -768,22 +780,6 @@ EXPORT_SYMBOL(cdns_read_ping_status);
* IRQ handling
*/
-static void cdns_read_response(struct sdw_cdns *cdns)
-{
- u32 num_resp, cmd_base;
- int i;
-
- num_resp = cdns_readl(cdns, CDNS_MCP_FIFOSTAT);
- num_resp &= CDNS_MCP_RX_FIFO_AVAIL;
-
- cmd_base = CDNS_MCP_CMD_BASE;
-
- for (i = 0; i < num_resp; i++) {
- cdns->response_buf[i] = cdns_readl(cdns, cmd_base);
- cmd_base += CDNS_MCP_CMD_WORD_LEN;
- }
-}
-
static int cdns_update_slave_status(struct sdw_cdns *cdns,
u64 slave_intstat)
{
@@ -881,13 +877,15 @@ irqreturn_t sdw_cdns_irq(int irq, void *dev_id)
return IRQ_NONE;
if (int_status & CDNS_MCP_INT_RX_WL) {
+ struct sdw_bus *bus = &cdns->bus;
+ struct sdw_defer *defer = &bus->defer_msg;
+
cdns_read_response(cdns);
- if (cdns->defer) {
- cdns_fill_msg_resp(cdns, cdns->defer->msg,
- cdns->defer->length, 0);
- complete(&cdns->defer->complete);
- cdns->defer = NULL;
+ if (defer && defer->msg) {
+ cdns_fill_msg_resp(cdns, defer->msg,
+ defer->length, 0);
+ complete(&defer->complete);
} else {
complete(&cdns->tx_complete);
}
diff --git a/drivers/soundwire/cadence_master.h b/drivers/soundwire/cadence_master.h
index 0434d70d4b1f..dec0b4f993c1 100644
--- a/drivers/soundwire/cadence_master.h
+++ b/drivers/soundwire/cadence_master.h
@@ -8,6 +8,12 @@
#define SDW_CADENCE_GSYNC_KHZ 4 /* 4 kHz */
#define SDW_CADENCE_GSYNC_HZ (SDW_CADENCE_GSYNC_KHZ * 1000)
+/*
+ * The Cadence IP supports up to 32 entries in the FIFO, though implementations
+ * can configure the IP to have a smaller FIFO.
+ */
+#define CDNS_MCP_IP_MAX_CMD_LEN 32
+
/**
* struct sdw_cdns_pdi: PDI (Physical Data Interface) instance
*
@@ -103,7 +109,6 @@ struct sdw_cdns_dai_runtime {
* @instance: instance number
* @response_buf: SoundWire response buffer
* @tx_complete: Tx completion
- * @defer: Defer pointer
* @ports: Data ports
* @num_ports: Total number of data ports
* @pcm: PCM streams
@@ -117,9 +122,13 @@ struct sdw_cdns {
struct sdw_bus bus;
unsigned int instance;
- u32 response_buf[0x80];
+ /*
+ * The datasheet says the RX FIFO AVAIL can be 2 entries more
+ * than the FIFO capacity, so allow for this.
+ */
+ u32 response_buf[CDNS_MCP_IP_MAX_CMD_LEN + 2];
+
struct completion tx_complete;
- struct sdw_defer *defer;
struct sdw_cdns_port *ports;
int num_ports;
@@ -147,7 +156,6 @@ struct sdw_cdns {
/* Exported symbols */
int sdw_cdns_probe(struct sdw_cdns *cdns);
-extern struct sdw_master_ops sdw_cdns_master_ops;
irqreturn_t sdw_cdns_irq(int irq, void *dev_id);
irqreturn_t sdw_cdns_thread(int irq, void *dev_id);
@@ -173,14 +181,10 @@ void sdw_cdns_config_stream(struct sdw_cdns *cdns,
u32 ch, u32 dir, struct sdw_cdns_pdi *pdi);
enum sdw_command_response
-cdns_reset_page_addr(struct sdw_bus *bus, unsigned int dev_num);
-
-enum sdw_command_response
cdns_xfer_msg(struct sdw_bus *bus, struct sdw_msg *msg);
enum sdw_command_response
-cdns_xfer_msg_defer(struct sdw_bus *bus,
- struct sdw_msg *msg, struct sdw_defer *defer);
+cdns_xfer_msg_defer(struct sdw_bus *bus);
u32 cdns_read_ping_status(struct sdw_bus *bus);
diff --git a/drivers/soundwire/debugfs.c b/drivers/soundwire/debugfs.c
index 49900cd207bc..dea782e0edc4 100644
--- a/drivers/soundwire/debugfs.c
+++ b/drivers/soundwire/debugfs.c
@@ -4,6 +4,7 @@
#include <linux/device.h>
#include <linux/debugfs.h>
#include <linux/mod_devicetable.h>
+#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/soundwire/sdw.h>
#include <linux/soundwire/sdw_registers.h>
@@ -35,7 +36,7 @@ static ssize_t sdw_sprintf(struct sdw_slave *slave,
{
int value;
- value = sdw_read(slave, reg);
+ value = sdw_read_no_pm(slave, reg);
if (value < 0)
return scnprintf(buf + pos, RD_BUF - pos, "%3x\tXX\n", reg);
@@ -55,6 +56,12 @@ static int sdw_slave_reg_show(struct seq_file *s_file, void *data)
if (!buf)
return -ENOMEM;
+ ret = pm_runtime_resume_and_get(&slave->dev);
+ if (ret < 0 && ret != -EACCES) {
+ kfree(buf);
+ return ret;
+ }
+
ret = scnprintf(buf, RD_BUF, "Register Value\n");
/* DP0 non-banked registers */
@@ -112,6 +119,10 @@ static int sdw_slave_reg_show(struct seq_file *s_file, void *data)
}
seq_printf(s_file, "%s", buf);
+
+ pm_runtime_mark_last_busy(&slave->dev);
+ pm_runtime_put(&slave->dev);
+
kfree(buf);
return 0;
diff --git a/drivers/soundwire/intel.c b/drivers/soundwire/intel.c
index bc9c50bacc49..2651767272c7 100644
--- a/drivers/soundwire/intel.c
+++ b/drivers/soundwire/intel.c
@@ -778,22 +778,6 @@ unlock:
* DAI routines
*/
-static int intel_startup(struct snd_pcm_substream *substream,
- struct snd_soc_dai *dai)
-{
- struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
- int ret;
-
- ret = pm_runtime_resume_and_get(cdns->dev);
- if (ret < 0 && ret != -EACCES) {
- dev_err_ratelimited(cdns->dev,
- "pm_runtime_resume_and_get failed in %s, ret %d\n",
- __func__, ret);
- return ret;
- }
- return 0;
-}
-
static int intel_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai)
@@ -954,15 +938,6 @@ intel_hw_free(struct snd_pcm_substream *substream, struct snd_soc_dai *dai)
return 0;
}
-static void intel_shutdown(struct snd_pcm_substream *substream,
- struct snd_soc_dai *dai)
-{
- struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
-
- pm_runtime_mark_last_busy(cdns->dev);
- pm_runtime_put_autosuspend(cdns->dev);
-}
-
static int intel_pcm_set_sdw_stream(struct snd_soc_dai *dai,
void *stream, int direction)
{
@@ -1088,12 +1063,10 @@ static int intel_component_dais_suspend(struct snd_soc_component *component)
}
static const struct snd_soc_dai_ops intel_pcm_dai_ops = {
- .startup = intel_startup,
.hw_params = intel_hw_params,
.prepare = intel_prepare,
.hw_free = intel_hw_free,
.trigger = intel_trigger,
- .shutdown = intel_shutdown,
.set_stream = intel_pcm_set_sdw_stream,
.get_stream = intel_get_sdw_stream,
};
diff --git a/drivers/soundwire/intel_auxdevice.c b/drivers/soundwire/intel_auxdevice.c
index 96c6b2112feb..5021be0f4158 100644
--- a/drivers/soundwire/intel_auxdevice.c
+++ b/drivers/soundwire/intel_auxdevice.c
@@ -113,7 +113,6 @@ static struct sdw_master_ops sdw_intel_ops = {
.override_adr = sdw_dmi_override_adr,
.xfer_msg = cdns_xfer_msg,
.xfer_msg_defer = cdns_xfer_msg_defer,
- .reset_page_addr = cdns_reset_page_addr,
.set_bus_conf = cdns_bus_conf,
.pre_bank_switch = generic_pre_bank_switch,
.post_bank_switch = generic_post_bank_switch,
diff --git a/drivers/soundwire/stream.c b/drivers/soundwire/stream.c
index bd502368339e..8c6da1739e3d 100644
--- a/drivers/soundwire/stream.c
+++ b/drivers/soundwire/stream.c
@@ -81,14 +81,14 @@ static int _sdw_program_slave_port_params(struct sdw_bus *bus,
}
/* Program DPN_OffsetCtrl2 registers */
- ret = sdw_write(slave, addr1, t_params->offset2);
+ ret = sdw_write_no_pm(slave, addr1, t_params->offset2);
if (ret < 0) {
dev_err(bus->dev, "DPN_OffsetCtrl2 register write failed\n");
return ret;
}
/* Program DPN_BlockCtrl3 register */
- ret = sdw_write(slave, addr2, t_params->blk_pkg_mode);
+ ret = sdw_write_no_pm(slave, addr2, t_params->blk_pkg_mode);
if (ret < 0) {
dev_err(bus->dev, "DPN_BlockCtrl3 register write failed\n");
return ret;
@@ -105,7 +105,7 @@ static int _sdw_program_slave_port_params(struct sdw_bus *bus,
/* Program DPN_SampleCtrl2 register */
wbuf = FIELD_GET(SDW_DPN_SAMPLECTRL_HIGH, t_params->sample_interval - 1);
- ret = sdw_write(slave, addr3, wbuf);
+ ret = sdw_write_no_pm(slave, addr3, wbuf);
if (ret < 0) {
dev_err(bus->dev, "DPN_SampleCtrl2 register write failed\n");
return ret;
@@ -115,7 +115,7 @@ static int _sdw_program_slave_port_params(struct sdw_bus *bus,
wbuf = FIELD_PREP(SDW_DPN_HCTRL_HSTART, t_params->hstart);
wbuf |= FIELD_PREP(SDW_DPN_HCTRL_HSTOP, t_params->hstop);
- ret = sdw_write(slave, addr4, wbuf);
+ ret = sdw_write_no_pm(slave, addr4, wbuf);
if (ret < 0)
dev_err(bus->dev, "DPN_HCtrl register write failed\n");
@@ -163,7 +163,7 @@ static int sdw_program_slave_port_params(struct sdw_bus *bus,
wbuf = FIELD_PREP(SDW_DPN_PORTCTRL_DATAMODE, p_params->data_mode);
wbuf |= FIELD_PREP(SDW_DPN_PORTCTRL_FLOWMODE, p_params->flow_mode);
- ret = sdw_update(s_rt->slave, addr1, 0xF, wbuf);
+ ret = sdw_update_no_pm(s_rt->slave, addr1, 0xF, wbuf);
if (ret < 0) {
dev_err(&s_rt->slave->dev,
"DPN_PortCtrl register write failed for port %d\n",
@@ -173,7 +173,7 @@ static int sdw_program_slave_port_params(struct sdw_bus *bus,
if (!dpn_prop->read_only_wordlength) {
/* Program DPN_BlockCtrl1 register */
- ret = sdw_write(s_rt->slave, addr2, (p_params->bps - 1));
+ ret = sdw_write_no_pm(s_rt->slave, addr2, (p_params->bps - 1));
if (ret < 0) {
dev_err(&s_rt->slave->dev,
"DPN_BlockCtrl1 register write failed for port %d\n",
@@ -184,7 +184,7 @@ static int sdw_program_slave_port_params(struct sdw_bus *bus,
/* Program DPN_SampleCtrl1 register */
wbuf = (t_params->sample_interval - 1) & SDW_DPN_SAMPLECTRL_LOW;
- ret = sdw_write(s_rt->slave, addr3, wbuf);
+ ret = sdw_write_no_pm(s_rt->slave, addr3, wbuf);
if (ret < 0) {
dev_err(&s_rt->slave->dev,
"DPN_SampleCtrl1 register write failed for port %d\n",
@@ -193,7 +193,7 @@ static int sdw_program_slave_port_params(struct sdw_bus *bus,
}
/* Program DPN_OffsetCtrl1 registers */
- ret = sdw_write(s_rt->slave, addr4, t_params->offset1);
+ ret = sdw_write_no_pm(s_rt->slave, addr4, t_params->offset1);
if (ret < 0) {
dev_err(&s_rt->slave->dev,
"DPN_OffsetCtrl1 register write failed for port %d\n",
@@ -203,7 +203,7 @@ static int sdw_program_slave_port_params(struct sdw_bus *bus,
/* Program DPN_BlockCtrl2 register*/
if (t_params->blk_grp_ctrl_valid) {
- ret = sdw_write(s_rt->slave, addr5, t_params->blk_grp_ctrl);
+ ret = sdw_write_no_pm(s_rt->slave, addr5, t_params->blk_grp_ctrl);
if (ret < 0) {
dev_err(&s_rt->slave->dev,
"DPN_BlockCtrl2 reg write failed for port %d\n",
@@ -214,7 +214,7 @@ static int sdw_program_slave_port_params(struct sdw_bus *bus,
/* program DPN_LaneCtrl register */
if (slave_prop->lane_control_support) {
- ret = sdw_write(s_rt->slave, addr6, t_params->lane_ctrl);
+ ret = sdw_write_no_pm(s_rt->slave, addr6, t_params->lane_ctrl);
if (ret < 0) {
dev_err(&s_rt->slave->dev,
"DPN_LaneCtrl register write failed for port %d\n",
@@ -319,9 +319,9 @@ static int sdw_enable_disable_slave_ports(struct sdw_bus *bus,
* it is safe to reset this register
*/
if (en)
- ret = sdw_write(s_rt->slave, addr, p_rt->ch_mask);
+ ret = sdw_write_no_pm(s_rt->slave, addr, p_rt->ch_mask);
else
- ret = sdw_write(s_rt->slave, addr, 0x0);
+ ret = sdw_write_no_pm(s_rt->slave, addr, 0x0);
if (ret < 0)
dev_err(&s_rt->slave->dev,
@@ -469,16 +469,16 @@ static int sdw_prep_deprep_slave_ports(struct sdw_bus *bus,
}
/* Inform slave about the impending port prepare */
- sdw_do_port_prep(s_rt, prep_ch, SDW_OPS_PORT_PRE_PREP);
+ sdw_do_port_prep(s_rt, prep_ch, prep ? SDW_OPS_PORT_PRE_PREP : SDW_OPS_PORT_PRE_DEPREP);
/* Prepare Slave port implementing CP_SM */
if (!dpn_prop->simple_ch_prep_sm) {
addr = SDW_DPN_PREPARECTRL(p_rt->num);
if (prep)
- ret = sdw_write(s_rt->slave, addr, p_rt->ch_mask);
+ ret = sdw_write_no_pm(s_rt->slave, addr, p_rt->ch_mask);
else
- ret = sdw_write(s_rt->slave, addr, 0x0);
+ ret = sdw_write_no_pm(s_rt->slave, addr, 0x0);
if (ret < 0) {
dev_err(&s_rt->slave->dev,
@@ -491,7 +491,7 @@ static int sdw_prep_deprep_slave_ports(struct sdw_bus *bus,
wait_for_completion_timeout(port_ready,
msecs_to_jiffies(dpn_prop->ch_prep_timeout));
- val = sdw_read(s_rt->slave, SDW_DPN_PREPARESTATUS(p_rt->num));
+ val = sdw_read_no_pm(s_rt->slave, SDW_DPN_PREPARESTATUS(p_rt->num));
if ((val < 0) || (val & p_rt->ch_mask)) {
ret = (val < 0) ? val : -ETIMEDOUT;
dev_err(&s_rt->slave->dev,
@@ -501,7 +501,7 @@ static int sdw_prep_deprep_slave_ports(struct sdw_bus *bus,
}
/* Inform slaves about ports prepared */
- sdw_do_port_prep(s_rt, prep_ch, SDW_OPS_PORT_POST_PREP);
+ sdw_do_port_prep(s_rt, prep_ch, prep ? SDW_OPS_PORT_POST_PREP : SDW_OPS_PORT_POST_DEPREP);
/* Disable interrupt after Port de-prepare */
if (!prep && intr)
@@ -684,8 +684,6 @@ static int sdw_bank_switch(struct sdw_bus *bus, int m_rt_count)
if (!wr_msg)
return -ENOMEM;
- bus->defer_msg.msg = wr_msg;
-
wbuf = kzalloc(sizeof(*wbuf), GFP_KERNEL);
if (!wbuf) {
ret = -ENOMEM;
@@ -713,7 +711,7 @@ static int sdw_bank_switch(struct sdw_bus *bus, int m_rt_count)
multi_link = bus->multi_link && (m_rt_count >= bus->hw_sync_min_links);
if (multi_link)
- ret = sdw_transfer_defer(bus, wr_msg, &bus->defer_msg);
+ ret = sdw_transfer_defer(bus, wr_msg);
else
ret = sdw_transfer(bus, wr_msg);
@@ -723,8 +721,8 @@ static int sdw_bank_switch(struct sdw_bus *bus, int m_rt_count)
}
if (!multi_link) {
- kfree(wr_msg);
kfree(wbuf);
+ kfree(wr_msg);
bus->defer_msg.msg = NULL;
bus->params.curr_bank = !bus->params.curr_bank;
bus->params.next_bank = !bus->params.next_bank;
@@ -769,6 +767,7 @@ static int sdw_ml_sync_bank_switch(struct sdw_bus *bus)
if (bus->defer_msg.msg) {
kfree(bus->defer_msg.msg->buf);
kfree(bus->defer_msg.msg);
+ bus->defer_msg.msg = NULL;
}
return 0;
@@ -867,6 +866,7 @@ error:
if (bus->defer_msg.msg) {
kfree(bus->defer_msg.msg->buf);
kfree(bus->defer_msg.msg);
+ bus->defer_msg.msg = NULL;
}
}
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 3b1c0878bb85..47bbba04fe3a 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -199,6 +199,15 @@ config SPI_BCM_QSPI
based platforms. This driver works for both SPI master for SPI NOR
flash device as well as MSPI device.
+config SPI_BCMBCA_HSSPI
+ tristate "Broadcom BCMBCA HS SPI controller driver"
+ depends on ARCH_BCMBCA || COMPILE_TEST
+ help
+ This enables support for the High Speed SPI controller present on
+ newer Broadcom BCMBCA SoCs. These SoCs include an updated SPI controller
+ that adds the capability to allow the driver to control chip select
+ explicitly.
+
config SPI_BITBANG
tristate "Utilities for Bitbanging SPI masters"
help
@@ -247,7 +256,7 @@ config SPI_CADENCE_XSPI
Enable support for the Cadence XSPI Flash controller.
Cadence XSPI is a specialized controller for connecting an SPI
- Flash over upto 8bit wide bus. Enable this option if you have a
+ Flash over up to 8-bit wide bus. Enable this option if you have a
device with a Cadence XSPI controller and want to access the
Flash as an MTD device.
@@ -295,7 +304,6 @@ config SPI_DW_BT1
tristate "Baikal-T1 SPI driver for DW SPI core"
depends on MIPS_BAIKAL_T1 || COMPILE_TEST
select MULTIPLEXER
- select MUX_MMIO
help
Baikal-T1 SoC is equipped with three DW APB SSI-based MMIO SPI
controllers. Two of them are pretty much normal: with IRQ, DMA,
@@ -448,19 +456,19 @@ config SPI_INTEL
tristate
config SPI_INTEL_PCI
- tristate "Intel PCH/PCU SPI flash PCI driver (DANGEROUS)"
+ tristate "Intel PCH/PCU SPI flash PCI driver"
depends on PCI
depends on X86 || COMPILE_TEST
depends on SPI_MEM
select SPI_INTEL
help
This enables PCI support for the Intel PCH/PCU SPI controller in
- master mode. This controller is present in modern Intel hardware
- and is used to hold BIOS and other persistent settings. Using
- this driver it is possible to upgrade BIOS directly from Linux.
-
- Say N here unless you know what you are doing. Overwriting the
- SPI flash may render the system unbootable.
+ master mode. This controller is used to hold BIOS and other
+ persistent settings. Controllers present in modern Intel hardware
+ only work in hardware sequencing mode, this means that the
+ controller exposes a subset of operations that makes it safer to
+ use. Using this driver it is possible to upgrade BIOS directly
+ from Linux.
To compile this driver as a module, choose M here: the module
will be called spi-intel-pci.
@@ -472,10 +480,11 @@ config SPI_INTEL_PLATFORM
select SPI_INTEL
help
This enables platform support for the Intel PCH/PCU SPI
- controller in master mode. This controller is present in modern
- Intel hardware and is used to hold BIOS and other persistent
- settings. Using this driver it is possible to upgrade BIOS
- directly from Linux.
+ controller in master mode that is used to hold BIOS and other
+ persistent settings. Most of these controllers work in
+ software sequencing mode, which means that the controller
+ exposes the low level SPI-NOR opcodes to the software. Using
+ this driver it is possible to upgrade BIOS directly from Linux.
Say N here unless you know what you are doing. Overwriting the
SPI flash may render the system unbootable.
@@ -708,12 +717,6 @@ config SPI_TI_QSPI
This device supports single, dual and quad read support, while
it only supports single write mode.
-config SPI_OMAP_100K
- tristate "OMAP SPI 100K"
- depends on ARCH_OMAP850 || ARCH_OMAP730 || COMPILE_TEST
- help
- OMAP SPI 100K master controller for omap7xx boards.
-
config SPI_ORION
tristate "Orion SPI master"
depends on PLAT_ORION || ARCH_MVEBU || COMPILE_TEST
@@ -844,24 +847,6 @@ config SPI_QCOM_GENI
This driver can also be built as a module. If so, the module
will be called spi-geni-qcom.
-config SPI_S3C24XX
- tristate "Samsung S3C24XX series SPI"
- depends on ARCH_S3C24XX
- select SPI_BITBANG
- help
- SPI driver for Samsung S3C24XX series ARM SoCs
-
-config SPI_S3C24XX_FIQ
- bool "S3C24XX driver with FIQ pseudo-DMA"
- depends on SPI_S3C24XX
- select FIQ
- help
- Enable FIQ support for the S3C24XX SPI driver to provide pseudo
- DMA by using the fast-interrupt request framework, This allows
- the driver to get DMA-like performance when there are either
- no free DMA channels, or when doing transfers that required both
- TX and RX data paths.
-
config SPI_S3C64XX
tristate "Samsung S3C64XX/Exynos SoC series type SPI"
depends on (PLAT_SAMSUNG || ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST)
@@ -1166,9 +1151,6 @@ config SPI_SPIDEV
help
This supports user mode SPI protocol drivers.
- Note that this application programming interface is EXPERIMENTAL
- and hence SUBJECT TO CHANGE WITHOUT NOTICE while it stabilizes.
-
config SPI_LOOPBACK_TEST
tristate "spi loopback test framework support"
depends on m
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index be9ba40ef8d0..d87cf75bee6a 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -30,6 +30,7 @@ obj-$(CONFIG_SPI_BCM2835) += spi-bcm2835.o
obj-$(CONFIG_SPI_BCM2835AUX) += spi-bcm2835aux.o
obj-$(CONFIG_SPI_BCM63XX) += spi-bcm63xx.o
obj-$(CONFIG_SPI_BCM63XX_HSSPI) += spi-bcm63xx-hsspi.o
+obj-$(CONFIG_SPI_BCMBCA_HSSPI) += spi-bcmbca-hsspi.o
obj-$(CONFIG_SPI_BCM_QSPI) += spi-iproc-qspi.o spi-brcmstb-qspi.o spi-bcm-qspi.o
obj-$(CONFIG_SPI_BITBANG) += spi-bitbang.o
obj-$(CONFIG_SPI_BUTTERFLY) += spi-butterfly.o
@@ -91,7 +92,6 @@ obj-$(CONFIG_SPI_OC_TINY) += spi-oc-tiny.o
spi-octeon-objs := spi-cavium.o spi-cavium-octeon.o
obj-$(CONFIG_SPI_OCTEON) += spi-octeon.o
obj-$(CONFIG_SPI_OMAP_UWIRE) += spi-omap-uwire.o
-obj-$(CONFIG_SPI_OMAP_100K) += spi-omap-100k.o
obj-$(CONFIG_SPI_OMAP24XX) += spi-omap2-mcspi.o
obj-$(CONFIG_SPI_TI_QSPI) += spi-ti-qspi.o
obj-$(CONFIG_SPI_ORION) += spi-orion.o
@@ -112,8 +112,6 @@ obj-$(CONFIG_SPI_RB4XX) += spi-rb4xx.o
obj-$(CONFIG_MACH_REALTEK_RTL) += spi-realtek-rtl.o
obj-$(CONFIG_SPI_RPCIF) += spi-rpc-if.o
obj-$(CONFIG_SPI_RSPI) += spi-rspi.o
-obj-$(CONFIG_SPI_S3C24XX) += spi-s3c24xx-hw.o
-spi-s3c24xx-hw-y := spi-s3c24xx.o
obj-$(CONFIG_SPI_S3C64XX) += spi-s3c64xx.o
obj-$(CONFIG_SPI_SC18IS602) += spi-sc18is602.o
obj-$(CONFIG_SPI_SH) += spi-sh.o
diff --git a/drivers/spi/atmel-quadspi.c b/drivers/spi/atmel-quadspi.c
index 70637e46290a..f4632cb07495 100644
--- a/drivers/spi/atmel-quadspi.c
+++ b/drivers/spi/atmel-quadspi.c
@@ -406,7 +406,7 @@ static int atmel_qspi_set_cfg(struct atmel_qspi *aq,
static int atmel_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
- struct atmel_qspi *aq = spi_controller_get_devdata(mem->spi->master);
+ struct atmel_qspi *aq = spi_controller_get_devdata(mem->spi->controller);
u32 sr, offset;
int err;
@@ -476,7 +476,7 @@ static const struct spi_controller_mem_ops atmel_qspi_mem_ops = {
static int atmel_qspi_setup(struct spi_device *spi)
{
- struct spi_controller *ctrl = spi->master;
+ struct spi_controller *ctrl = spi->controller;
struct atmel_qspi *aq = spi_controller_get_devdata(ctrl);
unsigned long src_rate;
u32 scbr;
@@ -512,7 +512,7 @@ static int atmel_qspi_setup(struct spi_device *spi)
static int atmel_qspi_set_cs_timing(struct spi_device *spi)
{
- struct spi_controller *ctrl = spi->master;
+ struct spi_controller *ctrl = spi->controller;
struct atmel_qspi *aq = spi_controller_get_devdata(ctrl);
unsigned long clk_rate;
u32 cs_setup;
@@ -582,7 +582,7 @@ static int atmel_qspi_probe(struct platform_device *pdev)
struct resource *res;
int irq, err = 0;
- ctrl = devm_spi_alloc_master(&pdev->dev, sizeof(*aq));
+ ctrl = devm_spi_alloc_host(&pdev->dev, sizeof(*aq));
if (!ctrl)
return -ENOMEM;
diff --git a/drivers/spi/spi-altera-core.c b/drivers/spi/spi-altera-core.c
index de4d31c530d9..94fe6bf1b9a6 100644
--- a/drivers/spi/spi-altera-core.c
+++ b/drivers/spi/spi-altera-core.c
@@ -24,7 +24,7 @@
#define ALTERA_SPI_TXDATA 4
#define ALTERA_SPI_STATUS 8
#define ALTERA_SPI_CONTROL 12
-#define ALTERA_SPI_SLAVE_SEL 20
+#define ALTERA_SPI_TARGET_SEL 20
#define ALTERA_SPI_STATUS_ROE_MSK 0x8
#define ALTERA_SPI_STATUS_TOE_MSK 0x10
@@ -67,7 +67,7 @@ static int altr_spi_readl(struct altera_spi *hw, unsigned int reg,
static inline struct altera_spi *altera_spi_to_hw(struct spi_device *sdev)
{
- return spi_master_get_devdata(sdev->master);
+ return spi_controller_get_devdata(sdev->controller);
}
static void altera_spi_set_cs(struct spi_device *spi, bool is_high)
@@ -77,9 +77,9 @@ static void altera_spi_set_cs(struct spi_device *spi, bool is_high)
if (is_high) {
hw->imr &= ~ALTERA_SPI_CONTROL_SSO_MSK;
altr_spi_writel(hw, ALTERA_SPI_CONTROL, hw->imr);
- altr_spi_writel(hw, ALTERA_SPI_SLAVE_SEL, 0);
+ altr_spi_writel(hw, ALTERA_SPI_TARGET_SEL, 0);
} else {
- altr_spi_writel(hw, ALTERA_SPI_SLAVE_SEL,
+ altr_spi_writel(hw, ALTERA_SPI_TARGET_SEL,
BIT(spi->chip_select));
hw->imr |= ALTERA_SPI_CONTROL_SSO_MSK;
altr_spi_writel(hw, ALTERA_SPI_CONTROL, hw->imr);
@@ -139,10 +139,10 @@ static void altera_spi_rx_word(struct altera_spi *hw)
hw->count++;
}
-static int altera_spi_txrx(struct spi_master *master,
+static int altera_spi_txrx(struct spi_controller *host,
struct spi_device *spi, struct spi_transfer *t)
{
- struct altera_spi *hw = spi_master_get_devdata(master);
+ struct altera_spi *hw = spi_controller_get_devdata(host);
u32 val;
hw->tx = t->tx_buf;
@@ -175,15 +175,15 @@ static int altera_spi_txrx(struct spi_master *master,
altera_spi_rx_word(hw);
}
- spi_finalize_current_transfer(master);
+ spi_finalize_current_transfer(host);
return 0;
}
irqreturn_t altera_spi_irq(int irq, void *dev)
{
- struct spi_master *master = dev;
- struct altera_spi *hw = spi_master_get_devdata(master);
+ struct spi_controller *host = dev;
+ struct altera_spi *hw = spi_controller_get_devdata(host);
altera_spi_rx_word(hw);
@@ -194,20 +194,20 @@ irqreturn_t altera_spi_irq(int irq, void *dev)
hw->imr &= ~ALTERA_SPI_CONTROL_IRRDY_MSK;
altr_spi_writel(hw, ALTERA_SPI_CONTROL, hw->imr);
- spi_finalize_current_transfer(master);
+ spi_finalize_current_transfer(host);
}
return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(altera_spi_irq);
-void altera_spi_init_master(struct spi_master *master)
+void altera_spi_init_host(struct spi_controller *host)
{
- struct altera_spi *hw = spi_master_get_devdata(master);
+ struct altera_spi *hw = spi_controller_get_devdata(host);
u32 val;
- master->transfer_one = altera_spi_txrx;
- master->set_cs = altera_spi_set_cs;
+ host->transfer_one = altera_spi_txrx;
+ host->set_cs = altera_spi_set_cs;
/* program defaults into the registers */
hw->imr = 0; /* disable spi interrupts */
@@ -217,6 +217,6 @@ void altera_spi_init_master(struct spi_master *master)
if (val & ALTERA_SPI_STATUS_RRDY_MSK)
altr_spi_readl(hw, ALTERA_SPI_RXDATA, &val); /* flush rxdata */
}
-EXPORT_SYMBOL_GPL(altera_spi_init_master);
+EXPORT_SYMBOL_GPL(altera_spi_init_host);
MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-altera-dfl.c b/drivers/spi/spi-altera-dfl.c
index 596e181ae136..5d6e08c12dff 100644
--- a/drivers/spi/spi-altera-dfl.c
+++ b/drivers/spi/spi-altera-dfl.c
@@ -104,20 +104,20 @@ static const struct regmap_config indirect_regbus_cfg = {
.reg_read = indirect_bus_reg_read,
};
-static void config_spi_master(void __iomem *base, struct spi_master *master)
+static void config_spi_host(void __iomem *base, struct spi_controller *host)
{
u64 v;
v = readq(base + SPI_CORE_PARAMETER);
- master->mode_bits = SPI_CS_HIGH;
+ host->mode_bits = SPI_CS_HIGH;
if (FIELD_GET(CLK_POLARITY, v))
- master->mode_bits |= SPI_CPOL;
+ host->mode_bits |= SPI_CPOL;
if (FIELD_GET(CLK_PHASE, v))
- master->mode_bits |= SPI_CPHA;
+ host->mode_bits |= SPI_CPHA;
- master->num_chipselect = FIELD_GET(NUM_CHIPSELECT, v);
- master->bits_per_word_mask =
+ host->num_chipselect = FIELD_GET(NUM_CHIPSELECT, v);
+ host->bits_per_word_mask =
SPI_BPW_RANGE_MASK(1, FIELD_GET(DATA_WIDTH, v));
}
@@ -125,18 +125,18 @@ static int dfl_spi_altera_probe(struct dfl_device *dfl_dev)
{
struct spi_board_info board_info = { 0 };
struct device *dev = &dfl_dev->dev;
- struct spi_master *master;
+ struct spi_controller *host;
struct altera_spi *hw;
void __iomem *base;
int err;
- master = devm_spi_alloc_master(dev, sizeof(struct altera_spi));
- if (!master)
+ host = devm_spi_alloc_host(dev, sizeof(struct altera_spi));
+ if (!host)
return -ENOMEM;
- master->bus_num = -1;
+ host->bus_num = -1;
- hw = spi_master_get_devdata(master);
+ hw = spi_controller_get_devdata(host);
hw->dev = dev;
@@ -145,10 +145,10 @@ static int dfl_spi_altera_probe(struct dfl_device *dfl_dev)
if (IS_ERR(base))
return PTR_ERR(base);
- config_spi_master(base, master);
+ config_spi_host(base, host);
dev_dbg(dev, "%s cs %u bpm 0x%x mode 0x%x\n", __func__,
- master->num_chipselect, master->bits_per_word_mask,
- master->mode_bits);
+ host->num_chipselect, host->bits_per_word_mask,
+ host->mode_bits);
hw->regmap = devm_regmap_init(dev, NULL, base, &indirect_regbus_cfg);
if (IS_ERR(hw->regmap))
@@ -156,11 +156,11 @@ static int dfl_spi_altera_probe(struct dfl_device *dfl_dev)
hw->irq = -EINVAL;
- altera_spi_init_master(master);
+ altera_spi_init_host(host);
- err = devm_spi_register_master(dev, master);
+ err = devm_spi_register_controller(dev, host);
if (err)
- return dev_err_probe(dev, err, "%s failed to register spi master\n",
+ return dev_err_probe(dev, err, "%s failed to register spi host\n",
__func__);
if (dfl_dev->revision == FME_FEATURE_REV_MAX10_SPI_N5010)
@@ -172,7 +172,7 @@ static int dfl_spi_altera_probe(struct dfl_device *dfl_dev)
board_info.bus_num = 0;
board_info.chip_select = 0;
- if (!spi_new_device(master, &board_info)) {
+ if (!spi_new_device(host, &board_info)) {
dev_err(dev, "%s failed to create SPI device: %s\n",
__func__, board_info.modalias);
}
diff --git a/drivers/spi/spi-altera-platform.c b/drivers/spi/spi-altera-platform.c
index 65147aae82a1..72e7a0f21793 100644
--- a/drivers/spi/spi-altera-platform.c
+++ b/drivers/spi/spi-altera-platform.c
@@ -39,16 +39,16 @@ static int altera_spi_probe(struct platform_device *pdev)
struct altera_spi_platform_data *pdata = dev_get_platdata(&pdev->dev);
enum altera_spi_type type = ALTERA_SPI_TYPE_UNKNOWN;
struct altera_spi *hw;
- struct spi_master *master;
+ struct spi_controller *host;
int err = -ENODEV;
u16 i;
- master = spi_alloc_master(&pdev->dev, sizeof(struct altera_spi));
- if (!master)
+ host = spi_alloc_host(&pdev->dev, sizeof(struct altera_spi));
+ if (!host)
return err;
- /* setup the master state. */
- master->bus_num = -1;
+ /* setup the host state. */
+ host->bus_num = -1;
if (pdata) {
if (pdata->num_chipselect > ALTERA_SPI_MAX_CS) {
@@ -59,18 +59,18 @@ static int altera_spi_probe(struct platform_device *pdev)
goto exit;
}
- master->num_chipselect = pdata->num_chipselect;
- master->mode_bits = pdata->mode_bits;
- master->bits_per_word_mask = pdata->bits_per_word_mask;
+ host->num_chipselect = pdata->num_chipselect;
+ host->mode_bits = pdata->mode_bits;
+ host->bits_per_word_mask = pdata->bits_per_word_mask;
} else {
- master->num_chipselect = 16;
- master->mode_bits = SPI_CS_HIGH;
- master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 16);
+ host->num_chipselect = 16;
+ host->mode_bits = SPI_CS_HIGH;
+ host->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 16);
}
- master->dev.of_node = pdev->dev.of_node;
+ host->dev.of_node = pdev->dev.of_node;
- hw = spi_master_get_devdata(master);
+ hw = spi_controller_get_devdata(host);
hw->dev = &pdev->dev;
if (platid)
@@ -107,24 +107,24 @@ static int altera_spi_probe(struct platform_device *pdev)
}
}
- altera_spi_init_master(master);
+ altera_spi_init_host(host);
/* irq is optional */
hw->irq = platform_get_irq(pdev, 0);
if (hw->irq >= 0) {
err = devm_request_irq(&pdev->dev, hw->irq, altera_spi_irq, 0,
- pdev->name, master);
+ pdev->name, host);
if (err)
goto exit;
}
- err = devm_spi_register_master(&pdev->dev, master);
+ err = devm_spi_register_controller(&pdev->dev, host);
if (err)
goto exit;
if (pdata) {
for (i = 0; i < pdata->num_devices; i++) {
- if (!spi_new_device(master, pdata->devices + i))
+ if (!spi_new_device(host, pdata->devices + i))
dev_warn(&pdev->dev,
"unable to create SPI device: %s\n",
pdata->devices[i].modalias);
@@ -135,7 +135,7 @@ static int altera_spi_probe(struct platform_device *pdev)
return 0;
exit:
- spi_master_put(master);
+ spi_controller_put(host);
return err;
}
diff --git a/drivers/spi/spi-ar934x.c b/drivers/spi/spi-ar934x.c
index ec7250c4c810..4a6ecaa0a9c9 100644
--- a/drivers/spi/spi-ar934x.c
+++ b/drivers/spi/spi-ar934x.c
@@ -61,7 +61,7 @@ static inline int ar934x_spi_clk_div(struct ar934x_spi *sp, unsigned int freq)
static int ar934x_spi_setup(struct spi_device *spi)
{
- struct ar934x_spi *sp = spi_controller_get_devdata(spi->master);
+ struct ar934x_spi *sp = spi_controller_get_devdata(spi->controller);
if ((spi->max_speed_hz == 0) ||
(spi->max_speed_hz > (sp->clk_freq / 2))) {
@@ -74,10 +74,10 @@ static int ar934x_spi_setup(struct spi_device *spi)
return 0;
}
-static int ar934x_spi_transfer_one_message(struct spi_controller *master,
+static int ar934x_spi_transfer_one_message(struct spi_controller *ctlr,
struct spi_message *m)
{
- struct ar934x_spi *sp = spi_controller_get_devdata(master);
+ struct ar934x_spi *sp = spi_controller_get_devdata(ctlr);
struct spi_transfer *t = NULL;
struct spi_device *spi = m->spi;
unsigned long trx_done, trx_cur;
@@ -150,7 +150,7 @@ static int ar934x_spi_transfer_one_message(struct spi_controller *master,
msg_done:
m->status = stat;
- spi_finalize_current_message(master);
+ spi_finalize_current_message(ctlr);
return 0;
}
@@ -183,7 +183,7 @@ static int ar934x_spi_probe(struct platform_device *pdev)
if (ret)
return ret;
- ctlr = devm_spi_alloc_master(&pdev->dev, sizeof(*sp));
+ ctlr = devm_spi_alloc_host(&pdev->dev, sizeof(*sp));
if (!ctlr) {
dev_info(&pdev->dev, "failed to allocate spi controller\n");
ret = -ENOMEM;
diff --git a/drivers/spi/spi-armada-3700.c b/drivers/spi/spi-armada-3700.c
index 9df9fc40b783..4d554b948d71 100644
--- a/drivers/spi/spi-armada-3700.c
+++ b/drivers/spi/spi-armada-3700.c
@@ -100,7 +100,7 @@
#define A3700_SPI_CLK_CAPT_EDGE BIT(7)
struct a3700_spi {
- struct spi_master *master;
+ struct spi_controller *host;
void __iomem *base;
struct clk *clk;
unsigned int irq;
@@ -174,7 +174,7 @@ static int a3700_spi_pin_mode_set(struct a3700_spi *a3700_spi,
val |= A3700_SPI_ADDR_PIN;
break;
default:
- dev_err(&a3700_spi->master->dev, "wrong pin mode %u", pin_mode);
+ dev_err(&a3700_spi->host->dev, "wrong pin mode %u", pin_mode);
return -EINVAL;
}
@@ -278,7 +278,7 @@ static int a3700_spi_fifo_flush(struct a3700_spi *a3700_spi)
static void a3700_spi_init(struct a3700_spi *a3700_spi)
{
- struct spi_master *master = a3700_spi->master;
+ struct spi_controller *host = a3700_spi->host;
u32 val;
int i;
@@ -295,14 +295,14 @@ static void a3700_spi_init(struct a3700_spi *a3700_spi)
/* Disable AUTO_CS and deactivate all chip-selects */
a3700_spi_auto_cs_unset(a3700_spi);
- for (i = 0; i < master->num_chipselect; i++)
+ for (i = 0; i < host->num_chipselect; i++)
a3700_spi_deactivate_cs(a3700_spi, i);
/* Enable FIFO mode */
a3700_spi_fifo_mode_set(a3700_spi, true);
/* Set SPI mode */
- a3700_spi_mode_set(a3700_spi, master->mode_bits);
+ a3700_spi_mode_set(a3700_spi, host->mode_bits);
/* Reset counters */
spireg_write(a3700_spi, A3700_SPI_IF_HDR_CNT_REG, 0);
@@ -315,11 +315,11 @@ static void a3700_spi_init(struct a3700_spi *a3700_spi)
static irqreturn_t a3700_spi_interrupt(int irq, void *dev_id)
{
- struct spi_master *master = dev_id;
+ struct spi_controller *host = dev_id;
struct a3700_spi *a3700_spi;
u32 cause;
- a3700_spi = spi_master_get_devdata(master);
+ a3700_spi = spi_controller_get_devdata(host);
/* Get interrupt causes */
cause = spireg_read(a3700_spi, A3700_SPI_INT_STAT_REG);
@@ -344,7 +344,7 @@ static bool a3700_spi_wait_completion(struct spi_device *spi)
unsigned int ctrl_reg;
unsigned long timeout_jiffies;
- a3700_spi = spi_master_get_devdata(spi->master);
+ a3700_spi = spi_controller_get_devdata(spi->controller);
/* SPI interrupt is edge-triggered, which means an interrupt will
* be generated only when detecting a specific status bit changed
@@ -393,7 +393,7 @@ static bool a3700_spi_transfer_wait(struct spi_device *spi,
{
struct a3700_spi *a3700_spi;
- a3700_spi = spi_master_get_devdata(spi->master);
+ a3700_spi = spi_controller_get_devdata(spi->controller);
a3700_spi->wait_mask = bit_mask;
return a3700_spi_wait_completion(spi);
@@ -417,7 +417,7 @@ static void a3700_spi_transfer_setup(struct spi_device *spi,
{
struct a3700_spi *a3700_spi;
- a3700_spi = spi_master_get_devdata(spi->master);
+ a3700_spi = spi_controller_get_devdata(spi->controller);
a3700_spi_clock_set(a3700_spi, xfer->speed_hz);
@@ -434,7 +434,7 @@ static void a3700_spi_transfer_setup(struct spi_device *spi,
static void a3700_spi_set_cs(struct spi_device *spi, bool enable)
{
- struct a3700_spi *a3700_spi = spi_master_get_devdata(spi->master);
+ struct a3700_spi *a3700_spi = spi_controller_get_devdata(spi->controller);
if (!enable)
a3700_spi_activate_cs(a3700_spi, spi->chip_select);
@@ -565,10 +565,10 @@ static void a3700_spi_transfer_abort_fifo(struct a3700_spi *a3700_spi)
spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val);
}
-static int a3700_spi_prepare_message(struct spi_master *master,
+static int a3700_spi_prepare_message(struct spi_controller *host,
struct spi_message *message)
{
- struct a3700_spi *a3700_spi = spi_master_get_devdata(master);
+ struct a3700_spi *a3700_spi = spi_controller_get_devdata(host);
struct spi_device *spi = message->spi;
int ret;
@@ -588,11 +588,11 @@ static int a3700_spi_prepare_message(struct spi_master *master,
return 0;
}
-static int a3700_spi_transfer_one_fifo(struct spi_master *master,
+static int a3700_spi_transfer_one_fifo(struct spi_controller *host,
struct spi_device *spi,
struct spi_transfer *xfer)
{
- struct a3700_spi *a3700_spi = spi_master_get_devdata(master);
+ struct a3700_spi *a3700_spi = spi_controller_get_devdata(host);
int ret = 0, timeout = A3700_SPI_TIMEOUT;
unsigned int nbits = 0, byte_len;
u32 val;
@@ -732,16 +732,16 @@ static int a3700_spi_transfer_one_fifo(struct spi_master *master,
error:
a3700_spi_transfer_abort_fifo(a3700_spi);
out:
- spi_finalize_current_transfer(master);
+ spi_finalize_current_transfer(host);
return ret;
}
-static int a3700_spi_transfer_one_full_duplex(struct spi_master *master,
+static int a3700_spi_transfer_one_full_duplex(struct spi_controller *host,
struct spi_device *spi,
struct spi_transfer *xfer)
{
- struct a3700_spi *a3700_spi = spi_master_get_devdata(master);
+ struct a3700_spi *a3700_spi = spi_controller_get_devdata(host);
u32 val;
/* Disable FIFO mode */
@@ -777,27 +777,27 @@ static int a3700_spi_transfer_one_full_duplex(struct spi_master *master,
}
- spi_finalize_current_transfer(master);
+ spi_finalize_current_transfer(host);
return 0;
}
-static int a3700_spi_transfer_one(struct spi_master *master,
+static int a3700_spi_transfer_one(struct spi_controller *host,
struct spi_device *spi,
struct spi_transfer *xfer)
{
a3700_spi_transfer_setup(spi, xfer);
if (xfer->tx_buf && xfer->rx_buf)
- return a3700_spi_transfer_one_full_duplex(master, spi, xfer);
+ return a3700_spi_transfer_one_full_duplex(host, spi, xfer);
- return a3700_spi_transfer_one_fifo(master, spi, xfer);
+ return a3700_spi_transfer_one_fifo(host, spi, xfer);
}
-static int a3700_spi_unprepare_message(struct spi_master *master,
+static int a3700_spi_unprepare_message(struct spi_controller *host,
struct spi_message *message)
{
- struct a3700_spi *a3700_spi = spi_master_get_devdata(master);
+ struct a3700_spi *a3700_spi = spi_controller_get_devdata(host);
clk_disable(a3700_spi->clk);
@@ -815,14 +815,14 @@ static int a3700_spi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *of_node = dev->of_node;
- struct spi_master *master;
+ struct spi_controller *host;
struct a3700_spi *spi;
u32 num_cs = 0;
int irq, ret = 0;
- master = spi_alloc_master(dev, sizeof(*spi));
- if (!master) {
- dev_err(dev, "master allocation failed\n");
+ host = spi_alloc_host(dev, sizeof(*spi));
+ if (!host) {
+ dev_err(dev, "host allocation failed\n");
ret = -ENOMEM;
goto out;
}
@@ -833,23 +833,23 @@ static int a3700_spi_probe(struct platform_device *pdev)
goto error;
}
- master->bus_num = pdev->id;
- master->dev.of_node = of_node;
- master->mode_bits = SPI_MODE_3;
- master->num_chipselect = num_cs;
- master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(32);
- master->prepare_message = a3700_spi_prepare_message;
- master->transfer_one = a3700_spi_transfer_one;
- master->unprepare_message = a3700_spi_unprepare_message;
- master->set_cs = a3700_spi_set_cs;
- master->mode_bits |= (SPI_RX_DUAL | SPI_TX_DUAL |
+ host->bus_num = pdev->id;
+ host->dev.of_node = of_node;
+ host->mode_bits = SPI_MODE_3;
+ host->num_chipselect = num_cs;
+ host->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(32);
+ host->prepare_message = a3700_spi_prepare_message;
+ host->transfer_one = a3700_spi_transfer_one;
+ host->unprepare_message = a3700_spi_unprepare_message;
+ host->set_cs = a3700_spi_set_cs;
+ host->mode_bits |= (SPI_RX_DUAL | SPI_TX_DUAL |
SPI_RX_QUAD | SPI_TX_QUAD);
- platform_set_drvdata(pdev, master);
+ platform_set_drvdata(pdev, host);
- spi = spi_master_get_devdata(master);
+ spi = spi_controller_get_devdata(host);
- spi->master = master;
+ spi->host = host;
spi->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(spi->base)) {
@@ -878,23 +878,23 @@ static int a3700_spi_probe(struct platform_device *pdev)
goto error;
}
- master->max_speed_hz = min_t(unsigned long, A3700_SPI_MAX_SPEED_HZ,
+ host->max_speed_hz = min_t(unsigned long, A3700_SPI_MAX_SPEED_HZ,
clk_get_rate(spi->clk));
- master->min_speed_hz = DIV_ROUND_UP(clk_get_rate(spi->clk),
+ host->min_speed_hz = DIV_ROUND_UP(clk_get_rate(spi->clk),
A3700_SPI_MAX_PRESCALE);
a3700_spi_init(spi);
ret = devm_request_irq(dev, spi->irq, a3700_spi_interrupt, 0,
- dev_name(dev), master);
+ dev_name(dev), host);
if (ret) {
dev_err(dev, "could not request IRQ: %d\n", ret);
goto error_clk;
}
- ret = devm_spi_register_master(dev, master);
+ ret = devm_spi_register_controller(dev, host);
if (ret) {
- dev_err(dev, "Failed to register master\n");
+ dev_err(dev, "Failed to register host\n");
goto error_clk;
}
@@ -903,15 +903,15 @@ static int a3700_spi_probe(struct platform_device *pdev)
error_clk:
clk_unprepare(spi->clk);
error:
- spi_master_put(master);
+ spi_controller_put(host);
out:
return ret;
}
static int a3700_spi_remove(struct platform_device *pdev)
{
- struct spi_master *master = platform_get_drvdata(pdev);
- struct a3700_spi *spi = spi_master_get_devdata(master);
+ struct spi_controller *host = platform_get_drvdata(pdev);
+ struct a3700_spi *spi = spi_controller_get_devdata(host);
clk_unprepare(spi->clk);
diff --git a/drivers/spi/spi-at91-usart.c b/drivers/spi/spi-at91-usart.c
index 9cd738682aab..fab9d223e24a 100644
--- a/drivers/spi/spi-at91-usart.c
+++ b/drivers/spi/spi-at91-usart.c
@@ -38,7 +38,7 @@
#define US_CR_TXEN BIT(6)
#define US_CR_TXDIS BIT(7)
-#define US_MR_SPI_MASTER 0x0E
+#define US_MR_SPI_HOST 0x0E
#define US_MR_CHRL GENMASK(7, 6)
#define US_MR_CPHA BIT(8)
#define US_MR_CPOL BIT(16)
@@ -61,7 +61,7 @@
#define US_OVRE_RXRDY_IRQS (US_IR_OVRE | US_IR_RXRDY)
#define US_INIT \
- (US_MR_SPI_MASTER | US_MR_CHRL | US_MR_CLKO | US_MR_WRDBT)
+ (US_MR_SPI_HOST | US_MR_CHRL | US_MR_CLKO | US_MR_WRDBT)
#define US_DMA_MIN_BYTES 16
#define US_DMA_TIMEOUT (msecs_to_jiffies(1000))
@@ -104,7 +104,7 @@ struct at91_usart_spi {
static void dma_callback(void *data)
{
struct spi_controller *ctlr = data;
- struct at91_usart_spi *aus = spi_master_get_devdata(ctlr);
+ struct at91_usart_spi *aus = spi_controller_get_devdata(ctlr);
at91_usart_spi_writel(aus, IER, US_IR_RXRDY);
aus->current_rx_remaining_bytes = 0;
@@ -115,7 +115,7 @@ static bool at91_usart_spi_can_dma(struct spi_controller *ctrl,
struct spi_device *spi,
struct spi_transfer *xfer)
{
- struct at91_usart_spi *aus = spi_master_get_devdata(ctrl);
+ struct at91_usart_spi *aus = spi_controller_get_devdata(ctrl);
return aus->use_dma && xfer->len >= US_DMA_MIN_BYTES;
}
@@ -216,7 +216,7 @@ static void at91_usart_spi_stop_dma(struct spi_controller *ctlr)
static int at91_usart_spi_dma_transfer(struct spi_controller *ctlr,
struct spi_transfer *xfer)
{
- struct at91_usart_spi *aus = spi_master_get_devdata(ctlr);
+ struct at91_usart_spi *aus = spi_controller_get_devdata(ctlr);
struct dma_chan *rxchan = ctlr->dma_rx;
struct dma_chan *txchan = ctlr->dma_tx;
struct dma_async_tx_descriptor *rxdesc;
@@ -334,7 +334,7 @@ at91_usart_spi_set_xfer_speed(struct at91_usart_spi *aus,
static irqreturn_t at91_usart_spi_interrupt(int irq, void *dev_id)
{
struct spi_controller *controller = dev_id;
- struct at91_usart_spi *aus = spi_master_get_devdata(controller);
+ struct at91_usart_spi *aus = spi_controller_get_devdata(controller);
spin_lock(&aus->lock);
at91_usart_spi_read_status(aus);
@@ -359,7 +359,7 @@ static irqreturn_t at91_usart_spi_interrupt(int irq, void *dev_id)
static int at91_usart_spi_setup(struct spi_device *spi)
{
- struct at91_usart_spi *aus = spi_master_get_devdata(spi->controller);
+ struct at91_usart_spi *aus = spi_controller_get_devdata(spi->controller);
u32 *ausd = spi->controller_state;
unsigned int mr = at91_usart_spi_readl(aus, MR);
@@ -399,7 +399,7 @@ static int at91_usart_spi_transfer_one(struct spi_controller *ctlr,
struct spi_device *spi,
struct spi_transfer *xfer)
{
- struct at91_usart_spi *aus = spi_master_get_devdata(ctlr);
+ struct at91_usart_spi *aus = spi_controller_get_devdata(ctlr);
unsigned long dma_timeout = 0;
int ret = 0;
@@ -444,7 +444,7 @@ static int at91_usart_spi_transfer_one(struct spi_controller *ctlr,
static int at91_usart_spi_prepare_message(struct spi_controller *ctlr,
struct spi_message *message)
{
- struct at91_usart_spi *aus = spi_master_get_devdata(ctlr);
+ struct at91_usart_spi *aus = spi_controller_get_devdata(ctlr);
struct spi_device *spi = message->spi;
u32 *ausd = spi->controller_state;
@@ -458,7 +458,7 @@ static int at91_usart_spi_prepare_message(struct spi_controller *ctlr,
static int at91_usart_spi_unprepare_message(struct spi_controller *ctlr,
struct spi_message *message)
{
- struct at91_usart_spi *aus = spi_master_get_devdata(ctlr);
+ struct at91_usart_spi *aus = spi_controller_get_devdata(ctlr);
at91_usart_spi_writel(aus, CR, US_RESET | US_DISABLE);
at91_usart_spi_writel(aus, IDR, US_OVRE_RXRDY_IRQS);
@@ -515,7 +515,7 @@ static int at91_usart_spi_probe(struct platform_device *pdev)
return PTR_ERR(clk);
ret = -ENOMEM;
- controller = spi_alloc_master(&pdev->dev, sizeof(*aus));
+ controller = spi_alloc_host(&pdev->dev, sizeof(*aus));
if (!controller)
goto at91_usart_spi_probe_fail;
@@ -539,7 +539,7 @@ static int at91_usart_spi_probe(struct platform_device *pdev)
US_MAX_CLK_DIV);
platform_set_drvdata(pdev, controller);
- aus = spi_master_get_devdata(controller);
+ aus = spi_controller_get_devdata(controller);
aus->dev = &pdev->dev;
aus->regs = devm_ioremap_resource(&pdev->dev, regs);
@@ -574,9 +574,9 @@ static int at91_usart_spi_probe(struct platform_device *pdev)
spin_lock_init(&aus->lock);
init_completion(&aus->xfer_completion);
- ret = devm_spi_register_master(&pdev->dev, controller);
+ ret = devm_spi_register_controller(&pdev->dev, controller);
if (ret)
- goto at91_usart_fail_register_master;
+ goto at91_usart_fail_register_controller;
dev_info(&pdev->dev,
"AT91 USART SPI Controller version 0x%x at %pa (irq %d)\n",
@@ -585,19 +585,19 @@ static int at91_usart_spi_probe(struct platform_device *pdev)
return 0;
-at91_usart_fail_register_master:
+at91_usart_fail_register_controller:
at91_usart_spi_release_dma(controller);
at91_usart_fail_dma:
clk_disable_unprepare(clk);
at91_usart_spi_probe_fail:
- spi_master_put(controller);
+ spi_controller_put(controller);
return ret;
}
__maybe_unused static int at91_usart_spi_runtime_suspend(struct device *dev)
{
struct spi_controller *ctlr = dev_get_drvdata(dev);
- struct at91_usart_spi *aus = spi_master_get_devdata(ctlr);
+ struct at91_usart_spi *aus = spi_controller_get_devdata(ctlr);
clk_disable_unprepare(aus->clk);
pinctrl_pm_select_sleep_state(dev);
@@ -608,7 +608,7 @@ __maybe_unused static int at91_usart_spi_runtime_suspend(struct device *dev)
__maybe_unused static int at91_usart_spi_runtime_resume(struct device *dev)
{
struct spi_controller *ctrl = dev_get_drvdata(dev);
- struct at91_usart_spi *aus = spi_master_get_devdata(ctrl);
+ struct at91_usart_spi *aus = spi_controller_get_devdata(ctrl);
pinctrl_pm_select_default_state(dev);
@@ -633,7 +633,7 @@ __maybe_unused static int at91_usart_spi_suspend(struct device *dev)
__maybe_unused static int at91_usart_spi_resume(struct device *dev)
{
struct spi_controller *ctrl = dev_get_drvdata(dev);
- struct at91_usart_spi *aus = spi_master_get_devdata(ctrl);
+ struct at91_usart_spi *aus = spi_controller_get_devdata(ctrl);
int ret;
if (!pm_runtime_suspended(dev)) {
@@ -650,7 +650,7 @@ __maybe_unused static int at91_usart_spi_resume(struct device *dev)
static int at91_usart_spi_remove(struct platform_device *pdev)
{
struct spi_controller *ctlr = platform_get_drvdata(pdev);
- struct at91_usart_spi *aus = spi_master_get_devdata(ctlr);
+ struct at91_usart_spi *aus = spi_controller_get_devdata(ctlr);
at91_usart_spi_release_dma(ctlr);
clk_disable_unprepare(aus->clk);
diff --git a/drivers/spi/spi-ath79.c b/drivers/spi/spi-ath79.c
index 607e7a49fb89..795e88dbef1b 100644
--- a/drivers/spi/spi-ath79.c
+++ b/drivers/spi/spi-ath79.c
@@ -58,7 +58,7 @@ static inline void ath79_spi_wr(struct ath79_spi *sp, unsigned int reg, u32 val)
static inline struct ath79_spi *ath79_spidev_to_sp(struct spi_device *spi)
{
- return spi_master_get_devdata(spi->master);
+ return spi_controller_get_devdata(spi->controller);
}
static inline void ath79_spi_delay(struct ath79_spi *sp, unsigned int nsecs)
@@ -120,7 +120,7 @@ static u32 ath79_spi_txrx_mode0(struct spi_device *spi, unsigned int nsecs,
else
out = ioc & ~AR71XX_SPI_IOC_DO;
- /* setup MSB (to slave) on trailing edge */
+ /* setup MSB (to target) on trailing edge */
ath79_spi_wr(sp, AR71XX_SPI_REG_IOC, out);
ath79_spi_delay(sp, nsecs);
ath79_spi_wr(sp, AR71XX_SPI_REG_IOC, out | AR71XX_SPI_IOC_CLK);
@@ -168,28 +168,28 @@ static const struct spi_controller_mem_ops ath79_mem_ops = {
static int ath79_spi_probe(struct platform_device *pdev)
{
- struct spi_master *master;
+ struct spi_controller *host;
struct ath79_spi *sp;
unsigned long rate;
int ret;
- master = spi_alloc_master(&pdev->dev, sizeof(*sp));
- if (master == NULL) {
- dev_err(&pdev->dev, "failed to allocate spi master\n");
+ host = spi_alloc_host(&pdev->dev, sizeof(*sp));
+ if (host == NULL) {
+ dev_err(&pdev->dev, "failed to allocate spi host\n");
return -ENOMEM;
}
- sp = spi_master_get_devdata(master);
- master->dev.of_node = pdev->dev.of_node;
+ sp = spi_controller_get_devdata(host);
+ host->dev.of_node = pdev->dev.of_node;
platform_set_drvdata(pdev, sp);
- master->use_gpio_descriptors = true;
- master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
- master->flags = SPI_MASTER_GPIO_SS;
- master->num_chipselect = 3;
- master->mem_ops = &ath79_mem_ops;
+ host->use_gpio_descriptors = true;
+ host->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
+ host->flags = SPI_MASTER_GPIO_SS;
+ host->num_chipselect = 3;
+ host->mem_ops = &ath79_mem_ops;
- sp->bitbang.master = master;
+ sp->bitbang.master = host;
sp->bitbang.chipselect = ath79_spi_chipselect;
sp->bitbang.txrx_word[SPI_MODE_0] = ath79_spi_txrx_mode0;
sp->bitbang.flags = SPI_CS_HIGH;
@@ -197,18 +197,18 @@ static int ath79_spi_probe(struct platform_device *pdev)
sp->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(sp->base)) {
ret = PTR_ERR(sp->base);
- goto err_put_master;
+ goto err_put_host;
}
sp->clk = devm_clk_get(&pdev->dev, "ahb");
if (IS_ERR(sp->clk)) {
ret = PTR_ERR(sp->clk);
- goto err_put_master;
+ goto err_put_host;
}
ret = clk_prepare_enable(sp->clk);
if (ret)
- goto err_put_master;
+ goto err_put_host;
rate = DIV_ROUND_UP(clk_get_rate(sp->clk), MHZ);
if (!rate) {
@@ -231,8 +231,8 @@ err_disable:
ath79_spi_disable(sp);
err_clk_disable:
clk_disable_unprepare(sp->clk);
-err_put_master:
- spi_master_put(sp->bitbang.master);
+err_put_host:
+ spi_controller_put(host);
return ret;
}
@@ -244,7 +244,7 @@ static int ath79_spi_remove(struct platform_device *pdev)
spi_bitbang_stop(&sp->bitbang);
ath79_spi_disable(sp);
clk_disable_unprepare(sp->clk);
- spi_master_put(sp->bitbang.master);
+ spi_controller_put(sp->bitbang.master);
return 0;
}
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index c4f22d50dba5..5c5678f065f3 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -358,7 +358,7 @@ static void cs_activate(struct atmel_spi *as, struct spi_device *spi)
u32 csr;
/* Make sure clock polarity is correct */
- for (i = 0; i < spi->master->num_chipselect; i++) {
+ for (i = 0; i < spi->controller->num_chipselect; i++) {
csr = spi_readl(as, CSR0 + 4 * i);
if ((csr ^ cpol) & SPI_BIT(CPOL))
spi_writel(as, CSR0 + 4 * i,
@@ -419,11 +419,11 @@ static inline bool atmel_spi_use_dma(struct atmel_spi *as,
return as->use_dma && xfer->len >= DMA_MIN_BYTES;
}
-static bool atmel_spi_can_dma(struct spi_master *master,
+static bool atmel_spi_can_dma(struct spi_controller *host,
struct spi_device *spi,
struct spi_transfer *xfer)
{
- struct atmel_spi *as = spi_master_get_devdata(master);
+ struct atmel_spi *as = spi_controller_get_devdata(host);
if (IS_ENABLED(CONFIG_SOC_SAM_V4_V5))
return atmel_spi_use_dma(as, xfer) &&
@@ -435,7 +435,7 @@ static bool atmel_spi_can_dma(struct spi_master *master,
static int atmel_spi_dma_slave_config(struct atmel_spi *as, u8 bits_per_word)
{
- struct spi_master *master = platform_get_drvdata(as->pdev);
+ struct spi_controller *host = platform_get_drvdata(as->pdev);
struct dma_slave_config slave_config;
int err = 0;
@@ -467,21 +467,21 @@ static int atmel_spi_dma_slave_config(struct atmel_spi *as, u8 bits_per_word)
* So we'd rather write only one data at the time. Hence the transmit
* path works the same whether FIFOs are available (and enabled) or not.
*/
- if (dmaengine_slave_config(master->dma_tx, &slave_config)) {
+ if (dmaengine_slave_config(host->dma_tx, &slave_config)) {
dev_err(&as->pdev->dev,
"failed to configure tx dma channel\n");
err = -EINVAL;
}
/*
- * This driver configures the spi controller for master mode (MSTR bit
+ * This driver configures the spi controller for host mode (MSTR bit
* set to '1' in the Mode Register).
* So according to the datasheet, when FIFOs are available (and
* enabled), the Receive FIFO operates in Single Data Mode.
* So the receive path works the same whether FIFOs are available (and
* enabled) or not.
*/
- if (dmaengine_slave_config(master->dma_rx, &slave_config)) {
+ if (dmaengine_slave_config(host->dma_rx, &slave_config)) {
dev_err(&as->pdev->dev,
"failed to configure rx dma channel\n");
err = -EINVAL;
@@ -490,22 +490,22 @@ static int atmel_spi_dma_slave_config(struct atmel_spi *as, u8 bits_per_word)
return err;
}
-static int atmel_spi_configure_dma(struct spi_master *master,
+static int atmel_spi_configure_dma(struct spi_controller *host,
struct atmel_spi *as)
{
struct device *dev = &as->pdev->dev;
int err;
- master->dma_tx = dma_request_chan(dev, "tx");
- if (IS_ERR(master->dma_tx)) {
- err = PTR_ERR(master->dma_tx);
+ host->dma_tx = dma_request_chan(dev, "tx");
+ if (IS_ERR(host->dma_tx)) {
+ err = PTR_ERR(host->dma_tx);
dev_dbg(dev, "No TX DMA channel, DMA is disabled\n");
goto error_clear;
}
- master->dma_rx = dma_request_chan(dev, "rx");
- if (IS_ERR(master->dma_rx)) {
- err = PTR_ERR(master->dma_rx);
+ host->dma_rx = dma_request_chan(dev, "rx");
+ if (IS_ERR(host->dma_rx)) {
+ err = PTR_ERR(host->dma_rx);
/*
* No reason to check EPROBE_DEFER here since we have already
* requested tx channel.
@@ -520,45 +520,45 @@ static int atmel_spi_configure_dma(struct spi_master *master,
dev_info(&as->pdev->dev,
"Using %s (tx) and %s (rx) for DMA transfers\n",
- dma_chan_name(master->dma_tx),
- dma_chan_name(master->dma_rx));
+ dma_chan_name(host->dma_tx),
+ dma_chan_name(host->dma_rx));
return 0;
error:
- if (!IS_ERR(master->dma_rx))
- dma_release_channel(master->dma_rx);
- if (!IS_ERR(master->dma_tx))
- dma_release_channel(master->dma_tx);
+ if (!IS_ERR(host->dma_rx))
+ dma_release_channel(host->dma_rx);
+ if (!IS_ERR(host->dma_tx))
+ dma_release_channel(host->dma_tx);
error_clear:
- master->dma_tx = master->dma_rx = NULL;
+ host->dma_tx = host->dma_rx = NULL;
return err;
}
-static void atmel_spi_stop_dma(struct spi_master *master)
+static void atmel_spi_stop_dma(struct spi_controller *host)
{
- if (master->dma_rx)
- dmaengine_terminate_all(master->dma_rx);
- if (master->dma_tx)
- dmaengine_terminate_all(master->dma_tx);
+ if (host->dma_rx)
+ dmaengine_terminate_all(host->dma_rx);
+ if (host->dma_tx)
+ dmaengine_terminate_all(host->dma_tx);
}
-static void atmel_spi_release_dma(struct spi_master *master)
+static void atmel_spi_release_dma(struct spi_controller *host)
{
- if (master->dma_rx) {
- dma_release_channel(master->dma_rx);
- master->dma_rx = NULL;
+ if (host->dma_rx) {
+ dma_release_channel(host->dma_rx);
+ host->dma_rx = NULL;
}
- if (master->dma_tx) {
- dma_release_channel(master->dma_tx);
- master->dma_tx = NULL;
+ if (host->dma_tx) {
+ dma_release_channel(host->dma_tx);
+ host->dma_tx = NULL;
}
}
/* This function is called by the DMA driver from tasklet context */
static void dma_callback(void *data)
{
- struct spi_master *master = data;
- struct atmel_spi *as = spi_master_get_devdata(master);
+ struct spi_controller *host = data;
+ struct atmel_spi *as = spi_controller_get_devdata(host);
if (is_vmalloc_addr(as->current_transfer->rx_buf) &&
IS_ENABLED(CONFIG_SOC_SAM_V4_V5)) {
@@ -571,13 +571,13 @@ static void dma_callback(void *data)
/*
* Next transfer using PIO without FIFO.
*/
-static void atmel_spi_next_xfer_single(struct spi_master *master,
+static void atmel_spi_next_xfer_single(struct spi_controller *host,
struct spi_transfer *xfer)
{
- struct atmel_spi *as = spi_master_get_devdata(master);
+ struct atmel_spi *as = spi_controller_get_devdata(host);
unsigned long xfer_pos = xfer->len - as->current_remaining_bytes;
- dev_vdbg(master->dev.parent, "atmel_spi_next_xfer_pio\n");
+ dev_vdbg(host->dev.parent, "atmel_spi_next_xfer_pio\n");
/* Make sure data is not remaining in RDR */
spi_readl(as, RDR);
@@ -591,7 +591,7 @@ static void atmel_spi_next_xfer_single(struct spi_master *master,
else
spi_writel(as, TDR, *(u8 *)(xfer->tx_buf + xfer_pos));
- dev_dbg(master->dev.parent,
+ dev_dbg(host->dev.parent,
" start pio xfer %p: len %u tx %p rx %p bitpw %d\n",
xfer, xfer->len, xfer->tx_buf, xfer->rx_buf,
xfer->bits_per_word);
@@ -603,10 +603,10 @@ static void atmel_spi_next_xfer_single(struct spi_master *master,
/*
* Next transfer using PIO with FIFO.
*/
-static void atmel_spi_next_xfer_fifo(struct spi_master *master,
+static void atmel_spi_next_xfer_fifo(struct spi_controller *host,
struct spi_transfer *xfer)
{
- struct atmel_spi *as = spi_master_get_devdata(master);
+ struct atmel_spi *as = spi_controller_get_devdata(host);
u32 current_remaining_data, num_data;
u32 offset = xfer->len - as->current_remaining_bytes;
const u16 *words = (const u16 *)((u8 *)xfer->tx_buf + offset);
@@ -614,7 +614,7 @@ static void atmel_spi_next_xfer_fifo(struct spi_master *master,
u16 td0, td1;
u32 fifomr;
- dev_vdbg(master->dev.parent, "atmel_spi_next_xfer_fifo\n");
+ dev_vdbg(host->dev.parent, "atmel_spi_next_xfer_fifo\n");
/* Compute the number of data to transfer in the current iteration */
current_remaining_data = ((xfer->bits_per_word > 8) ?
@@ -658,7 +658,7 @@ static void atmel_spi_next_xfer_fifo(struct spi_master *master,
num_data--;
}
- dev_dbg(master->dev.parent,
+ dev_dbg(host->dev.parent,
" start fifo xfer %p: len %u tx %p rx %p bitpw %d\n",
xfer, xfer->len, xfer->tx_buf, xfer->rx_buf,
xfer->bits_per_word);
@@ -673,32 +673,32 @@ static void atmel_spi_next_xfer_fifo(struct spi_master *master,
/*
* Next transfer using PIO.
*/
-static void atmel_spi_next_xfer_pio(struct spi_master *master,
+static void atmel_spi_next_xfer_pio(struct spi_controller *host,
struct spi_transfer *xfer)
{
- struct atmel_spi *as = spi_master_get_devdata(master);
+ struct atmel_spi *as = spi_controller_get_devdata(host);
if (as->fifo_size)
- atmel_spi_next_xfer_fifo(master, xfer);
+ atmel_spi_next_xfer_fifo(host, xfer);
else
- atmel_spi_next_xfer_single(master, xfer);
+ atmel_spi_next_xfer_single(host, xfer);
}
/*
* Submit next transfer for DMA.
*/
-static int atmel_spi_next_xfer_dma_submit(struct spi_master *master,
+static int atmel_spi_next_xfer_dma_submit(struct spi_controller *host,
struct spi_transfer *xfer,
u32 *plen)
{
- struct atmel_spi *as = spi_master_get_devdata(master);
- struct dma_chan *rxchan = master->dma_rx;
- struct dma_chan *txchan = master->dma_tx;
+ struct atmel_spi *as = spi_controller_get_devdata(host);
+ struct dma_chan *rxchan = host->dma_rx;
+ struct dma_chan *txchan = host->dma_tx;
struct dma_async_tx_descriptor *rxdesc;
struct dma_async_tx_descriptor *txdesc;
dma_cookie_t cookie;
- dev_vdbg(master->dev.parent, "atmel_spi_next_xfer_dma_submit\n");
+ dev_vdbg(host->dev.parent, "atmel_spi_next_xfer_dma_submit\n");
/* Check that the channels are available */
if (!rxchan || !txchan)
@@ -749,7 +749,7 @@ static int atmel_spi_next_xfer_dma_submit(struct spi_master *master,
if (!txdesc)
goto err_dma;
- dev_dbg(master->dev.parent,
+ dev_dbg(host->dev.parent,
" start dma xfer %p: len %u tx %p/%08llx rx %p/%08llx\n",
xfer, xfer->len, xfer->tx_buf, (unsigned long long)xfer->tx_dma,
xfer->rx_buf, (unsigned long long)xfer->rx_dma);
@@ -759,7 +759,7 @@ static int atmel_spi_next_xfer_dma_submit(struct spi_master *master,
/* Put the callback on the RX transfer only, that should finish last */
rxdesc->callback = dma_callback;
- rxdesc->callback_param = master;
+ rxdesc->callback_param = host;
/* Submit and fire RX and TX with TX last so we're ready to read! */
cookie = rxdesc->tx_submit(rxdesc);
@@ -775,12 +775,12 @@ static int atmel_spi_next_xfer_dma_submit(struct spi_master *master,
err_dma:
spi_writel(as, IDR, SPI_BIT(OVRES));
- atmel_spi_stop_dma(master);
+ atmel_spi_stop_dma(host);
err_exit:
return -ENOMEM;
}
-static void atmel_spi_next_xfer_data(struct spi_master *master,
+static void atmel_spi_next_xfer_data(struct spi_controller *host,
struct spi_transfer *xfer,
dma_addr_t *tx_dma,
dma_addr_t *rx_dma,
@@ -788,8 +788,8 @@ static void atmel_spi_next_xfer_data(struct spi_master *master,
{
*rx_dma = xfer->rx_dma + xfer->len - *plen;
*tx_dma = xfer->tx_dma + xfer->len - *plen;
- if (*plen > master->max_dma_len)
- *plen = master->max_dma_len;
+ if (*plen > host->max_dma_len)
+ *plen = host->max_dma_len;
}
static int atmel_spi_set_xfer_speed(struct atmel_spi *as,
@@ -844,17 +844,17 @@ static int atmel_spi_set_xfer_speed(struct atmel_spi *as,
* Submit next transfer for PDC.
* lock is held, spi irq is blocked
*/
-static void atmel_spi_pdc_next_xfer(struct spi_master *master,
+static void atmel_spi_pdc_next_xfer(struct spi_controller *host,
struct spi_transfer *xfer)
{
- struct atmel_spi *as = spi_master_get_devdata(master);
+ struct atmel_spi *as = spi_controller_get_devdata(host);
u32 len;
dma_addr_t tx_dma, rx_dma;
spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS));
len = as->current_remaining_bytes;
- atmel_spi_next_xfer_data(master, xfer, &tx_dma, &rx_dma, &len);
+ atmel_spi_next_xfer_data(host, xfer, &tx_dma, &rx_dma, &len);
as->current_remaining_bytes -= len;
spi_writel(as, RPR, rx_dma);
@@ -865,7 +865,7 @@ static void atmel_spi_pdc_next_xfer(struct spi_master *master,
spi_writel(as, RCR, len);
spi_writel(as, TCR, len);
- dev_dbg(&master->dev,
+ dev_dbg(&host->dev,
" start xfer %p: len %u tx %p/%08llx rx %p/%08llx\n",
xfer, xfer->len, xfer->tx_buf,
(unsigned long long)xfer->tx_dma, xfer->rx_buf,
@@ -873,7 +873,7 @@ static void atmel_spi_pdc_next_xfer(struct spi_master *master,
if (as->current_remaining_bytes) {
len = as->current_remaining_bytes;
- atmel_spi_next_xfer_data(master, xfer, &tx_dma, &rx_dma, &len);
+ atmel_spi_next_xfer_data(host, xfer, &tx_dma, &rx_dma, &len);
as->current_remaining_bytes -= len;
spi_writel(as, RNPR, rx_dma);
@@ -884,7 +884,7 @@ static void atmel_spi_pdc_next_xfer(struct spi_master *master,
spi_writel(as, RNCR, len);
spi_writel(as, TNCR, len);
- dev_dbg(&master->dev,
+ dev_dbg(&host->dev,
" next xfer %p: len %u tx %p/%08llx rx %p/%08llx\n",
xfer, xfer->len, xfer->tx_buf,
(unsigned long long)xfer->tx_dma, xfer->rx_buf,
@@ -944,14 +944,14 @@ atmel_spi_dma_map_xfer(struct atmel_spi *as, struct spi_transfer *xfer)
return 0;
}
-static void atmel_spi_dma_unmap_xfer(struct spi_master *master,
+static void atmel_spi_dma_unmap_xfer(struct spi_controller *host,
struct spi_transfer *xfer)
{
if (xfer->tx_dma != INVALID_DMA_ADDRESS)
- dma_unmap_single(master->dev.parent, xfer->tx_dma,
+ dma_unmap_single(host->dev.parent, xfer->tx_dma,
xfer->len, DMA_TO_DEVICE);
if (xfer->rx_dma != INVALID_DMA_ADDRESS)
- dma_unmap_single(master->dev.parent, xfer->rx_dma,
+ dma_unmap_single(host->dev.parent, xfer->rx_dma,
xfer->len, DMA_FROM_DEVICE);
}
@@ -1039,8 +1039,8 @@ atmel_spi_pump_pio_data(struct atmel_spi *as, struct spi_transfer *xfer)
static irqreturn_t
atmel_spi_pio_interrupt(int irq, void *dev_id)
{
- struct spi_master *master = dev_id;
- struct atmel_spi *as = spi_master_get_devdata(master);
+ struct spi_controller *host = dev_id;
+ struct atmel_spi *as = spi_controller_get_devdata(host);
u32 status, pending, imr;
struct spi_transfer *xfer;
int ret = IRQ_NONE;
@@ -1052,7 +1052,7 @@ atmel_spi_pio_interrupt(int irq, void *dev_id)
if (pending & SPI_BIT(OVRES)) {
ret = IRQ_HANDLED;
spi_writel(as, IDR, SPI_BIT(OVRES));
- dev_warn(master->dev.parent, "overrun\n");
+ dev_warn(host->dev.parent, "overrun\n");
/*
* When we get an overrun, we disregard the current
@@ -1097,8 +1097,8 @@ atmel_spi_pio_interrupt(int irq, void *dev_id)
static irqreturn_t
atmel_spi_pdc_interrupt(int irq, void *dev_id)
{
- struct spi_master *master = dev_id;
- struct atmel_spi *as = spi_master_get_devdata(master);
+ struct spi_controller *host = dev_id;
+ struct atmel_spi *as = spi_controller_get_devdata(host);
u32 status, pending, imr;
int ret = IRQ_NONE;
@@ -1152,12 +1152,12 @@ static int atmel_word_delay_csr(struct spi_device *spi, struct atmel_spi *as)
static void initialize_native_cs_for_gpio(struct atmel_spi *as)
{
int i;
- struct spi_master *master = platform_get_drvdata(as->pdev);
+ struct spi_controller *host = platform_get_drvdata(as->pdev);
if (!as->native_cs_free)
return; /* already initialized */
- if (!master->cs_gpiods)
+ if (!host->cs_gpiods)
return; /* No CS GPIO */
/*
@@ -1170,7 +1170,7 @@ static void initialize_native_cs_for_gpio(struct atmel_spi *as)
i = 1;
for (; i < 4; i++)
- if (master->cs_gpiods[i])
+ if (host->cs_gpiods[i])
as->native_cs_free |= BIT(i);
if (as->native_cs_free)
@@ -1186,7 +1186,7 @@ static int atmel_spi_setup(struct spi_device *spi)
int chip_select;
int word_delay_csr;
- as = spi_master_get_devdata(spi->master);
+ as = spi_controller_get_devdata(spi->controller);
/* see notes above re chipselect */
if (!spi->cs_gpiod && (spi->mode & SPI_CS_HIGH)) {
@@ -1254,7 +1254,7 @@ static int atmel_spi_setup(struct spi_device *spi)
static void atmel_spi_set_cs(struct spi_device *spi, bool enable)
{
- struct atmel_spi *as = spi_master_get_devdata(spi->master);
+ struct atmel_spi *as = spi_controller_get_devdata(spi->controller);
/* the core doesn't really pass us enable/disable, but CS HIGH vs CS LOW
* since we already have routines for activate/deactivate translate
* high/low to active/inactive
@@ -1269,7 +1269,7 @@ static void atmel_spi_set_cs(struct spi_device *spi, bool enable)
}
-static int atmel_spi_one_transfer(struct spi_master *master,
+static int atmel_spi_one_transfer(struct spi_controller *host,
struct spi_device *spi,
struct spi_transfer *xfer)
{
@@ -1281,7 +1281,7 @@ static int atmel_spi_one_transfer(struct spi_master *master,
int ret;
unsigned long dma_timeout;
- as = spi_master_get_devdata(master);
+ as = spi_controller_get_devdata(host);
asd = spi->controller_state;
bits = (asd->csr >> 4) & 0xf;
@@ -1295,7 +1295,7 @@ static int atmel_spi_one_transfer(struct spi_master *master,
* DMA map early, for performance (empties dcache ASAP) and
* better fault reporting.
*/
- if ((!master->cur_msg->is_dma_mapped)
+ if ((!host->cur_msg->is_dma_mapped)
&& as->use_pdc) {
if (atmel_spi_dma_map_xfer(as, xfer) < 0)
return -ENOMEM;
@@ -1311,11 +1311,11 @@ static int atmel_spi_one_transfer(struct spi_master *master,
if (as->use_pdc) {
atmel_spi_lock(as);
- atmel_spi_pdc_next_xfer(master, xfer);
+ atmel_spi_pdc_next_xfer(host, xfer);
atmel_spi_unlock(as);
} else if (atmel_spi_use_dma(as, xfer)) {
len = as->current_remaining_bytes;
- ret = atmel_spi_next_xfer_dma_submit(master,
+ ret = atmel_spi_next_xfer_dma_submit(host,
xfer, &len);
if (ret) {
dev_err(&spi->dev,
@@ -1329,7 +1329,7 @@ static int atmel_spi_one_transfer(struct spi_master *master,
}
} else {
atmel_spi_lock(as);
- atmel_spi_next_xfer_pio(master, xfer);
+ atmel_spi_next_xfer_pio(host, xfer);
atmel_spi_unlock(as);
}
@@ -1346,7 +1346,7 @@ static int atmel_spi_one_transfer(struct spi_master *master,
if (as->done_status) {
if (as->use_pdc) {
- dev_warn(master->dev.parent,
+ dev_warn(host->dev.parent,
"overrun (%u/%u remaining)\n",
spi_readl(as, TCR), spi_readl(as, RCR));
@@ -1362,7 +1362,7 @@ static int atmel_spi_one_transfer(struct spi_master *master,
if (spi_readl(as, SR) & SPI_BIT(TXEMPTY))
break;
if (!timeout)
- dev_warn(master->dev.parent,
+ dev_warn(host->dev.parent,
"timeout waiting for TXEMPTY");
while (spi_readl(as, SR) & SPI_BIT(RDRF))
spi_readl(as, RDR);
@@ -1371,13 +1371,13 @@ static int atmel_spi_one_transfer(struct spi_master *master,
spi_readl(as, SR);
} else if (atmel_spi_use_dma(as, xfer)) {
- atmel_spi_stop_dma(master);
+ atmel_spi_stop_dma(host);
}
}
- if (!master->cur_msg->is_dma_mapped
+ if (!host->cur_msg->is_dma_mapped
&& as->use_pdc)
- atmel_spi_dma_unmap_xfer(master, xfer);
+ atmel_spi_dma_unmap_xfer(host, xfer);
if (as->use_pdc)
atmel_spi_disable_pdc_transfer(as);
@@ -1440,7 +1440,7 @@ static int atmel_spi_probe(struct platform_device *pdev)
int irq;
struct clk *clk;
int ret;
- struct spi_master *master;
+ struct spi_controller *host;
struct atmel_spi *as;
/* Select default pin state */
@@ -1459,29 +1459,29 @@ static int atmel_spi_probe(struct platform_device *pdev)
return PTR_ERR(clk);
/* setup spi core then atmel-specific driver state */
- master = spi_alloc_master(&pdev->dev, sizeof(*as));
- if (!master)
+ host = spi_alloc_host(&pdev->dev, sizeof(*as));
+ if (!host)
return -ENOMEM;
/* the spi->mode bits understood by this driver: */
- master->use_gpio_descriptors = true;
- master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
- master->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 16);
- master->dev.of_node = pdev->dev.of_node;
- master->bus_num = pdev->id;
- master->num_chipselect = 4;
- master->setup = atmel_spi_setup;
- master->flags = (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX |
+ host->use_gpio_descriptors = true;
+ host->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+ host->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 16);
+ host->dev.of_node = pdev->dev.of_node;
+ host->bus_num = pdev->id;
+ host->num_chipselect = 4;
+ host->setup = atmel_spi_setup;
+ host->flags = (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX |
SPI_MASTER_GPIO_SS);
- master->transfer_one = atmel_spi_one_transfer;
- master->set_cs = atmel_spi_set_cs;
- master->cleanup = atmel_spi_cleanup;
- master->auto_runtime_pm = true;
- master->max_dma_len = SPI_MAX_DMA_XFER;
- master->can_dma = atmel_spi_can_dma;
- platform_set_drvdata(pdev, master);
+ host->transfer_one = atmel_spi_one_transfer;
+ host->set_cs = atmel_spi_set_cs;
+ host->cleanup = atmel_spi_cleanup;
+ host->auto_runtime_pm = true;
+ host->max_dma_len = SPI_MAX_DMA_XFER;
+ host->can_dma = atmel_spi_can_dma;
+ platform_set_drvdata(pdev, host);
- as = spi_master_get_devdata(master);
+ as = spi_controller_get_devdata(host);
spin_lock_init(&as->lock);
@@ -1502,7 +1502,7 @@ static int atmel_spi_probe(struct platform_device *pdev)
as->use_dma = false;
as->use_pdc = false;
if (as->caps.has_dma_support) {
- ret = atmel_spi_configure_dma(master, as);
+ ret = atmel_spi_configure_dma(host, as);
if (ret == 0) {
as->use_dma = true;
} else if (ret == -EPROBE_DEFER) {
@@ -1532,7 +1532,7 @@ static int atmel_spi_probe(struct platform_device *pdev)
}
}
if (!as->use_dma)
- dev_info(master->dev.parent,
+ dev_info(host->dev.parent,
" can not allocate dma coherent memory\n");
}
@@ -1541,10 +1541,10 @@ static int atmel_spi_probe(struct platform_device *pdev)
if (as->use_pdc) {
ret = devm_request_irq(&pdev->dev, irq, atmel_spi_pdc_interrupt,
- 0, dev_name(&pdev->dev), master);
+ 0, dev_name(&pdev->dev), host);
} else {
ret = devm_request_irq(&pdev->dev, irq, atmel_spi_pio_interrupt,
- 0, dev_name(&pdev->dev), master);
+ 0, dev_name(&pdev->dev), host);
}
if (ret)
goto out_unmap_regs;
@@ -1569,7 +1569,7 @@ static int atmel_spi_probe(struct platform_device *pdev)
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
- ret = devm_spi_register_master(&pdev->dev, master);
+ ret = devm_spi_register_controller(&pdev->dev, host);
if (ret)
goto out_free_dma;
@@ -1585,28 +1585,28 @@ out_free_dma:
pm_runtime_set_suspended(&pdev->dev);
if (as->use_dma)
- atmel_spi_release_dma(master);
+ atmel_spi_release_dma(host);
spi_writel(as, CR, SPI_BIT(SWRST));
spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */
clk_disable_unprepare(clk);
out_free_irq:
out_unmap_regs:
- spi_master_put(master);
+ spi_controller_put(host);
return ret;
}
static int atmel_spi_remove(struct platform_device *pdev)
{
- struct spi_master *master = platform_get_drvdata(pdev);
- struct atmel_spi *as = spi_master_get_devdata(master);
+ struct spi_controller *host = platform_get_drvdata(pdev);
+ struct atmel_spi *as = spi_controller_get_devdata(host);
pm_runtime_get_sync(&pdev->dev);
/* reset the hardware and block queue progress */
if (as->use_dma) {
- atmel_spi_stop_dma(master);
- atmel_spi_release_dma(master);
+ atmel_spi_stop_dma(host);
+ atmel_spi_release_dma(host);
if (IS_ENABLED(CONFIG_SOC_SAM_V4_V5)) {
dma_free_coherent(&pdev->dev, SPI_MAX_DMA_XFER,
as->addr_tx_bbuf,
@@ -1633,8 +1633,8 @@ static int atmel_spi_remove(struct platform_device *pdev)
static int atmel_spi_runtime_suspend(struct device *dev)
{
- struct spi_master *master = dev_get_drvdata(dev);
- struct atmel_spi *as = spi_master_get_devdata(master);
+ struct spi_controller *host = dev_get_drvdata(dev);
+ struct atmel_spi *as = spi_controller_get_devdata(host);
clk_disable_unprepare(as->clk);
pinctrl_pm_select_sleep_state(dev);
@@ -1644,8 +1644,8 @@ static int atmel_spi_runtime_suspend(struct device *dev)
static int atmel_spi_runtime_resume(struct device *dev)
{
- struct spi_master *master = dev_get_drvdata(dev);
- struct atmel_spi *as = spi_master_get_devdata(master);
+ struct spi_controller *host = dev_get_drvdata(dev);
+ struct atmel_spi *as = spi_controller_get_devdata(host);
pinctrl_pm_select_default_state(dev);
@@ -1654,11 +1654,11 @@ static int atmel_spi_runtime_resume(struct device *dev)
static int atmel_spi_suspend(struct device *dev)
{
- struct spi_master *master = dev_get_drvdata(dev);
+ struct spi_controller *host = dev_get_drvdata(dev);
int ret;
/* Stop the queue running */
- ret = spi_master_suspend(master);
+ ret = spi_controller_suspend(host);
if (ret)
return ret;
@@ -1670,8 +1670,8 @@ static int atmel_spi_suspend(struct device *dev)
static int atmel_spi_resume(struct device *dev)
{
- struct spi_master *master = dev_get_drvdata(dev);
- struct atmel_spi *as = spi_master_get_devdata(master);
+ struct spi_controller *host = dev_get_drvdata(dev);
+ struct atmel_spi *as = spi_controller_get_devdata(host);
int ret;
ret = clk_prepare_enable(as->clk);
@@ -1689,7 +1689,7 @@ static int atmel_spi_resume(struct device *dev)
}
/* Start the queue running */
- return spi_master_resume(master);
+ return spi_controller_resume(host);
}
static const struct dev_pm_ops atmel_spi_pm_ops = {
diff --git a/drivers/spi/spi-bcm63xx-hsspi.c b/drivers/spi/spi-bcm63xx-hsspi.c
index b871fd810d80..cd0a6478f5e7 100644
--- a/drivers/spi/spi-bcm63xx-hsspi.c
+++ b/drivers/spi/spi-bcm63xx-hsspi.c
@@ -20,6 +20,8 @@
#include <linux/spi/spi.h>
#include <linux/mutex.h>
#include <linux/of.h>
+#include <linux/spi/spi-mem.h>
+#include <linux/mtd/spi-nor.h>
#include <linux/reset.h>
#include <linux/pm_runtime.h>
@@ -57,6 +59,7 @@
#define PINGPONG_CMD_SS_SHIFT 12
#define HSSPI_PINGPONG_STATUS_REG(x) (0x84 + (x) * 0x40)
+#define HSSPI_PINGPONG_STATUS_SRC_BUSY BIT(1)
#define HSSPI_PROFILE_CLK_CTRL_REG(x) (0x100 + (x) * 0x20)
#define CLK_CTRL_FREQ_CTRL_MASK 0x0000ffff
@@ -92,15 +95,42 @@
#define HSSPI_MAX_PREPEND_LEN 15
-#define HSSPI_MAX_SYNC_CLOCK 30000000
+/*
+ * Some chip require 30MHz but other require 25MHz. Use smaller value to cover
+ * both cases.
+ */
+#define HSSPI_MAX_SYNC_CLOCK 25000000
#define HSSPI_SPI_MAX_CS 8
#define HSSPI_BUS_NUM 1 /* 0 is legacy SPI */
+#define HSSPI_POLL_STATUS_TIMEOUT_MS 100
+
+#define HSSPI_WAIT_MODE_POLLING 0
+#define HSSPI_WAIT_MODE_INTR 1
+#define HSSPI_WAIT_MODE_MAX HSSPI_WAIT_MODE_INTR
+
+/*
+ * Default transfer mode is auto. If the msg is prependable, use the prepend
+ * mode. If not, falls back to use the dummy cs workaround mode but limit the
+ * clock to 25MHz to make sure it works in all board design.
+ */
+#define HSSPI_XFER_MODE_AUTO 0
+#define HSSPI_XFER_MODE_PREPEND 1
+#define HSSPI_XFER_MODE_DUMMYCS 2
+#define HSSPI_XFER_MODE_MAX HSSPI_XFER_MODE_DUMMYCS
+
+#define bcm63xx_prepend_printk_on_checkfail(bs, fmt, ...) \
+do { \
+ if (bs->xfer_mode == HSSPI_XFER_MODE_AUTO) \
+ dev_dbg(&bs->pdev->dev, fmt, ##__VA_ARGS__); \
+ else if (bs->xfer_mode == HSSPI_XFER_MODE_PREPEND) \
+ dev_err(&bs->pdev->dev, fmt, ##__VA_ARGS__); \
+} while (0)
struct bcm63xx_hsspi {
struct completion done;
struct mutex bus_mutex;
-
+ struct mutex msg_mutex;
struct platform_device *pdev;
struct clk *clk;
struct clk *pll_clk;
@@ -109,8 +139,289 @@ struct bcm63xx_hsspi {
u32 speed_hz;
u8 cs_polarity;
+ u32 wait_mode;
+ u32 xfer_mode;
+ u32 prepend_cnt;
+ u8 *prepend_buf;
+};
+
+static ssize_t wait_mode_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct spi_controller *ctrl = dev_get_drvdata(dev);
+ struct bcm63xx_hsspi *bs = spi_master_get_devdata(ctrl);
+
+ return sprintf(buf, "%d\n", bs->wait_mode);
+}
+
+static ssize_t wait_mode_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct spi_controller *ctrl = dev_get_drvdata(dev);
+ struct bcm63xx_hsspi *bs = spi_master_get_devdata(ctrl);
+ u32 val;
+
+ if (kstrtou32(buf, 10, &val))
+ return -EINVAL;
+
+ if (val > HSSPI_WAIT_MODE_MAX) {
+ dev_warn(dev, "invalid wait mode %u\n", val);
+ return -EINVAL;
+ }
+
+ mutex_lock(&bs->msg_mutex);
+ bs->wait_mode = val;
+ /* clear interrupt status to avoid spurious int on next transfer */
+ if (val == HSSPI_WAIT_MODE_INTR)
+ __raw_writel(HSSPI_INT_CLEAR_ALL, bs->regs + HSSPI_INT_STATUS_REG);
+ mutex_unlock(&bs->msg_mutex);
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(wait_mode);
+
+static ssize_t xfer_mode_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct spi_controller *ctrl = dev_get_drvdata(dev);
+ struct bcm63xx_hsspi *bs = spi_master_get_devdata(ctrl);
+
+ return sprintf(buf, "%d\n", bs->xfer_mode);
+}
+
+static ssize_t xfer_mode_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct spi_controller *ctrl = dev_get_drvdata(dev);
+ struct bcm63xx_hsspi *bs = spi_master_get_devdata(ctrl);
+ u32 val;
+
+ if (kstrtou32(buf, 10, &val))
+ return -EINVAL;
+
+ if (val > HSSPI_XFER_MODE_MAX) {
+ dev_warn(dev, "invalid xfer mode %u\n", val);
+ return -EINVAL;
+ }
+
+ mutex_lock(&bs->msg_mutex);
+ bs->xfer_mode = val;
+ mutex_unlock(&bs->msg_mutex);
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(xfer_mode);
+
+static struct attribute *bcm63xx_hsspi_attrs[] = {
+ &dev_attr_wait_mode.attr,
+ &dev_attr_xfer_mode.attr,
+ NULL,
+};
+
+static const struct attribute_group bcm63xx_hsspi_group = {
+ .attrs = bcm63xx_hsspi_attrs,
};
+static void bcm63xx_hsspi_set_clk(struct bcm63xx_hsspi *bs,
+ struct spi_device *spi, int hz);
+
+static size_t bcm63xx_hsspi_max_message_size(struct spi_device *spi)
+{
+ return HSSPI_BUFFER_LEN - HSSPI_OPCODE_LEN;
+}
+
+static int bcm63xx_hsspi_wait_cmd(struct bcm63xx_hsspi *bs)
+{
+ unsigned long limit;
+ u32 reg = 0;
+ int rc = 0;
+
+ if (bs->wait_mode == HSSPI_WAIT_MODE_INTR) {
+ if (wait_for_completion_timeout(&bs->done, HZ) == 0)
+ rc = 1;
+ } else {
+ /* polling mode checks for status busy bit */
+ limit = jiffies + msecs_to_jiffies(HSSPI_POLL_STATUS_TIMEOUT_MS);
+
+ while (!time_after(jiffies, limit)) {
+ reg = __raw_readl(bs->regs + HSSPI_PINGPONG_STATUS_REG(0));
+ if (reg & HSSPI_PINGPONG_STATUS_SRC_BUSY)
+ cpu_relax();
+ else
+ break;
+ }
+ if (reg & HSSPI_PINGPONG_STATUS_SRC_BUSY)
+ rc = 1;
+ }
+
+ if (rc)
+ dev_err(&bs->pdev->dev, "transfer timed out!\n");
+
+ return rc;
+}
+
+static bool bcm63xx_prepare_prepend_transfer(struct spi_master *master,
+ struct spi_message *msg,
+ struct spi_transfer *t_prepend)
+{
+
+ struct bcm63xx_hsspi *bs = spi_master_get_devdata(master);
+ bool tx_only = false;
+ struct spi_transfer *t;
+
+ /*
+ * Multiple transfers within a message may be combined into one transfer
+ * to the controller using its prepend feature. A SPI message is prependable
+ * only if the following are all true:
+ * 1. One or more half duplex write transfer in single bit mode
+ * 2. Optional full duplex read/write at the end
+ * 3. No delay and cs_change between transfers
+ */
+ bs->prepend_cnt = 0;
+ list_for_each_entry(t, &msg->transfers, transfer_list) {
+ if ((spi_delay_to_ns(&t->delay, t) > 0) || t->cs_change) {
+ bcm63xx_prepend_printk_on_checkfail(bs,
+ "Delay or cs change not supported in prepend mode!\n");
+ return false;
+ }
+
+ tx_only = false;
+ if (t->tx_buf && !t->rx_buf) {
+ tx_only = true;
+ if (bs->prepend_cnt + t->len >
+ (HSSPI_BUFFER_LEN - HSSPI_OPCODE_LEN)) {
+ bcm63xx_prepend_printk_on_checkfail(bs,
+ "exceed max buf len, abort prepending transfers!\n");
+ return false;
+ }
+
+ if (t->tx_nbits > SPI_NBITS_SINGLE &&
+ !list_is_last(&t->transfer_list, &msg->transfers)) {
+ bcm63xx_prepend_printk_on_checkfail(bs,
+ "multi-bit prepend buf not supported!\n");
+ return false;
+ }
+
+ if (t->tx_nbits == SPI_NBITS_SINGLE) {
+ memcpy(bs->prepend_buf + bs->prepend_cnt, t->tx_buf, t->len);
+ bs->prepend_cnt += t->len;
+ }
+ } else {
+ if (!list_is_last(&t->transfer_list, &msg->transfers)) {
+ bcm63xx_prepend_printk_on_checkfail(bs,
+ "rx/tx_rx transfer not supported when it is not last one!\n");
+ return false;
+ }
+ }
+
+ if (list_is_last(&t->transfer_list, &msg->transfers)) {
+ memcpy(t_prepend, t, sizeof(struct spi_transfer));
+
+ if (tx_only && t->tx_nbits == SPI_NBITS_SINGLE) {
+ /*
+ * if the last one is also a single bit tx only transfer, merge
+ * all of them into one single tx transfer
+ */
+ t_prepend->len = bs->prepend_cnt;
+ t_prepend->tx_buf = bs->prepend_buf;
+ bs->prepend_cnt = 0;
+ } else {
+ /*
+ * if the last one is not a tx only transfer or dual tx xfer, all
+ * the previous transfers are sent through prepend bytes and
+ * make sure it does not exceed the max prepend len
+ */
+ if (bs->prepend_cnt > HSSPI_MAX_PREPEND_LEN) {
+ bcm63xx_prepend_printk_on_checkfail(bs,
+ "exceed max prepend len, abort prepending transfers!\n");
+ return false;
+ }
+ }
+ }
+ }
+
+ return true;
+}
+
+static int bcm63xx_hsspi_do_prepend_txrx(struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct bcm63xx_hsspi *bs = spi_master_get_devdata(spi->master);
+ unsigned int chip_select = spi->chip_select;
+ u16 opcode = 0, val;
+ const u8 *tx = t->tx_buf;
+ u8 *rx = t->rx_buf;
+ u32 reg = 0;
+
+ /*
+ * shouldn't happen as we set the max_message_size in the probe.
+ * but check it again in case some driver does not honor the max size
+ */
+ if (t->len + bs->prepend_cnt > (HSSPI_BUFFER_LEN - HSSPI_OPCODE_LEN)) {
+ dev_warn(&bs->pdev->dev,
+ "Prepend message large than fifo size len %d prepend %d\n",
+ t->len, bs->prepend_cnt);
+ return -EINVAL;
+ }
+
+ bcm63xx_hsspi_set_clk(bs, spi, t->speed_hz);
+
+ if (tx && rx)
+ opcode = HSSPI_OP_READ_WRITE;
+ else if (tx)
+ opcode = HSSPI_OP_WRITE;
+ else if (rx)
+ opcode = HSSPI_OP_READ;
+
+ if ((opcode == HSSPI_OP_READ && t->rx_nbits == SPI_NBITS_DUAL) ||
+ (opcode == HSSPI_OP_WRITE && t->tx_nbits == SPI_NBITS_DUAL)) {
+ opcode |= HSSPI_OP_MULTIBIT;
+
+ if (t->rx_nbits == SPI_NBITS_DUAL) {
+ reg |= 1 << MODE_CTRL_MULTIDATA_RD_SIZE_SHIFT;
+ reg |= bs->prepend_cnt << MODE_CTRL_MULTIDATA_RD_STRT_SHIFT;
+ }
+ if (t->tx_nbits == SPI_NBITS_DUAL) {
+ reg |= 1 << MODE_CTRL_MULTIDATA_WR_SIZE_SHIFT;
+ reg |= bs->prepend_cnt << MODE_CTRL_MULTIDATA_WR_STRT_SHIFT;
+ }
+ }
+
+ reg |= bs->prepend_cnt << MODE_CTRL_PREPENDBYTE_CNT_SHIFT;
+ __raw_writel(reg | 0xff,
+ bs->regs + HSSPI_PROFILE_MODE_CTRL_REG(chip_select));
+
+ reinit_completion(&bs->done);
+ if (bs->prepend_cnt)
+ memcpy_toio(bs->fifo + HSSPI_OPCODE_LEN, bs->prepend_buf,
+ bs->prepend_cnt);
+ if (tx)
+ memcpy_toio(bs->fifo + HSSPI_OPCODE_LEN + bs->prepend_cnt, tx,
+ t->len);
+
+ *(__be16 *)(&val) = cpu_to_be16(opcode | t->len);
+ __raw_writew(val, bs->fifo);
+ /* enable interrupt */
+ if (bs->wait_mode == HSSPI_WAIT_MODE_INTR)
+ __raw_writel(HSSPI_PINGx_CMD_DONE(0), bs->regs + HSSPI_INT_MASK_REG);
+
+ /* start the transfer */
+ reg = chip_select << PINGPONG_CMD_SS_SHIFT |
+ chip_select << PINGPONG_CMD_PROFILE_SHIFT |
+ PINGPONG_COMMAND_START_NOW;
+ __raw_writel(reg, bs->regs + HSSPI_PINGPONG_COMMAND_REG(0));
+
+ if (bcm63xx_hsspi_wait_cmd(bs))
+ return -ETIMEDOUT;
+
+ if (rx)
+ memcpy_fromio(rx, bs->fifo, t->len);
+
+ return 0;
+}
+
static void bcm63xx_hsspi_set_cs(struct bcm63xx_hsspi *bs, unsigned int cs,
bool active)
{
@@ -158,14 +469,16 @@ static int bcm63xx_hsspi_do_txrx(struct spi_device *spi, struct spi_transfer *t)
{
struct bcm63xx_hsspi *bs = spi_master_get_devdata(spi->master);
unsigned int chip_select = spi->chip_select;
- u16 opcode = 0;
+ u16 opcode = 0, val;
int pending = t->len;
int step_size = HSSPI_BUFFER_LEN;
const u8 *tx = t->tx_buf;
u8 *rx = t->rx_buf;
+ u32 reg = 0;
bcm63xx_hsspi_set_clk(bs, spi, t->speed_hz);
- bcm63xx_hsspi_set_cs(bs, spi->chip_select, true);
+ if (!t->cs_off)
+ bcm63xx_hsspi_set_cs(bs, spi->chip_select, true);
if (tx && rx)
opcode = HSSPI_OP_READ_WRITE;
@@ -178,11 +491,16 @@ static int bcm63xx_hsspi_do_txrx(struct spi_device *spi, struct spi_transfer *t)
step_size -= HSSPI_OPCODE_LEN;
if ((opcode == HSSPI_OP_READ && t->rx_nbits == SPI_NBITS_DUAL) ||
- (opcode == HSSPI_OP_WRITE && t->tx_nbits == SPI_NBITS_DUAL))
+ (opcode == HSSPI_OP_WRITE && t->tx_nbits == SPI_NBITS_DUAL)) {
opcode |= HSSPI_OP_MULTIBIT;
- __raw_writel(1 << MODE_CTRL_MULTIDATA_WR_SIZE_SHIFT |
- 1 << MODE_CTRL_MULTIDATA_RD_SIZE_SHIFT | 0xff,
+ if (t->rx_nbits == SPI_NBITS_DUAL)
+ reg |= 1 << MODE_CTRL_MULTIDATA_RD_SIZE_SHIFT;
+ if (t->tx_nbits == SPI_NBITS_DUAL)
+ reg |= 1 << MODE_CTRL_MULTIDATA_WR_SIZE_SHIFT;
+ }
+
+ __raw_writel(reg | 0xff,
bs->regs + HSSPI_PROFILE_MODE_CTRL_REG(chip_select));
while (pending > 0) {
@@ -194,22 +512,21 @@ static int bcm63xx_hsspi_do_txrx(struct spi_device *spi, struct spi_transfer *t)
tx += curr_step;
}
- __raw_writew(opcode | curr_step, bs->fifo);
+ *(__be16 *)(&val) = cpu_to_be16(opcode | curr_step);
+ __raw_writew(val, bs->fifo);
/* enable interrupt */
- __raw_writel(HSSPI_PINGx_CMD_DONE(0),
- bs->regs + HSSPI_INT_MASK_REG);
+ if (bs->wait_mode == HSSPI_WAIT_MODE_INTR)
+ __raw_writel(HSSPI_PINGx_CMD_DONE(0),
+ bs->regs + HSSPI_INT_MASK_REG);
- /* start the transfer */
- __raw_writel(!chip_select << PINGPONG_CMD_SS_SHIFT |
- chip_select << PINGPONG_CMD_PROFILE_SHIFT |
- PINGPONG_COMMAND_START_NOW,
- bs->regs + HSSPI_PINGPONG_COMMAND_REG(0));
+ reg = !chip_select << PINGPONG_CMD_SS_SHIFT |
+ chip_select << PINGPONG_CMD_PROFILE_SHIFT |
+ PINGPONG_COMMAND_START_NOW;
+ __raw_writel(reg, bs->regs + HSSPI_PINGPONG_COMMAND_REG(0));
- if (wait_for_completion_timeout(&bs->done, HZ) == 0) {
- dev_err(&bs->pdev->dev, "transfer timed out!\n");
+ if (bcm63xx_hsspi_wait_cmd(bs))
return -ETIMEDOUT;
- }
if (rx) {
memcpy_fromio(rx, bs->fifo, curr_step);
@@ -259,17 +576,17 @@ static int bcm63xx_hsspi_setup(struct spi_device *spi)
return 0;
}
-static int bcm63xx_hsspi_transfer_one(struct spi_master *master,
+static int bcm63xx_hsspi_do_dummy_cs_txrx(struct spi_device *spi,
struct spi_message *msg)
{
- struct bcm63xx_hsspi *bs = spi_master_get_devdata(master);
- struct spi_transfer *t;
- struct spi_device *spi = msg->spi;
+ struct bcm63xx_hsspi *bs = spi_master_get_devdata(spi->master);
int status = -EINVAL;
int dummy_cs;
- u32 reg;
+ bool keep_cs = false;
+ struct spi_transfer *t;
- /* This controller does not support keeping CS active during idle.
+ /*
+ * This controller does not support keeping CS active during idle.
* To work around this, we use the following ugly hack:
*
* a. Invert the target chip select's polarity so it will be active.
@@ -287,6 +604,21 @@ static int bcm63xx_hsspi_transfer_one(struct spi_master *master,
bcm63xx_hsspi_set_cs(bs, dummy_cs, true);
list_for_each_entry(t, &msg->transfers, transfer_list) {
+ /*
+ * We are here because one of reasons below:
+ * a. Message is not prependable and in default auto xfer mode. This mean
+ * we fallback to dummy cs mode at maximum 25MHz safe clock rate.
+ * b. User set to use the dummy cs mode.
+ */
+ if (bs->xfer_mode == HSSPI_XFER_MODE_AUTO) {
+ if (t->speed_hz > HSSPI_MAX_SYNC_CLOCK) {
+ t->speed_hz = HSSPI_MAX_SYNC_CLOCK;
+ dev_warn_once(&bs->pdev->dev,
+ "Force to dummy cs mode. Reduce the speed to %dHz",
+ t->speed_hz);
+ }
+ }
+
status = bcm63xx_hsspi_do_txrx(spi, t);
if (status)
break;
@@ -295,23 +627,85 @@ static int bcm63xx_hsspi_transfer_one(struct spi_master *master,
spi_transfer_delay_exec(t);
- if (t->cs_change)
- bcm63xx_hsspi_set_cs(bs, spi->chip_select, false);
+ /* use existing cs change logic from spi_transfer_one_message */
+ if (t->cs_change) {
+ if (list_is_last(&t->transfer_list, &msg->transfers)) {
+ keep_cs = true;
+ } else {
+ if (!t->cs_off)
+ bcm63xx_hsspi_set_cs(bs, spi->chip_select, false);
+
+ spi_transfer_cs_change_delay_exec(msg, t);
+
+ if (!list_next_entry(t, transfer_list)->cs_off)
+ bcm63xx_hsspi_set_cs(bs, spi->chip_select, true);
+ }
+ } else if (!list_is_last(&t->transfer_list, &msg->transfers) &&
+ t->cs_off != list_next_entry(t, transfer_list)->cs_off) {
+ bcm63xx_hsspi_set_cs(bs, spi->chip_select, t->cs_off);
+ }
}
- mutex_lock(&bs->bus_mutex);
- reg = __raw_readl(bs->regs + HSSPI_GLOBAL_CTRL_REG);
- reg &= ~GLOBAL_CTRL_CS_POLARITY_MASK;
- reg |= bs->cs_polarity;
- __raw_writel(reg, bs->regs + HSSPI_GLOBAL_CTRL_REG);
- mutex_unlock(&bs->bus_mutex);
+ bcm63xx_hsspi_set_cs(bs, dummy_cs, false);
+ if (status || !keep_cs)
+ bcm63xx_hsspi_set_cs(bs, spi->chip_select, false);
+
+ return status;
+}
+static int bcm63xx_hsspi_transfer_one(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct bcm63xx_hsspi *bs = spi_master_get_devdata(master);
+ struct spi_device *spi = msg->spi;
+ int status = -EINVAL;
+ bool prependable = false;
+ struct spi_transfer t_prepend;
+
+ mutex_lock(&bs->msg_mutex);
+
+ if (bs->xfer_mode != HSSPI_XFER_MODE_DUMMYCS)
+ prependable = bcm63xx_prepare_prepend_transfer(master, msg, &t_prepend);
+
+ if (prependable) {
+ status = bcm63xx_hsspi_do_prepend_txrx(spi, &t_prepend);
+ msg->actual_length = (t_prepend.len + bs->prepend_cnt);
+ } else {
+ if (bs->xfer_mode == HSSPI_XFER_MODE_PREPEND) {
+ dev_err(&bs->pdev->dev,
+ "User sets prepend mode but msg not prependable! Abort transfer\n");
+ status = -EINVAL;
+ } else
+ status = bcm63xx_hsspi_do_dummy_cs_txrx(spi, msg);
+ }
+
+ mutex_unlock(&bs->msg_mutex);
msg->status = status;
spi_finalize_current_message(master);
return 0;
}
+static bool bcm63xx_hsspi_mem_supports_op(struct spi_mem *mem,
+ const struct spi_mem_op *op)
+{
+ if (!spi_mem_default_supports_op(mem, op))
+ return false;
+
+ /* Controller doesn't support spi mem dual io mode */
+ if ((op->cmd.opcode == SPINOR_OP_READ_1_2_2) ||
+ (op->cmd.opcode == SPINOR_OP_READ_1_2_2_4B) ||
+ (op->cmd.opcode == SPINOR_OP_READ_1_2_2_DTR) ||
+ (op->cmd.opcode == SPINOR_OP_READ_1_2_2_DTR_4B))
+ return false;
+
+ return true;
+}
+
+static const struct spi_controller_mem_ops bcm63xx_hsspi_mem_ops = {
+ .supports_op = bcm63xx_hsspi_mem_supports_op,
+};
+
static irqreturn_t bcm63xx_hsspi_interrupt(int irq, void *dev_id)
{
struct bcm63xx_hsspi *bs = (struct bcm63xx_hsspi *)dev_id;
@@ -398,10 +792,18 @@ static int bcm63xx_hsspi_probe(struct platform_device *pdev)
bs->regs = regs;
bs->speed_hz = rate;
bs->fifo = (u8 __iomem *)(bs->regs + HSSPI_FIFO_REG(0));
+ bs->wait_mode = HSSPI_WAIT_MODE_POLLING;
+ bs->prepend_buf = devm_kzalloc(dev, HSSPI_BUFFER_LEN, GFP_KERNEL);
+ if (!bs->prepend_buf) {
+ ret = -ENOMEM;
+ goto out_put_master;
+ }
mutex_init(&bs->bus_mutex);
+ mutex_init(&bs->msg_mutex);
init_completion(&bs->done);
+ master->mem_ops = &bcm63xx_hsspi_mem_ops;
master->dev.of_node = dev->of_node;
if (!dev->of_node)
master->bus_num = HSSPI_BUS_NUM;
@@ -415,6 +817,9 @@ static int bcm63xx_hsspi_probe(struct platform_device *pdev)
master->num_chipselect = num_cs;
master->setup = bcm63xx_hsspi_setup;
master->transfer_one_message = bcm63xx_hsspi_transfer_one;
+ master->max_transfer_size = bcm63xx_hsspi_max_message_size;
+ master->max_message_size = bcm63xx_hsspi_max_message_size;
+
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH |
SPI_RX_DUAL | SPI_TX_DUAL;
master->bits_per_word_mask = SPI_BPW_MASK(8);
@@ -434,21 +839,33 @@ static int bcm63xx_hsspi_probe(struct platform_device *pdev)
__raw_writel(reg | GLOBAL_CTRL_CLK_GATE_SSOFF,
bs->regs + HSSPI_GLOBAL_CTRL_REG);
- ret = devm_request_irq(dev, irq, bcm63xx_hsspi_interrupt, IRQF_SHARED,
- pdev->name, bs);
+ if (irq > 0) {
+ ret = devm_request_irq(dev, irq, bcm63xx_hsspi_interrupt, IRQF_SHARED,
+ pdev->name, bs);
- if (ret)
- goto out_put_master;
+ if (ret)
+ goto out_put_master;
+ }
pm_runtime_enable(&pdev->dev);
+ ret = sysfs_create_group(&pdev->dev.kobj, &bcm63xx_hsspi_group);
+ if (ret) {
+ dev_err(&pdev->dev, "couldn't register sysfs group\n");
+ goto out_pm_disable;
+ }
+
/* register and we are done */
ret = devm_spi_register_master(dev, master);
if (ret)
- goto out_pm_disable;
+ goto out_sysgroup_disable;
+
+ dev_info(dev, "Broadcom 63XX High Speed SPI Controller driver");
return 0;
+out_sysgroup_disable:
+ sysfs_remove_group(&pdev->dev.kobj, &bcm63xx_hsspi_group);
out_pm_disable:
pm_runtime_disable(&pdev->dev);
out_put_master:
@@ -470,6 +887,7 @@ static int bcm63xx_hsspi_remove(struct platform_device *pdev)
__raw_writel(0, bs->regs + HSSPI_INT_MASK_REG);
clk_disable_unprepare(bs->pll_clk);
clk_disable_unprepare(bs->clk);
+ sysfs_remove_group(&pdev->dev.kobj, &bcm63xx_hsspi_group);
return 0;
}
@@ -516,6 +934,7 @@ static SIMPLE_DEV_PM_OPS(bcm63xx_hsspi_pm_ops, bcm63xx_hsspi_suspend,
static const struct of_device_id bcm63xx_hsspi_of_match[] = {
{ .compatible = "brcm,bcm6328-hsspi", },
+ { .compatible = "brcm,bcmbca-hsspi-v1.0", },
{ },
};
MODULE_DEVICE_TABLE(of, bcm63xx_hsspi_of_match);
diff --git a/drivers/spi/spi-bcmbca-hsspi.c b/drivers/spi/spi-bcmbca-hsspi.c
new file mode 100644
index 000000000000..3f9e6131ad86
--- /dev/null
+++ b/drivers/spi/spi-bcmbca-hsspi.c
@@ -0,0 +1,654 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Broadcom BCMBCA High Speed SPI Controller driver
+ *
+ * Copyright 2000-2010 Broadcom Corporation
+ * Copyright 2012-2013 Jonas Gorski <jogo@openwrt.org>
+ * Copyright 2019-2022 Broadcom Ltd
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/spi/spi.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/spi/spi-mem.h>
+#include <linux/pm_runtime.h>
+
+#define HSSPI_GLOBAL_CTRL_REG 0x0
+#define GLOBAL_CTRL_CS_POLARITY_SHIFT 0
+#define GLOBAL_CTRL_CS_POLARITY_MASK 0x000000ff
+#define GLOBAL_CTRL_PLL_CLK_CTRL_SHIFT 8
+#define GLOBAL_CTRL_PLL_CLK_CTRL_MASK 0x0000ff00
+#define GLOBAL_CTRL_CLK_GATE_SSOFF BIT(16)
+#define GLOBAL_CTRL_CLK_POLARITY BIT(17)
+#define GLOBAL_CTRL_MOSI_IDLE BIT(18)
+
+#define HSSPI_GLOBAL_EXT_TRIGGER_REG 0x4
+
+#define HSSPI_INT_STATUS_REG 0x8
+#define HSSPI_INT_STATUS_MASKED_REG 0xc
+#define HSSPI_INT_MASK_REG 0x10
+
+#define HSSPI_PINGx_CMD_DONE(i) BIT((i * 8) + 0)
+#define HSSPI_PINGx_RX_OVER(i) BIT((i * 8) + 1)
+#define HSSPI_PINGx_TX_UNDER(i) BIT((i * 8) + 2)
+#define HSSPI_PINGx_POLL_TIMEOUT(i) BIT((i * 8) + 3)
+#define HSSPI_PINGx_CTRL_INVAL(i) BIT((i * 8) + 4)
+
+#define HSSPI_INT_CLEAR_ALL 0xff001f1f
+
+#define HSSPI_PINGPONG_COMMAND_REG(x) (0x80 + (x) * 0x40)
+#define PINGPONG_CMD_COMMAND_MASK 0xf
+#define PINGPONG_COMMAND_NOOP 0
+#define PINGPONG_COMMAND_START_NOW 1
+#define PINGPONG_COMMAND_START_TRIGGER 2
+#define PINGPONG_COMMAND_HALT 3
+#define PINGPONG_COMMAND_FLUSH 4
+#define PINGPONG_CMD_PROFILE_SHIFT 8
+#define PINGPONG_CMD_SS_SHIFT 12
+
+#define HSSPI_PINGPONG_STATUS_REG(x) (0x84 + (x) * 0x40)
+#define HSSPI_PINGPONG_STATUS_SRC_BUSY BIT(1)
+
+#define HSSPI_PROFILE_CLK_CTRL_REG(x) (0x100 + (x) * 0x20)
+#define CLK_CTRL_FREQ_CTRL_MASK 0x0000ffff
+#define CLK_CTRL_SPI_CLK_2X_SEL BIT(14)
+#define CLK_CTRL_ACCUM_RST_ON_LOOP BIT(15)
+#define CLK_CTRL_CLK_POLARITY BIT(16)
+
+#define HSSPI_PROFILE_SIGNAL_CTRL_REG(x) (0x104 + (x) * 0x20)
+#define SIGNAL_CTRL_LATCH_RISING BIT(12)
+#define SIGNAL_CTRL_LAUNCH_RISING BIT(13)
+#define SIGNAL_CTRL_ASYNC_INPUT_PATH BIT(16)
+
+#define HSSPI_PROFILE_MODE_CTRL_REG(x) (0x108 + (x) * 0x20)
+#define MODE_CTRL_MULTIDATA_RD_STRT_SHIFT 8
+#define MODE_CTRL_MULTIDATA_WR_STRT_SHIFT 12
+#define MODE_CTRL_MULTIDATA_RD_SIZE_SHIFT 16
+#define MODE_CTRL_MULTIDATA_WR_SIZE_SHIFT 18
+#define MODE_CTRL_MODE_3WIRE BIT(20)
+#define MODE_CTRL_PREPENDBYTE_CNT_SHIFT 24
+
+#define HSSPI_FIFO_REG(x) (0x200 + (x) * 0x200)
+
+#define HSSPI_OP_MULTIBIT BIT(11)
+#define HSSPI_OP_CODE_SHIFT 13
+#define HSSPI_OP_SLEEP (0 << HSSPI_OP_CODE_SHIFT)
+#define HSSPI_OP_READ_WRITE (1 << HSSPI_OP_CODE_SHIFT)
+#define HSSPI_OP_WRITE (2 << HSSPI_OP_CODE_SHIFT)
+#define HSSPI_OP_READ (3 << HSSPI_OP_CODE_SHIFT)
+#define HSSPI_OP_SETIRQ (4 << HSSPI_OP_CODE_SHIFT)
+
+#define HSSPI_BUFFER_LEN 512
+#define HSSPI_OPCODE_LEN 2
+
+#define HSSPI_MAX_PREPEND_LEN 15
+
+#define HSSPI_MAX_SYNC_CLOCK 30000000
+
+#define HSSPI_SPI_MAX_CS 8
+#define HSSPI_BUS_NUM 1 /* 0 is legacy SPI */
+#define HSSPI_POLL_STATUS_TIMEOUT_MS 100
+
+#define HSSPI_WAIT_MODE_POLLING 0
+#define HSSPI_WAIT_MODE_INTR 1
+#define HSSPI_WAIT_MODE_MAX HSSPI_WAIT_MODE_INTR
+
+#define SPIM_CTRL_CS_OVERRIDE_SEL_SHIFT 0
+#define SPIM_CTRL_CS_OVERRIDE_SEL_MASK 0xff
+#define SPIM_CTRL_CS_OVERRIDE_VAL_SHIFT 8
+#define SPIM_CTRL_CS_OVERRIDE_VAL_MASK 0xff
+
+struct bcmbca_hsspi {
+ struct completion done;
+ struct mutex bus_mutex;
+ struct mutex msg_mutex;
+ struct platform_device *pdev;
+ struct clk *clk;
+ struct clk *pll_clk;
+ void __iomem *regs;
+ void __iomem *spim_ctrl;
+ u8 __iomem *fifo;
+ u32 speed_hz;
+ u8 cs_polarity;
+ u32 wait_mode;
+};
+
+static ssize_t wait_mode_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct spi_controller *ctrl = dev_get_drvdata(dev);
+ struct bcmbca_hsspi *bs = spi_master_get_devdata(ctrl);
+
+ return sprintf(buf, "%d\n", bs->wait_mode);
+}
+
+static ssize_t wait_mode_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct spi_controller *ctrl = dev_get_drvdata(dev);
+ struct bcmbca_hsspi *bs = spi_master_get_devdata(ctrl);
+ u32 val;
+
+ if (kstrtou32(buf, 10, &val))
+ return -EINVAL;
+
+ if (val > HSSPI_WAIT_MODE_MAX) {
+ dev_warn(dev, "invalid wait mode %u\n", val);
+ return -EINVAL;
+ }
+
+ mutex_lock(&bs->msg_mutex);
+ bs->wait_mode = val;
+ /* clear interrupt status to avoid spurious int on next transfer */
+ if (val == HSSPI_WAIT_MODE_INTR)
+ __raw_writel(HSSPI_INT_CLEAR_ALL, bs->regs + HSSPI_INT_STATUS_REG);
+ mutex_unlock(&bs->msg_mutex);
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(wait_mode);
+
+static struct attribute *bcmbca_hsspi_attrs[] = {
+ &dev_attr_wait_mode.attr,
+ NULL,
+};
+
+static const struct attribute_group bcmbca_hsspi_group = {
+ .attrs = bcmbca_hsspi_attrs,
+};
+
+static void bcmbca_hsspi_set_cs(struct bcmbca_hsspi *bs, unsigned int cs,
+ bool active)
+{
+ u32 reg;
+
+ /* No cs orerriden needed for SS7 internal cs on pcm based voice dev */
+ if (cs == 7)
+ return;
+
+ mutex_lock(&bs->bus_mutex);
+
+ reg = __raw_readl(bs->spim_ctrl);
+ if (active)
+ reg |= BIT(cs + SPIM_CTRL_CS_OVERRIDE_SEL_SHIFT);
+ else
+ reg &= ~BIT(cs + SPIM_CTRL_CS_OVERRIDE_SEL_SHIFT);
+
+ __raw_writel(reg, bs->spim_ctrl);
+
+ mutex_unlock(&bs->bus_mutex);
+}
+
+static void bcmbca_hsspi_set_clk(struct bcmbca_hsspi *bs,
+ struct spi_device *spi, int hz)
+{
+ unsigned int profile = spi->chip_select;
+ u32 reg;
+
+ reg = DIV_ROUND_UP(2048, DIV_ROUND_UP(bs->speed_hz, hz));
+ __raw_writel(CLK_CTRL_ACCUM_RST_ON_LOOP | reg,
+ bs->regs + HSSPI_PROFILE_CLK_CTRL_REG(profile));
+
+ reg = __raw_readl(bs->regs + HSSPI_PROFILE_SIGNAL_CTRL_REG(profile));
+ if (hz > HSSPI_MAX_SYNC_CLOCK)
+ reg |= SIGNAL_CTRL_ASYNC_INPUT_PATH;
+ else
+ reg &= ~SIGNAL_CTRL_ASYNC_INPUT_PATH;
+ __raw_writel(reg, bs->regs + HSSPI_PROFILE_SIGNAL_CTRL_REG(profile));
+
+ mutex_lock(&bs->bus_mutex);
+ /* setup clock polarity */
+ reg = __raw_readl(bs->regs + HSSPI_GLOBAL_CTRL_REG);
+ reg &= ~GLOBAL_CTRL_CLK_POLARITY;
+ if (spi->mode & SPI_CPOL)
+ reg |= GLOBAL_CTRL_CLK_POLARITY;
+ __raw_writel(reg, bs->regs + HSSPI_GLOBAL_CTRL_REG);
+
+ mutex_unlock(&bs->bus_mutex);
+}
+
+static int bcmbca_hsspi_wait_cmd(struct bcmbca_hsspi *bs, unsigned int cs)
+{
+ unsigned long limit;
+ u32 reg = 0;
+ int rc = 0;
+
+ if (bs->wait_mode == HSSPI_WAIT_MODE_INTR) {
+ if (wait_for_completion_timeout(&bs->done, HZ) == 0)
+ rc = 1;
+ } else {
+ limit = jiffies + msecs_to_jiffies(HSSPI_POLL_STATUS_TIMEOUT_MS);
+
+ while (!time_after(jiffies, limit)) {
+ reg = __raw_readl(bs->regs + HSSPI_PINGPONG_STATUS_REG(0));
+ if (reg & HSSPI_PINGPONG_STATUS_SRC_BUSY)
+ cpu_relax();
+ else
+ break;
+ }
+ if (reg & HSSPI_PINGPONG_STATUS_SRC_BUSY)
+ rc = 1;
+ }
+
+ if (rc)
+ dev_err(&bs->pdev->dev, "transfer timed out!\n");
+
+ return rc;
+}
+
+static int bcmbca_hsspi_do_txrx(struct spi_device *spi, struct spi_transfer *t,
+ struct spi_message *msg)
+{
+ struct bcmbca_hsspi *bs = spi_master_get_devdata(spi->master);
+ unsigned int chip_select = spi->chip_select;
+ u16 opcode = 0, val;
+ int pending = t->len;
+ int step_size = HSSPI_BUFFER_LEN;
+ const u8 *tx = t->tx_buf;
+ u8 *rx = t->rx_buf;
+ u32 reg = 0, cs_act = 0;
+
+ bcmbca_hsspi_set_clk(bs, spi, t->speed_hz);
+
+ if (tx && rx)
+ opcode = HSSPI_OP_READ_WRITE;
+ else if (tx)
+ opcode = HSSPI_OP_WRITE;
+ else if (rx)
+ opcode = HSSPI_OP_READ;
+
+ if (opcode != HSSPI_OP_READ)
+ step_size -= HSSPI_OPCODE_LEN;
+
+ if ((opcode == HSSPI_OP_READ && t->rx_nbits == SPI_NBITS_DUAL) ||
+ (opcode == HSSPI_OP_WRITE && t->tx_nbits == SPI_NBITS_DUAL)) {
+ opcode |= HSSPI_OP_MULTIBIT;
+
+ if (t->rx_nbits == SPI_NBITS_DUAL)
+ reg |= 1 << MODE_CTRL_MULTIDATA_RD_SIZE_SHIFT;
+ if (t->tx_nbits == SPI_NBITS_DUAL)
+ reg |= 1 << MODE_CTRL_MULTIDATA_WR_SIZE_SHIFT;
+ }
+
+ __raw_writel(reg | 0xff,
+ bs->regs + HSSPI_PROFILE_MODE_CTRL_REG(chip_select));
+
+ while (pending > 0) {
+ int curr_step = min_t(int, step_size, pending);
+
+ reinit_completion(&bs->done);
+ if (tx) {
+ memcpy_toio(bs->fifo + HSSPI_OPCODE_LEN, tx, curr_step);
+ tx += curr_step;
+ }
+
+ *(__be16 *)(&val) = cpu_to_be16(opcode | curr_step);
+ __raw_writew(val, bs->fifo);
+
+ /* enable interrupt */
+ if (bs->wait_mode == HSSPI_WAIT_MODE_INTR)
+ __raw_writel(HSSPI_PINGx_CMD_DONE(0),
+ bs->regs + HSSPI_INT_MASK_REG);
+
+ if (!cs_act) {
+ /* must apply cs signal as close as the cmd starts */
+ bcmbca_hsspi_set_cs(bs, chip_select, true);
+ cs_act = 1;
+ }
+
+ reg = chip_select << PINGPONG_CMD_SS_SHIFT |
+ chip_select << PINGPONG_CMD_PROFILE_SHIFT |
+ PINGPONG_COMMAND_START_NOW;
+ __raw_writel(reg, bs->regs + HSSPI_PINGPONG_COMMAND_REG(0));
+
+ if (bcmbca_hsspi_wait_cmd(bs, spi->chip_select))
+ return -ETIMEDOUT;
+
+ pending -= curr_step;
+
+ if (rx) {
+ memcpy_fromio(rx, bs->fifo, curr_step);
+ rx += curr_step;
+ }
+ }
+
+ return 0;
+}
+
+static int bcmbca_hsspi_setup(struct spi_device *spi)
+{
+ struct bcmbca_hsspi *bs = spi_master_get_devdata(spi->master);
+ u32 reg;
+
+ reg = __raw_readl(bs->regs +
+ HSSPI_PROFILE_SIGNAL_CTRL_REG(spi->chip_select));
+ reg &= ~(SIGNAL_CTRL_LAUNCH_RISING | SIGNAL_CTRL_LATCH_RISING);
+ if (spi->mode & SPI_CPHA)
+ reg |= SIGNAL_CTRL_LAUNCH_RISING;
+ else
+ reg |= SIGNAL_CTRL_LATCH_RISING;
+ __raw_writel(reg, bs->regs +
+ HSSPI_PROFILE_SIGNAL_CTRL_REG(spi->chip_select));
+
+ mutex_lock(&bs->bus_mutex);
+ reg = __raw_readl(bs->regs + HSSPI_GLOBAL_CTRL_REG);
+
+ if (spi->mode & SPI_CS_HIGH)
+ reg |= BIT(spi->chip_select);
+ else
+ reg &= ~BIT(spi->chip_select);
+ __raw_writel(reg, bs->regs + HSSPI_GLOBAL_CTRL_REG);
+
+ if (spi->mode & SPI_CS_HIGH)
+ bs->cs_polarity |= BIT(spi->chip_select);
+ else
+ bs->cs_polarity &= ~BIT(spi->chip_select);
+
+ reg = __raw_readl(bs->spim_ctrl);
+ reg &= ~BIT(spi->chip_select + SPIM_CTRL_CS_OVERRIDE_VAL_SHIFT);
+ if (spi->mode & SPI_CS_HIGH)
+ reg |= BIT(spi->chip_select + SPIM_CTRL_CS_OVERRIDE_VAL_SHIFT);
+ __raw_writel(reg, bs->spim_ctrl);
+
+ mutex_unlock(&bs->bus_mutex);
+
+ return 0;
+}
+
+static int bcmbca_hsspi_transfer_one(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct bcmbca_hsspi *bs = spi_master_get_devdata(master);
+ struct spi_transfer *t;
+ struct spi_device *spi = msg->spi;
+ int status = -EINVAL;
+ bool keep_cs = false;
+
+ mutex_lock(&bs->msg_mutex);
+ list_for_each_entry(t, &msg->transfers, transfer_list) {
+ status = bcmbca_hsspi_do_txrx(spi, t, msg);
+ if (status)
+ break;
+
+ spi_transfer_delay_exec(t);
+
+ if (t->cs_change) {
+ if (list_is_last(&t->transfer_list, &msg->transfers)) {
+ keep_cs = true;
+ } else {
+ if (!t->cs_off)
+ bcmbca_hsspi_set_cs(bs, spi->chip_select, false);
+
+ spi_transfer_cs_change_delay_exec(msg, t);
+
+ if (!list_next_entry(t, transfer_list)->cs_off)
+ bcmbca_hsspi_set_cs(bs, spi->chip_select, true);
+ }
+ } else if (!list_is_last(&t->transfer_list, &msg->transfers) &&
+ t->cs_off != list_next_entry(t, transfer_list)->cs_off) {
+ bcmbca_hsspi_set_cs(bs, spi->chip_select, t->cs_off);
+ }
+
+ msg->actual_length += t->len;
+ }
+
+ mutex_unlock(&bs->msg_mutex);
+
+ if (status || !keep_cs)
+ bcmbca_hsspi_set_cs(bs, spi->chip_select, false);
+
+ msg->status = status;
+ spi_finalize_current_message(master);
+
+ return 0;
+}
+
+static irqreturn_t bcmbca_hsspi_interrupt(int irq, void *dev_id)
+{
+ struct bcmbca_hsspi *bs = (struct bcmbca_hsspi *)dev_id;
+
+ if (__raw_readl(bs->regs + HSSPI_INT_STATUS_MASKED_REG) == 0)
+ return IRQ_NONE;
+
+ __raw_writel(HSSPI_INT_CLEAR_ALL, bs->regs + HSSPI_INT_STATUS_REG);
+ __raw_writel(0, bs->regs + HSSPI_INT_MASK_REG);
+
+ complete(&bs->done);
+
+ return IRQ_HANDLED;
+}
+
+static int bcmbca_hsspi_probe(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct bcmbca_hsspi *bs;
+ struct resource *res_mem;
+ void __iomem *spim_ctrl;
+ void __iomem *regs;
+ struct device *dev = &pdev->dev;
+ struct clk *clk, *pll_clk = NULL;
+ int irq, ret;
+ u32 reg, rate, num_cs = HSSPI_SPI_MAX_CS;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ res_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hsspi");
+ if (!res_mem)
+ return -EINVAL;
+ regs = devm_ioremap_resource(dev, res_mem);
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
+
+ res_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "spim-ctrl");
+ if (!res_mem)
+ return -EINVAL;
+ spim_ctrl = devm_ioremap_resource(dev, res_mem);
+ if (IS_ERR(spim_ctrl))
+ return PTR_ERR(spim_ctrl);
+
+ clk = devm_clk_get(dev, "hsspi");
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ return ret;
+
+ rate = clk_get_rate(clk);
+ if (!rate) {
+ pll_clk = devm_clk_get(dev, "pll");
+
+ if (IS_ERR(pll_clk)) {
+ ret = PTR_ERR(pll_clk);
+ goto out_disable_clk;
+ }
+
+ ret = clk_prepare_enable(pll_clk);
+ if (ret)
+ goto out_disable_clk;
+
+ rate = clk_get_rate(pll_clk);
+ if (!rate) {
+ ret = -EINVAL;
+ goto out_disable_pll_clk;
+ }
+ }
+
+ master = spi_alloc_master(&pdev->dev, sizeof(*bs));
+ if (!master) {
+ ret = -ENOMEM;
+ goto out_disable_pll_clk;
+ }
+
+ bs = spi_master_get_devdata(master);
+ bs->pdev = pdev;
+ bs->clk = clk;
+ bs->pll_clk = pll_clk;
+ bs->regs = regs;
+ bs->spim_ctrl = spim_ctrl;
+ bs->speed_hz = rate;
+ bs->fifo = (u8 __iomem *) (bs->regs + HSSPI_FIFO_REG(0));
+ bs->wait_mode = HSSPI_WAIT_MODE_POLLING;
+
+ mutex_init(&bs->bus_mutex);
+ mutex_init(&bs->msg_mutex);
+ init_completion(&bs->done);
+
+ master->dev.of_node = dev->of_node;
+ if (!dev->of_node)
+ master->bus_num = HSSPI_BUS_NUM;
+
+ of_property_read_u32(dev->of_node, "num-cs", &num_cs);
+ if (num_cs > 8) {
+ dev_warn(dev, "unsupported number of cs (%i), reducing to 8\n",
+ num_cs);
+ num_cs = HSSPI_SPI_MAX_CS;
+ }
+ master->num_chipselect = num_cs;
+ master->setup = bcmbca_hsspi_setup;
+ master->transfer_one_message = bcmbca_hsspi_transfer_one;
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH |
+ SPI_RX_DUAL | SPI_TX_DUAL;
+ master->bits_per_word_mask = SPI_BPW_MASK(8);
+ master->auto_runtime_pm = true;
+
+ platform_set_drvdata(pdev, master);
+
+ /* Initialize the hardware */
+ __raw_writel(0, bs->regs + HSSPI_INT_MASK_REG);
+
+ /* clean up any pending interrupts */
+ __raw_writel(HSSPI_INT_CLEAR_ALL, bs->regs + HSSPI_INT_STATUS_REG);
+
+ /* read out default CS polarities */
+ reg = __raw_readl(bs->regs + HSSPI_GLOBAL_CTRL_REG);
+ bs->cs_polarity = reg & GLOBAL_CTRL_CS_POLARITY_MASK;
+ __raw_writel(reg | GLOBAL_CTRL_CLK_GATE_SSOFF,
+ bs->regs + HSSPI_GLOBAL_CTRL_REG);
+
+ if (irq > 0) {
+ ret = devm_request_irq(dev, irq, bcmbca_hsspi_interrupt, IRQF_SHARED,
+ pdev->name, bs);
+ if (ret)
+ goto out_put_master;
+ }
+
+ pm_runtime_enable(&pdev->dev);
+
+ ret = sysfs_create_group(&pdev->dev.kobj, &bcmbca_hsspi_group);
+ if (ret) {
+ dev_err(&pdev->dev, "couldn't register sysfs group\n");
+ goto out_pm_disable;
+ }
+
+ /* register and we are done */
+ ret = devm_spi_register_master(dev, master);
+ if (ret)
+ goto out_sysgroup_disable;
+
+ dev_info(dev, "Broadcom BCMBCA High Speed SPI Controller driver");
+
+ return 0;
+
+out_sysgroup_disable:
+ sysfs_remove_group(&pdev->dev.kobj, &bcmbca_hsspi_group);
+out_pm_disable:
+ pm_runtime_disable(&pdev->dev);
+out_put_master:
+ spi_master_put(master);
+out_disable_pll_clk:
+ clk_disable_unprepare(pll_clk);
+out_disable_clk:
+ clk_disable_unprepare(clk);
+ return ret;
+}
+
+static int bcmbca_hsspi_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct bcmbca_hsspi *bs = spi_master_get_devdata(master);
+
+ /* reset the hardware and block queue progress */
+ __raw_writel(0, bs->regs + HSSPI_INT_MASK_REG);
+ clk_disable_unprepare(bs->pll_clk);
+ clk_disable_unprepare(bs->clk);
+ sysfs_remove_group(&pdev->dev.kobj, &bcmbca_hsspi_group);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int bcmbca_hsspi_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct bcmbca_hsspi *bs = spi_master_get_devdata(master);
+
+ spi_master_suspend(master);
+ clk_disable_unprepare(bs->pll_clk);
+ clk_disable_unprepare(bs->clk);
+
+ return 0;
+}
+
+static int bcmbca_hsspi_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct bcmbca_hsspi *bs = spi_master_get_devdata(master);
+ int ret;
+
+ ret = clk_prepare_enable(bs->clk);
+ if (ret)
+ return ret;
+
+ if (bs->pll_clk) {
+ ret = clk_prepare_enable(bs->pll_clk);
+ if (ret) {
+ clk_disable_unprepare(bs->clk);
+ return ret;
+ }
+ }
+
+ spi_master_resume(master);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(bcmbca_hsspi_pm_ops, bcmbca_hsspi_suspend,
+ bcmbca_hsspi_resume);
+
+static const struct of_device_id bcmbca_hsspi_of_match[] = {
+ { .compatible = "brcm,bcmbca-hsspi-v1.1", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, bcmbca_hsspi_of_match);
+
+static struct platform_driver bcmbca_hsspi_driver = {
+ .driver = {
+ .name = "bcmbca-hsspi",
+ .pm = &bcmbca_hsspi_pm_ops,
+ .of_match_table = bcmbca_hsspi_of_match,
+ },
+ .probe = bcmbca_hsspi_probe,
+ .remove = bcmbca_hsspi_remove,
+};
+
+module_platform_driver(bcmbca_hsspi_driver);
+
+MODULE_ALIAS("platform:bcmbca_hsspi");
+MODULE_DESCRIPTION("Broadcom BCMBCA High Speed SPI Controller driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c
index 676313e1bdad..2954c06a7f57 100644
--- a/drivers/spi/spi-cadence-quadspi.c
+++ b/drivers/spi/spi-cadence-quadspi.c
@@ -84,6 +84,7 @@ struct cqspi_st {
u32 trigger_address;
u32 wr_delay;
bool use_direct_mode;
+ bool use_direct_mode_wr;
struct cqspi_flash_pdata f_pdata[CQSPI_MAX_CHIPSELECT];
bool use_dma_read;
u32 pd_dev_id;
@@ -531,6 +532,17 @@ static int cqspi_command_read(struct cqspi_flash_pdata *f_pdata,
/* 0 means 1 byte. */
reg |= (((n_rx - 1) & CQSPI_REG_CMDCTRL_RD_BYTES_MASK)
<< CQSPI_REG_CMDCTRL_RD_BYTES_LSB);
+
+ /* setup ADDR BIT field */
+ if (op->addr.nbytes) {
+ reg |= (0x1 << CQSPI_REG_CMDCTRL_ADDR_EN_LSB);
+ reg |= ((op->addr.nbytes - 1) &
+ CQSPI_REG_CMDCTRL_ADD_BYTES_MASK)
+ << CQSPI_REG_CMDCTRL_ADD_BYTES_LSB;
+
+ writel(op->addr.val, reg_base + CQSPI_REG_CMDADDRESS);
+ }
+
status = cqspi_exec_flash_cmd(cqspi, reg);
if (status)
return status;
@@ -549,6 +561,9 @@ static int cqspi_command_read(struct cqspi_flash_pdata *f_pdata,
memcpy(rxbuf, &reg, read_len);
}
+ /* Reset CMD_CTRL Reg once command read completes */
+ writel(0, reg_base + CQSPI_REG_CMDCTRL);
+
return 0;
}
@@ -613,7 +628,12 @@ static int cqspi_command_write(struct cqspi_flash_pdata *f_pdata,
}
}
- return cqspi_exec_flash_cmd(cqspi, reg);
+ ret = cqspi_exec_flash_cmd(cqspi, reg);
+
+ /* Reset CMD_CTRL Reg once command write completes */
+ writel(0, reg_base + CQSPI_REG_CMDCTRL);
+
+ return ret;
}
static int cqspi_read_setup(struct cqspi_flash_pdata *f_pdata,
@@ -937,6 +957,12 @@ static int cqspi_write_setup(struct cqspi_flash_pdata *f_pdata,
reg = readl(reg_base + CQSPI_REG_WR_COMPLETION_CTRL);
reg |= CQSPI_REG_WR_DISABLE_AUTO_POLL;
writel(reg, reg_base + CQSPI_REG_WR_COMPLETION_CTRL);
+ /*
+ * DAC mode require auto polling as flash needs to be polled
+ * for write completion in case of bubble in SPI transaction
+ * due to slow CPU/DMA master.
+ */
+ cqspi->use_direct_mode_wr = false;
}
reg = readl(reg_base + CQSPI_REG_SIZE);
@@ -1222,7 +1248,7 @@ static ssize_t cqspi_write(struct cqspi_flash_pdata *f_pdata,
* data.
*/
if (!op->cmd.dtr && cqspi->use_direct_mode &&
- ((to + len) <= cqspi->ahb_size)) {
+ cqspi->use_direct_mode_wr && ((to + len) <= cqspi->ahb_size)) {
memcpy_toio(cqspi->ahb_base + to, buf, len);
return cqspi_wait_idle(cqspi);
}
@@ -1333,7 +1359,13 @@ static int cqspi_mem_process(struct spi_mem *mem, const struct spi_mem_op *op)
cqspi_configure(f_pdata, mem->spi->max_speed_hz);
if (op->data.dir == SPI_MEM_DATA_IN && op->data.buf.in) {
- if (!op->addr.nbytes)
+ /*
+ * Performing reads in DAC mode forces to read minimum 4 bytes
+ * which is unsupported on some flash devices during register
+ * reads, prefer STIG mode for such small reads.
+ */
+ if (!op->addr.nbytes ||
+ op->data.nbytes <= CQSPI_STIG_DATA_LEN_MAX)
return cqspi_command_read(f_pdata, op);
return cqspi_read(f_pdata, op);
@@ -1692,8 +1724,10 @@ static int cqspi_probe(struct platform_device *pdev)
cqspi->master_ref_clk_hz);
if (ddata->hwcaps_mask & CQSPI_SUPPORTS_OCTAL)
master->mode_bits |= SPI_RX_OCTAL | SPI_TX_OCTAL;
- if (!(ddata->quirks & CQSPI_DISABLE_DAC_MODE))
+ if (!(ddata->quirks & CQSPI_DISABLE_DAC_MODE)) {
cqspi->use_direct_mode = true;
+ cqspi->use_direct_mode_wr = true;
+ }
if (ddata->quirks & CQSPI_SUPPORT_EXTERNAL_DMA)
cqspi->use_dma_read = true;
if (ddata->quirks & CQSPI_NO_SUPPORT_WR_COMPLETION)
diff --git a/drivers/spi/spi-dw-core.c b/drivers/spi/spi-dw-core.c
index 99edddf9958b..c3bfb6c84cab 100644
--- a/drivers/spi/spi-dw-core.c
+++ b/drivers/spi/spi-dw-core.c
@@ -366,7 +366,7 @@ static void dw_spi_irq_setup(struct dw_spi *dws)
* will be adjusted at the final stage of the IRQ-based SPI transfer
* execution so not to lose the leftover of the incoming data.
*/
- level = min_t(u16, dws->fifo_len / 2, dws->tx_len);
+ level = min_t(unsigned int, dws->fifo_len / 2, dws->tx_len);
dw_writel(dws, DW_SPI_TXFTLR, level);
dw_writel(dws, DW_SPI_RXFTLR, level - 1);
diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c
index 4e83cc5b445d..babb039bcb43 100644
--- a/drivers/spi/spi-geni-qcom.c
+++ b/drivers/spi/spi-geni-qcom.c
@@ -12,7 +12,7 @@
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/pm_runtime.h>
-#include <linux/qcom-geni-se.h>
+#include <linux/soc/qcom/geni-se.h>
#include <linux/spi/spi.h>
#include <linux/spinlock.h>
@@ -87,6 +87,8 @@ struct spi_geni_master {
struct completion cs_done;
struct completion cancel_done;
struct completion abort_done;
+ struct completion tx_reset_done;
+ struct completion rx_reset_done;
unsigned int oversampling;
spinlock_t lock;
int irq;
@@ -95,6 +97,8 @@ struct spi_geni_master {
struct dma_chan *tx;
struct dma_chan *rx;
int cur_xfer_mode;
+ dma_addr_t tx_se_dma;
+ dma_addr_t rx_se_dma;
};
static int get_spi_clk_cfg(unsigned int speed_hz,
@@ -129,23 +133,27 @@ static int get_spi_clk_cfg(unsigned int speed_hz,
return ret;
}
-static void handle_fifo_timeout(struct spi_master *spi,
+static void handle_se_timeout(struct spi_master *spi,
struct spi_message *msg)
{
struct spi_geni_master *mas = spi_master_get_devdata(spi);
unsigned long time_left;
struct geni_se *se = &mas->se;
+ const struct spi_transfer *xfer;
spin_lock_irq(&mas->lock);
reinit_completion(&mas->cancel_done);
- writel(0, se->base + SE_GENI_TX_WATERMARK_REG);
+ if (mas->cur_xfer_mode == GENI_SE_FIFO)
+ writel(0, se->base + SE_GENI_TX_WATERMARK_REG);
+
+ xfer = mas->cur_xfer;
mas->cur_xfer = NULL;
geni_se_cancel_m_cmd(se);
spin_unlock_irq(&mas->lock);
time_left = wait_for_completion_timeout(&mas->cancel_done, HZ);
if (time_left)
- return;
+ goto unmap_if_dma;
spin_lock_irq(&mas->lock);
reinit_completion(&mas->abort_done);
@@ -162,6 +170,39 @@ static void handle_fifo_timeout(struct spi_master *spi,
*/
mas->abort_failed = true;
}
+
+unmap_if_dma:
+ if (mas->cur_xfer_mode == GENI_SE_DMA) {
+ if (xfer) {
+ if (xfer->tx_buf && mas->tx_se_dma) {
+ spin_lock_irq(&mas->lock);
+ reinit_completion(&mas->tx_reset_done);
+ writel(1, se->base + SE_DMA_TX_FSM_RST);
+ spin_unlock_irq(&mas->lock);
+ time_left = wait_for_completion_timeout(&mas->tx_reset_done, HZ);
+ if (!time_left)
+ dev_err(mas->dev, "DMA TX RESET failed\n");
+ geni_se_tx_dma_unprep(se, mas->tx_se_dma, xfer->len);
+ }
+ if (xfer->rx_buf && mas->rx_se_dma) {
+ spin_lock_irq(&mas->lock);
+ reinit_completion(&mas->rx_reset_done);
+ writel(1, se->base + SE_DMA_RX_FSM_RST);
+ spin_unlock_irq(&mas->lock);
+ time_left = wait_for_completion_timeout(&mas->rx_reset_done, HZ);
+ if (!time_left)
+ dev_err(mas->dev, "DMA RX RESET failed\n");
+ geni_se_rx_dma_unprep(se, mas->rx_se_dma, xfer->len);
+ }
+ } else {
+ /*
+ * This can happen if a timeout happened and we had to wait
+ * for lock in this function because isr was holding the lock
+ * and handling transfer completion at that time.
+ */
+ dev_warn(mas->dev, "Cancel/Abort on completed SPI transfer\n");
+ }
+ }
}
static void handle_gpi_timeout(struct spi_master *spi, struct spi_message *msg)
@@ -178,7 +219,8 @@ static void spi_geni_handle_err(struct spi_master *spi, struct spi_message *msg)
switch (mas->cur_xfer_mode) {
case GENI_SE_FIFO:
- handle_fifo_timeout(spi, msg);
+ case GENI_SE_DMA:
+ handle_se_timeout(spi, msg);
break;
case GENI_GPI_DMA:
handle_gpi_timeout(spi, msg);
@@ -250,6 +292,8 @@ static void spi_geni_set_cs(struct spi_device *slv, bool set_flag)
}
mas->cs_flag = set_flag;
+ /* set xfer_mode to FIFO to complete cs_done in isr */
+ mas->cur_xfer_mode = GENI_SE_FIFO;
reinit_completion(&mas->cs_done);
if (set_flag)
geni_se_setup_m_cmd(se, SPI_CS_ASSERT, 0);
@@ -260,7 +304,7 @@ static void spi_geni_set_cs(struct spi_device *slv, bool set_flag)
time_left = wait_for_completion_timeout(&mas->cs_done, HZ);
if (!time_left) {
dev_warn(mas->dev, "Timeout setting chip select\n");
- handle_fifo_timeout(spi, NULL);
+ handle_se_timeout(spi, NULL);
}
exit:
@@ -482,8 +526,12 @@ static bool geni_can_dma(struct spi_controller *ctlr,
{
struct spi_geni_master *mas = spi_master_get_devdata(slv->master);
- /* check if dma is supported */
- return mas->cur_xfer_mode != GENI_SE_FIFO;
+ /*
+ * Return true if transfer needs to be mapped prior to
+ * calling transfer_one which is the case only for GPI_DMA.
+ * For SE_DMA mode, map/unmap is done in geni_se_*x_dma_prep.
+ */
+ return mas->cur_xfer_mode == GENI_GPI_DMA;
}
static int spi_geni_prepare_message(struct spi_master *spi,
@@ -494,6 +542,7 @@ static int spi_geni_prepare_message(struct spi_master *spi,
switch (mas->cur_xfer_mode) {
case GENI_SE_FIFO:
+ case GENI_SE_DMA:
if (spi_geni_is_abort_still_pending(mas))
return -EBUSY;
ret = setup_fifo_params(spi_msg->spi, spi);
@@ -597,7 +646,7 @@ static int spi_geni_init(struct spi_geni_master *mas)
break;
}
/*
- * in case of failure to get dma channel, we can still do the
+ * in case of failure to get gpi dma channel, we can still do the
* FIFO mode, so fallthrough
*/
dev_warn(mas->dev, "FIFO mode disabled, but couldn't get DMA, fall back to FIFO mode\n");
@@ -716,12 +765,12 @@ static void geni_spi_handle_rx(struct spi_geni_master *mas)
mas->rx_rem_bytes -= rx_bytes;
}
-static void setup_fifo_xfer(struct spi_transfer *xfer,
+static int setup_se_xfer(struct spi_transfer *xfer,
struct spi_geni_master *mas,
u16 mode, struct spi_master *spi)
{
u32 m_cmd = 0;
- u32 len;
+ u32 len, fifo_size;
struct geni_se *se = &mas->se;
int ret;
@@ -748,7 +797,7 @@ static void setup_fifo_xfer(struct spi_transfer *xfer,
/* Speed and bits per word can be overridden per transfer */
ret = geni_spi_set_clock_and_bw(mas, xfer->speed_hz);
if (ret)
- return;
+ return ret;
mas->tx_rem_bytes = 0;
mas->rx_rem_bytes = 0;
@@ -772,17 +821,50 @@ static void setup_fifo_xfer(struct spi_transfer *xfer,
mas->rx_rem_bytes = xfer->len;
}
+ /* Select transfer mode based on transfer length */
+ fifo_size = mas->tx_fifo_depth * mas->fifo_width_bits / mas->cur_bits_per_word;
+ mas->cur_xfer_mode = (len <= fifo_size) ? GENI_SE_FIFO : GENI_SE_DMA;
+ geni_se_select_mode(se, mas->cur_xfer_mode);
+
/*
* Lock around right before we start the transfer since our
* interrupt could come in at any time now.
*/
spin_lock_irq(&mas->lock);
geni_se_setup_m_cmd(se, m_cmd, FRAGMENTATION);
- if (m_cmd & SPI_TX_ONLY) {
+
+ if (mas->cur_xfer_mode == GENI_SE_DMA) {
+ if (m_cmd & SPI_RX_ONLY) {
+ ret = geni_se_rx_dma_prep(se, xfer->rx_buf,
+ xfer->len, &mas->rx_se_dma);
+ if (ret) {
+ dev_err(mas->dev, "Failed to setup Rx dma %d\n", ret);
+ mas->rx_se_dma = 0;
+ goto unlock_and_return;
+ }
+ }
+ if (m_cmd & SPI_TX_ONLY) {
+ ret = geni_se_tx_dma_prep(se, (void *)xfer->tx_buf,
+ xfer->len, &mas->tx_se_dma);
+ if (ret) {
+ dev_err(mas->dev, "Failed to setup Tx dma %d\n", ret);
+ mas->tx_se_dma = 0;
+ if (m_cmd & SPI_RX_ONLY) {
+ /* Unmap rx buffer if duplex transfer */
+ geni_se_rx_dma_unprep(se, mas->rx_se_dma, xfer->len);
+ mas->rx_se_dma = 0;
+ }
+ goto unlock_and_return;
+ }
+ }
+ } else if (m_cmd & SPI_TX_ONLY) {
if (geni_spi_handle_tx(mas))
writel(mas->tx_wm, se->base + SE_GENI_TX_WATERMARK_REG);
}
+
+unlock_and_return:
spin_unlock_irq(&mas->lock);
+ return ret;
}
static int spi_geni_transfer_one(struct spi_master *spi,
@@ -790,6 +872,7 @@ static int spi_geni_transfer_one(struct spi_master *spi,
struct spi_transfer *xfer)
{
struct spi_geni_master *mas = spi_master_get_devdata(spi);
+ int ret;
if (spi_geni_is_abort_still_pending(mas))
return -EBUSY;
@@ -798,9 +881,12 @@ static int spi_geni_transfer_one(struct spi_master *spi,
if (!xfer->len)
return 0;
- if (mas->cur_xfer_mode == GENI_SE_FIFO) {
- setup_fifo_xfer(xfer, mas, slv->mode, spi);
- return 1;
+ if (mas->cur_xfer_mode == GENI_SE_FIFO || mas->cur_xfer_mode == GENI_SE_DMA) {
+ ret = setup_se_xfer(xfer, mas, slv->mode, spi);
+ /* SPI framework expects +ve ret code to wait for transfer complete */
+ if (!ret)
+ ret = 1;
+ return ret;
}
return setup_gsi_xfer(xfer, mas, slv, spi);
}
@@ -823,39 +909,70 @@ static irqreturn_t geni_spi_isr(int irq, void *data)
spin_lock(&mas->lock);
- if ((m_irq & M_RX_FIFO_WATERMARK_EN) || (m_irq & M_RX_FIFO_LAST_EN))
- geni_spi_handle_rx(mas);
-
- if (m_irq & M_TX_FIFO_WATERMARK_EN)
- geni_spi_handle_tx(mas);
-
- if (m_irq & M_CMD_DONE_EN) {
- if (mas->cur_xfer) {
+ if (mas->cur_xfer_mode == GENI_SE_FIFO) {
+ if ((m_irq & M_RX_FIFO_WATERMARK_EN) || (m_irq & M_RX_FIFO_LAST_EN))
+ geni_spi_handle_rx(mas);
+
+ if (m_irq & M_TX_FIFO_WATERMARK_EN)
+ geni_spi_handle_tx(mas);
+
+ if (m_irq & M_CMD_DONE_EN) {
+ if (mas->cur_xfer) {
+ spi_finalize_current_transfer(spi);
+ mas->cur_xfer = NULL;
+ /*
+ * If this happens, then a CMD_DONE came before all the
+ * Tx buffer bytes were sent out. This is unusual, log
+ * this condition and disable the WM interrupt to
+ * prevent the system from stalling due an interrupt
+ * storm.
+ *
+ * If this happens when all Rx bytes haven't been
+ * received, log the condition. The only known time
+ * this can happen is if bits_per_word != 8 and some
+ * registers that expect xfer lengths in num spi_words
+ * weren't written correctly.
+ */
+ if (mas->tx_rem_bytes) {
+ writel(0, se->base + SE_GENI_TX_WATERMARK_REG);
+ dev_err(mas->dev, "Premature done. tx_rem = %d bpw%d\n",
+ mas->tx_rem_bytes, mas->cur_bits_per_word);
+ }
+ if (mas->rx_rem_bytes)
+ dev_err(mas->dev, "Premature done. rx_rem = %d bpw%d\n",
+ mas->rx_rem_bytes, mas->cur_bits_per_word);
+ } else {
+ complete(&mas->cs_done);
+ }
+ }
+ } else if (mas->cur_xfer_mode == GENI_SE_DMA) {
+ const struct spi_transfer *xfer = mas->cur_xfer;
+ u32 dma_tx_status = readl_relaxed(se->base + SE_DMA_TX_IRQ_STAT);
+ u32 dma_rx_status = readl_relaxed(se->base + SE_DMA_RX_IRQ_STAT);
+
+ if (dma_tx_status)
+ writel(dma_tx_status, se->base + SE_DMA_TX_IRQ_CLR);
+ if (dma_rx_status)
+ writel(dma_rx_status, se->base + SE_DMA_RX_IRQ_CLR);
+ if (dma_tx_status & TX_DMA_DONE)
+ mas->tx_rem_bytes = 0;
+ if (dma_rx_status & RX_DMA_DONE)
+ mas->rx_rem_bytes = 0;
+ if (dma_tx_status & TX_RESET_DONE)
+ complete(&mas->tx_reset_done);
+ if (dma_rx_status & RX_RESET_DONE)
+ complete(&mas->rx_reset_done);
+ if (!mas->tx_rem_bytes && !mas->rx_rem_bytes && xfer) {
+ if (xfer->tx_buf && mas->tx_se_dma) {
+ geni_se_tx_dma_unprep(se, mas->tx_se_dma, xfer->len);
+ mas->tx_se_dma = 0;
+ }
+ if (xfer->rx_buf && mas->rx_se_dma) {
+ geni_se_rx_dma_unprep(se, mas->rx_se_dma, xfer->len);
+ mas->rx_se_dma = 0;
+ }
spi_finalize_current_transfer(spi);
mas->cur_xfer = NULL;
- /*
- * If this happens, then a CMD_DONE came before all the
- * Tx buffer bytes were sent out. This is unusual, log
- * this condition and disable the WM interrupt to
- * prevent the system from stalling due an interrupt
- * storm.
- *
- * If this happens when all Rx bytes haven't been
- * received, log the condition. The only known time
- * this can happen is if bits_per_word != 8 and some
- * registers that expect xfer lengths in num spi_words
- * weren't written correctly.
- */
- if (mas->tx_rem_bytes) {
- writel(0, se->base + SE_GENI_TX_WATERMARK_REG);
- dev_err(mas->dev, "Premature done. tx_rem = %d bpw%d\n",
- mas->tx_rem_bytes, mas->cur_bits_per_word);
- }
- if (mas->rx_rem_bytes)
- dev_err(mas->dev, "Premature done. rx_rem = %d bpw%d\n",
- mas->rx_rem_bytes, mas->cur_bits_per_word);
- } else {
- complete(&mas->cs_done);
}
}
@@ -949,6 +1066,8 @@ static int spi_geni_probe(struct platform_device *pdev)
init_completion(&mas->cs_done);
init_completion(&mas->cancel_done);
init_completion(&mas->abort_done);
+ init_completion(&mas->tx_reset_done);
+ init_completion(&mas->rx_reset_done);
spin_lock_init(&mas->lock);
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_set_autosuspend_delay(&pdev->dev, 250);
diff --git a/drivers/spi/spi-intel-pci.c b/drivers/spi/spi-intel-pci.c
index f0d532ea40e8..4d69e320d018 100644
--- a/drivers/spi/spi-intel-pci.c
+++ b/drivers/spi/spi-intel-pci.c
@@ -60,12 +60,12 @@ static int intel_spi_pci_probe(struct pci_dev *pdev,
}
static const struct pci_device_id intel_spi_pci_ids[] = {
- { PCI_VDEVICE(INTEL, 0x02a4), (unsigned long)&bxt_info },
- { PCI_VDEVICE(INTEL, 0x06a4), (unsigned long)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0x02a4), (unsigned long)&cnl_info },
+ { PCI_VDEVICE(INTEL, 0x06a4), (unsigned long)&cnl_info },
{ PCI_VDEVICE(INTEL, 0x18e0), (unsigned long)&bxt_info },
{ PCI_VDEVICE(INTEL, 0x19e0), (unsigned long)&bxt_info },
{ PCI_VDEVICE(INTEL, 0x1bca), (unsigned long)&bxt_info },
- { PCI_VDEVICE(INTEL, 0x34a4), (unsigned long)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0x34a4), (unsigned long)&cnl_info },
{ PCI_VDEVICE(INTEL, 0x38a4), (unsigned long)&bxt_info },
{ PCI_VDEVICE(INTEL, 0x43a4), (unsigned long)&cnl_info },
{ PCI_VDEVICE(INTEL, 0x4b24), (unsigned long)&bxt_info },
@@ -75,11 +75,14 @@ static const struct pci_device_id intel_spi_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0x7a24), (unsigned long)&cnl_info },
{ PCI_VDEVICE(INTEL, 0x7aa4), (unsigned long)&cnl_info },
{ PCI_VDEVICE(INTEL, 0x7e23), (unsigned long)&cnl_info },
- { PCI_VDEVICE(INTEL, 0xa0a4), (unsigned long)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0x9d24), (unsigned long)&cnl_info },
+ { PCI_VDEVICE(INTEL, 0x9da4), (unsigned long)&cnl_info },
+ { PCI_VDEVICE(INTEL, 0xa0a4), (unsigned long)&cnl_info },
{ PCI_VDEVICE(INTEL, 0xa1a4), (unsigned long)&bxt_info },
{ PCI_VDEVICE(INTEL, 0xa224), (unsigned long)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0xa2a4), (unsigned long)&cnl_info },
{ PCI_VDEVICE(INTEL, 0xa324), (unsigned long)&cnl_info },
- { PCI_VDEVICE(INTEL, 0xa3a4), (unsigned long)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0xa3a4), (unsigned long)&cnl_info },
{ },
};
MODULE_DEVICE_TABLE(pci, intel_spi_pci_ids);
diff --git a/drivers/spi/spi-intel.c b/drivers/spi/spi-intel.c
index f619212b0d5c..f4679868c49f 100644
--- a/drivers/spi/spi-intel.c
+++ b/drivers/spi/spi-intel.c
@@ -104,7 +104,7 @@
#define BXT_PR 0x84
#define BXT_SSFSTS_CTL 0xa0
#define BXT_FREG_NUM 12
-#define BXT_PR_NUM 6
+#define BXT_PR_NUM 5
#define CNL_PR 0x84
#define CNL_FREG_NUM 6
@@ -1368,14 +1368,14 @@ static int intel_spi_populate_chip(struct intel_spi *ispi)
if (!spi_new_device(ispi->master, &chip))
return -ENODEV;
- /* Add the second chip if present */
- if (ispi->master->num_chipselect < 2)
- return 0;
-
ret = intel_spi_read_desc(ispi);
if (ret)
return ret;
+ /* Add the second chip if present */
+ if (ispi->master->num_chipselect < 2)
+ return 0;
+
chip.platform_data = NULL;
chip.chip_select = 1;
diff --git a/drivers/spi/spi-loopback-test.c b/drivers/spi/spi-loopback-test.c
index dd7de8fa37d0..313106eb8d40 100644
--- a/drivers/spi/spi-loopback-test.c
+++ b/drivers/spi/spi-loopback-test.c
@@ -71,6 +71,11 @@ module_param(check_ranges, int, 0644);
MODULE_PARM_DESC(check_ranges,
"checks rx_buffer pattern are valid");
+static unsigned int delay_ms = 100;
+module_param(delay_ms, uint, 0644);
+MODULE_PARM_DESC(delay_ms,
+ "delay between tests, in milliseconds (default: 100)");
+
/* the actual tests to execute */
static struct spi_test spi_tests[] = {
{
@@ -1098,7 +1103,8 @@ int spi_test_run_tests(struct spi_device *spi,
* detect the individual tests when using a logic analyzer
* we also add scheduling to avoid potential spi_timeouts...
*/
- mdelay(100);
+ if (delay_ms)
+ mdelay(delay_ms);
schedule();
}
diff --git a/drivers/spi/spi-mem.c b/drivers/spi/spi-mem.c
index 0c79193d9697..701838b6f0c4 100644
--- a/drivers/spi/spi-mem.c
+++ b/drivers/spi/spi-mem.c
@@ -325,7 +325,7 @@ int spi_mem_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
if (!spi_mem_internal_supports_op(mem, op))
return -ENOTSUPP;
- if (ctlr->mem_ops && !mem->spi->cs_gpiod) {
+ if (ctlr->mem_ops && ctlr->mem_ops->exec_op && !mem->spi->cs_gpiod) {
ret = spi_mem_access_start(mem);
if (ret)
return ret;
diff --git a/drivers/spi/spi-mtk-snfi.c b/drivers/spi/spi-mtk-snfi.c
index fa8412ba20e2..f3f95eb37365 100644
--- a/drivers/spi/spi-mtk-snfi.c
+++ b/drivers/spi/spi-mtk-snfi.c
@@ -195,6 +195,8 @@
#define DATA_READ_MODE_X4 2
#define DATA_READ_MODE_DUAL 5
#define DATA_READ_MODE_QUAD 6
+#define DATA_READ_LATCH_LAT GENMASK(9, 8)
+#define DATA_READ_LATCH_LAT_S 8
#define PG_LOAD_CUSTOM_EN BIT(7)
#define DATARD_CUSTOM_EN BIT(6)
#define CS_DESELECT_CYC_S 0
@@ -205,6 +207,9 @@
#define SNF_DLY_CTL3 0x548
#define SFCK_SAM_DLY_S 0
+#define SFCK_SAM_DLY GENMASK(5, 0)
+#define SFCK_SAM_DLY_TOTAL 9
+#define SFCK_SAM_DLY_RANGE 47
#define SNF_STA_CTL1 0x550
#define CUS_PG_DONE BIT(28)
@@ -297,6 +302,7 @@ struct mtk_snand {
struct device *dev;
struct clk *nfi_clk;
struct clk *pad_clk;
+ struct clk *nfi_hclk;
void __iomem *nfi_base;
int irq;
struct completion op_done;
@@ -1339,7 +1345,16 @@ static int mtk_snand_enable_clk(struct mtk_snand *ms)
dev_err(ms->dev, "unable to enable pad clk\n");
goto err1;
}
+ ret = clk_prepare_enable(ms->nfi_hclk);
+ if (ret) {
+ dev_err(ms->dev, "unable to enable nfi hclk\n");
+ goto err2;
+ }
+
return 0;
+
+err2:
+ clk_disable_unprepare(ms->pad_clk);
err1:
clk_disable_unprepare(ms->nfi_clk);
return ret;
@@ -1347,6 +1362,7 @@ err1:
static void mtk_snand_disable_clk(struct mtk_snand *ms)
{
+ clk_disable_unprepare(ms->nfi_hclk);
clk_disable_unprepare(ms->pad_clk);
clk_disable_unprepare(ms->nfi_clk);
}
@@ -1357,6 +1373,8 @@ static int mtk_snand_probe(struct platform_device *pdev)
const struct of_device_id *dev_id;
struct spi_controller *ctlr;
struct mtk_snand *ms;
+ unsigned long spi_freq;
+ u32 val = 0;
int ret;
dev_id = of_match_node(mtk_snand_ids, np);
@@ -1401,6 +1419,13 @@ static int mtk_snand_probe(struct platform_device *pdev)
goto release_ecc;
}
+ ms->nfi_hclk = devm_clk_get_optional(&pdev->dev, "nfi_hclk");
+ if (IS_ERR(ms->nfi_hclk)) {
+ ret = PTR_ERR(ms->nfi_hclk);
+ dev_err(&pdev->dev, "unable to get nfi_hclk, err = %d\n", ret);
+ goto release_ecc;
+ }
+
ret = mtk_snand_enable_clk(ms);
if (ret)
goto release_ecc;
@@ -1428,10 +1453,22 @@ static int mtk_snand_probe(struct platform_device *pdev)
// switch to SNFI mode
nfi_write32(ms, SNF_CFG, SPI_MODE);
+ ret = of_property_read_u32(np, "rx-sample-delay-ns", &val);
+ if (!ret)
+ nfi_rmw32(ms, SNF_DLY_CTL3, SFCK_SAM_DLY,
+ val * SFCK_SAM_DLY_RANGE / SFCK_SAM_DLY_TOTAL);
+
+ ret = of_property_read_u32(np, "mediatek,rx-latch-latency-ns", &val);
+ if (!ret) {
+ spi_freq = clk_get_rate(ms->pad_clk);
+ val = DIV_ROUND_CLOSEST(val, NSEC_PER_SEC / spi_freq);
+ nfi_rmw32(ms, SNF_MISC_CTL, DATA_READ_LATCH_LAT,
+ val << DATA_READ_LATCH_LAT_S);
+ }
+
// setup an initial page format for ops matching page_cache_op template
// before ECC is called.
- ret = mtk_snand_setup_pagefmt(ms, ms->caps->sector_size,
- ms->caps->spare_sizes[0]);
+ ret = mtk_snand_setup_pagefmt(ms, SZ_2K, SZ_64);
if (ret) {
dev_err(ms->dev, "failed to set initial page format\n");
goto disable_clk;
diff --git a/drivers/spi/spi-omap-100k.c b/drivers/spi/spi-omap-100k.c
deleted file mode 100644
index 061f7394e5b9..000000000000
--- a/drivers/spi/spi-omap-100k.c
+++ /dev/null
@@ -1,490 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * OMAP7xx SPI 100k controller driver
- * Author: Fabrice Crohas <fcrohas@gmail.com>
- * from original omap1_mcspi driver
- *
- * Copyright (C) 2005, 2006 Nokia Corporation
- * Author: Samuel Ortiz <samuel.ortiz@nokia.com> and
- * Juha Yrjola <juha.yrjola@nokia.com>
- */
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/module.h>
-#include <linux/device.h>
-#include <linux/delay.h>
-#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
-#include <linux/err.h>
-#include <linux/clk.h>
-#include <linux/io.h>
-#include <linux/slab.h>
-
-#include <linux/spi/spi.h>
-
-#define OMAP1_SPI100K_MAX_FREQ 48000000
-
-#define ICR_SPITAS (OMAP7XX_ICR_BASE + 0x12)
-
-#define SPI_SETUP1 0x00
-#define SPI_SETUP2 0x02
-#define SPI_CTRL 0x04
-#define SPI_STATUS 0x06
-#define SPI_TX_LSB 0x08
-#define SPI_TX_MSB 0x0a
-#define SPI_RX_LSB 0x0c
-#define SPI_RX_MSB 0x0e
-
-#define SPI_SETUP1_INT_READ_ENABLE (1UL << 5)
-#define SPI_SETUP1_INT_WRITE_ENABLE (1UL << 4)
-#define SPI_SETUP1_CLOCK_DIVISOR(x) ((x) << 1)
-#define SPI_SETUP1_CLOCK_ENABLE (1UL << 0)
-
-#define SPI_SETUP2_ACTIVE_EDGE_FALLING (0UL << 0)
-#define SPI_SETUP2_ACTIVE_EDGE_RISING (1UL << 0)
-#define SPI_SETUP2_NEGATIVE_LEVEL (0UL << 5)
-#define SPI_SETUP2_POSITIVE_LEVEL (1UL << 5)
-#define SPI_SETUP2_LEVEL_TRIGGER (0UL << 10)
-#define SPI_SETUP2_EDGE_TRIGGER (1UL << 10)
-
-#define SPI_CTRL_SEN(x) ((x) << 7)
-#define SPI_CTRL_WORD_SIZE(x) (((x) - 1) << 2)
-#define SPI_CTRL_WR (1UL << 1)
-#define SPI_CTRL_RD (1UL << 0)
-
-#define SPI_STATUS_WE (1UL << 1)
-#define SPI_STATUS_RD (1UL << 0)
-
-/* use PIO for small transfers, avoiding DMA setup/teardown overhead and
- * cache operations; better heuristics consider wordsize and bitrate.
- */
-#define DMA_MIN_BYTES 8
-
-#define SPI_RUNNING 0
-#define SPI_SHUTDOWN 1
-
-struct omap1_spi100k {
- struct clk *ick;
- struct clk *fck;
-
- /* Virtual base address of the controller */
- void __iomem *base;
-};
-
-struct omap1_spi100k_cs {
- void __iomem *base;
- int word_len;
-};
-
-static void spi100k_enable_clock(struct spi_master *master)
-{
- unsigned int val;
- struct omap1_spi100k *spi100k = spi_master_get_devdata(master);
-
- /* enable SPI */
- val = readw(spi100k->base + SPI_SETUP1);
- val |= SPI_SETUP1_CLOCK_ENABLE;
- writew(val, spi100k->base + SPI_SETUP1);
-}
-
-static void spi100k_disable_clock(struct spi_master *master)
-{
- unsigned int val;
- struct omap1_spi100k *spi100k = spi_master_get_devdata(master);
-
- /* disable SPI */
- val = readw(spi100k->base + SPI_SETUP1);
- val &= ~SPI_SETUP1_CLOCK_ENABLE;
- writew(val, spi100k->base + SPI_SETUP1);
-}
-
-static void spi100k_write_data(struct spi_master *master, int len, int data)
-{
- struct omap1_spi100k *spi100k = spi_master_get_devdata(master);
-
- /* write 16-bit word, shifting 8-bit data if necessary */
- if (len <= 8) {
- data <<= 8;
- len = 16;
- }
-
- spi100k_enable_clock(master);
- writew(data, spi100k->base + SPI_TX_MSB);
-
- writew(SPI_CTRL_SEN(0) |
- SPI_CTRL_WORD_SIZE(len) |
- SPI_CTRL_WR,
- spi100k->base + SPI_CTRL);
-
- /* Wait for bit ack send change */
- while ((readw(spi100k->base + SPI_STATUS) & SPI_STATUS_WE) != SPI_STATUS_WE)
- ;
- udelay(1000);
-
- spi100k_disable_clock(master);
-}
-
-static int spi100k_read_data(struct spi_master *master, int len)
-{
- int dataL;
- struct omap1_spi100k *spi100k = spi_master_get_devdata(master);
-
- /* Always do at least 16 bits */
- if (len <= 8)
- len = 16;
-
- spi100k_enable_clock(master);
- writew(SPI_CTRL_SEN(0) |
- SPI_CTRL_WORD_SIZE(len) |
- SPI_CTRL_RD,
- spi100k->base + SPI_CTRL);
-
- while ((readw(spi100k->base + SPI_STATUS) & SPI_STATUS_RD) != SPI_STATUS_RD)
- ;
- udelay(1000);
-
- dataL = readw(spi100k->base + SPI_RX_LSB);
- readw(spi100k->base + SPI_RX_MSB);
- spi100k_disable_clock(master);
-
- return dataL;
-}
-
-static void spi100k_open(struct spi_master *master)
-{
- /* get control of SPI */
- struct omap1_spi100k *spi100k = spi_master_get_devdata(master);
-
- writew(SPI_SETUP1_INT_READ_ENABLE |
- SPI_SETUP1_INT_WRITE_ENABLE |
- SPI_SETUP1_CLOCK_DIVISOR(0), spi100k->base + SPI_SETUP1);
-
- /* configure clock and interrupts */
- writew(SPI_SETUP2_ACTIVE_EDGE_FALLING |
- SPI_SETUP2_NEGATIVE_LEVEL |
- SPI_SETUP2_LEVEL_TRIGGER, spi100k->base + SPI_SETUP2);
-}
-
-static void omap1_spi100k_force_cs(struct omap1_spi100k *spi100k, int enable)
-{
- if (enable)
- writew(0x05fc, spi100k->base + SPI_CTRL);
- else
- writew(0x05fd, spi100k->base + SPI_CTRL);
-}
-
-static unsigned
-omap1_spi100k_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
-{
- struct omap1_spi100k_cs *cs = spi->controller_state;
- unsigned int count, c;
- int word_len;
-
- count = xfer->len;
- c = count;
- word_len = cs->word_len;
-
- if (word_len <= 8) {
- u8 *rx;
- const u8 *tx;
-
- rx = xfer->rx_buf;
- tx = xfer->tx_buf;
- do {
- c -= 1;
- if (xfer->tx_buf != NULL)
- spi100k_write_data(spi->master, word_len, *tx++);
- if (xfer->rx_buf != NULL)
- *rx++ = spi100k_read_data(spi->master, word_len);
- } while (c);
- } else if (word_len <= 16) {
- u16 *rx;
- const u16 *tx;
-
- rx = xfer->rx_buf;
- tx = xfer->tx_buf;
- do {
- c -= 2;
- if (xfer->tx_buf != NULL)
- spi100k_write_data(spi->master, word_len, *tx++);
- if (xfer->rx_buf != NULL)
- *rx++ = spi100k_read_data(spi->master, word_len);
- } while (c);
- } else if (word_len <= 32) {
- u32 *rx;
- const u32 *tx;
-
- rx = xfer->rx_buf;
- tx = xfer->tx_buf;
- do {
- c -= 4;
- if (xfer->tx_buf != NULL)
- spi100k_write_data(spi->master, word_len, *tx);
- if (xfer->rx_buf != NULL)
- *rx = spi100k_read_data(spi->master, word_len);
- } while (c);
- }
- return count - c;
-}
-
-/* called only when no transfer is active to this device */
-static int omap1_spi100k_setup_transfer(struct spi_device *spi,
- struct spi_transfer *t)
-{
- struct omap1_spi100k *spi100k = spi_master_get_devdata(spi->master);
- struct omap1_spi100k_cs *cs = spi->controller_state;
- u8 word_len;
-
- if (t != NULL)
- word_len = t->bits_per_word;
- else
- word_len = spi->bits_per_word;
-
- if (word_len > 32)
- return -EINVAL;
- cs->word_len = word_len;
-
- /* SPI init before transfer */
- writew(0x3e, spi100k->base + SPI_SETUP1);
- writew(0x00, spi100k->base + SPI_STATUS);
- writew(0x3e, spi100k->base + SPI_CTRL);
-
- return 0;
-}
-
-/* the spi->mode bits understood by this driver: */
-#define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH)
-
-static int omap1_spi100k_setup(struct spi_device *spi)
-{
- int ret;
- struct omap1_spi100k *spi100k;
- struct omap1_spi100k_cs *cs = spi->controller_state;
-
- spi100k = spi_master_get_devdata(spi->master);
-
- if (!cs) {
- cs = devm_kzalloc(&spi->dev, sizeof(*cs), GFP_KERNEL);
- if (!cs)
- return -ENOMEM;
- cs->base = spi100k->base + spi->chip_select * 0x14;
- spi->controller_state = cs;
- }
-
- spi100k_open(spi->master);
-
- clk_prepare_enable(spi100k->ick);
- clk_prepare_enable(spi100k->fck);
-
- ret = omap1_spi100k_setup_transfer(spi, NULL);
-
- clk_disable_unprepare(spi100k->ick);
- clk_disable_unprepare(spi100k->fck);
-
- return ret;
-}
-
-static int omap1_spi100k_transfer_one_message(struct spi_master *master,
- struct spi_message *m)
-{
- struct omap1_spi100k *spi100k = spi_master_get_devdata(master);
- struct spi_device *spi = m->spi;
- struct spi_transfer *t = NULL;
- int cs_active = 0;
- int status = 0;
-
- list_for_each_entry(t, &m->transfers, transfer_list) {
- if (t->tx_buf == NULL && t->rx_buf == NULL && t->len) {
- break;
- }
- status = omap1_spi100k_setup_transfer(spi, t);
- if (status < 0)
- break;
-
- if (!cs_active) {
- omap1_spi100k_force_cs(spi100k, 1);
- cs_active = 1;
- }
-
- if (t->len) {
- unsigned count;
-
- count = omap1_spi100k_txrx_pio(spi, t);
- m->actual_length += count;
-
- if (count != t->len) {
- break;
- }
- }
-
- spi_transfer_delay_exec(t);
-
- /* ignore the "leave it on after last xfer" hint */
-
- if (t->cs_change) {
- omap1_spi100k_force_cs(spi100k, 0);
- cs_active = 0;
- }
- }
-
- status = omap1_spi100k_setup_transfer(spi, NULL);
-
- if (cs_active)
- omap1_spi100k_force_cs(spi100k, 0);
-
- m->status = status;
-
- spi_finalize_current_message(master);
-
- return status;
-}
-
-static int omap1_spi100k_probe(struct platform_device *pdev)
-{
- struct spi_master *master;
- struct omap1_spi100k *spi100k;
- int status = 0;
-
- if (!pdev->id)
- return -EINVAL;
-
- master = spi_alloc_master(&pdev->dev, sizeof(*spi100k));
- if (master == NULL) {
- dev_dbg(&pdev->dev, "master allocation failed\n");
- return -ENOMEM;
- }
-
- if (pdev->id != -1)
- master->bus_num = pdev->id;
-
- master->setup = omap1_spi100k_setup;
- master->transfer_one_message = omap1_spi100k_transfer_one_message;
- master->num_chipselect = 2;
- master->mode_bits = MODEBITS;
- master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
- master->min_speed_hz = OMAP1_SPI100K_MAX_FREQ/(1<<16);
- master->max_speed_hz = OMAP1_SPI100K_MAX_FREQ;
- master->auto_runtime_pm = true;
-
- spi100k = spi_master_get_devdata(master);
-
- /*
- * The memory region base address is taken as the platform_data.
- * You should allocate this with ioremap() before initializing
- * the SPI.
- */
- spi100k->base = (void __iomem *)dev_get_platdata(&pdev->dev);
-
- spi100k->ick = devm_clk_get(&pdev->dev, "ick");
- if (IS_ERR(spi100k->ick)) {
- dev_dbg(&pdev->dev, "can't get spi100k_ick\n");
- status = PTR_ERR(spi100k->ick);
- goto err;
- }
-
- spi100k->fck = devm_clk_get(&pdev->dev, "fck");
- if (IS_ERR(spi100k->fck)) {
- dev_dbg(&pdev->dev, "can't get spi100k_fck\n");
- status = PTR_ERR(spi100k->fck);
- goto err;
- }
-
- status = clk_prepare_enable(spi100k->ick);
- if (status != 0) {
- dev_err(&pdev->dev, "failed to enable ick: %d\n", status);
- goto err;
- }
-
- status = clk_prepare_enable(spi100k->fck);
- if (status != 0) {
- dev_err(&pdev->dev, "failed to enable fck: %d\n", status);
- goto err_ick;
- }
-
- pm_runtime_enable(&pdev->dev);
- pm_runtime_set_active(&pdev->dev);
-
- status = devm_spi_register_master(&pdev->dev, master);
- if (status < 0)
- goto err_fck;
-
- return status;
-
-err_fck:
- pm_runtime_disable(&pdev->dev);
- clk_disable_unprepare(spi100k->fck);
-err_ick:
- clk_disable_unprepare(spi100k->ick);
-err:
- spi_master_put(master);
- return status;
-}
-
-static int omap1_spi100k_remove(struct platform_device *pdev)
-{
- struct spi_master *master = platform_get_drvdata(pdev);
- struct omap1_spi100k *spi100k = spi_master_get_devdata(master);
-
- pm_runtime_disable(&pdev->dev);
-
- clk_disable_unprepare(spi100k->fck);
- clk_disable_unprepare(spi100k->ick);
-
- return 0;
-}
-
-#ifdef CONFIG_PM
-static int omap1_spi100k_runtime_suspend(struct device *dev)
-{
- struct spi_master *master = dev_get_drvdata(dev);
- struct omap1_spi100k *spi100k = spi_master_get_devdata(master);
-
- clk_disable_unprepare(spi100k->ick);
- clk_disable_unprepare(spi100k->fck);
-
- return 0;
-}
-
-static int omap1_spi100k_runtime_resume(struct device *dev)
-{
- struct spi_master *master = dev_get_drvdata(dev);
- struct omap1_spi100k *spi100k = spi_master_get_devdata(master);
- int ret;
-
- ret = clk_prepare_enable(spi100k->ick);
- if (ret != 0) {
- dev_err(dev, "Failed to enable ick: %d\n", ret);
- return ret;
- }
-
- ret = clk_prepare_enable(spi100k->fck);
- if (ret != 0) {
- dev_err(dev, "Failed to enable fck: %d\n", ret);
- clk_disable_unprepare(spi100k->ick);
- return ret;
- }
-
- return 0;
-}
-#endif
-
-static const struct dev_pm_ops omap1_spi100k_pm = {
- SET_RUNTIME_PM_OPS(omap1_spi100k_runtime_suspend,
- omap1_spi100k_runtime_resume, NULL)
-};
-
-static struct platform_driver omap1_spi100k_driver = {
- .driver = {
- .name = "omap1_spi100k",
- .pm = &omap1_spi100k_pm,
- },
- .probe = omap1_spi100k_probe,
- .remove = omap1_spi100k_remove,
-};
-
-module_platform_driver(omap1_spi100k_driver);
-
-MODULE_DESCRIPTION("OMAP7xx SPI 100k controller driver");
-MODULE_AUTHOR("Fabrice Crohas <fcrohas@gmail.com>");
-MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-omap-uwire.c b/drivers/spi/spi-omap-uwire.c
index 29198e6815b2..20c87163d612 100644
--- a/drivers/spi/spi-omap-uwire.c
+++ b/drivers/spi/spi-omap-uwire.c
@@ -99,7 +99,7 @@ struct uwire_state {
* Or, put it in a structure which is used throughout the driver;
* that avoids having to issue two loads for each bit of static data.
*/
-static unsigned int uwire_idx_shift;
+static unsigned int uwire_idx_shift = 2;
static void __iomem *uwire_base;
static inline void uwire_write_reg(int idx, u16 val)
@@ -481,11 +481,6 @@ static int uwire_probe(struct platform_device *pdev)
}
clk_prepare_enable(uwire->ck);
- if (cpu_is_omap7xx())
- uwire_idx_shift = 1;
- else
- uwire_idx_shift = 2;
-
uwire_write_reg(UWIRE_SR3, 1);
/* the spi->mode bits understood by this driver: */
@@ -536,15 +531,6 @@ static struct platform_driver uwire_driver = {
static int __init omap_uwire_init(void)
{
- /* FIXME move these into the relevant board init code. also, include
- * H3 support; it uses tsc2101 like H2 (on a different chipselect).
- */
-
- if (machine_is_omap_h2()) {
- /* defaults: W21 SDO, U18 SDI, V19 SCL */
- omap_cfg_reg(N14_1610_UWIRE_CS0);
- omap_cfg_reg(N15_1610_UWIRE_CS1);
- }
return platform_driver_register(&uwire_driver);
}
diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c
index e4484ace584e..a17ff839117f 100644
--- a/drivers/spi/spi-pl022.c
+++ b/drivers/spi/spi-pl022.c
@@ -2091,7 +2091,6 @@ pl022_platform_data_dt_get(struct device *dev)
return NULL;
pd->bus_id = -1;
- pd->enable_dma = 1;
of_property_read_u32(np, "pl022,autosuspend-delay",
&pd->autosuspend_delay);
pd->rt = of_property_read_bool(np, "pl022,rt");
diff --git a/drivers/spi/spi-s3c24xx-regs.h b/drivers/spi/spi-s3c24xx-regs.h
deleted file mode 100644
index f51464ab5677..000000000000
--- a/drivers/spi/spi-s3c24xx-regs.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (c) 2004 Fetron GmbH
- *
- * S3C2410 SPI register definition
- */
-
-#ifndef __SPI_S3C2410_H
-#define __SPI_S3C2410_H
-
-#define S3C2410_SPCON (0x00)
-
-#define S3C2410_SPCON_SMOD_DMA (2 << 5) /* DMA mode */
-#define S3C2410_SPCON_SMOD_INT (1 << 5) /* interrupt mode */
-#define S3C2410_SPCON_SMOD_POLL (0 << 5) /* polling mode */
-#define S3C2410_SPCON_ENSCK (1 << 4) /* Enable SCK */
-#define S3C2410_SPCON_MSTR (1 << 3) /* Master:1, Slave:0 select */
-#define S3C2410_SPCON_CPOL_HIGH (1 << 2) /* Clock polarity select */
-#define S3C2410_SPCON_CPOL_LOW (0 << 2) /* Clock polarity select */
-
-#define S3C2410_SPCON_CPHA_FMTB (1 << 1) /* Clock Phase Select */
-#define S3C2410_SPCON_CPHA_FMTA (0 << 1) /* Clock Phase Select */
-
-#define S3C2410_SPSTA (0x04)
-
-#define S3C2410_SPSTA_DCOL (1 << 2) /* Data Collision Error */
-#define S3C2410_SPSTA_MULD (1 << 1) /* Multi Master Error */
-#define S3C2410_SPSTA_READY (1 << 0) /* Data Tx/Rx ready */
-#define S3C2412_SPSTA_READY_ORG (1 << 3)
-
-#define S3C2410_SPPIN (0x08)
-
-#define S3C2410_SPPIN_ENMUL (1 << 2) /* Multi Master Error detect */
-#define S3C2410_SPPIN_RESERVED (1 << 1)
-#define S3C2410_SPPIN_KEEP (1 << 0) /* Master Out keep */
-
-#define S3C2410_SPPRE (0x0C)
-#define S3C2410_SPTDAT (0x10)
-#define S3C2410_SPRDAT (0x14)
-
-#endif /* __SPI_S3C2410_H */
diff --git a/drivers/spi/spi-s3c24xx.c b/drivers/spi/spi-s3c24xx.c
deleted file mode 100644
index ef25b5e93900..000000000000
--- a/drivers/spi/spi-s3c24xx.c
+++ /dev/null
@@ -1,596 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2006 Ben Dooks
- * Copyright 2006-2009 Simtec Electronics
- * Ben Dooks <ben@simtec.co.uk>
-*/
-
-#include <linux/spinlock.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/errno.h>
-#include <linux/err.h>
-#include <linux/clk.h>
-#include <linux/platform_device.h>
-#include <linux/io.h>
-#include <linux/slab.h>
-
-#include <linux/spi/spi.h>
-#include <linux/spi/spi_bitbang.h>
-#include <linux/spi/s3c24xx.h>
-#include <linux/spi/s3c24xx-fiq.h>
-#include <linux/module.h>
-
-#include <asm/fiq.h>
-
-#include "spi-s3c24xx-regs.h"
-
-/**
- * struct s3c24xx_spi_devstate - per device data
- * @hz: Last frequency calculated for @sppre field.
- * @mode: Last mode setting for the @spcon field.
- * @spcon: Value to write to the SPCON register.
- * @sppre: Value to write to the SPPRE register.
- */
-struct s3c24xx_spi_devstate {
- unsigned int hz;
- unsigned int mode;
- u8 spcon;
- u8 sppre;
-};
-
-enum spi_fiq_mode {
- FIQ_MODE_NONE = 0,
- FIQ_MODE_TX = 1,
- FIQ_MODE_RX = 2,
- FIQ_MODE_TXRX = 3,
-};
-
-struct s3c24xx_spi {
- /* bitbang has to be first */
- struct spi_bitbang bitbang;
- struct completion done;
-
- void __iomem *regs;
- int irq;
- int len;
- int count;
-
- struct fiq_handler fiq_handler;
- enum spi_fiq_mode fiq_mode;
- unsigned char fiq_inuse;
- unsigned char fiq_claimed;
-
- /* data buffers */
- const unsigned char *tx;
- unsigned char *rx;
-
- struct clk *clk;
- struct spi_master *master;
- struct spi_device *curdev;
- struct device *dev;
- struct s3c2410_spi_info *pdata;
-};
-
-#define SPCON_DEFAULT (S3C2410_SPCON_MSTR | S3C2410_SPCON_SMOD_INT)
-#define SPPIN_DEFAULT (S3C2410_SPPIN_KEEP)
-
-static inline struct s3c24xx_spi *to_hw(struct spi_device *sdev)
-{
- return spi_master_get_devdata(sdev->master);
-}
-
-static void s3c24xx_spi_chipsel(struct spi_device *spi, int value)
-{
- struct s3c24xx_spi_devstate *cs = spi->controller_state;
- struct s3c24xx_spi *hw = to_hw(spi);
-
- /* change the chipselect state and the state of the spi engine clock */
-
- switch (value) {
- case BITBANG_CS_INACTIVE:
- writeb(cs->spcon, hw->regs + S3C2410_SPCON);
- break;
-
- case BITBANG_CS_ACTIVE:
- writeb(cs->spcon | S3C2410_SPCON_ENSCK,
- hw->regs + S3C2410_SPCON);
- break;
- }
-}
-
-static int s3c24xx_spi_update_state(struct spi_device *spi,
- struct spi_transfer *t)
-{
- struct s3c24xx_spi *hw = to_hw(spi);
- struct s3c24xx_spi_devstate *cs = spi->controller_state;
- unsigned int hz;
- unsigned int div;
- unsigned long clk;
-
- hz = t ? t->speed_hz : spi->max_speed_hz;
-
- if (!hz)
- hz = spi->max_speed_hz;
-
- if (spi->mode != cs->mode) {
- u8 spcon = SPCON_DEFAULT | S3C2410_SPCON_ENSCK;
-
- if (spi->mode & SPI_CPHA)
- spcon |= S3C2410_SPCON_CPHA_FMTB;
-
- if (spi->mode & SPI_CPOL)
- spcon |= S3C2410_SPCON_CPOL_HIGH;
-
- cs->mode = spi->mode;
- cs->spcon = spcon;
- }
-
- if (cs->hz != hz) {
- clk = clk_get_rate(hw->clk);
- div = DIV_ROUND_UP(clk, hz * 2) - 1;
-
- if (div > 255)
- div = 255;
-
- dev_dbg(&spi->dev, "pre-scaler=%d (wanted %d, got %ld)\n",
- div, hz, clk / (2 * (div + 1)));
-
- cs->hz = hz;
- cs->sppre = div;
- }
-
- return 0;
-}
-
-static int s3c24xx_spi_setupxfer(struct spi_device *spi,
- struct spi_transfer *t)
-{
- struct s3c24xx_spi_devstate *cs = spi->controller_state;
- struct s3c24xx_spi *hw = to_hw(spi);
- int ret;
-
- ret = s3c24xx_spi_update_state(spi, t);
- if (!ret)
- writeb(cs->sppre, hw->regs + S3C2410_SPPRE);
-
- return ret;
-}
-
-static int s3c24xx_spi_setup(struct spi_device *spi)
-{
- struct s3c24xx_spi_devstate *cs = spi->controller_state;
- struct s3c24xx_spi *hw = to_hw(spi);
- int ret;
-
- /* allocate settings on the first call */
- if (!cs) {
- cs = devm_kzalloc(&spi->dev,
- sizeof(struct s3c24xx_spi_devstate),
- GFP_KERNEL);
- if (!cs)
- return -ENOMEM;
-
- cs->spcon = SPCON_DEFAULT;
- cs->hz = -1;
- spi->controller_state = cs;
- }
-
- /* initialise the state from the device */
- ret = s3c24xx_spi_update_state(spi, NULL);
- if (ret)
- return ret;
-
- mutex_lock(&hw->bitbang.lock);
- if (!hw->bitbang.busy) {
- hw->bitbang.chipselect(spi, BITBANG_CS_INACTIVE);
- /* need to ndelay for 0.5 clocktick ? */
- }
- mutex_unlock(&hw->bitbang.lock);
-
- return 0;
-}
-
-static inline unsigned int hw_txbyte(struct s3c24xx_spi *hw, int count)
-{
- return hw->tx ? hw->tx[count] : 0;
-}
-
-#ifdef CONFIG_SPI_S3C24XX_FIQ
-/* Support for FIQ based pseudo-DMA to improve the transfer speed.
- *
- * This code uses the assembly helper in spi_s3c24xx_spi.S which is
- * used by the FIQ core to move data between main memory and the peripheral
- * block. Since this is code running on the processor, there is no problem
- * with cache coherency of the buffers, so we can use any buffer we like.
- */
-
-/**
- * struct spi_fiq_code - FIQ code and header
- * @length: The length of the code fragment, excluding this header.
- * @ack_offset: The offset from @data to the word to place the IRQ ACK bit at.
- * @data: The code itself to install as a FIQ handler.
- */
-struct spi_fiq_code {
- u32 length;
- u32 ack_offset;
- u8 data[];
-};
-
-/**
- * s3c24xx_spi_tryfiq - attempt to claim and setup FIQ for transfer
- * @hw: The hardware state.
- *
- * Claim the FIQ handler (only one can be active at any one time) and
- * then setup the correct transfer code for this transfer.
- *
- * This call updates all the necessary state information if successful,
- * so the caller does not need to do anything more than start the transfer
- * as normal, since the IRQ will have been re-routed to the FIQ handler.
-*/
-static void s3c24xx_spi_tryfiq(struct s3c24xx_spi *hw)
-{
- struct pt_regs regs;
- enum spi_fiq_mode mode;
- struct spi_fiq_code *code;
- u32 *ack_ptr = NULL;
- int ret;
-
- if (!hw->fiq_claimed) {
- /* try and claim fiq if we haven't got it, and if not
- * then return and simply use another transfer method */
-
- ret = claim_fiq(&hw->fiq_handler);
- if (ret)
- return;
- }
-
- if (hw->tx && !hw->rx)
- mode = FIQ_MODE_TX;
- else if (hw->rx && !hw->tx)
- mode = FIQ_MODE_RX;
- else
- mode = FIQ_MODE_TXRX;
-
- regs.uregs[fiq_rspi] = (long)hw->regs;
- regs.uregs[fiq_rrx] = (long)hw->rx;
- regs.uregs[fiq_rtx] = (long)hw->tx + 1;
- regs.uregs[fiq_rcount] = hw->len - 1;
-
- set_fiq_regs(&regs);
-
- if (hw->fiq_mode != mode) {
- hw->fiq_mode = mode;
-
- switch (mode) {
- case FIQ_MODE_TX:
- code = &s3c24xx_spi_fiq_tx;
- break;
- case FIQ_MODE_RX:
- code = &s3c24xx_spi_fiq_rx;
- break;
- case FIQ_MODE_TXRX:
- code = &s3c24xx_spi_fiq_txrx;
- break;
- default:
- code = NULL;
- }
-
- BUG_ON(!code);
-
- ack_ptr = (u32 *)&code->data[code->ack_offset];
- set_fiq_handler(&code->data, code->length);
- }
-
- s3c24xx_set_fiq(hw->irq, ack_ptr, true);
-
- hw->fiq_mode = mode;
- hw->fiq_inuse = 1;
-}
-
-/**
- * s3c24xx_spi_fiqop - FIQ core code callback
- * @pw: Data registered with the handler
- * @release: Whether this is a release or a return.
- *
- * Called by the FIQ code when another module wants to use the FIQ, so
- * return whether we are currently using this or not and then update our
- * internal state.
- */
-static int s3c24xx_spi_fiqop(void *pw, int release)
-{
- struct s3c24xx_spi *hw = pw;
- int ret = 0;
-
- if (release) {
- if (hw->fiq_inuse)
- ret = -EBUSY;
-
- /* note, we do not need to unroute the FIQ, as the FIQ
- * vector code de-routes it to signal the end of transfer */
-
- hw->fiq_mode = FIQ_MODE_NONE;
- hw->fiq_claimed = 0;
- } else {
- hw->fiq_claimed = 1;
- }
-
- return ret;
-}
-
-/**
- * s3c24xx_spi_initfiq - setup the information for the FIQ core
- * @hw: The hardware state.
- *
- * Setup the fiq_handler block to pass to the FIQ core.
- */
-static inline void s3c24xx_spi_initfiq(struct s3c24xx_spi *hw)
-{
- hw->fiq_handler.dev_id = hw;
- hw->fiq_handler.name = dev_name(hw->dev);
- hw->fiq_handler.fiq_op = s3c24xx_spi_fiqop;
-}
-
-/**
- * s3c24xx_spi_usefiq - return if we should be using FIQ.
- * @hw: The hardware state.
- *
- * Return true if the platform data specifies whether this channel is
- * allowed to use the FIQ.
- */
-static inline bool s3c24xx_spi_usefiq(struct s3c24xx_spi *hw)
-{
- return hw->pdata->use_fiq;
-}
-
-/**
- * s3c24xx_spi_usingfiq - return if channel is using FIQ
- * @spi: The hardware state.
- *
- * Return whether the channel is currently using the FIQ (separate from
- * whether the FIQ is claimed).
- */
-static inline bool s3c24xx_spi_usingfiq(struct s3c24xx_spi *spi)
-{
- return spi->fiq_inuse;
-}
-#else
-
-static inline void s3c24xx_spi_initfiq(struct s3c24xx_spi *s) { }
-static inline void s3c24xx_spi_tryfiq(struct s3c24xx_spi *s) { }
-static inline bool s3c24xx_spi_usefiq(struct s3c24xx_spi *s) { return false; }
-static inline bool s3c24xx_spi_usingfiq(struct s3c24xx_spi *s) { return false; }
-
-#endif /* CONFIG_SPI_S3C24XX_FIQ */
-
-static int s3c24xx_spi_txrx(struct spi_device *spi, struct spi_transfer *t)
-{
- struct s3c24xx_spi *hw = to_hw(spi);
-
- hw->tx = t->tx_buf;
- hw->rx = t->rx_buf;
- hw->len = t->len;
- hw->count = 0;
-
- init_completion(&hw->done);
-
- hw->fiq_inuse = 0;
- if (s3c24xx_spi_usefiq(hw) && t->len >= 3)
- s3c24xx_spi_tryfiq(hw);
-
- /* send the first byte */
- writeb(hw_txbyte(hw, 0), hw->regs + S3C2410_SPTDAT);
-
- wait_for_completion(&hw->done);
- return hw->count;
-}
-
-static irqreturn_t s3c24xx_spi_irq(int irq, void *dev)
-{
- struct s3c24xx_spi *hw = dev;
- unsigned int spsta = readb(hw->regs + S3C2410_SPSTA);
- unsigned int count = hw->count;
-
- if (spsta & S3C2410_SPSTA_DCOL) {
- dev_dbg(hw->dev, "data-collision\n");
- complete(&hw->done);
- goto irq_done;
- }
-
- if (!(spsta & S3C2410_SPSTA_READY)) {
- dev_dbg(hw->dev, "spi not ready for tx?\n");
- complete(&hw->done);
- goto irq_done;
- }
-
- if (!s3c24xx_spi_usingfiq(hw)) {
- hw->count++;
-
- if (hw->rx)
- hw->rx[count] = readb(hw->regs + S3C2410_SPRDAT);
-
- count++;
-
- if (count < hw->len)
- writeb(hw_txbyte(hw, count), hw->regs + S3C2410_SPTDAT);
- else
- complete(&hw->done);
- } else {
- hw->count = hw->len;
- hw->fiq_inuse = 0;
-
- if (hw->rx)
- hw->rx[hw->len-1] = readb(hw->regs + S3C2410_SPRDAT);
-
- complete(&hw->done);
- }
-
- irq_done:
- return IRQ_HANDLED;
-}
-
-static void s3c24xx_spi_initialsetup(struct s3c24xx_spi *hw)
-{
- /* for the moment, permanently enable the clock */
-
- clk_enable(hw->clk);
-
- /* program defaults into the registers */
-
- writeb(0xff, hw->regs + S3C2410_SPPRE);
- writeb(SPPIN_DEFAULT, hw->regs + S3C2410_SPPIN);
- writeb(SPCON_DEFAULT, hw->regs + S3C2410_SPCON);
-}
-
-static int s3c24xx_spi_probe(struct platform_device *pdev)
-{
- struct s3c2410_spi_info *pdata;
- struct s3c24xx_spi *hw;
- struct spi_master *master;
- int err = 0;
-
- master = devm_spi_alloc_master(&pdev->dev, sizeof(struct s3c24xx_spi));
- if (master == NULL) {
- dev_err(&pdev->dev, "No memory for spi_master\n");
- return -ENOMEM;
- }
-
- hw = spi_master_get_devdata(master);
-
- hw->master = master;
- hw->pdata = pdata = dev_get_platdata(&pdev->dev);
- hw->dev = &pdev->dev;
-
- if (pdata == NULL) {
- dev_err(&pdev->dev, "No platform data supplied\n");
- return -ENOENT;
- }
-
- platform_set_drvdata(pdev, hw);
- init_completion(&hw->done);
-
- /* initialise fiq handler */
-
- s3c24xx_spi_initfiq(hw);
-
- /* setup the master state. */
-
- /* the spi->mode bits understood by this driver: */
- master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
-
- master->num_chipselect = hw->pdata->num_cs;
- master->bus_num = pdata->bus_num;
- master->bits_per_word_mask = SPI_BPW_MASK(8);
- /* we need to call the local chipselect callback */
- master->flags = SPI_MASTER_GPIO_SS;
- master->use_gpio_descriptors = true;
-
- /* setup the state for the bitbang driver */
-
- hw->bitbang.master = hw->master;
- hw->bitbang.setup_transfer = s3c24xx_spi_setupxfer;
- hw->bitbang.chipselect = s3c24xx_spi_chipsel;
- hw->bitbang.txrx_bufs = s3c24xx_spi_txrx;
-
- hw->master->setup = s3c24xx_spi_setup;
-
- dev_dbg(hw->dev, "bitbang at %p\n", &hw->bitbang);
-
- /* find and map our resources */
- hw->regs = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(hw->regs))
- return PTR_ERR(hw->regs);
-
- hw->irq = platform_get_irq(pdev, 0);
- if (hw->irq < 0)
- return -ENOENT;
-
- err = devm_request_irq(&pdev->dev, hw->irq, s3c24xx_spi_irq, 0,
- pdev->name, hw);
- if (err) {
- dev_err(&pdev->dev, "Cannot claim IRQ\n");
- return err;
- }
-
- hw->clk = devm_clk_get(&pdev->dev, "spi");
- if (IS_ERR(hw->clk)) {
- dev_err(&pdev->dev, "No clock for device\n");
- return PTR_ERR(hw->clk);
- }
-
- s3c24xx_spi_initialsetup(hw);
-
- /* register our spi controller */
-
- err = spi_bitbang_start(&hw->bitbang);
- if (err) {
- dev_err(&pdev->dev, "Failed to register SPI master\n");
- goto err_register;
- }
-
- return 0;
-
- err_register:
- clk_disable(hw->clk);
-
- return err;
-}
-
-static int s3c24xx_spi_remove(struct platform_device *dev)
-{
- struct s3c24xx_spi *hw = platform_get_drvdata(dev);
-
- spi_bitbang_stop(&hw->bitbang);
- clk_disable(hw->clk);
- spi_master_put(hw->master);
- return 0;
-}
-
-
-#ifdef CONFIG_PM
-
-static int s3c24xx_spi_suspend(struct device *dev)
-{
- struct s3c24xx_spi *hw = dev_get_drvdata(dev);
- int ret;
-
- ret = spi_master_suspend(hw->master);
- if (ret)
- return ret;
-
- clk_disable(hw->clk);
- return 0;
-}
-
-static int s3c24xx_spi_resume(struct device *dev)
-{
- struct s3c24xx_spi *hw = dev_get_drvdata(dev);
-
- s3c24xx_spi_initialsetup(hw);
- return spi_master_resume(hw->master);
-}
-
-static const struct dev_pm_ops s3c24xx_spi_pmops = {
- .suspend = s3c24xx_spi_suspend,
- .resume = s3c24xx_spi_resume,
-};
-
-#define S3C24XX_SPI_PMOPS &s3c24xx_spi_pmops
-#else
-#define S3C24XX_SPI_PMOPS NULL
-#endif /* CONFIG_PM */
-
-MODULE_ALIAS("platform:s3c2410-spi");
-static struct platform_driver s3c24xx_spi_driver = {
- .probe = s3c24xx_spi_probe,
- .remove = s3c24xx_spi_remove,
- .driver = {
- .name = "s3c2410-spi",
- .pm = S3C24XX_SPI_PMOPS,
- },
-};
-module_platform_driver(s3c24xx_spi_driver);
-
-MODULE_DESCRIPTION("S3C24XX SPI Driver");
-MODULE_AUTHOR("Ben Dooks, <ben@simtec.co.uk>");
-MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-synquacer.c b/drivers/spi/spi-synquacer.c
index 47cbe73137c2..dc188f9202c9 100644
--- a/drivers/spi/spi-synquacer.c
+++ b/drivers/spi/spi-synquacer.c
@@ -472,10 +472,9 @@ static int synquacer_spi_transfer_one(struct spi_master *master,
read_fifo(sspi);
}
- if (status < 0) {
- dev_err(sspi->dev, "failed to transfer. status: 0x%x\n",
- status);
- return status;
+ if (status == 0) {
+ dev_err(sspi->dev, "failed to transfer. Timeout.\n");
+ return -ETIMEDOUT;
}
return 0;
diff --git a/drivers/spi/spi-xilinx.c b/drivers/spi/spi-xilinx.c
index 7377d3b81302..1411548f4255 100644
--- a/drivers/spi/spi-xilinx.c
+++ b/drivers/spi/spi-xilinx.c
@@ -83,7 +83,7 @@ struct xilinx_spi {
void __iomem *regs; /* virt. address of the control registers */
int irq;
-
+ bool force_irq; /* force irq to setup master inhibit */
u8 *rx_ptr; /* pointer in the Tx buffer */
const u8 *tx_ptr; /* pointer in the Rx buffer */
u8 bytes_per_word;
@@ -248,7 +248,8 @@ static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
xspi->rx_ptr = t->rx_buf;
remaining_words = t->len / xspi->bytes_per_word;
- if (xspi->irq >= 0 && remaining_words > xspi->buffer_size) {
+ if (xspi->irq >= 0 &&
+ (xspi->force_irq || remaining_words > xspi->buffer_size)) {
u32 isr;
use_irq = true;
/* Inhibit irq to avoid spurious irqs on tx_empty*/
@@ -393,6 +394,7 @@ static int xilinx_spi_probe(struct platform_device *pdev)
struct resource *res;
int ret, num_cs = 0, bits_per_word;
struct spi_master *master;
+ bool force_irq = false;
u32 tmp;
u8 i;
@@ -400,6 +402,7 @@ static int xilinx_spi_probe(struct platform_device *pdev)
if (pdata) {
num_cs = pdata->num_chipselect;
bits_per_word = pdata->bits_per_word;
+ force_irq = pdata->force_irq;
} else {
of_property_read_u32(pdev->dev.of_node, "xlnx,num-ss-bits",
&num_cs);
@@ -477,6 +480,8 @@ static int xilinx_spi_probe(struct platform_device *pdev)
dev_name(&pdev->dev), xspi);
if (ret)
return ret;
+
+ xspi->force_irq = force_irq;
}
/* SPI controller initializations */
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 15f174f4e056..44b85a8d47f1 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -395,7 +395,7 @@ static int spi_match_device(struct device *dev, struct device_driver *drv)
return strcmp(spi->modalias, drv->name) == 0;
}
-static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
+static int spi_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
const struct spi_device *spi = to_spi_device(dev);
int rc;
@@ -604,7 +604,7 @@ static void spi_dev_set_name(struct spi_device *spi)
}
dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->controller->dev),
- spi->chip_select);
+ spi_get_chipselect(spi, 0));
}
static int spi_dev_check(struct device *dev, void *data)
@@ -613,7 +613,7 @@ static int spi_dev_check(struct device *dev, void *data)
struct spi_device *new_spi = data;
if (spi->controller == new_spi->controller &&
- spi->chip_select == new_spi->chip_select)
+ spi_get_chipselect(spi, 0) == spi_get_chipselect(new_spi, 0))
return -EBUSY;
return 0;
}
@@ -638,7 +638,7 @@ static int __spi_add_device(struct spi_device *spi)
status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
if (status) {
dev_err(dev, "chipselect %d already in use\n",
- spi->chip_select);
+ spi_get_chipselect(spi, 0));
return status;
}
@@ -649,7 +649,7 @@ static int __spi_add_device(struct spi_device *spi)
}
if (ctlr->cs_gpiods)
- spi->cs_gpiod = ctlr->cs_gpiods[spi->chip_select];
+ spi_set_csgpiod(spi, 0, ctlr->cs_gpiods[spi_get_chipselect(spi, 0)]);
/*
* Drivers may modify this initial i/o setup, but will
@@ -692,8 +692,8 @@ int spi_add_device(struct spi_device *spi)
int status;
/* Chipselects are numbered 0..max; validate. */
- if (spi->chip_select >= ctlr->num_chipselect) {
- dev_err(dev, "cs%d >= max %d\n", spi->chip_select,
+ if (spi_get_chipselect(spi, 0) >= ctlr->num_chipselect) {
+ dev_err(dev, "cs%d >= max %d\n", spi_get_chipselect(spi, 0),
ctlr->num_chipselect);
return -EINVAL;
}
@@ -714,8 +714,8 @@ static int spi_add_device_locked(struct spi_device *spi)
struct device *dev = ctlr->dev.parent;
/* Chipselects are numbered 0..max; validate. */
- if (spi->chip_select >= ctlr->num_chipselect) {
- dev_err(dev, "cs%d >= max %d\n", spi->chip_select,
+ if (spi_get_chipselect(spi, 0) >= ctlr->num_chipselect) {
+ dev_err(dev, "cs%d >= max %d\n", spi_get_chipselect(spi, 0),
ctlr->num_chipselect);
return -EINVAL;
}
@@ -761,7 +761,7 @@ struct spi_device *spi_new_device(struct spi_controller *ctlr,
WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));
- proxy->chip_select = chip->chip_select;
+ spi_set_chipselect(proxy, 0, chip->chip_select);
proxy->max_speed_hz = chip->max_speed_hz;
proxy->mode = chip->mode;
proxy->irq = chip->irq;
@@ -970,24 +970,23 @@ static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
* Avoid calling into the driver (or doing delays) if the chip select
* isn't actually changing from the last time this was called.
*/
- if (!force && ((enable && spi->controller->last_cs == spi->chip_select) ||
- (!enable && spi->controller->last_cs != spi->chip_select)) &&
+ if (!force && ((enable && spi->controller->last_cs == spi_get_chipselect(spi, 0)) ||
+ (!enable && spi->controller->last_cs != spi_get_chipselect(spi, 0))) &&
(spi->controller->last_cs_mode_high == (spi->mode & SPI_CS_HIGH)))
return;
trace_spi_set_cs(spi, activate);
- spi->controller->last_cs = enable ? spi->chip_select : -1;
+ spi->controller->last_cs = enable ? spi_get_chipselect(spi, 0) : -1;
spi->controller->last_cs_mode_high = spi->mode & SPI_CS_HIGH;
- if ((spi->cs_gpiod || !spi->controller->set_cs_timing) && !activate) {
+ if ((spi_get_csgpiod(spi, 0) || !spi->controller->set_cs_timing) && !activate)
spi_delay_exec(&spi->cs_hold, NULL);
- }
if (spi->mode & SPI_CS_HIGH)
enable = !enable;
- if (spi->cs_gpiod) {
+ if (spi_get_csgpiod(spi, 0)) {
if (!(spi->mode & SPI_NO_CS)) {
/*
* Historically ACPI has no means of the GPIO polarity and
@@ -1000,10 +999,10 @@ static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
* into account.
*/
if (has_acpi_companion(&spi->dev))
- gpiod_set_value_cansleep(spi->cs_gpiod, !enable);
+ gpiod_set_value_cansleep(spi_get_csgpiod(spi, 0), !enable);
else
/* Polarity handled by GPIO library */
- gpiod_set_value_cansleep(spi->cs_gpiod, activate);
+ gpiod_set_value_cansleep(spi_get_csgpiod(spi, 0), activate);
}
/* Some SPI masters need both GPIO CS & slave_select */
if ((spi->controller->flags & SPI_MASTER_GPIO_SS) &&
@@ -1013,7 +1012,7 @@ static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
spi->controller->set_cs(spi, !enable);
}
- if (spi->cs_gpiod || !spi->controller->set_cs_timing) {
+ if (spi_get_csgpiod(spi, 0) || !spi->controller->set_cs_timing) {
if (activate)
spi_delay_exec(&spi->cs_setup, NULL);
else
@@ -1484,6 +1483,13 @@ static void _spi_transfer_cs_change_delay(struct spi_message *msg,
}
}
+void spi_transfer_cs_change_delay_exec(struct spi_message *msg,
+ struct spi_transfer *xfer)
+{
+ _spi_transfer_cs_change_delay(msg, xfer);
+}
+EXPORT_SYMBOL_GPL(spi_transfer_cs_change_delay_exec);
+
/*
* spi_transfer_one_message - Default implementation of transfer_one_message()
*
@@ -1921,7 +1927,7 @@ void spi_take_timestamp_post(struct spi_controller *ctlr,
/* Capture the resolution of the timestamp */
xfer->ptp_sts_word_post = progress;
- xfer->timestamped = true;
+ xfer->timestamped = 1;
}
EXPORT_SYMBOL_GPL(spi_take_timestamp_post);
@@ -2220,11 +2226,26 @@ void spi_flush_queue(struct spi_controller *ctlr)
/*-------------------------------------------------------------------------*/
#if defined(CONFIG_OF)
+static void of_spi_parse_dt_cs_delay(struct device_node *nc,
+ struct spi_delay *delay, const char *prop)
+{
+ u32 value;
+
+ if (!of_property_read_u32(nc, prop, &value)) {
+ if (value > U16_MAX) {
+ delay->value = DIV_ROUND_UP(value, 1000);
+ delay->unit = SPI_DELAY_UNIT_USECS;
+ } else {
+ delay->value = value;
+ delay->unit = SPI_DELAY_UNIT_NSECS;
+ }
+ }
+}
+
static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
struct device_node *nc)
{
u32 value;
- u16 cs_setup;
int rc;
/* Mode (clock phase/polarity/etc.) */
@@ -2304,16 +2325,16 @@ static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
nc, rc);
return rc;
}
- spi->chip_select = value;
+ spi_set_chipselect(spi, 0, value);
/* Device speed */
if (!of_property_read_u32(nc, "spi-max-frequency", &value))
spi->max_speed_hz = value;
- if (!of_property_read_u16(nc, "spi-cs-setup-delay-ns", &cs_setup)) {
- spi->cs_setup.value = cs_setup;
- spi->cs_setup.unit = SPI_DELAY_UNIT_NSECS;
- }
+ /* Device CS delays */
+ of_spi_parse_dt_cs_delay(nc, &spi->cs_setup, "spi-cs-setup-delay-ns");
+ of_spi_parse_dt_cs_delay(nc, &spi->cs_hold, "spi-cs-hold-delay-ns");
+ of_spi_parse_dt_cs_delay(nc, &spi->cs_inactive, "spi-cs-inactive-delay-ns");
return 0;
}
@@ -2423,7 +2444,7 @@ struct spi_device *spi_new_ancillary_device(struct spi_device *spi,
strscpy(ancillary->modalias, "dummy", sizeof(ancillary->modalias));
/* Use provided chip-select for ancillary device */
- ancillary->chip_select = chip_select;
+ spi_set_chipselect(ancillary, 0, chip_select);
/* Take over SPI mode/speed from SPI main device */
ancillary->max_speed_hz = spi->max_speed_hz;
@@ -2670,7 +2691,7 @@ struct spi_device *acpi_spi_device_alloc(struct spi_controller *ctlr,
spi->mode |= lookup.mode;
spi->irq = lookup.irq;
spi->bits_per_word = lookup.bits_per_word;
- spi->chip_select = lookup.chip_select;
+ spi_set_chipselect(spi, 0, lookup.chip_select);
return spi;
}
@@ -3051,15 +3072,14 @@ static int spi_controller_check_ops(struct spi_controller *ctlr)
* The controller may implement only the high-level SPI-memory like
* operations if it does not support regular SPI transfers, and this is
* valid use case.
- * If ->mem_ops is NULL, we request that at least one of the
- * ->transfer_xxx() method be implemented.
+ * If ->mem_ops or ->mem_ops->exec_op is NULL, we request that at least
+ * one of the ->transfer_xxx() method be implemented.
*/
- if (ctlr->mem_ops) {
- if (!ctlr->mem_ops->exec_op)
- return -EINVAL;
- } else if (!ctlr->transfer && !ctlr->transfer_one &&
+ if (!ctlr->mem_ops || (ctlr->mem_ops && !ctlr->mem_ops->exec_op)) {
+ if (!ctlr->transfer && !ctlr->transfer_one &&
!ctlr->transfer_one_message) {
- return -EINVAL;
+ return -EINVAL;
+ }
}
return 0;
@@ -3632,7 +3652,7 @@ static int spi_set_cs_timing(struct spi_device *spi)
struct device *parent = spi->controller->dev.parent;
int status = 0;
- if (spi->controller->set_cs_timing && !spi->cs_gpiod) {
+ if (spi->controller->set_cs_timing && !spi_get_csgpiod(spi, 0)) {
if (spi->controller->auto_runtime_pm) {
status = pm_runtime_get_sync(parent);
if (status < 0) {
@@ -3837,7 +3857,7 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
* cs_change is set for each transfer.
*/
if ((spi->mode & SPI_CS_WORD) && (!(ctlr->mode_bits & SPI_CS_WORD) ||
- spi->cs_gpiod)) {
+ spi_get_csgpiod(spi, 0))) {
size_t maxsize;
int ret;
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index 1935ca613447..5a038c667401 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -90,9 +90,21 @@ MODULE_PARM_DESC(bufsiz, "data bytes in biggest supported SPI message");
/*-------------------------------------------------------------------------*/
static ssize_t
+spidev_sync_unlocked(struct spi_device *spi, struct spi_message *message)
+{
+ ssize_t status;
+
+ status = spi_sync(spi, message);
+ if (status == 0)
+ status = message->actual_length;
+
+ return status;
+}
+
+static ssize_t
spidev_sync(struct spidev_data *spidev, struct spi_message *message)
{
- int status;
+ ssize_t status;
struct spi_device *spi;
mutex_lock(&spidev->spi_lock);
@@ -101,10 +113,7 @@ spidev_sync(struct spidev_data *spidev, struct spi_message *message)
if (spi == NULL)
status = -ESHUTDOWN;
else
- status = spi_sync(spi, message);
-
- if (status == 0)
- status = message->actual_length;
+ status = spidev_sync_unlocked(spi, message);
mutex_unlock(&spidev->spi_lock);
return status;
@@ -294,7 +303,7 @@ static int spidev_message(struct spidev_data *spidev,
spi_message_add_tail(k_tmp, &msg);
}
- status = spidev_sync(spidev, &msg);
+ status = spidev_sync_unlocked(spidev->spi, &msg);
if (status < 0)
goto done;
@@ -702,6 +711,8 @@ static const struct spi_device_id spidev_spi_ids[] = {
{ .name = "m53cpld" },
{ .name = "spi-petra" },
{ .name = "spi-authenta" },
+ { .name = "em3581" },
+ { .name = "si3210" },
{},
};
MODULE_DEVICE_TABLE(spi, spidev_spi_ids);
@@ -720,14 +731,16 @@ static int spidev_of_check(struct device *dev)
}
static const struct of_device_id spidev_dt_ids[] = {
- { .compatible = "rohm,dh2228fv", .data = &spidev_of_check },
+ { .compatible = "cisco,spi-petra", .data = &spidev_of_check },
+ { .compatible = "dh,dhcom-board", .data = &spidev_of_check },
{ .compatible = "lineartechnology,ltc2488", .data = &spidev_of_check },
- { .compatible = "semtech,sx1301", .data = &spidev_of_check },
{ .compatible = "lwn,bk4", .data = &spidev_of_check },
- { .compatible = "dh,dhcom-board", .data = &spidev_of_check },
{ .compatible = "menlo,m53cpld", .data = &spidev_of_check },
- { .compatible = "cisco,spi-petra", .data = &spidev_of_check },
{ .compatible = "micron,spi-authenta", .data = &spidev_of_check },
+ { .compatible = "rohm,dh2228fv", .data = &spidev_of_check },
+ { .compatible = "semtech,sx1301", .data = &spidev_of_check },
+ { .compatible = "silabs,em3581", .data = &spidev_of_check },
+ { .compatible = "silabs,si3210", .data = &spidev_of_check },
{},
};
MODULE_DEVICE_TABLE(of, spidev_dt_ids);
diff --git a/drivers/spmi/spmi.c b/drivers/spmi/spmi.c
index 55381592bb5a..73551531ed43 100644
--- a/drivers/spmi/spmi.c
+++ b/drivers/spmi/spmi.c
@@ -366,7 +366,7 @@ static void spmi_drv_shutdown(struct device *dev)
sdrv->shutdown(to_spmi_device(dev));
}
-static int spmi_drv_uevent(struct device *dev, struct kobj_uevent_env *env)
+static int spmi_drv_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
int ret;
diff --git a/drivers/ssb/main.c b/drivers/ssb/main.c
index 8a93c83cb6f8..ab080cf26c9f 100644
--- a/drivers/ssb/main.c
+++ b/drivers/ssb/main.c
@@ -339,9 +339,9 @@ static int ssb_bus_match(struct device *dev, struct device_driver *drv)
return 0;
}
-static int ssb_device_uevent(struct device *dev, struct kobj_uevent_env *env)
+static int ssb_device_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
- struct ssb_device *ssb_dev = dev_to_ssb_dev(dev);
+ const struct ssb_device *ssb_dev = dev_to_ssb_dev(dev);
if (!dev)
return -ENODEV;
diff --git a/drivers/staging/emxx_udc/emxx_udc.c b/drivers/staging/emxx_udc/emxx_udc.c
index b4e19174bef2..f9765841c4aa 100644
--- a/drivers/staging/emxx_udc/emxx_udc.c
+++ b/drivers/staging/emxx_udc/emxx_udc.c
@@ -2587,10 +2587,15 @@ static int nbu2ss_ep_queue(struct usb_ep *_ep,
req->unaligned = false;
if (req->unaligned) {
- if (!ep->virt_buf)
+ if (!ep->virt_buf) {
ep->virt_buf = dma_alloc_coherent(udc->dev, PAGE_SIZE,
&ep->phys_buf,
GFP_ATOMIC | GFP_DMA);
+ if (!ep->virt_buf) {
+ spin_unlock_irqrestore(&udc->lock, flags);
+ return -ENOMEM;
+ }
+ }
if (ep->epnum > 0) {
if (ep->direct == USB_DIR_IN)
memcpy(ep->virt_buf, req->req.buf,
diff --git a/drivers/staging/greybus/audio_codec.c b/drivers/staging/greybus/audio_codec.c
index 0ad8aeabccbf..72ace74ea605 100644
--- a/drivers/staging/greybus/audio_codec.c
+++ b/drivers/staging/greybus/audio_codec.c
@@ -1075,11 +1075,6 @@ static int gbaudio_codec_probe(struct platform_device *pdev)
gbaudio_dai, ARRAY_SIZE(gbaudio_dai));
}
-static int gbaudio_codec_remove(struct platform_device *pdev)
-{
- return 0;
-}
-
static const struct of_device_id greybus_asoc_machine_of_match[] = {
{ .compatible = "toshiba,apb-dummy-codec", },
{},
@@ -1094,7 +1089,6 @@ static struct platform_driver gbaudio_codec_driver = {
.of_match_table = greybus_asoc_machine_of_match,
},
.probe = gbaudio_codec_probe,
- .remove = gbaudio_codec_remove,
};
module_platform_driver(gbaudio_codec_driver);
diff --git a/drivers/staging/greybus/gbphy.c b/drivers/staging/greybus/gbphy.c
index 5a5c17a4519b..6a7d8cf2a1eb 100644
--- a/drivers/staging/greybus/gbphy.c
+++ b/drivers/staging/greybus/gbphy.c
@@ -71,14 +71,14 @@ static const struct device_type greybus_gbphy_dev_type = {
.pm = &gb_gbphy_pm_ops,
};
-static int gbphy_dev_uevent(struct device *dev, struct kobj_uevent_env *env)
+static int gbphy_dev_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
- struct gbphy_device *gbphy_dev = to_gbphy_dev(dev);
- struct greybus_descriptor_cport *cport_desc = gbphy_dev->cport_desc;
- struct gb_bundle *bundle = gbphy_dev->bundle;
- struct gb_interface *intf = bundle->intf;
- struct gb_module *module = intf->module;
- struct gb_host_device *hd = intf->hd;
+ const struct gbphy_device *gbphy_dev = to_gbphy_dev(dev);
+ const struct greybus_descriptor_cport *cport_desc = gbphy_dev->cport_desc;
+ const struct gb_bundle *bundle = gbphy_dev->bundle;
+ const struct gb_interface *intf = bundle->intf;
+ const struct gb_module *module = intf->module;
+ const struct gb_host_device *hd = intf->hd;
if (add_uevent_var(env, "BUS=%u", hd->bus_id))
return -ENOMEM;
diff --git a/drivers/staging/greybus/gpio.c b/drivers/staging/greybus/gpio.c
index 8a7cf1d0e968..d729b922a750 100644
--- a/drivers/staging/greybus/gpio.c
+++ b/drivers/staging/greybus/gpio.c
@@ -43,7 +43,11 @@ struct gb_gpio_controller {
};
#define gpio_chip_to_gb_gpio_controller(chip) \
container_of(chip, struct gb_gpio_controller, chip)
-#define irq_data_to_gpio_chip(d) (d->domain->host_data)
+
+static struct gpio_chip *irq_data_to_gpio_chip(struct irq_data *d)
+{
+ return d->domain->host_data;
+}
static int gb_gpio_line_count_operation(struct gb_gpio_controller *ggc)
{
diff --git a/drivers/staging/greybus/hid.c b/drivers/staging/greybus/hid.c
index adb91286803a..15335c38cb26 100644
--- a/drivers/staging/greybus/hid.c
+++ b/drivers/staging/greybus/hid.c
@@ -381,7 +381,7 @@ static int gb_hid_power(struct hid_device *hid, int lvl)
}
/* HID structure to pass callbacks */
-static struct hid_ll_driver gb_hid_ll_driver = {
+static const struct hid_ll_driver gb_hid_ll_driver = {
.parse = gb_hid_parse,
.start = gb_hid_start,
.stop = gb_hid_stop,
diff --git a/drivers/staging/greybus/uart.c b/drivers/staging/greybus/uart.c
index 90ff07f2cbf7..20a34599859f 100644
--- a/drivers/staging/greybus/uart.c
+++ b/drivers/staging/greybus/uart.c
@@ -701,7 +701,7 @@ static int gb_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
return -ENOIOCTLCMD;
}
-static void gb_tty_dtr_rts(struct tty_port *port, int on)
+static void gb_tty_dtr_rts(struct tty_port *port, bool active)
{
struct gb_tty *gb_tty;
u8 newctrl;
@@ -709,7 +709,7 @@ static void gb_tty_dtr_rts(struct tty_port *port, int on)
gb_tty = container_of(port, struct gb_tty, port);
newctrl = gb_tty->ctrlout;
- if (on)
+ if (active)
newctrl |= (GB_UART_CTRL_DTR | GB_UART_CTRL_RTS);
else
newctrl &= ~(GB_UART_CTRL_DTR | GB_UART_CTRL_RTS);
diff --git a/drivers/staging/greybus/usb.c b/drivers/staging/greybus/usb.c
index 8e9d9d59a357..b7badf87a3f0 100644
--- a/drivers/staging/greybus/usb.c
+++ b/drivers/staging/greybus/usb.c
@@ -27,7 +27,7 @@ struct gb_usb_hub_control_request {
};
struct gb_usb_hub_control_response {
- u8 buf[0];
+ DECLARE_FLEX_ARRAY(u8, buf);
};
struct gb_usb_device {
diff --git a/drivers/staging/ks7010/ks_wlan_net.c b/drivers/staging/ks7010/ks_wlan_net.c
index 044c807ca022..e03c87f0bfe7 100644
--- a/drivers/staging/ks7010/ks_wlan_net.c
+++ b/drivers/staging/ks7010/ks_wlan_net.c
@@ -382,8 +382,7 @@ static int ks_wlan_get_nick(struct net_device *dev,
return -EPERM;
/* for SLEEP MODE */
- strncpy(extra, priv->nick, 16);
- extra[16] = '\0';
+ strscpy(extra, priv->nick, 17);
dwrq->data.length = strlen(extra) + 1;
return 0;
diff --git a/drivers/staging/media/Kconfig b/drivers/staging/media/Kconfig
index b79f93684c4f..617012e09a37 100644
--- a/drivers/staging/media/Kconfig
+++ b/drivers/staging/media/Kconfig
@@ -58,7 +58,6 @@ source "drivers/staging/media/deprecated/meye/Kconfig"
source "drivers/staging/media/deprecated/saa7146/Kconfig"
source "drivers/staging/media/deprecated/stkwebcam/Kconfig"
source "drivers/staging/media/deprecated/tm6000/Kconfig"
-source "drivers/staging/media/deprecated/vpfe_capture/Kconfig"
source "drivers/staging/media/deprecated/zr364xx/Kconfig"
endif
diff --git a/drivers/staging/media/Makefile b/drivers/staging/media/Makefile
index 54bbdd4b0d08..1e14edc2d44c 100644
--- a/drivers/staging/media/Makefile
+++ b/drivers/staging/media/Makefile
@@ -15,5 +15,4 @@ obj-$(CONFIG_VIDEO_IPU3_IMGU) += ipu3/
obj-$(CONFIG_VIDEO_TM6000) += deprecated/tm6000/
obj-$(CONFIG_VIDEO_VIU) += deprecated/fsl-viu/
obj-$(CONFIG_USB_ZR364XX) += deprecated/zr364xx/
-obj-y += deprecated/vpfe_capture/
obj-y += deprecated/saa7146/
diff --git a/drivers/staging/media/atomisp/pci/hmm/hmm_bo.c b/drivers/staging/media/atomisp/pci/hmm/hmm_bo.c
index 5e53eed8ae95..095cd0ba8c21 100644
--- a/drivers/staging/media/atomisp/pci/hmm/hmm_bo.c
+++ b/drivers/staging/media/atomisp/pci/hmm/hmm_bo.c
@@ -1072,7 +1072,7 @@ int hmm_bo_mmap(struct vm_area_struct *vma, struct hmm_buffer_object *bo)
vma->vm_private_data = bo;
vma->vm_ops = &hmm_bo_vm_ops;
- vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+ vm_flags_set(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP);
/*
* call hmm_bo_vm_open explicitly.
diff --git a/drivers/staging/media/deprecated/meye/meye.c b/drivers/staging/media/deprecated/meye/meye.c
index 5d87efd9b95c..746c6ea1c0a7 100644
--- a/drivers/staging/media/deprecated/meye/meye.c
+++ b/drivers/staging/media/deprecated/meye/meye.c
@@ -1476,8 +1476,8 @@ static int meye_mmap(struct file *file, struct vm_area_struct *vma)
}
vma->vm_ops = &meye_vm_ops;
- vma->vm_flags &= ~VM_IO; /* not I/O memory */
- vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+ /* not I/O memory */
+ vm_flags_mod(vma, VM_DONTEXPAND | VM_DONTDUMP, VM_IO);
vma->vm_private_data = (void *) (offset / gbufsize);
meye_vm_open(vma);
diff --git a/drivers/staging/media/deprecated/stkwebcam/stk-webcam.c b/drivers/staging/media/deprecated/stkwebcam/stk-webcam.c
index 787edb3d47c2..a1b7ad350a90 100644
--- a/drivers/staging/media/deprecated/stkwebcam/stk-webcam.c
+++ b/drivers/staging/media/deprecated/stkwebcam/stk-webcam.c
@@ -779,7 +779,7 @@ static int v4l_stk_mmap(struct file *fp, struct vm_area_struct *vma)
ret = remap_vmalloc_range(vma, sbuf->buffer, 0);
if (ret)
return ret;
- vma->vm_flags |= VM_DONTEXPAND;
+ vm_flags_set(vma, VM_DONTEXPAND);
vma->vm_private_data = sbuf;
vma->vm_ops = &stk_v4l_vm_ops;
sbuf->v4lbuf.flags |= V4L2_BUF_FLAG_MAPPED;
diff --git a/drivers/staging/media/deprecated/vpfe_capture/Kconfig b/drivers/staging/media/deprecated/vpfe_capture/Kconfig
deleted file mode 100644
index 10250e7e566b..000000000000
--- a/drivers/staging/media/deprecated/vpfe_capture/Kconfig
+++ /dev/null
@@ -1,58 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-config VIDEO_DM6446_CCDC
- tristate "TI DM6446 CCDC video capture driver"
- depends on V4L_PLATFORM_DRIVERS
- depends on VIDEO_DEV
- depends on ARCH_DAVINCI || COMPILE_TEST
- depends on I2C
- select VIDEOBUF_DMA_CONTIG
- help
- Enables DaVinci CCD hw module. DaVinci CCDC hw interfaces
- with decoder modules such as TVP5146 over BT656 or
- sensor module such as MT9T001 over a raw interface. This
- module configures the interface and CCDC/ISIF to do
- video frame capture from slave decoders.
-
- This driver is deprecated and is scheduled for removal by
- the beginning of 2023. See the TODO file for more information.
-
- To compile this driver as a module, choose M here. There will
- be two modules called vpfe_capture.ko and dm644x_ccdc.ko
-
-config VIDEO_DM355_CCDC
- tristate "TI DM355 CCDC video capture driver"
- depends on V4L_PLATFORM_DRIVERS
- depends on VIDEO_DEV
- depends on ARCH_DAVINCI || COMPILE_TEST
- depends on I2C
- select VIDEOBUF_DMA_CONTIG
- help
- Enables DM355 CCD hw module. DM355 CCDC hw interfaces
- with decoder modules such as TVP5146 over BT656 or
- sensor module such as MT9T001 over a raw interface. This
- module configures the interface and CCDC/ISIF to do
- video frame capture from a slave decoders
-
- This driver is deprecated and is scheduled for removal by
- the beginning of 2023. See the TODO file for more information.
-
- To compile this driver as a module, choose M here. There will
- be two modules called vpfe_capture.ko and dm355_ccdc.ko
-
-config VIDEO_DM365_ISIF
- tristate "TI DM365 ISIF video capture driver"
- depends on V4L_PLATFORM_DRIVERS
- depends on VIDEO_DEV
- depends on ARCH_DAVINCI || COMPILE_TEST
- depends on I2C
- select VIDEOBUF_DMA_CONTIG
- help
- Enables ISIF hw module. This is the hardware module for
- configuring ISIF in VPFE to capture Raw Bayer RGB data from
- a image sensor or YUV data from a YUV source.
-
- This driver is deprecated and is scheduled for removal by
- the beginning of 2023. See the TODO file for more information.
-
- To compile this driver as a module, choose M here. There will
- be two modules called vpfe_capture.ko and isif.ko
diff --git a/drivers/staging/media/deprecated/vpfe_capture/Makefile b/drivers/staging/media/deprecated/vpfe_capture/Makefile
deleted file mode 100644
index 609e8dc09ce7..000000000000
--- a/drivers/staging/media/deprecated/vpfe_capture/Makefile
+++ /dev/null
@@ -1,4 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_VIDEO_DM6446_CCDC) += vpfe_capture.o dm644x_ccdc.o
-obj-$(CONFIG_VIDEO_DM355_CCDC) += vpfe_capture.o dm355_ccdc.o
-obj-$(CONFIG_VIDEO_DM365_ISIF) += vpfe_capture.o isif.o
diff --git a/drivers/staging/media/deprecated/vpfe_capture/TODO b/drivers/staging/media/deprecated/vpfe_capture/TODO
deleted file mode 100644
index ce654d7337af..000000000000
--- a/drivers/staging/media/deprecated/vpfe_capture/TODO
+++ /dev/null
@@ -1,7 +0,0 @@
-These are one of the few drivers still not using the vb2
-framework, so these drivers are now deprecated with the intent of
-removing them altogether by the beginning of 2023.
-
-In order to keep these drivers they have to be converted to vb2.
-If someone is interested in doing this work, then contact the
-linux-media mailinglist (https://linuxtv.org/lists.php).
diff --git a/drivers/staging/media/deprecated/vpfe_capture/ccdc_hw_device.h b/drivers/staging/media/deprecated/vpfe_capture/ccdc_hw_device.h
deleted file mode 100644
index a545052a95a9..000000000000
--- a/drivers/staging/media/deprecated/vpfe_capture/ccdc_hw_device.h
+++ /dev/null
@@ -1,80 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2008-2009 Texas Instruments Inc
- *
- * ccdc device API
- */
-#ifndef _CCDC_HW_DEVICE_H
-#define _CCDC_HW_DEVICE_H
-
-#ifdef __KERNEL__
-#include <linux/videodev2.h>
-#include <linux/device.h>
-#include <media/davinci/vpfe_types.h>
-#include <media/davinci/ccdc_types.h>
-
-/*
- * ccdc hw operations
- */
-struct ccdc_hw_ops {
- /* Pointer to initialize function to initialize ccdc device */
- int (*open) (struct device *dev);
- /* Pointer to deinitialize function */
- int (*close) (struct device *dev);
- /* set ccdc base address */
- void (*set_ccdc_base)(void *base, int size);
- /* Pointer to function to enable or disable ccdc */
- void (*enable) (int en);
- /* reset sbl. only for 6446 */
- void (*reset) (void);
- /* enable output to sdram */
- void (*enable_out_to_sdram) (int en);
- /* Pointer to function to set hw parameters */
- int (*set_hw_if_params) (struct vpfe_hw_if_param *param);
- /* get interface parameters */
- int (*get_hw_if_params) (struct vpfe_hw_if_param *param);
- /* Pointer to function to configure ccdc */
- int (*configure) (void);
-
- /* Pointer to function to set buffer type */
- int (*set_buftype) (enum ccdc_buftype buf_type);
- /* Pointer to function to get buffer type */
- enum ccdc_buftype (*get_buftype) (void);
- /* Pointer to function to set frame format */
- int (*set_frame_format) (enum ccdc_frmfmt frm_fmt);
- /* Pointer to function to get frame format */
- enum ccdc_frmfmt (*get_frame_format) (void);
- /* enumerate hw pix formats */
- int (*enum_pix)(u32 *hw_pix, int i);
- /* Pointer to function to set buffer type */
- u32 (*get_pixel_format) (void);
- /* Pointer to function to get pixel format. */
- int (*set_pixel_format) (u32 pixfmt);
- /* Pointer to function to set image window */
- int (*set_image_window) (struct v4l2_rect *win);
- /* Pointer to function to set image window */
- void (*get_image_window) (struct v4l2_rect *win);
- /* Pointer to function to get line length */
- unsigned int (*get_line_length) (void);
-
- /* Pointer to function to set frame buffer address */
- void (*setfbaddr) (unsigned long addr);
- /* Pointer to function to get field id */
- int (*getfid) (void);
-};
-
-struct ccdc_hw_device {
- /* ccdc device name */
- char name[32];
- /* module owner */
- struct module *owner;
- /* hw ops */
- struct ccdc_hw_ops hw_ops;
-};
-
-/* Used by CCDC module to register & unregister with vpfe capture driver */
-int vpfe_register_ccdc_device(const struct ccdc_hw_device *dev);
-void vpfe_unregister_ccdc_device(const struct ccdc_hw_device *dev);
-
-#endif
-#endif
diff --git a/drivers/staging/media/deprecated/vpfe_capture/dm355_ccdc.c b/drivers/staging/media/deprecated/vpfe_capture/dm355_ccdc.c
deleted file mode 100644
index da8db53e9498..000000000000
--- a/drivers/staging/media/deprecated/vpfe_capture/dm355_ccdc.c
+++ /dev/null
@@ -1,934 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (C) 2005-2009 Texas Instruments Inc
- *
- * CCDC hardware module for DM355
- * ------------------------------
- *
- * This module is for configuring DM355 CCD controller of VPFE to capture
- * Raw yuv or Bayer RGB data from a decoder. CCDC has several modules
- * such as Defect Pixel Correction, Color Space Conversion etc to
- * pre-process the Bayer RGB data, before writing it to SDRAM.
- *
- * TODO: 1) Raw bayer parameter settings and bayer capture
- * 2) Split module parameter structure to module specific ioctl structs
- * 3) add support for lense shading correction
- * 4) investigate if enum used for user space type definition
- * to be replaced by #defines or integer
- */
-#include <linux/platform_device.h>
-#include <linux/uaccess.h>
-#include <linux/videodev2.h>
-#include <linux/err.h>
-#include <linux/module.h>
-
-#include "dm355_ccdc.h"
-#include <media/davinci/vpss.h>
-
-#include "dm355_ccdc_regs.h"
-#include "ccdc_hw_device.h"
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("CCDC Driver for DM355");
-MODULE_AUTHOR("Texas Instruments");
-
-static struct ccdc_oper_config {
- struct device *dev;
- /* CCDC interface type */
- enum vpfe_hw_if_type if_type;
- /* Raw Bayer configuration */
- struct ccdc_params_raw bayer;
- /* YCbCr configuration */
- struct ccdc_params_ycbcr ycbcr;
- /* ccdc base address */
- void __iomem *base_addr;
-} ccdc_cfg = {
- /* Raw configurations */
- .bayer = {
- .pix_fmt = CCDC_PIXFMT_RAW,
- .frm_fmt = CCDC_FRMFMT_PROGRESSIVE,
- .win = CCDC_WIN_VGA,
- .fid_pol = VPFE_PINPOL_POSITIVE,
- .vd_pol = VPFE_PINPOL_POSITIVE,
- .hd_pol = VPFE_PINPOL_POSITIVE,
- .gain = {
- .r_ye = 256,
- .gb_g = 256,
- .gr_cy = 256,
- .b_mg = 256
- },
- .config_params = {
- .datasft = 2,
- .mfilt1 = CCDC_NO_MEDIAN_FILTER1,
- .mfilt2 = CCDC_NO_MEDIAN_FILTER2,
- .alaw = {
- .gamma_wd = 2,
- },
- .blk_clamp = {
- .sample_pixel = 1,
- .dc_sub = 25
- },
- .col_pat_field0 = {
- .olop = CCDC_GREEN_BLUE,
- .olep = CCDC_BLUE,
- .elop = CCDC_RED,
- .elep = CCDC_GREEN_RED
- },
- .col_pat_field1 = {
- .olop = CCDC_GREEN_BLUE,
- .olep = CCDC_BLUE,
- .elop = CCDC_RED,
- .elep = CCDC_GREEN_RED
- },
- },
- },
- /* YCbCr configuration */
- .ycbcr = {
- .win = CCDC_WIN_PAL,
- .pix_fmt = CCDC_PIXFMT_YCBCR_8BIT,
- .frm_fmt = CCDC_FRMFMT_INTERLACED,
- .fid_pol = VPFE_PINPOL_POSITIVE,
- .vd_pol = VPFE_PINPOL_POSITIVE,
- .hd_pol = VPFE_PINPOL_POSITIVE,
- .bt656_enable = 1,
- .pix_order = CCDC_PIXORDER_CBYCRY,
- .buf_type = CCDC_BUFTYPE_FLD_INTERLEAVED
- },
-};
-
-
-/* Raw Bayer formats */
-static u32 ccdc_raw_bayer_pix_formats[] =
- {V4L2_PIX_FMT_SBGGR8, V4L2_PIX_FMT_SBGGR16};
-
-/* Raw YUV formats */
-static u32 ccdc_raw_yuv_pix_formats[] =
- {V4L2_PIX_FMT_UYVY, V4L2_PIX_FMT_YUYV};
-
-/* register access routines */
-static inline u32 regr(u32 offset)
-{
- return __raw_readl(ccdc_cfg.base_addr + offset);
-}
-
-static inline void regw(u32 val, u32 offset)
-{
- __raw_writel(val, ccdc_cfg.base_addr + offset);
-}
-
-static void ccdc_enable(int en)
-{
- unsigned int temp;
- temp = regr(SYNCEN);
- temp &= (~CCDC_SYNCEN_VDHDEN_MASK);
- temp |= (en & CCDC_SYNCEN_VDHDEN_MASK);
- regw(temp, SYNCEN);
-}
-
-static void ccdc_enable_output_to_sdram(int en)
-{
- unsigned int temp;
- temp = regr(SYNCEN);
- temp &= (~(CCDC_SYNCEN_WEN_MASK));
- temp |= ((en << CCDC_SYNCEN_WEN_SHIFT) & CCDC_SYNCEN_WEN_MASK);
- regw(temp, SYNCEN);
-}
-
-static void ccdc_config_gain_offset(void)
-{
- /* configure gain */
- regw(ccdc_cfg.bayer.gain.r_ye, RYEGAIN);
- regw(ccdc_cfg.bayer.gain.gr_cy, GRCYGAIN);
- regw(ccdc_cfg.bayer.gain.gb_g, GBGGAIN);
- regw(ccdc_cfg.bayer.gain.b_mg, BMGGAIN);
- /* configure offset */
- regw(ccdc_cfg.bayer.ccdc_offset, OFFSET);
-}
-
-/*
- * ccdc_restore_defaults()
- * This function restore power on defaults in the ccdc registers
- */
-static int ccdc_restore_defaults(void)
-{
- int i;
-
- dev_dbg(ccdc_cfg.dev, "\nstarting ccdc_restore_defaults...");
- /* set all registers to zero */
- for (i = 0; i <= CCDC_REG_LAST; i += 4)
- regw(0, i);
-
- /* now override the values with power on defaults in registers */
- regw(MODESET_DEFAULT, MODESET);
- /* no culling support */
- regw(CULH_DEFAULT, CULH);
- regw(CULV_DEFAULT, CULV);
- /* Set default Gain and Offset */
- ccdc_cfg.bayer.gain.r_ye = GAIN_DEFAULT;
- ccdc_cfg.bayer.gain.gb_g = GAIN_DEFAULT;
- ccdc_cfg.bayer.gain.gr_cy = GAIN_DEFAULT;
- ccdc_cfg.bayer.gain.b_mg = GAIN_DEFAULT;
- ccdc_config_gain_offset();
- regw(OUTCLIP_DEFAULT, OUTCLIP);
- regw(LSCCFG2_DEFAULT, LSCCFG2);
- /* select ccdc input */
- if (vpss_select_ccdc_source(VPSS_CCDCIN)) {
- dev_dbg(ccdc_cfg.dev, "\ncouldn't select ccdc input source");
- return -EFAULT;
- }
- /* select ccdc clock */
- if (vpss_enable_clock(VPSS_CCDC_CLOCK, 1) < 0) {
- dev_dbg(ccdc_cfg.dev, "\ncouldn't enable ccdc clock");
- return -EFAULT;
- }
- dev_dbg(ccdc_cfg.dev, "\nEnd of ccdc_restore_defaults...");
- return 0;
-}
-
-static int ccdc_open(struct device *device)
-{
- return ccdc_restore_defaults();
-}
-
-static int ccdc_close(struct device *device)
-{
- /* disable clock */
- vpss_enable_clock(VPSS_CCDC_CLOCK, 0);
- /* do nothing for now */
- return 0;
-}
-/*
- * ccdc_setwin()
- * This function will configure the window size to
- * be capture in CCDC reg.
- */
-static void ccdc_setwin(struct v4l2_rect *image_win,
- enum ccdc_frmfmt frm_fmt, int ppc)
-{
- int horz_start, horz_nr_pixels;
- int vert_start, vert_nr_lines;
- int mid_img = 0;
-
- dev_dbg(ccdc_cfg.dev, "\nStarting ccdc_setwin...");
-
- /*
- * ppc - per pixel count. indicates how many pixels per cell
- * output to SDRAM. example, for ycbcr, it is one y and one c, so 2.
- * raw capture this is 1
- */
- horz_start = image_win->left << (ppc - 1);
- horz_nr_pixels = ((image_win->width) << (ppc - 1)) - 1;
-
- /* Writing the horizontal info into the registers */
- regw(horz_start, SPH);
- regw(horz_nr_pixels, NPH);
- vert_start = image_win->top;
-
- if (frm_fmt == CCDC_FRMFMT_INTERLACED) {
- vert_nr_lines = (image_win->height >> 1) - 1;
- vert_start >>= 1;
- /* Since first line doesn't have any data */
- vert_start += 1;
- /* configure VDINT0 and VDINT1 */
- regw(vert_start, VDINT0);
- } else {
- /* Since first line doesn't have any data */
- vert_start += 1;
- vert_nr_lines = image_win->height - 1;
- /* configure VDINT0 and VDINT1 */
- mid_img = vert_start + (image_win->height / 2);
- regw(vert_start, VDINT0);
- regw(mid_img, VDINT1);
- }
- regw(vert_start & CCDC_START_VER_ONE_MASK, SLV0);
- regw(vert_start & CCDC_START_VER_TWO_MASK, SLV1);
- regw(vert_nr_lines & CCDC_NUM_LINES_VER, NLV);
- dev_dbg(ccdc_cfg.dev, "\nEnd of ccdc_setwin...");
-}
-
-/* This function will configure CCDC for YCbCr video capture */
-static void ccdc_config_ycbcr(void)
-{
- struct ccdc_params_ycbcr *params = &ccdc_cfg.ycbcr;
- u32 temp;
-
- /* first set the CCDC power on defaults values in all registers */
- dev_dbg(ccdc_cfg.dev, "\nStarting ccdc_config_ycbcr...");
- ccdc_restore_defaults();
-
- /* configure pixel format & video frame format */
- temp = (((params->pix_fmt & CCDC_INPUT_MODE_MASK) <<
- CCDC_INPUT_MODE_SHIFT) |
- ((params->frm_fmt & CCDC_FRM_FMT_MASK) <<
- CCDC_FRM_FMT_SHIFT));
-
- /* setup BT.656 sync mode */
- if (params->bt656_enable) {
- regw(CCDC_REC656IF_BT656_EN, REC656IF);
- /*
- * configure the FID, VD, HD pin polarity fld,hd pol positive,
- * vd negative, 8-bit pack mode
- */
- temp |= CCDC_VD_POL_NEGATIVE;
- } else { /* y/c external sync mode */
- temp |= (((params->fid_pol & CCDC_FID_POL_MASK) <<
- CCDC_FID_POL_SHIFT) |
- ((params->hd_pol & CCDC_HD_POL_MASK) <<
- CCDC_HD_POL_SHIFT) |
- ((params->vd_pol & CCDC_VD_POL_MASK) <<
- CCDC_VD_POL_SHIFT));
- }
-
- /* pack the data to 8-bit */
- temp |= CCDC_DATA_PACK_ENABLE;
-
- regw(temp, MODESET);
-
- /* configure video window */
- ccdc_setwin(&params->win, params->frm_fmt, 2);
-
- /* configure the order of y cb cr in SD-RAM */
- temp = (params->pix_order << CCDC_Y8POS_SHIFT);
- temp |= CCDC_LATCH_ON_VSYNC_DISABLE | CCDC_CCDCFG_FIDMD_NO_LATCH_VSYNC;
- regw(temp, CCDCFG);
-
- /*
- * configure the horizontal line offset. This is done by rounding up
- * width to a multiple of 16 pixels and multiply by two to account for
- * y:cb:cr 4:2:2 data
- */
- regw(((params->win.width * 2 + 31) >> 5), HSIZE);
-
- /* configure the memory line offset */
- if (params->buf_type == CCDC_BUFTYPE_FLD_INTERLEAVED) {
- /* two fields are interleaved in memory */
- regw(CCDC_SDOFST_FIELD_INTERLEAVED, SDOFST);
- }
-
- dev_dbg(ccdc_cfg.dev, "\nEnd of ccdc_config_ycbcr...\n");
-}
-
-/*
- * ccdc_config_black_clamp()
- * configure parameters for Optical Black Clamp
- */
-static void ccdc_config_black_clamp(struct ccdc_black_clamp *bclamp)
-{
- u32 val;
-
- if (!bclamp->b_clamp_enable) {
- /* configure DCSub */
- regw(bclamp->dc_sub & CCDC_BLK_DC_SUB_MASK, DCSUB);
- regw(0x0000, CLAMP);
- return;
- }
- /* Enable the Black clamping, set sample lines and pixels */
- val = (bclamp->start_pixel & CCDC_BLK_ST_PXL_MASK) |
- ((bclamp->sample_pixel & CCDC_BLK_SAMPLE_LN_MASK) <<
- CCDC_BLK_SAMPLE_LN_SHIFT) | CCDC_BLK_CLAMP_ENABLE;
- regw(val, CLAMP);
-
- /* If Black clamping is enable then make dcsub 0 */
- val = (bclamp->sample_ln & CCDC_NUM_LINE_CALC_MASK)
- << CCDC_NUM_LINE_CALC_SHIFT;
- regw(val, DCSUB);
-}
-
-/*
- * ccdc_config_black_compense()
- * configure parameters for Black Compensation
- */
-static void ccdc_config_black_compense(struct ccdc_black_compensation *bcomp)
-{
- u32 val;
-
- val = (bcomp->b & CCDC_BLK_COMP_MASK) |
- ((bcomp->gb & CCDC_BLK_COMP_MASK) <<
- CCDC_BLK_COMP_GB_COMP_SHIFT);
- regw(val, BLKCMP1);
-
- val = ((bcomp->gr & CCDC_BLK_COMP_MASK) <<
- CCDC_BLK_COMP_GR_COMP_SHIFT) |
- ((bcomp->r & CCDC_BLK_COMP_MASK) <<
- CCDC_BLK_COMP_R_COMP_SHIFT);
- regw(val, BLKCMP0);
-}
-
-/*
- * ccdc_write_dfc_entry()
- * write an entry in the dfc table.
- */
-static int ccdc_write_dfc_entry(int index, struct ccdc_vertical_dft *dfc)
-{
-/* TODO This is to be re-visited and adjusted */
-#define DFC_WRITE_WAIT_COUNT 1000
- u32 val, count = DFC_WRITE_WAIT_COUNT;
-
- regw(dfc->dft_corr_vert[index], DFCMEM0);
- regw(dfc->dft_corr_horz[index], DFCMEM1);
- regw(dfc->dft_corr_sub1[index], DFCMEM2);
- regw(dfc->dft_corr_sub2[index], DFCMEM3);
- regw(dfc->dft_corr_sub3[index], DFCMEM4);
- /* set WR bit to write */
- val = regr(DFCMEMCTL) | CCDC_DFCMEMCTL_DFCMWR_MASK;
- regw(val, DFCMEMCTL);
-
- /*
- * Assume, it is very short. If we get an error, we need to
- * adjust this value
- */
- while (regr(DFCMEMCTL) & CCDC_DFCMEMCTL_DFCMWR_MASK)
- count--;
- /*
- * TODO We expect the count to be non-zero to be successful. Adjust
- * the count if write requires more time
- */
-
- if (count) {
- dev_err(ccdc_cfg.dev, "defect table write timeout !!!\n");
- return -1;
- }
- return 0;
-}
-
-/*
- * ccdc_config_vdfc()
- * configure parameters for Vertical Defect Correction
- */
-static int ccdc_config_vdfc(struct ccdc_vertical_dft *dfc)
-{
- u32 val;
- int i;
-
- /* Configure General Defect Correction. The table used is from IPIPE */
- val = dfc->gen_dft_en & CCDC_DFCCTL_GDFCEN_MASK;
-
- /* Configure Vertical Defect Correction if needed */
- if (!dfc->ver_dft_en) {
- /* Enable only General Defect Correction */
- regw(val, DFCCTL);
- return 0;
- }
-
- if (dfc->table_size > CCDC_DFT_TABLE_SIZE)
- return -EINVAL;
-
- val |= CCDC_DFCCTL_VDFC_DISABLE;
- val |= (dfc->dft_corr_ctl.vdfcsl & CCDC_DFCCTL_VDFCSL_MASK) <<
- CCDC_DFCCTL_VDFCSL_SHIFT;
- val |= (dfc->dft_corr_ctl.vdfcuda & CCDC_DFCCTL_VDFCUDA_MASK) <<
- CCDC_DFCCTL_VDFCUDA_SHIFT;
- val |= (dfc->dft_corr_ctl.vdflsft & CCDC_DFCCTL_VDFLSFT_MASK) <<
- CCDC_DFCCTL_VDFLSFT_SHIFT;
- regw(val , DFCCTL);
-
- /* clear address ptr to offset 0 */
- val = CCDC_DFCMEMCTL_DFCMARST_MASK << CCDC_DFCMEMCTL_DFCMARST_SHIFT;
-
- /* write defect table entries */
- for (i = 0; i < dfc->table_size; i++) {
- /* increment address for non zero index */
- if (i != 0)
- val = CCDC_DFCMEMCTL_INC_ADDR;
- regw(val, DFCMEMCTL);
- if (ccdc_write_dfc_entry(i, dfc) < 0)
- return -EFAULT;
- }
-
- /* update saturation level and enable dfc */
- regw(dfc->saturation_ctl & CCDC_VDC_DFCVSAT_MASK, DFCVSAT);
- val = regr(DFCCTL) | (CCDC_DFCCTL_VDFCEN_MASK <<
- CCDC_DFCCTL_VDFCEN_SHIFT);
- regw(val, DFCCTL);
- return 0;
-}
-
-/*
- * ccdc_config_csc()
- * configure parameters for color space conversion
- * Each register CSCM0-7 has two values in S8Q5 format.
- */
-static void ccdc_config_csc(struct ccdc_csc *csc)
-{
- u32 val1 = 0, val2;
- int i;
-
- if (!csc->enable)
- return;
-
- /* Enable the CSC sub-module */
- regw(CCDC_CSC_ENABLE, CSCCTL);
-
- /* Converting the co-eff as per the format of the register */
- for (i = 0; i < CCDC_CSC_COEFF_TABLE_SIZE; i++) {
- if ((i % 2) == 0) {
- /* CSCM - LSB */
- val1 = (csc->coeff[i].integer &
- CCDC_CSC_COEF_INTEG_MASK)
- << CCDC_CSC_COEF_INTEG_SHIFT;
- /*
- * convert decimal part to binary. Use 2 decimal
- * precision, user values range from .00 - 0.99
- */
- val1 |= (((csc->coeff[i].decimal &
- CCDC_CSC_COEF_DECIMAL_MASK) *
- CCDC_CSC_DEC_MAX) / 100);
- } else {
-
- /* CSCM - MSB */
- val2 = (csc->coeff[i].integer &
- CCDC_CSC_COEF_INTEG_MASK)
- << CCDC_CSC_COEF_INTEG_SHIFT;
- val2 |= (((csc->coeff[i].decimal &
- CCDC_CSC_COEF_DECIMAL_MASK) *
- CCDC_CSC_DEC_MAX) / 100);
- val2 <<= CCDC_CSCM_MSB_SHIFT;
- val2 |= val1;
- regw(val2, (CSCM0 + ((i - 1) << 1)));
- }
- }
-}
-
-/*
- * ccdc_config_color_patterns()
- * configure parameters for color patterns
- */
-static void ccdc_config_color_patterns(struct ccdc_col_pat *pat0,
- struct ccdc_col_pat *pat1)
-{
- u32 val;
-
- val = (pat0->olop | (pat0->olep << 2) | (pat0->elop << 4) |
- (pat0->elep << 6) | (pat1->olop << 8) | (pat1->olep << 10) |
- (pat1->elop << 12) | (pat1->elep << 14));
- regw(val, COLPTN);
-}
-
-/* This function will configure CCDC for Raw mode image capture */
-static int ccdc_config_raw(void)
-{
- struct ccdc_params_raw *params = &ccdc_cfg.bayer;
- struct ccdc_config_params_raw *config_params =
- &ccdc_cfg.bayer.config_params;
- unsigned int val;
-
- dev_dbg(ccdc_cfg.dev, "\nStarting ccdc_config_raw...");
-
- /* restore power on defaults to register */
- ccdc_restore_defaults();
-
- /* CCDCFG register:
- * set CCD Not to swap input since input is RAW data
- * set FID detection function to Latch at V-Sync
- * set WENLOG - ccdc valid area to AND
- * set TRGSEL to WENBIT
- * set EXTRG to DISABLE
- * disable latching function on VSYNC - shadowed registers
- */
- regw(CCDC_YCINSWP_RAW | CCDC_CCDCFG_FIDMD_LATCH_VSYNC |
- CCDC_CCDCFG_WENLOG_AND | CCDC_CCDCFG_TRGSEL_WEN |
- CCDC_CCDCFG_EXTRG_DISABLE | CCDC_LATCH_ON_VSYNC_DISABLE, CCDCFG);
-
- /*
- * Set VDHD direction to input, input type to raw input
- * normal data polarity, do not use external WEN
- */
- val = (CCDC_VDHDOUT_INPUT | CCDC_RAW_IP_MODE | CCDC_DATAPOL_NORMAL |
- CCDC_EXWEN_DISABLE);
-
- /*
- * Configure the vertical sync polarity (MODESET.VDPOL), horizontal
- * sync polarity (MODESET.HDPOL), field id polarity (MODESET.FLDPOL),
- * frame format(progressive or interlace), & pixel format (Input mode)
- */
- val |= (((params->vd_pol & CCDC_VD_POL_MASK) << CCDC_VD_POL_SHIFT) |
- ((params->hd_pol & CCDC_HD_POL_MASK) << CCDC_HD_POL_SHIFT) |
- ((params->fid_pol & CCDC_FID_POL_MASK) << CCDC_FID_POL_SHIFT) |
- ((params->frm_fmt & CCDC_FRM_FMT_MASK) << CCDC_FRM_FMT_SHIFT) |
- ((params->pix_fmt & CCDC_PIX_FMT_MASK) << CCDC_PIX_FMT_SHIFT));
-
- /* set pack for alaw compression */
- if ((config_params->data_sz == CCDC_DATA_8BITS) ||
- config_params->alaw.enable)
- val |= CCDC_DATA_PACK_ENABLE;
-
- /* Configure for LPF */
- if (config_params->lpf_enable)
- val |= (config_params->lpf_enable & CCDC_LPF_MASK) <<
- CCDC_LPF_SHIFT;
-
- /* Configure the data shift */
- val |= (config_params->datasft & CCDC_DATASFT_MASK) <<
- CCDC_DATASFT_SHIFT;
- regw(val , MODESET);
- dev_dbg(ccdc_cfg.dev, "\nWriting 0x%x to MODESET...\n", val);
-
- /* Configure the Median Filter threshold */
- regw((config_params->med_filt_thres) & CCDC_MED_FILT_THRESH, MEDFILT);
-
- /* Configure GAMMAWD register. defaur 11-2, and Mosaic cfa pattern */
- val = CCDC_GAMMA_BITS_11_2 << CCDC_GAMMAWD_INPUT_SHIFT |
- CCDC_CFA_MOSAIC;
-
- /* Enable and configure aLaw register if needed */
- if (config_params->alaw.enable) {
- val |= (CCDC_ALAW_ENABLE |
- ((config_params->alaw.gamma_wd &
- CCDC_ALAW_GAMMA_WD_MASK) <<
- CCDC_GAMMAWD_INPUT_SHIFT));
- }
-
- /* Configure Median filter1 & filter2 */
- val |= ((config_params->mfilt1 << CCDC_MFILT1_SHIFT) |
- (config_params->mfilt2 << CCDC_MFILT2_SHIFT));
-
- regw(val, GAMMAWD);
- dev_dbg(ccdc_cfg.dev, "\nWriting 0x%x to GAMMAWD...\n", val);
-
- /* configure video window */
- ccdc_setwin(&params->win, params->frm_fmt, 1);
-
- /* Optical Clamp Averaging */
- ccdc_config_black_clamp(&config_params->blk_clamp);
-
- /* Black level compensation */
- ccdc_config_black_compense(&config_params->blk_comp);
-
- /* Vertical Defect Correction if needed */
- if (ccdc_config_vdfc(&config_params->vertical_dft) < 0)
- return -EFAULT;
-
- /* color space conversion */
- ccdc_config_csc(&config_params->csc);
-
- /* color pattern */
- ccdc_config_color_patterns(&config_params->col_pat_field0,
- &config_params->col_pat_field1);
-
- /* Configure the Gain & offset control */
- ccdc_config_gain_offset();
-
- dev_dbg(ccdc_cfg.dev, "\nWriting %x to COLPTN...\n", val);
-
- /* Configure DATAOFST register */
- val = (config_params->data_offset.horz_offset & CCDC_DATAOFST_MASK) <<
- CCDC_DATAOFST_H_SHIFT;
- val |= (config_params->data_offset.vert_offset & CCDC_DATAOFST_MASK) <<
- CCDC_DATAOFST_V_SHIFT;
- regw(val, DATAOFST);
-
- /* configuring HSIZE register */
- val = (params->horz_flip_enable & CCDC_HSIZE_FLIP_MASK) <<
- CCDC_HSIZE_FLIP_SHIFT;
-
- /* If pack 8 is enable then 1 pixel will take 1 byte */
- if ((config_params->data_sz == CCDC_DATA_8BITS) ||
- config_params->alaw.enable) {
- val |= (((params->win.width) + 31) >> 5) &
- CCDC_HSIZE_VAL_MASK;
-
- /* adjust to multiple of 32 */
- dev_dbg(ccdc_cfg.dev, "\nWriting 0x%x to HSIZE...\n",
- (((params->win.width) + 31) >> 5) &
- CCDC_HSIZE_VAL_MASK);
- } else {
- /* else one pixel will take 2 byte */
- val |= (((params->win.width * 2) + 31) >> 5) &
- CCDC_HSIZE_VAL_MASK;
-
- dev_dbg(ccdc_cfg.dev, "\nWriting 0x%x to HSIZE...\n",
- (((params->win.width * 2) + 31) >> 5) &
- CCDC_HSIZE_VAL_MASK);
- }
- regw(val, HSIZE);
-
- /* Configure SDOFST register */
- if (params->frm_fmt == CCDC_FRMFMT_INTERLACED) {
- if (params->image_invert_enable) {
- /* For interlace inverse mode */
- regw(CCDC_SDOFST_INTERLACE_INVERSE, SDOFST);
- dev_dbg(ccdc_cfg.dev, "\nWriting %x to SDOFST...\n",
- CCDC_SDOFST_INTERLACE_INVERSE);
- } else {
- /* For interlace non inverse mode */
- regw(CCDC_SDOFST_INTERLACE_NORMAL, SDOFST);
- dev_dbg(ccdc_cfg.dev, "\nWriting %x to SDOFST...\n",
- CCDC_SDOFST_INTERLACE_NORMAL);
- }
- } else if (params->frm_fmt == CCDC_FRMFMT_PROGRESSIVE) {
- if (params->image_invert_enable) {
- /* For progessive inverse mode */
- regw(CCDC_SDOFST_PROGRESSIVE_INVERSE, SDOFST);
- dev_dbg(ccdc_cfg.dev, "\nWriting %x to SDOFST...\n",
- CCDC_SDOFST_PROGRESSIVE_INVERSE);
- } else {
- /* For progessive non inverse mode */
- regw(CCDC_SDOFST_PROGRESSIVE_NORMAL, SDOFST);
- dev_dbg(ccdc_cfg.dev, "\nWriting %x to SDOFST...\n",
- CCDC_SDOFST_PROGRESSIVE_NORMAL);
- }
- }
- dev_dbg(ccdc_cfg.dev, "\nend of ccdc_config_raw...");
- return 0;
-}
-
-static int ccdc_configure(void)
-{
- if (ccdc_cfg.if_type == VPFE_RAW_BAYER)
- return ccdc_config_raw();
- else
- ccdc_config_ycbcr();
- return 0;
-}
-
-static int ccdc_set_buftype(enum ccdc_buftype buf_type)
-{
- if (ccdc_cfg.if_type == VPFE_RAW_BAYER)
- ccdc_cfg.bayer.buf_type = buf_type;
- else
- ccdc_cfg.ycbcr.buf_type = buf_type;
- return 0;
-}
-static enum ccdc_buftype ccdc_get_buftype(void)
-{
- if (ccdc_cfg.if_type == VPFE_RAW_BAYER)
- return ccdc_cfg.bayer.buf_type;
- return ccdc_cfg.ycbcr.buf_type;
-}
-
-static int ccdc_enum_pix(u32 *pix, int i)
-{
- int ret = -EINVAL;
- if (ccdc_cfg.if_type == VPFE_RAW_BAYER) {
- if (i < ARRAY_SIZE(ccdc_raw_bayer_pix_formats)) {
- *pix = ccdc_raw_bayer_pix_formats[i];
- ret = 0;
- }
- } else {
- if (i < ARRAY_SIZE(ccdc_raw_yuv_pix_formats)) {
- *pix = ccdc_raw_yuv_pix_formats[i];
- ret = 0;
- }
- }
- return ret;
-}
-
-static int ccdc_set_pixel_format(u32 pixfmt)
-{
- struct ccdc_a_law *alaw = &ccdc_cfg.bayer.config_params.alaw;
-
- if (ccdc_cfg.if_type == VPFE_RAW_BAYER) {
- ccdc_cfg.bayer.pix_fmt = CCDC_PIXFMT_RAW;
- if (pixfmt == V4L2_PIX_FMT_SBGGR8)
- alaw->enable = 1;
- else if (pixfmt != V4L2_PIX_FMT_SBGGR16)
- return -EINVAL;
- } else {
- if (pixfmt == V4L2_PIX_FMT_YUYV)
- ccdc_cfg.ycbcr.pix_order = CCDC_PIXORDER_YCBYCR;
- else if (pixfmt == V4L2_PIX_FMT_UYVY)
- ccdc_cfg.ycbcr.pix_order = CCDC_PIXORDER_CBYCRY;
- else
- return -EINVAL;
- }
- return 0;
-}
-static u32 ccdc_get_pixel_format(void)
-{
- struct ccdc_a_law *alaw = &ccdc_cfg.bayer.config_params.alaw;
- u32 pixfmt;
-
- if (ccdc_cfg.if_type == VPFE_RAW_BAYER)
- if (alaw->enable)
- pixfmt = V4L2_PIX_FMT_SBGGR8;
- else
- pixfmt = V4L2_PIX_FMT_SBGGR16;
- else {
- if (ccdc_cfg.ycbcr.pix_order == CCDC_PIXORDER_YCBYCR)
- pixfmt = V4L2_PIX_FMT_YUYV;
- else
- pixfmt = V4L2_PIX_FMT_UYVY;
- }
- return pixfmt;
-}
-static int ccdc_set_image_window(struct v4l2_rect *win)
-{
- if (ccdc_cfg.if_type == VPFE_RAW_BAYER)
- ccdc_cfg.bayer.win = *win;
- else
- ccdc_cfg.ycbcr.win = *win;
- return 0;
-}
-
-static void ccdc_get_image_window(struct v4l2_rect *win)
-{
- if (ccdc_cfg.if_type == VPFE_RAW_BAYER)
- *win = ccdc_cfg.bayer.win;
- else
- *win = ccdc_cfg.ycbcr.win;
-}
-
-static unsigned int ccdc_get_line_length(void)
-{
- struct ccdc_config_params_raw *config_params =
- &ccdc_cfg.bayer.config_params;
- unsigned int len;
-
- if (ccdc_cfg.if_type == VPFE_RAW_BAYER) {
- if ((config_params->alaw.enable) ||
- (config_params->data_sz == CCDC_DATA_8BITS))
- len = ccdc_cfg.bayer.win.width;
- else
- len = ccdc_cfg.bayer.win.width * 2;
- } else
- len = ccdc_cfg.ycbcr.win.width * 2;
- return ALIGN(len, 32);
-}
-
-static int ccdc_set_frame_format(enum ccdc_frmfmt frm_fmt)
-{
- if (ccdc_cfg.if_type == VPFE_RAW_BAYER)
- ccdc_cfg.bayer.frm_fmt = frm_fmt;
- else
- ccdc_cfg.ycbcr.frm_fmt = frm_fmt;
- return 0;
-}
-
-static enum ccdc_frmfmt ccdc_get_frame_format(void)
-{
- if (ccdc_cfg.if_type == VPFE_RAW_BAYER)
- return ccdc_cfg.bayer.frm_fmt;
- else
- return ccdc_cfg.ycbcr.frm_fmt;
-}
-
-static int ccdc_getfid(void)
-{
- return (regr(MODESET) >> 15) & 1;
-}
-
-/* misc operations */
-static inline void ccdc_setfbaddr(unsigned long addr)
-{
- regw((addr >> 21) & 0x007f, STADRH);
- regw((addr >> 5) & 0x0ffff, STADRL);
-}
-
-static int ccdc_set_hw_if_params(struct vpfe_hw_if_param *params)
-{
- ccdc_cfg.if_type = params->if_type;
-
- switch (params->if_type) {
- case VPFE_BT656:
- case VPFE_YCBCR_SYNC_16:
- case VPFE_YCBCR_SYNC_8:
- ccdc_cfg.ycbcr.vd_pol = params->vdpol;
- ccdc_cfg.ycbcr.hd_pol = params->hdpol;
- break;
- default:
- /* TODO add support for raw bayer here */
- return -EINVAL;
- }
- return 0;
-}
-
-static const struct ccdc_hw_device ccdc_hw_dev = {
- .name = "DM355 CCDC",
- .owner = THIS_MODULE,
- .hw_ops = {
- .open = ccdc_open,
- .close = ccdc_close,
- .enable = ccdc_enable,
- .enable_out_to_sdram = ccdc_enable_output_to_sdram,
- .set_hw_if_params = ccdc_set_hw_if_params,
- .configure = ccdc_configure,
- .set_buftype = ccdc_set_buftype,
- .get_buftype = ccdc_get_buftype,
- .enum_pix = ccdc_enum_pix,
- .set_pixel_format = ccdc_set_pixel_format,
- .get_pixel_format = ccdc_get_pixel_format,
- .set_frame_format = ccdc_set_frame_format,
- .get_frame_format = ccdc_get_frame_format,
- .set_image_window = ccdc_set_image_window,
- .get_image_window = ccdc_get_image_window,
- .get_line_length = ccdc_get_line_length,
- .setfbaddr = ccdc_setfbaddr,
- .getfid = ccdc_getfid,
- },
-};
-
-static int dm355_ccdc_probe(struct platform_device *pdev)
-{
- void (*setup_pinmux)(void);
- struct resource *res;
- int status = 0;
-
- /*
- * first try to register with vpfe. If not correct platform, then we
- * don't have to iomap
- */
- status = vpfe_register_ccdc_device(&ccdc_hw_dev);
- if (status < 0)
- return status;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- status = -ENODEV;
- goto fail_nores;
- }
-
- res = request_mem_region(res->start, resource_size(res), res->name);
- if (!res) {
- status = -EBUSY;
- goto fail_nores;
- }
-
- ccdc_cfg.base_addr = ioremap(res->start, resource_size(res));
- if (!ccdc_cfg.base_addr) {
- status = -ENOMEM;
- goto fail_nomem;
- }
-
- /* Platform data holds setup_pinmux function ptr */
- if (NULL == pdev->dev.platform_data) {
- status = -ENODEV;
- goto fail_nomap;
- }
- setup_pinmux = pdev->dev.platform_data;
- /*
- * setup Mux configuration for ccdc which may be different for
- * different SoCs using this CCDC
- */
- setup_pinmux();
- ccdc_cfg.dev = &pdev->dev;
- printk(KERN_NOTICE "%s is registered with vpfe.\n", ccdc_hw_dev.name);
- return 0;
-fail_nomap:
- iounmap(ccdc_cfg.base_addr);
-fail_nomem:
- release_mem_region(res->start, resource_size(res));
-fail_nores:
- vpfe_unregister_ccdc_device(&ccdc_hw_dev);
- return status;
-}
-
-static int dm355_ccdc_remove(struct platform_device *pdev)
-{
- struct resource *res;
-
- iounmap(ccdc_cfg.base_addr);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(res->start, resource_size(res));
- vpfe_unregister_ccdc_device(&ccdc_hw_dev);
- return 0;
-}
-
-static struct platform_driver dm355_ccdc_driver = {
- .driver = {
- .name = "dm355_ccdc",
- },
- .remove = dm355_ccdc_remove,
- .probe = dm355_ccdc_probe,
-};
-
-module_platform_driver(dm355_ccdc_driver);
diff --git a/drivers/staging/media/deprecated/vpfe_capture/dm355_ccdc.h b/drivers/staging/media/deprecated/vpfe_capture/dm355_ccdc.h
deleted file mode 100644
index 1f3d00aa46d1..000000000000
--- a/drivers/staging/media/deprecated/vpfe_capture/dm355_ccdc.h
+++ /dev/null
@@ -1,308 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2005-2009 Texas Instruments Inc
- */
-#ifndef _DM355_CCDC_H
-#define _DM355_CCDC_H
-#include <media/davinci/ccdc_types.h>
-#include <media/davinci/vpfe_types.h>
-
-/* enum for No of pixel per line to be avg. in Black Clamping */
-enum ccdc_sample_length {
- CCDC_SAMPLE_1PIXELS,
- CCDC_SAMPLE_2PIXELS,
- CCDC_SAMPLE_4PIXELS,
- CCDC_SAMPLE_8PIXELS,
- CCDC_SAMPLE_16PIXELS
-};
-
-/* enum for No of lines in Black Clamping */
-enum ccdc_sample_line {
- CCDC_SAMPLE_1LINES,
- CCDC_SAMPLE_2LINES,
- CCDC_SAMPLE_4LINES,
- CCDC_SAMPLE_8LINES,
- CCDC_SAMPLE_16LINES
-};
-
-/* enum for Alaw gamma width */
-enum ccdc_gamma_width {
- CCDC_GAMMA_BITS_13_4,
- CCDC_GAMMA_BITS_12_3,
- CCDC_GAMMA_BITS_11_2,
- CCDC_GAMMA_BITS_10_1,
- CCDC_GAMMA_BITS_09_0
-};
-
-enum ccdc_colpats {
- CCDC_RED,
- CCDC_GREEN_RED,
- CCDC_GREEN_BLUE,
- CCDC_BLUE
-};
-
-struct ccdc_col_pat {
- enum ccdc_colpats olop;
- enum ccdc_colpats olep;
- enum ccdc_colpats elop;
- enum ccdc_colpats elep;
-};
-
-enum ccdc_datasft {
- CCDC_DATA_NO_SHIFT,
- CCDC_DATA_SHIFT_1BIT,
- CCDC_DATA_SHIFT_2BIT,
- CCDC_DATA_SHIFT_3BIT,
- CCDC_DATA_SHIFT_4BIT,
- CCDC_DATA_SHIFT_5BIT,
- CCDC_DATA_SHIFT_6BIT
-};
-
-enum ccdc_data_size {
- CCDC_DATA_16BITS,
- CCDC_DATA_15BITS,
- CCDC_DATA_14BITS,
- CCDC_DATA_13BITS,
- CCDC_DATA_12BITS,
- CCDC_DATA_11BITS,
- CCDC_DATA_10BITS,
- CCDC_DATA_8BITS
-};
-enum ccdc_mfilt1 {
- CCDC_NO_MEDIAN_FILTER1,
- CCDC_AVERAGE_FILTER1,
- CCDC_MEDIAN_FILTER1
-};
-
-enum ccdc_mfilt2 {
- CCDC_NO_MEDIAN_FILTER2,
- CCDC_AVERAGE_FILTER2,
- CCDC_MEDIAN_FILTER2
-};
-
-/* structure for ALaw */
-struct ccdc_a_law {
- /* Enable/disable A-Law */
- unsigned char enable;
- /* Gamma Width Input */
- enum ccdc_gamma_width gamma_wd;
-};
-
-/* structure for Black Clamping */
-struct ccdc_black_clamp {
- /* only if bClampEnable is TRUE */
- unsigned char b_clamp_enable;
- /* only if bClampEnable is TRUE */
- enum ccdc_sample_length sample_pixel;
- /* only if bClampEnable is TRUE */
- enum ccdc_sample_line sample_ln;
- /* only if bClampEnable is TRUE */
- unsigned short start_pixel;
- /* only if bClampEnable is FALSE */
- unsigned short sgain;
- unsigned short dc_sub;
-};
-
-/* structure for Black Level Compensation */
-struct ccdc_black_compensation {
- /* Constant value to subtract from Red component */
- unsigned char r;
- /* Constant value to subtract from Gr component */
- unsigned char gr;
- /* Constant value to subtract from Blue component */
- unsigned char b;
- /* Constant value to subtract from Gb component */
- unsigned char gb;
-};
-
-struct ccdc_float {
- int integer;
- unsigned int decimal;
-};
-
-#define CCDC_CSC_COEFF_TABLE_SIZE 16
-/* structure for color space converter */
-struct ccdc_csc {
- unsigned char enable;
- /*
- * S8Q5. Use 2 decimal precision, user values range from -3.00 to 3.99.
- * example - to use 1.03, set integer part as 1, and decimal part as 3
- * to use -1.03, set integer part as -1 and decimal part as 3
- */
- struct ccdc_float coeff[CCDC_CSC_COEFF_TABLE_SIZE];
-};
-
-/* Structures for Vertical Defect Correction*/
-enum ccdc_vdf_csl {
- CCDC_VDF_NORMAL,
- CCDC_VDF_HORZ_INTERPOL_SAT,
- CCDC_VDF_HORZ_INTERPOL
-};
-
-enum ccdc_vdf_cuda {
- CCDC_VDF_WHOLE_LINE_CORRECT,
- CCDC_VDF_UPPER_DISABLE
-};
-
-enum ccdc_dfc_mwr {
- CCDC_DFC_MWR_WRITE_COMPLETE,
- CCDC_DFC_WRITE_REG
-};
-
-enum ccdc_dfc_mrd {
- CCDC_DFC_READ_COMPLETE,
- CCDC_DFC_READ_REG
-};
-
-enum ccdc_dfc_ma_rst {
- CCDC_DFC_INCR_ADDR,
- CCDC_DFC_CLR_ADDR
-};
-
-enum ccdc_dfc_mclr {
- CCDC_DFC_CLEAR_COMPLETE,
- CCDC_DFC_CLEAR
-};
-
-struct ccdc_dft_corr_ctl {
- enum ccdc_vdf_csl vdfcsl;
- enum ccdc_vdf_cuda vdfcuda;
- unsigned int vdflsft;
-};
-
-struct ccdc_dft_corr_mem_ctl {
- enum ccdc_dfc_mwr dfcmwr;
- enum ccdc_dfc_mrd dfcmrd;
- enum ccdc_dfc_ma_rst dfcmarst;
- enum ccdc_dfc_mclr dfcmclr;
-};
-
-#define CCDC_DFT_TABLE_SIZE 16
-/*
- * Main Structure for vertical defect correction. Vertical defect
- * correction can correct up to 16 defects if defects less than 16
- * then pad the rest with 0
- */
-struct ccdc_vertical_dft {
- unsigned char ver_dft_en;
- unsigned char gen_dft_en;
- unsigned int saturation_ctl;
- struct ccdc_dft_corr_ctl dft_corr_ctl;
- struct ccdc_dft_corr_mem_ctl dft_corr_mem_ctl;
- int table_size;
- unsigned int dft_corr_horz[CCDC_DFT_TABLE_SIZE];
- unsigned int dft_corr_vert[CCDC_DFT_TABLE_SIZE];
- unsigned int dft_corr_sub1[CCDC_DFT_TABLE_SIZE];
- unsigned int dft_corr_sub2[CCDC_DFT_TABLE_SIZE];
- unsigned int dft_corr_sub3[CCDC_DFT_TABLE_SIZE];
-};
-
-struct ccdc_data_offset {
- unsigned char horz_offset;
- unsigned char vert_offset;
-};
-
-/*
- * Structure for CCDC configuration parameters for raw capture mode passed
- * by application
- */
-struct ccdc_config_params_raw {
- /* data shift to be applied before storing */
- enum ccdc_datasft datasft;
- /* data size value from 8 to 16 bits */
- enum ccdc_data_size data_sz;
- /* median filter for sdram */
- enum ccdc_mfilt1 mfilt1;
- enum ccdc_mfilt2 mfilt2;
- /* low pass filter enable/disable */
- unsigned char lpf_enable;
- /* Threshold of median filter */
- int med_filt_thres;
- /*
- * horz and vertical data offset. Applicable for defect correction
- * and lsc
- */
- struct ccdc_data_offset data_offset;
- /* Structure for Optional A-Law */
- struct ccdc_a_law alaw;
- /* Structure for Optical Black Clamp */
- struct ccdc_black_clamp blk_clamp;
- /* Structure for Black Compensation */
- struct ccdc_black_compensation blk_comp;
- /* structure for vertical Defect Correction Module Configuration */
- struct ccdc_vertical_dft vertical_dft;
- /* structure for color space converter Module Configuration */
- struct ccdc_csc csc;
- /* color patters for bayer capture */
- struct ccdc_col_pat col_pat_field0;
- struct ccdc_col_pat col_pat_field1;
-};
-
-#ifdef __KERNEL__
-#include <linux/io.h>
-
-#define CCDC_WIN_PAL {0, 0, 720, 576}
-#define CCDC_WIN_VGA {0, 0, 640, 480}
-
-struct ccdc_params_ycbcr {
- /* pixel format */
- enum ccdc_pixfmt pix_fmt;
- /* progressive or interlaced frame */
- enum ccdc_frmfmt frm_fmt;
- /* video window */
- struct v4l2_rect win;
- /* field id polarity */
- enum vpfe_pin_pol fid_pol;
- /* vertical sync polarity */
- enum vpfe_pin_pol vd_pol;
- /* horizontal sync polarity */
- enum vpfe_pin_pol hd_pol;
- /* enable BT.656 embedded sync mode */
- int bt656_enable;
- /* cb:y:cr:y or y:cb:y:cr in memory */
- enum ccdc_pixorder pix_order;
- /* interleaved or separated fields */
- enum ccdc_buftype buf_type;
-};
-
-/* Gain applied to Raw Bayer data */
-struct ccdc_gain {
- unsigned short r_ye;
- unsigned short gr_cy;
- unsigned short gb_g;
- unsigned short b_mg;
-};
-
-/* Structure for CCDC configuration parameters for raw capture mode */
-struct ccdc_params_raw {
- /* pixel format */
- enum ccdc_pixfmt pix_fmt;
- /* progressive or interlaced frame */
- enum ccdc_frmfmt frm_fmt;
- /* video window */
- struct v4l2_rect win;
- /* field id polarity */
- enum vpfe_pin_pol fid_pol;
- /* vertical sync polarity */
- enum vpfe_pin_pol vd_pol;
- /* horizontal sync polarity */
- enum vpfe_pin_pol hd_pol;
- /* interleaved or separated fields */
- enum ccdc_buftype buf_type;
- /* Gain values */
- struct ccdc_gain gain;
- /* offset */
- unsigned int ccdc_offset;
- /* horizontal flip enable */
- unsigned char horz_flip_enable;
- /*
- * enable to store the image in inverse order in memory
- * (bottom to top)
- */
- unsigned char image_invert_enable;
- /* Configurable part of raw data */
- struct ccdc_config_params_raw config_params;
-};
-
-#endif
-#endif /* DM355_CCDC_H */
diff --git a/drivers/staging/media/deprecated/vpfe_capture/dm355_ccdc_regs.h b/drivers/staging/media/deprecated/vpfe_capture/dm355_ccdc_regs.h
deleted file mode 100644
index eb381f075245..000000000000
--- a/drivers/staging/media/deprecated/vpfe_capture/dm355_ccdc_regs.h
+++ /dev/null
@@ -1,297 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2005-2009 Texas Instruments Inc
- */
-#ifndef _DM355_CCDC_REGS_H
-#define _DM355_CCDC_REGS_H
-
-/**************************************************************************\
-* Register OFFSET Definitions
-\**************************************************************************/
-#define SYNCEN 0x00
-#define MODESET 0x04
-#define HDWIDTH 0x08
-#define VDWIDTH 0x0c
-#define PPLN 0x10
-#define LPFR 0x14
-#define SPH 0x18
-#define NPH 0x1c
-#define SLV0 0x20
-#define SLV1 0x24
-#define NLV 0x28
-#define CULH 0x2c
-#define CULV 0x30
-#define HSIZE 0x34
-#define SDOFST 0x38
-#define STADRH 0x3c
-#define STADRL 0x40
-#define CLAMP 0x44
-#define DCSUB 0x48
-#define COLPTN 0x4c
-#define BLKCMP0 0x50
-#define BLKCMP1 0x54
-#define MEDFILT 0x58
-#define RYEGAIN 0x5c
-#define GRCYGAIN 0x60
-#define GBGGAIN 0x64
-#define BMGGAIN 0x68
-#define OFFSET 0x6c
-#define OUTCLIP 0x70
-#define VDINT0 0x74
-#define VDINT1 0x78
-#define RSV0 0x7c
-#define GAMMAWD 0x80
-#define REC656IF 0x84
-#define CCDCFG 0x88
-#define FMTCFG 0x8c
-#define FMTPLEN 0x90
-#define FMTSPH 0x94
-#define FMTLNH 0x98
-#define FMTSLV 0x9c
-#define FMTLNV 0xa0
-#define FMTRLEN 0xa4
-#define FMTHCNT 0xa8
-#define FMT_ADDR_PTR_B 0xac
-#define FMT_ADDR_PTR(i) (FMT_ADDR_PTR_B + (i * 4))
-#define FMTPGM_VF0 0xcc
-#define FMTPGM_VF1 0xd0
-#define FMTPGM_AP0 0xd4
-#define FMTPGM_AP1 0xd8
-#define FMTPGM_AP2 0xdc
-#define FMTPGM_AP3 0xe0
-#define FMTPGM_AP4 0xe4
-#define FMTPGM_AP5 0xe8
-#define FMTPGM_AP6 0xec
-#define FMTPGM_AP7 0xf0
-#define LSCCFG1 0xf4
-#define LSCCFG2 0xf8
-#define LSCH0 0xfc
-#define LSCV0 0x100
-#define LSCKH 0x104
-#define LSCKV 0x108
-#define LSCMEMCTL 0x10c
-#define LSCMEMD 0x110
-#define LSCMEMQ 0x114
-#define DFCCTL 0x118
-#define DFCVSAT 0x11c
-#define DFCMEMCTL 0x120
-#define DFCMEM0 0x124
-#define DFCMEM1 0x128
-#define DFCMEM2 0x12c
-#define DFCMEM3 0x130
-#define DFCMEM4 0x134
-#define CSCCTL 0x138
-#define CSCM0 0x13c
-#define CSCM1 0x140
-#define CSCM2 0x144
-#define CSCM3 0x148
-#define CSCM4 0x14c
-#define CSCM5 0x150
-#define CSCM6 0x154
-#define CSCM7 0x158
-#define DATAOFST 0x15c
-#define CCDC_REG_LAST DATAOFST
-/**************************************************************
-* Define for various register bit mask and shifts for CCDC
-*
-**************************************************************/
-#define CCDC_RAW_IP_MODE 0
-#define CCDC_VDHDOUT_INPUT 0
-#define CCDC_YCINSWP_RAW (0 << 4)
-#define CCDC_EXWEN_DISABLE 0
-#define CCDC_DATAPOL_NORMAL 0
-#define CCDC_CCDCFG_FIDMD_LATCH_VSYNC 0
-#define CCDC_CCDCFG_FIDMD_NO_LATCH_VSYNC (1 << 6)
-#define CCDC_CCDCFG_WENLOG_AND 0
-#define CCDC_CCDCFG_TRGSEL_WEN 0
-#define CCDC_CCDCFG_EXTRG_DISABLE 0
-#define CCDC_CFA_MOSAIC 0
-#define CCDC_Y8POS_SHIFT 11
-
-#define CCDC_VDC_DFCVSAT_MASK 0x3fff
-#define CCDC_DATAOFST_MASK 0x0ff
-#define CCDC_DATAOFST_H_SHIFT 0
-#define CCDC_DATAOFST_V_SHIFT 8
-#define CCDC_GAMMAWD_CFA_MASK 1
-#define CCDC_GAMMAWD_CFA_SHIFT 5
-#define CCDC_GAMMAWD_INPUT_SHIFT 2
-#define CCDC_FID_POL_MASK 1
-#define CCDC_FID_POL_SHIFT 4
-#define CCDC_HD_POL_MASK 1
-#define CCDC_HD_POL_SHIFT 3
-#define CCDC_VD_POL_MASK 1
-#define CCDC_VD_POL_SHIFT 2
-#define CCDC_VD_POL_NEGATIVE (1 << 2)
-#define CCDC_FRM_FMT_MASK 1
-#define CCDC_FRM_FMT_SHIFT 7
-#define CCDC_DATA_SZ_MASK 7
-#define CCDC_DATA_SZ_SHIFT 8
-#define CCDC_VDHDOUT_MASK 1
-#define CCDC_VDHDOUT_SHIFT 0
-#define CCDC_EXWEN_MASK 1
-#define CCDC_EXWEN_SHIFT 5
-#define CCDC_INPUT_MODE_MASK 3
-#define CCDC_INPUT_MODE_SHIFT 12
-#define CCDC_PIX_FMT_MASK 3
-#define CCDC_PIX_FMT_SHIFT 12
-#define CCDC_DATAPOL_MASK 1
-#define CCDC_DATAPOL_SHIFT 6
-#define CCDC_WEN_ENABLE (1 << 1)
-#define CCDC_VDHDEN_ENABLE (1 << 16)
-#define CCDC_LPF_ENABLE (1 << 14)
-#define CCDC_ALAW_ENABLE 1
-#define CCDC_ALAW_GAMMA_WD_MASK 7
-#define CCDC_REC656IF_BT656_EN 3
-
-#define CCDC_FMTCFG_FMTMODE_MASK 3
-#define CCDC_FMTCFG_FMTMODE_SHIFT 1
-#define CCDC_FMTCFG_LNUM_MASK 3
-#define CCDC_FMTCFG_LNUM_SHIFT 4
-#define CCDC_FMTCFG_ADDRINC_MASK 7
-#define CCDC_FMTCFG_ADDRINC_SHIFT 8
-
-#define CCDC_CCDCFG_FIDMD_SHIFT 6
-#define CCDC_CCDCFG_WENLOG_SHIFT 8
-#define CCDC_CCDCFG_TRGSEL_SHIFT 9
-#define CCDC_CCDCFG_EXTRG_SHIFT 10
-#define CCDC_CCDCFG_MSBINVI_SHIFT 13
-
-#define CCDC_HSIZE_FLIP_SHIFT 12
-#define CCDC_HSIZE_FLIP_MASK 1
-#define CCDC_HSIZE_VAL_MASK 0xFFF
-#define CCDC_SDOFST_FIELD_INTERLEAVED 0x249
-#define CCDC_SDOFST_INTERLACE_INVERSE 0x4B6D
-#define CCDC_SDOFST_INTERLACE_NORMAL 0x0B6D
-#define CCDC_SDOFST_PROGRESSIVE_INVERSE 0x4000
-#define CCDC_SDOFST_PROGRESSIVE_NORMAL 0
-#define CCDC_START_PX_HOR_MASK 0x7FFF
-#define CCDC_NUM_PX_HOR_MASK 0x7FFF
-#define CCDC_START_VER_ONE_MASK 0x7FFF
-#define CCDC_START_VER_TWO_MASK 0x7FFF
-#define CCDC_NUM_LINES_VER 0x7FFF
-
-#define CCDC_BLK_CLAMP_ENABLE (1 << 15)
-#define CCDC_BLK_SGAIN_MASK 0x1F
-#define CCDC_BLK_ST_PXL_MASK 0x1FFF
-#define CCDC_BLK_SAMPLE_LN_MASK 3
-#define CCDC_BLK_SAMPLE_LN_SHIFT 13
-
-#define CCDC_NUM_LINE_CALC_MASK 3
-#define CCDC_NUM_LINE_CALC_SHIFT 14
-
-#define CCDC_BLK_DC_SUB_MASK 0x3FFF
-#define CCDC_BLK_COMP_MASK 0xFF
-#define CCDC_BLK_COMP_GB_COMP_SHIFT 8
-#define CCDC_BLK_COMP_GR_COMP_SHIFT 0
-#define CCDC_BLK_COMP_R_COMP_SHIFT 8
-#define CCDC_LATCH_ON_VSYNC_DISABLE (1 << 15)
-#define CCDC_LATCH_ON_VSYNC_ENABLE (0 << 15)
-#define CCDC_FPC_ENABLE (1 << 15)
-#define CCDC_FPC_FPC_NUM_MASK 0x7FFF
-#define CCDC_DATA_PACK_ENABLE (1 << 11)
-#define CCDC_FMT_HORZ_FMTLNH_MASK 0x1FFF
-#define CCDC_FMT_HORZ_FMTSPH_MASK 0x1FFF
-#define CCDC_FMT_HORZ_FMTSPH_SHIFT 16
-#define CCDC_FMT_VERT_FMTLNV_MASK 0x1FFF
-#define CCDC_FMT_VERT_FMTSLV_MASK 0x1FFF
-#define CCDC_FMT_VERT_FMTSLV_SHIFT 16
-#define CCDC_VP_OUT_VERT_NUM_MASK 0x3FFF
-#define CCDC_VP_OUT_VERT_NUM_SHIFT 17
-#define CCDC_VP_OUT_HORZ_NUM_MASK 0x1FFF
-#define CCDC_VP_OUT_HORZ_NUM_SHIFT 4
-#define CCDC_VP_OUT_HORZ_ST_MASK 0xF
-
-#define CCDC_CSC_COEF_INTEG_MASK 7
-#define CCDC_CSC_COEF_DECIMAL_MASK 0x1f
-#define CCDC_CSC_COEF_INTEG_SHIFT 5
-#define CCDC_CSCM_MSB_SHIFT 8
-#define CCDC_CSC_ENABLE 1
-#define CCDC_CSC_DEC_MAX 32
-
-#define CCDC_MFILT1_SHIFT 10
-#define CCDC_MFILT2_SHIFT 8
-#define CCDC_MED_FILT_THRESH 0x3FFF
-#define CCDC_LPF_MASK 1
-#define CCDC_LPF_SHIFT 14
-#define CCDC_OFFSET_MASK 0x3FF
-#define CCDC_DATASFT_MASK 7
-#define CCDC_DATASFT_SHIFT 8
-
-#define CCDC_DF_ENABLE 1
-
-#define CCDC_FMTPLEN_P0_MASK 0xF
-#define CCDC_FMTPLEN_P1_MASK 0xF
-#define CCDC_FMTPLEN_P2_MASK 7
-#define CCDC_FMTPLEN_P3_MASK 7
-#define CCDC_FMTPLEN_P0_SHIFT 0
-#define CCDC_FMTPLEN_P1_SHIFT 4
-#define CCDC_FMTPLEN_P2_SHIFT 8
-#define CCDC_FMTPLEN_P3_SHIFT 12
-
-#define CCDC_FMTSPH_MASK 0x1FFF
-#define CCDC_FMTLNH_MASK 0x1FFF
-#define CCDC_FMTSLV_MASK 0x1FFF
-#define CCDC_FMTLNV_MASK 0x7FFF
-#define CCDC_FMTRLEN_MASK 0x1FFF
-#define CCDC_FMTHCNT_MASK 0x1FFF
-
-#define CCDC_ADP_INIT_MASK 0x1FFF
-#define CCDC_ADP_LINE_SHIFT 13
-#define CCDC_ADP_LINE_MASK 3
-#define CCDC_FMTPGN_APTR_MASK 7
-
-#define CCDC_DFCCTL_GDFCEN_MASK 1
-#define CCDC_DFCCTL_VDFCEN_MASK 1
-#define CCDC_DFCCTL_VDFC_DISABLE (0 << 4)
-#define CCDC_DFCCTL_VDFCEN_SHIFT 4
-#define CCDC_DFCCTL_VDFCSL_MASK 3
-#define CCDC_DFCCTL_VDFCSL_SHIFT 5
-#define CCDC_DFCCTL_VDFCUDA_MASK 1
-#define CCDC_DFCCTL_VDFCUDA_SHIFT 7
-#define CCDC_DFCCTL_VDFLSFT_MASK 3
-#define CCDC_DFCCTL_VDFLSFT_SHIFT 8
-#define CCDC_DFCMEMCTL_DFCMARST_MASK 1
-#define CCDC_DFCMEMCTL_DFCMARST_SHIFT 2
-#define CCDC_DFCMEMCTL_DFCMWR_MASK 1
-#define CCDC_DFCMEMCTL_DFCMWR_SHIFT 0
-#define CCDC_DFCMEMCTL_INC_ADDR (0 << 2)
-
-#define CCDC_LSCCFG_GFTSF_MASK 7
-#define CCDC_LSCCFG_GFTSF_SHIFT 1
-#define CCDC_LSCCFG_GFTINV_MASK 0xf
-#define CCDC_LSCCFG_GFTINV_SHIFT 4
-#define CCDC_LSC_GFTABLE_SEL_MASK 3
-#define CCDC_LSC_GFTABLE_EPEL_SHIFT 8
-#define CCDC_LSC_GFTABLE_OPEL_SHIFT 10
-#define CCDC_LSC_GFTABLE_EPOL_SHIFT 12
-#define CCDC_LSC_GFTABLE_OPOL_SHIFT 14
-#define CCDC_LSC_GFMODE_MASK 3
-#define CCDC_LSC_GFMODE_SHIFT 4
-#define CCDC_LSC_DISABLE 0
-#define CCDC_LSC_ENABLE 1
-#define CCDC_LSC_TABLE1_SLC 0
-#define CCDC_LSC_TABLE2_SLC 1
-#define CCDC_LSC_TABLE3_SLC 2
-#define CCDC_LSC_MEMADDR_RESET (1 << 2)
-#define CCDC_LSC_MEMADDR_INCR (0 << 2)
-#define CCDC_LSC_FRAC_MASK_T1 0xFF
-#define CCDC_LSC_INT_MASK 3
-#define CCDC_LSC_FRAC_MASK 0x3FFF
-#define CCDC_LSC_CENTRE_MASK 0x3FFF
-#define CCDC_LSC_COEF_MASK 0xff
-#define CCDC_LSC_COEFL_SHIFT 0
-#define CCDC_LSC_COEFU_SHIFT 8
-#define CCDC_GAIN_MASK 0x7FF
-#define CCDC_SYNCEN_VDHDEN_MASK (1 << 0)
-#define CCDC_SYNCEN_WEN_MASK (1 << 1)
-#define CCDC_SYNCEN_WEN_SHIFT 1
-
-/* Power on Defaults in hardware */
-#define MODESET_DEFAULT 0x200
-#define CULH_DEFAULT 0xFFFF
-#define CULV_DEFAULT 0xFF
-#define GAIN_DEFAULT 256
-#define OUTCLIP_DEFAULT 0x3FFF
-#define LSCCFG2_DEFAULT 0xE
-
-#endif
diff --git a/drivers/staging/media/deprecated/vpfe_capture/dm644x_ccdc.c b/drivers/staging/media/deprecated/vpfe_capture/dm644x_ccdc.c
deleted file mode 100644
index 4a93e5ad6415..000000000000
--- a/drivers/staging/media/deprecated/vpfe_capture/dm644x_ccdc.c
+++ /dev/null
@@ -1,879 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (C) 2006-2009 Texas Instruments Inc
- *
- * CCDC hardware module for DM6446
- * ------------------------------
- *
- * This module is for configuring CCD controller of DM6446 VPFE to capture
- * Raw yuv or Bayer RGB data from a decoder. CCDC has several modules
- * such as Defect Pixel Correction, Color Space Conversion etc to
- * pre-process the Raw Bayer RGB data, before writing it to SDRAM.
- * This file is named DM644x so that other variants such DM6443
- * may be supported using the same module.
- *
- * TODO: Test Raw bayer parameter settings and bayer capture
- * Split module parameter structure to module specific ioctl structs
- * investigate if enum used for user space type definition
- * to be replaced by #defines or integer
- */
-#include <linux/platform_device.h>
-#include <linux/uaccess.h>
-#include <linux/videodev2.h>
-#include <linux/gfp.h>
-#include <linux/err.h>
-#include <linux/module.h>
-
-#include "dm644x_ccdc.h"
-#include <media/davinci/vpss.h>
-
-#include "dm644x_ccdc_regs.h"
-#include "ccdc_hw_device.h"
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("CCDC Driver for DM6446");
-MODULE_AUTHOR("Texas Instruments");
-
-static struct ccdc_oper_config {
- struct device *dev;
- /* CCDC interface type */
- enum vpfe_hw_if_type if_type;
- /* Raw Bayer configuration */
- struct ccdc_params_raw bayer;
- /* YCbCr configuration */
- struct ccdc_params_ycbcr ycbcr;
- /* ccdc base address */
- void __iomem *base_addr;
-} ccdc_cfg = {
- /* Raw configurations */
- .bayer = {
- .pix_fmt = CCDC_PIXFMT_RAW,
- .frm_fmt = CCDC_FRMFMT_PROGRESSIVE,
- .win = CCDC_WIN_VGA,
- .fid_pol = VPFE_PINPOL_POSITIVE,
- .vd_pol = VPFE_PINPOL_POSITIVE,
- .hd_pol = VPFE_PINPOL_POSITIVE,
- .config_params = {
- .data_sz = CCDC_DATA_10BITS,
- },
- },
- .ycbcr = {
- .pix_fmt = CCDC_PIXFMT_YCBCR_8BIT,
- .frm_fmt = CCDC_FRMFMT_INTERLACED,
- .win = CCDC_WIN_PAL,
- .fid_pol = VPFE_PINPOL_POSITIVE,
- .vd_pol = VPFE_PINPOL_POSITIVE,
- .hd_pol = VPFE_PINPOL_POSITIVE,
- .bt656_enable = 1,
- .pix_order = CCDC_PIXORDER_CBYCRY,
- .buf_type = CCDC_BUFTYPE_FLD_INTERLEAVED
- },
-};
-
-#define CCDC_MAX_RAW_YUV_FORMATS 2
-
-/* Raw Bayer formats */
-static u32 ccdc_raw_bayer_pix_formats[] =
- {V4L2_PIX_FMT_SBGGR8, V4L2_PIX_FMT_SBGGR16};
-
-/* Raw YUV formats */
-static u32 ccdc_raw_yuv_pix_formats[] =
- {V4L2_PIX_FMT_UYVY, V4L2_PIX_FMT_YUYV};
-
-/* CCDC Save/Restore context */
-static u32 ccdc_ctx[CCDC_REG_END / sizeof(u32)];
-
-/* register access routines */
-static inline u32 regr(u32 offset)
-{
- return __raw_readl(ccdc_cfg.base_addr + offset);
-}
-
-static inline void regw(u32 val, u32 offset)
-{
- __raw_writel(val, ccdc_cfg.base_addr + offset);
-}
-
-static void ccdc_enable(int flag)
-{
- regw(flag, CCDC_PCR);
-}
-
-static void ccdc_enable_vport(int flag)
-{
- if (flag)
- /* enable video port */
- regw(CCDC_ENABLE_VIDEO_PORT, CCDC_FMTCFG);
- else
- regw(CCDC_DISABLE_VIDEO_PORT, CCDC_FMTCFG);
-}
-
-/*
- * ccdc_setwin()
- * This function will configure the window size
- * to be capture in CCDC reg
- */
-static void ccdc_setwin(struct v4l2_rect *image_win,
- enum ccdc_frmfmt frm_fmt,
- int ppc)
-{
- int horz_start, horz_nr_pixels;
- int vert_start, vert_nr_lines;
- int val = 0, mid_img = 0;
-
- dev_dbg(ccdc_cfg.dev, "\nStarting ccdc_setwin...");
- /*
- * ppc - per pixel count. indicates how many pixels per cell
- * output to SDRAM. example, for ycbcr, it is one y and one c, so 2.
- * raw capture this is 1
- */
- horz_start = image_win->left << (ppc - 1);
- horz_nr_pixels = (image_win->width << (ppc - 1)) - 1;
- regw((horz_start << CCDC_HORZ_INFO_SPH_SHIFT) | horz_nr_pixels,
- CCDC_HORZ_INFO);
-
- vert_start = image_win->top;
-
- if (frm_fmt == CCDC_FRMFMT_INTERLACED) {
- vert_nr_lines = (image_win->height >> 1) - 1;
- vert_start >>= 1;
- /* Since first line doesn't have any data */
- vert_start += 1;
- /* configure VDINT0 */
- val = (vert_start << CCDC_VDINT_VDINT0_SHIFT);
- regw(val, CCDC_VDINT);
-
- } else {
- /* Since first line doesn't have any data */
- vert_start += 1;
- vert_nr_lines = image_win->height - 1;
- /*
- * configure VDINT0 and VDINT1. VDINT1 will be at half
- * of image height
- */
- mid_img = vert_start + (image_win->height / 2);
- val = (vert_start << CCDC_VDINT_VDINT0_SHIFT) |
- (mid_img & CCDC_VDINT_VDINT1_MASK);
- regw(val, CCDC_VDINT);
-
- }
- regw((vert_start << CCDC_VERT_START_SLV0_SHIFT) | vert_start,
- CCDC_VERT_START);
- regw(vert_nr_lines, CCDC_VERT_LINES);
- dev_dbg(ccdc_cfg.dev, "\nEnd of ccdc_setwin...");
-}
-
-static void ccdc_readregs(void)
-{
- unsigned int val = 0;
-
- val = regr(CCDC_ALAW);
- dev_notice(ccdc_cfg.dev, "\nReading 0x%x to ALAW...\n", val);
- val = regr(CCDC_CLAMP);
- dev_notice(ccdc_cfg.dev, "\nReading 0x%x to CLAMP...\n", val);
- val = regr(CCDC_DCSUB);
- dev_notice(ccdc_cfg.dev, "\nReading 0x%x to DCSUB...\n", val);
- val = regr(CCDC_BLKCMP);
- dev_notice(ccdc_cfg.dev, "\nReading 0x%x to BLKCMP...\n", val);
- val = regr(CCDC_FPC_ADDR);
- dev_notice(ccdc_cfg.dev, "\nReading 0x%x to FPC_ADDR...\n", val);
- val = regr(CCDC_FPC);
- dev_notice(ccdc_cfg.dev, "\nReading 0x%x to FPC...\n", val);
- val = regr(CCDC_FMTCFG);
- dev_notice(ccdc_cfg.dev, "\nReading 0x%x to FMTCFG...\n", val);
- val = regr(CCDC_COLPTN);
- dev_notice(ccdc_cfg.dev, "\nReading 0x%x to COLPTN...\n", val);
- val = regr(CCDC_FMT_HORZ);
- dev_notice(ccdc_cfg.dev, "\nReading 0x%x to FMT_HORZ...\n", val);
- val = regr(CCDC_FMT_VERT);
- dev_notice(ccdc_cfg.dev, "\nReading 0x%x to FMT_VERT...\n", val);
- val = regr(CCDC_HSIZE_OFF);
- dev_notice(ccdc_cfg.dev, "\nReading 0x%x to HSIZE_OFF...\n", val);
- val = regr(CCDC_SDOFST);
- dev_notice(ccdc_cfg.dev, "\nReading 0x%x to SDOFST...\n", val);
- val = regr(CCDC_VP_OUT);
- dev_notice(ccdc_cfg.dev, "\nReading 0x%x to VP_OUT...\n", val);
- val = regr(CCDC_SYN_MODE);
- dev_notice(ccdc_cfg.dev, "\nReading 0x%x to SYN_MODE...\n", val);
- val = regr(CCDC_HORZ_INFO);
- dev_notice(ccdc_cfg.dev, "\nReading 0x%x to HORZ_INFO...\n", val);
- val = regr(CCDC_VERT_START);
- dev_notice(ccdc_cfg.dev, "\nReading 0x%x to VERT_START...\n", val);
- val = regr(CCDC_VERT_LINES);
- dev_notice(ccdc_cfg.dev, "\nReading 0x%x to VERT_LINES...\n", val);
-}
-
-static int ccdc_close(struct device *dev)
-{
- return 0;
-}
-
-/*
- * ccdc_restore_defaults()
- * This function will write defaults to all CCDC registers
- */
-static void ccdc_restore_defaults(void)
-{
- int i;
-
- /* disable CCDC */
- ccdc_enable(0);
- /* set all registers to default value */
- for (i = 4; i <= 0x94; i += 4)
- regw(0, i);
- regw(CCDC_NO_CULLING, CCDC_CULLING);
- regw(CCDC_GAMMA_BITS_11_2, CCDC_ALAW);
-}
-
-static int ccdc_open(struct device *device)
-{
- ccdc_restore_defaults();
- if (ccdc_cfg.if_type == VPFE_RAW_BAYER)
- ccdc_enable_vport(1);
- return 0;
-}
-
-static void ccdc_sbl_reset(void)
-{
- vpss_clear_wbl_overflow(VPSS_PCR_CCDC_WBL_O);
-}
-
-/*
- * ccdc_config_ycbcr()
- * This function will configure CCDC for YCbCr video capture
- */
-static void ccdc_config_ycbcr(void)
-{
- struct ccdc_params_ycbcr *params = &ccdc_cfg.ycbcr;
- u32 syn_mode;
-
- dev_dbg(ccdc_cfg.dev, "\nStarting ccdc_config_ycbcr...");
- /*
- * first restore the CCDC registers to default values
- * This is important since we assume default values to be set in
- * a lot of registers that we didn't touch
- */
- ccdc_restore_defaults();
-
- /*
- * configure pixel format, frame format, configure video frame
- * format, enable output to SDRAM, enable internal timing generator
- * and 8bit pack mode
- */
- syn_mode = (((params->pix_fmt & CCDC_SYN_MODE_INPMOD_MASK) <<
- CCDC_SYN_MODE_INPMOD_SHIFT) |
- ((params->frm_fmt & CCDC_SYN_FLDMODE_MASK) <<
- CCDC_SYN_FLDMODE_SHIFT) | CCDC_VDHDEN_ENABLE |
- CCDC_WEN_ENABLE | CCDC_DATA_PACK_ENABLE);
-
- /* setup BT.656 sync mode */
- if (params->bt656_enable) {
- regw(CCDC_REC656IF_BT656_EN, CCDC_REC656IF);
-
- /*
- * configure the FID, VD, HD pin polarity,
- * fld,hd pol positive, vd negative, 8-bit data
- */
- syn_mode |= CCDC_SYN_MODE_VD_POL_NEGATIVE;
- if (ccdc_cfg.if_type == VPFE_BT656_10BIT)
- syn_mode |= CCDC_SYN_MODE_10BITS;
- else
- syn_mode |= CCDC_SYN_MODE_8BITS;
- } else {
- /* y/c external sync mode */
- syn_mode |= (((params->fid_pol & CCDC_FID_POL_MASK) <<
- CCDC_FID_POL_SHIFT) |
- ((params->hd_pol & CCDC_HD_POL_MASK) <<
- CCDC_HD_POL_SHIFT) |
- ((params->vd_pol & CCDC_VD_POL_MASK) <<
- CCDC_VD_POL_SHIFT));
- }
- regw(syn_mode, CCDC_SYN_MODE);
-
- /* configure video window */
- ccdc_setwin(&params->win, params->frm_fmt, 2);
-
- /*
- * configure the order of y cb cr in SDRAM, and disable latch
- * internal register on vsync
- */
- if (ccdc_cfg.if_type == VPFE_BT656_10BIT)
- regw((params->pix_order << CCDC_CCDCFG_Y8POS_SHIFT) |
- CCDC_LATCH_ON_VSYNC_DISABLE | CCDC_CCDCFG_BW656_10BIT,
- CCDC_CCDCFG);
- else
- regw((params->pix_order << CCDC_CCDCFG_Y8POS_SHIFT) |
- CCDC_LATCH_ON_VSYNC_DISABLE, CCDC_CCDCFG);
-
- /*
- * configure the horizontal line offset. This should be a
- * on 32 byte boundary. So clear LSB 5 bits
- */
- regw(((params->win.width * 2 + 31) & ~0x1f), CCDC_HSIZE_OFF);
-
- /* configure the memory line offset */
- if (params->buf_type == CCDC_BUFTYPE_FLD_INTERLEAVED)
- /* two fields are interleaved in memory */
- regw(CCDC_SDOFST_FIELD_INTERLEAVED, CCDC_SDOFST);
-
- ccdc_sbl_reset();
- dev_dbg(ccdc_cfg.dev, "\nEnd of ccdc_config_ycbcr...\n");
-}
-
-static void ccdc_config_black_clamp(struct ccdc_black_clamp *bclamp)
-{
- u32 val;
-
- if (!bclamp->enable) {
- /* configure DCSub */
- val = (bclamp->dc_sub) & CCDC_BLK_DC_SUB_MASK;
- regw(val, CCDC_DCSUB);
- dev_dbg(ccdc_cfg.dev, "\nWriting 0x%x to DCSUB...\n", val);
- regw(CCDC_CLAMP_DEFAULT_VAL, CCDC_CLAMP);
- dev_dbg(ccdc_cfg.dev, "\nWriting 0x0000 to CLAMP...\n");
- return;
- }
- /*
- * Configure gain, Start pixel, No of line to be avg,
- * No of pixel/line to be avg, & Enable the Black clamping
- */
- val = ((bclamp->sgain & CCDC_BLK_SGAIN_MASK) |
- ((bclamp->start_pixel & CCDC_BLK_ST_PXL_MASK) <<
- CCDC_BLK_ST_PXL_SHIFT) |
- ((bclamp->sample_ln & CCDC_BLK_SAMPLE_LINE_MASK) <<
- CCDC_BLK_SAMPLE_LINE_SHIFT) |
- ((bclamp->sample_pixel & CCDC_BLK_SAMPLE_LN_MASK) <<
- CCDC_BLK_SAMPLE_LN_SHIFT) | CCDC_BLK_CLAMP_ENABLE);
- regw(val, CCDC_CLAMP);
- dev_dbg(ccdc_cfg.dev, "\nWriting 0x%x to CLAMP...\n", val);
- /* If Black clamping is enable then make dcsub 0 */
- regw(CCDC_DCSUB_DEFAULT_VAL, CCDC_DCSUB);
- dev_dbg(ccdc_cfg.dev, "\nWriting 0x00000000 to DCSUB...\n");
-}
-
-static void ccdc_config_black_compense(struct ccdc_black_compensation *bcomp)
-{
- u32 val;
-
- val = ((bcomp->b & CCDC_BLK_COMP_MASK) |
- ((bcomp->gb & CCDC_BLK_COMP_MASK) <<
- CCDC_BLK_COMP_GB_COMP_SHIFT) |
- ((bcomp->gr & CCDC_BLK_COMP_MASK) <<
- CCDC_BLK_COMP_GR_COMP_SHIFT) |
- ((bcomp->r & CCDC_BLK_COMP_MASK) <<
- CCDC_BLK_COMP_R_COMP_SHIFT));
- regw(val, CCDC_BLKCMP);
-}
-
-/*
- * ccdc_config_raw()
- * This function will configure CCDC for Raw capture mode
- */
-static void ccdc_config_raw(void)
-{
- struct ccdc_params_raw *params = &ccdc_cfg.bayer;
- struct ccdc_config_params_raw *config_params =
- &ccdc_cfg.bayer.config_params;
- unsigned int syn_mode = 0;
- unsigned int val;
-
- dev_dbg(ccdc_cfg.dev, "\nStarting ccdc_config_raw...");
-
- /* Reset CCDC */
- ccdc_restore_defaults();
-
- /* Disable latching function registers on VSYNC */
- regw(CCDC_LATCH_ON_VSYNC_DISABLE, CCDC_CCDCFG);
-
- /*
- * Configure the vertical sync polarity(SYN_MODE.VDPOL),
- * horizontal sync polarity (SYN_MODE.HDPOL), frame id polarity
- * (SYN_MODE.FLDPOL), frame format(progressive or interlace),
- * data size(SYNMODE.DATSIZ), &pixel format (Input mode), output
- * SDRAM, enable internal timing generator
- */
- syn_mode =
- (((params->vd_pol & CCDC_VD_POL_MASK) << CCDC_VD_POL_SHIFT) |
- ((params->hd_pol & CCDC_HD_POL_MASK) << CCDC_HD_POL_SHIFT) |
- ((params->fid_pol & CCDC_FID_POL_MASK) << CCDC_FID_POL_SHIFT) |
- ((params->frm_fmt & CCDC_FRM_FMT_MASK) << CCDC_FRM_FMT_SHIFT) |
- ((config_params->data_sz & CCDC_DATA_SZ_MASK) <<
- CCDC_DATA_SZ_SHIFT) |
- ((params->pix_fmt & CCDC_PIX_FMT_MASK) << CCDC_PIX_FMT_SHIFT) |
- CCDC_WEN_ENABLE | CCDC_VDHDEN_ENABLE);
-
- /* Enable and configure aLaw register if needed */
- if (config_params->alaw.enable) {
- val = ((config_params->alaw.gamma_wd &
- CCDC_ALAW_GAMMA_WD_MASK) | CCDC_ALAW_ENABLE);
- regw(val, CCDC_ALAW);
- dev_dbg(ccdc_cfg.dev, "\nWriting 0x%x to ALAW...\n", val);
- }
-
- /* Configure video window */
- ccdc_setwin(&params->win, params->frm_fmt, CCDC_PPC_RAW);
-
- /* Configure Black Clamp */
- ccdc_config_black_clamp(&config_params->blk_clamp);
-
- /* Configure Black level compensation */
- ccdc_config_black_compense(&config_params->blk_comp);
-
- /* If data size is 8 bit then pack the data */
- if ((config_params->data_sz == CCDC_DATA_8BITS) ||
- config_params->alaw.enable)
- syn_mode |= CCDC_DATA_PACK_ENABLE;
-
- /* disable video port */
- val = CCDC_DISABLE_VIDEO_PORT;
-
- if (config_params->data_sz == CCDC_DATA_8BITS)
- val |= (CCDC_DATA_10BITS & CCDC_FMTCFG_VPIN_MASK)
- << CCDC_FMTCFG_VPIN_SHIFT;
- else
- val |= (config_params->data_sz & CCDC_FMTCFG_VPIN_MASK)
- << CCDC_FMTCFG_VPIN_SHIFT;
- /* Write value in FMTCFG */
- regw(val, CCDC_FMTCFG);
-
- dev_dbg(ccdc_cfg.dev, "\nWriting 0x%x to FMTCFG...\n", val);
- /* Configure the color pattern according to mt9t001 sensor */
- regw(CCDC_COLPTN_VAL, CCDC_COLPTN);
-
- dev_dbg(ccdc_cfg.dev, "\nWriting 0xBB11BB11 to COLPTN...\n");
- /*
- * Configure Data formatter(Video port) pixel selection
- * (FMT_HORZ, FMT_VERT)
- */
- val = ((params->win.left & CCDC_FMT_HORZ_FMTSPH_MASK) <<
- CCDC_FMT_HORZ_FMTSPH_SHIFT) |
- (params->win.width & CCDC_FMT_HORZ_FMTLNH_MASK);
- regw(val, CCDC_FMT_HORZ);
-
- dev_dbg(ccdc_cfg.dev, "\nWriting 0x%x to FMT_HORZ...\n", val);
- val = (params->win.top & CCDC_FMT_VERT_FMTSLV_MASK)
- << CCDC_FMT_VERT_FMTSLV_SHIFT;
- if (params->frm_fmt == CCDC_FRMFMT_PROGRESSIVE)
- val |= (params->win.height) & CCDC_FMT_VERT_FMTLNV_MASK;
- else
- val |= (params->win.height >> 1) & CCDC_FMT_VERT_FMTLNV_MASK;
-
- dev_dbg(ccdc_cfg.dev, "\nparams->win.height 0x%x ...\n",
- params->win.height);
- regw(val, CCDC_FMT_VERT);
-
- dev_dbg(ccdc_cfg.dev, "\nWriting 0x%x to FMT_VERT...\n", val);
-
- dev_dbg(ccdc_cfg.dev, "\nbelow regw(val, FMT_VERT)...");
-
- /*
- * Configure Horizontal offset register. If pack 8 is enabled then
- * 1 pixel will take 1 byte
- */
- if ((config_params->data_sz == CCDC_DATA_8BITS) ||
- config_params->alaw.enable)
- regw((params->win.width + CCDC_32BYTE_ALIGN_VAL) &
- CCDC_HSIZE_OFF_MASK, CCDC_HSIZE_OFF);
- else
- /* else one pixel will take 2 byte */
- regw(((params->win.width * CCDC_TWO_BYTES_PER_PIXEL) +
- CCDC_32BYTE_ALIGN_VAL) & CCDC_HSIZE_OFF_MASK,
- CCDC_HSIZE_OFF);
-
- /* Set value for SDOFST */
- if (params->frm_fmt == CCDC_FRMFMT_INTERLACED) {
- if (params->image_invert_enable) {
- /* For intelace inverse mode */
- regw(CCDC_INTERLACED_IMAGE_INVERT, CCDC_SDOFST);
- dev_dbg(ccdc_cfg.dev, "\nWriting 0x4B6D to SDOFST..\n");
- }
-
- else {
- /* For intelace non inverse mode */
- regw(CCDC_INTERLACED_NO_IMAGE_INVERT, CCDC_SDOFST);
- dev_dbg(ccdc_cfg.dev, "\nWriting 0x0249 to SDOFST..\n");
- }
- } else if (params->frm_fmt == CCDC_FRMFMT_PROGRESSIVE) {
- regw(CCDC_PROGRESSIVE_NO_IMAGE_INVERT, CCDC_SDOFST);
- dev_dbg(ccdc_cfg.dev, "\nWriting 0x0000 to SDOFST...\n");
- }
-
- /*
- * Configure video port pixel selection (VPOUT)
- * Here -1 is to make the height value less than FMT_VERT.FMTLNV
- */
- if (params->frm_fmt == CCDC_FRMFMT_PROGRESSIVE)
- val = (((params->win.height - 1) & CCDC_VP_OUT_VERT_NUM_MASK))
- << CCDC_VP_OUT_VERT_NUM_SHIFT;
- else
- val =
- ((((params->win.height >> CCDC_INTERLACED_HEIGHT_SHIFT) -
- 1) & CCDC_VP_OUT_VERT_NUM_MASK)) <<
- CCDC_VP_OUT_VERT_NUM_SHIFT;
-
- val |= ((((params->win.width))) & CCDC_VP_OUT_HORZ_NUM_MASK)
- << CCDC_VP_OUT_HORZ_NUM_SHIFT;
- val |= (params->win.left) & CCDC_VP_OUT_HORZ_ST_MASK;
- regw(val, CCDC_VP_OUT);
-
- dev_dbg(ccdc_cfg.dev, "\nWriting 0x%x to VP_OUT...\n", val);
- regw(syn_mode, CCDC_SYN_MODE);
- dev_dbg(ccdc_cfg.dev, "\nWriting 0x%x to SYN_MODE...\n", syn_mode);
-
- ccdc_sbl_reset();
- dev_dbg(ccdc_cfg.dev, "\nend of ccdc_config_raw...");
- ccdc_readregs();
-}
-
-static int ccdc_configure(void)
-{
- if (ccdc_cfg.if_type == VPFE_RAW_BAYER)
- ccdc_config_raw();
- else
- ccdc_config_ycbcr();
- return 0;
-}
-
-static int ccdc_set_buftype(enum ccdc_buftype buf_type)
-{
- if (ccdc_cfg.if_type == VPFE_RAW_BAYER)
- ccdc_cfg.bayer.buf_type = buf_type;
- else
- ccdc_cfg.ycbcr.buf_type = buf_type;
- return 0;
-}
-
-static enum ccdc_buftype ccdc_get_buftype(void)
-{
- if (ccdc_cfg.if_type == VPFE_RAW_BAYER)
- return ccdc_cfg.bayer.buf_type;
- return ccdc_cfg.ycbcr.buf_type;
-}
-
-static int ccdc_enum_pix(u32 *pix, int i)
-{
- int ret = -EINVAL;
- if (ccdc_cfg.if_type == VPFE_RAW_BAYER) {
- if (i < ARRAY_SIZE(ccdc_raw_bayer_pix_formats)) {
- *pix = ccdc_raw_bayer_pix_formats[i];
- ret = 0;
- }
- } else {
- if (i < ARRAY_SIZE(ccdc_raw_yuv_pix_formats)) {
- *pix = ccdc_raw_yuv_pix_formats[i];
- ret = 0;
- }
- }
- return ret;
-}
-
-static int ccdc_set_pixel_format(u32 pixfmt)
-{
- if (ccdc_cfg.if_type == VPFE_RAW_BAYER) {
- ccdc_cfg.bayer.pix_fmt = CCDC_PIXFMT_RAW;
- if (pixfmt == V4L2_PIX_FMT_SBGGR8)
- ccdc_cfg.bayer.config_params.alaw.enable = 1;
- else if (pixfmt != V4L2_PIX_FMT_SBGGR16)
- return -EINVAL;
- } else {
- if (pixfmt == V4L2_PIX_FMT_YUYV)
- ccdc_cfg.ycbcr.pix_order = CCDC_PIXORDER_YCBYCR;
- else if (pixfmt == V4L2_PIX_FMT_UYVY)
- ccdc_cfg.ycbcr.pix_order = CCDC_PIXORDER_CBYCRY;
- else
- return -EINVAL;
- }
- return 0;
-}
-
-static u32 ccdc_get_pixel_format(void)
-{
- struct ccdc_a_law *alaw = &ccdc_cfg.bayer.config_params.alaw;
- u32 pixfmt;
-
- if (ccdc_cfg.if_type == VPFE_RAW_BAYER)
- if (alaw->enable)
- pixfmt = V4L2_PIX_FMT_SBGGR8;
- else
- pixfmt = V4L2_PIX_FMT_SBGGR16;
- else {
- if (ccdc_cfg.ycbcr.pix_order == CCDC_PIXORDER_YCBYCR)
- pixfmt = V4L2_PIX_FMT_YUYV;
- else
- pixfmt = V4L2_PIX_FMT_UYVY;
- }
- return pixfmt;
-}
-
-static int ccdc_set_image_window(struct v4l2_rect *win)
-{
- if (ccdc_cfg.if_type == VPFE_RAW_BAYER)
- ccdc_cfg.bayer.win = *win;
- else
- ccdc_cfg.ycbcr.win = *win;
- return 0;
-}
-
-static void ccdc_get_image_window(struct v4l2_rect *win)
-{
- if (ccdc_cfg.if_type == VPFE_RAW_BAYER)
- *win = ccdc_cfg.bayer.win;
- else
- *win = ccdc_cfg.ycbcr.win;
-}
-
-static unsigned int ccdc_get_line_length(void)
-{
- struct ccdc_config_params_raw *config_params =
- &ccdc_cfg.bayer.config_params;
- unsigned int len;
-
- if (ccdc_cfg.if_type == VPFE_RAW_BAYER) {
- if ((config_params->alaw.enable) ||
- (config_params->data_sz == CCDC_DATA_8BITS))
- len = ccdc_cfg.bayer.win.width;
- else
- len = ccdc_cfg.bayer.win.width * 2;
- } else
- len = ccdc_cfg.ycbcr.win.width * 2;
- return ALIGN(len, 32);
-}
-
-static int ccdc_set_frame_format(enum ccdc_frmfmt frm_fmt)
-{
- if (ccdc_cfg.if_type == VPFE_RAW_BAYER)
- ccdc_cfg.bayer.frm_fmt = frm_fmt;
- else
- ccdc_cfg.ycbcr.frm_fmt = frm_fmt;
- return 0;
-}
-
-static enum ccdc_frmfmt ccdc_get_frame_format(void)
-{
- if (ccdc_cfg.if_type == VPFE_RAW_BAYER)
- return ccdc_cfg.bayer.frm_fmt;
- else
- return ccdc_cfg.ycbcr.frm_fmt;
-}
-
-static int ccdc_getfid(void)
-{
- return (regr(CCDC_SYN_MODE) >> 15) & 1;
-}
-
-/* misc operations */
-static inline void ccdc_setfbaddr(unsigned long addr)
-{
- regw(addr & 0xffffffe0, CCDC_SDR_ADDR);
-}
-
-static int ccdc_set_hw_if_params(struct vpfe_hw_if_param *params)
-{
- ccdc_cfg.if_type = params->if_type;
-
- switch (params->if_type) {
- case VPFE_BT656:
- case VPFE_YCBCR_SYNC_16:
- case VPFE_YCBCR_SYNC_8:
- case VPFE_BT656_10BIT:
- ccdc_cfg.ycbcr.vd_pol = params->vdpol;
- ccdc_cfg.ycbcr.hd_pol = params->hdpol;
- break;
- default:
- /* TODO add support for raw bayer here */
- return -EINVAL;
- }
- return 0;
-}
-
-static void ccdc_save_context(void)
-{
- ccdc_ctx[CCDC_PCR >> 2] = regr(CCDC_PCR);
- ccdc_ctx[CCDC_SYN_MODE >> 2] = regr(CCDC_SYN_MODE);
- ccdc_ctx[CCDC_HD_VD_WID >> 2] = regr(CCDC_HD_VD_WID);
- ccdc_ctx[CCDC_PIX_LINES >> 2] = regr(CCDC_PIX_LINES);
- ccdc_ctx[CCDC_HORZ_INFO >> 2] = regr(CCDC_HORZ_INFO);
- ccdc_ctx[CCDC_VERT_START >> 2] = regr(CCDC_VERT_START);
- ccdc_ctx[CCDC_VERT_LINES >> 2] = regr(CCDC_VERT_LINES);
- ccdc_ctx[CCDC_CULLING >> 2] = regr(CCDC_CULLING);
- ccdc_ctx[CCDC_HSIZE_OFF >> 2] = regr(CCDC_HSIZE_OFF);
- ccdc_ctx[CCDC_SDOFST >> 2] = regr(CCDC_SDOFST);
- ccdc_ctx[CCDC_SDR_ADDR >> 2] = regr(CCDC_SDR_ADDR);
- ccdc_ctx[CCDC_CLAMP >> 2] = regr(CCDC_CLAMP);
- ccdc_ctx[CCDC_DCSUB >> 2] = regr(CCDC_DCSUB);
- ccdc_ctx[CCDC_COLPTN >> 2] = regr(CCDC_COLPTN);
- ccdc_ctx[CCDC_BLKCMP >> 2] = regr(CCDC_BLKCMP);
- ccdc_ctx[CCDC_FPC >> 2] = regr(CCDC_FPC);
- ccdc_ctx[CCDC_FPC_ADDR >> 2] = regr(CCDC_FPC_ADDR);
- ccdc_ctx[CCDC_VDINT >> 2] = regr(CCDC_VDINT);
- ccdc_ctx[CCDC_ALAW >> 2] = regr(CCDC_ALAW);
- ccdc_ctx[CCDC_REC656IF >> 2] = regr(CCDC_REC656IF);
- ccdc_ctx[CCDC_CCDCFG >> 2] = regr(CCDC_CCDCFG);
- ccdc_ctx[CCDC_FMTCFG >> 2] = regr(CCDC_FMTCFG);
- ccdc_ctx[CCDC_FMT_HORZ >> 2] = regr(CCDC_FMT_HORZ);
- ccdc_ctx[CCDC_FMT_VERT >> 2] = regr(CCDC_FMT_VERT);
- ccdc_ctx[CCDC_FMT_ADDR0 >> 2] = regr(CCDC_FMT_ADDR0);
- ccdc_ctx[CCDC_FMT_ADDR1 >> 2] = regr(CCDC_FMT_ADDR1);
- ccdc_ctx[CCDC_FMT_ADDR2 >> 2] = regr(CCDC_FMT_ADDR2);
- ccdc_ctx[CCDC_FMT_ADDR3 >> 2] = regr(CCDC_FMT_ADDR3);
- ccdc_ctx[CCDC_FMT_ADDR4 >> 2] = regr(CCDC_FMT_ADDR4);
- ccdc_ctx[CCDC_FMT_ADDR5 >> 2] = regr(CCDC_FMT_ADDR5);
- ccdc_ctx[CCDC_FMT_ADDR6 >> 2] = regr(CCDC_FMT_ADDR6);
- ccdc_ctx[CCDC_FMT_ADDR7 >> 2] = regr(CCDC_FMT_ADDR7);
- ccdc_ctx[CCDC_PRGEVEN_0 >> 2] = regr(CCDC_PRGEVEN_0);
- ccdc_ctx[CCDC_PRGEVEN_1 >> 2] = regr(CCDC_PRGEVEN_1);
- ccdc_ctx[CCDC_PRGODD_0 >> 2] = regr(CCDC_PRGODD_0);
- ccdc_ctx[CCDC_PRGODD_1 >> 2] = regr(CCDC_PRGODD_1);
- ccdc_ctx[CCDC_VP_OUT >> 2] = regr(CCDC_VP_OUT);
-}
-
-static void ccdc_restore_context(void)
-{
- regw(ccdc_ctx[CCDC_SYN_MODE >> 2], CCDC_SYN_MODE);
- regw(ccdc_ctx[CCDC_HD_VD_WID >> 2], CCDC_HD_VD_WID);
- regw(ccdc_ctx[CCDC_PIX_LINES >> 2], CCDC_PIX_LINES);
- regw(ccdc_ctx[CCDC_HORZ_INFO >> 2], CCDC_HORZ_INFO);
- regw(ccdc_ctx[CCDC_VERT_START >> 2], CCDC_VERT_START);
- regw(ccdc_ctx[CCDC_VERT_LINES >> 2], CCDC_VERT_LINES);
- regw(ccdc_ctx[CCDC_CULLING >> 2], CCDC_CULLING);
- regw(ccdc_ctx[CCDC_HSIZE_OFF >> 2], CCDC_HSIZE_OFF);
- regw(ccdc_ctx[CCDC_SDOFST >> 2], CCDC_SDOFST);
- regw(ccdc_ctx[CCDC_SDR_ADDR >> 2], CCDC_SDR_ADDR);
- regw(ccdc_ctx[CCDC_CLAMP >> 2], CCDC_CLAMP);
- regw(ccdc_ctx[CCDC_DCSUB >> 2], CCDC_DCSUB);
- regw(ccdc_ctx[CCDC_COLPTN >> 2], CCDC_COLPTN);
- regw(ccdc_ctx[CCDC_BLKCMP >> 2], CCDC_BLKCMP);
- regw(ccdc_ctx[CCDC_FPC >> 2], CCDC_FPC);
- regw(ccdc_ctx[CCDC_FPC_ADDR >> 2], CCDC_FPC_ADDR);
- regw(ccdc_ctx[CCDC_VDINT >> 2], CCDC_VDINT);
- regw(ccdc_ctx[CCDC_ALAW >> 2], CCDC_ALAW);
- regw(ccdc_ctx[CCDC_REC656IF >> 2], CCDC_REC656IF);
- regw(ccdc_ctx[CCDC_CCDCFG >> 2], CCDC_CCDCFG);
- regw(ccdc_ctx[CCDC_FMTCFG >> 2], CCDC_FMTCFG);
- regw(ccdc_ctx[CCDC_FMT_HORZ >> 2], CCDC_FMT_HORZ);
- regw(ccdc_ctx[CCDC_FMT_VERT >> 2], CCDC_FMT_VERT);
- regw(ccdc_ctx[CCDC_FMT_ADDR0 >> 2], CCDC_FMT_ADDR0);
- regw(ccdc_ctx[CCDC_FMT_ADDR1 >> 2], CCDC_FMT_ADDR1);
- regw(ccdc_ctx[CCDC_FMT_ADDR2 >> 2], CCDC_FMT_ADDR2);
- regw(ccdc_ctx[CCDC_FMT_ADDR3 >> 2], CCDC_FMT_ADDR3);
- regw(ccdc_ctx[CCDC_FMT_ADDR4 >> 2], CCDC_FMT_ADDR4);
- regw(ccdc_ctx[CCDC_FMT_ADDR5 >> 2], CCDC_FMT_ADDR5);
- regw(ccdc_ctx[CCDC_FMT_ADDR6 >> 2], CCDC_FMT_ADDR6);
- regw(ccdc_ctx[CCDC_FMT_ADDR7 >> 2], CCDC_FMT_ADDR7);
- regw(ccdc_ctx[CCDC_PRGEVEN_0 >> 2], CCDC_PRGEVEN_0);
- regw(ccdc_ctx[CCDC_PRGEVEN_1 >> 2], CCDC_PRGEVEN_1);
- regw(ccdc_ctx[CCDC_PRGODD_0 >> 2], CCDC_PRGODD_0);
- regw(ccdc_ctx[CCDC_PRGODD_1 >> 2], CCDC_PRGODD_1);
- regw(ccdc_ctx[CCDC_VP_OUT >> 2], CCDC_VP_OUT);
- regw(ccdc_ctx[CCDC_PCR >> 2], CCDC_PCR);
-}
-static const struct ccdc_hw_device ccdc_hw_dev = {
- .name = "DM6446 CCDC",
- .owner = THIS_MODULE,
- .hw_ops = {
- .open = ccdc_open,
- .close = ccdc_close,
- .reset = ccdc_sbl_reset,
- .enable = ccdc_enable,
- .set_hw_if_params = ccdc_set_hw_if_params,
- .configure = ccdc_configure,
- .set_buftype = ccdc_set_buftype,
- .get_buftype = ccdc_get_buftype,
- .enum_pix = ccdc_enum_pix,
- .set_pixel_format = ccdc_set_pixel_format,
- .get_pixel_format = ccdc_get_pixel_format,
- .set_frame_format = ccdc_set_frame_format,
- .get_frame_format = ccdc_get_frame_format,
- .set_image_window = ccdc_set_image_window,
- .get_image_window = ccdc_get_image_window,
- .get_line_length = ccdc_get_line_length,
- .setfbaddr = ccdc_setfbaddr,
- .getfid = ccdc_getfid,
- },
-};
-
-static int dm644x_ccdc_probe(struct platform_device *pdev)
-{
- struct resource *res;
- int status = 0;
-
- /*
- * first try to register with vpfe. If not correct platform, then we
- * don't have to iomap
- */
- status = vpfe_register_ccdc_device(&ccdc_hw_dev);
- if (status < 0)
- return status;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- status = -ENODEV;
- goto fail_nores;
- }
-
- res = request_mem_region(res->start, resource_size(res), res->name);
- if (!res) {
- status = -EBUSY;
- goto fail_nores;
- }
-
- ccdc_cfg.base_addr = ioremap(res->start, resource_size(res));
- if (!ccdc_cfg.base_addr) {
- status = -ENOMEM;
- goto fail_nomem;
- }
-
- ccdc_cfg.dev = &pdev->dev;
- printk(KERN_NOTICE "%s is registered with vpfe.\n", ccdc_hw_dev.name);
- return 0;
-fail_nomem:
- release_mem_region(res->start, resource_size(res));
-fail_nores:
- vpfe_unregister_ccdc_device(&ccdc_hw_dev);
- return status;
-}
-
-static int dm644x_ccdc_remove(struct platform_device *pdev)
-{
- struct resource *res;
-
- iounmap(ccdc_cfg.base_addr);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(res->start, resource_size(res));
- vpfe_unregister_ccdc_device(&ccdc_hw_dev);
- return 0;
-}
-
-static int dm644x_ccdc_suspend(struct device *dev)
-{
- /* Save CCDC context */
- ccdc_save_context();
- /* Disable CCDC */
- ccdc_enable(0);
-
- return 0;
-}
-
-static int dm644x_ccdc_resume(struct device *dev)
-{
- /* Restore CCDC context */
- ccdc_restore_context();
-
- return 0;
-}
-
-static const struct dev_pm_ops dm644x_ccdc_pm_ops = {
- .suspend = dm644x_ccdc_suspend,
- .resume = dm644x_ccdc_resume,
-};
-
-static struct platform_driver dm644x_ccdc_driver = {
- .driver = {
- .name = "dm644x_ccdc",
- .pm = &dm644x_ccdc_pm_ops,
- },
- .remove = dm644x_ccdc_remove,
- .probe = dm644x_ccdc_probe,
-};
-
-module_platform_driver(dm644x_ccdc_driver);
diff --git a/drivers/staging/media/deprecated/vpfe_capture/dm644x_ccdc.h b/drivers/staging/media/deprecated/vpfe_capture/dm644x_ccdc.h
deleted file mode 100644
index c20dba3d76d6..000000000000
--- a/drivers/staging/media/deprecated/vpfe_capture/dm644x_ccdc.h
+++ /dev/null
@@ -1,171 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2006-2009 Texas Instruments Inc
- */
-#ifndef _DM644X_CCDC_H
-#define _DM644X_CCDC_H
-#include <media/davinci/ccdc_types.h>
-#include <media/davinci/vpfe_types.h>
-
-/* enum for No of pixel per line to be avg. in Black Clamping*/
-enum ccdc_sample_length {
- CCDC_SAMPLE_1PIXELS,
- CCDC_SAMPLE_2PIXELS,
- CCDC_SAMPLE_4PIXELS,
- CCDC_SAMPLE_8PIXELS,
- CCDC_SAMPLE_16PIXELS
-};
-
-/* enum for No of lines in Black Clamping */
-enum ccdc_sample_line {
- CCDC_SAMPLE_1LINES,
- CCDC_SAMPLE_2LINES,
- CCDC_SAMPLE_4LINES,
- CCDC_SAMPLE_8LINES,
- CCDC_SAMPLE_16LINES
-};
-
-/* enum for Alaw gamma width */
-enum ccdc_gamma_width {
- CCDC_GAMMA_BITS_15_6, /* use bits 15-6 for gamma */
- CCDC_GAMMA_BITS_14_5,
- CCDC_GAMMA_BITS_13_4,
- CCDC_GAMMA_BITS_12_3,
- CCDC_GAMMA_BITS_11_2,
- CCDC_GAMMA_BITS_10_1,
- CCDC_GAMMA_BITS_09_0 /* use bits 9-0 for gamma */
-};
-
-/* returns the highest bit used for the gamma */
-static inline u8 ccdc_gamma_width_max_bit(enum ccdc_gamma_width width)
-{
- return 15 - width;
-}
-
-enum ccdc_data_size {
- CCDC_DATA_16BITS,
- CCDC_DATA_15BITS,
- CCDC_DATA_14BITS,
- CCDC_DATA_13BITS,
- CCDC_DATA_12BITS,
- CCDC_DATA_11BITS,
- CCDC_DATA_10BITS,
- CCDC_DATA_8BITS
-};
-
-/* returns the highest bit used for this data size */
-static inline u8 ccdc_data_size_max_bit(enum ccdc_data_size sz)
-{
- return sz == CCDC_DATA_8BITS ? 7 : 15 - sz;
-}
-
-/* structure for ALaw */
-struct ccdc_a_law {
- /* Enable/disable A-Law */
- unsigned char enable;
- /* Gamma Width Input */
- enum ccdc_gamma_width gamma_wd;
-};
-
-/* structure for Black Clamping */
-struct ccdc_black_clamp {
- unsigned char enable;
- /* only if bClampEnable is TRUE */
- enum ccdc_sample_length sample_pixel;
- /* only if bClampEnable is TRUE */
- enum ccdc_sample_line sample_ln;
- /* only if bClampEnable is TRUE */
- unsigned short start_pixel;
- /* only if bClampEnable is TRUE */
- unsigned short sgain;
- /* only if bClampEnable is FALSE */
- unsigned short dc_sub;
-};
-
-/* structure for Black Level Compensation */
-struct ccdc_black_compensation {
- /* Constant value to subtract from Red component */
- char r;
- /* Constant value to subtract from Gr component */
- char gr;
- /* Constant value to subtract from Blue component */
- char b;
- /* Constant value to subtract from Gb component */
- char gb;
-};
-
-/* Structure for CCDC configuration parameters for raw capture mode passed
- * by application
- */
-struct ccdc_config_params_raw {
- /* data size value from 8 to 16 bits */
- enum ccdc_data_size data_sz;
- /* Structure for Optional A-Law */
- struct ccdc_a_law alaw;
- /* Structure for Optical Black Clamp */
- struct ccdc_black_clamp blk_clamp;
- /* Structure for Black Compensation */
- struct ccdc_black_compensation blk_comp;
-};
-
-
-#ifdef __KERNEL__
-#include <linux/io.h>
-/* Define to enable/disable video port */
-#define FP_NUM_BYTES 4
-/* Define for extra pixel/line and extra lines/frame */
-#define NUM_EXTRAPIXELS 8
-#define NUM_EXTRALINES 8
-
-/* settings for commonly used video formats */
-#define CCDC_WIN_PAL {0, 0, 720, 576}
-/* ntsc square pixel */
-#define CCDC_WIN_VGA {0, 0, (640 + NUM_EXTRAPIXELS), (480 + NUM_EXTRALINES)}
-
-/* Structure for CCDC configuration parameters for raw capture mode */
-struct ccdc_params_raw {
- /* pixel format */
- enum ccdc_pixfmt pix_fmt;
- /* progressive or interlaced frame */
- enum ccdc_frmfmt frm_fmt;
- /* video window */
- struct v4l2_rect win;
- /* field id polarity */
- enum vpfe_pin_pol fid_pol;
- /* vertical sync polarity */
- enum vpfe_pin_pol vd_pol;
- /* horizontal sync polarity */
- enum vpfe_pin_pol hd_pol;
- /* interleaved or separated fields */
- enum ccdc_buftype buf_type;
- /*
- * enable to store the image in inverse
- * order in memory(bottom to top)
- */
- unsigned char image_invert_enable;
- /* configurable parameters */
- struct ccdc_config_params_raw config_params;
-};
-
-struct ccdc_params_ycbcr {
- /* pixel format */
- enum ccdc_pixfmt pix_fmt;
- /* progressive or interlaced frame */
- enum ccdc_frmfmt frm_fmt;
- /* video window */
- struct v4l2_rect win;
- /* field id polarity */
- enum vpfe_pin_pol fid_pol;
- /* vertical sync polarity */
- enum vpfe_pin_pol vd_pol;
- /* horizontal sync polarity */
- enum vpfe_pin_pol hd_pol;
- /* enable BT.656 embedded sync mode */
- int bt656_enable;
- /* cb:y:cr:y or y:cb:y:cr in memory */
- enum ccdc_pixorder pix_order;
- /* interleaved or separated fields */
- enum ccdc_buftype buf_type;
-};
-#endif
-#endif /* _DM644X_CCDC_H */
diff --git a/drivers/staging/media/deprecated/vpfe_capture/dm644x_ccdc_regs.h b/drivers/staging/media/deprecated/vpfe_capture/dm644x_ccdc_regs.h
deleted file mode 100644
index c4894f6a254e..000000000000
--- a/drivers/staging/media/deprecated/vpfe_capture/dm644x_ccdc_regs.h
+++ /dev/null
@@ -1,140 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2006-2009 Texas Instruments Inc
- */
-#ifndef _DM644X_CCDC_REGS_H
-#define _DM644X_CCDC_REGS_H
-
-/**************************************************************************\
-* Register OFFSET Definitions
-\**************************************************************************/
-#define CCDC_PID 0x0
-#define CCDC_PCR 0x4
-#define CCDC_SYN_MODE 0x8
-#define CCDC_HD_VD_WID 0xc
-#define CCDC_PIX_LINES 0x10
-#define CCDC_HORZ_INFO 0x14
-#define CCDC_VERT_START 0x18
-#define CCDC_VERT_LINES 0x1c
-#define CCDC_CULLING 0x20
-#define CCDC_HSIZE_OFF 0x24
-#define CCDC_SDOFST 0x28
-#define CCDC_SDR_ADDR 0x2c
-#define CCDC_CLAMP 0x30
-#define CCDC_DCSUB 0x34
-#define CCDC_COLPTN 0x38
-#define CCDC_BLKCMP 0x3c
-#define CCDC_FPC 0x40
-#define CCDC_FPC_ADDR 0x44
-#define CCDC_VDINT 0x48
-#define CCDC_ALAW 0x4c
-#define CCDC_REC656IF 0x50
-#define CCDC_CCDCFG 0x54
-#define CCDC_FMTCFG 0x58
-#define CCDC_FMT_HORZ 0x5c
-#define CCDC_FMT_VERT 0x60
-#define CCDC_FMT_ADDR0 0x64
-#define CCDC_FMT_ADDR1 0x68
-#define CCDC_FMT_ADDR2 0x6c
-#define CCDC_FMT_ADDR3 0x70
-#define CCDC_FMT_ADDR4 0x74
-#define CCDC_FMT_ADDR5 0x78
-#define CCDC_FMT_ADDR6 0x7c
-#define CCDC_FMT_ADDR7 0x80
-#define CCDC_PRGEVEN_0 0x84
-#define CCDC_PRGEVEN_1 0x88
-#define CCDC_PRGODD_0 0x8c
-#define CCDC_PRGODD_1 0x90
-#define CCDC_VP_OUT 0x94
-#define CCDC_REG_END 0x98
-
-/***************************************************************
-* Define for various register bit mask and shifts for CCDC
-****************************************************************/
-#define CCDC_FID_POL_MASK 1
-#define CCDC_FID_POL_SHIFT 4
-#define CCDC_HD_POL_MASK 1
-#define CCDC_HD_POL_SHIFT 3
-#define CCDC_VD_POL_MASK 1
-#define CCDC_VD_POL_SHIFT 2
-#define CCDC_HSIZE_OFF_MASK 0xffffffe0
-#define CCDC_32BYTE_ALIGN_VAL 31
-#define CCDC_FRM_FMT_MASK 0x1
-#define CCDC_FRM_FMT_SHIFT 7
-#define CCDC_DATA_SZ_MASK 7
-#define CCDC_DATA_SZ_SHIFT 8
-#define CCDC_PIX_FMT_MASK 3
-#define CCDC_PIX_FMT_SHIFT 12
-#define CCDC_VP2SDR_DISABLE 0xFFFBFFFF
-#define CCDC_WEN_ENABLE BIT(17)
-#define CCDC_SDR2RSZ_DISABLE 0xFFF7FFFF
-#define CCDC_VDHDEN_ENABLE BIT(16)
-#define CCDC_LPF_ENABLE BIT(14)
-#define CCDC_ALAW_ENABLE BIT(3)
-#define CCDC_ALAW_GAMMA_WD_MASK 7
-#define CCDC_BLK_CLAMP_ENABLE BIT(31)
-#define CCDC_BLK_SGAIN_MASK 0x1F
-#define CCDC_BLK_ST_PXL_MASK 0x7FFF
-#define CCDC_BLK_ST_PXL_SHIFT 10
-#define CCDC_BLK_SAMPLE_LN_MASK 7
-#define CCDC_BLK_SAMPLE_LN_SHIFT 28
-#define CCDC_BLK_SAMPLE_LINE_MASK 7
-#define CCDC_BLK_SAMPLE_LINE_SHIFT 25
-#define CCDC_BLK_DC_SUB_MASK 0x03FFF
-#define CCDC_BLK_COMP_MASK 0xFF
-#define CCDC_BLK_COMP_GB_COMP_SHIFT 8
-#define CCDC_BLK_COMP_GR_COMP_SHIFT 16
-#define CCDC_BLK_COMP_R_COMP_SHIFT 24
-#define CCDC_LATCH_ON_VSYNC_DISABLE BIT(15)
-#define CCDC_FPC_ENABLE BIT(15)
-#define CCDC_FPC_DISABLE 0
-#define CCDC_FPC_FPC_NUM_MASK 0x7FFF
-#define CCDC_DATA_PACK_ENABLE BIT(11)
-#define CCDC_FMTCFG_VPIN_MASK 7
-#define CCDC_FMTCFG_VPIN_SHIFT 12
-#define CCDC_FMT_HORZ_FMTLNH_MASK 0x1FFF
-#define CCDC_FMT_HORZ_FMTSPH_MASK 0x1FFF
-#define CCDC_FMT_HORZ_FMTSPH_SHIFT 16
-#define CCDC_FMT_VERT_FMTLNV_MASK 0x1FFF
-#define CCDC_FMT_VERT_FMTSLV_MASK 0x1FFF
-#define CCDC_FMT_VERT_FMTSLV_SHIFT 16
-#define CCDC_VP_OUT_VERT_NUM_MASK 0x3FFF
-#define CCDC_VP_OUT_VERT_NUM_SHIFT 17
-#define CCDC_VP_OUT_HORZ_NUM_MASK 0x1FFF
-#define CCDC_VP_OUT_HORZ_NUM_SHIFT 4
-#define CCDC_VP_OUT_HORZ_ST_MASK 0xF
-#define CCDC_HORZ_INFO_SPH_SHIFT 16
-#define CCDC_VERT_START_SLV0_SHIFT 16
-#define CCDC_VDINT_VDINT0_SHIFT 16
-#define CCDC_VDINT_VDINT1_MASK 0xFFFF
-#define CCDC_PPC_RAW 1
-#define CCDC_DCSUB_DEFAULT_VAL 0
-#define CCDC_CLAMP_DEFAULT_VAL 0
-#define CCDC_ENABLE_VIDEO_PORT 0x8000
-#define CCDC_DISABLE_VIDEO_PORT 0
-#define CCDC_COLPTN_VAL 0xBB11BB11
-#define CCDC_TWO_BYTES_PER_PIXEL 2
-#define CCDC_INTERLACED_IMAGE_INVERT 0x4B6D
-#define CCDC_INTERLACED_NO_IMAGE_INVERT 0x0249
-#define CCDC_PROGRESSIVE_IMAGE_INVERT 0x4000
-#define CCDC_PROGRESSIVE_NO_IMAGE_INVERT 0
-#define CCDC_INTERLACED_HEIGHT_SHIFT 1
-#define CCDC_SYN_MODE_INPMOD_SHIFT 12
-#define CCDC_SYN_MODE_INPMOD_MASK 3
-#define CCDC_SYN_MODE_8BITS (7 << 8)
-#define CCDC_SYN_MODE_10BITS (6 << 8)
-#define CCDC_SYN_MODE_11BITS (5 << 8)
-#define CCDC_SYN_MODE_12BITS (4 << 8)
-#define CCDC_SYN_MODE_13BITS (3 << 8)
-#define CCDC_SYN_MODE_14BITS (2 << 8)
-#define CCDC_SYN_MODE_15BITS (1 << 8)
-#define CCDC_SYN_MODE_16BITS (0 << 8)
-#define CCDC_SYN_FLDMODE_MASK 1
-#define CCDC_SYN_FLDMODE_SHIFT 7
-#define CCDC_REC656IF_BT656_EN 3
-#define CCDC_SYN_MODE_VD_POL_NEGATIVE BIT(2)
-#define CCDC_CCDCFG_Y8POS_SHIFT 11
-#define CCDC_CCDCFG_BW656_10BIT BIT(5)
-#define CCDC_SDOFST_FIELD_INTERLEAVED 0x249
-#define CCDC_NO_CULLING 0xffff00ff
-#endif
diff --git a/drivers/staging/media/deprecated/vpfe_capture/isif.c b/drivers/staging/media/deprecated/vpfe_capture/isif.c
deleted file mode 100644
index 4059891c2824..000000000000
--- a/drivers/staging/media/deprecated/vpfe_capture/isif.c
+++ /dev/null
@@ -1,1127 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (C) 2008-2009 Texas Instruments Inc
- *
- * Image Sensor Interface (ISIF) driver
- *
- * This driver is for configuring the ISIF IP available on DM365 or any other
- * TI SoCs. This is used for capturing yuv or bayer video or image data
- * from a decoder or sensor. This IP is similar to the CCDC IP on DM355
- * and DM6446, but with enhanced or additional ip blocks. The driver
- * configures the ISIF upon commands from the vpfe bridge driver through
- * ccdc_hw_device interface.
- *
- * TODO: 1) Raw bayer parameter settings and bayer capture
- * 2) Add support for control ioctl
- */
-#include <linux/delay.h>
-#include <linux/platform_device.h>
-#include <linux/uaccess.h>
-#include <linux/io.h>
-#include <linux/videodev2.h>
-#include <linux/err.h>
-#include <linux/module.h>
-
-#include "isif.h"
-#include <media/davinci/vpss.h>
-
-#include "isif_regs.h"
-#include "ccdc_hw_device.h"
-
-/* Defaults for module configuration parameters */
-static const struct isif_config_params_raw isif_config_defaults = {
- .linearize = {
- .en = 0,
- .corr_shft = ISIF_NO_SHIFT,
- .scale_fact = {1, 0},
- },
- .df_csc = {
- .df_or_csc = 0,
- .csc = {
- .en = 0,
- },
- },
- .dfc = {
- .en = 0,
- },
- .bclamp = {
- .en = 0,
- },
- .gain_offset = {
- .gain = {
- .r_ye = {1, 0},
- .gr_cy = {1, 0},
- .gb_g = {1, 0},
- .b_mg = {1, 0},
- },
- },
- .culling = {
- .hcpat_odd = 0xff,
- .hcpat_even = 0xff,
- .vcpat = 0xff,
- },
- .compress = {
- .alg = ISIF_ALAW,
- },
-};
-
-/* ISIF operation configuration */
-static struct isif_oper_config {
- struct device *dev;
- enum vpfe_hw_if_type if_type;
- struct isif_ycbcr_config ycbcr;
- struct isif_params_raw bayer;
- enum isif_data_pack data_pack;
- /* ISIF base address */
- void __iomem *base_addr;
- /* ISIF Linear Table 0 */
- void __iomem *linear_tbl0_addr;
- /* ISIF Linear Table 1 */
- void __iomem *linear_tbl1_addr;
-} isif_cfg = {
- .ycbcr = {
- .pix_fmt = CCDC_PIXFMT_YCBCR_8BIT,
- .frm_fmt = CCDC_FRMFMT_INTERLACED,
- .win = ISIF_WIN_NTSC,
- .fid_pol = VPFE_PINPOL_POSITIVE,
- .vd_pol = VPFE_PINPOL_POSITIVE,
- .hd_pol = VPFE_PINPOL_POSITIVE,
- .pix_order = CCDC_PIXORDER_CBYCRY,
- .buf_type = CCDC_BUFTYPE_FLD_INTERLEAVED,
- },
- .bayer = {
- .pix_fmt = CCDC_PIXFMT_RAW,
- .frm_fmt = CCDC_FRMFMT_PROGRESSIVE,
- .win = ISIF_WIN_VGA,
- .fid_pol = VPFE_PINPOL_POSITIVE,
- .vd_pol = VPFE_PINPOL_POSITIVE,
- .hd_pol = VPFE_PINPOL_POSITIVE,
- .gain = {
- .r_ye = {1, 0},
- .gr_cy = {1, 0},
- .gb_g = {1, 0},
- .b_mg = {1, 0},
- },
- .cfa_pat = ISIF_CFA_PAT_MOSAIC,
- .data_msb = ISIF_BIT_MSB_11,
- .config_params = {
- .data_shift = ISIF_NO_SHIFT,
- .col_pat_field0 = {
- .olop = ISIF_GREEN_BLUE,
- .olep = ISIF_BLUE,
- .elop = ISIF_RED,
- .elep = ISIF_GREEN_RED,
- },
- .col_pat_field1 = {
- .olop = ISIF_GREEN_BLUE,
- .olep = ISIF_BLUE,
- .elop = ISIF_RED,
- .elep = ISIF_GREEN_RED,
- },
- .test_pat_gen = 0,
- },
- },
- .data_pack = ISIF_DATA_PACK8,
-};
-
-/* Raw Bayer formats */
-static const u32 isif_raw_bayer_pix_formats[] = {
- V4L2_PIX_FMT_SBGGR8, V4L2_PIX_FMT_SBGGR16};
-
-/* Raw YUV formats */
-static const u32 isif_raw_yuv_pix_formats[] = {
- V4L2_PIX_FMT_UYVY, V4L2_PIX_FMT_YUYV};
-
-/* register access routines */
-static inline u32 regr(u32 offset)
-{
- return __raw_readl(isif_cfg.base_addr + offset);
-}
-
-static inline void regw(u32 val, u32 offset)
-{
- __raw_writel(val, isif_cfg.base_addr + offset);
-}
-
-/* reg_modify() - read, modify and write register */
-static inline u32 reg_modify(u32 mask, u32 val, u32 offset)
-{
- u32 new_val = (regr(offset) & ~mask) | (val & mask);
-
- regw(new_val, offset);
- return new_val;
-}
-
-static inline void regw_lin_tbl(u32 val, u32 offset, int i)
-{
- if (!i)
- __raw_writel(val, isif_cfg.linear_tbl0_addr + offset);
- else
- __raw_writel(val, isif_cfg.linear_tbl1_addr + offset);
-}
-
-static void isif_disable_all_modules(void)
-{
- /* disable BC */
- regw(0, CLAMPCFG);
- /* disable vdfc */
- regw(0, DFCCTL);
- /* disable CSC */
- regw(0, CSCCTL);
- /* disable linearization */
- regw(0, LINCFG0);
- /* disable other modules here as they are supported */
-}
-
-static void isif_enable(int en)
-{
- if (!en) {
- /* Before disable isif, disable all ISIF modules */
- isif_disable_all_modules();
- /*
- * wait for next VD. Assume lowest scan rate is 12 Hz. So
- * 100 msec delay is good enough
- */
- msleep(100);
- }
- reg_modify(ISIF_SYNCEN_VDHDEN_MASK, en, SYNCEN);
-}
-
-static void isif_enable_output_to_sdram(int en)
-{
- reg_modify(ISIF_SYNCEN_WEN_MASK, en << ISIF_SYNCEN_WEN_SHIFT, SYNCEN);
-}
-
-static void isif_config_culling(struct isif_cul *cul)
-{
- u32 val;
-
- /* Horizontal pattern */
- val = (cul->hcpat_even << CULL_PAT_EVEN_LINE_SHIFT) | cul->hcpat_odd;
- regw(val, CULH);
-
- /* vertical pattern */
- regw(cul->vcpat, CULV);
-
- /* LPF */
- reg_modify(ISIF_LPF_MASK << ISIF_LPF_SHIFT,
- cul->en_lpf << ISIF_LPF_SHIFT, MODESET);
-}
-
-static void isif_config_gain_offset(void)
-{
- struct isif_gain_offsets_adj *gain_off_p =
- &isif_cfg.bayer.config_params.gain_offset;
- u32 val;
-
- val = (!!gain_off_p->gain_sdram_en << GAIN_SDRAM_EN_SHIFT) |
- (!!gain_off_p->gain_ipipe_en << GAIN_IPIPE_EN_SHIFT) |
- (!!gain_off_p->gain_h3a_en << GAIN_H3A_EN_SHIFT) |
- (!!gain_off_p->offset_sdram_en << OFST_SDRAM_EN_SHIFT) |
- (!!gain_off_p->offset_ipipe_en << OFST_IPIPE_EN_SHIFT) |
- (!!gain_off_p->offset_h3a_en << OFST_H3A_EN_SHIFT);
-
- reg_modify(GAIN_OFFSET_EN_MASK, val, CGAMMAWD);
-
- val = (gain_off_p->gain.r_ye.integer << GAIN_INTEGER_SHIFT) |
- gain_off_p->gain.r_ye.decimal;
- regw(val, CRGAIN);
-
- val = (gain_off_p->gain.gr_cy.integer << GAIN_INTEGER_SHIFT) |
- gain_off_p->gain.gr_cy.decimal;
- regw(val, CGRGAIN);
-
- val = (gain_off_p->gain.gb_g.integer << GAIN_INTEGER_SHIFT) |
- gain_off_p->gain.gb_g.decimal;
- regw(val, CGBGAIN);
-
- val = (gain_off_p->gain.b_mg.integer << GAIN_INTEGER_SHIFT) |
- gain_off_p->gain.b_mg.decimal;
- regw(val, CBGAIN);
-
- regw(gain_off_p->offset, COFSTA);
-}
-
-static void isif_restore_defaults(void)
-{
- enum vpss_ccdc_source_sel source = VPSS_CCDCIN;
-
- dev_dbg(isif_cfg.dev, "\nstarting isif_restore_defaults...");
- isif_cfg.bayer.config_params = isif_config_defaults;
- /* Enable clock to ISIF, IPIPEIF and BL */
- vpss_enable_clock(VPSS_CCDC_CLOCK, 1);
- vpss_enable_clock(VPSS_IPIPEIF_CLOCK, 1);
- vpss_enable_clock(VPSS_BL_CLOCK, 1);
- /* Set default offset and gain */
- isif_config_gain_offset();
- vpss_select_ccdc_source(source);
- dev_dbg(isif_cfg.dev, "\nEnd of isif_restore_defaults...");
-}
-
-static int isif_open(struct device *device)
-{
- isif_restore_defaults();
- return 0;
-}
-
-/* This function will configure the window size to be capture in ISIF reg */
-static void isif_setwin(struct v4l2_rect *image_win,
- enum ccdc_frmfmt frm_fmt, int ppc)
-{
- int horz_start, horz_nr_pixels;
- int vert_start, vert_nr_lines;
- int mid_img = 0;
-
- dev_dbg(isif_cfg.dev, "\nStarting isif_setwin...");
- /*
- * ppc - per pixel count. indicates how many pixels per cell
- * output to SDRAM. example, for ycbcr, it is one y and one c, so 2.
- * raw capture this is 1
- */
- horz_start = image_win->left << (ppc - 1);
- horz_nr_pixels = ((image_win->width) << (ppc - 1)) - 1;
-
- /* Writing the horizontal info into the registers */
- regw(horz_start & START_PX_HOR_MASK, SPH);
- regw(horz_nr_pixels & NUM_PX_HOR_MASK, LNH);
- vert_start = image_win->top;
-
- if (frm_fmt == CCDC_FRMFMT_INTERLACED) {
- vert_nr_lines = (image_win->height >> 1) - 1;
- vert_start >>= 1;
- /* To account for VD since line 0 doesn't have any data */
- vert_start += 1;
- } else {
- /* To account for VD since line 0 doesn't have any data */
- vert_start += 1;
- vert_nr_lines = image_win->height - 1;
- /* configure VDINT0 and VDINT1 */
- mid_img = vert_start + (image_win->height / 2);
- regw(mid_img, VDINT1);
- }
-
- regw(0, VDINT0);
- regw(vert_start & START_VER_ONE_MASK, SLV0);
- regw(vert_start & START_VER_TWO_MASK, SLV1);
- regw(vert_nr_lines & NUM_LINES_VER, LNV);
-}
-
-static void isif_config_bclamp(struct isif_black_clamp *bc)
-{
- u32 val;
-
- /*
- * DC Offset is always added to image data irrespective of bc enable
- * status
- */
- regw(bc->dc_offset, CLDCOFST);
-
- if (bc->en) {
- val = bc->bc_mode_color << ISIF_BC_MODE_COLOR_SHIFT;
-
- /* Enable BC and horizontal clamp calculation parameters */
- val = val | 1 | (bc->horz.mode << ISIF_HORZ_BC_MODE_SHIFT);
-
- regw(val, CLAMPCFG);
-
- if (bc->horz.mode != ISIF_HORZ_BC_DISABLE) {
- /*
- * Window count for calculation
- * Base window selection
- * pixel limit
- * Horizontal size of window
- * vertical size of the window
- * Horizontal start position of the window
- * Vertical start position of the window
- */
- val = bc->horz.win_count_calc |
- ((!!bc->horz.base_win_sel_calc) <<
- ISIF_HORZ_BC_WIN_SEL_SHIFT) |
- ((!!bc->horz.clamp_pix_limit) <<
- ISIF_HORZ_BC_PIX_LIMIT_SHIFT) |
- (bc->horz.win_h_sz_calc <<
- ISIF_HORZ_BC_WIN_H_SIZE_SHIFT) |
- (bc->horz.win_v_sz_calc <<
- ISIF_HORZ_BC_WIN_V_SIZE_SHIFT);
- regw(val, CLHWIN0);
-
- regw(bc->horz.win_start_h_calc, CLHWIN1);
- regw(bc->horz.win_start_v_calc, CLHWIN2);
- }
-
- /* vertical clamp calculation parameters */
-
- /* Reset clamp value sel for previous line */
- val |=
- (bc->vert.reset_val_sel << ISIF_VERT_BC_RST_VAL_SEL_SHIFT) |
- (bc->vert.line_ave_coef << ISIF_VERT_BC_LINE_AVE_COEF_SHIFT);
- regw(val, CLVWIN0);
-
- /* Optical Black horizontal start position */
- regw(bc->vert.ob_start_h, CLVWIN1);
- /* Optical Black vertical start position */
- regw(bc->vert.ob_start_v, CLVWIN2);
- /* Optical Black vertical size for calculation */
- regw(bc->vert.ob_v_sz_calc, CLVWIN3);
- /* Vertical start position for BC subtraction */
- regw(bc->vert_start_sub, CLSV);
- }
-}
-
-static void isif_config_linearization(struct isif_linearize *linearize)
-{
- u32 val, i;
-
- if (!linearize->en) {
- regw(0, LINCFG0);
- return;
- }
-
- /* shift value for correction & enable linearization (set lsb) */
- val = (linearize->corr_shft << ISIF_LIN_CORRSFT_SHIFT) | 1;
- regw(val, LINCFG0);
-
- /* Scale factor */
- val = ((!!linearize->scale_fact.integer) <<
- ISIF_LIN_SCALE_FACT_INTEG_SHIFT) |
- linearize->scale_fact.decimal;
- regw(val, LINCFG1);
-
- for (i = 0; i < ISIF_LINEAR_TAB_SIZE; i++) {
- if (i % 2)
- regw_lin_tbl(linearize->table[i], ((i >> 1) << 2), 1);
- else
- regw_lin_tbl(linearize->table[i], ((i >> 1) << 2), 0);
- }
-}
-
-static int isif_config_dfc(struct isif_dfc *vdfc)
-{
- /* initialize retries to loop for max ~ 250 usec */
- u32 val, count, retries = loops_per_jiffy / (4000/HZ);
- int i;
-
- if (!vdfc->en)
- return 0;
-
- /* Correction mode */
- val = (vdfc->corr_mode << ISIF_VDFC_CORR_MOD_SHIFT);
-
- /* Correct whole line or partial */
- if (vdfc->corr_whole_line)
- val |= 1 << ISIF_VDFC_CORR_WHOLE_LN_SHIFT;
-
- /* level shift value */
- val |= vdfc->def_level_shift << ISIF_VDFC_LEVEL_SHFT_SHIFT;
-
- regw(val, DFCCTL);
-
- /* Defect saturation level */
- regw(vdfc->def_sat_level, VDFSATLV);
-
- regw(vdfc->table[0].pos_vert, DFCMEM0);
- regw(vdfc->table[0].pos_horz, DFCMEM1);
- if (vdfc->corr_mode == ISIF_VDFC_NORMAL ||
- vdfc->corr_mode == ISIF_VDFC_HORZ_INTERPOL_IF_SAT) {
- regw(vdfc->table[0].level_at_pos, DFCMEM2);
- regw(vdfc->table[0].level_up_pixels, DFCMEM3);
- regw(vdfc->table[0].level_low_pixels, DFCMEM4);
- }
-
- /* set DFCMARST and set DFCMWR */
- val = regr(DFCMEMCTL) | (1 << ISIF_DFCMEMCTL_DFCMARST_SHIFT) | 1;
- regw(val, DFCMEMCTL);
-
- count = retries;
- while (count && (regr(DFCMEMCTL) & 0x1))
- count--;
-
- if (!count) {
- dev_dbg(isif_cfg.dev, "defect table write timeout !!!\n");
- return -1;
- }
-
- for (i = 1; i < vdfc->num_vdefects; i++) {
- regw(vdfc->table[i].pos_vert, DFCMEM0);
- regw(vdfc->table[i].pos_horz, DFCMEM1);
- if (vdfc->corr_mode == ISIF_VDFC_NORMAL ||
- vdfc->corr_mode == ISIF_VDFC_HORZ_INTERPOL_IF_SAT) {
- regw(vdfc->table[i].level_at_pos, DFCMEM2);
- regw(vdfc->table[i].level_up_pixels, DFCMEM3);
- regw(vdfc->table[i].level_low_pixels, DFCMEM4);
- }
- val = regr(DFCMEMCTL);
- /* clear DFCMARST and set DFCMWR */
- val &= ~BIT(ISIF_DFCMEMCTL_DFCMARST_SHIFT);
- val |= 1;
- regw(val, DFCMEMCTL);
-
- count = retries;
- while (count && (regr(DFCMEMCTL) & 0x1))
- count--;
-
- if (!count) {
- dev_err(isif_cfg.dev,
- "defect table write timeout !!!\n");
- return -1;
- }
- }
- if (vdfc->num_vdefects < ISIF_VDFC_TABLE_SIZE) {
- /* Extra cycle needed */
- regw(0, DFCMEM0);
- regw(0x1FFF, DFCMEM1);
- regw(1, DFCMEMCTL);
- }
-
- /* enable VDFC */
- reg_modify((1 << ISIF_VDFC_EN_SHIFT), (1 << ISIF_VDFC_EN_SHIFT),
- DFCCTL);
- return 0;
-}
-
-static void isif_config_csc(struct isif_df_csc *df_csc)
-{
- u32 val1 = 0, val2 = 0, i;
-
- if (!df_csc->csc.en) {
- regw(0, CSCCTL);
- return;
- }
- for (i = 0; i < ISIF_CSC_NUM_COEFF; i++) {
- if ((i % 2) == 0) {
- /* CSCM - LSB */
- val1 = (df_csc->csc.coeff[i].integer <<
- ISIF_CSC_COEF_INTEG_SHIFT) |
- df_csc->csc.coeff[i].decimal;
- } else {
-
- /* CSCM - MSB */
- val2 = (df_csc->csc.coeff[i].integer <<
- ISIF_CSC_COEF_INTEG_SHIFT) |
- df_csc->csc.coeff[i].decimal;
- val2 <<= ISIF_CSCM_MSB_SHIFT;
- val2 |= val1;
- regw(val2, (CSCM0 + ((i - 1) << 1)));
- }
- }
-
- /* program the active area */
- regw(df_csc->start_pix, FMTSPH);
- /*
- * one extra pixel as required for CSC. Actually number of
- * pixel - 1 should be configured in this register. So we
- * need to subtract 1 before writing to FMTSPH, but we will
- * not do this since csc requires one extra pixel
- */
- regw(df_csc->num_pixels, FMTLNH);
- regw(df_csc->start_line, FMTSLV);
- /*
- * one extra line as required for CSC. See reason documented for
- * num_pixels
- */
- regw(df_csc->num_lines, FMTLNV);
-
- /* Enable CSC */
- regw(1, CSCCTL);
-}
-
-static int isif_config_raw(void)
-{
- struct isif_params_raw *params = &isif_cfg.bayer;
- struct isif_config_params_raw *module_params =
- &isif_cfg.bayer.config_params;
- struct vpss_pg_frame_size frame_size;
- struct vpss_sync_pol sync;
- u32 val;
-
- dev_dbg(isif_cfg.dev, "\nStarting isif_config_raw..\n");
-
- /*
- * Configure CCDCFG register:-
- * Set CCD Not to swap input since input is RAW data
- * Set FID detection function to Latch at V-Sync
- * Set WENLOG - isif valid area
- * Set TRGSEL
- * Set EXTRG
- * Packed to 8 or 16 bits
- */
-
- val = ISIF_YCINSWP_RAW | ISIF_CCDCFG_FIDMD_LATCH_VSYNC |
- ISIF_CCDCFG_WENLOG_AND | ISIF_CCDCFG_TRGSEL_WEN |
- ISIF_CCDCFG_EXTRG_DISABLE | isif_cfg.data_pack;
-
- dev_dbg(isif_cfg.dev, "Writing 0x%x to ...CCDCFG \n", val);
- regw(val, CCDCFG);
-
- /*
- * Configure the vertical sync polarity(MODESET.VDPOL)
- * Configure the horizontal sync polarity (MODESET.HDPOL)
- * Configure frame id polarity (MODESET.FLDPOL)
- * Configure data polarity
- * Configure External WEN Selection
- * Configure frame format(progressive or interlace)
- * Configure pixel format (Input mode)
- * Configure the data shift
- */
-
- val = ISIF_VDHDOUT_INPUT | (params->vd_pol << ISIF_VD_POL_SHIFT) |
- (params->hd_pol << ISIF_HD_POL_SHIFT) |
- (params->fid_pol << ISIF_FID_POL_SHIFT) |
- (ISIF_DATAPOL_NORMAL << ISIF_DATAPOL_SHIFT) |
- (ISIF_EXWEN_DISABLE << ISIF_EXWEN_SHIFT) |
- (params->frm_fmt << ISIF_FRM_FMT_SHIFT) |
- (params->pix_fmt << ISIF_INPUT_SHIFT) |
- (params->config_params.data_shift << ISIF_DATASFT_SHIFT);
-
- regw(val, MODESET);
- dev_dbg(isif_cfg.dev, "Writing 0x%x to MODESET...\n", val);
-
- /*
- * Configure GAMMAWD register
- * CFA pattern setting
- */
- val = params->cfa_pat << ISIF_GAMMAWD_CFA_SHIFT;
-
- /* Gamma msb */
- if (module_params->compress.alg == ISIF_ALAW)
- val |= ISIF_ALAW_ENABLE;
-
- val |= (params->data_msb << ISIF_ALAW_GAMMA_WD_SHIFT);
- regw(val, CGAMMAWD);
-
- /* Configure DPCM compression settings */
- if (module_params->compress.alg == ISIF_DPCM) {
- val = BIT(ISIF_DPCM_EN_SHIFT) |
- (module_params->compress.pred <<
- ISIF_DPCM_PREDICTOR_SHIFT);
- }
-
- regw(val, MISC);
-
- /* Configure Gain & Offset */
- isif_config_gain_offset();
-
- /* Configure Color pattern */
- val = (params->config_params.col_pat_field0.olop) |
- (params->config_params.col_pat_field0.olep << 2) |
- (params->config_params.col_pat_field0.elop << 4) |
- (params->config_params.col_pat_field0.elep << 6) |
- (params->config_params.col_pat_field1.olop << 8) |
- (params->config_params.col_pat_field1.olep << 10) |
- (params->config_params.col_pat_field1.elop << 12) |
- (params->config_params.col_pat_field1.elep << 14);
- regw(val, CCOLP);
- dev_dbg(isif_cfg.dev, "Writing %x to CCOLP ...\n", val);
-
- /* Configure HSIZE register */
- val = (!!params->horz_flip_en) << ISIF_HSIZE_FLIP_SHIFT;
-
- /* calculate line offset in 32 bytes based on pack value */
- if (isif_cfg.data_pack == ISIF_PACK_8BIT)
- val |= ((params->win.width + 31) >> 5);
- else if (isif_cfg.data_pack == ISIF_PACK_12BIT)
- val |= (((params->win.width +
- (params->win.width >> 2)) + 31) >> 5);
- else
- val |= (((params->win.width * 2) + 31) >> 5);
- regw(val, HSIZE);
-
- /* Configure SDOFST register */
- if (params->frm_fmt == CCDC_FRMFMT_INTERLACED) {
- if (params->image_invert_en) {
- /* For interlace inverse mode */
- regw(0x4B6D, SDOFST);
- dev_dbg(isif_cfg.dev, "Writing 0x4B6D to SDOFST...\n");
- } else {
- /* For interlace non inverse mode */
- regw(0x0B6D, SDOFST);
- dev_dbg(isif_cfg.dev, "Writing 0x0B6D to SDOFST...\n");
- }
- } else if (params->frm_fmt == CCDC_FRMFMT_PROGRESSIVE) {
- if (params->image_invert_en) {
- /* For progressive inverse mode */
- regw(0x4000, SDOFST);
- dev_dbg(isif_cfg.dev, "Writing 0x4000 to SDOFST...\n");
- } else {
- /* For progressive non inverse mode */
- regw(0x0000, SDOFST);
- dev_dbg(isif_cfg.dev, "Writing 0x0000 to SDOFST...\n");
- }
- }
-
- /* Configure video window */
- isif_setwin(&params->win, params->frm_fmt, 1);
-
- /* Configure Black Clamp */
- isif_config_bclamp(&module_params->bclamp);
-
- /* Configure Vertical Defection Pixel Correction */
- if (isif_config_dfc(&module_params->dfc) < 0)
- return -EFAULT;
-
- if (!module_params->df_csc.df_or_csc)
- /* Configure Color Space Conversion */
- isif_config_csc(&module_params->df_csc);
-
- isif_config_linearization(&module_params->linearize);
-
- /* Configure Culling */
- isif_config_culling(&module_params->culling);
-
- /* Configure horizontal and vertical offsets(DFC,LSC,Gain) */
- regw(module_params->horz_offset, DATAHOFST);
- regw(module_params->vert_offset, DATAVOFST);
-
- /* Setup test pattern if enabled */
- if (params->config_params.test_pat_gen) {
- /* Use the HD/VD pol settings from user */
- sync.ccdpg_hdpol = params->hd_pol;
- sync.ccdpg_vdpol = params->vd_pol;
- dm365_vpss_set_sync_pol(sync);
- frame_size.hlpfr = isif_cfg.bayer.win.width;
- frame_size.pplen = isif_cfg.bayer.win.height;
- dm365_vpss_set_pg_frame_size(frame_size);
- vpss_select_ccdc_source(VPSS_PGLPBK);
- }
-
- dev_dbg(isif_cfg.dev, "\nEnd of isif_config_ycbcr...\n");
- return 0;
-}
-
-static int isif_set_buftype(enum ccdc_buftype buf_type)
-{
- if (isif_cfg.if_type == VPFE_RAW_BAYER)
- isif_cfg.bayer.buf_type = buf_type;
- else
- isif_cfg.ycbcr.buf_type = buf_type;
-
- return 0;
-
-}
-static enum ccdc_buftype isif_get_buftype(void)
-{
- if (isif_cfg.if_type == VPFE_RAW_BAYER)
- return isif_cfg.bayer.buf_type;
-
- return isif_cfg.ycbcr.buf_type;
-}
-
-static int isif_enum_pix(u32 *pix, int i)
-{
- int ret = -EINVAL;
-
- if (isif_cfg.if_type == VPFE_RAW_BAYER) {
- if (i < ARRAY_SIZE(isif_raw_bayer_pix_formats)) {
- *pix = isif_raw_bayer_pix_formats[i];
- ret = 0;
- }
- } else {
- if (i < ARRAY_SIZE(isif_raw_yuv_pix_formats)) {
- *pix = isif_raw_yuv_pix_formats[i];
- ret = 0;
- }
- }
-
- return ret;
-}
-
-static int isif_set_pixel_format(unsigned int pixfmt)
-{
- if (isif_cfg.if_type == VPFE_RAW_BAYER) {
- if (pixfmt == V4L2_PIX_FMT_SBGGR8) {
- if ((isif_cfg.bayer.config_params.compress.alg !=
- ISIF_ALAW) &&
- (isif_cfg.bayer.config_params.compress.alg !=
- ISIF_DPCM)) {
- dev_dbg(isif_cfg.dev,
- "Either configure A-Law or DPCM\n");
- return -EINVAL;
- }
- isif_cfg.data_pack = ISIF_PACK_8BIT;
- } else if (pixfmt == V4L2_PIX_FMT_SBGGR16) {
- isif_cfg.bayer.config_params.compress.alg =
- ISIF_NO_COMPRESSION;
- isif_cfg.data_pack = ISIF_PACK_16BIT;
- } else
- return -EINVAL;
- isif_cfg.bayer.pix_fmt = CCDC_PIXFMT_RAW;
- } else {
- if (pixfmt == V4L2_PIX_FMT_YUYV)
- isif_cfg.ycbcr.pix_order = CCDC_PIXORDER_YCBYCR;
- else if (pixfmt == V4L2_PIX_FMT_UYVY)
- isif_cfg.ycbcr.pix_order = CCDC_PIXORDER_CBYCRY;
- else
- return -EINVAL;
- isif_cfg.data_pack = ISIF_PACK_8BIT;
- }
- return 0;
-}
-
-static u32 isif_get_pixel_format(void)
-{
- u32 pixfmt;
-
- if (isif_cfg.if_type == VPFE_RAW_BAYER)
- if (isif_cfg.bayer.config_params.compress.alg == ISIF_ALAW ||
- isif_cfg.bayer.config_params.compress.alg == ISIF_DPCM)
- pixfmt = V4L2_PIX_FMT_SBGGR8;
- else
- pixfmt = V4L2_PIX_FMT_SBGGR16;
- else {
- if (isif_cfg.ycbcr.pix_order == CCDC_PIXORDER_YCBYCR)
- pixfmt = V4L2_PIX_FMT_YUYV;
- else
- pixfmt = V4L2_PIX_FMT_UYVY;
- }
- return pixfmt;
-}
-
-static int isif_set_image_window(struct v4l2_rect *win)
-{
- if (isif_cfg.if_type == VPFE_RAW_BAYER) {
- isif_cfg.bayer.win.top = win->top;
- isif_cfg.bayer.win.left = win->left;
- isif_cfg.bayer.win.width = win->width;
- isif_cfg.bayer.win.height = win->height;
- } else {
- isif_cfg.ycbcr.win.top = win->top;
- isif_cfg.ycbcr.win.left = win->left;
- isif_cfg.ycbcr.win.width = win->width;
- isif_cfg.ycbcr.win.height = win->height;
- }
- return 0;
-}
-
-static void isif_get_image_window(struct v4l2_rect *win)
-{
- if (isif_cfg.if_type == VPFE_RAW_BAYER)
- *win = isif_cfg.bayer.win;
- else
- *win = isif_cfg.ycbcr.win;
-}
-
-static unsigned int isif_get_line_length(void)
-{
- unsigned int len;
-
- if (isif_cfg.if_type == VPFE_RAW_BAYER) {
- if (isif_cfg.data_pack == ISIF_PACK_8BIT)
- len = ((isif_cfg.bayer.win.width));
- else if (isif_cfg.data_pack == ISIF_PACK_12BIT)
- len = (((isif_cfg.bayer.win.width * 2) +
- (isif_cfg.bayer.win.width >> 2)));
- else
- len = (((isif_cfg.bayer.win.width * 2)));
- } else
- len = (((isif_cfg.ycbcr.win.width * 2)));
- return ALIGN(len, 32);
-}
-
-static int isif_set_frame_format(enum ccdc_frmfmt frm_fmt)
-{
- if (isif_cfg.if_type == VPFE_RAW_BAYER)
- isif_cfg.bayer.frm_fmt = frm_fmt;
- else
- isif_cfg.ycbcr.frm_fmt = frm_fmt;
- return 0;
-}
-static enum ccdc_frmfmt isif_get_frame_format(void)
-{
- if (isif_cfg.if_type == VPFE_RAW_BAYER)
- return isif_cfg.bayer.frm_fmt;
- return isif_cfg.ycbcr.frm_fmt;
-}
-
-static int isif_getfid(void)
-{
- return (regr(MODESET) >> 15) & 0x1;
-}
-
-/* misc operations */
-static void isif_setfbaddr(unsigned long addr)
-{
- regw((addr >> 21) & 0x07ff, CADU);
- regw((addr >> 5) & 0x0ffff, CADL);
-}
-
-static int isif_set_hw_if_params(struct vpfe_hw_if_param *params)
-{
- isif_cfg.if_type = params->if_type;
-
- switch (params->if_type) {
- case VPFE_BT656:
- case VPFE_BT656_10BIT:
- case VPFE_YCBCR_SYNC_8:
- isif_cfg.ycbcr.pix_fmt = CCDC_PIXFMT_YCBCR_8BIT;
- isif_cfg.ycbcr.pix_order = CCDC_PIXORDER_CBYCRY;
- break;
- case VPFE_BT1120:
- case VPFE_YCBCR_SYNC_16:
- isif_cfg.ycbcr.pix_fmt = CCDC_PIXFMT_YCBCR_16BIT;
- isif_cfg.ycbcr.pix_order = CCDC_PIXORDER_CBYCRY;
- break;
- case VPFE_RAW_BAYER:
- isif_cfg.bayer.pix_fmt = CCDC_PIXFMT_RAW;
- break;
- default:
- dev_dbg(isif_cfg.dev, "Invalid interface type\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-/* This function will configure ISIF for YCbCr parameters. */
-static int isif_config_ycbcr(void)
-{
- struct isif_ycbcr_config *params = &isif_cfg.ycbcr;
- u32 modeset = 0, ccdcfg = 0;
-
- dev_dbg(isif_cfg.dev, "\nStarting isif_config_ycbcr...");
-
- /* configure pixel format or input mode */
- modeset = modeset | (params->pix_fmt << ISIF_INPUT_SHIFT) |
- (params->frm_fmt << ISIF_FRM_FMT_SHIFT) |
- (params->fid_pol << ISIF_FID_POL_SHIFT) |
- (params->hd_pol << ISIF_HD_POL_SHIFT) |
- (params->vd_pol << ISIF_VD_POL_SHIFT);
-
- /* pack the data to 8-bit ISIFCFG */
- switch (isif_cfg.if_type) {
- case VPFE_BT656:
- if (params->pix_fmt != CCDC_PIXFMT_YCBCR_8BIT) {
- dev_dbg(isif_cfg.dev, "Invalid pix_fmt(input mode)\n");
- return -EINVAL;
- }
- modeset |= (VPFE_PINPOL_NEGATIVE << ISIF_VD_POL_SHIFT);
- regw(3, REC656IF);
- ccdcfg = ccdcfg | ISIF_DATA_PACK8 | ISIF_YCINSWP_YCBCR;
- break;
- case VPFE_BT656_10BIT:
- if (params->pix_fmt != CCDC_PIXFMT_YCBCR_8BIT) {
- dev_dbg(isif_cfg.dev, "Invalid pix_fmt(input mode)\n");
- return -EINVAL;
- }
- /* setup BT.656, embedded sync */
- regw(3, REC656IF);
- /* enable 10 bit mode in ccdcfg */
- ccdcfg = ccdcfg | ISIF_DATA_PACK8 | ISIF_YCINSWP_YCBCR |
- ISIF_BW656_ENABLE;
- break;
- case VPFE_BT1120:
- if (params->pix_fmt != CCDC_PIXFMT_YCBCR_16BIT) {
- dev_dbg(isif_cfg.dev, "Invalid pix_fmt(input mode)\n");
- return -EINVAL;
- }
- regw(3, REC656IF);
- break;
-
- case VPFE_YCBCR_SYNC_8:
- ccdcfg |= ISIF_DATA_PACK8;
- ccdcfg |= ISIF_YCINSWP_YCBCR;
- if (params->pix_fmt != CCDC_PIXFMT_YCBCR_8BIT) {
- dev_dbg(isif_cfg.dev, "Invalid pix_fmt(input mode)\n");
- return -EINVAL;
- }
- break;
- case VPFE_YCBCR_SYNC_16:
- if (params->pix_fmt != CCDC_PIXFMT_YCBCR_16BIT) {
- dev_dbg(isif_cfg.dev, "Invalid pix_fmt(input mode)\n");
- return -EINVAL;
- }
- break;
- default:
- /* should never come here */
- dev_dbg(isif_cfg.dev, "Invalid interface type\n");
- return -EINVAL;
- }
-
- regw(modeset, MODESET);
-
- /* Set up pix order */
- ccdcfg |= params->pix_order << ISIF_PIX_ORDER_SHIFT;
-
- regw(ccdcfg, CCDCFG);
-
- /* configure video window */
- if ((isif_cfg.if_type == VPFE_BT1120) ||
- (isif_cfg.if_type == VPFE_YCBCR_SYNC_16))
- isif_setwin(&params->win, params->frm_fmt, 1);
- else
- isif_setwin(&params->win, params->frm_fmt, 2);
-
- /*
- * configure the horizontal line offset
- * this is done by rounding up width to a multiple of 16 pixels
- * and multiply by two to account for y:cb:cr 4:2:2 data
- */
- regw(((((params->win.width * 2) + 31) & 0xffffffe0) >> 5), HSIZE);
-
- /* configure the memory line offset */
- if ((params->frm_fmt == CCDC_FRMFMT_INTERLACED) &&
- (params->buf_type == CCDC_BUFTYPE_FLD_INTERLEAVED))
- /* two fields are interleaved in memory */
- regw(0x00000249, SDOFST);
-
- return 0;
-}
-
-static int isif_configure(void)
-{
- if (isif_cfg.if_type == VPFE_RAW_BAYER)
- return isif_config_raw();
- return isif_config_ycbcr();
-}
-
-static int isif_close(struct device *device)
-{
- /* copy defaults to module params */
- isif_cfg.bayer.config_params = isif_config_defaults;
- return 0;
-}
-
-static const struct ccdc_hw_device isif_hw_dev = {
- .name = "ISIF",
- .owner = THIS_MODULE,
- .hw_ops = {
- .open = isif_open,
- .close = isif_close,
- .enable = isif_enable,
- .enable_out_to_sdram = isif_enable_output_to_sdram,
- .set_hw_if_params = isif_set_hw_if_params,
- .configure = isif_configure,
- .set_buftype = isif_set_buftype,
- .get_buftype = isif_get_buftype,
- .enum_pix = isif_enum_pix,
- .set_pixel_format = isif_set_pixel_format,
- .get_pixel_format = isif_get_pixel_format,
- .set_frame_format = isif_set_frame_format,
- .get_frame_format = isif_get_frame_format,
- .set_image_window = isif_set_image_window,
- .get_image_window = isif_get_image_window,
- .get_line_length = isif_get_line_length,
- .setfbaddr = isif_setfbaddr,
- .getfid = isif_getfid,
- },
-};
-
-static int isif_probe(struct platform_device *pdev)
-{
- void (*setup_pinmux)(void);
- struct resource *res;
- void __iomem *addr;
- int status = 0, i;
-
- /* Platform data holds setup_pinmux function ptr */
- if (!pdev->dev.platform_data)
- return -ENODEV;
-
- /*
- * first try to register with vpfe. If not correct platform, then we
- * don't have to iomap
- */
- status = vpfe_register_ccdc_device(&isif_hw_dev);
- if (status < 0)
- return status;
-
- setup_pinmux = pdev->dev.platform_data;
- /*
- * setup Mux configuration for ccdc which may be different for
- * different SoCs using this CCDC
- */
- setup_pinmux();
-
- i = 0;
- /* Get the ISIF base address, linearization table0 and table1 addr. */
- while (i < 3) {
- res = platform_get_resource(pdev, IORESOURCE_MEM, i);
- if (!res) {
- status = -ENODEV;
- goto fail_nobase_res;
- }
- res = request_mem_region(res->start, resource_size(res),
- res->name);
- if (!res) {
- status = -EBUSY;
- goto fail_nobase_res;
- }
- addr = ioremap(res->start, resource_size(res));
- if (!addr) {
- status = -ENOMEM;
- goto fail_base_iomap;
- }
- switch (i) {
- case 0:
- /* ISIF base address */
- isif_cfg.base_addr = addr;
- break;
- case 1:
- /* ISIF linear tbl0 address */
- isif_cfg.linear_tbl0_addr = addr;
- break;
- default:
- /* ISIF linear tbl0 address */
- isif_cfg.linear_tbl1_addr = addr;
- break;
- }
- i++;
- }
- isif_cfg.dev = &pdev->dev;
-
- printk(KERN_NOTICE "%s is registered with vpfe.\n",
- isif_hw_dev.name);
- return 0;
-fail_base_iomap:
- release_mem_region(res->start, resource_size(res));
- i--;
-fail_nobase_res:
- if (isif_cfg.base_addr) {
- iounmap(isif_cfg.base_addr);
- isif_cfg.base_addr = NULL;
- }
- if (isif_cfg.linear_tbl0_addr) {
- iounmap(isif_cfg.linear_tbl0_addr);
- isif_cfg.linear_tbl0_addr = NULL;
- }
-
- while (i >= 0) {
- res = platform_get_resource(pdev, IORESOURCE_MEM, i);
- if (res)
- release_mem_region(res->start, resource_size(res));
- i--;
- }
- vpfe_unregister_ccdc_device(&isif_hw_dev);
- return status;
-}
-
-static int isif_remove(struct platform_device *pdev)
-{
- struct resource *res;
- int i = 0;
-
- iounmap(isif_cfg.base_addr);
- isif_cfg.base_addr = NULL;
- iounmap(isif_cfg.linear_tbl0_addr);
- isif_cfg.linear_tbl0_addr = NULL;
- iounmap(isif_cfg.linear_tbl1_addr);
- isif_cfg.linear_tbl1_addr = NULL;
- while (i < 3) {
- res = platform_get_resource(pdev, IORESOURCE_MEM, i);
- release_mem_region(res->start, resource_size(res));
- i++;
- }
- vpfe_unregister_ccdc_device(&isif_hw_dev);
- return 0;
-}
-
-static struct platform_driver isif_driver = {
- .driver = {
- .name = "isif",
- },
- .remove = isif_remove,
- .probe = isif_probe,
-};
-
-module_platform_driver(isif_driver);
-
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/media/deprecated/vpfe_capture/isif.h b/drivers/staging/media/deprecated/vpfe_capture/isif.h
deleted file mode 100644
index 8369acd26e7e..000000000000
--- a/drivers/staging/media/deprecated/vpfe_capture/isif.h
+++ /dev/null
@@ -1,518 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2008-2009 Texas Instruments Inc
- *
- * isif header file
- */
-#ifndef _ISIF_H
-#define _ISIF_H
-
-#include <media/davinci/ccdc_types.h>
-#include <media/davinci/vpfe_types.h>
-
-/* isif float type S8Q8/U8Q8 */
-struct isif_float_8 {
- /* 8 bit integer part */
- __u8 integer;
- /* 8 bit decimal part */
- __u8 decimal;
-};
-
-/* isif float type U16Q16/S16Q16 */
-struct isif_float_16 {
- /* 16 bit integer part */
- __u16 integer;
- /* 16 bit decimal part */
- __u16 decimal;
-};
-
-/************************************************************************
- * Vertical Defect Correction parameters
- ***********************************************************************/
-/* Defect Correction (DFC) table entry */
-struct isif_vdfc_entry {
- /* vertical position of defect */
- __u16 pos_vert;
- /* horizontal position of defect */
- __u16 pos_horz;
- /*
- * Defect level of Vertical line defect position. This is subtracted
- * from the data at the defect position
- */
- __u8 level_at_pos;
- /*
- * Defect level of the pixels upper than the vertical line defect.
- * This is subtracted from the data
- */
- __u8 level_up_pixels;
- /*
- * Defect level of the pixels lower than the vertical line defect.
- * This is subtracted from the data
- */
- __u8 level_low_pixels;
-};
-
-#define ISIF_VDFC_TABLE_SIZE 8
-struct isif_dfc {
- /* enable vertical defect correction */
- __u8 en;
- /* Defect level subtraction. Just fed through if saturating */
-#define ISIF_VDFC_NORMAL 0
- /*
- * Defect level subtraction. Horizontal interpolation ((i-2)+(i+2))/2
- * if data saturating
- */
-#define ISIF_VDFC_HORZ_INTERPOL_IF_SAT 1
- /* Horizontal interpolation (((i-2)+(i+2))/2) */
-#define ISIF_VDFC_HORZ_INTERPOL 2
- /* one of the vertical defect correction modes above */
- __u8 corr_mode;
- /* 0 - whole line corrected, 1 - not pixels upper than the defect */
- __u8 corr_whole_line;
-#define ISIF_VDFC_NO_SHIFT 0
-#define ISIF_VDFC_SHIFT_1 1
-#define ISIF_VDFC_SHIFT_2 2
-#define ISIF_VDFC_SHIFT_3 3
-#define ISIF_VDFC_SHIFT_4 4
- /*
- * defect level shift value. level_at_pos, level_upper_pos,
- * and level_lower_pos can be shifted up by this value. Choose
- * one of the values above
- */
- __u8 def_level_shift;
- /* defect saturation level */
- __u16 def_sat_level;
- /* number of vertical defects. Max is ISIF_VDFC_TABLE_SIZE */
- __u16 num_vdefects;
- /* VDFC table ptr */
- struct isif_vdfc_entry table[ISIF_VDFC_TABLE_SIZE];
-};
-
-struct isif_horz_bclamp {
-
- /* Horizontal clamp disabled. Only vertical clamp value is subtracted */
-#define ISIF_HORZ_BC_DISABLE 0
- /*
- * Horizontal clamp value is calculated and subtracted from image data
- * along with vertical clamp value
- */
-#define ISIF_HORZ_BC_CLAMP_CALC_ENABLED 1
- /*
- * Horizontal clamp value calculated from previous image is subtracted
- * from image data along with vertical clamp value.
- */
-#define ISIF_HORZ_BC_CLAMP_NOT_UPDATED 2
- /* horizontal clamp mode. One of the values above */
- __u8 mode;
- /*
- * pixel value limit enable.
- * 0 - limit disabled
- * 1 - pixel value limited to 1023
- */
- __u8 clamp_pix_limit;
- /* Select Most left window for bc calculation */
-#define ISIF_SEL_MOST_LEFT_WIN 0
- /* Select Most right window for bc calculation */
-#define ISIF_SEL_MOST_RIGHT_WIN 1
- /* Select most left or right window for clamp val calculation */
- __u8 base_win_sel_calc;
- /* Window count per color for calculation. range 1-32 */
- __u8 win_count_calc;
- /* Window start position - horizontal for calculation. 0 - 8191 */
- __u16 win_start_h_calc;
- /* Window start position - vertical for calculation 0 - 8191 */
- __u16 win_start_v_calc;
-#define ISIF_HORZ_BC_SZ_H_2PIXELS 0
-#define ISIF_HORZ_BC_SZ_H_4PIXELS 1
-#define ISIF_HORZ_BC_SZ_H_8PIXELS 2
-#define ISIF_HORZ_BC_SZ_H_16PIXELS 3
- /* Width of the sample window in pixels for calculation */
- __u8 win_h_sz_calc;
-#define ISIF_HORZ_BC_SZ_V_32PIXELS 0
-#define ISIF_HORZ_BC_SZ_V_64PIXELS 1
-#define ISIF_HORZ_BC_SZ_V_128PIXELS 2
-#define ISIF_HORZ_BC_SZ_V_256PIXELS 3
- /* Height of the sample window in pixels for calculation */
- __u8 win_v_sz_calc;
-};
-
-/************************************************************************
- * Black Clamp parameters
- ***********************************************************************/
-struct isif_vert_bclamp {
- /* Reset value used is the clamp value calculated */
-#define ISIF_VERT_BC_USE_HORZ_CLAMP_VAL 0
- /* Reset value used is reset_clamp_val configured */
-#define ISIF_VERT_BC_USE_CONFIG_CLAMP_VAL 1
- /* No update, previous image value is used */
-#define ISIF_VERT_BC_NO_UPDATE 2
- /*
- * Reset value selector for vertical clamp calculation. Use one of
- * the above values
- */
- __u8 reset_val_sel;
- /* U8Q8. Line average coefficient used in vertical clamp calculation */
- __u8 line_ave_coef;
- /* Height of the optical black region for calculation */
- __u16 ob_v_sz_calc;
- /* Optical black region start position - horizontal. 0 - 8191 */
- __u16 ob_start_h;
- /* Optical black region start position - vertical 0 - 8191 */
- __u16 ob_start_v;
-};
-
-struct isif_black_clamp {
- /*
- * This offset value is added irrespective of the clamp enable status.
- * S13
- */
- __u16 dc_offset;
- /*
- * Enable black/digital clamp value to be subtracted from the image data
- */
- __u8 en;
- /*
- * black clamp mode. same/separate clamp for 4 colors
- * 0 - disable - same clamp value for all colors
- * 1 - clamp value calculated separately for all colors
- */
- __u8 bc_mode_color;
- /* Vertical start position for bc subtraction */
- __u16 vert_start_sub;
- /* Black clamp for horizontal direction */
- struct isif_horz_bclamp horz;
- /* Black clamp for vertical direction */
- struct isif_vert_bclamp vert;
-};
-
-/*************************************************************************
-** Color Space Conversion (CSC)
-*************************************************************************/
-#define ISIF_CSC_NUM_COEFF 16
-struct isif_color_space_conv {
- /* Enable color space conversion */
- __u8 en;
- /*
- * csc coefficient table. S8Q5, M00 at index 0, M01 at index 1, and
- * so forth
- */
- struct isif_float_8 coeff[ISIF_CSC_NUM_COEFF];
-};
-
-
-/*************************************************************************
-** Black Compensation parameters
-*************************************************************************/
-struct isif_black_comp {
- /* Comp for Red */
- __s8 r_comp;
- /* Comp for Gr */
- __s8 gr_comp;
- /* Comp for Blue */
- __s8 b_comp;
- /* Comp for Gb */
- __s8 gb_comp;
-};
-
-/*************************************************************************
-** Gain parameters
-*************************************************************************/
-struct isif_gain {
- /* Gain for Red or ye */
- struct isif_float_16 r_ye;
- /* Gain for Gr or cy */
- struct isif_float_16 gr_cy;
- /* Gain for Gb or g */
- struct isif_float_16 gb_g;
- /* Gain for Blue or mg */
- struct isif_float_16 b_mg;
-};
-
-#define ISIF_LINEAR_TAB_SIZE 192
-/*************************************************************************
-** Linearization parameters
-*************************************************************************/
-struct isif_linearize {
- /* Enable or Disable linearization of data */
- __u8 en;
- /* Shift value applied */
- __u8 corr_shft;
- /* scale factor applied U11Q10 */
- struct isif_float_16 scale_fact;
- /* Size of the linear table */
- __u16 table[ISIF_LINEAR_TAB_SIZE];
-};
-
-/* Color patterns */
-#define ISIF_RED 0
-#define ISIF_GREEN_RED 1
-#define ISIF_GREEN_BLUE 2
-#define ISIF_BLUE 3
-struct isif_col_pat {
- __u8 olop;
- __u8 olep;
- __u8 elop;
- __u8 elep;
-};
-
-/*************************************************************************
-** Data formatter parameters
-*************************************************************************/
-struct isif_fmtplen {
- /*
- * number of program entries for SET0, range 1 - 16
- * when fmtmode is ISIF_SPLIT, 1 - 8 when fmtmode is
- * ISIF_COMBINE
- */
- __u16 plen0;
- /*
- * number of program entries for SET1, range 1 - 16
- * when fmtmode is ISIF_SPLIT, 1 - 8 when fmtmode is
- * ISIF_COMBINE
- */
- __u16 plen1;
- /**
- * number of program entries for SET2, range 1 - 16
- * when fmtmode is ISIF_SPLIT, 1 - 8 when fmtmode is
- * ISIF_COMBINE
- */
- __u16 plen2;
- /**
- * number of program entries for SET3, range 1 - 16
- * when fmtmode is ISIF_SPLIT, 1 - 8 when fmtmode is
- * ISIF_COMBINE
- */
- __u16 plen3;
-};
-
-struct isif_fmt_cfg {
-#define ISIF_SPLIT 0
-#define ISIF_COMBINE 1
- /* Split or combine or line alternate */
- __u8 fmtmode;
- /* enable or disable line alternating mode */
- __u8 ln_alter_en;
-#define ISIF_1LINE 0
-#define ISIF_2LINES 1
-#define ISIF_3LINES 2
-#define ISIF_4LINES 3
- /* Split/combine line number */
- __u8 lnum;
- /* Address increment Range 1 - 16 */
- __u8 addrinc;
-};
-
-struct isif_fmt_addr_ptr {
- /* Initial address */
- __u32 init_addr;
- /* output line number */
-#define ISIF_1STLINE 0
-#define ISIF_2NDLINE 1
-#define ISIF_3RDLINE 2
-#define ISIF_4THLINE 3
- __u8 out_line;
-};
-
-struct isif_fmtpgm_ap {
- /* program address pointer */
- __u8 pgm_aptr;
- /* program address increment or decrement */
- __u8 pgmupdt;
-};
-
-struct isif_data_formatter {
- /* Enable/Disable data formatter */
- __u8 en;
- /* data formatter configuration */
- struct isif_fmt_cfg cfg;
- /* Formatter program entries length */
- struct isif_fmtplen plen;
- /* first pixel in a line fed to formatter */
- __u16 fmtrlen;
- /* HD interval for output line. Only valid when split line */
- __u16 fmthcnt;
- /* formatter address pointers */
- struct isif_fmt_addr_ptr fmtaddr_ptr[16];
- /* program enable/disable */
- __u8 pgm_en[32];
- /* program address pointers */
- struct isif_fmtpgm_ap fmtpgm_ap[32];
-};
-
-struct isif_df_csc {
- /* Color Space Conversion configuration, 0 - csc, 1 - df */
- __u8 df_or_csc;
- /* csc configuration valid if df_or_csc is 0 */
- struct isif_color_space_conv csc;
- /* data formatter configuration valid if df_or_csc is 1 */
- struct isif_data_formatter df;
- /* start pixel in a line at the input */
- __u32 start_pix;
- /* number of pixels in input line */
- __u32 num_pixels;
- /* start line at the input */
- __u32 start_line;
- /* number of lines at the input */
- __u32 num_lines;
-};
-
-struct isif_gain_offsets_adj {
- /* Gain adjustment per color */
- struct isif_gain gain;
- /* Offset adjustment */
- __u16 offset;
- /* Enable or Disable Gain adjustment for SDRAM data */
- __u8 gain_sdram_en;
- /* Enable or Disable Gain adjustment for IPIPE data */
- __u8 gain_ipipe_en;
- /* Enable or Disable Gain adjustment for H3A data */
- __u8 gain_h3a_en;
- /* Enable or Disable Gain adjustment for SDRAM data */
- __u8 offset_sdram_en;
- /* Enable or Disable Gain adjustment for IPIPE data */
- __u8 offset_ipipe_en;
- /* Enable or Disable Gain adjustment for H3A data */
- __u8 offset_h3a_en;
-};
-
-struct isif_cul {
- /* Horizontal Cull pattern for odd lines */
- __u8 hcpat_odd;
- /* Horizontal Cull pattern for even lines */
- __u8 hcpat_even;
- /* Vertical Cull pattern */
- __u8 vcpat;
- /* Enable or disable lpf. Apply when cull is enabled */
- __u8 en_lpf;
-};
-
-struct isif_compress {
-#define ISIF_ALAW 0
-#define ISIF_DPCM 1
-#define ISIF_NO_COMPRESSION 2
- /* Compression Algorithm used */
- __u8 alg;
- /* Choose Predictor1 for DPCM compression */
-#define ISIF_DPCM_PRED1 0
- /* Choose Predictor2 for DPCM compression */
-#define ISIF_DPCM_PRED2 1
- /* Predictor for DPCM compression */
- __u8 pred;
-};
-
-/* all the stuff in this struct will be provided by userland */
-struct isif_config_params_raw {
- /* Linearization parameters for image sensor data input */
- struct isif_linearize linearize;
- /* Data formatter or CSC */
- struct isif_df_csc df_csc;
- /* Defect Pixel Correction (DFC) configuration */
- struct isif_dfc dfc;
- /* Black/Digital Clamp configuration */
- struct isif_black_clamp bclamp;
- /* Gain, offset adjustments */
- struct isif_gain_offsets_adj gain_offset;
- /* Culling */
- struct isif_cul culling;
- /* A-Law and DPCM compression options */
- struct isif_compress compress;
- /* horizontal offset for Gain/LSC/DFC */
- __u16 horz_offset;
- /* vertical offset for Gain/LSC/DFC */
- __u16 vert_offset;
- /* color pattern for field 0 */
- struct isif_col_pat col_pat_field0;
- /* color pattern for field 1 */
- struct isif_col_pat col_pat_field1;
-#define ISIF_NO_SHIFT 0
-#define ISIF_1BIT_SHIFT 1
-#define ISIF_2BIT_SHIFT 2
-#define ISIF_3BIT_SHIFT 3
-#define ISIF_4BIT_SHIFT 4
-#define ISIF_5BIT_SHIFT 5
-#define ISIF_6BIT_SHIFT 6
- /* Data shift applied before storing to SDRAM */
- __u8 data_shift;
- /* enable input test pattern generation */
- __u8 test_pat_gen;
-};
-
-#ifdef __KERNEL__
-struct isif_ycbcr_config {
- /* isif pixel format */
- enum ccdc_pixfmt pix_fmt;
- /* isif frame format */
- enum ccdc_frmfmt frm_fmt;
- /* ISIF crop window */
- struct v4l2_rect win;
- /* field polarity */
- enum vpfe_pin_pol fid_pol;
- /* interface VD polarity */
- enum vpfe_pin_pol vd_pol;
- /* interface HD polarity */
- enum vpfe_pin_pol hd_pol;
- /* isif pix order. Only used for ycbcr capture */
- enum ccdc_pixorder pix_order;
- /* isif buffer type. Only used for ycbcr capture */
- enum ccdc_buftype buf_type;
-};
-
-/* MSB of image data connected to sensor port */
-enum isif_data_msb {
- ISIF_BIT_MSB_15,
- ISIF_BIT_MSB_14,
- ISIF_BIT_MSB_13,
- ISIF_BIT_MSB_12,
- ISIF_BIT_MSB_11,
- ISIF_BIT_MSB_10,
- ISIF_BIT_MSB_9,
- ISIF_BIT_MSB_8,
- ISIF_BIT_MSB_7
-};
-
-enum isif_cfa_pattern {
- ISIF_CFA_PAT_MOSAIC,
- ISIF_CFA_PAT_STRIPE
-};
-
-struct isif_params_raw {
- /* isif pixel format */
- enum ccdc_pixfmt pix_fmt;
- /* isif frame format */
- enum ccdc_frmfmt frm_fmt;
- /* video window */
- struct v4l2_rect win;
- /* field polarity */
- enum vpfe_pin_pol fid_pol;
- /* interface VD polarity */
- enum vpfe_pin_pol vd_pol;
- /* interface HD polarity */
- enum vpfe_pin_pol hd_pol;
- /* buffer type. Applicable for interlaced mode */
- enum ccdc_buftype buf_type;
- /* Gain values */
- struct isif_gain gain;
- /* cfa pattern */
- enum isif_cfa_pattern cfa_pat;
- /* Data MSB position */
- enum isif_data_msb data_msb;
- /* Enable horizontal flip */
- unsigned char horz_flip_en;
- /* Enable image invert vertically */
- unsigned char image_invert_en;
-
- /* all the userland defined stuff*/
- struct isif_config_params_raw config_params;
-};
-
-enum isif_data_pack {
- ISIF_PACK_16BIT,
- ISIF_PACK_12BIT,
- ISIF_PACK_8BIT
-};
-
-#define ISIF_WIN_NTSC {0, 0, 720, 480}
-#define ISIF_WIN_VGA {0, 0, 640, 480}
-
-#endif
-#endif
diff --git a/drivers/staging/media/deprecated/vpfe_capture/isif_regs.h b/drivers/staging/media/deprecated/vpfe_capture/isif_regs.h
deleted file mode 100644
index d68d38841ae7..000000000000
--- a/drivers/staging/media/deprecated/vpfe_capture/isif_regs.h
+++ /dev/null
@@ -1,256 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2008-2009 Texas Instruments Inc
- */
-#ifndef _ISIF_REGS_H
-#define _ISIF_REGS_H
-
-/* ISIF registers relative offsets */
-#define SYNCEN 0x00
-#define MODESET 0x04
-#define HDW 0x08
-#define VDW 0x0c
-#define PPLN 0x10
-#define LPFR 0x14
-#define SPH 0x18
-#define LNH 0x1c
-#define SLV0 0x20
-#define SLV1 0x24
-#define LNV 0x28
-#define CULH 0x2c
-#define CULV 0x30
-#define HSIZE 0x34
-#define SDOFST 0x38
-#define CADU 0x3c
-#define CADL 0x40
-#define LINCFG0 0x44
-#define LINCFG1 0x48
-#define CCOLP 0x4c
-#define CRGAIN 0x50
-#define CGRGAIN 0x54
-#define CGBGAIN 0x58
-#define CBGAIN 0x5c
-#define COFSTA 0x60
-#define FLSHCFG0 0x64
-#define FLSHCFG1 0x68
-#define FLSHCFG2 0x6c
-#define VDINT0 0x70
-#define VDINT1 0x74
-#define VDINT2 0x78
-#define MISC 0x7c
-#define CGAMMAWD 0x80
-#define REC656IF 0x84
-#define CCDCFG 0x88
-/*****************************************************
-* Defect Correction registers
-*****************************************************/
-#define DFCCTL 0x8c
-#define VDFSATLV 0x90
-#define DFCMEMCTL 0x94
-#define DFCMEM0 0x98
-#define DFCMEM1 0x9c
-#define DFCMEM2 0xa0
-#define DFCMEM3 0xa4
-#define DFCMEM4 0xa8
-/****************************************************
-* Black Clamp registers
-****************************************************/
-#define CLAMPCFG 0xac
-#define CLDCOFST 0xb0
-#define CLSV 0xb4
-#define CLHWIN0 0xb8
-#define CLHWIN1 0xbc
-#define CLHWIN2 0xc0
-#define CLVRV 0xc4
-#define CLVWIN0 0xc8
-#define CLVWIN1 0xcc
-#define CLVWIN2 0xd0
-#define CLVWIN3 0xd4
-/****************************************************
-* Lense Shading Correction
-****************************************************/
-#define DATAHOFST 0xd8
-#define DATAVOFST 0xdc
-#define LSCHVAL 0xe0
-#define LSCVVAL 0xe4
-#define TWODLSCCFG 0xe8
-#define TWODLSCOFST 0xec
-#define TWODLSCINI 0xf0
-#define TWODLSCGRBU 0xf4
-#define TWODLSCGRBL 0xf8
-#define TWODLSCGROF 0xfc
-#define TWODLSCORBU 0x100
-#define TWODLSCORBL 0x104
-#define TWODLSCOROF 0x108
-#define TWODLSCIRQEN 0x10c
-#define TWODLSCIRQST 0x110
-/****************************************************
-* Data formatter
-****************************************************/
-#define FMTCFG 0x114
-#define FMTPLEN 0x118
-#define FMTSPH 0x11c
-#define FMTLNH 0x120
-#define FMTSLV 0x124
-#define FMTLNV 0x128
-#define FMTRLEN 0x12c
-#define FMTHCNT 0x130
-#define FMTAPTR_BASE 0x134
-/* Below macro for addresses FMTAPTR0 - FMTAPTR15 */
-#define FMTAPTR(i) (FMTAPTR_BASE + (i * 4))
-#define FMTPGMVF0 0x174
-#define FMTPGMVF1 0x178
-#define FMTPGMAPU0 0x17c
-#define FMTPGMAPU1 0x180
-#define FMTPGMAPS0 0x184
-#define FMTPGMAPS1 0x188
-#define FMTPGMAPS2 0x18c
-#define FMTPGMAPS3 0x190
-#define FMTPGMAPS4 0x194
-#define FMTPGMAPS5 0x198
-#define FMTPGMAPS6 0x19c
-#define FMTPGMAPS7 0x1a0
-/************************************************
-* Color Space Converter
-************************************************/
-#define CSCCTL 0x1a4
-#define CSCM0 0x1a8
-#define CSCM1 0x1ac
-#define CSCM2 0x1b0
-#define CSCM3 0x1b4
-#define CSCM4 0x1b8
-#define CSCM5 0x1bc
-#define CSCM6 0x1c0
-#define CSCM7 0x1c4
-#define OBWIN0 0x1c8
-#define OBWIN1 0x1cc
-#define OBWIN2 0x1d0
-#define OBWIN3 0x1d4
-#define OBVAL0 0x1d8
-#define OBVAL1 0x1dc
-#define OBVAL2 0x1e0
-#define OBVAL3 0x1e4
-#define OBVAL4 0x1e8
-#define OBVAL5 0x1ec
-#define OBVAL6 0x1f0
-#define OBVAL7 0x1f4
-#define CLKCTL 0x1f8
-
-/* Masks & Shifts below */
-#define START_PX_HOR_MASK 0x7FFF
-#define NUM_PX_HOR_MASK 0x7FFF
-#define START_VER_ONE_MASK 0x7FFF
-#define START_VER_TWO_MASK 0x7FFF
-#define NUM_LINES_VER 0x7FFF
-
-/* gain - offset masks */
-#define GAIN_INTEGER_SHIFT 9
-#define OFFSET_MASK 0xFFF
-#define GAIN_SDRAM_EN_SHIFT 12
-#define GAIN_IPIPE_EN_SHIFT 13
-#define GAIN_H3A_EN_SHIFT 14
-#define OFST_SDRAM_EN_SHIFT 8
-#define OFST_IPIPE_EN_SHIFT 9
-#define OFST_H3A_EN_SHIFT 10
-#define GAIN_OFFSET_EN_MASK 0x7700
-
-/* Culling */
-#define CULL_PAT_EVEN_LINE_SHIFT 8
-
-/* CCDCFG register */
-#define ISIF_YCINSWP_RAW (0x00 << 4)
-#define ISIF_YCINSWP_YCBCR (0x01 << 4)
-#define ISIF_CCDCFG_FIDMD_LATCH_VSYNC (0x00 << 6)
-#define ISIF_CCDCFG_WENLOG_AND (0x00 << 8)
-#define ISIF_CCDCFG_TRGSEL_WEN (0x00 << 9)
-#define ISIF_CCDCFG_EXTRG_DISABLE (0x00 << 10)
-#define ISIF_LATCH_ON_VSYNC_DISABLE (0x01 << 15)
-#define ISIF_LATCH_ON_VSYNC_ENABLE (0x00 << 15)
-#define ISIF_DATA_PACK_MASK 3
-#define ISIF_DATA_PACK16 0
-#define ISIF_DATA_PACK12 1
-#define ISIF_DATA_PACK8 2
-#define ISIF_PIX_ORDER_SHIFT 11
-#define ISIF_BW656_ENABLE (0x01 << 5)
-
-/* MODESET registers */
-#define ISIF_VDHDOUT_INPUT (0x00 << 0)
-#define ISIF_INPUT_SHIFT 12
-#define ISIF_RAW_INPUT_MODE 0
-#define ISIF_FID_POL_SHIFT 4
-#define ISIF_HD_POL_SHIFT 3
-#define ISIF_VD_POL_SHIFT 2
-#define ISIF_DATAPOL_NORMAL 0
-#define ISIF_DATAPOL_SHIFT 6
-#define ISIF_EXWEN_DISABLE 0
-#define ISIF_EXWEN_SHIFT 5
-#define ISIF_FRM_FMT_SHIFT 7
-#define ISIF_DATASFT_SHIFT 8
-#define ISIF_LPF_SHIFT 14
-#define ISIF_LPF_MASK 1
-
-/* GAMMAWD registers */
-#define ISIF_ALAW_GAMMA_WD_MASK 0xF
-#define ISIF_ALAW_GAMMA_WD_SHIFT 1
-#define ISIF_ALAW_ENABLE 1
-#define ISIF_GAMMAWD_CFA_SHIFT 5
-
-/* HSIZE registers */
-#define ISIF_HSIZE_FLIP_MASK 1
-#define ISIF_HSIZE_FLIP_SHIFT 12
-
-/* MISC registers */
-#define ISIF_DPCM_EN_SHIFT 12
-#define ISIF_DPCM_PREDICTOR_SHIFT 13
-
-/* Black clamp related */
-#define ISIF_BC_MODE_COLOR_SHIFT 4
-#define ISIF_HORZ_BC_MODE_SHIFT 1
-#define ISIF_HORZ_BC_WIN_SEL_SHIFT 5
-#define ISIF_HORZ_BC_PIX_LIMIT_SHIFT 6
-#define ISIF_HORZ_BC_WIN_H_SIZE_SHIFT 8
-#define ISIF_HORZ_BC_WIN_V_SIZE_SHIFT 12
-#define ISIF_VERT_BC_RST_VAL_SEL_SHIFT 4
-#define ISIF_VERT_BC_LINE_AVE_COEF_SHIFT 8
-
-/* VDFC registers */
-#define ISIF_VDFC_EN_SHIFT 4
-#define ISIF_VDFC_CORR_MOD_SHIFT 5
-#define ISIF_VDFC_CORR_WHOLE_LN_SHIFT 7
-#define ISIF_VDFC_LEVEL_SHFT_SHIFT 8
-#define ISIF_VDFC_POS_MASK 0x1FFF
-#define ISIF_DFCMEMCTL_DFCMARST_SHIFT 2
-
-/* CSC registers */
-#define ISIF_CSC_COEF_INTEG_MASK 7
-#define ISIF_CSC_COEF_DECIMAL_MASK 0x1f
-#define ISIF_CSC_COEF_INTEG_SHIFT 5
-#define ISIF_CSCM_MSB_SHIFT 8
-#define ISIF_DF_CSC_SPH_MASK 0x1FFF
-#define ISIF_DF_CSC_LNH_MASK 0x1FFF
-#define ISIF_DF_CSC_SLV_MASK 0x1FFF
-#define ISIF_DF_CSC_LNV_MASK 0x1FFF
-#define ISIF_DF_NUMLINES 0x7FFF
-#define ISIF_DF_NUMPIX 0x1FFF
-
-/* Offsets for LSC/DFC/Gain */
-#define ISIF_DATA_H_OFFSET_MASK 0x1FFF
-#define ISIF_DATA_V_OFFSET_MASK 0x1FFF
-
-/* Linearization */
-#define ISIF_LIN_CORRSFT_SHIFT 4
-#define ISIF_LIN_SCALE_FACT_INTEG_SHIFT 10
-
-
-/* Pattern registers */
-#define ISIF_PG_EN (1 << 3)
-#define ISIF_SEL_PG_SRC (3 << 4)
-#define ISIF_PG_VD_POL_SHIFT 0
-#define ISIF_PG_HD_POL_SHIFT 1
-
-/*random other junk*/
-#define ISIF_SYNCEN_VDHDEN_MASK (1 << 0)
-#define ISIF_SYNCEN_WEN_MASK (1 << 1)
-#define ISIF_SYNCEN_WEN_SHIFT 1
-
-#endif
diff --git a/drivers/staging/media/deprecated/vpfe_capture/vpfe_capture.c b/drivers/staging/media/deprecated/vpfe_capture/vpfe_capture.c
deleted file mode 100644
index 0a2226b321d7..000000000000
--- a/drivers/staging/media/deprecated/vpfe_capture/vpfe_capture.c
+++ /dev/null
@@ -1,1902 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (C) 2008-2009 Texas Instruments Inc
- *
- * Driver name : VPFE Capture driver
- * VPFE Capture driver allows applications to capture and stream video
- * frames on DaVinci SoCs (DM6446, DM355 etc) from a YUV source such as
- * TVP5146 or Raw Bayer RGB image data from an image sensor
- * such as Microns' MT9T001, MT9T031 etc.
- *
- * These SoCs have, in common, a Video Processing Subsystem (VPSS) that
- * consists of a Video Processing Front End (VPFE) for capturing
- * video/raw image data and Video Processing Back End (VPBE) for displaying
- * YUV data through an in-built analog encoder or Digital LCD port. This
- * driver is for capture through VPFE. A typical EVM using these SoCs have
- * following high level configuration.
- *
- * decoder(TVP5146/ YUV/
- * MT9T001) --> Raw Bayer RGB ---> MUX -> VPFE (CCDC/ISIF)
- * data input | |
- * V |
- * SDRAM |
- * V
- * Image Processor
- * |
- * V
- * SDRAM
- * The data flow happens from a decoder connected to the VPFE over a
- * YUV embedded (BT.656/BT.1120) or separate sync or raw bayer rgb interface
- * and to the input of VPFE through an optional MUX (if more inputs are
- * to be interfaced on the EVM). The input data is first passed through
- * CCDC (CCD Controller, a.k.a Image Sensor Interface, ISIF). The CCDC
- * does very little or no processing on YUV data and does pre-process Raw
- * Bayer RGB data through modules such as Defect Pixel Correction (DFC)
- * Color Space Conversion (CSC), data gain/offset etc. After this, data
- * can be written to SDRAM or can be connected to the image processing
- * block such as IPIPE (on DM355 only).
- *
- * Features supported
- * - MMAP IO
- * - Capture using TVP5146 over BT.656
- * - support for interfacing decoders using sub device model
- * - Work with DM355 or DM6446 CCDC to do Raw Bayer RGB/YUV
- * data capture to SDRAM.
- * TODO list
- * - Support multiple REQBUF after open
- * - Support for de-allocating buffers through REQBUF
- * - Support for Raw Bayer RGB capture
- * - Support for chaining Image Processor
- * - Support for static allocation of buffers
- * - Support for USERPTR IO
- * - Support for STREAMON before QBUF
- * - Support for control ioctls
- */
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/interrupt.h>
-#include <media/v4l2-common.h>
-#include <linux/io.h>
-#include <media/davinci/vpfe_capture.h>
-#include "ccdc_hw_device.h"
-
-static int debug;
-static u32 numbuffers = 3;
-static u32 bufsize = (720 * 576 * 2);
-
-module_param(numbuffers, uint, S_IRUGO);
-module_param(bufsize, uint, S_IRUGO);
-module_param(debug, int, 0644);
-
-MODULE_PARM_DESC(numbuffers, "buffer count (default:3)");
-MODULE_PARM_DESC(bufsize, "buffer size in bytes (default:720 x 576 x 2)");
-MODULE_PARM_DESC(debug, "Debug level 0-1");
-
-MODULE_DESCRIPTION("VPFE Video for Linux Capture Driver");
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Texas Instruments");
-
-/* standard information */
-struct vpfe_standard {
- v4l2_std_id std_id;
- unsigned int width;
- unsigned int height;
- struct v4l2_fract pixelaspect;
- /* 0 - progressive, 1 - interlaced */
- int frame_format;
-};
-
-/* ccdc configuration */
-struct ccdc_config {
- /* This make sure vpfe is probed and ready to go */
- int vpfe_probed;
- /* name of ccdc device */
- char name[32];
-};
-
-/* data structures */
-static struct vpfe_config_params config_params = {
- .min_numbuffers = 3,
- .numbuffers = 3,
- .min_bufsize = 720 * 480 * 2,
- .device_bufsize = 720 * 576 * 2,
-};
-
-/* ccdc device registered */
-static const struct ccdc_hw_device *ccdc_dev;
-/* lock for accessing ccdc information */
-static DEFINE_MUTEX(ccdc_lock);
-/* ccdc configuration */
-static struct ccdc_config *ccdc_cfg;
-
-static const struct vpfe_standard vpfe_standards[] = {
- {V4L2_STD_525_60, 720, 480, {11, 10}, 1},
- {V4L2_STD_625_50, 720, 576, {54, 59}, 1},
-};
-
-/* Used when raw Bayer image from ccdc is directly captured to SDRAM */
-static const struct vpfe_pixel_format vpfe_pix_fmts[] = {
- {
- .pixelformat = V4L2_PIX_FMT_SBGGR8,
- .bpp = 1,
- },
- {
- .pixelformat = V4L2_PIX_FMT_SBGGR16,
- .bpp = 2,
- },
- {
- .pixelformat = V4L2_PIX_FMT_SGRBG10DPCM8,
- .bpp = 1,
- },
- {
- .pixelformat = V4L2_PIX_FMT_UYVY,
- .bpp = 2,
- },
- {
- .pixelformat = V4L2_PIX_FMT_YUYV,
- .bpp = 2,
- },
- {
- .pixelformat = V4L2_PIX_FMT_NV12,
- .bpp = 1,
- },
-};
-
-/*
- * vpfe_lookup_pix_format()
- * lookup an entry in the vpfe pix format table based on pix_format
- */
-static const struct vpfe_pixel_format *vpfe_lookup_pix_format(u32 pix_format)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(vpfe_pix_fmts); i++) {
- if (pix_format == vpfe_pix_fmts[i].pixelformat)
- return &vpfe_pix_fmts[i];
- }
- return NULL;
-}
-
-/*
- * vpfe_register_ccdc_device. CCDC module calls this to
- * register with vpfe capture
- */
-int vpfe_register_ccdc_device(const struct ccdc_hw_device *dev)
-{
- int ret = 0;
- printk(KERN_NOTICE "vpfe_register_ccdc_device: %s\n", dev->name);
-
- if (!dev->hw_ops.open ||
- !dev->hw_ops.enable ||
- !dev->hw_ops.set_hw_if_params ||
- !dev->hw_ops.configure ||
- !dev->hw_ops.set_buftype ||
- !dev->hw_ops.get_buftype ||
- !dev->hw_ops.enum_pix ||
- !dev->hw_ops.set_frame_format ||
- !dev->hw_ops.get_frame_format ||
- !dev->hw_ops.get_pixel_format ||
- !dev->hw_ops.set_pixel_format ||
- !dev->hw_ops.set_image_window ||
- !dev->hw_ops.get_image_window ||
- !dev->hw_ops.get_line_length ||
- !dev->hw_ops.getfid)
- return -EINVAL;
-
- mutex_lock(&ccdc_lock);
- if (!ccdc_cfg) {
- /*
- * TODO. Will this ever happen? if so, we need to fix it.
- * Probably we need to add the request to a linked list and
- * walk through it during vpfe probe
- */
- printk(KERN_ERR "vpfe capture not initialized\n");
- ret = -EFAULT;
- goto unlock;
- }
-
- if (strcmp(dev->name, ccdc_cfg->name)) {
- /* ignore this ccdc */
- ret = -EINVAL;
- goto unlock;
- }
-
- if (ccdc_dev) {
- printk(KERN_ERR "ccdc already registered\n");
- ret = -EINVAL;
- goto unlock;
- }
-
- ccdc_dev = dev;
-unlock:
- mutex_unlock(&ccdc_lock);
- return ret;
-}
-EXPORT_SYMBOL(vpfe_register_ccdc_device);
-
-/*
- * vpfe_unregister_ccdc_device. CCDC module calls this to
- * unregister with vpfe capture
- */
-void vpfe_unregister_ccdc_device(const struct ccdc_hw_device *dev)
-{
- if (!dev) {
- printk(KERN_ERR "invalid ccdc device ptr\n");
- return;
- }
-
- printk(KERN_NOTICE "vpfe_unregister_ccdc_device, dev->name = %s\n",
- dev->name);
-
- if (strcmp(dev->name, ccdc_cfg->name)) {
- /* ignore this ccdc */
- return;
- }
-
- mutex_lock(&ccdc_lock);
- ccdc_dev = NULL;
- mutex_unlock(&ccdc_lock);
-}
-EXPORT_SYMBOL(vpfe_unregister_ccdc_device);
-
-/*
- * vpfe_config_ccdc_image_format()
- * For a pix format, configure ccdc to setup the capture
- */
-static int vpfe_config_ccdc_image_format(struct vpfe_device *vpfe_dev)
-{
- enum ccdc_frmfmt frm_fmt = CCDC_FRMFMT_INTERLACED;
- int ret = 0;
-
- if (ccdc_dev->hw_ops.set_pixel_format(
- vpfe_dev->fmt.fmt.pix.pixelformat) < 0) {
- v4l2_err(&vpfe_dev->v4l2_dev,
- "couldn't set pix format in ccdc\n");
- return -EINVAL;
- }
- /* configure the image window */
- ccdc_dev->hw_ops.set_image_window(&vpfe_dev->crop);
-
- switch (vpfe_dev->fmt.fmt.pix.field) {
- case V4L2_FIELD_INTERLACED:
- /* do nothing, since it is default */
- ret = ccdc_dev->hw_ops.set_buftype(
- CCDC_BUFTYPE_FLD_INTERLEAVED);
- break;
- case V4L2_FIELD_NONE:
- frm_fmt = CCDC_FRMFMT_PROGRESSIVE;
- /* buffer type only applicable for interlaced scan */
- break;
- case V4L2_FIELD_SEQ_TB:
- ret = ccdc_dev->hw_ops.set_buftype(
- CCDC_BUFTYPE_FLD_SEPARATED);
- break;
- default:
- return -EINVAL;
- }
-
- /* set the frame format */
- if (!ret)
- ret = ccdc_dev->hw_ops.set_frame_format(frm_fmt);
- return ret;
-}
-/*
- * vpfe_config_image_format()
- * For a given standard, this functions sets up the default
- * pix format & crop values in the vpfe device and ccdc. It first
- * starts with defaults based values from the standard table.
- * It then checks if sub device supports get_fmt and then override the
- * values based on that.Sets crop values to match with scan resolution
- * starting at 0,0. It calls vpfe_config_ccdc_image_format() set the
- * values in ccdc
- */
-static int vpfe_config_image_format(struct vpfe_device *vpfe_dev,
- v4l2_std_id std_id)
-{
- struct vpfe_subdev_info *sdinfo = vpfe_dev->current_subdev;
- struct v4l2_subdev_format fmt = {
- .which = V4L2_SUBDEV_FORMAT_ACTIVE,
- };
- struct v4l2_mbus_framefmt *mbus_fmt = &fmt.format;
- struct v4l2_pix_format *pix = &vpfe_dev->fmt.fmt.pix;
- int i, ret;
-
- for (i = 0; i < ARRAY_SIZE(vpfe_standards); i++) {
- if (vpfe_standards[i].std_id & std_id) {
- vpfe_dev->std_info.active_pixels =
- vpfe_standards[i].width;
- vpfe_dev->std_info.active_lines =
- vpfe_standards[i].height;
- vpfe_dev->std_info.frame_format =
- vpfe_standards[i].frame_format;
- vpfe_dev->std_index = i;
- break;
- }
- }
-
- if (i == ARRAY_SIZE(vpfe_standards)) {
- v4l2_err(&vpfe_dev->v4l2_dev, "standard not supported\n");
- return -EINVAL;
- }
-
- vpfe_dev->crop.top = 0;
- vpfe_dev->crop.left = 0;
- vpfe_dev->crop.width = vpfe_dev->std_info.active_pixels;
- vpfe_dev->crop.height = vpfe_dev->std_info.active_lines;
- pix->width = vpfe_dev->crop.width;
- pix->height = vpfe_dev->crop.height;
-
- /* first field and frame format based on standard frame format */
- if (vpfe_dev->std_info.frame_format) {
- pix->field = V4L2_FIELD_INTERLACED;
- /* assume V4L2_PIX_FMT_UYVY as default */
- pix->pixelformat = V4L2_PIX_FMT_UYVY;
- v4l2_fill_mbus_format(mbus_fmt, pix,
- MEDIA_BUS_FMT_YUYV10_2X10);
- } else {
- pix->field = V4L2_FIELD_NONE;
- /* assume V4L2_PIX_FMT_SBGGR8 */
- pix->pixelformat = V4L2_PIX_FMT_SBGGR8;
- v4l2_fill_mbus_format(mbus_fmt, pix,
- MEDIA_BUS_FMT_SBGGR8_1X8);
- }
-
- /* if sub device supports get_fmt, override the defaults */
- ret = v4l2_device_call_until_err(&vpfe_dev->v4l2_dev,
- sdinfo->grp_id, pad, get_fmt, NULL, &fmt);
-
- if (ret && ret != -ENOIOCTLCMD) {
- v4l2_err(&vpfe_dev->v4l2_dev,
- "error in getting get_fmt from sub device\n");
- return ret;
- }
- v4l2_fill_pix_format(pix, mbus_fmt);
- pix->bytesperline = pix->width * 2;
- pix->sizeimage = pix->bytesperline * pix->height;
-
- /* Sets the values in CCDC */
- ret = vpfe_config_ccdc_image_format(vpfe_dev);
- if (ret)
- return ret;
-
- /* Update the values of sizeimage and bytesperline */
- pix->bytesperline = ccdc_dev->hw_ops.get_line_length();
- pix->sizeimage = pix->bytesperline * pix->height;
-
- return 0;
-}
-
-static int vpfe_initialize_device(struct vpfe_device *vpfe_dev)
-{
- int ret;
-
- /* set first input of current subdevice as the current input */
- vpfe_dev->current_input = 0;
-
- /* set default standard */
- vpfe_dev->std_index = 0;
-
- /* Configure the default format information */
- ret = vpfe_config_image_format(vpfe_dev,
- vpfe_standards[vpfe_dev->std_index].std_id);
- if (ret)
- return ret;
-
- /* now open the ccdc device to initialize it */
- mutex_lock(&ccdc_lock);
- if (!ccdc_dev) {
- v4l2_err(&vpfe_dev->v4l2_dev, "ccdc device not registered\n");
- ret = -ENODEV;
- goto unlock;
- }
-
- if (!try_module_get(ccdc_dev->owner)) {
- v4l2_err(&vpfe_dev->v4l2_dev, "Couldn't lock ccdc module\n");
- ret = -ENODEV;
- goto unlock;
- }
- ret = ccdc_dev->hw_ops.open(vpfe_dev->pdev);
- if (!ret)
- vpfe_dev->initialized = 1;
-
- /* Clear all VPFE/CCDC interrupts */
- if (vpfe_dev->cfg->clr_intr)
- vpfe_dev->cfg->clr_intr(-1);
-
-unlock:
- mutex_unlock(&ccdc_lock);
- return ret;
-}
-
-/*
- * vpfe_open : It creates object of file handle structure and
- * stores it in private_data member of filepointer
- */
-static int vpfe_open(struct file *file)
-{
- struct vpfe_device *vpfe_dev = video_drvdata(file);
- struct video_device *vdev = video_devdata(file);
- struct vpfe_fh *fh;
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_open\n");
-
- if (!vpfe_dev->cfg->num_subdevs) {
- v4l2_err(&vpfe_dev->v4l2_dev, "No decoder registered\n");
- return -ENODEV;
- }
-
- /* Allocate memory for the file handle object */
- fh = kmalloc(sizeof(*fh), GFP_KERNEL);
- if (!fh)
- return -ENOMEM;
-
- /* store pointer to fh in private_data member of file */
- file->private_data = fh;
- fh->vpfe_dev = vpfe_dev;
- v4l2_fh_init(&fh->fh, vdev);
- mutex_lock(&vpfe_dev->lock);
- /* If decoder is not initialized. initialize it */
- if (!vpfe_dev->initialized) {
- if (vpfe_initialize_device(vpfe_dev)) {
- mutex_unlock(&vpfe_dev->lock);
- v4l2_fh_exit(&fh->fh);
- kfree(fh);
- return -ENODEV;
- }
- }
- /* Increment device usrs counter */
- vpfe_dev->usrs++;
- /* Set io_allowed member to false */
- fh->io_allowed = 0;
- v4l2_fh_add(&fh->fh);
- mutex_unlock(&vpfe_dev->lock);
- return 0;
-}
-
-static void vpfe_schedule_next_buffer(struct vpfe_device *vpfe_dev)
-{
- unsigned long addr;
-
- vpfe_dev->next_frm = list_entry(vpfe_dev->dma_queue.next,
- struct videobuf_buffer, queue);
- list_del(&vpfe_dev->next_frm->queue);
- vpfe_dev->next_frm->state = VIDEOBUF_ACTIVE;
- addr = videobuf_to_dma_contig(vpfe_dev->next_frm);
-
- ccdc_dev->hw_ops.setfbaddr(addr);
-}
-
-static void vpfe_schedule_bottom_field(struct vpfe_device *vpfe_dev)
-{
- unsigned long addr;
-
- addr = videobuf_to_dma_contig(vpfe_dev->cur_frm);
- addr += vpfe_dev->field_off;
- ccdc_dev->hw_ops.setfbaddr(addr);
-}
-
-static void vpfe_process_buffer_complete(struct vpfe_device *vpfe_dev)
-{
- vpfe_dev->cur_frm->ts = ktime_get_ns();
- vpfe_dev->cur_frm->state = VIDEOBUF_DONE;
- vpfe_dev->cur_frm->size = vpfe_dev->fmt.fmt.pix.sizeimage;
- wake_up_interruptible(&vpfe_dev->cur_frm->done);
- vpfe_dev->cur_frm = vpfe_dev->next_frm;
-}
-
-/* ISR for VINT0*/
-static irqreturn_t vpfe_isr(int irq, void *dev_id)
-{
- struct vpfe_device *vpfe_dev = dev_id;
- enum v4l2_field field;
- int fid;
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "\nStarting vpfe_isr...\n");
- field = vpfe_dev->fmt.fmt.pix.field;
-
- /* if streaming not started, don't do anything */
- if (!vpfe_dev->started)
- goto clear_intr;
-
- /* only for 6446 this will be applicable */
- if (ccdc_dev->hw_ops.reset)
- ccdc_dev->hw_ops.reset();
-
- if (field == V4L2_FIELD_NONE) {
- /* handle progressive frame capture */
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
- "frame format is progressive...\n");
- if (vpfe_dev->cur_frm != vpfe_dev->next_frm)
- vpfe_process_buffer_complete(vpfe_dev);
- goto clear_intr;
- }
-
- /* interlaced or TB capture check which field we are in hardware */
- fid = ccdc_dev->hw_ops.getfid();
-
- /* switch the software maintained field id */
- vpfe_dev->field_id ^= 1;
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "field id = %x:%x.\n",
- fid, vpfe_dev->field_id);
- if (fid == vpfe_dev->field_id) {
- /* we are in-sync here,continue */
- if (fid == 0) {
- /*
- * One frame is just being captured. If the next frame
- * is available, release the current frame and move on
- */
- if (vpfe_dev->cur_frm != vpfe_dev->next_frm)
- vpfe_process_buffer_complete(vpfe_dev);
- /*
- * based on whether the two fields are stored
- * interleavely or separately in memory, reconfigure
- * the CCDC memory address
- */
- if (field == V4L2_FIELD_SEQ_TB)
- vpfe_schedule_bottom_field(vpfe_dev);
- goto clear_intr;
- }
- /*
- * if one field is just being captured configure
- * the next frame get the next frame from the empty
- * queue if no frame is available hold on to the
- * current buffer
- */
- spin_lock(&vpfe_dev->dma_queue_lock);
- if (!list_empty(&vpfe_dev->dma_queue) &&
- vpfe_dev->cur_frm == vpfe_dev->next_frm)
- vpfe_schedule_next_buffer(vpfe_dev);
- spin_unlock(&vpfe_dev->dma_queue_lock);
- } else if (fid == 0) {
- /*
- * out of sync. Recover from any hardware out-of-sync.
- * May loose one frame
- */
- vpfe_dev->field_id = fid;
- }
-clear_intr:
- if (vpfe_dev->cfg->clr_intr)
- vpfe_dev->cfg->clr_intr(irq);
-
- return IRQ_HANDLED;
-}
-
-/* vdint1_isr - isr handler for VINT1 interrupt */
-static irqreturn_t vdint1_isr(int irq, void *dev_id)
-{
- struct vpfe_device *vpfe_dev = dev_id;
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "\nInside vdint1_isr...\n");
-
- /* if streaming not started, don't do anything */
- if (!vpfe_dev->started) {
- if (vpfe_dev->cfg->clr_intr)
- vpfe_dev->cfg->clr_intr(irq);
- return IRQ_HANDLED;
- }
-
- spin_lock(&vpfe_dev->dma_queue_lock);
- if ((vpfe_dev->fmt.fmt.pix.field == V4L2_FIELD_NONE) &&
- !list_empty(&vpfe_dev->dma_queue) &&
- vpfe_dev->cur_frm == vpfe_dev->next_frm)
- vpfe_schedule_next_buffer(vpfe_dev);
- spin_unlock(&vpfe_dev->dma_queue_lock);
-
- if (vpfe_dev->cfg->clr_intr)
- vpfe_dev->cfg->clr_intr(irq);
-
- return IRQ_HANDLED;
-}
-
-static void vpfe_detach_irq(struct vpfe_device *vpfe_dev)
-{
- enum ccdc_frmfmt frame_format;
-
- frame_format = ccdc_dev->hw_ops.get_frame_format();
- if (frame_format == CCDC_FRMFMT_PROGRESSIVE)
- free_irq(vpfe_dev->ccdc_irq1, vpfe_dev);
-}
-
-static int vpfe_attach_irq(struct vpfe_device *vpfe_dev)
-{
- enum ccdc_frmfmt frame_format;
-
- frame_format = ccdc_dev->hw_ops.get_frame_format();
- if (frame_format == CCDC_FRMFMT_PROGRESSIVE) {
- return request_irq(vpfe_dev->ccdc_irq1, vdint1_isr,
- 0, "vpfe_capture1",
- vpfe_dev);
- }
- return 0;
-}
-
-/* vpfe_stop_ccdc_capture: stop streaming in ccdc/isif */
-static void vpfe_stop_ccdc_capture(struct vpfe_device *vpfe_dev)
-{
- vpfe_dev->started = 0;
- ccdc_dev->hw_ops.enable(0);
- if (ccdc_dev->hw_ops.enable_out_to_sdram)
- ccdc_dev->hw_ops.enable_out_to_sdram(0);
-}
-
-/*
- * vpfe_release : This function deletes buffer queue, frees the
- * buffers and the vpfe file handle
- */
-static int vpfe_release(struct file *file)
-{
- struct vpfe_device *vpfe_dev = video_drvdata(file);
- struct vpfe_fh *fh = file->private_data;
- struct vpfe_subdev_info *sdinfo;
- int ret;
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_release\n");
-
- /* Get the device lock */
- mutex_lock(&vpfe_dev->lock);
- /* if this instance is doing IO */
- if (fh->io_allowed) {
- if (vpfe_dev->started) {
- sdinfo = vpfe_dev->current_subdev;
- ret = v4l2_device_call_until_err(&vpfe_dev->v4l2_dev,
- sdinfo->grp_id,
- video, s_stream, 0);
- if (ret && (ret != -ENOIOCTLCMD))
- v4l2_err(&vpfe_dev->v4l2_dev,
- "stream off failed in subdev\n");
- vpfe_stop_ccdc_capture(vpfe_dev);
- vpfe_detach_irq(vpfe_dev);
- videobuf_streamoff(&vpfe_dev->buffer_queue);
- }
- vpfe_dev->io_usrs = 0;
- vpfe_dev->numbuffers = config_params.numbuffers;
- videobuf_stop(&vpfe_dev->buffer_queue);
- videobuf_mmap_free(&vpfe_dev->buffer_queue);
- }
-
- /* Decrement device usrs counter */
- vpfe_dev->usrs--;
- v4l2_fh_del(&fh->fh);
- v4l2_fh_exit(&fh->fh);
- /* If this is the last file handle */
- if (!vpfe_dev->usrs) {
- vpfe_dev->initialized = 0;
- if (ccdc_dev->hw_ops.close)
- ccdc_dev->hw_ops.close(vpfe_dev->pdev);
- module_put(ccdc_dev->owner);
- }
- mutex_unlock(&vpfe_dev->lock);
- file->private_data = NULL;
- /* Free memory allocated to file handle object */
- kfree(fh);
- return 0;
-}
-
-/*
- * vpfe_mmap : It is used to map kernel space buffers
- * into user spaces
- */
-static int vpfe_mmap(struct file *file, struct vm_area_struct *vma)
-{
- /* Get the device object and file handle object */
- struct vpfe_device *vpfe_dev = video_drvdata(file);
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_mmap\n");
-
- return videobuf_mmap_mapper(&vpfe_dev->buffer_queue, vma);
-}
-
-/*
- * vpfe_poll: It is used for select/poll system call
- */
-static __poll_t vpfe_poll(struct file *file, poll_table *wait)
-{
- struct vpfe_device *vpfe_dev = video_drvdata(file);
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_poll\n");
-
- if (vpfe_dev->started)
- return videobuf_poll_stream(file,
- &vpfe_dev->buffer_queue, wait);
- return 0;
-}
-
-/* vpfe capture driver file operations */
-static const struct v4l2_file_operations vpfe_fops = {
- .owner = THIS_MODULE,
- .open = vpfe_open,
- .release = vpfe_release,
- .unlocked_ioctl = video_ioctl2,
- .mmap = vpfe_mmap,
- .poll = vpfe_poll
-};
-
-/*
- * vpfe_check_format()
- * This function adjust the input pixel format as per hardware
- * capabilities and update the same in pixfmt.
- * Following algorithm used :-
- *
- * If given pixformat is not in the vpfe list of pix formats or not
- * supported by the hardware, current value of pixformat in the device
- * is used
- * If given field is not supported, then current field is used. If field
- * is different from current, then it is matched with that from sub device.
- * Minimum height is 2 lines for interlaced or tb field and 1 line for
- * progressive. Maximum height is clamped to active active lines of scan
- * Minimum width is 32 bytes in memory and width is clamped to active
- * pixels of scan.
- * bytesperline is a multiple of 32.
- */
-static const struct vpfe_pixel_format *
- vpfe_check_format(struct vpfe_device *vpfe_dev,
- struct v4l2_pix_format *pixfmt)
-{
- u32 min_height = 1, min_width = 32, max_width, max_height;
- const struct vpfe_pixel_format *vpfe_pix_fmt;
- u32 pix;
- int temp, found;
-
- vpfe_pix_fmt = vpfe_lookup_pix_format(pixfmt->pixelformat);
- if (!vpfe_pix_fmt) {
- /*
- * use current pixel format in the vpfe device. We
- * will find this pix format in the table
- */
- pixfmt->pixelformat = vpfe_dev->fmt.fmt.pix.pixelformat;
- vpfe_pix_fmt = vpfe_lookup_pix_format(pixfmt->pixelformat);
- }
-
- /* check if hw supports it */
- temp = 0;
- found = 0;
- while (ccdc_dev->hw_ops.enum_pix(&pix, temp) >= 0) {
- if (vpfe_pix_fmt->pixelformat == pix) {
- found = 1;
- break;
- }
- temp++;
- }
-
- if (!found) {
- /* use current pixel format */
- pixfmt->pixelformat = vpfe_dev->fmt.fmt.pix.pixelformat;
- /*
- * Since this is currently used in the vpfe device, we
- * will find this pix format in the table
- */
- vpfe_pix_fmt = vpfe_lookup_pix_format(pixfmt->pixelformat);
- }
-
- /* check what field format is supported */
- if (pixfmt->field == V4L2_FIELD_ANY) {
- /* if field is any, use current value as default */
- pixfmt->field = vpfe_dev->fmt.fmt.pix.field;
- }
-
- /*
- * if field is not same as current field in the vpfe device
- * try matching the field with the sub device field
- */
- if (vpfe_dev->fmt.fmt.pix.field != pixfmt->field) {
- /*
- * If field value is not in the supported fields, use current
- * field used in the device as default
- */
- switch (pixfmt->field) {
- case V4L2_FIELD_INTERLACED:
- case V4L2_FIELD_SEQ_TB:
- /* if sub device is supporting progressive, use that */
- if (!vpfe_dev->std_info.frame_format)
- pixfmt->field = V4L2_FIELD_NONE;
- break;
- case V4L2_FIELD_NONE:
- if (vpfe_dev->std_info.frame_format)
- pixfmt->field = V4L2_FIELD_INTERLACED;
- break;
-
- default:
- /* use current field as default */
- pixfmt->field = vpfe_dev->fmt.fmt.pix.field;
- break;
- }
- }
-
- /* Now adjust image resolutions supported */
- if (pixfmt->field == V4L2_FIELD_INTERLACED ||
- pixfmt->field == V4L2_FIELD_SEQ_TB)
- min_height = 2;
-
- max_width = vpfe_dev->std_info.active_pixels;
- max_height = vpfe_dev->std_info.active_lines;
- min_width /= vpfe_pix_fmt->bpp;
-
- v4l2_info(&vpfe_dev->v4l2_dev, "width = %d, height = %d, bpp = %d\n",
- pixfmt->width, pixfmt->height, vpfe_pix_fmt->bpp);
-
- pixfmt->width = clamp((pixfmt->width), min_width, max_width);
- pixfmt->height = clamp((pixfmt->height), min_height, max_height);
-
- /* If interlaced, adjust height to be a multiple of 2 */
- if (pixfmt->field == V4L2_FIELD_INTERLACED)
- pixfmt->height &= (~1);
- /*
- * recalculate bytesperline and sizeimage since width
- * and height might have changed
- */
- pixfmt->bytesperline = (((pixfmt->width * vpfe_pix_fmt->bpp) + 31)
- & ~31);
- if (pixfmt->pixelformat == V4L2_PIX_FMT_NV12)
- pixfmt->sizeimage =
- pixfmt->bytesperline * pixfmt->height +
- ((pixfmt->bytesperline * pixfmt->height) >> 1);
- else
- pixfmt->sizeimage = pixfmt->bytesperline * pixfmt->height;
-
- v4l2_info(&vpfe_dev->v4l2_dev, "adjusted width = %d, height = %d, bpp = %d, bytesperline = %d, sizeimage = %d\n",
- pixfmt->width, pixfmt->height, vpfe_pix_fmt->bpp,
- pixfmt->bytesperline, pixfmt->sizeimage);
- return vpfe_pix_fmt;
-}
-
-static int vpfe_querycap(struct file *file, void *priv,
- struct v4l2_capability *cap)
-{
- struct vpfe_device *vpfe_dev = video_drvdata(file);
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_querycap\n");
-
- strscpy(cap->driver, CAPTURE_DRV_NAME, sizeof(cap->driver));
- strscpy(cap->bus_info, "VPFE", sizeof(cap->bus_info));
- strscpy(cap->card, vpfe_dev->cfg->card_name, sizeof(cap->card));
- return 0;
-}
-
-static int vpfe_g_fmt_vid_cap(struct file *file, void *priv,
- struct v4l2_format *fmt)
-{
- struct vpfe_device *vpfe_dev = video_drvdata(file);
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_g_fmt_vid_cap\n");
- /* Fill in the information about format */
- *fmt = vpfe_dev->fmt;
- return 0;
-}
-
-static int vpfe_enum_fmt_vid_cap(struct file *file, void *priv,
- struct v4l2_fmtdesc *fmt)
-{
- struct vpfe_device *vpfe_dev = video_drvdata(file);
- const struct vpfe_pixel_format *pix_fmt;
- u32 pix;
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_enum_fmt_vid_cap\n");
-
- if (ccdc_dev->hw_ops.enum_pix(&pix, fmt->index) < 0)
- return -EINVAL;
-
- /* Fill in the information about format */
- pix_fmt = vpfe_lookup_pix_format(pix);
- if (pix_fmt) {
- fmt->pixelformat = pix_fmt->pixelformat;
- return 0;
- }
- return -EINVAL;
-}
-
-static int vpfe_s_fmt_vid_cap(struct file *file, void *priv,
- struct v4l2_format *fmt)
-{
- struct vpfe_device *vpfe_dev = video_drvdata(file);
- const struct vpfe_pixel_format *pix_fmts;
- int ret;
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_fmt_vid_cap\n");
-
- /* If streaming is started, return error */
- if (vpfe_dev->started) {
- v4l2_err(&vpfe_dev->v4l2_dev, "Streaming is started\n");
- return -EBUSY;
- }
-
- /* Check for valid frame format */
- pix_fmts = vpfe_check_format(vpfe_dev, &fmt->fmt.pix);
- if (!pix_fmts)
- return -EINVAL;
-
- /* store the pixel format in the device object */
- ret = mutex_lock_interruptible(&vpfe_dev->lock);
- if (ret)
- return ret;
-
- /* First detach any IRQ if currently attached */
- vpfe_detach_irq(vpfe_dev);
- vpfe_dev->fmt = *fmt;
- /* set image capture parameters in the ccdc */
- ret = vpfe_config_ccdc_image_format(vpfe_dev);
- mutex_unlock(&vpfe_dev->lock);
- return ret;
-}
-
-static int vpfe_try_fmt_vid_cap(struct file *file, void *priv,
- struct v4l2_format *f)
-{
- struct vpfe_device *vpfe_dev = video_drvdata(file);
- const struct vpfe_pixel_format *pix_fmts;
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_try_fmt_vid_cap\n");
-
- pix_fmts = vpfe_check_format(vpfe_dev, &f->fmt.pix);
- if (!pix_fmts)
- return -EINVAL;
- return 0;
-}
-
-/*
- * vpfe_get_subdev_input_index - Get subdev index and subdev input index for a
- * given app input index
- */
-static int vpfe_get_subdev_input_index(struct vpfe_device *vpfe_dev,
- int *subdev_index,
- int *subdev_input_index,
- int app_input_index)
-{
- struct vpfe_config *cfg = vpfe_dev->cfg;
- struct vpfe_subdev_info *sdinfo;
- int i, j = 0;
-
- for (i = 0; i < cfg->num_subdevs; i++) {
- sdinfo = &cfg->sub_devs[i];
- if (app_input_index < (j + sdinfo->num_inputs)) {
- *subdev_index = i;
- *subdev_input_index = app_input_index - j;
- return 0;
- }
- j += sdinfo->num_inputs;
- }
- return -EINVAL;
-}
-
-/*
- * vpfe_get_app_input - Get app input index for a given subdev input index
- * driver stores the input index of the current sub device and translate it
- * when application request the current input
- */
-static int vpfe_get_app_input_index(struct vpfe_device *vpfe_dev,
- int *app_input_index)
-{
- struct vpfe_config *cfg = vpfe_dev->cfg;
- struct vpfe_subdev_info *sdinfo;
- int i, j = 0;
-
- for (i = 0; i < cfg->num_subdevs; i++) {
- sdinfo = &cfg->sub_devs[i];
- if (!strcmp(sdinfo->name, vpfe_dev->current_subdev->name)) {
- if (vpfe_dev->current_input >= sdinfo->num_inputs)
- return -1;
- *app_input_index = j + vpfe_dev->current_input;
- return 0;
- }
- j += sdinfo->num_inputs;
- }
- return -EINVAL;
-}
-
-static int vpfe_enum_input(struct file *file, void *priv,
- struct v4l2_input *inp)
-{
- struct vpfe_device *vpfe_dev = video_drvdata(file);
- struct vpfe_subdev_info *sdinfo;
- int subdev, index ;
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_enum_input\n");
-
- if (vpfe_get_subdev_input_index(vpfe_dev,
- &subdev,
- &index,
- inp->index) < 0) {
- v4l2_err(&vpfe_dev->v4l2_dev, "input information not found for the subdev\n");
- return -EINVAL;
- }
- sdinfo = &vpfe_dev->cfg->sub_devs[subdev];
- *inp = sdinfo->inputs[index];
- return 0;
-}
-
-static int vpfe_g_input(struct file *file, void *priv, unsigned int *index)
-{
- struct vpfe_device *vpfe_dev = video_drvdata(file);
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_g_input\n");
-
- return vpfe_get_app_input_index(vpfe_dev, index);
-}
-
-
-static int vpfe_s_input(struct file *file, void *priv, unsigned int index)
-{
- struct vpfe_device *vpfe_dev = video_drvdata(file);
- struct v4l2_subdev *sd;
- struct vpfe_subdev_info *sdinfo;
- int subdev_index, inp_index;
- struct vpfe_route *route;
- u32 input, output;
- int ret;
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_input\n");
-
- ret = mutex_lock_interruptible(&vpfe_dev->lock);
- if (ret)
- return ret;
-
- /*
- * If streaming is started return device busy
- * error
- */
- if (vpfe_dev->started) {
- v4l2_err(&vpfe_dev->v4l2_dev, "Streaming is on\n");
- ret = -EBUSY;
- goto unlock_out;
- }
- ret = vpfe_get_subdev_input_index(vpfe_dev,
- &subdev_index,
- &inp_index,
- index);
- if (ret < 0) {
- v4l2_err(&vpfe_dev->v4l2_dev, "invalid input index\n");
- goto unlock_out;
- }
-
- sdinfo = &vpfe_dev->cfg->sub_devs[subdev_index];
- sd = vpfe_dev->sd[subdev_index];
- route = &sdinfo->routes[inp_index];
- if (route && sdinfo->can_route) {
- input = route->input;
- output = route->output;
- } else {
- input = 0;
- output = 0;
- }
-
- if (sd)
- ret = v4l2_subdev_call(sd, video, s_routing, input, output, 0);
-
- if (ret) {
- v4l2_err(&vpfe_dev->v4l2_dev,
- "vpfe_doioctl:error in setting input in decoder\n");
- ret = -EINVAL;
- goto unlock_out;
- }
- vpfe_dev->current_subdev = sdinfo;
- if (sd)
- vpfe_dev->v4l2_dev.ctrl_handler = sd->ctrl_handler;
- vpfe_dev->current_input = index;
- vpfe_dev->std_index = 0;
-
- /* set the bus/interface parameter for the sub device in ccdc */
- ret = ccdc_dev->hw_ops.set_hw_if_params(&sdinfo->ccdc_if_params);
- if (ret)
- goto unlock_out;
-
- /* set the default image parameters in the device */
- ret = vpfe_config_image_format(vpfe_dev,
- vpfe_standards[vpfe_dev->std_index].std_id);
-unlock_out:
- mutex_unlock(&vpfe_dev->lock);
- return ret;
-}
-
-static int vpfe_querystd(struct file *file, void *priv, v4l2_std_id *std_id)
-{
- struct vpfe_device *vpfe_dev = video_drvdata(file);
- struct vpfe_subdev_info *sdinfo;
- int ret;
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_querystd\n");
-
- ret = mutex_lock_interruptible(&vpfe_dev->lock);
- sdinfo = vpfe_dev->current_subdev;
- if (ret)
- return ret;
- /* Call querystd function of decoder device */
- ret = v4l2_device_call_until_err(&vpfe_dev->v4l2_dev, sdinfo->grp_id,
- video, querystd, std_id);
- mutex_unlock(&vpfe_dev->lock);
- return ret;
-}
-
-static int vpfe_s_std(struct file *file, void *priv, v4l2_std_id std_id)
-{
- struct vpfe_device *vpfe_dev = video_drvdata(file);
- struct vpfe_subdev_info *sdinfo;
- int ret;
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_std\n");
-
- /* Call decoder driver function to set the standard */
- ret = mutex_lock_interruptible(&vpfe_dev->lock);
- if (ret)
- return ret;
-
- sdinfo = vpfe_dev->current_subdev;
- /* If streaming is started, return device busy error */
- if (vpfe_dev->started) {
- v4l2_err(&vpfe_dev->v4l2_dev, "streaming is started\n");
- ret = -EBUSY;
- goto unlock_out;
- }
-
- ret = v4l2_device_call_until_err(&vpfe_dev->v4l2_dev, sdinfo->grp_id,
- video, s_std, std_id);
- if (ret < 0) {
- v4l2_err(&vpfe_dev->v4l2_dev, "Failed to set standard\n");
- goto unlock_out;
- }
- ret = vpfe_config_image_format(vpfe_dev, std_id);
-
-unlock_out:
- mutex_unlock(&vpfe_dev->lock);
- return ret;
-}
-
-static int vpfe_g_std(struct file *file, void *priv, v4l2_std_id *std_id)
-{
- struct vpfe_device *vpfe_dev = video_drvdata(file);
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_g_std\n");
-
- *std_id = vpfe_standards[vpfe_dev->std_index].std_id;
- return 0;
-}
-/*
- * Videobuf operations
- */
-static int vpfe_videobuf_setup(struct videobuf_queue *vq,
- unsigned int *count,
- unsigned int *size)
-{
- struct vpfe_fh *fh = vq->priv_data;
- struct vpfe_device *vpfe_dev = fh->vpfe_dev;
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_buffer_setup\n");
- *size = vpfe_dev->fmt.fmt.pix.sizeimage;
- if (vpfe_dev->memory == V4L2_MEMORY_MMAP &&
- vpfe_dev->fmt.fmt.pix.sizeimage > config_params.device_bufsize)
- *size = config_params.device_bufsize;
-
- if (*count < config_params.min_numbuffers)
- *count = config_params.min_numbuffers;
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
- "count=%d, size=%d\n", *count, *size);
- return 0;
-}
-
-static int vpfe_videobuf_prepare(struct videobuf_queue *vq,
- struct videobuf_buffer *vb,
- enum v4l2_field field)
-{
- struct vpfe_fh *fh = vq->priv_data;
- struct vpfe_device *vpfe_dev = fh->vpfe_dev;
- unsigned long addr;
- int ret;
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_buffer_prepare\n");
-
- /* If buffer is not initialized, initialize it */
- if (VIDEOBUF_NEEDS_INIT == vb->state) {
- vb->width = vpfe_dev->fmt.fmt.pix.width;
- vb->height = vpfe_dev->fmt.fmt.pix.height;
- vb->size = vpfe_dev->fmt.fmt.pix.sizeimage;
- vb->field = field;
-
- ret = videobuf_iolock(vq, vb, NULL);
- if (ret < 0)
- return ret;
-
- addr = videobuf_to_dma_contig(vb);
- /* Make sure user addresses are aligned to 32 bytes */
- if (!ALIGN(addr, 32))
- return -EINVAL;
-
- vb->state = VIDEOBUF_PREPARED;
- }
- return 0;
-}
-
-static void vpfe_videobuf_queue(struct videobuf_queue *vq,
- struct videobuf_buffer *vb)
-{
- /* Get the file handle object and device object */
- struct vpfe_fh *fh = vq->priv_data;
- struct vpfe_device *vpfe_dev = fh->vpfe_dev;
- unsigned long flags;
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_buffer_queue\n");
-
- /* add the buffer to the DMA queue */
- spin_lock_irqsave(&vpfe_dev->dma_queue_lock, flags);
- list_add_tail(&vb->queue, &vpfe_dev->dma_queue);
- spin_unlock_irqrestore(&vpfe_dev->dma_queue_lock, flags);
-
- /* Change state of the buffer */
- vb->state = VIDEOBUF_QUEUED;
-}
-
-static void vpfe_videobuf_release(struct videobuf_queue *vq,
- struct videobuf_buffer *vb)
-{
- struct vpfe_fh *fh = vq->priv_data;
- struct vpfe_device *vpfe_dev = fh->vpfe_dev;
- unsigned long flags;
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_videobuf_release\n");
-
- /*
- * We need to flush the buffer from the dma queue since
- * they are de-allocated
- */
- spin_lock_irqsave(&vpfe_dev->dma_queue_lock, flags);
- INIT_LIST_HEAD(&vpfe_dev->dma_queue);
- spin_unlock_irqrestore(&vpfe_dev->dma_queue_lock, flags);
- videobuf_dma_contig_free(vq, vb);
- vb->state = VIDEOBUF_NEEDS_INIT;
-}
-
-static const struct videobuf_queue_ops vpfe_videobuf_qops = {
- .buf_setup = vpfe_videobuf_setup,
- .buf_prepare = vpfe_videobuf_prepare,
- .buf_queue = vpfe_videobuf_queue,
- .buf_release = vpfe_videobuf_release,
-};
-
-/*
- * vpfe_reqbufs. currently support REQBUF only once opening
- * the device.
- */
-static int vpfe_reqbufs(struct file *file, void *priv,
- struct v4l2_requestbuffers *req_buf)
-{
- struct vpfe_device *vpfe_dev = video_drvdata(file);
- struct vpfe_fh *fh = file->private_data;
- int ret;
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_reqbufs\n");
-
- if (V4L2_BUF_TYPE_VIDEO_CAPTURE != req_buf->type) {
- v4l2_err(&vpfe_dev->v4l2_dev, "Invalid buffer type\n");
- return -EINVAL;
- }
-
- ret = mutex_lock_interruptible(&vpfe_dev->lock);
- if (ret)
- return ret;
-
- if (vpfe_dev->io_usrs != 0) {
- v4l2_err(&vpfe_dev->v4l2_dev, "Only one IO user allowed\n");
- ret = -EBUSY;
- goto unlock_out;
- }
-
- vpfe_dev->memory = req_buf->memory;
- videobuf_queue_dma_contig_init(&vpfe_dev->buffer_queue,
- &vpfe_videobuf_qops,
- vpfe_dev->pdev,
- &vpfe_dev->irqlock,
- req_buf->type,
- vpfe_dev->fmt.fmt.pix.field,
- sizeof(struct videobuf_buffer),
- fh, NULL);
-
- fh->io_allowed = 1;
- vpfe_dev->io_usrs = 1;
- INIT_LIST_HEAD(&vpfe_dev->dma_queue);
- ret = videobuf_reqbufs(&vpfe_dev->buffer_queue, req_buf);
-unlock_out:
- mutex_unlock(&vpfe_dev->lock);
- return ret;
-}
-
-static int vpfe_querybuf(struct file *file, void *priv,
- struct v4l2_buffer *buf)
-{
- struct vpfe_device *vpfe_dev = video_drvdata(file);
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_querybuf\n");
-
- if (V4L2_BUF_TYPE_VIDEO_CAPTURE != buf->type) {
- v4l2_err(&vpfe_dev->v4l2_dev, "Invalid buf type\n");
- return -EINVAL;
- }
-
- if (vpfe_dev->memory != V4L2_MEMORY_MMAP) {
- v4l2_err(&vpfe_dev->v4l2_dev, "Invalid memory\n");
- return -EINVAL;
- }
- /* Call videobuf_querybuf to get information */
- return videobuf_querybuf(&vpfe_dev->buffer_queue, buf);
-}
-
-static int vpfe_qbuf(struct file *file, void *priv,
- struct v4l2_buffer *p)
-{
- struct vpfe_device *vpfe_dev = video_drvdata(file);
- struct vpfe_fh *fh = file->private_data;
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_qbuf\n");
-
- if (V4L2_BUF_TYPE_VIDEO_CAPTURE != p->type) {
- v4l2_err(&vpfe_dev->v4l2_dev, "Invalid buf type\n");
- return -EINVAL;
- }
-
- /*
- * If this file handle is not allowed to do IO,
- * return error
- */
- if (!fh->io_allowed) {
- v4l2_err(&vpfe_dev->v4l2_dev, "fh->io_allowed\n");
- return -EACCES;
- }
- return videobuf_qbuf(&vpfe_dev->buffer_queue, p);
-}
-
-static int vpfe_dqbuf(struct file *file, void *priv,
- struct v4l2_buffer *buf)
-{
- struct vpfe_device *vpfe_dev = video_drvdata(file);
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_dqbuf\n");
-
- if (V4L2_BUF_TYPE_VIDEO_CAPTURE != buf->type) {
- v4l2_err(&vpfe_dev->v4l2_dev, "Invalid buf type\n");
- return -EINVAL;
- }
- return videobuf_dqbuf(&vpfe_dev->buffer_queue,
- buf, file->f_flags & O_NONBLOCK);
-}
-
-/*
- * vpfe_calculate_offsets : This function calculates buffers offset
- * for top and bottom field
- */
-static void vpfe_calculate_offsets(struct vpfe_device *vpfe_dev)
-{
- struct v4l2_rect image_win;
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_calculate_offsets\n");
-
- ccdc_dev->hw_ops.get_image_window(&image_win);
- vpfe_dev->field_off = image_win.height * image_win.width;
-}
-
-/* vpfe_start_ccdc_capture: start streaming in ccdc/isif */
-static void vpfe_start_ccdc_capture(struct vpfe_device *vpfe_dev)
-{
- ccdc_dev->hw_ops.enable(1);
- if (ccdc_dev->hw_ops.enable_out_to_sdram)
- ccdc_dev->hw_ops.enable_out_to_sdram(1);
- vpfe_dev->started = 1;
-}
-
-/*
- * vpfe_streamon. Assume the DMA queue is not empty.
- * application is expected to call QBUF before calling
- * this ioctl. If not, driver returns error
- */
-static int vpfe_streamon(struct file *file, void *priv,
- enum v4l2_buf_type buf_type)
-{
- struct vpfe_device *vpfe_dev = video_drvdata(file);
- struct vpfe_fh *fh = file->private_data;
- struct vpfe_subdev_info *sdinfo;
- unsigned long addr;
- int ret;
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_streamon\n");
-
- if (V4L2_BUF_TYPE_VIDEO_CAPTURE != buf_type) {
- v4l2_err(&vpfe_dev->v4l2_dev, "Invalid buf type\n");
- return -EINVAL;
- }
-
- /* If file handle is not allowed IO, return error */
- if (!fh->io_allowed) {
- v4l2_err(&vpfe_dev->v4l2_dev, "fh->io_allowed\n");
- return -EACCES;
- }
-
- sdinfo = vpfe_dev->current_subdev;
- ret = v4l2_device_call_until_err(&vpfe_dev->v4l2_dev, sdinfo->grp_id,
- video, s_stream, 1);
-
- if (ret && (ret != -ENOIOCTLCMD)) {
- v4l2_err(&vpfe_dev->v4l2_dev, "stream on failed in subdev\n");
- return -EINVAL;
- }
-
- /* If buffer queue is empty, return error */
- if (list_empty(&vpfe_dev->buffer_queue.stream)) {
- v4l2_err(&vpfe_dev->v4l2_dev, "buffer queue is empty\n");
- return -EIO;
- }
-
- /* Call videobuf_streamon to start streaming * in videobuf */
- ret = videobuf_streamon(&vpfe_dev->buffer_queue);
- if (ret)
- return ret;
-
-
- ret = mutex_lock_interruptible(&vpfe_dev->lock);
- if (ret)
- goto streamoff;
- /* Get the next frame from the buffer queue */
- vpfe_dev->next_frm = list_entry(vpfe_dev->dma_queue.next,
- struct videobuf_buffer, queue);
- vpfe_dev->cur_frm = vpfe_dev->next_frm;
- /* Remove buffer from the buffer queue */
- list_del(&vpfe_dev->cur_frm->queue);
- /* Mark state of the current frame to active */
- vpfe_dev->cur_frm->state = VIDEOBUF_ACTIVE;
- /* Initialize field_id and started member */
- vpfe_dev->field_id = 0;
- addr = videobuf_to_dma_contig(vpfe_dev->cur_frm);
-
- /* Calculate field offset */
- vpfe_calculate_offsets(vpfe_dev);
-
- if (vpfe_attach_irq(vpfe_dev) < 0) {
- v4l2_err(&vpfe_dev->v4l2_dev,
- "Error in attaching interrupt handle\n");
- ret = -EFAULT;
- goto unlock_out;
- }
- if (ccdc_dev->hw_ops.configure() < 0) {
- v4l2_err(&vpfe_dev->v4l2_dev,
- "Error in configuring ccdc\n");
- ret = -EINVAL;
- goto unlock_out;
- }
- ccdc_dev->hw_ops.setfbaddr((unsigned long)(addr));
- vpfe_start_ccdc_capture(vpfe_dev);
- mutex_unlock(&vpfe_dev->lock);
- return ret;
-unlock_out:
- mutex_unlock(&vpfe_dev->lock);
-streamoff:
- videobuf_streamoff(&vpfe_dev->buffer_queue);
- return ret;
-}
-
-static int vpfe_streamoff(struct file *file, void *priv,
- enum v4l2_buf_type buf_type)
-{
- struct vpfe_device *vpfe_dev = video_drvdata(file);
- struct vpfe_fh *fh = file->private_data;
- struct vpfe_subdev_info *sdinfo;
- int ret;
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_streamoff\n");
-
- if (V4L2_BUF_TYPE_VIDEO_CAPTURE != buf_type) {
- v4l2_err(&vpfe_dev->v4l2_dev, "Invalid buf type\n");
- return -EINVAL;
- }
-
- /* If io is allowed for this file handle, return error */
- if (!fh->io_allowed) {
- v4l2_err(&vpfe_dev->v4l2_dev, "fh->io_allowed\n");
- return -EACCES;
- }
-
- /* If streaming is not started, return error */
- if (!vpfe_dev->started) {
- v4l2_err(&vpfe_dev->v4l2_dev, "device started\n");
- return -EINVAL;
- }
-
- ret = mutex_lock_interruptible(&vpfe_dev->lock);
- if (ret)
- return ret;
-
- vpfe_stop_ccdc_capture(vpfe_dev);
- vpfe_detach_irq(vpfe_dev);
-
- sdinfo = vpfe_dev->current_subdev;
- ret = v4l2_device_call_until_err(&vpfe_dev->v4l2_dev, sdinfo->grp_id,
- video, s_stream, 0);
-
- if (ret && (ret != -ENOIOCTLCMD))
- v4l2_err(&vpfe_dev->v4l2_dev, "stream off failed in subdev\n");
- ret = videobuf_streamoff(&vpfe_dev->buffer_queue);
- mutex_unlock(&vpfe_dev->lock);
- return ret;
-}
-
-static int vpfe_g_pixelaspect(struct file *file, void *priv,
- int type, struct v4l2_fract *f)
-{
- struct vpfe_device *vpfe_dev = video_drvdata(file);
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_g_pixelaspect\n");
-
- if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
- return -EINVAL;
- /* If std_index is invalid, then just return (== 1:1 aspect) */
- if (vpfe_dev->std_index >= ARRAY_SIZE(vpfe_standards))
- return 0;
-
- *f = vpfe_standards[vpfe_dev->std_index].pixelaspect;
- return 0;
-}
-
-static int vpfe_g_selection(struct file *file, void *priv,
- struct v4l2_selection *sel)
-{
- struct vpfe_device *vpfe_dev = video_drvdata(file);
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_g_selection\n");
-
- if (sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
- return -EINVAL;
-
- switch (sel->target) {
- case V4L2_SEL_TGT_CROP:
- sel->r = vpfe_dev->crop;
- break;
- case V4L2_SEL_TGT_CROP_DEFAULT:
- case V4L2_SEL_TGT_CROP_BOUNDS:
- sel->r.width = vpfe_standards[vpfe_dev->std_index].width;
- sel->r.height = vpfe_standards[vpfe_dev->std_index].height;
- break;
- default:
- return -EINVAL;
- }
- return 0;
-}
-
-static int vpfe_s_selection(struct file *file, void *priv,
- struct v4l2_selection *sel)
-{
- struct vpfe_device *vpfe_dev = video_drvdata(file);
- struct v4l2_rect rect = sel->r;
- int ret;
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_selection\n");
-
- if (sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
- sel->target != V4L2_SEL_TGT_CROP)
- return -EINVAL;
-
- if (vpfe_dev->started) {
- /* make sure streaming is not started */
- v4l2_err(&vpfe_dev->v4l2_dev,
- "Cannot change crop when streaming is ON\n");
- return -EBUSY;
- }
-
- ret = mutex_lock_interruptible(&vpfe_dev->lock);
- if (ret)
- return ret;
-
- if (rect.top < 0 || rect.left < 0) {
- v4l2_err(&vpfe_dev->v4l2_dev,
- "doesn't support negative values for top & left\n");
- ret = -EINVAL;
- goto unlock_out;
- }
-
- /* adjust the width to 16 pixel boundary */
- rect.width = ((rect.width + 15) & ~0xf);
-
- /* make sure parameters are valid */
- if ((rect.left + rect.width >
- vpfe_dev->std_info.active_pixels) ||
- (rect.top + rect.height >
- vpfe_dev->std_info.active_lines)) {
- v4l2_err(&vpfe_dev->v4l2_dev, "Error in S_SELECTION params\n");
- ret = -EINVAL;
- goto unlock_out;
- }
- ccdc_dev->hw_ops.set_image_window(&rect);
- vpfe_dev->fmt.fmt.pix.width = rect.width;
- vpfe_dev->fmt.fmt.pix.height = rect.height;
- vpfe_dev->fmt.fmt.pix.bytesperline =
- ccdc_dev->hw_ops.get_line_length();
- vpfe_dev->fmt.fmt.pix.sizeimage =
- vpfe_dev->fmt.fmt.pix.bytesperline *
- vpfe_dev->fmt.fmt.pix.height;
- vpfe_dev->crop = rect;
- sel->r = rect;
-unlock_out:
- mutex_unlock(&vpfe_dev->lock);
- return ret;
-}
-
-/* vpfe capture ioctl operations */
-static const struct v4l2_ioctl_ops vpfe_ioctl_ops = {
- .vidioc_querycap = vpfe_querycap,
- .vidioc_g_fmt_vid_cap = vpfe_g_fmt_vid_cap,
- .vidioc_enum_fmt_vid_cap = vpfe_enum_fmt_vid_cap,
- .vidioc_s_fmt_vid_cap = vpfe_s_fmt_vid_cap,
- .vidioc_try_fmt_vid_cap = vpfe_try_fmt_vid_cap,
- .vidioc_enum_input = vpfe_enum_input,
- .vidioc_g_input = vpfe_g_input,
- .vidioc_s_input = vpfe_s_input,
- .vidioc_querystd = vpfe_querystd,
- .vidioc_s_std = vpfe_s_std,
- .vidioc_g_std = vpfe_g_std,
- .vidioc_reqbufs = vpfe_reqbufs,
- .vidioc_querybuf = vpfe_querybuf,
- .vidioc_qbuf = vpfe_qbuf,
- .vidioc_dqbuf = vpfe_dqbuf,
- .vidioc_streamon = vpfe_streamon,
- .vidioc_streamoff = vpfe_streamoff,
- .vidioc_g_pixelaspect = vpfe_g_pixelaspect,
- .vidioc_g_selection = vpfe_g_selection,
- .vidioc_s_selection = vpfe_s_selection,
-};
-
-static struct vpfe_device *vpfe_initialize(void)
-{
- struct vpfe_device *vpfe_dev;
-
- /* Default number of buffers should be 3 */
- if ((numbuffers > 0) &&
- (numbuffers < config_params.min_numbuffers))
- numbuffers = config_params.min_numbuffers;
-
- /*
- * Set buffer size to min buffers size if invalid buffer size is
- * given
- */
- if (bufsize < config_params.min_bufsize)
- bufsize = config_params.min_bufsize;
-
- config_params.numbuffers = numbuffers;
-
- if (numbuffers)
- config_params.device_bufsize = bufsize;
-
- /* Allocate memory for device objects */
- vpfe_dev = kzalloc(sizeof(*vpfe_dev), GFP_KERNEL);
-
- return vpfe_dev;
-}
-
-/*
- * vpfe_probe : This function creates device entries by register
- * itself to the V4L2 driver and initializes fields of each
- * device objects
- */
-static int vpfe_probe(struct platform_device *pdev)
-{
- struct vpfe_subdev_info *sdinfo;
- struct vpfe_config *vpfe_cfg;
- struct resource *res1;
- struct vpfe_device *vpfe_dev;
- struct i2c_adapter *i2c_adap;
- struct video_device *vfd;
- int ret, i, j;
- int num_subdevs = 0;
-
- /* Get the pointer to the device object */
- vpfe_dev = vpfe_initialize();
-
- if (!vpfe_dev) {
- v4l2_err(pdev->dev.driver,
- "Failed to allocate memory for vpfe_dev\n");
- return -ENOMEM;
- }
-
- vpfe_dev->pdev = &pdev->dev;
-
- if (!pdev->dev.platform_data) {
- v4l2_err(pdev->dev.driver, "Unable to get vpfe config\n");
- ret = -ENODEV;
- goto probe_free_dev_mem;
- }
-
- vpfe_cfg = pdev->dev.platform_data;
- vpfe_dev->cfg = vpfe_cfg;
- if (!vpfe_cfg->ccdc || !vpfe_cfg->card_name || !vpfe_cfg->sub_devs) {
- v4l2_err(pdev->dev.driver, "null ptr in vpfe_cfg\n");
- ret = -ENOENT;
- goto probe_free_dev_mem;
- }
-
- /* Allocate memory for ccdc configuration */
- ccdc_cfg = kmalloc(sizeof(*ccdc_cfg), GFP_KERNEL);
- if (!ccdc_cfg) {
- ret = -ENOMEM;
- goto probe_free_dev_mem;
- }
-
- mutex_lock(&ccdc_lock);
-
- strscpy(ccdc_cfg->name, vpfe_cfg->ccdc, sizeof(ccdc_cfg->name));
- /* Get VINT0 irq resource */
- res1 = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!res1) {
- v4l2_err(pdev->dev.driver,
- "Unable to get interrupt for VINT0\n");
- ret = -ENODEV;
- goto probe_free_ccdc_cfg_mem;
- }
- vpfe_dev->ccdc_irq0 = res1->start;
-
- /* Get VINT1 irq resource */
- res1 = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
- if (!res1) {
- v4l2_err(pdev->dev.driver,
- "Unable to get interrupt for VINT1\n");
- ret = -ENODEV;
- goto probe_free_ccdc_cfg_mem;
- }
- vpfe_dev->ccdc_irq1 = res1->start;
-
- ret = request_irq(vpfe_dev->ccdc_irq0, vpfe_isr, 0,
- "vpfe_capture0", vpfe_dev);
-
- if (0 != ret) {
- v4l2_err(pdev->dev.driver, "Unable to request interrupt\n");
- goto probe_free_ccdc_cfg_mem;
- }
-
- vfd = &vpfe_dev->video_dev;
- /* Initialize field of video device */
- vfd->release = video_device_release_empty;
- vfd->fops = &vpfe_fops;
- vfd->ioctl_ops = &vpfe_ioctl_ops;
- vfd->tvnorms = 0;
- vfd->v4l2_dev = &vpfe_dev->v4l2_dev;
- vfd->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
- snprintf(vfd->name, sizeof(vfd->name),
- "%s_V%d.%d.%d",
- CAPTURE_DRV_NAME,
- (VPFE_CAPTURE_VERSION_CODE >> 16) & 0xff,
- (VPFE_CAPTURE_VERSION_CODE >> 8) & 0xff,
- (VPFE_CAPTURE_VERSION_CODE) & 0xff);
-
- ret = v4l2_device_register(&pdev->dev, &vpfe_dev->v4l2_dev);
- if (ret) {
- v4l2_err(pdev->dev.driver,
- "Unable to register v4l2 device.\n");
- goto probe_out_release_irq;
- }
- v4l2_info(&vpfe_dev->v4l2_dev, "v4l2 device registered\n");
- spin_lock_init(&vpfe_dev->irqlock);
- spin_lock_init(&vpfe_dev->dma_queue_lock);
- mutex_init(&vpfe_dev->lock);
-
- /* Initialize field of the device objects */
- vpfe_dev->numbuffers = config_params.numbuffers;
-
- /* register video device */
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
- "trying to register vpfe device.\n");
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
- "video_dev=%p\n", &vpfe_dev->video_dev);
- vpfe_dev->fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- ret = video_register_device(&vpfe_dev->video_dev,
- VFL_TYPE_VIDEO, -1);
-
- if (ret) {
- v4l2_err(pdev->dev.driver,
- "Unable to register video device.\n");
- goto probe_out_v4l2_unregister;
- }
-
- v4l2_info(&vpfe_dev->v4l2_dev, "video device registered\n");
- /* set the driver data in platform device */
- platform_set_drvdata(pdev, vpfe_dev);
- /* set driver private data */
- video_set_drvdata(&vpfe_dev->video_dev, vpfe_dev);
- i2c_adap = i2c_get_adapter(vpfe_cfg->i2c_adapter_id);
- num_subdevs = vpfe_cfg->num_subdevs;
- vpfe_dev->sd = kmalloc_array(num_subdevs,
- sizeof(*vpfe_dev->sd),
- GFP_KERNEL);
- if (!vpfe_dev->sd) {
- ret = -ENOMEM;
- goto probe_out_video_unregister;
- }
-
- for (i = 0; i < num_subdevs; i++) {
- struct v4l2_input *inps;
-
- sdinfo = &vpfe_cfg->sub_devs[i];
-
- /* Load up the subdevice */
- vpfe_dev->sd[i] =
- v4l2_i2c_new_subdev_board(&vpfe_dev->v4l2_dev,
- i2c_adap,
- &sdinfo->board_info,
- NULL);
- if (vpfe_dev->sd[i]) {
- v4l2_info(&vpfe_dev->v4l2_dev,
- "v4l2 sub device %s registered\n",
- sdinfo->name);
- vpfe_dev->sd[i]->grp_id = sdinfo->grp_id;
- /* update tvnorms from the sub devices */
- for (j = 0; j < sdinfo->num_inputs; j++) {
- inps = &sdinfo->inputs[j];
- vfd->tvnorms |= inps->std;
- }
- } else {
- v4l2_info(&vpfe_dev->v4l2_dev,
- "v4l2 sub device %s register fails\n",
- sdinfo->name);
- ret = -ENXIO;
- goto probe_sd_out;
- }
- }
-
- /* set first sub device as current one */
- vpfe_dev->current_subdev = &vpfe_cfg->sub_devs[0];
- vpfe_dev->v4l2_dev.ctrl_handler = vpfe_dev->sd[0]->ctrl_handler;
-
- /* We have at least one sub device to work with */
- mutex_unlock(&ccdc_lock);
- return 0;
-
-probe_sd_out:
- kfree(vpfe_dev->sd);
-probe_out_video_unregister:
- video_unregister_device(&vpfe_dev->video_dev);
-probe_out_v4l2_unregister:
- v4l2_device_unregister(&vpfe_dev->v4l2_dev);
-probe_out_release_irq:
- free_irq(vpfe_dev->ccdc_irq0, vpfe_dev);
-probe_free_ccdc_cfg_mem:
- kfree(ccdc_cfg);
- mutex_unlock(&ccdc_lock);
-probe_free_dev_mem:
- kfree(vpfe_dev);
- return ret;
-}
-
-/*
- * vpfe_remove : It un-register device from V4L2 driver
- */
-static int vpfe_remove(struct platform_device *pdev)
-{
- struct vpfe_device *vpfe_dev = platform_get_drvdata(pdev);
-
- v4l2_info(pdev->dev.driver, "vpfe_remove\n");
-
- free_irq(vpfe_dev->ccdc_irq0, vpfe_dev);
- kfree(vpfe_dev->sd);
- v4l2_device_unregister(&vpfe_dev->v4l2_dev);
- video_unregister_device(&vpfe_dev->video_dev);
- kfree(vpfe_dev);
- kfree(ccdc_cfg);
- return 0;
-}
-
-static int vpfe_suspend(struct device *dev)
-{
- return 0;
-}
-
-static int vpfe_resume(struct device *dev)
-{
- return 0;
-}
-
-static const struct dev_pm_ops vpfe_dev_pm_ops = {
- .suspend = vpfe_suspend,
- .resume = vpfe_resume,
-};
-
-static struct platform_driver vpfe_driver = {
- .driver = {
- .name = CAPTURE_DRV_NAME,
- .pm = &vpfe_dev_pm_ops,
- },
- .probe = vpfe_probe,
- .remove = vpfe_remove,
-};
-
-module_platform_driver(vpfe_driver);
diff --git a/drivers/staging/pi433/TODO b/drivers/staging/pi433/TODO
index 5cf3fd99d521..23c808fc99de 100644
--- a/drivers/staging/pi433/TODO
+++ b/drivers/staging/pi433/TODO
@@ -3,3 +3,6 @@
* Some missing data (marked with ###) needs to be added in the documentation
* Change (struct pi433_tx_cfg)->bit_rate to be a u32 so that we can support
bit rates up to 300kbps per the spec.
+ -> This configuration needs to be moved to sysfs instead of being done through
+ IOCTL. Going forward, we need to port userspace tools to use sysfs instead
+ of IOCTL and then we would delete IOCTL.
diff --git a/drivers/staging/pi433/pi433_if.c b/drivers/staging/pi433/pi433_if.c
index d4e06a3929f3..b59f6a4cb611 100644
--- a/drivers/staging/pi433/pi433_if.c
+++ b/drivers/staging/pi433/pi433_if.c
@@ -55,6 +55,7 @@
static dev_t pi433_dev;
static DEFINE_IDR(pi433_idr);
static DEFINE_MUTEX(minor_lock); /* Protect idr accesses */
+static struct dentry *root_dir; /* debugfs root directory for the driver */
static struct class *pi433_class; /* mainly for udev to create /dev/pi433 */
@@ -1306,8 +1307,7 @@ static int pi433_probe(struct spi_device *spi)
/* spi setup */
spi_set_drvdata(spi, device);
- entry = debugfs_create_dir(dev_name(device->dev),
- debugfs_lookup(KBUILD_MODNAME, NULL));
+ entry = debugfs_create_dir(dev_name(device->dev), root_dir);
debugfs_create_file("regs", 0400, entry, device, &pi433_debugfs_regs_fops);
return 0;
@@ -1333,9 +1333,8 @@ RX_failed:
static void pi433_remove(struct spi_device *spi)
{
struct pi433_device *device = spi_get_drvdata(spi);
- struct dentry *mod_entry = debugfs_lookup(KBUILD_MODNAME, NULL);
- debugfs_remove(debugfs_lookup(dev_name(device->dev), mod_entry));
+ debugfs_lookup_and_remove(dev_name(device->dev), root_dir);
/* free GPIOs */
free_gpio(device);
@@ -1408,7 +1407,7 @@ static int __init pi433_init(void)
return PTR_ERR(pi433_class);
}
- debugfs_create_dir(KBUILD_MODNAME, NULL);
+ root_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
status = spi_register_driver(&pi433_spi_driver);
if (status < 0) {
@@ -1427,7 +1426,7 @@ static void __exit pi433_exit(void)
spi_unregister_driver(&pi433_spi_driver);
class_destroy(pi433_class);
unregister_chrdev(MAJOR(pi433_dev), pi433_spi_driver.driver.name);
- debugfs_remove_recursive(debugfs_lookup(KBUILD_MODNAME, NULL));
+ debugfs_remove(root_dir);
}
module_exit(pi433_exit);
diff --git a/drivers/staging/r8188eu/core/rtw_cmd.c b/drivers/staging/r8188eu/core/rtw_cmd.c
index 19b2f73d481d..ca9e3d4ee7f4 100644
--- a/drivers/staging/r8188eu/core/rtw_cmd.c
+++ b/drivers/staging/r8188eu/core/rtw_cmd.c
@@ -28,32 +28,6 @@ void rtw_free_evt_priv(struct evt_priv *pevtpriv)
}
}
-/* Calling Context:
- *
- * rtw_enqueue_cmd can only be called between kernel thread,
- * since only spin_lock is used.
- *
- * ISR/Call-Back functions can't call this sub-function.
- */
-
-static int _rtw_enqueue_cmd(struct __queue *queue, struct cmd_obj *obj)
-{
- unsigned long flags;
-
- if (!obj)
- goto exit;
-
- spin_lock_irqsave(&queue->lock, flags);
-
- list_add_tail(&obj->list, &queue->queue);
-
- spin_unlock_irqrestore(&queue->lock, flags);
-
-exit:
-
- return _SUCCESS;
-}
-
int rtw_init_cmd_priv(struct cmd_priv *pcmdpriv)
{
init_completion(&pcmdpriv->enqueue_cmd);
@@ -65,8 +39,6 @@ int rtw_init_cmd_priv(struct cmd_priv *pcmdpriv)
/* allocate DMA-able/Non-Page memory for cmd_buf and rsp_buf */
- pcmdpriv->cmd_seq = 1;
-
pcmdpriv->cmd_allocated_buf = kzalloc(MAX_CMDSZ + CMDBUFF_ALIGN_SZ,
GFP_KERNEL);
@@ -127,28 +99,25 @@ static int rtw_cmd_filter(struct cmd_priv *pcmdpriv, struct cmd_obj *cmd_obj)
u32 rtw_enqueue_cmd(struct cmd_priv *pcmdpriv, struct cmd_obj *cmd_obj)
{
- int res = _FAIL;
+ unsigned long flags;
struct adapter *padapter = pcmdpriv->padapter;
if (!cmd_obj)
- goto exit;
+ return _FAIL;
cmd_obj->padapter = padapter;
- res = rtw_cmd_filter(pcmdpriv, cmd_obj);
- if (res == _FAIL) {
+ if (rtw_cmd_filter(pcmdpriv, cmd_obj) == _FAIL) {
rtw_free_cmd_obj(cmd_obj);
- goto exit;
+ return _FAIL;
}
- res = _rtw_enqueue_cmd(&pcmdpriv->cmd_queue, cmd_obj);
-
- if (res == _SUCCESS)
- complete(&pcmdpriv->enqueue_cmd);
-
-exit:
+ spin_lock_irqsave(&pcmdpriv->cmd_queue.lock, flags);
+ list_add_tail(&cmd_obj->list, &pcmdpriv->cmd_queue.queue);
+ spin_unlock_irqrestore(&pcmdpriv->cmd_queue.lock, flags);
- return res;
+ complete(&pcmdpriv->enqueue_cmd);
+ return _SUCCESS;
}
struct cmd_obj *rtw_dequeue_cmd(struct cmd_priv *pcmdpriv)
@@ -233,8 +202,6 @@ _next:
ret = cmd_hdl(pcmd->padapter, pcmdbuf);
pcmd->res = ret;
}
-
- pcmdpriv->cmd_seq++;
} else {
pcmd->res = H2C_PARAMETERS_ERROR;
}
@@ -1201,24 +1168,20 @@ static void rtw_chk_hi_queue_hdl(struct adapter *padapter)
}
}
-u8 rtw_chk_hi_queue_cmd(struct adapter *padapter)
+void rtw_chk_hi_queue_cmd(struct adapter *padapter)
{
struct cmd_obj *ph2c;
struct drvextra_cmd_parm *pdrvextra_cmd_parm;
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
- u8 res = _SUCCESS;
ph2c = kzalloc(sizeof(*ph2c), GFP_ATOMIC);
- if (!ph2c) {
- res = _FAIL;
- goto exit;
- }
+ if (!ph2c)
+ return;
pdrvextra_cmd_parm = kzalloc(sizeof(*pdrvextra_cmd_parm), GFP_ATOMIC);
if (!pdrvextra_cmd_parm) {
kfree(ph2c);
- res = _FAIL;
- goto exit;
+ return;
}
pdrvextra_cmd_parm->ec_id = CHECK_HIQ_WK_CID;
@@ -1227,9 +1190,7 @@ u8 rtw_chk_hi_queue_cmd(struct adapter *padapter)
init_h2fwcmd_w_parm_no_rsp(ph2c, pdrvextra_cmd_parm, GEN_CMD_CODE(_Set_Drv_Extra));
- res = rtw_enqueue_cmd(pcmdpriv, ph2c);
-exit:
- return res;
+ rtw_enqueue_cmd(pcmdpriv, ph2c);
}
u8 rtw_c2h_wk_cmd(struct adapter *padapter, u8 *c2h_evt)
diff --git a/drivers/staging/r8188eu/core/rtw_fw.c b/drivers/staging/r8188eu/core/rtw_fw.c
index 682c65b1e04c..1e4baf74ecd5 100644
--- a/drivers/staging/r8188eu/core/rtw_fw.c
+++ b/drivers/staging/r8188eu/core/rtw_fw.c
@@ -89,9 +89,8 @@ static int block_write(struct adapter *padapter, u8 *buffer, u32 size)
addr = FW_8188E_START_ADDRESS + i * block_size;
data = buffer + i * block_size;
- ret = rtw_writeN(padapter, addr, block_size, data);
- if (ret == _FAIL)
- goto exit;
+ if (rtw_writeN(padapter, addr, block_size, data))
+ return _FAIL;
}
if (remain) {
@@ -105,9 +104,8 @@ static int block_write(struct adapter *padapter, u8 *buffer, u32 size)
addr = FW_8188E_START_ADDRESS + offset + i * block_size;
data = buffer + offset + i * block_size;
- ret = rtw_writeN(padapter, addr, block_size, data);
- if (ret == _FAIL)
- goto exit;
+ if (rtw_writeN(padapter, addr, block_size, data))
+ return _FAIL;
}
}
diff --git a/drivers/staging/r8188eu/core/rtw_mlme.c b/drivers/staging/r8188eu/core/rtw_mlme.c
index b272123626ac..fb7d0e161fdd 100644
--- a/drivers/staging/r8188eu/core/rtw_mlme.c
+++ b/drivers/staging/r8188eu/core/rtw_mlme.c
@@ -444,8 +444,6 @@ static void update_current_network(struct adapter *adapter, struct wlan_bssid_ex
if (check_fwstate(pmlmepriv, _FW_LINKED) &&
is_same_network(&pmlmepriv->cur_network.network, pnetwork)) {
update_network(&pmlmepriv->cur_network.network, pnetwork, adapter, true);
- rtw_update_protection(adapter, (pmlmepriv->cur_network.network.IEs) + sizeof(struct ndis_802_11_fixed_ie),
- pmlmepriv->cur_network.network.IELength);
}
}
@@ -1027,9 +1025,6 @@ static void rtw_joinbss_update_network(struct adapter *padapter, struct wlan_net
break;
}
- rtw_update_protection(padapter, (cur_network->network.IEs) +
- sizeof(struct ndis_802_11_fixed_ie),
- (cur_network->network.IELength));
rtw_update_ht_cap(padapter, cur_network->network.IEs, cur_network->network.IELength);
}
diff --git a/drivers/staging/r8188eu/core/rtw_mlme_ext.c b/drivers/staging/r8188eu/core/rtw_mlme_ext.c
index 1b9cf7596a76..dc181e491b34 100644
--- a/drivers/staging/r8188eu/core/rtw_mlme_ext.c
+++ b/drivers/staging/r8188eu/core/rtw_mlme_ext.c
@@ -3735,35 +3735,18 @@ static unsigned int on_action_public_p2p(struct recv_frame *precv_frame)
return _SUCCESS;
}
-static void on_action_public_vendor(struct recv_frame *precv_frame)
-{
- u8 *pframe = precv_frame->rx_data;
- u8 *frame_body = pframe + sizeof(struct ieee80211_hdr_3addr);
-
- if (!memcmp(frame_body + 2, P2P_OUI, 4))
- on_action_public_p2p(precv_frame);
-}
-
-static void on_action_public_default(struct recv_frame *precv_frame)
-{
- u8 *pframe = precv_frame->rx_data;
- u8 *frame_body = pframe + sizeof(struct ieee80211_hdr_3addr);
- u8 token;
-
- token = frame_body[2];
-
- rtw_action_public_decache(precv_frame, token);
-}
-
static void on_action_public(struct adapter *padapter, struct recv_frame *precv_frame)
{
struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)precv_frame->rx_data;
+ u8 *frame_body = (u8 *)&mgmt->u;
/* All members of the action enum start with action_code. */
- if (mgmt->u.action.u.s1g.action_code == WLAN_PUB_ACTION_VENDOR_SPECIFIC)
- on_action_public_vendor(precv_frame);
- else
- on_action_public_default(precv_frame);
+ if (mgmt->u.action.u.s1g.action_code == WLAN_PUB_ACTION_VENDOR_SPECIFIC) {
+ if (!memcmp(frame_body + 2, P2P_OUI, 4))
+ on_action_public_p2p(precv_frame);
+ } else {
+ rtw_action_public_decache(precv_frame, frame_body[2]);
+ }
}
static void OnAction_p2p(struct adapter *padapter, struct recv_frame *precv_frame)
diff --git a/drivers/staging/r8188eu/core/rtw_pwrctrl.c b/drivers/staging/r8188eu/core/rtw_pwrctrl.c
index 5290ac36f08c..051cdcb11ff5 100644
--- a/drivers/staging/r8188eu/core/rtw_pwrctrl.c
+++ b/drivers/staging/r8188eu/core/rtw_pwrctrl.c
@@ -8,7 +8,7 @@
#include "../include/osdep_intf.h"
#include "../include/linux/usb.h"
-void ips_enter(struct adapter *padapter)
+static void ips_enter(struct adapter *padapter)
{
struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
struct xmit_priv *pxmit_priv = &padapter->xmitpriv;
@@ -25,21 +25,20 @@ void ips_enter(struct adapter *padapter)
pwrpriv->ips_mode = pwrpriv->ips_mode_req;
pwrpriv->ips_enter_cnts++;
- if (rf_off == pwrpriv->change_rfpwrstate) {
- pwrpriv->bpower_saving = true;
+ pwrpriv->bpower_saving = true;
- if (pwrpriv->ips_mode == IPS_LEVEL_2)
- pwrpriv->bkeepfwalive = true;
+ if (pwrpriv->ips_mode == IPS_LEVEL_2)
+ pwrpriv->bkeepfwalive = true;
+
+ rtw_ips_pwr_down(padapter);
+ pwrpriv->rf_pwrstate = rf_off;
- rtw_ips_pwr_down(padapter);
- pwrpriv->rf_pwrstate = rf_off;
- }
pwrpriv->bips_processing = false;
mutex_unlock(&pwrpriv->lock);
}
-int ips_leave(struct adapter *padapter)
+static int ips_leave(struct adapter *padapter)
{
struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
struct security_priv *psecuritypriv = &padapter->securitypriv;
@@ -51,7 +50,6 @@ int ips_leave(struct adapter *padapter)
if ((pwrpriv->rf_pwrstate == rf_off) && (!pwrpriv->bips_processing)) {
pwrpriv->bips_processing = true;
- pwrpriv->change_rfpwrstate = rf_on;
pwrpriv->ips_leave_cnts++;
result = rtw_ips_pwr_up(padapter);
@@ -133,10 +131,9 @@ void rtw_ps_processor(struct adapter *padapter)
if (!rtw_pwr_unassociated_idle(padapter))
goto exit;
- if (pwrpriv->rf_pwrstate == rf_on) {
- pwrpriv->change_rfpwrstate = rf_off;
+ if (pwrpriv->rf_pwrstate == rf_on)
ips_enter(padapter);
- }
+
exit:
rtw_set_pwr_state_check_timer(&padapter->pwrctrlpriv);
pwrpriv->ps_processing = false;
diff --git a/drivers/staging/r8188eu/core/rtw_recv.c b/drivers/staging/r8188eu/core/rtw_recv.c
index 631c500dda42..fc7568cf948b 100644
--- a/drivers/staging/r8188eu/core/rtw_recv.c
+++ b/drivers/staging/r8188eu/core/rtw_recv.c
@@ -38,7 +38,7 @@ void _rtw_init_sta_recv_priv(struct sta_recv_priv *psta_recvpriv)
static int rtl8188eu_init_recv_priv(struct adapter *padapter)
{
struct recv_priv *precvpriv = &padapter->recvpriv;
- int i, res = _SUCCESS;
+ int i, err = 0;
struct recv_buf *precvbuf;
tasklet_init(&precvpriv->recv_tasklet,
@@ -50,10 +50,8 @@ static int rtl8188eu_init_recv_priv(struct adapter *padapter)
precvpriv->pallocated_recv_buf = kzalloc(NR_RECVBUFF * sizeof(struct recv_buf) + 4,
GFP_KERNEL);
- if (!precvpriv->pallocated_recv_buf) {
- res = _FAIL;
- goto exit;
- }
+ if (!precvpriv->pallocated_recv_buf)
+ return -ENOMEM;
precvpriv->precv_buf = (u8 *)ALIGN((size_t)(precvpriv->pallocated_recv_buf), 4);
@@ -64,7 +62,7 @@ static int rtl8188eu_init_recv_priv(struct adapter *padapter)
precvbuf->reuse = false;
precvbuf->purb = usb_alloc_urb(0, GFP_KERNEL);
if (!precvbuf->purb) {
- res = _FAIL;
+ err = -ENOMEM;
break;
}
precvbuf->adapter = padapter;
@@ -94,17 +92,15 @@ static int rtl8188eu_init_recv_priv(struct adapter *padapter)
pskb = NULL;
}
}
-exit:
- return res;
+
+ return err;
}
int _rtw_init_recv_priv(struct recv_priv *precvpriv, struct adapter *padapter)
{
int i;
-
struct recv_frame *precvframe;
-
- int res = _SUCCESS;
+ int err;
spin_lock_init(&precvpriv->lock);
@@ -117,11 +113,8 @@ int _rtw_init_recv_priv(struct recv_priv *precvpriv, struct adapter *padapter)
precvpriv->free_recvframe_cnt = NR_RECVFRAME;
precvpriv->pallocated_frame_buf = vzalloc(NR_RECVFRAME * sizeof(struct recv_frame) + RXFRAME_ALIGN_SZ);
-
- if (!precvpriv->pallocated_frame_buf) {
- res = _FAIL;
- goto exit;
- }
+ if (!precvpriv->pallocated_frame_buf)
+ return -ENOMEM;
precvpriv->precv_frame_buf = (u8 *)ALIGN((size_t)(precvpriv->pallocated_frame_buf), RXFRAME_ALIGN_SZ);
@@ -141,15 +134,14 @@ int _rtw_init_recv_priv(struct recv_priv *precvpriv, struct adapter *padapter)
}
precvpriv->rx_pending_cnt = 1;
- res = rtl8188eu_init_recv_priv(padapter);
+ err = rtl8188eu_init_recv_priv(padapter);
timer_setup(&precvpriv->signal_stat_timer, rtw_signal_stat_timer_hdl, 0);
precvpriv->signal_stat_sampling_interval = 1000; /* ms */
rtw_set_signal_stat_timer(precvpriv);
-exit:
- return res;
+ return err;
}
static void rtl8188eu_free_recv_priv(struct adapter *padapter)
diff --git a/drivers/staging/r8188eu/core/rtw_sta_mgt.c b/drivers/staging/r8188eu/core/rtw_sta_mgt.c
index b4aee8623099..e1ae1859686e 100644
--- a/drivers/staging/r8188eu/core/rtw_sta_mgt.c
+++ b/drivers/staging/r8188eu/core/rtw_sta_mgt.c
@@ -260,22 +260,22 @@ void rtw_free_stainfo(struct adapter *padapter, struct sta_info *psta)
spin_lock_bh(&pxmitpriv->lock);
- rtw_free_xmitframe_queue(pxmitpriv, &psta->sleep_q);
+ rtw_free_xmitframe_list(pxmitpriv, get_list_head(&psta->sleep_q));
psta->sleepq_len = 0;
- rtw_free_xmitframe_queue(pxmitpriv, &pstaxmitpriv->vo_q.sta_pending);
+ rtw_free_xmitframe_list(pxmitpriv, &pstaxmitpriv->vo_q.sta_pending);
list_del_init(&pstaxmitpriv->vo_q.tx_pending);
- rtw_free_xmitframe_queue(pxmitpriv, &pstaxmitpriv->vi_q.sta_pending);
+ rtw_free_xmitframe_list(pxmitpriv, &pstaxmitpriv->vi_q.sta_pending);
list_del_init(&pstaxmitpriv->vi_q.tx_pending);
- rtw_free_xmitframe_queue(pxmitpriv, &pstaxmitpriv->bk_q.sta_pending);
+ rtw_free_xmitframe_list(pxmitpriv, &pstaxmitpriv->bk_q.sta_pending);
list_del_init(&pstaxmitpriv->bk_q.tx_pending);
- rtw_free_xmitframe_queue(pxmitpriv, &pstaxmitpriv->be_q.sta_pending);
+ rtw_free_xmitframe_list(pxmitpriv, &pstaxmitpriv->be_q.sta_pending);
list_del_init(&pstaxmitpriv->be_q.tx_pending);
@@ -391,8 +391,7 @@ void rtw_free_all_stainfo(struct adapter *padapter)
/* any station allocated can be searched by hash list */
struct sta_info *rtw_get_stainfo(struct sta_priv *pstapriv, u8 *hwaddr)
{
- struct list_head *plist, *phead;
- struct sta_info *psta = NULL;
+ struct sta_info *ploop, *psta = NULL;
u32 index;
u8 *addr;
u8 bc_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
@@ -409,18 +408,11 @@ struct sta_info *rtw_get_stainfo(struct sta_priv *pstapriv, u8 *hwaddr)
spin_lock_bh(&pstapriv->sta_hash_lock);
- phead = &pstapriv->sta_hash[index];
- plist = phead->next;
-
- while (phead != plist) {
- psta = container_of(plist, struct sta_info, hash_list);
-
- if ((!memcmp(psta->hwaddr, addr, ETH_ALEN))) {
- /* if found the matched address */
+ list_for_each_entry(ploop, &pstapriv->sta_hash[index], hash_list) {
+ if (!memcmp(ploop->hwaddr, addr, ETH_ALEN)) {
+ psta = ploop;
break;
}
- psta = NULL;
- plist = plist->next;
}
spin_unlock_bh(&pstapriv->sta_hash_lock);
diff --git a/drivers/staging/r8188eu/core/rtw_xmit.c b/drivers/staging/r8188eu/core/rtw_xmit.c
index 34494f08c0cd..df88b3e29e77 100644
--- a/drivers/staging/r8188eu/core/rtw_xmit.c
+++ b/drivers/staging/r8188eu/core/rtw_xmit.c
@@ -17,7 +17,7 @@ static u8 RFC1042_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0x00 };
static void _init_txservq(struct tx_servq *ptxservq)
{
INIT_LIST_HEAD(&ptxservq->tx_pending);
- rtw_init_queue(&ptxservq->sta_pending);
+ INIT_LIST_HEAD(&ptxservq->sta_pending);
ptxservq->qcnt = 0;
}
@@ -29,8 +29,6 @@ void _rtw_init_sta_xmit_priv(struct sta_xmit_priv *psta_xmitpriv)
_init_txservq(&psta_xmitpriv->bk_q);
_init_txservq(&psta_xmitpriv->vi_q);
_init_txservq(&psta_xmitpriv->vo_q);
- INIT_LIST_HEAD(&psta_xmitpriv->legacy_dz);
- INIT_LIST_HEAD(&psta_xmitpriv->apsd);
}
static int rtw_xmit_resource_alloc(struct adapter *padapter, struct xmit_buf *pxmitbuf,
@@ -41,7 +39,6 @@ static int rtw_xmit_resource_alloc(struct adapter *padapter, struct xmit_buf *px
return -ENOMEM;
pxmitbuf->pbuf = (u8 *)ALIGN((size_t)(pxmitbuf->pallocated_buf), XMITBUF_ALIGN_SZ);
- pxmitbuf->dma_transfer_addr = 0;
pxmitbuf->pxmit_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!pxmitbuf->pxmit_urb) {
@@ -70,7 +67,6 @@ int _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
/* We don't need to memset padapter->XXX to zero, because adapter is allocated by vzalloc(). */
spin_lock_init(&pxmitpriv->lock);
- sema_init(&pxmitpriv->terminate_xmitthread_sema, 0);
/*
* Please insert all the queue initializaiton using rtw_init_queue below
@@ -78,11 +74,10 @@ int _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
pxmitpriv->adapter = padapter;
- rtw_init_queue(&pxmitpriv->be_pending);
- rtw_init_queue(&pxmitpriv->bk_pending);
- rtw_init_queue(&pxmitpriv->vi_pending);
- rtw_init_queue(&pxmitpriv->vo_pending);
- rtw_init_queue(&pxmitpriv->bm_pending);
+ INIT_LIST_HEAD(&pxmitpriv->be_pending);
+ INIT_LIST_HEAD(&pxmitpriv->bk_pending);
+ INIT_LIST_HEAD(&pxmitpriv->vi_pending);
+ INIT_LIST_HEAD(&pxmitpriv->vo_pending);
rtw_init_queue(&pxmitpriv->free_xmit_queue);
@@ -153,7 +148,7 @@ int _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
goto free_xmitbuf;
}
- pxmitbuf->flags = XMIT_VO_QUEUE;
+ pxmitbuf->high_queue = false;
list_add_tail(&pxmitbuf->list, &pxmitpriv->free_xmitbuf_queue.queue);
pxmitbuf++;
@@ -192,26 +187,14 @@ int _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
if (rtw_alloc_hwxmits(padapter))
goto free_xmit_extbuf;
- rtw_init_hwxmits(pxmitpriv->hwxmits, pxmitpriv->hwxmit_entry);
-
for (i = 0; i < 4; i++)
pxmitpriv->wmm_para_seq[i] = i;
- pxmitpriv->txirp_cnt = 1;
-
- sema_init(&pxmitpriv->tx_retevt, 0);
-
- /* per AC pending irp */
- pxmitpriv->beq_cnt = 0;
- pxmitpriv->bkq_cnt = 0;
- pxmitpriv->viq_cnt = 0;
- pxmitpriv->voq_cnt = 0;
-
pxmitpriv->ack_tx = false;
mutex_init(&pxmitpriv->ack_tx_mutex);
rtw_sctx_init(&pxmitpriv->ack_tx_ops, 0);
- rtl8188eu_init_xmit_priv(padapter);
+ tasklet_init(&pxmitpriv->xmit_tasklet, rtl8188eu_xmit_tasklet, (unsigned long)padapter);
return 0;
@@ -296,7 +279,7 @@ void _rtw_free_xmit_priv(struct xmit_priv *pxmitpriv)
vfree(pxmitpriv->pallocated_xmit_extbuf);
- rtw_free_hwxmits(padapter);
+ kfree(pxmitpriv->hwxmits);
mutex_destroy(&pxmitpriv->ack_tx_mutex);
}
@@ -761,28 +744,27 @@ static s32 xmitframe_addmic(struct adapter *padapter, struct xmit_frame *pxmitfr
return _SUCCESS;
}
-static s32 xmitframe_swencrypt(struct adapter *padapter, struct xmit_frame *pxmitframe)
+static void xmitframe_swencrypt(struct adapter *padapter, struct xmit_frame *pxmitframe)
{
struct pkt_attrib *pattrib = &pxmitframe->attrib;
- if (pattrib->bswenc) {
- switch (pattrib->encrypt) {
- case _WEP40_:
- case _WEP104_:
- rtw_wep_encrypt(padapter, pxmitframe);
- break;
- case _TKIP_:
- rtw_tkip_encrypt(padapter, pxmitframe);
- break;
- case _AES_:
- rtw_aes_encrypt(padapter, pxmitframe);
- break;
- default:
- break;
- }
- }
+ if (!pattrib->bswenc)
+ return;
- return _SUCCESS;
+ switch (pattrib->encrypt) {
+ case _WEP40_:
+ case _WEP104_:
+ rtw_wep_encrypt(padapter, pxmitframe);
+ break;
+ case _TKIP_:
+ rtw_tkip_encrypt(padapter, pxmitframe);
+ break;
+ case _AES_:
+ rtw_aes_encrypt(padapter, pxmitframe);
+ break;
+ default:
+ break;
+ }
}
s32 rtw_make_wlanhdr(struct adapter *padapter, u8 *hdr, struct pkt_attrib *pattrib)
@@ -792,9 +774,7 @@ s32 rtw_make_wlanhdr(struct adapter *padapter, u8 *hdr, struct pkt_attrib *pattr
struct ieee80211_hdr *pwlanhdr = (struct ieee80211_hdr *)hdr;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct qos_priv *pqospriv = &pmlmepriv->qospriv;
- u8 qos_option = false;
-
- int res = _SUCCESS;
+ bool qos_option;
__le16 *fctrl = &pwlanhdr->frame_control;
struct sta_info *psta;
@@ -810,106 +790,99 @@ s32 rtw_make_wlanhdr(struct adapter *padapter, u8 *hdr, struct pkt_attrib *pattr
SetFrameSubType(fctrl, pattrib->subtype);
- if (pattrib->subtype & IEEE80211_FTYPE_DATA) {
- if (check_fwstate(pmlmepriv, WIFI_STATION_STATE)) {
- /* to_ds = 1, fr_ds = 0; */
- /* Data transfer to AP */
- SetToDs(fctrl);
- memcpy(pwlanhdr->addr1, get_bssid(pmlmepriv), ETH_ALEN);
- memcpy(pwlanhdr->addr2, pattrib->src, ETH_ALEN);
- memcpy(pwlanhdr->addr3, pattrib->dst, ETH_ALEN);
-
- if (pqospriv->qos_option)
- qos_option = true;
- } else if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
- /* to_ds = 0, fr_ds = 1; */
- SetFrDs(fctrl);
- memcpy(pwlanhdr->addr1, pattrib->dst, ETH_ALEN);
- memcpy(pwlanhdr->addr2, get_bssid(pmlmepriv), ETH_ALEN);
- memcpy(pwlanhdr->addr3, pattrib->src, ETH_ALEN);
-
- if (psta->qos_option)
- qos_option = true;
- } else if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) ||
- check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)) {
- memcpy(pwlanhdr->addr1, pattrib->dst, ETH_ALEN);
- memcpy(pwlanhdr->addr2, pattrib->src, ETH_ALEN);
- memcpy(pwlanhdr->addr3, get_bssid(pmlmepriv), ETH_ALEN);
-
- if (psta->qos_option)
- qos_option = true;
- } else {
- res = _FAIL;
- goto exit;
- }
+ if (!(pattrib->subtype & IEEE80211_FTYPE_DATA))
+ return _SUCCESS;
+
+ if (check_fwstate(pmlmepriv, WIFI_STATION_STATE)) {
+ /* to_ds = 1, fr_ds = 0; */
+ /* Data transfer to AP */
+ SetToDs(fctrl);
+ memcpy(pwlanhdr->addr1, get_bssid(pmlmepriv), ETH_ALEN);
+ memcpy(pwlanhdr->addr2, pattrib->src, ETH_ALEN);
+ memcpy(pwlanhdr->addr3, pattrib->dst, ETH_ALEN);
+ qos_option = pqospriv->qos_option;
+ } else if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
+ /* to_ds = 0, fr_ds = 1; */
+ SetFrDs(fctrl);
+ memcpy(pwlanhdr->addr1, pattrib->dst, ETH_ALEN);
+ memcpy(pwlanhdr->addr2, get_bssid(pmlmepriv), ETH_ALEN);
+ memcpy(pwlanhdr->addr3, pattrib->src, ETH_ALEN);
+ qos_option = psta->qos_option;
+ } else if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) ||
+ check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)) {
+ memcpy(pwlanhdr->addr1, pattrib->dst, ETH_ALEN);
+ memcpy(pwlanhdr->addr2, pattrib->src, ETH_ALEN);
+ memcpy(pwlanhdr->addr3, get_bssid(pmlmepriv), ETH_ALEN);
+ qos_option = psta->qos_option;
+ } else {
+ return _FAIL;
+ }
- if (pattrib->mdata)
- SetMData(fctrl);
+ if (pattrib->mdata)
+ SetMData(fctrl);
- if (pattrib->encrypt)
- SetPrivacy(fctrl);
+ if (pattrib->encrypt)
+ SetPrivacy(fctrl);
- if (qos_option) {
- qc = (unsigned short *)(hdr + pattrib->hdrlen - 2);
+ if (qos_option) {
+ qc = (unsigned short *)(hdr + pattrib->hdrlen - 2);
- if (pattrib->priority)
- SetPriority(qc, pattrib->priority);
+ if (pattrib->priority)
+ SetPriority(qc, pattrib->priority);
- SetEOSP(qc, pattrib->eosp);
+ SetEOSP(qc, pattrib->eosp);
- SetAckpolicy(qc, pattrib->ack_policy);
- }
+ SetAckpolicy(qc, pattrib->ack_policy);
+ }
- /* TODO: fill HT Control Field */
+ /* TODO: fill HT Control Field */
- /* Update Seq Num will be handled by f/w */
- if (psta) {
- psta->sta_xmitpriv.txseq_tid[pattrib->priority]++;
- psta->sta_xmitpriv.txseq_tid[pattrib->priority] &= 0xFFF;
+ /* Update Seq Num will be handled by f/w */
+ if (psta) {
+ psta->sta_xmitpriv.txseq_tid[pattrib->priority]++;
+ psta->sta_xmitpriv.txseq_tid[pattrib->priority] &= 0xFFF;
- pattrib->seqnum = psta->sta_xmitpriv.txseq_tid[pattrib->priority];
+ pattrib->seqnum = psta->sta_xmitpriv.txseq_tid[pattrib->priority];
- SetSeqNum(hdr, pattrib->seqnum);
+ SetSeqNum(hdr, pattrib->seqnum);
- /* check if enable ampdu */
- if (pattrib->ht_en && psta->htpriv.ampdu_enable) {
- if (psta->htpriv.agg_enable_bitmap & BIT(pattrib->priority))
- pattrib->ampdu_en = true;
- }
+ /* check if enable ampdu */
+ if (pattrib->ht_en && psta->htpriv.ampdu_enable) {
+ if (psta->htpriv.agg_enable_bitmap & BIT(pattrib->priority))
+ pattrib->ampdu_en = true;
+ }
- /* re-check if enable ampdu by BA_starting_seqctrl */
- if (pattrib->ampdu_en) {
- u16 tx_seq;
+ /* re-check if enable ampdu by BA_starting_seqctrl */
+ if (pattrib->ampdu_en) {
+ u16 tx_seq;
- tx_seq = psta->BA_starting_seqctrl[pattrib->priority & 0x0f];
+ tx_seq = psta->BA_starting_seqctrl[pattrib->priority & 0x0f];
- /* check BA_starting_seqctrl */
- if (SN_LESS(pattrib->seqnum, tx_seq)) {
- pattrib->ampdu_en = false;/* AGG BK */
- } else if (SN_EQUAL(pattrib->seqnum, tx_seq)) {
- psta->BA_starting_seqctrl[pattrib->priority & 0x0f] = (tx_seq + 1) & 0xfff;
+ /* check BA_starting_seqctrl */
+ if (SN_LESS(pattrib->seqnum, tx_seq)) {
+ pattrib->ampdu_en = false;/* AGG BK */
+ } else if (SN_EQUAL(pattrib->seqnum, tx_seq)) {
+ psta->BA_starting_seqctrl[pattrib->priority & 0x0f] = (tx_seq + 1) & 0xfff;
- pattrib->ampdu_en = true;/* AGG EN */
- } else {
- psta->BA_starting_seqctrl[pattrib->priority & 0x0f] = (pattrib->seqnum + 1) & 0xfff;
- pattrib->ampdu_en = true;/* AGG EN */
- }
+ pattrib->ampdu_en = true;/* AGG EN */
+ } else {
+ psta->BA_starting_seqctrl[pattrib->priority & 0x0f] = (pattrib->seqnum + 1) & 0xfff;
+ pattrib->ampdu_en = true;/* AGG EN */
}
}
}
-exit:
- return res;
+ return _SUCCESS;
}
s32 rtw_txframes_pending(struct adapter *padapter)
{
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- return (!list_empty(&pxmitpriv->be_pending.queue) ||
- !list_empty(&pxmitpriv->bk_pending.queue) ||
- !list_empty(&pxmitpriv->vi_pending.queue) ||
- !list_empty(&pxmitpriv->vo_pending.queue));
+ return (!list_empty(&pxmitpriv->be_pending) ||
+ !list_empty(&pxmitpriv->bk_pending) ||
+ !list_empty(&pxmitpriv->vi_pending) ||
+ !list_empty(&pxmitpriv->vo_pending));
}
s32 rtw_txframes_sta_ac_pending(struct adapter *padapter, struct pkt_attrib *pattrib)
@@ -1010,25 +983,23 @@ s32 rtw_xmitframe_coalesce(struct adapter *padapter, struct sk_buff *pkt, struct
/* adding icv, if necessary... */
if (pattrib->iv_len) {
- if (psta) {
- switch (pattrib->encrypt) {
- case _WEP40_:
- case _WEP104_:
- WEP_IV(pattrib->iv, psta->dot11txpn, pattrib->key_idx);
- break;
- case _TKIP_:
- if (bmcst)
- TKIP_IV(pattrib->iv, psta->dot11txpn, pattrib->key_idx);
- else
- TKIP_IV(pattrib->iv, psta->dot11txpn, 0);
- break;
- case _AES_:
- if (bmcst)
- AES_IV(pattrib->iv, psta->dot11txpn, pattrib->key_idx);
- else
- AES_IV(pattrib->iv, psta->dot11txpn, 0);
- break;
- }
+ switch (pattrib->encrypt) {
+ case _WEP40_:
+ case _WEP104_:
+ WEP_IV(pattrib->iv, psta->dot11txpn, pattrib->key_idx);
+ break;
+ case _TKIP_:
+ if (bmcst)
+ TKIP_IV(pattrib->iv, psta->dot11txpn, pattrib->key_idx);
+ else
+ TKIP_IV(pattrib->iv, psta->dot11txpn, 0);
+ break;
+ case _AES_:
+ if (bmcst)
+ AES_IV(pattrib->iv, psta->dot11txpn, pattrib->key_idx);
+ else
+ AES_IV(pattrib->iv, psta->dot11txpn, 0);
+ break;
}
memcpy(pframe, pattrib->iv, pattrib->iv_len);
@@ -1126,40 +1097,6 @@ s32 rtw_put_snap(u8 *data, u16 h_proto)
return SNAP_SIZE + sizeof(u16);
}
-void rtw_update_protection(struct adapter *padapter, u8 *ie, uint ie_len)
-{
- uint protection;
- u8 *perp;
- int erp_len;
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- struct registry_priv *pregistrypriv = &padapter->registrypriv;
-
- switch (pxmitpriv->vcs_setting) {
- case DISABLE_VCS:
- pxmitpriv->vcs = NONE_VCS;
- break;
- case ENABLE_VCS:
- break;
- case AUTO_VCS:
- default:
- perp = rtw_get_ie(ie, _ERPINFO_IE_, &erp_len, ie_len);
- if (!perp) {
- pxmitpriv->vcs = NONE_VCS;
- } else {
- protection = (*(perp + 2)) & BIT(1);
- if (protection) {
- if (pregistrypriv->vcs_type == RTS_CTS)
- pxmitpriv->vcs = RTS_CTS;
- else
- pxmitpriv->vcs = CTS_TO_SELF;
- } else {
- pxmitpriv->vcs = NONE_VCS;
- }
- }
- break;
- }
-}
-
void rtw_count_tx_stats(struct adapter *padapter, struct xmit_frame *pxmitframe, int sz)
{
struct sta_info *psta = NULL;
@@ -1319,38 +1256,32 @@ struct xmit_frame *rtw_alloc_xmitframe(struct xmit_priv *pxmitpriv)/* _queue *pf
spin_lock_bh(&pfree_xmit_queue->lock);
- if (list_empty(&pfree_xmit_queue->queue)) {
- pxframe = NULL;
- } else {
- phead = get_list_head(pfree_xmit_queue);
-
- plist = phead->next;
-
- pxframe = container_of(plist, struct xmit_frame, list);
+ if (list_empty(&pfree_xmit_queue->queue))
+ goto out;
- list_del_init(&pxframe->list);
- }
+ phead = get_list_head(pfree_xmit_queue);
+ plist = phead->next;
+ pxframe = container_of(plist, struct xmit_frame, list);
+ list_del_init(&pxframe->list);
- if (pxframe) { /* default value setting */
- pxmitpriv->free_xmitframe_cnt--;
+ pxmitpriv->free_xmitframe_cnt--;
- pxframe->buf_addr = NULL;
- pxframe->pxmitbuf = NULL;
+ pxframe->buf_addr = NULL;
+ pxframe->pxmitbuf = NULL;
- memset(&pxframe->attrib, 0, sizeof(struct pkt_attrib));
- /* pxframe->attrib.psta = NULL; */
+ memset(&pxframe->attrib, 0, sizeof(struct pkt_attrib));
+ /* pxframe->attrib.psta = NULL; */
- pxframe->frame_tag = DATA_FRAMETAG;
+ pxframe->frame_tag = DATA_FRAMETAG;
- pxframe->pkt = NULL;
- pxframe->pkt_offset = 1;/* default use pkt_offset to fill tx desc */
+ pxframe->pkt = NULL;
+ pxframe->pkt_offset = 1;/* default use pkt_offset to fill tx desc */
- pxframe->agg_num = 1;
- pxframe->ack_report = 0;
- }
+ pxframe->agg_num = 1;
+ pxframe->ack_report = 0;
+out:
spin_unlock_bh(&pfree_xmit_queue->lock);
-
return pxframe;
}
@@ -1386,101 +1317,49 @@ exit:
return _SUCCESS;
}
-void rtw_free_xmitframe_queue(struct xmit_priv *pxmitpriv, struct __queue *pframequeue)
+void rtw_free_xmitframe_list(struct xmit_priv *pxmitpriv, struct list_head *xframe_list)
{
- struct list_head *plist, *phead;
- struct xmit_frame *pxmitframe;
-
- spin_lock_bh(&pframequeue->lock);
-
- phead = get_list_head(pframequeue);
- plist = phead->next;
-
- while (phead != plist) {
- pxmitframe = container_of(plist, struct xmit_frame, list);
-
- plist = plist->next;
+ struct xmit_frame *pxmitframe, *tmp_xmitframe;
+ list_for_each_entry_safe(pxmitframe, tmp_xmitframe, xframe_list, list)
rtw_free_xmitframe(pxmitpriv, pxmitframe);
- }
- spin_unlock_bh(&pframequeue->lock);
-}
-
-s32 rtw_xmitframe_enqueue(struct adapter *padapter, struct xmit_frame *pxmitframe)
-{
- if (rtw_xmit_classifier(padapter, pxmitframe) == _FAIL) {
-/* pxmitframe->pkt = NULL; */
- return _FAIL;
- }
-
- return _SUCCESS;
-}
-
-static struct xmit_frame *dequeue_one_xmitframe(struct xmit_priv *pxmitpriv, struct hw_xmit *phwxmit, struct tx_servq *ptxservq, struct __queue *pframe_queue)
-{
- struct list_head *xmitframe_plist, *xmitframe_phead;
- struct xmit_frame *pxmitframe = NULL;
-
- xmitframe_phead = get_list_head(pframe_queue);
- xmitframe_plist = xmitframe_phead->next;
-
- if (xmitframe_phead != xmitframe_plist) {
- pxmitframe = container_of(xmitframe_plist, struct xmit_frame, list);
-
- xmitframe_plist = xmitframe_plist->next;
-
- list_del_init(&pxmitframe->list);
-
- ptxservq->qcnt--;
- }
- return pxmitframe;
}
-struct xmit_frame *rtw_dequeue_xframe(struct xmit_priv *pxmitpriv, struct hw_xmit *phwxmit_i, int entry)
+struct xmit_frame *rtw_dequeue_xframe(struct xmit_priv *pxmitpriv, struct hw_xmit *phwxmit_i)
{
- struct list_head *sta_plist, *sta_phead;
struct hw_xmit *phwxmit;
- struct tx_servq *ptxservq = NULL;
- struct __queue *pframe_queue = NULL;
+ struct tx_servq *ptxservq, *tmp_txservq;
+ struct list_head *xframe_list;
struct xmit_frame *pxmitframe = NULL;
struct adapter *padapter = pxmitpriv->adapter;
struct registry_priv *pregpriv = &padapter->registrypriv;
- int i, inx[4];
-
- inx[0] = 0; inx[1] = 1; inx[2] = 2; inx[3] = 3;
+ int i, inx[] = { 0, 1, 2, 3 };
if (pregpriv->wifi_spec == 1) {
- int j;
-
- for (j = 0; j < 4; j++)
- inx[j] = pxmitpriv->wmm_para_seq[j];
+ for (i = 0; i < ARRAY_SIZE(inx); i++)
+ inx[i] = pxmitpriv->wmm_para_seq[i];
}
spin_lock_bh(&pxmitpriv->lock);
- for (i = 0; i < entry; i++) {
+ for (i = 0; i < HWXMIT_ENTRY; i++) {
phwxmit = phwxmit_i + inx[i];
+ list_for_each_entry_safe(ptxservq, tmp_txservq, phwxmit->sta_list, tx_pending) {
+ xframe_list = &ptxservq->sta_pending;
+ if (list_empty(xframe_list))
+ continue;
- sta_phead = get_list_head(phwxmit->sta_queue);
- sta_plist = sta_phead->next;
-
- while (sta_phead != sta_plist) {
- ptxservq = container_of(sta_plist, struct tx_servq, tx_pending);
-
- pframe_queue = &ptxservq->sta_pending;
+ pxmitframe = container_of(xframe_list->next, struct xmit_frame, list);
+ list_del_init(&pxmitframe->list);
- pxmitframe = dequeue_one_xmitframe(pxmitpriv, phwxmit, ptxservq, pframe_queue);
+ phwxmit->accnt--;
+ ptxservq->qcnt--;
- if (pxmitframe) {
- phwxmit->accnt--;
+ /* Remove sta node when there are no pending packets. */
+ if (list_empty(xframe_list))
+ list_del_init(&ptxservq->tx_pending);
- /* Remove sta node when there are no pending packets. */
- if (list_empty(&pframe_queue->queue)) /* must be done after get_next and before break */
- list_del_init(&ptxservq->tx_pending);
- goto exit;
- }
-
- sta_plist = sta_plist->next;
+ goto exit;
}
}
exit:
@@ -1547,9 +1426,9 @@ s32 rtw_xmit_classifier(struct adapter *padapter, struct xmit_frame *pxmitframe)
ptxservq = rtw_get_sta_pending(padapter, psta, pattrib->priority, (u8 *)(&ac_index));
if (list_empty(&ptxservq->tx_pending))
- list_add_tail(&ptxservq->tx_pending, get_list_head(phwxmits[ac_index].sta_queue));
+ list_add_tail(&ptxservq->tx_pending, phwxmits[ac_index].sta_list);
- list_add_tail(&pxmitframe->list, get_list_head(&ptxservq->sta_pending));
+ list_add_tail(&pxmitframe->list, &ptxservq->sta_pending);
ptxservq->qcnt++;
phwxmits[ac_index].accnt++;
exit:
@@ -1562,39 +1441,20 @@ int rtw_alloc_hwxmits(struct adapter *padapter)
struct hw_xmit *hwxmits;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- pxmitpriv->hwxmit_entry = HWXMIT_ENTRY;
-
- pxmitpriv->hwxmits = kzalloc(sizeof(struct hw_xmit) * pxmitpriv->hwxmit_entry, GFP_KERNEL);
+ pxmitpriv->hwxmits = kcalloc(HWXMIT_ENTRY, sizeof(struct hw_xmit), GFP_KERNEL);
if (!pxmitpriv->hwxmits)
return -ENOMEM;
hwxmits = pxmitpriv->hwxmits;
- hwxmits[0].sta_queue = &pxmitpriv->vo_pending;
- hwxmits[1].sta_queue = &pxmitpriv->vi_pending;
- hwxmits[2].sta_queue = &pxmitpriv->be_pending;
- hwxmits[3].sta_queue = &pxmitpriv->bk_pending;
+ hwxmits[0].sta_list = &pxmitpriv->vo_pending;
+ hwxmits[1].sta_list = &pxmitpriv->vi_pending;
+ hwxmits[2].sta_list = &pxmitpriv->be_pending;
+ hwxmits[3].sta_list = &pxmitpriv->bk_pending;
return 0;
}
-void rtw_free_hwxmits(struct adapter *padapter)
-{
- struct hw_xmit *hwxmits;
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
-
- hwxmits = pxmitpriv->hwxmits;
- kfree(hwxmits);
-}
-
-void rtw_init_hwxmits(struct hw_xmit *phwxmit, int entry)
-{
- int i;
-
- for (i = 0; i < entry; i++, phwxmit++)
- phwxmit->accnt = 0;
-}
-
static int rtw_br_client_tx(struct adapter *padapter, struct sk_buff **pskb)
{
struct sk_buff *skb = *pskb;
@@ -1743,15 +1603,6 @@ u32 rtw_get_ff_hwaddr(struct xmit_frame *pxmitframe)
return addr;
}
-static void do_queue_select(struct adapter *padapter, struct pkt_attrib *pattrib)
-{
- u8 qsel;
-
- qsel = pattrib->priority;
-
- pattrib->qsel = qsel;
-}
-
/*
* The main transmit(tx) entry
*
@@ -1765,18 +1616,14 @@ s32 rtw_xmit(struct adapter *padapter, struct sk_buff **ppkt)
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
struct xmit_frame *pxmitframe = NULL;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- void *br_port = NULL;
s32 res;
pxmitframe = rtw_alloc_xmitframe(pxmitpriv);
if (!pxmitframe)
return -1;
- rcu_read_lock();
- br_port = rcu_dereference(padapter->pnetdev->rx_handler_data);
- rcu_read_unlock();
-
- if (br_port && check_fwstate(pmlmepriv, WIFI_STATION_STATE | WIFI_ADHOC_STATE)) {
+ if (rcu_access_pointer(padapter->pnetdev->rx_handler_data) &&
+ check_fwstate(pmlmepriv, WIFI_STATION_STATE | WIFI_ADHOC_STATE)) {
res = rtw_br_client_tx(padapter, ppkt);
if (res == -1) {
rtw_free_xmitframe(pxmitpriv, pxmitframe);
@@ -1794,7 +1641,7 @@ s32 rtw_xmit(struct adapter *padapter, struct sk_buff **ppkt)
rtw_led_control(padapter, LED_CTL_TX);
- do_queue_select(padapter, &pxmitframe->attrib);
+ pxmitframe->attrib.qsel = pxmitframe->attrib.priority;
spin_lock_bh(&pxmitpriv->lock);
if (xmitframe_enqueue_for_sleeping_sta(padapter, pxmitframe)) {
@@ -1911,16 +1758,15 @@ int xmitframe_enqueue_for_sleeping_sta(struct adapter *padapter, struct xmit_fra
return ret;
}
-static void dequeue_xmitframes_to_sleeping_queue(struct adapter *padapter, struct sta_info *psta, struct __queue *pframequeue)
+static void dequeue_xmitframes_to_sleeping_queue(struct adapter *padapter, struct sta_info *psta, struct list_head *phead)
{
- struct list_head *plist, *phead;
+ struct list_head *plist;
u8 ac_index;
struct tx_servq *ptxservq;
struct pkt_attrib *pattrib;
struct xmit_frame *pxmitframe;
struct hw_xmit *phwxmits = padapter->xmitpriv.hwxmits;
- phead = get_list_head(pframequeue);
plist = phead->next;
while (phead != plist) {
diff --git a/drivers/staging/r8188eu/hal/rtl8188e_cmd.c b/drivers/staging/r8188eu/hal/rtl8188e_cmd.c
index 8310d7f53982..788904d4655c 100644
--- a/drivers/staging/r8188eu/hal/rtl8188e_cmd.c
+++ b/drivers/staging/r8188eu/hal/rtl8188e_cmd.c
@@ -193,9 +193,9 @@ void rtl8188e_set_FwPwrMode_cmd(struct adapter *adapt, u8 Mode)
}
-void rtl8188e_set_FwMediaStatus_cmd(struct adapter *adapt, __le16 mstatus_rpt)
+void rtl8188e_set_FwMediaStatus_cmd(struct adapter *adapt, u16 mstatus_rpt)
{
- u16 mst_rpt = le16_to_cpu(mstatus_rpt);
+ __le16 mst_rpt = cpu_to_le16(mstatus_rpt);
FillH2CCmd_88E(adapt, H2C_COM_MEDIA_STATUS_RPT, sizeof(mst_rpt), (u8 *)&mst_rpt);
}
diff --git a/drivers/staging/r8188eu/hal/rtl8188e_phycfg.c b/drivers/staging/r8188eu/hal/rtl8188e_phycfg.c
index b7f3c7a670fb..f4edf4a8f5c2 100644
--- a/drivers/staging/r8188eu/hal/rtl8188e_phycfg.c
+++ b/drivers/staging/r8188eu/hal/rtl8188e_phycfg.c
@@ -315,21 +315,20 @@ rtl8188e_PHY_SetRFReg(
* 08/12/2008 MHC Create Version 0.
*
*---------------------------------------------------------------------------*/
-s32 PHY_MACConfig8188E(struct adapter *Adapter)
+int PHY_MACConfig8188E(struct adapter *Adapter)
{
struct hal_data_8188e *pHalData = &Adapter->haldata;
- int rtStatus = _SUCCESS;
+ int err;
/* */
/* Config MAC */
/* */
- if (ODM_ReadAndConfig_MAC_REG_8188E(&pHalData->odmpriv))
- rtStatus = _FAIL;
+ err = ODM_ReadAndConfig_MAC_REG_8188E(&pHalData->odmpriv);
/* 2010.07.13 AMPDU aggregation number B */
rtw_write16(Adapter, REG_MAX_AGGR_NUM, MAX_AGGR_NUM);
- return rtStatus;
+ return err;
}
/**
@@ -450,13 +449,15 @@ static int phy_BB8188E_Config_ParaFile(struct adapter *Adapter)
{
struct eeprom_priv *pEEPROM = &Adapter->eeprompriv;
struct hal_data_8188e *pHalData = &Adapter->haldata;
+ int err;
/* */
/* 1. Read PHY_REG.TXT BB INIT!! */
/* We will separate as 88C / 92C according to chip version */
/* */
- if (ODM_ReadAndConfig_PHY_REG_1T_8188E(&pHalData->odmpriv))
- return _FAIL;
+ err = ODM_ReadAndConfig_PHY_REG_1T_8188E(&pHalData->odmpriv);
+ if (err)
+ return err;
/* 2. If EEPROM or EFUSE autoload OK, We must config by PHY_REG_PG.txt */
if (!pEEPROM->bautoload_fail_flag) {
@@ -465,10 +466,11 @@ static int phy_BB8188E_Config_ParaFile(struct adapter *Adapter)
}
/* 3. BB AGC table Initialization */
- if (ODM_ReadAndConfig_AGC_TAB_1T_8188E(&pHalData->odmpriv))
- return _FAIL;
+ err = ODM_ReadAndConfig_AGC_TAB_1T_8188E(&pHalData->odmpriv);
+ if (err)
+ return err;
- return _SUCCESS;
+ return 0;
}
int
@@ -476,18 +478,17 @@ PHY_BBConfig8188E(
struct adapter *Adapter
)
{
- int rtStatus = _SUCCESS;
struct hal_data_8188e *pHalData = &Adapter->haldata;
u16 RegVal;
u8 CrystalCap;
- int res;
+ int err;
phy_InitBBRFRegisterDefinition(Adapter);
/* Enable BB and RF */
- res = rtw_read16(Adapter, REG_SYS_FUNC_EN, &RegVal);
- if (res)
- return _FAIL;
+ err = rtw_read16(Adapter, REG_SYS_FUNC_EN, &RegVal);
+ if (err)
+ return err;
rtw_write16(Adapter, REG_SYS_FUNC_EN, (u16)(RegVal | BIT(13) | BIT(0) | BIT(1)));
@@ -498,13 +499,13 @@ PHY_BBConfig8188E(
rtw_write8(Adapter, REG_SYS_FUNC_EN, FEN_USBA | FEN_USBD | FEN_BB_GLB_RSTn | FEN_BBRSTB);
/* Config BB and AGC */
- rtStatus = phy_BB8188E_Config_ParaFile(Adapter);
+ err = phy_BB8188E_Config_ParaFile(Adapter);
/* write 0x24[16:11] = 0x24[22:17] = CrystalCap */
CrystalCap = pHalData->CrystalCap & 0x3F;
rtl8188e_PHY_SetBBReg(Adapter, REG_AFE_XTAL_CTRL, 0x7ff800, (CrystalCap | (CrystalCap << 6)));
- return rtStatus;
+ return err;
}
static void getTxPowerIndex88E(struct adapter *Adapter, u8 channel, u8 *cckPowerLevel,
diff --git a/drivers/staging/r8188eu/hal/rtl8188e_rf6052.c b/drivers/staging/r8188eu/hal/rtl8188e_rf6052.c
index e5ec6e563fbd..1988fb6e780a 100644
--- a/drivers/staging/r8188eu/hal/rtl8188e_rf6052.c
+++ b/drivers/staging/r8188eu/hal/rtl8188e_rf6052.c
@@ -371,7 +371,7 @@ int phy_RF6052_Config_ParaFile(struct adapter *Adapter)
struct bb_reg_def *pPhyReg;
struct hal_data_8188e *pHalData = &Adapter->haldata;
u32 u4RegValue = 0;
- int rtStatus = _SUCCESS;
+ int err;
/* Initialize RF */
@@ -396,11 +396,10 @@ int phy_RF6052_Config_ParaFile(struct adapter *Adapter)
udelay(1);/* PlatformStallExecution(1); */
/*----Initialize RF fom connfiguration file----*/
- if (ODM_ReadAndConfig_RadioA_1T_8188E(&pHalData->odmpriv))
- rtStatus = _FAIL;
+ err = ODM_ReadAndConfig_RadioA_1T_8188E(&pHalData->odmpriv);
/*----Restore RFENV control type----*/;
rtl8188e_PHY_SetBBReg(Adapter, pPhyReg->rfintfs, bRFSI_RFENV, u4RegValue);
- return rtStatus;
+ return err;
}
diff --git a/drivers/staging/r8188eu/hal/rtl8188eu_xmit.c b/drivers/staging/r8188eu/hal/rtl8188eu_xmit.c
index 6d1f56d1f9d7..3ffab4953a5c 100644
--- a/drivers/staging/r8188eu/hal/rtl8188eu_xmit.c
+++ b/drivers/staging/r8188eu/hal/rtl8188eu_xmit.c
@@ -9,16 +9,6 @@
#include "../include/usb_ops.h"
#include "../include/rtl8188e_hal.h"
-s32 rtl8188eu_init_xmit_priv(struct adapter *adapt)
-{
- struct xmit_priv *pxmitpriv = &adapt->xmitpriv;
-
- tasklet_init(&pxmitpriv->xmit_tasklet,
- rtl8188eu_xmit_tasklet,
- (unsigned long)adapt);
- return _SUCCESS;
-}
-
static void rtl8188eu_cal_txdesc_chksum(struct tx_desc *ptxdesc)
{
u16 *usptr = (u16 *)ptxdesc;
@@ -147,7 +137,7 @@ static void fill_txdesc_phy(struct pkt_attrib *pattrib, __le32 *pdw)
}
}
-static s32 update_txdesc(struct xmit_frame *pxmitframe, u8 *pmem, s32 sz, u8 bagg_pkt)
+static s32 update_txdesc(struct xmit_frame *pxmitframe, u8 *pmem, s32 sz)
{
uint qsel;
u8 data_rate, pwr_status, offset;
@@ -329,7 +319,7 @@ static s32 rtw_dump_xframe(struct adapter *adapt, struct xmit_frame *pxmitframe)
sz = pattrib->last_txcmdsz;
}
- pull = update_txdesc(pxmitframe, mem_addr, sz, false);
+ pull = update_txdesc(pxmitframe, mem_addr, sz);
if (pull) {
mem_addr += PACKET_OFFSET_SZ; /* pull txdesc head */
@@ -375,11 +365,13 @@ static u32 xmitframe_need_length(struct xmit_frame *pxmitframe)
return len;
}
-bool rtl8188eu_xmitframe_complete(struct adapter *adapt, struct xmit_priv *pxmitpriv, struct xmit_buf *pxmitbuf)
+bool rtl8188eu_xmitframe_complete(struct adapter *adapt)
{
+ struct xmit_priv *pxmitpriv = &adapt->xmitpriv;
struct dvobj_priv *pdvobjpriv = adapter_to_dvobj(adapt);
struct xmit_frame *pxmitframe = NULL;
struct xmit_frame *pfirstframe = NULL;
+ struct xmit_buf *pxmitbuf;
/* aggregate variable */
struct hw_xmit *phwxmit;
@@ -403,17 +395,11 @@ bool rtl8188eu_xmitframe_complete(struct adapter *adapt, struct xmit_priv *pxmit
else
bulksize = USB_FULL_SPEED_BULK_SIZE;
- /* check xmitbuffer is ok */
- if (!pxmitbuf) {
- pxmitbuf = rtw_alloc_xmitbuf(pxmitpriv);
- if (!pxmitbuf)
- return false;
- }
-
- /* 3 1. pick up first frame */
- rtw_free_xmitframe(pxmitpriv, pxmitframe);
+ pxmitbuf = rtw_alloc_xmitbuf(pxmitpriv);
+ if (!pxmitbuf)
+ return false;
- pxmitframe = rtw_dequeue_xframe(pxmitpriv, pxmitpriv->hwxmits, pxmitpriv->hwxmit_entry);
+ pxmitframe = rtw_dequeue_xframe(pxmitpriv, pxmitpriv->hwxmits);
if (!pxmitframe) {
/* no more xmit frame, release xmit buffer */
rtw_free_xmitbuf(pxmitpriv, pxmitbuf);
@@ -475,7 +461,7 @@ bool rtl8188eu_xmitframe_complete(struct adapter *adapt, struct xmit_priv *pxmit
}
spin_lock_bh(&pxmitpriv->lock);
- xmitframe_phead = get_list_head(&ptxservq->sta_pending);
+ xmitframe_phead = &ptxservq->sta_pending;
xmitframe_plist = xmitframe_phead->next;
while (xmitframe_phead != xmitframe_plist) {
@@ -503,7 +489,7 @@ bool rtl8188eu_xmitframe_complete(struct adapter *adapt, struct xmit_priv *pxmit
rtw_xmit_complete(adapt, pxmitframe);
/* (len - TXDESC_SIZE) == pxmitframe->attrib.last_txcmdsz */
- update_txdesc(pxmitframe, pxmitframe->buf_addr, pxmitframe->attrib.last_txcmdsz, true);
+ update_txdesc(pxmitframe, pxmitframe->buf_addr, pxmitframe->attrib.last_txcmdsz);
/* don't need xmitframe any more */
rtw_free_xmitframe(pxmitpriv, pxmitframe);
@@ -526,7 +512,7 @@ bool rtl8188eu_xmitframe_complete(struct adapter *adapt, struct xmit_priv *pxmit
}
} /* end while (aggregate same priority and same DA(AP or STA) frames) */
- if (list_empty(&ptxservq->sta_pending.queue))
+ if (list_empty(&ptxservq->sta_pending))
list_del_init(&ptxservq->tx_pending);
spin_unlock_bh(&pxmitpriv->lock);
@@ -543,7 +529,7 @@ bool rtl8188eu_xmitframe_complete(struct adapter *adapt, struct xmit_priv *pxmit
pfirstframe->pkt_offset--;
}
- update_txdesc(pfirstframe, pfirstframe->buf_addr, pfirstframe->attrib.last_txcmdsz, true);
+ update_txdesc(pfirstframe, pfirstframe->buf_addr, pfirstframe->attrib.last_txcmdsz);
/* 3 4. write xmit buffer to USB FIFO */
ff_hwaddr = rtw_get_ff_hwaddr(pfirstframe);
@@ -610,7 +596,7 @@ static s32 pre_xmitframe(struct adapter *adapt, struct xmit_frame *pxmitframe)
return true;
enqueue:
- res = rtw_xmitframe_enqueue(adapt, pxmitframe);
+ res = rtw_xmit_classifier(adapt, pxmitframe);
spin_unlock_bh(&pxmitpriv->lock);
if (res != _SUCCESS) {
diff --git a/drivers/staging/r8188eu/hal/usb_halinit.c b/drivers/staging/r8188eu/hal/usb_halinit.c
index d28b4dc2a767..a1051ac1cac4 100644
--- a/drivers/staging/r8188eu/hal/usb_halinit.c
+++ b/drivers/staging/r8188eu/hal/usb_halinit.c
@@ -600,20 +600,17 @@ u32 rtl8188eu_hal_init(struct adapter *Adapter)
Adapter->pwrctrlpriv.bFwCurrentInPSMode = false;
haldata->LastHMEBoxNum = 0;
- status = PHY_MACConfig8188E(Adapter);
- if (status == _FAIL)
- goto exit;
+ if (PHY_MACConfig8188E(Adapter))
+ return _FAIL;
/* */
/* d. Initialize BB related configurations. */
/* */
- status = PHY_BBConfig8188E(Adapter);
- if (status == _FAIL)
- goto exit;
+ if (PHY_BBConfig8188E(Adapter))
+ return _FAIL;
- status = phy_RF6052_Config_ParaFile(Adapter);
- if (status == _FAIL)
- goto exit;
+ if (phy_RF6052_Config_ParaFile(Adapter))
+ return _FAIL;
status = rtl8188e_iol_efuse_patch(Adapter);
if (status == _FAIL)
@@ -851,29 +848,25 @@ u32 rtl8188eu_hal_deinit(struct adapter *Adapter)
return _SUCCESS;
}
-unsigned int rtl8188eu_inirp_init(struct adapter *Adapter)
+int rtl8188eu_inirp_init(struct adapter *Adapter)
{
u8 i;
struct recv_buf *precvbuf;
- uint status;
struct recv_priv *precvpriv = &Adapter->recvpriv;
-
- status = _SUCCESS;
+ int ret;
/* issue Rx irp to receive data */
precvbuf = (struct recv_buf *)precvpriv->precv_buf;
for (i = 0; i < NR_RECVBUFF; i++) {
- if (!rtw_read_port(Adapter, (unsigned char *)precvbuf)) {
- status = _FAIL;
- goto exit;
- }
+ ret = rtw_read_port(Adapter, precvbuf);
+ if (ret)
+ return ret;
precvbuf++;
precvpriv->free_recv_buf_queue_cnt--;
}
-exit:
- return status;
+ return 0;
}
/* */
diff --git a/drivers/staging/r8188eu/hal/usb_ops_linux.c b/drivers/staging/r8188eu/hal/usb_ops_linux.c
index 7c72f5e04d9b..9611b19ab55b 100644
--- a/drivers/staging/r8188eu/hal/usb_ops_linux.c
+++ b/drivers/staging/r8188eu/hal/usb_ops_linux.c
@@ -7,9 +7,12 @@
#include "../include/usb_ops.h"
#include "../include/rtl8188e_hal.h"
-static int usb_read(struct intf_hdl *intf, u16 value, void *data, u8 size)
+#define VENDOR_CMD_MAX_DATA_LEN 254
+
+#define RTW_USB_CONTROL_MSG_TIMEOUT 500/* ms */
+
+static int usb_read(struct adapter *adapt, u16 value, void *data, u8 size)
{
- struct adapter *adapt = intf->padapter;
struct dvobj_priv *dvobjpriv = adapter_to_dvobj(adapt);
struct usb_device *udev = dvobjpriv->pusbdev;
int status;
@@ -50,9 +53,8 @@ static int usb_read(struct intf_hdl *intf, u16 value, void *data, u8 size)
return status;
}
-static int usb_write(struct intf_hdl *intf, u16 value, void *data, u8 size)
+static int usb_write(struct adapter *adapt, u16 value, void *data, u8 size)
{
- struct adapter *adapt = intf->padapter;
struct dvobj_priv *dvobjpriv = adapter_to_dvobj(adapt);
struct usb_device *udev = dvobjpriv->pusbdev;
int status;
@@ -95,22 +97,18 @@ static int usb_write(struct intf_hdl *intf, u16 value, void *data, u8 size)
int __must_check rtw_read8(struct adapter *adapter, u32 addr, u8 *data)
{
- struct io_priv *io_priv = &adapter->iopriv;
- struct intf_hdl *intf = &io_priv->intf;
u16 value = addr & 0xffff;
- return usb_read(intf, value, data, 1);
+ return usb_read(adapter, value, data, 1);
}
int __must_check rtw_read16(struct adapter *adapter, u32 addr, u16 *data)
{
- struct io_priv *io_priv = &adapter->iopriv;
- struct intf_hdl *intf = &io_priv->intf;
u16 value = addr & 0xffff;
__le16 le_data;
int res;
- res = usb_read(intf, value, &le_data, 2);
+ res = usb_read(adapter, value, &le_data, 2);
if (res)
return res;
@@ -121,13 +119,11 @@ int __must_check rtw_read16(struct adapter *adapter, u32 addr, u16 *data)
int __must_check rtw_read32(struct adapter *adapter, u32 addr, u32 *data)
{
- struct io_priv *io_priv = &adapter->iopriv;
- struct intf_hdl *intf = &io_priv->intf;
u16 value = addr & 0xffff;
__le32 le_data;
int res;
- res = usb_read(intf, value, &le_data, 4);
+ res = usb_read(adapter, value, &le_data, 4);
if (res)
return res;
@@ -138,55 +134,44 @@ int __must_check rtw_read32(struct adapter *adapter, u32 addr, u32 *data)
int rtw_write8(struct adapter *adapter, u32 addr, u8 val)
{
- struct io_priv *io_priv = &adapter->iopriv;
- struct intf_hdl *intf = &io_priv->intf;
u16 value = addr & 0xffff;
int ret;
- ret = usb_write(intf, value, &val, 1);
+ ret = usb_write(adapter, value, &val, 1);
return RTW_STATUS_CODE(ret);
}
int rtw_write16(struct adapter *adapter, u32 addr, u16 val)
{
- struct io_priv *io_priv = &adapter->iopriv;
- struct intf_hdl *intf = &io_priv->intf;
u16 value = addr & 0xffff;
__le16 data = cpu_to_le16(val);
int ret;
- ret = usb_write(intf, value, &data, 2);
+ ret = usb_write(adapter, value, &data, 2);
return RTW_STATUS_CODE(ret);
}
int rtw_write32(struct adapter *adapter, u32 addr, u32 val)
{
- struct io_priv *io_priv = &adapter->iopriv;
- struct intf_hdl *intf = &io_priv->intf;
u16 value = addr & 0xffff;
__le32 data = cpu_to_le32(val);
int ret;
- ret = usb_write(intf, value, &data, 4);
+ ret = usb_write(adapter, value, &data, 4);
return RTW_STATUS_CODE(ret);
}
int rtw_writeN(struct adapter *adapter, u32 addr, u32 length, u8 *data)
{
- struct io_priv *io_priv = &adapter->iopriv;
- struct intf_hdl *intf = &io_priv->intf;
u16 value = addr & 0xffff;
- int ret;
if (length > VENDOR_CMD_MAX_DATA_LEN)
- return _FAIL;
+ return -EINVAL;
- ret = usb_write(intf, value, data, length);
-
- return RTW_STATUS_CODE(ret);
+ return usb_write(adapter, value, data, length);
}
static void handle_txrpt_ccx_88e(struct adapter *adapter, u8 *buf)
@@ -363,7 +348,7 @@ void rtl8188eu_recv_tasklet(unsigned long priv)
}
}
-static void usb_read_port_complete(struct urb *purb, struct pt_regs *regs)
+static void usb_read_port_complete(struct urb *purb)
{
struct recv_buf *precvbuf = (struct recv_buf *)purb->context;
struct adapter *adapt = (struct adapter *)precvbuf->adapter;
@@ -379,7 +364,7 @@ static void usb_read_port_complete(struct urb *purb, struct pt_regs *regs)
if (purb->status == 0) { /* SUCCESS */
if ((purb->actual_length > MAX_RECVBUF_SZ) || (purb->actual_length < RXDESC_SIZE)) {
precvbuf->reuse = true;
- rtw_read_port(adapt, (unsigned char *)precvbuf);
+ rtw_read_port(adapt, precvbuf);
} else {
rtw_reset_continual_urb_error(adapter_to_dvobj(adapt));
@@ -391,7 +376,7 @@ static void usb_read_port_complete(struct urb *purb, struct pt_regs *regs)
precvbuf->pskb = NULL;
precvbuf->reuse = false;
- rtw_read_port(adapt, (unsigned char *)precvbuf);
+ rtw_read_port(adapt, precvbuf);
}
} else {
skb_put(precvbuf->pskb, purb->actual_length);
@@ -411,7 +396,7 @@ static void usb_read_port_complete(struct urb *purb, struct pt_regs *regs)
case -EPROTO:
case -EOVERFLOW:
precvbuf->reuse = true;
- rtw_read_port(adapt, (unsigned char *)precvbuf);
+ rtw_read_port(adapt, precvbuf);
break;
case -EINPROGRESS:
break;
@@ -421,10 +406,9 @@ static void usb_read_port_complete(struct urb *purb, struct pt_regs *regs)
}
}
-u32 rtw_read_port(struct adapter *adapter, u8 *rmem)
+int rtw_read_port(struct adapter *adapter, struct recv_buf *precvbuf)
{
struct urb *purb = NULL;
- struct recv_buf *precvbuf = (struct recv_buf *)rmem;
struct dvobj_priv *pdvobj = adapter_to_dvobj(adapter);
struct recv_priv *precvpriv = &adapter->recvpriv;
struct usb_device *pusbd = pdvobj->pusbdev;
@@ -432,13 +416,12 @@ u32 rtw_read_port(struct adapter *adapter, u8 *rmem)
unsigned int pipe;
size_t tmpaddr = 0;
size_t alignment = 0;
- u32 ret = _SUCCESS;
if (adapter->bDriverStopped || adapter->bSurpriseRemoved)
- return _FAIL;
+ return -EPERM;
if (!precvbuf)
- return _FAIL;
+ return -ENOMEM;
if (!precvbuf->reuse || !precvbuf->pskb) {
precvbuf->pskb = skb_dequeue(&precvpriv->free_recv_skb_queue);
@@ -450,7 +433,7 @@ u32 rtw_read_port(struct adapter *adapter, u8 *rmem)
if (!precvbuf->reuse || !precvbuf->pskb) {
precvbuf->pskb = netdev_alloc_skb(adapter->pnetdev, MAX_RECVBUF_SZ + RECVBUFF_ALIGN_SZ);
if (!precvbuf->pskb)
- return _FAIL;
+ return -ENOMEM;
tmpaddr = (size_t)precvbuf->pskb->data;
alignment = tmpaddr & (RECVBUFF_ALIGN_SZ - 1);
@@ -474,29 +457,20 @@ u32 rtw_read_port(struct adapter *adapter, u8 *rmem)
err = usb_submit_urb(purb, GFP_ATOMIC);
if ((err) && (err != (-EPERM)))
- ret = _FAIL;
+ return err;
- return ret;
+ return 0;
}
void rtl8188eu_xmit_tasklet(unsigned long priv)
{
- int ret = false;
struct adapter *adapt = (struct adapter *)priv;
- struct xmit_priv *pxmitpriv = &adapt->xmitpriv;
if (check_fwstate(&adapt->mlmepriv, _FW_UNDER_SURVEY))
return;
- while (1) {
- if ((adapt->bDriverStopped) ||
- (adapt->bSurpriseRemoved) ||
- (adapt->bWritePortCancel))
- break;
-
- ret = rtl8188eu_xmitframe_complete(adapt, pxmitpriv, NULL);
-
- if (!ret)
+ do {
+ if (adapt->bDriverStopped || adapt->bSurpriseRemoved || adapt->bWritePortCancel)
break;
- }
+ } while (rtl8188eu_xmitframe_complete(adapt));
}
diff --git a/drivers/staging/r8188eu/include/drv_types.h b/drivers/staging/r8188eu/include/drv_types.h
index 8fef5759c36a..159990facb8a 100644
--- a/drivers/staging/r8188eu/include/drv_types.h
+++ b/drivers/staging/r8188eu/include/drv_types.h
@@ -152,7 +152,6 @@ struct adapter {
struct mlme_ext_priv mlmeextpriv;
struct cmd_priv cmdpriv;
struct evt_priv evtpriv;
- struct io_priv iopriv;
struct xmit_priv xmitpriv;
struct recv_priv recvpriv;
struct sta_priv stapriv;
@@ -172,8 +171,6 @@ struct adapter {
s8 signal_strength;
void *cmdThread;
- void (*intf_start)(struct adapter *adapter);
- void (*intf_stop)(struct adapter *adapter);
struct net_device *pnetdev;
/* used by rtw_rereg_nd_name related function */
@@ -187,7 +184,6 @@ struct adapter {
int bup;
struct net_device_stats stats;
struct iw_statistics iwstats;
- struct proc_dir_entry *dir_dev;/* for proc directory */
int net_closed;
u8 bFWReady;
diff --git a/drivers/staging/r8188eu/include/hal_intf.h b/drivers/staging/r8188eu/include/hal_intf.h
index ac6e3f95c5b7..296aa5b8268d 100644
--- a/drivers/staging/r8188eu/include/hal_intf.h
+++ b/drivers/staging/r8188eu/include/hal_intf.h
@@ -26,7 +26,7 @@ void UpdateHalRAMask8188EUsb(struct adapter *adapt, u32 mac_id, u8 rssi_level);
int rtl8188e_IOL_exec_cmds_sync(struct adapter *adapter,
struct xmit_frame *xmit_frame, u32 max_wating_ms, u32 bndy_cnt);
-unsigned int rtl8188eu_inirp_init(struct adapter *Adapter);
+int rtl8188eu_inirp_init(struct adapter *Adapter);
uint rtw_hal_init(struct adapter *padapter);
uint rtw_hal_deinit(struct adapter *padapter);
diff --git a/drivers/staging/r8188eu/include/osdep_intf.h b/drivers/staging/r8188eu/include/osdep_intf.h
index 6d66cb57225e..457fb3852a19 100644
--- a/drivers/staging/r8188eu/include/osdep_intf.h
+++ b/drivers/staging/r8188eu/include/osdep_intf.h
@@ -7,38 +7,6 @@
#include "osdep_service.h"
#include "drv_types.h"
-struct intf_priv {
- u8 *intf_dev;
- u32 max_iosz; /* USB2.0: 128, USB1.1: 64, SDIO:64 */
- u32 max_xmitsz; /* USB2.0: unlimited, SDIO:512 */
- u32 max_recvsz; /* USB2.0: unlimited, SDIO:512 */
-
- u8 *io_rwmem;
- u8 *allocated_io_rwmem;
- u32 io_wsz; /* unit: 4bytes */
- u32 io_rsz;/* unit: 4bytes */
- u8 intf_status;
-
- void (*_bus_io)(u8 *priv);
-
-/*
-Under Sync. IRP (SDIO/USB)
-A protection mechanism is necessary for the io_rwmem(read/write protocol)
-
-Under Async. IRP (SDIO/USB)
-The protection mechanism is through the pending queue.
-*/
- struct mutex ioctl_mutex;
- /* when in USB, IO is through interrupt in/out endpoints */
- struct usb_device *udev;
- struct urb *piorw_urb;
- u8 io_irp_cnt;
- u8 bio_irp_pending;
- struct timer_list io_timer;
- u8 bio_irp_timeout;
- u8 bio_timer_cancel;
-};
-
int netdev_open(struct net_device *pnetdev);
int netdev_close(struct net_device *pnetdev);
diff --git a/drivers/staging/r8188eu/include/rtl8188e_cmd.h b/drivers/staging/r8188eu/include/rtl8188e_cmd.h
index 1e01c1662f9a..c785cf8ed683 100644
--- a/drivers/staging/r8188eu/include/rtl8188e_cmd.h
+++ b/drivers/staging/r8188eu/include/rtl8188e_cmd.h
@@ -85,6 +85,6 @@ void rtl8188e_Add_RateATid(struct adapter *padapter, u32 bitmap, u8 arg,
void rtl8188e_set_p2p_ps_offload_cmd(struct adapter *adapt, u8 p2p_ps_state);
void CheckFwRsvdPageContent(struct adapter *adapt);
-void rtl8188e_set_FwMediaStatus_cmd(struct adapter *adapt, __le16 mstatus_rpt);
+void rtl8188e_set_FwMediaStatus_cmd(struct adapter *adapt, u16 mstatus_rpt);
#endif/* __RTL8188E_CMD_H__ */
diff --git a/drivers/staging/r8188eu/include/rtl8188e_spec.h b/drivers/staging/r8188eu/include/rtl8188e_spec.h
index 3fa3b3e5dd64..25b31417cd58 100644
--- a/drivers/staging/r8188eu/include/rtl8188e_spec.h
+++ b/drivers/staging/r8188eu/include/rtl8188e_spec.h
@@ -510,28 +510,7 @@ Default: 00b.
/* 8192C BW_OPMODE bits (Offset 0x203, 8bit) */
#define BW_OPMODE_20MHZ BIT(2)
-/* 8192C CAM Config Setting (offset 0x250, 1 byte) */
-#define CAM_VALID BIT(15)
-#define CAM_NOTVALID 0x0000
-#define CAM_USEDK BIT(5)
-
-#define CAM_CONTENT_COUNT 8
-
-#define CAM_NONE 0x0
-#define CAM_WEP40 0x01
-#define CAM_TKIP 0x02
-#define CAM_AES 0x04
-#define CAM_WEP104 0x05
-#define CAM_SMS4 0x6
-
-#define TOTAL_CAM_ENTRY 32
-#define HALF_CAM_ENTRY 16
-
-#define CAM_CONFIG_USEDK true
-#define CAM_CONFIG_NO_USEDK false
-
#define CAM_WRITE BIT(16)
-#define CAM_READ 0x00000000
#define CAM_POLLINIG BIT(31)
#define SCR_UseDK 0x01
diff --git a/drivers/staging/r8188eu/include/rtl8188e_xmit.h b/drivers/staging/r8188eu/include/rtl8188e_xmit.h
index 6db7fabebea9..a023dd792da7 100644
--- a/drivers/staging/r8188eu/include/rtl8188e_xmit.h
+++ b/drivers/staging/r8188eu/include/rtl8188e_xmit.h
@@ -5,17 +5,7 @@
#define __RTL8188E_XMIT_H__
#define MAX_TX_AGG_PACKET_NUMBER 0xFF
-/* */
-/* Queue Select Value in TxDesc */
-/* */
-#define QSLT_BK 0x2/* 0x01 */
-#define QSLT_BE 0x0
-#define QSLT_VI 0x5/* 0x4 */
-#define QSLT_VO 0x7/* 0x6 */
-#define QSLT_BEACON 0x10
-#define QSLT_HIGH 0x11
#define QSLT_MGNT 0x12
-#define QSLT_CMD 0x13
/* For 88e early mode */
#define SET_EARLYMODE_PKTNUM(__paddr, __value) \
@@ -131,14 +121,10 @@ struct txrpt_ccx_88e {
void rtl8188e_fill_fake_txdesc(struct adapter *padapter, u8 *pDesc,
u32 BufferLen, u8 IsPsPoll, u8 IsBTQosNull);
-s32 rtl8188eu_init_xmit_priv(struct adapter *padapter);
s32 rtl8188eu_hal_xmit(struct adapter *padapter, struct xmit_frame *frame);
s32 rtl8188eu_mgnt_xmit(struct adapter *padapter, struct xmit_frame *frame);
s32 rtl8188eu_xmit_buf_handler(struct adapter *padapter);
-#define hal_xmit_handler rtl8188eu_xmit_buf_handler
void rtl8188eu_xmit_tasklet(unsigned long priv);
-bool rtl8188eu_xmitframe_complete(struct adapter *padapter,
- struct xmit_priv *pxmitpriv,
- struct xmit_buf *pxmitbuf);
+bool rtl8188eu_xmitframe_complete(struct adapter *padapter);
#endif /* __RTL8188E_XMIT_H__ */
diff --git a/drivers/staging/r8188eu/include/rtw_cmd.h b/drivers/staging/r8188eu/include/rtw_cmd.h
index c330a4435b31..e8eecd52d1d8 100644
--- a/drivers/staging/r8188eu/include/rtw_cmd.h
+++ b/drivers/staging/r8188eu/include/rtw_cmd.h
@@ -32,7 +32,6 @@ struct cmd_priv {
struct completion start_cmd_thread;
struct completion stop_cmd_thread;
struct __queue cmd_queue;
- u8 cmd_seq;
u8 *cmd_buf; /* shall be non-paged, and 4 bytes aligned */
u8 *cmd_allocated_buf;
u8 *rsp_buf; /* shall be non-paged, and 4 bytes aligned */
@@ -744,7 +743,7 @@ u8 rtw_rpt_timer_cfg_cmd(struct adapter *padapter, u16 minRptTime);
u8 rtw_antenna_select_cmd(struct adapter *padapter, u8 antenna, u8 enqueue);
u8 rtw_ps_cmd(struct adapter *padapter);
-u8 rtw_chk_hi_queue_cmd(struct adapter *padapter);
+void rtw_chk_hi_queue_cmd(struct adapter *padapter);
u8 rtw_set_chplan_cmd(struct adapter *padapter, u8 chplan);
diff --git a/drivers/staging/r8188eu/include/rtw_io.h b/drivers/staging/r8188eu/include/rtw_io.h
index e9744694204b..e1718f739cc9 100644
--- a/drivers/staging/r8188eu/include/rtw_io.h
+++ b/drivers/staging/r8188eu/include/rtw_io.h
@@ -16,215 +16,10 @@
#include <linux/usb.h>
#include <linux/usb/ch9.h>
-#define rtw_usb_buffer_alloc(dev, size, dma) \
- usb_alloc_coherent((dev), (size), (in_interrupt() ? \
- GFP_ATOMIC : GFP_KERNEL), (dma))
-#define rtw_usb_buffer_free(dev, size, addr, dma) \
- usb_free_coherent((dev), (size), (addr), (dma))
-
-#define NUM_IOREQ 8
-
-#define MAX_PROT_SZ (64-16)
-
-#define _IOREADY 0
-#define _IO_WAIT_COMPLETE 1
-#define _IO_WAIT_RSP 2
-
-/* IO COMMAND TYPE */
-#define _IOSZ_MASK_ (0x7F)
-#define _IO_WRITE_ BIT(7)
-#define _IO_FIXED_ BIT(8)
-#define _IO_BURST_ BIT(9)
-#define _IO_BYTE_ BIT(10)
-#define _IO_HW_ BIT(11)
-#define _IO_WORD_ BIT(12)
-#define _IO_SYNC_ BIT(13)
-#define _IO_CMDMASK_ (0x1F80)
-
-/*
- For prompt mode accessing, caller shall free io_req
- Otherwise, io_handler will free io_req
-*/
-
-/* IO STATUS TYPE */
-#define _IO_ERR_ BIT(2)
-#define _IO_SUCCESS_ BIT(1)
-#define _IO_DONE_ BIT(0)
-
-#define IO_RD32 (_IO_SYNC_ | _IO_WORD_)
-#define IO_RD16 (_IO_SYNC_ | _IO_HW_)
-#define IO_RD8 (_IO_SYNC_ | _IO_BYTE_)
-
-#define IO_RD32_ASYNC (_IO_WORD_)
-#define IO_RD16_ASYNC (_IO_HW_)
-#define IO_RD8_ASYNC (_IO_BYTE_)
-
-#define IO_WR32 (_IO_WRITE_ | _IO_SYNC_ | _IO_WORD_)
-#define IO_WR16 (_IO_WRITE_ | _IO_SYNC_ | _IO_HW_)
-#define IO_WR8 (_IO_WRITE_ | _IO_SYNC_ | _IO_BYTE_)
-
-#define IO_WR32_ASYNC (_IO_WRITE_ | _IO_WORD_)
-#define IO_WR16_ASYNC (_IO_WRITE_ | _IO_HW_)
-#define IO_WR8_ASYNC (_IO_WRITE_ | _IO_BYTE_)
-
-/*
- Only Sync. burst accessing is provided.
-*/
-
-#define IO_WR_BURST(x) \
- (_IO_WRITE_ | _IO_SYNC_ | _IO_BURST_ | ((x) & _IOSZ_MASK_))
-#define IO_RD_BURST(x) \
- (_IO_SYNC_ | _IO_BURST_ | ((x) & _IOSZ_MASK_))
-
-/* below is for the intf_option bit defition... */
-
-#define _INTF_ASYNC_ BIT(0) /* support async io */
-
-struct intf_priv;
-struct intf_hdl;
-struct io_queue;
-
-struct io_req {
- struct list_head list;
- u32 addr;
- u32 val;
- u32 command;
- u32 status;
- u8 *pbuf;
- struct semaphore sema;
-
- void (*_async_io_callback)(struct adapter *padater,
- struct io_req *pio_req, u8 *cnxt);
- u8 *cnxt;
-};
-
-struct intf_hdl {
- struct adapter *padapter;
- struct dvobj_priv *pintf_dev;
-};
-
-struct reg_protocol_rd {
-#ifdef __LITTLE_ENDIAN
- /* DW1 */
- u32 NumOfTrans:4;
- u32 Reserved1:4;
- u32 Reserved2:24;
- /* DW2 */
- u32 ByteCount:7;
- u32 WriteEnable:1; /* 0:read, 1:write */
- u32 FixOrContinuous:1; /* 0:continuous, 1: Fix */
- u32 BurstMode:1;
- u32 Byte1Access:1;
- u32 Byte2Access:1;
- u32 Byte4Access:1;
- u32 Reserved3:3;
- u32 Reserved4:16;
- /* DW3 */
- u32 BusAddress;
- /* DW4 */
- /* u32 Value; */
-#else
-/* DW1 */
- u32 Reserved1:4;
- u32 NumOfTrans:4;
- u32 Reserved2:24;
- /* DW2 */
- u32 WriteEnable:1;
- u32 ByteCount:7;
- u32 Reserved3:3;
- u32 Byte4Access:1;
-
- u32 Byte2Access:1;
- u32 Byte1Access:1;
- u32 BurstMode:1;
- u32 FixOrContinuous:1;
- u32 Reserved4:16;
- /* DW3 */
- u32 BusAddress;
-
- /* DW4 */
-#endif
-};
-
-struct reg_protocol_wt {
-#ifdef __LITTLE_ENDIAN
- /* DW1 */
- u32 NumOfTrans:4;
- u32 Reserved1:4;
- u32 Reserved2:24;
- /* DW2 */
- u32 ByteCount:7;
- u32 WriteEnable:1; /* 0:read, 1:write */
- u32 FixOrContinuous:1; /* 0:continuous, 1: Fix */
- u32 BurstMode:1;
- u32 Byte1Access:1;
- u32 Byte2Access:1;
- u32 Byte4Access:1;
- u32 Reserved3:3;
- u32 Reserved4:16;
- /* DW3 */
- u32 BusAddress;
- /* DW4 */
- u32 Value;
-#else
- /* DW1 */
- u32 Reserved1 :4;
- u32 NumOfTrans:4;
- u32 Reserved2:24;
- /* DW2 */
- u32 WriteEnable:1;
- u32 ByteCount:7;
- u32 Reserved3:3;
- u32 Byte4Access:1;
- u32 Byte2Access:1;
- u32 Byte1Access:1;
- u32 BurstMode:1;
- u32 FixOrContinuous:1;
- u32 Reserved4:16;
- /* DW3 */
- u32 BusAddress;
- /* DW4 */
- u32 Value;
-#endif
-};
-
-/*
-Below is the data structure used by _io_handler
-*/
-
-struct io_queue {
- spinlock_t lock;
- struct list_head free_ioreqs;
- struct list_head pending; /* The io_req list that will be served
- * in the single protocol read/write.*/
- struct list_head processing;
- u8 *free_ioreqs_buf; /* 4-byte aligned */
- u8 *pallocated_free_ioreqs_buf;
- struct intf_hdl intf;
-};
-
-struct io_priv {
- struct adapter *padapter;
- struct intf_hdl intf;
-};
-
-uint ioreq_flush(struct adapter *adapter, struct io_queue *ioqueue);
-void sync_ioreq_enqueue(struct io_req *preq, struct io_queue *ioqueue);
-uint sync_ioreq_flush(struct adapter *adapter, struct io_queue *ioqueue);
-uint free_ioreq(struct io_req *preq, struct io_queue *pio_queue);
-struct io_req *alloc_ioreq(struct io_queue *pio_q);
-
-uint register_intf_hdl(u8 *dev, struct intf_hdl *pintfhdl);
-void unregister_intf_hdl(struct intf_hdl *pintfhdl);
-
-void _rtw_attrib_read(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem);
-void _rtw_attrib_write(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem);
-
int __must_check rtw_read8(struct adapter *adapter, u32 addr, u8 *data);
int __must_check rtw_read16(struct adapter *adapter, u32 addr, u16 *data);
int __must_check rtw_read32(struct adapter *adapter, u32 addr, u32 *data);
-void _rtw_read_mem(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem);
-u32 rtw_read_port(struct adapter *adapter, u8 *pmem);
+int rtw_read_port(struct adapter *adapter, struct recv_buf *precvbuf);
void rtw_read_port_cancel(struct adapter *adapter);
int rtw_write8(struct adapter *adapter, u32 addr, u8 val);
@@ -232,57 +27,7 @@ int rtw_write16(struct adapter *adapter, u32 addr, u16 val);
int rtw_write32(struct adapter *adapter, u32 addr, u32 val);
int rtw_writeN(struct adapter *adapter, u32 addr, u32 length, u8 *pdata);
-void _rtw_write_mem(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem);
u32 rtw_write_port(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem);
void rtw_write_port_cancel(struct adapter *adapter);
-void rtw_write_scsi(struct adapter *adapter, u32 cnt, u8 *pmem);
-
-/* ioreq */
-void ioreq_read8(struct adapter *adapter, u32 addr, u8 *pval);
-void ioreq_read16(struct adapter *adapter, u32 addr, u16 *pval);
-void ioreq_read32(struct adapter *adapter, u32 addr, u32 *pval);
-void ioreq_write8(struct adapter *adapter, u32 addr, u8 val);
-void ioreq_write16(struct adapter *adapter, u32 addr, u16 val);
-void ioreq_write32(struct adapter *adapter, u32 addr, u32 val);
-
-uint async_read8(struct adapter *adapter, u32 addr, u8 *pbuff,
- void (*_async_io_callback)(struct adapter *padater,
- struct io_req *pio_req,
- u8 *cnxt), u8 *cnxt);
-uint async_read16(struct adapter *adapter, u32 addr, u8 *pbuff,
- void (*_async_io_callback)(struct adapter *padater,
- struct io_req *pio_req,
- u8 *cnxt), u8 *cnxt);
-uint async_read32(struct adapter *adapter, u32 addr, u8 *pbuff,
- void (*_async_io_callback)(struct adapter *padater,
- struct io_req *pio_req,
- u8 *cnxt), u8 *cnxt);
-
-void async_read_mem(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem);
-void async_read_port(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem);
-
-void async_write8(struct adapter *adapter, u32 addr, u8 val,
- void (*_async_io_callback)(struct adapter *padater,
- struct io_req *pio_req,
- u8 *cnxt), u8 *cnxt);
-void async_write16(struct adapter *adapter, u32 addr, u16 val,
- void (*_async_io_callback)(struct adapter *padater,
- struct io_req *pio_req,
- u8 *cnxt), u8 *cnxt);
-void async_write32(struct adapter *adapter, u32 addr, u32 val,
- void (*_async_io_callback)(struct adapter *padater,
- struct io_req *pio_req,
- u8 *cnxt), u8 *cnxt);
-
-void async_write_mem(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem);
-void async_write_port(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem);
-
-uint alloc_io_queue(struct adapter *adapter);
-void free_io_queue(struct adapter *adapter);
-void async_bus_io(struct io_queue *pio_q);
-void bus_sync_io(struct io_queue *pio_q);
-u32 _ioreq2rwmem(struct io_queue *pio_q);
-void dev_power_down(struct adapter *Adapter, u8 bpwrup);
-
#endif /* _RTL8711_IO_H_ */
diff --git a/drivers/staging/r8188eu/include/rtw_pwrctrl.h b/drivers/staging/r8188eu/include/rtw_pwrctrl.h
index 6e9fdd66fad1..9f5cffd8bfb1 100644
--- a/drivers/staging/r8188eu/include/rtw_pwrctrl.h
+++ b/drivers/staging/r8188eu/include/rtw_pwrctrl.h
@@ -71,7 +71,6 @@ struct pwrctrl_priv {
int pwr_state_check_interval;
enum rt_rf_power_state rf_pwrstate;/* cur power state */
- enum rt_rf_power_state change_rfpwrstate;
u8 bkeepfwalive;
};
@@ -99,8 +98,6 @@ void rtw_set_firmware_ps_mode(struct adapter *adapter, u8 mode);
void rtw_set_ps_mode(struct adapter *adapter, u8 ps_mode, u8 smart_ps,
u8 bcn_ant_mode);
void LeaveAllPowerSaveMode(struct adapter *adapter);
-void ips_enter(struct adapter *padapter);
-int ips_leave(struct adapter *padapter);
void rtw_ps_processor(struct adapter *padapter);
diff --git a/drivers/staging/r8188eu/include/rtw_xmit.h b/drivers/staging/r8188eu/include/rtw_xmit.h
index 6e7ebea5362d..feeac85aedb0 100644
--- a/drivers/staging/r8188eu/include/rtw_xmit.h
+++ b/drivers/staging/r8188eu/include/rtw_xmit.h
@@ -97,7 +97,7 @@ union txdesc {
};
struct hw_xmit {
- struct __queue *sta_queue;
+ struct list_head *sta_list;
int accnt;
};
@@ -152,14 +152,7 @@ struct pkt_attrib {
#define NULL_FRAMETAG (0x0)
#define DATA_FRAMETAG 0x01
-#define L2_FRAMETAG 0x02
#define MGNT_FRAMETAG 0x03
-#define AMSDU_FRAMETAG 0x04
-
-#define EII_FRAMETAG 0x05
-#define IEEE8023_FRAMETAG 0x06
-
-#define MP_FRAMETAG 0x07
#define TXAGG_FRAMETAG 0x08
@@ -196,14 +189,11 @@ struct xmit_buf {
u8 *pbuf;
void *priv_data;
u16 ext_tag; /* 0: Normal xmitbuf, 1: extension xmitbuf. */
- u16 flags;
+ bool high_queue;
u32 alloc_sz;
u32 len;
struct submit_ctx *sctx;
- u32 ff_hwaddr;
struct urb *pxmit_urb;
- dma_addr_t dma_transfer_addr; /* (in) dma addr for transfer_buffer */
- u8 bpending[8];
int last[8];
};
@@ -223,21 +213,16 @@ struct xmit_frame {
struct tx_servq {
struct list_head tx_pending;
- struct __queue sta_pending;
+ struct list_head sta_pending;
int qcnt;
};
struct sta_xmit_priv {
spinlock_t lock;
- int option;
- int apsd_setting; /* When bit mask is on, the associated edca
- * queue supports APSD. */
struct tx_servq be_q; /* priority == 0,3 */
struct tx_servq bk_q; /* priority == 1,2 */
struct tx_servq vi_q; /* priority == 4,5 */
struct tx_servq vo_q; /* priority == 6,7 */
- struct list_head legacy_dz;
- struct list_head apsd;
u16 txseq_tid[16];
};
@@ -252,46 +237,28 @@ struct hw_txqueue {
int ac_tag;
};
-struct agg_pkt_info {
- u16 offset;
- u16 pkt_len;
-};
-
struct xmit_priv {
spinlock_t lock;
- struct semaphore terminate_xmitthread_sema;
- struct __queue be_pending;
- struct __queue bk_pending;
- struct __queue vi_pending;
- struct __queue vo_pending;
- struct __queue bm_pending;
+ struct list_head be_pending;
+ struct list_head bk_pending;
+ struct list_head vi_pending;
+ struct list_head vo_pending;
u8 *pallocated_frame_buf;
u8 *pxmit_frame_buf;
uint free_xmitframe_cnt;
struct __queue free_xmit_queue;
uint frag_len;
struct adapter *adapter;
- u8 vcs_setting;
- u8 vcs;
- u8 vcs_type;
u64 tx_bytes;
u64 tx_pkts;
u64 tx_drop;
u64 last_tx_bytes;
u64 last_tx_pkts;
struct hw_xmit *hwxmits;
- u8 hwxmit_entry;
u8 wmm_para_seq[4];/* sequence for wmm ac parameter strength
* from large to small. it's value is 0->vo,
* 1->vi, 2->be, 3->bk. */
- struct semaphore tx_retevt;/* all tx return event; */
- u8 txirp_cnt;/* */
struct tasklet_struct xmit_tasklet;
- /* per AC pending irp */
- int beq_cnt;
- int bkq_cnt;
- int viq_cnt;
- int voq_cnt;
struct __queue free_xmitbuf_queue;
struct __queue pending_xmitbuf_queue;
u8 *pallocated_xmitbuf;
@@ -324,7 +291,6 @@ s32 rtw_free_xmitbuf(struct xmit_priv *pxmitpriv,
struct xmit_buf *pxmitbuf);
void rtw_count_tx_stats(struct adapter *padapter,
struct xmit_frame *pxmitframe, int sz);
-void rtw_update_protection(struct adapter *padapter, u8 *ie, uint ie_len);
s32 rtw_make_wlanhdr(struct adapter *padapter, u8 *hdr,
struct pkt_attrib *pattrib);
s32 rtw_put_snap(u8 *data, u16 h_proto);
@@ -332,14 +298,11 @@ s32 rtw_put_snap(u8 *data, u16 h_proto);
struct xmit_frame *rtw_alloc_xmitframe(struct xmit_priv *pxmitpriv);
s32 rtw_free_xmitframe(struct xmit_priv *pxmitpriv,
struct xmit_frame *pxmitframe);
-void rtw_free_xmitframe_queue(struct xmit_priv *pxmitpriv,
- struct __queue *pframequeue);
+void rtw_free_xmitframe_list(struct xmit_priv *pxmitpriv, struct list_head *xframe_list);
struct tx_servq *rtw_get_sta_pending(struct adapter *padapter,
struct sta_info *psta, int up, u8 *ac);
-s32 rtw_xmitframe_enqueue(struct adapter *padapter,
- struct xmit_frame *pxmitframe);
struct xmit_frame *rtw_dequeue_xframe(struct xmit_priv *pxmitpriv,
- struct hw_xmit *phwxmit_i, int entry);
+ struct hw_xmit *phwxmit_i);
s32 rtw_xmit_classifier(struct adapter *padapter,
struct xmit_frame *pxmitframe);
@@ -350,11 +313,9 @@ void _rtw_init_sta_xmit_priv(struct sta_xmit_priv *psta_xmitpriv);
s32 rtw_txframes_pending(struct adapter *padapter);
s32 rtw_txframes_sta_ac_pending(struct adapter *padapter,
struct pkt_attrib *pattrib);
-void rtw_init_hwxmits(struct hw_xmit *phwxmit, int entry);
int _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter);
void _rtw_free_xmit_priv(struct xmit_priv *pxmitpriv);
int rtw_alloc_hwxmits(struct adapter *padapter);
-void rtw_free_hwxmits(struct adapter *padapter);
s32 rtw_xmit(struct adapter *padapter, struct sk_buff **pkt);
int xmitframe_enqueue_for_sleeping_sta(struct adapter *padapter, struct xmit_frame *pxmitframe);
diff --git a/drivers/staging/r8188eu/include/usb_ops.h b/drivers/staging/r8188eu/include/usb_ops.h
index ddc46cb44358..5bd8ce37aebf 100644
--- a/drivers/staging/r8188eu/include/usb_ops.h
+++ b/drivers/staging/r8188eu/include/usb_ops.h
@@ -17,8 +17,6 @@
#define MAX_VENDOR_REQ_CMD_SIZE 254 /* 8188cu SIE Support */
#define MAX_USB_IO_CTL_SIZE (MAX_VENDOR_REQ_CMD_SIZE + ALIGNMENT_UNIT)
-#include "usb_ops_linux.h"
-
/*
* Increase and check if the continual_urb_error of this @param dvobjprivei
* is larger than MAX_CONTINUAL_URB_ERR
diff --git a/drivers/staging/r8188eu/include/usb_ops_linux.h b/drivers/staging/r8188eu/include/usb_ops_linux.h
deleted file mode 100644
index 966688eedf66..000000000000
--- a/drivers/staging/r8188eu/include/usb_ops_linux.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#ifndef __USB_OPS_LINUX_H__
-#define __USB_OPS_LINUX_H__
-
-#define VENDOR_CMD_MAX_DATA_LEN 254
-
-#define RTW_USB_CONTROL_MSG_TIMEOUT_TEST 10/* ms */
-#define RTW_USB_CONTROL_MSG_TIMEOUT 500/* ms */
-
-#define MAX_USBCTRL_VENDORREQ_TIMES 10
-
-#define RTW_USB_BULKOUT_TIME 5000/* ms */
-
-#define _usbctrl_vendorreq_async_callback(urb, regs) \
- _usbctrl_vendorreq_async_callback(urb)
-#define usb_bulkout_zero_complete(purb, regs) \
- usb_bulkout_zero_complete(purb)
-#define usb_write_mem_complete(purb, regs) \
- usb_write_mem_complete(purb)
-#define usb_write_port_complete(purb, regs) \
- usb_write_port_complete(purb)
-#define usb_read_port_complete(purb, regs) \
- usb_read_port_complete(purb)
-#define usb_read_interrupt_complete(purb, regs) \
- usb_read_interrupt_complete(purb)
-
-#endif
diff --git a/drivers/staging/r8188eu/os_dep/ioctl_linux.c b/drivers/staging/r8188eu/os_dep/ioctl_linux.c
index 8e9b7b0664bc..e0a819970546 100644
--- a/drivers/staging/r8188eu/os_dep/ioctl_linux.c
+++ b/drivers/staging/r8188eu/os_dep/ioctl_linux.c
@@ -3061,8 +3061,7 @@ static int rtw_rereg_nd_name(struct net_device *dev,
char *reg_ifname;
reg_ifname = padapter->registrypriv.if2name;
- strncpy(rereg_priv->old_ifname, reg_ifname, IFNAMSIZ);
- rereg_priv->old_ifname[IFNAMSIZ - 1] = 0;
+ strscpy(rereg_priv->old_ifname, reg_ifname, IFNAMSIZ);
}
if (wrqu->data.length > IFNAMSIZ)
@@ -3084,8 +3083,7 @@ static int rtw_rereg_nd_name(struct net_device *dev,
rtw_ips_mode_req(&padapter->pwrctrlpriv, rereg_priv->old_ips_mode);
}
- strncpy(rereg_priv->old_ifname, new_ifname, IFNAMSIZ);
- rereg_priv->old_ifname[IFNAMSIZ - 1] = 0;
+ strscpy(rereg_priv->old_ifname, new_ifname, IFNAMSIZ);
if (!memcmp(new_ifname, "disable%d", 9)) {
/* free network queue for Android's timming issue */
diff --git a/drivers/staging/r8188eu/os_dep/os_intfs.c b/drivers/staging/r8188eu/os_dep/os_intfs.c
index 2f59bb994796..dc419fd1ffa5 100644
--- a/drivers/staging/r8188eu/os_dep/os_intfs.c
+++ b/drivers/staging/r8188eu/os_dep/os_intfs.c
@@ -392,9 +392,6 @@ static void rtw_init_default_value(struct adapter *padapter)
struct security_priv *psecuritypriv = &padapter->securitypriv;
/* xmit_priv */
- pxmitpriv->vcs_setting = pregistrypriv->vrtl_carrier_sense;
- pxmitpriv->vcs = pregistrypriv->vcs_type;
- pxmitpriv->vcs_type = pregistrypriv->vcs_type;
pxmitpriv->frag_len = pregistrypriv->frag_thresh;
/* mlme_priv */
@@ -485,7 +482,7 @@ u8 rtw_init_drv_sw(struct adapter *padapter)
goto free_mlme_ext;
}
- if (_rtw_init_recv_priv(&padapter->recvpriv, padapter) == _FAIL) {
+ if (_rtw_init_recv_priv(&padapter->recvpriv, padapter)) {
dev_err(dvobj_to_dev(padapter->dvobj), "_rtw_init_recv_priv failed\n");
goto free_xmit_priv;
}
@@ -634,8 +631,8 @@ static int _netdev_open(struct net_device *pnetdev)
pr_info("can't init mlme_ext_priv\n");
goto netdev_open_error;
}
- if (padapter->intf_start)
- padapter->intf_start(padapter);
+ if (rtl8188eu_inirp_init(padapter))
+ goto netdev_open_error;
rtw_led_control(padapter, LED_CTL_NO_LINK);
@@ -687,8 +684,8 @@ static int ips_netdrv_open(struct adapter *padapter)
if (status == _FAIL)
goto netdev_open_error;
- if (padapter->intf_start)
- padapter->intf_start(padapter);
+ if (rtl8188eu_inirp_init(padapter))
+ goto netdev_open_error;
rtw_set_pwr_state_check_timer(&padapter->pwrctrlpriv);
_set_timer(&padapter->mlmepriv.dynamic_chk_timer, 5000);
@@ -764,8 +761,8 @@ void rtw_ips_dev_unload(struct adapter *padapter)
{
rtw_fifo_cleanup(padapter);
- if (padapter->intf_stop)
- padapter->intf_stop(padapter);
+ rtw_read_port_cancel(padapter);
+ rtw_write_port_cancel(padapter);
/* s5. */
if (!padapter->bSurpriseRemoved)
diff --git a/drivers/staging/r8188eu/os_dep/usb_intf.c b/drivers/staging/r8188eu/os_dep/usb_intf.c
index 5fbfbcd95de2..74a16d1757ce 100644
--- a/drivers/staging/r8188eu/os_dep/usb_intf.c
+++ b/drivers/staging/r8188eu/os_dep/usb_intf.c
@@ -152,22 +152,6 @@ static void usb_dvobj_deinit(struct usb_interface *usb_intf)
}
-static void usb_intf_start(struct adapter *padapter)
-{
- rtl8188eu_inirp_init(padapter);
-}
-
-static void usb_intf_stop(struct adapter *padapter)
-{
- /* cancel in irp */
- rtw_read_port_cancel(padapter);
-
- /* cancel out irp */
- rtw_write_port_cancel(padapter);
-
- /* todo:cancel other irps */
-}
-
static void rtw_dev_unload(struct adapter *padapter)
{
if (padapter->bup) {
@@ -175,8 +159,9 @@ static void rtw_dev_unload(struct adapter *padapter)
if (padapter->xmitpriv.ack_tx)
rtw_ack_tx_done(&padapter->xmitpriv, RTW_SCTX_DONE_DRV_STOP);
/* s3. */
- if (padapter->intf_stop)
- padapter->intf_stop(padapter);
+ rtw_read_port_cancel(padapter);
+ rtw_write_port_cancel(padapter);
+
/* s4. */
rtw_stop_drv_threads(padapter);
@@ -290,8 +275,6 @@ static int rtw_usb_if1_init(struct dvobj_priv *dvobj, struct usb_interface *pusb
{
struct adapter *padapter = NULL;
struct net_device *pnetdev = NULL;
- struct io_priv *piopriv;
- struct intf_hdl *pintf;
int ret;
padapter = vzalloc(sizeof(*padapter));
@@ -315,16 +298,6 @@ static int rtw_usb_if1_init(struct dvobj_priv *dvobj, struct usb_interface *pusb
SET_NETDEV_DEV(pnetdev, dvobj_to_dev(dvobj));
padapter = rtw_netdev_priv(pnetdev);
- padapter->intf_start = &usb_intf_start;
- padapter->intf_stop = &usb_intf_stop;
-
- /* step init_io_priv */
- piopriv = &padapter->iopriv;
- pintf = &piopriv->intf;
- piopriv->padapter = padapter;
- pintf->padapter = padapter;
- pintf->pintf_dev = adapter_to_dvobj(padapter);
-
/* step read_chip_version */
rtl8188e_read_chip_version(padapter);
diff --git a/drivers/staging/r8188eu/os_dep/usb_ops_linux.c b/drivers/staging/r8188eu/os_dep/usb_ops_linux.c
index 220e592b757c..ca09f7ed7e4d 100644
--- a/drivers/staging/r8188eu/os_dep/usb_ops_linux.c
+++ b/drivers/staging/r8188eu/os_dep/usb_ops_linux.c
@@ -4,7 +4,6 @@
#define _USB_OPS_LINUX_C_
#include "../include/drv_types.h"
-#include "../include/usb_ops_linux.h"
#include "../include/rtl8188e_recv.h"
static unsigned int ffaddr2pipehdl(struct dvobj_priv *pdvobj, u32 addr)
@@ -29,68 +28,40 @@ void rtw_read_port_cancel(struct adapter *padapter)
for (i = 0; i < NR_RECVBUFF; i++) {
precvbuf->reuse = true;
- if (precvbuf->purb)
- usb_kill_urb(precvbuf->purb);
+ usb_kill_urb(precvbuf->purb);
precvbuf++;
}
}
-static void usb_write_port_complete(struct urb *purb, struct pt_regs *regs)
+static void usb_write_port_complete(struct urb *purb)
{
struct xmit_buf *pxmitbuf = (struct xmit_buf *)purb->context;
- struct adapter *padapter = pxmitbuf->padapter;
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ struct adapter *padapter = pxmitbuf->padapter;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- switch (pxmitbuf->flags) {
- case VO_QUEUE_INX:
- pxmitpriv->voq_cnt--;
- break;
- case VI_QUEUE_INX:
- pxmitpriv->viq_cnt--;
- break;
- case BE_QUEUE_INX:
- pxmitpriv->beq_cnt--;
- break;
- case BK_QUEUE_INX:
- pxmitpriv->bkq_cnt--;
- break;
- case HIGH_QUEUE_INX:
+ if (pxmitbuf->high_queue)
rtw_chk_hi_queue_cmd(padapter);
+
+ switch (purb->status) {
+ case 0:
+ case -EINPROGRESS:
+ case -ENOENT:
+ case -ECONNRESET:
+ case -EPIPE:
+ case -EPROTO:
+ break;
+ case -ESHUTDOWN:
+ padapter->bDriverStopped = true;
break;
default:
+ padapter->bSurpriseRemoved = true;
break;
}
- if (padapter->bSurpriseRemoved || padapter->bDriverStopped ||
- padapter->bWritePortCancel)
- goto check_completion;
-
- if (purb->status) {
- if (purb->status == -EINPROGRESS) {
- goto check_completion;
- } else if (purb->status == -ENOENT) {
- goto check_completion;
- } else if (purb->status == -ECONNRESET) {
- goto check_completion;
- } else if (purb->status == -ESHUTDOWN) {
- padapter->bDriverStopped = true;
- goto check_completion;
- } else if ((purb->status != -EPIPE) && (purb->status != -EPROTO)) {
- padapter->bSurpriseRemoved = true;
-
- goto check_completion;
- }
- }
-
-check_completion:
rtw_sctx_done_err(&pxmitbuf->sctx,
- purb->status ? RTW_SCTX_DONE_WRITE_PORT_ERR :
- RTW_SCTX_DONE_SUCCESS);
-
+ purb->status ? RTW_SCTX_DONE_WRITE_PORT_ERR : RTW_SCTX_DONE_SUCCESS);
rtw_free_xmitbuf(pxmitpriv, pxmitbuf);
-
tasklet_hi_schedule(&pxmitpriv->xmit_tasklet);
-
}
u32 rtw_write_port(struct adapter *padapter, u32 addr, u32 cnt, u8 *wmem)
@@ -112,32 +83,7 @@ u32 rtw_write_port(struct adapter *padapter, u32 addr, u32 cnt, u8 *wmem)
}
spin_lock_irqsave(&pxmitpriv->lock, irqL);
-
- switch (addr) {
- case VO_QUEUE_INX:
- pxmitpriv->voq_cnt++;
- pxmitbuf->flags = VO_QUEUE_INX;
- break;
- case VI_QUEUE_INX:
- pxmitpriv->viq_cnt++;
- pxmitbuf->flags = VI_QUEUE_INX;
- break;
- case BE_QUEUE_INX:
- pxmitpriv->beq_cnt++;
- pxmitbuf->flags = BE_QUEUE_INX;
- break;
- case BK_QUEUE_INX:
- pxmitpriv->bkq_cnt++;
- pxmitbuf->flags = BK_QUEUE_INX;
- break;
- case HIGH_QUEUE_INX:
- pxmitbuf->flags = HIGH_QUEUE_INX;
- break;
- default:
- pxmitbuf->flags = MGT_QUEUE_INX;
- break;
- }
-
+ pxmitbuf->high_queue = (addr == HIGH_QUEUE_INX);
spin_unlock_irqrestore(&pxmitpriv->lock, irqL);
purb = pxmitbuf->pxmit_urb;
@@ -154,14 +100,8 @@ u32 rtw_write_port(struct adapter *padapter, u32 addr, u32 cnt, u8 *wmem)
status = usb_submit_urb(purb, GFP_ATOMIC);
if (status) {
rtw_sctx_done_err(&pxmitbuf->sctx, RTW_SCTX_DONE_WRITE_PORT_ERR);
-
- switch (status) {
- case -ENODEV:
+ if (status == -ENODEV)
padapter->bDriverStopped = true;
- break;
- default:
- break;
- }
goto exit;
}
@@ -184,15 +124,13 @@ void rtw_write_port_cancel(struct adapter *padapter)
padapter->bWritePortCancel = true;
for (i = 0; i < NR_XMITBUFF; i++) {
- if (pxmitbuf->pxmit_urb)
- usb_kill_urb(pxmitbuf->pxmit_urb);
+ usb_kill_urb(pxmitbuf->pxmit_urb);
pxmitbuf++;
}
pxmitbuf = (struct xmit_buf *)padapter->xmitpriv.pxmit_extbuf;
for (i = 0; i < NR_XMIT_EXTBUFF; i++) {
- if (pxmitbuf->pxmit_urb)
- usb_kill_urb(pxmitbuf->pxmit_urb);
+ usb_kill_urb(pxmitbuf->pxmit_urb);
pxmitbuf++;
}
}
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.c b/drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.c
index ab2e9b729883..73a86e1d0701 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.c
@@ -21,7 +21,7 @@ void rtl92e_set_bandwidth(struct net_device *dev,
return;
}
- for (eRFPath = 0; eRFPath < priv->NumTotalRFPath; eRFPath++) {
+ for (eRFPath = 0; eRFPath < priv->num_total_rf_path; eRFPath++) {
if (!rtl92e_is_legal_rf_path(dev, eRFPath))
continue;
@@ -63,14 +63,14 @@ bool rtl92e_config_rf(struct net_device *dev)
u8 ConstRetryTimes = 5, RetryTimes = 5;
u8 ret = 0;
- priv->NumTotalRFPath = RTL819X_TOTAL_RF_PATH;
+ priv->num_total_rf_path = RTL819X_TOTAL_RF_PATH;
for (eRFPath = (enum rf90_radio_path)RF90_PATH_A;
- eRFPath < priv->NumTotalRFPath; eRFPath++) {
+ eRFPath < priv->num_total_rf_path; eRFPath++) {
if (!rtl92e_is_legal_rf_path(dev, eRFPath))
continue;
- pPhyReg = &priv->PHYRegDef[eRFPath];
+ pPhyReg = &priv->phy_reg_def[eRFPath];
switch (eRFPath) {
case RF90_PATH_A:
@@ -150,11 +150,11 @@ void rtl92e_set_cck_tx_power(struct net_device *dev, u8 powerlevel)
struct r8192_priv *priv = rtllib_priv(dev);
TxAGC = powerlevel;
- if (priv->bDynamicTxLowPower) {
- if (priv->CustomerID == RT_CID_819x_Netcore)
+ if (priv->dynamic_tx_low_pwr) {
+ if (priv->customer_id == RT_CID_819X_NETCORE)
TxAGC = 0x22;
else
- TxAGC += priv->CckPwEnl;
+ TxAGC += priv->cck_pwr_enl;
}
if (TxAGC > 0x24)
TxAGC = 0x24;
@@ -169,7 +169,7 @@ void rtl92e_set_ofdm_tx_power(struct net_device *dev, u8 powerlevel)
u16 RegOffset[6] = {0xe00, 0xe04, 0xe10, 0xe14, 0xe18, 0xe1c};
u8 byte0, byte1, byte2, byte3;
- powerBase0 = powerlevel + priv->LegacyHTTxPowerDiff;
+ powerBase0 = powerlevel + priv->legacy_ht_tx_pwr_diff;
powerBase0 = (powerBase0 << 24) | (powerBase0 << 16) |
(powerBase0 << 8) | powerBase0;
powerBase1 = powerlevel;
@@ -177,7 +177,7 @@ void rtl92e_set_ofdm_tx_power(struct net_device *dev, u8 powerlevel)
(powerBase1 << 8) | powerBase1;
for (index = 0; index < 6; index++) {
- writeVal = (u32)(priv->MCSTxPowerLevelOriginalOffset[index] +
+ writeVal = (u32)(priv->mcs_tx_pwr_level_org_offset[index] +
((index < 2) ? powerBase0 : powerBase1));
byte0 = writeVal & 0x7f;
byte1 = (writeVal & 0x7f00) >> 8;
@@ -195,10 +195,10 @@ void rtl92e_set_ofdm_tx_power(struct net_device *dev, u8 powerlevel)
if (index == 3) {
writeVal_tmp = (byte3 << 24) | (byte2 << 16) |
(byte1 << 8) | byte0;
- priv->Pwr_Track = writeVal_tmp;
+ priv->pwr_track = writeVal_tmp;
}
- if (priv->bDynamicTxHighPower)
+ if (priv->dynamic_tx_high_pwr)
writeVal = 0x03030303;
else
writeVal = (byte3 << 24) | (byte2 << 16) |
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c
index 8bf06f736ffb..eba8364d0ff2 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c
@@ -76,7 +76,7 @@ bool rtl92e_send_cmd_pkt(struct net_device *dev, u32 type, const void *data,
} while (frag_offset < len);
- rtl92e_writeb(dev, TPPoll, TPPoll_CQ);
+ rtl92e_writeb(dev, TP_POLL, TP_POLL_CQ);
Failed:
return rt_status;
}
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
index f02e67f68e23..0b5b2ae27f9e 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
@@ -54,22 +54,16 @@ static void _rtl92e_update_msr(struct net_device *dev)
switch (priv->rtllib->iw_mode) {
case IW_MODE_INFRA:
if (priv->rtllib->state == RTLLIB_LINKED)
- msr |= (MSR_LINK_MANAGED << MSR_LINK_SHIFT);
- else
- msr |= (MSR_LINK_NONE << MSR_LINK_SHIFT);
+ msr |= MSR_LINK_MANAGED;
LedAction = LED_CTL_LINK;
break;
case IW_MODE_ADHOC:
if (priv->rtllib->state == RTLLIB_LINKED)
- msr |= (MSR_LINK_ADHOC << MSR_LINK_SHIFT);
- else
- msr |= (MSR_LINK_NONE << MSR_LINK_SHIFT);
+ msr |= MSR_LINK_ADHOC;
break;
case IW_MODE_MASTER:
if (priv->rtllib->state == RTLLIB_LINKED)
- msr |= (MSR_LINK_MASTER << MSR_LINK_SHIFT);
- else
- msr |= (MSR_LINK_NONE << MSR_LINK_SHIFT);
+ msr |= MSR_LINK_MASTER;
break;
default:
break;
@@ -127,7 +121,7 @@ void rtl92e_set_reg(struct net_device *dev, u8 variable, u8 *val)
Type = val[0];
RegRCR = rtl92e_readl(dev, RCR);
- priv->ReceiveConfig = RegRCR;
+ priv->receive_config = RegRCR;
if (Type)
RegRCR |= (RCR_CBSSID);
@@ -135,7 +129,7 @@ void rtl92e_set_reg(struct net_device *dev, u8 variable, u8 *val)
RegRCR &= (~RCR_CBSSID);
rtl92e_writel(dev, RCR, RegRCR);
- priv->ReceiveConfig = RegRCR;
+ priv->receive_config = RegRCR;
}
break;
@@ -222,41 +216,41 @@ void rtl92e_set_reg(struct net_device *dev, u8 variable, u8 *val)
union aci_aifsn *pAciAifsn = (union aci_aifsn *)&
(qos_parameters->aifs[0]);
u8 acm = pAciAifsn->f.acm;
- u8 AcmCtrl = rtl92e_readb(dev, AcmHwCtrl);
+ u8 AcmCtrl = rtl92e_readb(dev, ACM_HW_CTRL);
if (acm) {
switch (eACI) {
case AC0_BE:
- AcmCtrl |= AcmHw_BeqEn;
+ AcmCtrl |= ACM_HW_BEQ_EN;
break;
case AC2_VI:
- AcmCtrl |= AcmHw_ViqEn;
+ AcmCtrl |= ACM_HW_VIQ_EN;
break;
case AC3_VO:
- AcmCtrl |= AcmHw_VoqEn;
+ AcmCtrl |= ACM_HW_VOQ_EN;
break;
}
} else {
switch (eACI) {
case AC0_BE:
- AcmCtrl &= (~AcmHw_BeqEn);
+ AcmCtrl &= (~ACM_HW_BEQ_EN);
break;
case AC2_VI:
- AcmCtrl &= (~AcmHw_ViqEn);
+ AcmCtrl &= (~ACM_HW_VIQ_EN);
break;
case AC3_VO:
- AcmCtrl &= (~AcmHw_BeqEn);
+ AcmCtrl &= (~ACM_HW_BEQ_EN);
break;
default:
break;
}
}
- rtl92e_writeb(dev, AcmHwCtrl, AcmCtrl);
+ rtl92e_writeb(dev, ACM_HW_CTRL, AcmCtrl);
break;
}
@@ -292,21 +286,21 @@ static void _rtl92e_read_eeprom_info(struct net_device *dev)
if (EEPROMId != RTL8190_EEPROM_ID) {
netdev_err(dev, "%s(): Invalid EEPROM ID: %x\n", __func__,
EEPROMId);
- priv->AutoloadFailFlag = true;
+ priv->autoload_fail_flag = true;
} else {
- priv->AutoloadFailFlag = false;
+ priv->autoload_fail_flag = false;
}
- if (!priv->AutoloadFailFlag) {
+ if (!priv->autoload_fail_flag) {
priv->eeprom_vid = rtl92e_eeprom_read(dev, EEPROM_VID >> 1);
priv->eeprom_did = rtl92e_eeprom_read(dev, EEPROM_DID >> 1);
usValue = rtl92e_eeprom_read(dev,
(EEPROM_Customer_ID >> 1)) >> 8;
- priv->eeprom_CustomerID = usValue & 0xff;
+ priv->eeprom_customer_id = usValue & 0xff;
usValue = rtl92e_eeprom_read(dev,
EEPROM_ICVersion_ChannelPlan>>1);
- priv->eeprom_ChannelPlan = usValue&0xff;
+ priv->eeprom_chnl_plan = usValue&0xff;
IC_Version = (usValue & 0xff00)>>8;
ICVer8192 = IC_Version & 0xf;
@@ -327,11 +321,11 @@ static void _rtl92e_read_eeprom_info(struct net_device *dev)
priv->card_8192_version = VERSION_8190_BD;
priv->eeprom_vid = 0;
priv->eeprom_did = 0;
- priv->eeprom_CustomerID = 0;
- priv->eeprom_ChannelPlan = 0;
+ priv->eeprom_customer_id = 0;
+ priv->eeprom_chnl_plan = 0;
}
- if (!priv->AutoloadFailFlag) {
+ if (!priv->autoload_fail_flag) {
u8 addr[ETH_ALEN];
for (i = 0; i < 6; i += 2) {
@@ -345,125 +339,94 @@ static void _rtl92e_read_eeprom_info(struct net_device *dev)
}
if (priv->card_8192_version > VERSION_8190_BD)
- priv->bTXPowerDataReadFromEEPORM = true;
+ priv->tx_pwr_data_read_from_eeprom = true;
else
- priv->bTXPowerDataReadFromEEPORM = false;
+ priv->tx_pwr_data_read_from_eeprom = false;
priv->rf_type = RTL819X_DEFAULT_RF_TYPE;
if (priv->card_8192_version > VERSION_8190_BD) {
- if (!priv->AutoloadFailFlag) {
+ if (!priv->autoload_fail_flag) {
tempval = (rtl92e_eeprom_read(dev,
(EEPROM_RFInd_PowerDiff >> 1))) & 0xff;
- priv->EEPROMLegacyHTTxPowerDiff = tempval & 0xf;
+ priv->eeprom_legacy_ht_tx_pwr_diff = tempval & 0xf;
if (tempval&0x80)
priv->rf_type = RF_1T2R;
else
priv->rf_type = RF_2T4R;
} else {
- priv->EEPROMLegacyHTTxPowerDiff = 0x04;
+ priv->eeprom_legacy_ht_tx_pwr_diff = 0x04;
}
- if (!priv->AutoloadFailFlag)
- priv->EEPROMThermalMeter = ((rtl92e_eeprom_read(dev,
+ if (!priv->autoload_fail_flag)
+ priv->eeprom_thermal_meter = ((rtl92e_eeprom_read(dev,
(EEPROM_ThermalMeter>>1))) &
0xff00) >> 8;
else
- priv->EEPROMThermalMeter = EEPROM_Default_ThermalMeter;
- priv->TSSI_13dBm = priv->EEPROMThermalMeter * 100;
+ priv->eeprom_thermal_meter = EEPROM_Default_ThermalMeter;
+ priv->tssi_13dBm = priv->eeprom_thermal_meter * 100;
if (priv->epromtype == EEPROM_93C46) {
- if (!priv->AutoloadFailFlag) {
+ if (!priv->autoload_fail_flag) {
usValue = rtl92e_eeprom_read(dev,
EEPROM_TxPwDiff_CrystalCap >> 1);
- priv->EEPROMAntPwDiff = usValue & 0x0fff;
- priv->EEPROMCrystalCap = (usValue & 0xf000)
+ priv->eeprom_ant_pwr_diff = usValue & 0x0fff;
+ priv->eeprom_crystal_cap = (usValue & 0xf000)
>> 12;
} else {
- priv->EEPROMAntPwDiff =
+ priv->eeprom_ant_pwr_diff =
EEPROM_Default_AntTxPowerDiff;
- priv->EEPROMCrystalCap =
+ priv->eeprom_crystal_cap =
EEPROM_Default_TxPwDiff_CrystalCap;
}
for (i = 0; i < 14; i += 2) {
- if (!priv->AutoloadFailFlag)
+ if (!priv->autoload_fail_flag)
usValue = rtl92e_eeprom_read(dev,
(EEPROM_TxPwIndex_CCK + i) >> 1);
else
usValue = EEPROM_Default_TxPower;
- *((u16 *)(&priv->EEPROMTxPowerLevelCCK[i])) =
+ *((u16 *)(&priv->eeprom_tx_pwr_level_cck[i])) =
usValue;
}
for (i = 0; i < 14; i += 2) {
- if (!priv->AutoloadFailFlag)
+ if (!priv->autoload_fail_flag)
usValue = rtl92e_eeprom_read(dev,
(EEPROM_TxPwIndex_OFDM_24G + i) >> 1);
else
usValue = EEPROM_Default_TxPower;
- *((u16 *)(&priv->EEPROMTxPowerLevelOFDM24G[i]))
+ *((u16 *)(&priv->eeprom_tx_pwr_level_ofdm24g[i]))
= usValue;
}
}
if (priv->epromtype == EEPROM_93C46) {
for (i = 0; i < 14; i++) {
- priv->TxPowerLevelCCK[i] =
- priv->EEPROMTxPowerLevelCCK[i];
- priv->TxPowerLevelOFDM24G[i] =
- priv->EEPROMTxPowerLevelOFDM24G[i];
+ priv->tx_pwr_level_cck[i] =
+ priv->eeprom_tx_pwr_level_cck[i];
+ priv->tx_pwr_level_ofdm_24g[i] =
+ priv->eeprom_tx_pwr_level_ofdm24g[i];
}
- priv->LegacyHTTxPowerDiff =
- priv->EEPROMLegacyHTTxPowerDiff;
- priv->AntennaTxPwDiff[0] = priv->EEPROMAntPwDiff & 0xf;
- priv->AntennaTxPwDiff[1] = (priv->EEPROMAntPwDiff &
+ priv->legacy_ht_tx_pwr_diff =
+ priv->eeprom_legacy_ht_tx_pwr_diff;
+ priv->antenna_tx_pwr_diff[0] = priv->eeprom_ant_pwr_diff & 0xf;
+ priv->antenna_tx_pwr_diff[1] = (priv->eeprom_ant_pwr_diff &
0xf0) >> 4;
- priv->AntennaTxPwDiff[2] = (priv->EEPROMAntPwDiff &
+ priv->antenna_tx_pwr_diff[2] = (priv->eeprom_ant_pwr_diff &
0xf00) >> 8;
- priv->CrystalCap = priv->EEPROMCrystalCap;
- priv->ThermalMeter[0] = priv->EEPROMThermalMeter & 0xf;
- priv->ThermalMeter[1] = (priv->EEPROMThermalMeter &
+ priv->crystal_cap = priv->eeprom_crystal_cap;
+ priv->thermal_meter[0] = priv->eeprom_thermal_meter & 0xf;
+ priv->thermal_meter[1] = (priv->eeprom_thermal_meter &
0xf0) >> 4;
} else if (priv->epromtype == EEPROM_93C56) {
-
- for (i = 0; i < 3; i++) {
- priv->TxPowerLevelCCK_A[i] =
- priv->EEPROMRfACCKChnl1TxPwLevel[0];
- priv->TxPowerLevelOFDM24G_A[i] =
- priv->EEPROMRfAOfdmChnlTxPwLevel[0];
- priv->TxPowerLevelCCK_C[i] =
- priv->EEPROMRfCCCKChnl1TxPwLevel[0];
- priv->TxPowerLevelOFDM24G_C[i] =
- priv->EEPROMRfCOfdmChnlTxPwLevel[0];
- }
- for (i = 3; i < 9; i++) {
- priv->TxPowerLevelCCK_A[i] =
- priv->EEPROMRfACCKChnl1TxPwLevel[1];
- priv->TxPowerLevelOFDM24G_A[i] =
- priv->EEPROMRfAOfdmChnlTxPwLevel[1];
- priv->TxPowerLevelCCK_C[i] =
- priv->EEPROMRfCCCKChnl1TxPwLevel[1];
- priv->TxPowerLevelOFDM24G_C[i] =
- priv->EEPROMRfCOfdmChnlTxPwLevel[1];
- }
- for (i = 9; i < 14; i++) {
- priv->TxPowerLevelCCK_A[i] =
- priv->EEPROMRfACCKChnl1TxPwLevel[2];
- priv->TxPowerLevelOFDM24G_A[i] =
- priv->EEPROMRfAOfdmChnlTxPwLevel[2];
- priv->TxPowerLevelCCK_C[i] =
- priv->EEPROMRfCCCKChnl1TxPwLevel[2];
- priv->TxPowerLevelOFDM24G_C[i] =
- priv->EEPROMRfCOfdmChnlTxPwLevel[2];
- }
- priv->LegacyHTTxPowerDiff =
- priv->EEPROMLegacyHTTxPowerDiff;
- priv->AntennaTxPwDiff[0] = 0;
- priv->AntennaTxPwDiff[1] = 0;
- priv->AntennaTxPwDiff[2] = 0;
- priv->CrystalCap = priv->EEPROMCrystalCap;
- priv->ThermalMeter[0] = priv->EEPROMThermalMeter & 0xf;
- priv->ThermalMeter[1] = (priv->EEPROMThermalMeter &
+ priv->legacy_ht_tx_pwr_diff =
+ priv->eeprom_legacy_ht_tx_pwr_diff;
+ priv->antenna_tx_pwr_diff[0] = 0;
+ priv->antenna_tx_pwr_diff[1] = 0;
+ priv->antenna_tx_pwr_diff[2] = 0;
+ priv->crystal_cap = priv->eeprom_crystal_cap;
+ priv->thermal_meter[0] = priv->eeprom_thermal_meter & 0xf;
+ priv->thermal_meter[1] = (priv->eeprom_thermal_meter &
0xf0) >> 4;
}
}
@@ -473,41 +436,41 @@ static void _rtl92e_read_eeprom_info(struct net_device *dev)
priv->rf_chip = RF_8256;
if (priv->reg_chnl_plan == 0xf)
- priv->ChannelPlan = priv->eeprom_ChannelPlan;
+ priv->chnl_plan = priv->eeprom_chnl_plan;
else
- priv->ChannelPlan = priv->reg_chnl_plan;
+ priv->chnl_plan = priv->reg_chnl_plan;
if (priv->eeprom_vid == 0x1186 && priv->eeprom_did == 0x3304)
- priv->CustomerID = RT_CID_DLINK;
+ priv->customer_id = RT_CID_DLINK;
- switch (priv->eeprom_CustomerID) {
+ switch (priv->eeprom_customer_id) {
case EEPROM_CID_DEFAULT:
- priv->CustomerID = RT_CID_DEFAULT;
+ priv->customer_id = RT_CID_DEFAULT;
break;
case EEPROM_CID_CAMEO:
- priv->CustomerID = RT_CID_819x_CAMEO;
+ priv->customer_id = RT_CID_819x_CAMEO;
break;
case EEPROM_CID_RUNTOP:
- priv->CustomerID = RT_CID_819x_RUNTOP;
+ priv->customer_id = RT_CID_819x_RUNTOP;
break;
case EEPROM_CID_NetCore:
- priv->CustomerID = RT_CID_819x_Netcore;
+ priv->customer_id = RT_CID_819X_NETCORE;
break;
case EEPROM_CID_TOSHIBA:
- priv->CustomerID = RT_CID_TOSHIBA;
- if (priv->eeprom_ChannelPlan&0x80)
- priv->ChannelPlan = priv->eeprom_ChannelPlan&0x7f;
+ priv->customer_id = RT_CID_TOSHIBA;
+ if (priv->eeprom_chnl_plan & 0x80)
+ priv->chnl_plan = priv->eeprom_chnl_plan & 0x7f;
else
- priv->ChannelPlan = 0x0;
+ priv->chnl_plan = 0x0;
break;
case EEPROM_CID_Nettronix:
- priv->CustomerID = RT_CID_Nettronix;
+ priv->customer_id = RT_CID_Nettronix;
break;
case EEPROM_CID_Pronet:
- priv->CustomerID = RT_CID_PRONET;
+ priv->customer_id = RT_CID_PRONET;
break;
case EEPROM_CID_DLINK:
- priv->CustomerID = RT_CID_DLINK;
+ priv->customer_id = RT_CID_DLINK;
break;
case EEPROM_CID_WHQL:
@@ -516,9 +479,9 @@ static void _rtl92e_read_eeprom_info(struct net_device *dev)
break;
}
- if (priv->ChannelPlan > CHANNEL_PLAN_LEN - 1)
- priv->ChannelPlan = 0;
- priv->ChannelPlan = COUNTRY_CODE_WORLD_WIDE_13;
+ if (priv->chnl_plan > CHANNEL_PLAN_LEN - 1)
+ priv->chnl_plan = 0;
+ priv->chnl_plan = COUNTRY_CODE_WORLD_WIDE_13;
if (priv->eeprom_vid == 0x1186 && priv->eeprom_did == 0x3304)
priv->rtllib->bSupportRemoteWakeUp = true;
@@ -594,8 +557,8 @@ static void _rtl92e_hwconfig(struct net_device *dev)
rtl92e_writel(dev, RRSR, regRRSR);
rtl92e_writew(dev, RETRY_LIMIT,
- priv->ShortRetryLimit << RETRY_LIMIT_SHORT_SHIFT |
- priv->LongRetryLimit << RETRY_LIMIT_LONG_SHIFT);
+ priv->short_retry_limit << RETRY_LIMIT_SHORT_SHIFT |
+ priv->long_retry_limit << RETRY_LIMIT_LONG_SHIFT);
}
bool rtl92e_start_adapter(struct net_device *dev)
@@ -614,21 +577,21 @@ bool rtl92e_start_adapter(struct net_device *dev)
start:
rtl92e_reset_desc_ring(dev);
- priv->Rf_Mode = RF_OP_By_SW_3wire;
+ priv->rf_mode = RF_OP_By_SW_3wire;
if (priv->rst_progress == RESET_TYPE_NORESET) {
rtl92e_writeb(dev, ANAPAR, 0x37);
mdelay(500);
}
- priv->pFirmware->status = FW_STATUS_0_INIT;
+ priv->fw_info->status = FW_STATUS_0_INIT;
ulRegRead = rtl92e_readl(dev, CPU_GEN);
- if (priv->pFirmware->status == FW_STATUS_0_INIT)
+ if (priv->fw_info->status == FW_STATUS_0_INIT)
ulRegRead |= CPU_GEN_SYSTEM_RESET;
- else if (priv->pFirmware->status == FW_STATUS_5_READY)
+ else if (priv->fw_info->status == FW_STATUS_5_READY)
ulRegRead |= CPU_GEN_FIRMWARE_RESET;
else
netdev_err(dev, "%s(): undefined firmware state: %d.\n",
- __func__, priv->pFirmware->status);
+ __func__, priv->fw_info->status);
rtl92e_writel(dev, CPU_GEN, ulRegRead);
@@ -647,13 +610,13 @@ start:
return rtStatus;
}
- priv->LoopbackMode = RTL819X_NO_LOOPBACK;
+ priv->loopback_mode = RTL819X_NO_LOOPBACK;
if (priv->rst_progress == RESET_TYPE_NORESET) {
ulRegRead = rtl92e_readl(dev, CPU_GEN);
- if (priv->LoopbackMode == RTL819X_NO_LOOPBACK)
+ if (priv->loopback_mode == RTL819X_NO_LOOPBACK)
ulRegRead = (ulRegRead & CPU_GEN_NO_LOOPBACK_MSK) |
CPU_GEN_NO_LOOPBACK_SET;
- else if (priv->LoopbackMode == RTL819X_MAC_LOOPBACK)
+ else if (priv->loopback_mode == RTL819X_MAC_LOOPBACK)
ulRegRead |= CPU_CCK_LOOPBACK;
else
netdev_err(dev, "%s: Invalid loopback mode setting.\n",
@@ -666,11 +629,11 @@ start:
_rtl92e_hwconfig(dev);
rtl92e_writeb(dev, CMDR, CR_RE | CR_TE);
- rtl92e_writeb(dev, PCIF, ((MXDMA2_NoLimit<<MXDMA2_RX_SHIFT) |
- (MXDMA2_NoLimit<<MXDMA2_TX_SHIFT)));
+ rtl92e_writeb(dev, PCIF, ((MXDMA2_NO_LIMIT << MXDMA2_RX_SHIFT) |
+ (MXDMA2_NO_LIMIT << MXDMA2_TX_SHIFT)));
rtl92e_writel(dev, MAC0, ((u32 *)dev->dev_addr)[0]);
rtl92e_writew(dev, MAC4, ((u16 *)(dev->dev_addr + 4))[0]);
- rtl92e_writel(dev, RCR, priv->ReceiveConfig);
+ rtl92e_writel(dev, RCR, priv->receive_config);
rtl92e_writel(dev, RQPN1, NUM_OF_PAGE_IN_FW_QUEUE_BK <<
RSVD_FW_QUEUE_PAGE_BK_SHIFT |
@@ -724,7 +687,7 @@ start:
}
tmpvalue = rtl92e_readb(dev, IC_VERRSION);
- priv->IC_Cut = tmpvalue;
+ priv->ic_cut = tmpvalue;
bfirmwareok = rtl92e_init_fw(dev);
if (!bfirmwareok) {
@@ -760,19 +723,19 @@ start:
}
if (priv->rtllib->FwRWRF)
- priv->Rf_Mode = RF_OP_By_FW;
+ priv->rf_mode = RF_OP_By_FW;
else
- priv->Rf_Mode = RF_OP_By_SW_3wire;
+ priv->rf_mode = RF_OP_By_SW_3wire;
if (priv->rst_progress == RESET_TYPE_NORESET) {
rtl92e_dm_init_txpower_tracking(dev);
- if (priv->IC_Cut >= IC_VersionCut_D) {
+ if (priv->ic_cut >= IC_VersionCut_D) {
tmpRegA = rtl92e_get_bb_reg(dev, rOFDM0_XATxIQImbalance,
bMaskDWord);
rtl92e_get_bb_reg(dev, rOFDM0_XCTxIQImbalance, bMaskDWord);
- for (i = 0; i < TxBBGainTableLength; i++) {
+ for (i = 0; i < TX_BB_GAIN_TABLE_LEN; i++) {
if (tmpRegA == dm_tx_bb_gain[i]) {
priv->rfa_txpowertrackingindex = i;
priv->rfa_txpowertrackingindex_real = i;
@@ -785,16 +748,16 @@ start:
TempCCk = rtl92e_get_bb_reg(dev, rCCK0_TxFilter1,
bMaskByte2);
- for (i = 0; i < CCKTxBBGainTableLength; i++) {
+ for (i = 0; i < CCK_TX_BB_GAIN_TABLE_LEN; i++) {
if (TempCCk == dm_cck_tx_bb_gain[i][0]) {
- priv->CCKPresentAttentuation_20Mdefault = i;
+ priv->cck_present_attn_20m_def = i;
break;
}
}
- priv->CCKPresentAttentuation_40Mdefault = 0;
- priv->CCKPresentAttentuation_difference = 0;
+ priv->cck_present_attn_40m_def = 0;
+ priv->cck_present_attn_diff = 0;
priv->cck_present_attn =
- priv->CCKPresentAttentuation_20Mdefault;
+ priv->cck_present_attn_20m_def;
priv->btxpower_tracking = false;
}
}
@@ -860,9 +823,9 @@ void rtl92e_link_change(struct net_device *dev)
if (ieee->intel_promiscuous_md_info.promiscuous_on)
;
else
- priv->ReceiveConfig = reg |= RCR_CBSSID;
+ priv->receive_config = reg |= RCR_CBSSID;
} else
- priv->ReceiveConfig = reg &= ~RCR_CBSSID;
+ priv->receive_config = reg &= ~RCR_CBSSID;
rtl92e_writel(dev, RCR, reg);
}
@@ -874,12 +837,12 @@ void rtl92e_set_monitor_mode(struct net_device *dev, bool bAllowAllDA,
struct r8192_priv *priv = rtllib_priv(dev);
if (bAllowAllDA)
- priv->ReceiveConfig |= RCR_AAP;
+ priv->receive_config |= RCR_AAP;
else
- priv->ReceiveConfig &= ~RCR_AAP;
+ priv->receive_config &= ~RCR_AAP;
if (WriteIntoReg)
- rtl92e_writel(dev, RCR, priv->ReceiveConfig);
+ rtl92e_writel(dev, RCR, priv->receive_config);
}
static u8 _rtl92e_rate_mgn_to_hw(u8 rate)
@@ -1068,13 +1031,13 @@ void rtl92e_fill_tx_desc(struct net_device *dev, struct tx_desc *pdesc,
pTxFwInfo->RtsShort = (pTxFwInfo->RtsHT == 0) ?
(cb_desc->bRTSUseShortPreamble ? 1 : 0) :
(cb_desc->bRTSUseShortGI ? 1 : 0);
- if (priv->CurrentChannelBW == HT_CHANNEL_WIDTH_20_40) {
+ if (priv->current_chnl_bw == HT_CHANNEL_WIDTH_20_40) {
if (cb_desc->bPacketBW) {
pTxFwInfo->TxBandwidth = 1;
pTxFwInfo->TxSubCarrier = 0;
} else {
pTxFwInfo->TxBandwidth = 0;
- pTxFwInfo->TxSubCarrier = priv->nCur40MhzPrimeSC;
+ pTxFwInfo->TxSubCarrier = priv->n_cur_40mhz_prime_sc;
}
} else {
pTxFwInfo->TxBandwidth = 0;
@@ -1327,11 +1290,10 @@ static void _rtl92e_query_rxphystatus(
{
struct phy_sts_ofdm_819xpci *pofdm_buf;
struct phy_sts_cck_819xpci *pcck_buf;
- struct phy_ofdm_rx_status_rxsc_sgien_exintfflag *prxsc;
u8 *prxpkt;
- u8 i, max_spatial_stream, tmp_rxsnr, tmp_rxevm, rxsc_sgien_exflg;
+ u8 i, max_spatial_stream, tmp_rxevm;
s8 rx_pwr[4], rx_pwr_all = 0;
- s8 rx_snrX, rx_evmX;
+ s8 rx_evmX;
u8 evm, pwdb_all;
u32 RSSI, total_rssi = 0;
u8 is_cck_rate = 0;
@@ -1339,8 +1301,6 @@ static void _rtl92e_query_rxphystatus(
static u8 check_reg824;
static u32 reg824_bit9;
- priv->stats.numqry_phystatus++;
-
is_cck_rate = rx_hal_is_cck_rate(pdrvinfo);
memset(precord_stats, 0, sizeof(struct rtllib_rx_stats));
pstats->bPacketMatchBSSID = precord_stats->bPacketMatchBSSID =
@@ -1372,7 +1332,6 @@ static void _rtl92e_query_rxphystatus(
if (is_cck_rate) {
u8 report;
- priv->stats.numqry_phystatusCCK++;
if (!reg824_bit9) {
report = pcck_buf->cck_agc_rpt & 0xc0;
report >>= 6;
@@ -1447,7 +1406,6 @@ static void _rtl92e_query_rxphystatus(
precord_stats->RxMIMOSignalQuality[1] = -1;
}
} else {
- priv->stats.numqry_phystatusHT++;
for (i = RF90_PATH_A; i < RF90_PATH_MAX; i++) {
if (priv->brfpath_rxenable[i])
rf_rx_num++;
@@ -1455,11 +1413,6 @@ static void _rtl92e_query_rxphystatus(
rx_pwr[i] = ((pofdm_buf->trsw_gain_X[i] & 0x3F) *
2) - 110;
- tmp_rxsnr = pofdm_buf->rxsnr_X[i];
- rx_snrX = (s8)(tmp_rxsnr);
- rx_snrX /= 2;
- priv->stats.rxSNRdB[i] = (long)rx_snrX;
-
RSSI = rtl92e_rx_db_to_percent(rx_pwr[i]);
if (priv->brfpath_rxenable[i])
total_rssi += RSSI;
@@ -1499,15 +1452,6 @@ static void _rtl92e_query_rxphystatus(
precord_stats->RxMIMOSignalQuality[i] = evm & 0xff;
}
}
-
-
- rxsc_sgien_exflg = pofdm_buf->rxsc_sgien_exflg;
- prxsc = (struct phy_ofdm_rx_status_rxsc_sgien_exintfflag *)
- &rxsc_sgien_exflg;
- if (pdrvinfo->BW)
- priv->stats.received_bwtype[1+prxsc->rxsc]++;
- else
- priv->stats.received_bwtype[0]++;
}
if (is_cck_rate) {
@@ -1570,7 +1514,6 @@ static void _rtl92e_process_phyinfo(struct r8192_priv *priv, u8 *buffer,
if (!bcheck)
return;
- priv->stats.num_process_phyinfo++;
if (!prev_st->bIsCCK && prev_st->bPacketToSelf) {
for (rfpath = RF90_PATH_A; rfpath < RF90_PATH_C; rfpath++) {
if (!rtl92e_is_legal_rf_path(priv->rtllib->dev, rfpath))
@@ -1605,17 +1548,17 @@ static void _rtl92e_process_phyinfo(struct r8192_priv *priv, u8 *buffer,
PHY_Beacon_RSSI_SLID_WIN_MAX) {
slide_beacon_adc_pwdb_statistics =
PHY_Beacon_RSSI_SLID_WIN_MAX;
- last_beacon_adc_pwdb = priv->stats.Slide_Beacon_pwdb
+ last_beacon_adc_pwdb = priv->stats.slide_beacon_pwdb
[slide_beacon_adc_pwdb_index];
- priv->stats.Slide_Beacon_Total -= last_beacon_adc_pwdb;
+ priv->stats.slide_beacon_total -= last_beacon_adc_pwdb;
}
- priv->stats.Slide_Beacon_Total += prev_st->RxPWDBAll;
- priv->stats.Slide_Beacon_pwdb[slide_beacon_adc_pwdb_index] =
+ priv->stats.slide_beacon_total += prev_st->RxPWDBAll;
+ priv->stats.slide_beacon_pwdb[slide_beacon_adc_pwdb_index] =
prev_st->RxPWDBAll;
slide_beacon_adc_pwdb_index++;
if (slide_beacon_adc_pwdb_index >= PHY_Beacon_RSSI_SLID_WIN_MAX)
slide_beacon_adc_pwdb_index = 0;
- prev_st->RxPWDBAll = priv->stats.Slide_Beacon_Total /
+ prev_st->RxPWDBAll = priv->stats.slide_beacon_total /
slide_beacon_adc_pwdb_statistics;
if (prev_st->RxPWDBAll >= 3)
prev_st->RxPWDBAll -= 3;
@@ -1659,7 +1602,6 @@ static void _rtl92e_process_phyinfo(struct r8192_priv *priv, u8 *buffer,
tmp_val = priv->stats.slide_evm_total /
slide_evm_statistics;
- priv->stats.signal_quality = tmp_val;
priv->stats.last_signal_strength_inpercent = tmp_val;
}
@@ -1716,10 +1658,6 @@ static void _rtl92e_translate_rx_signal_stats(struct net_device *dev,
ether_addr_equal(praddr, priv->rtllib->dev->dev_addr);
if (WLAN_FC_GET_FRAMETYPE(fc) == RTLLIB_STYPE_BEACON)
bPacketBeacon = true;
- if (bpacket_match_bssid)
- priv->stats.numpacket_matchbssid++;
- if (bpacket_toself)
- priv->stats.numpacket_toself++;
_rtl92e_process_phyinfo(priv, tmp_buf, &previous_stats, pstats);
_rtl92e_query_rxphystatus(priv, pstats, pdesc, pdrvinfo,
&previous_stats, bpacket_match_bssid,
@@ -1734,18 +1672,12 @@ static void _rtl92e_update_received_rate_histogram_stats(
struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
u32 rcvType = 1;
u32 rateIndex;
- u32 preamble_guardinterval;
if (pstats->bCRC)
rcvType = 2;
else if (pstats->bICV)
rcvType = 3;
- if (pstats->bShortPreamble)
- preamble_guardinterval = 1;
- else
- preamble_guardinterval = 0;
-
switch (pstats->rate) {
case MGN_1M:
rateIndex = 0;
@@ -1835,7 +1767,6 @@ static void _rtl92e_update_received_rate_histogram_stats(
rateIndex = 28;
break;
}
- priv->stats.received_preamble_GI[preamble_guardinterval][rateIndex]++;
priv->stats.received_rate_histogram[0][rateIndex]++;
priv->stats.received_rate_histogram[rcvType][rateIndex]++;
}
@@ -1843,7 +1774,6 @@ static void _rtl92e_update_received_rate_histogram_stats(
bool rtl92e_get_rx_stats(struct net_device *dev, struct rtllib_rx_stats *stats,
struct rx_desc *pdesc, struct sk_buff *skb)
{
- struct r8192_priv *priv = rtllib_priv(dev);
struct rx_fwinfo *pDrvInfo = NULL;
stats->bICV = pdesc->ICV;
@@ -1856,15 +1786,6 @@ bool rtl92e_get_rx_stats(struct net_device *dev, struct rtllib_rx_stats *stats,
if (stats->bHwError) {
stats->bShift = false;
-
- if (pdesc->CRC32) {
- if (pdesc->Length < 500)
- priv->stats.rxcrcerrmin++;
- else if (pdesc->Length > 1000)
- priv->stats.rxcrcerrmax++;
- else
- priv->stats.rxcrcerrmid++;
- }
return false;
}
@@ -1895,7 +1816,7 @@ bool rtl92e_get_rx_stats(struct net_device *dev, struct rtllib_rx_stats *stats,
stats->RxIs40MHzPacket = pDrvInfo->BW;
_rtl92e_translate_rx_signal_stats(dev, skb, stats, pdesc, pDrvInfo);
- skb_trim(skb, skb->len - 4/*sCrcLng*/);
+ skb_trim(skb, skb->len - S_CRC_LEN);
stats->packetlength = stats->Length-4;
@@ -1940,7 +1861,7 @@ void rtl92e_stop_adapter(struct net_device *dev, bool reset)
rtl92e_writeb(dev, PMR, 0x5);
- rtl92e_writeb(dev, MacBlkCtrl, 0xa);
+ rtl92e_writeb(dev, MAC_BLK_CTRL, 0xa);
}
}
@@ -2010,10 +1931,10 @@ rtl92e_init_variables(struct net_device *dev)
priv->rtllib->tx_headroom = sizeof(struct tx_fwinfo_8190pci);
- priv->ShortRetryLimit = 0x30;
- priv->LongRetryLimit = 0x30;
+ priv->short_retry_limit = 0x30;
+ priv->long_retry_limit = 0x30;
- priv->ReceiveConfig = RCR_ADD3 |
+ priv->receive_config = RCR_ADD3 |
RCR_AMF | RCR_ADF |
RCR_AICV |
RCR_AB | RCR_AM | RCR_APM |
@@ -2027,8 +1948,6 @@ rtl92e_init_variables(struct net_device *dev)
IMR_RDU | IMR_RXFOVW | IMR_TXFOVW |
IMR_BcnInt | IMR_TBDOK | IMR_TBDER);
- priv->PwrDomainProtect = false;
-
priv->bfirst_after_down = false;
}
@@ -2098,21 +2017,21 @@ bool rtl92e_is_rx_stuck(struct net_device *dev)
u8 SilentResetRxSoltNum = 4;
rx_chk_cnt++;
- if (priv->undecorated_smoothed_pwdb >= (RateAdaptiveTH_High+5)) {
+ if (priv->undecorated_smoothed_pwdb >= (RATE_ADAPTIVE_TH_HIGH + 5)) {
rx_chk_cnt = 0;
- } else if ((priv->undecorated_smoothed_pwdb < (RateAdaptiveTH_High + 5))
- && (((priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20) &&
- (priv->undecorated_smoothed_pwdb >= RateAdaptiveTH_Low_40M))
- || ((priv->CurrentChannelBW == HT_CHANNEL_WIDTH_20) &&
- (priv->undecorated_smoothed_pwdb >= RateAdaptiveTH_Low_20M)))) {
+ } else if ((priv->undecorated_smoothed_pwdb < (RATE_ADAPTIVE_TH_HIGH + 5))
+ && (((priv->current_chnl_bw != HT_CHANNEL_WIDTH_20) &&
+ (priv->undecorated_smoothed_pwdb >= RATE_ADAPTIVE_TH_LOW_40M))
+ || ((priv->current_chnl_bw == HT_CHANNEL_WIDTH_20) &&
+ (priv->undecorated_smoothed_pwdb >= RATE_ADAPTIVE_TH_LOW_20M)))) {
if (rx_chk_cnt < 2)
return bStuck;
rx_chk_cnt = 0;
- } else if ((((priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20) &&
- (priv->undecorated_smoothed_pwdb < RateAdaptiveTH_Low_40M)) ||
- ((priv->CurrentChannelBW == HT_CHANNEL_WIDTH_20) &&
- (priv->undecorated_smoothed_pwdb < RateAdaptiveTH_Low_20M))) &&
- priv->undecorated_smoothed_pwdb >= VeryLowRSSI) {
+ } else if ((((priv->current_chnl_bw != HT_CHANNEL_WIDTH_20) &&
+ (priv->undecorated_smoothed_pwdb < RATE_ADAPTIVE_TH_LOW_40M)) ||
+ ((priv->current_chnl_bw == HT_CHANNEL_WIDTH_20) &&
+ (priv->undecorated_smoothed_pwdb < RATE_ADAPTIVE_TH_LOW_20M))) &&
+ priv->undecorated_smoothed_pwdb >= VERY_LOW_RSSI) {
if (rx_chk_cnt < 4)
return bStuck;
rx_chk_cnt = 0;
@@ -2123,24 +2042,24 @@ bool rtl92e_is_rx_stuck(struct net_device *dev)
}
- SlotIndex = (priv->SilentResetRxSlotIndex++)%SilentResetRxSoltNum;
+ SlotIndex = (priv->silent_reset_rx_slot_index++)%SilentResetRxSoltNum;
if (priv->rx_ctr == RegRxCounter) {
- priv->SilentResetRxStuckEvent[SlotIndex] = 1;
+ priv->silent_reset_rx_stuck_event[SlotIndex] = 1;
for (i = 0; i < SilentResetRxSoltNum; i++)
- TotalRxStuckCount += priv->SilentResetRxStuckEvent[i];
+ TotalRxStuckCount += priv->silent_reset_rx_stuck_event[i];
if (TotalRxStuckCount == SilentResetRxSoltNum) {
bStuck = true;
for (i = 0; i < SilentResetRxSoltNum; i++)
TotalRxStuckCount +=
- priv->SilentResetRxStuckEvent[i];
+ priv->silent_reset_rx_stuck_event[i];
}
} else {
- priv->SilentResetRxStuckEvent[SlotIndex] = 0;
+ priv->silent_reset_rx_stuck_event[SlotIndex] = 0;
}
priv->rx_ctr = RegRxCounter;
@@ -2154,10 +2073,10 @@ bool rtl92e_is_tx_stuck(struct net_device *dev)
bool bStuck = false;
u16 RegTxCounter = rtl92e_readw(dev, 0x128);
- if (priv->TxCounter == RegTxCounter)
+ if (priv->tx_counter == RegTxCounter)
bStuck = true;
- priv->TxCounter = RegTxCounter;
+ priv->tx_counter = RegTxCounter;
return bStuck;
}
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.c
index 789d288d7503..b011ec8c8a41 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.c
@@ -51,7 +51,7 @@ static bool _rtl92e_fw_check_ready(struct net_device *dev,
u8 load_fw_status)
{
struct r8192_priv *priv = rtllib_priv(dev);
- struct rt_firmware *pfirmware = priv->pFirmware;
+ struct rt_firmware *pfirmware = priv->fw_info;
bool rt_status = true;
switch (load_fw_status) {
@@ -134,7 +134,7 @@ bool rtl92e_init_fw(struct net_device *dev)
enum opt_rst_type rst_opt = OPT_SYSTEM_RESET;
enum firmware_init_step starting_state = FW_INIT_STEP0_BOOT;
- struct rt_firmware *pfirmware = priv->pFirmware;
+ struct rt_firmware *pfirmware = priv->fw_info;
netdev_dbg(dev, " PlatformInitFirmware()==>\n");
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_hw.h b/drivers/staging/rtl8192e/rtl8192e/r8192E_hw.h
index 3e223151d4b7..99640c4779f7 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_hw.h
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_hw.h
@@ -8,141 +8,72 @@
#define R8180_HW
enum baseband_config {
- BaseBand_Config_PHY_REG = 0,
- BaseBand_Config_AGC_TAB = 1,
+ BB_CONFIG_PHY_REG = 0,
+ BB_CONFIG_AGC_TAB = 1,
};
-#define RTL8187_REQT_READ 0xc0
-#define RTL8187_REQT_WRITE 0x40
-#define RTL8187_REQ_GET_REGS 0x05
-#define RTL8187_REQ_SET_REGS 0x05
-
-#define MAX_TX_URB 5
-#define MAX_RX_URB 16
-#define RX_URB_SIZE 9100
-
-#define BB_ANTATTEN_CHAN14 0x0c
-#define BB_ANTENNA_B 0x40
-
-#define BB_HOST_BANG (1<<30)
-#define BB_HOST_BANG_EN (1<<2)
-#define BB_HOST_BANG_CLK (1<<1)
-#define BB_HOST_BANG_RW (1<<3)
-#define BB_HOST_BANG_DATA 1
-
#define RTL8190_EEPROM_ID 0x8129
#define EEPROM_VID 0x02
#define EEPROM_DID 0x04
#define EEPROM_NODE_ADDRESS_BYTE_0 0x0C
-#define EEPROM_TxPowerDiff 0x1F
-
-
-#define EEPROM_PwDiff 0x21
-#define EEPROM_CrystalCap 0x22
-
-
-
-#define EEPROM_TxPwIndex_CCK_V1 0x29
-#define EEPROM_TxPwIndex_OFDM_24G_V1 0x2C
-#define EEPROM_TxPwIndex_Ver 0x27
-
-#define EEPROM_Default_TxPowerDiff 0x0
#define EEPROM_Default_ThermalMeter 0x77
#define EEPROM_Default_AntTxPowerDiff 0x0
#define EEPROM_Default_TxPwDiff_CrystalCap 0x5
-#define EEPROM_Default_PwDiff 0x4
-#define EEPROM_Default_CrystalCap 0x5
#define EEPROM_Default_TxPower 0x1010
#define EEPROM_ICVersion_ChannelPlan 0x7C
#define EEPROM_Customer_ID 0x7B
#define EEPROM_RFInd_PowerDiff 0x28
+
#define EEPROM_ThermalMeter 0x29
#define EEPROM_TxPwDiff_CrystalCap 0x2A
#define EEPROM_TxPwIndex_CCK 0x2C
#define EEPROM_TxPwIndex_OFDM_24G 0x3A
-#define EEPROM_Default_TxPowerLevel 0x10
-#define EEPROM_IC_VER 0x7d
-#define EEPROM_CRC 0x7e
#define EEPROM_CID_DEFAULT 0x0
#define EEPROM_CID_CAMEO 0x1
#define EEPROM_CID_RUNTOP 0x2
-#define EEPROM_CID_Senao 0x3
#define EEPROM_CID_TOSHIBA 0x4
#define EEPROM_CID_NetCore 0x5
#define EEPROM_CID_Nettronix 0x6
#define EEPROM_CID_Pronet 0x7
#define EEPROM_CID_DLINK 0x8
#define EEPROM_CID_WHQL 0xFE
-enum _RTL8192Pci_HW {
+enum _RTL8192PCI_HW {
MAC0 = 0x000,
- MAC1 = 0x001,
- MAC2 = 0x002,
- MAC3 = 0x003,
MAC4 = 0x004,
- MAC5 = 0x005,
PCIF = 0x009,
-#define MXDMA2_16bytes 0x000
-#define MXDMA2_32bytes 0x001
-#define MXDMA2_64bytes 0x010
-#define MXDMA2_128bytes 0x011
-#define MXDMA2_256bytes 0x100
-#define MXDMA2_512bytes 0x101
-#define MXDMA2_1024bytes 0x110
-#define MXDMA2_NoLimit 0x7
+#define MXDMA2_NO_LIMIT 0x7
-#define MULRW_SHIFT 3
#define MXDMA2_RX_SHIFT 4
#define MXDMA2_TX_SHIFT 0
PMR = 0x00c,
EPROM_CMD = 0x00e,
-#define EPROM_CMD_RESERVED_MASK BIT5
+
#define EPROM_CMD_9356SEL BIT4
#define EPROM_CMD_OPERATING_MODE_SHIFT 6
-#define EPROM_CMD_OPERATING_MODE_MASK ((1<<7)|(1<<6))
-#define EPROM_CMD_CONFIG 0x3
#define EPROM_CMD_NORMAL 0
-#define EPROM_CMD_LOAD 1
#define EPROM_CMD_PROGRAM 2
#define EPROM_CS_BIT 3
#define EPROM_CK_BIT 2
#define EPROM_W_BIT 1
#define EPROM_R_BIT 0
- AFR = 0x010,
-#define AFR_CardBEn (1<<0)
-#define AFR_CLKRUN_SEL (1<<1)
-#define AFR_FuncRegEn (1<<2)
-
ANAPAR = 0x17,
#define BB_GLOBAL_RESET_BIT 0x1
BB_GLOBAL_RESET = 0x020,
BSSIDR = 0x02E,
CMDR = 0x037,
-#define CR_RST 0x10
#define CR_RE 0x08
#define CR_TE 0x04
-#define CR_MulRW 0x01
SIFS = 0x03E,
- TCR = 0x040,
RCR = 0x044,
-#define RCR_FILTER_MASK (BIT0 | BIT1 | BIT2 | BIT3 | BIT5 | BIT12 | \
- BIT18 | BIT19 | BIT20 | BIT21 | BIT22 | BIT23)
#define RCR_ONLYERLPKT BIT31
-#define RCR_ENCS2 BIT30
-#define RCR_ENCS1 BIT29
-#define RCR_ENMBID BIT27
-#define RCR_ACKTXBW (BIT24|BIT25)
#define RCR_CBSSID BIT23
-#define RCR_APWRMGT BIT22
#define RCR_ADD3 BIT21
#define RCR_AMF BIT20
-#define RCR_ACF BIT19
#define RCR_ADF BIT18
-#define RCR_RXFTH BIT13
#define RCR_AICV BIT12
-#define RCR_ACRC32 BIT5
#define RCR_AB BIT3
#define RCR_AM BIT2
#define RCR_APM BIT1
@@ -151,8 +82,6 @@ enum _RTL8192Pci_HW {
#define RCR_FIFO_OFFSET 13
SLOT_TIME = 0x049,
ACK_TIMEOUT = 0x04c,
- PIFS_TIME = 0x04d,
- USTIME = 0x04e,
EDCAPARA_BE = 0x050,
EDCAPARA_BK = 0x054,
EDCAPARA_VO = 0x058,
@@ -161,53 +90,25 @@ enum _RTL8192Pci_HW {
#define AC_PARAM_ECW_MAX_OFFSET 12
#define AC_PARAM_ECW_MIN_OFFSET 8
#define AC_PARAM_AIFS_OFFSET 0
- RFPC = 0x05F,
- CWRR = 0x060,
BCN_TCFG = 0x062,
#define BCN_TCFG_CW_SHIFT 8
#define BCN_TCFG_IFS 0
BCN_INTERVAL = 0x070,
ATIMWND = 0x072,
BCN_DRV_EARLY_INT = 0x074,
-#define BCN_DRV_EARLY_INT_SWBCN_SHIFT 8
-#define BCN_DRV_EARLY_INT_TIME_SHIFT 0
BCN_DMATIME = 0x076,
BCN_ERR_THRESH = 0x078,
RWCAM = 0x0A0,
-#define CAM_CM_SecCAMPolling BIT31
-#define CAM_CM_SecCAMClr BIT30
-#define CAM_CM_SecCAMWE BIT16
-#define CAM_VALID BIT15
-#define CAM_NOTVALID 0x0000
-#define CAM_USEDK BIT5
-
-#define CAM_NONE 0x0
-#define CAM_WEP40 0x01
-#define CAM_TKIP 0x02
-#define CAM_AES 0x04
-#define CAM_WEP104 0x05
-
#define TOTAL_CAM_ENTRY 32
-
-#define CAM_CONFIG_USEDK true
-#define CAM_CONFIG_NO_USEDK false
-#define CAM_WRITE BIT16
-#define CAM_READ 0x00000000
-#define CAM_POLLINIG BIT31
-#define SCR_UseDK 0x01
WCAMI = 0x0A4,
- RCAMO = 0x0A8,
SECR = 0x0B0,
#define SCR_TxUseDK BIT0
#define SCR_RxUseDK BIT1
#define SCR_TxEncEnable BIT2
#define SCR_RxDecEnable BIT3
-#define SCR_SKByA2 BIT4
#define SCR_NoSKMC BIT5
SWREGULATOR = 0x0BD,
INTA_MASK = 0x0f4,
-#define IMR8190_DISABLED 0x0
-#define IMR_ATIMEND BIT28
#define IMR_TBDOK BIT27
#define IMR_TBDER BIT26
#define IMR_TXFOVW BIT15
@@ -227,30 +128,10 @@ enum _RTL8192Pci_HW {
#define IMR_VODOK BIT1
#define IMR_ROK BIT0
ISR = 0x0f8,
- TPPoll = 0x0fd,
-#define TPPoll_BKQ BIT0
-#define TPPoll_BEQ BIT1
-#define TPPoll_VIQ BIT2
-#define TPPoll_VOQ BIT3
-#define TPPoll_BQ BIT4
-#define TPPoll_CQ BIT5
-#define TPPoll_MQ BIT6
-#define TPPoll_HQ BIT7
-#define TPPoll_HCCAQ BIT8
-#define TPPoll_StopBK BIT9
-#define TPPoll_StopBE BIT10
-#define TPPoll_StopVI BIT11
-#define TPPoll_StopVO BIT12
-#define TPPoll_StopMgt BIT13
-#define TPPoll_StopHigh BIT14
-#define TPPoll_StopHCCA BIT15
-#define TPPoll_SHIFT 8
-
+ TP_POLL = 0x0fd,
+#define TP_POLL_CQ BIT5
PSR = 0x0ff,
-#define PSR_GEN 0x0
-#define PSR_CPU 0x1
CPU_GEN = 0x100,
- BB_RESET = 0x101,
#define CPU_CCK_LOOPBACK 0x00030000
#define CPU_GEN_SYSTEM_RESET 0x00000001
#define CPU_GEN_FIRMWARE_RESET 0x00000008
@@ -261,31 +142,13 @@ enum _RTL8192Pci_HW {
#define CPU_GEN_PWR_STB_CPU 0x00000004
#define CPU_GEN_NO_LOOPBACK_MSK 0xFFF8FFFF
#define CPU_GEN_NO_LOOPBACK_SET 0x00080000
-#define CPU_GEN_GPIO_UART 0x00007000
-
- LED1Cfg = 0x154,
- LED0Cfg = 0x155,
-
- AcmAvg = 0x170,
- AcmHwCtrl = 0x171,
-#define AcmHw_HwEn BIT0
-#define AcmHw_BeqEn BIT1
-#define AcmHw_ViqEn BIT2
-#define AcmHw_VoqEn BIT3
-#define AcmHw_BeqStatus BIT4
-#define AcmHw_ViqStatus BIT5
-#define AcmHw_VoqStatus BIT6
- AcmFwCtrl = 0x172,
-#define AcmFw_BeqStatus BIT0
-#define AcmFw_ViqStatus BIT1
-#define AcmFw_VoqStatus BIT2
- VOAdmTime = 0x174,
- VIAdmTime = 0x178,
- BEAdmTime = 0x17C,
+ ACM_HW_CTRL = 0x171,
+#define ACM_HW_BEQ_EN BIT1
+#define ACM_HW_VIQ_EN BIT2
+#define ACM_HW_VOQ_EN BIT3
RQPN1 = 0x180,
RQPN2 = 0x184,
RQPN3 = 0x188,
- QPRR = 0x1E0,
QPNR = 0x1F0,
BQDA = 0x200,
HQDA = 0x204,
@@ -296,51 +159,21 @@ enum _RTL8192Pci_HW {
VIQDA = 0x218,
BEQDA = 0x21C,
BKQDA = 0x220,
- RCQDA = 0x224,
RDQDA = 0x228,
- MAR0 = 0x240,
- MAR4 = 0x244,
-
- CCX_PERIOD = 0x250,
- CLM_RESULT = 0x251,
- NHM_PERIOD = 0x252,
-
- NHM_THRESHOLD0 = 0x253,
- NHM_THRESHOLD1 = 0x254,
- NHM_THRESHOLD2 = 0x255,
- NHM_THRESHOLD3 = 0x256,
- NHM_THRESHOLD4 = 0x257,
- NHM_THRESHOLD5 = 0x258,
- NHM_THRESHOLD6 = 0x259,
-
- MCTRL = 0x25A,
-
- NHM_RPI_COUNTER0 = 0x264,
- NHM_RPI_COUNTER1 = 0x265,
- NHM_RPI_COUNTER2 = 0x266,
- NHM_RPI_COUNTER3 = 0x267,
- NHM_RPI_COUNTER4 = 0x268,
- NHM_RPI_COUNTER5 = 0x269,
- NHM_RPI_COUNTER6 = 0x26A,
- NHM_RPI_COUNTER7 = 0x26B,
WFCRC0 = 0x2f0,
WFCRC1 = 0x2f4,
WFCRC2 = 0x2f8,
BW_OPMODE = 0x300,
-#define BW_OPMODE_11J BIT0
#define BW_OPMODE_5G BIT1
#define BW_OPMODE_20MHZ BIT2
IC_VERRSION = 0x301,
MSR = 0x303,
-#define MSR_LINK_MASK ((1<<0)|(1<<1))
+#define MSR_LINK_MASK (BIT(1) | BIT(0))
#define MSR_LINK_MANAGED 2
-#define MSR_LINK_NONE 0
-#define MSR_LINK_SHIFT 0
#define MSR_LINK_ADHOC 1
#define MSR_LINK_MASTER 3
-#define MSR_LINK_ENEDCA (1<<4)
#define MSR_NOLINK 0x00
#define MSR_ADHOC 0x01
@@ -352,12 +185,7 @@ enum _RTL8192Pci_HW {
#define RETRY_LIMIT_LONG_SHIFT 0
TSFR = 0x308,
RRSR = 0x310,
-#define RRSR_RSC_OFFSET 21
#define RRSR_SHORT_OFFSET 23
-#define RRSR_RSC_DUPLICATE 0x600000
-#define RRSR_RSC_UPSUBCHNL 0x400000
-#define RRSR_RSC_LOWSUBCHNL 0x200000
-#define RRSR_SHORT 0x800000
#define RRSR_1M BIT0
#define RRSR_2M BIT1
#define RRSR_5_5M BIT2
@@ -370,14 +198,6 @@ enum _RTL8192Pci_HW {
#define RRSR_36M BIT9
#define RRSR_48M BIT10
#define RRSR_54M BIT11
-#define RRSR_MCS0 BIT12
-#define RRSR_MCS1 BIT13
-#define RRSR_MCS2 BIT14
-#define RRSR_MCS3 BIT15
-#define RRSR_MCS4 BIT16
-#define RRSR_MCS5 BIT17
-#define RRSR_MCS6 BIT18
-#define RRSR_MCS7 BIT19
#define BRSR_AckShortPmb BIT23
UFWP = 0x318,
RATR0 = 0x320,
@@ -419,21 +239,14 @@ enum _RTL8192Pci_HW {
RATR_MCS11 | RATR_MCS12 | RATR_MCS13 | \
RATR_MCS14|RATR_MCS15)
-
DRIVER_RSSI = 0x32c,
MCS_TXAGC = 0x340,
CCK_TXAGC = 0x348,
- MacBlkCtrl = 0x403,
-
-}
-;
+ MAC_BLK_CTRL = 0x403,
+};
#define GPI 0x108
-#define GPO 0x109
-#define GPE 0x10a
-
-#define HWSET_MAX_SIZE_92S 128
-#define ANAPAR_FOR_8192PciE 0x17
+#define ANAPAR_FOR_8192PCIE 0x17
#endif
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c
index a813eded4cb3..c6cbdea6d5b2 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c
@@ -94,24 +94,24 @@ static u32 _rtl92e_phy_rf_read(struct net_device *dev,
struct r8192_priv *priv = rtllib_priv(dev);
u32 ret = 0;
u32 NewOffset = 0;
- struct bb_reg_definition *pPhyReg = &priv->PHYRegDef[eRFPath];
+ struct bb_reg_definition *pPhyReg = &priv->phy_reg_def[eRFPath];
Offset &= 0x3f;
if (priv->rf_chip == RF_8256) {
rtl92e_set_bb_reg(dev, rFPGA0_AnalogParameter4, 0xf00, 0x0);
if (Offset >= 31) {
- priv->RfReg0Value[eRFPath] |= 0x140;
+ priv->rf_reg_0value[eRFPath] |= 0x140;
rtl92e_set_bb_reg(dev, pPhyReg->rf3wireOffset,
bMaskDWord,
- (priv->RfReg0Value[eRFPath]<<16));
+ (priv->rf_reg_0value[eRFPath] << 16));
NewOffset = Offset - 30;
} else if (Offset >= 16) {
- priv->RfReg0Value[eRFPath] |= 0x100;
- priv->RfReg0Value[eRFPath] &= (~0x40);
+ priv->rf_reg_0value[eRFPath] |= 0x100;
+ priv->rf_reg_0value[eRFPath] &= (~0x40);
rtl92e_set_bb_reg(dev, pPhyReg->rf3wireOffset,
bMaskDWord,
- (priv->RfReg0Value[eRFPath]<<16));
+ (priv->rf_reg_0value[eRFPath] << 16));
NewOffset = Offset - 15;
} else
@@ -130,10 +130,10 @@ static u32 _rtl92e_phy_rf_read(struct net_device *dev,
bLSSIReadBackData);
if (priv->rf_chip == RF_8256) {
- priv->RfReg0Value[eRFPath] &= 0xebf;
+ priv->rf_reg_0value[eRFPath] &= 0xebf;
rtl92e_set_bb_reg(dev, pPhyReg->rf3wireOffset, bMaskDWord,
- (priv->RfReg0Value[eRFPath] << 16));
+ (priv->rf_reg_0value[eRFPath] << 16));
rtl92e_set_bb_reg(dev, rFPGA0_AnalogParameter4, 0x300, 0x3);
}
@@ -149,24 +149,24 @@ static void _rtl92e_phy_rf_write(struct net_device *dev,
{
struct r8192_priv *priv = rtllib_priv(dev);
u32 DataAndAddr = 0, NewOffset = 0;
- struct bb_reg_definition *pPhyReg = &priv->PHYRegDef[eRFPath];
+ struct bb_reg_definition *pPhyReg = &priv->phy_reg_def[eRFPath];
Offset &= 0x3f;
if (priv->rf_chip == RF_8256) {
rtl92e_set_bb_reg(dev, rFPGA0_AnalogParameter4, 0xf00, 0x0);
if (Offset >= 31) {
- priv->RfReg0Value[eRFPath] |= 0x140;
+ priv->rf_reg_0value[eRFPath] |= 0x140;
rtl92e_set_bb_reg(dev, pPhyReg->rf3wireOffset,
bMaskDWord,
- (priv->RfReg0Value[eRFPath] << 16));
+ (priv->rf_reg_0value[eRFPath] << 16));
NewOffset = Offset - 30;
} else if (Offset >= 16) {
- priv->RfReg0Value[eRFPath] |= 0x100;
- priv->RfReg0Value[eRFPath] &= (~0x40);
+ priv->rf_reg_0value[eRFPath] |= 0x100;
+ priv->rf_reg_0value[eRFPath] &= (~0x40);
rtl92e_set_bb_reg(dev, pPhyReg->rf3wireOffset,
bMaskDWord,
- (priv->RfReg0Value[eRFPath] << 16));
+ (priv->rf_reg_0value[eRFPath] << 16));
NewOffset = Offset - 15;
} else
NewOffset = Offset;
@@ -179,14 +179,14 @@ static void _rtl92e_phy_rf_write(struct net_device *dev,
rtl92e_set_bb_reg(dev, pPhyReg->rf3wireOffset, bMaskDWord, DataAndAddr);
if (Offset == 0x0)
- priv->RfReg0Value[eRFPath] = Data;
+ priv->rf_reg_0value[eRFPath] = Data;
if (priv->rf_chip == RF_8256) {
if (Offset != 0) {
- priv->RfReg0Value[eRFPath] &= 0xebf;
+ priv->rf_reg_0value[eRFPath] &= 0xebf;
rtl92e_set_bb_reg(dev, pPhyReg->rf3wireOffset,
bMaskDWord,
- (priv->RfReg0Value[eRFPath] << 16));
+ (priv->rf_reg_0value[eRFPath] << 16));
}
rtl92e_set_bb_reg(dev, rFPGA0_AnalogParameter4, 0x300, 0x3);
}
@@ -203,7 +203,7 @@ void rtl92e_set_rf_reg(struct net_device *dev, enum rf90_radio_path eRFPath,
if (priv->rtllib->rf_power_state != rf_on && !priv->being_init_adapter)
return;
- if (priv->Rf_Mode == RF_OP_By_FW) {
+ if (priv->rf_mode == RF_OP_By_FW) {
if (BitMask != bMask12Bits) {
Original_Value = _rtl92e_phy_rf_fw_read(dev, eRFPath,
RegAddr);
@@ -240,7 +240,7 @@ u32 rtl92e_get_rf_reg(struct net_device *dev, enum rf90_radio_path eRFPath,
if (priv->rtllib->rf_power_state != rf_on && !priv->being_init_adapter)
return 0;
mutex_lock(&priv->rf_mutex);
- if (priv->Rf_Mode == RF_OP_By_FW) {
+ if (priv->rf_mode == RF_OP_By_FW) {
Original_Value = _rtl92e_phy_rf_fw_read(dev, eRFPath, RegAddr);
udelay(200);
} else {
@@ -306,7 +306,7 @@ void rtl92e_config_mac(struct net_device *dev)
u32 *pdwArray = NULL;
struct r8192_priv *priv = rtllib_priv(dev);
- if (priv->bTXPowerDataReadFromEEPORM) {
+ if (priv->tx_pwr_data_read_from_eeprom) {
dwArrayLen = MACPHY_Array_PGLength;
pdwArray = Rtl819XMACPHY_Array_PG;
@@ -342,13 +342,13 @@ static void _rtl92e_phy_config_bb(struct net_device *dev, u8 ConfigType)
Rtl819XPHY_REGArray_Table = Rtl819XPHY_REG_1T2RArray;
}
- if (ConfigType == BaseBand_Config_PHY_REG) {
+ if (ConfigType == BB_CONFIG_PHY_REG) {
for (i = 0; i < PHY_REGArrayLen; i += 2) {
rtl92e_set_bb_reg(dev, Rtl819XPHY_REGArray_Table[i],
bMaskDWord,
Rtl819XPHY_REGArray_Table[i+1]);
}
- } else if (ConfigType == BaseBand_Config_AGC_TAB) {
+ } else if (ConfigType == BB_CONFIG_AGC_TAB) {
for (i = 0; i < AGCTAB_ArrayLen; i += 2) {
rtl92e_set_bb_reg(dev, Rtl819XAGCTAB_Array_Table[i],
bMaskDWord,
@@ -361,90 +361,90 @@ static void _rtl92e_init_bb_rf_reg_def(struct net_device *dev)
{
struct r8192_priv *priv = rtllib_priv(dev);
- priv->PHYRegDef[RF90_PATH_A].rfintfs = rFPGA0_XAB_RFInterfaceSW;
- priv->PHYRegDef[RF90_PATH_B].rfintfs = rFPGA0_XAB_RFInterfaceSW;
- priv->PHYRegDef[RF90_PATH_C].rfintfs = rFPGA0_XCD_RFInterfaceSW;
- priv->PHYRegDef[RF90_PATH_D].rfintfs = rFPGA0_XCD_RFInterfaceSW;
-
- priv->PHYRegDef[RF90_PATH_A].rfintfi = rFPGA0_XAB_RFInterfaceRB;
- priv->PHYRegDef[RF90_PATH_B].rfintfi = rFPGA0_XAB_RFInterfaceRB;
- priv->PHYRegDef[RF90_PATH_C].rfintfi = rFPGA0_XCD_RFInterfaceRB;
- priv->PHYRegDef[RF90_PATH_D].rfintfi = rFPGA0_XCD_RFInterfaceRB;
-
- priv->PHYRegDef[RF90_PATH_A].rfintfo = rFPGA0_XA_RFInterfaceOE;
- priv->PHYRegDef[RF90_PATH_B].rfintfo = rFPGA0_XB_RFInterfaceOE;
- priv->PHYRegDef[RF90_PATH_C].rfintfo = rFPGA0_XC_RFInterfaceOE;
- priv->PHYRegDef[RF90_PATH_D].rfintfo = rFPGA0_XD_RFInterfaceOE;
-
- priv->PHYRegDef[RF90_PATH_A].rfintfe = rFPGA0_XA_RFInterfaceOE;
- priv->PHYRegDef[RF90_PATH_B].rfintfe = rFPGA0_XB_RFInterfaceOE;
- priv->PHYRegDef[RF90_PATH_C].rfintfe = rFPGA0_XC_RFInterfaceOE;
- priv->PHYRegDef[RF90_PATH_D].rfintfe = rFPGA0_XD_RFInterfaceOE;
-
- priv->PHYRegDef[RF90_PATH_A].rf3wireOffset = rFPGA0_XA_LSSIParameter;
- priv->PHYRegDef[RF90_PATH_B].rf3wireOffset = rFPGA0_XB_LSSIParameter;
- priv->PHYRegDef[RF90_PATH_C].rf3wireOffset = rFPGA0_XC_LSSIParameter;
- priv->PHYRegDef[RF90_PATH_D].rf3wireOffset = rFPGA0_XD_LSSIParameter;
-
- priv->PHYRegDef[RF90_PATH_A].rfLSSI_Select = rFPGA0_XAB_RFParameter;
- priv->PHYRegDef[RF90_PATH_B].rfLSSI_Select = rFPGA0_XAB_RFParameter;
- priv->PHYRegDef[RF90_PATH_C].rfLSSI_Select = rFPGA0_XCD_RFParameter;
- priv->PHYRegDef[RF90_PATH_D].rfLSSI_Select = rFPGA0_XCD_RFParameter;
-
- priv->PHYRegDef[RF90_PATH_A].rfTxGainStage = rFPGA0_TxGainStage;
- priv->PHYRegDef[RF90_PATH_B].rfTxGainStage = rFPGA0_TxGainStage;
- priv->PHYRegDef[RF90_PATH_C].rfTxGainStage = rFPGA0_TxGainStage;
- priv->PHYRegDef[RF90_PATH_D].rfTxGainStage = rFPGA0_TxGainStage;
-
- priv->PHYRegDef[RF90_PATH_A].rfHSSIPara1 = rFPGA0_XA_HSSIParameter1;
- priv->PHYRegDef[RF90_PATH_B].rfHSSIPara1 = rFPGA0_XB_HSSIParameter1;
- priv->PHYRegDef[RF90_PATH_C].rfHSSIPara1 = rFPGA0_XC_HSSIParameter1;
- priv->PHYRegDef[RF90_PATH_D].rfHSSIPara1 = rFPGA0_XD_HSSIParameter1;
-
- priv->PHYRegDef[RF90_PATH_A].rfHSSIPara2 = rFPGA0_XA_HSSIParameter2;
- priv->PHYRegDef[RF90_PATH_B].rfHSSIPara2 = rFPGA0_XB_HSSIParameter2;
- priv->PHYRegDef[RF90_PATH_C].rfHSSIPara2 = rFPGA0_XC_HSSIParameter2;
- priv->PHYRegDef[RF90_PATH_D].rfHSSIPara2 = rFPGA0_XD_HSSIParameter2;
-
- priv->PHYRegDef[RF90_PATH_A].rfSwitchControl = rFPGA0_XAB_SwitchControl;
- priv->PHYRegDef[RF90_PATH_B].rfSwitchControl = rFPGA0_XAB_SwitchControl;
- priv->PHYRegDef[RF90_PATH_C].rfSwitchControl = rFPGA0_XCD_SwitchControl;
- priv->PHYRegDef[RF90_PATH_D].rfSwitchControl = rFPGA0_XCD_SwitchControl;
-
- priv->PHYRegDef[RF90_PATH_A].rfAGCControl1 = rOFDM0_XAAGCCore1;
- priv->PHYRegDef[RF90_PATH_B].rfAGCControl1 = rOFDM0_XBAGCCore1;
- priv->PHYRegDef[RF90_PATH_C].rfAGCControl1 = rOFDM0_XCAGCCore1;
- priv->PHYRegDef[RF90_PATH_D].rfAGCControl1 = rOFDM0_XDAGCCore1;
-
- priv->PHYRegDef[RF90_PATH_A].rfAGCControl2 = rOFDM0_XAAGCCore2;
- priv->PHYRegDef[RF90_PATH_B].rfAGCControl2 = rOFDM0_XBAGCCore2;
- priv->PHYRegDef[RF90_PATH_C].rfAGCControl2 = rOFDM0_XCAGCCore2;
- priv->PHYRegDef[RF90_PATH_D].rfAGCControl2 = rOFDM0_XDAGCCore2;
-
- priv->PHYRegDef[RF90_PATH_A].rfRxIQImbalance = rOFDM0_XARxIQImbalance;
- priv->PHYRegDef[RF90_PATH_B].rfRxIQImbalance = rOFDM0_XBRxIQImbalance;
- priv->PHYRegDef[RF90_PATH_C].rfRxIQImbalance = rOFDM0_XCRxIQImbalance;
- priv->PHYRegDef[RF90_PATH_D].rfRxIQImbalance = rOFDM0_XDRxIQImbalance;
-
- priv->PHYRegDef[RF90_PATH_A].rfRxAFE = rOFDM0_XARxAFE;
- priv->PHYRegDef[RF90_PATH_B].rfRxAFE = rOFDM0_XBRxAFE;
- priv->PHYRegDef[RF90_PATH_C].rfRxAFE = rOFDM0_XCRxAFE;
- priv->PHYRegDef[RF90_PATH_D].rfRxAFE = rOFDM0_XDRxAFE;
-
- priv->PHYRegDef[RF90_PATH_A].rfTxIQImbalance = rOFDM0_XATxIQImbalance;
- priv->PHYRegDef[RF90_PATH_B].rfTxIQImbalance = rOFDM0_XBTxIQImbalance;
- priv->PHYRegDef[RF90_PATH_C].rfTxIQImbalance = rOFDM0_XCTxIQImbalance;
- priv->PHYRegDef[RF90_PATH_D].rfTxIQImbalance = rOFDM0_XDTxIQImbalance;
-
- priv->PHYRegDef[RF90_PATH_A].rfTxAFE = rOFDM0_XATxAFE;
- priv->PHYRegDef[RF90_PATH_B].rfTxAFE = rOFDM0_XBTxAFE;
- priv->PHYRegDef[RF90_PATH_C].rfTxAFE = rOFDM0_XCTxAFE;
- priv->PHYRegDef[RF90_PATH_D].rfTxAFE = rOFDM0_XDTxAFE;
-
- priv->PHYRegDef[RF90_PATH_A].rfLSSIReadBack = rFPGA0_XA_LSSIReadBack;
- priv->PHYRegDef[RF90_PATH_B].rfLSSIReadBack = rFPGA0_XB_LSSIReadBack;
- priv->PHYRegDef[RF90_PATH_C].rfLSSIReadBack = rFPGA0_XC_LSSIReadBack;
- priv->PHYRegDef[RF90_PATH_D].rfLSSIReadBack = rFPGA0_XD_LSSIReadBack;
+ priv->phy_reg_def[RF90_PATH_A].rfintfs = rFPGA0_XAB_RFInterfaceSW;
+ priv->phy_reg_def[RF90_PATH_B].rfintfs = rFPGA0_XAB_RFInterfaceSW;
+ priv->phy_reg_def[RF90_PATH_C].rfintfs = rFPGA0_XCD_RFInterfaceSW;
+ priv->phy_reg_def[RF90_PATH_D].rfintfs = rFPGA0_XCD_RFInterfaceSW;
+
+ priv->phy_reg_def[RF90_PATH_A].rfintfi = rFPGA0_XAB_RFInterfaceRB;
+ priv->phy_reg_def[RF90_PATH_B].rfintfi = rFPGA0_XAB_RFInterfaceRB;
+ priv->phy_reg_def[RF90_PATH_C].rfintfi = rFPGA0_XCD_RFInterfaceRB;
+ priv->phy_reg_def[RF90_PATH_D].rfintfi = rFPGA0_XCD_RFInterfaceRB;
+
+ priv->phy_reg_def[RF90_PATH_A].rfintfo = rFPGA0_XA_RFInterfaceOE;
+ priv->phy_reg_def[RF90_PATH_B].rfintfo = rFPGA0_XB_RFInterfaceOE;
+ priv->phy_reg_def[RF90_PATH_C].rfintfo = rFPGA0_XC_RFInterfaceOE;
+ priv->phy_reg_def[RF90_PATH_D].rfintfo = rFPGA0_XD_RFInterfaceOE;
+
+ priv->phy_reg_def[RF90_PATH_A].rfintfe = rFPGA0_XA_RFInterfaceOE;
+ priv->phy_reg_def[RF90_PATH_B].rfintfe = rFPGA0_XB_RFInterfaceOE;
+ priv->phy_reg_def[RF90_PATH_C].rfintfe = rFPGA0_XC_RFInterfaceOE;
+ priv->phy_reg_def[RF90_PATH_D].rfintfe = rFPGA0_XD_RFInterfaceOE;
+
+ priv->phy_reg_def[RF90_PATH_A].rf3wireOffset = rFPGA0_XA_LSSIParameter;
+ priv->phy_reg_def[RF90_PATH_B].rf3wireOffset = rFPGA0_XB_LSSIParameter;
+ priv->phy_reg_def[RF90_PATH_C].rf3wireOffset = rFPGA0_XC_LSSIParameter;
+ priv->phy_reg_def[RF90_PATH_D].rf3wireOffset = rFPGA0_XD_LSSIParameter;
+
+ priv->phy_reg_def[RF90_PATH_A].rfLSSI_Select = rFPGA0_XAB_RFParameter;
+ priv->phy_reg_def[RF90_PATH_B].rfLSSI_Select = rFPGA0_XAB_RFParameter;
+ priv->phy_reg_def[RF90_PATH_C].rfLSSI_Select = rFPGA0_XCD_RFParameter;
+ priv->phy_reg_def[RF90_PATH_D].rfLSSI_Select = rFPGA0_XCD_RFParameter;
+
+ priv->phy_reg_def[RF90_PATH_A].rfTxGainStage = rFPGA0_TxGainStage;
+ priv->phy_reg_def[RF90_PATH_B].rfTxGainStage = rFPGA0_TxGainStage;
+ priv->phy_reg_def[RF90_PATH_C].rfTxGainStage = rFPGA0_TxGainStage;
+ priv->phy_reg_def[RF90_PATH_D].rfTxGainStage = rFPGA0_TxGainStage;
+
+ priv->phy_reg_def[RF90_PATH_A].rfHSSIPara1 = rFPGA0_XA_HSSIParameter1;
+ priv->phy_reg_def[RF90_PATH_B].rfHSSIPara1 = rFPGA0_XB_HSSIParameter1;
+ priv->phy_reg_def[RF90_PATH_C].rfHSSIPara1 = rFPGA0_XC_HSSIParameter1;
+ priv->phy_reg_def[RF90_PATH_D].rfHSSIPara1 = rFPGA0_XD_HSSIParameter1;
+
+ priv->phy_reg_def[RF90_PATH_A].rfHSSIPara2 = rFPGA0_XA_HSSIParameter2;
+ priv->phy_reg_def[RF90_PATH_B].rfHSSIPara2 = rFPGA0_XB_HSSIParameter2;
+ priv->phy_reg_def[RF90_PATH_C].rfHSSIPara2 = rFPGA0_XC_HSSIParameter2;
+ priv->phy_reg_def[RF90_PATH_D].rfHSSIPara2 = rFPGA0_XD_HSSIParameter2;
+
+ priv->phy_reg_def[RF90_PATH_A].rfSwitchControl = rFPGA0_XAB_SwitchControl;
+ priv->phy_reg_def[RF90_PATH_B].rfSwitchControl = rFPGA0_XAB_SwitchControl;
+ priv->phy_reg_def[RF90_PATH_C].rfSwitchControl = rFPGA0_XCD_SwitchControl;
+ priv->phy_reg_def[RF90_PATH_D].rfSwitchControl = rFPGA0_XCD_SwitchControl;
+
+ priv->phy_reg_def[RF90_PATH_A].rfAGCControl1 = rOFDM0_XAAGCCore1;
+ priv->phy_reg_def[RF90_PATH_B].rfAGCControl1 = rOFDM0_XBAGCCore1;
+ priv->phy_reg_def[RF90_PATH_C].rfAGCControl1 = rOFDM0_XCAGCCore1;
+ priv->phy_reg_def[RF90_PATH_D].rfAGCControl1 = rOFDM0_XDAGCCore1;
+
+ priv->phy_reg_def[RF90_PATH_A].rfAGCControl2 = rOFDM0_XAAGCCore2;
+ priv->phy_reg_def[RF90_PATH_B].rfAGCControl2 = rOFDM0_XBAGCCore2;
+ priv->phy_reg_def[RF90_PATH_C].rfAGCControl2 = rOFDM0_XCAGCCore2;
+ priv->phy_reg_def[RF90_PATH_D].rfAGCControl2 = rOFDM0_XDAGCCore2;
+
+ priv->phy_reg_def[RF90_PATH_A].rfRxIQImbalance = rOFDM0_XARxIQImbalance;
+ priv->phy_reg_def[RF90_PATH_B].rfRxIQImbalance = rOFDM0_XBRxIQImbalance;
+ priv->phy_reg_def[RF90_PATH_C].rfRxIQImbalance = rOFDM0_XCRxIQImbalance;
+ priv->phy_reg_def[RF90_PATH_D].rfRxIQImbalance = rOFDM0_XDRxIQImbalance;
+
+ priv->phy_reg_def[RF90_PATH_A].rfRxAFE = rOFDM0_XARxAFE;
+ priv->phy_reg_def[RF90_PATH_B].rfRxAFE = rOFDM0_XBRxAFE;
+ priv->phy_reg_def[RF90_PATH_C].rfRxAFE = rOFDM0_XCRxAFE;
+ priv->phy_reg_def[RF90_PATH_D].rfRxAFE = rOFDM0_XDRxAFE;
+
+ priv->phy_reg_def[RF90_PATH_A].rfTxIQImbalance = rOFDM0_XATxIQImbalance;
+ priv->phy_reg_def[RF90_PATH_B].rfTxIQImbalance = rOFDM0_XBTxIQImbalance;
+ priv->phy_reg_def[RF90_PATH_C].rfTxIQImbalance = rOFDM0_XCTxIQImbalance;
+ priv->phy_reg_def[RF90_PATH_D].rfTxIQImbalance = rOFDM0_XDTxIQImbalance;
+
+ priv->phy_reg_def[RF90_PATH_A].rfTxAFE = rOFDM0_XATxAFE;
+ priv->phy_reg_def[RF90_PATH_B].rfTxAFE = rOFDM0_XBTxAFE;
+ priv->phy_reg_def[RF90_PATH_C].rfTxAFE = rOFDM0_XCTxAFE;
+ priv->phy_reg_def[RF90_PATH_D].rfTxAFE = rOFDM0_XDTxAFE;
+
+ priv->phy_reg_def[RF90_PATH_A].rfLSSIReadBack = rFPGA0_XA_LSSIReadBack;
+ priv->phy_reg_def[RF90_PATH_B].rfLSSIReadBack = rFPGA0_XB_LSSIReadBack;
+ priv->phy_reg_def[RF90_PATH_C].rfLSSIReadBack = rFPGA0_XC_LSSIReadBack;
+ priv->phy_reg_def[RF90_PATH_D].rfLSSIReadBack = rFPGA0_XD_LSSIReadBack;
}
@@ -526,25 +526,25 @@ static bool _rtl92e_bb_config_para_file(struct net_device *dev)
return rtStatus;
}
rtl92e_set_bb_reg(dev, rFPGA0_RFMOD, bCCKEn|bOFDMEn, 0x0);
- _rtl92e_phy_config_bb(dev, BaseBand_Config_PHY_REG);
+ _rtl92e_phy_config_bb(dev, BB_CONFIG_PHY_REG);
dwRegValue = rtl92e_readl(dev, CPU_GEN);
rtl92e_writel(dev, CPU_GEN, (dwRegValue|CPU_GEN_BB_RST));
- _rtl92e_phy_config_bb(dev, BaseBand_Config_AGC_TAB);
+ _rtl92e_phy_config_bb(dev, BB_CONFIG_AGC_TAB);
- if (priv->IC_Cut > VERSION_8190_BD) {
+ if (priv->ic_cut > VERSION_8190_BD) {
if (priv->rf_type == RF_2T4R)
- dwRegValue = priv->AntennaTxPwDiff[2]<<8 |
- priv->AntennaTxPwDiff[1]<<4 |
- priv->AntennaTxPwDiff[0];
+ dwRegValue = priv->antenna_tx_pwr_diff[2] << 8 |
+ priv->antenna_tx_pwr_diff[1] << 4 |
+ priv->antenna_tx_pwr_diff[0];
else
dwRegValue = 0x0;
rtl92e_set_bb_reg(dev, rFPGA0_TxGainStage,
(bXBTxAGC|bXCTxAGC|bXDTxAGC), dwRegValue);
- dwRegValue = priv->CrystalCap;
+ dwRegValue = priv->crystal_cap;
rtl92e_set_bb_reg(dev, rFPGA0_AnalogParameter1, bXtalCap92x,
dwRegValue);
}
@@ -561,65 +561,43 @@ void rtl92e_get_tx_power(struct net_device *dev)
{
struct r8192_priv *priv = rtllib_priv(dev);
- priv->MCSTxPowerLevelOriginalOffset[0] =
+ priv->mcs_tx_pwr_level_org_offset[0] =
rtl92e_readl(dev, rTxAGC_Rate18_06);
- priv->MCSTxPowerLevelOriginalOffset[1] =
+ priv->mcs_tx_pwr_level_org_offset[1] =
rtl92e_readl(dev, rTxAGC_Rate54_24);
- priv->MCSTxPowerLevelOriginalOffset[2] =
+ priv->mcs_tx_pwr_level_org_offset[2] =
rtl92e_readl(dev, rTxAGC_Mcs03_Mcs00);
- priv->MCSTxPowerLevelOriginalOffset[3] =
+ priv->mcs_tx_pwr_level_org_offset[3] =
rtl92e_readl(dev, rTxAGC_Mcs07_Mcs04);
- priv->MCSTxPowerLevelOriginalOffset[4] =
+ priv->mcs_tx_pwr_level_org_offset[4] =
rtl92e_readl(dev, rTxAGC_Mcs11_Mcs08);
- priv->MCSTxPowerLevelOriginalOffset[5] =
+ priv->mcs_tx_pwr_level_org_offset[5] =
rtl92e_readl(dev, rTxAGC_Mcs15_Mcs12);
- priv->DefaultInitialGain[0] = rtl92e_readb(dev, rOFDM0_XAAGCCore1);
- priv->DefaultInitialGain[1] = rtl92e_readb(dev, rOFDM0_XBAGCCore1);
- priv->DefaultInitialGain[2] = rtl92e_readb(dev, rOFDM0_XCAGCCore1);
- priv->DefaultInitialGain[3] = rtl92e_readb(dev, rOFDM0_XDAGCCore1);
+ priv->def_initial_gain[0] = rtl92e_readb(dev, rOFDM0_XAAGCCore1);
+ priv->def_initial_gain[1] = rtl92e_readb(dev, rOFDM0_XBAGCCore1);
+ priv->def_initial_gain[2] = rtl92e_readb(dev, rOFDM0_XCAGCCore1);
+ priv->def_initial_gain[3] = rtl92e_readb(dev, rOFDM0_XDAGCCore1);
priv->framesync = rtl92e_readb(dev, rOFDM0_RxDetector3);
- priv->framesyncC34 = rtl92e_readl(dev, rOFDM0_RxDetector2);
- priv->SifsTime = rtl92e_readw(dev, SIFS);
}
void rtl92e_set_tx_power(struct net_device *dev, u8 channel)
{
struct r8192_priv *priv = rtllib_priv(dev);
u8 powerlevel = 0, powerlevelOFDM24G = 0;
- s8 ant_pwr_diff;
- u32 u4RegValue;
if (priv->epromtype == EEPROM_93C46) {
- powerlevel = priv->TxPowerLevelCCK[channel-1];
- powerlevelOFDM24G = priv->TxPowerLevelOFDM24G[channel-1];
+ powerlevel = priv->tx_pwr_level_cck[channel - 1];
+ powerlevelOFDM24G = priv->tx_pwr_level_ofdm_24g[channel - 1];
} else if (priv->epromtype == EEPROM_93C56) {
- if (priv->rf_type == RF_1T2R) {
- powerlevel = priv->TxPowerLevelCCK_C[channel-1];
- powerlevelOFDM24G = priv->TxPowerLevelOFDM24G_C[channel-1];
- } else if (priv->rf_type == RF_2T4R) {
- powerlevel = priv->TxPowerLevelCCK_A[channel-1];
- powerlevelOFDM24G = priv->TxPowerLevelOFDM24G_A[channel-1];
-
- ant_pwr_diff = priv->TxPowerLevelOFDM24G_C[channel-1]
- - priv->TxPowerLevelOFDM24G_A[channel-1];
-
- priv->RF_C_TxPwDiff = ant_pwr_diff;
-
- ant_pwr_diff &= 0xf;
-
- priv->AntennaTxPwDiff[2] = 0;
- priv->AntennaTxPwDiff[1] = (u8)(ant_pwr_diff);
- priv->AntennaTxPwDiff[0] = 0;
-
- u4RegValue = priv->AntennaTxPwDiff[2]<<8 |
- priv->AntennaTxPwDiff[1]<<4 |
- priv->AntennaTxPwDiff[0];
+ if (priv->rf_type == RF_2T4R) {
+ priv->antenna_tx_pwr_diff[2] = 0;
+ priv->antenna_tx_pwr_diff[1] = 0;
+ priv->antenna_tx_pwr_diff[0] = 0;
rtl92e_set_bb_reg(dev, rFPGA0_TxGainStage,
- (bXBTxAGC|bXCTxAGC|bXDTxAGC),
- u4RegValue);
+ (bXBTxAGC | bXCTxAGC | bXDTxAGC), 0);
}
}
switch (priv->rf_chip) {
@@ -726,8 +704,8 @@ u8 rtl92e_config_rf_path(struct net_device *dev, enum rf90_radio_path eRFPath)
static void _rtl92e_set_tx_power_level(struct net_device *dev, u8 channel)
{
struct r8192_priv *priv = rtllib_priv(dev);
- u8 powerlevel = priv->TxPowerLevelCCK[channel-1];
- u8 powerlevelOFDM24G = priv->TxPowerLevelOFDM24G[channel-1];
+ u8 powerlevel = priv->tx_pwr_level_cck[channel - 1];
+ u8 powerlevelOFDM24G = priv->tx_pwr_level_ofdm_24g[channel - 1];
switch (priv->rf_chip) {
case RF_8225:
@@ -886,7 +864,7 @@ static u8 _rtl92e_phy_switch_channel_step(struct net_device *dev, u8 channel,
continue;
switch (CurrentCmd->CmdID) {
case CmdID_SetTxPowerLevel:
- if (priv->IC_Cut > VERSION_8190_BD)
+ if (priv->ic_cut > VERSION_8190_BD)
_rtl92e_set_tx_power_level(dev,
channel);
break;
@@ -904,7 +882,7 @@ static u8 _rtl92e_phy_switch_channel_step(struct net_device *dev, u8 channel,
break;
case CmdID_RF_WriteReg:
for (eRFPath = 0; eRFPath <
- priv->NumTotalRFPath; eRFPath++)
+ priv->num_total_rf_path; eRFPath++)
rtl92e_set_rf_reg(dev,
(enum rf90_radio_path)eRFPath,
CurrentCmd->Para1, bMask12Bits,
@@ -929,8 +907,8 @@ static void _rtl92e_phy_switch_channel(struct net_device *dev, u8 channel)
u32 delay = 0;
while (!_rtl92e_phy_switch_channel_step(dev, channel,
- &priv->SwChnlStage,
- &priv->SwChnlStep, &delay)) {
+ &priv->sw_chnl_stage,
+ &priv->sw_chnl_step, &delay)) {
if (delay > 0)
msleep(delay);
if (!priv->up)
@@ -954,7 +932,7 @@ u8 rtl92e_set_channel(struct net_device *dev, u8 channel)
netdev_err(dev, "%s(): Driver is not initialized\n", __func__);
return false;
}
- if (priv->SwChnlInProgress)
+ if (priv->sw_chnl_in_progress)
return false;
@@ -987,18 +965,18 @@ u8 rtl92e_set_channel(struct net_device *dev, u8 channel)
break;
}
- priv->SwChnlInProgress = true;
+ priv->sw_chnl_in_progress = true;
if (channel == 0)
channel = 1;
priv->chan = channel;
- priv->SwChnlStage = 0;
- priv->SwChnlStep = 0;
+ priv->sw_chnl_stage = 0;
+ priv->sw_chnl_step = 0;
if (priv->up)
_rtl92e_phy_switch_channel_work_item(dev);
- priv->SwChnlInProgress = false;
+ priv->sw_chnl_in_progress = false;
return true;
}
@@ -1006,16 +984,16 @@ static void _rtl92e_cck_tx_power_track_bw_switch_tssi(struct net_device *dev)
{
struct r8192_priv *priv = rtllib_priv(dev);
- switch (priv->CurrentChannelBW) {
+ switch (priv->current_chnl_bw) {
case HT_CHANNEL_WIDTH_20:
priv->cck_present_attn =
- priv->CCKPresentAttentuation_20Mdefault +
- priv->CCKPresentAttentuation_difference;
+ priv->cck_present_attn_20m_def +
+ priv->cck_present_attn_diff;
if (priv->cck_present_attn >
- (CCKTxBBGainTableLength-1))
+ (CCK_TX_BB_GAIN_TABLE_LEN - 1))
priv->cck_present_attn =
- CCKTxBBGainTableLength-1;
+ CCK_TX_BB_GAIN_TABLE_LEN - 1;
if (priv->cck_present_attn < 0)
priv->cck_present_attn = 0;
@@ -1034,13 +1012,13 @@ static void _rtl92e_cck_tx_power_track_bw_switch_tssi(struct net_device *dev)
case HT_CHANNEL_WIDTH_20_40:
priv->cck_present_attn =
- priv->CCKPresentAttentuation_40Mdefault +
- priv->CCKPresentAttentuation_difference;
+ priv->cck_present_attn_40m_def +
+ priv->cck_present_attn_diff;
if (priv->cck_present_attn >
- (CCKTxBBGainTableLength - 1))
+ (CCK_TX_BB_GAIN_TABLE_LEN - 1))
priv->cck_present_attn =
- CCKTxBBGainTableLength-1;
+ CCK_TX_BB_GAIN_TABLE_LEN - 1;
if (priv->cck_present_attn < 0)
priv->cck_present_attn = 0;
@@ -1070,15 +1048,15 @@ static void _rtl92e_cck_tx_power_track_bw_switch_thermal(struct net_device *dev)
priv->bcck_in_ch14)
priv->bcck_in_ch14 = false;
- switch (priv->CurrentChannelBW) {
+ switch (priv->current_chnl_bw) {
case HT_CHANNEL_WIDTH_20:
- if (priv->Record_CCK_20Mindex == 0)
- priv->Record_CCK_20Mindex = 6;
- priv->CCK_index = priv->Record_CCK_20Mindex;
+ if (priv->rec_cck_20m_idx == 0)
+ priv->rec_cck_20m_idx = 6;
+ priv->cck_index = priv->rec_cck_20m_idx;
break;
case HT_CHANNEL_WIDTH_20_40:
- priv->CCK_index = priv->Record_CCK_40Mindex;
+ priv->cck_index = priv->rec_cck_40m_idx;
break;
}
rtl92e_dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);
@@ -1088,7 +1066,7 @@ static void _rtl92e_cck_tx_power_track_bw_switch(struct net_device *dev)
{
struct r8192_priv *priv = rtllib_priv(dev);
- if (priv->IC_Cut >= IC_VersionCut_D)
+ if (priv->ic_cut >= IC_VersionCut_D)
_rtl92e_cck_tx_power_track_bw_switch_tssi(dev);
else
_rtl92e_cck_tx_power_track_bw_switch_thermal(dev);
@@ -1101,7 +1079,7 @@ static void _rtl92e_set_bw_mode_work_item(struct net_device *dev)
u8 regBwOpMode;
if (priv->rf_chip == RF_PSEUDO_11N) {
- priv->SetBWModeInProgress = false;
+ priv->set_bw_mode_in_progress = false;
return;
}
if (!priv->up) {
@@ -1110,7 +1088,7 @@ static void _rtl92e_set_bw_mode_work_item(struct net_device *dev)
}
regBwOpMode = rtl92e_readb(dev, BW_OPMODE);
- switch (priv->CurrentChannelBW) {
+ switch (priv->current_chnl_bw) {
case HT_CHANNEL_WIDTH_20:
regBwOpMode |= BW_OPMODE_20MHZ;
rtl92e_writeb(dev, BW_OPMODE, regBwOpMode);
@@ -1123,11 +1101,11 @@ static void _rtl92e_set_bw_mode_work_item(struct net_device *dev)
default:
netdev_err(dev, "%s(): unknown Bandwidth: %#X\n", __func__,
- priv->CurrentChannelBW);
+ priv->current_chnl_bw);
break;
}
- switch (priv->CurrentChannelBW) {
+ switch (priv->current_chnl_bw) {
case HT_CHANNEL_WIDTH_20:
rtl92e_set_bb_reg(dev, rFPGA0_RFMOD, bRFMOD, 0x0);
rtl92e_set_bb_reg(dev, rFPGA1_RFMOD, bRFMOD, 0x0);
@@ -1156,15 +1134,15 @@ static void _rtl92e_set_bw_mode_work_item(struct net_device *dev)
}
rtl92e_set_bb_reg(dev, rCCK0_System, bCCKSideBand,
- (priv->nCur40MhzPrimeSC>>1));
+ (priv->n_cur_40mhz_prime_sc>>1));
rtl92e_set_bb_reg(dev, rOFDM1_LSTF, 0xC00,
- priv->nCur40MhzPrimeSC);
+ priv->n_cur_40mhz_prime_sc);
rtl92e_set_bb_reg(dev, rFPGA0_AnalogParameter1, 0x00100000, 0);
break;
default:
netdev_err(dev, "%s(): unknown Bandwidth: %#X\n", __func__,
- priv->CurrentChannelBW);
+ priv->current_chnl_bw);
break;
}
@@ -1174,7 +1152,7 @@ static void _rtl92e_set_bw_mode_work_item(struct net_device *dev)
break;
case RF_8256:
- rtl92e_set_bandwidth(dev, priv->CurrentChannelBW);
+ rtl92e_set_bandwidth(dev, priv->current_chnl_bw);
break;
case RF_8258:
@@ -1190,7 +1168,7 @@ static void _rtl92e_set_bw_mode_work_item(struct net_device *dev)
}
atomic_dec(&(priv->rtllib->atm_swbw));
- priv->SetBWModeInProgress = false;
+ priv->set_bw_mode_in_progress = false;
}
void rtl92e_set_bw_mode(struct net_device *dev, enum ht_channel_width bandwidth,
@@ -1199,20 +1177,20 @@ void rtl92e_set_bw_mode(struct net_device *dev, enum ht_channel_width bandwidth,
struct r8192_priv *priv = rtllib_priv(dev);
- if (priv->SetBWModeInProgress)
+ if (priv->set_bw_mode_in_progress)
return;
atomic_inc(&(priv->rtllib->atm_swbw));
- priv->SetBWModeInProgress = true;
+ priv->set_bw_mode_in_progress = true;
- priv->CurrentChannelBW = bandwidth;
+ priv->current_chnl_bw = bandwidth;
if (Offset == HT_EXTCHNL_OFFSET_LOWER)
- priv->nCur40MhzPrimeSC = HAL_PRIME_CHNL_OFFSET_UPPER;
+ priv->n_cur_40mhz_prime_sc = HAL_PRIME_CHNL_OFFSET_UPPER;
else if (Offset == HT_EXTCHNL_OFFSET_UPPER)
- priv->nCur40MhzPrimeSC = HAL_PRIME_CHNL_OFFSET_LOWER;
+ priv->n_cur_40mhz_prime_sc = HAL_PRIME_CHNL_OFFSET_LOWER;
else
- priv->nCur40MhzPrimeSC = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
+ priv->n_cur_40mhz_prime_sc = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
_rtl92e_set_bw_mode_work_item(dev);
@@ -1295,7 +1273,7 @@ void rtl92e_set_rf_off(struct net_device *dev)
rtl92e_set_bb_reg(dev, rOFDM1_TRxPathEnable, 0xf, 0x0);
rtl92e_set_bb_reg(dev, rFPGA0_AnalogParameter1, 0x60, 0x0);
rtl92e_set_bb_reg(dev, rFPGA0_AnalogParameter1, 0x4, 0x0);
- rtl92e_writeb(dev, ANAPAR_FOR_8192PciE, 0x07);
+ rtl92e_writeb(dev, ANAPAR_FOR_8192PCIE, 0x07);
}
@@ -1309,9 +1287,9 @@ static bool _rtl92e_set_rf_power_state(struct net_device *dev,
u8 i = 0, QueueID = 0;
struct rtl8192_tx_ring *ring = NULL;
- if (priv->SetRFPowerStateInProgress)
+ if (priv->set_rf_pwr_state_in_progress)
return false;
- priv->SetRFPowerStateInProgress = true;
+ priv->set_rf_pwr_state_in_progress = true;
switch (priv->rf_chip) {
case RF_8256:
@@ -1331,7 +1309,7 @@ static bool _rtl92e_set_rf_power_state(struct net_device *dev,
netdev_err(dev,
"%s(): Failed to initialize Adapter.\n",
__func__);
- priv->SetRFPowerStateInProgress = false;
+ priv->set_rf_pwr_state_in_progress = false;
return false;
}
@@ -1438,7 +1416,7 @@ static bool _rtl92e_set_rf_power_state(struct net_device *dev,
}
}
- priv->SetRFPowerStateInProgress = false;
+ priv->set_rf_pwr_state_in_progress = false;
return bResult;
}
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_phyreg.h b/drivers/staging/rtl8192e/rtl8192e/r8192E_phyreg.h
index 433272a2aae8..f846f109ed98 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_phyreg.h
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_phyreg.h
@@ -42,8 +42,6 @@
#define CCK_TXAGC 0x348
/* Mac block on/off control register */
-#define MacBlkCtrl 0x403
-
#define rFPGA0_RFMOD 0x800 /* RF mode & CCK TxSC */
#define rFPGA0_TxInfo 0x804
#define rFPGA0_PSDFunction 0x808
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
index f8fbe78ccad9..104b16cfa979 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
@@ -692,16 +692,13 @@ static int _rtl92e_sta_up(struct net_device *dev, bool is_silent_reset)
priv->rtllib->ieee_up = 1;
priv->up_first_time = 0;
- priv->bfirst_init = true;
init_status = priv->ops->initialize_adapter(dev);
if (!init_status) {
netdev_err(dev, "%s(): Initialization failed!\n", __func__);
- priv->bfirst_init = false;
return -1;
}
RT_CLEAR_PS_LEVEL(psc, RT_RF_OFF_LEVL_HALT_NIC);
- priv->bfirst_init = false;
if (priv->polling_timer_on == 0)
rtl92e_check_rfctrl_gpio_timer(&priv->gpio_polling_timer);
@@ -837,7 +834,6 @@ static void _rtl92e_init_priv_variable(struct net_device *dev)
priv->blinked_ingpio = false;
priv->being_init_adapter = false;
priv->bdisable_nic = false;
- priv->bfirst_init = false;
priv->txringcount = 64;
priv->rxbuffersize = 9100;
priv->rxringcount = MAX_RX_COUNT;
@@ -862,7 +858,7 @@ static void _rtl92e_init_priv_variable(struct net_device *dev)
priv->cck_present_attn = 0;
priv->rfa_txpowertrackingindex = 0;
priv->rfc_txpowertrackingindex = 0;
- priv->CckPwEnl = 6;
+ priv->cck_pwr_enl = 6;
priv->rst_progress = RESET_TYPE_NORESET;
priv->force_reset = false;
memset(priv->rtllib->swcamtable, 0, sizeof(struct sw_cam_table) * 32);
@@ -872,7 +868,7 @@ static void _rtl92e_init_priv_variable(struct net_device *dev)
priv->rtllib->rf_off_reason = 0;
priv->rf_change_in_progress = false;
priv->hw_rf_off_action = 0;
- priv->SetRFPowerStateInProgress = false;
+ priv->set_rf_pwr_state_in_progress = false;
priv->rtllib->pwr_save_ctrl.bLeisurePs = true;
priv->rtllib->LPSDelayCnt = 0;
priv->rtllib->sta_sleep = LPS_IS_WAKE;
@@ -891,8 +887,8 @@ static void _rtl92e_init_priv_variable(struct net_device *dev)
priv->card_type = PCI;
- priv->pFirmware = vzalloc(sizeof(struct rt_firmware));
- if (!priv->pFirmware)
+ priv->fw_info = vzalloc(sizeof(struct rt_firmware));
+ if (!priv->fw_info)
netdev_err(dev,
"rtl8192e: Unable to allocate space for firmware\n");
@@ -952,13 +948,13 @@ static short _rtl92e_get_channel_map(struct net_device *dev)
return -1;
}
- if (priv->ChannelPlan >= COUNTRY_CODE_MAX) {
+ if (priv->chnl_plan >= COUNTRY_CODE_MAX) {
netdev_info(dev,
"rtl819x_init:Error channel plan! Set to default.\n");
- priv->ChannelPlan = COUNTRY_CODE_FCC;
+ priv->chnl_plan = COUNTRY_CODE_FCC;
}
dot11d_init(priv->rtllib);
- dot11d_channel_map(priv->ChannelPlan, priv->rtllib);
+ dot11d_channel_map(priv->chnl_plan, priv->rtllib);
for (i = 1; i <= 11; i++)
(priv->rtllib->active_channel_map)[i] = 1;
(priv->rtllib->active_channel_map)[12] = 2;
@@ -1138,7 +1134,7 @@ static void _rtl92e_if_silent_reset(struct net_device *dev)
goto END;
}
priv->rf_change_in_progress = true;
- priv->bResetInProgress = true;
+ priv->reset_in_progress = true;
spin_unlock_irqrestore(&priv->rf_ps_lock, flag);
RESET_START:
@@ -1229,7 +1225,7 @@ RESET_START:
END:
priv->rst_progress = RESET_TYPE_NORESET;
priv->reset_count++;
- priv->bResetInProgress = false;
+ priv->reset_in_progress = false;
rtl92e_writeb(dev, UFWP, 1);
}
@@ -1397,7 +1393,7 @@ static void _rtl92e_watchdog_wq_cb(void *data)
if ((priv->force_reset || ResetType == RESET_TYPE_SILENT))
_rtl92e_if_silent_reset(dev);
priv->force_reset = false;
- priv->bResetInProgress = false;
+ priv->reset_in_progress = false;
}
static void _rtl92e_watchdog_timer_cb(struct timer_list *t)
@@ -1486,7 +1482,7 @@ static void _rtl92e_hard_data_xmit(struct sk_buff *skb, struct net_device *dev,
u8 queue_index = tcb_desc->queue_index;
if ((priv->rtllib->rf_power_state == rf_off) || !priv->up ||
- priv->bResetInProgress) {
+ priv->reset_in_progress) {
kfree_skb(skb);
return;
}
@@ -1519,7 +1515,7 @@ static int _rtl92e_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (queue_index != TXCMD_QUEUE) {
if ((priv->rtllib->rf_power_state == rf_off) ||
- !priv->up || priv->bResetInProgress) {
+ !priv->up || priv->reset_in_progress) {
kfree_skb(skb);
return 0;
}
@@ -1620,11 +1616,7 @@ static short _rtl92e_tx(struct net_device *dev, struct sk_buff *skb)
type = WLAN_FC_GET_TYPE(fc);
pda_addr = header->addr1;
- if (is_broadcast_ether_addr(pda_addr))
- priv->stats.txbytesbroadcast += skb->len - fwinfo_size;
- else if (is_multicast_ether_addr(pda_addr))
- priv->stats.txbytesmulticast += skb->len - fwinfo_size;
- else
+ if (!is_broadcast_ether_addr(pda_addr) && !is_multicast_ether_addr(pda_addr))
priv->stats.txbytesunicast += skb->len - fwinfo_size;
spin_lock_irqsave(&priv->irq_th_lock, flags);
@@ -1654,7 +1646,7 @@ static short _rtl92e_tx(struct net_device *dev, struct sk_buff *skb)
spin_unlock_irqrestore(&priv->irq_th_lock, flags);
netif_trans_update(dev);
- rtl92e_writew(dev, TPPoll, 0x01 << tcb_desc->queue_index);
+ rtl92e_writew(dev, TP_POLL, 0x01 << tcb_desc->queue_index);
return 0;
}
@@ -1807,9 +1799,9 @@ void rtl92e_update_rx_pkt_timestamp(struct net_device *dev,
struct r8192_priv *priv = rtllib_priv(dev);
if (stats->bIsAMPDU && !stats->bFirstMPDU)
- stats->mac_time = priv->LastRxDescTSF;
+ stats->mac_time = priv->last_rx_desc_tsf;
else
- priv->LastRxDescTSF = stats->mac_time;
+ priv->last_rx_desc_tsf = stats->mac_time;
}
long rtl92e_translate_to_dbm(struct r8192_priv *priv, u8 signal_strength_index)
@@ -1914,7 +1906,7 @@ static void _rtl92e_rx_normal(struct net_device *dev)
skb_put(skb, pdesc->Length);
skb_reserve(skb, stats.RxDrvInfoSize +
stats.RxBufShift);
- skb_trim(skb, skb->len - 4/*sCrcLng*/);
+ skb_trim(skb, skb->len - S_CRC_LEN);
rtllib_hdr = (struct rtllib_hdr_1addr *)skb->data;
if (!is_multicast_ether_addr(rtllib_hdr->addr1)) {
/* unicast packet */
@@ -1930,19 +1922,11 @@ static void _rtl92e_rx_normal(struct net_device *dev)
priv->rtllib->LedControlHandler(dev,
LED_CTL_RX);
- if (stats.bCRC) {
- if (type != RTLLIB_FTYPE_MGMT)
- priv->stats.rxdatacrcerr++;
- else
- priv->stats.rxmgmtcrcerr++;
- }
-
skb_len = skb->len;
if (!rtllib_rx(priv->rtllib, skb, &stats)) {
dev_kfree_skb_any(skb);
} else {
- priv->stats.rxok++;
if (unicast_packet)
priv->stats.rxbytesunicast += skb_len;
}
@@ -2135,7 +2119,6 @@ static irqreturn_t _rtl92e_irq(int irq, void *netdev)
spin_lock_irqsave(&priv->irq_th_lock, flags);
priv->ops->interrupt_recognized(dev, &inta, &intb);
- priv->stats.shints++;
if (!inta) {
spin_unlock_irqrestore(&priv->irq_th_lock, flags);
@@ -2147,21 +2130,12 @@ static irqreturn_t _rtl92e_irq(int irq, void *netdev)
goto done;
}
- priv->stats.ints++;
-
if (!netif_running(dev)) {
spin_unlock_irqrestore(&priv->irq_th_lock, flags);
goto done;
}
- if (inta & IMR_TBDOK)
- priv->stats.txbeaconokint++;
-
- if (inta & IMR_TBDER)
- priv->stats.txbeaconerr++;
-
if (inta & IMR_MGNTDOK) {
- priv->stats.txmanageokint++;
_rtl92e_tx_isr(dev, MGNT_QUEUE);
spin_unlock_irqrestore(&priv->irq_th_lock, flags);
if (priv->rtllib->ack_tx_to_ieee) {
@@ -2173,57 +2147,43 @@ static irqreturn_t _rtl92e_irq(int irq, void *netdev)
spin_lock_irqsave(&priv->irq_th_lock, flags);
}
- if (inta & IMR_COMDOK) {
- priv->stats.txcmdpktokint++;
+ if (inta & IMR_COMDOK)
_rtl92e_tx_isr(dev, TXCMD_QUEUE);
- }
if (inta & IMR_HIGHDOK)
_rtl92e_tx_isr(dev, HIGH_QUEUE);
- if (inta & IMR_ROK) {
- priv->stats.rxint++;
+ if (inta & IMR_ROK)
tasklet_schedule(&priv->irq_rx_tasklet);
- }
if (inta & IMR_BcnInt)
tasklet_schedule(&priv->irq_prepare_beacon_tasklet);
if (inta & IMR_RDU) {
- priv->stats.rxrdu++;
rtl92e_writel(dev, INTA_MASK,
rtl92e_readl(dev, INTA_MASK) & ~IMR_RDU);
tasklet_schedule(&priv->irq_rx_tasklet);
}
- if (inta & IMR_RXFOVW) {
- priv->stats.rxoverflow++;
+ if (inta & IMR_RXFOVW)
tasklet_schedule(&priv->irq_rx_tasklet);
- }
-
- if (inta & IMR_TXFOVW)
- priv->stats.txoverflow++;
if (inta & IMR_BKDOK) {
- priv->stats.txbkokint++;
priv->rtllib->link_detect_info.NumTxOkInPeriod++;
_rtl92e_tx_isr(dev, BK_QUEUE);
}
if (inta & IMR_BEDOK) {
- priv->stats.txbeokint++;
priv->rtllib->link_detect_info.NumTxOkInPeriod++;
_rtl92e_tx_isr(dev, BE_QUEUE);
}
if (inta & IMR_VIDOK) {
- priv->stats.txviokint++;
priv->rtllib->link_detect_info.NumTxOkInPeriod++;
_rtl92e_tx_isr(dev, VI_QUEUE);
}
if (inta & IMR_VODOK) {
- priv->stats.txvookint++;
priv->rtllib->link_detect_info.NumTxOkInPeriod++;
_rtl92e_tx_isr(dev, VO_QUEUE);
}
@@ -2386,8 +2346,8 @@ static void _rtl92e_pci_disconnect(struct pci_dev *pdev)
priv->polling_timer_on = 0;
_rtl92e_down(dev, true);
rtl92e_dm_deinit(dev);
- vfree(priv->pFirmware);
- priv->pFirmware = NULL;
+ vfree(priv->fw_info);
+ priv->fw_info = NULL;
_rtl92e_free_rx_ring(dev);
for (i = 0; i < MAX_TX_QUEUE_COUNT; i++)
_rtl92e_free_tx_ring(dev, i);
@@ -2423,7 +2383,6 @@ bool rtl92e_enable_nic(struct net_device *dev)
return false;
}
- priv->bfirst_init = true;
init_status = priv->ops->initialize_adapter(dev);
if (!init_status) {
netdev_warn(dev, "%s(): Initialization failed!\n", __func__);
@@ -2431,7 +2390,6 @@ bool rtl92e_enable_nic(struct net_device *dev)
return false;
}
RT_CLEAR_PS_LEVEL(psc, RT_RF_OFF_LEVL_HALT_NIC);
- priv->bfirst_init = false;
rtl92e_irq_enable(dev);
priv->bdisable_nic = false;
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.h b/drivers/staging/rtl8192e/rtl8192e/rtl_core.h
index cceb77492363..fd96eef90c7f 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.h
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.h
@@ -90,11 +90,11 @@
#define PHY_RSSI_SLID_WIN_MAX 100
-#define TxBBGainTableLength 37
-#define CCKTxBBGainTableLength 23
+#define TX_BB_GAIN_TABLE_LEN 37
+#define CCK_TX_BB_GAIN_TABLE_LEN 23
#define CHANNEL_PLAN_LEN 10
-#define sCrcLng 4
+#define S_CRC_LEN 4
#define NIC_SEND_HANG_THRESHOLD_NORMAL 4
#define NIC_SEND_HANG_THRESHOLD_POWERSAVE 8
@@ -145,35 +145,13 @@ enum rt_rf_type_819xu {
enum rt_customer_id {
RT_CID_DEFAULT = 0,
- RT_CID_8187_ALPHA0 = 1,
- RT_CID_8187_SERCOMM_PS = 2,
- RT_CID_8187_HW_LED = 3,
- RT_CID_8187_NETGEAR = 4,
- RT_CID_WHQL = 5,
RT_CID_819x_CAMEO = 6,
RT_CID_819x_RUNTOP = 7,
- RT_CID_819x_Senao = 8,
RT_CID_TOSHIBA = 9,
- RT_CID_819x_Netcore = 10,
+ RT_CID_819X_NETCORE = 10,
RT_CID_Nettronix = 11,
RT_CID_DLINK = 12,
RT_CID_PRONET = 13,
- RT_CID_COREGA = 14,
- RT_CID_819x_ALPHA = 15,
- RT_CID_819x_Sitecom = 16,
- RT_CID_CCX = 17,
- RT_CID_819x_Lenovo = 18,
- RT_CID_819x_QMI = 19,
- RT_CID_819x_Edimax_Belkin = 20,
- RT_CID_819x_Sercomm_Belkin = 21,
- RT_CID_819x_CAMEO1 = 22,
- RT_CID_819x_MSI = 23,
- RT_CID_819x_Acer = 24,
- RT_CID_819x_HP = 27,
- RT_CID_819x_CLEVO = 28,
- RT_CID_819x_Arcadyan_Belkin = 29,
- RT_CID_819x_SAMSUNG = 30,
- RT_CID_819x_WNC_COREGA = 31,
};
enum reset_type {
@@ -183,37 +161,7 @@ enum reset_type {
};
struct rt_stats {
- unsigned long rxrdu;
- unsigned long rxok;
- unsigned long rxdatacrcerr;
- unsigned long rxmgmtcrcerr;
- unsigned long rxcrcerrmin;
- unsigned long rxcrcerrmid;
- unsigned long rxcrcerrmax;
unsigned long received_rate_histogram[4][32];
- unsigned long received_preamble_GI[2][32];
- unsigned long numpacket_matchbssid;
- unsigned long numpacket_toself;
- unsigned long num_process_phyinfo;
- unsigned long numqry_phystatus;
- unsigned long numqry_phystatusCCK;
- unsigned long numqry_phystatusHT;
- unsigned long received_bwtype[5];
- unsigned long rxoverflow;
- unsigned long rxint;
- unsigned long ints;
- unsigned long shints;
- unsigned long txoverflow;
- unsigned long txbeokint;
- unsigned long txbkokint;
- unsigned long txviokint;
- unsigned long txvookint;
- unsigned long txbeaconokint;
- unsigned long txbeaconerr;
- unsigned long txmanageokint;
- unsigned long txcmdpktokint;
- unsigned long txbytesmulticast;
- unsigned long txbytesbroadcast;
unsigned long txbytesunicast;
unsigned long rxbytesunicast;
unsigned long txretrycount;
@@ -223,14 +171,12 @@ struct rt_stats {
unsigned long slide_rssi_total;
unsigned long slide_evm_total;
long signal_strength;
- long signal_quality;
long last_signal_strength_inpercent;
long recv_signal_power;
u8 rx_rssi_percentage[4];
u8 rx_evm_percentage[2];
- long rxSNRdB[4];
- u32 Slide_Beacon_pwdb[100];
- u32 Slide_Beacon_Total;
+ u32 slide_beacon_pwdb[100];
+ u32 slide_beacon_total;
u32 CurrentShowTxate;
};
@@ -257,8 +203,6 @@ struct rtl8192_tx_ring {
struct sk_buff_head queue;
};
-
-
struct rtl819x_ops {
enum nic_t nic_type;
void (*get_eeprom_size)(struct net_device *dev);
@@ -298,7 +242,6 @@ struct r8192_priv {
struct pci_dev *pdev;
struct pci_dev *bridge_pdev;
- bool bfirst_init;
bool bfirst_after_down;
bool being_init_adapter;
@@ -317,16 +260,15 @@ struct r8192_priv {
struct work_struct reset_wq;
- enum rt_customer_id CustomerID;
-
+ enum rt_customer_id customer_id;
enum rt_rf_type_819xu rf_chip;
- enum ht_channel_width CurrentChannelBW;
- struct bb_reg_definition PHYRegDef[4];
+ enum ht_channel_width current_chnl_bw;
+ struct bb_reg_definition phy_reg_def[4];
struct rate_adaptive rate_adaptive;
- struct rt_firmware *pFirmware;
- enum rtl819x_loopback LoopbackMode;
+ struct rt_firmware *fw_info;
+ enum rtl819x_loopback loopback_mode;
struct timer_list watch_dog_timer;
struct timer_list fsync_timer;
@@ -360,9 +302,9 @@ struct r8192_priv {
int rxringcount;
u16 rxbuffersize;
- u64 LastRxDescTSF;
+ u64 last_rx_desc_tsf;
- u32 ReceiveConfig;
+ u32 receive_config;
u8 retry_data;
u8 retry_rts;
u16 rts;
@@ -371,8 +313,8 @@ struct r8192_priv {
int txringcount;
atomic_t tx_pending[0x10];
- u16 ShortRetryLimit;
- u16 LongRetryLimit;
+ u16 short_retry_limit;
+ u16 long_retry_limit;
bool hw_radio_off;
bool blinked_ingpio;
@@ -396,94 +338,82 @@ struct r8192_priv {
u32 irq_mask[2];
- u8 Rf_Mode;
+ u8 rf_mode;
enum nic_t card_8192;
u8 card_8192_version;
u8 rf_type;
- u8 IC_Cut;
+ u8 ic_cut;
char nick[IW_ESSID_MAX_SIZE + 1];
u8 check_roaming_cnt;
- u32 SilentResetRxSlotIndex;
- u32 SilentResetRxStuckEvent[MAX_SILENT_RESET_RX_SLOT_NUM];
+ u32 silent_reset_rx_slot_index;
+ u32 silent_reset_rx_stuck_event[MAX_SILENT_RESET_RX_SLOT_NUM];
u16 basic_rate;
u8 short_preamble;
u8 dot11_current_preamble_mode;
u8 slot_time;
- u16 SifsTime;
- bool AutoloadFailFlag;
+ bool autoload_fail_flag;
short epromtype;
u16 eeprom_vid;
u16 eeprom_did;
- u8 eeprom_CustomerID;
- u16 eeprom_ChannelPlan;
+ u8 eeprom_customer_id;
+ u16 eeprom_chnl_plan;
- u8 EEPROMTxPowerLevelCCK[14];
- u8 EEPROMTxPowerLevelOFDM24G[14];
- u8 EEPROMRfACCKChnl1TxPwLevel[3];
- u8 EEPROMRfAOfdmChnlTxPwLevel[3];
- u8 EEPROMRfCCCKChnl1TxPwLevel[3];
- u8 EEPROMRfCOfdmChnlTxPwLevel[3];
- u16 EEPROMAntPwDiff;
- u8 EEPROMThermalMeter;
- u8 EEPROMCrystalCap;
+ u8 eeprom_tx_pwr_level_cck[14];
+ u8 eeprom_tx_pwr_level_ofdm24g[14];
+ u16 eeprom_ant_pwr_diff;
+ u8 eeprom_thermal_meter;
+ u8 eeprom_crystal_cap;
- u8 EEPROMLegacyHTTxPowerDiff;
+ u8 eeprom_legacy_ht_tx_pwr_diff;
- u8 CrystalCap;
- u8 ThermalMeter[2];
+ u8 crystal_cap;
+ u8 thermal_meter[2];
- u8 SwChnlInProgress;
- u8 SwChnlStage;
- u8 SwChnlStep;
- u8 SetBWModeInProgress;
+ u8 sw_chnl_in_progress;
+ u8 sw_chnl_stage;
+ u8 sw_chnl_step;
+ u8 set_bw_mode_in_progress;
- u8 nCur40MhzPrimeSC;
+ u8 n_cur_40mhz_prime_sc;
- u32 RfReg0Value[4];
- u8 NumTotalRFPath;
+ u32 rf_reg_0value[4];
+ u8 num_total_rf_path;
bool brfpath_rxenable[4];
- bool bTXPowerDataReadFromEEPORM;
+ bool tx_pwr_data_read_from_eeprom;
u16 reg_chnl_plan;
- u16 ChannelPlan;
+ u16 chnl_plan;
u8 hw_rf_off_action;
bool rf_change_in_progress;
- bool SetRFPowerStateInProgress;
+ bool set_rf_pwr_state_in_progress;
bool bdisable_nic;
- u8 DM_Type;
-
- u8 CckPwEnl;
- u16 TSSI_13dBm;
- u32 Pwr_Track;
- u8 CCKPresentAttentuation_20Mdefault;
- u8 CCKPresentAttentuation_40Mdefault;
- s8 CCKPresentAttentuation_difference;
+ u8 cck_pwr_enl;
+ u16 tssi_13dBm;
+ u32 pwr_track;
+ u8 cck_present_attn_20m_def;
+ u8 cck_present_attn_40m_def;
+ s8 cck_present_attn_diff;
s8 cck_present_attn;
long undecorated_smoothed_pwdb;
- u32 MCSTxPowerLevelOriginalOffset[6];
- u8 TxPowerLevelCCK[14];
- u8 TxPowerLevelCCK_A[14];
- u8 TxPowerLevelCCK_C[14];
- u8 TxPowerLevelOFDM24G[14];
- u8 TxPowerLevelOFDM24G_A[14];
- u8 TxPowerLevelOFDM24G_C[14];
- u8 LegacyHTTxPowerDiff;
- s8 RF_C_TxPwDiff;
- u8 AntennaTxPwDiff[3];
-
- bool bDynamicTxHighPower;
- bool bDynamicTxLowPower;
- bool bLastDTPFlag_High;
- bool bLastDTPFlag_Low;
+ u32 mcs_tx_pwr_level_org_offset[6];
+ u8 tx_pwr_level_cck[14];
+ u8 tx_pwr_level_ofdm_24g[14];
+ u8 legacy_ht_tx_pwr_diff;
+ u8 antenna_tx_pwr_diff[3];
+
+ bool dynamic_tx_high_pwr;
+ bool dynamic_tx_low_pwr;
+ bool last_dtp_flag_high;
+ bool last_dtp_flag_low;
u8 rfa_txpowertrackingindex;
u8 rfa_txpowertrackingindex_real;
@@ -494,42 +424,38 @@ struct r8192_priv {
bool bcck_in_ch14;
u8 txpower_count;
- bool btxpower_trackingInit;
+ bool tx_pwr_tracking_init;
- u8 OFDM_index[2];
- u8 CCK_index;
+ u8 ofdm_index[2];
+ u8 cck_index;
- u8 Record_CCK_20Mindex;
- u8 Record_CCK_40Mindex;
+ u8 rec_cck_20m_idx;
+ u8 rec_cck_40m_idx;
struct init_gain initgain_backup;
- u8 DefaultInitialGain[4];
+ u8 def_initial_gain[4];
bool bis_any_nonbepkts;
bool bcurrent_turbo_EDCA;
bool bis_cur_rdlstate;
bool bfsync_processing;
u32 rate_record;
- u32 rateCountDiffRecord;
- u32 ContinueDiffCount;
+ u32 rate_count_diff_rec;
+ u32 continue_diff_count;
bool bswitch_fsync;
u8 framesync;
- u32 framesyncC34;
- u8 framesyncMonitor;
+ u8 frame_sync_monitor;
u32 reset_count;
enum reset_type rst_progress;
- u16 TxCounter;
+ u16 tx_counter;
u16 rx_ctr;
- bool bResetInProgress;
+ bool reset_in_progress;
bool force_reset;
bool force_lps;
bool chan_forced;
-
- u8 PwrDomainProtect;
- u8 H2CTxCmdSeq;
};
extern const struct ethtool_ops rtl819x_ethtool_ops;
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
index a18393c8a833..d8455b23e555 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
@@ -46,7 +46,7 @@ static u32 edca_setting_UL[HT_IOT_PEER_MAX] = {
0x5e4332
};
-const u32 dm_tx_bb_gain[TxBBGainTableLength] = {
+const u32 dm_tx_bb_gain[TX_BB_GAIN_TABLE_LEN] = {
0x7f8001fe, /* 12 dB */
0x788001e2, /* 11 dB */
0x71c001c7,
@@ -86,7 +86,7 @@ const u32 dm_tx_bb_gain[TxBBGainTableLength] = {
0x10000040, /* -24 dB */
};
-const u8 dm_cck_tx_bb_gain[CCKTxBBGainTableLength][8] = {
+const u8 dm_cck_tx_bb_gain[CCK_TX_BB_GAIN_TABLE_LEN][8] = {
{0x36, 0x35, 0x2e, 0x25, 0x1c, 0x12, 0x09, 0x04},
{0x33, 0x32, 0x2b, 0x23, 0x1a, 0x11, 0x08, 0x04},
{0x30, 0x2f, 0x29, 0x21, 0x19, 0x10, 0x08, 0x03},
@@ -112,7 +112,7 @@ const u8 dm_cck_tx_bb_gain[CCKTxBBGainTableLength][8] = {
{0x0f, 0x0f, 0x0d, 0x0b, 0x08, 0x05, 0x03, 0x01}
};
-const u8 dm_cck_tx_bb_gain_ch14[CCKTxBBGainTableLength][8] = {
+const u8 dm_cck_tx_bb_gain_ch14[CCK_TX_BB_GAIN_TABLE_LEN][8] = {
{0x36, 0x35, 0x2e, 0x1b, 0x00, 0x00, 0x00, 0x00},
{0x33, 0x32, 0x2b, 0x19, 0x00, 0x00, 0x00, 0x00},
{0x30, 0x2f, 0x29, 0x18, 0x00, 0x00, 0x00, 0x00},
@@ -144,7 +144,7 @@ const u8 dm_cck_tx_bb_gain_ch14[CCKTxBBGainTableLength][8] = {
/*------------------------Define global variable-----------------------------*/
struct dig_t dm_digtable;
-struct drx_path_sel DM_RxPathSelTable;
+struct drx_path_sel dm_rx_path_sel_table;
/*------------------------Define global variable-----------------------------*/
@@ -203,8 +203,6 @@ void rtl92e_dm_init(struct net_device *dev)
{
struct r8192_priv *priv = rtllib_priv(dev);
- priv->DM_Type = DM_Type_ByDriver;
-
priv->undecorated_smoothed_pwdb = -1;
_rtl92e_dm_init_dynamic_tx_power(dev);
@@ -284,15 +282,15 @@ void rtl92e_init_adaptive_rate(struct net_device *dev)
struct rate_adaptive *pra = &priv->rate_adaptive;
pra->ratr_state = DM_RATR_STA_MAX;
- pra->high2low_rssi_thresh_for_ra = RateAdaptiveTH_High;
- pra->low2high_rssi_thresh_for_ra20M = RateAdaptiveTH_Low_20M+5;
- pra->low2high_rssi_thresh_for_ra40M = RateAdaptiveTH_Low_40M+5;
+ pra->high2low_rssi_thresh_for_ra = RATE_ADAPTIVE_TH_HIGH;
+ pra->low2high_rssi_thresh_for_ra20M = RATE_ADAPTIVE_TH_LOW_20M + 5;
+ pra->low2high_rssi_thresh_for_ra40M = RATE_ADAPTIVE_TH_LOW_40M + 5;
- pra->high_rssi_thresh_for_ra = RateAdaptiveTH_High+5;
- pra->low_rssi_thresh_for_ra20M = RateAdaptiveTH_Low_20M;
- pra->low_rssi_thresh_for_ra40M = RateAdaptiveTH_Low_40M;
+ pra->high_rssi_thresh_for_ra = RATE_ADAPTIVE_TH_HIGH + 5;
+ pra->low_rssi_thresh_for_ra20M = RATE_ADAPTIVE_TH_LOW_20M;
+ pra->low_rssi_thresh_for_ra40M = RATE_ADAPTIVE_TH_LOW_40M;
- if (priv->CustomerID == RT_CID_819x_Netcore)
+ if (priv->customer_id == RT_CID_819X_NETCORE)
pra->ping_rssi_enable = 1;
else
pra->ping_rssi_enable = 0;
@@ -353,7 +351,7 @@ static void _rtl92e_dm_check_rate_adaptive(struct net_device *dev)
(pra->middle_rssi_threshold_ratr & (~BIT31)) |
((bshort_gi_enabled) ? BIT31 : 0);
- if (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20) {
+ if (priv->current_chnl_bw != HT_CHANNEL_WIDTH_20) {
pra->low_rssi_threshold_ratr =
(pra->low_rssi_threshold_ratr_40M & (~BIT31)) |
((bshort_gi_enabled) ? BIT31 : 0);
@@ -368,15 +366,15 @@ static void _rtl92e_dm_check_rate_adaptive(struct net_device *dev)
if (pra->ratr_state == DM_RATR_STA_HIGH) {
HighRSSIThreshForRA = pra->high2low_rssi_thresh_for_ra;
- LowRSSIThreshForRA = (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20) ?
+ LowRSSIThreshForRA = (priv->current_chnl_bw != HT_CHANNEL_WIDTH_20) ?
(pra->low_rssi_thresh_for_ra40M) : (pra->low_rssi_thresh_for_ra20M);
} else if (pra->ratr_state == DM_RATR_STA_LOW) {
HighRSSIThreshForRA = pra->high_rssi_thresh_for_ra;
- LowRSSIThreshForRA = (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20) ?
+ LowRSSIThreshForRA = (priv->current_chnl_bw != HT_CHANNEL_WIDTH_20) ?
(pra->low2high_rssi_thresh_for_ra40M) : (pra->low2high_rssi_thresh_for_ra20M);
} else {
HighRSSIThreshForRA = pra->high_rssi_thresh_for_ra;
- LowRSSIThreshForRA = (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20) ?
+ LowRSSIThreshForRA = (priv->current_chnl_bw != HT_CHANNEL_WIDTH_20) ?
(pra->low_rssi_thresh_for_ra40M) : (pra->low_rssi_thresh_for_ra20M);
}
@@ -443,8 +441,8 @@ static void _rtl92e_dm_bandwidth_autoswitch(struct net_device *dev)
{
struct r8192_priv *priv = rtllib_priv(dev);
- if (priv->CurrentChannelBW == HT_CHANNEL_WIDTH_20 ||
- !priv->rtllib->bandwidth_auto_switch.bautoswitch_enable)
+ if (priv->current_chnl_bw == HT_CHANNEL_WIDTH_20 ||
+ !priv->rtllib->bandwidth_auto_switch.bautoswitch_enable)
return;
if (!priv->rtllib->bandwidth_auto_switch.bforced_tx20Mhz) {
if (priv->undecorated_smoothed_pwdb <=
@@ -457,7 +455,7 @@ static void _rtl92e_dm_bandwidth_autoswitch(struct net_device *dev)
}
}
-static u32 OFDMSwingTable[OFDM_Table_Length] = {
+static u32 OFDMSwingTable[OFDM_TABLE_LEN] = {
0x7f8001fe,
0x71c001c7,
0x65400195,
@@ -479,7 +477,7 @@ static u32 OFDMSwingTable[OFDM_Table_Length] = {
0x10000040
};
-static u8 CCKSwingTable_Ch1_Ch13[CCK_Table_length][8] = {
+static u8 CCKSwingTable_Ch1_Ch13[CCK_TABLE_LEN][8] = {
{0x36, 0x35, 0x2e, 0x25, 0x1c, 0x12, 0x09, 0x04},
{0x30, 0x2f, 0x29, 0x21, 0x19, 0x10, 0x08, 0x03},
{0x2b, 0x2a, 0x25, 0x1e, 0x16, 0x0e, 0x07, 0x03},
@@ -494,7 +492,7 @@ static u8 CCKSwingTable_Ch1_Ch13[CCK_Table_length][8] = {
{0x0f, 0x0f, 0x0d, 0x0b, 0x08, 0x05, 0x03, 0x01}
};
-static u8 CCKSwingTable_Ch14[CCK_Table_length][8] = {
+static u8 CCKSwingTable_Ch14[CCK_TABLE_LEN][8] = {
{0x36, 0x35, 0x2e, 0x1b, 0x00, 0x00, 0x00, 0x00},
{0x30, 0x2f, 0x29, 0x18, 0x00, 0x00, 0x00, 0x00},
{0x2b, 0x2a, 0x25, 0x15, 0x00, 0x00, 0x00, 0x00},
@@ -570,8 +568,8 @@ static void _rtl92e_dm_tx_update_tssi_strong_signal(struct net_device *dev,
struct r8192_priv *p = rtllib_priv(dev);
if (RF_Type == RF_2T4R) {
- if ((p->rfa_txpowertrackingindex < TxBBGainTableLength - 1) &&
- (p->rfc_txpowertrackingindex < TxBBGainTableLength - 1)) {
+ if ((p->rfa_txpowertrackingindex < TX_BB_GAIN_TABLE_LEN - 1) &&
+ (p->rfc_txpowertrackingindex < TX_BB_GAIN_TABLE_LEN - 1)) {
p->rfa_txpowertrackingindex++;
p->rfa_txpowertrackingindex_real++;
rtl92e_set_bb_reg(dev, rOFDM0_XATxIQImbalance,
@@ -585,13 +583,13 @@ static void _rtl92e_dm_tx_update_tssi_strong_signal(struct net_device *dev,
} else {
rtl92e_set_bb_reg(dev, rOFDM0_XATxIQImbalance,
bMaskDWord,
- dm_tx_bb_gain[TxBBGainTableLength - 1]);
+ dm_tx_bb_gain[TX_BB_GAIN_TABLE_LEN - 1]);
rtl92e_set_bb_reg(dev, rOFDM0_XCTxIQImbalance,
bMaskDWord,
- dm_tx_bb_gain[TxBBGainTableLength - 1]);
+ dm_tx_bb_gain[TX_BB_GAIN_TABLE_LEN - 1]);
}
} else {
- if (p->rfa_txpowertrackingindex < (TxBBGainTableLength - 1)) {
+ if (p->rfa_txpowertrackingindex < (TX_BB_GAIN_TABLE_LEN - 1)) {
p->rfa_txpowertrackingindex++;
p->rfa_txpowertrackingindex_real++;
rtl92e_set_bb_reg(dev, rOFDM0_XATxIQImbalance,
@@ -600,7 +598,7 @@ static void _rtl92e_dm_tx_update_tssi_strong_signal(struct net_device *dev,
} else {
rtl92e_set_bb_reg(dev, rOFDM0_XATxIQImbalance,
bMaskDWord,
- dm_tx_bb_gain[TxBBGainTableLength - 1]);
+ dm_tx_bb_gain[TX_BB_GAIN_TABLE_LEN - 1]);
}
}
}
@@ -615,14 +613,14 @@ static void _rtl92e_dm_tx_power_tracking_callback_tssi(struct net_device *dev)
u8 RF_Type, tmp_report[5] = {0, 0, 0, 0, 0};
u32 Value;
u8 Pwr_Flag;
- u16 Avg_TSSI_Meas, TSSI_13dBm, Avg_TSSI_Meas_from_driver = 0;
+ u16 Avg_TSSI_Meas, tssi_13dBm, Avg_TSSI_Meas_from_driver = 0;
u32 delta = 0;
rtl92e_writeb(dev, Pw_Track_Flag, 0);
rtl92e_writeb(dev, FW_Busy_Flag, 0);
priv->rtllib->bdynamic_txpower_enable = false;
- powerlevelOFDM24G = priv->Pwr_Track >> 24;
+ powerlevelOFDM24G = priv->pwr_track >> 24;
RF_Type = priv->rf_type;
Value = (RF_Type<<8) | powerlevelOFDM24G;
@@ -640,7 +638,7 @@ static void _rtl92e_dm_tx_power_tracking_callback_tssi(struct net_device *dev)
if (Pwr_Flag == 0) {
mdelay(1);
- if (priv->bResetInProgress) {
+ if (priv->reset_in_progress) {
rtl92e_writeb(dev, Pw_Track_Flag, 0);
rtl92e_writeb(dev, FW_Busy_Flag, 0);
return;
@@ -688,12 +686,12 @@ static void _rtl92e_dm_tx_power_tracking_callback_tssi(struct net_device *dev)
Avg_TSSI_Meas_from_driver += tmp_report[k];
Avg_TSSI_Meas_from_driver *= 100 / 5;
- TSSI_13dBm = priv->TSSI_13dBm;
+ tssi_13dBm = priv->tssi_13dBm;
- if (Avg_TSSI_Meas_from_driver > TSSI_13dBm)
- delta = Avg_TSSI_Meas_from_driver - TSSI_13dBm;
+ if (Avg_TSSI_Meas_from_driver > tssi_13dBm)
+ delta = Avg_TSSI_Meas_from_driver - tssi_13dBm;
else
- delta = TSSI_13dBm - Avg_TSSI_Meas_from_driver;
+ delta = tssi_13dBm - Avg_TSSI_Meas_from_driver;
if (delta <= E_FOR_TX_POWER_TRACK) {
priv->rtllib->bdynamic_txpower_enable = true;
@@ -701,36 +699,36 @@ static void _rtl92e_dm_tx_power_tracking_callback_tssi(struct net_device *dev)
rtl92e_writeb(dev, FW_Busy_Flag, 0);
return;
}
- if (Avg_TSSI_Meas_from_driver < TSSI_13dBm - E_FOR_TX_POWER_TRACK)
+ if (Avg_TSSI_Meas_from_driver < tssi_13dBm - E_FOR_TX_POWER_TRACK)
_rtl92e_dm_tx_update_tssi_weak_signal(dev,
RF_Type);
else
_rtl92e_dm_tx_update_tssi_strong_signal(dev, RF_Type);
if (RF_Type == RF_2T4R) {
- priv->CCKPresentAttentuation_difference
+ priv->cck_present_attn_diff
= priv->rfa_txpowertrackingindex - priv->rfa_txpowertracking_default;
} else {
- priv->CCKPresentAttentuation_difference
+ priv->cck_present_attn_diff
= priv->rfa_txpowertrackingindex_real - priv->rfa_txpowertracking_default;
}
- if (priv->CurrentChannelBW == HT_CHANNEL_WIDTH_20)
+ if (priv->current_chnl_bw == HT_CHANNEL_WIDTH_20)
priv->cck_present_attn =
- priv->CCKPresentAttentuation_20Mdefault +
- priv->CCKPresentAttentuation_difference;
+ priv->cck_present_attn_20m_def +
+ priv->cck_present_attn_diff;
else
priv->cck_present_attn =
- priv->CCKPresentAttentuation_40Mdefault +
- priv->CCKPresentAttentuation_difference;
+ priv->cck_present_attn_40m_def +
+ priv->cck_present_attn_diff;
- if (priv->cck_present_attn > (CCKTxBBGainTableLength-1))
- priv->cck_present_attn = CCKTxBBGainTableLength-1;
+ if (priv->cck_present_attn > (CCK_TX_BB_GAIN_TABLE_LEN - 1))
+ priv->cck_present_attn = CCK_TX_BB_GAIN_TABLE_LEN - 1;
if (priv->cck_present_attn < 0)
priv->cck_present_attn = 0;
if (priv->cck_present_attn > -1 &&
- priv->cck_present_attn < CCKTxBBGainTableLength) {
+ priv->cck_present_attn < CCK_TX_BB_GAIN_TABLE_LEN) {
if (priv->rtllib->current_network.channel == 14 &&
!priv->bcck_in_ch14) {
priv->bcck_in_ch14 = true;
@@ -742,8 +740,8 @@ static void _rtl92e_dm_tx_power_tracking_callback_tssi(struct net_device *dev)
rtl92e_dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);
}
- if (priv->CCKPresentAttentuation_difference <= -12 ||
- priv->CCKPresentAttentuation_difference >= 24) {
+ if (priv->cck_present_attn_diff <= -12 ||
+ priv->cck_present_attn_diff >= 24) {
priv->rtllib->bdynamic_txpower_enable = true;
rtl92e_writeb(dev, Pw_Track_Flag, 0);
rtl92e_writeb(dev, FW_Busy_Flag, 0);
@@ -770,22 +768,22 @@ static void _rtl92e_dm_tx_power_tracking_cb_thermal(struct net_device *dev)
u8 tmpOFDMindex, tmpCCKindex, tmpCCK20Mindex, tmpCCK40Mindex, tmpval;
int i = 0, CCKSwingNeedUpdate = 0;
- if (!priv->btxpower_trackingInit) {
+ if (!priv->tx_pwr_tracking_init) {
tmpRegA = rtl92e_get_bb_reg(dev, rOFDM0_XATxIQImbalance,
bMaskDWord);
- for (i = 0; i < OFDM_Table_Length; i++) {
+ for (i = 0; i < OFDM_TABLE_LEN; i++) {
if (tmpRegA == OFDMSwingTable[i])
- priv->OFDM_index[0] = i;
+ priv->ofdm_index[0] = i;
}
TempCCk = rtl92e_get_bb_reg(dev, rCCK0_TxFilter1, bMaskByte2);
- for (i = 0; i < CCK_Table_length; i++) {
+ for (i = 0; i < CCK_TABLE_LEN; i++) {
if (TempCCk == (u32)CCKSwingTable_Ch1_Ch13[i][0]) {
- priv->CCK_index = i;
+ priv->cck_index = i;
break;
}
}
- priv->btxpower_trackingInit = true;
+ priv->tx_pwr_tracking_init = true;
return;
}
@@ -794,21 +792,21 @@ static void _rtl92e_dm_tx_power_tracking_cb_thermal(struct net_device *dev)
return;
if (tmpRegA >= 12)
tmpRegA = 12;
- priv->ThermalMeter[0] = ThermalMeterVal;
- priv->ThermalMeter[1] = ThermalMeterVal;
+ priv->thermal_meter[0] = ThermalMeterVal;
+ priv->thermal_meter[1] = ThermalMeterVal;
- if (priv->ThermalMeter[0] >= (u8)tmpRegA) {
- tmpOFDMindex = tmpCCK20Mindex = 6+(priv->ThermalMeter[0] -
+ if (priv->thermal_meter[0] >= (u8)tmpRegA) {
+ tmpOFDMindex = tmpCCK20Mindex = 6+(priv->thermal_meter[0] -
(u8)tmpRegA);
tmpCCK40Mindex = tmpCCK20Mindex - 6;
- if (tmpOFDMindex >= OFDM_Table_Length)
- tmpOFDMindex = OFDM_Table_Length-1;
- if (tmpCCK20Mindex >= CCK_Table_length)
- tmpCCK20Mindex = CCK_Table_length-1;
- if (tmpCCK40Mindex >= CCK_Table_length)
- tmpCCK40Mindex = CCK_Table_length-1;
+ if (tmpOFDMindex >= OFDM_TABLE_LEN)
+ tmpOFDMindex = OFDM_TABLE_LEN - 1;
+ if (tmpCCK20Mindex >= CCK_TABLE_LEN)
+ tmpCCK20Mindex = CCK_TABLE_LEN - 1;
+ if (tmpCCK40Mindex >= CCK_TABLE_LEN)
+ tmpCCK40Mindex = CCK_TABLE_LEN - 1;
} else {
- tmpval = (u8)tmpRegA - priv->ThermalMeter[0];
+ tmpval = (u8)tmpRegA - priv->thermal_meter[0];
if (tmpval >= 6) {
tmpOFDMindex = 0;
tmpCCK20Mindex = 0;
@@ -818,13 +816,13 @@ static void _rtl92e_dm_tx_power_tracking_cb_thermal(struct net_device *dev)
}
tmpCCK40Mindex = 0;
}
- if (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20)
+ if (priv->current_chnl_bw != HT_CHANNEL_WIDTH_20)
tmpCCKindex = tmpCCK40Mindex;
else
tmpCCKindex = tmpCCK20Mindex;
- priv->Record_CCK_20Mindex = tmpCCK20Mindex;
- priv->Record_CCK_40Mindex = tmpCCK40Mindex;
+ priv->rec_cck_20m_idx = tmpCCK20Mindex;
+ priv->rec_cck_40m_idx = tmpCCK40Mindex;
if (priv->rtllib->current_network.channel == 14 &&
!priv->bcck_in_ch14) {
@@ -836,17 +834,17 @@ static void _rtl92e_dm_tx_power_tracking_cb_thermal(struct net_device *dev)
CCKSwingNeedUpdate = 1;
}
- if (priv->CCK_index != tmpCCKindex) {
- priv->CCK_index = tmpCCKindex;
+ if (priv->cck_index != tmpCCKindex) {
+ priv->cck_index = tmpCCKindex;
CCKSwingNeedUpdate = 1;
}
if (CCKSwingNeedUpdate)
rtl92e_dm_cck_txpower_adjust(dev, priv->bcck_in_ch14);
- if (priv->OFDM_index[0] != tmpOFDMindex) {
- priv->OFDM_index[0] = tmpOFDMindex;
+ if (priv->ofdm_index[0] != tmpOFDMindex) {
+ priv->ofdm_index[0] = tmpOFDMindex;
rtl92e_set_bb_reg(dev, rOFDM0_XATxIQImbalance, bMaskDWord,
- OFDMSwingTable[priv->OFDM_index[0]]);
+ OFDMSwingTable[priv->ofdm_index[0]]);
}
priv->txpower_count = 0;
}
@@ -857,7 +855,7 @@ void rtl92e_dm_txpower_tracking_wq(void *data)
struct r8192_priv, txpower_tracking_wq);
struct net_device *dev = priv->rtllib->dev;
- if (priv->IC_Cut >= IC_VersionCut_D)
+ if (priv->ic_cut >= IC_VersionCut_D)
_rtl92e_dm_tx_power_tracking_callback_tssi(dev);
else
_rtl92e_dm_tx_power_tracking_cb_thermal(dev);
@@ -870,7 +868,7 @@ static void _rtl92e_dm_initialize_tx_power_tracking_tssi(struct net_device *dev)
priv->btxpower_tracking = true;
priv->txpower_count = 0;
- priv->btxpower_trackingInit = false;
+ priv->tx_pwr_tracking_init = false;
}
@@ -884,14 +882,14 @@ static void _rtl92e_dm_init_tx_power_tracking_thermal(struct net_device *dev)
else
priv->btxpower_tracking = false;
priv->txpower_count = 0;
- priv->btxpower_trackingInit = false;
+ priv->tx_pwr_tracking_init = false;
}
void rtl92e_dm_init_txpower_tracking(struct net_device *dev)
{
struct r8192_priv *priv = rtllib_priv(dev);
- if (priv->IC_Cut >= IC_VersionCut_D)
+ if (priv->ic_cut >= IC_VersionCut_D)
_rtl92e_dm_initialize_tx_power_tracking_tssi(dev);
else
_rtl92e_dm_init_tx_power_tracking_thermal(dev);
@@ -952,7 +950,7 @@ static void _rtl92e_dm_check_tx_power_tracking(struct net_device *dev)
{
struct r8192_priv *priv = rtllib_priv(dev);
- if (priv->IC_Cut >= IC_VersionCut_D)
+ if (priv->ic_cut >= IC_VersionCut_D)
_rtl92e_dm_check_tx_power_tracking_tssi(dev);
else
_rtl92e_dm_check_tx_power_tracking_thermal(dev);
@@ -1005,30 +1003,30 @@ static void _rtl92e_dm_cck_tx_power_adjust_thermal_meter(struct net_device *dev,
TempVal = 0;
if (!bInCH14) {
- TempVal = CCKSwingTable_Ch1_Ch13[priv->CCK_index][0] +
- (CCKSwingTable_Ch1_Ch13[priv->CCK_index][1] << 8);
+ TempVal = CCKSwingTable_Ch1_Ch13[priv->cck_index][0] +
+ (CCKSwingTable_Ch1_Ch13[priv->cck_index][1] << 8);
rtl92e_set_bb_reg(dev, rCCK0_TxFilter1, bMaskHWord, TempVal);
- TempVal = CCKSwingTable_Ch1_Ch13[priv->CCK_index][2] +
- (CCKSwingTable_Ch1_Ch13[priv->CCK_index][3] << 8) +
- (CCKSwingTable_Ch1_Ch13[priv->CCK_index][4] << 16)+
- (CCKSwingTable_Ch1_Ch13[priv->CCK_index][5] << 24);
+ TempVal = CCKSwingTable_Ch1_Ch13[priv->cck_index][2] +
+ (CCKSwingTable_Ch1_Ch13[priv->cck_index][3] << 8) +
+ (CCKSwingTable_Ch1_Ch13[priv->cck_index][4] << 16)+
+ (CCKSwingTable_Ch1_Ch13[priv->cck_index][5] << 24);
rtl92e_set_bb_reg(dev, rCCK0_TxFilter2, bMaskDWord, TempVal);
- TempVal = CCKSwingTable_Ch1_Ch13[priv->CCK_index][6] +
- (CCKSwingTable_Ch1_Ch13[priv->CCK_index][7] << 8);
+ TempVal = CCKSwingTable_Ch1_Ch13[priv->cck_index][6] +
+ (CCKSwingTable_Ch1_Ch13[priv->cck_index][7] << 8);
rtl92e_set_bb_reg(dev, rCCK0_DebugPort, bMaskLWord, TempVal);
} else {
- TempVal = CCKSwingTable_Ch14[priv->CCK_index][0] +
- (CCKSwingTable_Ch14[priv->CCK_index][1] << 8);
+ TempVal = CCKSwingTable_Ch14[priv->cck_index][0] +
+ (CCKSwingTable_Ch14[priv->cck_index][1] << 8);
rtl92e_set_bb_reg(dev, rCCK0_TxFilter1, bMaskHWord, TempVal);
- TempVal = CCKSwingTable_Ch14[priv->CCK_index][2] +
- (CCKSwingTable_Ch14[priv->CCK_index][3] << 8) +
- (CCKSwingTable_Ch14[priv->CCK_index][4] << 16)+
- (CCKSwingTable_Ch14[priv->CCK_index][5] << 24);
+ TempVal = CCKSwingTable_Ch14[priv->cck_index][2] +
+ (CCKSwingTable_Ch14[priv->cck_index][3] << 8) +
+ (CCKSwingTable_Ch14[priv->cck_index][4] << 16)+
+ (CCKSwingTable_Ch14[priv->cck_index][5] << 24);
rtl92e_set_bb_reg(dev, rCCK0_TxFilter2, bMaskDWord, TempVal);
- TempVal = CCKSwingTable_Ch14[priv->CCK_index][6] +
- (CCKSwingTable_Ch14[priv->CCK_index][7]<<8);
+ TempVal = CCKSwingTable_Ch14[priv->cck_index][6] +
+ (CCKSwingTable_Ch14[priv->cck_index][7]<<8);
rtl92e_set_bb_reg(dev, rCCK0_DebugPort, bMaskLWord, TempVal);
}
@@ -1038,7 +1036,7 @@ void rtl92e_dm_cck_txpower_adjust(struct net_device *dev, bool binch14)
{
struct r8192_priv *priv = rtllib_priv(dev);
- if (priv->IC_Cut >= IC_VersionCut_D)
+ if (priv->ic_cut >= IC_VersionCut_D)
_rtl92e_dm_cck_tx_power_adjust_tssi(dev, binch14);
else
_rtl92e_dm_cck_tx_power_adjust_thermal_meter(dev, binch14);
@@ -1075,7 +1073,7 @@ void rtl92e_dm_restore_state(struct net_device *dev)
ratr_value &= ~(RATE_ALL_OFDM_2SS);
rtl92e_writel(dev, RATR0, ratr_value);
rtl92e_writeb(dev, UFWP, 1);
- if (priv->btxpower_trackingInit && priv->btxpower_tracking)
+ if (priv->tx_pwr_tracking_init && priv->btxpower_tracking)
_rtl92e_dm_tx_power_reset_recovery(dev);
_rtl92e_dm_bb_initialgain_restore(dev);
@@ -1150,7 +1148,7 @@ static void _rtl92e_dm_dig_init(struct net_device *dev)
dm_digtable.rssi_val = 50;
dm_digtable.backoff_val = DM_DIG_BACKOFF;
dm_digtable.rx_gain_range_max = DM_DIG_MAX;
- if (priv->CustomerID == RT_CID_819x_Netcore)
+ if (priv->customer_id == RT_CID_819X_NETCORE)
dm_digtable.rx_gain_range_min = DM_DIG_MIN_Netcore;
else
dm_digtable.rx_gain_range_min = DM_DIG_MIN;
@@ -1260,7 +1258,7 @@ static void _rtl92e_dm_ctrl_initgain_byrssi_false_alarm(struct net_device *dev)
rtl92e_writeb(dev, rOFDM0_XCAGCCore1, 0x17);
rtl92e_writeb(dev, rOFDM0_XDAGCCore1, 0x17);
- if (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20)
+ if (priv->current_chnl_bw != HT_CHANNEL_WIDTH_20)
rtl92e_writeb(dev, (rOFDM0_XATxAFE+3), 0x00);
else
rtl92e_writeb(dev, rOFDM0_RxDetector1, 0x42);
@@ -1297,7 +1295,7 @@ static void _rtl92e_dm_ctrl_initgain_byrssi_false_alarm(struct net_device *dev)
rtl92e_writeb(dev, rOFDM0_XDAGCCore1, 0x20);
}
- if (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20)
+ if (priv->current_chnl_bw != HT_CHANNEL_WIDTH_20)
rtl92e_writeb(dev, (rOFDM0_XATxAFE+3), 0x20);
else
rtl92e_writeb(dev, rOFDM0_RxDetector1, 0x44);
@@ -1328,7 +1326,7 @@ static void _rtl92e_dm_ctrl_initgain_byrssi_highpwr(struct net_device *dev)
return;
dm_digtable.dig_highpwr_state = DM_STA_DIG_ON;
- if (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20)
+ if (priv->current_chnl_bw != HT_CHANNEL_WIDTH_20)
rtl92e_writeb(dev, (rOFDM0_XATxAFE+3), 0x10);
else
rtl92e_writeb(dev, rOFDM0_RxDetector1, 0x43);
@@ -1342,7 +1340,7 @@ static void _rtl92e_dm_ctrl_initgain_byrssi_highpwr(struct net_device *dev)
dm_digtable.rssi_high_power_lowthresh) &&
(priv->undecorated_smoothed_pwdb >=
dm_digtable.rssi_high_thresh)) {
- if (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20)
+ if (priv->current_chnl_bw != HT_CHANNEL_WIDTH_20)
rtl92e_writeb(dev, (rOFDM0_XATxAFE+3), 0x20);
else
rtl92e_writeb(dev, rOFDM0_RxDetector1, 0x44);
@@ -1378,12 +1376,12 @@ static void _rtl92e_dm_initial_gain(struct net_device *dev)
dm_digtable.cur_ig_value = gain_range;
} else {
if (dm_digtable.cur_ig_value == 0)
- dm_digtable.cur_ig_value = priv->DefaultInitialGain[0];
+ dm_digtable.cur_ig_value = priv->def_initial_gain[0];
else
dm_digtable.cur_ig_value = dm_digtable.pre_ig_value;
}
} else {
- dm_digtable.cur_ig_value = priv->DefaultInitialGain[0];
+ dm_digtable.cur_ig_value = priv->def_initial_gain[0];
dm_digtable.pre_ig_value = 0;
}
@@ -1453,18 +1451,18 @@ static void _rtl92e_dm_pd_th(struct net_device *dev)
if ((dm_digtable.prepd_thstate != dm_digtable.curpd_thstate) ||
(initialized <= 3) || force_write) {
if (dm_digtable.curpd_thstate == DIG_PD_AT_LOW_POWER) {
- if (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20)
+ if (priv->current_chnl_bw != HT_CHANNEL_WIDTH_20)
rtl92e_writeb(dev, (rOFDM0_XATxAFE+3), 0x00);
else
rtl92e_writeb(dev, rOFDM0_RxDetector1, 0x42);
} else if (dm_digtable.curpd_thstate ==
DIG_PD_AT_NORMAL_POWER) {
- if (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20)
+ if (priv->current_chnl_bw != HT_CHANNEL_WIDTH_20)
rtl92e_writeb(dev, (rOFDM0_XATxAFE+3), 0x20);
else
rtl92e_writeb(dev, rOFDM0_RxDetector1, 0x44);
} else if (dm_digtable.curpd_thstate == DIG_PD_AT_HIGH_POWER) {
- if (priv->CurrentChannelBW != HT_CHANNEL_WIDTH_20)
+ if (priv->current_chnl_bw != HT_CHANNEL_WIDTH_20)
rtl92e_writeb(dev, (rOFDM0_XATxAFE+3), 0x10);
else
rtl92e_writeb(dev, rOFDM0_RxDetector1, 0x43);
@@ -1651,7 +1649,7 @@ static void _rtl92e_dm_init_wa_broadcom_iot(struct net_device *dev)
struct rt_hi_throughput *ht_info = priv->rtllib->ht_info;
ht_info->bWAIotBroadcom = false;
- ht_info->WAIotTH = WAIotTHVal;
+ ht_info->WAIotTH = WA_IOT_TH_VAL;
}
static void _rtl92e_dm_check_rf_ctrl_gpio(void *data)
@@ -1722,7 +1720,7 @@ void rtl92e_dm_rf_pathcheck_wq(void *data)
else
priv->brfpath_rxenable[i] = false;
}
- if (!DM_RxPathSelTable.Enable)
+ if (!dm_rx_path_sel_table.enable)
return;
_rtl92e_dm_rx_path_sel_byrssi(dev);
@@ -1733,18 +1731,18 @@ static void _rtl92e_dm_init_rx_path_selection(struct net_device *dev)
u8 i;
struct r8192_priv *priv = rtllib_priv(dev);
- DM_RxPathSelTable.Enable = 1;
- DM_RxPathSelTable.SS_TH_low = RxPathSelection_SS_TH_low;
- DM_RxPathSelTable.diff_TH = RxPathSelection_diff_TH;
- if (priv->CustomerID == RT_CID_819x_Netcore)
- DM_RxPathSelTable.cck_method = CCK_Rx_Version_2;
+ dm_rx_path_sel_table.enable = 1;
+ dm_rx_path_sel_table.ss_th_low = RX_PATH_SEL_SS_TH_LOW;
+ dm_rx_path_sel_table.diff_th = RX_PATH_SEL_DIFF_TH;
+ if (priv->customer_id == RT_CID_819X_NETCORE)
+ dm_rx_path_sel_table.cck_method = CCK_Rx_Version_2;
else
- DM_RxPathSelTable.cck_method = CCK_Rx_Version_1;
- DM_RxPathSelTable.disabledRF = 0;
+ dm_rx_path_sel_table.cck_method = CCK_Rx_Version_1;
+ dm_rx_path_sel_table.disabled_rf = 0;
for (i = 0; i < 4; i++) {
- DM_RxPathSelTable.rf_rssi[i] = 50;
- DM_RxPathSelTable.cck_pwdb_sta[i] = -64;
- DM_RxPathSelTable.rf_enable_rssi_th[i] = 100;
+ dm_rx_path_sel_table.rf_rssi[i] = 50;
+ dm_rx_path_sel_table.cck_pwdb_sta[i] = -64;
+ dm_rx_path_sel_table.rf_enable_rssi_th[i] = 100;
}
}
@@ -1771,22 +1769,22 @@ static void _rtl92e_dm_rx_path_sel_byrssi(struct net_device *dev)
return;
if (!cck_Rx_Path_initialized) {
- DM_RxPathSelTable.cck_Rx_path = (rtl92e_readb(dev, 0xa07)&0xf);
+ dm_rx_path_sel_table.cck_rx_path = (rtl92e_readb(dev, 0xa07)&0xf);
cck_Rx_Path_initialized = 1;
}
- DM_RxPathSelTable.disabledRF = 0xf;
- DM_RxPathSelTable.disabledRF &= ~(rtl92e_readb(dev, 0xc04));
+ dm_rx_path_sel_table.disabled_rf = 0xf;
+ dm_rx_path_sel_table.disabled_rf &= ~(rtl92e_readb(dev, 0xc04));
if (priv->rtllib->mode == WIRELESS_MODE_B)
- DM_RxPathSelTable.cck_method = CCK_Rx_Version_2;
+ dm_rx_path_sel_table.cck_method = CCK_Rx_Version_2;
for (i = 0; i < RF90_PATH_MAX; i++) {
- DM_RxPathSelTable.rf_rssi[i] = priv->stats.rx_rssi_percentage[i];
+ dm_rx_path_sel_table.rf_rssi[i] = priv->stats.rx_rssi_percentage[i];
if (priv->brfpath_rxenable[i]) {
rf_num++;
- cur_rf_rssi = DM_RxPathSelTable.rf_rssi[i];
+ cur_rf_rssi = dm_rx_path_sel_table.rf_rssi[i];
if (rf_num == 1) {
max_rssi_index = min_rssi_index = sec_rssi_index = i;
@@ -1834,12 +1832,12 @@ static void _rtl92e_dm_rx_path_sel_byrssi(struct net_device *dev)
}
rf_num = 0;
- if (DM_RxPathSelTable.cck_method == CCK_Rx_Version_2) {
+ if (dm_rx_path_sel_table.cck_method == CCK_Rx_Version_2) {
for (i = 0; i < RF90_PATH_MAX; i++) {
if (priv->brfpath_rxenable[i]) {
rf_num++;
cur_cck_pwdb =
- DM_RxPathSelTable.cck_pwdb_sta[i];
+ dm_rx_path_sel_table.cck_pwdb_sta[i];
if (rf_num == 1) {
cck_rx_ver2_max_index = i;
@@ -1896,17 +1894,17 @@ static void _rtl92e_dm_rx_path_sel_byrssi(struct net_device *dev)
}
update_cck_rx_path = 0;
- if (DM_RxPathSelTable.cck_method == CCK_Rx_Version_2) {
+ if (dm_rx_path_sel_table.cck_method == CCK_Rx_Version_2) {
cck_default_Rx = cck_rx_ver2_max_index;
cck_optional_Rx = cck_rx_ver2_sec_index;
if (tmp_cck_max_pwdb != -64)
update_cck_rx_path = 1;
}
- if (tmp_min_rssi < DM_RxPathSelTable.SS_TH_low && disabled_rf_cnt < 2) {
+ if (tmp_min_rssi < dm_rx_path_sel_table.ss_th_low && disabled_rf_cnt < 2) {
if ((tmp_max_rssi - tmp_min_rssi) >=
- DM_RxPathSelTable.diff_TH) {
- DM_RxPathSelTable.rf_enable_rssi_th[min_rssi_index] =
+ dm_rx_path_sel_table.diff_th) {
+ dm_rx_path_sel_table.rf_enable_rssi_th[min_rssi_index] =
tmp_max_rssi+5;
rtl92e_set_bb_reg(dev, rOFDM0_TRxPathEnable,
0x1<<min_rssi_index, 0x0);
@@ -1914,7 +1912,7 @@ static void _rtl92e_dm_rx_path_sel_byrssi(struct net_device *dev)
0x1<<min_rssi_index, 0x0);
disabled_rf_cnt++;
}
- if (DM_RxPathSelTable.cck_method == CCK_Rx_Version_1) {
+ if (dm_rx_path_sel_table.cck_method == CCK_Rx_Version_1) {
cck_default_Rx = max_rssi_index;
cck_optional_Rx = sec_rssi_index;
if (tmp_max_rssi)
@@ -1923,24 +1921,24 @@ static void _rtl92e_dm_rx_path_sel_byrssi(struct net_device *dev)
}
if (update_cck_rx_path) {
- DM_RxPathSelTable.cck_Rx_path = (cck_default_Rx<<2) |
+ dm_rx_path_sel_table.cck_rx_path = (cck_default_Rx<<2) |
(cck_optional_Rx);
rtl92e_set_bb_reg(dev, rCCK0_AFESetting, 0x0f000000,
- DM_RxPathSelTable.cck_Rx_path);
+ dm_rx_path_sel_table.cck_rx_path);
}
- if (DM_RxPathSelTable.disabledRF) {
+ if (dm_rx_path_sel_table.disabled_rf) {
for (i = 0; i < 4; i++) {
- if ((DM_RxPathSelTable.disabledRF>>i) & 0x1) {
+ if ((dm_rx_path_sel_table.disabled_rf >> i) & 0x1) {
if (tmp_max_rssi >=
- DM_RxPathSelTable.rf_enable_rssi_th[i]) {
+ dm_rx_path_sel_table.rf_enable_rssi_th[i]) {
rtl92e_set_bb_reg(dev,
rOFDM0_TRxPathEnable,
0x1 << i, 0x1);
rtl92e_set_bb_reg(dev,
rOFDM1_TRxPathEnable,
0x1 << i, 0x1);
- DM_RxPathSelTable.rf_enable_rssi_th[i]
+ dm_rx_path_sel_table.rf_enable_rssi_th[i]
= 100;
disabled_rf_cnt--;
}
@@ -1969,7 +1967,7 @@ static void _rtl92e_dm_init_fsync(struct net_device *dev)
priv->rtllib->fsync_firstdiff_ratethreshold = 100;
priv->rtllib->fsync_seconddiff_ratethreshold = 200;
priv->rtllib->fsync_state = Default_Fsync;
- priv->framesyncMonitor = 1;
+ priv->frame_sync_monitor = 1;
timer_setup(&priv->fsync_timer, _rtl92e_dm_fsync_timer_callback, 0);
}
@@ -2008,31 +2006,31 @@ static void _rtl92e_dm_fsync_timer_callback(struct timer_list *t)
priv->rate_record;
else
rate_count_diff = rate_count - priv->rate_record;
- if (rate_count_diff < priv->rateCountDiffRecord) {
+ if (rate_count_diff < priv->rate_count_diff_rec) {
- u32 DiffNum = priv->rateCountDiffRecord -
+ u32 DiffNum = priv->rate_count_diff_rec -
rate_count_diff;
if (DiffNum >=
priv->rtllib->fsync_seconddiff_ratethreshold)
- priv->ContinueDiffCount++;
+ priv->continue_diff_count++;
else
- priv->ContinueDiffCount = 0;
+ priv->continue_diff_count = 0;
- if (priv->ContinueDiffCount >= 2) {
+ if (priv->continue_diff_count >= 2) {
bSwitchFromCountDiff = true;
- priv->ContinueDiffCount = 0;
+ priv->continue_diff_count = 0;
}
} else {
- priv->ContinueDiffCount = 0;
+ priv->continue_diff_count = 0;
}
if (rate_count_diff <=
priv->rtllib->fsync_firstdiff_ratethreshold) {
bSwitchFromCountDiff = true;
- priv->ContinueDiffCount = 0;
+ priv->continue_diff_count = 0;
}
priv->rate_record = rate_count;
- priv->rateCountDiffRecord = rate_count_diff;
+ priv->rate_count_diff_rec = rate_count_diff;
if (priv->undecorated_smoothed_pwdb >
priv->rtllib->fsync_rssi_threshold &&
bSwitchFromCountDiff) {
@@ -2073,7 +2071,7 @@ static void _rtl92e_dm_fsync_timer_callback(struct timer_list *t)
rtl92e_writeb(dev, 0xC36, 0x5c);
rtl92e_writeb(dev, 0xC3e, 0x96);
}
- priv->ContinueDiffCount = 0;
+ priv->continue_diff_count = 0;
rtl92e_writel(dev, rOFDM0_RxDetector2, 0x465c52cd);
}
}
@@ -2114,7 +2112,7 @@ static void _rtl92e_dm_end_sw_fsync(struct net_device *dev)
rtl92e_writeb(dev, 0xC3e, 0x96);
}
- priv->ContinueDiffCount = 0;
+ priv->continue_diff_count = 0;
rtl92e_writel(dev, rOFDM0_RxDetector2, 0x465c52cd);
}
@@ -2125,8 +2123,8 @@ static void _rtl92e_dm_start_sw_fsync(struct net_device *dev)
u32 rate_bitmap;
priv->rate_record = 0;
- priv->ContinueDiffCount = 0;
- priv->rateCountDiffRecord = 0;
+ priv->continue_diff_count = 0;
+ priv->rate_count_diff_rec = 0;
priv->bswitch_fsync = false;
if (priv->rtllib->mode == WIRELESS_MODE_N_24G) {
@@ -2196,7 +2194,7 @@ static void _rtl92e_dm_check_fsync(struct net_device *dev)
}
}
- if (priv->framesyncMonitor) {
+ if (priv->frame_sync_monitor) {
if (reg_c38_State != RegC38_Fsync_AP_BCM) {
rtl92e_writeb(dev, rOFDM0_RxDetector3, 0x95);
@@ -2218,7 +2216,7 @@ static void _rtl92e_dm_check_fsync(struct net_device *dev)
break;
}
- if (priv->framesyncMonitor) {
+ if (priv->frame_sync_monitor) {
if (priv->rtllib->state == RTLLIB_LINKED) {
if (priv->undecorated_smoothed_pwdb <=
RegC38_TH) {
@@ -2249,7 +2247,7 @@ static void _rtl92e_dm_check_fsync(struct net_device *dev)
}
}
}
- if (priv->framesyncMonitor) {
+ if (priv->frame_sync_monitor) {
if (priv->reset_count != reset_cnt) {
rtl92e_writeb(dev, rOFDM0_RxDetector3,
priv->framesync);
@@ -2271,10 +2269,10 @@ static void _rtl92e_dm_init_dynamic_tx_power(struct net_device *dev)
struct r8192_priv *priv = rtllib_priv(dev);
priv->rtllib->bdynamic_txpower_enable = true;
- priv->bLastDTPFlag_High = false;
- priv->bLastDTPFlag_Low = false;
- priv->bDynamicTxHighPower = false;
- priv->bDynamicTxLowPower = false;
+ priv->last_dtp_flag_high = false;
+ priv->last_dtp_flag_low = false;
+ priv->dynamic_tx_high_pwr = false;
+ priv->dynamic_tx_low_pwr = false;
}
static void _rtl92e_dm_dynamic_tx_power(struct net_device *dev)
@@ -2284,8 +2282,8 @@ static void _rtl92e_dm_dynamic_tx_power(struct net_device *dev)
unsigned int txlowpower_threshold = 0;
if (!priv->rtllib->bdynamic_txpower_enable) {
- priv->bDynamicTxHighPower = false;
- priv->bDynamicTxLowPower = false;
+ priv->dynamic_tx_high_pwr = false;
+ priv->dynamic_tx_low_pwr = false;
return;
}
if ((priv->rtllib->ht_info->IOTPeer == HT_IOT_PEER_ATHEROS) &&
@@ -2299,28 +2297,28 @@ static void _rtl92e_dm_dynamic_tx_power(struct net_device *dev)
if (priv->rtllib->state == RTLLIB_LINKED) {
if (priv->undecorated_smoothed_pwdb >= txhipower_threshold) {
- priv->bDynamicTxHighPower = true;
- priv->bDynamicTxLowPower = false;
+ priv->dynamic_tx_high_pwr = true;
+ priv->dynamic_tx_low_pwr = false;
} else {
if (priv->undecorated_smoothed_pwdb <
- txlowpower_threshold && priv->bDynamicTxHighPower)
- priv->bDynamicTxHighPower = false;
+ txlowpower_threshold && priv->dynamic_tx_high_pwr)
+ priv->dynamic_tx_high_pwr = false;
if (priv->undecorated_smoothed_pwdb < 35)
- priv->bDynamicTxLowPower = true;
+ priv->dynamic_tx_low_pwr = true;
else if (priv->undecorated_smoothed_pwdb >= 40)
- priv->bDynamicTxLowPower = false;
+ priv->dynamic_tx_low_pwr = false;
}
} else {
- priv->bDynamicTxHighPower = false;
- priv->bDynamicTxLowPower = false;
+ priv->dynamic_tx_high_pwr = false;
+ priv->dynamic_tx_low_pwr = false;
}
- if ((priv->bDynamicTxHighPower != priv->bLastDTPFlag_High) ||
- (priv->bDynamicTxLowPower != priv->bLastDTPFlag_Low)) {
+ if ((priv->dynamic_tx_high_pwr != priv->last_dtp_flag_high) ||
+ (priv->dynamic_tx_low_pwr != priv->last_dtp_flag_low)) {
rtl92e_set_tx_power(dev, priv->rtllib->current_network.channel);
}
- priv->bLastDTPFlag_High = priv->bDynamicTxHighPower;
- priv->bLastDTPFlag_Low = priv->bDynamicTxLowPower;
+ priv->last_dtp_flag_high = priv->dynamic_tx_high_pwr;
+ priv->last_dtp_flag_low = priv->dynamic_tx_low_pwr;
}
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.h b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.h
index 1d4d7d98a859..01587e2fec65 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.h
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.h
@@ -7,10 +7,9 @@
#ifndef __R8192UDM_H__
#define __R8192UDM_H__
-
/*--------------------------Define Parameters-------------------------------*/
-#define OFDM_Table_Length 19
-#define CCK_Table_length 12
+#define OFDM_TABLE_LEN 19
+#define CCK_TABLE_LEN 12
#define DM_DIG_THRESH_HIGH 40
#define DM_DIG_THRESH_LOW 35
@@ -26,15 +25,15 @@
#define DM_DIG_MIN 0x1c
#define DM_DIG_MIN_Netcore 0x12
-#define RxPathSelection_SS_TH_low 30
-#define RxPathSelection_diff_TH 18
+#define RX_PATH_SEL_SS_TH_LOW 30
+#define RX_PATH_SEL_DIFF_TH 18
-#define RateAdaptiveTH_High 50
-#define RateAdaptiveTH_Low_20M 30
-#define RateAdaptiveTH_Low_40M 10
-#define VeryLowRSSI 15
+#define RATE_ADAPTIVE_TH_HIGH 50
+#define RATE_ADAPTIVE_TH_LOW_20M 30
+#define RATE_ADAPTIVE_TH_LOW_40M 10
+#define VERY_LOW_RSSI 15
-#define WAIotTHVal 25
+#define WA_IOT_TH_VAL 25
#define E_FOR_TX_POWER_TRACK 300
#define TX_POWER_NEAR_FIELD_THRESH_HIGH 68
@@ -47,11 +46,8 @@
#define TX_RETRY_COUNT_REG 0x1ac
#define RegC38_TH 20
-#define DM_Type_ByDriver 1
-
/*--------------------------Define Parameters-------------------------------*/
-
/*------------------------------Define structure----------------------------*/
struct dig_t {
u8 dig_enable_flag;
@@ -90,7 +86,6 @@ enum dm_dig_sta {
DM_STA_DIG_MAX
};
-
enum dm_ratr_sta {
DM_RATR_STA_HIGH = 0,
DM_RATR_STA_MIDDLE = 1,
@@ -130,13 +125,13 @@ enum dm_dig_cs_ratio {
};
struct drx_path_sel {
- u8 Enable;
+ u8 enable;
u8 cck_method;
- u8 cck_Rx_path;
+ u8 cck_rx_path;
- u8 SS_TH_low;
- u8 diff_TH;
- u8 disabledRF;
+ u8 ss_th_low;
+ u8 diff_th;
+ u8 disabled_rf;
u8 reserved;
u8 rf_rssi[4];
@@ -150,29 +145,26 @@ enum dm_cck_rx_path_method {
CCK_Rx_Version_MAX
};
-
struct dcmd_txcmd {
u32 op;
u32 length;
u32 value;
};
-/*------------------------------Define structure----------------------------*/
+/*------------------------------Define structure----------------------------*/
/*------------------------Export global variable----------------------------*/
extern struct dig_t dm_digtable;
-extern struct drx_path_sel DM_RxPathSelTable;
/* Pre-calculated gain tables */
-extern const u32 dm_tx_bb_gain[TxBBGainTableLength];
-extern const u8 dm_cck_tx_bb_gain[CCKTxBBGainTableLength][8];
-extern const u8 dm_cck_tx_bb_gain_ch14[CCKTxBBGainTableLength][8];
+extern const u32 dm_tx_bb_gain[TX_BB_GAIN_TABLE_LEN];
+extern const u8 dm_cck_tx_bb_gain[CCK_TX_BB_GAIN_TABLE_LEN][8];
+extern const u8 dm_cck_tx_bb_gain_ch14[CCK_TX_BB_GAIN_TABLE_LEN][8];
/* Maps table index to iq amplify gain (dB, 12 to -24dB) */
#define dm_tx_bb_gain_idx_to_amplify(idx) (-idx + 12)
/*------------------------Export global variable----------------------------*/
-
/*--------------------------Exported Function prototype---------------------*/
/*--------------------------Exported Function prototype---------------------*/
@@ -181,7 +173,6 @@ void rtl92e_dm_deinit(struct net_device *dev);
void rtl92e_dm_watchdog(struct net_device *dev);
-
void rtl92e_init_adaptive_rate(struct net_device *dev);
void rtl92e_dm_txpower_tracking_wq(void *data);
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_pm.c b/drivers/staging/rtl8192e/rtl8192e/rtl_pm.c
index 82b45c61ac75..9c80dc1b6e12 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_pm.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_pm.c
@@ -41,7 +41,7 @@ int rtl92e_suspend(struct device *dev_d)
rtl92e_writel(dev, WFCRC1, 0xffffffff);
rtl92e_writel(dev, WFCRC2, 0xffffffff);
rtl92e_writeb(dev, PMR, 0x5);
- rtl92e_writeb(dev, MacBlkCtrl, 0xa);
+ rtl92e_writeb(dev, MAC_BLK_CTRL, 0xa);
}
out_pci_suspend:
netdev_info(dev, "WOL is %s\n", priv->rtllib->bSupportRemoteWakeUp ?
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211.h b/drivers/staging/rtl8192u/ieee80211/ieee80211.h
index c5c43d2fb93e..694d1b18f81c 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211.h
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211.h
@@ -1823,7 +1823,7 @@ struct ieee80211_device {
struct work_struct associate_procedure_wq;
struct delayed_work softmac_scan_wq;
struct delayed_work associate_retry_wq;
- struct delayed_work start_ibss_wq;
+ struct delayed_work start_ibss_wq;
struct work_struct wx_sync_scan_wq;
struct workqueue_struct *wq;
// Qos related. Added by Annie, 2005-11-01.
diff --git a/drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c b/drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c
index f1fc077ed29c..0fcae6871108 100644
--- a/drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c
+++ b/drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c
@@ -653,7 +653,7 @@ static void Hal_EfusePowerSwitch(
if (PwrState) {
- /* To avoid cannot access efuse regsiters after disable/enable several times during DTM test. */
+ /* To avoid cannot access efuse registers after disable/enable several times during DTM test. */
/* Suggested by SD1 IsaacHsu. 2013.07.08, added by tynli. */
tempval = rtw_read8(padapter, SDIO_LOCAL_BASE|SDIO_REG_HSUS_CTRL);
if (tempval & BIT(0)) { /* SDIO local register is suspend */
@@ -1693,7 +1693,7 @@ void rtl8723b_InitBeaconParameters(struct adapter *padapter)
rtw_write8(padapter, REG_BCNDMATIM, BCN_DMA_ATIME_INT_TIME_8723B); /* 2ms */
/* Suggested by designer timchen. Change beacon AIFS to the largest number */
- /* beacause test chip does not contension before sending beacon. by tynli. 2009.11.03 */
+ /* because test chip does not contension before sending beacon. by tynli. 2009.11.03 */
rtw_write16(padapter, REG_BCNTCFG, 0x660F);
pHalData->RegBcnCtrlVal = rtw_read8(padapter, REG_BCN_CTRL);
@@ -2089,7 +2089,7 @@ void Hal_EfuseParseIDCode(struct adapter *padapter, u8 *hwinfo)
u16 EEPROMId;
- /* Checl 0x8129 again for making sure autoload status!! */
+ /* Check 0x8129 again for making sure autoload status!! */
EEPROMId = le16_to_cpu(*((__le16 *)hwinfo));
if (EEPROMId != RTL_EEPROM_ID) {
pEEPROM->bautoload_fail_flag = true;
@@ -2510,7 +2510,7 @@ static void rtl8723b_cal_txdesc_chksum(struct tx_desc *ptxdesc)
/* Clear first */
ptxdesc->txdw7 &= cpu_to_le32(0xffff0000);
- /* checksume is always calculated by first 32 bytes, */
+ /* checksum is always calculated by first 32 bytes, */
/* and it doesn't depend on TX DESC length. */
/* Thomas, Lucas@SD4, 20130515 */
count = 16;
@@ -2723,7 +2723,7 @@ static void rtl8723b_fill_default_txdesc(
* multicast / mgnt frame should be controlled by Hw because Fw
* will also send null data which we cannot control when Fw LPS
* enable.
- * --> default enable non-Qos data sequense number. 2010.06.23.
+ * --> default enable non-Qos data sequence number. 2010.06.23.
* by tynli.
* (2) Enable HW SEQ control for beacon packet, because we use
* Hw beacon.
@@ -2777,7 +2777,7 @@ void rtl8723b_fill_fake_txdesc(
SET_TX_DESC_PKT_SIZE_8723B(pDesc, BufferLen); /* Buffer size + command header */
SET_TX_DESC_QUEUE_SEL_8723B(pDesc, QSLT_MGNT); /* Fixed queue of Mgnt queue */
- /* Set NAVUSEHDR to prevent Ps-poll AId filed to be changed to error vlaue by Hw. */
+ /* Set NAVUSEHDR to prevent Ps-poll AId filed to be changed to error value by Hw. */
if (IsPsPoll) {
SET_TX_DESC_NAV_USE_HDR_8723B(pDesc, 1);
} else {
@@ -3406,7 +3406,7 @@ void SetHwReg8723B(struct adapter *padapter, u8 variable, u8 *val)
/* polling bit, and No Write enable, and address */
ulCommand = CAM_CONTENT_COUNT*ucIndex+i;
ulCommand = ulCommand | CAM_POLLINIG | CAM_WRITE;
- /* write content 0 is equall to mark invalid */
+ /* write content 0 is equal to mark as invalid */
rtw_write32(padapter, WCAMI, ulContent); /* mdelay(40); */
rtw_write32(padapter, RWCAM, ulCommand); /* mdelay(40); */
}
diff --git a/drivers/staging/rts5208/ms.c b/drivers/staging/rts5208/ms.c
index 14449f8afad5..bfeb5873bf3b 100644
--- a/drivers/staging/rts5208/ms.c
+++ b/drivers/staging/rts5208/ms.c
@@ -1768,6 +1768,8 @@ static int ms_copy_page(struct rtsx_chip *chip, u16 old_blk, u16 new_blk,
retval = ms_set_rw_reg_addr(chip, OVERWRITE_FLAG, MS_EXTRA_SIZE,
SYSTEM_PARAM, (6 + MS_EXTRA_SIZE));
+ if (retval != STATUS_SUCCESS)
+ return STATUS_FAIL;
ms_set_err_code(chip, MS_NO_ERROR);
diff --git a/drivers/staging/vc04_services/Makefile b/drivers/staging/vc04_services/Makefile
index 1fd191e2e2a5..44794bdf6173 100644
--- a/drivers/staging/vc04_services/Makefile
+++ b/drivers/staging/vc04_services/Makefile
@@ -15,5 +15,3 @@ obj-$(CONFIG_SND_BCM2835) += bcm2835-audio/
obj-$(CONFIG_VIDEO_BCM2835) += bcm2835-camera/
obj-$(CONFIG_BCM2835_VCHIQ_MMAL) += vchiq-mmal/
-ccflags-y += -I $(srctree)/$(src)/include
-
diff --git a/drivers/staging/vc04_services/bcm2835-audio/Makefile b/drivers/staging/vc04_services/bcm2835-audio/Makefile
index d59fe4dde615..01ceebdf88e7 100644
--- a/drivers/staging/vc04_services/bcm2835-audio/Makefile
+++ b/drivers/staging/vc04_services/bcm2835-audio/Makefile
@@ -1,5 +1,3 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_SND_BCM2835) += snd-bcm2835.o
snd-bcm2835-objs := bcm2835.o bcm2835-ctl.o bcm2835-pcm.o bcm2835-vchiq.o
-
-ccflags-y += -I $(srctree)/$(src)/../include -D__VCCOREVER__=0x04000000
diff --git a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c
index f4c2c9506d86..d74110ca17ab 100644
--- a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c
+++ b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c
@@ -91,17 +91,17 @@ static int bcm2835_audio_send_simple(struct bcm2835_audio_instance *instance,
return bcm2835_audio_send_msg(instance, &m, wait);
}
-static enum vchiq_status audio_vchi_callback(struct vchiq_instance *vchiq_instance,
- enum vchiq_reason reason,
- struct vchiq_header *header,
- unsigned int handle, void *userdata)
+static int audio_vchi_callback(struct vchiq_instance *vchiq_instance,
+ enum vchiq_reason reason,
+ struct vchiq_header *header,
+ unsigned int handle, void *userdata)
{
struct bcm2835_audio_instance *instance = vchiq_get_service_userdata(vchiq_instance,
handle);
struct vc_audio_msg *m;
if (reason != VCHIQ_MESSAGE_AVAILABLE)
- return VCHIQ_SUCCESS;
+ return 0;
m = (void *)header->data;
if (m->type == VC_AUDIO_MSG_TYPE_RESULT) {
@@ -119,7 +119,7 @@ static enum vchiq_status audio_vchi_callback(struct vchiq_instance *vchiq_instan
}
vchiq_release_message(vchiq_instance, instance->service_handle, header);
- return VCHIQ_SUCCESS;
+ return 0;
}
static int
diff --git a/drivers/staging/vc04_services/bcm2835-audio/bcm2835.h b/drivers/staging/vc04_services/bcm2835-audio/bcm2835.h
index 38b7451d77b2..0a81383c475a 100644
--- a/drivers/staging/vc04_services/bcm2835-audio/bcm2835.h
+++ b/drivers/staging/vc04_services/bcm2835-audio/bcm2835.h
@@ -6,11 +6,12 @@
#include <linux/device.h>
#include <linux/wait.h>
-#include <linux/raspberrypi/vchiq.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm-indirect.h>
+#include "../include/linux/raspberrypi/vchiq.h"
+
#define MAX_SUBSTREAMS (8)
#define AVAIL_SUBSTREAMS_MASK (0xff)
diff --git a/drivers/staging/vc04_services/bcm2835-camera/Makefile b/drivers/staging/vc04_services/bcm2835-camera/Makefile
index 3a76d6ade428..203b93899b20 100644
--- a/drivers/staging/vc04_services/bcm2835-camera/Makefile
+++ b/drivers/staging/vc04_services/bcm2835-camera/Makefile
@@ -4,8 +4,3 @@ bcm2835-v4l2-$(CONFIG_VIDEO_BCM2835) := \
controls.o
obj-$(CONFIG_VIDEO_BCM2835) += bcm2835-v4l2.o
-
-ccflags-y += \
- -I $(srctree)/$(src)/.. \
- -I $(srctree)/$(src)/../vchiq-mmal/ \
- -D__VCCOREVER__=0x04000000
diff --git a/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c b/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c
index 797ebe2a973a..4f81765912ea 100644
--- a/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c
+++ b/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c
@@ -26,11 +26,11 @@
#include <linux/delay.h>
#include <linux/platform_device.h>
-#include "mmal-common.h"
-#include "mmal-encodings.h"
-#include "mmal-vchiq.h"
-#include "mmal-msg.h"
-#include "mmal-parameters.h"
+#include "../vchiq-mmal/mmal-common.h"
+#include "../vchiq-mmal/mmal-encodings.h"
+#include "../vchiq-mmal/mmal-vchiq.h"
+#include "../vchiq-mmal/mmal-msg.h"
+#include "../vchiq-mmal/mmal-parameters.h"
#include "bcm2835-camera.h"
#define MIN_WIDTH 32
diff --git a/drivers/staging/vc04_services/bcm2835-camera/controls.c b/drivers/staging/vc04_services/bcm2835-camera/controls.c
index 5644d1d457b9..6bce45925bf1 100644
--- a/drivers/staging/vc04_services/bcm2835-camera/controls.c
+++ b/drivers/staging/vc04_services/bcm2835-camera/controls.c
@@ -23,9 +23,9 @@
#include <media/v4l2-event.h>
#include <media/v4l2-common.h>
-#include "mmal-common.h"
-#include "mmal-vchiq.h"
-#include "mmal-parameters.h"
+#include "../vchiq-mmal/mmal-common.h"
+#include "../vchiq-mmal/mmal-vchiq.h"
+#include "../vchiq-mmal/mmal-parameters.h"
#include "bcm2835-camera.h"
/* The supported V4L2_CID_AUTO_EXPOSURE_BIAS values are from -4.0 to +4.0.
diff --git a/drivers/staging/vc04_services/include/linux/raspberrypi/vchiq.h b/drivers/staging/vc04_services/include/linux/raspberrypi/vchiq.h
index 690ab7165b2c..66965da11443 100644
--- a/drivers/staging/vc04_services/include/linux/raspberrypi/vchiq.h
+++ b/drivers/staging/vc04_services/include/linux/raspberrypi/vchiq.h
@@ -17,12 +17,6 @@ enum vchiq_reason {
VCHIQ_BULK_RECEIVE_ABORTED /* service, -, bulk_userdata */
};
-enum vchiq_status {
- VCHIQ_ERROR = -1,
- VCHIQ_SUCCESS = 0,
- VCHIQ_RETRY = 1
-};
-
enum vchiq_bulk_mode {
VCHIQ_BULK_MODE_CALLBACK,
VCHIQ_BULK_MODE_BLOCKING,
@@ -57,11 +51,11 @@ struct vchiq_instance;
struct vchiq_service_base {
int fourcc;
- enum vchiq_status (*callback)(struct vchiq_instance *instance,
- enum vchiq_reason reason,
- struct vchiq_header *header,
- unsigned int handle,
- void *bulk_userdata);
+ int (*callback)(struct vchiq_instance *instance,
+ enum vchiq_reason reason,
+ struct vchiq_header *header,
+ unsigned int handle,
+ void *bulk_userdata);
void *userdata;
};
@@ -74,11 +68,11 @@ struct vchiq_completion_data_kernel {
struct vchiq_service_params_kernel {
int fourcc;
- enum vchiq_status (*callback)(struct vchiq_instance *instance,
- enum vchiq_reason reason,
- struct vchiq_header *header,
- unsigned int handle,
- void *bulk_userdata);
+ int (*callback)(struct vchiq_instance *instance,
+ enum vchiq_reason reason,
+ struct vchiq_header *header,
+ unsigned int handle,
+ void *bulk_userdata);
void *userdata;
short version; /* Increment for non-trivial changes */
short version_min; /* Update for incompatible changes */
@@ -87,32 +81,31 @@ struct vchiq_service_params_kernel {
struct vchiq_instance;
extern int vchiq_initialise(struct vchiq_instance **pinstance);
-extern enum vchiq_status vchiq_shutdown(struct vchiq_instance *instance);
-extern enum vchiq_status vchiq_connect(struct vchiq_instance *instance);
-extern enum vchiq_status vchiq_open_service(struct vchiq_instance *instance,
- const struct vchiq_service_params_kernel *params,
- unsigned int *pservice);
-extern enum vchiq_status vchiq_close_service(struct vchiq_instance *instance,
- unsigned int service);
-extern enum vchiq_status vchiq_use_service(struct vchiq_instance *instance, unsigned int service);
-extern enum vchiq_status vchiq_release_service(struct vchiq_instance *instance,
- unsigned int service);
+extern int vchiq_shutdown(struct vchiq_instance *instance);
+extern int vchiq_connect(struct vchiq_instance *instance);
+extern int vchiq_open_service(struct vchiq_instance *instance,
+ const struct vchiq_service_params_kernel *params,
+ unsigned int *pservice);
+extern int vchiq_close_service(struct vchiq_instance *instance,
+ unsigned int service);
+extern int vchiq_use_service(struct vchiq_instance *instance, unsigned int service);
+extern int vchiq_release_service(struct vchiq_instance *instance,
+ unsigned int service);
extern void vchiq_msg_queue_push(struct vchiq_instance *instance, unsigned int handle,
struct vchiq_header *header);
extern void vchiq_release_message(struct vchiq_instance *instance, unsigned int service,
struct vchiq_header *header);
extern int vchiq_queue_kernel_message(struct vchiq_instance *instance, unsigned int handle,
void *data, unsigned int size);
-extern enum vchiq_status vchiq_bulk_transmit(struct vchiq_instance *instance, unsigned int service,
- const void *data, unsigned int size, void *userdata,
- enum vchiq_bulk_mode mode);
-extern enum vchiq_status vchiq_bulk_receive(struct vchiq_instance *instance, unsigned int service,
- void *data, unsigned int size, void *userdata,
- enum vchiq_bulk_mode mode);
+extern int vchiq_bulk_transmit(struct vchiq_instance *instance, unsigned int service,
+ const void *data, unsigned int size, void *userdata,
+ enum vchiq_bulk_mode mode);
+extern int vchiq_bulk_receive(struct vchiq_instance *instance, unsigned int service,
+ void *data, unsigned int size, void *userdata,
+ enum vchiq_bulk_mode mode);
extern void *vchiq_get_service_userdata(struct vchiq_instance *instance, unsigned int service);
-extern enum vchiq_status vchiq_get_peer_version(struct vchiq_instance *instance,
- unsigned int handle,
- short *peer_version);
+extern int vchiq_get_peer_version(struct vchiq_instance *instance, unsigned int handle,
+ short *peer_version);
extern struct vchiq_header *vchiq_msg_hold(struct vchiq_instance *instance, unsigned int handle);
#endif /* VCHIQ_H */
diff --git a/drivers/staging/vc04_services/interface/TODO b/drivers/staging/vc04_services/interface/TODO
index 97085a0b3223..6d9d4a800aa7 100644
--- a/drivers/staging/vc04_services/interface/TODO
+++ b/drivers/staging/vc04_services/interface/TODO
@@ -40,11 +40,6 @@ beneficial to go over all of them and, if correct, comment on their merits.
Extra points to whomever confidently reviews the remote_event_*() family of
functions.
-* Get rid of custom function return values
-
-Most functions use a custom set of return values, we should force proper Linux
-error numbers. Special care is needed for VCHIQ_RETRY.
-
* Reformat core code with more sane indentations
The code follows the 80 characters limitation yet tends to go 3 or 4 levels of
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
index dc33490ba7fb..cddcd3c596c9 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
@@ -151,7 +151,7 @@ static struct semaphore g_free_fragments_sema;
static DEFINE_SEMAPHORE(g_free_fragments_mutex);
-static enum vchiq_status
+static int
vchiq_blocking_bulk_transfer(struct vchiq_instance *instance, unsigned int handle, void *data,
unsigned int size, enum vchiq_bulk_dir dir);
@@ -501,7 +501,7 @@ int vchiq_platform_init(struct platform_device *pdev, struct vchiq_state *state)
vchiq_slot_zero = vchiq_init_slots(slot_mem, slot_mem_size);
if (!vchiq_slot_zero)
- return -EINVAL;
+ return -ENOMEM;
vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_OFFSET_IDX] =
(int)slot_phys + slot_mem_size;
@@ -541,9 +541,15 @@ int vchiq_platform_init(struct platform_device *pdev, struct vchiq_state *state)
channelbase = slot_phys;
err = rpi_firmware_property(fw, RPI_FIRMWARE_VCHIQ_INIT,
&channelbase, sizeof(channelbase));
- if (err || channelbase) {
- dev_err(dev, "failed to set channelbase\n");
- return err ? : -ENXIO;
+ if (err) {
+ dev_err(dev, "failed to send firmware property: %d\n", err);
+ return err;
+ }
+
+ if (channelbase) {
+ dev_err(dev, "failed to set channelbase (response: %x)\n",
+ channelbase);
+ return -ENXIO;
}
vchiq_log_info(vchiq_arm_log_level, "vchiq_init - done (slots %pK, phys %pad)",
@@ -722,13 +728,13 @@ void free_bulk_waiter(struct vchiq_instance *instance)
}
}
-enum vchiq_status vchiq_shutdown(struct vchiq_instance *instance)
+int vchiq_shutdown(struct vchiq_instance *instance)
{
- enum vchiq_status status = VCHIQ_SUCCESS;
+ int status = 0;
struct vchiq_state *state = instance->state;
if (mutex_lock_killable(&state->mutex))
- return VCHIQ_RETRY;
+ return -EAGAIN;
/* Remove all services */
vchiq_shutdown_internal(state, instance);
@@ -749,19 +755,19 @@ static int vchiq_is_connected(struct vchiq_instance *instance)
return instance->connected;
}
-enum vchiq_status vchiq_connect(struct vchiq_instance *instance)
+int vchiq_connect(struct vchiq_instance *instance)
{
- enum vchiq_status status;
+ int status;
struct vchiq_state *state = instance->state;
if (mutex_lock_killable(&state->mutex)) {
vchiq_log_trace(vchiq_core_log_level, "%s: call to mutex_lock failed", __func__);
- status = VCHIQ_RETRY;
+ status = -EAGAIN;
goto failed;
}
status = vchiq_connect_internal(state, instance);
- if (status == VCHIQ_SUCCESS)
+ if (!status)
instance->connected = 1;
mutex_unlock(&state->mutex);
@@ -773,12 +779,12 @@ failed:
}
EXPORT_SYMBOL(vchiq_connect);
-static enum vchiq_status
+static int
vchiq_add_service(struct vchiq_instance *instance,
const struct vchiq_service_params_kernel *params,
unsigned int *phandle)
{
- enum vchiq_status status;
+ int status;
struct vchiq_state *state = instance->state;
struct vchiq_service *service = NULL;
int srvstate;
@@ -793,9 +799,9 @@ vchiq_add_service(struct vchiq_instance *instance,
if (service) {
*phandle = service->handle;
- status = VCHIQ_SUCCESS;
+ status = 0;
} else {
- status = VCHIQ_ERROR;
+ status = -EINVAL;
}
vchiq_log_trace(vchiq_core_log_level, "%s(%p): returning %d", __func__, instance, status);
@@ -803,12 +809,12 @@ vchiq_add_service(struct vchiq_instance *instance,
return status;
}
-enum vchiq_status
+int
vchiq_open_service(struct vchiq_instance *instance,
const struct vchiq_service_params_kernel *params,
unsigned int *phandle)
{
- enum vchiq_status status = VCHIQ_ERROR;
+ int status = -EINVAL;
struct vchiq_state *state = instance->state;
struct vchiq_service *service = NULL;
@@ -822,7 +828,7 @@ vchiq_open_service(struct vchiq_instance *instance,
if (service) {
*phandle = service->handle;
status = vchiq_open_service_internal(service, current->pid);
- if (status != VCHIQ_SUCCESS) {
+ if (status) {
vchiq_remove_service(instance, service->handle);
*phandle = VCHIQ_SERVICE_HANDLE_INVALID;
}
@@ -835,11 +841,11 @@ failed:
}
EXPORT_SYMBOL(vchiq_open_service);
-enum vchiq_status
+int
vchiq_bulk_transmit(struct vchiq_instance *instance, unsigned int handle, const void *data,
unsigned int size, void *userdata, enum vchiq_bulk_mode mode)
{
- enum vchiq_status status;
+ int status;
while (1) {
switch (mode) {
@@ -855,15 +861,15 @@ vchiq_bulk_transmit(struct vchiq_instance *instance, unsigned int handle, const
VCHIQ_BULK_TRANSMIT);
break;
default:
- return VCHIQ_ERROR;
+ return -EINVAL;
}
/*
- * vchiq_*_bulk_transfer() may return VCHIQ_RETRY, so we need
+ * vchiq_*_bulk_transfer() may return -EAGAIN, so we need
* to implement a retry mechanism since this function is
* supposed to block until queued
*/
- if (status != VCHIQ_RETRY)
+ if (status != -EAGAIN)
break;
msleep(1);
@@ -873,11 +879,11 @@ vchiq_bulk_transmit(struct vchiq_instance *instance, unsigned int handle, const
}
EXPORT_SYMBOL(vchiq_bulk_transmit);
-enum vchiq_status vchiq_bulk_receive(struct vchiq_instance *instance, unsigned int handle,
- void *data, unsigned int size, void *userdata,
- enum vchiq_bulk_mode mode)
+int vchiq_bulk_receive(struct vchiq_instance *instance, unsigned int handle,
+ void *data, unsigned int size, void *userdata,
+ enum vchiq_bulk_mode mode)
{
- enum vchiq_status status;
+ int status;
while (1) {
switch (mode) {
@@ -892,15 +898,15 @@ enum vchiq_status vchiq_bulk_receive(struct vchiq_instance *instance, unsigned i
VCHIQ_BULK_RECEIVE);
break;
default:
- return VCHIQ_ERROR;
+ return -EINVAL;
}
/*
- * vchiq_*_bulk_transfer() may return VCHIQ_RETRY, so we need
+ * vchiq_*_bulk_transfer() may return -EAGAIN, so we need
* to implement a retry mechanism since this function is
* supposed to block until queued
*/
- if (status != VCHIQ_RETRY)
+ if (status != -EAGAIN)
break;
msleep(1);
@@ -910,17 +916,17 @@ enum vchiq_status vchiq_bulk_receive(struct vchiq_instance *instance, unsigned i
}
EXPORT_SYMBOL(vchiq_bulk_receive);
-static enum vchiq_status
+static int
vchiq_blocking_bulk_transfer(struct vchiq_instance *instance, unsigned int handle, void *data,
unsigned int size, enum vchiq_bulk_dir dir)
{
struct vchiq_service *service;
- enum vchiq_status status;
+ int status;
struct bulk_waiter_node *waiter = NULL, *iter;
service = find_service_by_handle(instance, handle);
if (!service)
- return VCHIQ_ERROR;
+ return -EINVAL;
vchiq_service_put(service);
@@ -954,14 +960,14 @@ vchiq_blocking_bulk_transfer(struct vchiq_instance *instance, unsigned int handl
waiter = kzalloc(sizeof(*waiter), GFP_KERNEL);
if (!waiter) {
vchiq_log_error(vchiq_core_log_level, "%s - out of memory", __func__);
- return VCHIQ_ERROR;
+ return -ENOMEM;
}
}
status = vchiq_bulk_transfer(instance, handle, data, NULL, size,
&waiter->bulk_waiter,
VCHIQ_BULK_MODE_BLOCKING, dir);
- if ((status != VCHIQ_RETRY) || fatal_signal_pending(current) || !waiter->bulk_waiter.bulk) {
+ if ((status != -EAGAIN) || fatal_signal_pending(current) || !waiter->bulk_waiter.bulk) {
struct vchiq_bulk *bulk = waiter->bulk_waiter.bulk;
if (bulk) {
@@ -983,7 +989,7 @@ vchiq_blocking_bulk_transfer(struct vchiq_instance *instance, unsigned int handl
return status;
}
-static enum vchiq_status
+static int
add_completion(struct vchiq_instance *instance, enum vchiq_reason reason,
struct vchiq_header *header, struct user_service *user_service,
void *bulk_userdata)
@@ -1001,10 +1007,10 @@ add_completion(struct vchiq_instance *instance, enum vchiq_reason reason,
DEBUG_COUNT(COMPLETION_QUEUE_FULL_COUNT);
if (wait_for_completion_interruptible(&instance->remove_event)) {
vchiq_log_info(vchiq_arm_log_level, "service_callback interrupted");
- return VCHIQ_RETRY;
+ return -EAGAIN;
} else if (instance->closing) {
vchiq_log_info(vchiq_arm_log_level, "service_callback closing");
- return VCHIQ_SUCCESS;
+ return 0;
}
DEBUG_TRACE(SERVICE_CALLBACK_LINE);
}
@@ -1041,10 +1047,10 @@ add_completion(struct vchiq_instance *instance, enum vchiq_reason reason,
complete(&instance->insert_event);
- return VCHIQ_SUCCESS;
+ return 0;
}
-enum vchiq_status
+int
service_callback(struct vchiq_instance *instance, enum vchiq_reason reason,
struct vchiq_header *header, unsigned int handle, void *bulk_userdata)
{
@@ -1066,14 +1072,14 @@ service_callback(struct vchiq_instance *instance, enum vchiq_reason reason,
service = handle_to_service(instance, handle);
if (WARN_ON(!service)) {
rcu_read_unlock();
- return VCHIQ_SUCCESS;
+ return 0;
}
user_service = (struct user_service *)service->base.userdata;
if (!instance || instance->closing) {
rcu_read_unlock();
- return VCHIQ_SUCCESS;
+ return 0;
}
/*
@@ -1103,14 +1109,14 @@ service_callback(struct vchiq_instance *instance, enum vchiq_reason reason,
*/
if ((user_service->message_available_pos -
instance->completion_remove) < 0) {
- enum vchiq_status status;
+ int status;
vchiq_log_info(vchiq_arm_log_level,
"Inserting extra MESSAGE_AVAILABLE");
DEBUG_TRACE(SERVICE_CALLBACK_LINE);
status = add_completion(instance, reason, NULL, user_service,
bulk_userdata);
- if (status != VCHIQ_SUCCESS) {
+ if (status) {
DEBUG_TRACE(SERVICE_CALLBACK_LINE);
vchiq_service_put(service);
return status;
@@ -1122,12 +1128,12 @@ service_callback(struct vchiq_instance *instance, enum vchiq_reason reason,
vchiq_log_info(vchiq_arm_log_level, "%s interrupted", __func__);
DEBUG_TRACE(SERVICE_CALLBACK_LINE);
vchiq_service_put(service);
- return VCHIQ_RETRY;
+ return -EAGAIN;
} else if (instance->closing) {
vchiq_log_info(vchiq_arm_log_level, "%s closing", __func__);
DEBUG_TRACE(SERVICE_CALLBACK_LINE);
vchiq_service_put(service);
- return VCHIQ_ERROR;
+ return -EINVAL;
}
DEBUG_TRACE(SERVICE_CALLBACK_LINE);
spin_lock(&msg_queue_spinlock);
@@ -1158,7 +1164,7 @@ service_callback(struct vchiq_instance *instance, enum vchiq_reason reason,
vchiq_service_put(service);
if (skip_completion)
- return VCHIQ_SUCCESS;
+ return 0;
return add_completion(instance, reason, header, user_service,
bulk_userdata);
@@ -1314,7 +1320,7 @@ vchiq_get_state(void)
* Autosuspend related functionality
*/
-static enum vchiq_status
+static int
vchiq_keepalive_vchiq_callback(struct vchiq_instance *instance,
enum vchiq_reason reason,
struct vchiq_header *header,
@@ -1330,7 +1336,7 @@ vchiq_keepalive_thread_func(void *v)
struct vchiq_state *state = (struct vchiq_state *)v;
struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
- enum vchiq_status status;
+ int status;
struct vchiq_instance *instance;
unsigned int ka_handle;
int ret;
@@ -1350,14 +1356,14 @@ vchiq_keepalive_thread_func(void *v)
}
status = vchiq_connect(instance);
- if (status != VCHIQ_SUCCESS) {
+ if (status) {
vchiq_log_error(vchiq_susp_log_level, "%s vchiq_connect failed %d", __func__,
status);
goto shutdown;
}
status = vchiq_add_service(instance, &params, &ka_handle);
- if (status != VCHIQ_SUCCESS) {
+ if (status) {
vchiq_log_error(vchiq_susp_log_level, "%s vchiq_open_service failed %d", __func__,
status);
goto shutdown;
@@ -1386,14 +1392,14 @@ vchiq_keepalive_thread_func(void *v)
while (uc--) {
atomic_inc(&arm_state->ka_use_ack_count);
status = vchiq_use_service(instance, ka_handle);
- if (status != VCHIQ_SUCCESS) {
+ if (status) {
vchiq_log_error(vchiq_susp_log_level,
"%s vchiq_use_service error %d", __func__, status);
}
}
while (rc--) {
status = vchiq_release_service(instance, ka_handle);
- if (status != VCHIQ_SUCCESS) {
+ if (status) {
vchiq_log_error(vchiq_susp_log_level,
"%s vchiq_release_service error %d", __func__,
status);
@@ -1446,13 +1452,13 @@ vchiq_use_internal(struct vchiq_state *state, struct vchiq_service *service,
write_unlock_bh(&arm_state->susp_res_lock);
if (!ret) {
- enum vchiq_status status = VCHIQ_SUCCESS;
+ int status = 0;
long ack_cnt = atomic_xchg(&arm_state->ka_use_ack_count, 0);
- while (ack_cnt && (status == VCHIQ_SUCCESS)) {
+ while (ack_cnt && !status) {
/* Send the use notify to videocore */
status = vchiq_send_remote_use_active(state);
- if (status == VCHIQ_SUCCESS)
+ if (!status)
ack_cnt--;
else
atomic_add(ack_cnt, &arm_state->ka_use_ack_count);
@@ -1587,10 +1593,10 @@ vchiq_instance_set_trace(struct vchiq_instance *instance, int trace)
instance->trace = (trace != 0);
}
-enum vchiq_status
+int
vchiq_use_service(struct vchiq_instance *instance, unsigned int handle)
{
- enum vchiq_status ret = VCHIQ_ERROR;
+ int ret = -EINVAL;
struct vchiq_service *service = find_service_by_handle(instance, handle);
if (service) {
@@ -1601,10 +1607,10 @@ vchiq_use_service(struct vchiq_instance *instance, unsigned int handle)
}
EXPORT_SYMBOL(vchiq_use_service);
-enum vchiq_status
+int
vchiq_release_service(struct vchiq_instance *instance, unsigned int handle)
{
- enum vchiq_status ret = VCHIQ_ERROR;
+ int ret = -EINVAL;
struct vchiq_service *service = find_service_by_handle(instance, handle);
if (service) {
@@ -1695,11 +1701,11 @@ vchiq_dump_service_use_state(struct vchiq_state *state)
kfree(service_data);
}
-enum vchiq_status
+int
vchiq_check_service(struct vchiq_service *service)
{
struct vchiq_arm_state *arm_state;
- enum vchiq_status ret = VCHIQ_ERROR;
+ int ret = -EINVAL;
if (!service || !service->state)
goto out;
@@ -1708,10 +1714,10 @@ vchiq_check_service(struct vchiq_service *service)
read_lock_bh(&arm_state->susp_res_lock);
if (service->service_use_count)
- ret = VCHIQ_SUCCESS;
+ ret = 0;
read_unlock_bh(&arm_state->susp_res_lock);
- if (ret == VCHIQ_ERROR) {
+ if (ret) {
vchiq_log_error(vchiq_susp_log_level,
"%s ERROR - %c%c%c%c:%d service count %d, state count %d", __func__,
VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc), service->client_id,
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.h
index cd20eb18f275..2fb31f9b527f 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.h
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.h
@@ -85,13 +85,13 @@ extern struct vchiq_state g_state;
extern struct vchiq_state *
vchiq_get_state(void);
-enum vchiq_status
+int
vchiq_use_service(struct vchiq_instance *instance, unsigned int handle);
-extern enum vchiq_status
+extern int
vchiq_release_service(struct vchiq_instance *instance, unsigned int handle);
-extern enum vchiq_status
+extern int
vchiq_check_service(struct vchiq_service *service);
extern void
@@ -137,7 +137,7 @@ static inline int vchiq_register_chrdev(struct device *parent) { return 0; }
#endif /* IS_ENABLED(CONFIG_VCHIQ_CDEV) */
-extern enum vchiq_status
+extern int
service_callback(struct vchiq_instance *vchiq_instance, enum vchiq_reason reason,
struct vchiq_header *header, unsigned int handle, void *bulk_userdata);
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
index 45ed30bfdbf5..596894338cb4 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
@@ -463,22 +463,22 @@ mark_service_closing(struct vchiq_service *service)
mark_service_closing_internal(service, 0);
}
-static inline enum vchiq_status
+static inline int
make_service_callback(struct vchiq_service *service, enum vchiq_reason reason,
struct vchiq_header *header, void *bulk_userdata)
{
- enum vchiq_status status;
+ int status;
vchiq_log_trace(vchiq_core_log_level, "%d: callback:%d (%s, %pK, %pK)",
service->state->id, service->localport, reason_names[reason],
header, bulk_userdata);
status = service->base.callback(service->instance, reason, header, service->handle,
bulk_userdata);
- if (status == VCHIQ_ERROR) {
+ if (status && (status != -EAGAIN)) {
vchiq_log_warning(vchiq_core_log_level,
"%d: ignoring ERROR from callback to service %x",
service->state->id, service->handle);
- status = VCHIQ_SUCCESS;
+ status = 0;
}
if (reason != VCHIQ_MESSAGE_AVAILABLE)
@@ -498,6 +498,7 @@ vchiq_set_conn_state(struct vchiq_state *state, enum vchiq_connstate newstate)
vchiq_platform_conn_state_changed(state, oldstate, newstate);
}
+/* This initialises a single remote_event, and the associated wait_queue. */
static inline void
remote_event_create(wait_queue_head_t *wq, struct remote_event *event)
{
@@ -536,6 +537,10 @@ remote_event_wait(wait_queue_head_t *wq, struct remote_event *event)
return 1;
}
+/*
+ * Acknowledge that the event has been signalled, and wake any waiters. Usually
+ * called as a result of the doorbell being rung.
+ */
static inline void
remote_event_signal_local(wait_queue_head_t *wq, struct remote_event *event)
{
@@ -544,6 +549,7 @@ remote_event_signal_local(wait_queue_head_t *wq, struct remote_event *event)
wake_up_all(wq);
}
+/* Check if a single event has been signalled, waking the waiters if it has. */
static inline void
remote_event_poll(wait_queue_head_t *wq, struct remote_event *event)
{
@@ -551,6 +557,10 @@ remote_event_poll(wait_queue_head_t *wq, struct remote_event *event)
remote_event_signal_local(wq, event);
}
+/*
+ * VCHIQ used a small, fixed number of remote events. It is simplest to
+ * enumerate them here for polling.
+ */
void
remote_event_pollall(struct vchiq_state *state)
{
@@ -900,7 +910,7 @@ copy_message_data(ssize_t (*copy_callback)(void *context, void *dest, size_t off
}
/* Called by the slot handler and application threads */
-static enum vchiq_status
+static int
queue_message(struct vchiq_state *state, struct vchiq_service *service,
int msgid,
ssize_t (*copy_callback)(void *context, void *dest,
@@ -922,7 +932,7 @@ queue_message(struct vchiq_state *state, struct vchiq_service *service,
if (!(flags & QMFLAGS_NO_MUTEX_LOCK) &&
mutex_lock_killable(&state->slot_mutex))
- return VCHIQ_RETRY;
+ return -EAGAIN;
if (type == VCHIQ_MSG_DATA) {
int tx_end_index;
@@ -930,7 +940,7 @@ queue_message(struct vchiq_state *state, struct vchiq_service *service,
if (!service) {
WARN(1, "%s: service is NULL\n", __func__);
mutex_unlock(&state->slot_mutex);
- return VCHIQ_ERROR;
+ return -EINVAL;
}
WARN_ON(flags & (QMFLAGS_NO_MUTEX_LOCK |
@@ -939,7 +949,7 @@ queue_message(struct vchiq_state *state, struct vchiq_service *service,
if (service->closing) {
/* The service has been closed */
mutex_unlock(&state->slot_mutex);
- return VCHIQ_ERROR;
+ return -EHOSTDOWN;
}
quota = &state->service_quotas[service->localport];
@@ -963,7 +973,7 @@ queue_message(struct vchiq_state *state, struct vchiq_service *service,
mutex_unlock(&state->slot_mutex);
if (wait_for_completion_interruptible(&state->data_quota_event))
- return VCHIQ_RETRY;
+ return -EAGAIN;
mutex_lock(&state->slot_mutex);
spin_lock(&quota_spinlock);
@@ -987,15 +997,15 @@ queue_message(struct vchiq_state *state, struct vchiq_service *service,
VCHIQ_SERVICE_STATS_INC(service, quota_stalls);
mutex_unlock(&state->slot_mutex);
if (wait_for_completion_interruptible(&quota->quota_event))
- return VCHIQ_RETRY;
+ return -EAGAIN;
if (service->closing)
- return VCHIQ_ERROR;
+ return -EHOSTDOWN;
if (mutex_lock_killable(&state->slot_mutex))
- return VCHIQ_RETRY;
+ return -EAGAIN;
if (service->srvstate != VCHIQ_SRVSTATE_OPEN) {
/* The service has been closed */
mutex_unlock(&state->slot_mutex);
- return VCHIQ_ERROR;
+ return -EHOSTDOWN;
}
spin_lock(&quota_spinlock);
tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos + stride - 1);
@@ -1015,7 +1025,7 @@ queue_message(struct vchiq_state *state, struct vchiq_service *service,
*/
if (!(flags & QMFLAGS_NO_MUTEX_LOCK))
mutex_unlock(&state->slot_mutex);
- return VCHIQ_RETRY;
+ return -EAGAIN;
}
if (type == VCHIQ_MSG_DATA) {
@@ -1037,7 +1047,7 @@ queue_message(struct vchiq_state *state, struct vchiq_service *service,
if (callback_result < 0) {
mutex_unlock(&state->slot_mutex);
VCHIQ_SERVICE_STATS_INC(service, error_count);
- return VCHIQ_ERROR;
+ return -EINVAL;
}
if (SRVTRACE_ENABLED(service,
@@ -1135,11 +1145,11 @@ queue_message(struct vchiq_state *state, struct vchiq_service *service,
remote_event_signal(&state->remote->trigger);
- return VCHIQ_SUCCESS;
+ return 0;
}
/* Called by the slot handler and application threads */
-static enum vchiq_status
+static int
queue_message_sync(struct vchiq_state *state, struct vchiq_service *service,
int msgid,
ssize_t (*copy_callback)(void *context, void *dest,
@@ -1154,7 +1164,7 @@ queue_message_sync(struct vchiq_state *state, struct vchiq_service *service,
if (VCHIQ_MSG_TYPE(msgid) != VCHIQ_MSG_RESUME &&
mutex_lock_killable(&state->sync_mutex))
- return VCHIQ_RETRY;
+ return -EAGAIN;
remote_event_wait(&state->sync_release_event, &local->sync_release);
@@ -1185,7 +1195,7 @@ queue_message_sync(struct vchiq_state *state, struct vchiq_service *service,
if (callback_result < 0) {
mutex_unlock(&state->slot_mutex);
VCHIQ_SERVICE_STATS_INC(service, error_count);
- return VCHIQ_ERROR;
+ return -EINVAL;
}
if (service) {
@@ -1223,7 +1233,7 @@ queue_message_sync(struct vchiq_state *state, struct vchiq_service *service,
if (VCHIQ_MSG_TYPE(msgid) != VCHIQ_MSG_PAUSE)
mutex_unlock(&state->sync_mutex);
- return VCHIQ_SUCCESS;
+ return 0;
}
static inline void
@@ -1299,11 +1309,11 @@ get_bulk_reason(struct vchiq_bulk *bulk)
}
/* Called by the slot handler - don't hold the bulk mutex */
-static enum vchiq_status
+static int
notify_bulks(struct vchiq_service *service, struct vchiq_bulk_queue *queue,
int retry_poll)
{
- enum vchiq_status status = VCHIQ_SUCCESS;
+ int status = 0;
vchiq_log_trace(vchiq_core_log_level, "%d: nb:%d %cx - p=%x rn=%x r=%x", service->state->id,
service->localport, (queue == &service->bulk_tx) ? 't' : 'r',
@@ -1348,7 +1358,7 @@ notify_bulks(struct vchiq_service *service, struct vchiq_bulk_queue *queue,
get_bulk_reason(bulk);
status = make_service_callback(service, reason, NULL,
bulk->userdata);
- if (status == VCHIQ_RETRY)
+ if (status == -EAGAIN)
break;
}
}
@@ -1357,9 +1367,9 @@ notify_bulks(struct vchiq_service *service, struct vchiq_bulk_queue *queue,
complete(&service->bulk_remove_event);
}
if (!retry_poll)
- status = VCHIQ_SUCCESS;
+ status = 0;
- if (status == VCHIQ_RETRY)
+ if (status == -EAGAIN)
request_poll(service->state, service, (queue == &service->bulk_tx) ?
VCHIQ_POLL_TXNOTIFY : VCHIQ_POLL_RXNOTIFY);
@@ -1398,13 +1408,12 @@ poll_services_of_group(struct vchiq_state *state, int group)
*/
service->public_fourcc = VCHIQ_FOURCC_INVALID;
- if (vchiq_close_service_internal(service, NO_CLOSE_RECVD) !=
- VCHIQ_SUCCESS)
+ if (vchiq_close_service_internal(service, NO_CLOSE_RECVD))
request_poll(state, service, VCHIQ_POLL_REMOVE);
} else if (service_flags & BIT(VCHIQ_POLL_TERMINATE)) {
vchiq_log_info(vchiq_core_log_level, "%d: ps - terminate %d<->%d",
state->id, service->localport, service->remoteport);
- if (vchiq_close_service_internal(service, NO_CLOSE_RECVD) != VCHIQ_SUCCESS)
+ if (vchiq_close_service_internal(service, NO_CLOSE_RECVD))
request_poll(state, service, VCHIQ_POLL_TERMINATE);
}
if (service_flags & BIT(VCHIQ_POLL_TXNOTIFY))
@@ -1527,14 +1536,14 @@ parse_open(struct vchiq_state *state, struct vchiq_header *header)
/* Acknowledge the OPEN */
if (service->sync) {
if (queue_message_sync(state, NULL, openack_id, memcpy_copy_callback,
- &ack_payload, sizeof(ack_payload), 0) == VCHIQ_RETRY)
+ &ack_payload, sizeof(ack_payload), 0) == -EAGAIN)
goto bail_not_ready;
/* The service is now open */
set_service_state(service, VCHIQ_SRVSTATE_OPENSYNC);
} else {
if (queue_message(state, NULL, openack_id, memcpy_copy_callback,
- &ack_payload, sizeof(ack_payload), 0) == VCHIQ_RETRY)
+ &ack_payload, sizeof(ack_payload), 0) == -EAGAIN)
goto bail_not_ready;
/* The service is now open */
@@ -1549,7 +1558,7 @@ parse_open(struct vchiq_state *state, struct vchiq_header *header)
fail_open:
/* No available service, or an invalid request - send a CLOSE */
if (queue_message(state, NULL, MAKE_CLOSE(0, VCHIQ_MSG_SRCPORT(msgid)),
- NULL, NULL, 0, 0) == VCHIQ_RETRY)
+ NULL, NULL, 0, 0) == -EAGAIN)
goto bail_not_ready;
return 1;
@@ -1688,7 +1697,7 @@ parse_message(struct vchiq_state *state, struct vchiq_header *header)
mark_service_closing_internal(service, 1);
- if (vchiq_close_service_internal(service, CLOSE_RECVD) == VCHIQ_RETRY)
+ if (vchiq_close_service_internal(service, CLOSE_RECVD) == -EAGAIN)
goto bail_not_ready;
vchiq_log_info(vchiq_core_log_level, "Close Service %c%c%c%c s:%u d:%d",
@@ -1705,7 +1714,7 @@ parse_message(struct vchiq_state *state, struct vchiq_header *header)
claim_slot(state->rx_info);
DEBUG_TRACE(PARSE_LINE);
if (make_service_callback(service, VCHIQ_MESSAGE_AVAILABLE, header,
- NULL) == VCHIQ_RETRY) {
+ NULL) == -EAGAIN) {
DEBUG_TRACE(PARSE_LINE);
goto bail_not_ready;
}
@@ -1803,7 +1812,7 @@ parse_message(struct vchiq_state *state, struct vchiq_header *header)
if (state->conn_state != VCHIQ_CONNSTATE_PAUSE_SENT) {
/* Send a PAUSE in response */
if (queue_message(state, NULL, MAKE_PAUSE, NULL, NULL, 0,
- QMFLAGS_NO_MUTEX_UNLOCK) == VCHIQ_RETRY)
+ QMFLAGS_NO_MUTEX_UNLOCK) == -EAGAIN)
goto bail_not_ready;
}
/* At this point slot_mutex is held */
@@ -1920,7 +1929,7 @@ handle_poll(struct vchiq_state *state)
case VCHIQ_CONNSTATE_PAUSING:
if (queue_message(state, NULL, MAKE_PAUSE, NULL, NULL, 0,
- QMFLAGS_NO_MUTEX_UNLOCK) != VCHIQ_RETRY) {
+ QMFLAGS_NO_MUTEX_UNLOCK) != -EAGAIN) {
vchiq_set_conn_state(state, VCHIQ_CONNSTATE_PAUSE_SENT);
} else {
/* Retry later */
@@ -1930,7 +1939,7 @@ handle_poll(struct vchiq_state *state)
case VCHIQ_CONNSTATE_RESUMING:
if (queue_message(state, NULL, MAKE_RESUME, NULL, NULL, 0,
- QMFLAGS_NO_MUTEX_LOCK) != VCHIQ_RETRY) {
+ QMFLAGS_NO_MUTEX_LOCK) != -EAGAIN) {
vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED);
} else {
/*
@@ -2086,9 +2095,9 @@ sync_func(void *v)
if ((service->remoteport == remoteport) &&
(service->srvstate == VCHIQ_SRVSTATE_OPENSYNC)) {
if (make_service_callback(service, VCHIQ_MESSAGE_AVAILABLE, header,
- NULL) == VCHIQ_RETRY)
+ NULL) == -EAGAIN)
vchiq_log_error(vchiq_sync_log_level,
- "synchronous callback to service %d returns VCHIQ_RETRY",
+ "synchronous callback to service %d returns -EAGAIN",
localport);
}
break;
@@ -2486,7 +2495,7 @@ vchiq_add_service_internal(struct vchiq_state *state,
return service;
}
-enum vchiq_status
+int
vchiq_open_service_internal(struct vchiq_service *service, int client_id)
{
struct vchiq_open_payload payload = {
@@ -2495,7 +2504,7 @@ vchiq_open_service_internal(struct vchiq_service *service, int client_id)
service->version,
service->version_min
};
- enum vchiq_status status = VCHIQ_SUCCESS;
+ int status = 0;
service->client_id = client_id;
vchiq_use_service_internal(service);
@@ -2506,12 +2515,12 @@ vchiq_open_service_internal(struct vchiq_service *service, int client_id)
sizeof(payload),
QMFLAGS_IS_BLOCKING);
- if (status != VCHIQ_SUCCESS)
+ if (status)
return status;
/* Wait for the ACK/NAK */
if (wait_for_completion_interruptible(&service->remove_event)) {
- status = VCHIQ_RETRY;
+ status = -EAGAIN;
vchiq_release_service_internal(service);
} else if ((service->srvstate != VCHIQ_SRVSTATE_OPEN) &&
(service->srvstate != VCHIQ_SRVSTATE_OPENSYNC)) {
@@ -2521,7 +2530,7 @@ vchiq_open_service_internal(struct vchiq_service *service, int client_id)
service->state->id,
srvstate_names[service->srvstate],
kref_read(&service->ref_count));
- status = VCHIQ_ERROR;
+ status = -EINVAL;
VCHIQ_SERVICE_STATS_INC(service, error_count);
vchiq_release_service_internal(service);
}
@@ -2592,7 +2601,7 @@ release_service_messages(struct vchiq_service *service)
static int
do_abort_bulks(struct vchiq_service *service)
{
- enum vchiq_status status;
+ int status;
/* Abort any outstanding bulk transfers */
if (mutex_lock_killable(&service->bulk_mutex))
@@ -2602,17 +2611,17 @@ do_abort_bulks(struct vchiq_service *service)
mutex_unlock(&service->bulk_mutex);
status = notify_bulks(service, &service->bulk_tx, NO_RETRY_POLL);
- if (status != VCHIQ_SUCCESS)
+ if (status)
return 0;
status = notify_bulks(service, &service->bulk_rx, NO_RETRY_POLL);
- return (status == VCHIQ_SUCCESS);
+ return !status;
}
-static enum vchiq_status
+static int
close_service_complete(struct vchiq_service *service, int failstate)
{
- enum vchiq_status status;
+ int status;
int is_server = (service->public_fourcc != VCHIQ_FOURCC_INVALID);
int newstate;
@@ -2639,12 +2648,12 @@ close_service_complete(struct vchiq_service *service, int failstate)
vchiq_log_error(vchiq_core_log_level, "%s(%x) called in state %s", __func__,
service->handle, srvstate_names[service->srvstate]);
WARN(1, "%s in unexpected state\n", __func__);
- return VCHIQ_ERROR;
+ return -EINVAL;
}
status = make_service_callback(service, VCHIQ_SERVICE_CLOSED, NULL, NULL);
- if (status != VCHIQ_RETRY) {
+ if (status != -EAGAIN) {
int uc = service->service_use_count;
int i;
/* Complete the close process */
@@ -2674,11 +2683,11 @@ close_service_complete(struct vchiq_service *service, int failstate)
}
/* Called by the slot handler */
-enum vchiq_status
+int
vchiq_close_service_internal(struct vchiq_service *service, int close_recvd)
{
struct vchiq_state *state = service->state;
- enum vchiq_status status = VCHIQ_SUCCESS;
+ int status = 0;
int is_server = (service->public_fourcc != VCHIQ_FOURCC_INVALID);
int close_id = MAKE_CLOSE(service->localport,
VCHIQ_MSG_DSTPORT(service->remoteport));
@@ -2696,7 +2705,7 @@ vchiq_close_service_internal(struct vchiq_service *service, int close_recvd)
__func__, srvstate_names[service->srvstate]);
} else if (is_server) {
if (service->srvstate == VCHIQ_SRVSTATE_LISTENING) {
- status = VCHIQ_ERROR;
+ status = -EINVAL;
} else {
service->client_id = 0;
service->remoteport = VCHIQ_PORT_FREE;
@@ -2725,16 +2734,16 @@ vchiq_close_service_internal(struct vchiq_service *service, int close_recvd)
case VCHIQ_SRVSTATE_OPEN:
if (close_recvd) {
if (!do_abort_bulks(service))
- status = VCHIQ_RETRY;
+ status = -EAGAIN;
}
release_service_messages(service);
- if (status == VCHIQ_SUCCESS)
+ if (!status)
status = queue_message(state, service, close_id, NULL,
NULL, 0, QMFLAGS_NO_MUTEX_UNLOCK);
- if (status != VCHIQ_SUCCESS) {
+ if (status) {
if (service->srvstate == VCHIQ_SRVSTATE_OPENSYNC)
mutex_unlock(&state->sync_mutex);
break;
@@ -2764,11 +2773,11 @@ vchiq_close_service_internal(struct vchiq_service *service, int close_recvd)
break;
if (!do_abort_bulks(service)) {
- status = VCHIQ_RETRY;
+ status = -EAGAIN;
break;
}
- if (status == VCHIQ_SUCCESS)
+ if (!status)
status = close_service_complete(service, VCHIQ_SRVSTATE_CLOSERECVD);
break;
@@ -2832,7 +2841,7 @@ vchiq_free_service_internal(struct vchiq_service *service)
vchiq_service_put(service);
}
-enum vchiq_status
+int
vchiq_connect_internal(struct vchiq_state *state, struct vchiq_instance *instance)
{
struct vchiq_service *service;
@@ -2848,21 +2857,21 @@ vchiq_connect_internal(struct vchiq_state *state, struct vchiq_instance *instanc
if (state->conn_state == VCHIQ_CONNSTATE_DISCONNECTED) {
if (queue_message(state, NULL, MAKE_CONNECT, NULL, NULL, 0,
- QMFLAGS_IS_BLOCKING) == VCHIQ_RETRY)
- return VCHIQ_RETRY;
+ QMFLAGS_IS_BLOCKING) == -EAGAIN)
+ return -EAGAIN;
vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTING);
}
if (state->conn_state == VCHIQ_CONNSTATE_CONNECTING) {
if (wait_for_completion_interruptible(&state->connect))
- return VCHIQ_RETRY;
+ return -EAGAIN;
vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED);
complete(&state->connect);
}
- return VCHIQ_SUCCESS;
+ return 0;
}
void
@@ -2879,15 +2888,15 @@ vchiq_shutdown_internal(struct vchiq_state *state, struct vchiq_instance *instan
}
}
-enum vchiq_status
+int
vchiq_close_service(struct vchiq_instance *instance, unsigned int handle)
{
/* Unregister the service */
struct vchiq_service *service = find_service_by_handle(instance, handle);
- enum vchiq_status status = VCHIQ_SUCCESS;
+ int status = 0;
if (!service)
- return VCHIQ_ERROR;
+ return -EINVAL;
vchiq_log_info(vchiq_core_log_level, "%d: close_service:%d",
service->state->id, service->localport);
@@ -2896,14 +2905,14 @@ vchiq_close_service(struct vchiq_instance *instance, unsigned int handle)
(service->srvstate == VCHIQ_SRVSTATE_LISTENING) ||
(service->srvstate == VCHIQ_SRVSTATE_HIDDEN)) {
vchiq_service_put(service);
- return VCHIQ_ERROR;
+ return -EINVAL;
}
mark_service_closing(service);
if (current == service->state->slot_handler_thread) {
status = vchiq_close_service_internal(service, NO_CLOSE_RECVD);
- WARN_ON(status == VCHIQ_RETRY);
+ WARN_ON(status == -EAGAIN);
} else {
/* Mark the service for termination by the slot handler */
request_poll(service->state, service, VCHIQ_POLL_TERMINATE);
@@ -2911,7 +2920,7 @@ vchiq_close_service(struct vchiq_instance *instance, unsigned int handle)
while (1) {
if (wait_for_completion_interruptible(&service->remove_event)) {
- status = VCHIQ_RETRY;
+ status = -EAGAIN;
break;
}
@@ -2926,10 +2935,10 @@ vchiq_close_service(struct vchiq_instance *instance, unsigned int handle)
srvstate_names[service->srvstate]);
}
- if ((status == VCHIQ_SUCCESS) &&
+ if (!status &&
(service->srvstate != VCHIQ_SRVSTATE_FREE) &&
(service->srvstate != VCHIQ_SRVSTATE_LISTENING))
- status = VCHIQ_ERROR;
+ status = -EINVAL;
vchiq_service_put(service);
@@ -2937,22 +2946,22 @@ vchiq_close_service(struct vchiq_instance *instance, unsigned int handle)
}
EXPORT_SYMBOL(vchiq_close_service);
-enum vchiq_status
+int
vchiq_remove_service(struct vchiq_instance *instance, unsigned int handle)
{
/* Unregister the service */
struct vchiq_service *service = find_service_by_handle(instance, handle);
- enum vchiq_status status = VCHIQ_SUCCESS;
+ int status = 0;
if (!service)
- return VCHIQ_ERROR;
+ return -EINVAL;
vchiq_log_info(vchiq_core_log_level, "%d: remove_service:%d",
service->state->id, service->localport);
if (service->srvstate == VCHIQ_SRVSTATE_FREE) {
vchiq_service_put(service);
- return VCHIQ_ERROR;
+ return -EINVAL;
}
mark_service_closing(service);
@@ -2966,14 +2975,14 @@ vchiq_remove_service(struct vchiq_instance *instance, unsigned int handle)
service->public_fourcc = VCHIQ_FOURCC_INVALID;
status = vchiq_close_service_internal(service, NO_CLOSE_RECVD);
- WARN_ON(status == VCHIQ_RETRY);
+ WARN_ON(status == -EAGAIN);
} else {
/* Mark the service for removal by the slot handler */
request_poll(service->state, service, VCHIQ_POLL_REMOVE);
}
while (1) {
if (wait_for_completion_interruptible(&service->remove_event)) {
- status = VCHIQ_RETRY;
+ status = -EAGAIN;
break;
}
@@ -2987,9 +2996,8 @@ vchiq_remove_service(struct vchiq_instance *instance, unsigned int handle)
srvstate_names[service->srvstate]);
}
- if ((status == VCHIQ_SUCCESS) &&
- (service->srvstate != VCHIQ_SRVSTATE_FREE))
- status = VCHIQ_ERROR;
+ if (!status && (service->srvstate != VCHIQ_SRVSTATE_FREE))
+ status = -EINVAL;
vchiq_service_put(service);
@@ -2998,15 +3006,15 @@ vchiq_remove_service(struct vchiq_instance *instance, unsigned int handle)
/*
* This function may be called by kernel threads or user threads.
- * User threads may receive VCHIQ_RETRY to indicate that a signal has been
+ * User threads may receive -EAGAIN to indicate that a signal has been
* received and the call should be retried after being returned to user
* context.
* When called in blocking mode, the userdata field points to a bulk_waiter
* structure.
*/
-enum vchiq_status vchiq_bulk_transfer(struct vchiq_instance *instance, unsigned int handle,
- void *offset, void __user *uoffset, int size, void *userdata,
- enum vchiq_bulk_mode mode, enum vchiq_bulk_dir dir)
+int vchiq_bulk_transfer(struct vchiq_instance *instance, unsigned int handle,
+ void *offset, void __user *uoffset, int size, void *userdata,
+ enum vchiq_bulk_mode mode, enum vchiq_bulk_dir dir)
{
struct vchiq_service *service = find_service_by_handle(instance, handle);
struct vchiq_bulk_queue *queue;
@@ -3016,7 +3024,7 @@ enum vchiq_status vchiq_bulk_transfer(struct vchiq_instance *instance, unsigned
const char dir_char = (dir == VCHIQ_BULK_TRANSMIT) ? 't' : 'r';
const int dir_msgtype = (dir == VCHIQ_BULK_TRANSMIT) ?
VCHIQ_MSG_BULK_TX : VCHIQ_MSG_BULK_RX;
- enum vchiq_status status = VCHIQ_ERROR;
+ int status = -EINVAL;
int payload[2];
if (!service)
@@ -3028,7 +3036,7 @@ enum vchiq_status vchiq_bulk_transfer(struct vchiq_instance *instance, unsigned
if (!offset && !uoffset)
goto error_exit;
- if (vchiq_check_service(service) != VCHIQ_SUCCESS)
+ if (vchiq_check_service(service))
goto error_exit;
switch (mode) {
@@ -3055,7 +3063,7 @@ enum vchiq_status vchiq_bulk_transfer(struct vchiq_instance *instance, unsigned
&service->bulk_tx : &service->bulk_rx;
if (mutex_lock_killable(&service->bulk_mutex)) {
- status = VCHIQ_RETRY;
+ status = -EAGAIN;
goto error_exit;
}
@@ -3064,11 +3072,11 @@ enum vchiq_status vchiq_bulk_transfer(struct vchiq_instance *instance, unsigned
do {
mutex_unlock(&service->bulk_mutex);
if (wait_for_completion_interruptible(&service->bulk_remove_event)) {
- status = VCHIQ_RETRY;
+ status = -EAGAIN;
goto error_exit;
}
if (mutex_lock_killable(&service->bulk_mutex)) {
- status = VCHIQ_RETRY;
+ status = -EAGAIN;
goto error_exit;
}
} while (queue->local_insert == queue->remove +
@@ -3101,7 +3109,7 @@ enum vchiq_status vchiq_bulk_transfer(struct vchiq_instance *instance, unsigned
* claim it here to ensure that isn't happening
*/
if (mutex_lock_killable(&state->slot_mutex)) {
- status = VCHIQ_RETRY;
+ status = -EAGAIN;
goto cancel_bulk_error_exit;
}
@@ -3121,7 +3129,7 @@ enum vchiq_status vchiq_bulk_transfer(struct vchiq_instance *instance, unsigned
QMFLAGS_IS_BLOCKING |
QMFLAGS_NO_MUTEX_LOCK |
QMFLAGS_NO_MUTEX_UNLOCK);
- if (status != VCHIQ_SUCCESS)
+ if (status)
goto unlock_both_error_exit;
queue->local_insert++;
@@ -3136,14 +3144,14 @@ enum vchiq_status vchiq_bulk_transfer(struct vchiq_instance *instance, unsigned
waiting:
vchiq_service_put(service);
- status = VCHIQ_SUCCESS;
+ status = 0;
if (bulk_waiter) {
bulk_waiter->bulk = bulk;
if (wait_for_completion_interruptible(&bulk_waiter->event))
- status = VCHIQ_RETRY;
+ status = -EAGAIN;
else if (bulk_waiter->actual == VCHIQ_BULK_ACTUAL_ABORTED)
- status = VCHIQ_ERROR;
+ status = -EINVAL;
}
return status;
@@ -3161,7 +3169,7 @@ error_exit:
return status;
}
-enum vchiq_status
+int
vchiq_queue_message(struct vchiq_instance *instance, unsigned int handle,
ssize_t (*copy_callback)(void *context, void *dest,
size_t offset, size_t maxsize),
@@ -3169,13 +3177,13 @@ vchiq_queue_message(struct vchiq_instance *instance, unsigned int handle,
size_t size)
{
struct vchiq_service *service = find_service_by_handle(instance, handle);
- enum vchiq_status status = VCHIQ_ERROR;
+ int status = -EINVAL;
int data_id;
if (!service)
goto error_exit;
- if (vchiq_check_service(service) != VCHIQ_SUCCESS)
+ if (vchiq_check_service(service))
goto error_exit;
if (!size) {
@@ -3200,7 +3208,7 @@ vchiq_queue_message(struct vchiq_instance *instance, unsigned int handle,
copy_callback, context, size, 1);
break;
default:
- status = VCHIQ_ERROR;
+ status = -EINVAL;
break;
}
@@ -3214,18 +3222,18 @@ error_exit:
int vchiq_queue_kernel_message(struct vchiq_instance *instance, unsigned int handle, void *data,
unsigned int size)
{
- enum vchiq_status status;
+ int status;
while (1) {
status = vchiq_queue_message(instance, handle, memcpy_copy_callback,
data, size);
/*
- * vchiq_queue_message() may return VCHIQ_RETRY, so we need to
+ * vchiq_queue_message() may return -EAGAIN, so we need to
* implement a retry mechanism since this function is supposed
* to block until queued
*/
- if (status != VCHIQ_RETRY)
+ if (status != -EAGAIN)
break;
msleep(1);
@@ -3277,23 +3285,23 @@ release_message_sync(struct vchiq_state *state, struct vchiq_header *header)
remote_event_signal(&state->remote->sync_release);
}
-enum vchiq_status
+int
vchiq_get_peer_version(struct vchiq_instance *instance, unsigned int handle, short *peer_version)
{
- enum vchiq_status status = VCHIQ_ERROR;
+ int status = -EINVAL;
struct vchiq_service *service = find_service_by_handle(instance, handle);
if (!service)
goto exit;
- if (vchiq_check_service(service) != VCHIQ_SUCCESS)
+ if (vchiq_check_service(service))
goto exit;
if (!peer_version)
goto exit;
*peer_version = service->peer_version;
- status = VCHIQ_SUCCESS;
+ status = 0;
exit:
if (service)
@@ -3640,18 +3648,18 @@ vchiq_loud_error_footer(void)
"============================================================================");
}
-enum vchiq_status vchiq_send_remote_use(struct vchiq_state *state)
+int vchiq_send_remote_use(struct vchiq_state *state)
{
if (state->conn_state == VCHIQ_CONNSTATE_DISCONNECTED)
- return VCHIQ_RETRY;
+ return -ENOTCONN;
return queue_message(state, NULL, MAKE_REMOTE_USE, NULL, NULL, 0, 0);
}
-enum vchiq_status vchiq_send_remote_use_active(struct vchiq_state *state)
+int vchiq_send_remote_use_active(struct vchiq_state *state)
{
if (state->conn_state == VCHIQ_CONNSTATE_DISCONNECTED)
- return VCHIQ_RETRY;
+ return -ENOTCONN;
return queue_message(state, NULL, MAKE_REMOTE_USE_ACTIVE,
NULL, NULL, 0, 0);
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h
index 8b4a38f5b3f2..ec1a3caefaea 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h
@@ -10,8 +10,8 @@
#include <linux/kref.h>
#include <linux/rcupdate.h>
#include <linux/wait.h>
-#include <linux/raspberrypi/vchiq.h>
+#include "../../include/linux/raspberrypi/vchiq.h"
#include "vchiq_cfg.h"
/* Do this so that we can test-build the code on non-rpi systems */
@@ -166,6 +166,24 @@ struct vchiq_bulk_queue {
struct vchiq_bulk bulks[VCHIQ_NUM_SERVICE_BULKS];
};
+/*
+ * Remote events provide a way of presenting several virtual doorbells to a
+ * peer (ARM host to VPU) using only one physical doorbell. They can be thought
+ * of as a way for the peer to signal a semaphore, in this case implemented as
+ * a workqueue.
+ *
+ * Remote events remain signalled until acknowledged by the receiver, and they
+ * are non-counting. They are designed in such a way as to minimise the number
+ * of interrupts and avoid unnecessary waiting.
+ *
+ * A remote_event is as small data structures that live in shared memory. It
+ * comprises two booleans - armed and fired:
+ *
+ * The sender sets fired when they signal the receiver.
+ * If fired is set, the receiver has been signalled and need not wait.
+ * The receiver sets the armed field before they begin to wait.
+ * If armed is set, the receiver is waiting and wishes to be woken by interrupt.
+ */
struct remote_event {
int armed;
int fired;
@@ -458,7 +476,7 @@ vchiq_init_slots(void *mem_base, int mem_size);
extern int
vchiq_init_state(struct vchiq_state *state, struct vchiq_slot_zero *slot_zero, struct device *dev);
-extern enum vchiq_status
+extern int
vchiq_connect_internal(struct vchiq_state *state, struct vchiq_instance *instance);
struct vchiq_service *
@@ -467,10 +485,10 @@ vchiq_add_service_internal(struct vchiq_state *state,
int srvstate, struct vchiq_instance *instance,
void (*userdata_term)(void *userdata));
-extern enum vchiq_status
+extern int
vchiq_open_service_internal(struct vchiq_service *service, int client_id);
-extern enum vchiq_status
+extern int
vchiq_close_service_internal(struct vchiq_service *service, int close_recvd);
extern void
@@ -485,7 +503,7 @@ vchiq_shutdown_internal(struct vchiq_state *state, struct vchiq_instance *instan
extern void
remote_event_pollall(struct vchiq_state *state);
-extern enum vchiq_status
+extern int
vchiq_bulk_transfer(struct vchiq_instance *instance, unsigned int handle, void *offset,
void __user *uoffset, int size, void *userdata, enum vchiq_bulk_mode mode,
enum vchiq_bulk_dir dir);
@@ -536,7 +554,7 @@ vchiq_service_get(struct vchiq_service *service);
extern void
vchiq_service_put(struct vchiq_service *service);
-extern enum vchiq_status
+extern int
vchiq_queue_message(struct vchiq_instance *instance, unsigned int handle,
ssize_t (*copy_callback)(void *context, void *dest,
size_t offset, size_t maxsize),
@@ -568,13 +586,13 @@ void vchiq_on_remote_release(struct vchiq_state *state);
int vchiq_platform_init_state(struct vchiq_state *state);
-enum vchiq_status vchiq_check_service(struct vchiq_service *service);
+int vchiq_check_service(struct vchiq_service *service);
void vchiq_on_remote_use_active(struct vchiq_state *state);
-enum vchiq_status vchiq_send_remote_use(struct vchiq_state *state);
+int vchiq_send_remote_use(struct vchiq_state *state);
-enum vchiq_status vchiq_send_remote_use_active(struct vchiq_state *state);
+int vchiq_send_remote_use_active(struct vchiq_state *state);
void vchiq_platform_conn_state_changed(struct vchiq_state *state,
enum vchiq_connstate oldstate,
@@ -584,7 +602,7 @@ void vchiq_set_conn_state(struct vchiq_state *state, enum vchiq_connstate newsta
void vchiq_log_dump_mem(const char *label, u32 addr, const void *void_mem, size_t num_bytes);
-enum vchiq_status vchiq_remove_service(struct vchiq_instance *instance, unsigned int service);
+int vchiq_remove_service(struct vchiq_instance *instance, unsigned int service);
int vchiq_get_client_id(struct vchiq_instance *instance, unsigned int service);
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_dev.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_dev.c
index 7e297494437e..841e1a535642 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_dev.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_dev.c
@@ -112,7 +112,7 @@ vchiq_ioc_queue_message(struct vchiq_instance *instance, unsigned int handle,
struct vchiq_element *elements, unsigned long count)
{
struct vchiq_io_copy_callback_context context;
- enum vchiq_status status = VCHIQ_SUCCESS;
+ int status = 0;
unsigned long i;
size_t total_size = 0;
@@ -130,9 +130,9 @@ vchiq_ioc_queue_message(struct vchiq_instance *instance, unsigned int handle,
status = vchiq_queue_message(instance, handle, vchiq_ioc_copy_element_data,
&context, total_size);
- if (status == VCHIQ_ERROR)
+ if (status == -EINVAL)
return -EIO;
- else if (status == VCHIQ_RETRY)
+ else if (status == -EAGAIN)
return -EINTR;
return 0;
}
@@ -142,7 +142,7 @@ static int vchiq_ioc_create_service(struct vchiq_instance *instance,
{
struct user_service *user_service = NULL;
struct vchiq_service *service;
- enum vchiq_status status = VCHIQ_SUCCESS;
+ int status = 0;
struct vchiq_service_params_kernel params;
int srvstate;
@@ -190,9 +190,9 @@ static int vchiq_ioc_create_service(struct vchiq_instance *instance,
if (args->is_open) {
status = vchiq_open_service_internal(service, instance->pid);
- if (status != VCHIQ_SUCCESS) {
+ if (status) {
vchiq_remove_service(instance, service->handle);
- return (status == VCHIQ_RETRY) ?
+ return (status == -EAGAIN) ?
-EINTR : -EIO;
}
}
@@ -338,7 +338,7 @@ static int vchiq_irq_queue_bulk_tx_rx(struct vchiq_instance *instance,
goto out;
}
- if ((status != VCHIQ_RETRY) || fatal_signal_pending(current) ||
+ if ((status != -EAGAIN) || fatal_signal_pending(current) ||
!waiter->bulk_waiter.bulk) {
if (waiter->bulk_waiter.bulk) {
/* Cancel the signal when the transfer completes. */
@@ -364,9 +364,9 @@ out:
vchiq_service_put(service);
if (ret)
return ret;
- else if (status == VCHIQ_ERROR)
+ else if (status == -EINVAL)
return -EIO;
- else if (status == VCHIQ_RETRY)
+ else if (status == -EAGAIN)
return -EINTR;
return 0;
}
@@ -577,7 +577,7 @@ static long
vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct vchiq_instance *instance = file->private_data;
- enum vchiq_status status = VCHIQ_SUCCESS;
+ int status = 0;
struct vchiq_service *service = NULL;
long ret = 0;
int i, rc;
@@ -598,12 +598,12 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
instance, &i))) {
status = vchiq_remove_service(instance, service->handle);
vchiq_service_put(service);
- if (status != VCHIQ_SUCCESS)
+ if (status)
break;
}
service = NULL;
- if (status == VCHIQ_SUCCESS) {
+ if (!status) {
/* Wake the completion thread and ask it to exit */
instance->closing = 1;
complete(&instance->insert_event);
@@ -627,7 +627,7 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
status = vchiq_connect_internal(instance->state, instance);
mutex_unlock(&instance->state->mutex);
- if (status == VCHIQ_SUCCESS)
+ if (!status)
instance->connected = 1;
else
vchiq_log_error(vchiq_arm_log_level,
@@ -675,7 +675,7 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
status = (cmd == VCHIQ_IOC_CLOSE_SERVICE) ?
vchiq_close_service(instance, service->handle) :
vchiq_remove_service(instance, service->handle);
- if (status != VCHIQ_SUCCESS)
+ if (status)
break;
}
@@ -686,7 +686,7 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
*/
if (user_service->close_pending &&
wait_for_completion_interruptible(&user_service->close_event))
- status = VCHIQ_RETRY;
+ status = -EAGAIN;
break;
}
@@ -862,13 +862,13 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
vchiq_service_put(service);
if (ret == 0) {
- if (status == VCHIQ_ERROR)
+ if (status == -EINVAL)
ret = -EIO;
- else if (status == VCHIQ_RETRY)
+ else if (status == -EAGAIN)
ret = -EINTR;
}
- if ((status == VCHIQ_SUCCESS) && (ret < 0) && (ret != -EINTR) && (ret != -EWOULDBLOCK))
+ if (!status && (ret < 0) && (ret != -EINTR) && (ret != -EWOULDBLOCK))
vchiq_log_info(vchiq_arm_log_level,
" ioctl instance %pK, cmd %s -> status %d, %ld",
instance, (_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ?
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_ioctl.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_ioctl.h
index 86d77f2eeea5..17550831f86c 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_ioctl.h
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_ioctl.h
@@ -5,17 +5,18 @@
#define VCHIQ_IOCTLS_H
#include <linux/ioctl.h>
-#include <linux/raspberrypi/vchiq.h>
+
+#include "../../include/linux/raspberrypi/vchiq.h"
#define VCHIQ_IOC_MAGIC 0xc4
#define VCHIQ_INVALID_HANDLE (~0)
struct vchiq_service_params {
int fourcc;
- enum vchiq_status __user (*callback)(enum vchiq_reason reason,
- struct vchiq_header *header,
- unsigned int handle,
- void *bulk_userdata);
+ int __user (*callback)(enum vchiq_reason reason,
+ struct vchiq_header *header,
+ unsigned int handle,
+ void *bulk_userdata);
void __user *userdata;
short version; /* Increment for non-trivial changes */
short version_min; /* Update for incompatible changes */
diff --git a/drivers/staging/vc04_services/vchiq-mmal/Makefile b/drivers/staging/vc04_services/vchiq-mmal/Makefile
index b2a830f48acc..6937f6534c26 100644
--- a/drivers/staging/vc04_services/vchiq-mmal/Makefile
+++ b/drivers/staging/vc04_services/vchiq-mmal/Makefile
@@ -2,8 +2,3 @@
bcm2835-mmal-vchiq-objs := mmal-vchiq.o
obj-$(CONFIG_BCM2835_VCHIQ_MMAL) += bcm2835-mmal-vchiq.o
-
-ccflags-y += \
- -I$(srctree)/$(src)/.. \
- -I$(srctree)/$(src)/../include \
- -D__VCCOREVER__=0x04000000
diff --git a/drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c b/drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c
index 294b184d4a49..258aa0e37f55 100644
--- a/drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c
+++ b/drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c
@@ -23,9 +23,9 @@
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/vmalloc.h>
-#include <linux/raspberrypi/vchiq.h>
#include <media/videobuf2-vmalloc.h>
+#include "../include/linux/raspberrypi/vchiq.h"
#include "mmal-common.h"
#include "mmal-vchiq.h"
#include "mmal-msg.h"
@@ -245,7 +245,7 @@ static void event_to_host_cb(struct vchiq_mmal_instance *instance,
/* workqueue scheduled callback
*
* we do this because it is important we do not call any other vchiq
- * sync calls from witin the message delivery thread
+ * sync calls from within the message delivery thread
*/
static void buffer_work_cb(struct work_struct *work)
{
@@ -548,10 +548,9 @@ static void bulk_abort_cb(struct vchiq_mmal_instance *instance,
}
/* incoming event service callback */
-static enum vchiq_status service_callback(struct vchiq_instance *vchiq_instance,
- enum vchiq_reason reason,
- struct vchiq_header *header,
- unsigned int handle, void *bulk_ctx)
+static int service_callback(struct vchiq_instance *vchiq_instance,
+ enum vchiq_reason reason, struct vchiq_header *header,
+ unsigned int handle, void *bulk_ctx)
{
struct vchiq_mmal_instance *instance = vchiq_get_service_userdata(vchiq_instance, handle);
u32 msg_len;
@@ -560,7 +559,7 @@ static enum vchiq_status service_callback(struct vchiq_instance *vchiq_instance,
if (!instance) {
pr_err("Message callback passed NULL instance\n");
- return VCHIQ_SUCCESS;
+ return 0;
}
switch (reason) {
@@ -644,7 +643,7 @@ static enum vchiq_status service_callback(struct vchiq_instance *vchiq_instance,
break;
}
- return VCHIQ_SUCCESS;
+ return 0;
}
static int send_synchronous_mmal_msg(struct vchiq_mmal_instance *instance,
diff --git a/drivers/staging/vme_user/vme.h b/drivers/staging/vme_user/vme.h
index 98da8d039d60..faa9816046a9 100644
--- a/drivers/staging/vme_user/vme.h
+++ b/drivers/staging/vme_user/vme.h
@@ -2,6 +2,8 @@
#ifndef _VME_H_
#define _VME_H_
+#include <linux/bitops.h>
+
/* Resource Type */
enum vme_resource_type {
VME_MASTER,
@@ -54,20 +56,20 @@ enum vme_resource_type {
#define VME_R_ROBIN_MODE 0x1
#define VME_PRIORITY_MODE 0x2
-#define VME_DMA_PATTERN (1<<0)
-#define VME_DMA_PCI (1<<1)
-#define VME_DMA_VME (1<<2)
+#define VME_DMA_PATTERN BIT(0)
+#define VME_DMA_PCI BIT(1)
+#define VME_DMA_VME BIT(2)
-#define VME_DMA_PATTERN_BYTE (1<<0)
-#define VME_DMA_PATTERN_WORD (1<<1)
-#define VME_DMA_PATTERN_INCREMENT (1<<2)
+#define VME_DMA_PATTERN_BYTE BIT(0)
+#define VME_DMA_PATTERN_WORD BIT(1)
+#define VME_DMA_PATTERN_INCREMENT BIT(2)
-#define VME_DMA_VME_TO_MEM (1<<0)
-#define VME_DMA_MEM_TO_VME (1<<1)
-#define VME_DMA_VME_TO_VME (1<<2)
-#define VME_DMA_MEM_TO_MEM (1<<3)
-#define VME_DMA_PATTERN_TO_VME (1<<4)
-#define VME_DMA_PATTERN_TO_MEM (1<<5)
+#define VME_DMA_VME_TO_MEM BIT(0)
+#define VME_DMA_MEM_TO_VME BIT(1)
+#define VME_DMA_VME_TO_VME BIT(2)
+#define VME_DMA_MEM_TO_MEM BIT(3)
+#define VME_DMA_PATTERN_TO_VME BIT(4)
+#define VME_DMA_PATTERN_TO_MEM BIT(5)
struct vme_dma_attr {
u32 type;
diff --git a/drivers/staging/vme_user/vme_bridge.h b/drivers/staging/vme_user/vme_bridge.h
index 0bbefe9851d7..11df0a5e7f7b 100644
--- a/drivers/staging/vme_user/vme_bridge.h
+++ b/drivers/staging/vme_user/vme_bridge.h
@@ -4,7 +4,7 @@
#include "vme.h"
-#define VME_CRCSR_BUF_SIZE (508*1024)
+#define VME_CRCSR_BUF_SIZE (508 * 1024)
/*
* Resource structures
*/
@@ -84,7 +84,7 @@ struct vme_error_handler {
unsigned long long end; /* End of error window */
unsigned long long first_error; /* Address of the first error */
u32 aspace; /* Address space of error window*/
- unsigned num_errors; /* Number of errors */
+ unsigned int num_errors; /* Number of errors */
};
struct vme_callback {
@@ -128,45 +128,45 @@ struct vme_bridge {
struct mutex irq_mtx;
/* Slave Functions */
- int (*slave_get) (struct vme_slave_resource *, int *,
+ int (*slave_get)(struct vme_slave_resource *, int *,
unsigned long long *, unsigned long long *, dma_addr_t *,
u32 *, u32 *);
- int (*slave_set) (struct vme_slave_resource *, int, unsigned long long,
+ int (*slave_set)(struct vme_slave_resource *, int, unsigned long long,
unsigned long long, dma_addr_t, u32, u32);
/* Master Functions */
- int (*master_get) (struct vme_master_resource *, int *,
+ int (*master_get)(struct vme_master_resource *, int *,
unsigned long long *, unsigned long long *, u32 *, u32 *,
u32 *);
- int (*master_set) (struct vme_master_resource *, int,
+ int (*master_set)(struct vme_master_resource *, int,
unsigned long long, unsigned long long, u32, u32, u32);
- ssize_t (*master_read) (struct vme_master_resource *, void *, size_t,
+ ssize_t (*master_read)(struct vme_master_resource *, void *, size_t,
loff_t);
- ssize_t (*master_write) (struct vme_master_resource *, void *, size_t,
+ ssize_t (*master_write)(struct vme_master_resource *, void *, size_t,
loff_t);
- unsigned int (*master_rmw) (struct vme_master_resource *, unsigned int,
+ unsigned int (*master_rmw)(struct vme_master_resource *, unsigned int,
unsigned int, unsigned int, loff_t);
/* DMA Functions */
- int (*dma_list_add) (struct vme_dma_list *, struct vme_dma_attr *,
+ int (*dma_list_add)(struct vme_dma_list *, struct vme_dma_attr *,
struct vme_dma_attr *, size_t);
- int (*dma_list_exec) (struct vme_dma_list *);
- int (*dma_list_empty) (struct vme_dma_list *);
+ int (*dma_list_exec)(struct vme_dma_list *);
+ int (*dma_list_empty)(struct vme_dma_list *);
/* Interrupt Functions */
- void (*irq_set) (struct vme_bridge *, int, int, int);
- int (*irq_generate) (struct vme_bridge *, int, int);
+ void (*irq_set)(struct vme_bridge *, int, int, int);
+ int (*irq_generate)(struct vme_bridge *, int, int);
/* Location monitor functions */
- int (*lm_set) (struct vme_lm_resource *, unsigned long long, u32, u32);
- int (*lm_get) (struct vme_lm_resource *, unsigned long long *, u32 *,
+ int (*lm_set)(struct vme_lm_resource *, unsigned long long, u32, u32);
+ int (*lm_get)(struct vme_lm_resource *, unsigned long long *, u32 *,
u32 *);
int (*lm_attach)(struct vme_lm_resource *, int,
void (*callback)(void *), void *);
- int (*lm_detach) (struct vme_lm_resource *, int);
+ int (*lm_detach)(struct vme_lm_resource *, int);
/* CR/CSR space functions */
- int (*slot_get) (struct vme_bridge *);
+ int (*slot_get)(struct vme_bridge *);
/* Bridge parent interface */
void *(*alloc_consistent)(struct device *dev, size_t size,
diff --git a/drivers/staging/wlan-ng/hfa384x.h b/drivers/staging/wlan-ng/hfa384x.h
index 0611e37df6ac..e33dd1b9c40e 100644
--- a/drivers/staging/wlan-ng/hfa384x.h
+++ b/drivers/staging/wlan-ng/hfa384x.h
@@ -904,40 +904,6 @@ union hfa384x_usbin {
*--------------------------------------------------------------------
*/
-struct hfa384x_pdr_pcb_partnum {
- u8 num[8];
-} __packed;
-
-struct hfa384x_pdr_pcb_tracenum {
- u8 num[8];
-} __packed;
-
-struct hfa384x_pdr_nic_serial {
- u8 num[12];
-} __packed;
-
-struct hfa384x_pdr_mkk_measurements {
- double carrier_freq;
- double occupied_band;
- double power_density;
- double tx_spur_f1;
- double tx_spur_f2;
- double tx_spur_f3;
- double tx_spur_f4;
- double tx_spur_l1;
- double tx_spur_l2;
- double tx_spur_l3;
- double tx_spur_l4;
- double rx_spur_f1;
- double rx_spur_f2;
- double rx_spur_l1;
- double rx_spur_l2;
-} __packed;
-
-struct hfa384x_pdr_nic_ramsize {
- u8 size[12]; /* units of KB */
-} __packed;
-
struct hfa384x_pdr_mfisuprange {
u16 id;
u16 variant;
@@ -959,150 +925,13 @@ struct hfa384x_pdr_nicid {
u16 minor;
} __packed;
-struct hfa384x_pdr_refdac_measurements {
- u16 value[0];
-} __packed;
-
-struct hfa384x_pdr_vgdac_measurements {
- u16 value[0];
-} __packed;
-
-struct hfa384x_pdr_level_comp_measurements {
- u16 value[0];
-} __packed;
-
-struct hfa384x_pdr_mac_address {
- u8 addr[6];
-} __packed;
-
-struct hfa384x_pdr_mkk_callname {
- u8 callname[8];
-} __packed;
-
-struct hfa384x_pdr_regdomain {
- u16 numdomains;
- u16 domain[5];
-} __packed;
-
-struct hfa384x_pdr_allowed_channel {
- u16 ch_bitmap;
-} __packed;
-
-struct hfa384x_pdr_default_channel {
- u16 channel;
-} __packed;
-
-struct hfa384x_pdr_privacy_option {
- u16 available;
-} __packed;
-
-struct hfa384x_pdr_temptype {
- u16 type;
-} __packed;
-
-struct hfa384x_pdr_refdac_setup {
- u16 ch_value[14];
-} __packed;
-
-struct hfa384x_pdr_vgdac_setup {
- u16 ch_value[14];
-} __packed;
-
-struct hfa384x_pdr_level_comp_setup {
- u16 ch_value[14];
-} __packed;
-
-struct hfa384x_pdr_trimdac_setup {
- u16 trimidac;
- u16 trimqdac;
-} __packed;
-
-struct hfa384x_pdr_ifr_setting {
- u16 value[3];
-} __packed;
-
-struct hfa384x_pdr_rfr_setting {
- u16 value[3];
-} __packed;
-
-struct hfa384x_pdr_hfa3861_baseline {
- u16 value[50];
-} __packed;
-
-struct hfa384x_pdr_hfa3861_shadow {
- u32 value[32];
-} __packed;
-
-struct hfa384x_pdr_hfa3861_ifrf {
- u32 value[20];
-} __packed;
-
-struct hfa384x_pdr_hfa3861_chcalsp {
- u16 value[14];
-} __packed;
-
-struct hfa384x_pdr_hfa3861_chcali {
- u16 value[17];
-} __packed;
-
-struct hfa384x_pdr_hfa3861_nic_config {
- u16 config_bitmap;
-} __packed;
-
-struct hfa384x_pdr_hfo_delay {
- u8 hfo_delay;
-} __packed;
-
-struct hfa384x_pdr_hfa3861_manf_testsp {
- u16 value[30];
-} __packed;
-
-struct hfa384x_pdr_hfa3861_manf_testi {
- u16 value[30];
-} __packed;
-
-struct hfa384x_pdr_end_of_pda {
- u16 crc;
-} __packed;
-
struct hfa384x_pdrec {
__le16 len; /* in words */
__le16 code;
union pdr {
- struct hfa384x_pdr_pcb_partnum pcb_partnum;
- struct hfa384x_pdr_pcb_tracenum pcb_tracenum;
- struct hfa384x_pdr_nic_serial nic_serial;
- struct hfa384x_pdr_mkk_measurements mkk_measurements;
- struct hfa384x_pdr_nic_ramsize nic_ramsize;
struct hfa384x_pdr_mfisuprange mfisuprange;
struct hfa384x_pdr_cfisuprange cfisuprange;
struct hfa384x_pdr_nicid nicid;
- struct hfa384x_pdr_refdac_measurements refdac_measurements;
- struct hfa384x_pdr_vgdac_measurements vgdac_measurements;
- struct hfa384x_pdr_level_comp_measurements level_compc_measurements;
- struct hfa384x_pdr_mac_address mac_address;
- struct hfa384x_pdr_mkk_callname mkk_callname;
- struct hfa384x_pdr_regdomain regdomain;
- struct hfa384x_pdr_allowed_channel allowed_channel;
- struct hfa384x_pdr_default_channel default_channel;
- struct hfa384x_pdr_privacy_option privacy_option;
- struct hfa384x_pdr_temptype temptype;
- struct hfa384x_pdr_refdac_setup refdac_setup;
- struct hfa384x_pdr_vgdac_setup vgdac_setup;
- struct hfa384x_pdr_level_comp_setup level_comp_setup;
- struct hfa384x_pdr_trimdac_setup trimdac_setup;
- struct hfa384x_pdr_ifr_setting ifr_setting;
- struct hfa384x_pdr_rfr_setting rfr_setting;
- struct hfa384x_pdr_hfa3861_baseline hfa3861_baseline;
- struct hfa384x_pdr_hfa3861_shadow hfa3861_shadow;
- struct hfa384x_pdr_hfa3861_ifrf hfa3861_ifrf;
- struct hfa384x_pdr_hfa3861_chcalsp hfa3861_chcalsp;
- struct hfa384x_pdr_hfa3861_chcali hfa3861_chcali;
- struct hfa384x_pdr_hfa3861_nic_config nic_config;
- struct hfa384x_pdr_hfo_delay hfo_delay;
- struct hfa384x_pdr_hfa3861_manf_testsp hfa3861_manf_testsp;
- struct hfa384x_pdr_hfa3861_manf_testi hfa3861_manf_testi;
- struct hfa384x_pdr_end_of_pda end_of_pda;
} data;
} __packed;
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
index ff49c8f3fe24..24040c118e49 100644
--- a/drivers/target/iscsi/iscsi_target_nego.c
+++ b/drivers/target/iscsi/iscsi_target_nego.c
@@ -13,6 +13,7 @@
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <net/sock.h>
+#include <trace/events/sock.h>
#include <scsi/iscsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
@@ -384,6 +385,7 @@ static void iscsi_target_sk_data_ready(struct sock *sk)
struct iscsit_conn *conn = sk->sk_user_data;
bool rc;
+ trace_sk_data_ready(sk);
pr_debug("Entering iscsi_target_sk_data_ready: conn: %p\n", conn);
write_lock_bh(&sk->sk_callback_lock);
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index fd584111da45..ce0e000b74fc 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -281,10 +281,8 @@ fd_execute_rw_aio(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
for_each_sg(sgl, sg, sgl_nents, i) {
- aio_cmd->bvecs[i].bv_page = sg_page(sg);
- aio_cmd->bvecs[i].bv_len = sg->length;
- aio_cmd->bvecs[i].bv_offset = sg->offset;
-
+ bvec_set_page(&aio_cmd->bvecs[i], sg_page(sg), sg->length,
+ sg->offset);
len += sg->length;
}
@@ -329,10 +327,7 @@ static int fd_do_rw(struct se_cmd *cmd, struct file *fd,
}
for_each_sg(sgl, sg, sgl_nents, i) {
- bvec[i].bv_page = sg_page(sg);
- bvec[i].bv_len = sg->length;
- bvec[i].bv_offset = sg->offset;
-
+ bvec_set_page(&bvec[i], sg_page(sg), sg->length, sg->offset);
len += sg->length;
}
@@ -465,10 +460,9 @@ fd_execute_write_same(struct se_cmd *cmd)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
for (i = 0; i < nolb; i++) {
- bvec[i].bv_page = sg_page(&cmd->t_data_sg[0]);
- bvec[i].bv_len = cmd->t_data_sg[0].length;
- bvec[i].bv_offset = cmd->t_data_sg[0].offset;
-
+ bvec_set_page(&bvec[i], sg_page(&cmd->t_data_sg[0]),
+ cmd->t_data_sg[0].length,
+ cmd->t_data_sg[0].offset);
len += se_dev->dev_attrib.block_size;
}
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 69a4c9581e80..e7425549e39c 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -144,8 +144,7 @@ static void pscsi_tape_read_blocksize(struct se_device *dev,
cdb[0] = MODE_SENSE;
cdb[4] = 0x0c; /* 12 bytes */
- ret = scsi_execute_req(sdev, cdb, DMA_FROM_DEVICE, buf, 12, NULL,
- HZ, 1, NULL);
+ ret = scsi_execute_cmd(sdev, cdb, REQ_OP_DRV_IN, buf, 12, HZ, 1, NULL);
if (ret)
goto out_free;
@@ -195,8 +194,8 @@ pscsi_get_inquiry_vpd_serial(struct scsi_device *sdev, struct t10_wwn *wwn)
cdb[2] = 0x80; /* Unit Serial Number */
put_unaligned_be16(INQUIRY_VPD_SERIAL_LEN, &cdb[3]);
- ret = scsi_execute_req(sdev, cdb, DMA_FROM_DEVICE, buf,
- INQUIRY_VPD_SERIAL_LEN, NULL, HZ, 1, NULL);
+ ret = scsi_execute_cmd(sdev, cdb, REQ_OP_DRV_IN, buf,
+ INQUIRY_VPD_SERIAL_LEN, HZ, 1, NULL);
if (ret)
goto out_free;
@@ -230,9 +229,8 @@ pscsi_get_inquiry_vpd_device_ident(struct scsi_device *sdev,
cdb[2] = 0x83; /* Device Identifier */
put_unaligned_be16(INQUIRY_VPD_DEVICE_IDENTIFIER_LEN, &cdb[3]);
- ret = scsi_execute_req(sdev, cdb, DMA_FROM_DEVICE, buf,
- INQUIRY_VPD_DEVICE_IDENTIFIER_LEN,
- NULL, HZ, 1, NULL);
+ ret = scsi_execute_cmd(sdev, cdb, REQ_OP_DRV_IN, buf,
+ INQUIRY_VPD_DEVICE_IDENTIFIER_LEN, HZ, 1, NULL);
if (ret)
goto out;
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 2940559c3086..15ffc8d2ac7b 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -1928,7 +1928,7 @@ static int tcmu_mmap(struct uio_info *info, struct vm_area_struct *vma)
{
struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);
- vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+ vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
vma->vm_ops = &tcmu_vm_ops;
vma->vm_private_data = udev;
diff --git a/drivers/tee/tee_core.c b/drivers/tee/tee_core.c
index 98da206cd761..452cbb8ad484 100644
--- a/drivers/tee/tee_core.c
+++ b/drivers/tee/tee_core.c
@@ -1207,7 +1207,7 @@ static int tee_client_device_match(struct device *dev,
return 0;
}
-static int tee_client_device_uevent(struct device *dev,
+static int tee_client_device_uevent(const struct device *dev,
struct kobj_uevent_env *env)
{
uuid_t *dev_id = &to_tee_client_device(dev)->id.uuid;
diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c
index 27295bda3e0b..b1c6231defad 100644
--- a/drivers/tee/tee_shm.c
+++ b/drivers/tee/tee_shm.c
@@ -11,6 +11,7 @@
#include <linux/tee_drv.h>
#include <linux/uaccess.h>
#include <linux/uio.h>
+#include <linux/highmem.h>
#include "tee_private.h"
static void shm_put_kernel_pages(struct page **pages, size_t page_count)
@@ -24,38 +25,20 @@ static void shm_put_kernel_pages(struct page **pages, size_t page_count)
static int shm_get_kernel_pages(unsigned long start, size_t page_count,
struct page **pages)
{
+ struct page *page;
size_t n;
- int rc;
-
- if (is_vmalloc_addr((void *)start)) {
- struct page *page;
-
- for (n = 0; n < page_count; n++) {
- page = vmalloc_to_page((void *)(start + PAGE_SIZE * n));
- if (!page)
- return -ENOMEM;
- get_page(page);
- pages[n] = page;
- }
- rc = page_count;
- } else {
- struct kvec *kiov;
-
- kiov = kcalloc(page_count, sizeof(*kiov), GFP_KERNEL);
- if (!kiov)
- return -ENOMEM;
-
- for (n = 0; n < page_count; n++) {
- kiov[n].iov_base = (void *)(start + n * PAGE_SIZE);
- kiov[n].iov_len = PAGE_SIZE;
- }
+ if (WARN_ON_ONCE(is_vmalloc_addr((void *)start) ||
+ is_kmap_addr((void *)start)))
+ return -EINVAL;
- rc = get_kernel_pages(kiov, page_count, 0, pages);
- kfree(kiov);
+ page = virt_to_page(start);
+ for (n = 0; n < page_count; n++) {
+ pages[n] = page + n;
+ get_page(pages[n]);
}
- return rc;
+ return page_count;
}
static void release_registered_pages(struct tee_shm *shm)
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index e052dae614eb..4cd7ab707315 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -76,6 +76,10 @@ config THERMAL_OF
Say 'Y' here if you need to build thermal infrastructure
based on device tree.
+config THERMAL_ACPI
+ depends on ACPI
+ bool
+
config THERMAL_WRITABLE_TRIPS
bool "Enable writable trip points"
help
@@ -412,16 +416,10 @@ config DA9062_THERMAL
zone.
Compatible with the DA9062 and DA9061 PMICs.
-config MTK_THERMAL
- tristate "Temperature sensor driver for mediatek SoCs"
- depends on ARCH_MEDIATEK || COMPILE_TEST
- depends on HAS_IOMEM
- depends on NVMEM || NVMEM=n
- depends on RESET_CONTROLLER
- default y
- help
- Enable this option if you want to have support for thermal management
- controller present in Mediatek SoCs
+menu "Mediatek thermal drivers"
+depends on ARCH_MEDIATEK || COMPILE_TEST
+source "drivers/thermal/mediatek/Kconfig"
+endmenu
config AMLOGIC_THERMAL
tristate "Amlogic Thermal Support"
diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile
index 2506c6c8ca83..eed300e83d48 100644
--- a/drivers/thermal/Makefile
+++ b/drivers/thermal/Makefile
@@ -4,8 +4,8 @@
#
obj-$(CONFIG_THERMAL) += thermal_sys.o
-thermal_sys-y += thermal_core.o thermal_sysfs.o \
- thermal_helpers.o
+thermal_sys-y += thermal_core.o thermal_sysfs.o
+thermal_sys-y += thermal_trip.o thermal_helpers.o
# netlink interface to manage the thermal framework
thermal_sys-$(CONFIG_THERMAL_NETLINK) += thermal_netlink.o
@@ -13,6 +13,7 @@ thermal_sys-$(CONFIG_THERMAL_NETLINK) += thermal_netlink.o
# interface to/from other layers providing sensors
thermal_sys-$(CONFIG_THERMAL_HWMON) += thermal_hwmon.o
thermal_sys-$(CONFIG_THERMAL_OF) += thermal_of.o
+thermal_sys-$(CONFIG_THERMAL_ACPI) += thermal_acpi.o
# governors
thermal_sys-$(CONFIG_THERMAL_GOV_FAIR_SHARE) += gov_fair_share.o
@@ -55,7 +56,7 @@ obj-y += st/
obj-y += qcom/
obj-y += tegra/
obj-$(CONFIG_HISI_THERMAL) += hisi_thermal.o
-obj-$(CONFIG_MTK_THERMAL) += mtk_thermal.o
+obj-y += mediatek/
obj-$(CONFIG_GENERIC_ADC_THERMAL) += thermal-generic-adc.o
obj-$(CONFIG_UNIPHIER_THERMAL) += uniphier_thermal.o
obj-$(CONFIG_AMLOGIC_THERMAL) += amlogic_thermal.o
diff --git a/drivers/thermal/amlogic_thermal.c b/drivers/thermal/amlogic_thermal.c
index d30cb791e63c..9235fda4ec1e 100644
--- a/drivers/thermal/amlogic_thermal.c
+++ b/drivers/thermal/amlogic_thermal.c
@@ -28,7 +28,6 @@
#include <linux/regmap.h>
#include <linux/thermal.h>
-#include "thermal_core.h"
#include "thermal_hwmon.h"
#define TSENSOR_CFG_REG1 0x4
diff --git a/drivers/thermal/armada_thermal.c b/drivers/thermal/armada_thermal.c
index 52d63b3997fe..2efc222a379b 100644
--- a/drivers/thermal/armada_thermal.c
+++ b/drivers/thermal/armada_thermal.c
@@ -19,8 +19,6 @@
#include <linux/regmap.h>
#include <linux/interrupt.h>
-#include "thermal_core.h"
-
/* Thermal Manager Control and Status Register */
#define PMU_TDC0_SW_RST_MASK (0x1 << 1)
#define PMU_TM_DISABLE_OFFS 0
@@ -709,12 +707,10 @@ static int armada_thermal_probe_legacy(struct platform_device *pdev,
struct armada_thermal_priv *priv)
{
struct armada_thermal_data *data = priv->data;
- struct resource *res;
void __iomem *base;
/* First memory region points towards the status register */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
if (IS_ERR(base))
return PTR_ERR(base);
@@ -761,8 +757,7 @@ static void armada_set_sane_name(struct platform_device *pdev,
}
/* Save the name locally */
- strncpy(priv->zone_name, name, THERMAL_NAME_LENGTH - 1);
- priv->zone_name[THERMAL_NAME_LENGTH - 1] = '\0';
+ strscpy(priv->zone_name, name, THERMAL_NAME_LENGTH);
/* Then check there are no '-' or hwmon core will complain */
do {
@@ -785,30 +780,23 @@ static int armada_configure_overheat_int(struct armada_thermal_priv *priv,
int sensor_id)
{
/* Retrieve the critical trip point to enable the overheat interrupt */
- const struct thermal_trip *trips = of_thermal_get_trip_points(tz);
+ int temperature;
int ret;
- int i;
-
- if (!trips)
- return -EINVAL;
-
- for (i = 0; i < of_thermal_get_ntrips(tz); i++)
- if (trips[i].type == THERMAL_TRIP_CRITICAL)
- break;
- if (i == of_thermal_get_ntrips(tz))
- return -EINVAL;
+ ret = thermal_zone_get_crit_temp(tz, &temperature);
+ if (ret)
+ return ret;
ret = armada_select_channel(priv, sensor_id);
if (ret)
return ret;
- armada_set_overheat_thresholds(priv,
- trips[i].temperature,
- trips[i].hysteresis);
+ /*
+ * A critical temperature does not have a hysteresis
+ */
+ armada_set_overheat_thresholds(priv, temperature, 0);
priv->overheat_sensor = tz;
priv->interrupt_source = sensor_id;
-
armada_enable_overheat_interrupt(priv);
return 0;
diff --git a/drivers/thermal/broadcom/bcm2835_thermal.c b/drivers/thermal/broadcom/bcm2835_thermal.c
index 2c67841a1115..23918bb76ae6 100644
--- a/drivers/thermal/broadcom/bcm2835_thermal.c
+++ b/drivers/thermal/broadcom/bcm2835_thermal.c
@@ -166,7 +166,6 @@ static int bcm2835_thermal_probe(struct platform_device *pdev)
const struct of_device_id *match;
struct thermal_zone_device *tz;
struct bcm2835_thermal_data *data;
- struct resource *res;
int err = 0;
u32 val;
unsigned long rate;
@@ -180,8 +179,7 @@ static int bcm2835_thermal_probe(struct platform_device *pdev)
if (!match)
return -EINVAL;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- data->regs = devm_ioremap_resource(&pdev->dev, res);
+ data->regs = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
if (IS_ERR(data->regs)) {
err = PTR_ERR(data->regs);
return err;
@@ -224,7 +222,8 @@ static int bcm2835_thermal_probe(struct platform_device *pdev)
*/
val = readl(data->regs + BCM2835_TS_TSENSCTL);
if (!(val & BCM2835_TS_TSENSCTL_RSTB)) {
- int trip_temp, offset, slope;
+ struct thermal_trip trip;
+ int offset, slope;
slope = thermal_zone_get_slope(tz);
offset = thermal_zone_get_offset(tz);
@@ -232,7 +231,7 @@ static int bcm2835_thermal_probe(struct platform_device *pdev)
* For now we deal only with critical, otherwise
* would need to iterate
*/
- err = tz->ops->get_trip_temp(tz, 0, &trip_temp);
+ err = thermal_zone_get_trip(tz, 0, &trip);
if (err < 0) {
dev_err(&pdev->dev,
"Not able to read trip_temp: %d\n",
@@ -249,7 +248,7 @@ static int bcm2835_thermal_probe(struct platform_device *pdev)
val |= (0xFE << BCM2835_TS_TSENSCTL_RSTDELAY_SHIFT);
/* trip_adc value from info */
- val |= bcm2835_thermal_temp2adc(trip_temp,
+ val |= bcm2835_thermal_temp2adc(trip.temperature,
offset,
slope)
<< BCM2835_TS_TSENSCTL_THOLD_SHIFT;
diff --git a/drivers/thermal/broadcom/brcmstb_thermal.c b/drivers/thermal/broadcom/brcmstb_thermal.c
index c79c6cfdd74d..4d02c28331e3 100644
--- a/drivers/thermal/broadcom/brcmstb_thermal.c
+++ b/drivers/thermal/broadcom/brcmstb_thermal.c
@@ -321,7 +321,6 @@ static int brcmstb_thermal_probe(struct platform_device *pdev)
const struct thermal_zone_device_ops *of_ops;
struct thermal_zone_device *thermal;
struct brcmstb_thermal_priv *priv;
- struct resource *res;
int irq, ret;
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
@@ -332,8 +331,7 @@ static int brcmstb_thermal_probe(struct platform_device *pdev)
if (!priv->temp_params)
return -EINVAL;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->tmon_base = devm_ioremap_resource(&pdev->dev, res);
+ priv->tmon_base = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
if (IS_ERR(priv->tmon_base))
return PTR_ERR(priv->tmon_base);
diff --git a/drivers/thermal/da9062-thermal.c b/drivers/thermal/da9062-thermal.c
index 7dcfde7a9f2c..a805a6666c44 100644
--- a/drivers/thermal/da9062-thermal.c
+++ b/drivers/thermal/da9062-thermal.c
@@ -120,44 +120,6 @@ static irqreturn_t da9062_thermal_irq_handler(int irq, void *data)
return IRQ_HANDLED;
}
-static int da9062_thermal_get_trip_type(struct thermal_zone_device *z,
- int trip,
- enum thermal_trip_type *type)
-{
- struct da9062_thermal *thermal = z->devdata;
-
- switch (trip) {
- case 0:
- *type = THERMAL_TRIP_HOT;
- break;
- default:
- dev_err(thermal->dev,
- "Driver does not support more than 1 trip-wire\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int da9062_thermal_get_trip_temp(struct thermal_zone_device *z,
- int trip,
- int *temp)
-{
- struct da9062_thermal *thermal = z->devdata;
-
- switch (trip) {
- case 0:
- *temp = DA9062_MILLI_CELSIUS(125);
- break;
- default:
- dev_err(thermal->dev,
- "Driver does not support more than 1 trip-wire\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
static int da9062_thermal_get_temp(struct thermal_zone_device *z,
int *temp)
{
@@ -172,8 +134,10 @@ static int da9062_thermal_get_temp(struct thermal_zone_device *z,
static struct thermal_zone_device_ops da9062_thermal_ops = {
.get_temp = da9062_thermal_get_temp,
- .get_trip_type = da9062_thermal_get_trip_type,
- .get_trip_temp = da9062_thermal_get_trip_temp,
+};
+
+static struct thermal_trip trips[] = {
+ { .temperature = DA9062_MILLI_CELSIUS(125), .type = THERMAL_TRIP_HOT },
};
static const struct da9062_thermal_config da9062_config = {
@@ -228,10 +192,10 @@ static int da9062_thermal_probe(struct platform_device *pdev)
INIT_DELAYED_WORK(&thermal->work, da9062_thermal_poll_on);
mutex_init(&thermal->lock);
- thermal->zone = thermal_zone_device_register(thermal->config->name,
- 1, 0, thermal,
- &da9062_thermal_ops, NULL, pp_tmp,
- 0);
+ thermal->zone = thermal_zone_device_register_with_trips(thermal->config->name,
+ trips, ARRAY_SIZE(trips), 0, thermal,
+ &da9062_thermal_ops, NULL, pp_tmp,
+ 0);
if (IS_ERR(thermal->zone)) {
dev_err(&pdev->dev, "Cannot register thermal zone device\n");
ret = PTR_ERR(thermal->zone);
diff --git a/drivers/thermal/dove_thermal.c b/drivers/thermal/dove_thermal.c
index 73182eb94bc0..056622a58d00 100644
--- a/drivers/thermal/dove_thermal.c
+++ b/drivers/thermal/dove_thermal.c
@@ -122,20 +122,17 @@ static int dove_thermal_probe(struct platform_device *pdev)
{
struct thermal_zone_device *thermal = NULL;
struct dove_thermal_priv *priv;
- struct resource *res;
int ret;
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->sensor = devm_ioremap_resource(&pdev->dev, res);
+ priv->sensor = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
if (IS_ERR(priv->sensor))
return PTR_ERR(priv->sensor);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- priv->control = devm_ioremap_resource(&pdev->dev, res);
+ priv->control = devm_platform_get_and_ioremap_resource(pdev, 1, NULL);
if (IS_ERR(priv->control))
return PTR_ERR(priv->control);
diff --git a/drivers/thermal/gov_bang_bang.c b/drivers/thermal/gov_bang_bang.c
index a08bbe33be96..1b121066521f 100644
--- a/drivers/thermal/gov_bang_bang.c
+++ b/drivers/thermal/gov_bang_bang.c
@@ -13,26 +13,28 @@
#include "thermal_core.h"
-static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip)
+static int thermal_zone_trip_update(struct thermal_zone_device *tz, int trip_id)
{
- int trip_temp, trip_hyst;
+ struct thermal_trip trip;
struct thermal_instance *instance;
+ int ret;
- tz->ops->get_trip_temp(tz, trip, &trip_temp);
+ ret = __thermal_zone_get_trip(tz, trip_id, &trip);
+ if (ret) {
+ pr_warn_once("Failed to retrieve trip point %d\n", trip_id);
+ return ret;
+ }
- if (!tz->ops->get_trip_hyst) {
- pr_warn_once("Undefined get_trip_hyst for thermal zone %s - "
- "running with default hysteresis zero\n", tz->type);
- trip_hyst = 0;
- } else
- tz->ops->get_trip_hyst(tz, trip, &trip_hyst);
+ if (!trip.hysteresis)
+ dev_info_once(&tz->device,
+ "Zero hysteresis value for thermal zone %s\n", tz->type);
dev_dbg(&tz->device, "Trip%d[temp=%d]:temp=%d:hyst=%d\n",
- trip, trip_temp, tz->temperature,
- trip_hyst);
+ trip_id, trip.temperature, tz->temperature,
+ trip.hysteresis);
list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
- if (instance->trip != trip)
+ if (instance->trip != trip_id)
continue;
/* in case fan is in initial state, switch the fan off */
@@ -50,10 +52,10 @@ static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip)
* enable fan when temperature exceeds trip_temp and disable
* the fan in case it falls below trip_temp minus hysteresis
*/
- if (instance->target == 0 && tz->temperature >= trip_temp)
+ if (instance->target == 0 && tz->temperature >= trip.temperature)
instance->target = 1;
else if (instance->target == 1 &&
- tz->temperature <= trip_temp - trip_hyst)
+ tz->temperature <= trip.temperature - trip.hysteresis)
instance->target = 0;
dev_dbg(&instance->cdev->device, "target=%d\n",
@@ -63,6 +65,8 @@ static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip)
instance->cdev->updated = false; /* cdev needs update */
mutex_unlock(&instance->cdev->lock);
}
+
+ return 0;
}
/**
@@ -95,10 +99,13 @@ static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip)
static int bang_bang_control(struct thermal_zone_device *tz, int trip)
{
struct thermal_instance *instance;
+ int ret;
lockdep_assert_held(&tz->lock);
- thermal_zone_trip_update(tz, trip);
+ ret = thermal_zone_trip_update(tz, trip);
+ if (ret)
+ return ret;
list_for_each_entry(instance, &tz->thermal_instances, tz_node)
thermal_cdev_update(instance->cdev);
diff --git a/drivers/thermal/gov_fair_share.c b/drivers/thermal/gov_fair_share.c
index 1cfeac16e7ac..aad7d5fe3a14 100644
--- a/drivers/thermal/gov_fair_share.c
+++ b/drivers/thermal/gov_fair_share.c
@@ -21,16 +21,12 @@
*/
static int get_trip_level(struct thermal_zone_device *tz)
{
- int count = 0;
- int trip_temp;
- enum thermal_trip_type trip_type;
-
- if (tz->num_trips == 0 || !tz->ops->get_trip_temp)
- return 0;
+ struct thermal_trip trip;
+ int count;
for (count = 0; count < tz->num_trips; count++) {
- tz->ops->get_trip_temp(tz, count, &trip_temp);
- if (tz->temperature < trip_temp)
+ __thermal_zone_get_trip(tz, count, &trip);
+ if (tz->temperature < trip.temperature)
break;
}
@@ -38,10 +34,8 @@ static int get_trip_level(struct thermal_zone_device *tz)
* count > 0 only if temperature is greater than first trip
* point, in which case, trip_point = count - 1
*/
- if (count > 0) {
- tz->ops->get_trip_type(tz, count - 1, &trip_type);
- trace_thermal_zone_trip(tz, count - 1, trip_type);
- }
+ if (count > 0)
+ trace_thermal_zone_trip(tz, count - 1, trip.type);
return count;
}
diff --git a/drivers/thermal/gov_power_allocator.c b/drivers/thermal/gov_power_allocator.c
index d5d4eae16771..0eaf1527d3e3 100644
--- a/drivers/thermal/gov_power_allocator.c
+++ b/drivers/thermal/gov_power_allocator.c
@@ -124,16 +124,15 @@ static void estimate_pid_constants(struct thermal_zone_device *tz,
u32 sustainable_power, int trip_switch_on,
int control_temp)
{
+ struct thermal_trip trip;
+ u32 temperature_threshold = control_temp;
int ret;
- int switch_on_temp;
- u32 temperature_threshold;
s32 k_i;
- ret = tz->ops->get_trip_temp(tz, trip_switch_on, &switch_on_temp);
- if (ret)
- switch_on_temp = 0;
+ ret = __thermal_zone_get_trip(tz, trip_switch_on, &trip);
+ if (!ret)
+ temperature_threshold -= trip.temperature;
- temperature_threshold = control_temp - switch_on_temp;
/*
* estimate_pid_constants() tries to find appropriate default
* values for thermal zones that don't provide them. If a
@@ -519,10 +518,10 @@ static void get_governor_trips(struct thermal_zone_device *tz,
last_passive = INVALID_TRIP;
for (i = 0; i < tz->num_trips; i++) {
- enum thermal_trip_type type;
+ struct thermal_trip trip;
int ret;
- ret = tz->ops->get_trip_type(tz, i, &type);
+ ret = __thermal_zone_get_trip(tz, i, &trip);
if (ret) {
dev_warn(&tz->device,
"Failed to get trip point %d type: %d\n", i,
@@ -530,14 +529,14 @@ static void get_governor_trips(struct thermal_zone_device *tz,
continue;
}
- if (type == THERMAL_TRIP_PASSIVE) {
+ if (trip.type == THERMAL_TRIP_PASSIVE) {
if (!found_first_passive) {
params->trip_switch_on = i;
found_first_passive = true;
} else {
last_passive = i;
}
- } else if (type == THERMAL_TRIP_ACTIVE) {
+ } else if (trip.type == THERMAL_TRIP_ACTIVE) {
last_active = i;
} else {
break;
@@ -632,7 +631,7 @@ static int power_allocator_bind(struct thermal_zone_device *tz)
{
int ret;
struct power_allocator_params *params;
- int control_temp;
+ struct thermal_trip trip;
ret = check_power_actors(tz);
if (ret)
@@ -658,13 +657,12 @@ static int power_allocator_bind(struct thermal_zone_device *tz)
get_governor_trips(tz, params);
if (tz->num_trips > 0) {
- ret = tz->ops->get_trip_temp(tz,
- params->trip_max_desired_temperature,
- &control_temp);
+ ret = __thermal_zone_get_trip(tz, params->trip_max_desired_temperature,
+ &trip);
if (!ret)
estimate_pid_constants(tz, tz->tzp->sustainable_power,
params->trip_switch_on,
- control_temp);
+ trip.temperature);
}
reset_pid_controller(params);
@@ -694,11 +692,11 @@ static void power_allocator_unbind(struct thermal_zone_device *tz)
tz->governor_data = NULL;
}
-static int power_allocator_throttle(struct thermal_zone_device *tz, int trip)
+static int power_allocator_throttle(struct thermal_zone_device *tz, int trip_id)
{
- int ret;
- int switch_on_temp, control_temp;
struct power_allocator_params *params = tz->governor_data;
+ struct thermal_trip trip;
+ int ret;
bool update;
lockdep_assert_held(&tz->lock);
@@ -707,13 +705,12 @@ static int power_allocator_throttle(struct thermal_zone_device *tz, int trip)
* We get called for every trip point but we only need to do
* our calculations once
*/
- if (trip != params->trip_max_desired_temperature)
+ if (trip_id != params->trip_max_desired_temperature)
return 0;
- ret = tz->ops->get_trip_temp(tz, params->trip_switch_on,
- &switch_on_temp);
- if (!ret && (tz->temperature < switch_on_temp)) {
- update = (tz->last_temperature >= switch_on_temp);
+ ret = __thermal_zone_get_trip(tz, params->trip_switch_on, &trip);
+ if (!ret && (tz->temperature < trip.temperature)) {
+ update = (tz->last_temperature >= trip.temperature);
tz->passive = 0;
reset_pid_controller(params);
allow_maximum_power(tz, update);
@@ -722,16 +719,14 @@ static int power_allocator_throttle(struct thermal_zone_device *tz, int trip)
tz->passive = 1;
- ret = tz->ops->get_trip_temp(tz, params->trip_max_desired_temperature,
- &control_temp);
+ ret = __thermal_zone_get_trip(tz, params->trip_max_desired_temperature, &trip);
if (ret) {
- dev_warn(&tz->device,
- "Failed to get the maximum desired temperature: %d\n",
+ dev_warn(&tz->device, "Failed to get the maximum desired temperature: %d\n",
ret);
return ret;
}
- return allocate_power(tz, control_temp);
+ return allocate_power(tz, trip.temperature);
}
static struct thermal_governor thermal_gov_power_allocator = {
diff --git a/drivers/thermal/gov_step_wise.c b/drivers/thermal/gov_step_wise.c
index cdd3354bc27f..31235e169c5a 100644
--- a/drivers/thermal/gov_step_wise.c
+++ b/drivers/thermal/gov_step_wise.c
@@ -95,30 +95,28 @@ static void update_passive_instance(struct thermal_zone_device *tz,
tz->passive += value;
}
-static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip)
+static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip_id)
{
- int trip_temp;
- enum thermal_trip_type trip_type;
enum thermal_trend trend;
struct thermal_instance *instance;
+ struct thermal_trip trip;
bool throttle = false;
int old_target;
- tz->ops->get_trip_temp(tz, trip, &trip_temp);
- tz->ops->get_trip_type(tz, trip, &trip_type);
+ __thermal_zone_get_trip(tz, trip_id, &trip);
- trend = get_tz_trend(tz, trip);
+ trend = get_tz_trend(tz, trip_id);
- if (tz->temperature >= trip_temp) {
+ if (tz->temperature >= trip.temperature) {
throttle = true;
- trace_thermal_zone_trip(tz, trip, trip_type);
+ trace_thermal_zone_trip(tz, trip_id, trip.type);
}
dev_dbg(&tz->device, "Trip%d[type=%d,temp=%d]:trend=%d,throttle=%d\n",
- trip, trip_type, trip_temp, trend, throttle);
+ trip_id, trip.type, trip.temperature, trend, throttle);
list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
- if (instance->trip != trip)
+ if (instance->trip != trip_id)
continue;
old_target = instance->target;
@@ -132,11 +130,11 @@ static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip)
/* Activate a passive thermal instance */
if (old_target == THERMAL_NO_TARGET &&
instance->target != THERMAL_NO_TARGET)
- update_passive_instance(tz, trip_type, 1);
+ update_passive_instance(tz, trip.type, 1);
/* Deactivate a passive thermal instance */
else if (old_target != THERMAL_NO_TARGET &&
instance->target == THERMAL_NO_TARGET)
- update_passive_instance(tz, trip_type, -1);
+ update_passive_instance(tz, trip.type, -1);
instance->initialized = true;
mutex_lock(&instance->cdev->lock);
diff --git a/drivers/thermal/hisi_thermal.c b/drivers/thermal/hisi_thermal.c
index d6974db7aaf7..32a7c3cf073d 100644
--- a/drivers/thermal/hisi_thermal.c
+++ b/drivers/thermal/hisi_thermal.c
@@ -16,8 +16,7 @@
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/of_device.h>
-
-#include "thermal_core.h"
+#include <linux/thermal.h>
#define HI6220_TEMP0_LAG (0x0)
#define HI6220_TEMP0_TH (0x4)
@@ -427,10 +426,6 @@ static int hi3660_thermal_probe(struct hisi_thermal_data *data)
data->sensor[0].irq_name = "tsensor_a73";
data->sensor[0].data = data;
- data->sensor[1].id = HI3660_LITTLE_SENSOR;
- data->sensor[1].irq_name = "tsensor_a53";
- data->sensor[1].data = data;
-
return 0;
}
@@ -482,7 +477,7 @@ static int hisi_thermal_register_sensor(struct platform_device *pdev,
struct hisi_thermal_sensor *sensor)
{
int ret, i;
- const struct thermal_trip *trip;
+ struct thermal_trip trip;
sensor->tzd = devm_thermal_of_zone_register(&pdev->dev,
sensor->id, sensor,
@@ -495,11 +490,12 @@ static int hisi_thermal_register_sensor(struct platform_device *pdev,
return ret;
}
- trip = of_thermal_get_trip_points(sensor->tzd);
+ for (i = 0; i < thermal_zone_get_num_trips(sensor->tzd); i++) {
+
+ thermal_zone_get_trip(sensor->tzd, i, &trip);
- for (i = 0; i < of_thermal_get_ntrips(sensor->tzd); i++) {
- if (trip[i].type == THERMAL_TRIP_PASSIVE) {
- sensor->thres_temp = trip[i].temperature;
+ if (trip.type == THERMAL_TRIP_PASSIVE) {
+ sensor->thres_temp = trip.temperature;
break;
}
}
diff --git a/drivers/thermal/imx8mm_thermal.c b/drivers/thermal/imx8mm_thermal.c
index d247b48696cb..72b5d6f319c1 100644
--- a/drivers/thermal/imx8mm_thermal.c
+++ b/drivers/thermal/imx8mm_thermal.c
@@ -17,7 +17,6 @@
#include <linux/slab.h>
#include <linux/thermal.h>
-#include "thermal_core.h"
#include "thermal_hwmon.h"
#define TER 0x0 /* TMU enable */
diff --git a/drivers/thermal/imx_sc_thermal.c b/drivers/thermal/imx_sc_thermal.c
index 4df925e3a80b..f32e59e74623 100644
--- a/drivers/thermal/imx_sc_thermal.c
+++ b/drivers/thermal/imx_sc_thermal.c
@@ -13,7 +13,6 @@
#include <linux/slab.h>
#include <linux/thermal.h>
-#include "thermal_core.h"
#include "thermal_hwmon.h"
#define IMX_SC_MISC_FUNC_GET_TEMP 13
@@ -88,7 +87,7 @@ static int imx_sc_thermal_probe(struct platform_device *pdev)
if (!resource_id)
return -EINVAL;
- for (i = 0; resource_id[i] > 0; i++) {
+ for (i = 0; resource_id[i] >= 0; i++) {
sensor = devm_kzalloc(&pdev->dev, sizeof(*sensor), GFP_KERNEL);
if (!sensor)
@@ -127,7 +126,11 @@ static int imx_sc_thermal_probe(struct platform_device *pdev)
return 0;
}
-static int imx_sc_sensors[] = { IMX_SC_R_SYSTEM, IMX_SC_R_PMIC_0, -1 };
+static const int imx_sc_sensors[] = {
+ IMX_SC_R_SYSTEM, IMX_SC_R_PMIC_0,
+ IMX_SC_R_AP_0, IMX_SC_R_AP_1,
+ IMX_SC_R_GPU_0_PID0, IMX_SC_R_GPU_1_PID0,
+ IMX_SC_R_DRC_0, -1 };
static const struct of_device_id imx_sc_thermal_table[] = {
{ .compatible = "fsl,imx-sc-thermal", .data = imx_sc_sensors },
diff --git a/drivers/thermal/imx_thermal.c b/drivers/thermal/imx_thermal.c
index 16663373b682..fb0d5cab70af 100644
--- a/drivers/thermal/imx_thermal.c
+++ b/drivers/thermal/imx_thermal.c
@@ -76,7 +76,6 @@
enum imx_thermal_trip {
IMX_TRIP_PASSIVE,
IMX_TRIP_CRITICAL,
- IMX_TRIP_NUM,
};
#define IMX_POLLING_DELAY 2000 /* millisecond */
@@ -115,6 +114,11 @@ struct thermal_soc_data {
u32 low_alarm_shift;
};
+static struct thermal_trip trips[] = {
+ [IMX_TRIP_PASSIVE] = { .type = THERMAL_TRIP_PASSIVE },
+ [IMX_TRIP_CRITICAL] = { .type = THERMAL_TRIP_CRITICAL },
+};
+
static struct thermal_soc_data thermal_imx6q_data = {
.version = TEMPMON_IMX6Q,
@@ -201,8 +205,6 @@ struct imx_thermal_data {
struct thermal_cooling_device *cdev;
struct regmap *tempmon;
u32 c1, c2; /* See formula in imx_init_calib() */
- int temp_passive;
- int temp_critical;
int temp_max;
int alarm_temp;
int last_temp;
@@ -279,12 +281,12 @@ static int imx_get_temp(struct thermal_zone_device *tz, int *temp)
/* Update alarm value to next higher trip point for TEMPMON_IMX6Q */
if (data->socdata->version == TEMPMON_IMX6Q) {
- if (data->alarm_temp == data->temp_passive &&
- *temp >= data->temp_passive)
- imx_set_alarm_temp(data, data->temp_critical);
- if (data->alarm_temp == data->temp_critical &&
- *temp < data->temp_passive) {
- imx_set_alarm_temp(data, data->temp_passive);
+ if (data->alarm_temp == trips[IMX_TRIP_PASSIVE].temperature &&
+ *temp >= trips[IMX_TRIP_PASSIVE].temperature)
+ imx_set_alarm_temp(data, trips[IMX_TRIP_CRITICAL].temperature);
+ if (data->alarm_temp == trips[IMX_TRIP_CRITICAL].temperature &&
+ *temp < trips[IMX_TRIP_PASSIVE].temperature) {
+ imx_set_alarm_temp(data, trips[IMX_TRIP_PASSIVE].temperature);
dev_dbg(&tz->device, "thermal alarm off: T < %d\n",
data->alarm_temp / 1000);
}
@@ -330,29 +332,10 @@ static int imx_change_mode(struct thermal_zone_device *tz,
return 0;
}
-static int imx_get_trip_type(struct thermal_zone_device *tz, int trip,
- enum thermal_trip_type *type)
-{
- *type = (trip == IMX_TRIP_PASSIVE) ? THERMAL_TRIP_PASSIVE :
- THERMAL_TRIP_CRITICAL;
- return 0;
-}
-
static int imx_get_crit_temp(struct thermal_zone_device *tz, int *temp)
{
- struct imx_thermal_data *data = tz->devdata;
-
- *temp = data->temp_critical;
- return 0;
-}
-
-static int imx_get_trip_temp(struct thermal_zone_device *tz, int trip,
- int *temp)
-{
- struct imx_thermal_data *data = tz->devdata;
+ *temp = trips[IMX_TRIP_CRITICAL].temperature;
- *temp = (trip == IMX_TRIP_PASSIVE) ? data->temp_passive :
- data->temp_critical;
return 0;
}
@@ -371,10 +354,10 @@ static int imx_set_trip_temp(struct thermal_zone_device *tz, int trip,
return -EPERM;
/* do not allow passive to be set higher than critical */
- if (temp < 0 || temp > data->temp_critical)
+ if (temp < 0 || temp > trips[IMX_TRIP_CRITICAL].temperature)
return -EINVAL;
- data->temp_passive = temp;
+ trips[IMX_TRIP_PASSIVE].temperature = temp;
imx_set_alarm_temp(data, temp);
@@ -423,8 +406,6 @@ static struct thermal_zone_device_ops imx_tz_ops = {
.unbind = imx_unbind,
.get_temp = imx_get_temp,
.change_mode = imx_change_mode,
- .get_trip_type = imx_get_trip_type,
- .get_trip_temp = imx_get_trip_temp,
.get_crit_temp = imx_get_crit_temp,
.set_trip_temp = imx_set_trip_temp,
};
@@ -507,8 +488,8 @@ static void imx_init_temp_grade(struct platform_device *pdev, u32 ocotp_mem0)
* Set the critical trip point at 5 °C under max
* Set the passive trip point at 10 °C under max (changeable via sysfs)
*/
- data->temp_critical = data->temp_max - (1000 * 5);
- data->temp_passive = data->temp_max - (1000 * 10);
+ trips[IMX_TRIP_PASSIVE].temperature = data->temp_max - (1000 * 10);
+ trips[IMX_TRIP_CRITICAL].temperature = data->temp_max - (1000 * 5);
}
static int imx_init_from_tempmon_data(struct platform_device *pdev)
@@ -743,12 +724,13 @@ static int imx_thermal_probe(struct platform_device *pdev)
goto legacy_cleanup;
}
- data->tz = thermal_zone_device_register("imx_thermal_zone",
- IMX_TRIP_NUM,
- BIT(IMX_TRIP_PASSIVE), data,
- &imx_tz_ops, NULL,
- IMX_PASSIVE_DELAY,
- IMX_POLLING_DELAY);
+ data->tz = thermal_zone_device_register_with_trips("imx_thermal_zone",
+ trips,
+ ARRAY_SIZE(trips),
+ BIT(IMX_TRIP_PASSIVE), data,
+ &imx_tz_ops, NULL,
+ IMX_PASSIVE_DELAY,
+ IMX_POLLING_DELAY);
if (IS_ERR(data->tz)) {
ret = PTR_ERR(data->tz);
dev_err(&pdev->dev,
@@ -758,8 +740,8 @@ static int imx_thermal_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "%s CPU temperature grade - max:%dC"
" critical:%dC passive:%dC\n", data->temp_grade,
- data->temp_max / 1000, data->temp_critical / 1000,
- data->temp_passive / 1000);
+ data->temp_max / 1000, trips[IMX_TRIP_CRITICAL].temperature / 1000,
+ trips[IMX_TRIP_PASSIVE].temperature / 1000);
/* Enable measurements at ~ 10 Hz */
regmap_write(map, data->socdata->measure_freq_ctrl + REG_CLR,
@@ -767,10 +749,10 @@ static int imx_thermal_probe(struct platform_device *pdev)
measure_freq = DIV_ROUND_UP(32768, 10); /* 10 Hz */
regmap_write(map, data->socdata->measure_freq_ctrl + REG_SET,
measure_freq << data->socdata->measure_freq_shift);
- imx_set_alarm_temp(data, data->temp_passive);
+ imx_set_alarm_temp(data, trips[IMX_TRIP_PASSIVE].temperature);
if (data->socdata->version == TEMPMON_IMX6SX)
- imx_set_panic_temp(data, data->temp_critical);
+ imx_set_panic_temp(data, trips[IMX_TRIP_CRITICAL].temperature);
regmap_write(map, data->socdata->sensor_ctrl + REG_CLR,
data->socdata->power_down_mask);
diff --git a/drivers/thermal/intel/Kconfig b/drivers/thermal/intel/Kconfig
index f0c845679250..b5808f92702d 100644
--- a/drivers/thermal/intel/Kconfig
+++ b/drivers/thermal/intel/Kconfig
@@ -3,6 +3,9 @@ config INTEL_POWERCLAMP
tristate "Intel PowerClamp idle injection driver"
depends on X86
depends on CPU_SUP_INTEL
+ depends on CPU_IDLE
+ select POWERCAP
+ select IDLE_INJECT
help
Enable this to enable Intel PowerClamp idle injection driver. This
enforce idle time which results in more package C-state residency. The
@@ -12,11 +15,16 @@ config X86_THERMAL_VECTOR
def_bool y
depends on X86 && CPU_SUP_INTEL && X86_LOCAL_APIC
+config INTEL_TCC
+ bool
+ depends on X86
+
config X86_PKG_TEMP_THERMAL
tristate "X86 package temperature thermal driver"
depends on X86_THERMAL_VECTOR
select THERMAL_GOV_USER_SPACE
select THERMAL_WRITABLE_TRIPS
+ select INTEL_TCC
default m
help
Enable this to register CPU digital sensor for package temperature as
@@ -28,6 +36,7 @@ config INTEL_SOC_DTS_IOSF_CORE
tristate
depends on X86 && PCI
select IOSF_MBI
+ select INTEL_TCC
help
This is becoming a common feature for Intel SoCs to expose the additional
digital temperature sensors (DTSs) using side band interface (IOSF). This
@@ -75,6 +84,7 @@ config INTEL_BXT_PMIC_THERMAL
config INTEL_PCH_THERMAL
tristate "Intel PCH Thermal Reporting Driver"
depends on X86 && PCI
+ select THERMAL_ACPI if ACPI
help
Enable this to support thermal reporting on certain intel PCHs.
Thermal reporting device will provide temperature reading,
@@ -83,6 +93,7 @@ config INTEL_PCH_THERMAL
config INTEL_TCC_COOLING
tristate "Intel TCC offset cooling Driver"
depends on X86
+ select INTEL_TCC
help
Enable this to support system cooling by adjusting the effective TCC
activation temperature via the TCC Offset register, which is widely
diff --git a/drivers/thermal/intel/Makefile b/drivers/thermal/intel/Makefile
index 9a8d8054f316..5d8833c82ab6 100644
--- a/drivers/thermal/intel/Makefile
+++ b/drivers/thermal/intel/Makefile
@@ -2,6 +2,7 @@
#
# Makefile for various Intel thermal drivers.
+obj-$(CONFIG_INTEL_TCC) += intel_tcc.o
obj-$(CONFIG_INTEL_POWERCLAMP) += intel_powerclamp.o
obj-$(CONFIG_X86_PKG_TEMP_THERMAL) += x86_pkg_temp_thermal.o
obj-$(CONFIG_INTEL_SOC_DTS_IOSF_CORE) += intel_soc_dts_iosf.o
diff --git a/drivers/thermal/intel/int340x_thermal/Kconfig b/drivers/thermal/intel/int340x_thermal/Kconfig
index 5d046de96a5d..300ea53e9b33 100644
--- a/drivers/thermal/intel/int340x_thermal/Kconfig
+++ b/drivers/thermal/intel/int340x_thermal/Kconfig
@@ -9,7 +9,9 @@ config INT340X_THERMAL
select THERMAL_GOV_USER_SPACE
select ACPI_THERMAL_REL
select ACPI_FAN
+ select THERMAL_ACPI
select INTEL_SOC_DTS_IOSF_CORE
+ select INTEL_TCC
select PROC_THERMAL_MMIO_RAPL if POWERCAP
help
Newer laptops and tablets that use ACPI may have thermal sensors and
diff --git a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
index db8a6f63657d..d0295123cc3e 100644
--- a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
+++ b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
@@ -60,6 +60,7 @@ struct int3400_thermal_priv {
int odvp_count;
int *odvp;
u32 os_uuid_mask;
+ int production_mode;
struct odvp_attr *odvp_attrs;
};
@@ -130,10 +131,7 @@ static ssize_t available_uuids_show(struct device *dev,
for (i = 0; i < INT3400_THERMAL_MAXIMUM_UUID; i++) {
if (priv->uuid_bitmap & (1 << i))
- length += scnprintf(&buf[length],
- PAGE_SIZE - length,
- "%s\n",
- int3400_thermal_uuids[i]);
+ length += sysfs_emit_at(buf, length, int3400_thermal_uuids[i]);
}
return length;
@@ -151,10 +149,7 @@ static ssize_t current_uuid_show(struct device *dev,
for (i = 0; i <= INT3400_THERMAL_CRITICAL; i++) {
if (priv->os_uuid_mask & BIT(i))
- length += scnprintf(&buf[length],
- PAGE_SIZE - length,
- "%s\n",
- int3400_thermal_uuids[i]);
+ length += sysfs_emit_at(buf, length, int3400_thermal_uuids[i]);
}
if (length)
@@ -315,6 +310,44 @@ end:
return result;
}
+static ssize_t production_mode_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct int3400_thermal_priv *priv = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%d\n", priv->production_mode);
+}
+
+static DEVICE_ATTR_RO(production_mode);
+
+static int production_mode_init(struct int3400_thermal_priv *priv)
+{
+ unsigned long long mode;
+ acpi_status status;
+ int ret;
+
+ priv->production_mode = -1;
+
+ status = acpi_evaluate_integer(priv->adev->handle, "DCFG", NULL, &mode);
+ /* If the method is not present, this is not an error */
+ if (ACPI_FAILURE(status))
+ return 0;
+
+ ret = sysfs_create_file(&priv->pdev->dev.kobj, &dev_attr_production_mode.attr);
+ if (ret)
+ return ret;
+
+ priv->production_mode = mode;
+
+ return 0;
+}
+
+static void production_mode_exit(struct int3400_thermal_priv *priv)
+{
+ if (priv->production_mode >= 0)
+ sysfs_remove_file(&priv->pdev->dev.kobj, &dev_attr_production_mode.attr);
+}
+
static ssize_t odvp_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -610,8 +643,15 @@ static int int3400_thermal_probe(struct platform_device *pdev)
if (result)
goto free_sysfs;
+ result = production_mode_init(priv);
+ if (result)
+ goto free_notify;
+
return 0;
+free_notify:
+ acpi_remove_notify_handler(priv->adev->handle, ACPI_DEVICE_NOTIFY,
+ int3400_notify);
free_sysfs:
cleanup_odvp(priv);
if (!ZERO_OR_NULL_PTR(priv->data_vault)) {
@@ -638,6 +678,8 @@ static int int3400_thermal_remove(struct platform_device *pdev)
{
struct int3400_thermal_priv *priv = platform_get_drvdata(pdev);
+ production_mode_exit(priv);
+
acpi_remove_notify_handler(
priv->adev->handle, ACPI_DEVICE_NOTIFY,
int3400_notify);
diff --git a/drivers/thermal/intel/int340x_thermal/int3403_thermal.c b/drivers/thermal/intel/int340x_thermal/int3403_thermal.c
index 71d084c4c456..e418d270bc76 100644
--- a/drivers/thermal/intel/int340x_thermal/int3403_thermal.c
+++ b/drivers/thermal/intel/int340x_thermal/int3403_thermal.c
@@ -69,7 +69,7 @@ static void int3403_notify(acpi_handle handle,
THERMAL_TRIP_VIOLATED);
break;
case INT3403_PERF_TRIP_POINT_CHANGED:
- int340x_thermal_read_trips(obj->int340x_zone);
+ int340x_thermal_update_trips(obj->int340x_zone);
int340x_thermal_zone_device_update(obj->int340x_zone,
THERMAL_TRIP_CHANGED);
break;
diff --git a/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c b/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c
index 0a4eaa307156..00665967ca52 100644
--- a/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c
+++ b/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c
@@ -18,9 +18,6 @@ static int int340x_thermal_get_zone_temp(struct thermal_zone_device *zone,
unsigned long long tmp;
acpi_status status;
- if (d->override_ops && d->override_ops->get_temp)
- return d->override_ops->get_temp(zone, temp);
-
status = acpi_evaluate_integer(d->adev->handle, "_TMP", NULL, &tmp);
if (ACPI_FAILURE(status))
return -EIO;
@@ -32,125 +29,30 @@ static int int340x_thermal_get_zone_temp(struct thermal_zone_device *zone,
if (conv_temp < 0)
return conv_temp;
- *temp = (unsigned long)conv_temp * 10;
- } else
+ *temp = conv_temp * 10;
+ } else {
/* _TMP returns the temperature in tenths of degrees Kelvin */
*temp = deci_kelvin_to_millicelsius(tmp);
-
- return 0;
-}
-
-static int int340x_thermal_get_trip_temp(struct thermal_zone_device *zone,
- int trip, int *temp)
-{
- struct int34x_thermal_zone *d = zone->devdata;
- int i, ret = 0;
-
- if (d->override_ops && d->override_ops->get_trip_temp)
- return d->override_ops->get_trip_temp(zone, trip, temp);
-
- mutex_lock(&d->trip_mutex);
-
- if (trip < d->aux_trip_nr)
- *temp = d->aux_trips[trip];
- else if (trip == d->crt_trip_id)
- *temp = d->crt_temp;
- else if (trip == d->psv_trip_id)
- *temp = d->psv_temp;
- else if (trip == d->hot_trip_id)
- *temp = d->hot_temp;
- else {
- for (i = 0; i < INT340X_THERMAL_MAX_ACT_TRIP_COUNT; i++) {
- if (d->act_trips[i].valid &&
- d->act_trips[i].id == trip) {
- *temp = d->act_trips[i].temp;
- break;
- }
- }
- if (i == INT340X_THERMAL_MAX_ACT_TRIP_COUNT)
- ret = -EINVAL;
- }
-
- mutex_unlock(&d->trip_mutex);
-
- return ret;
-}
-
-static int int340x_thermal_get_trip_type(struct thermal_zone_device *zone,
- int trip,
- enum thermal_trip_type *type)
-{
- struct int34x_thermal_zone *d = zone->devdata;
- int i, ret = 0;
-
- if (d->override_ops && d->override_ops->get_trip_type)
- return d->override_ops->get_trip_type(zone, trip, type);
-
- mutex_lock(&d->trip_mutex);
-
- if (trip < d->aux_trip_nr)
- *type = THERMAL_TRIP_PASSIVE;
- else if (trip == d->crt_trip_id)
- *type = THERMAL_TRIP_CRITICAL;
- else if (trip == d->hot_trip_id)
- *type = THERMAL_TRIP_HOT;
- else if (trip == d->psv_trip_id)
- *type = THERMAL_TRIP_PASSIVE;
- else {
- for (i = 0; i < INT340X_THERMAL_MAX_ACT_TRIP_COUNT; i++) {
- if (d->act_trips[i].valid &&
- d->act_trips[i].id == trip) {
- *type = THERMAL_TRIP_ACTIVE;
- break;
- }
- }
- if (i == INT340X_THERMAL_MAX_ACT_TRIP_COUNT)
- ret = -EINVAL;
}
- mutex_unlock(&d->trip_mutex);
-
- return ret;
+ return 0;
}
static int int340x_thermal_set_trip_temp(struct thermal_zone_device *zone,
- int trip, int temp)
+ int trip, int temp)
{
struct int34x_thermal_zone *d = zone->devdata;
+ char name[] = {'P', 'A', 'T', '0' + trip, '\0'};
acpi_status status;
- char name[10];
- if (d->override_ops && d->override_ops->set_trip_temp)
- return d->override_ops->set_trip_temp(zone, trip, temp);
+ if (trip > 9)
+ return -EINVAL;
- snprintf(name, sizeof(name), "PAT%d", trip);
status = acpi_execute_simple_method(d->adev->handle, name,
- millicelsius_to_deci_kelvin(temp));
+ millicelsius_to_deci_kelvin(temp));
if (ACPI_FAILURE(status))
return -EIO;
- d->aux_trips[trip] = temp;
-
- return 0;
-}
-
-
-static int int340x_thermal_get_trip_hyst(struct thermal_zone_device *zone,
- int trip, int *temp)
-{
- struct int34x_thermal_zone *d = zone->devdata;
- acpi_status status;
- unsigned long long hyst;
-
- if (d->override_ops && d->override_ops->get_trip_hyst)
- return d->override_ops->get_trip_hyst(zone, trip, temp);
-
- status = acpi_evaluate_integer(d->adev->handle, "GTSH", NULL, &hyst);
- if (ACPI_FAILURE(status))
- *temp = 0;
- else
- *temp = hyst * 100;
-
return 0;
}
@@ -161,67 +63,49 @@ static void int340x_thermal_critical(struct thermal_zone_device *zone)
static struct thermal_zone_device_ops int340x_thermal_zone_ops = {
.get_temp = int340x_thermal_get_zone_temp,
- .get_trip_temp = int340x_thermal_get_trip_temp,
- .get_trip_type = int340x_thermal_get_trip_type,
.set_trip_temp = int340x_thermal_set_trip_temp,
- .get_trip_hyst = int340x_thermal_get_trip_hyst,
.critical = int340x_thermal_critical,
};
-static int int340x_thermal_get_trip_config(acpi_handle handle, char *name,
- int *temp)
-{
- unsigned long long r;
- acpi_status status;
-
- status = acpi_evaluate_integer(handle, name, NULL, &r);
- if (ACPI_FAILURE(status))
- return -EIO;
-
- *temp = deci_kelvin_to_millicelsius(r);
-
- return 0;
-}
-
-int int340x_thermal_read_trips(struct int34x_thermal_zone *int34x_zone)
+static int int340x_thermal_read_trips(struct acpi_device *zone_adev,
+ struct thermal_trip *zone_trips,
+ int trip_cnt)
{
- int trip_cnt = int34x_zone->aux_trip_nr;
- int i;
+ int i, ret;
- mutex_lock(&int34x_zone->trip_mutex);
-
- int34x_zone->crt_trip_id = -1;
- if (!int340x_thermal_get_trip_config(int34x_zone->adev->handle, "_CRT",
- &int34x_zone->crt_temp))
- int34x_zone->crt_trip_id = trip_cnt++;
+ ret = thermal_acpi_critical_trip_temp(zone_adev,
+ &zone_trips[trip_cnt].temperature);
+ if (!ret) {
+ zone_trips[trip_cnt].type = THERMAL_TRIP_CRITICAL;
+ trip_cnt++;
+ }
- int34x_zone->hot_trip_id = -1;
- if (!int340x_thermal_get_trip_config(int34x_zone->adev->handle, "_HOT",
- &int34x_zone->hot_temp))
- int34x_zone->hot_trip_id = trip_cnt++;
+ ret = thermal_acpi_hot_trip_temp(zone_adev,
+ &zone_trips[trip_cnt].temperature);
+ if (!ret) {
+ zone_trips[trip_cnt].type = THERMAL_TRIP_HOT;
+ trip_cnt++;
+ }
- int34x_zone->psv_trip_id = -1;
- if (!int340x_thermal_get_trip_config(int34x_zone->adev->handle, "_PSV",
- &int34x_zone->psv_temp))
- int34x_zone->psv_trip_id = trip_cnt++;
+ ret = thermal_acpi_passive_trip_temp(zone_adev,
+ &zone_trips[trip_cnt].temperature);
+ if (!ret) {
+ zone_trips[trip_cnt].type = THERMAL_TRIP_PASSIVE;
+ trip_cnt++;
+ }
for (i = 0; i < INT340X_THERMAL_MAX_ACT_TRIP_COUNT; i++) {
- char name[5] = { '_', 'A', 'C', '0' + i, '\0' };
-
- if (int340x_thermal_get_trip_config(int34x_zone->adev->handle,
- name,
- &int34x_zone->act_trips[i].temp))
+ ret = thermal_acpi_active_trip_temp(zone_adev, i,
+ &zone_trips[trip_cnt].temperature);
+ if (ret)
break;
- int34x_zone->act_trips[i].id = trip_cnt++;
- int34x_zone->act_trips[i].valid = true;
+ zone_trips[trip_cnt].type = THERMAL_TRIP_ACTIVE;
+ trip_cnt++;
}
- mutex_unlock(&int34x_zone->trip_mutex);
-
return trip_cnt;
}
-EXPORT_SYMBOL_GPL(int340x_thermal_read_trips);
static struct thermal_zone_params int340x_thermal_params = {
.governor_name = "user_space",
@@ -229,89 +113,147 @@ static struct thermal_zone_params int340x_thermal_params = {
};
struct int34x_thermal_zone *int340x_thermal_zone_add(struct acpi_device *adev,
- struct thermal_zone_device_ops *override_ops)
+ int (*get_temp) (struct thermal_zone_device *, int *))
{
- struct int34x_thermal_zone *int34x_thermal_zone;
- acpi_status status;
- unsigned long long trip_cnt;
+ struct int34x_thermal_zone *int34x_zone;
+ struct thermal_trip *zone_trips;
+ unsigned long long trip_cnt = 0;
+ unsigned long long hyst;
int trip_mask = 0;
- int ret;
+ acpi_status status;
+ int i, ret;
- int34x_thermal_zone = kzalloc(sizeof(*int34x_thermal_zone),
- GFP_KERNEL);
- if (!int34x_thermal_zone)
+ int34x_zone = kzalloc(sizeof(*int34x_zone), GFP_KERNEL);
+ if (!int34x_zone)
return ERR_PTR(-ENOMEM);
- mutex_init(&int34x_thermal_zone->trip_mutex);
+ int34x_zone->adev = adev;
- int34x_thermal_zone->adev = adev;
- int34x_thermal_zone->override_ops = override_ops;
+ int34x_zone->ops = kmemdup(&int340x_thermal_zone_ops,
+ sizeof(int340x_thermal_zone_ops), GFP_KERNEL);
+ if (!int34x_zone->ops) {
+ ret = -ENOMEM;
+ goto err_ops_alloc;
+ }
+
+ if (get_temp)
+ int34x_zone->ops->get_temp = get_temp;
status = acpi_evaluate_integer(adev->handle, "PATC", NULL, &trip_cnt);
- if (ACPI_FAILURE(status))
- trip_cnt = 0;
- else {
- int i;
-
- int34x_thermal_zone->aux_trips =
- kcalloc(trip_cnt,
- sizeof(*int34x_thermal_zone->aux_trips),
- GFP_KERNEL);
- if (!int34x_thermal_zone->aux_trips) {
- ret = -ENOMEM;
- goto err_trip_alloc;
- }
+ if (ACPI_SUCCESS(status)) {
+ int34x_zone->aux_trip_nr = trip_cnt;
trip_mask = BIT(trip_cnt) - 1;
- int34x_thermal_zone->aux_trip_nr = trip_cnt;
- for (i = 0; i < trip_cnt; ++i)
- int34x_thermal_zone->aux_trips[i] = THERMAL_TEMP_INVALID;
}
- trip_cnt = int340x_thermal_read_trips(int34x_thermal_zone);
+ zone_trips = kzalloc(sizeof(*zone_trips) * (trip_cnt + INT340X_THERMAL_MAX_TRIP_COUNT),
+ GFP_KERNEL);
+ if (!zone_trips) {
+ ret = -ENOMEM;
+ goto err_trips_alloc;
+ }
+
+ for (i = 0; i < trip_cnt; i++) {
+ zone_trips[i].type = THERMAL_TRIP_PASSIVE;
+ zone_trips[i].temperature = THERMAL_TEMP_INVALID;
+ }
+
+ trip_cnt = int340x_thermal_read_trips(adev, zone_trips, trip_cnt);
- int34x_thermal_zone->lpat_table = acpi_lpat_get_conversion_table(
- adev->handle);
+ status = acpi_evaluate_integer(adev->handle, "GTSH", NULL, &hyst);
+ if (ACPI_SUCCESS(status))
+ hyst *= 100;
+ else
+ hyst = 0;
+
+ for (i = 0; i < trip_cnt; ++i)
+ zone_trips[i].hysteresis = hyst;
+
+ int34x_zone->trips = zone_trips;
- int34x_thermal_zone->zone = thermal_zone_device_register(
- acpi_device_bid(adev),
- trip_cnt,
- trip_mask, int34x_thermal_zone,
- &int340x_thermal_zone_ops,
- &int340x_thermal_params,
- 0, 0);
- if (IS_ERR(int34x_thermal_zone->zone)) {
- ret = PTR_ERR(int34x_thermal_zone->zone);
+ int34x_zone->lpat_table = acpi_lpat_get_conversion_table(adev->handle);
+
+ int34x_zone->zone = thermal_zone_device_register_with_trips(
+ acpi_device_bid(adev),
+ zone_trips, trip_cnt,
+ trip_mask, int34x_zone,
+ int34x_zone->ops,
+ &int340x_thermal_params,
+ 0, 0);
+ if (IS_ERR(int34x_zone->zone)) {
+ ret = PTR_ERR(int34x_zone->zone);
goto err_thermal_zone;
}
- ret = thermal_zone_device_enable(int34x_thermal_zone->zone);
+ ret = thermal_zone_device_enable(int34x_zone->zone);
if (ret)
goto err_enable;
- return int34x_thermal_zone;
+ return int34x_zone;
err_enable:
- thermal_zone_device_unregister(int34x_thermal_zone->zone);
+ thermal_zone_device_unregister(int34x_zone->zone);
err_thermal_zone:
- acpi_lpat_free_conversion_table(int34x_thermal_zone->lpat_table);
- kfree(int34x_thermal_zone->aux_trips);
-err_trip_alloc:
- mutex_destroy(&int34x_thermal_zone->trip_mutex);
- kfree(int34x_thermal_zone);
+ kfree(int34x_zone->trips);
+ acpi_lpat_free_conversion_table(int34x_zone->lpat_table);
+err_trips_alloc:
+ kfree(int34x_zone->ops);
+err_ops_alloc:
+ kfree(int34x_zone);
return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(int340x_thermal_zone_add);
-void int340x_thermal_zone_remove(struct int34x_thermal_zone
- *int34x_thermal_zone)
+void int340x_thermal_zone_remove(struct int34x_thermal_zone *int34x_zone)
{
- thermal_zone_device_unregister(int34x_thermal_zone->zone);
- acpi_lpat_free_conversion_table(int34x_thermal_zone->lpat_table);
- kfree(int34x_thermal_zone->aux_trips);
- mutex_destroy(&int34x_thermal_zone->trip_mutex);
- kfree(int34x_thermal_zone);
+ thermal_zone_device_unregister(int34x_zone->zone);
+ acpi_lpat_free_conversion_table(int34x_zone->lpat_table);
+ kfree(int34x_zone->trips);
+ kfree(int34x_zone->ops);
+ kfree(int34x_zone);
}
EXPORT_SYMBOL_GPL(int340x_thermal_zone_remove);
+void int340x_thermal_update_trips(struct int34x_thermal_zone *int34x_zone)
+{
+ struct acpi_device *zone_adev = int34x_zone->adev;
+ struct thermal_trip *zone_trips = int34x_zone->trips;
+ int trip_cnt = int34x_zone->zone->num_trips;
+ int act_trip_nr = 0;
+ int i;
+
+ mutex_lock(&int34x_zone->zone->lock);
+
+ for (i = int34x_zone->aux_trip_nr; i < trip_cnt; i++) {
+ int temp, err;
+
+ switch (zone_trips[i].type) {
+ case THERMAL_TRIP_CRITICAL:
+ err = thermal_acpi_critical_trip_temp(zone_adev, &temp);
+ break;
+ case THERMAL_TRIP_HOT:
+ err = thermal_acpi_hot_trip_temp(zone_adev, &temp);
+ break;
+ case THERMAL_TRIP_PASSIVE:
+ err = thermal_acpi_passive_trip_temp(zone_adev, &temp);
+ break;
+ case THERMAL_TRIP_ACTIVE:
+ err = thermal_acpi_active_trip_temp(zone_adev, act_trip_nr++,
+ &temp);
+ break;
+ default:
+ err = -ENODEV;
+ }
+ if (err) {
+ zone_trips[i].temperature = THERMAL_TEMP_INVALID;
+ continue;
+ }
+
+ zone_trips[i].temperature = temp;
+ }
+
+ mutex_unlock(&int34x_zone->zone->lock);
+}
+EXPORT_SYMBOL_GPL(int340x_thermal_update_trips);
+
MODULE_AUTHOR("Aaron Lu <aaron.lu@intel.com>");
MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>");
MODULE_DESCRIPTION("Intel INT340x common thermal zone handler");
diff --git a/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.h b/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.h
index 8f9872afd0d3..e0df6271facc 100644
--- a/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.h
+++ b/drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.h
@@ -10,6 +10,7 @@
#include <acpi/acpi_lpat.h>
#define INT340X_THERMAL_MAX_ACT_TRIP_COUNT 10
+#define INT340X_THERMAL_MAX_TRIP_COUNT INT340X_THERMAL_MAX_ACT_TRIP_COUNT + 3
struct active_trip {
int temp;
@@ -19,26 +20,18 @@ struct active_trip {
struct int34x_thermal_zone {
struct acpi_device *adev;
- struct active_trip act_trips[INT340X_THERMAL_MAX_ACT_TRIP_COUNT];
- unsigned long *aux_trips;
+ struct thermal_trip *trips;
int aux_trip_nr;
- int psv_temp;
- int psv_trip_id;
- int crt_temp;
- int crt_trip_id;
- int hot_temp;
- int hot_trip_id;
struct thermal_zone_device *zone;
- struct thermal_zone_device_ops *override_ops;
+ struct thermal_zone_device_ops *ops;
void *priv_data;
struct acpi_lpat_conversion_table *lpat_table;
- struct mutex trip_mutex;
};
struct int34x_thermal_zone *int340x_thermal_zone_add(struct acpi_device *,
- struct thermal_zone_device_ops *override_ops);
+ int (*get_temp) (struct thermal_zone_device *, int *));
void int340x_thermal_zone_remove(struct int34x_thermal_zone *);
-int int340x_thermal_read_trips(struct int34x_thermal_zone *int34x_zone);
+void int340x_thermal_update_trips(struct int34x_thermal_zone *int34x_zone);
static inline void int340x_thermal_zone_set_priv_data(
struct int34x_thermal_zone *tzone, void *priv_data)
diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
index a8d98f1bd6c6..a1dc18be7609 100644
--- a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
+++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
@@ -4,6 +4,7 @@
* Copyright (c) 2014, Intel Corporation.
*/
#include <linux/acpi.h>
+#include <linux/intel_tcc.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
@@ -68,54 +69,17 @@ static const struct attribute_group power_limit_attribute_group = {
.name = "power_limits"
};
-static int tcc_get_offset(void)
-{
- u64 val;
- int err;
-
- err = rdmsrl_safe(MSR_IA32_TEMPERATURE_TARGET, &val);
- if (err)
- return err;
-
- return (val >> 24) & 0x3f;
-}
-
static ssize_t tcc_offset_degree_celsius_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- int tcc;
-
- tcc = tcc_get_offset();
- if (tcc < 0)
- return tcc;
-
- return sprintf(buf, "%d\n", tcc);
-}
-
-static int tcc_offset_update(unsigned int tcc)
-{
- u64 val;
- int err;
+ int offset;
- if (tcc > 63)
- return -EINVAL;
-
- err = rdmsrl_safe(MSR_IA32_TEMPERATURE_TARGET, &val);
- if (err)
- return err;
+ offset = intel_tcc_get_offset(-1);
+ if (offset < 0)
+ return offset;
- if (val & BIT(31))
- return -EPERM;
-
- val &= ~GENMASK_ULL(29, 24);
- val |= (tcc & 0x3f) << 24;
-
- err = wrmsrl_safe(MSR_IA32_TEMPERATURE_TARGET, val);
- if (err)
- return err;
-
- return 0;
+ return sprintf(buf, "%d\n", offset);
}
static ssize_t tcc_offset_degree_celsius_store(struct device *dev,
@@ -136,7 +100,7 @@ static ssize_t tcc_offset_degree_celsius_store(struct device *dev,
if (kstrtouint(buf, 0, &tcc))
return -EINVAL;
- err = tcc_offset_update(tcc);
+ err = intel_tcc_set_offset(-1, tcc);
if (err)
return err;
@@ -145,72 +109,27 @@ static ssize_t tcc_offset_degree_celsius_store(struct device *dev,
static DEVICE_ATTR_RW(tcc_offset_degree_celsius);
-static int stored_tjmax; /* since it is fixed, we can have local storage */
-
-static int get_tjmax(void)
-{
- u32 eax, edx;
- u32 val;
- int err;
-
- err = rdmsr_safe(MSR_IA32_TEMPERATURE_TARGET, &eax, &edx);
- if (err)
- return err;
-
- val = (eax >> 16) & 0xff;
- if (val)
- return val;
-
- return -EINVAL;
-}
-
-static int read_temp_msr(int *temp)
+static int proc_thermal_get_zone_temp(struct thermal_zone_device *zone,
+ int *temp)
{
int cpu;
- u32 eax, edx;
- int err;
- unsigned long curr_temp_off = 0;
+ int curr_temp;
*temp = 0;
for_each_online_cpu(cpu) {
- err = rdmsr_safe_on_cpu(cpu, MSR_IA32_THERM_STATUS, &eax,
- &edx);
- if (err)
- goto err_ret;
- else {
- if (eax & 0x80000000) {
- curr_temp_off = (eax >> 16) & 0x7f;
- if (!*temp || curr_temp_off < *temp)
- *temp = curr_temp_off;
- } else {
- err = -EINVAL;
- goto err_ret;
- }
- }
+ curr_temp = intel_tcc_get_temp(cpu, false);
+ if (curr_temp < 0)
+ return curr_temp;
+ if (!*temp || curr_temp > *temp)
+ *temp = curr_temp;
}
- return 0;
-err_ret:
- return err;
-}
+ *temp *= 1000;
-static int proc_thermal_get_zone_temp(struct thermal_zone_device *zone,
- int *temp)
-{
- int ret;
-
- ret = read_temp_msr(temp);
- if (!ret)
- *temp = (stored_tjmax - *temp) * 1000;
-
- return ret;
+ return 0;
}
-static struct thermal_zone_device_ops proc_thermal_local_ops = {
- .get_temp = proc_thermal_get_zone_temp,
-};
-
static int proc_thermal_read_ppcc(struct proc_thermal_device *proc_priv)
{
int i;
@@ -285,7 +204,7 @@ int proc_thermal_add(struct device *dev, struct proc_thermal_device *proc_priv)
struct acpi_device *adev;
acpi_status status;
unsigned long long tmp;
- struct thermal_zone_device_ops *ops = NULL;
+ int (*get_temp) (struct thermal_zone_device *, int *) = NULL;
int ret;
adev = ACPI_COMPANION(dev);
@@ -302,12 +221,11 @@ int proc_thermal_add(struct device *dev, struct proc_thermal_device *proc_priv)
status = acpi_evaluate_integer(adev->handle, "_TMP", NULL, &tmp);
if (ACPI_FAILURE(status)) {
/* there is no _TMP method, add local method */
- stored_tjmax = get_tjmax();
- if (stored_tjmax > 0)
- ops = &proc_thermal_local_ops;
+ if (intel_tcc_get_tjmax(-1) > 0)
+ get_temp = proc_thermal_get_zone_temp;
}
- proc_priv->int340x_zone = int340x_thermal_zone_add(adev, ops);
+ proc_priv->int340x_zone = int340x_thermal_zone_add(adev, get_temp);
if (IS_ERR(proc_priv->int340x_zone)) {
return PTR_ERR(proc_priv->int340x_zone);
} else
@@ -356,7 +274,7 @@ static int tcc_offset_save = -1;
int proc_thermal_suspend(struct device *dev)
{
- tcc_offset_save = tcc_get_offset();
+ tcc_offset_save = intel_tcc_get_offset(-1);
if (tcc_offset_save < 0)
dev_warn(dev, "failed to save offset (%d)\n", tcc_offset_save);
@@ -373,7 +291,7 @@ int proc_thermal_resume(struct device *dev)
/* Do not update if saving failed */
if (tcc_offset_save >= 0)
- tcc_offset_update(tcc_offset_save);
+ intel_tcc_set_offset(-1, tcc_offset_save);
return 0;
}
@@ -460,6 +378,7 @@ void proc_thermal_mmio_remove(struct pci_dev *pdev, struct proc_thermal_device *
}
EXPORT_SYMBOL_GPL(proc_thermal_mmio_remove);
+MODULE_IMPORT_NS(INTEL_TCC);
MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>");
MODULE_DESCRIPTION("Processor Thermal Reporting Device Driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c
index bf1b1cdfade4..40725cbc6eb0 100644
--- a/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c
+++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c
@@ -144,34 +144,6 @@ static int sys_get_curr_temp(struct thermal_zone_device *tzd, int *temp)
return 0;
}
-static int sys_get_trip_temp(struct thermal_zone_device *tzd,
- int trip, int *temp)
-{
- struct proc_thermal_pci *pci_info = tzd->devdata;
- u32 _temp;
-
- proc_thermal_mmio_read(pci_info, PROC_THERMAL_MMIO_THRES_0, &_temp);
- if (!_temp) {
- *temp = THERMAL_TEMP_INVALID;
- } else {
- int tjmax;
-
- proc_thermal_mmio_read(pci_info, PROC_THERMAL_MMIO_TJMAX, &tjmax);
- _temp = tjmax - _temp;
- *temp = (unsigned long)_temp * 1000;
- }
-
- return 0;
-}
-
-static int sys_get_trip_type(struct thermal_zone_device *tzd, int trip,
- enum thermal_trip_type *type)
-{
- *type = THERMAL_TRIP_PASSIVE;
-
- return 0;
-}
-
static int sys_set_trip_temp(struct thermal_zone_device *tzd, int trip, int temp)
{
struct proc_thermal_pci *pci_info = tzd->devdata;
@@ -200,10 +172,26 @@ static int sys_set_trip_temp(struct thermal_zone_device *tzd, int trip, int temp
return 0;
}
+static int get_trip_temp(struct proc_thermal_pci *pci_info)
+{
+ int temp, tjmax;
+
+ proc_thermal_mmio_read(pci_info, PROC_THERMAL_MMIO_THRES_0, &temp);
+ if (!temp)
+ return THERMAL_TEMP_INVALID;
+
+ proc_thermal_mmio_read(pci_info, PROC_THERMAL_MMIO_TJMAX, &tjmax);
+ temp = (tjmax - temp) * 1000;
+
+ return temp;
+}
+
+static struct thermal_trip psv_trip = {
+ .type = THERMAL_TRIP_PASSIVE,
+};
+
static struct thermal_zone_device_ops tzone_ops = {
.get_temp = sys_get_curr_temp,
- .get_trip_temp = sys_get_trip_temp,
- .get_trip_type = sys_get_trip_type,
.set_trip_temp = sys_set_trip_temp,
};
@@ -251,7 +239,10 @@ static int proc_thermal_pci_probe(struct pci_dev *pdev, const struct pci_device_
if (ret)
goto err_ret_thermal;
- pci_info->tzone = thermal_zone_device_register("TCPU_PCI", 1, 1, pci_info,
+ psv_trip.temperature = get_trip_temp(pci_info);
+
+ pci_info->tzone = thermal_zone_device_register_with_trips("TCPU_PCI", &psv_trip,
+ 1, 1, pci_info,
&tzone_ops,
&tzone_params, 0, 0);
if (IS_ERR(pci_info->tzone)) {
diff --git a/drivers/thermal/intel/intel_hfi.c b/drivers/thermal/intel/intel_hfi.c
index 6e604bda2b93..c69db6c90869 100644
--- a/drivers/thermal/intel/intel_hfi.c
+++ b/drivers/thermal/intel/intel_hfi.c
@@ -40,10 +40,11 @@
#include <asm/msr.h>
-#include "../thermal_core.h"
#include "intel_hfi.h"
#include "thermal_interrupt.h"
+#include "../thermal_netlink.h"
+
/* Hardware Feedback Interface MSR configuration bits */
#define HW_FEEDBACK_PTR_VALID_BIT BIT(0)
#define HW_FEEDBACK_CONFIG_HFI_ENABLE_BIT BIT(0)
diff --git a/drivers/thermal/intel/intel_menlow.c b/drivers/thermal/intel/intel_menlow.c
index 3f885b08a490..5a6ad0552311 100644
--- a/drivers/thermal/intel/intel_menlow.c
+++ b/drivers/thermal/intel/intel_menlow.c
@@ -232,9 +232,9 @@ static DEFINE_MUTEX(intel_menlow_attr_lock);
/*
* sensor_get_auxtrip - get the current auxtrip value from sensor
- * @name: Thermalzone name
- * @auxtype : AUX0/AUX1
- * @buf: syfs buffer
+ * @handle: Object handle
+ * @index : GET_AUX1/GET_AUX0
+ * @value : The address will be fill by the value
*/
static int sensor_get_auxtrip(acpi_handle handle, int index,
unsigned long long *value)
@@ -254,9 +254,9 @@ static int sensor_get_auxtrip(acpi_handle handle, int index,
/*
* sensor_set_auxtrip - set the new auxtrip value to sensor
- * @name: Thermalzone name
- * @auxtype : AUX0/AUX1
- * @buf: syfs buffer
+ * @handle: Object handle
+ * @index : GET_AUX1/GET_AUX0
+ * @value : The value will be set
*/
static int sensor_set_auxtrip(acpi_handle handle, int index, int value)
{
diff --git a/drivers/thermal/intel/intel_pch_thermal.c b/drivers/thermal/intel/intel_pch_thermal.c
index dabf11a687a1..b855d031a855 100644
--- a/drivers/thermal/intel/intel_pch_thermal.c
+++ b/drivers/thermal/intel/intel_pch_thermal.c
@@ -29,6 +29,7 @@
#define PCH_THERMAL_DID_CNL_LP 0x02F9 /* CNL-LP PCH */
#define PCH_THERMAL_DID_CML_H 0X06F9 /* CML-H PCH */
#define PCH_THERMAL_DID_LWB 0xA1B1 /* Lewisburg PCH */
+#define PCH_THERMAL_DID_WBG 0x8D24 /* Wellsburg PCH */
/* Wildcat Point-LP PCH Thermal registers */
#define WPT_TEMP 0x0000 /* Temperature */
@@ -65,6 +66,8 @@
#define WPT_TEMP_OFFSET (PCH_TEMP_OFFSET * MILLIDEGREE_PER_DEGREE)
#define GET_PCH_TEMP(x) (((x) / 2) + PCH_TEMP_OFFSET)
+#define PCH_MAX_TRIPS 3 /* critical, hot, passive */
+
/* Amount of time for each cooling delay, 100ms by default for now */
static unsigned int delay_timeout = 100;
module_param(delay_timeout, int, 0644);
@@ -79,66 +82,114 @@ static char driver_name[] = "Intel PCH thermal driver";
struct pch_thermal_device {
void __iomem *hw_base;
- const struct pch_dev_ops *ops;
struct pci_dev *pdev;
struct thermal_zone_device *tzd;
- int crt_trip_id;
- unsigned long crt_temp;
- int hot_trip_id;
- unsigned long hot_temp;
- int psv_trip_id;
- unsigned long psv_temp;
+ struct thermal_trip trips[PCH_MAX_TRIPS];
bool bios_enabled;
};
#ifdef CONFIG_ACPI
-
/*
* On some platforms, there is a companion ACPI device, which adds
* passive trip temperature using _PSV method. There is no specific
* passive temperature setting in MMIO interface of this PCI device.
*/
-static void pch_wpt_add_acpi_psv_trip(struct pch_thermal_device *ptd,
- int *nr_trips)
+static int pch_wpt_add_acpi_psv_trip(struct pch_thermal_device *ptd, int trip)
{
struct acpi_device *adev;
-
- ptd->psv_trip_id = -1;
+ int temp;
adev = ACPI_COMPANION(&ptd->pdev->dev);
- if (adev) {
- unsigned long long r;
- acpi_status status;
-
- status = acpi_evaluate_integer(adev->handle, "_PSV", NULL,
- &r);
- if (ACPI_SUCCESS(status)) {
- unsigned long trip_temp;
-
- trip_temp = deci_kelvin_to_millicelsius(r);
- if (trip_temp) {
- ptd->psv_temp = trip_temp;
- ptd->psv_trip_id = *nr_trips;
- ++(*nr_trips);
- }
- }
- }
+ if (!adev)
+ return 0;
+
+ if (thermal_acpi_passive_trip_temp(adev, &temp) || temp <= 0)
+ return 0;
+
+ ptd->trips[trip].type = THERMAL_TRIP_PASSIVE;
+ ptd->trips[trip].temperature = temp;
+ return 1;
}
#else
-static void pch_wpt_add_acpi_psv_trip(struct pch_thermal_device *ptd,
- int *nr_trips)
+static int pch_wpt_add_acpi_psv_trip(struct pch_thermal_device *ptd, int trip)
{
- ptd->psv_trip_id = -1;
-
+ return 0;
}
#endif
-static int pch_wpt_init(struct pch_thermal_device *ptd, int *nr_trips)
+static int pch_thermal_get_temp(struct thermal_zone_device *tzd, int *temp)
{
- u8 tsel;
+ struct pch_thermal_device *ptd = tzd->devdata;
+
+ *temp = GET_WPT_TEMP(WPT_TEMP_TSR & readw(ptd->hw_base + WPT_TEMP));
+ return 0;
+}
+
+static void pch_critical(struct thermal_zone_device *tzd)
+{
+ dev_dbg(&tzd->device, "%s: critical temperature reached\n", tzd->type);
+}
+
+static struct thermal_zone_device_ops tzd_ops = {
+ .get_temp = pch_thermal_get_temp,
+ .critical = pch_critical,
+};
+
+enum pch_board_ids {
+ PCH_BOARD_HSW = 0,
+ PCH_BOARD_WPT,
+ PCH_BOARD_SKL,
+ PCH_BOARD_CNL,
+ PCH_BOARD_CML,
+ PCH_BOARD_LWB,
+ PCH_BOARD_WBG,
+};
+
+static const char *board_names[] = {
+ [PCH_BOARD_HSW] = "pch_haswell",
+ [PCH_BOARD_WPT] = "pch_wildcat_point",
+ [PCH_BOARD_SKL] = "pch_skylake",
+ [PCH_BOARD_CNL] = "pch_cannonlake",
+ [PCH_BOARD_CML] = "pch_cometlake",
+ [PCH_BOARD_LWB] = "pch_lewisburg",
+ [PCH_BOARD_WBG] = "pch_wellsburg",
+};
+
+static int intel_pch_thermal_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ enum pch_board_ids board_id = id->driver_data;
+ struct pch_thermal_device *ptd;
+ int nr_trips = 0;
u16 trip_temp;
+ u8 tsel;
+ int err;
+
+ ptd = devm_kzalloc(&pdev->dev, sizeof(*ptd), GFP_KERNEL);
+ if (!ptd)
+ return -ENOMEM;
+
+ pci_set_drvdata(pdev, ptd);
+ ptd->pdev = pdev;
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "failed to enable pci device\n");
+ return err;
+ }
+
+ err = pci_request_regions(pdev, driver_name);
+ if (err) {
+ dev_err(&pdev->dev, "failed to request pci region\n");
+ goto error_disable;
+ }
- *nr_trips = 0;
+ ptd->hw_base = pci_ioremap_bar(pdev, 0);
+ if (!ptd->hw_base) {
+ err = -ENOMEM;
+ dev_err(&pdev->dev, "failed to map mem base\n");
+ goto error_release;
+ }
/* Check if BIOS has already enabled thermal sensor */
if (WPT_TSEL_ETS & readb(ptd->hw_base + WPT_TSEL)) {
@@ -153,52 +204,79 @@ static int pch_wpt_init(struct pch_thermal_device *ptd, int *nr_trips)
*/
if (tsel & WPT_TSEL_PLDB) {
dev_err(&ptd->pdev->dev, "Sensor can't be enabled\n");
- return -ENODEV;
+ err = -ENODEV;
+ goto error_cleanup;
}
writeb(tsel|WPT_TSEL_ETS, ptd->hw_base + WPT_TSEL);
if (!(WPT_TSEL_ETS & readb(ptd->hw_base + WPT_TSEL))) {
dev_err(&ptd->pdev->dev, "Sensor can't be enabled\n");
- return -ENODEV;
+ err = -ENODEV;
+ goto error_cleanup;
}
read_trips:
- ptd->crt_trip_id = -1;
trip_temp = readw(ptd->hw_base + WPT_CTT);
trip_temp &= 0x1FF;
if (trip_temp) {
- ptd->crt_temp = GET_WPT_TEMP(trip_temp);
- ptd->crt_trip_id = 0;
- ++(*nr_trips);
+ ptd->trips[nr_trips].temperature = GET_WPT_TEMP(trip_temp);
+ ptd->trips[nr_trips++].type = THERMAL_TRIP_CRITICAL;
}
- ptd->hot_trip_id = -1;
trip_temp = readw(ptd->hw_base + WPT_PHL);
trip_temp &= 0x1FF;
if (trip_temp) {
- ptd->hot_temp = GET_WPT_TEMP(trip_temp);
- ptd->hot_trip_id = *nr_trips;
- ++(*nr_trips);
+ ptd->trips[nr_trips].temperature = GET_WPT_TEMP(trip_temp);
+ ptd->trips[nr_trips++].type = THERMAL_TRIP_HOT;
}
- pch_wpt_add_acpi_psv_trip(ptd, nr_trips);
+ nr_trips += pch_wpt_add_acpi_psv_trip(ptd, nr_trips);
+
+ ptd->tzd = thermal_zone_device_register_with_trips(board_names[board_id],
+ ptd->trips, nr_trips,
+ 0, ptd, &tzd_ops,
+ NULL, 0, 0);
+ if (IS_ERR(ptd->tzd)) {
+ dev_err(&pdev->dev, "Failed to register thermal zone %s\n",
+ board_names[board_id]);
+ err = PTR_ERR(ptd->tzd);
+ goto error_cleanup;
+ }
+ err = thermal_zone_device_enable(ptd->tzd);
+ if (err)
+ goto err_unregister;
return 0;
+
+err_unregister:
+ thermal_zone_device_unregister(ptd->tzd);
+error_cleanup:
+ iounmap(ptd->hw_base);
+error_release:
+ pci_release_regions(pdev);
+error_disable:
+ pci_disable_device(pdev);
+ dev_err(&pdev->dev, "pci device failed to probe\n");
+ return err;
}
-static int pch_wpt_get_temp(struct pch_thermal_device *ptd, int *temp)
+static void intel_pch_thermal_remove(struct pci_dev *pdev)
{
- *temp = GET_WPT_TEMP(WPT_TEMP_TSR & readw(ptd->hw_base + WPT_TEMP));
+ struct pch_thermal_device *ptd = pci_get_drvdata(pdev);
- return 0;
+ thermal_zone_device_unregister(ptd->tzd);
+ iounmap(ptd->hw_base);
+ pci_set_drvdata(pdev, NULL);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
}
-/* Cool the PCH when it's overheat in .suspend_noirq phase */
-static int pch_wpt_suspend(struct pch_thermal_device *ptd)
+static int intel_pch_thermal_suspend_noirq(struct device *device)
{
- u8 tsel;
- int pch_delay_cnt = 0;
+ struct pch_thermal_device *ptd = dev_get_drvdata(device);
u16 pch_thr_temp, pch_cur_temp;
+ int pch_delay_cnt = 0;
+ u8 tsel;
/* Shutdown the thermal sensor if it is not enabled by BIOS */
if (!ptd->bios_enabled) {
@@ -261,8 +339,9 @@ static int pch_wpt_suspend(struct pch_thermal_device *ptd)
return 0;
}
-static int pch_wpt_resume(struct pch_thermal_device *ptd)
+static int intel_pch_thermal_resume(struct device *device)
{
+ struct pch_thermal_device *ptd = dev_get_drvdata(device);
u8 tsel;
if (ptd->bios_enabled)
@@ -275,226 +354,29 @@ static int pch_wpt_resume(struct pch_thermal_device *ptd)
return 0;
}
-struct pch_dev_ops {
- int (*hw_init)(struct pch_thermal_device *ptd, int *nr_trips);
- int (*get_temp)(struct pch_thermal_device *ptd, int *temp);
- int (*suspend)(struct pch_thermal_device *ptd);
- int (*resume)(struct pch_thermal_device *ptd);
-};
-
-
-/* dev ops for Wildcat Point */
-static const struct pch_dev_ops pch_dev_ops_wpt = {
- .hw_init = pch_wpt_init,
- .get_temp = pch_wpt_get_temp,
- .suspend = pch_wpt_suspend,
- .resume = pch_wpt_resume,
-};
-
-static int pch_thermal_get_temp(struct thermal_zone_device *tzd, int *temp)
-{
- struct pch_thermal_device *ptd = tzd->devdata;
-
- return ptd->ops->get_temp(ptd, temp);
-}
-
-static int pch_get_trip_type(struct thermal_zone_device *tzd, int trip,
- enum thermal_trip_type *type)
-{
- struct pch_thermal_device *ptd = tzd->devdata;
-
- if (ptd->crt_trip_id == trip)
- *type = THERMAL_TRIP_CRITICAL;
- else if (ptd->hot_trip_id == trip)
- *type = THERMAL_TRIP_HOT;
- else if (ptd->psv_trip_id == trip)
- *type = THERMAL_TRIP_PASSIVE;
- else
- return -EINVAL;
-
- return 0;
-}
-
-static int pch_get_trip_temp(struct thermal_zone_device *tzd, int trip, int *temp)
-{
- struct pch_thermal_device *ptd = tzd->devdata;
-
- if (ptd->crt_trip_id == trip)
- *temp = ptd->crt_temp;
- else if (ptd->hot_trip_id == trip)
- *temp = ptd->hot_temp;
- else if (ptd->psv_trip_id == trip)
- *temp = ptd->psv_temp;
- else
- return -EINVAL;
-
- return 0;
-}
-
-static void pch_critical(struct thermal_zone_device *tzd)
-{
- dev_dbg(&tzd->device, "%s: critical temperature reached\n", tzd->type);
-}
-
-static struct thermal_zone_device_ops tzd_ops = {
- .get_temp = pch_thermal_get_temp,
- .get_trip_type = pch_get_trip_type,
- .get_trip_temp = pch_get_trip_temp,
- .critical = pch_critical,
-};
-
-enum board_ids {
- board_hsw,
- board_wpt,
- board_skl,
- board_cnl,
- board_cml,
- board_lwb,
-};
-
-static const struct board_info {
- const char *name;
- const struct pch_dev_ops *ops;
-} board_info[] = {
- [board_hsw] = {
- .name = "pch_haswell",
- .ops = &pch_dev_ops_wpt,
- },
- [board_wpt] = {
- .name = "pch_wildcat_point",
- .ops = &pch_dev_ops_wpt,
- },
- [board_skl] = {
- .name = "pch_skylake",
- .ops = &pch_dev_ops_wpt,
- },
- [board_cnl] = {
- .name = "pch_cannonlake",
- .ops = &pch_dev_ops_wpt,
- },
- [board_cml] = {
- .name = "pch_cometlake",
- .ops = &pch_dev_ops_wpt,
- },
- [board_lwb] = {
- .name = "pch_lewisburg",
- .ops = &pch_dev_ops_wpt,
- },
-};
-
-static int intel_pch_thermal_probe(struct pci_dev *pdev,
- const struct pci_device_id *id)
-{
- enum board_ids board_id = id->driver_data;
- const struct board_info *bi = &board_info[board_id];
- struct pch_thermal_device *ptd;
- int err;
- int nr_trips;
-
- ptd = devm_kzalloc(&pdev->dev, sizeof(*ptd), GFP_KERNEL);
- if (!ptd)
- return -ENOMEM;
-
- ptd->ops = bi->ops;
-
- pci_set_drvdata(pdev, ptd);
- ptd->pdev = pdev;
-
- err = pci_enable_device(pdev);
- if (err) {
- dev_err(&pdev->dev, "failed to enable pci device\n");
- return err;
- }
-
- err = pci_request_regions(pdev, driver_name);
- if (err) {
- dev_err(&pdev->dev, "failed to request pci region\n");
- goto error_disable;
- }
-
- ptd->hw_base = pci_ioremap_bar(pdev, 0);
- if (!ptd->hw_base) {
- err = -ENOMEM;
- dev_err(&pdev->dev, "failed to map mem base\n");
- goto error_release;
- }
-
- err = ptd->ops->hw_init(ptd, &nr_trips);
- if (err)
- goto error_cleanup;
-
- ptd->tzd = thermal_zone_device_register(bi->name, nr_trips, 0, ptd,
- &tzd_ops, NULL, 0, 0);
- if (IS_ERR(ptd->tzd)) {
- dev_err(&pdev->dev, "Failed to register thermal zone %s\n",
- bi->name);
- err = PTR_ERR(ptd->tzd);
- goto error_cleanup;
- }
- err = thermal_zone_device_enable(ptd->tzd);
- if (err)
- goto err_unregister;
-
- return 0;
-
-err_unregister:
- thermal_zone_device_unregister(ptd->tzd);
-error_cleanup:
- iounmap(ptd->hw_base);
-error_release:
- pci_release_regions(pdev);
-error_disable:
- pci_disable_device(pdev);
- dev_err(&pdev->dev, "pci device failed to probe\n");
- return err;
-}
-
-static void intel_pch_thermal_remove(struct pci_dev *pdev)
-{
- struct pch_thermal_device *ptd = pci_get_drvdata(pdev);
-
- thermal_zone_device_unregister(ptd->tzd);
- iounmap(ptd->hw_base);
- pci_set_drvdata(pdev, NULL);
- pci_release_regions(pdev);
- pci_disable_device(pdev);
-}
-
-static int intel_pch_thermal_suspend_noirq(struct device *device)
-{
- struct pch_thermal_device *ptd = dev_get_drvdata(device);
-
- return ptd->ops->suspend(ptd);
-}
-
-static int intel_pch_thermal_resume(struct device *device)
-{
- struct pch_thermal_device *ptd = dev_get_drvdata(device);
-
- return ptd->ops->resume(ptd);
-}
-
static const struct pci_device_id intel_pch_thermal_id[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_HSW_1),
- .driver_data = board_hsw, },
+ .driver_data = PCH_BOARD_HSW, },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_HSW_2),
- .driver_data = board_hsw, },
+ .driver_data = PCH_BOARD_HSW, },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_WPT),
- .driver_data = board_wpt, },
+ .driver_data = PCH_BOARD_WPT, },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_SKL),
- .driver_data = board_skl, },
+ .driver_data = PCH_BOARD_SKL, },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_SKL_H),
- .driver_data = board_skl, },
+ .driver_data = PCH_BOARD_SKL, },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_CNL),
- .driver_data = board_cnl, },
+ .driver_data = PCH_BOARD_CNL, },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_CNL_H),
- .driver_data = board_cnl, },
+ .driver_data = PCH_BOARD_CNL, },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_CNL_LP),
- .driver_data = board_cnl, },
+ .driver_data = PCH_BOARD_CNL, },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_CML_H),
- .driver_data = board_cml, },
+ .driver_data = PCH_BOARD_CML, },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_LWB),
- .driver_data = board_lwb, },
+ .driver_data = PCH_BOARD_LWB, },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCH_THERMAL_DID_WBG),
+ .driver_data = PCH_BOARD_WBG, },
{ 0, },
};
MODULE_DEVICE_TABLE(pci, intel_pch_thermal_id);
diff --git a/drivers/thermal/intel/intel_powerclamp.c b/drivers/thermal/intel/intel_powerclamp.c
index b80e25ec1261..c7ba5680cd48 100644
--- a/drivers/thermal/intel/intel_powerclamp.c
+++ b/drivers/thermal/intel/intel_powerclamp.c
@@ -2,7 +2,7 @@
/*
* intel_powerclamp.c - package c-state idle injection
*
- * Copyright (c) 2012, Intel Corporation.
+ * Copyright (c) 2012-2023, Intel Corporation.
*
* Authors:
* Arjan van de Ven <arjan@linux.intel.com>
@@ -27,23 +27,17 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/delay.h>
-#include <linux/kthread.h>
#include <linux/cpu.h>
#include <linux/thermal.h>
-#include <linux/slab.h>
-#include <linux/tick.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
-#include <linux/sched/rt.h>
-#include <uapi/linux/sched/types.h>
+#include <linux/idle_inject.h>
-#include <asm/nmi.h>
#include <asm/msr.h>
#include <asm/mwait.h>
#include <asm/cpu_device_id.h>
-#include <asm/hardirq.h>
-#define MAX_TARGET_RATIO (50U)
+#define MAX_TARGET_RATIO (100U)
/* For each undisturbed clamping period (no extra wake ups during idle time),
* we increment the confidence counter for the given target ratio.
* CONFIDENCE_OK defines the level where runtime calibration results are
@@ -57,37 +51,30 @@
static unsigned int target_mwait;
static struct dentry *debug_dir;
+static bool poll_pkg_cstate_enable;
-/* user selected target */
-static unsigned int set_target_ratio;
+/* Idle ratio observed using package C-state counters */
static unsigned int current_ratio;
-static bool should_skip;
-static unsigned int control_cpu; /* The cpu assigned to collect stat and update
- * control parameters. default to BSP but BSP
- * can be offlined.
- */
-static bool clamping;
+/* Skip the idle injection till set to true */
+static bool should_skip;
-struct powerclamp_worker_data {
- struct kthread_worker *worker;
- struct kthread_work balancing_work;
- struct kthread_delayed_work idle_injection_work;
+struct powerclamp_data {
unsigned int cpu;
unsigned int count;
unsigned int guard;
unsigned int window_size_now;
unsigned int target_ratio;
- unsigned int duration_jiffies;
bool clamping;
};
-static struct powerclamp_worker_data __percpu *worker_data;
+static struct powerclamp_data powerclamp_data;
+
static struct thermal_cooling_device *cooling_dev;
-static unsigned long *cpu_clamping_mask; /* bit map for tracking per cpu
- * clamping kthread worker
- */
+static DEFINE_MUTEX(powerclamp_lock);
+
+/* This duration is in microseconds */
static unsigned int duration;
static unsigned int pkg_cstate_ratio_cur;
static unsigned int window_size;
@@ -104,25 +91,171 @@ static int duration_set(const char *arg, const struct kernel_param *kp)
pr_err("Out of recommended range %lu, between 6-25ms\n",
new_duration);
ret = -EINVAL;
+ goto exit;
}
- duration = clamp(new_duration, 6ul, 25ul);
- smp_mb();
-
+ mutex_lock(&powerclamp_lock);
+ duration = clamp(new_duration, 6ul, 25ul) * 1000;
+ mutex_unlock(&powerclamp_lock);
exit:
return ret;
}
+static int duration_get(char *buf, const struct kernel_param *kp)
+{
+ int ret;
+
+ mutex_lock(&powerclamp_lock);
+ ret = sysfs_emit(buf, "%d\n", duration / 1000);
+ mutex_unlock(&powerclamp_lock);
+
+ return ret;
+}
+
static const struct kernel_param_ops duration_ops = {
.set = duration_set,
- .get = param_get_int,
+ .get = duration_get,
};
-
-module_param_cb(duration, &duration_ops, &duration, 0644);
+module_param_cb(duration, &duration_ops, NULL, 0644);
MODULE_PARM_DESC(duration, "forced idle time for each attempt in msec.");
+#define DEFAULT_MAX_IDLE 50
+#define MAX_ALL_CPU_IDLE 75
+
+static u8 max_idle = DEFAULT_MAX_IDLE;
+
+static cpumask_var_t idle_injection_cpu_mask;
+
+static int allocate_copy_idle_injection_mask(const struct cpumask *copy_mask)
+{
+ if (cpumask_available(idle_injection_cpu_mask))
+ goto copy_mask;
+
+ /* This mask is allocated only one time and freed during module exit */
+ if (!alloc_cpumask_var(&idle_injection_cpu_mask, GFP_KERNEL))
+ return -ENOMEM;
+
+copy_mask:
+ cpumask_copy(idle_injection_cpu_mask, copy_mask);
+
+ return 0;
+}
+
+/* Return true if the cpumask and idle percent combination is invalid */
+static bool check_invalid(cpumask_var_t mask, u8 idle)
+{
+ if (cpumask_equal(cpu_present_mask, mask) && idle > MAX_ALL_CPU_IDLE)
+ return true;
+
+ return false;
+}
+
+static int cpumask_set(const char *arg, const struct kernel_param *kp)
+{
+ cpumask_var_t new_mask;
+ int ret;
+
+ mutex_lock(&powerclamp_lock);
+
+ /* Can't set mask when cooling device is in use */
+ if (powerclamp_data.clamping) {
+ ret = -EAGAIN;
+ goto skip_cpumask_set;
+ }
+
+ ret = alloc_cpumask_var(&new_mask, GFP_KERNEL);
+ if (!ret)
+ goto skip_cpumask_set;
+
+ ret = bitmap_parse(arg, strlen(arg), cpumask_bits(new_mask),
+ nr_cpumask_bits);
+ if (ret)
+ goto free_cpumask_set;
+
+ if (cpumask_empty(new_mask) || check_invalid(new_mask, max_idle)) {
+ ret = -EINVAL;
+ goto free_cpumask_set;
+ }
+
+ /*
+ * When module parameters are passed from kernel command line
+ * during insmod, the module parameter callback is called
+ * before powerclamp_init(), so we can't assume that some
+ * cpumask can be allocated and copied before here. Also
+ * in this case this cpumask is used as the default mask.
+ */
+ ret = allocate_copy_idle_injection_mask(new_mask);
+
+free_cpumask_set:
+ free_cpumask_var(new_mask);
+skip_cpumask_set:
+ mutex_unlock(&powerclamp_lock);
+
+ return ret;
+}
+
+static int cpumask_get(char *buf, const struct kernel_param *kp)
+{
+ if (!cpumask_available(idle_injection_cpu_mask))
+ return -ENODEV;
+
+ return bitmap_print_to_pagebuf(false, buf, cpumask_bits(idle_injection_cpu_mask),
+ nr_cpumask_bits);
+}
+
+static const struct kernel_param_ops cpumask_ops = {
+ .set = cpumask_set,
+ .get = cpumask_get,
+};
+
+module_param_cb(cpumask, &cpumask_ops, NULL, 0644);
+MODULE_PARM_DESC(cpumask, "Mask of CPUs to use for idle injection.");
+
+static int max_idle_set(const char *arg, const struct kernel_param *kp)
+{
+ u8 new_max_idle;
+ int ret = 0;
+
+ mutex_lock(&powerclamp_lock);
+
+ /* Can't set mask when cooling device is in use */
+ if (powerclamp_data.clamping) {
+ ret = -EAGAIN;
+ goto skip_limit_set;
+ }
+
+ ret = kstrtou8(arg, 10, &new_max_idle);
+ if (ret)
+ goto skip_limit_set;
+
+ if (new_max_idle > MAX_TARGET_RATIO) {
+ ret = -EINVAL;
+ goto skip_limit_set;
+ }
+
+ if (check_invalid(idle_injection_cpu_mask, new_max_idle)) {
+ ret = -EINVAL;
+ goto skip_limit_set;
+ }
+
+ max_idle = new_max_idle;
+
+skip_limit_set:
+ mutex_unlock(&powerclamp_lock);
+
+ return ret;
+}
+
+static const struct kernel_param_ops max_idle_ops = {
+ .set = max_idle_set,
+ .get = param_get_int,
+};
+
+module_param_cb(max_idle, &max_idle_ops, &max_idle, 0644);
+MODULE_PARM_DESC(max_idle, "maximum injected idle time to the total CPU time ratio in percent range:1-100");
+
struct powerclamp_calibration_data {
unsigned long confidence; /* used for calibration, basically a counter
* gets incremented each time a clamping
@@ -261,6 +394,9 @@ static unsigned int get_compensation(int ratio)
{
unsigned int comp = 0;
+ if (!poll_pkg_cstate_enable)
+ return 0;
+
/* we only use compensation if all adjacent ones are good */
if (ratio == 1 &&
cal_data[ratio].confidence >= CONFIDENCE_OK &&
@@ -302,7 +438,7 @@ static void adjust_compensation(int target_ratio, unsigned int win)
if (d->confidence >= CONFIDENCE_OK)
return;
- delta = set_target_ratio - current_ratio;
+ delta = powerclamp_data.target_ratio - current_ratio;
/* filter out bad data */
if (delta >= 0 && delta <= (1+target_ratio/10)) {
if (d->steady_comp)
@@ -341,82 +477,39 @@ static bool powerclamp_adjust_controls(unsigned int target_ratio,
adjust_compensation(target_ratio, win);
/* if we are above target+guard, skip */
- return set_target_ratio + guard <= current_ratio;
+ return powerclamp_data.target_ratio + guard <= current_ratio;
}
-static void clamp_balancing_func(struct kthread_work *work)
+/*
+ * This function calculates runtime from the current target ratio.
+ * This function gets called under powerclamp_lock.
+ */
+static unsigned int get_run_time(void)
{
- struct powerclamp_worker_data *w_data;
- int sleeptime;
- unsigned long target_jiffies;
unsigned int compensated_ratio;
- int interval; /* jiffies to sleep for each attempt */
-
- w_data = container_of(work, struct powerclamp_worker_data,
- balancing_work);
+ unsigned int runtime;
/*
* make sure user selected ratio does not take effect until
* the next round. adjust target_ratio if user has changed
* target such that we can converge quickly.
*/
- w_data->target_ratio = READ_ONCE(set_target_ratio);
- w_data->guard = 1 + w_data->target_ratio / 20;
- w_data->window_size_now = window_size;
- w_data->duration_jiffies = msecs_to_jiffies(duration);
- w_data->count++;
+ powerclamp_data.guard = 1 + powerclamp_data.target_ratio / 20;
+ powerclamp_data.window_size_now = window_size;
/*
* systems may have different ability to enter package level
* c-states, thus we need to compensate the injected idle ratio
* to achieve the actual target reported by the HW.
*/
- compensated_ratio = w_data->target_ratio +
- get_compensation(w_data->target_ratio);
+ compensated_ratio = powerclamp_data.target_ratio +
+ get_compensation(powerclamp_data.target_ratio);
if (compensated_ratio <= 0)
compensated_ratio = 1;
- interval = w_data->duration_jiffies * 100 / compensated_ratio;
-
- /* align idle time */
- target_jiffies = roundup(jiffies, interval);
- sleeptime = target_jiffies - jiffies;
- if (sleeptime <= 0)
- sleeptime = 1;
-
- if (clamping && w_data->clamping && cpu_online(w_data->cpu))
- kthread_queue_delayed_work(w_data->worker,
- &w_data->idle_injection_work,
- sleeptime);
-}
-static void clamp_idle_injection_func(struct kthread_work *work)
-{
- struct powerclamp_worker_data *w_data;
-
- w_data = container_of(work, struct powerclamp_worker_data,
- idle_injection_work.work);
-
- /*
- * only elected controlling cpu can collect stats and update
- * control parameters.
- */
- if (w_data->cpu == control_cpu &&
- !(w_data->count % w_data->window_size_now)) {
- should_skip =
- powerclamp_adjust_controls(w_data->target_ratio,
- w_data->guard,
- w_data->window_size_now);
- smp_mb();
- }
+ runtime = duration * 100 / compensated_ratio - duration;
- if (should_skip)
- goto balance;
-
- play_idle(jiffies_to_usecs(w_data->duration_jiffies));
-
-balance:
- if (clamping && w_data->clamping && cpu_online(w_data->cpu))
- kthread_queue_work(w_data->worker, &w_data->balancing_work);
+ return runtime;
}
/*
@@ -452,126 +545,129 @@ static void poll_pkg_cstate(struct work_struct *dummy)
msr_last = msr_now;
tsc_last = tsc_now;
- if (true == clamping)
+ mutex_lock(&powerclamp_lock);
+ if (powerclamp_data.clamping)
schedule_delayed_work(&poll_pkg_cstate_work, HZ);
+ mutex_unlock(&powerclamp_lock);
}
-static void start_power_clamp_worker(unsigned long cpu)
+static struct idle_inject_device *ii_dev;
+
+/*
+ * This function is called from idle injection core on timer expiry
+ * for the run duration. This allows powerclamp to readjust or skip
+ * injecting idle for this cycle.
+ */
+static bool idle_inject_update(void)
{
- struct powerclamp_worker_data *w_data = per_cpu_ptr(worker_data, cpu);
- struct kthread_worker *worker;
+ bool update = false;
- worker = kthread_create_worker_on_cpu(cpu, 0, "kidle_inj/%ld", cpu);
- if (IS_ERR(worker))
- return;
+ /* We can't sleep in this callback */
+ if (!mutex_trylock(&powerclamp_lock))
+ return true;
- w_data->worker = worker;
- w_data->count = 0;
- w_data->cpu = cpu;
- w_data->clamping = true;
- set_bit(cpu, cpu_clamping_mask);
- sched_set_fifo(worker->task);
- kthread_init_work(&w_data->balancing_work, clamp_balancing_func);
- kthread_init_delayed_work(&w_data->idle_injection_work,
- clamp_idle_injection_func);
- kthread_queue_work(w_data->worker, &w_data->balancing_work);
-}
+ if (!(powerclamp_data.count % powerclamp_data.window_size_now)) {
-static void stop_power_clamp_worker(unsigned long cpu)
-{
- struct powerclamp_worker_data *w_data = per_cpu_ptr(worker_data, cpu);
+ should_skip = powerclamp_adjust_controls(powerclamp_data.target_ratio,
+ powerclamp_data.guard,
+ powerclamp_data.window_size_now);
+ update = true;
+ }
- if (!w_data->worker)
- return;
+ if (update) {
+ unsigned int runtime = get_run_time();
- w_data->clamping = false;
- /*
- * Make sure that all works that get queued after this point see
- * the clamping disabled. The counter part is not needed because
- * there is an implicit memory barrier when the queued work
- * is proceed.
- */
- smp_wmb();
- kthread_cancel_work_sync(&w_data->balancing_work);
- kthread_cancel_delayed_work_sync(&w_data->idle_injection_work);
- /*
- * The balancing work still might be queued here because
- * the handling of the "clapming" variable, cancel, and queue
- * operations are not synchronized via a lock. But it is not
- * a big deal. The balancing work is fast and destroy kthread
- * will wait for it.
- */
- clear_bit(w_data->cpu, cpu_clamping_mask);
- kthread_destroy_worker(w_data->worker);
+ idle_inject_set_duration(ii_dev, runtime, duration);
+ }
+
+ powerclamp_data.count++;
+
+ mutex_unlock(&powerclamp_lock);
+
+ if (should_skip)
+ return false;
- w_data->worker = NULL;
+ return true;
}
-static int start_power_clamp(void)
+/* This function starts idle injection by calling idle_inject_start() */
+static void trigger_idle_injection(void)
{
- unsigned long cpu;
-
- set_target_ratio = clamp(set_target_ratio, 0U, MAX_TARGET_RATIO - 1);
- /* prevent cpu hotplug */
- cpus_read_lock();
+ unsigned int runtime = get_run_time();
- /* prefer BSP */
- control_cpu = cpumask_first(cpu_online_mask);
+ idle_inject_set_duration(ii_dev, runtime, duration);
+ idle_inject_start(ii_dev);
+ powerclamp_data.clamping = true;
+}
- clamping = true;
- schedule_delayed_work(&poll_pkg_cstate_work, 0);
+/*
+ * This function is called from start_power_clamp() to register
+ * CPUS with powercap idle injection register and set default
+ * idle duration and latency.
+ */
+static int powerclamp_idle_injection_register(void)
+{
+ poll_pkg_cstate_enable = false;
+ if (cpumask_equal(cpu_present_mask, idle_injection_cpu_mask)) {
+ ii_dev = idle_inject_register_full(idle_injection_cpu_mask, idle_inject_update);
+ if (topology_max_packages() == 1 && topology_max_die_per_package() == 1)
+ poll_pkg_cstate_enable = true;
+ } else {
+ ii_dev = idle_inject_register(idle_injection_cpu_mask);
+ }
- /* start one kthread worker per online cpu */
- for_each_online_cpu(cpu) {
- start_power_clamp_worker(cpu);
+ if (!ii_dev) {
+ pr_err("powerclamp: idle_inject_register failed\n");
+ return -EAGAIN;
}
- cpus_read_unlock();
+
+ idle_inject_set_duration(ii_dev, TICK_USEC, duration);
+ idle_inject_set_latency(ii_dev, UINT_MAX);
return 0;
}
-static void end_power_clamp(void)
+/*
+ * This function is called from end_power_clamp() to stop idle injection
+ * and unregister CPUS from powercap idle injection core.
+ */
+static void remove_idle_injection(void)
{
- int i;
+ if (!powerclamp_data.clamping)
+ return;
- /*
- * Block requeuing in all the kthread workers. They will flush and
- * stop faster.
- */
- clamping = false;
- for_each_set_bit(i, cpu_clamping_mask, num_possible_cpus()) {
- pr_debug("clamping worker for cpu %d alive, destroy\n", i);
- stop_power_clamp_worker(i);
- }
+ powerclamp_data.clamping = false;
+ idle_inject_stop(ii_dev);
}
-static int powerclamp_cpu_online(unsigned int cpu)
+/*
+ * This function is called when user change the cooling device
+ * state from zero to some other value.
+ */
+static int start_power_clamp(void)
{
- if (clamping == false)
- return 0;
- start_power_clamp_worker(cpu);
- /* prefer BSP as controlling CPU */
- if (cpu == 0) {
- control_cpu = 0;
- smp_mb();
+ int ret;
+
+ ret = powerclamp_idle_injection_register();
+ if (!ret) {
+ trigger_idle_injection();
+ if (poll_pkg_cstate_enable)
+ schedule_delayed_work(&poll_pkg_cstate_work, 0);
}
- return 0;
+
+ return ret;
}
-static int powerclamp_cpu_predown(unsigned int cpu)
+/*
+ * This function is called when user change the cooling device
+ * state from non zero value zero.
+ */
+static void end_power_clamp(void)
{
- if (clamping == false)
- return 0;
-
- stop_power_clamp_worker(cpu);
- if (cpu != control_cpu)
- return 0;
-
- control_cpu = cpumask_first(cpu_online_mask);
- if (control_cpu == cpu)
- control_cpu = cpumask_next(cpu, cpu_online_mask);
- smp_mb();
- return 0;
+ if (powerclamp_data.clamping) {
+ remove_idle_injection();
+ idle_inject_unregister(ii_dev);
+ }
}
static int powerclamp_get_max_state(struct thermal_cooling_device *cdev,
@@ -585,11 +681,9 @@ static int powerclamp_get_max_state(struct thermal_cooling_device *cdev,
static int powerclamp_get_cur_state(struct thermal_cooling_device *cdev,
unsigned long *state)
{
- if (true == clamping)
- *state = pkg_cstate_ratio_cur;
- else
- /* to save power, do not poll idle ratio while not clamping */
- *state = -1; /* indicates invalid state */
+ mutex_lock(&powerclamp_lock);
+ *state = powerclamp_data.target_ratio;
+ mutex_unlock(&powerclamp_lock);
return 0;
}
@@ -599,24 +693,32 @@ static int powerclamp_set_cur_state(struct thermal_cooling_device *cdev,
{
int ret = 0;
+ mutex_lock(&powerclamp_lock);
+
new_target_ratio = clamp(new_target_ratio, 0UL,
- (unsigned long) (MAX_TARGET_RATIO-1));
- if (set_target_ratio == 0 && new_target_ratio > 0) {
+ (unsigned long) (max_idle - 1));
+ if (!powerclamp_data.target_ratio && new_target_ratio > 0) {
pr_info("Start idle injection to reduce power\n");
- set_target_ratio = new_target_ratio;
+ powerclamp_data.target_ratio = new_target_ratio;
ret = start_power_clamp();
+ if (ret)
+ powerclamp_data.target_ratio = 0;
goto exit_set;
- } else if (set_target_ratio > 0 && new_target_ratio == 0) {
+ } else if (powerclamp_data.target_ratio > 0 && new_target_ratio == 0) {
pr_info("Stop forced idle injection\n");
end_power_clamp();
- set_target_ratio = 0;
+ powerclamp_data.target_ratio = 0;
} else /* adjust currently running */ {
- set_target_ratio = new_target_ratio;
- /* make new set_target_ratio visible to other cpus */
- smp_mb();
+ unsigned int runtime;
+
+ powerclamp_data.target_ratio = new_target_ratio;
+ runtime = get_run_time();
+ idle_inject_set_duration(ii_dev, runtime, duration);
}
exit_set:
+ mutex_unlock(&powerclamp_lock);
+
return ret;
}
@@ -657,7 +759,6 @@ static int powerclamp_debug_show(struct seq_file *m, void *unused)
{
int i = 0;
- seq_printf(m, "controlling cpu: %d\n", control_cpu);
seq_printf(m, "pct confidence steady dynamic (compensation)\n");
for (i = 0; i < MAX_TARGET_RATIO; i++) {
seq_printf(m, "%d\t%lu\t%lu\t%lu\n",
@@ -680,75 +781,57 @@ static inline void powerclamp_create_debug_files(void)
&powerclamp_debug_fops);
}
-static enum cpuhp_state hp_state;
-
static int __init powerclamp_init(void)
{
int retval;
- cpu_clamping_mask = bitmap_zalloc(num_possible_cpus(), GFP_KERNEL);
- if (!cpu_clamping_mask)
- return -ENOMEM;
-
/* probe cpu features and ids here */
retval = powerclamp_probe();
if (retval)
- goto exit_free;
+ return retval;
+
+ mutex_lock(&powerclamp_lock);
+ retval = allocate_copy_idle_injection_mask(cpu_present_mask);
+ mutex_unlock(&powerclamp_lock);
+
+ if (retval)
+ return retval;
/* set default limit, maybe adjusted during runtime based on feedback */
window_size = 2;
- retval = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
- "thermal/intel_powerclamp:online",
- powerclamp_cpu_online,
- powerclamp_cpu_predown);
- if (retval < 0)
- goto exit_free;
-
- hp_state = retval;
-
- worker_data = alloc_percpu(struct powerclamp_worker_data);
- if (!worker_data) {
- retval = -ENOMEM;
- goto exit_unregister;
- }
cooling_dev = thermal_cooling_device_register("intel_powerclamp", NULL,
- &powerclamp_cooling_ops);
- if (IS_ERR(cooling_dev)) {
- retval = -ENODEV;
- goto exit_free_thread;
- }
+ &powerclamp_cooling_ops);
+ if (IS_ERR(cooling_dev))
+ return -ENODEV;
if (!duration)
- duration = jiffies_to_msecs(DEFAULT_DURATION_JIFFIES);
+ duration = jiffies_to_usecs(DEFAULT_DURATION_JIFFIES);
powerclamp_create_debug_files();
return 0;
-
-exit_free_thread:
- free_percpu(worker_data);
-exit_unregister:
- cpuhp_remove_state_nocalls(hp_state);
-exit_free:
- bitmap_free(cpu_clamping_mask);
- return retval;
}
module_init(powerclamp_init);
static void __exit powerclamp_exit(void)
{
+ mutex_lock(&powerclamp_lock);
end_power_clamp();
- cpuhp_remove_state_nocalls(hp_state);
- free_percpu(worker_data);
+ mutex_unlock(&powerclamp_lock);
+
thermal_cooling_device_unregister(cooling_dev);
- bitmap_free(cpu_clamping_mask);
cancel_delayed_work_sync(&poll_pkg_cstate_work);
debugfs_remove_recursive(debug_dir);
+
+ if (cpumask_available(idle_injection_cpu_mask))
+ free_cpumask_var(idle_injection_cpu_mask);
}
module_exit(powerclamp_exit);
+MODULE_IMPORT_NS(IDLE_INJECT);
+
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arjan van de Ven <arjan@linux.intel.com>");
MODULE_AUTHOR("Jacob Pan <jacob.jun.pan@linux.intel.com>");
diff --git a/drivers/thermal/intel/intel_quark_dts_thermal.c b/drivers/thermal/intel/intel_quark_dts_thermal.c
index 3eafc6b0e6c3..97b843fa7568 100644
--- a/drivers/thermal/intel/intel_quark_dts_thermal.c
+++ b/drivers/thermal/intel/intel_quark_dts_thermal.c
@@ -84,6 +84,7 @@
#define QRK_DTS_MASK_TP_THRES 0xFF
#define QRK_DTS_SHIFT_TP 8
#define QRK_DTS_ID_TP_CRITICAL 0
+#define QRK_DTS_ID_TP_HOT 1
#define QRK_DTS_SAFE_TP_THRES 105
/* Thermal Sensor Register Lock */
@@ -104,6 +105,7 @@ struct soc_sensor_entry {
u32 store_ptps;
u32 store_dts_enable;
struct thermal_zone_device *tzone;
+ struct thermal_trip trips[QRK_MAX_DTS_TRIPS];
};
static struct soc_sensor_entry *soc_dts;
@@ -172,9 +174,9 @@ static int soc_dts_disable(struct thermal_zone_device *tzd)
return ret;
}
-static int _get_trip_temp(int trip, int *temp)
+static int get_trip_temp(int trip)
{
- int status;
+ int status, temp;
u32 out;
mutex_lock(&dts_update_mutex);
@@ -183,7 +185,7 @@ static int _get_trip_temp(int trip, int *temp)
mutex_unlock(&dts_update_mutex);
if (status)
- return status;
+ return THERMAL_TEMP_INVALID;
/*
* Thermal Sensor Programmable Trip Point Register has 8-bit
@@ -191,21 +193,10 @@ static int _get_trip_temp(int trip, int *temp)
* thresholds. The threshold value is always offset by its
* temperature base (50 degree Celsius).
*/
- *temp = (out >> (trip * QRK_DTS_SHIFT_TP)) & QRK_DTS_MASK_TP_THRES;
- *temp -= QRK_DTS_TEMP_BASE;
+ temp = (out >> (trip * QRK_DTS_SHIFT_TP)) & QRK_DTS_MASK_TP_THRES;
+ temp -= QRK_DTS_TEMP_BASE;
- return 0;
-}
-
-static inline int sys_get_trip_temp(struct thermal_zone_device *tzd,
- int trip, int *temp)
-{
- return _get_trip_temp(trip, temp);
-}
-
-static inline int sys_get_crit_temp(struct thermal_zone_device *tzd, int *temp)
-{
- return _get_trip_temp(QRK_DTS_ID_TP_CRITICAL, temp);
+ return temp;
}
static int update_trip_temp(struct soc_sensor_entry *aux_entry,
@@ -262,17 +253,6 @@ static inline int sys_set_trip_temp(struct thermal_zone_device *tzd, int trip,
return update_trip_temp(tzd->devdata, trip, temp);
}
-static int sys_get_trip_type(struct thermal_zone_device *thermal,
- int trip, enum thermal_trip_type *type)
-{
- if (trip)
- *type = THERMAL_TRIP_HOT;
- else
- *type = THERMAL_TRIP_CRITICAL;
-
- return 0;
-}
-
static int sys_get_curr_temp(struct thermal_zone_device *tzd,
int *temp)
{
@@ -315,10 +295,7 @@ static int sys_change_mode(struct thermal_zone_device *tzd,
static struct thermal_zone_device_ops tzone_ops = {
.get_temp = sys_get_curr_temp,
- .get_trip_temp = sys_get_trip_temp,
- .get_trip_type = sys_get_trip_type,
.set_trip_temp = sys_set_trip_temp,
- .get_crit_temp = sys_get_crit_temp,
.change_mode = sys_change_mode,
};
@@ -385,10 +362,18 @@ static struct soc_sensor_entry *alloc_soc_dts(void)
goto err_ret;
}
- aux_entry->tzone = thermal_zone_device_register("quark_dts",
- QRK_MAX_DTS_TRIPS,
- wr_mask,
- aux_entry, &tzone_ops, NULL, 0, polling_delay);
+ aux_entry->trips[QRK_DTS_ID_TP_CRITICAL].temperature = get_trip_temp(QRK_DTS_ID_TP_CRITICAL);
+ aux_entry->trips[QRK_DTS_ID_TP_CRITICAL].type = THERMAL_TRIP_CRITICAL;
+
+ aux_entry->trips[QRK_DTS_ID_TP_HOT].temperature = get_trip_temp(QRK_DTS_ID_TP_HOT);
+ aux_entry->trips[QRK_DTS_ID_TP_HOT].type = THERMAL_TRIP_HOT;
+
+ aux_entry->tzone = thermal_zone_device_register_with_trips("quark_dts",
+ aux_entry->trips,
+ QRK_MAX_DTS_TRIPS,
+ wr_mask,
+ aux_entry, &tzone_ops,
+ NULL, 0, polling_delay);
if (IS_ERR(aux_entry->tzone)) {
err = PTR_ERR(aux_entry->tzone);
goto err_ret;
diff --git a/drivers/thermal/intel/intel_soc_dts_iosf.c b/drivers/thermal/intel/intel_soc_dts_iosf.c
index 342b0bb5a56d..8c26f7b2316b 100644
--- a/drivers/thermal/intel/intel_soc_dts_iosf.c
+++ b/drivers/thermal/intel/intel_soc_dts_iosf.c
@@ -7,6 +7,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/bitops.h>
+#include <linux/intel_tcc.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
@@ -45,32 +46,6 @@
/* DTS0 and DTS 1 */
#define SOC_MAX_DTS_SENSORS 2
-static int get_tj_max(u32 *tj_max)
-{
- u32 eax, edx;
- u32 val;
- int err;
-
- err = rdmsr_safe(MSR_IA32_TEMPERATURE_TARGET, &eax, &edx);
- if (err)
- goto err_ret;
- else {
- val = (eax >> 16) & 0xff;
- if (val)
- *tj_max = val * 1000;
- else {
- err = -EINVAL;
- goto err_ret;
- }
- }
-
- return 0;
-err_ret:
- *tj_max = 0;
-
- return err;
-}
-
static int sys_get_trip_temp(struct thermal_zone_device *tzd, int trip,
int *temp)
{
@@ -405,7 +380,7 @@ struct intel_soc_dts_sensors *intel_soc_dts_iosf_init(
{
struct intel_soc_dts_sensors *sensors;
bool notification;
- u32 tj_max;
+ int tj_max;
int ret;
int i;
@@ -415,8 +390,9 @@ struct intel_soc_dts_sensors *intel_soc_dts_iosf_init(
if (!trip_count || read_only_trip_count > trip_count)
return ERR_PTR(-EINVAL);
- if (get_tj_max(&tj_max))
- return ERR_PTR(-EINVAL);
+ tj_max = intel_tcc_get_tjmax(-1);
+ if (tj_max < 0)
+ return ERR_PTR(tj_max);
sensors = kzalloc(sizeof(*sensors), GFP_KERNEL);
if (!sensors)
@@ -475,4 +451,5 @@ void intel_soc_dts_iosf_exit(struct intel_soc_dts_sensors *sensors)
}
EXPORT_SYMBOL_GPL(intel_soc_dts_iosf_exit);
+MODULE_IMPORT_NS(INTEL_TCC);
MODULE_LICENSE("GPL v2");
diff --git a/drivers/thermal/intel/intel_tcc.c b/drivers/thermal/intel/intel_tcc.c
new file mode 100644
index 000000000000..2e5c741c41ca
--- /dev/null
+++ b/drivers/thermal/intel/intel_tcc.c
@@ -0,0 +1,139 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * intel_tcc.c - Library for Intel TCC (thermal control circuitry) MSR access
+ * Copyright (c) 2022, Intel Corporation.
+ */
+
+#include <linux/errno.h>
+#include <linux/intel_tcc.h>
+#include <asm/msr.h>
+
+/**
+ * intel_tcc_get_tjmax() - returns the default TCC activation Temperature
+ * @cpu: cpu that the MSR should be run on, nagative value means any cpu.
+ *
+ * Get the TjMax value, which is the default thermal throttling or TCC
+ * activation temperature in degrees C.
+ *
+ * Return: Tjmax value in degrees C on success, negative error code otherwise.
+ */
+int intel_tcc_get_tjmax(int cpu)
+{
+ u32 low, high;
+ int val, err;
+
+ if (cpu < 0)
+ err = rdmsr_safe(MSR_IA32_TEMPERATURE_TARGET, &low, &high);
+ else
+ err = rdmsr_safe_on_cpu(cpu, MSR_IA32_TEMPERATURE_TARGET, &low, &high);
+ if (err)
+ return err;
+
+ val = (low >> 16) & 0xff;
+
+ return val ? val : -ENODATA;
+}
+EXPORT_SYMBOL_NS_GPL(intel_tcc_get_tjmax, INTEL_TCC);
+
+/**
+ * intel_tcc_get_offset() - returns the TCC Offset value to Tjmax
+ * @cpu: cpu that the MSR should be run on, nagative value means any cpu.
+ *
+ * Get the TCC offset value to Tjmax. The effective thermal throttling or TCC
+ * activation temperature equals "Tjmax" - "TCC Offset", in degrees C.
+ *
+ * Return: Tcc offset value in degrees C on success, negative error code otherwise.
+ */
+int intel_tcc_get_offset(int cpu)
+{
+ u32 low, high;
+ int err;
+
+ if (cpu < 0)
+ err = rdmsr_safe(MSR_IA32_TEMPERATURE_TARGET, &low, &high);
+ else
+ err = rdmsr_safe_on_cpu(cpu, MSR_IA32_TEMPERATURE_TARGET, &low, &high);
+ if (err)
+ return err;
+
+ return (low >> 24) & 0x3f;
+}
+EXPORT_SYMBOL_NS_GPL(intel_tcc_get_offset, INTEL_TCC);
+
+/**
+ * intel_tcc_set_offset() - set the TCC offset value to Tjmax
+ * @cpu: cpu that the MSR should be run on, nagative value means any cpu.
+ * @offset: TCC offset value in degree C
+ *
+ * Set the TCC Offset value to Tjmax. The effective thermal throttling or TCC
+ * activation temperature equals "Tjmax" - "TCC Offset", in degree C.
+ *
+ * Return: On success returns 0, negative error code otherwise.
+ */
+
+int intel_tcc_set_offset(int cpu, int offset)
+{
+ u32 low, high;
+ int err;
+
+ if (offset < 0 || offset > 0x3f)
+ return -EINVAL;
+
+ if (cpu < 0)
+ err = rdmsr_safe(MSR_IA32_TEMPERATURE_TARGET, &low, &high);
+ else
+ err = rdmsr_safe_on_cpu(cpu, MSR_IA32_TEMPERATURE_TARGET, &low, &high);
+ if (err)
+ return err;
+
+ /* MSR Locked */
+ if (low & BIT(31))
+ return -EPERM;
+
+ low &= ~(0x3f << 24);
+ low |= offset << 24;
+
+ if (cpu < 0)
+ return wrmsr_safe(MSR_IA32_TEMPERATURE_TARGET, low, high);
+ else
+ return wrmsr_safe_on_cpu(cpu, MSR_IA32_TEMPERATURE_TARGET, low, high);
+}
+EXPORT_SYMBOL_NS_GPL(intel_tcc_set_offset, INTEL_TCC);
+
+/**
+ * intel_tcc_get_temp() - returns the current temperature
+ * @cpu: cpu that the MSR should be run on, nagative value means any cpu.
+ * @pkg: true: Package Thermal Sensor. false: Core Thermal Sensor.
+ *
+ * Get the current temperature returned by the CPU core/package level
+ * thermal sensor, in degrees C.
+ *
+ * Return: Temperature in degrees C on success, negative error code otherwise.
+ */
+int intel_tcc_get_temp(int cpu, bool pkg)
+{
+ u32 low, high;
+ u32 msr = pkg ? MSR_IA32_PACKAGE_THERM_STATUS : MSR_IA32_THERM_STATUS;
+ int tjmax, temp, err;
+
+ tjmax = intel_tcc_get_tjmax(cpu);
+ if (tjmax < 0)
+ return tjmax;
+
+ if (cpu < 0)
+ err = rdmsr_safe(msr, &low, &high);
+ else
+ err = rdmsr_safe_on_cpu(cpu, msr, &low, &high);
+ if (err)
+ return err;
+
+ /* Temperature is beyond the valid thermal sensor range */
+ if (!(low & BIT(31)))
+ return -ENODATA;
+
+ temp = tjmax - ((low >> 16) & 0x7f);
+
+ /* Do not allow negative CPU temperature */
+ return temp >= 0 ? temp : -ENODATA;
+}
+EXPORT_SYMBOL_NS_GPL(intel_tcc_get_temp, INTEL_TCC);
diff --git a/drivers/thermal/intel/intel_tcc_cooling.c b/drivers/thermal/intel/intel_tcc_cooling.c
index a89e7e1890e4..e95f799454fe 100644
--- a/drivers/thermal/intel/intel_tcc_cooling.c
+++ b/drivers/thermal/intel/intel_tcc_cooling.c
@@ -7,12 +7,11 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/device.h>
+#include <linux/intel_tcc.h>
#include <linux/module.h>
#include <linux/thermal.h>
#include <asm/cpu_device_id.h>
-#define TCC_SHIFT 24
-#define TCC_MASK (0x3fULL<<24)
#define TCC_PROGRAMMABLE BIT(30)
#define TCC_LOCKED BIT(31)
@@ -21,47 +20,26 @@ static struct thermal_cooling_device *tcc_cdev;
static int tcc_get_max_state(struct thermal_cooling_device *cdev, unsigned long
*state)
{
- *state = TCC_MASK >> TCC_SHIFT;
- return 0;
-}
-
-static int tcc_offset_update(int tcc)
-{
- u64 val;
- int err;
-
- err = rdmsrl_safe(MSR_IA32_TEMPERATURE_TARGET, &val);
- if (err)
- return err;
-
- val &= ~TCC_MASK;
- val |= tcc << TCC_SHIFT;
-
- err = wrmsrl_safe(MSR_IA32_TEMPERATURE_TARGET, val);
- if (err)
- return err;
-
+ *state = 0x3f;
return 0;
}
static int tcc_get_cur_state(struct thermal_cooling_device *cdev, unsigned long
*state)
{
- u64 val;
- int err;
+ int offset = intel_tcc_get_offset(-1);
- err = rdmsrl_safe(MSR_IA32_TEMPERATURE_TARGET, &val);
- if (err)
- return err;
+ if (offset < 0)
+ return offset;
- *state = (val & TCC_MASK) >> TCC_SHIFT;
+ *state = offset;
return 0;
}
static int tcc_set_cur_state(struct thermal_cooling_device *cdev, unsigned long
state)
{
- return tcc_offset_update(state);
+ return intel_tcc_set_offset(-1, (int)state);
}
static const struct thermal_cooling_device_ops tcc_cooling_ops = {
@@ -140,6 +118,7 @@ static void __exit tcc_cooling_exit(void)
module_exit(tcc_cooling_exit)
+MODULE_IMPORT_NS(INTEL_TCC);
MODULE_DESCRIPTION("TCC offset cooling device Driver");
MODULE_AUTHOR("Zhang Rui <rui.zhang@intel.com>");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/thermal/intel/x86_pkg_temp_thermal.c b/drivers/thermal/intel/x86_pkg_temp_thermal.c
index 84c3a116ed04..1c2de84742df 100644
--- a/drivers/thermal/intel/x86_pkg_temp_thermal.c
+++ b/drivers/thermal/intel/x86_pkg_temp_thermal.c
@@ -7,6 +7,7 @@
#include <linux/module.h>
#include <linux/init.h>
+#include <linux/intel_tcc.h>
#include <linux/err.h>
#include <linux/param.h>
#include <linux/device.h>
@@ -48,11 +49,11 @@ MODULE_PARM_DESC(notify_delay_ms,
struct zone_device {
int cpu;
bool work_scheduled;
- u32 tj_max;
u32 msr_pkg_therm_low;
u32 msr_pkg_therm_high;
struct delayed_work work;
struct thermal_zone_device *tzone;
+ struct thermal_trip *trips;
struct cpumask cpumask;
};
@@ -104,71 +105,17 @@ static struct zone_device *pkg_temp_thermal_get_dev(unsigned int cpu)
return NULL;
}
-/*
-* tj-max is interesting because threshold is set relative to this
-* temperature.
-*/
-static int get_tj_max(int cpu, u32 *tj_max)
-{
- u32 eax, edx, val;
- int err;
-
- err = rdmsr_safe_on_cpu(cpu, MSR_IA32_TEMPERATURE_TARGET, &eax, &edx);
- if (err)
- return err;
-
- val = (eax >> 16) & 0xff;
- *tj_max = val * 1000;
-
- return val ? 0 : -EINVAL;
-}
-
static int sys_get_curr_temp(struct thermal_zone_device *tzd, int *temp)
{
struct zone_device *zonedev = tzd->devdata;
- u32 eax, edx;
-
- rdmsr_on_cpu(zonedev->cpu, MSR_IA32_PACKAGE_THERM_STATUS,
- &eax, &edx);
- if (eax & 0x80000000) {
- *temp = zonedev->tj_max - ((eax >> 16) & 0x7f) * 1000;
- pr_debug("sys_get_curr_temp %d\n", *temp);
- return 0;
- }
- return -EINVAL;
-}
-
-static int sys_get_trip_temp(struct thermal_zone_device *tzd,
- int trip, int *temp)
-{
- struct zone_device *zonedev = tzd->devdata;
- unsigned long thres_reg_value;
- u32 mask, shift, eax, edx;
- int ret;
-
- if (trip >= MAX_NUMBER_OF_TRIPS)
- return -EINVAL;
-
- if (trip) {
- mask = THERM_MASK_THRESHOLD1;
- shift = THERM_SHIFT_THRESHOLD1;
- } else {
- mask = THERM_MASK_THRESHOLD0;
- shift = THERM_SHIFT_THRESHOLD0;
- }
-
- ret = rdmsr_on_cpu(zonedev->cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT,
- &eax, &edx);
- if (ret < 0)
- return ret;
+ int val;
- thres_reg_value = (eax & mask) >> shift;
- if (thres_reg_value)
- *temp = zonedev->tj_max - thres_reg_value * 1000;
- else
- *temp = THERMAL_TEMP_INVALID;
- pr_debug("sys_get_trip_temp %d\n", *temp);
+ val = intel_tcc_get_temp(zonedev->cpu, true);
+ if (val < 0)
+ return val;
+ *temp = val * 1000;
+ pr_debug("sys_get_curr_temp %d\n", *temp);
return 0;
}
@@ -177,9 +124,14 @@ sys_set_trip_temp(struct thermal_zone_device *tzd, int trip, int temp)
{
struct zone_device *zonedev = tzd->devdata;
u32 l, h, mask, shift, intr;
- int ret;
+ int tj_max, ret;
+
+ tj_max = intel_tcc_get_tjmax(zonedev->cpu);
+ if (tj_max < 0)
+ return tj_max;
+ tj_max *= 1000;
- if (trip >= MAX_NUMBER_OF_TRIPS || temp >= zonedev->tj_max)
+ if (trip >= MAX_NUMBER_OF_TRIPS || temp >= tj_max)
return -EINVAL;
ret = rdmsr_on_cpu(zonedev->cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT,
@@ -204,7 +156,7 @@ sys_set_trip_temp(struct thermal_zone_device *tzd, int trip, int temp)
if (!temp) {
l &= ~intr;
} else {
- l |= (zonedev->tj_max - temp)/1000 << shift;
+ l |= (tj_max - temp)/1000 << shift;
l |= intr;
}
@@ -212,18 +164,9 @@ sys_set_trip_temp(struct thermal_zone_device *tzd, int trip, int temp)
l, h);
}
-static int sys_get_trip_type(struct thermal_zone_device *thermal, int trip,
- enum thermal_trip_type *type)
-{
- *type = THERMAL_TRIP_PASSIVE;
- return 0;
-}
-
/* Thermal zone callback registry */
static struct thermal_zone_device_ops tzone_ops = {
.get_temp = sys_get_curr_temp,
- .get_trip_temp = sys_get_trip_temp,
- .get_trip_type = sys_get_trip_type,
.set_trip_temp = sys_set_trip_temp,
};
@@ -323,12 +266,55 @@ static int pkg_thermal_notify(u64 msr_val)
return 0;
}
+static struct thermal_trip *pkg_temp_thermal_trips_init(int cpu, int tj_max, int num_trips)
+{
+ struct thermal_trip *trips;
+ unsigned long thres_reg_value;
+ u32 mask, shift, eax, edx;
+ int ret, i;
+
+ trips = kzalloc(sizeof(*trips) * num_trips, GFP_KERNEL);
+ if (!trips)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; i < num_trips; i++) {
+
+ if (i) {
+ mask = THERM_MASK_THRESHOLD1;
+ shift = THERM_SHIFT_THRESHOLD1;
+ } else {
+ mask = THERM_MASK_THRESHOLD0;
+ shift = THERM_SHIFT_THRESHOLD0;
+ }
+
+ ret = rdmsr_on_cpu(cpu, MSR_IA32_PACKAGE_THERM_INTERRUPT,
+ &eax, &edx);
+ if (ret < 0) {
+ kfree(trips);
+ return ERR_PTR(ret);
+ }
+
+ thres_reg_value = (eax & mask) >> shift;
+
+ trips[i].temperature = thres_reg_value ?
+ tj_max - thres_reg_value * 1000 : THERMAL_TEMP_INVALID;
+
+ trips[i].type = THERMAL_TRIP_PASSIVE;
+
+ pr_debug("%s: cpu=%d, trip=%d, temp=%d\n",
+ __func__, cpu, i, trips[i].temperature);
+ }
+
+ return trips;
+}
+
static int pkg_temp_thermal_device_add(unsigned int cpu)
{
int id = topology_logical_die_id(cpu);
- u32 tj_max, eax, ebx, ecx, edx;
+ u32 eax, ebx, ecx, edx;
struct zone_device *zonedev;
int thres_count, err;
+ int tj_max;
if (id >= max_id)
return -ENOMEM;
@@ -340,32 +326,34 @@ static int pkg_temp_thermal_device_add(unsigned int cpu)
thres_count = clamp_val(thres_count, 0, MAX_NUMBER_OF_TRIPS);
- err = get_tj_max(cpu, &tj_max);
- if (err)
- return err;
+ tj_max = intel_tcc_get_tjmax(cpu);
+ if (tj_max < 0)
+ return tj_max;
zonedev = kzalloc(sizeof(*zonedev), GFP_KERNEL);
if (!zonedev)
return -ENOMEM;
+ zonedev->trips = pkg_temp_thermal_trips_init(cpu, tj_max, thres_count);
+ if (IS_ERR(zonedev->trips)) {
+ err = PTR_ERR(zonedev->trips);
+ goto out_kfree_zonedev;
+ }
+
INIT_DELAYED_WORK(&zonedev->work, pkg_temp_thermal_threshold_work_fn);
zonedev->cpu = cpu;
- zonedev->tj_max = tj_max;
- zonedev->tzone = thermal_zone_device_register("x86_pkg_temp",
- thres_count,
+ zonedev->tzone = thermal_zone_device_register_with_trips("x86_pkg_temp",
+ zonedev->trips, thres_count,
(thres_count == MAX_NUMBER_OF_TRIPS) ? 0x03 : 0x01,
zonedev, &tzone_ops, &pkg_temp_tz_params, 0, 0);
if (IS_ERR(zonedev->tzone)) {
err = PTR_ERR(zonedev->tzone);
- kfree(zonedev);
- return err;
+ goto out_kfree_trips;
}
err = thermal_zone_device_enable(zonedev->tzone);
- if (err) {
- thermal_zone_device_unregister(zonedev->tzone);
- kfree(zonedev);
- return err;
- }
+ if (err)
+ goto out_unregister_tz;
+
/* Store MSR value for package thermal interrupt, to restore at exit */
rdmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT, zonedev->msr_pkg_therm_low,
zonedev->msr_pkg_therm_high);
@@ -374,7 +362,16 @@ static int pkg_temp_thermal_device_add(unsigned int cpu)
raw_spin_lock_irq(&pkg_temp_lock);
zones[id] = zonedev;
raw_spin_unlock_irq(&pkg_temp_lock);
+
return 0;
+
+out_unregister_tz:
+ thermal_zone_device_unregister(zonedev->tzone);
+out_kfree_trips:
+ kfree(zonedev->trips);
+out_kfree_zonedev:
+ kfree(zonedev);
+ return err;
}
static int pkg_thermal_cpu_offline(unsigned int cpu)
@@ -458,8 +455,10 @@ static int pkg_thermal_cpu_offline(unsigned int cpu)
raw_spin_unlock_irq(&pkg_temp_lock);
/* Final cleanup if this is the last cpu */
- if (lastcpu)
+ if (lastcpu) {
+ kfree(zonedev->trips);
kfree(zonedev);
+ }
return 0;
}
@@ -531,6 +530,7 @@ static void __exit pkg_temp_thermal_exit(void)
}
module_exit(pkg_temp_thermal_exit)
+MODULE_IMPORT_NS(INTEL_TCC);
MODULE_DESCRIPTION("X86 PKG TEMP Thermal Driver");
MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/thermal/kirkwood_thermal.c b/drivers/thermal/kirkwood_thermal.c
index 7fb6e476c82a..bec7ec20e79d 100644
--- a/drivers/thermal/kirkwood_thermal.c
+++ b/drivers/thermal/kirkwood_thermal.c
@@ -64,15 +64,13 @@ static int kirkwood_thermal_probe(struct platform_device *pdev)
{
struct thermal_zone_device *thermal = NULL;
struct kirkwood_thermal_priv *priv;
- struct resource *res;
int ret;
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->sensor = devm_ioremap_resource(&pdev->dev, res);
+ priv->sensor = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
if (IS_ERR(priv->sensor))
return PTR_ERR(priv->sensor);
diff --git a/drivers/thermal/mediatek/Kconfig b/drivers/thermal/mediatek/Kconfig
new file mode 100644
index 000000000000..d82c86d9be56
--- /dev/null
+++ b/drivers/thermal/mediatek/Kconfig
@@ -0,0 +1,37 @@
+config MTK_THERMAL
+ tristate "MediaTek thermal drivers"
+ depends on THERMAL_OF
+ help
+ This is the option for MediaTek thermal software solutions.
+ Please enable corresponding options to get temperature
+ information from thermal sensors or turn on throttle
+ mechaisms for thermal mitigation.
+
+if MTK_THERMAL
+
+config MTK_SOC_THERMAL
+ tristate "AUXADC temperature sensor driver for MediaTek SoCs"
+ depends on HAS_IOMEM
+ help
+ Enable this option if you want to get SoC temperature
+ information for MediaTek platforms.
+ This driver configures thermal controllers to collect
+ temperature via AUXADC interface.
+
+config MTK_LVTS_THERMAL
+ tristate "LVTS Thermal Driver for MediaTek SoCs"
+ depends on HAS_IOMEM
+ help
+ Enable this option if you want to get SoC temperature
+ information for supported MediaTek platforms.
+ This driver configures LVTS (Low Voltage Thermal Sensor)
+ thermal controllers to collect temperatures via ASIF
+ (Analog Serial Interface).
+
+config MTK_LVTS_THERMAL_DEBUGFS
+ bool "LVTS thermal debugfs"
+ depends on MTK_LVTS_THERMAL && DEBUG_FS
+ help
+ Enable this option to debug the internals of the device driver.
+
+endif
diff --git a/drivers/thermal/mediatek/Makefile b/drivers/thermal/mediatek/Makefile
new file mode 100644
index 000000000000..1c6daa1e644b
--- /dev/null
+++ b/drivers/thermal/mediatek/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_MTK_SOC_THERMAL) += auxadc_thermal.o
+obj-$(CONFIG_MTK_LVTS_THERMAL) += lvts_thermal.o
diff --git a/drivers/thermal/mtk_thermal.c b/drivers/thermal/mediatek/auxadc_thermal.c
index 8440692e3890..ab730f9552d0 100644
--- a/drivers/thermal/mtk_thermal.c
+++ b/drivers/thermal/mediatek/auxadc_thermal.c
@@ -23,7 +23,7 @@
#include <linux/reset.h>
#include <linux/types.h>
-#include "thermal_hwmon.h"
+#include "../thermal_hwmon.h"
/* AUXADC Registers */
#define AUXADC_CON1_SET_V 0x008
@@ -150,6 +150,20 @@
#define CALIB_BUF1_VALID_V2(x) (((x) >> 4) & 0x1)
#define CALIB_BUF1_O_SLOPE_SIGN_V2(x) (((x) >> 3) & 0x1)
+/*
+ * Layout of the fuses providing the calibration data
+ * These macros can be used for MT7981 and MT7986.
+ */
+#define CALIB_BUF0_ADC_GE_V3(x) (((x) >> 0) & 0x3ff)
+#define CALIB_BUF0_DEGC_CALI_V3(x) (((x) >> 20) & 0x3f)
+#define CALIB_BUF0_O_SLOPE_V3(x) (((x) >> 26) & 0x3f)
+#define CALIB_BUF1_VTS_TS1_V3(x) (((x) >> 0) & 0x1ff)
+#define CALIB_BUF1_VTS_TS2_V3(x) (((x) >> 21) & 0x1ff)
+#define CALIB_BUF1_VTS_TSABB_V3(x) (((x) >> 9) & 0x1ff)
+#define CALIB_BUF1_VALID_V3(x) (((x) >> 18) & 0x1)
+#define CALIB_BUF1_O_SLOPE_SIGN_V3(x) (((x) >> 19) & 0x1)
+#define CALIB_BUF1_ID_V3(x) (((x) >> 20) & 0x1)
+
enum {
VTS1,
VTS2,
@@ -163,6 +177,7 @@ enum {
enum mtk_thermal_version {
MTK_THERMAL_V1 = 1,
MTK_THERMAL_V2,
+ MTK_THERMAL_V3,
};
/* MT2701 thermal sensors */
@@ -245,6 +260,27 @@ enum mtk_thermal_version {
/* The calibration coefficient of sensor */
#define MT8183_CALIBRATION 153
+/* AUXADC channel 11 is used for the temperature sensors */
+#define MT7986_TEMP_AUXADC_CHANNEL 11
+
+/* The total number of temperature sensors in the MT7986 */
+#define MT7986_NUM_SENSORS 1
+
+/* The number of banks in the MT7986 */
+#define MT7986_NUM_ZONES 1
+
+/* The number of sensing points per bank */
+#define MT7986_NUM_SENSORS_PER_ZONE 1
+
+/* MT7986 thermal sensors */
+#define MT7986_TS1 0
+
+/* The number of controller in the MT7986 */
+#define MT7986_NUM_CONTROLLER 1
+
+/* The calibration coefficient of sensor */
+#define MT7986_CALIBRATION 165
+
struct mtk_thermal;
struct thermal_bank_cfg {
@@ -292,6 +328,8 @@ struct mtk_thermal {
const struct mtk_thermal_data *conf;
struct mtk_thermal_bank banks[MAX_NUM_ZONES];
+
+ int (*raw_to_mcelsius)(struct mtk_thermal *mt, int sensno, s32 raw);
};
/* MT8183 thermal sensor data */
@@ -386,6 +424,14 @@ static const int mt7622_mux_values[MT7622_NUM_SENSORS] = { 0, };
static const int mt7622_vts_index[MT7622_NUM_SENSORS] = { VTS1 };
static const int mt7622_tc_offset[MT7622_NUM_CONTROLLER] = { 0x0, };
+/* MT7986 thermal sensor data */
+static const int mt7986_bank_data[MT7986_NUM_SENSORS] = { MT7986_TS1, };
+static const int mt7986_msr[MT7986_NUM_SENSORS_PER_ZONE] = { TEMP_MSR0, };
+static const int mt7986_adcpnp[MT7986_NUM_SENSORS_PER_ZONE] = { TEMP_ADCPNP0, };
+static const int mt7986_mux_values[MT7986_NUM_SENSORS] = { 0, };
+static const int mt7986_vts_index[MT7986_NUM_SENSORS] = { VTS1 };
+static const int mt7986_tc_offset[MT7986_NUM_CONTROLLER] = { 0x0, };
+
/*
* The MT8173 thermal controller has four banks. Each bank can read up to
* four temperature sensors simultaneously. The MT8173 has a total of 5
@@ -549,8 +595,32 @@ static const struct mtk_thermal_data mt8183_thermal_data = {
.version = MTK_THERMAL_V1,
};
+/*
+ * MT7986 uses AUXADC Channel 11 for raw data access.
+ */
+static const struct mtk_thermal_data mt7986_thermal_data = {
+ .auxadc_channel = MT7986_TEMP_AUXADC_CHANNEL,
+ .num_banks = MT7986_NUM_ZONES,
+ .num_sensors = MT7986_NUM_SENSORS,
+ .vts_index = mt7986_vts_index,
+ .cali_val = MT7986_CALIBRATION,
+ .num_controller = MT7986_NUM_CONTROLLER,
+ .controller_offset = mt7986_tc_offset,
+ .need_switch_bank = true,
+ .bank_data = {
+ {
+ .num_sensors = 1,
+ .sensors = mt7986_bank_data,
+ },
+ },
+ .msr = mt7986_msr,
+ .adcpnp = mt7986_adcpnp,
+ .sensor_mux_values = mt7986_mux_values,
+ .version = MTK_THERMAL_V3,
+};
+
/**
- * raw_to_mcelsius - convert a raw ADC value to mcelsius
+ * raw_to_mcelsius_v1 - convert a raw ADC value to mcelsius
* @mt: The thermal controller
* @sensno: sensor number
* @raw: raw ADC value
@@ -603,6 +673,22 @@ static int raw_to_mcelsius_v2(struct mtk_thermal *mt, int sensno, s32 raw)
return (format_2 - tmp) * 100;
}
+static int raw_to_mcelsius_v3(struct mtk_thermal *mt, int sensno, s32 raw)
+{
+ s32 tmp;
+
+ if (raw == 0)
+ return 0;
+
+ raw &= 0xfff;
+ tmp = 100000 * 15 / 16 * 10000;
+ tmp /= 4096 - 512 + mt->adc_ge;
+ tmp /= 1490;
+ tmp *= raw - mt->vts[sensno] - 2900;
+
+ return mt->degc_cali * 500 - tmp;
+}
+
/**
* mtk_thermal_get_bank - get bank
* @bank: The bank
@@ -656,13 +742,9 @@ static int mtk_thermal_bank_temperature(struct mtk_thermal_bank *bank)
for (i = 0; i < conf->bank_data[bank->id].num_sensors; i++) {
raw = readl(mt->thermal_base + conf->msr[i]);
- if (mt->conf->version == MTK_THERMAL_V1) {
- temp = raw_to_mcelsius_v1(
- mt, conf->bank_data[bank->id].sensors[i], raw);
- } else {
- temp = raw_to_mcelsius_v2(
- mt, conf->bank_data[bank->id].sensors[i], raw);
- }
+ temp = mt->raw_to_mcelsius(
+ mt, conf->bank_data[bank->id].sensors[i], raw);
+
/*
* The first read of a sensor often contains very high bogus
@@ -887,6 +969,25 @@ static int mtk_thermal_extract_efuse_v2(struct mtk_thermal *mt, u32 *buf)
return 0;
}
+static int mtk_thermal_extract_efuse_v3(struct mtk_thermal *mt, u32 *buf)
+{
+ if (!CALIB_BUF1_VALID_V3(buf[1]))
+ return -EINVAL;
+
+ mt->adc_ge = CALIB_BUF0_ADC_GE_V3(buf[0]);
+ mt->degc_cali = CALIB_BUF0_DEGC_CALI_V3(buf[0]);
+ mt->o_slope = CALIB_BUF0_O_SLOPE_V3(buf[0]);
+ mt->vts[VTS1] = CALIB_BUF1_VTS_TS1_V3(buf[1]);
+ mt->vts[VTS2] = CALIB_BUF1_VTS_TS2_V3(buf[1]);
+ mt->vts[VTSABB] = CALIB_BUF1_VTS_TSABB_V3(buf[1]);
+ mt->o_slope_sign = CALIB_BUF1_O_SLOPE_SIGN_V3(buf[1]);
+
+ if (CALIB_BUF1_ID_V3(buf[1]) == 0)
+ mt->o_slope = 0;
+
+ return 0;
+}
+
static int mtk_thermal_get_calibration_data(struct device *dev,
struct mtk_thermal *mt)
{
@@ -897,6 +998,7 @@ static int mtk_thermal_get_calibration_data(struct device *dev,
/* Start with default values */
mt->adc_ge = 512;
+ mt->adc_oe = 512;
for (i = 0; i < mt->conf->num_sensors; i++)
mt->vts[i] = 260;
mt->degc_cali = 40;
@@ -922,10 +1024,20 @@ static int mtk_thermal_get_calibration_data(struct device *dev,
goto out;
}
- if (mt->conf->version == MTK_THERMAL_V1)
+ switch (mt->conf->version) {
+ case MTK_THERMAL_V1:
ret = mtk_thermal_extract_efuse_v1(mt, buf);
- else
+ break;
+ case MTK_THERMAL_V2:
ret = mtk_thermal_extract_efuse_v2(mt, buf);
+ break;
+ case MTK_THERMAL_V3:
+ ret = mtk_thermal_extract_efuse_v3(mt, buf);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
if (ret) {
dev_info(dev, "Device not calibrated, using default calibration values\n");
@@ -956,6 +1068,10 @@ static const struct of_device_id mtk_thermal_of_match[] = {
.data = (void *)&mt7622_thermal_data,
},
{
+ .compatible = "mediatek,mt7986-thermal",
+ .data = (void *)&mt7986_thermal_data,
+ },
+ {
.compatible = "mediatek,mt8183-thermal",
.data = (void *)&mt8183_thermal_data,
}, {
@@ -990,7 +1106,6 @@ static int mtk_thermal_probe(struct platform_device *pdev)
int ret, i, ctrl_id;
struct device_node *auxadc, *apmixedsys, *np = pdev->dev.of_node;
struct mtk_thermal *mt;
- struct resource *res;
u64 auxadc_phys_base, apmixed_phys_base;
struct thermal_zone_device *tzdev;
void __iomem *apmixed_base, *auxadc_base;
@@ -1009,8 +1124,7 @@ static int mtk_thermal_probe(struct platform_device *pdev)
if (IS_ERR(mt->clk_auxadc))
return PTR_ERR(mt->clk_auxadc);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mt->thermal_base = devm_ioremap_resource(&pdev->dev, res);
+ mt->thermal_base = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
if (IS_ERR(mt->thermal_base))
return PTR_ERR(mt->thermal_base);
@@ -1070,11 +1184,18 @@ static int mtk_thermal_probe(struct platform_device *pdev)
goto err_disable_clk_auxadc;
}
- if (mt->conf->version == MTK_THERMAL_V2) {
+ if (mt->conf->version != MTK_THERMAL_V1) {
mtk_thermal_turn_on_buffer(apmixed_base);
mtk_thermal_release_periodic_ts(mt, auxadc_base);
}
+ if (mt->conf->version == MTK_THERMAL_V1)
+ mt->raw_to_mcelsius = raw_to_mcelsius_v1;
+ else if (mt->conf->version == MTK_THERMAL_V2)
+ mt->raw_to_mcelsius = raw_to_mcelsius_v2;
+ else
+ mt->raw_to_mcelsius = raw_to_mcelsius_v3;
+
for (ctrl_id = 0; ctrl_id < mt->conf->num_controller ; ctrl_id++)
for (i = 0; i < mt->conf->num_banks; i++)
mtk_thermal_init_bank(mt, i, apmixed_phys_base,
diff --git a/drivers/thermal/mediatek/lvts_thermal.c b/drivers/thermal/mediatek/lvts_thermal.c
new file mode 100644
index 000000000000..84ba65a27acf
--- /dev/null
+++ b/drivers/thermal/mediatek/lvts_thermal.c
@@ -0,0 +1,1224 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2023 MediaTek Inc.
+ * Author: Balsam CHIHI <bchihi@baylibre.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/debugfs.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/nvmem-consumer.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <linux/thermal.h>
+#include <dt-bindings/thermal/mediatek,lvts-thermal.h>
+
+#define LVTS_MONCTL0(__base) (__base + 0x0000)
+#define LVTS_MONCTL1(__base) (__base + 0x0004)
+#define LVTS_MONCTL2(__base) (__base + 0x0008)
+#define LVTS_MONINT(__base) (__base + 0x000C)
+#define LVTS_MONINTSTS(__base) (__base + 0x0010)
+#define LVTS_MONIDET0(__base) (__base + 0x0014)
+#define LVTS_MONIDET1(__base) (__base + 0x0018)
+#define LVTS_MONIDET2(__base) (__base + 0x001C)
+#define LVTS_MONIDET3(__base) (__base + 0x0020)
+#define LVTS_H2NTHRE(__base) (__base + 0x0024)
+#define LVTS_HTHRE(__base) (__base + 0x0028)
+#define LVTS_OFFSETH(__base) (__base + 0x0030)
+#define LVTS_OFFSETL(__base) (__base + 0x0034)
+#define LVTS_MSRCTL0(__base) (__base + 0x0038)
+#define LVTS_MSRCTL1(__base) (__base + 0x003C)
+#define LVTS_TSSEL(__base) (__base + 0x0040)
+#define LVTS_CALSCALE(__base) (__base + 0x0048)
+#define LVTS_ID(__base) (__base + 0x004C)
+#define LVTS_CONFIG(__base) (__base + 0x0050)
+#define LVTS_EDATA00(__base) (__base + 0x0054)
+#define LVTS_EDATA01(__base) (__base + 0x0058)
+#define LVTS_EDATA02(__base) (__base + 0x005C)
+#define LVTS_EDATA03(__base) (__base + 0x0060)
+#define LVTS_MSR0(__base) (__base + 0x0090)
+#define LVTS_MSR1(__base) (__base + 0x0094)
+#define LVTS_MSR2(__base) (__base + 0x0098)
+#define LVTS_MSR3(__base) (__base + 0x009C)
+#define LVTS_IMMD0(__base) (__base + 0x00A0)
+#define LVTS_IMMD1(__base) (__base + 0x00A4)
+#define LVTS_IMMD2(__base) (__base + 0x00A8)
+#define LVTS_IMMD3(__base) (__base + 0x00AC)
+#define LVTS_PROTCTL(__base) (__base + 0x00C0)
+#define LVTS_PROTTA(__base) (__base + 0x00C4)
+#define LVTS_PROTTB(__base) (__base + 0x00C8)
+#define LVTS_PROTTC(__base) (__base + 0x00CC)
+#define LVTS_CLKEN(__base) (__base + 0x00E4)
+
+#define LVTS_PERIOD_UNIT ((118 * 1000) / (256 * 38))
+#define LVTS_GROUP_INTERVAL 1
+#define LVTS_FILTER_INTERVAL 1
+#define LVTS_SENSOR_INTERVAL 1
+#define LVTS_HW_FILTER 0x2
+#define LVTS_TSSEL_CONF 0x13121110
+#define LVTS_CALSCALE_CONF 0x300
+#define LVTS_MONINT_CONF 0x9FBF7BDE
+
+#define LVTS_INT_SENSOR0 0x0009001F
+#define LVTS_INT_SENSOR1 0X000881F0
+#define LVTS_INT_SENSOR2 0x00247C00
+#define LVTS_INT_SENSOR3 0x1FC00000
+
+#define LVTS_SENSOR_MAX 4
+#define LVTS_GOLDEN_TEMP_MAX 62
+#define LVTS_GOLDEN_TEMP_DEFAULT 50
+#define LVTS_COEFF_A -250460
+#define LVTS_COEFF_B 250460
+
+#define LVTS_MSR_IMMEDIATE_MODE 0
+#define LVTS_MSR_FILTERED_MODE 1
+
+#define LVTS_HW_SHUTDOWN_MT8195 105000
+
+static int golden_temp = LVTS_GOLDEN_TEMP_DEFAULT;
+static int coeff_b = LVTS_COEFF_B;
+
+struct lvts_sensor_data {
+ int dt_id;
+};
+
+struct lvts_ctrl_data {
+ struct lvts_sensor_data lvts_sensor[LVTS_SENSOR_MAX];
+ int cal_offset[LVTS_SENSOR_MAX];
+ int hw_tshut_temp;
+ int num_lvts_sensor;
+ int offset;
+ int mode;
+};
+
+struct lvts_data {
+ const struct lvts_ctrl_data *lvts_ctrl;
+ int num_lvts_ctrl;
+};
+
+struct lvts_sensor {
+ struct thermal_zone_device *tz;
+ void __iomem *msr;
+ void __iomem *base;
+ int id;
+ int dt_id;
+};
+
+struct lvts_ctrl {
+ struct lvts_sensor sensors[LVTS_SENSOR_MAX];
+ u32 calibration[LVTS_SENSOR_MAX];
+ u32 hw_tshut_raw_temp;
+ int num_lvts_sensor;
+ int mode;
+ void __iomem *base;
+};
+
+struct lvts_domain {
+ struct lvts_ctrl *lvts_ctrl;
+ struct reset_control *reset;
+ struct clk *clk;
+ int num_lvts_ctrl;
+ void __iomem *base;
+ size_t calib_len;
+ u8 *calib;
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *dom_dentry;
+#endif
+};
+
+#ifdef CONFIG_MTK_LVTS_THERMAL_DEBUGFS
+
+#define LVTS_DEBUG_FS_REGS(__reg) \
+{ \
+ .name = __stringify(__reg), \
+ .offset = __reg(0), \
+}
+
+static const struct debugfs_reg32 lvts_regs[] = {
+ LVTS_DEBUG_FS_REGS(LVTS_MONCTL0),
+ LVTS_DEBUG_FS_REGS(LVTS_MONCTL1),
+ LVTS_DEBUG_FS_REGS(LVTS_MONCTL2),
+ LVTS_DEBUG_FS_REGS(LVTS_MONINT),
+ LVTS_DEBUG_FS_REGS(LVTS_MONINTSTS),
+ LVTS_DEBUG_FS_REGS(LVTS_MONIDET0),
+ LVTS_DEBUG_FS_REGS(LVTS_MONIDET1),
+ LVTS_DEBUG_FS_REGS(LVTS_MONIDET2),
+ LVTS_DEBUG_FS_REGS(LVTS_MONIDET3),
+ LVTS_DEBUG_FS_REGS(LVTS_H2NTHRE),
+ LVTS_DEBUG_FS_REGS(LVTS_HTHRE),
+ LVTS_DEBUG_FS_REGS(LVTS_OFFSETH),
+ LVTS_DEBUG_FS_REGS(LVTS_OFFSETL),
+ LVTS_DEBUG_FS_REGS(LVTS_MSRCTL0),
+ LVTS_DEBUG_FS_REGS(LVTS_MSRCTL1),
+ LVTS_DEBUG_FS_REGS(LVTS_TSSEL),
+ LVTS_DEBUG_FS_REGS(LVTS_CALSCALE),
+ LVTS_DEBUG_FS_REGS(LVTS_ID),
+ LVTS_DEBUG_FS_REGS(LVTS_CONFIG),
+ LVTS_DEBUG_FS_REGS(LVTS_EDATA00),
+ LVTS_DEBUG_FS_REGS(LVTS_EDATA01),
+ LVTS_DEBUG_FS_REGS(LVTS_EDATA02),
+ LVTS_DEBUG_FS_REGS(LVTS_EDATA03),
+ LVTS_DEBUG_FS_REGS(LVTS_MSR0),
+ LVTS_DEBUG_FS_REGS(LVTS_MSR1),
+ LVTS_DEBUG_FS_REGS(LVTS_MSR2),
+ LVTS_DEBUG_FS_REGS(LVTS_MSR3),
+ LVTS_DEBUG_FS_REGS(LVTS_IMMD0),
+ LVTS_DEBUG_FS_REGS(LVTS_IMMD1),
+ LVTS_DEBUG_FS_REGS(LVTS_IMMD2),
+ LVTS_DEBUG_FS_REGS(LVTS_IMMD3),
+ LVTS_DEBUG_FS_REGS(LVTS_PROTCTL),
+ LVTS_DEBUG_FS_REGS(LVTS_PROTTA),
+ LVTS_DEBUG_FS_REGS(LVTS_PROTTB),
+ LVTS_DEBUG_FS_REGS(LVTS_PROTTC),
+ LVTS_DEBUG_FS_REGS(LVTS_CLKEN),
+};
+
+static int lvts_debugfs_init(struct device *dev, struct lvts_domain *lvts_td)
+{
+ struct debugfs_regset32 *regset;
+ struct lvts_ctrl *lvts_ctrl;
+ struct dentry *dentry;
+ char name[64];
+ int i;
+
+ lvts_td->dom_dentry = debugfs_create_dir(dev_name(dev), NULL);
+ if (!lvts_td->dom_dentry)
+ return 0;
+
+ for (i = 0; i < lvts_td->num_lvts_ctrl; i++) {
+
+ lvts_ctrl = &lvts_td->lvts_ctrl[i];
+
+ sprintf(name, "controller%d", i);
+ dentry = debugfs_create_dir(name, lvts_td->dom_dentry);
+ if (!dentry)
+ continue;
+
+ regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
+ if (!regset)
+ continue;
+
+ regset->base = lvts_ctrl->base;
+ regset->regs = lvts_regs;
+ regset->nregs = ARRAY_SIZE(lvts_regs);
+
+ debugfs_create_regset32("registers", 0400, dentry, regset);
+ }
+
+ return 0;
+}
+
+static void lvts_debugfs_exit(struct lvts_domain *lvts_td)
+{
+ debugfs_remove_recursive(lvts_td->dom_dentry);
+}
+
+#else
+
+static inline int lvts_debugfs_init(struct device *dev,
+ struct lvts_domain *lvts_td)
+{
+ return 0;
+}
+
+static void lvts_debugfs_exit(struct lvts_domain *lvts_td) { }
+
+#endif
+
+static int lvts_raw_to_temp(u32 raw_temp)
+{
+ int temperature;
+
+ temperature = ((s64)(raw_temp & 0xFFFF) * LVTS_COEFF_A) >> 14;
+ temperature += coeff_b;
+
+ return temperature;
+}
+
+static u32 lvts_temp_to_raw(int temperature)
+{
+ u32 raw_temp = ((s64)(coeff_b - temperature)) << 14;
+
+ raw_temp = div_s64(raw_temp, -LVTS_COEFF_A);
+
+ return raw_temp;
+}
+
+static int lvts_get_temp(struct thermal_zone_device *tz, int *temp)
+{
+ struct lvts_sensor *lvts_sensor = tz->devdata;
+ void __iomem *msr = lvts_sensor->msr;
+ u32 value;
+
+ /*
+ * Measurement registers:
+ *
+ * LVTS_MSR[0-3] / LVTS_IMMD[0-3]
+ *
+ * Bits:
+ *
+ * 32-17: Unused
+ * 16 : Valid temperature
+ * 15-0 : Raw temperature
+ */
+ value = readl(msr);
+
+ /*
+ * As the thermal zone temperature will read before the
+ * hardware sensor is fully initialized, we have to check the
+ * validity of the temperature returned when reading the
+ * measurement register. The thermal controller will set the
+ * valid bit temperature only when it is totally initialized.
+ *
+ * Otherwise, we may end up with garbage values out of the
+ * functionning temperature and directly jump to a system
+ * shutdown.
+ */
+ if (!(value & BIT(16)))
+ return -EAGAIN;
+
+ *temp = lvts_raw_to_temp(value & 0xFFFF);
+
+ return 0;
+}
+
+static int lvts_set_trips(struct thermal_zone_device *tz, int low, int high)
+{
+ struct lvts_sensor *lvts_sensor = tz->devdata;
+ void __iomem *base = lvts_sensor->base;
+ u32 raw_low = lvts_temp_to_raw(low);
+ u32 raw_high = lvts_temp_to_raw(high);
+
+ /*
+ * Hot to normal temperature threshold
+ *
+ * LVTS_H2NTHRE
+ *
+ * Bits:
+ *
+ * 14-0 : Raw temperature for threshold
+ */
+ if (low != -INT_MAX) {
+ dev_dbg(&tz->device, "Setting low limit temperature interrupt: %d\n", low);
+ writel(raw_low, LVTS_H2NTHRE(base));
+ }
+
+ /*
+ * Hot temperature threshold
+ *
+ * LVTS_HTHRE
+ *
+ * Bits:
+ *
+ * 14-0 : Raw temperature for threshold
+ */
+ dev_dbg(&tz->device, "Setting high limit temperature interrupt: %d\n", high);
+ writel(raw_high, LVTS_HTHRE(base));
+
+ return 0;
+}
+
+static irqreturn_t lvts_ctrl_irq_handler(struct lvts_ctrl *lvts_ctrl)
+{
+ irqreturn_t iret = IRQ_NONE;
+ u32 value;
+ u32 masks[] = {
+ LVTS_INT_SENSOR0,
+ LVTS_INT_SENSOR1,
+ LVTS_INT_SENSOR2,
+ LVTS_INT_SENSOR3
+ };
+ int i;
+
+ /*
+ * Interrupt monitoring status
+ *
+ * LVTS_MONINTST
+ *
+ * Bits:
+ *
+ * 31 : Interrupt for stage 3
+ * 30 : Interrupt for stage 2
+ * 29 : Interrupt for state 1
+ * 28 : Interrupt using filter on sensor 3
+ *
+ * 27 : Interrupt using immediate on sensor 3
+ * 26 : Interrupt normal to hot on sensor 3
+ * 25 : Interrupt high offset on sensor 3
+ * 24 : Interrupt low offset on sensor 3
+ *
+ * 23 : Interrupt hot threshold on sensor 3
+ * 22 : Interrupt cold threshold on sensor 3
+ * 21 : Interrupt using filter on sensor 2
+ * 20 : Interrupt using filter on sensor 1
+ *
+ * 19 : Interrupt using filter on sensor 0
+ * 18 : Interrupt using immediate on sensor 2
+ * 17 : Interrupt using immediate on sensor 1
+ * 16 : Interrupt using immediate on sensor 0
+ *
+ * 15 : Interrupt device access timeout interrupt
+ * 14 : Interrupt normal to hot on sensor 2
+ * 13 : Interrupt high offset interrupt on sensor 2
+ * 12 : Interrupt low offset interrupt on sensor 2
+ *
+ * 11 : Interrupt hot threshold on sensor 2
+ * 10 : Interrupt cold threshold on sensor 2
+ * 9 : Interrupt normal to hot on sensor 1
+ * 8 : Interrupt high offset interrupt on sensor 1
+ *
+ * 7 : Interrupt low offset interrupt on sensor 1
+ * 6 : Interrupt hot threshold on sensor 1
+ * 5 : Interrupt cold threshold on sensor 1
+ * 4 : Interrupt normal to hot on sensor 0
+ *
+ * 3 : Interrupt high offset interrupt on sensor 0
+ * 2 : Interrupt low offset interrupt on sensor 0
+ * 1 : Interrupt hot threshold on sensor 0
+ * 0 : Interrupt cold threshold on sensor 0
+ *
+ * We are interested in the sensor(s) responsible of the
+ * interrupt event. We update the thermal framework with the
+ * thermal zone associated with the sensor. The framework will
+ * take care of the rest whatever the kind of interrupt, we
+ * are only interested in which sensor raised the interrupt.
+ *
+ * sensor 3 interrupt: 0001 1111 1100 0000 0000 0000 0000 0000
+ * => 0x1FC00000
+ * sensor 2 interrupt: 0000 0000 0010 0100 0111 1100 0000 0000
+ * => 0x00247C00
+ * sensor 1 interrupt: 0000 0000 0001 0001 0000 0011 1110 0000
+ * => 0X000881F0
+ * sensor 0 interrupt: 0000 0000 0000 1001 0000 0000 0001 1111
+ * => 0x0009001F
+ */
+ value = readl(LVTS_MONINTSTS(lvts_ctrl->base));
+
+ /*
+ * Let's figure out which sensors raised the interrupt
+ *
+ * NOTE: the masks array must be ordered with the index
+ * corresponding to the sensor id eg. index=0, mask for
+ * sensor0.
+ */
+ for (i = 0; i < ARRAY_SIZE(masks); i++) {
+
+ if (!(value & masks[i]))
+ continue;
+
+ thermal_zone_device_update(lvts_ctrl->sensors[i].tz,
+ THERMAL_TRIP_VIOLATED);
+ iret = IRQ_HANDLED;
+ }
+
+ /*
+ * Write back to clear the interrupt status (W1C)
+ */
+ writel(value, LVTS_MONINTSTS(lvts_ctrl->base));
+
+ return iret;
+}
+
+/*
+ * Temperature interrupt handler. Even if the driver supports more
+ * interrupt modes, we use the interrupt when the temperature crosses
+ * the hot threshold the way up and the way down (modulo the
+ * hysteresis).
+ *
+ * Each thermal domain has a couple of interrupts, one for hardware
+ * reset and another one for all the thermal events happening on the
+ * different sensors.
+ *
+ * The interrupt is configured for thermal events when crossing the
+ * hot temperature limit. At each interrupt, we check in every
+ * controller if there is an interrupt pending.
+ */
+static irqreturn_t lvts_irq_handler(int irq, void *data)
+{
+ struct lvts_domain *lvts_td = data;
+ irqreturn_t aux, iret = IRQ_NONE;
+ int i;
+
+ for (i = 0; i < lvts_td->num_lvts_ctrl; i++) {
+
+ aux = lvts_ctrl_irq_handler(lvts_td->lvts_ctrl);
+ if (aux != IRQ_HANDLED)
+ continue;
+
+ iret = IRQ_HANDLED;
+ }
+
+ return iret;
+}
+
+static struct thermal_zone_device_ops lvts_ops = {
+ .get_temp = lvts_get_temp,
+ .set_trips = lvts_set_trips,
+};
+
+static int lvts_sensor_init(struct device *dev, struct lvts_ctrl *lvts_ctrl,
+ const struct lvts_ctrl_data *lvts_ctrl_data)
+{
+ struct lvts_sensor *lvts_sensor = lvts_ctrl->sensors;
+ void __iomem *msr_regs[] = {
+ LVTS_MSR0(lvts_ctrl->base),
+ LVTS_MSR1(lvts_ctrl->base),
+ LVTS_MSR2(lvts_ctrl->base),
+ LVTS_MSR3(lvts_ctrl->base)
+ };
+
+ void __iomem *imm_regs[] = {
+ LVTS_IMMD0(lvts_ctrl->base),
+ LVTS_IMMD1(lvts_ctrl->base),
+ LVTS_IMMD2(lvts_ctrl->base),
+ LVTS_IMMD3(lvts_ctrl->base)
+ };
+
+ int i;
+
+ for (i = 0; i < lvts_ctrl_data->num_lvts_sensor; i++) {
+
+ int dt_id = lvts_ctrl_data->lvts_sensor[i].dt_id;
+
+ /*
+ * At this point, we don't know which id matches which
+ * sensor. Let's set arbitrally the id from the index.
+ */
+ lvts_sensor[i].id = i;
+
+ /*
+ * The thermal zone registration will set the trip
+ * point interrupt in the thermal controller
+ * register. But this one will be reset in the
+ * initialization after. So we need to post pone the
+ * thermal zone creation after the controller is
+ * setup. For this reason, we store the device tree
+ * node id from the data in the sensor structure
+ */
+ lvts_sensor[i].dt_id = dt_id;
+
+ /*
+ * We assign the base address of the thermal
+ * controller as a back pointer. So it will be
+ * accessible from the different thermal framework ops
+ * as we pass the lvts_sensor pointer as thermal zone
+ * private data.
+ */
+ lvts_sensor[i].base = lvts_ctrl->base;
+
+ /*
+ * Each sensor has its own register address to read from.
+ */
+ lvts_sensor[i].msr = lvts_ctrl_data->mode == LVTS_MSR_IMMEDIATE_MODE ?
+ imm_regs[i] : msr_regs[i];
+ };
+
+ lvts_ctrl->num_lvts_sensor = lvts_ctrl_data->num_lvts_sensor;
+
+ return 0;
+}
+
+/*
+ * The efuse blob values follows the sensor enumeration per thermal
+ * controller. The decoding of the stream is as follow:
+ *
+ * <--?-> <----big0 ???---> <-sensor0-> <-0->
+ * ------------------------------------------
+ * index in the stream: : | 0x0 | 0x1 | 0x2 | 0x3 | 0x4 | 0x5 | 0x6 |
+ * ------------------------------------------
+ *
+ * <--sensor1--><-0-> <----big1 ???---> <-sen
+ * ------------------------------------------
+ * | 0x7 | 0x8 | 0x9 | 0xA | 0xB | OxC | OxD |
+ * ------------------------------------------
+ *
+ * sor0-> <-0-> <-sensor1-> <-0-> ..........
+ * ------------------------------------------
+ * | 0x7 | 0x8 | 0x9 | 0xA | 0xB | OxC | OxD |
+ * ------------------------------------------
+ *
+ * And so on ...
+ *
+ * The data description gives the offset of the calibration data in
+ * this bytes stream for each sensor.
+ *
+ * Each thermal controller can handle up to 4 sensors max, we don't
+ * care if there are less as the array of calibration is sized to 4
+ * anyway. The unused sensor slot will be zeroed.
+ */
+static int lvts_calibration_init(struct device *dev, struct lvts_ctrl *lvts_ctrl,
+ const struct lvts_ctrl_data *lvts_ctrl_data,
+ u8 *efuse_calibration)
+{
+ int i;
+
+ for (i = 0; i < lvts_ctrl_data->num_lvts_sensor; i++)
+ memcpy(&lvts_ctrl->calibration[i],
+ efuse_calibration + lvts_ctrl_data->cal_offset[i], 2);
+
+ return 0;
+}
+
+/*
+ * The efuse bytes stream can be split into different chunk of
+ * nvmems. This function reads and concatenate those into a single
+ * buffer so it can be read sequentially when initializing the
+ * calibration data.
+ */
+static int lvts_calibration_read(struct device *dev, struct lvts_domain *lvts_td,
+ const struct lvts_data *lvts_data)
+{
+ struct device_node *np = dev_of_node(dev);
+ struct nvmem_cell *cell;
+ struct property *prop;
+ const char *cell_name;
+
+ of_property_for_each_string(np, "nvmem-cell-names", prop, cell_name) {
+ size_t len;
+ u8 *efuse;
+
+ cell = of_nvmem_cell_get(np, cell_name);
+ if (IS_ERR(cell)) {
+ dev_err(dev, "Failed to get cell '%s'\n", cell_name);
+ return PTR_ERR(cell);
+ }
+
+ efuse = nvmem_cell_read(cell, &len);
+
+ nvmem_cell_put(cell);
+
+ if (IS_ERR(efuse)) {
+ dev_err(dev, "Failed to read cell '%s'\n", cell_name);
+ return PTR_ERR(efuse);
+ }
+
+ lvts_td->calib = devm_krealloc(dev, lvts_td->calib,
+ lvts_td->calib_len + len, GFP_KERNEL);
+ if (!lvts_td->calib)
+ return -ENOMEM;
+
+ memcpy(lvts_td->calib + lvts_td->calib_len, efuse, len);
+
+ lvts_td->calib_len += len;
+
+ kfree(efuse);
+ }
+
+ return 0;
+}
+
+static int lvts_golden_temp_init(struct device *dev, u32 *value)
+{
+ u32 gt;
+
+ gt = (*value) >> 24;
+
+ if (gt && gt < LVTS_GOLDEN_TEMP_MAX)
+ golden_temp = gt;
+
+ coeff_b = golden_temp * 500 + LVTS_COEFF_B;
+
+ return 0;
+}
+
+static int lvts_ctrl_init(struct device *dev, struct lvts_domain *lvts_td,
+ const struct lvts_data *lvts_data)
+{
+ size_t size = sizeof(*lvts_td->lvts_ctrl) * lvts_data->num_lvts_ctrl;
+ struct lvts_ctrl *lvts_ctrl;
+ int i, ret;
+
+ /*
+ * Create the calibration bytes stream from efuse data
+ */
+ ret = lvts_calibration_read(dev, lvts_td, lvts_data);
+ if (ret)
+ return ret;
+
+ /*
+ * The golden temp information is contained in the first chunk
+ * of efuse data.
+ */
+ ret = lvts_golden_temp_init(dev, (u32 *)lvts_td->calib);
+ if (ret)
+ return ret;
+
+ lvts_ctrl = devm_kzalloc(dev, size, GFP_KERNEL);
+ if (!lvts_ctrl)
+ return -ENOMEM;
+
+ for (i = 0; i < lvts_data->num_lvts_ctrl; i++) {
+
+ lvts_ctrl[i].base = lvts_td->base + lvts_data->lvts_ctrl[i].offset;
+
+ ret = lvts_sensor_init(dev, &lvts_ctrl[i],
+ &lvts_data->lvts_ctrl[i]);
+ if (ret)
+ return ret;
+
+ ret = lvts_calibration_init(dev, &lvts_ctrl[i],
+ &lvts_data->lvts_ctrl[i],
+ lvts_td->calib);
+ if (ret)
+ return ret;
+
+ /*
+ * The mode the ctrl will use to read the temperature
+ * (filtered or immediate)
+ */
+ lvts_ctrl[i].mode = lvts_data->lvts_ctrl[i].mode;
+
+ /*
+ * The temperature to raw temperature must be done
+ * after initializing the calibration.
+ */
+ lvts_ctrl[i].hw_tshut_raw_temp =
+ lvts_temp_to_raw(lvts_data->lvts_ctrl[i].hw_tshut_temp);
+ }
+
+ /*
+ * We no longer need the efuse bytes stream, let's free it
+ */
+ devm_kfree(dev, lvts_td->calib);
+
+ lvts_td->lvts_ctrl = lvts_ctrl;
+ lvts_td->num_lvts_ctrl = lvts_data->num_lvts_ctrl;
+
+ return 0;
+}
+
+/*
+ * At this point the configuration register is the only place in the
+ * driver where we write multiple values. Per hardware constraint,
+ * each write in the configuration register must be separated by a
+ * delay of 2 us.
+ */
+static void lvts_write_config(struct lvts_ctrl *lvts_ctrl, u32 *cmds, int nr_cmds)
+{
+ int i;
+
+ /*
+ * Configuration register
+ */
+ for (i = 0; i < nr_cmds; i++) {
+ writel(cmds[i], LVTS_CONFIG(lvts_ctrl->base));
+ usleep_range(2, 4);
+ }
+}
+
+static int lvts_irq_init(struct lvts_ctrl *lvts_ctrl)
+{
+ /*
+ * LVTS_PROTCTL : Thermal Protection Sensor Selection
+ *
+ * Bits:
+ *
+ * 19-18 : Sensor to base the protection on
+ * 17-16 : Strategy:
+ * 00 : Average of 4 sensors
+ * 01 : Max of 4 sensors
+ * 10 : Selected sensor with bits 19-18
+ * 11 : Reserved
+ */
+ writel(BIT(16), LVTS_PROTCTL(lvts_ctrl->base));
+
+ /*
+ * LVTS_PROTTA : Stage 1 temperature threshold
+ * LVTS_PROTTB : Stage 2 temperature threshold
+ * LVTS_PROTTC : Stage 3 temperature threshold
+ *
+ * Bits:
+ *
+ * 14-0: Raw temperature threshold
+ *
+ * writel(0x0, LVTS_PROTTA(lvts_ctrl->base));
+ * writel(0x0, LVTS_PROTTB(lvts_ctrl->base));
+ */
+ writel(lvts_ctrl->hw_tshut_raw_temp, LVTS_PROTTC(lvts_ctrl->base));
+
+ /*
+ * LVTS_MONINT : Interrupt configuration register
+ *
+ * The LVTS_MONINT register layout is the same as the LVTS_MONINTSTS
+ * register, except we set the bits to enable the interrupt.
+ */
+ writel(LVTS_MONINT_CONF, LVTS_MONINT(lvts_ctrl->base));
+
+ return 0;
+}
+
+static int lvts_domain_reset(struct device *dev, struct reset_control *reset)
+{
+ int ret;
+
+ ret = reset_control_assert(reset);
+ if (ret)
+ return ret;
+
+ return reset_control_deassert(reset);
+}
+
+/*
+ * Enable or disable the clocks of a specified thermal controller
+ */
+static int lvts_ctrl_set_enable(struct lvts_ctrl *lvts_ctrl, int enable)
+{
+ /*
+ * LVTS_CLKEN : Internal LVTS clock
+ *
+ * Bits:
+ *
+ * 0 : enable / disable clock
+ */
+ writel(enable, LVTS_CLKEN(lvts_ctrl->base));
+
+ return 0;
+}
+
+static int lvts_ctrl_connect(struct device *dev, struct lvts_ctrl *lvts_ctrl)
+{
+ u32 id, cmds[] = { 0xC103FFFF, 0xC502FF55 };
+
+ lvts_write_config(lvts_ctrl, cmds, ARRAY_SIZE(cmds));
+
+ /*
+ * LVTS_ID : Get ID and status of the thermal controller
+ *
+ * Bits:
+ *
+ * 0-5 : thermal controller id
+ * 7 : thermal controller connection is valid
+ */
+ id = readl(LVTS_ID(lvts_ctrl->base));
+ if (!(id & BIT(7)))
+ return -EIO;
+
+ return 0;
+}
+
+static int lvts_ctrl_initialize(struct device *dev, struct lvts_ctrl *lvts_ctrl)
+{
+ /*
+ * Write device mask: 0xC1030000
+ */
+ u32 cmds[] = {
+ 0xC1030E01, 0xC1030CFC, 0xC1030A8C, 0xC103098D, 0xC10308F1,
+ 0xC10307A6, 0xC10306B8, 0xC1030500, 0xC1030420, 0xC1030300,
+ 0xC1030030, 0xC10300F6, 0xC1030050, 0xC1030060, 0xC10300AC,
+ 0xC10300FC, 0xC103009D, 0xC10300F1, 0xC10300E1
+ };
+
+ lvts_write_config(lvts_ctrl, cmds, ARRAY_SIZE(cmds));
+
+ return 0;
+}
+
+static int lvts_ctrl_calibrate(struct device *dev, struct lvts_ctrl *lvts_ctrl)
+{
+ int i;
+ void __iomem *lvts_edata[] = {
+ LVTS_EDATA00(lvts_ctrl->base),
+ LVTS_EDATA01(lvts_ctrl->base),
+ LVTS_EDATA02(lvts_ctrl->base),
+ LVTS_EDATA03(lvts_ctrl->base)
+ };
+
+ /*
+ * LVTS_EDATA0X : Efuse calibration reference value for sensor X
+ *
+ * Bits:
+ *
+ * 20-0 : Efuse value for normalization data
+ */
+ for (i = 0; i < LVTS_SENSOR_MAX; i++)
+ writel(lvts_ctrl->calibration[i], lvts_edata[i]);
+
+ return 0;
+}
+
+static int lvts_ctrl_configure(struct device *dev, struct lvts_ctrl *lvts_ctrl)
+{
+ u32 value;
+
+ /*
+ * LVTS_TSSEL : Sensing point index numbering
+ *
+ * Bits:
+ *
+ * 31-24: ADC Sense 3
+ * 23-16: ADC Sense 2
+ * 15-8 : ADC Sense 1
+ * 7-0 : ADC Sense 0
+ */
+ value = LVTS_TSSEL_CONF;
+ writel(value, LVTS_TSSEL(lvts_ctrl->base));
+
+ /*
+ * LVTS_CALSCALE : ADC voltage round
+ */
+ value = 0x300;
+ value = LVTS_CALSCALE_CONF;
+
+ /*
+ * LVTS_MSRCTL0 : Sensor filtering strategy
+ *
+ * Filters:
+ *
+ * 000 : One sample
+ * 001 : Avg 2 samples
+ * 010 : 4 samples, drop min and max, avg 2 samples
+ * 011 : 6 samples, drop min and max, avg 4 samples
+ * 100 : 10 samples, drop min and max, avg 8 samples
+ * 101 : 18 samples, drop min and max, avg 16 samples
+ *
+ * Bits:
+ *
+ * 0-2 : Sensor0 filter
+ * 3-5 : Sensor1 filter
+ * 6-8 : Sensor2 filter
+ * 9-11 : Sensor3 filter
+ */
+ value = LVTS_HW_FILTER << 9 | LVTS_HW_FILTER << 6 |
+ LVTS_HW_FILTER << 3 | LVTS_HW_FILTER;
+ writel(value, LVTS_MSRCTL0(lvts_ctrl->base));
+
+ /*
+ * LVTS_MSRCTL1 : Measurement control
+ *
+ * Bits:
+ *
+ * 9: Ignore MSRCTL0 config and do immediate measurement on sensor3
+ * 6: Ignore MSRCTL0 config and do immediate measurement on sensor2
+ * 5: Ignore MSRCTL0 config and do immediate measurement on sensor1
+ * 4: Ignore MSRCTL0 config and do immediate measurement on sensor0
+ *
+ * That configuration will ignore the filtering and the delays
+ * introduced below in MONCTL1 and MONCTL2
+ */
+ if (lvts_ctrl->mode == LVTS_MSR_IMMEDIATE_MODE) {
+ value = BIT(9) | BIT(6) | BIT(5) | BIT(4);
+ writel(value, LVTS_MSRCTL1(lvts_ctrl->base));
+ }
+
+ /*
+ * LVTS_MONCTL1 : Period unit and group interval configuration
+ *
+ * The clock source of LVTS thermal controller is 26MHz.
+ *
+ * The period unit is a time base for all the interval delays
+ * specified in the registers. By default we use 12. The time
+ * conversion is done by multiplying by 256 and 1/26.10^6
+ *
+ * An interval delay multiplied by the period unit gives the
+ * duration in seconds.
+ *
+ * - Filter interval delay is a delay between two samples of
+ * the same sensor.
+ *
+ * - Sensor interval delay is a delay between two samples of
+ * different sensors.
+ *
+ * - Group interval delay is a delay between different rounds.
+ *
+ * For example:
+ * If Period unit = C, filter delay = 1, sensor delay = 2, group delay = 1,
+ * and two sensors, TS1 and TS2, are in a LVTS thermal controller
+ * and then
+ * Period unit time = C * 1/26M * 256 = 12 * 38.46ns * 256 = 118.149us
+ * Filter interval delay = 1 * Period unit = 118.149us
+ * Sensor interval delay = 2 * Period unit = 236.298us
+ * Group interval delay = 1 * Period unit = 118.149us
+ *
+ * TS1 TS1 ... TS1 TS2 TS2 ... TS2 TS1...
+ * <--> Filter interval delay
+ * <--> Sensor interval delay
+ * <--> Group interval delay
+ * Bits:
+ * 29 - 20 : Group interval
+ * 16 - 13 : Send a single interrupt when crossing the hot threshold (1)
+ * or an interrupt everytime the hot threshold is crossed (0)
+ * 9 - 0 : Period unit
+ *
+ */
+ value = LVTS_GROUP_INTERVAL << 20 | LVTS_PERIOD_UNIT;
+ writel(value, LVTS_MONCTL1(lvts_ctrl->base));
+
+ /*
+ * LVTS_MONCTL2 : Filtering and sensor interval
+ *
+ * Bits:
+ *
+ * 25-16 : Interval unit in PERIOD_UNIT between sample on
+ * the same sensor, filter interval
+ * 9-0 : Interval unit in PERIOD_UNIT between each sensor
+ *
+ */
+ value = LVTS_FILTER_INTERVAL << 16 | LVTS_SENSOR_INTERVAL;
+ writel(value, LVTS_MONCTL2(lvts_ctrl->base));
+
+ return lvts_irq_init(lvts_ctrl);
+}
+
+static int lvts_ctrl_start(struct device *dev, struct lvts_ctrl *lvts_ctrl)
+{
+ struct lvts_sensor *lvts_sensors = lvts_ctrl->sensors;
+ struct thermal_zone_device *tz;
+ u32 sensor_map = 0;
+ int i;
+
+ for (i = 0; i < lvts_ctrl->num_lvts_sensor; i++) {
+
+ int dt_id = lvts_sensors[i].dt_id;
+
+ tz = devm_thermal_of_zone_register(dev, dt_id, &lvts_sensors[i],
+ &lvts_ops);
+ if (IS_ERR(tz)) {
+ /*
+ * This thermal zone is not described in the
+ * device tree. It is not an error from the
+ * thermal OF code POV, we just continue.
+ */
+ if (PTR_ERR(tz) == -ENODEV)
+ continue;
+
+ return PTR_ERR(tz);
+ }
+
+ /*
+ * The thermal zone pointer will be needed in the
+ * interrupt handler, we store it in the sensor
+ * structure. The thermal domain structure will be
+ * passed to the interrupt handler private data as the
+ * interrupt is shared for all the controller
+ * belonging to the thermal domain.
+ */
+ lvts_sensors[i].tz = tz;
+
+ /*
+ * This sensor was correctly associated with a thermal
+ * zone, let's set the corresponding bit in the sensor
+ * map, so we can enable the temperature monitoring in
+ * the hardware thermal controller.
+ */
+ sensor_map |= BIT(i);
+ }
+
+ /*
+ * Bits:
+ * 9: Single point access flow
+ * 0-3: Enable sensing point 0-3
+ *
+ * The initialization of the thermal zones give us
+ * which sensor point to enable. If any thermal zone
+ * was not described in the device tree, it won't be
+ * enabled here in the sensor map.
+ */
+ writel(sensor_map | BIT(9), LVTS_MONCTL0(lvts_ctrl->base));
+
+ return 0;
+}
+
+static int lvts_domain_init(struct device *dev, struct lvts_domain *lvts_td,
+ const struct lvts_data *lvts_data)
+{
+ struct lvts_ctrl *lvts_ctrl;
+ int i, ret;
+
+ ret = lvts_ctrl_init(dev, lvts_td, lvts_data);
+ if (ret)
+ return ret;
+
+ ret = lvts_domain_reset(dev, lvts_td->reset);
+ if (ret) {
+ dev_dbg(dev, "Failed to reset domain");
+ return ret;
+ }
+
+ for (i = 0; i < lvts_td->num_lvts_ctrl; i++) {
+
+ lvts_ctrl = &lvts_td->lvts_ctrl[i];
+
+ /*
+ * Initialization steps:
+ *
+ * - Enable the clock
+ * - Connect to the LVTS
+ * - Initialize the LVTS
+ * - Prepare the calibration data
+ * - Select monitored sensors
+ * [ Configure sampling ]
+ * [ Configure the interrupt ]
+ * - Start measurement
+ */
+ ret = lvts_ctrl_set_enable(lvts_ctrl, true);
+ if (ret) {
+ dev_dbg(dev, "Failed to enable LVTS clock");
+ return ret;
+ }
+
+ ret = lvts_ctrl_connect(dev, lvts_ctrl);
+ if (ret) {
+ dev_dbg(dev, "Failed to connect to LVTS controller");
+ return ret;
+ }
+
+ ret = lvts_ctrl_initialize(dev, lvts_ctrl);
+ if (ret) {
+ dev_dbg(dev, "Failed to initialize controller");
+ return ret;
+ }
+
+ ret = lvts_ctrl_calibrate(dev, lvts_ctrl);
+ if (ret) {
+ dev_dbg(dev, "Failed to calibrate controller");
+ return ret;
+ }
+
+ ret = lvts_ctrl_configure(dev, lvts_ctrl);
+ if (ret) {
+ dev_dbg(dev, "Failed to configure controller");
+ return ret;
+ }
+
+ ret = lvts_ctrl_start(dev, lvts_ctrl);
+ if (ret) {
+ dev_dbg(dev, "Failed to start controller");
+ return ret;
+ }
+ }
+
+ return lvts_debugfs_init(dev, lvts_td);
+}
+
+static int lvts_probe(struct platform_device *pdev)
+{
+ const struct lvts_data *lvts_data;
+ struct lvts_domain *lvts_td;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ int irq, ret;
+
+ lvts_td = devm_kzalloc(dev, sizeof(*lvts_td), GFP_KERNEL);
+ if (!lvts_td)
+ return -ENOMEM;
+
+ lvts_data = of_device_get_match_data(dev);
+
+ lvts_td->clk = devm_clk_get_enabled(dev, NULL);
+ if (IS_ERR(lvts_td->clk))
+ return dev_err_probe(dev, PTR_ERR(lvts_td->clk), "Failed to retrieve clock\n");
+
+ res = platform_get_mem_or_io(pdev, 0);
+ if (!res)
+ return dev_err_probe(dev, (-ENXIO), "No IO resource\n");
+
+ lvts_td->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(lvts_td->base))
+ return dev_err_probe(dev, PTR_ERR(lvts_td->base), "Failed to map io resource\n");
+
+ lvts_td->reset = devm_reset_control_get_by_index(dev, 0);
+ if (IS_ERR(lvts_td->reset))
+ return dev_err_probe(dev, PTR_ERR(lvts_td->reset), "Failed to get reset control\n");
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return dev_err_probe(dev, irq, "No irq resource\n");
+
+ ret = lvts_domain_init(dev, lvts_td, lvts_data);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to initialize the lvts domain\n");
+
+ /*
+ * At this point the LVTS is initialized and enabled. We can
+ * safely enable the interrupt.
+ */
+ ret = devm_request_threaded_irq(dev, irq, NULL, lvts_irq_handler,
+ IRQF_ONESHOT, dev_name(dev), lvts_td);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to request interrupt\n");
+
+ platform_set_drvdata(pdev, lvts_td);
+
+ return 0;
+}
+
+static int lvts_remove(struct platform_device *pdev)
+{
+ struct lvts_domain *lvts_td;
+ int i;
+
+ lvts_td = platform_get_drvdata(pdev);
+
+ for (i = 0; i < lvts_td->num_lvts_ctrl; i++)
+ lvts_ctrl_set_enable(&lvts_td->lvts_ctrl[i], false);
+
+ lvts_debugfs_exit(lvts_td);
+
+ return 0;
+}
+
+static const struct lvts_ctrl_data mt8195_lvts_data_ctrl[] = {
+ {
+ .cal_offset = { 0x04, 0x07 },
+ .lvts_sensor = {
+ { .dt_id = MT8195_MCU_BIG_CPU0 },
+ { .dt_id = MT8195_MCU_BIG_CPU1 }
+ },
+ .num_lvts_sensor = 2,
+ .offset = 0x0,
+ .hw_tshut_temp = LVTS_HW_SHUTDOWN_MT8195,
+ },
+ {
+ .cal_offset = { 0x0d, 0x10 },
+ .lvts_sensor = {
+ { .dt_id = MT8195_MCU_BIG_CPU2 },
+ { .dt_id = MT8195_MCU_BIG_CPU3 }
+ },
+ .num_lvts_sensor = 2,
+ .offset = 0x100,
+ .hw_tshut_temp = LVTS_HW_SHUTDOWN_MT8195,
+ },
+ {
+ .cal_offset = { 0x16, 0x19, 0x1c, 0x1f },
+ .lvts_sensor = {
+ { .dt_id = MT8195_MCU_LITTLE_CPU0 },
+ { .dt_id = MT8195_MCU_LITTLE_CPU1 },
+ { .dt_id = MT8195_MCU_LITTLE_CPU2 },
+ { .dt_id = MT8195_MCU_LITTLE_CPU3 }
+ },
+ .num_lvts_sensor = 4,
+ .offset = 0x200,
+ .hw_tshut_temp = LVTS_HW_SHUTDOWN_MT8195,
+ }
+};
+
+static const struct lvts_data mt8195_lvts_mcu_data = {
+ .lvts_ctrl = mt8195_lvts_data_ctrl,
+ .num_lvts_ctrl = ARRAY_SIZE(mt8195_lvts_data_ctrl),
+};
+
+static const struct of_device_id lvts_of_match[] = {
+ { .compatible = "mediatek,mt8195-lvts-mcu", .data = &mt8195_lvts_mcu_data },
+ {},
+};
+MODULE_DEVICE_TABLE(of, lvts_of_match);
+
+static struct platform_driver lvts_driver = {
+ .probe = lvts_probe,
+ .remove = lvts_remove,
+ .driver = {
+ .name = "mtk-lvts-thermal",
+ .of_match_table = lvts_of_match,
+ },
+};
+module_platform_driver(lvts_driver);
+
+MODULE_AUTHOR("Balsam CHIHI <bchihi@baylibre.com>");
+MODULE_DESCRIPTION("MediaTek LVTS Thermal Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/thermal/qcom/qcom-spmi-adc-tm5.c b/drivers/thermal/qcom/qcom-spmi-adc-tm5.c
index ff47fc9ac9c5..31164ade2dd1 100644
--- a/drivers/thermal/qcom/qcom-spmi-adc-tm5.c
+++ b/drivers/thermal/qcom/qcom-spmi-adc-tm5.c
@@ -18,7 +18,8 @@
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/thermal.h>
-#include <asm-generic/unaligned.h>
+
+#include <asm/unaligned.h>
#include "../thermal_hwmon.h"
diff --git a/drivers/thermal/qcom/qcom-spmi-temp-alarm.c b/drivers/thermal/qcom/qcom-spmi-temp-alarm.c
index ad84978109e6..101c75d0e13f 100644
--- a/drivers/thermal/qcom/qcom-spmi-temp-alarm.c
+++ b/drivers/thermal/qcom/qcom-spmi-temp-alarm.c
@@ -15,7 +15,6 @@
#include <linux/regmap.h>
#include <linux/thermal.h>
-#include "../thermal_core.h"
#include "../thermal_hwmon.h"
#define QPNP_TM_REG_DIG_MAJOR 0x01
@@ -264,17 +263,17 @@ skip:
return qpnp_tm_write(chip, QPNP_TM_REG_SHUTDOWN_CTRL1, reg);
}
-static int qpnp_tm_set_trip_temp(struct thermal_zone_device *tz, int trip, int temp)
+static int qpnp_tm_set_trip_temp(struct thermal_zone_device *tz, int trip_id, int temp)
{
struct qpnp_tm_chip *chip = tz->devdata;
- const struct thermal_trip *trip_points;
+ struct thermal_trip trip;
int ret;
- trip_points = of_thermal_get_trip_points(chip->tz_dev);
- if (!trip_points)
- return -EINVAL;
+ ret = __thermal_zone_get_trip(chip->tz_dev, trip_id, &trip);
+ if (ret)
+ return ret;
- if (trip_points[trip].type != THERMAL_TRIP_CRITICAL)
+ if (trip.type != THERMAL_TRIP_CRITICAL)
return 0;
mutex_lock(&chip->lock);
@@ -300,22 +299,17 @@ static irqreturn_t qpnp_tm_isr(int irq, void *data)
static int qpnp_tm_get_critical_trip_temp(struct qpnp_tm_chip *chip)
{
- int ntrips;
- const struct thermal_trip *trips;
- int i;
-
- ntrips = of_thermal_get_ntrips(chip->tz_dev);
- if (ntrips <= 0)
- return THERMAL_TEMP_INVALID;
-
- trips = of_thermal_get_trip_points(chip->tz_dev);
- if (!trips)
- return THERMAL_TEMP_INVALID;
-
- for (i = 0; i < ntrips; i++) {
- if (of_thermal_is_trip_valid(chip->tz_dev, i) &&
- trips[i].type == THERMAL_TRIP_CRITICAL)
- return trips[i].temperature;
+ struct thermal_trip trip;
+ int i, ret;
+
+ for (i = 0; i < thermal_zone_get_num_trips(chip->tz_dev); i++) {
+
+ ret = thermal_zone_get_trip(chip->tz_dev, i, &trip);
+ if (ret)
+ continue;
+
+ if (trip.type == THERMAL_TRIP_CRITICAL)
+ return trip.temperature;
}
return THERMAL_TEMP_INVALID;
@@ -353,7 +347,12 @@ static int qpnp_tm_init(struct qpnp_tm_chip *chip)
if (stage)
chip->temp = qpnp_tm_decode_temp(chip, stage);
+ mutex_unlock(&chip->lock);
+
crit_temp = qpnp_tm_get_critical_trip_temp(chip);
+
+ mutex_lock(&chip->lock);
+
ret = qpnp_tm_update_critical_trip_temp(chip, crit_temp);
if (ret < 0)
goto out;
diff --git a/drivers/thermal/qcom/tsens-v0_1.c b/drivers/thermal/qcom/tsens-v0_1.c
index 04d012e4f728..e89c6f39a3ae 100644
--- a/drivers/thermal/qcom/tsens-v0_1.c
+++ b/drivers/thermal/qcom/tsens-v0_1.c
@@ -3,6 +3,8 @@
* Copyright (c) 2015, The Linux Foundation. All rights reserved.
*/
+#include <linux/bitfield.h>
+#include <linux/nvmem-consumer.h>
#include <linux/platform_device.h>
#include "tsens.h"
@@ -15,220 +17,117 @@
#define TM_Sn_STATUS_OFF 0x0030
#define TM_TRDY_OFF 0x005c
-/* eeprom layout data for 8916 */
-#define MSM8916_BASE0_MASK 0x0000007f
-#define MSM8916_BASE1_MASK 0xfe000000
-#define MSM8916_BASE0_SHIFT 0
-#define MSM8916_BASE1_SHIFT 25
-
-#define MSM8916_S0_P1_MASK 0x00000f80
-#define MSM8916_S1_P1_MASK 0x003e0000
-#define MSM8916_S2_P1_MASK 0xf8000000
-#define MSM8916_S3_P1_MASK 0x000003e0
-#define MSM8916_S4_P1_MASK 0x000f8000
-
-#define MSM8916_S0_P2_MASK 0x0001f000
-#define MSM8916_S1_P2_MASK 0x07c00000
-#define MSM8916_S2_P2_MASK 0x0000001f
-#define MSM8916_S3_P2_MASK 0x00007c00
-#define MSM8916_S4_P2_MASK 0x01f00000
-
-#define MSM8916_S0_P1_SHIFT 7
-#define MSM8916_S1_P1_SHIFT 17
-#define MSM8916_S2_P1_SHIFT 27
-#define MSM8916_S3_P1_SHIFT 5
-#define MSM8916_S4_P1_SHIFT 15
-
-#define MSM8916_S0_P2_SHIFT 12
-#define MSM8916_S1_P2_SHIFT 22
-#define MSM8916_S2_P2_SHIFT 0
-#define MSM8916_S3_P2_SHIFT 10
-#define MSM8916_S4_P2_SHIFT 20
-
-#define MSM8916_CAL_SEL_MASK 0xe0000000
-#define MSM8916_CAL_SEL_SHIFT 29
-
-/* eeprom layout data for 8939 */
-#define MSM8939_BASE0_MASK 0x000000ff
-#define MSM8939_BASE1_MASK 0xff000000
-#define MSM8939_BASE0_SHIFT 0
-#define MSM8939_BASE1_SHIFT 24
-
-#define MSM8939_S0_P1_MASK 0x000001f8
-#define MSM8939_S1_P1_MASK 0x001f8000
-#define MSM8939_S2_P1_MASK_0_4 0xf8000000
-#define MSM8939_S2_P1_MASK_5 0x00000001
-#define MSM8939_S3_P1_MASK 0x00001f80
-#define MSM8939_S4_P1_MASK 0x01f80000
-#define MSM8939_S5_P1_MASK 0x00003f00
-#define MSM8939_S6_P1_MASK 0x03f00000
-#define MSM8939_S7_P1_MASK 0x0000003f
-#define MSM8939_S8_P1_MASK 0x0003f000
-#define MSM8939_S9_P1_MASK 0x07e00000
-
-#define MSM8939_S0_P2_MASK 0x00007e00
-#define MSM8939_S1_P2_MASK 0x07e00000
-#define MSM8939_S2_P2_MASK 0x0000007e
-#define MSM8939_S3_P2_MASK 0x0007e000
-#define MSM8939_S4_P2_MASK 0x7e000000
-#define MSM8939_S5_P2_MASK 0x000fc000
-#define MSM8939_S6_P2_MASK 0xfc000000
-#define MSM8939_S7_P2_MASK 0x00000fc0
-#define MSM8939_S8_P2_MASK 0x00fc0000
-#define MSM8939_S9_P2_MASK_0_4 0xf8000000
-#define MSM8939_S9_P2_MASK_5 0x00002000
-
-#define MSM8939_S0_P1_SHIFT 3
-#define MSM8939_S1_P1_SHIFT 15
-#define MSM8939_S2_P1_SHIFT_0_4 27
-#define MSM8939_S2_P1_SHIFT_5 0
-#define MSM8939_S3_P1_SHIFT 7
-#define MSM8939_S4_P1_SHIFT 19
-#define MSM8939_S5_P1_SHIFT 8
-#define MSM8939_S6_P1_SHIFT 20
-#define MSM8939_S7_P1_SHIFT 0
-#define MSM8939_S8_P1_SHIFT 12
-#define MSM8939_S9_P1_SHIFT 21
-
-#define MSM8939_S0_P2_SHIFT 9
-#define MSM8939_S1_P2_SHIFT 21
-#define MSM8939_S2_P2_SHIFT 1
-#define MSM8939_S3_P2_SHIFT 13
-#define MSM8939_S4_P2_SHIFT 25
-#define MSM8939_S5_P2_SHIFT 14
-#define MSM8939_S6_P2_SHIFT 26
-#define MSM8939_S7_P2_SHIFT 6
-#define MSM8939_S8_P2_SHIFT 18
-#define MSM8939_S9_P2_SHIFT_0_4 27
-#define MSM8939_S9_P2_SHIFT_5 13
-
-#define MSM8939_CAL_SEL_MASK 0x7
-#define MSM8939_CAL_SEL_SHIFT 0
-
-/* eeprom layout data for 8974 */
-#define BASE1_MASK 0xff
-#define S0_P1_MASK 0x3f00
-#define S1_P1_MASK 0xfc000
-#define S2_P1_MASK 0x3f00000
-#define S3_P1_MASK 0xfc000000
-#define S4_P1_MASK 0x3f
-#define S5_P1_MASK 0xfc0
-#define S6_P1_MASK 0x3f000
-#define S7_P1_MASK 0xfc0000
-#define S8_P1_MASK 0x3f000000
-#define S8_P1_MASK_BKP 0x3f
-#define S9_P1_MASK 0x3f
-#define S9_P1_MASK_BKP 0xfc0
-#define S10_P1_MASK 0xfc0
-#define S10_P1_MASK_BKP 0x3f000
-#define CAL_SEL_0_1 0xc0000000
-#define CAL_SEL_2 0x40000000
-#define CAL_SEL_SHIFT 30
-#define CAL_SEL_SHIFT_2 28
-
-#define S0_P1_SHIFT 8
-#define S1_P1_SHIFT 14
-#define S2_P1_SHIFT 20
-#define S3_P1_SHIFT 26
-#define S5_P1_SHIFT 6
-#define S6_P1_SHIFT 12
-#define S7_P1_SHIFT 18
-#define S8_P1_SHIFT 24
-#define S9_P1_BKP_SHIFT 6
-#define S10_P1_SHIFT 6
-#define S10_P1_BKP_SHIFT 12
-
-#define BASE2_SHIFT 12
-#define BASE2_BKP_SHIFT 18
-#define S0_P2_SHIFT 20
-#define S0_P2_BKP_SHIFT 26
-#define S1_P2_SHIFT 26
-#define S2_P2_BKP_SHIFT 6
-#define S3_P2_SHIFT 6
-#define S3_P2_BKP_SHIFT 12
-#define S4_P2_SHIFT 12
-#define S4_P2_BKP_SHIFT 18
-#define S5_P2_SHIFT 18
-#define S5_P2_BKP_SHIFT 24
-#define S6_P2_SHIFT 24
-#define S7_P2_BKP_SHIFT 6
-#define S8_P2_SHIFT 6
-#define S8_P2_BKP_SHIFT 12
-#define S9_P2_SHIFT 12
-#define S9_P2_BKP_SHIFT 18
-#define S10_P2_SHIFT 18
-#define S10_P2_BKP_SHIFT 24
-
-#define BASE2_MASK 0xff000
-#define BASE2_BKP_MASK 0xfc0000
-#define S0_P2_MASK 0x3f00000
-#define S0_P2_BKP_MASK 0xfc000000
-#define S1_P2_MASK 0xfc000000
-#define S1_P2_BKP_MASK 0x3f
-#define S2_P2_MASK 0x3f
-#define S2_P2_BKP_MASK 0xfc0
-#define S3_P2_MASK 0xfc0
-#define S3_P2_BKP_MASK 0x3f000
-#define S4_P2_MASK 0x3f000
-#define S4_P2_BKP_MASK 0xfc0000
-#define S5_P2_MASK 0xfc0000
-#define S5_P2_BKP_MASK 0x3f000000
-#define S6_P2_MASK 0x3f000000
-#define S6_P2_BKP_MASK 0x3f
-#define S7_P2_MASK 0x3f
-#define S7_P2_BKP_MASK 0xfc0
-#define S8_P2_MASK 0xfc0
-#define S8_P2_BKP_MASK 0x3f000
-#define S9_P2_MASK 0x3f000
-#define S9_P2_BKP_MASK 0xfc0000
-#define S10_P2_MASK 0xfc0000
-#define S10_P2_BKP_MASK 0x3f000000
-
+/* extra data for 8974 */
#define BKP_SEL 0x3
#define BKP_REDUN_SEL 0xe0000000
-#define BKP_REDUN_SHIFT 29
#define BIT_APPEND 0x3
-/* eeprom layout data for mdm9607 */
-#define MDM9607_BASE0_MASK 0x000000ff
-#define MDM9607_BASE1_MASK 0x000ff000
-#define MDM9607_BASE0_SHIFT 0
-#define MDM9607_BASE1_SHIFT 12
-
-#define MDM9607_S0_P1_MASK 0x00003f00
-#define MDM9607_S1_P1_MASK 0x03f00000
-#define MDM9607_S2_P1_MASK 0x0000003f
-#define MDM9607_S3_P1_MASK 0x0003f000
-#define MDM9607_S4_P1_MASK 0x0000003f
-
-#define MDM9607_S0_P2_MASK 0x000fc000
-#define MDM9607_S1_P2_MASK 0xfc000000
-#define MDM9607_S2_P2_MASK 0x00000fc0
-#define MDM9607_S3_P2_MASK 0x00fc0000
-#define MDM9607_S4_P2_MASK 0x00000fc0
-
-#define MDM9607_S0_P1_SHIFT 8
-#define MDM9607_S1_P1_SHIFT 20
-#define MDM9607_S2_P1_SHIFT 0
-#define MDM9607_S3_P1_SHIFT 12
-#define MDM9607_S4_P1_SHIFT 0
-
-#define MDM9607_S0_P2_SHIFT 14
-#define MDM9607_S1_P2_SHIFT 26
-#define MDM9607_S2_P2_SHIFT 6
-#define MDM9607_S3_P2_SHIFT 18
-#define MDM9607_S4_P2_SHIFT 6
-
-#define MDM9607_CAL_SEL_MASK 0x00700000
-#define MDM9607_CAL_SEL_SHIFT 20
+struct tsens_legacy_calibration_format tsens_8916_nvmem = {
+ .base_len = 7,
+ .base_shift = 3,
+ .sp_len = 5,
+ .mode = { 0, 29, 1 },
+ .invalid = { 0, 31, 1 },
+ .base = { { 0, 0 }, { 1, 25 } },
+ .sp = {
+ { { 0, 7 }, { 0, 12 } },
+ { { 0, 17 }, { 0, 22 } },
+ { { 0, 27 }, { 1, 0 } },
+ { { 1, 5 }, { 1, 10 } },
+ { { 1, 15 }, { 1, 20 } },
+ },
+};
+
+struct tsens_legacy_calibration_format tsens_8939_nvmem = {
+ .base_len = 8,
+ .base_shift = 2,
+ .sp_len = 6,
+ .mode = { 12, 0 },
+ .invalid = { 12, 2 },
+ .base = { { 0, 0 }, { 1, 24 } },
+ .sp = {
+ { { 12, 3 }, { 12, 9 } },
+ { { 12, 15 }, { 12, 21 } },
+ { { 12, 27 }, { 13, 1 } },
+ { { 13, 7 }, { 13, 13 } },
+ { { 13, 19 }, { 13, 25 } },
+ { { 0, 8 }, { 0, 14 } },
+ { { 0, 20 }, { 0, 26 } },
+ { { 1, 0 }, { 1, 6 } },
+ { { 1, 12 }, { 1, 18 } },
+ },
+};
+
+struct tsens_legacy_calibration_format tsens_8974_nvmem = {
+ .base_len = 8,
+ .base_shift = 2,
+ .sp_len = 6,
+ .mode = { 1, 30 },
+ .invalid = { 3, 30 },
+ .base = { { 0, 0 }, { 2, 12 } },
+ .sp = {
+ { { 0, 8 }, { 2, 20 } },
+ { { 0, 14 }, { 2, 26 } },
+ { { 0, 20 }, { 3, 0 } },
+ { { 0, 26 }, { 3, 6 } },
+ { { 1, 0 }, { 3, 12 } },
+ { { 1, 6 }, { 3, 18 } },
+ { { 1, 12 }, { 3, 24 } },
+ { { 1, 18 }, { 4, 0 } },
+ { { 1, 24 }, { 4, 6 } },
+ { { 2, 0 }, { 4, 12 } },
+ { { 2, 6 }, { 4, 18 } },
+ },
+};
+
+struct tsens_legacy_calibration_format tsens_8974_backup_nvmem = {
+ .base_len = 8,
+ .base_shift = 2,
+ .sp_len = 6,
+ .mode = { 4, 30, 1 },
+ .invalid = { 5, 30, 1 },
+ .base = { { 0, 0 }, { 2, 18 } },
+ .sp = {
+ { { 0, 8 }, { 2, 26 } },
+ { { 0, 14 }, { 3, 0 } },
+ { { 0, 20 }, { 3, 6 } },
+ { { 0, 26 }, { 3, 12 } },
+ { { 1, 0 }, { 3, 18 } },
+ { { 1, 6 }, { 3, 24, 1 } },
+ { { 1, 12 }, { 4, 0, 1 } },
+ { { 1, 18 }, { 4, 6, 1 } },
+ { { 2, 0 }, { 4, 12, 1 } },
+ { { 2, 6 }, { 4, 18, 1 } },
+ { { 2, 12 }, { 4, 24, 1 } },
+ },
+};
+
+struct tsens_legacy_calibration_format tsens_9607_nvmem = {
+ .base_len = 8,
+ .base_shift = 2,
+ .sp_len = 6,
+ .mode = { 2, 20 },
+ .invalid = { 2, 22 },
+ .base = { { 0, 0 }, { 2, 12 } },
+ .sp = {
+ { { 0, 8 }, { 0, 14 } },
+ { { 0, 20 }, { 0, 26 } },
+ { { 1, 0 }, { 1, 6 } },
+ { { 1, 12 }, { 1, 18 } },
+ { { 2, 0 }, { 2, 6 } },
+ },
+};
static int calibrate_8916(struct tsens_priv *priv)
{
- int base0 = 0, base1 = 0, i;
u32 p1[5], p2[5];
- int mode = 0;
u32 *qfprom_cdata, *qfprom_csel;
+ int mode, ret;
+
+ ret = tsens_calibrate_nvmem(priv, 3);
+ if (!ret)
+ return 0;
qfprom_cdata = (u32 *)qfprom_read(priv->dev, "calib");
if (IS_ERR(qfprom_cdata))
@@ -240,37 +139,9 @@ static int calibrate_8916(struct tsens_priv *priv)
return PTR_ERR(qfprom_csel);
}
- mode = (qfprom_csel[0] & MSM8916_CAL_SEL_MASK) >> MSM8916_CAL_SEL_SHIFT;
- dev_dbg(priv->dev, "calibration mode is %d\n", mode);
-
- switch (mode) {
- case TWO_PT_CALIB:
- base1 = (qfprom_cdata[1] & MSM8916_BASE1_MASK) >> MSM8916_BASE1_SHIFT;
- p2[0] = (qfprom_cdata[0] & MSM8916_S0_P2_MASK) >> MSM8916_S0_P2_SHIFT;
- p2[1] = (qfprom_cdata[0] & MSM8916_S1_P2_MASK) >> MSM8916_S1_P2_SHIFT;
- p2[2] = (qfprom_cdata[1] & MSM8916_S2_P2_MASK) >> MSM8916_S2_P2_SHIFT;
- p2[3] = (qfprom_cdata[1] & MSM8916_S3_P2_MASK) >> MSM8916_S3_P2_SHIFT;
- p2[4] = (qfprom_cdata[1] & MSM8916_S4_P2_MASK) >> MSM8916_S4_P2_SHIFT;
- for (i = 0; i < priv->num_sensors; i++)
- p2[i] = ((base1 + p2[i]) << 3);
- fallthrough;
- case ONE_PT_CALIB2:
- base0 = (qfprom_cdata[0] & MSM8916_BASE0_MASK);
- p1[0] = (qfprom_cdata[0] & MSM8916_S0_P1_MASK) >> MSM8916_S0_P1_SHIFT;
- p1[1] = (qfprom_cdata[0] & MSM8916_S1_P1_MASK) >> MSM8916_S1_P1_SHIFT;
- p1[2] = (qfprom_cdata[0] & MSM8916_S2_P1_MASK) >> MSM8916_S2_P1_SHIFT;
- p1[3] = (qfprom_cdata[1] & MSM8916_S3_P1_MASK) >> MSM8916_S3_P1_SHIFT;
- p1[4] = (qfprom_cdata[1] & MSM8916_S4_P1_MASK) >> MSM8916_S4_P1_SHIFT;
- for (i = 0; i < priv->num_sensors; i++)
- p1[i] = (((base0) + p1[i]) << 3);
- break;
- default:
- for (i = 0; i < priv->num_sensors; i++) {
- p1[i] = 500;
- p2[i] = 780;
- }
- break;
- }
+ mode = tsens_read_calibration_legacy(priv, &tsens_8916_nvmem,
+ p1, p2,
+ qfprom_cdata, qfprom_csel);
compute_intercept_slope(priv, p1, p2, mode);
kfree(qfprom_cdata);
@@ -279,83 +150,68 @@ static int calibrate_8916(struct tsens_priv *priv)
return 0;
}
-static int calibrate_8939(struct tsens_priv *priv)
+static void fixup_8974_points(int mode, u32 *p1, u32 *p2)
{
- int base0 = 0, base1 = 0, i;
- u32 p1[10], p2[10];
- int mode = 0;
- u32 *qfprom_cdata;
- u32 cdata[6];
-
- qfprom_cdata = (u32 *)qfprom_read(priv->dev, "calib");
- if (IS_ERR(qfprom_cdata))
- return PTR_ERR(qfprom_cdata);
-
- /* Mapping between qfprom nvmem and calibration data */
- cdata[0] = qfprom_cdata[12];
- cdata[1] = qfprom_cdata[13];
- cdata[2] = qfprom_cdata[0];
- cdata[3] = qfprom_cdata[1];
- cdata[4] = qfprom_cdata[22];
- cdata[5] = qfprom_cdata[21];
-
- mode = (cdata[0] & MSM8939_CAL_SEL_MASK) >> MSM8939_CAL_SEL_SHIFT;
- dev_dbg(priv->dev, "calibration mode is %d\n", mode);
-
- switch (mode) {
- case TWO_PT_CALIB:
- base1 = (cdata[3] & MSM8939_BASE1_MASK) >> MSM8939_BASE1_SHIFT;
- p2[0] = (cdata[0] & MSM8939_S0_P2_MASK) >> MSM8939_S0_P2_SHIFT;
- p2[1] = (cdata[0] & MSM8939_S1_P2_MASK) >> MSM8939_S1_P2_SHIFT;
- p2[2] = (cdata[1] & MSM8939_S2_P2_MASK) >> MSM8939_S2_P2_SHIFT;
- p2[3] = (cdata[1] & MSM8939_S3_P2_MASK) >> MSM8939_S3_P2_SHIFT;
- p2[4] = (cdata[1] & MSM8939_S4_P2_MASK) >> MSM8939_S4_P2_SHIFT;
- p2[5] = (cdata[2] & MSM8939_S5_P2_MASK) >> MSM8939_S5_P2_SHIFT;
- p2[6] = (cdata[2] & MSM8939_S6_P2_MASK) >> MSM8939_S6_P2_SHIFT;
- p2[7] = (cdata[3] & MSM8939_S7_P2_MASK) >> MSM8939_S7_P2_SHIFT;
- p2[8] = (cdata[3] & MSM8939_S8_P2_MASK) >> MSM8939_S8_P2_SHIFT;
- p2[9] = (cdata[4] & MSM8939_S9_P2_MASK_0_4) >> MSM8939_S9_P2_SHIFT_0_4;
- p2[9] |= ((cdata[5] & MSM8939_S9_P2_MASK_5) >> MSM8939_S9_P2_SHIFT_5) << 5;
- for (i = 0; i < priv->num_sensors; i++)
- p2[i] = (base1 + p2[i]) << 2;
- fallthrough;
- case ONE_PT_CALIB2:
- base0 = (cdata[2] & MSM8939_BASE0_MASK) >> MSM8939_BASE0_SHIFT;
- p1[0] = (cdata[0] & MSM8939_S0_P1_MASK) >> MSM8939_S0_P1_SHIFT;
- p1[1] = (cdata[0] & MSM8939_S1_P1_MASK) >> MSM8939_S1_P1_SHIFT;
- p1[2] = (cdata[0] & MSM8939_S2_P1_MASK_0_4) >> MSM8939_S2_P1_SHIFT_0_4;
- p1[2] |= ((cdata[1] & MSM8939_S2_P1_MASK_5) >> MSM8939_S2_P1_SHIFT_5) << 5;
- p1[3] = (cdata[1] & MSM8939_S3_P1_MASK) >> MSM8939_S3_P1_SHIFT;
- p1[4] = (cdata[1] & MSM8939_S4_P1_MASK) >> MSM8939_S4_P1_SHIFT;
- p1[5] = (cdata[2] & MSM8939_S5_P1_MASK) >> MSM8939_S5_P1_SHIFT;
- p1[6] = (cdata[2] & MSM8939_S6_P1_MASK) >> MSM8939_S6_P1_SHIFT;
- p1[7] = (cdata[3] & MSM8939_S7_P1_MASK) >> MSM8939_S7_P1_SHIFT;
- p1[8] = (cdata[3] & MSM8939_S8_P1_MASK) >> MSM8939_S8_P1_SHIFT;
- p1[9] = (cdata[4] & MSM8939_S9_P1_MASK) >> MSM8939_S9_P1_SHIFT;
- for (i = 0; i < priv->num_sensors; i++)
- p1[i] = ((base0) + p1[i]) << 2;
- break;
- default:
- for (i = 0; i < priv->num_sensors; i++) {
- p1[i] = 500;
- p2[i] = 780;
+ int i;
+
+ if (mode == NO_PT_CALIB) {
+ p1[0] += 2;
+ p1[1] += 9;
+ p1[2] += 3;
+ p1[3] += 9;
+ p1[4] += 5;
+ p1[5] += 9;
+ p1[6] += 7;
+ p1[7] += 10;
+ p1[8] += 8;
+ p1[9] += 9;
+ p1[10] += 8;
+ } else {
+ for (i = 0; i < 11; i++) {
+ /*
+ * ONE_PT_CALIB requires using addition here instead of
+ * using OR operation.
+ */
+ p1[i] += BIT_APPEND;
+ p2[i] += BIT_APPEND;
}
- break;
}
+}
+
+static int calibrate_8974_nvmem(struct tsens_priv *priv)
+{
+ u32 p1[11], p2[11];
+ u32 backup;
+ int ret, mode;
+
+ ret = nvmem_cell_read_variable_le_u32(priv->dev, "use_backup", &backup);
+ if (ret == -ENOENT)
+ dev_warn(priv->dev, "Please migrate to separate nvmem cells for calibration data\n");
+ if (ret < 0)
+ return ret;
+
+ mode = tsens_read_calibration(priv, 2, p1, p2, backup == BKP_SEL);
+ if (mode < 0)
+ return mode;
+
+ fixup_8974_points(mode, p1, p2);
+
compute_intercept_slope(priv, p1, p2, mode);
- kfree(qfprom_cdata);
return 0;
}
static int calibrate_8974(struct tsens_priv *priv)
{
- int base1 = 0, base2 = 0, i;
u32 p1[11], p2[11];
- int mode = 0;
u32 *calib, *bkp;
u32 calib_redun_sel;
+ int mode, ret;
+
+ ret = calibrate_8974_nvmem(priv);
+ if (ret == 0)
+ return 0;
calib = (u32 *)qfprom_read(priv->dev, "calib");
if (IS_ERR(calib))
@@ -367,116 +223,18 @@ static int calibrate_8974(struct tsens_priv *priv)
return PTR_ERR(bkp);
}
- calib_redun_sel = bkp[1] & BKP_REDUN_SEL;
- calib_redun_sel >>= BKP_REDUN_SHIFT;
-
- if (calib_redun_sel == BKP_SEL) {
- mode = (calib[4] & CAL_SEL_0_1) >> CAL_SEL_SHIFT;
- mode |= (calib[5] & CAL_SEL_2) >> CAL_SEL_SHIFT_2;
-
- switch (mode) {
- case TWO_PT_CALIB:
- base2 = (bkp[2] & BASE2_BKP_MASK) >> BASE2_BKP_SHIFT;
- p2[0] = (bkp[2] & S0_P2_BKP_MASK) >> S0_P2_BKP_SHIFT;
- p2[1] = (bkp[3] & S1_P2_BKP_MASK);
- p2[2] = (bkp[3] & S2_P2_BKP_MASK) >> S2_P2_BKP_SHIFT;
- p2[3] = (bkp[3] & S3_P2_BKP_MASK) >> S3_P2_BKP_SHIFT;
- p2[4] = (bkp[3] & S4_P2_BKP_MASK) >> S4_P2_BKP_SHIFT;
- p2[5] = (calib[4] & S5_P2_BKP_MASK) >> S5_P2_BKP_SHIFT;
- p2[6] = (calib[5] & S6_P2_BKP_MASK);
- p2[7] = (calib[5] & S7_P2_BKP_MASK) >> S7_P2_BKP_SHIFT;
- p2[8] = (calib[5] & S8_P2_BKP_MASK) >> S8_P2_BKP_SHIFT;
- p2[9] = (calib[5] & S9_P2_BKP_MASK) >> S9_P2_BKP_SHIFT;
- p2[10] = (calib[5] & S10_P2_BKP_MASK) >> S10_P2_BKP_SHIFT;
- fallthrough;
- case ONE_PT_CALIB:
- case ONE_PT_CALIB2:
- base1 = bkp[0] & BASE1_MASK;
- p1[0] = (bkp[0] & S0_P1_MASK) >> S0_P1_SHIFT;
- p1[1] = (bkp[0] & S1_P1_MASK) >> S1_P1_SHIFT;
- p1[2] = (bkp[0] & S2_P1_MASK) >> S2_P1_SHIFT;
- p1[3] = (bkp[0] & S3_P1_MASK) >> S3_P1_SHIFT;
- p1[4] = (bkp[1] & S4_P1_MASK);
- p1[5] = (bkp[1] & S5_P1_MASK) >> S5_P1_SHIFT;
- p1[6] = (bkp[1] & S6_P1_MASK) >> S6_P1_SHIFT;
- p1[7] = (bkp[1] & S7_P1_MASK) >> S7_P1_SHIFT;
- p1[8] = (bkp[2] & S8_P1_MASK_BKP) >> S8_P1_SHIFT;
- p1[9] = (bkp[2] & S9_P1_MASK_BKP) >> S9_P1_BKP_SHIFT;
- p1[10] = (bkp[2] & S10_P1_MASK_BKP) >> S10_P1_BKP_SHIFT;
- break;
- }
- } else {
- mode = (calib[1] & CAL_SEL_0_1) >> CAL_SEL_SHIFT;
- mode |= (calib[3] & CAL_SEL_2) >> CAL_SEL_SHIFT_2;
-
- switch (mode) {
- case TWO_PT_CALIB:
- base2 = (calib[2] & BASE2_MASK) >> BASE2_SHIFT;
- p2[0] = (calib[2] & S0_P2_MASK) >> S0_P2_SHIFT;
- p2[1] = (calib[2] & S1_P2_MASK) >> S1_P2_SHIFT;
- p2[2] = (calib[3] & S2_P2_MASK);
- p2[3] = (calib[3] & S3_P2_MASK) >> S3_P2_SHIFT;
- p2[4] = (calib[3] & S4_P2_MASK) >> S4_P2_SHIFT;
- p2[5] = (calib[3] & S5_P2_MASK) >> S5_P2_SHIFT;
- p2[6] = (calib[3] & S6_P2_MASK) >> S6_P2_SHIFT;
- p2[7] = (calib[4] & S7_P2_MASK);
- p2[8] = (calib[4] & S8_P2_MASK) >> S8_P2_SHIFT;
- p2[9] = (calib[4] & S9_P2_MASK) >> S9_P2_SHIFT;
- p2[10] = (calib[4] & S10_P2_MASK) >> S10_P2_SHIFT;
- fallthrough;
- case ONE_PT_CALIB:
- case ONE_PT_CALIB2:
- base1 = calib[0] & BASE1_MASK;
- p1[0] = (calib[0] & S0_P1_MASK) >> S0_P1_SHIFT;
- p1[1] = (calib[0] & S1_P1_MASK) >> S1_P1_SHIFT;
- p1[2] = (calib[0] & S2_P1_MASK) >> S2_P1_SHIFT;
- p1[3] = (calib[0] & S3_P1_MASK) >> S3_P1_SHIFT;
- p1[4] = (calib[1] & S4_P1_MASK);
- p1[5] = (calib[1] & S5_P1_MASK) >> S5_P1_SHIFT;
- p1[6] = (calib[1] & S6_P1_MASK) >> S6_P1_SHIFT;
- p1[7] = (calib[1] & S7_P1_MASK) >> S7_P1_SHIFT;
- p1[8] = (calib[1] & S8_P1_MASK) >> S8_P1_SHIFT;
- p1[9] = (calib[2] & S9_P1_MASK);
- p1[10] = (calib[2] & S10_P1_MASK) >> S10_P1_SHIFT;
- break;
- }
- }
+ calib_redun_sel = FIELD_GET(BKP_REDUN_SEL, bkp[1]);
- switch (mode) {
- case ONE_PT_CALIB:
- for (i = 0; i < priv->num_sensors; i++)
- p1[i] += (base1 << 2) | BIT_APPEND;
- break;
- case TWO_PT_CALIB:
- for (i = 0; i < priv->num_sensors; i++) {
- p2[i] += base2;
- p2[i] <<= 2;
- p2[i] |= BIT_APPEND;
- }
- fallthrough;
- case ONE_PT_CALIB2:
- for (i = 0; i < priv->num_sensors; i++) {
- p1[i] += base1;
- p1[i] <<= 2;
- p1[i] |= BIT_APPEND;
- }
- break;
- default:
- for (i = 0; i < priv->num_sensors; i++)
- p2[i] = 780;
- p1[0] = 502;
- p1[1] = 509;
- p1[2] = 503;
- p1[3] = 509;
- p1[4] = 505;
- p1[5] = 509;
- p1[6] = 507;
- p1[7] = 510;
- p1[8] = 508;
- p1[9] = 509;
- p1[10] = 508;
- break;
- }
+ if (calib_redun_sel == BKP_SEL)
+ mode = tsens_read_calibration_legacy(priv, &tsens_8974_backup_nvmem,
+ p1, p2,
+ bkp, calib);
+ else
+ mode = tsens_read_calibration_legacy(priv, &tsens_8974_nvmem,
+ p1, p2,
+ calib, NULL);
+
+ fixup_8974_points(mode, p1, p2);
compute_intercept_slope(priv, p1, p2, mode);
kfree(calib);
@@ -485,53 +243,19 @@ static int calibrate_8974(struct tsens_priv *priv)
return 0;
}
-static int calibrate_9607(struct tsens_priv *priv)
-{
- int base, i;
- u32 p1[5], p2[5];
- int mode = 0;
- u32 *qfprom_cdata;
-
- qfprom_cdata = (u32 *)qfprom_read(priv->dev, "calib");
- if (IS_ERR(qfprom_cdata))
- return PTR_ERR(qfprom_cdata);
-
- mode = (qfprom_cdata[2] & MDM9607_CAL_SEL_MASK) >> MDM9607_CAL_SEL_SHIFT;
- dev_dbg(priv->dev, "calibration mode is %d\n", mode);
-
- switch (mode) {
- case TWO_PT_CALIB:
- base = (qfprom_cdata[2] & MDM9607_BASE1_MASK) >> MDM9607_BASE1_SHIFT;
- p2[0] = (qfprom_cdata[0] & MDM9607_S0_P2_MASK) >> MDM9607_S0_P2_SHIFT;
- p2[1] = (qfprom_cdata[0] & MDM9607_S1_P2_MASK) >> MDM9607_S1_P2_SHIFT;
- p2[2] = (qfprom_cdata[1] & MDM9607_S2_P2_MASK) >> MDM9607_S2_P2_SHIFT;
- p2[3] = (qfprom_cdata[1] & MDM9607_S3_P2_MASK) >> MDM9607_S3_P2_SHIFT;
- p2[4] = (qfprom_cdata[2] & MDM9607_S4_P2_MASK) >> MDM9607_S4_P2_SHIFT;
- for (i = 0; i < priv->num_sensors; i++)
- p2[i] = ((base + p2[i]) << 2);
- fallthrough;
- case ONE_PT_CALIB2:
- base = (qfprom_cdata[0] & MDM9607_BASE0_MASK);
- p1[0] = (qfprom_cdata[0] & MDM9607_S0_P1_MASK) >> MDM9607_S0_P1_SHIFT;
- p1[1] = (qfprom_cdata[0] & MDM9607_S1_P1_MASK) >> MDM9607_S1_P1_SHIFT;
- p1[2] = (qfprom_cdata[1] & MDM9607_S2_P1_MASK) >> MDM9607_S2_P1_SHIFT;
- p1[3] = (qfprom_cdata[1] & MDM9607_S3_P1_MASK) >> MDM9607_S3_P1_SHIFT;
- p1[4] = (qfprom_cdata[2] & MDM9607_S4_P1_MASK) >> MDM9607_S4_P1_SHIFT;
- for (i = 0; i < priv->num_sensors; i++)
- p1[i] = ((base + p1[i]) << 2);
- break;
- default:
- for (i = 0; i < priv->num_sensors; i++) {
- p1[i] = 500;
- p2[i] = 780;
- }
- break;
- }
-
- compute_intercept_slope(priv, p1, p2, mode);
- kfree(qfprom_cdata);
-
- return 0;
+static int __init init_8939(struct tsens_priv *priv) {
+ priv->sensor[0].slope = 2911;
+ priv->sensor[1].slope = 2789;
+ priv->sensor[2].slope = 2906;
+ priv->sensor[3].slope = 2763;
+ priv->sensor[4].slope = 2922;
+ priv->sensor[5].slope = 2867;
+ priv->sensor[6].slope = 2833;
+ priv->sensor[7].slope = 2838;
+ priv->sensor[8].slope = 2840;
+ /* priv->sensor[9].slope = 2852; */
+
+ return init_common(priv);
}
/* v0.1: 8916, 8939, 8974, 9607 */
@@ -583,6 +307,12 @@ static const struct reg_field tsens_v0_1_regfields[MAX_REGFIELDS] = {
[TRDY] = REG_FIELD(TM_TRDY_OFF, 0, 0),
};
+static const struct tsens_ops ops_v0_1 = {
+ .init = init_common,
+ .calibrate = tsens_calibrate_common,
+ .get_temp = get_temp_common,
+};
+
static const struct tsens_ops ops_8916 = {
.init = init_common,
.calibrate = calibrate_8916,
@@ -599,15 +329,15 @@ struct tsens_plat_data data_8916 = {
};
static const struct tsens_ops ops_8939 = {
- .init = init_common,
- .calibrate = calibrate_8939,
+ .init = init_8939,
+ .calibrate = tsens_calibrate_common,
.get_temp = get_temp_common,
};
struct tsens_plat_data data_8939 = {
- .num_sensors = 10,
+ .num_sensors = 9,
.ops = &ops_8939,
- .hw_ids = (unsigned int []){ 0, 1, 2, 3, 5, 6, 7, 8, 9, 10 },
+ .hw_ids = (unsigned int []){ 0, 1, 2, 3, 5, 6, 7, 8, 9, /* 10 */ },
.feat = &tsens_v0_1_feat,
.fields = tsens_v0_1_regfields,
@@ -626,16 +356,9 @@ struct tsens_plat_data data_8974 = {
.fields = tsens_v0_1_regfields,
};
-static const struct tsens_ops ops_9607 = {
- .init = init_common,
- .calibrate = calibrate_9607,
- .get_temp = get_temp_common,
-};
-
struct tsens_plat_data data_9607 = {
.num_sensors = 5,
- .ops = &ops_9607,
- .hw_ids = (unsigned int []){ 0, 1, 2, 3, 4 },
+ .ops = &ops_v0_1,
.feat = &tsens_v0_1_feat,
.fields = tsens_v0_1_regfields,
};
diff --git a/drivers/thermal/qcom/tsens-v1.c b/drivers/thermal/qcom/tsens-v1.c
index 1d7f8a80bd13..b822a426066d 100644
--- a/drivers/thermal/qcom/tsens-v1.c
+++ b/drivers/thermal/qcom/tsens-v1.c
@@ -21,277 +21,68 @@
#define TM_HIGH_LOW_INT_STATUS_OFF 0x0088
#define TM_HIGH_LOW_Sn_INT_THRESHOLD_OFF 0x0090
-/* eeprom layout data for msm8956/76 (v1) */
-#define MSM8976_BASE0_MASK 0xff
-#define MSM8976_BASE1_MASK 0xff
-#define MSM8976_BASE1_SHIFT 8
-
-#define MSM8976_S0_P1_MASK 0x3f00
-#define MSM8976_S1_P1_MASK 0x3f00000
-#define MSM8976_S2_P1_MASK 0x3f
-#define MSM8976_S3_P1_MASK 0x3f000
-#define MSM8976_S4_P1_MASK 0x3f00
-#define MSM8976_S5_P1_MASK 0x3f00000
-#define MSM8976_S6_P1_MASK 0x3f
-#define MSM8976_S7_P1_MASK 0x3f000
-#define MSM8976_S8_P1_MASK 0x1f8
-#define MSM8976_S9_P1_MASK 0x1f8000
-#define MSM8976_S10_P1_MASK 0xf8000000
-#define MSM8976_S10_P1_MASK_1 0x1
-
-#define MSM8976_S0_P2_MASK 0xfc000
-#define MSM8976_S1_P2_MASK 0xfc000000
-#define MSM8976_S2_P2_MASK 0xfc0
-#define MSM8976_S3_P2_MASK 0xfc0000
-#define MSM8976_S4_P2_MASK 0xfc000
-#define MSM8976_S5_P2_MASK 0xfc000000
-#define MSM8976_S6_P2_MASK 0xfc0
-#define MSM8976_S7_P2_MASK 0xfc0000
-#define MSM8976_S8_P2_MASK 0x7e00
-#define MSM8976_S9_P2_MASK 0x7e00000
-#define MSM8976_S10_P2_MASK 0x7e
-
-#define MSM8976_S0_P1_SHIFT 8
-#define MSM8976_S1_P1_SHIFT 20
-#define MSM8976_S2_P1_SHIFT 0
-#define MSM8976_S3_P1_SHIFT 12
-#define MSM8976_S4_P1_SHIFT 8
-#define MSM8976_S5_P1_SHIFT 20
-#define MSM8976_S6_P1_SHIFT 0
-#define MSM8976_S7_P1_SHIFT 12
-#define MSM8976_S8_P1_SHIFT 3
-#define MSM8976_S9_P1_SHIFT 15
-#define MSM8976_S10_P1_SHIFT 27
-#define MSM8976_S10_P1_SHIFT_1 0
-
-#define MSM8976_S0_P2_SHIFT 14
-#define MSM8976_S1_P2_SHIFT 26
-#define MSM8976_S2_P2_SHIFT 6
-#define MSM8976_S3_P2_SHIFT 18
-#define MSM8976_S4_P2_SHIFT 14
-#define MSM8976_S5_P2_SHIFT 26
-#define MSM8976_S6_P2_SHIFT 6
-#define MSM8976_S7_P2_SHIFT 18
-#define MSM8976_S8_P2_SHIFT 9
-#define MSM8976_S9_P2_SHIFT 21
-#define MSM8976_S10_P2_SHIFT 1
-
-#define MSM8976_CAL_SEL_MASK 0x3
-
-#define MSM8976_CAL_DEGC_PT1 30
-#define MSM8976_CAL_DEGC_PT2 120
-#define MSM8976_SLOPE_FACTOR 1000
-#define MSM8976_SLOPE_DEFAULT 3200
-
-/* eeprom layout data for qcs404/405 (v1) */
-#define BASE0_MASK 0x000007f8
-#define BASE1_MASK 0x0007f800
-#define BASE0_SHIFT 3
-#define BASE1_SHIFT 11
-
-#define S0_P1_MASK 0x0000003f
-#define S1_P1_MASK 0x0003f000
-#define S2_P1_MASK 0x3f000000
-#define S3_P1_MASK 0x000003f0
-#define S4_P1_MASK 0x003f0000
-#define S5_P1_MASK 0x0000003f
-#define S6_P1_MASK 0x0003f000
-#define S7_P1_MASK 0x3f000000
-#define S8_P1_MASK 0x000003f0
-#define S9_P1_MASK 0x003f0000
-
-#define S0_P2_MASK 0x00000fc0
-#define S1_P2_MASK 0x00fc0000
-#define S2_P2_MASK_1_0 0xc0000000
-#define S2_P2_MASK_5_2 0x0000000f
-#define S3_P2_MASK 0x0000fc00
-#define S4_P2_MASK 0x0fc00000
-#define S5_P2_MASK 0x00000fc0
-#define S6_P2_MASK 0x00fc0000
-#define S7_P2_MASK_1_0 0xc0000000
-#define S7_P2_MASK_5_2 0x0000000f
-#define S8_P2_MASK 0x0000fc00
-#define S9_P2_MASK 0x0fc00000
-
-#define S0_P1_SHIFT 0
-#define S0_P2_SHIFT 6
-#define S1_P1_SHIFT 12
-#define S1_P2_SHIFT 18
-#define S2_P1_SHIFT 24
-#define S2_P2_SHIFT_1_0 30
-
-#define S2_P2_SHIFT_5_2 0
-#define S3_P1_SHIFT 4
-#define S3_P2_SHIFT 10
-#define S4_P1_SHIFT 16
-#define S4_P2_SHIFT 22
-
-#define S5_P1_SHIFT 0
-#define S5_P2_SHIFT 6
-#define S6_P1_SHIFT 12
-#define S6_P2_SHIFT 18
-#define S7_P1_SHIFT 24
-#define S7_P2_SHIFT_1_0 30
-
-#define S7_P2_SHIFT_5_2 0
-#define S8_P1_SHIFT 4
-#define S8_P2_SHIFT 10
-#define S9_P1_SHIFT 16
-#define S9_P2_SHIFT 22
-
-#define CAL_SEL_MASK 7
-#define CAL_SEL_SHIFT 0
-
-static void compute_intercept_slope_8976(struct tsens_priv *priv,
- u32 *p1, u32 *p2, u32 mode)
-{
- int i;
-
- priv->sensor[0].slope = 3313;
- priv->sensor[1].slope = 3275;
- priv->sensor[2].slope = 3320;
- priv->sensor[3].slope = 3246;
- priv->sensor[4].slope = 3279;
- priv->sensor[5].slope = 3257;
- priv->sensor[6].slope = 3234;
- priv->sensor[7].slope = 3269;
- priv->sensor[8].slope = 3255;
- priv->sensor[9].slope = 3239;
- priv->sensor[10].slope = 3286;
+struct tsens_legacy_calibration_format tsens_qcs404_nvmem = {
+ .base_len = 8,
+ .base_shift = 2,
+ .sp_len = 6,
+ .mode = { 4, 0 },
+ .invalid = { 4, 2 },
+ .base = { { 4, 3 }, { 4, 11 } },
+ .sp = {
+ { { 0, 0 }, { 0, 6 } },
+ { { 0, 12 }, { 0, 18 } },
+ { { 0, 24 }, { 0, 30 } },
+ { { 1, 4 }, { 1, 10 } },
+ { { 1, 16 }, { 1, 22 } },
+ { { 2, 0 }, { 2, 6 } },
+ { { 2, 12 }, { 2, 18 } },
+ { { 2, 24 }, { 2, 30 } },
+ { { 3, 4 }, { 3, 10 } },
+ { { 3, 16 }, { 3, 22 } },
+ },
+};
- for (i = 0; i < priv->num_sensors; i++) {
- priv->sensor[i].offset = (p1[i] * MSM8976_SLOPE_FACTOR) -
- (MSM8976_CAL_DEGC_PT1 *
- priv->sensor[i].slope);
- }
-}
+struct tsens_legacy_calibration_format tsens_8976_nvmem = {
+ .base_len = 8,
+ .base_shift = 2,
+ .sp_len = 6,
+ .mode = { 4, 0 },
+ .invalid = { 4, 2 },
+ .base = { { 0, 0 }, { 2, 8 } },
+ .sp = {
+ { { 0, 8 }, { 0, 14 } },
+ { { 0, 20 }, { 0, 26 } },
+ { { 1, 0 }, { 1, 6 } },
+ { { 1, 12 }, { 1, 18 } },
+ { { 2, 8 }, { 2, 14 } },
+ { { 2, 20 }, { 2, 26 } },
+ { { 3, 0 }, { 3, 6 } },
+ { { 3, 12 }, { 3, 18 } },
+ { { 4, 2 }, { 4, 9 } },
+ { { 4, 14 }, { 4, 21 } },
+ { { 4, 26 }, { 5, 1 } },
+ },
+};
static int calibrate_v1(struct tsens_priv *priv)
{
- u32 base0 = 0, base1 = 0;
u32 p1[10], p2[10];
- u32 mode = 0, lsb = 0, msb = 0;
u32 *qfprom_cdata;
- int i;
-
- qfprom_cdata = (u32 *)qfprom_read(priv->dev, "calib");
- if (IS_ERR(qfprom_cdata))
- return PTR_ERR(qfprom_cdata);
-
- mode = (qfprom_cdata[4] & CAL_SEL_MASK) >> CAL_SEL_SHIFT;
- dev_dbg(priv->dev, "calibration mode is %d\n", mode);
+ int mode, ret;
- switch (mode) {
- case TWO_PT_CALIB:
- base1 = (qfprom_cdata[4] & BASE1_MASK) >> BASE1_SHIFT;
- p2[0] = (qfprom_cdata[0] & S0_P2_MASK) >> S0_P2_SHIFT;
- p2[1] = (qfprom_cdata[0] & S1_P2_MASK) >> S1_P2_SHIFT;
- /* This value is split over two registers, 2 bits and 4 bits */
- lsb = (qfprom_cdata[0] & S2_P2_MASK_1_0) >> S2_P2_SHIFT_1_0;
- msb = (qfprom_cdata[1] & S2_P2_MASK_5_2) >> S2_P2_SHIFT_5_2;
- p2[2] = msb << 2 | lsb;
- p2[3] = (qfprom_cdata[1] & S3_P2_MASK) >> S3_P2_SHIFT;
- p2[4] = (qfprom_cdata[1] & S4_P2_MASK) >> S4_P2_SHIFT;
- p2[5] = (qfprom_cdata[2] & S5_P2_MASK) >> S5_P2_SHIFT;
- p2[6] = (qfprom_cdata[2] & S6_P2_MASK) >> S6_P2_SHIFT;
- /* This value is split over two registers, 2 bits and 4 bits */
- lsb = (qfprom_cdata[2] & S7_P2_MASK_1_0) >> S7_P2_SHIFT_1_0;
- msb = (qfprom_cdata[3] & S7_P2_MASK_5_2) >> S7_P2_SHIFT_5_2;
- p2[7] = msb << 2 | lsb;
- p2[8] = (qfprom_cdata[3] & S8_P2_MASK) >> S8_P2_SHIFT;
- p2[9] = (qfprom_cdata[3] & S9_P2_MASK) >> S9_P2_SHIFT;
- for (i = 0; i < priv->num_sensors; i++)
- p2[i] = ((base1 + p2[i]) << 2);
- fallthrough;
- case ONE_PT_CALIB2:
- base0 = (qfprom_cdata[4] & BASE0_MASK) >> BASE0_SHIFT;
- p1[0] = (qfprom_cdata[0] & S0_P1_MASK) >> S0_P1_SHIFT;
- p1[1] = (qfprom_cdata[0] & S1_P1_MASK) >> S1_P1_SHIFT;
- p1[2] = (qfprom_cdata[0] & S2_P1_MASK) >> S2_P1_SHIFT;
- p1[3] = (qfprom_cdata[1] & S3_P1_MASK) >> S3_P1_SHIFT;
- p1[4] = (qfprom_cdata[1] & S4_P1_MASK) >> S4_P1_SHIFT;
- p1[5] = (qfprom_cdata[2] & S5_P1_MASK) >> S5_P1_SHIFT;
- p1[6] = (qfprom_cdata[2] & S6_P1_MASK) >> S6_P1_SHIFT;
- p1[7] = (qfprom_cdata[2] & S7_P1_MASK) >> S7_P1_SHIFT;
- p1[8] = (qfprom_cdata[3] & S8_P1_MASK) >> S8_P1_SHIFT;
- p1[9] = (qfprom_cdata[3] & S9_P1_MASK) >> S9_P1_SHIFT;
- for (i = 0; i < priv->num_sensors; i++)
- p1[i] = (((base0) + p1[i]) << 2);
- break;
- default:
- for (i = 0; i < priv->num_sensors; i++) {
- p1[i] = 500;
- p2[i] = 780;
- }
- break;
- }
-
- compute_intercept_slope(priv, p1, p2, mode);
- kfree(qfprom_cdata);
-
- return 0;
-}
-
-static int calibrate_8976(struct tsens_priv *priv)
-{
- int base0 = 0, base1 = 0, i;
- u32 p1[11], p2[11];
- int mode = 0, tmp = 0;
- u32 *qfprom_cdata;
+ ret = tsens_calibrate_common(priv);
+ if (!ret)
+ return 0;
qfprom_cdata = (u32 *)qfprom_read(priv->dev, "calib");
if (IS_ERR(qfprom_cdata))
return PTR_ERR(qfprom_cdata);
- mode = (qfprom_cdata[4] & MSM8976_CAL_SEL_MASK);
- dev_dbg(priv->dev, "calibration mode is %d\n", mode);
+ mode = tsens_read_calibration_legacy(priv, &tsens_qcs404_nvmem,
+ p1, p2,
+ qfprom_cdata, NULL);
- switch (mode) {
- case TWO_PT_CALIB:
- base1 = (qfprom_cdata[2] & MSM8976_BASE1_MASK) >> MSM8976_BASE1_SHIFT;
- p2[0] = (qfprom_cdata[0] & MSM8976_S0_P2_MASK) >> MSM8976_S0_P2_SHIFT;
- p2[1] = (qfprom_cdata[0] & MSM8976_S1_P2_MASK) >> MSM8976_S1_P2_SHIFT;
- p2[2] = (qfprom_cdata[1] & MSM8976_S2_P2_MASK) >> MSM8976_S2_P2_SHIFT;
- p2[3] = (qfprom_cdata[1] & MSM8976_S3_P2_MASK) >> MSM8976_S3_P2_SHIFT;
- p2[4] = (qfprom_cdata[2] & MSM8976_S4_P2_MASK) >> MSM8976_S4_P2_SHIFT;
- p2[5] = (qfprom_cdata[2] & MSM8976_S5_P2_MASK) >> MSM8976_S5_P2_SHIFT;
- p2[6] = (qfprom_cdata[3] & MSM8976_S6_P2_MASK) >> MSM8976_S6_P2_SHIFT;
- p2[7] = (qfprom_cdata[3] & MSM8976_S7_P2_MASK) >> MSM8976_S7_P2_SHIFT;
- p2[8] = (qfprom_cdata[4] & MSM8976_S8_P2_MASK) >> MSM8976_S8_P2_SHIFT;
- p2[9] = (qfprom_cdata[4] & MSM8976_S9_P2_MASK) >> MSM8976_S9_P2_SHIFT;
- p2[10] = (qfprom_cdata[5] & MSM8976_S10_P2_MASK) >> MSM8976_S10_P2_SHIFT;
-
- for (i = 0; i < priv->num_sensors; i++)
- p2[i] = ((base1 + p2[i]) << 2);
- fallthrough;
- case ONE_PT_CALIB2:
- base0 = qfprom_cdata[0] & MSM8976_BASE0_MASK;
- p1[0] = (qfprom_cdata[0] & MSM8976_S0_P1_MASK) >> MSM8976_S0_P1_SHIFT;
- p1[1] = (qfprom_cdata[0] & MSM8976_S1_P1_MASK) >> MSM8976_S1_P1_SHIFT;
- p1[2] = (qfprom_cdata[1] & MSM8976_S2_P1_MASK) >> MSM8976_S2_P1_SHIFT;
- p1[3] = (qfprom_cdata[1] & MSM8976_S3_P1_MASK) >> MSM8976_S3_P1_SHIFT;
- p1[4] = (qfprom_cdata[2] & MSM8976_S4_P1_MASK) >> MSM8976_S4_P1_SHIFT;
- p1[5] = (qfprom_cdata[2] & MSM8976_S5_P1_MASK) >> MSM8976_S5_P1_SHIFT;
- p1[6] = (qfprom_cdata[3] & MSM8976_S6_P1_MASK) >> MSM8976_S6_P1_SHIFT;
- p1[7] = (qfprom_cdata[3] & MSM8976_S7_P1_MASK) >> MSM8976_S7_P1_SHIFT;
- p1[8] = (qfprom_cdata[4] & MSM8976_S8_P1_MASK) >> MSM8976_S8_P1_SHIFT;
- p1[9] = (qfprom_cdata[4] & MSM8976_S9_P1_MASK) >> MSM8976_S9_P1_SHIFT;
- p1[10] = (qfprom_cdata[4] & MSM8976_S10_P1_MASK) >> MSM8976_S10_P1_SHIFT;
- tmp = (qfprom_cdata[5] & MSM8976_S10_P1_MASK_1) << MSM8976_S10_P1_SHIFT_1;
- p1[10] |= tmp;
-
- for (i = 0; i < priv->num_sensors; i++)
- p1[i] = (((base0) + p1[i]) << 2);
- break;
- default:
- for (i = 0; i < priv->num_sensors; i++) {
- p1[i] = 500;
- p2[i] = 780;
- }
- break;
- }
-
- compute_intercept_slope_8976(priv, p1, p2, mode);
+ compute_intercept_slope(priv, p1, p2, mode);
kfree(qfprom_cdata);
return 0;
@@ -365,6 +156,22 @@ static const struct reg_field tsens_v1_regfields[MAX_REGFIELDS] = {
[TRDY] = REG_FIELD(TM_TRDY_OFF, 0, 0),
};
+static int __init init_8956(struct tsens_priv *priv) {
+ priv->sensor[0].slope = 3313;
+ priv->sensor[1].slope = 3275;
+ priv->sensor[2].slope = 3320;
+ priv->sensor[3].slope = 3246;
+ priv->sensor[4].slope = 3279;
+ priv->sensor[5].slope = 3257;
+ priv->sensor[6].slope = 3234;
+ priv->sensor[7].slope = 3269;
+ priv->sensor[8].slope = 3255;
+ priv->sensor[9].slope = 3239;
+ priv->sensor[10].slope = 3286;
+
+ return init_common(priv);
+}
+
static const struct tsens_ops ops_generic_v1 = {
.init = init_common,
.calibrate = calibrate_v1,
@@ -377,17 +184,28 @@ struct tsens_plat_data data_tsens_v1 = {
.fields = tsens_v1_regfields,
};
+static const struct tsens_ops ops_8956 = {
+ .init = init_8956,
+ .calibrate = tsens_calibrate_common,
+ .get_temp = get_temp_tsens_valid,
+};
+
+struct tsens_plat_data data_8956 = {
+ .num_sensors = 11,
+ .ops = &ops_8956,
+ .feat = &tsens_v1_feat,
+ .fields = tsens_v1_regfields,
+};
+
static const struct tsens_ops ops_8976 = {
.init = init_common,
- .calibrate = calibrate_8976,
+ .calibrate = tsens_calibrate_common,
.get_temp = get_temp_tsens_valid,
};
-/* Valid for both MSM8956 and MSM8976. */
struct tsens_plat_data data_8976 = {
.num_sensors = 11,
.ops = &ops_8976,
- .hw_ids = (unsigned int[]){0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
.feat = &tsens_v1_feat,
.fields = tsens_v1_regfields,
};
diff --git a/drivers/thermal/qcom/tsens.c b/drivers/thermal/qcom/tsens.c
index b5b136ff323f..8020ead2794e 100644
--- a/drivers/thermal/qcom/tsens.c
+++ b/drivers/thermal/qcom/tsens.c
@@ -70,6 +70,171 @@ char *qfprom_read(struct device *dev, const char *cname)
return ret;
}
+int tsens_read_calibration(struct tsens_priv *priv, int shift, u32 *p1, u32 *p2, bool backup)
+{
+ u32 mode;
+ u32 base1, base2;
+ char name[] = "sXX_pY_backup"; /* s10_p1_backup */
+ int i, ret;
+
+ if (priv->num_sensors > MAX_SENSORS)
+ return -EINVAL;
+
+ ret = snprintf(name, sizeof(name), "mode%s", backup ? "_backup" : "");
+ if (ret < 0)
+ return ret;
+
+ ret = nvmem_cell_read_variable_le_u32(priv->dev, name, &mode);
+ if (ret == -ENOENT)
+ dev_warn(priv->dev, "Please migrate to separate nvmem cells for calibration data\n");
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(priv->dev, "calibration mode is %d\n", mode);
+
+ ret = snprintf(name, sizeof(name), "base1%s", backup ? "_backup" : "");
+ if (ret < 0)
+ return ret;
+
+ ret = nvmem_cell_read_variable_le_u32(priv->dev, name, &base1);
+ if (ret < 0)
+ return ret;
+
+ ret = snprintf(name, sizeof(name), "base2%s", backup ? "_backup" : "");
+ if (ret < 0)
+ return ret;
+
+ ret = nvmem_cell_read_variable_le_u32(priv->dev, name, &base2);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < priv->num_sensors; i++) {
+ ret = snprintf(name, sizeof(name), "s%d_p1%s", priv->sensor[i].hw_id,
+ backup ? "_backup" : "");
+ if (ret < 0)
+ return ret;
+
+ ret = nvmem_cell_read_variable_le_u32(priv->dev, name, &p1[i]);
+ if (ret)
+ return ret;
+
+ ret = snprintf(name, sizeof(name), "s%d_p2%s", priv->sensor[i].hw_id,
+ backup ? "_backup" : "");
+ if (ret < 0)
+ return ret;
+
+ ret = nvmem_cell_read_variable_le_u32(priv->dev, name, &p2[i]);
+ if (ret)
+ return ret;
+ }
+
+ switch (mode) {
+ case ONE_PT_CALIB:
+ for (i = 0; i < priv->num_sensors; i++)
+ p1[i] = p1[i] + (base1 << shift);
+ break;
+ case TWO_PT_CALIB:
+ for (i = 0; i < priv->num_sensors; i++)
+ p2[i] = (p2[i] + base2) << shift;
+ fallthrough;
+ case ONE_PT_CALIB2:
+ for (i = 0; i < priv->num_sensors; i++)
+ p1[i] = (p1[i] + base1) << shift;
+ break;
+ default:
+ dev_dbg(priv->dev, "calibrationless mode\n");
+ for (i = 0; i < priv->num_sensors; i++) {
+ p1[i] = 500;
+ p2[i] = 780;
+ }
+ }
+
+ return mode;
+}
+
+int tsens_calibrate_nvmem(struct tsens_priv *priv, int shift)
+{
+ u32 p1[MAX_SENSORS], p2[MAX_SENSORS];
+ int mode;
+
+ mode = tsens_read_calibration(priv, shift, p1, p2, false);
+ if (mode < 0)
+ return mode;
+
+ compute_intercept_slope(priv, p1, p2, mode);
+
+ return 0;
+}
+
+int tsens_calibrate_common(struct tsens_priv *priv)
+{
+ return tsens_calibrate_nvmem(priv, 2);
+}
+
+static u32 tsens_read_cell(const struct tsens_single_value *cell, u8 len, u32 *data0, u32 *data1)
+{
+ u32 val;
+ u32 *data = cell->blob ? data1 : data0;
+
+ if (cell->shift + len <= 32) {
+ val = data[cell->idx] >> cell->shift;
+ } else {
+ u8 part = 32 - cell->shift;
+
+ val = data[cell->idx] >> cell->shift;
+ val |= data[cell->idx + 1] << part;
+ }
+
+ return val & ((1 << len) - 1);
+}
+
+int tsens_read_calibration_legacy(struct tsens_priv *priv,
+ const struct tsens_legacy_calibration_format *format,
+ u32 *p1, u32 *p2,
+ u32 *cdata0, u32 *cdata1)
+{
+ u32 mode, invalid;
+ u32 base1, base2;
+ int i;
+
+ mode = tsens_read_cell(&format->mode, 2, cdata0, cdata1);
+ invalid = tsens_read_cell(&format->invalid, 1, cdata0, cdata1);
+ if (invalid)
+ mode = NO_PT_CALIB;
+ dev_dbg(priv->dev, "calibration mode is %d\n", mode);
+
+ base1 = tsens_read_cell(&format->base[0], format->base_len, cdata0, cdata1);
+ base2 = tsens_read_cell(&format->base[1], format->base_len, cdata0, cdata1);
+
+ for (i = 0; i < priv->num_sensors; i++) {
+ p1[i] = tsens_read_cell(&format->sp[i][0], format->sp_len, cdata0, cdata1);
+ p2[i] = tsens_read_cell(&format->sp[i][1], format->sp_len, cdata0, cdata1);
+ }
+
+ switch (mode) {
+ case ONE_PT_CALIB:
+ for (i = 0; i < priv->num_sensors; i++)
+ p1[i] = p1[i] + (base1 << format->base_shift);
+ break;
+ case TWO_PT_CALIB:
+ for (i = 0; i < priv->num_sensors; i++)
+ p2[i] = (p2[i] + base2) << format->base_shift;
+ fallthrough;
+ case ONE_PT_CALIB2:
+ for (i = 0; i < priv->num_sensors; i++)
+ p1[i] = (p1[i] + base1) << format->base_shift;
+ break;
+ default:
+ dev_dbg(priv->dev, "calibrationless mode\n");
+ for (i = 0; i < priv->num_sensors; i++) {
+ p1[i] = 500;
+ p2[i] = 780;
+ }
+ }
+
+ return mode;
+}
+
/*
* Use this function on devices where slope and offset calculations
* depend on calibration data read from qfprom. On others the slope
@@ -459,12 +624,9 @@ static irqreturn_t tsens_irq_thread(int irq, void *data)
{
struct tsens_priv *priv = data;
struct tsens_irq_data d;
- bool enable = true, disable = false;
- unsigned long flags;
- int temp, ret, i;
+ int i;
for (i = 0; i < priv->num_sensors; i++) {
- bool trigger = false;
const struct tsens_sensor *s = &priv->sensor[i];
u32 hw_id = s->hw_id;
@@ -472,52 +634,8 @@ static irqreturn_t tsens_irq_thread(int irq, void *data)
continue;
if (!tsens_threshold_violated(priv, hw_id, &d))
continue;
- ret = get_temp_tsens_valid(s, &temp);
- if (ret) {
- dev_err(priv->dev, "[%u] %s: error reading sensor\n",
- hw_id, __func__);
- continue;
- }
-
- spin_lock_irqsave(&priv->ul_lock, flags);
-
- tsens_read_irq_state(priv, hw_id, s, &d);
-
- if (d.up_viol &&
- !masked_irq(hw_id, d.up_irq_mask, tsens_version(priv))) {
- tsens_set_interrupt(priv, hw_id, UPPER, disable);
- if (d.up_thresh > temp) {
- dev_dbg(priv->dev, "[%u] %s: re-arm upper\n",
- hw_id, __func__);
- tsens_set_interrupt(priv, hw_id, UPPER, enable);
- } else {
- trigger = true;
- /* Keep irq masked */
- }
- } else if (d.low_viol &&
- !masked_irq(hw_id, d.low_irq_mask, tsens_version(priv))) {
- tsens_set_interrupt(priv, hw_id, LOWER, disable);
- if (d.low_thresh < temp) {
- dev_dbg(priv->dev, "[%u] %s: re-arm low\n",
- hw_id, __func__);
- tsens_set_interrupt(priv, hw_id, LOWER, enable);
- } else {
- trigger = true;
- /* Keep irq masked */
- }
- }
- spin_unlock_irqrestore(&priv->ul_lock, flags);
-
- if (trigger) {
- dev_dbg(priv->dev, "[%u] %s: TZ update trigger (%d mC)\n",
- hw_id, __func__, temp);
- thermal_zone_device_update(s->tzd,
- THERMAL_EVENT_UNSPECIFIED);
- } else {
- dev_dbg(priv->dev, "[%u] %s: no violation: %d\n",
- hw_id, __func__, temp);
- }
+ thermal_zone_device_update(s->tzd, THERMAL_EVENT_UNSPECIFIED);
if (tsens_version(priv) < VER_0_1) {
/* Constraint: There is only 1 interrupt control register for all
@@ -984,6 +1102,9 @@ static const struct of_device_id tsens_table[] = {
.compatible = "qcom,msm8939-tsens",
.data = &data_8939,
}, {
+ .compatible = "qcom,msm8956-tsens",
+ .data = &data_8956,
+ }, {
.compatible = "qcom,msm8960-tsens",
.data = &data_8960,
}, {
diff --git a/drivers/thermal/qcom/tsens.h b/drivers/thermal/qcom/tsens.h
index 899af128855f..dba9cd38f637 100644
--- a/drivers/thermal/qcom/tsens.h
+++ b/drivers/thermal/qcom/tsens.h
@@ -6,6 +6,7 @@
#ifndef __QCOM_TSENS_H__
#define __QCOM_TSENS_H__
+#define NO_PT_CALIB 0x0
#define ONE_PT_CALIB 0x1
#define ONE_PT_CALIB2 0x2
#define TWO_PT_CALIB 0x3
@@ -17,6 +18,8 @@
#define THRESHOLD_MAX_ADC_CODE 0x3ff
#define THRESHOLD_MIN_ADC_CODE 0x0
+#define MAX_SENSORS 16
+
#include <linux/interrupt.h>
#include <linux/thermal.h>
#include <linux/regmap.h>
@@ -581,7 +584,48 @@ struct tsens_priv {
struct tsens_sensor sensor[];
};
+/**
+ * struct tsens_single_value - internal representation of a single field inside nvmem calibration data
+ * @idx: index into the u32 data array
+ * @shift: the shift of the first bit in the value
+ * @blob: index of the data blob to use for this cell
+ */
+struct tsens_single_value {
+ u8 idx;
+ u8 shift;
+ u8 blob;
+};
+
+/**
+ * struct tsens_legacy_calibration_format - description of calibration data used when parsing the legacy nvmem blob
+ * @base_len: the length of the base fields inside calibration data
+ * @base_shift: the shift to be applied to base data
+ * @sp_len: the length of the sN_pM fields inside calibration data
+ * @mode: descriptor of the calibration mode field
+ * @invalid: descriptor of the calibration mode invalid field
+ * @base: descriptors of the base0 and base1 fields
+ * @sp: descriptors of the sN_pM fields
+ */
+struct tsens_legacy_calibration_format {
+ unsigned int base_len;
+ unsigned int base_shift;
+ unsigned int sp_len;
+ /* just two bits */
+ struct tsens_single_value mode;
+ /* on all platforms except 8974 invalid is the third bit of what downstream calls 'mode' */
+ struct tsens_single_value invalid;
+ struct tsens_single_value base[2];
+ struct tsens_single_value sp[][2];
+};
+
char *qfprom_read(struct device *dev, const char *cname);
+int tsens_read_calibration_legacy(struct tsens_priv *priv,
+ const struct tsens_legacy_calibration_format *format,
+ u32 *p1, u32 *p2,
+ u32 *cdata, u32 *csel);
+int tsens_read_calibration(struct tsens_priv *priv, int shift, u32 *p1, u32 *p2, bool backup);
+int tsens_calibrate_nvmem(struct tsens_priv *priv, int shift);
+int tsens_calibrate_common(struct tsens_priv *priv);
void compute_intercept_slope(struct tsens_priv *priv, u32 *pt1, u32 *pt2, u32 mode);
int init_common(struct tsens_priv *priv);
int get_temp_tsens_valid(const struct tsens_sensor *s, int *temp);
@@ -594,7 +638,7 @@ extern struct tsens_plat_data data_8960;
extern struct tsens_plat_data data_8916, data_8939, data_8974, data_9607;
/* TSENS v1 targets */
-extern struct tsens_plat_data data_tsens_v1, data_8976;
+extern struct tsens_plat_data data_tsens_v1, data_8976, data_8956;
/* TSENS v2 targets */
extern struct tsens_plat_data data_8996, data_ipq8074, data_tsens_v2;
diff --git a/drivers/thermal/qoriq_thermal.c b/drivers/thermal/qoriq_thermal.c
index d111e218f362..431c29c0898a 100644
--- a/drivers/thermal/qoriq_thermal.c
+++ b/drivers/thermal/qoriq_thermal.c
@@ -13,7 +13,6 @@
#include <linux/thermal.h>
#include <linux/units.h>
-#include "thermal_core.h"
#include "thermal_hwmon.h"
#define SITES_MAX 16
diff --git a/drivers/thermal/rcar_gen3_thermal.c b/drivers/thermal/rcar_gen3_thermal.c
index 4c1c6f89aa2f..d6b5b59c5c53 100644
--- a/drivers/thermal/rcar_gen3_thermal.c
+++ b/drivers/thermal/rcar_gen3_thermal.c
@@ -17,7 +17,6 @@
#include <linux/sys_soc.h>
#include <linux/thermal.h>
-#include "thermal_core.h"
#include "thermal_hwmon.h"
/* Register offsets */
@@ -87,8 +86,10 @@ struct rcar_gen3_thermal_tsc {
struct rcar_gen3_thermal_priv {
struct rcar_gen3_thermal_tsc *tscs[TSC_MAX_NUM];
+ struct thermal_zone_device_ops ops;
unsigned int num_tscs;
- void (*thermal_init)(struct rcar_gen3_thermal_tsc *tsc);
+ void (*thermal_init)(struct rcar_gen3_thermal_priv *priv,
+ struct rcar_gen3_thermal_tsc *tsc);
int ptat[3];
};
@@ -225,7 +226,7 @@ static int rcar_gen3_thermal_set_trips(struct thermal_zone_device *tz, int low,
return 0;
}
-static struct thermal_zone_device_ops rcar_gen3_tz_of_ops = {
+static const struct thermal_zone_device_ops rcar_gen3_tz_of_ops = {
.get_temp = rcar_gen3_thermal_get_temp,
.set_trips = rcar_gen3_thermal_set_trips,
};
@@ -239,7 +240,7 @@ static irqreturn_t rcar_gen3_thermal_irq(int irq, void *data)
for (i = 0; i < priv->num_tscs; i++) {
status = rcar_gen3_thermal_read(priv->tscs[i], REG_GEN3_IRQSTR);
rcar_gen3_thermal_write(priv->tscs[i], REG_GEN3_IRQSTR, 0);
- if (status)
+ if (status && priv->tscs[i]->zone)
thermal_zone_device_update(priv->tscs[i]->zone,
THERMAL_EVENT_UNSPECIFIED);
}
@@ -310,7 +311,8 @@ static bool rcar_gen3_thermal_read_fuses(struct rcar_gen3_thermal_priv *priv)
return true;
}
-static void rcar_gen3_thermal_init_r8a7795es1(struct rcar_gen3_thermal_tsc *tsc)
+static void rcar_gen3_thermal_init_r8a7795es1(struct rcar_gen3_thermal_priv *priv,
+ struct rcar_gen3_thermal_tsc *tsc)
{
rcar_gen3_thermal_write(tsc, REG_GEN3_CTSR, CTSR_THBGR);
rcar_gen3_thermal_write(tsc, REG_GEN3_CTSR, 0x0);
@@ -321,7 +323,7 @@ static void rcar_gen3_thermal_init_r8a7795es1(struct rcar_gen3_thermal_tsc *tsc)
rcar_gen3_thermal_write(tsc, REG_GEN3_IRQCTL, 0x3F);
rcar_gen3_thermal_write(tsc, REG_GEN3_IRQMSK, 0);
- if (tsc->zone->ops->set_trips)
+ if (priv->ops.set_trips)
rcar_gen3_thermal_write(tsc, REG_GEN3_IRQEN,
IRQ_TEMPD1 | IRQ_TEMP2);
@@ -337,7 +339,8 @@ static void rcar_gen3_thermal_init_r8a7795es1(struct rcar_gen3_thermal_tsc *tsc)
usleep_range(1000, 2000);
}
-static void rcar_gen3_thermal_init(struct rcar_gen3_thermal_tsc *tsc)
+static void rcar_gen3_thermal_init(struct rcar_gen3_thermal_priv *priv,
+ struct rcar_gen3_thermal_tsc *tsc)
{
u32 reg_val;
@@ -349,7 +352,7 @@ static void rcar_gen3_thermal_init(struct rcar_gen3_thermal_tsc *tsc)
rcar_gen3_thermal_write(tsc, REG_GEN3_IRQCTL, 0);
rcar_gen3_thermal_write(tsc, REG_GEN3_IRQMSK, 0);
- if (tsc->zone->ops->set_trips)
+ if (priv->ops.set_trips)
rcar_gen3_thermal_write(tsc, REG_GEN3_IRQEN,
IRQ_TEMPD1 | IRQ_TEMP2);
@@ -403,6 +406,10 @@ static const struct of_device_id rcar_gen3_thermal_dt_ids[] = {
.compatible = "renesas,r8a779f0-thermal",
.data = &rcar_gen3_ths_tj_1,
},
+ {
+ .compatible = "renesas,r8a779g0-thermal",
+ .data = &rcar_gen3_ths_tj_1,
+ },
{},
};
MODULE_DEVICE_TABLE(of, rcar_gen3_thermal_dt_ids);
@@ -466,6 +473,7 @@ static int rcar_gen3_thermal_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
+ priv->ops = rcar_gen3_tz_of_ops;
priv->thermal_init = rcar_gen3_thermal_init;
if (soc_device_match(r8a7795es1))
priv->thermal_init = rcar_gen3_thermal_init_r8a7795es1;
@@ -473,7 +481,7 @@ static int rcar_gen3_thermal_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, priv);
if (rcar_gen3_thermal_request_irqs(priv, pdev))
- rcar_gen3_tz_of_ops.set_trips = NULL;
+ priv->ops.set_trips = NULL;
pm_runtime_enable(dev);
pm_runtime_get_sync(dev);
@@ -508,8 +516,10 @@ static int rcar_gen3_thermal_probe(struct platform_device *pdev)
for (i = 0; i < priv->num_tscs; i++) {
struct rcar_gen3_thermal_tsc *tsc = priv->tscs[i];
- zone = devm_thermal_of_zone_register(dev, i, tsc,
- &rcar_gen3_tz_of_ops);
+ priv->thermal_init(priv, tsc);
+ rcar_gen3_thermal_calc_coefs(priv, tsc, *ths_tj_1);
+
+ zone = devm_thermal_of_zone_register(dev, i, tsc, &priv->ops);
if (IS_ERR(zone)) {
dev_err(dev, "Sensor %u: Can't register thermal zone\n", i);
ret = PTR_ERR(zone);
@@ -517,9 +527,6 @@ static int rcar_gen3_thermal_probe(struct platform_device *pdev)
}
tsc->zone = zone;
- priv->thermal_init(tsc);
- rcar_gen3_thermal_calc_coefs(priv, tsc, *ths_tj_1);
-
tsc->zone->tzp->no_hwmon = false;
ret = thermal_add_hwmon_sysfs(tsc->zone);
if (ret)
@@ -529,7 +536,7 @@ static int rcar_gen3_thermal_probe(struct platform_device *pdev)
if (ret)
goto error_unregister;
- ret = of_thermal_get_ntrips(tsc->zone);
+ ret = thermal_zone_get_num_trips(tsc->zone);
if (ret < 0)
goto error_unregister;
@@ -556,12 +563,8 @@ static int __maybe_unused rcar_gen3_thermal_resume(struct device *dev)
for (i = 0; i < priv->num_tscs; i++) {
struct rcar_gen3_thermal_tsc *tsc = priv->tscs[i];
- struct thermal_zone_device *zone = tsc->zone;
- priv->thermal_init(tsc);
- if (zone->ops->set_trips)
- rcar_gen3_thermal_set_trips(zone, zone->prev_low_trip,
- zone->prev_high_trip);
+ priv->thermal_init(priv, tsc);
}
return 0;
diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c
index 61c2b8855cb8..436f5f9cf729 100644
--- a/drivers/thermal/rcar_thermal.c
+++ b/drivers/thermal/rcar_thermal.c
@@ -278,52 +278,12 @@ static int rcar_thermal_get_temp(struct thermal_zone_device *zone, int *temp)
return rcar_thermal_get_current_temp(priv, temp);
}
-static int rcar_thermal_get_trip_type(struct thermal_zone_device *zone,
- int trip, enum thermal_trip_type *type)
-{
- struct rcar_thermal_priv *priv = rcar_zone_to_priv(zone);
- struct device *dev = rcar_priv_to_dev(priv);
-
- /* see rcar_thermal_get_temp() */
- switch (trip) {
- case 0: /* +90 <= temp */
- *type = THERMAL_TRIP_CRITICAL;
- break;
- default:
- dev_err(dev, "rcar driver trip error\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int rcar_thermal_get_trip_temp(struct thermal_zone_device *zone,
- int trip, int *temp)
-{
- struct rcar_thermal_priv *priv = rcar_zone_to_priv(zone);
- struct device *dev = rcar_priv_to_dev(priv);
-
- /* see rcar_thermal_get_temp() */
- switch (trip) {
- case 0: /* +90 <= temp */
- *temp = MCELSIUS(90);
- break;
- default:
- dev_err(dev, "rcar driver trip error\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static const struct thermal_zone_device_ops rcar_thermal_zone_of_ops = {
+static struct thermal_zone_device_ops rcar_thermal_zone_ops = {
.get_temp = rcar_thermal_get_temp,
};
-static struct thermal_zone_device_ops rcar_thermal_zone_ops = {
- .get_temp = rcar_thermal_get_temp,
- .get_trip_type = rcar_thermal_get_trip_type,
- .get_trip_temp = rcar_thermal_get_trip_temp,
+static struct thermal_trip trips[] = {
+ { .type = THERMAL_TRIP_CRITICAL, .temperature = 90000 }
};
/*
@@ -529,11 +489,10 @@ static int rcar_thermal_probe(struct platform_device *pdev)
if (chip->use_of_thermal) {
priv->zone = devm_thermal_of_zone_register(
dev, i, priv,
- &rcar_thermal_zone_of_ops);
+ &rcar_thermal_zone_ops);
} else {
- priv->zone = thermal_zone_device_register(
- "rcar_thermal",
- 1, 0, priv,
+ priv->zone = thermal_zone_device_register_with_trips(
+ "rcar_thermal", trips, ARRAY_SIZE(trips), 0, priv,
&rcar_thermal_zone_ops, NULL, 0,
idle);
diff --git a/drivers/thermal/rockchip_thermal.c b/drivers/thermal/rockchip_thermal.c
index 819e059cde71..4b7c43f34d1a 100644
--- a/drivers/thermal/rockchip_thermal.c
+++ b/drivers/thermal/rockchip_thermal.c
@@ -60,7 +60,7 @@ enum adc_sort_mode {
#include "thermal_hwmon.h"
-/**
+/*
* The max sensors is two in rockchip SoCs.
* Two sensors: CPU and GPU sensor.
*/
@@ -169,7 +169,7 @@ struct rockchip_thermal_data {
enum tshut_polarity tshut_polarity;
};
-/**
+/*
* TSADC Sensor Register description:
*
* TSADCV2_* are used for RK3288 SoCs, the other chips can reuse it.
@@ -1339,7 +1339,7 @@ rockchip_thermal_register_sensor(struct platform_device *pdev,
}
/**
- * Reset TSADC Controller, reset all tsadc registers.
+ * rockchip_thermal_reset_controller - Reset TSADC Controller, reset all tsadc registers.
* @reset: the reset controller of tsadc
*/
static void rockchip_thermal_reset_controller(struct reset_control *reset)
@@ -1354,7 +1354,6 @@ static int rockchip_thermal_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
struct rockchip_thermal_data *thermal;
const struct of_device_id *match;
- struct resource *res;
int irq;
int i;
int error;
@@ -1378,8 +1377,7 @@ static int rockchip_thermal_probe(struct platform_device *pdev)
if (!thermal->chip)
return -EINVAL;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- thermal->regs = devm_ioremap_resource(&pdev->dev, res);
+ thermal->regs = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
if (IS_ERR(thermal->regs))
return PTR_ERR(thermal->regs);
diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c
index 51874d0a284c..527d1eb0663a 100644
--- a/drivers/thermal/samsung/exynos_tmu.c
+++ b/drivers/thermal/samsung/exynos_tmu.c
@@ -20,11 +20,10 @@
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
+#include <linux/thermal.h>
#include <dt-bindings/thermal/thermal_exynos.h>
-#include "../thermal_core.h"
-
/* Exynos generic registers */
#define EXYNOS_TMU_REG_TRIMINFO 0x0
#define EXYNOS_TMU_REG_CONTROL 0x20
@@ -260,31 +259,23 @@ static int exynos_tmu_initialize(struct platform_device *pdev)
{
struct exynos_tmu_data *data = platform_get_drvdata(pdev);
struct thermal_zone_device *tzd = data->tzd;
- const struct thermal_trip * const trips =
- of_thermal_get_trip_points(tzd);
+ int num_trips = thermal_zone_get_num_trips(tzd);
unsigned int status;
- int ret = 0, temp, hyst;
-
- if (!trips) {
- dev_err(&pdev->dev,
- "Cannot get trip points from device tree!\n");
- return -ENODEV;
- }
+ int ret = 0, temp;
- if (data->soc != SOC_ARCH_EXYNOS5433) /* FIXME */
- ret = tzd->ops->get_crit_temp(tzd, &temp);
- if (ret) {
+ ret = thermal_zone_get_crit_temp(tzd, &temp);
+ if (ret && data->soc != SOC_ARCH_EXYNOS5433) { /* FIXME */
dev_err(&pdev->dev,
"No CRITICAL trip point defined in device tree!\n");
goto out;
}
- if (of_thermal_get_ntrips(tzd) > data->ntrip) {
+ if (num_trips > data->ntrip) {
dev_info(&pdev->dev,
"More trip points than supported by this TMU.\n");
dev_info(&pdev->dev,
"%d trip points should be configured in polling mode.\n",
- (of_thermal_get_ntrips(tzd) - data->ntrip));
+ num_trips - data->ntrip);
}
mutex_lock(&data->lock);
@@ -297,25 +288,22 @@ static int exynos_tmu_initialize(struct platform_device *pdev)
ret = -EBUSY;
} else {
int i, ntrips =
- min_t(int, of_thermal_get_ntrips(tzd), data->ntrip);
+ min_t(int, num_trips, data->ntrip);
data->tmu_initialize(pdev);
/* Write temperature code for rising and falling threshold */
for (i = 0; i < ntrips; i++) {
- /* Write temperature code for rising threshold */
- ret = tzd->ops->get_trip_temp(tzd, i, &temp);
- if (ret)
- goto err;
- temp /= MCELSIUS;
- data->tmu_set_trip_temp(data, i, temp);
- /* Write temperature code for falling threshold */
- ret = tzd->ops->get_trip_hyst(tzd, i, &hyst);
+ struct thermal_trip trip;
+
+ ret = thermal_zone_get_trip(tzd, i, &trip);
if (ret)
goto err;
- hyst /= MCELSIUS;
- data->tmu_set_trip_hyst(data, i, temp, hyst);
+
+ data->tmu_set_trip_temp(data, i, trip.temperature / MCELSIUS);
+ data->tmu_set_trip_hyst(data, i, trip.temperature / MCELSIUS,
+ trip.hysteresis / MCELSIUS);
}
data->tmu_clear_irqs(data);
@@ -360,21 +348,23 @@ static void exynos_tmu_control(struct platform_device *pdev, bool on)
}
static void exynos4210_tmu_set_trip_temp(struct exynos_tmu_data *data,
- int trip, u8 temp)
+ int trip_id, u8 temp)
{
- const struct thermal_trip * const trips =
- of_thermal_get_trip_points(data->tzd);
+ struct thermal_trip trip;
u8 ref, th_code;
- ref = trips[0].temperature / MCELSIUS;
+ if (thermal_zone_get_trip(data->tzd, 0, &trip))
+ return;
+
+ ref = trip.temperature / MCELSIUS;
- if (trip == 0) {
+ if (trip_id == 0) {
th_code = temp_to_code(data, ref);
writeb(th_code, data->base + EXYNOS4210_TMU_REG_THRESHOLD_TEMP);
}
temp -= ref;
- writeb(temp, data->base + EXYNOS4210_TMU_REG_TRIG_LEVEL0 + trip * 4);
+ writeb(temp, data->base + EXYNOS4210_TMU_REG_TRIG_LEVEL0 + trip_id * 4);
}
/* failing thresholds are not supported on Exynos4210 */
@@ -562,13 +552,14 @@ static void exynos4210_tmu_control(struct platform_device *pdev, bool on)
{
struct exynos_tmu_data *data = platform_get_drvdata(pdev);
struct thermal_zone_device *tz = data->tzd;
+ struct thermal_trip trip;
unsigned int con, interrupt_en = 0, i;
con = get_con_reg(data, readl(data->base + EXYNOS_TMU_REG_CONTROL));
if (on) {
for (i = 0; i < data->ntrip; i++) {
- if (!of_thermal_is_trip_valid(tz, i))
+ if (thermal_zone_get_trip(tz, i, &trip))
continue;
interrupt_en |=
@@ -592,13 +583,14 @@ static void exynos5433_tmu_control(struct platform_device *pdev, bool on)
{
struct exynos_tmu_data *data = platform_get_drvdata(pdev);
struct thermal_zone_device *tz = data->tzd;
+ struct thermal_trip trip;
unsigned int con, interrupt_en = 0, pd_det_en, i;
con = get_con_reg(data, readl(data->base + EXYNOS_TMU_REG_CONTROL));
if (on) {
for (i = 0; i < data->ntrip; i++) {
- if (!of_thermal_is_trip_valid(tz, i))
+ if (thermal_zone_get_trip(tz, i, &trip))
continue;
interrupt_en |=
@@ -623,13 +615,14 @@ static void exynos7_tmu_control(struct platform_device *pdev, bool on)
{
struct exynos_tmu_data *data = platform_get_drvdata(pdev);
struct thermal_zone_device *tz = data->tzd;
+ struct thermal_trip trip;
unsigned int con, interrupt_en = 0, i;
con = get_con_reg(data, readl(data->base + EXYNOS_TMU_REG_CONTROL));
if (on) {
for (i = 0; i < data->ntrip; i++) {
- if (!of_thermal_is_trip_valid(tz, i))
+ if (thermal_zone_get_trip(tz, i, &trip))
continue;
interrupt_en |=
diff --git a/drivers/thermal/spear_thermal.c b/drivers/thermal/spear_thermal.c
index ee33ed692e4f..6a722b10d738 100644
--- a/drivers/thermal/spear_thermal.c
+++ b/drivers/thermal/spear_thermal.c
@@ -91,7 +91,6 @@ static int spear_thermal_probe(struct platform_device *pdev)
struct thermal_zone_device *spear_thermal = NULL;
struct spear_thermal_dev *stdev;
struct device_node *np = pdev->dev.of_node;
- struct resource *res;
int ret = 0, val;
if (!np || !of_property_read_u32(np, "st,thermal-flags", &val)) {
@@ -104,8 +103,7 @@ static int spear_thermal_probe(struct platform_device *pdev)
return -ENOMEM;
/* Enable thermal sensor */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- stdev->thermal_base = devm_ioremap_resource(&pdev->dev, res);
+ stdev->thermal_base = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
if (IS_ERR(stdev->thermal_base))
return PTR_ERR(stdev->thermal_base);
diff --git a/drivers/thermal/st/Kconfig b/drivers/thermal/st/Kconfig
index 58ece381956b..ecbdf4ef00f4 100644
--- a/drivers/thermal/st/Kconfig
+++ b/drivers/thermal/st/Kconfig
@@ -8,10 +8,6 @@ config ST_THERMAL
help
Support for thermal sensors on STMicroelectronics STi series of SoCs.
-config ST_THERMAL_SYSCFG
- select ST_THERMAL
- tristate "STi series syscfg register access based thermal sensors"
-
config ST_THERMAL_MEMMAP
select ST_THERMAL
tristate "STi series memory mapped access based thermal sensors"
diff --git a/drivers/thermal/st/Makefile b/drivers/thermal/st/Makefile
index c4cfa3c4a660..9bb0342b77f4 100644
--- a/drivers/thermal/st/Makefile
+++ b/drivers/thermal/st/Makefile
@@ -1,5 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_ST_THERMAL) := st_thermal.o
-obj-$(CONFIG_ST_THERMAL_SYSCFG) += st_thermal_syscfg.o
obj-$(CONFIG_ST_THERMAL_MEMMAP) += st_thermal_memmap.o
obj-$(CONFIG_STM32_THERMAL) += stm_thermal.o
diff --git a/drivers/thermal/st/st_thermal.c b/drivers/thermal/st/st_thermal.c
index 1276b95604fe..1009f08e64e3 100644
--- a/drivers/thermal/st/st_thermal.c
+++ b/drivers/thermal/st/st_thermal.c
@@ -134,48 +134,12 @@ static int st_thermal_get_temp(struct thermal_zone_device *th, int *temperature)
return 0;
}
-static int st_thermal_get_trip_type(struct thermal_zone_device *th,
- int trip, enum thermal_trip_type *type)
-{
- struct st_thermal_sensor *sensor = th->devdata;
- struct device *dev = sensor->dev;
-
- switch (trip) {
- case 0:
- *type = THERMAL_TRIP_CRITICAL;
- break;
- default:
- dev_err(dev, "invalid trip point\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int st_thermal_get_trip_temp(struct thermal_zone_device *th,
- int trip, int *temp)
-{
- struct st_thermal_sensor *sensor = th->devdata;
- struct device *dev = sensor->dev;
-
- switch (trip) {
- case 0:
- *temp = mcelsius(sensor->cdata->crit_temp);
- break;
- default:
- dev_err(dev, "Invalid trip point\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
static struct thermal_zone_device_ops st_tz_ops = {
.get_temp = st_thermal_get_temp,
- .get_trip_type = st_thermal_get_trip_type,
- .get_trip_temp = st_thermal_get_trip_temp,
};
+static struct thermal_trip trip;
+
int st_thermal_register(struct platform_device *pdev,
const struct of_device_id *st_thermal_of_match)
{
@@ -238,9 +202,12 @@ int st_thermal_register(struct platform_device *pdev,
polling_delay = sensor->ops->register_enable_irq ? 0 : 1000;
+ trip.temperature = sensor->cdata->crit_temp;
+ trip.type = THERMAL_TRIP_CRITICAL;
+
sensor->thermal_dev =
- thermal_zone_device_register(dev_name(dev), 1, 0, sensor,
- &st_tz_ops, NULL, 0, polling_delay);
+ thermal_zone_device_register_with_trips(dev_name(dev), &trip, 1, 0, sensor,
+ &st_tz_ops, NULL, 0, polling_delay);
if (IS_ERR(sensor->thermal_dev)) {
dev_err(dev, "failed to register thermal zone device\n");
ret = PTR_ERR(sensor->thermal_dev);
diff --git a/drivers/thermal/st/st_thermal_syscfg.c b/drivers/thermal/st/st_thermal_syscfg.c
deleted file mode 100644
index 94efecf35cf8..000000000000
--- a/drivers/thermal/st/st_thermal_syscfg.c
+++ /dev/null
@@ -1,174 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * ST Thermal Sensor Driver for syscfg based sensors.
- * Author: Ajit Pal Singh <ajitpal.singh@st.com>
- *
- * Copyright (C) 2003-2014 STMicroelectronics (R&D) Limited
- */
-
-#include <linux/of.h>
-#include <linux/module.h>
-#include <linux/mfd/syscon.h>
-
-#include "st_thermal.h"
-
-/* STiH415 */
-#define STIH415_SYSCFG_FRONT(num) ((num - 100) * 4)
-#define STIH415_SAS_THSENS_CONF STIH415_SYSCFG_FRONT(178)
-#define STIH415_SAS_THSENS_STATUS STIH415_SYSCFG_FRONT(198)
-#define STIH415_SYSCFG_MPE(num) ((num - 600) * 4)
-#define STIH415_MPE_THSENS_CONF STIH415_SYSCFG_MPE(607)
-#define STIH415_MPE_THSENS_STATUS STIH415_SYSCFG_MPE(667)
-
-/* STiH416 */
-#define STIH416_SYSCFG_FRONT(num) ((num - 1000) * 4)
-#define STIH416_SAS_THSENS_CONF STIH416_SYSCFG_FRONT(1552)
-#define STIH416_SAS_THSENS_STATUS1 STIH416_SYSCFG_FRONT(1554)
-#define STIH416_SAS_THSENS_STATUS2 STIH416_SYSCFG_FRONT(1594)
-
-/* STiD127 */
-#define STID127_SYSCFG_CPU(num) ((num - 700) * 4)
-#define STID127_THSENS_CONF STID127_SYSCFG_CPU(743)
-#define STID127_THSENS_STATUS STID127_SYSCFG_CPU(767)
-
-static const struct reg_field st_415sas_regfields[MAX_REGFIELDS] = {
- [TEMP_PWR] = REG_FIELD(STIH415_SAS_THSENS_CONF, 9, 9),
- [DCORRECT] = REG_FIELD(STIH415_SAS_THSENS_CONF, 4, 8),
- [OVERFLOW] = REG_FIELD(STIH415_SAS_THSENS_STATUS, 8, 8),
- [DATA] = REG_FIELD(STIH415_SAS_THSENS_STATUS, 10, 16),
-};
-
-static const struct reg_field st_415mpe_regfields[MAX_REGFIELDS] = {
- [TEMP_PWR] = REG_FIELD(STIH415_MPE_THSENS_CONF, 8, 8),
- [DCORRECT] = REG_FIELD(STIH415_MPE_THSENS_CONF, 3, 7),
- [OVERFLOW] = REG_FIELD(STIH415_MPE_THSENS_STATUS, 9, 9),
- [DATA] = REG_FIELD(STIH415_MPE_THSENS_STATUS, 11, 18),
-};
-
-static const struct reg_field st_416sas_regfields[MAX_REGFIELDS] = {
- [TEMP_PWR] = REG_FIELD(STIH416_SAS_THSENS_CONF, 9, 9),
- [DCORRECT] = REG_FIELD(STIH416_SAS_THSENS_CONF, 4, 8),
- [OVERFLOW] = REG_FIELD(STIH416_SAS_THSENS_STATUS1, 8, 8),
- [DATA] = REG_FIELD(STIH416_SAS_THSENS_STATUS2, 10, 16),
-};
-
-static const struct reg_field st_127_regfields[MAX_REGFIELDS] = {
- [TEMP_PWR] = REG_FIELD(STID127_THSENS_CONF, 7, 7),
- [DCORRECT] = REG_FIELD(STID127_THSENS_CONF, 2, 6),
- [OVERFLOW] = REG_FIELD(STID127_THSENS_STATUS, 9, 9),
- [DATA] = REG_FIELD(STID127_THSENS_STATUS, 11, 18),
-};
-
-/* Private OPs for System Configuration Register based thermal sensors */
-static int st_syscfg_power_ctrl(struct st_thermal_sensor *sensor,
- enum st_thermal_power_state power_state)
-{
- return regmap_field_write(sensor->pwr, power_state);
-}
-
-static int st_syscfg_alloc_regfields(struct st_thermal_sensor *sensor)
-{
- struct device *dev = sensor->dev;
-
- sensor->pwr = devm_regmap_field_alloc(dev, sensor->regmap,
- sensor->cdata->reg_fields[TEMP_PWR]);
-
- if (IS_ERR(sensor->pwr)) {
- dev_err(dev, "failed to alloc syscfg regfields\n");
- return PTR_ERR(sensor->pwr);
- }
-
- return 0;
-}
-
-static int st_syscfg_regmap_init(struct st_thermal_sensor *sensor)
-{
- sensor->regmap =
- syscon_regmap_lookup_by_compatible(sensor->cdata->sys_compat);
- if (IS_ERR(sensor->regmap)) {
- dev_err(sensor->dev, "failed to find syscfg regmap\n");
- return PTR_ERR(sensor->regmap);
- }
-
- return 0;
-}
-
-static const struct st_thermal_sensor_ops st_syscfg_sensor_ops = {
- .power_ctrl = st_syscfg_power_ctrl,
- .alloc_regfields = st_syscfg_alloc_regfields,
- .regmap_init = st_syscfg_regmap_init,
-};
-
-/* Compatible device data for stih415 sas thermal sensor */
-static const struct st_thermal_compat_data st_415sas_cdata = {
- .sys_compat = "st,stih415-front-syscfg",
- .reg_fields = st_415sas_regfields,
- .ops = &st_syscfg_sensor_ops,
- .calibration_val = 16,
- .temp_adjust_val = 20,
- .crit_temp = 120,
-};
-
-/* Compatible device data for stih415 mpe thermal sensor */
-static const struct st_thermal_compat_data st_415mpe_cdata = {
- .sys_compat = "st,stih415-system-syscfg",
- .reg_fields = st_415mpe_regfields,
- .ops = &st_syscfg_sensor_ops,
- .calibration_val = 16,
- .temp_adjust_val = -103,
- .crit_temp = 120,
-};
-
-/* Compatible device data for stih416 sas thermal sensor */
-static const struct st_thermal_compat_data st_416sas_cdata = {
- .sys_compat = "st,stih416-front-syscfg",
- .reg_fields = st_416sas_regfields,
- .ops = &st_syscfg_sensor_ops,
- .calibration_val = 16,
- .temp_adjust_val = 20,
- .crit_temp = 120,
-};
-
-/* Compatible device data for stid127 thermal sensor */
-static const struct st_thermal_compat_data st_127_cdata = {
- .sys_compat = "st,stid127-cpu-syscfg",
- .reg_fields = st_127_regfields,
- .ops = &st_syscfg_sensor_ops,
- .calibration_val = 8,
- .temp_adjust_val = -103,
- .crit_temp = 120,
-};
-
-static const struct of_device_id st_syscfg_thermal_of_match[] = {
- { .compatible = "st,stih415-sas-thermal", .data = &st_415sas_cdata },
- { .compatible = "st,stih415-mpe-thermal", .data = &st_415mpe_cdata },
- { .compatible = "st,stih416-sas-thermal", .data = &st_416sas_cdata },
- { .compatible = "st,stid127-thermal", .data = &st_127_cdata },
- { /* sentinel */ }
-};
-MODULE_DEVICE_TABLE(of, st_syscfg_thermal_of_match);
-
-static int st_syscfg_probe(struct platform_device *pdev)
-{
- return st_thermal_register(pdev, st_syscfg_thermal_of_match);
-}
-
-static int st_syscfg_remove(struct platform_device *pdev)
-{
- return st_thermal_unregister(pdev);
-}
-
-static struct platform_driver st_syscfg_thermal_driver = {
- .driver = {
- .name = "st_syscfg_thermal",
- .pm = &st_thermal_pm_ops,
- .of_match_table = st_syscfg_thermal_of_match,
- },
- .probe = st_syscfg_probe,
- .remove = st_syscfg_remove,
-};
-module_platform_driver(st_syscfg_thermal_driver);
-
-MODULE_AUTHOR("STMicroelectronics (R&D) Limited <ajitpal.singh@st.com>");
-MODULE_DESCRIPTION("STMicroelectronics STi SoC Thermal Sensor Driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/thermal/st/stm_thermal.c b/drivers/thermal/st/stm_thermal.c
index e7834ccc7976..735401958f01 100644
--- a/drivers/thermal/st/stm_thermal.c
+++ b/drivers/thermal/st/stm_thermal.c
@@ -19,7 +19,6 @@
#include <linux/platform_device.h>
#include <linux/thermal.h>
-#include "../thermal_core.h"
#include "../thermal_hwmon.h"
/* DTS register offsets */
diff --git a/drivers/thermal/sun8i_thermal.c b/drivers/thermal/sun8i_thermal.c
index e64d06d1328c..497beac63e5d 100644
--- a/drivers/thermal/sun8i_thermal.c
+++ b/drivers/thermal/sun8i_thermal.c
@@ -210,7 +210,7 @@ static int sun8i_h3_ths_calibrate(struct ths_device *tmdev,
regmap_update_bits(tmdev->regmap,
SUN8I_THS_TEMP_CALIB + (4 * (i >> 1)),
- 0xfff << offset,
+ TEMP_CALIB_MASK << offset,
caldata[i] << offset);
}
@@ -271,7 +271,7 @@ static int sun50i_h6_ths_calibrate(struct ths_device *tmdev,
offset = (i % 2) * 16;
regmap_update_bits(tmdev->regmap,
SUN50I_H6_THS_TEMP_CALIB + (i / 2 * 4),
- 0xfff << offset,
+ TEMP_CALIB_MASK << offset,
cdata << offset);
}
diff --git a/drivers/thermal/tegra/soctherm.c b/drivers/thermal/tegra/soctherm.c
index 1efe470f31e9..220873298d77 100644
--- a/drivers/thermal/tegra/soctherm.c
+++ b/drivers/thermal/tegra/soctherm.c
@@ -582,23 +582,23 @@ static int tsensor_group_thermtrip_get(struct tegra_soctherm *ts, int id)
return temp;
}
-static int tegra_thermctl_set_trip_temp(struct thermal_zone_device *tz, int trip, int temp)
+static int tegra_thermctl_set_trip_temp(struct thermal_zone_device *tz, int trip_id, int temp)
{
struct tegra_thermctl_zone *zone = tz->devdata;
struct tegra_soctherm *ts = zone->ts;
+ struct thermal_trip trip;
const struct tegra_tsensor_group *sg = zone->sg;
struct device *dev = zone->dev;
- enum thermal_trip_type type;
int ret;
if (!tz)
return -EINVAL;
- ret = tz->ops->get_trip_type(tz, trip, &type);
+ ret = __thermal_zone_get_trip(tz, trip_id, &trip);
if (ret)
return ret;
- if (type == THERMAL_TRIP_CRITICAL) {
+ if (trip.type == THERMAL_TRIP_CRITICAL) {
/*
* If thermtrips property is set in DT,
* doesn't need to program critical type trip to HW,
@@ -609,7 +609,7 @@ static int tegra_thermctl_set_trip_temp(struct thermal_zone_device *tz, int trip
else
return 0;
- } else if (type == THERMAL_TRIP_HOT) {
+ } else if (trip.type == THERMAL_TRIP_HOT) {
int i;
for (i = 0; i < THROTTLE_SIZE; i++) {
@@ -620,7 +620,7 @@ static int tegra_thermctl_set_trip_temp(struct thermal_zone_device *tz, int trip
continue;
cdev = ts->throt_cfgs[i].cdev;
- if (get_thermal_instance(tz, cdev, trip))
+ if (get_thermal_instance(tz, cdev, trip_id))
stc = find_throttle_cfg_by_name(ts, cdev->type);
else
continue;
@@ -687,25 +687,20 @@ static const struct thermal_zone_device_ops tegra_of_thermal_ops = {
.set_trips = tegra_thermctl_set_trips,
};
-static int get_hot_temp(struct thermal_zone_device *tz, int *trip, int *temp)
+static int get_hot_temp(struct thermal_zone_device *tz, int *trip_id, int *temp)
{
- int ntrips, i, ret;
- enum thermal_trip_type type;
+ int i, ret;
+ struct thermal_trip trip;
- ntrips = of_thermal_get_ntrips(tz);
- if (ntrips <= 0)
- return -EINVAL;
+ for (i = 0; i < thermal_zone_get_num_trips(tz); i++) {
- for (i = 0; i < ntrips; i++) {
- ret = tz->ops->get_trip_type(tz, i, &type);
+ ret = thermal_zone_get_trip(tz, i, &trip);
if (ret)
return -EINVAL;
- if (type == THERMAL_TRIP_HOT) {
- ret = tz->ops->get_trip_temp(tz, i, temp);
- if (!ret)
- *trip = i;
- return ret;
+ if (trip.type == THERMAL_TRIP_HOT) {
+ *trip_id = i;
+ return 0;
}
}
@@ -747,7 +742,7 @@ static int tegra_soctherm_set_hwtrips(struct device *dev,
/* Get thermtrips. If missing, try to get critical trips. */
temperature = tsensor_group_thermtrip_get(ts, sg->id);
if (min_low_temp == temperature)
- if (tz->ops->get_crit_temp(tz, &temperature))
+ if (thermal_zone_get_crit_temp(tz, &temperature))
temperature = max_high_temp;
ret = thermtrip_program(dev, sg, temperature);
diff --git a/drivers/thermal/tegra/tegra30-tsensor.c b/drivers/thermal/tegra/tegra30-tsensor.c
index c34501287e96..b3218b71b6d9 100644
--- a/drivers/thermal/tegra/tegra30-tsensor.c
+++ b/drivers/thermal/tegra/tegra30-tsensor.c
@@ -28,7 +28,6 @@
#include <soc/tegra/fuse.h>
-#include "../thermal_core.h"
#include "../thermal_hwmon.h"
#define TSENSOR_SENSOR0_CONFIG0 0x0
@@ -316,18 +315,17 @@ static void tegra_tsensor_get_hw_channel_trips(struct thermal_zone_device *tzd,
*hot_trip = 85000;
*crit_trip = 90000;
- for (i = 0; i < tzd->num_trips; i++) {
- enum thermal_trip_type type;
- int trip_temp;
+ for (i = 0; i < thermal_zone_get_num_trips(tzd); i++) {
- tzd->ops->get_trip_temp(tzd, i, &trip_temp);
- tzd->ops->get_trip_type(tzd, i, &type);
+ struct thermal_trip trip;
- if (type == THERMAL_TRIP_HOT)
- *hot_trip = trip_temp;
+ thermal_zone_get_trip(tzd, i, &trip);
- if (type == THERMAL_TRIP_CRITICAL)
- *crit_trip = trip_temp;
+ if (trip.type == THERMAL_TRIP_HOT)
+ *hot_trip = trip.temperature;
+
+ if (trip.type == THERMAL_TRIP_CRITICAL)
+ *crit_trip = trip.temperature;
}
/* clamp hardware trips to the calibration limits */
diff --git a/drivers/thermal/thermal_acpi.c b/drivers/thermal/thermal_acpi.c
new file mode 100644
index 000000000000..0e5698818f69
--- /dev/null
+++ b/drivers/thermal/thermal_acpi.c
@@ -0,0 +1,117 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2023 Linaro Limited
+ * Copyright 2023 Intel Corporation
+ *
+ * Library routines for populating a generic thermal trip point structure
+ * with data obtained by evaluating a specific object in the ACPI Namespace.
+ */
+#include <linux/acpi.h>
+#include <linux/units.h>
+
+#include "thermal_core.h"
+
+/*
+ * Minimum temperature for full military grade is 218°K (-55°C) and
+ * max temperature is 448°K (175°C). We can consider those values as
+ * the boundaries for the [trips] temperature returned by the
+ * firmware. Any values out of these boundaries may be considered
+ * bogus and we can assume the firmware has no data to provide.
+ */
+#define TEMP_MIN_DECIK 2180
+#define TEMP_MAX_DECIK 4480
+
+static int thermal_acpi_trip_temp(struct acpi_device *adev, char *obj_name,
+ int *ret_temp)
+{
+ unsigned long long temp;
+ acpi_status status;
+
+ status = acpi_evaluate_integer(adev->handle, obj_name, NULL, &temp);
+ if (ACPI_FAILURE(status)) {
+ acpi_handle_debug(adev->handle, "%s evaluation failed\n", obj_name);
+ return -ENODATA;
+ }
+
+ if (temp >= TEMP_MIN_DECIK && temp <= TEMP_MAX_DECIK) {
+ *ret_temp = deci_kelvin_to_millicelsius(temp);
+ } else {
+ acpi_handle_debug(adev->handle, "%s result %llu out of range\n",
+ obj_name, temp);
+ *ret_temp = THERMAL_TEMP_INVALID;
+ }
+
+ return 0;
+}
+
+/**
+ * thermal_acpi_active_trip_temp - Retrieve active trip point temperature
+ * @adev: Target thermal zone ACPI device object.
+ * @id: Active cooling level (0 - 9).
+ * @ret_temp: Address to store the retrieved temperature value on success.
+ *
+ * Evaluate the _ACx object for the thermal zone represented by @adev to obtain
+ * the temperature of the active cooling trip point corresponding to the active
+ * cooling level given by @id.
+ *
+ * Return 0 on success or a negative error value on failure.
+ */
+int thermal_acpi_active_trip_temp(struct acpi_device *adev, int id, int *ret_temp)
+{
+ char obj_name[] = {'_', 'A', 'C', '0' + id, '\0'};
+
+ if (id < 0 || id > 9)
+ return -EINVAL;
+
+ return thermal_acpi_trip_temp(adev, obj_name, ret_temp);
+}
+EXPORT_SYMBOL_GPL(thermal_acpi_active_trip_temp);
+
+/**
+ * thermal_acpi_passive_trip_temp - Retrieve passive trip point temperature
+ * @adev: Target thermal zone ACPI device object.
+ * @ret_temp: Address to store the retrieved temperature value on success.
+ *
+ * Evaluate the _PSV object for the thermal zone represented by @adev to obtain
+ * the temperature of the passive cooling trip point.
+ *
+ * Return 0 on success or -ENODATA on failure.
+ */
+int thermal_acpi_passive_trip_temp(struct acpi_device *adev, int *ret_temp)
+{
+ return thermal_acpi_trip_temp(adev, "_PSV", ret_temp);
+}
+EXPORT_SYMBOL_GPL(thermal_acpi_passive_trip_temp);
+
+/**
+ * thermal_acpi_hot_trip_temp - Retrieve hot trip point temperature
+ * @adev: Target thermal zone ACPI device object.
+ * @ret_temp: Address to store the retrieved temperature value on success.
+ *
+ * Evaluate the _HOT object for the thermal zone represented by @adev to obtain
+ * the temperature of the trip point at which the system is expected to be put
+ * into the S4 sleep state.
+ *
+ * Return 0 on success or -ENODATA on failure.
+ */
+int thermal_acpi_hot_trip_temp(struct acpi_device *adev, int *ret_temp)
+{
+ return thermal_acpi_trip_temp(adev, "_HOT", ret_temp);
+}
+EXPORT_SYMBOL_GPL(thermal_acpi_hot_trip_temp);
+
+/**
+ * thermal_acpi_critical_trip_temp - Retrieve critical trip point temperature
+ * @adev: Target thermal zone ACPI device object.
+ * @ret_temp: Address to store the retrieved temperature value on success.
+ *
+ * Evaluate the _CRT object for the thermal zone represented by @adev to obtain
+ * the temperature of the critical cooling trip point.
+ *
+ * Return 0 on success or -ENODATA on failure.
+ */
+int thermal_acpi_critical_trip_temp(struct acpi_device *adev, int *ret_temp)
+{
+ return thermal_acpi_trip_temp(adev, "_CRT", ret_temp);
+}
+EXPORT_SYMBOL_GPL(thermal_acpi_critical_trip_temp);
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 77bd47d976a2..55679fd86505 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -229,10 +229,9 @@ int thermal_build_list_of_policies(char *buf)
mutex_lock(&thermal_governor_lock);
list_for_each_entry(pos, &thermal_governor_list, governor_list) {
- count += scnprintf(buf + count, PAGE_SIZE - count, "%s ",
- pos->name);
+ count += sysfs_emit_at(buf, count, "%s ", pos->name);
}
- count += scnprintf(buf + count, PAGE_SIZE - count, "\n");
+ count += sysfs_emit_at(buf, count, "\n");
mutex_unlock(&thermal_governor_lock);
@@ -344,35 +343,31 @@ static void handle_critical_trips(struct thermal_zone_device *tz,
tz->ops->critical(tz);
}
-static void handle_thermal_trip(struct thermal_zone_device *tz, int trip)
+static void handle_thermal_trip(struct thermal_zone_device *tz, int trip_id)
{
- enum thermal_trip_type type;
- int trip_temp, hyst = 0;
+ struct thermal_trip trip;
/* Ignore disabled trip points */
- if (test_bit(trip, &tz->trips_disabled))
+ if (test_bit(trip_id, &tz->trips_disabled))
return;
- tz->ops->get_trip_temp(tz, trip, &trip_temp);
- tz->ops->get_trip_type(tz, trip, &type);
- if (tz->ops->get_trip_hyst)
- tz->ops->get_trip_hyst(tz, trip, &hyst);
+ __thermal_zone_get_trip(tz, trip_id, &trip);
if (tz->last_temperature != THERMAL_TEMP_INVALID) {
- if (tz->last_temperature < trip_temp &&
- tz->temperature >= trip_temp)
- thermal_notify_tz_trip_up(tz->id, trip,
+ if (tz->last_temperature < trip.temperature &&
+ tz->temperature >= trip.temperature)
+ thermal_notify_tz_trip_up(tz->id, trip_id,
tz->temperature);
- if (tz->last_temperature >= trip_temp &&
- tz->temperature < (trip_temp - hyst))
- thermal_notify_tz_trip_down(tz->id, trip,
+ if (tz->last_temperature >= trip.temperature &&
+ tz->temperature < (trip.temperature - trip.hysteresis))
+ thermal_notify_tz_trip_down(tz->id, trip_id,
tz->temperature);
}
- if (type == THERMAL_TRIP_CRITICAL || type == THERMAL_TRIP_HOT)
- handle_critical_trips(tz, trip, trip_temp, type);
+ if (trip.type == THERMAL_TRIP_CRITICAL || trip.type == THERMAL_TRIP_HOT)
+ handle_critical_trips(tz, trip_id, trip.temperature, trip.type);
else
- handle_non_critical_trips(tz, trip);
+ handle_non_critical_trips(tz, trip_id);
}
static void update_temperature(struct thermal_zone_device *tz)
@@ -774,14 +769,14 @@ static void thermal_release(struct device *dev)
} else if (!strncmp(dev_name(dev), "cooling_device",
sizeof("cooling_device") - 1)) {
cdev = to_cooling_device(dev);
+ thermal_cooling_device_destroy_sysfs(cdev);
+ kfree(cdev->type);
+ ida_free(&thermal_cdev_ida, cdev->id);
kfree(cdev);
}
}
-static struct class thermal_class = {
- .name = "thermal",
- .dev_release = thermal_release,
-};
+static struct class *thermal_class;
static inline
void print_bind_err_msg(struct thermal_zone_device *tz,
@@ -884,6 +879,9 @@ __thermal_cooling_device_register(struct device_node *np,
!ops->set_cur_state)
return ERR_PTR(-EINVAL);
+ if (!thermal_class)
+ return ERR_PTR(-ENODEV);
+
cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
if (!cdev)
return ERR_PTR(-ENOMEM);
@@ -905,27 +903,25 @@ __thermal_cooling_device_register(struct device_node *np,
cdev->np = np;
cdev->ops = ops;
cdev->updated = false;
- cdev->device.class = &thermal_class;
+ cdev->device.class = thermal_class;
cdev->devdata = devdata;
ret = cdev->ops->get_max_state(cdev, &cdev->max_state);
- if (ret) {
- kfree(cdev->type);
- goto out_ida_remove;
- }
+ if (ret)
+ goto out_cdev_type;
thermal_cooling_device_setup_sysfs(cdev);
ret = dev_set_name(&cdev->device, "cooling_device%d", cdev->id);
- if (ret) {
- kfree(cdev->type);
- thermal_cooling_device_destroy_sysfs(cdev);
- goto out_ida_remove;
- }
+ if (ret)
+ goto out_cooling_dev;
ret = device_register(&cdev->device);
- if (ret)
- goto out_kfree_type;
+ if (ret) {
+ /* thermal_release() handles rest of the cleanup */
+ put_device(&cdev->device);
+ return ERR_PTR(ret);
+ }
/* Add 'this' new cdev to the global cdev list */
mutex_lock(&thermal_list_lock);
@@ -944,13 +940,10 @@ __thermal_cooling_device_register(struct device_node *np,
return cdev;
-out_kfree_type:
+out_cooling_dev:
thermal_cooling_device_destroy_sysfs(cdev);
+out_cdev_type:
kfree(cdev->type);
- put_device(&cdev->device);
-
- /* thermal_release() takes care of the rest */
- cdev = NULL;
out_ida_remove:
ida_free(&thermal_cdev_ida, id);
out_kfree_cdev:
@@ -1111,11 +1104,7 @@ void thermal_cooling_device_unregister(struct thermal_cooling_device *cdev)
mutex_unlock(&thermal_list_lock);
- ida_free(&thermal_cdev_ida, cdev->id);
- device_del(&cdev->device);
- thermal_cooling_device_destroy_sysfs(cdev);
- kfree(cdev->type);
- put_device(&cdev->device);
+ device_unregister(&cdev->device);
}
EXPORT_SYMBOL_GPL(thermal_cooling_device_unregister);
@@ -1166,6 +1155,32 @@ static void thermal_set_delay_jiffies(unsigned long *delay_jiffies, int delay_ms
*delay_jiffies = round_jiffies(*delay_jiffies);
}
+int thermal_zone_get_crit_temp(struct thermal_zone_device *tz, int *temp)
+{
+ int i, ret = -EINVAL;
+
+ if (tz->ops->get_crit_temp)
+ return tz->ops->get_crit_temp(tz, temp);
+
+ if (!tz->trips)
+ return -EINVAL;
+
+ mutex_lock(&tz->lock);
+
+ for (i = 0; i < tz->num_trips; i++) {
+ if (tz->trips[i].type == THERMAL_TRIP_CRITICAL) {
+ *temp = tz->trips[i].temperature;
+ ret = 0;
+ break;
+ }
+ }
+
+ mutex_unlock(&tz->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(thermal_zone_get_crit_temp);
+
/**
* thermal_zone_device_register_with_trips() - register a new thermal zone device
* @type: the thermal zone device type
@@ -1198,8 +1213,6 @@ thermal_zone_device_register_with_trips(const char *type, struct thermal_trip *t
int polling_delay)
{
struct thermal_zone_device *tz;
- enum thermal_trip_type trip_type;
- int trip_temp;
int id;
int result;
int count;
@@ -1239,9 +1252,12 @@ thermal_zone_device_register_with_trips(const char *type, struct thermal_trip *t
return ERR_PTR(-EINVAL);
}
- if (num_trips > 0 && (!ops->get_trip_type || !ops->get_trip_temp))
+ if (num_trips > 0 && (!ops->get_trip_type || !ops->get_trip_temp) && !trips)
return ERR_PTR(-EINVAL);
+ if (!thermal_class)
+ return ERR_PTR(-ENODEV);
+
tz = kzalloc(sizeof(*tz), GFP_KERNEL);
if (!tz)
return ERR_PTR(-ENOMEM);
@@ -1263,7 +1279,7 @@ thermal_zone_device_register_with_trips(const char *type, struct thermal_trip *t
tz->ops = ops;
tz->tzp = tzp;
- tz->device.class = &thermal_class;
+ tz->device.class = thermal_class;
tz->devdata = devdata;
tz->trips = trips;
tz->num_trips = num_trips;
@@ -1290,9 +1306,10 @@ thermal_zone_device_register_with_trips(const char *type, struct thermal_trip *t
goto release_device;
for (count = 0; count < num_trips; count++) {
- if (tz->ops->get_trip_type(tz, count, &trip_type) ||
- tz->ops->get_trip_temp(tz, count, &trip_temp) ||
- !trip_temp)
+ struct thermal_trip trip;
+
+ result = thermal_zone_get_trip(tz, count, &trip);
+ if (result)
set_bit(count, &tz->trips_disabled);
}
@@ -1505,11 +1522,23 @@ static int __init thermal_init(void)
result = thermal_register_governors();
if (result)
- goto error;
+ goto unregister_netlink;
- result = class_register(&thermal_class);
- if (result)
+ thermal_class = kzalloc(sizeof(*thermal_class), GFP_KERNEL);
+ if (!thermal_class) {
+ result = -ENOMEM;
goto unregister_governors;
+ }
+
+ thermal_class->name = "thermal";
+ thermal_class->dev_release = thermal_release;
+
+ result = class_register(thermal_class);
+ if (result) {
+ kfree(thermal_class);
+ thermal_class = NULL;
+ goto unregister_governors;
+ }
result = register_pm_notifier(&thermal_pm_nb);
if (result)
@@ -1520,9 +1549,9 @@ static int __init thermal_init(void)
unregister_governors:
thermal_unregister_governors();
+unregister_netlink:
+ thermal_netlink_exit();
error:
- ida_destroy(&thermal_tz_ida);
- ida_destroy(&thermal_cdev_ida);
mutex_destroy(&thermal_list_lock);
mutex_destroy(&thermal_governor_lock);
return result;
diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h
index b834cb273429..7af54382e915 100644
--- a/drivers/thermal/thermal_core.h
+++ b/drivers/thermal/thermal_core.h
@@ -52,6 +52,10 @@ int for_each_thermal_cooling_device(int (*cb)(struct thermal_cooling_device *,
int for_each_thermal_governor(int (*cb)(struct thermal_governor *, void *),
void *thermal_governor);
+int __for_each_thermal_trip(struct thermal_zone_device *,
+ int (*cb)(struct thermal_trip *, void *),
+ void *);
+
struct thermal_zone_device *thermal_zone_get_by_id(int id);
struct thermal_attr {
@@ -114,6 +118,8 @@ void __thermal_zone_device_update(struct thermal_zone_device *tz,
/* Helpers */
void __thermal_zone_set_trips(struct thermal_zone_device *tz);
+int __thermal_zone_get_trip(struct thermal_zone_device *tz, int trip_id,
+ struct thermal_trip *trip);
int __thermal_zone_get_temp(struct thermal_zone_device *tz, int *temp);
/* sysfs I/F */
@@ -137,28 +143,6 @@ thermal_cooling_device_stats_update(struct thermal_cooling_device *cdev,
#endif /* CONFIG_THERMAL_STATISTICS */
/* device tree support */
-#ifdef CONFIG_THERMAL_OF
-int of_thermal_get_ntrips(struct thermal_zone_device *);
-bool of_thermal_is_trip_valid(struct thermal_zone_device *, int);
-const struct thermal_trip *
-of_thermal_get_trip_points(struct thermal_zone_device *);
-#else
-static inline int of_thermal_get_ntrips(struct thermal_zone_device *tz)
-{
- return 0;
-}
-static inline bool of_thermal_is_trip_valid(struct thermal_zone_device *tz,
- int trip)
-{
- return false;
-}
-static inline const struct thermal_trip *
-of_thermal_get_trip_points(struct thermal_zone_device *tz)
-{
- return NULL;
-}
-#endif
-
int thermal_zone_device_is_enabled(struct thermal_zone_device *tz);
#endif /* __THERMAL_CORE_H__ */
diff --git a/drivers/thermal/thermal_helpers.c b/drivers/thermal/thermal_helpers.c
index 56aa2e88f34f..0f648131b0b5 100644
--- a/drivers/thermal/thermal_helpers.c
+++ b/drivers/thermal/thermal_helpers.c
@@ -83,7 +83,7 @@ int __thermal_zone_get_temp(struct thermal_zone_device *tz, int *temp)
int ret = -EINVAL;
int count;
int crit_temp = INT_MAX;
- enum thermal_trip_type type;
+ struct thermal_trip trip;
lockdep_assert_held(&tz->lock);
@@ -91,10 +91,9 @@ int __thermal_zone_get_temp(struct thermal_zone_device *tz, int *temp)
if (IS_ENABLED(CONFIG_THERMAL_EMULATION) && tz->emul_temperature) {
for (count = 0; count < tz->num_trips; count++) {
- ret = tz->ops->get_trip_type(tz, count, &type);
- if (!ret && type == THERMAL_TRIP_CRITICAL) {
- ret = tz->ops->get_trip_temp(tz, count,
- &crit_temp);
+ ret = __thermal_zone_get_trip(tz, count, &trip);
+ if (!ret && trip.type == THERMAL_TRIP_CRITICAL) {
+ crit_temp = trip.temperature;
break;
}
}
@@ -147,67 +146,6 @@ unlock:
}
EXPORT_SYMBOL_GPL(thermal_zone_get_temp);
-/**
- * __thermal_zone_set_trips - Computes the next trip points for the driver
- * @tz: a pointer to a thermal zone device structure
- *
- * The function computes the next temperature boundaries by browsing
- * the trip points. The result is the closer low and high trip points
- * to the current temperature. These values are passed to the backend
- * driver to let it set its own notification mechanism (usually an
- * interrupt).
- *
- * This function must be called with tz->lock held. Both tz and tz->ops
- * must be valid pointers.
- *
- * It does not return a value
- */
-void __thermal_zone_set_trips(struct thermal_zone_device *tz)
-{
- int low = -INT_MAX;
- int high = INT_MAX;
- int trip_temp, hysteresis;
- int i, ret;
-
- lockdep_assert_held(&tz->lock);
-
- if (!tz->ops->set_trips || !tz->ops->get_trip_hyst)
- return;
-
- for (i = 0; i < tz->num_trips; i++) {
- int trip_low;
-
- tz->ops->get_trip_temp(tz, i, &trip_temp);
- tz->ops->get_trip_hyst(tz, i, &hysteresis);
-
- trip_low = trip_temp - hysteresis;
-
- if (trip_low < tz->temperature && trip_low > low)
- low = trip_low;
-
- if (trip_temp > tz->temperature && trip_temp < high)
- high = trip_temp;
- }
-
- /* No need to change trip points */
- if (tz->prev_low_trip == low && tz->prev_high_trip == high)
- return;
-
- tz->prev_low_trip = low;
- tz->prev_high_trip = high;
-
- dev_dbg(&tz->device,
- "new temperature boundaries: %d < x < %d\n", low, high);
-
- /*
- * Set a temperature window. When this window is left the driver
- * must inform the thermal core via thermal_zone_device_update.
- */
- ret = tz->ops->set_trips(tz, low, high);
- if (ret)
- dev_err(&tz->device, "Failed to set trips: %d\n", ret);
-}
-
static void thermal_cdev_set_cur_state(struct thermal_cooling_device *cdev,
int target)
{
diff --git a/drivers/thermal/thermal_mmio.c b/drivers/thermal/thermal_mmio.c
index 39c921415989..ea616731066c 100644
--- a/drivers/thermal/thermal_mmio.c
+++ b/drivers/thermal/thermal_mmio.c
@@ -39,7 +39,6 @@ static const struct thermal_zone_device_ops thermal_mmio_ops = {
static int thermal_mmio_probe(struct platform_device *pdev)
{
- struct resource *resource;
struct thermal_mmio *sensor;
int (*sensor_init_func)(struct platform_device *pdev,
struct thermal_mmio *sensor);
@@ -51,8 +50,7 @@ static int thermal_mmio_probe(struct platform_device *pdev)
if (!sensor)
return -ENOMEM;
- resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- sensor->mmio_base = devm_ioremap_resource(&pdev->dev, resource);
+ sensor->mmio_base = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
if (IS_ERR(sensor->mmio_base))
return PTR_ERR(sensor->mmio_base);
diff --git a/drivers/thermal/thermal_netlink.c b/drivers/thermal/thermal_netlink.c
index e2d78a996b5f..08bc46c3ec7b 100644
--- a/drivers/thermal/thermal_netlink.c
+++ b/drivers/thermal/thermal_netlink.c
@@ -452,7 +452,8 @@ static int thermal_genl_cmd_tz_get_trip(struct param *p)
struct sk_buff *msg = p->msg;
struct thermal_zone_device *tz;
struct nlattr *start_trip;
- int i, id;
+ struct thermal_trip trip;
+ int ret, i, id;
if (!p->attrs[THERMAL_GENL_ATTR_TZ_ID])
return -EINVAL;
@@ -471,18 +472,14 @@ static int thermal_genl_cmd_tz_get_trip(struct param *p)
for (i = 0; i < tz->num_trips; i++) {
- enum thermal_trip_type type;
- int temp, hyst = 0;
-
- tz->ops->get_trip_type(tz, i, &type);
- tz->ops->get_trip_temp(tz, i, &temp);
- if (tz->ops->get_trip_hyst)
- tz->ops->get_trip_hyst(tz, i, &hyst);
+ ret = __thermal_zone_get_trip(tz, i, &trip);
+ if (ret)
+ goto out_cancel_nest;
if (nla_put_u32(msg, THERMAL_GENL_ATTR_TZ_TRIP_ID, i) ||
- nla_put_u32(msg, THERMAL_GENL_ATTR_TZ_TRIP_TYPE, type) ||
- nla_put_u32(msg, THERMAL_GENL_ATTR_TZ_TRIP_TEMP, temp) ||
- nla_put_u32(msg, THERMAL_GENL_ATTR_TZ_TRIP_HYST, hyst))
+ nla_put_u32(msg, THERMAL_GENL_ATTR_TZ_TRIP_TYPE, trip.type) ||
+ nla_put_u32(msg, THERMAL_GENL_ATTR_TZ_TRIP_TEMP, trip.temperature) ||
+ nla_put_u32(msg, THERMAL_GENL_ATTR_TZ_TRIP_HYST, trip.hysteresis))
goto out_cancel_nest;
}
@@ -702,3 +699,8 @@ int __init thermal_netlink_init(void)
{
return genl_register_family(&thermal_gnl_family);
}
+
+void __init thermal_netlink_exit(void)
+{
+ genl_unregister_family(&thermal_gnl_family);
+}
diff --git a/drivers/thermal/thermal_netlink.h b/drivers/thermal/thermal_netlink.h
index 1052f523188d..0a9987c3bc57 100644
--- a/drivers/thermal/thermal_netlink.h
+++ b/drivers/thermal/thermal_netlink.h
@@ -13,6 +13,7 @@ struct thermal_genl_cpu_caps {
/* Netlink notification function */
#ifdef CONFIG_THERMAL_NETLINK
int __init thermal_netlink_init(void);
+void __init thermal_netlink_exit(void);
int thermal_notify_tz_create(int tz_id, const char *name);
int thermal_notify_tz_delete(int tz_id);
int thermal_notify_tz_enable(int tz_id);
@@ -115,4 +116,6 @@ static inline int thermal_genl_cpu_capability_event(int count, struct thermal_ge
return 0;
}
+static inline void __init thermal_netlink_exit(void) {}
+
#endif /* CONFIG_THERMAL_NETLINK */
diff --git a/drivers/thermal/thermal_of.c b/drivers/thermal/thermal_of.c
index aacba30bc10c..ff4d12ef51bc 100644
--- a/drivers/thermal/thermal_of.c
+++ b/drivers/thermal/thermal_of.c
@@ -19,117 +19,6 @@
#include "thermal_core.h"
-/**
- * of_thermal_get_ntrips - function to export number of available trip
- * points.
- * @tz: pointer to a thermal zone
- *
- * This function is a globally visible wrapper to get number of trip points
- * stored in the local struct __thermal_zone
- *
- * Return: number of available trip points, -ENODEV when data not available
- */
-int of_thermal_get_ntrips(struct thermal_zone_device *tz)
-{
- return tz->num_trips;
-}
-EXPORT_SYMBOL_GPL(of_thermal_get_ntrips);
-
-/**
- * of_thermal_is_trip_valid - function to check if trip point is valid
- *
- * @tz: pointer to a thermal zone
- * @trip: trip point to evaluate
- *
- * This function is responsible for checking if passed trip point is valid
- *
- * Return: true if trip point is valid, false otherwise
- */
-bool of_thermal_is_trip_valid(struct thermal_zone_device *tz, int trip)
-{
- if (trip >= tz->num_trips || trip < 0)
- return false;
-
- return true;
-}
-EXPORT_SYMBOL_GPL(of_thermal_is_trip_valid);
-
-/**
- * of_thermal_get_trip_points - function to get access to a globally exported
- * trip points
- *
- * @tz: pointer to a thermal zone
- *
- * This function provides a pointer to trip points table
- *
- * Return: pointer to trip points table, NULL otherwise
- */
-const struct thermal_trip *
-of_thermal_get_trip_points(struct thermal_zone_device *tz)
-{
- return tz->trips;
-}
-EXPORT_SYMBOL_GPL(of_thermal_get_trip_points);
-
-static int of_thermal_get_trip_type(struct thermal_zone_device *tz, int trip,
- enum thermal_trip_type *type)
-{
- if (trip >= tz->num_trips || trip < 0)
- return -EDOM;
-
- *type = tz->trips[trip].type;
-
- return 0;
-}
-
-static int of_thermal_get_trip_temp(struct thermal_zone_device *tz, int trip,
- int *temp)
-{
- if (trip >= tz->num_trips || trip < 0)
- return -EDOM;
-
- *temp = tz->trips[trip].temperature;
-
- return 0;
-}
-
-static int of_thermal_get_trip_hyst(struct thermal_zone_device *tz, int trip,
- int *hyst)
-{
- if (trip >= tz->num_trips || trip < 0)
- return -EDOM;
-
- *hyst = tz->trips[trip].hysteresis;
-
- return 0;
-}
-
-static int of_thermal_set_trip_hyst(struct thermal_zone_device *tz, int trip,
- int hyst)
-{
- if (trip >= tz->num_trips || trip < 0)
- return -EDOM;
-
- /* thermal framework should take care of data->mask & (1 << trip) */
- tz->trips[trip].hysteresis = hyst;
-
- return 0;
-}
-
-static int of_thermal_get_crit_temp(struct thermal_zone_device *tz,
- int *temp)
-{
- int i;
-
- for (i = 0; i < tz->num_trips; i++)
- if (tz->trips[i].type == THERMAL_TRIP_CRITICAL) {
- *temp = tz->trips[i].temperature;
- return 0;
- }
-
- return -EINVAL;
-}
-
/*** functions parsing device tree nodes ***/
static int of_find_trip_id(struct device_node *np, struct device_node *trip)
@@ -628,11 +517,6 @@ struct thermal_zone_device *thermal_of_zone_register(struct device_node *sensor,
goto out_kfree_trips;
}
- of_ops->get_trip_type = of_ops->get_trip_type ? : of_thermal_get_trip_type;
- of_ops->get_trip_temp = of_ops->get_trip_temp ? : of_thermal_get_trip_temp;
- of_ops->get_trip_hyst = of_ops->get_trip_hyst ? : of_thermal_get_trip_hyst;
- of_ops->set_trip_hyst = of_ops->set_trip_hyst ? : of_thermal_set_trip_hyst;
- of_ops->get_crit_temp = of_ops->get_crit_temp ? : of_thermal_get_crit_temp;
of_ops->bind = thermal_of_bind;
of_ops->unbind = thermal_of_unbind;
diff --git a/drivers/thermal/thermal_sysfs.c b/drivers/thermal/thermal_sysfs.c
index d97f0bc0a26b..cef860deaf91 100644
--- a/drivers/thermal/thermal_sysfs.c
+++ b/drivers/thermal/thermal_sysfs.c
@@ -83,27 +83,25 @@ trip_point_type_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct thermal_zone_device *tz = to_thermal_zone(dev);
- enum thermal_trip_type type;
- int trip, result;
+ struct thermal_trip trip;
+ int trip_id, result;
- if (!tz->ops->get_trip_type)
- return -EPERM;
-
- if (sscanf(attr->attr.name, "trip_point_%d_type", &trip) != 1)
+ if (sscanf(attr->attr.name, "trip_point_%d_type", &trip_id) != 1)
return -EINVAL;
mutex_lock(&tz->lock);
if (device_is_registered(dev))
- result = tz->ops->get_trip_type(tz, trip, &type);
+ result = __thermal_zone_get_trip(tz, trip_id, &trip);
else
result = -ENODEV;
mutex_unlock(&tz->lock);
+
if (result)
return result;
- switch (type) {
+ switch (trip.type) {
case THERMAL_TRIP_CRITICAL:
return sprintf(buf, "critical\n");
case THERMAL_TRIP_HOT:
@@ -122,17 +120,10 @@ trip_point_temp_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct thermal_zone_device *tz = to_thermal_zone(dev);
- int trip, ret;
- int temperature, hyst = 0;
- enum thermal_trip_type type;
-
- if (!tz->ops->set_trip_temp && !tz->trips)
- return -EPERM;
-
- if (sscanf(attr->attr.name, "trip_point_%d_temp", &trip) != 1)
- return -EINVAL;
+ struct thermal_trip trip;
+ int trip_id, ret;
- if (kstrtoint(buf, 10, &temperature))
+ if (sscanf(attr->attr.name, "trip_point_%d_temp", &trip_id) != 1)
return -EINVAL;
mutex_lock(&tz->lock);
@@ -142,36 +133,19 @@ trip_point_temp_store(struct device *dev, struct device_attribute *attr,
goto unlock;
}
- if (tz->ops->set_trip_temp) {
- ret = tz->ops->set_trip_temp(tz, trip, temperature);
- if (ret)
- goto unlock;
- }
-
- if (tz->trips)
- tz->trips[trip].temperature = temperature;
-
- if (tz->ops->get_trip_hyst) {
- ret = tz->ops->get_trip_hyst(tz, trip, &hyst);
- if (ret)
- goto unlock;
- }
-
- ret = tz->ops->get_trip_type(tz, trip, &type);
+ ret = __thermal_zone_get_trip(tz, trip_id, &trip);
if (ret)
goto unlock;
- thermal_notify_tz_trip_change(tz->id, trip, type, temperature, hyst);
-
- __thermal_zone_device_update(tz, THERMAL_EVENT_UNSPECIFIED);
+ ret = kstrtoint(buf, 10, &trip.temperature);
+ if (ret)
+ goto unlock;
+ ret = thermal_zone_set_trip(tz, trip_id, &trip);
unlock:
mutex_unlock(&tz->lock);
-
- if (ret)
- return ret;
-
- return count;
+
+ return ret ? ret : count;
}
static ssize_t
@@ -179,19 +153,16 @@ trip_point_temp_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct thermal_zone_device *tz = to_thermal_zone(dev);
- int trip, ret;
- int temperature;
-
- if (!tz->ops->get_trip_temp)
- return -EPERM;
+ struct thermal_trip trip;
+ int trip_id, ret;
- if (sscanf(attr->attr.name, "trip_point_%d_temp", &trip) != 1)
+ if (sscanf(attr->attr.name, "trip_point_%d_temp", &trip_id) != 1)
return -EINVAL;
mutex_lock(&tz->lock);
if (device_is_registered(dev))
- ret = tz->ops->get_trip_temp(tz, trip, &temperature);
+ ret = __thermal_zone_get_trip(tz, trip_id, &trip);
else
ret = -ENODEV;
@@ -200,7 +171,7 @@ trip_point_temp_show(struct device *dev, struct device_attribute *attr,
if (ret)
return ret;
- return sprintf(buf, "%d\n", temperature);
+ return sprintf(buf, "%d\n", trip.temperature);
}
static ssize_t
@@ -208,16 +179,13 @@ trip_point_hyst_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct thermal_zone_device *tz = to_thermal_zone(dev);
- int trip, ret;
- int temperature;
-
- if (!tz->ops->set_trip_hyst)
- return -EPERM;
+ struct thermal_trip trip;
+ int trip_id, ret;
- if (sscanf(attr->attr.name, "trip_point_%d_hyst", &trip) != 1)
+ if (sscanf(attr->attr.name, "trip_point_%d_hyst", &trip_id) != 1)
return -EINVAL;
- if (kstrtoint(buf, 10, &temperature))
+ if (kstrtoint(buf, 10, &trip.hysteresis))
return -EINVAL;
mutex_lock(&tz->lock);
@@ -227,16 +195,11 @@ trip_point_hyst_store(struct device *dev, struct device_attribute *attr,
goto unlock;
}
- /*
- * We are not doing any check on the 'temperature' value
- * here. The driver implementing 'set_trip_hyst' has to
- * take care of this.
- */
- ret = tz->ops->set_trip_hyst(tz, trip, temperature);
-
- if (!ret)
- __thermal_zone_set_trips(tz);
-
+ ret = __thermal_zone_get_trip(tz, trip_id, &trip);
+ if (ret)
+ goto unlock;
+
+ ret = thermal_zone_set_trip(tz, trip_id, &trip);
unlock:
mutex_unlock(&tz->lock);
@@ -248,25 +211,22 @@ trip_point_hyst_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct thermal_zone_device *tz = to_thermal_zone(dev);
- int trip, ret;
- int temperature;
-
- if (!tz->ops->get_trip_hyst)
- return -EPERM;
+ struct thermal_trip trip;
+ int trip_id, ret;
- if (sscanf(attr->attr.name, "trip_point_%d_hyst", &trip) != 1)
+ if (sscanf(attr->attr.name, "trip_point_%d_hyst", &trip_id) != 1)
return -EINVAL;
mutex_lock(&tz->lock);
if (device_is_registered(dev))
- ret = tz->ops->get_trip_hyst(tz, trip, &temperature);
+ ret = __thermal_zone_get_trip(tz, trip_id, &trip);
else
ret = -ENODEV;
mutex_unlock(&tz->lock);
- return ret ? ret : sprintf(buf, "%d\n", temperature);
+ return ret ? ret : sprintf(buf, "%d\n", trip.hysteresis);
}
static ssize_t
@@ -491,23 +451,20 @@ static int create_trip_attrs(struct thermal_zone_device *tz, int mask)
return -ENOMEM;
}
- if (tz->ops->get_trip_hyst) {
- tz->trip_hyst_attrs = kcalloc(tz->num_trips,
- sizeof(*tz->trip_hyst_attrs),
- GFP_KERNEL);
- if (!tz->trip_hyst_attrs) {
- kfree(tz->trip_type_attrs);
- kfree(tz->trip_temp_attrs);
- return -ENOMEM;
- }
+ tz->trip_hyst_attrs = kcalloc(tz->num_trips,
+ sizeof(*tz->trip_hyst_attrs),
+ GFP_KERNEL);
+ if (!tz->trip_hyst_attrs) {
+ kfree(tz->trip_type_attrs);
+ kfree(tz->trip_temp_attrs);
+ return -ENOMEM;
}
attrs = kcalloc(tz->num_trips * 3 + 1, sizeof(*attrs), GFP_KERNEL);
if (!attrs) {
kfree(tz->trip_type_attrs);
kfree(tz->trip_temp_attrs);
- if (tz->ops->get_trip_hyst)
- kfree(tz->trip_hyst_attrs);
+ kfree(tz->trip_hyst_attrs);
return -ENOMEM;
}
@@ -540,9 +497,6 @@ static int create_trip_attrs(struct thermal_zone_device *tz, int mask)
}
attrs[indx + tz->num_trips] = &tz->trip_temp_attrs[indx].attr.attr;
- /* create Optional trip hyst attribute */
- if (!tz->ops->get_trip_hyst)
- continue;
snprintf(tz->trip_hyst_attrs[indx].name, THERMAL_NAME_LENGTH,
"trip_point_%d_hyst", indx);
@@ -579,8 +533,7 @@ static void destroy_trip_attrs(struct thermal_zone_device *tz)
kfree(tz->trip_type_attrs);
kfree(tz->trip_temp_attrs);
- if (tz->ops->get_trip_hyst)
- kfree(tz->trip_hyst_attrs);
+ kfree(tz->trip_hyst_attrs);
kfree(tz->trips_attribute_group.attrs);
}
diff --git a/drivers/thermal/thermal_trip.c b/drivers/thermal/thermal_trip.c
new file mode 100644
index 000000000000..907f3a4d7bc8
--- /dev/null
+++ b/drivers/thermal/thermal_trip.c
@@ -0,0 +1,182 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2008 Intel Corp
+ * Copyright (C) 2008 Zhang Rui <rui.zhang@intel.com>
+ * Copyright (C) 2008 Sujith Thomas <sujith.thomas@intel.com>
+ * Copyright 2022 Linaro Limited
+ *
+ * Thermal trips handling
+ */
+#include "thermal_core.h"
+
+int __for_each_thermal_trip(struct thermal_zone_device *tz,
+ int (*cb)(struct thermal_trip *, void *),
+ void *data)
+{
+ int i, ret;
+ struct thermal_trip trip;
+
+ lockdep_assert_held(&tz->lock);
+
+ for (i = 0; i < tz->num_trips; i++) {
+
+ ret = __thermal_zone_get_trip(tz, i, &trip);
+ if (ret)
+ return ret;
+
+ ret = cb(&trip, data);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+int thermal_zone_get_num_trips(struct thermal_zone_device *tz)
+{
+ return tz->num_trips;
+}
+EXPORT_SYMBOL_GPL(thermal_zone_get_num_trips);
+
+/**
+ * __thermal_zone_set_trips - Computes the next trip points for the driver
+ * @tz: a pointer to a thermal zone device structure
+ *
+ * The function computes the next temperature boundaries by browsing
+ * the trip points. The result is the closer low and high trip points
+ * to the current temperature. These values are passed to the backend
+ * driver to let it set its own notification mechanism (usually an
+ * interrupt).
+ *
+ * This function must be called with tz->lock held. Both tz and tz->ops
+ * must be valid pointers.
+ *
+ * It does not return a value
+ */
+void __thermal_zone_set_trips(struct thermal_zone_device *tz)
+{
+ struct thermal_trip trip;
+ int low = -INT_MAX, high = INT_MAX;
+ int i, ret;
+
+ lockdep_assert_held(&tz->lock);
+
+ if (!tz->ops->set_trips)
+ return;
+
+ for (i = 0; i < tz->num_trips; i++) {
+ int trip_low;
+
+ ret = __thermal_zone_get_trip(tz, i , &trip);
+ if (ret)
+ return;
+
+ trip_low = trip.temperature - trip.hysteresis;
+
+ if (trip_low < tz->temperature && trip_low > low)
+ low = trip_low;
+
+ if (trip.temperature > tz->temperature &&
+ trip.temperature < high)
+ high = trip.temperature;
+ }
+
+ /* No need to change trip points */
+ if (tz->prev_low_trip == low && tz->prev_high_trip == high)
+ return;
+
+ tz->prev_low_trip = low;
+ tz->prev_high_trip = high;
+
+ dev_dbg(&tz->device,
+ "new temperature boundaries: %d < x < %d\n", low, high);
+
+ /*
+ * Set a temperature window. When this window is left the driver
+ * must inform the thermal core via thermal_zone_device_update.
+ */
+ ret = tz->ops->set_trips(tz, low, high);
+ if (ret)
+ dev_err(&tz->device, "Failed to set trips: %d\n", ret);
+}
+
+int __thermal_zone_get_trip(struct thermal_zone_device *tz, int trip_id,
+ struct thermal_trip *trip)
+{
+ int ret;
+
+ if (!tz || trip_id < 0 || trip_id >= tz->num_trips || !trip)
+ return -EINVAL;
+
+ if (tz->trips) {
+ *trip = tz->trips[trip_id];
+ return 0;
+ }
+
+ if (tz->ops->get_trip_hyst) {
+ ret = tz->ops->get_trip_hyst(tz, trip_id, &trip->hysteresis);
+ if (ret)
+ return ret;
+ } else {
+ trip->hysteresis = 0;
+ }
+
+ ret = tz->ops->get_trip_temp(tz, trip_id, &trip->temperature);
+ if (ret)
+ return ret;
+
+ return tz->ops->get_trip_type(tz, trip_id, &trip->type);
+}
+EXPORT_SYMBOL_GPL(__thermal_zone_get_trip);
+
+int thermal_zone_get_trip(struct thermal_zone_device *tz, int trip_id,
+ struct thermal_trip *trip)
+{
+ int ret;
+
+ mutex_lock(&tz->lock);
+ ret = __thermal_zone_get_trip(tz, trip_id, trip);
+ mutex_unlock(&tz->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(thermal_zone_get_trip);
+
+int thermal_zone_set_trip(struct thermal_zone_device *tz, int trip_id,
+ const struct thermal_trip *trip)
+{
+ struct thermal_trip t;
+ int ret;
+
+ if (!tz->ops->set_trip_temp && !tz->ops->set_trip_hyst && !tz->trips)
+ return -EINVAL;
+
+ ret = __thermal_zone_get_trip(tz, trip_id, &t);
+ if (ret)
+ return ret;
+
+ if (t.type != trip->type)
+ return -EINVAL;
+
+ if (t.temperature != trip->temperature && tz->ops->set_trip_temp) {
+ ret = tz->ops->set_trip_temp(tz, trip_id, trip->temperature);
+ if (ret)
+ return ret;
+ }
+
+ if (t.hysteresis != trip->hysteresis && tz->ops->set_trip_hyst) {
+ ret = tz->ops->set_trip_hyst(tz, trip_id, trip->hysteresis);
+ if (ret)
+ return ret;
+ }
+
+ if (tz->trips && (t.temperature != trip->temperature || t.hysteresis != trip->hysteresis))
+ tz->trips[trip_id] = *trip;
+
+ thermal_notify_tz_trip_change(tz->id, trip_id, trip->type,
+ trip->temperature, trip->hysteresis);
+
+ __thermal_zone_device_update(tz, THERMAL_TRIP_CHANGED);
+
+ return 0;
+}
diff --git a/drivers/thermal/ti-soc-thermal/ti-thermal.h b/drivers/thermal/ti-soc-thermal/ti-thermal.h
index c388ecf31834..4fd2c20182d7 100644
--- a/drivers/thermal/ti-soc-thermal/ti-thermal.h
+++ b/drivers/thermal/ti-soc-thermal/ti-thermal.h
@@ -38,21 +38,6 @@
/* Update rates */
#define FAST_TEMP_MONITORING_RATE 250
-/* helper macros */
-/**
- * ti_thermal_get_trip_value - returns trip temperature based on index
- * @i: trip index
- */
-#define ti_thermal_get_trip_value(i) \
- (OMAP_TRIP_HOT + ((i) * OMAP_TRIP_STEP))
-
-/**
- * ti_thermal_is_valid_trip - check for trip index
- * @i: trip index
- */
-#define ti_thermal_is_valid_trip(trip) \
- ((trip) >= 0 && (trip) < OMAP_TRIP_NUMBER)
-
#ifdef CONFIG_TI_THERMAL
int ti_thermal_expose_sensor(struct ti_bandgap *bgp, int id, char *domain);
int ti_thermal_remove_sensor(struct ti_bandgap *bgp, int id);
diff --git a/drivers/thermal/uniphier_thermal.c b/drivers/thermal/uniphier_thermal.c
index 4111d99ef50e..47801841b3f5 100644
--- a/drivers/thermal/uniphier_thermal.c
+++ b/drivers/thermal/uniphier_thermal.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/**
+/*
* uniphier_thermal.c - Socionext UniPhier thermal driver
* Copyright 2014 Panasonic Corporation
* Copyright 2016-2017 Socionext Inc.
@@ -17,8 +17,6 @@
#include <linux/regmap.h>
#include <linux/thermal.h>
-#include "thermal_core.h"
-
/*
* block registers
* addresses are the offset from .block_base
@@ -248,8 +246,7 @@ static int uniphier_tm_probe(struct platform_device *pdev)
struct regmap *regmap;
struct device_node *parent;
struct uniphier_tm_dev *tdev;
- const struct thermal_trip *trips;
- int i, ret, irq, ntrips, crit_temp = INT_MAX;
+ int i, ret, irq, crit_temp = INT_MAX;
tdev = devm_kzalloc(dev, sizeof(*tdev), GFP_KERNEL);
if (!tdev)
@@ -296,20 +293,18 @@ static int uniphier_tm_probe(struct platform_device *pdev)
return PTR_ERR(tdev->tz_dev);
}
- /* get trip points */
- trips = of_thermal_get_trip_points(tdev->tz_dev);
- ntrips = of_thermal_get_ntrips(tdev->tz_dev);
- if (ntrips > ALERT_CH_NUM) {
- dev_err(dev, "thermal zone has too many trips\n");
- return -E2BIG;
- }
-
/* set alert temperatures */
- for (i = 0; i < ntrips; i++) {
- if (trips[i].type == THERMAL_TRIP_CRITICAL &&
- trips[i].temperature < crit_temp)
- crit_temp = trips[i].temperature;
- uniphier_tm_set_alert(tdev, i, trips[i].temperature);
+ for (i = 0; i < thermal_zone_get_num_trips(tdev->tz_dev); i++) {
+ struct thermal_trip trip;
+
+ ret = thermal_zone_get_trip(tdev->tz_dev, i, &trip);
+ if (ret)
+ return ret;
+
+ if (trip.type == THERMAL_TRIP_CRITICAL &&
+ trip.temperature < crit_temp)
+ crit_temp = trip.temperature;
+ uniphier_tm_set_alert(tdev, i, trip.temperature);
tdev->alert_en[i] = true;
}
if (crit_temp > CRITICAL_TEMP_LIMIT) {
diff --git a/drivers/thunderbolt/acpi.c b/drivers/thunderbolt/acpi.c
index 317e4f5fdb97..628225deb8fe 100644
--- a/drivers/thunderbolt/acpi.c
+++ b/drivers/thunderbolt/acpi.c
@@ -36,16 +36,13 @@ static acpi_status tb_acpi_add_link(acpi_handle handle, u32 level, void *data,
* We need to do this because the xHCI driver might not yet be
* bound so the USB3 SuperSpeed ports are not yet created.
*/
- dev = acpi_get_first_physical_node(adev);
- while (!dev) {
- adev = acpi_dev_parent(adev);
- if (!adev)
- break;
+ do {
dev = acpi_get_first_physical_node(adev);
- }
+ if (dev)
+ break;
- if (!dev)
- goto out_put;
+ adev = acpi_dev_parent(adev);
+ } while (adev);
/*
* Check that the device is PCIe. This is because USB3
diff --git a/drivers/thunderbolt/ctl.c b/drivers/thunderbolt/ctl.c
index 0c661a706160..6e7d28e8d81a 100644
--- a/drivers/thunderbolt/ctl.c
+++ b/drivers/thunderbolt/ctl.c
@@ -230,7 +230,6 @@ static int check_config_address(struct tb_cfg_address addr,
static struct tb_cfg_result decode_error(const struct ctl_pkg *response)
{
struct cfg_error_pkg *pkg = response->buffer;
- struct tb_ctl *ctl = response->ctl;
struct tb_cfg_result res = { 0 };
res.response_route = tb_cfg_get_route(&pkg->header);
res.response_port = 0;
@@ -239,13 +238,6 @@ static struct tb_cfg_result decode_error(const struct ctl_pkg *response)
if (res.err)
return res;
- if (pkg->zero1)
- tb_ctl_warn(ctl, "pkg->zero1 is %#x\n", pkg->zero1);
- if (pkg->zero2)
- tb_ctl_warn(ctl, "pkg->zero2 is %#x\n", pkg->zero2);
- if (pkg->zero3)
- tb_ctl_warn(ctl, "pkg->zero3 is %#x\n", pkg->zero3);
-
res.err = 1;
res.tb_error = pkg->error;
res.response_port = pkg->port;
@@ -416,6 +408,7 @@ static int tb_async_error(const struct ctl_pkg *pkg)
case TB_CFG_ERROR_LINK_ERROR:
case TB_CFG_ERROR_HEC_ERROR_DETECTED:
case TB_CFG_ERROR_FLOW_CONTROL_ERROR:
+ case TB_CFG_ERROR_DP_BW:
return true;
default:
@@ -736,6 +729,47 @@ void tb_ctl_stop(struct tb_ctl *ctl)
/* public interface, commands */
/**
+ * tb_cfg_ack_notification() - Ack notification
+ * @ctl: Control channel to use
+ * @route: Router that originated the event
+ * @error: Pointer to the notification package
+ *
+ * Call this as response for non-plug notification to ack it. Returns
+ * %0 on success or an error code on failure.
+ */
+int tb_cfg_ack_notification(struct tb_ctl *ctl, u64 route,
+ const struct cfg_error_pkg *error)
+{
+ struct cfg_ack_pkg pkg = {
+ .header = tb_cfg_make_header(route),
+ };
+ const char *name;
+
+ switch (error->error) {
+ case TB_CFG_ERROR_LINK_ERROR:
+ name = "link error";
+ break;
+ case TB_CFG_ERROR_HEC_ERROR_DETECTED:
+ name = "HEC error";
+ break;
+ case TB_CFG_ERROR_FLOW_CONTROL_ERROR:
+ name = "flow control error";
+ break;
+ case TB_CFG_ERROR_DP_BW:
+ name = "DP_BW";
+ break;
+ default:
+ name = "unknown";
+ break;
+ }
+
+ tb_ctl_dbg(ctl, "acking %s (%#x) notification on %llx\n", name,
+ error->error, route);
+
+ return tb_ctl_tx(ctl, &pkg, sizeof(pkg), TB_CFG_PKG_NOTIFY_ACK);
+}
+
+/**
* tb_cfg_ack_plug() - Ack hot plug/unplug event
* @ctl: Control channel to use
* @route: Router that originated the event
@@ -754,7 +788,7 @@ int tb_cfg_ack_plug(struct tb_ctl *ctl, u64 route, u32 port, bool unplug)
.pg = unplug ? TB_CFG_ERROR_PG_HOT_UNPLUG
: TB_CFG_ERROR_PG_HOT_PLUG,
};
- tb_ctl_dbg(ctl, "acking hot %splug event on %llx:%x\n",
+ tb_ctl_dbg(ctl, "acking hot %splug event on %llx:%u\n",
unplug ? "un" : "", route, port);
return tb_ctl_tx(ctl, &pkg, sizeof(pkg), TB_CFG_PKG_ERROR);
}
diff --git a/drivers/thunderbolt/ctl.h b/drivers/thunderbolt/ctl.h
index 7c7d80f96c0c..eec5c953c743 100644
--- a/drivers/thunderbolt/ctl.h
+++ b/drivers/thunderbolt/ctl.h
@@ -122,6 +122,8 @@ static inline struct tb_cfg_header tb_cfg_make_header(u64 route)
return header;
}
+int tb_cfg_ack_notification(struct tb_ctl *ctl, u64 route,
+ const struct cfg_error_pkg *error);
int tb_cfg_ack_plug(struct tb_ctl *ctl, u64 route, u32 port, bool unplug);
struct tb_cfg_result tb_cfg_reset(struct tb_ctl *ctl, u64 route);
struct tb_cfg_result tb_cfg_read_raw(struct tb_ctl *ctl, void *buffer,
diff --git a/drivers/thunderbolt/debugfs.c b/drivers/thunderbolt/debugfs.c
index 834bcad42e9f..4339e706cc3a 100644
--- a/drivers/thunderbolt/debugfs.c
+++ b/drivers/thunderbolt/debugfs.c
@@ -1159,7 +1159,10 @@ static void port_cap_show(struct tb_port *port, struct seq_file *s,
if (tb_port_is_pcie_down(port) || tb_port_is_pcie_up(port)) {
length = PORT_CAP_PCIE_LEN;
} else if (tb_port_is_dpin(port) || tb_port_is_dpout(port)) {
- length = PORT_CAP_DP_LEN;
+ if (usb4_dp_port_bw_mode_supported(port))
+ length = PORT_CAP_DP_LEN + 1;
+ else
+ length = PORT_CAP_DP_LEN;
} else if (tb_port_is_usb3_down(port) ||
tb_port_is_usb3_up(port)) {
length = PORT_CAP_USB3_LEN;
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index 363d712aa364..3370e18ba05f 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -513,36 +513,44 @@ int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged)
while (retries--) {
state = tb_port_state(port);
- if (state < 0)
- return state;
- if (state == TB_PORT_DISABLED) {
+ switch (state) {
+ case TB_PORT_DISABLED:
tb_port_dbg(port, "is disabled (state: 0)\n");
return 0;
- }
- if (state == TB_PORT_UNPLUGGED) {
+
+ case TB_PORT_UNPLUGGED:
if (wait_if_unplugged) {
/* used during resume */
tb_port_dbg(port,
"is unplugged (state: 7), retrying...\n");
msleep(100);
- continue;
+ break;
}
tb_port_dbg(port, "is unplugged (state: 7)\n");
return 0;
- }
- if (state == TB_PORT_UP) {
- tb_port_dbg(port, "is connected, link is up (state: 2)\n");
+
+ case TB_PORT_UP:
+ case TB_PORT_TX_CL0S:
+ case TB_PORT_RX_CL0S:
+ case TB_PORT_CL1:
+ case TB_PORT_CL2:
+ tb_port_dbg(port, "is connected, link is up (state: %d)\n", state);
return 1;
+
+ default:
+ if (state < 0)
+ return state;
+
+ /*
+ * After plug-in the state is TB_PORT_CONNECTING. Give it some
+ * time.
+ */
+ tb_port_dbg(port,
+ "is connected, link is not up (state: %d), retrying...\n",
+ state);
+ msleep(100);
}
- /*
- * After plug-in the state is TB_PORT_CONNECTING. Give it some
- * time.
- */
- tb_port_dbg(port,
- "is connected, link is not up (state: %d), retrying...\n",
- state);
- msleep(100);
}
tb_port_warn(port,
"failed to reach state TB_PORT_UP. Ignoring port...\n");
@@ -2176,9 +2184,9 @@ static void tb_switch_release(struct device *dev)
kfree(sw);
}
-static int tb_switch_uevent(struct device *dev, struct kobj_uevent_env *env)
+static int tb_switch_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
- struct tb_switch *sw = tb_to_switch(dev);
+ const struct tb_switch *sw = tb_to_switch(dev);
const char *type;
if (sw->config.thunderbolt_version == USB4_VERSION_1_0) {
diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c
index 3f1ab30c4fb1..7bfbc9ca9ba4 100644
--- a/drivers/thunderbolt/tb.c
+++ b/drivers/thunderbolt/tb.c
@@ -16,7 +16,8 @@
#include "tb_regs.h"
#include "tunnel.h"
-#define TB_TIMEOUT 100 /* ms */
+#define TB_TIMEOUT 100 /* ms */
+#define MAX_GROUPS 7 /* max Group_ID is 7 */
/**
* struct tb_cm - Simple Thunderbolt connection manager
@@ -28,12 +29,14 @@
* after cfg has been paused.
* @remove_work: Work used to remove any unplugged routers after
* runtime resume
+ * @groups: Bandwidth groups used in this domain.
*/
struct tb_cm {
struct list_head tunnel_list;
struct list_head dp_resources;
bool hotplug_active;
struct delayed_work remove_work;
+ struct tb_bandwidth_group groups[MAX_GROUPS];
};
static inline struct tb *tcm_to_tb(struct tb_cm *tcm)
@@ -49,6 +52,112 @@ struct tb_hotplug_event {
bool unplug;
};
+static void tb_init_bandwidth_groups(struct tb_cm *tcm)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
+ struct tb_bandwidth_group *group = &tcm->groups[i];
+
+ group->tb = tcm_to_tb(tcm);
+ group->index = i + 1;
+ INIT_LIST_HEAD(&group->ports);
+ }
+}
+
+static void tb_bandwidth_group_attach_port(struct tb_bandwidth_group *group,
+ struct tb_port *in)
+{
+ if (!group || WARN_ON(in->group))
+ return;
+
+ in->group = group;
+ list_add_tail(&in->group_list, &group->ports);
+
+ tb_port_dbg(in, "attached to bandwidth group %d\n", group->index);
+}
+
+static struct tb_bandwidth_group *tb_find_free_bandwidth_group(struct tb_cm *tcm)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
+ struct tb_bandwidth_group *group = &tcm->groups[i];
+
+ if (list_empty(&group->ports))
+ return group;
+ }
+
+ return NULL;
+}
+
+static struct tb_bandwidth_group *
+tb_attach_bandwidth_group(struct tb_cm *tcm, struct tb_port *in,
+ struct tb_port *out)
+{
+ struct tb_bandwidth_group *group;
+ struct tb_tunnel *tunnel;
+
+ /*
+ * Find all DP tunnels that go through all the same USB4 links
+ * as this one. Because we always setup tunnels the same way we
+ * can just check for the routers at both ends of the tunnels
+ * and if they are the same we have a match.
+ */
+ list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
+ if (!tb_tunnel_is_dp(tunnel))
+ continue;
+
+ if (tunnel->src_port->sw == in->sw &&
+ tunnel->dst_port->sw == out->sw) {
+ group = tunnel->src_port->group;
+ if (group) {
+ tb_bandwidth_group_attach_port(group, in);
+ return group;
+ }
+ }
+ }
+
+ /* Pick up next available group then */
+ group = tb_find_free_bandwidth_group(tcm);
+ if (group)
+ tb_bandwidth_group_attach_port(group, in);
+ else
+ tb_port_warn(in, "no available bandwidth groups\n");
+
+ return group;
+}
+
+static void tb_discover_bandwidth_group(struct tb_cm *tcm, struct tb_port *in,
+ struct tb_port *out)
+{
+ if (usb4_dp_port_bw_mode_enabled(in)) {
+ int index, i;
+
+ index = usb4_dp_port_group_id(in);
+ for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
+ if (tcm->groups[i].index == index) {
+ tb_bandwidth_group_attach_port(&tcm->groups[i], in);
+ return;
+ }
+ }
+ }
+
+ tb_attach_bandwidth_group(tcm, in, out);
+}
+
+static void tb_detach_bandwidth_group(struct tb_port *in)
+{
+ struct tb_bandwidth_group *group = in->group;
+
+ if (group) {
+ in->group = NULL;
+ list_del_init(&in->group_list);
+
+ tb_port_dbg(in, "detached from bandwidth group %d\n", group->index);
+ }
+}
+
static void tb_handle_hotplug(struct work_struct *work);
static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug)
@@ -193,9 +302,14 @@ static void tb_discover_tunnels(struct tb *tb)
parent = tb_switch_parent(parent);
}
} else if (tb_tunnel_is_dp(tunnel)) {
+ struct tb_port *in = tunnel->src_port;
+ struct tb_port *out = tunnel->dst_port;
+
/* Keep the domain from powering down */
- pm_runtime_get_sync(&tunnel->src_port->sw->dev);
- pm_runtime_get_sync(&tunnel->dst_port->sw->dev);
+ pm_runtime_get_sync(&in->sw->dev);
+ pm_runtime_get_sync(&out->sw->dev);
+
+ tb_discover_bandwidth_group(tcm, in, out);
}
}
}
@@ -350,10 +464,13 @@ static int tb_available_bandwidth(struct tb *tb, struct tb_port *src_port,
struct tb_tunnel *tunnel;
struct tb_port *port;
- tb_port_dbg(dst_port, "calculating available bandwidth\n");
+ tb_dbg(tb, "calculating available bandwidth between %llx:%u <-> %llx:%u\n",
+ tb_route(src_port->sw), src_port->port, tb_route(dst_port->sw),
+ dst_port->port);
tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
- if (tunnel) {
+ if (tunnel && tunnel->src_port != src_port &&
+ tunnel->dst_port != dst_port) {
ret = tb_tunnel_consumed_bandwidth(tunnel, &usb3_consumed_up,
&usb3_consumed_down);
if (ret)
@@ -387,7 +504,8 @@ static int tb_available_bandwidth(struct tb *tb, struct tb_port *src_port,
up_bw -= up_bw / 10;
down_bw = up_bw;
- tb_port_dbg(port, "link total bandwidth %d Mb/s\n", up_bw);
+ tb_port_dbg(port, "link total bandwidth %d/%d Mb/s\n", up_bw,
+ down_bw);
/*
* Find all DP tunnels that cross the port and reduce
@@ -396,12 +514,24 @@ static int tb_available_bandwidth(struct tb *tb, struct tb_port *src_port,
list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
int dp_consumed_up, dp_consumed_down;
+ if (tb_tunnel_is_invalid(tunnel))
+ continue;
+
if (!tb_tunnel_is_dp(tunnel))
continue;
if (!tb_tunnel_port_on_path(tunnel, port))
continue;
+ /*
+ * Ignore the DP tunnel between src_port and
+ * dst_port because it is the same tunnel and we
+ * may be re-calculating estimated bandwidth.
+ */
+ if (tunnel->src_port == src_port &&
+ tunnel->dst_port == dst_port)
+ continue;
+
ret = tb_tunnel_consumed_bandwidth(tunnel,
&dp_consumed_up,
&dp_consumed_down);
@@ -762,6 +892,7 @@ static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel)
switch (tunnel->type) {
case TB_TUNNEL_DP:
+ tb_detach_bandwidth_group(src_port);
/*
* In case of DP tunnel make sure the DP IN resource is
* deallocated properly.
@@ -879,6 +1010,99 @@ out:
return tb_find_unused_port(sw, TB_TYPE_PCIE_DOWN);
}
+static void
+tb_recalc_estimated_bandwidth_for_group(struct tb_bandwidth_group *group)
+{
+ struct tb_tunnel *first_tunnel;
+ struct tb *tb = group->tb;
+ struct tb_port *in;
+ int ret;
+
+ tb_dbg(tb, "re-calculating bandwidth estimation for group %u\n",
+ group->index);
+
+ first_tunnel = NULL;
+ list_for_each_entry(in, &group->ports, group_list) {
+ int estimated_bw, estimated_up, estimated_down;
+ struct tb_tunnel *tunnel;
+ struct tb_port *out;
+
+ if (!usb4_dp_port_bw_mode_enabled(in))
+ continue;
+
+ tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, NULL);
+ if (WARN_ON(!tunnel))
+ break;
+
+ if (!first_tunnel) {
+ /*
+ * Since USB3 bandwidth is shared by all DP
+ * tunnels under the host router USB4 port, even
+ * if they do not begin from the host router, we
+ * can release USB3 bandwidth just once and not
+ * for each tunnel separately.
+ */
+ first_tunnel = tunnel;
+ ret = tb_release_unused_usb3_bandwidth(tb,
+ first_tunnel->src_port, first_tunnel->dst_port);
+ if (ret) {
+ tb_port_warn(in,
+ "failed to release unused bandwidth\n");
+ break;
+ }
+ }
+
+ out = tunnel->dst_port;
+ ret = tb_available_bandwidth(tb, in, out, &estimated_up,
+ &estimated_down);
+ if (ret) {
+ tb_port_warn(in,
+ "failed to re-calculate estimated bandwidth\n");
+ break;
+ }
+
+ /*
+ * Estimated bandwidth includes:
+ * - already allocated bandwidth for the DP tunnel
+ * - available bandwidth along the path
+ * - bandwidth allocated for USB 3.x but not used.
+ */
+ tb_port_dbg(in, "re-calculated estimated bandwidth %u/%u Mb/s\n",
+ estimated_up, estimated_down);
+
+ if (in->sw->config.depth < out->sw->config.depth)
+ estimated_bw = estimated_down;
+ else
+ estimated_bw = estimated_up;
+
+ if (usb4_dp_port_set_estimated_bw(in, estimated_bw))
+ tb_port_warn(in, "failed to update estimated bandwidth\n");
+ }
+
+ if (first_tunnel)
+ tb_reclaim_usb3_bandwidth(tb, first_tunnel->src_port,
+ first_tunnel->dst_port);
+
+ tb_dbg(tb, "bandwidth estimation for group %u done\n", group->index);
+}
+
+static void tb_recalc_estimated_bandwidth(struct tb *tb)
+{
+ struct tb_cm *tcm = tb_priv(tb);
+ int i;
+
+ tb_dbg(tb, "bandwidth consumption changed, re-calculating estimated bandwidth\n");
+
+ for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
+ struct tb_bandwidth_group *group = &tcm->groups[i];
+
+ if (!list_empty(&group->ports))
+ tb_recalc_estimated_bandwidth_for_group(group);
+ }
+
+ tb_dbg(tb, "bandwidth re-calculation done\n");
+}
+
static struct tb_port *tb_find_dp_out(struct tb *tb, struct tb_port *in)
{
struct tb_port *host_port, *port;
@@ -892,7 +1116,7 @@ static struct tb_port *tb_find_dp_out(struct tb *tb, struct tb_port *in)
continue;
if (tb_port_is_enabled(port)) {
- tb_port_dbg(port, "in use\n");
+ tb_port_dbg(port, "DP OUT in use\n");
continue;
}
@@ -941,7 +1165,7 @@ static void tb_tunnel_dp(struct tb *tb)
continue;
if (tb_port_is_enabled(port)) {
- tb_port_dbg(port, "in use\n");
+ tb_port_dbg(port, "DP IN in use\n");
continue;
}
@@ -993,17 +1217,19 @@ static void tb_tunnel_dp(struct tb *tb)
goto err_rpm_put;
}
+ if (!tb_attach_bandwidth_group(tcm, in, out))
+ goto err_dealloc_dp;
+
/* Make all unused USB3 bandwidth available for the new DP tunnel */
ret = tb_release_unused_usb3_bandwidth(tb, in, out);
if (ret) {
tb_warn(tb, "failed to release unused bandwidth\n");
- goto err_dealloc_dp;
+ goto err_detach_group;
}
- ret = tb_available_bandwidth(tb, in, out, &available_up,
- &available_down);
+ ret = tb_available_bandwidth(tb, in, out, &available_up, &available_down);
if (ret)
- goto err_reclaim;
+ goto err_reclaim_usb;
tb_dbg(tb, "available bandwidth for new DP tunnel %u/%u Mb/s\n",
available_up, available_down);
@@ -1012,7 +1238,7 @@ static void tb_tunnel_dp(struct tb *tb)
available_down);
if (!tunnel) {
tb_port_dbg(out, "could not allocate DP tunnel\n");
- goto err_reclaim;
+ goto err_reclaim_usb;
}
if (tb_tunnel_activate(tunnel)) {
@@ -1022,6 +1248,10 @@ static void tb_tunnel_dp(struct tb *tb)
list_add_tail(&tunnel->list, &tcm->tunnel_list);
tb_reclaim_usb3_bandwidth(tb, in, out);
+
+ /* Update the domain with the new bandwidth estimation */
+ tb_recalc_estimated_bandwidth(tb);
+
/*
* In case of DP tunnel exists, change host router's 1st children
* TMU mode to HiFi for CL0s to work.
@@ -1032,8 +1262,10 @@ static void tb_tunnel_dp(struct tb *tb)
err_free:
tb_tunnel_free(tunnel);
-err_reclaim:
+err_reclaim_usb:
tb_reclaim_usb3_bandwidth(tb, in, out);
+err_detach_group:
+ tb_detach_bandwidth_group(in);
err_dealloc_dp:
tb_switch_dealloc_dp_resource(in->sw, in);
err_rpm_put:
@@ -1066,6 +1298,7 @@ static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port)
* See if there is another DP OUT port that can be used for
* to create another tunnel.
*/
+ tb_recalc_estimated_bandwidth(tb);
tb_tunnel_dp(tb);
}
@@ -1313,6 +1546,7 @@ static void tb_handle_hotplug(struct work_struct *work)
if (port->dual_link_port)
port->dual_link_port->remote = NULL;
/* Maybe we can create another DP tunnel */
+ tb_recalc_estimated_bandwidth(tb);
tb_tunnel_dp(tb);
} else if (port->xdomain) {
struct tb_xdomain *xd = tb_xdomain_get(port->xdomain);
@@ -1370,6 +1604,239 @@ out:
kfree(ev);
}
+static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up,
+ int *requested_down)
+{
+ int allocated_up, allocated_down, available_up, available_down, ret;
+ int requested_up_corrected, requested_down_corrected, granularity;
+ int max_up, max_down, max_up_rounded, max_down_rounded;
+ struct tb *tb = tunnel->tb;
+ struct tb_port *in, *out;
+
+ ret = tb_tunnel_allocated_bandwidth(tunnel, &allocated_up, &allocated_down);
+ if (ret)
+ return ret;
+
+ in = tunnel->src_port;
+ out = tunnel->dst_port;
+
+ tb_port_dbg(in, "bandwidth allocated currently %d/%d Mb/s\n",
+ allocated_up, allocated_down);
+
+ /*
+ * If we get rounded up request from graphics side, say HBR2 x 4
+ * that is 17500 instead of 17280 (this is because of the
+ * granularity), we allow it too. Here the graphics has already
+ * negotiated with the DPRX the maximum possible rates (which is
+ * 17280 in this case).
+ *
+ * Since the link cannot go higher than 17280 we use that in our
+ * calculations but the DP IN adapter Allocated BW write must be
+ * the same value (17500) otherwise the adapter will mark it as
+ * failed for graphics.
+ */
+ ret = tb_tunnel_maximum_bandwidth(tunnel, &max_up, &max_down);
+ if (ret)
+ return ret;
+
+ ret = usb4_dp_port_granularity(in);
+ if (ret < 0)
+ return ret;
+ granularity = ret;
+
+ max_up_rounded = roundup(max_up, granularity);
+ max_down_rounded = roundup(max_down, granularity);
+
+ /*
+ * This will "fix" the request down to the maximum supported
+ * rate * lanes if it is at the maximum rounded up level.
+ */
+ requested_up_corrected = *requested_up;
+ if (requested_up_corrected == max_up_rounded)
+ requested_up_corrected = max_up;
+ else if (requested_up_corrected < 0)
+ requested_up_corrected = 0;
+ requested_down_corrected = *requested_down;
+ if (requested_down_corrected == max_down_rounded)
+ requested_down_corrected = max_down;
+ else if (requested_down_corrected < 0)
+ requested_down_corrected = 0;
+
+ tb_port_dbg(in, "corrected bandwidth request %d/%d Mb/s\n",
+ requested_up_corrected, requested_down_corrected);
+
+ if ((*requested_up >= 0 && requested_up_corrected > max_up_rounded) ||
+ (*requested_down >= 0 && requested_down_corrected > max_down_rounded)) {
+ tb_port_dbg(in, "bandwidth request too high (%d/%d Mb/s > %d/%d Mb/s)\n",
+ requested_up_corrected, requested_down_corrected,
+ max_up_rounded, max_down_rounded);
+ return -ENOBUFS;
+ }
+
+ if ((*requested_up >= 0 && requested_up_corrected <= allocated_up) ||
+ (*requested_down >= 0 && requested_down_corrected <= allocated_down)) {
+ /*
+ * If requested bandwidth is less or equal than what is
+ * currently allocated to that tunnel we simply change
+ * the reservation of the tunnel. Since all the tunnels
+ * going out from the same USB4 port are in the same
+ * group the released bandwidth will be taken into
+ * account for the other tunnels automatically below.
+ */
+ return tb_tunnel_alloc_bandwidth(tunnel, requested_up,
+ requested_down);
+ }
+
+ /*
+ * More bandwidth is requested. Release all the potential
+ * bandwidth from USB3 first.
+ */
+ ret = tb_release_unused_usb3_bandwidth(tb, in, out);
+ if (ret)
+ return ret;
+
+ /*
+ * Then go over all tunnels that cross the same USB4 ports (they
+ * are also in the same group but we use the same function here
+ * that we use with the normal bandwidth allocation).
+ */
+ ret = tb_available_bandwidth(tb, in, out, &available_up, &available_down);
+ if (ret)
+ goto reclaim;
+
+ tb_port_dbg(in, "bandwidth available for allocation %d/%d Mb/s\n",
+ available_up, available_down);
+
+ if ((*requested_up >= 0 && available_up >= requested_up_corrected) ||
+ (*requested_down >= 0 && available_down >= requested_down_corrected)) {
+ ret = tb_tunnel_alloc_bandwidth(tunnel, requested_up,
+ requested_down);
+ } else {
+ ret = -ENOBUFS;
+ }
+
+reclaim:
+ tb_reclaim_usb3_bandwidth(tb, in, out);
+ return ret;
+}
+
+static void tb_handle_dp_bandwidth_request(struct work_struct *work)
+{
+ struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
+ int requested_bw, requested_up, requested_down, ret;
+ struct tb_port *in, *out;
+ struct tb_tunnel *tunnel;
+ struct tb *tb = ev->tb;
+ struct tb_cm *tcm = tb_priv(tb);
+ struct tb_switch *sw;
+
+ pm_runtime_get_sync(&tb->dev);
+
+ mutex_lock(&tb->lock);
+ if (!tcm->hotplug_active)
+ goto unlock;
+
+ sw = tb_switch_find_by_route(tb, ev->route);
+ if (!sw) {
+ tb_warn(tb, "bandwidth request from non-existent router %llx\n",
+ ev->route);
+ goto unlock;
+ }
+
+ in = &sw->ports[ev->port];
+ if (!tb_port_is_dpin(in)) {
+ tb_port_warn(in, "bandwidth request to non-DP IN adapter\n");
+ goto unlock;
+ }
+
+ tb_port_dbg(in, "handling bandwidth allocation request\n");
+
+ if (!usb4_dp_port_bw_mode_enabled(in)) {
+ tb_port_warn(in, "bandwidth allocation mode not enabled\n");
+ goto unlock;
+ }
+
+ ret = usb4_dp_port_requested_bw(in);
+ if (ret < 0) {
+ if (ret == -ENODATA)
+ tb_port_dbg(in, "no bandwidth request active\n");
+ else
+ tb_port_warn(in, "failed to read requested bandwidth\n");
+ goto unlock;
+ }
+ requested_bw = ret;
+
+ tb_port_dbg(in, "requested bandwidth %d Mb/s\n", requested_bw);
+
+ tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, NULL);
+ if (!tunnel) {
+ tb_port_warn(in, "failed to find tunnel\n");
+ goto unlock;
+ }
+
+ out = tunnel->dst_port;
+
+ if (in->sw->config.depth < out->sw->config.depth) {
+ requested_up = -1;
+ requested_down = requested_bw;
+ } else {
+ requested_up = requested_bw;
+ requested_down = -1;
+ }
+
+ ret = tb_alloc_dp_bandwidth(tunnel, &requested_up, &requested_down);
+ if (ret) {
+ if (ret == -ENOBUFS)
+ tb_port_warn(in, "not enough bandwidth available\n");
+ else
+ tb_port_warn(in, "failed to change bandwidth allocation\n");
+ } else {
+ tb_port_dbg(in, "bandwidth allocation changed to %d/%d Mb/s\n",
+ requested_up, requested_down);
+
+ /* Update other clients about the allocation change */
+ tb_recalc_estimated_bandwidth(tb);
+ }
+
+unlock:
+ mutex_unlock(&tb->lock);
+
+ pm_runtime_mark_last_busy(&tb->dev);
+ pm_runtime_put_autosuspend(&tb->dev);
+}
+
+static void tb_queue_dp_bandwidth_request(struct tb *tb, u64 route, u8 port)
+{
+ struct tb_hotplug_event *ev;
+
+ ev = kmalloc(sizeof(*ev), GFP_KERNEL);
+ if (!ev)
+ return;
+
+ ev->tb = tb;
+ ev->route = route;
+ ev->port = port;
+ INIT_WORK(&ev->work, tb_handle_dp_bandwidth_request);
+ queue_work(tb->wq, &ev->work);
+}
+
+static void tb_handle_notification(struct tb *tb, u64 route,
+ const struct cfg_error_pkg *error)
+{
+ if (tb_cfg_ack_notification(tb->ctl, route, error))
+ tb_warn(tb, "could not ack notification on %llx\n", route);
+
+ switch (error->error) {
+ case TB_CFG_ERROR_DP_BW:
+ tb_queue_dp_bandwidth_request(tb, route, error->port);
+ break;
+
+ default:
+ /* Ack is enough */
+ return;
+ }
+}
+
/*
* tb_schedule_hotplug_handler() - callback function for the control channel
*
@@ -1379,15 +1846,19 @@ static void tb_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
const void *buf, size_t size)
{
const struct cfg_event_pkg *pkg = buf;
- u64 route;
+ u64 route = tb_cfg_get_route(&pkg->header);
- if (type != TB_CFG_PKG_EVENT) {
+ switch (type) {
+ case TB_CFG_PKG_ERROR:
+ tb_handle_notification(tb, route, (const struct cfg_error_pkg *)buf);
+ return;
+ case TB_CFG_PKG_EVENT:
+ break;
+ default:
tb_warn(tb, "unexpected event %#x, ignoring\n", type);
return;
}
- route = tb_cfg_get_route(&pkg->header);
-
if (tb_cfg_ack_plug(tb->ctl, route, pkg->port, pkg->unplug)) {
tb_warn(tb, "could not ack plug event on %llx:%x\n", route,
pkg->port);
@@ -1817,6 +2288,7 @@ struct tb *tb_probe(struct tb_nhi *nhi)
INIT_LIST_HEAD(&tcm->tunnel_list);
INIT_LIST_HEAD(&tcm->dp_resources);
INIT_DELAYED_WORK(&tcm->remove_work, tb_remove_work);
+ tb_init_bandwidth_groups(tcm);
tb_dbg(tb, "using software connection manager\n");
diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
index f9786976f5ec..cbb20a277346 100644
--- a/drivers/thunderbolt/tb.h
+++ b/drivers/thunderbolt/tb.h
@@ -224,6 +224,23 @@ struct tb_switch {
};
/**
+ * struct tb_bandwidth_group - Bandwidth management group
+ * @tb: Pointer to the domain the group belongs to
+ * @index: Index of the group (aka Group_ID). Valid values %1-%7
+ * @ports: DP IN adapters belonging to this group are linked here
+ *
+ * Any tunnel that requires isochronous bandwidth (that's DP for now) is
+ * attached to a bandwidth group. All tunnels going through the same
+ * USB4 links share the same group and can dynamically distribute the
+ * bandwidth within the group.
+ */
+struct tb_bandwidth_group {
+ struct tb *tb;
+ int index;
+ struct list_head ports;
+};
+
+/**
* struct tb_port - a thunderbolt port, part of a tb_switch
* @config: Cached port configuration read from registers
* @sw: Switch the port belongs to
@@ -247,6 +264,9 @@ struct tb_switch {
* @ctl_credits: Buffers reserved for control path
* @dma_credits: Number of credits allocated for DMA tunneling for all
* DMA paths through this port.
+ * @group: Bandwidth allocation group the adapter is assigned to. Only
+ * used for DP IN adapters for now.
+ * @group_list: The adapter is linked to the group's list of ports through this
*
* In USB4 terminology this structure represents an adapter (protocol or
* lane adapter).
@@ -272,6 +292,8 @@ struct tb_port {
unsigned int total_credits;
unsigned int ctl_credits;
unsigned int dma_credits;
+ struct tb_bandwidth_group *group;
+ struct list_head group_list;
};
/**
@@ -815,7 +837,7 @@ static inline bool tb_is_switch(const struct device *dev)
return dev->type == &tb_switch_type;
}
-static inline struct tb_switch *tb_to_switch(struct device *dev)
+static inline struct tb_switch *tb_to_switch(const struct device *dev)
{
if (tb_is_switch(dev))
return container_of(dev, struct tb_switch, dev);
@@ -1047,7 +1069,7 @@ void tb_port_lane_bonding_disable(struct tb_port *port);
int tb_port_wait_for_link_width(struct tb_port *port, int width,
int timeout_msec);
int tb_port_update_credits(struct tb_port *port);
-bool tb_port_is_clx_enabled(struct tb_port *port, enum tb_clx clx);
+bool tb_port_is_clx_enabled(struct tb_port *port, unsigned int clx);
int tb_switch_find_vse_cap(struct tb_switch *sw, enum tb_switch_vse_cap vsec);
int tb_switch_find_cap(struct tb_switch *sw, enum tb_switch_cap cap);
@@ -1238,6 +1260,21 @@ int usb4_usb3_port_allocate_bandwidth(struct tb_port *port, int *upstream_bw,
int usb4_usb3_port_release_bandwidth(struct tb_port *port, int *upstream_bw,
int *downstream_bw);
+int usb4_dp_port_set_cm_id(struct tb_port *port, int cm_id);
+bool usb4_dp_port_bw_mode_supported(struct tb_port *port);
+bool usb4_dp_port_bw_mode_enabled(struct tb_port *port);
+int usb4_dp_port_set_cm_bw_mode_supported(struct tb_port *port, bool supported);
+int usb4_dp_port_group_id(struct tb_port *port);
+int usb4_dp_port_set_group_id(struct tb_port *port, int group_id);
+int usb4_dp_port_nrd(struct tb_port *port, int *rate, int *lanes);
+int usb4_dp_port_set_nrd(struct tb_port *port, int rate, int lanes);
+int usb4_dp_port_granularity(struct tb_port *port);
+int usb4_dp_port_set_granularity(struct tb_port *port, int granularity);
+int usb4_dp_port_set_estimated_bw(struct tb_port *port, int bw);
+int usb4_dp_port_allocated_bw(struct tb_port *port);
+int usb4_dp_port_allocate_bw(struct tb_port *port, int bw);
+int usb4_dp_port_requested_bw(struct tb_port *port);
+
static inline bool tb_is_usb4_port_device(const struct device *dev)
{
return dev->type == &usb4_port_device_type;
diff --git a/drivers/thunderbolt/tb_msgs.h b/drivers/thunderbolt/tb_msgs.h
index 33c4c7aed56d..3234bff07899 100644
--- a/drivers/thunderbolt/tb_msgs.h
+++ b/drivers/thunderbolt/tb_msgs.h
@@ -29,6 +29,7 @@ enum tb_cfg_error {
TB_CFG_ERROR_HEC_ERROR_DETECTED = 12,
TB_CFG_ERROR_FLOW_CONTROL_ERROR = 13,
TB_CFG_ERROR_LOCK = 15,
+ TB_CFG_ERROR_DP_BW = 32,
};
/* common header */
@@ -64,14 +65,16 @@ struct cfg_write_pkg {
/* TB_CFG_PKG_ERROR */
struct cfg_error_pkg {
struct tb_cfg_header header;
- enum tb_cfg_error error:4;
- u32 zero1:4;
+ enum tb_cfg_error error:8;
u32 port:6;
- u32 zero2:2; /* Both should be zero, still they are different fields. */
- u32 zero3:14;
+ u32 reserved:16;
u32 pg:2;
} __packed;
+struct cfg_ack_pkg {
+ struct tb_cfg_header header;
+};
+
#define TB_CFG_ERROR_PG_HOT_PLUG 0x2
#define TB_CFG_ERROR_PG_HOT_UNPLUG 0x3
diff --git a/drivers/thunderbolt/tb_regs.h b/drivers/thunderbolt/tb_regs.h
index 3c38b0cb8f74..2636423748cd 100644
--- a/drivers/thunderbolt/tb_regs.h
+++ b/drivers/thunderbolt/tb_regs.h
@@ -50,6 +50,10 @@ enum tb_port_state {
TB_PORT_DISABLED = 0, /* tb_cap_phy.disable == 1 */
TB_PORT_CONNECTING = 1, /* retry */
TB_PORT_UP = 2,
+ TB_PORT_TX_CL0S = 3,
+ TB_PORT_RX_CL0S = 4,
+ TB_PORT_CL1 = 5,
+ TB_PORT_CL2 = 6,
TB_PORT_UNPLUGGED = 7,
};
@@ -381,15 +385,42 @@ struct tb_regs_port_header {
#define ADP_DP_CS_1_AUX_RX_HOPID_MASK GENMASK(21, 11)
#define ADP_DP_CS_1_AUX_RX_HOPID_SHIFT 11
#define ADP_DP_CS_2 0x02
+#define ADP_DP_CS_2_NRD_MLC_MASK GENMASK(2, 0)
#define ADP_DP_CS_2_HDP BIT(6)
+#define ADP_DP_CS_2_NRD_MLR_MASK GENMASK(9, 7)
+#define ADP_DP_CS_2_NRD_MLR_SHIFT 7
+#define ADP_DP_CS_2_CA BIT(10)
+#define ADP_DP_CS_2_GR_MASK GENMASK(12, 11)
+#define ADP_DP_CS_2_GR_SHIFT 11
+#define ADP_DP_CS_2_GR_0_25G 0x0
+#define ADP_DP_CS_2_GR_0_5G 0x1
+#define ADP_DP_CS_2_GR_1G 0x2
+#define ADP_DP_CS_2_GROUP_ID_MASK GENMASK(15, 13)
+#define ADP_DP_CS_2_GROUP_ID_SHIFT 13
+#define ADP_DP_CS_2_CM_ID_MASK GENMASK(19, 16)
+#define ADP_DP_CS_2_CM_ID_SHIFT 16
+#define ADP_DP_CS_2_CMMS BIT(20)
+#define ADP_DP_CS_2_ESTIMATED_BW_MASK GENMASK(31, 24)
+#define ADP_DP_CS_2_ESTIMATED_BW_SHIFT 24
#define ADP_DP_CS_3 0x03
#define ADP_DP_CS_3_HDPC BIT(9)
#define DP_LOCAL_CAP 0x04
#define DP_REMOTE_CAP 0x05
+/* For DP IN adapter */
+#define DP_STATUS 0x06
+#define DP_STATUS_ALLOCATED_BW_MASK GENMASK(31, 24)
+#define DP_STATUS_ALLOCATED_BW_SHIFT 24
+/* For DP OUT adapter */
#define DP_STATUS_CTRL 0x06
#define DP_STATUS_CTRL_CMHS BIT(25)
#define DP_STATUS_CTRL_UF BIT(26)
#define DP_COMMON_CAP 0x07
+/* Only if DP IN supports BW allocation mode */
+#define ADP_DP_CS_8 0x08
+#define ADP_DP_CS_8_REQUESTED_BW_MASK GENMASK(7, 0)
+#define ADP_DP_CS_8_DPME BIT(30)
+#define ADP_DP_CS_8_DR BIT(31)
+
/*
* DP_COMMON_CAP offsets work also for DP_LOCAL_CAP and DP_REMOTE_CAP
* with exception of DPRX done.
@@ -406,7 +437,12 @@ struct tb_regs_port_header {
#define DP_COMMON_CAP_2_LANES 0x1
#define DP_COMMON_CAP_4_LANES 0x2
#define DP_COMMON_CAP_LTTPR_NS BIT(27)
+#define DP_COMMON_CAP_BW_MODE BIT(28)
#define DP_COMMON_CAP_DPRX_DONE BIT(31)
+/* Only present if DP IN supports BW allocation mode */
+#define ADP_DP_CS_8 0x08
+#define ADP_DP_CS_8_DPME BIT(30)
+#define ADP_DP_CS_8_DR BIT(31)
/* PCIe adapter registers */
#define ADP_PCIE_CS_0 0x00
diff --git a/drivers/thunderbolt/tunnel.c b/drivers/thunderbolt/tunnel.c
index 1fc3c29b24f8..9099ae73e78f 100644
--- a/drivers/thunderbolt/tunnel.c
+++ b/drivers/thunderbolt/tunnel.c
@@ -9,6 +9,7 @@
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/list.h>
+#include <linux/ktime.h>
#include "tunnel.h"
#include "tb.h"
@@ -44,12 +45,17 @@
/* Minimum number of credits for DMA path */
#define TB_MIN_DMA_CREDITS 1U
+static bool bw_alloc_mode = true;
+module_param(bw_alloc_mode, bool, 0444);
+MODULE_PARM_DESC(bw_alloc_mode,
+ "enable bandwidth allocation mode if supported (default: true)");
+
static const char * const tb_tunnel_names[] = { "PCI", "DP", "DMA", "USB3" };
#define __TB_TUNNEL_PRINT(level, tunnel, fmt, arg...) \
do { \
struct tb_tunnel *__tunnel = (tunnel); \
- level(__tunnel->tb, "%llx:%x <-> %llx:%x (%s): " fmt, \
+ level(__tunnel->tb, "%llx:%u <-> %llx:%u (%s): " fmt, \
tb_route(__tunnel->src_port->sw), \
__tunnel->src_port->port, \
tb_route(__tunnel->dst_port->sw), \
@@ -339,9 +345,10 @@ static bool tb_dp_is_usb4(const struct tb_switch *sw)
return tb_switch_is_usb4(sw) || tb_switch_is_titan_ridge(sw);
}
-static int tb_dp_cm_handshake(struct tb_port *in, struct tb_port *out)
+static int tb_dp_cm_handshake(struct tb_port *in, struct tb_port *out,
+ int timeout_msec)
{
- int timeout = 10;
+ ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
u32 val;
int ret;
@@ -368,8 +375,8 @@ static int tb_dp_cm_handshake(struct tb_port *in, struct tb_port *out)
return ret;
if (!(val & DP_STATUS_CTRL_CMHS))
return 0;
- usleep_range(10, 100);
- } while (timeout--);
+ usleep_range(100, 150);
+ } while (ktime_before(ktime_get(), timeout));
return -ETIMEDOUT;
}
@@ -519,7 +526,7 @@ static int tb_dp_xchg_caps(struct tb_tunnel *tunnel)
* Perform connection manager handshake between IN and OUT ports
* before capabilities exchange can take place.
*/
- ret = tb_dp_cm_handshake(in, out);
+ ret = tb_dp_cm_handshake(in, out, 1500);
if (ret)
return ret;
@@ -597,6 +604,133 @@ static int tb_dp_xchg_caps(struct tb_tunnel *tunnel)
in->cap_adap + DP_REMOTE_CAP, 1);
}
+static int tb_dp_bw_alloc_mode_enable(struct tb_tunnel *tunnel)
+{
+ int ret, estimated_bw, granularity, tmp;
+ struct tb_port *out = tunnel->dst_port;
+ struct tb_port *in = tunnel->src_port;
+ u32 out_dp_cap, out_rate, out_lanes;
+ u32 in_dp_cap, in_rate, in_lanes;
+ u32 rate, lanes;
+
+ if (!bw_alloc_mode)
+ return 0;
+
+ ret = usb4_dp_port_set_cm_bw_mode_supported(in, true);
+ if (ret)
+ return ret;
+
+ ret = usb4_dp_port_set_group_id(in, in->group->index);
+ if (ret)
+ return ret;
+
+ /*
+ * Get the non-reduced rate and lanes based on the lowest
+ * capability of both adapters.
+ */
+ ret = tb_port_read(in, &in_dp_cap, TB_CFG_PORT,
+ in->cap_adap + DP_LOCAL_CAP, 1);
+ if (ret)
+ return ret;
+
+ ret = tb_port_read(out, &out_dp_cap, TB_CFG_PORT,
+ out->cap_adap + DP_LOCAL_CAP, 1);
+ if (ret)
+ return ret;
+
+ in_rate = tb_dp_cap_get_rate(in_dp_cap);
+ in_lanes = tb_dp_cap_get_lanes(in_dp_cap);
+ out_rate = tb_dp_cap_get_rate(out_dp_cap);
+ out_lanes = tb_dp_cap_get_lanes(out_dp_cap);
+
+ rate = min(in_rate, out_rate);
+ lanes = min(in_lanes, out_lanes);
+ tmp = tb_dp_bandwidth(rate, lanes);
+
+ tb_port_dbg(in, "non-reduced bandwidth %u Mb/s x%u = %u Mb/s\n", rate,
+ lanes, tmp);
+
+ ret = usb4_dp_port_set_nrd(in, rate, lanes);
+ if (ret)
+ return ret;
+
+ for (granularity = 250; tmp / granularity > 255 && granularity <= 1000;
+ granularity *= 2)
+ ;
+
+ tb_port_dbg(in, "granularity %d Mb/s\n", granularity);
+
+ /*
+ * Returns -EINVAL if granularity above is outside of the
+ * accepted ranges.
+ */
+ ret = usb4_dp_port_set_granularity(in, granularity);
+ if (ret)
+ return ret;
+
+ /*
+ * Bandwidth estimation is pretty much what we have in
+ * max_up/down fields. For discovery we just read what the
+ * estimation was set to.
+ */
+ if (in->sw->config.depth < out->sw->config.depth)
+ estimated_bw = tunnel->max_down;
+ else
+ estimated_bw = tunnel->max_up;
+
+ tb_port_dbg(in, "estimated bandwidth %d Mb/s\n", estimated_bw);
+
+ ret = usb4_dp_port_set_estimated_bw(in, estimated_bw);
+ if (ret)
+ return ret;
+
+ /* Initial allocation should be 0 according the spec */
+ ret = usb4_dp_port_allocate_bw(in, 0);
+ if (ret)
+ return ret;
+
+ tb_port_dbg(in, "bandwidth allocation mode enabled\n");
+ return 0;
+}
+
+static int tb_dp_init(struct tb_tunnel *tunnel)
+{
+ struct tb_port *in = tunnel->src_port;
+ struct tb_switch *sw = in->sw;
+ struct tb *tb = in->sw->tb;
+ int ret;
+
+ ret = tb_dp_xchg_caps(tunnel);
+ if (ret)
+ return ret;
+
+ if (!tb_switch_is_usb4(sw))
+ return 0;
+
+ if (!usb4_dp_port_bw_mode_supported(in))
+ return 0;
+
+ tb_port_dbg(in, "bandwidth allocation mode supported\n");
+
+ ret = usb4_dp_port_set_cm_id(in, tb->index);
+ if (ret)
+ return ret;
+
+ return tb_dp_bw_alloc_mode_enable(tunnel);
+}
+
+static void tb_dp_deinit(struct tb_tunnel *tunnel)
+{
+ struct tb_port *in = tunnel->src_port;
+
+ if (!usb4_dp_port_bw_mode_supported(in))
+ return;
+ if (usb4_dp_port_bw_mode_enabled(in)) {
+ usb4_dp_port_set_cm_bw_mode_supported(in, false);
+ tb_port_dbg(in, "bandwidth allocation mode disabled\n");
+ }
+}
+
static int tb_dp_activate(struct tb_tunnel *tunnel, bool active)
{
int ret;
@@ -634,49 +768,275 @@ static int tb_dp_activate(struct tb_tunnel *tunnel, bool active)
return 0;
}
+/* max_bw is rounded up to next granularity */
+static int tb_dp_nrd_bandwidth(struct tb_tunnel *tunnel, int *max_bw)
+{
+ struct tb_port *in = tunnel->src_port;
+ int ret, rate, lanes, nrd_bw;
+
+ ret = usb4_dp_port_nrd(in, &rate, &lanes);
+ if (ret)
+ return ret;
+
+ nrd_bw = tb_dp_bandwidth(rate, lanes);
+
+ if (max_bw) {
+ ret = usb4_dp_port_granularity(in);
+ if (ret < 0)
+ return ret;
+ *max_bw = roundup(nrd_bw, ret);
+ }
+
+ return nrd_bw;
+}
+
+static int tb_dp_bw_mode_consumed_bandwidth(struct tb_tunnel *tunnel,
+ int *consumed_up, int *consumed_down)
+{
+ struct tb_port *out = tunnel->dst_port;
+ struct tb_port *in = tunnel->src_port;
+ int ret, allocated_bw, max_bw;
+
+ if (!usb4_dp_port_bw_mode_enabled(in))
+ return -EOPNOTSUPP;
+
+ if (!tunnel->bw_mode)
+ return -EOPNOTSUPP;
+
+ /* Read what was allocated previously if any */
+ ret = usb4_dp_port_allocated_bw(in);
+ if (ret < 0)
+ return ret;
+ allocated_bw = ret;
+
+ ret = tb_dp_nrd_bandwidth(tunnel, &max_bw);
+ if (ret < 0)
+ return ret;
+ if (allocated_bw == max_bw)
+ allocated_bw = ret;
+
+ tb_port_dbg(in, "consumed bandwidth through allocation mode %d Mb/s\n",
+ allocated_bw);
+
+ if (in->sw->config.depth < out->sw->config.depth) {
+ *consumed_up = 0;
+ *consumed_down = allocated_bw;
+ } else {
+ *consumed_up = allocated_bw;
+ *consumed_down = 0;
+ }
+
+ return 0;
+}
+
+static int tb_dp_allocated_bandwidth(struct tb_tunnel *tunnel, int *allocated_up,
+ int *allocated_down)
+{
+ struct tb_port *out = tunnel->dst_port;
+ struct tb_port *in = tunnel->src_port;
+
+ /*
+ * If we have already set the allocated bandwidth then use that.
+ * Otherwise we read it from the DPRX.
+ */
+ if (usb4_dp_port_bw_mode_enabled(in) && tunnel->bw_mode) {
+ int ret, allocated_bw, max_bw;
+
+ ret = usb4_dp_port_allocated_bw(in);
+ if (ret < 0)
+ return ret;
+ allocated_bw = ret;
+
+ ret = tb_dp_nrd_bandwidth(tunnel, &max_bw);
+ if (ret < 0)
+ return ret;
+ if (allocated_bw == max_bw)
+ allocated_bw = ret;
+
+ if (in->sw->config.depth < out->sw->config.depth) {
+ *allocated_up = 0;
+ *allocated_down = allocated_bw;
+ } else {
+ *allocated_up = allocated_bw;
+ *allocated_down = 0;
+ }
+ return 0;
+ }
+
+ return tunnel->consumed_bandwidth(tunnel, allocated_up,
+ allocated_down);
+}
+
+static int tb_dp_alloc_bandwidth(struct tb_tunnel *tunnel, int *alloc_up,
+ int *alloc_down)
+{
+ struct tb_port *out = tunnel->dst_port;
+ struct tb_port *in = tunnel->src_port;
+ int max_bw, ret, tmp;
+
+ if (!usb4_dp_port_bw_mode_enabled(in))
+ return -EOPNOTSUPP;
+
+ ret = tb_dp_nrd_bandwidth(tunnel, &max_bw);
+ if (ret < 0)
+ return ret;
+
+ if (in->sw->config.depth < out->sw->config.depth) {
+ tmp = min(*alloc_down, max_bw);
+ ret = usb4_dp_port_allocate_bw(in, tmp);
+ if (ret)
+ return ret;
+ *alloc_down = tmp;
+ *alloc_up = 0;
+ } else {
+ tmp = min(*alloc_up, max_bw);
+ ret = usb4_dp_port_allocate_bw(in, tmp);
+ if (ret)
+ return ret;
+ *alloc_down = 0;
+ *alloc_up = tmp;
+ }
+
+ /* Now we can use BW mode registers to figure out the bandwidth */
+ /* TODO: need to handle discovery too */
+ tunnel->bw_mode = true;
+ return 0;
+}
+
+static int tb_dp_read_dprx(struct tb_tunnel *tunnel, u32 *rate, u32 *lanes,
+ int timeout_msec)
+{
+ ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
+ struct tb_port *in = tunnel->src_port;
+
+ /*
+ * Wait for DPRX done. Normally it should be already set for
+ * active tunnel.
+ */
+ do {
+ u32 val;
+ int ret;
+
+ ret = tb_port_read(in, &val, TB_CFG_PORT,
+ in->cap_adap + DP_COMMON_CAP, 1);
+ if (ret)
+ return ret;
+
+ if (val & DP_COMMON_CAP_DPRX_DONE) {
+ *rate = tb_dp_cap_get_rate(val);
+ *lanes = tb_dp_cap_get_lanes(val);
+
+ tb_port_dbg(in, "consumed bandwidth through DPRX %d Mb/s\n",
+ tb_dp_bandwidth(*rate, *lanes));
+ return 0;
+ }
+ usleep_range(100, 150);
+ } while (ktime_before(ktime_get(), timeout));
+
+ return -ETIMEDOUT;
+}
+
+/* Read cap from tunnel DP IN */
+static int tb_dp_read_cap(struct tb_tunnel *tunnel, unsigned int cap, u32 *rate,
+ u32 *lanes)
+{
+ struct tb_port *in = tunnel->src_port;
+ u32 val;
+ int ret;
+
+ switch (cap) {
+ case DP_LOCAL_CAP:
+ case DP_REMOTE_CAP:
+ break;
+
+ default:
+ tb_tunnel_WARN(tunnel, "invalid capability index %#x\n", cap);
+ return -EINVAL;
+ }
+
+ /*
+ * Read from the copied remote cap so that we take into account
+ * if capabilities were reduced during exchange.
+ */
+ ret = tb_port_read(in, &val, TB_CFG_PORT, in->cap_adap + cap, 1);
+ if (ret)
+ return ret;
+
+ *rate = tb_dp_cap_get_rate(val);
+ *lanes = tb_dp_cap_get_lanes(val);
+
+ tb_port_dbg(in, "bandwidth from %#x capability %d Mb/s\n", cap,
+ tb_dp_bandwidth(*rate, *lanes));
+ return 0;
+}
+
+static int tb_dp_maximum_bandwidth(struct tb_tunnel *tunnel, int *max_up,
+ int *max_down)
+{
+ struct tb_port *in = tunnel->src_port;
+ u32 rate, lanes;
+ int ret;
+
+ /*
+ * DP IN adapter DP_LOCAL_CAP gets updated to the lowest AUX read
+ * parameter values so this so we can use this to determine the
+ * maximum possible bandwidth over this link.
+ */
+ ret = tb_dp_read_cap(tunnel, DP_LOCAL_CAP, &rate, &lanes);
+ if (ret)
+ return ret;
+
+ if (in->sw->config.depth < tunnel->dst_port->sw->config.depth) {
+ *max_up = 0;
+ *max_down = tb_dp_bandwidth(rate, lanes);
+ } else {
+ *max_up = tb_dp_bandwidth(rate, lanes);
+ *max_down = 0;
+ }
+
+ return 0;
+}
+
static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
int *consumed_down)
{
struct tb_port *in = tunnel->src_port;
const struct tb_switch *sw = in->sw;
- u32 val, rate = 0, lanes = 0;
+ u32 rate = 0, lanes = 0;
int ret;
if (tb_dp_is_usb4(sw)) {
- int timeout = 20;
-
/*
- * Wait for DPRX done. Normally it should be already set
- * for active tunnel.
+ * On USB4 routers check if the bandwidth allocation
+ * mode is enabled first and then read the bandwidth
+ * through those registers.
*/
- do {
- ret = tb_port_read(in, &val, TB_CFG_PORT,
- in->cap_adap + DP_COMMON_CAP, 1);
- if (ret)
+ ret = tb_dp_bw_mode_consumed_bandwidth(tunnel, consumed_up,
+ consumed_down);
+ if (ret < 0) {
+ if (ret != -EOPNOTSUPP)
return ret;
-
- if (val & DP_COMMON_CAP_DPRX_DONE) {
- rate = tb_dp_cap_get_rate(val);
- lanes = tb_dp_cap_get_lanes(val);
- break;
- }
- msleep(250);
- } while (timeout--);
-
- if (!timeout)
- return -ETIMEDOUT;
- } else if (sw->generation >= 2) {
+ } else if (!ret) {
+ return 0;
+ }
/*
- * Read from the copied remote cap so that we take into
- * account if capabilities were reduced during exchange.
+ * Then see if the DPRX negotiation is ready and if yes
+ * return that bandwidth (it may be smaller than the
+ * reduced one). Otherwise return the remote (possibly
+ * reduced) caps.
*/
- ret = tb_port_read(in, &val, TB_CFG_PORT,
- in->cap_adap + DP_REMOTE_CAP, 1);
+ ret = tb_dp_read_dprx(tunnel, &rate, &lanes, 150);
+ if (ret) {
+ if (ret == -ETIMEDOUT)
+ ret = tb_dp_read_cap(tunnel, DP_REMOTE_CAP,
+ &rate, &lanes);
+ if (ret)
+ return ret;
+ }
+ } else if (sw->generation >= 2) {
+ ret = tb_dp_read_cap(tunnel, DP_REMOTE_CAP, &rate, &lanes);
if (ret)
return ret;
-
- rate = tb_dp_cap_get_rate(val);
- lanes = tb_dp_cap_get_lanes(val);
} else {
/* No bandwidth management for legacy devices */
*consumed_up = 0;
@@ -798,8 +1158,12 @@ struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in,
if (!tunnel)
return NULL;
- tunnel->init = tb_dp_xchg_caps;
+ tunnel->init = tb_dp_init;
+ tunnel->deinit = tb_dp_deinit;
tunnel->activate = tb_dp_activate;
+ tunnel->maximum_bandwidth = tb_dp_maximum_bandwidth;
+ tunnel->allocated_bandwidth = tb_dp_allocated_bandwidth;
+ tunnel->alloc_bandwidth = tb_dp_alloc_bandwidth;
tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth;
tunnel->src_port = in;
@@ -887,8 +1251,12 @@ struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
if (!tunnel)
return NULL;
- tunnel->init = tb_dp_xchg_caps;
+ tunnel->init = tb_dp_init;
+ tunnel->deinit = tb_dp_deinit;
tunnel->activate = tb_dp_activate;
+ tunnel->maximum_bandwidth = tb_dp_maximum_bandwidth;
+ tunnel->allocated_bandwidth = tb_dp_allocated_bandwidth;
+ tunnel->alloc_bandwidth = tb_dp_alloc_bandwidth;
tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth;
tunnel->src_port = in;
tunnel->dst_port = out;
@@ -1714,6 +2082,72 @@ static bool tb_tunnel_is_active(const struct tb_tunnel *tunnel)
}
/**
+ * tb_tunnel_maximum_bandwidth() - Return maximum possible bandwidth
+ * @tunnel: Tunnel to check
+ * @max_up: Maximum upstream bandwidth in Mb/s
+ * @max_down: Maximum downstream bandwidth in Mb/s
+ *
+ * Returns maximum possible bandwidth this tunnel can go if not limited
+ * by other bandwidth clients. If the tunnel does not support this
+ * returns %-EOPNOTSUPP.
+ */
+int tb_tunnel_maximum_bandwidth(struct tb_tunnel *tunnel, int *max_up,
+ int *max_down)
+{
+ if (!tb_tunnel_is_active(tunnel))
+ return -EINVAL;
+
+ if (tunnel->maximum_bandwidth)
+ return tunnel->maximum_bandwidth(tunnel, max_up, max_down);
+ return -EOPNOTSUPP;
+}
+
+/**
+ * tb_tunnel_allocated_bandwidth() - Return bandwidth allocated for the tunnel
+ * @tunnel: Tunnel to check
+ * @allocated_up: Currently allocated upstream bandwidth in Mb/s is stored here
+ * @allocated_down: Currently allocated downstream bandwidth in Mb/s is
+ * stored here
+ *
+ * Returns the bandwidth allocated for the tunnel. This may be higher
+ * than what the tunnel actually consumes.
+ */
+int tb_tunnel_allocated_bandwidth(struct tb_tunnel *tunnel, int *allocated_up,
+ int *allocated_down)
+{
+ if (!tb_tunnel_is_active(tunnel))
+ return -EINVAL;
+
+ if (tunnel->allocated_bandwidth)
+ return tunnel->allocated_bandwidth(tunnel, allocated_up,
+ allocated_down);
+ return -EOPNOTSUPP;
+}
+
+/**
+ * tb_tunnel_alloc_bandwidth() - Change tunnel bandwidth allocation
+ * @tunnel: Tunnel whose bandwidth allocation to change
+ * @alloc_up: New upstream bandwidth in Mb/s
+ * @alloc_down: New downstream bandwidth in Mb/s
+ *
+ * Tries to change tunnel bandwidth allocation. If succeeds returns %0
+ * and updates @alloc_up and @alloc_down to that was actually allocated
+ * (it may not be the same as passed originally). Returns negative errno
+ * in case of failure.
+ */
+int tb_tunnel_alloc_bandwidth(struct tb_tunnel *tunnel, int *alloc_up,
+ int *alloc_down)
+{
+ if (!tb_tunnel_is_active(tunnel))
+ return -EINVAL;
+
+ if (tunnel->alloc_bandwidth)
+ return tunnel->alloc_bandwidth(tunnel, alloc_up, alloc_down);
+
+ return -EOPNOTSUPP;
+}
+
+/**
* tb_tunnel_consumed_bandwidth() - Return bandwidth consumed by the tunnel
* @tunnel: Tunnel to check
* @consumed_up: Consumed bandwidth in Mb/s from @dst_port to @src_port.
diff --git a/drivers/thunderbolt/tunnel.h b/drivers/thunderbolt/tunnel.h
index bb4d1f1d6d0b..bf690f7beeee 100644
--- a/drivers/thunderbolt/tunnel.h
+++ b/drivers/thunderbolt/tunnel.h
@@ -29,6 +29,9 @@ enum tb_tunnel_type {
* @init: Optional tunnel specific initialization
* @deinit: Optional tunnel specific de-initialization
* @activate: Optional tunnel specific activation/deactivation
+ * @maximum_bandwidth: Returns maximum possible bandwidth for this tunnel
+ * @allocated_bandwidth: Return how much bandwidth is allocated for the tunnel
+ * @alloc_bandwidth: Change tunnel bandwidth allocation
* @consumed_bandwidth: Return how much bandwidth the tunnel consumes
* @release_unused_bandwidth: Release all unused bandwidth
* @reclaim_available_bandwidth: Reclaim back available bandwidth
@@ -40,6 +43,8 @@ enum tb_tunnel_type {
* Only set if the bandwidth needs to be limited.
* @allocated_up: Allocated upstream bandwidth (only for USB3)
* @allocated_down: Allocated downstream bandwidth (only for USB3)
+ * @bw_mode: DP bandwidth allocation mode registers can be used to
+ * determine consumed and allocated bandwidth
*/
struct tb_tunnel {
struct tb *tb;
@@ -50,6 +55,12 @@ struct tb_tunnel {
int (*init)(struct tb_tunnel *tunnel);
void (*deinit)(struct tb_tunnel *tunnel);
int (*activate)(struct tb_tunnel *tunnel, bool activate);
+ int (*maximum_bandwidth)(struct tb_tunnel *tunnel, int *max_up,
+ int *max_down);
+ int (*allocated_bandwidth)(struct tb_tunnel *tunnel, int *allocated_up,
+ int *allocated_down);
+ int (*alloc_bandwidth)(struct tb_tunnel *tunnel, int *alloc_up,
+ int *alloc_down);
int (*consumed_bandwidth)(struct tb_tunnel *tunnel, int *consumed_up,
int *consumed_down);
int (*release_unused_bandwidth)(struct tb_tunnel *tunnel);
@@ -62,6 +73,7 @@ struct tb_tunnel {
int max_down;
int allocated_up;
int allocated_down;
+ bool bw_mode;
};
struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down,
@@ -92,6 +104,12 @@ void tb_tunnel_deactivate(struct tb_tunnel *tunnel);
bool tb_tunnel_is_invalid(struct tb_tunnel *tunnel);
bool tb_tunnel_port_on_path(const struct tb_tunnel *tunnel,
const struct tb_port *port);
+int tb_tunnel_maximum_bandwidth(struct tb_tunnel *tunnel, int *max_up,
+ int *max_down);
+int tb_tunnel_allocated_bandwidth(struct tb_tunnel *tunnel, int *allocated_up,
+ int *allocated_down);
+int tb_tunnel_alloc_bandwidth(struct tb_tunnel *tunnel, int *alloc_up,
+ int *alloc_down);
int tb_tunnel_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
int *consumed_down);
int tb_tunnel_release_unused_bandwidth(struct tb_tunnel *tunnel);
diff --git a/drivers/thunderbolt/usb4.c b/drivers/thunderbolt/usb4.c
index 2ed50fcbcca7..1e5e9c147a31 100644
--- a/drivers/thunderbolt/usb4.c
+++ b/drivers/thunderbolt/usb4.c
@@ -2186,3 +2186,575 @@ err_request:
usb4_usb3_port_clear_cm_request(port);
return ret;
}
+
+static bool is_usb4_dpin(const struct tb_port *port)
+{
+ if (!tb_port_is_dpin(port))
+ return false;
+ if (!tb_switch_is_usb4(port->sw))
+ return false;
+ return true;
+}
+
+/**
+ * usb4_dp_port_set_cm_id() - Assign CM ID to the DP IN adapter
+ * @port: DP IN adapter
+ * @cm_id: CM ID to assign
+ *
+ * Sets CM ID for the @port. Returns %0 on success and negative errno
+ * otherwise. Speficially returns %-EOPNOTSUPP if the @port does not
+ * support this.
+ */
+int usb4_dp_port_set_cm_id(struct tb_port *port, int cm_id)
+{
+ u32 val;
+ int ret;
+
+ if (!is_usb4_dpin(port))
+ return -EOPNOTSUPP;
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT,
+ port->cap_adap + ADP_DP_CS_2, 1);
+ if (ret)
+ return ret;
+
+ val &= ~ADP_DP_CS_2_CM_ID_MASK;
+ val |= cm_id << ADP_DP_CS_2_CM_ID_SHIFT;
+
+ return tb_port_write(port, &val, TB_CFG_PORT,
+ port->cap_adap + ADP_DP_CS_2, 1);
+}
+
+/**
+ * usb4_dp_port_bw_mode_supported() - Is the bandwidth allocation mode supported
+ * @port: DP IN adapter to check
+ *
+ * Can be called to any DP IN adapter. Returns true if the adapter
+ * supports USB4 bandwidth allocation mode, false otherwise.
+ */
+bool usb4_dp_port_bw_mode_supported(struct tb_port *port)
+{
+ int ret;
+ u32 val;
+
+ if (!is_usb4_dpin(port))
+ return false;
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT,
+ port->cap_adap + DP_LOCAL_CAP, 1);
+ if (ret)
+ return false;
+
+ return !!(val & DP_COMMON_CAP_BW_MODE);
+}
+
+/**
+ * usb4_dp_port_bw_mode_enabled() - Is the bandwidth allocation mode enabled
+ * @port: DP IN adapter to check
+ *
+ * Can be called to any DP IN adapter. Returns true if the bandwidth
+ * allocation mode has been enabled, false otherwise.
+ */
+bool usb4_dp_port_bw_mode_enabled(struct tb_port *port)
+{
+ int ret;
+ u32 val;
+
+ if (!is_usb4_dpin(port))
+ return false;
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT,
+ port->cap_adap + ADP_DP_CS_8, 1);
+ if (ret)
+ return false;
+
+ return !!(val & ADP_DP_CS_8_DPME);
+}
+
+/**
+ * usb4_dp_port_set_cm_bw_mode_supported() - Set/clear CM support for bandwidth allocation mode
+ * @port: DP IN adapter
+ * @supported: Does the CM support bandwidth allocation mode
+ *
+ * Can be called to any DP IN adapter. Sets or clears the CM support bit
+ * of the DP IN adapter. Returns %0 in success and negative errno
+ * otherwise. Specifically returns %-OPNOTSUPP if the passed in adapter
+ * does not support this.
+ */
+int usb4_dp_port_set_cm_bw_mode_supported(struct tb_port *port, bool supported)
+{
+ u32 val;
+ int ret;
+
+ if (!is_usb4_dpin(port))
+ return -EOPNOTSUPP;
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT,
+ port->cap_adap + ADP_DP_CS_2, 1);
+ if (ret)
+ return ret;
+
+ if (supported)
+ val |= ADP_DP_CS_2_CMMS;
+ else
+ val &= ~ADP_DP_CS_2_CMMS;
+
+ return tb_port_write(port, &val, TB_CFG_PORT,
+ port->cap_adap + ADP_DP_CS_2, 1);
+}
+
+/**
+ * usb4_dp_port_group_id() - Return Group ID assigned for the adapter
+ * @port: DP IN adapter
+ *
+ * Reads bandwidth allocation Group ID from the DP IN adapter and
+ * returns it. If the adapter does not support setting Group_ID
+ * %-EOPNOTSUPP is returned.
+ */
+int usb4_dp_port_group_id(struct tb_port *port)
+{
+ u32 val;
+ int ret;
+
+ if (!is_usb4_dpin(port))
+ return -EOPNOTSUPP;
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT,
+ port->cap_adap + ADP_DP_CS_2, 1);
+ if (ret)
+ return ret;
+
+ return (val & ADP_DP_CS_2_GROUP_ID_MASK) >> ADP_DP_CS_2_GROUP_ID_SHIFT;
+}
+
+/**
+ * usb4_dp_port_set_group_id() - Set adapter Group ID
+ * @port: DP IN adapter
+ * @group_id: Group ID for the adapter
+ *
+ * Sets bandwidth allocation mode Group ID for the DP IN adapter.
+ * Returns %0 in case of success and negative errno otherwise.
+ * Specifically returns %-EOPNOTSUPP if the adapter does not support
+ * this.
+ */
+int usb4_dp_port_set_group_id(struct tb_port *port, int group_id)
+{
+ u32 val;
+ int ret;
+
+ if (!is_usb4_dpin(port))
+ return -EOPNOTSUPP;
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT,
+ port->cap_adap + ADP_DP_CS_2, 1);
+ if (ret)
+ return ret;
+
+ val &= ~ADP_DP_CS_2_GROUP_ID_MASK;
+ val |= group_id << ADP_DP_CS_2_GROUP_ID_SHIFT;
+
+ return tb_port_write(port, &val, TB_CFG_PORT,
+ port->cap_adap + ADP_DP_CS_2, 1);
+}
+
+/**
+ * usb4_dp_port_nrd() - Read non-reduced rate and lanes
+ * @port: DP IN adapter
+ * @rate: Non-reduced rate in Mb/s is placed here
+ * @lanes: Non-reduced lanes are placed here
+ *
+ * Reads the non-reduced rate and lanes from the DP IN adapter. Returns
+ * %0 in success and negative errno otherwise. Specifically returns
+ * %-EOPNOTSUPP if the adapter does not support this.
+ */
+int usb4_dp_port_nrd(struct tb_port *port, int *rate, int *lanes)
+{
+ u32 val, tmp;
+ int ret;
+
+ if (!is_usb4_dpin(port))
+ return -EOPNOTSUPP;
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT,
+ port->cap_adap + ADP_DP_CS_2, 1);
+ if (ret)
+ return ret;
+
+ tmp = (val & ADP_DP_CS_2_NRD_MLR_MASK) >> ADP_DP_CS_2_NRD_MLR_SHIFT;
+ switch (tmp) {
+ case DP_COMMON_CAP_RATE_RBR:
+ *rate = 1620;
+ break;
+ case DP_COMMON_CAP_RATE_HBR:
+ *rate = 2700;
+ break;
+ case DP_COMMON_CAP_RATE_HBR2:
+ *rate = 5400;
+ break;
+ case DP_COMMON_CAP_RATE_HBR3:
+ *rate = 8100;
+ break;
+ }
+
+ tmp = val & ADP_DP_CS_2_NRD_MLC_MASK;
+ switch (tmp) {
+ case DP_COMMON_CAP_1_LANE:
+ *lanes = 1;
+ break;
+ case DP_COMMON_CAP_2_LANES:
+ *lanes = 2;
+ break;
+ case DP_COMMON_CAP_4_LANES:
+ *lanes = 4;
+ break;
+ }
+
+ return 0;
+}
+
+/**
+ * usb4_dp_port_set_nrd() - Set non-reduced rate and lanes
+ * @port: DP IN adapter
+ * @rate: Non-reduced rate in Mb/s
+ * @lanes: Non-reduced lanes
+ *
+ * Before the capabilities reduction this function can be used to set
+ * the non-reduced values for the DP IN adapter. Returns %0 in success
+ * and negative errno otherwise. If the adapter does not support this
+ * %-EOPNOTSUPP is returned.
+ */
+int usb4_dp_port_set_nrd(struct tb_port *port, int rate, int lanes)
+{
+ u32 val;
+ int ret;
+
+ if (!is_usb4_dpin(port))
+ return -EOPNOTSUPP;
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT,
+ port->cap_adap + ADP_DP_CS_2, 1);
+ if (ret)
+ return ret;
+
+ val &= ~ADP_DP_CS_2_NRD_MLR_MASK;
+
+ switch (rate) {
+ case 1620:
+ break;
+ case 2700:
+ val |= (DP_COMMON_CAP_RATE_HBR << ADP_DP_CS_2_NRD_MLR_SHIFT)
+ & ADP_DP_CS_2_NRD_MLR_MASK;
+ break;
+ case 5400:
+ val |= (DP_COMMON_CAP_RATE_HBR2 << ADP_DP_CS_2_NRD_MLR_SHIFT)
+ & ADP_DP_CS_2_NRD_MLR_MASK;
+ break;
+ case 8100:
+ val |= (DP_COMMON_CAP_RATE_HBR3 << ADP_DP_CS_2_NRD_MLR_SHIFT)
+ & ADP_DP_CS_2_NRD_MLR_MASK;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ val &= ~ADP_DP_CS_2_NRD_MLC_MASK;
+
+ switch (lanes) {
+ case 1:
+ break;
+ case 2:
+ val |= DP_COMMON_CAP_2_LANES;
+ break;
+ case 4:
+ val |= DP_COMMON_CAP_4_LANES;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return tb_port_write(port, &val, TB_CFG_PORT,
+ port->cap_adap + ADP_DP_CS_2, 1);
+}
+
+/**
+ * usb4_dp_port_granularity() - Return granularity for the bandwidth values
+ * @port: DP IN adapter
+ *
+ * Reads the programmed granularity from @port. If the DP IN adapter does
+ * not support bandwidth allocation mode returns %-EOPNOTSUPP and negative
+ * errno in other error cases.
+ */
+int usb4_dp_port_granularity(struct tb_port *port)
+{
+ u32 val;
+ int ret;
+
+ if (!is_usb4_dpin(port))
+ return -EOPNOTSUPP;
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT,
+ port->cap_adap + ADP_DP_CS_2, 1);
+ if (ret)
+ return ret;
+
+ val &= ADP_DP_CS_2_GR_MASK;
+ val >>= ADP_DP_CS_2_GR_SHIFT;
+
+ switch (val) {
+ case ADP_DP_CS_2_GR_0_25G:
+ return 250;
+ case ADP_DP_CS_2_GR_0_5G:
+ return 500;
+ case ADP_DP_CS_2_GR_1G:
+ return 1000;
+ }
+
+ return -EINVAL;
+}
+
+/**
+ * usb4_dp_port_set_granularity() - Set granularity for the bandwidth values
+ * @port: DP IN adapter
+ * @granularity: Granularity in Mb/s. Supported values: 1000, 500 and 250.
+ *
+ * Sets the granularity used with the estimated, allocated and requested
+ * bandwidth. Returns %0 in success and negative errno otherwise. If the
+ * adapter does not support this %-EOPNOTSUPP is returned.
+ */
+int usb4_dp_port_set_granularity(struct tb_port *port, int granularity)
+{
+ u32 val;
+ int ret;
+
+ if (!is_usb4_dpin(port))
+ return -EOPNOTSUPP;
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT,
+ port->cap_adap + ADP_DP_CS_2, 1);
+ if (ret)
+ return ret;
+
+ val &= ~ADP_DP_CS_2_GR_MASK;
+
+ switch (granularity) {
+ case 250:
+ val |= ADP_DP_CS_2_GR_0_25G << ADP_DP_CS_2_GR_SHIFT;
+ break;
+ case 500:
+ val |= ADP_DP_CS_2_GR_0_5G << ADP_DP_CS_2_GR_SHIFT;
+ break;
+ case 1000:
+ val |= ADP_DP_CS_2_GR_1G << ADP_DP_CS_2_GR_SHIFT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return tb_port_write(port, &val, TB_CFG_PORT,
+ port->cap_adap + ADP_DP_CS_2, 1);
+}
+
+/**
+ * usb4_dp_port_set_estimated_bw() - Set estimated bandwidth
+ * @port: DP IN adapter
+ * @bw: Estimated bandwidth in Mb/s.
+ *
+ * Sets the estimated bandwidth to @bw. Set the granularity by calling
+ * usb4_dp_port_set_granularity() before calling this. The @bw is round
+ * down to the closest granularity multiplier. Returns %0 in success
+ * and negative errno otherwise. Specifically returns %-EOPNOTSUPP if
+ * the adapter does not support this.
+ */
+int usb4_dp_port_set_estimated_bw(struct tb_port *port, int bw)
+{
+ u32 val, granularity;
+ int ret;
+
+ if (!is_usb4_dpin(port))
+ return -EOPNOTSUPP;
+
+ ret = usb4_dp_port_granularity(port);
+ if (ret < 0)
+ return ret;
+ granularity = ret;
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT,
+ port->cap_adap + ADP_DP_CS_2, 1);
+ if (ret)
+ return ret;
+
+ val &= ~ADP_DP_CS_2_ESTIMATED_BW_MASK;
+ val |= (bw / granularity) << ADP_DP_CS_2_ESTIMATED_BW_SHIFT;
+
+ return tb_port_write(port, &val, TB_CFG_PORT,
+ port->cap_adap + ADP_DP_CS_2, 1);
+}
+
+/**
+ * usb4_dp_port_allocated_bw() - Return allocated bandwidth
+ * @port: DP IN adapter
+ *
+ * Reads and returns allocated bandwidth for @port in Mb/s (taking into
+ * account the programmed granularity). Returns negative errno in case
+ * of error.
+ */
+int usb4_dp_port_allocated_bw(struct tb_port *port)
+{
+ u32 val, granularity;
+ int ret;
+
+ if (!is_usb4_dpin(port))
+ return -EOPNOTSUPP;
+
+ ret = usb4_dp_port_granularity(port);
+ if (ret < 0)
+ return ret;
+ granularity = ret;
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT,
+ port->cap_adap + DP_STATUS, 1);
+ if (ret)
+ return ret;
+
+ val &= DP_STATUS_ALLOCATED_BW_MASK;
+ val >>= DP_STATUS_ALLOCATED_BW_SHIFT;
+
+ return val * granularity;
+}
+
+static int __usb4_dp_port_set_cm_ack(struct tb_port *port, bool ack)
+{
+ u32 val;
+ int ret;
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT,
+ port->cap_adap + ADP_DP_CS_2, 1);
+ if (ret)
+ return ret;
+
+ if (ack)
+ val |= ADP_DP_CS_2_CA;
+ else
+ val &= ~ADP_DP_CS_2_CA;
+
+ return tb_port_write(port, &val, TB_CFG_PORT,
+ port->cap_adap + ADP_DP_CS_2, 1);
+}
+
+static inline int usb4_dp_port_set_cm_ack(struct tb_port *port)
+{
+ return __usb4_dp_port_set_cm_ack(port, true);
+}
+
+static int usb4_dp_port_wait_and_clear_cm_ack(struct tb_port *port,
+ int timeout_msec)
+{
+ ktime_t end;
+ u32 val;
+ int ret;
+
+ ret = __usb4_dp_port_set_cm_ack(port, false);
+ if (ret)
+ return ret;
+
+ end = ktime_add_ms(ktime_get(), timeout_msec);
+ do {
+ ret = tb_port_read(port, &val, TB_CFG_PORT,
+ port->cap_adap + ADP_DP_CS_8, 1);
+ if (ret)
+ return ret;
+
+ if (!(val & ADP_DP_CS_8_DR))
+ break;
+
+ usleep_range(50, 100);
+ } while (ktime_before(ktime_get(), end));
+
+ if (val & ADP_DP_CS_8_DR)
+ return -ETIMEDOUT;
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT,
+ port->cap_adap + ADP_DP_CS_2, 1);
+ if (ret)
+ return ret;
+
+ val &= ~ADP_DP_CS_2_CA;
+ return tb_port_write(port, &val, TB_CFG_PORT,
+ port->cap_adap + ADP_DP_CS_2, 1);
+}
+
+/**
+ * usb4_dp_port_allocate_bw() - Set allocated bandwidth
+ * @port: DP IN adapter
+ * @bw: New allocated bandwidth in Mb/s
+ *
+ * Communicates the new allocated bandwidth with the DPCD (graphics
+ * driver). Takes into account the programmed granularity. Returns %0 in
+ * success and negative errno in case of error.
+ */
+int usb4_dp_port_allocate_bw(struct tb_port *port, int bw)
+{
+ u32 val, granularity;
+ int ret;
+
+ if (!is_usb4_dpin(port))
+ return -EOPNOTSUPP;
+
+ ret = usb4_dp_port_granularity(port);
+ if (ret < 0)
+ return ret;
+ granularity = ret;
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT,
+ port->cap_adap + DP_STATUS, 1);
+ if (ret)
+ return ret;
+
+ val &= ~DP_STATUS_ALLOCATED_BW_MASK;
+ val |= (bw / granularity) << DP_STATUS_ALLOCATED_BW_SHIFT;
+
+ ret = tb_port_write(port, &val, TB_CFG_PORT,
+ port->cap_adap + DP_STATUS, 1);
+ if (ret)
+ return ret;
+
+ ret = usb4_dp_port_set_cm_ack(port);
+ if (ret)
+ return ret;
+
+ return usb4_dp_port_wait_and_clear_cm_ack(port, 500);
+}
+
+/**
+ * usb4_dp_port_requested_bw() - Read requested bandwidth
+ * @port: DP IN adapter
+ *
+ * Reads the DPCD (graphics driver) requested bandwidth and returns it
+ * in Mb/s. Takes the programmed granularity into account. In case of
+ * error returns negative errno. Specifically returns %-EOPNOTSUPP if
+ * the adapter does not support bandwidth allocation mode, and %ENODATA
+ * if there is no active bandwidth request from the graphics driver.
+ */
+int usb4_dp_port_requested_bw(struct tb_port *port)
+{
+ u32 val, granularity;
+ int ret;
+
+ if (!is_usb4_dpin(port))
+ return -EOPNOTSUPP;
+
+ ret = usb4_dp_port_granularity(port);
+ if (ret < 0)
+ return ret;
+ granularity = ret;
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT,
+ port->cap_adap + ADP_DP_CS_8, 1);
+ if (ret)
+ return ret;
+
+ if (!(val & ADP_DP_CS_8_DR))
+ return -ENODATA;
+
+ return (val & ADP_DP_CS_8_REQUESTED_BW_MASK) * granularity;
+}
diff --git a/drivers/thunderbolt/xdomain.c b/drivers/thunderbolt/xdomain.c
index 3c51e47dd86b..a48335c95d39 100644
--- a/drivers/thunderbolt/xdomain.c
+++ b/drivers/thunderbolt/xdomain.c
@@ -881,7 +881,7 @@ static ssize_t key_show(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR_RO(key);
-static int get_modalias(struct tb_service *svc, char *buf, size_t size)
+static int get_modalias(const struct tb_service *svc, char *buf, size_t size)
{
return snprintf(buf, size, "tbsvc:k%sp%08Xv%08Xr%08X", svc->key,
svc->prtcid, svc->prtcvers, svc->prtcrevs);
@@ -953,9 +953,9 @@ static const struct attribute_group *tb_service_attr_groups[] = {
NULL,
};
-static int tb_service_uevent(struct device *dev, struct kobj_uevent_env *env)
+static int tb_service_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
- struct tb_service *svc = container_of(dev, struct tb_service, dev);
+ const struct tb_service *svc = container_of_const(dev, struct tb_service, dev);
char modalias[64];
get_modalias(svc, modalias, sizeof(modalias));
diff --git a/drivers/tty/amiserial.c b/drivers/tty/amiserial.c
index f52266766df9..d7515d61659e 100644
--- a/drivers/tty/amiserial.c
+++ b/drivers/tty/amiserial.c
@@ -502,7 +502,7 @@ static int startup(struct tty_struct *tty, struct serial_state *info)
*/
change_speed(tty, info, NULL);
- tty_port_set_initialized(port, 1);
+ tty_port_set_initialized(port, true);
local_irq_restore(flags);
return 0;
@@ -556,7 +556,7 @@ static void shutdown(struct tty_struct *tty, struct serial_state *info)
set_bit(TTY_IO_ERROR, &tty->flags);
- tty_port_set_initialized(&info->tport, 0);
+ tty_port_set_initialized(&info->tport, false);
local_irq_restore(flags);
}
@@ -1329,7 +1329,7 @@ static void rs_hangup(struct tty_struct *tty)
rs_flush_buffer(tty);
shutdown(tty, info);
info->tport.count = 0;
- tty_port_set_active(&info->tport, 0);
+ tty_port_set_active(&info->tport, false);
info->tport.tty = NULL;
wake_up_interruptible(&info->tport.open_wait);
}
@@ -1454,18 +1454,18 @@ static const struct tty_operations serial_ops = {
.proc_show = rs_proc_show,
};
-static int amiga_carrier_raised(struct tty_port *port)
+static bool amiga_carrier_raised(struct tty_port *port)
{
return !(ciab.pra & SER_DCD);
}
-static void amiga_dtr_rts(struct tty_port *port, int raise)
+static void amiga_dtr_rts(struct tty_port *port, bool active)
{
struct serial_state *info = container_of(port, struct serial_state,
tport);
unsigned long flags;
- if (raise)
+ if (active)
info->MCR |= SER_DTR|SER_RTS;
else
info->MCR &= ~(SER_DTR|SER_RTS);
diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c
index a683e21df19c..10c10cfdf92a 100644
--- a/drivers/tty/hvc/hvc_console.c
+++ b/drivers/tty/hvc/hvc_console.c
@@ -376,7 +376,7 @@ static int hvc_open(struct tty_struct *tty, struct file * filp)
/* We are ready... raise DTR/RTS */
if (C_BAUD(tty))
if (hp->ops->dtr_rts)
- hp->ops->dtr_rts(hp, 1);
+ hp->ops->dtr_rts(hp, true);
tty_port_set_initialized(&hp->port, true);
}
@@ -406,7 +406,7 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
if (C_HUPCL(tty))
if (hp->ops->dtr_rts)
- hp->ops->dtr_rts(hp, 0);
+ hp->ops->dtr_rts(hp, false);
if (hp->ops->notifier_del)
hp->ops->notifier_del(hp, hp->data);
diff --git a/drivers/tty/hvc/hvc_console.h b/drivers/tty/hvc/hvc_console.h
index 18d005814e4b..9668f821db01 100644
--- a/drivers/tty/hvc/hvc_console.h
+++ b/drivers/tty/hvc/hvc_console.h
@@ -66,7 +66,7 @@ struct hv_ops {
int (*tiocmset)(struct hvc_struct *hp, unsigned int set, unsigned int clear);
/* Callbacks to handle tty ports */
- void (*dtr_rts)(struct hvc_struct *hp, int raise);
+ void (*dtr_rts)(struct hvc_struct *hp, bool active);
};
/* Register a vterm and a slot index for use as a console (console_init) */
diff --git a/drivers/tty/hvc/hvc_iucv.c b/drivers/tty/hvc/hvc_iucv.c
index 7d49a872de48..543f35ddf523 100644
--- a/drivers/tty/hvc/hvc_iucv.c
+++ b/drivers/tty/hvc/hvc_iucv.c
@@ -658,13 +658,13 @@ static void hvc_iucv_notifier_hangup(struct hvc_struct *hp, int id)
/**
* hvc_iucv_dtr_rts() - HVC notifier for handling DTR/RTS
* @hp: Pointer the HVC device (struct hvc_struct)
- * @raise: Non-zero to raise or zero to lower DTR/RTS lines
+ * @active: True to raise or false to lower DTR/RTS lines
*
* This routine notifies the HVC back-end to raise or lower DTR/RTS
* lines. Raising DTR/RTS is ignored. Lowering DTR/RTS indicates to
* drop the IUCV connection (similar to hang up the modem).
*/
-static void hvc_iucv_dtr_rts(struct hvc_struct *hp, int raise)
+static void hvc_iucv_dtr_rts(struct hvc_struct *hp, bool active)
{
struct hvc_iucv_private *priv;
struct iucv_path *path;
@@ -672,7 +672,7 @@ static void hvc_iucv_dtr_rts(struct hvc_struct *hp, int raise)
/* Raising the DTR/RTS is ignored as IUCV connections can be
* established at any times.
*/
- if (raise)
+ if (active)
return;
priv = hvc_iucv_get_private(hp->vtermno);
diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
index 4ba24963685e..1de1a09bf82d 100644
--- a/drivers/tty/hvc/hvcs.c
+++ b/drivers/tty/hvc/hvcs.c
@@ -52,6 +52,7 @@
#include <linux/device.h>
#include <linux/init.h>
+#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/kref.h>
@@ -285,6 +286,7 @@ struct hvcs_struct {
char p_location_code[HVCS_CLC_LENGTH + 1]; /* CLC + Null Term */
struct list_head next; /* list management */
struct vio_dev *vdev;
+ struct completion *destroyed;
};
static LIST_HEAD(hvcs_structs);
@@ -432,7 +434,7 @@ static ssize_t hvcs_index_show(struct device *dev, struct device_attribute *attr
static DEVICE_ATTR(index, S_IRUGO, hvcs_index_show, NULL);
-static struct attribute *hvcs_attrs[] = {
+static struct attribute *hvcs_dev_attrs[] = {
&dev_attr_partner_vtys.attr,
&dev_attr_partner_clcs.attr,
&dev_attr_current_vty.attr,
@@ -441,9 +443,7 @@ static struct attribute *hvcs_attrs[] = {
NULL,
};
-static struct attribute_group hvcs_attr_group = {
- .attrs = hvcs_attrs,
-};
+ATTRIBUTE_GROUPS(hvcs_dev);
static ssize_t rescan_show(struct device_driver *ddp, char *buf)
{
@@ -468,6 +468,13 @@ static ssize_t rescan_store(struct device_driver *ddp, const char * buf,
static DRIVER_ATTR_RW(rescan);
+static struct attribute *hvcs_attrs[] = {
+ &driver_attr_rescan.attr,
+ NULL,
+};
+
+ATTRIBUTE_GROUPS(hvcs);
+
static void hvcs_kick(void)
{
hvcs_kicked = 1;
@@ -658,11 +665,13 @@ static void hvcs_destruct_port(struct tty_port *p)
{
struct hvcs_struct *hvcsd = container_of(p, struct hvcs_struct, port);
struct vio_dev *vdev;
+ struct completion *comp;
unsigned long flags;
spin_lock(&hvcs_structs_lock);
spin_lock_irqsave(&hvcsd->lock, flags);
+ comp = hvcsd->destroyed;
/* the list_del poisons the pointers */
list_del(&(hvcsd->next));
@@ -682,15 +691,16 @@ static void hvcs_destruct_port(struct tty_port *p)
hvcsd->p_unit_address = 0;
hvcsd->p_partition_ID = 0;
+ hvcsd->destroyed = NULL;
hvcs_return_index(hvcsd->index);
memset(&hvcsd->p_location_code[0], 0x00, HVCS_CLC_LENGTH + 1);
spin_unlock_irqrestore(&hvcsd->lock, flags);
spin_unlock(&hvcs_structs_lock);
- sysfs_remove_group(&vdev->dev.kobj, &hvcs_attr_group);
-
kfree(hvcsd);
+ if (comp)
+ complete(comp);
}
static const struct tty_port_operations hvcs_port_ops = {
@@ -721,7 +731,6 @@ static int hvcs_probe(
{
struct hvcs_struct *hvcsd;
int index, rc;
- int retval;
if (!dev || !id) {
printk(KERN_ERR "HVCS: probed with invalid parameter.\n");
@@ -778,13 +787,6 @@ static int hvcs_probe(
list_add_tail(&(hvcsd->next), &hvcs_structs);
spin_unlock(&hvcs_structs_lock);
- retval = sysfs_create_group(&dev->dev.kobj, &hvcs_attr_group);
- if (retval) {
- printk(KERN_ERR "HVCS: Can't create sysfs attrs for vty-server@%X\n",
- hvcsd->vdev->unit_address);
- return retval;
- }
-
printk(KERN_INFO "HVCS: vty-server@%X added to the vio bus.\n", dev->unit_address);
/*
@@ -797,6 +799,7 @@ static int hvcs_probe(
static void hvcs_remove(struct vio_dev *dev)
{
struct hvcs_struct *hvcsd = dev_get_drvdata(&dev->dev);
+ DECLARE_COMPLETION_ONSTACK(comp);
unsigned long flags;
struct tty_struct *tty;
@@ -804,24 +807,22 @@ static void hvcs_remove(struct vio_dev *dev)
spin_lock_irqsave(&hvcsd->lock, flags);
- tty = hvcsd->port.tty;
+ hvcsd->destroyed = &comp;
+ tty = tty_port_tty_get(&hvcsd->port);
spin_unlock_irqrestore(&hvcsd->lock, flags);
/*
- * Let the last holder of this object cause it to be removed, which
- * would probably be tty_hangup below.
- */
- tty_port_put(&hvcsd->port);
-
- /*
- * The hangup is a scheduled function which will auto chain call
- * hvcs_hangup. The tty should always be valid at this time unless a
+ * The tty should always be valid at this time unless a
* simultaneous tty close already cleaned up the hvcs_struct.
*/
- if (tty)
- tty_hangup(tty);
+ if (tty) {
+ tty_vhangup(tty);
+ tty_kref_put(tty);
+ }
+ tty_port_put(&hvcsd->port);
+ wait_for_completion(&comp);
printk(KERN_INFO "HVCS: vty-server@%X removed from the"
" vio bus.\n", dev->unit_address);
};
@@ -831,6 +832,10 @@ static struct vio_driver hvcs_vio_driver = {
.probe = hvcs_probe,
.remove = hvcs_remove,
.name = hvcs_driver_name,
+ .driver = {
+ .groups = hvcs_groups,
+ .dev_groups = hvcs_dev_groups,
+ },
};
/* Only called from hvcs_get_pi please */
@@ -1171,7 +1176,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
hvcsd = tty->driver_data;
spin_lock_irqsave(&hvcsd->lock, flags);
- if (--hvcsd->port.count == 0) {
+ if (hvcsd->port.count == 0) {
+ spin_unlock_irqrestore(&hvcsd->lock, flags);
+ return;
+ } else if (--hvcsd->port.count == 0) {
vio_disable_interrupts(hvcsd->vdev);
@@ -1215,12 +1223,9 @@ static void hvcs_hangup(struct tty_struct * tty)
{
struct hvcs_struct *hvcsd = tty->driver_data;
unsigned long flags;
- int temp_open_count;
int irq;
spin_lock_irqsave(&hvcsd->lock, flags);
- /* Preserve this so that we know how many kref refs to put */
- temp_open_count = hvcsd->port.count;
/*
* Don't kref put inside the spinlock because the destruction
@@ -1230,11 +1235,7 @@ static void hvcs_hangup(struct tty_struct * tty)
vio_disable_interrupts(hvcsd->vdev);
hvcsd->todo_mask = 0;
-
- /* I don't think the tty needs the hvcs_struct pointer after a hangup */
- tty->driver_data = NULL;
hvcsd->port.tty = NULL;
-
hvcsd->port.count = 0;
/* This will drop any buffered data on the floor which is OK in a hangup
@@ -1247,21 +1248,6 @@ static void hvcs_hangup(struct tty_struct * tty)
spin_unlock_irqrestore(&hvcsd->lock, flags);
free_irq(irq, hvcsd);
-
- /*
- * We need to kref_put() for every open_count we have since the
- * tty_hangup() function doesn't invoke a close per open connection on a
- * non-console device.
- */
- while(temp_open_count) {
- --temp_open_count;
- /*
- * The final put will trigger destruction of the hvcs_struct.
- * NOTE: If this hangup was signaled from user space then the
- * final put will never happen.
- */
- tty_port_put(&hvcsd->port);
- }
}
/*
@@ -1525,13 +1511,6 @@ static int __init hvcs_module_init(void)
pr_info("HVCS: Driver registered.\n");
- /* This needs to be done AFTER the vio_register_driver() call or else
- * the kobjects won't be initialized properly.
- */
- rc = driver_create_file(&(hvcs_vio_driver.driver), &driver_attr_rescan);
- if (rc)
- pr_warn("HVCS: Failed to create rescan file (err %d)\n", rc);
-
return 0;
}
@@ -1556,8 +1535,6 @@ static void __exit hvcs_module_exit(void)
hvcs_pi_buff = NULL;
spin_unlock(&hvcs_pi_lock);
- driver_remove_file(&hvcs_vio_driver.driver, &driver_attr_rescan);
-
tty_unregister_driver(hvcs_tty_driver);
hvcs_free_index_list();
diff --git a/drivers/tty/moxa.c b/drivers/tty/moxa.c
index 35b6fddf0341..42fa4c878b2e 100644
--- a/drivers/tty/moxa.c
+++ b/drivers/tty/moxa.c
@@ -501,16 +501,16 @@ static int moxa_tiocmset(struct tty_struct *tty,
static void moxa_poll(struct timer_list *);
static void moxa_set_tty_param(struct tty_struct *, const struct ktermios *);
static void moxa_shutdown(struct tty_port *);
-static int moxa_carrier_raised(struct tty_port *);
-static void moxa_dtr_rts(struct tty_port *, int);
+static bool moxa_carrier_raised(struct tty_port *);
+static void moxa_dtr_rts(struct tty_port *, bool);
/*
* moxa board interface functions:
*/
static void MoxaPortEnable(struct moxa_port *);
static void MoxaPortDisable(struct moxa_port *);
static int MoxaPortSetTermio(struct moxa_port *, struct ktermios *, speed_t);
-static int MoxaPortGetLineOut(struct moxa_port *, int *, int *);
-static void MoxaPortLineCtrl(struct moxa_port *, int, int);
+static int MoxaPortGetLineOut(struct moxa_port *, bool *, bool *);
+static void MoxaPortLineCtrl(struct moxa_port *, bool, bool);
static void MoxaPortFlowCtrl(struct moxa_port *, int, int, int, int, int);
static int MoxaPortLineStatus(struct moxa_port *);
static void MoxaPortFlushData(struct moxa_port *, int);
@@ -1432,7 +1432,7 @@ static void moxa_shutdown(struct tty_port *port)
MoxaPortFlushData(ch, 2);
}
-static int moxa_carrier_raised(struct tty_port *port)
+static bool moxa_carrier_raised(struct tty_port *port)
{
struct moxa_port *ch = container_of(port, struct moxa_port, port);
int dcd;
@@ -1443,10 +1443,10 @@ static int moxa_carrier_raised(struct tty_port *port)
return dcd;
}
-static void moxa_dtr_rts(struct tty_port *port, int onoff)
+static void moxa_dtr_rts(struct tty_port *port, bool active)
{
struct moxa_port *ch = container_of(port, struct moxa_port, port);
- MoxaPortLineCtrl(ch, onoff, onoff);
+ MoxaPortLineCtrl(ch, active, active);
}
@@ -1481,10 +1481,10 @@ static int moxa_open(struct tty_struct *tty, struct file *filp)
if (!tty_port_initialized(&ch->port)) {
ch->statusflags = 0;
moxa_set_tty_param(tty, &tty->termios);
- MoxaPortLineCtrl(ch, 1, 1);
+ MoxaPortLineCtrl(ch, true, true);
MoxaPortEnable(ch);
MoxaSetFifo(ch, ch->type == PORT_16550A);
- tty_port_set_initialized(&ch->port, 1);
+ tty_port_set_initialized(&ch->port, true);
}
mutex_unlock(&ch->port.mutex);
mutex_unlock(&moxa_openlock);
@@ -1557,19 +1557,21 @@ static unsigned int moxa_chars_in_buffer(struct tty_struct *tty)
static int moxa_tiocmget(struct tty_struct *tty)
{
struct moxa_port *ch = tty->driver_data;
- int flag = 0, dtr, rts;
+ bool dtr_active, rts_active;
+ int flag = 0;
+ int status;
- MoxaPortGetLineOut(ch, &dtr, &rts);
- if (dtr)
+ MoxaPortGetLineOut(ch, &dtr_active, &rts_active);
+ if (dtr_active)
flag |= TIOCM_DTR;
- if (rts)
+ if (rts_active)
flag |= TIOCM_RTS;
- dtr = MoxaPortLineStatus(ch);
- if (dtr & 1)
+ status = MoxaPortLineStatus(ch);
+ if (status & 1)
flag |= TIOCM_CTS;
- if (dtr & 2)
+ if (status & 2)
flag |= TIOCM_DSR;
- if (dtr & 4)
+ if (status & 4)
flag |= TIOCM_CD;
return flag;
}
@@ -1577,8 +1579,8 @@ static int moxa_tiocmget(struct tty_struct *tty)
static int moxa_tiocmset(struct tty_struct *tty,
unsigned int set, unsigned int clear)
{
+ bool dtr_active, rts_active;
struct moxa_port *ch;
- int dtr, rts;
mutex_lock(&moxa_openlock);
ch = tty->driver_data;
@@ -1587,16 +1589,16 @@ static int moxa_tiocmset(struct tty_struct *tty,
return -EINVAL;
}
- MoxaPortGetLineOut(ch, &dtr, &rts);
+ MoxaPortGetLineOut(ch, &dtr_active, &rts_active);
if (set & TIOCM_RTS)
- rts = 1;
+ rts_active = true;
if (set & TIOCM_DTR)
- dtr = 1;
+ dtr_active = true;
if (clear & TIOCM_RTS)
- rts = 0;
+ rts_active = false;
if (clear & TIOCM_DTR)
- dtr = 0;
- MoxaPortLineCtrl(ch, dtr, rts);
+ dtr_active = false;
+ MoxaPortLineCtrl(ch, dtr_active, rts_active);
mutex_unlock(&moxa_openlock);
return 0;
}
@@ -1664,8 +1666,8 @@ static int moxa_poll_port(struct moxa_port *p, unsigned int handle,
u16 __iomem *ip)
{
struct tty_struct *tty = tty_port_tty_get(&p->port);
+ bool inited = tty_port_initialized(&p->port);
void __iomem *ofsAddr;
- unsigned int inited = tty_port_initialized(&p->port);
u16 intr;
if (tty) {
@@ -1877,12 +1879,12 @@ static void MoxaPortFlushData(struct moxa_port *port, int mode)
*
* Function 13: Get the DTR/RTS state of this port.
* Syntax:
- * int MoxaPortGetLineOut(int port, int *dtrState, int *rtsState);
+ * int MoxaPortGetLineOut(int port, bool *dtrState, bool *rtsState);
* int port : port number (0 - 127)
- * int * dtrState : pointer to INT to receive the current DTR
+ * bool * dtr_active : pointer to bool to receive the current DTR
* state. (if NULL, this function will not
* write to this address)
- * int * rtsState : pointer to INT to receive the current RTS
+ * bool * rts_active : pointer to bool to receive the current RTS
* state. (if NULL, this function will not
* write to this address)
*
@@ -1892,10 +1894,10 @@ static void MoxaPortFlushData(struct moxa_port *port, int mode)
*
* Function 14: Setting the DTR/RTS output state of this port.
* Syntax:
- * void MoxaPortLineCtrl(int port, int dtrState, int rtsState);
+ * void MoxaPortLineCtrl(int port, bool dtrState, bool rtsState);
* int port : port number (0 - 127)
- * int dtrState : DTR output state (0: off, 1: on)
- * int rtsState : RTS output state (0: off, 1: on)
+ * bool dtr_active : DTR output state
+ * bool rts_active : RTS output state
*
*
* Function 15: Setting the flow control of this port.
@@ -2103,24 +2105,24 @@ static int MoxaPortSetTermio(struct moxa_port *port, struct ktermios *termio,
return baud;
}
-static int MoxaPortGetLineOut(struct moxa_port *port, int *dtrState,
- int *rtsState)
+static int MoxaPortGetLineOut(struct moxa_port *port, bool *dtr_active,
+ bool *rts_active)
{
- if (dtrState)
- *dtrState = !!(port->lineCtrl & DTR_ON);
- if (rtsState)
- *rtsState = !!(port->lineCtrl & RTS_ON);
+ if (dtr_active)
+ *dtr_active = port->lineCtrl & DTR_ON;
+ if (rts_active)
+ *rts_active = port->lineCtrl & RTS_ON;
return 0;
}
-static void MoxaPortLineCtrl(struct moxa_port *port, int dtr, int rts)
+static void MoxaPortLineCtrl(struct moxa_port *port, bool dtr_active, bool rts_active)
{
u8 mode = 0;
- if (dtr)
+ if (dtr_active)
mode |= DTR_ON;
- if (rts)
+ if (rts_active)
mode |= RTS_ON;
port->lineCtrl = mode;
moxafunc(port->tableAddr, FC_LineControl, mode);
diff --git a/drivers/tty/mxser.c b/drivers/tty/mxser.c
index 2436e0b10f9a..ef3116e87975 100644
--- a/drivers/tty/mxser.c
+++ b/drivers/tty/mxser.c
@@ -458,13 +458,14 @@ static void __mxser_stop_tx(struct mxser_port *info)
outb(info->IER, info->ioaddr + UART_IER);
}
-static int mxser_carrier_raised(struct tty_port *port)
+static bool mxser_carrier_raised(struct tty_port *port)
{
struct mxser_port *mp = container_of(port, struct mxser_port, port);
- return (inb(mp->ioaddr + UART_MSR) & UART_MSR_DCD)?1:0;
+
+ return inb(mp->ioaddr + UART_MSR) & UART_MSR_DCD;
}
-static void mxser_dtr_rts(struct tty_port *port, int on)
+static void mxser_dtr_rts(struct tty_port *port, bool active)
{
struct mxser_port *mp = container_of(port, struct mxser_port, port);
unsigned long flags;
@@ -472,7 +473,7 @@ static void mxser_dtr_rts(struct tty_port *port, int on)
spin_lock_irqsave(&mp->slock, flags);
mcr = inb(mp->ioaddr + UART_MCR);
- if (on)
+ if (active)
mcr |= UART_MCR_DTR | UART_MCR_RTS;
else
mcr &= ~(UART_MCR_DTR | UART_MCR_RTS);
@@ -1063,7 +1064,7 @@ static int mxser_set_serial_info(struct tty_struct *tty,
} else {
retval = mxser_activate(port, tty);
if (retval == 0)
- tty_port_set_initialized(port, 1);
+ tty_port_set_initialized(port, true);
}
mutex_unlock(&port->mutex);
return retval;
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index 348d85c13f91..7aa05ebed8e1 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -318,6 +318,11 @@ struct gsm_mux {
struct gsm_control *pending_cmd;/* Our current pending command */
spinlock_t control_lock; /* Protects the pending command */
+ /* Keep-alive */
+ struct timer_list ka_timer; /* Keep-alive response timer */
+ u8 ka_num; /* Keep-alive match pattern */
+ signed int ka_retries; /* Keep-alive retry counter, -1 if not yet initialized */
+
/* Configuration */
int adaption; /* 1 or 2 supported */
u8 ftype; /* UI or UIH */
@@ -325,6 +330,7 @@ struct gsm_mux {
unsigned int t3; /* Power wake-up timer in seconds. */
int n2; /* Retry count */
u8 k; /* Window size */
+ u32 keep_alive; /* Control channel keep-alive in 10ms */
/* Statistics (not currently exposed) */
unsigned long bad_fcs;
@@ -540,6 +546,11 @@ static u8 gsm_encode_modem(const struct gsm_dlci *dlci)
modembits |= MDM_IC;
if (dlci->modem_tx & TIOCM_CD || dlci->gsm->initiator)
modembits |= MDM_DV;
+ /* special mappings for passive side to operate as UE */
+ if (dlci->modem_tx & TIOCM_OUT1)
+ modembits |= MDM_IC;
+ if (dlci->modem_tx & TIOCM_OUT2)
+ modembits |= MDM_DV;
return modembits;
}
@@ -1531,6 +1542,7 @@ static void gsm_process_modem(struct tty_struct *tty, struct gsm_dlci *dlci,
if (brk & 0x01)
tty_insert_flip_char(&dlci->port, 0, TTY_BREAK);
dlci->modem_rx = mlines;
+ wake_up_interruptible(&dlci->gsm->event);
}
/**
@@ -1897,11 +1909,13 @@ static void gsm_control_response(struct gsm_mux *gsm, unsigned int command,
const u8 *data, int clen)
{
struct gsm_control *ctrl;
+ struct gsm_dlci *dlci;
unsigned long flags;
spin_lock_irqsave(&gsm->control_lock, flags);
ctrl = gsm->pending_cmd;
+ dlci = gsm->dlci[0];
command |= 1;
/* Does the reply match our command */
if (ctrl != NULL && (command == ctrl->cmd || command == CMD_NSC)) {
@@ -1916,6 +1930,53 @@ static void gsm_control_response(struct gsm_mux *gsm, unsigned int command,
/* Or did we receive the PN response to our PN command */
} else if (command == CMD_PN) {
gsm_control_negotiation(gsm, 0, data, clen);
+ /* Or did we receive the TEST response to our TEST command */
+ } else if (command == CMD_TEST && clen == 1 && *data == gsm->ka_num) {
+ gsm->ka_retries = -1; /* trigger new keep-alive message */
+ if (dlci && !dlci->dead)
+ mod_timer(&gsm->ka_timer, jiffies + gsm->keep_alive * HZ / 100);
+ }
+ spin_unlock_irqrestore(&gsm->control_lock, flags);
+}
+
+/**
+ * gsm_control_keep_alive - check timeout or start keep-alive
+ * @t: timer contained in our gsm object
+ *
+ * Called off the keep-alive timer expiry signaling that our link
+ * partner is not responding anymore. Link will be closed.
+ * This is also called to startup our timer.
+ */
+
+static void gsm_control_keep_alive(struct timer_list *t)
+{
+ struct gsm_mux *gsm = from_timer(gsm, t, ka_timer);
+ unsigned long flags;
+
+ spin_lock_irqsave(&gsm->control_lock, flags);
+ if (gsm->ka_num && gsm->ka_retries == 0) {
+ /* Keep-alive expired -> close the link */
+ if (debug & DBG_ERRORS)
+ pr_debug("%s keep-alive timed out\n", __func__);
+ spin_unlock_irqrestore(&gsm->control_lock, flags);
+ if (gsm->dlci[0])
+ gsm_dlci_begin_close(gsm->dlci[0]);
+ return;
+ } else if (gsm->keep_alive && gsm->dlci[0] && !gsm->dlci[0]->dead) {
+ if (gsm->ka_retries > 0) {
+ /* T2 expired for keep-alive -> resend */
+ gsm->ka_retries--;
+ } else {
+ /* Start keep-alive timer */
+ gsm->ka_num++;
+ if (!gsm->ka_num)
+ gsm->ka_num++;
+ gsm->ka_retries = (signed int)gsm->n2;
+ }
+ gsm_control_command(gsm, CMD_TEST, &gsm->ka_num,
+ sizeof(gsm->ka_num));
+ mod_timer(&gsm->ka_timer,
+ jiffies + gsm->t2 * HZ / 100);
}
spin_unlock_irqrestore(&gsm->control_lock, flags);
}
@@ -2059,14 +2120,16 @@ static void gsm_dlci_close(struct gsm_dlci *dlci)
tty_port_tty_hangup(&dlci->port, false);
gsm_dlci_clear_queues(dlci->gsm, dlci);
/* Ensure that gsmtty_open() can return. */
- tty_port_set_initialized(&dlci->port, 0);
+ tty_port_set_initialized(&dlci->port, false);
wake_up_interruptible(&dlci->port.open_wait);
- } else
+ } else {
+ del_timer(&dlci->gsm->ka_timer);
dlci->gsm->dead = true;
+ }
/* A DLCI 0 close is a MUX termination so we need to kick that
back to userspace somehow */
gsm_dlci_data_kick(dlci);
- wake_up(&dlci->gsm->event);
+ wake_up_all(&dlci->gsm->event);
}
/**
@@ -2078,6 +2141,8 @@ static void gsm_dlci_close(struct gsm_dlci *dlci)
static void gsm_dlci_open(struct gsm_dlci *dlci)
{
+ struct gsm_mux *gsm = dlci->gsm;
+
/* Note that SABM UA .. SABM UA first UA lost can mean that we go
open -> open */
del_timer(&dlci->t1);
@@ -2087,8 +2152,15 @@ static void gsm_dlci_open(struct gsm_dlci *dlci)
if (debug & DBG_ERRORS)
pr_debug("DLCI %d goes open.\n", dlci->addr);
/* Send current modem state */
- if (dlci->addr)
+ if (dlci->addr) {
gsm_modem_update(dlci, 0);
+ } else {
+ /* Start keep-alive control */
+ gsm->ka_num = 0;
+ gsm->ka_retries = -1;
+ mod_timer(&gsm->ka_timer,
+ jiffies + gsm->keep_alive * HZ / 100);
+ }
gsm_dlci_data_kick(dlci);
wake_up(&dlci->gsm->event);
}
@@ -2267,6 +2339,7 @@ static void gsm_dlci_begin_close(struct gsm_dlci *dlci)
dlci->state = DLCI_CLOSING;
gsm_command(dlci->gsm, dlci->addr, DISC|PF);
mod_timer(&dlci->t1, jiffies + gsm->t1 * HZ / 100);
+ wake_up_interruptible(&gsm->event);
}
/**
@@ -2840,6 +2913,7 @@ static void gsm_cleanup_mux(struct gsm_mux *gsm, bool disc)
/* Finish outstanding timers, making sure they are done */
del_timer_sync(&gsm->kick_timer);
del_timer_sync(&gsm->t2_timer);
+ del_timer_sync(&gsm->ka_timer);
/* Finish writing to ldisc */
flush_work(&gsm->tx_work);
@@ -2987,6 +3061,7 @@ static struct gsm_mux *gsm_alloc_mux(void)
INIT_LIST_HEAD(&gsm->tx_data_list);
timer_setup(&gsm->kick_timer, gsm_kick_timer, 0);
timer_setup(&gsm->t2_timer, gsm_control_retransmit, 0);
+ timer_setup(&gsm->ka_timer, gsm_control_keep_alive, 0);
INIT_WORK(&gsm->tx_work, gsmld_write_task);
init_waitqueue_head(&gsm->event);
spin_lock_init(&gsm->control_lock);
@@ -3003,6 +3078,7 @@ static struct gsm_mux *gsm_alloc_mux(void)
gsm->mru = 64; /* Default to encoding 1 so these should be 64 */
gsm->mtu = 64;
gsm->dead = true; /* Avoid early tty opens */
+ gsm->keep_alive = 0; /* Disabled */
/* Store the instance to the mux array or abort if no space is
* available.
@@ -3138,6 +3214,29 @@ static int gsm_config(struct gsm_mux *gsm, struct gsm_config *c)
return 0;
}
+static void gsm_copy_config_ext_values(struct gsm_mux *gsm,
+ struct gsm_config_ext *ce)
+{
+ memset(ce, 0, sizeof(*ce));
+ ce->keep_alive = gsm->keep_alive;
+}
+
+static int gsm_config_ext(struct gsm_mux *gsm, struct gsm_config_ext *ce)
+{
+ unsigned int i;
+
+ /*
+ * Check that userspace doesn't put stuff in here to prevent breakages
+ * in the future.
+ */
+ for (i = 0; i < ARRAY_SIZE(ce->reserved); i++)
+ if (ce->reserved[i])
+ return -EINVAL;
+
+ gsm->keep_alive = ce->keep_alive;
+ return 0;
+}
+
/**
* gsmld_output - write to link
* @gsm: our mux
@@ -3456,6 +3555,7 @@ static int gsmld_ioctl(struct tty_struct *tty, unsigned int cmd,
unsigned long arg)
{
struct gsm_config c;
+ struct gsm_config_ext ce;
struct gsm_mux *gsm = tty->disc_data;
unsigned int base;
@@ -3472,6 +3572,15 @@ static int gsmld_ioctl(struct tty_struct *tty, unsigned int cmd,
case GSMIOC_GETFIRST:
base = mux_num_to_base(gsm);
return put_user(base + 1, (__u32 __user *)arg);
+ case GSMIOC_GETCONF_EXT:
+ gsm_copy_config_ext_values(gsm, &ce);
+ if (copy_to_user((void __user *)arg, &ce, sizeof(ce)))
+ return -EFAULT;
+ return 0;
+ case GSMIOC_SETCONF_EXT:
+ if (copy_from_user(&ce, (void __user *)arg, sizeof(ce)))
+ return -EFAULT;
+ return gsm_config_ext(gsm, &ce);
default:
return n_tty_ioctl_helper(tty, cmd, arg);
}
@@ -3770,16 +3879,43 @@ static int gsm_modem_update(struct gsm_dlci *dlci, u8 brk)
return -EPROTONOSUPPORT;
}
-static int gsm_carrier_raised(struct tty_port *port)
+/**
+ * gsm_wait_modem_change - wait for modem status line change
+ * @dlci: channel
+ * @mask: modem status line bits
+ *
+ * The function returns if:
+ * - any given modem status line bit changed
+ * - the wait event function got interrupted (e.g. by a signal)
+ * - the underlying DLCI was closed
+ * - the underlying ldisc device was removed
+ */
+static int gsm_wait_modem_change(struct gsm_dlci *dlci, u32 mask)
+{
+ struct gsm_mux *gsm = dlci->gsm;
+ u32 old = dlci->modem_rx;
+ int ret;
+
+ ret = wait_event_interruptible(gsm->event, gsm->dead ||
+ dlci->state != DLCI_OPEN ||
+ (old ^ dlci->modem_rx) & mask);
+ if (gsm->dead)
+ return -ENODEV;
+ if (dlci->state != DLCI_OPEN)
+ return -EL2NSYNC;
+ return ret;
+}
+
+static bool gsm_carrier_raised(struct tty_port *port)
{
struct gsm_dlci *dlci = container_of(port, struct gsm_dlci, port);
struct gsm_mux *gsm = dlci->gsm;
/* Not yet open so no carrier info */
if (dlci->state != DLCI_OPEN)
- return 0;
+ return false;
if (debug & DBG_CD_ON)
- return 1;
+ return true;
/*
* Basic mode with control channel in ADM mode may not respond
@@ -3787,16 +3923,16 @@ static int gsm_carrier_raised(struct tty_port *port)
*/
if (gsm->encoding == GSM_BASIC_OPT &&
gsm->dlci[0]->mode == DLCI_MODE_ADM && !dlci->modem_rx)
- return 1;
+ return true;
return dlci->modem_rx & TIOCM_CD;
}
-static void gsm_dtr_rts(struct tty_port *port, int onoff)
+static void gsm_dtr_rts(struct tty_port *port, bool active)
{
struct gsm_dlci *dlci = container_of(port, struct gsm_dlci, port);
unsigned int modem_tx = dlci->modem_tx;
- if (onoff)
+ if (active)
modem_tx |= TIOCM_DTR | TIOCM_RTS;
else
modem_tx &= ~(TIOCM_DTR | TIOCM_RTS);
@@ -3880,7 +4016,7 @@ static int gsmtty_open(struct tty_struct *tty, struct file *filp)
dlci->modem_rx = 0;
/* We could in theory open and close before we wait - eg if we get
a DM straight back. This is ok as that will have caused a hangup */
- tty_port_set_initialized(port, 1);
+ tty_port_set_initialized(port, true);
/* Start sending off SABM messages */
if (gsm->initiator)
gsm_dlci_begin_open(dlci);
@@ -4029,6 +4165,8 @@ static int gsmtty_ioctl(struct tty_struct *tty,
gsm_destroy_network(dlci);
mutex_unlock(&dlci->mutex);
return 0;
+ case TIOCMIWAIT:
+ return gsm_wait_modem_change(dlci, (u32)arg);
default:
return -ENOIOCTLCMD;
}
diff --git a/drivers/tty/serdev/core.c b/drivers/tty/serdev/core.c
index 0180e1e4e75d..aa80de3a8194 100644
--- a/drivers/tty/serdev/core.c
+++ b/drivers/tty/serdev/core.c
@@ -42,7 +42,7 @@ static struct attribute *serdev_device_attrs[] = {
};
ATTRIBUTE_GROUPS(serdev_device);
-static int serdev_device_uevent(struct device *dev, struct kobj_uevent_env *env)
+static int serdev_device_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
int rc;
diff --git a/drivers/tty/serial/8250/8250_dfl.c b/drivers/tty/serial/8250/8250_dfl.c
new file mode 100644
index 000000000000..6c5ff019df4b
--- /dev/null
+++ b/drivers/tty/serial/8250/8250_dfl.c
@@ -0,0 +1,167 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for FPGA UART
+ *
+ * Copyright (C) 2022 Intel Corporation.
+ *
+ * Authors:
+ * Ananda Ravuri <ananda.ravuri@intel.com>
+ * Matthew Gerlach <matthew.gerlach@linux.intel.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/dfl.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/types.h>
+
+#include <linux/serial.h>
+#include <linux/serial_8250.h>
+
+#define DFHv1_PARAM_ID_CLK_FRQ 0x2
+#define DFHv1_PARAM_ID_FIFO_LEN 0x3
+
+#define DFHv1_PARAM_ID_REG_LAYOUT 0x4
+#define DFHv1_PARAM_REG_LAYOUT_WIDTH GENMASK_ULL(63, 32)
+#define DFHv1_PARAM_REG_LAYOUT_SHIFT GENMASK_ULL(31, 0)
+
+struct dfl_uart {
+ int line;
+};
+
+static int dfh_get_u64_param_val(struct dfl_device *dfl_dev, int param_id, u64 *pval)
+{
+ size_t psize;
+ u64 *p;
+
+ p = dfh_find_param(dfl_dev, param_id, &psize);
+ if (IS_ERR(p))
+ return PTR_ERR(p);
+
+ if (psize != sizeof(*pval))
+ return -EINVAL;
+
+ *pval = *p;
+
+ return 0;
+}
+
+static int dfl_uart_get_params(struct dfl_device *dfl_dev, struct uart_8250_port *uart)
+{
+ struct device *dev = &dfl_dev->dev;
+ u64 fifo_len, clk_freq, reg_layout;
+ u32 reg_width;
+ int ret;
+
+ ret = dfh_get_u64_param_val(dfl_dev, DFHv1_PARAM_ID_CLK_FRQ, &clk_freq);
+ if (ret)
+ return dev_err_probe(dev, ret, "missing CLK_FRQ param\n");
+
+ uart->port.uartclk = clk_freq;
+
+ ret = dfh_get_u64_param_val(dfl_dev, DFHv1_PARAM_ID_FIFO_LEN, &fifo_len);
+ if (ret)
+ return dev_err_probe(dev, ret, "missing FIFO_LEN param\n");
+
+ switch (fifo_len) {
+ case 32:
+ uart->port.type = PORT_ALTR_16550_F32;
+ break;
+
+ case 64:
+ uart->port.type = PORT_ALTR_16550_F64;
+ break;
+
+ case 128:
+ uart->port.type = PORT_ALTR_16550_F128;
+ break;
+
+ default:
+ return dev_err_probe(dev, -EINVAL, "unsupported FIFO_LEN %llu\n", fifo_len);
+ }
+
+ ret = dfh_get_u64_param_val(dfl_dev, DFHv1_PARAM_ID_REG_LAYOUT, &reg_layout);
+ if (ret)
+ return dev_err_probe(dev, ret, "missing REG_LAYOUT param\n");
+
+ uart->port.regshift = FIELD_GET(DFHv1_PARAM_REG_LAYOUT_SHIFT, reg_layout);
+ reg_width = FIELD_GET(DFHv1_PARAM_REG_LAYOUT_WIDTH, reg_layout);
+ switch (reg_width) {
+ case 4:
+ uart->port.iotype = UPIO_MEM32;
+ break;
+
+ case 2:
+ uart->port.iotype = UPIO_MEM16;
+ break;
+
+ default:
+ return dev_err_probe(dev, -EINVAL, "unsupported reg-width %u\n", reg_width);
+
+ }
+
+ return 0;
+}
+
+static int dfl_uart_probe(struct dfl_device *dfl_dev)
+{
+ struct device *dev = &dfl_dev->dev;
+ struct uart_8250_port uart = { };
+ struct dfl_uart *dfluart;
+ int ret;
+
+ uart.port.flags = UPF_IOREMAP;
+ uart.port.mapbase = dfl_dev->mmio_res.start;
+ uart.port.mapsize = resource_size(&dfl_dev->mmio_res);
+
+ ret = dfl_uart_get_params(dfl_dev, &uart);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "failed uart feature walk\n");
+
+ if (dfl_dev->num_irqs == 1)
+ uart.port.irq = dfl_dev->irqs[0];
+
+ dfluart = devm_kzalloc(dev, sizeof(*dfluart), GFP_KERNEL);
+ if (!dfluart)
+ return -ENOMEM;
+
+ dfluart->line = serial8250_register_8250_port(&uart);
+ if (dfluart->line < 0)
+ return dev_err_probe(dev, dfluart->line, "unable to register 8250 port.\n");
+
+ dev_set_drvdata(dev, dfluart);
+
+ return 0;
+}
+
+static void dfl_uart_remove(struct dfl_device *dfl_dev)
+{
+ struct dfl_uart *dfluart = dev_get_drvdata(&dfl_dev->dev);
+
+ serial8250_unregister_port(dfluart->line);
+}
+
+#define FME_FEATURE_ID_UART 0x24
+
+static const struct dfl_device_id dfl_uart_ids[] = {
+ { FME_ID, FME_FEATURE_ID_UART },
+ { }
+};
+MODULE_DEVICE_TABLE(dfl, dfl_uart_ids);
+
+static struct dfl_driver dfl_uart_driver = {
+ .drv = {
+ .name = "dfl-uart",
+ },
+ .id_table = dfl_uart_ids,
+ .probe = dfl_uart_probe,
+ .remove = dfl_uart_remove,
+};
+module_dfl_driver(dfl_uart_driver);
+
+MODULE_DESCRIPTION("DFL Intel UART driver");
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL");
diff --git a/drivers/tty/serial/8250/8250_dma.c b/drivers/tty/serial/8250/8250_dma.c
index 37d6af2ec427..7fa66501792d 100644
--- a/drivers/tty/serial/8250/8250_dma.c
+++ b/drivers/tty/serial/8250/8250_dma.c
@@ -43,15 +43,23 @@ static void __dma_rx_complete(struct uart_8250_port *p)
struct uart_8250_dma *dma = p->dma;
struct tty_port *tty_port = &p->port.state->port;
struct dma_tx_state state;
+ enum dma_status dma_status;
int count;
- dma->rx_running = 0;
- dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state);
+ /*
+ * New DMA Rx can be started during the completion handler before it
+ * could acquire port's lock and it might still be ongoing. Don't to
+ * anything in such case.
+ */
+ dma_status = dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state);
+ if (dma_status == DMA_IN_PROGRESS)
+ return;
count = dma->rx_size - state.residue;
tty_insert_flip_string(tty_port, dma->rx_buf, count);
p->port.icount.rx += count;
+ dma->rx_running = 0;
tty_flip_buffer_push(tty_port);
}
@@ -62,9 +70,14 @@ static void dma_rx_complete(void *param)
struct uart_8250_dma *dma = p->dma;
unsigned long flags;
- __dma_rx_complete(p);
-
spin_lock_irqsave(&p->port.lock, flags);
+ if (dma->rx_running)
+ __dma_rx_complete(p);
+
+ /*
+ * Cannot be combined with the previous check because __dma_rx_complete()
+ * changes dma->rx_running.
+ */
if (!dma->rx_running && (serial_lsr_in(p) & UART_LSR_DR))
p->dma->rx_dma(p);
spin_unlock_irqrestore(&p->port.lock, flags);
diff --git a/drivers/tty/serial/8250/8250_early.c b/drivers/tty/serial/8250/8250_early.c
index f271becfc46c..0ebde0ab8167 100644
--- a/drivers/tty/serial/8250/8250_early.c
+++ b/drivers/tty/serial/8250/8250_early.c
@@ -136,11 +136,11 @@ static void __init init_port(struct earlycon_device *device)
unsigned char c;
unsigned int ier;
- serial8250_early_out(port, UART_LCR, 0x3); /* 8n1 */
+ serial8250_early_out(port, UART_LCR, UART_LCR_WLEN8); /* 8n1 */
ier = serial8250_early_in(port, UART_IER);
serial8250_early_out(port, UART_IER, ier & UART_IER_UUE); /* no interrupt */
serial8250_early_out(port, UART_FCR, 0); /* no fifo */
- serial8250_early_out(port, UART_MCR, 0x3); /* DTR + RTS */
+ serial8250_early_out(port, UART_MCR, UART_MCR_DTR | UART_MCR_RTS);
if (port->uartclk) {
divisor = DIV_ROUND_CLOSEST(port->uartclk, 16 * device->baud);
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index 8e9f247590bd..c55be6fda0ca 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -24,6 +24,7 @@
#include <asm/io.h>
#include "8250.h"
+#include "8250_pcilib.h"
/*
* init function returns:
@@ -89,28 +90,7 @@ static int
setup_port(struct serial_private *priv, struct uart_8250_port *port,
u8 bar, unsigned int offset, int regshift)
{
- struct pci_dev *dev = priv->dev;
-
- if (bar >= PCI_STD_NUM_BARS)
- return -EINVAL;
-
- if (pci_resource_flags(dev, bar) & IORESOURCE_MEM) {
- if (!pcim_iomap(dev, bar, 0) && !pcim_iomap_table(dev))
- return -ENOMEM;
-
- port->port.iotype = UPIO_MEM;
- port->port.iobase = 0;
- port->port.mapbase = pci_resource_start(dev, bar) + offset;
- port->port.membase = pcim_iomap_table(dev)[bar] + offset;
- port->port.regshift = regshift;
- } else {
- port->port.iotype = UPIO_PORT;
- port->port.iobase = pci_resource_start(dev, bar) + offset;
- port->port.mapbase = 0;
- port->port.membase = NULL;
- port->port.regshift = 0;
- }
- return 0;
+ return serial8250_pci_setup_port(priv->dev, port, bar, offset, regshift);
}
/*
@@ -5757,3 +5737,4 @@ module_pci_driver(serial_pci_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Generic 8250/16x50 PCI serial probe module");
MODULE_DEVICE_TABLE(pci, serial_pci_tbl);
+MODULE_IMPORT_NS(SERIAL_8250_PCI);
diff --git a/drivers/tty/serial/8250/8250_pci1xxxx.c b/drivers/tty/serial/8250/8250_pci1xxxx.c
new file mode 100644
index 000000000000..a3b25779d921
--- /dev/null
+++ b/drivers/tty/serial/8250/8250_pci1xxxx.c
@@ -0,0 +1,494 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Probe module for 8250/16550-type MCHP PCI serial ports.
+ *
+ * Based on drivers/tty/serial/8250/8250_pci.c,
+ *
+ * Copyright (C) 2022 Microchip Technology Inc., All Rights Reserved.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/serial_core.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/units.h>
+#include <linux/tty.h>
+
+#include <asm/byteorder.h>
+
+#include "8250.h"
+#include "8250_pcilib.h"
+
+#define PCI_DEVICE_ID_EFAR_PCI12000 0xa002
+#define PCI_DEVICE_ID_EFAR_PCI11010 0xa012
+#define PCI_DEVICE_ID_EFAR_PCI11101 0xa022
+#define PCI_DEVICE_ID_EFAR_PCI11400 0xa032
+#define PCI_DEVICE_ID_EFAR_PCI11414 0xa042
+
+#define PCI_SUBDEVICE_ID_EFAR_PCI1XXXX_4p 0x0001
+#define PCI_SUBDEVICE_ID_EFAR_PCI1XXXX_3p012 0x0002
+#define PCI_SUBDEVICE_ID_EFAR_PCI1XXXX_3p013 0x0003
+#define PCI_SUBDEVICE_ID_EFAR_PCI1XXXX_3p023 0x0004
+#define PCI_SUBDEVICE_ID_EFAR_PCI1XXXX_3p123 0x0005
+#define PCI_SUBDEVICE_ID_EFAR_PCI1XXXX_2p01 0x0006
+#define PCI_SUBDEVICE_ID_EFAR_PCI1XXXX_2p02 0x0007
+#define PCI_SUBDEVICE_ID_EFAR_PCI1XXXX_2p03 0x0008
+#define PCI_SUBDEVICE_ID_EFAR_PCI1XXXX_2p12 0x0009
+#define PCI_SUBDEVICE_ID_EFAR_PCI1XXXX_2p13 0x000a
+#define PCI_SUBDEVICE_ID_EFAR_PCI1XXXX_2p23 0x000b
+#define PCI_SUBDEVICE_ID_EFAR_PCI1XXXX_1p0 0x000c
+#define PCI_SUBDEVICE_ID_EFAR_PCI1XXXX_1p1 0x000d
+#define PCI_SUBDEVICE_ID_EFAR_PCI1XXXX_1p2 0x000e
+#define PCI_SUBDEVICE_ID_EFAR_PCI1XXXX_1p3 0x000f
+
+#define PCI_SUBDEVICE_ID_EFAR_PCI12000 PCI_DEVICE_ID_EFAR_PCI12000
+#define PCI_SUBDEVICE_ID_EFAR_PCI11010 PCI_DEVICE_ID_EFAR_PCI11010
+#define PCI_SUBDEVICE_ID_EFAR_PCI11101 PCI_DEVICE_ID_EFAR_PCI11101
+#define PCI_SUBDEVICE_ID_EFAR_PCI11400 PCI_DEVICE_ID_EFAR_PCI11400
+#define PCI_SUBDEVICE_ID_EFAR_PCI11414 PCI_DEVICE_ID_EFAR_PCI11414
+
+#define UART_ACTV_REG 0x11
+#define UART_BLOCK_SET_ACTIVE BIT(0)
+
+#define UART_PCI_CTRL_REG 0x80
+#define UART_PCI_CTRL_SET_MULTIPLE_MSI BIT(4)
+#define UART_PCI_CTRL_D3_CLK_ENABLE BIT(0)
+
+#define ADCL_CFG_REG 0x40
+#define ADCL_CFG_POL_SEL BIT(2)
+#define ADCL_CFG_PIN_SEL BIT(1)
+#define ADCL_CFG_EN BIT(0)
+
+#define UART_BIT_SAMPLE_CNT 16
+#define BAUD_CLOCK_DIV_INT_MSK GENMASK(31, 8)
+#define ADCL_CFG_RTS_DELAY_MASK GENMASK(11, 8)
+#define UART_CLOCK_DEFAULT (62500 * HZ_PER_KHZ)
+
+#define UART_WAKE_REG 0x8C
+#define UART_WAKE_MASK_REG 0x90
+#define UART_WAKE_N_PIN BIT(2)
+#define UART_WAKE_NCTS BIT(1)
+#define UART_WAKE_INT BIT(0)
+#define UART_WAKE_SRCS \
+ (UART_WAKE_N_PIN | UART_WAKE_NCTS | UART_WAKE_INT)
+
+#define UART_BAUD_CLK_DIVISOR_REG 0x54
+
+#define UART_RESET_REG 0x94
+#define UART_RESET_D3_RESET_DISABLE BIT(16)
+
+#define MAX_PORTS 4
+#define PORT_OFFSET 0x100
+
+static const int logical_to_physical_port_idx[][MAX_PORTS] = {
+ {0, 1, 2, 3}, /* PCI12000, PCI11010, PCI11101, PCI11400, PCI11414 */
+ {0, 1, 2, 3}, /* PCI4p */
+ {0, 1, 2, -1}, /* PCI3p012 */
+ {0, 1, 3, -1}, /* PCI3p013 */
+ {0, 2, 3, -1}, /* PCI3p023 */
+ {1, 2, 3, -1}, /* PCI3p123 */
+ {0, 1, -1, -1}, /* PCI2p01 */
+ {0, 2, -1, -1}, /* PCI2p02 */
+ {0, 3, -1, -1}, /* PCI2p03 */
+ {1, 2, -1, -1}, /* PCI2p12 */
+ {1, 3, -1, -1}, /* PCI2p13 */
+ {2, 3, -1, -1}, /* PCI2p23 */
+ {0, -1, -1, -1}, /* PCI1p0 */
+ {1, -1, -1, -1}, /* PCI1p1 */
+ {2, -1, -1, -1}, /* PCI1p2 */
+ {3, -1, -1, -1}, /* PCI1p3 */
+};
+
+struct pci1xxxx_8250 {
+ unsigned int nr;
+ void __iomem *membase;
+ int line[];
+};
+
+static int pci1xxxx_get_num_ports(struct pci_dev *dev)
+{
+ switch (dev->subsystem_device) {
+ case PCI_SUBDEVICE_ID_EFAR_PCI1XXXX_1p0:
+ case PCI_SUBDEVICE_ID_EFAR_PCI1XXXX_1p1:
+ case PCI_SUBDEVICE_ID_EFAR_PCI1XXXX_1p2:
+ case PCI_SUBDEVICE_ID_EFAR_PCI1XXXX_1p3:
+ case PCI_SUBDEVICE_ID_EFAR_PCI12000:
+ case PCI_SUBDEVICE_ID_EFAR_PCI11010:
+ case PCI_SUBDEVICE_ID_EFAR_PCI11101:
+ case PCI_SUBDEVICE_ID_EFAR_PCI11400:
+ default:
+ return 1;
+ case PCI_SUBDEVICE_ID_EFAR_PCI1XXXX_2p01:
+ case PCI_SUBDEVICE_ID_EFAR_PCI1XXXX_2p02:
+ case PCI_SUBDEVICE_ID_EFAR_PCI1XXXX_2p03:
+ case PCI_SUBDEVICE_ID_EFAR_PCI1XXXX_2p12:
+ case PCI_SUBDEVICE_ID_EFAR_PCI1XXXX_2p13:
+ case PCI_SUBDEVICE_ID_EFAR_PCI1XXXX_2p23:
+ return 2;
+ case PCI_SUBDEVICE_ID_EFAR_PCI1XXXX_3p012:
+ case PCI_SUBDEVICE_ID_EFAR_PCI1XXXX_3p123:
+ case PCI_SUBDEVICE_ID_EFAR_PCI1XXXX_3p013:
+ case PCI_SUBDEVICE_ID_EFAR_PCI1XXXX_3p023:
+ return 3;
+ case PCI_SUBDEVICE_ID_EFAR_PCI1XXXX_4p:
+ case PCI_SUBDEVICE_ID_EFAR_PCI11414:
+ return 4;
+ }
+}
+
+static unsigned int pci1xxxx_get_divisor(struct uart_port *port,
+ unsigned int baud, unsigned int *frac)
+{
+ unsigned int quot;
+
+ /*
+ * Calculate baud rate sampling period in nanoseconds.
+ * Fractional part x denotes x/255 parts of a nanosecond.
+ */
+ quot = NSEC_PER_SEC / (baud * UART_BIT_SAMPLE_CNT);
+ *frac = (NSEC_PER_SEC - quot * baud * UART_BIT_SAMPLE_CNT) *
+ 255 / UART_BIT_SAMPLE_CNT / baud;
+
+ return quot;
+}
+
+static void pci1xxxx_set_divisor(struct uart_port *port, unsigned int baud,
+ unsigned int quot, unsigned int frac)
+{
+ writel(FIELD_PREP(BAUD_CLOCK_DIV_INT_MSK, quot) | frac,
+ port->membase + UART_BAUD_CLK_DIVISOR_REG);
+}
+
+static int pci1xxxx_rs485_config(struct uart_port *port,
+ struct ktermios *termios,
+ struct serial_rs485 *rs485)
+{
+ u32 delay_in_baud_periods;
+ u32 baud_period_in_ns;
+ u32 mode_cfg = 0;
+ u32 clock_div;
+
+ /*
+ * pci1xxxx's uart hardware supports only RTS delay after
+ * Tx and in units of bit times to a maximum of 15
+ */
+ if (rs485->flags & SER_RS485_ENABLED) {
+ mode_cfg = ADCL_CFG_EN | ADCL_CFG_PIN_SEL;
+
+ if (!(rs485->flags & SER_RS485_RTS_ON_SEND))
+ mode_cfg |= ADCL_CFG_POL_SEL;
+
+ if (rs485->delay_rts_after_send) {
+ clock_div = readl(port->membase + UART_BAUD_CLK_DIVISOR_REG);
+ baud_period_in_ns =
+ FIELD_GET(BAUD_CLOCK_DIV_INT_MSK, clock_div) *
+ UART_BIT_SAMPLE_CNT;
+ delay_in_baud_periods =
+ rs485->delay_rts_after_send * NSEC_PER_MSEC /
+ baud_period_in_ns;
+ delay_in_baud_periods =
+ min_t(u32, delay_in_baud_periods,
+ FIELD_MAX(ADCL_CFG_RTS_DELAY_MASK));
+ mode_cfg |= FIELD_PREP(ADCL_CFG_RTS_DELAY_MASK,
+ delay_in_baud_periods);
+ rs485->delay_rts_after_send =
+ baud_period_in_ns * delay_in_baud_periods /
+ NSEC_PER_MSEC;
+ }
+ }
+ writel(mode_cfg, port->membase + ADCL_CFG_REG);
+ return 0;
+}
+
+static const struct serial_rs485 pci1xxxx_rs485_supported = {
+ .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND |
+ SER_RS485_RTS_AFTER_SEND,
+ .delay_rts_after_send = 1,
+ /* Delay RTS before send is not supported */
+};
+
+static bool pci1xxxx_port_suspend(int line)
+{
+ struct uart_8250_port *up = serial8250_get_port(line);
+ struct uart_port *port = &up->port;
+ struct tty_port *tport = &port->state->port;
+ unsigned long flags;
+ bool ret = false;
+ u8 wakeup_mask;
+
+ mutex_lock(&tport->mutex);
+ if (port->suspended == 0 && port->dev) {
+ wakeup_mask = readb(up->port.membase + UART_WAKE_MASK_REG);
+
+ spin_lock_irqsave(&port->lock, flags);
+ port->mctrl &= ~TIOCM_OUT2;
+ port->ops->set_mctrl(port, port->mctrl);
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ ret = (wakeup_mask & UART_WAKE_SRCS) != UART_WAKE_SRCS;
+ }
+
+ writeb(UART_WAKE_SRCS, port->membase + UART_WAKE_REG);
+ mutex_unlock(&tport->mutex);
+
+ return ret;
+}
+
+static void pci1xxxx_port_resume(int line)
+{
+ struct uart_8250_port *up = serial8250_get_port(line);
+ struct uart_port *port = &up->port;
+ struct tty_port *tport = &port->state->port;
+ unsigned long flags;
+
+ mutex_lock(&tport->mutex);
+ writeb(UART_BLOCK_SET_ACTIVE, port->membase + UART_ACTV_REG);
+ writeb(UART_WAKE_SRCS, port->membase + UART_WAKE_REG);
+
+ if (port->suspended == 0) {
+ spin_lock_irqsave(&port->lock, flags);
+ port->mctrl |= TIOCM_OUT2;
+ port->ops->set_mctrl(port, port->mctrl);
+ spin_unlock_irqrestore(&port->lock, flags);
+ }
+ mutex_unlock(&tport->mutex);
+}
+
+static int pci1xxxx_suspend(struct device *dev)
+{
+ struct pci1xxxx_8250 *priv = dev_get_drvdata(dev);
+ struct pci_dev *pcidev = to_pci_dev(dev);
+ bool wakeup = false;
+ unsigned int data;
+ void __iomem *p;
+ int i;
+
+ for (i = 0; i < priv->nr; i++) {
+ if (priv->line[i] >= 0) {
+ serial8250_suspend_port(priv->line[i]);
+ wakeup |= pci1xxxx_port_suspend(priv->line[i]);
+ }
+ }
+
+ p = pci_ioremap_bar(pcidev, 0);
+ if (!p) {
+ dev_err(dev, "remapping of bar 0 memory failed");
+ return -ENOMEM;
+ }
+
+ data = readl(p + UART_RESET_REG);
+ writel(data | UART_RESET_D3_RESET_DISABLE, p + UART_RESET_REG);
+
+ if (wakeup)
+ writeb(UART_PCI_CTRL_D3_CLK_ENABLE, p + UART_PCI_CTRL_REG);
+
+ iounmap(p);
+ device_set_wakeup_enable(dev, true);
+ pci_wake_from_d3(pcidev, true);
+
+ return 0;
+}
+
+static int pci1xxxx_resume(struct device *dev)
+{
+ struct pci1xxxx_8250 *priv = dev_get_drvdata(dev);
+ struct pci_dev *pcidev = to_pci_dev(dev);
+ unsigned int data;
+ void __iomem *p;
+ int i;
+
+ p = pci_ioremap_bar(pcidev, 0);
+ if (!p) {
+ dev_err(dev, "remapping of bar 0 memory failed");
+ return -ENOMEM;
+ }
+
+ data = readl(p + UART_RESET_REG);
+ writel(data & ~UART_RESET_D3_RESET_DISABLE, p + UART_RESET_REG);
+ iounmap(p);
+
+ for (i = 0; i < priv->nr; i++) {
+ if (priv->line[i] >= 0) {
+ pci1xxxx_port_resume(priv->line[i]);
+ serial8250_resume_port(priv->line[i]);
+ }
+ }
+
+ return 0;
+}
+
+static int pci1xxxx_setup(struct pci_dev *pdev,
+ struct uart_8250_port *port, int port_idx)
+{
+ int ret;
+
+ port->port.flags |= UPF_FIXED_TYPE | UPF_SKIP_TEST;
+ port->port.type = PORT_MCHP16550A;
+ port->port.set_termios = serial8250_do_set_termios;
+ port->port.get_divisor = pci1xxxx_get_divisor;
+ port->port.set_divisor = pci1xxxx_set_divisor;
+ port->port.rs485_config = pci1xxxx_rs485_config;
+ port->port.rs485_supported = pci1xxxx_rs485_supported;
+
+ ret = serial8250_pci_setup_port(pdev, port, 0, PORT_OFFSET * port_idx, 0);
+ if (ret < 0)
+ return ret;
+
+ writeb(UART_BLOCK_SET_ACTIVE, port->port.membase + UART_ACTV_REG);
+ writeb(UART_WAKE_SRCS, port->port.membase + UART_WAKE_REG);
+ writeb(UART_WAKE_N_PIN, port->port.membase + UART_WAKE_MASK_REG);
+
+ return 0;
+}
+
+static unsigned int pci1xxxx_get_max_port(int subsys_dev)
+{
+ unsigned int i = MAX_PORTS;
+
+ if (subsys_dev < ARRAY_SIZE(logical_to_physical_port_idx))
+ while (i--) {
+ if (logical_to_physical_port_idx[subsys_dev][i] != -1)
+ return logical_to_physical_port_idx[subsys_dev][i] + 1;
+ }
+
+ if (subsys_dev == PCI_SUBDEVICE_ID_EFAR_PCI11414)
+ return 4;
+
+ return 1;
+}
+
+static int pci1xxxx_logical_to_physical_port_translate(int subsys_dev, int port)
+{
+ if (subsys_dev < ARRAY_SIZE(logical_to_physical_port_idx))
+ return logical_to_physical_port_idx[subsys_dev][port];
+
+ return logical_to_physical_port_idx[0][port];
+}
+
+static int pci1xxxx_serial_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ struct device *dev = &pdev->dev;
+ struct pci1xxxx_8250 *priv;
+ struct uart_8250_port uart;
+ unsigned int max_vec_reqd;
+ unsigned int nr_ports, i;
+ int num_vectors;
+ int subsys_dev;
+ int port_idx;
+ int rc;
+
+ rc = pcim_enable_device(pdev);
+ if (rc)
+ return rc;
+
+ nr_ports = pci1xxxx_get_num_ports(pdev);
+
+ priv = devm_kzalloc(dev, struct_size(priv, line, nr_ports), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->membase = pci_ioremap_bar(pdev, 0);
+ if (!priv->membase)
+ return -ENOMEM;
+
+ pci_set_master(pdev);
+
+ priv->nr = nr_ports;
+
+ subsys_dev = pdev->subsystem_device;
+ max_vec_reqd = pci1xxxx_get_max_port(subsys_dev);
+
+ num_vectors = pci_alloc_irq_vectors(pdev, 1, max_vec_reqd, PCI_IRQ_ALL_TYPES);
+ if (num_vectors < 0) {
+ pci_iounmap(pdev, priv->membase);
+ return num_vectors;
+ }
+
+ memset(&uart, 0, sizeof(uart));
+ uart.port.flags = UPF_SHARE_IRQ | UPF_FIXED_PORT;
+ uart.port.uartclk = UART_CLOCK_DEFAULT;
+ uart.port.dev = dev;
+
+ if (num_vectors == max_vec_reqd)
+ writeb(UART_PCI_CTRL_SET_MULTIPLE_MSI, priv->membase + UART_PCI_CTRL_REG);
+
+ for (i = 0; i < nr_ports; i++) {
+ priv->line[i] = -ENODEV;
+
+ port_idx = pci1xxxx_logical_to_physical_port_translate(subsys_dev, i);
+
+ if (num_vectors == max_vec_reqd)
+ uart.port.irq = pci_irq_vector(pdev, port_idx);
+ else
+ uart.port.irq = pci_irq_vector(pdev, 0);
+
+ rc = pci1xxxx_setup(pdev, &uart, port_idx);
+ if (rc) {
+ dev_warn(dev, "Failed to setup port %u\n", i);
+ continue;
+ }
+
+ priv->line[i] = serial8250_register_8250_port(&uart);
+ if (priv->line[i] < 0) {
+ dev_warn(dev,
+ "Couldn't register serial port %lx, irq %d, type %d, error %d\n",
+ uart.port.iobase, uart.port.irq, uart.port.iotype,
+ priv->line[i]);
+ }
+ }
+
+ pci_set_drvdata(pdev, priv);
+
+ return 0;
+}
+
+static void pci1xxxx_serial_remove(struct pci_dev *dev)
+{
+ struct pci1xxxx_8250 *priv = pci_get_drvdata(dev);
+ unsigned int i;
+
+ for (i = 0; i < priv->nr; i++) {
+ if (priv->line[i] >= 0)
+ serial8250_unregister_port(priv->line[i]);
+ }
+
+ pci_free_irq_vectors(dev);
+ pci_iounmap(dev, priv->membase);
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(pci1xxxx_pm_ops, pci1xxxx_suspend, pci1xxxx_resume);
+
+static const struct pci_device_id pci1xxxx_pci_tbl[] = {
+ { PCI_VDEVICE(EFAR, PCI_DEVICE_ID_EFAR_PCI11010) },
+ { PCI_VDEVICE(EFAR, PCI_DEVICE_ID_EFAR_PCI11101) },
+ { PCI_VDEVICE(EFAR, PCI_DEVICE_ID_EFAR_PCI11400) },
+ { PCI_VDEVICE(EFAR, PCI_DEVICE_ID_EFAR_PCI11414) },
+ { PCI_VDEVICE(EFAR, PCI_DEVICE_ID_EFAR_PCI12000) },
+ {}
+};
+MODULE_DEVICE_TABLE(pci, pci1xxxx_pci_tbl);
+
+static struct pci_driver pci1xxxx_pci_driver = {
+ .name = "pci1xxxx serial",
+ .probe = pci1xxxx_serial_probe,
+ .remove = pci1xxxx_serial_remove,
+ .driver = {
+ .pm = pm_sleep_ptr(&pci1xxxx_pm_ops),
+ },
+ .id_table = pci1xxxx_pci_tbl,
+};
+module_pci_driver(pci1xxxx_pci_driver);
+
+static_assert((ARRAY_SIZE(logical_to_physical_port_idx) == PCI_SUBDEVICE_ID_EFAR_PCI1XXXX_1p3 + 1));
+
+MODULE_IMPORT_NS(SERIAL_8250_PCI);
+MODULE_DESCRIPTION("Microchip Technology Inc. PCIe to UART module");
+MODULE_AUTHOR("Kumaravel Thiagarajan <kumaravel.thiagarajan@microchip.com>");
+MODULE_AUTHOR("Tharun Kumar P <tharunkumar.pasumarthi@microchip.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/tty/serial/8250/8250_pcilib.c b/drivers/tty/serial/8250/8250_pcilib.c
new file mode 100644
index 000000000000..d234e9194feb
--- /dev/null
+++ b/drivers/tty/serial/8250/8250_pcilib.c
@@ -0,0 +1,40 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * 8250 PCI library.
+ *
+ * Copyright (C) 2001 Russell King, All Rights Reserved.
+ */
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/pci.h>
+#include <linux/types.h>
+
+#include "8250.h"
+#include "8250_pcilib.h"
+
+int serial8250_pci_setup_port(struct pci_dev *dev, struct uart_8250_port *port,
+ u8 bar, unsigned int offset, int regshift)
+{
+ if (bar >= PCI_STD_NUM_BARS)
+ return -EINVAL;
+
+ if (pci_resource_flags(dev, bar) & IORESOURCE_MEM) {
+ if (!pcim_iomap(dev, bar, 0) && !pcim_iomap_table(dev))
+ return -ENOMEM;
+
+ port->port.iotype = UPIO_MEM;
+ port->port.iobase = 0;
+ port->port.mapbase = pci_resource_start(dev, bar) + offset;
+ port->port.membase = pcim_iomap_table(dev)[bar] + offset;
+ port->port.regshift = regshift;
+ } else {
+ port->port.iotype = UPIO_PORT;
+ port->port.iobase = pci_resource_start(dev, bar) + offset;
+ port->port.mapbase = 0;
+ port->port.membase = NULL;
+ port->port.regshift = 0;
+ }
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(serial8250_pci_setup_port, SERIAL_8250_PCI);
+MODULE_LICENSE("GPL");
diff --git a/drivers/tty/serial/8250/8250_pcilib.h b/drivers/tty/serial/8250/8250_pcilib.h
new file mode 100644
index 000000000000..1aaf1b50ce9c
--- /dev/null
+++ b/drivers/tty/serial/8250/8250_pcilib.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * 8250 PCI library header file.
+ *
+ * Copyright (C) 2001 Russell King, All Rights Reserved.
+ */
+
+#include <linux/types.h>
+
+struct pci_dev;
+
+struct uart_8250_port;
+
+int serial8250_pci_setup_port(struct pci_dev *dev, struct uart_8250_port *port, u8 bar,
+ unsigned int offset, int regshift);
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index beba8f38b3dc..fa43df05342b 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -313,6 +313,14 @@ static const struct serial8250_config uart_config[] = {
.rxtrig_bytes = {1, 4, 8, 14},
.flags = UART_CAP_FIFO,
},
+ [PORT_MCHP16550A] = {
+ .name = "MCHP16550A",
+ .fifo_size = 256,
+ .tx_loadsz = 256,
+ .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_01,
+ .rxtrig_bytes = {2, 66, 130, 194},
+ .flags = UART_CAP_FIFO,
+ },
};
/* Uart divisor latch read */
@@ -1050,11 +1058,12 @@ static void autoconfig_16550a(struct uart_8250_port *up)
serial_out(up, UART_LCR, 0);
serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO |
UART_FCR7_64BYTE);
- status1 = serial_in(up, UART_IIR) >> 5;
+ status1 = serial_in(up, UART_IIR) & (UART_IIR_64BYTE_FIFO |
+ UART_IIR_FIFO_ENABLED);
serial_out(up, UART_FCR, 0);
serial_out(up, UART_LCR, 0);
- if (status1 == 7)
+ if (status1 == (UART_IIR_64BYTE_FIFO | UART_IIR_FIFO_ENABLED))
up->port.type = PORT_16550A_FSL64;
else
DEBUG_AUTOCONF("Motorola 8xxx DUART ");
@@ -1122,17 +1131,20 @@ static void autoconfig_16550a(struct uart_8250_port *up)
*/
serial_out(up, UART_LCR, 0);
serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO | UART_FCR7_64BYTE);
- status1 = serial_in(up, UART_IIR) >> 5;
+ status1 = serial_in(up, UART_IIR) & (UART_IIR_64BYTE_FIFO | UART_IIR_FIFO_ENABLED);
serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO);
+
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO | UART_FCR7_64BYTE);
- status2 = serial_in(up, UART_IIR) >> 5;
+ status2 = serial_in(up, UART_IIR) & (UART_IIR_64BYTE_FIFO | UART_IIR_FIFO_ENABLED);
serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO);
+
serial_out(up, UART_LCR, 0);
DEBUG_AUTOCONF("iir1=%d iir2=%d ", status1, status2);
- if (status1 == 6 && status2 == 7) {
+ if (status1 == UART_IIR_FIFO_ENABLED_16550A &&
+ status2 == (UART_IIR_64BYTE_FIFO | UART_IIR_FIFO_ENABLED_16550A)) {
up->port.type = PORT_16750;
up->capabilities |= UART_CAP_AFE | UART_CAP_SLEEP;
return;
@@ -1236,14 +1248,14 @@ static void autoconfig(struct uart_8250_port *up)
* Mask out IER[7:4] bits for test as some UARTs (e.g. TL
* 16C754B) allow only to modify them if an EFR bit is set.
*/
- scratch2 = serial_in(up, UART_IER) & 0x0f;
- serial_out(up, UART_IER, 0x0F);
+ scratch2 = serial_in(up, UART_IER) & UART_IER_ALL_INTR;
+ serial_out(up, UART_IER, UART_IER_ALL_INTR);
#ifdef __i386__
outb(0, 0x080);
#endif
- scratch3 = serial_in(up, UART_IER) & 0x0f;
+ scratch3 = serial_in(up, UART_IER) & UART_IER_ALL_INTR;
serial_out(up, UART_IER, scratch);
- if (scratch2 != 0 || scratch3 != 0x0F) {
+ if (scratch2 != 0 || scratch3 != UART_IER_ALL_INTR) {
/*
* We failed; there's nothing here
*/
@@ -1267,10 +1279,10 @@ static void autoconfig(struct uart_8250_port *up)
* that conflicts with COM 1-4 --- we hope!
*/
if (!(port->flags & UPF_SKIP_TEST)) {
- serial8250_out_MCR(up, UART_MCR_LOOP | 0x0A);
- status1 = serial_in(up, UART_MSR) & 0xF0;
+ serial8250_out_MCR(up, UART_MCR_LOOP | UART_MCR_OUT2 | UART_MCR_RTS);
+ status1 = serial_in(up, UART_MSR) & UART_MSR_STATUS_BITS;
serial8250_out_MCR(up, save_mcr);
- if (status1 != 0x90) {
+ if (status1 != (UART_MSR_DCD | UART_MSR_CTS)) {
spin_unlock_irqrestore(&port->lock, flags);
DEBUG_AUTOCONF("LOOP test failed (%02x) ",
status1);
@@ -1293,22 +1305,19 @@ static void autoconfig(struct uart_8250_port *up)
serial_out(up, UART_FCR, UART_FCR_ENABLE_FIFO);
- /* Assign this as it is to truncate any bits above 7. */
- scratch = serial_in(up, UART_IIR);
-
- switch (scratch >> 6) {
- case 0:
+ switch (serial_in(up, UART_IIR) & UART_IIR_FIFO_ENABLED) {
+ case UART_IIR_FIFO_ENABLED_8250:
autoconfig_8250(up);
break;
- case 1:
- port->type = PORT_UNKNOWN;
- break;
- case 2:
+ case UART_IIR_FIFO_ENABLED_16550:
port->type = PORT_16550;
break;
- case 3:
+ case UART_IIR_FIFO_ENABLED_16550A:
autoconfig_16550a(up);
break;
+ default:
+ port->type = PORT_UNKNOWN;
+ break;
}
#ifdef CONFIG_SERIAL_8250_RSA
@@ -1394,7 +1403,7 @@ static void autoconfig_irq(struct uart_8250_port *up)
serial8250_out_MCR(up,
UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2);
}
- serial_out(up, UART_IER, 0x0f); /* enable all intrs */
+ serial_out(up, UART_IER, UART_IER_ALL_INTR);
serial_in(up, UART_LSR);
serial_in(up, UART_RX);
serial_in(up, UART_IIR);
@@ -1511,8 +1520,6 @@ static inline void __stop_tx(struct uart_8250_port *p)
u16 lsr = serial_lsr_in(p);
u64 stop_delay = 0;
- p->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS;
-
if (!(lsr & UART_LSR_THRE))
return;
/*
diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig
index b0f62345bc84..978dc196c29b 100644
--- a/drivers/tty/serial/8250/Kconfig
+++ b/drivers/tty/serial/8250/Kconfig
@@ -129,9 +129,13 @@ config SERIAL_8250_DMA
This builds DMA support that can be used with 8250/16650
compatible UART controllers that support DMA signaling.
+config SERIAL_8250_PCILIB
+ bool
+
config SERIAL_8250_PCI
tristate "8250/16550 PCI device support"
depends on SERIAL_8250 && PCI
+ select SERIAL_8250_PCILIB
default SERIAL_8250
help
This builds standard PCI serial support. You may be able to
@@ -291,6 +295,17 @@ config SERIAL_8250_HUB6
To compile this driver as a module, choose M here: the module
will be called 8250_hub6.
+config SERIAL_8250_PCI1XXXX
+ tristate "Microchip 8250 based serial port"
+ depends on SERIAL_8250 && PCI
+ select SERIAL_8250_PCILIB
+ default SERIAL_8250
+ help
+ Select this option if you have a setup with Microchip PCIe
+ Switch with serial port enabled and wish to enable 8250
+ serial driver for the serial interface. This driver support
+ will ensure to support baud rates upto 1.5Mpbs.
+
#
# Misc. options/drivers.
#
@@ -370,6 +385,18 @@ config SERIAL_8250_FSL
erratum for Freescale 16550 UARTs in the 8250 driver. It also
enables support for ACPI enumeration.
+config SERIAL_8250_DFL
+ tristate "DFL bus driver for Altera 16550 UART"
+ depends on SERIAL_8250 && FPGA_DFL
+ help
+ This option enables support for a Device Feature List (DFL) bus
+ driver for the Altera 16550 UART. One or more Altera 16550 UARTs
+ can be instantiated in a FPGA and then be discovered during
+ enumeration of the DFL bus.
+
+ To compile this driver as a module, chose M here: the
+ module will be called 8250_dfl.
+
config SERIAL_8250_DW
tristate "Support for Synopsys DesignWare 8250 quirks"
depends on SERIAL_8250
diff --git a/drivers/tty/serial/8250/Makefile b/drivers/tty/serial/8250/Makefile
index 1615bfdde2a0..4fc2fc1f41b6 100644
--- a/drivers/tty/serial/8250/Makefile
+++ b/drivers/tty/serial/8250/Makefile
@@ -12,6 +12,7 @@ obj-$(CONFIG_SERIAL_8250) += 8250.o 8250_base.o
8250_base-$(CONFIG_SERIAL_8250_DMA) += 8250_dma.o
8250_base-$(CONFIG_SERIAL_8250_DWLIB) += 8250_dwlib.o
8250_base-$(CONFIG_SERIAL_8250_FINTEK) += 8250_fintek.o
+8250_base-$(CONFIG_SERIAL_8250_PCILIB) += 8250_pcilib.o
obj-$(CONFIG_SERIAL_8250_PARISC) += 8250_parisc.o
obj-$(CONFIG_SERIAL_8250_PCI) += 8250_pci.o
obj-$(CONFIG_SERIAL_8250_EXAR) += 8250_exar.o
@@ -26,8 +27,10 @@ obj-$(CONFIG_SERIAL_8250_ACCENT) += 8250_accent.o
obj-$(CONFIG_SERIAL_8250_BOCA) += 8250_boca.o
obj-$(CONFIG_SERIAL_8250_EXAR_ST16C554) += 8250_exar_st16c554.o
obj-$(CONFIG_SERIAL_8250_HUB6) += 8250_hub6.o
+obj-$(CONFIG_SERIAL_8250_PCI1XXXX) += 8250_pci1xxxx.o
obj-$(CONFIG_SERIAL_8250_FSL) += 8250_fsl.o
obj-$(CONFIG_SERIAL_8250_MEN_MCB) += 8250_men_mcb.o
+obj-$(CONFIG_SERIAL_8250_DFL) += 8250_dfl.o
obj-$(CONFIG_SERIAL_8250_DW) += 8250_dw.o
obj-$(CONFIG_SERIAL_8250_EM) += 8250_em.o
obj-$(CONFIG_SERIAL_8250_IOC3) += 8250_ioc3.o
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index c55b947f3cdb..625358f44419 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -73,17 +73,17 @@ config SERIAL_AMBA_PL011_CONSOLE
your boot loader (lilo or loadlin) about how to pass options to the
kernel at boot time.)
-config SERIAL_EARLYCON_ARM_SEMIHOST
- bool "Early console using ARM semihosting"
- depends on ARM64 || ARM
+config SERIAL_EARLYCON_SEMIHOST
+ bool "Early console using Arm compatible semihosting"
+ depends on ARM64 || ARM || RISCV
select SERIAL_CORE
select SERIAL_CORE_CONSOLE
select SERIAL_EARLYCON
help
- Support for early debug console using ARM semihosting. This enables
- the console before standard serial driver is probed. This is enabled
- with "earlycon=smh" on the kernel command line. The console is
- enabled when early_param is processed.
+ Support for early debug console using Arm compatible semihosting.
+ This enables the console before standard serial driver is probed.
+ This is enabled with "earlycon=smh" on the kernel command line.
+ The console is enabled when early_param is processed.
config SERIAL_EARLYCON_RISCV_SBI
bool "Early console using RISC-V SBI"
@@ -242,23 +242,23 @@ config SERIAL_SAMSUNG
select SERIAL_CORE
help
Support for the on-chip UARTs on the Samsung
- S3C24xx/S3C64xx/S5Pv210/Exynos and Apple M1 SoCs, providing
+ S3C64xx/S5Pv210/Exynos and Apple M1 SoCs, providing
/dev/ttySAC0, 1 and 2 (note, some machines may not provide all of
these ports, depending on how the serial port pins are configured.
+
Choose Y/M here only if you build for such SoC.
config SERIAL_SAMSUNG_UARTS_4
bool
depends on SERIAL_SAMSUNG
- default y if !(CPU_S3C2410 || CPU_S3C2412 || CPU_S3C2440 || CPU_S3C2442)
+ default y
help
Internal node for the common case of 4 Samsung compatible UARTs
config SERIAL_SAMSUNG_UARTS
int
depends on SERIAL_SAMSUNG
- default 4 if SERIAL_SAMSUNG_UARTS_4 || CPU_S3C2416
- default 3
+ default 4
help
Select the number of available UART ports for the Samsung S3C
serial driver
@@ -1507,7 +1507,7 @@ config SERIAL_MILBEAUT_USIO_CONSOLE
config SERIAL_LITEUART
tristate "LiteUART serial port support"
depends on HAS_IOMEM
- depends on OF || COMPILE_TEST
+ depends on OF
depends on LITEX || COMPILE_TEST
select SERIAL_CORE
help
diff --git a/drivers/tty/serial/Makefile b/drivers/tty/serial/Makefile
index 238a9557b487..cd9afd9e3018 100644
--- a/drivers/tty/serial/Makefile
+++ b/drivers/tty/serial/Makefile
@@ -6,7 +6,7 @@
obj-$(CONFIG_SERIAL_CORE) += serial_core.o
obj-$(CONFIG_SERIAL_EARLYCON) += earlycon.o
-obj-$(CONFIG_SERIAL_EARLYCON_ARM_SEMIHOST) += earlycon-arm-semihost.o
+obj-$(CONFIG_SERIAL_EARLYCON_SEMIHOST) += earlycon-semihost.o
obj-$(CONFIG_SERIAL_EARLYCON_RISCV_SBI) += earlycon-riscv-sbi.o
# These Sparc drivers have to appear before others such as 8250
diff --git a/drivers/tty/serial/arc_uart.c b/drivers/tty/serial/arc_uart.c
index 748e8b1cf4f7..59e25f2b6632 100644
--- a/drivers/tty/serial/arc_uart.c
+++ b/drivers/tty/serial/arc_uart.c
@@ -631,12 +631,6 @@ static int arc_serial_probe(struct platform_device *pdev)
return uart_add_one_port(&arc_uart_driver, &arc_uart_ports[dev_id].port);
}
-static int arc_serial_remove(struct platform_device *pdev)
-{
- /* This will never be called */
- return 0;
-}
-
static const struct of_device_id arc_uart_dt_ids[] = {
{ .compatible = "snps,arc-uart" },
{ /* Sentinel */ }
@@ -645,7 +639,6 @@ MODULE_DEVICE_TABLE(of, arc_uart_dt_ids);
static struct platform_driver arc_platform_driver = {
.probe = arc_serial_probe,
- .remove = arc_serial_remove,
.driver = {
.name = DRIVER_NAME,
.of_match_table = arc_uart_dt_ids,
diff --git a/drivers/tty/serial/earlycon-arm-semihost.c b/drivers/tty/serial/earlycon-semihost.c
index fcdec5f42376..e4692a8433f9 100644
--- a/drivers/tty/serial/earlycon-arm-semihost.c
+++ b/drivers/tty/serial/earlycon-semihost.c
@@ -11,30 +11,7 @@
#include <linux/console.h>
#include <linux/init.h>
#include <linux/serial_core.h>
-
-#ifdef CONFIG_THUMB2_KERNEL
-#define SEMIHOST_SWI "0xab"
-#else
-#define SEMIHOST_SWI "0x123456"
-#endif
-
-/*
- * Semihosting-based debug console
- */
-static void smh_putc(struct uart_port *port, unsigned char c)
-{
-#ifdef CONFIG_ARM64
- asm volatile("mov x1, %0\n"
- "mov x0, #3\n"
- "hlt 0xf000\n"
- : : "r" (&c) : "x0", "x1", "memory");
-#else
- asm volatile("mov r1, %0\n"
- "mov r0, #3\n"
- "svc " SEMIHOST_SWI "\n"
- : : "r" (&c) : "r0", "r1", "memory");
-#endif
-}
+#include <asm/semihost.h>
static void smh_write(struct console *con, const char *s, unsigned n)
{
diff --git a/drivers/tty/serial/earlycon.c b/drivers/tty/serial/earlycon.c
index 4f6e9bf57169..a5fbb6ed38ae 100644
--- a/drivers/tty/serial/earlycon.c
+++ b/drivers/tty/serial/earlycon.c
@@ -120,7 +120,13 @@ static int __init parse_options(struct earlycon_device *device, char *options)
}
if (options) {
+ char *uartclk;
+
device->baud = simple_strtoul(options, NULL, 0);
+ uartclk = strchr(options, ',');
+ if (uartclk && kstrtouint(uartclk + 1, 0, &port->uartclk) < 0)
+ pr_warn("[%s] unsupported earlycon uart clkrate option\n",
+ options);
length = min(strcspn(options, " ") + 1,
(size_t)(sizeof(device->options)));
strscpy(device->options, options, length);
@@ -139,7 +145,8 @@ static int __init register_earlycon(char *buf, const struct earlycon_id *match)
buf = NULL;
spin_lock_init(&port->lock);
- port->uartclk = BASE_BAUD * 16;
+ if (!port->uartclk)
+ port->uartclk = BASE_BAUD * 16;
if (port->mapbase)
port->membase = earlycon_map(port->mapbase, 64);
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index 5e69fb73f570..e945f41b93d4 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -5,6 +5,8 @@
* Copyright 2012-2014 Freescale Semiconductor, Inc.
*/
+#include <linux/bitfield.h>
+#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/console.h>
#include <linux/delay.h>
@@ -181,7 +183,7 @@
#define UARTCTRL_SBK 0x00010000
#define UARTCTRL_MA1IE 0x00008000
#define UARTCTRL_MA2IE 0x00004000
-#define UARTCTRL_IDLECFG 0x00000100
+#define UARTCTRL_IDLECFG GENMASK(10, 8)
#define UARTCTRL_LOOPS 0x00000080
#define UARTCTRL_DOZEEN 0x00000040
#define UARTCTRL_RSRC 0x00000020
@@ -199,6 +201,7 @@
#define UARTDATA_MASK 0x3ff
#define UARTMODIR_IREN 0x00020000
+#define UARTMODIR_RTSWATER GENMASK(10, 8)
#define UARTMODIR_TXCTSSRC 0x00000020
#define UARTMODIR_TXCTSC 0x00000010
#define UARTMODIR_RXRTSE 0x00000008
@@ -212,6 +215,7 @@
#define UARTFIFO_RXUF 0x00010000
#define UARTFIFO_TXFLUSH 0x00008000
#define UARTFIFO_RXFLUSH 0x00004000
+#define UARTFIFO_RXIDEN GENMASK(12, 10)
#define UARTFIFO_TXOFE 0x00000200
#define UARTFIFO_RXUFE 0x00000100
#define UARTFIFO_TXFE 0x00000080
@@ -238,7 +242,7 @@
#define DRIVER_NAME "fsl-lpuart"
#define DEV_NAME "ttyLP"
-#define UART_NR 6
+#define UART_NR 8
/* IMX lpuart has four extra unused regs located at the beginning */
#define IMX_REG_OFF 0x10
@@ -248,6 +252,7 @@ enum lpuart_type {
LS1021A_LPUART,
LS1028A_LPUART,
IMX7ULP_LPUART,
+ IMX8ULP_LPUART,
IMX8QXP_LPUART,
IMXRT1050_LPUART,
};
@@ -260,6 +265,7 @@ struct lpuart_port {
unsigned int txfifo_size;
unsigned int rxfifo_size;
+ u8 rx_watermark;
bool lpuart_dma_tx_use;
bool lpuart_dma_rx_use;
struct dma_chan *dma_tx_chan;
@@ -286,38 +292,52 @@ struct lpuart_soc_data {
enum lpuart_type devtype;
char iotype;
u8 reg_off;
+ u8 rx_watermark;
};
static const struct lpuart_soc_data vf_data = {
.devtype = VF610_LPUART,
.iotype = UPIO_MEM,
+ .rx_watermark = 1,
};
static const struct lpuart_soc_data ls1021a_data = {
.devtype = LS1021A_LPUART,
.iotype = UPIO_MEM32BE,
+ .rx_watermark = 1,
};
static const struct lpuart_soc_data ls1028a_data = {
.devtype = LS1028A_LPUART,
.iotype = UPIO_MEM32,
+ .rx_watermark = 1,
};
static struct lpuart_soc_data imx7ulp_data = {
.devtype = IMX7ULP_LPUART,
.iotype = UPIO_MEM32,
.reg_off = IMX_REG_OFF,
+ .rx_watermark = 1,
+};
+
+static struct lpuart_soc_data imx8ulp_data = {
+ .devtype = IMX8ULP_LPUART,
+ .iotype = UPIO_MEM32,
+ .reg_off = IMX_REG_OFF,
+ .rx_watermark = 3,
};
static struct lpuart_soc_data imx8qxp_data = {
.devtype = IMX8QXP_LPUART,
.iotype = UPIO_MEM32,
.reg_off = IMX_REG_OFF,
+ .rx_watermark = 31,
};
static struct lpuart_soc_data imxrt1050_data = {
.devtype = IMXRT1050_LPUART,
.iotype = UPIO_MEM32,
.reg_off = IMX_REG_OFF,
+ .rx_watermark = 1,
};
static const struct of_device_id lpuart_dt_ids[] = {
@@ -325,6 +345,7 @@ static const struct of_device_id lpuart_dt_ids[] = {
{ .compatible = "fsl,ls1021a-lpuart", .data = &ls1021a_data, },
{ .compatible = "fsl,ls1028a-lpuart", .data = &ls1028a_data, },
{ .compatible = "fsl,imx7ulp-lpuart", .data = &imx7ulp_data, },
+ { .compatible = "fsl,imx8ulp-lpuart", .data = &imx8ulp_data, },
{ .compatible = "fsl,imx8qxp-lpuart", .data = &imx8qxp_data, },
{ .compatible = "fsl,imxrt1050-lpuart", .data = &imxrt1050_data},
{ /* sentinel */ }
@@ -345,6 +366,11 @@ static inline bool is_imx7ulp_lpuart(struct lpuart_port *sport)
return sport->devtype == IMX7ULP_LPUART;
}
+static inline bool is_imx8ulp_lpuart(struct lpuart_port *sport)
+{
+ return sport->devtype == IMX8ULP_LPUART;
+}
+
static inline bool is_imx8qxp_lpuart(struct lpuart_port *sport)
{
return sport->devtype == IMX8QXP_LPUART;
@@ -1387,9 +1413,9 @@ static int lpuart32_config_rs485(struct uart_port *port, struct ktermios *termio
* Note: UART is assumed to be active high.
*/
if (rs485->flags & SER_RS485_RTS_ON_SEND)
- modem &= ~UARTMODEM_TXRTSPOL;
- else if (rs485->flags & SER_RS485_RTS_AFTER_SEND)
modem |= UARTMODEM_TXRTSPOL;
+ else if (rs485->flags & SER_RS485_RTS_AFTER_SEND)
+ modem &= ~UARTMODEM_TXRTSPOL;
}
lpuart32_write(&sport->port, modem, UARTMODIR);
@@ -1462,12 +1488,32 @@ static void lpuart_break_ctl(struct uart_port *port, int break_state)
static void lpuart32_break_ctl(struct uart_port *port, int break_state)
{
- unsigned long temp;
+ unsigned long temp, modem;
+ struct tty_struct *tty;
+ unsigned int cflag = 0;
+
+ tty = tty_port_tty_get(&port->state->port);
+ if (tty) {
+ cflag = tty->termios.c_cflag;
+ tty_kref_put(tty);
+ }
temp = lpuart32_read(port, UARTCTRL) & ~UARTCTRL_SBK;
+ modem = lpuart32_read(port, UARTMODIR);
- if (break_state != 0)
+ if (break_state != 0) {
temp |= UARTCTRL_SBK;
+ /*
+ * LPUART CTS has higher priority than SBK, need to disable CTS before
+ * asserting SBK to avoid any interference if flow control is enabled.
+ */
+ if (cflag & CRTSCTS && modem & UARTMODIR_TXCTSE)
+ lpuart32_write(port, modem & ~UARTMODIR_TXCTSE, UARTMODIR);
+ } else {
+ /* Re-enable the CTS when break off. */
+ if (cflag & CRTSCTS && !(modem & UARTMODIR_TXCTSE))
+ lpuart32_write(port, modem | UARTMODIR_TXCTSE, UARTMODIR);
+ }
lpuart32_write(port, temp, UARTCTRL);
}
@@ -1497,8 +1543,10 @@ static void lpuart_setup_watermark(struct lpuart_port *sport)
writeb(UARTSFIFO_RXUF, sport->port.membase + UARTSFIFO);
}
+ if (uart_console(&sport->port))
+ sport->rx_watermark = 1;
writeb(0, sport->port.membase + UARTTWFIFO);
- writeb(1, sport->port.membase + UARTRWFIFO);
+ writeb(sport->rx_watermark, sport->port.membase + UARTRWFIFO);
/* Restore cr2 */
writeb(cr2_saved, sport->port.membase + UARTCR2);
@@ -1523,19 +1571,30 @@ static void lpuart32_setup_watermark(struct lpuart_port *sport)
ctrl = lpuart32_read(&sport->port, UARTCTRL);
ctrl_saved = ctrl;
ctrl &= ~(UARTCTRL_TIE | UARTCTRL_TCIE | UARTCTRL_TE |
- UARTCTRL_RIE | UARTCTRL_RE);
+ UARTCTRL_RIE | UARTCTRL_RE | UARTCTRL_ILIE);
lpuart32_write(&sport->port, ctrl, UARTCTRL);
/* enable FIFO mode */
val = lpuart32_read(&sport->port, UARTFIFO);
val |= UARTFIFO_TXFE | UARTFIFO_RXFE;
val |= UARTFIFO_TXFLUSH | UARTFIFO_RXFLUSH;
+ val |= FIELD_PREP(UARTFIFO_RXIDEN, 0x3);
lpuart32_write(&sport->port, val, UARTFIFO);
/* set the watermark */
- val = (0x1 << UARTWATER_RXWATER_OFF) | (0x0 << UARTWATER_TXWATER_OFF);
+ if (uart_console(&sport->port))
+ sport->rx_watermark = 1;
+ val = (sport->rx_watermark << UARTWATER_RXWATER_OFF) |
+ (0x0 << UARTWATER_TXWATER_OFF);
lpuart32_write(&sport->port, val, UARTWATER);
+ /* set RTS watermark */
+ if (!uart_console(&sport->port)) {
+ val = lpuart32_read(&sport->port, UARTMODIR);
+ val |= FIELD_PREP(UARTMODIR_RTSWATER, sport->rxfifo_size >> 1);
+ lpuart32_write(&sport->port, val, UARTMODIR);
+ }
+
/* Restore cr2 */
lpuart32_write(&sport->port, ctrl_saved, UARTCTRL);
}
@@ -1547,7 +1606,8 @@ static void lpuart32_setup_watermark_enable(struct lpuart_port *sport)
lpuart32_setup_watermark(sport);
temp = lpuart32_read(&sport->port, UARTCTRL);
- temp |= UARTCTRL_RE | UARTCTRL_TE | UARTCTRL_ILIE;
+ temp |= UARTCTRL_RE | UARTCTRL_TE;
+ temp |= FIELD_PREP(UARTCTRL_IDLECFG, 0x7);
lpuart32_write(&sport->port, temp, UARTCTRL);
}
@@ -1679,19 +1739,23 @@ static int lpuart_startup(struct uart_port *port)
return 0;
}
+static void lpuart32_hw_disable(struct lpuart_port *sport)
+{
+ unsigned long temp;
+
+ temp = lpuart32_read(&sport->port, UARTCTRL);
+ temp &= ~(UARTCTRL_RIE | UARTCTRL_ILIE | UARTCTRL_RE |
+ UARTCTRL_TIE | UARTCTRL_TE);
+ lpuart32_write(&sport->port, temp, UARTCTRL);
+}
+
static void lpuart32_configure(struct lpuart_port *sport)
{
unsigned long temp;
- if (sport->lpuart_dma_rx_use) {
- /* RXWATER must be 0 */
- temp = lpuart32_read(&sport->port, UARTWATER);
- temp &= ~(UARTWATER_WATER_MASK << UARTWATER_RXWATER_OFF);
- lpuart32_write(&sport->port, temp, UARTWATER);
- }
temp = lpuart32_read(&sport->port, UARTCTRL);
if (!sport->lpuart_dma_rx_use)
- temp |= UARTCTRL_RIE;
+ temp |= UARTCTRL_RIE | UARTCTRL_ILIE;
if (!sport->lpuart_dma_tx_use)
temp |= UARTCTRL_TIE;
lpuart32_write(&sport->port, temp, UARTCTRL);
@@ -1703,11 +1767,12 @@ static void lpuart32_hw_setup(struct lpuart_port *sport)
spin_lock_irqsave(&sport->port.lock, flags);
- lpuart32_setup_watermark_enable(sport);
+ lpuart32_hw_disable(sport);
lpuart_rx_dma_startup(sport);
lpuart_tx_dma_startup(sport);
+ lpuart32_setup_watermark_enable(sport);
lpuart32_configure(sport);
spin_unlock_irqrestore(&sport->port.lock, flags);
@@ -1796,10 +1861,19 @@ static void lpuart32_shutdown(struct uart_port *port)
spin_lock_irqsave(&port->lock, flags);
- /* disable Rx/Tx and interrupts */
+ /* clear status */
+ temp = lpuart32_read(&sport->port, UARTSTAT);
+ lpuart32_write(&sport->port, temp, UARTSTAT);
+
+ /* disable Rx/Tx DMA */
+ temp = lpuart32_read(port, UARTBAUD);
+ temp &= ~(UARTBAUD_TDMAE | UARTBAUD_RDMAE);
+ lpuart32_write(port, temp, UARTBAUD);
+
+ /* disable Rx/Tx and interrupts and break condition */
temp = lpuart32_read(port, UARTCTRL);
- temp &= ~(UARTCTRL_TE | UARTCTRL_RE |
- UARTCTRL_TIE | UARTCTRL_TCIE | UARTCTRL_RIE);
+ temp &= ~(UARTCTRL_TE | UARTCTRL_RE | UARTCTRL_ILIE |
+ UARTCTRL_TIE | UARTCTRL_TCIE | UARTCTRL_RIE | UARTCTRL_SBK);
lpuart32_write(port, temp, UARTCTRL);
spin_unlock_irqrestore(&port->lock, flags);
@@ -2631,7 +2705,7 @@ static int lpuart_global_reset(struct lpuart_port *sport)
return ret;
}
- if (is_imx7ulp_lpuart(sport) || is_imx8qxp_lpuart(sport)) {
+ if (is_imx7ulp_lpuart(sport) || is_imx8ulp_lpuart(sport) || is_imx8qxp_lpuart(sport)) {
/*
* If the transmitter is used by earlycon, wait for transmit engine to
* complete and then reset.
@@ -2688,6 +2762,7 @@ static int lpuart_probe(struct platform_device *pdev)
sport->port.dev = &pdev->dev;
sport->port.type = PORT_LPUART;
sport->devtype = sdata->devtype;
+ sport->rx_watermark = sdata->rx_watermark;
ret = platform_get_irq(pdev, 0);
if (ret < 0)
return ret;
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 757825edb0cd..523f296d5747 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -210,12 +210,8 @@ struct imx_port {
struct mctrl_gpios *gpios;
- /* shadow registers */
- unsigned int ucr1;
- unsigned int ucr2;
- unsigned int ucr3;
- unsigned int ucr4;
- unsigned int ufcr;
+ /* counter to stop 0xff flood */
+ int idle_counter;
/* DMA fields */
unsigned int dma_is_enabled:1;
@@ -273,59 +269,14 @@ static const struct of_device_id imx_uart_dt_ids[] = {
};
MODULE_DEVICE_TABLE(of, imx_uart_dt_ids);
-static void imx_uart_writel(struct imx_port *sport, u32 val, u32 offset)
-{
- switch (offset) {
- case UCR1:
- sport->ucr1 = val;
- break;
- case UCR2:
- sport->ucr2 = val;
- break;
- case UCR3:
- sport->ucr3 = val;
- break;
- case UCR4:
- sport->ucr4 = val;
- break;
- case UFCR:
- sport->ufcr = val;
- break;
- default:
- break;
- }
+static inline void imx_uart_writel(struct imx_port *sport, u32 val, u32 offset)
+{
writel(val, sport->port.membase + offset);
}
-static u32 imx_uart_readl(struct imx_port *sport, u32 offset)
+static inline u32 imx_uart_readl(struct imx_port *sport, u32 offset)
{
- switch (offset) {
- case UCR1:
- return sport->ucr1;
- break;
- case UCR2:
- /*
- * UCR2_SRST is the only bit in the cached registers that might
- * differ from the value that was last written. As it only
- * automatically becomes one after being cleared, reread
- * conditionally.
- */
- if (!(sport->ucr2 & UCR2_SRST))
- sport->ucr2 = readl(sport->port.membase + offset);
- return sport->ucr2;
- break;
- case UCR3:
- return sport->ucr3;
- break;
- case UCR4:
- return sport->ucr4;
- break;
- case UFCR:
- return sport->ufcr;
- break;
- default:
- return readl(sport->port.membase + offset);
- }
+ return readl(sport->port.membase + offset);
}
static inline unsigned imx_uart_uts_reg(struct imx_port *sport)
@@ -398,6 +349,41 @@ static void start_hrtimer_ms(struct hrtimer *hrt, unsigned long msec)
}
/* called with port.lock taken and irqs off */
+static void imx_uart_soft_reset(struct imx_port *sport)
+{
+ int i = 10;
+ u32 ucr2, ubir, ubmr, uts;
+
+ /*
+ * According to the Reference Manual description of the UART SRST bit:
+ *
+ * "Reset the transmit and receive state machines,
+ * all FIFOs and register USR1, USR2, UBIR, UBMR, UBRC, URXD, UTXD
+ * and UTS[6-3]".
+ *
+ * We don't need to restore the old values from USR1, USR2, URXD and
+ * UTXD. UBRC is read only, so only save/restore the other three
+ * registers.
+ */
+ ubir = imx_uart_readl(sport, UBIR);
+ ubmr = imx_uart_readl(sport, UBMR);
+ uts = imx_uart_readl(sport, IMX21_UTS);
+
+ ucr2 = imx_uart_readl(sport, UCR2);
+ imx_uart_writel(sport, ucr2 & ~UCR2_SRST, UCR2);
+
+ while (!(imx_uart_readl(sport, UCR2) & UCR2_SRST) && (--i > 0))
+ udelay(1);
+
+ /* Restore the registers */
+ imx_uart_writel(sport, ubir, UBIR);
+ imx_uart_writel(sport, ubmr, UBMR);
+ imx_uart_writel(sport, uts, IMX21_UTS);
+
+ sport->idle_counter = 0;
+}
+
+/* called with port.lock taken and irqs off */
static void imx_uart_start_rx(struct uart_port *port)
{
struct imx_port *sport = (struct imx_port *)port;
@@ -476,7 +462,8 @@ static void imx_uart_stop_tx(struct uart_port *port)
imx_uart_rts_inactive(sport, &ucr2);
imx_uart_writel(sport, ucr2, UCR2);
- imx_uart_start_rx(port);
+ if (!port->rs485_rx_during_tx_gpio)
+ imx_uart_start_rx(port);
sport->tx_state = OFF;
}
@@ -705,7 +692,8 @@ static void imx_uart_start_tx(struct uart_port *port)
imx_uart_rts_inactive(sport, &ucr2);
imx_uart_writel(sport, ucr2, UCR2);
- if (!(port->rs485.flags & SER_RS485_RX_DURING_TX))
+ if (!(port->rs485.flags & SER_RS485_RX_DURING_TX) &&
+ !port->rs485_rx_during_tx_gpio)
imx_uart_stop_rx(port);
sport->tx_state = WAIT_AFTER_RTS;
@@ -771,7 +759,7 @@ static irqreturn_t __imx_uart_rtsint(int irq, void *dev_id)
imx_uart_writel(sport, USR1_RTSD, USR1);
usr1 = imx_uart_readl(sport, USR1) & USR1_RTSS;
- uart_handle_cts_change(&sport->port, !!usr1);
+ uart_handle_cts_change(&sport->port, usr1);
wake_up_interruptible(&sport->port.state->port.delta_msr_wait);
return IRQ_HANDLED;
@@ -801,33 +789,73 @@ static irqreturn_t imx_uart_txint(int irq, void *dev_id)
return IRQ_HANDLED;
}
+/* Check if hardware Rx flood is in progress, and issue soft reset to stop it.
+ * This is to be called from Rx ISRs only when some bytes were actually
+ * received.
+ *
+ * A way to reproduce the flood (checked on iMX6SX) is: open iMX UART at 9600
+ * 8N1, and from external source send 0xf0 char at 115200 8N1. In about 90% of
+ * cases this starts a flood of "receiving" of 0xff characters by the iMX6 UART
+ * that is terminated by any activity on RxD line, or could be stopped by
+ * issuing soft reset to the UART (just stop/start of RX does not help). Note
+ * that what we do here is sending isolated start bit about 2.4 times shorter
+ * than it is to be on UART configured baud rate.
+ */
+static void imx_uart_check_flood(struct imx_port *sport, u32 usr2)
+{
+ /* To detect hardware 0xff flood we monitor RxD line between RX
+ * interrupts to isolate "receiving" of char(s) with no activity
+ * on RxD line, that'd never happen on actual data transfers.
+ *
+ * We use USR2_WAKE bit to check for activity on RxD line, but we have a
+ * race here if we clear USR2_WAKE when receiving of a char is in
+ * progress, so we might get RX interrupt later with USR2_WAKE bit
+ * cleared. Note though that as we don't try to clear USR2_WAKE when we
+ * detected no activity, this race may hide actual activity only once.
+ *
+ * Yet another case where receive interrupt may occur without RxD
+ * activity is expiration of aging timer, so we consider this as well.
+ *
+ * We use 'idle_counter' to ensure that we got at least so many RX
+ * interrupts without any detected activity on RxD line. 2 cases
+ * described plus 1 to be on the safe side gives us a margin of 3,
+ * below. In practice I was not able to produce a false positive to
+ * induce soft reset at regular data transfers even using 1 as the
+ * margin, so 3 is actually very strong.
+ *
+ * We count interrupts, not chars in 'idle-counter' for simplicity.
+ */
+
+ if (usr2 & USR2_WAKE) {
+ imx_uart_writel(sport, USR2_WAKE, USR2);
+ sport->idle_counter = 0;
+ } else if (++sport->idle_counter > 3) {
+ dev_warn(sport->port.dev, "RX flood detected: soft reset.");
+ imx_uart_soft_reset(sport); /* also clears 'sport->idle_counter' */
+ }
+}
+
static irqreturn_t __imx_uart_rxint(int irq, void *dev_id)
{
struct imx_port *sport = dev_id;
- unsigned int rx, flg, ignored = 0;
struct tty_port *port = &sport->port.state->port;
+ u32 usr2, rx;
- while (imx_uart_readl(sport, USR2) & USR2_RDR) {
- u32 usr2;
+ /* If we received something, check for 0xff flood */
+ usr2 = imx_uart_readl(sport, USR2);
+ if (usr2 & USR2_RDR)
+ imx_uart_check_flood(sport, usr2);
- flg = TTY_NORMAL;
+ while ((rx = imx_uart_readl(sport, URXD0)) & URXD_CHARRDY) {
+ unsigned int flg = TTY_NORMAL;
sport->port.icount.rx++;
- rx = imx_uart_readl(sport, URXD0);
-
- usr2 = imx_uart_readl(sport, USR2);
- if (usr2 & USR2_BRCD) {
- imx_uart_writel(sport, USR2_BRCD, USR2);
- if (uart_handle_break(&sport->port))
- continue;
- }
-
- if (uart_handle_sysrq_char(&sport->port, (unsigned char)rx))
- continue;
-
if (unlikely(rx & URXD_ERR)) {
- if (rx & URXD_BRK)
+ if (rx & URXD_BRK) {
sport->port.icount.brk++;
+ if (uart_handle_break(&sport->port))
+ continue;
+ }
else if (rx & URXD_PRERR)
sport->port.icount.parity++;
else if (rx & URXD_FRMERR)
@@ -835,11 +863,8 @@ static irqreturn_t __imx_uart_rxint(int irq, void *dev_id)
if (rx & URXD_OVRRUN)
sport->port.icount.overrun++;
- if (rx & sport->port.ignore_status_mask) {
- if (++ignored > 100)
- goto out;
+ if (rx & sport->port.ignore_status_mask)
continue;
- }
rx &= (sport->port.read_status_mask | 0xFF);
@@ -853,16 +878,17 @@ static irqreturn_t __imx_uart_rxint(int irq, void *dev_id)
flg = TTY_OVERRUN;
sport->port.sysrq = 0;
+ } else if (uart_handle_sysrq_char(&sport->port, (unsigned char)rx)) {
+ continue;
}
if (sport->port.ignore_status_mask & URXD_DUMMY_READ)
- goto out;
+ continue;
if (tty_insert_flip_char(port, rx, flg) == 0)
sport->port.icount.buf_overrun++;
}
-out:
tty_flip_buffer_push(port);
return IRQ_HANDLED;
@@ -1147,55 +1173,62 @@ static void imx_uart_dma_rx_callback(void *data)
status = dmaengine_tx_status(chan, sport->rx_cookie, &state);
if (status == DMA_ERROR) {
+ spin_lock(&sport->port.lock);
imx_uart_clear_rx_errors(sport);
+ spin_unlock(&sport->port.lock);
return;
}
- if (!(sport->port.ignore_status_mask & URXD_DUMMY_READ)) {
+ /*
+ * The state-residue variable represents the empty space
+ * relative to the entire buffer. Taking this in consideration
+ * the head is always calculated base on the buffer total
+ * length - DMA transaction residue. The UART script from the
+ * SDMA firmware will jump to the next buffer descriptor,
+ * once a DMA transaction if finalized (IMX53 RM - A.4.1.2.4).
+ * Taking this in consideration the tail is always at the
+ * beginning of the buffer descriptor that contains the head.
+ */
- /*
- * The state-residue variable represents the empty space
- * relative to the entire buffer. Taking this in consideration
- * the head is always calculated base on the buffer total
- * length - DMA transaction residue. The UART script from the
- * SDMA firmware will jump to the next buffer descriptor,
- * once a DMA transaction if finalized (IMX53 RM - A.4.1.2.4).
- * Taking this in consideration the tail is always at the
- * beginning of the buffer descriptor that contains the head.
- */
+ /* Calculate the head */
+ rx_ring->head = sg_dma_len(sgl) - state.residue;
+
+ /* Calculate the tail. */
+ bd_size = sg_dma_len(sgl) / sport->rx_periods;
+ rx_ring->tail = ((rx_ring->head-1) / bd_size) * bd_size;
- /* Calculate the head */
- rx_ring->head = sg_dma_len(sgl) - state.residue;
+ if (rx_ring->head <= sg_dma_len(sgl) &&
+ rx_ring->head > rx_ring->tail) {
- /* Calculate the tail. */
- bd_size = sg_dma_len(sgl) / sport->rx_periods;
- rx_ring->tail = ((rx_ring->head-1) / bd_size) * bd_size;
+ /* Move data from tail to head */
+ r_bytes = rx_ring->head - rx_ring->tail;
- if (rx_ring->head <= sg_dma_len(sgl) &&
- rx_ring->head > rx_ring->tail) {
+ /* If we received something, check for 0xff flood */
+ spin_lock(&sport->port.lock);
+ imx_uart_check_flood(sport, imx_uart_readl(sport, USR2));
+ spin_unlock(&sport->port.lock);
- /* Move data from tail to head */
- r_bytes = rx_ring->head - rx_ring->tail;
+ if (!(sport->port.ignore_status_mask & URXD_DUMMY_READ)) {
/* CPU claims ownership of RX DMA buffer */
dma_sync_sg_for_cpu(sport->port.dev, sgl, 1,
- DMA_FROM_DEVICE);
+ DMA_FROM_DEVICE);
w_bytes = tty_insert_flip_string(port,
- sport->rx_buf + rx_ring->tail, r_bytes);
+ sport->rx_buf + rx_ring->tail, r_bytes);
/* UART retrieves ownership of RX DMA buffer */
dma_sync_sg_for_device(sport->port.dev, sgl, 1,
- DMA_FROM_DEVICE);
+ DMA_FROM_DEVICE);
if (w_bytes != r_bytes)
sport->port.icount.buf_overrun++;
sport->port.icount.rx += w_bytes;
- } else {
- WARN_ON(rx_ring->head > sg_dma_len(sgl));
- WARN_ON(rx_ring->head <= rx_ring->tail);
}
+ } else {
+ WARN_ON(rx_ring->head > sg_dma_len(sgl));
+ WARN_ON(rx_ring->head <= rx_ring->tail);
}
if (w_bytes) {
@@ -1271,6 +1304,8 @@ static void imx_uart_clear_rx_errors(struct imx_port *sport)
imx_uart_writel(sport, USR2_ORE, USR2);
}
+ sport->idle_counter = 0;
+
}
#define TXTL_DEFAULT 2 /* reset default */
@@ -1398,7 +1433,7 @@ static void imx_uart_disable_dma(struct imx_port *sport)
static int imx_uart_startup(struct uart_port *port)
{
struct imx_port *sport = (struct imx_port *)port;
- int retval, i;
+ int retval;
unsigned long flags;
int dma_is_inited = 0;
u32 ucr1, ucr2, ucr3, ucr4, uts;
@@ -1430,15 +1465,9 @@ static int imx_uart_startup(struct uart_port *port)
dma_is_inited = 1;
spin_lock_irqsave(&sport->port.lock, flags);
- /* Reset fifo's and state machines */
- i = 100;
- ucr2 = imx_uart_readl(sport, UCR2);
- ucr2 &= ~UCR2_SRST;
- imx_uart_writel(sport, ucr2, UCR2);
-
- while (!(imx_uart_readl(sport, UCR2) & UCR2_SRST) && (--i > 0))
- udelay(1);
+ /* Reset fifo's and state machines */
+ imx_uart_soft_reset(sport);
/*
* Finally, clear and enable interrupts
@@ -1564,7 +1593,8 @@ static void imx_uart_shutdown(struct uart_port *port)
spin_lock_irqsave(&sport->port.lock, flags);
ucr1 = imx_uart_readl(sport, UCR1);
- ucr1 &= ~(UCR1_TRDYEN | UCR1_RRDYEN | UCR1_RTSDEN | UCR1_RXDMAEN | UCR1_ATDMAEN);
+ ucr1 &= ~(UCR1_TRDYEN | UCR1_RRDYEN | UCR1_RTSDEN | UCR1_RXDMAEN |
+ UCR1_ATDMAEN | UCR1_SNDBRK);
/* See SER_RS485_ENABLED/UTS_LOOP comment in imx_uart_probe() */
if (port->rs485.flags & SER_RS485_ENABLED &&
port->rs485.flags & SER_RS485_RTS_ON_SEND &&
@@ -1593,8 +1623,6 @@ static void imx_uart_flush_buffer(struct uart_port *port)
{
struct imx_port *sport = (struct imx_port *)port;
struct scatterlist *sgl = &sport->tx_sgl[0];
- u32 ucr2;
- int i = 100, ubir, ubmr, uts;
if (!sport->dma_chan_tx)
return;
@@ -1612,32 +1640,8 @@ static void imx_uart_flush_buffer(struct uart_port *port)
sport->dma_is_txing = 0;
}
- /*
- * According to the Reference Manual description of the UART SRST bit:
- *
- * "Reset the transmit and receive state machines,
- * all FIFOs and register USR1, USR2, UBIR, UBMR, UBRC, URXD, UTXD
- * and UTS[6-3]".
- *
- * We don't need to restore the old values from USR1, USR2, URXD and
- * UTXD. UBRC is read only, so only save/restore the other three
- * registers.
- */
- ubir = imx_uart_readl(sport, UBIR);
- ubmr = imx_uart_readl(sport, UBMR);
- uts = imx_uart_readl(sport, IMX21_UTS);
-
- ucr2 = imx_uart_readl(sport, UCR2);
- ucr2 &= ~UCR2_SRST;
- imx_uart_writel(sport, ucr2, UCR2);
-
- while (!(imx_uart_readl(sport, UCR2) & UCR2_SRST) && (--i > 0))
- udelay(1);
+ imx_uart_soft_reset(sport);
- /* Restore the registers */
- imx_uart_writel(sport, ubir, UBIR);
- imx_uart_writel(sport, ubmr, UBMR);
- imx_uart_writel(sport, uts, IMX21_UTS);
}
static void
@@ -1955,6 +1959,10 @@ static int imx_uart_rs485_config(struct uart_port *port, struct ktermios *termio
rs485conf->flags & SER_RS485_RX_DURING_TX)
imx_uart_start_rx(port);
+ if (port->rs485_rx_during_tx_gpio)
+ gpiod_set_value_cansleep(port->rs485_rx_during_tx_gpio,
+ !!(rs485conf->flags & SER_RS485_RX_DURING_TX));
+
return 0;
}
@@ -2340,13 +2348,6 @@ static int imx_uart_probe(struct platform_device *pdev)
return ret;
}
- /* initialize shadow register values */
- sport->ucr1 = readl(sport->port.membase + UCR1);
- sport->ucr2 = readl(sport->port.membase + UCR2);
- sport->ucr3 = readl(sport->port.membase + UCR3);
- sport->ucr4 = readl(sport->port.membase + UCR4);
- sport->ufcr = readl(sport->port.membase + UFCR);
-
ret = uart_get_rs485_mode(&sport->port);
if (ret) {
clk_disable_unprepare(sport->clk_ipg);
@@ -2374,6 +2375,11 @@ static int imx_uart_probe(struct platform_device *pdev)
ucr1 &= ~(UCR1_ADEN | UCR1_TRDYEN | UCR1_IDEN | UCR1_RRDYEN | UCR1_RTSDEN);
imx_uart_writel(sport, ucr1, UCR1);
+ /* Disable Ageing Timer interrupt */
+ ucr2 = imx_uart_readl(sport, UCR2);
+ ucr2 &= ~UCR2_ATEN;
+ imx_uart_writel(sport, ucr2, UCR2);
+
/*
* In case RS485 is enabled without GPIO RTS control, the UART IP
* is used to control CTS signal. Keep both the UART and Receiver
diff --git a/drivers/tty/serial/liteuart.c b/drivers/tty/serial/liteuart.c
index 062812fe1b09..80de3a42b67b 100644
--- a/drivers/tty/serial/liteuart.c
+++ b/drivers/tty/serial/liteuart.c
@@ -5,7 +5,9 @@
* Copyright (C) 2019-2020 Antmicro <www.antmicro.com>
*/
+#include <linux/bits.h>
#include <linux/console.h>
+#include <linux/interrupt.h>
#include <linux/litex.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -38,13 +40,13 @@
#define OFF_EV_ENABLE 0x14
/* events */
-#define EV_TX 0x1
-#define EV_RX 0x2
+#define EV_TX BIT(0)
+#define EV_RX BIT(1)
struct liteuart_port {
struct uart_port port;
struct timer_list timer;
- u32 id;
+ u8 irq_reg;
};
#define to_liteuart_port(port) container_of(port, struct liteuart_port, port)
@@ -57,7 +59,7 @@ static struct console liteuart_console;
static struct uart_driver liteuart_driver = {
.owner = THIS_MODULE,
- .driver_name = "liteuart",
+ .driver_name = KBUILD_MODNAME,
.dev_name = "ttyLXU",
.major = 0,
.minor = 0,
@@ -67,38 +69,95 @@ static struct uart_driver liteuart_driver = {
#endif
};
-static void liteuart_timer(struct timer_list *t)
+static void liteuart_update_irq_reg(struct uart_port *port, bool set, u8 mask)
+{
+ struct liteuart_port *uart = to_liteuart_port(port);
+
+ if (set)
+ uart->irq_reg |= mask;
+ else
+ uart->irq_reg &= ~mask;
+
+ if (port->irq)
+ litex_write8(port->membase + OFF_EV_ENABLE, uart->irq_reg);
+}
+
+static void liteuart_stop_tx(struct uart_port *port)
+{
+ liteuart_update_irq_reg(port, false, EV_TX);
+}
+
+static void liteuart_start_tx(struct uart_port *port)
+{
+ liteuart_update_irq_reg(port, true, EV_TX);
+}
+
+static void liteuart_stop_rx(struct uart_port *port)
+{
+ struct liteuart_port *uart = to_liteuart_port(port);
+
+ /* just delete timer */
+ del_timer(&uart->timer);
+}
+
+static void liteuart_rx_chars(struct uart_port *port)
{
- struct liteuart_port *uart = from_timer(uart, t, timer);
- struct uart_port *port = &uart->port;
unsigned char __iomem *membase = port->membase;
- unsigned int flg = TTY_NORMAL;
- int ch;
- unsigned long status;
+ u8 ch;
- while ((status = !litex_read8(membase + OFF_RXEMPTY)) == 1) {
+ while (!litex_read8(membase + OFF_RXEMPTY)) {
ch = litex_read8(membase + OFF_RXTX);
port->icount.rx++;
/* necessary for RXEMPTY to refresh its value */
- litex_write8(membase + OFF_EV_PENDING, EV_TX | EV_RX);
+ litex_write8(membase + OFF_EV_PENDING, EV_RX);
/* no overflow bits in status */
if (!(uart_handle_sysrq_char(port, ch)))
- uart_insert_char(port, status, 0, ch, flg);
-
- tty_flip_buffer_push(&port->state->port);
+ uart_insert_char(port, 1, 0, ch, TTY_NORMAL);
}
- mod_timer(&uart->timer, jiffies + uart_poll_timeout(port));
+ tty_flip_buffer_push(&port->state->port);
}
-static void liteuart_putchar(struct uart_port *port, unsigned char ch)
+static void liteuart_tx_chars(struct uart_port *port)
{
- while (litex_read8(port->membase + OFF_TXFULL))
- cpu_relax();
+ u8 ch;
- litex_write8(port->membase + OFF_RXTX, ch);
+ uart_port_tx(port, ch,
+ !litex_read8(port->membase + OFF_TXFULL),
+ litex_write8(port->membase + OFF_RXTX, ch));
+}
+
+static irqreturn_t liteuart_interrupt(int irq, void *data)
+{
+ struct liteuart_port *uart = data;
+ struct uart_port *port = &uart->port;
+ unsigned long flags;
+ u8 isr;
+
+ /*
+ * if polling, the context would be "in_serving_softirq", so use
+ * irq[save|restore] spin_lock variants to cover all possibilities
+ */
+ spin_lock_irqsave(&port->lock, flags);
+ isr = litex_read8(port->membase + OFF_EV_PENDING) & uart->irq_reg;
+ if (isr & EV_RX)
+ liteuart_rx_chars(port);
+ if (isr & EV_TX)
+ liteuart_tx_chars(port);
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ return IRQ_RETVAL(isr);
+}
+
+static void liteuart_timer(struct timer_list *t)
+{
+ struct liteuart_port *uart = from_timer(uart, t, timer);
+ struct uart_port *port = &uart->port;
+
+ liteuart_interrupt(0, port);
+ mod_timer(&uart->timer, jiffies + uart_poll_timeout(port));
}
static unsigned int liteuart_tx_empty(struct uart_port *port)
@@ -120,60 +179,49 @@ static unsigned int liteuart_get_mctrl(struct uart_port *port)
return TIOCM_CTS | TIOCM_DSR | TIOCM_CAR;
}
-static void liteuart_stop_tx(struct uart_port *port)
-{
-}
-
-static void liteuart_start_tx(struct uart_port *port)
+static int liteuart_startup(struct uart_port *port)
{
- struct circ_buf *xmit = &port->state->xmit;
- unsigned char ch;
-
- if (unlikely(port->x_char)) {
- litex_write8(port->membase + OFF_RXTX, port->x_char);
- port->icount.tx++;
- port->x_char = 0;
- } else if (!uart_circ_empty(xmit)) {
- while (xmit->head != xmit->tail) {
- ch = xmit->buf[xmit->tail];
- uart_xmit_advance(port, 1);
- liteuart_putchar(port, ch);
+ struct liteuart_port *uart = to_liteuart_port(port);
+ unsigned long flags;
+ int ret;
+
+ if (port->irq) {
+ ret = request_irq(port->irq, liteuart_interrupt, 0,
+ KBUILD_MODNAME, uart);
+ if (ret) {
+ dev_warn(port->dev,
+ "line %d irq %d failed: switch to polling\n",
+ port->line, port->irq);
+ port->irq = 0;
}
}
- if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
- uart_write_wakeup(port);
-}
-
-static void liteuart_stop_rx(struct uart_port *port)
-{
- struct liteuart_port *uart = to_liteuart_port(port);
+ spin_lock_irqsave(&port->lock, flags);
+ /* only enabling rx irqs during startup */
+ liteuart_update_irq_reg(port, true, EV_RX);
+ spin_unlock_irqrestore(&port->lock, flags);
- /* just delete timer */
- del_timer(&uart->timer);
-}
+ if (!port->irq) {
+ timer_setup(&uart->timer, liteuart_timer, 0);
+ mod_timer(&uart->timer, jiffies + uart_poll_timeout(port));
+ }
-static void liteuart_break_ctl(struct uart_port *port, int break_state)
-{
- /* LiteUART doesn't support sending break signal */
+ return 0;
}
-static int liteuart_startup(struct uart_port *port)
+static void liteuart_shutdown(struct uart_port *port)
{
struct liteuart_port *uart = to_liteuart_port(port);
+ unsigned long flags;
- /* disable events */
- litex_write8(port->membase + OFF_EV_ENABLE, 0);
-
- /* prepare timer for polling */
- timer_setup(&uart->timer, liteuart_timer, 0);
- mod_timer(&uart->timer, jiffies + uart_poll_timeout(port));
-
- return 0;
-}
+ spin_lock_irqsave(&port->lock, flags);
+ liteuart_update_irq_reg(port, false, EV_RX | EV_TX);
+ spin_unlock_irqrestore(&port->lock, flags);
-static void liteuart_shutdown(struct uart_port *port)
-{
+ if (port->irq)
+ free_irq(port->irq, port);
+ else
+ del_timer_sync(&uart->timer);
}
static void liteuart_set_termios(struct uart_port *port, struct ktermios *new,
@@ -196,15 +244,6 @@ static const char *liteuart_type(struct uart_port *port)
return "liteuart";
}
-static void liteuart_release_port(struct uart_port *port)
-{
-}
-
-static int liteuart_request_port(struct uart_port *port)
-{
- return 0;
-}
-
static void liteuart_config_port(struct uart_port *port, int flags)
{
/*
@@ -231,13 +270,10 @@ static const struct uart_ops liteuart_ops = {
.stop_tx = liteuart_stop_tx,
.start_tx = liteuart_start_tx,
.stop_rx = liteuart_stop_rx,
- .break_ctl = liteuart_break_ctl,
.startup = liteuart_startup,
.shutdown = liteuart_shutdown,
.set_termios = liteuart_set_termios,
.type = liteuart_type,
- .release_port = liteuart_release_port,
- .request_port = liteuart_request_port,
.config_port = liteuart_config_port,
.verify_port = liteuart_verify_port,
};
@@ -249,6 +285,23 @@ static int liteuart_probe(struct platform_device *pdev)
struct xa_limit limit;
int dev_id, ret;
+ uart = devm_kzalloc(&pdev->dev, sizeof(struct liteuart_port), GFP_KERNEL);
+ if (!uart)
+ return -ENOMEM;
+
+ port = &uart->port;
+
+ /* get membase */
+ port->membase = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
+ if (IS_ERR(port->membase))
+ return PTR_ERR(port->membase);
+
+ ret = platform_get_irq_optional(pdev, 0);
+ if (ret < 0 && ret != -ENXIO)
+ return ret;
+ if (ret > 0)
+ port->irq = ret;
+
/* look for aliases; auto-enumerate for free index if not found */
dev_id = of_alias_get_id(pdev->dev.of_node, "serial");
if (dev_id < 0)
@@ -256,32 +309,16 @@ static int liteuart_probe(struct platform_device *pdev)
else
limit = XA_LIMIT(dev_id, dev_id);
- uart = devm_kzalloc(&pdev->dev, sizeof(struct liteuart_port), GFP_KERNEL);
- if (!uart)
- return -ENOMEM;
-
ret = xa_alloc(&liteuart_array, &dev_id, uart, limit, GFP_KERNEL);
if (ret)
return ret;
- uart->id = dev_id;
- port = &uart->port;
-
- /* get membase */
- port->membase = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
- if (IS_ERR(port->membase)) {
- ret = PTR_ERR(port->membase);
- goto err_erase_id;
- }
-
/* values not from device tree */
port->dev = &pdev->dev;
port->iotype = UPIO_MEM;
port->flags = UPF_BOOT_AUTOCONF;
port->ops = &liteuart_ops;
- port->regshift = 2;
port->fifosize = 16;
- port->iobase = 1;
port->type = PORT_UNKNOWN;
port->line = dev_id;
spin_lock_init(&port->lock);
@@ -295,7 +332,7 @@ static int liteuart_probe(struct platform_device *pdev)
return 0;
err_erase_id:
- xa_erase(&liteuart_array, uart->id);
+ xa_erase(&liteuart_array, dev_id);
return ret;
}
@@ -303,10 +340,10 @@ err_erase_id:
static int liteuart_remove(struct platform_device *pdev)
{
struct uart_port *port = platform_get_drvdata(pdev);
- struct liteuart_port *uart = to_liteuart_port(port);
+ unsigned int line = port->line;
uart_remove_one_port(&liteuart_driver, port);
- xa_erase(&liteuart_array, uart->id);
+ xa_erase(&liteuart_array, line);
return 0;
}
@@ -321,13 +358,21 @@ static struct platform_driver liteuart_platform_driver = {
.probe = liteuart_probe,
.remove = liteuart_remove,
.driver = {
- .name = "liteuart",
+ .name = KBUILD_MODNAME,
.of_match_table = liteuart_of_match,
},
};
#ifdef CONFIG_SERIAL_LITEUART_CONSOLE
+static void liteuart_putchar(struct uart_port *port, unsigned char ch)
+{
+ while (litex_read8(port->membase + OFF_TXFULL))
+ cpu_relax();
+
+ litex_write8(port->membase + OFF_RXTX, ch);
+}
+
static void liteuart_console_write(struct console *co, const char *s,
unsigned int count)
{
@@ -367,7 +412,7 @@ static int liteuart_console_setup(struct console *co, char *options)
}
static struct console liteuart_console = {
- .name = "liteuart",
+ .name = KBUILD_MODNAME,
.write = liteuart_console_write,
.device = uart_console_device,
.setup = liteuart_console_setup,
@@ -415,12 +460,10 @@ static int __init liteuart_init(void)
return res;
res = platform_driver_register(&liteuart_platform_driver);
- if (res) {
+ if (res)
uart_unregister_driver(&liteuart_driver);
- return res;
- }
- return 0;
+ return res;
}
static void __exit liteuart_exit(void)
diff --git a/drivers/tty/serial/max3100.c b/drivers/tty/serial/max3100.c
index bb74f23251fe..86dcbff8faa3 100644
--- a/drivers/tty/serial/max3100.c
+++ b/drivers/tty/serial/max3100.c
@@ -247,7 +247,7 @@ static int max3100_handlerx(struct max3100_port *s, u16 rx)
cts = (rx & MAX3100_CTS) > 0;
if (s->cts != cts) {
s->cts = cts;
- uart_handle_cts_change(&s->port, cts ? TIOCM_CTS : 0);
+ uart_handle_cts_change(&s->port, cts);
}
return ret;
diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c
index 4eb24e3407f8..e9cacfe7e032 100644
--- a/drivers/tty/serial/max310x.c
+++ b/drivers/tty/serial/max310x.c
@@ -819,8 +819,7 @@ static irqreturn_t max310x_port_irq(struct max310x_port *s, int portno)
if (ists & MAX310X_IRQ_CTS_BIT) {
lsr = max310x_port_read(port, MAX310X_LSR_IRQSTS_REG);
- uart_handle_cts_change(port,
- !!(lsr & MAX310X_LSR_CTS_BIT));
+ uart_handle_cts_change(port, lsr & MAX310X_LSR_CTS_BIT);
}
if (rxlen)
max310x_handle_rx(port, rxlen);
diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
index 843798e63084..90953e679e38 100644
--- a/drivers/tty/serial/msm_serial.c
+++ b/drivers/tty/serial/msm_serial.c
@@ -1120,6 +1120,7 @@ msm_find_best_baud(struct uart_port *port, unsigned int baud,
static int msm_set_baud_rate(struct uart_port *port, unsigned int baud,
unsigned long *saved_flags)
+ __must_hold(&port->lock)
{
unsigned int rxstale, watermark, mask;
struct msm_port *msm_port = to_msm_port(port);
diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
index 9576ba8bbc40..cc83b772b7ca 100644
--- a/drivers/tty/serial/pch_uart.c
+++ b/drivers/tty/serial/pch_uart.c
@@ -1775,7 +1775,7 @@ static void pch_uart_exit_port(struct eg20t_port *priv)
char name[32];
snprintf(name, sizeof(name), "uart%d_regs", priv->port.line);
- debugfs_remove(debugfs_lookup(name, NULL));
+ debugfs_lookup_and_remove(name, NULL);
uart_remove_one_port(&pch_uart_driver, &priv->port);
free_page((unsigned long)priv->rxbuf.buf);
}
diff --git a/drivers/tty/serial/pic32_uart.c b/drivers/tty/serial/pic32_uart.c
index ba3435263c1f..196a4e678451 100644
--- a/drivers/tty/serial/pic32_uart.c
+++ b/drivers/tty/serial/pic32_uart.c
@@ -889,6 +889,8 @@ static int pic32_uart_probe(struct platform_device *pdev)
sport->irq_rx = irq_of_parse_and_map(np, 1);
sport->irq_tx = irq_of_parse_and_map(np, 2);
sport->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(sport->clk))
+ return PTR_ERR(sport->clk);
sport->dev = &pdev->dev;
/* Hardware flow control: gpios
diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c
index 57f04f8bf504..d69592e5e2ec 100644
--- a/drivers/tty/serial/qcom_geni_serial.c
+++ b/drivers/tty/serial/qcom_geni_serial.c
@@ -16,7 +16,7 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeirq.h>
-#include <linux/qcom-geni-se.h>
+#include <linux/soc/qcom/geni-se.h>
#include <linux/serial.h>
#include <linux/serial_core.h>
#include <linux/slab.h>
@@ -39,76 +39,70 @@
#define SE_UART_MANUAL_RFR 0x2ac
/* SE_UART_TRANS_CFG */
-#define UART_TX_PAR_EN BIT(0)
-#define UART_CTS_MASK BIT(1)
-
-/* SE_UART_TX_WORD_LEN */
-#define TX_WORD_LEN_MSK GENMASK(9, 0)
+#define UART_TX_PAR_EN BIT(0)
+#define UART_CTS_MASK BIT(1)
/* SE_UART_TX_STOP_BIT_LEN */
-#define TX_STOP_BIT_LEN_MSK GENMASK(23, 0)
-#define TX_STOP_BIT_LEN_1 0
-#define TX_STOP_BIT_LEN_1_5 1
-#define TX_STOP_BIT_LEN_2 2
-
-/* SE_UART_TX_TRANS_LEN */
-#define TX_TRANS_LEN_MSK GENMASK(23, 0)
+#define TX_STOP_BIT_LEN_1 0
+#define TX_STOP_BIT_LEN_2 2
/* SE_UART_RX_TRANS_CFG */
-#define UART_RX_INS_STATUS_BIT BIT(2)
-#define UART_RX_PAR_EN BIT(3)
+#define UART_RX_PAR_EN BIT(3)
/* SE_UART_RX_WORD_LEN */
-#define RX_WORD_LEN_MASK GENMASK(9, 0)
+#define RX_WORD_LEN_MASK GENMASK(9, 0)
/* SE_UART_RX_STALE_CNT */
-#define RX_STALE_CNT GENMASK(23, 0)
+#define RX_STALE_CNT GENMASK(23, 0)
/* SE_UART_TX_PARITY_CFG/RX_PARITY_CFG */
-#define PAR_CALC_EN BIT(0)
-#define PAR_MODE_MSK GENMASK(2, 1)
-#define PAR_MODE_SHFT 1
-#define PAR_EVEN 0x00
-#define PAR_ODD 0x01
-#define PAR_SPACE 0x10
-#define PAR_MARK 0x11
+#define PAR_CALC_EN BIT(0)
+#define PAR_EVEN 0x00
+#define PAR_ODD 0x01
+#define PAR_SPACE 0x10
/* SE_UART_MANUAL_RFR register fields */
-#define UART_MANUAL_RFR_EN BIT(31)
-#define UART_RFR_NOT_READY BIT(1)
-#define UART_RFR_READY BIT(0)
+#define UART_MANUAL_RFR_EN BIT(31)
+#define UART_RFR_NOT_READY BIT(1)
+#define UART_RFR_READY BIT(0)
/* UART M_CMD OP codes */
-#define UART_START_TX 0x1
-#define UART_START_BREAK 0x4
-#define UART_STOP_BREAK 0x5
+#define UART_START_TX 0x1
/* UART S_CMD OP codes */
-#define UART_START_READ 0x1
-#define UART_PARAM 0x1
-
-#define UART_OVERSAMPLING 32
-#define STALE_TIMEOUT 16
-#define DEFAULT_BITS_PER_CHAR 10
-#define GENI_UART_CONS_PORTS 1
-#define GENI_UART_PORTS 3
-#define DEF_FIFO_DEPTH_WORDS 16
-#define DEF_TX_WM 2
-#define DEF_FIFO_WIDTH_BITS 32
-#define UART_RX_WM 2
+#define UART_START_READ 0x1
+#define UART_PARAM 0x1
+#define UART_PARAM_RFR_OPEN BIT(7)
+
+#define UART_OVERSAMPLING 32
+#define STALE_TIMEOUT 16
+#define DEFAULT_BITS_PER_CHAR 10
+#define GENI_UART_CONS_PORTS 1
+#define GENI_UART_PORTS 3
+#define DEF_FIFO_DEPTH_WORDS 16
+#define DEF_TX_WM 2
+#define DEF_FIFO_WIDTH_BITS 32
+#define UART_RX_WM 2
/* SE_UART_LOOPBACK_CFG */
-#define RX_TX_SORTED BIT(0)
-#define CTS_RTS_SORTED BIT(1)
-#define RX_TX_CTS_RTS_SORTED (RX_TX_SORTED | CTS_RTS_SORTED)
+#define RX_TX_SORTED BIT(0)
+#define CTS_RTS_SORTED BIT(1)
+#define RX_TX_CTS_RTS_SORTED (RX_TX_SORTED | CTS_RTS_SORTED)
/* UART pin swap value */
-#define DEFAULT_IO_MACRO_IO0_IO1_MASK GENMASK(3, 0)
+#define DEFAULT_IO_MACRO_IO0_IO1_MASK GENMASK(3, 0)
#define IO_MACRO_IO0_SEL 0x3
-#define DEFAULT_IO_MACRO_IO2_IO3_MASK GENMASK(15, 4)
+#define DEFAULT_IO_MACRO_IO2_IO3_MASK GENMASK(15, 4)
#define IO_MACRO_IO2_IO3_SWAP 0x4640
/* We always configure 4 bytes per FIFO word */
-#define BYTES_PER_FIFO_WORD 4
+#define BYTES_PER_FIFO_WORD 4U
+
+#define DMA_RX_BUF_SIZE 2048
+
+struct qcom_geni_device_data {
+ bool console;
+ enum geni_se_xfer_mode mode;
+};
struct qcom_geni_private_data {
/* NOTE: earlycon port will have NULL here */
@@ -128,10 +122,11 @@ struct qcom_geni_serial_port {
u32 tx_fifo_depth;
u32 tx_fifo_width;
u32 rx_fifo_depth;
+ dma_addr_t tx_dma_addr;
+ dma_addr_t rx_dma_addr;
bool setup;
- int (*handle_rx)(struct uart_port *uport, u32 bytes, bool drop);
unsigned int baud;
- void *rx_fifo;
+ void *rx_buf;
u32 loopback;
bool brk;
@@ -141,44 +136,42 @@ struct qcom_geni_serial_port {
bool cts_rts_swap;
struct qcom_geni_private_data private_data;
+ const struct qcom_geni_device_data *dev_data;
};
static const struct uart_ops qcom_geni_console_pops;
static const struct uart_ops qcom_geni_uart_pops;
static struct uart_driver qcom_geni_console_driver;
static struct uart_driver qcom_geni_uart_driver;
-static int handle_rx_console(struct uart_port *uport, u32 bytes, bool drop);
-static int handle_rx_uart(struct uart_port *uport, u32 bytes, bool drop);
-static unsigned int qcom_geni_serial_tx_empty(struct uart_port *port);
-static void qcom_geni_serial_stop_rx(struct uart_port *uport);
-static void qcom_geni_serial_handle_rx(struct uart_port *uport, bool drop);
-#define to_dev_port(ptr, member) \
- container_of(ptr, struct qcom_geni_serial_port, member)
+static inline struct qcom_geni_serial_port *to_dev_port(struct uart_port *uport)
+{
+ return container_of(uport, struct qcom_geni_serial_port, uport);
+}
static struct qcom_geni_serial_port qcom_geni_uart_ports[GENI_UART_PORTS] = {
[0] = {
.uport = {
- .iotype = UPIO_MEM,
- .ops = &qcom_geni_uart_pops,
- .flags = UPF_BOOT_AUTOCONF,
- .line = 0,
+ .iotype = UPIO_MEM,
+ .ops = &qcom_geni_uart_pops,
+ .flags = UPF_BOOT_AUTOCONF,
+ .line = 0,
},
},
[1] = {
.uport = {
- .iotype = UPIO_MEM,
- .ops = &qcom_geni_uart_pops,
- .flags = UPF_BOOT_AUTOCONF,
- .line = 1,
+ .iotype = UPIO_MEM,
+ .ops = &qcom_geni_uart_pops,
+ .flags = UPF_BOOT_AUTOCONF,
+ .line = 1,
},
},
[2] = {
.uport = {
- .iotype = UPIO_MEM,
- .ops = &qcom_geni_uart_pops,
- .flags = UPF_BOOT_AUTOCONF,
- .line = 2,
+ .iotype = UPIO_MEM,
+ .ops = &qcom_geni_uart_pops,
+ .flags = UPF_BOOT_AUTOCONF,
+ .line = 2,
},
},
};
@@ -195,7 +188,7 @@ static struct qcom_geni_serial_port qcom_geni_console_port = {
static int qcom_geni_serial_request_port(struct uart_port *uport)
{
struct platform_device *pdev = to_platform_device(uport->dev);
- struct qcom_geni_serial_port *port = to_dev_port(uport, uport);
+ struct qcom_geni_serial_port *port = to_dev_port(uport);
uport->membase = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(uport->membase))
@@ -232,7 +225,7 @@ static void qcom_geni_serial_set_mctrl(struct uart_port *uport,
unsigned int mctrl)
{
u32 uart_manual_rfr = 0;
- struct qcom_geni_serial_port *port = to_dev_port(uport, uport);
+ struct qcom_geni_serial_port *port = to_dev_port(uport);
if (uart_console(uport))
return;
@@ -262,6 +255,16 @@ static struct qcom_geni_serial_port *get_port_from_line(int line, bool console)
return port;
}
+static bool qcom_geni_serial_main_active(struct uart_port *uport)
+{
+ return readl(uport->membase + SE_GENI_STATUS) & M_GENI_CMD_ACTIVE;
+}
+
+static bool qcom_geni_serial_secondary_active(struct uart_port *uport)
+{
+ return readl(uport->membase + SE_GENI_STATUS) & S_GENI_CMD_ACTIVE;
+}
+
static bool qcom_geni_serial_poll_bit(struct uart_port *uport,
int offset, int field, bool set)
{
@@ -273,7 +276,7 @@ static bool qcom_geni_serial_poll_bit(struct uart_port *uport,
struct qcom_geni_private_data *private_data = uport->private_data;
if (private_data->drv) {
- port = to_dev_port(uport, uport);
+ port = to_dev_port(uport);
baud = port->baud;
if (!baud)
baud = 115200;
@@ -338,7 +341,6 @@ static void qcom_geni_serial_abort_rx(struct uart_port *uport)
}
#ifdef CONFIG_CONSOLE_POLL
-
static int qcom_geni_serial_get_char(struct uart_port *uport)
{
struct qcom_geni_private_data *private_data = uport->private_data;
@@ -521,12 +523,12 @@ static void qcom_geni_serial_console_write(struct console *co, const char *s,
spin_unlock_irqrestore(&uport->lock, flags);
}
-static int handle_rx_console(struct uart_port *uport, u32 bytes, bool drop)
+static void handle_rx_console(struct uart_port *uport, u32 bytes, bool drop)
{
u32 i;
unsigned char buf[sizeof(u32)];
struct tty_port *tport;
- struct qcom_geni_serial_port *port = to_dev_port(uport, uport);
+ struct qcom_geni_serial_port *port = to_dev_port(uport);
tport = &uport->state->port;
for (i = 0; i < bytes; ) {
@@ -556,30 +558,21 @@ static int handle_rx_console(struct uart_port *uport, u32 bytes, bool drop)
}
if (!drop)
tty_flip_buffer_push(tport);
- return 0;
}
#else
-static int handle_rx_console(struct uart_port *uport, u32 bytes, bool drop)
+static void handle_rx_console(struct uart_port *uport, u32 bytes, bool drop)
{
- return -EPERM;
-}
+}
#endif /* CONFIG_SERIAL_QCOM_GENI_CONSOLE */
-static int handle_rx_uart(struct uart_port *uport, u32 bytes, bool drop)
+static void handle_rx_uart(struct uart_port *uport, u32 bytes, bool drop)
{
- struct tty_port *tport;
- struct qcom_geni_serial_port *port = to_dev_port(uport, uport);
- u32 num_bytes_pw = port->tx_fifo_width / BITS_PER_BYTE;
- u32 words = ALIGN(bytes, num_bytes_pw) / num_bytes_pw;
+ struct qcom_geni_serial_port *port = to_dev_port(uport);
+ struct tty_port *tport = &uport->state->port;
int ret;
- tport = &uport->state->port;
- ioread32_rep(uport->membase + SE_GENI_RX_FIFOn, port->rx_fifo, words);
- if (drop)
- return 0;
-
- ret = tty_insert_flip_string(tport, port->rx_fifo, bytes);
+ ret = tty_insert_flip_string(tport, port->rx_buf, bytes);
if (ret != bytes) {
dev_err(uport->dev, "%s:Unable to push data ret %d_bytes %d\n",
__func__, ret, bytes);
@@ -587,19 +580,82 @@ static int handle_rx_uart(struct uart_port *uport, u32 bytes, bool drop)
}
uport->icount.rx += ret;
tty_flip_buffer_push(tport);
- return ret;
}
-static void qcom_geni_serial_start_tx(struct uart_port *uport)
+static unsigned int qcom_geni_serial_tx_empty(struct uart_port *uport)
{
- u32 irq_en;
- u32 status;
+ return !readl(uport->membase + SE_GENI_TX_FIFO_STATUS);
+}
+
+static void qcom_geni_serial_stop_tx_dma(struct uart_port *uport)
+{
+ struct qcom_geni_serial_port *port = to_dev_port(uport);
+ bool done;
+ u32 m_irq_en;
- status = readl(uport->membase + SE_GENI_STATUS);
- if (status & M_GENI_CMD_ACTIVE)
+ if (!qcom_geni_serial_main_active(uport))
return;
- if (!qcom_geni_serial_tx_empty(uport))
+ if (port->rx_dma_addr) {
+ geni_se_tx_dma_unprep(&port->se, port->tx_dma_addr,
+ port->tx_remaining);
+ port->tx_dma_addr = 0;
+ port->tx_remaining = 0;
+ }
+
+ m_irq_en = readl(uport->membase + SE_GENI_M_IRQ_EN);
+ writel(m_irq_en, uport->membase + SE_GENI_M_IRQ_EN);
+ geni_se_cancel_m_cmd(&port->se);
+
+ done = qcom_geni_serial_poll_bit(uport, SE_GENI_S_IRQ_STATUS,
+ S_CMD_CANCEL_EN, true);
+ if (!done) {
+ geni_se_abort_m_cmd(&port->se);
+ done = qcom_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
+ M_CMD_ABORT_EN, true);
+ if (!done)
+ dev_err_ratelimited(uport->dev, "M_CMD_ABORT_EN not set");
+ writel(M_CMD_ABORT_EN, uport->membase + SE_GENI_M_IRQ_CLEAR);
+ }
+
+ writel(M_CMD_CANCEL_EN, uport->membase + SE_GENI_M_IRQ_CLEAR);
+}
+
+static void qcom_geni_serial_start_tx_dma(struct uart_port *uport)
+{
+ struct qcom_geni_serial_port *port = to_dev_port(uport);
+ struct circ_buf *xmit = &uport->state->xmit;
+ unsigned int xmit_size;
+ int ret;
+
+ if (port->tx_dma_addr)
+ return;
+
+ xmit_size = uart_circ_chars_pending(xmit);
+ if (xmit_size < WAKEUP_CHARS)
+ uart_write_wakeup(uport);
+
+ xmit_size = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
+
+ qcom_geni_serial_setup_tx(uport, xmit_size);
+
+ ret = geni_se_tx_dma_prep(&port->se, &xmit->buf[xmit->tail],
+ xmit_size, &port->tx_dma_addr);
+ if (ret) {
+ dev_err(uport->dev, "unable to start TX SE DMA: %d\n", ret);
+ qcom_geni_serial_stop_tx_dma(uport);
+ return;
+ }
+
+ port->tx_remaining = xmit_size;
+}
+
+static void qcom_geni_serial_start_tx_fifo(struct uart_port *uport)
+{
+ u32 irq_en;
+
+ if (qcom_geni_serial_main_active(uport) ||
+ !qcom_geni_serial_tx_empty(uport))
return;
irq_en = readl(uport->membase + SE_GENI_M_IRQ_EN);
@@ -609,19 +665,17 @@ static void qcom_geni_serial_start_tx(struct uart_port *uport)
writel(irq_en, uport->membase + SE_GENI_M_IRQ_EN);
}
-static void qcom_geni_serial_stop_tx(struct uart_port *uport)
+static void qcom_geni_serial_stop_tx_fifo(struct uart_port *uport)
{
u32 irq_en;
- u32 status;
- struct qcom_geni_serial_port *port = to_dev_port(uport, uport);
+ struct qcom_geni_serial_port *port = to_dev_port(uport);
irq_en = readl(uport->membase + SE_GENI_M_IRQ_EN);
irq_en &= ~(M_CMD_DONE_EN | M_TX_FIFO_WATERMARK_EN);
writel(0, uport->membase + SE_GENI_TX_WATERMARK_REG);
writel(irq_en, uport->membase + SE_GENI_M_IRQ_EN);
- status = readl(uport->membase + SE_GENI_STATUS);
/* Possible stop tx is called multiple times. */
- if (!(status & M_GENI_CMD_ACTIVE))
+ if (!qcom_geni_serial_main_active(uport))
return;
geni_se_cancel_m_cmd(&port->se);
@@ -635,32 +689,34 @@ static void qcom_geni_serial_stop_tx(struct uart_port *uport)
writel(M_CMD_CANCEL_EN, uport->membase + SE_GENI_M_IRQ_CLEAR);
}
-static void qcom_geni_serial_start_rx(struct uart_port *uport)
+static void qcom_geni_serial_handle_rx_fifo(struct uart_port *uport, bool drop)
{
- u32 irq_en;
u32 status;
- struct qcom_geni_serial_port *port = to_dev_port(uport, uport);
-
- status = readl(uport->membase + SE_GENI_STATUS);
- if (status & S_GENI_CMD_ACTIVE)
- qcom_geni_serial_stop_rx(uport);
-
- geni_se_setup_s_cmd(&port->se, UART_START_READ, 0);
+ u32 word_cnt;
+ u32 last_word_byte_cnt;
+ u32 last_word_partial;
+ u32 total_bytes;
- irq_en = readl(uport->membase + SE_GENI_S_IRQ_EN);
- irq_en |= S_RX_FIFO_WATERMARK_EN | S_RX_FIFO_LAST_EN;
- writel(irq_en, uport->membase + SE_GENI_S_IRQ_EN);
+ status = readl(uport->membase + SE_GENI_RX_FIFO_STATUS);
+ word_cnt = status & RX_FIFO_WC_MSK;
+ last_word_partial = status & RX_LAST;
+ last_word_byte_cnt = (status & RX_LAST_BYTE_VALID_MSK) >>
+ RX_LAST_BYTE_VALID_SHFT;
- irq_en = readl(uport->membase + SE_GENI_M_IRQ_EN);
- irq_en |= M_RX_FIFO_WATERMARK_EN | M_RX_FIFO_LAST_EN;
- writel(irq_en, uport->membase + SE_GENI_M_IRQ_EN);
+ if (!word_cnt)
+ return;
+ total_bytes = BYTES_PER_FIFO_WORD * (word_cnt - 1);
+ if (last_word_partial && last_word_byte_cnt)
+ total_bytes += last_word_byte_cnt;
+ else
+ total_bytes += BYTES_PER_FIFO_WORD;
+ handle_rx_console(uport, total_bytes, drop);
}
-static void qcom_geni_serial_stop_rx(struct uart_port *uport)
+static void qcom_geni_serial_stop_rx_fifo(struct uart_port *uport)
{
u32 irq_en;
- u32 status;
- struct qcom_geni_serial_port *port = to_dev_port(uport, uport);
+ struct qcom_geni_serial_port *port = to_dev_port(uport);
u32 s_irq_status;
irq_en = readl(uport->membase + SE_GENI_S_IRQ_EN);
@@ -671,9 +727,7 @@ static void qcom_geni_serial_stop_rx(struct uart_port *uport)
irq_en &= ~(M_RX_FIFO_WATERMARK_EN | M_RX_FIFO_LAST_EN);
writel(irq_en, uport->membase + SE_GENI_M_IRQ_EN);
- status = readl(uport->membase + SE_GENI_STATUS);
- /* Possible stop rx is called multiple times. */
- if (!(status & S_GENI_CMD_ACTIVE))
+ if (!qcom_geni_serial_secondary_active(uport))
return;
geni_se_cancel_s_cmd(&port->se);
@@ -686,52 +740,154 @@ static void qcom_geni_serial_stop_rx(struct uart_port *uport)
s_irq_status = readl(uport->membase + SE_GENI_S_IRQ_STATUS);
/* Flush the Rx buffer */
if (s_irq_status & S_RX_FIFO_LAST_EN)
- qcom_geni_serial_handle_rx(uport, true);
+ qcom_geni_serial_handle_rx_fifo(uport, true);
writel(s_irq_status, uport->membase + SE_GENI_S_IRQ_CLEAR);
- status = readl(uport->membase + SE_GENI_STATUS);
- if (status & S_GENI_CMD_ACTIVE)
+ if (qcom_geni_serial_secondary_active(uport))
qcom_geni_serial_abort_rx(uport);
}
-static void qcom_geni_serial_handle_rx(struct uart_port *uport, bool drop)
+static void qcom_geni_serial_start_rx_fifo(struct uart_port *uport)
{
- u32 status;
- u32 word_cnt;
- u32 last_word_byte_cnt;
- u32 last_word_partial;
- u32 total_bytes;
- struct qcom_geni_serial_port *port = to_dev_port(uport, uport);
+ u32 irq_en;
+ struct qcom_geni_serial_port *port = to_dev_port(uport);
- status = readl(uport->membase + SE_GENI_RX_FIFO_STATUS);
- word_cnt = status & RX_FIFO_WC_MSK;
- last_word_partial = status & RX_LAST;
- last_word_byte_cnt = (status & RX_LAST_BYTE_VALID_MSK) >>
- RX_LAST_BYTE_VALID_SHFT;
+ if (qcom_geni_serial_secondary_active(uport))
+ qcom_geni_serial_stop_rx_fifo(uport);
- if (!word_cnt)
+ geni_se_setup_s_cmd(&port->se, UART_START_READ, 0);
+
+ irq_en = readl(uport->membase + SE_GENI_S_IRQ_EN);
+ irq_en |= S_RX_FIFO_WATERMARK_EN | S_RX_FIFO_LAST_EN;
+ writel(irq_en, uport->membase + SE_GENI_S_IRQ_EN);
+
+ irq_en = readl(uport->membase + SE_GENI_M_IRQ_EN);
+ irq_en |= M_RX_FIFO_WATERMARK_EN | M_RX_FIFO_LAST_EN;
+ writel(irq_en, uport->membase + SE_GENI_M_IRQ_EN);
+}
+
+static void qcom_geni_serial_stop_rx_dma(struct uart_port *uport)
+{
+ struct qcom_geni_serial_port *port = to_dev_port(uport);
+
+ if (!qcom_geni_serial_secondary_active(uport))
return;
- total_bytes = BYTES_PER_FIFO_WORD * (word_cnt - 1);
- if (last_word_partial && last_word_byte_cnt)
- total_bytes += last_word_byte_cnt;
- else
- total_bytes += BYTES_PER_FIFO_WORD;
- port->handle_rx(uport, total_bytes, drop);
+
+ geni_se_cancel_s_cmd(&port->se);
+ qcom_geni_serial_poll_bit(uport, SE_GENI_S_IRQ_STATUS,
+ S_CMD_CANCEL_EN, true);
+
+ if (qcom_geni_serial_secondary_active(uport))
+ qcom_geni_serial_abort_rx(uport);
+
+ if (port->rx_dma_addr) {
+ geni_se_rx_dma_unprep(&port->se, port->rx_dma_addr,
+ DMA_RX_BUF_SIZE);
+ port->rx_dma_addr = 0;
+ }
+}
+
+static void qcom_geni_serial_start_rx_dma(struct uart_port *uport)
+{
+ struct qcom_geni_serial_port *port = to_dev_port(uport);
+ int ret;
+
+ if (qcom_geni_serial_secondary_active(uport))
+ qcom_geni_serial_stop_rx_dma(uport);
+
+ geni_se_setup_s_cmd(&port->se, UART_START_READ, UART_PARAM_RFR_OPEN);
+
+ ret = geni_se_rx_dma_prep(&port->se, port->rx_buf,
+ DMA_RX_BUF_SIZE,
+ &port->rx_dma_addr);
+ if (ret) {
+ dev_err(uport->dev, "unable to start RX SE DMA: %d\n", ret);
+ qcom_geni_serial_stop_rx_dma(uport);
+ }
+}
+
+static void qcom_geni_serial_handle_rx_dma(struct uart_port *uport, bool drop)
+{
+ struct qcom_geni_serial_port *port = to_dev_port(uport);
+ u32 rx_in;
+ int ret;
+
+ if (!qcom_geni_serial_secondary_active(uport))
+ return;
+
+ if (!port->rx_dma_addr)
+ return;
+
+ geni_se_rx_dma_unprep(&port->se, port->rx_dma_addr, DMA_RX_BUF_SIZE);
+ port->rx_dma_addr = 0;
+
+ rx_in = readl(uport->membase + SE_DMA_RX_LEN_IN);
+ if (!rx_in) {
+ dev_warn(uport->dev, "serial engine reports 0 RX bytes in!\n");
+ return;
+ }
+
+ if (!drop)
+ handle_rx_uart(uport, rx_in, drop);
+
+ ret = geni_se_rx_dma_prep(&port->se, port->rx_buf,
+ DMA_RX_BUF_SIZE,
+ &port->rx_dma_addr);
+ if (ret) {
+ dev_err(uport->dev, "unable to start RX SE DMA: %d\n", ret);
+ qcom_geni_serial_stop_rx_dma(uport);
+ }
+}
+
+static void qcom_geni_serial_start_rx(struct uart_port *uport)
+{
+ uport->ops->start_rx(uport);
+}
+
+static void qcom_geni_serial_stop_rx(struct uart_port *uport)
+{
+ uport->ops->stop_rx(uport);
+}
+
+static void qcom_geni_serial_stop_tx(struct uart_port *uport)
+{
+ uport->ops->stop_tx(uport);
+}
+
+static void qcom_geni_serial_send_chunk_fifo(struct uart_port *uport,
+ unsigned int chunk)
+{
+ struct qcom_geni_serial_port *port = to_dev_port(uport);
+ struct circ_buf *xmit = &uport->state->xmit;
+ unsigned int tx_bytes, c, remaining = chunk;
+ u8 buf[BYTES_PER_FIFO_WORD];
+
+ while (remaining) {
+ memset(buf, 0, sizeof(buf));
+ tx_bytes = min(remaining, BYTES_PER_FIFO_WORD);
+
+ for (c = 0; c < tx_bytes ; c++) {
+ buf[c] = xmit->buf[xmit->tail];
+ uart_xmit_advance(uport, 1);
+ }
+
+ iowrite32_rep(uport->membase + SE_GENI_TX_FIFOn, buf, 1);
+
+ remaining -= tx_bytes;
+ port->tx_remaining -= tx_bytes;
+ }
}
-static void qcom_geni_serial_handle_tx(struct uart_port *uport, bool done,
- bool active)
+static void qcom_geni_serial_handle_tx_fifo(struct uart_port *uport,
+ bool done, bool active)
{
- struct qcom_geni_serial_port *port = to_dev_port(uport, uport);
+ struct qcom_geni_serial_port *port = to_dev_port(uport);
struct circ_buf *xmit = &uport->state->xmit;
size_t avail;
- size_t remaining;
size_t pending;
- int i;
u32 status;
u32 irq_en;
unsigned int chunk;
- int tail;
status = readl(uport->membase + SE_GENI_TX_FIFO_STATUS);
@@ -743,14 +899,13 @@ static void qcom_geni_serial_handle_tx(struct uart_port *uport, bool done,
/* All data has been transmitted and acknowledged as received */
if (!pending && !status && done) {
- qcom_geni_serial_stop_tx(uport);
+ qcom_geni_serial_stop_tx_fifo(uport);
goto out_write_wakeup;
}
avail = port->tx_fifo_depth - (status & TX_FIFO_WC);
avail *= BYTES_PER_FIFO_WORD;
- tail = xmit->tail;
chunk = min(avail, pending);
if (!chunk)
goto out_write_wakeup;
@@ -765,29 +920,7 @@ static void qcom_geni_serial_handle_tx(struct uart_port *uport, bool done,
uport->membase + SE_GENI_M_IRQ_EN);
}
- remaining = chunk;
- for (i = 0; i < chunk; ) {
- unsigned int tx_bytes;
- u8 buf[sizeof(u32)];
- int c;
-
- memset(buf, 0, sizeof(buf));
- tx_bytes = min_t(size_t, remaining, BYTES_PER_FIFO_WORD);
-
- for (c = 0; c < tx_bytes ; c++) {
- buf[c] = xmit->buf[tail++];
- tail &= UART_XMIT_SIZE - 1;
- }
-
- iowrite32_rep(uport->membase + SE_GENI_TX_FIFOn, buf, 1);
-
- i += tx_bytes;
- uport->icount.tx += tx_bytes;
- remaining -= tx_bytes;
- port->tx_remaining -= tx_bytes;
- }
-
- xmit->tail = tail;
+ qcom_geni_serial_send_chunk_fifo(uport, chunk);
/*
* The tx fifo watermark is level triggered and latched. Though we had
@@ -809,16 +942,36 @@ out_write_wakeup:
uart_write_wakeup(uport);
}
+static void qcom_geni_serial_handle_tx_dma(struct uart_port *uport)
+{
+ struct qcom_geni_serial_port *port = to_dev_port(uport);
+ struct circ_buf *xmit = &uport->state->xmit;
+
+ uart_xmit_advance(uport, port->tx_remaining);
+ geni_se_tx_dma_unprep(&port->se, port->tx_dma_addr, port->tx_remaining);
+ port->tx_dma_addr = 0;
+ port->tx_remaining = 0;
+
+ if (!uart_circ_empty(xmit))
+ qcom_geni_serial_start_tx_dma(uport);
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(uport);
+}
+
static irqreturn_t qcom_geni_serial_isr(int isr, void *dev)
{
u32 m_irq_en;
u32 m_irq_status;
u32 s_irq_status;
u32 geni_status;
+ u32 dma;
+ u32 dma_tx_status;
+ u32 dma_rx_status;
struct uart_port *uport = dev;
bool drop_rx = false;
struct tty_port *tport = &uport->state->port;
- struct qcom_geni_serial_port *port = to_dev_port(uport, uport);
+ struct qcom_geni_serial_port *port = to_dev_port(uport);
if (uport->suspended)
return IRQ_NONE;
@@ -827,10 +980,15 @@ static irqreturn_t qcom_geni_serial_isr(int isr, void *dev)
m_irq_status = readl(uport->membase + SE_GENI_M_IRQ_STATUS);
s_irq_status = readl(uport->membase + SE_GENI_S_IRQ_STATUS);
+ dma_tx_status = readl(uport->membase + SE_DMA_TX_IRQ_STAT);
+ dma_rx_status = readl(uport->membase + SE_DMA_RX_IRQ_STAT);
geni_status = readl(uport->membase + SE_GENI_STATUS);
+ dma = readl(uport->membase + SE_GENI_DMA_MODE_EN);
m_irq_en = readl(uport->membase + SE_GENI_M_IRQ_EN);
writel(m_irq_status, uport->membase + SE_GENI_M_IRQ_CLEAR);
writel(s_irq_status, uport->membase + SE_GENI_S_IRQ_CLEAR);
+ writel(dma_tx_status, uport->membase + SE_DMA_TX_IRQ_CLR);
+ writel(dma_rx_status, uport->membase + SE_DMA_RX_IRQ_CLR);
if (WARN_ON(m_irq_status & M_ILLEGAL_CMD_EN))
goto out_unlock;
@@ -840,23 +998,44 @@ static irqreturn_t qcom_geni_serial_isr(int isr, void *dev)
tty_insert_flip_char(tport, 0, TTY_OVERRUN);
}
- if (m_irq_status & m_irq_en & (M_TX_FIFO_WATERMARK_EN | M_CMD_DONE_EN))
- qcom_geni_serial_handle_tx(uport, m_irq_status & M_CMD_DONE_EN,
- geni_status & M_GENI_CMD_ACTIVE);
-
- if (s_irq_status & S_GP_IRQ_0_EN || s_irq_status & S_GP_IRQ_1_EN) {
+ if (s_irq_status & (S_GP_IRQ_0_EN | S_GP_IRQ_1_EN)) {
if (s_irq_status & S_GP_IRQ_0_EN)
uport->icount.parity++;
drop_rx = true;
- } else if (s_irq_status & S_GP_IRQ_2_EN ||
- s_irq_status & S_GP_IRQ_3_EN) {
+ } else if (s_irq_status & (S_GP_IRQ_2_EN | S_GP_IRQ_3_EN)) {
uport->icount.brk++;
port->brk = true;
}
- if (s_irq_status & S_RX_FIFO_WATERMARK_EN ||
- s_irq_status & S_RX_FIFO_LAST_EN)
- qcom_geni_serial_handle_rx(uport, drop_rx);
+ if (dma) {
+ if (dma_tx_status & TX_DMA_DONE)
+ qcom_geni_serial_handle_tx_dma(uport);
+
+ if (dma_rx_status) {
+ if (dma_rx_status & RX_RESET_DONE)
+ goto out_unlock;
+
+ if (dma_rx_status & RX_DMA_PARITY_ERR) {
+ uport->icount.parity++;
+ drop_rx = true;
+ }
+
+ if (dma_rx_status & RX_DMA_BREAK)
+ uport->icount.brk++;
+
+ if (dma_rx_status & (RX_DMA_DONE | RX_EOT))
+ qcom_geni_serial_handle_rx_dma(uport, drop_rx);
+ }
+ } else {
+ if (m_irq_status & m_irq_en &
+ (M_TX_FIFO_WATERMARK_EN | M_CMD_DONE_EN))
+ qcom_geni_serial_handle_tx_fifo(uport,
+ m_irq_status & M_CMD_DONE_EN,
+ geni_status & M_GENI_CMD_ACTIVE);
+
+ if (s_irq_status & (S_RX_FIFO_WATERMARK_EN | S_RX_FIFO_LAST_EN))
+ qcom_geni_serial_handle_rx_fifo(uport, drop_rx);
+ }
out_unlock:
uart_unlock_and_check_sysrq(uport);
@@ -876,11 +1055,11 @@ static int setup_fifos(struct qcom_geni_serial_port *port)
uport->fifosize =
(port->tx_fifo_depth * port->tx_fifo_width) / BITS_PER_BYTE;
- if (port->rx_fifo && (old_rx_fifo_depth != port->rx_fifo_depth) && port->rx_fifo_depth) {
- port->rx_fifo = devm_krealloc(uport->dev, port->rx_fifo,
- port->rx_fifo_depth * sizeof(u32),
- GFP_KERNEL);
- if (!port->rx_fifo)
+ if (port->rx_buf && (old_rx_fifo_depth != port->rx_fifo_depth) && port->rx_fifo_depth) {
+ port->rx_buf = devm_krealloc(uport->dev, port->rx_buf,
+ port->rx_fifo_depth * sizeof(u32),
+ GFP_KERNEL);
+ if (!port->rx_buf)
return -ENOMEM;
}
@@ -891,11 +1070,13 @@ static int setup_fifos(struct qcom_geni_serial_port *port)
static void qcom_geni_serial_shutdown(struct uart_port *uport)
{
disable_irq(uport->irq);
+ qcom_geni_serial_stop_tx(uport);
+ qcom_geni_serial_stop_rx(uport);
}
static int qcom_geni_serial_port_setup(struct uart_port *uport)
{
- struct qcom_geni_serial_port *port = to_dev_port(uport, uport);
+ struct qcom_geni_serial_port *port = to_dev_port(uport);
u32 rxstale = DEFAULT_BITS_PER_CHAR * STALE_TIMEOUT;
u32 proto;
u32 pin_swap;
@@ -937,7 +1118,7 @@ static int qcom_geni_serial_port_setup(struct uart_port *uport)
geni_se_config_packing(&port->se, BITS_PER_BYTE, BYTES_PER_FIFO_WORD,
false, true, true);
geni_se_init(&port->se, UART_RX_WM, port->rx_fifo_depth - 2);
- geni_se_select_mode(&port->se, GENI_SE_FIFO);
+ geni_se_select_mode(&port->se, port->dev_data->mode);
qcom_geni_serial_start_rx(uport);
port->setup = true;
@@ -947,7 +1128,7 @@ static int qcom_geni_serial_port_setup(struct uart_port *uport)
static int qcom_geni_serial_startup(struct uart_port *uport)
{
int ret;
- struct qcom_geni_serial_port *port = to_dev_port(uport, uport);
+ struct qcom_geni_serial_port *port = to_dev_port(uport);
if (!port->setup) {
ret = qcom_geni_serial_port_setup(uport);
@@ -1033,7 +1214,7 @@ static void qcom_geni_serial_set_termios(struct uart_port *uport,
u32 stop_bit_len;
unsigned int clk_div;
u32 ser_clk_cfg;
- struct qcom_geni_serial_port *port = to_dev_port(uport, uport);
+ struct qcom_geni_serial_port *port = to_dev_port(uport);
unsigned long clk_rate;
u32 ver, sampling_rate;
unsigned int avg_bw_core;
@@ -1137,11 +1318,6 @@ out_restart_rx:
qcom_geni_serial_start_rx(uport);
}
-static unsigned int qcom_geni_serial_tx_empty(struct uart_port *uport)
-{
- return !readl(uport->membase + SE_GENI_TX_FIFO_STATUS);
-}
-
#ifdef CONFIG_SERIAL_QCOM_GENI_CONSOLE
static int qcom_geni_console_setup(struct console *co, char *options)
{
@@ -1323,7 +1499,7 @@ static struct uart_driver qcom_geni_uart_driver = {
static void qcom_geni_serial_pm(struct uart_port *uport,
unsigned int new_state, unsigned int old_state)
{
- struct qcom_geni_serial_port *port = to_dev_port(uport, uport);
+ struct qcom_geni_serial_port *port = to_dev_port(uport);
/* If we've never been called, treat it as off */
if (old_state == UART_PM_STATE_UNDEFINED)
@@ -1341,10 +1517,10 @@ static void qcom_geni_serial_pm(struct uart_port *uport,
static const struct uart_ops qcom_geni_console_pops = {
.tx_empty = qcom_geni_serial_tx_empty,
- .stop_tx = qcom_geni_serial_stop_tx,
- .start_tx = qcom_geni_serial_start_tx,
- .stop_rx = qcom_geni_serial_stop_rx,
- .start_rx = qcom_geni_serial_start_rx,
+ .stop_tx = qcom_geni_serial_stop_tx_fifo,
+ .start_tx = qcom_geni_serial_start_tx_fifo,
+ .stop_rx = qcom_geni_serial_stop_rx_fifo,
+ .start_rx = qcom_geni_serial_start_rx_fifo,
.set_termios = qcom_geni_serial_set_termios,
.startup = qcom_geni_serial_startup,
.request_port = qcom_geni_serial_request_port,
@@ -1362,9 +1538,10 @@ static const struct uart_ops qcom_geni_console_pops = {
static const struct uart_ops qcom_geni_uart_pops = {
.tx_empty = qcom_geni_serial_tx_empty,
- .stop_tx = qcom_geni_serial_stop_tx,
- .start_tx = qcom_geni_serial_start_tx,
- .stop_rx = qcom_geni_serial_stop_rx,
+ .stop_tx = qcom_geni_serial_stop_tx_dma,
+ .start_tx = qcom_geni_serial_start_tx_dma,
+ .start_rx = qcom_geni_serial_start_rx_dma,
+ .stop_rx = qcom_geni_serial_stop_rx_dma,
.set_termios = qcom_geni_serial_set_termios,
.startup = qcom_geni_serial_startup,
.request_port = qcom_geni_serial_request_port,
@@ -1384,13 +1561,14 @@ static int qcom_geni_serial_probe(struct platform_device *pdev)
struct uart_port *uport;
struct resource *res;
int irq;
- bool console = false;
struct uart_driver *drv;
+ const struct qcom_geni_device_data *data;
- if (of_device_is_compatible(pdev->dev.of_node, "qcom,geni-debug-uart"))
- console = true;
+ data = of_device_get_match_data(&pdev->dev);
+ if (!data)
+ return -EINVAL;
- if (console) {
+ if (data->console) {
drv = &qcom_geni_console_driver;
line = of_alias_get_id(pdev->dev.of_node, "serial");
} else {
@@ -1400,7 +1578,7 @@ static int qcom_geni_serial_probe(struct platform_device *pdev)
line = of_alias_get_id(pdev->dev.of_node, "hsuart");
}
- port = get_port_from_line(line, console);
+ port = get_port_from_line(line, data->console);
if (IS_ERR(port)) {
dev_err(&pdev->dev, "Invalid line %d\n", line);
return PTR_ERR(port);
@@ -1412,6 +1590,7 @@ static int qcom_geni_serial_probe(struct platform_device *pdev)
return -ENODEV;
uport->dev = &pdev->dev;
+ port->dev_data = data;
port->se.dev = &pdev->dev;
port->se.wrapper = dev_get_drvdata(pdev->dev.parent);
port->se.clk = devm_clk_get(&pdev->dev, "se");
@@ -1430,10 +1609,10 @@ static int qcom_geni_serial_probe(struct platform_device *pdev)
port->rx_fifo_depth = DEF_FIFO_DEPTH_WORDS;
port->tx_fifo_width = DEF_FIFO_WIDTH_BITS;
- if (!console) {
- port->rx_fifo = devm_kcalloc(uport->dev,
- port->rx_fifo_depth, sizeof(u32), GFP_KERNEL);
- if (!port->rx_fifo)
+ if (!data->console) {
+ port->rx_buf = devm_kzalloc(uport->dev,
+ DMA_RX_BUF_SIZE, GFP_KERNEL);
+ if (!port->rx_buf)
return -ENOMEM;
}
@@ -1460,7 +1639,7 @@ static int qcom_geni_serial_probe(struct platform_device *pdev)
uport->irq = irq;
uport->has_sysrq = IS_ENABLED(CONFIG_SERIAL_QCOM_GENI_CONSOLE);
- if (!console)
+ if (!data->console)
port->wakeup_irq = platform_get_irq_optional(pdev, 1);
if (of_property_read_bool(pdev->dev.of_node, "rx-tx-swap"))
@@ -1482,7 +1661,6 @@ static int qcom_geni_serial_probe(struct platform_device *pdev)
port->private_data.drv = drv;
uport->private_data = &port->private_data;
platform_set_drvdata(pdev, port);
- port->handle_rx = console ? handle_rx_console : handle_rx_uart;
ret = uart_add_one_port(drv, uport);
if (ret)
@@ -1594,6 +1772,16 @@ static int qcom_geni_serial_sys_hib_resume(struct device *dev)
return ret;
}
+static const struct qcom_geni_device_data qcom_geni_console_data = {
+ .console = true,
+ .mode = GENI_SE_FIFO,
+};
+
+static const struct qcom_geni_device_data qcom_geni_uart_data = {
+ .console = false,
+ .mode = GENI_SE_DMA,
+};
+
static const struct dev_pm_ops qcom_geni_serial_pm_ops = {
.suspend = pm_sleep_ptr(qcom_geni_serial_sys_suspend),
.resume = pm_sleep_ptr(qcom_geni_serial_sys_resume),
@@ -1604,8 +1792,14 @@ static const struct dev_pm_ops qcom_geni_serial_pm_ops = {
};
static const struct of_device_id qcom_geni_serial_match_table[] = {
- { .compatible = "qcom,geni-debug-uart", },
- { .compatible = "qcom,geni-uart", },
+ {
+ .compatible = "qcom,geni-debug-uart",
+ .data = &qcom_geni_console_data,
+ },
+ {
+ .compatible = "qcom,geni-uart",
+ .data = &qcom_geni_uart_data,
+ },
{}
};
MODULE_DEVICE_TABLE(of, qcom_geni_serial_match_table);
diff --git a/drivers/tty/serial/samsung_tty.c b/drivers/tty/serial/samsung_tty.c
index 0fce856434da..2a7520ad3abd 100644
--- a/drivers/tty/serial/samsung_tty.c
+++ b/drivers/tty/serial/samsung_tty.c
@@ -152,10 +152,6 @@ struct s3c24xx_uart_port {
const struct s3c2410_uartcfg *cfg;
struct s3c24xx_uart_dma *dma;
-
-#ifdef CONFIG_ARM_S3C24XX_CPUFREQ
- struct notifier_block freq_transition;
-#endif
};
static void s3c24xx_serial_tx_chars(struct s3c24xx_uart_port *ourport);
@@ -1855,93 +1851,6 @@ static void s3c24xx_serial_resetport(struct uart_port *port,
udelay(1);
}
-#ifdef CONFIG_ARM_S3C24XX_CPUFREQ
-
-static int s3c24xx_serial_cpufreq_transition(struct notifier_block *nb,
- unsigned long val, void *data)
-{
- struct s3c24xx_uart_port *port;
- struct uart_port *uport;
-
- port = container_of(nb, struct s3c24xx_uart_port, freq_transition);
- uport = &port->port;
-
- /* check to see if port is enabled */
-
- if (port->pm_level != 0)
- return 0;
-
- /* try and work out if the baudrate is changing, we can detect
- * a change in rate, but we do not have support for detecting
- * a disturbance in the clock-rate over the change.
- */
-
- if (IS_ERR(port->baudclk))
- goto exit;
-
- if (port->baudclk_rate == clk_get_rate(port->baudclk))
- goto exit;
-
- if (val == CPUFREQ_PRECHANGE) {
- /* we should really shut the port down whilst the
- * frequency change is in progress.
- */
-
- } else if (val == CPUFREQ_POSTCHANGE) {
- struct ktermios *termios;
- struct tty_struct *tty;
-
- if (uport->state == NULL)
- goto exit;
-
- tty = uport->state->port.tty;
-
- if (tty == NULL)
- goto exit;
-
- termios = &tty->termios;
-
- if (termios == NULL) {
- dev_warn(uport->dev, "%s: no termios?\n", __func__);
- goto exit;
- }
-
- s3c24xx_serial_set_termios(uport, termios, NULL);
- }
-
-exit:
- return 0;
-}
-
-static inline int
-s3c24xx_serial_cpufreq_register(struct s3c24xx_uart_port *port)
-{
- port->freq_transition.notifier_call = s3c24xx_serial_cpufreq_transition;
-
- return cpufreq_register_notifier(&port->freq_transition,
- CPUFREQ_TRANSITION_NOTIFIER);
-}
-
-static inline void
-s3c24xx_serial_cpufreq_deregister(struct s3c24xx_uart_port *port)
-{
- cpufreq_unregister_notifier(&port->freq_transition,
- CPUFREQ_TRANSITION_NOTIFIER);
-}
-
-#else
-static inline int
-s3c24xx_serial_cpufreq_register(struct s3c24xx_uart_port *port)
-{
- return 0;
-}
-
-static inline void
-s3c24xx_serial_cpufreq_deregister(struct s3c24xx_uart_port *port)
-{
-}
-#endif
-
static int s3c24xx_serial_enable_baudclk(struct s3c24xx_uart_port *ourport)
{
struct device *dev = ourport->port.dev;
@@ -2233,10 +2142,6 @@ static int s3c24xx_serial_probe(struct platform_device *pdev)
if (!IS_ERR(ourport->baudclk))
clk_disable_unprepare(ourport->baudclk);
- ret = s3c24xx_serial_cpufreq_register(ourport);
- if (ret < 0)
- dev_err(&pdev->dev, "failed to add cpufreq notifier\n");
-
probe_index++;
return 0;
@@ -2247,7 +2152,6 @@ static int s3c24xx_serial_remove(struct platform_device *dev)
struct uart_port *port = s3c24xx_dev_to_port(&dev->dev);
if (port) {
- s3c24xx_serial_cpufreq_deregister(to_ourport(port));
uart_remove_one_port(&s3c24xx_uart_drv, port);
}
@@ -2585,94 +2489,6 @@ static struct console s3c24xx_serial_console = {
};
#endif /* CONFIG_SERIAL_SAMSUNG_CONSOLE */
-#ifdef CONFIG_CPU_S3C2410
-static const struct s3c24xx_serial_drv_data s3c2410_serial_drv_data = {
- .info = {
- .name = "Samsung S3C2410 UART",
- .type = TYPE_S3C24XX,
- .port_type = PORT_S3C2410,
- .fifosize = 16,
- .rx_fifomask = S3C2410_UFSTAT_RXMASK,
- .rx_fifoshift = S3C2410_UFSTAT_RXSHIFT,
- .rx_fifofull = S3C2410_UFSTAT_RXFULL,
- .tx_fifofull = S3C2410_UFSTAT_TXFULL,
- .tx_fifomask = S3C2410_UFSTAT_TXMASK,
- .tx_fifoshift = S3C2410_UFSTAT_TXSHIFT,
- .def_clk_sel = S3C2410_UCON_CLKSEL0,
- .num_clks = 2,
- .clksel_mask = S3C2410_UCON_CLKMASK,
- .clksel_shift = S3C2410_UCON_CLKSHIFT,
- },
- .def_cfg = {
- .ucon = S3C2410_UCON_DEFAULT,
- .ufcon = S3C2410_UFCON_DEFAULT,
- },
-};
-#define S3C2410_SERIAL_DRV_DATA (&s3c2410_serial_drv_data)
-#else
-#define S3C2410_SERIAL_DRV_DATA NULL
-#endif
-
-#ifdef CONFIG_CPU_S3C2412
-static const struct s3c24xx_serial_drv_data s3c2412_serial_drv_data = {
- .info = {
- .name = "Samsung S3C2412 UART",
- .type = TYPE_S3C24XX,
- .port_type = PORT_S3C2412,
- .fifosize = 64,
- .has_divslot = 1,
- .rx_fifomask = S3C2440_UFSTAT_RXMASK,
- .rx_fifoshift = S3C2440_UFSTAT_RXSHIFT,
- .rx_fifofull = S3C2440_UFSTAT_RXFULL,
- .tx_fifofull = S3C2440_UFSTAT_TXFULL,
- .tx_fifomask = S3C2440_UFSTAT_TXMASK,
- .tx_fifoshift = S3C2440_UFSTAT_TXSHIFT,
- .def_clk_sel = S3C2410_UCON_CLKSEL2,
- .num_clks = 4,
- .clksel_mask = S3C2412_UCON_CLKMASK,
- .clksel_shift = S3C2412_UCON_CLKSHIFT,
- },
- .def_cfg = {
- .ucon = S3C2410_UCON_DEFAULT,
- .ufcon = S3C2410_UFCON_DEFAULT,
- },
-};
-#define S3C2412_SERIAL_DRV_DATA (&s3c2412_serial_drv_data)
-#else
-#define S3C2412_SERIAL_DRV_DATA NULL
-#endif
-
-#if defined(CONFIG_CPU_S3C2440) || defined(CONFIG_CPU_S3C2416) || \
- defined(CONFIG_CPU_S3C2443) || defined(CONFIG_CPU_S3C2442)
-static const struct s3c24xx_serial_drv_data s3c2440_serial_drv_data = {
- .info = {
- .name = "Samsung S3C2440 UART",
- .type = TYPE_S3C24XX,
- .port_type = PORT_S3C2440,
- .fifosize = 64,
- .has_divslot = 1,
- .rx_fifomask = S3C2440_UFSTAT_RXMASK,
- .rx_fifoshift = S3C2440_UFSTAT_RXSHIFT,
- .rx_fifofull = S3C2440_UFSTAT_RXFULL,
- .tx_fifofull = S3C2440_UFSTAT_TXFULL,
- .tx_fifomask = S3C2440_UFSTAT_TXMASK,
- .tx_fifoshift = S3C2440_UFSTAT_TXSHIFT,
- .def_clk_sel = S3C2410_UCON_CLKSEL2,
- .num_clks = 4,
- .clksel_mask = S3C2412_UCON_CLKMASK,
- .clksel_shift = S3C2412_UCON_CLKSHIFT,
- .ucon_mask = S3C2440_UCON0_DIVMASK,
- },
- .def_cfg = {
- .ucon = S3C2410_UCON_DEFAULT,
- .ufcon = S3C2410_UFCON_DEFAULT,
- },
-};
-#define S3C2440_SERIAL_DRV_DATA (&s3c2440_serial_drv_data)
-#else
-#define S3C2440_SERIAL_DRV_DATA NULL
-#endif
-
#if defined(CONFIG_CPU_S3C6400) || defined(CONFIG_CPU_S3C6410)
static const struct s3c24xx_serial_drv_data s3c6400_serial_drv_data = {
.info = {
@@ -2841,15 +2657,6 @@ static const struct s3c24xx_serial_drv_data artpec8_serial_drv_data = {
static const struct platform_device_id s3c24xx_serial_driver_ids[] = {
{
- .name = "s3c2410-uart",
- .driver_data = (kernel_ulong_t)S3C2410_SERIAL_DRV_DATA,
- }, {
- .name = "s3c2412-uart",
- .driver_data = (kernel_ulong_t)S3C2412_SERIAL_DRV_DATA,
- }, {
- .name = "s3c2440-uart",
- .driver_data = (kernel_ulong_t)S3C2440_SERIAL_DRV_DATA,
- }, {
.name = "s3c6400-uart",
.driver_data = (kernel_ulong_t)S3C6400_SERIAL_DRV_DATA,
}, {
@@ -2877,12 +2684,6 @@ MODULE_DEVICE_TABLE(platform, s3c24xx_serial_driver_ids);
#ifdef CONFIG_OF
static const struct of_device_id s3c24xx_uart_dt_match[] = {
- { .compatible = "samsung,s3c2410-uart",
- .data = S3C2410_SERIAL_DRV_DATA },
- { .compatible = "samsung,s3c2412-uart",
- .data = S3C2412_SERIAL_DRV_DATA },
- { .compatible = "samsung,s3c2440-uart",
- .data = S3C2440_SERIAL_DRV_DATA },
{ .compatible = "samsung,s3c6400-uart",
.data = S3C6400_SERIAL_DRV_DATA },
{ .compatible = "samsung,s5pv210-uart",
diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
index 39f92eb1e698..29c94be09159 100644
--- a/drivers/tty/serial/sc16is7xx.c
+++ b/drivers/tty/serial/sc16is7xx.c
@@ -1423,25 +1423,6 @@ static int sc16is7xx_probe(struct device *dev,
}
sched_set_fifo(s->kworker_task);
-#ifdef CONFIG_GPIOLIB
- if (devtype->nr_gpio) {
- /* Setup GPIO cotroller */
- s->gpio.owner = THIS_MODULE;
- s->gpio.parent = dev;
- s->gpio.label = dev_name(dev);
- s->gpio.direction_input = sc16is7xx_gpio_direction_input;
- s->gpio.get = sc16is7xx_gpio_get;
- s->gpio.direction_output = sc16is7xx_gpio_direction_output;
- s->gpio.set = sc16is7xx_gpio_set;
- s->gpio.base = -1;
- s->gpio.ngpio = devtype->nr_gpio;
- s->gpio.can_sleep = 1;
- ret = gpiochip_add_data(&s->gpio, s);
- if (ret)
- goto out_thread;
- }
-#endif
-
/* reset device, purging any pending irq / data */
regmap_write(s->regmap, SC16IS7XX_IOCONTROL_REG << SC16IS7XX_REG_SHIFT,
SC16IS7XX_IOCONTROL_SRESET_BIT);
@@ -1518,6 +1499,25 @@ static int sc16is7xx_probe(struct device *dev,
s->p[u].irda_mode = true;
}
+#ifdef CONFIG_GPIOLIB
+ if (devtype->nr_gpio) {
+ /* Setup GPIO cotroller */
+ s->gpio.owner = THIS_MODULE;
+ s->gpio.parent = dev;
+ s->gpio.label = dev_name(dev);
+ s->gpio.direction_input = sc16is7xx_gpio_direction_input;
+ s->gpio.get = sc16is7xx_gpio_get;
+ s->gpio.direction_output = sc16is7xx_gpio_direction_output;
+ s->gpio.set = sc16is7xx_gpio_set;
+ s->gpio.base = -1;
+ s->gpio.ngpio = devtype->nr_gpio;
+ s->gpio.can_sleep = 1;
+ ret = gpiochip_add_data(&s->gpio, s);
+ if (ret)
+ goto out_thread;
+ }
+#endif
+
/*
* Setup interrupt. We first try to acquire the IRQ line as level IRQ.
* If that succeeds, we can allow sharing the interrupt as well.
@@ -1537,18 +1537,19 @@ static int sc16is7xx_probe(struct device *dev,
if (!ret)
return 0;
-out_ports:
- for (i--; i >= 0; i--) {
- uart_remove_one_port(&sc16is7xx_uart, &s->p[i].port);
- clear_bit(s->p[i].port.line, &sc16is7xx_lines);
- }
-
#ifdef CONFIG_GPIOLIB
if (devtype->nr_gpio)
gpiochip_remove(&s->gpio);
out_thread:
#endif
+
+out_ports:
+ for (i--; i >= 0; i--) {
+ uart_remove_one_port(&sc16is7xx_uart, &s->p[i].port);
+ clear_bit(s->p[i].port.line, &sc16is7xx_lines);
+ }
+
kthread_stop(s->kworker_task);
out_clk:
diff --git a/drivers/tty/serial/sccnxp.c b/drivers/tty/serial/sccnxp.c
index 7df687822634..4f2fc5f7bb19 100644
--- a/drivers/tty/serial/sccnxp.c
+++ b/drivers/tty/serial/sccnxp.c
@@ -913,23 +913,13 @@ static int sccnxp_probe(struct platform_device *pdev)
} else if (PTR_ERR(s->regulator) == -EPROBE_DEFER)
return -EPROBE_DEFER;
- clk = devm_clk_get(&pdev->dev, NULL);
+ clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(clk)) {
ret = PTR_ERR(clk);
if (ret == -EPROBE_DEFER)
goto err_out;
uartclk = 0;
} else {
- ret = clk_prepare_enable(clk);
- if (ret)
- goto err_out;
-
- ret = devm_add_action_or_reset(&pdev->dev,
- (void(*)(void *))clk_disable_unprepare,
- clk);
- if (ret)
- goto err_out;
-
uartclk = clk_get_rate(clk);
}
diff --git a/drivers/tty/serial/serial-tegra.c b/drivers/tty/serial/serial-tegra.c
index e5b9773db5e3..1cf08b33456c 100644
--- a/drivers/tty/serial/serial-tegra.c
+++ b/drivers/tty/serial/serial-tegra.c
@@ -1046,6 +1046,7 @@ static int tegra_uart_hw_init(struct tegra_uart_port *tup)
if (tup->cdata->fifo_mode_enable_status) {
ret = tegra_uart_wait_fifo_mode_enabled(tup);
if (ret < 0) {
+ clk_disable_unprepare(tup->uart_clk);
dev_err(tup->uport.dev,
"Failed to enable FIFO mode: %d\n", ret);
return ret;
@@ -1067,6 +1068,7 @@ static int tegra_uart_hw_init(struct tegra_uart_port *tup)
*/
ret = tegra_set_baudrate(tup, TEGRA_UART_DEFAULT_BAUD);
if (ret < 0) {
+ clk_disable_unprepare(tup->uart_clk);
dev_err(tup->uport.dev, "Failed to set baud rate\n");
return ret;
}
@@ -1226,10 +1228,13 @@ static int tegra_uart_startup(struct uart_port *u)
dev_name(u->dev), tup);
if (ret < 0) {
dev_err(u->dev, "Failed to register ISR for IRQ %d\n", u->irq);
- goto fail_hw_init;
+ goto fail_request_irq;
}
return 0;
+fail_request_irq:
+ /* tup->uart_clk is already enabled in tegra_uart_hw_init */
+ clk_disable_unprepare(tup->uart_clk);
fail_hw_init:
if (!tup->use_rx_pio)
tegra_uart_dma_channel_free(tup, true);
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index ec874f3a567c..2bd32c8ece39 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -169,9 +169,9 @@ uart_update_mctrl(struct uart_port *port, unsigned int set, unsigned int clear)
#define uart_set_mctrl(port, set) uart_update_mctrl(port, set, 0)
#define uart_clear_mctrl(port, clear) uart_update_mctrl(port, 0, clear)
-static void uart_port_dtr_rts(struct uart_port *uport, int raise)
+static void uart_port_dtr_rts(struct uart_port *uport, bool active)
{
- if (raise)
+ if (active)
uart_set_mctrl(uport, TIOCM_DTR | TIOCM_RTS);
else
uart_clear_mctrl(uport, TIOCM_DTR | TIOCM_RTS);
@@ -182,7 +182,7 @@ static void uart_port_dtr_rts(struct uart_port *uport, int raise)
* will be serialised by the per-port mutex.
*/
static int uart_port_startup(struct tty_struct *tty, struct uart_state *state,
- int init_hw)
+ bool init_hw)
{
struct uart_port *uport = uart_port_check(state);
unsigned long flags;
@@ -239,7 +239,7 @@ static int uart_port_startup(struct tty_struct *tty, struct uart_state *state,
* port is open and ready to respond.
*/
if (init_hw && C_BAUD(tty))
- uart_port_dtr_rts(uport, 1);
+ uart_port_dtr_rts(uport, true);
}
/*
@@ -254,7 +254,7 @@ static int uart_port_startup(struct tty_struct *tty, struct uart_state *state,
}
static int uart_startup(struct tty_struct *tty, struct uart_state *state,
- int init_hw)
+ bool init_hw)
{
struct tty_port *port = &state->port;
int retval;
@@ -290,7 +290,7 @@ static void uart_shutdown(struct tty_struct *tty, struct uart_state *state)
set_bit(TTY_IO_ERROR, &tty->flags);
if (tty_port_initialized(port)) {
- tty_port_set_initialized(port, 0);
+ tty_port_set_initialized(port, false);
/*
* Turn off DTR and RTS early.
@@ -302,7 +302,7 @@ static void uart_shutdown(struct tty_struct *tty, struct uart_state *state)
}
if (!tty || C_HUPCL(tty))
- uart_port_dtr_rts(uport, 0);
+ uart_port_dtr_rts(uport, false);
uart_port_shutdown(port);
}
@@ -312,7 +312,7 @@ static void uart_shutdown(struct tty_struct *tty, struct uart_state *state)
* a DCD drop (hangup) at just the right time. Clear suspended bit so
* we don't try to resume a port that has been shutdown.
*/
- tty_port_set_suspended(port, 0);
+ tty_port_set_suspended(port, false);
/*
* Do not free() the transmit buffer page under the port lock since
@@ -997,7 +997,7 @@ static int uart_set_info(struct tty_struct *tty, struct tty_port *port,
uart_change_speed(tty, state, NULL);
}
} else {
- retval = uart_startup(tty, state, 1);
+ retval = uart_startup(tty, state, true);
if (retval == 0)
tty_port_set_initialized(port, true);
if (retval > 0)
@@ -1165,7 +1165,7 @@ static int uart_do_autoconfig(struct tty_struct *tty, struct uart_state *state)
*/
uport->ops->config_port(uport, flags);
- ret = uart_startup(tty, state, 1);
+ ret = uart_startup(tty, state, true);
if (ret == 0)
tty_port_set_initialized(port, true);
if (ret > 0)
@@ -1725,7 +1725,7 @@ static void uart_tty_port_shutdown(struct tty_port *port)
* a DCD drop (hangup) at just the right time. Clear suspended bit so
* we don't try to resume a port that has been shutdown.
*/
- tty_port_set_suspended(port, 0);
+ tty_port_set_suspended(port, false);
/*
* Free the transmit buffer.
@@ -1827,7 +1827,7 @@ static void uart_hangup(struct tty_struct *tty)
spin_lock_irqsave(&port->lock, flags);
port->count = 0;
spin_unlock_irqrestore(&port->lock, flags);
- tty_port_set_active(port, 0);
+ tty_port_set_active(port, false);
tty_port_tty_set(port, NULL);
if (uport && !uart_console(uport))
uart_change_pm(state, UART_PM_STATE_OFF);
@@ -1861,7 +1861,7 @@ static void uart_port_shutdown(struct tty_port *port)
}
}
-static int uart_carrier_raised(struct tty_port *port)
+static bool uart_carrier_raised(struct tty_port *port)
{
struct uart_state *state = container_of(port, struct uart_state, port);
struct uart_port *uport;
@@ -1875,18 +1875,17 @@ static int uart_carrier_raised(struct tty_port *port)
* continue and not sleep
*/
if (WARN_ON(!uport))
- return 1;
+ return true;
spin_lock_irq(&uport->lock);
uart_enable_ms(uport);
mctrl = uport->ops->get_mctrl(uport);
spin_unlock_irq(&uport->lock);
uart_port_deref(uport);
- if (mctrl & TIOCM_CAR)
- return 1;
- return 0;
+
+ return mctrl & TIOCM_CAR;
}
-static void uart_dtr_rts(struct tty_port *port, int raise)
+static void uart_dtr_rts(struct tty_port *port, bool active)
{
struct uart_state *state = container_of(port, struct uart_state, port);
struct uart_port *uport;
@@ -1894,7 +1893,7 @@ static void uart_dtr_rts(struct tty_port *port, int raise)
uport = uart_port_ref(state);
if (!uport)
return;
- uart_port_dtr_rts(uport, raise);
+ uart_port_dtr_rts(uport, active);
uart_port_deref(uport);
}
@@ -1943,9 +1942,9 @@ static int uart_port_activate(struct tty_port *port, struct tty_struct *tty)
/*
* Start up the serial port.
*/
- ret = uart_startup(tty, state, 0);
+ ret = uart_startup(tty, state, false);
if (ret > 0)
- tty_port_set_active(port, 1);
+ tty_port_set_active(port, true);
return ret;
}
@@ -2349,8 +2348,8 @@ int uart_suspend_port(struct uart_driver *drv, struct uart_port *uport)
int tries;
unsigned int mctrl;
- tty_port_set_suspended(port, 1);
- tty_port_set_initialized(port, 0);
+ tty_port_set_suspended(port, true);
+ tty_port_set_initialized(port, false);
spin_lock_irq(&uport->lock);
ops->stop_tx(uport);
@@ -2461,7 +2460,7 @@ int uart_resume_port(struct uart_driver *drv, struct uart_port *uport)
uart_rs485_config(uport);
ops->start_tx(uport);
spin_unlock_irq(&uport->lock);
- tty_port_set_initialized(port, 1);
+ tty_port_set_initialized(port, true);
} else {
/*
* Failed to resume - maybe hardware went away?
@@ -2472,7 +2471,7 @@ int uart_resume_port(struct uart_driver *drv, struct uart_port *uport)
}
}
- tty_port_set_suspended(port, 0);
+ tty_port_set_suspended(port, false);
}
mutex_unlock(&port->mutex);
@@ -3258,11 +3257,11 @@ EXPORT_SYMBOL(uart_match_port);
/**
* uart_handle_dcd_change - handle a change of carrier detect state
* @uport: uart_port structure for the open port
- * @status: new carrier detect status, nonzero if active
+ * @active: new carrier detect status
*
* Caller must hold uport->lock.
*/
-void uart_handle_dcd_change(struct uart_port *uport, unsigned int status)
+void uart_handle_dcd_change(struct uart_port *uport, bool active)
{
struct tty_port *port = &uport->state->port;
struct tty_struct *tty = port->tty;
@@ -3274,7 +3273,7 @@ void uart_handle_dcd_change(struct uart_port *uport, unsigned int status)
ld = tty_ldisc_ref(tty);
if (ld) {
if (ld->ops->dcd_change)
- ld->ops->dcd_change(tty, status);
+ ld->ops->dcd_change(tty, active);
tty_ldisc_deref(ld);
}
}
@@ -3282,7 +3281,7 @@ void uart_handle_dcd_change(struct uart_port *uport, unsigned int status)
uport->icount.dcd++;
if (uart_dcd_enabled(uport)) {
- if (status)
+ if (active)
wake_up_interruptible(&port->open_wait);
else if (tty)
tty_hangup(tty);
@@ -3293,11 +3292,11 @@ EXPORT_SYMBOL_GPL(uart_handle_dcd_change);
/**
* uart_handle_cts_change - handle a change of clear-to-send state
* @uport: uart_port structure for the open port
- * @status: new clear to send status, nonzero if active
+ * @active: new clear-to-send status
*
* Caller must hold uport->lock.
*/
-void uart_handle_cts_change(struct uart_port *uport, unsigned int status)
+void uart_handle_cts_change(struct uart_port *uport, bool active)
{
lockdep_assert_held_once(&uport->lock);
@@ -3305,13 +3304,13 @@ void uart_handle_cts_change(struct uart_port *uport, unsigned int status)
if (uart_softcts_mode(uport)) {
if (uport->hw_stopped) {
- if (status) {
+ if (active) {
uport->hw_stopped = 0;
uport->ops->start_tx(uport);
uart_write_wakeup(uport);
}
} else {
- if (!status) {
+ if (!active) {
uport->hw_stopped = 1;
uport->ops->stop_tx(uport);
}
@@ -3415,6 +3414,7 @@ int uart_get_rs485_mode(struct uart_port *port)
struct device *dev = port->dev;
u32 rs485_delay[2];
int ret;
+ int rx_during_tx_gpio_flag;
ret = device_property_read_u32_array(dev, "rs485-rts-delay",
rs485_delay, 2);
@@ -3463,6 +3463,17 @@ int uart_get_rs485_mode(struct uart_port *port)
if (port->rs485_term_gpio)
port->rs485_supported.flags |= SER_RS485_TERMINATE_BUS;
+ rx_during_tx_gpio_flag = (rs485conf->flags & SER_RS485_RX_DURING_TX) ?
+ GPIOD_OUT_HIGH : GPIOD_OUT_LOW;
+ port->rs485_rx_during_tx_gpio = devm_gpiod_get_optional(dev,
+ "rs485-rx-during-tx",
+ rx_during_tx_gpio_flag);
+ if (IS_ERR(port->rs485_rx_during_tx_gpio)) {
+ ret = PTR_ERR(port->rs485_rx_during_tx_gpio);
+ port->rs485_rx_during_tx_gpio = NULL;
+ return dev_err_probe(dev, ret, "Cannot get rs485-rx-during-tx-gpios\n");
+ }
+
return 0;
}
EXPORT_SYMBOL_GPL(uart_get_rs485_mode);
diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c
index a1490033aa16..767ff9fdb2e5 100644
--- a/drivers/tty/serial/stm32-usart.c
+++ b/drivers/tty/serial/stm32-usart.c
@@ -226,7 +226,11 @@ static int stm32_usart_config_rs485(struct uart_port *port, struct ktermios *ter
stm32_usart_clr_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
- rs485conf->flags |= SER_RS485_RX_DURING_TX;
+ if (port->rs485_rx_during_tx_gpio)
+ gpiod_set_value_cansleep(port->rs485_rx_during_tx_gpio,
+ !!(rs485conf->flags & SER_RS485_RX_DURING_TX));
+ else
+ rs485conf->flags |= SER_RS485_RX_DURING_TX;
if (rs485conf->flags & SER_RS485_ENABLED) {
cr1 = readl_relaxed(port->membase + ofs->cr1);
@@ -797,25 +801,11 @@ static irqreturn_t stm32_usart_interrupt(int irq, void *ptr)
spin_unlock(&port->lock);
}
- if (stm32_usart_rx_dma_enabled(port))
- return IRQ_WAKE_THREAD;
- else
- return IRQ_HANDLED;
-}
-
-static irqreturn_t stm32_usart_threaded_interrupt(int irq, void *ptr)
-{
- struct uart_port *port = ptr;
- struct tty_port *tport = &port->state->port;
- struct stm32_port *stm32_port = to_stm32_port(port);
- unsigned int size;
- unsigned long flags;
-
/* Receiver timeout irq for DMA RX */
- if (!stm32_port->throttled) {
- spin_lock_irqsave(&port->lock, flags);
+ if (stm32_usart_rx_dma_enabled(port) && !stm32_port->throttled) {
+ spin_lock(&port->lock);
size = stm32_usart_receive_chars(port, false);
- uart_unlock_and_check_sysrq_irqrestore(port, flags);
+ uart_unlock_and_check_sysrq(port);
if (size)
tty_flip_buffer_push(tport);
}
@@ -1015,10 +1005,8 @@ static int stm32_usart_startup(struct uart_port *port)
u32 val;
int ret;
- ret = request_threaded_irq(port->irq, stm32_usart_interrupt,
- stm32_usart_threaded_interrupt,
- IRQF_ONESHOT | IRQF_NO_SUSPEND,
- name, port);
+ ret = request_irq(port->irq, stm32_usart_interrupt,
+ IRQF_NO_SUSPEND, name, port);
if (ret)
return ret;
@@ -1601,13 +1589,6 @@ static int stm32_usart_of_dma_rx_probe(struct stm32_port *stm32port,
struct dma_slave_config config;
int ret;
- /*
- * Using DMA and threaded handler for the console could lead to
- * deadlocks.
- */
- if (uart_console(port))
- return -ENODEV;
-
stm32port->rx_buf = dma_alloc_coherent(dev, RX_BUF_L,
&stm32port->rx_dma_buf,
GFP_KERNEL);
diff --git a/drivers/tty/serial/sunhv.c b/drivers/tty/serial/sunhv.c
index 16c746a63258..7d38c33ef506 100644
--- a/drivers/tty/serial/sunhv.c
+++ b/drivers/tty/serial/sunhv.c
@@ -87,10 +87,10 @@ static int receive_chars_getchar(struct uart_port *port)
if (c == CON_HUP) {
hung_up = 1;
- uart_handle_dcd_change(port, 0);
+ uart_handle_dcd_change(port, false);
} else if (hung_up) {
hung_up = 0;
- uart_handle_dcd_change(port, 1);
+ uart_handle_dcd_change(port, true);
}
if (port->state == NULL) {
@@ -133,7 +133,7 @@ static int receive_chars_read(struct uart_port *port)
bytes_read = 1;
} else if (stat == CON_HUP) {
hung_up = 1;
- uart_handle_dcd_change(port, 0);
+ uart_handle_dcd_change(port, false);
continue;
} else {
/* HV_EWOULDBLOCK, etc. */
@@ -143,7 +143,7 @@ static int receive_chars_read(struct uart_port *port)
if (hung_up) {
hung_up = 0;
- uart_handle_dcd_change(port, 1);
+ uart_handle_dcd_change(port, true);
}
if (port->sysrq != 0 && *con_read_page) {
diff --git a/drivers/tty/serial/ucc_uart.c b/drivers/tty/serial/ucc_uart.c
index b09b6496ee3e..32c7a5b43f8e 100644
--- a/drivers/tty/serial/ucc_uart.c
+++ b/drivers/tty/serial/ucc_uart.c
@@ -1468,6 +1468,8 @@ static int ucc_uart_remove(struct platform_device *ofdev)
uart_remove_one_port(&ucc_uart_driver, &qe_port->port);
+ of_node_put(qe_port->np);
+
kfree(qe_port);
return 0;
diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
index 72b76cdde534..33f258d6fef9 100644
--- a/drivers/tty/synclink_gt.c
+++ b/drivers/tty/synclink_gt.c
@@ -694,7 +694,7 @@ static void hangup(struct tty_struct *tty)
info->port.count = 0;
info->port.tty = NULL;
spin_unlock_irqrestore(&info->port.lock, flags);
- tty_port_set_active(&info->port, 0);
+ tty_port_set_active(&info->port, false);
mutex_unlock(&info->port.mutex);
wake_up_interruptible(&info->port.open_wait);
@@ -2354,7 +2354,7 @@ static int startup(struct slgt_info *info)
if (info->port.tty)
clear_bit(TTY_IO_ERROR, &info->port.tty->flags);
- tty_port_set_initialized(&info->port, 1);
+ tty_port_set_initialized(&info->port, true);
return 0;
}
@@ -2401,7 +2401,7 @@ static void shutdown(struct slgt_info *info)
if (info->port.tty)
set_bit(TTY_IO_ERROR, &info->port.tty->flags);
- tty_port_set_initialized(&info->port, 0);
+ tty_port_set_initialized(&info->port, false);
}
static void program_hw(struct slgt_info *info)
@@ -3126,7 +3126,7 @@ static int tiocmset(struct tty_struct *tty,
return 0;
}
-static int carrier_raised(struct tty_port *port)
+static bool carrier_raised(struct tty_port *port)
{
unsigned long flags;
struct slgt_info *info = container_of(port, struct slgt_info, port);
@@ -3134,16 +3134,17 @@ static int carrier_raised(struct tty_port *port)
spin_lock_irqsave(&info->lock,flags);
get_gtsignals(info);
spin_unlock_irqrestore(&info->lock,flags);
- return (info->signals & SerialSignal_DCD) ? 1 : 0;
+
+ return info->signals & SerialSignal_DCD;
}
-static void dtr_rts(struct tty_port *port, int on)
+static void dtr_rts(struct tty_port *port, bool active)
{
unsigned long flags;
struct slgt_info *info = container_of(port, struct slgt_info, port);
spin_lock_irqsave(&info->lock,flags);
- if (on)
+ if (active)
info->signals |= SerialSignal_RTS | SerialSignal_DTR;
else
info->signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
@@ -3162,14 +3163,14 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
int retval;
bool do_clocal = false;
unsigned long flags;
- int cd;
+ bool cd;
struct tty_port *port = &info->port;
DBGINFO(("%s block_til_ready\n", tty->driver->name));
if (filp->f_flags & O_NONBLOCK || tty_io_error(tty)) {
/* nonblock mode is set or port is not enabled */
- tty_port_set_active(port, 1);
+ tty_port_set_active(port, true);
return 0;
}
@@ -3226,7 +3227,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
port->blocked_open--;
if (!retval)
- tty_port_set_active(port, 1);
+ tty_port_set_active(port, true);
DBGINFO(("%s block_til_ready ready, rc=%d\n", tty->driver->name, retval));
return retval;
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 3149114bf130..36fb945fdad4 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -1224,14 +1224,16 @@ static struct tty_struct *tty_driver_lookup_tty(struct tty_driver *driver,
{
struct tty_struct *tty;
- if (driver->ops->lookup)
+ if (driver->ops->lookup) {
if (!file)
tty = ERR_PTR(-EIO);
else
tty = driver->ops->lookup(driver, file, idx);
- else
+ } else {
+ if (idx >= driver->num)
+ return ERR_PTR(-EINVAL);
tty = driver->ttys[idx];
-
+ }
if (!IS_ERR(tty))
tty_kref_get(tty);
return tty;
diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c
index ce511557b98b..12983ce4e43e 100644
--- a/drivers/tty/tty_ioctl.c
+++ b/drivers/tty/tty_ioctl.c
@@ -270,13 +270,13 @@ EXPORT_SYMBOL(tty_termios_copy_hw);
* between the two termios structures, or a speed change is needed.
*/
-int tty_termios_hw_change(const struct ktermios *a, const struct ktermios *b)
+bool tty_termios_hw_change(const struct ktermios *a, const struct ktermios *b)
{
if (a->c_ispeed != b->c_ispeed || a->c_ospeed != b->c_ospeed)
- return 1;
+ return true;
if ((a->c_cflag ^ b->c_cflag) & ~(HUPCL | CREAD | CLOCAL))
- return 1;
- return 0;
+ return true;
+ return false;
}
EXPORT_SYMBOL(tty_termios_hw_change);
diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
index dce08a6d7b5e..a788a6bf487d 100644
--- a/drivers/tty/tty_port.c
+++ b/drivers/tty/tty_port.c
@@ -367,7 +367,7 @@ static void tty_port_shutdown(struct tty_port *port, struct tty_struct *tty)
goto out;
if (tty_port_initialized(port)) {
- tty_port_set_initialized(port, 0);
+ tty_port_set_initialized(port, false);
/*
* Drop DTR/RTS if HUPCL is set. This causes any attached
* modem to hang up the line.
@@ -403,7 +403,7 @@ void tty_port_hangup(struct tty_port *port)
set_bit(TTY_IO_ERROR, &tty->flags);
port->tty = NULL;
spin_unlock_irqrestore(&port->lock, flags);
- tty_port_set_active(port, 0);
+ tty_port_set_active(port, false);
tty_port_shutdown(port, tty);
tty_kref_put(tty);
wake_up_interruptible(&port->open_wait);
@@ -444,10 +444,10 @@ EXPORT_SYMBOL_GPL(tty_port_tty_wakeup);
* to hide some internal details. This will eventually become entirely
* internal to the tty port.
*/
-int tty_port_carrier_raised(struct tty_port *port)
+bool tty_port_carrier_raised(struct tty_port *port)
{
if (port->ops->carrier_raised == NULL)
- return 1;
+ return true;
return port->ops->carrier_raised(port);
}
EXPORT_SYMBOL(tty_port_carrier_raised);
@@ -463,7 +463,7 @@ EXPORT_SYMBOL(tty_port_carrier_raised);
void tty_port_raise_dtr_rts(struct tty_port *port)
{
if (port->ops->dtr_rts)
- port->ops->dtr_rts(port, 1);
+ port->ops->dtr_rts(port, true);
}
EXPORT_SYMBOL(tty_port_raise_dtr_rts);
@@ -478,7 +478,7 @@ EXPORT_SYMBOL(tty_port_raise_dtr_rts);
void tty_port_lower_dtr_rts(struct tty_port *port)
{
if (port->ops->dtr_rts)
- port->ops->dtr_rts(port, 0);
+ port->ops->dtr_rts(port, false);
}
EXPORT_SYMBOL(tty_port_lower_dtr_rts);
@@ -518,14 +518,14 @@ int tty_port_block_til_ready(struct tty_port *port,
* the port has just hung up or is in another error state.
*/
if (tty_io_error(tty)) {
- tty_port_set_active(port, 1);
+ tty_port_set_active(port, true);
return 0;
}
if (filp == NULL || (filp->f_flags & O_NONBLOCK)) {
/* Indicate we are open */
if (C_BAUD(tty))
tty_port_raise_dtr_rts(port);
- tty_port_set_active(port, 1);
+ tty_port_set_active(port, true);
return 0;
}
@@ -588,7 +588,7 @@ int tty_port_block_til_ready(struct tty_port *port,
port->blocked_open--;
spin_unlock_irqrestore(&port->lock, flags);
if (retval == 0)
- tty_port_set_active(port, 1);
+ tty_port_set_active(port, true);
return retval;
}
EXPORT_SYMBOL(tty_port_block_til_ready);
@@ -695,7 +695,7 @@ void tty_port_close_end(struct tty_port *port, struct tty_struct *tty)
wake_up_interruptible(&port->open_wait);
}
spin_unlock_irqrestore(&port->lock, flags);
- tty_port_set_active(port, 0);
+ tty_port_set_active(port, false);
}
EXPORT_SYMBOL(tty_port_close_end);
@@ -788,7 +788,7 @@ int tty_port_open(struct tty_port *port, struct tty_struct *tty,
return retval;
}
}
- tty_port_set_initialized(port, 1);
+ tty_port_set_initialized(port, true);
}
mutex_unlock(&port->mutex);
return tty_port_block_til_ready(port, tty, filp);
diff --git a/drivers/tty/vt/vc_screen.c b/drivers/tty/vt/vc_screen.c
index 1850bacdb5b0..f566eb1839dc 100644
--- a/drivers/tty/vt/vc_screen.c
+++ b/drivers/tty/vt/vc_screen.c
@@ -386,10 +386,6 @@ vcs_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
uni_mode = use_unicode(inode);
attr = use_attributes(inode);
- ret = -ENXIO;
- vc = vcs_vc(inode, &viewed);
- if (!vc)
- goto unlock_out;
ret = -EINVAL;
if (pos < 0)
@@ -407,6 +403,11 @@ vcs_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
unsigned int this_round, skip = 0;
int size;
+ ret = -ENXIO;
+ vc = vcs_vc(inode, &viewed);
+ if (!vc)
+ goto unlock_out;
+
/* Check whether we are above size each round,
* as copy_to_user at the end of this loop
* could sleep.
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index 981d2bfcf9a5..57a5c23b51d4 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -316,73 +316,55 @@ void schedule_console_callback(void)
* Code to manage unicode-based screen buffers
*/
-#ifdef NO_VC_UNI_SCREEN
-/* this disables and optimizes related code away at compile time */
-#define get_vc_uniscr(vc) NULL
-#else
-#define get_vc_uniscr(vc) vc->vc_uni_screen
-#endif
-
-#define VC_UNI_SCREEN_DEBUG 0
-
-typedef uint32_t char32_t;
-
/*
* Our screen buffer is preceded by an array of line pointers so that
* scrolling only implies some pointer shuffling.
*/
-struct uni_screen {
- char32_t *lines[0];
-};
-static struct uni_screen *vc_uniscr_alloc(unsigned int cols, unsigned int rows)
+static u32 **vc_uniscr_alloc(unsigned int cols, unsigned int rows)
{
- struct uni_screen *uniscr;
+ u32 **uni_lines;
void *p;
- unsigned int memsize, i;
+ unsigned int memsize, i, col_size = cols * sizeof(**uni_lines);
/* allocate everything in one go */
- memsize = cols * rows * sizeof(char32_t);
- memsize += rows * sizeof(char32_t *);
- p = vzalloc(memsize);
- if (!p)
+ memsize = col_size * rows;
+ memsize += rows * sizeof(*uni_lines);
+ uni_lines = vzalloc(memsize);
+ if (!uni_lines)
return NULL;
/* initial line pointers */
- uniscr = p;
- p = uniscr->lines + rows;
+ p = uni_lines + rows;
for (i = 0; i < rows; i++) {
- uniscr->lines[i] = p;
- p += cols * sizeof(char32_t);
+ uni_lines[i] = p;
+ p += col_size;
}
- return uniscr;
+
+ return uni_lines;
}
-static void vc_uniscr_free(struct uni_screen *uniscr)
+static void vc_uniscr_free(u32 **uni_lines)
{
- vfree(uniscr);
+ vfree(uni_lines);
}
-static void vc_uniscr_set(struct vc_data *vc, struct uni_screen *new_uniscr)
+static void vc_uniscr_set(struct vc_data *vc, u32 **new_uni_lines)
{
- vc_uniscr_free(vc->vc_uni_screen);
- vc->vc_uni_screen = new_uniscr;
+ vc_uniscr_free(vc->vc_uni_lines);
+ vc->vc_uni_lines = new_uni_lines;
}
-static void vc_uniscr_putc(struct vc_data *vc, char32_t uc)
+static void vc_uniscr_putc(struct vc_data *vc, u32 uc)
{
- struct uni_screen *uniscr = get_vc_uniscr(vc);
-
- if (uniscr)
- uniscr->lines[vc->state.y][vc->state.x] = uc;
+ if (vc->vc_uni_lines)
+ vc->vc_uni_lines[vc->state.y][vc->state.x] = uc;
}
static void vc_uniscr_insert(struct vc_data *vc, unsigned int nr)
{
- struct uni_screen *uniscr = get_vc_uniscr(vc);
-
- if (uniscr) {
- char32_t *ln = uniscr->lines[vc->state.y];
+ if (vc->vc_uni_lines) {
+ u32 *ln = vc->vc_uni_lines[vc->state.y];
unsigned int x = vc->state.x, cols = vc->vc_cols;
memmove(&ln[x + nr], &ln[x], (cols - x - nr) * sizeof(*ln));
@@ -392,10 +374,8 @@ static void vc_uniscr_insert(struct vc_data *vc, unsigned int nr)
static void vc_uniscr_delete(struct vc_data *vc, unsigned int nr)
{
- struct uni_screen *uniscr = get_vc_uniscr(vc);
-
- if (uniscr) {
- char32_t *ln = uniscr->lines[vc->state.y];
+ if (vc->vc_uni_lines) {
+ u32 *ln = vc->vc_uni_lines[vc->state.y];
unsigned int x = vc->state.x, cols = vc->vc_cols;
memcpy(&ln[x], &ln[x + nr], (cols - x - nr) * sizeof(*ln));
@@ -406,86 +386,84 @@ static void vc_uniscr_delete(struct vc_data *vc, unsigned int nr)
static void vc_uniscr_clear_line(struct vc_data *vc, unsigned int x,
unsigned int nr)
{
- struct uni_screen *uniscr = get_vc_uniscr(vc);
-
- if (uniscr) {
- char32_t *ln = uniscr->lines[vc->state.y];
-
- memset32(&ln[x], ' ', nr);
- }
+ if (vc->vc_uni_lines)
+ memset32(&vc->vc_uni_lines[vc->state.y][x], ' ', nr);
}
static void vc_uniscr_clear_lines(struct vc_data *vc, unsigned int y,
unsigned int nr)
{
- struct uni_screen *uniscr = get_vc_uniscr(vc);
+ if (vc->vc_uni_lines)
+ while (nr--)
+ memset32(vc->vc_uni_lines[y++], ' ', vc->vc_cols);
+}
+
+/* juggling array rotation algorithm (complexity O(N), size complexity O(1)) */
+static void juggle_array(u32 **array, unsigned int size, unsigned int nr)
+{
+ unsigned int gcd_idx;
- if (uniscr) {
- unsigned int cols = vc->vc_cols;
+ for (gcd_idx = 0; gcd_idx < gcd(nr, size); gcd_idx++) {
+ u32 *gcd_idx_val = array[gcd_idx];
+ unsigned int dst_idx = gcd_idx;
- while (nr--)
- memset32(uniscr->lines[y++], ' ', cols);
+ while (1) {
+ unsigned int src_idx = (dst_idx + nr) % size;
+ if (src_idx == gcd_idx)
+ break;
+
+ array[dst_idx] = array[src_idx];
+ dst_idx = src_idx;
+ }
+
+ array[dst_idx] = gcd_idx_val;
}
}
-static void vc_uniscr_scroll(struct vc_data *vc, unsigned int t, unsigned int b,
- enum con_scroll dir, unsigned int nr)
+static void vc_uniscr_scroll(struct vc_data *vc, unsigned int top,
+ unsigned int bottom, enum con_scroll dir,
+ unsigned int nr)
{
- struct uni_screen *uniscr = get_vc_uniscr(vc);
+ u32 **uni_lines = vc->vc_uni_lines;
+ unsigned int size = bottom - top;
- if (uniscr) {
- unsigned int i, j, k, sz, d, clear;
+ if (!uni_lines)
+ return;
- sz = b - t;
- clear = b - nr;
- d = nr;
- if (dir == SM_DOWN) {
- clear = t;
- d = sz - nr;
- }
- for (i = 0; i < gcd(d, sz); i++) {
- char32_t *tmp = uniscr->lines[t + i];
- j = i;
- while (1) {
- k = j + d;
- if (k >= sz)
- k -= sz;
- if (k == i)
- break;
- uniscr->lines[t + j] = uniscr->lines[t + k];
- j = k;
- }
- uniscr->lines[t + j] = tmp;
- }
- vc_uniscr_clear_lines(vc, clear, nr);
+ if (dir == SM_DOWN) {
+ juggle_array(&uni_lines[top], size, size - nr);
+ vc_uniscr_clear_lines(vc, top, nr);
+ } else {
+ juggle_array(&uni_lines[top], size, nr);
+ vc_uniscr_clear_lines(vc, bottom - nr, nr);
}
}
-static void vc_uniscr_copy_area(struct uni_screen *dst,
+static void vc_uniscr_copy_area(u32 **dst_lines,
unsigned int dst_cols,
unsigned int dst_rows,
- struct uni_screen *src,
+ u32 **src_lines,
unsigned int src_cols,
unsigned int src_top_row,
unsigned int src_bot_row)
{
unsigned int dst_row = 0;
- if (!dst)
+ if (!dst_lines)
return;
while (src_top_row < src_bot_row) {
- char32_t *src_line = src->lines[src_top_row];
- char32_t *dst_line = dst->lines[dst_row];
+ u32 *src_line = src_lines[src_top_row];
+ u32 *dst_line = dst_lines[dst_row];
- memcpy(dst_line, src_line, src_cols * sizeof(char32_t));
+ memcpy(dst_line, src_line, src_cols * sizeof(*src_line));
if (dst_cols - src_cols)
memset32(dst_line + src_cols, ' ', dst_cols - src_cols);
src_top_row++;
dst_row++;
}
while (dst_row < dst_rows) {
- char32_t *dst_line = dst->lines[dst_row];
+ u32 *dst_line = dst_lines[dst_row];
memset32(dst_line, ' ', dst_cols);
dst_row++;
@@ -500,23 +478,20 @@ static void vc_uniscr_copy_area(struct uni_screen *dst,
*/
int vc_uniscr_check(struct vc_data *vc)
{
- struct uni_screen *uniscr;
+ u32 **uni_lines;
unsigned short *p;
int x, y, mask;
- if (__is_defined(NO_VC_UNI_SCREEN))
- return -EOPNOTSUPP;
-
WARN_CONSOLE_UNLOCKED();
if (!vc->vc_utf)
return -ENODATA;
- if (vc->vc_uni_screen)
+ if (vc->vc_uni_lines)
return 0;
- uniscr = vc_uniscr_alloc(vc->vc_cols, vc->vc_rows);
- if (!uniscr)
+ uni_lines = vc_uniscr_alloc(vc->vc_cols, vc->vc_rows);
+ if (!uni_lines)
return -ENOMEM;
/*
@@ -528,14 +503,15 @@ int vc_uniscr_check(struct vc_data *vc)
p = (unsigned short *)vc->vc_origin;
mask = vc->vc_hi_font_mask | 0xff;
for (y = 0; y < vc->vc_rows; y++) {
- char32_t *line = uniscr->lines[y];
+ u32 *line = uni_lines[y];
for (x = 0; x < vc->vc_cols; x++) {
u16 glyph = scr_readw(p++) & mask;
line[x] = inverse_translate(vc, glyph, true);
}
}
- vc->vc_uni_screen = uniscr;
+ vc->vc_uni_lines = uni_lines;
+
return 0;
}
@@ -547,11 +523,12 @@ int vc_uniscr_check(struct vc_data *vc)
void vc_uniscr_copy_line(const struct vc_data *vc, void *dest, bool viewed,
unsigned int row, unsigned int col, unsigned int nr)
{
- struct uni_screen *uniscr = get_vc_uniscr(vc);
+ u32 **uni_lines = vc->vc_uni_lines;
int offset = row * vc->vc_size_row + col * 2;
unsigned long pos;
- BUG_ON(!uniscr);
+ if (WARN_ON_ONCE(!uni_lines))
+ return;
pos = (unsigned long)screenpos(vc, offset, viewed);
if (pos >= vc->vc_origin && pos < vc->vc_scr_end) {
@@ -562,7 +539,7 @@ void vc_uniscr_copy_line(const struct vc_data *vc, void *dest, bool viewed,
*/
row = (pos - vc->vc_origin) / vc->vc_size_row;
col = ((pos - vc->vc_origin) % vc->vc_size_row) / 2;
- memcpy(dest, &uniscr->lines[row][col], nr * sizeof(char32_t));
+ memcpy(dest, &uni_lines[row][col], nr * sizeof(u32));
} else {
/*
* Scrollback is active. For now let's simply backtranslate
@@ -572,7 +549,7 @@ void vc_uniscr_copy_line(const struct vc_data *vc, void *dest, bool viewed,
*/
u16 *p = (u16 *)pos;
int mask = vc->vc_hi_font_mask | 0xff;
- char32_t *uni_buf = dest;
+ u32 *uni_buf = dest;
while (nr--) {
u16 glyph = scr_readw(p++) & mask;
*uni_buf++ = inverse_translate(vc, glyph, true);
@@ -580,64 +557,31 @@ void vc_uniscr_copy_line(const struct vc_data *vc, void *dest, bool viewed,
}
}
-/* this is for validation and debugging only */
-static void vc_uniscr_debug_check(struct vc_data *vc)
+static void con_scroll(struct vc_data *vc, unsigned int top,
+ unsigned int bottom, enum con_scroll dir,
+ unsigned int nr)
{
- struct uni_screen *uniscr = get_vc_uniscr(vc);
- unsigned short *p;
- int x, y, mask;
+ unsigned int rows = bottom - top;
+ u16 *clear, *dst, *src;
- if (!VC_UNI_SCREEN_DEBUG || !uniscr)
+ if (top + nr >= bottom)
+ nr = rows - 1;
+ if (bottom > vc->vc_rows || top >= bottom || nr < 1)
return;
- WARN_CONSOLE_UNLOCKED();
-
- /*
- * Make sure our unicode screen translates into the same glyphs
- * as the actual screen. This is brutal indeed.
- */
- p = (unsigned short *)vc->vc_origin;
- mask = vc->vc_hi_font_mask | 0xff;
- for (y = 0; y < vc->vc_rows; y++) {
- char32_t *line = uniscr->lines[y];
- for (x = 0; x < vc->vc_cols; x++) {
- u16 glyph = scr_readw(p++) & mask;
- char32_t uc = line[x];
- int tc = conv_uni_to_pc(vc, uc);
- if (tc == -4)
- tc = conv_uni_to_pc(vc, 0xfffd);
- if (tc == -4)
- tc = conv_uni_to_pc(vc, '?');
- if (tc != glyph)
- pr_err_ratelimited(
- "%s: mismatch at %d,%d: glyph=%#x tc=%#x\n",
- __func__, x, y, glyph, tc);
- }
- }
-}
-
-
-static void con_scroll(struct vc_data *vc, unsigned int t, unsigned int b,
- enum con_scroll dir, unsigned int nr)
-{
- u16 *clear, *d, *s;
-
- if (t + nr >= b)
- nr = b - t - 1;
- if (b > vc->vc_rows || t >= b || nr < 1)
- return;
- vc_uniscr_scroll(vc, t, b, dir, nr);
- if (con_is_visible(vc) && vc->vc_sw->con_scroll(vc, t, b, dir, nr))
+ vc_uniscr_scroll(vc, top, bottom, dir, nr);
+ if (con_is_visible(vc) &&
+ vc->vc_sw->con_scroll(vc, top, bottom, dir, nr))
return;
- s = clear = (u16 *)(vc->vc_origin + vc->vc_size_row * t);
- d = (u16 *)(vc->vc_origin + vc->vc_size_row * (t + nr));
+ src = clear = (u16 *)(vc->vc_origin + vc->vc_size_row * top);
+ dst = (u16 *)(vc->vc_origin + vc->vc_size_row * (top + nr));
if (dir == SM_UP) {
- clear = s + (b - t - nr) * vc->vc_cols;
- swap(s, d);
+ clear = src + (rows - nr) * vc->vc_cols;
+ swap(src, dst);
}
- scr_memmovew(d, s, (b - t - nr) * vc->vc_size_row);
+ scr_memmovew(dst, src, (rows - nr) * vc->vc_size_row);
scr_memsetw(clear, vc->vc_video_erase_char, vc->vc_size_row * nr);
}
@@ -1201,7 +1145,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc,
unsigned int new_cols, new_rows, new_row_size, new_screen_size;
unsigned int user;
unsigned short *oldscreen, *newscreen;
- struct uni_screen *new_uniscr = NULL;
+ u32 **new_uniscr = NULL;
WARN_CONSOLE_UNLOCKED();
@@ -1245,7 +1189,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc,
if (!newscreen)
return -ENOMEM;
- if (get_vc_uniscr(vc)) {
+ if (vc->vc_uni_lines) {
new_uniscr = vc_uniscr_alloc(new_cols, new_rows);
if (!new_uniscr) {
kfree(newscreen);
@@ -1297,7 +1241,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc,
end = old_origin + old_row_size * min(old_rows, new_rows);
vc_uniscr_copy_area(new_uniscr, new_cols, new_rows,
- get_vc_uniscr(vc), rlth/2, first_copied_row,
+ vc->vc_uni_lines, rlth/2, first_copied_row,
min(old_rows, new_rows));
vc_uniscr_set(vc, new_uniscr);
@@ -2959,7 +2903,6 @@ rescan_last_byte:
goto rescan_last_byte;
}
con_flush(vc, &draw);
- vc_uniscr_debug_check(vc);
console_conditional_schedule();
notify_update(vc);
console_unlock();
@@ -3156,8 +3099,14 @@ static struct tty_driver *vt_console_device(struct console *c, int *index)
return console_driver;
}
+static int vt_console_setup(struct console *co, char *options)
+{
+ return co->index >= MAX_NR_CONSOLES ? -EINVAL : 0;
+}
+
static struct console vt_console_driver = {
.name = "tty",
+ .setup = vt_console_setup,
.write = vt_console_print,
.device = vt_console_device,
.unblank = unblank_screen,
@@ -4574,26 +4523,30 @@ void reset_palette(struct vc_data *vc)
/*
* Font switching
*
- * Currently we only support fonts up to 32 pixels wide, at a maximum height
- * of 32 pixels. Userspace fontdata is stored with 32 bytes (shorts/ints,
- * depending on width) reserved for each character which is kinda wasty, but
- * this is done in order to maintain compatibility with the EGA/VGA fonts. It
- * is up to the actual low-level console-driver convert data into its favorite
- * format (maybe we should add a `fontoffset' field to the `display'
- * structure so we won't have to convert the fontdata all the time.
+ * Currently we only support fonts up to 128 pixels wide, at a maximum height
+ * of 128 pixels. Userspace fontdata may have to be stored with 32 bytes
+ * (shorts/ints, depending on width) reserved for each character which is
+ * kinda wasty, but this is done in order to maintain compatibility with the
+ * EGA/VGA fonts. It is up to the actual low-level console-driver convert data
+ * into its favorite format (maybe we should add a `fontoffset' field to the
+ * `display' structure so we won't have to convert the fontdata all the time.
* /Jes
*/
-#define max_font_size 65536
+#define max_font_width 64
+#define max_font_height 128
+#define max_font_glyphs 512
+#define max_font_size (max_font_glyphs*max_font_width*max_font_height)
static int con_font_get(struct vc_data *vc, struct console_font_op *op)
{
struct console_font font;
int rc = -EINVAL;
int c;
+ unsigned int vpitch = op->op == KD_FONT_OP_GET_TALL ? op->height : 32;
if (op->data) {
- font.data = kmalloc(max_font_size, GFP_KERNEL);
+ font.data = kvmalloc(max_font_size, GFP_KERNEL);
if (!font.data)
return -ENOMEM;
} else
@@ -4603,7 +4556,7 @@ static int con_font_get(struct vc_data *vc, struct console_font_op *op)
if (vc->vc_mode != KD_TEXT)
rc = -EINVAL;
else if (vc->vc_sw->con_font_get)
- rc = vc->vc_sw->con_font_get(vc, &font);
+ rc = vc->vc_sw->con_font_get(vc, &font, vpitch);
else
rc = -ENOSYS;
console_unlock();
@@ -4611,7 +4564,7 @@ static int con_font_get(struct vc_data *vc, struct console_font_op *op)
if (rc)
goto out;
- c = (font.width+7)/8 * 32 * font.charcount;
+ c = (font.width+7)/8 * vpitch * font.charcount;
if (op->data && font.charcount > op->charcount)
rc = -ENOSPC;
@@ -4628,7 +4581,7 @@ static int con_font_get(struct vc_data *vc, struct console_font_op *op)
rc = -EFAULT;
out:
- kfree(font.data);
+ kvfree(font.data);
return rc;
}
@@ -4637,16 +4590,20 @@ static int con_font_set(struct vc_data *vc, struct console_font_op *op)
struct console_font font;
int rc = -EINVAL;
int size;
+ unsigned int vpitch = op->op == KD_FONT_OP_SET_TALL ? op->height : 32;
if (vc->vc_mode != KD_TEXT)
return -EINVAL;
if (!op->data)
return -EINVAL;
- if (op->charcount > 512)
+ if (op->charcount > max_font_glyphs)
+ return -EINVAL;
+ if (op->width <= 0 || op->width > max_font_width || !op->height ||
+ op->height > max_font_height)
return -EINVAL;
- if (op->width <= 0 || op->width > 32 || !op->height || op->height > 32)
+ if (vpitch < op->height)
return -EINVAL;
- size = (op->width+7)/8 * 32 * op->charcount;
+ size = (op->width+7)/8 * vpitch * op->charcount;
if (size > max_font_size)
return -ENOSPC;
@@ -4664,7 +4621,7 @@ static int con_font_set(struct vc_data *vc, struct console_font_op *op)
else if (vc->vc_sw->con_font_set) {
if (vc_is_sel(vc))
clear_selection();
- rc = vc->vc_sw->con_font_set(vc, &font, op->flags);
+ rc = vc->vc_sw->con_font_set(vc, &font, vpitch, op->flags);
} else
rc = -ENOSYS;
console_unlock();
@@ -4710,8 +4667,10 @@ int con_font_op(struct vc_data *vc, struct console_font_op *op)
{
switch (op->op) {
case KD_FONT_OP_SET:
+ case KD_FONT_OP_SET_TALL:
return con_font_set(vc, op);
case KD_FONT_OP_GET:
+ case KD_FONT_OP_GET_TALL:
return con_font_get(vc, op);
case KD_FONT_OP_SET_DEFAULT:
return con_font_default(vc, op);
@@ -4740,10 +4699,11 @@ EXPORT_SYMBOL_GPL(screen_glyph);
u32 screen_glyph_unicode(const struct vc_data *vc, int n)
{
- struct uni_screen *uniscr = get_vc_uniscr(vc);
+ u32 **uni_lines = vc->vc_uni_lines;
+
+ if (uni_lines)
+ return uni_lines[n / vc->vc_cols][n % vc->vc_cols];
- if (uniscr)
- return uniscr->lines[n / vc->vc_cols][n % vc->vc_cols];
return inverse_translate(vc, screen_glyph(vc, n * 2), true);
}
EXPORT_SYMBOL_GPL(screen_glyph_unicode);
diff --git a/drivers/ufs/core/Makefile b/drivers/ufs/core/Makefile
index 62f38c5bf857..4d02e0f2de10 100644
--- a/drivers/ufs/core/Makefile
+++ b/drivers/ufs/core/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_SCSI_UFSHCD) += ufshcd-core.o
-ufshcd-core-y += ufshcd.o ufs-sysfs.o
+ufshcd-core-y += ufshcd.o ufs-sysfs.o ufs-mcq.o
ufshcd-core-$(CONFIG_DEBUG_FS) += ufs-debugfs.o
ufshcd-core-$(CONFIG_SCSI_UFS_BSG) += ufs_bsg.o
ufshcd-core-$(CONFIG_SCSI_UFS_CRYPTO) += ufshcd-crypto.o
diff --git a/drivers/ufs/core/ufs-mcq.c b/drivers/ufs/core/ufs-mcq.c
new file mode 100644
index 000000000000..31df052fbc41
--- /dev/null
+++ b/drivers/ufs/core/ufs-mcq.c
@@ -0,0 +1,431 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022 Qualcomm Innovation Center. All rights reserved.
+ *
+ * Authors:
+ * Asutosh Das <quic_asutoshd@quicinc.com>
+ * Can Guo <quic_cang@quicinc.com>
+ */
+
+#include <asm/unaligned.h>
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include "ufshcd-priv.h"
+
+#define MAX_QUEUE_SUP GENMASK(7, 0)
+#define UFS_MCQ_MIN_RW_QUEUES 2
+#define UFS_MCQ_MIN_READ_QUEUES 0
+#define UFS_MCQ_NUM_DEV_CMD_QUEUES 1
+#define UFS_MCQ_MIN_POLL_QUEUES 0
+#define QUEUE_EN_OFFSET 31
+#define QUEUE_ID_OFFSET 16
+
+#define MAX_DEV_CMD_ENTRIES 2
+#define MCQ_CFG_MAC_MASK GENMASK(16, 8)
+#define MCQ_QCFG_SIZE 0x40
+#define MCQ_ENTRY_SIZE_IN_DWORD 8
+#define CQE_UCD_BA GENMASK_ULL(63, 7)
+
+static int rw_queue_count_set(const char *val, const struct kernel_param *kp)
+{
+ return param_set_uint_minmax(val, kp, UFS_MCQ_MIN_RW_QUEUES,
+ num_possible_cpus());
+}
+
+static const struct kernel_param_ops rw_queue_count_ops = {
+ .set = rw_queue_count_set,
+ .get = param_get_uint,
+};
+
+static unsigned int rw_queues;
+module_param_cb(rw_queues, &rw_queue_count_ops, &rw_queues, 0644);
+MODULE_PARM_DESC(rw_queues,
+ "Number of interrupt driven I/O queues used for rw. Default value is nr_cpus");
+
+static int read_queue_count_set(const char *val, const struct kernel_param *kp)
+{
+ return param_set_uint_minmax(val, kp, UFS_MCQ_MIN_READ_QUEUES,
+ num_possible_cpus());
+}
+
+static const struct kernel_param_ops read_queue_count_ops = {
+ .set = read_queue_count_set,
+ .get = param_get_uint,
+};
+
+static unsigned int read_queues;
+module_param_cb(read_queues, &read_queue_count_ops, &read_queues, 0644);
+MODULE_PARM_DESC(read_queues,
+ "Number of interrupt driven read queues used for read. Default value is 0");
+
+static int poll_queue_count_set(const char *val, const struct kernel_param *kp)
+{
+ return param_set_uint_minmax(val, kp, UFS_MCQ_MIN_POLL_QUEUES,
+ num_possible_cpus());
+}
+
+static const struct kernel_param_ops poll_queue_count_ops = {
+ .set = poll_queue_count_set,
+ .get = param_get_uint,
+};
+
+static unsigned int poll_queues = 1;
+module_param_cb(poll_queues, &poll_queue_count_ops, &poll_queues, 0644);
+MODULE_PARM_DESC(poll_queues,
+ "Number of poll queues used for r/w. Default value is 1");
+
+/**
+ * ufshcd_mcq_config_mac - Set the #Max Activ Cmds.
+ * @hba: per adapter instance
+ * @max_active_cmds: maximum # of active commands to the device at any time.
+ *
+ * The controller won't send more than the max_active_cmds to the device at
+ * any time.
+ */
+void ufshcd_mcq_config_mac(struct ufs_hba *hba, u32 max_active_cmds)
+{
+ u32 val;
+
+ val = ufshcd_readl(hba, REG_UFS_MCQ_CFG);
+ val &= ~MCQ_CFG_MAC_MASK;
+ val |= FIELD_PREP(MCQ_CFG_MAC_MASK, max_active_cmds);
+ ufshcd_writel(hba, val, REG_UFS_MCQ_CFG);
+}
+
+/**
+ * ufshcd_mcq_req_to_hwq - find the hardware queue on which the
+ * request would be issued.
+ * @hba: per adapter instance
+ * @req: pointer to the request to be issued
+ *
+ * Returns the hardware queue instance on which the request would
+ * be queued.
+ */
+struct ufs_hw_queue *ufshcd_mcq_req_to_hwq(struct ufs_hba *hba,
+ struct request *req)
+{
+ u32 utag = blk_mq_unique_tag(req);
+ u32 hwq = blk_mq_unique_tag_to_hwq(utag);
+
+ /* uhq[0] is used to serve device commands */
+ return &hba->uhq[hwq + UFSHCD_MCQ_IO_QUEUE_OFFSET];
+}
+
+/**
+ * ufshcd_mcq_decide_queue_depth - decide the queue depth
+ * @hba: per adapter instance
+ *
+ * Returns queue-depth on success, non-zero on error
+ *
+ * MAC - Max. Active Command of the Host Controller (HC)
+ * HC wouldn't send more than this commands to the device.
+ * It is mandatory to implement get_hba_mac() to enable MCQ mode.
+ * Calculates and adjusts the queue depth based on the depth
+ * supported by the HC and ufs device.
+ */
+int ufshcd_mcq_decide_queue_depth(struct ufs_hba *hba)
+{
+ int mac;
+
+ /* Mandatory to implement get_hba_mac() */
+ mac = ufshcd_mcq_vops_get_hba_mac(hba);
+ if (mac < 0) {
+ dev_err(hba->dev, "Failed to get mac, err=%d\n", mac);
+ return mac;
+ }
+
+ WARN_ON_ONCE(!hba->dev_info.bqueuedepth);
+ /*
+ * max. value of bqueuedepth = 256, mac is host dependent.
+ * It is mandatory for UFS device to define bQueueDepth if
+ * shared queuing architecture is enabled.
+ */
+ return min_t(int, mac, hba->dev_info.bqueuedepth);
+}
+
+static int ufshcd_mcq_config_nr_queues(struct ufs_hba *hba)
+{
+ int i;
+ u32 hba_maxq, rem, tot_queues;
+ struct Scsi_Host *host = hba->host;
+
+ hba_maxq = FIELD_GET(MAX_QUEUE_SUP, hba->mcq_capabilities);
+
+ tot_queues = UFS_MCQ_NUM_DEV_CMD_QUEUES + read_queues + poll_queues +
+ rw_queues;
+
+ if (hba_maxq < tot_queues) {
+ dev_err(hba->dev, "Total queues (%d) exceeds HC capacity (%d)\n",
+ tot_queues, hba_maxq);
+ return -EOPNOTSUPP;
+ }
+
+ rem = hba_maxq - UFS_MCQ_NUM_DEV_CMD_QUEUES;
+
+ if (rw_queues) {
+ hba->nr_queues[HCTX_TYPE_DEFAULT] = rw_queues;
+ rem -= hba->nr_queues[HCTX_TYPE_DEFAULT];
+ } else {
+ rw_queues = num_possible_cpus();
+ }
+
+ if (poll_queues) {
+ hba->nr_queues[HCTX_TYPE_POLL] = poll_queues;
+ rem -= hba->nr_queues[HCTX_TYPE_POLL];
+ }
+
+ if (read_queues) {
+ hba->nr_queues[HCTX_TYPE_READ] = read_queues;
+ rem -= hba->nr_queues[HCTX_TYPE_READ];
+ }
+
+ if (!hba->nr_queues[HCTX_TYPE_DEFAULT])
+ hba->nr_queues[HCTX_TYPE_DEFAULT] = min3(rem, rw_queues,
+ num_possible_cpus());
+
+ for (i = 0; i < HCTX_MAX_TYPES; i++)
+ host->nr_hw_queues += hba->nr_queues[i];
+
+ hba->nr_hw_queues = host->nr_hw_queues + UFS_MCQ_NUM_DEV_CMD_QUEUES;
+ return 0;
+}
+
+int ufshcd_mcq_memory_alloc(struct ufs_hba *hba)
+{
+ struct ufs_hw_queue *hwq;
+ size_t utrdl_size, cqe_size;
+ int i;
+
+ for (i = 0; i < hba->nr_hw_queues; i++) {
+ hwq = &hba->uhq[i];
+
+ utrdl_size = sizeof(struct utp_transfer_req_desc) *
+ hwq->max_entries;
+ hwq->sqe_base_addr = dmam_alloc_coherent(hba->dev, utrdl_size,
+ &hwq->sqe_dma_addr,
+ GFP_KERNEL);
+ if (!hwq->sqe_dma_addr) {
+ dev_err(hba->dev, "SQE allocation failed\n");
+ return -ENOMEM;
+ }
+
+ cqe_size = sizeof(struct cq_entry) * hwq->max_entries;
+ hwq->cqe_base_addr = dmam_alloc_coherent(hba->dev, cqe_size,
+ &hwq->cqe_dma_addr,
+ GFP_KERNEL);
+ if (!hwq->cqe_dma_addr) {
+ dev_err(hba->dev, "CQE allocation failed\n");
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+
+/* Operation and runtime registers configuration */
+#define MCQ_CFG_n(r, i) ((r) + MCQ_QCFG_SIZE * (i))
+#define MCQ_OPR_OFFSET_n(p, i) \
+ (hba->mcq_opr[(p)].offset + hba->mcq_opr[(p)].stride * (i))
+
+static void __iomem *mcq_opr_base(struct ufs_hba *hba,
+ enum ufshcd_mcq_opr n, int i)
+{
+ struct ufshcd_mcq_opr_info_t *opr = &hba->mcq_opr[n];
+
+ return opr->base + opr->stride * i;
+}
+
+u32 ufshcd_mcq_read_cqis(struct ufs_hba *hba, int i)
+{
+ return readl(mcq_opr_base(hba, OPR_CQIS, i) + REG_CQIS);
+}
+
+void ufshcd_mcq_write_cqis(struct ufs_hba *hba, u32 val, int i)
+{
+ writel(val, mcq_opr_base(hba, OPR_CQIS, i) + REG_CQIS);
+}
+EXPORT_SYMBOL_GPL(ufshcd_mcq_write_cqis);
+
+/*
+ * Current MCQ specification doesn't provide a Task Tag or its equivalent in
+ * the Completion Queue Entry. Find the Task Tag using an indirect method.
+ */
+static int ufshcd_mcq_get_tag(struct ufs_hba *hba,
+ struct ufs_hw_queue *hwq,
+ struct cq_entry *cqe)
+{
+ u64 addr;
+
+ /* sizeof(struct utp_transfer_cmd_desc) must be a multiple of 128 */
+ BUILD_BUG_ON(sizeof(struct utp_transfer_cmd_desc) & GENMASK(6, 0));
+
+ /* Bits 63:7 UCD base address, 6:5 are reserved, 4:0 is SQ ID */
+ addr = (le64_to_cpu(cqe->command_desc_base_addr) & CQE_UCD_BA) -
+ hba->ucdl_dma_addr;
+
+ return div_u64(addr, sizeof(struct utp_transfer_cmd_desc));
+}
+
+static void ufshcd_mcq_process_cqe(struct ufs_hba *hba,
+ struct ufs_hw_queue *hwq)
+{
+ struct cq_entry *cqe = ufshcd_mcq_cur_cqe(hwq);
+ int tag = ufshcd_mcq_get_tag(hba, hwq, cqe);
+
+ ufshcd_compl_one_cqe(hba, tag, cqe);
+}
+
+unsigned long ufshcd_mcq_poll_cqe_nolock(struct ufs_hba *hba,
+ struct ufs_hw_queue *hwq)
+{
+ unsigned long completed_reqs = 0;
+
+ ufshcd_mcq_update_cq_tail_slot(hwq);
+ while (!ufshcd_mcq_is_cq_empty(hwq)) {
+ ufshcd_mcq_process_cqe(hba, hwq);
+ ufshcd_mcq_inc_cq_head_slot(hwq);
+ completed_reqs++;
+ }
+
+ if (completed_reqs)
+ ufshcd_mcq_update_cq_head(hwq);
+
+ return completed_reqs;
+}
+EXPORT_SYMBOL_GPL(ufshcd_mcq_poll_cqe_nolock);
+
+unsigned long ufshcd_mcq_poll_cqe_lock(struct ufs_hba *hba,
+ struct ufs_hw_queue *hwq)
+{
+ unsigned long completed_reqs;
+
+ spin_lock(&hwq->cq_lock);
+ completed_reqs = ufshcd_mcq_poll_cqe_nolock(hba, hwq);
+ spin_unlock(&hwq->cq_lock);
+
+ return completed_reqs;
+}
+
+void ufshcd_mcq_make_queues_operational(struct ufs_hba *hba)
+{
+ struct ufs_hw_queue *hwq;
+ u16 qsize;
+ int i;
+
+ for (i = 0; i < hba->nr_hw_queues; i++) {
+ hwq = &hba->uhq[i];
+ hwq->id = i;
+ qsize = hwq->max_entries * MCQ_ENTRY_SIZE_IN_DWORD - 1;
+
+ /* Submission Queue Lower Base Address */
+ ufsmcq_writelx(hba, lower_32_bits(hwq->sqe_dma_addr),
+ MCQ_CFG_n(REG_SQLBA, i));
+ /* Submission Queue Upper Base Address */
+ ufsmcq_writelx(hba, upper_32_bits(hwq->sqe_dma_addr),
+ MCQ_CFG_n(REG_SQUBA, i));
+ /* Submission Queue Doorbell Address Offset */
+ ufsmcq_writelx(hba, MCQ_OPR_OFFSET_n(OPR_SQD, i),
+ MCQ_CFG_n(REG_SQDAO, i));
+ /* Submission Queue Interrupt Status Address Offset */
+ ufsmcq_writelx(hba, MCQ_OPR_OFFSET_n(OPR_SQIS, i),
+ MCQ_CFG_n(REG_SQISAO, i));
+
+ /* Completion Queue Lower Base Address */
+ ufsmcq_writelx(hba, lower_32_bits(hwq->cqe_dma_addr),
+ MCQ_CFG_n(REG_CQLBA, i));
+ /* Completion Queue Upper Base Address */
+ ufsmcq_writelx(hba, upper_32_bits(hwq->cqe_dma_addr),
+ MCQ_CFG_n(REG_CQUBA, i));
+ /* Completion Queue Doorbell Address Offset */
+ ufsmcq_writelx(hba, MCQ_OPR_OFFSET_n(OPR_CQD, i),
+ MCQ_CFG_n(REG_CQDAO, i));
+ /* Completion Queue Interrupt Status Address Offset */
+ ufsmcq_writelx(hba, MCQ_OPR_OFFSET_n(OPR_CQIS, i),
+ MCQ_CFG_n(REG_CQISAO, i));
+
+ /* Save the base addresses for quicker access */
+ hwq->mcq_sq_head = mcq_opr_base(hba, OPR_SQD, i) + REG_SQHP;
+ hwq->mcq_sq_tail = mcq_opr_base(hba, OPR_SQD, i) + REG_SQTP;
+ hwq->mcq_cq_head = mcq_opr_base(hba, OPR_CQD, i) + REG_CQHP;
+ hwq->mcq_cq_tail = mcq_opr_base(hba, OPR_CQD, i) + REG_CQTP;
+
+ /* Reinitializing is needed upon HC reset */
+ hwq->sq_tail_slot = hwq->cq_tail_slot = hwq->cq_head_slot = 0;
+
+ /* Enable Tail Entry Push Status interrupt only for non-poll queues */
+ if (i < hba->nr_hw_queues - hba->nr_queues[HCTX_TYPE_POLL])
+ writel(1, mcq_opr_base(hba, OPR_CQIS, i) + REG_CQIE);
+
+ /* Completion Queue Enable|Size to Completion Queue Attribute */
+ ufsmcq_writel(hba, (1 << QUEUE_EN_OFFSET) | qsize,
+ MCQ_CFG_n(REG_CQATTR, i));
+
+ /*
+ * Submission Qeueue Enable|Size|Completion Queue ID to
+ * Submission Queue Attribute
+ */
+ ufsmcq_writel(hba, (1 << QUEUE_EN_OFFSET) | qsize |
+ (i << QUEUE_ID_OFFSET),
+ MCQ_CFG_n(REG_SQATTR, i));
+ }
+}
+
+void ufshcd_mcq_enable_esi(struct ufs_hba *hba)
+{
+ ufshcd_writel(hba, ufshcd_readl(hba, REG_UFS_MEM_CFG) | 0x2,
+ REG_UFS_MEM_CFG);
+}
+EXPORT_SYMBOL_GPL(ufshcd_mcq_enable_esi);
+
+void ufshcd_mcq_config_esi(struct ufs_hba *hba, struct msi_msg *msg)
+{
+ ufshcd_writel(hba, msg->address_lo, REG_UFS_ESILBA);
+ ufshcd_writel(hba, msg->address_hi, REG_UFS_ESIUBA);
+}
+EXPORT_SYMBOL_GPL(ufshcd_mcq_config_esi);
+
+int ufshcd_mcq_init(struct ufs_hba *hba)
+{
+ struct Scsi_Host *host = hba->host;
+ struct ufs_hw_queue *hwq;
+ int ret, i;
+
+ ret = ufshcd_mcq_config_nr_queues(hba);
+ if (ret)
+ return ret;
+
+ ret = ufshcd_vops_mcq_config_resource(hba);
+ if (ret)
+ return ret;
+
+ ret = ufshcd_mcq_vops_op_runtime_config(hba);
+ if (ret) {
+ dev_err(hba->dev, "Operation runtime config failed, ret=%d\n",
+ ret);
+ return ret;
+ }
+ hba->uhq = devm_kzalloc(hba->dev,
+ hba->nr_hw_queues * sizeof(struct ufs_hw_queue),
+ GFP_KERNEL);
+ if (!hba->uhq) {
+ dev_err(hba->dev, "ufs hw queue memory allocation failed\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < hba->nr_hw_queues; i++) {
+ hwq = &hba->uhq[i];
+ hwq->max_entries = hba->nutrs;
+ spin_lock_init(&hwq->sq_lock);
+ spin_lock_init(&hwq->cq_lock);
+ }
+
+ /* The very first HW queue serves device commands */
+ hba->dev_cmd_queue = &hba->uhq[0];
+ /* Give dev_cmd_queue the minimal number of entries */
+ hba->dev_cmd_queue->max_entries = MAX_DEV_CMD_ENTRIES;
+
+ host->host_tagset = 1;
+ return 0;
+}
diff --git a/drivers/ufs/core/ufs_bsg.c b/drivers/ufs/core/ufs_bsg.c
index b99e3f3dc4ef..0d38e7fa34cc 100644
--- a/drivers/ufs/core/ufs_bsg.c
+++ b/drivers/ufs/core/ufs_bsg.c
@@ -6,6 +6,7 @@
*/
#include <linux/bsg-lib.h>
+#include <linux/dma-mapping.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include "ufs_bsg.h"
@@ -16,31 +17,11 @@ static int ufs_bsg_get_query_desc_size(struct ufs_hba *hba, int *desc_len,
struct utp_upiu_query *qr)
{
int desc_size = be16_to_cpu(qr->length);
- int desc_id = qr->idn;
if (desc_size <= 0)
return -EINVAL;
- ufshcd_map_desc_id_to_length(hba, desc_id, desc_len);
- if (!*desc_len)
- return -EINVAL;
-
- *desc_len = min_t(int, *desc_len, desc_size);
-
- return 0;
-}
-
-static int ufs_bsg_verify_query_size(struct ufs_hba *hba,
- unsigned int request_len,
- unsigned int reply_len)
-{
- int min_req_len = sizeof(struct ufs_bsg_request);
- int min_rsp_len = sizeof(struct ufs_bsg_reply);
-
- if (min_req_len > request_len || min_rsp_len > reply_len) {
- dev_err(hba->dev, "not enough space assigned\n");
- return -EINVAL;
- }
+ *desc_len = min_t(int, QUERY_DESC_MAX_SIZE, desc_size);
return 0;
}
@@ -83,23 +64,84 @@ out:
return 0;
}
+static int ufs_bsg_exec_advanced_rpmb_req(struct ufs_hba *hba, struct bsg_job *job)
+{
+ struct ufs_rpmb_request *rpmb_request = job->request;
+ struct ufs_rpmb_reply *rpmb_reply = job->reply;
+ struct bsg_buffer *payload = NULL;
+ enum dma_data_direction dir;
+ struct scatterlist *sg_list = NULL;
+ int rpmb_req_type;
+ int sg_cnt = 0;
+ int ret;
+ int data_len;
+
+ if (hba->ufs_version < ufshci_version(4, 0) || !hba->dev_info.b_advanced_rpmb_en ||
+ !(hba->capabilities & MASK_EHSLUTRD_SUPPORTED))
+ return -EINVAL;
+
+ if (rpmb_request->ehs_req.length != 2 || rpmb_request->ehs_req.ehs_type != 1)
+ return -EINVAL;
+
+ rpmb_req_type = be16_to_cpu(rpmb_request->ehs_req.meta.req_resp_type);
+
+ switch (rpmb_req_type) {
+ case UFS_RPMB_WRITE_KEY:
+ case UFS_RPMB_READ_CNT:
+ case UFS_RPMB_PURGE_ENABLE:
+ dir = DMA_NONE;
+ break;
+ case UFS_RPMB_WRITE:
+ case UFS_RPMB_SEC_CONF_WRITE:
+ dir = DMA_TO_DEVICE;
+ break;
+ case UFS_RPMB_READ:
+ case UFS_RPMB_SEC_CONF_READ:
+ case UFS_RPMB_PURGE_STATUS_READ:
+ dir = DMA_FROM_DEVICE;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (dir != DMA_NONE) {
+ payload = &job->request_payload;
+ if (!payload || !payload->payload_len || !payload->sg_cnt)
+ return -EINVAL;
+
+ sg_cnt = dma_map_sg(hba->host->dma_dev, payload->sg_list, payload->sg_cnt, dir);
+ if (unlikely(!sg_cnt))
+ return -ENOMEM;
+ sg_list = payload->sg_list;
+ data_len = payload->payload_len;
+ }
+
+ ret = ufshcd_advanced_rpmb_req_handler(hba, &rpmb_request->bsg_request.upiu_req,
+ &rpmb_reply->bsg_reply.upiu_rsp, &rpmb_request->ehs_req,
+ &rpmb_reply->ehs_rsp, sg_cnt, sg_list, dir);
+
+ if (dir != DMA_NONE) {
+ dma_unmap_sg(hba->host->dma_dev, payload->sg_list, payload->sg_cnt, dir);
+
+ if (!ret)
+ rpmb_reply->bsg_reply.reply_payload_rcv_len = data_len;
+ }
+
+ return ret;
+}
+
static int ufs_bsg_request(struct bsg_job *job)
{
struct ufs_bsg_request *bsg_request = job->request;
struct ufs_bsg_reply *bsg_reply = job->reply;
struct ufs_hba *hba = shost_priv(dev_to_shost(job->dev->parent));
- unsigned int req_len = job->request_len;
- unsigned int reply_len = job->reply_len;
struct uic_command uc = {};
int msgcode;
- uint8_t *desc_buff = NULL;
+ uint8_t *buff = NULL;
int desc_len = 0;
enum query_opcode desc_op = UPIU_QUERY_OPCODE_NOP;
int ret;
-
- ret = ufs_bsg_verify_query_size(hba, req_len, reply_len);
- if (ret)
- goto out;
+ bool rpmb = false;
bsg_reply->reply_payload_rcv_len = 0;
@@ -109,34 +151,39 @@ static int ufs_bsg_request(struct bsg_job *job)
switch (msgcode) {
case UPIU_TRANSACTION_QUERY_REQ:
desc_op = bsg_request->upiu_req.qr.opcode;
- ret = ufs_bsg_alloc_desc_buffer(hba, job, &desc_buff,
- &desc_len, desc_op);
- if (ret) {
- ufshcd_rpm_put_sync(hba);
+ ret = ufs_bsg_alloc_desc_buffer(hba, job, &buff, &desc_len, desc_op);
+ if (ret)
goto out;
- }
-
fallthrough;
case UPIU_TRANSACTION_NOP_OUT:
case UPIU_TRANSACTION_TASK_REQ:
ret = ufshcd_exec_raw_upiu_cmd(hba, &bsg_request->upiu_req,
&bsg_reply->upiu_rsp, msgcode,
- desc_buff, &desc_len, desc_op);
+ buff, &desc_len, desc_op);
if (ret)
- dev_err(hba->dev,
- "exe raw upiu: error code %d\n", ret);
-
+ dev_err(hba->dev, "exe raw upiu: error code %d\n", ret);
+ else if (desc_op == UPIU_QUERY_OPCODE_READ_DESC && desc_len) {
+ bsg_reply->reply_payload_rcv_len =
+ sg_copy_from_buffer(job->request_payload.sg_list,
+ job->request_payload.sg_cnt,
+ buff, desc_len);
+ }
break;
case UPIU_TRANSACTION_UIC_CMD:
memcpy(&uc, &bsg_request->upiu_req.uc, UIC_CMD_SIZE);
ret = ufshcd_send_uic_cmd(hba, &uc);
if (ret)
- dev_err(hba->dev,
- "send uic cmd: error code %d\n", ret);
+ dev_err(hba->dev, "send uic cmd: error code %d\n", ret);
memcpy(&bsg_reply->upiu_rsp.uc, &uc, UIC_CMD_SIZE);
break;
+ case UPIU_TRANSACTION_ARPMB_CMD:
+ rpmb = true;
+ ret = ufs_bsg_exec_advanced_rpmb_req(hba, job);
+ if (ret)
+ dev_err(hba->dev, "ARPMB OP failed: error code %d\n", ret);
+ break;
default:
ret = -ENOTSUPP;
dev_err(hba->dev, "unsupported msgcode 0x%x\n", msgcode);
@@ -144,22 +191,11 @@ static int ufs_bsg_request(struct bsg_job *job)
break;
}
- ufshcd_rpm_put_sync(hba);
-
- if (!desc_buff)
- goto out;
-
- if (desc_op == UPIU_QUERY_OPCODE_READ_DESC && desc_len)
- bsg_reply->reply_payload_rcv_len =
- sg_copy_from_buffer(job->request_payload.sg_list,
- job->request_payload.sg_cnt,
- desc_buff, desc_len);
-
- kfree(desc_buff);
-
out:
+ ufshcd_rpm_put_sync(hba);
+ kfree(buff);
bsg_reply->result = ret;
- job->reply_len = sizeof(struct ufs_bsg_reply);
+ job->reply_len = !rpmb ? sizeof(struct ufs_bsg_reply) : sizeof(struct ufs_rpmb_reply);
/* complete the job here only if no error */
if (ret == 0)
bsg_job_done(job, ret, bsg_reply->reply_payload_rcv_len);
diff --git a/drivers/ufs/core/ufshcd-priv.h b/drivers/ufs/core/ufshcd-priv.h
index a9e8e1f5afe7..529f8507a5e4 100644
--- a/drivers/ufs/core/ufshcd-priv.h
+++ b/drivers/ufs/core/ufshcd-priv.h
@@ -61,7 +61,24 @@ int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
enum flag_idn idn, u8 index, bool *flag_res);
void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit);
-
+void ufshcd_compl_one_cqe(struct ufs_hba *hba, int task_tag,
+ struct cq_entry *cqe);
+int ufshcd_mcq_init(struct ufs_hba *hba);
+int ufshcd_mcq_decide_queue_depth(struct ufs_hba *hba);
+int ufshcd_mcq_memory_alloc(struct ufs_hba *hba);
+void ufshcd_mcq_make_queues_operational(struct ufs_hba *hba);
+void ufshcd_mcq_config_mac(struct ufs_hba *hba, u32 max_active_cmds);
+void ufshcd_mcq_select_mcq_mode(struct ufs_hba *hba);
+u32 ufshcd_mcq_read_cqis(struct ufs_hba *hba, int i);
+void ufshcd_mcq_write_cqis(struct ufs_hba *hba, u32 val, int i);
+unsigned long ufshcd_mcq_poll_cqe_nolock(struct ufs_hba *hba,
+ struct ufs_hw_queue *hwq);
+struct ufs_hw_queue *ufshcd_mcq_req_to_hwq(struct ufs_hba *hba,
+ struct request *req);
+unsigned long ufshcd_mcq_poll_cqe_lock(struct ufs_hba *hba,
+ struct ufs_hw_queue *hwq);
+
+#define UFSHCD_MCQ_IO_QUEUE_OFFSET 1
#define SD_ASCII_STD true
#define SD_RAW false
int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
@@ -70,9 +87,6 @@ int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
int ufshcd_hold(struct ufs_hba *hba, bool async);
void ufshcd_release(struct ufs_hba *hba);
-void ufshcd_map_desc_id_to_length(struct ufs_hba *hba, enum desc_idn desc_id,
- int *desc_length);
-
int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd);
int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
@@ -226,6 +240,53 @@ static inline void ufshcd_vops_config_scaling_param(struct ufs_hba *hba,
hba->vops->config_scaling_param(hba, p, data);
}
+static inline void ufshcd_vops_reinit_notify(struct ufs_hba *hba)
+{
+ if (hba->vops && hba->vops->reinit_notify)
+ hba->vops->reinit_notify(hba);
+}
+
+static inline int ufshcd_vops_mcq_config_resource(struct ufs_hba *hba)
+{
+ if (hba->vops && hba->vops->mcq_config_resource)
+ return hba->vops->mcq_config_resource(hba);
+
+ return -EOPNOTSUPP;
+}
+
+static inline int ufshcd_mcq_vops_get_hba_mac(struct ufs_hba *hba)
+{
+ if (hba->vops && hba->vops->get_hba_mac)
+ return hba->vops->get_hba_mac(hba);
+
+ return -EOPNOTSUPP;
+}
+
+static inline int ufshcd_mcq_vops_op_runtime_config(struct ufs_hba *hba)
+{
+ if (hba->vops && hba->vops->op_runtime_config)
+ return hba->vops->op_runtime_config(hba);
+
+ return -EOPNOTSUPP;
+}
+
+static inline int ufshcd_vops_get_outstanding_cqs(struct ufs_hba *hba,
+ unsigned long *ocqs)
+{
+ if (hba->vops && hba->vops->get_outstanding_cqs)
+ return hba->vops->get_outstanding_cqs(hba, ocqs);
+
+ return -EOPNOTSUPP;
+}
+
+static inline int ufshcd_mcq_vops_config_esi(struct ufs_hba *hba)
+{
+ if (hba->vops && hba->vops->config_esi)
+ return hba->vops->config_esi(hba);
+
+ return -EOPNOTSUPP;
+}
+
extern const struct ufs_pm_lvl_states ufs_pm_lvl_states[];
/**
@@ -302,4 +363,44 @@ static inline bool ufs_is_valid_unit_desc_lun(struct ufs_dev_info *dev_info, u8
return lun == UFS_UPIU_RPMB_WLUN || (lun < dev_info->max_lu_supported);
}
+static inline void ufshcd_inc_sq_tail(struct ufs_hw_queue *q)
+{
+ u32 mask = q->max_entries - 1;
+ u32 val;
+
+ q->sq_tail_slot = (q->sq_tail_slot + 1) & mask;
+ val = q->sq_tail_slot * sizeof(struct utp_transfer_req_desc);
+ writel(val, q->mcq_sq_tail);
+}
+
+static inline void ufshcd_mcq_update_cq_tail_slot(struct ufs_hw_queue *q)
+{
+ u32 val = readl(q->mcq_cq_tail);
+
+ q->cq_tail_slot = val / sizeof(struct cq_entry);
+}
+
+static inline bool ufshcd_mcq_is_cq_empty(struct ufs_hw_queue *q)
+{
+ return q->cq_head_slot == q->cq_tail_slot;
+}
+
+static inline void ufshcd_mcq_inc_cq_head_slot(struct ufs_hw_queue *q)
+{
+ q->cq_head_slot++;
+ if (q->cq_head_slot == q->max_entries)
+ q->cq_head_slot = 0;
+}
+
+static inline void ufshcd_mcq_update_cq_head(struct ufs_hw_queue *q)
+{
+ writel(q->cq_head_slot * sizeof(struct cq_entry), q->mcq_cq_head);
+}
+
+static inline struct cq_entry *ufshcd_mcq_cur_cqe(struct ufs_hw_queue *q)
+{
+ struct cq_entry *cqe = q->cqe_base_addr;
+
+ return cqe + q->cq_head_slot;
+}
#endif /* _UFSHCD_PRIV_H_ */
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index 3a1c4d31e010..276a82b2e5ee 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -43,6 +43,12 @@
#define UFSHCD_ENABLE_INTRS (UTP_TRANSFER_REQ_COMPL |\
UTP_TASK_REQ_COMPL |\
UFSHCD_ERROR_MASK)
+
+#define UFSHCD_ENABLE_MCQ_INTRS (UTP_TASK_REQ_COMPL |\
+ UFSHCD_ERROR_MASK |\
+ MCQ_CQ_EVENT_STATUS)
+
+
/* UIC command timeout, unit: ms */
#define UIC_CMD_TIMEOUT 500
@@ -56,6 +62,9 @@
/* Query request timeout */
#define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */
+/* Advanced RPMB request timeout */
+#define ADVANCED_RPMB_REQ_TIMEOUT 3000 /* 3 seconds */
+
/* Task management command timeout */
#define TM_CMD_TIMEOUT 100 /* msecs */
@@ -89,6 +98,33 @@
/* Polling time to wait for fDeviceInit */
#define FDEVICEINIT_COMPL_TIMEOUT 1500 /* millisecs */
+/* UFSHC 4.0 compliant HC support this mode, refer param_set_mcq_mode() */
+static bool use_mcq_mode = true;
+
+static bool is_mcq_supported(struct ufs_hba *hba)
+{
+ return hba->mcq_sup && use_mcq_mode;
+}
+
+static int param_set_mcq_mode(const char *val, const struct kernel_param *kp)
+{
+ int ret;
+
+ ret = param_set_bool(val, kp);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static const struct kernel_param_ops mcq_mode_ops = {
+ .set = param_set_mcq_mode,
+ .get = param_get_bool,
+};
+
+module_param_cb(use_mcq_mode, &mcq_mode_ops, &use_mcq_mode, 0644);
+MODULE_PARM_DESC(use_mcq_mode, "Control MCQ mode for controllers starting from UFSHCI 4.0. 1 - enable MCQ, 0 - disable MCQ. MCQ is enabled by default");
+
#define ufshcd_toggle_vreg(_dev, _vreg, _on) \
({ \
int _ret; \
@@ -528,7 +564,7 @@ void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap, bool pr_prdt)
prdt_length = le16_to_cpu(
lrbp->utr_descriptor_ptr->prd_table_length);
if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
- prdt_length /= sizeof(struct ufshcd_sg_entry);
+ prdt_length /= ufshcd_sg_entry_size(hba);
dev_err(hba->dev,
"UPIU[%d] - PRDT - %d entries phys@0x%llx\n",
@@ -537,7 +573,7 @@ void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap, bool pr_prdt)
if (pr_prdt)
ufshcd_hex_dump("UPIU PRDT: ", lrbp->ucd_prdt_ptr,
- sizeof(struct ufshcd_sg_entry) * prdt_length);
+ ufshcd_sg_entry_size(hba) * prdt_length);
}
}
@@ -740,12 +776,17 @@ static inline bool ufshcd_is_device_present(struct ufs_hba *hba)
/**
* ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
* @lrbp: pointer to local command reference block
+ * @cqe: pointer to the completion queue entry
*
* This function is used to get the OCS field from UTRD
* Returns the OCS field in the UTRD
*/
-static enum utp_ocs ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
+static enum utp_ocs ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp,
+ struct cq_entry *cqe)
{
+ if (cqe)
+ return le32_to_cpu(cqe->status) & MASK_OCS;
+
return le32_to_cpu(lrbp->utr_descriptor_ptr->header.dword_2) & MASK_OCS;
}
@@ -1121,6 +1162,12 @@ static u32 ufshcd_pending_cmds(struct ufs_hba *hba)
return pending;
}
+/*
+ * Wait until all pending SCSI commands and TMFs have finished or the timeout
+ * has expired.
+ *
+ * Return: 0 upon success; -EBUSY upon timeout.
+ */
static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba,
u64 wait_timeout_us)
{
@@ -1154,7 +1201,7 @@ static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba,
}
spin_unlock_irqrestore(hba->host->host_lock, flags);
- schedule();
+ io_schedule_timeout(msecs_to_jiffies(20));
if (ktime_to_us(ktime_sub(ktime_get(), start)) >
wait_timeout_us) {
timeout = true;
@@ -1225,9 +1272,14 @@ static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up)
return ret;
}
-static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
+/*
+ * Wait until all pending SCSI commands and TMFs have finished or the timeout
+ * has expired.
+ *
+ * Return: 0 upon success; -EBUSY upon timeout.
+ */
+static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba, u64 timeout_us)
{
- #define DOORBELL_CLR_TOUT_US (1000 * 1000) /* 1 sec */
int ret = 0;
/*
* make sure that there are no outstanding requests when
@@ -1238,7 +1290,7 @@ static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
down_write(&hba->clk_scaling_lock);
if (!hba->clk_scaling.is_allowed ||
- ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) {
+ ufshcd_wait_for_doorbell_clr(hba, timeout_us)) {
ret = -EBUSY;
up_write(&hba->clk_scaling_lock);
mutex_unlock(&hba->wb_mutex);
@@ -1280,7 +1332,7 @@ static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
{
int ret = 0;
- ret = ufshcd_clock_scaling_prepare(hba);
+ ret = ufshcd_clock_scaling_prepare(hba, 1 * USEC_PER_SEC);
if (ret)
return ret;
@@ -2136,9 +2188,11 @@ static void ufshcd_update_monitor(struct ufs_hba *hba, const struct ufshcd_lrb *
* ufshcd_send_command - Send SCSI or device management commands
* @hba: per adapter instance
* @task_tag: Task tag of the command
+ * @hwq: pointer to hardware queue instance
*/
static inline
-void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
+void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag,
+ struct ufs_hw_queue *hwq)
{
struct ufshcd_lrb *lrbp = &hba->lrb[task_tag];
unsigned long flags;
@@ -2152,12 +2206,24 @@ void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
ufshcd_start_monitor(hba, lrbp);
- spin_lock_irqsave(&hba->outstanding_lock, flags);
- if (hba->vops && hba->vops->setup_xfer_req)
- hba->vops->setup_xfer_req(hba, task_tag, !!lrbp->cmd);
- __set_bit(task_tag, &hba->outstanding_reqs);
- ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
- spin_unlock_irqrestore(&hba->outstanding_lock, flags);
+ if (is_mcq_enabled(hba)) {
+ int utrd_size = sizeof(struct utp_transfer_req_desc);
+
+ spin_lock(&hwq->sq_lock);
+ memcpy(hwq->sqe_base_addr + (hwq->sq_tail_slot * utrd_size),
+ lrbp->utr_descriptor_ptr, utrd_size);
+ ufshcd_inc_sq_tail(hwq);
+ spin_unlock(&hwq->sq_lock);
+ } else {
+ spin_lock_irqsave(&hba->outstanding_lock, flags);
+ if (hba->vops && hba->vops->setup_xfer_req)
+ hba->vops->setup_xfer_req(hba, lrbp->task_tag,
+ !!lrbp->cmd);
+ __set_bit(lrbp->task_tag, &hba->outstanding_reqs);
+ ufshcd_writel(hba, 1 << lrbp->task_tag,
+ REG_UTP_TRANSFER_REQ_DOOR_BELL);
+ spin_unlock_irqrestore(&hba->outstanding_lock, flags);
+ }
}
/**
@@ -2245,6 +2311,14 @@ static inline int ufshcd_hba_capabilities(struct ufs_hba *hba)
if (err)
dev_err(hba->dev, "crypto setup failed\n");
+ hba->mcq_sup = FIELD_GET(MASK_MCQ_SUPPORT, hba->capabilities);
+ if (!hba->mcq_sup)
+ return err;
+
+ hba->mcq_capabilities = ufshcd_readl(hba, REG_MCQCAP);
+ hba->ext_iid_sup = FIELD_GET(MASK_EXT_IID_SUPPORT,
+ hba->mcq_capabilities);
+
return err;
}
@@ -2397,38 +2471,30 @@ int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
}
/**
- * ufshcd_map_sg - Map scatter-gather list to prdt
- * @hba: per adapter instance
- * @lrbp: pointer to local reference block
- *
- * Returns 0 in case of success, non-zero value in case of failure
+ * ufshcd_sgl_to_prdt - SG list to PRTD (Physical Region Description Table, 4DW format)
+ * @hba: per-adapter instance
+ * @lrbp: pointer to local reference block
+ * @sg_entries: The number of sg lists actually used
+ * @sg_list: Pointer to SG list
*/
-static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
+static void ufshcd_sgl_to_prdt(struct ufs_hba *hba, struct ufshcd_lrb *lrbp, int sg_entries,
+ struct scatterlist *sg_list)
{
- struct ufshcd_sg_entry *prd_table;
+ struct ufshcd_sg_entry *prd;
struct scatterlist *sg;
- struct scsi_cmnd *cmd;
- int sg_segments;
int i;
- cmd = lrbp->cmd;
- sg_segments = scsi_dma_map(cmd);
- if (sg_segments < 0)
- return sg_segments;
-
- if (sg_segments) {
+ if (sg_entries) {
if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
lrbp->utr_descriptor_ptr->prd_table_length =
- cpu_to_le16((sg_segments *
- sizeof(struct ufshcd_sg_entry)));
+ cpu_to_le16(sg_entries * ufshcd_sg_entry_size(hba));
else
- lrbp->utr_descriptor_ptr->prd_table_length =
- cpu_to_le16(sg_segments);
+ lrbp->utr_descriptor_ptr->prd_table_length = cpu_to_le16(sg_entries);
- prd_table = lrbp->ucd_prdt_ptr;
+ prd = lrbp->ucd_prdt_ptr;
- scsi_for_each_sg(cmd, sg, sg_segments, i) {
+ for_each_sg(sg_list, sg, sg_entries, i) {
const unsigned int len = sg_dma_len(sg);
/*
@@ -2440,13 +2506,32 @@ static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
* indicates 4 bytes, '7' indicates 8 bytes, etc."
*/
WARN_ONCE(len > 256 * 1024, "len = %#x\n", len);
- prd_table[i].size = cpu_to_le32(len - 1);
- prd_table[i].addr = cpu_to_le64(sg->dma_address);
- prd_table[i].reserved = 0;
+ prd->size = cpu_to_le32(len - 1);
+ prd->addr = cpu_to_le64(sg->dma_address);
+ prd->reserved = 0;
+ prd = (void *)prd + ufshcd_sg_entry_size(hba);
}
} else {
lrbp->utr_descriptor_ptr->prd_table_length = 0;
}
+}
+
+/**
+ * ufshcd_map_sg - Map scatter-gather list to prdt
+ * @hba: per adapter instance
+ * @lrbp: pointer to local reference block
+ *
+ * Returns 0 in case of success, non-zero value in case of failure
+ */
+static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
+{
+ struct scsi_cmnd *cmd = lrbp->cmd;
+ int sg_segments = scsi_dma_map(cmd);
+
+ if (sg_segments < 0)
+ return sg_segments;
+
+ ufshcd_sgl_to_prdt(hba, lrbp, sg_segments, scsi_sglist(cmd));
return 0;
}
@@ -2494,14 +2579,15 @@ static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
}
/**
- * ufshcd_prepare_req_desc_hdr() - Fills the requests header
+ * ufshcd_prepare_req_desc_hdr - Fill UTP Transfer request descriptor header according to request
* descriptor according to request
* @lrbp: pointer to local reference block
* @upiu_flags: flags required in the header
* @cmd_dir: requests data direction
+ * @ehs_length: Total EHS Length (in 32‐bytes units of all Extra Header Segments)
*/
-static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp,
- u8 *upiu_flags, enum dma_data_direction cmd_dir)
+static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp, u8 *upiu_flags,
+ enum dma_data_direction cmd_dir, int ehs_length)
{
struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
u32 data_direction;
@@ -2520,8 +2606,8 @@ static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp,
*upiu_flags = UPIU_CMD_FLAGS_NONE;
}
- dword_0 = data_direction | (lrbp->command_type
- << UPIU_COMMAND_TYPE_OFFSET);
+ dword_0 = data_direction | (lrbp->command_type << UPIU_COMMAND_TYPE_OFFSET) |
+ ehs_length << 8;
if (lrbp->intr_cmd)
dword_0 |= UTP_REQ_DESC_INT_CMD;
@@ -2576,8 +2662,7 @@ void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u8 upiu_flags)
}
/**
- * ufshcd_prepare_utp_query_req_upiu() - fills the utp_transfer_req_desc,
- * for query requsts
+ * ufshcd_prepare_utp_query_req_upiu() - fill the utp_transfer_req_desc for query request
* @hba: UFS hba
* @lrbp: local reference block pointer
* @upiu_flags: flags
@@ -2648,7 +2733,7 @@ static int ufshcd_compose_devman_upiu(struct ufs_hba *hba,
else
lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
- ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
+ ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE, 0);
if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
ufshcd_prepare_utp_query_req_upiu(hba, lrbp, upiu_flags);
else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP)
@@ -2676,8 +2761,7 @@ static int ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
if (likely(lrbp->cmd)) {
- ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags,
- lrbp->cmd->sc_data_direction);
+ ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, lrbp->cmd->sc_data_direction, 0);
ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
} else {
ret = -EINVAL;
@@ -2709,33 +2793,38 @@ static inline bool is_device_wlun(struct scsi_device *sdev)
*/
static void ufshcd_map_queues(struct Scsi_Host *shost)
{
- int i;
+ struct ufs_hba *hba = shost_priv(shost);
+ int i, queue_offset = 0;
+
+ if (!is_mcq_supported(hba)) {
+ hba->nr_queues[HCTX_TYPE_DEFAULT] = 1;
+ hba->nr_queues[HCTX_TYPE_READ] = 0;
+ hba->nr_queues[HCTX_TYPE_POLL] = 1;
+ hba->nr_hw_queues = 1;
+ }
for (i = 0; i < shost->nr_maps; i++) {
struct blk_mq_queue_map *map = &shost->tag_set.map[i];
- switch (i) {
- case HCTX_TYPE_DEFAULT:
- case HCTX_TYPE_POLL:
- map->nr_queues = 1;
- break;
- case HCTX_TYPE_READ:
- map->nr_queues = 0;
+ map->nr_queues = hba->nr_queues[i];
+ if (!map->nr_queues)
continue;
- default:
- WARN_ON_ONCE(true);
- }
- map->queue_offset = 0;
+ map->queue_offset = queue_offset;
+ if (i == HCTX_TYPE_POLL && !is_mcq_supported(hba))
+ map->queue_offset = 0;
+
blk_mq_map_queues(map);
+ queue_offset += map->nr_queues;
}
}
static void ufshcd_init_lrb(struct ufs_hba *hba, struct ufshcd_lrb *lrb, int i)
{
- struct utp_transfer_cmd_desc *cmd_descp = hba->ucdl_base_addr;
+ struct utp_transfer_cmd_desc *cmd_descp = (void *)hba->ucdl_base_addr +
+ i * sizeof_utp_transfer_cmd_desc(hba);
struct utp_transfer_req_desc *utrdlp = hba->utrdl_base_addr;
dma_addr_t cmd_desc_element_addr = hba->ucdl_dma_addr +
- i * sizeof(struct utp_transfer_cmd_desc);
+ i * sizeof_utp_transfer_cmd_desc(hba);
u16 response_offset = offsetof(struct utp_transfer_cmd_desc,
response_upiu);
u16 prdt_offset = offsetof(struct utp_transfer_cmd_desc, prd_table);
@@ -2743,11 +2832,11 @@ static void ufshcd_init_lrb(struct ufs_hba *hba, struct ufshcd_lrb *lrb, int i)
lrb->utr_descriptor_ptr = utrdlp + i;
lrb->utrd_dma_addr = hba->utrdl_dma_addr +
i * sizeof(struct utp_transfer_req_desc);
- lrb->ucd_req_ptr = (struct utp_upiu_req *)(cmd_descp + i);
+ lrb->ucd_req_ptr = (struct utp_upiu_req *)cmd_descp->command_upiu;
lrb->ucd_req_dma_addr = cmd_desc_element_addr;
- lrb->ucd_rsp_ptr = (struct utp_upiu_rsp *)cmd_descp[i].response_upiu;
+ lrb->ucd_rsp_ptr = (struct utp_upiu_rsp *)cmd_descp->response_upiu;
lrb->ucd_rsp_dma_addr = cmd_desc_element_addr + response_offset;
- lrb->ucd_prdt_ptr = cmd_descp[i].prd_table;
+ lrb->ucd_prdt_ptr = (struct ufshcd_sg_entry *)cmd_descp->prd_table;
lrb->ucd_prdt_dma_addr = cmd_desc_element_addr + prdt_offset;
}
@@ -2764,6 +2853,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
int tag = scsi_cmd_to_rq(cmd)->tag;
struct ufshcd_lrb *lrbp;
int err = 0;
+ struct ufs_hw_queue *hwq = NULL;
WARN_ONCE(tag < 0 || tag >= hba->nutrs, "Invalid tag %d\n", tag);
@@ -2848,7 +2938,10 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
goto out;
}
- ufshcd_send_command(hba, tag);
+ if (is_mcq_enabled(hba))
+ hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));
+
+ ufshcd_send_command(hba, tag, hwq);
out:
rcu_read_unlock();
@@ -2943,6 +3036,12 @@ ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n",
__func__);
break;
+ case UPIU_TRANSACTION_RESPONSE:
+ if (hba->dev_cmd.type != DEV_CMD_TYPE_RPMB) {
+ err = -EINVAL;
+ dev_err(hba->dev, "%s: unexpected response %x\n", __func__, resp);
+ }
+ break;
default:
err = -EINVAL;
dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n",
@@ -2972,7 +3071,7 @@ retry:
* not trigger any race conditions.
*/
hba->dev_cmd.complete = NULL;
- err = ufshcd_get_tr_ocs(lrbp);
+ err = ufshcd_get_tr_ocs(lrbp, hba->dev_cmd.cqe);
if (!err)
err = ufshcd_dev_cmd_completion(hba, lrbp);
} else {
@@ -3008,6 +3107,22 @@ retry:
} else {
dev_err(hba->dev, "%s: failed to clear tag %d\n",
__func__, lrbp->task_tag);
+
+ spin_lock_irqsave(&hba->outstanding_lock, flags);
+ pending = test_bit(lrbp->task_tag,
+ &hba->outstanding_reqs);
+ if (pending)
+ hba->dev_cmd.complete = NULL;
+ spin_unlock_irqrestore(&hba->outstanding_lock, flags);
+
+ if (!pending) {
+ /*
+ * The completion handler ran while we tried to
+ * clear the command.
+ */
+ time_left = 1;
+ goto retry;
+ }
}
}
@@ -3043,10 +3158,11 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
goto out;
hba->dev_cmd.complete = &wait;
+ hba->dev_cmd.cqe = NULL;
ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr);
- ufshcd_send_command(hba, tag);
+ ufshcd_send_command(hba, tag, hba->dev_cmd_queue);
err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
ufshcd_add_query_upiu_trace(hba, err ? UFS_QUERY_ERR : UFS_QUERY_COMP,
(struct utp_upiu_req *)lrbp->ucd_rsp_ptr);
@@ -3367,37 +3483,6 @@ int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
}
/**
- * ufshcd_map_desc_id_to_length - map descriptor IDN to its length
- * @hba: Pointer to adapter instance
- * @desc_id: descriptor idn value
- * @desc_len: mapped desc length (out)
- */
-void ufshcd_map_desc_id_to_length(struct ufs_hba *hba, enum desc_idn desc_id,
- int *desc_len)
-{
- if (desc_id >= QUERY_DESC_IDN_MAX || desc_id == QUERY_DESC_IDN_RFU_0 ||
- desc_id == QUERY_DESC_IDN_RFU_1)
- *desc_len = 0;
- else
- *desc_len = hba->desc_size[desc_id];
-}
-EXPORT_SYMBOL(ufshcd_map_desc_id_to_length);
-
-static void ufshcd_update_desc_length(struct ufs_hba *hba,
- enum desc_idn desc_id, int desc_index,
- unsigned char desc_len)
-{
- if (hba->desc_size[desc_id] == QUERY_DESC_MAX_SIZE &&
- desc_id != QUERY_DESC_IDN_STRING && desc_index != UFS_RPMB_UNIT)
- /* For UFS 3.1, the normal unit descriptor is 10 bytes larger
- * than the RPMB unit, however, both descriptors share the same
- * desc_idn, to cover both unit descriptors with one length, we
- * choose the normal unit descriptor length by desc_index.
- */
- hba->desc_size[desc_id] = desc_len;
-}
-
-/**
* ufshcd_read_desc_param - read the specified descriptor parameter
* @hba: Pointer to adapter instance
* @desc_id: descriptor idn value
@@ -3417,26 +3502,13 @@ int ufshcd_read_desc_param(struct ufs_hba *hba,
{
int ret;
u8 *desc_buf;
- int buff_len;
+ int buff_len = QUERY_DESC_MAX_SIZE;
bool is_kmalloc = true;
/* Safety check */
if (desc_id >= QUERY_DESC_IDN_MAX || !param_size)
return -EINVAL;
- /* Get the length of descriptor */
- ufshcd_map_desc_id_to_length(hba, desc_id, &buff_len);
- if (!buff_len) {
- dev_err(hba->dev, "%s: Failed to get desc length\n", __func__);
- return -EINVAL;
- }
-
- if (param_offset >= buff_len) {
- dev_err(hba->dev, "%s: Invalid offset 0x%x in descriptor IDN 0x%x, length 0x%x\n",
- __func__, param_offset, desc_id, buff_len);
- return -EINVAL;
- }
-
/* Check whether we need temp memory */
if (param_offset != 0 || param_size < buff_len) {
desc_buf = kzalloc(buff_len, GFP_KERNEL);
@@ -3449,15 +3521,24 @@ int ufshcd_read_desc_param(struct ufs_hba *hba,
/* Request for full descriptor */
ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
- desc_id, desc_index, 0,
- desc_buf, &buff_len);
-
+ desc_id, desc_index, 0,
+ desc_buf, &buff_len);
if (ret) {
dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d\n",
__func__, desc_id, desc_index, param_offset, ret);
goto out;
}
+ /* Update descriptor length */
+ buff_len = desc_buf[QUERY_DESC_LENGTH_OFFSET];
+
+ if (param_offset >= buff_len) {
+ dev_err(hba->dev, "%s: Invalid offset 0x%x in descriptor IDN 0x%x, length 0x%x\n",
+ __func__, param_offset, desc_id, buff_len);
+ ret = -EINVAL;
+ goto out;
+ }
+
/* Sanity check */
if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) {
dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header\n",
@@ -3466,10 +3547,6 @@ int ufshcd_read_desc_param(struct ufs_hba *hba,
goto out;
}
- /* Update descriptor length */
- buff_len = desc_buf[QUERY_DESC_LENGTH_OFFSET];
- ufshcd_update_desc_length(hba, desc_id, desc_index, buff_len);
-
if (is_kmalloc) {
/* Make sure we don't copy more data than available */
if (param_offset >= buff_len)
@@ -3656,7 +3733,7 @@ static int ufshcd_memory_alloc(struct ufs_hba *hba)
size_t utmrdl_size, utrdl_size, ucdl_size;
/* Allocate memory for UTP command descriptors */
- ucdl_size = (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs);
+ ucdl_size = sizeof_utp_transfer_cmd_desc(hba) * hba->nutrs;
hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev,
ucdl_size,
&hba->ucdl_dma_addr,
@@ -3664,12 +3741,9 @@ static int ufshcd_memory_alloc(struct ufs_hba *hba)
/*
* UFSHCI requires UTP command descriptor to be 128 byte aligned.
- * make sure hba->ucdl_dma_addr is aligned to PAGE_SIZE
- * if hba->ucdl_dma_addr is aligned to PAGE_SIZE, then it will
- * be aligned to 128 bytes as well
*/
if (!hba->ucdl_base_addr ||
- WARN_ON(hba->ucdl_dma_addr & (PAGE_SIZE - 1))) {
+ WARN_ON(hba->ucdl_dma_addr & (128 - 1))) {
dev_err(hba->dev,
"Command Descriptor Memory allocation failed\n");
goto out;
@@ -3685,13 +3759,21 @@ static int ufshcd_memory_alloc(struct ufs_hba *hba)
&hba->utrdl_dma_addr,
GFP_KERNEL);
if (!hba->utrdl_base_addr ||
- WARN_ON(hba->utrdl_dma_addr & (PAGE_SIZE - 1))) {
+ WARN_ON(hba->utrdl_dma_addr & (1024 - 1))) {
dev_err(hba->dev,
"Transfer Descriptor Memory allocation failed\n");
goto out;
}
/*
+ * Skip utmrdl allocation; it may have been
+ * allocated during first pass and not released during
+ * MCQ memory allocation.
+ * See ufshcd_release_sdb_queue() and ufshcd_config_mcq()
+ */
+ if (hba->utmrdl_base_addr)
+ goto skip_utmrdl;
+ /*
* Allocate memory for UTP Task Management descriptors
* UFSHCI requires 1024 byte alignment of UTMRD
*/
@@ -3701,12 +3783,13 @@ static int ufshcd_memory_alloc(struct ufs_hba *hba)
&hba->utmrdl_dma_addr,
GFP_KERNEL);
if (!hba->utmrdl_base_addr ||
- WARN_ON(hba->utmrdl_dma_addr & (PAGE_SIZE - 1))) {
+ WARN_ON(hba->utmrdl_dma_addr & (1024 - 1))) {
dev_err(hba->dev,
"Task Management Descriptor Memory allocation failed\n");
goto out;
}
+skip_utmrdl:
/* Allocate memory for local reference block */
hba->lrb = devm_kcalloc(hba->dev,
hba->nutrs, sizeof(struct ufshcd_lrb),
@@ -3750,7 +3833,7 @@ static void ufshcd_host_memory_configure(struct ufs_hba *hba)
prdt_offset =
offsetof(struct utp_transfer_cmd_desc, prd_table);
- cmd_desc_size = sizeof(struct utp_transfer_cmd_desc);
+ cmd_desc_size = sizeof_utp_transfer_cmd_desc(hba);
cmd_desc_dma_addr = hba->ucdl_dma_addr;
for (i = 0; i < hba->nutrs; i++) {
@@ -4907,7 +4990,7 @@ static void ufshcd_setup_links(struct ufs_hba *hba, struct scsi_device *sdev)
*/
static void ufshcd_lu_init(struct ufs_hba *hba, struct scsi_device *sdev)
{
- int len = hba->desc_size[QUERY_DESC_IDN_UNIT];
+ int len = QUERY_DESC_MAX_SIZE;
u8 lun = ufshcd_scsi_to_upiu_lun(sdev->lun);
u8 lun_qdepth = hba->nutrs;
u8 *desc_buf;
@@ -4942,6 +5025,12 @@ static void ufshcd_lu_init(struct ufs_hba *hba, struct scsi_device *sdev)
desc_buf[UNIT_DESC_PARAM_LU_WR_PROTECT] == UFS_LU_POWER_ON_WP)
hba->dev_info.is_lu_power_on_wp = true;
+ /* In case of RPMB LU, check if advanced RPMB mode is enabled */
+ if (desc_buf[UNIT_DESC_PARAM_UNIT_INDEX] == UFS_UPIU_RPMB_WLUN &&
+ desc_buf[RPMB_UNIT_DESC_PARAM_REGION_EN] & BIT(4))
+ hba->dev_info.b_advanced_rpmb_en = true;
+
+
kfree(desc_buf);
set_qdepth:
/*
@@ -5030,8 +5119,8 @@ static int ufshcd_slave_configure(struct scsi_device *sdev)
ufshcd_hpb_configure(hba, sdev);
blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
- if (hba->quirks & UFSHCD_QUIRK_ALIGN_SG_WITH_PAGE_SIZE)
- blk_queue_update_dma_alignment(q, PAGE_SIZE - 1);
+ if (hba->quirks & UFSHCD_QUIRK_4KB_DMA_ALIGNMENT)
+ blk_queue_update_dma_alignment(q, 4096 - 1);
/*
* Block runtime-pm until all consumers are added.
* Refer ufshcd_setup_links().
@@ -5130,18 +5219,20 @@ ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
* ufshcd_transfer_rsp_status - Get overall status of the response
* @hba: per adapter instance
* @lrbp: pointer to local reference block of completed command
+ * @cqe: pointer to the completion queue entry
*
* Returns result of the command to notify SCSI midlayer
*/
static inline int
-ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
+ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
+ struct cq_entry *cqe)
{
int result = 0;
int scsi_status;
enum utp_ocs ocs;
/* overall command status of utrd */
- ocs = ufshcd_get_tr_ocs(lrbp);
+ ocs = ufshcd_get_tr_ocs(lrbp, cqe);
if (hba->quirks & UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR) {
if (be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_1) &
@@ -5306,42 +5397,53 @@ static void ufshcd_release_scsi_cmd(struct ufs_hba *hba,
}
/**
- * __ufshcd_transfer_req_compl - handle SCSI and query command completion
+ * ufshcd_compl_one_cqe - handle a completion queue entry
* @hba: per adapter instance
- * @completed_reqs: bitmask that indicates which requests to complete
+ * @task_tag: the task tag of the request to be completed
+ * @cqe: pointer to the completion queue entry
*/
-static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
- unsigned long completed_reqs)
+void ufshcd_compl_one_cqe(struct ufs_hba *hba, int task_tag,
+ struct cq_entry *cqe)
{
struct ufshcd_lrb *lrbp;
struct scsi_cmnd *cmd;
- int index;
-
- for_each_set_bit(index, &completed_reqs, hba->nutrs) {
- lrbp = &hba->lrb[index];
- lrbp->compl_time_stamp = ktime_get();
- lrbp->compl_time_stamp_local_clock = local_clock();
- cmd = lrbp->cmd;
- if (cmd) {
- if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
- ufshcd_update_monitor(hba, lrbp);
- ufshcd_add_command_trace(hba, index, UFS_CMD_COMP);
- cmd->result = ufshcd_transfer_rsp_status(hba, lrbp);
- ufshcd_release_scsi_cmd(hba, lrbp);
- /* Do not touch lrbp after scsi done */
- scsi_done(cmd);
- } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE ||
- lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) {
- if (hba->dev_cmd.complete) {
- ufshcd_add_command_trace(hba, index,
- UFS_DEV_COMP);
- complete(hba->dev_cmd.complete);
- ufshcd_clk_scaling_update_busy(hba);
- }
+
+ lrbp = &hba->lrb[task_tag];
+ lrbp->compl_time_stamp = ktime_get();
+ cmd = lrbp->cmd;
+ if (cmd) {
+ if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
+ ufshcd_update_monitor(hba, lrbp);
+ ufshcd_add_command_trace(hba, task_tag, UFS_CMD_COMP);
+ cmd->result = ufshcd_transfer_rsp_status(hba, lrbp, cqe);
+ ufshcd_release_scsi_cmd(hba, lrbp);
+ /* Do not touch lrbp after scsi done */
+ scsi_done(cmd);
+ } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE ||
+ lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) {
+ if (hba->dev_cmd.complete) {
+ hba->dev_cmd.cqe = cqe;
+ ufshcd_add_command_trace(hba, task_tag, UFS_DEV_COMP);
+ complete(hba->dev_cmd.complete);
+ ufshcd_clk_scaling_update_busy(hba);
}
}
}
+/**
+ * __ufshcd_transfer_req_compl - handle SCSI and query command completion
+ * @hba: per adapter instance
+ * @completed_reqs: bitmask that indicates which requests to complete
+ */
+static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
+ unsigned long completed_reqs)
+{
+ int tag;
+
+ for_each_set_bit(tag, &completed_reqs, hba->nutrs)
+ ufshcd_compl_one_cqe(hba, tag, NULL);
+}
+
/* Any value that is not an existing queue number is fine for this constant. */
enum {
UFSHCD_POLL_FROM_INTERRUPT_CONTEXT = -1
@@ -5371,6 +5473,13 @@ static int ufshcd_poll(struct Scsi_Host *shost, unsigned int queue_num)
struct ufs_hba *hba = shost_priv(shost);
unsigned long completed_reqs, flags;
u32 tr_doorbell;
+ struct ufs_hw_queue *hwq;
+
+ if (is_mcq_enabled(hba)) {
+ hwq = &hba->uhq[queue_num + UFSHCD_MCQ_IO_QUEUE_OFFSET];
+
+ return ufshcd_mcq_poll_cqe_lock(hba, hwq);
+ }
spin_lock_irqsave(&hba->outstanding_lock, flags);
tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
@@ -6604,6 +6713,40 @@ static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba)
}
/**
+ * ufshcd_handle_mcq_cq_events - handle MCQ completion queue events
+ * @hba: per adapter instance
+ *
+ * Returns IRQ_HANDLED if interrupt is handled
+ */
+static irqreturn_t ufshcd_handle_mcq_cq_events(struct ufs_hba *hba)
+{
+ struct ufs_hw_queue *hwq;
+ unsigned long outstanding_cqs;
+ unsigned int nr_queues;
+ int i, ret;
+ u32 events;
+
+ ret = ufshcd_vops_get_outstanding_cqs(hba, &outstanding_cqs);
+ if (ret)
+ outstanding_cqs = (1U << hba->nr_hw_queues) - 1;
+
+ /* Exclude the poll queues */
+ nr_queues = hba->nr_hw_queues - hba->nr_queues[HCTX_TYPE_POLL];
+ for_each_set_bit(i, &outstanding_cqs, nr_queues) {
+ hwq = &hba->uhq[i];
+
+ events = ufshcd_mcq_read_cqis(hba, i);
+ if (events)
+ ufshcd_mcq_write_cqis(hba, events, i);
+
+ if (events & UFSHCD_MCQ_CQIS_TAIL_ENT_PUSH_STS)
+ ufshcd_mcq_poll_cqe_nolock(hba, hwq);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/**
* ufshcd_sl_intr - Interrupt service routine
* @hba: per adapter instance
* @intr_status: contains interrupts generated by the controller
@@ -6628,6 +6771,9 @@ static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
if (intr_status & UTP_TRANSFER_REQ_COMPL)
retval |= ufshcd_transfer_req_compl(hba);
+ if (intr_status & MCQ_CQ_EVENT_STATUS)
+ retval |= ufshcd_handle_mcq_cq_events(hba);
+
return retval;
}
@@ -6876,7 +7022,7 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
/* update the task tag in the request upiu */
req_upiu->header.dword_0 |= cpu_to_be32(tag);
- ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
+ ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE, 0);
/* just copy the upiu request as it is */
memcpy(lrbp->ucd_req_ptr, req_upiu, sizeof(*lrbp->ucd_req_ptr));
@@ -6895,7 +7041,7 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr);
- ufshcd_send_command(hba, tag);
+ ufshcd_send_command(hba, tag, hba->dev_cmd_queue);
/*
* ignore the returning value here - ufshcd_check_query_response is
* bound to fail since dev_cmd.query and dev_cmd.type were left empty.
@@ -7000,6 +7146,100 @@ int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
}
/**
+ * ufshcd_advanced_rpmb_req_handler - handle advanced RPMB request
+ * @hba: per adapter instance
+ * @req_upiu: upiu request
+ * @rsp_upiu: upiu reply
+ * @req_ehs: EHS field which contains Advanced RPMB Request Message
+ * @rsp_ehs: EHS field which returns Advanced RPMB Response Message
+ * @sg_cnt: The number of sg lists actually used
+ * @sg_list: Pointer to SG list when DATA IN/OUT UPIU is required in ARPMB operation
+ * @dir: DMA direction
+ *
+ * Returns zero on success, non-zero on failure
+ */
+int ufshcd_advanced_rpmb_req_handler(struct ufs_hba *hba, struct utp_upiu_req *req_upiu,
+ struct utp_upiu_req *rsp_upiu, struct ufs_ehs *req_ehs,
+ struct ufs_ehs *rsp_ehs, int sg_cnt, struct scatterlist *sg_list,
+ enum dma_data_direction dir)
+{
+ DECLARE_COMPLETION_ONSTACK(wait);
+ const u32 tag = hba->reserved_slot;
+ struct ufshcd_lrb *lrbp;
+ int err = 0;
+ int result;
+ u8 upiu_flags;
+ u8 *ehs_data;
+ u16 ehs_len;
+
+ /* Protects use of hba->reserved_slot. */
+ ufshcd_hold(hba, false);
+ mutex_lock(&hba->dev_cmd.lock);
+ down_read(&hba->clk_scaling_lock);
+
+ lrbp = &hba->lrb[tag];
+ WARN_ON(lrbp->cmd);
+ lrbp->cmd = NULL;
+ lrbp->task_tag = tag;
+ lrbp->lun = UFS_UPIU_RPMB_WLUN;
+
+ lrbp->intr_cmd = true;
+ ufshcd_prepare_lrbp_crypto(NULL, lrbp);
+ hba->dev_cmd.type = DEV_CMD_TYPE_RPMB;
+
+ /* Advanced RPMB starts from UFS 4.0, so its command type is UTP_CMD_TYPE_UFS_STORAGE */
+ lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
+
+ ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, dir, 2);
+
+ /* update the task tag and LUN in the request upiu */
+ req_upiu->header.dword_0 |= cpu_to_be32(upiu_flags << 16 | UFS_UPIU_RPMB_WLUN << 8 | tag);
+
+ /* copy the UPIU(contains CDB) request as it is */
+ memcpy(lrbp->ucd_req_ptr, req_upiu, sizeof(*lrbp->ucd_req_ptr));
+ /* Copy EHS, starting with byte32, immediately after the CDB package */
+ memcpy(lrbp->ucd_req_ptr + 1, req_ehs, sizeof(*req_ehs));
+
+ if (dir != DMA_NONE && sg_list)
+ ufshcd_sgl_to_prdt(hba, lrbp, sg_cnt, sg_list);
+
+ memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
+
+ hba->dev_cmd.complete = &wait;
+
+ ufshcd_send_command(hba, tag, hba->dev_cmd_queue);
+
+ err = ufshcd_wait_for_dev_cmd(hba, lrbp, ADVANCED_RPMB_REQ_TIMEOUT);
+
+ if (!err) {
+ /* Just copy the upiu response as it is */
+ memcpy(rsp_upiu, lrbp->ucd_rsp_ptr, sizeof(*rsp_upiu));
+ /* Get the response UPIU result */
+ result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr);
+
+ ehs_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) >> 24;
+ /*
+ * Since the bLength in EHS indicates the total size of the EHS Header and EHS Data
+ * in 32 Byte units, the value of the bLength Request/Response for Advanced RPMB
+ * Message is 02h
+ */
+ if (ehs_len == 2 && rsp_ehs) {
+ /*
+ * ucd_rsp_ptr points to a buffer with a length of 512 bytes
+ * (ALIGNED_UPIU_SIZE = 512), and the EHS data just starts from byte32
+ */
+ ehs_data = (u8 *)lrbp->ucd_rsp_ptr + EHS_OFFSET_IN_RESPONSE;
+ memcpy(rsp_ehs, ehs_data, ehs_len * 32);
+ }
+ }
+
+ up_read(&hba->clk_scaling_lock);
+ mutex_unlock(&hba->dev_cmd.lock);
+ ufshcd_release(hba);
+ return err ? : result;
+}
+
+/**
* ufshcd_eh_device_reset_handler() - Reset a single logical unit.
* @cmd: SCSI command pointer
*
@@ -7441,12 +7681,11 @@ static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan,
* In case regulators are not initialized we'll return 0
* @hba: per-adapter instance
* @desc_buf: power descriptor buffer to extract ICC levels from.
- * @len: length of desc_buff
*
* Returns calculated ICC level
*/
static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba,
- const u8 *desc_buf, int len)
+ const u8 *desc_buf)
{
u32 icc_level = 0;
@@ -7488,25 +7727,23 @@ out:
static void ufshcd_set_active_icc_lvl(struct ufs_hba *hba)
{
int ret;
- int buff_len = hba->desc_size[QUERY_DESC_IDN_POWER];
u8 *desc_buf;
u32 icc_level;
- desc_buf = kmalloc(buff_len, GFP_KERNEL);
+ desc_buf = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
if (!desc_buf)
return;
ret = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_POWER, 0, 0,
- desc_buf, buff_len);
+ desc_buf, QUERY_DESC_MAX_SIZE);
if (ret) {
dev_err(hba->dev,
- "%s: Failed reading power descriptor.len = %d ret = %d",
- __func__, buff_len, ret);
+ "%s: Failed reading power descriptor ret = %d",
+ __func__, ret);
goto out;
}
- icc_level = ufshcd_find_max_sup_active_icc_level(hba, desc_buf,
- buff_len);
+ icc_level = ufshcd_find_max_sup_active_icc_level(hba, desc_buf);
dev_dbg(hba->dev, "%s: setting icc_level 0x%x", __func__, icc_level);
ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
@@ -7616,10 +7853,6 @@ static void ufshcd_wb_probe(struct ufs_hba *hba, const u8 *desc_buf)
(hba->dev_quirks & UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES)))
goto wb_disabled;
- if (hba->desc_size[QUERY_DESC_IDN_DEVICE] <
- DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP + 4)
- goto wb_disabled;
-
ext_ufs_feature = get_unaligned_be32(desc_buf +
DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP);
@@ -7690,6 +7923,31 @@ static void ufshcd_temp_notif_probe(struct ufs_hba *hba, const u8 *desc_buf)
}
}
+static void ufshcd_ext_iid_probe(struct ufs_hba *hba, u8 *desc_buf)
+{
+ struct ufs_dev_info *dev_info = &hba->dev_info;
+ u32 ext_ufs_feature;
+ u32 ext_iid_en = 0;
+ int err;
+
+ /* Only UFS-4.0 and above may support EXT_IID */
+ if (dev_info->wspecversion < 0x400)
+ goto out;
+
+ ext_ufs_feature = get_unaligned_be32(desc_buf +
+ DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP);
+ if (!(ext_ufs_feature & UFS_DEV_EXT_IID_SUP))
+ goto out;
+
+ err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
+ QUERY_ATTR_IDN_EXT_IID_EN, 0, 0, &ext_iid_en);
+ if (err)
+ dev_err(hba->dev, "failed reading bEXTIIDEn. err = %d\n", err);
+
+out:
+ dev_info->b_ext_iid_en = ext_iid_en;
+}
+
void ufshcd_fixup_dev_quirks(struct ufs_hba *hba,
const struct ufs_dev_quirk *fixups)
{
@@ -7727,14 +7985,14 @@ static int ufs_get_device_desc(struct ufs_hba *hba)
u8 *desc_buf;
struct ufs_dev_info *dev_info = &hba->dev_info;
- desc_buf = kmalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
+ desc_buf = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
if (!desc_buf) {
err = -ENOMEM;
goto out;
}
err = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_DEVICE, 0, 0, desc_buf,
- hba->desc_size[QUERY_DESC_IDN_DEVICE]);
+ QUERY_DESC_MAX_SIZE);
if (err) {
dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n",
__func__, err);
@@ -7751,6 +8009,7 @@ static int ufs_get_device_desc(struct ufs_hba *hba)
/* getting Specification Version in big endian format */
dev_info->wspecversion = desc_buf[DEVICE_DESC_PARAM_SPEC_VER] << 8 |
desc_buf[DEVICE_DESC_PARAM_SPEC_VER + 1];
+ dev_info->bqueuedepth = desc_buf[DEVICE_DESC_PARAM_Q_DPTH];
b_ufs_feature_sup = desc_buf[DEVICE_DESC_PARAM_UFS_FEAT];
model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
@@ -7788,6 +8047,9 @@ static int ufs_get_device_desc(struct ufs_hba *hba)
ufshcd_temp_notif_probe(hba, desc_buf);
+ if (hba->ext_iid_sup)
+ ufshcd_ext_iid_probe(hba, desc_buf);
+
/*
* ufshcd_read_string_desc returns size of the string
* reset the error value
@@ -7981,18 +8243,16 @@ static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba)
static int ufshcd_device_geo_params_init(struct ufs_hba *hba)
{
int err;
- size_t buff_len;
u8 *desc_buf;
- buff_len = hba->desc_size[QUERY_DESC_IDN_GEOMETRY];
- desc_buf = kmalloc(buff_len, GFP_KERNEL);
+ desc_buf = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
if (!desc_buf) {
err = -ENOMEM;
goto out;
}
err = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_GEOMETRY, 0, 0,
- desc_buf, buff_len);
+ desc_buf, QUERY_DESC_MAX_SIZE);
if (err) {
dev_err(hba->dev, "%s: Failed reading Geometry Desc. err = %d\n",
__func__, err);
@@ -8004,7 +8264,7 @@ static int ufshcd_device_geo_params_init(struct ufs_hba *hba)
else if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 0)
hba->dev_info.max_lu_supported = 8;
- if (hba->desc_size[QUERY_DESC_IDN_GEOMETRY] >=
+ if (desc_buf[QUERY_DESC_LENGTH_OFFSET] >=
GEOMETRY_DESC_PARAM_HPB_MAX_ACTIVE_REGS)
ufshpb_get_geo_info(hba, desc_buf);
@@ -8089,11 +8349,7 @@ out:
static int ufshcd_device_params_init(struct ufs_hba *hba)
{
bool flag;
- int ret, i;
-
- /* Init device descriptor sizes */
- for (i = 0; i < QUERY_DESC_IDN_MAX; i++)
- hba->desc_size[i] = QUERY_DESC_MAX_SIZE;
+ int ret;
/* Init UFS geometry descriptor related parameters */
ret = ufshcd_device_geo_params_init(hba);
@@ -8161,27 +8417,96 @@ out:
return ret;
}
-/**
- * ufshcd_probe_hba - probe hba to detect device and initialize it
- * @hba: per-adapter instance
- * @init_dev_params: whether or not to call ufshcd_device_params_init().
- *
- * Execute link-startup and verify device initialization
- */
-static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params)
+/* SDB - Single Doorbell */
+static void ufshcd_release_sdb_queue(struct ufs_hba *hba, int nutrs)
+{
+ size_t ucdl_size, utrdl_size;
+
+ ucdl_size = sizeof(struct utp_transfer_cmd_desc) * nutrs;
+ dmam_free_coherent(hba->dev, ucdl_size, hba->ucdl_base_addr,
+ hba->ucdl_dma_addr);
+
+ utrdl_size = sizeof(struct utp_transfer_req_desc) * nutrs;
+ dmam_free_coherent(hba->dev, utrdl_size, hba->utrdl_base_addr,
+ hba->utrdl_dma_addr);
+
+ devm_kfree(hba->dev, hba->lrb);
+}
+
+static int ufshcd_alloc_mcq(struct ufs_hba *hba)
{
int ret;
- unsigned long flags;
- ktime_t start = ktime_get();
+ int old_nutrs = hba->nutrs;
+
+ ret = ufshcd_mcq_decide_queue_depth(hba);
+ if (ret < 0)
+ return ret;
+
+ hba->nutrs = ret;
+ ret = ufshcd_mcq_init(hba);
+ if (ret)
+ goto err;
+
+ /*
+ * Previously allocated memory for nutrs may not be enough in MCQ mode.
+ * Number of supported tags in MCQ mode may be larger than SDB mode.
+ */
+ if (hba->nutrs != old_nutrs) {
+ ufshcd_release_sdb_queue(hba, old_nutrs);
+ ret = ufshcd_memory_alloc(hba);
+ if (ret)
+ goto err;
+ ufshcd_host_memory_configure(hba);
+ }
+
+ ret = ufshcd_mcq_memory_alloc(hba);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ hba->nutrs = old_nutrs;
+ return ret;
+}
+
+static void ufshcd_config_mcq(struct ufs_hba *hba)
+{
+ int ret;
+
+ ret = ufshcd_mcq_vops_config_esi(hba);
+ dev_info(hba->dev, "ESI %sconfigured\n", ret ? "is not " : "");
+
+ ufshcd_enable_intr(hba, UFSHCD_ENABLE_MCQ_INTRS);
+ ufshcd_mcq_make_queues_operational(hba);
+ ufshcd_mcq_config_mac(hba, hba->nutrs);
+
+ hba->host->can_queue = hba->nutrs - UFSHCD_NUM_RESERVED;
+ hba->reserved_slot = hba->nutrs - UFSHCD_NUM_RESERVED;
+
+ /* Select MCQ mode */
+ ufshcd_writel(hba, ufshcd_readl(hba, REG_UFS_MEM_CFG) | 0x1,
+ REG_UFS_MEM_CFG);
+ hba->mcq_enabled = true;
+
+ dev_info(hba->dev, "MCQ configured, nr_queues=%d, io_queues=%d, read_queue=%d, poll_queues=%d, queue_depth=%d\n",
+ hba->nr_hw_queues, hba->nr_queues[HCTX_TYPE_DEFAULT],
+ hba->nr_queues[HCTX_TYPE_READ], hba->nr_queues[HCTX_TYPE_POLL],
+ hba->nutrs);
+}
+
+static int ufshcd_device_init(struct ufs_hba *hba, bool init_dev_params)
+{
+ int ret;
+ struct Scsi_Host *host = hba->host;
hba->ufshcd_state = UFSHCD_STATE_RESET;
ret = ufshcd_link_startup(hba);
if (ret)
- goto out;
+ return ret;
if (hba->quirks & UFSHCD_QUIRK_SKIP_PH_CONFIGURATION)
- goto out;
+ return ret;
/* Debug counters initialization */
ufshcd_clear_dbg_ufs_stats(hba);
@@ -8189,15 +8514,19 @@ static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params)
/* UniPro link is active now */
ufshcd_set_link_active(hba);
+ /* Reconfigure MCQ upon reset */
+ if (is_mcq_enabled(hba) && !init_dev_params)
+ ufshcd_config_mcq(hba);
+
/* Verify device initialization by sending NOP OUT UPIU */
ret = ufshcd_verify_dev_init(hba);
if (ret)
- goto out;
+ return ret;
/* Initiate UFS initialization, and waiting until completion */
ret = ufshcd_complete_dev_init(hba);
if (ret)
- goto out;
+ return ret;
/*
* Initialize UFS device parameters used by driver, these
@@ -8206,7 +8535,25 @@ static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params)
if (init_dev_params) {
ret = ufshcd_device_params_init(hba);
if (ret)
- goto out;
+ return ret;
+ if (is_mcq_supported(hba) && !hba->scsi_host_added) {
+ ret = ufshcd_alloc_mcq(hba);
+ if (ret) {
+ /* Continue with SDB mode */
+ use_mcq_mode = false;
+ dev_err(hba->dev, "MCQ mode is disabled, err=%d\n",
+ ret);
+ }
+ ret = scsi_add_host(host, hba->dev);
+ if (ret) {
+ dev_err(hba->dev, "scsi_add_host failed\n");
+ return ret;
+ }
+ hba->scsi_host_added = true;
+ }
+ /* MCQ may be disabled if ufshcd_alloc_mcq() fails */
+ if (is_mcq_supported(hba) && use_mcq_mode)
+ ufshcd_config_mcq(hba);
}
ufshcd_tune_unipro_params(hba);
@@ -8227,11 +8574,51 @@ static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params)
if (ret) {
dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
__func__, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * ufshcd_probe_hba - probe hba to detect device and initialize it
+ * @hba: per-adapter instance
+ * @init_dev_params: whether or not to call ufshcd_device_params_init().
+ *
+ * Execute link-startup and verify device initialization
+ */
+static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params)
+{
+ ktime_t start = ktime_get();
+ unsigned long flags;
+ int ret;
+
+ ret = ufshcd_device_init(hba, init_dev_params);
+ if (ret)
+ goto out;
+
+ if (hba->quirks & UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH) {
+ /* Reset the device and controller before doing reinit */
+ ufshcd_device_reset(hba);
+ ufshcd_hba_stop(hba);
+ ufshcd_vops_reinit_notify(hba);
+ ret = ufshcd_hba_enable(hba);
+ if (ret) {
+ dev_err(hba->dev, "Host controller enable failed\n");
+ ufshcd_print_evt_hist(hba);
+ ufshcd_print_host_state(hba);
goto out;
}
- ufshcd_print_pwr_info(hba);
+
+ /* Reinit the device */
+ ret = ufshcd_device_init(hba, init_dev_params);
+ if (ret)
+ goto out;
}
+ ufshcd_print_pwr_info(hba);
+
/*
* bActiveICCLevel is volatile for UFS device (as per latest v2.1 spec)
* and for removable UFS card as well, hence always set the parameter.
@@ -8359,7 +8746,6 @@ static struct scsi_host_template ufshcd_driver_template = {
.max_host_blocked = 1,
.track_queue_depth = 1,
.sdev_groups = ufshcd_driver_groups,
- .dma_boundary = PAGE_SIZE - 1,
.rpm_autosuspend_delay = RPM_AUTOSUSPEND_DELAY_MS,
};
@@ -9453,6 +9839,7 @@ static int ufshcd_resume(struct ufs_hba *hba)
/* enable the host irq as host controller would be active soon */
ufshcd_enable_irq(hba);
+
goto out;
disable_vreg:
@@ -9616,6 +10003,56 @@ void ufshcd_remove(struct ufs_hba *hba)
}
EXPORT_SYMBOL_GPL(ufshcd_remove);
+#ifdef CONFIG_PM_SLEEP
+int ufshcd_system_freeze(struct device *dev)
+{
+
+ return ufshcd_system_suspend(dev);
+
+}
+EXPORT_SYMBOL_GPL(ufshcd_system_freeze);
+
+int ufshcd_system_restore(struct device *dev)
+{
+
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ int ret;
+
+ ret = ufshcd_system_resume(dev);
+ if (ret)
+ return ret;
+
+ /* Configure UTRL and UTMRL base address registers */
+ ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
+ REG_UTP_TRANSFER_REQ_LIST_BASE_L);
+ ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
+ REG_UTP_TRANSFER_REQ_LIST_BASE_H);
+ ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
+ REG_UTP_TASK_REQ_LIST_BASE_L);
+ ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
+ REG_UTP_TASK_REQ_LIST_BASE_H);
+ /*
+ * Make sure that UTRL and UTMRL base address registers
+ * are updated with the latest queue addresses. Only after
+ * updating these addresses, we can queue the new commands.
+ */
+ mb();
+
+ /* Resuming from hibernate, assume that link was OFF */
+ ufshcd_set_link_off(hba);
+
+ return 0;
+
+}
+EXPORT_SYMBOL_GPL(ufshcd_system_restore);
+
+int ufshcd_system_thaw(struct device *dev)
+{
+ return ufshcd_system_resume(dev);
+}
+EXPORT_SYMBOL_GPL(ufshcd_system_thaw);
+#endif /* CONFIG_PM_SLEEP */
+
/**
* ufshcd_dealloc_host - deallocate Host Bus Adapter (HBA)
* @hba: pointer to Host Bus Adapter (HBA)
@@ -9674,6 +10111,7 @@ int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
hba->dev = dev;
hba->dev_ref_clk_freq = REF_CLK_FREQ_INVAL;
hba->nop_out_timeout = NOP_OUT_TIMEOUT;
+ ufshcd_set_sg_entry_size(hba, sizeof(struct ufshcd_sg_entry));
INIT_LIST_HEAD(&hba->clk_list_head);
spin_lock_init(&hba->outstanding_lock);
@@ -9823,10 +10261,12 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
hba->is_irq_enabled = true;
}
- err = scsi_add_host(host, hba->dev);
- if (err) {
- dev_err(hba->dev, "scsi_add_host failed\n");
- goto out_disable;
+ if (!is_mcq_supported(hba)) {
+ err = scsi_add_host(host, hba->dev);
+ if (err) {
+ dev_err(hba->dev, "scsi_add_host failed\n");
+ goto out_disable;
+ }
}
hba->tmf_tag_set = (struct blk_mq_tag_set) {
@@ -10054,11 +10494,6 @@ static int __init ufshcd_core_init(void)
{
int ret;
- /* Verify that there are no gaps in struct utp_transfer_cmd_desc. */
- static_assert(sizeof(struct utp_transfer_cmd_desc) ==
- 2 * ALIGNED_UPIU_SIZE +
- SG_ALL * sizeof(struct ufshcd_sg_entry));
-
ufs_debugfs_init();
ret = scsi_register_driver(&ufs_dev_wlun_template.gendrv);
diff --git a/drivers/ufs/core/ufshpb.c b/drivers/ufs/core/ufshpb.c
index 994f4ac9df5a..a46a7666c891 100644
--- a/drivers/ufs/core/ufshpb.c
+++ b/drivers/ufs/core/ufshpb.c
@@ -2382,12 +2382,10 @@ static int ufshpb_get_lu_info(struct ufs_hba *hba, int lun,
{
u16 max_active_rgns;
u8 lu_enable;
- int size;
+ int size = QUERY_DESC_MAX_SIZE;
int ret;
char desc_buf[QUERY_DESC_MAX_SIZE];
- ufshcd_map_desc_id_to_length(hba, QUERY_DESC_IDN_UNIT, &size);
-
ufshcd_rpm_get_sync(hba);
ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
QUERY_DESC_IDN_UNIT, lun, 0,
diff --git a/drivers/ufs/host/Kconfig b/drivers/ufs/host/Kconfig
index 4cc2dbd79ed0..663881437921 100644
--- a/drivers/ufs/host/Kconfig
+++ b/drivers/ufs/host/Kconfig
@@ -57,8 +57,9 @@ config SCSI_UFS_DWC_TC_PLATFORM
config SCSI_UFS_QCOM
tristate "QCOM specific hooks to UFS controller platform driver"
depends on SCSI_UFSHCD_PLATFORM && ARCH_QCOM
+ depends on GENERIC_MSI_IRQ
+ depends on RESET_CONTROLLER
select QCOM_SCM if SCSI_UFS_CRYPTO
- select RESET_CONTROLLER
help
This selects the QCOM specific additions to UFSHCD platform driver.
UFS host on QCOM needs some vendor specific configuration before
@@ -124,3 +125,19 @@ config SCSI_UFS_EXYNOS
Select this if you have UFS host controller on Samsung Exynos SoC.
If unsure, say N.
+
+config SCSI_UFS_VARIABLE_SG_ENTRY_SIZE
+ bool
+ default y if SCSI_UFS_EXYNOS && SCSI_UFS_CRYPTO
+
+config SCSI_UFS_SPRD
+ tristate "Unisoc specific hooks to UFS controller platform driver"
+ depends on SCSI_UFSHCD_PLATFORM && (ARCH_SPRD || COMPILE_TEST)
+ help
+ This selects the Unisoc specific additions to UFSHCD platform driver.
+ UFS host on Unisoc needs some vendor specific configuration before
+ accessing the hardware which includes PHY configuration and vendor
+ specific registers.
+
+ Select this if you have UFS controller on Unisoc chipset.
+ If unsure, say N.
diff --git a/drivers/ufs/host/Makefile b/drivers/ufs/host/Makefile
index 7717ca93e7d5..d7c5bf7fa512 100644
--- a/drivers/ufs/host/Makefile
+++ b/drivers/ufs/host/Makefile
@@ -12,4 +12,5 @@ obj-$(CONFIG_SCSI_UFSHCD_PLATFORM) += ufshcd-pltfrm.o
obj-$(CONFIG_SCSI_UFS_HISI) += ufs-hisi.o
obj-$(CONFIG_SCSI_UFS_MEDIATEK) += ufs-mediatek.o
obj-$(CONFIG_SCSI_UFS_RENESAS) += ufs-renesas.o
+obj-$(CONFIG_SCSI_UFS_SPRD) += ufs-sprd.o
obj-$(CONFIG_SCSI_UFS_TI_J721E) += ti-j721e-ufs.o
diff --git a/drivers/ufs/host/ufs-exynos.c b/drivers/ufs/host/ufs-exynos.c
index c3628a8645a5..7c985fc38db1 100644
--- a/drivers/ufs/host/ufs-exynos.c
+++ b/drivers/ufs/host/ufs-exynos.c
@@ -1300,6 +1300,14 @@ static int exynos_ufs_hce_enable_notify(struct ufs_hba *hba,
switch (status) {
case PRE_CHANGE:
+ /*
+ * The maximum segment size must be set after scsi_host_alloc()
+ * has been called and before LUN scanning starts
+ * (ufshcd_async_scan()). Note: this callback may also be called
+ * from other functions than ufshcd_init().
+ */
+ hba->host->max_segment_size = 4096;
+
if (ufs->drv_data->pre_hce_enable) {
ret = ufs->drv_data->pre_hce_enable(ufs);
if (ret)
@@ -1673,7 +1681,7 @@ static const struct exynos_ufs_drv_data exynos_ufs_drvs = {
UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR |
UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL |
UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING |
- UFSHCD_QUIRK_ALIGN_SG_WITH_PAGE_SIZE,
+ UFSHCD_QUIRK_4KB_DMA_ALIGNMENT,
.opts = EXYNOS_UFS_OPT_HAS_APB_CLK_CTRL |
EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL |
EXYNOS_UFS_OPT_BROKEN_RX_SEL_IDX |
diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c
index 8ad1415e10b6..34fc453f3eb1 100644
--- a/drivers/ufs/host/ufs-qcom.c
+++ b/drivers/ufs/host/ufs-qcom.c
@@ -22,8 +22,11 @@
#include <ufs/ufshci.h>
#include <ufs/ufs_quirks.h>
-#define UFS_QCOM_DEFAULT_DBG_PRINT_EN \
- (UFS_QCOM_DBG_PRINT_REGS_EN | UFS_QCOM_DBG_PRINT_TEST_BUS_EN)
+#define MCQ_QCFGPTR_MASK GENMASK(7, 0)
+#define MCQ_QCFGPTR_UNIT 0x200
+#define MCQ_SQATTR_OFFSET(c) \
+ ((((c) >> 16) & MCQ_QCFGPTR_MASK) * MCQ_QCFGPTR_UNIT)
+#define MCQ_QCFG_SIZE 0x40
enum {
TSTBUS_UAWM,
@@ -52,12 +55,6 @@ static struct ufs_qcom_host *rcdev_to_ufs_host(struct reset_controller_dev *rcd)
return container_of(rcd, struct ufs_qcom_host, rcdev);
}
-static void ufs_qcom_dump_regs_wrapper(struct ufs_hba *hba, int offset, int len,
- const char *prefix, void *priv)
-{
- ufshcd_dump_regs(hba, offset, len * 4, prefix);
-}
-
static int ufs_qcom_host_clk_get(struct device *dev,
const char *name, struct clk **clk_out, bool optional)
{
@@ -110,7 +107,7 @@ static void ufs_qcom_disable_lane_clks(struct ufs_qcom_host *host)
static int ufs_qcom_enable_lane_clks(struct ufs_qcom_host *host)
{
- int err = 0;
+ int err;
struct device *dev = host->hba->dev;
if (host->is_lane_clks_enabled)
@@ -119,7 +116,7 @@ static int ufs_qcom_enable_lane_clks(struct ufs_qcom_host *host)
err = ufs_qcom_host_clk_enable(dev, "rx_lane0_sync_clk",
host->rx_l0_sync_clk);
if (err)
- goto out;
+ return err;
err = ufs_qcom_host_clk_enable(dev, "tx_lane0_sync_clk",
host->tx_l0_sync_clk);
@@ -137,7 +134,8 @@ static int ufs_qcom_enable_lane_clks(struct ufs_qcom_host *host)
goto disable_rx_l1;
host->is_lane_clks_enabled = true;
- goto out;
+
+ return 0;
disable_rx_l1:
clk_disable_unprepare(host->rx_l1_sync_clk);
@@ -145,7 +143,7 @@ disable_tx_l0:
clk_disable_unprepare(host->tx_l0_sync_clk);
disable_rx_l0:
clk_disable_unprepare(host->rx_l0_sync_clk);
-out:
+
return err;
}
@@ -160,25 +158,25 @@ static int ufs_qcom_init_lane_clks(struct ufs_qcom_host *host)
err = ufs_qcom_host_clk_get(dev, "rx_lane0_sync_clk",
&host->rx_l0_sync_clk, false);
if (err)
- goto out;
+ return err;
err = ufs_qcom_host_clk_get(dev, "tx_lane0_sync_clk",
&host->tx_l0_sync_clk, false);
if (err)
- goto out;
+ return err;
/* In case of single lane per direction, don't read lane1 clocks */
if (host->hba->lanes_per_direction > 1) {
err = ufs_qcom_host_clk_get(dev, "rx_lane1_sync_clk",
&host->rx_l1_sync_clk, false);
if (err)
- goto out;
+ return err;
err = ufs_qcom_host_clk_get(dev, "tx_lane1_sync_clk",
&host->tx_l1_sync_clk, true);
}
-out:
- return err;
+
+ return 0;
}
static int ufs_qcom_check_hibern8(struct ufs_hba *hba)
@@ -226,6 +224,10 @@ static void ufs_qcom_select_unipro_mode(struct ufs_qcom_host *host)
ufshcd_rmwl(host->hba, QUNIPRO_SEL,
ufs_qcom_cap_qunipro(host) ? QUNIPRO_SEL : 0,
REG_UFS_CFG1);
+
+ if (host->hw_ver.major == 0x05)
+ ufshcd_rmwl(host->hba, QUNIPRO_G4_SEL, 0, REG_UFS_CFG0);
+
/* make sure above configuration is applied before we return */
mb();
}
@@ -241,7 +243,7 @@ static int ufs_qcom_host_reset(struct ufs_hba *hba)
if (!host->core_reset) {
dev_warn(hba->dev, "%s: reset control not set\n", __func__);
- goto out;
+ return 0;
}
reenable_intr = hba->is_irq_enabled;
@@ -252,7 +254,7 @@ static int ufs_qcom_host_reset(struct ufs_hba *hba)
if (ret) {
dev_err(hba->dev, "%s: core_reset assert failed, err = %d\n",
__func__, ret);
- goto out;
+ return ret;
}
/*
@@ -274,16 +276,35 @@ static int ufs_qcom_host_reset(struct ufs_hba *hba)
hba->is_irq_enabled = true;
}
-out:
- return ret;
+ return 0;
+}
+
+static u32 ufs_qcom_get_hs_gear(struct ufs_hba *hba)
+{
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+
+ if (host->hw_ver.major == 0x1) {
+ /*
+ * HS-G3 operations may not reliably work on legacy QCOM
+ * UFS host controller hardware even though capability
+ * exchange during link startup phase may end up
+ * negotiating maximum supported gear as G3.
+ * Hence downgrade the maximum supported gear to HS-G2.
+ */
+ return UFS_HS_G2;
+ } else if (host->hw_ver.major >= 0x4) {
+ return UFS_QCOM_MAX_GEAR(ufshcd_readl(hba, REG_UFS_PARAM0));
+ }
+
+ /* Default is HS-G3 */
+ return UFS_HS_G3;
}
static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
struct phy *phy = host->generic_phy;
- int ret = 0;
- bool is_rate_B = UFS_QCOM_LIMIT_HS_RATE == PA_HS_MODE_B;
+ int ret;
/* Reset UFS Host Controller and PHY */
ret = ufs_qcom_host_reset(hba);
@@ -291,17 +312,16 @@ static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
dev_warn(hba->dev, "%s: host reset returned %d\n",
__func__, ret);
- if (is_rate_B)
- phy_set_mode(phy, PHY_MODE_UFS_HS_B);
-
/* phy initialization - calibrate the phy */
ret = phy_init(phy);
if (ret) {
dev_err(hba->dev, "%s: phy init failed, ret = %d\n",
__func__, ret);
- goto out;
+ return ret;
}
+ phy_set_mode_ext(phy, PHY_MODE_UFS_HS_B, host->hs_gear);
+
/* power on phy - start serdes and phy's power and clocks */
ret = phy_power_on(phy);
if (ret) {
@@ -316,7 +336,7 @@ static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
out_disable_phy:
phy_exit(phy);
-out:
+
return ret;
}
@@ -374,7 +394,6 @@ static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba,
static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
u32 hs, u32 rate, bool update_link_startup_timer)
{
- int ret = 0;
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
struct ufs_clk_info *clki;
u32 core_clk_period_in_ns;
@@ -409,11 +428,11 @@ static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
* Aggregation logic.
*/
if (ufs_qcom_cap_qunipro(host) && !ufshcd_is_intr_aggr_allowed(hba))
- goto out;
+ return 0;
if (gear == 0) {
dev_err(hba->dev, "%s: invalid gear = %d\n", __func__, gear);
- goto out_error;
+ return -EINVAL;
}
list_for_each_entry(clki, &hba->clk_list_head, list) {
@@ -436,7 +455,7 @@ static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
}
if (ufs_qcom_cap_qunipro(host))
- goto out;
+ return 0;
core_clk_period_in_ns = NSEC_PER_SEC / core_clk_rate;
core_clk_period_in_ns <<= OFFSET_CLK_NS_REG;
@@ -451,7 +470,7 @@ static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
"%s: index %d exceeds table size %zu\n",
__func__, gear,
ARRAY_SIZE(hs_fr_table_rA));
- goto out_error;
+ return -EINVAL;
}
tx_clk_cycles_per_us = hs_fr_table_rA[gear-1][1];
} else if (rate == PA_HS_MODE_B) {
@@ -460,13 +479,13 @@ static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
"%s: index %d exceeds table size %zu\n",
__func__, gear,
ARRAY_SIZE(hs_fr_table_rB));
- goto out_error;
+ return -EINVAL;
}
tx_clk_cycles_per_us = hs_fr_table_rB[gear-1][1];
} else {
dev_err(hba->dev, "%s: invalid rate = %d\n",
__func__, rate);
- goto out_error;
+ return -EINVAL;
}
break;
case SLOWAUTO_MODE:
@@ -476,14 +495,14 @@ static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
"%s: index %d exceeds table size %zu\n",
__func__, gear,
ARRAY_SIZE(pwm_fr_table));
- goto out_error;
+ return -EINVAL;
}
tx_clk_cycles_per_us = pwm_fr_table[gear-1][1];
break;
case UNCHANGED:
default:
dev_err(hba->dev, "%s: invalid mode = %d\n", __func__, hs);
- goto out_error;
+ return -EINVAL;
}
if (ufshcd_readl(hba, REG_UFS_TX_SYMBOL_CLK_NS_US) !=
@@ -498,21 +517,17 @@ static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
mb();
}
- if (update_link_startup_timer) {
+ if (update_link_startup_timer && host->hw_ver.major != 0x5) {
ufshcd_writel(hba, ((core_clk_rate / MSEC_PER_SEC) * 100),
- REG_UFS_PA_LINK_STARTUP_TIMER);
+ REG_UFS_CFG0);
/*
* make sure that this configuration is applied before
* we return
*/
mb();
}
- goto out;
-out_error:
- ret = -EINVAL;
-out:
- return ret;
+ return 0;
}
static int ufs_qcom_link_startup_notify(struct ufs_hba *hba,
@@ -527,8 +542,7 @@ static int ufs_qcom_link_startup_notify(struct ufs_hba *hba,
0, true)) {
dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
__func__);
- err = -EINVAL;
- goto out;
+ return -EINVAL;
}
if (ufs_qcom_cap_qunipro(host))
@@ -554,7 +568,6 @@ static int ufs_qcom_link_startup_notify(struct ufs_hba *hba,
break;
}
-out:
return err;
}
@@ -691,8 +704,7 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
if (!dev_req_params) {
pr_err("%s: incoming dev_req_params is NULL\n", __func__);
- ret = -EINVAL;
- goto out;
+ return -EINVAL;
}
switch (status) {
@@ -700,29 +712,21 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
ufshcd_init_pwr_dev_param(&ufs_qcom_cap);
ufs_qcom_cap.hs_rate = UFS_QCOM_LIMIT_HS_RATE;
- if (host->hw_ver.major == 0x1) {
- /*
- * HS-G3 operations may not reliably work on legacy QCOM
- * UFS host controller hardware even though capability
- * exchange during link startup phase may end up
- * negotiating maximum supported gear as G3.
- * Hence downgrade the maximum supported gear to HS-G2.
- */
- if (ufs_qcom_cap.hs_tx_gear > UFS_HS_G2)
- ufs_qcom_cap.hs_tx_gear = UFS_HS_G2;
- if (ufs_qcom_cap.hs_rx_gear > UFS_HS_G2)
- ufs_qcom_cap.hs_rx_gear = UFS_HS_G2;
- }
+ /* This driver only supports symmetic gear setting i.e., hs_tx_gear == hs_rx_gear */
+ ufs_qcom_cap.hs_tx_gear = ufs_qcom_cap.hs_rx_gear = ufs_qcom_get_hs_gear(hba);
ret = ufshcd_get_pwr_dev_param(&ufs_qcom_cap,
dev_max_params,
dev_req_params);
if (ret) {
- pr_err("%s: failed to determine capabilities\n",
+ dev_err(hba->dev, "%s: failed to determine capabilities\n",
__func__);
- goto out;
+ return ret;
}
+ /* Use the agreed gear */
+ host->hs_gear = dev_req_params->gear_tx;
+
/* enable the device ref clock before changing to HS mode */
if (!ufshcd_is_hs_mode(&hba->pwr_info) &&
ufshcd_is_hs_mode(dev_req_params))
@@ -761,7 +765,7 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
ret = -EINVAL;
break;
}
-out:
+
return ret;
}
@@ -773,14 +777,11 @@ static int ufs_qcom_quirk_host_pa_saveconfigtime(struct ufs_hba *hba)
err = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1),
&pa_vs_config_reg1);
if (err)
- goto out;
+ return err;
/* Allow extension of MSB bits of PA_SaveConfigTime attribute */
- err = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1),
+ return ufshcd_dme_set(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1),
(pa_vs_config_reg1 | (1 << 12)));
-
-out:
- return err;
}
static int ufs_qcom_apply_dev_quirks(struct ufs_hba *hba)
@@ -839,6 +840,9 @@ static void ufs_qcom_advertise_quirks(struct ufs_hba *hba)
| UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE
| UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP);
}
+
+ if (host->hw_ver.major > 0x3)
+ hba->quirks |= UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH;
}
static void ufs_qcom_set_caps(struct ufs_hba *hba)
@@ -906,8 +910,6 @@ ufs_qcom_reset_assert(struct reset_controller_dev *rcdev, unsigned long id)
{
struct ufs_qcom_host *host = rcdev_to_ufs_host(rcdev);
- /* Currently this code only knows about a single reset. */
- WARN_ON(id);
ufs_qcom_assert_reset(host->hba);
/* provide 1ms delay to let the reset pulse propagate. */
usleep_range(1000, 1100);
@@ -919,8 +921,6 @@ ufs_qcom_reset_deassert(struct reset_controller_dev *rcdev, unsigned long id)
{
struct ufs_qcom_host *host = rcdev_to_ufs_host(rcdev);
- /* Currently this code only knows about a single reset. */
- WARN_ON(id);
ufs_qcom_deassert_reset(host->hba);
/*
@@ -957,9 +957,8 @@ static int ufs_qcom_init(struct ufs_hba *hba)
host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
if (!host) {
- err = -ENOMEM;
dev_err(dev, "%s: no memory for qcom ufs host\n", __func__);
- goto out;
+ return -ENOMEM;
}
/* Make a two way bind between the qcom host and the hba */
@@ -980,10 +979,8 @@ static int ufs_qcom_init(struct ufs_hba *hba)
host->rcdev.owner = dev->driver->owner;
host->rcdev.nr_resets = 1;
err = devm_reset_controller_register(dev, &host->rcdev);
- if (err) {
+ if (err)
dev_warn(dev, "Failed to register reset controller\n");
- err = 0;
- }
if (!has_acpi_companion(dev)) {
host->generic_phy = devm_phy_get(dev, "ufsphy");
@@ -1046,20 +1043,24 @@ static int ufs_qcom_init(struct ufs_hba *hba)
if (hba->dev->id < MAX_UFS_QCOM_HOSTS)
ufs_qcom_hosts[hba->dev->id] = host;
- host->dbg_print_en |= UFS_QCOM_DEFAULT_DBG_PRINT_EN;
ufs_qcom_get_default_testbus_cfg(host);
err = ufs_qcom_testbus_config(host);
- if (err) {
+ if (err)
+ /* Failure is non-fatal */
dev_warn(dev, "%s: failed to configure the testbus %d\n",
__func__, err);
- err = 0;
- }
- goto out;
+ /*
+ * Power up the PHY using the minimum supported gear (UFS_HS_G2).
+ * Switching to max gear will be performed during reinit if supported.
+ */
+ host->hs_gear = UFS_HS_G2;
+
+ return 0;
out_variant_clear:
ufshcd_set_variant(hba, NULL);
-out:
+
return err;
}
@@ -1085,7 +1086,7 @@ static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
&core_clk_ctrl_reg);
if (err)
- goto out;
+ return err;
core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_MASK;
core_clk_ctrl_reg |= clk_cycles;
@@ -1093,11 +1094,9 @@ static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
/* Clear CORE_CLK_DIV_EN */
core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT;
- err = ufshcd_dme_set(hba,
+ return ufshcd_dme_set(hba,
UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
core_clk_ctrl_reg);
-out:
- return err;
}
static int ufs_qcom_clk_scale_up_pre_change(struct ufs_hba *hba)
@@ -1180,7 +1179,7 @@ static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba,
if (err || !dev_req_params) {
ufshcd_uic_hibern8_exit(hba);
- goto out;
+ return err;
}
ufs_qcom_cfg_timers(hba,
@@ -1191,81 +1190,14 @@ static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba,
ufshcd_uic_hibern8_exit(hba);
}
-out:
- return err;
-}
-
-static void ufs_qcom_print_hw_debug_reg_all(struct ufs_hba *hba,
- void *priv, void (*print_fn)(struct ufs_hba *hba,
- int offset, int num_regs, const char *str, void *priv))
-{
- u32 reg;
- struct ufs_qcom_host *host;
-
- if (unlikely(!hba)) {
- pr_err("%s: hba is NULL\n", __func__);
- return;
- }
- if (unlikely(!print_fn)) {
- dev_err(hba->dev, "%s: print_fn is NULL\n", __func__);
- return;
- }
-
- host = ufshcd_get_variant(hba);
- if (!(host->dbg_print_en & UFS_QCOM_DBG_PRINT_REGS_EN))
- return;
-
- reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_REG_OCSC);
- print_fn(hba, reg, 44, "UFS_UFS_DBG_RD_REG_OCSC ", priv);
-
- reg = ufshcd_readl(hba, REG_UFS_CFG1);
- reg |= UTP_DBG_RAMS_EN;
- ufshcd_writel(hba, reg, REG_UFS_CFG1);
-
- reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_EDTL_RAM);
- print_fn(hba, reg, 32, "UFS_UFS_DBG_RD_EDTL_RAM ", priv);
-
- reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_DESC_RAM);
- print_fn(hba, reg, 128, "UFS_UFS_DBG_RD_DESC_RAM ", priv);
-
- reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_PRDT_RAM);
- print_fn(hba, reg, 64, "UFS_UFS_DBG_RD_PRDT_RAM ", priv);
-
- /* clear bit 17 - UTP_DBG_RAMS_EN */
- ufshcd_rmwl(hba, UTP_DBG_RAMS_EN, 0, REG_UFS_CFG1);
-
- reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UAWM);
- print_fn(hba, reg, 4, "UFS_DBG_RD_REG_UAWM ", priv);
-
- reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UARM);
- print_fn(hba, reg, 4, "UFS_DBG_RD_REG_UARM ", priv);
-
- reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TXUC);
- print_fn(hba, reg, 48, "UFS_DBG_RD_REG_TXUC ", priv);
-
- reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_RXUC);
- print_fn(hba, reg, 27, "UFS_DBG_RD_REG_RXUC ", priv);
-
- reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_DFC);
- print_fn(hba, reg, 19, "UFS_DBG_RD_REG_DFC ", priv);
-
- reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TRLUT);
- print_fn(hba, reg, 34, "UFS_DBG_RD_REG_TRLUT ", priv);
-
- reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TMRLUT);
- print_fn(hba, reg, 9, "UFS_DBG_RD_REG_TMRLUT ", priv);
+ return 0;
}
static void ufs_qcom_enable_test_bus(struct ufs_qcom_host *host)
{
- if (host->dbg_print_en & UFS_QCOM_DBG_PRINT_TEST_BUS_EN) {
- ufshcd_rmwl(host->hba, UFS_REG_TEST_BUS_EN,
- UFS_REG_TEST_BUS_EN, REG_UFS_CFG1);
- ufshcd_rmwl(host->hba, TEST_BUS_EN, TEST_BUS_EN, REG_UFS_CFG1);
- } else {
- ufshcd_rmwl(host->hba, UFS_REG_TEST_BUS_EN, 0, REG_UFS_CFG1);
- ufshcd_rmwl(host->hba, TEST_BUS_EN, 0, REG_UFS_CFG1);
- }
+ ufshcd_rmwl(host->hba, UFS_REG_TEST_BUS_EN,
+ UFS_REG_TEST_BUS_EN, REG_UFS_CFG1);
+ ufshcd_rmwl(host->hba, TEST_BUS_EN, TEST_BUS_EN, REG_UFS_CFG1);
}
static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host)
@@ -1374,10 +1306,53 @@ int ufs_qcom_testbus_config(struct ufs_qcom_host *host)
static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba)
{
+ u32 reg;
+ struct ufs_qcom_host *host;
+
+ host = ufshcd_get_variant(hba);
+
ufshcd_dump_regs(hba, REG_UFS_SYS1CLK_1US, 16 * 4,
"HCI Vendor Specific Registers ");
- ufs_qcom_print_hw_debug_reg_all(hba, NULL, ufs_qcom_dump_regs_wrapper);
+ reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_REG_OCSC);
+ ufshcd_dump_regs(hba, reg, 44 * 4, "UFS_UFS_DBG_RD_REG_OCSC ");
+
+ reg = ufshcd_readl(hba, REG_UFS_CFG1);
+ reg |= UTP_DBG_RAMS_EN;
+ ufshcd_writel(hba, reg, REG_UFS_CFG1);
+
+ reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_EDTL_RAM);
+ ufshcd_dump_regs(hba, reg, 32 * 4, "UFS_UFS_DBG_RD_EDTL_RAM ");
+
+ reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_DESC_RAM);
+ ufshcd_dump_regs(hba, reg, 128 * 4, "UFS_UFS_DBG_RD_DESC_RAM ");
+
+ reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_PRDT_RAM);
+ ufshcd_dump_regs(hba, reg, 64 * 4, "UFS_UFS_DBG_RD_PRDT_RAM ");
+
+ /* clear bit 17 - UTP_DBG_RAMS_EN */
+ ufshcd_rmwl(hba, UTP_DBG_RAMS_EN, 0, REG_UFS_CFG1);
+
+ reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UAWM);
+ ufshcd_dump_regs(hba, reg, 4 * 4, "UFS_DBG_RD_REG_UAWM ");
+
+ reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UARM);
+ ufshcd_dump_regs(hba, reg, 4 * 4, "UFS_DBG_RD_REG_UARM ");
+
+ reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TXUC);
+ ufshcd_dump_regs(hba, reg, 48 * 4, "UFS_DBG_RD_REG_TXUC ");
+
+ reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_RXUC);
+ ufshcd_dump_regs(hba, reg, 27 * 4, "UFS_DBG_RD_REG_RXUC ");
+
+ reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_DFC);
+ ufshcd_dump_regs(hba, reg, 19 * 4, "UFS_DBG_RD_REG_DFC ");
+
+ reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TRLUT);
+ ufshcd_dump_regs(hba, reg, 34 * 4, "UFS_DBG_RD_REG_TRLUT ");
+
+ reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TMRLUT);
+ ufshcd_dump_regs(hba, reg, 9 * 4, "UFS_DBG_RD_REG_TMRLUT ");
}
/**
@@ -1424,6 +1399,236 @@ static void ufs_qcom_config_scaling_param(struct ufs_hba *hba,
}
#endif
+static void ufs_qcom_reinit_notify(struct ufs_hba *hba)
+{
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+
+ phy_power_off(host->generic_phy);
+}
+
+/* Resources */
+static const struct ufshcd_res_info ufs_res_info[RES_MAX] = {
+ {.name = "ufs_mem",},
+ {.name = "mcq",},
+ /* Submission Queue DAO */
+ {.name = "mcq_sqd",},
+ /* Submission Queue Interrupt Status */
+ {.name = "mcq_sqis",},
+ /* Completion Queue DAO */
+ {.name = "mcq_cqd",},
+ /* Completion Queue Interrupt Status */
+ {.name = "mcq_cqis",},
+ /* MCQ vendor specific */
+ {.name = "mcq_vs",},
+};
+
+static int ufs_qcom_mcq_config_resource(struct ufs_hba *hba)
+{
+ struct platform_device *pdev = to_platform_device(hba->dev);
+ struct ufshcd_res_info *res;
+ struct resource *res_mem, *res_mcq;
+ int i, ret = 0;
+
+ memcpy(hba->res, ufs_res_info, sizeof(ufs_res_info));
+
+ for (i = 0; i < RES_MAX; i++) {
+ res = &hba->res[i];
+ res->resource = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM,
+ res->name);
+ if (!res->resource) {
+ dev_info(hba->dev, "Resource %s not provided\n", res->name);
+ if (i == RES_UFS)
+ return -ENOMEM;
+ continue;
+ } else if (i == RES_UFS) {
+ res_mem = res->resource;
+ res->base = hba->mmio_base;
+ continue;
+ }
+
+ res->base = devm_ioremap_resource(hba->dev, res->resource);
+ if (IS_ERR(res->base)) {
+ dev_err(hba->dev, "Failed to map res %s, err=%d\n",
+ res->name, (int)PTR_ERR(res->base));
+ res->base = NULL;
+ ret = PTR_ERR(res->base);
+ return ret;
+ }
+ }
+
+ /* MCQ resource provided in DT */
+ res = &hba->res[RES_MCQ];
+ /* Bail if MCQ resource is provided */
+ if (res->base)
+ goto out;
+
+ /* Explicitly allocate MCQ resource from ufs_mem */
+ res_mcq = devm_kzalloc(hba->dev, sizeof(*res_mcq), GFP_KERNEL);
+ if (!res_mcq)
+ return ret;
+
+ res_mcq->start = res_mem->start +
+ MCQ_SQATTR_OFFSET(hba->mcq_capabilities);
+ res_mcq->end = res_mcq->start + hba->nr_hw_queues * MCQ_QCFG_SIZE - 1;
+ res_mcq->flags = res_mem->flags;
+ res_mcq->name = "mcq";
+
+ ret = insert_resource(&iomem_resource, res_mcq);
+ if (ret) {
+ dev_err(hba->dev, "Failed to insert MCQ resource, err=%d\n",
+ ret);
+ goto insert_res_err;
+ }
+
+ res->base = devm_ioremap_resource(hba->dev, res_mcq);
+ if (IS_ERR(res->base)) {
+ dev_err(hba->dev, "MCQ registers mapping failed, err=%d\n",
+ (int)PTR_ERR(res->base));
+ ret = PTR_ERR(res->base);
+ goto ioremap_err;
+ }
+
+out:
+ hba->mcq_base = res->base;
+ return 0;
+ioremap_err:
+ res->base = NULL;
+ remove_resource(res_mcq);
+insert_res_err:
+ devm_kfree(hba->dev, res_mcq);
+ return ret;
+}
+
+static int ufs_qcom_op_runtime_config(struct ufs_hba *hba)
+{
+ struct ufshcd_res_info *mem_res, *sqdao_res;
+ struct ufshcd_mcq_opr_info_t *opr;
+ int i;
+
+ mem_res = &hba->res[RES_UFS];
+ sqdao_res = &hba->res[RES_MCQ_SQD];
+
+ if (!mem_res->base || !sqdao_res->base)
+ return -EINVAL;
+
+ for (i = 0; i < OPR_MAX; i++) {
+ opr = &hba->mcq_opr[i];
+ opr->offset = sqdao_res->resource->start -
+ mem_res->resource->start + 0x40 * i;
+ opr->stride = 0x100;
+ opr->base = sqdao_res->base + 0x40 * i;
+ }
+
+ return 0;
+}
+
+static int ufs_qcom_get_hba_mac(struct ufs_hba *hba)
+{
+ /* Qualcomm HC supports up to 64 */
+ return MAX_SUPP_MAC;
+}
+
+static int ufs_qcom_get_outstanding_cqs(struct ufs_hba *hba,
+ unsigned long *ocqs)
+{
+ struct ufshcd_res_info *mcq_vs_res = &hba->res[RES_MCQ_VS];
+
+ if (!mcq_vs_res->base)
+ return -EINVAL;
+
+ *ocqs = readl(mcq_vs_res->base + UFS_MEM_CQIS_VS);
+
+ return 0;
+}
+
+static void ufs_qcom_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
+{
+ struct device *dev = msi_desc_to_dev(desc);
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ ufshcd_mcq_config_esi(hba, msg);
+}
+
+static irqreturn_t ufs_qcom_mcq_esi_handler(int irq, void *__hba)
+{
+ struct ufs_hba *hba = __hba;
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+ u32 id = irq - host->esi_base;
+ struct ufs_hw_queue *hwq = &hba->uhq[id];
+
+ ufshcd_mcq_write_cqis(hba, 0x1, id);
+ ufshcd_mcq_poll_cqe_nolock(hba, hwq);
+
+ return IRQ_HANDLED;
+}
+
+static int ufs_qcom_config_esi(struct ufs_hba *hba)
+{
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+ struct msi_desc *desc;
+ struct msi_desc *failed_desc = NULL;
+ int nr_irqs, ret;
+
+ if (host->esi_enabled)
+ return 0;
+ else if (host->esi_base < 0)
+ return -EINVAL;
+
+ /*
+ * 1. We only handle CQs as of now.
+ * 2. Poll queues do not need ESI.
+ */
+ nr_irqs = hba->nr_hw_queues - hba->nr_queues[HCTX_TYPE_POLL];
+ ret = platform_msi_domain_alloc_irqs(hba->dev, nr_irqs,
+ ufs_qcom_write_msi_msg);
+ if (ret)
+ goto out;
+
+ msi_for_each_desc(desc, hba->dev, MSI_DESC_ALL) {
+ if (!desc->msi_index)
+ host->esi_base = desc->irq;
+
+ ret = devm_request_irq(hba->dev, desc->irq,
+ ufs_qcom_mcq_esi_handler,
+ IRQF_SHARED, "qcom-mcq-esi", hba);
+ if (ret) {
+ dev_err(hba->dev, "%s: Fail to request IRQ for %d, err = %d\n",
+ __func__, desc->irq, ret);
+ failed_desc = desc;
+ break;
+ }
+ }
+
+ if (ret) {
+ /* Rewind */
+ msi_for_each_desc(desc, hba->dev, MSI_DESC_ALL) {
+ if (desc == failed_desc)
+ break;
+ devm_free_irq(hba->dev, desc->irq, hba);
+ }
+ platform_msi_domain_free_irqs(hba->dev);
+ } else {
+ if (host->hw_ver.major == 6 && host->hw_ver.minor == 0 &&
+ host->hw_ver.step == 0) {
+ ufshcd_writel(hba,
+ ufshcd_readl(hba, REG_UFS_CFG3) | 0x1F000,
+ REG_UFS_CFG3);
+ }
+ ufshcd_mcq_enable_esi(hba);
+ }
+
+out:
+ if (ret) {
+ host->esi_base = -1;
+ dev_warn(hba->dev, "Failed to request Platform MSI %d\n", ret);
+ } else {
+ host->esi_enabled = true;
+ }
+
+ return ret;
+}
+
/*
* struct ufs_hba_qcom_vops - UFS QCOM specific variant operations
*
@@ -1447,6 +1652,12 @@ static const struct ufs_hba_variant_ops ufs_hba_qcom_vops = {
.device_reset = ufs_qcom_device_reset,
.config_scaling_param = ufs_qcom_config_scaling_param,
.program_key = ufs_qcom_ice_program_key,
+ .reinit_notify = ufs_qcom_reinit_notify,
+ .mcq_config_resource = ufs_qcom_mcq_config_resource,
+ .get_hba_mac = ufs_qcom_get_hba_mac,
+ .op_runtime_config = ufs_qcom_op_runtime_config,
+ .get_outstanding_cqs = ufs_qcom_get_outstanding_cqs,
+ .config_esi = ufs_qcom_config_esi,
};
/**
@@ -1463,9 +1674,9 @@ static int ufs_qcom_probe(struct platform_device *pdev)
/* Perform generic probe */
err = ufshcd_pltfrm_init(pdev, &ufs_hba_qcom_vops);
if (err)
- dev_err(dev, "ufshcd_pltfrm_init() failed %d\n", err);
+ return dev_err_probe(dev, err, "ufshcd_pltfrm_init() failed\n");
- return err;
+ return 0;
}
/**
@@ -1480,6 +1691,7 @@ static int ufs_qcom_remove(struct platform_device *pdev)
pm_runtime_get_sync(&(pdev)->dev);
ufshcd_remove(hba);
+ platform_msi_domain_free_irqs(hba->dev);
return 0;
}
@@ -1498,10 +1710,16 @@ MODULE_DEVICE_TABLE(acpi, ufs_qcom_acpi_match);
#endif
static const struct dev_pm_ops ufs_qcom_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume)
SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
.prepare = ufshcd_suspend_prepare,
.complete = ufshcd_resume_complete,
+#ifdef CONFIG_PM_SLEEP
+ .suspend = ufshcd_system_suspend,
+ .resume = ufshcd_system_resume,
+ .freeze = ufshcd_system_freeze,
+ .restore = ufshcd_system_restore,
+ .thaw = ufshcd_system_thaw,
+#endif
};
static struct platform_driver ufs_qcom_pltform = {
diff --git a/drivers/ufs/host/ufs-qcom.h b/drivers/ufs/host/ufs-qcom.h
index 44466a395bb5..39e774254fb2 100644
--- a/drivers/ufs/host/ufs-qcom.h
+++ b/drivers/ufs/host/ufs-qcom.h
@@ -16,13 +16,11 @@
#define HBRN8_POLL_TOUT_MS 100
#define DEFAULT_CLK_RATE_HZ 1000000
#define BUS_VECTOR_NAME_LEN 32
+#define MAX_SUPP_MAC 64
-#define UFS_HW_VER_MAJOR_SHFT (28)
-#define UFS_HW_VER_MAJOR_MASK (0x000F << UFS_HW_VER_MAJOR_SHFT)
-#define UFS_HW_VER_MINOR_SHFT (16)
-#define UFS_HW_VER_MINOR_MASK (0x0FFF << UFS_HW_VER_MINOR_SHFT)
-#define UFS_HW_VER_STEP_SHFT (0)
-#define UFS_HW_VER_STEP_MASK (0xFFFF << UFS_HW_VER_STEP_SHFT)
+#define UFS_HW_VER_MAJOR_MASK GENMASK(31, 28)
+#define UFS_HW_VER_MINOR_MASK GENMASK(27, 16)
+#define UFS_HW_VER_STEP_MASK GENMASK(15, 0)
/* vendor specific pre-defined parameters */
#define SLOW 1
@@ -36,8 +34,10 @@ enum {
REG_UFS_TX_SYMBOL_CLK_NS_US = 0xC4,
REG_UFS_LOCAL_PORT_ID_REG = 0xC8,
REG_UFS_PA_ERR_CODE = 0xCC,
- REG_UFS_RETRY_TIMER_REG = 0xD0,
- REG_UFS_PA_LINK_STARTUP_TIMER = 0xD8,
+ /* On older UFS revisions, this register is called "RETRY_TIMER_REG" */
+ REG_UFS_PARAM0 = 0xD0,
+ /* On older UFS revisions, this register is called "REG_UFS_PA_LINK_STARTUP_TIMER" */
+ REG_UFS_CFG0 = 0xD8,
REG_UFS_CFG1 = 0xDC,
REG_UFS_CFG2 = 0xE0,
REG_UFS_HW_VERSION = 0xE4,
@@ -53,6 +53,8 @@ enum {
* added in HW Version 3.0.0
*/
UFS_AH8_CFG = 0xFC,
+
+ REG_UFS_CFG3 = 0x271C,
};
/* QCOM UFS host controller vendor specific debug registers */
@@ -72,28 +74,43 @@ enum {
UFS_UFS_DBG_RD_EDTL_RAM = 0x1900,
};
+enum {
+ UFS_MEM_CQIS_VS = 0x8,
+};
+
#define UFS_CNTLR_2_x_x_VEN_REGS_OFFSET(x) (0x000 + x)
#define UFS_CNTLR_3_x_x_VEN_REGS_OFFSET(x) (0x400 + x)
+/* bit definitions for REG_UFS_CFG0 register */
+#define QUNIPRO_G4_SEL BIT(5)
+
/* bit definitions for REG_UFS_CFG1 register */
-#define QUNIPRO_SEL 0x1
-#define UTP_DBG_RAMS_EN 0x20000
+#define QUNIPRO_SEL BIT(0)
+#define UFS_PHY_SOFT_RESET BIT(1)
+#define UTP_DBG_RAMS_EN BIT(17)
#define TEST_BUS_EN BIT(18)
#define TEST_BUS_SEL GENMASK(22, 19)
#define UFS_REG_TEST_BUS_EN BIT(30)
+#define UFS_PHY_RESET_ENABLE 1
+#define UFS_PHY_RESET_DISABLE 0
+
/* bit definitions for REG_UFS_CFG2 register */
-#define UAWM_HW_CGC_EN (1 << 0)
-#define UARM_HW_CGC_EN (1 << 1)
-#define TXUC_HW_CGC_EN (1 << 2)
-#define RXUC_HW_CGC_EN (1 << 3)
-#define DFC_HW_CGC_EN (1 << 4)
-#define TRLUT_HW_CGC_EN (1 << 5)
-#define TMRLUT_HW_CGC_EN (1 << 6)
-#define OCSC_HW_CGC_EN (1 << 7)
+#define UAWM_HW_CGC_EN BIT(0)
+#define UARM_HW_CGC_EN BIT(1)
+#define TXUC_HW_CGC_EN BIT(2)
+#define RXUC_HW_CGC_EN BIT(3)
+#define DFC_HW_CGC_EN BIT(4)
+#define TRLUT_HW_CGC_EN BIT(5)
+#define TMRLUT_HW_CGC_EN BIT(6)
+#define OCSC_HW_CGC_EN BIT(7)
+
+/* bit definitions for REG_UFS_PARAM0 */
+#define MAX_HS_GEAR_MASK GENMASK(6, 4)
+#define UFS_QCOM_MAX_GEAR(x) FIELD_GET(MAX_HS_GEAR_MASK, (x))
/* bit definition for UFS_UFS_TEST_BUS_CTRL_n */
-#define TEST_BUS_SUB_SEL_MASK 0x1F /* All XXX_SEL fields are 5 bits wide */
+#define TEST_BUS_SUB_SEL_MASK GENMASK(4, 0) /* All XXX_SEL fields are 5 bits wide */
#define REG_UFS_CFG2_CGC_EN_ALL (UAWM_HW_CGC_EN | UARM_HW_CGC_EN |\
TXUC_HW_CGC_EN | RXUC_HW_CGC_EN |\
@@ -101,26 +118,11 @@ enum {
TMRLUT_HW_CGC_EN | OCSC_HW_CGC_EN)
/* bit offset */
-enum {
- OFFSET_UFS_PHY_SOFT_RESET = 1,
- OFFSET_CLK_NS_REG = 10,
-};
+#define OFFSET_CLK_NS_REG 0xa
/* bit masks */
-enum {
- MASK_UFS_PHY_SOFT_RESET = 0x2,
- MASK_TX_SYMBOL_CLK_1US_REG = 0x3FF,
- MASK_CLK_NS_REG = 0xFFFC00,
-};
-
-/* QCOM UFS debug print bit mask */
-#define UFS_QCOM_DBG_PRINT_REGS_EN BIT(0)
-#define UFS_QCOM_DBG_PRINT_ICE_REGS_EN BIT(1)
-#define UFS_QCOM_DBG_PRINT_TEST_BUS_EN BIT(2)
-
-#define UFS_QCOM_DBG_PRINT_ALL \
- (UFS_QCOM_DBG_PRINT_REGS_EN | UFS_QCOM_DBG_PRINT_ICE_REGS_EN | \
- UFS_QCOM_DBG_PRINT_TEST_BUS_EN)
+#define MASK_TX_SYMBOL_CLK_1US_REG GENMASK(9, 0)
+#define MASK_CLK_NS_REG GENMASK(23, 10)
/* QUniPro Vendor specific attributes */
#define PA_VS_CONFIG_REG1 0x9000
@@ -135,15 +137,15 @@ ufs_qcom_get_controller_revision(struct ufs_hba *hba,
{
u32 ver = ufshcd_readl(hba, REG_UFS_HW_VERSION);
- *major = (ver & UFS_HW_VER_MAJOR_MASK) >> UFS_HW_VER_MAJOR_SHFT;
- *minor = (ver & UFS_HW_VER_MINOR_MASK) >> UFS_HW_VER_MINOR_SHFT;
- *step = (ver & UFS_HW_VER_STEP_MASK) >> UFS_HW_VER_STEP_SHFT;
+ *major = FIELD_GET(UFS_HW_VER_MAJOR_MASK, ver);
+ *minor = FIELD_GET(UFS_HW_VER_MINOR_MASK, ver);
+ *step = FIELD_GET(UFS_HW_VER_STEP_MASK, ver);
};
static inline void ufs_qcom_assert_reset(struct ufs_hba *hba)
{
- ufshcd_rmwl(hba, MASK_UFS_PHY_SOFT_RESET,
- 1 << OFFSET_UFS_PHY_SOFT_RESET, REG_UFS_CFG1);
+ ufshcd_rmwl(hba, UFS_PHY_SOFT_RESET, FIELD_PREP(UFS_PHY_SOFT_RESET, UFS_PHY_RESET_ENABLE),
+ REG_UFS_CFG1);
/*
* Make sure assertion of ufs phy reset is written to
@@ -154,8 +156,8 @@ static inline void ufs_qcom_assert_reset(struct ufs_hba *hba)
static inline void ufs_qcom_deassert_reset(struct ufs_hba *hba)
{
- ufshcd_rmwl(hba, MASK_UFS_PHY_SOFT_RESET,
- 0 << OFFSET_UFS_PHY_SOFT_RESET, REG_UFS_CFG1);
+ ufshcd_rmwl(hba, UFS_PHY_SOFT_RESET, FIELD_PREP(UFS_PHY_SOFT_RESET, UFS_PHY_RESET_DISABLE),
+ REG_UFS_CFG1);
/*
* Make sure de-assertion of ufs phy reset is written to
@@ -212,8 +214,6 @@ struct ufs_qcom_host {
u32 dev_ref_clk_en_mask;
- /* Bitmask for enabling debug prints */
- u32 dbg_print_en;
struct ufs_qcom_testbus testbus;
/* Reset control of HCI */
@@ -221,6 +221,11 @@ struct ufs_qcom_host {
struct reset_controller_dev rcdev;
struct gpio_desc *device_reset;
+
+ u32 hs_gear;
+
+ int esi_base;
+ bool esi_enabled;
};
static inline u32
diff --git a/drivers/ufs/host/ufs-sprd.c b/drivers/ufs/host/ufs-sprd.c
new file mode 100644
index 000000000000..051f3f40d92c
--- /dev/null
+++ b/drivers/ufs/host/ufs-sprd.c
@@ -0,0 +1,458 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * UNISOC UFS Host Controller driver
+ *
+ * Copyright (C) 2022 Unisoc, Inc.
+ * Author: Zhe Wang <zhe.wang1@unisoc.com>
+ */
+
+#include <linux/arm-smccc.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <linux/regulator/consumer.h>
+
+#include <ufs/ufshcd.h>
+#include "ufshcd-pltfrm.h"
+#include "ufs-sprd.h"
+
+static const struct of_device_id ufs_sprd_of_match[];
+
+static struct ufs_sprd_priv *ufs_sprd_get_priv_data(struct ufs_hba *hba)
+{
+ struct ufs_sprd_host *host = ufshcd_get_variant(hba);
+
+ WARN_ON(!host->priv);
+ return host->priv;
+}
+
+static void ufs_sprd_regmap_update(struct ufs_sprd_priv *priv, unsigned int index,
+ unsigned int reg, unsigned int bits, unsigned int val)
+{
+ regmap_update_bits(priv->sysci[index].regmap, reg, bits, val);
+}
+
+static void ufs_sprd_regmap_read(struct ufs_sprd_priv *priv, unsigned int index,
+ unsigned int reg, unsigned int *val)
+{
+ regmap_read(priv->sysci[index].regmap, reg, val);
+}
+
+static void ufs_sprd_get_unipro_ver(struct ufs_hba *hba)
+{
+ struct ufs_sprd_host *host = ufshcd_get_variant(hba);
+
+ if (ufshcd_dme_get(hba, UIC_ARG_MIB(PA_LOCALVERINFO), &host->unipro_ver))
+ host->unipro_ver = 0;
+}
+
+static void ufs_sprd_ctrl_uic_compl(struct ufs_hba *hba, bool enable)
+{
+ u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
+
+ if (enable == true)
+ set |= UIC_COMMAND_COMPL;
+ else
+ set &= ~UIC_COMMAND_COMPL;
+ ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
+}
+
+static int ufs_sprd_get_reset_ctrl(struct device *dev, struct ufs_sprd_rst *rci)
+{
+ rci->rc = devm_reset_control_get(dev, rci->name);
+ if (IS_ERR(rci->rc)) {
+ dev_err(dev, "failed to get reset ctrl:%s\n", rci->name);
+ return PTR_ERR(rci->rc);
+ }
+
+ return 0;
+}
+
+static int ufs_sprd_get_syscon_reg(struct device *dev, struct ufs_sprd_syscon *sysci)
+{
+ sysci->regmap = syscon_regmap_lookup_by_phandle(dev->of_node, sysci->name);
+ if (IS_ERR(sysci->regmap)) {
+ dev_err(dev, "failed to get ufs syscon:%s\n", sysci->name);
+ return PTR_ERR(sysci->regmap);
+ }
+
+ return 0;
+}
+
+static int ufs_sprd_get_vreg(struct device *dev, struct ufs_sprd_vreg *vregi)
+{
+ vregi->vreg = devm_regulator_get(dev, vregi->name);
+ if (IS_ERR(vregi->vreg)) {
+ dev_err(dev, "failed to get vreg:%s\n", vregi->name);
+ return PTR_ERR(vregi->vreg);
+ }
+
+ return 0;
+}
+
+static int ufs_sprd_parse_dt(struct device *dev, struct ufs_hba *hba, struct ufs_sprd_host *host)
+{
+ u32 i;
+ struct ufs_sprd_priv *priv = host->priv;
+ int ret = 0;
+
+ /* Parse UFS reset ctrl info */
+ for (i = 0; i < SPRD_UFS_RST_MAX; i++) {
+ if (!priv->rci[i].name)
+ continue;
+ ret = ufs_sprd_get_reset_ctrl(dev, &priv->rci[i]);
+ if (ret)
+ goto out;
+ }
+
+ /* Parse UFS syscon reg info */
+ for (i = 0; i < SPRD_UFS_SYSCON_MAX; i++) {
+ if (!priv->sysci[i].name)
+ continue;
+ ret = ufs_sprd_get_syscon_reg(dev, &priv->sysci[i]);
+ if (ret)
+ goto out;
+ }
+
+ /* Parse UFS vreg info */
+ for (i = 0; i < SPRD_UFS_VREG_MAX; i++) {
+ if (!priv->vregi[i].name)
+ continue;
+ ret = ufs_sprd_get_vreg(dev, &priv->vregi[i]);
+ if (ret)
+ goto out;
+ }
+
+out:
+ return ret;
+}
+
+static int ufs_sprd_common_init(struct ufs_hba *hba)
+{
+ struct device *dev = hba->dev;
+ struct ufs_sprd_host *host;
+ struct platform_device __maybe_unused *pdev = to_platform_device(dev);
+ const struct of_device_id *of_id;
+ int ret = 0;
+
+ host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
+ if (!host)
+ return -ENOMEM;
+
+ of_id = of_match_node(ufs_sprd_of_match, pdev->dev.of_node);
+ if (of_id->data != NULL)
+ host->priv = container_of(of_id->data, struct ufs_sprd_priv,
+ ufs_hba_sprd_vops);
+
+ host->hba = hba;
+ ufshcd_set_variant(hba, host);
+
+ hba->caps |= UFSHCD_CAP_CLK_GATING |
+ UFSHCD_CAP_CRYPTO |
+ UFSHCD_CAP_WB_EN;
+ hba->quirks |= UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS;
+
+ ret = ufs_sprd_parse_dt(dev, hba, host);
+
+ return ret;
+}
+
+static int sprd_ufs_pwr_change_notify(struct ufs_hba *hba,
+ enum ufs_notify_change_status status,
+ struct ufs_pa_layer_attr *dev_max_params,
+ struct ufs_pa_layer_attr *dev_req_params)
+{
+ struct ufs_sprd_host *host = ufshcd_get_variant(hba);
+
+ if (status == PRE_CHANGE) {
+ memcpy(dev_req_params, dev_max_params,
+ sizeof(struct ufs_pa_layer_attr));
+ if (host->unipro_ver >= UFS_UNIPRO_VER_1_8)
+ ufshcd_dme_configure_adapt(hba, dev_req_params->gear_tx,
+ PA_INITIAL_ADAPT);
+ }
+
+ return 0;
+}
+
+static int ufs_sprd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
+ enum ufs_notify_change_status status)
+{
+ unsigned long flags;
+
+ if (status == PRE_CHANGE) {
+ if (ufshcd_is_auto_hibern8_supported(hba)) {
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ ufshcd_writel(hba, 0, REG_AUTO_HIBERNATE_IDLE_TIMER);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ }
+ }
+
+ return 0;
+}
+
+static void ufs_sprd_n6_host_reset(struct ufs_hba *hba)
+{
+ struct ufs_sprd_priv *priv = ufs_sprd_get_priv_data(hba);
+
+ dev_info(hba->dev, "ufs host reset!\n");
+
+ reset_control_assert(priv->rci[SPRD_UFSHCI_SOFT_RST].rc);
+ usleep_range(1000, 1100);
+ reset_control_deassert(priv->rci[SPRD_UFSHCI_SOFT_RST].rc);
+}
+
+static int ufs_sprd_n6_device_reset(struct ufs_hba *hba)
+{
+ struct ufs_sprd_priv *priv = ufs_sprd_get_priv_data(hba);
+
+ dev_info(hba->dev, "ufs device reset!\n");
+
+ reset_control_assert(priv->rci[SPRD_UFS_DEV_RST].rc);
+ usleep_range(1000, 1100);
+ reset_control_deassert(priv->rci[SPRD_UFS_DEV_RST].rc);
+
+ return 0;
+}
+
+static void ufs_sprd_n6_key_acc_enable(struct ufs_hba *hba)
+{
+ u32 val;
+ u32 retry = 10;
+ struct arm_smccc_res res;
+
+check_hce:
+ /* Key access only can be enabled under HCE enable */
+ val = ufshcd_readl(hba, REG_CONTROLLER_ENABLE);
+ if (!(val & CONTROLLER_ENABLE)) {
+ ufs_sprd_n6_host_reset(hba);
+ val |= CONTROLLER_ENABLE;
+ ufshcd_writel(hba, val, REG_CONTROLLER_ENABLE);
+ usleep_range(1000, 1100);
+ if (retry) {
+ retry--;
+ goto check_hce;
+ }
+ goto disable_crypto;
+ }
+
+ arm_smccc_smc(SPRD_SIP_SVC_STORAGE_UFS_CRYPTO_ENABLE,
+ 0, 0, 0, 0, 0, 0, 0, &res);
+ if (!res.a0)
+ return;
+
+disable_crypto:
+ dev_err(hba->dev, "key reg access enable fail, disable crypto\n");
+ hba->caps &= ~UFSHCD_CAP_CRYPTO;
+}
+
+static int ufs_sprd_n6_init(struct ufs_hba *hba)
+{
+ struct ufs_sprd_priv *priv;
+ int ret = 0;
+
+ ret = ufs_sprd_common_init(hba);
+ if (ret != 0)
+ return ret;
+
+ priv = ufs_sprd_get_priv_data(hba);
+
+ ret = regulator_enable(priv->vregi[SPRD_UFS_VDD_MPHY].vreg);
+ if (ret)
+ return -ENODEV;
+
+ if (hba->caps & UFSHCD_CAP_CRYPTO)
+ ufs_sprd_n6_key_acc_enable(hba);
+
+ return 0;
+}
+
+static int ufs_sprd_n6_phy_init(struct ufs_hba *hba)
+{
+ int ret = 0;
+ uint32_t val = 0;
+ uint32_t retry = 10;
+ uint32_t offset;
+ struct ufs_sprd_priv *priv = ufs_sprd_get_priv_data(hba);
+
+ ufshcd_dme_set(hba, UIC_ARG_MIB(CBREFCLKCTRL2), 0x90);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(CBCRCTRL), 0x01);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RXSQCONTROL,
+ UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)), 0x01);
+ ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RXSQCONTROL,
+ UIC_ARG_MPHY_RX_GEN_SEL_INDEX(1)), 0x01);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(VS_MPHYCFGUPDT), 0x01);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(CBRATESEL), 0x01);
+
+ do {
+ /* phy_sram_init_done */
+ ufs_sprd_regmap_read(priv, SPRD_UFS_ANLG, 0xc, &val);
+ if ((val & 0x1) == 0x1) {
+ for (offset = 0x40; offset < 0x42; offset++) {
+ /* Lane afe calibration */
+ ufshcd_dme_set(hba, UIC_ARG_MIB(CBCREGADDRLSB), 0x1c);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(CBCREGADDRMSB), offset);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(CBCREGWRLSB), 0x04);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(CBCREGWRMSB), 0x00);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(CBCREGRDWRSEL), 0x01);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(VS_MPHYCFGUPDT), 0x01);
+ }
+
+ goto update_phy;
+ }
+ udelay(1000);
+ retry--;
+ } while (retry > 0);
+
+ ret = -ETIMEDOUT;
+ goto out;
+
+update_phy:
+ /* phy_sram_ext_ld_done */
+ ufs_sprd_regmap_update(priv, SPRD_UFS_ANLG, 0xc, 0x2, 0);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(VS_MPHYCFGUPDT), 0x01);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(VS_MPHYDISABLE), 0x0);
+out:
+ return ret;
+}
+
+
+static int sprd_ufs_n6_hce_enable_notify(struct ufs_hba *hba,
+ enum ufs_notify_change_status status)
+{
+ int err = 0;
+ struct ufs_sprd_priv *priv = ufs_sprd_get_priv_data(hba);
+
+ if (status == PRE_CHANGE) {
+ /* phy_sram_ext_ld_done */
+ ufs_sprd_regmap_update(priv, SPRD_UFS_ANLG, 0xc, 0x2, 0x2);
+ /* phy_sram_bypass */
+ ufs_sprd_regmap_update(priv, SPRD_UFS_ANLG, 0xc, 0x4, 0x4);
+
+ ufs_sprd_n6_host_reset(hba);
+
+ if (hba->caps & UFSHCD_CAP_CRYPTO)
+ ufs_sprd_n6_key_acc_enable(hba);
+ }
+
+ if (status == POST_CHANGE) {
+ err = ufs_sprd_n6_phy_init(hba);
+ if (err) {
+ dev_err(hba->dev, "Phy setup failed (%d)\n", err);
+ goto out;
+ }
+
+ ufs_sprd_get_unipro_ver(hba);
+ }
+out:
+ return err;
+}
+
+static void sprd_ufs_n6_h8_notify(struct ufs_hba *hba,
+ enum uic_cmd_dme cmd,
+ enum ufs_notify_change_status status)
+{
+ struct ufs_sprd_priv *priv = ufs_sprd_get_priv_data(hba);
+
+ if (status == PRE_CHANGE) {
+ if (cmd == UIC_CMD_DME_HIBER_ENTER)
+ /*
+ * Disable UIC COMPL INTR to prevent access to UFSHCI after
+ * checking HCS.UPMCRS
+ */
+ ufs_sprd_ctrl_uic_compl(hba, false);
+
+ if (cmd == UIC_CMD_DME_HIBER_EXIT) {
+ ufs_sprd_regmap_update(priv, SPRD_UFS_AON_APB, APB_UFSDEV_REG,
+ APB_UFSDEV_REFCLK_EN, APB_UFSDEV_REFCLK_EN);
+ ufs_sprd_regmap_update(priv, SPRD_UFS_AON_APB, APB_USB31PLL_CTRL,
+ APB_USB31PLLV_REF2MPHY, APB_USB31PLLV_REF2MPHY);
+ }
+ }
+
+ if (status == POST_CHANGE) {
+ if (cmd == UIC_CMD_DME_HIBER_EXIT)
+ ufs_sprd_ctrl_uic_compl(hba, true);
+
+ if (cmd == UIC_CMD_DME_HIBER_ENTER) {
+ ufs_sprd_regmap_update(priv, SPRD_UFS_AON_APB, APB_UFSDEV_REG,
+ APB_UFSDEV_REFCLK_EN, 0);
+ ufs_sprd_regmap_update(priv, SPRD_UFS_AON_APB, APB_USB31PLL_CTRL,
+ APB_USB31PLLV_REF2MPHY, 0);
+ }
+ }
+}
+
+static struct ufs_sprd_priv n6_ufs = {
+ .rci[SPRD_UFSHCI_SOFT_RST] = { .name = "controller", },
+ .rci[SPRD_UFS_DEV_RST] = { .name = "device", },
+
+ .sysci[SPRD_UFS_ANLG] = { .name = "sprd,ufs-anlg-syscon", },
+ .sysci[SPRD_UFS_AON_APB] = { .name = "sprd,aon-apb-syscon", },
+
+ .vregi[SPRD_UFS_VDD_MPHY] = { .name = "vdd-mphy", },
+
+ .ufs_hba_sprd_vops = {
+ .name = "sprd,ums9620-ufs",
+ .init = ufs_sprd_n6_init,
+ .hce_enable_notify = sprd_ufs_n6_hce_enable_notify,
+ .pwr_change_notify = sprd_ufs_pwr_change_notify,
+ .hibern8_notify = sprd_ufs_n6_h8_notify,
+ .device_reset = ufs_sprd_n6_device_reset,
+ .suspend = ufs_sprd_suspend,
+ },
+};
+
+static const struct of_device_id __maybe_unused ufs_sprd_of_match[] = {
+ { .compatible = "sprd,ums9620-ufs", .data = &n6_ufs.ufs_hba_sprd_vops},
+ {},
+};
+MODULE_DEVICE_TABLE(of, ufs_sprd_of_match);
+
+static int ufs_sprd_probe(struct platform_device *pdev)
+{
+ int err;
+ struct device *dev = &pdev->dev;
+ const struct of_device_id *of_id;
+
+ of_id = of_match_node(ufs_sprd_of_match, dev->of_node);
+ err = ufshcd_pltfrm_init(pdev, of_id->data);
+ if (err)
+ dev_err(dev, "ufshcd_pltfrm_init() failed %d\n", err);
+
+ return err;
+}
+
+static int ufs_sprd_remove(struct platform_device *pdev)
+{
+ struct ufs_hba *hba = platform_get_drvdata(pdev);
+
+ pm_runtime_get_sync(&(pdev)->dev);
+ ufshcd_remove(hba);
+ return 0;
+}
+
+static const struct dev_pm_ops ufs_sprd_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume)
+ SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
+ .prepare = ufshcd_suspend_prepare,
+ .complete = ufshcd_resume_complete,
+};
+
+static struct platform_driver ufs_sprd_pltform = {
+ .probe = ufs_sprd_probe,
+ .remove = ufs_sprd_remove,
+ .shutdown = ufshcd_pltfrm_shutdown,
+ .driver = {
+ .name = "ufshcd-sprd",
+ .pm = &ufs_sprd_pm_ops,
+ .of_match_table = of_match_ptr(ufs_sprd_of_match),
+ },
+};
+module_platform_driver(ufs_sprd_pltform);
+
+MODULE_AUTHOR("Zhe Wang <zhe.wang1@unisoc.com>");
+MODULE_DESCRIPTION("Unisoc UFS Host Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/ufs/host/ufs-sprd.h b/drivers/ufs/host/ufs-sprd.h
new file mode 100644
index 000000000000..26ad5c3af4c1
--- /dev/null
+++ b/drivers/ufs/host/ufs-sprd.h
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * UNISOC UFS Host Controller driver
+ *
+ * Copyright (C) 2022 Unisoc, Inc.
+ * Author: Zhe Wang <zhe.wang1@unisoc.com>
+ */
+
+#ifndef _UFS_SPRD_H_
+#define _UFS_SPRD_H_
+
+/* Vendor specific attributes */
+#define RXSQCONTROL 0x8009
+#define CBRATESEL 0x8114
+#define CBCREGADDRLSB 0x8116
+#define CBCREGADDRMSB 0x8117
+#define CBCREGWRLSB 0x8118
+#define CBCREGWRMSB 0x8119
+#define CBCREGRDWRSEL 0x811C
+#define CBCRCTRL 0x811F
+#define CBREFCLKCTRL2 0x8132
+#define VS_MPHYDISABLE 0xD0C1
+
+#define APB_UFSDEV_REG 0xCE8
+#define APB_UFSDEV_REFCLK_EN 0x2
+#define APB_USB31PLL_CTRL 0xCFC
+#define APB_USB31PLLV_REF2MPHY 0x1
+
+#define SPRD_SIP_SVC_STORAGE_UFS_CRYPTO_ENABLE \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_32, \
+ ARM_SMCCC_OWNER_SIP, \
+ 0x0301)
+
+enum SPRD_UFS_RST_INDEX {
+ SPRD_UFSHCI_SOFT_RST,
+ SPRD_UFS_DEV_RST,
+
+ SPRD_UFS_RST_MAX
+};
+
+enum SPRD_UFS_SYSCON_INDEX {
+ SPRD_UFS_ANLG,
+ SPRD_UFS_AON_APB,
+
+ SPRD_UFS_SYSCON_MAX
+};
+
+enum SPRD_UFS_VREG_INDEX {
+ SPRD_UFS_VDD_MPHY,
+
+ SPRD_UFS_VREG_MAX
+};
+
+struct ufs_sprd_rst {
+ const char *name;
+ struct reset_control *rc;
+};
+
+struct ufs_sprd_syscon {
+ const char *name;
+ struct regmap *regmap;
+};
+
+struct ufs_sprd_vreg {
+ const char *name;
+ struct regulator *vreg;
+};
+
+struct ufs_sprd_priv {
+ struct ufs_sprd_rst rci[SPRD_UFS_RST_MAX];
+ struct ufs_sprd_syscon sysci[SPRD_UFS_SYSCON_MAX];
+ struct ufs_sprd_vreg vregi[SPRD_UFS_VREG_MAX];
+ const struct ufs_hba_variant_ops ufs_hba_sprd_vops;
+};
+
+struct ufs_sprd_host {
+ struct ufs_hba *hba;
+ struct ufs_sprd_priv *priv;
+ void __iomem *ufs_dbg_mmio;
+
+ enum ufs_unipro_ver unipro_ver;
+};
+
+#endif /* _UFS_SPRD_H_ */
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
index 43afbb7c5ab9..62082d64ece0 100644
--- a/drivers/uio/uio.c
+++ b/drivers/uio/uio.c
@@ -713,7 +713,7 @@ static const struct vm_operations_struct uio_logical_vm_ops = {
static int uio_mmap_logical(struct vm_area_struct *vma)
{
- vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+ vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
vma->vm_ops = &uio_logical_vm_ops;
return 0;
}
diff --git a/drivers/uio/uio_hv_generic.c b/drivers/uio/uio_hv_generic.c
index c08a6cfd119f..20d9762331bd 100644
--- a/drivers/uio/uio_hv_generic.c
+++ b/drivers/uio/uio_hv_generic.c
@@ -355,20 +355,19 @@ fail_free_ring:
return ret;
}
-static int
+static void
hv_uio_remove(struct hv_device *dev)
{
struct hv_uio_private_data *pdata = hv_get_drvdata(dev);
if (!pdata)
- return 0;
+ return;
sysfs_remove_bin_file(&dev->channel->kobj, &ring_buffer_bin_attr);
uio_unregister_device(&pdata->info);
hv_uio_cleanup(dev, pdata);
vmbus_free_ring(dev->channel);
- return 0;
}
static struct hv_driver hv_uio_drv = {
diff --git a/drivers/usb/cdns3/cdnsp-gadget.c b/drivers/usb/cdns3/cdnsp-gadget.c
index f9aa50ff14d4..fff9ec9c391f 100644
--- a/drivers/usb/cdns3/cdnsp-gadget.c
+++ b/drivers/usb/cdns3/cdnsp-gadget.c
@@ -378,7 +378,7 @@ int cdnsp_ep_enqueue(struct cdnsp_ep *pep, struct cdnsp_request *preq)
ret = cdnsp_queue_bulk_tx(pdev, preq);
break;
case USB_ENDPOINT_XFER_ISOC:
- ret = cdnsp_queue_isoc_tx_prepare(pdev, preq);
+ ret = cdnsp_queue_isoc_tx(pdev, preq);
}
if (ret)
diff --git a/drivers/usb/cdns3/cdnsp-gadget.h b/drivers/usb/cdns3/cdnsp-gadget.h
index f740fa6089d8..e1b5801fdddf 100644
--- a/drivers/usb/cdns3/cdnsp-gadget.h
+++ b/drivers/usb/cdns3/cdnsp-gadget.h
@@ -1532,8 +1532,8 @@ void cdnsp_queue_stop_endpoint(struct cdnsp_device *pdev,
unsigned int ep_index);
int cdnsp_queue_ctrl_tx(struct cdnsp_device *pdev, struct cdnsp_request *preq);
int cdnsp_queue_bulk_tx(struct cdnsp_device *pdev, struct cdnsp_request *preq);
-int cdnsp_queue_isoc_tx_prepare(struct cdnsp_device *pdev,
- struct cdnsp_request *preq);
+int cdnsp_queue_isoc_tx(struct cdnsp_device *pdev,
+ struct cdnsp_request *preq);
void cdnsp_queue_configure_endpoint(struct cdnsp_device *pdev,
dma_addr_t in_ctx_ptr);
void cdnsp_queue_reset_ep(struct cdnsp_device *pdev, unsigned int ep_index);
diff --git a/drivers/usb/cdns3/cdnsp-ring.c b/drivers/usb/cdns3/cdnsp-ring.c
index b23e543b3a3d..07f6068342d4 100644
--- a/drivers/usb/cdns3/cdnsp-ring.c
+++ b/drivers/usb/cdns3/cdnsp-ring.c
@@ -1333,6 +1333,20 @@ static int cdnsp_handle_tx_event(struct cdnsp_device *pdev,
ep_ring->dequeue, td->last_trb,
ep_trb_dma);
+ desc = td->preq->pep->endpoint.desc;
+
+ if (ep_seg) {
+ ep_trb = &ep_seg->trbs[(ep_trb_dma - ep_seg->dma)
+ / sizeof(*ep_trb)];
+
+ trace_cdnsp_handle_transfer(ep_ring,
+ (struct cdnsp_generic_trb *)ep_trb);
+
+ if (pep->skip && usb_endpoint_xfer_isoc(desc) &&
+ td->last_trb != ep_trb)
+ return -EAGAIN;
+ }
+
/*
* Skip the Force Stopped Event. The event_trb(ep_trb_dma)
* of FSE is not in the current TD pointed by ep_ring->dequeue
@@ -1347,7 +1361,6 @@ static int cdnsp_handle_tx_event(struct cdnsp_device *pdev,
goto cleanup;
}
- desc = td->preq->pep->endpoint.desc;
if (!ep_seg) {
if (!pep->skip || !usb_endpoint_xfer_isoc(desc)) {
/* Something is busted, give up! */
@@ -1374,12 +1387,6 @@ static int cdnsp_handle_tx_event(struct cdnsp_device *pdev,
goto cleanup;
}
- ep_trb = &ep_seg->trbs[(ep_trb_dma - ep_seg->dma)
- / sizeof(*ep_trb)];
-
- trace_cdnsp_handle_transfer(ep_ring,
- (struct cdnsp_generic_trb *)ep_trb);
-
if (cdnsp_trb_is_noop(ep_trb))
goto cleanup;
@@ -1726,11 +1733,6 @@ static unsigned int count_sg_trbs_needed(struct cdnsp_request *preq)
return num_trbs;
}
-static unsigned int count_isoc_trbs_needed(struct cdnsp_request *preq)
-{
- return cdnsp_count_trbs(preq->request.dma, preq->request.length);
-}
-
static void cdnsp_check_trb_math(struct cdnsp_request *preq, int running_total)
{
if (running_total != preq->request.length)
@@ -2192,28 +2194,48 @@ static unsigned int
}
/* Queue function isoc transfer */
-static int cdnsp_queue_isoc_tx(struct cdnsp_device *pdev,
- struct cdnsp_request *preq)
+int cdnsp_queue_isoc_tx(struct cdnsp_device *pdev,
+ struct cdnsp_request *preq)
{
- int trb_buff_len, td_len, td_remain_len, ret;
+ unsigned int trb_buff_len, td_len, td_remain_len, block_len;
unsigned int burst_count, last_burst_pkt;
unsigned int total_pkt_count, max_pkt;
struct cdnsp_generic_trb *start_trb;
+ struct scatterlist *sg = NULL;
bool more_trbs_coming = true;
struct cdnsp_ring *ep_ring;
+ unsigned int num_sgs = 0;
int running_total = 0;
u32 field, length_field;
+ u64 addr, send_addr;
int start_cycle;
int trbs_per_td;
- u64 addr;
- int i;
+ int i, sent_len, ret;
ep_ring = preq->pep->ring;
+
+ td_len = preq->request.length;
+
+ if (preq->request.num_sgs) {
+ num_sgs = preq->request.num_sgs;
+ sg = preq->request.sg;
+ addr = (u64)sg_dma_address(sg);
+ block_len = sg_dma_len(sg);
+ trbs_per_td = count_sg_trbs_needed(preq);
+ } else {
+ addr = (u64)preq->request.dma;
+ block_len = td_len;
+ trbs_per_td = count_trbs_needed(preq);
+ }
+
+ ret = cdnsp_prepare_transfer(pdev, preq, trbs_per_td);
+ if (ret)
+ return ret;
+
start_trb = &ep_ring->enqueue->generic;
start_cycle = ep_ring->cycle_state;
- td_len = preq->request.length;
- addr = (u64)preq->request.dma;
td_remain_len = td_len;
+ send_addr = addr;
max_pkt = usb_endpoint_maxp(preq->pep->endpoint.desc);
total_pkt_count = DIV_ROUND_UP(td_len, max_pkt);
@@ -2225,11 +2247,6 @@ static int cdnsp_queue_isoc_tx(struct cdnsp_device *pdev,
burst_count = cdnsp_get_burst_count(pdev, preq, total_pkt_count);
last_burst_pkt = cdnsp_get_last_burst_packet_count(pdev, preq,
total_pkt_count);
- trbs_per_td = count_isoc_trbs_needed(preq);
-
- ret = cdnsp_prepare_transfer(pdev, preq, trbs_per_td);
- if (ret)
- goto cleanup;
/*
* Set isoc specific data for the first TRB in a TD.
@@ -2248,6 +2265,7 @@ static int cdnsp_queue_isoc_tx(struct cdnsp_device *pdev,
/* Calculate TRB length. */
trb_buff_len = TRB_BUFF_LEN_UP_TO_BOUNDARY(addr);
+ trb_buff_len = min(trb_buff_len, block_len);
if (trb_buff_len > td_remain_len)
trb_buff_len = td_remain_len;
@@ -2256,7 +2274,8 @@ static int cdnsp_queue_isoc_tx(struct cdnsp_device *pdev,
trb_buff_len, td_len, preq,
more_trbs_coming, 0);
- length_field = TRB_LEN(trb_buff_len) | TRB_INTR_TARGET(0);
+ length_field = TRB_LEN(trb_buff_len) | TRB_TD_SIZE(remainder) |
+ TRB_INTR_TARGET(0);
/* Only first TRB is isoc, overwrite otherwise. */
if (i) {
@@ -2281,12 +2300,27 @@ static int cdnsp_queue_isoc_tx(struct cdnsp_device *pdev,
}
cdnsp_queue_trb(pdev, ep_ring, more_trbs_coming,
- lower_32_bits(addr), upper_32_bits(addr),
+ lower_32_bits(send_addr), upper_32_bits(send_addr),
length_field, field);
running_total += trb_buff_len;
addr += trb_buff_len;
td_remain_len -= trb_buff_len;
+
+ sent_len = trb_buff_len;
+ while (sg && sent_len >= block_len) {
+ /* New sg entry */
+ --num_sgs;
+ sent_len -= block_len;
+ if (num_sgs != 0) {
+ sg = sg_next(sg);
+ block_len = sg_dma_len(sg);
+ addr = (u64)sg_dma_address(sg);
+ addr += sent_len;
+ }
+ }
+ block_len -= sent_len;
+ send_addr = addr;
}
/* Check TD length */
@@ -2324,30 +2358,6 @@ cleanup:
return ret;
}
-int cdnsp_queue_isoc_tx_prepare(struct cdnsp_device *pdev,
- struct cdnsp_request *preq)
-{
- struct cdnsp_ring *ep_ring;
- u32 ep_state;
- int num_trbs;
- int ret;
-
- ep_ring = preq->pep->ring;
- ep_state = GET_EP_CTX_STATE(preq->pep->out_ctx);
- num_trbs = count_isoc_trbs_needed(preq);
-
- /*
- * Check the ring to guarantee there is enough room for the whole
- * request. Do not insert any td of the USB Request to the ring if the
- * check failed.
- */
- ret = cdnsp_prepare_ring(pdev, ep_ring, ep_state, num_trbs, GFP_ATOMIC);
- if (ret)
- return ret;
-
- return cdnsp_queue_isoc_tx(pdev, preq);
-}
-
/**** Command Ring Operations ****/
/*
* Generic function for queuing a command TRB on the command ring.
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
index 0dc482542d85..2eeccf4ec9d6 100644
--- a/drivers/usb/chipidea/ci_hdrc_imx.c
+++ b/drivers/usb/chipidea/ci_hdrc_imx.c
@@ -413,15 +413,19 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
data->phy = devm_usb_get_phy_by_phandle(dev, "fsl,usbphy", 0);
if (IS_ERR(data->phy)) {
ret = PTR_ERR(data->phy);
- if (ret != -ENODEV)
+ if (ret != -ENODEV) {
+ dev_err_probe(dev, ret, "Failed to parse fsl,usbphy\n");
goto err_clk;
+ }
data->phy = devm_usb_get_phy_by_phandle(dev, "phys", 0);
if (IS_ERR(data->phy)) {
ret = PTR_ERR(data->phy);
- if (ret == -ENODEV)
+ if (ret == -ENODEV) {
data->phy = NULL;
- else
+ } else {
+ dev_err_probe(dev, ret, "Failed to parse phys\n");
goto err_clk;
+ }
}
}
diff --git a/drivers/usb/chipidea/debug.c b/drivers/usb/chipidea/debug.c
index faf6b078b6c4..bbc610e5bd69 100644
--- a/drivers/usb/chipidea/debug.c
+++ b/drivers/usb/chipidea/debug.c
@@ -364,5 +364,5 @@ void dbg_create_files(struct ci_hdrc *ci)
*/
void dbg_remove_files(struct ci_hdrc *ci)
{
- debugfs_remove(debugfs_lookup(dev_name(ci->dev), usb_debug_root));
+ debugfs_lookup_and_remove(dev_name(ci->dev), usb_debug_root);
}
diff --git a/drivers/usb/chipidea/usbmisc_imx.c b/drivers/usb/chipidea/usbmisc_imx.c
index acdb13316cd0..c57c1a71a513 100644
--- a/drivers/usb/chipidea/usbmisc_imx.c
+++ b/drivers/usb/chipidea/usbmisc_imx.c
@@ -1263,14 +1263,8 @@ static int usbmisc_imx_probe(struct platform_device *pdev)
return 0;
}
-static int usbmisc_imx_remove(struct platform_device *pdev)
-{
- return 0;
-}
-
static struct platform_driver usbmisc_imx_driver = {
.probe = usbmisc_imx_probe,
- .remove = usbmisc_imx_remove,
.driver = {
.name = "usbmisc_imx",
.of_match_table = usbmisc_imx_dt_ids,
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 36bf051b345b..11da5fb284d0 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -651,13 +651,13 @@ static int acm_tty_open(struct tty_struct *tty, struct file *filp)
return tty_port_open(&acm->port, tty, filp);
}
-static void acm_port_dtr_rts(struct tty_port *port, int raise)
+static void acm_port_dtr_rts(struct tty_port *port, bool active)
{
struct acm *acm = container_of(port, struct acm, port);
int val;
int res;
- if (raise)
+ if (active)
val = USB_CDC_CTRL_DTR | USB_CDC_CTRL_RTS;
else
val = 0;
diff --git a/drivers/usb/common/ulpi.c b/drivers/usb/common/ulpi.c
index d7c8461976ce..a98b2108376a 100644
--- a/drivers/usb/common/ulpi.c
+++ b/drivers/usb/common/ulpi.c
@@ -55,9 +55,9 @@ static int ulpi_match(struct device *dev, struct device_driver *driver)
return 0;
}
-static int ulpi_uevent(struct device *dev, struct kobj_uevent_env *env)
+static int ulpi_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
- struct ulpi *ulpi = to_ulpi_dev(dev);
+ const struct ulpi *ulpi = to_ulpi_dev(dev);
int ret;
ret = of_device_uevent_modalias(dev, env);
@@ -271,7 +271,7 @@ static int ulpi_regs_show(struct seq_file *seq, void *data)
}
DEFINE_SHOW_ATTRIBUTE(ulpi_regs);
-#define ULPI_ROOT debugfs_lookup(KBUILD_MODNAME, NULL)
+static struct dentry *ulpi_root;
static int ulpi_register(struct device *dev, struct ulpi *ulpi)
{
@@ -301,7 +301,7 @@ static int ulpi_register(struct device *dev, struct ulpi *ulpi)
return ret;
}
- root = debugfs_create_dir(dev_name(dev), ULPI_ROOT);
+ root = debugfs_create_dir(dev_name(dev), ulpi_root);
debugfs_create_file("regs", 0444, root, ulpi, &ulpi_regs_fops);
dev_dbg(&ulpi->dev, "registered ULPI PHY: vendor %04x, product %04x\n",
@@ -349,8 +349,7 @@ EXPORT_SYMBOL_GPL(ulpi_register_interface);
*/
void ulpi_unregister_interface(struct ulpi *ulpi)
{
- debugfs_remove_recursive(debugfs_lookup(dev_name(&ulpi->dev),
- ULPI_ROOT));
+ debugfs_lookup_and_remove(dev_name(&ulpi->dev), ulpi_root);
device_unregister(&ulpi->dev);
}
EXPORT_SYMBOL_GPL(ulpi_unregister_interface);
@@ -360,12 +359,11 @@ EXPORT_SYMBOL_GPL(ulpi_unregister_interface);
static int __init ulpi_init(void)
{
int ret;
- struct dentry *root;
- root = debugfs_create_dir(KBUILD_MODNAME, NULL);
+ ulpi_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
ret = bus_register(&ulpi_bus);
if (ret)
- debugfs_remove(root);
+ debugfs_remove(ulpi_root);
return ret;
}
subsys_initcall(ulpi_init);
@@ -373,7 +371,7 @@ subsys_initcall(ulpi_init);
static void __exit ulpi_exit(void)
{
bus_unregister(&ulpi_bus);
- debugfs_remove_recursive(ULPI_ROOT);
+ debugfs_remove(ulpi_root);
}
module_exit(ulpi_exit);
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index 837f3e57f580..e501a03d6c70 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -279,8 +279,7 @@ static int usbdev_mmap(struct file *file, struct vm_area_struct *vma)
}
}
- vma->vm_flags |= VM_IO;
- vma->vm_flags |= (VM_DONTEXPAND | VM_DONTDUMP);
+ vm_flags_set(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP);
vma->vm_ops = &usbdev_vm_ops;
vma->vm_private_data = usbm;
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index 7e7e119c253f..a0e076c6f3a4 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -899,14 +899,14 @@ static int usb_device_match(struct device *dev, struct device_driver *drv)
return 0;
}
-static int usb_uevent(struct device *dev, struct kobj_uevent_env *env)
+static int usb_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
- struct usb_device *usb_dev;
+ const struct usb_device *usb_dev;
if (is_usb_device(dev)) {
usb_dev = to_usb_device(dev);
} else if (is_usb_interface(dev)) {
- struct usb_interface *intf = to_usb_interface(dev);
+ const struct usb_interface *intf = to_usb_interface(dev);
usb_dev = interface_to_usbdev(intf);
} else {
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 9eca403af2a8..97a0f8faea6e 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -2389,9 +2389,8 @@ static int usb_enumerate_device_otg(struct usb_device *udev)
* usb_enumerate_device - Read device configs/intfs/otg (usbcore-internal)
* @udev: newly addressed device (in ADDRESS state)
*
- * This is only called by usb_new_device() and usb_authorize_device()
- * and FIXME -- all comments that apply to them apply here wrt to
- * environment.
+ * This is only called by usb_new_device() -- all comments that apply there
+ * apply here wrt to environment.
*
* If the device is WUSB and not authorized, we don't attempt to read
* the string descriptors, as they will be errored out by the device
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index 127fac1af676..cc404bb7e8f7 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -1819,11 +1819,11 @@ void usb_authorize_interface(struct usb_interface *intf)
}
}
-static int usb_if_uevent(struct device *dev, struct kobj_uevent_env *env)
+static int usb_if_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
- struct usb_device *usb_dev;
- struct usb_interface *intf;
- struct usb_host_interface *alt;
+ const struct usb_device *usb_dev;
+ const struct usb_interface *intf;
+ const struct usb_host_interface *alt;
intf = to_usb_interface(dev);
usb_dev = interface_to_usbdev(intf);
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 079e183cf3bf..934b3d997702 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -526,6 +526,9 @@ static const struct usb_device_id usb_quirk_list[] = {
/* DJI CineSSD */
{ USB_DEVICE(0x2ca3, 0x0031), .driver_info = USB_QUIRK_NO_LPM },
+ /* Alcor Link AK9563 SC Reader used in 2022 Lenovo ThinkPads */
+ { USB_DEVICE(0x2ce3, 0x9563), .driver_info = USB_QUIRK_NO_LPM },
+
/* DELL USB GEN2 */
{ USB_DEVICE(0x413c, 0xb062), .driver_info = USB_QUIRK_NO_LPM | USB_QUIRK_RESET_RESUME },
diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
index 8217032dfb85..b63f78e48c74 100644
--- a/drivers/usb/core/sysfs.c
+++ b/drivers/usb/core/sysfs.c
@@ -869,11 +869,7 @@ read_descriptors(struct file *filp, struct kobject *kobj,
size_t srclen, n;
int cfgno;
void *src;
- int retval;
- retval = usb_lock_device_interruptible(udev);
- if (retval < 0)
- return -EINTR;
/* The binary attribute begins with the device descriptor.
* Following that are the raw descriptor entries for all the
* configurations (config plus subsidiary descriptors).
@@ -898,7 +894,6 @@ read_descriptors(struct file *filp, struct kobject *kobj,
off -= srclen;
}
}
- usb_unlock_device(udev);
return count - nleft;
}
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index 11b15d7b357a..34742fbbd84d 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -423,9 +423,9 @@ static void usb_release_dev(struct device *dev)
kfree(udev);
}
-static int usb_dev_uevent(struct device *dev, struct kobj_uevent_env *env)
+static int usb_dev_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
- struct usb_device *usb_dev;
+ const struct usb_device *usb_dev;
usb_dev = to_usb_device(dev);
@@ -505,10 +505,10 @@ static const struct dev_pm_ops usb_device_pm_ops = {
#endif /* CONFIG_PM */
-static char *usb_devnode(struct device *dev,
+static char *usb_devnode(const struct device *dev,
umode_t *mode, kuid_t *uid, kgid_t *gid)
{
- struct usb_device *usb_dev;
+ const struct usb_device *usb_dev;
usb_dev = to_usb_device(dev);
return kasprintf(GFP_KERNEL, "bus/usb/%03d/%03d",
@@ -998,7 +998,7 @@ static void usb_debugfs_init(void)
static void usb_debugfs_cleanup(void)
{
- debugfs_remove(debugfs_lookup("devices", usb_debug_root));
+ debugfs_lookup_and_remove("devices", usb_debug_root);
}
/*
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index 8f9959ba9fd4..582ebd9cf9c2 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -1117,6 +1117,7 @@ struct dwc3_scratchpad_array {
* address.
* @num_ep_resized: carries the current number endpoints which have had its tx
* fifo resized.
+ * @debug_root: root debugfs directory for this device to put its files in.
*/
struct dwc3 {
struct work_struct drd_work;
@@ -1332,6 +1333,7 @@ struct dwc3 {
int max_cfg_eps;
int last_fifo_depth;
int num_ep_resized;
+ struct dentry *debug_root;
};
#define INCRX_BURST_MODE 0
diff --git a/drivers/usb/dwc3/debug.h b/drivers/usb/dwc3/debug.h
index 48b44b88dc25..8bb2c9e3b9ac 100644
--- a/drivers/usb/dwc3/debug.h
+++ b/drivers/usb/dwc3/debug.h
@@ -414,11 +414,14 @@ static inline const char *dwc3_gadget_generic_cmd_status_string(int status)
#ifdef CONFIG_DEBUG_FS
extern void dwc3_debugfs_create_endpoint_dir(struct dwc3_ep *dep);
+extern void dwc3_debugfs_remove_endpoint_dir(struct dwc3_ep *dep);
extern void dwc3_debugfs_init(struct dwc3 *d);
extern void dwc3_debugfs_exit(struct dwc3 *d);
#else
static inline void dwc3_debugfs_create_endpoint_dir(struct dwc3_ep *dep)
{ }
+static inline void dwc3_debugfs_remove_endpoint_dir(struct dwc3_ep *dep)
+{ }
static inline void dwc3_debugfs_init(struct dwc3 *d)
{ }
static inline void dwc3_debugfs_exit(struct dwc3 *d)
diff --git a/drivers/usb/dwc3/debugfs.c b/drivers/usb/dwc3/debugfs.c
index f2b7675c7f62..850df0e6bcab 100644
--- a/drivers/usb/dwc3/debugfs.c
+++ b/drivers/usb/dwc3/debugfs.c
@@ -873,27 +873,23 @@ static const struct dwc3_ep_file_map dwc3_ep_file_map[] = {
{ "GDBGEPINFO", &dwc3_ep_info_register_fops, },
};
-static void dwc3_debugfs_create_endpoint_files(struct dwc3_ep *dep,
- struct dentry *parent)
+void dwc3_debugfs_create_endpoint_dir(struct dwc3_ep *dep)
{
+ struct dentry *dir;
int i;
+ dir = debugfs_create_dir(dep->name, dep->dwc->debug_root);
for (i = 0; i < ARRAY_SIZE(dwc3_ep_file_map); i++) {
const struct file_operations *fops = dwc3_ep_file_map[i].fops;
const char *name = dwc3_ep_file_map[i].name;
- debugfs_create_file(name, 0444, parent, dep, fops);
+ debugfs_create_file(name, 0444, dir, dep, fops);
}
}
-void dwc3_debugfs_create_endpoint_dir(struct dwc3_ep *dep)
+void dwc3_debugfs_remove_endpoint_dir(struct dwc3_ep *dep)
{
- struct dentry *dir;
- struct dentry *root;
-
- root = debugfs_lookup(dev_name(dep->dwc->dev), usb_debug_root);
- dir = debugfs_create_dir(dep->name, root);
- dwc3_debugfs_create_endpoint_files(dep, dir);
+ debugfs_lookup_and_remove(dep->name, dep->dwc->debug_root);
}
void dwc3_debugfs_init(struct dwc3 *dwc)
@@ -911,6 +907,7 @@ void dwc3_debugfs_init(struct dwc3 *dwc)
dwc->regset->base = dwc->regs - DWC3_GLOBALS_REGS_START;
root = debugfs_create_dir(dev_name(dwc->dev), usb_debug_root);
+ dwc->debug_root = root;
debugfs_create_regset32("regdump", 0444, root, dwc->regset);
debugfs_create_file("lsp_dump", 0644, root, dwc, &dwc3_lsp_fops);
@@ -929,6 +926,6 @@ void dwc3_debugfs_init(struct dwc3 *dwc)
void dwc3_debugfs_exit(struct dwc3 *dwc)
{
- debugfs_remove(debugfs_lookup(dev_name(dwc->dev), usb_debug_root));
+ debugfs_lookup_and_remove(dev_name(dwc->dev), usb_debug_root);
kfree(dwc->regset);
}
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index 89c9ab2b19f8..a23ddbb81979 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -47,6 +47,7 @@
#define PCI_DEVICE_ID_INTEL_ADLS 0x7ae1
#define PCI_DEVICE_ID_INTEL_RPL 0xa70e
#define PCI_DEVICE_ID_INTEL_RPLS 0x7a61
+#define PCI_DEVICE_ID_INTEL_MTLM 0x7eb1
#define PCI_DEVICE_ID_INTEL_MTLP 0x7ec1
#define PCI_DEVICE_ID_INTEL_MTL 0x7e7e
#define PCI_DEVICE_ID_INTEL_TGL 0x9a15
@@ -467,6 +468,9 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_RPLS),
(kernel_ulong_t) &dwc3_pci_intel_swnode, },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MTLM),
+ (kernel_ulong_t) &dwc3_pci_intel_swnode, },
+
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MTLP),
(kernel_ulong_t) &dwc3_pci_intel_swnode, },
diff --git a/drivers/usb/dwc3/dwc3-qcom.c b/drivers/usb/dwc3/dwc3-qcom.c
index b0a0351d2d8b..959fc925ca7c 100644
--- a/drivers/usb/dwc3/dwc3-qcom.c
+++ b/drivers/usb/dwc3/dwc3-qcom.c
@@ -901,7 +901,7 @@ static int dwc3_qcom_probe(struct platform_device *pdev)
qcom->mode = usb_get_dr_mode(&qcom->dwc3->dev);
/* enable vbus override for device mode */
- if (qcom->mode == USB_DR_MODE_PERIPHERAL)
+ if (qcom->mode != USB_DR_MODE_HOST)
dwc3_qcom_vbus_override_enable(qcom, true);
/* register extcon to override sw_vbus on Vbus change later */
diff --git a/drivers/usb/dwc3/dwc3-xilinx.c b/drivers/usb/dwc3/dwc3-xilinx.c
index 0745e9f11b2e..2c36f97652ca 100644
--- a/drivers/usb/dwc3/dwc3-xilinx.c
+++ b/drivers/usb/dwc3/dwc3-xilinx.c
@@ -14,7 +14,6 @@
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/gpio/consumer.h>
-#include <linux/of_gpio.h>
#include <linux/of_platform.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 89dcfac01235..3c63fa97a680 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -3194,9 +3194,7 @@ static void dwc3_gadget_free_endpoints(struct dwc3 *dwc)
list_del(&dep->endpoint.ep_list);
}
- debugfs_remove_recursive(debugfs_lookup(dep->name,
- debugfs_lookup(dev_name(dep->dwc->dev),
- usb_debug_root)));
+ dwc3_debugfs_remove_endpoint_dir(dep);
kfree(dep);
}
}
diff --git a/drivers/usb/early/xhci-dbc.c b/drivers/usb/early/xhci-dbc.c
index 797047154820..341408410ed9 100644
--- a/drivers/usb/early/xhci-dbc.c
+++ b/drivers/usb/early/xhci-dbc.c
@@ -499,8 +499,7 @@ static int xdbc_bulk_transfer(void *data, int size, bool read)
addr = xdbc.in_dma;
xdbc.flags |= XDBC_FLAGS_IN_PROCESS;
} else {
- memset(xdbc.out_buf, 0, XDBC_MAX_PACKET);
- memcpy(xdbc.out_buf, data, size);
+ memcpy_and_pad(xdbc.out_buf, XDBC_MAX_PACKET, data, size, 0);
addr = xdbc.out_dma;
xdbc.flags |= XDBC_FLAGS_OUT_PROCESS;
}
@@ -874,13 +873,14 @@ retry:
static void early_xdbc_write(struct console *con, const char *str, u32 n)
{
- static char buf[XDBC_MAX_PACKET];
+ /* static variables are zeroed, so buf is always NULL terminated */
+ static char buf[XDBC_MAX_PACKET + 1];
int chunk, ret;
int use_cr = 0;
if (!xdbc.xdbc_reg)
return;
- memset(buf, 0, XDBC_MAX_PACKET);
+
while (n > 0) {
for (chunk = 0; chunk < XDBC_MAX_PACKET && n > 0; str++, chunk++, n--) {
diff --git a/drivers/usb/fotg210/Kconfig b/drivers/usb/fotg210/Kconfig
index 2b05968735ba..87a16258274e 100644
--- a/drivers/usb/fotg210/Kconfig
+++ b/drivers/usb/fotg210/Kconfig
@@ -29,7 +29,7 @@ config USB_FOTG210_UDC
bool "Faraday FOTG210 USB Peripheral Controller support"
help
Faraday USB2.0 OTG controller which can be configured as
- high speed or full speed USB device. This driver suppports
+ high speed or full speed USB device. This driver supports
Bulk Transfer so far.
Say "y" to link the driver statically, or "m" to build a
diff --git a/drivers/usb/fotg210/fotg210-core.c b/drivers/usb/fotg210/fotg210-core.c
index ee740a6da463..cb75464ab290 100644
--- a/drivers/usb/fotg210/fotg210-core.c
+++ b/drivers/usb/fotg210/fotg210-core.c
@@ -6,6 +6,7 @@
* driver.
*/
#include <linux/bitops.h>
+#include <linux/clk.h>
#include <linux/device.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
@@ -17,6 +18,11 @@
#include "fotg210.h"
+/* Role Register 0x80 */
+#define FOTG210_RR 0x80
+#define FOTG210_RR_ID BIT(21) /* 1 = B-device, 0 = A-device */
+#define FOTG210_RR_CROLE BIT(20) /* 1 = device, 0 = host */
+
/*
* Gemini-specific initialization function, only executed on the
* Gemini SoC using the global misc control register.
@@ -33,9 +39,10 @@
#define GEMINI_MISC_USB0_MINI_B BIT(29)
#define GEMINI_MISC_USB1_MINI_B BIT(30)
-static int fotg210_gemini_init(struct device *dev, struct resource *res,
+static int fotg210_gemini_init(struct fotg210 *fotg, struct resource *res,
enum usb_dr_mode mode)
{
+ struct device *dev = fotg->dev;
struct device_node *np = dev->of_node;
struct regmap *map;
bool wakeup;
@@ -43,10 +50,9 @@ static int fotg210_gemini_init(struct device *dev, struct resource *res,
int ret;
map = syscon_regmap_lookup_by_phandle(np, "syscon");
- if (IS_ERR(map)) {
- dev_err(dev, "no syscon\n");
- return PTR_ERR(map);
- }
+ if (IS_ERR(map))
+ return dev_err_probe(dev, PTR_ERR(map), "no syscon\n");
+ fotg->map = map;
wakeup = of_property_read_bool(np, "wakeup-source");
/*
@@ -55,6 +61,7 @@ static int fotg210_gemini_init(struct device *dev, struct resource *res,
*/
mask = 0;
if (res->start == 0x69000000) {
+ fotg->port = GEMINI_PORT_1;
mask = GEMINI_MISC_USB1_VBUS_ON | GEMINI_MISC_USB1_MINI_B |
GEMINI_MISC_USB1_WAKEUP;
if (mode == USB_DR_MODE_HOST)
@@ -64,6 +71,7 @@ static int fotg210_gemini_init(struct device *dev, struct resource *res,
if (wakeup)
val |= GEMINI_MISC_USB1_WAKEUP;
} else {
+ fotg->port = GEMINI_PORT_0;
mask = GEMINI_MISC_USB0_VBUS_ON | GEMINI_MISC_USB0_MINI_B |
GEMINI_MISC_USB0_WAKEUP;
if (mode == USB_DR_MODE_HOST)
@@ -85,27 +93,74 @@ static int fotg210_gemini_init(struct device *dev, struct resource *res,
return 0;
}
+/**
+ * fotg210_vbus() - Called by gadget driver to enable/disable VBUS
+ * @enable: true to enable VBUS, false to disable VBUS
+ */
+void fotg210_vbus(struct fotg210 *fotg, bool enable)
+{
+ u32 mask;
+ u32 val;
+ int ret;
+
+ switch (fotg->port) {
+ case GEMINI_PORT_0:
+ mask = GEMINI_MISC_USB0_VBUS_ON;
+ val = enable ? GEMINI_MISC_USB0_VBUS_ON : 0;
+ break;
+ case GEMINI_PORT_1:
+ mask = GEMINI_MISC_USB1_VBUS_ON;
+ val = enable ? GEMINI_MISC_USB1_VBUS_ON : 0;
+ break;
+ default:
+ return;
+ }
+ ret = regmap_update_bits(fotg->map, GEMINI_GLOBAL_MISC_CTRL, mask, val);
+ if (ret)
+ dev_err(fotg->dev, "failed to %s VBUS\n",
+ enable ? "enable" : "disable");
+ dev_info(fotg->dev, "%s: %s VBUS\n", __func__, enable ? "enable" : "disable");
+}
+
static int fotg210_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
enum usb_dr_mode mode;
+ struct fotg210 *fotg;
+ u32 val;
int ret;
+ fotg = devm_kzalloc(dev, sizeof(*fotg), GFP_KERNEL);
+ if (!fotg)
+ return -ENOMEM;
+ fotg->dev = dev;
+
+ fotg->base = devm_platform_get_and_ioremap_resource(pdev, 0, &fotg->res);
+ if (IS_ERR(fotg->base))
+ return PTR_ERR(fotg->base);
+
+ fotg->pclk = devm_clk_get_optional_enabled(dev, "PCLK");
+ if (IS_ERR(fotg->pclk))
+ return PTR_ERR(fotg->pclk);
+
mode = usb_get_dr_mode(dev);
if (of_device_is_compatible(dev->of_node, "cortina,gemini-usb")) {
- struct resource *res;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ret = fotg210_gemini_init(dev, res, mode);
+ ret = fotg210_gemini_init(fotg, fotg->res, mode);
if (ret)
return ret;
}
- if (mode == USB_DR_MODE_PERIPHERAL)
- ret = fotg210_udc_probe(pdev);
- else
- ret = fotg210_hcd_probe(pdev);
+ val = readl(fotg->base + FOTG210_RR);
+ if (mode == USB_DR_MODE_PERIPHERAL) {
+ if (!(val & FOTG210_RR_CROLE))
+ dev_err(dev, "block not in device role\n");
+ ret = fotg210_udc_probe(pdev, fotg);
+ } else {
+ if (val & FOTG210_RR_CROLE)
+ dev_err(dev, "block not in host role\n");
+ ret = fotg210_hcd_probe(pdev, fotg);
+ }
return ret;
}
@@ -127,7 +182,9 @@ static int fotg210_remove(struct platform_device *pdev)
#ifdef CONFIG_OF
static const struct of_device_id fotg210_of_match[] = {
+ { .compatible = "faraday,fotg200" },
{ .compatible = "faraday,fotg210" },
+ /* TODO: can we also handle FUSB220? */
{},
};
MODULE_DEVICE_TABLE(of, fotg210_of_match);
diff --git a/drivers/usb/fotg210/fotg210-hcd.c b/drivers/usb/fotg210/fotg210-hcd.c
index 51ac93a2eb98..929106c16b29 100644
--- a/drivers/usb/fotg210/fotg210-hcd.c
+++ b/drivers/usb/fotg210/fotg210-hcd.c
@@ -33,7 +33,6 @@
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/iopoll.h>
-#include <linux/clk.h>
#include <asm/byteorder.h>
#include <asm/irq.h>
@@ -862,7 +861,7 @@ static inline void remove_debug_files(struct fotg210_hcd *fotg210)
{
struct usb_bus *bus = &fotg210_to_hcd(fotg210)->self;
- debugfs_remove(debugfs_lookup(bus->bus_name, fotg210_debug_root));
+ debugfs_lookup_and_remove(bus->bus_name, fotg210_debug_root);
}
/* handshake - spin reading hc until handshake completes or fails
@@ -4687,14 +4686,11 @@ static ssize_t uframe_periodic_max_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct fotg210_hcd *fotg210;
- int n;
fotg210 = hcd_to_fotg210(bus_to_hcd(dev_get_drvdata(dev)));
- n = scnprintf(buf, PAGE_SIZE, "%d\n", fotg210->uframe_periodic_max);
- return n;
+ return sysfs_emit(buf, "%d\n", fotg210->uframe_periodic_max);
}
-
static ssize_t uframe_periodic_max_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
@@ -4706,8 +4702,10 @@ static ssize_t uframe_periodic_max_store(struct device *dev,
ssize_t ret;
fotg210 = hcd_to_fotg210(bus_to_hcd(dev_get_drvdata(dev)));
- if (kstrtouint(buf, 0, &uframe_periodic_max) < 0)
- return -EINVAL;
+
+ ret = kstrtouint(buf, 0, &uframe_periodic_max);
+ if (ret)
+ return ret;
if (uframe_periodic_max < 100 || uframe_periodic_max >= 125) {
fotg210_info(fotg210, "rejecting invalid request for uframe_periodic_max=%u\n",
@@ -5557,11 +5555,10 @@ static void fotg210_init(struct fotg210_hcd *fotg210)
* then invokes the start() method for the HCD associated with it
* through the hotplug entry's driver_data.
*/
-int fotg210_hcd_probe(struct platform_device *pdev)
+int fotg210_hcd_probe(struct platform_device *pdev, struct fotg210 *fotg)
{
struct device *dev = &pdev->dev;
struct usb_hcd *hcd;
- struct resource *res;
int irq;
int retval;
struct fotg210_hcd *fotg210;
@@ -5578,70 +5575,42 @@ int fotg210_hcd_probe(struct platform_device *pdev)
hcd = usb_create_hcd(&fotg210_fotg210_hc_driver, dev,
dev_name(dev));
if (!hcd) {
- dev_err(dev, "failed to create hcd\n");
- retval = -ENOMEM;
+ retval = dev_err_probe(dev, -ENOMEM, "failed to create hcd\n");
goto fail_create_hcd;
}
hcd->has_tt = 1;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- hcd->regs = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(hcd->regs)) {
- retval = PTR_ERR(hcd->regs);
- goto failed_put_hcd;
- }
+ hcd->regs = fotg->base;
- hcd->rsrc_start = res->start;
- hcd->rsrc_len = resource_size(res);
+ hcd->rsrc_start = fotg->res->start;
+ hcd->rsrc_len = resource_size(fotg->res);
fotg210 = hcd_to_fotg210(hcd);
+ fotg210->fotg = fotg;
fotg210->caps = hcd->regs;
- /* It's OK not to supply this clock */
- fotg210->pclk = clk_get(dev, "PCLK");
- if (!IS_ERR(fotg210->pclk)) {
- retval = clk_prepare_enable(fotg210->pclk);
- if (retval) {
- dev_err(dev, "failed to enable PCLK\n");
- goto failed_put_hcd;
- }
- } else if (PTR_ERR(fotg210->pclk) == -EPROBE_DEFER) {
- /*
- * Percolate deferrals, for anything else,
- * just live without the clocking.
- */
- retval = PTR_ERR(fotg210->pclk);
- goto failed_dis_clk;
- }
-
retval = fotg210_setup(hcd);
if (retval)
- goto failed_dis_clk;
+ goto failed_put_hcd;
fotg210_init(fotg210);
retval = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (retval) {
- dev_err(dev, "failed to add hcd with err %d\n", retval);
- goto failed_dis_clk;
+ dev_err_probe(dev, retval, "failed to add hcd\n");
+ goto failed_put_hcd;
}
device_wakeup_enable(hcd->self.controller);
platform_set_drvdata(pdev, hcd);
return retval;
-failed_dis_clk:
- if (!IS_ERR(fotg210->pclk)) {
- clk_disable_unprepare(fotg210->pclk);
- clk_put(fotg210->pclk);
- }
failed_put_hcd:
usb_put_hcd(hcd);
fail_create_hcd:
- dev_err(dev, "init %s fail, %d\n", dev_name(dev), retval);
- return retval;
+ return dev_err_probe(dev, retval, "init %s fail\n", dev_name(dev));
}
/*
@@ -5652,12 +5621,6 @@ fail_create_hcd:
int fotg210_hcd_remove(struct platform_device *pdev)
{
struct usb_hcd *hcd = platform_get_drvdata(pdev);
- struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
-
- if (!IS_ERR(fotg210->pclk)) {
- clk_disable_unprepare(fotg210->pclk);
- clk_put(fotg210->pclk);
- }
usb_remove_hcd(hcd);
usb_put_hcd(hcd);
diff --git a/drivers/usb/fotg210/fotg210-hcd.h b/drivers/usb/fotg210/fotg210-hcd.h
index 0781442b7a24..13c9342982ee 100644
--- a/drivers/usb/fotg210/fotg210-hcd.h
+++ b/drivers/usb/fotg210/fotg210-hcd.h
@@ -182,6 +182,7 @@ struct fotg210_hcd { /* one per controller */
# define INCR(x) do {} while (0)
#endif
+ struct fotg210 *fotg; /* Overarching FOTG210 device */
/* silicon clock */
struct clk *pclk;
};
diff --git a/drivers/usb/fotg210/fotg210-udc.c b/drivers/usb/fotg210/fotg210-udc.c
index 87cca81bf4ac..f7ea84070554 100644
--- a/drivers/usb/fotg210/fotg210-udc.c
+++ b/drivers/usb/fotg210/fotg210-udc.c
@@ -7,6 +7,7 @@
* Author : Yuan-Hsin Chen <yhchen@faraday-tech.com>
*/
+#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
@@ -15,7 +16,6 @@
#include <linux/platform_device.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
-#include <linux/clk.h>
#include <linux/usb/otg.h>
#include <linux/usb/phy.h>
@@ -29,6 +29,14 @@ static const char udc_name[] = "fotg210_udc";
static const char * const fotg210_ep_name[] = {
"ep0", "ep1", "ep2", "ep3", "ep4"};
+static void fotg210_ack_int(struct fotg210_udc *fotg210, u32 offset, u32 mask)
+{
+ u32 value = ioread32(fotg210->reg + offset);
+
+ value &= ~mask;
+ iowrite32(value, fotg210->reg + offset);
+}
+
static void fotg210_disable_fifo_int(struct fotg210_ep *ep)
{
u32 value = ioread32(ep->fotg210->reg + FOTG210_DMISGR1);
@@ -304,8 +312,7 @@ static void fotg210_wait_dma_done(struct fotg210_ep *ep)
goto dma_reset;
} while (!(value & DISGR2_DMA_CMPLT));
- value &= ~DISGR2_DMA_CMPLT;
- iowrite32(value, ep->fotg210->reg + FOTG210_DISGR2);
+ fotg210_ack_int(ep->fotg210, FOTG210_DISGR2, DISGR2_DMA_CMPLT);
return;
dma_reset:
@@ -710,6 +717,20 @@ static int fotg210_is_epnstall(struct fotg210_ep *ep)
return value & INOUTEPMPSR_STL_EP ? 1 : 0;
}
+/* For EP0 requests triggered by this driver (currently GET_STATUS response) */
+static void fotg210_ep0_complete(struct usb_ep *_ep, struct usb_request *req)
+{
+ struct fotg210_ep *ep;
+ struct fotg210_udc *fotg210;
+
+ ep = container_of(_ep, struct fotg210_ep, ep);
+ fotg210 = ep->fotg210;
+
+ if (req->status || req->actual != req->length) {
+ dev_warn(&fotg210->gadget.dev, "EP0 request failed: %d\n", req->status);
+ }
+}
+
static void fotg210_get_status(struct fotg210_udc *fotg210,
struct usb_ctrlrequest *ctrl)
{
@@ -831,14 +852,6 @@ static void fotg210_ep0in(struct fotg210_udc *fotg210)
}
}
-static void fotg210_clear_comabt_int(struct fotg210_udc *fotg210)
-{
- u32 value = ioread32(fotg210->reg + FOTG210_DISGR0);
-
- value &= ~DISGR0_CX_COMABT_INT;
- iowrite32(value, fotg210->reg + FOTG210_DISGR0);
-}
-
static void fotg210_in_fifo_handler(struct fotg210_ep *ep)
{
struct fotg210_request *req = list_entry(ep->queue.next,
@@ -880,60 +893,43 @@ static irqreturn_t fotg210_irq(int irq, void *_fotg210)
void __iomem *reg = fotg210->reg + FOTG210_DISGR2;
u32 int_grp2 = ioread32(reg);
u32 int_msk2 = ioread32(fotg210->reg + FOTG210_DMISGR2);
- u32 value;
int_grp2 &= ~int_msk2;
if (int_grp2 & DISGR2_USBRST_INT) {
usb_gadget_udc_reset(&fotg210->gadget,
fotg210->driver);
- value = ioread32(reg);
- value &= ~DISGR2_USBRST_INT;
- iowrite32(value, reg);
+ fotg210_ack_int(fotg210, FOTG210_DISGR2, DISGR2_USBRST_INT);
pr_info("fotg210 udc reset\n");
}
if (int_grp2 & DISGR2_SUSP_INT) {
- value = ioread32(reg);
- value &= ~DISGR2_SUSP_INT;
- iowrite32(value, reg);
+ fotg210_ack_int(fotg210, FOTG210_DISGR2, DISGR2_SUSP_INT);
pr_info("fotg210 udc suspend\n");
}
if (int_grp2 & DISGR2_RESM_INT) {
- value = ioread32(reg);
- value &= ~DISGR2_RESM_INT;
- iowrite32(value, reg);
+ fotg210_ack_int(fotg210, FOTG210_DISGR2, DISGR2_RESM_INT);
pr_info("fotg210 udc resume\n");
}
if (int_grp2 & DISGR2_ISO_SEQ_ERR_INT) {
- value = ioread32(reg);
- value &= ~DISGR2_ISO_SEQ_ERR_INT;
- iowrite32(value, reg);
+ fotg210_ack_int(fotg210, FOTG210_DISGR2, DISGR2_ISO_SEQ_ERR_INT);
pr_info("fotg210 iso sequence error\n");
}
if (int_grp2 & DISGR2_ISO_SEQ_ABORT_INT) {
- value = ioread32(reg);
- value &= ~DISGR2_ISO_SEQ_ABORT_INT;
- iowrite32(value, reg);
+ fotg210_ack_int(fotg210, FOTG210_DISGR2, DISGR2_ISO_SEQ_ABORT_INT);
pr_info("fotg210 iso sequence abort\n");
}
if (int_grp2 & DISGR2_TX0BYTE_INT) {
fotg210_clear_tx0byte(fotg210);
- value = ioread32(reg);
- value &= ~DISGR2_TX0BYTE_INT;
- iowrite32(value, reg);
+ fotg210_ack_int(fotg210, FOTG210_DISGR2, DISGR2_TX0BYTE_INT);
pr_info("fotg210 transferred 0 byte\n");
}
if (int_grp2 & DISGR2_RX0BYTE_INT) {
fotg210_clear_rx0byte(fotg210);
- value = ioread32(reg);
- value &= ~DISGR2_RX0BYTE_INT;
- iowrite32(value, reg);
+ fotg210_ack_int(fotg210, FOTG210_DISGR2, DISGR2_RX0BYTE_INT);
pr_info("fotg210 received 0 byte\n");
}
if (int_grp2 & DISGR2_DMA_ERROR) {
- value = ioread32(reg);
- value &= ~DISGR2_DMA_ERROR;
- iowrite32(value, reg);
+ fotg210_ack_int(fotg210, FOTG210_DISGR2, DISGR2_DMA_ERROR);
}
}
@@ -947,7 +943,7 @@ static irqreturn_t fotg210_irq(int irq, void *_fotg210)
/* the highest priority in this source register */
if (int_grp0 & DISGR0_CX_COMABT_INT) {
- fotg210_clear_comabt_int(fotg210);
+ fotg210_ack_int(fotg210, FOTG210_DISGR0, DISGR0_CX_COMABT_INT);
pr_info("fotg210 CX command abort\n");
}
@@ -1014,8 +1010,11 @@ static int fotg210_udc_start(struct usb_gadget *g,
int ret;
/* hook up the driver */
- driver->driver.bus = NULL;
fotg210->driver = driver;
+ fotg210->gadget.dev.of_node = fotg210->dev->of_node;
+ fotg210->gadget.speed = USB_SPEED_UNKNOWN;
+
+ dev_info(fotg210->dev, "bound driver %s\n", driver->driver.name);
if (!IS_ERR_OR_NULL(fotg210->phy)) {
ret = otg_set_peripheral(fotg210->phy->otg,
@@ -1024,6 +1023,11 @@ static int fotg210_udc_start(struct usb_gadget *g,
dev_err(fotg210->dev, "can't bind to phy\n");
}
+ /* chip enable */
+ value = ioread32(fotg210->reg + FOTG210_DMCR);
+ value |= DMCR_CHIP_EN;
+ iowrite32(value, fotg210->reg + FOTG210_DMCR);
+
/* enable device global interrupt */
value = ioread32(fotg210->reg + FOTG210_DMCR);
value |= DMCR_GLINT_EN;
@@ -1040,6 +1044,15 @@ static void fotg210_init(struct fotg210_udc *fotg210)
iowrite32(GMIR_MHC_INT | GMIR_MOTG_INT | GMIR_INT_POLARITY,
fotg210->reg + FOTG210_GMIR);
+ /* mask interrupts for groups other than 0-2 */
+ iowrite32(~(DMIGR_MINT_G0 | DMIGR_MINT_G1 | DMIGR_MINT_G2),
+ fotg210->reg + FOTG210_DMIGR);
+
+ /* udc software reset */
+ iowrite32(DMCR_SFRST, fotg210->reg + FOTG210_DMCR);
+ /* Better wait a bit, but without a datasheet, no idea how long. */
+ usleep_range(100, 200);
+
/* disable device global interrupt */
value = ioread32(fotg210->reg + FOTG210_DMCR);
value &= ~DMCR_GLINT_EN;
@@ -1072,15 +1085,33 @@ static int fotg210_udc_stop(struct usb_gadget *g)
fotg210_init(fotg210);
fotg210->driver = NULL;
+ fotg210->gadget.speed = USB_SPEED_UNKNOWN;
spin_unlock_irqrestore(&fotg210->lock, flags);
return 0;
}
+/**
+ * fotg210_vbus_session - Called by external transceiver to enable/disable udc
+ * @_gadget: usb gadget
+ * @is_active: 0 if should disable UDC VBUS, 1 if should enable
+ *
+ * Returns 0
+ */
+static int fotg210_vbus_session(struct usb_gadget *g, int is_active)
+{
+ struct fotg210_udc *fotg210 = gadget_to_fotg210(g);
+
+ /* Call down to core integration layer to drive or disable VBUS */
+ fotg210_vbus(fotg210->fotg, is_active);
+ return 0;
+}
+
static const struct usb_gadget_ops fotg210_gadget_ops = {
.udc_start = fotg210_udc_start,
.udc_stop = fotg210_udc_stop,
+ .vbus_session = fotg210_vbus_session,
};
/**
@@ -1134,34 +1165,22 @@ int fotg210_udc_remove(struct platform_device *pdev)
for (i = 0; i < FOTG210_MAX_NUM_EP; i++)
kfree(fotg210->ep[i]);
- if (!IS_ERR(fotg210->pclk))
- clk_disable_unprepare(fotg210->pclk);
-
kfree(fotg210);
return 0;
}
-int fotg210_udc_probe(struct platform_device *pdev)
+int fotg210_udc_probe(struct platform_device *pdev, struct fotg210 *fotg)
{
- struct resource *res;
struct fotg210_udc *fotg210 = NULL;
struct device *dev = &pdev->dev;
int irq;
int ret = 0;
int i;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- pr_err("platform_get_resource error.\n");
- return -ENODEV;
- }
-
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- pr_err("could not get irq\n");
- return -ENODEV;
- }
+ if (irq < 0)
+ return irq;
/* initialize udc */
fotg210 = kzalloc(sizeof(struct fotg210_udc), GFP_KERNEL);
@@ -1169,35 +1188,19 @@ int fotg210_udc_probe(struct platform_device *pdev)
return -ENOMEM;
fotg210->dev = dev;
-
- /* It's OK not to supply this clock */
- fotg210->pclk = devm_clk_get(dev, "PCLK");
- if (!IS_ERR(fotg210->pclk)) {
- ret = clk_prepare_enable(fotg210->pclk);
- if (ret) {
- dev_err(dev, "failed to enable PCLK\n");
- goto err;
- }
- } else if (PTR_ERR(fotg210->pclk) == -EPROBE_DEFER) {
- /*
- * Percolate deferrals, for anything else,
- * just live without the clocking.
- */
- ret = -EPROBE_DEFER;
- goto err;
- }
+ fotg210->fotg = fotg;
fotg210->phy = devm_usb_get_phy_by_phandle(dev, "usb-phy", 0);
if (IS_ERR(fotg210->phy)) {
ret = PTR_ERR(fotg210->phy);
if (ret == -EPROBE_DEFER)
- goto err_pclk;
+ goto err_free;
dev_info(dev, "no PHY found\n");
fotg210->phy = NULL;
} else {
ret = usb_phy_init(fotg210->phy);
if (ret)
- goto err_pclk;
+ goto err_free;
dev_info(dev, "found and initialized PHY\n");
}
@@ -1209,11 +1212,7 @@ int fotg210_udc_probe(struct platform_device *pdev)
goto err_alloc;
}
- fotg210->reg = ioremap(res->start, resource_size(res));
- if (fotg210->reg == NULL) {
- dev_err(dev, "ioremap error\n");
- goto err_alloc;
- }
+ fotg210->reg = fotg->base;
spin_lock_init(&fotg210->lock);
@@ -1262,6 +1261,8 @@ int fotg210_udc_probe(struct platform_device *pdev)
if (fotg210->ep0_req == NULL)
goto err_map;
+ fotg210->ep0_req->complete = fotg210_ep0_complete;
+
fotg210_init(fotg210);
fotg210_disable_unplug(fotg210);
@@ -1269,7 +1270,7 @@ int fotg210_udc_probe(struct platform_device *pdev)
ret = request_irq(irq, fotg210_irq, IRQF_SHARED,
udc_name, fotg210);
if (ret < 0) {
- dev_err(dev, "request_irq error (%d)\n", ret);
+ dev_err_probe(dev, ret, "request_irq error\n");
goto err_req;
}
@@ -1298,11 +1299,8 @@ err_map:
err_alloc:
for (i = 0; i < FOTG210_MAX_NUM_EP; i++)
kfree(fotg210->ep[i]);
-err_pclk:
- if (!IS_ERR(fotg210->pclk))
- clk_disable_unprepare(fotg210->pclk);
-err:
+err_free:
kfree(fotg210);
return ret;
}
diff --git a/drivers/usb/fotg210/fotg210-udc.h b/drivers/usb/fotg210/fotg210-udc.h
index fadb57ca8d78..252cb2b8e2fe 100644
--- a/drivers/usb/fotg210/fotg210-udc.h
+++ b/drivers/usb/fotg210/fotg210-udc.h
@@ -58,6 +58,8 @@
/* Device Mask of Interrupt Group Register (0x130) */
#define FOTG210_DMIGR 0x130
+#define DMIGR_MINT_G2 (1 << 2)
+#define DMIGR_MINT_G1 (1 << 1)
#define DMIGR_MINT_G0 (1 << 0)
/* Device Mask of Interrupt Source Group 0(0x134) */
@@ -231,11 +233,11 @@ struct fotg210_ep {
struct fotg210_udc {
spinlock_t lock; /* protect the struct */
void __iomem *reg;
- struct clk *pclk;
unsigned long irq_trigger;
struct device *dev;
+ struct fotg210 *fotg;
struct usb_phy *phy;
struct usb_gadget gadget;
struct usb_gadget_driver *driver;
diff --git a/drivers/usb/fotg210/fotg210.h b/drivers/usb/fotg210/fotg210.h
index ef79d8323d89..c44c0afe2956 100644
--- a/drivers/usb/fotg210/fotg210.h
+++ b/drivers/usb/fotg210/fotg210.h
@@ -2,13 +2,31 @@
#ifndef __FOTG210_H
#define __FOTG210_H
+enum gemini_port {
+ GEMINI_PORT_NONE = 0,
+ GEMINI_PORT_0,
+ GEMINI_PORT_1,
+};
+
+struct fotg210 {
+ struct device *dev;
+ struct resource *res;
+ void __iomem *base;
+ struct clk *pclk;
+ struct regmap *map;
+ enum gemini_port port;
+};
+
+void fotg210_vbus(struct fotg210 *fotg, bool enable);
+
#ifdef CONFIG_USB_FOTG210_HCD
-int fotg210_hcd_probe(struct platform_device *pdev);
+int fotg210_hcd_probe(struct platform_device *pdev, struct fotg210 *fotg);
int fotg210_hcd_remove(struct platform_device *pdev);
int fotg210_hcd_init(void);
void fotg210_hcd_cleanup(void);
#else
-static inline int fotg210_hcd_probe(struct platform_device *pdev)
+static inline int fotg210_hcd_probe(struct platform_device *pdev,
+ struct fotg210 *fotg)
{
return 0;
}
@@ -26,10 +44,11 @@ static inline void fotg210_hcd_cleanup(void)
#endif
#ifdef CONFIG_USB_FOTG210_UDC
-int fotg210_udc_probe(struct platform_device *pdev);
+int fotg210_udc_probe(struct platform_device *pdev, struct fotg210 *fotg);
int fotg210_udc_remove(struct platform_device *pdev);
#else
-static inline int fotg210_udc_probe(struct platform_device *pdev)
+static inline int fotg210_udc_probe(struct platform_device *pdev,
+ struct fotg210 *fotg)
{
return 0;
}
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index 4fa2ddf322b4..336db8f92afa 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -203,6 +203,7 @@ config USB_F_UAC2
config USB_F_UVC
tristate
+ select UVC_COMMON
config USB_F_MIDI
tristate
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index 403563c06477..fa7dd6cf014d 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -14,9 +14,11 @@
#include <linux/device.h>
#include <linux/utsname.h>
#include <linux/bitfield.h>
+#include <linux/uuid.h>
#include <linux/usb/composite.h>
#include <linux/usb/otg.h>
+#include <linux/usb/webusb.h>
#include <asm/unaligned.h>
#include "u_os_desc.h"
@@ -713,14 +715,16 @@ static int bos_desc(struct usb_composite_dev *cdev)
* A SuperSpeed device shall include the USB2.0 extension descriptor
* and shall support LPM when operating in USB2.0 HS mode.
*/
- usb_ext = cdev->req->buf + le16_to_cpu(bos->wTotalLength);
- bos->bNumDeviceCaps++;
- le16_add_cpu(&bos->wTotalLength, USB_DT_USB_EXT_CAP_SIZE);
- usb_ext->bLength = USB_DT_USB_EXT_CAP_SIZE;
- usb_ext->bDescriptorType = USB_DT_DEVICE_CAPABILITY;
- usb_ext->bDevCapabilityType = USB_CAP_TYPE_EXT;
- usb_ext->bmAttributes = cpu_to_le32(USB_LPM_SUPPORT |
- USB_BESL_SUPPORT | besl);
+ if (cdev->gadget->lpm_capable) {
+ usb_ext = cdev->req->buf + le16_to_cpu(bos->wTotalLength);
+ bos->bNumDeviceCaps++;
+ le16_add_cpu(&bos->wTotalLength, USB_DT_USB_EXT_CAP_SIZE);
+ usb_ext->bLength = USB_DT_USB_EXT_CAP_SIZE;
+ usb_ext->bDescriptorType = USB_DT_DEVICE_CAPABILITY;
+ usb_ext->bDevCapabilityType = USB_CAP_TYPE_EXT;
+ usb_ext->bmAttributes = cpu_to_le32(USB_LPM_SUPPORT |
+ USB_BESL_SUPPORT | besl);
+ }
/*
* The Superspeed USB Capability descriptor shall be implemented by all
@@ -821,6 +825,37 @@ static int bos_desc(struct usb_composite_dev *cdev)
}
}
+ /* The WebUSB Platform Capability descriptor */
+ if (cdev->use_webusb) {
+ struct usb_plat_dev_cap_descriptor *webusb_cap;
+ struct usb_webusb_cap_data *webusb_cap_data;
+ guid_t webusb_uuid = WEBUSB_UUID;
+
+ webusb_cap = cdev->req->buf + le16_to_cpu(bos->wTotalLength);
+ webusb_cap_data = (struct usb_webusb_cap_data *) webusb_cap->CapabilityData;
+ bos->bNumDeviceCaps++;
+ le16_add_cpu(&bos->wTotalLength,
+ USB_DT_USB_PLAT_DEV_CAP_SIZE(USB_WEBUSB_CAP_DATA_SIZE));
+
+ webusb_cap->bLength = USB_DT_USB_PLAT_DEV_CAP_SIZE(USB_WEBUSB_CAP_DATA_SIZE);
+ webusb_cap->bDescriptorType = USB_DT_DEVICE_CAPABILITY;
+ webusb_cap->bDevCapabilityType = USB_PLAT_DEV_CAP_TYPE;
+ webusb_cap->bReserved = 0;
+ export_guid(webusb_cap->UUID, &webusb_uuid);
+
+ if (cdev->bcd_webusb_version != 0)
+ webusb_cap_data->bcdVersion = cpu_to_le16(cdev->bcd_webusb_version);
+ else
+ webusb_cap_data->bcdVersion = WEBUSB_VERSION_1_00;
+
+ webusb_cap_data->bVendorCode = cdev->b_webusb_vendor_code;
+
+ if (strnlen(cdev->landing_page, sizeof(cdev->landing_page)) > 0)
+ webusb_cap_data->iLandingPage = WEBUSB_LANDING_PAGE_PRESENT;
+ else
+ webusb_cap_data->iLandingPage = WEBUSB_LANDING_PAGE_NOT_PRESENT;
+ }
+
return le16_to_cpu(bos->wTotalLength);
}
@@ -1744,7 +1779,7 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
cdev->desc.bcdUSB = cpu_to_le16(0x0210);
}
} else {
- if (gadget->lpm_capable)
+ if (gadget->lpm_capable || cdev->use_webusb)
cdev->desc.bcdUSB = cpu_to_le16(0x0201);
else
cdev->desc.bcdUSB = cpu_to_le16(0x0200);
@@ -1779,7 +1814,7 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
break;
case USB_DT_BOS:
if (gadget_is_superspeed(gadget) ||
- gadget->lpm_capable) {
+ gadget->lpm_capable || cdev->use_webusb) {
value = bos_desc(cdev);
value = min(w_length, (u16) value);
}
@@ -2013,6 +2048,53 @@ unknown:
goto check_value;
}
+ /*
+ * WebUSB URL descriptor handling, following:
+ * https://wicg.github.io/webusb/#device-requests
+ */
+ if (cdev->use_webusb &&
+ ctrl->bRequestType == (USB_DIR_IN | USB_TYPE_VENDOR) &&
+ w_index == WEBUSB_GET_URL &&
+ w_value == WEBUSB_LANDING_PAGE_PRESENT &&
+ ctrl->bRequest == cdev->b_webusb_vendor_code) {
+ unsigned int landing_page_length;
+ unsigned int landing_page_offset;
+ struct webusb_url_descriptor *url_descriptor =
+ (struct webusb_url_descriptor *)cdev->req->buf;
+
+ url_descriptor->bDescriptorType = WEBUSB_URL_DESCRIPTOR_TYPE;
+
+ if (strncasecmp(cdev->landing_page, "https://", 8) == 0) {
+ landing_page_offset = 8;
+ url_descriptor->bScheme = WEBUSB_URL_SCHEME_HTTPS;
+ } else if (strncasecmp(cdev->landing_page, "http://", 7) == 0) {
+ landing_page_offset = 7;
+ url_descriptor->bScheme = WEBUSB_URL_SCHEME_HTTP;
+ } else {
+ landing_page_offset = 0;
+ url_descriptor->bScheme = WEBUSB_URL_SCHEME_NONE;
+ }
+
+ landing_page_length = strnlen(cdev->landing_page,
+ sizeof(url_descriptor->URL)
+ - WEBUSB_URL_DESCRIPTOR_HEADER_LENGTH + landing_page_offset);
+
+ if (ctrl->wLength < WEBUSB_URL_DESCRIPTOR_HEADER_LENGTH
+ + landing_page_length)
+ landing_page_length = ctrl->wLength
+ - WEBUSB_URL_DESCRIPTOR_HEADER_LENGTH + landing_page_offset;
+
+ memcpy(url_descriptor->URL,
+ cdev->landing_page + landing_page_offset,
+ landing_page_length - landing_page_offset);
+ url_descriptor->bLength = landing_page_length
+ - landing_page_offset + WEBUSB_URL_DESCRIPTOR_HEADER_LENGTH;
+
+ value = url_descriptor->bLength;
+
+ goto check_value;
+ }
+
VDBG(cdev,
"non-core control req%02x.%02x v%04x i%04x l%d\n",
ctrl->bRequestType, ctrl->bRequest,
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index 0853536cbf2e..b9f1136aa0a2 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -7,6 +7,7 @@
#include <linux/nls.h>
#include <linux/usb/composite.h>
#include <linux/usb/gadget_configfs.h>
+#include <linux/usb/webusb.h>
#include "configfs.h"
#include "u_f.h"
#include "u_os_desc.h"
@@ -39,6 +40,7 @@ struct gadget_info {
struct config_group configs_group;
struct config_group strings_group;
struct config_group os_desc_group;
+ struct config_group webusb_group;
struct mutex lock;
struct usb_gadget_strings *gstrings[MAX_USB_STRING_LANGS + 1];
@@ -50,6 +52,11 @@ struct gadget_info {
bool use_os_desc;
char b_vendor_code;
char qw_sign[OS_STRING_QW_SIGN_LEN];
+ bool use_webusb;
+ u16 bcd_webusb_version;
+ u8 b_webusb_vendor_code;
+ char landing_page[WEBUSB_URL_RAW_MAX_LENGTH];
+
spinlock_t spinlock;
bool unbind;
};
@@ -79,7 +86,7 @@ static inline struct gadget_info *cfg_to_gadget_info(struct config_usb_cfg *cfg)
return container_of(cfg->c.cdev, struct gadget_info, cdev);
}
-struct gadget_strings {
+struct gadget_language {
struct usb_gadget_strings stringtab_dev;
struct usb_string strings[USB_GADGET_FIRST_AVAIL_IDX];
char *manufacturer;
@@ -88,6 +95,8 @@ struct gadget_strings {
struct config_group group;
struct list_head list;
+ struct list_head gadget_strings;
+ unsigned int nstrings;
};
struct gadget_config_name {
@@ -365,9 +374,9 @@ static struct configfs_attribute *gadget_root_attrs[] = {
NULL,
};
-static inline struct gadget_strings *to_gadget_strings(struct config_item *item)
+static inline struct gadget_language *to_gadget_language(struct config_item *item)
{
- return container_of(to_config_group(item), struct gadget_strings,
+ return container_of(to_config_group(item), struct gadget_language,
group);
}
@@ -430,6 +439,12 @@ static int config_usb_cfg_link(
* from another gadget or a random directory.
* Also a function instance can only be linked once.
*/
+
+ if (gi->composite.gadget_driver.udc_name) {
+ ret = -EINVAL;
+ goto out;
+ }
+
list_for_each_entry(iter, &gi->available_func, cfs_list) {
if (iter != fi)
continue;
@@ -755,20 +770,20 @@ static const struct config_item_type config_desc_type = {
.ct_owner = THIS_MODULE,
};
-GS_STRINGS_RW(gadget_strings, manufacturer);
-GS_STRINGS_RW(gadget_strings, product);
-GS_STRINGS_RW(gadget_strings, serialnumber);
+GS_STRINGS_RW(gadget_language, manufacturer);
+GS_STRINGS_RW(gadget_language, product);
+GS_STRINGS_RW(gadget_language, serialnumber);
-static struct configfs_attribute *gadget_strings_langid_attrs[] = {
- &gadget_strings_attr_manufacturer,
- &gadget_strings_attr_product,
- &gadget_strings_attr_serialnumber,
+static struct configfs_attribute *gadget_language_langid_attrs[] = {
+ &gadget_language_attr_manufacturer,
+ &gadget_language_attr_product,
+ &gadget_language_attr_serialnumber,
NULL,
};
-static void gadget_strings_attr_release(struct config_item *item)
+static void gadget_language_attr_release(struct config_item *item)
{
- struct gadget_strings *gs = to_gadget_strings(item);
+ struct gadget_language *gs = to_gadget_language(item);
kfree(gs->manufacturer);
kfree(gs->product);
@@ -778,8 +793,317 @@ static void gadget_strings_attr_release(struct config_item *item)
kfree(gs);
}
-USB_CONFIG_STRING_RW_OPS(gadget_strings);
-USB_CONFIG_STRINGS_LANG(gadget_strings, gadget_info);
+static struct configfs_item_operations gadget_language_langid_item_ops = {
+ .release = gadget_language_attr_release,
+};
+
+static ssize_t gadget_string_id_show(struct config_item *item, char *page)
+{
+ struct gadget_string *string = to_gadget_string(item);
+ int ret;
+
+ ret = sprintf(page, "%u\n", string->usb_string.id);
+ return ret;
+}
+CONFIGFS_ATTR_RO(gadget_string_, id);
+
+static ssize_t gadget_string_s_show(struct config_item *item, char *page)
+{
+ struct gadget_string *string = to_gadget_string(item);
+ int ret;
+
+ ret = snprintf(page, sizeof(string->string), "%s\n", string->string);
+ return ret;
+}
+
+static ssize_t gadget_string_s_store(struct config_item *item, const char *page,
+ size_t len)
+{
+ struct gadget_string *string = to_gadget_string(item);
+ int size = min(sizeof(string->string), len + 1);
+
+ if (len > USB_MAX_STRING_LEN)
+ return -EINVAL;
+
+ return strscpy(string->string, page, size);
+}
+CONFIGFS_ATTR(gadget_string_, s);
+
+static struct configfs_attribute *gadget_string_attrs[] = {
+ &gadget_string_attr_id,
+ &gadget_string_attr_s,
+ NULL,
+};
+
+static void gadget_string_release(struct config_item *item)
+{
+ struct gadget_string *string = to_gadget_string(item);
+
+ kfree(string);
+}
+
+static struct configfs_item_operations gadget_string_item_ops = {
+ .release = gadget_string_release,
+};
+
+static const struct config_item_type gadget_string_type = {
+ .ct_item_ops = &gadget_string_item_ops,
+ .ct_attrs = gadget_string_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct config_item *gadget_language_string_make(struct config_group *group,
+ const char *name)
+{
+ struct gadget_language *language;
+ struct gadget_string *string;
+
+ language = to_gadget_language(&group->cg_item);
+
+ string = kzalloc(sizeof(*string), GFP_KERNEL);
+ if (!string)
+ return ERR_PTR(-ENOMEM);
+
+ string->usb_string.id = language->nstrings++;
+ string->usb_string.s = string->string;
+ list_add_tail(&string->list, &language->gadget_strings);
+
+ config_item_init_type_name(&string->item, name, &gadget_string_type);
+
+ return &string->item;
+}
+
+static void gadget_language_string_drop(struct config_group *group,
+ struct config_item *item)
+{
+ struct gadget_language *language;
+ struct gadget_string *string;
+ unsigned int i = USB_GADGET_FIRST_AVAIL_IDX;
+
+ language = to_gadget_language(&group->cg_item);
+ string = to_gadget_string(item);
+
+ list_del(&string->list);
+ language->nstrings--;
+
+ /* Reset the ids for the language's strings to guarantee a continuous set */
+ list_for_each_entry(string, &language->gadget_strings, list)
+ string->usb_string.id = i++;
+}
+
+static struct configfs_group_operations gadget_language_langid_group_ops = {
+ .make_item = gadget_language_string_make,
+ .drop_item = gadget_language_string_drop,
+};
+
+static struct config_item_type gadget_language_type = {
+ .ct_item_ops = &gadget_language_langid_item_ops,
+ .ct_group_ops = &gadget_language_langid_group_ops,
+ .ct_attrs = gadget_language_langid_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct config_group *gadget_language_make(struct config_group *group,
+ const char *name)
+{
+ struct gadget_info *gi;
+ struct gadget_language *gs;
+ struct gadget_language *new;
+ int langs = 0;
+ int ret;
+
+ new = kzalloc(sizeof(*new), GFP_KERNEL);
+ if (!new)
+ return ERR_PTR(-ENOMEM);
+
+ ret = check_user_usb_string(name, &new->stringtab_dev);
+ if (ret)
+ goto err;
+ config_group_init_type_name(&new->group, name,
+ &gadget_language_type);
+
+ gi = container_of(group, struct gadget_info, strings_group);
+ ret = -EEXIST;
+ list_for_each_entry(gs, &gi->string_list, list) {
+ if (gs->stringtab_dev.language == new->stringtab_dev.language)
+ goto err;
+ langs++;
+ }
+ ret = -EOVERFLOW;
+ if (langs >= MAX_USB_STRING_LANGS)
+ goto err;
+
+ list_add_tail(&new->list, &gi->string_list);
+ INIT_LIST_HEAD(&new->gadget_strings);
+
+ /* We have the default manufacturer, product and serialnumber strings */
+ new->nstrings = 3;
+ return &new->group;
+err:
+ kfree(new);
+ return ERR_PTR(ret);
+}
+
+static void gadget_language_drop(struct config_group *group,
+ struct config_item *item)
+{
+ config_item_put(item);
+}
+
+static struct configfs_group_operations gadget_language_group_ops = {
+ .make_group = &gadget_language_make,
+ .drop_item = &gadget_language_drop,
+};
+
+static struct config_item_type gadget_language_strings_type = {
+ .ct_group_ops = &gadget_language_group_ops,
+ .ct_owner = THIS_MODULE,
+};
+
+static inline struct gadget_info *webusb_item_to_gadget_info(
+ struct config_item *item)
+{
+ return container_of(to_config_group(item),
+ struct gadget_info, webusb_group);
+}
+
+static ssize_t webusb_use_show(struct config_item *item, char *page)
+{
+ return sysfs_emit(page, "%d\n",
+ webusb_item_to_gadget_info(item)->use_webusb);
+}
+
+static ssize_t webusb_use_store(struct config_item *item, const char *page,
+ size_t len)
+{
+ struct gadget_info *gi = webusb_item_to_gadget_info(item);
+ int ret;
+ bool use;
+
+ ret = kstrtobool(page, &use);
+ if (ret)
+ return ret;
+
+ mutex_lock(&gi->lock);
+ gi->use_webusb = use;
+ mutex_unlock(&gi->lock);
+
+ return len;
+}
+
+static ssize_t webusb_bcdVersion_show(struct config_item *item, char *page)
+{
+ return sysfs_emit(page, "0x%04x\n",
+ webusb_item_to_gadget_info(item)->bcd_webusb_version);
+}
+
+static ssize_t webusb_bcdVersion_store(struct config_item *item,
+ const char *page, size_t len)
+{
+ struct gadget_info *gi = webusb_item_to_gadget_info(item);
+ u16 bcdVersion;
+ int ret;
+
+ ret = kstrtou16(page, 0, &bcdVersion);
+ if (ret)
+ return ret;
+
+ ret = is_valid_bcd(bcdVersion);
+ if (ret)
+ return ret;
+
+ mutex_lock(&gi->lock);
+ gi->bcd_webusb_version = bcdVersion;
+ mutex_unlock(&gi->lock);
+
+ return len;
+}
+
+static ssize_t webusb_bVendorCode_show(struct config_item *item, char *page)
+{
+ return sysfs_emit(page, "0x%02x\n",
+ webusb_item_to_gadget_info(item)->b_webusb_vendor_code);
+}
+
+static ssize_t webusb_bVendorCode_store(struct config_item *item,
+ const char *page, size_t len)
+{
+ struct gadget_info *gi = webusb_item_to_gadget_info(item);
+ int ret;
+ u8 b_vendor_code;
+
+ ret = kstrtou8(page, 0, &b_vendor_code);
+ if (ret)
+ return ret;
+
+ mutex_lock(&gi->lock);
+ gi->b_webusb_vendor_code = b_vendor_code;
+ mutex_unlock(&gi->lock);
+
+ return len;
+}
+
+static ssize_t webusb_landingPage_show(struct config_item *item, char *page)
+{
+ return sysfs_emit(page, "%s\n", webusb_item_to_gadget_info(item)->landing_page);
+}
+
+static ssize_t webusb_landingPage_store(struct config_item *item, const char *page,
+ size_t len)
+{
+ struct gadget_info *gi = webusb_item_to_gadget_info(item);
+ unsigned int bytes_to_strip = 0;
+ int l = len;
+
+ if (page[l - 1] == '\n') {
+ --l;
+ ++bytes_to_strip;
+ }
+
+ if (l > sizeof(gi->landing_page)) {
+ pr_err("webusb: landingPage URL too long\n");
+ return -EINVAL;
+ }
+
+ // validation
+ if (strncasecmp(page, "https://", 8) == 0)
+ bytes_to_strip = 8;
+ else if (strncasecmp(page, "http://", 7) == 0)
+ bytes_to_strip = 7;
+ else
+ bytes_to_strip = 0;
+
+ if (l > U8_MAX - WEBUSB_URL_DESCRIPTOR_HEADER_LENGTH + bytes_to_strip) {
+ pr_err("webusb: landingPage URL %d bytes too long for given URL scheme\n",
+ l - U8_MAX + WEBUSB_URL_DESCRIPTOR_HEADER_LENGTH - bytes_to_strip);
+ return -EINVAL;
+ }
+
+ mutex_lock(&gi->lock);
+ // ensure 0 bytes are set, in case the new landing page is shorter then the old one.
+ memcpy_and_pad(gi->landing_page, sizeof(gi->landing_page), page, l, 0);
+ mutex_unlock(&gi->lock);
+
+ return len;
+}
+
+CONFIGFS_ATTR(webusb_, use);
+CONFIGFS_ATTR(webusb_, bVendorCode);
+CONFIGFS_ATTR(webusb_, bcdVersion);
+CONFIGFS_ATTR(webusb_, landingPage);
+
+static struct configfs_attribute *webusb_attrs[] = {
+ &webusb_attr_use,
+ &webusb_attr_bcdVersion,
+ &webusb_attr_bVendorCode,
+ &webusb_attr_landingPage,
+ NULL,
+};
+
+static struct config_item_type webusb_type = {
+ .ct_attrs = webusb_attrs,
+ .ct_owner = THIS_MODULE,
+};
static inline struct gadget_info *os_desc_item_to_gadget_info(
struct config_item *item)
@@ -801,15 +1125,15 @@ static ssize_t os_desc_use_store(struct config_item *item, const char *page,
int ret;
bool use;
- mutex_lock(&gi->lock);
ret = kstrtobool(page, &use);
- if (!ret) {
- gi->use_os_desc = use;
- ret = len;
- }
+ if (ret)
+ return ret;
+
+ mutex_lock(&gi->lock);
+ gi->use_os_desc = use;
mutex_unlock(&gi->lock);
- return ret;
+ return len;
}
static ssize_t os_desc_b_vendor_code_show(struct config_item *item, char *page)
@@ -825,15 +1149,15 @@ static ssize_t os_desc_b_vendor_code_store(struct config_item *item,
int ret;
u8 b_vendor_code;
- mutex_lock(&gi->lock);
ret = kstrtou8(page, 0, &b_vendor_code);
- if (!ret) {
- gi->b_vendor_code = b_vendor_code;
- ret = len;
- }
+ if (ret)
+ return ret;
+
+ mutex_lock(&gi->lock);
+ gi->b_vendor_code = b_vendor_code;
mutex_unlock(&gi->lock);
- return ret;
+ return len;
}
static ssize_t os_desc_qw_sign_show(struct config_item *item, char *page)
@@ -958,15 +1282,15 @@ static ssize_t ext_prop_type_store(struct config_item *item,
u8 type;
int ret;
- if (desc->opts_mutex)
- mutex_lock(desc->opts_mutex);
ret = kstrtou8(page, 0, &type);
if (ret)
- goto end;
- if (type < USB_EXT_PROP_UNICODE || type > USB_EXT_PROP_UNICODE_MULTI) {
- ret = -EINVAL;
- goto end;
- }
+ return ret;
+
+ if (type < USB_EXT_PROP_UNICODE || type > USB_EXT_PROP_UNICODE_MULTI)
+ return -EINVAL;
+
+ if (desc->opts_mutex)
+ mutex_lock(desc->opts_mutex);
if ((ext_prop->type == USB_EXT_PROP_BINARY ||
ext_prop->type == USB_EXT_PROP_LE32 ||
@@ -983,12 +1307,10 @@ static ssize_t ext_prop_type_store(struct config_item *item,
type == USB_EXT_PROP_BE32))
ext_prop->data_len >>= 1;
ext_prop->type = type;
- ret = len;
-end:
if (desc->opts_mutex)
mutex_unlock(desc->opts_mutex);
- return ret;
+ return len;
}
static ssize_t ext_prop_data_show(struct config_item *item, char *page)
@@ -1273,6 +1595,80 @@ static void purge_configs_funcs(struct gadget_info *gi)
}
}
+static struct usb_string *
+configfs_attach_gadget_strings(struct gadget_info *gi)
+{
+ struct usb_gadget_strings **gadget_strings;
+ struct gadget_language *language;
+ struct gadget_string *string;
+ unsigned int nlangs = 0;
+ struct list_head *iter;
+ struct usb_string *us;
+ unsigned int i = 0;
+ int nstrings = -1;
+ unsigned int j;
+
+ list_for_each(iter, &gi->string_list)
+ nlangs++;
+
+ /* Bail out early if no languages are configured */
+ if (!nlangs)
+ return NULL;
+
+ gadget_strings = kcalloc(nlangs + 1, /* including NULL terminator */
+ sizeof(struct usb_gadget_strings *), GFP_KERNEL);
+ if (!gadget_strings)
+ return ERR_PTR(-ENOMEM);
+
+ list_for_each_entry(language, &gi->string_list, list) {
+ struct usb_string *stringtab;
+
+ if (nstrings == -1) {
+ nstrings = language->nstrings;
+ } else if (nstrings != language->nstrings) {
+ pr_err("languages must contain the same number of strings\n");
+ us = ERR_PTR(-EINVAL);
+ goto cleanup;
+ }
+
+ stringtab = kcalloc(language->nstrings + 1, sizeof(struct usb_string),
+ GFP_KERNEL);
+ if (!stringtab) {
+ us = ERR_PTR(-ENOMEM);
+ goto cleanup;
+ }
+
+ stringtab[USB_GADGET_MANUFACTURER_IDX].id = USB_GADGET_MANUFACTURER_IDX;
+ stringtab[USB_GADGET_MANUFACTURER_IDX].s = language->manufacturer;
+ stringtab[USB_GADGET_PRODUCT_IDX].id = USB_GADGET_PRODUCT_IDX;
+ stringtab[USB_GADGET_PRODUCT_IDX].s = language->product;
+ stringtab[USB_GADGET_SERIAL_IDX].id = USB_GADGET_SERIAL_IDX;
+ stringtab[USB_GADGET_SERIAL_IDX].s = language->serialnumber;
+
+ j = USB_GADGET_FIRST_AVAIL_IDX;
+ list_for_each_entry(string, &language->gadget_strings, list) {
+ memcpy(&stringtab[j], &string->usb_string, sizeof(struct usb_string));
+ j++;
+ }
+
+ language->stringtab_dev.strings = stringtab;
+ gadget_strings[i] = &language->stringtab_dev;
+ i++;
+ }
+
+ us = usb_gstrings_attach(&gi->cdev, gadget_strings, nstrings);
+
+cleanup:
+ list_for_each_entry(language, &gi->string_list, list) {
+ kfree(language->stringtab_dev.strings);
+ language->stringtab_dev.strings = NULL;
+ }
+
+ kfree(gadget_strings);
+
+ return us;
+}
+
static int configfs_composite_bind(struct usb_gadget *gadget,
struct usb_gadget_driver *gdriver)
{
@@ -1316,22 +1712,7 @@ static int configfs_composite_bind(struct usb_gadget *gadget,
/* init all strings */
if (!list_empty(&gi->string_list)) {
- struct gadget_strings *gs;
-
- i = 0;
- list_for_each_entry(gs, &gi->string_list, list) {
-
- gi->gstrings[i] = &gs->stringtab_dev;
- gs->stringtab_dev.strings = gs->strings;
- gs->strings[USB_GADGET_MANUFACTURER_IDX].s =
- gs->manufacturer;
- gs->strings[USB_GADGET_PRODUCT_IDX].s = gs->product;
- gs->strings[USB_GADGET_SERIAL_IDX].s = gs->serialnumber;
- i++;
- }
- gi->gstrings[i] = NULL;
- s = usb_gstrings_attach(&gi->cdev, gi->gstrings,
- USB_GADGET_FIRST_AVAIL_IDX);
+ s = configfs_attach_gadget_strings(gi);
if (IS_ERR(s)) {
ret = PTR_ERR(s);
goto err_comp_cleanup;
@@ -1340,6 +1721,15 @@ static int configfs_composite_bind(struct usb_gadget *gadget,
gi->cdev.desc.iManufacturer = s[USB_GADGET_MANUFACTURER_IDX].id;
gi->cdev.desc.iProduct = s[USB_GADGET_PRODUCT_IDX].id;
gi->cdev.desc.iSerialNumber = s[USB_GADGET_SERIAL_IDX].id;
+
+ gi->cdev.usb_strings = s;
+ }
+
+ if (gi->use_webusb) {
+ cdev->use_webusb = true;
+ cdev->bcd_webusb_version = gi->bcd_webusb_version;
+ cdev->b_webusb_vendor_code = gi->b_webusb_vendor_code;
+ memcpy(cdev->landing_page, gi->landing_page, WEBUSB_URL_RAW_MAX_LENGTH);
}
if (gi->use_os_desc) {
@@ -1598,13 +1988,17 @@ static struct config_group *gadgets_make(
configfs_add_default_group(&gi->configs_group, &gi->group);
config_group_init_type_name(&gi->strings_group, "strings",
- &gadget_strings_strings_type);
+ &gadget_language_strings_type);
configfs_add_default_group(&gi->strings_group, &gi->group);
config_group_init_type_name(&gi->os_desc_group, "os_desc",
&os_desc_type);
configfs_add_default_group(&gi->os_desc_group, &gi->group);
+ config_group_init_type_name(&gi->webusb_group, "webusb",
+ &webusb_type);
+ configfs_add_default_group(&gi->webusb_group, &gi->group);
+
gi->composite.bind = configfs_do_nothing;
gi->composite.unbind = configfs_do_nothing;
gi->composite.suspend = NULL;
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 523a961b910b..ddfc537c7526 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -279,8 +279,10 @@ static int __ffs_ep0_queue_wait(struct ffs_data *ffs, char *data, size_t len)
struct usb_request *req = ffs->ep0req;
int ret;
- if (!req)
+ if (!req) {
+ spin_unlock_irq(&ffs->ev.waitq.lock);
return -EINVAL;
+ }
req->zero = len < le16_to_cpu(ffs->ev.setup.wLength);
@@ -828,8 +830,7 @@ static void ffs_user_copy_worker(struct work_struct *work)
{
struct ffs_io_data *io_data = container_of(work, struct ffs_io_data,
work);
- int ret = io_data->req->status ? io_data->req->status :
- io_data->req->actual;
+ int ret = io_data->status;
bool kiocb_has_eventfd = io_data->kiocb->ki_flags & IOCB_EVENTFD;
if (io_data->read && ret > 0) {
@@ -843,8 +844,6 @@ static void ffs_user_copy_worker(struct work_struct *work)
if (io_data->ffs->ffs_eventfd && !kiocb_has_eventfd)
eventfd_signal(io_data->ffs->ffs_eventfd, 1);
- usb_ep_free_request(io_data->ep, io_data->req);
-
if (io_data->read)
kfree(io_data->to_free);
ffs_free_buffer(io_data);
@@ -859,6 +858,9 @@ static void ffs_epfile_async_io_complete(struct usb_ep *_ep,
ENTER();
+ io_data->status = req->status ? req->status : req->actual;
+ usb_ep_free_request(_ep, req);
+
INIT_WORK(&io_data->work, ffs_user_copy_worker);
queue_work(ffs->io_completion_wq, &io_data->work);
}
diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
index 08726e4c68a5..0219cd79493a 100644
--- a/drivers/usb/gadget/function/f_uac2.c
+++ b/drivers/usb/gadget/function/f_uac2.c
@@ -1142,6 +1142,7 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
}
std_as_out_if0_desc.bInterfaceNumber = ret;
std_as_out_if1_desc.bInterfaceNumber = ret;
+ std_as_out_if1_desc.bNumEndpoints = 1;
uac2->as_out_intf = ret;
uac2->as_out_alt = 0;
diff --git a/drivers/usb/gadget/function/f_uvc.c b/drivers/usb/gadget/function/f_uvc.c
index 32f2c1645467..5e919fb65833 100644
--- a/drivers/usb/gadget/function/f_uvc.c
+++ b/drivers/usb/gadget/function/f_uvc.c
@@ -76,14 +76,14 @@ static struct usb_interface_descriptor uvc_control_intf = {
.bDescriptorType = USB_DT_INTERFACE,
.bInterfaceNumber = UVC_INTF_VIDEO_CONTROL,
.bAlternateSetting = 0,
- .bNumEndpoints = 1,
+ .bNumEndpoints = 0,
.bInterfaceClass = USB_CLASS_VIDEO,
.bInterfaceSubClass = UVC_SC_VIDEOCONTROL,
.bInterfaceProtocol = 0x00,
.iInterface = 0,
};
-static struct usb_endpoint_descriptor uvc_control_ep = {
+static struct usb_endpoint_descriptor uvc_interrupt_ep = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_IN,
@@ -92,8 +92,8 @@ static struct usb_endpoint_descriptor uvc_control_ep = {
.bInterval = 8,
};
-static struct usb_ss_ep_comp_descriptor uvc_ss_control_comp = {
- .bLength = sizeof(uvc_ss_control_comp),
+static struct usb_ss_ep_comp_descriptor uvc_ss_interrupt_comp = {
+ .bLength = sizeof(uvc_ss_interrupt_comp),
.bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
/* The following 3 values can be tweaked if necessary. */
.bMaxBurst = 0,
@@ -101,7 +101,7 @@ static struct usb_ss_ep_comp_descriptor uvc_ss_control_comp = {
.wBytesPerInterval = cpu_to_le16(UVC_STATUS_MAX_PACKET_SIZE),
};
-static struct uvc_control_endpoint_descriptor uvc_control_cs_ep = {
+static struct uvc_control_endpoint_descriptor uvc_interrupt_cs_ep = {
.bLength = UVC_DT_CONTROL_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_CS_ENDPOINT,
.bDescriptorSubType = UVC_EP_INTERRUPT,
@@ -300,14 +300,17 @@ uvc_function_set_alt(struct usb_function *f, unsigned interface, unsigned alt)
if (alt)
return -EINVAL;
- uvcg_info(f, "reset UVC Control\n");
- usb_ep_disable(uvc->control_ep);
+ if (uvc->enable_interrupt_ep) {
+ uvcg_info(f, "reset UVC interrupt endpoint\n");
+ usb_ep_disable(uvc->interrupt_ep);
- if (!uvc->control_ep->desc)
- if (config_ep_by_speed(cdev->gadget, f, uvc->control_ep))
- return -EINVAL;
+ if (!uvc->interrupt_ep->desc)
+ if (config_ep_by_speed(cdev->gadget, f,
+ uvc->interrupt_ep))
+ return -EINVAL;
- usb_ep_enable(uvc->control_ep);
+ usb_ep_enable(uvc->interrupt_ep);
+ }
if (uvc->state == UVC_STATE_DISCONNECTED) {
memset(&v4l2_event, 0, sizeof(v4l2_event));
@@ -385,7 +388,8 @@ uvc_function_disable(struct usb_function *f)
uvc->state = UVC_STATE_DISCONNECTED;
usb_ep_disable(uvc->video.ep);
- usb_ep_disable(uvc->control_ep);
+ if (uvc->enable_interrupt_ep)
+ usb_ep_disable(uvc->interrupt_ep);
}
/* --------------------------------------------------------------------------
@@ -474,6 +478,25 @@ uvc_register_video(struct uvc_device *uvc)
} \
} while (0)
+#define UVC_COPY_XU_DESCRIPTOR(mem, dst, desc) \
+ do { \
+ *(dst)++ = mem; \
+ memcpy(mem, desc, 22); /* bLength to bNrInPins */ \
+ mem += 22; \
+ \
+ memcpy(mem, (desc)->baSourceID, (desc)->bNrInPins); \
+ mem += (desc)->bNrInPins; \
+ \
+ memcpy(mem, &(desc)->bControlSize, 1); \
+ mem++; \
+ \
+ memcpy(mem, (desc)->bmControls, (desc)->bControlSize); \
+ mem += (desc)->bControlSize; \
+ \
+ memcpy(mem, &(desc)->iExtension, 1); \
+ mem++; \
+ } while (0)
+
static struct usb_descriptor_header **
uvc_copy_descriptors(struct uvc_device *uvc, enum usb_device_speed speed)
{
@@ -485,6 +508,7 @@ uvc_copy_descriptors(struct uvc_device *uvc, enum usb_device_speed speed)
const struct usb_descriptor_header * const *src;
struct usb_descriptor_header **dst;
struct usb_descriptor_header **hdr;
+ struct uvcg_extension *xu;
unsigned int control_size;
unsigned int streaming_size;
unsigned int n_desc;
@@ -521,9 +545,9 @@ uvc_copy_descriptors(struct uvc_device *uvc, enum usb_device_speed speed)
* uvc_iad
* uvc_control_intf
* Class-specific UVC control descriptors
- * uvc_control_ep
- * uvc_control_cs_ep
- * uvc_ss_control_comp (for SS only)
+ * uvc_interrupt_ep
+ * uvc_interrupt_cs_ep
+ * uvc_ss_interrupt_comp (for SS only)
* uvc_streaming_intf_alt0
* Class-specific UVC streaming descriptors
* uvc_{fs|hs}_streaming
@@ -533,14 +557,17 @@ uvc_copy_descriptors(struct uvc_device *uvc, enum usb_device_speed speed)
control_size = 0;
streaming_size = 0;
bytes = uvc_iad.bLength + uvc_control_intf.bLength
- + uvc_control_ep.bLength + uvc_control_cs_ep.bLength
+ uvc_streaming_intf_alt0.bLength;
- if (speed == USB_SPEED_SUPER) {
- bytes += uvc_ss_control_comp.bLength;
- n_desc = 6;
- } else {
- n_desc = 5;
+ n_desc = 3;
+ if (uvc->enable_interrupt_ep) {
+ bytes += uvc_interrupt_ep.bLength + uvc_interrupt_cs_ep.bLength;
+ n_desc += 2;
+
+ if (speed == USB_SPEED_SUPER) {
+ bytes += uvc_ss_interrupt_comp.bLength;
+ n_desc += 1;
+ }
}
for (src = (const struct usb_descriptor_header **)uvc_control_desc;
@@ -549,6 +576,13 @@ uvc_copy_descriptors(struct uvc_device *uvc, enum usb_device_speed speed)
bytes += (*src)->bLength;
n_desc++;
}
+
+ list_for_each_entry(xu, uvc->desc.extension_units, list) {
+ control_size += xu->desc.bLength;
+ bytes += xu->desc.bLength;
+ n_desc++;
+ }
+
for (src = (const struct usb_descriptor_header **)uvc_streaming_cls;
*src; ++src) {
streaming_size += (*src)->bLength;
@@ -575,15 +609,22 @@ uvc_copy_descriptors(struct uvc_device *uvc, enum usb_device_speed speed)
uvc_control_header = mem;
UVC_COPY_DESCRIPTORS(mem, dst,
(const struct usb_descriptor_header **)uvc_control_desc);
+
+ list_for_each_entry(xu, uvc->desc.extension_units, list)
+ UVC_COPY_XU_DESCRIPTOR(mem, dst, &xu->desc);
+
uvc_control_header->wTotalLength = cpu_to_le16(control_size);
uvc_control_header->bInCollection = 1;
uvc_control_header->baInterfaceNr[0] = uvc->streaming_intf;
- UVC_COPY_DESCRIPTOR(mem, dst, &uvc_control_ep);
- if (speed == USB_SPEED_SUPER)
- UVC_COPY_DESCRIPTOR(mem, dst, &uvc_ss_control_comp);
+ if (uvc->enable_interrupt_ep) {
+ UVC_COPY_DESCRIPTOR(mem, dst, &uvc_interrupt_ep);
+ if (speed == USB_SPEED_SUPER)
+ UVC_COPY_DESCRIPTOR(mem, dst, &uvc_ss_interrupt_comp);
+
+ UVC_COPY_DESCRIPTOR(mem, dst, &uvc_interrupt_cs_ep);
+ }
- UVC_COPY_DESCRIPTOR(mem, dst, &uvc_control_cs_ep);
UVC_COPY_DESCRIPTOR(mem, dst, &uvc_streaming_intf_alt0);
uvc_streaming_header = mem;
@@ -603,6 +644,7 @@ uvc_function_bind(struct usb_configuration *c, struct usb_function *f)
{
struct usb_composite_dev *cdev = c->cdev;
struct uvc_device *uvc = to_uvc(f);
+ struct uvcg_extension *xu;
struct usb_string *us;
unsigned int max_packet_mult;
unsigned int max_packet_size;
@@ -666,12 +708,16 @@ uvc_function_bind(struct usb_configuration *c, struct usb_function *f)
(opts->streaming_maxburst + 1));
/* Allocate endpoints. */
- ep = usb_ep_autoconfig(cdev->gadget, &uvc_control_ep);
- if (!ep) {
- uvcg_info(f, "Unable to allocate control EP\n");
- goto error;
+ if (opts->enable_interrupt_ep) {
+ ep = usb_ep_autoconfig(cdev->gadget, &uvc_interrupt_ep);
+ if (!ep) {
+ uvcg_info(f, "Unable to allocate interrupt EP\n");
+ goto error;
+ }
+ uvc->interrupt_ep = ep;
+ uvc_control_intf.bNumEndpoints = 1;
}
- uvc->control_ep = ep;
+ uvc->enable_interrupt_ep = opts->enable_interrupt_ep;
if (gadget_is_superspeed(c->cdev->gadget))
ep = usb_ep_autoconfig_ss(cdev->gadget, &uvc_ss_streaming_ep,
@@ -691,6 +737,18 @@ uvc_function_bind(struct usb_configuration *c, struct usb_function *f)
uvc_hs_streaming_ep.bEndpointAddress = uvc->video.ep->address;
uvc_ss_streaming_ep.bEndpointAddress = uvc->video.ep->address;
+ /*
+ * XUs can have an arbitrary string descriptor describing them. If they
+ * have one pick up the ID.
+ */
+ list_for_each_entry(xu, &opts->extension_units, list)
+ if (xu->string_descriptor_index)
+ xu->desc.iExtension = cdev->usb_strings[xu->string_descriptor_index].id;
+
+ /*
+ * We attach the hard-coded defaults incase the user does not provide
+ * any more appropriate strings through configfs.
+ */
uvc_en_us_strings[UVC_STRING_CONTROL_IDX].s = opts->function_name;
us = usb_gstrings_attach(cdev, uvc_function_strings,
ARRAY_SIZE(uvc_en_us_strings));
@@ -698,11 +756,15 @@ uvc_function_bind(struct usb_configuration *c, struct usb_function *f)
ret = PTR_ERR(us);
goto error;
}
- uvc_iad.iFunction = us[UVC_STRING_CONTROL_IDX].id;
- uvc_control_intf.iInterface = us[UVC_STRING_CONTROL_IDX].id;
- ret = us[UVC_STRING_STREAMING_IDX].id;
- uvc_streaming_intf_alt0.iInterface = ret;
- uvc_streaming_intf_alt1.iInterface = ret;
+
+ uvc_iad.iFunction = opts->iad_index ? cdev->usb_strings[opts->iad_index].id :
+ us[UVC_STRING_CONTROL_IDX].id;
+ uvc_streaming_intf_alt0.iInterface = opts->vs0_index ?
+ cdev->usb_strings[opts->vs0_index].id :
+ us[UVC_STRING_STREAMING_IDX].id;
+ uvc_streaming_intf_alt1.iInterface = opts->vs1_index ?
+ cdev->usb_strings[opts->vs1_index].id :
+ us[UVC_STRING_STREAMING_IDX].id;
/* Allocate interface IDs. */
if ((ret = usb_interface_id(c, f)) < 0)
@@ -803,7 +865,6 @@ static struct usb_function_instance *uvc_alloc_inst(void)
struct uvc_camera_terminal_descriptor *cd;
struct uvc_processing_unit_descriptor *pd;
struct uvc_output_terminal_descriptor *od;
- struct uvc_color_matching_descriptor *md;
struct uvc_descriptor_header **ctl_cls;
int ret;
@@ -852,13 +913,12 @@ static struct usb_function_instance *uvc_alloc_inst(void)
od->bSourceID = 2;
od->iTerminal = 0;
- md = &opts->uvc_color_matching;
- md->bLength = UVC_DT_COLOR_MATCHING_SIZE;
- md->bDescriptorType = USB_DT_CS_INTERFACE;
- md->bDescriptorSubType = UVC_VS_COLORFORMAT;
- md->bColorPrimaries = 1;
- md->bTransferCharacteristics = 1;
- md->bMatrixCoefficients = 4;
+ /*
+ * With the ability to add XUs to the UVC function graph, we need to be
+ * able to allocate unique unit IDs to them. The IDs are 1-based, with
+ * the CT, PU and OT above consuming the first 3.
+ */
+ opts->last_unit_id = 3;
/* Prepare fs control class descriptors for configfs-based gadgets */
ctl_cls = opts->uvc_fs_control_cls;
@@ -880,6 +940,8 @@ static struct usb_function_instance *uvc_alloc_inst(void)
opts->ss_control =
(const struct uvc_descriptor_header * const *)ctl_cls;
+ INIT_LIST_HEAD(&opts->extension_units);
+
opts->streaming_interval = 1;
opts->streaming_maxpacket = 1024;
snprintf(opts->function_name, sizeof(opts->function_name), "UVC Camera");
@@ -1011,6 +1073,8 @@ static struct usb_function *uvc_alloc(struct usb_function_instance *fi)
return ERR_PTR(-EBUSY);
}
+ uvc->desc.extension_units = &opts->extension_units;
+
++opts->refcnt;
mutex_unlock(&opts->lock);
diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
index 8f12f3f8f6ee..f259975dfba4 100644
--- a/drivers/usb/gadget/function/u_ether.c
+++ b/drivers/usb/gadget/function/u_ether.c
@@ -17,6 +17,7 @@
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
+#include <linux/usb/composite.h>
#include "u_ether.h"
@@ -103,41 +104,6 @@ static inline int qlen(struct usb_gadget *gadget, unsigned qmult)
/*-------------------------------------------------------------------------*/
-/* REVISIT there must be a better way than having two sets
- * of debug calls ...
- */
-
-#undef DBG
-#undef VDBG
-#undef ERROR
-#undef INFO
-
-#define xprintk(d, level, fmt, args...) \
- printk(level "%s: " fmt , (d)->net->name , ## args)
-
-#ifdef DEBUG
-#undef DEBUG
-#define DBG(dev, fmt, args...) \
- xprintk(dev , KERN_DEBUG , fmt , ## args)
-#else
-#define DBG(dev, fmt, args...) \
- do { } while (0)
-#endif /* DEBUG */
-
-#ifdef VERBOSE_DEBUG
-#define VDBG DBG
-#else
-#define VDBG(dev, fmt, args...) \
- do { } while (0)
-#endif /* DEBUG */
-
-#define ERROR(dev, fmt, args...) \
- xprintk(dev , KERN_ERR , fmt , ## args)
-#define INFO(dev, fmt, args...) \
- xprintk(dev , KERN_INFO , fmt , ## args)
-
-/*-------------------------------------------------------------------------*/
-
/* NETWORK DRIVER HOOKUP (to the layer above this driver) */
static void eth_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *p)
@@ -798,6 +764,7 @@ struct eth_dev *gether_setup_name(struct usb_gadget *g,
net->max_mtu = GETHER_MAX_MTU_SIZE;
dev->gadget = g;
+ SET_NETDEV_DEV(net, &g->dev);
SET_NETDEV_DEVTYPE(net, &gadget_type);
status = register_netdev(net);
@@ -845,13 +812,11 @@ struct net_device *gether_setup_name_default(const char *netname)
snprintf(net->name, sizeof(net->name), "%s%%d", netname);
eth_random_addr(dev->dev_mac);
- pr_warn("using random %s ethernet address\n", "self");
/* by default we always have a random MAC address */
net->addr_assign_type = NET_ADDR_RANDOM;
eth_random_addr(dev->host_mac);
- pr_warn("using random %s ethernet address\n", "host");
net->netdev_ops = &eth_netdev_ops;
@@ -872,6 +837,8 @@ int gether_register_netdev(struct net_device *net)
struct usb_gadget *g;
int status;
+ if (!net->dev.parent)
+ return -EINVAL;
dev = netdev_priv(net);
g = dev->gadget;
@@ -902,6 +869,7 @@ void gether_set_gadget(struct net_device *net, struct usb_gadget *g)
dev = netdev_priv(net);
dev->gadget = g;
+ SET_NETDEV_DEV(net, &g->dev);
}
EXPORT_SYMBOL_GPL(gether_set_gadget);
diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c
index 840626e064e1..a0ca47fbff0f 100644
--- a/drivers/usb/gadget/function/u_serial.c
+++ b/drivers/usb/gadget/function/u_serial.c
@@ -82,6 +82,9 @@
#define WRITE_BUF_SIZE 8192 /* TX only */
#define GS_CONSOLE_BUF_SIZE 8192
+/* Prevents race conditions while accessing gser->ioport */
+static DEFINE_SPINLOCK(serial_port_lock);
+
/* console info */
struct gs_console {
struct console console;
@@ -1375,8 +1378,10 @@ void gserial_disconnect(struct gserial *gser)
if (!port)
return;
+ spin_lock_irqsave(&serial_port_lock, flags);
+
/* tell the TTY glue not to do I/O here any more */
- spin_lock_irqsave(&port->port_lock, flags);
+ spin_lock(&port->port_lock);
gs_console_disconnect(port);
@@ -1391,7 +1396,8 @@ void gserial_disconnect(struct gserial *gser)
tty_hangup(port->port.tty);
}
port->suspended = false;
- spin_unlock_irqrestore(&port->port_lock, flags);
+ spin_unlock(&port->port_lock);
+ spin_unlock_irqrestore(&serial_port_lock, flags);
/* disable endpoints, aborting down any active I/O */
usb_ep_disable(gser->out);
@@ -1425,10 +1431,19 @@ EXPORT_SYMBOL_GPL(gserial_suspend);
void gserial_resume(struct gserial *gser)
{
- struct gs_port *port = gser->ioport;
+ struct gs_port *port;
unsigned long flags;
- spin_lock_irqsave(&port->port_lock, flags);
+ spin_lock_irqsave(&serial_port_lock, flags);
+ port = gser->ioport;
+
+ if (!port) {
+ spin_unlock_irqrestore(&serial_port_lock, flags);
+ return;
+ }
+
+ spin_lock(&port->port_lock);
+ spin_unlock(&serial_port_lock);
port->suspended = false;
if (!port->start_delayed) {
spin_unlock_irqrestore(&port->port_lock, flags);
diff --git a/drivers/usb/gadget/function/u_uvc.h b/drivers/usb/gadget/function/u_uvc.h
index 24b8681b0d6f..1ce58f61253c 100644
--- a/drivers/usb/gadget/function/u_uvc.h
+++ b/drivers/usb/gadget/function/u_uvc.h
@@ -28,6 +28,9 @@ struct f_uvc_opts {
unsigned int control_interface;
unsigned int streaming_interface;
char function_name[32];
+ unsigned int last_unit_id;
+
+ bool enable_interrupt_ep;
/*
* Control descriptors array pointers for full-/high-speed and
@@ -52,7 +55,6 @@ struct f_uvc_opts {
struct uvc_camera_terminal_descriptor uvc_camera_terminal;
struct uvc_processing_unit_descriptor uvc_processing;
struct uvc_output_terminal_descriptor uvc_output_terminal;
- struct uvc_color_matching_descriptor uvc_color_matching;
/*
* Control descriptors pointers arrays for full-/high-speed and
@@ -65,6 +67,12 @@ struct f_uvc_opts {
struct uvc_descriptor_header *uvc_ss_control_cls[5];
/*
+ * Control descriptors for extension units. There could be any number
+ * of these, including none at all.
+ */
+ struct list_head extension_units;
+
+ /*
* Streaming descriptors for full-speed, high-speed and super-speed.
* Used by configfs only, must not be touched by legacy gadgets. The
* arrays are allocated at runtime as the number of descriptors isn't
@@ -75,6 +83,14 @@ struct f_uvc_opts {
struct uvc_descriptor_header **uvc_ss_streaming_cls;
/*
+ * Indexes into the function's string descriptors allowing users to set
+ * custom descriptions rather than the hard-coded defaults.
+ */
+ u8 iad_index;
+ u8 vs0_index;
+ u8 vs1_index;
+
+ /*
* Read/write access to configfs attributes is handled by configfs.
*
* This lock protects the descriptors from concurrent access by
diff --git a/drivers/usb/gadget/function/uvc.h b/drivers/usb/gadget/function/uvc.h
index 40226b1f7e14..100475b1363e 100644
--- a/drivers/usb/gadget/function/uvc.h
+++ b/drivers/usb/gadget/function/uvc.h
@@ -143,12 +143,14 @@ struct uvc_device {
const struct uvc_descriptor_header * const *fs_streaming;
const struct uvc_descriptor_header * const *hs_streaming;
const struct uvc_descriptor_header * const *ss_streaming;
+ struct list_head *extension_units;
} desc;
unsigned int control_intf;
- struct usb_ep *control_ep;
+ struct usb_ep *interrupt_ep;
struct usb_request *control_req;
void *control_buf;
+ bool enable_interrupt_ep;
unsigned int streaming_intf;
diff --git a/drivers/usb/gadget/function/uvc_configfs.c b/drivers/usb/gadget/function/uvc_configfs.c
index 76cb60d13049..62b759bb7613 100644
--- a/drivers/usb/gadget/function/uvc_configfs.c
+++ b/drivers/usb/gadget/function/uvc_configfs.c
@@ -13,6 +13,7 @@
#include "uvc_configfs.h"
#include <linux/sort.h>
+#include <linux/usb/video.h>
/* -----------------------------------------------------------------------------
* Global Utility Structures and Macros
@@ -46,6 +47,71 @@ static int uvcg_config_compare_u32(const void *l, const void *r)
return li < ri ? -1 : li == ri ? 0 : 1;
}
+static inline int __uvcg_count_item_entries(char *buf, void *priv, unsigned int size)
+{
+ ++*((int *)priv);
+ return 0;
+}
+
+static inline int __uvcg_fill_item_entries(char *buf, void *priv, unsigned int size)
+{
+ unsigned int num;
+ u8 **values;
+ int ret;
+
+ ret = kstrtouint(buf, 0, &num);
+ if (ret)
+ return ret;
+
+ if (num != (num & GENMASK((size * 8) - 1, 0)))
+ return -ERANGE;
+
+ values = priv;
+ memcpy(*values, &num, size);
+ *values += size;
+
+ return 0;
+}
+
+static int __uvcg_iter_item_entries(const char *page, size_t len,
+ int (*fun)(char *, void *, unsigned int),
+ void *priv, unsigned int size)
+{
+ /* sign, base 2 representation, newline, terminator */
+ unsigned int bufsize = 1 + size * 8 + 1 + 1;
+ const char *pg = page;
+ int i, ret = 0;
+ char *buf;
+
+ if (!fun)
+ return -EINVAL;
+
+ buf = kzalloc(bufsize, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ while (pg - page < len) {
+ i = 0;
+ while (i < sizeof(buf) && (pg - page < len) &&
+ *pg != '\0' && *pg != '\n')
+ buf[i++] = *pg++;
+ if (i == sizeof(buf)) {
+ ret = -EINVAL;
+ goto out_free_buf;
+ }
+ while ((pg - page < len) && (*pg == '\0' || *pg == '\n'))
+ ++pg;
+ buf[i] = '\0';
+ ret = fun(buf, priv, size);
+ if (ret)
+ goto out_free_buf;
+ }
+
+out_free_buf:
+ kfree(buf);
+ return ret;
+}
+
struct uvcg_config_group_type {
struct config_item_type type;
const char *name;
@@ -483,11 +549,68 @@ UVC_ATTR_RO(uvcg_default_output_, cname, aname)
UVCG_DEFAULT_OUTPUT_ATTR(b_terminal_id, bTerminalID, 8);
UVCG_DEFAULT_OUTPUT_ATTR(w_terminal_type, wTerminalType, 16);
UVCG_DEFAULT_OUTPUT_ATTR(b_assoc_terminal, bAssocTerminal, 8);
-UVCG_DEFAULT_OUTPUT_ATTR(b_source_id, bSourceID, 8);
UVCG_DEFAULT_OUTPUT_ATTR(i_terminal, iTerminal, 8);
#undef UVCG_DEFAULT_OUTPUT_ATTR
+static ssize_t uvcg_default_output_b_source_id_show(struct config_item *item,
+ char *page)
+{
+ struct config_group *group = to_config_group(item);
+ struct f_uvc_opts *opts;
+ struct config_item *opts_item;
+ struct mutex *su_mutex = &group->cg_subsys->su_mutex;
+ struct uvc_output_terminal_descriptor *cd;
+ int result;
+
+ mutex_lock(su_mutex); /* for navigating configfs hierarchy */
+
+ opts_item = group->cg_item.ci_parent->ci_parent->
+ ci_parent->ci_parent;
+ opts = to_f_uvc_opts(opts_item);
+ cd = &opts->uvc_output_terminal;
+
+ mutex_lock(&opts->lock);
+ result = sprintf(page, "%u\n", le8_to_cpu(cd->bSourceID));
+ mutex_unlock(&opts->lock);
+
+ mutex_unlock(su_mutex);
+
+ return result;
+}
+
+static ssize_t uvcg_default_output_b_source_id_store(struct config_item *item,
+ const char *page, size_t len)
+{
+ struct config_group *group = to_config_group(item);
+ struct f_uvc_opts *opts;
+ struct config_item *opts_item;
+ struct mutex *su_mutex = &group->cg_subsys->su_mutex;
+ struct uvc_output_terminal_descriptor *cd;
+ int result;
+ u8 num;
+
+ result = kstrtou8(page, 0, &num);
+ if (result)
+ return result;
+
+ mutex_lock(su_mutex); /* for navigating configfs hierarchy */
+
+ opts_item = group->cg_item.ci_parent->ci_parent->
+ ci_parent->ci_parent;
+ opts = to_f_uvc_opts(opts_item);
+ cd = &opts->uvc_output_terminal;
+
+ mutex_lock(&opts->lock);
+ cd->bSourceID = num;
+ mutex_unlock(&opts->lock);
+
+ mutex_unlock(su_mutex);
+
+ return len;
+}
+UVC_ATTR(uvcg_default_output_, b_source_id, bSourceID);
+
static struct configfs_attribute *uvcg_default_output_attrs[] = {
&uvcg_default_output_attr_b_terminal_id,
&uvcg_default_output_attr_w_terminal_type,
@@ -540,6 +663,537 @@ static const struct uvcg_config_group_type uvcg_terminal_grp_type = {
};
/* -----------------------------------------------------------------------------
+ * control/extensions
+ */
+
+#define UVCG_EXTENSION_ATTR(cname, aname, ro...) \
+static ssize_t uvcg_extension_##cname##_show(struct config_item *item, \
+ char *page) \
+{ \
+ struct config_group *group = to_config_group(item->ci_parent); \
+ struct mutex *su_mutex = &group->cg_subsys->su_mutex; \
+ struct uvcg_extension *xu = to_uvcg_extension(item); \
+ struct config_item *opts_item; \
+ struct f_uvc_opts *opts; \
+ int ret; \
+ \
+ mutex_lock(su_mutex); \
+ \
+ opts_item = item->ci_parent->ci_parent->ci_parent; \
+ opts = to_f_uvc_opts(opts_item); \
+ \
+ mutex_lock(&opts->lock); \
+ ret = sprintf(page, "%u\n", xu->desc.aname); \
+ mutex_unlock(&opts->lock); \
+ \
+ mutex_unlock(su_mutex); \
+ \
+ return ret; \
+} \
+UVC_ATTR##ro(uvcg_extension_, cname, aname)
+
+UVCG_EXTENSION_ATTR(b_length, bLength, _RO);
+UVCG_EXTENSION_ATTR(b_unit_id, bUnitID, _RO);
+UVCG_EXTENSION_ATTR(i_extension, iExtension, _RO);
+
+static ssize_t uvcg_extension_b_num_controls_store(struct config_item *item,
+ const char *page, size_t len)
+{
+ struct config_group *group = to_config_group(item->ci_parent);
+ struct mutex *su_mutex = &group->cg_subsys->su_mutex;
+ struct uvcg_extension *xu = to_uvcg_extension(item);
+ struct config_item *opts_item;
+ struct f_uvc_opts *opts;
+ int ret;
+ u8 num;
+
+ ret = kstrtou8(page, 0, &num);
+ if (ret)
+ return ret;
+
+ mutex_lock(su_mutex);
+
+ opts_item = item->ci_parent->ci_parent->ci_parent;
+ opts = to_f_uvc_opts(opts_item);
+
+ mutex_lock(&opts->lock);
+ xu->desc.bNumControls = num;
+ mutex_unlock(&opts->lock);
+
+ mutex_unlock(su_mutex);
+
+ return len;
+}
+UVCG_EXTENSION_ATTR(b_num_controls, bNumControls);
+
+/*
+ * In addition to storing bNrInPins, this function needs to realloc the
+ * memory for the baSourceID array and additionally expand bLength.
+ */
+static ssize_t uvcg_extension_b_nr_in_pins_store(struct config_item *item,
+ const char *page, size_t len)
+{
+ struct config_group *group = to_config_group(item->ci_parent);
+ struct mutex *su_mutex = &group->cg_subsys->su_mutex;
+ struct uvcg_extension *xu = to_uvcg_extension(item);
+ struct config_item *opts_item;
+ struct f_uvc_opts *opts;
+ void *tmp_buf;
+ int ret;
+ u8 num;
+
+ ret = kstrtou8(page, 0, &num);
+ if (ret)
+ return ret;
+
+ mutex_lock(su_mutex);
+
+ opts_item = item->ci_parent->ci_parent->ci_parent;
+ opts = to_f_uvc_opts(opts_item);
+
+ mutex_lock(&opts->lock);
+
+ if (num == xu->desc.bNrInPins) {
+ ret = len;
+ goto unlock;
+ }
+
+ tmp_buf = krealloc_array(xu->desc.baSourceID, num, sizeof(u8),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!tmp_buf) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+
+ xu->desc.baSourceID = tmp_buf;
+ xu->desc.bNrInPins = num;
+ xu->desc.bLength = UVC_DT_EXTENSION_UNIT_SIZE(xu->desc.bNrInPins,
+ xu->desc.bControlSize);
+
+ ret = len;
+
+unlock:
+ mutex_unlock(&opts->lock);
+ mutex_unlock(su_mutex);
+ return ret;
+}
+UVCG_EXTENSION_ATTR(b_nr_in_pins, bNrInPins);
+
+/*
+ * In addition to storing bControlSize, this function needs to realloc the
+ * memory for the bmControls array and additionally expand bLength.
+ */
+static ssize_t uvcg_extension_b_control_size_store(struct config_item *item,
+ const char *page, size_t len)
+{
+ struct config_group *group = to_config_group(item->ci_parent);
+ struct mutex *su_mutex = &group->cg_subsys->su_mutex;
+ struct uvcg_extension *xu = to_uvcg_extension(item);
+ struct config_item *opts_item;
+ struct f_uvc_opts *opts;
+ void *tmp_buf;
+ int ret;
+ u8 num;
+
+ ret = kstrtou8(page, 0, &num);
+ if (ret)
+ return ret;
+
+ mutex_lock(su_mutex);
+
+ opts_item = item->ci_parent->ci_parent->ci_parent;
+ opts = to_f_uvc_opts(opts_item);
+
+ mutex_lock(&opts->lock);
+
+ if (num == xu->desc.bControlSize) {
+ ret = len;
+ goto unlock;
+ }
+
+ tmp_buf = krealloc_array(xu->desc.bmControls, num, sizeof(u8),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!tmp_buf) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+
+ xu->desc.bmControls = tmp_buf;
+ xu->desc.bControlSize = num;
+ xu->desc.bLength = UVC_DT_EXTENSION_UNIT_SIZE(xu->desc.bNrInPins,
+ xu->desc.bControlSize);
+
+ ret = len;
+
+unlock:
+ mutex_unlock(&opts->lock);
+ mutex_unlock(su_mutex);
+ return ret;
+}
+
+UVCG_EXTENSION_ATTR(b_control_size, bControlSize);
+
+static ssize_t uvcg_extension_guid_extension_code_show(struct config_item *item,
+ char *page)
+{
+ struct config_group *group = to_config_group(item->ci_parent);
+ struct mutex *su_mutex = &group->cg_subsys->su_mutex;
+ struct uvcg_extension *xu = to_uvcg_extension(item);
+ struct config_item *opts_item;
+ struct f_uvc_opts *opts;
+
+ mutex_lock(su_mutex);
+
+ opts_item = item->ci_parent->ci_parent->ci_parent;
+ opts = to_f_uvc_opts(opts_item);
+
+ mutex_lock(&opts->lock);
+ memcpy(page, xu->desc.guidExtensionCode, sizeof(xu->desc.guidExtensionCode));
+ mutex_unlock(&opts->lock);
+
+ mutex_unlock(su_mutex);
+
+ return sizeof(xu->desc.guidExtensionCode);
+}
+
+static ssize_t uvcg_extension_guid_extension_code_store(struct config_item *item,
+ const char *page, size_t len)
+{
+ struct config_group *group = to_config_group(item->ci_parent);
+ struct mutex *su_mutex = &group->cg_subsys->su_mutex;
+ struct uvcg_extension *xu = to_uvcg_extension(item);
+ struct config_item *opts_item;
+ struct f_uvc_opts *opts;
+ int ret;
+
+ mutex_lock(su_mutex);
+
+ opts_item = item->ci_parent->ci_parent->ci_parent;
+ opts = to_f_uvc_opts(opts_item);
+
+ mutex_lock(&opts->lock);
+ memcpy(xu->desc.guidExtensionCode, page,
+ min(sizeof(xu->desc.guidExtensionCode), len));
+ mutex_unlock(&opts->lock);
+
+ mutex_unlock(su_mutex);
+
+ ret = sizeof(xu->desc.guidExtensionCode);
+
+ return ret;
+}
+
+UVC_ATTR(uvcg_extension_, guid_extension_code, guidExtensionCode);
+
+static ssize_t uvcg_extension_ba_source_id_show(struct config_item *item,
+ char *page)
+{
+ struct config_group *group = to_config_group(item->ci_parent);
+ struct mutex *su_mutex = &group->cg_subsys->su_mutex;
+ struct uvcg_extension *xu = to_uvcg_extension(item);
+ struct config_item *opts_item;
+ struct f_uvc_opts *opts;
+ char *pg = page;
+ int ret, i;
+
+ mutex_lock(su_mutex);
+
+ opts_item = item->ci_parent->ci_parent->ci_parent;
+ opts = to_f_uvc_opts(opts_item);
+
+ mutex_lock(&opts->lock);
+ for (ret = 0, i = 0; i < xu->desc.bNrInPins; ++i) {
+ ret += sprintf(pg, "%u\n", xu->desc.baSourceID[i]);
+ pg = page + ret;
+ }
+ mutex_unlock(&opts->lock);
+
+ mutex_unlock(su_mutex);
+
+ return ret;
+}
+
+static ssize_t uvcg_extension_ba_source_id_store(struct config_item *item,
+ const char *page, size_t len)
+{
+ struct config_group *group = to_config_group(item->ci_parent);
+ struct mutex *su_mutex = &group->cg_subsys->su_mutex;
+ struct uvcg_extension *xu = to_uvcg_extension(item);
+ struct config_item *opts_item;
+ struct f_uvc_opts *opts;
+ u8 *source_ids, *iter;
+ int ret, n = 0;
+
+ mutex_lock(su_mutex);
+
+ opts_item = item->ci_parent->ci_parent->ci_parent;
+ opts = to_f_uvc_opts(opts_item);
+
+ mutex_lock(&opts->lock);
+
+ ret = __uvcg_iter_item_entries(page, len, __uvcg_count_item_entries, &n,
+ sizeof(u8));
+ if (ret)
+ goto unlock;
+
+ iter = source_ids = kcalloc(n, sizeof(u8), GFP_KERNEL);
+ if (!source_ids) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+
+ ret = __uvcg_iter_item_entries(page, len, __uvcg_fill_item_entries, &iter,
+ sizeof(u8));
+ if (ret) {
+ kfree(source_ids);
+ goto unlock;
+ }
+
+ kfree(xu->desc.baSourceID);
+ xu->desc.baSourceID = source_ids;
+ xu->desc.bNrInPins = n;
+ xu->desc.bLength = UVC_DT_EXTENSION_UNIT_SIZE(xu->desc.bNrInPins,
+ xu->desc.bControlSize);
+
+ ret = len;
+
+unlock:
+ mutex_unlock(&opts->lock);
+ mutex_unlock(su_mutex);
+ return ret;
+}
+UVC_ATTR(uvcg_extension_, ba_source_id, baSourceID);
+
+static ssize_t uvcg_extension_bm_controls_show(struct config_item *item,
+ char *page)
+{
+ struct config_group *group = to_config_group(item->ci_parent);
+ struct mutex *su_mutex = &group->cg_subsys->su_mutex;
+ struct uvcg_extension *xu = to_uvcg_extension(item);
+ struct config_item *opts_item;
+ struct f_uvc_opts *opts;
+ char *pg = page;
+ int ret, i;
+
+ mutex_lock(su_mutex);
+
+ opts_item = item->ci_parent->ci_parent->ci_parent;
+ opts = to_f_uvc_opts(opts_item);
+
+ mutex_lock(&opts->lock);
+ for (ret = 0, i = 0; i < xu->desc.bControlSize; ++i) {
+ ret += sprintf(pg, "0x%02x\n", xu->desc.bmControls[i]);
+ pg = page + ret;
+ }
+ mutex_unlock(&opts->lock);
+
+ mutex_unlock(su_mutex);
+
+ return ret;
+}
+
+static ssize_t uvcg_extension_bm_controls_store(struct config_item *item,
+ const char *page, size_t len)
+{
+ struct config_group *group = to_config_group(item->ci_parent);
+ struct mutex *su_mutex = &group->cg_subsys->su_mutex;
+ struct uvcg_extension *xu = to_uvcg_extension(item);
+ struct config_item *opts_item;
+ struct f_uvc_opts *opts;
+ u8 *bm_controls, *iter;
+ int ret, n = 0;
+
+ mutex_lock(su_mutex);
+
+ opts_item = item->ci_parent->ci_parent->ci_parent;
+ opts = to_f_uvc_opts(opts_item);
+
+ mutex_lock(&opts->lock);
+
+ ret = __uvcg_iter_item_entries(page, len, __uvcg_count_item_entries, &n,
+ sizeof(u8));
+ if (ret)
+ goto unlock;
+
+ iter = bm_controls = kcalloc(n, sizeof(u8), GFP_KERNEL);
+ if (!bm_controls) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+
+ ret = __uvcg_iter_item_entries(page, len, __uvcg_fill_item_entries, &iter,
+ sizeof(u8));
+ if (ret) {
+ kfree(bm_controls);
+ goto unlock;
+ }
+
+ kfree(xu->desc.bmControls);
+ xu->desc.bmControls = bm_controls;
+ xu->desc.bControlSize = n;
+ xu->desc.bLength = UVC_DT_EXTENSION_UNIT_SIZE(xu->desc.bNrInPins,
+ xu->desc.bControlSize);
+
+ ret = len;
+
+unlock:
+ mutex_unlock(&opts->lock);
+ mutex_unlock(su_mutex);
+ return ret;
+}
+
+UVC_ATTR(uvcg_extension_, bm_controls, bmControls);
+
+static struct configfs_attribute *uvcg_extension_attrs[] = {
+ &uvcg_extension_attr_b_length,
+ &uvcg_extension_attr_b_unit_id,
+ &uvcg_extension_attr_b_num_controls,
+ &uvcg_extension_attr_b_nr_in_pins,
+ &uvcg_extension_attr_b_control_size,
+ &uvcg_extension_attr_guid_extension_code,
+ &uvcg_extension_attr_ba_source_id,
+ &uvcg_extension_attr_bm_controls,
+ &uvcg_extension_attr_i_extension,
+ NULL,
+};
+
+static void uvcg_extension_release(struct config_item *item)
+{
+ struct uvcg_extension *xu = container_of(item, struct uvcg_extension, item);
+
+ kfree(xu);
+}
+
+static int uvcg_extension_allow_link(struct config_item *src, struct config_item *tgt)
+{
+ struct mutex *su_mutex = &src->ci_group->cg_subsys->su_mutex;
+ struct uvcg_extension *xu = to_uvcg_extension(src);
+ struct config_item *gadget_item;
+ struct gadget_string *string;
+ struct config_item *strings;
+ int ret = 0;
+
+ mutex_lock(su_mutex); /* for navigating configfs hierarchy */
+
+ /* Validate that the target of the link is an entry in strings/<langid> */
+ gadget_item = src->ci_parent->ci_parent->ci_parent->ci_parent->ci_parent;
+ strings = config_group_find_item(to_config_group(gadget_item), "strings");
+ if (!strings || tgt->ci_parent->ci_parent != strings) {
+ ret = -EINVAL;
+ goto put_strings;
+ }
+
+ string = to_gadget_string(tgt);
+ xu->string_descriptor_index = string->usb_string.id;
+
+put_strings:
+ config_item_put(strings);
+ mutex_unlock(su_mutex);
+
+ return ret;
+}
+
+static void uvcg_extension_drop_link(struct config_item *src, struct config_item *tgt)
+{
+ struct mutex *su_mutex = &src->ci_group->cg_subsys->su_mutex;
+ struct uvcg_extension *xu = to_uvcg_extension(src);
+ struct config_item *opts_item;
+ struct f_uvc_opts *opts;
+
+ mutex_lock(su_mutex); /* for navigating configfs hierarchy */
+
+ opts_item = src->ci_parent->ci_parent->ci_parent;
+ opts = to_f_uvc_opts(opts_item);
+
+ mutex_lock(&opts->lock);
+
+ xu->string_descriptor_index = 0;
+
+ mutex_unlock(&opts->lock);
+
+ mutex_unlock(su_mutex);
+}
+
+static struct configfs_item_operations uvcg_extension_item_ops = {
+ .release = uvcg_extension_release,
+ .allow_link = uvcg_extension_allow_link,
+ .drop_link = uvcg_extension_drop_link,
+};
+
+static const struct config_item_type uvcg_extension_type = {
+ .ct_item_ops = &uvcg_extension_item_ops,
+ .ct_attrs = uvcg_extension_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+static void uvcg_extension_drop(struct config_group *group, struct config_item *item)
+{
+ struct uvcg_extension *xu = container_of(item, struct uvcg_extension, item);
+ struct config_item *opts_item;
+ struct f_uvc_opts *opts;
+
+ opts_item = group->cg_item.ci_parent->ci_parent;
+ opts = to_f_uvc_opts(opts_item);
+
+ mutex_lock(&opts->lock);
+
+ config_item_put(item);
+ list_del(&xu->list);
+ kfree(xu->desc.baSourceID);
+ kfree(xu->desc.bmControls);
+
+ mutex_unlock(&opts->lock);
+}
+
+static struct config_item *uvcg_extension_make(struct config_group *group, const char *name)
+{
+ struct config_item *opts_item;
+ struct uvcg_extension *xu;
+ struct f_uvc_opts *opts;
+
+ opts_item = group->cg_item.ci_parent->ci_parent;
+ opts = to_f_uvc_opts(opts_item);
+
+ xu = kzalloc(sizeof(*xu), GFP_KERNEL);
+ if (!xu)
+ return ERR_PTR(-ENOMEM);
+
+ xu->desc.bLength = UVC_DT_EXTENSION_UNIT_SIZE(0, 0);
+ xu->desc.bDescriptorType = USB_DT_CS_INTERFACE;
+ xu->desc.bDescriptorSubType = UVC_VC_EXTENSION_UNIT;
+ xu->desc.bNumControls = 0;
+ xu->desc.bNrInPins = 0;
+ xu->desc.baSourceID = NULL;
+ xu->desc.bControlSize = 0;
+ xu->desc.bmControls = NULL;
+
+ mutex_lock(&opts->lock);
+
+ xu->desc.bUnitID = ++opts->last_unit_id;
+
+ config_item_init_type_name(&xu->item, name, &uvcg_extension_type);
+ list_add_tail(&xu->list, &opts->extension_units);
+
+ mutex_unlock(&opts->lock);
+
+ return &xu->item;
+}
+
+static struct configfs_group_operations uvcg_extensions_grp_ops = {
+ .make_item = uvcg_extension_make,
+ .drop_item = uvcg_extension_drop,
+};
+
+static const struct uvcg_config_group_type uvcg_extensions_grp_type = {
+ .type = {
+ .ct_item_ops = &uvcg_config_item_ops,
+ .ct_group_ops = &uvcg_extensions_grp_ops,
+ .ct_owner = THIS_MODULE,
+ },
+ .name = "extensions",
+};
+
+/* -----------------------------------------------------------------------------
* control/class/{fs|ss}
*/
@@ -716,8 +1370,61 @@ static ssize_t uvcg_default_control_b_interface_number_show(
UVC_ATTR_RO(uvcg_default_control_, b_interface_number, bInterfaceNumber);
+static ssize_t uvcg_default_control_enable_interrupt_ep_show(
+ struct config_item *item, char *page)
+{
+ struct config_group *group = to_config_group(item);
+ struct mutex *su_mutex = &group->cg_subsys->su_mutex;
+ struct config_item *opts_item;
+ struct f_uvc_opts *opts;
+ int result = 0;
+
+ mutex_lock(su_mutex); /* for navigating configfs hierarchy */
+
+ opts_item = item->ci_parent;
+ opts = to_f_uvc_opts(opts_item);
+
+ mutex_lock(&opts->lock);
+ result += sprintf(page, "%u\n", opts->enable_interrupt_ep);
+ mutex_unlock(&opts->lock);
+
+ mutex_unlock(su_mutex);
+
+ return result;
+}
+
+static ssize_t uvcg_default_control_enable_interrupt_ep_store(
+ struct config_item *item, const char *page, size_t len)
+{
+ struct config_group *group = to_config_group(item);
+ struct mutex *su_mutex = &group->cg_subsys->su_mutex;
+ struct config_item *opts_item;
+ struct f_uvc_opts *opts;
+ ssize_t ret;
+ u8 num;
+
+ ret = kstrtou8(page, 0, &num);
+ if (ret)
+ return ret;
+
+ mutex_lock(su_mutex); /* for navigating configfs hierarchy */
+
+ opts_item = item->ci_parent;
+ opts = to_f_uvc_opts(opts_item);
+
+ mutex_lock(&opts->lock);
+ opts->enable_interrupt_ep = num;
+ mutex_unlock(&opts->lock);
+
+ mutex_unlock(su_mutex);
+
+ return len;
+}
+UVC_ATTR(uvcg_default_control_, enable_interrupt_ep, enable_interrupt_ep);
+
static struct configfs_attribute *uvcg_default_control_attrs[] = {
&uvcg_default_control_attr_b_interface_number,
+ &uvcg_default_control_attr_enable_interrupt_ep,
NULL,
};
@@ -733,6 +1440,7 @@ static const struct uvcg_config_group_type uvcg_control_grp_type = {
&uvcg_processing_grp_type,
&uvcg_terminal_grp_type,
&uvcg_control_class_grp_type,
+ &uvcg_extensions_grp_type,
NULL,
},
};
@@ -747,6 +1455,100 @@ static const char * const uvcg_format_names[] = {
"mjpeg",
};
+static struct uvcg_color_matching *
+uvcg_format_get_default_color_match(struct config_item *streaming)
+{
+ struct config_item *color_matching_item, *cm_default;
+ struct uvcg_color_matching *color_match;
+
+ color_matching_item = config_group_find_item(to_config_group(streaming),
+ "color_matching");
+ if (!color_matching_item)
+ return NULL;
+
+ cm_default = config_group_find_item(to_config_group(color_matching_item),
+ "default");
+ config_item_put(color_matching_item);
+ if (!cm_default)
+ return NULL;
+
+ color_match = to_uvcg_color_matching(to_config_group(cm_default));
+ config_item_put(cm_default);
+
+ return color_match;
+}
+
+static int uvcg_format_allow_link(struct config_item *src, struct config_item *tgt)
+{
+ struct mutex *su_mutex = &src->ci_group->cg_subsys->su_mutex;
+ struct uvcg_color_matching *color_matching_desc;
+ struct config_item *streaming, *color_matching;
+ struct uvcg_format *fmt;
+ int ret = 0;
+
+ mutex_lock(su_mutex);
+
+ streaming = src->ci_parent->ci_parent;
+ color_matching = config_group_find_item(to_config_group(streaming), "color_matching");
+ if (!color_matching || color_matching != tgt->ci_parent) {
+ ret = -EINVAL;
+ goto out_put_cm;
+ }
+
+ fmt = to_uvcg_format(src);
+
+ /*
+ * There's always a color matching descriptor associated with the format
+ * but without a symlink it should only ever be the default one. If it's
+ * not the default, there's already a symlink and we should bail out.
+ */
+ color_matching_desc = uvcg_format_get_default_color_match(streaming);
+ if (fmt->color_matching != color_matching_desc) {
+ ret = -EBUSY;
+ goto out_put_cm;
+ }
+
+ color_matching_desc->refcnt--;
+
+ color_matching_desc = to_uvcg_color_matching(to_config_group(tgt));
+ fmt->color_matching = color_matching_desc;
+ color_matching_desc->refcnt++;
+
+out_put_cm:
+ config_item_put(color_matching);
+ mutex_unlock(su_mutex);
+
+ return ret;
+}
+
+static void uvcg_format_drop_link(struct config_item *src, struct config_item *tgt)
+{
+ struct mutex *su_mutex = &src->ci_group->cg_subsys->su_mutex;
+ struct uvcg_color_matching *color_matching_desc;
+ struct config_item *streaming;
+ struct uvcg_format *fmt;
+
+ mutex_lock(su_mutex);
+
+ color_matching_desc = to_uvcg_color_matching(to_config_group(tgt));
+ color_matching_desc->refcnt--;
+
+ streaming = src->ci_parent->ci_parent;
+ color_matching_desc = uvcg_format_get_default_color_match(streaming);
+
+ fmt = to_uvcg_format(src);
+ fmt->color_matching = color_matching_desc;
+ color_matching_desc->refcnt++;
+
+ mutex_unlock(su_mutex);
+}
+
+static struct configfs_item_operations uvcg_format_item_operations = {
+ .release = uvcg_config_item_release,
+ .allow_link = uvcg_format_allow_link,
+ .drop_link = uvcg_format_drop_link,
+};
+
static ssize_t uvcg_format_bma_controls_show(struct uvcg_format *f, char *page)
{
struct f_uvc_opts *opts;
@@ -1131,57 +1933,6 @@ static ssize_t uvcg_frame_dw_frame_interval_show(struct config_item *item,
return result;
}
-static inline int __uvcg_count_frm_intrv(char *buf, void *priv)
-{
- ++*((int *)priv);
- return 0;
-}
-
-static inline int __uvcg_fill_frm_intrv(char *buf, void *priv)
-{
- u32 num, **interv;
- int ret;
-
- ret = kstrtou32(buf, 0, &num);
- if (ret)
- return ret;
-
- interv = priv;
- **interv = num;
- ++*interv;
-
- return 0;
-}
-
-static int __uvcg_iter_frm_intrv(const char *page, size_t len,
- int (*fun)(char *, void *), void *priv)
-{
- /* sign, base 2 representation, newline, terminator */
- char buf[1 + sizeof(u32) * 8 + 1 + 1];
- const char *pg = page;
- int i, ret;
-
- if (!fun)
- return -EINVAL;
-
- while (pg - page < len) {
- i = 0;
- while (i < sizeof(buf) && (pg - page < len) &&
- *pg != '\0' && *pg != '\n')
- buf[i++] = *pg++;
- if (i == sizeof(buf))
- return -EINVAL;
- while ((pg - page < len) && (*pg == '\0' || *pg == '\n'))
- ++pg;
- buf[i] = '\0';
- ret = fun(buf, priv);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
static ssize_t uvcg_frame_dw_frame_interval_store(struct config_item *item,
const char *page, size_t len)
{
@@ -1205,7 +1956,7 @@ static ssize_t uvcg_frame_dw_frame_interval_store(struct config_item *item,
goto end;
}
- ret = __uvcg_iter_frm_intrv(page, len, __uvcg_count_frm_intrv, &n);
+ ret = __uvcg_iter_item_entries(page, len, __uvcg_count_item_entries, &n, sizeof(u32));
if (ret)
goto end;
@@ -1215,7 +1966,7 @@ static ssize_t uvcg_frame_dw_frame_interval_store(struct config_item *item,
goto end;
}
- ret = __uvcg_iter_frm_intrv(page, len, __uvcg_fill_frm_intrv, &tmp);
+ ret = __uvcg_iter_item_entries(page, len, __uvcg_fill_item_entries, &tmp, sizeof(u32));
if (ret) {
kfree(frm_intrv);
goto end;
@@ -1547,7 +2298,7 @@ static struct configfs_attribute *uvcg_uncompressed_attrs[] = {
};
static const struct config_item_type uvcg_uncompressed_type = {
- .ct_item_ops = &uvcg_config_item_ops,
+ .ct_item_ops = &uvcg_format_item_operations,
.ct_group_ops = &uvcg_uncompressed_group_ops,
.ct_attrs = uvcg_uncompressed_attrs,
.ct_owner = THIS_MODULE,
@@ -1560,8 +2311,15 @@ static struct config_group *uvcg_uncompressed_make(struct config_group *group,
'Y', 'U', 'Y', '2', 0x00, 0x00, 0x10, 0x00,
0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71
};
+ struct uvcg_color_matching *color_match;
+ struct config_item *streaming;
struct uvcg_uncompressed *h;
+ streaming = group->cg_item.ci_parent;
+ color_match = uvcg_format_get_default_color_match(streaming);
+ if (!color_match)
+ return ERR_PTR(-EINVAL);
+
h = kzalloc(sizeof(*h), GFP_KERNEL);
if (!h)
return ERR_PTR(-ENOMEM);
@@ -1579,6 +2337,8 @@ static struct config_group *uvcg_uncompressed_make(struct config_group *group,
INIT_LIST_HEAD(&h->fmt.frames);
h->fmt.type = UVCG_UNCOMPRESSED;
+ h->fmt.color_matching = color_match;
+ color_match->refcnt++;
config_group_init_type_name(&h->fmt.group, name,
&uvcg_uncompressed_type);
@@ -1734,7 +2494,7 @@ static struct configfs_attribute *uvcg_mjpeg_attrs[] = {
};
static const struct config_item_type uvcg_mjpeg_type = {
- .ct_item_ops = &uvcg_config_item_ops,
+ .ct_item_ops = &uvcg_format_item_operations,
.ct_group_ops = &uvcg_mjpeg_group_ops,
.ct_attrs = uvcg_mjpeg_attrs,
.ct_owner = THIS_MODULE,
@@ -1743,8 +2503,15 @@ static const struct config_item_type uvcg_mjpeg_type = {
static struct config_group *uvcg_mjpeg_make(struct config_group *group,
const char *name)
{
+ struct uvcg_color_matching *color_match;
+ struct config_item *streaming;
struct uvcg_mjpeg *h;
+ streaming = group->cg_item.ci_parent;
+ color_match = uvcg_format_get_default_color_match(streaming);
+ if (!color_match)
+ return ERR_PTR(-EINVAL);
+
h = kzalloc(sizeof(*h), GFP_KERNEL);
if (!h)
return ERR_PTR(-ENOMEM);
@@ -1760,6 +2527,8 @@ static struct config_group *uvcg_mjpeg_make(struct config_group *group,
INIT_LIST_HEAD(&h->fmt.frames);
h->fmt.type = UVCG_MJPEG;
+ h->fmt.color_matching = color_match;
+ color_match->refcnt++;
config_group_init_type_name(&h->fmt.group, name,
&uvcg_mjpeg_type);
@@ -1783,70 +2552,159 @@ static const struct uvcg_config_group_type uvcg_mjpeg_grp_type = {
* streaming/color_matching/default
*/
-#define UVCG_DEFAULT_COLOR_MATCHING_ATTR(cname, aname, bits) \
-static ssize_t uvcg_default_color_matching_##cname##_show( \
+#define UVCG_COLOR_MATCHING_ATTR(cname, aname, bits) \
+static ssize_t uvcg_color_matching_##cname##_show( \
struct config_item *item, char *page) \
{ \
struct config_group *group = to_config_group(item); \
+ struct uvcg_color_matching *color_match = \
+ to_uvcg_color_matching(group); \
struct f_uvc_opts *opts; \
struct config_item *opts_item; \
struct mutex *su_mutex = &group->cg_subsys->su_mutex; \
- struct uvc_color_matching_descriptor *cd; \
int result; \
\
mutex_lock(su_mutex); /* for navigating configfs hierarchy */ \
\
opts_item = group->cg_item.ci_parent->ci_parent->ci_parent; \
opts = to_f_uvc_opts(opts_item); \
- cd = &opts->uvc_color_matching; \
\
mutex_lock(&opts->lock); \
- result = sprintf(page, "%u\n", le##bits##_to_cpu(cd->aname)); \
+ result = sprintf(page, "%u\n", \
+ le##bits##_to_cpu(color_match->desc.aname)); \
mutex_unlock(&opts->lock); \
\
mutex_unlock(su_mutex); \
return result; \
} \
\
-UVC_ATTR_RO(uvcg_default_color_matching_, cname, aname)
+static ssize_t uvcg_color_matching_##cname##_store( \
+ struct config_item *item, const char *page, size_t len) \
+{ \
+ struct config_group *group = to_config_group(item); \
+ struct mutex *su_mutex = &group->cg_subsys->su_mutex; \
+ struct uvcg_color_matching *color_match = \
+ to_uvcg_color_matching(group); \
+ struct f_uvc_opts *opts; \
+ struct config_item *opts_item; \
+ int ret; \
+ u##bits num; \
+ \
+ ret = kstrtou##bits(page, 0, &num); \
+ if (ret) \
+ return ret; \
+ \
+ mutex_lock(su_mutex); /* for navigating configfs hierarchy */ \
+ \
+ if (color_match->refcnt) { \
+ ret = -EBUSY; \
+ goto unlock_su; \
+ } \
+ \
+ opts_item = group->cg_item.ci_parent->ci_parent->ci_parent; \
+ opts = to_f_uvc_opts(opts_item); \
+ \
+ mutex_lock(&opts->lock); \
+ \
+ color_match->desc.aname = num; \
+ ret = len; \
+ \
+ mutex_unlock(&opts->lock); \
+unlock_su: \
+ mutex_unlock(su_mutex); \
+ \
+ return ret; \
+} \
+UVC_ATTR(uvcg_color_matching_, cname, aname)
-UVCG_DEFAULT_COLOR_MATCHING_ATTR(b_color_primaries, bColorPrimaries, 8);
-UVCG_DEFAULT_COLOR_MATCHING_ATTR(b_transfer_characteristics,
- bTransferCharacteristics, 8);
-UVCG_DEFAULT_COLOR_MATCHING_ATTR(b_matrix_coefficients, bMatrixCoefficients, 8);
+UVCG_COLOR_MATCHING_ATTR(b_color_primaries, bColorPrimaries, 8);
+UVCG_COLOR_MATCHING_ATTR(b_transfer_characteristics, bTransferCharacteristics, 8);
+UVCG_COLOR_MATCHING_ATTR(b_matrix_coefficients, bMatrixCoefficients, 8);
-#undef UVCG_DEFAULT_COLOR_MATCHING_ATTR
+#undef UVCG_COLOR_MATCHING_ATTR
-static struct configfs_attribute *uvcg_default_color_matching_attrs[] = {
- &uvcg_default_color_matching_attr_b_color_primaries,
- &uvcg_default_color_matching_attr_b_transfer_characteristics,
- &uvcg_default_color_matching_attr_b_matrix_coefficients,
+static struct configfs_attribute *uvcg_color_matching_attrs[] = {
+ &uvcg_color_matching_attr_b_color_primaries,
+ &uvcg_color_matching_attr_b_transfer_characteristics,
+ &uvcg_color_matching_attr_b_matrix_coefficients,
NULL,
};
-static const struct uvcg_config_group_type uvcg_default_color_matching_type = {
- .type = {
- .ct_item_ops = &uvcg_config_item_ops,
- .ct_attrs = uvcg_default_color_matching_attrs,
- .ct_owner = THIS_MODULE,
- },
- .name = "default",
+static void uvcg_color_matching_release(struct config_item *item)
+{
+ struct uvcg_color_matching *color_match =
+ to_uvcg_color_matching(to_config_group(item));
+
+ kfree(color_match);
+}
+
+static struct configfs_item_operations uvcg_color_matching_item_ops = {
+ .release = uvcg_color_matching_release,
+};
+
+static const struct config_item_type uvcg_color_matching_type = {
+ .ct_item_ops = &uvcg_color_matching_item_ops,
+ .ct_attrs = uvcg_color_matching_attrs,
+ .ct_owner = THIS_MODULE,
};
/* -----------------------------------------------------------------------------
* streaming/color_matching
*/
+static struct config_group *uvcg_color_matching_make(struct config_group *group,
+ const char *name)
+{
+ struct uvcg_color_matching *color_match;
+
+ color_match = kzalloc(sizeof(*color_match), GFP_KERNEL);
+ if (!color_match)
+ return ERR_PTR(-ENOMEM);
+
+ color_match->desc.bLength = UVC_DT_COLOR_MATCHING_SIZE;
+ color_match->desc.bDescriptorType = USB_DT_CS_INTERFACE;
+ color_match->desc.bDescriptorSubType = UVC_VS_COLORFORMAT;
+
+ config_group_init_type_name(&color_match->group, name,
+ &uvcg_color_matching_type);
+
+ return &color_match->group;
+}
+
+static struct configfs_group_operations uvcg_color_matching_grp_group_ops = {
+ .make_group = uvcg_color_matching_make,
+};
+
+static int uvcg_color_matching_create_children(struct config_group *parent)
+{
+ struct uvcg_color_matching *color_match;
+
+ color_match = kzalloc(sizeof(*color_match), GFP_KERNEL);
+ if (!color_match)
+ return -ENOMEM;
+
+ color_match->desc.bLength = UVC_DT_COLOR_MATCHING_SIZE;
+ color_match->desc.bDescriptorType = USB_DT_CS_INTERFACE;
+ color_match->desc.bDescriptorSubType = UVC_VS_COLORFORMAT;
+ color_match->desc.bColorPrimaries = UVC_COLOR_PRIMARIES_BT_709_SRGB;
+ color_match->desc.bTransferCharacteristics = UVC_TRANSFER_CHARACTERISTICS_BT_709;
+ color_match->desc.bMatrixCoefficients = UVC_MATRIX_COEFFICIENTS_SMPTE_170M;
+
+ config_group_init_type_name(&color_match->group, "default",
+ &uvcg_color_matching_type);
+ configfs_add_default_group(&color_match->group, parent);
+
+ return 0;
+}
+
static const struct uvcg_config_group_type uvcg_color_matching_grp_type = {
.type = {
.ct_item_ops = &uvcg_config_item_ops,
+ .ct_group_ops = &uvcg_color_matching_grp_group_ops,
.ct_owner = THIS_MODULE,
},
.name = "color_matching",
- .children = (const struct uvcg_config_group_type*[]) {
- &uvcg_default_color_matching_type,
- NULL,
- },
+ .create_children = uvcg_color_matching_create_children,
};
/* -----------------------------------------------------------------------------
@@ -1880,7 +2738,8 @@ static inline struct uvc_descriptor_header
enum uvcg_strm_type {
UVCG_HEADER = 0,
UVCG_FORMAT,
- UVCG_FRAME
+ UVCG_FRAME,
+ UVCG_COLOR_MATCHING,
};
/*
@@ -1930,6 +2789,11 @@ static int __uvcg_iter_strm_cls(struct uvcg_streaming_header *h,
if (ret)
return ret;
}
+
+ ret = fun(f->fmt->color_matching, priv2, priv3, 0,
+ UVCG_COLOR_MATCHING);
+ if (ret)
+ return ret;
}
return ret;
@@ -1985,6 +2849,12 @@ static int __uvcg_cnt_strm(void *priv1, void *priv2, void *priv3, int n,
*size += frm->frame.b_frame_interval_type * sz;
}
break;
+ case UVCG_COLOR_MATCHING: {
+ struct uvcg_color_matching *color_match = priv1;
+
+ *size += sizeof(color_match->desc);
+ }
+ break;
}
++*count;
@@ -2070,6 +2940,13 @@ static int __uvcg_fill_strm(void *priv1, void *priv2, void *priv3, int n,
frm->frame.b_frame_interval_type);
}
break;
+ case UVCG_COLOR_MATCHING: {
+ struct uvcg_color_matching *color_match = priv1;
+
+ memcpy(*dest, &color_match->desc, sizeof(color_match->desc));
+ *dest += sizeof(color_match->desc);
+ }
+ break;
}
return 0;
@@ -2109,7 +2986,7 @@ static int uvcg_streaming_class_allow_link(struct config_item *src,
if (ret)
goto unlock;
- count += 2; /* color_matching, NULL */
+ count += 1; /* NULL */
*class_array = kcalloc(count, sizeof(void *), GFP_KERNEL);
if (!*class_array) {
ret = -ENOMEM;
@@ -2136,7 +3013,6 @@ static int uvcg_streaming_class_allow_link(struct config_item *src,
kfree(data_save);
goto unlock;
}
- *cl_arr = (struct uvc_descriptor_header *)&opts->uvc_color_matching;
++target_hdr->linked;
ret = 0;
@@ -2298,8 +3174,68 @@ static void uvc_func_item_release(struct config_item *item)
usb_put_function_instance(&opts->func_inst);
}
+static int uvc_func_allow_link(struct config_item *src, struct config_item *tgt)
+{
+ struct mutex *su_mutex = &src->ci_group->cg_subsys->su_mutex;
+ struct gadget_string *string;
+ struct config_item *strings;
+ struct f_uvc_opts *opts;
+ int ret = 0;
+
+ mutex_lock(su_mutex); /* for navigating configfs hierarchy */
+
+ /* Validate that the target is an entry in strings/<langid> */
+ strings = config_group_find_item(to_config_group(src->ci_parent->ci_parent),
+ "strings");
+ if (!strings || tgt->ci_parent->ci_parent != strings) {
+ ret = -EINVAL;
+ goto put_strings;
+ }
+
+ string = to_gadget_string(tgt);
+
+ opts = to_f_uvc_opts(src);
+ mutex_lock(&opts->lock);
+
+ if (!strcmp(tgt->ci_name, "iad_desc"))
+ opts->iad_index = string->usb_string.id;
+ else if (!strcmp(tgt->ci_name, "vs0_desc"))
+ opts->vs0_index = string->usb_string.id;
+ else if (!strcmp(tgt->ci_name, "vs1_desc"))
+ opts->vs1_index = string->usb_string.id;
+ else
+ ret = -EINVAL;
+
+ mutex_unlock(&opts->lock);
+
+put_strings:
+ config_item_put(strings);
+ mutex_unlock(su_mutex);
+
+ return ret;
+}
+
+static void uvc_func_drop_link(struct config_item *src, struct config_item *tgt)
+{
+ struct f_uvc_opts *opts;
+
+ opts = to_f_uvc_opts(src);
+ mutex_lock(&opts->lock);
+
+ if (!strcmp(tgt->ci_name, "iad_desc"))
+ opts->iad_index = 0;
+ else if (!strcmp(tgt->ci_name, "vs0_desc"))
+ opts->vs0_index = 0;
+ else if (!strcmp(tgt->ci_name, "vs1_desc"))
+ opts->vs1_index = 0;
+
+ mutex_unlock(&opts->lock);
+}
+
static struct configfs_item_operations uvc_func_item_ops = {
.release = uvc_func_item_release,
+ .allow_link = uvc_func_allow_link,
+ .drop_link = uvc_func_drop_link,
};
#define UVCG_OPTS_ATTR(cname, aname, limit) \
diff --git a/drivers/usb/gadget/function/uvc_configfs.h b/drivers/usb/gadget/function/uvc_configfs.h
index ad2ec8c4c78c..c6a690158138 100644
--- a/drivers/usb/gadget/function/uvc_configfs.h
+++ b/drivers/usb/gadget/function/uvc_configfs.h
@@ -37,18 +37,28 @@ static inline struct uvcg_control_header *to_uvcg_control_header(struct config_i
return container_of(item, struct uvcg_control_header, item);
}
+struct uvcg_color_matching {
+ struct config_group group;
+ struct uvc_color_matching_descriptor desc;
+ unsigned int refcnt;
+};
+
+#define to_uvcg_color_matching(group_ptr) \
+container_of(group_ptr, struct uvcg_color_matching, group)
+
enum uvcg_format_type {
UVCG_UNCOMPRESSED = 0,
UVCG_MJPEG,
};
struct uvcg_format {
- struct config_group group;
- enum uvcg_format_type type;
- unsigned linked;
- struct list_head frames;
- unsigned num_frames;
- __u8 bmaControls[UVCG_STREAMING_CONTROL_SIZE];
+ struct config_group group;
+ enum uvcg_format_type type;
+ unsigned linked;
+ struct list_head frames;
+ unsigned num_frames;
+ __u8 bmaControls[UVCG_STREAMING_CONTROL_SIZE];
+ struct uvcg_color_matching *color_matching;
};
struct uvcg_format_ptr {
@@ -132,6 +142,36 @@ static inline struct uvcg_mjpeg *to_uvcg_mjpeg(struct config_item *item)
return container_of(to_uvcg_format(item), struct uvcg_mjpeg, fmt);
}
+/* -----------------------------------------------------------------------------
+ * control/extensions/<NAME>
+ */
+
+struct uvcg_extension_unit_descriptor {
+ u8 bLength;
+ u8 bDescriptorType;
+ u8 bDescriptorSubType;
+ u8 bUnitID;
+ u8 guidExtensionCode[16];
+ u8 bNumControls;
+ u8 bNrInPins;
+ u8 *baSourceID;
+ u8 bControlSize;
+ u8 *bmControls;
+ u8 iExtension;
+} __packed;
+
+struct uvcg_extension {
+ struct config_item item;
+ struct list_head list;
+ u8 string_descriptor_index;
+ struct uvcg_extension_unit_descriptor desc;
+};
+
+static inline struct uvcg_extension *to_uvcg_extension(struct config_item *item)
+{
+ return container_of(item, struct uvcg_extension, item);
+}
+
int uvcg_attach_configfs(struct f_uvc_opts *opts);
#endif /* UVC_CONFIGFS_H */
diff --git a/drivers/usb/gadget/function/uvc_v4l2.c b/drivers/usb/gadget/function/uvc_v4l2.c
index a189b08bba80..3f0a9795c0d4 100644
--- a/drivers/usb/gadget/function/uvc_v4l2.c
+++ b/drivers/usb/gadget/function/uvc_v4l2.c
@@ -11,6 +11,7 @@
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/usb/g_uvc.h>
+#include <linux/usb/uvc.h>
#include <linux/videodev2.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
@@ -18,7 +19,6 @@
#include <media/v4l2-dev.h>
#include <media/v4l2-event.h>
#include <media/v4l2-ioctl.h>
-#include <media/v4l2-uvc.h>
#include "f_uvc.h"
#include "uvc.h"
@@ -27,10 +27,10 @@
#include "uvc_v4l2.h"
#include "uvc_configfs.h"
-static struct uvc_format_desc *to_uvc_format(struct uvcg_format *uformat)
+static const struct uvc_format_desc *to_uvc_format(struct uvcg_format *uformat)
{
char guid[16] = UVC_GUID_FORMAT_MJPEG;
- struct uvc_format_desc *format;
+ const struct uvc_format_desc *format;
struct uvcg_uncompressed *unc;
if (uformat->type == UVCG_UNCOMPRESSED) {
@@ -119,7 +119,7 @@ static struct uvcg_format *find_format_by_pix(struct uvc_device *uvc,
struct uvcg_format *uformat = NULL;
list_for_each_entry(format, &uvc->header->formats, entry) {
- struct uvc_format_desc *fmtdesc = to_uvc_format(format->fmt);
+ const struct uvc_format_desc *fmtdesc = to_uvc_format(format->fmt);
if (fmtdesc->fcc == pixelformat) {
uformat = format->fmt;
@@ -364,7 +364,7 @@ uvc_v4l2_enum_format(struct file *file, void *fh, struct v4l2_fmtdesc *f)
{
struct video_device *vdev = video_devdata(file);
struct uvc_device *uvc = video_get_drvdata(vdev);
- struct uvc_format_desc *fmtdesc;
+ const struct uvc_format_desc *fmtdesc;
struct uvcg_format *uformat;
if (f->index >= uvc->header->num_fmt)
@@ -374,15 +374,9 @@ uvc_v4l2_enum_format(struct file *file, void *fh, struct v4l2_fmtdesc *f)
if (!uformat)
return -EINVAL;
- if (uformat->type != UVCG_UNCOMPRESSED)
- f->flags |= V4L2_FMT_FLAG_COMPRESSED;
-
fmtdesc = to_uvc_format(uformat);
f->pixelformat = fmtdesc->fcc;
- strscpy(f->description, fmtdesc->name, sizeof(f->description));
- f->description[strlen(fmtdesc->name) - 1] = 0;
-
return 0;
}
diff --git a/drivers/usb/gadget/legacy/hid.c b/drivers/usb/gadget/legacy/hid.c
index 1187ee4f316a..133daf88162e 100644
--- a/drivers/usb/gadget/legacy/hid.c
+++ b/drivers/usb/gadget/legacy/hid.c
@@ -133,14 +133,11 @@ static struct usb_configuration config_driver = {
static int hid_bind(struct usb_composite_dev *cdev)
{
struct usb_gadget *gadget = cdev->gadget;
- struct list_head *tmp;
struct hidg_func_node *n = NULL, *m, *iter_n;
struct f_hid_opts *hid_opts;
- int status, funcs = 0;
-
- list_for_each(tmp, &hidg_func_list)
- funcs++;
+ int status, funcs;
+ funcs = list_count_nodes(&hidg_func_list);
if (!funcs)
return -ENODEV;
diff --git a/drivers/usb/gadget/udc/Kconfig b/drivers/usb/gadget/udc/Kconfig
index b3006d8b04ab..83cae6bb12eb 100644
--- a/drivers/usb/gadget/udc/Kconfig
+++ b/drivers/usb/gadget/udc/Kconfig
@@ -118,7 +118,6 @@ config USB_GR_UDC
config USB_OMAP
tristate "OMAP USB Device Controller"
depends on ARCH_OMAP1
- depends on ISP1301_OMAP || !(MACH_OMAP_H2 || MACH_OMAP_H3)
help
Many Texas Instruments OMAP processors have flexible full
speed USB device controllers, with support for up to 30
@@ -180,9 +179,20 @@ config USB_RENESAS_USBHS_UDC
dynamically linked module called "renesas_usbhs" and force all
gadget drivers to also be dynamically linked.
+config USB_RZV2M_USB3DRD
+ tristate 'Renesas USB3.1 DRD controller'
+ depends on ARCH_R9A09G011 || COMPILE_TEST
+ help
+ Renesas USB3.1 DRD controller is a USB DRD controller
+ that supports both host and device switching.
+
+ Say "y" to link the driver statically, or "m" to build a
+ dynamically linked module called "rzv2m_usb3drd".
+
config USB_RENESAS_USB3
tristate 'Renesas USB3.0 Peripheral controller'
depends on ARCH_RENESAS || COMPILE_TEST
+ depends on USB_RZV2M_USB3DRD || !USB_RZV2M_USB3DRD
depends on EXTCON
select USB_ROLE_SWITCH
help
@@ -193,6 +203,17 @@ config USB_RENESAS_USB3
dynamically linked module called "renesas_usb3" and force all
gadget drivers to also be dynamically linked.
+config USB_RENESAS_USBF
+ tristate 'Renesas USB Function controller'
+ depends on ARCH_RENESAS || COMPILE_TEST
+ help
+ Renesas USB Function controller is a USB peripheral controller
+ available on RZ/N1 Renesas SoCs.
+
+ Say "y" to link the driver statically, or "m" to build a
+ dynamically linked module called "renesas_usbf" and force all
+ gadget drivers to also be dynamically linked.
+
config USB_PXA27X
tristate "PXA 27x"
depends on HAS_IOMEM
@@ -207,31 +228,6 @@ config USB_PXA27X
dynamically linked module called "pxa27x_udc" and force all
gadget drivers to also be dynamically linked.
-config USB_S3C2410
- tristate "S3C2410 USB Device Controller"
- depends on ARCH_S3C24XX
- help
- Samsung's S3C2410 is an ARM-4 processor with an integrated
- full speed USB 1.1 device controller. It has 4 configurable
- endpoints, as well as endpoint zero (for control transfers).
-
- This driver has been tested on the S3C2410, S3C2412, and
- S3C2440 processors.
-
-config USB_S3C2410_DEBUG
- bool "S3C2410 udc debug messages"
- depends on USB_S3C2410
-
-config USB_S3C_HSUDC
- tristate "S3C2416, S3C2443 and S3C2450 USB Device Controller"
- depends on ARCH_S3C24XX
- help
- Samsung's S3C2416, S3C2443 and S3C2450 is an ARM9 based SoC
- integrated with dual speed USB 2.0 device controller. It has
- 8 endpoints, as well as endpoint zero.
-
- This driver has been tested on S3C2416 and S3C2450 processors.
-
config USB_MV_UDC
tristate "Marvell USB2.0 Device Controller"
depends on HAS_DMA
diff --git a/drivers/usb/gadget/udc/Makefile b/drivers/usb/gadget/udc/Makefile
index 39daf36a2baa..ee569f63c74a 100644
--- a/drivers/usb/gadget/udc/Makefile
+++ b/drivers/usb/gadget/udc/Makefile
@@ -17,7 +17,6 @@ obj-$(CONFIG_USB_PXA25X) += pxa25x_udc.o
obj-$(CONFIG_USB_PXA27X) += pxa27x_udc.o
obj-$(CONFIG_USB_GOKU) += goku_udc.o
obj-$(CONFIG_USB_OMAP) += omap_udc.o
-obj-$(CONFIG_USB_S3C2410) += s3c2410_udc.o
obj-$(CONFIG_USB_AT91) += at91_udc.o
obj-$(CONFIG_USB_ATMEL_USBA) += atmel_usba_udc.o
obj-$(CONFIG_USB_BCM63XX_UDC) += bcm63xx_udc.o
@@ -27,8 +26,9 @@ obj-$(CONFIG_USB_TEGRA_XUDC) += tegra-xudc.o
obj-$(CONFIG_USB_M66592) += m66592-udc.o
obj-$(CONFIG_USB_R8A66597) += r8a66597-udc.o
obj-$(CONFIG_USB_RENESAS_USB3) += renesas_usb3.o
+obj-$(CONFIG_USB_RZV2M_USB3DRD) += rzv2m_usb3drd.o
+obj-$(CONFIG_USB_RENESAS_USBF) += renesas_usbf.o
obj-$(CONFIG_USB_FSL_QE) += fsl_qe_udc.o
-obj-$(CONFIG_USB_S3C_HSUDC) += s3c-hsudc.o
obj-$(CONFIG_USB_LPC32XX) += lpc32xx_udc.o
obj-$(CONFIG_USB_EG20T) += pch_udc.o
obj-$(CONFIG_USB_MV_UDC) += mv_udc.o
diff --git a/drivers/usb/gadget/udc/bcm63xx_udc.c b/drivers/usb/gadget/udc/bcm63xx_udc.c
index 2cdb07905bde..a3055dd4acfb 100644
--- a/drivers/usb/gadget/udc/bcm63xx_udc.c
+++ b/drivers/usb/gadget/udc/bcm63xx_udc.c
@@ -1830,7 +1830,6 @@ static int bcm63xx_udc_start(struct usb_gadget *gadget,
bcm63xx_select_phy_mode(udc, true);
udc->driver = driver;
- driver->driver.bus = NULL;
udc->gadget.dev.of_node = udc->dev->of_node;
spin_unlock_irqrestore(&udc->lock, flags);
@@ -2172,7 +2171,6 @@ static int bcm63xx_iudma_dbg_show(struct seq_file *s, void *p)
for (ch_idx = 0; ch_idx < BCM63XX_NUM_IUDMA; ch_idx++) {
struct iudma_ch *iudma = &udc->iudma[ch_idx];
- struct list_head *pos;
seq_printf(s, "IUDMA channel %d -- ", ch_idx);
switch (iudma_defaults[ch_idx].ep_type) {
@@ -2205,14 +2203,10 @@ static int bcm63xx_iudma_dbg_show(struct seq_file *s, void *p)
seq_printf(s, " desc: %d/%d used", iudma->n_bds_used,
iudma->n_bds);
- if (iudma->bep) {
- i = 0;
- list_for_each(pos, &iudma->bep->queue)
- i++;
- seq_printf(s, "; %d queued\n", i);
- } else {
+ if (iudma->bep)
+ seq_printf(s, "; %zu queued\n", list_count_nodes(&iudma->bep->queue));
+ else
seq_printf(s, "\n");
- }
for (i = 0; i < iudma->n_bds; i++) {
struct bcm_enet_desc *d = &iudma->bd_ring[i];
@@ -2259,7 +2253,7 @@ static void bcm63xx_udc_init_debugfs(struct bcm63xx_udc *udc)
*/
static void bcm63xx_udc_cleanup_debugfs(struct bcm63xx_udc *udc)
{
- debugfs_remove(debugfs_lookup(udc->gadget.name, usb_debug_root));
+ debugfs_lookup_and_remove(udc->gadget.name, usb_debug_root);
}
/***********************************************************************
diff --git a/drivers/usb/gadget/udc/fsl_qe_udc.c b/drivers/usb/gadget/udc/fsl_qe_udc.c
index bf745358e28e..3b1cc8fa30c8 100644
--- a/drivers/usb/gadget/udc/fsl_qe_udc.c
+++ b/drivers/usb/gadget/udc/fsl_qe_udc.c
@@ -2285,7 +2285,6 @@ static int fsl_qe_start(struct usb_gadget *gadget,
/* lock is needed but whether should use this lock or another */
spin_lock_irqsave(&udc->lock, flags);
- driver->driver.bus = NULL;
/* hook up the driver */
udc->driver = driver;
udc->gadget.speed = driver->max_speed;
diff --git a/drivers/usb/gadget/udc/fsl_udc_core.c b/drivers/usb/gadget/udc/fsl_udc_core.c
index 50435e804118..a67873a074b7 100644
--- a/drivers/usb/gadget/udc/fsl_udc_core.c
+++ b/drivers/usb/gadget/udc/fsl_udc_core.c
@@ -1943,7 +1943,6 @@ static int fsl_udc_start(struct usb_gadget *g,
/* lock is needed but whether should use this lock or another */
spin_lock_irqsave(&udc_controller->lock, flags);
- driver->driver.bus = NULL;
/* hook up the driver */
udc_controller->driver = driver;
spin_unlock_irqrestore(&udc_controller->lock, flags);
diff --git a/drivers/usb/gadget/udc/fusb300_udc.c b/drivers/usb/gadget/udc/fusb300_udc.c
index 9af8b415f303..08ba9c8c1e67 100644
--- a/drivers/usb/gadget/udc/fusb300_udc.c
+++ b/drivers/usb/gadget/udc/fusb300_udc.c
@@ -1311,7 +1311,6 @@ static int fusb300_udc_start(struct usb_gadget *g,
struct fusb300 *fusb300 = to_fusb300(g);
/* hook up the driver */
- driver->driver.bus = NULL;
fusb300->driver = driver;
return 0;
@@ -1347,6 +1346,7 @@ static int fusb300_remove(struct platform_device *pdev)
usb_del_gadget_udc(&fusb300->gadget);
iounmap(fusb300->reg);
free_irq(platform_get_irq(pdev, 0), fusb300);
+ free_irq(platform_get_irq(pdev, 1), fusb300);
fusb300_free_request(&fusb300->ep[0]->ep, fusb300->ep0_req);
for (i = 0; i < FUSB300_MAX_NUM_EP; i++)
@@ -1432,7 +1432,7 @@ static int fusb300_probe(struct platform_device *pdev)
IRQF_SHARED, udc_name, fusb300);
if (ret < 0) {
pr_err("request_irq1 error (%d)\n", ret);
- goto clean_up;
+ goto err_request_irq1;
}
INIT_LIST_HEAD(&fusb300->gadget.ep_list);
@@ -1471,7 +1471,7 @@ static int fusb300_probe(struct platform_device *pdev)
GFP_KERNEL);
if (fusb300->ep0_req == NULL) {
ret = -ENOMEM;
- goto clean_up3;
+ goto err_alloc_request;
}
init_controller(fusb300);
@@ -1486,7 +1486,10 @@ static int fusb300_probe(struct platform_device *pdev)
err_add_udc:
fusb300_free_request(&fusb300->ep[0]->ep, fusb300->ep0_req);
-clean_up3:
+err_alloc_request:
+ free_irq(ires1->start, fusb300);
+
+err_request_irq1:
free_irq(ires->start, fusb300);
clean_up:
diff --git a/drivers/usb/gadget/udc/goku_udc.c b/drivers/usb/gadget/udc/goku_udc.c
index bdc56b24b5c9..5ffb3d5c635b 100644
--- a/drivers/usb/gadget/udc/goku_udc.c
+++ b/drivers/usb/gadget/udc/goku_udc.c
@@ -1375,7 +1375,6 @@ static int goku_udc_start(struct usb_gadget *g,
struct goku_udc *dev = to_goku_udc(g);
/* hook up the driver */
- driver->driver.bus = NULL;
dev->driver = driver;
/*
diff --git a/drivers/usb/gadget/udc/gr_udc.c b/drivers/usb/gadget/udc/gr_udc.c
index 22096f8505de..09762559912d 100644
--- a/drivers/usb/gadget/udc/gr_udc.c
+++ b/drivers/usb/gadget/udc/gr_udc.c
@@ -215,7 +215,7 @@ static void gr_dfs_create(struct gr_udc *dev)
static void gr_dfs_delete(struct gr_udc *dev)
{
- debugfs_remove(debugfs_lookup(dev_name(dev->dev), usb_debug_root));
+ debugfs_lookup_and_remove(dev_name(dev->dev), usb_debug_root);
}
#else /* !CONFIG_USB_GADGET_DEBUG_FS */
@@ -1906,7 +1906,6 @@ static int gr_udc_start(struct usb_gadget *gadget,
spin_lock(&dev->lock);
/* Hook up the driver */
- driver->driver.bus = NULL;
dev->driver = driver;
/* Get ready for host detection */
diff --git a/drivers/usb/gadget/udc/lpc32xx_udc.c b/drivers/usb/gadget/udc/lpc32xx_udc.c
index cea10cdb83ae..fe62db32dd0e 100644
--- a/drivers/usb/gadget/udc/lpc32xx_udc.c
+++ b/drivers/usb/gadget/udc/lpc32xx_udc.c
@@ -532,7 +532,7 @@ static void create_debug_file(struct lpc32xx_udc *udc)
static void remove_debug_file(struct lpc32xx_udc *udc)
{
- debugfs_remove(debugfs_lookup(debug_filename, NULL));
+ debugfs_lookup_and_remove(debug_filename, NULL);
}
#else
diff --git a/drivers/usb/gadget/udc/m66592-udc.c b/drivers/usb/gadget/udc/m66592-udc.c
index c7e421b449f3..06e21cee431b 100644
--- a/drivers/usb/gadget/udc/m66592-udc.c
+++ b/drivers/usb/gadget/udc/m66592-udc.c
@@ -1454,7 +1454,6 @@ static int m66592_udc_start(struct usb_gadget *g,
struct m66592 *m66592 = to_m66592(g);
/* hook up the driver */
- driver->driver.bus = NULL;
m66592->driver = driver;
m66592_bset(m66592, M66592_VBSE | M66592_URST, M66592_INTENB0);
diff --git a/drivers/usb/gadget/udc/max3420_udc.c b/drivers/usb/gadget/udc/max3420_udc.c
index 3074da00c3df..ddf0ed3eb4f2 100644
--- a/drivers/usb/gadget/udc/max3420_udc.c
+++ b/drivers/usb/gadget/udc/max3420_udc.c
@@ -1108,7 +1108,6 @@ static int max3420_udc_start(struct usb_gadget *gadget,
spin_lock_irqsave(&udc->lock, flags);
/* hook up the driver */
- driver->driver.bus = NULL;
udc->driver = driver;
udc->gadget.speed = USB_SPEED_FULL;
diff --git a/drivers/usb/gadget/udc/mv_u3d_core.c b/drivers/usb/gadget/udc/mv_u3d_core.c
index 598654a3cb41..411b6179782c 100644
--- a/drivers/usb/gadget/udc/mv_u3d_core.c
+++ b/drivers/usb/gadget/udc/mv_u3d_core.c
@@ -1243,7 +1243,6 @@ static int mv_u3d_start(struct usb_gadget *g,
}
/* hook up the driver ... */
- driver->driver.bus = NULL;
u3d->driver = driver;
u3d->ep0_dir = USB_DIR_OUT;
diff --git a/drivers/usb/gadget/udc/mv_udc_core.c b/drivers/usb/gadget/udc/mv_udc_core.c
index fdb17d86cd65..b397f3a848cf 100644
--- a/drivers/usb/gadget/udc/mv_udc_core.c
+++ b/drivers/usb/gadget/udc/mv_udc_core.c
@@ -1359,7 +1359,6 @@ static int mv_udc_start(struct usb_gadget *gadget,
spin_lock_irqsave(&udc->lock, flags);
/* hook up the driver ... */
- driver->driver.bus = NULL;
udc->driver = driver;
udc->usb_state = USB_STATE_ATTACHED;
diff --git a/drivers/usb/gadget/udc/net2272.c b/drivers/usb/gadget/udc/net2272.c
index 84605a4d0715..538c1b9a2883 100644
--- a/drivers/usb/gadget/udc/net2272.c
+++ b/drivers/usb/gadget/udc/net2272.c
@@ -1451,7 +1451,6 @@ static int net2272_start(struct usb_gadget *_gadget,
dev->ep[i].irqs = 0;
/* hook up the driver ... */
dev->softconnect = 1;
- driver->driver.bus = NULL;
dev->driver = driver;
/* ... then enable host detection and ep0; and we're ready
diff --git a/drivers/usb/gadget/udc/net2280.c b/drivers/usb/gadget/udc/net2280.c
index d6a68631354a..1b929c519cd7 100644
--- a/drivers/usb/gadget/udc/net2280.c
+++ b/drivers/usb/gadget/udc/net2280.c
@@ -2423,7 +2423,6 @@ static int net2280_start(struct usb_gadget *_gadget,
dev->ep[i].irqs = 0;
/* hook up the driver ... */
- driver->driver.bus = NULL;
dev->driver = driver;
retval = device_create_file(&dev->pdev->dev, &dev_attr_function);
diff --git a/drivers/usb/gadget/udc/omap_udc.c b/drivers/usb/gadget/udc/omap_udc.c
index bea346e362b2..2d87c7cd5f7e 100644
--- a/drivers/usb/gadget/udc/omap_udc.c
+++ b/drivers/usb/gadget/udc/omap_udc.c
@@ -2036,12 +2036,7 @@ static irqreturn_t omap_udc_iso_irq(int irq, void *_dev)
static inline int machine_without_vbus_sense(void)
{
- return machine_is_omap_innovator()
- || machine_is_omap_osk()
- || machine_is_omap_palmte()
- || machine_is_sx1()
- /* No known omap7xx boards with vbus sense */
- || cpu_is_omap7xx();
+ return machine_is_omap_osk() || machine_is_sx1();
}
static int omap_udc_start(struct usb_gadget *g,
@@ -2066,7 +2061,6 @@ static int omap_udc_start(struct usb_gadget *g,
udc->softconnect = 1;
/* hook up the driver */
- driver->driver.bus = NULL;
udc->driver = driver;
spin_unlock_irqrestore(&udc->lock, flags);
@@ -2759,9 +2753,6 @@ static int omap_udc_probe(struct platform_device *pdev)
struct clk *dc_clk = NULL;
struct clk *hhc_clk = NULL;
- if (cpu_is_omap7xx())
- use_dma = 0;
-
/* NOTE: "knows" the order of the resources! */
if (!request_mem_region(pdev->resource[0].start,
resource_size(&pdev->resource[0]),
@@ -2780,16 +2771,6 @@ static int omap_udc_probe(struct platform_device *pdev)
udelay(100);
}
- if (cpu_is_omap7xx()) {
- dc_clk = clk_get(&pdev->dev, "usb_dc_ck");
- hhc_clk = clk_get(&pdev->dev, "l3_ocpi_ck");
- BUG_ON(IS_ERR(dc_clk) || IS_ERR(hhc_clk));
- /* can't use omap_udc_enable_clock yet */
- clk_prepare_enable(dc_clk);
- clk_prepare_enable(hhc_clk);
- udelay(100);
- }
-
INFO("OMAP UDC rev %d.%d%s\n",
omap_readw(UDC_REV) >> 4, omap_readw(UDC_REV) & 0xf,
config->otg ? ", Mini-AB" : "");
@@ -2914,7 +2895,7 @@ bad_on_1710:
goto cleanup1;
}
#endif
- if (cpu_is_omap16xx() || cpu_is_omap7xx()) {
+ if (cpu_is_omap16xx()) {
udc->dc_clk = dc_clk;
udc->hhc_clk = hhc_clk;
clk_disable(hhc_clk);
@@ -2933,7 +2914,7 @@ cleanup0:
if (!IS_ERR_OR_NULL(xceiv))
usb_put_phy(xceiv);
- if (cpu_is_omap16xx() || cpu_is_omap7xx()) {
+ if (cpu_is_omap16xx()) {
clk_disable_unprepare(hhc_clk);
clk_disable_unprepare(dc_clk);
clk_put(hhc_clk);
diff --git a/drivers/usb/gadget/udc/pch_udc.c b/drivers/usb/gadget/udc/pch_udc.c
index 9bb7a9d7a2fb..4f8617210d85 100644
--- a/drivers/usb/gadget/udc/pch_udc.c
+++ b/drivers/usb/gadget/udc/pch_udc.c
@@ -2908,7 +2908,6 @@ static int pch_udc_start(struct usb_gadget *g,
{
struct pch_udc_dev *dev = to_pch_udc(g);
- driver->driver.bus = NULL;
dev->driver = driver;
/* get ready for ep0 traffic */
diff --git a/drivers/usb/gadget/udc/pxa25x_udc.c b/drivers/usb/gadget/udc/pxa25x_udc.c
index c593fc383481..df0551ecc810 100644
--- a/drivers/usb/gadget/udc/pxa25x_udc.c
+++ b/drivers/usb/gadget/udc/pxa25x_udc.c
@@ -1340,7 +1340,7 @@ DEFINE_SHOW_ATTRIBUTE(udc_debug);
debugfs_create_file(dev->gadget.name, \
S_IRUGO, NULL, dev, &udc_debug_fops); \
} while (0)
-#define remove_debug_files(dev) debugfs_remove(debugfs_lookup(dev->gadget.name, NULL))
+#define remove_debug_files(dev) debugfs_lookup_and_remove(dev->gadget.name, NULL)
#else /* !CONFIG_USB_GADGET_DEBUG_FILES */
@@ -1561,40 +1561,6 @@ static int pxa25x_udc_stop(struct usb_gadget*g)
/*-------------------------------------------------------------------------*/
-#ifdef CONFIG_ARCH_LUBBOCK
-
-/* Lubbock has separate connect and disconnect irqs. More typical designs
- * use one GPIO as the VBUS IRQ, and another to control the D+ pullup.
- */
-
-static irqreturn_t
-lubbock_vbus_irq(int irq, void *_dev)
-{
- struct pxa25x_udc *dev = _dev;
- int vbus;
-
- dev->stats.irqs++;
- if (irq == dev->usb_irq) {
- vbus = 1;
- disable_irq(dev->usb_irq);
- enable_irq(dev->usb_disc_irq);
- } else if (irq == dev->usb_disc_irq) {
- vbus = 0;
- disable_irq(dev->usb_disc_irq);
- enable_irq(dev->usb_irq);
- } else {
- return IRQ_NONE;
- }
-
- pxa25x_udc_vbus_session(&dev->gadget, vbus);
- return IRQ_HANDLED;
-}
-
-#endif
-
-
-/*-------------------------------------------------------------------------*/
-
static inline void clear_ep_state (struct pxa25x_udc *dev)
{
unsigned i;
@@ -2413,34 +2379,6 @@ static int pxa25x_udc_probe(struct platform_device *pdev)
}
dev->got_irq = 1;
-#ifdef CONFIG_ARCH_LUBBOCK
- if (machine_is_lubbock()) {
- dev->usb_irq = platform_get_irq(pdev, 1);
- if (dev->usb_irq < 0)
- return dev->usb_irq;
-
- dev->usb_disc_irq = platform_get_irq(pdev, 2);
- if (dev->usb_disc_irq < 0)
- return dev->usb_disc_irq;
-
- retval = devm_request_irq(&pdev->dev, dev->usb_disc_irq,
- lubbock_vbus_irq, 0, driver_name,
- dev);
- if (retval != 0) {
- pr_err("%s: can't get irq %i, err %d\n",
- driver_name, dev->usb_disc_irq, retval);
- goto err;
- }
- retval = devm_request_irq(&pdev->dev, dev->usb_irq,
- lubbock_vbus_irq, 0, driver_name,
- dev);
- if (retval != 0) {
- pr_err("%s: can't get irq %i, err %d\n",
- driver_name, dev->usb_irq, retval);
- goto err;
- }
- } else
-#endif
create_debug_files(dev);
retval = usb_add_gadget_udc(&pdev->dev, &dev->gadget);
diff --git a/drivers/usb/gadget/udc/pxa27x_udc.c b/drivers/usb/gadget/udc/pxa27x_udc.c
index ac980d6a4740..0ecdfd2ba9e9 100644
--- a/drivers/usb/gadget/udc/pxa27x_udc.c
+++ b/drivers/usb/gadget/udc/pxa27x_udc.c
@@ -215,7 +215,7 @@ static void pxa_init_debugfs(struct pxa_udc *udc)
static void pxa_cleanup_debugfs(struct pxa_udc *udc)
{
- debugfs_remove(debugfs_lookup(udc->gadget.name, usb_debug_root));
+ debugfs_lookup_and_remove(udc->gadget.name, usb_debug_root);
}
#else
diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
index 615ba0a6fbee..bee6bceafc4f 100644
--- a/drivers/usb/gadget/udc/renesas_usb3.c
+++ b/drivers/usb/gadget/udc/renesas_usb3.c
@@ -7,6 +7,7 @@
#include <linux/debugfs.h>
#include <linux/delay.h>
+#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/extcon-provider.h>
@@ -27,6 +28,7 @@
#include <linux/usb/gadget.h>
#include <linux/usb/of.h>
#include <linux/usb/role.h>
+#include <linux/usb/rzv2m_usb3drd.h>
/* register definitions */
#define USB3_AXI_INT_STA 0x008
@@ -334,7 +336,7 @@ struct renesas_usb3_priv {
struct renesas_usb3 {
void __iomem *reg;
- struct reset_control *drd_rstc;
+ void __iomem *drd_reg;
struct reset_control *usbp_rstc;
struct usb_gadget gadget;
@@ -426,6 +428,46 @@ static void usb3_clear_bit(struct renesas_usb3 *usb3, u32 bits, u32 offs)
usb3_write(usb3, val, offs);
}
+static void usb3_drd_write(struct renesas_usb3 *usb3, u32 data, u32 offs)
+{
+ void __iomem *reg;
+
+ if (usb3->is_rzv2m)
+ reg = usb3->drd_reg + offs - USB3_DRD_CON(usb3);
+ else
+ reg = usb3->reg + offs;
+
+ iowrite32(data, reg);
+}
+
+static u32 usb3_drd_read(struct renesas_usb3 *usb3, u32 offs)
+{
+ void __iomem *reg;
+
+ if (usb3->is_rzv2m)
+ reg = usb3->drd_reg + offs - USB3_DRD_CON(usb3);
+ else
+ reg = usb3->reg + offs;
+
+ return ioread32(reg);
+}
+
+static void usb3_drd_set_bit(struct renesas_usb3 *usb3, u32 bits, u32 offs)
+{
+ u32 val = usb3_drd_read(usb3, offs);
+
+ val |= bits;
+ usb3_drd_write(usb3, val, offs);
+}
+
+static void usb3_drd_clear_bit(struct renesas_usb3 *usb3, u32 bits, u32 offs)
+{
+ u32 val = usb3_drd_read(usb3, offs);
+
+ val &= ~bits;
+ usb3_drd_write(usb3, val, offs);
+}
+
static int usb3_wait(struct renesas_usb3 *usb3, u32 reg, u32 mask,
u32 expected)
{
@@ -474,7 +516,7 @@ static void usb3_disable_pipe_irq(struct renesas_usb3 *usb3, int num)
static bool usb3_is_host(struct renesas_usb3 *usb3)
{
- return !(usb3_read(usb3, USB3_DRD_CON(usb3)) & DRD_CON_PERI_CON);
+ return !(usb3_drd_read(usb3, USB3_DRD_CON(usb3)) & DRD_CON_PERI_CON);
}
static void usb3_init_axi_bridge(struct renesas_usb3 *usb3)
@@ -683,18 +725,18 @@ static void usb3_set_mode(struct renesas_usb3 *usb3, bool host)
{
if (usb3->is_rzv2m) {
if (host) {
- usb3_set_bit(usb3, DRD_CON_PERI_RST, USB3_DRD_CON(usb3));
- usb3_clear_bit(usb3, DRD_CON_HOST_RST, USB3_DRD_CON(usb3));
+ usb3_drd_set_bit(usb3, DRD_CON_PERI_RST, USB3_DRD_CON(usb3));
+ usb3_drd_clear_bit(usb3, DRD_CON_HOST_RST, USB3_DRD_CON(usb3));
} else {
- usb3_set_bit(usb3, DRD_CON_HOST_RST, USB3_DRD_CON(usb3));
- usb3_clear_bit(usb3, DRD_CON_PERI_RST, USB3_DRD_CON(usb3));
+ usb3_drd_set_bit(usb3, DRD_CON_HOST_RST, USB3_DRD_CON(usb3));
+ usb3_drd_clear_bit(usb3, DRD_CON_PERI_RST, USB3_DRD_CON(usb3));
}
}
if (host)
- usb3_clear_bit(usb3, DRD_CON_PERI_CON, USB3_DRD_CON(usb3));
+ usb3_drd_clear_bit(usb3, DRD_CON_PERI_CON, USB3_DRD_CON(usb3));
else
- usb3_set_bit(usb3, DRD_CON_PERI_CON, USB3_DRD_CON(usb3));
+ usb3_drd_set_bit(usb3, DRD_CON_PERI_CON, USB3_DRD_CON(usb3));
}
static void usb3_set_mode_by_role_sw(struct renesas_usb3 *usb3, bool host)
@@ -710,9 +752,9 @@ static void usb3_set_mode_by_role_sw(struct renesas_usb3 *usb3, bool host)
static void usb3_vbus_out(struct renesas_usb3 *usb3, bool enable)
{
if (enable)
- usb3_set_bit(usb3, DRD_CON_VBOUT, USB3_DRD_CON(usb3));
+ usb3_drd_set_bit(usb3, DRD_CON_VBOUT, USB3_DRD_CON(usb3));
else
- usb3_clear_bit(usb3, DRD_CON_VBOUT, USB3_DRD_CON(usb3));
+ usb3_drd_clear_bit(usb3, DRD_CON_VBOUT, USB3_DRD_CON(usb3));
}
static void usb3_mode_config(struct renesas_usb3 *usb3, bool host, bool a_dev)
@@ -733,7 +775,7 @@ static void usb3_mode_config(struct renesas_usb3 *usb3, bool host, bool a_dev)
static bool usb3_is_a_device(struct renesas_usb3 *usb3)
{
- return !(usb3_read(usb3, USB3_USB_OTG_STA(usb3)) & USB_OTG_IDMON(usb3));
+ return !(usb3_drd_read(usb3, USB3_USB_OTG_STA(usb3)) & USB_OTG_IDMON(usb3));
}
static void usb3_check_id(struct renesas_usb3 *usb3)
@@ -756,8 +798,8 @@ static void renesas_usb3_init_controller(struct renesas_usb3 *usb3)
usb3_set_bit(usb3, USB_COM_CON_PN_WDATAIF_NL |
USB_COM_CON_PN_RDATAIF_NL | USB_COM_CON_PN_LSTTR_PP,
USB3_USB_COM_CON);
- usb3_write(usb3, USB_OTG_IDMON(usb3), USB3_USB_OTG_INT_STA(usb3));
- usb3_write(usb3, USB_OTG_IDMON(usb3), USB3_USB_OTG_INT_ENA(usb3));
+ usb3_drd_write(usb3, USB_OTG_IDMON(usb3), USB3_USB_OTG_INT_STA(usb3));
+ usb3_drd_write(usb3, USB_OTG_IDMON(usb3), USB3_USB_OTG_INT_ENA(usb3));
usb3_check_id(usb3);
usb3_check_vbus(usb3);
@@ -767,7 +809,7 @@ static void renesas_usb3_stop_controller(struct renesas_usb3 *usb3)
{
usb3_disconnect(usb3);
usb3_write(usb3, 0, USB3_P0_INT_ENA);
- usb3_write(usb3, 0, USB3_USB_OTG_INT_ENA(usb3));
+ usb3_drd_write(usb3, 0, USB3_USB_OTG_INT_ENA(usb3));
usb3_write(usb3, 0, USB3_USB_INT_ENA_1);
usb3_write(usb3, 0, USB3_USB_INT_ENA_2);
usb3_write(usb3, 0, USB3_AXI_INT_ENA);
@@ -2024,11 +2066,11 @@ static void usb3_irq_idmon_change(struct renesas_usb3 *usb3)
static void usb3_irq_otg_int(struct renesas_usb3 *usb3)
{
- u32 otg_int_sta = usb3_read(usb3, USB3_USB_OTG_INT_STA(usb3));
+ u32 otg_int_sta = usb3_drd_read(usb3, USB3_USB_OTG_INT_STA(usb3));
- otg_int_sta &= usb3_read(usb3, USB3_USB_OTG_INT_ENA(usb3));
+ otg_int_sta &= usb3_drd_read(usb3, USB3_USB_OTG_INT_ENA(usb3));
if (otg_int_sta)
- usb3_write(usb3, otg_int_sta, USB3_USB_OTG_INT_STA(usb3));
+ usb3_drd_write(usb3, otg_int_sta, USB3_USB_OTG_INT_STA(usb3));
if (otg_int_sta & USB_OTG_IDMON(usb3))
usb3_irq_idmon_change(usb3);
@@ -2325,6 +2367,9 @@ static int renesas_usb3_start(struct usb_gadget *gadget,
usb3 = gadget_to_renesas_usb3(gadget);
+ if (usb3->is_rzv2m && usb3_is_a_device(usb3))
+ return -EBUSY;
+
/* hook up the driver */
usb3->driver = driver;
@@ -2333,6 +2378,10 @@ static int renesas_usb3_start(struct usb_gadget *gadget,
pm_runtime_get_sync(usb3_to_dev(usb3));
+ /* Peripheral Reset */
+ if (usb3->is_rzv2m)
+ rzv2m_usb3drd_reset(usb3_to_dev(usb3)->parent, false);
+
renesas_usb3_init_controller(usb3);
return 0;
@@ -2345,8 +2394,10 @@ static int renesas_usb3_stop(struct usb_gadget *gadget)
usb3->softconnect = false;
usb3->gadget.speed = USB_SPEED_UNKNOWN;
usb3->driver = NULL;
- renesas_usb3_stop_controller(usb3);
+ if (usb3->is_rzv2m)
+ rzv2m_usb3drd_reset(usb3_to_dev(usb3)->parent, false);
+ renesas_usb3_stop_controller(usb3);
if (usb3->phy)
phy_exit(usb3->phy);
@@ -2406,18 +2457,29 @@ static void handle_ext_role_switch_states(struct device *dev,
switch (role) {
case USB_ROLE_NONE:
usb3->connection_state = USB_ROLE_NONE;
- if (cur_role == USB_ROLE_HOST)
+ if (!usb3->is_rzv2m && cur_role == USB_ROLE_HOST)
device_release_driver(host);
- if (usb3->driver)
+ if (usb3->driver) {
+ if (usb3->is_rzv2m)
+ rzv2m_usb3drd_reset(dev->parent, false);
usb3_disconnect(usb3);
+ }
usb3_vbus_out(usb3, false);
+
+ if (usb3->is_rzv2m) {
+ rzv2m_usb3drd_reset(dev->parent, true);
+ device_release_driver(host);
+ }
break;
case USB_ROLE_DEVICE:
if (usb3->connection_state == USB_ROLE_NONE) {
usb3->connection_state = USB_ROLE_DEVICE;
usb3_set_mode(usb3, false);
- if (usb3->driver)
+ if (usb3->driver) {
+ if (usb3->is_rzv2m)
+ renesas_usb3_init_controller(usb3);
usb3_connect(usb3);
+ }
} else if (cur_role == USB_ROLE_HOST) {
device_release_driver(host);
usb3_set_mode(usb3, false);
@@ -2428,8 +2490,11 @@ static void handle_ext_role_switch_states(struct device *dev,
break;
case USB_ROLE_HOST:
if (usb3->connection_state == USB_ROLE_NONE) {
- if (usb3->driver)
+ if (usb3->driver) {
+ if (usb3->is_rzv2m)
+ rzv2m_usb3drd_reset(dev->parent, false);
usb3_disconnect(usb3);
+ }
usb3->connection_state = USB_ROLE_HOST;
usb3_set_mode(usb3, true);
@@ -2600,7 +2665,6 @@ static int renesas_usb3_remove(struct platform_device *pdev)
usb_del_gadget_udc(&usb3->gadget);
reset_control_assert(usb3->usbp_rstc);
- reset_control_assert(usb3->drd_rstc);
renesas_usb3_dma_free_prd(usb3, &pdev->dev);
__renesas_usb3_ep_free_request(usb3->ep0_req);
@@ -2788,7 +2852,7 @@ static struct usb_role_switch_desc renesas_usb3_role_switch_desc = {
static int renesas_usb3_probe(struct platform_device *pdev)
{
struct renesas_usb3 *usb3;
- int irq, drd_irq, ret;
+ int irq, ret;
const struct renesas_usb3_priv *priv;
const struct soc_device_attribute *attr;
@@ -2802,12 +2866,6 @@ static int renesas_usb3_probe(struct platform_device *pdev)
if (irq < 0)
return irq;
- if (priv->is_rzv2m) {
- drd_irq = platform_get_irq_byname(pdev, "drd");
- if (drd_irq < 0)
- return drd_irq;
- }
-
usb3 = devm_kzalloc(&pdev->dev, sizeof(*usb3), GFP_KERNEL);
if (!usb3)
return -ENOMEM;
@@ -2836,9 +2894,12 @@ static int renesas_usb3_probe(struct platform_device *pdev)
return ret;
if (usb3->is_rzv2m) {
- ret = devm_request_irq(&pdev->dev, drd_irq,
+ struct rzv2m_usb3drd *ddata = dev_get_drvdata(pdev->dev.parent);
+
+ usb3->drd_reg = ddata->reg;
+ ret = devm_request_irq(ddata->dev, ddata->drd_irq,
renesas_usb3_otg_irq, 0,
- dev_name(&pdev->dev), usb3);
+ dev_name(ddata->dev), usb3);
if (ret < 0)
return ret;
}
@@ -2873,21 +2934,13 @@ static int renesas_usb3_probe(struct platform_device *pdev)
goto err_add_udc;
}
- usb3->drd_rstc = devm_reset_control_get_optional_shared(&pdev->dev,
- "drd_reset");
- if (IS_ERR(usb3->drd_rstc)) {
- ret = PTR_ERR(usb3->drd_rstc);
- goto err_add_udc;
- }
-
usb3->usbp_rstc = devm_reset_control_get_optional_shared(&pdev->dev,
- "aresetn_p");
+ NULL);
if (IS_ERR(usb3->usbp_rstc)) {
ret = PTR_ERR(usb3->usbp_rstc);
goto err_add_udc;
}
- reset_control_deassert(usb3->drd_rstc);
reset_control_deassert(usb3->usbp_rstc);
pm_runtime_enable(&pdev->dev);
@@ -2933,7 +2986,6 @@ err_dev_create:
err_reset:
reset_control_assert(usb3->usbp_rstc);
- reset_control_assert(usb3->drd_rstc);
err_add_udc:
renesas_usb3_dma_free_prd(usb3, &pdev->dev);
diff --git a/drivers/usb/gadget/udc/renesas_usbf.c b/drivers/usb/gadget/udc/renesas_usbf.c
new file mode 100644
index 000000000000..cb23e62e8a87
--- /dev/null
+++ b/drivers/usb/gadget/udc/renesas_usbf.c
@@ -0,0 +1,3406 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Renesas USBF USB Function driver
+ *
+ * Copyright 2022 Schneider Electric
+ * Author: Herve Codina <herve.codina@bootlin.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/kfifo.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/pm_runtime.h>
+#include <linux/types.h>
+#include <linux/usb/composite.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/role.h>
+
+#define USBF_NUM_ENDPOINTS 16
+#define USBF_EP0_MAX_PCKT_SIZE 64
+
+/* EPC registers */
+#define USBF_REG_USB_CONTROL 0x000
+#define USBF_USB_PUE2 BIT(2)
+#define USBF_USB_CONNECTB BIT(3)
+#define USBF_USB_DEFAULT BIT(4)
+#define USBF_USB_CONF BIT(5)
+#define USBF_USB_SUSPEND BIT(6)
+#define USBF_USB_RSUM_IN BIT(7)
+#define USBF_USB_SOF_RCV BIT(8)
+#define USBF_USB_FORCEFS BIT(9)
+#define USBF_USB_INT_SEL BIT(10)
+#define USBF_USB_SOF_CLK_MODE BIT(11)
+
+#define USBF_REG_USB_STATUS 0x004
+#define USBF_USB_RSUM_OUT BIT(1)
+#define USBF_USB_SPND_OUT BIT(2)
+#define USBF_USB_USB_RST BIT(3)
+#define USBF_USB_DEFAULT_ST BIT(4)
+#define USBF_USB_CONF_ST BIT(5)
+#define USBF_USB_SPEED_MODE BIT(6)
+#define USBF_USB_SOF_DELAY_STATUS BIT(31)
+
+#define USBF_REG_USB_ADDRESS 0x008
+#define USBF_USB_SOF_STATUS BIT(15)
+#define USBF_USB_SET_USB_ADDR(_a) ((_a) << 16)
+#define USBF_USB_GET_FRAME(_r) ((_r) & 0x7FF)
+
+#define USBF_REG_SETUP_DATA0 0x018
+#define USBF_REG_SETUP_DATA1 0x01C
+#define USBF_REG_USB_INT_STA 0x020
+#define USBF_USB_RSUM_INT BIT(1)
+#define USBF_USB_SPND_INT BIT(2)
+#define USBF_USB_USB_RST_INT BIT(3)
+#define USBF_USB_SOF_INT BIT(4)
+#define USBF_USB_SOF_ERROR_INT BIT(5)
+#define USBF_USB_SPEED_MODE_INT BIT(6)
+#define USBF_USB_EPN_INT(_n) (BIT(8) << (_n)) /* n=0..15 */
+
+#define USBF_REG_USB_INT_ENA 0x024
+#define USBF_USB_RSUM_EN BIT(1)
+#define USBF_USB_SPND_EN BIT(2)
+#define USBF_USB_USB_RST_EN BIT(3)
+#define USBF_USB_SOF_EN BIT(4)
+#define USBF_USB_SOF_ERROR_EN BIT(5)
+#define USBF_USB_SPEED_MODE_EN BIT(6)
+#define USBF_USB_EPN_EN(_n) (BIT(8) << (_n)) /* n=0..15 */
+
+#define USBF_BASE_EP0 0x028
+/* EP0 registers offsets from Base + USBF_BASE_EP0 (EP0 regs area) */
+#define USBF_REG_EP0_CONTROL 0x00
+#define USBF_EP0_ONAK BIT(0)
+#define USBF_EP0_INAK BIT(1)
+#define USBF_EP0_STL BIT(2)
+#define USBF_EP0_PERR_NAK_CLR BIT(3)
+#define USBF_EP0_INAK_EN BIT(4)
+#define USBF_EP0_DW_MASK (0x3 << 5)
+#define USBF_EP0_DW(_s) ((_s) << 5)
+#define USBF_EP0_DEND BIT(7)
+#define USBF_EP0_BCLR BIT(8)
+#define USBF_EP0_PIDCLR BIT(9)
+#define USBF_EP0_AUTO BIT(16)
+#define USBF_EP0_OVERSEL BIT(17)
+#define USBF_EP0_STGSEL BIT(18)
+
+#define USBF_REG_EP0_STATUS 0x04
+#define USBF_EP0_SETUP_INT BIT(0)
+#define USBF_EP0_STG_START_INT BIT(1)
+#define USBF_EP0_STG_END_INT BIT(2)
+#define USBF_EP0_STALL_INT BIT(3)
+#define USBF_EP0_IN_INT BIT(4)
+#define USBF_EP0_OUT_INT BIT(5)
+#define USBF_EP0_OUT_OR_INT BIT(6)
+#define USBF_EP0_OUT_NULL_INT BIT(7)
+#define USBF_EP0_IN_EMPTY BIT(8)
+#define USBF_EP0_IN_FULL BIT(9)
+#define USBF_EP0_IN_DATA BIT(10)
+#define USBF_EP0_IN_NAK_INT BIT(11)
+#define USBF_EP0_OUT_EMPTY BIT(12)
+#define USBF_EP0_OUT_FULL BIT(13)
+#define USBF_EP0_OUT_NULL BIT(14)
+#define USBF_EP0_OUT_NAK_INT BIT(15)
+#define USBF_EP0_PERR_NAK_INT BIT(16)
+#define USBF_EP0_PERR_NAK BIT(17)
+#define USBF_EP0_PID BIT(18)
+
+#define USBF_REG_EP0_INT_ENA 0x08
+#define USBF_EP0_SETUP_EN BIT(0)
+#define USBF_EP0_STG_START_EN BIT(1)
+#define USBF_EP0_STG_END_EN BIT(2)
+#define USBF_EP0_STALL_EN BIT(3)
+#define USBF_EP0_IN_EN BIT(4)
+#define USBF_EP0_OUT_EN BIT(5)
+#define USBF_EP0_OUT_OR_EN BIT(6)
+#define USBF_EP0_OUT_NULL_EN BIT(7)
+#define USBF_EP0_IN_NAK_EN BIT(11)
+#define USBF_EP0_OUT_NAK_EN BIT(15)
+#define USBF_EP0_PERR_NAK_EN BIT(16)
+
+#define USBF_REG_EP0_LENGTH 0x0C
+#define USBF_EP0_LDATA (0x7FF << 0)
+#define USBF_REG_EP0_READ 0x10
+#define USBF_REG_EP0_WRITE 0x14
+
+#define USBF_BASE_EPN(_n) (0x040 + (_n) * 0x020)
+/* EPn registers offsets from Base + USBF_BASE_EPN(n-1). n=1..15 */
+#define USBF_REG_EPN_CONTROL 0x000
+#define USBF_EPN_ONAK BIT(0)
+#define USBF_EPN_OSTL BIT(2)
+#define USBF_EPN_ISTL BIT(3)
+#define USBF_EPN_OSTL_EN BIT(4)
+#define USBF_EPN_DW_MASK (0x3 << 5)
+#define USBF_EPN_DW(_s) ((_s) << 5)
+#define USBF_EPN_DEND BIT(7)
+#define USBF_EPN_CBCLR BIT(8)
+#define USBF_EPN_BCLR BIT(9)
+#define USBF_EPN_OPIDCLR BIT(10)
+#define USBF_EPN_IPIDCLR BIT(11)
+#define USBF_EPN_AUTO BIT(16)
+#define USBF_EPN_OVERSEL BIT(17)
+#define USBF_EPN_MODE_MASK (0x3 << 24)
+#define USBF_EPN_MODE_BULK (0x0 << 24)
+#define USBF_EPN_MODE_INTR (0x1 << 24)
+#define USBF_EPN_MODE_ISO (0x2 << 24)
+#define USBF_EPN_DIR0 BIT(26)
+#define USBF_EPN_BUF_TYPE_DOUBLE BIT(30)
+#define USBF_EPN_EN BIT(31)
+
+#define USBF_REG_EPN_STATUS 0x004
+#define USBF_EPN_IN_EMPTY BIT(0)
+#define USBF_EPN_IN_FULL BIT(1)
+#define USBF_EPN_IN_DATA BIT(2)
+#define USBF_EPN_IN_INT BIT(3)
+#define USBF_EPN_IN_STALL_INT BIT(4)
+#define USBF_EPN_IN_NAK_ERR_INT BIT(5)
+#define USBF_EPN_IN_END_INT BIT(7)
+#define USBF_EPN_IPID BIT(10)
+#define USBF_EPN_OUT_EMPTY BIT(16)
+#define USBF_EPN_OUT_FULL BIT(17)
+#define USBF_EPN_OUT_NULL_INT BIT(18)
+#define USBF_EPN_OUT_INT BIT(19)
+#define USBF_EPN_OUT_STALL_INT BIT(20)
+#define USBF_EPN_OUT_NAK_ERR_INT BIT(21)
+#define USBF_EPN_OUT_OR_INT BIT(22)
+#define USBF_EPN_OUT_END_INT BIT(23)
+#define USBF_EPN_ISO_CRC BIT(24)
+#define USBF_EPN_ISO_OR BIT(26)
+#define USBF_EPN_OUT_NOTKN BIT(27)
+#define USBF_EPN_ISO_OPID BIT(28)
+#define USBF_EPN_ISO_PIDERR BIT(29)
+
+#define USBF_REG_EPN_INT_ENA 0x008
+#define USBF_EPN_IN_EN BIT(3)
+#define USBF_EPN_IN_STALL_EN BIT(4)
+#define USBF_EPN_IN_NAK_ERR_EN BIT(5)
+#define USBF_EPN_IN_END_EN BIT(7)
+#define USBF_EPN_OUT_NULL_EN BIT(18)
+#define USBF_EPN_OUT_EN BIT(19)
+#define USBF_EPN_OUT_STALL_EN BIT(20)
+#define USBF_EPN_OUT_NAK_ERR_EN BIT(21)
+#define USBF_EPN_OUT_OR_EN BIT(22)
+#define USBF_EPN_OUT_END_EN BIT(23)
+
+#define USBF_REG_EPN_DMA_CTRL 0x00C
+#define USBF_EPN_DMAMODE0 BIT(0)
+#define USBF_EPN_DMA_EN BIT(4)
+#define USBF_EPN_STOP_SET BIT(8)
+#define USBF_EPN_BURST_SET BIT(9)
+#define USBF_EPN_DEND_SET BIT(10)
+#define USBF_EPN_STOP_MODE BIT(11)
+
+#define USBF_REG_EPN_PCKT_ADRS 0x010
+#define USBF_EPN_MPKT(_l) ((_l) << 0)
+#define USBF_EPN_BASEAD(_a) ((_a) << 16)
+
+#define USBF_REG_EPN_LEN_DCNT 0x014
+#define USBF_EPN_GET_LDATA(_r) ((_r) & 0x7FF)
+#define USBF_EPN_SET_DMACNT(_c) ((_c) << 16)
+#define USBF_EPN_GET_DMACNT(_r) (((_r) >> 16) & 0x1ff)
+
+#define USBF_REG_EPN_READ 0x018
+#define USBF_REG_EPN_WRITE 0x01C
+
+/* AHB-EPC Bridge registers */
+#define USBF_REG_AHBSCTR 0x1000
+#define USBF_REG_AHBMCTR 0x1004
+#define USBF_SYS_WBURST_TYPE BIT(2)
+#define USBF_SYS_ARBITER_CTR BIT(31)
+
+#define USBF_REG_AHBBINT 0x1008
+#define USBF_SYS_ERR_MASTER (0x0F << 0)
+#define USBF_SYS_SBUS_ERRINT0 BIT(4)
+#define USBF_SYS_SBUS_ERRINT1 BIT(5)
+#define USBF_SYS_MBUS_ERRINT BIT(6)
+#define USBF_SYS_VBUS_INT BIT(13)
+#define USBF_SYS_DMA_ENDINT_EPN(_n) (BIT(16) << (_n)) /* _n=1..15 */
+
+#define USBF_REG_AHBBINTEN 0x100C
+#define USBF_SYS_SBUS_ERRINT0EN BIT(4)
+#define USBF_SYS_SBUS_ERRINT1EN BIT(5)
+#define USBF_SYS_MBUS_ERRINTEN BIT(6)
+#define USBF_SYS_VBUS_INTEN BIT(13)
+#define USBF_SYS_DMA_ENDINTEN_EPN(_n) (BIT(16) << (_n)) /* _n=1..15 */
+
+#define USBF_REG_EPCTR 0x1010
+#define USBF_SYS_EPC_RST BIT(0)
+#define USBF_SYS_PLL_RST BIT(2)
+#define USBF_SYS_PLL_LOCK BIT(4)
+#define USBF_SYS_PLL_RESUME BIT(5)
+#define USBF_SYS_VBUS_LEVEL BIT(8)
+#define USBF_SYS_DIRPD BIT(12)
+
+#define USBF_REG_USBSSVER 0x1020
+#define USBF_REG_USBSSCONF 0x1024
+#define USBF_SYS_DMA_AVAILABLE(_n) (BIT(0) << (_n)) /* _n=0..15 */
+#define USBF_SYS_EP_AVAILABLE(_n) (BIT(16) << (_n)) /* _n=0..15 */
+
+#define USBF_BASE_DMA_EPN(_n) (0x1110 + (_n) * 0x010)
+/* EPn DMA registers offsets from Base USBF_BASE_DMA_EPN(n-1). n=1..15*/
+#define USBF_REG_DMA_EPN_DCR1 0x00
+#define USBF_SYS_EPN_REQEN BIT(0)
+#define USBF_SYS_EPN_DIR0 BIT(1)
+#define USBF_SYS_EPN_SET_DMACNT(_c) ((_c) << 16)
+#define USBF_SYS_EPN_GET_DMACNT(_r) (((_r) >> 16) & 0x0FF)
+
+#define USBF_REG_DMA_EPN_DCR2 0x04
+#define USBF_SYS_EPN_MPKT(_s) ((_s) << 0)
+#define USBF_SYS_EPN_LMPKT(_l) ((_l) << 16)
+
+#define USBF_REG_DMA_EPN_TADR 0x08
+
+/* USB request */
+struct usbf_req {
+ struct usb_request req;
+ struct list_head queue;
+ unsigned int is_zero_sent : 1;
+ unsigned int is_mapped : 1;
+ enum {
+ USBF_XFER_START,
+ USBF_XFER_WAIT_DMA,
+ USBF_XFER_SEND_NULL,
+ USBF_XFER_WAIT_END,
+ USBF_XFER_WAIT_DMA_SHORT,
+ USBF_XFER_WAIT_BRIDGE,
+ } xfer_step;
+ size_t dma_size;
+};
+
+/* USB Endpoint */
+struct usbf_ep {
+ struct usb_ep ep;
+ char name[32];
+ struct list_head queue;
+ unsigned int is_processing : 1;
+ unsigned int is_in : 1;
+ struct usbf_udc *udc;
+ void __iomem *regs;
+ void __iomem *dma_regs;
+ unsigned int id : 8;
+ unsigned int disabled : 1;
+ unsigned int is_wedged : 1;
+ unsigned int delayed_status : 1;
+ u32 status;
+ void (*bridge_on_dma_end)(struct usbf_ep *ep);
+};
+
+enum usbf_ep0state {
+ EP0_IDLE,
+ EP0_IN_DATA_PHASE,
+ EP0_OUT_DATA_PHASE,
+ EP0_OUT_STATUS_START_PHASE,
+ EP0_OUT_STATUS_PHASE,
+ EP0_OUT_STATUS_END_PHASE,
+ EP0_IN_STATUS_START_PHASE,
+ EP0_IN_STATUS_PHASE,
+ EP0_IN_STATUS_END_PHASE,
+};
+
+struct usbf_udc {
+ struct usb_gadget gadget;
+ struct usb_gadget_driver *driver;
+ struct device *dev;
+ void __iomem *regs;
+ spinlock_t lock;
+ bool is_remote_wakeup;
+ bool is_usb_suspended;
+ struct usbf_ep ep[USBF_NUM_ENDPOINTS];
+ /* for EP0 control messages */
+ enum usbf_ep0state ep0state;
+ struct usbf_req setup_reply;
+ u8 ep0_buf[USBF_EP0_MAX_PCKT_SIZE];
+};
+
+struct usbf_ep_info {
+ const char *name;
+ struct usb_ep_caps caps;
+ u16 base_addr;
+ unsigned int is_double : 1;
+ u16 maxpacket_limit;
+};
+
+#define USBF_SINGLE_BUFFER 0
+#define USBF_DOUBLE_BUFFER 1
+#define USBF_EP_INFO(_name, _caps, _base_addr, _is_double, _maxpacket_limit) \
+ { \
+ .name = _name, \
+ .caps = _caps, \
+ .base_addr = _base_addr, \
+ .is_double = _is_double, \
+ .maxpacket_limit = _maxpacket_limit, \
+ }
+
+/* This table is computed from the recommended values provided in the SOC
+ * datasheet. The buffer type (single/double) and the endpoint type cannot
+ * be changed. The mapping in internal RAM (base_addr and number of words)
+ * for each endpoints depends on the max packet size and the buffer type.
+ */
+static const struct usbf_ep_info usbf_ep_info[USBF_NUM_ENDPOINTS] = {
+ /* ep0: buf @0x0000 64 bytes, fixed 32 words */
+ [0] = USBF_EP_INFO("ep0-ctrl",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_CONTROL,
+ USB_EP_CAPS_DIR_ALL),
+ 0x0000, USBF_SINGLE_BUFFER, USBF_EP0_MAX_PCKT_SIZE),
+ /* ep1: buf @0x0020, 2 buffers 512 bytes -> (512 * 2 / 4) words */
+ [1] = USBF_EP_INFO("ep1-bulk",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK,
+ USB_EP_CAPS_DIR_ALL),
+ 0x0020, USBF_DOUBLE_BUFFER, 512),
+ /* ep2: buf @0x0120, 2 buffers 512 bytes -> (512 * 2 / 4) words */
+ [2] = USBF_EP_INFO("ep2-bulk",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK,
+ USB_EP_CAPS_DIR_ALL),
+ 0x0120, USBF_DOUBLE_BUFFER, 512),
+ /* ep3: buf @0x0220, 1 buffer 512 bytes -> (512 * 2 / 4) words */
+ [3] = USBF_EP_INFO("ep3-bulk",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK,
+ USB_EP_CAPS_DIR_ALL),
+ 0x0220, USBF_SINGLE_BUFFER, 512),
+ /* ep4: buf @0x02A0, 1 buffer 512 bytes -> (512 * 1 / 4) words */
+ [4] = USBF_EP_INFO("ep4-bulk",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK,
+ USB_EP_CAPS_DIR_ALL),
+ 0x02A0, USBF_SINGLE_BUFFER, 512),
+ /* ep5: buf @0x0320, 1 buffer 512 bytes -> (512 * 2 / 4) words */
+ [5] = USBF_EP_INFO("ep5-bulk",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK,
+ USB_EP_CAPS_DIR_ALL),
+ 0x0320, USBF_SINGLE_BUFFER, 512),
+ /* ep6: buf @0x03A0, 1 buffer 1024 bytes -> (1024 * 1 / 4) words */
+ [6] = USBF_EP_INFO("ep6-int",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_INT,
+ USB_EP_CAPS_DIR_ALL),
+ 0x03A0, USBF_SINGLE_BUFFER, 1024),
+ /* ep7: buf @0x04A0, 1 buffer 1024 bytes -> (1024 * 1 / 4) words */
+ [7] = USBF_EP_INFO("ep7-int",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_INT,
+ USB_EP_CAPS_DIR_ALL),
+ 0x04A0, USBF_SINGLE_BUFFER, 1024),
+ /* ep8: buf @0x0520, 1 buffer 1024 bytes -> (1024 * 1 / 4) words */
+ [8] = USBF_EP_INFO("ep8-int",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_INT,
+ USB_EP_CAPS_DIR_ALL),
+ 0x0520, USBF_SINGLE_BUFFER, 1024),
+ /* ep9: buf @0x0620, 1 buffer 1024 bytes -> (1024 * 1 / 4) words */
+ [9] = USBF_EP_INFO("ep9-int",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_INT,
+ USB_EP_CAPS_DIR_ALL),
+ 0x0620, USBF_SINGLE_BUFFER, 1024),
+ /* ep10: buf @0x0720, 2 buffers 1024 bytes -> (1024 * 2 / 4) words */
+ [10] = USBF_EP_INFO("ep10-iso",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO,
+ USB_EP_CAPS_DIR_ALL),
+ 0x0720, USBF_DOUBLE_BUFFER, 1024),
+ /* ep11: buf @0x0920, 2 buffers 1024 bytes -> (1024 * 2 / 4) words */
+ [11] = USBF_EP_INFO("ep11-iso",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO,
+ USB_EP_CAPS_DIR_ALL),
+ 0x0920, USBF_DOUBLE_BUFFER, 1024),
+ /* ep12: buf @0x0B20, 2 buffers 1024 bytes -> (1024 * 2 / 4) words */
+ [12] = USBF_EP_INFO("ep12-iso",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO,
+ USB_EP_CAPS_DIR_ALL),
+ 0x0B20, USBF_DOUBLE_BUFFER, 1024),
+ /* ep13: buf @0x0D20, 2 buffers 1024 bytes -> (1024 * 2 / 4) words */
+ [13] = USBF_EP_INFO("ep13-iso",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO,
+ USB_EP_CAPS_DIR_ALL),
+ 0x0D20, USBF_DOUBLE_BUFFER, 1024),
+ /* ep14: buf @0x0F20, 2 buffers 1024 bytes -> (1024 * 2 / 4) words */
+ [14] = USBF_EP_INFO("ep14-iso",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO,
+ USB_EP_CAPS_DIR_ALL),
+ 0x0F20, USBF_DOUBLE_BUFFER, 1024),
+ /* ep15: buf @0x1120, 2 buffers 1024 bytes -> (1024 * 2 / 4) words */
+ [15] = USBF_EP_INFO("ep15-iso",
+ USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO,
+ USB_EP_CAPS_DIR_ALL),
+ 0x1120, USBF_DOUBLE_BUFFER, 1024),
+};
+
+static inline u32 usbf_reg_readl(struct usbf_udc *udc, uint offset)
+{
+ return readl(udc->regs + offset);
+}
+
+static inline void usbf_reg_writel(struct usbf_udc *udc, uint offset, u32 val)
+{
+ writel(val, udc->regs + offset);
+}
+
+static inline void usbf_reg_bitset(struct usbf_udc *udc, uint offset, u32 set)
+{
+ u32 tmp;
+
+ tmp = usbf_reg_readl(udc, offset);
+ tmp |= set;
+ usbf_reg_writel(udc, offset, tmp);
+}
+
+static inline void usbf_reg_bitclr(struct usbf_udc *udc, uint offset, u32 clr)
+{
+ u32 tmp;
+
+ tmp = usbf_reg_readl(udc, offset);
+ tmp &= ~clr;
+ usbf_reg_writel(udc, offset, tmp);
+}
+
+static inline void usbf_reg_clrset(struct usbf_udc *udc, uint offset,
+ u32 clr, u32 set)
+{
+ u32 tmp;
+
+ tmp = usbf_reg_readl(udc, offset);
+ tmp &= ~clr;
+ tmp |= set;
+ usbf_reg_writel(udc, offset, tmp);
+}
+
+static inline u32 usbf_ep_reg_readl(struct usbf_ep *ep, uint offset)
+{
+ return readl(ep->regs + offset);
+}
+
+static inline void usbf_ep_reg_read_rep(struct usbf_ep *ep, uint offset,
+ void *dst, uint count)
+{
+ readsl(ep->regs + offset, dst, count);
+}
+
+static inline void usbf_ep_reg_writel(struct usbf_ep *ep, uint offset, u32 val)
+{
+ writel(val, ep->regs + offset);
+}
+
+static inline void usbf_ep_reg_write_rep(struct usbf_ep *ep, uint offset,
+ const void *src, uint count)
+{
+ writesl(ep->regs + offset, src, count);
+}
+
+static inline void usbf_ep_reg_bitset(struct usbf_ep *ep, uint offset, u32 set)
+{
+ u32 tmp;
+
+ tmp = usbf_ep_reg_readl(ep, offset);
+ tmp |= set;
+ usbf_ep_reg_writel(ep, offset, tmp);
+}
+
+static inline void usbf_ep_reg_bitclr(struct usbf_ep *ep, uint offset, u32 clr)
+{
+ u32 tmp;
+
+ tmp = usbf_ep_reg_readl(ep, offset);
+ tmp &= ~clr;
+ usbf_ep_reg_writel(ep, offset, tmp);
+}
+
+static inline void usbf_ep_reg_clrset(struct usbf_ep *ep, uint offset,
+ u32 clr, u32 set)
+{
+ u32 tmp;
+
+ tmp = usbf_ep_reg_readl(ep, offset);
+ tmp &= ~clr;
+ tmp |= set;
+ usbf_ep_reg_writel(ep, offset, tmp);
+}
+
+static inline u32 usbf_ep_dma_reg_readl(struct usbf_ep *ep, uint offset)
+{
+ return readl(ep->dma_regs + offset);
+}
+
+static inline void usbf_ep_dma_reg_writel(struct usbf_ep *ep, uint offset,
+ u32 val)
+{
+ writel(val, ep->dma_regs + offset);
+}
+
+static inline void usbf_ep_dma_reg_bitset(struct usbf_ep *ep, uint offset,
+ u32 set)
+{
+ u32 tmp;
+
+ tmp = usbf_ep_dma_reg_readl(ep, offset);
+ tmp |= set;
+ usbf_ep_dma_reg_writel(ep, offset, tmp);
+}
+
+static inline void usbf_ep_dma_reg_bitclr(struct usbf_ep *ep, uint offset,
+ u32 clr)
+{
+ u32 tmp;
+
+ tmp = usbf_ep_dma_reg_readl(ep, offset);
+ tmp &= ~clr;
+ usbf_ep_dma_reg_writel(ep, offset, tmp);
+}
+
+static inline void usbf_ep_dma_reg_clrset(struct usbf_ep *ep, uint offset,
+ u32 clr, u32 set)
+{
+ u32 tmp;
+
+ tmp = usbf_ep_dma_reg_readl(ep, offset);
+ tmp &= ~clr;
+ tmp |= set;
+ usbf_ep_dma_reg_writel(ep, offset, tmp);
+}
+
+static void usbf_ep0_send_null(struct usbf_ep *ep0, bool is_data1)
+{
+ u32 set;
+
+ set = USBF_EP0_DEND;
+ if (is_data1)
+ set |= USBF_EP0_PIDCLR;
+
+ usbf_ep_reg_bitset(ep0, USBF_REG_EP0_CONTROL, set);
+}
+
+static int usbf_ep0_pio_in(struct usbf_ep *ep0, struct usbf_req *req)
+{
+ unsigned int left;
+ unsigned int nb;
+ const void *buf;
+ u32 ctrl;
+ u32 last;
+
+ left = req->req.length - req->req.actual;
+
+ if (left == 0) {
+ if (!req->is_zero_sent) {
+ if (req->req.length == 0) {
+ dev_dbg(ep0->udc->dev, "ep0 send null\n");
+ usbf_ep0_send_null(ep0, false);
+ req->is_zero_sent = 1;
+ return -EINPROGRESS;
+ }
+ if ((req->req.actual % ep0->ep.maxpacket) == 0) {
+ if (req->req.zero) {
+ dev_dbg(ep0->udc->dev, "ep0 send null\n");
+ usbf_ep0_send_null(ep0, false);
+ req->is_zero_sent = 1;
+ return -EINPROGRESS;
+ }
+ }
+ }
+ return 0;
+ }
+
+ if (left > ep0->ep.maxpacket)
+ left = ep0->ep.maxpacket;
+
+ buf = req->req.buf;
+ buf += req->req.actual;
+
+ nb = left / sizeof(u32);
+ if (nb) {
+ usbf_ep_reg_write_rep(ep0, USBF_REG_EP0_WRITE, buf, nb);
+ buf += (nb * sizeof(u32));
+ req->req.actual += (nb * sizeof(u32));
+ left -= (nb * sizeof(u32));
+ }
+ ctrl = usbf_ep_reg_readl(ep0, USBF_REG_EP0_CONTROL);
+ ctrl &= ~USBF_EP0_DW_MASK;
+ if (left) {
+ memcpy(&last, buf, left);
+ usbf_ep_reg_writel(ep0, USBF_REG_EP0_WRITE, last);
+ ctrl |= USBF_EP0_DW(left);
+ req->req.actual += left;
+ }
+ usbf_ep_reg_writel(ep0, USBF_REG_EP0_CONTROL, ctrl | USBF_EP0_DEND);
+
+ dev_dbg(ep0->udc->dev, "ep0 send %u/%u\n",
+ req->req.actual, req->req.length);
+
+ return -EINPROGRESS;
+}
+
+static int usbf_ep0_pio_out(struct usbf_ep *ep0, struct usbf_req *req)
+{
+ int req_status = 0;
+ unsigned int count;
+ unsigned int recv;
+ unsigned int left;
+ unsigned int nb;
+ void *buf;
+ u32 last;
+
+ if (ep0->status & USBF_EP0_OUT_INT) {
+ recv = usbf_ep_reg_readl(ep0, USBF_REG_EP0_LENGTH) & USBF_EP0_LDATA;
+ count = recv;
+
+ buf = req->req.buf;
+ buf += req->req.actual;
+
+ left = req->req.length - req->req.actual;
+
+ dev_dbg(ep0->udc->dev, "ep0 recv %u, left %u\n", count, left);
+
+ if (left > ep0->ep.maxpacket)
+ left = ep0->ep.maxpacket;
+
+ if (count > left) {
+ req_status = -EOVERFLOW;
+ count = left;
+ }
+
+ if (count) {
+ nb = count / sizeof(u32);
+ if (nb) {
+ usbf_ep_reg_read_rep(ep0, USBF_REG_EP0_READ,
+ buf, nb);
+ buf += (nb * sizeof(u32));
+ req->req.actual += (nb * sizeof(u32));
+ count -= (nb * sizeof(u32));
+ }
+ if (count) {
+ last = usbf_ep_reg_readl(ep0, USBF_REG_EP0_READ);
+ memcpy(buf, &last, count);
+ req->req.actual += count;
+ }
+ }
+ dev_dbg(ep0->udc->dev, "ep0 recv %u/%u\n",
+ req->req.actual, req->req.length);
+
+ if (req_status) {
+ dev_dbg(ep0->udc->dev, "ep0 req.status=%d\n", req_status);
+ req->req.status = req_status;
+ return 0;
+ }
+
+ if (recv < ep0->ep.maxpacket) {
+ dev_dbg(ep0->udc->dev, "ep0 short packet\n");
+ /* This is a short packet -> It is the end */
+ req->req.status = 0;
+ return 0;
+ }
+
+ /* The Data stage of a control transfer from an endpoint to the
+ * host is complete when the endpoint does one of the following:
+ * - Has transferred exactly the expected amount of data
+ * - Transfers a packet with a payload size less than
+ * wMaxPacketSize or transfers a zero-length packet
+ */
+ if (req->req.actual == req->req.length) {
+ req->req.status = 0;
+ return 0;
+ }
+ }
+
+ if (ep0->status & USBF_EP0_OUT_NULL_INT) {
+ /* NULL packet received */
+ dev_dbg(ep0->udc->dev, "ep0 null packet\n");
+ if (req->req.actual != req->req.length) {
+ req->req.status = req->req.short_not_ok ?
+ -EREMOTEIO : 0;
+ } else {
+ req->req.status = 0;
+ }
+ return 0;
+ }
+
+ return -EINPROGRESS;
+}
+
+static void usbf_ep0_fifo_flush(struct usbf_ep *ep0)
+{
+ u32 sts;
+ int ret;
+
+ usbf_ep_reg_bitset(ep0, USBF_REG_EP0_CONTROL, USBF_EP0_BCLR);
+
+ ret = readl_poll_timeout_atomic(ep0->regs + USBF_REG_EP0_STATUS, sts,
+ (sts & (USBF_EP0_IN_DATA | USBF_EP0_IN_EMPTY)) == USBF_EP0_IN_EMPTY,
+ 0, 10000);
+ if (ret)
+ dev_err(ep0->udc->dev, "ep0 flush fifo timed out\n");
+
+}
+
+static void usbf_epn_send_null(struct usbf_ep *epn)
+{
+ usbf_ep_reg_bitset(epn, USBF_REG_EPN_CONTROL, USBF_EPN_DEND);
+}
+
+static void usbf_epn_send_residue(struct usbf_ep *epn, const void *buf,
+ unsigned int size)
+{
+ u32 tmp;
+
+ memcpy(&tmp, buf, size);
+ usbf_ep_reg_writel(epn, USBF_REG_EPN_WRITE, tmp);
+
+ usbf_ep_reg_clrset(epn, USBF_REG_EPN_CONTROL,
+ USBF_EPN_DW_MASK,
+ USBF_EPN_DW(size) | USBF_EPN_DEND);
+}
+
+static int usbf_epn_pio_in(struct usbf_ep *epn, struct usbf_req *req)
+{
+ unsigned int left;
+ unsigned int nb;
+ const void *buf;
+
+ left = req->req.length - req->req.actual;
+
+ if (left == 0) {
+ if (!req->is_zero_sent) {
+ if (req->req.length == 0) {
+ dev_dbg(epn->udc->dev, "ep%u send_null\n", epn->id);
+ usbf_epn_send_null(epn);
+ req->is_zero_sent = 1;
+ return -EINPROGRESS;
+ }
+ if ((req->req.actual % epn->ep.maxpacket) == 0) {
+ if (req->req.zero) {
+ dev_dbg(epn->udc->dev, "ep%u send_null\n",
+ epn->id);
+ usbf_epn_send_null(epn);
+ req->is_zero_sent = 1;
+ return -EINPROGRESS;
+ }
+ }
+ }
+ return 0;
+ }
+
+ if (left > epn->ep.maxpacket)
+ left = epn->ep.maxpacket;
+
+ buf = req->req.buf;
+ buf += req->req.actual;
+
+ nb = left / sizeof(u32);
+ if (nb) {
+ usbf_ep_reg_write_rep(epn, USBF_REG_EPN_WRITE, buf, nb);
+ buf += (nb * sizeof(u32));
+ req->req.actual += (nb * sizeof(u32));
+ left -= (nb * sizeof(u32));
+ }
+
+ if (left) {
+ usbf_epn_send_residue(epn, buf, left);
+ req->req.actual += left;
+ } else {
+ usbf_ep_reg_clrset(epn, USBF_REG_EPN_CONTROL,
+ USBF_EPN_DW_MASK,
+ USBF_EPN_DEND);
+ }
+
+ dev_dbg(epn->udc->dev, "ep%u send %u/%u\n", epn->id, req->req.actual,
+ req->req.length);
+
+ return -EINPROGRESS;
+}
+
+static void usbf_epn_enable_in_end_int(struct usbf_ep *epn)
+{
+ usbf_ep_reg_bitset(epn, USBF_REG_EPN_INT_ENA, USBF_EPN_IN_END_EN);
+}
+
+static int usbf_epn_dma_in(struct usbf_ep *epn, struct usbf_req *req)
+{
+ unsigned int left;
+ u32 npkt;
+ u32 lastpkt;
+ int ret;
+
+ if (!IS_ALIGNED((uintptr_t)req->req.buf, 4)) {
+ dev_dbg(epn->udc->dev, "ep%u buf unaligned -> fallback pio\n",
+ epn->id);
+ return usbf_epn_pio_in(epn, req);
+ }
+
+ left = req->req.length - req->req.actual;
+
+ switch (req->xfer_step) {
+ default:
+ case USBF_XFER_START:
+ if (left == 0) {
+ dev_dbg(epn->udc->dev, "ep%u send null\n", epn->id);
+ usbf_epn_send_null(epn);
+ req->xfer_step = USBF_XFER_WAIT_END;
+ break;
+ }
+ if (left < 4) {
+ dev_dbg(epn->udc->dev, "ep%u send residue %u\n", epn->id,
+ left);
+ usbf_epn_send_residue(epn,
+ req->req.buf + req->req.actual, left);
+ req->req.actual += left;
+ req->xfer_step = USBF_XFER_WAIT_END;
+ break;
+ }
+
+ ret = usb_gadget_map_request(&epn->udc->gadget, &req->req, 1);
+ if (ret < 0) {
+ dev_err(epn->udc->dev, "usb_gadget_map_request failed (%d)\n",
+ ret);
+ return ret;
+ }
+ req->is_mapped = 1;
+
+ npkt = DIV_ROUND_UP(left, epn->ep.maxpacket);
+ lastpkt = (left % epn->ep.maxpacket);
+ if (lastpkt == 0)
+ lastpkt = epn->ep.maxpacket;
+ lastpkt &= ~0x3; /* DMA is done on 32bit units */
+
+ usbf_ep_dma_reg_writel(epn, USBF_REG_DMA_EPN_DCR2,
+ USBF_SYS_EPN_MPKT(epn->ep.maxpacket) | USBF_SYS_EPN_LMPKT(lastpkt));
+ usbf_ep_dma_reg_writel(epn, USBF_REG_DMA_EPN_TADR,
+ req->req.dma);
+ usbf_ep_dma_reg_writel(epn, USBF_REG_DMA_EPN_DCR1,
+ USBF_SYS_EPN_SET_DMACNT(npkt));
+ usbf_ep_dma_reg_bitset(epn, USBF_REG_DMA_EPN_DCR1,
+ USBF_SYS_EPN_REQEN);
+
+ usbf_ep_reg_writel(epn, USBF_REG_EPN_LEN_DCNT, USBF_EPN_SET_DMACNT(npkt));
+
+ usbf_ep_reg_bitset(epn, USBF_REG_EPN_CONTROL, USBF_EPN_AUTO);
+
+ /* The end of DMA transfer at the USBF level needs to be handle
+ * after the detection of the end of DMA transfer at the brige
+ * level.
+ * To force this sequence, EPN_IN_END_EN will be set by the
+ * detection of the end of transfer at bridge level (ie. bridge
+ * interrupt).
+ */
+ usbf_ep_reg_bitclr(epn, USBF_REG_EPN_INT_ENA,
+ USBF_EPN_IN_EN | USBF_EPN_IN_END_EN);
+ epn->bridge_on_dma_end = usbf_epn_enable_in_end_int;
+
+ /* Clear any pending IN_END interrupt */
+ usbf_ep_reg_writel(epn, USBF_REG_EPN_STATUS, ~(u32)USBF_EPN_IN_END_INT);
+
+ usbf_ep_reg_writel(epn, USBF_REG_EPN_DMA_CTRL,
+ USBF_EPN_BURST_SET | USBF_EPN_DMAMODE0);
+ usbf_ep_reg_bitset(epn, USBF_REG_EPN_DMA_CTRL,
+ USBF_EPN_DMA_EN);
+
+ req->dma_size = (npkt - 1) * epn->ep.maxpacket + lastpkt;
+
+ dev_dbg(epn->udc->dev, "ep%u dma xfer %zu\n", epn->id,
+ req->dma_size);
+
+ req->xfer_step = USBF_XFER_WAIT_DMA;
+ break;
+
+ case USBF_XFER_WAIT_DMA:
+ if (!(epn->status & USBF_EPN_IN_END_INT)) {
+ dev_dbg(epn->udc->dev, "ep%u dma not done\n", epn->id);
+ break;
+ }
+ dev_dbg(epn->udc->dev, "ep%u dma done\n", epn->id);
+
+ usb_gadget_unmap_request(&epn->udc->gadget, &req->req, 1);
+ req->is_mapped = 0;
+
+ usbf_ep_reg_bitclr(epn, USBF_REG_EPN_CONTROL, USBF_EPN_AUTO);
+
+ usbf_ep_reg_clrset(epn, USBF_REG_EPN_INT_ENA,
+ USBF_EPN_IN_END_EN,
+ USBF_EPN_IN_EN);
+
+ req->req.actual += req->dma_size;
+
+ left = req->req.length - req->req.actual;
+ if (left) {
+ usbf_ep_reg_writel(epn, USBF_REG_EPN_STATUS, ~(u32)USBF_EPN_IN_INT);
+
+ dev_dbg(epn->udc->dev, "ep%u send residue %u\n", epn->id,
+ left);
+ usbf_epn_send_residue(epn,
+ req->req.buf + req->req.actual, left);
+ req->req.actual += left;
+ req->xfer_step = USBF_XFER_WAIT_END;
+ break;
+ }
+
+ if (req->req.actual % epn->ep.maxpacket) {
+ /* last packet was a short packet. Tell the hardware to
+ * send it right now.
+ */
+ dev_dbg(epn->udc->dev, "ep%u send short\n", epn->id);
+ usbf_ep_reg_writel(epn, USBF_REG_EPN_STATUS,
+ ~(u32)USBF_EPN_IN_INT);
+ usbf_ep_reg_bitset(epn, USBF_REG_EPN_CONTROL,
+ USBF_EPN_DEND);
+
+ req->xfer_step = USBF_XFER_WAIT_END;
+ break;
+ }
+
+ /* Last packet size was a maxpacket size
+ * Send null packet if needed
+ */
+ if (req->req.zero) {
+ req->xfer_step = USBF_XFER_SEND_NULL;
+ break;
+ }
+
+ /* No more action to do. Wait for the end of the USB transfer */
+ req->xfer_step = USBF_XFER_WAIT_END;
+ break;
+
+ case USBF_XFER_SEND_NULL:
+ dev_dbg(epn->udc->dev, "ep%u send null\n", epn->id);
+ usbf_epn_send_null(epn);
+ req->xfer_step = USBF_XFER_WAIT_END;
+ break;
+
+ case USBF_XFER_WAIT_END:
+ if (!(epn->status & USBF_EPN_IN_INT)) {
+ dev_dbg(epn->udc->dev, "ep%u end not done\n", epn->id);
+ break;
+ }
+ dev_dbg(epn->udc->dev, "ep%u send done %u/%u\n", epn->id,
+ req->req.actual, req->req.length);
+ req->xfer_step = USBF_XFER_START;
+ return 0;
+ }
+
+ return -EINPROGRESS;
+}
+
+static void usbf_epn_recv_residue(struct usbf_ep *epn, void *buf,
+ unsigned int size)
+{
+ u32 last;
+
+ last = usbf_ep_reg_readl(epn, USBF_REG_EPN_READ);
+ memcpy(buf, &last, size);
+}
+
+static int usbf_epn_pio_out(struct usbf_ep *epn, struct usbf_req *req)
+{
+ int req_status = 0;
+ unsigned int count;
+ unsigned int recv;
+ unsigned int left;
+ unsigned int nb;
+ void *buf;
+
+ if (epn->status & USBF_EPN_OUT_INT) {
+ recv = USBF_EPN_GET_LDATA(
+ usbf_ep_reg_readl(epn, USBF_REG_EPN_LEN_DCNT));
+ count = recv;
+
+ buf = req->req.buf;
+ buf += req->req.actual;
+
+ left = req->req.length - req->req.actual;
+
+ dev_dbg(epn->udc->dev, "ep%u recv %u, left %u, mpkt %u\n", epn->id,
+ recv, left, epn->ep.maxpacket);
+
+ if (left > epn->ep.maxpacket)
+ left = epn->ep.maxpacket;
+
+ if (count > left) {
+ req_status = -EOVERFLOW;
+ count = left;
+ }
+
+ if (count) {
+ nb = count / sizeof(u32);
+ if (nb) {
+ usbf_ep_reg_read_rep(epn, USBF_REG_EPN_READ,
+ buf, nb);
+ buf += (nb * sizeof(u32));
+ req->req.actual += (nb * sizeof(u32));
+ count -= (nb * sizeof(u32));
+ }
+ if (count) {
+ usbf_epn_recv_residue(epn, buf, count);
+ req->req.actual += count;
+ }
+ }
+ dev_dbg(epn->udc->dev, "ep%u recv %u/%u\n", epn->id,
+ req->req.actual, req->req.length);
+
+ if (req_status) {
+ dev_dbg(epn->udc->dev, "ep%u req.status=%d\n", epn->id,
+ req_status);
+ req->req.status = req_status;
+ return 0;
+ }
+
+ if (recv < epn->ep.maxpacket) {
+ dev_dbg(epn->udc->dev, "ep%u short packet\n", epn->id);
+ /* This is a short packet -> It is the end */
+ req->req.status = 0;
+ return 0;
+ }
+
+ /* Request full -> complete */
+ if (req->req.actual == req->req.length) {
+ req->req.status = 0;
+ return 0;
+ }
+ }
+
+ if (epn->status & USBF_EPN_OUT_NULL_INT) {
+ /* NULL packet received */
+ dev_dbg(epn->udc->dev, "ep%u null packet\n", epn->id);
+ if (req->req.actual != req->req.length) {
+ req->req.status = req->req.short_not_ok ?
+ -EREMOTEIO : 0;
+ } else {
+ req->req.status = 0;
+ }
+ return 0;
+ }
+
+ return -EINPROGRESS;
+}
+
+static void usbf_epn_enable_out_end_int(struct usbf_ep *epn)
+{
+ usbf_ep_reg_bitset(epn, USBF_REG_EPN_INT_ENA, USBF_EPN_OUT_END_EN);
+}
+
+static void usbf_epn_process_queue(struct usbf_ep *epn);
+
+static void usbf_epn_dma_out_send_dma(struct usbf_ep *epn, dma_addr_t addr, u32 npkt, bool is_short)
+{
+ usbf_ep_dma_reg_writel(epn, USBF_REG_DMA_EPN_DCR2, USBF_SYS_EPN_MPKT(epn->ep.maxpacket));
+ usbf_ep_dma_reg_writel(epn, USBF_REG_DMA_EPN_TADR, addr);
+
+ if (is_short) {
+ usbf_ep_dma_reg_writel(epn, USBF_REG_DMA_EPN_DCR1,
+ USBF_SYS_EPN_SET_DMACNT(1) | USBF_SYS_EPN_DIR0);
+ usbf_ep_dma_reg_bitset(epn, USBF_REG_DMA_EPN_DCR1,
+ USBF_SYS_EPN_REQEN);
+
+ usbf_ep_reg_writel(epn, USBF_REG_EPN_LEN_DCNT,
+ USBF_EPN_SET_DMACNT(0));
+
+ /* The end of DMA transfer at the USBF level needs to be handled
+ * after the detection of the end of DMA transfer at the brige
+ * level.
+ * To force this sequence, enabling the OUT_END interrupt will
+ * be donee by the detection of the end of transfer at bridge
+ * level (ie. bridge interrupt).
+ */
+ usbf_ep_reg_bitclr(epn, USBF_REG_EPN_INT_ENA,
+ USBF_EPN_OUT_EN | USBF_EPN_OUT_NULL_EN | USBF_EPN_OUT_END_EN);
+ epn->bridge_on_dma_end = usbf_epn_enable_out_end_int;
+
+ /* Clear any pending OUT_END interrupt */
+ usbf_ep_reg_writel(epn, USBF_REG_EPN_STATUS,
+ ~(u32)USBF_EPN_OUT_END_INT);
+
+ usbf_ep_reg_writel(epn, USBF_REG_EPN_DMA_CTRL,
+ USBF_EPN_STOP_MODE | USBF_EPN_STOP_SET | USBF_EPN_DMAMODE0);
+ usbf_ep_reg_bitset(epn, USBF_REG_EPN_DMA_CTRL,
+ USBF_EPN_DMA_EN);
+ return;
+ }
+
+ usbf_ep_dma_reg_writel(epn, USBF_REG_DMA_EPN_DCR1,
+ USBF_SYS_EPN_SET_DMACNT(npkt) | USBF_SYS_EPN_DIR0);
+ usbf_ep_dma_reg_bitset(epn, USBF_REG_DMA_EPN_DCR1,
+ USBF_SYS_EPN_REQEN);
+
+ usbf_ep_reg_writel(epn, USBF_REG_EPN_LEN_DCNT,
+ USBF_EPN_SET_DMACNT(npkt));
+
+ /* Here, the bridge may or may not generate an interrupt to signal the
+ * end of DMA transfer.
+ * Keep only OUT_END interrupt and let handle the bridge later during
+ * the OUT_END processing.
+ */
+ usbf_ep_reg_clrset(epn, USBF_REG_EPN_INT_ENA,
+ USBF_EPN_OUT_EN | USBF_EPN_OUT_NULL_EN,
+ USBF_EPN_OUT_END_EN);
+
+ /* Disable bridge interrupt. It will be renabled later */
+ usbf_reg_bitclr(epn->udc, USBF_REG_AHBBINTEN,
+ USBF_SYS_DMA_ENDINTEN_EPN(epn->id));
+
+ /* Clear any pending DMA_END interrupt at bridge level */
+ usbf_reg_writel(epn->udc, USBF_REG_AHBBINT,
+ USBF_SYS_DMA_ENDINT_EPN(epn->id));
+
+ /* Clear any pending OUT_END interrupt */
+ usbf_ep_reg_writel(epn, USBF_REG_EPN_STATUS,
+ ~(u32)USBF_EPN_OUT_END_INT);
+
+ usbf_ep_reg_writel(epn, USBF_REG_EPN_DMA_CTRL,
+ USBF_EPN_STOP_MODE | USBF_EPN_STOP_SET | USBF_EPN_DMAMODE0 | USBF_EPN_BURST_SET);
+ usbf_ep_reg_bitset(epn, USBF_REG_EPN_DMA_CTRL,
+ USBF_EPN_DMA_EN);
+}
+
+static size_t usbf_epn_dma_out_complete_dma(struct usbf_ep *epn, bool is_short)
+{
+ u32 dmacnt;
+ u32 tmp;
+ int ret;
+
+ /* Restore interrupt mask */
+ usbf_ep_reg_clrset(epn, USBF_REG_EPN_INT_ENA,
+ USBF_EPN_OUT_END_EN,
+ USBF_EPN_OUT_EN | USBF_EPN_OUT_NULL_EN);
+
+ if (is_short) {
+ /* Nothing more to do when the DMA was for a short packet */
+ return 0;
+ }
+
+ /* Enable the bridge interrupt */
+ usbf_reg_bitset(epn->udc, USBF_REG_AHBBINTEN,
+ USBF_SYS_DMA_ENDINTEN_EPN(epn->id));
+
+ tmp = usbf_ep_reg_readl(epn, USBF_REG_EPN_LEN_DCNT);
+ dmacnt = USBF_EPN_GET_DMACNT(tmp);
+
+ if (dmacnt) {
+ /* Some packet were not received (halted by a short or a null
+ * packet.
+ * The bridge never raises an interrupt in this case.
+ * Wait for the end of transfer at bridge level
+ */
+ ret = readl_poll_timeout_atomic(
+ epn->dma_regs + USBF_REG_DMA_EPN_DCR1,
+ tmp, (USBF_SYS_EPN_GET_DMACNT(tmp) == dmacnt),
+ 0, 10000);
+ if (ret) {
+ dev_err(epn->udc->dev, "ep%u wait bridge timed out\n",
+ epn->id);
+ }
+
+ usbf_ep_dma_reg_bitclr(epn, USBF_REG_DMA_EPN_DCR1,
+ USBF_SYS_EPN_REQEN);
+
+ /* The dmacnt value tells how many packet were not transferred
+ * from the maximum number of packet we set for the DMA transfer.
+ * Compute the left DMA size based on this value.
+ */
+ return dmacnt * epn->ep.maxpacket;
+ }
+
+ return 0;
+}
+
+static int usbf_epn_dma_out(struct usbf_ep *epn, struct usbf_req *req)
+{
+ unsigned int dma_left;
+ unsigned int count;
+ unsigned int recv;
+ unsigned int left;
+ u32 npkt;
+ int ret;
+
+ if (!IS_ALIGNED((uintptr_t)req->req.buf, 4)) {
+ dev_dbg(epn->udc->dev, "ep%u buf unaligned -> fallback pio\n",
+ epn->id);
+ return usbf_epn_pio_out(epn, req);
+ }
+
+ switch (req->xfer_step) {
+ default:
+ case USBF_XFER_START:
+ if (epn->status & USBF_EPN_OUT_NULL_INT) {
+ dev_dbg(epn->udc->dev, "ep%u null packet\n", epn->id);
+ if (req->req.actual != req->req.length) {
+ req->req.status = req->req.short_not_ok ?
+ -EREMOTEIO : 0;
+ } else {
+ req->req.status = 0;
+ }
+ return 0;
+ }
+
+ if (!(epn->status & USBF_EPN_OUT_INT)) {
+ dev_dbg(epn->udc->dev, "ep%u OUT_INT not set -> spurious\n",
+ epn->id);
+ break;
+ }
+
+ recv = USBF_EPN_GET_LDATA(
+ usbf_ep_reg_readl(epn, USBF_REG_EPN_LEN_DCNT));
+ if (!recv) {
+ dev_dbg(epn->udc->dev, "ep%u recv = 0 -> spurious\n",
+ epn->id);
+ break;
+ }
+
+ left = req->req.length - req->req.actual;
+
+ dev_dbg(epn->udc->dev, "ep%u recv %u, left %u, mpkt %u\n", epn->id,
+ recv, left, epn->ep.maxpacket);
+
+ if (recv > left) {
+ dev_err(epn->udc->dev, "ep%u overflow (%u/%u)\n",
+ epn->id, recv, left);
+ req->req.status = -EOVERFLOW;
+ return -EOVERFLOW;
+ }
+
+ if (recv < epn->ep.maxpacket) {
+ /* Short packet received */
+ dev_dbg(epn->udc->dev, "ep%u short packet\n", epn->id);
+ if (recv <= 3) {
+ usbf_epn_recv_residue(epn,
+ req->req.buf + req->req.actual, recv);
+ req->req.actual += recv;
+
+ dev_dbg(epn->udc->dev, "ep%u recv done %u/%u\n",
+ epn->id, req->req.actual, req->req.length);
+
+ req->xfer_step = USBF_XFER_START;
+ return 0;
+ }
+
+ ret = usb_gadget_map_request(&epn->udc->gadget, &req->req, 0);
+ if (ret < 0) {
+ dev_err(epn->udc->dev, "map request failed (%d)\n",
+ ret);
+ return ret;
+ }
+ req->is_mapped = 1;
+
+ usbf_epn_dma_out_send_dma(epn,
+ req->req.dma + req->req.actual,
+ 1, true);
+ req->dma_size = recv & ~0x3;
+
+ dev_dbg(epn->udc->dev, "ep%u dma short xfer %zu\n", epn->id,
+ req->dma_size);
+
+ req->xfer_step = USBF_XFER_WAIT_DMA_SHORT;
+ break;
+ }
+
+ ret = usb_gadget_map_request(&epn->udc->gadget, &req->req, 0);
+ if (ret < 0) {
+ dev_err(epn->udc->dev, "map request failed (%d)\n",
+ ret);
+ return ret;
+ }
+ req->is_mapped = 1;
+
+ /* Use the maximum DMA size according to the request buffer.
+ * We will adjust the received size later at the end of the DMA
+ * transfer with the left size computed from
+ * usbf_epn_dma_out_complete_dma().
+ */
+ npkt = left / epn->ep.maxpacket;
+ usbf_epn_dma_out_send_dma(epn,
+ req->req.dma + req->req.actual,
+ npkt, false);
+ req->dma_size = npkt * epn->ep.maxpacket;
+
+ dev_dbg(epn->udc->dev, "ep%u dma xfer %zu (%u)\n", epn->id,
+ req->dma_size, npkt);
+
+ req->xfer_step = USBF_XFER_WAIT_DMA;
+ break;
+
+ case USBF_XFER_WAIT_DMA_SHORT:
+ if (!(epn->status & USBF_EPN_OUT_END_INT)) {
+ dev_dbg(epn->udc->dev, "ep%u dma short not done\n", epn->id);
+ break;
+ }
+ dev_dbg(epn->udc->dev, "ep%u dma short done\n", epn->id);
+
+ usbf_epn_dma_out_complete_dma(epn, true);
+
+ usb_gadget_unmap_request(&epn->udc->gadget, &req->req, 0);
+ req->is_mapped = 0;
+
+ req->req.actual += req->dma_size;
+
+ recv = USBF_EPN_GET_LDATA(
+ usbf_ep_reg_readl(epn, USBF_REG_EPN_LEN_DCNT));
+
+ count = recv & 0x3;
+ if (count) {
+ dev_dbg(epn->udc->dev, "ep%u recv residue %u\n", epn->id,
+ count);
+ usbf_epn_recv_residue(epn,
+ req->req.buf + req->req.actual, count);
+ req->req.actual += count;
+ }
+
+ dev_dbg(epn->udc->dev, "ep%u recv done %u/%u\n", epn->id,
+ req->req.actual, req->req.length);
+
+ req->xfer_step = USBF_XFER_START;
+ return 0;
+
+ case USBF_XFER_WAIT_DMA:
+ if (!(epn->status & USBF_EPN_OUT_END_INT)) {
+ dev_dbg(epn->udc->dev, "ep%u dma not done\n", epn->id);
+ break;
+ }
+ dev_dbg(epn->udc->dev, "ep%u dma done\n", epn->id);
+
+ dma_left = usbf_epn_dma_out_complete_dma(epn, false);
+ if (dma_left) {
+ /* Adjust the final DMA size with */
+ count = req->dma_size - dma_left;
+
+ dev_dbg(epn->udc->dev, "ep%u dma xfer done %u\n", epn->id,
+ count);
+
+ req->req.actual += count;
+
+ if (epn->status & USBF_EPN_OUT_NULL_INT) {
+ /* DMA was stopped by a null packet reception */
+ dev_dbg(epn->udc->dev, "ep%u dma stopped by null pckt\n",
+ epn->id);
+ usb_gadget_unmap_request(&epn->udc->gadget,
+ &req->req, 0);
+ req->is_mapped = 0;
+
+ usbf_ep_reg_writel(epn, USBF_REG_EPN_STATUS,
+ ~(u32)USBF_EPN_OUT_NULL_INT);
+
+ if (req->req.actual != req->req.length) {
+ req->req.status = req->req.short_not_ok ?
+ -EREMOTEIO : 0;
+ } else {
+ req->req.status = 0;
+ }
+ dev_dbg(epn->udc->dev, "ep%u recv done %u/%u\n",
+ epn->id, req->req.actual, req->req.length);
+ req->xfer_step = USBF_XFER_START;
+ return 0;
+ }
+
+ recv = USBF_EPN_GET_LDATA(
+ usbf_ep_reg_readl(epn, USBF_REG_EPN_LEN_DCNT));
+ left = req->req.length - req->req.actual;
+ if (recv > left) {
+ dev_err(epn->udc->dev,
+ "ep%u overflow (%u/%u)\n", epn->id,
+ recv, left);
+ req->req.status = -EOVERFLOW;
+ usb_gadget_unmap_request(&epn->udc->gadget,
+ &req->req, 0);
+ req->is_mapped = 0;
+
+ req->xfer_step = USBF_XFER_START;
+ return -EOVERFLOW;
+ }
+
+ if (recv > 3) {
+ usbf_epn_dma_out_send_dma(epn,
+ req->req.dma + req->req.actual,
+ 1, true);
+ req->dma_size = recv & ~0x3;
+
+ dev_dbg(epn->udc->dev, "ep%u dma short xfer %zu\n",
+ epn->id, req->dma_size);
+
+ req->xfer_step = USBF_XFER_WAIT_DMA_SHORT;
+ break;
+ }
+
+ usb_gadget_unmap_request(&epn->udc->gadget, &req->req, 0);
+ req->is_mapped = 0;
+
+ count = recv & 0x3;
+ if (count) {
+ dev_dbg(epn->udc->dev, "ep%u recv residue %u\n",
+ epn->id, count);
+ usbf_epn_recv_residue(epn,
+ req->req.buf + req->req.actual, count);
+ req->req.actual += count;
+ }
+
+ dev_dbg(epn->udc->dev, "ep%u recv done %u/%u\n", epn->id,
+ req->req.actual, req->req.length);
+
+ req->xfer_step = USBF_XFER_START;
+ return 0;
+ }
+
+ /* Process queue at bridge interrupt only */
+ usbf_ep_reg_bitclr(epn, USBF_REG_EPN_INT_ENA,
+ USBF_EPN_OUT_END_EN | USBF_EPN_OUT_EN | USBF_EPN_OUT_NULL_EN);
+ epn->status = 0;
+ epn->bridge_on_dma_end = usbf_epn_process_queue;
+
+ req->xfer_step = USBF_XFER_WAIT_BRIDGE;
+ break;
+
+ case USBF_XFER_WAIT_BRIDGE:
+ dev_dbg(epn->udc->dev, "ep%u bridge transfers done\n", epn->id);
+
+ /* Restore interrupt mask */
+ usbf_ep_reg_clrset(epn, USBF_REG_EPN_INT_ENA,
+ USBF_EPN_OUT_END_EN,
+ USBF_EPN_OUT_EN | USBF_EPN_OUT_NULL_EN);
+
+ usb_gadget_unmap_request(&epn->udc->gadget, &req->req, 0);
+ req->is_mapped = 0;
+
+ req->req.actual += req->dma_size;
+
+ req->xfer_step = USBF_XFER_START;
+ left = req->req.length - req->req.actual;
+ if (!left) {
+ /* No more data can be added to the buffer */
+ dev_dbg(epn->udc->dev, "ep%u recv done %u/%u\n", epn->id,
+ req->req.actual, req->req.length);
+ return 0;
+ }
+ dev_dbg(epn->udc->dev, "ep%u recv done %u/%u, wait more data\n",
+ epn->id, req->req.actual, req->req.length);
+ break;
+ }
+
+ return -EINPROGRESS;
+}
+
+static void usbf_epn_dma_stop(struct usbf_ep *epn)
+{
+ usbf_ep_dma_reg_bitclr(epn, USBF_REG_DMA_EPN_DCR1, USBF_SYS_EPN_REQEN);
+
+ /* In the datasheet:
+ * If EP[m]_REQEN = 0b is set during DMA transfer, AHB-EPC stops DMA
+ * after 1 packet transfer completed.
+ * Therefore, wait sufficient time for ensuring DMA transfer
+ * completion. The WAIT time depends on the system, especially AHB
+ * bus activity
+ * So arbitrary 10ms would be sufficient.
+ */
+ mdelay(10);
+
+ usbf_ep_reg_bitclr(epn, USBF_REG_EPN_DMA_CTRL, USBF_EPN_DMA_EN);
+}
+
+static void usbf_epn_dma_abort(struct usbf_ep *epn, struct usbf_req *req)
+{
+ dev_dbg(epn->udc->dev, "ep%u %s dma abort\n", epn->id,
+ epn->is_in ? "in" : "out");
+
+ epn->bridge_on_dma_end = NULL;
+
+ usbf_epn_dma_stop(epn);
+
+ usb_gadget_unmap_request(&epn->udc->gadget, &req->req,
+ epn->is_in ? 1 : 0);
+ req->is_mapped = 0;
+
+ usbf_ep_reg_bitclr(epn, USBF_REG_EPN_CONTROL, USBF_EPN_AUTO);
+
+ if (epn->is_in) {
+ usbf_ep_reg_clrset(epn, USBF_REG_EPN_INT_ENA,
+ USBF_EPN_IN_END_EN,
+ USBF_EPN_IN_EN);
+ } else {
+ usbf_ep_reg_clrset(epn, USBF_REG_EPN_INT_ENA,
+ USBF_EPN_OUT_END_EN,
+ USBF_EPN_OUT_EN | USBF_EPN_OUT_NULL_EN);
+ }
+
+ /* As dma is stopped, be sure that no DMA interrupt are pending */
+ usbf_ep_reg_writel(epn, USBF_REG_EPN_STATUS,
+ USBF_EPN_IN_END_INT | USBF_EPN_OUT_END_INT);
+
+ usbf_reg_writel(epn->udc, USBF_REG_AHBBINT, USBF_SYS_DMA_ENDINT_EPN(epn->id));
+
+ /* Enable DMA interrupt the bridge level */
+ usbf_reg_bitset(epn->udc, USBF_REG_AHBBINTEN,
+ USBF_SYS_DMA_ENDINTEN_EPN(epn->id));
+
+ /* Reset transfer step */
+ req->xfer_step = USBF_XFER_START;
+}
+
+static void usbf_epn_fifo_flush(struct usbf_ep *epn)
+{
+ u32 ctrl;
+ u32 sts;
+ int ret;
+
+ dev_dbg(epn->udc->dev, "ep%u %s fifo flush\n", epn->id,
+ epn->is_in ? "in" : "out");
+
+ ctrl = usbf_ep_reg_readl(epn, USBF_REG_EPN_CONTROL);
+ usbf_ep_reg_writel(epn, USBF_REG_EPN_CONTROL, ctrl | USBF_EPN_BCLR);
+
+ if (ctrl & USBF_EPN_DIR0)
+ return;
+
+ ret = readl_poll_timeout_atomic(epn->regs + USBF_REG_EPN_STATUS, sts,
+ (sts & (USBF_EPN_IN_DATA | USBF_EPN_IN_EMPTY)) == USBF_EPN_IN_EMPTY,
+ 0, 10000);
+ if (ret)
+ dev_err(epn->udc->dev, "ep%u flush fifo timed out\n", epn->id);
+}
+
+static void usbf_ep_req_done(struct usbf_ep *ep, struct usbf_req *req,
+ int status)
+{
+ list_del_init(&req->queue);
+
+ if (status) {
+ req->req.status = status;
+ } else {
+ if (req->req.status == -EINPROGRESS)
+ req->req.status = status;
+ }
+
+ dev_dbg(ep->udc->dev, "ep%u %s req done length %u/%u, status=%d\n", ep->id,
+ ep->is_in ? "in" : "out",
+ req->req.actual, req->req.length, req->req.status);
+
+ if (req->is_mapped)
+ usbf_epn_dma_abort(ep, req);
+
+ spin_unlock(&ep->udc->lock);
+ usb_gadget_giveback_request(&ep->ep, &req->req);
+ spin_lock(&ep->udc->lock);
+}
+
+static void usbf_ep_nuke(struct usbf_ep *ep, int status)
+{
+ struct usbf_req *req;
+
+ dev_dbg(ep->udc->dev, "ep%u %s nuke status %d\n", ep->id,
+ ep->is_in ? "in" : "out",
+ status);
+
+ while (!list_empty(&ep->queue)) {
+ req = list_first_entry(&ep->queue, struct usbf_req, queue);
+ usbf_ep_req_done(ep, req, status);
+ }
+
+ if (ep->id == 0)
+ usbf_ep0_fifo_flush(ep);
+ else
+ usbf_epn_fifo_flush(ep);
+}
+
+static bool usbf_ep_is_stalled(struct usbf_ep *ep)
+{
+ u32 ctrl;
+
+ if (ep->id == 0) {
+ ctrl = usbf_ep_reg_readl(ep, USBF_REG_EP0_CONTROL);
+ return (ctrl & USBF_EP0_STL) ? true : false;
+ }
+
+ ctrl = usbf_ep_reg_readl(ep, USBF_REG_EPN_CONTROL);
+ if (ep->is_in)
+ return (ctrl & USBF_EPN_ISTL) ? true : false;
+
+ return (ctrl & USBF_EPN_OSTL) ? true : false;
+}
+
+static int usbf_epn_start_queue(struct usbf_ep *epn)
+{
+ struct usbf_req *req;
+ int ret;
+
+ if (usbf_ep_is_stalled(epn))
+ return 0;
+
+ req = list_first_entry_or_null(&epn->queue, struct usbf_req, queue);
+
+ if (epn->is_in) {
+ if (req && !epn->is_processing) {
+ ret = epn->dma_regs ?
+ usbf_epn_dma_in(epn, req) :
+ usbf_epn_pio_in(epn, req);
+ if (ret != -EINPROGRESS) {
+ dev_err(epn->udc->dev,
+ "queued next request not in progress\n");
+ /* The request cannot be completed (ie
+ * ret == 0) on the first call.
+ * stall and nuke the endpoint
+ */
+ return ret ? ret : -EIO;
+ }
+ }
+ } else {
+ if (req) {
+ /* Clear ONAK to accept OUT tokens */
+ usbf_ep_reg_bitclr(epn, USBF_REG_EPN_CONTROL,
+ USBF_EPN_ONAK);
+
+ /* Enable interrupts */
+ usbf_ep_reg_bitset(epn, USBF_REG_EPN_INT_ENA,
+ USBF_EPN_OUT_INT | USBF_EPN_OUT_NULL_INT);
+ } else {
+ /* Disable incoming data and interrupt.
+ * They will be enable on next usb_eb_queue call
+ */
+ usbf_ep_reg_bitset(epn, USBF_REG_EPN_CONTROL,
+ USBF_EPN_ONAK);
+ usbf_ep_reg_bitclr(epn, USBF_REG_EPN_INT_ENA,
+ USBF_EPN_OUT_INT | USBF_EPN_OUT_NULL_INT);
+ }
+ }
+ return 0;
+}
+
+static int usbf_ep_process_queue(struct usbf_ep *ep)
+{
+ int (*usbf_ep_xfer)(struct usbf_ep *ep, struct usbf_req *req);
+ struct usbf_req *req;
+ int is_processing;
+ int ret;
+
+ if (ep->is_in) {
+ usbf_ep_xfer = usbf_ep0_pio_in;
+ if (ep->id) {
+ usbf_ep_xfer = ep->dma_regs ?
+ usbf_epn_dma_in : usbf_epn_pio_in;
+ }
+ } else {
+ usbf_ep_xfer = usbf_ep0_pio_out;
+ if (ep->id) {
+ usbf_ep_xfer = ep->dma_regs ?
+ usbf_epn_dma_out : usbf_epn_pio_out;
+ }
+ }
+
+ req = list_first_entry_or_null(&ep->queue, struct usbf_req, queue);
+ if (!req) {
+ dev_err(ep->udc->dev,
+ "no request available for ep%u %s process\n", ep->id,
+ ep->is_in ? "in" : "out");
+ return -ENOENT;
+ }
+
+ do {
+ /* Were going to read the FIFO for this current request.
+ * NAK any other incoming data to avoid a race condition if no
+ * more request are available.
+ */
+ if (!ep->is_in && ep->id != 0) {
+ usbf_ep_reg_bitset(ep, USBF_REG_EPN_CONTROL,
+ USBF_EPN_ONAK);
+ }
+
+ ret = usbf_ep_xfer(ep, req);
+ if (ret == -EINPROGRESS) {
+ if (!ep->is_in && ep->id != 0) {
+ /* The current request needs more data.
+ * Allow incoming data
+ */
+ usbf_ep_reg_bitclr(ep, USBF_REG_EPN_CONTROL,
+ USBF_EPN_ONAK);
+ }
+ return ret;
+ }
+
+ is_processing = ep->is_processing;
+ ep->is_processing = 1;
+ usbf_ep_req_done(ep, req, ret);
+ ep->is_processing = is_processing;
+
+ if (ret) {
+ /* An error was detected during the request transfer.
+ * Any pending DMA transfers were aborted by the
+ * usbf_ep_req_done() call.
+ * It's time to flush the fifo
+ */
+ if (ep->id == 0)
+ usbf_ep0_fifo_flush(ep);
+ else
+ usbf_epn_fifo_flush(ep);
+ }
+
+ req = list_first_entry_or_null(&ep->queue, struct usbf_req,
+ queue);
+
+ if (ep->is_in)
+ continue;
+
+ if (ep->id != 0) {
+ if (req) {
+ /* An other request is available.
+ * Allow incoming data
+ */
+ usbf_ep_reg_bitclr(ep, USBF_REG_EPN_CONTROL,
+ USBF_EPN_ONAK);
+ } else {
+ /* No request queued. Disable interrupts.
+ * They will be enabled on usb_ep_queue
+ */
+ usbf_ep_reg_bitclr(ep, USBF_REG_EPN_INT_ENA,
+ USBF_EPN_OUT_INT | USBF_EPN_OUT_NULL_INT);
+ }
+ }
+ /* Do not recall usbf_ep_xfer() */
+ return req ? -EINPROGRESS : 0;
+
+ } while (req);
+
+ return 0;
+}
+
+static void usbf_ep_stall(struct usbf_ep *ep, bool stall)
+{
+ struct usbf_req *first;
+
+ dev_dbg(ep->udc->dev, "ep%u %s %s\n", ep->id,
+ ep->is_in ? "in" : "out",
+ stall ? "stall" : "unstall");
+
+ if (ep->id == 0) {
+ if (stall)
+ usbf_ep_reg_bitset(ep, USBF_REG_EP0_CONTROL, USBF_EP0_STL);
+ else
+ usbf_ep_reg_bitclr(ep, USBF_REG_EP0_CONTROL, USBF_EP0_STL);
+ return;
+ }
+
+ if (stall) {
+ if (ep->is_in)
+ usbf_ep_reg_bitset(ep, USBF_REG_EPN_CONTROL,
+ USBF_EPN_ISTL);
+ else
+ usbf_ep_reg_bitset(ep, USBF_REG_EPN_CONTROL,
+ USBF_EPN_OSTL | USBF_EPN_OSTL_EN);
+ } else {
+ first = list_first_entry_or_null(&ep->queue, struct usbf_req, queue);
+ if (first && first->is_mapped) {
+ /* This can appear if the host halts an endpoint using
+ * SET_FEATURE and then un-halts the endpoint
+ */
+ usbf_epn_dma_abort(ep, first);
+ }
+ usbf_epn_fifo_flush(ep);
+ if (ep->is_in) {
+ usbf_ep_reg_clrset(ep, USBF_REG_EPN_CONTROL,
+ USBF_EPN_ISTL,
+ USBF_EPN_IPIDCLR);
+ } else {
+ usbf_ep_reg_clrset(ep, USBF_REG_EPN_CONTROL,
+ USBF_EPN_OSTL,
+ USBF_EPN_OSTL_EN | USBF_EPN_OPIDCLR);
+ }
+ usbf_epn_start_queue(ep);
+ }
+}
+
+static void usbf_ep0_enable(struct usbf_ep *ep0)
+{
+ usbf_ep_reg_writel(ep0, USBF_REG_EP0_CONTROL, USBF_EP0_INAK_EN | USBF_EP0_BCLR);
+
+ usbf_ep_reg_writel(ep0, USBF_REG_EP0_INT_ENA,
+ USBF_EP0_SETUP_EN | USBF_EP0_STG_START_EN | USBF_EP0_STG_END_EN |
+ USBF_EP0_OUT_EN | USBF_EP0_OUT_NULL_EN | USBF_EP0_IN_EN);
+
+ ep0->udc->ep0state = EP0_IDLE;
+ ep0->disabled = 0;
+
+ /* enable interrupts for the ep0 */
+ usbf_reg_bitset(ep0->udc, USBF_REG_USB_INT_ENA, USBF_USB_EPN_EN(0));
+}
+
+static int usbf_epn_enable(struct usbf_ep *epn)
+{
+ u32 base_addr;
+ u32 ctrl;
+
+ base_addr = usbf_ep_info[epn->id].base_addr;
+ usbf_ep_reg_writel(epn, USBF_REG_EPN_PCKT_ADRS,
+ USBF_EPN_BASEAD(base_addr) | USBF_EPN_MPKT(epn->ep.maxpacket));
+
+ /* OUT transfer interrupt are enabled during usb_ep_queue */
+ if (epn->is_in) {
+ /* Will be changed in DMA processing */
+ usbf_ep_reg_writel(epn, USBF_REG_EPN_INT_ENA, USBF_EPN_IN_EN);
+ }
+
+ /* Clear, set endpoint direction, set IN/OUT STL, and enable
+ * Send NAK for Data out as request are not queued yet
+ */
+ ctrl = USBF_EPN_EN | USBF_EPN_BCLR;
+ if (epn->is_in)
+ ctrl |= USBF_EPN_OSTL | USBF_EPN_OSTL_EN;
+ else
+ ctrl |= USBF_EPN_DIR0 | USBF_EPN_ISTL | USBF_EPN_OSTL_EN | USBF_EPN_ONAK;
+ usbf_ep_reg_writel(epn, USBF_REG_EPN_CONTROL, ctrl);
+
+ return 0;
+}
+
+static int usbf_ep_enable(struct usb_ep *_ep,
+ const struct usb_endpoint_descriptor *desc)
+{
+ struct usbf_ep *ep = container_of(_ep, struct usbf_ep, ep);
+ struct usbf_udc *udc = ep->udc;
+ unsigned long flags;
+ int ret;
+
+ if (ep->id == 0)
+ return -EINVAL;
+
+ if (!desc || desc->bDescriptorType != USB_DT_ENDPOINT)
+ return -EINVAL;
+
+ dev_dbg(ep->udc->dev, "ep%u %s mpkts %d\n", ep->id,
+ usb_endpoint_dir_in(desc) ? "in" : "out",
+ usb_endpoint_maxp(desc));
+
+ spin_lock_irqsave(&ep->udc->lock, flags);
+ ep->is_in = usb_endpoint_dir_in(desc);
+ ep->ep.maxpacket = usb_endpoint_maxp(desc);
+
+ ret = usbf_epn_enable(ep);
+ if (ret)
+ goto end;
+
+ ep->disabled = 0;
+
+ /* enable interrupts for this endpoint */
+ usbf_reg_bitset(udc, USBF_REG_USB_INT_ENA, USBF_USB_EPN_EN(ep->id));
+
+ /* enable DMA interrupt at bridge level if DMA is used */
+ if (ep->dma_regs) {
+ ep->bridge_on_dma_end = NULL;
+ usbf_reg_bitset(udc, USBF_REG_AHBBINTEN,
+ USBF_SYS_DMA_ENDINTEN_EPN(ep->id));
+ }
+
+ ret = 0;
+end:
+ spin_unlock_irqrestore(&ep->udc->lock, flags);
+ return ret;
+}
+
+static int usbf_epn_disable(struct usbf_ep *epn)
+{
+ /* Disable interrupts */
+ usbf_ep_reg_writel(epn, USBF_REG_EPN_INT_ENA, 0);
+
+ /* Disable endpoint */
+ usbf_ep_reg_bitclr(epn, USBF_REG_EPN_CONTROL, USBF_EPN_EN);
+
+ /* remove anything that was pending */
+ usbf_ep_nuke(epn, -ESHUTDOWN);
+
+ return 0;
+}
+
+static int usbf_ep_disable(struct usb_ep *_ep)
+{
+ struct usbf_ep *ep = container_of(_ep, struct usbf_ep, ep);
+ struct usbf_udc *udc = ep->udc;
+ unsigned long flags;
+ int ret;
+
+ if (ep->id == 0)
+ return -EINVAL;
+
+ dev_dbg(ep->udc->dev, "ep%u %s mpkts %d\n", ep->id,
+ ep->is_in ? "in" : "out", ep->ep.maxpacket);
+
+ spin_lock_irqsave(&ep->udc->lock, flags);
+ ep->disabled = 1;
+ /* Disable DMA interrupt */
+ if (ep->dma_regs) {
+ usbf_reg_bitclr(udc, USBF_REG_AHBBINTEN,
+ USBF_SYS_DMA_ENDINTEN_EPN(ep->id));
+ ep->bridge_on_dma_end = NULL;
+ }
+ /* disable interrupts for this endpoint */
+ usbf_reg_bitclr(udc, USBF_REG_USB_INT_ENA, USBF_USB_EPN_EN(ep->id));
+ /* and the endpoint itself */
+ ret = usbf_epn_disable(ep);
+ spin_unlock_irqrestore(&ep->udc->lock, flags);
+
+ return ret;
+}
+
+static int usbf_ep0_queue(struct usbf_ep *ep0, struct usbf_req *req,
+ gfp_t gfp_flags)
+{
+ int ret;
+
+ req->req.actual = 0;
+ req->req.status = -EINPROGRESS;
+ req->is_zero_sent = 0;
+
+ list_add_tail(&req->queue, &ep0->queue);
+
+ if (ep0->udc->ep0state == EP0_IN_STATUS_START_PHASE)
+ return 0;
+
+ if (!ep0->is_in)
+ return 0;
+
+ if (ep0->udc->ep0state == EP0_IN_STATUS_PHASE) {
+ if (req->req.length) {
+ dev_err(ep0->udc->dev,
+ "request lng %u for ep0 in status phase\n",
+ req->req.length);
+ return -EINVAL;
+ }
+ ep0->delayed_status = 0;
+ }
+ if (!ep0->is_processing) {
+ ret = usbf_ep0_pio_in(ep0, req);
+ if (ret != -EINPROGRESS) {
+ dev_err(ep0->udc->dev,
+ "queued request not in progress\n");
+ /* The request cannot be completed (ie
+ * ret == 0) on the first call
+ */
+ return ret ? ret : -EIO;
+ }
+ }
+
+ return 0;
+}
+
+static int usbf_epn_queue(struct usbf_ep *ep, struct usbf_req *req,
+ gfp_t gfp_flags)
+{
+ int was_empty;
+ int ret;
+
+ if (ep->disabled) {
+ dev_err(ep->udc->dev, "ep%u request queue while disable\n",
+ ep->id);
+ return -ESHUTDOWN;
+ }
+
+ req->req.actual = 0;
+ req->req.status = -EINPROGRESS;
+ req->is_zero_sent = 0;
+ req->xfer_step = USBF_XFER_START;
+
+ was_empty = list_empty(&ep->queue);
+ list_add_tail(&req->queue, &ep->queue);
+ if (was_empty) {
+ ret = usbf_epn_start_queue(ep);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+static int usbf_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
+ gfp_t gfp_flags)
+{
+ struct usbf_req *req = container_of(_req, struct usbf_req, req);
+ struct usbf_ep *ep = container_of(_ep, struct usbf_ep, ep);
+ struct usbf_udc *udc = ep->udc;
+ unsigned long flags;
+ int ret;
+
+ if (!_req || !_req->buf)
+ return -EINVAL;
+
+ if (!udc || !udc->driver)
+ return -EINVAL;
+
+ dev_dbg(ep->udc->dev, "ep%u %s req queue length %u, zero %u, short_not_ok %u\n",
+ ep->id, ep->is_in ? "in" : "out",
+ req->req.length, req->req.zero, req->req.short_not_ok);
+
+ spin_lock_irqsave(&ep->udc->lock, flags);
+ if (ep->id == 0)
+ ret = usbf_ep0_queue(ep, req, gfp_flags);
+ else
+ ret = usbf_epn_queue(ep, req, gfp_flags);
+ spin_unlock_irqrestore(&ep->udc->lock, flags);
+ return ret;
+}
+
+static int usbf_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
+{
+ struct usbf_req *req = container_of(_req, struct usbf_req, req);
+ struct usbf_ep *ep = container_of(_ep, struct usbf_ep, ep);
+ unsigned long flags;
+ int is_processing;
+ int first;
+ int ret;
+
+ spin_lock_irqsave(&ep->udc->lock, flags);
+
+ dev_dbg(ep->udc->dev, "ep%u %s req dequeue length %u/%u\n",
+ ep->id, ep->is_in ? "in" : "out",
+ req->req.actual, req->req.length);
+
+ first = list_is_first(&req->queue, &ep->queue);
+
+ /* Complete the request but avoid any operation that could be done
+ * if a new request is queued during the request completion
+ */
+ is_processing = ep->is_processing;
+ ep->is_processing = 1;
+ usbf_ep_req_done(ep, req, -ECONNRESET);
+ ep->is_processing = is_processing;
+
+ if (first) {
+ /* The first item in the list was dequeued.
+ * This item could already be submitted to the hardware.
+ * So, flush the fifo
+ */
+ if (ep->id)
+ usbf_epn_fifo_flush(ep);
+ else
+ usbf_ep0_fifo_flush(ep);
+ }
+
+ if (ep->id == 0) {
+ /* We dequeue a request on ep0. On this endpoint, we can have
+ * 1 request related to the data stage and/or 1 request
+ * related to the status stage.
+ * We dequeue one of them and so the USB control transaction
+ * is no more coherent. The simple way to be consistent after
+ * dequeuing is to stall and nuke the endpoint and wait the
+ * next SETUP packet.
+ */
+ usbf_ep_stall(ep, true);
+ usbf_ep_nuke(ep, -ECONNRESET);
+ ep->udc->ep0state = EP0_IDLE;
+ goto end;
+ }
+
+ if (!first)
+ goto end;
+
+ ret = usbf_epn_start_queue(ep);
+ if (ret) {
+ usbf_ep_stall(ep, true);
+ usbf_ep_nuke(ep, -EIO);
+ }
+end:
+ spin_unlock_irqrestore(&ep->udc->lock, flags);
+ return 0;
+}
+
+static struct usb_request *usbf_ep_alloc_request(struct usb_ep *_ep,
+ gfp_t gfp_flags)
+{
+ struct usbf_req *req;
+
+ if (!_ep)
+ return NULL;
+
+ req = kzalloc(sizeof(*req), gfp_flags);
+ if (!req)
+ return NULL;
+
+ INIT_LIST_HEAD(&req->queue);
+
+ return &req->req;
+}
+
+static void usbf_ep_free_request(struct usb_ep *_ep, struct usb_request *_req)
+{
+ struct usbf_req *req;
+ unsigned long flags;
+ struct usbf_ep *ep;
+
+ if (!_ep || !_req)
+ return;
+
+ req = container_of(_req, struct usbf_req, req);
+ ep = container_of(_ep, struct usbf_ep, ep);
+
+ spin_lock_irqsave(&ep->udc->lock, flags);
+ list_del_init(&req->queue);
+ spin_unlock_irqrestore(&ep->udc->lock, flags);
+ kfree(req);
+}
+
+static int usbf_ep_set_halt(struct usb_ep *_ep, int halt)
+{
+ struct usbf_ep *ep = container_of(_ep, struct usbf_ep, ep);
+ unsigned long flags;
+ int ret;
+
+ if (ep->id == 0)
+ return -EINVAL;
+
+ spin_lock_irqsave(&ep->udc->lock, flags);
+
+ if (!list_empty(&ep->queue)) {
+ ret = -EAGAIN;
+ goto end;
+ }
+
+ usbf_ep_stall(ep, halt);
+ if (!halt)
+ ep->is_wedged = 0;
+
+ ret = 0;
+end:
+ spin_unlock_irqrestore(&ep->udc->lock, flags);
+
+ return ret;
+}
+
+static int usbf_ep_set_wedge(struct usb_ep *_ep)
+{
+ struct usbf_ep *ep = container_of(_ep, struct usbf_ep, ep);
+ unsigned long flags;
+ int ret;
+
+ if (ep->id == 0)
+ return -EINVAL;
+
+ spin_lock_irqsave(&ep->udc->lock, flags);
+ if (!list_empty(&ep->queue)) {
+ ret = -EAGAIN;
+ goto end;
+ }
+ usbf_ep_stall(ep, 1);
+ ep->is_wedged = 1;
+
+ ret = 0;
+end:
+ spin_unlock_irqrestore(&ep->udc->lock, flags);
+ return ret;
+}
+
+static struct usb_ep_ops usbf_ep_ops = {
+ .enable = usbf_ep_enable,
+ .disable = usbf_ep_disable,
+ .queue = usbf_ep_queue,
+ .dequeue = usbf_ep_dequeue,
+ .set_halt = usbf_ep_set_halt,
+ .set_wedge = usbf_ep_set_wedge,
+ .alloc_request = usbf_ep_alloc_request,
+ .free_request = usbf_ep_free_request,
+};
+
+static void usbf_ep0_req_complete(struct usb_ep *_ep, struct usb_request *_req)
+{
+}
+
+static void usbf_ep0_fill_req(struct usbf_ep *ep0, struct usbf_req *req,
+ void *buf, unsigned int length,
+ void (*complete)(struct usb_ep *_ep,
+ struct usb_request *_req))
+{
+ if (buf && length)
+ memcpy(ep0->udc->ep0_buf, buf, length);
+
+ req->req.buf = ep0->udc->ep0_buf;
+ req->req.length = length;
+ req->req.dma = 0;
+ req->req.zero = true;
+ req->req.complete = complete ? complete : usbf_ep0_req_complete;
+ req->req.status = -EINPROGRESS;
+ req->req.context = NULL;
+ req->req.actual = 0;
+}
+
+static struct usbf_ep *usbf_get_ep_by_addr(struct usbf_udc *udc, u8 address)
+{
+ struct usbf_ep *ep;
+ unsigned int i;
+
+ if ((address & USB_ENDPOINT_NUMBER_MASK) == 0)
+ return &udc->ep[0];
+
+ for (i = 1; i < ARRAY_SIZE(udc->ep); i++) {
+ ep = &udc->ep[i];
+
+ if (!ep->ep.desc)
+ continue;
+
+ if (ep->ep.desc->bEndpointAddress == address)
+ return ep;
+ }
+
+ return NULL;
+}
+
+static int usbf_req_delegate(struct usbf_udc *udc,
+ const struct usb_ctrlrequest *ctrlrequest)
+{
+ int ret;
+
+ spin_unlock(&udc->lock);
+ ret = udc->driver->setup(&udc->gadget, ctrlrequest);
+ spin_lock(&udc->lock);
+ if (ret < 0) {
+ dev_dbg(udc->dev, "udc driver setup failed %d\n", ret);
+ return ret;
+ }
+ if (ret == USB_GADGET_DELAYED_STATUS) {
+ dev_dbg(udc->dev, "delayed status set\n");
+ udc->ep[0].delayed_status = 1;
+ return 0;
+ }
+ return ret;
+}
+
+static int usbf_req_get_status(struct usbf_udc *udc,
+ const struct usb_ctrlrequest *ctrlrequest)
+{
+ struct usbf_ep *ep;
+ u16 status_data;
+ u16 wLength;
+ u16 wValue;
+ u16 wIndex;
+
+ wValue = le16_to_cpu(ctrlrequest->wValue);
+ wLength = le16_to_cpu(ctrlrequest->wLength);
+ wIndex = le16_to_cpu(ctrlrequest->wIndex);
+
+ switch (ctrlrequest->bRequestType) {
+ case USB_DIR_IN | USB_RECIP_DEVICE | USB_TYPE_STANDARD:
+ if ((wValue != 0) || (wIndex != 0) || (wLength != 2))
+ goto delegate;
+
+ status_data = 0;
+ if (udc->gadget.is_selfpowered)
+ status_data |= BIT(USB_DEVICE_SELF_POWERED);
+
+ if (udc->is_remote_wakeup)
+ status_data |= BIT(USB_DEVICE_REMOTE_WAKEUP);
+
+ break;
+
+ case USB_DIR_IN | USB_RECIP_ENDPOINT | USB_TYPE_STANDARD:
+ if ((wValue != 0) || (wLength != 2))
+ goto delegate;
+
+ ep = usbf_get_ep_by_addr(udc, wIndex);
+ if (!ep)
+ return -EINVAL;
+
+ status_data = 0;
+ if (usbf_ep_is_stalled(ep))
+ status_data |= cpu_to_le16(1);
+ break;
+
+ case USB_DIR_IN | USB_RECIP_INTERFACE | USB_TYPE_STANDARD:
+ if ((wValue != 0) || (wLength != 2))
+ goto delegate;
+ status_data = 0;
+ break;
+
+ default:
+ goto delegate;
+ }
+
+ usbf_ep0_fill_req(&udc->ep[0], &udc->setup_reply, &status_data,
+ sizeof(status_data), NULL);
+ usbf_ep0_queue(&udc->ep[0], &udc->setup_reply, GFP_ATOMIC);
+
+ return 0;
+
+delegate:
+ return usbf_req_delegate(udc, ctrlrequest);
+}
+
+static int usbf_req_clear_set_feature(struct usbf_udc *udc,
+ const struct usb_ctrlrequest *ctrlrequest,
+ bool is_set)
+{
+ struct usbf_ep *ep;
+ u16 wLength;
+ u16 wValue;
+ u16 wIndex;
+
+ wValue = le16_to_cpu(ctrlrequest->wValue);
+ wLength = le16_to_cpu(ctrlrequest->wLength);
+ wIndex = le16_to_cpu(ctrlrequest->wIndex);
+
+ switch (ctrlrequest->bRequestType) {
+ case USB_DIR_OUT | USB_RECIP_DEVICE:
+ if ((wIndex != 0) || (wLength != 0))
+ goto delegate;
+
+ if (wValue != cpu_to_le16(USB_DEVICE_REMOTE_WAKEUP))
+ goto delegate;
+
+ udc->is_remote_wakeup = is_set;
+ break;
+
+ case USB_DIR_OUT | USB_RECIP_ENDPOINT:
+ if (wLength != 0)
+ goto delegate;
+
+ ep = usbf_get_ep_by_addr(udc, wIndex);
+ if (!ep)
+ return -EINVAL;
+
+ if ((ep->id == 0) && is_set) {
+ /* Endpoint 0 cannot be halted (stalled)
+ * Returning an error code leads to a STALL on this ep0
+ * but keep the automate in a consistent state.
+ */
+ return -EINVAL;
+ }
+ if (ep->is_wedged && !is_set) {
+ /* Ignore CLEAR_FEATURE(HALT ENDPOINT) when the
+ * endpoint is wedged
+ */
+ break;
+ }
+ usbf_ep_stall(ep, is_set);
+ break;
+
+ default:
+ goto delegate;
+ }
+
+ return 0;
+
+delegate:
+ return usbf_req_delegate(udc, ctrlrequest);
+}
+
+static void usbf_ep0_req_set_address_complete(struct usb_ep *_ep,
+ struct usb_request *_req)
+{
+ struct usbf_ep *ep = container_of(_ep, struct usbf_ep, ep);
+
+ /* The status phase of the SET_ADDRESS request is completed ... */
+ if (_req->status == 0) {
+ /* ... without any errors -> Signaled the state to the core. */
+ usb_gadget_set_state(&ep->udc->gadget, USB_STATE_ADDRESS);
+ }
+
+ /* In case of request failure, there is no need to revert the address
+ * value set to the hardware as the hardware will take care of the
+ * value only if the status stage is completed normally.
+ */
+}
+
+static int usbf_req_set_address(struct usbf_udc *udc,
+ const struct usb_ctrlrequest *ctrlrequest)
+{
+ u16 wLength;
+ u16 wValue;
+ u16 wIndex;
+ u32 addr;
+
+ wValue = le16_to_cpu(ctrlrequest->wValue);
+ wLength = le16_to_cpu(ctrlrequest->wLength);
+ wIndex = le16_to_cpu(ctrlrequest->wIndex);
+
+ if (ctrlrequest->bRequestType != (USB_DIR_OUT | USB_RECIP_DEVICE))
+ goto delegate;
+
+ if ((wIndex != 0) || (wLength != 0) || (wValue > 127))
+ return -EINVAL;
+
+ addr = wValue;
+ /* The hardware will take care of this USB address after the status
+ * stage of the SET_ADDRESS request is completed normally.
+ * It is safe to write it now
+ */
+ usbf_reg_writel(udc, USBF_REG_USB_ADDRESS, USBF_USB_SET_USB_ADDR(addr));
+
+ /* Queued the status request */
+ usbf_ep0_fill_req(&udc->ep[0], &udc->setup_reply, NULL, 0,
+ usbf_ep0_req_set_address_complete);
+ usbf_ep0_queue(&udc->ep[0], &udc->setup_reply, GFP_ATOMIC);
+
+ return 0;
+
+delegate:
+ return usbf_req_delegate(udc, ctrlrequest);
+}
+
+static int usbf_req_set_configuration(struct usbf_udc *udc,
+ const struct usb_ctrlrequest *ctrlrequest)
+{
+ u16 wLength;
+ u16 wValue;
+ u16 wIndex;
+ int ret;
+
+ ret = usbf_req_delegate(udc, ctrlrequest);
+ if (ret)
+ return ret;
+
+ wValue = le16_to_cpu(ctrlrequest->wValue);
+ wLength = le16_to_cpu(ctrlrequest->wLength);
+ wIndex = le16_to_cpu(ctrlrequest->wIndex);
+
+ if ((ctrlrequest->bRequestType != (USB_DIR_OUT | USB_RECIP_DEVICE)) ||
+ (wIndex != 0) || (wLength != 0)) {
+ /* No error detected by driver->setup() but it is not an USB2.0
+ * Ch9 SET_CONFIGURATION.
+ * Nothing more to do
+ */
+ return 0;
+ }
+
+ if (wValue & 0x00FF) {
+ usbf_reg_bitset(udc, USBF_REG_USB_CONTROL, USBF_USB_CONF);
+ } else {
+ usbf_reg_bitclr(udc, USBF_REG_USB_CONTROL, USBF_USB_CONF);
+ /* Go back to Address State */
+ spin_unlock(&udc->lock);
+ usb_gadget_set_state(&udc->gadget, USB_STATE_ADDRESS);
+ spin_lock(&udc->lock);
+ }
+
+ return 0;
+}
+
+static int usbf_handle_ep0_setup(struct usbf_ep *ep0)
+{
+ union {
+ struct usb_ctrlrequest ctrlreq;
+ u32 raw[2];
+ } crq;
+ struct usbf_udc *udc = ep0->udc;
+ int ret;
+
+ /* Read setup data (ie the USB control request) */
+ crq.raw[0] = usbf_reg_readl(udc, USBF_REG_SETUP_DATA0);
+ crq.raw[1] = usbf_reg_readl(udc, USBF_REG_SETUP_DATA1);
+
+ dev_dbg(ep0->udc->dev,
+ "ep0 req%02x.%02x, wValue 0x%04x, wIndex 0x%04x, wLength 0x%04x\n",
+ crq.ctrlreq.bRequestType, crq.ctrlreq.bRequest,
+ crq.ctrlreq.wValue, crq.ctrlreq.wIndex, crq.ctrlreq.wLength);
+
+ /* Set current EP0 state according to the received request */
+ if (crq.ctrlreq.wLength) {
+ if (crq.ctrlreq.bRequestType & USB_DIR_IN) {
+ udc->ep0state = EP0_IN_DATA_PHASE;
+ usbf_ep_reg_clrset(ep0, USBF_REG_EP0_CONTROL,
+ USBF_EP0_INAK,
+ USBF_EP0_INAK_EN);
+ ep0->is_in = 1;
+ } else {
+ udc->ep0state = EP0_OUT_DATA_PHASE;
+ usbf_ep_reg_bitclr(ep0, USBF_REG_EP0_CONTROL,
+ USBF_EP0_ONAK);
+ ep0->is_in = 0;
+ }
+ } else {
+ udc->ep0state = EP0_IN_STATUS_START_PHASE;
+ ep0->is_in = 1;
+ }
+
+ /* We starts a new control transfer -> Clear the delayed status flag */
+ ep0->delayed_status = 0;
+
+ if ((crq.ctrlreq.bRequestType & USB_TYPE_MASK) != USB_TYPE_STANDARD) {
+ /* This is not a USB standard request -> delelate */
+ goto delegate;
+ }
+
+ switch (crq.ctrlreq.bRequest) {
+ case USB_REQ_GET_STATUS:
+ ret = usbf_req_get_status(udc, &crq.ctrlreq);
+ break;
+
+ case USB_REQ_CLEAR_FEATURE:
+ ret = usbf_req_clear_set_feature(udc, &crq.ctrlreq, false);
+ break;
+
+ case USB_REQ_SET_FEATURE:
+ ret = usbf_req_clear_set_feature(udc, &crq.ctrlreq, true);
+ break;
+
+ case USB_REQ_SET_ADDRESS:
+ ret = usbf_req_set_address(udc, &crq.ctrlreq);
+ break;
+
+ case USB_REQ_SET_CONFIGURATION:
+ ret = usbf_req_set_configuration(udc, &crq.ctrlreq);
+ break;
+
+ default:
+ goto delegate;
+ }
+
+ return ret;
+
+delegate:
+ return usbf_req_delegate(udc, &crq.ctrlreq);
+}
+
+static int usbf_handle_ep0_data_status(struct usbf_ep *ep0,
+ const char *ep0state_name,
+ enum usbf_ep0state next_ep0state)
+{
+ struct usbf_udc *udc = ep0->udc;
+ int ret;
+
+ ret = usbf_ep_process_queue(ep0);
+ switch (ret) {
+ case -ENOENT:
+ dev_err(udc->dev,
+ "no request available for ep0 %s phase\n",
+ ep0state_name);
+ break;
+ case -EINPROGRESS:
+ /* More data needs to be processed */
+ ret = 0;
+ break;
+ case 0:
+ /* All requests in the queue are processed */
+ udc->ep0state = next_ep0state;
+ break;
+ default:
+ dev_err(udc->dev,
+ "process queue failed for ep0 %s phase (%d)\n",
+ ep0state_name, ret);
+ break;
+ }
+ return ret;
+}
+
+static int usbf_handle_ep0_out_status_start(struct usbf_ep *ep0)
+{
+ struct usbf_udc *udc = ep0->udc;
+ struct usbf_req *req;
+
+ usbf_ep_reg_clrset(ep0, USBF_REG_EP0_CONTROL,
+ USBF_EP0_ONAK,
+ USBF_EP0_PIDCLR);
+ ep0->is_in = 0;
+
+ req = list_first_entry_or_null(&ep0->queue, struct usbf_req, queue);
+ if (!req) {
+ usbf_ep0_fill_req(ep0, &udc->setup_reply, NULL, 0, NULL);
+ usbf_ep0_queue(ep0, &udc->setup_reply, GFP_ATOMIC);
+ } else {
+ if (req->req.length) {
+ dev_err(udc->dev,
+ "queued request length %u for ep0 out status phase\n",
+ req->req.length);
+ }
+ }
+ udc->ep0state = EP0_OUT_STATUS_PHASE;
+ return 0;
+}
+
+static int usbf_handle_ep0_in_status_start(struct usbf_ep *ep0)
+{
+ struct usbf_udc *udc = ep0->udc;
+ struct usbf_req *req;
+ int ret;
+
+ usbf_ep_reg_clrset(ep0, USBF_REG_EP0_CONTROL,
+ USBF_EP0_INAK,
+ USBF_EP0_INAK_EN | USBF_EP0_PIDCLR);
+ ep0->is_in = 1;
+
+ /* Queue request for status if needed */
+ req = list_first_entry_or_null(&ep0->queue, struct usbf_req, queue);
+ if (!req) {
+ if (ep0->delayed_status) {
+ dev_dbg(ep0->udc->dev,
+ "EP0_IN_STATUS_START_PHASE ep0->delayed_status set\n");
+ udc->ep0state = EP0_IN_STATUS_PHASE;
+ return 0;
+ }
+
+ usbf_ep0_fill_req(ep0, &udc->setup_reply, NULL,
+ 0, NULL);
+ usbf_ep0_queue(ep0, &udc->setup_reply,
+ GFP_ATOMIC);
+
+ req = list_first_entry_or_null(&ep0->queue, struct usbf_req, queue);
+ } else {
+ if (req->req.length) {
+ dev_err(udc->dev,
+ "queued request length %u for ep0 in status phase\n",
+ req->req.length);
+ }
+ }
+
+ ret = usbf_ep0_pio_in(ep0, req);
+ if (ret != -EINPROGRESS) {
+ usbf_ep_req_done(ep0, req, ret);
+ udc->ep0state = EP0_IN_STATUS_END_PHASE;
+ return 0;
+ }
+
+ udc->ep0state = EP0_IN_STATUS_PHASE;
+ return 0;
+}
+
+static void usbf_ep0_interrupt(struct usbf_ep *ep0)
+{
+ struct usbf_udc *udc = ep0->udc;
+ u32 sts, prev_sts;
+ int prev_ep0state;
+ int ret;
+
+ ep0->status = usbf_ep_reg_readl(ep0, USBF_REG_EP0_STATUS);
+ usbf_ep_reg_writel(ep0, USBF_REG_EP0_STATUS, ~ep0->status);
+
+ dev_dbg(ep0->udc->dev, "ep0 status=0x%08x, enable=%08x\n, ctrl=0x%08x\n",
+ ep0->status,
+ usbf_ep_reg_readl(ep0, USBF_REG_EP0_INT_ENA),
+ usbf_ep_reg_readl(ep0, USBF_REG_EP0_CONTROL));
+
+ sts = ep0->status & (USBF_EP0_SETUP_INT | USBF_EP0_IN_INT | USBF_EP0_OUT_INT |
+ USBF_EP0_OUT_NULL_INT | USBF_EP0_STG_START_INT |
+ USBF_EP0_STG_END_INT);
+
+ ret = 0;
+ do {
+ dev_dbg(ep0->udc->dev, "udc->ep0state=%d\n", udc->ep0state);
+
+ prev_sts = sts;
+ prev_ep0state = udc->ep0state;
+ switch (udc->ep0state) {
+ case EP0_IDLE:
+ if (!(sts & USBF_EP0_SETUP_INT))
+ break;
+
+ sts &= ~USBF_EP0_SETUP_INT;
+ dev_dbg(ep0->udc->dev, "ep0 handle setup\n");
+ ret = usbf_handle_ep0_setup(ep0);
+ break;
+
+ case EP0_IN_DATA_PHASE:
+ if (!(sts & USBF_EP0_IN_INT))
+ break;
+
+ sts &= ~USBF_EP0_IN_INT;
+ dev_dbg(ep0->udc->dev, "ep0 handle in data phase\n");
+ ret = usbf_handle_ep0_data_status(ep0,
+ "in data", EP0_OUT_STATUS_START_PHASE);
+ break;
+
+ case EP0_OUT_STATUS_START_PHASE:
+ if (!(sts & USBF_EP0_STG_START_INT))
+ break;
+
+ sts &= ~USBF_EP0_STG_START_INT;
+ dev_dbg(ep0->udc->dev, "ep0 handle out status start phase\n");
+ ret = usbf_handle_ep0_out_status_start(ep0);
+ break;
+
+ case EP0_OUT_STATUS_PHASE:
+ if (!(sts & (USBF_EP0_OUT_INT | USBF_EP0_OUT_NULL_INT)))
+ break;
+
+ sts &= ~(USBF_EP0_OUT_INT | USBF_EP0_OUT_NULL_INT);
+ dev_dbg(ep0->udc->dev, "ep0 handle out status phase\n");
+ ret = usbf_handle_ep0_data_status(ep0,
+ "out status",
+ EP0_OUT_STATUS_END_PHASE);
+ break;
+
+ case EP0_OUT_STATUS_END_PHASE:
+ if (!(sts & (USBF_EP0_STG_END_INT | USBF_EP0_SETUP_INT)))
+ break;
+
+ sts &= ~USBF_EP0_STG_END_INT;
+ dev_dbg(ep0->udc->dev, "ep0 handle out status end phase\n");
+ udc->ep0state = EP0_IDLE;
+ break;
+
+ case EP0_OUT_DATA_PHASE:
+ if (!(sts & (USBF_EP0_OUT_INT | USBF_EP0_OUT_NULL_INT)))
+ break;
+
+ sts &= ~(USBF_EP0_OUT_INT | USBF_EP0_OUT_NULL_INT);
+ dev_dbg(ep0->udc->dev, "ep0 handle out data phase\n");
+ ret = usbf_handle_ep0_data_status(ep0,
+ "out data", EP0_IN_STATUS_START_PHASE);
+ break;
+
+ case EP0_IN_STATUS_START_PHASE:
+ if (!(sts & USBF_EP0_STG_START_INT))
+ break;
+
+ sts &= ~USBF_EP0_STG_START_INT;
+ dev_dbg(ep0->udc->dev, "ep0 handle in status start phase\n");
+ ret = usbf_handle_ep0_in_status_start(ep0);
+ break;
+
+ case EP0_IN_STATUS_PHASE:
+ if (!(sts & USBF_EP0_IN_INT))
+ break;
+
+ sts &= ~USBF_EP0_IN_INT;
+ dev_dbg(ep0->udc->dev, "ep0 handle in status phase\n");
+ ret = usbf_handle_ep0_data_status(ep0,
+ "in status", EP0_IN_STATUS_END_PHASE);
+ break;
+
+ case EP0_IN_STATUS_END_PHASE:
+ if (!(sts & (USBF_EP0_STG_END_INT | USBF_EP0_SETUP_INT)))
+ break;
+
+ sts &= ~USBF_EP0_STG_END_INT;
+ dev_dbg(ep0->udc->dev, "ep0 handle in status end\n");
+ udc->ep0state = EP0_IDLE;
+ break;
+
+ default:
+ udc->ep0state = EP0_IDLE;
+ break;
+ }
+
+ if (ret) {
+ dev_dbg(ep0->udc->dev, "ep0 failed (%d)\n", ret);
+ /* Failure -> stall.
+ * This stall state will be automatically cleared when
+ * the IP receives the next SETUP packet
+ */
+ usbf_ep_stall(ep0, true);
+
+ /* Remove anything that was pending */
+ usbf_ep_nuke(ep0, -EPROTO);
+
+ udc->ep0state = EP0_IDLE;
+ break;
+ }
+
+ } while ((prev_ep0state != udc->ep0state) || (prev_sts != sts));
+
+ dev_dbg(ep0->udc->dev, "ep0 done udc->ep0state=%d, status=0x%08x. next=0x%08x\n",
+ udc->ep0state, sts,
+ usbf_ep_reg_readl(ep0, USBF_REG_EP0_STATUS));
+}
+
+static void usbf_epn_process_queue(struct usbf_ep *epn)
+{
+ int ret;
+
+ ret = usbf_ep_process_queue(epn);
+ switch (ret) {
+ case -ENOENT:
+ dev_warn(epn->udc->dev, "ep%u %s, no request available\n",
+ epn->id, epn->is_in ? "in" : "out");
+ break;
+ case -EINPROGRESS:
+ /* More data needs to be processed */
+ ret = 0;
+ break;
+ case 0:
+ /* All requests in the queue are processed */
+ break;
+ default:
+ dev_err(epn->udc->dev, "ep%u %s, process queue failed (%d)\n",
+ epn->id, epn->is_in ? "in" : "out", ret);
+ break;
+ }
+
+ if (ret) {
+ dev_dbg(epn->udc->dev, "ep%u %s failed (%d)\n", epn->id,
+ epn->is_in ? "in" : "out", ret);
+ usbf_ep_stall(epn, true);
+ usbf_ep_nuke(epn, ret);
+ }
+}
+
+static void usbf_epn_interrupt(struct usbf_ep *epn)
+{
+ u32 sts;
+ u32 ena;
+
+ epn->status = usbf_ep_reg_readl(epn, USBF_REG_EPN_STATUS);
+ ena = usbf_ep_reg_readl(epn, USBF_REG_EPN_INT_ENA);
+ usbf_ep_reg_writel(epn, USBF_REG_EPN_STATUS, ~(epn->status & ena));
+
+ dev_dbg(epn->udc->dev, "ep%u %s status=0x%08x, enable=%08x\n, ctrl=0x%08x\n",
+ epn->id, epn->is_in ? "in" : "out", epn->status, ena,
+ usbf_ep_reg_readl(epn, USBF_REG_EPN_CONTROL));
+
+ if (epn->disabled) {
+ dev_warn(epn->udc->dev, "ep%u %s, interrupt while disabled\n",
+ epn->id, epn->is_in ? "in" : "out");
+ return;
+ }
+
+ sts = epn->status & ena;
+
+ if (sts & (USBF_EPN_IN_END_INT | USBF_EPN_IN_INT)) {
+ sts &= ~(USBF_EPN_IN_END_INT | USBF_EPN_IN_INT);
+ dev_dbg(epn->udc->dev, "ep%u %s process queue (in interrupts)\n",
+ epn->id, epn->is_in ? "in" : "out");
+ usbf_epn_process_queue(epn);
+ }
+
+ if (sts & (USBF_EPN_OUT_END_INT | USBF_EPN_OUT_INT | USBF_EPN_OUT_NULL_INT)) {
+ sts &= ~(USBF_EPN_OUT_END_INT | USBF_EPN_OUT_INT | USBF_EPN_OUT_NULL_INT);
+ dev_dbg(epn->udc->dev, "ep%u %s process queue (out interrupts)\n",
+ epn->id, epn->is_in ? "in" : "out");
+ usbf_epn_process_queue(epn);
+ }
+
+ dev_dbg(epn->udc->dev, "ep%u %s done status=0x%08x. next=0x%08x\n",
+ epn->id, epn->is_in ? "in" : "out",
+ sts, usbf_ep_reg_readl(epn, USBF_REG_EPN_STATUS));
+}
+
+static void usbf_ep_reset(struct usbf_ep *ep)
+{
+ ep->status = 0;
+ /* Remove anything that was pending */
+ usbf_ep_nuke(ep, -ESHUTDOWN);
+}
+
+static void usbf_reset(struct usbf_udc *udc)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(udc->ep); i++) {
+ if (udc->ep[i].disabled)
+ continue;
+
+ usbf_ep_reset(&udc->ep[i]);
+ }
+
+ if (usbf_reg_readl(udc, USBF_REG_USB_STATUS) & USBF_USB_SPEED_MODE)
+ udc->gadget.speed = USB_SPEED_HIGH;
+ else
+ udc->gadget.speed = USB_SPEED_FULL;
+
+ /* Remote wakeup feature must be disabled on USB bus reset */
+ udc->is_remote_wakeup = false;
+
+ /* Enable endpoint zero */
+ usbf_ep0_enable(&udc->ep[0]);
+
+ if (udc->driver) {
+ /* Signal the reset */
+ spin_unlock(&udc->lock);
+ usb_gadget_udc_reset(&udc->gadget, udc->driver);
+ spin_lock(&udc->lock);
+ }
+}
+
+static void usbf_driver_suspend(struct usbf_udc *udc)
+{
+ if (udc->is_usb_suspended) {
+ dev_dbg(udc->dev, "already suspended\n");
+ return;
+ }
+
+ dev_dbg(udc->dev, "do usb suspend\n");
+ udc->is_usb_suspended = true;
+
+ if (udc->driver && udc->driver->suspend) {
+ spin_unlock(&udc->lock);
+ udc->driver->suspend(&udc->gadget);
+ spin_lock(&udc->lock);
+
+ /* The datasheet tells to set the USB_CONTROL register SUSPEND
+ * bit when the USB bus suspend is detected.
+ * This bit stops the clocks (clocks for EPC, SIE, USBPHY) but
+ * these clocks seems not used only by the USB device. Some
+ * UARTs can be lost ...
+ * So, do not set the USB_CONTROL register SUSPEND bit.
+ */
+ }
+}
+
+static void usbf_driver_resume(struct usbf_udc *udc)
+{
+ if (!udc->is_usb_suspended)
+ return;
+
+ dev_dbg(udc->dev, "do usb resume\n");
+ udc->is_usb_suspended = false;
+
+ if (udc->driver && udc->driver->resume) {
+ spin_unlock(&udc->lock);
+ udc->driver->resume(&udc->gadget);
+ spin_lock(&udc->lock);
+ }
+}
+
+static irqreturn_t usbf_epc_irq(int irq, void *_udc)
+{
+ struct usbf_udc *udc = (struct usbf_udc *)_udc;
+ unsigned long flags;
+ struct usbf_ep *ep;
+ u32 int_sts;
+ u32 int_en;
+ int i;
+
+ spin_lock_irqsave(&udc->lock, flags);
+
+ int_en = usbf_reg_readl(udc, USBF_REG_USB_INT_ENA);
+ int_sts = usbf_reg_readl(udc, USBF_REG_USB_INT_STA) & int_en;
+ usbf_reg_writel(udc, USBF_REG_USB_INT_STA, ~int_sts);
+
+ dev_dbg(udc->dev, "int_sts=0x%08x\n", int_sts);
+
+ if (int_sts & USBF_USB_RSUM_INT) {
+ dev_dbg(udc->dev, "handle resume\n");
+ usbf_driver_resume(udc);
+ }
+
+ if (int_sts & USBF_USB_USB_RST_INT) {
+ dev_dbg(udc->dev, "handle bus reset\n");
+ usbf_driver_resume(udc);
+ usbf_reset(udc);
+ }
+
+ if (int_sts & USBF_USB_SPEED_MODE_INT) {
+ if (usbf_reg_readl(udc, USBF_REG_USB_STATUS) & USBF_USB_SPEED_MODE)
+ udc->gadget.speed = USB_SPEED_HIGH;
+ else
+ udc->gadget.speed = USB_SPEED_FULL;
+ dev_dbg(udc->dev, "handle speed change (%s)\n",
+ udc->gadget.speed == USB_SPEED_HIGH ? "High" : "Full");
+ }
+
+ if (int_sts & USBF_USB_EPN_INT(0)) {
+ usbf_driver_resume(udc);
+ usbf_ep0_interrupt(&udc->ep[0]);
+ }
+
+ for (i = 1; i < ARRAY_SIZE(udc->ep); i++) {
+ ep = &udc->ep[i];
+
+ if (int_sts & USBF_USB_EPN_INT(i)) {
+ usbf_driver_resume(udc);
+ usbf_epn_interrupt(ep);
+ }
+ }
+
+ if (int_sts & USBF_USB_SPND_INT) {
+ dev_dbg(udc->dev, "handle suspend\n");
+ usbf_driver_suspend(udc);
+ }
+
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t usbf_ahb_epc_irq(int irq, void *_udc)
+{
+ struct usbf_udc *udc = (struct usbf_udc *)_udc;
+ unsigned long flags;
+ struct usbf_ep *epn;
+ u32 sysbint;
+ void (*ep_action)(struct usbf_ep *epn);
+ int i;
+
+ spin_lock_irqsave(&udc->lock, flags);
+
+ /* Read and ack interrupts */
+ sysbint = usbf_reg_readl(udc, USBF_REG_AHBBINT);
+ usbf_reg_writel(udc, USBF_REG_AHBBINT, sysbint);
+
+ if ((sysbint & USBF_SYS_VBUS_INT) == USBF_SYS_VBUS_INT) {
+ if (usbf_reg_readl(udc, USBF_REG_EPCTR) & USBF_SYS_VBUS_LEVEL) {
+ dev_dbg(udc->dev, "handle vbus (1)\n");
+ spin_unlock(&udc->lock);
+ usb_udc_vbus_handler(&udc->gadget, true);
+ usb_gadget_set_state(&udc->gadget, USB_STATE_POWERED);
+ spin_lock(&udc->lock);
+ } else {
+ dev_dbg(udc->dev, "handle vbus (0)\n");
+ udc->is_usb_suspended = false;
+ spin_unlock(&udc->lock);
+ usb_udc_vbus_handler(&udc->gadget, false);
+ usb_gadget_set_state(&udc->gadget,
+ USB_STATE_NOTATTACHED);
+ spin_lock(&udc->lock);
+ }
+ }
+
+ for (i = 1; i < ARRAY_SIZE(udc->ep); i++) {
+ if (sysbint & USBF_SYS_DMA_ENDINT_EPN(i)) {
+ epn = &udc->ep[i];
+ dev_dbg(epn->udc->dev,
+ "ep%u handle DMA complete. action=%ps\n",
+ epn->id, epn->bridge_on_dma_end);
+ ep_action = epn->bridge_on_dma_end;
+ if (ep_action) {
+ epn->bridge_on_dma_end = NULL;
+ ep_action(epn);
+ }
+ }
+ }
+
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+static int usbf_udc_start(struct usb_gadget *gadget,
+ struct usb_gadget_driver *driver)
+{
+ struct usbf_udc *udc = container_of(gadget, struct usbf_udc, gadget);
+ unsigned long flags;
+
+ dev_info(udc->dev, "start (driver '%s')\n", driver->driver.name);
+
+ spin_lock_irqsave(&udc->lock, flags);
+
+ /* hook up the driver */
+ udc->driver = driver;
+
+ /* Enable VBUS interrupt */
+ usbf_reg_writel(udc, USBF_REG_AHBBINTEN, USBF_SYS_VBUS_INTEN);
+
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ return 0;
+}
+
+static int usbf_udc_stop(struct usb_gadget *gadget)
+{
+ struct usbf_udc *udc = container_of(gadget, struct usbf_udc, gadget);
+ unsigned long flags;
+
+ spin_lock_irqsave(&udc->lock, flags);
+
+ /* Disable VBUS interrupt */
+ usbf_reg_writel(udc, USBF_REG_AHBBINTEN, 0);
+
+ udc->driver = NULL;
+
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ dev_info(udc->dev, "stopped\n");
+
+ return 0;
+}
+
+static int usbf_get_frame(struct usb_gadget *gadget)
+{
+ struct usbf_udc *udc = container_of(gadget, struct usbf_udc, gadget);
+
+ return USBF_USB_GET_FRAME(usbf_reg_readl(udc, USBF_REG_USB_ADDRESS));
+}
+
+static void usbf_attach(struct usbf_udc *udc)
+{
+ /* Enable USB signal to Function PHY
+ * D+ signal Pull-up
+ * Disable endpoint 0, it will be automatically enable when a USB reset
+ * is received.
+ * Disable the other endpoints
+ */
+ usbf_reg_clrset(udc, USBF_REG_USB_CONTROL,
+ USBF_USB_CONNECTB | USBF_USB_DEFAULT | USBF_USB_CONF,
+ USBF_USB_PUE2);
+
+ /* Enable reset and mode change interrupts */
+ usbf_reg_bitset(udc, USBF_REG_USB_INT_ENA,
+ USBF_USB_USB_RST_EN | USBF_USB_SPEED_MODE_EN | USBF_USB_RSUM_EN | USBF_USB_SPND_EN);
+}
+
+static void usbf_detach(struct usbf_udc *udc)
+{
+ int i;
+
+ /* Disable interrupts */
+ usbf_reg_writel(udc, USBF_REG_USB_INT_ENA, 0);
+
+ for (i = 0; i < ARRAY_SIZE(udc->ep); i++) {
+ if (udc->ep[i].disabled)
+ continue;
+
+ usbf_ep_reset(&udc->ep[i]);
+ }
+
+ /* Disable USB signal to Function PHY
+ * Do not Pull-up D+ signal
+ * Disable endpoint 0
+ * Disable the other endpoints
+ */
+ usbf_reg_clrset(udc, USBF_REG_USB_CONTROL,
+ USBF_USB_PUE2 | USBF_USB_DEFAULT | USBF_USB_CONF,
+ USBF_USB_CONNECTB);
+}
+
+static int usbf_pullup(struct usb_gadget *gadget, int is_on)
+{
+ struct usbf_udc *udc = container_of(gadget, struct usbf_udc, gadget);
+ unsigned long flags;
+
+ dev_dbg(udc->dev, "pullup %d\n", is_on);
+
+ spin_lock_irqsave(&udc->lock, flags);
+ if (is_on)
+ usbf_attach(udc);
+ else
+ usbf_detach(udc);
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ return 0;
+}
+
+static int usbf_udc_set_selfpowered(struct usb_gadget *gadget,
+ int is_selfpowered)
+{
+ struct usbf_udc *udc = container_of(gadget, struct usbf_udc, gadget);
+ unsigned long flags;
+
+ spin_lock_irqsave(&udc->lock, flags);
+ gadget->is_selfpowered = (is_selfpowered != 0);
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ return 0;
+}
+
+static int usbf_udc_wakeup(struct usb_gadget *gadget)
+{
+ struct usbf_udc *udc = container_of(gadget, struct usbf_udc, gadget);
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&udc->lock, flags);
+
+ if (!udc->is_remote_wakeup) {
+ dev_dbg(udc->dev, "remote wakeup not allowed\n");
+ ret = -EINVAL;
+ goto end;
+ }
+
+ dev_dbg(udc->dev, "do wakeup\n");
+
+ /* Send the resume signal */
+ usbf_reg_bitset(udc, USBF_REG_USB_CONTROL, USBF_USB_RSUM_IN);
+ usbf_reg_bitclr(udc, USBF_REG_USB_CONTROL, USBF_USB_RSUM_IN);
+
+ ret = 0;
+end:
+ spin_unlock_irqrestore(&udc->lock, flags);
+ return ret;
+}
+
+static struct usb_gadget_ops usbf_gadget_ops = {
+ .get_frame = usbf_get_frame,
+ .pullup = usbf_pullup,
+ .udc_start = usbf_udc_start,
+ .udc_stop = usbf_udc_stop,
+ .set_selfpowered = usbf_udc_set_selfpowered,
+ .wakeup = usbf_udc_wakeup,
+};
+
+static int usbf_epn_check(struct usbf_ep *epn)
+{
+ const char *type_txt;
+ const char *buf_txt;
+ int ret = 0;
+ u32 ctrl;
+
+ ctrl = usbf_ep_reg_readl(epn, USBF_REG_EPN_CONTROL);
+
+ switch (ctrl & USBF_EPN_MODE_MASK) {
+ case USBF_EPN_MODE_BULK:
+ type_txt = "bulk";
+ if (epn->ep.caps.type_control || epn->ep.caps.type_iso ||
+ !epn->ep.caps.type_bulk || epn->ep.caps.type_int) {
+ dev_err(epn->udc->dev,
+ "ep%u caps mismatch, bulk expected\n", epn->id);
+ ret = -EINVAL;
+ }
+ break;
+ case USBF_EPN_MODE_INTR:
+ type_txt = "intr";
+ if (epn->ep.caps.type_control || epn->ep.caps.type_iso ||
+ epn->ep.caps.type_bulk || !epn->ep.caps.type_int) {
+ dev_err(epn->udc->dev,
+ "ep%u caps mismatch, int expected\n", epn->id);
+ ret = -EINVAL;
+ }
+ break;
+ case USBF_EPN_MODE_ISO:
+ type_txt = "iso";
+ if (epn->ep.caps.type_control || !epn->ep.caps.type_iso ||
+ epn->ep.caps.type_bulk || epn->ep.caps.type_int) {
+ dev_err(epn->udc->dev,
+ "ep%u caps mismatch, iso expected\n", epn->id);
+ ret = -EINVAL;
+ }
+ break;
+ default:
+ type_txt = "unknown";
+ dev_err(epn->udc->dev, "ep%u unknown type\n", epn->id);
+ ret = -EINVAL;
+ break;
+ }
+
+ if (ctrl & USBF_EPN_BUF_TYPE_DOUBLE) {
+ buf_txt = "double";
+ if (!usbf_ep_info[epn->id].is_double) {
+ dev_err(epn->udc->dev,
+ "ep%u buffer mismatch, double expected\n",
+ epn->id);
+ ret = -EINVAL;
+ }
+ } else {
+ buf_txt = "single";
+ if (usbf_ep_info[epn->id].is_double) {
+ dev_err(epn->udc->dev,
+ "ep%u buffer mismatch, single expected\n",
+ epn->id);
+ ret = -EINVAL;
+ }
+ }
+
+ dev_dbg(epn->udc->dev, "ep%u (%s) %s, %s buffer %u, checked %s\n",
+ epn->id, epn->ep.name, type_txt, buf_txt,
+ epn->ep.maxpacket_limit, ret ? "failed" : "ok");
+
+ return ret;
+}
+
+static int usbf_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct usbf_udc *udc;
+ struct usbf_ep *ep;
+ unsigned int i;
+ int irq;
+ int ret;
+
+ udc = devm_kzalloc(dev, sizeof(*udc), GFP_KERNEL);
+ if (!udc)
+ return -ENOMEM;
+ platform_set_drvdata(pdev, udc);
+
+ udc->dev = dev;
+ spin_lock_init(&udc->lock);
+
+ udc->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(udc->regs))
+ return PTR_ERR(udc->regs);
+
+ devm_pm_runtime_enable(&pdev->dev);
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret < 0)
+ return ret;
+
+ dev_info(dev, "USBF version: %08x\n",
+ usbf_reg_readl(udc, USBF_REG_USBSSVER));
+
+ /* Resetting the PLL is handled via the clock driver as it has common
+ * registers with USB Host
+ */
+ usbf_reg_bitclr(udc, USBF_REG_EPCTR, USBF_SYS_EPC_RST);
+
+ /* modify in register gadget process */
+ udc->gadget.speed = USB_SPEED_FULL;
+ udc->gadget.max_speed = USB_SPEED_HIGH;
+ udc->gadget.ops = &usbf_gadget_ops;
+
+ udc->gadget.name = dev->driver->name;
+ udc->gadget.dev.parent = dev;
+ udc->gadget.ep0 = &udc->ep[0].ep;
+
+ /* The hardware DMA controller needs dma addresses aligned on 32bit.
+ * A fallback to pio is done if DMA addresses are not aligned.
+ */
+ udc->gadget.quirk_avoids_skb_reserve = 1;
+
+ INIT_LIST_HEAD(&udc->gadget.ep_list);
+ /* we have a canned request structure to allow sending packets as reply
+ * to get_status requests
+ */
+ INIT_LIST_HEAD(&udc->setup_reply.queue);
+
+ for (i = 0; i < ARRAY_SIZE(udc->ep); i++) {
+ ep = &udc->ep[i];
+
+ if (!(usbf_reg_readl(udc, USBF_REG_USBSSCONF) &
+ USBF_SYS_EP_AVAILABLE(i))) {
+ continue;
+ }
+
+ INIT_LIST_HEAD(&ep->queue);
+
+ ep->id = i;
+ ep->disabled = 1;
+ ep->udc = udc;
+ ep->ep.ops = &usbf_ep_ops;
+ ep->ep.name = usbf_ep_info[i].name;
+ ep->ep.caps = usbf_ep_info[i].caps;
+ usb_ep_set_maxpacket_limit(&ep->ep,
+ usbf_ep_info[i].maxpacket_limit);
+
+ if (ep->id == 0) {
+ ep->regs = ep->udc->regs + USBF_BASE_EP0;
+ } else {
+ ep->regs = ep->udc->regs + USBF_BASE_EPN(ep->id - 1);
+ ret = usbf_epn_check(ep);
+ if (ret)
+ return ret;
+ if (usbf_reg_readl(udc, USBF_REG_USBSSCONF) &
+ USBF_SYS_DMA_AVAILABLE(i)) {
+ ep->dma_regs = ep->udc->regs +
+ USBF_BASE_DMA_EPN(ep->id - 1);
+ }
+ list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
+ }
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+ ret = devm_request_irq(dev, irq, usbf_epc_irq, 0, "usbf-epc", udc);
+ if (ret) {
+ dev_err(dev, "cannot request irq %d err %d\n", irq, ret);
+ return ret;
+ }
+
+ irq = platform_get_irq(pdev, 1);
+ if (irq < 0)
+ return irq;
+ ret = devm_request_irq(dev, irq, usbf_ahb_epc_irq, 0, "usbf-ahb-epc", udc);
+ if (ret) {
+ dev_err(dev, "cannot request irq %d err %d\n", irq, ret);
+ return ret;
+ }
+
+ usbf_reg_bitset(udc, USBF_REG_AHBMCTR, USBF_SYS_WBURST_TYPE);
+
+ usbf_reg_bitset(udc, USBF_REG_USB_CONTROL,
+ USBF_USB_INT_SEL | USBF_USB_SOF_RCV | USBF_USB_SOF_CLK_MODE);
+
+ ret = usb_add_gadget_udc(dev, &udc->gadget);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int usbf_remove(struct platform_device *pdev)
+{
+ struct usbf_udc *udc = platform_get_drvdata(pdev);
+
+ usb_del_gadget_udc(&udc->gadget);
+
+ pm_runtime_put(&pdev->dev);
+
+ return 0;
+}
+
+static const struct of_device_id usbf_match[] = {
+ { .compatible = "renesas,rzn1-usbf" },
+ {} /* sentinel */
+};
+MODULE_DEVICE_TABLE(of, usbf_match);
+
+static struct platform_driver udc_driver = {
+ .driver = {
+ .name = "usbf_renesas",
+ .owner = THIS_MODULE,
+ .of_match_table = usbf_match,
+ },
+ .probe = usbf_probe,
+ .remove = usbf_remove,
+};
+
+module_platform_driver(udc_driver);
+
+MODULE_AUTHOR("Herve Codina <herve.codina@bootlin.com>");
+MODULE_DESCRIPTION("Renesas R-Car Gen3 & RZ/N1 USB Function driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/gadget/udc/rzv2m_usb3drd.c b/drivers/usb/gadget/udc/rzv2m_usb3drd.c
new file mode 100644
index 000000000000..3c8bbf843038
--- /dev/null
+++ b/drivers/usb/gadget/udc/rzv2m_usb3drd.c
@@ -0,0 +1,139 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Renesas RZ/V2M USB3DRD driver
+ *
+ * Copyright (C) 2022 Renesas Electronics Corporation
+ */
+
+#include <linux/io.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+#include <linux/usb/rzv2m_usb3drd.h>
+
+#define USB_PERI_DRD_CON 0x000
+
+#define USB_PERI_DRD_CON_PERI_RST BIT(31)
+#define USB_PERI_DRD_CON_HOST_RST BIT(30)
+#define USB_PERI_DRD_CON_PERI_CON BIT(24)
+
+static void rzv2m_usb3drd_set_bit(struct rzv2m_usb3drd *usb3, u32 bits,
+ u32 offs)
+{
+ u32 val = readl(usb3->reg + offs);
+
+ val |= bits;
+ writel(val, usb3->reg + offs);
+}
+
+static void rzv2m_usb3drd_clear_bit(struct rzv2m_usb3drd *usb3, u32 bits,
+ u32 offs)
+{
+ u32 val = readl(usb3->reg + offs);
+
+ val &= ~bits;
+ writel(val, usb3->reg + offs);
+}
+
+void rzv2m_usb3drd_reset(struct device *dev, bool host)
+{
+ struct rzv2m_usb3drd *usb3 = dev_get_drvdata(dev);
+
+ if (host) {
+ rzv2m_usb3drd_clear_bit(usb3, USB_PERI_DRD_CON_PERI_CON,
+ USB_PERI_DRD_CON);
+ rzv2m_usb3drd_clear_bit(usb3, USB_PERI_DRD_CON_HOST_RST,
+ USB_PERI_DRD_CON);
+ rzv2m_usb3drd_set_bit(usb3, USB_PERI_DRD_CON_PERI_RST,
+ USB_PERI_DRD_CON);
+ } else {
+ rzv2m_usb3drd_set_bit(usb3, USB_PERI_DRD_CON_PERI_CON,
+ USB_PERI_DRD_CON);
+ rzv2m_usb3drd_set_bit(usb3, USB_PERI_DRD_CON_HOST_RST,
+ USB_PERI_DRD_CON);
+ rzv2m_usb3drd_clear_bit(usb3, USB_PERI_DRD_CON_PERI_RST,
+ USB_PERI_DRD_CON);
+ }
+}
+EXPORT_SYMBOL_GPL(rzv2m_usb3drd_reset);
+
+static int rzv2m_usb3drd_remove(struct platform_device *pdev)
+{
+ struct rzv2m_usb3drd *usb3 = platform_get_drvdata(pdev);
+
+ of_platform_depopulate(usb3->dev);
+ pm_runtime_put(usb3->dev);
+ pm_runtime_disable(&pdev->dev);
+ reset_control_assert(usb3->drd_rstc);
+
+ return 0;
+}
+
+static int rzv2m_usb3drd_probe(struct platform_device *pdev)
+{
+ struct rzv2m_usb3drd *usb3;
+ int ret;
+
+ usb3 = devm_kzalloc(&pdev->dev, sizeof(*usb3), GFP_KERNEL);
+ if (!usb3)
+ return -ENOMEM;
+
+ usb3->dev = &pdev->dev;
+
+ usb3->drd_irq = platform_get_irq_byname(pdev, "drd");
+ if (usb3->drd_irq < 0)
+ return usb3->drd_irq;
+
+ usb3->reg = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(usb3->reg))
+ return PTR_ERR(usb3->reg);
+
+ platform_set_drvdata(pdev, usb3);
+
+ usb3->drd_rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
+ if (IS_ERR(usb3->drd_rstc))
+ return dev_err_probe(&pdev->dev, PTR_ERR(usb3->drd_rstc),
+ "failed to get drd reset");
+
+ reset_control_deassert(usb3->drd_rstc);
+ pm_runtime_enable(&pdev->dev);
+ ret = pm_runtime_resume_and_get(usb3->dev);
+ if (ret)
+ goto err_rst;
+
+ ret = of_platform_populate(usb3->dev->of_node, NULL, NULL, usb3->dev);
+ if (ret)
+ goto err_pm;
+
+ return 0;
+
+err_pm:
+ pm_runtime_put(usb3->dev);
+
+err_rst:
+ pm_runtime_disable(&pdev->dev);
+ reset_control_assert(usb3->drd_rstc);
+ return ret;
+}
+
+static const struct of_device_id rzv2m_usb3drd_of_match[] = {
+ { .compatible = "renesas,rzv2m-usb3drd", },
+ { /* Sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, rzv2m_usb3drd_of_match);
+
+static struct platform_driver rzv2m_usb3drd_driver = {
+ .driver = {
+ .name = "rzv2m-usb3drd",
+ .of_match_table = of_match_ptr(rzv2m_usb3drd_of_match),
+ },
+ .probe = rzv2m_usb3drd_probe,
+ .remove = rzv2m_usb3drd_remove,
+};
+module_platform_driver(rzv2m_usb3drd_driver);
+
+MODULE_AUTHOR("Biju Das <biju.das.jz@bp.renesas.com>");
+MODULE_DESCRIPTION("Renesas RZ/V2M USB3DRD driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:rzv2m_usb3drd");
diff --git a/drivers/usb/gadget/udc/s3c-hsudc.c b/drivers/usb/gadget/udc/s3c-hsudc.c
deleted file mode 100644
index 4b7eb7701470..000000000000
--- a/drivers/usb/gadget/udc/s3c-hsudc.c
+++ /dev/null
@@ -1,1319 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* linux/drivers/usb/gadget/s3c-hsudc.c
- *
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- * http://www.samsung.com/
- *
- * S3C24XX USB 2.0 High-speed USB controller gadget driver
- *
- * The S3C24XX USB 2.0 high-speed USB controller supports upto 9 endpoints.
- * Each endpoint can be configured as either in or out endpoint. Endpoints
- * can be configured for Bulk or Interrupt transfer mode.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/spinlock.h>
-#include <linux/interrupt.h>
-#include <linux/platform_device.h>
-#include <linux/dma-mapping.h>
-#include <linux/delay.h>
-#include <linux/io.h>
-#include <linux/slab.h>
-#include <linux/clk.h>
-#include <linux/err.h>
-#include <linux/usb/ch9.h>
-#include <linux/usb/gadget.h>
-#include <linux/usb/otg.h>
-#include <linux/prefetch.h>
-#include <linux/platform_data/s3c-hsudc.h>
-#include <linux/regulator/consumer.h>
-#include <linux/pm_runtime.h>
-
-#define S3C_HSUDC_REG(x) (x)
-
-/* Non-Indexed Registers */
-#define S3C_IR S3C_HSUDC_REG(0x00) /* Index Register */
-#define S3C_EIR S3C_HSUDC_REG(0x04) /* EP Intr Status */
-#define S3C_EIR_EP0 (1<<0)
-#define S3C_EIER S3C_HSUDC_REG(0x08) /* EP Intr Enable */
-#define S3C_FAR S3C_HSUDC_REG(0x0c) /* Gadget Address */
-#define S3C_FNR S3C_HSUDC_REG(0x10) /* Frame Number */
-#define S3C_EDR S3C_HSUDC_REG(0x14) /* EP Direction */
-#define S3C_TR S3C_HSUDC_REG(0x18) /* Test Register */
-#define S3C_SSR S3C_HSUDC_REG(0x1c) /* System Status */
-#define S3C_SSR_DTZIEN_EN (0xff8f)
-#define S3C_SSR_ERR (0xff80)
-#define S3C_SSR_VBUSON (1 << 8)
-#define S3C_SSR_HSP (1 << 4)
-#define S3C_SSR_SDE (1 << 3)
-#define S3C_SSR_RESUME (1 << 2)
-#define S3C_SSR_SUSPEND (1 << 1)
-#define S3C_SSR_RESET (1 << 0)
-#define S3C_SCR S3C_HSUDC_REG(0x20) /* System Control */
-#define S3C_SCR_DTZIEN_EN (1 << 14)
-#define S3C_SCR_RRD_EN (1 << 5)
-#define S3C_SCR_SUS_EN (1 << 1)
-#define S3C_SCR_RST_EN (1 << 0)
-#define S3C_EP0SR S3C_HSUDC_REG(0x24) /* EP0 Status */
-#define S3C_EP0SR_EP0_LWO (1 << 6)
-#define S3C_EP0SR_STALL (1 << 4)
-#define S3C_EP0SR_TX_SUCCESS (1 << 1)
-#define S3C_EP0SR_RX_SUCCESS (1 << 0)
-#define S3C_EP0CR S3C_HSUDC_REG(0x28) /* EP0 Control */
-#define S3C_BR(_x) S3C_HSUDC_REG(0x60 + (_x * 4))
-
-/* Indexed Registers */
-#define S3C_ESR S3C_HSUDC_REG(0x2c) /* EPn Status */
-#define S3C_ESR_FLUSH (1 << 6)
-#define S3C_ESR_STALL (1 << 5)
-#define S3C_ESR_LWO (1 << 4)
-#define S3C_ESR_PSIF_ONE (1 << 2)
-#define S3C_ESR_PSIF_TWO (2 << 2)
-#define S3C_ESR_TX_SUCCESS (1 << 1)
-#define S3C_ESR_RX_SUCCESS (1 << 0)
-#define S3C_ECR S3C_HSUDC_REG(0x30) /* EPn Control */
-#define S3C_ECR_DUEN (1 << 7)
-#define S3C_ECR_FLUSH (1 << 6)
-#define S3C_ECR_STALL (1 << 1)
-#define S3C_ECR_IEMS (1 << 0)
-#define S3C_BRCR S3C_HSUDC_REG(0x34) /* Read Count */
-#define S3C_BWCR S3C_HSUDC_REG(0x38) /* Write Count */
-#define S3C_MPR S3C_HSUDC_REG(0x3c) /* Max Pkt Size */
-
-#define WAIT_FOR_SETUP (0)
-#define DATA_STATE_XMIT (1)
-#define DATA_STATE_RECV (2)
-
-static const char * const s3c_hsudc_supply_names[] = {
- "vdda", /* analog phy supply, 3.3V */
- "vddi", /* digital phy supply, 1.2V */
- "vddosc", /* oscillator supply, 1.8V - 3.3V */
-};
-
-/**
- * struct s3c_hsudc_ep - Endpoint representation used by driver.
- * @ep: USB gadget layer representation of device endpoint.
- * @name: Endpoint name (as required by ep autoconfiguration).
- * @dev: Reference to the device controller to which this EP belongs.
- * @desc: Endpoint descriptor obtained from the gadget driver.
- * @queue: Transfer request queue for the endpoint.
- * @stopped: Maintains state of endpoint, set if EP is halted.
- * @bEndpointAddress: EP address (including direction bit).
- * @fifo: Base address of EP FIFO.
- */
-struct s3c_hsudc_ep {
- struct usb_ep ep;
- char name[20];
- struct s3c_hsudc *dev;
- struct list_head queue;
- u8 stopped;
- u8 wedge;
- u8 bEndpointAddress;
- void __iomem *fifo;
-};
-
-/**
- * struct s3c_hsudc_req - Driver encapsulation of USB gadget transfer request.
- * @req: Reference to USB gadget transfer request.
- * @queue: Used for inserting this request to the endpoint request queue.
- */
-struct s3c_hsudc_req {
- struct usb_request req;
- struct list_head queue;
-};
-
-/**
- * struct s3c_hsudc - Driver's abstraction of the device controller.
- * @gadget: Instance of usb_gadget which is referenced by gadget driver.
- * @driver: Reference to currently active gadget driver.
- * @dev: The device reference used by probe function.
- * @lock: Lock to synchronize the usage of Endpoints (EP's are indexed).
- * @regs: Remapped base address of controller's register space.
- * irq: IRQ number used by the controller.
- * uclk: Reference to the controller clock.
- * ep0state: Current state of EP0.
- * ep: List of endpoints supported by the controller.
- */
-struct s3c_hsudc {
- struct usb_gadget gadget;
- struct usb_gadget_driver *driver;
- struct device *dev;
- struct s3c24xx_hsudc_platdata *pd;
- struct usb_phy *transceiver;
- struct regulator_bulk_data supplies[ARRAY_SIZE(s3c_hsudc_supply_names)];
- spinlock_t lock;
- void __iomem *regs;
- int irq;
- struct clk *uclk;
- int ep0state;
- struct s3c_hsudc_ep ep[];
-};
-
-#define ep_maxpacket(_ep) ((_ep)->ep.maxpacket)
-#define ep_is_in(_ep) ((_ep)->bEndpointAddress & USB_DIR_IN)
-#define ep_index(_ep) ((_ep)->bEndpointAddress & \
- USB_ENDPOINT_NUMBER_MASK)
-
-static const char driver_name[] = "s3c-udc";
-static const char ep0name[] = "ep0-control";
-
-static inline struct s3c_hsudc_req *our_req(struct usb_request *req)
-{
- return container_of(req, struct s3c_hsudc_req, req);
-}
-
-static inline struct s3c_hsudc_ep *our_ep(struct usb_ep *ep)
-{
- return container_of(ep, struct s3c_hsudc_ep, ep);
-}
-
-static inline struct s3c_hsudc *to_hsudc(struct usb_gadget *gadget)
-{
- return container_of(gadget, struct s3c_hsudc, gadget);
-}
-
-static inline void set_index(struct s3c_hsudc *hsudc, int ep_addr)
-{
- ep_addr &= USB_ENDPOINT_NUMBER_MASK;
- writel(ep_addr, hsudc->regs + S3C_IR);
-}
-
-static inline void __orr32(void __iomem *ptr, u32 val)
-{
- writel(readl(ptr) | val, ptr);
-}
-
-/**
- * s3c_hsudc_complete_request - Complete a transfer request.
- * @hsep: Endpoint to which the request belongs.
- * @hsreq: Transfer request to be completed.
- * @status: Transfer completion status for the transfer request.
- */
-static void s3c_hsudc_complete_request(struct s3c_hsudc_ep *hsep,
- struct s3c_hsudc_req *hsreq, int status)
-{
- unsigned int stopped = hsep->stopped;
- struct s3c_hsudc *hsudc = hsep->dev;
-
- list_del_init(&hsreq->queue);
- hsreq->req.status = status;
-
- if (!ep_index(hsep)) {
- hsudc->ep0state = WAIT_FOR_SETUP;
- hsep->bEndpointAddress &= ~USB_DIR_IN;
- }
-
- hsep->stopped = 1;
- spin_unlock(&hsudc->lock);
- usb_gadget_giveback_request(&hsep->ep, &hsreq->req);
- spin_lock(&hsudc->lock);
- hsep->stopped = stopped;
-}
-
-/**
- * s3c_hsudc_nuke_ep - Terminate all requests queued for a endpoint.
- * @hsep: Endpoint for which queued requests have to be terminated.
- * @status: Transfer completion status for the transfer request.
- */
-static void s3c_hsudc_nuke_ep(struct s3c_hsudc_ep *hsep, int status)
-{
- struct s3c_hsudc_req *hsreq;
-
- while (!list_empty(&hsep->queue)) {
- hsreq = list_entry(hsep->queue.next,
- struct s3c_hsudc_req, queue);
- s3c_hsudc_complete_request(hsep, hsreq, status);
- }
-}
-
-/**
- * s3c_hsudc_stop_activity - Stop activity on all endpoints.
- * @hsudc: Device controller for which EP activity is to be stopped.
- *
- * All the endpoints are stopped and any pending transfer requests if any on
- * the endpoint are terminated.
- */
-static void s3c_hsudc_stop_activity(struct s3c_hsudc *hsudc)
-{
- struct s3c_hsudc_ep *hsep;
- int epnum;
-
- hsudc->gadget.speed = USB_SPEED_UNKNOWN;
-
- for (epnum = 0; epnum < hsudc->pd->epnum; epnum++) {
- hsep = &hsudc->ep[epnum];
- hsep->stopped = 1;
- s3c_hsudc_nuke_ep(hsep, -ESHUTDOWN);
- }
-}
-
-/**
- * s3c_hsudc_read_setup_pkt - Read the received setup packet from EP0 fifo.
- * @hsudc: Device controller from which setup packet is to be read.
- * @buf: The buffer into which the setup packet is read.
- *
- * The setup packet received in the EP0 fifo is read and stored into a
- * given buffer address.
- */
-
-static void s3c_hsudc_read_setup_pkt(struct s3c_hsudc *hsudc, u16 *buf)
-{
- int count;
-
- count = readl(hsudc->regs + S3C_BRCR);
- while (count--)
- *buf++ = (u16)readl(hsudc->regs + S3C_BR(0));
-
- writel(S3C_EP0SR_RX_SUCCESS, hsudc->regs + S3C_EP0SR);
-}
-
-/**
- * s3c_hsudc_write_fifo - Write next chunk of transfer data to EP fifo.
- * @hsep: Endpoint to which the data is to be written.
- * @hsreq: Transfer request from which the next chunk of data is written.
- *
- * Write the next chunk of data from a transfer request to the endpoint FIFO.
- * If the transfer request completes, 1 is returned, otherwise 0 is returned.
- */
-static int s3c_hsudc_write_fifo(struct s3c_hsudc_ep *hsep,
- struct s3c_hsudc_req *hsreq)
-{
- u16 *buf;
- u32 max = ep_maxpacket(hsep);
- u32 count, length;
- bool is_last;
- void __iomem *fifo = hsep->fifo;
-
- buf = hsreq->req.buf + hsreq->req.actual;
- prefetch(buf);
-
- length = hsreq->req.length - hsreq->req.actual;
- length = min(length, max);
- hsreq->req.actual += length;
-
- writel(length, hsep->dev->regs + S3C_BWCR);
- for (count = 0; count < length; count += 2)
- writel(*buf++, fifo);
-
- if (count != max) {
- is_last = true;
- } else {
- if (hsreq->req.length != hsreq->req.actual || hsreq->req.zero)
- is_last = false;
- else
- is_last = true;
- }
-
- if (is_last) {
- s3c_hsudc_complete_request(hsep, hsreq, 0);
- return 1;
- }
-
- return 0;
-}
-
-/**
- * s3c_hsudc_read_fifo - Read the next chunk of data from EP fifo.
- * @hsep: Endpoint from which the data is to be read.
- * @hsreq: Transfer request to which the next chunk of data read is written.
- *
- * Read the next chunk of data from the endpoint FIFO and a write it to the
- * transfer request buffer. If the transfer request completes, 1 is returned,
- * otherwise 0 is returned.
- */
-static int s3c_hsudc_read_fifo(struct s3c_hsudc_ep *hsep,
- struct s3c_hsudc_req *hsreq)
-{
- struct s3c_hsudc *hsudc = hsep->dev;
- u32 csr, offset;
- u16 *buf, word;
- u32 buflen, rcnt, rlen;
- void __iomem *fifo = hsep->fifo;
- u32 is_short = 0;
-
- offset = (ep_index(hsep)) ? S3C_ESR : S3C_EP0SR;
- csr = readl(hsudc->regs + offset);
- if (!(csr & S3C_ESR_RX_SUCCESS))
- return -EINVAL;
-
- buf = hsreq->req.buf + hsreq->req.actual;
- prefetchw(buf);
- buflen = hsreq->req.length - hsreq->req.actual;
-
- rcnt = readl(hsudc->regs + S3C_BRCR);
- rlen = (csr & S3C_ESR_LWO) ? (rcnt * 2 - 1) : (rcnt * 2);
-
- hsreq->req.actual += min(rlen, buflen);
- is_short = (rlen < hsep->ep.maxpacket);
-
- while (rcnt-- != 0) {
- word = (u16)readl(fifo);
- if (buflen) {
- *buf++ = word;
- buflen--;
- } else {
- hsreq->req.status = -EOVERFLOW;
- }
- }
-
- writel(S3C_ESR_RX_SUCCESS, hsudc->regs + offset);
-
- if (is_short || hsreq->req.actual == hsreq->req.length) {
- s3c_hsudc_complete_request(hsep, hsreq, 0);
- return 1;
- }
-
- return 0;
-}
-
-/**
- * s3c_hsudc_epin_intr - Handle in-endpoint interrupt.
- * @hsudc - Device controller for which the interrupt is to be handled.
- * @ep_idx - Endpoint number on which an interrupt is pending.
- *
- * Handles interrupt for a in-endpoint. The interrupts that are handled are
- * stall and data transmit complete interrupt.
- */
-static void s3c_hsudc_epin_intr(struct s3c_hsudc *hsudc, u32 ep_idx)
-{
- struct s3c_hsudc_ep *hsep = &hsudc->ep[ep_idx];
- struct s3c_hsudc_req *hsreq;
- u32 csr;
-
- csr = readl(hsudc->regs + S3C_ESR);
- if (csr & S3C_ESR_STALL) {
- writel(S3C_ESR_STALL, hsudc->regs + S3C_ESR);
- return;
- }
-
- if (csr & S3C_ESR_TX_SUCCESS) {
- writel(S3C_ESR_TX_SUCCESS, hsudc->regs + S3C_ESR);
- if (list_empty(&hsep->queue))
- return;
-
- hsreq = list_entry(hsep->queue.next,
- struct s3c_hsudc_req, queue);
- if ((s3c_hsudc_write_fifo(hsep, hsreq) == 0) &&
- (csr & S3C_ESR_PSIF_TWO))
- s3c_hsudc_write_fifo(hsep, hsreq);
- }
-}
-
-/**
- * s3c_hsudc_epout_intr - Handle out-endpoint interrupt.
- * @hsudc - Device controller for which the interrupt is to be handled.
- * @ep_idx - Endpoint number on which an interrupt is pending.
- *
- * Handles interrupt for a out-endpoint. The interrupts that are handled are
- * stall, flush and data ready interrupt.
- */
-static void s3c_hsudc_epout_intr(struct s3c_hsudc *hsudc, u32 ep_idx)
-{
- struct s3c_hsudc_ep *hsep = &hsudc->ep[ep_idx];
- struct s3c_hsudc_req *hsreq;
- u32 csr;
-
- csr = readl(hsudc->regs + S3C_ESR);
- if (csr & S3C_ESR_STALL) {
- writel(S3C_ESR_STALL, hsudc->regs + S3C_ESR);
- return;
- }
-
- if (csr & S3C_ESR_FLUSH) {
- __orr32(hsudc->regs + S3C_ECR, S3C_ECR_FLUSH);
- return;
- }
-
- if (csr & S3C_ESR_RX_SUCCESS) {
- if (list_empty(&hsep->queue))
- return;
-
- hsreq = list_entry(hsep->queue.next,
- struct s3c_hsudc_req, queue);
- if (((s3c_hsudc_read_fifo(hsep, hsreq)) == 0) &&
- (csr & S3C_ESR_PSIF_TWO))
- s3c_hsudc_read_fifo(hsep, hsreq);
- }
-}
-
-/** s3c_hsudc_set_halt - Set or clear a endpoint halt.
- * @_ep: Endpoint on which halt has to be set or cleared.
- * @value: 1 for setting halt on endpoint, 0 to clear halt.
- *
- * Set or clear endpoint halt. If halt is set, the endpoint is stopped.
- * If halt is cleared, for in-endpoints, if there are any pending
- * transfer requests, transfers are started.
- */
-static int s3c_hsudc_set_halt(struct usb_ep *_ep, int value)
-{
- struct s3c_hsudc_ep *hsep = our_ep(_ep);
- struct s3c_hsudc *hsudc = hsep->dev;
- struct s3c_hsudc_req *hsreq;
- unsigned long irqflags;
- u32 ecr;
- u32 offset;
-
- if (value && ep_is_in(hsep) && !list_empty(&hsep->queue))
- return -EAGAIN;
-
- spin_lock_irqsave(&hsudc->lock, irqflags);
- set_index(hsudc, ep_index(hsep));
- offset = (ep_index(hsep)) ? S3C_ECR : S3C_EP0CR;
- ecr = readl(hsudc->regs + offset);
-
- if (value) {
- ecr |= S3C_ECR_STALL;
- if (ep_index(hsep))
- ecr |= S3C_ECR_FLUSH;
- hsep->stopped = 1;
- } else {
- ecr &= ~S3C_ECR_STALL;
- hsep->stopped = hsep->wedge = 0;
- }
- writel(ecr, hsudc->regs + offset);
-
- if (ep_is_in(hsep) && !list_empty(&hsep->queue) && !value) {
- hsreq = list_entry(hsep->queue.next,
- struct s3c_hsudc_req, queue);
- if (hsreq)
- s3c_hsudc_write_fifo(hsep, hsreq);
- }
-
- spin_unlock_irqrestore(&hsudc->lock, irqflags);
- return 0;
-}
-
-/** s3c_hsudc_set_wedge - Sets the halt feature with the clear requests ignored
- * @_ep: Endpoint on which wedge has to be set.
- *
- * Sets the halt feature with the clear requests ignored.
- */
-static int s3c_hsudc_set_wedge(struct usb_ep *_ep)
-{
- struct s3c_hsudc_ep *hsep = our_ep(_ep);
-
- if (!hsep)
- return -EINVAL;
-
- hsep->wedge = 1;
- return usb_ep_set_halt(_ep);
-}
-
-/** s3c_hsudc_handle_reqfeat - Handle set feature or clear feature requests.
- * @_ep: Device controller on which the set/clear feature needs to be handled.
- * @ctrl: Control request as received on the endpoint 0.
- *
- * Handle set feature or clear feature control requests on the control endpoint.
- */
-static int s3c_hsudc_handle_reqfeat(struct s3c_hsudc *hsudc,
- struct usb_ctrlrequest *ctrl)
-{
- struct s3c_hsudc_ep *hsep;
- bool set = (ctrl->bRequest == USB_REQ_SET_FEATURE);
- u8 ep_num = ctrl->wIndex & USB_ENDPOINT_NUMBER_MASK;
-
- if (ctrl->bRequestType == USB_RECIP_ENDPOINT) {
- hsep = &hsudc->ep[ep_num];
- switch (le16_to_cpu(ctrl->wValue)) {
- case USB_ENDPOINT_HALT:
- if (set || !hsep->wedge)
- s3c_hsudc_set_halt(&hsep->ep, set);
- return 0;
- }
- }
-
- return -ENOENT;
-}
-
-/**
- * s3c_hsudc_process_req_status - Handle get status control request.
- * @hsudc: Device controller on which get status request has be handled.
- * @ctrl: Control request as received on the endpoint 0.
- *
- * Handle get status control request received on control endpoint.
- */
-static void s3c_hsudc_process_req_status(struct s3c_hsudc *hsudc,
- struct usb_ctrlrequest *ctrl)
-{
- struct s3c_hsudc_ep *hsep0 = &hsudc->ep[0];
- struct s3c_hsudc_req hsreq;
- struct s3c_hsudc_ep *hsep;
- __le16 reply;
- u8 epnum;
-
- switch (ctrl->bRequestType & USB_RECIP_MASK) {
- case USB_RECIP_DEVICE:
- reply = cpu_to_le16(0);
- break;
-
- case USB_RECIP_INTERFACE:
- reply = cpu_to_le16(0);
- break;
-
- case USB_RECIP_ENDPOINT:
- epnum = le16_to_cpu(ctrl->wIndex) & USB_ENDPOINT_NUMBER_MASK;
- hsep = &hsudc->ep[epnum];
- reply = cpu_to_le16(hsep->stopped ? 1 : 0);
- break;
- }
-
- INIT_LIST_HEAD(&hsreq.queue);
- hsreq.req.length = 2;
- hsreq.req.buf = &reply;
- hsreq.req.actual = 0;
- hsreq.req.complete = NULL;
- s3c_hsudc_write_fifo(hsep0, &hsreq);
-}
-
-/**
- * s3c_hsudc_process_setup - Process control request received on endpoint 0.
- * @hsudc: Device controller on which control request has been received.
- *
- * Read the control request received on endpoint 0, decode it and handle
- * the request.
- */
-static void s3c_hsudc_process_setup(struct s3c_hsudc *hsudc)
-{
- struct s3c_hsudc_ep *hsep = &hsudc->ep[0];
- struct usb_ctrlrequest ctrl = {0};
- int ret;
-
- s3c_hsudc_nuke_ep(hsep, -EPROTO);
- s3c_hsudc_read_setup_pkt(hsudc, (u16 *)&ctrl);
-
- if (ctrl.bRequestType & USB_DIR_IN) {
- hsep->bEndpointAddress |= USB_DIR_IN;
- hsudc->ep0state = DATA_STATE_XMIT;
- } else {
- hsep->bEndpointAddress &= ~USB_DIR_IN;
- hsudc->ep0state = DATA_STATE_RECV;
- }
-
- switch (ctrl.bRequest) {
- case USB_REQ_SET_ADDRESS:
- if (ctrl.bRequestType != (USB_TYPE_STANDARD | USB_RECIP_DEVICE))
- break;
- hsudc->ep0state = WAIT_FOR_SETUP;
- return;
-
- case USB_REQ_GET_STATUS:
- if ((ctrl.bRequestType & USB_TYPE_MASK) != USB_TYPE_STANDARD)
- break;
- s3c_hsudc_process_req_status(hsudc, &ctrl);
- return;
-
- case USB_REQ_SET_FEATURE:
- case USB_REQ_CLEAR_FEATURE:
- if ((ctrl.bRequestType & USB_TYPE_MASK) != USB_TYPE_STANDARD)
- break;
- s3c_hsudc_handle_reqfeat(hsudc, &ctrl);
- hsudc->ep0state = WAIT_FOR_SETUP;
- return;
- }
-
- if (hsudc->driver) {
- spin_unlock(&hsudc->lock);
- ret = hsudc->driver->setup(&hsudc->gadget, &ctrl);
- spin_lock(&hsudc->lock);
-
- if (ctrl.bRequest == USB_REQ_SET_CONFIGURATION) {
- hsep->bEndpointAddress &= ~USB_DIR_IN;
- hsudc->ep0state = WAIT_FOR_SETUP;
- }
-
- if (ret < 0) {
- dev_err(hsudc->dev, "setup failed, returned %d\n",
- ret);
- s3c_hsudc_set_halt(&hsep->ep, 1);
- hsudc->ep0state = WAIT_FOR_SETUP;
- hsep->bEndpointAddress &= ~USB_DIR_IN;
- }
- }
-}
-
-/** s3c_hsudc_handle_ep0_intr - Handle endpoint 0 interrupt.
- * @hsudc: Device controller on which endpoint 0 interrupt has occurred.
- *
- * Handle endpoint 0 interrupt when it occurs. EP0 interrupt could occur
- * when a stall handshake is sent to host or data is sent/received on
- * endpoint 0.
- */
-static void s3c_hsudc_handle_ep0_intr(struct s3c_hsudc *hsudc)
-{
- struct s3c_hsudc_ep *hsep = &hsudc->ep[0];
- struct s3c_hsudc_req *hsreq;
- u32 csr = readl(hsudc->regs + S3C_EP0SR);
- u32 ecr;
-
- if (csr & S3C_EP0SR_STALL) {
- ecr = readl(hsudc->regs + S3C_EP0CR);
- ecr &= ~(S3C_ECR_STALL | S3C_ECR_FLUSH);
- writel(ecr, hsudc->regs + S3C_EP0CR);
-
- writel(S3C_EP0SR_STALL, hsudc->regs + S3C_EP0SR);
- hsep->stopped = 0;
-
- s3c_hsudc_nuke_ep(hsep, -ECONNABORTED);
- hsudc->ep0state = WAIT_FOR_SETUP;
- hsep->bEndpointAddress &= ~USB_DIR_IN;
- return;
- }
-
- if (csr & S3C_EP0SR_TX_SUCCESS) {
- writel(S3C_EP0SR_TX_SUCCESS, hsudc->regs + S3C_EP0SR);
- if (ep_is_in(hsep)) {
- if (list_empty(&hsep->queue))
- return;
-
- hsreq = list_entry(hsep->queue.next,
- struct s3c_hsudc_req, queue);
- s3c_hsudc_write_fifo(hsep, hsreq);
- }
- }
-
- if (csr & S3C_EP0SR_RX_SUCCESS) {
- if (hsudc->ep0state == WAIT_FOR_SETUP)
- s3c_hsudc_process_setup(hsudc);
- else {
- if (!ep_is_in(hsep)) {
- if (list_empty(&hsep->queue))
- return;
- hsreq = list_entry(hsep->queue.next,
- struct s3c_hsudc_req, queue);
- s3c_hsudc_read_fifo(hsep, hsreq);
- }
- }
- }
-}
-
-/**
- * s3c_hsudc_ep_enable - Enable a endpoint.
- * @_ep: The endpoint to be enabled.
- * @desc: Endpoint descriptor.
- *
- * Enables a endpoint when called from the gadget driver. Endpoint stall if
- * any is cleared, transfer type is configured and endpoint interrupt is
- * enabled.
- */
-static int s3c_hsudc_ep_enable(struct usb_ep *_ep,
- const struct usb_endpoint_descriptor *desc)
-{
- struct s3c_hsudc_ep *hsep;
- struct s3c_hsudc *hsudc;
- unsigned long flags;
- u32 ecr = 0;
-
- hsep = our_ep(_ep);
- if (!_ep || !desc || _ep->name == ep0name
- || desc->bDescriptorType != USB_DT_ENDPOINT
- || hsep->bEndpointAddress != desc->bEndpointAddress
- || ep_maxpacket(hsep) < usb_endpoint_maxp(desc))
- return -EINVAL;
-
- if ((desc->bmAttributes == USB_ENDPOINT_XFER_BULK
- && usb_endpoint_maxp(desc) != ep_maxpacket(hsep))
- || !desc->wMaxPacketSize)
- return -ERANGE;
-
- hsudc = hsep->dev;
- if (!hsudc->driver || hsudc->gadget.speed == USB_SPEED_UNKNOWN)
- return -ESHUTDOWN;
-
- spin_lock_irqsave(&hsudc->lock, flags);
-
- set_index(hsudc, hsep->bEndpointAddress);
- ecr |= ((usb_endpoint_xfer_int(desc)) ? S3C_ECR_IEMS : S3C_ECR_DUEN);
- writel(ecr, hsudc->regs + S3C_ECR);
-
- hsep->stopped = hsep->wedge = 0;
- hsep->ep.desc = desc;
- hsep->ep.maxpacket = usb_endpoint_maxp(desc);
-
- s3c_hsudc_set_halt(_ep, 0);
- __set_bit(ep_index(hsep), hsudc->regs + S3C_EIER);
-
- spin_unlock_irqrestore(&hsudc->lock, flags);
- return 0;
-}
-
-/**
- * s3c_hsudc_ep_disable - Disable a endpoint.
- * @_ep: The endpoint to be disabled.
- * @desc: Endpoint descriptor.
- *
- * Disables a endpoint when called from the gadget driver.
- */
-static int s3c_hsudc_ep_disable(struct usb_ep *_ep)
-{
- struct s3c_hsudc_ep *hsep = our_ep(_ep);
- struct s3c_hsudc *hsudc = hsep->dev;
- unsigned long flags;
-
- if (!_ep || !hsep->ep.desc)
- return -EINVAL;
-
- spin_lock_irqsave(&hsudc->lock, flags);
-
- set_index(hsudc, hsep->bEndpointAddress);
- __clear_bit(ep_index(hsep), hsudc->regs + S3C_EIER);
-
- s3c_hsudc_nuke_ep(hsep, -ESHUTDOWN);
-
- hsep->ep.desc = NULL;
- hsep->stopped = 1;
-
- spin_unlock_irqrestore(&hsudc->lock, flags);
- return 0;
-}
-
-/**
- * s3c_hsudc_alloc_request - Allocate a new request.
- * @_ep: Endpoint for which request is allocated (not used).
- * @gfp_flags: Flags used for the allocation.
- *
- * Allocates a single transfer request structure when called from gadget driver.
- */
-static struct usb_request *s3c_hsudc_alloc_request(struct usb_ep *_ep,
- gfp_t gfp_flags)
-{
- struct s3c_hsudc_req *hsreq;
-
- hsreq = kzalloc(sizeof(*hsreq), gfp_flags);
- if (!hsreq)
- return NULL;
-
- INIT_LIST_HEAD(&hsreq->queue);
- return &hsreq->req;
-}
-
-/**
- * s3c_hsudc_free_request - Deallocate a request.
- * @ep: Endpoint for which request is deallocated (not used).
- * @_req: Request to be deallocated.
- *
- * Allocates a single transfer request structure when called from gadget driver.
- */
-static void s3c_hsudc_free_request(struct usb_ep *ep, struct usb_request *_req)
-{
- struct s3c_hsudc_req *hsreq;
-
- hsreq = our_req(_req);
- WARN_ON(!list_empty(&hsreq->queue));
- kfree(hsreq);
-}
-
-/**
- * s3c_hsudc_queue - Queue a transfer request for the endpoint.
- * @_ep: Endpoint for which the request is queued.
- * @_req: Request to be queued.
- * @gfp_flags: Not used.
- *
- * Start or enqueue a request for a endpoint when called from gadget driver.
- */
-static int s3c_hsudc_queue(struct usb_ep *_ep, struct usb_request *_req,
- gfp_t gfp_flags)
-{
- struct s3c_hsudc_req *hsreq;
- struct s3c_hsudc_ep *hsep;
- struct s3c_hsudc *hsudc;
- unsigned long flags;
- u32 offset;
- u32 csr;
-
- hsreq = our_req(_req);
- if ((!_req || !_req->complete || !_req->buf ||
- !list_empty(&hsreq->queue)))
- return -EINVAL;
-
- hsep = our_ep(_ep);
- hsudc = hsep->dev;
- if (!hsudc->driver || hsudc->gadget.speed == USB_SPEED_UNKNOWN)
- return -ESHUTDOWN;
-
- spin_lock_irqsave(&hsudc->lock, flags);
- set_index(hsudc, hsep->bEndpointAddress);
-
- _req->status = -EINPROGRESS;
- _req->actual = 0;
-
- if (!ep_index(hsep) && _req->length == 0) {
- hsudc->ep0state = WAIT_FOR_SETUP;
- s3c_hsudc_complete_request(hsep, hsreq, 0);
- spin_unlock_irqrestore(&hsudc->lock, flags);
- return 0;
- }
-
- if (list_empty(&hsep->queue) && !hsep->stopped) {
- offset = (ep_index(hsep)) ? S3C_ESR : S3C_EP0SR;
- if (ep_is_in(hsep)) {
- csr = readl(hsudc->regs + offset);
- if (!(csr & S3C_ESR_TX_SUCCESS) &&
- (s3c_hsudc_write_fifo(hsep, hsreq) == 1))
- hsreq = NULL;
- } else {
- csr = readl(hsudc->regs + offset);
- if ((csr & S3C_ESR_RX_SUCCESS)
- && (s3c_hsudc_read_fifo(hsep, hsreq) == 1))
- hsreq = NULL;
- }
- }
-
- if (hsreq)
- list_add_tail(&hsreq->queue, &hsep->queue);
-
- spin_unlock_irqrestore(&hsudc->lock, flags);
- return 0;
-}
-
-/**
- * s3c_hsudc_dequeue - Dequeue a transfer request from an endpoint.
- * @_ep: Endpoint from which the request is dequeued.
- * @_req: Request to be dequeued.
- *
- * Dequeue a request from a endpoint when called from gadget driver.
- */
-static int s3c_hsudc_dequeue(struct usb_ep *_ep, struct usb_request *_req)
-{
- struct s3c_hsudc_ep *hsep = our_ep(_ep);
- struct s3c_hsudc *hsudc = hsep->dev;
- struct s3c_hsudc_req *hsreq = NULL, *iter;
- unsigned long flags;
-
- hsep = our_ep(_ep);
- if (!_ep || hsep->ep.name == ep0name)
- return -EINVAL;
-
- spin_lock_irqsave(&hsudc->lock, flags);
-
- list_for_each_entry(iter, &hsep->queue, queue) {
- if (&iter->req != _req)
- continue;
- hsreq = iter;
- break;
- }
- if (!hsreq) {
- spin_unlock_irqrestore(&hsudc->lock, flags);
- return -EINVAL;
- }
-
- set_index(hsudc, hsep->bEndpointAddress);
- s3c_hsudc_complete_request(hsep, hsreq, -ECONNRESET);
-
- spin_unlock_irqrestore(&hsudc->lock, flags);
- return 0;
-}
-
-static const struct usb_ep_ops s3c_hsudc_ep_ops = {
- .enable = s3c_hsudc_ep_enable,
- .disable = s3c_hsudc_ep_disable,
- .alloc_request = s3c_hsudc_alloc_request,
- .free_request = s3c_hsudc_free_request,
- .queue = s3c_hsudc_queue,
- .dequeue = s3c_hsudc_dequeue,
- .set_halt = s3c_hsudc_set_halt,
- .set_wedge = s3c_hsudc_set_wedge,
-};
-
-/**
- * s3c_hsudc_initep - Initialize a endpoint to default state.
- * @hsudc - Reference to the device controller.
- * @hsep - Endpoint to be initialized.
- * @epnum - Address to be assigned to the endpoint.
- *
- * Initialize a endpoint with default configuration.
- */
-static void s3c_hsudc_initep(struct s3c_hsudc *hsudc,
- struct s3c_hsudc_ep *hsep, int epnum)
-{
- char *dir;
-
- if ((epnum % 2) == 0) {
- dir = "out";
- } else {
- dir = "in";
- hsep->bEndpointAddress = USB_DIR_IN;
- }
-
- hsep->bEndpointAddress |= epnum;
- if (epnum)
- snprintf(hsep->name, sizeof(hsep->name), "ep%d%s", epnum, dir);
- else
- snprintf(hsep->name, sizeof(hsep->name), "%s", ep0name);
-
- INIT_LIST_HEAD(&hsep->queue);
- INIT_LIST_HEAD(&hsep->ep.ep_list);
- if (epnum)
- list_add_tail(&hsep->ep.ep_list, &hsudc->gadget.ep_list);
-
- hsep->dev = hsudc;
- hsep->ep.name = hsep->name;
- usb_ep_set_maxpacket_limit(&hsep->ep, epnum ? 512 : 64);
- hsep->ep.ops = &s3c_hsudc_ep_ops;
- hsep->fifo = hsudc->regs + S3C_BR(epnum);
- hsep->ep.desc = NULL;
- hsep->stopped = 0;
- hsep->wedge = 0;
-
- if (epnum == 0) {
- hsep->ep.caps.type_control = true;
- hsep->ep.caps.dir_in = true;
- hsep->ep.caps.dir_out = true;
- } else {
- hsep->ep.caps.type_iso = true;
- hsep->ep.caps.type_bulk = true;
- hsep->ep.caps.type_int = true;
- }
-
- if (epnum & 1)
- hsep->ep.caps.dir_in = true;
- else
- hsep->ep.caps.dir_out = true;
-
- set_index(hsudc, epnum);
- writel(hsep->ep.maxpacket, hsudc->regs + S3C_MPR);
-}
-
-/**
- * s3c_hsudc_setup_ep - Configure all endpoints to default state.
- * @hsudc: Reference to device controller.
- *
- * Configures all endpoints to default state.
- */
-static void s3c_hsudc_setup_ep(struct s3c_hsudc *hsudc)
-{
- int epnum;
-
- hsudc->ep0state = WAIT_FOR_SETUP;
- INIT_LIST_HEAD(&hsudc->gadget.ep_list);
- for (epnum = 0; epnum < hsudc->pd->epnum; epnum++)
- s3c_hsudc_initep(hsudc, &hsudc->ep[epnum], epnum);
-}
-
-/**
- * s3c_hsudc_reconfig - Reconfigure the device controller to default state.
- * @hsudc: Reference to device controller.
- *
- * Reconfigures the device controller registers to a default state.
- */
-static void s3c_hsudc_reconfig(struct s3c_hsudc *hsudc)
-{
- writel(0xAA, hsudc->regs + S3C_EDR);
- writel(1, hsudc->regs + S3C_EIER);
- writel(0, hsudc->regs + S3C_TR);
- writel(S3C_SCR_DTZIEN_EN | S3C_SCR_RRD_EN | S3C_SCR_SUS_EN |
- S3C_SCR_RST_EN, hsudc->regs + S3C_SCR);
- writel(0, hsudc->regs + S3C_EP0CR);
-
- s3c_hsudc_setup_ep(hsudc);
-}
-
-/**
- * s3c_hsudc_irq - Interrupt handler for device controller.
- * @irq: Not used.
- * @_dev: Reference to the device controller.
- *
- * Interrupt handler for the device controller. This handler handles controller
- * interrupts and endpoint interrupts.
- */
-static irqreturn_t s3c_hsudc_irq(int irq, void *_dev)
-{
- struct s3c_hsudc *hsudc = _dev;
- struct s3c_hsudc_ep *hsep;
- u32 ep_intr;
- u32 sys_status;
- u32 ep_idx;
-
- spin_lock(&hsudc->lock);
-
- sys_status = readl(hsudc->regs + S3C_SSR);
- ep_intr = readl(hsudc->regs + S3C_EIR) & 0x3FF;
-
- if (!ep_intr && !(sys_status & S3C_SSR_DTZIEN_EN)) {
- spin_unlock(&hsudc->lock);
- return IRQ_HANDLED;
- }
-
- if (sys_status) {
- if (sys_status & S3C_SSR_VBUSON)
- writel(S3C_SSR_VBUSON, hsudc->regs + S3C_SSR);
-
- if (sys_status & S3C_SSR_ERR)
- writel(S3C_SSR_ERR, hsudc->regs + S3C_SSR);
-
- if (sys_status & S3C_SSR_SDE) {
- writel(S3C_SSR_SDE, hsudc->regs + S3C_SSR);
- hsudc->gadget.speed = (sys_status & S3C_SSR_HSP) ?
- USB_SPEED_HIGH : USB_SPEED_FULL;
- }
-
- if (sys_status & S3C_SSR_SUSPEND) {
- writel(S3C_SSR_SUSPEND, hsudc->regs + S3C_SSR);
- if (hsudc->gadget.speed != USB_SPEED_UNKNOWN
- && hsudc->driver && hsudc->driver->suspend)
- hsudc->driver->suspend(&hsudc->gadget);
- }
-
- if (sys_status & S3C_SSR_RESUME) {
- writel(S3C_SSR_RESUME, hsudc->regs + S3C_SSR);
- if (hsudc->gadget.speed != USB_SPEED_UNKNOWN
- && hsudc->driver && hsudc->driver->resume)
- hsudc->driver->resume(&hsudc->gadget);
- }
-
- if (sys_status & S3C_SSR_RESET) {
- writel(S3C_SSR_RESET, hsudc->regs + S3C_SSR);
- for (ep_idx = 0; ep_idx < hsudc->pd->epnum; ep_idx++) {
- hsep = &hsudc->ep[ep_idx];
- hsep->stopped = 1;
- s3c_hsudc_nuke_ep(hsep, -ECONNRESET);
- }
- s3c_hsudc_reconfig(hsudc);
- hsudc->ep0state = WAIT_FOR_SETUP;
- }
- }
-
- if (ep_intr & S3C_EIR_EP0) {
- writel(S3C_EIR_EP0, hsudc->regs + S3C_EIR);
- set_index(hsudc, 0);
- s3c_hsudc_handle_ep0_intr(hsudc);
- }
-
- ep_intr >>= 1;
- ep_idx = 1;
- while (ep_intr) {
- if (ep_intr & 1) {
- hsep = &hsudc->ep[ep_idx];
- set_index(hsudc, ep_idx);
- writel(1 << ep_idx, hsudc->regs + S3C_EIR);
- if (ep_is_in(hsep))
- s3c_hsudc_epin_intr(hsudc, ep_idx);
- else
- s3c_hsudc_epout_intr(hsudc, ep_idx);
- }
- ep_intr >>= 1;
- ep_idx++;
- }
-
- spin_unlock(&hsudc->lock);
- return IRQ_HANDLED;
-}
-
-static int s3c_hsudc_start(struct usb_gadget *gadget,
- struct usb_gadget_driver *driver)
-{
- struct s3c_hsudc *hsudc = to_hsudc(gadget);
- int ret;
-
- if (!driver
- || driver->max_speed < USB_SPEED_FULL
- || !driver->setup)
- return -EINVAL;
-
- if (!hsudc)
- return -ENODEV;
-
- if (hsudc->driver)
- return -EBUSY;
-
- hsudc->driver = driver;
-
- ret = regulator_bulk_enable(ARRAY_SIZE(hsudc->supplies),
- hsudc->supplies);
- if (ret != 0) {
- dev_err(hsudc->dev, "failed to enable supplies: %d\n", ret);
- goto err_supplies;
- }
-
- /* connect to bus through transceiver */
- if (!IS_ERR_OR_NULL(hsudc->transceiver)) {
- ret = otg_set_peripheral(hsudc->transceiver->otg,
- &hsudc->gadget);
- if (ret) {
- dev_err(hsudc->dev, "%s: can't bind to transceiver\n",
- hsudc->gadget.name);
- goto err_otg;
- }
- }
-
- enable_irq(hsudc->irq);
- s3c_hsudc_reconfig(hsudc);
-
- pm_runtime_get_sync(hsudc->dev);
-
- if (hsudc->pd->phy_init)
- hsudc->pd->phy_init();
- if (hsudc->pd->gpio_init)
- hsudc->pd->gpio_init();
-
- return 0;
-err_otg:
- regulator_bulk_disable(ARRAY_SIZE(hsudc->supplies), hsudc->supplies);
-err_supplies:
- hsudc->driver = NULL;
- return ret;
-}
-
-static int s3c_hsudc_stop(struct usb_gadget *gadget)
-{
- struct s3c_hsudc *hsudc = to_hsudc(gadget);
- unsigned long flags;
-
- if (!hsudc)
- return -ENODEV;
-
- spin_lock_irqsave(&hsudc->lock, flags);
- hsudc->gadget.speed = USB_SPEED_UNKNOWN;
- if (hsudc->pd->phy_uninit)
- hsudc->pd->phy_uninit();
-
- pm_runtime_put(hsudc->dev);
-
- if (hsudc->pd->gpio_uninit)
- hsudc->pd->gpio_uninit();
- s3c_hsudc_stop_activity(hsudc);
- spin_unlock_irqrestore(&hsudc->lock, flags);
-
- if (!IS_ERR_OR_NULL(hsudc->transceiver))
- (void) otg_set_peripheral(hsudc->transceiver->otg, NULL);
-
- disable_irq(hsudc->irq);
-
- regulator_bulk_disable(ARRAY_SIZE(hsudc->supplies), hsudc->supplies);
- hsudc->driver = NULL;
-
- return 0;
-}
-
-static inline u32 s3c_hsudc_read_frameno(struct s3c_hsudc *hsudc)
-{
- return readl(hsudc->regs + S3C_FNR) & 0x3FF;
-}
-
-static int s3c_hsudc_gadget_getframe(struct usb_gadget *gadget)
-{
- return s3c_hsudc_read_frameno(to_hsudc(gadget));
-}
-
-static int s3c_hsudc_vbus_draw(struct usb_gadget *gadget, unsigned mA)
-{
- struct s3c_hsudc *hsudc = to_hsudc(gadget);
-
- if (!hsudc)
- return -ENODEV;
-
- if (!IS_ERR_OR_NULL(hsudc->transceiver))
- return usb_phy_set_power(hsudc->transceiver, mA);
-
- return -EOPNOTSUPP;
-}
-
-static const struct usb_gadget_ops s3c_hsudc_gadget_ops = {
- .get_frame = s3c_hsudc_gadget_getframe,
- .udc_start = s3c_hsudc_start,
- .udc_stop = s3c_hsudc_stop,
- .vbus_draw = s3c_hsudc_vbus_draw,
-};
-
-static int s3c_hsudc_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct s3c_hsudc *hsudc;
- struct s3c24xx_hsudc_platdata *pd = dev_get_platdata(&pdev->dev);
- int ret, i;
-
- hsudc = devm_kzalloc(&pdev->dev, struct_size(hsudc, ep, pd->epnum),
- GFP_KERNEL);
- if (!hsudc)
- return -ENOMEM;
-
- platform_set_drvdata(pdev, dev);
- hsudc->dev = dev;
- hsudc->pd = dev_get_platdata(&pdev->dev);
-
- hsudc->transceiver = usb_get_phy(USB_PHY_TYPE_USB2);
-
- for (i = 0; i < ARRAY_SIZE(hsudc->supplies); i++)
- hsudc->supplies[i].supply = s3c_hsudc_supply_names[i];
-
- ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(hsudc->supplies),
- hsudc->supplies);
- if (ret != 0) {
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "failed to request supplies: %d\n", ret);
- goto err_supplies;
- }
-
- hsudc->regs = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(hsudc->regs)) {
- ret = PTR_ERR(hsudc->regs);
- goto err_res;
- }
-
- spin_lock_init(&hsudc->lock);
-
- hsudc->gadget.max_speed = USB_SPEED_HIGH;
- hsudc->gadget.ops = &s3c_hsudc_gadget_ops;
- hsudc->gadget.name = dev_name(dev);
- hsudc->gadget.ep0 = &hsudc->ep[0].ep;
- hsudc->gadget.is_otg = 0;
- hsudc->gadget.is_a_peripheral = 0;
- hsudc->gadget.speed = USB_SPEED_UNKNOWN;
-
- s3c_hsudc_setup_ep(hsudc);
-
- ret = platform_get_irq(pdev, 0);
- if (ret < 0)
- goto err_res;
- hsudc->irq = ret;
-
- ret = devm_request_irq(&pdev->dev, hsudc->irq, s3c_hsudc_irq, 0,
- driver_name, hsudc);
- if (ret < 0) {
- dev_err(dev, "irq request failed\n");
- goto err_res;
- }
-
- hsudc->uclk = devm_clk_get(&pdev->dev, "usb-device");
- if (IS_ERR(hsudc->uclk)) {
- dev_err(dev, "failed to find usb-device clock source\n");
- ret = PTR_ERR(hsudc->uclk);
- goto err_res;
- }
- clk_enable(hsudc->uclk);
-
- local_irq_disable();
-
- disable_irq(hsudc->irq);
- local_irq_enable();
-
- ret = usb_add_gadget_udc(&pdev->dev, &hsudc->gadget);
- if (ret)
- goto err_add_udc;
-
- pm_runtime_enable(dev);
-
- return 0;
-err_add_udc:
- clk_disable(hsudc->uclk);
-err_res:
- if (!IS_ERR_OR_NULL(hsudc->transceiver))
- usb_put_phy(hsudc->transceiver);
-
-err_supplies:
- return ret;
-}
-
-static struct platform_driver s3c_hsudc_driver = {
- .driver = {
- .name = "s3c-hsudc",
- },
- .probe = s3c_hsudc_probe,
-};
-
-module_platform_driver(s3c_hsudc_driver);
-
-MODULE_DESCRIPTION("Samsung S3C24XX USB high-speed controller driver");
-MODULE_AUTHOR("Thomas Abraham <thomas.ab@samsung.com>");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:s3c-hsudc");
diff --git a/drivers/usb/gadget/udc/s3c2410_udc.c b/drivers/usb/gadget/udc/s3c2410_udc.c
deleted file mode 100644
index 8c57b191e52b..000000000000
--- a/drivers/usb/gadget/udc/s3c2410_udc.c
+++ /dev/null
@@ -1,1980 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * linux/drivers/usb/gadget/s3c2410_udc.c
- *
- * Samsung S3C24xx series on-chip full speed USB device controllers
- *
- * Copyright (C) 2004-2007 Herbert Pötzl - Arnaud Patard
- * Additional cleanups by Ben Dooks <ben-linux@fluff.org>
- */
-
-#define pr_fmt(fmt) "s3c2410_udc: " fmt
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/delay.h>
-#include <linux/ioport.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/timer.h>
-#include <linux/list.h>
-#include <linux/interrupt.h>
-#include <linux/platform_device.h>
-#include <linux/clk.h>
-#include <linux/gpio/consumer.h>
-#include <linux/prefetch.h>
-#include <linux/io.h>
-
-#include <linux/debugfs.h>
-#include <linux/seq_file.h>
-
-#include <linux/usb.h>
-#include <linux/usb/gadget.h>
-
-#include <asm/byteorder.h>
-#include <asm/irq.h>
-#include <asm/unaligned.h>
-
-#include <linux/platform_data/usb-s3c2410_udc.h>
-
-#include "s3c2410_udc.h"
-#include "s3c2410_udc_regs.h"
-
-#define DRIVER_DESC "S3C2410 USB Device Controller Gadget"
-#define DRIVER_AUTHOR "Herbert Pötzl <herbert@13thfloor.at>, " \
- "Arnaud Patard <arnaud.patard@rtp-net.org>"
-
-static const char gadget_name[] = "s3c2410_udc";
-static const char driver_desc[] = DRIVER_DESC;
-
-static struct s3c2410_udc *the_controller;
-static struct clk *udc_clock;
-static struct clk *usb_bus_clock;
-static void __iomem *base_addr;
-static int irq_usbd;
-static struct dentry *s3c2410_udc_debugfs_root;
-
-static inline u32 udc_read(u32 reg)
-{
- return readb(base_addr + reg);
-}
-
-static inline void udc_write(u32 value, u32 reg)
-{
- writeb(value, base_addr + reg);
-}
-
-static inline void udc_writeb(void __iomem *base, u32 value, u32 reg)
-{
- writeb(value, base + reg);
-}
-
-static struct s3c2410_udc_mach_info *udc_info;
-
-/*************************** DEBUG FUNCTION ***************************/
-#define DEBUG_NORMAL 1
-#define DEBUG_VERBOSE 2
-
-#ifdef CONFIG_USB_S3C2410_DEBUG
-#define USB_S3C2410_DEBUG_LEVEL 0
-
-static uint32_t s3c2410_ticks = 0;
-
-__printf(2, 3)
-static void dprintk(int level, const char *fmt, ...)
-{
- static long prevticks;
- static int invocation;
- struct va_format vaf;
- va_list args;
-
- if (level > USB_S3C2410_DEBUG_LEVEL)
- return;
-
- va_start(args, fmt);
-
- vaf.fmt = fmt;
- vaf.va = &args;
-
- if (s3c2410_ticks != prevticks) {
- prevticks = s3c2410_ticks;
- invocation = 0;
- }
-
- pr_debug("%1lu.%02d USB: %pV", prevticks, invocation++, &vaf);
-
- va_end(args);
-}
-#else
-__printf(2, 3)
-static void dprintk(int level, const char *fmt, ...)
-{
-}
-#endif
-
-static int s3c2410_udc_debugfs_show(struct seq_file *m, void *p)
-{
- u32 addr_reg, pwr_reg, ep_int_reg, usb_int_reg;
- u32 ep_int_en_reg, usb_int_en_reg, ep0_csr;
- u32 ep1_i_csr1, ep1_i_csr2, ep1_o_csr1, ep1_o_csr2;
- u32 ep2_i_csr1, ep2_i_csr2, ep2_o_csr1, ep2_o_csr2;
-
- addr_reg = udc_read(S3C2410_UDC_FUNC_ADDR_REG);
- pwr_reg = udc_read(S3C2410_UDC_PWR_REG);
- ep_int_reg = udc_read(S3C2410_UDC_EP_INT_REG);
- usb_int_reg = udc_read(S3C2410_UDC_USB_INT_REG);
- ep_int_en_reg = udc_read(S3C2410_UDC_EP_INT_EN_REG);
- usb_int_en_reg = udc_read(S3C2410_UDC_USB_INT_EN_REG);
- udc_write(0, S3C2410_UDC_INDEX_REG);
- ep0_csr = udc_read(S3C2410_UDC_IN_CSR1_REG);
- udc_write(1, S3C2410_UDC_INDEX_REG);
- ep1_i_csr1 = udc_read(S3C2410_UDC_IN_CSR1_REG);
- ep1_i_csr2 = udc_read(S3C2410_UDC_IN_CSR2_REG);
- ep1_o_csr1 = udc_read(S3C2410_UDC_IN_CSR1_REG);
- ep1_o_csr2 = udc_read(S3C2410_UDC_IN_CSR2_REG);
- udc_write(2, S3C2410_UDC_INDEX_REG);
- ep2_i_csr1 = udc_read(S3C2410_UDC_IN_CSR1_REG);
- ep2_i_csr2 = udc_read(S3C2410_UDC_IN_CSR2_REG);
- ep2_o_csr1 = udc_read(S3C2410_UDC_IN_CSR1_REG);
- ep2_o_csr2 = udc_read(S3C2410_UDC_IN_CSR2_REG);
-
- seq_printf(m, "FUNC_ADDR_REG : 0x%04X\n"
- "PWR_REG : 0x%04X\n"
- "EP_INT_REG : 0x%04X\n"
- "USB_INT_REG : 0x%04X\n"
- "EP_INT_EN_REG : 0x%04X\n"
- "USB_INT_EN_REG : 0x%04X\n"
- "EP0_CSR : 0x%04X\n"
- "EP1_I_CSR1 : 0x%04X\n"
- "EP1_I_CSR2 : 0x%04X\n"
- "EP1_O_CSR1 : 0x%04X\n"
- "EP1_O_CSR2 : 0x%04X\n"
- "EP2_I_CSR1 : 0x%04X\n"
- "EP2_I_CSR2 : 0x%04X\n"
- "EP2_O_CSR1 : 0x%04X\n"
- "EP2_O_CSR2 : 0x%04X\n",
- addr_reg, pwr_reg, ep_int_reg, usb_int_reg,
- ep_int_en_reg, usb_int_en_reg, ep0_csr,
- ep1_i_csr1, ep1_i_csr2, ep1_o_csr1, ep1_o_csr2,
- ep2_i_csr1, ep2_i_csr2, ep2_o_csr1, ep2_o_csr2
- );
-
- return 0;
-}
-DEFINE_SHOW_ATTRIBUTE(s3c2410_udc_debugfs);
-
-/* io macros */
-
-static inline void s3c2410_udc_clear_ep0_opr(void __iomem *base)
-{
- udc_writeb(base, S3C2410_UDC_INDEX_EP0, S3C2410_UDC_INDEX_REG);
- udc_writeb(base, S3C2410_UDC_EP0_CSR_SOPKTRDY,
- S3C2410_UDC_EP0_CSR_REG);
-}
-
-static inline void s3c2410_udc_clear_ep0_sst(void __iomem *base)
-{
- udc_writeb(base, S3C2410_UDC_INDEX_EP0, S3C2410_UDC_INDEX_REG);
- writeb(0x00, base + S3C2410_UDC_EP0_CSR_REG);
-}
-
-static inline void s3c2410_udc_clear_ep0_se(void __iomem *base)
-{
- udc_writeb(base, S3C2410_UDC_INDEX_EP0, S3C2410_UDC_INDEX_REG);
- udc_writeb(base, S3C2410_UDC_EP0_CSR_SSE, S3C2410_UDC_EP0_CSR_REG);
-}
-
-static inline void s3c2410_udc_set_ep0_ipr(void __iomem *base)
-{
- udc_writeb(base, S3C2410_UDC_INDEX_EP0, S3C2410_UDC_INDEX_REG);
- udc_writeb(base, S3C2410_UDC_EP0_CSR_IPKRDY, S3C2410_UDC_EP0_CSR_REG);
-}
-
-static inline void s3c2410_udc_set_ep0_de(void __iomem *base)
-{
- udc_writeb(base, S3C2410_UDC_INDEX_EP0, S3C2410_UDC_INDEX_REG);
- udc_writeb(base, S3C2410_UDC_EP0_CSR_DE, S3C2410_UDC_EP0_CSR_REG);
-}
-
-static inline void s3c2410_udc_set_ep0_ss(void __iomem *b)
-{
- udc_writeb(b, S3C2410_UDC_INDEX_EP0, S3C2410_UDC_INDEX_REG);
- udc_writeb(b, S3C2410_UDC_EP0_CSR_SENDSTL, S3C2410_UDC_EP0_CSR_REG);
-}
-
-static inline void s3c2410_udc_set_ep0_de_out(void __iomem *base)
-{
- udc_writeb(base, S3C2410_UDC_INDEX_EP0, S3C2410_UDC_INDEX_REG);
-
- udc_writeb(base, (S3C2410_UDC_EP0_CSR_SOPKTRDY
- | S3C2410_UDC_EP0_CSR_DE),
- S3C2410_UDC_EP0_CSR_REG);
-}
-
-static inline void s3c2410_udc_set_ep0_de_in(void __iomem *base)
-{
- udc_writeb(base, S3C2410_UDC_INDEX_EP0, S3C2410_UDC_INDEX_REG);
- udc_writeb(base, (S3C2410_UDC_EP0_CSR_IPKRDY
- | S3C2410_UDC_EP0_CSR_DE),
- S3C2410_UDC_EP0_CSR_REG);
-}
-
-/*------------------------- I/O ----------------------------------*/
-
-/*
- * s3c2410_udc_done
- */
-static void s3c2410_udc_done(struct s3c2410_ep *ep,
- struct s3c2410_request *req, int status)
-{
- unsigned halted = ep->halted;
-
- list_del_init(&req->queue);
-
- if (likely(req->req.status == -EINPROGRESS))
- req->req.status = status;
- else
- status = req->req.status;
-
- ep->halted = 1;
- usb_gadget_giveback_request(&ep->ep, &req->req);
- ep->halted = halted;
-}
-
-static void s3c2410_udc_nuke(struct s3c2410_udc *udc,
- struct s3c2410_ep *ep, int status)
-{
- while (!list_empty(&ep->queue)) {
- struct s3c2410_request *req;
- req = list_entry(ep->queue.next, struct s3c2410_request,
- queue);
- s3c2410_udc_done(ep, req, status);
- }
-}
-
-static inline int s3c2410_udc_fifo_count_out(void)
-{
- int tmp;
-
- tmp = udc_read(S3C2410_UDC_OUT_FIFO_CNT2_REG) << 8;
- tmp |= udc_read(S3C2410_UDC_OUT_FIFO_CNT1_REG);
- return tmp;
-}
-
-/*
- * s3c2410_udc_write_packet
- */
-static inline int s3c2410_udc_write_packet(int fifo,
- struct s3c2410_request *req,
- unsigned max)
-{
- unsigned len = min(req->req.length - req->req.actual, max);
- u8 *buf = req->req.buf + req->req.actual;
-
- prefetch(buf);
-
- dprintk(DEBUG_VERBOSE, "%s %d %d %d %d\n", __func__,
- req->req.actual, req->req.length, len, req->req.actual + len);
-
- req->req.actual += len;
-
- udelay(5);
- writesb(base_addr + fifo, buf, len);
- return len;
-}
-
-/*
- * s3c2410_udc_write_fifo
- *
- * return: 0 = still running, 1 = completed, negative = errno
- */
-static int s3c2410_udc_write_fifo(struct s3c2410_ep *ep,
- struct s3c2410_request *req)
-{
- unsigned count;
- int is_last;
- u32 idx;
- int fifo_reg;
- u32 ep_csr;
-
- idx = ep->bEndpointAddress & 0x7F;
- switch (idx) {
- default:
- idx = 0;
- fallthrough;
- case 0:
- fifo_reg = S3C2410_UDC_EP0_FIFO_REG;
- break;
- case 1:
- fifo_reg = S3C2410_UDC_EP1_FIFO_REG;
- break;
- case 2:
- fifo_reg = S3C2410_UDC_EP2_FIFO_REG;
- break;
- case 3:
- fifo_reg = S3C2410_UDC_EP3_FIFO_REG;
- break;
- case 4:
- fifo_reg = S3C2410_UDC_EP4_FIFO_REG;
- break;
- }
-
- count = s3c2410_udc_write_packet(fifo_reg, req, ep->ep.maxpacket);
-
- /* last packet is often short (sometimes a zlp) */
- if (count != ep->ep.maxpacket)
- is_last = 1;
- else if (req->req.length != req->req.actual || req->req.zero)
- is_last = 0;
- else
- is_last = 2;
-
- /* Only ep0 debug messages are interesting */
- if (idx == 0)
- dprintk(DEBUG_NORMAL,
- "Written ep%d %d.%d of %d b [last %d,z %d]\n",
- idx, count, req->req.actual, req->req.length,
- is_last, req->req.zero);
-
- if (is_last) {
- /* The order is important. It prevents sending 2 packets
- * at the same time */
-
- if (idx == 0) {
- /* Reset signal => no need to say 'data sent' */
- if (!(udc_read(S3C2410_UDC_USB_INT_REG)
- & S3C2410_UDC_USBINT_RESET))
- s3c2410_udc_set_ep0_de_in(base_addr);
- ep->dev->ep0state = EP0_IDLE;
- } else {
- udc_write(idx, S3C2410_UDC_INDEX_REG);
- ep_csr = udc_read(S3C2410_UDC_IN_CSR1_REG);
- udc_write(idx, S3C2410_UDC_INDEX_REG);
- udc_write(ep_csr | S3C2410_UDC_ICSR1_PKTRDY,
- S3C2410_UDC_IN_CSR1_REG);
- }
-
- s3c2410_udc_done(ep, req, 0);
- is_last = 1;
- } else {
- if (idx == 0) {
- /* Reset signal => no need to say 'data sent' */
- if (!(udc_read(S3C2410_UDC_USB_INT_REG)
- & S3C2410_UDC_USBINT_RESET))
- s3c2410_udc_set_ep0_ipr(base_addr);
- } else {
- udc_write(idx, S3C2410_UDC_INDEX_REG);
- ep_csr = udc_read(S3C2410_UDC_IN_CSR1_REG);
- udc_write(idx, S3C2410_UDC_INDEX_REG);
- udc_write(ep_csr | S3C2410_UDC_ICSR1_PKTRDY,
- S3C2410_UDC_IN_CSR1_REG);
- }
- }
-
- return is_last;
-}
-
-static inline int s3c2410_udc_read_packet(int fifo, u8 *buf,
- struct s3c2410_request *req, unsigned avail)
-{
- unsigned len;
-
- len = min(req->req.length - req->req.actual, avail);
- req->req.actual += len;
-
- readsb(fifo + base_addr, buf, len);
- return len;
-}
-
-/*
- * return: 0 = still running, 1 = queue empty, negative = errno
- */
-static int s3c2410_udc_read_fifo(struct s3c2410_ep *ep,
- struct s3c2410_request *req)
-{
- u8 *buf;
- u32 ep_csr;
- unsigned bufferspace;
- int is_last = 1;
- unsigned avail;
- int fifo_count = 0;
- u32 idx;
- int fifo_reg;
-
- idx = ep->bEndpointAddress & 0x7F;
-
- switch (idx) {
- default:
- idx = 0;
- fallthrough;
- case 0:
- fifo_reg = S3C2410_UDC_EP0_FIFO_REG;
- break;
- case 1:
- fifo_reg = S3C2410_UDC_EP1_FIFO_REG;
- break;
- case 2:
- fifo_reg = S3C2410_UDC_EP2_FIFO_REG;
- break;
- case 3:
- fifo_reg = S3C2410_UDC_EP3_FIFO_REG;
- break;
- case 4:
- fifo_reg = S3C2410_UDC_EP4_FIFO_REG;
- break;
- }
-
- if (!req->req.length)
- return 1;
-
- buf = req->req.buf + req->req.actual;
- bufferspace = req->req.length - req->req.actual;
- if (!bufferspace) {
- dprintk(DEBUG_NORMAL, "%s: buffer full!\n", __func__);
- return -1;
- }
-
- udc_write(idx, S3C2410_UDC_INDEX_REG);
-
- fifo_count = s3c2410_udc_fifo_count_out();
- dprintk(DEBUG_NORMAL, "%s fifo count : %d\n", __func__, fifo_count);
-
- if (fifo_count > ep->ep.maxpacket)
- avail = ep->ep.maxpacket;
- else
- avail = fifo_count;
-
- fifo_count = s3c2410_udc_read_packet(fifo_reg, buf, req, avail);
-
- /* checking this with ep0 is not accurate as we already
- * read a control request
- **/
- if (idx != 0 && fifo_count < ep->ep.maxpacket) {
- is_last = 1;
- /* overflowed this request? flush extra data */
- if (fifo_count != avail)
- req->req.status = -EOVERFLOW;
- } else {
- is_last = (req->req.length <= req->req.actual) ? 1 : 0;
- }
-
- udc_write(idx, S3C2410_UDC_INDEX_REG);
- fifo_count = s3c2410_udc_fifo_count_out();
-
- /* Only ep0 debug messages are interesting */
- if (idx == 0)
- dprintk(DEBUG_VERBOSE, "%s fifo count : %d [last %d]\n",
- __func__, fifo_count, is_last);
-
- if (is_last) {
- if (idx == 0) {
- s3c2410_udc_set_ep0_de_out(base_addr);
- ep->dev->ep0state = EP0_IDLE;
- } else {
- udc_write(idx, S3C2410_UDC_INDEX_REG);
- ep_csr = udc_read(S3C2410_UDC_OUT_CSR1_REG);
- udc_write(idx, S3C2410_UDC_INDEX_REG);
- udc_write(ep_csr & ~S3C2410_UDC_OCSR1_PKTRDY,
- S3C2410_UDC_OUT_CSR1_REG);
- }
-
- s3c2410_udc_done(ep, req, 0);
- } else {
- if (idx == 0) {
- s3c2410_udc_clear_ep0_opr(base_addr);
- } else {
- udc_write(idx, S3C2410_UDC_INDEX_REG);
- ep_csr = udc_read(S3C2410_UDC_OUT_CSR1_REG);
- udc_write(idx, S3C2410_UDC_INDEX_REG);
- udc_write(ep_csr & ~S3C2410_UDC_OCSR1_PKTRDY,
- S3C2410_UDC_OUT_CSR1_REG);
- }
- }
-
- return is_last;
-}
-
-static int s3c2410_udc_read_fifo_crq(struct usb_ctrlrequest *crq)
-{
- unsigned char *outbuf = (unsigned char *)crq;
- int bytes_read = 0;
-
- udc_write(0, S3C2410_UDC_INDEX_REG);
-
- bytes_read = s3c2410_udc_fifo_count_out();
-
- dprintk(DEBUG_NORMAL, "%s: fifo_count=%d\n", __func__, bytes_read);
-
- if (bytes_read > sizeof(struct usb_ctrlrequest))
- bytes_read = sizeof(struct usb_ctrlrequest);
-
- readsb(S3C2410_UDC_EP0_FIFO_REG + base_addr, outbuf, bytes_read);
-
- dprintk(DEBUG_VERBOSE, "%s: len=%d %02x:%02x {%x,%x,%x}\n", __func__,
- bytes_read, crq->bRequest, crq->bRequestType,
- crq->wValue, crq->wIndex, crq->wLength);
-
- return bytes_read;
-}
-
-static int s3c2410_udc_get_status(struct s3c2410_udc *dev,
- struct usb_ctrlrequest *crq)
-{
- u16 status = 0;
- u8 ep_num = crq->wIndex & 0x7F;
- u8 is_in = crq->wIndex & USB_DIR_IN;
-
- switch (crq->bRequestType & USB_RECIP_MASK) {
- case USB_RECIP_INTERFACE:
- break;
-
- case USB_RECIP_DEVICE:
- status = dev->devstatus;
- break;
-
- case USB_RECIP_ENDPOINT:
- if (ep_num > 4 || crq->wLength > 2)
- return 1;
-
- if (ep_num == 0) {
- udc_write(0, S3C2410_UDC_INDEX_REG);
- status = udc_read(S3C2410_UDC_IN_CSR1_REG);
- status = status & S3C2410_UDC_EP0_CSR_SENDSTL;
- } else {
- udc_write(ep_num, S3C2410_UDC_INDEX_REG);
- if (is_in) {
- status = udc_read(S3C2410_UDC_IN_CSR1_REG);
- status = status & S3C2410_UDC_ICSR1_SENDSTL;
- } else {
- status = udc_read(S3C2410_UDC_OUT_CSR1_REG);
- status = status & S3C2410_UDC_OCSR1_SENDSTL;
- }
- }
-
- status = status ? 1 : 0;
- break;
-
- default:
- return 1;
- }
-
- /* Seems to be needed to get it working. ouch :( */
- udelay(5);
- udc_write(status & 0xFF, S3C2410_UDC_EP0_FIFO_REG);
- udc_write(status >> 8, S3C2410_UDC_EP0_FIFO_REG);
- s3c2410_udc_set_ep0_de_in(base_addr);
-
- return 0;
-}
-/*------------------------- usb state machine -------------------------------*/
-static int s3c2410_udc_set_halt(struct usb_ep *_ep, int value);
-
-static void s3c2410_udc_handle_ep0_idle(struct s3c2410_udc *dev,
- struct s3c2410_ep *ep,
- struct usb_ctrlrequest *crq,
- u32 ep0csr)
-{
- int len, ret, tmp;
-
- /* start control request? */
- if (!(ep0csr & S3C2410_UDC_EP0_CSR_OPKRDY))
- return;
-
- s3c2410_udc_nuke(dev, ep, -EPROTO);
-
- len = s3c2410_udc_read_fifo_crq(crq);
- if (len != sizeof(*crq)) {
- dprintk(DEBUG_NORMAL, "setup begin: fifo READ ERROR"
- " wanted %d bytes got %d. Stalling out...\n",
- sizeof(*crq), len);
- s3c2410_udc_set_ep0_ss(base_addr);
- return;
- }
-
- dprintk(DEBUG_NORMAL, "bRequest = %d bRequestType %d wLength = %d\n",
- crq->bRequest, crq->bRequestType, crq->wLength);
-
- /* cope with automagic for some standard requests. */
- dev->req_std = (crq->bRequestType & USB_TYPE_MASK)
- == USB_TYPE_STANDARD;
- dev->req_config = 0;
- dev->req_pending = 1;
-
- switch (crq->bRequest) {
- case USB_REQ_SET_CONFIGURATION:
- dprintk(DEBUG_NORMAL, "USB_REQ_SET_CONFIGURATION ...\n");
-
- if (crq->bRequestType == USB_RECIP_DEVICE) {
- dev->req_config = 1;
- s3c2410_udc_set_ep0_de_out(base_addr);
- }
- break;
-
- case USB_REQ_SET_INTERFACE:
- dprintk(DEBUG_NORMAL, "USB_REQ_SET_INTERFACE ...\n");
-
- if (crq->bRequestType == USB_RECIP_INTERFACE) {
- dev->req_config = 1;
- s3c2410_udc_set_ep0_de_out(base_addr);
- }
- break;
-
- case USB_REQ_SET_ADDRESS:
- dprintk(DEBUG_NORMAL, "USB_REQ_SET_ADDRESS ...\n");
-
- if (crq->bRequestType == USB_RECIP_DEVICE) {
- tmp = crq->wValue & 0x7F;
- dev->address = tmp;
- udc_write((tmp | S3C2410_UDC_FUNCADDR_UPDATE),
- S3C2410_UDC_FUNC_ADDR_REG);
- s3c2410_udc_set_ep0_de_out(base_addr);
- return;
- }
- break;
-
- case USB_REQ_GET_STATUS:
- dprintk(DEBUG_NORMAL, "USB_REQ_GET_STATUS ...\n");
- s3c2410_udc_clear_ep0_opr(base_addr);
-
- if (dev->req_std) {
- if (!s3c2410_udc_get_status(dev, crq))
- return;
- }
- break;
-
- case USB_REQ_CLEAR_FEATURE:
- s3c2410_udc_clear_ep0_opr(base_addr);
-
- if (crq->bRequestType != USB_RECIP_ENDPOINT)
- break;
-
- if (crq->wValue != USB_ENDPOINT_HALT || crq->wLength != 0)
- break;
-
- s3c2410_udc_set_halt(&dev->ep[crq->wIndex & 0x7f].ep, 0);
- s3c2410_udc_set_ep0_de_out(base_addr);
- return;
-
- case USB_REQ_SET_FEATURE:
- s3c2410_udc_clear_ep0_opr(base_addr);
-
- if (crq->bRequestType != USB_RECIP_ENDPOINT)
- break;
-
- if (crq->wValue != USB_ENDPOINT_HALT || crq->wLength != 0)
- break;
-
- s3c2410_udc_set_halt(&dev->ep[crq->wIndex & 0x7f].ep, 1);
- s3c2410_udc_set_ep0_de_out(base_addr);
- return;
-
- default:
- s3c2410_udc_clear_ep0_opr(base_addr);
- break;
- }
-
- if (crq->bRequestType & USB_DIR_IN)
- dev->ep0state = EP0_IN_DATA_PHASE;
- else
- dev->ep0state = EP0_OUT_DATA_PHASE;
-
- if (!dev->driver)
- return;
-
- /* deliver the request to the gadget driver */
- ret = dev->driver->setup(&dev->gadget, crq);
- if (ret < 0) {
- if (dev->req_config) {
- dprintk(DEBUG_NORMAL, "config change %02x fail %d?\n",
- crq->bRequest, ret);
- return;
- }
-
- if (ret == -EOPNOTSUPP)
- dprintk(DEBUG_NORMAL, "Operation not supported\n");
- else
- dprintk(DEBUG_NORMAL,
- "dev->driver->setup failed. (%d)\n", ret);
-
- udelay(5);
- s3c2410_udc_set_ep0_ss(base_addr);
- s3c2410_udc_set_ep0_de_out(base_addr);
- dev->ep0state = EP0_IDLE;
- /* deferred i/o == no response yet */
- } else if (dev->req_pending) {
- dprintk(DEBUG_VERBOSE, "dev->req_pending... what now?\n");
- dev->req_pending = 0;
- }
-
- dprintk(DEBUG_VERBOSE, "ep0state %s\n", ep0states[dev->ep0state]);
-}
-
-static void s3c2410_udc_handle_ep0(struct s3c2410_udc *dev)
-{
- u32 ep0csr;
- struct s3c2410_ep *ep = &dev->ep[0];
- struct s3c2410_request *req;
- struct usb_ctrlrequest crq;
-
- if (list_empty(&ep->queue))
- req = NULL;
- else
- req = list_entry(ep->queue.next, struct s3c2410_request, queue);
-
- /* We make the assumption that S3C2410_UDC_IN_CSR1_REG equal to
- * S3C2410_UDC_EP0_CSR_REG when index is zero */
-
- udc_write(0, S3C2410_UDC_INDEX_REG);
- ep0csr = udc_read(S3C2410_UDC_IN_CSR1_REG);
-
- dprintk(DEBUG_NORMAL, "ep0csr %x ep0state %s\n",
- ep0csr, ep0states[dev->ep0state]);
-
- /* clear stall status */
- if (ep0csr & S3C2410_UDC_EP0_CSR_SENTSTL) {
- s3c2410_udc_nuke(dev, ep, -EPIPE);
- dprintk(DEBUG_NORMAL, "... clear SENT_STALL ...\n");
- s3c2410_udc_clear_ep0_sst(base_addr);
- dev->ep0state = EP0_IDLE;
- return;
- }
-
- /* clear setup end */
- if (ep0csr & S3C2410_UDC_EP0_CSR_SE) {
- dprintk(DEBUG_NORMAL, "... serviced SETUP_END ...\n");
- s3c2410_udc_nuke(dev, ep, 0);
- s3c2410_udc_clear_ep0_se(base_addr);
- dev->ep0state = EP0_IDLE;
- }
-
- switch (dev->ep0state) {
- case EP0_IDLE:
- s3c2410_udc_handle_ep0_idle(dev, ep, &crq, ep0csr);
- break;
-
- case EP0_IN_DATA_PHASE: /* GET_DESCRIPTOR etc */
- dprintk(DEBUG_NORMAL, "EP0_IN_DATA_PHASE ... what now?\n");
- if (!(ep0csr & S3C2410_UDC_EP0_CSR_IPKRDY) && req)
- s3c2410_udc_write_fifo(ep, req);
- break;
-
- case EP0_OUT_DATA_PHASE: /* SET_DESCRIPTOR etc */
- dprintk(DEBUG_NORMAL, "EP0_OUT_DATA_PHASE ... what now?\n");
- if ((ep0csr & S3C2410_UDC_EP0_CSR_OPKRDY) && req)
- s3c2410_udc_read_fifo(ep, req);
- break;
-
- case EP0_END_XFER:
- dprintk(DEBUG_NORMAL, "EP0_END_XFER ... what now?\n");
- dev->ep0state = EP0_IDLE;
- break;
-
- case EP0_STALL:
- dprintk(DEBUG_NORMAL, "EP0_STALL ... what now?\n");
- dev->ep0state = EP0_IDLE;
- break;
- }
-}
-
-/*
- * handle_ep - Manage I/O endpoints
- */
-
-static void s3c2410_udc_handle_ep(struct s3c2410_ep *ep)
-{
- struct s3c2410_request *req;
- int is_in = ep->bEndpointAddress & USB_DIR_IN;
- u32 ep_csr1;
- u32 idx;
-
- if (likely(!list_empty(&ep->queue)))
- req = list_entry(ep->queue.next,
- struct s3c2410_request, queue);
- else
- req = NULL;
-
- idx = ep->bEndpointAddress & 0x7F;
-
- if (is_in) {
- udc_write(idx, S3C2410_UDC_INDEX_REG);
- ep_csr1 = udc_read(S3C2410_UDC_IN_CSR1_REG);
- dprintk(DEBUG_VERBOSE, "ep%01d write csr:%02x %d\n",
- idx, ep_csr1, req ? 1 : 0);
-
- if (ep_csr1 & S3C2410_UDC_ICSR1_SENTSTL) {
- dprintk(DEBUG_VERBOSE, "st\n");
- udc_write(idx, S3C2410_UDC_INDEX_REG);
- udc_write(ep_csr1 & ~S3C2410_UDC_ICSR1_SENTSTL,
- S3C2410_UDC_IN_CSR1_REG);
- return;
- }
-
- if (!(ep_csr1 & S3C2410_UDC_ICSR1_PKTRDY) && req)
- s3c2410_udc_write_fifo(ep, req);
- } else {
- udc_write(idx, S3C2410_UDC_INDEX_REG);
- ep_csr1 = udc_read(S3C2410_UDC_OUT_CSR1_REG);
- dprintk(DEBUG_VERBOSE, "ep%01d rd csr:%02x\n", idx, ep_csr1);
-
- if (ep_csr1 & S3C2410_UDC_OCSR1_SENTSTL) {
- udc_write(idx, S3C2410_UDC_INDEX_REG);
- udc_write(ep_csr1 & ~S3C2410_UDC_OCSR1_SENTSTL,
- S3C2410_UDC_OUT_CSR1_REG);
- return;
- }
-
- if ((ep_csr1 & S3C2410_UDC_OCSR1_PKTRDY) && req)
- s3c2410_udc_read_fifo(ep, req);
- }
-}
-
-/*
- * s3c2410_udc_irq - interrupt handler
- */
-static irqreturn_t s3c2410_udc_irq(int dummy, void *_dev)
-{
- struct s3c2410_udc *dev = _dev;
- int usb_status;
- int usbd_status;
- int pwr_reg;
- int ep0csr;
- int i;
- u32 idx, idx2;
- unsigned long flags;
-
- spin_lock_irqsave(&dev->lock, flags);
-
- /* Driver connected ? */
- if (!dev->driver) {
- /* Clear interrupts */
- udc_write(udc_read(S3C2410_UDC_USB_INT_REG),
- S3C2410_UDC_USB_INT_REG);
- udc_write(udc_read(S3C2410_UDC_EP_INT_REG),
- S3C2410_UDC_EP_INT_REG);
- }
-
- /* Save index */
- idx = udc_read(S3C2410_UDC_INDEX_REG);
-
- /* Read status registers */
- usb_status = udc_read(S3C2410_UDC_USB_INT_REG);
- usbd_status = udc_read(S3C2410_UDC_EP_INT_REG);
- pwr_reg = udc_read(S3C2410_UDC_PWR_REG);
-
- udc_writeb(base_addr, S3C2410_UDC_INDEX_EP0, S3C2410_UDC_INDEX_REG);
- ep0csr = udc_read(S3C2410_UDC_IN_CSR1_REG);
-
- dprintk(DEBUG_NORMAL, "usbs=%02x, usbds=%02x, pwr=%02x ep0csr=%02x\n",
- usb_status, usbd_status, pwr_reg, ep0csr);
-
- /*
- * Now, handle interrupts. There's two types :
- * - Reset, Resume, Suspend coming -> usb_int_reg
- * - EP -> ep_int_reg
- */
-
- /* RESET */
- if (usb_status & S3C2410_UDC_USBINT_RESET) {
- /* two kind of reset :
- * - reset start -> pwr reg = 8
- * - reset end -> pwr reg = 0
- **/
- dprintk(DEBUG_NORMAL, "USB reset csr %x pwr %x\n",
- ep0csr, pwr_reg);
-
- dev->gadget.speed = USB_SPEED_UNKNOWN;
- udc_write(0x00, S3C2410_UDC_INDEX_REG);
- udc_write((dev->ep[0].ep.maxpacket & 0x7ff) >> 3,
- S3C2410_UDC_MAXP_REG);
- dev->address = 0;
-
- dev->ep0state = EP0_IDLE;
- dev->gadget.speed = USB_SPEED_FULL;
-
- /* clear interrupt */
- udc_write(S3C2410_UDC_USBINT_RESET,
- S3C2410_UDC_USB_INT_REG);
-
- udc_write(idx, S3C2410_UDC_INDEX_REG);
- spin_unlock_irqrestore(&dev->lock, flags);
- return IRQ_HANDLED;
- }
-
- /* RESUME */
- if (usb_status & S3C2410_UDC_USBINT_RESUME) {
- dprintk(DEBUG_NORMAL, "USB resume\n");
-
- /* clear interrupt */
- udc_write(S3C2410_UDC_USBINT_RESUME,
- S3C2410_UDC_USB_INT_REG);
-
- if (dev->gadget.speed != USB_SPEED_UNKNOWN
- && dev->driver
- && dev->driver->resume)
- dev->driver->resume(&dev->gadget);
- }
-
- /* SUSPEND */
- if (usb_status & S3C2410_UDC_USBINT_SUSPEND) {
- dprintk(DEBUG_NORMAL, "USB suspend\n");
-
- /* clear interrupt */
- udc_write(S3C2410_UDC_USBINT_SUSPEND,
- S3C2410_UDC_USB_INT_REG);
-
- if (dev->gadget.speed != USB_SPEED_UNKNOWN
- && dev->driver
- && dev->driver->suspend)
- dev->driver->suspend(&dev->gadget);
-
- dev->ep0state = EP0_IDLE;
- }
-
- /* EP */
- /* control traffic */
- /* check on ep0csr != 0 is not a good idea as clearing in_pkt_ready
- * generate an interrupt
- */
- if (usbd_status & S3C2410_UDC_INT_EP0) {
- dprintk(DEBUG_VERBOSE, "USB ep0 irq\n");
- /* Clear the interrupt bit by setting it to 1 */
- udc_write(S3C2410_UDC_INT_EP0, S3C2410_UDC_EP_INT_REG);
- s3c2410_udc_handle_ep0(dev);
- }
-
- /* endpoint data transfers */
- for (i = 1; i < S3C2410_ENDPOINTS; i++) {
- u32 tmp = 1 << i;
- if (usbd_status & tmp) {
- dprintk(DEBUG_VERBOSE, "USB ep%d irq\n", i);
-
- /* Clear the interrupt bit by setting it to 1 */
- udc_write(tmp, S3C2410_UDC_EP_INT_REG);
- s3c2410_udc_handle_ep(&dev->ep[i]);
- }
- }
-
- /* what else causes this interrupt? a receive! who is it? */
- if (!usb_status && !usbd_status && !pwr_reg && !ep0csr) {
- for (i = 1; i < S3C2410_ENDPOINTS; i++) {
- idx2 = udc_read(S3C2410_UDC_INDEX_REG);
- udc_write(i, S3C2410_UDC_INDEX_REG);
-
- if (udc_read(S3C2410_UDC_OUT_CSR1_REG) & 0x1)
- s3c2410_udc_handle_ep(&dev->ep[i]);
-
- /* restore index */
- udc_write(idx2, S3C2410_UDC_INDEX_REG);
- }
- }
-
- dprintk(DEBUG_VERBOSE, "irq: %d s3c2410_udc_done.\n", irq_usbd);
-
- /* Restore old index */
- udc_write(idx, S3C2410_UDC_INDEX_REG);
-
- spin_unlock_irqrestore(&dev->lock, flags);
-
- return IRQ_HANDLED;
-}
-/*------------------------- s3c2410_ep_ops ----------------------------------*/
-
-static inline struct s3c2410_ep *to_s3c2410_ep(struct usb_ep *ep)
-{
- return container_of(ep, struct s3c2410_ep, ep);
-}
-
-static inline struct s3c2410_udc *to_s3c2410_udc(struct usb_gadget *gadget)
-{
- return container_of(gadget, struct s3c2410_udc, gadget);
-}
-
-static inline struct s3c2410_request *to_s3c2410_req(struct usb_request *req)
-{
- return container_of(req, struct s3c2410_request, req);
-}
-
-/*
- * s3c2410_udc_ep_enable
- */
-static int s3c2410_udc_ep_enable(struct usb_ep *_ep,
- const struct usb_endpoint_descriptor *desc)
-{
- struct s3c2410_udc *dev;
- struct s3c2410_ep *ep;
- u32 max, tmp;
- unsigned long flags;
- u32 csr1, csr2;
- u32 int_en_reg;
-
- ep = to_s3c2410_ep(_ep);
-
- if (!_ep || !desc
- || _ep->name == ep0name
- || desc->bDescriptorType != USB_DT_ENDPOINT)
- return -EINVAL;
-
- dev = ep->dev;
- if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
- return -ESHUTDOWN;
-
- max = usb_endpoint_maxp(desc);
-
- local_irq_save(flags);
- _ep->maxpacket = max;
- ep->ep.desc = desc;
- ep->halted = 0;
- ep->bEndpointAddress = desc->bEndpointAddress;
-
- /* set max packet */
- udc_write(ep->num, S3C2410_UDC_INDEX_REG);
- udc_write(max >> 3, S3C2410_UDC_MAXP_REG);
-
- /* set type, direction, address; reset fifo counters */
- if (desc->bEndpointAddress & USB_DIR_IN) {
- csr1 = S3C2410_UDC_ICSR1_FFLUSH|S3C2410_UDC_ICSR1_CLRDT;
- csr2 = S3C2410_UDC_ICSR2_MODEIN|S3C2410_UDC_ICSR2_DMAIEN;
-
- udc_write(ep->num, S3C2410_UDC_INDEX_REG);
- udc_write(csr1, S3C2410_UDC_IN_CSR1_REG);
- udc_write(ep->num, S3C2410_UDC_INDEX_REG);
- udc_write(csr2, S3C2410_UDC_IN_CSR2_REG);
- } else {
- /* don't flush in fifo or it will cause endpoint interrupt */
- csr1 = S3C2410_UDC_ICSR1_CLRDT;
- csr2 = S3C2410_UDC_ICSR2_DMAIEN;
-
- udc_write(ep->num, S3C2410_UDC_INDEX_REG);
- udc_write(csr1, S3C2410_UDC_IN_CSR1_REG);
- udc_write(ep->num, S3C2410_UDC_INDEX_REG);
- udc_write(csr2, S3C2410_UDC_IN_CSR2_REG);
-
- csr1 = S3C2410_UDC_OCSR1_FFLUSH | S3C2410_UDC_OCSR1_CLRDT;
- csr2 = S3C2410_UDC_OCSR2_DMAIEN;
-
- udc_write(ep->num, S3C2410_UDC_INDEX_REG);
- udc_write(csr1, S3C2410_UDC_OUT_CSR1_REG);
- udc_write(ep->num, S3C2410_UDC_INDEX_REG);
- udc_write(csr2, S3C2410_UDC_OUT_CSR2_REG);
- }
-
- /* enable irqs */
- int_en_reg = udc_read(S3C2410_UDC_EP_INT_EN_REG);
- udc_write(int_en_reg | (1 << ep->num), S3C2410_UDC_EP_INT_EN_REG);
-
- /* print some debug message */
- tmp = desc->bEndpointAddress;
- dprintk(DEBUG_NORMAL, "enable %s(%d) ep%x%s-blk max %02x\n",
- _ep->name, ep->num, tmp,
- desc->bEndpointAddress & USB_DIR_IN ? "in" : "out", max);
-
- local_irq_restore(flags);
- s3c2410_udc_set_halt(_ep, 0);
-
- return 0;
-}
-
-/*
- * s3c2410_udc_ep_disable
- */
-static int s3c2410_udc_ep_disable(struct usb_ep *_ep)
-{
- struct s3c2410_ep *ep = to_s3c2410_ep(_ep);
- unsigned long flags;
- u32 int_en_reg;
-
- if (!_ep || !ep->ep.desc) {
- dprintk(DEBUG_NORMAL, "%s not enabled\n",
- _ep ? ep->ep.name : NULL);
- return -EINVAL;
- }
-
- local_irq_save(flags);
-
- dprintk(DEBUG_NORMAL, "ep_disable: %s\n", _ep->name);
-
- ep->ep.desc = NULL;
- ep->halted = 1;
-
- s3c2410_udc_nuke(ep->dev, ep, -ESHUTDOWN);
-
- /* disable irqs */
- int_en_reg = udc_read(S3C2410_UDC_EP_INT_EN_REG);
- udc_write(int_en_reg & ~(1<<ep->num), S3C2410_UDC_EP_INT_EN_REG);
-
- local_irq_restore(flags);
-
- dprintk(DEBUG_NORMAL, "%s disabled\n", _ep->name);
-
- return 0;
-}
-
-/*
- * s3c2410_udc_alloc_request
- */
-static struct usb_request *
-s3c2410_udc_alloc_request(struct usb_ep *_ep, gfp_t mem_flags)
-{
- struct s3c2410_request *req;
-
- dprintk(DEBUG_VERBOSE, "%s(%p,%d)\n", __func__, _ep, mem_flags);
-
- if (!_ep)
- return NULL;
-
- req = kzalloc(sizeof(struct s3c2410_request), mem_flags);
- if (!req)
- return NULL;
-
- INIT_LIST_HEAD(&req->queue);
- return &req->req;
-}
-
-/*
- * s3c2410_udc_free_request
- */
-static void
-s3c2410_udc_free_request(struct usb_ep *_ep, struct usb_request *_req)
-{
- struct s3c2410_ep *ep = to_s3c2410_ep(_ep);
- struct s3c2410_request *req = to_s3c2410_req(_req);
-
- dprintk(DEBUG_VERBOSE, "%s(%p,%p)\n", __func__, _ep, _req);
-
- if (!ep || !_req || (!ep->ep.desc && _ep->name != ep0name))
- return;
-
- WARN_ON(!list_empty(&req->queue));
- kfree(req);
-}
-
-/*
- * s3c2410_udc_queue
- */
-static int s3c2410_udc_queue(struct usb_ep *_ep, struct usb_request *_req,
- gfp_t gfp_flags)
-{
- struct s3c2410_request *req = to_s3c2410_req(_req);
- struct s3c2410_ep *ep = to_s3c2410_ep(_ep);
- struct s3c2410_udc *dev;
- u32 ep_csr = 0;
- int fifo_count = 0;
- unsigned long flags;
-
- if (unlikely(!_ep || (!ep->ep.desc && ep->ep.name != ep0name))) {
- dprintk(DEBUG_NORMAL, "%s: invalid args\n", __func__);
- return -EINVAL;
- }
-
- dev = ep->dev;
- if (unlikely(!dev->driver
- || dev->gadget.speed == USB_SPEED_UNKNOWN)) {
- return -ESHUTDOWN;
- }
-
- local_irq_save(flags);
-
- if (unlikely(!_req || !_req->complete
- || !_req->buf || !list_empty(&req->queue))) {
- if (!_req)
- dprintk(DEBUG_NORMAL, "%s: 1 X X X\n", __func__);
- else {
- dprintk(DEBUG_NORMAL, "%s: 0 %01d %01d %01d\n",
- __func__, !_req->complete, !_req->buf,
- !list_empty(&req->queue));
- }
-
- local_irq_restore(flags);
- return -EINVAL;
- }
-
- _req->status = -EINPROGRESS;
- _req->actual = 0;
-
- dprintk(DEBUG_VERBOSE, "%s: ep%x len %d\n",
- __func__, ep->bEndpointAddress, _req->length);
-
- if (ep->bEndpointAddress) {
- udc_write(ep->bEndpointAddress & 0x7F, S3C2410_UDC_INDEX_REG);
-
- ep_csr = udc_read((ep->bEndpointAddress & USB_DIR_IN)
- ? S3C2410_UDC_IN_CSR1_REG
- : S3C2410_UDC_OUT_CSR1_REG);
- fifo_count = s3c2410_udc_fifo_count_out();
- } else {
- udc_write(0, S3C2410_UDC_INDEX_REG);
- ep_csr = udc_read(S3C2410_UDC_IN_CSR1_REG);
- fifo_count = s3c2410_udc_fifo_count_out();
- }
-
- /* kickstart this i/o queue? */
- if (list_empty(&ep->queue) && !ep->halted) {
- if (ep->bEndpointAddress == 0 /* ep0 */) {
- switch (dev->ep0state) {
- case EP0_IN_DATA_PHASE:
- if (!(ep_csr&S3C2410_UDC_EP0_CSR_IPKRDY)
- && s3c2410_udc_write_fifo(ep,
- req)) {
- dev->ep0state = EP0_IDLE;
- req = NULL;
- }
- break;
-
- case EP0_OUT_DATA_PHASE:
- if ((!_req->length)
- || ((ep_csr & S3C2410_UDC_OCSR1_PKTRDY)
- && s3c2410_udc_read_fifo(ep,
- req))) {
- dev->ep0state = EP0_IDLE;
- req = NULL;
- }
- break;
-
- default:
- local_irq_restore(flags);
- return -EL2HLT;
- }
- } else if ((ep->bEndpointAddress & USB_DIR_IN) != 0
- && (!(ep_csr&S3C2410_UDC_OCSR1_PKTRDY))
- && s3c2410_udc_write_fifo(ep, req)) {
- req = NULL;
- } else if ((ep_csr & S3C2410_UDC_OCSR1_PKTRDY)
- && fifo_count
- && s3c2410_udc_read_fifo(ep, req)) {
- req = NULL;
- }
- }
-
- /* pio or dma irq handler advances the queue. */
- if (likely(req))
- list_add_tail(&req->queue, &ep->queue);
-
- local_irq_restore(flags);
-
- dprintk(DEBUG_VERBOSE, "%s ok\n", __func__);
- return 0;
-}
-
-/*
- * s3c2410_udc_dequeue
- */
-static int s3c2410_udc_dequeue(struct usb_ep *_ep, struct usb_request *_req)
-{
- struct s3c2410_ep *ep = to_s3c2410_ep(_ep);
- int retval = -EINVAL;
- unsigned long flags;
- struct s3c2410_request *req = NULL, *iter;
-
- dprintk(DEBUG_VERBOSE, "%s(%p,%p)\n", __func__, _ep, _req);
-
- if (!the_controller->driver)
- return -ESHUTDOWN;
-
- if (!_ep || !_req)
- return retval;
-
- local_irq_save(flags);
-
- list_for_each_entry(iter, &ep->queue, queue) {
- if (&iter->req != _req)
- continue;
- list_del_init(&iter->queue);
- _req->status = -ECONNRESET;
- req = iter;
- retval = 0;
- break;
- }
-
- if (retval == 0) {
- dprintk(DEBUG_VERBOSE,
- "dequeued req %p from %s, len %d buf %p\n",
- req, _ep->name, _req->length, _req->buf);
-
- s3c2410_udc_done(ep, req, -ECONNRESET);
- }
-
- local_irq_restore(flags);
- return retval;
-}
-
-/*
- * s3c2410_udc_set_halt
- */
-static int s3c2410_udc_set_halt(struct usb_ep *_ep, int value)
-{
- struct s3c2410_ep *ep = to_s3c2410_ep(_ep);
- u32 ep_csr = 0;
- unsigned long flags;
- u32 idx;
-
- if (unlikely(!_ep || (!ep->ep.desc && ep->ep.name != ep0name))) {
- dprintk(DEBUG_NORMAL, "%s: inval 2\n", __func__);
- return -EINVAL;
- }
-
- local_irq_save(flags);
-
- idx = ep->bEndpointAddress & 0x7F;
-
- if (idx == 0) {
- s3c2410_udc_set_ep0_ss(base_addr);
- s3c2410_udc_set_ep0_de_out(base_addr);
- } else {
- udc_write(idx, S3C2410_UDC_INDEX_REG);
- ep_csr = udc_read((ep->bEndpointAddress & USB_DIR_IN)
- ? S3C2410_UDC_IN_CSR1_REG
- : S3C2410_UDC_OUT_CSR1_REG);
-
- if ((ep->bEndpointAddress & USB_DIR_IN) != 0) {
- if (value)
- udc_write(ep_csr | S3C2410_UDC_ICSR1_SENDSTL,
- S3C2410_UDC_IN_CSR1_REG);
- else {
- ep_csr &= ~S3C2410_UDC_ICSR1_SENDSTL;
- udc_write(ep_csr, S3C2410_UDC_IN_CSR1_REG);
- ep_csr |= S3C2410_UDC_ICSR1_CLRDT;
- udc_write(ep_csr, S3C2410_UDC_IN_CSR1_REG);
- }
- } else {
- if (value)
- udc_write(ep_csr | S3C2410_UDC_OCSR1_SENDSTL,
- S3C2410_UDC_OUT_CSR1_REG);
- else {
- ep_csr &= ~S3C2410_UDC_OCSR1_SENDSTL;
- udc_write(ep_csr, S3C2410_UDC_OUT_CSR1_REG);
- ep_csr |= S3C2410_UDC_OCSR1_CLRDT;
- udc_write(ep_csr, S3C2410_UDC_OUT_CSR1_REG);
- }
- }
- }
-
- ep->halted = value ? 1 : 0;
- local_irq_restore(flags);
-
- return 0;
-}
-
-static const struct usb_ep_ops s3c2410_ep_ops = {
- .enable = s3c2410_udc_ep_enable,
- .disable = s3c2410_udc_ep_disable,
-
- .alloc_request = s3c2410_udc_alloc_request,
- .free_request = s3c2410_udc_free_request,
-
- .queue = s3c2410_udc_queue,
- .dequeue = s3c2410_udc_dequeue,
-
- .set_halt = s3c2410_udc_set_halt,
-};
-
-/*------------------------- usb_gadget_ops ----------------------------------*/
-
-/*
- * s3c2410_udc_get_frame
- */
-static int s3c2410_udc_get_frame(struct usb_gadget *_gadget)
-{
- int tmp;
-
- dprintk(DEBUG_VERBOSE, "%s()\n", __func__);
-
- tmp = udc_read(S3C2410_UDC_FRAME_NUM2_REG) << 8;
- tmp |= udc_read(S3C2410_UDC_FRAME_NUM1_REG);
- return tmp;
-}
-
-/*
- * s3c2410_udc_wakeup
- */
-static int s3c2410_udc_wakeup(struct usb_gadget *_gadget)
-{
- dprintk(DEBUG_NORMAL, "%s()\n", __func__);
- return 0;
-}
-
-/*
- * s3c2410_udc_set_selfpowered
- */
-static int s3c2410_udc_set_selfpowered(struct usb_gadget *gadget, int value)
-{
- struct s3c2410_udc *udc = to_s3c2410_udc(gadget);
-
- dprintk(DEBUG_NORMAL, "%s()\n", __func__);
-
- gadget->is_selfpowered = (value != 0);
- if (value)
- udc->devstatus |= (1 << USB_DEVICE_SELF_POWERED);
- else
- udc->devstatus &= ~(1 << USB_DEVICE_SELF_POWERED);
-
- return 0;
-}
-
-static void s3c2410_udc_disable(struct s3c2410_udc *dev);
-static void s3c2410_udc_enable(struct s3c2410_udc *dev);
-
-static int s3c2410_udc_set_pullup(struct s3c2410_udc *udc, int is_on)
-{
- dprintk(DEBUG_NORMAL, "%s()\n", __func__);
-
- if (udc_info && (udc_info->udc_command || udc->pullup_gpiod)) {
-
- if (is_on)
- s3c2410_udc_enable(udc);
- else {
- if (udc->gadget.speed != USB_SPEED_UNKNOWN) {
- if (udc->driver && udc->driver->disconnect)
- udc->driver->disconnect(&udc->gadget);
-
- }
- s3c2410_udc_disable(udc);
- }
- } else {
- return -EOPNOTSUPP;
- }
-
- return 0;
-}
-
-static int s3c2410_udc_vbus_session(struct usb_gadget *gadget, int is_active)
-{
- struct s3c2410_udc *udc = to_s3c2410_udc(gadget);
-
- dprintk(DEBUG_NORMAL, "%s()\n", __func__);
-
- udc->vbus = (is_active != 0);
- s3c2410_udc_set_pullup(udc, is_active);
- return 0;
-}
-
-static int s3c2410_udc_pullup(struct usb_gadget *gadget, int is_on)
-{
- struct s3c2410_udc *udc = to_s3c2410_udc(gadget);
-
- dprintk(DEBUG_NORMAL, "%s()\n", __func__);
-
- s3c2410_udc_set_pullup(udc, is_on);
- return 0;
-}
-
-static irqreturn_t s3c2410_udc_vbus_irq(int irq, void *_dev)
-{
- struct s3c2410_udc *dev = _dev;
- unsigned int value;
-
- dprintk(DEBUG_NORMAL, "%s()\n", __func__);
-
- value = gpiod_get_value(dev->vbus_gpiod);
-
- if (value != dev->vbus)
- s3c2410_udc_vbus_session(&dev->gadget, value);
-
- return IRQ_HANDLED;
-}
-
-static int s3c2410_vbus_draw(struct usb_gadget *_gadget, unsigned ma)
-{
- dprintk(DEBUG_NORMAL, "%s()\n", __func__);
-
- if (udc_info && udc_info->vbus_draw) {
- udc_info->vbus_draw(ma);
- return 0;
- }
-
- return -ENOTSUPP;
-}
-
-static int s3c2410_udc_start(struct usb_gadget *g,
- struct usb_gadget_driver *driver);
-static int s3c2410_udc_stop(struct usb_gadget *g);
-
-static const struct usb_gadget_ops s3c2410_ops = {
- .get_frame = s3c2410_udc_get_frame,
- .wakeup = s3c2410_udc_wakeup,
- .set_selfpowered = s3c2410_udc_set_selfpowered,
- .pullup = s3c2410_udc_pullup,
- .vbus_session = s3c2410_udc_vbus_session,
- .vbus_draw = s3c2410_vbus_draw,
- .udc_start = s3c2410_udc_start,
- .udc_stop = s3c2410_udc_stop,
-};
-
-static void s3c2410_udc_command(struct s3c2410_udc *udc,
- enum s3c2410_udc_cmd_e cmd)
-{
- if (!udc_info)
- return;
-
- if (udc_info->udc_command) {
- udc_info->udc_command(cmd);
- } else if (udc->pullup_gpiod) {
- int value;
-
- switch (cmd) {
- case S3C2410_UDC_P_ENABLE:
- value = 1;
- break;
- case S3C2410_UDC_P_DISABLE:
- value = 0;
- break;
- default:
- return;
- }
-
- gpiod_set_value(udc->pullup_gpiod, value);
- }
-}
-
-/*------------------------- gadget driver handling---------------------------*/
-/*
- * s3c2410_udc_disable
- */
-static void s3c2410_udc_disable(struct s3c2410_udc *dev)
-{
- dprintk(DEBUG_NORMAL, "%s()\n", __func__);
-
- /* Disable all interrupts */
- udc_write(0x00, S3C2410_UDC_USB_INT_EN_REG);
- udc_write(0x00, S3C2410_UDC_EP_INT_EN_REG);
-
- /* Clear the interrupt registers */
- udc_write(S3C2410_UDC_USBINT_RESET
- | S3C2410_UDC_USBINT_RESUME
- | S3C2410_UDC_USBINT_SUSPEND,
- S3C2410_UDC_USB_INT_REG);
-
- udc_write(0x1F, S3C2410_UDC_EP_INT_REG);
-
- /* Good bye, cruel world */
- s3c2410_udc_command(dev, S3C2410_UDC_P_DISABLE);
-
- /* Set speed to unknown */
- dev->gadget.speed = USB_SPEED_UNKNOWN;
-}
-
-/*
- * s3c2410_udc_reinit
- */
-static void s3c2410_udc_reinit(struct s3c2410_udc *dev)
-{
- u32 i;
-
- /* device/ep0 records init */
- INIT_LIST_HEAD(&dev->gadget.ep_list);
- INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);
- dev->ep0state = EP0_IDLE;
-
- for (i = 0; i < S3C2410_ENDPOINTS; i++) {
- struct s3c2410_ep *ep = &dev->ep[i];
-
- if (i != 0)
- list_add_tail(&ep->ep.ep_list, &dev->gadget.ep_list);
-
- ep->dev = dev;
- ep->ep.desc = NULL;
- ep->halted = 0;
- INIT_LIST_HEAD(&ep->queue);
- usb_ep_set_maxpacket_limit(&ep->ep, ep->ep.maxpacket);
- }
-}
-
-/*
- * s3c2410_udc_enable
- */
-static void s3c2410_udc_enable(struct s3c2410_udc *dev)
-{
- int i;
-
- dprintk(DEBUG_NORMAL, "s3c2410_udc_enable called\n");
-
- /* dev->gadget.speed = USB_SPEED_UNKNOWN; */
- dev->gadget.speed = USB_SPEED_FULL;
-
- /* Set MAXP for all endpoints */
- for (i = 0; i < S3C2410_ENDPOINTS; i++) {
- udc_write(i, S3C2410_UDC_INDEX_REG);
- udc_write((dev->ep[i].ep.maxpacket & 0x7ff) >> 3,
- S3C2410_UDC_MAXP_REG);
- }
-
- /* Set default power state */
- udc_write(DEFAULT_POWER_STATE, S3C2410_UDC_PWR_REG);
-
- /* Enable reset and suspend interrupt interrupts */
- udc_write(S3C2410_UDC_USBINT_RESET | S3C2410_UDC_USBINT_SUSPEND,
- S3C2410_UDC_USB_INT_EN_REG);
-
- /* Enable ep0 interrupt */
- udc_write(S3C2410_UDC_INT_EP0, S3C2410_UDC_EP_INT_EN_REG);
-
- /* time to say "hello, world" */
- s3c2410_udc_command(dev, S3C2410_UDC_P_ENABLE);
-}
-
-static int s3c2410_udc_start(struct usb_gadget *g,
- struct usb_gadget_driver *driver)
-{
- struct s3c2410_udc *udc = to_s3c2410(g);
-
- dprintk(DEBUG_NORMAL, "%s() '%s'\n", __func__, driver->driver.name);
-
- /* Hook the driver */
- udc->driver = driver;
-
- /* Enable udc */
- s3c2410_udc_enable(udc);
-
- return 0;
-}
-
-static int s3c2410_udc_stop(struct usb_gadget *g)
-{
- struct s3c2410_udc *udc = to_s3c2410(g);
-
- udc->driver = NULL;
-
- /* Disable udc */
- s3c2410_udc_disable(udc);
-
- return 0;
-}
-
-/*---------------------------------------------------------------------------*/
-static struct s3c2410_udc memory = {
- .gadget = {
- .ops = &s3c2410_ops,
- .ep0 = &memory.ep[0].ep,
- .name = gadget_name,
- .dev = {
- .init_name = "gadget",
- },
- },
-
- /* control endpoint */
- .ep[0] = {
- .num = 0,
- .ep = {
- .name = ep0name,
- .ops = &s3c2410_ep_ops,
- .maxpacket = EP0_FIFO_SIZE,
- .caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_CONTROL,
- USB_EP_CAPS_DIR_ALL),
- },
- .dev = &memory,
- },
-
- /* first group of endpoints */
- .ep[1] = {
- .num = 1,
- .ep = {
- .name = "ep1-bulk",
- .ops = &s3c2410_ep_ops,
- .maxpacket = EP_FIFO_SIZE,
- .caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK,
- USB_EP_CAPS_DIR_ALL),
- },
- .dev = &memory,
- .fifo_size = EP_FIFO_SIZE,
- .bEndpointAddress = 1,
- .bmAttributes = USB_ENDPOINT_XFER_BULK,
- },
- .ep[2] = {
- .num = 2,
- .ep = {
- .name = "ep2-bulk",
- .ops = &s3c2410_ep_ops,
- .maxpacket = EP_FIFO_SIZE,
- .caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK,
- USB_EP_CAPS_DIR_ALL),
- },
- .dev = &memory,
- .fifo_size = EP_FIFO_SIZE,
- .bEndpointAddress = 2,
- .bmAttributes = USB_ENDPOINT_XFER_BULK,
- },
- .ep[3] = {
- .num = 3,
- .ep = {
- .name = "ep3-bulk",
- .ops = &s3c2410_ep_ops,
- .maxpacket = EP_FIFO_SIZE,
- .caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK,
- USB_EP_CAPS_DIR_ALL),
- },
- .dev = &memory,
- .fifo_size = EP_FIFO_SIZE,
- .bEndpointAddress = 3,
- .bmAttributes = USB_ENDPOINT_XFER_BULK,
- },
- .ep[4] = {
- .num = 4,
- .ep = {
- .name = "ep4-bulk",
- .ops = &s3c2410_ep_ops,
- .maxpacket = EP_FIFO_SIZE,
- .caps = USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK,
- USB_EP_CAPS_DIR_ALL),
- },
- .dev = &memory,
- .fifo_size = EP_FIFO_SIZE,
- .bEndpointAddress = 4,
- .bmAttributes = USB_ENDPOINT_XFER_BULK,
- }
-
-};
-
-/*
- * probe - binds to the platform device
- */
-static int s3c2410_udc_probe(struct platform_device *pdev)
-{
- struct s3c2410_udc *udc = &memory;
- struct device *dev = &pdev->dev;
- int retval;
- int irq;
-
- dev_dbg(dev, "%s()\n", __func__);
-
- usb_bus_clock = clk_get(NULL, "usb-bus-gadget");
- if (IS_ERR(usb_bus_clock)) {
- dev_err(dev, "failed to get usb bus clock source\n");
- return PTR_ERR(usb_bus_clock);
- }
-
- clk_prepare_enable(usb_bus_clock);
-
- udc_clock = clk_get(NULL, "usb-device");
- if (IS_ERR(udc_clock)) {
- dev_err(dev, "failed to get udc clock source\n");
- retval = PTR_ERR(udc_clock);
- goto err_usb_bus_clk;
- }
-
- clk_prepare_enable(udc_clock);
-
- mdelay(10);
-
- dev_dbg(dev, "got and enabled clocks\n");
-
- if (strncmp(pdev->name, "s3c2440", 7) == 0) {
- dev_info(dev, "S3C2440: increasing FIFO to 128 bytes\n");
- memory.ep[1].fifo_size = S3C2440_EP_FIFO_SIZE;
- memory.ep[2].fifo_size = S3C2440_EP_FIFO_SIZE;
- memory.ep[3].fifo_size = S3C2440_EP_FIFO_SIZE;
- memory.ep[4].fifo_size = S3C2440_EP_FIFO_SIZE;
- }
-
- spin_lock_init(&udc->lock);
- udc_info = dev_get_platdata(&pdev->dev);
-
- base_addr = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(base_addr)) {
- retval = PTR_ERR(base_addr);
- goto err_udc_clk;
- }
-
- the_controller = udc;
- platform_set_drvdata(pdev, udc);
-
- s3c2410_udc_disable(udc);
- s3c2410_udc_reinit(udc);
-
- irq_usbd = platform_get_irq(pdev, 0);
- if (irq_usbd < 0) {
- retval = irq_usbd;
- goto err_udc_clk;
- }
-
- /* irq setup after old hardware state is cleaned up */
- retval = request_irq(irq_usbd, s3c2410_udc_irq,
- 0, gadget_name, udc);
-
- if (retval != 0) {
- dev_err(dev, "cannot get irq %i, err %d\n", irq_usbd, retval);
- retval = -EBUSY;
- goto err_udc_clk;
- }
-
- dev_dbg(dev, "got irq %i\n", irq_usbd);
-
- udc->vbus_gpiod = gpiod_get_optional(dev, "vbus", GPIOD_IN);
- if (IS_ERR(udc->vbus_gpiod)) {
- retval = PTR_ERR(udc->vbus_gpiod);
- goto err_int;
- }
- if (udc->vbus_gpiod) {
- gpiod_set_consumer_name(udc->vbus_gpiod, "udc vbus");
-
- irq = gpiod_to_irq(udc->vbus_gpiod);
- if (irq < 0) {
- dev_err(dev, "no irq for gpio vbus pin\n");
- retval = irq;
- goto err_gpio_claim;
- }
-
- retval = request_irq(irq, s3c2410_udc_vbus_irq,
- IRQF_TRIGGER_RISING
- | IRQF_TRIGGER_FALLING | IRQF_SHARED,
- gadget_name, udc);
-
- if (retval != 0) {
- dev_err(dev, "can't get vbus irq %d, err %d\n",
- irq, retval);
- retval = -EBUSY;
- goto err_gpio_claim;
- }
-
- dev_dbg(dev, "got irq %i\n", irq);
- } else {
- udc->vbus = 1;
- }
-
- udc->pullup_gpiod = gpiod_get_optional(dev, "pullup", GPIOD_OUT_LOW);
- if (IS_ERR(udc->pullup_gpiod)) {
- retval = PTR_ERR(udc->pullup_gpiod);
- goto err_vbus_irq;
- }
- gpiod_set_consumer_name(udc->pullup_gpiod, "udc pullup");
-
- retval = usb_add_gadget_udc(&pdev->dev, &udc->gadget);
- if (retval)
- goto err_add_udc;
-
- debugfs_create_file("registers", S_IRUGO, s3c2410_udc_debugfs_root, udc,
- &s3c2410_udc_debugfs_fops);
-
- dev_dbg(dev, "probe ok\n");
-
- return 0;
-
-err_add_udc:
-err_vbus_irq:
- if (udc->vbus_gpiod)
- free_irq(gpiod_to_irq(udc->vbus_gpiod), udc);
-err_gpio_claim:
-err_int:
- free_irq(irq_usbd, udc);
-err_udc_clk:
- clk_disable_unprepare(udc_clock);
- clk_put(udc_clock);
- udc_clock = NULL;
-err_usb_bus_clk:
- clk_disable_unprepare(usb_bus_clock);
- clk_put(usb_bus_clock);
- usb_bus_clock = NULL;
-
- return retval;
-}
-
-/*
- * s3c2410_udc_remove
- */
-static int s3c2410_udc_remove(struct platform_device *pdev)
-{
- struct s3c2410_udc *udc = platform_get_drvdata(pdev);
-
- dev_dbg(&pdev->dev, "%s()\n", __func__);
-
- if (udc->driver)
- return -EBUSY;
-
- usb_del_gadget_udc(&udc->gadget);
- debugfs_remove(debugfs_lookup("registers", s3c2410_udc_debugfs_root));
-
- if (udc->vbus_gpiod)
- free_irq(gpiod_to_irq(udc->vbus_gpiod), udc);
-
- free_irq(irq_usbd, udc);
-
- if (!IS_ERR(udc_clock) && udc_clock != NULL) {
- clk_disable_unprepare(udc_clock);
- clk_put(udc_clock);
- udc_clock = NULL;
- }
-
- if (!IS_ERR(usb_bus_clock) && usb_bus_clock != NULL) {
- clk_disable_unprepare(usb_bus_clock);
- clk_put(usb_bus_clock);
- usb_bus_clock = NULL;
- }
-
- dev_dbg(&pdev->dev, "%s: remove ok\n", __func__);
- return 0;
-}
-
-#ifdef CONFIG_PM
-static int
-s3c2410_udc_suspend(struct platform_device *pdev, pm_message_t message)
-{
- struct s3c2410_udc *udc = platform_get_drvdata(pdev);
-
- s3c2410_udc_command(udc, S3C2410_UDC_P_DISABLE);
-
- return 0;
-}
-
-static int s3c2410_udc_resume(struct platform_device *pdev)
-{
- struct s3c2410_udc *udc = platform_get_drvdata(pdev);
-
- s3c2410_udc_command(udc, S3C2410_UDC_P_ENABLE);
-
- return 0;
-}
-#else
-#define s3c2410_udc_suspend NULL
-#define s3c2410_udc_resume NULL
-#endif
-
-static const struct platform_device_id s3c_udc_ids[] = {
- { "s3c2410-usbgadget", },
- { "s3c2440-usbgadget", },
- { }
-};
-MODULE_DEVICE_TABLE(platform, s3c_udc_ids);
-
-static struct platform_driver udc_driver_24x0 = {
- .driver = {
- .name = "s3c24x0-usbgadget",
- },
- .probe = s3c2410_udc_probe,
- .remove = s3c2410_udc_remove,
- .suspend = s3c2410_udc_suspend,
- .resume = s3c2410_udc_resume,
- .id_table = s3c_udc_ids,
-};
-
-static int __init udc_init(void)
-{
- int retval;
-
- dprintk(DEBUG_NORMAL, "%s\n", gadget_name);
-
- s3c2410_udc_debugfs_root = debugfs_create_dir(gadget_name,
- usb_debug_root);
-
- retval = platform_driver_register(&udc_driver_24x0);
- if (retval)
- goto err;
-
- return 0;
-
-err:
- debugfs_remove(s3c2410_udc_debugfs_root);
- return retval;
-}
-
-static void __exit udc_exit(void)
-{
- platform_driver_unregister(&udc_driver_24x0);
- debugfs_remove_recursive(s3c2410_udc_debugfs_root);
-}
-
-module_init(udc_init);
-module_exit(udc_exit);
-
-MODULE_AUTHOR(DRIVER_AUTHOR);
-MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE("GPL");
diff --git a/drivers/usb/gadget/udc/s3c2410_udc.h b/drivers/usb/gadget/udc/s3c2410_udc.h
deleted file mode 100644
index cdbf202e5ee8..000000000000
--- a/drivers/usb/gadget/udc/s3c2410_udc.h
+++ /dev/null
@@ -1,99 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * linux/drivers/usb/gadget/s3c2410_udc.h
- * Samsung on-chip full speed USB device controllers
- *
- * Copyright (C) 2004-2007 Herbert Pötzl - Arnaud Patard
- * Additional cleanups by Ben Dooks <ben-linux@fluff.org>
- */
-
-#ifndef _S3C2410_UDC_H
-#define _S3C2410_UDC_H
-
-struct s3c2410_ep {
- struct list_head queue;
- unsigned long last_io; /* jiffies timestamp */
- struct usb_gadget *gadget;
- struct s3c2410_udc *dev;
- struct usb_ep ep;
- u8 num;
-
- unsigned short fifo_size;
- u8 bEndpointAddress;
- u8 bmAttributes;
-
- unsigned halted : 1;
- unsigned already_seen : 1;
- unsigned setup_stage : 1;
-};
-
-
-/* Warning : ep0 has a fifo of 16 bytes */
-/* Don't try to set 32 or 64 */
-/* also testusb 14 fails wit 16 but is */
-/* fine with 8 */
-#define EP0_FIFO_SIZE 8
-#define EP_FIFO_SIZE 64
-#define DEFAULT_POWER_STATE 0x00
-
-#define S3C2440_EP_FIFO_SIZE 128
-
-static const char ep0name [] = "ep0";
-
-static const char *const ep_name[] = {
- ep0name, /* everyone has ep0 */
- /* s3c2410 four bidirectional bulk endpoints */
- "ep1-bulk", "ep2-bulk", "ep3-bulk", "ep4-bulk",
-};
-
-#define S3C2410_ENDPOINTS ARRAY_SIZE(ep_name)
-
-struct s3c2410_request {
- struct list_head queue; /* ep's requests */
- struct usb_request req;
-};
-
-enum ep0_state {
- EP0_IDLE,
- EP0_IN_DATA_PHASE,
- EP0_OUT_DATA_PHASE,
- EP0_END_XFER,
- EP0_STALL,
-};
-
-static const char *ep0states[]= {
- "EP0_IDLE",
- "EP0_IN_DATA_PHASE",
- "EP0_OUT_DATA_PHASE",
- "EP0_END_XFER",
- "EP0_STALL",
-};
-
-struct s3c2410_udc {
- spinlock_t lock;
-
- struct s3c2410_ep ep[S3C2410_ENDPOINTS];
- int address;
- struct usb_gadget gadget;
- struct usb_gadget_driver *driver;
- struct s3c2410_request fifo_req;
- u8 fifo_buf[EP_FIFO_SIZE];
- u16 devstatus;
-
- u32 port_status;
- int ep0state;
-
- struct gpio_desc *vbus_gpiod;
- struct gpio_desc *pullup_gpiod;
-
- unsigned got_irq : 1;
-
- unsigned req_std : 1;
- unsigned req_config : 1;
- unsigned req_pending : 1;
- u8 vbus;
- int irq;
-};
-#define to_s3c2410(g) (container_of((g), struct s3c2410_udc, gadget))
-
-#endif
diff --git a/drivers/usb/gadget/udc/s3c2410_udc_regs.h b/drivers/usb/gadget/udc/s3c2410_udc_regs.h
deleted file mode 100644
index d8d2eeaca088..000000000000
--- a/drivers/usb/gadget/udc/s3c2410_udc_regs.h
+++ /dev/null
@@ -1,146 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0+ */
-/*
- * Copyright (C) 2004 Herbert Poetzl <herbert@13thfloor.at>
- */
-
-#ifndef __ASM_ARCH_REGS_UDC_H
-#define __ASM_ARCH_REGS_UDC_H
-
-#define S3C2410_USBDREG(x) (x)
-
-#define S3C2410_UDC_FUNC_ADDR_REG S3C2410_USBDREG(0x0140)
-#define S3C2410_UDC_PWR_REG S3C2410_USBDREG(0x0144)
-#define S3C2410_UDC_EP_INT_REG S3C2410_USBDREG(0x0148)
-
-#define S3C2410_UDC_USB_INT_REG S3C2410_USBDREG(0x0158)
-#define S3C2410_UDC_EP_INT_EN_REG S3C2410_USBDREG(0x015c)
-
-#define S3C2410_UDC_USB_INT_EN_REG S3C2410_USBDREG(0x016c)
-
-#define S3C2410_UDC_FRAME_NUM1_REG S3C2410_USBDREG(0x0170)
-#define S3C2410_UDC_FRAME_NUM2_REG S3C2410_USBDREG(0x0174)
-
-#define S3C2410_UDC_EP0_FIFO_REG S3C2410_USBDREG(0x01c0)
-#define S3C2410_UDC_EP1_FIFO_REG S3C2410_USBDREG(0x01c4)
-#define S3C2410_UDC_EP2_FIFO_REG S3C2410_USBDREG(0x01c8)
-#define S3C2410_UDC_EP3_FIFO_REG S3C2410_USBDREG(0x01cc)
-#define S3C2410_UDC_EP4_FIFO_REG S3C2410_USBDREG(0x01d0)
-
-#define S3C2410_UDC_EP1_DMA_CON S3C2410_USBDREG(0x0200)
-#define S3C2410_UDC_EP1_DMA_UNIT S3C2410_USBDREG(0x0204)
-#define S3C2410_UDC_EP1_DMA_FIFO S3C2410_USBDREG(0x0208)
-#define S3C2410_UDC_EP1_DMA_TTC_L S3C2410_USBDREG(0x020c)
-#define S3C2410_UDC_EP1_DMA_TTC_M S3C2410_USBDREG(0x0210)
-#define S3C2410_UDC_EP1_DMA_TTC_H S3C2410_USBDREG(0x0214)
-
-#define S3C2410_UDC_EP2_DMA_CON S3C2410_USBDREG(0x0218)
-#define S3C2410_UDC_EP2_DMA_UNIT S3C2410_USBDREG(0x021c)
-#define S3C2410_UDC_EP2_DMA_FIFO S3C2410_USBDREG(0x0220)
-#define S3C2410_UDC_EP2_DMA_TTC_L S3C2410_USBDREG(0x0224)
-#define S3C2410_UDC_EP2_DMA_TTC_M S3C2410_USBDREG(0x0228)
-#define S3C2410_UDC_EP2_DMA_TTC_H S3C2410_USBDREG(0x022c)
-
-#define S3C2410_UDC_EP3_DMA_CON S3C2410_USBDREG(0x0240)
-#define S3C2410_UDC_EP3_DMA_UNIT S3C2410_USBDREG(0x0244)
-#define S3C2410_UDC_EP3_DMA_FIFO S3C2410_USBDREG(0x0248)
-#define S3C2410_UDC_EP3_DMA_TTC_L S3C2410_USBDREG(0x024c)
-#define S3C2410_UDC_EP3_DMA_TTC_M S3C2410_USBDREG(0x0250)
-#define S3C2410_UDC_EP3_DMA_TTC_H S3C2410_USBDREG(0x0254)
-
-#define S3C2410_UDC_EP4_DMA_CON S3C2410_USBDREG(0x0258)
-#define S3C2410_UDC_EP4_DMA_UNIT S3C2410_USBDREG(0x025c)
-#define S3C2410_UDC_EP4_DMA_FIFO S3C2410_USBDREG(0x0260)
-#define S3C2410_UDC_EP4_DMA_TTC_L S3C2410_USBDREG(0x0264)
-#define S3C2410_UDC_EP4_DMA_TTC_M S3C2410_USBDREG(0x0268)
-#define S3C2410_UDC_EP4_DMA_TTC_H S3C2410_USBDREG(0x026c)
-
-#define S3C2410_UDC_INDEX_REG S3C2410_USBDREG(0x0178)
-
-/* indexed registers */
-
-#define S3C2410_UDC_MAXP_REG S3C2410_USBDREG(0x0180)
-
-#define S3C2410_UDC_EP0_CSR_REG S3C2410_USBDREG(0x0184)
-
-#define S3C2410_UDC_IN_CSR1_REG S3C2410_USBDREG(0x0184)
-#define S3C2410_UDC_IN_CSR2_REG S3C2410_USBDREG(0x0188)
-
-#define S3C2410_UDC_OUT_CSR1_REG S3C2410_USBDREG(0x0190)
-#define S3C2410_UDC_OUT_CSR2_REG S3C2410_USBDREG(0x0194)
-#define S3C2410_UDC_OUT_FIFO_CNT1_REG S3C2410_USBDREG(0x0198)
-#define S3C2410_UDC_OUT_FIFO_CNT2_REG S3C2410_USBDREG(0x019c)
-
-#define S3C2410_UDC_FUNCADDR_UPDATE (1 << 7)
-
-#define S3C2410_UDC_PWR_ISOUP (1 << 7) /* R/W */
-#define S3C2410_UDC_PWR_RESET (1 << 3) /* R */
-#define S3C2410_UDC_PWR_RESUME (1 << 2) /* R/W */
-#define S3C2410_UDC_PWR_SUSPEND (1 << 1) /* R */
-#define S3C2410_UDC_PWR_ENSUSPEND (1 << 0) /* R/W */
-
-#define S3C2410_UDC_PWR_DEFAULT (0x00)
-
-#define S3C2410_UDC_INT_EP4 (1 << 4) /* R/W (clear only) */
-#define S3C2410_UDC_INT_EP3 (1 << 3) /* R/W (clear only) */
-#define S3C2410_UDC_INT_EP2 (1 << 2) /* R/W (clear only) */
-#define S3C2410_UDC_INT_EP1 (1 << 1) /* R/W (clear only) */
-#define S3C2410_UDC_INT_EP0 (1 << 0) /* R/W (clear only) */
-
-#define S3C2410_UDC_USBINT_RESET (1 << 2) /* R/W (clear only) */
-#define S3C2410_UDC_USBINT_RESUME (1 << 1) /* R/W (clear only) */
-#define S3C2410_UDC_USBINT_SUSPEND (1 << 0) /* R/W (clear only) */
-
-#define S3C2410_UDC_INTE_EP4 (1 << 4) /* R/W */
-#define S3C2410_UDC_INTE_EP3 (1 << 3) /* R/W */
-#define S3C2410_UDC_INTE_EP2 (1 << 2) /* R/W */
-#define S3C2410_UDC_INTE_EP1 (1 << 1) /* R/W */
-#define S3C2410_UDC_INTE_EP0 (1 << 0) /* R/W */
-
-#define S3C2410_UDC_USBINTE_RESET (1 << 2) /* R/W */
-#define S3C2410_UDC_USBINTE_SUSPEND (1 << 0) /* R/W */
-
-#define S3C2410_UDC_INDEX_EP0 (0x00)
-#define S3C2410_UDC_INDEX_EP1 (0x01)
-#define S3C2410_UDC_INDEX_EP2 (0x02)
-#define S3C2410_UDC_INDEX_EP3 (0x03)
-#define S3C2410_UDC_INDEX_EP4 (0x04)
-
-#define S3C2410_UDC_ICSR1_CLRDT (1 << 6) /* R/W */
-#define S3C2410_UDC_ICSR1_SENTSTL (1 << 5) /* R/W (clear only) */
-#define S3C2410_UDC_ICSR1_SENDSTL (1 << 4) /* R/W */
-#define S3C2410_UDC_ICSR1_FFLUSH (1 << 3) /* W (set only) */
-#define S3C2410_UDC_ICSR1_UNDRUN (1 << 2) /* R/W (clear only) */
-#define S3C2410_UDC_ICSR1_PKTRDY (1 << 0) /* R/W (set only) */
-
-#define S3C2410_UDC_ICSR2_AUTOSET (1 << 7) /* R/W */
-#define S3C2410_UDC_ICSR2_ISO (1 << 6) /* R/W */
-#define S3C2410_UDC_ICSR2_MODEIN (1 << 5) /* R/W */
-#define S3C2410_UDC_ICSR2_DMAIEN (1 << 4) /* R/W */
-
-#define S3C2410_UDC_OCSR1_CLRDT (1 << 7) /* R/W */
-#define S3C2410_UDC_OCSR1_SENTSTL (1 << 6) /* R/W (clear only) */
-#define S3C2410_UDC_OCSR1_SENDSTL (1 << 5) /* R/W */
-#define S3C2410_UDC_OCSR1_FFLUSH (1 << 4) /* R/W */
-#define S3C2410_UDC_OCSR1_DERROR (1 << 3) /* R */
-#define S3C2410_UDC_OCSR1_OVRRUN (1 << 2) /* R/W (clear only) */
-#define S3C2410_UDC_OCSR1_PKTRDY (1 << 0) /* R/W (clear only) */
-
-#define S3C2410_UDC_OCSR2_AUTOCLR (1 << 7) /* R/W */
-#define S3C2410_UDC_OCSR2_ISO (1 << 6) /* R/W */
-#define S3C2410_UDC_OCSR2_DMAIEN (1 << 5) /* R/W */
-
-#define S3C2410_UDC_EP0_CSR_OPKRDY (1 << 0)
-#define S3C2410_UDC_EP0_CSR_IPKRDY (1 << 1)
-#define S3C2410_UDC_EP0_CSR_SENTSTL (1 << 2)
-#define S3C2410_UDC_EP0_CSR_DE (1 << 3)
-#define S3C2410_UDC_EP0_CSR_SE (1 << 4)
-#define S3C2410_UDC_EP0_CSR_SENDSTL (1 << 5)
-#define S3C2410_UDC_EP0_CSR_SOPKTRDY (1 << 6)
-#define S3C2410_UDC_EP0_CSR_SSE (1 << 7)
-
-#define S3C2410_UDC_MAXP_8 (1 << 0)
-#define S3C2410_UDC_MAXP_16 (1 << 1)
-#define S3C2410_UDC_MAXP_32 (1 << 2)
-#define S3C2410_UDC_MAXP_64 (1 << 3)
-
-#endif
diff --git a/drivers/usb/gadget/udc/snps_udc_core.c b/drivers/usb/gadget/udc/snps_udc_core.c
index 52ea4dcf6a92..2fc5d4d277bc 100644
--- a/drivers/usb/gadget/udc/snps_udc_core.c
+++ b/drivers/usb/gadget/udc/snps_udc_core.c
@@ -1933,7 +1933,6 @@ static int amd5536_udc_start(struct usb_gadget *g,
struct udc *dev = to_amd5536_udc(g);
u32 tmp;
- driver->driver.bus = NULL;
dev->driver = driver;
/* Some gadget drivers use both ep0 directions.
diff --git a/drivers/usb/gadget/udc/tegra-xudc.c b/drivers/usb/gadget/udc/tegra-xudc.c
index 76919d7570d2..2b71b33725f1 100644
--- a/drivers/usb/gadget/udc/tegra-xudc.c
+++ b/drivers/usb/gadget/udc/tegra-xudc.c
@@ -796,21 +796,16 @@ static int tegra_xudc_get_phy_index(struct tegra_xudc *xudc,
return -1;
}
-static int tegra_xudc_vbus_notify(struct notifier_block *nb,
- unsigned long action, void *data)
+static void tegra_xudc_update_data_role(struct tegra_xudc *xudc,
+ struct usb_phy *usbphy)
{
- struct tegra_xudc *xudc = container_of(nb, struct tegra_xudc,
- vbus_nb);
- struct usb_phy *usbphy = (struct usb_phy *)data;
int phy_index;
- dev_dbg(xudc->dev, "%s(): event is %d\n", __func__, usbphy->last_event);
-
if ((xudc->device_mode && usbphy->last_event == USB_EVENT_VBUS) ||
(!xudc->device_mode && usbphy->last_event != USB_EVENT_VBUS)) {
dev_dbg(xudc->dev, "Same role(%d) received. Ignore",
xudc->device_mode);
- return NOTIFY_OK;
+ return;
}
xudc->device_mode = (usbphy->last_event == USB_EVENT_VBUS) ? true :
@@ -826,6 +821,18 @@ static int tegra_xudc_vbus_notify(struct notifier_block *nb,
xudc->curr_usbphy = usbphy;
schedule_work(&xudc->usb_role_sw_work);
}
+}
+
+static int tegra_xudc_vbus_notify(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct tegra_xudc *xudc = container_of(nb, struct tegra_xudc,
+ vbus_nb);
+ struct usb_phy *usbphy = (struct usb_phy *)data;
+
+ dev_dbg(xudc->dev, "%s(): event is %d\n", __func__, usbphy->last_event);
+
+ tegra_xudc_update_data_role(xudc, usbphy);
return NOTIFY_OK;
}
@@ -3521,7 +3528,7 @@ static int tegra_xudc_phy_get(struct tegra_xudc *xudc)
/* Get usb-phy, if utmi phy is available */
xudc->usbphy[i] = devm_usb_get_phy_by_node(xudc->dev,
xudc->utmi_phy[i]->dev.of_node,
- &xudc->vbus_nb);
+ NULL);
if (IS_ERR(xudc->usbphy[i])) {
err = PTR_ERR(xudc->usbphy[i]);
dev_err_probe(xudc->dev, err,
@@ -3660,6 +3667,19 @@ static struct tegra_xudc_soc tegra194_xudc_soc_data = {
.has_ipfs = false,
};
+static struct tegra_xudc_soc tegra234_xudc_soc_data = {
+ .clock_names = tegra186_xudc_clock_names,
+ .num_clks = ARRAY_SIZE(tegra186_xudc_clock_names),
+ .num_phys = 4,
+ .u1_enable = true,
+ .u2_enable = true,
+ .lpm_enable = true,
+ .invalid_seq_num = false,
+ .pls_quirk = false,
+ .port_reset_quirk = false,
+ .has_ipfs = false,
+};
+
static const struct of_device_id tegra_xudc_of_match[] = {
{
.compatible = "nvidia,tegra210-xudc",
@@ -3673,6 +3693,10 @@ static const struct of_device_id tegra_xudc_of_match[] = {
.compatible = "nvidia,tegra194-xudc",
.data = &tegra194_xudc_soc_data
},
+ {
+ .compatible = "nvidia,tegra234-xudc",
+ .data = &tegra234_xudc_soc_data
+ },
{ }
};
MODULE_DEVICE_TABLE(of, tegra_xudc_of_match);
@@ -3856,6 +3880,14 @@ static int tegra_xudc_probe(struct platform_device *pdev)
goto free_eps;
}
+ for (i = 0; i < xudc->soc->num_phys; i++) {
+ if (!xudc->usbphy[i])
+ continue;
+
+ usb_register_notifier(xudc->usbphy[i], &xudc->vbus_nb);
+ tegra_xudc_update_data_role(xudc, xudc->usbphy[i]);
+ }
+
return 0;
free_eps:
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index 8d799d23c476..eacb603ad1b2 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -53,7 +53,6 @@ config USB_XHCI_PCI_RENESAS
config USB_XHCI_PLATFORM
tristate "Generic xHCI driver for a platform device"
- select USB_XHCI_RCAR if ARCH_RENESAS
help
Adds an xHCI host driver for a generic platform device, which
provides a memory space and an irq.
@@ -91,10 +90,20 @@ config USB_XHCI_RCAR
tristate "xHCI support for Renesas R-Car SoCs"
depends on USB_XHCI_PLATFORM
depends on ARCH_RENESAS || COMPILE_TEST
+ default ARCH_RENESAS
help
Say 'Y' to enable the support for the xHCI host controller
found in Renesas R-Car ARM SoCs.
+config USB_XHCI_RZV2M
+ bool "xHCI support for Renesas RZ/V2M SoC"
+ depends on USB_XHCI_RCAR
+ depends on ARCH_R9A09G011 || COMPILE_TEST
+ depends on USB_RZV2M_USB3DRD=y || (USB_RZV2M_USB3DRD=USB_XHCI_RCAR)
+ help
+ Say 'Y' to enable the support for the xHCI host controller
+ found in Renesas RZ/V2M SoC.
+
config USB_XHCI_TEGRA
tristate "xHCI support for NVIDIA Tegra SoCs"
depends on PHY_TEGRA_XUSB
@@ -316,18 +325,6 @@ config USB_OCTEON_HCD
To compile this driver as a module, choose M here. The module
will be called octeon-hcd.
-config USB_CNS3XXX_EHCI
- bool "Cavium CNS3XXX EHCI Module (DEPRECATED)"
- depends on ARCH_CNS3XXX || COMPILE_TEST
- select USB_EHCI_HCD_PLATFORM
- help
- This option is deprecated now and the driver was removed, use
- USB_EHCI_HCD_PLATFORM instead.
-
- Enable support for the CNS3XXX SOC's on-chip EHCI controller.
- It is needed for high-speed (480Mbit/sec) USB 2.0 device
- support.
-
config USB_EHCI_HCD_PLATFORM
tristate "Generic EHCI driver for a platform device"
help
@@ -421,7 +418,6 @@ if USB_OHCI_HCD
config USB_OHCI_HCD_OMAP1
tristate "OHCI support for OMAP1/2 chips"
depends on ARCH_OMAP1
- depends on ISP1301_OMAP || !(MACH_OMAP_H2 || MACH_OMAP_H3)
default y
help
Enables support for the OHCI controller on OMAP1/2 chips.
@@ -444,12 +440,12 @@ config USB_OHCI_HCD_STI
STMicroelectronics consumer electronics SoC's.
config USB_OHCI_HCD_S3C2410
- tristate "OHCI support for Samsung S3C24xx/S3C64xx SoC series"
- depends on USB_OHCI_HCD && (ARCH_S3C24XX || ARCH_S3C64XX || COMPILE_TEST)
- default y if (ARCH_S3C24XX || ARCH_S3C64XX)
+ tristate "OHCI support for Samsung S3C64xx SoC series"
+ depends on USB_OHCI_HCD && (ARCH_S3C64XX || COMPILE_TEST)
+ default ARCH_S3C64XX
help
Enables support for the on-chip OHCI controller on
- S3C24xx/S3C64xx chips.
+ S3C64xx chips.
config USB_OHCI_HCD_LPC32XX
tristate "Support for LPC on-chip OHCI USB controller"
@@ -548,17 +544,6 @@ config USB_OHCI_HCD_SSB
If unsure, say N.
-config USB_OHCI_SH
- bool "OHCI support for SuperH USB controller (DEPRECATED)"
- depends on SUPERH || COMPILE_TEST
- select USB_OHCI_HCD_PLATFORM
- help
- This option is deprecated now and the driver was removed, use
- USB_OHCI_HCD_PLATFORM instead.
-
- Enables support for the on-chip OHCI controller on the SuperH.
- If you use the PCI OHCI controller, this option is not necessary.
-
config USB_OHCI_EXYNOS
tristate "OHCI support for Samsung S5P/Exynos SoC Series"
depends on ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST
@@ -566,17 +551,6 @@ config USB_OHCI_EXYNOS
Enable support for the Samsung S5Pv210 and Exynos SOC's on-chip OHCI
controller.
-config USB_CNS3XXX_OHCI
- bool "Cavium CNS3XXX OHCI Module (DEPRECATED)"
- depends on ARCH_CNS3XXX || COMPILE_TEST
- select USB_OHCI_HCD_PLATFORM
- help
- This option is deprecated now and the driver was removed, use
- USB_OHCI_HCD_PLATFORM instead.
-
- Enable support for the CNS3XXX SOC's on-chip OHCI controller.
- It is needed for low-speed USB 1.0 device support.
-
config USB_OHCI_HCD_PLATFORM
tristate "Generic OHCI driver for a platform device"
help
diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile
index 6d8ee264c9b2..5a13712f367d 100644
--- a/drivers/usb/host/Makefile
+++ b/drivers/usb/host/Makefile
@@ -25,14 +25,13 @@ xhci-plat-hcd-y := xhci-plat.o
ifneq ($(CONFIG_USB_XHCI_MVEBU), )
xhci-plat-hcd-y += xhci-mvebu.o
endif
-ifneq ($(CONFIG_USB_XHCI_RCAR), )
- xhci-plat-hcd-y += xhci-rcar.o
-endif
-
ifneq ($(CONFIG_DEBUG_FS),)
xhci-hcd-y += xhci-debugfs.o
endif
+xhci-rcar-hcd-y += xhci-rcar.o
+xhci-rcar-hcd-$(CONFIG_USB_XHCI_RZV2M) += xhci-rzv2m.o
+
obj-$(CONFIG_USB_PCI) += pci-quirks.o
obj-$(CONFIG_USB_EHCI_HCD) += ehci-hcd.o
@@ -72,6 +71,7 @@ obj-$(CONFIG_USB_XHCI_PCI) += xhci-pci.o
obj-$(CONFIG_USB_XHCI_PCI_RENESAS) += xhci-pci-renesas.o
obj-$(CONFIG_USB_XHCI_PLATFORM) += xhci-plat-hcd.o
obj-$(CONFIG_USB_XHCI_HISTB) += xhci-histb.o
+obj-$(CONFIG_USB_XHCI_RCAR) += xhci-rcar-hcd.o
obj-$(CONFIG_USB_XHCI_MTK) += xhci-mtk-hcd.o
obj-$(CONFIG_USB_XHCI_TEGRA) += xhci-tegra.o
obj-$(CONFIG_USB_SL811_HCD) += sl811-hcd.o
diff --git a/drivers/usb/host/ehci-exynos.c b/drivers/usb/host/ehci-exynos.c
index a333231616f4..47c9f06c3d84 100644
--- a/drivers/usb/host/ehci-exynos.c
+++ b/drivers/usb/host/ehci-exynos.c
@@ -80,19 +80,11 @@ static int exynos_ehci_get_phy(struct device *dev,
return -EINVAL;
}
- phy = devm_of_phy_get(dev, child, NULL);
+ phy = devm_of_phy_optional_get(dev, child, NULL);
exynos_ehci->phy[phy_number] = phy;
if (IS_ERR(phy)) {
- ret = PTR_ERR(phy);
- if (ret == -EPROBE_DEFER) {
- of_node_put(child);
- return ret;
- } else if (ret != -ENOSYS && ret != -ENODEV) {
- dev_err(dev,
- "Error retrieving usb2 phy: %d\n", ret);
- of_node_put(child);
- return ret;
- }
+ of_node_put(child);
+ return PTR_ERR(phy);
}
}
@@ -108,12 +100,10 @@ static int exynos_ehci_phy_enable(struct device *dev)
int ret = 0;
for (i = 0; ret == 0 && i < PHY_NUMBER; i++)
- if (!IS_ERR(exynos_ehci->phy[i]))
- ret = phy_power_on(exynos_ehci->phy[i]);
+ ret = phy_power_on(exynos_ehci->phy[i]);
if (ret)
for (i--; i >= 0; i--)
- if (!IS_ERR(exynos_ehci->phy[i]))
- phy_power_off(exynos_ehci->phy[i]);
+ phy_power_off(exynos_ehci->phy[i]);
return ret;
}
@@ -125,8 +115,7 @@ static void exynos_ehci_phy_disable(struct device *dev)
int i;
for (i = 0; i < PHY_NUMBER; i++)
- if (!IS_ERR(exynos_ehci->phy[i]))
- phy_power_off(exynos_ehci->phy[i]);
+ phy_power_off(exynos_ehci->phy[i]);
}
static void exynos_setup_vbus_gpio(struct device *dev)
diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c
index 38d06e5abfbb..d74fa5ba845b 100644
--- a/drivers/usb/host/ehci-fsl.c
+++ b/drivers/usb/host/ehci-fsl.c
@@ -712,7 +712,7 @@ static struct platform_driver ehci_fsl_driver = {
.remove = fsl_ehci_drv_remove,
.shutdown = usb_hcd_platform_shutdown,
.driver = {
- .name = "fsl-ehci",
+ .name = DRV_NAME,
.pm = EHCI_FSL_PM_OPS,
},
};
diff --git a/drivers/usb/host/fsl-mph-dr-of.c b/drivers/usb/host/fsl-mph-dr-of.c
index e5df17522892..46c6a152b865 100644
--- a/drivers/usb/host/fsl-mph-dr-of.c
+++ b/drivers/usb/host/fsl-mph-dr-of.c
@@ -112,8 +112,7 @@ static struct platform_device *fsl_usb2_device_register(
goto error;
}
- pdev->dev.of_node = ofdev->dev.of_node;
- pdev->dev.of_node_reused = true;
+ device_set_of_node_from_dev(&pdev->dev, &ofdev->dev);
retval = platform_device_add(pdev);
if (retval)
diff --git a/drivers/usb/host/isp116x-hcd.c b/drivers/usb/host/isp116x-hcd.c
index 4f564d71bb0b..49ae01487af4 100644
--- a/drivers/usb/host/isp116x-hcd.c
+++ b/drivers/usb/host/isp116x-hcd.c
@@ -1205,7 +1205,7 @@ static void create_debug_file(struct isp116x *isp116x)
static void remove_debug_file(struct isp116x *isp116x)
{
- debugfs_remove(debugfs_lookup(hcd_name, usb_debug_root));
+ debugfs_lookup_and_remove(hcd_name, usb_debug_root);
}
#else
diff --git a/drivers/usb/host/isp1362-hcd.c b/drivers/usb/host/isp1362-hcd.c
index 0e14d1d07709..b0da143ef4be 100644
--- a/drivers/usb/host/isp1362-hcd.c
+++ b/drivers/usb/host/isp1362-hcd.c
@@ -2170,7 +2170,7 @@ static void create_debug_file(struct isp1362_hcd *isp1362_hcd)
static void remove_debug_file(struct isp1362_hcd *isp1362_hcd)
{
- debugfs_remove(debugfs_lookup("isp1362", usb_debug_root));
+ debugfs_lookup_and_remove("isp1362", usb_debug_root);
}
/*-------------------------------------------------------------------------*/
diff --git a/drivers/usb/host/max3421-hcd.c b/drivers/usb/host/max3421-hcd.c
index 352e3ac2b377..28d1524ee2fa 100644
--- a/drivers/usb/host/max3421-hcd.c
+++ b/drivers/usb/host/max3421-hcd.c
@@ -72,12 +72,6 @@
#define USB_MAX_FRAME_NUMBER 0x7ff
#define USB_MAX_RETRIES 3 /* # of retries before error is reported */
-/*
- * Max. # of times we're willing to retransmit a request immediately in
- * resposne to a NAK. Afterwards, we fall back on trying once a frame.
- */
-#define NAK_MAX_FAST_RETRANSMITS 2
-
#define POWER_BUDGET 500 /* in mA; use 8 for low-power port testing */
/* Port-change mask: */
@@ -924,11 +918,8 @@ max3421_handle_error(struct usb_hcd *hcd, u8 hrsl)
* Device wasn't ready for data or has no data
* available: retry the packet again.
*/
- if (max3421_ep->naks++ < NAK_MAX_FAST_RETRANSMITS) {
- max3421_next_transfer(hcd, 1);
- switch_sndfifo = 0;
- } else
- max3421_slow_retransmit(hcd);
+ max3421_next_transfer(hcd, 1);
+ switch_sndfifo = 0;
break;
}
if (switch_sndfifo)
@@ -1436,7 +1427,7 @@ max3421_spi_thread(void *dev_id)
* use spi_wr_buf().
*/
for (i = 0; i < ARRAY_SIZE(max3421_hcd->iopins); ++i) {
- u8 val = spi_rd8(hcd, MAX3421_REG_IOPINS1);
+ u8 val = spi_rd8(hcd, MAX3421_REG_IOPINS1 + i);
val = ((val & 0xf0) |
(max3421_hcd->iopins[i] & 0x0f));
diff --git a/drivers/usb/host/ohci-exynos.c b/drivers/usb/host/ohci-exynos.c
index 8d7977fd5d3b..8af17c1ee5cc 100644
--- a/drivers/usb/host/ohci-exynos.c
+++ b/drivers/usb/host/ohci-exynos.c
@@ -69,19 +69,11 @@ static int exynos_ohci_get_phy(struct device *dev,
return -EINVAL;
}
- phy = devm_of_phy_get(dev, child, NULL);
+ phy = devm_of_phy_optional_get(dev, child, NULL);
exynos_ohci->phy[phy_number] = phy;
if (IS_ERR(phy)) {
- ret = PTR_ERR(phy);
- if (ret == -EPROBE_DEFER) {
- of_node_put(child);
- return ret;
- } else if (ret != -ENOSYS && ret != -ENODEV) {
- dev_err(dev,
- "Error retrieving usb2 phy: %d\n", ret);
- of_node_put(child);
- return ret;
- }
+ of_node_put(child);
+ return PTR_ERR(phy);
}
}
@@ -97,12 +89,10 @@ static int exynos_ohci_phy_enable(struct device *dev)
int ret = 0;
for (i = 0; ret == 0 && i < PHY_NUMBER; i++)
- if (!IS_ERR(exynos_ohci->phy[i]))
- ret = phy_power_on(exynos_ohci->phy[i]);
+ ret = phy_power_on(exynos_ohci->phy[i]);
if (ret)
for (i--; i >= 0; i--)
- if (!IS_ERR(exynos_ohci->phy[i]))
- phy_power_off(exynos_ohci->phy[i]);
+ phy_power_off(exynos_ohci->phy[i]);
return ret;
}
@@ -114,8 +104,7 @@ static void exynos_ohci_phy_disable(struct device *dev)
int i;
for (i = 0; i < PHY_NUMBER; i++)
- if (!IS_ERR(exynos_ohci->phy[i]))
- phy_power_off(exynos_ohci->phy[i]);
+ phy_power_off(exynos_ohci->phy[i]);
}
static int exynos_ohci_probe(struct platform_device *pdev)
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index 0457dd9f6c19..4f9982ecfb58 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -1264,11 +1264,6 @@ MODULE_LICENSE ("GPL");
#define SM501_OHCI_DRIVER ohci_hcd_sm501_driver
#endif
-#ifdef CONFIG_MFD_TC6393XB
-#include "ohci-tmio.c"
-#define TMIO_OHCI_DRIVER ohci_hcd_tmio_driver
-#endif
-
static int __init ohci_hcd_mod_init(void)
{
int retval = 0;
@@ -1306,19 +1301,9 @@ static int __init ohci_hcd_mod_init(void)
goto error_sm501;
#endif
-#ifdef TMIO_OHCI_DRIVER
- retval = platform_driver_register(&TMIO_OHCI_DRIVER);
- if (retval < 0)
- goto error_tmio;
-#endif
-
return retval;
/* Error path */
-#ifdef TMIO_OHCI_DRIVER
- platform_driver_unregister(&TMIO_OHCI_DRIVER);
- error_tmio:
-#endif
#ifdef SM501_OHCI_DRIVER
platform_driver_unregister(&SM501_OHCI_DRIVER);
error_sm501:
@@ -1345,9 +1330,6 @@ module_init(ohci_hcd_mod_init);
static void __exit ohci_hcd_mod_exit(void)
{
-#ifdef TMIO_OHCI_DRIVER
- platform_driver_unregister(&TMIO_OHCI_DRIVER);
-#endif
#ifdef SM501_OHCI_DRIVER
platform_driver_unregister(&SM501_OHCI_DRIVER);
#endif
diff --git a/drivers/usb/host/ohci-omap.c b/drivers/usb/host/ohci-omap.c
index cb29701df911..c82121602511 100644
--- a/drivers/usb/host/ohci-omap.c
+++ b/drivers/usb/host/ohci-omap.c
@@ -67,8 +67,6 @@ static void omap_ohci_clock_power(struct ohci_omap_priv *priv, int on)
}
}
-#ifdef CONFIG_USB_OTG
-
static void start_hnp(struct ohci_hcd *ohci)
{
struct usb_hcd *hcd = ohci_to_hcd(ohci);
@@ -87,8 +85,6 @@ static void start_hnp(struct ohci_hcd *ohci)
local_irq_restore(flags);
}
-#endif
-
/*-------------------------------------------------------------------------*/
static int ohci_omap_reset(struct usb_hcd *hcd)
@@ -107,16 +103,11 @@ static int ohci_omap_reset(struct usb_hcd *hcd)
hcd->power_budget = 8;
}
- /* boards can use OTG transceivers in non-OTG modes */
- need_transceiver = need_transceiver
- || machine_is_omap_h2() || machine_is_omap_h3();
-
/* XXX OMAP16xx only */
if (config->ocpi_enable)
config->ocpi_enable();
-#ifdef CONFIG_USB_OTG
- if (need_transceiver) {
+ if (IS_ENABLED(CONFIG_USB_OTG) && need_transceiver) {
hcd->usb_phy = usb_get_phy(USB_PHY_TYPE_USB2);
if (!IS_ERR_OR_NULL(hcd->usb_phy)) {
int status = otg_set_host(hcd->usb_phy->otg,
@@ -133,7 +124,6 @@ static int ohci_omap_reset(struct usb_hcd *hcd)
hcd->skip_phy_initialization = 1;
ohci->start_hnp = start_hnp;
}
-#endif
omap_ohci_clock_power(priv, 1);
@@ -150,7 +140,7 @@ static int ohci_omap_reset(struct usb_hcd *hcd)
}
/* board-specific power switching and overcurrent support */
- if (machine_is_omap_osk() || machine_is_omap_innovator()) {
+ if (machine_is_omap_osk()) {
u32 rh = roothub_a (ohci);
/* power switching (ganged by default) */
diff --git a/drivers/usb/host/ohci-pxa27x.c b/drivers/usb/host/ohci-pxa27x.c
index a1dad8745622..0bc7e96bcc93 100644
--- a/drivers/usb/host/ohci-pxa27x.c
+++ b/drivers/usb/host/ohci-pxa27x.c
@@ -29,7 +29,6 @@
#include <linux/of_platform.h>
#include <linux/of_gpio.h>
#include <linux/platform_data/usb-ohci-pxa27x.h>
-#include <linux/platform_data/usb-pxa3xx-ulpi.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/signal.h>
@@ -275,7 +274,6 @@ static int pxa27x_start_hc(struct pxa27x_ohci *pxa_ohci, struct device *dev)
int retval;
struct pxaohci_platform_data *inf;
uint32_t uhchr;
- struct usb_hcd *hcd = dev_get_drvdata(dev);
inf = dev_get_platdata(dev);
@@ -301,9 +299,6 @@ static int pxa27x_start_hc(struct pxa27x_ohci *pxa_ohci, struct device *dev)
return retval;
}
- if (cpu_is_pxa3xx())
- pxa3xx_u2d_start_hc(&hcd->self);
-
uhchr = __raw_readl(pxa_ohci->mmio_base + UHCHR) & ~UHCHR_SSE;
__raw_writel(uhchr, pxa_ohci->mmio_base + UHCHR);
__raw_writel(UHCHIE_UPRIE | UHCHIE_RWIE, pxa_ohci->mmio_base + UHCHIE);
@@ -316,14 +311,10 @@ static int pxa27x_start_hc(struct pxa27x_ohci *pxa_ohci, struct device *dev)
static void pxa27x_stop_hc(struct pxa27x_ohci *pxa_ohci, struct device *dev)
{
struct pxaohci_platform_data *inf;
- struct usb_hcd *hcd = dev_get_drvdata(dev);
uint32_t uhccoms;
inf = dev_get_platdata(dev);
- if (cpu_is_pxa3xx())
- pxa3xx_u2d_stop_hc(&hcd->self);
-
if (inf->exit)
inf->exit(dev);
diff --git a/drivers/usb/host/ohci-sa1111.c b/drivers/usb/host/ohci-sa1111.c
index 75c2b28b3379..aca0338a2983 100644
--- a/drivers/usb/host/ohci-sa1111.c
+++ b/drivers/usb/host/ohci-sa1111.c
@@ -125,10 +125,7 @@ static int sa1111_start_hc(struct sa1111_dev *dev)
dev_dbg(&dev->dev, "starting SA-1111 OHCI USB Controller\n");
- if (machine_is_xp860() ||
- machine_is_assabet() ||
- machine_is_pfs168() ||
- machine_is_badge4())
+ if (machine_is_assabet())
usb_rst = USB_RESET_PWRSENSELOW | USB_RESET_PWRCTRLLOW;
/*
diff --git a/drivers/usb/host/ohci-tmio.c b/drivers/usb/host/ohci-tmio.c
deleted file mode 100644
index 49539b9f0e94..000000000000
--- a/drivers/usb/host/ohci-tmio.c
+++ /dev/null
@@ -1,364 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * OHCI HCD(Host Controller Driver) for USB.
- *
- *(C) Copyright 1999 Roman Weissgaerber <weissg@vienna.at>
- *(C) Copyright 2000-2002 David Brownell <dbrownell@users.sourceforge.net>
- *(C) Copyright 2002 Hewlett-Packard Company
- *
- * Bus glue for Toshiba Mobile IO(TMIO) Controller's OHCI core
- * (C) Copyright 2005 Chris Humbert <mahadri-usb@drigon.com>
- * (C) Copyright 2007, 2008 Dmitry Baryshkov <dbaryshkov@gmail.com>
- *
- * This is known to work with the following variants:
- * TC6393XB revision 3 (32kB SRAM)
- *
- * The TMIO's OHCI core DMAs through a small internal buffer that
- * is directly addressable by the CPU.
- *
- * Written from sparse documentation from Toshiba and Sharp's driver
- * for the 2.4 kernel,
- * usb-ohci-tc6393.c(C) Copyright 2004 Lineo Solutions, Inc.
- */
-
-#include <linux/platform_device.h>
-#include <linux/mfd/core.h>
-#include <linux/mfd/tmio.h>
-#include <linux/dma-mapping.h>
-
-/*-------------------------------------------------------------------------*/
-
-/*
- * USB Host Controller Configuration Register
- */
-#define CCR_REVID 0x08 /* b Revision ID */
-#define CCR_BASE 0x10 /* l USB Control Register Base Address Low */
-#define CCR_ILME 0x40 /* b Internal Local Memory Enable */
-#define CCR_PM 0x4c /* w Power Management */
-#define CCR_INTC 0x50 /* b INT Control */
-#define CCR_LMW1L 0x54 /* w Local Memory Window 1 LMADRS Low */
-#define CCR_LMW1H 0x56 /* w Local Memory Window 1 LMADRS High */
-#define CCR_LMW1BL 0x58 /* w Local Memory Window 1 Base Address Low */
-#define CCR_LMW1BH 0x5A /* w Local Memory Window 1 Base Address High */
-#define CCR_LMW2L 0x5C /* w Local Memory Window 2 LMADRS Low */
-#define CCR_LMW2H 0x5E /* w Local Memory Window 2 LMADRS High */
-#define CCR_LMW2BL 0x60 /* w Local Memory Window 2 Base Address Low */
-#define CCR_LMW2BH 0x62 /* w Local Memory Window 2 Base Address High */
-#define CCR_MISC 0xFC /* b MISC */
-
-#define CCR_PM_GKEN 0x0001
-#define CCR_PM_CKRNEN 0x0002
-#define CCR_PM_USBPW1 0x0004
-#define CCR_PM_USBPW2 0x0008
-#define CCR_PM_USBPW3 0x0010
-#define CCR_PM_PMEE 0x0100
-#define CCR_PM_PMES 0x8000
-
-/*-------------------------------------------------------------------------*/
-
-struct tmio_hcd {
- void __iomem *ccr;
- spinlock_t lock; /* protects RMW cycles */
-};
-
-#define hcd_to_tmio(hcd) ((struct tmio_hcd *)(hcd_to_ohci(hcd) + 1))
-
-/*-------------------------------------------------------------------------*/
-
-static void tmio_write_pm(struct platform_device *dev)
-{
- struct usb_hcd *hcd = platform_get_drvdata(dev);
- struct tmio_hcd *tmio = hcd_to_tmio(hcd);
- u16 pm;
- unsigned long flags;
-
- spin_lock_irqsave(&tmio->lock, flags);
-
- pm = CCR_PM_GKEN | CCR_PM_CKRNEN |
- CCR_PM_PMEE | CCR_PM_PMES;
-
- tmio_iowrite16(pm, tmio->ccr + CCR_PM);
- spin_unlock_irqrestore(&tmio->lock, flags);
-}
-
-static void tmio_stop_hc(struct platform_device *dev)
-{
- struct usb_hcd *hcd = platform_get_drvdata(dev);
- struct ohci_hcd *ohci = hcd_to_ohci(hcd);
- struct tmio_hcd *tmio = hcd_to_tmio(hcd);
- u16 pm;
-
- pm = CCR_PM_GKEN | CCR_PM_CKRNEN;
- switch (ohci->num_ports) {
- default:
- dev_err(&dev->dev, "Unsupported amount of ports: %d\n", ohci->num_ports);
- fallthrough;
- case 3:
- pm |= CCR_PM_USBPW3;
- fallthrough;
- case 2:
- pm |= CCR_PM_USBPW2;
- fallthrough;
- case 1:
- pm |= CCR_PM_USBPW1;
- }
- tmio_iowrite8(0, tmio->ccr + CCR_INTC);
- tmio_iowrite8(0, tmio->ccr + CCR_ILME);
- tmio_iowrite16(0, tmio->ccr + CCR_BASE);
- tmio_iowrite16(0, tmio->ccr + CCR_BASE + 2);
- tmio_iowrite16(pm, tmio->ccr + CCR_PM);
-}
-
-static void tmio_start_hc(struct platform_device *dev)
-{
- struct usb_hcd *hcd = platform_get_drvdata(dev);
- struct tmio_hcd *tmio = hcd_to_tmio(hcd);
- unsigned long base = hcd->rsrc_start;
-
- tmio_write_pm(dev);
- tmio_iowrite16(base, tmio->ccr + CCR_BASE);
- tmio_iowrite16(base >> 16, tmio->ccr + CCR_BASE + 2);
- tmio_iowrite8(1, tmio->ccr + CCR_ILME);
- tmio_iowrite8(2, tmio->ccr + CCR_INTC);
-
- dev_info(&dev->dev, "revision %d @ 0x%08llx, irq %d\n",
- tmio_ioread8(tmio->ccr + CCR_REVID),
- (u64) hcd->rsrc_start, hcd->irq);
-}
-
-static int ohci_tmio_start(struct usb_hcd *hcd)
-{
- struct ohci_hcd *ohci = hcd_to_ohci(hcd);
- int ret;
-
- if ((ret = ohci_init(ohci)) < 0)
- return ret;
-
- if ((ret = ohci_run(ohci)) < 0) {
- dev_err(hcd->self.controller, "can't start %s\n",
- hcd->self.bus_name);
- ohci_stop(hcd);
- return ret;
- }
-
- return 0;
-}
-
-static const struct hc_driver ohci_tmio_hc_driver = {
- .description = hcd_name,
- .product_desc = "TMIO OHCI USB Host Controller",
- .hcd_priv_size = sizeof(struct ohci_hcd) + sizeof (struct tmio_hcd),
-
- /* generic hardware linkage */
- .irq = ohci_irq,
- .flags = HCD_USB11 | HCD_MEMORY,
-
- /* basic lifecycle operations */
- .start = ohci_tmio_start,
- .stop = ohci_stop,
- .shutdown = ohci_shutdown,
-
- /* managing i/o requests and associated device resources */
- .urb_enqueue = ohci_urb_enqueue,
- .urb_dequeue = ohci_urb_dequeue,
- .endpoint_disable = ohci_endpoint_disable,
-
- /* scheduling support */
- .get_frame_number = ohci_get_frame,
-
- /* root hub support */
- .hub_status_data = ohci_hub_status_data,
- .hub_control = ohci_hub_control,
-#ifdef CONFIG_PM
- .bus_suspend = ohci_bus_suspend,
- .bus_resume = ohci_bus_resume,
-#endif
- .start_port_reset = ohci_start_port_reset,
-};
-
-/*-------------------------------------------------------------------------*/
-static struct platform_driver ohci_hcd_tmio_driver;
-
-static int ohci_hcd_tmio_drv_probe(struct platform_device *dev)
-{
- const struct mfd_cell *cell = mfd_get_cell(dev);
- struct resource *regs = platform_get_resource(dev, IORESOURCE_MEM, 0);
- struct resource *config = platform_get_resource(dev, IORESOURCE_MEM, 1);
- struct resource *sram = platform_get_resource(dev, IORESOURCE_MEM, 2);
- int irq = platform_get_irq(dev, 0);
- struct tmio_hcd *tmio;
- struct ohci_hcd *ohci;
- struct usb_hcd *hcd;
- int ret;
-
- if (usb_disabled())
- return -ENODEV;
-
- if (!cell || !regs || !config || !sram)
- return -EINVAL;
-
- if (irq < 0)
- return irq;
-
- hcd = usb_create_hcd(&ohci_tmio_hc_driver, &dev->dev, dev_name(&dev->dev));
- if (!hcd) {
- ret = -ENOMEM;
- goto err_usb_create_hcd;
- }
-
- hcd->rsrc_start = regs->start;
- hcd->rsrc_len = resource_size(regs);
-
- tmio = hcd_to_tmio(hcd);
-
- spin_lock_init(&tmio->lock);
-
- tmio->ccr = ioremap(config->start, resource_size(config));
- if (!tmio->ccr) {
- ret = -ENOMEM;
- goto err_ioremap_ccr;
- }
-
- hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
- if (!hcd->regs) {
- ret = -ENOMEM;
- goto err_ioremap_regs;
- }
-
- if (cell->enable) {
- ret = cell->enable(dev);
- if (ret)
- goto err_enable;
- }
-
- tmio_start_hc(dev);
- ohci = hcd_to_ohci(hcd);
- ohci_hcd_init(ohci);
-
- ret = usb_hcd_setup_local_mem(hcd, sram->start, sram->start,
- resource_size(sram));
- if (ret < 0)
- goto err_enable;
-
- ret = usb_add_hcd(hcd, irq, 0);
- if (ret)
- goto err_add_hcd;
-
- device_wakeup_enable(hcd->self.controller);
- if (ret == 0)
- return ret;
-
- usb_remove_hcd(hcd);
-
-err_add_hcd:
- tmio_stop_hc(dev);
- if (cell->disable)
- cell->disable(dev);
-err_enable:
- iounmap(hcd->regs);
-err_ioremap_regs:
- iounmap(tmio->ccr);
-err_ioremap_ccr:
- usb_put_hcd(hcd);
-err_usb_create_hcd:
-
- return ret;
-}
-
-static int ohci_hcd_tmio_drv_remove(struct platform_device *dev)
-{
- struct usb_hcd *hcd = platform_get_drvdata(dev);
- struct tmio_hcd *tmio = hcd_to_tmio(hcd);
- const struct mfd_cell *cell = mfd_get_cell(dev);
-
- usb_remove_hcd(hcd);
- tmio_stop_hc(dev);
- if (cell->disable)
- cell->disable(dev);
- iounmap(hcd->regs);
- iounmap(tmio->ccr);
- usb_put_hcd(hcd);
-
- return 0;
-}
-
-#ifdef CONFIG_PM
-static int ohci_hcd_tmio_drv_suspend(struct platform_device *dev, pm_message_t state)
-{
- const struct mfd_cell *cell = mfd_get_cell(dev);
- struct usb_hcd *hcd = platform_get_drvdata(dev);
- struct ohci_hcd *ohci = hcd_to_ohci(hcd);
- struct tmio_hcd *tmio = hcd_to_tmio(hcd);
- unsigned long flags;
- u8 misc;
- int ret;
-
- if (time_before(jiffies, ohci->next_statechange))
- msleep(5);
- ohci->next_statechange = jiffies;
-
- spin_lock_irqsave(&tmio->lock, flags);
-
- misc = tmio_ioread8(tmio->ccr + CCR_MISC);
- misc |= 1 << 3; /* USSUSP */
- tmio_iowrite8(misc, tmio->ccr + CCR_MISC);
-
- spin_unlock_irqrestore(&tmio->lock, flags);
-
- if (cell->suspend) {
- ret = cell->suspend(dev);
- if (ret)
- return ret;
- }
- return 0;
-}
-
-static int ohci_hcd_tmio_drv_resume(struct platform_device *dev)
-{
- const struct mfd_cell *cell = mfd_get_cell(dev);
- struct usb_hcd *hcd = platform_get_drvdata(dev);
- struct ohci_hcd *ohci = hcd_to_ohci(hcd);
- struct tmio_hcd *tmio = hcd_to_tmio(hcd);
- unsigned long flags;
- u8 misc;
- int ret;
-
- if (time_before(jiffies, ohci->next_statechange))
- msleep(5);
- ohci->next_statechange = jiffies;
-
- if (cell->resume) {
- ret = cell->resume(dev);
- if (ret)
- return ret;
- }
-
- tmio_start_hc(dev);
-
- spin_lock_irqsave(&tmio->lock, flags);
-
- misc = tmio_ioread8(tmio->ccr + CCR_MISC);
- misc &= ~(1 << 3); /* USSUSP */
- tmio_iowrite8(misc, tmio->ccr + CCR_MISC);
-
- spin_unlock_irqrestore(&tmio->lock, flags);
-
- ohci_resume(hcd, false);
-
- return 0;
-}
-#else
-#define ohci_hcd_tmio_drv_suspend NULL
-#define ohci_hcd_tmio_drv_resume NULL
-#endif
-
-static struct platform_driver ohci_hcd_tmio_driver = {
- .probe = ohci_hcd_tmio_drv_probe,
- .remove = ohci_hcd_tmio_drv_remove,
- .shutdown = usb_hcd_platform_shutdown,
- .suspend = ohci_hcd_tmio_drv_suspend,
- .resume = ohci_hcd_tmio_drv_resume,
- .driver = {
- .name = "tmio-ohci",
- },
-};
diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c
index d206bd95c7bb..b8b90eec9107 100644
--- a/drivers/usb/host/sl811-hcd.c
+++ b/drivers/usb/host/sl811-hcd.c
@@ -1501,7 +1501,7 @@ static void create_debug_file(struct sl811 *sl811)
static void remove_debug_file(struct sl811 *sl811)
{
- debugfs_remove(debugfs_lookup("sl811h", usb_debug_root));
+ debugfs_lookup_and_remove("sl811h", usb_debug_root);
}
/*-------------------------------------------------------------------------*/
diff --git a/drivers/usb/host/uhci-hcd.c b/drivers/usb/host/uhci-hcd.c
index c22b51af83fc..7cdc2fa7c28f 100644
--- a/drivers/usb/host/uhci-hcd.c
+++ b/drivers/usb/host/uhci-hcd.c
@@ -536,8 +536,8 @@ static void release_uhci(struct uhci_hcd *uhci)
uhci->is_initialized = 0;
spin_unlock_irq(&uhci->lock);
- debugfs_remove(debugfs_lookup(uhci_to_hcd(uhci)->self.bus_name,
- uhci_debugfs_root));
+ debugfs_lookup_and_remove(uhci_to_hcd(uhci)->self.bus_name,
+ uhci_debugfs_root);
for (i = 0; i < UHCI_NUM_SKELQH; i++)
uhci_free_qh(uhci, uhci->skelqh[i]);
@@ -700,7 +700,7 @@ err_alloc_frame_cpu:
uhci->frame, uhci->frame_dma_handle);
err_alloc_frame:
- debugfs_remove(debugfs_lookup(hcd->self.bus_name, uhci_debugfs_root));
+ debugfs_lookup_and_remove(hcd->self.bus_name, uhci_debugfs_root);
return retval;
}
diff --git a/drivers/usb/host/xhci-debugfs.c b/drivers/usb/host/xhci-debugfs.c
index dc832ddf7033..0bc7fe11f749 100644
--- a/drivers/usb/host/xhci-debugfs.c
+++ b/drivers/usb/host/xhci-debugfs.c
@@ -692,7 +692,7 @@ void xhci_debugfs_init(struct xhci_hcd *xhci)
"command-ring",
xhci->debugfs_root);
- xhci_debugfs_create_ring_dir(xhci, &xhci->event_ring,
+ xhci_debugfs_create_ring_dir(xhci, &xhci->interrupter->event_ring,
"event-ring",
xhci->debugfs_root);
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 94c94db3faf6..0054d02239e2 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -578,13 +578,16 @@ void xhci_ring_device(struct xhci_hcd *xhci, int slot_id)
return;
}
-static void xhci_disable_port(struct usb_hcd *hcd, struct xhci_hcd *xhci,
- u16 wIndex, __le32 __iomem *addr, u32 port_status)
+static void xhci_disable_port(struct xhci_hcd *xhci, struct xhci_port *port)
{
+ struct usb_hcd *hcd;
+ u32 portsc;
+
+ hcd = port->rhub->hcd;
+
/* Don't allow the USB core to disable SuperSpeed ports. */
if (hcd->speed >= HCD_USB3) {
- xhci_dbg(xhci, "Ignoring request to disable "
- "SuperSpeed port.\n");
+ xhci_dbg(xhci, "Ignoring request to disable SuperSpeed port.\n");
return;
}
@@ -594,11 +597,15 @@ static void xhci_disable_port(struct usb_hcd *hcd, struct xhci_hcd *xhci,
return;
}
+ portsc = readl(port->addr);
+ portsc = xhci_port_state_to_neutral(portsc);
+
/* Write 1 to disable the port */
- writel(port_status | PORT_PE, addr);
- port_status = readl(addr);
+ writel(portsc | PORT_PE, port->addr);
+
+ portsc = readl(port->addr);
xhci_dbg(xhci, "disable port %d-%d, portsc: 0x%x\n",
- hcd->self.busnum, wIndex + 1, port_status);
+ hcd->self.busnum, port->hcd_portnum + 1, portsc);
}
static void xhci_clear_port_change_bit(struct xhci_hcd *xhci, u16 wValue,
@@ -666,20 +673,18 @@ struct xhci_hub *xhci_get_rhub(struct usb_hcd *hcd)
* It will release and re-aquire the lock while calling ACPI
* method.
*/
-static void xhci_set_port_power(struct xhci_hcd *xhci, struct usb_hcd *hcd,
- u16 index, bool on, unsigned long *flags)
+static void xhci_set_port_power(struct xhci_hcd *xhci, struct xhci_port *port,
+ bool on, unsigned long *flags)
__must_hold(&xhci->lock)
{
- struct xhci_hub *rhub;
- struct xhci_port *port;
+ struct usb_hcd *hcd;
u32 temp;
- rhub = xhci_get_rhub(hcd);
- port = rhub->ports[index];
+ hcd = port->rhub->hcd;
temp = readl(port->addr);
xhci_dbg(xhci, "set port power %d-%d %s, portsc: 0x%x\n",
- hcd->self.busnum, index + 1, on ? "ON" : "OFF", temp);
+ hcd->self.busnum, port->hcd_portnum + 1, on ? "ON" : "OFF", temp);
temp = xhci_port_state_to_neutral(temp);
@@ -694,10 +699,10 @@ static void xhci_set_port_power(struct xhci_hcd *xhci, struct usb_hcd *hcd,
spin_unlock_irqrestore(&xhci->lock, *flags);
temp = usb_acpi_power_manageable(hcd->self.root_hub,
- index);
+ port->hcd_portnum);
if (temp)
usb_acpi_set_power_state(hcd->self.root_hub,
- index, on);
+ port->hcd_portnum, on);
spin_lock_irqsave(&xhci->lock, *flags);
}
@@ -721,7 +726,6 @@ static int xhci_enter_test_mode(struct xhci_hcd *xhci,
u16 test_mode, u16 wIndex, unsigned long *flags)
__must_hold(&xhci->lock)
{
- struct usb_hcd *usb3_hcd = xhci_get_usb3_hcd(xhci);
int i, retval;
/* Disable all Device Slots */
@@ -742,10 +746,10 @@ static int xhci_enter_test_mode(struct xhci_hcd *xhci,
xhci_dbg(xhci, "Disable all port (PP = 0)\n");
/* Power off USB3 ports*/
for (i = 0; i < xhci->usb3_rhub.num_ports; i++)
- xhci_set_port_power(xhci, usb3_hcd, i, false, flags);
+ xhci_set_port_power(xhci, xhci->usb3_rhub.ports[i], false, flags);
/* Power off USB2 ports*/
for (i = 0; i < xhci->usb2_rhub.num_ports; i++)
- xhci_set_port_power(xhci, xhci->main_hcd, i, false, flags);
+ xhci_set_port_power(xhci, xhci->usb2_rhub.ports[i], false, flags);
/* Stop the controller */
xhci_dbg(xhci, "Stop controller\n");
retval = xhci_halt(xhci);
@@ -920,7 +924,7 @@ static void xhci_del_comp_mod_timer(struct xhci_hcd *xhci, u32 status,
}
static int xhci_handle_usb2_port_link_resume(struct xhci_port *port,
- u32 *status, u32 portsc,
+ u32 portsc,
unsigned long *flags)
{
struct xhci_bus_state *bus_state;
@@ -935,11 +939,10 @@ static int xhci_handle_usb2_port_link_resume(struct xhci_port *port,
wIndex = port->hcd_portnum;
if ((portsc & PORT_RESET) || !(portsc & PORT_PE)) {
- *status = 0xffffffff;
return -EINVAL;
}
/* did port event handler already start resume timing? */
- if (!bus_state->resume_done[wIndex]) {
+ if (!port->resume_timestamp) {
/* If not, maybe we are in a host initated resume? */
if (test_bit(wIndex, &bus_state->resuming_ports)) {
/* Host initated resume doesn't time the resume
@@ -956,28 +959,29 @@ static int xhci_handle_usb2_port_link_resume(struct xhci_port *port,
msecs_to_jiffies(USB_RESUME_TIMEOUT);
set_bit(wIndex, &bus_state->resuming_ports);
- bus_state->resume_done[wIndex] = timeout;
+ port->resume_timestamp = timeout;
mod_timer(&hcd->rh_timer, timeout);
usb_hcd_start_port_resume(&hcd->self, wIndex);
}
/* Has resume been signalled for USB_RESUME_TIME yet? */
- } else if (time_after_eq(jiffies, bus_state->resume_done[wIndex])) {
+ } else if (time_after_eq(jiffies, port->resume_timestamp)) {
int time_left;
xhci_dbg(xhci, "resume USB2 port %d-%d\n",
hcd->self.busnum, wIndex + 1);
- bus_state->resume_done[wIndex] = 0;
+ port->resume_timestamp = 0;
clear_bit(wIndex, &bus_state->resuming_ports);
- set_bit(wIndex, &bus_state->rexit_ports);
+ reinit_completion(&port->rexit_done);
+ port->rexit_active = true;
xhci_test_and_clear_bit(xhci, port, PORT_PLC);
xhci_set_link_state(xhci, port, XDEV_U0);
spin_unlock_irqrestore(&xhci->lock, *flags);
time_left = wait_for_completion_timeout(
- &bus_state->rexit_done[wIndex],
+ &port->rexit_done,
msecs_to_jiffies(XHCI_MAX_REXIT_TIMEOUT_MS));
spin_lock_irqsave(&xhci->lock, *flags);
@@ -986,7 +990,6 @@ static int xhci_handle_usb2_port_link_resume(struct xhci_port *port,
wIndex + 1);
if (!slot_id) {
xhci_dbg(xhci, "slot_id is zero\n");
- *status = 0xffffffff;
return -ENODEV;
}
xhci_ring_device(xhci, slot_id);
@@ -995,22 +998,19 @@ static int xhci_handle_usb2_port_link_resume(struct xhci_port *port,
xhci_warn(xhci, "Port resume timed out, port %d-%d: 0x%x\n",
hcd->self.busnum, wIndex + 1, port_status);
- *status |= USB_PORT_STAT_SUSPEND;
- clear_bit(wIndex, &bus_state->rexit_ports);
+ /*
+ * keep rexit_active set if U0 transition failed so we
+ * know to report PORT_STAT_SUSPEND status back to
+ * usbcore. It will be cleared later once the port is
+ * out of RESUME/U3 state
+ */
}
usb_hcd_end_port_resume(&hcd->self, wIndex);
bus_state->port_c_suspend |= 1 << wIndex;
bus_state->suspended_ports &= ~(1 << wIndex);
- } else {
- /*
- * The resume has been signaling for less than
- * USB_RESUME_TIME. Report the port status as SUSPEND,
- * let the usbcore check port status again and clear
- * resume signaling later.
- */
- *status |= USB_PORT_STAT_SUSPEND;
}
+
return 0;
}
@@ -1087,7 +1087,7 @@ static void xhci_get_usb2_port_status(struct xhci_port *port, u32 *status,
struct xhci_bus_state *bus_state;
u32 link_state;
u32 portnum;
- int ret;
+ int err;
bus_state = &port->rhub->bus_state;
link_state = portsc & PORT_PLS_MASK;
@@ -1103,22 +1103,34 @@ static void xhci_get_usb2_port_status(struct xhci_port *port, u32 *status,
if (link_state == XDEV_U2)
*status |= USB_PORT_STAT_L1;
if (link_state == XDEV_U0) {
- if (bus_state->resume_done[portnum])
- usb_hcd_end_port_resume(&port->rhub->hcd->self,
- portnum);
- bus_state->resume_done[portnum] = 0;
- clear_bit(portnum, &bus_state->resuming_ports);
if (bus_state->suspended_ports & (1 << portnum)) {
bus_state->suspended_ports &= ~(1 << portnum);
bus_state->port_c_suspend |= 1 << portnum;
}
}
if (link_state == XDEV_RESUME) {
- ret = xhci_handle_usb2_port_link_resume(port, status,
- portsc, flags);
- if (ret)
- return;
+ err = xhci_handle_usb2_port_link_resume(port, portsc,
+ flags);
+ if (err < 0)
+ *status = 0xffffffff;
+ else if (port->resume_timestamp || port->rexit_active)
+ *status |= USB_PORT_STAT_SUSPEND;
+ }
+ }
+
+ /*
+ * Clear usb2 resume signalling variables if port is no longer suspended
+ * or resuming. Port either resumed to U0/U1/U2, disconnected, or in a
+ * error state. Resume related variables should be cleared in all those cases.
+ */
+ if (link_state != XDEV_U3 && link_state != XDEV_RESUME) {
+ if (port->resume_timestamp ||
+ test_bit(portnum, &bus_state->resuming_ports)) {
+ port->resume_timestamp = 0;
+ clear_bit(portnum, &bus_state->resuming_ports);
+ usb_hcd_end_port_resume(&port->rhub->hcd->self, portnum);
}
+ port->rexit_active = 0;
}
}
@@ -1174,18 +1186,6 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
else
xhci_get_usb2_port_status(port, &status, raw_port_status,
flags);
- /*
- * Clear stale usb2 resume signalling variables in case port changed
- * state during resume signalling. For example on error
- */
- if ((bus_state->resume_done[wIndex] ||
- test_bit(wIndex, &bus_state->resuming_ports)) &&
- (raw_port_status & PORT_PLS_MASK) != XDEV_U3 &&
- (raw_port_status & PORT_PLS_MASK) != XDEV_RESUME) {
- bus_state->resume_done[wIndex] = 0;
- clear_bit(wIndex, &bus_state->resuming_ports);
- usb_hcd_end_port_resume(&hcd->self, wIndex);
- }
if (bus_state->port_c_suspend & (1 << wIndex))
status |= USB_PORT_STAT_C_SUSPEND << 16;
@@ -1209,11 +1209,14 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
u16 test_mode = 0;
struct xhci_hub *rhub;
struct xhci_port **ports;
+ struct xhci_port *port;
+ int portnum1;
rhub = xhci_get_rhub(hcd);
ports = rhub->ports;
max_ports = rhub->num_ports;
bus_state = &rhub->bus_state;
+ portnum1 = wIndex & 0xff;
spin_lock_irqsave(&xhci->lock, flags);
switch (typeReq) {
@@ -1247,10 +1250,12 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
spin_unlock_irqrestore(&xhci->lock, flags);
return retval;
case GetPortStatus:
- if (!wIndex || wIndex > max_ports)
+ if (!portnum1 || portnum1 > max_ports)
goto error;
+
wIndex--;
- temp = readl(ports[wIndex]->addr);
+ port = ports[portnum1 - 1];
+ temp = readl(port->addr);
if (temp == ~(u32)0) {
xhci_hc_died(xhci);
retval = -ENODEV;
@@ -1263,7 +1268,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
goto error;
xhci_dbg(xhci, "Get port status %d-%d read: 0x%x, return 0x%x",
- hcd->self.busnum, wIndex + 1, temp, status);
+ hcd->self.busnum, portnum1, temp, status);
put_unaligned(cpu_to_le32(status), (__le32 *) buf);
/* if USB 3.1 extended port status return additional 4 bytes */
@@ -1275,7 +1280,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
retval = -EINVAL;
break;
}
- port_li = readl(ports[wIndex]->addr + PORTLI);
+ port_li = readl(port->addr + PORTLI);
status = xhci_get_ext_port_status(temp, port_li);
put_unaligned_le32(status, &buf[4]);
}
@@ -1289,11 +1294,14 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
test_mode = (wIndex & 0xff00) >> 8;
/* The MSB of wIndex is the U1/U2 timeout */
timeout = (wIndex & 0xff00) >> 8;
+
wIndex &= 0xff;
- if (!wIndex || wIndex > max_ports)
+ if (!portnum1 || portnum1 > max_ports)
goto error;
+
+ port = ports[portnum1 - 1];
wIndex--;
- temp = readl(ports[wIndex]->addr);
+ temp = readl(port->addr);
if (temp == ~(u32)0) {
xhci_hc_died(xhci);
retval = -ENODEV;
@@ -1303,11 +1311,10 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
/* FIXME: What new port features do we need to support? */
switch (wValue) {
case USB_PORT_FEAT_SUSPEND:
- temp = readl(ports[wIndex]->addr);
+ temp = readl(port->addr);
if ((temp & PORT_PLS_MASK) != XDEV_U0) {
/* Resume the port to U0 first */
- xhci_set_link_state(xhci, ports[wIndex],
- XDEV_U0);
+ xhci_set_link_state(xhci, port, XDEV_U0);
spin_unlock_irqrestore(&xhci->lock, flags);
msleep(10);
spin_lock_irqsave(&xhci->lock, flags);
@@ -1316,16 +1323,16 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
* a port unless the port reports that it is in the
* enabled (PED = ‘1’,PLS < ‘3’) state.
*/
- temp = readl(ports[wIndex]->addr);
+ temp = readl(port->addr);
if ((temp & PORT_PE) == 0 || (temp & PORT_RESET)
|| (temp & PORT_PLS_MASK) >= XDEV_U3) {
xhci_warn(xhci, "USB core suspending port %d-%d not in U0/U1/U2\n",
- hcd->self.busnum, wIndex + 1);
+ hcd->self.busnum, portnum1);
goto error;
}
slot_id = xhci_find_slot_id_by_port(hcd, xhci,
- wIndex + 1);
+ portnum1);
if (!slot_id) {
xhci_warn(xhci, "slot_id is zero\n");
goto error;
@@ -1335,21 +1342,21 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
xhci_stop_device(xhci, slot_id, 1);
spin_lock_irqsave(&xhci->lock, flags);
- xhci_set_link_state(xhci, ports[wIndex], XDEV_U3);
+ xhci_set_link_state(xhci, port, XDEV_U3);
spin_unlock_irqrestore(&xhci->lock, flags);
msleep(10); /* wait device to enter */
spin_lock_irqsave(&xhci->lock, flags);
- temp = readl(ports[wIndex]->addr);
+ temp = readl(port->addr);
bus_state->suspended_ports |= 1 << wIndex;
break;
case USB_PORT_FEAT_LINK_STATE:
- temp = readl(ports[wIndex]->addr);
+ temp = readl(port->addr);
/* Disable port */
if (link_state == USB_SS_PORT_LS_SS_DISABLED) {
xhci_dbg(xhci, "Disable port %d-%d\n",
- hcd->self.busnum, wIndex + 1);
+ hcd->self.busnum, portnum1);
temp = xhci_port_state_to_neutral(temp);
/*
* Clear all change bits, so that we get a new
@@ -1358,18 +1365,17 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
temp |= PORT_CSC | PORT_PEC | PORT_WRC |
PORT_OCC | PORT_RC | PORT_PLC |
PORT_CEC;
- writel(temp | PORT_PE, ports[wIndex]->addr);
- temp = readl(ports[wIndex]->addr);
+ writel(temp | PORT_PE, port->addr);
+ temp = readl(port->addr);
break;
}
/* Put link in RxDetect (enable port) */
if (link_state == USB_SS_PORT_LS_RX_DETECT) {
xhci_dbg(xhci, "Enable port %d-%d\n",
- hcd->self.busnum, wIndex + 1);
- xhci_set_link_state(xhci, ports[wIndex],
- link_state);
- temp = readl(ports[wIndex]->addr);
+ hcd->self.busnum, portnum1);
+ xhci_set_link_state(xhci, port, link_state);
+ temp = readl(port->addr);
break;
}
@@ -1399,11 +1405,10 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
}
xhci_dbg(xhci, "Enable compliance mode transition for port %d-%d\n",
- hcd->self.busnum, wIndex + 1);
- xhci_set_link_state(xhci, ports[wIndex],
- link_state);
+ hcd->self.busnum, portnum1);
+ xhci_set_link_state(xhci, port, link_state);
- temp = readl(ports[wIndex]->addr);
+ temp = readl(port->addr);
break;
}
/* Port must be enabled */
@@ -1414,8 +1419,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
/* Can't set port link state above '3' (U3) */
if (link_state > USB_SS_PORT_LS_U3) {
xhci_warn(xhci, "Cannot set port %d-%d link state %d\n",
- hcd->self.busnum, wIndex + 1,
- link_state);
+ hcd->self.busnum, portnum1, link_state);
goto error;
}
@@ -1437,30 +1441,29 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
pls == XDEV_RESUME ||
pls == XDEV_RECOVERY) {
wait_u0 = true;
- reinit_completion(&bus_state->u3exit_done[wIndex]);
+ reinit_completion(&port->u3exit_done);
}
if (pls <= XDEV_U3) /* U1, U2, U3 */
- xhci_set_link_state(xhci, ports[wIndex],
- USB_SS_PORT_LS_U0);
+ xhci_set_link_state(xhci, port, USB_SS_PORT_LS_U0);
if (!wait_u0) {
if (pls > XDEV_U3)
goto error;
break;
}
spin_unlock_irqrestore(&xhci->lock, flags);
- if (!wait_for_completion_timeout(&bus_state->u3exit_done[wIndex],
+ if (!wait_for_completion_timeout(&port->u3exit_done,
msecs_to_jiffies(500)))
xhci_dbg(xhci, "missing U0 port change event for port %d-%d\n",
- hcd->self.busnum, wIndex + 1);
+ hcd->self.busnum, portnum1);
spin_lock_irqsave(&xhci->lock, flags);
- temp = readl(ports[wIndex]->addr);
+ temp = readl(port->addr);
break;
}
if (link_state == USB_SS_PORT_LS_U3) {
int retries = 16;
slot_id = xhci_find_slot_id_by_port(hcd, xhci,
- wIndex + 1);
+ portnum1);
if (slot_id) {
/* unlock to execute stop endpoint
* commands */
@@ -1469,16 +1472,16 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
xhci_stop_device(xhci, slot_id, 1);
spin_lock_irqsave(&xhci->lock, flags);
}
- xhci_set_link_state(xhci, ports[wIndex], USB_SS_PORT_LS_U3);
+ xhci_set_link_state(xhci, port, USB_SS_PORT_LS_U3);
spin_unlock_irqrestore(&xhci->lock, flags);
while (retries--) {
usleep_range(4000, 8000);
- temp = readl(ports[wIndex]->addr);
+ temp = readl(port->addr);
if ((temp & PORT_PLS_MASK) == XDEV_U3)
break;
}
spin_lock_irqsave(&xhci->lock, flags);
- temp = readl(ports[wIndex]->addr);
+ temp = readl(port->addr);
bus_state->suspended_ports |= 1 << wIndex;
}
break;
@@ -1489,43 +1492,42 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
* However, hub_wq will ignore the roothub events until
* the roothub is registered.
*/
- xhci_set_port_power(xhci, hcd, wIndex, true, &flags);
+ xhci_set_port_power(xhci, port, true, &flags);
break;
case USB_PORT_FEAT_RESET:
temp = (temp | PORT_RESET);
- writel(temp, ports[wIndex]->addr);
+ writel(temp, port->addr);
- temp = readl(ports[wIndex]->addr);
+ temp = readl(port->addr);
xhci_dbg(xhci, "set port reset, actual port %d-%d status = 0x%x\n",
- hcd->self.busnum, wIndex + 1, temp);
+ hcd->self.busnum, portnum1, temp);
break;
case USB_PORT_FEAT_REMOTE_WAKE_MASK:
- xhci_set_remote_wake_mask(xhci, ports[wIndex],
- wake_mask);
- temp = readl(ports[wIndex]->addr);
+ xhci_set_remote_wake_mask(xhci, port, wake_mask);
+ temp = readl(port->addr);
xhci_dbg(xhci, "set port remote wake mask, actual port %d-%d status = 0x%x\n",
- hcd->self.busnum, wIndex + 1, temp);
+ hcd->self.busnum, portnum1, temp);
break;
case USB_PORT_FEAT_BH_PORT_RESET:
temp |= PORT_WR;
- writel(temp, ports[wIndex]->addr);
- temp = readl(ports[wIndex]->addr);
+ writel(temp, port->addr);
+ temp = readl(port->addr);
break;
case USB_PORT_FEAT_U1_TIMEOUT:
if (hcd->speed < HCD_USB3)
goto error;
- temp = readl(ports[wIndex]->addr + PORTPMSC);
+ temp = readl(port->addr + PORTPMSC);
temp &= ~PORT_U1_TIMEOUT_MASK;
temp |= PORT_U1_TIMEOUT(timeout);
- writel(temp, ports[wIndex]->addr + PORTPMSC);
+ writel(temp, port->addr + PORTPMSC);
break;
case USB_PORT_FEAT_U2_TIMEOUT:
if (hcd->speed < HCD_USB3)
goto error;
- temp = readl(ports[wIndex]->addr + PORTPMSC);
+ temp = readl(port->addr + PORTPMSC);
temp &= ~PORT_U2_TIMEOUT_MASK;
temp |= PORT_U2_TIMEOUT(timeout);
- writel(temp, ports[wIndex]->addr + PORTPMSC);
+ writel(temp, port->addr + PORTPMSC);
break;
case USB_PORT_FEAT_TEST:
/* 4.19.6 Port Test Modes (USB2 Test Mode) */
@@ -1541,13 +1543,16 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
goto error;
}
/* unblock any posted writes */
- temp = readl(ports[wIndex]->addr);
+ temp = readl(port->addr);
break;
case ClearPortFeature:
- if (!wIndex || wIndex > max_ports)
+ if (!portnum1 || portnum1 > max_ports)
goto error;
+
+ port = ports[portnum1 - 1];
+
wIndex--;
- temp = readl(ports[wIndex]->addr);
+ temp = readl(port->addr);
if (temp == ~(u32)0) {
xhci_hc_died(xhci);
retval = -ENODEV;
@@ -1557,7 +1562,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
temp = xhci_port_state_to_neutral(temp);
switch (wValue) {
case USB_PORT_FEAT_SUSPEND:
- temp = readl(ports[wIndex]->addr);
+ temp = readl(port->addr);
xhci_dbg(xhci, "clear USB_PORT_FEAT_SUSPEND\n");
xhci_dbg(xhci, "PORTSC %04x\n", temp);
if (temp & PORT_RESET)
@@ -1568,20 +1573,18 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
set_bit(wIndex, &bus_state->resuming_ports);
usb_hcd_start_port_resume(&hcd->self, wIndex);
- xhci_set_link_state(xhci, ports[wIndex],
- XDEV_RESUME);
+ xhci_set_link_state(xhci, port, XDEV_RESUME);
spin_unlock_irqrestore(&xhci->lock, flags);
msleep(USB_RESUME_TIMEOUT);
spin_lock_irqsave(&xhci->lock, flags);
- xhci_set_link_state(xhci, ports[wIndex],
- XDEV_U0);
+ xhci_set_link_state(xhci, port, XDEV_U0);
clear_bit(wIndex, &bus_state->resuming_ports);
usb_hcd_end_port_resume(&hcd->self, wIndex);
}
bus_state->port_c_suspend |= 1 << wIndex;
slot_id = xhci_find_slot_id_by_port(hcd, xhci,
- wIndex + 1);
+ portnum1);
if (!slot_id) {
xhci_dbg(xhci, "slot_id is zero\n");
goto error;
@@ -1599,14 +1602,13 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
case USB_PORT_FEAT_C_PORT_LINK_STATE:
case USB_PORT_FEAT_C_PORT_CONFIG_ERROR:
xhci_clear_port_change_bit(xhci, wValue, wIndex,
- ports[wIndex]->addr, temp);
+ port->addr, temp);
break;
case USB_PORT_FEAT_ENABLE:
- xhci_disable_port(hcd, xhci, wIndex,
- ports[wIndex]->addr, temp);
+ xhci_disable_port(xhci, port);
break;
case USB_PORT_FEAT_POWER:
- xhci_set_port_power(xhci, hcd, wIndex, false, &flags);
+ xhci_set_port_power(xhci, port, false, &flags);
break;
case USB_PORT_FEAT_TEST:
retval = xhci_exit_test_mode(xhci);
@@ -1623,6 +1625,7 @@ error:
spin_unlock_irqrestore(&xhci->lock, flags);
return retval;
}
+EXPORT_SYMBOL_GPL(xhci_hub_control);
/*
* Returns 0 if the status hasn't changed, or the number of bytes in buf.
@@ -1687,8 +1690,8 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf)
if ((temp & mask) != 0 ||
(bus_state->port_c_suspend & 1 << i) ||
- (bus_state->resume_done[i] && time_after_eq(
- jiffies, bus_state->resume_done[i]))) {
+ (ports[i]->resume_timestamp && time_after_eq(
+ jiffies, ports[i]->resume_timestamp))) {
buf[(i + 1) / 8] |= 1 << (i + 1) % 8;
status = 1;
}
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 81ca2bc1f0be..d0a9467aa5fc 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -1819,17 +1819,43 @@ int xhci_alloc_erst(struct xhci_hcd *xhci,
return 0;
}
-void xhci_free_erst(struct xhci_hcd *xhci, struct xhci_erst *erst)
+static void
+xhci_free_interrupter(struct xhci_hcd *xhci, struct xhci_interrupter *ir)
{
- size_t size;
struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
+ size_t erst_size;
+ u64 tmp64;
+ u32 tmp;
- size = sizeof(struct xhci_erst_entry) * (erst->num_entries);
- if (erst->entries)
- dma_free_coherent(dev, size,
- erst->entries,
- erst->erst_dma_addr);
- erst->entries = NULL;
+ if (!ir)
+ return;
+
+ erst_size = sizeof(struct xhci_erst_entry) * (ir->erst.num_entries);
+ if (ir->erst.entries)
+ dma_free_coherent(dev, erst_size,
+ ir->erst.entries,
+ ir->erst.erst_dma_addr);
+ ir->erst.entries = NULL;
+
+ /*
+ * Clean out interrupter registers except ERSTBA. Clearing either the
+ * low or high 32 bits of ERSTBA immediately causes the controller to
+ * dereference the partially cleared 64 bit address, causing IOMMU error.
+ */
+ tmp = readl(&ir->ir_set->erst_size);
+ tmp &= ERST_SIZE_MASK;
+ writel(tmp, &ir->ir_set->erst_size);
+
+ tmp64 = xhci_read_64(xhci, &ir->ir_set->erst_dequeue);
+ tmp64 &= (u64) ERST_PTR_MASK;
+ xhci_write_64(xhci, tmp64, &ir->ir_set->erst_dequeue);
+
+ /* free interrrupter event ring */
+ if (ir->event_ring)
+ xhci_ring_free(xhci, ir->event_ring);
+ ir->event_ring = NULL;
+
+ kfree(ir);
}
void xhci_mem_cleanup(struct xhci_hcd *xhci)
@@ -1839,12 +1865,9 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
cancel_delayed_work_sync(&xhci->cmd_timer);
- xhci_free_erst(xhci, &xhci->erst);
-
- if (xhci->event_ring)
- xhci_ring_free(xhci, xhci->event_ring);
- xhci->event_ring = NULL;
- xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed event ring");
+ xhci_free_interrupter(xhci, xhci->interrupter);
+ xhci->interrupter = NULL;
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed primary event ring");
if (xhci->cmd_ring)
xhci_ring_free(xhci, xhci->cmd_ring);
@@ -1929,176 +1952,18 @@ no_bw:
xhci->usb3_rhub.bus_state.bus_suspended = 0;
}
-static int xhci_test_trb_in_td(struct xhci_hcd *xhci,
- struct xhci_segment *input_seg,
- union xhci_trb *start_trb,
- union xhci_trb *end_trb,
- dma_addr_t input_dma,
- struct xhci_segment *result_seg,
- char *test_name, int test_number)
-{
- unsigned long long start_dma;
- unsigned long long end_dma;
- struct xhci_segment *seg;
-
- start_dma = xhci_trb_virt_to_dma(input_seg, start_trb);
- end_dma = xhci_trb_virt_to_dma(input_seg, end_trb);
-
- seg = trb_in_td(xhci, input_seg, start_trb, end_trb, input_dma, false);
- if (seg != result_seg) {
- xhci_warn(xhci, "WARN: %s TRB math test %d failed!\n",
- test_name, test_number);
- xhci_warn(xhci, "Tested TRB math w/ seg %p and "
- "input DMA 0x%llx\n",
- input_seg,
- (unsigned long long) input_dma);
- xhci_warn(xhci, "starting TRB %p (0x%llx DMA), "
- "ending TRB %p (0x%llx DMA)\n",
- start_trb, start_dma,
- end_trb, end_dma);
- xhci_warn(xhci, "Expected seg %p, got seg %p\n",
- result_seg, seg);
- trb_in_td(xhci, input_seg, start_trb, end_trb, input_dma,
- true);
- return -1;
- }
- return 0;
-}
-
-/* TRB math checks for xhci_trb_in_td(), using the command and event rings. */
-static int xhci_check_trb_in_td_math(struct xhci_hcd *xhci)
-{
- struct {
- dma_addr_t input_dma;
- struct xhci_segment *result_seg;
- } simple_test_vector [] = {
- /* A zeroed DMA field should fail */
- { 0, NULL },
- /* One TRB before the ring start should fail */
- { xhci->event_ring->first_seg->dma - 16, NULL },
- /* One byte before the ring start should fail */
- { xhci->event_ring->first_seg->dma - 1, NULL },
- /* Starting TRB should succeed */
- { xhci->event_ring->first_seg->dma, xhci->event_ring->first_seg },
- /* Ending TRB should succeed */
- { xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16,
- xhci->event_ring->first_seg },
- /* One byte after the ring end should fail */
- { xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16 + 1, NULL },
- /* One TRB after the ring end should fail */
- { xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT)*16, NULL },
- /* An address of all ones should fail */
- { (dma_addr_t) (~0), NULL },
- };
- struct {
- struct xhci_segment *input_seg;
- union xhci_trb *start_trb;
- union xhci_trb *end_trb;
- dma_addr_t input_dma;
- struct xhci_segment *result_seg;
- } complex_test_vector [] = {
- /* Test feeding a valid DMA address from a different ring */
- { .input_seg = xhci->event_ring->first_seg,
- .start_trb = xhci->event_ring->first_seg->trbs,
- .end_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
- .input_dma = xhci->cmd_ring->first_seg->dma,
- .result_seg = NULL,
- },
- /* Test feeding a valid end TRB from a different ring */
- { .input_seg = xhci->event_ring->first_seg,
- .start_trb = xhci->event_ring->first_seg->trbs,
- .end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
- .input_dma = xhci->cmd_ring->first_seg->dma,
- .result_seg = NULL,
- },
- /* Test feeding a valid start and end TRB from a different ring */
- { .input_seg = xhci->event_ring->first_seg,
- .start_trb = xhci->cmd_ring->first_seg->trbs,
- .end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
- .input_dma = xhci->cmd_ring->first_seg->dma,
- .result_seg = NULL,
- },
- /* TRB in this ring, but after this TD */
- { .input_seg = xhci->event_ring->first_seg,
- .start_trb = &xhci->event_ring->first_seg->trbs[0],
- .end_trb = &xhci->event_ring->first_seg->trbs[3],
- .input_dma = xhci->event_ring->first_seg->dma + 4*16,
- .result_seg = NULL,
- },
- /* TRB in this ring, but before this TD */
- { .input_seg = xhci->event_ring->first_seg,
- .start_trb = &xhci->event_ring->first_seg->trbs[3],
- .end_trb = &xhci->event_ring->first_seg->trbs[6],
- .input_dma = xhci->event_ring->first_seg->dma + 2*16,
- .result_seg = NULL,
- },
- /* TRB in this ring, but after this wrapped TD */
- { .input_seg = xhci->event_ring->first_seg,
- .start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
- .end_trb = &xhci->event_ring->first_seg->trbs[1],
- .input_dma = xhci->event_ring->first_seg->dma + 2*16,
- .result_seg = NULL,
- },
- /* TRB in this ring, but before this wrapped TD */
- { .input_seg = xhci->event_ring->first_seg,
- .start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
- .end_trb = &xhci->event_ring->first_seg->trbs[1],
- .input_dma = xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 4)*16,
- .result_seg = NULL,
- },
- /* TRB not in this ring, and we have a wrapped TD */
- { .input_seg = xhci->event_ring->first_seg,
- .start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
- .end_trb = &xhci->event_ring->first_seg->trbs[1],
- .input_dma = xhci->cmd_ring->first_seg->dma + 2*16,
- .result_seg = NULL,
- },
- };
-
- unsigned int num_tests;
- int i, ret;
-
- num_tests = ARRAY_SIZE(simple_test_vector);
- for (i = 0; i < num_tests; i++) {
- ret = xhci_test_trb_in_td(xhci,
- xhci->event_ring->first_seg,
- xhci->event_ring->first_seg->trbs,
- &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
- simple_test_vector[i].input_dma,
- simple_test_vector[i].result_seg,
- "Simple", i);
- if (ret < 0)
- return ret;
- }
-
- num_tests = ARRAY_SIZE(complex_test_vector);
- for (i = 0; i < num_tests; i++) {
- ret = xhci_test_trb_in_td(xhci,
- complex_test_vector[i].input_seg,
- complex_test_vector[i].start_trb,
- complex_test_vector[i].end_trb,
- complex_test_vector[i].input_dma,
- complex_test_vector[i].result_seg,
- "Complex", i);
- if (ret < 0)
- return ret;
- }
- xhci_dbg(xhci, "TRB math tests passed.\n");
- return 0;
-}
-
-static void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
+static void xhci_set_hc_event_deq(struct xhci_hcd *xhci, struct xhci_interrupter *ir)
{
u64 temp;
dma_addr_t deq;
- deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
- xhci->event_ring->dequeue);
+ deq = xhci_trb_virt_to_dma(ir->event_ring->deq_seg,
+ ir->event_ring->dequeue);
if (!deq)
xhci_warn(xhci, "WARN something wrong with SW event ring "
"dequeue ptr.\n");
/* Update HC event ring dequeue pointer */
- temp = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
+ temp = xhci_read_64(xhci, &ir->ir_set->erst_dequeue);
temp &= ERST_PTR_MASK;
/* Don't clear the EHB bit (which is RW1C) because
* there might be more events to service.
@@ -2108,7 +1973,7 @@ static void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
"// Write event ring dequeue pointer, "
"preserving EHB bit");
xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | temp,
- &xhci->ir_set->erst_dequeue);
+ &ir->ir_set->erst_dequeue);
}
static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
@@ -2289,6 +2154,9 @@ static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
xhci->hw_ports[i].addr = &xhci->op_regs->port_status_base +
NUM_PORT_REGS * i;
xhci->hw_ports[i].hw_portnum = i;
+
+ init_completion(&xhci->hw_ports[i].rexit_done);
+ init_completion(&xhci->hw_ports[i].u3exit_done);
}
xhci->rh_bw = kcalloc_node(num_ports, sizeof(*xhci->rh_bw), flags,
@@ -2375,6 +2243,68 @@ static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
return 0;
}
+static struct xhci_interrupter *
+xhci_alloc_interrupter(struct xhci_hcd *xhci, unsigned int intr_num, gfp_t flags)
+{
+ struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
+ struct xhci_interrupter *ir;
+ u64 erst_base;
+ u32 erst_size;
+ int ret;
+
+ if (intr_num > xhci->max_interrupters) {
+ xhci_warn(xhci, "Can't allocate interrupter %d, max interrupters %d\n",
+ intr_num, xhci->max_interrupters);
+ return NULL;
+ }
+
+ if (xhci->interrupter) {
+ xhci_warn(xhci, "Can't allocate already set up interrupter %d\n", intr_num);
+ return NULL;
+ }
+
+ ir = kzalloc_node(sizeof(*ir), flags, dev_to_node(dev));
+ if (!ir)
+ return NULL;
+
+ ir->ir_set = &xhci->run_regs->ir_set[intr_num];
+ ir->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, 1, TYPE_EVENT,
+ 0, flags);
+ if (!ir->event_ring) {
+ xhci_warn(xhci, "Failed to allocate interrupter %d event ring\n", intr_num);
+ goto fail_ir;
+ }
+
+ ret = xhci_alloc_erst(xhci, ir->event_ring, &ir->erst, flags);
+ if (ret) {
+ xhci_warn(xhci, "Failed to allocate interrupter %d erst\n", intr_num);
+ goto fail_ev;
+
+ }
+ /* set ERST count with the number of entries in the segment table */
+ erst_size = readl(&ir->ir_set->erst_size);
+ erst_size &= ERST_SIZE_MASK;
+ erst_size |= ERST_NUM_SEGS;
+ writel(erst_size, &ir->ir_set->erst_size);
+
+ erst_base = xhci_read_64(xhci, &ir->ir_set->erst_base);
+ erst_base &= ERST_PTR_MASK;
+ erst_base |= (ir->erst.erst_dma_addr & (u64) ~ERST_PTR_MASK);
+ xhci_write_64(xhci, erst_base, &ir->ir_set->erst_base);
+
+ /* Set the event ring dequeue address of this interrupter */
+ xhci_set_hc_event_deq(xhci, ir);
+
+ return ir;
+
+fail_ev:
+ xhci_ring_free(xhci, ir->event_ring);
+fail_ir:
+ kfree(ir);
+
+ return NULL;
+}
+
int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
{
dma_addr_t dma;
@@ -2382,7 +2312,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
unsigned int val, val2;
u64 val_64;
u32 page_size, temp;
- int i, ret;
+ int i;
INIT_LIST_HEAD(&xhci->cmd_list);
@@ -2495,48 +2425,13 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
" from cap regs base addr", val);
xhci->dba = (void __iomem *) xhci->cap_regs + val;
/* Set ir_set to interrupt register set 0 */
- xhci->ir_set = &xhci->run_regs->ir_set[0];
-
- /*
- * Event ring setup: Allocate a normal ring, but also setup
- * the event ring segment table (ERST). Section 4.9.3.
- */
- xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Allocating event ring");
- xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, 1, TYPE_EVENT,
- 0, flags);
- if (!xhci->event_ring)
- goto fail;
- if (xhci_check_trb_in_td_math(xhci) < 0)
- goto fail;
-
- ret = xhci_alloc_erst(xhci, xhci->event_ring, &xhci->erst, flags);
- if (ret)
- goto fail;
-
- /* set ERST count with the number of entries in the segment table */
- val = readl(&xhci->ir_set->erst_size);
- val &= ERST_SIZE_MASK;
- val |= ERST_NUM_SEGS;
- xhci_dbg_trace(xhci, trace_xhci_dbg_init,
- "// Write ERST size = %i to ir_set 0 (some bits preserved)",
- val);
- writel(val, &xhci->ir_set->erst_size);
+ /* allocate and set up primary interrupter with an event ring. */
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
- "// Set ERST entries to point to event ring.");
- /* set the segment table base address */
- xhci_dbg_trace(xhci, trace_xhci_dbg_init,
- "// Set ERST base address for ir_set 0 = 0x%llx",
- (unsigned long long)xhci->erst.erst_dma_addr);
- val_64 = xhci_read_64(xhci, &xhci->ir_set->erst_base);
- val_64 &= ERST_PTR_MASK;
- val_64 |= (xhci->erst.erst_dma_addr & (u64) ~ERST_PTR_MASK);
- xhci_write_64(xhci, val_64, &xhci->ir_set->erst_base);
-
- /* Set the event ring dequeue address */
- xhci_set_hc_event_deq(xhci);
- xhci_dbg_trace(xhci, trace_xhci_dbg_init,
- "Wrote ERST address to ir_set 0.");
+ "Allocating primary event ring");
+ xhci->interrupter = xhci_alloc_interrupter(xhci, 0, flags);
+ if (!xhci->interrupter)
+ goto fail;
xhci->isoc_bei_interval = AVOID_BEI_INTERVAL_MAX;
@@ -2547,13 +2442,6 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
*/
for (i = 0; i < MAX_HC_SLOTS; i++)
xhci->devs[i] = NULL;
- for (i = 0; i < USB_MAXCHILDREN; i++) {
- xhci->usb2_rhub.bus_state.resume_done[i] = 0;
- xhci->usb3_rhub.bus_state.resume_done[i] = 0;
- /* Only the USB 2.0 completions will ever be used. */
- init_completion(&xhci->usb2_rhub.bus_state.rexit_done[i]);
- init_completion(&xhci->usb3_rhub.bus_state.u3exit_done[i]);
- }
if (scratchpad_alloc(xhci, flags))
goto fail;
diff --git a/drivers/usb/host/xhci-mvebu.c b/drivers/usb/host/xhci-mvebu.c
index 60651a50770f..87f1597a0e5a 100644
--- a/drivers/usb/host/xhci-mvebu.c
+++ b/drivers/usb/host/xhci-mvebu.c
@@ -32,7 +32,7 @@ static void xhci_mvebu_mbus_config(void __iomem *base,
/* Program each DRAM CS in a seperate window */
for (win = 0; win < dram->num_cs; win++) {
- const struct mbus_dram_window *cs = dram->cs + win;
+ const struct mbus_dram_window *cs = &dram->cs[win];
writel(((cs->size - 1) & 0xffff0000) | (cs->mbus_attr << 8) |
(dram->mbus_dram_target_id << 4) | 1,
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 5fb55bf19493..b9f9625467d6 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -19,11 +19,11 @@
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/usb/of.h>
+#include <linux/reset.h>
#include "xhci.h"
#include "xhci-plat.h"
#include "xhci-mvebu.h"
-#include "xhci-rcar.h"
static struct hc_driver __read_mostly xhci_plat_hc_driver;
@@ -114,14 +114,6 @@ static const struct xhci_plat_priv xhci_plat_marvell_armada3700 = {
.init_quirk = xhci_mvebu_a3700_init_quirk,
};
-static const struct xhci_plat_priv xhci_plat_renesas_rcar_gen2 = {
- SET_XHCI_PLAT_PRIV_FOR_RCAR(XHCI_RCAR_FIRMWARE_NAME_V1)
-};
-
-static const struct xhci_plat_priv xhci_plat_renesas_rcar_gen3 = {
- SET_XHCI_PLAT_PRIV_FOR_RCAR(XHCI_RCAR_FIRMWARE_NAME_V3)
-};
-
static const struct xhci_plat_priv xhci_plat_brcm = {
.quirks = XHCI_RESET_ON_RESUME | XHCI_SUSPEND_RESUME_CLKS,
};
@@ -141,27 +133,6 @@ static const struct of_device_id usb_xhci_of_match[] = {
.compatible = "marvell,armada3700-xhci",
.data = &xhci_plat_marvell_armada3700,
}, {
- .compatible = "renesas,xhci-r8a7790",
- .data = &xhci_plat_renesas_rcar_gen2,
- }, {
- .compatible = "renesas,xhci-r8a7791",
- .data = &xhci_plat_renesas_rcar_gen2,
- }, {
- .compatible = "renesas,xhci-r8a7793",
- .data = &xhci_plat_renesas_rcar_gen2,
- }, {
- .compatible = "renesas,xhci-r8a7795",
- .data = &xhci_plat_renesas_rcar_gen3,
- }, {
- .compatible = "renesas,xhci-r8a7796",
- .data = &xhci_plat_renesas_rcar_gen3,
- }, {
- .compatible = "renesas,rcar-gen2-xhci",
- .data = &xhci_plat_renesas_rcar_gen2,
- }, {
- .compatible = "renesas,rcar-gen3-xhci",
- .data = &xhci_plat_renesas_rcar_gen3,
- }, {
.compatible = "brcm,xhci-brcm-v2",
.data = &xhci_plat_brcm,
}, {
@@ -173,11 +144,10 @@ static const struct of_device_id usb_xhci_of_match[] = {
MODULE_DEVICE_TABLE(of, usb_xhci_of_match);
#endif
-static int xhci_plat_probe(struct platform_device *pdev)
+int xhci_plat_probe(struct platform_device *pdev, struct device *sysdev, const struct xhci_plat_priv *priv_match)
{
- const struct xhci_plat_priv *priv_match;
const struct hc_driver *driver;
- struct device *sysdev, *tmpdev;
+ struct device *tmpdev;
struct xhci_hcd *xhci;
struct resource *res;
struct usb_hcd *hcd, *usb3_hcd;
@@ -195,31 +165,10 @@ static int xhci_plat_probe(struct platform_device *pdev)
if (irq < 0)
return irq;
- /*
- * sysdev must point to a device that is known to the system firmware
- * or PCI hardware. We handle these three cases here:
- * 1. xhci_plat comes from firmware
- * 2. xhci_plat is child of a device from firmware (dwc3-plat)
- * 3. xhci_plat is grandchild of a pci device (dwc3-pci)
- */
- for (sysdev = &pdev->dev; sysdev; sysdev = sysdev->parent) {
- if (is_of_node(sysdev->fwnode) ||
- is_acpi_device_node(sysdev->fwnode))
- break;
-#ifdef CONFIG_PCI
- else if (sysdev->bus == &pci_bus_type)
- break;
-#endif
- }
-
if (!sysdev)
sysdev = &pdev->dev;
- if (WARN_ON(!sysdev->dma_mask))
- /* Platform did not initialize dma_mask */
- ret = dma_coerce_mask_and_coherent(sysdev, DMA_BIT_MASK(64));
- else
- ret = dma_set_mask_and_coherent(sysdev, DMA_BIT_MASK(64));
+ ret = dma_set_mask_and_coherent(sysdev, DMA_BIT_MASK(64));
if (ret)
return ret;
@@ -257,25 +206,30 @@ static int xhci_plat_probe(struct platform_device *pdev)
goto put_hcd;
}
- ret = clk_prepare_enable(xhci->reg_clk);
- if (ret)
- goto put_hcd;
-
xhci->clk = devm_clk_get_optional(&pdev->dev, NULL);
if (IS_ERR(xhci->clk)) {
ret = PTR_ERR(xhci->clk);
- goto disable_reg_clk;
+ goto put_hcd;
}
+ xhci->reset = devm_reset_control_array_get_optional_shared(&pdev->dev);
+ if (IS_ERR(xhci->reset)) {
+ ret = PTR_ERR(xhci->reset);
+ goto put_hcd;
+ }
+
+ ret = reset_control_deassert(xhci->reset);
+ if (ret)
+ goto put_hcd;
+
+ ret = clk_prepare_enable(xhci->reg_clk);
+ if (ret)
+ goto err_reset;
+
ret = clk_prepare_enable(xhci->clk);
if (ret)
goto disable_reg_clk;
- if (pdev->dev.of_node)
- priv_match = of_device_get_match_data(&pdev->dev);
- else
- priv_match = dev_get_platdata(&pdev->dev);
-
if (priv_match) {
priv = hcd_to_xhci_priv(hcd);
/* Just copy data for now */
@@ -377,6 +331,9 @@ disable_clk:
disable_reg_clk:
clk_disable_unprepare(xhci->reg_clk);
+err_reset:
+ reset_control_assert(xhci->reset);
+
put_hcd:
usb_put_hcd(hcd);
@@ -386,8 +343,50 @@ disable_runtime:
return ret;
}
+EXPORT_SYMBOL_GPL(xhci_plat_probe);
+
+static int xhci_generic_plat_probe(struct platform_device *pdev)
+{
+ const struct xhci_plat_priv *priv_match;
+ struct device *sysdev;
+ int ret;
+
+ /*
+ * sysdev must point to a device that is known to the system firmware
+ * or PCI hardware. We handle these three cases here:
+ * 1. xhci_plat comes from firmware
+ * 2. xhci_plat is child of a device from firmware (dwc3-plat)
+ * 3. xhci_plat is grandchild of a pci device (dwc3-pci)
+ */
+ for (sysdev = &pdev->dev; sysdev; sysdev = sysdev->parent) {
+ if (is_of_node(sysdev->fwnode) ||
+ is_acpi_device_node(sysdev->fwnode))
+ break;
+#ifdef CONFIG_PCI
+ else if (sysdev->bus == &pci_bus_type)
+ break;
+#endif
+ }
+
+ if (!sysdev)
+ sysdev = &pdev->dev;
+
+ if (WARN_ON(!sysdev->dma_mask)) {
+ /* Platform did not initialize dma_mask */
+ ret = dma_coerce_mask_and_coherent(sysdev, DMA_BIT_MASK(64));
+ if (ret)
+ return ret;
+ }
+
+ if (pdev->dev.of_node)
+ priv_match = of_device_get_match_data(&pdev->dev);
+ else
+ priv_match = dev_get_platdata(&pdev->dev);
+
+ return xhci_plat_probe(pdev, sysdev, priv_match);
+}
-static int xhci_plat_remove(struct platform_device *dev)
+int xhci_plat_remove(struct platform_device *dev)
{
struct usb_hcd *hcd = platform_get_drvdata(dev);
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
@@ -412,6 +411,7 @@ static int xhci_plat_remove(struct platform_device *dev)
clk_disable_unprepare(clk);
clk_disable_unprepare(reg_clk);
+ reset_control_assert(xhci->reset);
usb_put_hcd(hcd);
pm_runtime_disable(&dev->dev);
@@ -420,6 +420,7 @@ static int xhci_plat_remove(struct platform_device *dev)
return 0;
}
+EXPORT_SYMBOL_GPL(xhci_plat_remove);
static int __maybe_unused xhci_plat_suspend(struct device *dev)
{
@@ -496,13 +497,14 @@ static int __maybe_unused xhci_plat_runtime_resume(struct device *dev)
return xhci_resume(xhci, 0);
}
-static const struct dev_pm_ops xhci_plat_pm_ops = {
+const struct dev_pm_ops xhci_plat_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(xhci_plat_suspend, xhci_plat_resume)
SET_RUNTIME_PM_OPS(xhci_plat_runtime_suspend,
xhci_plat_runtime_resume,
NULL)
};
+EXPORT_SYMBOL_GPL(xhci_plat_pm_ops);
#ifdef CONFIG_ACPI
static const struct acpi_device_id usb_xhci_acpi_match[] = {
@@ -513,8 +515,8 @@ static const struct acpi_device_id usb_xhci_acpi_match[] = {
MODULE_DEVICE_TABLE(acpi, usb_xhci_acpi_match);
#endif
-static struct platform_driver usb_xhci_driver = {
- .probe = xhci_plat_probe,
+static struct platform_driver usb_generic_xhci_driver = {
+ .probe = xhci_generic_plat_probe,
.remove = xhci_plat_remove,
.shutdown = usb_hcd_platform_shutdown,
.driver = {
@@ -529,13 +531,13 @@ MODULE_ALIAS("platform:xhci-hcd");
static int __init xhci_plat_init(void)
{
xhci_init_driver(&xhci_plat_hc_driver, &xhci_plat_overrides);
- return platform_driver_register(&usb_xhci_driver);
+ return platform_driver_register(&usb_generic_xhci_driver);
}
module_init(xhci_plat_init);
static void __exit xhci_plat_exit(void)
{
- platform_driver_unregister(&usb_xhci_driver);
+ platform_driver_unregister(&usb_generic_xhci_driver);
}
module_exit(xhci_plat_exit);
diff --git a/drivers/usb/host/xhci-plat.h b/drivers/usb/host/xhci-plat.h
index 1fb149d1fbce..83b5b5aa9f8e 100644
--- a/drivers/usb/host/xhci-plat.h
+++ b/drivers/usb/host/xhci-plat.h
@@ -21,4 +21,11 @@ struct xhci_plat_priv {
#define hcd_to_xhci_priv(h) ((struct xhci_plat_priv *)hcd_to_xhci(h)->priv)
#define xhci_to_priv(x) ((struct xhci_plat_priv *)(x)->priv)
+
+int xhci_plat_probe(struct platform_device *pdev, struct device *sysdev,
+ const struct xhci_plat_priv *priv_match);
+
+int xhci_plat_remove(struct platform_device *dev);
+extern const struct dev_pm_ops xhci_plat_pm_ops;
+
#endif /* _XHCI_PLAT_H */
diff --git a/drivers/usb/host/xhci-rcar.c b/drivers/usb/host/xhci-rcar.c
index aef0258a7160..7f18509a1d39 100644
--- a/drivers/usb/host/xhci-rcar.c
+++ b/drivers/usb/host/xhci-rcar.c
@@ -10,12 +10,17 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/usb/phy.h>
#include <linux/sys_soc.h>
#include "xhci.h"
#include "xhci-plat.h"
-#include "xhci-rcar.h"
+#include "xhci-rzv2m.h"
+
+#define XHCI_RCAR_FIRMWARE_NAME_V1 "r8a779x_usb3_v1.dlmem"
+#define XHCI_RCAR_FIRMWARE_NAME_V2 "r8a779x_usb3_v2.dlmem"
+#define XHCI_RCAR_FIRMWARE_NAME_V3 "r8a779x_usb3_v3.dlmem"
/*
* - The V3 firmware is for almost all R-Car Gen3 (except r8a7795 ES1.x)
@@ -108,7 +113,7 @@ static int xhci_rcar_is_gen2(struct device *dev)
of_device_is_compatible(node, "renesas,rcar-gen2-xhci");
}
-void xhci_rcar_start(struct usb_hcd *hcd)
+static void xhci_rcar_start(struct usb_hcd *hcd)
{
u32 temp;
@@ -203,7 +208,7 @@ static bool xhci_rcar_wait_for_pll_active(struct usb_hcd *hcd)
}
/* This function needs to initialize a "phy" of usb before */
-int xhci_rcar_init_quirk(struct usb_hcd *hcd)
+static int xhci_rcar_init_quirk(struct usb_hcd *hcd)
{
/* If hcd->regs is NULL, we don't just call the following function */
if (!hcd->regs)
@@ -215,7 +220,7 @@ int xhci_rcar_init_quirk(struct usb_hcd *hcd)
return xhci_rcar_download_firmware(hcd);
}
-int xhci_rcar_resume_quirk(struct usb_hcd *hcd)
+static int xhci_rcar_resume_quirk(struct usb_hcd *hcd)
{
int ret;
@@ -225,3 +230,92 @@ int xhci_rcar_resume_quirk(struct usb_hcd *hcd)
return ret;
}
+
+/*
+ * On R-Car Gen2 and Gen3, the AC64 bit (bit 0) of HCCPARAMS1 is set
+ * to 1. However, these SoCs don't support 64-bit address memory
+ * pointers. So, this driver clears the AC64 bit of xhci->hcc_params
+ * to call dma_set_coherent_mask(dev, DMA_BIT_MASK(32)) in
+ * xhci_gen_setup() by using the XHCI_NO_64BIT_SUPPORT quirk.
+ *
+ * And, since the firmware/internal CPU control the USBSTS.STS_HALT
+ * and the process speed is down when the roothub port enters U3,
+ * long delay for the handshake of STS_HALT is neeed in xhci_suspend()
+ * by using the XHCI_SLOW_SUSPEND quirk.
+ */
+#define SET_XHCI_PLAT_PRIV_FOR_RCAR(firmware) \
+ .firmware_name = firmware, \
+ .quirks = XHCI_NO_64BIT_SUPPORT | XHCI_TRUST_TX_LENGTH | \
+ XHCI_SLOW_SUSPEND, \
+ .init_quirk = xhci_rcar_init_quirk, \
+ .plat_start = xhci_rcar_start, \
+ .resume_quirk = xhci_rcar_resume_quirk,
+
+static const struct xhci_plat_priv xhci_plat_renesas_rcar_gen2 = {
+ SET_XHCI_PLAT_PRIV_FOR_RCAR(XHCI_RCAR_FIRMWARE_NAME_V1)
+};
+
+static const struct xhci_plat_priv xhci_plat_renesas_rcar_gen3 = {
+ SET_XHCI_PLAT_PRIV_FOR_RCAR(XHCI_RCAR_FIRMWARE_NAME_V3)
+};
+
+static const struct xhci_plat_priv xhci_plat_renesas_rzv2m = {
+ .quirks = XHCI_NO_64BIT_SUPPORT | XHCI_TRUST_TX_LENGTH |
+ XHCI_SLOW_SUSPEND,
+ .init_quirk = xhci_rzv2m_init_quirk,
+ .plat_start = xhci_rzv2m_start,
+};
+
+static const struct of_device_id usb_xhci_of_match[] = {
+ {
+ .compatible = "renesas,xhci-r8a7790",
+ .data = &xhci_plat_renesas_rcar_gen2,
+ }, {
+ .compatible = "renesas,xhci-r8a7791",
+ .data = &xhci_plat_renesas_rcar_gen2,
+ }, {
+ .compatible = "renesas,xhci-r8a7793",
+ .data = &xhci_plat_renesas_rcar_gen2,
+ }, {
+ .compatible = "renesas,xhci-r8a7795",
+ .data = &xhci_plat_renesas_rcar_gen3,
+ }, {
+ .compatible = "renesas,xhci-r8a7796",
+ .data = &xhci_plat_renesas_rcar_gen3,
+ }, {
+ .compatible = "renesas,rcar-gen2-xhci",
+ .data = &xhci_plat_renesas_rcar_gen2,
+ }, {
+ .compatible = "renesas,rcar-gen3-xhci",
+ .data = &xhci_plat_renesas_rcar_gen3,
+ }, {
+ .compatible = "renesas,rzv2m-xhci",
+ .data = &xhci_plat_renesas_rzv2m,
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, usb_xhci_of_match);
+
+static int xhci_renesas_probe(struct platform_device *pdev)
+{
+ const struct xhci_plat_priv *priv_match;
+
+ priv_match = of_device_get_match_data(&pdev->dev);
+
+ return xhci_plat_probe(pdev, NULL, priv_match);
+}
+
+static struct platform_driver usb_xhci_renesas_driver = {
+ .probe = xhci_renesas_probe,
+ .remove = xhci_plat_remove,
+ .shutdown = usb_hcd_platform_shutdown,
+ .driver = {
+ .name = "xhci-renesas-hcd",
+ .pm = &xhci_plat_pm_ops,
+ .of_match_table = of_match_ptr(usb_xhci_of_match),
+ },
+};
+module_platform_driver(usb_xhci_renesas_driver);
+
+MODULE_DESCRIPTION("xHCI Platform Host Controller Driver for Renesas R-Car and RZ");
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/host/xhci-rcar.h b/drivers/usb/host/xhci-rcar.h
deleted file mode 100644
index 048ad3b8a6c7..000000000000
--- a/drivers/usb/host/xhci-rcar.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * drivers/usb/host/xhci-rcar.h
- *
- * Copyright (C) 2014 Renesas Electronics Corporation
- */
-
-#ifndef _XHCI_RCAR_H
-#define _XHCI_RCAR_H
-
-#define XHCI_RCAR_FIRMWARE_NAME_V1 "r8a779x_usb3_v1.dlmem"
-#define XHCI_RCAR_FIRMWARE_NAME_V2 "r8a779x_usb3_v2.dlmem"
-#define XHCI_RCAR_FIRMWARE_NAME_V3 "r8a779x_usb3_v3.dlmem"
-
-#if IS_ENABLED(CONFIG_USB_XHCI_RCAR)
-void xhci_rcar_start(struct usb_hcd *hcd);
-int xhci_rcar_init_quirk(struct usb_hcd *hcd);
-int xhci_rcar_resume_quirk(struct usb_hcd *hcd);
-#else
-static inline void xhci_rcar_start(struct usb_hcd *hcd)
-{
-}
-
-static inline int xhci_rcar_init_quirk(struct usb_hcd *hcd)
-{
- return 0;
-}
-
-static inline int xhci_rcar_resume_quirk(struct usb_hcd *hcd)
-{
- return 0;
-}
-#endif
-
-/*
- * On R-Car Gen2 and Gen3, the AC64 bit (bit 0) of HCCPARAMS1 is set
- * to 1. However, these SoCs don't support 64-bit address memory
- * pointers. So, this driver clears the AC64 bit of xhci->hcc_params
- * to call dma_set_coherent_mask(dev, DMA_BIT_MASK(32)) in
- * xhci_gen_setup() by using the XHCI_NO_64BIT_SUPPORT quirk.
- *
- * And, since the firmware/internal CPU control the USBSTS.STS_HALT
- * and the process speed is down when the roothub port enters U3,
- * long delay for the handshake of STS_HALT is neeed in xhci_suspend()
- * by using the XHCI_SLOW_SUSPEND quirk.
- */
-#define SET_XHCI_PLAT_PRIV_FOR_RCAR(firmware) \
- .firmware_name = firmware, \
- .quirks = XHCI_NO_64BIT_SUPPORT | XHCI_TRUST_TX_LENGTH | \
- XHCI_SLOW_SUSPEND, \
- .init_quirk = xhci_rcar_init_quirk, \
- .plat_start = xhci_rcar_start, \
- .resume_quirk = xhci_rcar_resume_quirk,
-
-#endif /* _XHCI_RCAR_H */
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index f5b0e1ce22af..eb788c60c1c0 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -1833,7 +1833,8 @@ static void xhci_cavium_reset_phy_quirk(struct xhci_hcd *xhci)
}
static void handle_port_status(struct xhci_hcd *xhci,
- union xhci_trb *event)
+ struct xhci_interrupter *ir,
+ union xhci_trb *event)
{
struct usb_hcd *hcd;
u32 port_id;
@@ -1856,7 +1857,7 @@ static void handle_port_status(struct xhci_hcd *xhci,
if ((port_id <= 0) || (port_id > max_ports)) {
xhci_warn(xhci, "Port change event with invalid port ID %d\n",
port_id);
- inc_deq(xhci, xhci->event_ring);
+ inc_deq(xhci, ir->event_ring);
return;
}
@@ -1923,7 +1924,7 @@ static void handle_port_status(struct xhci_hcd *xhci,
goto cleanup;
} else if (!test_bit(hcd_portnum, &bus_state->resuming_ports)) {
xhci_dbg(xhci, "resume HS port %d\n", port_id);
- bus_state->resume_done[hcd_portnum] = jiffies +
+ port->resume_timestamp = jiffies +
msecs_to_jiffies(USB_RESUME_TIMEOUT);
set_bit(hcd_portnum, &bus_state->resuming_ports);
/* Do the rest in GetPortStatus after resume time delay.
@@ -1932,7 +1933,7 @@ static void handle_port_status(struct xhci_hcd *xhci,
*/
set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
mod_timer(&hcd->rh_timer,
- bus_state->resume_done[hcd_portnum]);
+ port->resume_timestamp);
usb_hcd_start_port_resume(&hcd->self, hcd_portnum);
bogus_port_status = true;
}
@@ -1944,7 +1945,7 @@ static void handle_port_status(struct xhci_hcd *xhci,
(portsc & PORT_PLS_MASK) == XDEV_U1 ||
(portsc & PORT_PLS_MASK) == XDEV_U2)) {
xhci_dbg(xhci, "resume SS port %d finished\n", port_id);
- complete(&bus_state->u3exit_done[hcd_portnum]);
+ complete(&port->u3exit_done);
/* We've just brought the device into U0/1/2 through either the
* Resume state after a device remote wakeup, or through the
* U3Exit state after a host-initiated resume. If it's a device
@@ -1969,10 +1970,9 @@ static void handle_port_status(struct xhci_hcd *xhci,
* RExit to a disconnect state). If so, let the driver know it's
* out of the RExit state.
*/
- if (!DEV_SUPERSPEED_ANY(portsc) && hcd->speed < HCD_USB3 &&
- test_and_clear_bit(hcd_portnum,
- &bus_state->rexit_ports)) {
- complete(&bus_state->rexit_done[hcd_portnum]);
+ if (hcd->speed < HCD_USB3 && port->rexit_active) {
+ complete(&port->rexit_done);
+ port->rexit_active = false;
bogus_port_status = true;
goto cleanup;
}
@@ -1986,7 +1986,7 @@ static void handle_port_status(struct xhci_hcd *xhci,
cleanup:
/* Update event ring dequeue pointer before dropping the lock */
- inc_deq(xhci, xhci->event_ring);
+ inc_deq(xhci, ir->event_ring);
/* Don't make the USB core poll the roothub if we got a bad port status
* change event. Besides, at that point we can't tell which roothub
@@ -2519,7 +2519,8 @@ finish_td:
* At this point, the host controller is probably hosed and should be reset.
*/
static int handle_tx_event(struct xhci_hcd *xhci,
- struct xhci_transfer_event *event)
+ struct xhci_interrupter *ir,
+ struct xhci_transfer_event *event)
{
struct xhci_virt_ep *ep;
struct xhci_ring *ep_ring;
@@ -2531,7 +2532,6 @@ static int handle_tx_event(struct xhci_hcd *xhci,
union xhci_trb *ep_trb;
int status = -EINPROGRESS;
struct xhci_ep_ctx *ep_ctx;
- struct list_head *tmp;
u32 trb_comp_code;
int td_num = 0;
bool handling_skipped_tds = false;
@@ -2585,10 +2585,8 @@ static int handle_tx_event(struct xhci_hcd *xhci,
}
/* Count current td numbers if ep->skip is set */
- if (ep->skip) {
- list_for_each(tmp, &ep_ring->td_list)
- td_num++;
- }
+ if (ep->skip)
+ td_num += list_count_nodes(&ep_ring->td_list);
/* Look for common error cases */
switch (trb_comp_code) {
@@ -2871,7 +2869,7 @@ cleanup:
* processing missed tds.
*/
if (!handling_skipped_tds)
- inc_deq(xhci, xhci->event_ring);
+ inc_deq(xhci, ir->event_ring);
/*
* If ep->skip is set, it means there are missed tds on the
@@ -2886,8 +2884,8 @@ cleanup:
err_out:
xhci_err(xhci, "@%016llx %08x %08x %08x %08x\n",
(unsigned long long) xhci_trb_virt_to_dma(
- xhci->event_ring->deq_seg,
- xhci->event_ring->dequeue),
+ ir->event_ring->deq_seg,
+ ir->event_ring->dequeue),
lower_32_bits(le64_to_cpu(event->buffer)),
upper_32_bits(le64_to_cpu(event->buffer)),
le32_to_cpu(event->transfer_len),
@@ -2901,7 +2899,7 @@ err_out:
* Returns >0 for "possibly more events to process" (caller should call again),
* otherwise 0 if done. In future, <0 returns should indicate error code.
*/
-static int xhci_handle_event(struct xhci_hcd *xhci)
+static int xhci_handle_event(struct xhci_hcd *xhci, struct xhci_interrupter *ir)
{
union xhci_trb *event;
int update_ptrs = 1;
@@ -2909,18 +2907,18 @@ static int xhci_handle_event(struct xhci_hcd *xhci)
int ret;
/* Event ring hasn't been allocated yet. */
- if (!xhci->event_ring || !xhci->event_ring->dequeue) {
- xhci_err(xhci, "ERROR event ring not ready\n");
+ if (!ir || !ir->event_ring || !ir->event_ring->dequeue) {
+ xhci_err(xhci, "ERROR interrupter not ready\n");
return -ENOMEM;
}
- event = xhci->event_ring->dequeue;
+ event = ir->event_ring->dequeue;
/* Does the HC or OS own the TRB? */
if ((le32_to_cpu(event->event_cmd.flags) & TRB_CYCLE) !=
- xhci->event_ring->cycle_state)
+ ir->event_ring->cycle_state)
return 0;
- trace_xhci_handle_event(xhci->event_ring, &event->generic);
+ trace_xhci_handle_event(ir->event_ring, &event->generic);
/*
* Barrier between reading the TRB_CYCLE (valid) flag above and any
@@ -2935,11 +2933,11 @@ static int xhci_handle_event(struct xhci_hcd *xhci)
handle_cmd_completion(xhci, &event->event_cmd);
break;
case TRB_PORT_STATUS:
- handle_port_status(xhci, event);
+ handle_port_status(xhci, ir, event);
update_ptrs = 0;
break;
case TRB_TRANSFER:
- ret = handle_tx_event(xhci, &event->trans_event);
+ ret = handle_tx_event(xhci, ir, &event->trans_event);
if (ret >= 0)
update_ptrs = 0;
break;
@@ -2963,7 +2961,7 @@ static int xhci_handle_event(struct xhci_hcd *xhci)
if (update_ptrs)
/* Update SW event ring dequeue pointer */
- inc_deq(xhci, xhci->event_ring);
+ inc_deq(xhci, ir->event_ring);
/* Are there more items on the event ring? Caller will call us again to
* check.
@@ -2977,16 +2975,17 @@ static int xhci_handle_event(struct xhci_hcd *xhci)
* - To avoid "Event Ring Full Error" condition
*/
static void xhci_update_erst_dequeue(struct xhci_hcd *xhci,
- union xhci_trb *event_ring_deq)
+ struct xhci_interrupter *ir,
+ union xhci_trb *event_ring_deq)
{
u64 temp_64;
dma_addr_t deq;
- temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
+ temp_64 = xhci_read_64(xhci, &ir->ir_set->erst_dequeue);
/* If necessary, update the HW's version of the event ring deq ptr. */
- if (event_ring_deq != xhci->event_ring->dequeue) {
- deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
- xhci->event_ring->dequeue);
+ if (event_ring_deq != ir->event_ring->dequeue) {
+ deq = xhci_trb_virt_to_dma(ir->event_ring->deq_seg,
+ ir->event_ring->dequeue);
if (deq == 0)
xhci_warn(xhci, "WARN something wrong with SW event ring dequeue ptr\n");
/*
@@ -3004,7 +3003,7 @@ static void xhci_update_erst_dequeue(struct xhci_hcd *xhci,
/* Clear the event handler busy flag (RW1C) */
temp_64 |= ERST_EHB;
- xhci_write_64(xhci, temp_64, &xhci->ir_set->erst_dequeue);
+ xhci_write_64(xhci, temp_64, &ir->ir_set->erst_dequeue);
}
/*
@@ -3016,6 +3015,7 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
union xhci_trb *event_ring_deq;
+ struct xhci_interrupter *ir;
irqreturn_t ret = IRQ_NONE;
u64 temp_64;
u32 status;
@@ -3053,11 +3053,13 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
status |= STS_EINT;
writel(status, &xhci->op_regs->status);
+ /* This is the handler of the primary interrupter */
+ ir = xhci->interrupter;
if (!hcd->msi_enabled) {
u32 irq_pending;
- irq_pending = readl(&xhci->ir_set->irq_pending);
+ irq_pending = readl(&ir->ir_set->irq_pending);
irq_pending |= IMAN_IP;
- writel(irq_pending, &xhci->ir_set->irq_pending);
+ writel(irq_pending, &ir->ir_set->irq_pending);
}
if (xhci->xhc_state & XHCI_STATE_DYING ||
@@ -3067,22 +3069,22 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
/* Clear the event handler busy flag (RW1C);
* the event ring should be empty.
*/
- temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
+ temp_64 = xhci_read_64(xhci, &ir->ir_set->erst_dequeue);
xhci_write_64(xhci, temp_64 | ERST_EHB,
- &xhci->ir_set->erst_dequeue);
+ &ir->ir_set->erst_dequeue);
ret = IRQ_HANDLED;
goto out;
}
- event_ring_deq = xhci->event_ring->dequeue;
+ event_ring_deq = ir->event_ring->dequeue;
/* FIXME this should be a delayed service routine
* that clears the EHB.
*/
- while (xhci_handle_event(xhci) > 0) {
+ while (xhci_handle_event(xhci, ir) > 0) {
if (event_loop++ < TRBS_PER_SEGMENT / 2)
continue;
- xhci_update_erst_dequeue(xhci, event_ring_deq);
- event_ring_deq = xhci->event_ring->dequeue;
+ xhci_update_erst_dequeue(xhci, ir, event_ring_deq);
+ event_ring_deq = ir->event_ring->dequeue;
/* ring is half-full, force isoc trbs to interrupt more often */
if (xhci->isoc_bei_interval > AVOID_BEI_INTERVAL_MIN)
@@ -3091,7 +3093,7 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
event_loop = 0;
}
- xhci_update_erst_dequeue(xhci, event_ring_deq);
+ xhci_update_erst_dequeue(xhci, ir, event_ring_deq);
ret = IRQ_HANDLED;
out:
diff --git a/drivers/usb/host/xhci-rzv2m.c b/drivers/usb/host/xhci-rzv2m.c
new file mode 100644
index 000000000000..ec65b24eafa8
--- /dev/null
+++ b/drivers/usb/host/xhci-rzv2m.c
@@ -0,0 +1,38 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * xHCI host controller driver for RZ/V2M
+ *
+ * Copyright (C) 2022 Renesas Electronics Corporation
+ */
+
+#include <linux/usb/rzv2m_usb3drd.h>
+#include "xhci-plat.h"
+#include "xhci-rzv2m.h"
+
+#define RZV2M_USB3_INTEN 0x1044 /* Interrupt Enable */
+
+#define RZV2M_USB3_INT_XHC_ENA BIT(0)
+#define RZV2M_USB3_INT_HSE_ENA BIT(2)
+#define RZV2M_USB3_INT_ENA_VAL (RZV2M_USB3_INT_XHC_ENA \
+ | RZV2M_USB3_INT_HSE_ENA)
+
+int xhci_rzv2m_init_quirk(struct usb_hcd *hcd)
+{
+ struct device *dev = hcd->self.controller;
+
+ rzv2m_usb3drd_reset(dev->parent, true);
+
+ return 0;
+}
+
+void xhci_rzv2m_start(struct usb_hcd *hcd)
+{
+ u32 int_en;
+
+ if (hcd->regs) {
+ /* Interrupt Enable */
+ int_en = readl(hcd->regs + RZV2M_USB3_INTEN);
+ int_en |= RZV2M_USB3_INT_ENA_VAL;
+ writel(int_en, hcd->regs + RZV2M_USB3_INTEN);
+ }
+}
diff --git a/drivers/usb/host/xhci-rzv2m.h b/drivers/usb/host/xhci-rzv2m.h
new file mode 100644
index 000000000000..12448b0e8d5b
--- /dev/null
+++ b/drivers/usb/host/xhci-rzv2m.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __XHCI_RZV2M_H
+#define __XHCI_RZV2M_H
+
+#if IS_ENABLED(CONFIG_USB_XHCI_RZV2M)
+void xhci_rzv2m_start(struct usb_hcd *hcd);
+int xhci_rzv2m_init_quirk(struct usb_hcd *hcd);
+#else
+static inline void xhci_rzv2m_start(struct usb_hcd *hcd) {}
+static inline int xhci_rzv2m_init_quirk(struct usb_hcd *hcd)
+{
+ return -EINVAL;
+}
+#endif
+
+#endif /* __XHCI_RZV2M_H */
diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c
index bdb776553826..1ff22f675930 100644
--- a/drivers/usb/host/xhci-tegra.c
+++ b/drivers/usb/host/xhci-tegra.c
@@ -44,6 +44,9 @@
#define XUSB_CFG_4 0x010
#define XUSB_BASE_ADDR_SHIFT 15
#define XUSB_BASE_ADDR_MASK 0x1ffff
+#define XUSB_CFG_7 0x01c
+#define XUSB_BASE2_ADDR_SHIFT 16
+#define XUSB_BASE2_ADDR_MASK 0xffff
#define XUSB_CFG_16 0x040
#define XUSB_CFG_24 0x060
#define XUSB_CFG_AXI_CFG 0x0f8
@@ -75,6 +78,20 @@
#define MBOX_SMI_INTR_FW_HANG BIT(1)
#define MBOX_SMI_INTR_EN BIT(3)
+/* BAR2 registers */
+#define XUSB_BAR2_ARU_MBOX_CMD 0x004
+#define XUSB_BAR2_ARU_MBOX_DATA_IN 0x008
+#define XUSB_BAR2_ARU_MBOX_DATA_OUT 0x00c
+#define XUSB_BAR2_ARU_MBOX_OWNER 0x010
+#define XUSB_BAR2_ARU_SMI_INTR 0x014
+#define XUSB_BAR2_ARU_SMI_ARU_FW_SCRATCH_DATA0 0x01c
+#define XUSB_BAR2_ARU_IFRDMA_CFG0 0x0e0
+#define XUSB_BAR2_ARU_IFRDMA_CFG1 0x0e4
+#define XUSB_BAR2_ARU_IFRDMA_STREAMID_FIELD 0x0e8
+#define XUSB_BAR2_ARU_C11_CSBRANGE 0x9c
+#define XUSB_BAR2_ARU_FW_SCRATCH 0x1000
+#define XUSB_BAR2_CSB_BASE_ADDR 0x2000
+
/* IPFS registers */
#define IPFS_XUSB_HOST_MSI_BAR_SZ_0 0x0c0
#define IPFS_XUSB_HOST_MSI_AXI_BAR_ST_0 0x0c4
@@ -111,6 +128,9 @@
#define IMFILLRNG1_TAG_HI_SHIFT 16
#define XUSB_FALC_IMFILLCTL 0x158
+/* CSB ARU registers */
+#define XUSB_CSB_ARU_SCRATCH0 0x100100
+
/* MP CSB registers */
#define XUSB_CSB_MP_ILOAD_ATTR 0x101a00
#define XUSB_CSB_MP_ILOAD_BASE_LO 0x101a04
@@ -131,6 +151,9 @@
#define IMEM_BLOCK_SIZE 256
+#define FW_IOCTL_TYPE_SHIFT 24
+#define FW_IOCTL_CFGTBL_READ 17
+
struct tegra_xusb_fw_header {
__le32 boot_loadaddr_in_imem;
__le32 boot_codedfi_offset;
@@ -175,6 +198,7 @@ struct tegra_xusb_mbox_regs {
u16 data_in;
u16 data_out;
u16 owner;
+ u16 smi_intr;
};
struct tegra_xusb_context_soc {
@@ -189,6 +213,14 @@ struct tegra_xusb_context_soc {
} fpci;
};
+struct tegra_xusb;
+struct tegra_xusb_soc_ops {
+ u32 (*mbox_reg_readl)(struct tegra_xusb *tegra, unsigned int offset);
+ void (*mbox_reg_writel)(struct tegra_xusb *tegra, u32 value, unsigned int offset);
+ u32 (*csb_reg_readl)(struct tegra_xusb *tegra, unsigned int offset);
+ void (*csb_reg_writel)(struct tegra_xusb *tegra, u32 value, unsigned int offset);
+};
+
struct tegra_xusb_soc {
const char *firmware;
const char * const *supply_names;
@@ -205,11 +237,14 @@ struct tegra_xusb_soc {
} ports;
struct tegra_xusb_mbox_regs mbox;
+ const struct tegra_xusb_soc_ops *ops;
bool scale_ss_clock;
bool has_ipfs;
bool lpm_support;
bool otg_reset_sspi;
+
+ bool has_bar2;
};
struct tegra_xusb_context {
@@ -230,6 +265,8 @@ struct tegra_xusb {
void __iomem *ipfs_base;
void __iomem *fpci_base;
+ void __iomem *bar2_base;
+ struct resource *bar2;
const struct tegra_xusb_soc *soc;
@@ -274,6 +311,7 @@ struct tegra_xusb {
bool suspended;
struct tegra_xusb_context context;
+ u8 lp0_utmi_pad_mask;
};
static struct hc_driver __read_mostly tegra_xhci_hc_driver;
@@ -300,8 +338,34 @@ static inline void ipfs_writel(struct tegra_xusb *tegra, u32 value,
writel(value, tegra->ipfs_base + offset);
}
+static inline u32 bar2_readl(struct tegra_xusb *tegra, unsigned int offset)
+{
+ return readl(tegra->bar2_base + offset);
+}
+
+static inline void bar2_writel(struct tegra_xusb *tegra, u32 value,
+ unsigned int offset)
+{
+ writel(value, tegra->bar2_base + offset);
+}
+
static u32 csb_readl(struct tegra_xusb *tegra, unsigned int offset)
{
+ const struct tegra_xusb_soc_ops *ops = tegra->soc->ops;
+
+ return ops->csb_reg_readl(tegra, offset);
+}
+
+static void csb_writel(struct tegra_xusb *tegra, u32 value,
+ unsigned int offset)
+{
+ const struct tegra_xusb_soc_ops *ops = tegra->soc->ops;
+
+ ops->csb_reg_writel(tegra, value, offset);
+}
+
+static u32 fpci_csb_readl(struct tegra_xusb *tegra, unsigned int offset)
+{
u32 page = CSB_PAGE_SELECT(offset);
u32 ofs = CSB_PAGE_OFFSET(offset);
@@ -310,8 +374,8 @@ static u32 csb_readl(struct tegra_xusb *tegra, unsigned int offset)
return fpci_readl(tegra, XUSB_CFG_CSB_BASE_ADDR + ofs);
}
-static void csb_writel(struct tegra_xusb *tegra, u32 value,
- unsigned int offset)
+static void fpci_csb_writel(struct tegra_xusb *tegra, u32 value,
+ unsigned int offset)
{
u32 page = CSB_PAGE_SELECT(offset);
u32 ofs = CSB_PAGE_OFFSET(offset);
@@ -320,6 +384,26 @@ static void csb_writel(struct tegra_xusb *tegra, u32 value,
fpci_writel(tegra, value, XUSB_CFG_CSB_BASE_ADDR + ofs);
}
+static u32 bar2_csb_readl(struct tegra_xusb *tegra, unsigned int offset)
+{
+ u32 page = CSB_PAGE_SELECT(offset);
+ u32 ofs = CSB_PAGE_OFFSET(offset);
+
+ bar2_writel(tegra, page, XUSB_BAR2_ARU_C11_CSBRANGE);
+
+ return bar2_readl(tegra, XUSB_BAR2_CSB_BASE_ADDR + ofs);
+}
+
+static void bar2_csb_writel(struct tegra_xusb *tegra, u32 value,
+ unsigned int offset)
+{
+ u32 page = CSB_PAGE_SELECT(offset);
+ u32 ofs = CSB_PAGE_OFFSET(offset);
+
+ bar2_writel(tegra, page, XUSB_BAR2_ARU_C11_CSBRANGE);
+ bar2_writel(tegra, value, XUSB_BAR2_CSB_BASE_ADDR + ofs);
+}
+
static int tegra_xusb_set_ss_clk(struct tegra_xusb *tegra,
unsigned long rate)
{
@@ -451,6 +535,7 @@ static bool tegra_xusb_mbox_cmd_requires_ack(enum tegra_xusb_mbox_cmd cmd)
static int tegra_xusb_mbox_send(struct tegra_xusb *tegra,
const struct tegra_xusb_mbox_msg *msg)
{
+ const struct tegra_xusb_soc_ops *ops = tegra->soc->ops;
bool wait_for_idle = false;
u32 value;
@@ -459,15 +544,15 @@ static int tegra_xusb_mbox_send(struct tegra_xusb *tegra,
* ACK/NAK messages.
*/
if (!(msg->cmd == MBOX_CMD_ACK || msg->cmd == MBOX_CMD_NAK)) {
- value = fpci_readl(tegra, tegra->soc->mbox.owner);
+ value = ops->mbox_reg_readl(tegra, tegra->soc->mbox.owner);
if (value != MBOX_OWNER_NONE) {
dev_err(tegra->dev, "mailbox is busy\n");
return -EBUSY;
}
- fpci_writel(tegra, MBOX_OWNER_SW, tegra->soc->mbox.owner);
+ ops->mbox_reg_writel(tegra, MBOX_OWNER_SW, tegra->soc->mbox.owner);
- value = fpci_readl(tegra, tegra->soc->mbox.owner);
+ value = ops->mbox_reg_readl(tegra, tegra->soc->mbox.owner);
if (value != MBOX_OWNER_SW) {
dev_err(tegra->dev, "failed to acquire mailbox\n");
return -EBUSY;
@@ -477,17 +562,17 @@ static int tegra_xusb_mbox_send(struct tegra_xusb *tegra,
}
value = tegra_xusb_mbox_pack(msg);
- fpci_writel(tegra, value, tegra->soc->mbox.data_in);
+ ops->mbox_reg_writel(tegra, value, tegra->soc->mbox.data_in);
- value = fpci_readl(tegra, tegra->soc->mbox.cmd);
+ value = ops->mbox_reg_readl(tegra, tegra->soc->mbox.cmd);
value |= MBOX_INT_EN | MBOX_DEST_FALC;
- fpci_writel(tegra, value, tegra->soc->mbox.cmd);
+ ops->mbox_reg_writel(tegra, value, tegra->soc->mbox.cmd);
if (wait_for_idle) {
unsigned long timeout = jiffies + msecs_to_jiffies(250);
while (time_before(jiffies, timeout)) {
- value = fpci_readl(tegra, tegra->soc->mbox.owner);
+ value = ops->mbox_reg_readl(tegra, tegra->soc->mbox.owner);
if (value == MBOX_OWNER_NONE)
break;
@@ -495,7 +580,7 @@ static int tegra_xusb_mbox_send(struct tegra_xusb *tegra,
}
if (time_after(jiffies, timeout))
- value = fpci_readl(tegra, tegra->soc->mbox.owner);
+ value = ops->mbox_reg_readl(tegra, tegra->soc->mbox.owner);
if (value != MBOX_OWNER_NONE)
return -ETIMEDOUT;
@@ -507,11 +592,12 @@ static int tegra_xusb_mbox_send(struct tegra_xusb *tegra,
static irqreturn_t tegra_xusb_mbox_irq(int irq, void *data)
{
struct tegra_xusb *tegra = data;
+ const struct tegra_xusb_soc_ops *ops = tegra->soc->ops;
u32 value;
/* clear mailbox interrupts */
- value = fpci_readl(tegra, XUSB_CFG_ARU_SMI_INTR);
- fpci_writel(tegra, value, XUSB_CFG_ARU_SMI_INTR);
+ value = ops->mbox_reg_readl(tegra, tegra->soc->mbox.smi_intr);
+ ops->mbox_reg_writel(tegra, value, tegra->soc->mbox.smi_intr);
if (value & MBOX_SMI_INTR_FW_HANG)
dev_err(tegra->dev, "controller firmware hang\n");
@@ -664,6 +750,7 @@ static void tegra_xusb_mbox_handle(struct tegra_xusb *tegra,
static irqreturn_t tegra_xusb_mbox_thread(int irq, void *data)
{
struct tegra_xusb *tegra = data;
+ const struct tegra_xusb_soc_ops *ops = tegra->soc->ops;
struct tegra_xusb_mbox_msg msg;
u32 value;
@@ -672,16 +759,16 @@ static irqreturn_t tegra_xusb_mbox_thread(int irq, void *data)
if (pm_runtime_suspended(tegra->dev) || tegra->suspended)
goto out;
- value = fpci_readl(tegra, tegra->soc->mbox.data_out);
+ value = ops->mbox_reg_readl(tegra, tegra->soc->mbox.data_out);
tegra_xusb_mbox_unpack(&msg, value);
- value = fpci_readl(tegra, tegra->soc->mbox.cmd);
+ value = ops->mbox_reg_readl(tegra, tegra->soc->mbox.cmd);
value &= ~MBOX_DEST_SMI;
- fpci_writel(tegra, value, tegra->soc->mbox.cmd);
+ ops->mbox_reg_writel(tegra, value, tegra->soc->mbox.cmd);
/* clear mailbox owner if no ACK/NAK is required */
if (!tegra_xusb_mbox_cmd_requires_ack(msg.cmd))
- fpci_writel(tegra, MBOX_OWNER_NONE, tegra->soc->mbox.owner);
+ ops->mbox_reg_writel(tegra, MBOX_OWNER_NONE, tegra->soc->mbox.owner);
tegra_xusb_mbox_handle(tegra, &msg);
@@ -709,6 +796,15 @@ static void tegra_xusb_config(struct tegra_xusb *tegra)
value |= regs & (XUSB_BASE_ADDR_MASK << XUSB_BASE_ADDR_SHIFT);
fpci_writel(tegra, value, XUSB_CFG_4);
+ /* Program BAR2 space */
+ if (tegra->bar2) {
+ value = fpci_readl(tegra, XUSB_CFG_7);
+ value &= ~(XUSB_BASE2_ADDR_MASK << XUSB_BASE2_ADDR_SHIFT);
+ value |= tegra->bar2->start &
+ (XUSB_BASE2_ADDR_MASK << XUSB_BASE2_ADDR_SHIFT);
+ fpci_writel(tegra, value, XUSB_CFG_7);
+ }
+
usleep_range(100, 200);
/* Enable bus master */
@@ -881,21 +977,36 @@ static int tegra_xusb_request_firmware(struct tegra_xusb *tegra)
return 0;
}
-static int tegra_xusb_load_firmware(struct tegra_xusb *tegra)
+static int tegra_xusb_wait_for_falcon(struct tegra_xusb *tegra)
+{
+ struct xhci_cap_regs __iomem *cap_regs;
+ struct xhci_op_regs __iomem *op_regs;
+ int ret;
+ u32 value;
+
+ cap_regs = tegra->regs;
+ op_regs = tegra->regs + HC_LENGTH(readl(&cap_regs->hc_capbase));
+
+ ret = readl_poll_timeout(&op_regs->status, value, !(value & STS_CNR), 1000, 200000);
+
+ if (ret)
+ dev_err(tegra->dev, "XHCI Controller not ready. Falcon state: 0x%x\n",
+ csb_readl(tegra, XUSB_FALC_CPUCTL));
+
+ return ret;
+}
+
+static int tegra_xusb_load_firmware_rom(struct tegra_xusb *tegra)
{
unsigned int code_tag_blocks, code_size_blocks, code_blocks;
- struct xhci_cap_regs __iomem *cap = tegra->regs;
struct tegra_xusb_fw_header *header;
struct device *dev = tegra->dev;
- struct xhci_op_regs __iomem *op;
- unsigned long timeout;
time64_t timestamp;
u64 address;
u32 value;
int err;
header = (struct tegra_xusb_fw_header *)tegra->fw.virt;
- op = tegra->regs + HC_LENGTH(readl(&cap->hc_capbase));
if (csb_readl(tegra, XUSB_CSB_MP_ILOAD_BASE_LO) != 0) {
dev_info(dev, "Firmware already loaded, Falcon state %#x\n",
@@ -968,30 +1079,54 @@ static int tegra_xusb_load_firmware(struct tegra_xusb *tegra)
/* Boot Falcon CPU and wait for USBSTS_CNR to get cleared. */
csb_writel(tegra, CPUCTL_STARTCPU, XUSB_FALC_CPUCTL);
- timeout = jiffies + msecs_to_jiffies(200);
+ if (tegra_xusb_wait_for_falcon(tegra))
+ return -EIO;
- do {
- value = readl(&op->status);
- if ((value & STS_CNR) == 0)
- break;
+ timestamp = le32_to_cpu(header->fwimg_created_time);
+
+ dev_info(dev, "Firmware timestamp: %ptTs UTC\n", &timestamp);
+
+ return 0;
+}
+
+static u32 tegra_xusb_read_firmware_header(struct tegra_xusb *tegra, u32 offset)
+{
+ /*
+ * We only accept reading the firmware config table
+ * The offset should not exceed the fw header structure
+ */
+ if (offset >= sizeof(struct tegra_xusb_fw_header))
+ return 0;
- usleep_range(1000, 2000);
- } while (time_is_after_jiffies(timeout));
+ bar2_writel(tegra, (FW_IOCTL_CFGTBL_READ << FW_IOCTL_TYPE_SHIFT) | offset,
+ XUSB_BAR2_ARU_FW_SCRATCH);
+ return bar2_readl(tegra, XUSB_BAR2_ARU_SMI_ARU_FW_SCRATCH_DATA0);
+}
- value = readl(&op->status);
- if (value & STS_CNR) {
- value = csb_readl(tegra, XUSB_FALC_CPUCTL);
- dev_err(dev, "XHCI controller not read: %#010x\n", value);
+static int tegra_xusb_init_ifr_firmware(struct tegra_xusb *tegra)
+{
+ time64_t timestamp;
+
+ if (tegra_xusb_wait_for_falcon(tegra))
return -EIO;
- }
- timestamp = le32_to_cpu(header->fwimg_created_time);
+#define offsetof_32(X, Y) ((u8)(offsetof(X, Y) / sizeof(__le32)))
+ timestamp = tegra_xusb_read_firmware_header(tegra, offsetof_32(struct tegra_xusb_fw_header,
+ fwimg_created_time) << 2);
- dev_info(dev, "Firmware timestamp: %ptTs UTC\n", &timestamp);
+ dev_info(tegra->dev, "Firmware timestamp: %ptTs UTC\n", &timestamp);
return 0;
}
+static int tegra_xusb_load_firmware(struct tegra_xusb *tegra)
+{
+ if (!tegra->soc->firmware)
+ return tegra_xusb_init_ifr_firmware(tegra);
+ else
+ return tegra_xusb_load_firmware_rom(tegra);
+}
+
static void tegra_xusb_powerdomain_remove(struct device *dev,
struct tegra_xusb *tegra)
{
@@ -1435,6 +1570,10 @@ static int tegra_xusb_probe(struct platform_device *pdev)
tegra->ipfs_base = devm_platform_ioremap_resource(pdev, 2);
if (IS_ERR(tegra->ipfs_base))
return PTR_ERR(tegra->ipfs_base);
+ } else if (tegra->soc->has_bar2) {
+ tegra->bar2_base = devm_platform_get_and_ioremap_resource(pdev, 2, &tegra->bar2);
+ if (IS_ERR(tegra->bar2_base))
+ return PTR_ERR(tegra->bar2_base);
}
tegra->xhci_irq = platform_get_irq(pdev, 0);
@@ -1651,10 +1790,13 @@ static int tegra_xusb_probe(struct platform_device *pdev)
goto disable_phy;
}
- err = tegra_xusb_request_firmware(tegra);
- if (err < 0) {
- dev_err(&pdev->dev, "failed to request firmware: %d\n", err);
- goto disable_phy;
+ if (tegra->soc->firmware) {
+ err = tegra_xusb_request_firmware(tegra);
+ if (err < 0) {
+ dev_err(&pdev->dev,
+ "failed to request firmware: %d\n", err);
+ goto disable_phy;
+ }
}
err = tegra_xusb_unpowergate_partitions(tegra);
@@ -1951,10 +2093,24 @@ static void tegra_xhci_disable_phy_wake(struct tegra_xusb *tegra)
struct tegra_xusb_padctl *padctl = tegra->padctl;
unsigned int i;
+ for (i = 0; i < tegra->num_usb_phys; i++) {
+ struct phy *phy = tegra_xusb_get_phy(tegra, "usb2", i);
+
+ if (!phy)
+ continue;
+
+ if (tegra_xusb_padctl_remote_wake_detected(padctl, phy))
+ tegra_phy_xusb_utmi_pad_power_on(phy);
+ }
+
for (i = 0; i < tegra->num_phys; i++) {
if (!tegra->phys[i])
continue;
+ if (tegra_xusb_padctl_remote_wake_detected(padctl, tegra->phys[i]))
+ dev_dbg(tegra->dev, "%pOF remote wake detected\n",
+ tegra->phys[i]->dev.of_node);
+
tegra_xusb_padctl_disable_phy_wake(padctl, tegra->phys[i]);
}
}
@@ -1972,6 +2128,28 @@ static void tegra_xhci_disable_phy_sleepwalk(struct tegra_xusb *tegra)
}
}
+static void tegra_xhci_program_utmi_power_lp0_exit(struct tegra_xusb *tegra)
+{
+ unsigned int i, index_to_usb2;
+ struct phy *phy;
+
+ for (i = 0; i < tegra->soc->num_types; i++) {
+ if (strcmp(tegra->soc->phy_types[i].name, "usb2") == 0)
+ index_to_usb2 = i;
+ }
+
+ for (i = 0; i < tegra->num_usb_phys; i++) {
+ if (!is_host_mode_phy(tegra, index_to_usb2, i))
+ continue;
+
+ phy = tegra_xusb_get_phy(tegra, "usb2", i);
+ if (tegra->lp0_utmi_pad_mask & BIT(i))
+ tegra_phy_xusb_utmi_pad_power_on(phy);
+ else
+ tegra_phy_xusb_utmi_pad_power_down(phy);
+ }
+}
+
static int tegra_xusb_enter_elpg(struct tegra_xusb *tegra, bool runtime)
{
struct xhci_hcd *xhci = hcd_to_xhci(tegra->hcd);
@@ -1980,6 +2158,7 @@ static int tegra_xusb_enter_elpg(struct tegra_xusb *tegra, bool runtime)
unsigned int i;
int err;
u32 usbcmd;
+ u32 portsc;
dev_dbg(dev, "entering ELPG\n");
@@ -1993,6 +2172,15 @@ static int tegra_xusb_enter_elpg(struct tegra_xusb *tegra, bool runtime)
goto out;
}
+ for (i = 0; i < tegra->num_usb_phys; i++) {
+ if (!xhci->usb2_rhub.ports[i])
+ continue;
+ portsc = readl(xhci->usb2_rhub.ports[i]->addr);
+ tegra->lp0_utmi_pad_mask &= ~BIT(i);
+ if (((portsc & PORT_PLS_MASK) == XDEV_U3) || ((portsc & DEV_SPEED_MASK) == XDEV_FS))
+ tegra->lp0_utmi_pad_mask |= BIT(i);
+ }
+
err = xhci_suspend(xhci, wakeup);
if (err < 0) {
dev_err(tegra->dev, "failed to suspend XHCI: %d\n", err);
@@ -2066,6 +2254,8 @@ static int tegra_xusb_exit_elpg(struct tegra_xusb *tegra, bool runtime)
phy_power_on(tegra->phys[i]);
}
+ if (tegra->suspended)
+ tegra_xhci_program_utmi_power_lp0_exit(tegra);
tegra_xusb_config(tegra);
tegra_xusb_restore_context(tegra);
@@ -2271,6 +2461,13 @@ static const struct tegra_xusb_context_soc tegra124_xusb_context = {
},
};
+static const struct tegra_xusb_soc_ops tegra124_ops = {
+ .mbox_reg_readl = &fpci_readl,
+ .mbox_reg_writel = &fpci_writel,
+ .csb_reg_readl = &fpci_csb_readl,
+ .csb_reg_writel = &fpci_csb_writel,
+};
+
static const struct tegra_xusb_soc tegra124_soc = {
.firmware = "nvidia/tegra124/xusb.bin",
.supply_names = tegra124_supply_names,
@@ -2286,11 +2483,13 @@ static const struct tegra_xusb_soc tegra124_soc = {
.scale_ss_clock = true,
.has_ipfs = true,
.otg_reset_sspi = false,
+ .ops = &tegra124_ops,
.mbox = {
.cmd = 0xe4,
.data_in = 0xe8,
.data_out = 0xec,
.owner = 0xf0,
+ .smi_intr = XUSB_CFG_ARU_SMI_INTR,
},
};
MODULE_FIRMWARE("nvidia/tegra124/xusb.bin");
@@ -2322,11 +2521,13 @@ static const struct tegra_xusb_soc tegra210_soc = {
.scale_ss_clock = false,
.has_ipfs = true,
.otg_reset_sspi = true,
+ .ops = &tegra124_ops,
.mbox = {
.cmd = 0xe4,
.data_in = 0xe8,
.data_out = 0xec,
.owner = 0xf0,
+ .smi_intr = XUSB_CFG_ARU_SMI_INTR,
},
};
MODULE_FIRMWARE("nvidia/tegra210/xusb.bin");
@@ -2363,11 +2564,13 @@ static const struct tegra_xusb_soc tegra186_soc = {
.scale_ss_clock = false,
.has_ipfs = false,
.otg_reset_sspi = false,
+ .ops = &tegra124_ops,
.mbox = {
.cmd = 0xe4,
.data_in = 0xe8,
.data_out = 0xec,
.owner = 0xf0,
+ .smi_intr = XUSB_CFG_ARU_SMI_INTR,
},
.lpm_support = true,
};
@@ -2394,21 +2597,56 @@ static const struct tegra_xusb_soc tegra194_soc = {
.scale_ss_clock = false,
.has_ipfs = false,
.otg_reset_sspi = false,
+ .ops = &tegra124_ops,
.mbox = {
.cmd = 0x68,
.data_in = 0x6c,
.data_out = 0x70,
.owner = 0x74,
+ .smi_intr = XUSB_CFG_ARU_SMI_INTR,
},
.lpm_support = true,
};
MODULE_FIRMWARE("nvidia/tegra194/xusb.bin");
+static const struct tegra_xusb_soc_ops tegra234_ops = {
+ .mbox_reg_readl = &bar2_readl,
+ .mbox_reg_writel = &bar2_writel,
+ .csb_reg_readl = &bar2_csb_readl,
+ .csb_reg_writel = &bar2_csb_writel,
+};
+
+static const struct tegra_xusb_soc tegra234_soc = {
+ .supply_names = tegra194_supply_names,
+ .num_supplies = ARRAY_SIZE(tegra194_supply_names),
+ .phy_types = tegra194_phy_types,
+ .num_types = ARRAY_SIZE(tegra194_phy_types),
+ .context = &tegra186_xusb_context,
+ .ports = {
+ .usb3 = { .offset = 0, .count = 4, },
+ .usb2 = { .offset = 4, .count = 4, },
+ },
+ .scale_ss_clock = false,
+ .has_ipfs = false,
+ .otg_reset_sspi = false,
+ .ops = &tegra234_ops,
+ .mbox = {
+ .cmd = XUSB_BAR2_ARU_MBOX_CMD,
+ .data_in = XUSB_BAR2_ARU_MBOX_DATA_IN,
+ .data_out = XUSB_BAR2_ARU_MBOX_DATA_OUT,
+ .owner = XUSB_BAR2_ARU_MBOX_OWNER,
+ .smi_intr = XUSB_BAR2_ARU_SMI_INTR,
+ },
+ .lpm_support = true,
+ .has_bar2 = true,
+};
+
static const struct of_device_id tegra_xusb_of_match[] = {
{ .compatible = "nvidia,tegra124-xusb", .data = &tegra124_soc },
{ .compatible = "nvidia,tegra210-xusb", .data = &tegra210_soc },
{ .compatible = "nvidia,tegra186-xusb", .data = &tegra186_soc },
{ .compatible = "nvidia,tegra194-xusb", .data = &tegra194_soc },
+ { .compatible = "nvidia,tegra234-xusb", .data = &tegra234_soc },
{ },
};
MODULE_DEVICE_TABLE(of, tegra_xusb_of_match);
@@ -2437,8 +2675,84 @@ static int tegra_xhci_setup(struct usb_hcd *hcd)
return xhci_gen_setup(hcd, tegra_xhci_quirks);
}
+static int tegra_xhci_hub_control(struct usb_hcd *hcd, u16 type_req, u16 value, u16 index,
+ char *buf, u16 length)
+{
+ struct tegra_xusb *tegra = dev_get_drvdata(hcd->self.controller);
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ struct xhci_hub *rhub;
+ struct xhci_bus_state *bus_state;
+ int port = (index & 0xff) - 1;
+ unsigned int i;
+ struct xhci_port **ports;
+ u32 portsc;
+ int ret;
+ struct phy *phy;
+
+ rhub = &xhci->usb2_rhub;
+ bus_state = &rhub->bus_state;
+ if (bus_state->resuming_ports && hcd->speed == HCD_USB2) {
+ ports = rhub->ports;
+ i = rhub->num_ports;
+ while (i--) {
+ if (!test_bit(i, &bus_state->resuming_ports))
+ continue;
+ portsc = readl(ports[i]->addr);
+ if ((portsc & PORT_PLS_MASK) == XDEV_RESUME)
+ tegra_phy_xusb_utmi_pad_power_on(
+ tegra_xusb_get_phy(tegra, "usb2", (int) i));
+ }
+ }
+
+ if (hcd->speed == HCD_USB2) {
+ phy = tegra_xusb_get_phy(tegra, "usb2", port);
+ if ((type_req == ClearPortFeature) && (value == USB_PORT_FEAT_SUSPEND)) {
+ if (!index || index > rhub->num_ports)
+ return -EPIPE;
+ tegra_phy_xusb_utmi_pad_power_on(phy);
+ }
+ if ((type_req == SetPortFeature) && (value == USB_PORT_FEAT_RESET)) {
+ if (!index || index > rhub->num_ports)
+ return -EPIPE;
+ ports = rhub->ports;
+ portsc = readl(ports[port]->addr);
+ if (portsc & PORT_CONNECT)
+ tegra_phy_xusb_utmi_pad_power_on(phy);
+ }
+ }
+
+ ret = xhci_hub_control(hcd, type_req, value, index, buf, length);
+ if (ret < 0)
+ return ret;
+
+ if (hcd->speed == HCD_USB2) {
+ /* Use phy where we set previously */
+ if ((type_req == SetPortFeature) && (value == USB_PORT_FEAT_SUSPEND))
+ /* We don't suspend the PAD while HNP role swap happens on the OTG port */
+ if (!((hcd->self.otg_port == (port + 1)) && hcd->self.b_hnp_enable))
+ tegra_phy_xusb_utmi_pad_power_down(phy);
+
+ if ((type_req == ClearPortFeature) && (value == USB_PORT_FEAT_C_CONNECTION)) {
+ ports = rhub->ports;
+ portsc = readl(ports[port]->addr);
+ if (!(portsc & PORT_CONNECT)) {
+ /* We don't suspend the PAD while HNP role swap happens on the OTG
+ * port
+ */
+ if (!((hcd->self.otg_port == (port + 1)) && hcd->self.b_hnp_enable))
+ tegra_phy_xusb_utmi_pad_power_down(phy);
+ }
+ }
+ if ((type_req == SetPortFeature) && (value == USB_PORT_FEAT_TEST))
+ tegra_phy_xusb_utmi_pad_power_on(phy);
+ }
+
+ return ret;
+}
+
static const struct xhci_driver_overrides tegra_xhci_overrides __initconst = {
.reset = tegra_xhci_setup,
+ .hub_control = tegra_xhci_hub_control,
};
static int __init tegra_xusb_init(void)
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 2b280beb0011..6183ce8574b1 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -292,6 +292,32 @@ static void xhci_zero_64b_regs(struct xhci_hcd *xhci)
xhci_info(xhci, "Fault detected\n");
}
+static int xhci_enable_interrupter(struct xhci_interrupter *ir)
+{
+ u32 iman;
+
+ if (!ir || !ir->ir_set)
+ return -EINVAL;
+
+ iman = readl(&ir->ir_set->irq_pending);
+ writel(ER_IRQ_ENABLE(iman), &ir->ir_set->irq_pending);
+
+ return 0;
+}
+
+static int xhci_disable_interrupter(struct xhci_interrupter *ir)
+{
+ u32 iman;
+
+ if (!ir || !ir->ir_set)
+ return -EINVAL;
+
+ iman = readl(&ir->ir_set->irq_pending);
+ writel(ER_IRQ_DISABLE(iman), &ir->ir_set->irq_pending);
+
+ return 0;
+}
+
#ifdef CONFIG_USB_PCI
/*
* Set up MSI
@@ -610,9 +636,9 @@ static int xhci_init(struct usb_hcd *hcd)
/*-------------------------------------------------------------------------*/
-
static int xhci_run_finished(struct xhci_hcd *xhci)
{
+ struct xhci_interrupter *ir = xhci->interrupter;
unsigned long flags;
u32 temp;
@@ -628,8 +654,7 @@ static int xhci_run_finished(struct xhci_hcd *xhci)
writel(temp, &xhci->op_regs->command);
xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Enable primary interrupter");
- temp = readl(&xhci->ir_set->irq_pending);
- writel(ER_IRQ_ENABLE(temp), &xhci->ir_set->irq_pending);
+ xhci_enable_interrupter(ir);
if (xhci_start(xhci)) {
xhci_halt(xhci);
@@ -665,7 +690,7 @@ int xhci_run(struct usb_hcd *hcd)
u64 temp_64;
int ret;
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
-
+ struct xhci_interrupter *ir = xhci->interrupter;
/* Start the xHCI host controller running only after the USB 2.0 roothub
* is setup.
*/
@@ -680,17 +705,17 @@ int xhci_run(struct usb_hcd *hcd)
if (ret)
return ret;
- temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
+ temp_64 = xhci_read_64(xhci, &ir->ir_set->erst_dequeue);
temp_64 &= ~ERST_PTR_MASK;
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"ERST deq = 64'h%0lx", (long unsigned int) temp_64);
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"// Set the interrupt modulation register");
- temp = readl(&xhci->ir_set->irq_control);
+ temp = readl(&ir->ir_set->irq_control);
temp &= ~ER_IRQ_INTERVAL_MASK;
temp |= (xhci->imod_interval / 250) & ER_IRQ_INTERVAL_MASK;
- writel(temp, &xhci->ir_set->irq_control);
+ writel(temp, &ir->ir_set->irq_control);
if (xhci->quirks & XHCI_NEC_HOST) {
struct xhci_command *command;
@@ -733,6 +758,7 @@ static void xhci_stop(struct usb_hcd *hcd)
{
u32 temp;
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ struct xhci_interrupter *ir = xhci->interrupter;
mutex_lock(&xhci->mutex);
@@ -769,8 +795,7 @@ static void xhci_stop(struct usb_hcd *hcd)
"// Disabling event ring interrupts");
temp = readl(&xhci->op_regs->status);
writel((temp & ~0x1fff) | STS_EINT, &xhci->op_regs->status);
- temp = readl(&xhci->ir_set->irq_pending);
- writel(ER_IRQ_DISABLE(temp), &xhci->ir_set->irq_pending);
+ xhci_disable_interrupter(ir);
xhci_dbg_trace(xhci, trace_xhci_dbg_init, "cleaning up memory");
xhci_mem_cleanup(xhci);
@@ -832,28 +857,36 @@ EXPORT_SYMBOL_GPL(xhci_shutdown);
#ifdef CONFIG_PM
static void xhci_save_registers(struct xhci_hcd *xhci)
{
+ struct xhci_interrupter *ir = xhci->interrupter;
+
xhci->s3.command = readl(&xhci->op_regs->command);
xhci->s3.dev_nt = readl(&xhci->op_regs->dev_notification);
xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
xhci->s3.config_reg = readl(&xhci->op_regs->config_reg);
- xhci->s3.erst_size = readl(&xhci->ir_set->erst_size);
- xhci->s3.erst_base = xhci_read_64(xhci, &xhci->ir_set->erst_base);
- xhci->s3.erst_dequeue = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
- xhci->s3.irq_pending = readl(&xhci->ir_set->irq_pending);
- xhci->s3.irq_control = readl(&xhci->ir_set->irq_control);
+
+ if (!ir)
+ return;
+
+ ir->s3_erst_size = readl(&ir->ir_set->erst_size);
+ ir->s3_erst_base = xhci_read_64(xhci, &ir->ir_set->erst_base);
+ ir->s3_erst_dequeue = xhci_read_64(xhci, &ir->ir_set->erst_dequeue);
+ ir->s3_irq_pending = readl(&ir->ir_set->irq_pending);
+ ir->s3_irq_control = readl(&ir->ir_set->irq_control);
}
static void xhci_restore_registers(struct xhci_hcd *xhci)
{
+ struct xhci_interrupter *ir = xhci->interrupter;
+
writel(xhci->s3.command, &xhci->op_regs->command);
writel(xhci->s3.dev_nt, &xhci->op_regs->dev_notification);
xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr);
writel(xhci->s3.config_reg, &xhci->op_regs->config_reg);
- writel(xhci->s3.erst_size, &xhci->ir_set->erst_size);
- xhci_write_64(xhci, xhci->s3.erst_base, &xhci->ir_set->erst_base);
- xhci_write_64(xhci, xhci->s3.erst_dequeue, &xhci->ir_set->erst_dequeue);
- writel(xhci->s3.irq_pending, &xhci->ir_set->irq_pending);
- writel(xhci->s3.irq_control, &xhci->ir_set->irq_control);
+ writel(ir->s3_erst_size, &ir->ir_set->erst_size);
+ xhci_write_64(xhci, ir->s3_erst_base, &ir->ir_set->erst_base);
+ xhci_write_64(xhci, ir->s3_erst_dequeue, &ir->ir_set->erst_dequeue);
+ writel(ir->s3_irq_pending, &ir->ir_set->irq_pending);
+ writel(ir->s3_irq_control, &ir->ir_set->irq_control);
}
static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci)
@@ -1218,8 +1251,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
xhci_dbg(xhci, "// Disabling event ring interrupts\n");
temp = readl(&xhci->op_regs->status);
writel((temp & ~0x1fff) | STS_EINT, &xhci->op_regs->status);
- temp = readl(&xhci->ir_set->irq_pending);
- writel(ER_IRQ_DISABLE(temp), &xhci->ir_set->irq_pending);
+ xhci_disable_interrupter(xhci->interrupter);
xhci_dbg(xhci, "cleaning up memory\n");
xhci_mem_cleanup(xhci);
@@ -5334,6 +5366,11 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
if (xhci->hci_version > 0x100)
xhci->hcc_params2 = readl(&xhci->cap_regs->hcc_params2);
+ /* xhci-plat or xhci-pci might have set max_interrupters already */
+ if ((!xhci->max_interrupters) ||
+ xhci->max_interrupters > HCS_MAX_INTRS(xhci->hcs_params1))
+ xhci->max_interrupters = HCS_MAX_INTRS(xhci->hcs_params1);
+
xhci->quirks |= quirks;
get_quirks(dev, xhci);
@@ -5518,6 +5555,8 @@ void xhci_init_driver(struct hc_driver *drv,
drv->reset_bandwidth = over->reset_bandwidth;
if (over->update_hub_device)
drv->update_hub_device = over->update_hub_device;
+ if (over->hub_control)
+ drv->hub_control = over->hub_control;
}
}
EXPORT_SYMBOL_GPL(xhci_init_driver);
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index dcee7f3207ad..786002bb35db 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -513,6 +513,9 @@ struct xhci_intr_reg {
/* Preserve bits 16:31 of erst_size */
#define ERST_SIZE_MASK (0xffff << 16)
+/* erst_base bitmasks */
+#define ERST_BASE_RSVDP (0x3f)
+
/* erst_dequeue bitmasks */
/* Dequeue ERST Segment Index (DESI) - Segment number (or alias)
* where the current dequeue pointer lies. This is an optional HW hint.
@@ -1684,11 +1687,6 @@ struct s3_save {
u32 dev_nt;
u64 dcbaa_ptr;
u32 config_reg;
- u32 irq_pending;
- u32 irq_control;
- u32 erst_size;
- u64 erst_base;
- u64 erst_dequeue;
};
/* Use for lpm */
@@ -1706,16 +1704,22 @@ struct xhci_bus_state {
u32 port_c_suspend;
u32 suspended_ports;
u32 port_remote_wakeup;
- unsigned long resume_done[USB_MAXCHILDREN];
/* which ports have started to resume */
unsigned long resuming_ports;
- /* Which ports are waiting on RExit to U0 transition. */
- unsigned long rexit_ports;
- struct completion rexit_done[USB_MAXCHILDREN];
- struct completion u3exit_done[USB_MAXCHILDREN];
};
-
+struct xhci_interrupter {
+ struct xhci_ring *event_ring;
+ struct xhci_erst erst;
+ struct xhci_intr_reg __iomem *ir_set;
+ unsigned int intr_num;
+ /* For interrupter registers save and restore over suspend/resume */
+ u32 s3_irq_pending;
+ u32 s3_irq_control;
+ u32 s3_erst_size;
+ u64 s3_erst_base;
+ u64 s3_erst_dequeue;
+};
/*
* It can take up to 20 ms to transition from RExit to U0 on the
* Intel Lynx Point LP xHCI host.
@@ -1736,6 +1740,10 @@ struct xhci_port {
struct xhci_hub *rhub;
struct xhci_port_cap *port_cap;
unsigned int lpm_incapable:1;
+ unsigned long resume_timestamp;
+ bool rexit_active;
+ struct completion rexit_done;
+ struct completion u3exit_done;
};
struct xhci_hub {
@@ -1758,8 +1766,6 @@ struct xhci_hcd {
struct xhci_op_regs __iomem *op_regs;
struct xhci_run_regs __iomem *run_regs;
struct xhci_doorbell_array __iomem *dba;
- /* Our HCD's current interrupter register set */
- struct xhci_intr_reg __iomem *ir_set;
/* Cached register copies of read-only HC data */
__u32 hcs_params1;
@@ -1774,7 +1780,7 @@ struct xhci_hcd {
u8 sbrn;
u16 hci_version;
u8 max_slots;
- u8 max_interrupters;
+ u16 max_interrupters;
u8 max_ports;
u8 isoc_threshold;
/* imod_interval in ns (I * 250ns) */
@@ -1794,6 +1800,7 @@ struct xhci_hcd {
struct reset_control *reset;
/* data structures */
struct xhci_device_context_array *dcbaa;
+ struct xhci_interrupter *interrupter;
struct xhci_ring *cmd_ring;
unsigned int cmd_ring_state;
#define CMD_RING_STATE_RUNNING (1 << 0)
@@ -1804,8 +1811,7 @@ struct xhci_hcd {
struct delayed_work cmd_timer;
struct completion cmd_ring_stop_completion;
struct xhci_command *current_cmd;
- struct xhci_ring *event_ring;
- struct xhci_erst erst;
+
/* Scratchpad */
struct xhci_scratchpad *scratchpad;
@@ -1946,6 +1952,8 @@ struct xhci_driver_overrides {
void (*reset_bandwidth)(struct usb_hcd *, struct usb_device *);
int (*update_hub_device)(struct usb_hcd *hcd, struct usb_device *hdev,
struct usb_tt *tt, gfp_t mem_flags);
+ int (*hub_control)(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
+ u16 wIndex, char *buf, u16 wLength);
};
#define XHCI_CFC_DELAY 10
diff --git a/drivers/usb/misc/onboard_usb_hub.c b/drivers/usb/misc/onboard_usb_hub.c
index 969c4c4f2ae9..5402e4b7267b 100644
--- a/drivers/usb/misc/onboard_usb_hub.c
+++ b/drivers/usb/misc/onboard_usb_hub.c
@@ -333,6 +333,7 @@ static struct platform_driver onboard_hub_driver = {
#define VENDOR_ID_MICROCHIP 0x0424
#define VENDOR_ID_REALTEK 0x0bda
#define VENDOR_ID_TI 0x0451
+#define VENDOR_ID_VIA 0x2109
/*
* Returns the onboard_hub platform device that is associated with the USB
@@ -407,6 +408,7 @@ static void onboard_hub_usbdev_disconnect(struct usb_device *udev)
static const struct usb_device_id onboard_hub_id_table[] = {
{ USB_DEVICE(VENDOR_ID_GENESYS, 0x0608) }, /* Genesys Logic GL850G USB 2.0 */
+ { USB_DEVICE(VENDOR_ID_GENESYS, 0x0610) }, /* Genesys Logic GL852G USB 2.0 */
{ USB_DEVICE(VENDOR_ID_MICROCHIP, 0x2514) }, /* USB2514B USB 2.0 */
{ USB_DEVICE(VENDOR_ID_REALTEK, 0x0411) }, /* RTS5411 USB 3.1 */
{ USB_DEVICE(VENDOR_ID_REALTEK, 0x5411) }, /* RTS5411 USB 2.1 */
@@ -414,6 +416,8 @@ static const struct usb_device_id onboard_hub_id_table[] = {
{ USB_DEVICE(VENDOR_ID_REALTEK, 0x5414) }, /* RTS5414 USB 2.1 */
{ USB_DEVICE(VENDOR_ID_TI, 0x8140) }, /* TI USB8041 3.0 */
{ USB_DEVICE(VENDOR_ID_TI, 0x8142) }, /* TI USB8041 2.0 */
+ { USB_DEVICE(VENDOR_ID_VIA, 0x0817) }, /* VIA VL817 3.1 */
+ { USB_DEVICE(VENDOR_ID_VIA, 0x2817) }, /* VIA VL817 2.0 */
{}
};
MODULE_DEVICE_TABLE(usb, onboard_hub_id_table);
diff --git a/drivers/usb/misc/onboard_usb_hub.h b/drivers/usb/misc/onboard_usb_hub.h
index 62129a6a1ba5..0a943a154649 100644
--- a/drivers/usb/misc/onboard_usb_hub.h
+++ b/drivers/usb/misc/onboard_usb_hub.h
@@ -26,15 +26,26 @@ static const struct onboard_hub_pdata genesys_gl850g_data = {
.reset_us = 3,
};
+static const struct onboard_hub_pdata genesys_gl852g_data = {
+ .reset_us = 50,
+};
+
+static const struct onboard_hub_pdata vialab_vl817_data = {
+ .reset_us = 10,
+};
+
static const struct of_device_id onboard_hub_match[] = {
{ .compatible = "usb424,2514", .data = &microchip_usb424_data, },
{ .compatible = "usb451,8140", .data = &ti_tusb8041_data, },
{ .compatible = "usb451,8142", .data = &ti_tusb8041_data, },
{ .compatible = "usb5e3,608", .data = &genesys_gl850g_data, },
+ { .compatible = "usb5e3,610", .data = &genesys_gl852g_data, },
{ .compatible = "usbbda,411", .data = &realtek_rts5411_data, },
{ .compatible = "usbbda,5411", .data = &realtek_rts5411_data, },
{ .compatible = "usbbda,414", .data = &realtek_rts5411_data, },
{ .compatible = "usbbda,5414", .data = &realtek_rts5411_data, },
+ { .compatible = "usb2109,817", .data = &vialab_vl817_data, },
+ { .compatible = "usb2109,2817", .data = &vialab_vl817_data, },
{}
};
diff --git a/drivers/usb/mon/mon_bin.c b/drivers/usb/mon/mon_bin.c
index 094e812e9e69..abb1cd35d8a6 100644
--- a/drivers/usb/mon/mon_bin.c
+++ b/drivers/usb/mon/mon_bin.c
@@ -1272,8 +1272,7 @@ static int mon_bin_mmap(struct file *filp, struct vm_area_struct *vma)
if (vma->vm_flags & VM_WRITE)
return -EPERM;
- vma->vm_flags &= ~VM_MAYWRITE;
- vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+ vm_flags_mod(vma, VM_DONTEXPAND | VM_DONTDUMP, VM_MAYWRITE);
vma->vm_private_data = filp->private_data;
mon_bin_vma_open(vma);
return 0;
diff --git a/drivers/usb/mtu3/mtu3_gadget.c b/drivers/usb/mtu3/mtu3_gadget.c
index 80236e7b0895..c0264d5426bf 100644
--- a/drivers/usb/mtu3/mtu3_gadget.c
+++ b/drivers/usb/mtu3/mtu3_gadget.c
@@ -133,10 +133,9 @@ static int mtu3_ep_disable(struct mtu3_ep *mep)
{
struct mtu3 *mtu = mep->mtu;
- mtu3_qmu_stop(mep);
-
/* abort all pending requests */
nuke(mep, -ESHUTDOWN);
+ mtu3_qmu_stop(mep);
mtu3_deconfig_ep(mtu, mep);
mtu3_gpd_ring_free(mep);
diff --git a/drivers/usb/mtu3/mtu3_hw_regs.h b/drivers/usb/mtu3/mtu3_hw_regs.h
index 519a58301f45..ee30ae0a4b54 100644
--- a/drivers/usb/mtu3/mtu3_hw_regs.h
+++ b/drivers/usb/mtu3/mtu3_hw_regs.h
@@ -128,6 +128,7 @@
#define TX_FIFOEMPTY BIT(24)
#define TX_SENTSTALL BIT(22)
#define TX_SENDSTALL BIT(21)
+#define TX_FLUSHFIFO BIT(20)
#define TX_TXPKTRDY BIT(16)
#define TX_TXMAXPKTSZ_MSK GENMASK(10, 0)
#define TX_TXMAXPKTSZ(x) ((x) & TX_TXMAXPKTSZ_MSK)
diff --git a/drivers/usb/mtu3/mtu3_qmu.c b/drivers/usb/mtu3/mtu3_qmu.c
index 2ea3157ddb6e..a2fdab8b63b2 100644
--- a/drivers/usb/mtu3/mtu3_qmu.c
+++ b/drivers/usb/mtu3/mtu3_qmu.c
@@ -388,6 +388,9 @@ void mtu3_qmu_stop(struct mtu3_ep *mep)
}
mtu3_writel(mbase, qcsr, QMU_Q_STOP);
+ if (mep->is_in)
+ mtu3_setbits(mbase, MU3D_EP_TXCR0(epnum), TX_FLUSHFIFO);
+
ret = readl_poll_timeout_atomic(mbase + qcsr, value,
!(value & QMU_Q_ACTIVE), 1, 1000);
if (ret) {
@@ -395,6 +398,10 @@ void mtu3_qmu_stop(struct mtu3_ep *mep)
return;
}
+ /* flush fifo again to make sure the fifo is empty */
+ if (mep->is_in)
+ mtu3_setbits(mbase, MU3D_EP_TXCR0(epnum), TX_FLUSHFIFO);
+
dev_dbg(mtu->dev, "%s's qmu stop now!\n", mep->name);
}
diff --git a/drivers/usb/musb/da8xx.c b/drivers/usb/musb/da8xx.c
index a4e55b0c52cf..d47e5c94587b 100644
--- a/drivers/usb/musb/da8xx.c
+++ b/drivers/usb/musb/da8xx.c
@@ -368,8 +368,10 @@ static int da8xx_musb_init(struct musb *musb)
/* Returns zero if e.g. not clocked */
rev = musb_readl(reg_base, DA8XX_USB_REVISION_REG);
- if (!rev)
+ if (!rev) {
+ ret = -ENODEV;
goto fail;
+ }
musb->xceiv = usb_get_phy(USB_PHY_TYPE_USB2);
if (IS_ERR_OR_NULL(musb->xceiv)) {
diff --git a/drivers/usb/musb/mediatek.c b/drivers/usb/musb/mediatek.c
index cad991380b0c..27b9bd258340 100644
--- a/drivers/usb/musb/mediatek.c
+++ b/drivers/usb/musb/mediatek.c
@@ -294,7 +294,8 @@ static int mtk_musb_init(struct musb *musb)
err_phy_power_on:
phy_exit(glue->phy);
err_phy_init:
- mtk_otg_switch_exit(glue);
+ if (musb->port_mode == MUSB_OTG)
+ mtk_otg_switch_exit(glue);
return ret;
}
diff --git a/drivers/usb/musb/sunxi.c b/drivers/usb/musb/sunxi.c
index 7f9a999cd5ff..9b622cd9b2bd 100644
--- a/drivers/usb/musb/sunxi.c
+++ b/drivers/usb/musb/sunxi.c
@@ -15,6 +15,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/phy/phy-sun4i-usb.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
@@ -67,6 +68,13 @@
#define SUNXI_MUSB_FL_NO_CONFIGDATA 7
#define SUNXI_MUSB_FL_PHY_MODE_PEND 8
+struct sunxi_musb_cfg {
+ const struct musb_hdrc_config *hdrc_config;
+ bool has_sram;
+ bool has_reset;
+ bool no_configdata;
+};
+
/* Our read/write methods need access and do not get passed in a musb ref :| */
static struct musb *sunxi_musb;
@@ -621,11 +629,10 @@ static const struct musb_platform_ops sunxi_musb_ops = {
.post_root_reset_end = sunxi_musb_post_root_reset_end,
};
-/* Allwinner OTG supports up to 5 endpoints */
-#define SUNXI_MUSB_MAX_EP_NUM 6
#define SUNXI_MUSB_RAM_BITS 11
-static struct musb_fifo_cfg sunxi_musb_mode_cfg[] = {
+/* Allwinner OTG supports up to 5 endpoints */
+static struct musb_fifo_cfg sunxi_musb_mode_cfg_5eps[] = {
MUSB_EP_FIFO_SINGLE(1, FIFO_TX, 512),
MUSB_EP_FIFO_SINGLE(1, FIFO_RX, 512),
MUSB_EP_FIFO_SINGLE(2, FIFO_TX, 512),
@@ -639,9 +646,7 @@ static struct musb_fifo_cfg sunxi_musb_mode_cfg[] = {
};
/* H3/V3s OTG supports only 4 endpoints */
-#define SUNXI_MUSB_MAX_EP_NUM_H3 5
-
-static struct musb_fifo_cfg sunxi_musb_mode_cfg_h3[] = {
+static struct musb_fifo_cfg sunxi_musb_mode_cfg_4eps[] = {
MUSB_EP_FIFO_SINGLE(1, FIFO_TX, 512),
MUSB_EP_FIFO_SINGLE(1, FIFO_RX, 512),
MUSB_EP_FIFO_SINGLE(2, FIFO_TX, 512),
@@ -652,31 +657,33 @@ static struct musb_fifo_cfg sunxi_musb_mode_cfg_h3[] = {
MUSB_EP_FIFO_SINGLE(4, FIFO_RX, 512),
};
-static const struct musb_hdrc_config sunxi_musb_hdrc_config = {
- .fifo_cfg = sunxi_musb_mode_cfg,
- .fifo_cfg_size = ARRAY_SIZE(sunxi_musb_mode_cfg),
+static const struct musb_hdrc_config sunxi_musb_hdrc_config_5eps = {
+ .fifo_cfg = sunxi_musb_mode_cfg_5eps,
+ .fifo_cfg_size = ARRAY_SIZE(sunxi_musb_mode_cfg_5eps),
.multipoint = true,
.dyn_fifo = true,
- .num_eps = SUNXI_MUSB_MAX_EP_NUM,
+ /* Two FIFOs per endpoint, plus ep_0. */
+ .num_eps = (ARRAY_SIZE(sunxi_musb_mode_cfg_5eps) / 2) + 1,
.ram_bits = SUNXI_MUSB_RAM_BITS,
};
-static struct musb_hdrc_config sunxi_musb_hdrc_config_h3 = {
- .fifo_cfg = sunxi_musb_mode_cfg_h3,
- .fifo_cfg_size = ARRAY_SIZE(sunxi_musb_mode_cfg_h3),
+static const struct musb_hdrc_config sunxi_musb_hdrc_config_4eps = {
+ .fifo_cfg = sunxi_musb_mode_cfg_4eps,
+ .fifo_cfg_size = ARRAY_SIZE(sunxi_musb_mode_cfg_4eps),
.multipoint = true,
.dyn_fifo = true,
- .num_eps = SUNXI_MUSB_MAX_EP_NUM_H3,
+ /* Two FIFOs per endpoint, plus ep_0. */
+ .num_eps = (ARRAY_SIZE(sunxi_musb_mode_cfg_4eps) / 2) + 1,
.ram_bits = SUNXI_MUSB_RAM_BITS,
};
-
static int sunxi_musb_probe(struct platform_device *pdev)
{
struct musb_hdrc_platform_data pdata;
struct platform_device_info pinfo;
struct sunxi_glue *glue;
struct device_node *np = pdev->dev.of_node;
+ const struct sunxi_musb_cfg *cfg;
int ret;
if (!np) {
@@ -713,26 +720,25 @@ static int sunxi_musb_probe(struct platform_device *pdev)
return -EINVAL;
}
pdata.platform_ops = &sunxi_musb_ops;
- if (!of_device_is_compatible(np, "allwinner,sun8i-h3-musb"))
- pdata.config = &sunxi_musb_hdrc_config;
- else
- pdata.config = &sunxi_musb_hdrc_config_h3;
+
+ cfg = of_device_get_match_data(&pdev->dev);
+ if (!cfg)
+ return -EINVAL;
+
+ pdata.config = cfg->hdrc_config;
glue->dev = &pdev->dev;
INIT_WORK(&glue->work, sunxi_musb_work);
glue->host_nb.notifier_call = sunxi_musb_host_notifier;
- if (of_device_is_compatible(np, "allwinner,sun4i-a10-musb"))
+ if (cfg->has_sram)
set_bit(SUNXI_MUSB_FL_HAS_SRAM, &glue->flags);
- if (of_device_is_compatible(np, "allwinner,sun6i-a31-musb"))
+ if (cfg->has_reset)
set_bit(SUNXI_MUSB_FL_HAS_RESET, &glue->flags);
- if (of_device_is_compatible(np, "allwinner,sun8i-a33-musb") ||
- of_device_is_compatible(np, "allwinner,sun8i-h3-musb")) {
- set_bit(SUNXI_MUSB_FL_HAS_RESET, &glue->flags);
+ if (cfg->no_configdata)
set_bit(SUNXI_MUSB_FL_NO_CONFIGDATA, &glue->flags);
- }
glue->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(glue->clk)) {
@@ -810,11 +816,46 @@ static int sunxi_musb_remove(struct platform_device *pdev)
return 0;
}
+static const struct sunxi_musb_cfg sun4i_a10_musb_cfg = {
+ .hdrc_config = &sunxi_musb_hdrc_config_5eps,
+ .has_sram = true,
+};
+
+static const struct sunxi_musb_cfg sun6i_a31_musb_cfg = {
+ .hdrc_config = &sunxi_musb_hdrc_config_5eps,
+ .has_reset = true,
+};
+
+static const struct sunxi_musb_cfg sun8i_a33_musb_cfg = {
+ .hdrc_config = &sunxi_musb_hdrc_config_5eps,
+ .has_reset = true,
+ .no_configdata = true,
+};
+
+static const struct sunxi_musb_cfg sun8i_h3_musb_cfg = {
+ .hdrc_config = &sunxi_musb_hdrc_config_4eps,
+ .has_reset = true,
+ .no_configdata = true,
+};
+
+static const struct sunxi_musb_cfg suniv_f1c100s_musb_cfg = {
+ .hdrc_config = &sunxi_musb_hdrc_config_5eps,
+ .has_sram = true,
+ .has_reset = true,
+ .no_configdata = true,
+};
+
static const struct of_device_id sunxi_musb_match[] = {
- { .compatible = "allwinner,sun4i-a10-musb", },
- { .compatible = "allwinner,sun6i-a31-musb", },
- { .compatible = "allwinner,sun8i-a33-musb", },
- { .compatible = "allwinner,sun8i-h3-musb", },
+ { .compatible = "allwinner,sun4i-a10-musb",
+ .data = &sun4i_a10_musb_cfg, },
+ { .compatible = "allwinner,sun6i-a31-musb",
+ .data = &sun6i_a31_musb_cfg, },
+ { .compatible = "allwinner,sun8i-a33-musb",
+ .data = &sun8i_a33_musb_cfg, },
+ { .compatible = "allwinner,sun8i-h3-musb",
+ .data = &sun8i_h3_musb_cfg, },
+ { .compatible = "allwinner,suniv-f1c100s-musb",
+ .data = &suniv_f1c100s_musb_cfg, },
{}
};
MODULE_DEVICE_TABLE(of, sunxi_musb_match);
diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig
index 915df5726a5c..5f629d7cad64 100644
--- a/drivers/usb/phy/Kconfig
+++ b/drivers/usb/phy/Kconfig
@@ -28,23 +28,6 @@ config FSL_USB2_OTG
help
Enable this to support Freescale USB OTG transceiver.
-config ISP1301_OMAP
- tristate "Philips ISP1301 with OMAP OTG"
- depends on I2C
- depends on ARCH_OMAP_OTG || (ARM && COMPILE_TEST)
- depends on USB
- depends on USB_GADGET || !USB_GADGET # if USB_GADGET=m, this can't be 'y'
- select USB_PHY
- help
- If you say yes here you get support for the Philips ISP1301
- USB-On-The-Go transceiver working with the OMAP OTG controller.
- The ISP1301 is a full speed USB transceiver which is used in
- products including H2, H3, and H4 development boards for Texas
- Instruments OMAP processors.
-
- This driver can also be built as a module. If so, the module
- will be called phy-isp1301-omap.
-
config KEYSTONE_USB_PHY
tristate "Keystone USB PHY Driver"
depends on ARCH_KEYSTONE || COMPILE_TEST
diff --git a/drivers/usb/phy/Makefile b/drivers/usb/phy/Makefile
index df1d99010079..e5d619b4d8f6 100644
--- a/drivers/usb/phy/Makefile
+++ b/drivers/usb/phy/Makefile
@@ -9,7 +9,6 @@ obj-$(CONFIG_OF) += of.o
obj-$(CONFIG_AB8500_USB) += phy-ab8500-usb.o
obj-$(CONFIG_FSL_USB2_OTG) += phy-fsl-usb.o
-obj-$(CONFIG_ISP1301_OMAP) += phy-isp1301-omap.o
obj-$(CONFIG_NOP_USB_XCEIV) += phy-generic.o
obj-$(CONFIG_TAHVO_USB) += phy-tahvo.o
obj-$(CONFIG_AM335X_CONTROL_USB) += phy-am335x-control.o
diff --git a/drivers/usb/phy/phy-isp1301-omap.c b/drivers/usb/phy/phy-isp1301-omap.c
deleted file mode 100644
index 931610b76f3d..000000000000
--- a/drivers/usb/phy/phy-isp1301-omap.c
+++ /dev/null
@@ -1,1639 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * isp1301_omap - ISP 1301 USB transceiver, talking to OMAP OTG controller
- *
- * Copyright (C) 2004 Texas Instruments
- * Copyright (C) 2004 David Brownell
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <linux/platform_device.h>
-#include <linux/gpio/consumer.h>
-#include <linux/usb/ch9.h>
-#include <linux/usb/gadget.h>
-#include <linux/usb.h>
-#include <linux/usb/otg.h>
-#include <linux/i2c.h>
-#include <linux/workqueue.h>
-
-#include <asm/irq.h>
-#include <asm/mach-types.h>
-
-#include <linux/soc/ti/omap1-mux.h>
-#include <linux/soc/ti/omap1-usb.h>
-#include <linux/soc/ti/omap1-io.h>
-
-#undef VERBOSE
-
-
-#define DRIVER_VERSION "24 August 2004"
-#define DRIVER_NAME (isp1301_driver.driver.name)
-
-MODULE_DESCRIPTION("ISP1301 USB OTG Transceiver Driver");
-MODULE_LICENSE("GPL");
-
-struct isp1301 {
- struct usb_phy phy;
- struct i2c_client *client;
- void (*i2c_release)(struct device *dev);
-
- int irq_type;
-
- u32 last_otg_ctrl;
- unsigned working:1;
-
- struct timer_list timer;
-
- /* use keventd context to change the state for us */
- struct work_struct work;
-
- unsigned long todo;
-# define WORK_UPDATE_ISP 0 /* update ISP from OTG */
-# define WORK_UPDATE_OTG 1 /* update OTG from ISP */
-# define WORK_HOST_RESUME 4 /* resume host */
-# define WORK_TIMER 6 /* timer fired */
-# define WORK_STOP 7 /* don't resubmit */
-};
-
-
-/* bits in OTG_CTRL */
-
-#define OTG_XCEIV_OUTPUTS \
- (OTG_ASESSVLD|OTG_BSESSEND|OTG_BSESSVLD|OTG_VBUSVLD|OTG_ID)
-#define OTG_XCEIV_INPUTS \
- (OTG_PULLDOWN|OTG_PULLUP|OTG_DRV_VBUS|OTG_PD_VBUS|OTG_PU_VBUS|OTG_PU_ID)
-#define OTG_CTRL_BITS \
- (OTG_A_BUSREQ|OTG_A_SETB_HNPEN|OTG_B_BUSREQ|OTG_B_HNPEN|OTG_BUSDROP)
- /* and OTG_PULLUP is sometimes written */
-
-#define OTG_CTRL_MASK (OTG_DRIVER_SEL| \
- OTG_XCEIV_OUTPUTS|OTG_XCEIV_INPUTS| \
- OTG_CTRL_BITS)
-
-
-/*-------------------------------------------------------------------------*/
-
-/* board-specific PM hooks */
-
-#if defined(CONFIG_MACH_OMAP_H2) || defined(CONFIG_MACH_OMAP_H3)
-
-#if IS_REACHABLE(CONFIG_TPS65010)
-
-#include <linux/mfd/tps65010.h>
-
-#else
-
-static inline int tps65010_set_vbus_draw(unsigned mA)
-{
- pr_debug("tps65010: draw %d mA (STUB)\n", mA);
- return 0;
-}
-
-#endif
-
-static void enable_vbus_draw(struct isp1301 *isp, unsigned mA)
-{
- int status = tps65010_set_vbus_draw(mA);
- if (status < 0)
- pr_debug(" VBUS %d mA error %d\n", mA, status);
-}
-
-#else
-
-static void enable_vbus_draw(struct isp1301 *isp, unsigned mA)
-{
- /* H4 controls this by DIP switch S2.4; no soft control.
- * ON means the charger is always enabled. Leave it OFF
- * unless the OTG port is used only in B-peripheral mode.
- */
-}
-
-#endif
-
-static void enable_vbus_source(struct isp1301 *isp)
-{
- /* this board won't supply more than 8mA vbus power.
- * some boards can switch a 100ma "unit load" (or more).
- */
-}
-
-
-/* products will deliver OTG messages with LEDs, GUI, etc */
-static inline void notresponding(struct isp1301 *isp)
-{
- printk(KERN_NOTICE "OTG device not responding.\n");
-}
-
-
-/*-------------------------------------------------------------------------*/
-
-static struct i2c_driver isp1301_driver;
-
-/* smbus apis are used for portability */
-
-static inline u8
-isp1301_get_u8(struct isp1301 *isp, u8 reg)
-{
- return i2c_smbus_read_byte_data(isp->client, reg + 0);
-}
-
-static inline int
-isp1301_get_u16(struct isp1301 *isp, u8 reg)
-{
- return i2c_smbus_read_word_data(isp->client, reg);
-}
-
-static inline int
-isp1301_set_bits(struct isp1301 *isp, u8 reg, u8 bits)
-{
- return i2c_smbus_write_byte_data(isp->client, reg + 0, bits);
-}
-
-static inline int
-isp1301_clear_bits(struct isp1301 *isp, u8 reg, u8 bits)
-{
- return i2c_smbus_write_byte_data(isp->client, reg + 1, bits);
-}
-
-/*-------------------------------------------------------------------------*/
-
-/* identification */
-#define ISP1301_VENDOR_ID 0x00 /* u16 read */
-#define ISP1301_PRODUCT_ID 0x02 /* u16 read */
-#define ISP1301_BCD_DEVICE 0x14 /* u16 read */
-
-#define I2C_VENDOR_ID_PHILIPS 0x04cc
-#define I2C_PRODUCT_ID_PHILIPS_1301 0x1301
-
-/* operational registers */
-#define ISP1301_MODE_CONTROL_1 0x04 /* u8 read, set, +1 clear */
-# define MC1_SPEED (1 << 0)
-# define MC1_SUSPEND (1 << 1)
-# define MC1_DAT_SE0 (1 << 2)
-# define MC1_TRANSPARENT (1 << 3)
-# define MC1_BDIS_ACON_EN (1 << 4)
-# define MC1_OE_INT_EN (1 << 5)
-# define MC1_UART_EN (1 << 6)
-# define MC1_MASK 0x7f
-#define ISP1301_MODE_CONTROL_2 0x12 /* u8 read, set, +1 clear */
-# define MC2_GLOBAL_PWR_DN (1 << 0)
-# define MC2_SPD_SUSP_CTRL (1 << 1)
-# define MC2_BI_DI (1 << 2)
-# define MC2_TRANSP_BDIR0 (1 << 3)
-# define MC2_TRANSP_BDIR1 (1 << 4)
-# define MC2_AUDIO_EN (1 << 5)
-# define MC2_PSW_EN (1 << 6)
-# define MC2_EN2V7 (1 << 7)
-#define ISP1301_OTG_CONTROL_1 0x06 /* u8 read, set, +1 clear */
-# define OTG1_DP_PULLUP (1 << 0)
-# define OTG1_DM_PULLUP (1 << 1)
-# define OTG1_DP_PULLDOWN (1 << 2)
-# define OTG1_DM_PULLDOWN (1 << 3)
-# define OTG1_ID_PULLDOWN (1 << 4)
-# define OTG1_VBUS_DRV (1 << 5)
-# define OTG1_VBUS_DISCHRG (1 << 6)
-# define OTG1_VBUS_CHRG (1 << 7)
-#define ISP1301_OTG_STATUS 0x10 /* u8 readonly */
-# define OTG_B_SESS_END (1 << 6)
-# define OTG_B_SESS_VLD (1 << 7)
-
-#define ISP1301_INTERRUPT_SOURCE 0x08 /* u8 read */
-#define ISP1301_INTERRUPT_LATCH 0x0A /* u8 read, set, +1 clear */
-
-#define ISP1301_INTERRUPT_FALLING 0x0C /* u8 read, set, +1 clear */
-#define ISP1301_INTERRUPT_RISING 0x0E /* u8 read, set, +1 clear */
-
-/* same bitfields in all interrupt registers */
-# define INTR_VBUS_VLD (1 << 0)
-# define INTR_SESS_VLD (1 << 1)
-# define INTR_DP_HI (1 << 2)
-# define INTR_ID_GND (1 << 3)
-# define INTR_DM_HI (1 << 4)
-# define INTR_ID_FLOAT (1 << 5)
-# define INTR_BDIS_ACON (1 << 6)
-# define INTR_CR_INT (1 << 7)
-
-/*-------------------------------------------------------------------------*/
-
-static inline const char *state_name(struct isp1301 *isp)
-{
- return usb_otg_state_string(isp->phy.otg->state);
-}
-
-/*-------------------------------------------------------------------------*/
-
-/* NOTE: some of this ISP1301 setup is specific to H2 boards;
- * not everything is guarded by board-specific checks, or even using
- * omap_usb_config data to deduce MC1_DAT_SE0 and MC2_BI_DI.
- *
- * ALSO: this currently doesn't use ISP1301 low-power modes
- * while OTG is running.
- */
-
-static void power_down(struct isp1301 *isp)
-{
- isp->phy.otg->state = OTG_STATE_UNDEFINED;
-
- // isp1301_set_bits(isp, ISP1301_MODE_CONTROL_2, MC2_GLOBAL_PWR_DN);
- isp1301_set_bits(isp, ISP1301_MODE_CONTROL_1, MC1_SUSPEND);
-
- isp1301_clear_bits(isp, ISP1301_OTG_CONTROL_1, OTG1_ID_PULLDOWN);
- isp1301_clear_bits(isp, ISP1301_MODE_CONTROL_1, MC1_DAT_SE0);
-}
-
-static void __maybe_unused power_up(struct isp1301 *isp)
-{
- // isp1301_clear_bits(isp, ISP1301_MODE_CONTROL_2, MC2_GLOBAL_PWR_DN);
- isp1301_clear_bits(isp, ISP1301_MODE_CONTROL_1, MC1_SUSPEND);
-
- /* do this only when cpu is driving transceiver,
- * so host won't see a low speed device...
- */
- isp1301_set_bits(isp, ISP1301_MODE_CONTROL_1, MC1_DAT_SE0);
-}
-
-#define NO_HOST_SUSPEND
-
-static int host_suspend(struct isp1301 *isp)
-{
-#ifdef NO_HOST_SUSPEND
- return 0;
-#else
- struct device *dev;
-
- if (!isp->phy.otg->host)
- return -ENODEV;
-
- /* Currently ASSUMES only the OTG port matters;
- * other ports could be active...
- */
- dev = isp->phy.otg->host->controller;
- return dev->driver->suspend(dev, 3, 0);
-#endif
-}
-
-static int host_resume(struct isp1301 *isp)
-{
-#ifdef NO_HOST_SUSPEND
- return 0;
-#else
- struct device *dev;
-
- if (!isp->phy.otg->host)
- return -ENODEV;
-
- dev = isp->phy.otg->host->controller;
- return dev->driver->resume(dev, 0);
-#endif
-}
-
-static int gadget_suspend(struct isp1301 *isp)
-{
- isp->phy.otg->gadget->b_hnp_enable = 0;
- isp->phy.otg->gadget->a_hnp_support = 0;
- isp->phy.otg->gadget->a_alt_hnp_support = 0;
- return usb_gadget_vbus_disconnect(isp->phy.otg->gadget);
-}
-
-/*-------------------------------------------------------------------------*/
-
-#define TIMER_MINUTES 10
-#define TIMER_JIFFIES (TIMER_MINUTES * 60 * HZ)
-
-/* Almost all our I2C messaging comes from a work queue's task context.
- * NOTE: guaranteeing certain response times might mean we shouldn't
- * share keventd's work queue; a realtime task might be safest.
- */
-static void isp1301_defer_work(struct isp1301 *isp, int work)
-{
- int status;
-
- if (isp && !test_and_set_bit(work, &isp->todo)) {
- (void) get_device(&isp->client->dev);
- status = schedule_work(&isp->work);
- if (!status && !isp->working)
- dev_vdbg(&isp->client->dev,
- "work item %d may be lost\n", work);
- }
-}
-
-/* called from irq handlers */
-static void a_idle(struct isp1301 *isp, const char *tag)
-{
- u32 l;
-
- if (isp->phy.otg->state == OTG_STATE_A_IDLE)
- return;
-
- isp->phy.otg->default_a = 1;
- if (isp->phy.otg->host) {
- isp->phy.otg->host->is_b_host = 0;
- host_suspend(isp);
- }
- if (isp->phy.otg->gadget) {
- isp->phy.otg->gadget->is_a_peripheral = 1;
- gadget_suspend(isp);
- }
- isp->phy.otg->state = OTG_STATE_A_IDLE;
- l = omap_readl(OTG_CTRL) & OTG_XCEIV_OUTPUTS;
- omap_writel(l, OTG_CTRL);
- isp->last_otg_ctrl = l;
- pr_debug(" --> %s/%s\n", state_name(isp), tag);
-}
-
-/* called from irq handlers */
-static void b_idle(struct isp1301 *isp, const char *tag)
-{
- u32 l;
-
- if (isp->phy.otg->state == OTG_STATE_B_IDLE)
- return;
-
- isp->phy.otg->default_a = 0;
- if (isp->phy.otg->host) {
- isp->phy.otg->host->is_b_host = 1;
- host_suspend(isp);
- }
- if (isp->phy.otg->gadget) {
- isp->phy.otg->gadget->is_a_peripheral = 0;
- gadget_suspend(isp);
- }
- isp->phy.otg->state = OTG_STATE_B_IDLE;
- l = omap_readl(OTG_CTRL) & OTG_XCEIV_OUTPUTS;
- omap_writel(l, OTG_CTRL);
- isp->last_otg_ctrl = l;
- pr_debug(" --> %s/%s\n", state_name(isp), tag);
-}
-
-static void
-dump_regs(struct isp1301 *isp, const char *label)
-{
- u8 ctrl = isp1301_get_u8(isp, ISP1301_OTG_CONTROL_1);
- u8 status = isp1301_get_u8(isp, ISP1301_OTG_STATUS);
- u8 src = isp1301_get_u8(isp, ISP1301_INTERRUPT_SOURCE);
-
- pr_debug("otg: %06x, %s %s, otg/%02x stat/%02x.%02x\n",
- omap_readl(OTG_CTRL), label, state_name(isp),
- ctrl, status, src);
- /* mode control and irq enables don't change much */
-}
-
-/*-------------------------------------------------------------------------*/
-
-#ifdef CONFIG_USB_OTG
-
-/*
- * The OMAP OTG controller handles most of the OTG state transitions.
- *
- * We translate isp1301 outputs (mostly voltage comparator status) into
- * OTG inputs; OTG outputs (mostly pullup/pulldown controls) and HNP state
- * flags into isp1301 inputs ... and infer state transitions.
- */
-
-#ifdef VERBOSE
-
-static void check_state(struct isp1301 *isp, const char *tag)
-{
- enum usb_otg_state state = OTG_STATE_UNDEFINED;
- u8 fsm = omap_readw(OTG_TEST) & 0x0ff;
- unsigned extra = 0;
-
- switch (fsm) {
-
- /* default-b */
- case 0x0:
- state = OTG_STATE_B_IDLE;
- break;
- case 0x3:
- case 0x7:
- extra = 1;
- case 0x1:
- state = OTG_STATE_B_PERIPHERAL;
- break;
- case 0x11:
- state = OTG_STATE_B_SRP_INIT;
- break;
-
- /* extra dual-role default-b states */
- case 0x12:
- case 0x13:
- case 0x16:
- extra = 1;
- case 0x17:
- state = OTG_STATE_B_WAIT_ACON;
- break;
- case 0x34:
- state = OTG_STATE_B_HOST;
- break;
-
- /* default-a */
- case 0x36:
- state = OTG_STATE_A_IDLE;
- break;
- case 0x3c:
- state = OTG_STATE_A_WAIT_VFALL;
- break;
- case 0x7d:
- state = OTG_STATE_A_VBUS_ERR;
- break;
- case 0x9e:
- case 0x9f:
- extra = 1;
- case 0x89:
- state = OTG_STATE_A_PERIPHERAL;
- break;
- case 0xb7:
- state = OTG_STATE_A_WAIT_VRISE;
- break;
- case 0xb8:
- state = OTG_STATE_A_WAIT_BCON;
- break;
- case 0xb9:
- state = OTG_STATE_A_HOST;
- break;
- case 0xba:
- state = OTG_STATE_A_SUSPEND;
- break;
- default:
- break;
- }
- if (isp->phy.otg->state == state && !extra)
- return;
- pr_debug("otg: %s FSM %s/%02x, %s, %06x\n", tag,
- usb_otg_state_string(state), fsm, state_name(isp),
- omap_readl(OTG_CTRL));
-}
-
-#else
-
-static inline void check_state(struct isp1301 *isp, const char *tag) { }
-
-#endif
-
-/* outputs from ISP1301_INTERRUPT_SOURCE */
-static void update_otg1(struct isp1301 *isp, u8 int_src)
-{
- u32 otg_ctrl;
-
- otg_ctrl = omap_readl(OTG_CTRL) & OTG_CTRL_MASK;
- otg_ctrl &= ~OTG_XCEIV_INPUTS;
- otg_ctrl &= ~(OTG_ID|OTG_ASESSVLD|OTG_VBUSVLD);
-
- if (int_src & INTR_SESS_VLD)
- otg_ctrl |= OTG_ASESSVLD;
- else if (isp->phy.otg->state == OTG_STATE_A_WAIT_VFALL) {
- a_idle(isp, "vfall");
- otg_ctrl &= ~OTG_CTRL_BITS;
- }
- if (int_src & INTR_VBUS_VLD)
- otg_ctrl |= OTG_VBUSVLD;
- if (int_src & INTR_ID_GND) { /* default-A */
- if (isp->phy.otg->state == OTG_STATE_B_IDLE
- || isp->phy.otg->state
- == OTG_STATE_UNDEFINED) {
- a_idle(isp, "init");
- return;
- }
- } else { /* default-B */
- otg_ctrl |= OTG_ID;
- if (isp->phy.otg->state == OTG_STATE_A_IDLE
- || isp->phy.otg->state == OTG_STATE_UNDEFINED) {
- b_idle(isp, "init");
- return;
- }
- }
- omap_writel(otg_ctrl, OTG_CTRL);
-}
-
-/* outputs from ISP1301_OTG_STATUS */
-static void update_otg2(struct isp1301 *isp, u8 otg_status)
-{
- u32 otg_ctrl;
-
- otg_ctrl = omap_readl(OTG_CTRL) & OTG_CTRL_MASK;
- otg_ctrl &= ~OTG_XCEIV_INPUTS;
- otg_ctrl &= ~(OTG_BSESSVLD | OTG_BSESSEND);
- if (otg_status & OTG_B_SESS_VLD)
- otg_ctrl |= OTG_BSESSVLD;
- else if (otg_status & OTG_B_SESS_END)
- otg_ctrl |= OTG_BSESSEND;
- omap_writel(otg_ctrl, OTG_CTRL);
-}
-
-/* inputs going to ISP1301 */
-static void otg_update_isp(struct isp1301 *isp)
-{
- u32 otg_ctrl, otg_change;
- u8 set = OTG1_DM_PULLDOWN, clr = OTG1_DM_PULLUP;
-
- otg_ctrl = omap_readl(OTG_CTRL);
- otg_change = otg_ctrl ^ isp->last_otg_ctrl;
- isp->last_otg_ctrl = otg_ctrl;
- otg_ctrl = otg_ctrl & OTG_XCEIV_INPUTS;
-
- switch (isp->phy.otg->state) {
- case OTG_STATE_B_IDLE:
- case OTG_STATE_B_PERIPHERAL:
- case OTG_STATE_B_SRP_INIT:
- if (!(otg_ctrl & OTG_PULLUP)) {
- // if (otg_ctrl & OTG_B_HNPEN) {
- if (isp->phy.otg->gadget->b_hnp_enable) {
- isp->phy.otg->state = OTG_STATE_B_WAIT_ACON;
- pr_debug(" --> b_wait_acon\n");
- }
- goto pulldown;
- }
-pullup:
- set |= OTG1_DP_PULLUP;
- clr |= OTG1_DP_PULLDOWN;
- break;
- case OTG_STATE_A_SUSPEND:
- case OTG_STATE_A_PERIPHERAL:
- if (otg_ctrl & OTG_PULLUP)
- goto pullup;
- fallthrough;
- // case OTG_STATE_B_WAIT_ACON:
- default:
-pulldown:
- set |= OTG1_DP_PULLDOWN;
- clr |= OTG1_DP_PULLUP;
- break;
- }
-
-# define toggle(OTG,ISP) do { \
- if (otg_ctrl & OTG) set |= ISP; \
- else clr |= ISP; \
- } while (0)
-
- if (!(isp->phy.otg->host))
- otg_ctrl &= ~OTG_DRV_VBUS;
-
- switch (isp->phy.otg->state) {
- case OTG_STATE_A_SUSPEND:
- if (otg_ctrl & OTG_DRV_VBUS) {
- set |= OTG1_VBUS_DRV;
- break;
- }
- /* HNP failed for some reason (A_AIDL_BDIS timeout) */
- notresponding(isp);
-
- fallthrough;
- case OTG_STATE_A_VBUS_ERR:
- isp->phy.otg->state = OTG_STATE_A_WAIT_VFALL;
- pr_debug(" --> a_wait_vfall\n");
- fallthrough;
- case OTG_STATE_A_WAIT_VFALL:
- /* FIXME usbcore thinks port power is still on ... */
- clr |= OTG1_VBUS_DRV;
- break;
- case OTG_STATE_A_IDLE:
- if (otg_ctrl & OTG_DRV_VBUS) {
- isp->phy.otg->state = OTG_STATE_A_WAIT_VRISE;
- pr_debug(" --> a_wait_vrise\n");
- }
- fallthrough;
- default:
- toggle(OTG_DRV_VBUS, OTG1_VBUS_DRV);
- }
-
- toggle(OTG_PU_VBUS, OTG1_VBUS_CHRG);
- toggle(OTG_PD_VBUS, OTG1_VBUS_DISCHRG);
-
-# undef toggle
-
- isp1301_set_bits(isp, ISP1301_OTG_CONTROL_1, set);
- isp1301_clear_bits(isp, ISP1301_OTG_CONTROL_1, clr);
-
- /* HNP switch to host or peripheral; and SRP */
- if (otg_change & OTG_PULLUP) {
- u32 l;
-
- switch (isp->phy.otg->state) {
- case OTG_STATE_B_IDLE:
- if (clr & OTG1_DP_PULLUP)
- break;
- isp->phy.otg->state = OTG_STATE_B_PERIPHERAL;
- pr_debug(" --> b_peripheral\n");
- break;
- case OTG_STATE_A_SUSPEND:
- if (clr & OTG1_DP_PULLUP)
- break;
- isp->phy.otg->state = OTG_STATE_A_PERIPHERAL;
- pr_debug(" --> a_peripheral\n");
- break;
- default:
- break;
- }
- l = omap_readl(OTG_CTRL);
- l |= OTG_PULLUP;
- omap_writel(l, OTG_CTRL);
- }
-
- check_state(isp, __func__);
- dump_regs(isp, "otg->isp1301");
-}
-
-static irqreturn_t omap_otg_irq(int irq, void *_isp)
-{
- u16 otg_irq = omap_readw(OTG_IRQ_SRC);
- u32 otg_ctrl;
- int ret = IRQ_NONE;
- struct isp1301 *isp = _isp;
- struct usb_otg *otg = isp->phy.otg;
-
- /* update ISP1301 transceiver from OTG controller */
- if (otg_irq & OPRT_CHG) {
- omap_writew(OPRT_CHG, OTG_IRQ_SRC);
- isp1301_defer_work(isp, WORK_UPDATE_ISP);
- ret = IRQ_HANDLED;
-
- /* SRP to become b_peripheral failed */
- } else if (otg_irq & B_SRP_TMROUT) {
- pr_debug("otg: B_SRP_TIMEOUT, %06x\n", omap_readl(OTG_CTRL));
- notresponding(isp);
-
- /* gadget drivers that care should monitor all kinds of
- * remote wakeup (SRP, normal) using their own timer
- * to give "check cable and A-device" messages.
- */
- if (isp->phy.otg->state == OTG_STATE_B_SRP_INIT)
- b_idle(isp, "srp_timeout");
-
- omap_writew(B_SRP_TMROUT, OTG_IRQ_SRC);
- ret = IRQ_HANDLED;
-
- /* HNP to become b_host failed */
- } else if (otg_irq & B_HNP_FAIL) {
- pr_debug("otg: %s B_HNP_FAIL, %06x\n",
- state_name(isp), omap_readl(OTG_CTRL));
- notresponding(isp);
-
- otg_ctrl = omap_readl(OTG_CTRL);
- otg_ctrl |= OTG_BUSDROP;
- otg_ctrl &= OTG_CTRL_MASK & ~OTG_XCEIV_INPUTS;
- omap_writel(otg_ctrl, OTG_CTRL);
-
- /* subset of b_peripheral()... */
- isp->phy.otg->state = OTG_STATE_B_PERIPHERAL;
- pr_debug(" --> b_peripheral\n");
-
- omap_writew(B_HNP_FAIL, OTG_IRQ_SRC);
- ret = IRQ_HANDLED;
-
- /* detect SRP from B-device ... */
- } else if (otg_irq & A_SRP_DETECT) {
- pr_debug("otg: %s SRP_DETECT, %06x\n",
- state_name(isp), omap_readl(OTG_CTRL));
-
- isp1301_defer_work(isp, WORK_UPDATE_OTG);
- switch (isp->phy.otg->state) {
- case OTG_STATE_A_IDLE:
- if (!otg->host)
- break;
- isp1301_defer_work(isp, WORK_HOST_RESUME);
- otg_ctrl = omap_readl(OTG_CTRL);
- otg_ctrl |= OTG_A_BUSREQ;
- otg_ctrl &= ~(OTG_BUSDROP|OTG_B_BUSREQ)
- & ~OTG_XCEIV_INPUTS
- & OTG_CTRL_MASK;
- omap_writel(otg_ctrl, OTG_CTRL);
- break;
- default:
- break;
- }
-
- omap_writew(A_SRP_DETECT, OTG_IRQ_SRC);
- ret = IRQ_HANDLED;
-
- /* timer expired: T(a_wait_bcon) and maybe T(a_wait_vrise)
- * we don't track them separately
- */
- } else if (otg_irq & A_REQ_TMROUT) {
- otg_ctrl = omap_readl(OTG_CTRL);
- pr_info("otg: BCON_TMOUT from %s, %06x\n",
- state_name(isp), otg_ctrl);
- notresponding(isp);
-
- otg_ctrl |= OTG_BUSDROP;
- otg_ctrl &= ~OTG_A_BUSREQ & OTG_CTRL_MASK & ~OTG_XCEIV_INPUTS;
- omap_writel(otg_ctrl, OTG_CTRL);
- isp->phy.otg->state = OTG_STATE_A_WAIT_VFALL;
-
- omap_writew(A_REQ_TMROUT, OTG_IRQ_SRC);
- ret = IRQ_HANDLED;
-
- /* A-supplied voltage fell too low; overcurrent */
- } else if (otg_irq & A_VBUS_ERR) {
- otg_ctrl = omap_readl(OTG_CTRL);
- printk(KERN_ERR "otg: %s, VBUS_ERR %04x ctrl %06x\n",
- state_name(isp), otg_irq, otg_ctrl);
-
- otg_ctrl |= OTG_BUSDROP;
- otg_ctrl &= ~OTG_A_BUSREQ & OTG_CTRL_MASK & ~OTG_XCEIV_INPUTS;
- omap_writel(otg_ctrl, OTG_CTRL);
- isp->phy.otg->state = OTG_STATE_A_VBUS_ERR;
-
- omap_writew(A_VBUS_ERR, OTG_IRQ_SRC);
- ret = IRQ_HANDLED;
-
- /* switch driver; the transceiver code activates it,
- * ungating the udc clock or resuming OHCI.
- */
- } else if (otg_irq & DRIVER_SWITCH) {
- int kick = 0;
-
- otg_ctrl = omap_readl(OTG_CTRL);
- printk(KERN_NOTICE "otg: %s, SWITCH to %s, ctrl %06x\n",
- state_name(isp),
- (otg_ctrl & OTG_DRIVER_SEL)
- ? "gadget" : "host",
- otg_ctrl);
- isp1301_defer_work(isp, WORK_UPDATE_ISP);
-
- /* role is peripheral */
- if (otg_ctrl & OTG_DRIVER_SEL) {
- switch (isp->phy.otg->state) {
- case OTG_STATE_A_IDLE:
- b_idle(isp, __func__);
- break;
- default:
- break;
- }
- isp1301_defer_work(isp, WORK_UPDATE_ISP);
-
- /* role is host */
- } else {
- if (!(otg_ctrl & OTG_ID)) {
- otg_ctrl &= OTG_CTRL_MASK & ~OTG_XCEIV_INPUTS;
- omap_writel(otg_ctrl | OTG_A_BUSREQ, OTG_CTRL);
- }
-
- if (otg->host) {
- switch (isp->phy.otg->state) {
- case OTG_STATE_B_WAIT_ACON:
- isp->phy.otg->state = OTG_STATE_B_HOST;
- pr_debug(" --> b_host\n");
- kick = 1;
- break;
- case OTG_STATE_A_WAIT_BCON:
- isp->phy.otg->state = OTG_STATE_A_HOST;
- pr_debug(" --> a_host\n");
- break;
- case OTG_STATE_A_PERIPHERAL:
- isp->phy.otg->state = OTG_STATE_A_WAIT_BCON;
- pr_debug(" --> a_wait_bcon\n");
- break;
- default:
- break;
- }
- isp1301_defer_work(isp, WORK_HOST_RESUME);
- }
- }
-
- omap_writew(DRIVER_SWITCH, OTG_IRQ_SRC);
- ret = IRQ_HANDLED;
-
- if (kick)
- usb_bus_start_enum(otg->host, otg->host->otg_port);
- }
-
- check_state(isp, __func__);
- return ret;
-}
-
-static struct platform_device *otg_dev;
-
-static int isp1301_otg_init(struct isp1301 *isp)
-{
- u32 l;
-
- if (!otg_dev)
- return -ENODEV;
-
- dump_regs(isp, __func__);
- /* some of these values are board-specific... */
- l = omap_readl(OTG_SYSCON_2);
- l |= OTG_EN
- /* for B-device: */
- | SRP_GPDATA /* 9msec Bdev D+ pulse */
- | SRP_GPDVBUS /* discharge after VBUS pulse */
- // | (3 << 24) /* 2msec VBUS pulse */
- /* for A-device: */
- | (0 << 20) /* 200ms nominal A_WAIT_VRISE timer */
- | SRP_DPW /* detect 167+ns SRP pulses */
- | SRP_DATA | SRP_VBUS /* accept both kinds of SRP pulse */
- ;
- omap_writel(l, OTG_SYSCON_2);
-
- update_otg1(isp, isp1301_get_u8(isp, ISP1301_INTERRUPT_SOURCE));
- update_otg2(isp, isp1301_get_u8(isp, ISP1301_OTG_STATUS));
-
- check_state(isp, __func__);
- pr_debug("otg: %s, %s %06x\n",
- state_name(isp), __func__, omap_readl(OTG_CTRL));
-
- omap_writew(DRIVER_SWITCH | OPRT_CHG
- | B_SRP_TMROUT | B_HNP_FAIL
- | A_VBUS_ERR | A_SRP_DETECT | A_REQ_TMROUT, OTG_IRQ_EN);
-
- l = omap_readl(OTG_SYSCON_2);
- l |= OTG_EN;
- omap_writel(l, OTG_SYSCON_2);
-
- return 0;
-}
-
-static int otg_probe(struct platform_device *dev)
-{
- // struct omap_usb_config *config = dev->platform_data;
-
- otg_dev = dev;
- return 0;
-}
-
-static int otg_remove(struct platform_device *dev)
-{
- otg_dev = NULL;
- return 0;
-}
-
-static struct platform_driver omap_otg_driver = {
- .probe = otg_probe,
- .remove = otg_remove,
- .driver = {
- .name = "omap_otg",
- },
-};
-
-static int otg_bind(struct isp1301 *isp)
-{
- int status;
-
- if (otg_dev)
- return -EBUSY;
-
- status = platform_driver_register(&omap_otg_driver);
- if (status < 0)
- return status;
-
- if (otg_dev)
- status = request_irq(otg_dev->resource[1].start, omap_otg_irq,
- 0, DRIVER_NAME, isp);
- else
- status = -ENODEV;
-
- if (status < 0)
- platform_driver_unregister(&omap_otg_driver);
- return status;
-}
-
-static void otg_unbind(struct isp1301 *isp)
-{
- if (!otg_dev)
- return;
- free_irq(otg_dev->resource[1].start, isp);
-}
-
-#else
-
-/* OTG controller isn't clocked */
-
-#endif /* CONFIG_USB_OTG */
-
-/*-------------------------------------------------------------------------*/
-
-static void b_peripheral(struct isp1301 *isp)
-{
- u32 l;
-
- l = omap_readl(OTG_CTRL) & OTG_XCEIV_OUTPUTS;
- omap_writel(l, OTG_CTRL);
-
- usb_gadget_vbus_connect(isp->phy.otg->gadget);
-
-#ifdef CONFIG_USB_OTG
- enable_vbus_draw(isp, 8);
- otg_update_isp(isp);
-#else
- enable_vbus_draw(isp, 100);
- /* UDC driver just set OTG_BSESSVLD */
- isp1301_set_bits(isp, ISP1301_OTG_CONTROL_1, OTG1_DP_PULLUP);
- isp1301_clear_bits(isp, ISP1301_OTG_CONTROL_1, OTG1_DP_PULLDOWN);
- isp->phy.otg->state = OTG_STATE_B_PERIPHERAL;
- pr_debug(" --> b_peripheral\n");
- dump_regs(isp, "2periph");
-#endif
-}
-
-static void isp_update_otg(struct isp1301 *isp, u8 stat)
-{
- struct usb_otg *otg = isp->phy.otg;
- u8 isp_stat, isp_bstat;
- enum usb_otg_state state = isp->phy.otg->state;
-
- if (stat & INTR_BDIS_ACON)
- pr_debug("OTG: BDIS_ACON, %s\n", state_name(isp));
-
- /* start certain state transitions right away */
- isp_stat = isp1301_get_u8(isp, ISP1301_INTERRUPT_SOURCE);
- if (isp_stat & INTR_ID_GND) {
- if (otg->default_a) {
- switch (state) {
- case OTG_STATE_B_IDLE:
- a_idle(isp, "idle");
- fallthrough;
- case OTG_STATE_A_IDLE:
- enable_vbus_source(isp);
- fallthrough;
- case OTG_STATE_A_WAIT_VRISE:
- /* we skip over OTG_STATE_A_WAIT_BCON, since
- * the HC will transition to A_HOST (or
- * A_SUSPEND!) without our noticing except
- * when HNP is used.
- */
- if (isp_stat & INTR_VBUS_VLD)
- isp->phy.otg->state = OTG_STATE_A_HOST;
- break;
- case OTG_STATE_A_WAIT_VFALL:
- if (!(isp_stat & INTR_SESS_VLD))
- a_idle(isp, "vfell");
- break;
- default:
- if (!(isp_stat & INTR_VBUS_VLD))
- isp->phy.otg->state = OTG_STATE_A_VBUS_ERR;
- break;
- }
- isp_bstat = isp1301_get_u8(isp, ISP1301_OTG_STATUS);
- } else {
- switch (state) {
- case OTG_STATE_B_PERIPHERAL:
- case OTG_STATE_B_HOST:
- case OTG_STATE_B_WAIT_ACON:
- usb_gadget_vbus_disconnect(otg->gadget);
- break;
- default:
- break;
- }
- if (state != OTG_STATE_A_IDLE)
- a_idle(isp, "id");
- if (otg->host && state == OTG_STATE_A_IDLE)
- isp1301_defer_work(isp, WORK_HOST_RESUME);
- isp_bstat = 0;
- }
- } else {
- u32 l;
-
- /* if user unplugged mini-A end of cable,
- * don't bypass A_WAIT_VFALL.
- */
- if (otg->default_a) {
- switch (state) {
- default:
- isp->phy.otg->state = OTG_STATE_A_WAIT_VFALL;
- break;
- case OTG_STATE_A_WAIT_VFALL:
- state = OTG_STATE_A_IDLE;
- /* hub_wq may take a while to notice and
- * handle this disconnect, so don't go
- * to B_IDLE quite yet.
- */
- break;
- case OTG_STATE_A_IDLE:
- host_suspend(isp);
- isp1301_clear_bits(isp, ISP1301_MODE_CONTROL_1,
- MC1_BDIS_ACON_EN);
- isp->phy.otg->state = OTG_STATE_B_IDLE;
- l = omap_readl(OTG_CTRL) & OTG_CTRL_MASK;
- l &= ~OTG_CTRL_BITS;
- omap_writel(l, OTG_CTRL);
- break;
- case OTG_STATE_B_IDLE:
- break;
- }
- }
- isp_bstat = isp1301_get_u8(isp, ISP1301_OTG_STATUS);
-
- switch (isp->phy.otg->state) {
- case OTG_STATE_B_PERIPHERAL:
- case OTG_STATE_B_WAIT_ACON:
- case OTG_STATE_B_HOST:
- if (likely(isp_bstat & OTG_B_SESS_VLD))
- break;
- enable_vbus_draw(isp, 0);
-#ifndef CONFIG_USB_OTG
- /* UDC driver will clear OTG_BSESSVLD */
- isp1301_set_bits(isp, ISP1301_OTG_CONTROL_1,
- OTG1_DP_PULLDOWN);
- isp1301_clear_bits(isp, ISP1301_OTG_CONTROL_1,
- OTG1_DP_PULLUP);
- dump_regs(isp, __func__);
-#endif
- fallthrough;
- case OTG_STATE_B_SRP_INIT:
- b_idle(isp, __func__);
- l = omap_readl(OTG_CTRL) & OTG_XCEIV_OUTPUTS;
- omap_writel(l, OTG_CTRL);
- fallthrough;
- case OTG_STATE_B_IDLE:
- if (otg->gadget && (isp_bstat & OTG_B_SESS_VLD)) {
-#ifdef CONFIG_USB_OTG
- update_otg1(isp, isp_stat);
- update_otg2(isp, isp_bstat);
-#endif
- b_peripheral(isp);
- } else if (!(isp_stat & (INTR_VBUS_VLD|INTR_SESS_VLD)))
- isp_bstat |= OTG_B_SESS_END;
- break;
- case OTG_STATE_A_WAIT_VFALL:
- break;
- default:
- pr_debug("otg: unsupported b-device %s\n",
- state_name(isp));
- break;
- }
- }
-
- if (state != isp->phy.otg->state)
- pr_debug(" isp, %s -> %s\n",
- usb_otg_state_string(state), state_name(isp));
-
-#ifdef CONFIG_USB_OTG
- /* update the OTG controller state to match the isp1301; may
- * trigger OPRT_CHG irqs for changes going to the isp1301.
- */
- update_otg1(isp, isp_stat);
- update_otg2(isp, isp_bstat);
- check_state(isp, __func__);
-#endif
-
- dump_regs(isp, "isp1301->otg");
-}
-
-/*-------------------------------------------------------------------------*/
-
-static u8 isp1301_clear_latch(struct isp1301 *isp)
-{
- u8 latch = isp1301_get_u8(isp, ISP1301_INTERRUPT_LATCH);
- isp1301_clear_bits(isp, ISP1301_INTERRUPT_LATCH, latch);
- return latch;
-}
-
-static void
-isp1301_work(struct work_struct *work)
-{
- struct isp1301 *isp = container_of(work, struct isp1301, work);
- int stop;
-
- /* implicit lock: we're the only task using this device */
- isp->working = 1;
- do {
- stop = test_bit(WORK_STOP, &isp->todo);
-
-#ifdef CONFIG_USB_OTG
- /* transfer state from otg engine to isp1301 */
- if (test_and_clear_bit(WORK_UPDATE_ISP, &isp->todo)) {
- otg_update_isp(isp);
- put_device(&isp->client->dev);
- }
-#endif
- /* transfer state from isp1301 to otg engine */
- if (test_and_clear_bit(WORK_UPDATE_OTG, &isp->todo)) {
- u8 stat = isp1301_clear_latch(isp);
-
- isp_update_otg(isp, stat);
- put_device(&isp->client->dev);
- }
-
- if (test_and_clear_bit(WORK_HOST_RESUME, &isp->todo)) {
- u32 otg_ctrl;
-
- /*
- * skip A_WAIT_VRISE; hc transitions invisibly
- * skip A_WAIT_BCON; same.
- */
- switch (isp->phy.otg->state) {
- case OTG_STATE_A_WAIT_BCON:
- case OTG_STATE_A_WAIT_VRISE:
- isp->phy.otg->state = OTG_STATE_A_HOST;
- pr_debug(" --> a_host\n");
- otg_ctrl = omap_readl(OTG_CTRL);
- otg_ctrl |= OTG_A_BUSREQ;
- otg_ctrl &= ~(OTG_BUSDROP|OTG_B_BUSREQ)
- & OTG_CTRL_MASK;
- omap_writel(otg_ctrl, OTG_CTRL);
- break;
- case OTG_STATE_B_WAIT_ACON:
- isp->phy.otg->state = OTG_STATE_B_HOST;
- pr_debug(" --> b_host (acon)\n");
- break;
- case OTG_STATE_B_HOST:
- case OTG_STATE_B_IDLE:
- case OTG_STATE_A_IDLE:
- break;
- default:
- pr_debug(" host resume in %s\n",
- state_name(isp));
- }
- host_resume(isp);
- // mdelay(10);
- put_device(&isp->client->dev);
- }
-
- if (test_and_clear_bit(WORK_TIMER, &isp->todo)) {
-#ifdef VERBOSE
- dump_regs(isp, "timer");
- if (!stop)
- mod_timer(&isp->timer, jiffies + TIMER_JIFFIES);
-#endif
- put_device(&isp->client->dev);
- }
-
- if (isp->todo)
- dev_vdbg(&isp->client->dev,
- "work done, todo = 0x%lx\n",
- isp->todo);
- if (stop) {
- dev_dbg(&isp->client->dev, "stop\n");
- break;
- }
- } while (isp->todo);
- isp->working = 0;
-}
-
-static irqreturn_t isp1301_irq(int irq, void *isp)
-{
- isp1301_defer_work(isp, WORK_UPDATE_OTG);
- return IRQ_HANDLED;
-}
-
-static void isp1301_timer(struct timer_list *t)
-{
- struct isp1301 *isp = from_timer(isp, t, timer);
-
- isp1301_defer_work(isp, WORK_TIMER);
-}
-
-/*-------------------------------------------------------------------------*/
-
-static void isp1301_release(struct device *dev)
-{
- struct isp1301 *isp;
-
- isp = dev_get_drvdata(dev);
-
- /* FIXME -- not with a "new style" driver, it doesn't!! */
-
- /* ugly -- i2c hijacks our memory hook to wait_for_completion() */
- if (isp->i2c_release)
- isp->i2c_release(dev);
- kfree(isp->phy.otg);
- kfree (isp);
-}
-
-static struct isp1301 *the_transceiver;
-
-static void isp1301_remove(struct i2c_client *i2c)
-{
- struct isp1301 *isp;
-
- isp = i2c_get_clientdata(i2c);
-
- isp1301_clear_bits(isp, ISP1301_INTERRUPT_FALLING, ~0);
- isp1301_clear_bits(isp, ISP1301_INTERRUPT_RISING, ~0);
- free_irq(i2c->irq, isp);
-#ifdef CONFIG_USB_OTG
- otg_unbind(isp);
-#endif
- set_bit(WORK_STOP, &isp->todo);
- del_timer_sync(&isp->timer);
- flush_work(&isp->work);
-
- put_device(&i2c->dev);
- the_transceiver = NULL;
-}
-
-/*-------------------------------------------------------------------------*/
-
-/* NOTE: three modes are possible here, only one of which
- * will be standards-conformant on any given system:
- *
- * - OTG mode (dual-role), required if there's a Mini-AB connector
- * - HOST mode, for when there's one or more A (host) connectors
- * - DEVICE mode, for when there's a B/Mini-B (device) connector
- *
- * As a rule, you won't have an isp1301 chip unless it's there to
- * support the OTG mode. Other modes help testing USB controllers
- * in isolation from (full) OTG support, or maybe so later board
- * revisions can help to support those feature.
- */
-
-#ifdef CONFIG_USB_OTG
-
-static int isp1301_otg_enable(struct isp1301 *isp)
-{
- power_up(isp);
- isp1301_otg_init(isp);
-
- /* NOTE: since we don't change this, this provides
- * a few more interrupts than are strictly needed.
- */
- isp1301_set_bits(isp, ISP1301_INTERRUPT_RISING,
- INTR_VBUS_VLD | INTR_SESS_VLD | INTR_ID_GND);
- isp1301_set_bits(isp, ISP1301_INTERRUPT_FALLING,
- INTR_VBUS_VLD | INTR_SESS_VLD | INTR_ID_GND);
-
- dev_info(&isp->client->dev, "ready for dual-role USB ...\n");
-
- return 0;
-}
-
-#endif
-
-/* add or disable the host device+driver */
-static int
-isp1301_set_host(struct usb_otg *otg, struct usb_bus *host)
-{
- struct isp1301 *isp = container_of(otg->usb_phy, struct isp1301, phy);
-
- if (isp != the_transceiver)
- return -ENODEV;
-
- if (!host) {
- omap_writew(0, OTG_IRQ_EN);
- power_down(isp);
- otg->host = NULL;
- return 0;
- }
-
-#ifdef CONFIG_USB_OTG
- otg->host = host;
- dev_dbg(&isp->client->dev, "registered host\n");
- host_suspend(isp);
- if (otg->gadget)
- return isp1301_otg_enable(isp);
- return 0;
-
-#elif !IS_ENABLED(CONFIG_USB_OMAP)
- // FIXME update its refcount
- otg->host = host;
-
- power_up(isp);
-
- if (machine_is_omap_h2())
- isp1301_set_bits(isp, ISP1301_MODE_CONTROL_1, MC1_DAT_SE0);
-
- dev_info(&isp->client->dev, "A-Host sessions ok\n");
- isp1301_set_bits(isp, ISP1301_INTERRUPT_RISING,
- INTR_ID_GND);
- isp1301_set_bits(isp, ISP1301_INTERRUPT_FALLING,
- INTR_ID_GND);
-
- /* If this has a Mini-AB connector, this mode is highly
- * nonstandard ... but can be handy for testing, especially with
- * the Mini-A end of an OTG cable. (Or something nonstandard
- * like MiniB-to-StandardB, maybe built with a gender mender.)
- */
- isp1301_set_bits(isp, ISP1301_OTG_CONTROL_1, OTG1_VBUS_DRV);
-
- dump_regs(isp, __func__);
-
- return 0;
-
-#else
- dev_dbg(&isp->client->dev, "host sessions not allowed\n");
- return -EINVAL;
-#endif
-
-}
-
-static int
-isp1301_set_peripheral(struct usb_otg *otg, struct usb_gadget *gadget)
-{
- struct isp1301 *isp = container_of(otg->usb_phy, struct isp1301, phy);
-
- if (isp != the_transceiver)
- return -ENODEV;
-
- if (!gadget) {
- omap_writew(0, OTG_IRQ_EN);
- if (!otg->default_a)
- enable_vbus_draw(isp, 0);
- usb_gadget_vbus_disconnect(otg->gadget);
- otg->gadget = NULL;
- power_down(isp);
- return 0;
- }
-
-#ifdef CONFIG_USB_OTG
- otg->gadget = gadget;
- dev_dbg(&isp->client->dev, "registered gadget\n");
- /* gadget driver may be suspended until vbus_connect () */
- if (otg->host)
- return isp1301_otg_enable(isp);
- return 0;
-
-#elif !defined(CONFIG_USB_OHCI_HCD) && !defined(CONFIG_USB_OHCI_HCD_MODULE)
- otg->gadget = gadget;
- // FIXME update its refcount
-
- {
- u32 l;
-
- l = omap_readl(OTG_CTRL) & OTG_CTRL_MASK;
- l &= ~(OTG_XCEIV_OUTPUTS|OTG_CTRL_BITS);
- l |= OTG_ID;
- omap_writel(l, OTG_CTRL);
- }
-
- power_up(isp);
- isp->phy.otg->state = OTG_STATE_B_IDLE;
-
- if (machine_is_omap_h2() || machine_is_omap_h3())
- isp1301_set_bits(isp, ISP1301_MODE_CONTROL_1, MC1_DAT_SE0);
-
- isp1301_set_bits(isp, ISP1301_INTERRUPT_RISING,
- INTR_SESS_VLD);
- isp1301_set_bits(isp, ISP1301_INTERRUPT_FALLING,
- INTR_VBUS_VLD);
- dev_info(&isp->client->dev, "B-Peripheral sessions ok\n");
- dump_regs(isp, __func__);
-
- /* If this has a Mini-AB connector, this mode is highly
- * nonstandard ... but can be handy for testing, so long
- * as you don't plug a Mini-A cable into the jack.
- */
- if (isp1301_get_u8(isp, ISP1301_INTERRUPT_SOURCE) & INTR_VBUS_VLD)
- b_peripheral(isp);
-
- return 0;
-
-#else
- dev_dbg(&isp->client->dev, "peripheral sessions not allowed\n");
- return -EINVAL;
-#endif
-}
-
-
-/*-------------------------------------------------------------------------*/
-
-static int
-isp1301_set_power(struct usb_phy *dev, unsigned mA)
-{
- if (!the_transceiver)
- return -ENODEV;
- if (dev->otg->state == OTG_STATE_B_PERIPHERAL)
- enable_vbus_draw(the_transceiver, mA);
- return 0;
-}
-
-static int
-isp1301_start_srp(struct usb_otg *otg)
-{
- struct isp1301 *isp = container_of(otg->usb_phy, struct isp1301, phy);
- u32 otg_ctrl;
-
- if (isp != the_transceiver || isp->phy.otg->state != OTG_STATE_B_IDLE)
- return -ENODEV;
-
- otg_ctrl = omap_readl(OTG_CTRL);
- if (!(otg_ctrl & OTG_BSESSEND))
- return -EINVAL;
-
- otg_ctrl |= OTG_B_BUSREQ;
- otg_ctrl &= ~OTG_A_BUSREQ & OTG_CTRL_MASK;
- omap_writel(otg_ctrl, OTG_CTRL);
- isp->phy.otg->state = OTG_STATE_B_SRP_INIT;
-
- pr_debug("otg: SRP, %s ... %06x\n", state_name(isp),
- omap_readl(OTG_CTRL));
-#ifdef CONFIG_USB_OTG
- check_state(isp, __func__);
-#endif
- return 0;
-}
-
-static int
-isp1301_start_hnp(struct usb_otg *otg)
-{
-#ifdef CONFIG_USB_OTG
- struct isp1301 *isp = container_of(otg->usb_phy, struct isp1301, phy);
- u32 l;
-
- if (isp != the_transceiver)
- return -ENODEV;
- if (otg->default_a && (otg->host == NULL || !otg->host->b_hnp_enable))
- return -ENOTCONN;
- if (!otg->default_a && (otg->gadget == NULL
- || !otg->gadget->b_hnp_enable))
- return -ENOTCONN;
-
- /* We want hardware to manage most HNP protocol timings.
- * So do this part as early as possible...
- */
- switch (isp->phy.otg->state) {
- case OTG_STATE_B_HOST:
- isp->phy.otg->state = OTG_STATE_B_PERIPHERAL;
- /* caller will suspend next */
- break;
- case OTG_STATE_A_HOST:
-#if 0
- /* autoconnect mode avoids irq latency bugs */
- isp1301_set_bits(isp, ISP1301_MODE_CONTROL_1,
- MC1_BDIS_ACON_EN);
-#endif
- /* caller must suspend then clear A_BUSREQ */
- usb_gadget_vbus_connect(otg->gadget);
- l = omap_readl(OTG_CTRL);
- l |= OTG_A_SETB_HNPEN;
- omap_writel(l, OTG_CTRL);
-
- break;
- case OTG_STATE_A_PERIPHERAL:
- /* initiated by B-Host suspend */
- break;
- default:
- return -EILSEQ;
- }
- pr_debug("otg: HNP %s, %06x ...\n",
- state_name(isp), omap_readl(OTG_CTRL));
- check_state(isp, __func__);
- return 0;
-#else
- /* srp-only */
- return -EINVAL;
-#endif
-}
-
-/*-------------------------------------------------------------------------*/
-
-static int
-isp1301_probe(struct i2c_client *i2c)
-{
- int status;
- struct isp1301 *isp;
- int irq;
-
- if (the_transceiver)
- return 0;
-
- isp = kzalloc(sizeof *isp, GFP_KERNEL);
- if (!isp)
- return 0;
-
- isp->phy.otg = kzalloc(sizeof *isp->phy.otg, GFP_KERNEL);
- if (!isp->phy.otg) {
- kfree(isp);
- return 0;
- }
-
- INIT_WORK(&isp->work, isp1301_work);
- timer_setup(&isp->timer, isp1301_timer, 0);
-
- i2c_set_clientdata(i2c, isp);
- isp->client = i2c;
-
- /* verify the chip (shouldn't be necessary) */
- status = isp1301_get_u16(isp, ISP1301_VENDOR_ID);
- if (status != I2C_VENDOR_ID_PHILIPS) {
- dev_dbg(&i2c->dev, "not philips id: %d\n", status);
- goto fail;
- }
- status = isp1301_get_u16(isp, ISP1301_PRODUCT_ID);
- if (status != I2C_PRODUCT_ID_PHILIPS_1301) {
- dev_dbg(&i2c->dev, "not isp1301, %d\n", status);
- goto fail;
- }
- isp->i2c_release = i2c->dev.release;
- i2c->dev.release = isp1301_release;
-
- /* initial development used chiprev 2.00 */
- status = i2c_smbus_read_word_data(i2c, ISP1301_BCD_DEVICE);
- dev_info(&i2c->dev, "chiprev %x.%02x, driver " DRIVER_VERSION "\n",
- status >> 8, status & 0xff);
-
- /* make like power-on reset */
- isp1301_clear_bits(isp, ISP1301_MODE_CONTROL_1, MC1_MASK);
-
- isp1301_set_bits(isp, ISP1301_MODE_CONTROL_2, MC2_BI_DI);
- isp1301_clear_bits(isp, ISP1301_MODE_CONTROL_2, ~MC2_BI_DI);
-
- isp1301_set_bits(isp, ISP1301_OTG_CONTROL_1,
- OTG1_DM_PULLDOWN | OTG1_DP_PULLDOWN);
- isp1301_clear_bits(isp, ISP1301_OTG_CONTROL_1,
- ~(OTG1_DM_PULLDOWN | OTG1_DP_PULLDOWN));
-
- isp1301_clear_bits(isp, ISP1301_INTERRUPT_LATCH, ~0);
- isp1301_clear_bits(isp, ISP1301_INTERRUPT_FALLING, ~0);
- isp1301_clear_bits(isp, ISP1301_INTERRUPT_RISING, ~0);
-
-#ifdef CONFIG_USB_OTG
- status = otg_bind(isp);
- if (status < 0) {
- dev_dbg(&i2c->dev, "can't bind OTG\n");
- goto fail;
- }
-#endif
-
- if (machine_is_omap_h2()) {
- struct gpio_desc *gpiod;
-
- /* full speed signaling by default */
- isp1301_set_bits(isp, ISP1301_MODE_CONTROL_1,
- MC1_SPEED);
- isp1301_set_bits(isp, ISP1301_MODE_CONTROL_2,
- MC2_SPD_SUSP_CTRL);
-
- gpiod = devm_gpiod_get(&i2c->dev, NULL, GPIOD_IN);
- if (IS_ERR(gpiod)) {
- dev_err(&i2c->dev, "cannot obtain H2 GPIO\n");
- goto fail;
- }
- gpiod_set_consumer_name(gpiod, "isp1301");
- irq = gpiod_to_irq(gpiod);
- isp->irq_type = IRQF_TRIGGER_FALLING;
- } else {
- irq = i2c->irq;
- }
-
- status = request_irq(irq, isp1301_irq,
- isp->irq_type, DRIVER_NAME, isp);
- if (status < 0) {
- dev_dbg(&i2c->dev, "can't get IRQ %d, err %d\n",
- i2c->irq, status);
- goto fail;
- }
-
- isp->phy.dev = &i2c->dev;
- isp->phy.label = DRIVER_NAME;
- isp->phy.set_power = isp1301_set_power;
-
- isp->phy.otg->usb_phy = &isp->phy;
- isp->phy.otg->set_host = isp1301_set_host;
- isp->phy.otg->set_peripheral = isp1301_set_peripheral;
- isp->phy.otg->start_srp = isp1301_start_srp;
- isp->phy.otg->start_hnp = isp1301_start_hnp;
-
- enable_vbus_draw(isp, 0);
- power_down(isp);
- the_transceiver = isp;
-
-#ifdef CONFIG_USB_OTG
- update_otg1(isp, isp1301_get_u8(isp, ISP1301_INTERRUPT_SOURCE));
- update_otg2(isp, isp1301_get_u8(isp, ISP1301_OTG_STATUS));
-#endif
-
- dump_regs(isp, __func__);
-
-#ifdef VERBOSE
- mod_timer(&isp->timer, jiffies + TIMER_JIFFIES);
- dev_dbg(&i2c->dev, "scheduled timer, %d min\n", TIMER_MINUTES);
-#endif
-
- status = usb_add_phy(&isp->phy, USB_PHY_TYPE_USB2);
- if (status < 0)
- dev_err(&i2c->dev, "can't register transceiver, %d\n",
- status);
-
- return 0;
-
-fail:
- kfree(isp->phy.otg);
- kfree(isp);
- return -ENODEV;
-}
-
-static const struct i2c_device_id isp1301_id[] = {
- { "isp1301_omap", 0 },
- { }
-};
-MODULE_DEVICE_TABLE(i2c, isp1301_id);
-
-static struct i2c_driver isp1301_driver = {
- .driver = {
- .name = "isp1301_omap",
- },
- .probe_new = isp1301_probe,
- .remove = isp1301_remove,
- .id_table = isp1301_id,
-};
-
-/*-------------------------------------------------------------------------*/
-
-static int __init isp_init(void)
-{
- return i2c_add_driver(&isp1301_driver);
-}
-subsys_initcall(isp_init);
-
-static void __exit isp_exit(void)
-{
- if (the_transceiver)
- usb_remove_phy(&the_transceiver->phy);
- i2c_del_driver(&isp1301_driver);
-}
-module_exit(isp_exit);
-
diff --git a/drivers/usb/phy/phy.c b/drivers/usb/phy/phy.c
index 1b24492bb4e5..4b468bde19cf 100644
--- a/drivers/usb/phy/phy.c
+++ b/drivers/usb/phy/phy.c
@@ -80,7 +80,7 @@ static struct usb_phy *__of_usb_find_phy(struct device_node *node)
return ERR_PTR(-EPROBE_DEFER);
}
-static struct usb_phy *__device_to_usb_phy(struct device *dev)
+static struct usb_phy *__device_to_usb_phy(const struct device *dev)
{
struct usb_phy *usb_phy;
@@ -145,9 +145,9 @@ static void usb_phy_notify_charger_work(struct work_struct *work)
kobject_uevent(&usb_phy->dev->kobj, KOBJ_CHANGE);
}
-static int usb_phy_uevent(struct device *dev, struct kobj_uevent_env *env)
+static int usb_phy_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
- struct usb_phy *usb_phy;
+ const struct usb_phy *usb_phy;
char uchger_state[50] = { 0 };
char uchger_type[50] = { 0 };
unsigned long flags;
diff --git a/drivers/usb/roles/class.c b/drivers/usb/roles/class.c
index eacb46ec2ab3..56814ef80c24 100644
--- a/drivers/usb/roles/class.c
+++ b/drivers/usb/roles/class.c
@@ -274,8 +274,7 @@ static const struct attribute_group *usb_role_switch_groups[] = {
NULL,
};
-static int
-usb_role_switch_uevent(struct device *dev, struct kobj_uevent_env *env)
+static int usb_role_switch_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
int ret;
diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c
index da19a5fa414f..c3ea3a46ed76 100644
--- a/drivers/usb/serial/console.c
+++ b/drivers/usb/serial/console.c
@@ -169,7 +169,7 @@ static int usb_console_setup(struct console *co, char *options)
tty_save_termios(tty);
tty_kref_put(tty);
}
- tty_port_set_initialized(&port->port, 1);
+ tty_port_set_initialized(&port->port, true);
}
/* Now that any required fake tty operations are completed restore
* the tty port count */
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index ee5ac4ef7e16..e6d8d9b35ad0 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -402,6 +402,8 @@ static void option_instat_callback(struct urb *urb);
#define LONGCHEER_VENDOR_ID 0x1c9e
/* 4G Systems products */
+/* This one was sold as the VW and Skoda "Carstick LTE" */
+#define FOUR_G_SYSTEMS_PRODUCT_CARSTICK_LTE 0x7605
/* This is the 4G XS Stick W14 a.k.a. Mobilcom Debitel Surf-Stick *
* It seems to contain a Qualcomm QSC6240/6290 chipset */
#define FOUR_G_SYSTEMS_PRODUCT_W14 0x9603
@@ -1976,6 +1978,8 @@ static const struct usb_device_id option_ids[] = {
.driver_info = RSVD(2) },
{ USB_DEVICE(AIRPLUS_VENDOR_ID, AIRPLUS_PRODUCT_MCD650) },
{ USB_DEVICE(TLAYTECH_VENDOR_ID, TLAYTECH_PRODUCT_TEU800) },
+ { USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_CARSTICK_LTE),
+ .driver_info = RSVD(0) },
{ USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14),
.driver_info = NCTRL(0) | NCTRL(1) },
{ USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W100),
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index 164521ee10c6..f8404073558b 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -754,7 +754,7 @@ static struct usb_serial_driver *search_serial_device(
return NULL;
}
-static int serial_port_carrier_raised(struct tty_port *port)
+static bool serial_port_carrier_raised(struct tty_port *port)
{
struct usb_serial_port *p = container_of(port, struct usb_serial_port, port);
struct usb_serial_driver *drv = p->serial->type;
@@ -762,10 +762,10 @@ static int serial_port_carrier_raised(struct tty_port *port)
if (drv->carrier_raised)
return drv->carrier_raised(p);
/* No carrier control - don't block */
- return 1;
+ return true;
}
-static void serial_port_dtr_rts(struct tty_port *port, int on)
+static void serial_port_dtr_rts(struct tty_port *port, bool on)
{
struct usb_serial_port *p = container_of(port, struct usb_serial_port, port);
struct usb_serial_driver *drv = p->serial->type;
diff --git a/drivers/usb/storage/ene_ub6250.c b/drivers/usb/storage/ene_ub6250.c
index 6012603f3630..97c66c0d91f4 100644
--- a/drivers/usb/storage/ene_ub6250.c
+++ b/drivers/usb/storage/ene_ub6250.c
@@ -939,7 +939,7 @@ static int ms_lib_process_bootblock(struct us_data *us, u16 PhyBlock, u8 *PageDa
struct ms_lib_type_extdat ExtraData;
struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
- PageBuffer = kmalloc(MS_BYTES_PER_PAGE, GFP_KERNEL);
+ PageBuffer = kzalloc(MS_BYTES_PER_PAGE * 2, GFP_KERNEL);
if (PageBuffer == NULL)
return (u32)-1;
diff --git a/drivers/usb/typec/altmodes/displayport.c b/drivers/usb/typec/altmodes/displayport.c
index 9a6860285fbe..662cd043b50e 100644
--- a/drivers/usb/typec/altmodes/displayport.c
+++ b/drivers/usb/typec/altmodes/displayport.c
@@ -146,6 +146,7 @@ static int dp_altmode_status_update(struct dp_altmode *dp)
if (dp->hpd != hpd) {
drm_connector_oob_hotplug_event(dp->connector_fwnode);
dp->hpd = hpd;
+ sysfs_notify(&dp->alt->dev.kobj, "displayport", "hpd");
}
}
@@ -276,9 +277,11 @@ static int dp_altmode_vdm(struct typec_altmode *alt,
case CMDT_RSP_ACK:
switch (cmd) {
case CMD_ENTER_MODE:
+ typec_altmode_update_active(alt, true);
dp->state = DP_STATE_UPDATE;
break;
case CMD_EXIT_MODE:
+ typec_altmode_update_active(alt, false);
dp->data.status = 0;
dp->data.conf = 0;
break;
@@ -514,9 +517,18 @@ static ssize_t pin_assignment_show(struct device *dev,
}
static DEVICE_ATTR_RW(pin_assignment);
+static ssize_t hpd_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct dp_altmode *dp = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%d\n", dp->hpd);
+}
+static DEVICE_ATTR_RO(hpd);
+
static struct attribute *dp_altmode_attrs[] = {
&dev_attr_configuration.attr,
&dev_attr_pin_assignment.attr,
+ &dev_attr_hpd.attr,
NULL
};
@@ -535,10 +547,10 @@ int dp_altmode_probe(struct typec_altmode *alt)
/* FIXME: Port can only be DFP_U. */
/* Make sure we have compatiple pin configurations */
- if (!(DP_CAP_DFP_D_PIN_ASSIGN(port->vdo) &
- DP_CAP_UFP_D_PIN_ASSIGN(alt->vdo)) &&
- !(DP_CAP_UFP_D_PIN_ASSIGN(port->vdo) &
- DP_CAP_DFP_D_PIN_ASSIGN(alt->vdo)))
+ if (!(DP_CAP_PIN_ASSIGN_DFP_D(port->vdo) &
+ DP_CAP_PIN_ASSIGN_UFP_D(alt->vdo)) &&
+ !(DP_CAP_PIN_ASSIGN_UFP_D(port->vdo) &
+ DP_CAP_PIN_ASSIGN_DFP_D(alt->vdo)))
return -ENODEV;
ret = sysfs_create_group(&alt->dev.kobj, &dp_altmode_group);
diff --git a/drivers/usb/typec/bus.c b/drivers/usb/typec/bus.c
index 31c2a3130cad..098f0efaa58d 100644
--- a/drivers/usb/typec/bus.c
+++ b/drivers/usb/typec/bus.c
@@ -11,6 +11,22 @@
#include "bus.h"
#include "class.h"
#include "mux.h"
+#include "retimer.h"
+
+static inline int
+typec_altmode_set_retimer(struct altmode *alt, unsigned long conf, void *data)
+{
+ struct typec_retimer_state state;
+
+ if (!alt->retimer)
+ return 0;
+
+ state.alt = &alt->adev;
+ state.mode = conf;
+ state.data = data;
+
+ return typec_retimer_set(alt->retimer, &state);
+}
static inline int
typec_altmode_set_mux(struct altmode *alt, unsigned long conf, void *data)
@@ -27,6 +43,19 @@ typec_altmode_set_mux(struct altmode *alt, unsigned long conf, void *data)
return typec_mux_set(alt->mux, &state);
}
+/* Wrapper to set various Type-C port switches together. */
+static inline int
+typec_altmode_set_switches(struct altmode *alt, unsigned long conf, void *data)
+{
+ int ret;
+
+ ret = typec_altmode_set_retimer(alt, conf, data);
+ if (ret)
+ return ret;
+
+ return typec_altmode_set_mux(alt, conf, data);
+}
+
static int typec_altmode_set_state(struct typec_altmode *adev,
unsigned long conf, void *data)
{
@@ -35,7 +64,7 @@ static int typec_altmode_set_state(struct typec_altmode *adev,
port_altmode = is_port ? to_altmode(adev) : to_altmode(adev)->partner;
- return typec_altmode_set_mux(port_altmode, conf, data);
+ return typec_altmode_set_switches(port_altmode, conf, data);
}
/* -------------------------------------------------------------------------- */
@@ -73,7 +102,7 @@ int typec_altmode_notify(struct typec_altmode *adev,
is_port = is_typec_port(adev->dev.parent);
partner = altmode->partner;
- ret = typec_altmode_set_mux(is_port ? altmode : partner, conf, data);
+ ret = typec_altmode_set_switches(is_port ? altmode : partner, conf, data);
if (ret)
return ret;
@@ -321,9 +350,9 @@ static int typec_match(struct device *dev, struct device_driver *driver)
return 0;
}
-static int typec_uevent(struct device *dev, struct kobj_uevent_env *env)
+static int typec_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
- struct typec_altmode *altmode = to_typec_altmode(dev);
+ const struct typec_altmode *altmode = to_typec_altmode(dev);
if (add_uevent_var(env, "SVID=%04X", altmode->svid))
return -ENOMEM;
diff --git a/drivers/usb/typec/bus.h b/drivers/usb/typec/bus.h
index 56dec268d4dd..c89168857417 100644
--- a/drivers/usb/typec/bus.h
+++ b/drivers/usb/typec/bus.h
@@ -7,11 +7,13 @@
struct bus_type;
struct typec_mux;
+struct typec_retimer;
struct altmode {
unsigned int id;
struct typec_altmode adev;
struct typec_mux *mux;
+ struct typec_retimer *retimer;
enum typec_port_data roles;
diff --git a/drivers/usb/typec/class.c b/drivers/usb/typec/class.c
index 5897905cb4f0..cc3182f70673 100644
--- a/drivers/usb/typec/class.c
+++ b/drivers/usb/typec/class.c
@@ -583,6 +583,7 @@ void typec_unregister_altmode(struct typec_altmode *adev)
{
if (IS_ERR_OR_NULL(adev))
return;
+ typec_retimer_put(to_altmode(adev)->retimer);
typec_mux_put(to_altmode(adev)->mux);
device_unregister(&adev->dev);
}
@@ -1737,7 +1738,7 @@ static const struct attribute_group *typec_groups[] = {
NULL
};
-static int typec_uevent(struct device *dev, struct kobj_uevent_env *env)
+static int typec_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
int ret;
@@ -2108,16 +2109,26 @@ typec_port_register_altmode(struct typec_port *port,
{
struct typec_altmode *adev;
struct typec_mux *mux;
+ struct typec_retimer *retimer;
mux = typec_mux_get(&port->dev, desc);
if (IS_ERR(mux))
return ERR_CAST(mux);
+ retimer = typec_retimer_get(&port->dev);
+ if (IS_ERR(retimer)) {
+ typec_mux_put(mux);
+ return ERR_CAST(retimer);
+ }
+
adev = typec_register_altmode(&port->dev, desc);
- if (IS_ERR(adev))
+ if (IS_ERR(adev)) {
+ typec_retimer_put(retimer);
typec_mux_put(mux);
- else
+ } else {
to_altmode(adev)->mux = mux;
+ to_altmode(adev)->retimer = retimer;
+ }
return adev;
}
diff --git a/drivers/usb/typec/hd3ss3220.c b/drivers/usb/typec/hd3ss3220.c
index f128664cb130..746ef3a75b76 100644
--- a/drivers/usb/typec/hd3ss3220.c
+++ b/drivers/usb/typec/hd3ss3220.c
@@ -14,6 +14,7 @@
#include <linux/slab.h>
#include <linux/usb/typec.h>
#include <linux/delay.h>
+#include <linux/workqueue.h>
#define HD3SS3220_REG_CN_STAT_CTRL 0x09
#define HD3SS3220_REG_GEN_CTRL 0x0A
@@ -37,6 +38,9 @@ struct hd3ss3220 {
struct regmap *regmap;
struct usb_role_switch *role_sw;
struct typec_port *port;
+ struct delayed_work output_poll_work;
+ enum usb_role role_state;
+ bool poll;
};
static int hd3ss3220_set_source_pref(struct hd3ss3220 *hd3ss3220, int src_pref)
@@ -118,6 +122,22 @@ static void hd3ss3220_set_role(struct hd3ss3220 *hd3ss3220)
default:
break;
}
+
+ hd3ss3220->role_state = role_state;
+}
+
+static void output_poll_execute(struct work_struct *work)
+{
+ struct delayed_work *delayed_work = to_delayed_work(work);
+ struct hd3ss3220 *hd3ss3220 = container_of(delayed_work,
+ struct hd3ss3220,
+ output_poll_work);
+ enum usb_role role_state = hd3ss3220_get_attached_state(hd3ss3220);
+
+ if (hd3ss3220->role_state != role_state)
+ hd3ss3220_set_role(hd3ss3220);
+
+ schedule_delayed_work(&hd3ss3220->output_poll_work, HZ);
}
static irqreturn_t hd3ss3220_irq(struct hd3ss3220 *hd3ss3220)
@@ -223,6 +243,9 @@ static int hd3ss3220_probe(struct i2c_client *client)
"hd3ss3220", &client->dev);
if (ret)
goto err_unreg_port;
+ } else {
+ INIT_DELAYED_WORK(&hd3ss3220->output_poll_work, output_poll_execute);
+ hd3ss3220->poll = true;
}
ret = i2c_smbus_read_byte_data(client, HD3SS3220_REG_DEV_REV);
@@ -231,6 +254,9 @@ static int hd3ss3220_probe(struct i2c_client *client)
fwnode_handle_put(connector);
+ if (hd3ss3220->poll)
+ schedule_delayed_work(&hd3ss3220->output_poll_work, HZ);
+
dev_info(&client->dev, "probed revision=0x%x\n", ret);
return 0;
@@ -248,6 +274,9 @@ static void hd3ss3220_remove(struct i2c_client *client)
{
struct hd3ss3220 *hd3ss3220 = i2c_get_clientdata(client);
+ if (hd3ss3220->poll)
+ cancel_delayed_work_sync(&hd3ss3220->output_poll_work);
+
typec_unregister_port(hd3ss3220->port);
usb_role_switch_put(hd3ss3220->role_sw);
}
diff --git a/drivers/usb/typec/mux/Kconfig b/drivers/usb/typec/mux/Kconfig
index 5eb2c17d72c1..c46fa4f9d3df 100644
--- a/drivers/usb/typec/mux/Kconfig
+++ b/drivers/usb/typec/mux/Kconfig
@@ -12,6 +12,12 @@ config TYPEC_MUX_FSA4480
common USB Type-C connector.
If compiled as a module, the module will be named fsa4480.
+config TYPEC_MUX_GPIO_SBU
+ tristate "Generic GPIO based SBU mux for USB Type-C applications"
+ help
+ Say Y or M if your system uses a GPIO based mux for managing the
+ connected state and the swapping of the SBU lines in a Type-C port.
+
config TYPEC_MUX_PI3USB30532
tristate "Pericom PI3USB30532 Type-C cross switch driver"
depends on I2C
diff --git a/drivers/usb/typec/mux/Makefile b/drivers/usb/typec/mux/Makefile
index e52a56c16bfb..dda67e19b58b 100644
--- a/drivers/usb/typec/mux/Makefile
+++ b/drivers/usb/typec/mux/Makefile
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_TYPEC_MUX_FSA4480) += fsa4480.o
+obj-$(CONFIG_TYPEC_MUX_GPIO_SBU) += gpio-sbu-mux.o
obj-$(CONFIG_TYPEC_MUX_PI3USB30532) += pi3usb30532.o
obj-$(CONFIG_TYPEC_MUX_INTEL_PMC) += intel_pmc_mux.o
diff --git a/drivers/usb/typec/mux/gpio-sbu-mux.c b/drivers/usb/typec/mux/gpio-sbu-mux.c
new file mode 100644
index 000000000000..f62516dafe8f
--- /dev/null
+++ b/drivers/usb/typec/mux/gpio-sbu-mux.c
@@ -0,0 +1,172 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022 Linaro Ltd.
+ */
+
+#include <linux/bits.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/gpio/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/usb/typec_dp.h>
+#include <linux/usb/typec_mux.h>
+
+struct gpio_sbu_mux {
+ struct gpio_desc *enable_gpio;
+ struct gpio_desc *select_gpio;
+
+ struct typec_switch_dev *sw;
+ struct typec_mux_dev *mux;
+
+ struct mutex lock; /* protect enabled and swapped */
+ bool enabled;
+ bool swapped;
+};
+
+static int gpio_sbu_switch_set(struct typec_switch_dev *sw,
+ enum typec_orientation orientation)
+{
+ struct gpio_sbu_mux *sbu_mux = typec_switch_get_drvdata(sw);
+ bool enabled;
+ bool swapped;
+
+ mutex_lock(&sbu_mux->lock);
+
+ enabled = sbu_mux->enabled;
+ swapped = sbu_mux->swapped;
+
+ switch (orientation) {
+ case TYPEC_ORIENTATION_NONE:
+ enabled = false;
+ break;
+ case TYPEC_ORIENTATION_NORMAL:
+ swapped = false;
+ break;
+ case TYPEC_ORIENTATION_REVERSE:
+ swapped = true;
+ break;
+ }
+
+ if (enabled != sbu_mux->enabled)
+ gpiod_set_value(sbu_mux->enable_gpio, enabled);
+
+ if (swapped != sbu_mux->swapped)
+ gpiod_set_value(sbu_mux->select_gpio, swapped);
+
+ sbu_mux->enabled = enabled;
+ sbu_mux->swapped = swapped;
+
+ mutex_unlock(&sbu_mux->lock);
+
+ return 0;
+}
+
+static int gpio_sbu_mux_set(struct typec_mux_dev *mux,
+ struct typec_mux_state *state)
+{
+ struct gpio_sbu_mux *sbu_mux = typec_mux_get_drvdata(mux);
+
+ mutex_lock(&sbu_mux->lock);
+
+ switch (state->mode) {
+ case TYPEC_STATE_SAFE:
+ case TYPEC_STATE_USB:
+ sbu_mux->enabled = false;
+ break;
+ case TYPEC_DP_STATE_C:
+ case TYPEC_DP_STATE_D:
+ case TYPEC_DP_STATE_E:
+ sbu_mux->enabled = true;
+ break;
+ default:
+ break;
+ }
+
+ gpiod_set_value(sbu_mux->enable_gpio, sbu_mux->enabled);
+
+ mutex_unlock(&sbu_mux->lock);
+
+ return 0;
+}
+
+static int gpio_sbu_mux_probe(struct platform_device *pdev)
+{
+ struct typec_switch_desc sw_desc = { };
+ struct typec_mux_desc mux_desc = { };
+ struct device *dev = &pdev->dev;
+ struct gpio_sbu_mux *sbu_mux;
+
+ sbu_mux = devm_kzalloc(dev, sizeof(*sbu_mux), GFP_KERNEL);
+ if (!sbu_mux)
+ return -ENOMEM;
+
+ mutex_init(&sbu_mux->lock);
+
+ sbu_mux->enable_gpio = devm_gpiod_get(dev, "enable", GPIOD_OUT_LOW);
+ if (IS_ERR(sbu_mux->enable_gpio))
+ return dev_err_probe(dev, PTR_ERR(sbu_mux->enable_gpio),
+ "unable to acquire enable gpio\n");
+
+ sbu_mux->select_gpio = devm_gpiod_get(dev, "select", GPIOD_OUT_LOW);
+ if (IS_ERR(sbu_mux->select_gpio))
+ return dev_err_probe(dev, PTR_ERR(sbu_mux->select_gpio),
+ "unable to acquire select gpio\n");
+
+ sw_desc.drvdata = sbu_mux;
+ sw_desc.fwnode = dev_fwnode(dev);
+ sw_desc.set = gpio_sbu_switch_set;
+
+ sbu_mux->sw = typec_switch_register(dev, &sw_desc);
+ if (IS_ERR(sbu_mux->sw))
+ return dev_err_probe(dev, PTR_ERR(sbu_mux->sw),
+ "failed to register typec switch\n");
+
+ mux_desc.drvdata = sbu_mux;
+ mux_desc.fwnode = dev_fwnode(dev);
+ mux_desc.set = gpio_sbu_mux_set;
+
+ sbu_mux->mux = typec_mux_register(dev, &mux_desc);
+ if (IS_ERR(sbu_mux->mux)) {
+ typec_switch_unregister(sbu_mux->sw);
+ return dev_err_probe(dev, PTR_ERR(sbu_mux->mux),
+ "failed to register typec mux\n");
+ }
+
+ platform_set_drvdata(pdev, sbu_mux);
+
+ return 0;
+}
+
+static int gpio_sbu_mux_remove(struct platform_device *pdev)
+{
+ struct gpio_sbu_mux *sbu_mux = platform_get_drvdata(pdev);
+
+ gpiod_set_value(sbu_mux->enable_gpio, 0);
+
+ typec_mux_unregister(sbu_mux->mux);
+ typec_switch_unregister(sbu_mux->sw);
+
+ return 0;
+}
+
+static const struct of_device_id gpio_sbu_mux_match[] = {
+ { .compatible = "gpio-sbu-mux", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, gpio_sbu_mux_match);
+
+static struct platform_driver gpio_sbu_mux_driver = {
+ .probe = gpio_sbu_mux_probe,
+ .remove = gpio_sbu_mux_remove,
+ .driver = {
+ .name = "gpio_sbu_mux",
+ .of_match_table = gpio_sbu_mux_match,
+ },
+};
+module_platform_driver(gpio_sbu_mux_driver);
+
+MODULE_DESCRIPTION("GPIO based SBU mux driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/typec/mux/intel_pmc_mux.c b/drivers/usb/typec/mux/intel_pmc_mux.c
index fdbf3694e21f..34e4188a40ff 100644
--- a/drivers/usb/typec/mux/intel_pmc_mux.c
+++ b/drivers/usb/typec/mux/intel_pmc_mux.c
@@ -602,20 +602,21 @@ static int pmc_usb_probe_iom(struct pmc_usb *pmc)
int ret;
for (dev_id = &iom_acpi_ids[0]; dev_id->id[0]; dev_id++) {
- if (acpi_dev_present(dev_id->id, NULL, -1)) {
- pmc->iom_port_status_offset = (u32)dev_id->driver_data;
- adev = acpi_dev_get_first_match_dev(dev_id->id, NULL, -1);
+ adev = acpi_dev_get_first_match_dev(dev_id->id, NULL, -1);
+ if (adev)
break;
- }
}
-
if (!adev)
return -ENODEV;
+ pmc->iom_port_status_offset = (u32)dev_id->driver_data;
+
INIT_LIST_HEAD(&resource_list);
ret = acpi_dev_get_memory_resources(adev, &resource_list);
- if (ret < 0)
+ if (ret < 0) {
+ acpi_dev_put(adev);
return ret;
+ }
rentry = list_first_entry_or_null(&resource_list, struct resource_entry, node);
if (rentry)
diff --git a/drivers/usb/typec/pd.c b/drivers/usb/typec/pd.c
index dc72005d68db..59c537a5e600 100644
--- a/drivers/usb/typec/pd.c
+++ b/drivers/usb/typec/pd.c
@@ -49,6 +49,13 @@ usb_suspend_supported_show(struct device *dev, struct device_attribute *attr, ch
static DEVICE_ATTR_RO(usb_suspend_supported);
static ssize_t
+higher_capability_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "%u\n", !!(to_pdo(dev)->pdo & PDO_FIXED_HIGHER_CAP));
+}
+static DEVICE_ATTR_RO(higher_capability);
+
+static ssize_t
unconstrained_power_show(struct device *dev, struct device_attribute *attr, char *buf)
{
return sysfs_emit(buf, "%u\n", !!(to_pdo(dev)->pdo & PDO_FIXED_EXTPOWER));
@@ -161,7 +168,7 @@ static struct device_type source_fixed_supply_type = {
static struct attribute *sink_fixed_supply_attrs[] = {
&dev_attr_dual_role_power.attr,
- &dev_attr_usb_suspend_supported.attr,
+ &dev_attr_higher_capability.attr,
&dev_attr_unconstrained_power.attr,
&dev_attr_usb_communication_capable.attr,
&dev_attr_dual_role_data.attr,
diff --git a/drivers/usb/typec/retimer.h b/drivers/usb/typec/retimer.h
index e34bd23323be..d6a5ef9881e1 100644
--- a/drivers/usb/typec/retimer.h
+++ b/drivers/usb/typec/retimer.h
@@ -12,7 +12,7 @@ struct typec_retimer {
#define to_typec_retimer(_dev_) container_of(_dev_, struct typec_retimer, dev)
-const struct device_type typec_retimer_dev_type;
+extern const struct device_type typec_retimer_dev_type;
#define is_typec_retimer(dev) ((dev)->type == &typec_retimer_dev_type)
diff --git a/drivers/usb/typec/tcpm/Makefile b/drivers/usb/typec/tcpm/Makefile
index 906d9dced8e7..08e57bb499cb 100644
--- a/drivers/usb/typec/tcpm/Makefile
+++ b/drivers/usb/typec/tcpm/Makefile
@@ -8,3 +8,4 @@ obj-$(CONFIG_TYPEC_RT1711H) += tcpci_rt1711h.o
obj-$(CONFIG_TYPEC_MT6360) += tcpci_mt6360.o
obj-$(CONFIG_TYPEC_TCPCI_MT6370) += tcpci_mt6370.o
obj-$(CONFIG_TYPEC_TCPCI_MAXIM) += tcpci_maxim.o
+tcpci_maxim-y += tcpci_maxim_core.o maxim_contaminant.o
diff --git a/drivers/usb/typec/tcpm/maxim_contaminant.c b/drivers/usb/typec/tcpm/maxim_contaminant.c
new file mode 100644
index 000000000000..f8504a90da26
--- /dev/null
+++ b/drivers/usb/typec/tcpm/maxim_contaminant.c
@@ -0,0 +1,387 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2022 Google, Inc
+ *
+ * USB-C module to reduce wakeups due to contaminants.
+ */
+
+#include <linux/device.h>
+#include <linux/irqreturn.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/usb/tcpci.h>
+#include <linux/usb/tcpm.h>
+#include <linux/usb/typec.h>
+
+#include "tcpci_maxim.h"
+
+enum fladc_select {
+ CC1_SCALE1 = 1,
+ CC1_SCALE2,
+ CC2_SCALE1,
+ CC2_SCALE2,
+ SBU1,
+ SBU2,
+};
+
+#define FLADC_1uA_LSB_MV 25
+/* High range CC */
+#define FLADC_CC_HIGH_RANGE_LSB_MV 208
+/* Low range CC */
+#define FLADC_CC_LOW_RANGE_LSB_MV 126
+
+/* 1uA current source */
+#define FLADC_CC_SCALE1 1
+/* 5 uA current source */
+#define FLADC_CC_SCALE2 5
+
+#define FLADC_1uA_CC_OFFSET_MV 300
+#define FLADC_CC_HIGH_RANGE_OFFSET_MV 624
+#define FLADC_CC_LOW_RANGE_OFFSET_MV 378
+
+#define CONTAMINANT_THRESHOLD_SBU_K 1000
+#define CONTAMINANT_THRESHOLD_CC_K 1000
+
+#define READ1_SLEEP_MS 10
+#define READ2_SLEEP_MS 5
+
+#define STATUS_CHECK(reg, mask, val) (((reg) & (mask)) == (val))
+
+#define IS_CC_OPEN(cc_status) \
+ (STATUS_CHECK((cc_status), TCPC_CC_STATUS_CC1_MASK << TCPC_CC_STATUS_CC1_SHIFT, \
+ TCPC_CC_STATE_SRC_OPEN) && STATUS_CHECK((cc_status), \
+ TCPC_CC_STATUS_CC2_MASK << \
+ TCPC_CC_STATUS_CC2_SHIFT, \
+ TCPC_CC_STATE_SRC_OPEN))
+
+static int max_contaminant_adc_to_mv(struct max_tcpci_chip *chip, enum fladc_select channel,
+ bool ua_src, u8 fladc)
+{
+ /* SBU channels only have 1 scale with 1uA. */
+ if ((ua_src && (channel == CC1_SCALE2 || channel == CC2_SCALE2 || channel == SBU1 ||
+ channel == SBU2)))
+ /* Mean of range */
+ return FLADC_1uA_CC_OFFSET_MV + (fladc * FLADC_1uA_LSB_MV);
+ else if (!ua_src && (channel == CC1_SCALE1 || channel == CC2_SCALE1))
+ return FLADC_CC_HIGH_RANGE_OFFSET_MV + (fladc * FLADC_CC_HIGH_RANGE_LSB_MV);
+ else if (!ua_src && (channel == CC1_SCALE2 || channel == CC2_SCALE2))
+ return FLADC_CC_LOW_RANGE_OFFSET_MV + (fladc * FLADC_CC_LOW_RANGE_LSB_MV);
+
+ dev_err_once(chip->dev, "ADC ERROR: SCALE UNKNOWN");
+
+ return -EINVAL;
+}
+
+static int max_contaminant_read_adc_mv(struct max_tcpci_chip *chip, enum fladc_select channel,
+ int sleep_msec, bool raw, bool ua_src)
+{
+ struct regmap *regmap = chip->data.regmap;
+ u8 fladc;
+ int ret;
+
+ /* Channel & scale select */
+ ret = regmap_update_bits(regmap, TCPC_VENDOR_ADC_CTRL1, ADCINSEL_MASK,
+ channel << ADC_CHANNEL_OFFSET);
+ if (ret < 0)
+ return ret;
+
+ /* Enable ADC */
+ ret = regmap_update_bits(regmap, TCPC_VENDOR_ADC_CTRL1, ADCEN, ADCEN);
+ if (ret < 0)
+ return ret;
+
+ usleep_range(sleep_msec * 1000, (sleep_msec + 1) * 1000);
+ ret = max_tcpci_read8(chip, TCPC_VENDOR_FLADC_STATUS, &fladc);
+ if (ret < 0)
+ return ret;
+
+ /* Disable ADC */
+ ret = regmap_update_bits(regmap, TCPC_VENDOR_ADC_CTRL1, ADCEN, 0);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_update_bits(regmap, TCPC_VENDOR_ADC_CTRL1, ADCINSEL_MASK, 0);
+ if (ret < 0)
+ return ret;
+
+ if (!raw)
+ return max_contaminant_adc_to_mv(chip, channel, ua_src, fladc);
+ else
+ return fladc;
+}
+
+static int max_contaminant_read_resistance_kohm(struct max_tcpci_chip *chip,
+ enum fladc_select channel, int sleep_msec, bool raw)
+{
+ struct regmap *regmap = chip->data.regmap;
+ int mv;
+ int ret;
+
+ if (channel == CC1_SCALE1 || channel == CC2_SCALE1 || channel == CC1_SCALE2 ||
+ channel == CC2_SCALE2) {
+ /* Enable 1uA current source */
+ ret = regmap_update_bits(regmap, TCPC_VENDOR_CC_CTRL2, CCLPMODESEL_MASK,
+ ULTRA_LOW_POWER_MODE);
+ if (ret < 0)
+ return ret;
+
+ /* Enable 1uA current source */
+ ret = regmap_update_bits(regmap, TCPC_VENDOR_CC_CTRL2, CCRPCTRL_MASK, UA_1_SRC);
+ if (ret < 0)
+ return ret;
+
+ /* OVP disable */
+ ret = regmap_update_bits(regmap, TCPC_VENDOR_CC_CTRL2, CCOVPDIS, CCOVPDIS);
+ if (ret < 0)
+ return ret;
+
+ mv = max_contaminant_read_adc_mv(chip, channel, sleep_msec, raw, true);
+ if (mv < 0)
+ return ret;
+
+ /* OVP enable */
+ ret = regmap_update_bits(regmap, TCPC_VENDOR_CC_CTRL2, CCOVPDIS, 0);
+ if (ret < 0)
+ return ret;
+ /* returns KOhm as 1uA source is used. */
+ return mv;
+ }
+
+ ret = regmap_update_bits(regmap, TCPC_VENDOR_CC_CTRL2, SBUOVPDIS, SBUOVPDIS);
+ if (ret < 0)
+ return ret;
+
+ /* SBU switches auto configure when channel is selected. */
+ /* Enable 1ua current source */
+ ret = regmap_update_bits(regmap, TCPC_VENDOR_CC_CTRL2, SBURPCTRL, SBURPCTRL);
+ if (ret < 0)
+ return ret;
+
+ mv = max_contaminant_read_adc_mv(chip, channel, sleep_msec, raw, true);
+ if (mv < 0)
+ return ret;
+ /* Disable current source */
+ ret = regmap_update_bits(regmap, TCPC_VENDOR_CC_CTRL2, SBURPCTRL, 0);
+ if (ret < 0)
+ return ret;
+
+ /* OVP disable */
+ ret = regmap_update_bits(regmap, TCPC_VENDOR_CC_CTRL2, SBUOVPDIS, 0);
+ if (ret < 0)
+ return ret;
+
+ return mv;
+}
+
+static int max_contaminant_read_comparators(struct max_tcpci_chip *chip, u8 *vendor_cc_status2_cc1,
+ u8 *vendor_cc_status2_cc2)
+{
+ struct regmap *regmap = chip->data.regmap;
+ int ret;
+
+ /* Enable 80uA source */
+ ret = regmap_update_bits(regmap, TCPC_VENDOR_CC_CTRL2, CCRPCTRL_MASK, UA_80_SRC);
+ if (ret < 0)
+ return ret;
+
+ /* Enable comparators */
+ ret = regmap_update_bits(regmap, TCPC_VENDOR_CC_CTRL1, CCCOMPEN, CCCOMPEN);
+ if (ret < 0)
+ return ret;
+
+ /* Sleep to allow comparators settle */
+ usleep_range(5000, 6000);
+ ret = regmap_update_bits(regmap, TCPC_TCPC_CTRL, TCPC_TCPC_CTRL_ORIENTATION, PLUG_ORNT_CC1);
+ if (ret < 0)
+ return ret;
+
+ usleep_range(5000, 6000);
+ ret = max_tcpci_read8(chip, VENDOR_CC_STATUS2, vendor_cc_status2_cc1);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_update_bits(regmap, TCPC_TCPC_CTRL, TCPC_TCPC_CTRL_ORIENTATION, PLUG_ORNT_CC2);
+ if (ret < 0)
+ return ret;
+
+ usleep_range(5000, 6000);
+ ret = max_tcpci_read8(chip, VENDOR_CC_STATUS2, vendor_cc_status2_cc2);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_update_bits(regmap, TCPC_VENDOR_CC_CTRL1, CCCOMPEN, 0);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_update_bits(regmap, TCPC_VENDOR_CC_CTRL2, CCRPCTRL_MASK, 0);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int max_contaminant_detect_contaminant(struct max_tcpci_chip *chip)
+{
+ int cc1_k, cc2_k, sbu1_k, sbu2_k, ret;
+ u8 vendor_cc_status2_cc1 = 0xff, vendor_cc_status2_cc2 = 0xff;
+ u8 role_ctrl = 0, role_ctrl_backup = 0;
+ int inferred_state = NOT_DETECTED;
+
+ ret = max_tcpci_read8(chip, TCPC_ROLE_CTRL, &role_ctrl);
+ if (ret < 0)
+ return NOT_DETECTED;
+
+ role_ctrl_backup = role_ctrl;
+ role_ctrl = 0x0F;
+ ret = max_tcpci_write8(chip, TCPC_ROLE_CTRL, role_ctrl);
+ if (ret < 0)
+ return NOT_DETECTED;
+
+ cc1_k = max_contaminant_read_resistance_kohm(chip, CC1_SCALE2, READ1_SLEEP_MS, false);
+ if (cc1_k < 0)
+ goto exit;
+
+ cc2_k = max_contaminant_read_resistance_kohm(chip, CC2_SCALE2, READ2_SLEEP_MS, false);
+ if (cc2_k < 0)
+ goto exit;
+
+ sbu1_k = max_contaminant_read_resistance_kohm(chip, SBU1, READ1_SLEEP_MS, false);
+ if (sbu1_k < 0)
+ goto exit;
+
+ sbu2_k = max_contaminant_read_resistance_kohm(chip, SBU2, READ2_SLEEP_MS, false);
+ if (sbu2_k < 0)
+ goto exit;
+
+ ret = max_contaminant_read_comparators(chip, &vendor_cc_status2_cc1,
+ &vendor_cc_status2_cc2);
+
+ if (ret < 0)
+ goto exit;
+
+ if ((!(CC1_VUFP_RD0P5 & vendor_cc_status2_cc1) ||
+ !(CC2_VUFP_RD0P5 & vendor_cc_status2_cc2)) &&
+ !(CC1_VUFP_RD0P5 & vendor_cc_status2_cc1 && CC2_VUFP_RD0P5 & vendor_cc_status2_cc2))
+ inferred_state = SINK;
+ else if ((cc1_k < CONTAMINANT_THRESHOLD_CC_K || cc2_k < CONTAMINANT_THRESHOLD_CC_K) &&
+ (sbu1_k < CONTAMINANT_THRESHOLD_SBU_K || sbu2_k < CONTAMINANT_THRESHOLD_SBU_K))
+ inferred_state = DETECTED;
+
+ if (inferred_state == NOT_DETECTED)
+ max_tcpci_write8(chip, TCPC_ROLE_CTRL, role_ctrl_backup);
+ else
+ max_tcpci_write8(chip, TCPC_ROLE_CTRL, (TCPC_ROLE_CTRL_DRP | 0xA));
+
+ return inferred_state;
+exit:
+ max_tcpci_write8(chip, TCPC_ROLE_CTRL, role_ctrl_backup);
+ return NOT_DETECTED;
+}
+
+static int max_contaminant_enable_dry_detection(struct max_tcpci_chip *chip)
+{
+ struct regmap *regmap = chip->data.regmap;
+ u8 temp;
+ int ret;
+
+ ret = regmap_update_bits(regmap, TCPC_VENDOR_CC_CTRL3, CCWTRDEB_MASK | CCWTRSEL_MASK
+ | WTRCYCLE_MASK, CCWTRDEB_1MS << CCWTRDEB_SHIFT |
+ CCWTRSEL_1V << CCWTRSEL_SHIFT | WTRCYCLE_4_8_S <<
+ WTRCYCLE_SHIFT);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_update_bits(regmap, TCPC_ROLE_CTRL, TCPC_ROLE_CTRL_DRP, TCPC_ROLE_CTRL_DRP);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_update_bits(regmap, TCPC_VENDOR_CC_CTRL1, CCCONNDRY, CCCONNDRY);
+ if (ret < 0)
+ return ret;
+ ret = max_tcpci_read8(chip, TCPC_VENDOR_CC_CTRL1, &temp);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_update_bits(regmap, TCPC_VENDOR_CC_CTRL2, CCLPMODESEL_MASK,
+ ULTRA_LOW_POWER_MODE);
+ if (ret < 0)
+ return ret;
+ ret = max_tcpci_read8(chip, TCPC_VENDOR_CC_CTRL2, &temp);
+ if (ret < 0)
+ return ret;
+
+ /* Enable Look4Connection before sending the command */
+ ret = regmap_update_bits(regmap, TCPC_TCPC_CTRL, TCPC_TCPC_CTRL_EN_LK4CONN_ALRT,
+ TCPC_TCPC_CTRL_EN_LK4CONN_ALRT);
+ if (ret < 0)
+ return ret;
+
+ ret = max_tcpci_write8(chip, TCPC_COMMAND, TCPC_CMD_LOOK4CONNECTION);
+ if (ret < 0)
+ return ret;
+ return 0;
+}
+
+bool max_contaminant_is_contaminant(struct max_tcpci_chip *chip, bool disconnect_while_debounce)
+{
+ u8 cc_status, pwr_cntl;
+ int ret;
+
+ ret = max_tcpci_read8(chip, TCPC_CC_STATUS, &cc_status);
+ if (ret < 0)
+ return false;
+
+ ret = max_tcpci_read8(chip, TCPC_POWER_CTRL, &pwr_cntl);
+ if (ret < 0)
+ return false;
+
+ if (chip->contaminant_state == NOT_DETECTED || chip->contaminant_state == SINK) {
+ if (!disconnect_while_debounce)
+ msleep(100);
+
+ ret = max_tcpci_read8(chip, TCPC_CC_STATUS, &cc_status);
+ if (ret < 0)
+ return false;
+
+ if (IS_CC_OPEN(cc_status)) {
+ u8 role_ctrl, role_ctrl_backup;
+
+ ret = max_tcpci_read8(chip, TCPC_ROLE_CTRL, &role_ctrl);
+ if (ret < 0)
+ return false;
+
+ role_ctrl_backup = role_ctrl;
+ role_ctrl |= 0x0F;
+ role_ctrl &= ~(TCPC_ROLE_CTRL_DRP);
+ ret = max_tcpci_write8(chip, TCPC_ROLE_CTRL, role_ctrl);
+ if (ret < 0)
+ return false;
+
+ chip->contaminant_state = max_contaminant_detect_contaminant(chip);
+
+ ret = max_tcpci_write8(chip, TCPC_ROLE_CTRL, role_ctrl_backup);
+ if (ret < 0)
+ return false;
+
+ if (chip->contaminant_state == DETECTED) {
+ max_contaminant_enable_dry_detection(chip);
+ return true;
+ }
+ }
+ return false;
+ } else if (chip->contaminant_state == DETECTED) {
+ if (STATUS_CHECK(cc_status, TCPC_CC_STATUS_TOGGLING, 0)) {
+ chip->contaminant_state = max_contaminant_detect_contaminant(chip);
+ if (chip->contaminant_state == DETECTED) {
+ max_contaminant_enable_dry_detection(chip);
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
+MODULE_DESCRIPTION("MAXIM TCPC CONTAMINANT Module");
+MODULE_AUTHOR("Badhri Jagan Sridharan <badhri@google.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/typec/tcpm/tcpci.c b/drivers/usb/typec/tcpm/tcpci.c
index fe781a38dc82..8da23240afbe 100644
--- a/drivers/usb/typec/tcpm/tcpci.c
+++ b/drivers/usb/typec/tcpm/tcpci.c
@@ -33,6 +33,7 @@ struct tcpci {
struct tcpm_port *port;
struct regmap *regmap;
+ unsigned int alert_mask;
bool controls_vbus;
@@ -403,6 +404,14 @@ static void tcpci_frs_sourcing_vbus(struct tcpc_dev *dev)
tcpci->data->frs_sourcing_vbus(tcpci, tcpci->data);
}
+static void tcpci_check_contaminant(struct tcpc_dev *dev)
+{
+ struct tcpci *tcpci = tcpc_to_tcpci(dev);
+
+ if (tcpci->data->check_contaminant)
+ tcpci->data->check_contaminant(tcpci, tcpci->data);
+}
+
static int tcpci_set_bist_data(struct tcpc_dev *tcpc, bool enable)
{
struct tcpci *tcpci = tcpc_to_tcpci(tcpc);
@@ -632,6 +641,9 @@ static int tcpci_init(struct tcpc_dev *tcpc)
if (ret < 0)
return ret;
}
+
+ tcpci->alert_mask = reg;
+
return tcpci_write16(tcpci, TCPC_ALERT_MASK, reg);
}
@@ -715,7 +727,7 @@ irqreturn_t tcpci_irq(struct tcpci *tcpci)
else if (status & TCPC_ALERT_TX_FAILED)
tcpm_pd_transmit_complete(tcpci->port, TCPC_TX_FAILED);
- return IRQ_HANDLED;
+ return IRQ_RETVAL(status & tcpci->alert_mask);
}
EXPORT_SYMBOL_GPL(tcpci_irq);
@@ -778,6 +790,9 @@ struct tcpci *tcpci_register_port(struct device *dev, struct tcpci_data *data)
tcpci->tcpc.frs_sourcing_vbus = tcpci_frs_sourcing_vbus;
tcpci->tcpc.set_partner_usb_comm_capable = tcpci_set_partner_usb_comm_capable;
+ if (tcpci->data->check_contaminant)
+ tcpci->tcpc.check_contaminant = tcpci_check_contaminant;
+
if (tcpci->data->auto_discharge_disconnect) {
tcpci->tcpc.enable_auto_vbus_discharge = tcpci_enable_auto_vbus_discharge;
tcpci->tcpc.set_auto_vbus_discharge_threshold =
@@ -838,7 +853,7 @@ static int tcpci_probe(struct i2c_client *client)
err = devm_request_threaded_irq(&client->dev, client->irq, NULL,
_tcpci_irq,
- IRQF_ONESHOT | IRQF_TRIGGER_LOW,
+ IRQF_SHARED | IRQF_ONESHOT | IRQF_TRIGGER_LOW,
dev_name(&client->dev), chip);
if (err < 0) {
tcpci_unregister_port(chip->tcpci);
diff --git a/drivers/usb/typec/tcpm/tcpci_maxim.h b/drivers/usb/typec/tcpm/tcpci_maxim.h
new file mode 100644
index 000000000000..2c1c4d161b0d
--- /dev/null
+++ b/drivers/usb/typec/tcpm/tcpci_maxim.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2022 Google, Inc
+ *
+ * MAXIM TCPC header file.
+ */
+#ifndef TCPCI_MAXIM_H_
+#define TCPCI_MAXIM_H_
+
+#define VENDOR_CC_STATUS2 0x85
+#define CC1_VUFP_RD0P5 BIT(1)
+#define CC2_VUFP_RD0P5 BIT(5)
+#define TCPC_VENDOR_FLADC_STATUS 0x89
+
+#define TCPC_VENDOR_CC_CTRL1 0x8c
+#define CCCONNDRY BIT(7)
+#define CCCOMPEN BIT(5)
+
+#define TCPC_VENDOR_CC_CTRL2 0x8d
+#define SBUOVPDIS BIT(7)
+#define CCOVPDIS BIT(6)
+#define SBURPCTRL BIT(5)
+#define CCLPMODESEL_MASK GENMASK(4, 3)
+#define ULTRA_LOW_POWER_MODE BIT(3)
+#define CCRPCTRL_MASK GENMASK(2, 0)
+#define UA_1_SRC 1
+#define UA_80_SRC 3
+
+#define TCPC_VENDOR_CC_CTRL3 0x8e
+#define CCWTRDEB_MASK GENMASK(7, 6)
+#define CCWTRDEB_SHIFT 6
+#define CCWTRDEB_1MS 1
+#define CCWTRSEL_MASK GENMASK(5, 3)
+#define CCWTRSEL_SHIFT 3
+#define CCWTRSEL_1V 0x4
+#define CCLADDERDIS BIT(2)
+#define WTRCYCLE_MASK BIT(0)
+#define WTRCYCLE_SHIFT 0
+#define WTRCYCLE_2_4_S 0
+#define WTRCYCLE_4_8_S 1
+
+#define TCPC_VENDOR_ADC_CTRL1 0x91
+#define ADCINSEL_MASK GENMASK(7, 5)
+#define ADC_CHANNEL_OFFSET 5
+#define ADCEN BIT(0)
+
+enum contamiant_state {
+ NOT_DETECTED,
+ DETECTED,
+ SINK,
+};
+
+/*
+ * @potential_contaminant:
+ * Last returned result to tcpm indicating whether the TCPM port
+ * has potential contaminant.
+ */
+struct max_tcpci_chip {
+ struct tcpci_data data;
+ struct tcpci *tcpci;
+ struct device *dev;
+ struct i2c_client *client;
+ struct tcpm_port *port;
+ enum contamiant_state contaminant_state;
+};
+
+static inline int max_tcpci_read16(struct max_tcpci_chip *chip, unsigned int reg, u16 *val)
+{
+ return regmap_raw_read(chip->data.regmap, reg, val, sizeof(u16));
+}
+
+static inline int max_tcpci_write16(struct max_tcpci_chip *chip, unsigned int reg, u16 val)
+{
+ return regmap_raw_write(chip->data.regmap, reg, &val, sizeof(u16));
+}
+
+static inline int max_tcpci_read8(struct max_tcpci_chip *chip, unsigned int reg, u8 *val)
+{
+ return regmap_raw_read(chip->data.regmap, reg, val, sizeof(u8));
+}
+
+static inline int max_tcpci_write8(struct max_tcpci_chip *chip, unsigned int reg, u8 val)
+{
+ return regmap_raw_write(chip->data.regmap, reg, &val, sizeof(u8));
+}
+
+bool max_contaminant_is_contaminant(struct max_tcpci_chip *chip, bool disconnect_while_debounce);
+
+#endif // TCPCI_MAXIM_H_
diff --git a/drivers/usb/typec/tcpm/tcpci_maxim.c b/drivers/usb/typec/tcpm/tcpci_maxim_core.c
index 83e140ffcc3e..f32cda2a5e3a 100644
--- a/drivers/usb/typec/tcpm/tcpci_maxim.c
+++ b/drivers/usb/typec/tcpm/tcpci_maxim_core.c
@@ -1,6 +1,6 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0+
/*
- * Copyright (C) 2020, Google LLC
+ * Copyright (C) 2020 - 2022, Google LLC
*
* MAXIM TCPCI based TCPC driver
*/
@@ -15,6 +15,8 @@
#include <linux/usb/tcpm.h>
#include <linux/usb/typec.h>
+#include "tcpci_maxim.h"
+
#define PD_ACTIVITY_TIMEOUT_MS 10000
#define TCPC_VENDOR_ALERT 0x80
@@ -39,14 +41,6 @@
#define MAX_BUCK_BOOST_SOURCE 0xa
#define MAX_BUCK_BOOST_SINK 0x5
-struct max_tcpci_chip {
- struct tcpci_data data;
- struct tcpci *tcpci;
- struct device *dev;
- struct i2c_client *client;
- struct tcpm_port *port;
-};
-
static const struct regmap_range max_tcpci_tcpci_range[] = {
regmap_reg_range(0x00, 0x95)
};
@@ -68,26 +62,6 @@ static struct max_tcpci_chip *tdata_to_max_tcpci(struct tcpci_data *tdata)
return container_of(tdata, struct max_tcpci_chip, data);
}
-static int max_tcpci_read16(struct max_tcpci_chip *chip, unsigned int reg, u16 *val)
-{
- return regmap_raw_read(chip->data.regmap, reg, val, sizeof(u16));
-}
-
-static int max_tcpci_write16(struct max_tcpci_chip *chip, unsigned int reg, u16 val)
-{
- return regmap_raw_write(chip->data.regmap, reg, &val, sizeof(u16));
-}
-
-static int max_tcpci_read8(struct max_tcpci_chip *chip, unsigned int reg, u8 *val)
-{
- return regmap_raw_read(chip->data.regmap, reg, val, sizeof(u8));
-}
-
-static int max_tcpci_write8(struct max_tcpci_chip *chip, unsigned int reg, u8 val)
-{
- return regmap_raw_write(chip->data.regmap, reg, &val, sizeof(u8));
-}
-
static void max_tcpci_init_regs(struct max_tcpci_chip *chip)
{
u16 alert_mask = 0;
@@ -348,8 +322,14 @@ static irqreturn_t _max_tcpci_irq(struct max_tcpci_chip *chip, u16 status)
if (status & TCPC_ALERT_VBUS_DISCNCT)
tcpm_vbus_change(chip->port);
- if (status & TCPC_ALERT_CC_STATUS)
- tcpm_cc_change(chip->port);
+ if (status & TCPC_ALERT_CC_STATUS) {
+ if (chip->contaminant_state == DETECTED || tcpm_port_is_toggling(chip->port)) {
+ if (!max_contaminant_is_contaminant(chip, false))
+ tcpm_port_clean(chip->port);
+ } else {
+ tcpm_cc_change(chip->port);
+ }
+ }
if (status & TCPC_ALERT_POWER_STATUS)
process_power_status(chip);
@@ -438,6 +418,14 @@ static int tcpci_init(struct tcpci *tcpci, struct tcpci_data *data)
return -1;
}
+static void max_tcpci_check_contaminant(struct tcpci *tcpci, struct tcpci_data *tdata)
+{
+ struct max_tcpci_chip *chip = tdata_to_max_tcpci(tdata);
+
+ if (!max_contaminant_is_contaminant(chip, true))
+ tcpm_port_clean(chip->port);
+}
+
static int max_tcpci_probe(struct i2c_client *client)
{
int ret;
@@ -471,6 +459,7 @@ static int max_tcpci_probe(struct i2c_client *client)
chip->data.auto_discharge_disconnect = true;
chip->data.vbus_vsafe0v = true;
chip->data.set_partner_usb_comm_capable = max_tcpci_set_partner_usb_comm_capable;
+ chip->data.check_contaminant = max_tcpci_check_contaminant;
max_tcpci_init_regs(chip);
chip->tcpci = tcpci_register_port(chip->dev, &chip->data);
diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
index 59b366b5c614..a0d943d78580 100644
--- a/drivers/usb/typec/tcpm/tcpm.c
+++ b/drivers/usb/typec/tcpm/tcpm.c
@@ -36,6 +36,7 @@
#define FOREACH_STATE(S) \
S(INVALID_STATE), \
S(TOGGLING), \
+ S(CHECK_CONTAMINANT), \
S(SRC_UNATTACHED), \
S(SRC_ATTACH_WAIT), \
S(SRC_ATTACHED), \
@@ -249,6 +250,7 @@ enum frs_typec_current {
#define TCPM_RESET_EVENT BIT(2)
#define TCPM_FRS_EVENT BIT(3)
#define TCPM_SOURCING_VBUS BIT(4)
+#define TCPM_PORT_CLEAN BIT(5)
#define LOG_BUFFER_ENTRIES 1024
#define LOG_BUFFER_ENTRY_SIZE 128
@@ -483,6 +485,13 @@ struct tcpm_port {
* SNK_READY for non-pd link.
*/
bool slow_charger_loop;
+
+ /*
+ * When true indicates that the lower level drivers indicate potential presence
+ * of contaminant in the connector pins based on the tcpm state machine
+ * transitions.
+ */
+ bool potential_contaminant;
#ifdef CONFIG_DEBUG_FS
struct dentry *dentry;
struct mutex logbuffer_lock; /* log buffer access lock */
@@ -647,7 +656,7 @@ static void tcpm_log(struct tcpm_port *port, const char *fmt, ...)
/* Do not log while disconnected and unattached */
if (tcpm_port_is_disconnected(port) &&
(port->state == SRC_UNATTACHED || port->state == SNK_UNATTACHED ||
- port->state == TOGGLING))
+ port->state == TOGGLING || port->state == CHECK_CONTAMINANT))
return;
va_start(args, fmt);
@@ -1693,14 +1702,11 @@ static int tcpm_pd_svdm(struct tcpm_port *port, struct typec_altmode *adev,
}
break;
case CMD_ENTER_MODE:
- if (adev && pdev) {
- typec_altmode_update_active(pdev, true);
+ if (adev && pdev)
*adev_action = ADEV_QUEUE_VDM_SEND_EXIT_MODE_ON_FAIL;
- }
return 0;
case CMD_EXIT_MODE:
if (adev && pdev) {
- typec_altmode_update_active(pdev, false);
/* Back to USB Operation */
*adev_action = ADEV_NOTIFY_USB_AND_QUEUE_VDM;
return 0;
@@ -3904,15 +3910,28 @@ static void run_state_machine(struct tcpm_port *port)
unsigned int msecs;
enum tcpm_state upcoming_state;
+ if (port->tcpc->check_contaminant && port->state != CHECK_CONTAMINANT)
+ port->potential_contaminant = ((port->enter_state == SRC_ATTACH_WAIT &&
+ port->state == SRC_UNATTACHED) ||
+ (port->enter_state == SNK_ATTACH_WAIT &&
+ port->state == SNK_UNATTACHED));
+
port->enter_state = port->state;
switch (port->state) {
case TOGGLING:
break;
+ case CHECK_CONTAMINANT:
+ port->tcpc->check_contaminant(port->tcpc);
+ break;
/* SRC states */
case SRC_UNATTACHED:
if (!port->non_pd_role_swap)
tcpm_swap_complete(port, -ENOTCONN);
tcpm_src_detach(port);
+ if (port->potential_contaminant) {
+ tcpm_set_state(port, CHECK_CONTAMINANT, 0);
+ break;
+ }
if (tcpm_start_toggling(port, tcpm_rp_cc(port))) {
tcpm_set_state(port, TOGGLING, 0);
break;
@@ -4150,6 +4169,10 @@ static void run_state_machine(struct tcpm_port *port)
tcpm_swap_complete(port, -ENOTCONN);
tcpm_pps_complete(port, -ENOTCONN);
tcpm_snk_detach(port);
+ if (port->potential_contaminant) {
+ tcpm_set_state(port, CHECK_CONTAMINANT, 0);
+ break;
+ }
if (tcpm_start_toggling(port, TYPEC_CC_RD)) {
tcpm_set_state(port, TOGGLING, 0);
break;
@@ -4925,6 +4948,9 @@ static void _tcpm_cc_change(struct tcpm_port *port, enum typec_cc_status cc1,
else if (tcpm_port_is_sink(port))
tcpm_set_state(port, SNK_ATTACH_WAIT, 0);
break;
+ case CHECK_CONTAMINANT:
+ /* Wait for Toggling to be resumed */
+ break;
case SRC_UNATTACHED:
case ACC_UNATTACHED:
if (tcpm_port_is_debug(port) || tcpm_port_is_audio(port) ||
@@ -5424,6 +5450,15 @@ static void tcpm_pd_event_handler(struct kthread_work *work)
port->vbus_source = true;
_tcpm_pd_vbus_on(port);
}
+ if (events & TCPM_PORT_CLEAN) {
+ tcpm_log(port, "port clean");
+ if (port->state == CHECK_CONTAMINANT) {
+ if (tcpm_start_toggling(port, tcpm_rp_cc(port)))
+ tcpm_set_state(port, TOGGLING, 0);
+ else
+ tcpm_set_state(port, tcpm_default_state(port), 0);
+ }
+ }
spin_lock(&port->pd_event_lock);
}
@@ -5476,6 +5511,21 @@ void tcpm_sourcing_vbus(struct tcpm_port *port)
}
EXPORT_SYMBOL_GPL(tcpm_sourcing_vbus);
+void tcpm_port_clean(struct tcpm_port *port)
+{
+ spin_lock(&port->pd_event_lock);
+ port->pd_events |= TCPM_PORT_CLEAN;
+ spin_unlock(&port->pd_event_lock);
+ kthread_queue_work(port->wq, &port->event_work);
+}
+EXPORT_SYMBOL_GPL(tcpm_port_clean);
+
+bool tcpm_port_is_toggling(struct tcpm_port *port)
+{
+ return port->port_type == TYPEC_PORT_DRP && port->state == TOGGLING;
+}
+EXPORT_SYMBOL_GPL(tcpm_port_is_toggling);
+
static void tcpm_enable_frs_work(struct kthread_work *work)
{
struct tcpm_port *port = container_of(work, struct tcpm_port, enable_frs);
diff --git a/drivers/usb/typec/tipd/core.c b/drivers/usb/typec/tipd/core.c
index 46a4d8b128f0..485b90c13078 100644
--- a/drivers/usb/typec/tipd/core.c
+++ b/drivers/usb/typec/tipd/core.c
@@ -95,6 +95,7 @@ struct tps6598x {
struct power_supply_desc psy_desc;
enum power_supply_usb_type usb_type;
+ int wakeup;
u16 pwr_status;
};
@@ -846,6 +847,12 @@ static int tps6598x_probe(struct i2c_client *client)
i2c_set_clientdata(client, tps);
fwnode_handle_put(fwnode);
+ tps->wakeup = device_property_read_bool(tps->dev, "wakeup-source");
+ if (tps->wakeup) {
+ device_init_wakeup(&client->dev, true);
+ enable_irq_wake(client->irq);
+ }
+
return 0;
err_disconnect:
@@ -870,6 +877,36 @@ static void tps6598x_remove(struct i2c_client *client)
usb_role_switch_put(tps->role_sw);
}
+static int __maybe_unused tps6598x_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct tps6598x *tps = i2c_get_clientdata(client);
+
+ if (tps->wakeup) {
+ disable_irq(client->irq);
+ enable_irq_wake(client->irq);
+ }
+
+ return 0;
+}
+
+static int __maybe_unused tps6598x_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct tps6598x *tps = i2c_get_clientdata(client);
+
+ if (tps->wakeup) {
+ disable_irq_wake(client->irq);
+ enable_irq(client->irq);
+ }
+
+ return 0;
+}
+
+static const struct dev_pm_ops tps6598x_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(tps6598x_suspend, tps6598x_resume)
+};
+
static const struct of_device_id tps6598x_of_match[] = {
{ .compatible = "ti,tps6598x", },
{ .compatible = "apple,cd321x", },
@@ -886,6 +923,7 @@ MODULE_DEVICE_TABLE(i2c, tps6598x_id);
static struct i2c_driver tps6598x_i2c_driver = {
.driver = {
.name = "tps6598x",
+ .pm = &tps6598x_pm_ops,
.of_match_table = tps6598x_of_match,
},
.probe_new = tps6598x_probe,
diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
index 1292241d581a..f632350f6dcb 100644
--- a/drivers/usb/typec/ucsi/ucsi.c
+++ b/drivers/usb/typec/ucsi/ucsi.c
@@ -567,8 +567,9 @@ static void ucsi_unregister_altmodes(struct ucsi_connector *con, u8 recipient)
}
}
-static int ucsi_get_pdos(struct ucsi_connector *con, int is_partner,
- u32 *pdos, int offset, int num_pdos)
+static int ucsi_read_pdos(struct ucsi_connector *con,
+ enum typec_role role, int is_partner,
+ u32 *pdos, int offset, int num_pdos)
{
struct ucsi *ucsi = con->ucsi;
u64 command;
@@ -578,7 +579,7 @@ static int ucsi_get_pdos(struct ucsi_connector *con, int is_partner,
command |= UCSI_GET_PDOS_PARTNER_PDO(is_partner);
command |= UCSI_GET_PDOS_PDO_OFFSET(offset);
command |= UCSI_GET_PDOS_NUM_PDOS(num_pdos - 1);
- command |= UCSI_GET_PDOS_SRC_PDOS;
+ command |= is_source(role) ? UCSI_GET_PDOS_SRC_PDOS : 0;
ret = ucsi_send_command(ucsi, command, pdos + offset,
num_pdos * sizeof(u32));
if (ret < 0 && ret != -ETIMEDOUT)
@@ -587,30 +588,43 @@ static int ucsi_get_pdos(struct ucsi_connector *con, int is_partner,
return ret;
}
-static int ucsi_get_src_pdos(struct ucsi_connector *con)
+static int ucsi_get_pdos(struct ucsi_connector *con, enum typec_role role,
+ int is_partner, u32 *pdos)
{
+ u8 num_pdos;
int ret;
/* UCSI max payload means only getting at most 4 PDOs at a time */
- ret = ucsi_get_pdos(con, 1, con->src_pdos, 0, UCSI_MAX_PDOS);
+ ret = ucsi_read_pdos(con, role, is_partner, pdos, 0, UCSI_MAX_PDOS);
if (ret < 0)
return ret;
- con->num_pdos = ret / sizeof(u32); /* number of bytes to 32-bit PDOs */
- if (con->num_pdos < UCSI_MAX_PDOS)
- return 0;
+ num_pdos = ret / sizeof(u32); /* number of bytes to 32-bit PDOs */
+ if (num_pdos < UCSI_MAX_PDOS)
+ return num_pdos;
/* get the remaining PDOs, if any */
- ret = ucsi_get_pdos(con, 1, con->src_pdos, UCSI_MAX_PDOS,
- PDO_MAX_OBJECTS - UCSI_MAX_PDOS);
+ ret = ucsi_read_pdos(con, role, is_partner, pdos, UCSI_MAX_PDOS,
+ PDO_MAX_OBJECTS - UCSI_MAX_PDOS);
+ if (ret < 0)
+ return ret;
+
+ return ret / sizeof(u32) + num_pdos;
+}
+
+static int ucsi_get_src_pdos(struct ucsi_connector *con)
+{
+ int ret;
+
+ ret = ucsi_get_pdos(con, TYPEC_SOURCE, 1, con->src_pdos);
if (ret < 0)
return ret;
- con->num_pdos += ret / sizeof(u32);
+ con->num_pdos = ret;
ucsi_port_psy_changed(con);
- return 0;
+ return ret;
}
static int ucsi_check_altmodes(struct ucsi_connector *con)
@@ -635,6 +649,72 @@ static int ucsi_check_altmodes(struct ucsi_connector *con)
return ret;
}
+static int ucsi_register_partner_pdos(struct ucsi_connector *con)
+{
+ struct usb_power_delivery_desc desc = { con->ucsi->cap.pd_version };
+ struct usb_power_delivery_capabilities_desc caps;
+ struct usb_power_delivery_capabilities *cap;
+ int ret;
+
+ if (con->partner_pd)
+ return 0;
+
+ con->partner_pd = usb_power_delivery_register(NULL, &desc);
+ if (IS_ERR(con->partner_pd))
+ return PTR_ERR(con->partner_pd);
+
+ ret = ucsi_get_pdos(con, TYPEC_SOURCE, 1, caps.pdo);
+ if (ret > 0) {
+ if (ret < PDO_MAX_OBJECTS)
+ caps.pdo[ret] = 0;
+
+ caps.role = TYPEC_SOURCE;
+ cap = usb_power_delivery_register_capabilities(con->partner_pd, &caps);
+ if (IS_ERR(cap))
+ return PTR_ERR(cap);
+
+ con->partner_source_caps = cap;
+
+ ret = typec_partner_set_usb_power_delivery(con->partner, con->partner_pd);
+ if (ret) {
+ usb_power_delivery_unregister_capabilities(con->partner_source_caps);
+ return ret;
+ }
+ }
+
+ ret = ucsi_get_pdos(con, TYPEC_SINK, 1, caps.pdo);
+ if (ret > 0) {
+ if (ret < PDO_MAX_OBJECTS)
+ caps.pdo[ret] = 0;
+
+ caps.role = TYPEC_SINK;
+
+ cap = usb_power_delivery_register_capabilities(con->partner_pd, &caps);
+ if (IS_ERR(cap))
+ return PTR_ERR(cap);
+
+ con->partner_sink_caps = cap;
+
+ ret = typec_partner_set_usb_power_delivery(con->partner, con->partner_pd);
+ if (ret) {
+ usb_power_delivery_unregister_capabilities(con->partner_sink_caps);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void ucsi_unregister_partner_pdos(struct ucsi_connector *con)
+{
+ usb_power_delivery_unregister_capabilities(con->partner_sink_caps);
+ con->partner_sink_caps = NULL;
+ usb_power_delivery_unregister_capabilities(con->partner_source_caps);
+ con->partner_source_caps = NULL;
+ usb_power_delivery_unregister(con->partner_pd);
+ con->partner_pd = NULL;
+}
+
static void ucsi_pwr_opmode_change(struct ucsi_connector *con)
{
switch (UCSI_CONSTAT_PWR_OPMODE(con->status.flags)) {
@@ -643,6 +723,7 @@ static void ucsi_pwr_opmode_change(struct ucsi_connector *con)
typec_set_pwr_opmode(con->port, TYPEC_PWR_MODE_PD);
ucsi_partner_task(con, ucsi_get_src_pdos, 30, 0);
ucsi_partner_task(con, ucsi_check_altmodes, 30, 0);
+ ucsi_partner_task(con, ucsi_register_partner_pdos, 1, HZ);
break;
case UCSI_CONSTAT_PWR_OPMODE_TYPEC1_5:
con->rdo = 0;
@@ -701,6 +782,7 @@ static void ucsi_unregister_partner(struct ucsi_connector *con)
if (!con->partner)
return;
+ ucsi_unregister_partner_pdos(con);
ucsi_unregister_altmodes(con, UCSI_RECIPIENT_SOP);
typec_unregister_partner(con->partner);
con->partner = NULL;
@@ -805,6 +887,10 @@ static void ucsi_handle_connector_change(struct work_struct *work)
if (con->status.flags & UCSI_CONSTAT_CONNECTED) {
ucsi_register_partner(con);
ucsi_partner_task(con, ucsi_check_connection, 1, HZ);
+
+ if (UCSI_CONSTAT_PWR_OPMODE(con->status.flags) ==
+ UCSI_CONSTAT_PWR_OPMODE_PD)
+ ucsi_partner_task(con, ucsi_register_partner_pdos, 1, HZ);
} else {
ucsi_unregister_partner(con);
}
@@ -1041,6 +1127,9 @@ static struct fwnode_handle *ucsi_find_fwnode(struct ucsi_connector *con)
static int ucsi_register_port(struct ucsi *ucsi, int index)
{
+ struct usb_power_delivery_desc desc = { ucsi->cap.pd_version};
+ struct usb_power_delivery_capabilities_desc pd_caps;
+ struct usb_power_delivery_capabilities *pd_cap;
struct ucsi_connector *con = &ucsi->connector[index];
struct typec_capability *cap = &con->typec_cap;
enum typec_accessory *accessory = cap->accessory;
@@ -1120,6 +1209,41 @@ static int ucsi_register_port(struct ucsi *ucsi, int index)
goto out;
}
+ con->pd = usb_power_delivery_register(ucsi->dev, &desc);
+
+ ret = ucsi_get_pdos(con, TYPEC_SOURCE, 0, pd_caps.pdo);
+ if (ret > 0) {
+ if (ret < PDO_MAX_OBJECTS)
+ pd_caps.pdo[ret] = 0;
+
+ pd_caps.role = TYPEC_SOURCE;
+ pd_cap = usb_power_delivery_register_capabilities(con->pd, &pd_caps);
+ if (IS_ERR(pd_cap)) {
+ ret = PTR_ERR(pd_cap);
+ goto out;
+ }
+
+ con->port_source_caps = pd_cap;
+ typec_port_set_usb_power_delivery(con->port, con->pd);
+ }
+
+ memset(&pd_caps, 0, sizeof(pd_caps));
+ ret = ucsi_get_pdos(con, TYPEC_SINK, 0, pd_caps.pdo);
+ if (ret > 0) {
+ if (ret < PDO_MAX_OBJECTS)
+ pd_caps.pdo[ret] = 0;
+
+ pd_caps.role = TYPEC_SINK;
+ pd_cap = usb_power_delivery_register_capabilities(con->pd, &pd_caps);
+ if (IS_ERR(pd_cap)) {
+ ret = PTR_ERR(pd_cap);
+ goto out;
+ }
+
+ con->port_sink_caps = pd_cap;
+ typec_port_set_usb_power_delivery(con->port, con->pd);
+ }
+
/* Alternate modes */
ret = ucsi_register_altmodes(con, UCSI_RECIPIENT_CON);
if (ret) {
@@ -1158,8 +1282,8 @@ static int ucsi_register_port(struct ucsi *ucsi, int index)
if (con->status.flags & UCSI_CONSTAT_CONNECTED) {
typec_set_pwr_role(con->port,
!!(con->status.flags & UCSI_CONSTAT_PWR_DIR));
- ucsi_pwr_opmode_change(con);
ucsi_register_partner(con);
+ ucsi_pwr_opmode_change(con);
ucsi_port_psy_changed(con);
}
@@ -1265,10 +1389,20 @@ err_unregister:
ucsi_unregister_port_psy(con);
if (con->wq)
destroy_workqueue(con->wq);
+
+ usb_power_delivery_unregister_capabilities(con->port_sink_caps);
+ con->port_sink_caps = NULL;
+ usb_power_delivery_unregister_capabilities(con->port_source_caps);
+ con->port_source_caps = NULL;
+ usb_power_delivery_unregister(con->pd);
+ con->pd = NULL;
typec_unregister_port(con->port);
con->port = NULL;
}
+ kfree(ucsi->connector);
+ ucsi->connector = NULL;
+
err_reset:
memset(&ucsi->cap, 0, sizeof(ucsi->cap));
ucsi_reset_ppm(ucsi);
@@ -1300,7 +1434,8 @@ static void ucsi_resume_work(struct work_struct *work)
int ucsi_resume(struct ucsi *ucsi)
{
- queue_work(system_long_wq, &ucsi->resume_work);
+ if (ucsi->connector)
+ queue_work(system_long_wq, &ucsi->resume_work);
return 0;
}
EXPORT_SYMBOL_GPL(ucsi_resume);
@@ -1420,6 +1555,9 @@ void ucsi_unregister(struct ucsi *ucsi)
/* Disable notifications */
ucsi->ops->async_write(ucsi, UCSI_CONTROL, &cmd, sizeof(cmd));
+ if (!ucsi->connector)
+ return;
+
for (i = 0; i < ucsi->cap.num_connectors; i++) {
cancel_work_sync(&ucsi->connector[i].work);
ucsi_unregister_partner(&ucsi->connector[i]);
@@ -1440,6 +1578,13 @@ void ucsi_unregister(struct ucsi *ucsi)
mutex_unlock(&ucsi->connector[i].lock);
destroy_workqueue(ucsi->connector[i].wq);
}
+
+ usb_power_delivery_unregister_capabilities(ucsi->connector[i].port_sink_caps);
+ ucsi->connector[i].port_sink_caps = NULL;
+ usb_power_delivery_unregister_capabilities(ucsi->connector[i].port_source_caps);
+ ucsi->connector[i].port_source_caps = NULL;
+ usb_power_delivery_unregister(ucsi->connector[i].pd);
+ ucsi->connector[i].pd = NULL;
typec_unregister_port(ucsi->connector[i].port);
}
diff --git a/drivers/usb/typec/ucsi/ucsi.h b/drivers/usb/typec/ucsi/ucsi.h
index 60ce9fb6e745..c09af859f573 100644
--- a/drivers/usb/typec/ucsi/ucsi.h
+++ b/drivers/usb/typec/ucsi/ucsi.h
@@ -340,6 +340,14 @@ struct ucsi_connector {
u32 src_pdos[PDO_MAX_OBJECTS];
int num_pdos;
+ /* USB PD objects */
+ struct usb_power_delivery *pd;
+ struct usb_power_delivery_capabilities *port_source_caps;
+ struct usb_power_delivery_capabilities *port_sink_caps;
+ struct usb_power_delivery *partner_pd;
+ struct usb_power_delivery_capabilities *partner_source_caps;
+ struct usb_power_delivery_capabilities *partner_sink_caps;
+
struct usb_role_switch *usb_role_sw;
};
diff --git a/drivers/usb/typec/ucsi/ucsi_ccg.c b/drivers/usb/typec/ucsi/ucsi_ccg.c
index 46441f1477f2..e0ed465bd518 100644
--- a/drivers/usb/typec/ucsi/ucsi_ccg.c
+++ b/drivers/usb/typec/ucsi/ucsi_ccg.c
@@ -643,7 +643,7 @@ static int ccg_request_irq(struct ucsi_ccg *uc)
{
unsigned long flags = IRQF_ONESHOT;
- if (!has_acpi_companion(uc->dev))
+ if (!dev_fwnode(uc->dev))
flags |= IRQF_TRIGGER_HIGH;
return request_threaded_irq(uc->irq, NULL, ccg_irq_handler, flags, dev_name(uc->dev), uc);
@@ -1342,6 +1342,7 @@ static int ucsi_ccg_probe(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct ucsi_ccg *uc;
+ const char *fw_name;
int status;
uc = devm_kzalloc(dev, sizeof(*uc), GFP_KERNEL);
@@ -1357,9 +1358,15 @@ static int ucsi_ccg_probe(struct i2c_client *client)
INIT_WORK(&uc->pm_work, ccg_pm_workaround_work);
/* Only fail FW flashing when FW build information is not provided */
- status = device_property_read_u16(dev, "ccgx,firmware-build",
- &uc->fw_build);
- if (status)
+ status = device_property_read_string(dev, "firmware-name", &fw_name);
+ if (!status) {
+ if (!strcmp(fw_name, "nvidia,jetson-agx-xavier"))
+ uc->fw_build = CCG_FW_BUILD_NVIDIA_TEGRA;
+ else if (!strcmp(fw_name, "nvidia,gpu"))
+ uc->fw_build = CCG_FW_BUILD_NVIDIA;
+ }
+
+ if (!uc->fw_build)
dev_err(uc->dev, "failed to get FW build information\n");
/* reset ccg device and initialize ucsi */
@@ -1426,6 +1433,12 @@ static void ucsi_ccg_remove(struct i2c_client *client)
free_irq(uc->irq, uc);
}
+static const struct of_device_id ucsi_ccg_of_match_table[] = {
+ { .compatible = "cypress,cypd4226", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ucsi_ccg_of_match_table);
+
static const struct i2c_device_id ucsi_ccg_device_id[] = {
{"ccgx-ucsi", 0},
{}
@@ -1480,6 +1493,7 @@ static struct i2c_driver ucsi_ccg_driver = {
.pm = &ucsi_ccg_pm,
.dev_groups = ucsi_ccg_groups,
.acpi_match_table = amd_i2c_ucsi_match,
+ .of_match_table = ucsi_ccg_of_match_table,
},
.probe_new = ucsi_ccg_probe,
.remove = ucsi_ccg_remove,
diff --git a/drivers/vdpa/ifcvf/ifcvf_main.c b/drivers/vdpa/ifcvf/ifcvf_main.c
index f9c0044c6442..44b29289aa19 100644
--- a/drivers/vdpa/ifcvf/ifcvf_main.c
+++ b/drivers/vdpa/ifcvf/ifcvf_main.c
@@ -849,7 +849,7 @@ static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ret = ifcvf_init_hw(vf, pdev);
if (ret) {
IFCVF_ERR(pdev, "Failed to init IFCVF hw\n");
- return ret;
+ goto err;
}
for (i = 0; i < vf->nr_vring; i++)
diff --git a/drivers/vdpa/vdpa_user/iova_domain.c b/drivers/vdpa/vdpa_user/iova_domain.c
index e682bc7ee6c9..5e4a77b9bae6 100644
--- a/drivers/vdpa/vdpa_user/iova_domain.c
+++ b/drivers/vdpa/vdpa_user/iova_domain.c
@@ -512,7 +512,7 @@ static int vduse_domain_mmap(struct file *file, struct vm_area_struct *vma)
{
struct vduse_iova_domain *domain = file->private_data;
- vma->vm_flags |= VM_DONTDUMP | VM_DONTEXPAND;
+ vm_flags_set(vma, VM_DONTDUMP | VM_DONTEXPAND);
vma->vm_private_data = domain;
vma->vm_ops = &vduse_domain_mmap_ops;
diff --git a/drivers/vdpa/vdpa_user/iova_domain.h b/drivers/vdpa/vdpa_user/iova_domain.h
index 4e0e50e7ac15..173e979b84a9 100644
--- a/drivers/vdpa/vdpa_user/iova_domain.h
+++ b/drivers/vdpa/vdpa_user/iova_domain.h
@@ -14,7 +14,6 @@
#include <linux/iova.h>
#include <linux/dma-mapping.h>
#include <linux/vhost_iotlb.h>
-#include <linux/rwlock.h>
#define IOVA_START_PFN 1
diff --git a/drivers/vfio/Kconfig b/drivers/vfio/Kconfig
index a8f544629467..89e06c981e43 100644
--- a/drivers/vfio/Kconfig
+++ b/drivers/vfio/Kconfig
@@ -32,6 +32,7 @@ config VFIO_IOMMU_SPAPR_TCE
tristate
depends on SPAPR_TCE_IOMMU
default VFIO
+endif
config VFIO_NOIOMMU
bool "VFIO No-IOMMU support"
@@ -46,7 +47,6 @@ config VFIO_NOIOMMU
this mode since there is no IOMMU to provide DMA translation.
If you don't know what to do here, say N.
-endif
config VFIO_VIRQFD
bool
diff --git a/drivers/vfio/container.c b/drivers/vfio/container.c
index b7a9560ab25e..89f10becf962 100644
--- a/drivers/vfio/container.c
+++ b/drivers/vfio/container.c
@@ -29,13 +29,6 @@ static struct vfio {
struct mutex iommu_drivers_lock;
} vfio;
-#ifdef CONFIG_VFIO_NOIOMMU
-bool vfio_noiommu __read_mostly;
-module_param_named(enable_unsafe_noiommu_mode,
- vfio_noiommu, bool, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(enable_unsafe_noiommu_mode, "Enable UNSAFE, no-IOMMU mode. This mode provides no device isolation, no DMA translation, no host kernel protection, cannot be used for device assignment to virtual machines, requires RAWIO permissions, and will taint the kernel. If you do not know what this is for, step away. (default: false)");
-#endif
-
static void *vfio_noiommu_open(unsigned long arg)
{
if (arg != VFIO_NOIOMMU_IOMMU)
diff --git a/drivers/vfio/group.c b/drivers/vfio/group.c
index bb24b2f0271e..e166ad7ce6e7 100644
--- a/drivers/vfio/group.c
+++ b/drivers/vfio/group.c
@@ -133,9 +133,12 @@ static int vfio_group_ioctl_set_container(struct vfio_group *group,
iommufd = iommufd_ctx_from_file(f.file);
if (!IS_ERR(iommufd)) {
- u32 ioas_id;
+ if (IS_ENABLED(CONFIG_VFIO_NOIOMMU) &&
+ group->type == VFIO_NO_IOMMU)
+ ret = iommufd_vfio_compat_set_no_iommu(iommufd);
+ else
+ ret = iommufd_vfio_compat_ioas_create(iommufd);
- ret = iommufd_vfio_compat_ioas_id(iommufd, &ioas_id);
if (ret) {
iommufd_ctx_put(group->iommufd);
goto out_unlock;
diff --git a/drivers/vfio/iommufd.c b/drivers/vfio/iommufd.c
index 4f82a6fa7c6c..db4efbd56042 100644
--- a/drivers/vfio/iommufd.c
+++ b/drivers/vfio/iommufd.c
@@ -18,6 +18,20 @@ int vfio_iommufd_bind(struct vfio_device *vdev, struct iommufd_ctx *ictx)
lockdep_assert_held(&vdev->dev_set->lock);
+ if (vfio_device_is_noiommu(vdev)) {
+ if (!capable(CAP_SYS_RAWIO))
+ return -EPERM;
+
+ /*
+ * Require no compat ioas to be assigned to proceed. The basic
+ * statement is that the user cannot have done something that
+ * implies they expected translation to exist
+ */
+ if (!iommufd_vfio_compat_ioas_get_id(ictx, &ioas_id))
+ return -EPERM;
+ return 0;
+ }
+
/*
* If the driver doesn't provide this op then it means the device does
* not do DMA at all. So nothing to do.
@@ -29,7 +43,7 @@ int vfio_iommufd_bind(struct vfio_device *vdev, struct iommufd_ctx *ictx)
if (ret)
return ret;
- ret = iommufd_vfio_compat_ioas_id(ictx, &ioas_id);
+ ret = iommufd_vfio_compat_ioas_get_id(ictx, &ioas_id);
if (ret)
goto err_unbind;
ret = vdev->ops->attach_ioas(vdev, &ioas_id);
@@ -52,6 +66,9 @@ void vfio_iommufd_unbind(struct vfio_device *vdev)
{
lockdep_assert_held(&vdev->dev_set->lock);
+ if (vfio_device_is_noiommu(vdev))
+ return;
+
if (vdev->ops->unbind_iommufd)
vdev->ops->unbind_iommufd(vdev);
}
diff --git a/drivers/vfio/pci/vfio_pci_core.c b/drivers/vfio/pci/vfio_pci_core.c
index 26a541cc64d1..c49f8f2b2865 100644
--- a/drivers/vfio/pci/vfio_pci_core.c
+++ b/drivers/vfio/pci/vfio_pci_core.c
@@ -1799,7 +1799,7 @@ int vfio_pci_core_mmap(struct vfio_device *core_vdev, struct vm_area_struct *vma
* See remap_pfn_range(), called from vfio_pci_fault() but we can't
* change vm_flags within the fault handler. Set them now.
*/
- vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
+ vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
vma->vm_ops = &vfio_pci_mmap_ops;
return 0;
diff --git a/drivers/vfio/vfio.h b/drivers/vfio/vfio.h
index f8219a438bfb..9e94abcf8ee1 100644
--- a/drivers/vfio/vfio.h
+++ b/drivers/vfio/vfio.h
@@ -10,10 +10,10 @@
#include <linux/device.h>
#include <linux/cdev.h>
#include <linux/module.h>
+#include <linux/vfio.h>
struct iommufd_ctx;
struct iommu_group;
-struct vfio_device;
struct vfio_container;
void vfio_device_put_registration(struct vfio_device *device);
@@ -88,6 +88,12 @@ bool vfio_device_has_container(struct vfio_device *device);
int __init vfio_group_init(void);
void vfio_group_cleanup(void);
+static inline bool vfio_device_is_noiommu(struct vfio_device *vdev)
+{
+ return IS_ENABLED(CONFIG_VFIO_NOIOMMU) &&
+ vdev->group->type == VFIO_NO_IOMMU;
+}
+
#if IS_ENABLED(CONFIG_VFIO_CONTAINER)
/* events for the backend driver notify callback */
enum vfio_iommu_notify_type {
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 2209372f236d..a44ac3fe657c 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -37,7 +37,6 @@
#include <linux/vfio.h>
#include <linux/workqueue.h>
#include <linux/notifier.h>
-#include <linux/irqdomain.h>
#include "vfio.h"
#define DRIVER_VERSION "0.2"
@@ -1480,7 +1479,8 @@ static int vfio_iommu_map(struct vfio_iommu *iommu, dma_addr_t iova,
list_for_each_entry(d, &iommu->domain_list, next) {
ret = iommu_map(d->domain, iova, (phys_addr_t)pfn << PAGE_SHIFT,
- npage << PAGE_SHIFT, prot | IOMMU_CACHE);
+ npage << PAGE_SHIFT, prot | IOMMU_CACHE,
+ GFP_KERNEL);
if (ret)
goto unwind;
@@ -1777,8 +1777,8 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,
size = npage << PAGE_SHIFT;
}
- ret = iommu_map(domain->domain, iova, phys,
- size, dma->prot | IOMMU_CACHE);
+ ret = iommu_map(domain->domain, iova, phys, size,
+ dma->prot | IOMMU_CACHE, GFP_KERNEL);
if (ret) {
if (!dma->iommu_mapped) {
vfio_unpin_pages_remote(dma, iova,
@@ -1873,7 +1873,7 @@ static void vfio_test_domain_fgsp(struct vfio_domain *domain, struct list_head *
continue;
ret = iommu_map(domain->domain, start, page_to_phys(pages), PAGE_SIZE * 2,
- IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE);
+ IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE, GFP_KERNEL);
if (!ret) {
size_t unmapped = iommu_unmap(domain->domain, start, PAGE_SIZE);
@@ -2169,12 +2169,6 @@ static void vfio_iommu_iova_insert_copy(struct vfio_iommu *iommu,
list_splice_tail(iova_copy, iova);
}
-/* Redundantly walks non-present capabilities to simplify caller */
-static int vfio_iommu_device_capable(struct device *dev, void *data)
-{
- return device_iommu_capable(dev, (enum iommu_cap)data);
-}
-
static int vfio_iommu_domain_alloc(struct device *dev, void *data)
{
struct iommu_domain **domain = data;
@@ -2189,7 +2183,7 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
struct vfio_iommu *iommu = iommu_data;
struct vfio_iommu_group *group;
struct vfio_domain *domain, *d;
- bool resv_msi, msi_remap;
+ bool resv_msi;
phys_addr_t resv_msi_base = 0;
struct iommu_domain_geometry *geo;
LIST_HEAD(iova_copy);
@@ -2287,11 +2281,8 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
INIT_LIST_HEAD(&domain->group_list);
list_add(&group->next, &domain->group_list);
- msi_remap = irq_domain_check_msi_remap() ||
- iommu_group_for_each_dev(iommu_group, (void *)IOMMU_CAP_INTR_REMAP,
- vfio_iommu_device_capable);
-
- if (!allow_unsafe_interrupts && !msi_remap) {
+ if (!allow_unsafe_interrupts &&
+ !iommu_group_has_isolated_msi(iommu_group)) {
pr_warn("%s: No interrupt remapping support. Use the module param \"allow_unsafe_interrupts\" to enable VFIO IOMMU support on this platform\n",
__func__);
ret = -EPERM;
diff --git a/drivers/vfio/vfio_main.c b/drivers/vfio/vfio_main.c
index 5177bb061b17..90541fc94988 100644
--- a/drivers/vfio/vfio_main.c
+++ b/drivers/vfio/vfio_main.c
@@ -45,6 +45,13 @@ static struct vfio {
struct ida device_ida;
} vfio;
+#ifdef CONFIG_VFIO_NOIOMMU
+bool vfio_noiommu __read_mostly;
+module_param_named(enable_unsafe_noiommu_mode,
+ vfio_noiommu, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(enable_unsafe_noiommu_mode, "Enable UNSAFE, no-IOMMU mode. This mode provides no device isolation, no DMA translation, no host kernel protection, cannot be used for device assignment to virtual machines, requires RAWIO permissions, and will taint the kernel. If you do not know what this is for, step away. (default: false)");
+#endif
+
static DEFINE_XARRAY(vfio_device_set_xa);
int vfio_assign_device_set(struct vfio_device *device, void *set_id)
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 9af19b0cf3b7..4c538b30fd76 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -1511,6 +1511,9 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
nvq = &n->vqs[index];
mutex_lock(&vq->mutex);
+ if (fd == -1)
+ vhost_clear_msg(&n->dev);
+
/* Verify that ring has been setup correctly. */
if (!vhost_vq_access_ok(vq)) {
r = -EFAULT;
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index dca6346d75b3..d5ecb8876fc9 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -80,7 +80,7 @@ struct vhost_scsi_cmd {
struct scatterlist *tvc_prot_sgl;
struct page **tvc_upages;
/* Pointer to response header iovec */
- struct iovec tvc_resp_iov;
+ struct iovec *tvc_resp_iov;
/* Pointer to vhost_scsi for our device */
struct vhost_scsi *tvc_vhost;
/* Pointer to vhost_virtqueue for the cmd */
@@ -563,7 +563,7 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
memcpy(v_rsp.sense, cmd->tvc_sense_buf,
se_cmd->scsi_sense_length);
- iov_iter_init(&iov_iter, ITER_DEST, &cmd->tvc_resp_iov,
+ iov_iter_init(&iov_iter, ITER_DEST, cmd->tvc_resp_iov,
cmd->tvc_in_iovs, sizeof(v_rsp));
ret = copy_to_iter(&v_rsp, sizeof(v_rsp), &iov_iter);
if (likely(ret == sizeof(v_rsp))) {
@@ -594,6 +594,7 @@ vhost_scsi_get_cmd(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg,
struct vhost_scsi_cmd *cmd;
struct vhost_scsi_nexus *tv_nexus;
struct scatterlist *sg, *prot_sg;
+ struct iovec *tvc_resp_iov;
struct page **pages;
int tag;
@@ -613,6 +614,7 @@ vhost_scsi_get_cmd(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg,
sg = cmd->tvc_sgl;
prot_sg = cmd->tvc_prot_sgl;
pages = cmd->tvc_upages;
+ tvc_resp_iov = cmd->tvc_resp_iov;
memset(cmd, 0, sizeof(*cmd));
cmd->tvc_sgl = sg;
cmd->tvc_prot_sgl = prot_sg;
@@ -625,6 +627,7 @@ vhost_scsi_get_cmd(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg,
cmd->tvc_data_direction = data_direction;
cmd->tvc_nexus = tv_nexus;
cmd->inflight = vhost_scsi_get_inflight(vq);
+ cmd->tvc_resp_iov = tvc_resp_iov;
memcpy(cmd->tvc_cdb, cdb, VHOST_SCSI_MAX_CDB_SIZE);
@@ -935,7 +938,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
struct iov_iter in_iter, prot_iter, data_iter;
u64 tag;
u32 exp_data_len, data_direction;
- int ret, prot_bytes, c = 0;
+ int ret, prot_bytes, i, c = 0;
u16 lun;
u8 task_attr;
bool t10_pi = vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI);
@@ -1092,7 +1095,8 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
}
cmd->tvc_vhost = vs;
cmd->tvc_vq = vq;
- cmd->tvc_resp_iov = vq->iov[vc.out];
+ for (i = 0; i < vc.in ; i++)
+ cmd->tvc_resp_iov[i] = vq->iov[vc.out + i];
cmd->tvc_in_iovs = vc.in;
pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
@@ -1461,6 +1465,7 @@ static void vhost_scsi_destroy_vq_cmds(struct vhost_virtqueue *vq)
kfree(tv_cmd->tvc_sgl);
kfree(tv_cmd->tvc_prot_sgl);
kfree(tv_cmd->tvc_upages);
+ kfree(tv_cmd->tvc_resp_iov);
}
sbitmap_free(&svq->scsi_tags);
@@ -1508,6 +1513,14 @@ static int vhost_scsi_setup_vq_cmds(struct vhost_virtqueue *vq, int max_cmds)
goto out;
}
+ tv_cmd->tvc_resp_iov = kcalloc(UIO_MAXIOV,
+ sizeof(struct iovec),
+ GFP_KERNEL);
+ if (!tv_cmd->tvc_resp_iov) {
+ pr_err("Unable to allocate tv_cmd->tvc_resp_iov\n");
+ goto out;
+ }
+
tv_cmd->tvc_prot_sgl = kcalloc(VHOST_SCSI_PREALLOC_PROT_SGLS,
sizeof(struct scatterlist),
GFP_KERNEL);
diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
index ec32f785dfde..315f9ba47ff2 100644
--- a/drivers/vhost/vdpa.c
+++ b/drivers/vhost/vdpa.c
@@ -792,7 +792,7 @@ static int vhost_vdpa_map(struct vhost_vdpa *v, struct vhost_iotlb *iotlb,
r = ops->set_map(vdpa, asid, iotlb);
} else {
r = iommu_map(v->domain, iova, pa, size,
- perm_to_iommu_flags(perm));
+ perm_to_iommu_flags(perm), GFP_KERNEL);
}
if (r) {
vhost_iotlb_del_range(iotlb, iova, iova + size - 1);
@@ -1315,7 +1315,7 @@ static int vhost_vdpa_mmap(struct file *file, struct vm_area_struct *vma)
if (vma->vm_end - vma->vm_start != notify.size)
return -ENOTSUPP;
- vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
+ vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
vma->vm_ops = &vhost_vdpa_vm_ops;
return 0;
}
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index cbe72bfd2f1f..43c9770b86e5 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -661,7 +661,7 @@ void vhost_dev_stop(struct vhost_dev *dev)
}
EXPORT_SYMBOL_GPL(vhost_dev_stop);
-static void vhost_clear_msg(struct vhost_dev *dev)
+void vhost_clear_msg(struct vhost_dev *dev)
{
struct vhost_msg_node *node, *n;
@@ -679,6 +679,7 @@ static void vhost_clear_msg(struct vhost_dev *dev)
spin_unlock(&dev->iotlb_lock);
}
+EXPORT_SYMBOL_GPL(vhost_clear_msg);
void vhost_dev_cleanup(struct vhost_dev *dev)
{
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index d9109107af08..790b296271f1 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -181,6 +181,7 @@ long vhost_dev_ioctl(struct vhost_dev *, unsigned int ioctl, void __user *argp);
long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp);
bool vhost_vq_access_ok(struct vhost_virtqueue *vq);
bool vhost_log_access_ok(struct vhost_dev *);
+void vhost_clear_msg(struct vhost_dev *dev);
int vhost_get_vq_desc(struct vhost_virtqueue *,
struct iovec iov[], unsigned int iov_count,
diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c
index 33eb941fcf15..a1e27da54481 100644
--- a/drivers/vhost/vringh.c
+++ b/drivers/vhost/vringh.c
@@ -1126,9 +1126,8 @@ static int iotlb_translate(const struct vringh *vrh,
size = map->size - addr + map->start;
pa = map->addr + addr - map->start;
pfn = pa >> PAGE_SHIFT;
- iov[ret].bv_page = pfn_to_page(pfn);
- iov[ret].bv_len = min(len - s, size);
- iov[ret].bv_offset = pa & (PAGE_SIZE - 1);
+ bvec_set_page(&iov[ret], pfn_to_page(pfn), min(len - s, size),
+ pa & (PAGE_SIZE - 1));
s += size;
addr += size;
++ret;
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index a2b374372363..1f3b89c885cc 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -51,8 +51,7 @@ struct vhost_vsock {
struct hlist_node hash;
struct vhost_work send_pkt_work;
- spinlock_t send_pkt_list_lock;
- struct list_head send_pkt_list; /* host->guest pending packets */
+ struct sk_buff_head send_pkt_queue; /* host->guest pending packets */
atomic_t queued_replies;
@@ -108,40 +107,31 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
vhost_disable_notify(&vsock->dev, vq);
do {
- struct virtio_vsock_pkt *pkt;
+ struct virtio_vsock_hdr *hdr;
+ size_t iov_len, payload_len;
struct iov_iter iov_iter;
+ u32 flags_to_restore = 0;
+ struct sk_buff *skb;
unsigned out, in;
size_t nbytes;
- size_t iov_len, payload_len;
int head;
- u32 flags_to_restore = 0;
- spin_lock_bh(&vsock->send_pkt_list_lock);
- if (list_empty(&vsock->send_pkt_list)) {
- spin_unlock_bh(&vsock->send_pkt_list_lock);
+ skb = virtio_vsock_skb_dequeue(&vsock->send_pkt_queue);
+
+ if (!skb) {
vhost_enable_notify(&vsock->dev, vq);
break;
}
- pkt = list_first_entry(&vsock->send_pkt_list,
- struct virtio_vsock_pkt, list);
- list_del_init(&pkt->list);
- spin_unlock_bh(&vsock->send_pkt_list_lock);
-
head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
&out, &in, NULL, NULL);
if (head < 0) {
- spin_lock_bh(&vsock->send_pkt_list_lock);
- list_add(&pkt->list, &vsock->send_pkt_list);
- spin_unlock_bh(&vsock->send_pkt_list_lock);
+ virtio_vsock_skb_queue_head(&vsock->send_pkt_queue, skb);
break;
}
if (head == vq->num) {
- spin_lock_bh(&vsock->send_pkt_list_lock);
- list_add(&pkt->list, &vsock->send_pkt_list);
- spin_unlock_bh(&vsock->send_pkt_list_lock);
-
+ virtio_vsock_skb_queue_head(&vsock->send_pkt_queue, skb);
/* We cannot finish yet if more buffers snuck in while
* re-enabling notify.
*/
@@ -153,26 +143,27 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
}
if (out) {
- virtio_transport_free_pkt(pkt);
+ kfree_skb(skb);
vq_err(vq, "Expected 0 output buffers, got %u\n", out);
break;
}
iov_len = iov_length(&vq->iov[out], in);
- if (iov_len < sizeof(pkt->hdr)) {
- virtio_transport_free_pkt(pkt);
+ if (iov_len < sizeof(*hdr)) {
+ kfree_skb(skb);
vq_err(vq, "Buffer len [%zu] too small\n", iov_len);
break;
}
iov_iter_init(&iov_iter, ITER_DEST, &vq->iov[out], in, iov_len);
- payload_len = pkt->len - pkt->off;
+ payload_len = skb->len;
+ hdr = virtio_vsock_hdr(skb);
/* If the packet is greater than the space available in the
* buffer, we split it using multiple buffers.
*/
- if (payload_len > iov_len - sizeof(pkt->hdr)) {
- payload_len = iov_len - sizeof(pkt->hdr);
+ if (payload_len > iov_len - sizeof(*hdr)) {
+ payload_len = iov_len - sizeof(*hdr);
/* As we are copying pieces of large packet's buffer to
* small rx buffers, headers of packets in rx queue are
@@ -185,31 +176,30 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
* bits set. After initialized header will be copied to
* rx buffer, these required bits will be restored.
*/
- if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SEQ_EOM) {
- pkt->hdr.flags &= ~cpu_to_le32(VIRTIO_VSOCK_SEQ_EOM);
+ if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SEQ_EOM) {
+ hdr->flags &= ~cpu_to_le32(VIRTIO_VSOCK_SEQ_EOM);
flags_to_restore |= VIRTIO_VSOCK_SEQ_EOM;
- if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SEQ_EOR) {
- pkt->hdr.flags &= ~cpu_to_le32(VIRTIO_VSOCK_SEQ_EOR);
+ if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SEQ_EOR) {
+ hdr->flags &= ~cpu_to_le32(VIRTIO_VSOCK_SEQ_EOR);
flags_to_restore |= VIRTIO_VSOCK_SEQ_EOR;
}
}
}
/* Set the correct length in the header */
- pkt->hdr.len = cpu_to_le32(payload_len);
+ hdr->len = cpu_to_le32(payload_len);
- nbytes = copy_to_iter(&pkt->hdr, sizeof(pkt->hdr), &iov_iter);
- if (nbytes != sizeof(pkt->hdr)) {
- virtio_transport_free_pkt(pkt);
+ nbytes = copy_to_iter(hdr, sizeof(*hdr), &iov_iter);
+ if (nbytes != sizeof(*hdr)) {
+ kfree_skb(skb);
vq_err(vq, "Faulted on copying pkt hdr\n");
break;
}
- nbytes = copy_to_iter(pkt->buf + pkt->off, payload_len,
- &iov_iter);
+ nbytes = copy_to_iter(skb->data, payload_len, &iov_iter);
if (nbytes != payload_len) {
- virtio_transport_free_pkt(pkt);
+ kfree_skb(skb);
vq_err(vq, "Faulted on copying pkt buf\n");
break;
}
@@ -217,31 +207,28 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
/* Deliver to monitoring devices all packets that we
* will transmit.
*/
- virtio_transport_deliver_tap_pkt(pkt);
+ virtio_transport_deliver_tap_pkt(skb);
- vhost_add_used(vq, head, sizeof(pkt->hdr) + payload_len);
+ vhost_add_used(vq, head, sizeof(*hdr) + payload_len);
added = true;
- pkt->off += payload_len;
+ skb_pull(skb, payload_len);
total_len += payload_len;
/* If we didn't send all the payload we can requeue the packet
* to send it with the next available buffer.
*/
- if (pkt->off < pkt->len) {
- pkt->hdr.flags |= cpu_to_le32(flags_to_restore);
+ if (skb->len > 0) {
+ hdr->flags |= cpu_to_le32(flags_to_restore);
- /* We are queueing the same virtio_vsock_pkt to handle
+ /* We are queueing the same skb to handle
* the remaining bytes, and we want to deliver it
* to monitoring devices in the next iteration.
*/
- pkt->tap_delivered = false;
-
- spin_lock_bh(&vsock->send_pkt_list_lock);
- list_add(&pkt->list, &vsock->send_pkt_list);
- spin_unlock_bh(&vsock->send_pkt_list_lock);
+ virtio_vsock_skb_clear_tap_delivered(skb);
+ virtio_vsock_skb_queue_head(&vsock->send_pkt_queue, skb);
} else {
- if (pkt->reply) {
+ if (virtio_vsock_skb_reply(skb)) {
int val;
val = atomic_dec_return(&vsock->queued_replies);
@@ -253,7 +240,7 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
restart_tx = true;
}
- virtio_transport_free_pkt(pkt);
+ consume_skb(skb);
}
} while(likely(!vhost_exceeds_weight(vq, ++pkts, total_len)));
if (added)
@@ -278,28 +265,26 @@ static void vhost_transport_send_pkt_work(struct vhost_work *work)
}
static int
-vhost_transport_send_pkt(struct virtio_vsock_pkt *pkt)
+vhost_transport_send_pkt(struct sk_buff *skb)
{
+ struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);
struct vhost_vsock *vsock;
- int len = pkt->len;
+ int len = skb->len;
rcu_read_lock();
/* Find the vhost_vsock according to guest context id */
- vsock = vhost_vsock_get(le64_to_cpu(pkt->hdr.dst_cid));
+ vsock = vhost_vsock_get(le64_to_cpu(hdr->dst_cid));
if (!vsock) {
rcu_read_unlock();
- virtio_transport_free_pkt(pkt);
+ kfree_skb(skb);
return -ENODEV;
}
- if (pkt->reply)
+ if (virtio_vsock_skb_reply(skb))
atomic_inc(&vsock->queued_replies);
- spin_lock_bh(&vsock->send_pkt_list_lock);
- list_add_tail(&pkt->list, &vsock->send_pkt_list);
- spin_unlock_bh(&vsock->send_pkt_list_lock);
-
+ virtio_vsock_skb_queue_tail(&vsock->send_pkt_queue, skb);
vhost_work_queue(&vsock->dev, &vsock->send_pkt_work);
rcu_read_unlock();
@@ -310,10 +295,8 @@ static int
vhost_transport_cancel_pkt(struct vsock_sock *vsk)
{
struct vhost_vsock *vsock;
- struct virtio_vsock_pkt *pkt, *n;
int cnt = 0;
int ret = -ENODEV;
- LIST_HEAD(freeme);
rcu_read_lock();
@@ -322,20 +305,7 @@ vhost_transport_cancel_pkt(struct vsock_sock *vsk)
if (!vsock)
goto out;
- spin_lock_bh(&vsock->send_pkt_list_lock);
- list_for_each_entry_safe(pkt, n, &vsock->send_pkt_list, list) {
- if (pkt->vsk != vsk)
- continue;
- list_move(&pkt->list, &freeme);
- }
- spin_unlock_bh(&vsock->send_pkt_list_lock);
-
- list_for_each_entry_safe(pkt, n, &freeme, list) {
- if (pkt->reply)
- cnt++;
- list_del(&pkt->list);
- virtio_transport_free_pkt(pkt);
- }
+ cnt = virtio_transport_purge_skbs(vsk, &vsock->send_pkt_queue);
if (cnt) {
struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
@@ -352,12 +322,14 @@ out:
return ret;
}
-static struct virtio_vsock_pkt *
-vhost_vsock_alloc_pkt(struct vhost_virtqueue *vq,
+static struct sk_buff *
+vhost_vsock_alloc_skb(struct vhost_virtqueue *vq,
unsigned int out, unsigned int in)
{
- struct virtio_vsock_pkt *pkt;
+ struct virtio_vsock_hdr *hdr;
struct iov_iter iov_iter;
+ struct sk_buff *skb;
+ size_t payload_len;
size_t nbytes;
size_t len;
@@ -366,50 +338,48 @@ vhost_vsock_alloc_pkt(struct vhost_virtqueue *vq,
return NULL;
}
- pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
- if (!pkt)
+ len = iov_length(vq->iov, out);
+
+ /* len contains both payload and hdr */
+ skb = virtio_vsock_alloc_skb(len, GFP_KERNEL);
+ if (!skb)
return NULL;
- len = iov_length(vq->iov, out);
iov_iter_init(&iov_iter, ITER_SOURCE, vq->iov, out, len);
- nbytes = copy_from_iter(&pkt->hdr, sizeof(pkt->hdr), &iov_iter);
- if (nbytes != sizeof(pkt->hdr)) {
+ hdr = virtio_vsock_hdr(skb);
+ nbytes = copy_from_iter(hdr, sizeof(*hdr), &iov_iter);
+ if (nbytes != sizeof(*hdr)) {
vq_err(vq, "Expected %zu bytes for pkt->hdr, got %zu bytes\n",
- sizeof(pkt->hdr), nbytes);
- kfree(pkt);
+ sizeof(*hdr), nbytes);
+ kfree_skb(skb);
return NULL;
}
- pkt->len = le32_to_cpu(pkt->hdr.len);
+ payload_len = le32_to_cpu(hdr->len);
/* No payload */
- if (!pkt->len)
- return pkt;
+ if (!payload_len)
+ return skb;
- /* The pkt is too big */
- if (pkt->len > VIRTIO_VSOCK_MAX_PKT_BUF_SIZE) {
- kfree(pkt);
+ /* The pkt is too big or the length in the header is invalid */
+ if (payload_len > VIRTIO_VSOCK_MAX_PKT_BUF_SIZE ||
+ payload_len + sizeof(*hdr) > len) {
+ kfree_skb(skb);
return NULL;
}
- pkt->buf = kvmalloc(pkt->len, GFP_KERNEL);
- if (!pkt->buf) {
- kfree(pkt);
- return NULL;
- }
+ virtio_vsock_skb_rx_put(skb);
- pkt->buf_len = pkt->len;
-
- nbytes = copy_from_iter(pkt->buf, pkt->len, &iov_iter);
- if (nbytes != pkt->len) {
- vq_err(vq, "Expected %u byte payload, got %zu bytes\n",
- pkt->len, nbytes);
- virtio_transport_free_pkt(pkt);
+ nbytes = copy_from_iter(skb->data, payload_len, &iov_iter);
+ if (nbytes != payload_len) {
+ vq_err(vq, "Expected %zu byte payload, got %zu bytes\n",
+ payload_len, nbytes);
+ kfree_skb(skb);
return NULL;
}
- return pkt;
+ return skb;
}
/* Is there space left for replies to rx packets? */
@@ -496,9 +466,9 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
poll.work);
struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock,
dev);
- struct virtio_vsock_pkt *pkt;
int head, pkts = 0, total_len = 0;
unsigned int out, in;
+ struct sk_buff *skb;
bool added = false;
mutex_lock(&vq->mutex);
@@ -511,6 +481,8 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
vhost_disable_notify(&vsock->dev, vq);
do {
+ struct virtio_vsock_hdr *hdr;
+
if (!vhost_vsock_more_replies(vsock)) {
/* Stop tx until the device processes already
* pending replies. Leave tx virtqueue
@@ -532,24 +504,26 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
break;
}
- pkt = vhost_vsock_alloc_pkt(vq, out, in);
- if (!pkt) {
+ skb = vhost_vsock_alloc_skb(vq, out, in);
+ if (!skb) {
vq_err(vq, "Faulted on pkt\n");
continue;
}
- total_len += sizeof(pkt->hdr) + pkt->len;
+ total_len += sizeof(*hdr) + skb->len;
/* Deliver to monitoring devices all received packets */
- virtio_transport_deliver_tap_pkt(pkt);
+ virtio_transport_deliver_tap_pkt(skb);
+
+ hdr = virtio_vsock_hdr(skb);
/* Only accept correctly addressed packets */
- if (le64_to_cpu(pkt->hdr.src_cid) == vsock->guest_cid &&
- le64_to_cpu(pkt->hdr.dst_cid) ==
+ if (le64_to_cpu(hdr->src_cid) == vsock->guest_cid &&
+ le64_to_cpu(hdr->dst_cid) ==
vhost_transport_get_local_cid())
- virtio_transport_recv_pkt(&vhost_transport, pkt);
+ virtio_transport_recv_pkt(&vhost_transport, skb);
else
- virtio_transport_free_pkt(pkt);
+ kfree_skb(skb);
vhost_add_used(vq, head, 0);
added = true;
@@ -693,8 +667,7 @@ static int vhost_vsock_dev_open(struct inode *inode, struct file *file)
VHOST_VSOCK_WEIGHT, true, NULL);
file->private_data = vsock;
- spin_lock_init(&vsock->send_pkt_list_lock);
- INIT_LIST_HEAD(&vsock->send_pkt_list);
+ skb_queue_head_init(&vsock->send_pkt_queue);
vhost_work_init(&vsock->send_pkt_work, vhost_transport_send_pkt_work);
return 0;
@@ -760,16 +733,7 @@ static int vhost_vsock_dev_release(struct inode *inode, struct file *file)
vhost_vsock_flush(vsock);
vhost_dev_stop(&vsock->dev);
- spin_lock_bh(&vsock->send_pkt_list_lock);
- while (!list_empty(&vsock->send_pkt_list)) {
- struct virtio_vsock_pkt *pkt;
-
- pkt = list_first_entry(&vsock->send_pkt_list,
- struct virtio_vsock_pkt, list);
- list_del_init(&pkt->list);
- virtio_transport_free_pkt(pkt);
- }
- spin_unlock_bh(&vsock->send_pkt_list_lock);
+ virtio_vsock_skb_queue_purge(&vsock->send_pkt_queue);
vhost_dev_cleanup(&vsock->dev);
kfree(vsock->dev.vqs);
diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig
index 936ba1e4d35e..4c33e971c0f0 100644
--- a/drivers/video/backlight/Kconfig
+++ b/drivers/video/backlight/Kconfig
@@ -90,13 +90,6 @@ config LCD_PLATFORM
This driver provides a platform-device registered LCD power
control interface.
-config LCD_TOSA
- tristate "Sharp SL-6000 LCD Driver"
- depends on I2C && SPI && MACH_TOSA
- help
- If you have an Sharp SL-6000 Zaurus say Y to enable a driver
- for its LCD.
-
config LCD_HP700
tristate "HP Jornada 700 series LCD Driver"
depends on SA1100_JORNADA720_SSP && !PREEMPTION
@@ -190,6 +183,14 @@ config BACKLIGHT_KTD253
which is a 1-wire GPIO-controlled backlight found in some mobile
phones.
+config BACKLIGHT_KTZ8866
+ tristate "Backlight Driver for Kinetic KTZ8866"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ Say Y to enable the backlight driver for the Kinetic KTZ8866
+ found in Xiaomi Mi Pad 5 series.
+
config BACKLIGHT_LM3533
tristate "Backlight Driver for LM3533"
depends on MFD_LM3533
@@ -288,13 +289,6 @@ config BACKLIGHT_APPLE
If you have an Intel-based Apple say Y to enable a driver for its
backlight.
-config BACKLIGHT_TOSA
- tristate "Sharp SL-6000 Backlight Driver"
- depends on I2C && MACH_TOSA && LCD_TOSA
- help
- If you have an Sharp SL-6000 Zaurus say Y to enable a driver
- for its backlight
-
config BACKLIGHT_QCOM_WLED
tristate "Qualcomm PMIC WLED Driver"
select REGMAP
diff --git a/drivers/video/backlight/Makefile b/drivers/video/backlight/Makefile
index e815f3f1deff..f72e1c3c59e9 100644
--- a/drivers/video/backlight/Makefile
+++ b/drivers/video/backlight/Makefile
@@ -15,7 +15,6 @@ obj-$(CONFIG_LCD_LTV350QV) += ltv350qv.o
obj-$(CONFIG_LCD_OTM3225A) += otm3225a.o
obj-$(CONFIG_LCD_PLATFORM) += platform_lcd.o
obj-$(CONFIG_LCD_TDO24M) += tdo24m.o
-obj-$(CONFIG_LCD_TOSA) += tosa_lcd.o
obj-$(CONFIG_LCD_VGG2432A4) += vgg2432a4.o
obj-$(CONFIG_BACKLIGHT_88PM860X) += 88pm860x_bl.o
@@ -36,6 +35,7 @@ obj-$(CONFIG_BACKLIGHT_HP680) += hp680_bl.o
obj-$(CONFIG_BACKLIGHT_HP700) += jornada720_bl.o
obj-$(CONFIG_BACKLIGHT_IPAQ_MICRO) += ipaq_micro_bl.o
obj-$(CONFIG_BACKLIGHT_KTD253) += ktd253-backlight.o
+obj-$(CONFIG_BACKLIGHT_KTZ8866) += ktz8866.o
obj-$(CONFIG_BACKLIGHT_LM3533) += lm3533_bl.o
obj-$(CONFIG_BACKLIGHT_LM3630A) += lm3630a_bl.o
obj-$(CONFIG_BACKLIGHT_LM3639) += lm3639_bl.o
@@ -53,7 +53,6 @@ obj-$(CONFIG_BACKLIGHT_QCOM_WLED) += qcom-wled.o
obj-$(CONFIG_BACKLIGHT_RT4831) += rt4831-backlight.o
obj-$(CONFIG_BACKLIGHT_SAHARA) += kb3886_bl.o
obj-$(CONFIG_BACKLIGHT_SKY81452) += sky81452-backlight.o
-obj-$(CONFIG_BACKLIGHT_TOSA) += tosa_bl.o
obj-$(CONFIG_BACKLIGHT_TPS65217) += tps65217_bl.o
obj-$(CONFIG_BACKLIGHT_WM831X) += wm831x_bl.o
obj-$(CONFIG_BACKLIGHT_ARCXCNN) += arcxcnn_bl.o
diff --git a/drivers/video/backlight/aat2870_bl.c b/drivers/video/backlight/aat2870_bl.c
index a7af9adafad6..1cbb303e9c88 100644
--- a/drivers/video/backlight/aat2870_bl.c
+++ b/drivers/video/backlight/aat2870_bl.c
@@ -59,7 +59,7 @@ static int aat2870_bl_update_status(struct backlight_device *bd)
struct aat2870_bl_driver_data *aat2870_bl = bl_get_data(bd);
struct aat2870_data *aat2870 =
dev_get_drvdata(aat2870_bl->pdev->dev.parent);
- int brightness = bd->props.brightness;
+ int brightness = backlight_get_brightness(bd);
int ret;
if ((brightness < 0) || (bd->props.max_brightness < brightness)) {
@@ -70,11 +70,6 @@ static int aat2870_bl_update_status(struct backlight_device *bd)
dev_dbg(&bd->dev, "brightness=%d, power=%d, state=%d\n",
bd->props.brightness, bd->props.power, bd->props.state);
- if ((bd->props.power != FB_BLANK_UNBLANK) ||
- (bd->props.state & BL_CORE_FBBLANK) ||
- (bd->props.state & BL_CORE_SUSPENDED))
- brightness = 0;
-
ret = aat2870->write(aat2870, AAT2870_BLM,
(u8)aat2870_brightness(aat2870_bl, brightness));
if (ret < 0)
diff --git a/drivers/video/backlight/arcxcnn_bl.c b/drivers/video/backlight/arcxcnn_bl.c
index 555b036643fb..e610d7a1d13d 100644
--- a/drivers/video/backlight/arcxcnn_bl.c
+++ b/drivers/video/backlight/arcxcnn_bl.c
@@ -130,12 +130,9 @@ static int arcxcnn_set_brightness(struct arcxcnn *lp, u32 brightness)
static int arcxcnn_bl_update_status(struct backlight_device *bl)
{
struct arcxcnn *lp = bl_get_data(bl);
- u32 brightness = bl->props.brightness;
+ u32 brightness = backlight_get_brightness(bl);
int ret;
- if (bl->props.state & (BL_CORE_SUSPENDED | BL_CORE_FBBLANK))
- brightness = 0;
-
ret = arcxcnn_set_brightness(lp, brightness);
if (ret)
return ret;
diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c
index b788ff3d0f45..6eea72aa8dbf 100644
--- a/drivers/video/backlight/backlight.c
+++ b/drivers/video/backlight/backlight.c
@@ -501,7 +501,7 @@ EXPORT_SYMBOL(backlight_device_get_by_type);
*
* This function looks up a backlight device by its name. It obtains a reference
* on the backlight device and it is the caller's responsibility to drop the
- * reference by calling backlight_put().
+ * reference by calling put_device().
*
* Returns:
* A pointer to the backlight device if found, otherwise NULL.
diff --git a/drivers/video/backlight/ipaq_micro_bl.c b/drivers/video/backlight/ipaq_micro_bl.c
index 85b16cc82878..f595b8c8cbb2 100644
--- a/drivers/video/backlight/ipaq_micro_bl.c
+++ b/drivers/video/backlight/ipaq_micro_bl.c
@@ -16,17 +16,12 @@
static int micro_bl_update_status(struct backlight_device *bd)
{
struct ipaq_micro *micro = dev_get_drvdata(&bd->dev);
- int intensity = bd->props.brightness;
+ int intensity = backlight_get_brightness(bd);
struct ipaq_micro_msg msg = {
.id = MSG_BACKLIGHT,
.tx_len = 3,
};
- if (bd->props.power != FB_BLANK_UNBLANK)
- intensity = 0;
- if (bd->props.state & (BL_CORE_FBBLANK | BL_CORE_SUSPENDED))
- intensity = 0;
-
/*
* Message format:
* Byte 0: backlight instance (usually 1)
diff --git a/drivers/video/backlight/ktd253-backlight.c b/drivers/video/backlight/ktd253-backlight.c
index 37aa5a669530..d7d43454f64a 100644
--- a/drivers/video/backlight/ktd253-backlight.c
+++ b/drivers/video/backlight/ktd253-backlight.c
@@ -173,12 +173,9 @@ static int ktd253_backlight_probe(struct platform_device *pdev)
}
ktd253->gpiod = devm_gpiod_get(dev, "enable", GPIOD_OUT_LOW);
- if (IS_ERR(ktd253->gpiod)) {
- ret = PTR_ERR(ktd253->gpiod);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "gpio line missing or invalid.\n");
- return ret;
- }
+ if (IS_ERR(ktd253->gpiod))
+ return dev_err_probe(dev, PTR_ERR(ktd253->gpiod),
+ "gpio line missing or invalid.\n");
gpiod_set_consumer_name(ktd253->gpiod, dev_name(dev));
/* Bring backlight to a known off state */
msleep(KTD253_T_OFF_MS);
diff --git a/drivers/video/backlight/ktz8866.c b/drivers/video/backlight/ktz8866.c
new file mode 100644
index 000000000000..d38c13ad39c7
--- /dev/null
+++ b/drivers/video/backlight/ktz8866.c
@@ -0,0 +1,208 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Backlight driver for the Kinetic KTZ8866
+ *
+ * Copyright (C) 2022, 2023 Jianhua Lu <lujianhua000@gmail.com>
+ */
+
+#include <linux/backlight.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+
+#define DEFAULT_BRIGHTNESS 1500
+#define MAX_BRIGHTNESS 2047
+#define REG_MAX 0x15
+
+/* reg */
+#define DEVICE_ID 0x01
+#define BL_CFG1 0x02
+#define BL_CFG2 0x03
+#define BL_BRT_LSB 0x04
+#define BL_BRT_MSB 0x05
+#define BL_EN 0x08
+#define LCD_BIAS_CFG1 0x09
+#define LCD_BIAS_CFG2 0x0A
+#define LCD_BIAS_CFG3 0x0B
+#define LCD_BOOST_CFG 0x0C
+#define OUTP_CFG 0x0D
+#define OUTN_CFG 0x0E
+#define FLAG 0x0F
+#define BL_OPTION1 0x10
+#define BL_OPTION2 0x11
+#define PWM2DIG_LSBs 0x12
+#define PWM2DIG_MSBs 0x13
+#define BL_DIMMING 0x14
+#define PWM_RAMP_TIME 0x15
+
+/* definition */
+#define BL_EN_BIT BIT(6)
+#define LCD_BIAS_EN 0x9F
+#define PWM_HYST 0x5
+
+struct ktz8866 {
+ struct i2c_client *client;
+ struct regmap *regmap;
+ bool led_on;
+ struct gpio_desc *enable_gpio;
+};
+
+static const struct regmap_config ktz8866_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = REG_MAX,
+};
+
+static int ktz8866_write(struct ktz8866 *ktz, unsigned int reg,
+ unsigned int val)
+{
+ return regmap_write(ktz->regmap, reg, val);
+}
+
+static int ktz8866_update_bits(struct ktz8866 *ktz, unsigned int reg,
+ unsigned int mask, unsigned int val)
+{
+ return regmap_update_bits(ktz->regmap, reg, mask, val);
+}
+
+static int ktz8866_backlight_update_status(struct backlight_device *backlight_dev)
+{
+ struct ktz8866 *ktz = bl_get_data(backlight_dev);
+ unsigned int brightness = backlight_get_brightness(backlight_dev);
+
+ if (!ktz->led_on && brightness > 0) {
+ ktz8866_update_bits(ktz, BL_EN, BL_EN_BIT, BL_EN_BIT);
+ ktz->led_on = true;
+ } else if (brightness == 0) {
+ ktz8866_update_bits(ktz, BL_EN, BL_EN_BIT, 0);
+ ktz->led_on = false;
+ }
+
+ /* Set brightness */
+ ktz8866_write(ktz, BL_BRT_LSB, brightness & 0x7);
+ ktz8866_write(ktz, BL_BRT_MSB, (brightness >> 3) & 0xFF);
+
+ return 0;
+}
+
+static const struct backlight_ops ktz8866_backlight_ops = {
+ .options = BL_CORE_SUSPENDRESUME,
+ .update_status = ktz8866_backlight_update_status,
+};
+
+static void ktz8866_init(struct ktz8866 *ktz)
+{
+ unsigned int val = 0;
+
+ if (of_property_read_u32(ktz->client->dev.of_node, "current-num-sinks", &val))
+ ktz8866_write(ktz, BL_EN, BIT(val) - 1);
+ else
+ /* Enable all 6 current sinks if the number of current sinks isn't specified. */
+ ktz8866_write(ktz, BL_EN, BIT(6) - 1);
+
+ if (of_property_read_u32(ktz->client->dev.of_node, "kinetic,current-ramp-delay-ms", &val)) {
+ if (val <= 128)
+ ktz8866_write(ktz, BL_CFG2, BIT(7) | (ilog2(val) << 3) | PWM_HYST);
+ else
+ ktz8866_write(ktz, BL_CFG2, BIT(7) | ((5 + val / 64) << 3) | PWM_HYST);
+ }
+
+ if (of_property_read_u32(ktz->client->dev.of_node, "kinetic,led-enable-ramp-delay-ms", &val)) {
+ if (val == 0)
+ ktz8866_write(ktz, BL_DIMMING, 0);
+ else {
+ unsigned int ramp_off_time = ilog2(val) + 1;
+ unsigned int ramp_on_time = ramp_off_time << 4;
+ ktz8866_write(ktz, BL_DIMMING, ramp_on_time | ramp_off_time);
+ }
+ }
+
+ if (of_property_read_bool(ktz->client->dev.of_node, "kinetic,enable-lcd-bias"))
+ ktz8866_write(ktz, LCD_BIAS_CFG1, LCD_BIAS_EN);
+}
+
+static int ktz8866_probe(struct i2c_client *client)
+{
+ struct backlight_device *backlight_dev;
+ struct backlight_properties props;
+ struct ktz8866 *ktz;
+ int ret = 0;
+
+ ktz = devm_kzalloc(&client->dev, sizeof(*ktz), GFP_KERNEL);
+ if (!ktz)
+ return -ENOMEM;
+
+ ktz->client = client;
+ ktz->regmap = devm_regmap_init_i2c(client, &ktz8866_regmap_config);
+ if (IS_ERR(ktz->regmap))
+ return dev_err_probe(&client->dev, PTR_ERR(ktz->regmap), "failed to init regmap\n");
+
+ ret = devm_regulator_get_enable(&client->dev, "vddpos");
+ if (ret)
+ return dev_err_probe(&client->dev, ret, "get regulator vddpos failed\n");
+ ret = devm_regulator_get_enable(&client->dev, "vddneg");
+ if (ret)
+ return dev_err_probe(&client->dev, ret, "get regulator vddneg failed\n");
+
+ ktz->enable_gpio = devm_gpiod_get_optional(&client->dev, "enable", GPIOD_OUT_HIGH);
+ if (IS_ERR(ktz->enable_gpio))
+ return PTR_ERR(ktz->enable_gpio);
+
+ memset(&props, 0, sizeof(props));
+ props.type = BACKLIGHT_RAW;
+ props.max_brightness = MAX_BRIGHTNESS;
+ props.brightness = DEFAULT_BRIGHTNESS;
+ props.scale = BACKLIGHT_SCALE_LINEAR;
+
+ backlight_dev = devm_backlight_device_register(&client->dev, "ktz8866-backlight",
+ &client->dev, ktz, &ktz8866_backlight_ops, &props);
+ if (IS_ERR(backlight_dev))
+ return dev_err_probe(&client->dev, PTR_ERR(backlight_dev),
+ "failed to register backlight device\n");
+
+ ktz8866_init(ktz);
+
+ i2c_set_clientdata(client, backlight_dev);
+ backlight_update_status(backlight_dev);
+
+ return 0;
+}
+
+static void ktz8866_remove(struct i2c_client *client)
+{
+ struct backlight_device *backlight_dev = i2c_get_clientdata(client);
+ backlight_dev->props.brightness = 0;
+ backlight_update_status(backlight_dev);
+}
+
+static const struct i2c_device_id ktz8866_ids[] = {
+ { "ktz8866", 0 },
+ {},
+};
+MODULE_DEVICE_TABLE(i2c, ktz8866_ids);
+
+static const struct of_device_id ktz8866_match_table[] = {
+ {
+ .compatible = "kinetic,ktz8866",
+ },
+ {},
+};
+
+static struct i2c_driver ktz8866_driver = {
+ .driver = {
+ .name = "ktz8866",
+ .of_match_table = ktz8866_match_table,
+ },
+ .probe_new = ktz8866_probe,
+ .remove = ktz8866_remove,
+ .id_table = ktz8866_ids,
+};
+
+module_i2c_driver(ktz8866_driver);
+
+MODULE_DESCRIPTION("Kinetic KTZ8866 Backlight Driver");
+MODULE_AUTHOR("Jianhua Lu <lujianhua000@gmail.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/video/backlight/locomolcd.c b/drivers/video/backlight/locomolcd.c
index 0468ea82159f..346d3e29a843 100644
--- a/drivers/video/backlight/locomolcd.c
+++ b/drivers/video/backlight/locomolcd.c
@@ -95,8 +95,6 @@ void locomolcd_power(int on)
/* read comadj */
if (comadj == -1 && machine_is_collie())
comadj = 128;
- if (comadj == -1 && machine_is_poodle())
- comadj = 118;
if (on)
locomolcd_on(comadj);
@@ -181,14 +179,6 @@ static int locomolcd_probe(struct locomo_dev *ldev)
locomo_gpio_set_dir(ldev->dev.parent, LOCOMO_GPIO_FL_VR, 0);
- /*
- * the poodle_lcd_power function is called for the first time
- * from fs_initcall, which is before locomo is activated.
- * We need to recall poodle_lcd_power here
- */
- if (machine_is_poodle())
- locomolcd_power(1);
-
local_irq_restore(flags);
memset(&props, 0, sizeof(struct backlight_properties));
diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
index c0523a0269ee..fb388148d98f 100644
--- a/drivers/video/backlight/pwm_bl.c
+++ b/drivers/video/backlight/pwm_bl.c
@@ -28,7 +28,6 @@ struct pwm_bl_data {
struct regulator *power_supply;
struct gpio_desc *enable_gpio;
unsigned int scale;
- bool legacy;
unsigned int post_pwm_on_delay;
unsigned int pwm_off_delay;
int (*notify)(struct device *,
@@ -41,19 +40,16 @@ struct pwm_bl_data {
static void pwm_backlight_power_on(struct pwm_bl_data *pb)
{
- struct pwm_state state;
int err;
- pwm_get_state(pb->pwm, &state);
if (pb->enabled)
return;
- err = regulator_enable(pb->power_supply);
- if (err < 0)
- dev_err(pb->dev, "failed to enable power supply\n");
-
- state.enabled = true;
- pwm_apply_state(pb->pwm, &state);
+ if (pb->power_supply) {
+ err = regulator_enable(pb->power_supply);
+ if (err < 0)
+ dev_err(pb->dev, "failed to enable power supply\n");
+ }
if (pb->post_pwm_on_delay)
msleep(pb->post_pwm_on_delay);
@@ -66,9 +62,6 @@ static void pwm_backlight_power_on(struct pwm_bl_data *pb)
static void pwm_backlight_power_off(struct pwm_bl_data *pb)
{
- struct pwm_state state;
-
- pwm_get_state(pb->pwm, &state);
if (!pb->enabled)
return;
@@ -78,28 +71,22 @@ static void pwm_backlight_power_off(struct pwm_bl_data *pb)
if (pb->pwm_off_delay)
msleep(pb->pwm_off_delay);
- state.enabled = false;
- state.duty_cycle = 0;
- pwm_apply_state(pb->pwm, &state);
-
- regulator_disable(pb->power_supply);
+ if (pb->power_supply)
+ regulator_disable(pb->power_supply);
pb->enabled = false;
}
-static int compute_duty_cycle(struct pwm_bl_data *pb, int brightness)
+static int compute_duty_cycle(struct pwm_bl_data *pb, int brightness, struct pwm_state *state)
{
unsigned int lth = pb->lth_brightness;
- struct pwm_state state;
u64 duty_cycle;
- pwm_get_state(pb->pwm, &state);
-
if (pb->levels)
duty_cycle = pb->levels[brightness];
else
duty_cycle = brightness;
- duty_cycle *= state.period - lth;
+ duty_cycle *= state->period - lth;
do_div(duty_cycle, pb->scale);
return duty_cycle + lth;
@@ -116,11 +103,26 @@ static int pwm_backlight_update_status(struct backlight_device *bl)
if (brightness > 0) {
pwm_get_state(pb->pwm, &state);
- state.duty_cycle = compute_duty_cycle(pb, brightness);
+ state.duty_cycle = compute_duty_cycle(pb, brightness, &state);
+ state.enabled = true;
pwm_apply_state(pb->pwm, &state);
+
pwm_backlight_power_on(pb);
} else {
pwm_backlight_power_off(pb);
+
+ pwm_get_state(pb->pwm, &state);
+ state.duty_cycle = 0;
+ /*
+ * We cannot assume a disabled PWM to drive its output to the
+ * inactive state. If we have an enable GPIO and/or a regulator
+ * we assume that this isn't relevant and we can disable the PWM
+ * to save power. If however there is neither an enable GPIO nor
+ * a regulator keep the PWM on be sure to get a constant
+ * inactive output.
+ */
+ state.enabled = !pb->power_supply && !pb->enable_gpio;
+ pwm_apply_state(pb->pwm, &state);
}
if (pb->notify_after)
@@ -417,7 +419,7 @@ static int pwm_backlight_initial_power_state(const struct pwm_bl_data *pb)
if (pb->enable_gpio && gpiod_get_value_cansleep(pb->enable_gpio) == 0)
active = false;
- if (!regulator_is_enabled(pb->power_supply))
+ if (pb->power_supply && !regulator_is_enabled(pb->power_supply))
active = false;
if (!pwm_is_enabled(pb->pwm))
@@ -455,7 +457,6 @@ static int pwm_backlight_probe(struct platform_device *pdev)
struct platform_pwm_backlight_data defdata;
struct backlight_properties props;
struct backlight_device *bl;
- struct device_node *node = pdev->dev.of_node;
struct pwm_bl_data *pb;
struct pwm_state state;
unsigned int i;
@@ -499,19 +500,16 @@ static int pwm_backlight_probe(struct platform_device *pdev)
goto err_alloc;
}
- pb->power_supply = devm_regulator_get(&pdev->dev, "power");
+ pb->power_supply = devm_regulator_get_optional(&pdev->dev, "power");
if (IS_ERR(pb->power_supply)) {
ret = PTR_ERR(pb->power_supply);
- goto err_alloc;
+ if (ret == -ENODEV)
+ pb->power_supply = NULL;
+ else
+ goto err_alloc;
}
pb->pwm = devm_pwm_get(&pdev->dev, NULL);
- if (IS_ERR(pb->pwm) && PTR_ERR(pb->pwm) != -EPROBE_DEFER && !node) {
- dev_err(&pdev->dev, "unable to request PWM, trying legacy API\n");
- pb->legacy = true;
- pb->pwm = pwm_request(data->pwm_id, "pwm-backlight");
- }
-
if (IS_ERR(pb->pwm)) {
ret = PTR_ERR(pb->pwm);
if (ret != -EPROBE_DEFER)
@@ -604,8 +602,6 @@ static int pwm_backlight_probe(struct platform_device *pdev)
if (IS_ERR(bl)) {
dev_err(&pdev->dev, "failed to register backlight\n");
ret = PTR_ERR(bl);
- if (pb->legacy)
- pwm_free(pb->pwm);
goto err_alloc;
}
@@ -639,8 +635,6 @@ static int pwm_backlight_remove(struct platform_device *pdev)
if (pb->exit)
pb->exit(&pdev->dev);
- if (pb->legacy)
- pwm_free(pb->pwm);
return 0;
}
diff --git a/drivers/video/backlight/sky81452-backlight.c b/drivers/video/backlight/sky81452-backlight.c
index c95e0de7f4e7..0172438c38ce 100644
--- a/drivers/video/backlight/sky81452-backlight.c
+++ b/drivers/video/backlight/sky81452-backlight.c
@@ -41,7 +41,7 @@
#define SKY81452_MAX_BRIGHTNESS (SKY81452_CS + 1)
/**
- * struct sky81452_platform_data
+ * struct sky81452_bl_platform_data - backlight platform data
* @name: backlight driver name.
* If it is not defined, default name is lcd-backlight.
* @gpiod_enable:GPIO descriptor which control EN pin
diff --git a/drivers/video/backlight/tosa_bl.c b/drivers/video/backlight/tosa_bl.c
deleted file mode 100644
index 77b71f6c19b5..000000000000
--- a/drivers/video/backlight/tosa_bl.c
+++ /dev/null
@@ -1,172 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * LCD / Backlight control code for Sharp SL-6000x (tosa)
- *
- * Copyright (c) 2005 Dirk Opfer
- * Copyright (c) 2007,2008 Dmitry Baryshkov
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/device.h>
-#include <linux/spi/spi.h>
-#include <linux/i2c.h>
-#include <linux/gpio/consumer.h>
-#include <linux/fb.h>
-#include <linux/backlight.h>
-#include <linux/slab.h>
-
-#include <asm/mach/sharpsl_param.h>
-
-#include "tosa_bl.h"
-
-#define COMADJ_DEFAULT 97
-
-#define DAC_CH1 0
-#define DAC_CH2 1
-
-struct tosa_bl_data {
- struct i2c_client *i2c;
- struct backlight_device *bl;
- struct gpio_desc *gpio;
-
- int comadj;
-};
-
-static void tosa_bl_set_backlight(struct tosa_bl_data *data, int brightness)
-{
- struct spi_device *spi = dev_get_platdata(&data->i2c->dev);
-
- i2c_smbus_write_byte_data(data->i2c, DAC_CH1, data->comadj);
-
- /* SetBacklightDuty */
- i2c_smbus_write_byte_data(data->i2c, DAC_CH2, (u8)(brightness & 0xff));
-
- /* SetBacklightVR */
- gpiod_set_value(data->gpio, brightness & 0x100);
-
- tosa_bl_enable(spi, brightness);
-}
-
-static int tosa_bl_update_status(struct backlight_device *dev)
-{
- struct backlight_properties *props = &dev->props;
- struct tosa_bl_data *data = bl_get_data(dev);
- int power = max(props->power, props->fb_blank);
- int brightness = props->brightness;
-
- if (power)
- brightness = 0;
-
- tosa_bl_set_backlight(data, brightness);
-
- return 0;
-}
-
-static int tosa_bl_get_brightness(struct backlight_device *dev)
-{
- struct backlight_properties *props = &dev->props;
-
- return props->brightness;
-}
-
-static const struct backlight_ops bl_ops = {
- .get_brightness = tosa_bl_get_brightness,
- .update_status = tosa_bl_update_status,
-};
-
-static int tosa_bl_probe(struct i2c_client *client)
-{
- struct backlight_properties props;
- struct tosa_bl_data *data;
- int ret = 0;
-
- data = devm_kzalloc(&client->dev, sizeof(struct tosa_bl_data),
- GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- data->comadj = sharpsl_param.comadj == -1 ? COMADJ_DEFAULT : sharpsl_param.comadj;
- data->gpio = devm_gpiod_get(&client->dev, "backlight", GPIOD_OUT_LOW);
- ret = PTR_ERR_OR_ZERO(data->gpio);
- if (ret) {
- dev_dbg(&data->bl->dev, "Unable to request gpio!\n");
- return ret;
- }
-
- i2c_set_clientdata(client, data);
- data->i2c = client;
-
- memset(&props, 0, sizeof(struct backlight_properties));
- props.type = BACKLIGHT_RAW;
- props.max_brightness = 512 - 1;
- data->bl = devm_backlight_device_register(&client->dev, "tosa-bl",
- &client->dev, data, &bl_ops,
- &props);
- if (IS_ERR(data->bl)) {
- ret = PTR_ERR(data->bl);
- goto err_reg;
- }
-
- data->bl->props.brightness = 69;
- data->bl->props.power = FB_BLANK_UNBLANK;
-
- backlight_update_status(data->bl);
-
- return 0;
-
-err_reg:
- data->bl = NULL;
- return ret;
-}
-
-static void tosa_bl_remove(struct i2c_client *client)
-{
- struct tosa_bl_data *data = i2c_get_clientdata(client);
-
- data->bl = NULL;
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int tosa_bl_suspend(struct device *dev)
-{
- struct tosa_bl_data *data = dev_get_drvdata(dev);
-
- tosa_bl_set_backlight(data, 0);
-
- return 0;
-}
-
-static int tosa_bl_resume(struct device *dev)
-{
- struct tosa_bl_data *data = dev_get_drvdata(dev);
-
- backlight_update_status(data->bl);
- return 0;
-}
-#endif
-
-static SIMPLE_DEV_PM_OPS(tosa_bl_pm_ops, tosa_bl_suspend, tosa_bl_resume);
-
-static const struct i2c_device_id tosa_bl_id[] = {
- { "tosa-bl", 0 },
- { },
-};
-MODULE_DEVICE_TABLE(i2c, tosa_bl_id);
-
-static struct i2c_driver tosa_bl_driver = {
- .driver = {
- .name = "tosa-bl",
- .pm = &tosa_bl_pm_ops,
- },
- .probe_new = tosa_bl_probe,
- .remove = tosa_bl_remove,
- .id_table = tosa_bl_id,
-};
-
-module_i2c_driver(tosa_bl_driver);
-
-MODULE_AUTHOR("Dmitry Baryshkov");
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("LCD/Backlight control for Sharp SL-6000 PDA");
-
diff --git a/drivers/video/backlight/tosa_bl.h b/drivers/video/backlight/tosa_bl.h
deleted file mode 100644
index 589e17e6fdb2..000000000000
--- a/drivers/video/backlight/tosa_bl.h
+++ /dev/null
@@ -1,8 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-#ifndef _TOSA_BL_H
-#define _TOSA_BL_H
-
-struct spi_device;
-extern int tosa_bl_enable(struct spi_device *spi, int enable);
-
-#endif
diff --git a/drivers/video/backlight/tosa_lcd.c b/drivers/video/backlight/tosa_lcd.c
deleted file mode 100644
index 23d6c6bf0f54..000000000000
--- a/drivers/video/backlight/tosa_lcd.c
+++ /dev/null
@@ -1,284 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * LCD / Backlight control code for Sharp SL-6000x (tosa)
- *
- * Copyright (c) 2005 Dirk Opfer
- * Copyright (c) 2007,2008 Dmitry Baryshkov
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/device.h>
-#include <linux/spi/spi.h>
-#include <linux/i2c.h>
-#include <linux/slab.h>
-#include <linux/gpio/consumer.h>
-#include <linux/delay.h>
-#include <linux/lcd.h>
-#include <linux/fb.h>
-
-#include <asm/mach/sharpsl_param.h>
-
-#include "tosa_bl.h"
-
-#define POWER_IS_ON(pwr) ((pwr) <= FB_BLANK_NORMAL)
-
-#define TG_REG0_VQV 0x0001
-#define TG_REG0_COLOR 0x0002
-#define TG_REG0_UD 0x0004
-#define TG_REG0_LR 0x0008
-
-/*
- * Timing Generator
- */
-#define TG_PNLCTL 0x00
-#define TG_TPOSCTL 0x01
-#define TG_DUTYCTL 0x02
-#define TG_GPOSR 0x03
-#define TG_GPODR1 0x04
-#define TG_GPODR2 0x05
-#define TG_PINICTL 0x06
-#define TG_HPOSCTL 0x07
-
-
-#define DAC_BASE 0x4e
-
-struct tosa_lcd_data {
- struct spi_device *spi;
- struct lcd_device *lcd;
- struct i2c_client *i2c;
- struct gpio_desc *gpiod_tg;
-
- int lcd_power;
- bool is_vga;
-};
-
-static int tosa_tg_send(struct spi_device *spi, int adrs, uint8_t data)
-{
- u8 buf[1];
- struct spi_message msg;
- struct spi_transfer xfer = {
- .len = 1,
- .cs_change = 0,
- .tx_buf = buf,
- };
-
- buf[0] = ((adrs & 0x07) << 5) | (data & 0x1f);
- spi_message_init(&msg);
- spi_message_add_tail(&xfer, &msg);
-
- return spi_sync(spi, &msg);
-}
-
-int tosa_bl_enable(struct spi_device *spi, int enable)
-{
- /* bl_enable GP04=1 otherwise GP04=0*/
- return tosa_tg_send(spi, TG_GPODR2, enable ? 0x01 : 0x00);
-}
-EXPORT_SYMBOL(tosa_bl_enable);
-
-static void tosa_lcd_tg_init(struct tosa_lcd_data *data)
-{
- /* TG on */
- gpiod_set_value(data->gpiod_tg, 0);
-
- mdelay(60);
-
- /* delayed 0clk TCTL signal for VGA */
- tosa_tg_send(data->spi, TG_TPOSCTL, 0x00);
- /* GPOS0=powercontrol, GPOS1=GPIO, GPOS2=TCTL */
- tosa_tg_send(data->spi, TG_GPOSR, 0x02);
-}
-
-static void tosa_lcd_tg_on(struct tosa_lcd_data *data)
-{
- struct spi_device *spi = data->spi;
- int value = TG_REG0_COLOR | TG_REG0_UD | TG_REG0_LR;
-
- if (data->is_vga)
- value |= TG_REG0_VQV;
-
- tosa_tg_send(spi, TG_PNLCTL, value);
-
- /* TG LCD pannel power up */
- tosa_tg_send(spi, TG_PINICTL, 0x4);
- mdelay(50);
-
- /* TG LCD GVSS */
- tosa_tg_send(spi, TG_PINICTL, 0x0);
-
- if (IS_ERR_OR_NULL(data->i2c)) {
- /*
- * after the pannel is powered up the first time,
- * we can access the i2c bus so probe for the DAC
- */
- struct i2c_adapter *adap = i2c_get_adapter(0);
- struct i2c_board_info info = {
- .dev_name = "tosa-bl",
- .type = "tosa-bl",
- .addr = DAC_BASE,
- .platform_data = data->spi,
- };
- data->i2c = i2c_new_client_device(adap, &info);
- }
-}
-
-static void tosa_lcd_tg_off(struct tosa_lcd_data *data)
-{
- struct spi_device *spi = data->spi;
-
- /* TG LCD VHSA off */
- tosa_tg_send(spi, TG_PINICTL, 0x4);
- mdelay(50);
-
- /* TG LCD signal off */
- tosa_tg_send(spi, TG_PINICTL, 0x6);
- mdelay(50);
-
- /* TG Off */
- gpiod_set_value(data->gpiod_tg, 1);
- mdelay(100);
-}
-
-int tosa_lcd_set_power(struct lcd_device *lcd, int power)
-{
- struct tosa_lcd_data *data = lcd_get_data(lcd);
-
- if (POWER_IS_ON(power) && !POWER_IS_ON(data->lcd_power))
- tosa_lcd_tg_on(data);
-
- if (!POWER_IS_ON(power) && POWER_IS_ON(data->lcd_power))
- tosa_lcd_tg_off(data);
-
- data->lcd_power = power;
- return 0;
-}
-
-static int tosa_lcd_get_power(struct lcd_device *lcd)
-{
- struct tosa_lcd_data *data = lcd_get_data(lcd);
-
- return data->lcd_power;
-}
-
-static int tosa_lcd_set_mode(struct lcd_device *lcd, struct fb_videomode *mode)
-{
- struct tosa_lcd_data *data = lcd_get_data(lcd);
-
- if (mode->xres == 320 || mode->yres == 320)
- data->is_vga = false;
- else
- data->is_vga = true;
-
- if (POWER_IS_ON(data->lcd_power))
- tosa_lcd_tg_on(data);
-
- return 0;
-}
-
-static struct lcd_ops tosa_lcd_ops = {
- .set_power = tosa_lcd_set_power,
- .get_power = tosa_lcd_get_power,
- .set_mode = tosa_lcd_set_mode,
-};
-
-static int tosa_lcd_probe(struct spi_device *spi)
-{
- int ret;
- struct tosa_lcd_data *data;
-
- data = devm_kzalloc(&spi->dev, sizeof(struct tosa_lcd_data),
- GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- data->is_vga = true; /* default to VGA mode */
-
- /*
- * bits_per_word cannot be configured in platform data
- */
- spi->bits_per_word = 8;
-
- ret = spi_setup(spi);
- if (ret < 0)
- return ret;
-
- data->spi = spi;
- spi_set_drvdata(spi, data);
-
- data->gpiod_tg = devm_gpiod_get(&spi->dev, "tg #pwr", GPIOD_OUT_LOW);
- if (IS_ERR(data->gpiod_tg))
- return PTR_ERR(data->gpiod_tg);
-
- mdelay(60);
-
- tosa_lcd_tg_init(data);
-
- tosa_lcd_tg_on(data);
-
- data->lcd = devm_lcd_device_register(&spi->dev, "tosa-lcd", &spi->dev,
- data, &tosa_lcd_ops);
-
- if (IS_ERR(data->lcd)) {
- ret = PTR_ERR(data->lcd);
- data->lcd = NULL;
- goto err_register;
- }
-
- return 0;
-
-err_register:
- tosa_lcd_tg_off(data);
- return ret;
-}
-
-static void tosa_lcd_remove(struct spi_device *spi)
-{
- struct tosa_lcd_data *data = spi_get_drvdata(spi);
-
- i2c_unregister_device(data->i2c);
-
- tosa_lcd_tg_off(data);
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int tosa_lcd_suspend(struct device *dev)
-{
- struct tosa_lcd_data *data = dev_get_drvdata(dev);
-
- tosa_lcd_tg_off(data);
-
- return 0;
-}
-
-static int tosa_lcd_resume(struct device *dev)
-{
- struct tosa_lcd_data *data = dev_get_drvdata(dev);
-
- tosa_lcd_tg_init(data);
- if (POWER_IS_ON(data->lcd_power))
- tosa_lcd_tg_on(data);
- else
- tosa_lcd_tg_off(data);
-
- return 0;
-}
-#endif
-
-static SIMPLE_DEV_PM_OPS(tosa_lcd_pm_ops, tosa_lcd_suspend, tosa_lcd_resume);
-
-static struct spi_driver tosa_lcd_driver = {
- .driver = {
- .name = "tosa-lcd",
- .pm = &tosa_lcd_pm_ops,
- },
- .probe = tosa_lcd_probe,
- .remove = tosa_lcd_remove,
-};
-
-module_spi_driver(tosa_lcd_driver);
-
-MODULE_AUTHOR("Dmitry Baryshkov");
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("LCD/Backlight control for Sharp SL-6000 PDA");
-MODULE_ALIAS("spi:tosa-lcd");
diff --git a/drivers/video/console/newport_con.c b/drivers/video/console/newport_con.c
index d9c682ae0392..e8e4f82cd4a1 100644
--- a/drivers/video/console/newport_con.c
+++ b/drivers/video/console/newport_con.c
@@ -497,7 +497,7 @@ static int newport_blank(struct vc_data *c, int blank, int mode_switch)
return 1;
}
-static int newport_set_font(int unit, struct console_font *op)
+static int newport_set_font(int unit, struct console_font *op, unsigned int vpitch)
{
int w = op->width;
int h = op->height;
@@ -507,7 +507,7 @@ static int newport_set_font(int unit, struct console_font *op)
/* ladis: when I grow up, there will be a day... and more sizes will
* be supported ;-) */
- if ((w != 8) || (h != 16)
+ if ((w != 8) || (h != 16) || (vpitch != 32)
|| (op->charcount != 256 && op->charcount != 512))
return -EINVAL;
@@ -569,9 +569,10 @@ static int newport_font_default(struct vc_data *vc, struct console_font *op, cha
return newport_set_def_font(vc->vc_num, op);
}
-static int newport_font_set(struct vc_data *vc, struct console_font *font, unsigned flags)
+static int newport_font_set(struct vc_data *vc, struct console_font *font,
+ unsigned int vpitch, unsigned int flags)
{
- return newport_set_font(vc->vc_num, font);
+ return newport_set_font(vc->vc_num, font, vpitch);
}
static bool newport_scroll(struct vc_data *vc, unsigned int t, unsigned int b,
diff --git a/drivers/video/console/sticon.c b/drivers/video/console/sticon.c
index f304163e87e9..2cea69418a83 100644
--- a/drivers/video/console/sticon.c
+++ b/drivers/video/console/sticon.c
@@ -169,7 +169,8 @@ static int sticon_set_def_font(int unit, struct console_font *op)
return 0;
}
-static int sticon_set_font(struct vc_data *vc, struct console_font *op)
+static int sticon_set_font(struct vc_data *vc, struct console_font *op,
+ unsigned int vpitch)
{
struct sti_struct *sti = sticon_sti;
int vc_cols, vc_rows, vc_old_cols, vc_old_rows;
@@ -181,7 +182,7 @@ static int sticon_set_font(struct vc_data *vc, struct console_font *op)
struct sti_cooked_font *cooked_font;
unsigned char *data = op->data, *p;
- if ((w < 6) || (h < 6) || (w > 32) || (h > 32)
+ if ((w < 6) || (h < 6) || (w > 32) || (h > 32) || (vpitch != 32)
|| (op->charcount != 256 && op->charcount != 512))
return -EINVAL;
pitch = ALIGN(w, 8) / 8;
@@ -267,9 +268,9 @@ static int sticon_font_default(struct vc_data *vc, struct console_font *op, char
}
static int sticon_font_set(struct vc_data *vc, struct console_font *font,
- unsigned int flags)
+ unsigned int vpitch, unsigned int flags)
{
- return sticon_set_font(vc, font);
+ return sticon_set_font(vc, font, vpitch);
}
static void sticon_init(struct vc_data *c, int init)
diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
index fcdf017e2665..e25ba523892e 100644
--- a/drivers/video/console/vgacon.c
+++ b/drivers/video/console/vgacon.c
@@ -1029,7 +1029,7 @@ static int vgacon_adjust_height(struct vc_data *vc, unsigned fontheight)
}
static int vgacon_font_set(struct vc_data *c, struct console_font *font,
- unsigned int flags)
+ unsigned int vpitch, unsigned int flags)
{
unsigned charcount = font->charcount;
int rc;
@@ -1037,7 +1037,7 @@ static int vgacon_font_set(struct vc_data *c, struct console_font *font,
if (vga_video_type < VIDEO_TYPE_EGAM)
return -EINVAL;
- if (font->width != VGA_FONTWIDTH ||
+ if (font->width != VGA_FONTWIDTH || font->height > 32 || vpitch != 32 ||
(charcount != 256 && charcount != 512))
return -EINVAL;
@@ -1050,9 +1050,9 @@ static int vgacon_font_set(struct vc_data *c, struct console_font *font,
return rc;
}
-static int vgacon_font_get(struct vc_data *c, struct console_font *font)
+static int vgacon_font_get(struct vc_data *c, struct console_font *font, unsigned int vpitch)
{
- if (vga_video_type < VIDEO_TYPE_EGAM)
+ if (vga_video_type < VIDEO_TYPE_EGAM || vpitch != 32)
return -EINVAL;
font->width = VGA_FONTWIDTH;
diff --git a/drivers/video/fbdev/68328fb.c b/drivers/video/fbdev/68328fb.c
index 7db03ed77c76..41df61b37a18 100644
--- a/drivers/video/fbdev/68328fb.c
+++ b/drivers/video/fbdev/68328fb.c
@@ -391,7 +391,7 @@ static int mc68x328fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
#ifndef MMU
/* this is uClinux (no MMU) specific code */
- vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+ vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
vma->vm_start = videomemory;
return 0;
diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
index b2bed599e6c6..ff3646c30d0d 100644
--- a/drivers/video/fbdev/Kconfig
+++ b/drivers/video/fbdev/Kconfig
@@ -1841,23 +1841,6 @@ config FB_FSL_DIU
help
Framebuffer driver for the Freescale SoC DIU
-config FB_W100
- tristate "W100 frame buffer support"
- depends on FB && HAS_IOMEM && (ARCH_PXA || COMPILE_TEST)
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
- help
- Frame buffer driver for the w100 as found on the Sharp SL-Cxx series.
- It can also drive the w3220 chip found on iPAQ hx4700.
-
- This driver is also available as a module ( = code which can be
- inserted and removed from the running kernel whenever you want). The
- module will be called w100fb. If you want to compile it as a module,
- say M here and read <file:Documentation/kbuild/modules.rst>.
-
- If unsure, say N.
-
config FB_SH_MOBILE_LCDC
tristate "SuperH Mobile LCDC framebuffer support"
depends on FB && HAVE_CLK && HAS_IOMEM
@@ -1871,44 +1854,20 @@ config FB_SH_MOBILE_LCDC
help
Frame buffer driver for the on-chip SH-Mobile LCD controller.
-config FB_TMIO
- tristate "Toshiba Mobile IO FrameBuffer support"
- depends on FB && (MFD_TMIO || COMPILE_TEST)
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
- help
- Frame buffer driver for the Toshiba Mobile IO integrated as found
- on the Sharp SL-6000 series
-
- This driver is also available as a module ( = code which can be
- inserted and removed from the running kernel whenever you want). The
- module will be called tmiofb. If you want to compile it as a module,
- say M here and read <file:Documentation/kbuild/modules.rst>.
-
- If unsure, say N.
-
-config FB_TMIO_ACCELL
- bool "tmiofb acceleration"
- depends on FB_TMIO
- default y
-
config FB_S3C
tristate "Samsung S3C framebuffer support"
depends on FB && HAVE_CLK && HAS_IOMEM
- depends on (CPU_S3C2416 || ARCH_S3C64XX) || COMPILE_TEST
+ depends on ARCH_S3C64XX || COMPILE_TEST
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
help
Frame buffer driver for the built-in FB controller in the Samsung
- SoC line from the S3C2443 onwards, including the S3C2416, S3C2450,
- and the S3C64XX series such as the S3C6400 and S3C6410.
+ SoC line such as the S3C6400 and S3C6410.
These chips all have the same basic framebuffer design with the
- actual capabilities depending on the chip. For instance the S3C6400
- and S3C6410 support 4 hardware windows whereas the S3C24XX series
- currently only have two.
+ actual capabilities depending on the chip. The S3C6400
+ and S3C6410 support 4 hardware windows.
Currently the support is only for the S3C6400 and S3C6410 SoCs.
@@ -1918,29 +1877,6 @@ config FB_S3C_DEBUG_REGWRITE
help
Show all register writes via pr_debug()
-config FB_S3C2410
- tristate "S3C2410 LCD framebuffer support"
- depends on FB && ARCH_S3C24XX
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
- help
- Frame buffer driver for the built-in LCD controller in the Samsung
- S3C2410 processor.
-
- This driver is also available as a module ( = code which can be
- inserted and removed from the running kernel whenever you want). The
- module will be called s3c2410fb. If you want to compile it as a module,
- say M here and read <file:Documentation/kbuild/modules.rst>.
-
- If unsure, say N.
-config FB_S3C2410_DEBUG
- bool "S3C2410 lcd debug messages"
- depends on FB_S3C2410
- help
- Turn on debugging messages. Note that you can set/unset at run time
- through sysfs
-
config FB_SM501
tristate "Silicon Motion SM501 framebuffer support"
depends on FB && MFD_SM501
diff --git a/drivers/video/fbdev/Makefile b/drivers/video/fbdev/Makefile
index 7795c4126706..e6b0ae094b8b 100644
--- a/drivers/video/fbdev/Makefile
+++ b/drivers/video/fbdev/Makefile
@@ -84,8 +84,6 @@ obj-$(CONFIG_FB_PXA) += pxafb.o
obj-$(CONFIG_FB_PXA168) += pxa168fb.o
obj-$(CONFIG_PXA3XX_GCU) += pxa3xx-gcu.o
obj-$(CONFIG_MMP_DISP) += mmp/
-obj-$(CONFIG_FB_W100) += w100fb.o
-obj-$(CONFIG_FB_TMIO) += tmiofb.o
obj-$(CONFIG_FB_AU1100) += au1100fb.o
obj-$(CONFIG_FB_AU1200) += au1200fb.o
obj-$(CONFIG_FB_VT8500) += vt8500lcdfb.o
@@ -100,7 +98,6 @@ obj-$(CONFIG_FB_S1D13XXX) += s1d13xxxfb.o
obj-$(CONFIG_FB_SH7760) += sh7760fb.o
obj-$(CONFIG_FB_IMX) += imxfb.o
obj-$(CONFIG_FB_S3C) += s3c-fb.o
-obj-$(CONFIG_FB_S3C2410) += s3c2410fb.o
obj-$(CONFIG_FB_FSL_DIU) += fsl-diu-fb.o
obj-$(CONFIG_FB_COBALT) += cobalt_lcdfb.o
obj-$(CONFIG_FB_IBM_GXT4500) += gxt4500.o
diff --git a/drivers/video/fbdev/atmel_lcdfb.c b/drivers/video/fbdev/atmel_lcdfb.c
index 1fc8de4ecbeb..8187a7c4f910 100644
--- a/drivers/video/fbdev/atmel_lcdfb.c
+++ b/drivers/video/fbdev/atmel_lcdfb.c
@@ -49,7 +49,6 @@ struct atmel_lcdfb_info {
struct clk *lcdc_clk;
struct backlight_device *backlight;
- u8 bl_power;
u8 saved_lcdcon;
u32 pseudo_palette[16];
@@ -109,22 +108,7 @@ static u32 contrast_ctr = ATMEL_LCDC_PS_DIV8
static int atmel_bl_update_status(struct backlight_device *bl)
{
struct atmel_lcdfb_info *sinfo = bl_get_data(bl);
- int power = sinfo->bl_power;
- int brightness = bl->props.brightness;
-
- /* REVISIT there may be a meaningful difference between
- * fb_blank and power ... there seem to be some cases
- * this doesn't handle correctly.
- */
- if (bl->props.fb_blank != sinfo->bl_power)
- power = bl->props.fb_blank;
- else if (bl->props.power != sinfo->bl_power)
- power = bl->props.power;
-
- if (brightness < 0 && power == FB_BLANK_UNBLANK)
- brightness = lcdc_readl(sinfo, ATMEL_LCDC_CONTRAST_VAL);
- else if (power != FB_BLANK_UNBLANK)
- brightness = 0;
+ int brightness = backlight_get_brightness(bl);
lcdc_writel(sinfo, ATMEL_LCDC_CONTRAST_VAL, brightness);
if (contrast_ctr & ATMEL_LCDC_POL_POSITIVE)
@@ -133,8 +117,6 @@ static int atmel_bl_update_status(struct backlight_device *bl)
else
lcdc_writel(sinfo, ATMEL_LCDC_CONTRAST_CTR, contrast_ctr);
- bl->props.fb_blank = bl->props.power = sinfo->bl_power = power;
-
return 0;
}
@@ -155,8 +137,6 @@ static void init_backlight(struct atmel_lcdfb_info *sinfo)
struct backlight_properties props;
struct backlight_device *bl;
- sinfo->bl_power = FB_BLANK_UNBLANK;
-
if (sinfo->backlight)
return;
diff --git a/drivers/video/fbdev/aty/aty128fb.c b/drivers/video/fbdev/aty/aty128fb.c
index dd31b9d7d337..36a9ac05a340 100644
--- a/drivers/video/fbdev/aty/aty128fb.c
+++ b/drivers/video/fbdev/aty/aty128fb.c
@@ -1766,12 +1766,10 @@ static int aty128_bl_update_status(struct backlight_device *bd)
unsigned int reg = aty_ld_le32(LVDS_GEN_CNTL);
int level;
- if (bd->props.power != FB_BLANK_UNBLANK ||
- bd->props.fb_blank != FB_BLANK_UNBLANK ||
- !par->lcd_on)
+ if (!par->lcd_on)
level = 0;
else
- level = bd->props.brightness;
+ level = backlight_get_brightness(bd);
reg |= LVDS_BL_MOD_EN | LVDS_BLON;
if (level > 0) {
diff --git a/drivers/video/fbdev/aty/atyfb_base.c b/drivers/video/fbdev/aty/atyfb_base.c
index d59215a4992e..b02e4e645035 100644
--- a/drivers/video/fbdev/aty/atyfb_base.c
+++ b/drivers/video/fbdev/aty/atyfb_base.c
@@ -2219,13 +2219,7 @@ static int aty_bl_update_status(struct backlight_device *bd)
{
struct atyfb_par *par = bl_get_data(bd);
unsigned int reg = aty_ld_lcd(LCD_MISC_CNTL, par);
- int level;
-
- if (bd->props.power != FB_BLANK_UNBLANK ||
- bd->props.fb_blank != FB_BLANK_UNBLANK)
- level = 0;
- else
- level = bd->props.brightness;
+ int level = backlight_get_brightness(bd);
reg |= (BLMOD_EN | BIASMOD_EN);
if (level > 0) {
diff --git a/drivers/video/fbdev/aty/radeon_backlight.c b/drivers/video/fbdev/aty/radeon_backlight.c
index d2c1263ad260..427adc838f77 100644
--- a/drivers/video/fbdev/aty/radeon_backlight.c
+++ b/drivers/video/fbdev/aty/radeon_backlight.c
@@ -57,11 +57,7 @@ static int radeon_bl_update_status(struct backlight_device *bd)
* backlight. This provides some greater power saving and the display
* is useless without backlight anyway.
*/
- if (bd->props.power != FB_BLANK_UNBLANK ||
- bd->props.fb_blank != FB_BLANK_UNBLANK)
- level = 0;
- else
- level = bd->props.brightness;
+ level = backlight_get_brightness(bd);
del_timer_sync(&rinfo->lvds_timer);
radeon_engine_idle();
diff --git a/drivers/video/fbdev/clps711x-fb.c b/drivers/video/fbdev/clps711x-fb.c
index a1061c2f1640..45c75ff01eca 100644
--- a/drivers/video/fbdev/clps711x-fb.c
+++ b/drivers/video/fbdev/clps711x-fb.c
@@ -251,16 +251,8 @@ static int clps711x_fb_probe(struct platform_device *pdev)
goto out_fb_release;
}
- info->apertures = alloc_apertures(1);
- if (!info->apertures) {
- ret = -ENOMEM;
- goto out_fb_release;
- }
-
cfb->buffsize = resource_size(res);
info->fix.smem_start = res->start;
- info->apertures->ranges[0].base = info->fix.smem_start;
- info->apertures->ranges[0].size = cfb->buffsize;
cfb->clk = devm_clk_get(dev, NULL);
if (IS_ERR(cfb->clk)) {
@@ -345,7 +337,7 @@ static int clps711x_fb_probe(struct platform_device *pdev)
&clps711x_lcd_ops);
if (!IS_ERR(lcd))
return 0;
-
+
ret = PTR_ERR(lcd);
unregister_framebuffer(info);
diff --git a/drivers/video/fbdev/core/fb_defio.c b/drivers/video/fbdev/core/fb_defio.c
index c730253ab85c..aa5f059d0222 100644
--- a/drivers/video/fbdev/core/fb_defio.c
+++ b/drivers/video/fbdev/core/fb_defio.c
@@ -157,10 +157,6 @@ static vm_fault_t fb_deferred_io_track_page(struct fb_info *info, unsigned long
/* protect against the workqueue changing the page list */
mutex_lock(&fbdefio->lock);
- /* first write in this cycle, notify the driver */
- if (fbdefio->first_io && list_empty(&fbdefio->pagereflist))
- fbdefio->first_io(info);
-
pageref = fb_deferred_io_pageref_get(info, offset, page);
if (WARN_ON_ONCE(!pageref)) {
ret = VM_FAULT_OOM;
@@ -232,9 +228,9 @@ static const struct address_space_operations fb_deferred_io_aops = {
int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
vma->vm_ops = &fb_deferred_io_vm_ops;
- vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+ vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
if (!(info->flags & FBINFO_VIRTFB))
- vma->vm_flags |= VM_IO;
+ vm_flags_set(vma, VM_IO);
vma->vm_private_data = info;
return 0;
}
@@ -313,7 +309,7 @@ void fb_deferred_io_open(struct fb_info *info,
}
EXPORT_SYMBOL_GPL(fb_deferred_io_open);
-void fb_deferred_io_cleanup(struct fb_info *info)
+void fb_deferred_io_release(struct fb_info *info)
{
struct fb_deferred_io *fbdefio = info->fbdefio;
struct page *page;
@@ -327,6 +323,14 @@ void fb_deferred_io_cleanup(struct fb_info *info)
page = fb_deferred_io_page(info, i);
page->mapping = NULL;
}
+}
+EXPORT_SYMBOL_GPL(fb_deferred_io_release);
+
+void fb_deferred_io_cleanup(struct fb_info *info)
+{
+ struct fb_deferred_io *fbdefio = info->fbdefio;
+
+ fb_deferred_io_release(info);
kvfree(info->pagerefs);
mutex_destroy(&fbdefio->lock);
diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
index 14a7d404062c..0a2c47df01f4 100644
--- a/drivers/video/fbdev/core/fbcon.c
+++ b/drivers/video/fbdev/core/fbcon.c
@@ -26,7 +26,7 @@
*
* Hardware cursor support added by Emmanuel Marty (core@ggi-project.org)
* Smart redraw scrolling, arbitrary font width support, 512char font support
- * and software scrollback added by
+ * and software scrollback added by
* Jakub Jelinek (jj@ultra.linux.cz)
*
* Random hacking by Martin Mares <mj@ucw.cz>
@@ -127,7 +127,7 @@ static int logo_shown = FBCON_LOGO_CANSHOW;
/* console mappings */
static unsigned int first_fb_vc;
static unsigned int last_fb_vc = MAX_NR_CONSOLES - 1;
-static int fbcon_is_default = 1;
+static int fbcon_is_default = 1;
static int primary_device = -1;
static int fbcon_has_console_bind;
@@ -415,12 +415,12 @@ static int __init fb_console_setup(char *this_opt)
strscpy(fontname, options + 5, sizeof(fontname));
continue;
}
-
+
if (!strncmp(options, "scrollback:", 11)) {
pr_warn("Ignoring scrollback size option\n");
continue;
}
-
+
if (!strncmp(options, "map:", 4)) {
options += 4;
if (*options) {
@@ -446,7 +446,7 @@ static int __init fb_console_setup(char *this_opt)
last_fb_vc = simple_strtoul(options, &options, 10) - 1;
if (last_fb_vc < first_fb_vc || last_fb_vc >= MAX_NR_CONSOLES)
last_fb_vc = MAX_NR_CONSOLES - 1;
- fbcon_is_default = 0;
+ fbcon_is_default = 0;
continue;
}
@@ -940,7 +940,7 @@ static const char *fbcon_startup(void)
info = fbcon_registered_fb[info_idx];
if (!info)
return NULL;
-
+
if (fbcon_open(info))
return NULL;
@@ -958,7 +958,7 @@ static const char *fbcon_startup(void)
set_blitting_type(vc, info);
/* Setup default font */
- if (!p->fontdata && !vc->vc_font.data) {
+ if (!p->fontdata) {
if (!fontname[0] || !(font = find_font(fontname)))
font = get_default_font(info->var.xres,
info->var.yres,
@@ -968,8 +968,6 @@ static const char *fbcon_startup(void)
vc->vc_font.height = font->height;
vc->vc_font.data = (void *)(p->fontdata = font->data);
vc->vc_font.charcount = font->charcount;
- } else {
- p->fontdata = vc->vc_font.data;
}
cols = FBCON_SWAP(ops->rotate, info->var.xres, info->var.yres);
@@ -1135,9 +1133,9 @@ static void fbcon_init(struct vc_data *vc, int init)
ops->p = &fb_display[fg_console];
}
-static void fbcon_free_font(struct fbcon_display *p, bool freefont)
+static void fbcon_free_font(struct fbcon_display *p)
{
- if (freefont && p->userfont && p->fontdata && (--REFCOUNT(p->fontdata) == 0))
+ if (p->userfont && p->fontdata && (--REFCOUNT(p->fontdata) == 0))
kfree(p->fontdata - FONT_EXTRA_WORDS * sizeof(int));
p->fontdata = NULL;
p->userfont = 0;
@@ -1172,8 +1170,8 @@ static void fbcon_deinit(struct vc_data *vc)
struct fb_info *info;
struct fbcon_ops *ops;
int idx;
- bool free_font = true;
+ fbcon_free_font(p);
idx = con2fb_map[vc->vc_num];
if (idx == -1)
@@ -1184,8 +1182,6 @@ static void fbcon_deinit(struct vc_data *vc)
if (!info)
goto finished;
- if (info->flags & FBINFO_MISC_FIRMWARE)
- free_font = false;
ops = info->fbcon_par;
if (!ops)
@@ -1197,9 +1193,8 @@ static void fbcon_deinit(struct vc_data *vc)
ops->initialized = false;
finished:
- fbcon_free_font(p, free_font);
- if (free_font)
- vc->vc_font.data = NULL;
+ fbcon_free_font(p);
+ vc->vc_font.data = NULL;
if (vc->vc_hi_font_mask && vc->vc_screenbuf)
set_vc_hi_font(vc, false);
@@ -1999,7 +1994,7 @@ static void updatescrollmode(struct fbcon_display *p,
#define PITCH(w) (((w) + 7) >> 3)
#define CALC_FONTSZ(h, p, c) ((h) * (p) * (c)) /* size = height * pitch * charcount */
-static int fbcon_resize(struct vc_data *vc, unsigned int width,
+static int fbcon_resize(struct vc_data *vc, unsigned int width,
unsigned int height, unsigned int user)
{
struct fb_info *info = fbcon_info_from_console(vc->vc_num);
@@ -2174,7 +2169,7 @@ static int fbcon_switch(struct vc_data *vc)
ops->update_start(info);
}
- fbcon_set_palette(vc, color_table);
+ fbcon_set_palette(vc, color_table);
fbcon_clear_margins(vc, 0);
if (logo_shown == FBCON_LOGO_DRAW) {
@@ -2271,7 +2266,7 @@ static int fbcon_debug_leave(struct vc_data *vc)
return 0;
}
-static int fbcon_get_font(struct vc_data *vc, struct console_font *font)
+static int fbcon_get_font(struct vc_data *vc, struct console_font *font, unsigned int vpitch)
{
u8 *fontdata = vc->vc_font.data;
u8 *data = font->data;
@@ -2279,6 +2274,8 @@ static int fbcon_get_font(struct vc_data *vc, struct console_font *font)
font->width = vc->vc_font.width;
font->height = vc->vc_font.height;
+ if (font->height > vpitch)
+ return -ENOSPC;
font->charcount = vc->vc_hi_font_mask ? 512 : 256;
if (!font->data)
return 0;
@@ -2290,8 +2287,8 @@ static int fbcon_get_font(struct vc_data *vc, struct console_font *font)
for (i = 0; i < font->charcount; i++) {
memcpy(data, fontdata, j);
- memset(data + j, 0, 32 - j);
- data += 32;
+ memset(data + j, 0, vpitch - j);
+ data += vpitch;
fontdata += j;
}
} else if (font->width <= 16) {
@@ -2301,8 +2298,8 @@ static int fbcon_get_font(struct vc_data *vc, struct console_font *font)
for (i = 0; i < font->charcount; i++) {
memcpy(data, fontdata, j);
- memset(data + j, 0, 64 - j);
- data += 64;
+ memset(data + j, 0, 2*vpitch - j);
+ data += 2*vpitch;
fontdata += j;
}
} else if (font->width <= 24) {
@@ -2316,8 +2313,8 @@ static int fbcon_get_font(struct vc_data *vc, struct console_font *font)
*data++ = fontdata[2];
fontdata += sizeof(u32);
}
- memset(data, 0, 3 * (32 - j));
- data += 3 * (32 - j);
+ memset(data, 0, 3 * (vpitch - j));
+ data += 3 * (vpitch - j);
}
} else {
j = vc->vc_font.height * 4;
@@ -2326,8 +2323,8 @@ static int fbcon_get_font(struct vc_data *vc, struct console_font *font)
for (i = 0; i < font->charcount; i++) {
memcpy(data, fontdata, j);
- memset(data + j, 0, 128 - j);
- data += 128;
+ memset(data + j, 0, 4 * vpitch - j);
+ data += 4 * vpitch;
fontdata += j;
}
}
@@ -2343,7 +2340,7 @@ static void set_vc_hi_font(struct vc_data *vc, bool set)
vc->vc_complement_mask >>= 1;
vc->vc_s_complement_mask >>= 1;
}
-
+
/* ++Edmund: reorder the attribute bits */
if (vc->vc_can_do_color) {
unsigned short *cp =
@@ -2366,7 +2363,7 @@ static void set_vc_hi_font(struct vc_data *vc, bool set)
vc->vc_complement_mask <<= 1;
vc->vc_s_complement_mask <<= 1;
}
-
+
/* ++Edmund: reorder the attribute bits */
{
unsigned short *cp =
@@ -2462,19 +2459,12 @@ err_out:
}
/*
- * User asked to set font; we are guaranteed that
- * a) width and height are in range 1..32
- * b) charcount does not exceed 512
- * but lets not assume that, since someone might someday want to use larger
- * fonts. And charcount of 512 is small for unicode support.
- *
- * However, user space gives the font in 32 rows , regardless of
- * actual font height. So a new API is needed if support for larger fonts
- * is ever implemented.
+ * User asked to set font; we are guaranteed that charcount does not exceed 512
+ * but lets not assume that, since charcount of 512 is small for unicode support.
*/
static int fbcon_set_font(struct vc_data *vc, struct console_font *font,
- unsigned int flags)
+ unsigned int vpitch, unsigned int flags)
{
struct fb_info *info = fbcon_info_from_console(vc->vc_num);
unsigned charcount = font->charcount;
@@ -2495,9 +2485,12 @@ static int fbcon_set_font(struct vc_data *vc, struct console_font *font,
h > FBCON_SWAP(info->var.rotate, info->var.yres, info->var.xres))
return -EINVAL;
+ if (font->width > 32 || font->height > 32)
+ return -EINVAL;
+
/* Make sure drawing engine can handle the font */
- if (!(info->pixmap.blit_x & (1 << (font->width - 1))) ||
- !(info->pixmap.blit_y & (1 << (font->height - 1))))
+ if (!(info->pixmap.blit_x & BIT(font->width - 1)) ||
+ !(info->pixmap.blit_y & BIT(font->height - 1)))
return -EINVAL;
/* Make sure driver can handle the font length */
@@ -2517,7 +2510,7 @@ static int fbcon_set_font(struct vc_data *vc, struct console_font *font,
FNTSIZE(new_data) = size;
REFCOUNT(new_data) = 0; /* usage counter */
for (i=0; i< charcount; i++) {
- memcpy(new_data + i*h*pitch, data + i*32*pitch, h*pitch);
+ memcpy(new_data + i*h*pitch, data + i*vpitch*pitch, h*pitch);
}
/* Since linux has a nice crc32 function use it for counting font
@@ -2528,7 +2521,7 @@ static int fbcon_set_font(struct vc_data *vc, struct console_font *font,
/* Check if the same font is on some other console already */
for (i = first_fb_vc; i <= last_fb_vc; i++) {
struct vc_data *tmp = vc_cons[i].d;
-
+
if (fb_display[i].userfont &&
fb_display[i].fontdata &&
FNTSUM(fb_display[i].fontdata) == csum &&
@@ -3436,5 +3429,5 @@ void __exit fb_console_exit(void)
do_unregister_con_driver(&fb_con);
console_unlock();
-}
+}
#endif
diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
index 3a6c8458eb8d..875541ff185b 100644
--- a/drivers/video/fbdev/core/fbmem.c
+++ b/drivers/video/fbdev/core/fbmem.c
@@ -13,7 +13,6 @@
#include <linux/module.h>
-#include <linux/aperture.h>
#include <linux/compat.h>
#include <linux/types.h>
#include <linux/errno.h>
@@ -1454,6 +1453,10 @@ __releases(&info->lock)
struct fb_info * const info = file->private_data;
lock_fb_info(info);
+#if IS_ENABLED(CONFIG_FB_DEFERRED_IO)
+ if (info->fbdefio)
+ fb_deferred_io_release(info);
+#endif
if (info->fbops->fb_release)
info->fbops->fb_release(info,1);
module_put(info->fbops->owner);
@@ -1653,32 +1656,6 @@ static void do_unregister_framebuffer(struct fb_info *fb_info)
put_fb_info(fb_info);
}
-static int fb_aperture_acquire_for_platform_device(struct fb_info *fb_info)
-{
- struct apertures_struct *ap = fb_info->apertures;
- struct device *dev = fb_info->device;
- struct platform_device *pdev;
- unsigned int i;
- int ret;
-
- if (!ap)
- return 0;
-
- if (!dev_is_platform(dev))
- return 0;
-
- pdev = to_platform_device(dev);
-
- for (ret = 0, i = 0; i < ap->count; ++i) {
- ret = devm_aperture_acquire_for_platform_device(pdev, ap->ranges[i].base,
- ap->ranges[i].size);
- if (ret)
- break;
- }
-
- return ret;
-}
-
/**
* register_framebuffer - registers a frame buffer device
* @fb_info: frame buffer info structure
@@ -1693,12 +1670,6 @@ register_framebuffer(struct fb_info *fb_info)
{
int ret;
- if (fb_info->flags & FBINFO_MISC_FIRMWARE) {
- ret = fb_aperture_acquire_for_platform_device(fb_info);
- if (ret)
- return ret;
- }
-
mutex_lock(&registration_lock);
ret = do_register_framebuffer(fb_info);
mutex_unlock(&registration_lock);
diff --git a/drivers/video/fbdev/core/fbmon.c b/drivers/video/fbdev/core/fbmon.c
index b0e690f41025..79e5bfbdd34c 100644
--- a/drivers/video/fbdev/core/fbmon.c
+++ b/drivers/video/fbdev/core/fbmon.c
@@ -1050,7 +1050,7 @@ static u32 fb_get_vblank(u32 hfreq)
}
/**
- * fb_get_hblank_by_freq - get horizontal blank time given hfreq
+ * fb_get_hblank_by_hfreq - get horizontal blank time given hfreq
* @hfreq: horizontal freq
* @xres: horizontal resolution in pixels
*
diff --git a/drivers/video/fbdev/core/fbsysfs.c b/drivers/video/fbdev/core/fbsysfs.c
index 4d7f63892dcc..0c33c4adcd79 100644
--- a/drivers/video/fbdev/core/fbsysfs.c
+++ b/drivers/video/fbdev/core/fbsysfs.c
@@ -88,7 +88,6 @@ void framebuffer_release(struct fb_info *info)
mutex_destroy(&info->bl_curve_mutex);
#endif
- kfree(info->apertures);
kfree(info);
}
EXPORT_SYMBOL(framebuffer_release);
diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c
index 16c1aaae9afa..a5779fb453a2 100644
--- a/drivers/video/fbdev/efifb.c
+++ b/drivers/video/fbdev/efifb.c
@@ -7,6 +7,7 @@
*
*/
+#include <linux/aperture.h>
#include <linux/kernel.h>
#include <linux/efi.h>
#include <linux/efi-bgrt.h>
@@ -49,6 +50,12 @@ static u64 mem_flags = EFI_MEMORY_WC | EFI_MEMORY_UC;
static struct pci_dev *efifb_pci_dev; /* dev with BAR covering the efifb */
+struct efifb_par {
+ u32 pseudo_palette[16];
+ resource_size_t base;
+ resource_size_t size;
+};
+
static struct fb_var_screeninfo efifb_defined = {
.activate = FB_ACTIVATE_NOW,
.height = -1,
@@ -249,6 +256,8 @@ static inline void efifb_show_boot_graphics(struct fb_info *info) {}
*/
static void efifb_destroy(struct fb_info *info)
{
+ struct efifb_par *par = info->par;
+
if (efifb_pci_dev)
pm_runtime_put(&efifb_pci_dev->dev);
@@ -260,8 +269,7 @@ static void efifb_destroy(struct fb_info *info)
}
if (request_mem_succeeded)
- release_mem_region(info->apertures->ranges[0].base,
- info->apertures->ranges[0].size);
+ release_mem_region(par->base, par->size);
fb_dealloc_cmap(&info->cmap);
framebuffer_release(info);
@@ -351,6 +359,7 @@ static u64 bar_offset;
static int efifb_probe(struct platform_device *dev)
{
struct fb_info *info;
+ struct efifb_par *par;
int err, orientation;
unsigned int size_vmode;
unsigned int size_remap;
@@ -447,22 +456,17 @@ static int efifb_probe(struct platform_device *dev)
efifb_fix.smem_start);
}
- info = framebuffer_alloc(sizeof(u32) * 16, &dev->dev);
+ info = framebuffer_alloc(sizeof(*par), &dev->dev);
if (!info) {
err = -ENOMEM;
goto err_release_mem;
}
platform_set_drvdata(dev, info);
- info->pseudo_palette = info->par;
- info->par = NULL;
+ par = info->par;
+ info->pseudo_palette = par->pseudo_palette;
- info->apertures = alloc_apertures(1);
- if (!info->apertures) {
- err = -ENOMEM;
- goto err_release_fb;
- }
- info->apertures->ranges[0].base = efifb_fix.smem_start;
- info->apertures->ranges[0].size = size_remap;
+ par->base = efifb_fix.smem_start;
+ par->size = size_remap;
if (efi_enabled(EFI_MEMMAP) &&
!efi_mem_desc_lookup(efifb_fix.smem_start, &md)) {
@@ -551,7 +555,7 @@ static int efifb_probe(struct platform_device *dev)
info->fbops = &efifb_ops;
info->var = efifb_defined;
info->fix = efifb_fix;
- info->flags = FBINFO_FLAG_DEFAULT | FBINFO_MISC_FIRMWARE;
+ info->flags = FBINFO_FLAG_DEFAULT;
orientation = drm_get_panel_orientation_quirk(efifb_defined.xres,
efifb_defined.yres);
@@ -584,6 +588,11 @@ static int efifb_probe(struct platform_device *dev)
if (efifb_pci_dev)
WARN_ON(pm_runtime_get_sync(&efifb_pci_dev->dev) < 0);
+ err = devm_aperture_acquire_for_platform_device(dev, par->base, par->size);
+ if (err) {
+ pr_err("efifb: cannot acquire aperture\n");
+ goto err_put_rpm_ref;
+ }
err = register_framebuffer(info);
if (err < 0) {
pr_err("efifb: cannot register framebuffer\n");
diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c
index fdbf02b42723..ec3f6cf05f8c 100644
--- a/drivers/video/fbdev/hyperv_fb.c
+++ b/drivers/video/fbdev/hyperv_fb.c
@@ -994,13 +994,10 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info)
struct pci_dev *pdev = NULL;
void __iomem *fb_virt;
int gen2vm = efi_enabled(EFI_BOOT);
+ resource_size_t base, size;
phys_addr_t paddr;
int ret;
- info->apertures = alloc_apertures(1);
- if (!info->apertures)
- return -ENOMEM;
-
if (!gen2vm) {
pdev = pci_get_device(PCI_VENDOR_ID_MICROSOFT,
PCI_DEVICE_ID_HYPERV_VIDEO, NULL);
@@ -1009,8 +1006,8 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info)
return -ENODEV;
}
- info->apertures->ranges[0].base = pci_resource_start(pdev, 0);
- info->apertures->ranges[0].size = pci_resource_len(pdev, 0);
+ base = pci_resource_start(pdev, 0);
+ size = pci_resource_len(pdev, 0);
/*
* For Gen 1 VM, we can directly use the contiguous memory
@@ -1033,8 +1030,8 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info)
}
pr_info("Unable to allocate enough contiguous physical memory on Gen 1 VM. Using MMIO instead.\n");
} else {
- info->apertures->ranges[0].base = screen_info.lfb_base;
- info->apertures->ranges[0].size = screen_info.lfb_size;
+ base = screen_info.lfb_base;
+ size = screen_info.lfb_size;
}
/*
@@ -1076,9 +1073,7 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info)
info->screen_size = dio_fb_size;
getmem_done:
- aperture_remove_conflicting_devices(info->apertures->ranges[0].base,
- info->apertures->ranges[0].size,
- false, KBUILD_MODNAME);
+ aperture_remove_conflicting_devices(base, size, false, KBUILD_MODNAME);
if (gen2vm) {
/* framebuffer is reallocated, clear screen_info to avoid misuse from kexec */
@@ -1239,8 +1234,7 @@ error1:
return ret;
}
-
-static int hvfb_remove(struct hv_device *hdev)
+static void hvfb_remove(struct hv_device *hdev)
{
struct fb_info *info = hv_get_drvdata(hdev);
struct hvfb_par *par = info->par;
@@ -1261,8 +1255,6 @@ static int hvfb_remove(struct hv_device *hdev)
hvfb_putmem(hdev, info);
framebuffer_release(info);
-
- return 0;
}
static int hvfb_suspend(struct hv_device *hdev)
diff --git a/drivers/video/fbdev/mx3fb.c b/drivers/video/fbdev/mx3fb.c
index b945b68984b9..76771e126d0a 100644
--- a/drivers/video/fbdev/mx3fb.c
+++ b/drivers/video/fbdev/mx3fb.c
@@ -283,12 +283,7 @@ static int mx3fb_bl_get_brightness(struct backlight_device *bl)
static int mx3fb_bl_update_status(struct backlight_device *bl)
{
struct mx3fb_data *fbd = bl_get_data(bl);
- int brightness = bl->props.brightness;
-
- if (bl->props.power != FB_BLANK_UNBLANK)
- brightness = 0;
- if (bl->props.fb_blank != FB_BLANK_UNBLANK)
- brightness = 0;
+ int brightness = backlight_get_brightness(bl);
fbd->backlight_level = (fbd->backlight_level & ~0xFF) | brightness;
diff --git a/drivers/video/fbdev/nvidia/nv_backlight.c b/drivers/video/fbdev/nvidia/nv_backlight.c
index 2ce53529f636..503a7a683855 100644
--- a/drivers/video/fbdev/nvidia/nv_backlight.c
+++ b/drivers/video/fbdev/nvidia/nv_backlight.c
@@ -49,17 +49,11 @@ static int nvidia_bl_update_status(struct backlight_device *bd)
{
struct nvidia_par *par = bl_get_data(bd);
u32 tmp_pcrt, tmp_pmc, fpcontrol;
- int level;
+ int level = backlight_get_brightness(bd);
if (!par->FlatPanel)
return 0;
- if (bd->props.power != FB_BLANK_UNBLANK ||
- bd->props.fb_blank != FB_BLANK_UNBLANK)
- level = 0;
- else
- level = bd->props.brightness;
-
tmp_pmc = NV_RD32(par->PMC, 0x10F0) & 0x0000FFFF;
tmp_pcrt = NV_RD32(par->PCRTC0, 0x081C) & 0xFFFFFFFC;
fpcontrol = NV_RD32(par->PRAMDAC, 0x0848) & 0xCFFFFFCC;
diff --git a/drivers/video/fbdev/nvidia/nvidia.c b/drivers/video/fbdev/nvidia/nvidia.c
index 1960916098d4..e60a276b4855 100644
--- a/drivers/video/fbdev/nvidia/nvidia.c
+++ b/drivers/video/fbdev/nvidia/nvidia.c
@@ -1197,17 +1197,17 @@ static int nvidia_set_fbinfo(struct fb_info *info)
return nvidiafb_check_var(&info->var, info);
}
-static u32 nvidia_get_chipset(struct fb_info *info)
+static u32 nvidia_get_chipset(struct pci_dev *pci_dev,
+ volatile u32 __iomem *REGS)
{
- struct nvidia_par *par = info->par;
- u32 id = (par->pci_dev->vendor << 16) | par->pci_dev->device;
+ u32 id = (pci_dev->vendor << 16) | pci_dev->device;
printk(KERN_INFO PFX "Device ID: %x \n", id);
if ((id & 0xfff0) == 0x00f0 ||
(id & 0xfff0) == 0x02e0) {
/* pci-e */
- id = NV_RD32(par->REGS, 0x1800);
+ id = NV_RD32(REGS, 0x1800);
if ((id & 0x0000ffff) == 0x000010DE)
id = 0x10DE0000 | (id >> 16);
@@ -1220,12 +1220,11 @@ static u32 nvidia_get_chipset(struct fb_info *info)
return id;
}
-static u32 nvidia_get_arch(struct fb_info *info)
+static u32 nvidia_get_arch(u32 Chipset)
{
- struct nvidia_par *par = info->par;
u32 arch = 0;
- switch (par->Chipset & 0x0ff0) {
+ switch (Chipset & 0x0ff0) {
case 0x0100: /* GeForce 256 */
case 0x0110: /* GeForce2 MX */
case 0x0150: /* GeForce2 */
@@ -1278,16 +1277,44 @@ static int nvidiafb_probe(struct pci_dev *pd, const struct pci_device_id *ent)
struct fb_info *info;
unsigned short cmd;
int ret;
+ volatile u32 __iomem *REGS;
+ int Chipset;
+ u32 Architecture;
NVTRACE_ENTER();
assert(pd != NULL);
+ if (pci_enable_device(pd)) {
+ printk(KERN_ERR PFX "cannot enable PCI device\n");
+ return -ENODEV;
+ }
+
+ /* enable IO and mem if not already done */
+ pci_read_config_word(pd, PCI_COMMAND, &cmd);
+ cmd |= (PCI_COMMAND_IO | PCI_COMMAND_MEMORY);
+ pci_write_config_word(pd, PCI_COMMAND, cmd);
+
+ nvidiafb_fix.mmio_start = pci_resource_start(pd, 0);
+ nvidiafb_fix.mmio_len = pci_resource_len(pd, 0);
+
+ REGS = ioremap(nvidiafb_fix.mmio_start, nvidiafb_fix.mmio_len);
+ if (!REGS) {
+ printk(KERN_ERR PFX "cannot ioremap MMIO base\n");
+ return -ENODEV;
+ }
+
+ Chipset = nvidia_get_chipset(pd, REGS);
+ Architecture = nvidia_get_arch(Chipset);
+ if (Architecture == 0) {
+ printk(KERN_ERR PFX "unknown NV_ARCH\n");
+ goto err_out;
+ }
+
ret = aperture_remove_conflicting_pci_devices(pd, "nvidiafb");
if (ret)
- return ret;
+ goto err_out;
info = framebuffer_alloc(sizeof(struct nvidia_par), &pd->dev);
-
if (!info)
goto err_out;
@@ -1298,11 +1325,6 @@ static int nvidiafb_probe(struct pci_dev *pd, const struct pci_device_id *ent)
if (info->pixmap.addr == NULL)
goto err_out_kfree;
- if (pci_enable_device(pd)) {
- printk(KERN_ERR PFX "cannot enable PCI device\n");
- goto err_out_enable;
- }
-
if (pci_request_regions(pd, "nvidiafb")) {
printk(KERN_ERR PFX "cannot request PCI regions\n");
goto err_out_enable;
@@ -1318,34 +1340,17 @@ static int nvidiafb_probe(struct pci_dev *pd, const struct pci_device_id *ent)
par->paneltweak = paneltweak;
par->reverse_i2c = reverse_i2c;
- /* enable IO and mem if not already done */
- pci_read_config_word(pd, PCI_COMMAND, &cmd);
- cmd |= (PCI_COMMAND_IO | PCI_COMMAND_MEMORY);
- pci_write_config_word(pd, PCI_COMMAND, cmd);
-
- nvidiafb_fix.mmio_start = pci_resource_start(pd, 0);
nvidiafb_fix.smem_start = pci_resource_start(pd, 1);
- nvidiafb_fix.mmio_len = pci_resource_len(pd, 0);
-
- par->REGS = ioremap(nvidiafb_fix.mmio_start, nvidiafb_fix.mmio_len);
- if (!par->REGS) {
- printk(KERN_ERR PFX "cannot ioremap MMIO base\n");
- goto err_out_free_base0;
- }
+ par->REGS = REGS;
- par->Chipset = nvidia_get_chipset(info);
- par->Architecture = nvidia_get_arch(info);
-
- if (par->Architecture == 0) {
- printk(KERN_ERR PFX "unknown NV_ARCH\n");
- goto err_out_arch;
- }
+ par->Chipset = Chipset;
+ par->Architecture = Architecture;
sprintf(nvidiafb_fix.id, "NV%x", (pd->device & 0x0ff0) >> 4);
if (NVCommonSetup(info))
- goto err_out_arch;
+ goto err_out_free_base0;
par->FbAddress = nvidiafb_fix.smem_start;
par->FbMapSize = par->RamAmountKBytes * 1024;
@@ -1401,7 +1406,6 @@ static int nvidiafb_probe(struct pci_dev *pd, const struct pci_device_id *ent)
goto err_out_iounmap_fb;
}
-
printk(KERN_INFO PFX
"PCI nVidia %s framebuffer (%dMB @ 0x%lX)\n",
info->fix.id,
@@ -1415,15 +1419,14 @@ err_out_iounmap_fb:
err_out_free_base1:
fb_destroy_modedb(info->monspecs.modedb);
nvidia_delete_i2c_busses(par);
-err_out_arch:
- iounmap(par->REGS);
- err_out_free_base0:
+err_out_free_base0:
pci_release_regions(pd);
err_out_enable:
kfree(info->pixmap.addr);
err_out_kfree:
framebuffer_release(info);
err_out:
+ iounmap(REGS);
return -ENODEV;
}
diff --git a/drivers/video/fbdev/offb.c b/drivers/video/fbdev/offb.c
index 91001990e351..f7ad6bc9d02d 100644
--- a/drivers/video/fbdev/offb.c
+++ b/drivers/video/fbdev/offb.c
@@ -12,6 +12,7 @@
* more details.
*/
+#include <linux/aperture.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
@@ -53,10 +54,11 @@ struct offb_par {
volatile void __iomem *cmap_data;
int cmap_type;
int blanked;
+ u32 pseudo_palette[16];
+ resource_size_t base;
+ resource_size_t size;
};
-struct offb_par default_par;
-
#ifdef CONFIG_PPC32
extern boot_infos_t *boot_infos;
#endif
@@ -280,9 +282,11 @@ static int offb_set_par(struct fb_info *info)
static void offb_destroy(struct fb_info *info)
{
+ struct offb_par *par = info->par;
+
if (info->screen_base)
iounmap(info->screen_base);
- release_mem_region(info->apertures->ranges[0].base, info->apertures->ranges[0].size);
+ release_mem_region(par->base, par->size);
fb_dealloc_cmap(&info->cmap);
framebuffer_release(info);
}
@@ -393,11 +397,11 @@ static void offb_init_fb(struct platform_device *parent, const char *name,
int foreign_endian, struct device_node *dp)
{
unsigned long res_size = pitch * height;
- struct offb_par *par = &default_par;
unsigned long res_start = address;
struct fb_fix_screeninfo *fix;
struct fb_var_screeninfo *var;
struct fb_info *info;
+ struct offb_par *par;
if (!request_mem_region(res_start, res_size, "offb"))
return;
@@ -411,17 +415,15 @@ static void offb_init_fb(struct platform_device *parent, const char *name,
return;
}
- info = framebuffer_alloc(sizeof(u32) * 16, &parent->dev);
-
+ info = framebuffer_alloc(sizeof(*par), &parent->dev);
if (!info) {
release_mem_region(res_start, res_size);
return;
}
platform_set_drvdata(parent, info);
-
+ par = info->par;
fix = &info->fix;
var = &info->var;
- info->par = par;
if (name) {
strcpy(fix->id, "OFfb ");
@@ -506,20 +508,18 @@ static void offb_init_fb(struct platform_device *parent, const char *name,
var->sync = 0;
var->vmode = FB_VMODE_NONINTERLACED;
- /* set offb aperture size for generic probing */
- info->apertures = alloc_apertures(1);
- if (!info->apertures)
- goto out_aper;
- info->apertures->ranges[0].base = address;
- info->apertures->ranges[0].size = fix->smem_len;
+ par->base = address;
+ par->size = fix->smem_len;
info->fbops = &offb_ops;
info->screen_base = ioremap(address, fix->smem_len);
- info->pseudo_palette = (void *) (info + 1);
- info->flags = FBINFO_DEFAULT | FBINFO_MISC_FIRMWARE | foreign_endian;
+ info->pseudo_palette = par->pseudo_palette;
+ info->flags = FBINFO_DEFAULT | foreign_endian;
fb_alloc_cmap(&info->cmap, 256, 0);
+ if (devm_aperture_acquire_for_platform_device(parent, par->base, par->size) < 0)
+ goto out_err;
if (register_framebuffer(info) < 0)
goto out_err;
@@ -529,7 +529,6 @@ static void offb_init_fb(struct platform_device *parent, const char *name,
out_err:
fb_dealloc_cmap(&info->cmap);
iounmap(info->screen_base);
-out_aper:
iounmap(par->cmap_adr);
par->cmap_adr = NULL;
framebuffer_release(info);
diff --git a/drivers/video/fbdev/omap/Kconfig b/drivers/video/fbdev/omap/Kconfig
index b1786cf1b486..a6548283451f 100644
--- a/drivers/video/fbdev/omap/Kconfig
+++ b/drivers/video/fbdev/omap/Kconfig
@@ -40,15 +40,6 @@ config FB_OMAP_LCD_MIPID
the Mobile Industry Processor Interface DBI-C/DCS
specification. (Supported LCDs: Philips LPH8923, Sharp LS041Y3)
-config FB_OMAP_LCD_H3
- bool "TPS65010 LCD controller on OMAP-H3"
- depends on MACH_OMAP_H3 || COMPILE_TEST
- depends on TPS65010=y
- default y
- help
- Say Y here if you want to have support for the LCD on the
- H3 board.
-
config FB_OMAP_DMA_TUNE
bool "Set DMA SDRAM access priority high"
depends on FB_OMAP
diff --git a/drivers/video/fbdev/omap/Makefile b/drivers/video/fbdev/omap/Makefile
index b88e02f5cb1f..504edb9c09dd 100644
--- a/drivers/video/fbdev/omap/Makefile
+++ b/drivers/video/fbdev/omap/Makefile
@@ -17,16 +17,10 @@ objs-y$(CONFIG_FB_OMAP_LCDC_EXTERNAL) += sossi.o
objs-y$(CONFIG_FB_OMAP_LCDC_HWA742) += hwa742.o
lcds-y$(CONFIG_MACH_AMS_DELTA) += lcd_ams_delta.o
-lcds-y$(CONFIG_FB_OMAP_LCD_H3) += lcd_h3.o
lcds-y$(CONFIG_MACH_OMAP_PALMTE) += lcd_palmte.o
-lcds-y$(CONFIG_MACH_OMAP_PALMTT) += lcd_palmtt.o
-lcds-y$(CONFIG_MACH_OMAP_PALMZ71) += lcd_palmz71.o
-lcds-$(CONFIG_ARCH_OMAP16XX)$(CONFIG_MACH_OMAP_INNOVATOR) += lcd_inn1610.o
-lcds-$(CONFIG_ARCH_OMAP15XX)$(CONFIG_MACH_OMAP_INNOVATOR) += lcd_inn1510.o
lcds-y$(CONFIG_MACH_OMAP_OSK) += lcd_osk.o
lcds-y$(CONFIG_FB_OMAP_LCD_MIPID) += lcd_mipid.o
-lcds-y$(CONFIG_MACH_HERALD) += lcd_htcherald.o
omapfb-objs := $(objs-yy)
diff --git a/drivers/video/fbdev/omap/lcd_h3.c b/drivers/video/fbdev/omap/lcd_h3.c
deleted file mode 100644
index 1766dff767bb..000000000000
--- a/drivers/video/fbdev/omap/lcd_h3.c
+++ /dev/null
@@ -1,82 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * LCD panel support for the TI OMAP H3 board
- *
- * Copyright (C) 2004 Nokia Corporation
- * Author: Imre Deak <imre.deak@nokia.com>
- */
-
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/mfd/tps65010.h>
-#include <linux/gpio.h>
-
-#include "omapfb.h"
-
-#define MODULE_NAME "omapfb-lcd_h3"
-
-static int h3_panel_enable(struct lcd_panel *panel)
-{
- int r = 0;
-
- /* GPIO1 and GPIO2 of TPS65010 send LCD_ENBKL and LCD_ENVDD signals */
- r = tps65010_set_gpio_out_value(GPIO1, HIGH);
- if (!r)
- r = tps65010_set_gpio_out_value(GPIO2, HIGH);
- if (r)
- pr_err(MODULE_NAME ": Unable to turn on LCD panel\n");
-
- return r;
-}
-
-static void h3_panel_disable(struct lcd_panel *panel)
-{
- int r = 0;
-
- /* GPIO1 and GPIO2 of TPS65010 send LCD_ENBKL and LCD_ENVDD signals */
- r = tps65010_set_gpio_out_value(GPIO1, LOW);
- if (!r)
- tps65010_set_gpio_out_value(GPIO2, LOW);
- if (r)
- pr_err(MODULE_NAME ": Unable to turn off LCD panel\n");
-}
-
-static struct lcd_panel h3_panel = {
- .name = "h3",
- .config = OMAP_LCDC_PANEL_TFT,
-
- .data_lines = 16,
- .bpp = 16,
- .x_res = 240,
- .y_res = 320,
- .pixel_clock = 12000,
- .hsw = 12,
- .hfp = 14,
- .hbp = 72 - 12,
- .vsw = 1,
- .vfp = 1,
- .vbp = 0,
- .pcd = 0,
-
- .enable = h3_panel_enable,
- .disable = h3_panel_disable,
-};
-
-static int h3_panel_probe(struct platform_device *pdev)
-{
- omapfb_register_panel(&h3_panel);
- return 0;
-}
-
-static struct platform_driver h3_panel_driver = {
- .probe = h3_panel_probe,
- .driver = {
- .name = "lcd_h3",
- },
-};
-
-module_platform_driver(h3_panel_driver);
-
-MODULE_AUTHOR("Imre Deak");
-MODULE_DESCRIPTION("LCD panel support for the TI OMAP H3 board");
-MODULE_LICENSE("GPL");
diff --git a/drivers/video/fbdev/omap/lcd_htcherald.c b/drivers/video/fbdev/omap/lcd_htcherald.c
deleted file mode 100644
index d1c615c516dd..000000000000
--- a/drivers/video/fbdev/omap/lcd_htcherald.c
+++ /dev/null
@@ -1,59 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * File: drivers/video/omap/lcd-htcherald.c
- *
- * LCD panel support for the HTC Herald
- *
- * Copyright (C) 2009 Cory Maccarrone <darkstar6262@gmail.com>
- * Copyright (C) 2009 Wing Linux
- *
- * Based on the lcd_htcwizard.c file from the linwizard project:
- * Copyright (C) linwizard.sourceforge.net
- * Author: Angelo Arrifano <miknix@gmail.com>
- * Based on lcd_h4 by Imre Deak <imre.deak@nokia.com>
- */
-
-#include <linux/module.h>
-#include <linux/platform_device.h>
-
-#include "omapfb.h"
-
-/* Found on WIZ200 (miknix) and some HERA110 models (darkstar62) */
-static struct lcd_panel htcherald_panel_1 = {
- .name = "lcd_herald",
- .config = OMAP_LCDC_PANEL_TFT |
- OMAP_LCDC_INV_HSYNC |
- OMAP_LCDC_INV_VSYNC |
- OMAP_LCDC_INV_PIX_CLOCK,
- .bpp = 16,
- .data_lines = 16,
- .x_res = 240,
- .y_res = 320,
- .pixel_clock = 6093,
- .pcd = 0, /* 15 */
- .hsw = 10,
- .hfp = 10,
- .hbp = 20,
- .vsw = 3,
- .vfp = 2,
- .vbp = 2,
-};
-
-static int htcherald_panel_probe(struct platform_device *pdev)
-{
- omapfb_register_panel(&htcherald_panel_1);
- return 0;
-}
-
-static struct platform_driver htcherald_panel_driver = {
- .probe = htcherald_panel_probe,
- .driver = {
- .name = "lcd_htcherald",
- },
-};
-
-module_platform_driver(htcherald_panel_driver);
-
-MODULE_AUTHOR("Cory Maccarrone");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("LCD panel support for the HTC Herald");
diff --git a/drivers/video/fbdev/omap/lcd_inn1510.c b/drivers/video/fbdev/omap/lcd_inn1510.c
deleted file mode 100644
index bb915637e9b6..000000000000
--- a/drivers/video/fbdev/omap/lcd_inn1510.c
+++ /dev/null
@@ -1,69 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * LCD panel support for the TI OMAP1510 Innovator board
- *
- * Copyright (C) 2004 Nokia Corporation
- * Author: Imre Deak <imre.deak@nokia.com>
- */
-
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/io.h>
-
-#include <linux/soc/ti/omap1-soc.h>
-
-#include "omapfb.h"
-
-static void __iomem *omap1510_fpga_lcd_panel_control;
-
-static int innovator1510_panel_enable(struct lcd_panel *panel)
-{
- __raw_writeb(0x7, omap1510_fpga_lcd_panel_control);
- return 0;
-}
-
-static void innovator1510_panel_disable(struct lcd_panel *panel)
-{
- __raw_writeb(0x0, omap1510_fpga_lcd_panel_control);
-}
-
-static struct lcd_panel innovator1510_panel = {
- .name = "inn1510",
- .config = OMAP_LCDC_PANEL_TFT,
-
- .bpp = 16,
- .data_lines = 16,
- .x_res = 240,
- .y_res = 320,
- .pixel_clock = 12500,
- .hsw = 40,
- .hfp = 40,
- .hbp = 72,
- .vsw = 1,
- .vfp = 1,
- .vbp = 0,
- .pcd = 12,
-
- .enable = innovator1510_panel_enable,
- .disable = innovator1510_panel_disable,
-};
-
-static int innovator1510_panel_probe(struct platform_device *pdev)
-{
- omap1510_fpga_lcd_panel_control = (void __iomem *)pdev->dev.platform_data;
- omapfb_register_panel(&innovator1510_panel);
- return 0;
-}
-
-static struct platform_driver innovator1510_panel_driver = {
- .probe = innovator1510_panel_probe,
- .driver = {
- .name = "lcd_inn1510",
- },
-};
-
-module_platform_driver(innovator1510_panel_driver);
-
-MODULE_AUTHOR("Imre Deak");
-MODULE_DESCRIPTION("LCD panel support for the TI OMAP1510 Innovator board");
-MODULE_LICENSE("GPL");
diff --git a/drivers/video/fbdev/omap/lcd_inn1610.c b/drivers/video/fbdev/omap/lcd_inn1610.c
deleted file mode 100644
index 901b28f35fab..000000000000
--- a/drivers/video/fbdev/omap/lcd_inn1610.c
+++ /dev/null
@@ -1,99 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * LCD panel support for the TI OMAP1610 Innovator board
- *
- * Copyright (C) 2004 Nokia Corporation
- * Author: Imre Deak <imre.deak@nokia.com>
- */
-
-#include <linux/module.h>
-#include <linux/platform_device.h>
-
-#include <linux/gpio.h>
-#include "omapfb.h"
-
-#define MODULE_NAME "omapfb-lcd_h3"
-
-static int innovator1610_panel_init(struct lcd_panel *panel,
- struct omapfb_device *fbdev)
-{
- int r = 0;
-
- /* configure GPIO(14, 15) as outputs */
- if (gpio_request_one(14, GPIOF_OUT_INIT_LOW, "lcd_en0")) {
- pr_err(MODULE_NAME ": can't request GPIO 14\n");
- r = -1;
- goto exit;
- }
- if (gpio_request_one(15, GPIOF_OUT_INIT_LOW, "lcd_en1")) {
- pr_err(MODULE_NAME ": can't request GPIO 15\n");
- gpio_free(14);
- r = -1;
- goto exit;
- }
-exit:
- return r;
-}
-
-static void innovator1610_panel_cleanup(struct lcd_panel *panel)
-{
- gpio_free(15);
- gpio_free(14);
-}
-
-static int innovator1610_panel_enable(struct lcd_panel *panel)
-{
- /* set GPIO14 and GPIO15 high */
- gpio_set_value(14, 1);
- gpio_set_value(15, 1);
- return 0;
-}
-
-static void innovator1610_panel_disable(struct lcd_panel *panel)
-{
- /* set GPIO13, GPIO14 and GPIO15 low */
- gpio_set_value(14, 0);
- gpio_set_value(15, 0);
-}
-
-static struct lcd_panel innovator1610_panel = {
- .name = "inn1610",
- .config = OMAP_LCDC_PANEL_TFT,
-
- .bpp = 16,
- .data_lines = 16,
- .x_res = 320,
- .y_res = 240,
- .pixel_clock = 12500,
- .hsw = 40,
- .hfp = 40,
- .hbp = 72,
- .vsw = 1,
- .vfp = 1,
- .vbp = 0,
- .pcd = 12,
-
- .init = innovator1610_panel_init,
- .cleanup = innovator1610_panel_cleanup,
- .enable = innovator1610_panel_enable,
- .disable = innovator1610_panel_disable,
-};
-
-static int innovator1610_panel_probe(struct platform_device *pdev)
-{
- omapfb_register_panel(&innovator1610_panel);
- return 0;
-}
-
-static struct platform_driver innovator1610_panel_driver = {
- .probe = innovator1610_panel_probe,
- .driver = {
- .name = "lcd_inn1610",
- },
-};
-
-module_platform_driver(innovator1610_panel_driver);
-
-MODULE_AUTHOR("Imre Deak");
-MODULE_DESCRIPTION("LCD panel support for the TI OMAP1610 Innovator board");
-MODULE_LICENSE("GPL");
diff --git a/drivers/video/fbdev/omap/lcd_palmtt.c b/drivers/video/fbdev/omap/lcd_palmtt.c
deleted file mode 100644
index 703af0bc5c92..000000000000
--- a/drivers/video/fbdev/omap/lcd_palmtt.c
+++ /dev/null
@@ -1,65 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * LCD panel support for Palm Tungsten|T
- * Current version : Marek Vasut <marek.vasut@gmail.com>
- *
- * Modified from lcd_inn1510.c
- */
-
-/*
-GPIO11 - backlight
-GPIO12 - screen blanking
-GPIO13 - screen blanking
-*/
-
-#include <linux/platform_device.h>
-#include <linux/module.h>
-#include <linux/io.h>
-#include <linux/gpio.h>
-
-#include "omapfb.h"
-
-static unsigned long palmtt_panel_get_caps(struct lcd_panel *panel)
-{
- return OMAPFB_CAPS_SET_BACKLIGHT;
-}
-
-static struct lcd_panel palmtt_panel = {
- .name = "palmtt",
- .config = OMAP_LCDC_PANEL_TFT | OMAP_LCDC_INV_VSYNC |
- OMAP_LCDC_INV_HSYNC | OMAP_LCDC_HSVS_RISING_EDGE |
- OMAP_LCDC_HSVS_OPPOSITE,
- .bpp = 16,
- .data_lines = 16,
- .x_res = 320,
- .y_res = 320,
- .pixel_clock = 10000,
- .hsw = 4,
- .hfp = 8,
- .hbp = 28,
- .vsw = 1,
- .vfp = 8,
- .vbp = 7,
- .pcd = 0,
-
- .get_caps = palmtt_panel_get_caps,
-};
-
-static int palmtt_panel_probe(struct platform_device *pdev)
-{
- omapfb_register_panel(&palmtt_panel);
- return 0;
-}
-
-static struct platform_driver palmtt_panel_driver = {
- .probe = palmtt_panel_probe,
- .driver = {
- .name = "lcd_palmtt",
- },
-};
-
-module_platform_driver(palmtt_panel_driver);
-
-MODULE_AUTHOR("Marek Vasut <marek.vasut@gmail.com>");
-MODULE_DESCRIPTION("LCD panel support for Palm Tungsten|T");
-MODULE_LICENSE("GPL");
diff --git a/drivers/video/fbdev/omap/lcd_palmz71.c b/drivers/video/fbdev/omap/lcd_palmz71.c
deleted file mode 100644
index a955c908ab14..000000000000
--- a/drivers/video/fbdev/omap/lcd_palmz71.c
+++ /dev/null
@@ -1,59 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * LCD panel support for the Palm Zire71
- *
- * Original version : Romain Goyet
- * Current version : Laurent Gonzalez
- * Modified for zire71 : Marek Vasut
- */
-
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/io.h>
-
-#include "omapfb.h"
-
-static unsigned long palmz71_panel_get_caps(struct lcd_panel *panel)
-{
- return OMAPFB_CAPS_SET_BACKLIGHT;
-}
-
-static struct lcd_panel palmz71_panel = {
- .name = "palmz71",
- .config = OMAP_LCDC_PANEL_TFT | OMAP_LCDC_INV_VSYNC |
- OMAP_LCDC_INV_HSYNC | OMAP_LCDC_HSVS_RISING_EDGE |
- OMAP_LCDC_HSVS_OPPOSITE,
- .data_lines = 16,
- .bpp = 16,
- .pixel_clock = 24000,
- .x_res = 320,
- .y_res = 320,
- .hsw = 4,
- .hfp = 8,
- .hbp = 28,
- .vsw = 1,
- .vfp = 8,
- .vbp = 7,
- .pcd = 0,
-
- .get_caps = palmz71_panel_get_caps,
-};
-
-static int palmz71_panel_probe(struct platform_device *pdev)
-{
- omapfb_register_panel(&palmz71_panel);
- return 0;
-}
-
-static struct platform_driver palmz71_panel_driver = {
- .probe = palmz71_panel_probe,
- .driver = {
- .name = "lcd_palmz71",
- },
-};
-
-module_platform_driver(palmz71_panel_driver);
-
-MODULE_AUTHOR("Romain Goyet, Laurent Gonzalez, Marek Vasut");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("LCD panel support for the Palm Zire71");
diff --git a/drivers/video/fbdev/omap/lcdc.c b/drivers/video/fbdev/omap/lcdc.c
index e7ce783e5215..abb8b11464e8 100644
--- a/drivers/video/fbdev/omap/lcdc.c
+++ b/drivers/video/fbdev/omap/lcdc.c
@@ -706,8 +706,6 @@ static int omap_lcdc_init(struct omapfb_device *fbdev, int ext_mode,
if (machine_is_ams_delta())
rate /= 4;
- if (machine_is_omap_h3())
- rate /= 3;
r = clk_set_rate(lcdc.lcd_ck, rate);
if (r) {
dev_err(fbdev->dev, "failed to adjust LCD rate\n");
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c
index 4fc4b26a8d30..ba94a0a7bd4f 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c
@@ -331,13 +331,7 @@ static int dsicm_bl_update_status(struct backlight_device *dev)
struct panel_drv_data *ddata = dev_get_drvdata(&dev->dev);
struct omap_dss_device *in = ddata->in;
int r;
- int level;
-
- if (dev->props.fb_blank == FB_BLANK_UNBLANK &&
- dev->props.power == FB_BLANK_UNBLANK)
- level = dev->props.brightness;
- else
- level = 0;
+ int level = backlight_get_brightness(dev);
dev_dbg(&ddata->pdev->dev, "update brightness to %d\n", level);
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/display-sysfs.c b/drivers/video/fbdev/omap2/omapfb/dss/display-sysfs.c
index bc5a44c2a144..ae937854403b 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/display-sysfs.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/display-sysfs.c
@@ -10,6 +10,7 @@
#define DSS_SUBSYS_NAME "DISPLAY"
#include <linux/kernel.h>
+#include <linux/kstrtox.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/sysfs.h>
@@ -36,7 +37,7 @@ static ssize_t display_enabled_store(struct omap_dss_device *dssdev,
int r;
bool enable;
- r = strtobool(buf, &enable);
+ r = kstrtobool(buf, &enable);
if (r)
return r;
@@ -73,7 +74,7 @@ static ssize_t display_tear_store(struct omap_dss_device *dssdev,
if (!dssdev->driver->enable_te || !dssdev->driver->get_te)
return -ENOENT;
- r = strtobool(buf, &te);
+ r = kstrtobool(buf, &te);
if (r)
return r;
@@ -183,7 +184,7 @@ static ssize_t display_mirror_store(struct omap_dss_device *dssdev,
if (!dssdev->driver->set_mirror || !dssdev->driver->get_mirror)
return -ENOENT;
- r = strtobool(buf, &mirror);
+ r = kstrtobool(buf, &mirror);
if (r)
return r;
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/manager-sysfs.c b/drivers/video/fbdev/omap2/omapfb/dss/manager-sysfs.c
index ba21c4a2633d..1b644be5fe2e 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/manager-sysfs.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/manager-sysfs.c
@@ -10,6 +10,7 @@
#define DSS_SUBSYS_NAME "MANAGER"
#include <linux/kernel.h>
+#include <linux/kstrtox.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -246,7 +247,7 @@ static ssize_t manager_trans_key_enabled_store(struct omap_overlay_manager *mgr,
bool enable;
int r;
- r = strtobool(buf, &enable);
+ r = kstrtobool(buf, &enable);
if (r)
return r;
@@ -290,7 +291,7 @@ static ssize_t manager_alpha_blending_enabled_store(
if(!dss_has_feature(FEAT_ALPHA_FIXED_ZORDER))
return -ENODEV;
- r = strtobool(buf, &enable);
+ r = kstrtobool(buf, &enable);
if (r)
return r;
@@ -329,7 +330,7 @@ static ssize_t manager_cpr_enable_store(struct omap_overlay_manager *mgr,
if (!dss_has_feature(FEAT_CPR))
return -ENODEV;
- r = strtobool(buf, &enable);
+ r = kstrtobool(buf, &enable);
if (r)
return r;
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/overlay-sysfs.c b/drivers/video/fbdev/omap2/omapfb/dss/overlay-sysfs.c
index 601c0beb6de9..1da4fb1c77b4 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/overlay-sysfs.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/overlay-sysfs.c
@@ -13,6 +13,7 @@
#include <linux/err.h>
#include <linux/sysfs.h>
#include <linux/kobject.h>
+#include <linux/kstrtox.h>
#include <linux/platform_device.h>
#include <video/omapfb_dss.h>
@@ -210,7 +211,7 @@ static ssize_t overlay_enabled_store(struct omap_overlay *ovl, const char *buf,
int r;
bool enable;
- r = strtobool(buf, &enable);
+ r = kstrtobool(buf, &enable);
if (r)
return r;
diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c b/drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c
index 06dc41aa0354..831b2c2fbdf9 100644
--- a/drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c
+++ b/drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c
@@ -15,6 +15,7 @@
#include <linux/uaccess.h>
#include <linux/platform_device.h>
#include <linux/kernel.h>
+#include <linux/kstrtox.h>
#include <linux/mm.h>
#include <linux/omapfb.h>
@@ -96,7 +97,7 @@ static ssize_t store_mirror(struct device *dev,
int r;
struct fb_var_screeninfo new_var;
- r = strtobool(buf, &mirror);
+ r = kstrtobool(buf, &mirror);
if (r)
return r;
diff --git a/drivers/video/fbdev/riva/fbdev.c b/drivers/video/fbdev/riva/fbdev.c
index 644278146d3b..41edc6e79460 100644
--- a/drivers/video/fbdev/riva/fbdev.c
+++ b/drivers/video/fbdev/riva/fbdev.c
@@ -293,13 +293,7 @@ static int riva_bl_update_status(struct backlight_device *bd)
{
struct riva_par *par = bl_get_data(bd);
U032 tmp_pcrt, tmp_pmc;
- int level;
-
- if (bd->props.power != FB_BLANK_UNBLANK ||
- bd->props.fb_blank != FB_BLANK_UNBLANK)
- level = 0;
- else
- level = bd->props.brightness;
+ int level = backlight_get_brightness(bd);
tmp_pmc = NV_RD32(par->riva.PMC, 0x10F0) & 0x0000FFFF;
tmp_pcrt = NV_RD32(par->riva.PCRTC0, 0x081C) & 0xFFFFFFFC;
diff --git a/drivers/video/fbdev/s3c2410fb-regs-lcd.h b/drivers/video/fbdev/s3c2410fb-regs-lcd.h
deleted file mode 100644
index 1e46f7a788e5..000000000000
--- a/drivers/video/fbdev/s3c2410fb-regs-lcd.h
+++ /dev/null
@@ -1,143 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (c) 2003 Simtec Electronics <linux@simtec.co.uk>
- * http://www.simtec.co.uk/products/SWLINUX/
- */
-
-#ifndef ___ASM_ARCH_REGS_LCD_H
-#define ___ASM_ARCH_REGS_LCD_H
-
-/*
- * a couple of values are used as platform data in
- * include/linux/platform_data/fb-s3c2410.h and not
- * duplicated here.
- */
-#include <linux/platform_data/fb-s3c2410.h>
-
-#define S3C2410_LCDREG(x) (x)
-
-/* LCD control registers */
-#define S3C2410_LCDCON1 S3C2410_LCDREG(0x00)
-#define S3C2410_LCDCON2 S3C2410_LCDREG(0x04)
-#define S3C2410_LCDCON3 S3C2410_LCDREG(0x08)
-#define S3C2410_LCDCON4 S3C2410_LCDREG(0x0C)
-#define S3C2410_LCDCON5 S3C2410_LCDREG(0x10)
-
-#define S3C2410_LCDCON1_CLKVAL(x) ((x) << 8)
-#define S3C2410_LCDCON1_MMODE (1<<7)
-#define S3C2410_LCDCON1_DSCAN4 (0<<5)
-#define S3C2410_LCDCON1_STN4 (1<<5)
-#define S3C2410_LCDCON1_STN8 (2<<5)
-#define S3C2410_LCDCON1_TFT (3<<5)
-
-#define S3C2410_LCDCON1_STN1BPP (0<<1)
-#define S3C2410_LCDCON1_STN2GREY (1<<1)
-#define S3C2410_LCDCON1_STN4GREY (2<<1)
-#define S3C2410_LCDCON1_STN8BPP (3<<1)
-#define S3C2410_LCDCON1_STN12BPP (4<<1)
-
-#define S3C2410_LCDCON1_ENVID (1)
-
-#define S3C2410_LCDCON1_MODEMASK 0x1E
-
-#define S3C2410_LCDCON2_VBPD(x) ((x) << 24)
-#define S3C2410_LCDCON2_LINEVAL(x) ((x) << 14)
-#define S3C2410_LCDCON2_VFPD(x) ((x) << 6)
-#define S3C2410_LCDCON2_VSPW(x) ((x) << 0)
-
-#define S3C2410_LCDCON2_GET_VBPD(x) ( ((x) >> 24) & 0xFF)
-#define S3C2410_LCDCON2_GET_VFPD(x) ( ((x) >> 6) & 0xFF)
-#define S3C2410_LCDCON2_GET_VSPW(x) ( ((x) >> 0) & 0x3F)
-
-#define S3C2410_LCDCON3_HBPD(x) ((x) << 19)
-#define S3C2410_LCDCON3_WDLY(x) ((x) << 19)
-#define S3C2410_LCDCON3_HOZVAL(x) ((x) << 8)
-#define S3C2410_LCDCON3_HFPD(x) ((x) << 0)
-#define S3C2410_LCDCON3_LINEBLANK(x)((x) << 0)
-
-#define S3C2410_LCDCON3_GET_HBPD(x) ( ((x) >> 19) & 0x7F)
-#define S3C2410_LCDCON3_GET_HFPD(x) ( ((x) >> 0) & 0xFF)
-
-/* LDCCON4 changes for STN mode on the S3C2412 */
-
-#define S3C2410_LCDCON4_MVAL(x) ((x) << 8)
-#define S3C2410_LCDCON4_HSPW(x) ((x) << 0)
-#define S3C2410_LCDCON4_WLH(x) ((x) << 0)
-
-#define S3C2410_LCDCON4_GET_HSPW(x) ( ((x) >> 0) & 0xFF)
-
-/* framebuffer start addressed */
-#define S3C2410_LCDSADDR1 S3C2410_LCDREG(0x14)
-#define S3C2410_LCDSADDR2 S3C2410_LCDREG(0x18)
-#define S3C2410_LCDSADDR3 S3C2410_LCDREG(0x1C)
-
-#define S3C2410_LCDBANK(x) ((x) << 21)
-#define S3C2410_LCDBASEU(x) (x)
-
-#define S3C2410_OFFSIZE(x) ((x) << 11)
-#define S3C2410_PAGEWIDTH(x) (x)
-
-/* colour lookup and miscellaneous controls */
-
-#define S3C2410_REDLUT S3C2410_LCDREG(0x20)
-#define S3C2410_GREENLUT S3C2410_LCDREG(0x24)
-#define S3C2410_BLUELUT S3C2410_LCDREG(0x28)
-
-#define S3C2410_DITHMODE S3C2410_LCDREG(0x4C)
-#define S3C2410_TPAL S3C2410_LCDREG(0x50)
-
-#define S3C2410_TPAL_EN (1<<24)
-
-/* interrupt info */
-#define S3C2410_LCDINTPND S3C2410_LCDREG(0x54)
-#define S3C2410_LCDSRCPND S3C2410_LCDREG(0x58)
-#define S3C2410_LCDINTMSK S3C2410_LCDREG(0x5C)
-#define S3C2410_LCDINT_FIWSEL (1<<2)
-#define S3C2410_LCDINT_FRSYNC (1<<1)
-#define S3C2410_LCDINT_FICNT (1<<0)
-
-/* s3c2442 extra stn registers */
-
-#define S3C2442_REDLUT S3C2410_LCDREG(0x20)
-#define S3C2442_GREENLUT S3C2410_LCDREG(0x24)
-#define S3C2442_BLUELUT S3C2410_LCDREG(0x28)
-#define S3C2442_DITHMODE S3C2410_LCDREG(0x20)
-
-#define S3C2410_LPCSEL S3C2410_LCDREG(0x60)
-
-#define S3C2410_TFTPAL(x) S3C2410_LCDREG((0x400 + (x)*4))
-
-/* S3C2412 registers */
-
-#define S3C2412_TPAL S3C2410_LCDREG(0x20)
-
-#define S3C2412_LCDINTPND S3C2410_LCDREG(0x24)
-#define S3C2412_LCDSRCPND S3C2410_LCDREG(0x28)
-#define S3C2412_LCDINTMSK S3C2410_LCDREG(0x2C)
-
-#define S3C2412_TCONSEL S3C2410_LCDREG(0x30)
-
-#define S3C2412_LCDCON6 S3C2410_LCDREG(0x34)
-#define S3C2412_LCDCON7 S3C2410_LCDREG(0x38)
-#define S3C2412_LCDCON8 S3C2410_LCDREG(0x3C)
-#define S3C2412_LCDCON9 S3C2410_LCDREG(0x40)
-
-#define S3C2412_REDLUT(x) S3C2410_LCDREG(0x44 + ((x)*4))
-#define S3C2412_GREENLUT(x) S3C2410_LCDREG(0x60 + ((x)*4))
-#define S3C2412_BLUELUT(x) S3C2410_LCDREG(0x98 + ((x)*4))
-
-#define S3C2412_FRCPAT(x) S3C2410_LCDREG(0xB4 + ((x)*4))
-
-/* general registers */
-
-/* base of the LCD registers, where INTPND, INTSRC and then INTMSK
- * are available. */
-
-#define S3C2410_LCDINTBASE S3C2410_LCDREG(0x54)
-#define S3C2412_LCDINTBASE S3C2410_LCDREG(0x24)
-
-#define S3C24XX_LCDINTPND (0x00)
-#define S3C24XX_LCDSRCPND (0x04)
-#define S3C24XX_LCDINTMSK (0x08)
-
-#endif /* ___ASM_ARCH_REGS_LCD_H */
diff --git a/drivers/video/fbdev/s3c2410fb.c b/drivers/video/fbdev/s3c2410fb.c
deleted file mode 100644
index d8ae5258de46..000000000000
--- a/drivers/video/fbdev/s3c2410fb.c
+++ /dev/null
@@ -1,1142 +0,0 @@
-/* linux/drivers/video/s3c2410fb.c
- * Copyright (c) 2004,2005 Arnaud Patard
- * Copyright (c) 2004-2008 Ben Dooks
- *
- * S3C2410 LCD Framebuffer Driver
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of this archive for
- * more details.
- *
- * Driver based on skeletonfb.c, sa1100fb.c and others.
-*/
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/err.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/slab.h>
-#include <linux/delay.h>
-#include <linux/fb.h>
-#include <linux/init.h>
-#include <linux/dma-mapping.h>
-#include <linux/interrupt.h>
-#include <linux/platform_device.h>
-#include <linux/clk.h>
-#include <linux/cpufreq.h>
-#include <linux/io.h>
-#include <linux/platform_data/fb-s3c2410.h>
-
-#include <asm/div64.h>
-
-#include <asm/mach/map.h>
-
-#ifdef CONFIG_PM
-#include <linux/pm.h>
-#endif
-
-#include "s3c2410fb.h"
-#include "s3c2410fb-regs-lcd.h"
-
-/* Debugging stuff */
-static int debug = IS_BUILTIN(CONFIG_FB_S3C2410_DEBUG);
-
-#define dprintk(msg...) \
-do { \
- if (debug) \
- pr_debug(msg); \
-} while (0)
-
-/* useful functions */
-
-static int is_s3c2412(struct s3c2410fb_info *fbi)
-{
- return (fbi->drv_type == DRV_S3C2412);
-}
-
-/* s3c2410fb_set_lcdaddr
- *
- * initialise lcd controller address pointers
- */
-static void s3c2410fb_set_lcdaddr(struct fb_info *info)
-{
- unsigned long saddr1, saddr2, saddr3;
- struct s3c2410fb_info *fbi = info->par;
- void __iomem *regs = fbi->io;
-
- saddr1 = info->fix.smem_start >> 1;
- saddr2 = info->fix.smem_start;
- saddr2 += info->fix.line_length * info->var.yres;
- saddr2 >>= 1;
-
- saddr3 = S3C2410_OFFSIZE(0) |
- S3C2410_PAGEWIDTH((info->fix.line_length / 2) & 0x3ff);
-
- dprintk("LCDSADDR1 = 0x%08lx\n", saddr1);
- dprintk("LCDSADDR2 = 0x%08lx\n", saddr2);
- dprintk("LCDSADDR3 = 0x%08lx\n", saddr3);
-
- writel(saddr1, regs + S3C2410_LCDSADDR1);
- writel(saddr2, regs + S3C2410_LCDSADDR2);
- writel(saddr3, regs + S3C2410_LCDSADDR3);
-}
-
-/* s3c2410fb_calc_pixclk()
- *
- * calculate divisor for clk->pixclk
- */
-static unsigned int s3c2410fb_calc_pixclk(struct s3c2410fb_info *fbi,
- unsigned long pixclk)
-{
- unsigned long clk = fbi->clk_rate;
- unsigned long long div;
-
- /* pixclk is in picoseconds, our clock is in Hz
- *
- * Hz -> picoseconds is / 10^-12
- */
-
- div = (unsigned long long)clk * pixclk;
- div >>= 12; /* div / 2^12 */
- do_div(div, 625 * 625UL * 625); /* div / 5^12 */
-
- dprintk("pixclk %ld, divisor is %ld\n", pixclk, (long)div);
- return div;
-}
-
-/*
- * s3c2410fb_check_var():
- * Get the video params out of 'var'. If a value doesn't fit, round it up,
- * if it's too big, return -EINVAL.
- *
- */
-static int s3c2410fb_check_var(struct fb_var_screeninfo *var,
- struct fb_info *info)
-{
- struct s3c2410fb_info *fbi = info->par;
- struct s3c2410fb_mach_info *mach_info = dev_get_platdata(fbi->dev);
- struct s3c2410fb_display *display = NULL;
- struct s3c2410fb_display *default_display = mach_info->displays +
- mach_info->default_display;
- int type = default_display->type;
- unsigned i;
-
- dprintk("check_var(var=%p, info=%p)\n", var, info);
-
- /* validate x/y resolution */
- /* choose default mode if possible */
- if (var->yres == default_display->yres &&
- var->xres == default_display->xres &&
- var->bits_per_pixel == default_display->bpp)
- display = default_display;
- else
- for (i = 0; i < mach_info->num_displays; i++)
- if (type == mach_info->displays[i].type &&
- var->yres == mach_info->displays[i].yres &&
- var->xres == mach_info->displays[i].xres &&
- var->bits_per_pixel == mach_info->displays[i].bpp) {
- display = mach_info->displays + i;
- break;
- }
-
- if (!display) {
- dprintk("wrong resolution or depth %dx%d at %d bpp\n",
- var->xres, var->yres, var->bits_per_pixel);
- return -EINVAL;
- }
-
- /* it is always the size as the display */
- var->xres_virtual = display->xres;
- var->yres_virtual = display->yres;
- var->height = display->height;
- var->width = display->width;
-
- /* copy lcd settings */
- var->pixclock = display->pixclock;
- var->left_margin = display->left_margin;
- var->right_margin = display->right_margin;
- var->upper_margin = display->upper_margin;
- var->lower_margin = display->lower_margin;
- var->vsync_len = display->vsync_len;
- var->hsync_len = display->hsync_len;
-
- fbi->regs.lcdcon5 = display->lcdcon5;
- /* set display type */
- fbi->regs.lcdcon1 = display->type;
-
- var->transp.offset = 0;
- var->transp.length = 0;
- /* set r/g/b positions */
- switch (var->bits_per_pixel) {
- case 1:
- case 2:
- case 4:
- var->red.offset = 0;
- var->red.length = var->bits_per_pixel;
- var->green = var->red;
- var->blue = var->red;
- break;
- case 8:
- if (display->type != S3C2410_LCDCON1_TFT) {
- /* 8 bpp 332 */
- var->red.length = 3;
- var->red.offset = 5;
- var->green.length = 3;
- var->green.offset = 2;
- var->blue.length = 2;
- var->blue.offset = 0;
- } else {
- var->red.offset = 0;
- var->red.length = 8;
- var->green = var->red;
- var->blue = var->red;
- }
- break;
- case 12:
- /* 12 bpp 444 */
- var->red.length = 4;
- var->red.offset = 8;
- var->green.length = 4;
- var->green.offset = 4;
- var->blue.length = 4;
- var->blue.offset = 0;
- break;
-
- default:
- case 16:
- if (display->lcdcon5 & S3C2410_LCDCON5_FRM565) {
- /* 16 bpp, 565 format */
- var->red.offset = 11;
- var->green.offset = 5;
- var->blue.offset = 0;
- var->red.length = 5;
- var->green.length = 6;
- var->blue.length = 5;
- } else {
- /* 16 bpp, 5551 format */
- var->red.offset = 11;
- var->green.offset = 6;
- var->blue.offset = 1;
- var->red.length = 5;
- var->green.length = 5;
- var->blue.length = 5;
- }
- break;
- case 32:
- /* 24 bpp 888 and 8 dummy */
- var->red.length = 8;
- var->red.offset = 16;
- var->green.length = 8;
- var->green.offset = 8;
- var->blue.length = 8;
- var->blue.offset = 0;
- break;
- }
- return 0;
-}
-
-/* s3c2410fb_calculate_stn_lcd_regs
- *
- * calculate register values from var settings
- */
-static void s3c2410fb_calculate_stn_lcd_regs(const struct fb_info *info,
- struct s3c2410fb_hw *regs)
-{
- const struct s3c2410fb_info *fbi = info->par;
- const struct fb_var_screeninfo *var = &info->var;
- int type = regs->lcdcon1 & ~S3C2410_LCDCON1_TFT;
- int hs = var->xres >> 2;
- unsigned wdly = (var->left_margin >> 4) - 1;
- unsigned wlh = (var->hsync_len >> 4) - 1;
-
- if (type != S3C2410_LCDCON1_STN4)
- hs >>= 1;
-
- switch (var->bits_per_pixel) {
- case 1:
- regs->lcdcon1 |= S3C2410_LCDCON1_STN1BPP;
- break;
- case 2:
- regs->lcdcon1 |= S3C2410_LCDCON1_STN2GREY;
- break;
- case 4:
- regs->lcdcon1 |= S3C2410_LCDCON1_STN4GREY;
- break;
- case 8:
- regs->lcdcon1 |= S3C2410_LCDCON1_STN8BPP;
- hs *= 3;
- break;
- case 12:
- regs->lcdcon1 |= S3C2410_LCDCON1_STN12BPP;
- hs *= 3;
- break;
-
- default:
- /* invalid pixel depth */
- dev_err(fbi->dev, "invalid bpp %d\n",
- var->bits_per_pixel);
- }
- /* update X/Y info */
- dprintk("setting horz: lft=%d, rt=%d, sync=%d\n",
- var->left_margin, var->right_margin, var->hsync_len);
-
- regs->lcdcon2 = S3C2410_LCDCON2_LINEVAL(var->yres - 1);
-
- if (wdly > 3)
- wdly = 3;
-
- if (wlh > 3)
- wlh = 3;
-
- regs->lcdcon3 = S3C2410_LCDCON3_WDLY(wdly) |
- S3C2410_LCDCON3_LINEBLANK(var->right_margin / 8) |
- S3C2410_LCDCON3_HOZVAL(hs - 1);
-
- regs->lcdcon4 = S3C2410_LCDCON4_WLH(wlh);
-}
-
-/* s3c2410fb_calculate_tft_lcd_regs
- *
- * calculate register values from var settings
- */
-static void s3c2410fb_calculate_tft_lcd_regs(const struct fb_info *info,
- struct s3c2410fb_hw *regs)
-{
- const struct s3c2410fb_info *fbi = info->par;
- const struct fb_var_screeninfo *var = &info->var;
-
- switch (var->bits_per_pixel) {
- case 1:
- regs->lcdcon1 |= S3C2410_LCDCON1_TFT1BPP;
- break;
- case 2:
- regs->lcdcon1 |= S3C2410_LCDCON1_TFT2BPP;
- break;
- case 4:
- regs->lcdcon1 |= S3C2410_LCDCON1_TFT4BPP;
- break;
- case 8:
- regs->lcdcon1 |= S3C2410_LCDCON1_TFT8BPP;
- regs->lcdcon5 |= S3C2410_LCDCON5_BSWP |
- S3C2410_LCDCON5_FRM565;
- regs->lcdcon5 &= ~S3C2410_LCDCON5_HWSWP;
- break;
- case 16:
- regs->lcdcon1 |= S3C2410_LCDCON1_TFT16BPP;
- regs->lcdcon5 &= ~S3C2410_LCDCON5_BSWP;
- regs->lcdcon5 |= S3C2410_LCDCON5_HWSWP;
- break;
- case 32:
- regs->lcdcon1 |= S3C2410_LCDCON1_TFT24BPP;
- regs->lcdcon5 &= ~(S3C2410_LCDCON5_BSWP |
- S3C2410_LCDCON5_HWSWP |
- S3C2410_LCDCON5_BPP24BL);
- break;
- default:
- /* invalid pixel depth */
- dev_err(fbi->dev, "invalid bpp %d\n",
- var->bits_per_pixel);
- }
- /* update X/Y info */
- dprintk("setting vert: up=%d, low=%d, sync=%d\n",
- var->upper_margin, var->lower_margin, var->vsync_len);
-
- dprintk("setting horz: lft=%d, rt=%d, sync=%d\n",
- var->left_margin, var->right_margin, var->hsync_len);
-
- regs->lcdcon2 = S3C2410_LCDCON2_LINEVAL(var->yres - 1) |
- S3C2410_LCDCON2_VBPD(var->upper_margin - 1) |
- S3C2410_LCDCON2_VFPD(var->lower_margin - 1) |
- S3C2410_LCDCON2_VSPW(var->vsync_len - 1);
-
- regs->lcdcon3 = S3C2410_LCDCON3_HBPD(var->right_margin - 1) |
- S3C2410_LCDCON3_HFPD(var->left_margin - 1) |
- S3C2410_LCDCON3_HOZVAL(var->xres - 1);
-
- regs->lcdcon4 = S3C2410_LCDCON4_HSPW(var->hsync_len - 1);
-}
-
-/* s3c2410fb_activate_var
- *
- * activate (set) the controller from the given framebuffer
- * information
- */
-static void s3c2410fb_activate_var(struct fb_info *info)
-{
- struct s3c2410fb_info *fbi = info->par;
- void __iomem *regs = fbi->io;
- int type = fbi->regs.lcdcon1 & S3C2410_LCDCON1_TFT;
- struct fb_var_screeninfo *var = &info->var;
- int clkdiv;
-
- clkdiv = DIV_ROUND_UP(s3c2410fb_calc_pixclk(fbi, var->pixclock), 2);
-
- dprintk("%s: var->xres = %d\n", __func__, var->xres);
- dprintk("%s: var->yres = %d\n", __func__, var->yres);
- dprintk("%s: var->bpp = %d\n", __func__, var->bits_per_pixel);
-
- if (type == S3C2410_LCDCON1_TFT) {
- s3c2410fb_calculate_tft_lcd_regs(info, &fbi->regs);
- --clkdiv;
- if (clkdiv < 0)
- clkdiv = 0;
- } else {
- s3c2410fb_calculate_stn_lcd_regs(info, &fbi->regs);
- if (clkdiv < 2)
- clkdiv = 2;
- }
-
- fbi->regs.lcdcon1 |= S3C2410_LCDCON1_CLKVAL(clkdiv);
-
- /* write new registers */
-
- dprintk("new register set:\n");
- dprintk("lcdcon[1] = 0x%08lx\n", fbi->regs.lcdcon1);
- dprintk("lcdcon[2] = 0x%08lx\n", fbi->regs.lcdcon2);
- dprintk("lcdcon[3] = 0x%08lx\n", fbi->regs.lcdcon3);
- dprintk("lcdcon[4] = 0x%08lx\n", fbi->regs.lcdcon4);
- dprintk("lcdcon[5] = 0x%08lx\n", fbi->regs.lcdcon5);
-
- writel(fbi->regs.lcdcon1 & ~S3C2410_LCDCON1_ENVID,
- regs + S3C2410_LCDCON1);
- writel(fbi->regs.lcdcon2, regs + S3C2410_LCDCON2);
- writel(fbi->regs.lcdcon3, regs + S3C2410_LCDCON3);
- writel(fbi->regs.lcdcon4, regs + S3C2410_LCDCON4);
- writel(fbi->regs.lcdcon5, regs + S3C2410_LCDCON5);
-
- /* set lcd address pointers */
- s3c2410fb_set_lcdaddr(info);
-
- fbi->regs.lcdcon1 |= S3C2410_LCDCON1_ENVID,
- writel(fbi->regs.lcdcon1, regs + S3C2410_LCDCON1);
-}
-
-/*
- * s3c2410fb_set_par - Alters the hardware state.
- * @info: frame buffer structure that represents a single frame buffer
- *
- */
-static int s3c2410fb_set_par(struct fb_info *info)
-{
- struct fb_var_screeninfo *var = &info->var;
-
- switch (var->bits_per_pixel) {
- case 32:
- case 16:
- case 12:
- info->fix.visual = FB_VISUAL_TRUECOLOR;
- break;
- case 1:
- info->fix.visual = FB_VISUAL_MONO01;
- break;
- default:
- info->fix.visual = FB_VISUAL_PSEUDOCOLOR;
- break;
- }
-
- info->fix.line_length = (var->xres_virtual * var->bits_per_pixel) / 8;
-
- /* activate this new configuration */
-
- s3c2410fb_activate_var(info);
- return 0;
-}
-
-static void schedule_palette_update(struct s3c2410fb_info *fbi,
- unsigned int regno, unsigned int val)
-{
- unsigned long flags;
- unsigned long irqen;
- void __iomem *irq_base = fbi->irq_base;
-
- local_irq_save(flags);
-
- fbi->palette_buffer[regno] = val;
-
- if (!fbi->palette_ready) {
- fbi->palette_ready = 1;
-
- /* enable IRQ */
- irqen = readl(irq_base + S3C24XX_LCDINTMSK);
- irqen &= ~S3C2410_LCDINT_FRSYNC;
- writel(irqen, irq_base + S3C24XX_LCDINTMSK);
- }
-
- local_irq_restore(flags);
-}
-
-/* from pxafb.c */
-static inline unsigned int chan_to_field(unsigned int chan,
- struct fb_bitfield *bf)
-{
- chan &= 0xffff;
- chan >>= 16 - bf->length;
- return chan << bf->offset;
-}
-
-static int s3c2410fb_setcolreg(unsigned regno,
- unsigned red, unsigned green, unsigned blue,
- unsigned transp, struct fb_info *info)
-{
- struct s3c2410fb_info *fbi = info->par;
- void __iomem *regs = fbi->io;
- unsigned int val;
-
- /* dprintk("setcol: regno=%d, rgb=%d,%d,%d\n",
- regno, red, green, blue); */
-
- switch (info->fix.visual) {
- case FB_VISUAL_TRUECOLOR:
- /* true-colour, use pseudo-palette */
-
- if (regno < 16) {
- u32 *pal = info->pseudo_palette;
-
- val = chan_to_field(red, &info->var.red);
- val |= chan_to_field(green, &info->var.green);
- val |= chan_to_field(blue, &info->var.blue);
-
- pal[regno] = val;
- }
- break;
-
- case FB_VISUAL_PSEUDOCOLOR:
- if (regno < 256) {
- /* currently assume RGB 5-6-5 mode */
-
- val = (red >> 0) & 0xf800;
- val |= (green >> 5) & 0x07e0;
- val |= (blue >> 11) & 0x001f;
-
- writel(val, regs + S3C2410_TFTPAL(regno));
- schedule_palette_update(fbi, regno, val);
- }
-
- break;
-
- default:
- return 1; /* unknown type */
- }
-
- return 0;
-}
-
-/* s3c2410fb_lcd_enable
- *
- * shutdown the lcd controller
- */
-static void s3c2410fb_lcd_enable(struct s3c2410fb_info *fbi, int enable)
-{
- unsigned long flags;
-
- local_irq_save(flags);
-
- if (enable)
- fbi->regs.lcdcon1 |= S3C2410_LCDCON1_ENVID;
- else
- fbi->regs.lcdcon1 &= ~S3C2410_LCDCON1_ENVID;
-
- writel(fbi->regs.lcdcon1, fbi->io + S3C2410_LCDCON1);
-
- local_irq_restore(flags);
-}
-
-
-/*
- * s3c2410fb_blank
- * @blank_mode: the blank mode we want.
- * @info: frame buffer structure that represents a single frame buffer
- *
- * Blank the screen if blank_mode != 0, else unblank. Return 0 if
- * blanking succeeded, != 0 if un-/blanking failed due to e.g. a
- * video mode which doesn't support it. Implements VESA suspend
- * and powerdown modes on hardware that supports disabling hsync/vsync:
- *
- * Returns negative errno on error, or zero on success.
- *
- */
-static int s3c2410fb_blank(int blank_mode, struct fb_info *info)
-{
- struct s3c2410fb_info *fbi = info->par;
- void __iomem *tpal_reg = fbi->io;
-
- dprintk("blank(mode=%d, info=%p)\n", blank_mode, info);
-
- tpal_reg += is_s3c2412(fbi) ? S3C2412_TPAL : S3C2410_TPAL;
-
- if (blank_mode == FB_BLANK_POWERDOWN)
- s3c2410fb_lcd_enable(fbi, 0);
- else
- s3c2410fb_lcd_enable(fbi, 1);
-
- if (blank_mode == FB_BLANK_UNBLANK)
- writel(0x0, tpal_reg);
- else {
- dprintk("setting TPAL to output 0x000000\n");
- writel(S3C2410_TPAL_EN, tpal_reg);
- }
-
- return 0;
-}
-
-static int s3c2410fb_debug_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%s\n", debug ? "on" : "off");
-}
-
-static int s3c2410fb_debug_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
-{
- if (len < 1)
- return -EINVAL;
-
- if (strncasecmp(buf, "on", 2) == 0 ||
- strncasecmp(buf, "1", 1) == 0) {
- debug = 1;
- dev_dbg(dev, "s3c2410fb: Debug On");
- } else if (strncasecmp(buf, "off", 3) == 0 ||
- strncasecmp(buf, "0", 1) == 0) {
- debug = 0;
- dev_dbg(dev, "s3c2410fb: Debug Off");
- } else {
- return -EINVAL;
- }
-
- return len;
-}
-
-static DEVICE_ATTR(debug, 0664, s3c2410fb_debug_show, s3c2410fb_debug_store);
-
-static const struct fb_ops s3c2410fb_ops = {
- .owner = THIS_MODULE,
- .fb_check_var = s3c2410fb_check_var,
- .fb_set_par = s3c2410fb_set_par,
- .fb_blank = s3c2410fb_blank,
- .fb_setcolreg = s3c2410fb_setcolreg,
- .fb_fillrect = cfb_fillrect,
- .fb_copyarea = cfb_copyarea,
- .fb_imageblit = cfb_imageblit,
-};
-
-/*
- * s3c2410fb_map_video_memory():
- * Allocates the DRAM memory for the frame buffer. This buffer is
- * remapped into a non-cached, non-buffered, memory region to
- * allow palette and pixel writes to occur without flushing the
- * cache. Once this area is remapped, all virtual memory
- * access to the video memory should occur at the new region.
- */
-static int s3c2410fb_map_video_memory(struct fb_info *info)
-{
- struct s3c2410fb_info *fbi = info->par;
- dma_addr_t map_dma;
- unsigned map_size = PAGE_ALIGN(info->fix.smem_len);
-
- dprintk("map_video_memory(fbi=%p) map_size %u\n", fbi, map_size);
-
- info->screen_base = dma_alloc_wc(fbi->dev, map_size, &map_dma,
- GFP_KERNEL);
-
- if (info->screen_base) {
- /* prevent initial garbage on screen */
- dprintk("map_video_memory: clear %p:%08x\n",
- info->screen_base, map_size);
- memset(info->screen_base, 0x00, map_size);
-
- info->fix.smem_start = map_dma;
-
- dprintk("map_video_memory: dma=%08lx cpu=%p size=%08x\n",
- info->fix.smem_start, info->screen_base, map_size);
- }
-
- return info->screen_base ? 0 : -ENOMEM;
-}
-
-static inline void s3c2410fb_unmap_video_memory(struct fb_info *info)
-{
- struct s3c2410fb_info *fbi = info->par;
-
- dma_free_wc(fbi->dev, PAGE_ALIGN(info->fix.smem_len),
- info->screen_base, info->fix.smem_start);
-}
-
-static inline void modify_gpio(void __iomem *reg,
- unsigned long set, unsigned long mask)
-{
- unsigned long tmp;
-
- if (!reg)
- return;
-
- tmp = readl(reg) & ~mask;
- writel(tmp | set, reg);
-}
-
-/*
- * s3c2410fb_init_registers - Initialise all LCD-related registers
- */
-static int s3c2410fb_init_registers(struct fb_info *info)
-{
- struct s3c2410fb_info *fbi = info->par;
- struct s3c2410fb_mach_info *mach_info = dev_get_platdata(fbi->dev);
- unsigned long flags;
- void __iomem *regs = fbi->io;
- void __iomem *tpal;
- void __iomem *lpcsel;
-
- if (is_s3c2412(fbi)) {
- tpal = regs + S3C2412_TPAL;
- lpcsel = regs + S3C2412_TCONSEL;
- } else {
- tpal = regs + S3C2410_TPAL;
- lpcsel = regs + S3C2410_LPCSEL;
- }
-
- /* Initialise LCD with values from haret */
-
- local_irq_save(flags);
-
- /* modify the gpio(s) with interrupts set (bjd) */
-
- modify_gpio(mach_info->gpcup_reg, mach_info->gpcup, mach_info->gpcup_mask);
- modify_gpio(mach_info->gpccon_reg, mach_info->gpccon, mach_info->gpccon_mask);
- modify_gpio(mach_info->gpdup_reg, mach_info->gpdup, mach_info->gpdup_mask);
- modify_gpio(mach_info->gpdcon_reg, mach_info->gpdcon, mach_info->gpdcon_mask);
-
- local_irq_restore(flags);
-
- dprintk("LPCSEL = 0x%08lx\n", mach_info->lpcsel);
- writel(mach_info->lpcsel, lpcsel);
-
- dprintk("replacing TPAL %08x\n", readl(tpal));
-
- /* ensure temporary palette disabled */
- writel(0x00, tpal);
-
- return 0;
-}
-
-static void s3c2410fb_write_palette(struct s3c2410fb_info *fbi)
-{
- unsigned int i;
- void __iomem *regs = fbi->io;
-
- fbi->palette_ready = 0;
-
- for (i = 0; i < 256; i++) {
- unsigned long ent = fbi->palette_buffer[i];
- if (ent == PALETTE_BUFF_CLEAR)
- continue;
-
- writel(ent, regs + S3C2410_TFTPAL(i));
-
- /* it seems the only way to know exactly
- * if the palette wrote ok, is to check
- * to see if the value verifies ok
- */
-
- if (readw(regs + S3C2410_TFTPAL(i)) == ent)
- fbi->palette_buffer[i] = PALETTE_BUFF_CLEAR;
- else
- fbi->palette_ready = 1; /* retry */
- }
-}
-
-static irqreturn_t s3c2410fb_irq(int irq, void *dev_id)
-{
- struct s3c2410fb_info *fbi = dev_id;
- void __iomem *irq_base = fbi->irq_base;
- unsigned long lcdirq = readl(irq_base + S3C24XX_LCDINTPND);
-
- if (lcdirq & S3C2410_LCDINT_FRSYNC) {
- if (fbi->palette_ready)
- s3c2410fb_write_palette(fbi);
-
- writel(S3C2410_LCDINT_FRSYNC, irq_base + S3C24XX_LCDINTPND);
- writel(S3C2410_LCDINT_FRSYNC, irq_base + S3C24XX_LCDSRCPND);
- }
-
- return IRQ_HANDLED;
-}
-
-#ifdef CONFIG_ARM_S3C24XX_CPUFREQ
-
-static int s3c2410fb_cpufreq_transition(struct notifier_block *nb,
- unsigned long val, void *data)
-{
- struct s3c2410fb_info *info;
- struct fb_info *fbinfo;
- long delta_f;
-
- info = container_of(nb, struct s3c2410fb_info, freq_transition);
- fbinfo = dev_get_drvdata(info->dev);
-
- /* work out change, <0 for speed-up */
- delta_f = info->clk_rate - clk_get_rate(info->clk);
-
- if ((val == CPUFREQ_POSTCHANGE && delta_f > 0) ||
- (val == CPUFREQ_PRECHANGE && delta_f < 0)) {
- info->clk_rate = clk_get_rate(info->clk);
- s3c2410fb_activate_var(fbinfo);
- }
-
- return 0;
-}
-
-static inline int s3c2410fb_cpufreq_register(struct s3c2410fb_info *info)
-{
- info->freq_transition.notifier_call = s3c2410fb_cpufreq_transition;
-
- return cpufreq_register_notifier(&info->freq_transition,
- CPUFREQ_TRANSITION_NOTIFIER);
-}
-
-static inline void s3c2410fb_cpufreq_deregister(struct s3c2410fb_info *info)
-{
- cpufreq_unregister_notifier(&info->freq_transition,
- CPUFREQ_TRANSITION_NOTIFIER);
-}
-
-#else
-static inline int s3c2410fb_cpufreq_register(struct s3c2410fb_info *info)
-{
- return 0;
-}
-
-static inline void s3c2410fb_cpufreq_deregister(struct s3c2410fb_info *info)
-{
-}
-#endif
-
-
-static const char driver_name[] = "s3c2410fb";
-
-static int s3c24xxfb_probe(struct platform_device *pdev,
- enum s3c_drv_type drv_type)
-{
- struct s3c2410fb_info *info;
- struct s3c2410fb_display *display;
- struct fb_info *fbinfo;
- struct s3c2410fb_mach_info *mach_info;
- struct resource *res;
- int ret;
- int irq;
- int i;
- int size;
- u32 lcdcon1;
-
- mach_info = dev_get_platdata(&pdev->dev);
- if (mach_info == NULL) {
- dev_err(&pdev->dev,
- "no platform data for lcd, cannot attach\n");
- return -EINVAL;
- }
-
- if (mach_info->default_display >= mach_info->num_displays) {
- dev_err(&pdev->dev, "default is %d but only %d displays\n",
- mach_info->default_display, mach_info->num_displays);
- return -EINVAL;
- }
-
- display = mach_info->displays + mach_info->default_display;
-
- irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev, "no irq for device\n");
- return -ENOENT;
- }
-
- fbinfo = framebuffer_alloc(sizeof(struct s3c2410fb_info), &pdev->dev);
- if (!fbinfo)
- return -ENOMEM;
-
- platform_set_drvdata(pdev, fbinfo);
-
- info = fbinfo->par;
- info->dev = &pdev->dev;
- info->drv_type = drv_type;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (res == NULL) {
- dev_err(&pdev->dev, "failed to get memory registers\n");
- ret = -ENXIO;
- goto dealloc_fb;
- }
-
- size = resource_size(res);
- info->mem = request_mem_region(res->start, size, pdev->name);
- if (info->mem == NULL) {
- dev_err(&pdev->dev, "failed to get memory region\n");
- ret = -ENOENT;
- goto dealloc_fb;
- }
-
- info->io = ioremap(res->start, size);
- if (info->io == NULL) {
- dev_err(&pdev->dev, "ioremap() of registers failed\n");
- ret = -ENXIO;
- goto release_mem;
- }
-
- if (drv_type == DRV_S3C2412)
- info->irq_base = info->io + S3C2412_LCDINTBASE;
- else
- info->irq_base = info->io + S3C2410_LCDINTBASE;
-
- dprintk("devinit\n");
-
- strcpy(fbinfo->fix.id, driver_name);
-
- /* Stop the video */
- lcdcon1 = readl(info->io + S3C2410_LCDCON1);
- writel(lcdcon1 & ~S3C2410_LCDCON1_ENVID, info->io + S3C2410_LCDCON1);
-
- fbinfo->fix.type = FB_TYPE_PACKED_PIXELS;
- fbinfo->fix.type_aux = 0;
- fbinfo->fix.xpanstep = 0;
- fbinfo->fix.ypanstep = 0;
- fbinfo->fix.ywrapstep = 0;
- fbinfo->fix.accel = FB_ACCEL_NONE;
-
- fbinfo->var.nonstd = 0;
- fbinfo->var.activate = FB_ACTIVATE_NOW;
- fbinfo->var.accel_flags = 0;
- fbinfo->var.vmode = FB_VMODE_NONINTERLACED;
-
- fbinfo->fbops = &s3c2410fb_ops;
- fbinfo->flags = FBINFO_FLAG_DEFAULT;
- fbinfo->pseudo_palette = &info->pseudo_pal;
-
- for (i = 0; i < 256; i++)
- info->palette_buffer[i] = PALETTE_BUFF_CLEAR;
-
- ret = request_irq(irq, s3c2410fb_irq, 0, pdev->name, info);
- if (ret) {
- dev_err(&pdev->dev, "cannot get irq %d - err %d\n", irq, ret);
- ret = -EBUSY;
- goto release_regs;
- }
-
- info->clk = clk_get(NULL, "lcd");
- if (IS_ERR(info->clk)) {
- dev_err(&pdev->dev, "failed to get lcd clock source\n");
- ret = PTR_ERR(info->clk);
- goto release_irq;
- }
-
- clk_prepare_enable(info->clk);
- dprintk("got and enabled clock\n");
-
- usleep_range(1000, 1100);
-
- info->clk_rate = clk_get_rate(info->clk);
-
- /* find maximum required memory size for display */
- for (i = 0; i < mach_info->num_displays; i++) {
- unsigned long smem_len = mach_info->displays[i].xres;
-
- smem_len *= mach_info->displays[i].yres;
- smem_len *= mach_info->displays[i].bpp;
- smem_len >>= 3;
- if (fbinfo->fix.smem_len < smem_len)
- fbinfo->fix.smem_len = smem_len;
- }
-
- /* Initialize video memory */
- ret = s3c2410fb_map_video_memory(fbinfo);
- if (ret) {
- dev_err(&pdev->dev, "Failed to allocate video RAM: %d\n", ret);
- ret = -ENOMEM;
- goto release_clock;
- }
-
- dprintk("got video memory\n");
-
- fbinfo->var.xres = display->xres;
- fbinfo->var.yres = display->yres;
- fbinfo->var.bits_per_pixel = display->bpp;
-
- s3c2410fb_init_registers(fbinfo);
-
- s3c2410fb_check_var(&fbinfo->var, fbinfo);
-
- ret = s3c2410fb_cpufreq_register(info);
- if (ret < 0) {
- dev_err(&pdev->dev, "Failed to register cpufreq\n");
- goto free_video_memory;
- }
-
- ret = register_framebuffer(fbinfo);
- if (ret < 0) {
- dev_err(&pdev->dev, "Failed to register framebuffer device: %d\n",
- ret);
- goto free_cpufreq;
- }
-
- /* create device files */
- ret = device_create_file(&pdev->dev, &dev_attr_debug);
- if (ret)
- dev_err(&pdev->dev, "failed to add debug attribute\n");
-
- dev_info(&pdev->dev, "fb%d: %s frame buffer device\n",
- fbinfo->node, fbinfo->fix.id);
-
- return 0;
-
- free_cpufreq:
- s3c2410fb_cpufreq_deregister(info);
-free_video_memory:
- s3c2410fb_unmap_video_memory(fbinfo);
-release_clock:
- clk_disable_unprepare(info->clk);
- clk_put(info->clk);
-release_irq:
- free_irq(irq, info);
-release_regs:
- iounmap(info->io);
-release_mem:
- release_mem_region(res->start, size);
-dealloc_fb:
- framebuffer_release(fbinfo);
- return ret;
-}
-
-static int s3c2410fb_probe(struct platform_device *pdev)
-{
- return s3c24xxfb_probe(pdev, DRV_S3C2410);
-}
-
-static int s3c2412fb_probe(struct platform_device *pdev)
-{
- return s3c24xxfb_probe(pdev, DRV_S3C2412);
-}
-
-
-/*
- * Cleanup
- */
-static int s3c2410fb_remove(struct platform_device *pdev)
-{
- struct fb_info *fbinfo = platform_get_drvdata(pdev);
- struct s3c2410fb_info *info = fbinfo->par;
- int irq;
-
- unregister_framebuffer(fbinfo);
- s3c2410fb_cpufreq_deregister(info);
-
- s3c2410fb_lcd_enable(info, 0);
- usleep_range(1000, 1100);
-
- s3c2410fb_unmap_video_memory(fbinfo);
-
- if (info->clk) {
- clk_disable_unprepare(info->clk);
- clk_put(info->clk);
- info->clk = NULL;
- }
-
- irq = platform_get_irq(pdev, 0);
- free_irq(irq, info);
-
- iounmap(info->io);
-
- release_mem_region(info->mem->start, resource_size(info->mem));
-
- framebuffer_release(fbinfo);
-
- return 0;
-}
-
-#ifdef CONFIG_PM
-
-/* suspend and resume support for the lcd controller */
-static int s3c2410fb_suspend(struct platform_device *dev, pm_message_t state)
-{
- struct fb_info *fbinfo = platform_get_drvdata(dev);
- struct s3c2410fb_info *info = fbinfo->par;
-
- s3c2410fb_lcd_enable(info, 0);
-
- /* sleep before disabling the clock, we need to ensure
- * the LCD DMA engine is not going to get back on the bus
- * before the clock goes off again (bjd) */
-
- usleep_range(1000, 1100);
- clk_disable_unprepare(info->clk);
-
- return 0;
-}
-
-static int s3c2410fb_resume(struct platform_device *dev)
-{
- struct fb_info *fbinfo = platform_get_drvdata(dev);
- struct s3c2410fb_info *info = fbinfo->par;
-
- clk_prepare_enable(info->clk);
- usleep_range(1000, 1100);
-
- s3c2410fb_init_registers(fbinfo);
-
- /* re-activate our display after resume */
- s3c2410fb_activate_var(fbinfo);
- s3c2410fb_blank(FB_BLANK_UNBLANK, fbinfo);
-
- return 0;
-}
-
-#else
-#define s3c2410fb_suspend NULL
-#define s3c2410fb_resume NULL
-#endif
-
-static struct platform_driver s3c2410fb_driver = {
- .probe = s3c2410fb_probe,
- .remove = s3c2410fb_remove,
- .suspend = s3c2410fb_suspend,
- .resume = s3c2410fb_resume,
- .driver = {
- .name = "s3c2410-lcd",
- },
-};
-
-static struct platform_driver s3c2412fb_driver = {
- .probe = s3c2412fb_probe,
- .remove = s3c2410fb_remove,
- .suspend = s3c2410fb_suspend,
- .resume = s3c2410fb_resume,
- .driver = {
- .name = "s3c2412-lcd",
- },
-};
-
-int __init s3c2410fb_init(void)
-{
- int ret = platform_driver_register(&s3c2410fb_driver);
-
- if (ret == 0)
- ret = platform_driver_register(&s3c2412fb_driver);
-
- return ret;
-}
-
-static void __exit s3c2410fb_cleanup(void)
-{
- platform_driver_unregister(&s3c2410fb_driver);
- platform_driver_unregister(&s3c2412fb_driver);
-}
-
-module_init(s3c2410fb_init);
-module_exit(s3c2410fb_cleanup);
-
-MODULE_AUTHOR("Arnaud Patard <arnaud.patard@rtp-net.org>");
-MODULE_AUTHOR("Ben Dooks <ben-linux@fluff.org>");
-MODULE_DESCRIPTION("Framebuffer driver for the s3c2410");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:s3c2410-lcd");
-MODULE_ALIAS("platform:s3c2412-lcd");
diff --git a/drivers/video/fbdev/s3c2410fb.h b/drivers/video/fbdev/s3c2410fb.h
deleted file mode 100644
index cdd11e2f8859..000000000000
--- a/drivers/video/fbdev/s3c2410fb.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * linux/drivers/video/s3c2410fb.h
- * Copyright (c) 2004 Arnaud Patard
- *
- * S3C2410 LCD Framebuffer Driver
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file COPYING in the main directory of this archive for
- * more details.
- *
-*/
-
-#ifndef __S3C2410FB_H
-#define __S3C2410FB_H
-
-enum s3c_drv_type {
- DRV_S3C2410,
- DRV_S3C2412,
-};
-
-struct s3c2410fb_info {
- struct device *dev;
- struct clk *clk;
-
- struct resource *mem;
- void __iomem *io;
- void __iomem *irq_base;
-
- enum s3c_drv_type drv_type;
- struct s3c2410fb_hw regs;
-
- unsigned long clk_rate;
- unsigned int palette_ready;
-
-#ifdef CONFIG_ARM_S3C24XX_CPUFREQ
- struct notifier_block freq_transition;
-#endif
-
- /* keep these registers in case we need to re-write palette */
- u32 palette_buffer[256];
- u32 pseudo_pal[16];
-};
-
-#define PALETTE_BUFF_CLEAR (0x80000000) /* entry is clear/invalid */
-
-int s3c2410fb_init(void);
-
-#endif
diff --git a/drivers/video/fbdev/sa1100fb.c b/drivers/video/fbdev/sa1100fb.c
index 017c8efe8267..b1b8ccdbac4a 100644
--- a/drivers/video/fbdev/sa1100fb.c
+++ b/drivers/video/fbdev/sa1100fb.c
@@ -184,7 +184,6 @@
#include <mach/hardware.h>
#include <asm/mach-types.h>
-#include <mach/shannon.h>
/*
* Complain if VAR is out of range.
diff --git a/drivers/video/fbdev/simplefb.c b/drivers/video/fbdev/simplefb.c
index e770b4a356b5..10d71879d340 100644
--- a/drivers/video/fbdev/simplefb.c
+++ b/drivers/video/fbdev/simplefb.c
@@ -12,6 +12,7 @@
* Copyright (C) 1996 Paul Mackerras
*/
+#include <linux/aperture.h>
#include <linux/errno.h>
#include <linux/fb.h>
#include <linux/io.h>
@@ -68,6 +69,8 @@ static int simplefb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
struct simplefb_par {
u32 palette[PSEUDO_PALETTE_SIZE];
+ resource_size_t base;
+ resource_size_t size;
struct resource *mem;
#if defined CONFIG_OF && defined CONFIG_COMMON_CLK
bool clks_enabled;
@@ -472,16 +475,11 @@ static int simplefb_probe(struct platform_device *pdev)
info->var.blue = params.format->blue;
info->var.transp = params.format->transp;
- info->apertures = alloc_apertures(1);
- if (!info->apertures) {
- ret = -ENOMEM;
- goto error_fb_release;
- }
- info->apertures->ranges[0].base = info->fix.smem_start;
- info->apertures->ranges[0].size = info->fix.smem_len;
+ par->base = info->fix.smem_start;
+ par->size = info->fix.smem_len;
info->fbops = &simplefb_ops;
- info->flags = FBINFO_DEFAULT | FBINFO_MISC_FIRMWARE;
+ info->flags = FBINFO_DEFAULT;
info->screen_base = ioremap_wc(info->fix.smem_start,
info->fix.smem_len);
if (!info->screen_base) {
@@ -511,6 +509,11 @@ static int simplefb_probe(struct platform_device *pdev)
if (mem != res)
par->mem = mem; /* release in clean-up handler */
+ ret = devm_aperture_acquire_for_platform_device(pdev, par->base, par->size);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to acquire aperture: %d\n", ret);
+ goto error_regulators;
+ }
ret = register_framebuffer(info);
if (ret < 0) {
dev_err(&pdev->dev, "Unable to register simplefb: %d\n", ret);
diff --git a/drivers/video/fbdev/tmiofb.c b/drivers/video/fbdev/tmiofb.c
deleted file mode 100644
index 50111966c981..000000000000
--- a/drivers/video/fbdev/tmiofb.c
+++ /dev/null
@@ -1,1040 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Frame Buffer Device for Toshiba Mobile IO(TMIO) controller
- *
- * Copyright(C) 2005-2006 Chris Humbert
- * Copyright(C) 2005 Dirk Opfer
- * Copytight(C) 2007,2008 Dmitry Baryshkov
- *
- * Based on:
- * drivers/video/w100fb.c
- * code written by Sharp/Lineo for 2.4 kernels
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/fb.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-/* Why should fb driver call console functions? because console_lock() */
-#include <linux/console.h>
-#include <linux/mfd/core.h>
-#include <linux/mfd/tmio.h>
-#include <linux/uaccess.h>
-
-/*
- * accelerator commands
- */
-#define TMIOFB_ACC_CSADR(x) (0x00000000 | ((x) & 0x001ffffe))
-#define TMIOFB_ACC_CHPIX(x) (0x01000000 | ((x) & 0x000003ff))
-#define TMIOFB_ACC_CVPIX(x) (0x02000000 | ((x) & 0x000003ff))
-#define TMIOFB_ACC_PSADR(x) (0x03000000 | ((x) & 0x00fffffe))
-#define TMIOFB_ACC_PHPIX(x) (0x04000000 | ((x) & 0x000003ff))
-#define TMIOFB_ACC_PVPIX(x) (0x05000000 | ((x) & 0x000003ff))
-#define TMIOFB_ACC_PHOFS(x) (0x06000000 | ((x) & 0x000003ff))
-#define TMIOFB_ACC_PVOFS(x) (0x07000000 | ((x) & 0x000003ff))
-#define TMIOFB_ACC_POADR(x) (0x08000000 | ((x) & 0x00fffffe))
-#define TMIOFB_ACC_RSTR(x) (0x09000000 | ((x) & 0x000000ff))
-#define TMIOFB_ACC_TCLOR(x) (0x0A000000 | ((x) & 0x0000ffff))
-#define TMIOFB_ACC_FILL(x) (0x0B000000 | ((x) & 0x0000ffff))
-#define TMIOFB_ACC_DSADR(x) (0x0C000000 | ((x) & 0x00fffffe))
-#define TMIOFB_ACC_SSADR(x) (0x0D000000 | ((x) & 0x00fffffe))
-#define TMIOFB_ACC_DHPIX(x) (0x0E000000 | ((x) & 0x000003ff))
-#define TMIOFB_ACC_DVPIX(x) (0x0F000000 | ((x) & 0x000003ff))
-#define TMIOFB_ACC_SHPIX(x) (0x10000000 | ((x) & 0x000003ff))
-#define TMIOFB_ACC_SVPIX(x) (0x11000000 | ((x) & 0x000003ff))
-#define TMIOFB_ACC_LBINI(x) (0x12000000 | ((x) & 0x0000ffff))
-#define TMIOFB_ACC_LBK2(x) (0x13000000 | ((x) & 0x0000ffff))
-#define TMIOFB_ACC_SHBINI(x) (0x14000000 | ((x) & 0x0000ffff))
-#define TMIOFB_ACC_SHBK2(x) (0x15000000 | ((x) & 0x0000ffff))
-#define TMIOFB_ACC_SVBINI(x) (0x16000000 | ((x) & 0x0000ffff))
-#define TMIOFB_ACC_SVBK2(x) (0x17000000 | ((x) & 0x0000ffff))
-
-#define TMIOFB_ACC_CMGO 0x20000000
-#define TMIOFB_ACC_CMGO_CEND 0x00000001
-#define TMIOFB_ACC_CMGO_INT 0x00000002
-#define TMIOFB_ACC_CMGO_CMOD 0x00000010
-#define TMIOFB_ACC_CMGO_CDVRV 0x00000020
-#define TMIOFB_ACC_CMGO_CDHRV 0x00000040
-#define TMIOFB_ACC_CMGO_RUND 0x00008000
-#define TMIOFB_ACC_SCGO 0x21000000
-#define TMIOFB_ACC_SCGO_CEND 0x00000001
-#define TMIOFB_ACC_SCGO_INT 0x00000002
-#define TMIOFB_ACC_SCGO_ROP3 0x00000004
-#define TMIOFB_ACC_SCGO_TRNS 0x00000008
-#define TMIOFB_ACC_SCGO_DVRV 0x00000010
-#define TMIOFB_ACC_SCGO_DHRV 0x00000020
-#define TMIOFB_ACC_SCGO_SVRV 0x00000040
-#define TMIOFB_ACC_SCGO_SHRV 0x00000080
-#define TMIOFB_ACC_SCGO_DSTXY 0x00008000
-#define TMIOFB_ACC_SBGO 0x22000000
-#define TMIOFB_ACC_SBGO_CEND 0x00000001
-#define TMIOFB_ACC_SBGO_INT 0x00000002
-#define TMIOFB_ACC_SBGO_DVRV 0x00000010
-#define TMIOFB_ACC_SBGO_DHRV 0x00000020
-#define TMIOFB_ACC_SBGO_SVRV 0x00000040
-#define TMIOFB_ACC_SBGO_SHRV 0x00000080
-#define TMIOFB_ACC_SBGO_SBMD 0x00000100
-#define TMIOFB_ACC_FLGO 0x23000000
-#define TMIOFB_ACC_FLGO_CEND 0x00000001
-#define TMIOFB_ACC_FLGO_INT 0x00000002
-#define TMIOFB_ACC_FLGO_ROP3 0x00000004
-#define TMIOFB_ACC_LDGO 0x24000000
-#define TMIOFB_ACC_LDGO_CEND 0x00000001
-#define TMIOFB_ACC_LDGO_INT 0x00000002
-#define TMIOFB_ACC_LDGO_ROP3 0x00000004
-#define TMIOFB_ACC_LDGO_ENDPX 0x00000008
-#define TMIOFB_ACC_LDGO_LVRV 0x00000010
-#define TMIOFB_ACC_LDGO_LHRV 0x00000020
-#define TMIOFB_ACC_LDGO_LDMOD 0x00000040
-
-/* a FIFO is always allocated, even if acceleration is not used */
-#define TMIOFB_FIFO_SIZE 512
-
-/*
- * LCD Host Controller Configuration Register
- *
- * This iomem area supports only 16-bit IO.
- */
-#define CCR_CMD 0x04 /* Command */
-#define CCR_REVID 0x08 /* Revision ID */
-#define CCR_BASEL 0x10 /* LCD Control Reg Base Addr Low */
-#define CCR_BASEH 0x12 /* LCD Control Reg Base Addr High */
-#define CCR_UGCC 0x40 /* Unified Gated Clock Control */
-#define CCR_GCC 0x42 /* Gated Clock Control */
-#define CCR_USC 0x50 /* Unified Software Clear */
-#define CCR_VRAMRTC 0x60 /* VRAM Timing Control */
- /* 0x61 VRAM Refresh Control */
-#define CCR_VRAMSAC 0x62 /* VRAM Access Control */
- /* 0x63 VRAM Status */
-#define CCR_VRAMBC 0x64 /* VRAM Block Control */
-
-/*
- * LCD Control Register
- *
- * This iomem area supports only 16-bit IO.
- */
-#define LCR_UIS 0x000 /* Unified Interrupt Status */
-#define LCR_VHPN 0x008 /* VRAM Horizontal Pixel Number */
-#define LCR_CFSAL 0x00a /* Command FIFO Start Address Low */
-#define LCR_CFSAH 0x00c /* Command FIFO Start Address High */
-#define LCR_CFS 0x00e /* Command FIFO Size */
-#define LCR_CFWS 0x010 /* Command FIFO Writeable Size */
-#define LCR_BBIE 0x012 /* BitBLT Interrupt Enable */
-#define LCR_BBISC 0x014 /* BitBLT Interrupt Status and Clear */
-#define LCR_CCS 0x016 /* Command Count Status */
-#define LCR_BBES 0x018 /* BitBLT Execution Status */
-#define LCR_CMDL 0x01c /* Command Low */
-#define LCR_CMDH 0x01e /* Command High */
-#define LCR_CFC 0x022 /* Command FIFO Clear */
-#define LCR_CCIFC 0x024 /* CMOS Camera IF Control */
-#define LCR_HWT 0x026 /* Hardware Test */
-#define LCR_LCDCCRC 0x100 /* LCDC Clock and Reset Control */
-#define LCR_LCDCC 0x102 /* LCDC Control */
-#define LCR_LCDCOPC 0x104 /* LCDC Output Pin Control */
-#define LCR_LCDIS 0x108 /* LCD Interrupt Status */
-#define LCR_LCDIM 0x10a /* LCD Interrupt Mask */
-#define LCR_LCDIE 0x10c /* LCD Interrupt Enable */
-#define LCR_GDSAL 0x122 /* Graphics Display Start Address Low */
-#define LCR_GDSAH 0x124 /* Graphics Display Start Address High */
-#define LCR_VHPCL 0x12a /* VRAM Horizontal Pixel Count Low */
-#define LCR_VHPCH 0x12c /* VRAM Horizontal Pixel Count High */
-#define LCR_GM 0x12e /* Graphic Mode(VRAM access enable) */
-#define LCR_HT 0x140 /* Horizontal Total */
-#define LCR_HDS 0x142 /* Horizontal Display Start */
-#define LCR_HSS 0x144 /* H-Sync Start */
-#define LCR_HSE 0x146 /* H-Sync End */
-#define LCR_HNP 0x14c /* Horizontal Number of Pixels */
-#define LCR_VT 0x150 /* Vertical Total */
-#define LCR_VDS 0x152 /* Vertical Display Start */
-#define LCR_VSS 0x154 /* V-Sync Start */
-#define LCR_VSE 0x156 /* V-Sync End */
-#define LCR_CDLN 0x160 /* Current Display Line Number */
-#define LCR_ILN 0x162 /* Interrupt Line Number */
-#define LCR_SP 0x164 /* Sync Polarity */
-#define LCR_MISC 0x166 /* MISC(RGB565 mode) */
-#define LCR_VIHSS 0x16a /* Video Interface H-Sync Start */
-#define LCR_VIVS 0x16c /* Video Interface Vertical Start */
-#define LCR_VIVE 0x16e /* Video Interface Vertical End */
-#define LCR_VIVSS 0x170 /* Video Interface V-Sync Start */
-#define LCR_VCCIS 0x17e /* Video / CMOS Camera Interface Select */
-#define LCR_VIDWSAL 0x180 /* VI Data Write Start Address Low */
-#define LCR_VIDWSAH 0x182 /* VI Data Write Start Address High */
-#define LCR_VIDRSAL 0x184 /* VI Data Read Start Address Low */
-#define LCR_VIDRSAH 0x186 /* VI Data Read Start Address High */
-#define LCR_VIPDDST 0x188 /* VI Picture Data Display Start Timing */
-#define LCR_VIPDDET 0x186 /* VI Picture Data Display End Timing */
-#define LCR_VIE 0x18c /* Video Interface Enable */
-#define LCR_VCS 0x18e /* Video/Camera Select */
-#define LCR_VPHWC 0x194 /* Video Picture Horizontal Wait Count */
-#define LCR_VPHS 0x196 /* Video Picture Horizontal Size */
-#define LCR_VPVWC 0x198 /* Video Picture Vertical Wait Count */
-#define LCR_VPVS 0x19a /* Video Picture Vertical Size */
-#define LCR_PLHPIX 0x1a0 /* PLHPIX */
-#define LCR_XS 0x1a2 /* XStart */
-#define LCR_XCKHW 0x1a4 /* XCK High Width */
-#define LCR_STHS 0x1a8 /* STH Start */
-#define LCR_VT2 0x1aa /* Vertical Total */
-#define LCR_YCKSW 0x1ac /* YCK Start Wait */
-#define LCR_YSTS 0x1ae /* YST Start */
-#define LCR_PPOLS 0x1b0 /* #PPOL Start */
-#define LCR_PRECW 0x1b2 /* PREC Width */
-#define LCR_VCLKHW 0x1b4 /* VCLK High Width */
-#define LCR_OC 0x1b6 /* Output Control */
-
-static char *mode_option;
-
-struct tmiofb_par {
- u32 pseudo_palette[16];
-
-#ifdef CONFIG_FB_TMIO_ACCELL
- wait_queue_head_t wait_acc;
- bool use_polling;
-#endif
-
- void __iomem *ccr;
- void __iomem *lcr;
-};
-
-/*--------------------------------------------------------------------------*/
-
-/*
- * reasons for an interrupt:
- * uis bbisc lcdis
- * 0100 0001 accelerator command completed
- * 2000 0001 vsync start
- * 2000 0002 display start
- * 2000 0004 line number match(0x1ff mask???)
- */
-static irqreturn_t tmiofb_irq(int irq, void *__info)
-{
- struct fb_info *info = __info;
- struct tmiofb_par *par = info->par;
- unsigned int bbisc = tmio_ioread16(par->lcr + LCR_BBISC);
-
-
- tmio_iowrite16(bbisc, par->lcr + LCR_BBISC);
-
-#ifdef CONFIG_FB_TMIO_ACCELL
- /*
- * We were in polling mode and now we got correct irq.
- * Switch back to IRQ-based sync of command FIFO
- */
- if (unlikely(par->use_polling && irq != -1)) {
- printk(KERN_INFO "tmiofb: switching to waitq\n");
- par->use_polling = false;
- }
-
- if (bbisc & 1)
- wake_up(&par->wait_acc);
-#endif
-
- return IRQ_HANDLED;
-}
-
-
-/*--------------------------------------------------------------------------*/
-
-
-/*
- * Turns off the LCD controller and LCD host controller.
- */
-static int tmiofb_hw_stop(struct platform_device *dev)
-{
- struct tmio_fb_data *data = dev_get_platdata(&dev->dev);
- struct fb_info *info = platform_get_drvdata(dev);
- struct tmiofb_par *par = info->par;
-
- tmio_iowrite16(0, par->ccr + CCR_UGCC);
- tmio_iowrite16(0, par->lcr + LCR_GM);
- data->lcd_set_power(dev, 0);
- tmio_iowrite16(0x0010, par->lcr + LCR_LCDCCRC);
-
- return 0;
-}
-
-/*
- * Initializes the LCD host controller.
- */
-static int tmiofb_hw_init(struct platform_device *dev)
-{
- const struct mfd_cell *cell = mfd_get_cell(dev);
- struct fb_info *info = platform_get_drvdata(dev);
- struct tmiofb_par *par = info->par;
- const struct resource *nlcr = &cell->resources[0];
- const struct resource *vram = &cell->resources[2];
- unsigned long base;
-
- if (nlcr == NULL || vram == NULL)
- return -EINVAL;
-
- base = nlcr->start;
-
- tmio_iowrite16(0x003a, par->ccr + CCR_UGCC);
- tmio_iowrite16(0x003a, par->ccr + CCR_GCC);
- tmio_iowrite16(0x3f00, par->ccr + CCR_USC);
-
- msleep(2); /* wait for device to settle */
-
- tmio_iowrite16(0x0000, par->ccr + CCR_USC);
- tmio_iowrite16(base >> 16, par->ccr + CCR_BASEH);
- tmio_iowrite16(base, par->ccr + CCR_BASEL);
- tmio_iowrite16(0x0002, par->ccr + CCR_CMD); /* base address enable */
- tmio_iowrite16(0x40a8, par->ccr + CCR_VRAMRTC); /* VRAMRC, VRAMTC */
- tmio_iowrite16(0x0018, par->ccr + CCR_VRAMSAC); /* VRAMSTS, VRAMAC */
- tmio_iowrite16(0x0002, par->ccr + CCR_VRAMBC);
- msleep(2); /* wait for device to settle */
- tmio_iowrite16(0x000b, par->ccr + CCR_VRAMBC);
-
- base = vram->start + info->screen_size;
- tmio_iowrite16(base >> 16, par->lcr + LCR_CFSAH);
- tmio_iowrite16(base, par->lcr + LCR_CFSAL);
- tmio_iowrite16(TMIOFB_FIFO_SIZE - 1, par->lcr + LCR_CFS);
- tmio_iowrite16(1, par->lcr + LCR_CFC);
- tmio_iowrite16(1, par->lcr + LCR_BBIE);
- tmio_iowrite16(0, par->lcr + LCR_CFWS);
-
- return 0;
-}
-
-/*
- * Sets the LCD controller's output resolution and pixel clock
- */
-static void tmiofb_hw_mode(struct platform_device *dev)
-{
- struct tmio_fb_data *data = dev_get_platdata(&dev->dev);
- struct fb_info *info = platform_get_drvdata(dev);
- struct fb_videomode *mode = info->mode;
- struct tmiofb_par *par = info->par;
- unsigned int i;
-
- tmio_iowrite16(0, par->lcr + LCR_GM);
- data->lcd_set_power(dev, 0);
- tmio_iowrite16(0x0010, par->lcr + LCR_LCDCCRC);
- data->lcd_mode(dev, mode);
- data->lcd_set_power(dev, 1);
-
- tmio_iowrite16(info->fix.line_length, par->lcr + LCR_VHPN);
- tmio_iowrite16(0, par->lcr + LCR_GDSAH);
- tmio_iowrite16(0, par->lcr + LCR_GDSAL);
- tmio_iowrite16(info->fix.line_length >> 16, par->lcr + LCR_VHPCH);
- tmio_iowrite16(info->fix.line_length, par->lcr + LCR_VHPCL);
- tmio_iowrite16(i = 0, par->lcr + LCR_HSS);
- tmio_iowrite16(i += mode->hsync_len, par->lcr + LCR_HSE);
- tmio_iowrite16(i += mode->left_margin, par->lcr + LCR_HDS);
- tmio_iowrite16(i += mode->xres + mode->right_margin, par->lcr + LCR_HT);
- tmio_iowrite16(mode->xres, par->lcr + LCR_HNP);
- tmio_iowrite16(i = 0, par->lcr + LCR_VSS);
- tmio_iowrite16(i += mode->vsync_len, par->lcr + LCR_VSE);
- tmio_iowrite16(i += mode->upper_margin, par->lcr + LCR_VDS);
- tmio_iowrite16(i += mode->yres, par->lcr + LCR_ILN);
- tmio_iowrite16(i += mode->lower_margin, par->lcr + LCR_VT);
- tmio_iowrite16(3, par->lcr + LCR_MISC); /* RGB565 mode */
- tmio_iowrite16(1, par->lcr + LCR_GM); /* VRAM enable */
- tmio_iowrite16(0x4007, par->lcr + LCR_LCDCC);
- tmio_iowrite16(3, par->lcr + LCR_SP); /* sync polarity */
-
- tmio_iowrite16(0x0010, par->lcr + LCR_LCDCCRC);
- msleep(5); /* wait for device to settle */
- tmio_iowrite16(0x0014, par->lcr + LCR_LCDCCRC); /* STOP_CKP */
- msleep(5); /* wait for device to settle */
- tmio_iowrite16(0x0015, par->lcr + LCR_LCDCCRC); /* STOP_CKP|SOFT_RESET*/
- tmio_iowrite16(0xfffa, par->lcr + LCR_VCS);
-}
-
-/*--------------------------------------------------------------------------*/
-
-#ifdef CONFIG_FB_TMIO_ACCELL
-static int __must_check
-tmiofb_acc_wait(struct fb_info *info, unsigned int ccs)
-{
- struct tmiofb_par *par = info->par;
- /*
- * This code can be called with interrupts disabled.
- * So instead of relaying on irq to trigger the event,
- * poll the state till the necessary command is executed.
- */
- if (irqs_disabled() || par->use_polling) {
- int i = 0;
- while (tmio_ioread16(par->lcr + LCR_CCS) > ccs) {
- udelay(1);
- i++;
- if (i > 10000) {
- pr_err("tmiofb: timeout waiting for %d\n",
- ccs);
- return -ETIMEDOUT;
- }
- tmiofb_irq(-1, info);
- }
- } else {
- if (!wait_event_interruptible_timeout(par->wait_acc,
- tmio_ioread16(par->lcr + LCR_CCS) <= ccs,
- 1000)) {
- pr_err("tmiofb: timeout waiting for %d\n", ccs);
- return -ETIMEDOUT;
- }
- }
-
- return 0;
-}
-
-/*
- * Writes an accelerator command to the accelerator's FIFO.
- */
-static int
-tmiofb_acc_write(struct fb_info *info, const u32 *cmd, unsigned int count)
-{
- struct tmiofb_par *par = info->par;
- int ret;
-
- ret = tmiofb_acc_wait(info, TMIOFB_FIFO_SIZE - count);
- if (ret)
- return ret;
-
- for (; count; count--, cmd++) {
- tmio_iowrite16(*cmd >> 16, par->lcr + LCR_CMDH);
- tmio_iowrite16(*cmd, par->lcr + LCR_CMDL);
- }
-
- return ret;
-}
-
-/*
- * Wait for the accelerator to finish its operations before writing
- * to the framebuffer for consistent display output.
- */
-static int tmiofb_sync(struct fb_info *fbi)
-{
- struct tmiofb_par *par = fbi->par;
-
- int ret;
- int i = 0;
-
- ret = tmiofb_acc_wait(fbi, 0);
-
- while (tmio_ioread16(par->lcr + LCR_BBES) & 2) { /* blit active */
- udelay(1);
- i++ ;
- if (i > 10000) {
- printk(KERN_ERR "timeout waiting for blit to end!\n");
- return -ETIMEDOUT;
- }
- }
-
- return ret;
-}
-
-static void
-tmiofb_fillrect(struct fb_info *fbi, const struct fb_fillrect *rect)
-{
- const u32 cmd[] = {
- TMIOFB_ACC_DSADR((rect->dy * fbi->mode->xres + rect->dx) * 2),
- TMIOFB_ACC_DHPIX(rect->width - 1),
- TMIOFB_ACC_DVPIX(rect->height - 1),
- TMIOFB_ACC_FILL(rect->color),
- TMIOFB_ACC_FLGO,
- };
-
- if (fbi->state != FBINFO_STATE_RUNNING ||
- fbi->flags & FBINFO_HWACCEL_DISABLED) {
- cfb_fillrect(fbi, rect);
- return;
- }
-
- tmiofb_acc_write(fbi, cmd, ARRAY_SIZE(cmd));
-}
-
-static void
-tmiofb_copyarea(struct fb_info *fbi, const struct fb_copyarea *area)
-{
- const u32 cmd[] = {
- TMIOFB_ACC_DSADR((area->dy * fbi->mode->xres + area->dx) * 2),
- TMIOFB_ACC_DHPIX(area->width - 1),
- TMIOFB_ACC_DVPIX(area->height - 1),
- TMIOFB_ACC_SSADR((area->sy * fbi->mode->xres + area->sx) * 2),
- TMIOFB_ACC_SCGO,
- };
-
- if (fbi->state != FBINFO_STATE_RUNNING ||
- fbi->flags & FBINFO_HWACCEL_DISABLED) {
- cfb_copyarea(fbi, area);
- return;
- }
-
- tmiofb_acc_write(fbi, cmd, ARRAY_SIZE(cmd));
-}
-#endif
-
-static void tmiofb_clearscreen(struct fb_info *info)
-{
- const struct fb_fillrect rect = {
- .dx = 0,
- .dy = 0,
- .width = info->mode->xres,
- .height = info->mode->yres,
- .color = 0,
- .rop = ROP_COPY,
- };
-
- info->fbops->fb_fillrect(info, &rect);
-}
-
-static int tmiofb_vblank(struct fb_info *fbi, struct fb_vblank *vblank)
-{
- struct tmiofb_par *par = fbi->par;
- struct fb_videomode *mode = fbi->mode;
- unsigned int vcount = tmio_ioread16(par->lcr + LCR_CDLN);
- unsigned int vds = mode->vsync_len + mode->upper_margin;
-
- vblank->vcount = vcount;
- vblank->flags = FB_VBLANK_HAVE_VBLANK | FB_VBLANK_HAVE_VCOUNT
- | FB_VBLANK_HAVE_VSYNC;
-
- if (vcount < mode->vsync_len)
- vblank->flags |= FB_VBLANK_VSYNCING;
-
- if (vcount < vds || vcount > vds + mode->yres)
- vblank->flags |= FB_VBLANK_VBLANKING;
-
- return 0;
-}
-
-
-static int tmiofb_ioctl(struct fb_info *fbi,
- unsigned int cmd, unsigned long arg)
-{
- switch (cmd) {
- case FBIOGET_VBLANK: {
- struct fb_vblank vblank = {0};
- void __user *argp = (void __user *) arg;
-
- tmiofb_vblank(fbi, &vblank);
- if (copy_to_user(argp, &vblank, sizeof vblank))
- return -EFAULT;
- return 0;
- }
-
-#ifdef CONFIG_FB_TMIO_ACCELL
- case FBIO_TMIO_ACC_SYNC:
- tmiofb_sync(fbi);
- return 0;
-
- case FBIO_TMIO_ACC_WRITE: {
- u32 __user *argp = (void __user *) arg;
- u32 len;
- u32 acc[16];
-
- if (get_user(len, argp))
- return -EFAULT;
- if (len > ARRAY_SIZE(acc))
- return -EINVAL;
- if (copy_from_user(acc, argp + 1, sizeof(u32) * len))
- return -EFAULT;
-
- return tmiofb_acc_write(fbi, acc, len);
- }
-#endif
- }
-
- return -ENOTTY;
-}
-
-/*--------------------------------------------------------------------------*/
-
-/* Select the smallest mode that allows the desired resolution to be
- * displayed. If desired, the x and y parameters can be rounded up to
- * match the selected mode.
- */
-static struct fb_videomode *
-tmiofb_find_mode(struct fb_info *info, struct fb_var_screeninfo *var)
-{
- struct tmio_fb_data *data = dev_get_platdata(info->device);
- struct fb_videomode *best = NULL;
- int i;
-
- for (i = 0; i < data->num_modes; i++) {
- struct fb_videomode *mode = data->modes + i;
-
- if (mode->xres >= var->xres && mode->yres >= var->yres
- && (!best || (mode->xres < best->xres
- && mode->yres < best->yres)))
- best = mode;
- }
-
- return best;
-}
-
-static int tmiofb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
-{
-
- struct fb_videomode *mode;
- struct tmio_fb_data *data = dev_get_platdata(info->device);
-
- mode = tmiofb_find_mode(info, var);
- if (!mode || var->bits_per_pixel > 16)
- return -EINVAL;
-
- fb_videomode_to_var(var, mode);
-
- var->xres_virtual = mode->xres;
- var->yres_virtual = info->screen_size / (mode->xres * 2);
-
- if (var->yres_virtual < var->yres)
- return -EINVAL;
-
- var->xoffset = 0;
- var->yoffset = 0;
- var->bits_per_pixel = 16;
- var->grayscale = 0;
- var->red.offset = 11;
- var->red.length = 5;
- var->green.offset = 5;
- var->green.length = 6;
- var->blue.offset = 0;
- var->blue.length = 5;
- var->transp.offset = 0;
- var->transp.length = 0;
- var->nonstd = 0;
- var->height = data->height; /* mm */
- var->width = data->width; /* mm */
- var->rotate = 0;
- return 0;
-}
-
-static int tmiofb_set_par(struct fb_info *info)
-{
- struct fb_var_screeninfo *var = &info->var;
- struct fb_videomode *mode;
-
- mode = tmiofb_find_mode(info, var);
- if (!mode)
- return -EINVAL;
-
- info->mode = mode;
- info->fix.line_length = info->mode->xres *
- var->bits_per_pixel / 8;
-
- tmiofb_hw_mode(to_platform_device(info->device));
- tmiofb_clearscreen(info);
- return 0;
-}
-
-static int tmiofb_setcolreg(unsigned regno, unsigned red, unsigned green,
- unsigned blue, unsigned transp,
- struct fb_info *info)
-{
- struct tmiofb_par *par = info->par;
-
- if (regno < ARRAY_SIZE(par->pseudo_palette)) {
- par->pseudo_palette[regno] =
- ((red & 0xf800)) |
- ((green & 0xfc00) >> 5) |
- ((blue & 0xf800) >> 11);
- return 0;
- }
-
- return -EINVAL;
-}
-
-static int tmiofb_blank(int blank, struct fb_info *info)
-{
- /*
- * everything is done in lcd/bl drivers.
- * this is purely to make sysfs happy and work.
- */
- return 0;
-}
-
-static const struct fb_ops tmiofb_ops = {
- .owner = THIS_MODULE,
-
- .fb_ioctl = tmiofb_ioctl,
- .fb_check_var = tmiofb_check_var,
- .fb_set_par = tmiofb_set_par,
- .fb_setcolreg = tmiofb_setcolreg,
- .fb_blank = tmiofb_blank,
- .fb_imageblit = cfb_imageblit,
-#ifdef CONFIG_FB_TMIO_ACCELL
- .fb_sync = tmiofb_sync,
- .fb_fillrect = tmiofb_fillrect,
- .fb_copyarea = tmiofb_copyarea,
-#else
- .fb_fillrect = cfb_fillrect,
- .fb_copyarea = cfb_copyarea,
-#endif
-};
-
-/*--------------------------------------------------------------------------*/
-
-static int tmiofb_probe(struct platform_device *dev)
-{
- const struct mfd_cell *cell = mfd_get_cell(dev);
- struct tmio_fb_data *data = dev_get_platdata(&dev->dev);
- struct resource *ccr = platform_get_resource(dev, IORESOURCE_MEM, 1);
- struct resource *lcr = platform_get_resource(dev, IORESOURCE_MEM, 0);
- struct resource *vram = platform_get_resource(dev, IORESOURCE_MEM, 2);
- int irq = platform_get_irq(dev, 0);
- struct fb_info *info;
- struct tmiofb_par *par;
- int retval;
-
- /*
- * This is the only way ATM to disable the fb
- */
- if (data == NULL) {
- dev_err(&dev->dev, "NULL platform data!\n");
- return -EINVAL;
- }
- if (ccr == NULL || lcr == NULL || vram == NULL || irq < 0) {
- dev_err(&dev->dev, "missing resources\n");
- return -EINVAL;
- }
-
- info = framebuffer_alloc(sizeof(struct tmiofb_par), &dev->dev);
-
- if (!info)
- return -ENOMEM;
-
- par = info->par;
-
-#ifdef CONFIG_FB_TMIO_ACCELL
- init_waitqueue_head(&par->wait_acc);
-
- par->use_polling = true;
-
- info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA
- | FBINFO_HWACCEL_FILLRECT;
-#else
- info->flags = FBINFO_DEFAULT;
-#endif
-
- info->fbops = &tmiofb_ops;
-
- strcpy(info->fix.id, "tmio-fb");
- info->fix.smem_start = vram->start;
- info->fix.smem_len = resource_size(vram);
- info->fix.type = FB_TYPE_PACKED_PIXELS;
- info->fix.visual = FB_VISUAL_TRUECOLOR;
- info->fix.mmio_start = lcr->start;
- info->fix.mmio_len = resource_size(lcr);
- info->fix.accel = FB_ACCEL_NONE;
- info->screen_size = info->fix.smem_len - (4 * TMIOFB_FIFO_SIZE);
- info->pseudo_palette = par->pseudo_palette;
-
- par->ccr = ioremap(ccr->start, resource_size(ccr));
- if (!par->ccr) {
- retval = -ENOMEM;
- goto err_ioremap_ccr;
- }
-
- par->lcr = ioremap(info->fix.mmio_start, info->fix.mmio_len);
- if (!par->lcr) {
- retval = -ENOMEM;
- goto err_ioremap_lcr;
- }
-
- info->screen_base = ioremap(info->fix.smem_start, info->fix.smem_len);
- if (!info->screen_base) {
- retval = -ENOMEM;
- goto err_ioremap_vram;
- }
-
- retval = request_irq(irq, &tmiofb_irq, 0,
- dev_name(&dev->dev), info);
-
- if (retval)
- goto err_request_irq;
-
- platform_set_drvdata(dev, info);
-
- retval = fb_find_mode(&info->var, info, mode_option,
- data->modes, data->num_modes,
- data->modes, 16);
- if (!retval) {
- retval = -EINVAL;
- goto err_find_mode;
- }
-
- if (cell->enable) {
- retval = cell->enable(dev);
- if (retval)
- goto err_enable;
- }
-
- retval = tmiofb_hw_init(dev);
- if (retval)
- goto err_hw_init;
-
- fb_videomode_to_modelist(data->modes, data->num_modes,
- &info->modelist);
-
- retval = register_framebuffer(info);
- if (retval < 0)
- goto err_register_framebuffer;
-
- fb_info(info, "%s frame buffer device\n", info->fix.id);
-
- return 0;
-
-err_register_framebuffer:
-/*err_set_par:*/
- tmiofb_hw_stop(dev);
-err_hw_init:
- if (cell->disable)
- cell->disable(dev);
-err_enable:
-err_find_mode:
- free_irq(irq, info);
-err_request_irq:
- iounmap(info->screen_base);
-err_ioremap_vram:
- iounmap(par->lcr);
-err_ioremap_lcr:
- iounmap(par->ccr);
-err_ioremap_ccr:
- framebuffer_release(info);
- return retval;
-}
-
-static int tmiofb_remove(struct platform_device *dev)
-{
- const struct mfd_cell *cell = mfd_get_cell(dev);
- struct fb_info *info = platform_get_drvdata(dev);
- int irq = platform_get_irq(dev, 0);
- struct tmiofb_par *par;
-
- if (info) {
- par = info->par;
- unregister_framebuffer(info);
-
- tmiofb_hw_stop(dev);
-
- if (cell->disable)
- cell->disable(dev);
-
- free_irq(irq, info);
-
- iounmap(info->screen_base);
- iounmap(par->lcr);
- iounmap(par->ccr);
-
- framebuffer_release(info);
- }
-
- return 0;
-}
-
-#ifdef DEBUG
-static void tmiofb_dump_regs(struct platform_device *dev)
-{
- struct fb_info *info = platform_get_drvdata(dev);
- struct tmiofb_par *par = info->par;
-
- printk(KERN_DEBUG "lhccr:\n");
-#define CCR_PR(n) printk(KERN_DEBUG "\t" #n " = \t%04x\n",\
- tmio_ioread16(par->ccr + CCR_ ## n));
- CCR_PR(CMD);
- CCR_PR(REVID);
- CCR_PR(BASEL);
- CCR_PR(BASEH);
- CCR_PR(UGCC);
- CCR_PR(GCC);
- CCR_PR(USC);
- CCR_PR(VRAMRTC);
- CCR_PR(VRAMSAC);
- CCR_PR(VRAMBC);
-#undef CCR_PR
-
- printk(KERN_DEBUG "lcr: \n");
-#define LCR_PR(n) printk(KERN_DEBUG "\t" #n " = \t%04x\n",\
- tmio_ioread16(par->lcr + LCR_ ## n));
- LCR_PR(UIS);
- LCR_PR(VHPN);
- LCR_PR(CFSAL);
- LCR_PR(CFSAH);
- LCR_PR(CFS);
- LCR_PR(CFWS);
- LCR_PR(BBIE);
- LCR_PR(BBISC);
- LCR_PR(CCS);
- LCR_PR(BBES);
- LCR_PR(CMDL);
- LCR_PR(CMDH);
- LCR_PR(CFC);
- LCR_PR(CCIFC);
- LCR_PR(HWT);
- LCR_PR(LCDCCRC);
- LCR_PR(LCDCC);
- LCR_PR(LCDCOPC);
- LCR_PR(LCDIS);
- LCR_PR(LCDIM);
- LCR_PR(LCDIE);
- LCR_PR(GDSAL);
- LCR_PR(GDSAH);
- LCR_PR(VHPCL);
- LCR_PR(VHPCH);
- LCR_PR(GM);
- LCR_PR(HT);
- LCR_PR(HDS);
- LCR_PR(HSS);
- LCR_PR(HSE);
- LCR_PR(HNP);
- LCR_PR(VT);
- LCR_PR(VDS);
- LCR_PR(VSS);
- LCR_PR(VSE);
- LCR_PR(CDLN);
- LCR_PR(ILN);
- LCR_PR(SP);
- LCR_PR(MISC);
- LCR_PR(VIHSS);
- LCR_PR(VIVS);
- LCR_PR(VIVE);
- LCR_PR(VIVSS);
- LCR_PR(VCCIS);
- LCR_PR(VIDWSAL);
- LCR_PR(VIDWSAH);
- LCR_PR(VIDRSAL);
- LCR_PR(VIDRSAH);
- LCR_PR(VIPDDST);
- LCR_PR(VIPDDET);
- LCR_PR(VIE);
- LCR_PR(VCS);
- LCR_PR(VPHWC);
- LCR_PR(VPHS);
- LCR_PR(VPVWC);
- LCR_PR(VPVS);
- LCR_PR(PLHPIX);
- LCR_PR(XS);
- LCR_PR(XCKHW);
- LCR_PR(STHS);
- LCR_PR(VT2);
- LCR_PR(YCKSW);
- LCR_PR(YSTS);
- LCR_PR(PPOLS);
- LCR_PR(PRECW);
- LCR_PR(VCLKHW);
- LCR_PR(OC);
-#undef LCR_PR
-}
-#endif
-
-#ifdef CONFIG_PM
-static int tmiofb_suspend(struct platform_device *dev, pm_message_t state)
-{
- struct fb_info *info = platform_get_drvdata(dev);
-#ifdef CONFIG_FB_TMIO_ACCELL
- struct tmiofb_par *par = info->par;
-#endif
- const struct mfd_cell *cell = mfd_get_cell(dev);
- int retval = 0;
-
- console_lock();
-
- fb_set_suspend(info, 1);
-
- if (info->fbops->fb_sync)
- info->fbops->fb_sync(info);
-
-
-#ifdef CONFIG_FB_TMIO_ACCELL
- /*
- * The fb should be usable even if interrupts are disabled (and they are
- * during suspend/resume). Switch temporary to forced polling.
- */
- printk(KERN_INFO "tmiofb: switching to polling\n");
- par->use_polling = true;
-#endif
- tmiofb_hw_stop(dev);
-
- if (cell->suspend)
- retval = cell->suspend(dev);
-
- console_unlock();
-
- return retval;
-}
-
-static int tmiofb_resume(struct platform_device *dev)
-{
- struct fb_info *info = platform_get_drvdata(dev);
- const struct mfd_cell *cell = mfd_get_cell(dev);
- int retval = 0;
-
- console_lock();
-
- if (cell->resume) {
- retval = cell->resume(dev);
- if (retval)
- goto out;
- }
-
- tmiofb_irq(-1, info);
-
- tmiofb_hw_init(dev);
-
- tmiofb_hw_mode(dev);
-
- fb_set_suspend(info, 0);
-out:
- console_unlock();
- return retval;
-}
-#else
-#define tmiofb_suspend NULL
-#define tmiofb_resume NULL
-#endif
-
-static struct platform_driver tmiofb_driver = {
- .driver.name = "tmio-fb",
- .driver.owner = THIS_MODULE,
- .probe = tmiofb_probe,
- .remove = tmiofb_remove,
- .suspend = tmiofb_suspend,
- .resume = tmiofb_resume,
-};
-
-/*--------------------------------------------------------------------------*/
-
-#ifndef MODULE
-static void __init tmiofb_setup(char *options)
-{
- char *this_opt;
-
- if (!options || !*options)
- return;
-
- while ((this_opt = strsep(&options, ",")) != NULL) {
- if (!*this_opt)
- continue;
- /*
- * FIXME
- */
- }
-}
-#endif
-
-static int __init tmiofb_init(void)
-{
-#ifndef MODULE
- char *option = NULL;
-
- if (fb_get_options("tmiofb", &option))
- return -ENODEV;
- tmiofb_setup(option);
-#endif
- return platform_driver_register(&tmiofb_driver);
-}
-
-static void __exit tmiofb_cleanup(void)
-{
- platform_driver_unregister(&tmiofb_driver);
-}
-
-module_init(tmiofb_init);
-module_exit(tmiofb_cleanup);
-
-MODULE_DESCRIPTION("TMIO framebuffer driver");
-MODULE_AUTHOR("Chris Humbert, Dirk Opfer, Dmitry Baryshkov");
-MODULE_LICENSE("GPL");
diff --git a/drivers/video/fbdev/vesafb.c b/drivers/video/fbdev/vesafb.c
index 929d4775cb4b..3f8bdfcf51f0 100644
--- a/drivers/video/fbdev/vesafb.c
+++ b/drivers/video/fbdev/vesafb.c
@@ -9,6 +9,7 @@
*
*/
+#include <linux/aperture.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
@@ -31,6 +32,8 @@
struct vesafb_par {
u32 pseudo_palette[256];
+ resource_size_t base;
+ resource_size_t size;
int wc_cookie;
struct resource *region;
};
@@ -140,7 +143,7 @@ static int vesafb_setcolreg(unsigned regno, unsigned red, unsigned green,
* (according to the entries in the `var' structure). Return
* != 0 for invalid regno.
*/
-
+
if (regno >= info->cmap.len)
return 1;
@@ -191,7 +194,7 @@ static void vesafb_destroy(struct fb_info *info)
arch_phys_wc_del(par->wc_cookie);
if (info->screen_base)
iounmap(info->screen_base);
- release_mem_region(info->apertures->ranges[0].base, info->apertures->ranges[0].size);
+ release_mem_region(par->base, par->size);
framebuffer_release(info);
}
@@ -209,13 +212,13 @@ static struct fb_ops vesafb_ops = {
static int vesafb_setup(char *options)
{
char *this_opt;
-
+
if (!options || !*options)
return 0;
-
+
while ((this_opt = strsep(&options, ",")) != NULL) {
if (!*this_opt) continue;
-
+
if (! strcmp(this_opt, "inverse"))
inverse=1;
else if (! strcmp(this_opt, "redraw"))
@@ -316,14 +319,8 @@ static int vesafb_probe(struct platform_device *dev)
par = info->par;
info->pseudo_palette = par->pseudo_palette;
- /* set vesafb aperture size for generic probing */
- info->apertures = alloc_apertures(1);
- if (!info->apertures) {
- err = -ENOMEM;
- goto err;
- }
- info->apertures->ranges[0].base = screen_info.lfb_base;
- info->apertures->ranges[0].size = size_total;
+ par->base = screen_info.lfb_base;
+ par->size = size_total;
printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
@@ -381,7 +378,7 @@ static int vesafb_probe(struct platform_device *dev)
vesafb_defined.pixclock = 10000000 / vesafb_defined.xres * 1000 / vesafb_defined.yres;
vesafb_defined.left_margin = (vesafb_defined.xres / 8) & 0xf8;
vesafb_defined.hsync_len = (vesafb_defined.xres / 8) & 0xf8;
-
+
vesafb_defined.red.offset = screen_info.red_pos;
vesafb_defined.red.length = screen_info.red_size;
vesafb_defined.green.offset = screen_info.green_pos;
@@ -460,27 +457,29 @@ static int vesafb_probe(struct platform_device *dev)
info->fbops = &vesafb_ops;
info->var = vesafb_defined;
info->fix = vesafb_fix;
- info->flags = FBINFO_FLAG_DEFAULT | FBINFO_MISC_FIRMWARE |
- (ypan ? FBINFO_HWACCEL_YPAN : 0);
+ info->flags = FBINFO_FLAG_DEFAULT | (ypan ? FBINFO_HWACCEL_YPAN : 0);
if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) {
err = -ENOMEM;
goto err_release_region;
}
+ err = devm_aperture_acquire_for_platform_device(dev, par->base, par->size);
+ if (err)
+ goto err_fb_dealloc_cmap;
if (register_framebuffer(info)<0) {
err = -EINVAL;
- fb_dealloc_cmap(&info->cmap);
- goto err_release_region;
+ goto err_fb_dealloc_cmap;
}
fb_info(info, "%s frame buffer device\n", info->fix.id);
return 0;
+err_fb_dealloc_cmap:
+ fb_dealloc_cmap(&info->cmap);
err_release_region:
arch_phys_wc_del(par->wc_cookie);
if (info->screen_base)
iounmap(info->screen_base);
if (par->region)
release_region(0x3c0, 32);
-err:
framebuffer_release(info);
release_mem_region(vesafb_fix.smem_start, size_total);
return err;
diff --git a/drivers/video/fbdev/vga16fb.c b/drivers/video/fbdev/vga16fb.c
index af47f8217095..1a8ffdb2be26 100644
--- a/drivers/video/fbdev/vga16fb.c
+++ b/drivers/video/fbdev/vga16fb.c
@@ -10,6 +10,7 @@
* archive for more details.
*/
+#include <linux/aperture.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
@@ -1324,11 +1325,6 @@ static int vga16fb_probe(struct platform_device *dev)
ret = -ENOMEM;
goto err_fb_alloc;
}
- info->apertures = alloc_apertures(1);
- if (!info->apertures) {
- ret = -ENOMEM;
- goto err_ioremap;
- }
/* XXX share VGA_FB_PHYS_BASE and I/O region with vgacon and others */
info->screen_base = (void __iomem *)VGA_MAP_MEM(VGA_FB_PHYS_BASE, 0);
@@ -1363,8 +1359,7 @@ static int vga16fb_probe(struct platform_device *dev)
info->fix = vga16fb_fix;
/* supports rectangles with widths of multiples of 8 */
info->pixmap.blit_x = 1 << 7 | 1 << 15 | 1 << 23 | 1 << 31;
- info->flags = FBINFO_FLAG_DEFAULT | FBINFO_MISC_FIRMWARE |
- FBINFO_HWACCEL_YPAN;
+ info->flags = FBINFO_FLAG_DEFAULT | FBINFO_HWACCEL_YPAN;
i = (info->var.bits_per_pixel == 8) ? 256 : 16;
ret = fb_alloc_cmap(&info->cmap, i, 0);
@@ -1382,9 +1377,9 @@ static int vga16fb_probe(struct platform_device *dev)
vga16fb_update_fix(info);
- info->apertures->ranges[0].base = VGA_FB_PHYS_BASE;
- info->apertures->ranges[0].size = VGA_FB_PHYS_SIZE;
-
+ ret = devm_aperture_acquire_for_platform_device(dev, VGA_FB_PHYS_BASE, VGA_FB_PHYS_SIZE);
+ if (ret)
+ goto err_check_var;
if (register_framebuffer(info) < 0) {
printk(KERN_ERR "vga16fb: unable to register framebuffer\n");
ret = -EINVAL;
diff --git a/drivers/video/fbdev/w100fb.c b/drivers/video/fbdev/w100fb.c
deleted file mode 100644
index 4e641a780726..000000000000
--- a/drivers/video/fbdev/w100fb.c
+++ /dev/null
@@ -1,1644 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * linux/drivers/video/w100fb.c
- *
- * Frame Buffer Device for ATI Imageon w100 (Wallaby)
- *
- * Copyright (C) 2002, ATI Corp.
- * Copyright (C) 2004-2006 Richard Purdie
- * Copyright (c) 2005 Ian Molton
- * Copyright (c) 2006 Alberto Mardegan
- *
- * Rewritten for 2.6 by Richard Purdie <rpurdie@rpsys.net>
- *
- * Generic platform support by Ian Molton <spyro@f2s.com>
- * and Richard Purdie <rpurdie@rpsys.net>
- *
- * w32xx support by Ian Molton
- *
- * Hardware acceleration support by Alberto Mardegan
- * <mardy@users.sourceforge.net>
- */
-
-#include <linux/delay.h>
-#include <linux/fb.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/vmalloc.h>
-#include <linux/module.h>
-#include <asm/io.h>
-#include <linux/uaccess.h>
-#include <video/w100fb.h>
-#include "w100fb.h"
-
-/*
- * Prototypes
- */
-static void w100_suspend(u32 mode);
-static void w100_vsync(void);
-static void w100_hw_init(struct w100fb_par*);
-static void w100_pwm_setup(struct w100fb_par*);
-static void w100_init_clocks(struct w100fb_par*);
-static void w100_setup_memory(struct w100fb_par*);
-static void w100_init_lcd(struct w100fb_par*);
-static void w100_set_dispregs(struct w100fb_par*);
-static void w100_update_enable(void);
-static void w100_update_disable(void);
-static void calc_hsync(struct w100fb_par *par);
-static void w100_init_graphic_engine(struct w100fb_par *par);
-struct w100_pll_info *w100_get_xtal_table(unsigned int freq);
-
-/* Pseudo palette size */
-#define MAX_PALETTES 16
-
-#define W100_SUSPEND_EXTMEM 0
-#define W100_SUSPEND_ALL 1
-
-#define BITS_PER_PIXEL 16
-
-/* Remapped addresses for base cfg, memmapped regs and the frame buffer itself */
-static void __iomem *remapped_base;
-static void __iomem *remapped_regs;
-static void __iomem *remapped_fbuf;
-
-#define REMAPPED_FB_LEN 0x15ffff
-
-/* This is the offset in the w100's address space we map the current
- framebuffer memory to. We use the position of external memory as
- we can remap internal memory to there if external isn't present. */
-#define W100_FB_BASE MEM_EXT_BASE_VALUE
-
-
-/*
- * Sysfs functions
- */
-static ssize_t flip_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct fb_info *info = dev_get_drvdata(dev);
- struct w100fb_par *par=info->par;
-
- return sprintf(buf, "%d\n",par->flip);
-}
-
-static ssize_t flip_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
-{
- unsigned int flip;
- struct fb_info *info = dev_get_drvdata(dev);
- struct w100fb_par *par=info->par;
-
- flip = simple_strtoul(buf, NULL, 10);
-
- if (flip > 0)
- par->flip = 1;
- else
- par->flip = 0;
-
- w100_update_disable();
- w100_set_dispregs(par);
- w100_update_enable();
-
- calc_hsync(par);
-
- return count;
-}
-
-static DEVICE_ATTR_RW(flip);
-
-static ssize_t w100fb_reg_read(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
-{
- unsigned long regs, param;
- regs = simple_strtoul(buf, NULL, 16);
- param = readl(remapped_regs + regs);
- printk("Read Register 0x%08lX: 0x%08lX\n", regs, param);
- return count;
-}
-
-static DEVICE_ATTR(reg_read, 0200, NULL, w100fb_reg_read);
-
-static ssize_t w100fb_reg_write(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
-{
- unsigned long regs, param;
- sscanf(buf, "%lx %lx", &regs, &param);
-
- if (regs <= 0x2000) {
- printk("Write Register 0x%08lX: 0x%08lX\n", regs, param);
- writel(param, remapped_regs + regs);
- }
-
- return count;
-}
-
-static DEVICE_ATTR(reg_write, 0200, NULL, w100fb_reg_write);
-
-
-static ssize_t fastpllclk_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct fb_info *info = dev_get_drvdata(dev);
- struct w100fb_par *par=info->par;
-
- return sprintf(buf, "%d\n",par->fastpll_mode);
-}
-
-static ssize_t fastpllclk_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
-{
- struct fb_info *info = dev_get_drvdata(dev);
- struct w100fb_par *par=info->par;
-
- if (simple_strtoul(buf, NULL, 10) > 0) {
- par->fastpll_mode=1;
- printk("w100fb: Using fast system clock (if possible)\n");
- } else {
- par->fastpll_mode=0;
- printk("w100fb: Using normal system clock\n");
- }
-
- w100_init_clocks(par);
- calc_hsync(par);
-
- return count;
-}
-
-static DEVICE_ATTR_RW(fastpllclk);
-
-static struct attribute *w100fb_attrs[] = {
- &dev_attr_fastpllclk.attr,
- &dev_attr_reg_read.attr,
- &dev_attr_reg_write.attr,
- &dev_attr_flip.attr,
- NULL,
-};
-ATTRIBUTE_GROUPS(w100fb);
-
-/*
- * Some touchscreens need hsync information from the video driver to
- * function correctly. We export it here.
- */
-unsigned long w100fb_get_hsynclen(struct device *dev)
-{
- struct fb_info *info = dev_get_drvdata(dev);
- struct w100fb_par *par=info->par;
-
- /* If display is blanked/suspended, hsync isn't active */
- if (par->blanked)
- return 0;
- else
- return par->hsync_len;
-}
-EXPORT_SYMBOL(w100fb_get_hsynclen);
-
-static void w100fb_clear_screen(struct w100fb_par *par)
-{
- memset_io(remapped_fbuf + (W100_FB_BASE-MEM_WINDOW_BASE), 0, (par->xres * par->yres * BITS_PER_PIXEL/8));
-}
-
-
-/*
- * Set a palette value from rgb components
- */
-static int w100fb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
- u_int trans, struct fb_info *info)
-{
- unsigned int val;
- int ret = 1;
-
- /*
- * If greyscale is true, then we convert the RGB value
- * to greyscale no matter what visual we are using.
- */
- if (info->var.grayscale)
- red = green = blue = (19595 * red + 38470 * green + 7471 * blue) >> 16;
-
- /*
- * 16-bit True Colour. We encode the RGB value
- * according to the RGB bitfield information.
- */
- if (regno < MAX_PALETTES) {
- u32 *pal = info->pseudo_palette;
-
- val = (red & 0xf800) | ((green & 0xfc00) >> 5) | ((blue & 0xf800) >> 11);
- pal[regno] = val;
- ret = 0;
- }
- return ret;
-}
-
-
-/*
- * Blank the display based on value in blank_mode
- */
-static int w100fb_blank(int blank_mode, struct fb_info *info)
-{
- struct w100fb_par *par = info->par;
- struct w100_tg_info *tg = par->mach->tg;
-
- switch(blank_mode) {
-
- case FB_BLANK_NORMAL: /* Normal blanking */
- case FB_BLANK_VSYNC_SUSPEND: /* VESA blank (vsync off) */
- case FB_BLANK_HSYNC_SUSPEND: /* VESA blank (hsync off) */
- case FB_BLANK_POWERDOWN: /* Poweroff */
- if (par->blanked == 0) {
- if(tg && tg->suspend)
- tg->suspend(par);
- par->blanked = 1;
- }
- break;
-
- case FB_BLANK_UNBLANK: /* Unblanking */
- if (par->blanked != 0) {
- if(tg && tg->resume)
- tg->resume(par);
- par->blanked = 0;
- }
- break;
- }
- return 0;
-}
-
-
-static void w100_fifo_wait(int entries)
-{
- union rbbm_status_u status;
- int i;
-
- for (i = 0; i < 2000000; i++) {
- status.val = readl(remapped_regs + mmRBBM_STATUS);
- if (status.f.cmdfifo_avail >= entries)
- return;
- udelay(1);
- }
- printk(KERN_ERR "w100fb: FIFO Timeout!\n");
-}
-
-
-static int w100fb_sync(struct fb_info *info)
-{
- union rbbm_status_u status;
- int i;
-
- for (i = 0; i < 2000000; i++) {
- status.val = readl(remapped_regs + mmRBBM_STATUS);
- if (!status.f.gui_active)
- return 0;
- udelay(1);
- }
- printk(KERN_ERR "w100fb: Graphic engine timeout!\n");
- return -EBUSY;
-}
-
-
-static void w100_init_graphic_engine(struct w100fb_par *par)
-{
- union dp_gui_master_cntl_u gmc;
- union dp_mix_u dp_mix;
- union dp_datatype_u dp_datatype;
- union dp_cntl_u dp_cntl;
-
- w100_fifo_wait(4);
- writel(W100_FB_BASE, remapped_regs + mmDST_OFFSET);
- writel(par->xres, remapped_regs + mmDST_PITCH);
- writel(W100_FB_BASE, remapped_regs + mmSRC_OFFSET);
- writel(par->xres, remapped_regs + mmSRC_PITCH);
-
- w100_fifo_wait(3);
- writel(0, remapped_regs + mmSC_TOP_LEFT);
- writel((par->yres << 16) | par->xres, remapped_regs + mmSC_BOTTOM_RIGHT);
- writel(0x1fff1fff, remapped_regs + mmSRC_SC_BOTTOM_RIGHT);
-
- w100_fifo_wait(4);
- dp_cntl.val = 0;
- dp_cntl.f.dst_x_dir = 1;
- dp_cntl.f.dst_y_dir = 1;
- dp_cntl.f.src_x_dir = 1;
- dp_cntl.f.src_y_dir = 1;
- dp_cntl.f.dst_major_x = 1;
- dp_cntl.f.src_major_x = 1;
- writel(dp_cntl.val, remapped_regs + mmDP_CNTL);
-
- gmc.val = 0;
- gmc.f.gmc_src_pitch_offset_cntl = 1;
- gmc.f.gmc_dst_pitch_offset_cntl = 1;
- gmc.f.gmc_src_clipping = 1;
- gmc.f.gmc_dst_clipping = 1;
- gmc.f.gmc_brush_datatype = GMC_BRUSH_NONE;
- gmc.f.gmc_dst_datatype = 3; /* from DstType_16Bpp_444 */
- gmc.f.gmc_src_datatype = SRC_DATATYPE_EQU_DST;
- gmc.f.gmc_byte_pix_order = 1;
- gmc.f.gmc_default_sel = 0;
- gmc.f.gmc_rop3 = ROP3_SRCCOPY;
- gmc.f.gmc_dp_src_source = DP_SRC_MEM_RECTANGULAR;
- gmc.f.gmc_clr_cmp_fcn_dis = 1;
- gmc.f.gmc_wr_msk_dis = 1;
- gmc.f.gmc_dp_op = DP_OP_ROP;
- writel(gmc.val, remapped_regs + mmDP_GUI_MASTER_CNTL);
-
- dp_datatype.val = dp_mix.val = 0;
- dp_datatype.f.dp_dst_datatype = gmc.f.gmc_dst_datatype;
- dp_datatype.f.dp_brush_datatype = gmc.f.gmc_brush_datatype;
- dp_datatype.f.dp_src2_type = 0;
- dp_datatype.f.dp_src2_datatype = gmc.f.gmc_src_datatype;
- dp_datatype.f.dp_src_datatype = gmc.f.gmc_src_datatype;
- dp_datatype.f.dp_byte_pix_order = gmc.f.gmc_byte_pix_order;
- writel(dp_datatype.val, remapped_regs + mmDP_DATATYPE);
-
- dp_mix.f.dp_src_source = gmc.f.gmc_dp_src_source;
- dp_mix.f.dp_src2_source = 1;
- dp_mix.f.dp_rop3 = gmc.f.gmc_rop3;
- dp_mix.f.dp_op = gmc.f.gmc_dp_op;
- writel(dp_mix.val, remapped_regs + mmDP_MIX);
-}
-
-
-static void w100fb_fillrect(struct fb_info *info,
- const struct fb_fillrect *rect)
-{
- union dp_gui_master_cntl_u gmc;
-
- if (info->state != FBINFO_STATE_RUNNING)
- return;
- if (info->flags & FBINFO_HWACCEL_DISABLED) {
- cfb_fillrect(info, rect);
- return;
- }
-
- gmc.val = readl(remapped_regs + mmDP_GUI_MASTER_CNTL);
- gmc.f.gmc_rop3 = ROP3_PATCOPY;
- gmc.f.gmc_brush_datatype = GMC_BRUSH_SOLID_COLOR;
- w100_fifo_wait(2);
- writel(gmc.val, remapped_regs + mmDP_GUI_MASTER_CNTL);
- writel(rect->color, remapped_regs + mmDP_BRUSH_FRGD_CLR);
-
- w100_fifo_wait(2);
- writel((rect->dy << 16) | (rect->dx & 0xffff), remapped_regs + mmDST_Y_X);
- writel((rect->width << 16) | (rect->height & 0xffff),
- remapped_regs + mmDST_WIDTH_HEIGHT);
-}
-
-
-static void w100fb_copyarea(struct fb_info *info,
- const struct fb_copyarea *area)
-{
- u32 dx = area->dx, dy = area->dy, sx = area->sx, sy = area->sy;
- u32 h = area->height, w = area->width;
- union dp_gui_master_cntl_u gmc;
-
- if (info->state != FBINFO_STATE_RUNNING)
- return;
- if (info->flags & FBINFO_HWACCEL_DISABLED) {
- cfb_copyarea(info, area);
- return;
- }
-
- gmc.val = readl(remapped_regs + mmDP_GUI_MASTER_CNTL);
- gmc.f.gmc_rop3 = ROP3_SRCCOPY;
- gmc.f.gmc_brush_datatype = GMC_BRUSH_NONE;
- w100_fifo_wait(1);
- writel(gmc.val, remapped_regs + mmDP_GUI_MASTER_CNTL);
-
- w100_fifo_wait(3);
- writel((sy << 16) | (sx & 0xffff), remapped_regs + mmSRC_Y_X);
- writel((dy << 16) | (dx & 0xffff), remapped_regs + mmDST_Y_X);
- writel((w << 16) | (h & 0xffff), remapped_regs + mmDST_WIDTH_HEIGHT);
-}
-
-
-/*
- * Change the resolution by calling the appropriate hardware functions
- */
-static void w100fb_activate_var(struct w100fb_par *par)
-{
- struct w100_tg_info *tg = par->mach->tg;
-
- w100_pwm_setup(par);
- w100_setup_memory(par);
- w100_init_clocks(par);
- w100fb_clear_screen(par);
- w100_vsync();
-
- w100_update_disable();
- w100_init_lcd(par);
- w100_set_dispregs(par);
- w100_update_enable();
- w100_init_graphic_engine(par);
-
- calc_hsync(par);
-
- if (!par->blanked && tg && tg->change)
- tg->change(par);
-}
-
-
-/* Select the smallest mode that allows the desired resolution to be
- * displayed. If desired, the x and y parameters can be rounded up to
- * match the selected mode.
- */
-static struct w100_mode *w100fb_get_mode(struct w100fb_par *par, unsigned int *x, unsigned int *y, int saveval)
-{
- struct w100_mode *mode = NULL;
- struct w100_mode *modelist = par->mach->modelist;
- unsigned int best_x = 0xffffffff, best_y = 0xffffffff;
- unsigned int i;
-
- for (i = 0 ; i < par->mach->num_modes ; i++) {
- if (modelist[i].xres >= *x && modelist[i].yres >= *y &&
- modelist[i].xres < best_x && modelist[i].yres < best_y) {
- best_x = modelist[i].xres;
- best_y = modelist[i].yres;
- mode = &modelist[i];
- } else if(modelist[i].xres >= *y && modelist[i].yres >= *x &&
- modelist[i].xres < best_y && modelist[i].yres < best_x) {
- best_x = modelist[i].yres;
- best_y = modelist[i].xres;
- mode = &modelist[i];
- }
- }
-
- if (mode && saveval) {
- *x = best_x;
- *y = best_y;
- }
-
- return mode;
-}
-
-
-/*
- * w100fb_check_var():
- * Get the video params out of 'var'. If a value doesn't fit, round it up,
- * if it's too big, return -EINVAL.
- */
-static int w100fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
-{
- struct w100fb_par *par=info->par;
-
- if(!w100fb_get_mode(par, &var->xres, &var->yres, 1))
- return -EINVAL;
-
- if (par->mach->mem && ((var->xres*var->yres*BITS_PER_PIXEL/8) > (par->mach->mem->size+1)))
- return -EINVAL;
-
- if (!par->mach->mem && ((var->xres*var->yres*BITS_PER_PIXEL/8) > (MEM_INT_SIZE+1)))
- return -EINVAL;
-
- var->xres_virtual = max(var->xres_virtual, var->xres);
- var->yres_virtual = max(var->yres_virtual, var->yres);
-
- if (var->bits_per_pixel > BITS_PER_PIXEL)
- return -EINVAL;
- else
- var->bits_per_pixel = BITS_PER_PIXEL;
-
- var->red.offset = 11;
- var->red.length = 5;
- var->green.offset = 5;
- var->green.length = 6;
- var->blue.offset = 0;
- var->blue.length = 5;
- var->transp.offset = var->transp.length = 0;
-
- var->nonstd = 0;
- var->height = -1;
- var->width = -1;
- var->vmode = FB_VMODE_NONINTERLACED;
- var->sync = 0;
- var->pixclock = 0x04; /* 171521; */
-
- return 0;
-}
-
-
-/*
- * w100fb_set_par():
- * Set the user defined part of the display for the specified console
- * by looking at the values in info.var
- */
-static int w100fb_set_par(struct fb_info *info)
-{
- struct w100fb_par *par=info->par;
-
- if (par->xres != info->var.xres || par->yres != info->var.yres) {
- par->xres = info->var.xres;
- par->yres = info->var.yres;
- par->mode = w100fb_get_mode(par, &par->xres, &par->yres, 0);
-
- info->fix.visual = FB_VISUAL_TRUECOLOR;
- info->fix.ypanstep = 0;
- info->fix.ywrapstep = 0;
- info->fix.line_length = par->xres * BITS_PER_PIXEL / 8;
-
- mutex_lock(&info->mm_lock);
- if ((par->xres*par->yres*BITS_PER_PIXEL/8) > (MEM_INT_SIZE+1)) {
- par->extmem_active = 1;
- info->fix.smem_len = par->mach->mem->size+1;
- } else {
- par->extmem_active = 0;
- info->fix.smem_len = MEM_INT_SIZE+1;
- }
- mutex_unlock(&info->mm_lock);
-
- w100fb_activate_var(par);
- }
- return 0;
-}
-
-
-/*
- * Frame buffer operations
- */
-static const struct fb_ops w100fb_ops = {
- .owner = THIS_MODULE,
- .fb_check_var = w100fb_check_var,
- .fb_set_par = w100fb_set_par,
- .fb_setcolreg = w100fb_setcolreg,
- .fb_blank = w100fb_blank,
- .fb_fillrect = w100fb_fillrect,
- .fb_copyarea = w100fb_copyarea,
- .fb_imageblit = cfb_imageblit,
- .fb_sync = w100fb_sync,
-};
-
-#ifdef CONFIG_PM
-static void w100fb_save_vidmem(struct w100fb_par *par)
-{
- int memsize;
-
- if (par->extmem_active) {
- memsize=par->mach->mem->size;
- par->saved_extmem = vmalloc(memsize);
- if (par->saved_extmem)
- memcpy_fromio(par->saved_extmem, remapped_fbuf + (W100_FB_BASE-MEM_WINDOW_BASE), memsize);
- }
- memsize=MEM_INT_SIZE;
- par->saved_intmem = vmalloc(memsize);
- if (par->saved_intmem && par->extmem_active)
- memcpy_fromio(par->saved_intmem, remapped_fbuf + (W100_FB_BASE-MEM_INT_BASE_VALUE), memsize);
- else if (par->saved_intmem)
- memcpy_fromio(par->saved_intmem, remapped_fbuf + (W100_FB_BASE-MEM_WINDOW_BASE), memsize);
-}
-
-static void w100fb_restore_vidmem(struct w100fb_par *par)
-{
- int memsize;
-
- if (par->extmem_active && par->saved_extmem) {
- memsize=par->mach->mem->size;
- memcpy_toio(remapped_fbuf + (W100_FB_BASE-MEM_WINDOW_BASE), par->saved_extmem, memsize);
- vfree(par->saved_extmem);
- par->saved_extmem = NULL;
- }
- if (par->saved_intmem) {
- memsize=MEM_INT_SIZE;
- if (par->extmem_active)
- memcpy_toio(remapped_fbuf + (W100_FB_BASE-MEM_INT_BASE_VALUE), par->saved_intmem, memsize);
- else
- memcpy_toio(remapped_fbuf + (W100_FB_BASE-MEM_WINDOW_BASE), par->saved_intmem, memsize);
- vfree(par->saved_intmem);
- par->saved_intmem = NULL;
- }
-}
-
-static int w100fb_suspend(struct platform_device *dev, pm_message_t state)
-{
- struct fb_info *info = platform_get_drvdata(dev);
- struct w100fb_par *par=info->par;
- struct w100_tg_info *tg = par->mach->tg;
-
- w100fb_save_vidmem(par);
- if(tg && tg->suspend)
- tg->suspend(par);
- w100_suspend(W100_SUSPEND_ALL);
- par->blanked = 1;
-
- return 0;
-}
-
-static int w100fb_resume(struct platform_device *dev)
-{
- struct fb_info *info = platform_get_drvdata(dev);
- struct w100fb_par *par=info->par;
- struct w100_tg_info *tg = par->mach->tg;
-
- w100_hw_init(par);
- w100fb_activate_var(par);
- w100fb_restore_vidmem(par);
- if(tg && tg->resume)
- tg->resume(par);
- par->blanked = 0;
-
- return 0;
-}
-#else
-#define w100fb_suspend NULL
-#define w100fb_resume NULL
-#endif
-
-
-static int w100fb_probe(struct platform_device *pdev)
-{
- int err = -EIO;
- struct w100fb_mach_info *inf;
- struct fb_info *info = NULL;
- struct w100fb_par *par;
- struct resource *mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- unsigned int chip_id;
-
- if (!mem)
- return -EINVAL;
-
- /* Remap the chip base address */
- remapped_base = ioremap(mem->start+W100_CFG_BASE, W100_CFG_LEN);
- if (remapped_base == NULL)
- goto out;
-
- /* Map the register space */
- remapped_regs = ioremap(mem->start+W100_REG_BASE, W100_REG_LEN);
- if (remapped_regs == NULL)
- goto out;
-
- /* Identify the chip */
- printk("Found ");
- chip_id = readl(remapped_regs + mmCHIP_ID);
- switch(chip_id) {
- case CHIP_ID_W100: printk("w100"); break;
- case CHIP_ID_W3200: printk("w3200"); break;
- case CHIP_ID_W3220: printk("w3220"); break;
- default:
- printk("Unknown imageon chip ID\n");
- err = -ENODEV;
- goto out;
- }
- printk(" at 0x%08lx.\n", (unsigned long) mem->start+W100_CFG_BASE);
-
- /* Remap the framebuffer */
- remapped_fbuf = ioremap(mem->start+MEM_WINDOW_BASE, MEM_WINDOW_SIZE);
- if (remapped_fbuf == NULL)
- goto out;
-
- info=framebuffer_alloc(sizeof(struct w100fb_par), &pdev->dev);
- if (!info) {
- err = -ENOMEM;
- goto out;
- }
-
- par = info->par;
- platform_set_drvdata(pdev, info);
-
- inf = dev_get_platdata(&pdev->dev);
- par->chip_id = chip_id;
- par->mach = inf;
- par->fastpll_mode = 0;
- par->blanked = 0;
-
- par->pll_table=w100_get_xtal_table(inf->xtal_freq);
- if (!par->pll_table) {
- printk(KERN_ERR "No matching Xtal definition found\n");
- err = -EINVAL;
- goto out;
- }
-
- info->pseudo_palette = kmalloc_array(MAX_PALETTES, sizeof(u32),
- GFP_KERNEL);
- if (!info->pseudo_palette) {
- err = -ENOMEM;
- goto out;
- }
-
- info->fbops = &w100fb_ops;
- info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA |
- FBINFO_HWACCEL_FILLRECT;
- info->node = -1;
- info->screen_base = remapped_fbuf + (W100_FB_BASE-MEM_WINDOW_BASE);
- info->screen_size = REMAPPED_FB_LEN;
-
- strcpy(info->fix.id, "w100fb");
- info->fix.type = FB_TYPE_PACKED_PIXELS;
- info->fix.type_aux = 0;
- info->fix.accel = FB_ACCEL_NONE;
- info->fix.smem_start = mem->start+W100_FB_BASE;
- info->fix.mmio_start = mem->start+W100_REG_BASE;
- info->fix.mmio_len = W100_REG_LEN;
-
- if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) {
- err = -ENOMEM;
- goto out;
- }
-
- par->mode = &inf->modelist[0];
- if(inf->init_mode & INIT_MODE_ROTATED) {
- info->var.xres = par->mode->yres;
- info->var.yres = par->mode->xres;
- }
- else {
- info->var.xres = par->mode->xres;
- info->var.yres = par->mode->yres;
- }
-
- if(inf->init_mode &= INIT_MODE_FLIPPED)
- par->flip = 1;
- else
- par->flip = 0;
-
- info->var.xres_virtual = info->var.xres;
- info->var.yres_virtual = info->var.yres;
- info->var.pixclock = 0x04; /* 171521; */
- info->var.sync = 0;
- info->var.grayscale = 0;
- info->var.xoffset = info->var.yoffset = 0;
- info->var.accel_flags = 0;
- info->var.activate = FB_ACTIVATE_NOW;
-
- w100_hw_init(par);
-
- if (w100fb_check_var(&info->var, info) < 0) {
- err = -EINVAL;
- goto out;
- }
-
- if (register_framebuffer(info) < 0) {
- err = -EINVAL;
- goto out;
- }
-
- fb_info(info, "%s frame buffer device\n", info->fix.id);
- return 0;
-out:
- if (info) {
- fb_dealloc_cmap(&info->cmap);
- kfree(info->pseudo_palette);
- }
- if (remapped_fbuf != NULL) {
- iounmap(remapped_fbuf);
- remapped_fbuf = NULL;
- }
- if (remapped_regs != NULL) {
- iounmap(remapped_regs);
- remapped_regs = NULL;
- }
- if (remapped_base != NULL) {
- iounmap(remapped_base);
- remapped_base = NULL;
- }
- if (info)
- framebuffer_release(info);
- return err;
-}
-
-
-static int w100fb_remove(struct platform_device *pdev)
-{
- struct fb_info *info = platform_get_drvdata(pdev);
- struct w100fb_par *par=info->par;
-
- unregister_framebuffer(info);
-
- vfree(par->saved_intmem);
- vfree(par->saved_extmem);
- kfree(info->pseudo_palette);
- fb_dealloc_cmap(&info->cmap);
-
- iounmap(remapped_base);
- remapped_base = NULL;
- iounmap(remapped_regs);
- remapped_regs = NULL;
- iounmap(remapped_fbuf);
- remapped_fbuf = NULL;
-
- framebuffer_release(info);
-
- return 0;
-}
-
-
-/* ------------------- chipset specific functions -------------------------- */
-
-
-static void w100_soft_reset(void)
-{
- u16 val = readw((u16 __iomem *)remapped_base + cfgSTATUS);
-
- writew(val | 0x08, (u16 __iomem *)remapped_base + cfgSTATUS);
- udelay(100);
- writew(0x00, (u16 __iomem *)remapped_base + cfgSTATUS);
- udelay(100);
-}
-
-static void w100_update_disable(void)
-{
- union disp_db_buf_cntl_wr_u disp_db_buf_wr_cntl;
-
- /* Prevent display updates */
- disp_db_buf_wr_cntl.f.db_buf_cntl = 0x1e;
- disp_db_buf_wr_cntl.f.update_db_buf = 0;
- disp_db_buf_wr_cntl.f.en_db_buf = 0;
- writel((u32) (disp_db_buf_wr_cntl.val), remapped_regs + mmDISP_DB_BUF_CNTL);
-}
-
-static void w100_update_enable(void)
-{
- union disp_db_buf_cntl_wr_u disp_db_buf_wr_cntl;
-
- /* Enable display updates */
- disp_db_buf_wr_cntl.f.db_buf_cntl = 0x1e;
- disp_db_buf_wr_cntl.f.update_db_buf = 1;
- disp_db_buf_wr_cntl.f.en_db_buf = 1;
- writel((u32) (disp_db_buf_wr_cntl.val), remapped_regs + mmDISP_DB_BUF_CNTL);
-}
-
-unsigned long w100fb_gpio_read(int port)
-{
- unsigned long value;
-
- if (port==W100_GPIO_PORT_A)
- value = readl(remapped_regs + mmGPIO_DATA);
- else
- value = readl(remapped_regs + mmGPIO_DATA2);
-
- return value;
-}
-
-void w100fb_gpio_write(int port, unsigned long value)
-{
- if (port==W100_GPIO_PORT_A)
- writel(value, remapped_regs + mmGPIO_DATA);
- else
- writel(value, remapped_regs + mmGPIO_DATA2);
-}
-EXPORT_SYMBOL(w100fb_gpio_read);
-EXPORT_SYMBOL(w100fb_gpio_write);
-
-/*
- * Initialization of critical w100 hardware
- */
-static void w100_hw_init(struct w100fb_par *par)
-{
- u32 temp32;
- union cif_cntl_u cif_cntl;
- union intf_cntl_u intf_cntl;
- union cfgreg_base_u cfgreg_base;
- union wrap_top_dir_u wrap_top_dir;
- union cif_read_dbg_u cif_read_dbg;
- union cpu_defaults_u cpu_default;
- union cif_write_dbg_u cif_write_dbg;
- union wrap_start_dir_u wrap_start_dir;
- union cif_io_u cif_io;
- struct w100_gpio_regs *gpio = par->mach->gpio;
-
- w100_soft_reset();
-
- /* This is what the fpga_init code does on reset. May be wrong
- but there is little info available */
- writel(0x31, remapped_regs + mmSCRATCH_UMSK);
- for (temp32 = 0; temp32 < 10000; temp32++)
- readl(remapped_regs + mmSCRATCH_UMSK);
- writel(0x30, remapped_regs + mmSCRATCH_UMSK);
-
- /* Set up CIF */
- cif_io.val = defCIF_IO;
- writel((u32)(cif_io.val), remapped_regs + mmCIF_IO);
-
- cif_write_dbg.val = readl(remapped_regs + mmCIF_WRITE_DBG);
- cif_write_dbg.f.dis_packer_ful_during_rbbm_timeout = 0;
- cif_write_dbg.f.en_dword_split_to_rbbm = 1;
- cif_write_dbg.f.dis_timeout_during_rbbm = 1;
- writel((u32) (cif_write_dbg.val), remapped_regs + mmCIF_WRITE_DBG);
-
- cif_read_dbg.val = readl(remapped_regs + mmCIF_READ_DBG);
- cif_read_dbg.f.dis_rd_same_byte_to_trig_fetch = 1;
- writel((u32) (cif_read_dbg.val), remapped_regs + mmCIF_READ_DBG);
-
- cif_cntl.val = readl(remapped_regs + mmCIF_CNTL);
- cif_cntl.f.dis_system_bits = 1;
- cif_cntl.f.dis_mr = 1;
- cif_cntl.f.en_wait_to_compensate_dq_prop_dly = 0;
- cif_cntl.f.intb_oe = 1;
- cif_cntl.f.interrupt_active_high = 1;
- writel((u32) (cif_cntl.val), remapped_regs + mmCIF_CNTL);
-
- /* Setup cfgINTF_CNTL and cfgCPU defaults */
- intf_cntl.val = defINTF_CNTL;
- intf_cntl.f.ad_inc_a = 1;
- intf_cntl.f.ad_inc_b = 1;
- intf_cntl.f.rd_data_rdy_a = 0;
- intf_cntl.f.rd_data_rdy_b = 0;
- writeb((u8) (intf_cntl.val), remapped_base + cfgINTF_CNTL);
-
- cpu_default.val = defCPU_DEFAULTS;
- cpu_default.f.access_ind_addr_a = 1;
- cpu_default.f.access_ind_addr_b = 1;
- cpu_default.f.access_scratch_reg = 1;
- cpu_default.f.transition_size = 0;
- writeb((u8) (cpu_default.val), remapped_base + cfgCPU_DEFAULTS);
-
- /* set up the apertures */
- writeb((u8) (W100_REG_BASE >> 16), remapped_base + cfgREG_BASE);
-
- cfgreg_base.val = defCFGREG_BASE;
- cfgreg_base.f.cfgreg_base = W100_CFG_BASE;
- writel((u32) (cfgreg_base.val), remapped_regs + mmCFGREG_BASE);
-
- wrap_start_dir.val = defWRAP_START_DIR;
- wrap_start_dir.f.start_addr = WRAP_BUF_BASE_VALUE >> 1;
- writel((u32) (wrap_start_dir.val), remapped_regs + mmWRAP_START_DIR);
-
- wrap_top_dir.val = defWRAP_TOP_DIR;
- wrap_top_dir.f.top_addr = WRAP_BUF_TOP_VALUE >> 1;
- writel((u32) (wrap_top_dir.val), remapped_regs + mmWRAP_TOP_DIR);
-
- writel((u32) 0x2440, remapped_regs + mmRBBM_CNTL);
-
- /* Set the hardware to 565 colour */
- temp32 = readl(remapped_regs + mmDISP_DEBUG2);
- temp32 &= 0xff7fffff;
- temp32 |= 0x00800000;
- writel(temp32, remapped_regs + mmDISP_DEBUG2);
-
- /* Initialise the GPIO lines */
- if (gpio) {
- writel(gpio->init_data1, remapped_regs + mmGPIO_DATA);
- writel(gpio->init_data2, remapped_regs + mmGPIO_DATA2);
- writel(gpio->gpio_dir1, remapped_regs + mmGPIO_CNTL1);
- writel(gpio->gpio_oe1, remapped_regs + mmGPIO_CNTL2);
- writel(gpio->gpio_dir2, remapped_regs + mmGPIO_CNTL3);
- writel(gpio->gpio_oe2, remapped_regs + mmGPIO_CNTL4);
- }
-}
-
-
-struct power_state {
- union clk_pin_cntl_u clk_pin_cntl;
- union pll_ref_fb_div_u pll_ref_fb_div;
- union pll_cntl_u pll_cntl;
- union sclk_cntl_u sclk_cntl;
- union pclk_cntl_u pclk_cntl;
- union pwrmgt_cntl_u pwrmgt_cntl;
- int auto_mode; /* system clock auto changing? */
-};
-
-
-static struct power_state w100_pwr_state;
-
-/* The PLL Fout is determined by (XtalFreq/(M+1)) * ((N_int+1) + (N_fac/8)) */
-
-/* 12.5MHz Crystal PLL Table */
-static struct w100_pll_info xtal_12500000[] = {
- /*freq M N_int N_fac tfgoal lock_time */
- { 50, 0, 1, 0, 0xe0, 56}, /* 50.00 MHz */
- { 75, 0, 5, 0, 0xde, 37}, /* 75.00 MHz */
- {100, 0, 7, 0, 0xe0, 28}, /* 100.00 MHz */
- {125, 0, 9, 0, 0xe0, 22}, /* 125.00 MHz */
- {150, 0, 11, 0, 0xe0, 17}, /* 150.00 MHz */
- { 0, 0, 0, 0, 0, 0}, /* Terminator */
-};
-
-/* 14.318MHz Crystal PLL Table */
-static struct w100_pll_info xtal_14318000[] = {
- /*freq M N_int N_fac tfgoal lock_time */
- { 40, 4, 13, 0, 0xe0, 80}, /* tfgoal guessed */
- { 50, 1, 6, 0, 0xe0, 64}, /* 50.05 MHz */
- { 57, 2, 11, 0, 0xe0, 53}, /* tfgoal guessed */
- { 75, 0, 4, 3, 0xe0, 43}, /* 75.08 MHz */
- {100, 0, 6, 0, 0xe0, 32}, /* 100.10 MHz */
- { 0, 0, 0, 0, 0, 0},
-};
-
-/* 16MHz Crystal PLL Table */
-static struct w100_pll_info xtal_16000000[] = {
- /*freq M N_int N_fac tfgoal lock_time */
- { 72, 1, 8, 0, 0xe0, 48}, /* tfgoal guessed */
- { 80, 1, 9, 0, 0xe0, 13}, /* tfgoal guessed */
- { 95, 1, 10, 7, 0xe0, 38}, /* tfgoal guessed */
- { 96, 1, 11, 0, 0xe0, 36}, /* tfgoal guessed */
- { 0, 0, 0, 0, 0, 0},
-};
-
-static struct pll_entries {
- int xtal_freq;
- struct w100_pll_info *pll_table;
-} w100_pll_tables[] = {
- { 12500000, &xtal_12500000[0] },
- { 14318000, &xtal_14318000[0] },
- { 16000000, &xtal_16000000[0] },
- { 0 },
-};
-
-struct w100_pll_info *w100_get_xtal_table(unsigned int freq)
-{
- struct pll_entries *pll_entry = w100_pll_tables;
-
- do {
- if (freq == pll_entry->xtal_freq)
- return pll_entry->pll_table;
- pll_entry++;
- } while (pll_entry->xtal_freq);
-
- return NULL;
-}
-
-
-static unsigned int w100_get_testcount(unsigned int testclk_sel)
-{
- union clk_test_cntl_u clk_test_cntl;
-
- udelay(5);
-
- /* Select the test clock source and reset */
- clk_test_cntl.f.start_check_freq = 0x0;
- clk_test_cntl.f.testclk_sel = testclk_sel;
- clk_test_cntl.f.tstcount_rst = 0x1; /* set reset */
- writel((u32) (clk_test_cntl.val), remapped_regs + mmCLK_TEST_CNTL);
-
- clk_test_cntl.f.tstcount_rst = 0x0; /* clear reset */
- writel((u32) (clk_test_cntl.val), remapped_regs + mmCLK_TEST_CNTL);
-
- /* Run clock test */
- clk_test_cntl.f.start_check_freq = 0x1;
- writel((u32) (clk_test_cntl.val), remapped_regs + mmCLK_TEST_CNTL);
-
- /* Give the test time to complete */
- udelay(20);
-
- /* Return the result */
- clk_test_cntl.val = readl(remapped_regs + mmCLK_TEST_CNTL);
- clk_test_cntl.f.start_check_freq = 0x0;
- writel((u32) (clk_test_cntl.val), remapped_regs + mmCLK_TEST_CNTL);
-
- return clk_test_cntl.f.test_count;
-}
-
-
-static int w100_pll_adjust(struct w100_pll_info *pll)
-{
- unsigned int tf80;
- unsigned int tf20;
-
- /* Initial Settings */
- w100_pwr_state.pll_cntl.f.pll_pwdn = 0x0; /* power down */
- w100_pwr_state.pll_cntl.f.pll_reset = 0x0; /* not reset */
- w100_pwr_state.pll_cntl.f.pll_tcpoff = 0x1; /* Hi-Z */
- w100_pwr_state.pll_cntl.f.pll_pvg = 0x0; /* VCO gain = 0 */
- w100_pwr_state.pll_cntl.f.pll_vcofr = 0x0; /* VCO frequency range control = off */
- w100_pwr_state.pll_cntl.f.pll_ioffset = 0x0; /* current offset inside VCO = 0 */
- w100_pwr_state.pll_cntl.f.pll_ring_off = 0x0;
-
- /* Wai Ming 80 percent of VDD 1.3V gives 1.04V, minimum operating voltage is 1.08V
- * therefore, commented out the following lines
- * tf80 meant tf100
- */
- do {
- /* set VCO input = 0.8 * VDD */
- w100_pwr_state.pll_cntl.f.pll_dactal = 0xd;
- writel((u32) (w100_pwr_state.pll_cntl.val), remapped_regs + mmPLL_CNTL);
-
- tf80 = w100_get_testcount(TESTCLK_SRC_PLL);
- if (tf80 >= (pll->tfgoal)) {
- /* set VCO input = 0.2 * VDD */
- w100_pwr_state.pll_cntl.f.pll_dactal = 0x7;
- writel((u32) (w100_pwr_state.pll_cntl.val), remapped_regs + mmPLL_CNTL);
-
- tf20 = w100_get_testcount(TESTCLK_SRC_PLL);
- if (tf20 <= (pll->tfgoal))
- return 1; /* Success */
-
- if ((w100_pwr_state.pll_cntl.f.pll_vcofr == 0x0) &&
- ((w100_pwr_state.pll_cntl.f.pll_pvg == 0x7) ||
- (w100_pwr_state.pll_cntl.f.pll_ioffset == 0x0))) {
- /* slow VCO config */
- w100_pwr_state.pll_cntl.f.pll_vcofr = 0x1;
- w100_pwr_state.pll_cntl.f.pll_pvg = 0x0;
- w100_pwr_state.pll_cntl.f.pll_ioffset = 0x0;
- continue;
- }
- }
- if ((w100_pwr_state.pll_cntl.f.pll_ioffset) < 0x3) {
- w100_pwr_state.pll_cntl.f.pll_ioffset += 0x1;
- } else if ((w100_pwr_state.pll_cntl.f.pll_pvg) < 0x7) {
- w100_pwr_state.pll_cntl.f.pll_ioffset = 0x0;
- w100_pwr_state.pll_cntl.f.pll_pvg += 0x1;
- } else {
- return 0; /* Error */
- }
- } while(1);
-}
-
-
-/*
- * w100_pll_calibration
- */
-static int w100_pll_calibration(struct w100_pll_info *pll)
-{
- int status;
-
- status = w100_pll_adjust(pll);
-
- /* PLL Reset And Lock */
- /* set VCO input = 0.5 * VDD */
- w100_pwr_state.pll_cntl.f.pll_dactal = 0xa;
- writel((u32) (w100_pwr_state.pll_cntl.val), remapped_regs + mmPLL_CNTL);
-
- udelay(1); /* reset time */
-
- /* enable charge pump */
- w100_pwr_state.pll_cntl.f.pll_tcpoff = 0x0; /* normal */
- writel((u32) (w100_pwr_state.pll_cntl.val), remapped_regs + mmPLL_CNTL);
-
- /* set VCO input = Hi-Z, disable DAC */
- w100_pwr_state.pll_cntl.f.pll_dactal = 0x0;
- writel((u32) (w100_pwr_state.pll_cntl.val), remapped_regs + mmPLL_CNTL);
-
- udelay(400); /* lock time */
-
- /* PLL locked */
-
- return status;
-}
-
-
-static int w100_pll_set_clk(struct w100_pll_info *pll)
-{
- int status;
-
- if (w100_pwr_state.auto_mode == 1) /* auto mode */
- {
- w100_pwr_state.pwrmgt_cntl.f.pwm_fast_noml_hw_en = 0x0; /* disable fast to normal */
- w100_pwr_state.pwrmgt_cntl.f.pwm_noml_fast_hw_en = 0x0; /* disable normal to fast */
- writel((u32) (w100_pwr_state.pwrmgt_cntl.val), remapped_regs + mmPWRMGT_CNTL);
- }
-
- /* Set system clock source to XTAL whilst adjusting the PLL! */
- w100_pwr_state.sclk_cntl.f.sclk_src_sel = CLK_SRC_XTAL;
- writel((u32) (w100_pwr_state.sclk_cntl.val), remapped_regs + mmSCLK_CNTL);
-
- w100_pwr_state.pll_ref_fb_div.f.pll_ref_div = pll->M;
- w100_pwr_state.pll_ref_fb_div.f.pll_fb_div_int = pll->N_int;
- w100_pwr_state.pll_ref_fb_div.f.pll_fb_div_frac = pll->N_fac;
- w100_pwr_state.pll_ref_fb_div.f.pll_lock_time = pll->lock_time;
- writel((u32) (w100_pwr_state.pll_ref_fb_div.val), remapped_regs + mmPLL_REF_FB_DIV);
-
- w100_pwr_state.pwrmgt_cntl.f.pwm_mode_req = 0;
- writel((u32) (w100_pwr_state.pwrmgt_cntl.val), remapped_regs + mmPWRMGT_CNTL);
-
- status = w100_pll_calibration(pll);
-
- if (w100_pwr_state.auto_mode == 1) /* auto mode */
- {
- w100_pwr_state.pwrmgt_cntl.f.pwm_fast_noml_hw_en = 0x1; /* reenable fast to normal */
- w100_pwr_state.pwrmgt_cntl.f.pwm_noml_fast_hw_en = 0x1; /* reenable normal to fast */
- writel((u32) (w100_pwr_state.pwrmgt_cntl.val), remapped_regs + mmPWRMGT_CNTL);
- }
- return status;
-}
-
-/* freq = target frequency of the PLL */
-static int w100_set_pll_freq(struct w100fb_par *par, unsigned int freq)
-{
- struct w100_pll_info *pll = par->pll_table;
-
- do {
- if (freq == pll->freq) {
- return w100_pll_set_clk(pll);
- }
- pll++;
- } while(pll->freq);
- return 0;
-}
-
-/* Set up an initial state. Some values/fields set
- here will be overwritten. */
-static void w100_pwm_setup(struct w100fb_par *par)
-{
- w100_pwr_state.clk_pin_cntl.f.osc_en = 0x1;
- w100_pwr_state.clk_pin_cntl.f.osc_gain = 0x1f;
- w100_pwr_state.clk_pin_cntl.f.dont_use_xtalin = 0x0;
- w100_pwr_state.clk_pin_cntl.f.xtalin_pm_en = 0x0;
- w100_pwr_state.clk_pin_cntl.f.xtalin_dbl_en = par->mach->xtal_dbl ? 1 : 0;
- w100_pwr_state.clk_pin_cntl.f.cg_debug = 0x0;
- writel((u32) (w100_pwr_state.clk_pin_cntl.val), remapped_regs + mmCLK_PIN_CNTL);
-
- w100_pwr_state.sclk_cntl.f.sclk_src_sel = CLK_SRC_XTAL;
- w100_pwr_state.sclk_cntl.f.sclk_post_div_fast = 0x0; /* Pfast = 1 */
- w100_pwr_state.sclk_cntl.f.sclk_clkon_hys = 0x3;
- w100_pwr_state.sclk_cntl.f.sclk_post_div_slow = 0x0; /* Pslow = 1 */
- w100_pwr_state.sclk_cntl.f.disp_cg_ok2switch_en = 0x0;
- w100_pwr_state.sclk_cntl.f.sclk_force_reg = 0x0; /* Dynamic */
- w100_pwr_state.sclk_cntl.f.sclk_force_disp = 0x0; /* Dynamic */
- w100_pwr_state.sclk_cntl.f.sclk_force_mc = 0x0; /* Dynamic */
- w100_pwr_state.sclk_cntl.f.sclk_force_extmc = 0x0; /* Dynamic */
- w100_pwr_state.sclk_cntl.f.sclk_force_cp = 0x0; /* Dynamic */
- w100_pwr_state.sclk_cntl.f.sclk_force_e2 = 0x0; /* Dynamic */
- w100_pwr_state.sclk_cntl.f.sclk_force_e3 = 0x0; /* Dynamic */
- w100_pwr_state.sclk_cntl.f.sclk_force_idct = 0x0; /* Dynamic */
- w100_pwr_state.sclk_cntl.f.sclk_force_bist = 0x0; /* Dynamic */
- w100_pwr_state.sclk_cntl.f.busy_extend_cp = 0x0;
- w100_pwr_state.sclk_cntl.f.busy_extend_e2 = 0x0;
- w100_pwr_state.sclk_cntl.f.busy_extend_e3 = 0x0;
- w100_pwr_state.sclk_cntl.f.busy_extend_idct = 0x0;
- writel((u32) (w100_pwr_state.sclk_cntl.val), remapped_regs + mmSCLK_CNTL);
-
- w100_pwr_state.pclk_cntl.f.pclk_src_sel = CLK_SRC_XTAL;
- w100_pwr_state.pclk_cntl.f.pclk_post_div = 0x1; /* P = 2 */
- w100_pwr_state.pclk_cntl.f.pclk_force_disp = 0x0; /* Dynamic */
- writel((u32) (w100_pwr_state.pclk_cntl.val), remapped_regs + mmPCLK_CNTL);
-
- w100_pwr_state.pll_ref_fb_div.f.pll_ref_div = 0x0; /* M = 1 */
- w100_pwr_state.pll_ref_fb_div.f.pll_fb_div_int = 0x0; /* N = 1.0 */
- w100_pwr_state.pll_ref_fb_div.f.pll_fb_div_frac = 0x0;
- w100_pwr_state.pll_ref_fb_div.f.pll_reset_time = 0x5;
- w100_pwr_state.pll_ref_fb_div.f.pll_lock_time = 0xff;
- writel((u32) (w100_pwr_state.pll_ref_fb_div.val), remapped_regs + mmPLL_REF_FB_DIV);
-
- w100_pwr_state.pll_cntl.f.pll_pwdn = 0x1;
- w100_pwr_state.pll_cntl.f.pll_reset = 0x1;
- w100_pwr_state.pll_cntl.f.pll_pm_en = 0x0;
- w100_pwr_state.pll_cntl.f.pll_mode = 0x0; /* uses VCO clock */
- w100_pwr_state.pll_cntl.f.pll_refclk_sel = 0x0;
- w100_pwr_state.pll_cntl.f.pll_fbclk_sel = 0x0;
- w100_pwr_state.pll_cntl.f.pll_tcpoff = 0x0;
- w100_pwr_state.pll_cntl.f.pll_pcp = 0x4;
- w100_pwr_state.pll_cntl.f.pll_pvg = 0x0;
- w100_pwr_state.pll_cntl.f.pll_vcofr = 0x0;
- w100_pwr_state.pll_cntl.f.pll_ioffset = 0x0;
- w100_pwr_state.pll_cntl.f.pll_pecc_mode = 0x0;
- w100_pwr_state.pll_cntl.f.pll_pecc_scon = 0x0;
- w100_pwr_state.pll_cntl.f.pll_dactal = 0x0; /* Hi-Z */
- w100_pwr_state.pll_cntl.f.pll_cp_clip = 0x3;
- w100_pwr_state.pll_cntl.f.pll_conf = 0x2;
- w100_pwr_state.pll_cntl.f.pll_mbctrl = 0x2;
- w100_pwr_state.pll_cntl.f.pll_ring_off = 0x0;
- writel((u32) (w100_pwr_state.pll_cntl.val), remapped_regs + mmPLL_CNTL);
-
- w100_pwr_state.pwrmgt_cntl.f.pwm_enable = 0x0;
- w100_pwr_state.pwrmgt_cntl.f.pwm_mode_req = 0x1; /* normal mode (0, 1, 3) */
- w100_pwr_state.pwrmgt_cntl.f.pwm_wakeup_cond = 0x0;
- w100_pwr_state.pwrmgt_cntl.f.pwm_fast_noml_hw_en = 0x0;
- w100_pwr_state.pwrmgt_cntl.f.pwm_noml_fast_hw_en = 0x0;
- w100_pwr_state.pwrmgt_cntl.f.pwm_fast_noml_cond = 0x1; /* PM4,ENG */
- w100_pwr_state.pwrmgt_cntl.f.pwm_noml_fast_cond = 0x1; /* PM4,ENG */
- w100_pwr_state.pwrmgt_cntl.f.pwm_idle_timer = 0xFF;
- w100_pwr_state.pwrmgt_cntl.f.pwm_busy_timer = 0xFF;
- writel((u32) (w100_pwr_state.pwrmgt_cntl.val), remapped_regs + mmPWRMGT_CNTL);
-
- w100_pwr_state.auto_mode = 0; /* manual mode */
-}
-
-
-/*
- * Setup the w100 clocks for the specified mode
- */
-static void w100_init_clocks(struct w100fb_par *par)
-{
- struct w100_mode *mode = par->mode;
-
- if (mode->pixclk_src == CLK_SRC_PLL || mode->sysclk_src == CLK_SRC_PLL)
- w100_set_pll_freq(par, (par->fastpll_mode && mode->fast_pll_freq) ? mode->fast_pll_freq : mode->pll_freq);
-
- w100_pwr_state.sclk_cntl.f.sclk_src_sel = mode->sysclk_src;
- w100_pwr_state.sclk_cntl.f.sclk_post_div_fast = mode->sysclk_divider;
- w100_pwr_state.sclk_cntl.f.sclk_post_div_slow = mode->sysclk_divider;
- writel((u32) (w100_pwr_state.sclk_cntl.val), remapped_regs + mmSCLK_CNTL);
-}
-
-static void w100_init_lcd(struct w100fb_par *par)
-{
- u32 temp32;
- struct w100_mode *mode = par->mode;
- struct w100_gen_regs *regs = par->mach->regs;
- union active_h_disp_u active_h_disp;
- union active_v_disp_u active_v_disp;
- union graphic_h_disp_u graphic_h_disp;
- union graphic_v_disp_u graphic_v_disp;
- union crtc_total_u crtc_total;
-
- /* w3200 doesn't like undefined bits being set so zero register values first */
-
- active_h_disp.val = 0;
- active_h_disp.f.active_h_start=mode->left_margin;
- active_h_disp.f.active_h_end=mode->left_margin + mode->xres;
- writel(active_h_disp.val, remapped_regs + mmACTIVE_H_DISP);
-
- active_v_disp.val = 0;
- active_v_disp.f.active_v_start=mode->upper_margin;
- active_v_disp.f.active_v_end=mode->upper_margin + mode->yres;
- writel(active_v_disp.val, remapped_regs + mmACTIVE_V_DISP);
-
- graphic_h_disp.val = 0;
- graphic_h_disp.f.graphic_h_start=mode->left_margin;
- graphic_h_disp.f.graphic_h_end=mode->left_margin + mode->xres;
- writel(graphic_h_disp.val, remapped_regs + mmGRAPHIC_H_DISP);
-
- graphic_v_disp.val = 0;
- graphic_v_disp.f.graphic_v_start=mode->upper_margin;
- graphic_v_disp.f.graphic_v_end=mode->upper_margin + mode->yres;
- writel(graphic_v_disp.val, remapped_regs + mmGRAPHIC_V_DISP);
-
- crtc_total.val = 0;
- crtc_total.f.crtc_h_total=mode->left_margin + mode->xres + mode->right_margin;
- crtc_total.f.crtc_v_total=mode->upper_margin + mode->yres + mode->lower_margin;
- writel(crtc_total.val, remapped_regs + mmCRTC_TOTAL);
-
- writel(mode->crtc_ss, remapped_regs + mmCRTC_SS);
- writel(mode->crtc_ls, remapped_regs + mmCRTC_LS);
- writel(mode->crtc_gs, remapped_regs + mmCRTC_GS);
- writel(mode->crtc_vpos_gs, remapped_regs + mmCRTC_VPOS_GS);
- writel(mode->crtc_rev, remapped_regs + mmCRTC_REV);
- writel(mode->crtc_dclk, remapped_regs + mmCRTC_DCLK);
- writel(mode->crtc_gclk, remapped_regs + mmCRTC_GCLK);
- writel(mode->crtc_goe, remapped_regs + mmCRTC_GOE);
- writel(mode->crtc_ps1_active, remapped_regs + mmCRTC_PS1_ACTIVE);
-
- writel(regs->lcd_format, remapped_regs + mmLCD_FORMAT);
- writel(regs->lcdd_cntl1, remapped_regs + mmLCDD_CNTL1);
- writel(regs->lcdd_cntl2, remapped_regs + mmLCDD_CNTL2);
- writel(regs->genlcd_cntl1, remapped_regs + mmGENLCD_CNTL1);
- writel(regs->genlcd_cntl2, remapped_regs + mmGENLCD_CNTL2);
- writel(regs->genlcd_cntl3, remapped_regs + mmGENLCD_CNTL3);
-
- writel(0x00000000, remapped_regs + mmCRTC_FRAME);
- writel(0x00000000, remapped_regs + mmCRTC_FRAME_VPOS);
- writel(0x00000000, remapped_regs + mmCRTC_DEFAULT_COUNT);
- writel(0x0000FF00, remapped_regs + mmLCD_BACKGROUND_COLOR);
-
- /* Hack for overlay in ext memory */
- temp32 = readl(remapped_regs + mmDISP_DEBUG2);
- temp32 |= 0xc0000000;
- writel(temp32, remapped_regs + mmDISP_DEBUG2);
-}
-
-
-static void w100_setup_memory(struct w100fb_par *par)
-{
- union mc_ext_mem_location_u extmem_location;
- union mc_fb_location_u intmem_location;
- struct w100_mem_info *mem = par->mach->mem;
- struct w100_bm_mem_info *bm_mem = par->mach->bm_mem;
-
- if (!par->extmem_active) {
- w100_suspend(W100_SUSPEND_EXTMEM);
-
- /* Map Internal Memory at FB Base */
- intmem_location.f.mc_fb_start = W100_FB_BASE >> 8;
- intmem_location.f.mc_fb_top = (W100_FB_BASE+MEM_INT_SIZE) >> 8;
- writel((u32) (intmem_location.val), remapped_regs + mmMC_FB_LOCATION);
-
- /* Unmap External Memory - value is *probably* irrelevant but may have meaning
- to acceleration libraries */
- extmem_location.f.mc_ext_mem_start = MEM_EXT_BASE_VALUE >> 8;
- extmem_location.f.mc_ext_mem_top = (MEM_EXT_BASE_VALUE-1) >> 8;
- writel((u32) (extmem_location.val), remapped_regs + mmMC_EXT_MEM_LOCATION);
- } else {
- /* Map Internal Memory to its default location */
- intmem_location.f.mc_fb_start = MEM_INT_BASE_VALUE >> 8;
- intmem_location.f.mc_fb_top = (MEM_INT_BASE_VALUE+MEM_INT_SIZE) >> 8;
- writel((u32) (intmem_location.val), remapped_regs + mmMC_FB_LOCATION);
-
- /* Map External Memory at FB Base */
- extmem_location.f.mc_ext_mem_start = W100_FB_BASE >> 8;
- extmem_location.f.mc_ext_mem_top = (W100_FB_BASE+par->mach->mem->size) >> 8;
- writel((u32) (extmem_location.val), remapped_regs + mmMC_EXT_MEM_LOCATION);
-
- writel(0x00007800, remapped_regs + mmMC_BIST_CTRL);
- writel(mem->ext_cntl, remapped_regs + mmMEM_EXT_CNTL);
- writel(0x00200021, remapped_regs + mmMEM_SDRAM_MODE_REG);
- udelay(100);
- writel(0x80200021, remapped_regs + mmMEM_SDRAM_MODE_REG);
- udelay(100);
- writel(mem->sdram_mode_reg, remapped_regs + mmMEM_SDRAM_MODE_REG);
- udelay(100);
- writel(mem->ext_timing_cntl, remapped_regs + mmMEM_EXT_TIMING_CNTL);
- writel(mem->io_cntl, remapped_regs + mmMEM_IO_CNTL);
- if (bm_mem) {
- writel(bm_mem->ext_mem_bw, remapped_regs + mmBM_EXT_MEM_BANDWIDTH);
- writel(bm_mem->offset, remapped_regs + mmBM_OFFSET);
- writel(bm_mem->ext_timing_ctl, remapped_regs + mmBM_MEM_EXT_TIMING_CNTL);
- writel(bm_mem->ext_cntl, remapped_regs + mmBM_MEM_EXT_CNTL);
- writel(bm_mem->mode_reg, remapped_regs + mmBM_MEM_MODE_REG);
- writel(bm_mem->io_cntl, remapped_regs + mmBM_MEM_IO_CNTL);
- writel(bm_mem->config, remapped_regs + mmBM_CONFIG);
- }
- }
-}
-
-static void w100_set_dispregs(struct w100fb_par *par)
-{
- unsigned long rot=0, divider, offset=0;
- union graphic_ctrl_u graphic_ctrl;
-
- /* See if the mode has been rotated */
- if (par->xres == par->mode->xres) {
- if (par->flip) {
- rot=3; /* 180 degree */
- offset=(par->xres * par->yres) - 1;
- } /* else 0 degree */
- divider = par->mode->pixclk_divider;
- } else {
- if (par->flip) {
- rot=2; /* 270 degree */
- offset=par->xres - 1;
- } else {
- rot=1; /* 90 degree */
- offset=par->xres * (par->yres - 1);
- }
- divider = par->mode->pixclk_divider_rotated;
- }
-
- graphic_ctrl.val = 0; /* w32xx doesn't like undefined bits */
- switch (par->chip_id) {
- case CHIP_ID_W100:
- graphic_ctrl.f_w100.color_depth=6;
- graphic_ctrl.f_w100.en_crtc=1;
- graphic_ctrl.f_w100.en_graphic_req=1;
- graphic_ctrl.f_w100.en_graphic_crtc=1;
- graphic_ctrl.f_w100.lcd_pclk_on=1;
- graphic_ctrl.f_w100.lcd_sclk_on=1;
- graphic_ctrl.f_w100.low_power_on=0;
- graphic_ctrl.f_w100.req_freq=0;
- graphic_ctrl.f_w100.portrait_mode=rot;
-
- /* Zaurus needs this */
- switch(par->xres) {
- case 240:
- case 320:
- default:
- graphic_ctrl.f_w100.total_req_graphic=0xa0;
- break;
- case 480:
- case 640:
- switch(rot) {
- case 0: /* 0 */
- case 3: /* 180 */
- graphic_ctrl.f_w100.low_power_on=1;
- graphic_ctrl.f_w100.req_freq=5;
- break;
- case 1: /* 90 */
- case 2: /* 270 */
- graphic_ctrl.f_w100.req_freq=4;
- break;
- default:
- break;
- }
- graphic_ctrl.f_w100.total_req_graphic=0xf0;
- break;
- }
- break;
- case CHIP_ID_W3200:
- case CHIP_ID_W3220:
- graphic_ctrl.f_w32xx.color_depth=6;
- graphic_ctrl.f_w32xx.en_crtc=1;
- graphic_ctrl.f_w32xx.en_graphic_req=1;
- graphic_ctrl.f_w32xx.en_graphic_crtc=1;
- graphic_ctrl.f_w32xx.lcd_pclk_on=1;
- graphic_ctrl.f_w32xx.lcd_sclk_on=1;
- graphic_ctrl.f_w32xx.low_power_on=0;
- graphic_ctrl.f_w32xx.req_freq=0;
- graphic_ctrl.f_w32xx.total_req_graphic=par->mode->xres >> 1; /* panel xres, not mode */
- graphic_ctrl.f_w32xx.portrait_mode=rot;
- break;
- }
-
- /* Set the pixel clock source and divider */
- w100_pwr_state.pclk_cntl.f.pclk_src_sel = par->mode->pixclk_src;
- w100_pwr_state.pclk_cntl.f.pclk_post_div = divider;
- writel((u32) (w100_pwr_state.pclk_cntl.val), remapped_regs + mmPCLK_CNTL);
-
- writel(graphic_ctrl.val, remapped_regs + mmGRAPHIC_CTRL);
- writel(W100_FB_BASE + ((offset * BITS_PER_PIXEL/8)&~0x03UL), remapped_regs + mmGRAPHIC_OFFSET);
- writel((par->xres*BITS_PER_PIXEL/8), remapped_regs + mmGRAPHIC_PITCH);
-}
-
-
-/*
- * Work out how long the sync pulse lasts
- * Value is 1/(time in seconds)
- */
-static void calc_hsync(struct w100fb_par *par)
-{
- unsigned long hsync;
- struct w100_mode *mode = par->mode;
- union crtc_ss_u crtc_ss;
-
- if (mode->pixclk_src == CLK_SRC_XTAL)
- hsync=par->mach->xtal_freq;
- else
- hsync=((par->fastpll_mode && mode->fast_pll_freq) ? mode->fast_pll_freq : mode->pll_freq)*100000;
-
- hsync /= (w100_pwr_state.pclk_cntl.f.pclk_post_div + 1);
-
- crtc_ss.val = readl(remapped_regs + mmCRTC_SS);
- if (crtc_ss.val)
- par->hsync_len = hsync / (crtc_ss.f.ss_end-crtc_ss.f.ss_start);
- else
- par->hsync_len = 0;
-}
-
-static void w100_suspend(u32 mode)
-{
- u32 val;
-
- writel(0x7FFF8000, remapped_regs + mmMC_EXT_MEM_LOCATION);
- writel(0x00FF0000, remapped_regs + mmMC_PERF_MON_CNTL);
-
- val = readl(remapped_regs + mmMEM_EXT_TIMING_CNTL);
- val &= ~(0x00100000); /* bit20=0 */
- val |= 0xFF000000; /* bit31:24=0xff */
- writel(val, remapped_regs + mmMEM_EXT_TIMING_CNTL);
-
- val = readl(remapped_regs + mmMEM_EXT_CNTL);
- val &= ~(0x00040000); /* bit18=0 */
- val |= 0x00080000; /* bit19=1 */
- writel(val, remapped_regs + mmMEM_EXT_CNTL);
-
- udelay(1); /* wait 1us */
-
- if (mode == W100_SUSPEND_EXTMEM) {
- /* CKE: Tri-State */
- val = readl(remapped_regs + mmMEM_EXT_CNTL);
- val |= 0x40000000; /* bit30=1 */
- writel(val, remapped_regs + mmMEM_EXT_CNTL);
-
- /* CLK: Stop */
- val = readl(remapped_regs + mmMEM_EXT_CNTL);
- val &= ~(0x00000001); /* bit0=0 */
- writel(val, remapped_regs + mmMEM_EXT_CNTL);
- } else {
- writel(0x00000000, remapped_regs + mmSCLK_CNTL);
- writel(0x000000BF, remapped_regs + mmCLK_PIN_CNTL);
- writel(0x00000015, remapped_regs + mmPWRMGT_CNTL);
-
- udelay(5);
-
- val = readl(remapped_regs + mmPLL_CNTL);
- val |= 0x00000004; /* bit2=1 */
- writel(val, remapped_regs + mmPLL_CNTL);
-
- writel(0x00000000, remapped_regs + mmLCDD_CNTL1);
- writel(0x00000000, remapped_regs + mmLCDD_CNTL2);
- writel(0x00000000, remapped_regs + mmGENLCD_CNTL1);
- writel(0x00000000, remapped_regs + mmGENLCD_CNTL2);
- writel(0x00000000, remapped_regs + mmGENLCD_CNTL3);
-
- val = readl(remapped_regs + mmMEM_EXT_CNTL);
- val |= 0xF0000000;
- val &= ~(0x00000001);
- writel(val, remapped_regs + mmMEM_EXT_CNTL);
-
- writel(0x0000001d, remapped_regs + mmPWRMGT_CNTL);
- }
-}
-
-static void w100_vsync(void)
-{
- u32 tmp;
- int timeout = 30000; /* VSync timeout = 30[ms] > 16.8[ms] */
-
- tmp = readl(remapped_regs + mmACTIVE_V_DISP);
-
- /* set vline pos */
- writel((tmp >> 16) & 0x3ff, remapped_regs + mmDISP_INT_CNTL);
-
- /* disable vline irq */
- tmp = readl(remapped_regs + mmGEN_INT_CNTL);
-
- tmp &= ~0x00000002;
- writel(tmp, remapped_regs + mmGEN_INT_CNTL);
-
- /* clear vline irq status */
- writel(0x00000002, remapped_regs + mmGEN_INT_STATUS);
-
- /* enable vline irq */
- writel((tmp | 0x00000002), remapped_regs + mmGEN_INT_CNTL);
-
- /* clear vline irq status */
- writel(0x00000002, remapped_regs + mmGEN_INT_STATUS);
-
- while(timeout > 0) {
- if (readl(remapped_regs + mmGEN_INT_STATUS) & 0x00000002)
- break;
- udelay(1);
- timeout--;
- }
-
- /* disable vline irq */
- writel(tmp, remapped_regs + mmGEN_INT_CNTL);
-
- /* clear vline irq status */
- writel(0x00000002, remapped_regs + mmGEN_INT_STATUS);
-}
-
-static struct platform_driver w100fb_driver = {
- .probe = w100fb_probe,
- .remove = w100fb_remove,
- .suspend = w100fb_suspend,
- .resume = w100fb_resume,
- .driver = {
- .name = "w100fb",
- .dev_groups = w100fb_groups,
- },
-};
-
-module_platform_driver(w100fb_driver);
-
-MODULE_DESCRIPTION("ATI Imageon w100 framebuffer driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/video/fbdev/w100fb.h b/drivers/video/fbdev/w100fb.h
deleted file mode 100644
index 52c96d155b4c..000000000000
--- a/drivers/video/fbdev/w100fb.h
+++ /dev/null
@@ -1,924 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * linux/drivers/video/w100fb.h
- *
- * Frame Buffer Device for ATI w100 (Wallaby)
- *
- * Copyright (C) 2002, ATI Corp.
- * Copyright (C) 2004-2005 Richard Purdie
- * Copyright (c) 2005 Ian Molton <spyro@f2s.com>
- *
- * Modified to work with 2.6 by Richard Purdie <rpurdie@rpsys.net>
- *
- * w32xx support by Ian Molton
- */
-
-#if !defined (_W100FB_H)
-#define _W100FB_H
-
-/* Block CIF Start: */
-#define mmCHIP_ID 0x0000
-#define mmREVISION_ID 0x0004
-#define mmWRAP_BUF_A 0x0008
-#define mmWRAP_BUF_B 0x000C
-#define mmWRAP_TOP_DIR 0x0010
-#define mmWRAP_START_DIR 0x0014
-#define mmCIF_CNTL 0x0018
-#define mmCFGREG_BASE 0x001C
-#define mmCIF_IO 0x0020
-#define mmCIF_READ_DBG 0x0024
-#define mmCIF_WRITE_DBG 0x0028
-#define cfgIND_ADDR_A_0 0x0000
-#define cfgIND_ADDR_A_1 0x0001
-#define cfgIND_ADDR_A_2 0x0002
-#define cfgIND_DATA_A 0x0003
-#define cfgREG_BASE 0x0004
-#define cfgINTF_CNTL 0x0005
-#define cfgSTATUS 0x0006
-#define cfgCPU_DEFAULTS 0x0007
-#define cfgIND_ADDR_B_0 0x0008
-#define cfgIND_ADDR_B_1 0x0009
-#define cfgIND_ADDR_B_2 0x000A
-#define cfgIND_DATA_B 0x000B
-#define cfgPM4_RPTR 0x000C
-#define cfgSCRATCH 0x000D
-#define cfgPM4_WRPTR_0 0x000E
-#define cfgPM4_WRPTR_1 0x000F
-/* Block CIF End: */
-
-/* Block CP Start: */
-#define mmSCRATCH_UMSK 0x0280
-#define mmSCRATCH_ADDR 0x0284
-#define mmGEN_INT_CNTL 0x0200
-#define mmGEN_INT_STATUS 0x0204
-/* Block CP End: */
-
-/* Block DISPLAY Start: */
-#define mmLCD_FORMAT 0x0410
-#define mmGRAPHIC_CTRL 0x0414
-#define mmGRAPHIC_OFFSET 0x0418
-#define mmGRAPHIC_PITCH 0x041C
-#define mmCRTC_TOTAL 0x0420
-#define mmACTIVE_H_DISP 0x0424
-#define mmACTIVE_V_DISP 0x0428
-#define mmGRAPHIC_H_DISP 0x042C
-#define mmGRAPHIC_V_DISP 0x0430
-#define mmVIDEO_CTRL 0x0434
-#define mmGRAPHIC_KEY 0x0438
-#define mmBRIGHTNESS_CNTL 0x045C
-#define mmDISP_INT_CNTL 0x0488
-#define mmCRTC_SS 0x048C
-#define mmCRTC_LS 0x0490
-#define mmCRTC_REV 0x0494
-#define mmCRTC_DCLK 0x049C
-#define mmCRTC_GS 0x04A0
-#define mmCRTC_VPOS_GS 0x04A4
-#define mmCRTC_GCLK 0x04A8
-#define mmCRTC_GOE 0x04AC
-#define mmCRTC_FRAME 0x04B0
-#define mmCRTC_FRAME_VPOS 0x04B4
-#define mmGPIO_DATA 0x04B8
-#define mmGPIO_CNTL1 0x04BC
-#define mmGPIO_CNTL2 0x04C0
-#define mmLCDD_CNTL1 0x04C4
-#define mmLCDD_CNTL2 0x04C8
-#define mmGENLCD_CNTL1 0x04CC
-#define mmGENLCD_CNTL2 0x04D0
-#define mmDISP_DEBUG 0x04D4
-#define mmDISP_DB_BUF_CNTL 0x04D8
-#define mmDISP_CRC_SIG 0x04DC
-#define mmCRTC_DEFAULT_COUNT 0x04E0
-#define mmLCD_BACKGROUND_COLOR 0x04E4
-#define mmCRTC_PS2 0x04E8
-#define mmCRTC_PS2_VPOS 0x04EC
-#define mmCRTC_PS1_ACTIVE 0x04F0
-#define mmCRTC_PS1_NACTIVE 0x04F4
-#define mmCRTC_GCLK_EXT 0x04F8
-#define mmCRTC_ALW 0x04FC
-#define mmCRTC_ALW_VPOS 0x0500
-#define mmCRTC_PSK 0x0504
-#define mmCRTC_PSK_HPOS 0x0508
-#define mmCRTC_CV4_START 0x050C
-#define mmCRTC_CV4_END 0x0510
-#define mmCRTC_CV4_HPOS 0x0514
-#define mmCRTC_ECK 0x051C
-#define mmREFRESH_CNTL 0x0520
-#define mmGENLCD_CNTL3 0x0524
-#define mmGPIO_DATA2 0x0528
-#define mmGPIO_CNTL3 0x052C
-#define mmGPIO_CNTL4 0x0530
-#define mmCHIP_STRAP 0x0534
-#define mmDISP_DEBUG2 0x0538
-#define mmDEBUG_BUS_CNTL 0x053C
-#define mmGAMMA_VALUE1 0x0540
-#define mmGAMMA_VALUE2 0x0544
-#define mmGAMMA_SLOPE 0x0548
-#define mmGEN_STATUS 0x054C
-#define mmHW_INT 0x0550
-/* Block DISPLAY End: */
-
-/* Block GFX Start: */
-#define mmDST_OFFSET 0x1004
-#define mmDST_PITCH 0x1008
-#define mmDST_Y_X 0x1038
-#define mmDST_WIDTH_HEIGHT 0x1198
-#define mmDP_GUI_MASTER_CNTL 0x106C
-#define mmBRUSH_OFFSET 0x108C
-#define mmBRUSH_Y_X 0x1074
-#define mmDP_BRUSH_FRGD_CLR 0x107C
-#define mmSRC_OFFSET 0x11AC
-#define mmSRC_PITCH 0x11B0
-#define mmSRC_Y_X 0x1034
-#define mmDEFAULT_PITCH_OFFSET 0x10A0
-#define mmDEFAULT_SC_BOTTOM_RIGHT 0x10A8
-#define mmDEFAULT2_SC_BOTTOM_RIGHT 0x10AC
-#define mmSC_TOP_LEFT 0x11BC
-#define mmSC_BOTTOM_RIGHT 0x11C0
-#define mmSRC_SC_BOTTOM_RIGHT 0x11C4
-#define mmGLOBAL_ALPHA 0x1210
-#define mmFILTER_COEF 0x1214
-#define mmMVC_CNTL_START 0x11E0
-#define mmE2_ARITHMETIC_CNTL 0x1220
-#define mmDP_CNTL 0x11C8
-#define mmDP_CNTL_DST_DIR 0x11CC
-#define mmDP_DATATYPE 0x12C4
-#define mmDP_MIX 0x12C8
-#define mmDP_WRITE_MSK 0x12CC
-#define mmENG_CNTL 0x13E8
-#define mmENG_PERF_CNT 0x13F0
-/* Block GFX End: */
-
-/* Block IDCT Start: */
-#define mmIDCT_RUNS 0x0C00
-#define mmIDCT_LEVELS 0x0C04
-#define mmIDCT_CONTROL 0x0C3C
-#define mmIDCT_AUTH_CONTROL 0x0C08
-#define mmIDCT_AUTH 0x0C0C
-/* Block IDCT End: */
-
-/* Block MC Start: */
-#define mmMEM_CNTL 0x0180
-#define mmMEM_ARB 0x0184
-#define mmMC_FB_LOCATION 0x0188
-#define mmMEM_EXT_CNTL 0x018C
-#define mmMC_EXT_MEM_LOCATION 0x0190
-#define mmMEM_EXT_TIMING_CNTL 0x0194
-#define mmMEM_SDRAM_MODE_REG 0x0198
-#define mmMEM_IO_CNTL 0x019C
-#define mmMC_DEBUG 0x01A0
-#define mmMC_BIST_CTRL 0x01A4
-#define mmMC_BIST_COLLAR_READ 0x01A8
-#define mmTC_MISMATCH 0x01AC
-#define mmMC_PERF_MON_CNTL 0x01B0
-#define mmMC_PERF_COUNTERS 0x01B4
-/* Block MC End: */
-
-/* Block BM Start: */
-#define mmBM_EXT_MEM_BANDWIDTH 0x0A00
-#define mmBM_OFFSET 0x0A04
-#define mmBM_MEM_EXT_TIMING_CNTL 0x0A08
-#define mmBM_MEM_EXT_CNTL 0x0A0C
-#define mmBM_MEM_MODE_REG 0x0A10
-#define mmBM_MEM_IO_CNTL 0x0A18
-#define mmBM_CONFIG 0x0A1C
-#define mmBM_STATUS 0x0A20
-#define mmBM_DEBUG 0x0A24
-#define mmBM_PERF_MON_CNTL 0x0A28
-#define mmBM_PERF_COUNTERS 0x0A2C
-#define mmBM_PERF2_MON_CNTL 0x0A30
-#define mmBM_PERF2_COUNTERS 0x0A34
-/* Block BM End: */
-
-/* Block RBBM Start: */
-#define mmWAIT_UNTIL 0x1400
-#define mmISYNC_CNTL 0x1404
-#define mmRBBM_STATUS 0x0140
-#define mmRBBM_CNTL 0x0144
-#define mmNQWAIT_UNTIL 0x0150
-/* Block RBBM End: */
-
-/* Block CG Start: */
-#define mmCLK_PIN_CNTL 0x0080
-#define mmPLL_REF_FB_DIV 0x0084
-#define mmPLL_CNTL 0x0088
-#define mmSCLK_CNTL 0x008C
-#define mmPCLK_CNTL 0x0090
-#define mmCLK_TEST_CNTL 0x0094
-#define mmPWRMGT_CNTL 0x0098
-#define mmPWRMGT_STATUS 0x009C
-/* Block CG End: */
-
-/* default value definitions */
-#define defWRAP_TOP_DIR 0x00000000
-#define defWRAP_START_DIR 0x00000000
-#define defCFGREG_BASE 0x00000000
-#define defCIF_IO 0x000C0902
-#define defINTF_CNTL 0x00000011
-#define defCPU_DEFAULTS 0x00000006
-#define defHW_INT 0x00000000
-#define defMC_EXT_MEM_LOCATION 0x07ff0000
-#define defTC_MISMATCH 0x00000000
-
-#define W100_CFG_BASE 0x0
-#define W100_CFG_LEN 0x10
-#define W100_REG_BASE 0x10000
-#define W100_REG_LEN 0x2000
-#define MEM_INT_BASE_VALUE 0x100000
-#define MEM_EXT_BASE_VALUE 0x800000
-#define MEM_INT_SIZE 0x05ffff
-#define MEM_WINDOW_BASE 0x100000
-#define MEM_WINDOW_SIZE 0xf00000
-
-#define WRAP_BUF_BASE_VALUE 0x80000
-#define WRAP_BUF_TOP_VALUE 0xbffff
-
-#define CHIP_ID_W100 0x57411002
-#define CHIP_ID_W3200 0x56441002
-#define CHIP_ID_W3220 0x57441002
-
-/* Register structure definitions */
-
-struct wrap_top_dir_t {
- u32 top_addr : 23;
- u32 : 9;
-} __attribute__((packed));
-
-union wrap_top_dir_u {
- u32 val : 32;
- struct wrap_top_dir_t f;
-} __attribute__((packed));
-
-struct wrap_start_dir_t {
- u32 start_addr : 23;
- u32 : 9;
-} __attribute__((packed));
-
-union wrap_start_dir_u {
- u32 val : 32;
- struct wrap_start_dir_t f;
-} __attribute__((packed));
-
-struct cif_cntl_t {
- u32 swap_reg : 2;
- u32 swap_fbuf_1 : 2;
- u32 swap_fbuf_2 : 2;
- u32 swap_fbuf_3 : 2;
- u32 pmi_int_disable : 1;
- u32 pmi_schmen_disable : 1;
- u32 intb_oe : 1;
- u32 en_wait_to_compensate_dq_prop_dly : 1;
- u32 compensate_wait_rd_size : 2;
- u32 wait_asserted_timeout_val : 2;
- u32 wait_masked_val : 2;
- u32 en_wait_timeout : 1;
- u32 en_one_clk_setup_before_wait : 1;
- u32 interrupt_active_high : 1;
- u32 en_overwrite_straps : 1;
- u32 strap_wait_active_hi : 1;
- u32 lat_busy_count : 2;
- u32 lat_rd_pm4_sclk_busy : 1;
- u32 dis_system_bits : 1;
- u32 dis_mr : 1;
- u32 cif_spare_1 : 4;
-} __attribute__((packed));
-
-union cif_cntl_u {
- u32 val : 32;
- struct cif_cntl_t f;
-} __attribute__((packed));
-
-struct cfgreg_base_t {
- u32 cfgreg_base : 24;
- u32 : 8;
-} __attribute__((packed));
-
-union cfgreg_base_u {
- u32 val : 32;
- struct cfgreg_base_t f;
-} __attribute__((packed));
-
-struct cif_io_t {
- u32 dq_srp : 1;
- u32 dq_srn : 1;
- u32 dq_sp : 4;
- u32 dq_sn : 4;
- u32 waitb_srp : 1;
- u32 waitb_srn : 1;
- u32 waitb_sp : 4;
- u32 waitb_sn : 4;
- u32 intb_srp : 1;
- u32 intb_srn : 1;
- u32 intb_sp : 4;
- u32 intb_sn : 4;
- u32 : 2;
-} __attribute__((packed));
-
-union cif_io_u {
- u32 val : 32;
- struct cif_io_t f;
-} __attribute__((packed));
-
-struct cif_read_dbg_t {
- u32 unpacker_pre_fetch_trig_gen : 2;
- u32 dly_second_rd_fetch_trig : 1;
- u32 rst_rd_burst_id : 1;
- u32 dis_rd_burst_id : 1;
- u32 en_block_rd_when_packer_is_not_emp : 1;
- u32 dis_pre_fetch_cntl_sm : 1;
- u32 rbbm_chrncy_dis : 1;
- u32 rbbm_rd_after_wr_lat : 2;
- u32 dis_be_during_rd : 1;
- u32 one_clk_invalidate_pulse : 1;
- u32 dis_chnl_priority : 1;
- u32 rst_read_path_a_pls : 1;
- u32 rst_read_path_b_pls : 1;
- u32 dis_reg_rd_fetch_trig : 1;
- u32 dis_rd_fetch_trig_from_ind_addr : 1;
- u32 dis_rd_same_byte_to_trig_fetch : 1;
- u32 dis_dir_wrap : 1;
- u32 dis_ring_buf_to_force_dec : 1;
- u32 dis_addr_comp_in_16bit : 1;
- u32 clr_w : 1;
- u32 err_rd_tag_is_3 : 1;
- u32 err_load_when_ful_a : 1;
- u32 err_load_when_ful_b : 1;
- u32 : 7;
-} __attribute__((packed));
-
-union cif_read_dbg_u {
- u32 val : 32;
- struct cif_read_dbg_t f;
-} __attribute__((packed));
-
-struct cif_write_dbg_t {
- u32 packer_timeout_count : 2;
- u32 en_upper_load_cond : 1;
- u32 en_chnl_change_cond : 1;
- u32 dis_addr_comp_cond : 1;
- u32 dis_load_same_byte_addr_cond : 1;
- u32 dis_timeout_cond : 1;
- u32 dis_timeout_during_rbbm : 1;
- u32 dis_packer_ful_during_rbbm_timeout : 1;
- u32 en_dword_split_to_rbbm : 1;
- u32 en_dummy_val : 1;
- u32 dummy_val_sel : 1;
- u32 mask_pm4_wrptr_dec : 1;
- u32 dis_mc_clean_cond : 1;
- u32 err_two_reqi_during_ful : 1;
- u32 err_reqi_during_idle_clk : 1;
- u32 err_global : 1;
- u32 en_wr_buf_dbg_load : 1;
- u32 en_wr_buf_dbg_path : 1;
- u32 sel_wr_buf_byte : 3;
- u32 dis_rd_flush_wr : 1;
- u32 dis_packer_ful_cond : 1;
- u32 dis_invalidate_by_ops_chnl : 1;
- u32 en_halt_when_reqi_err : 1;
- u32 cif_spare_2 : 5;
- u32 : 1;
-} __attribute__((packed));
-
-union cif_write_dbg_u {
- u32 val : 32;
- struct cif_write_dbg_t f;
-} __attribute__((packed));
-
-
-struct intf_cntl_t {
- unsigned char ad_inc_a : 1;
- unsigned char ring_buf_a : 1;
- unsigned char rd_fetch_trigger_a : 1;
- unsigned char rd_data_rdy_a : 1;
- unsigned char ad_inc_b : 1;
- unsigned char ring_buf_b : 1;
- unsigned char rd_fetch_trigger_b : 1;
- unsigned char rd_data_rdy_b : 1;
-} __attribute__((packed));
-
-union intf_cntl_u {
- unsigned char val : 8;
- struct intf_cntl_t f;
-} __attribute__((packed));
-
-struct cpu_defaults_t {
- unsigned char unpack_rd_data : 1;
- unsigned char access_ind_addr_a : 1;
- unsigned char access_ind_addr_b : 1;
- unsigned char access_scratch_reg : 1;
- unsigned char pack_wr_data : 1;
- unsigned char transition_size : 1;
- unsigned char en_read_buf_mode : 1;
- unsigned char rd_fetch_scratch : 1;
-} __attribute__((packed));
-
-union cpu_defaults_u {
- unsigned char val : 8;
- struct cpu_defaults_t f;
-} __attribute__((packed));
-
-struct crtc_total_t {
- u32 crtc_h_total : 10;
- u32 : 6;
- u32 crtc_v_total : 10;
- u32 : 6;
-} __attribute__((packed));
-
-union crtc_total_u {
- u32 val : 32;
- struct crtc_total_t f;
-} __attribute__((packed));
-
-struct crtc_ss_t {
- u32 ss_start : 10;
- u32 : 6;
- u32 ss_end : 10;
- u32 : 2;
- u32 ss_align : 1;
- u32 ss_pol : 1;
- u32 ss_run_mode : 1;
- u32 ss_en : 1;
-} __attribute__((packed));
-
-union crtc_ss_u {
- u32 val : 32;
- struct crtc_ss_t f;
-} __attribute__((packed));
-
-struct active_h_disp_t {
- u32 active_h_start : 10;
- u32 : 6;
- u32 active_h_end : 10;
- u32 : 6;
-} __attribute__((packed));
-
-union active_h_disp_u {
- u32 val : 32;
- struct active_h_disp_t f;
-} __attribute__((packed));
-
-struct active_v_disp_t {
- u32 active_v_start : 10;
- u32 : 6;
- u32 active_v_end : 10;
- u32 : 6;
-} __attribute__((packed));
-
-union active_v_disp_u {
- u32 val : 32;
- struct active_v_disp_t f;
-} __attribute__((packed));
-
-struct graphic_h_disp_t {
- u32 graphic_h_start : 10;
- u32 : 6;
- u32 graphic_h_end : 10;
- u32 : 6;
-} __attribute__((packed));
-
-union graphic_h_disp_u {
- u32 val : 32;
- struct graphic_h_disp_t f;
-} __attribute__((packed));
-
-struct graphic_v_disp_t {
- u32 graphic_v_start : 10;
- u32 : 6;
- u32 graphic_v_end : 10;
- u32 : 6;
-} __attribute__((packed));
-
-union graphic_v_disp_u{
- u32 val : 32;
- struct graphic_v_disp_t f;
-} __attribute__((packed));
-
-struct graphic_ctrl_t_w100 {
- u32 color_depth : 3;
- u32 portrait_mode : 2;
- u32 low_power_on : 1;
- u32 req_freq : 4;
- u32 en_crtc : 1;
- u32 en_graphic_req : 1;
- u32 en_graphic_crtc : 1;
- u32 total_req_graphic : 9;
- u32 lcd_pclk_on : 1;
- u32 lcd_sclk_on : 1;
- u32 pclk_running : 1;
- u32 sclk_running : 1;
- u32 : 6;
-} __attribute__((packed));
-
-struct graphic_ctrl_t_w32xx {
- u32 color_depth : 3;
- u32 portrait_mode : 2;
- u32 low_power_on : 1;
- u32 req_freq : 4;
- u32 en_crtc : 1;
- u32 en_graphic_req : 1;
- u32 en_graphic_crtc : 1;
- u32 total_req_graphic : 10;
- u32 lcd_pclk_on : 1;
- u32 lcd_sclk_on : 1;
- u32 pclk_running : 1;
- u32 sclk_running : 1;
- u32 : 5;
-} __attribute__((packed));
-
-union graphic_ctrl_u {
- u32 val : 32;
- struct graphic_ctrl_t_w100 f_w100;
- struct graphic_ctrl_t_w32xx f_w32xx;
-} __attribute__((packed));
-
-struct video_ctrl_t {
- u32 video_mode : 1;
- u32 keyer_en : 1;
- u32 en_video_req : 1;
- u32 en_graphic_req_video : 1;
- u32 en_video_crtc : 1;
- u32 video_hor_exp : 2;
- u32 video_ver_exp : 2;
- u32 uv_combine : 1;
- u32 total_req_video : 9;
- u32 video_ch_sel : 1;
- u32 video_portrait : 2;
- u32 yuv2rgb_en : 1;
- u32 yuv2rgb_option : 1;
- u32 video_inv_hor : 1;
- u32 video_inv_ver : 1;
- u32 gamma_sel : 2;
- u32 dis_limit : 1;
- u32 en_uv_hblend : 1;
- u32 rgb_gamma_sel : 2;
-} __attribute__((packed));
-
-union video_ctrl_u {
- u32 val : 32;
- struct video_ctrl_t f;
-} __attribute__((packed));
-
-struct disp_db_buf_cntl_rd_t {
- u32 en_db_buf : 1;
- u32 update_db_buf_done : 1;
- u32 db_buf_cntl : 6;
- u32 : 24;
-} __attribute__((packed));
-
-union disp_db_buf_cntl_rd_u {
- u32 val : 32;
- struct disp_db_buf_cntl_rd_t f;
-} __attribute__((packed));
-
-struct disp_db_buf_cntl_wr_t {
- u32 en_db_buf : 1;
- u32 update_db_buf : 1;
- u32 db_buf_cntl : 6;
- u32 : 24;
-} __attribute__((packed));
-
-union disp_db_buf_cntl_wr_u {
- u32 val : 32;
- struct disp_db_buf_cntl_wr_t f;
-} __attribute__((packed));
-
-struct gamma_value1_t {
- u32 gamma1 : 8;
- u32 gamma2 : 8;
- u32 gamma3 : 8;
- u32 gamma4 : 8;
-} __attribute__((packed));
-
-union gamma_value1_u {
- u32 val : 32;
- struct gamma_value1_t f;
-} __attribute__((packed));
-
-struct gamma_value2_t {
- u32 gamma5 : 8;
- u32 gamma6 : 8;
- u32 gamma7 : 8;
- u32 gamma8 : 8;
-} __attribute__((packed));
-
-union gamma_value2_u {
- u32 val : 32;
- struct gamma_value2_t f;
-} __attribute__((packed));
-
-struct gamma_slope_t {
- u32 slope1 : 3;
- u32 slope2 : 3;
- u32 slope3 : 3;
- u32 slope4 : 3;
- u32 slope5 : 3;
- u32 slope6 : 3;
- u32 slope7 : 3;
- u32 slope8 : 3;
- u32 : 8;
-} __attribute__((packed));
-
-union gamma_slope_u {
- u32 val : 32;
- struct gamma_slope_t f;
-} __attribute__((packed));
-
-struct mc_ext_mem_location_t {
- u32 mc_ext_mem_start : 16;
- u32 mc_ext_mem_top : 16;
-} __attribute__((packed));
-
-union mc_ext_mem_location_u {
- u32 val : 32;
- struct mc_ext_mem_location_t f;
-} __attribute__((packed));
-
-struct mc_fb_location_t {
- u32 mc_fb_start : 16;
- u32 mc_fb_top : 16;
-} __attribute__((packed));
-
-union mc_fb_location_u {
- u32 val : 32;
- struct mc_fb_location_t f;
-} __attribute__((packed));
-
-struct clk_pin_cntl_t {
- u32 osc_en : 1;
- u32 osc_gain : 5;
- u32 dont_use_xtalin : 1;
- u32 xtalin_pm_en : 1;
- u32 xtalin_dbl_en : 1;
- u32 : 7;
- u32 cg_debug : 16;
-} __attribute__((packed));
-
-union clk_pin_cntl_u {
- u32 val : 32;
- struct clk_pin_cntl_t f;
-} __attribute__((packed));
-
-struct pll_ref_fb_div_t {
- u32 pll_ref_div : 4;
- u32 : 4;
- u32 pll_fb_div_int : 6;
- u32 : 2;
- u32 pll_fb_div_frac : 3;
- u32 : 1;
- u32 pll_reset_time : 4;
- u32 pll_lock_time : 8;
-} __attribute__((packed));
-
-union pll_ref_fb_div_u {
- u32 val : 32;
- struct pll_ref_fb_div_t f;
-} __attribute__((packed));
-
-struct pll_cntl_t {
- u32 pll_pwdn : 1;
- u32 pll_reset : 1;
- u32 pll_pm_en : 1;
- u32 pll_mode : 1;
- u32 pll_refclk_sel : 1;
- u32 pll_fbclk_sel : 1;
- u32 pll_tcpoff : 1;
- u32 pll_pcp : 3;
- u32 pll_pvg : 3;
- u32 pll_vcofr : 1;
- u32 pll_ioffset : 2;
- u32 pll_pecc_mode : 2;
- u32 pll_pecc_scon : 2;
- u32 pll_dactal : 4;
- u32 pll_cp_clip : 2;
- u32 pll_conf : 3;
- u32 pll_mbctrl : 2;
- u32 pll_ring_off : 1;
-} __attribute__((packed));
-
-union pll_cntl_u {
- u32 val : 32;
- struct pll_cntl_t f;
-} __attribute__((packed));
-
-struct sclk_cntl_t {
- u32 sclk_src_sel : 2;
- u32 : 2;
- u32 sclk_post_div_fast : 4;
- u32 sclk_clkon_hys : 3;
- u32 sclk_post_div_slow : 4;
- u32 disp_cg_ok2switch_en : 1;
- u32 sclk_force_reg : 1;
- u32 sclk_force_disp : 1;
- u32 sclk_force_mc : 1;
- u32 sclk_force_extmc : 1;
- u32 sclk_force_cp : 1;
- u32 sclk_force_e2 : 1;
- u32 sclk_force_e3 : 1;
- u32 sclk_force_idct : 1;
- u32 sclk_force_bist : 1;
- u32 busy_extend_cp : 1;
- u32 busy_extend_e2 : 1;
- u32 busy_extend_e3 : 1;
- u32 busy_extend_idct : 1;
- u32 : 3;
-} __attribute__((packed));
-
-union sclk_cntl_u {
- u32 val : 32;
- struct sclk_cntl_t f;
-} __attribute__((packed));
-
-struct pclk_cntl_t {
- u32 pclk_src_sel : 2;
- u32 : 2;
- u32 pclk_post_div : 4;
- u32 : 8;
- u32 pclk_force_disp : 1;
- u32 : 15;
-} __attribute__((packed));
-
-union pclk_cntl_u {
- u32 val : 32;
- struct pclk_cntl_t f;
-} __attribute__((packed));
-
-
-#define TESTCLK_SRC_PLL 0x01
-#define TESTCLK_SRC_SCLK 0x02
-#define TESTCLK_SRC_PCLK 0x03
-/* 4 and 5 seem to by XTAL/M */
-#define TESTCLK_SRC_XTAL 0x06
-
-struct clk_test_cntl_t {
- u32 testclk_sel : 4;
- u32 : 3;
- u32 start_check_freq : 1;
- u32 tstcount_rst : 1;
- u32 : 15;
- u32 test_count : 8;
-} __attribute__((packed));
-
-union clk_test_cntl_u {
- u32 val : 32;
- struct clk_test_cntl_t f;
-} __attribute__((packed));
-
-struct pwrmgt_cntl_t {
- u32 pwm_enable : 1;
- u32 : 1;
- u32 pwm_mode_req : 2;
- u32 pwm_wakeup_cond : 2;
- u32 pwm_fast_noml_hw_en : 1;
- u32 pwm_noml_fast_hw_en : 1;
- u32 pwm_fast_noml_cond : 4;
- u32 pwm_noml_fast_cond : 4;
- u32 pwm_idle_timer : 8;
- u32 pwm_busy_timer : 8;
-} __attribute__((packed));
-
-union pwrmgt_cntl_u {
- u32 val : 32;
- struct pwrmgt_cntl_t f;
-} __attribute__((packed));
-
-#define SRC_DATATYPE_EQU_DST 3
-
-#define ROP3_SRCCOPY 0xcc
-#define ROP3_PATCOPY 0xf0
-
-#define GMC_BRUSH_SOLID_COLOR 13
-#define GMC_BRUSH_NONE 15
-
-#define DP_SRC_MEM_RECTANGULAR 2
-
-#define DP_OP_ROP 0
-
-struct dp_gui_master_cntl_t {
- u32 gmc_src_pitch_offset_cntl : 1;
- u32 gmc_dst_pitch_offset_cntl : 1;
- u32 gmc_src_clipping : 1;
- u32 gmc_dst_clipping : 1;
- u32 gmc_brush_datatype : 4;
- u32 gmc_dst_datatype : 4;
- u32 gmc_src_datatype : 3;
- u32 gmc_byte_pix_order : 1;
- u32 gmc_default_sel : 1;
- u32 gmc_rop3 : 8;
- u32 gmc_dp_src_source : 3;
- u32 gmc_clr_cmp_fcn_dis : 1;
- u32 : 1;
- u32 gmc_wr_msk_dis : 1;
- u32 gmc_dp_op : 1;
-} __attribute__((packed));
-
-union dp_gui_master_cntl_u {
- u32 val : 32;
- struct dp_gui_master_cntl_t f;
-} __attribute__((packed));
-
-struct rbbm_status_t {
- u32 cmdfifo_avail : 7;
- u32 : 1;
- u32 hirq_on_rbb : 1;
- u32 cprq_on_rbb : 1;
- u32 cfrq_on_rbb : 1;
- u32 hirq_in_rtbuf : 1;
- u32 cprq_in_rtbuf : 1;
- u32 cfrq_in_rtbuf : 1;
- u32 cf_pipe_busy : 1;
- u32 eng_ev_busy : 1;
- u32 cp_cmdstrm_busy : 1;
- u32 e2_busy : 1;
- u32 rb2d_busy : 1;
- u32 rb3d_busy : 1;
- u32 se_busy : 1;
- u32 re_busy : 1;
- u32 tam_busy : 1;
- u32 tdm_busy : 1;
- u32 pb_busy : 1;
- u32 : 6;
- u32 gui_active : 1;
-} __attribute__((packed));
-
-union rbbm_status_u {
- u32 val : 32;
- struct rbbm_status_t f;
-} __attribute__((packed));
-
-struct dp_datatype_t {
- u32 dp_dst_datatype : 4;
- u32 : 4;
- u32 dp_brush_datatype : 4;
- u32 dp_src2_type : 1;
- u32 dp_src2_datatype : 3;
- u32 dp_src_datatype : 3;
- u32 : 11;
- u32 dp_byte_pix_order : 1;
- u32 : 1;
-} __attribute__((packed));
-
-union dp_datatype_u {
- u32 val : 32;
- struct dp_datatype_t f;
-} __attribute__((packed));
-
-struct dp_mix_t {
- u32 : 8;
- u32 dp_src_source : 3;
- u32 dp_src2_source : 3;
- u32 : 2;
- u32 dp_rop3 : 8;
- u32 dp_op : 1;
- u32 : 7;
-} __attribute__((packed));
-
-union dp_mix_u {
- u32 val : 32;
- struct dp_mix_t f;
-} __attribute__((packed));
-
-struct eng_cntl_t {
- u32 erc_reg_rd_ws : 1;
- u32 erc_reg_wr_ws : 1;
- u32 erc_idle_reg_wr : 1;
- u32 dis_engine_triggers : 1;
- u32 dis_rop_src_uses_dst_w_h : 1;
- u32 dis_src_uses_dst_dirmaj : 1;
- u32 : 6;
- u32 force_3dclk_when_2dclk : 1;
- u32 : 19;
-} __attribute__((packed));
-
-union eng_cntl_u {
- u32 val : 32;
- struct eng_cntl_t f;
-} __attribute__((packed));
-
-struct dp_cntl_t {
- u32 dst_x_dir : 1;
- u32 dst_y_dir : 1;
- u32 src_x_dir : 1;
- u32 src_y_dir : 1;
- u32 dst_major_x : 1;
- u32 src_major_x : 1;
- u32 : 26;
-} __attribute__((packed));
-
-union dp_cntl_u {
- u32 val : 32;
- struct dp_cntl_t f;
-} __attribute__((packed));
-
-struct dp_cntl_dst_dir_t {
- u32 : 15;
- u32 dst_y_dir : 1;
- u32 : 15;
- u32 dst_x_dir : 1;
-} __attribute__((packed));
-
-union dp_cntl_dst_dir_u {
- u32 val : 32;
- struct dp_cntl_dst_dir_t f;
-} __attribute__((packed));
-
-#endif
-
diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
index b9a80aedee1b..3893dc29eb26 100644
--- a/drivers/virtio/virtio.c
+++ b/drivers/virtio/virtio.c
@@ -95,9 +95,9 @@ static int virtio_dev_match(struct device *_dv, struct device_driver *_dr)
return 0;
}
-static int virtio_uevent(struct device *_dv, struct kobj_uevent_env *env)
+static int virtio_uevent(const struct device *_dv, struct kobj_uevent_env *env)
{
- struct virtio_device *dev = dev_to_virtio(_dv);
+ const struct virtio_device *dev = dev_to_virtio(_dv);
return add_uevent_var(env, "MODALIAS=virtio:d%08Xv%08X",
dev->id.device, dev->id.vendor);
diff --git a/drivers/w1/masters/Kconfig b/drivers/w1/masters/Kconfig
index 24b9a8e05f64..692cac3ff0ee 100644
--- a/drivers/w1/masters/Kconfig
+++ b/drivers/w1/masters/Kconfig
@@ -41,13 +41,6 @@ config W1_MASTER_MXC
help
Say Y here to enable MXC 1-wire host
-config W1_MASTER_DS1WM
- tristate "Maxim DS1WM 1-wire busmaster"
- help
- Say Y here to enable the DS1WM 1-wire driver, such as that
- in HP iPAQ devices like h5xxx, h2200, and ASIC3-based like
- hx4700.
-
config W1_MASTER_GPIO
tristate "GPIO 1-wire busmaster"
depends on GPIOLIB || COMPILE_TEST
diff --git a/drivers/w1/masters/Makefile b/drivers/w1/masters/Makefile
index dae629b7ab49..c5d85a827e52 100644
--- a/drivers/w1/masters/Makefile
+++ b/drivers/w1/masters/Makefile
@@ -8,7 +8,6 @@ obj-$(CONFIG_W1_MASTER_DS2490) += ds2490.o
obj-$(CONFIG_W1_MASTER_DS2482) += ds2482.o
obj-$(CONFIG_W1_MASTER_MXC) += mxc_w1.o
-obj-$(CONFIG_W1_MASTER_DS1WM) += ds1wm.o
obj-$(CONFIG_W1_MASTER_GPIO) += w1-gpio.o
obj-$(CONFIG_HDQ_MASTER_OMAP) += omap_hdq.o
obj-$(CONFIG_W1_MASTER_SGI) += sgi_w1.o
diff --git a/drivers/w1/masters/ds1wm.c b/drivers/w1/masters/ds1wm.c
deleted file mode 100644
index f661695fb589..000000000000
--- a/drivers/w1/masters/ds1wm.c
+++ /dev/null
@@ -1,675 +0,0 @@
-/*
- * 1-wire busmaster driver for DS1WM and ASICs with embedded DS1WMs
- * such as HP iPAQs (including h5xxx, h2200, and devices with ASIC3
- * like hx4700).
- *
- * Copyright (c) 2004-2005, Szabolcs Gyurko <szabolcs.gyurko@tlt.hu>
- * Copyright (c) 2004-2007, Matt Reimer <mreimer@vpop.net>
- *
- * Use consistent with the GNU GPL is permitted,
- * provided that this copyright notice is
- * preserved in its entirety in all copies and derived works.
- */
-
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/irq.h>
-#include <linux/pm.h>
-#include <linux/platform_device.h>
-#include <linux/err.h>
-#include <linux/delay.h>
-#include <linux/mfd/core.h>
-#include <linux/mfd/ds1wm.h>
-#include <linux/slab.h>
-
-#include <asm/io.h>
-
-#include <linux/w1.h>
-
-
-#define DS1WM_CMD 0x00 /* R/W 4 bits command */
-#define DS1WM_DATA 0x01 /* R/W 8 bits, transmit/receive buffer */
-#define DS1WM_INT 0x02 /* R/W interrupt status */
-#define DS1WM_INT_EN 0x03 /* R/W interrupt enable */
-#define DS1WM_CLKDIV 0x04 /* R/W 5 bits of divisor and pre-scale */
-#define DS1WM_CNTRL 0x05 /* R/W master control register (not used yet) */
-
-#define DS1WM_CMD_1W_RESET (1 << 0) /* force reset on 1-wire bus */
-#define DS1WM_CMD_SRA (1 << 1) /* enable Search ROM accelerator mode */
-#define DS1WM_CMD_DQ_OUTPUT (1 << 2) /* write only - forces bus low */
-#define DS1WM_CMD_DQ_INPUT (1 << 3) /* read only - reflects state of bus */
-#define DS1WM_CMD_RST (1 << 5) /* software reset */
-#define DS1WM_CMD_OD (1 << 7) /* overdrive */
-
-#define DS1WM_INT_PD (1 << 0) /* presence detect */
-#define DS1WM_INT_PDR (1 << 1) /* presence detect result */
-#define DS1WM_INT_TBE (1 << 2) /* tx buffer empty */
-#define DS1WM_INT_TSRE (1 << 3) /* tx shift register empty */
-#define DS1WM_INT_RBF (1 << 4) /* rx buffer full */
-#define DS1WM_INT_RSRF (1 << 5) /* rx shift register full */
-
-#define DS1WM_INTEN_EPD (1 << 0) /* enable presence detect int */
-#define DS1WM_INTEN_IAS (1 << 1) /* INTR active state */
-#define DS1WM_INTEN_ETBE (1 << 2) /* enable tx buffer empty int */
-#define DS1WM_INTEN_ETMT (1 << 3) /* enable tx shift register empty int */
-#define DS1WM_INTEN_ERBF (1 << 4) /* enable rx buffer full int */
-#define DS1WM_INTEN_ERSRF (1 << 5) /* enable rx shift register full int */
-#define DS1WM_INTEN_DQO (1 << 6) /* enable direct bus driving ops */
-
-#define DS1WM_INTEN_NOT_IAS (~DS1WM_INTEN_IAS) /* all but INTR active state */
-
-#define DS1WM_TIMEOUT (HZ * 5)
-
-static struct {
- unsigned long freq;
- unsigned long divisor;
-} freq[] = {
- { 1000000, 0x80 },
- { 2000000, 0x84 },
- { 3000000, 0x81 },
- { 4000000, 0x88 },
- { 5000000, 0x82 },
- { 6000000, 0x85 },
- { 7000000, 0x83 },
- { 8000000, 0x8c },
- { 10000000, 0x86 },
- { 12000000, 0x89 },
- { 14000000, 0x87 },
- { 16000000, 0x90 },
- { 20000000, 0x8a },
- { 24000000, 0x8d },
- { 28000000, 0x8b },
- { 32000000, 0x94 },
- { 40000000, 0x8e },
- { 48000000, 0x91 },
- { 56000000, 0x8f },
- { 64000000, 0x98 },
- { 80000000, 0x92 },
- { 96000000, 0x95 },
- { 112000000, 0x93 },
- { 128000000, 0x9c },
-/* you can continue this table, consult the OPERATION - CLOCK DIVISOR
- section of the ds1wm spec sheet. */
-};
-
-struct ds1wm_data {
- void __iomem *map;
- unsigned int bus_shift; /* # of shifts to calc register offsets */
- bool is_hw_big_endian;
- struct platform_device *pdev;
- const struct mfd_cell *cell;
- int irq;
- int slave_present;
- void *reset_complete;
- void *read_complete;
- void *write_complete;
- int read_error;
- /* last byte received */
- u8 read_byte;
- /* byte to write that makes all intr disabled, */
- /* considering active_state (IAS) (optimization) */
- u8 int_en_reg_none;
- unsigned int reset_recover_delay; /* see ds1wm.h */
-};
-
-static inline void ds1wm_write_register(struct ds1wm_data *ds1wm_data, u32 reg,
- u8 val)
-{
- if (ds1wm_data->is_hw_big_endian) {
- switch (ds1wm_data->bus_shift) {
- case 0:
- iowrite8(val, ds1wm_data->map + (reg << 0));
- break;
- case 1:
- iowrite16be((u16)val, ds1wm_data->map + (reg << 1));
- break;
- case 2:
- iowrite32be((u32)val, ds1wm_data->map + (reg << 2));
- break;
- }
- } else {
- switch (ds1wm_data->bus_shift) {
- case 0:
- iowrite8(val, ds1wm_data->map + (reg << 0));
- break;
- case 1:
- iowrite16((u16)val, ds1wm_data->map + (reg << 1));
- break;
- case 2:
- iowrite32((u32)val, ds1wm_data->map + (reg << 2));
- break;
- }
- }
-}
-
-static inline u8 ds1wm_read_register(struct ds1wm_data *ds1wm_data, u32 reg)
-{
- u32 val = 0;
-
- if (ds1wm_data->is_hw_big_endian) {
- switch (ds1wm_data->bus_shift) {
- case 0:
- val = ioread8(ds1wm_data->map + (reg << 0));
- break;
- case 1:
- val = ioread16be(ds1wm_data->map + (reg << 1));
- break;
- case 2:
- val = ioread32be(ds1wm_data->map + (reg << 2));
- break;
- }
- } else {
- switch (ds1wm_data->bus_shift) {
- case 0:
- val = ioread8(ds1wm_data->map + (reg << 0));
- break;
- case 1:
- val = ioread16(ds1wm_data->map + (reg << 1));
- break;
- case 2:
- val = ioread32(ds1wm_data->map + (reg << 2));
- break;
- }
- }
- dev_dbg(&ds1wm_data->pdev->dev,
- "ds1wm_read_register reg: %d, 32 bit val:%x\n", reg, val);
- return (u8)val;
-}
-
-
-static irqreturn_t ds1wm_isr(int isr, void *data)
-{
- struct ds1wm_data *ds1wm_data = data;
- u8 intr;
- u8 inten = ds1wm_read_register(ds1wm_data, DS1WM_INT_EN);
- /* if no bits are set in int enable register (except the IAS)
- than go no further, reading the regs below has side effects */
- if (!(inten & DS1WM_INTEN_NOT_IAS))
- return IRQ_NONE;
-
- ds1wm_write_register(ds1wm_data,
- DS1WM_INT_EN, ds1wm_data->int_en_reg_none);
-
- /* this read action clears the INTR and certain flags in ds1wm */
- intr = ds1wm_read_register(ds1wm_data, DS1WM_INT);
-
- ds1wm_data->slave_present = (intr & DS1WM_INT_PDR) ? 0 : 1;
-
- if ((intr & DS1WM_INT_TSRE) && ds1wm_data->write_complete) {
- inten &= ~DS1WM_INTEN_ETMT;
- complete(ds1wm_data->write_complete);
- }
- if (intr & DS1WM_INT_RBF) {
- /* this read clears the RBF flag */
- ds1wm_data->read_byte = ds1wm_read_register(ds1wm_data,
- DS1WM_DATA);
- inten &= ~DS1WM_INTEN_ERBF;
- if (ds1wm_data->read_complete)
- complete(ds1wm_data->read_complete);
- }
- if ((intr & DS1WM_INT_PD) && ds1wm_data->reset_complete) {
- inten &= ~DS1WM_INTEN_EPD;
- complete(ds1wm_data->reset_complete);
- }
-
- ds1wm_write_register(ds1wm_data, DS1WM_INT_EN, inten);
- return IRQ_HANDLED;
-}
-
-static int ds1wm_reset(struct ds1wm_data *ds1wm_data)
-{
- unsigned long timeleft;
- DECLARE_COMPLETION_ONSTACK(reset_done);
-
- ds1wm_data->reset_complete = &reset_done;
-
- /* enable Presence detect only */
- ds1wm_write_register(ds1wm_data, DS1WM_INT_EN, DS1WM_INTEN_EPD |
- ds1wm_data->int_en_reg_none);
-
- ds1wm_write_register(ds1wm_data, DS1WM_CMD, DS1WM_CMD_1W_RESET);
-
- timeleft = wait_for_completion_timeout(&reset_done, DS1WM_TIMEOUT);
- ds1wm_data->reset_complete = NULL;
- if (!timeleft) {
- dev_err(&ds1wm_data->pdev->dev, "reset failed, timed out\n");
- return 1;
- }
-
- if (!ds1wm_data->slave_present) {
- dev_dbg(&ds1wm_data->pdev->dev, "reset: no devices found\n");
- return 1;
- }
-
- if (ds1wm_data->reset_recover_delay)
- msleep(ds1wm_data->reset_recover_delay);
-
- return 0;
-}
-
-static int ds1wm_write(struct ds1wm_data *ds1wm_data, u8 data)
-{
- unsigned long timeleft;
- DECLARE_COMPLETION_ONSTACK(write_done);
- ds1wm_data->write_complete = &write_done;
-
- ds1wm_write_register(ds1wm_data, DS1WM_INT_EN,
- ds1wm_data->int_en_reg_none | DS1WM_INTEN_ETMT);
-
- ds1wm_write_register(ds1wm_data, DS1WM_DATA, data);
-
- timeleft = wait_for_completion_timeout(&write_done, DS1WM_TIMEOUT);
-
- ds1wm_data->write_complete = NULL;
- if (!timeleft) {
- dev_err(&ds1wm_data->pdev->dev, "write failed, timed out\n");
- return -ETIMEDOUT;
- }
-
- return 0;
-}
-
-static u8 ds1wm_read(struct ds1wm_data *ds1wm_data, unsigned char write_data)
-{
- unsigned long timeleft;
- u8 intEnable = DS1WM_INTEN_ERBF | ds1wm_data->int_en_reg_none;
- DECLARE_COMPLETION_ONSTACK(read_done);
-
- ds1wm_read_register(ds1wm_data, DS1WM_DATA);
-
- ds1wm_data->read_complete = &read_done;
- ds1wm_write_register(ds1wm_data, DS1WM_INT_EN, intEnable);
-
- ds1wm_write_register(ds1wm_data, DS1WM_DATA, write_data);
- timeleft = wait_for_completion_timeout(&read_done, DS1WM_TIMEOUT);
-
- ds1wm_data->read_complete = NULL;
- if (!timeleft) {
- dev_err(&ds1wm_data->pdev->dev, "read failed, timed out\n");
- ds1wm_data->read_error = -ETIMEDOUT;
- return 0xFF;
- }
- ds1wm_data->read_error = 0;
- return ds1wm_data->read_byte;
-}
-
-static int ds1wm_find_divisor(int gclk)
-{
- int i;
-
- for (i = ARRAY_SIZE(freq)-1; i >= 0; --i)
- if (gclk >= freq[i].freq)
- return freq[i].divisor;
-
- return 0;
-}
-
-static void ds1wm_up(struct ds1wm_data *ds1wm_data)
-{
- int divisor;
- struct device *dev = &ds1wm_data->pdev->dev;
- struct ds1wm_driver_data *plat = dev_get_platdata(dev);
-
- if (ds1wm_data->cell->enable)
- ds1wm_data->cell->enable(ds1wm_data->pdev);
-
- divisor = ds1wm_find_divisor(plat->clock_rate);
- dev_dbg(dev, "found divisor 0x%x for clock %d\n",
- divisor, plat->clock_rate);
- if (divisor == 0) {
- dev_err(dev, "no suitable divisor for %dHz clock\n",
- plat->clock_rate);
- return;
- }
- ds1wm_write_register(ds1wm_data, DS1WM_CLKDIV, divisor);
-
- /* Let the w1 clock stabilize. */
- msleep(1);
-
- ds1wm_reset(ds1wm_data);
-}
-
-static void ds1wm_down(struct ds1wm_data *ds1wm_data)
-{
- ds1wm_reset(ds1wm_data);
-
- /* Disable interrupts. */
- ds1wm_write_register(ds1wm_data, DS1WM_INT_EN,
- ds1wm_data->int_en_reg_none);
-
- if (ds1wm_data->cell->disable)
- ds1wm_data->cell->disable(ds1wm_data->pdev);
-}
-
-/* --------------------------------------------------------------------- */
-/* w1 methods */
-
-static u8 ds1wm_read_byte(void *data)
-{
- struct ds1wm_data *ds1wm_data = data;
-
- return ds1wm_read(ds1wm_data, 0xff);
-}
-
-static void ds1wm_write_byte(void *data, u8 byte)
-{
- struct ds1wm_data *ds1wm_data = data;
-
- ds1wm_write(ds1wm_data, byte);
-}
-
-static u8 ds1wm_reset_bus(void *data)
-{
- struct ds1wm_data *ds1wm_data = data;
-
- ds1wm_reset(ds1wm_data);
-
- return 0;
-}
-
-static void ds1wm_search(void *data, struct w1_master *master_dev,
- u8 search_type, w1_slave_found_callback slave_found)
-{
- struct ds1wm_data *ds1wm_data = data;
- int i;
- int ms_discrep_bit = -1;
- u64 r = 0; /* holds the progress of the search */
- u64 r_prime, d;
- unsigned slaves_found = 0;
- unsigned int pass = 0;
-
- dev_dbg(&ds1wm_data->pdev->dev, "search begin\n");
- while (true) {
- ++pass;
- if (pass > 100) {
- dev_dbg(&ds1wm_data->pdev->dev,
- "too many attempts (100), search aborted\n");
- return;
- }
-
- mutex_lock(&master_dev->bus_mutex);
- if (ds1wm_reset(ds1wm_data)) {
- mutex_unlock(&master_dev->bus_mutex);
- dev_dbg(&ds1wm_data->pdev->dev,
- "pass: %d reset error (or no slaves)\n", pass);
- break;
- }
-
- dev_dbg(&ds1wm_data->pdev->dev,
- "pass: %d r : %0#18llx writing SEARCH_ROM\n", pass, r);
- ds1wm_write(ds1wm_data, search_type);
- dev_dbg(&ds1wm_data->pdev->dev,
- "pass: %d entering ASM\n", pass);
- ds1wm_write_register(ds1wm_data, DS1WM_CMD, DS1WM_CMD_SRA);
- dev_dbg(&ds1wm_data->pdev->dev,
- "pass: %d beginning nibble loop\n", pass);
-
- r_prime = 0;
- d = 0;
- /* we work one nibble at a time */
- /* each nibble is interleaved to form a byte */
- for (i = 0; i < 16; i++) {
-
- unsigned char resp, _r, _r_prime, _d;
-
- _r = (r >> (4*i)) & 0xf;
- _r = ((_r & 0x1) << 1) |
- ((_r & 0x2) << 2) |
- ((_r & 0x4) << 3) |
- ((_r & 0x8) << 4);
-
- /* writes _r, then reads back: */
- resp = ds1wm_read(ds1wm_data, _r);
-
- if (ds1wm_data->read_error) {
- dev_err(&ds1wm_data->pdev->dev,
- "pass: %d nibble: %d read error\n", pass, i);
- break;
- }
-
- _r_prime = ((resp & 0x02) >> 1) |
- ((resp & 0x08) >> 2) |
- ((resp & 0x20) >> 3) |
- ((resp & 0x80) >> 4);
-
- _d = ((resp & 0x01) >> 0) |
- ((resp & 0x04) >> 1) |
- ((resp & 0x10) >> 2) |
- ((resp & 0x40) >> 3);
-
- r_prime |= (unsigned long long) _r_prime << (i * 4);
- d |= (unsigned long long) _d << (i * 4);
-
- }
- if (ds1wm_data->read_error) {
- mutex_unlock(&master_dev->bus_mutex);
- dev_err(&ds1wm_data->pdev->dev,
- "pass: %d read error, retrying\n", pass);
- break;
- }
- dev_dbg(&ds1wm_data->pdev->dev,
- "pass: %d r\': %0#18llx d:%0#18llx\n",
- pass, r_prime, d);
- dev_dbg(&ds1wm_data->pdev->dev,
- "pass: %d nibble loop complete, exiting ASM\n", pass);
- ds1wm_write_register(ds1wm_data, DS1WM_CMD, ~DS1WM_CMD_SRA);
- dev_dbg(&ds1wm_data->pdev->dev,
- "pass: %d resetting bus\n", pass);
- ds1wm_reset(ds1wm_data);
- mutex_unlock(&master_dev->bus_mutex);
- if ((r_prime & ((u64)1 << 63)) && (d & ((u64)1 << 63))) {
- dev_err(&ds1wm_data->pdev->dev,
- "pass: %d bus error, retrying\n", pass);
- continue; /* start over */
- }
-
-
- dev_dbg(&ds1wm_data->pdev->dev,
- "pass: %d found %0#18llx\n", pass, r_prime);
- slave_found(master_dev, r_prime);
- ++slaves_found;
- dev_dbg(&ds1wm_data->pdev->dev,
- "pass: %d complete, preparing next pass\n", pass);
-
- /* any discrepency found which we already choose the
- '1' branch is now is now irrelevant we reveal the
- next branch with this: */
- d &= ~r;
- /* find last bit set, i.e. the most signif. bit set */
- ms_discrep_bit = fls64(d) - 1;
- dev_dbg(&ds1wm_data->pdev->dev,
- "pass: %d new d:%0#18llx MS discrep bit:%d\n",
- pass, d, ms_discrep_bit);
-
- /* prev_ms_discrep_bit = ms_discrep_bit;
- prepare for next ROM search: */
- if (ms_discrep_bit == -1)
- break;
-
- r = (r & ~(~0ull << (ms_discrep_bit))) | 1 << ms_discrep_bit;
- } /* end while true */
- dev_dbg(&ds1wm_data->pdev->dev,
- "pass: %d total: %d search done ms d bit pos: %d\n", pass,
- slaves_found, ms_discrep_bit);
-}
-
-/* --------------------------------------------------------------------- */
-
-static struct w1_bus_master ds1wm_master = {
- .read_byte = ds1wm_read_byte,
- .write_byte = ds1wm_write_byte,
- .reset_bus = ds1wm_reset_bus,
- .search = ds1wm_search,
-};
-
-static int ds1wm_probe(struct platform_device *pdev)
-{
- struct ds1wm_data *ds1wm_data;
- struct ds1wm_driver_data *plat;
- struct resource *res;
- int ret;
- u8 inten;
-
- if (!pdev)
- return -ENODEV;
-
- ds1wm_data = devm_kzalloc(&pdev->dev, sizeof(*ds1wm_data), GFP_KERNEL);
- if (!ds1wm_data)
- return -ENOMEM;
-
- platform_set_drvdata(pdev, ds1wm_data);
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENXIO;
- ds1wm_data->map = devm_ioremap(&pdev->dev, res->start,
- resource_size(res));
- if (!ds1wm_data->map)
- return -ENOMEM;
-
- ds1wm_data->pdev = pdev;
- ds1wm_data->cell = mfd_get_cell(pdev);
- if (!ds1wm_data->cell)
- return -ENODEV;
- plat = dev_get_platdata(&pdev->dev);
- if (!plat)
- return -ENODEV;
-
- /* how many bits to shift register number to get register offset */
- if (plat->bus_shift > 2) {
- dev_err(&ds1wm_data->pdev->dev,
- "illegal bus shift %d, not written",
- ds1wm_data->bus_shift);
- return -EINVAL;
- }
-
- ds1wm_data->bus_shift = plat->bus_shift;
- /* make sure resource has space for 8 registers */
- if ((8 << ds1wm_data->bus_shift) > resource_size(res)) {
- dev_err(&ds1wm_data->pdev->dev,
- "memory resource size %d to small, should be %d\n",
- (int)resource_size(res),
- 8 << ds1wm_data->bus_shift);
- return -EINVAL;
- }
-
- ds1wm_data->is_hw_big_endian = plat->is_hw_big_endian;
-
- res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!res)
- return -ENXIO;
- ds1wm_data->irq = res->start;
- ds1wm_data->int_en_reg_none = (plat->active_high ? DS1WM_INTEN_IAS : 0);
- ds1wm_data->reset_recover_delay = plat->reset_recover_delay;
-
- /* Mask interrupts, set IAS before claiming interrupt */
- inten = ds1wm_read_register(ds1wm_data, DS1WM_INT_EN);
- ds1wm_write_register(ds1wm_data,
- DS1WM_INT_EN, ds1wm_data->int_en_reg_none);
-
- if (res->flags & IORESOURCE_IRQ_HIGHEDGE)
- irq_set_irq_type(ds1wm_data->irq, IRQ_TYPE_EDGE_RISING);
- if (res->flags & IORESOURCE_IRQ_LOWEDGE)
- irq_set_irq_type(ds1wm_data->irq, IRQ_TYPE_EDGE_FALLING);
- if (res->flags & IORESOURCE_IRQ_HIGHLEVEL)
- irq_set_irq_type(ds1wm_data->irq, IRQ_TYPE_LEVEL_HIGH);
- if (res->flags & IORESOURCE_IRQ_LOWLEVEL)
- irq_set_irq_type(ds1wm_data->irq, IRQ_TYPE_LEVEL_LOW);
-
- ret = devm_request_irq(&pdev->dev, ds1wm_data->irq, ds1wm_isr,
- IRQF_SHARED, "ds1wm", ds1wm_data);
- if (ret) {
- dev_err(&ds1wm_data->pdev->dev,
- "devm_request_irq %d failed with errno %d\n",
- ds1wm_data->irq,
- ret);
-
- return ret;
- }
-
- ds1wm_up(ds1wm_data);
-
- ds1wm_master.data = (void *)ds1wm_data;
-
- ret = w1_add_master_device(&ds1wm_master);
- if (ret)
- goto err;
-
- dev_dbg(&ds1wm_data->pdev->dev,
- "ds1wm: probe successful, IAS: %d, rec.delay: %d, clockrate: %d, bus-shift: %d, is Hw Big Endian: %d\n",
- plat->active_high,
- plat->reset_recover_delay,
- plat->clock_rate,
- ds1wm_data->bus_shift,
- ds1wm_data->is_hw_big_endian);
- return 0;
-
-err:
- ds1wm_down(ds1wm_data);
-
- return ret;
-}
-
-#ifdef CONFIG_PM
-static int ds1wm_suspend(struct platform_device *pdev, pm_message_t state)
-{
- struct ds1wm_data *ds1wm_data = platform_get_drvdata(pdev);
-
- ds1wm_down(ds1wm_data);
-
- return 0;
-}
-
-static int ds1wm_resume(struct platform_device *pdev)
-{
- struct ds1wm_data *ds1wm_data = platform_get_drvdata(pdev);
-
- ds1wm_up(ds1wm_data);
-
- return 0;
-}
-#else
-#define ds1wm_suspend NULL
-#define ds1wm_resume NULL
-#endif
-
-static int ds1wm_remove(struct platform_device *pdev)
-{
- struct ds1wm_data *ds1wm_data = platform_get_drvdata(pdev);
-
- w1_remove_master_device(&ds1wm_master);
- ds1wm_down(ds1wm_data);
-
- return 0;
-}
-
-static struct platform_driver ds1wm_driver = {
- .driver = {
- .name = "ds1wm",
- },
- .probe = ds1wm_probe,
- .remove = ds1wm_remove,
- .suspend = ds1wm_suspend,
- .resume = ds1wm_resume
-};
-
-static int __init ds1wm_init(void)
-{
- pr_info("DS1WM w1 busmaster driver - (c) 2004 Szabolcs Gyurko\n");
- return platform_driver_register(&ds1wm_driver);
-}
-
-static void __exit ds1wm_exit(void)
-{
- platform_driver_unregister(&ds1wm_driver);
-}
-
-module_init(ds1wm_init);
-module_exit(ds1wm_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Szabolcs Gyurko <szabolcs.gyurko@tlt.hu>, "
- "Matt Reimer <mreimer@vpop.net>,"
- "Jean-Francois Dagenais <dagenaisj@sonatest.com>");
-MODULE_DESCRIPTION("DS1WM w1 busmaster driver");
diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c
index 4a2ddf730a3a..9d199fed9628 100644
--- a/drivers/w1/w1.c
+++ b/drivers/w1/w1.c
@@ -170,7 +170,7 @@ static struct w1_family w1_default_family = {
.fops = &w1_default_fops,
};
-static int w1_uevent(struct device *dev, struct kobj_uevent_env *env);
+static int w1_uevent(const struct device *dev, struct kobj_uevent_env *env);
static struct bus_type w1_bus_type = {
.name = "w1",
@@ -577,11 +577,11 @@ void w1_destroy_master_attributes(struct w1_master *master)
sysfs_remove_group(&master->dev.kobj, &w1_master_defattr_group);
}
-static int w1_uevent(struct device *dev, struct kobj_uevent_env *env)
+static int w1_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
- struct w1_master *md = NULL;
- struct w1_slave *sl = NULL;
- char *event_owner, *name;
+ const struct w1_master *md = NULL;
+ const struct w1_slave *sl = NULL;
+ const char *event_owner, *name;
int err = 0;
if (dev->driver == &w1_master_driver) {
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 0bc40b763b06..5de74686f12b 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -491,14 +491,13 @@ config IXP4XX_WATCHDOG
Say N if you are unsure.
config S3C2410_WATCHDOG
- tristate "S3C2410 Watchdog"
- depends on ARCH_S3C24XX || ARCH_S3C64XX || ARCH_S5PV210 || ARCH_EXYNOS || \
- COMPILE_TEST
+ tristate "S3C6410/S5Pv210/Exynos Watchdog"
+ depends on ARCH_S3C64XX || ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST
select WATCHDOG_CORE
select MFD_SYSCON if ARCH_EXYNOS
help
- Watchdog timer block in the Samsung S3C24xx, S3C64xx, S5Pv210 and
- Exynos SoCs. This will reboot the system when the timer expires with
+ Watchdog timer block in the Samsung S3C64xx, S5Pv210 and Exynos
+ SoCs. This will reboot the system when the timer expires with
the watchdog enabled.
The driver is limited by the speed of the system's PCLK
diff --git a/drivers/watchdog/diag288_wdt.c b/drivers/watchdog/diag288_wdt.c
index 4cb10877017c..4631d0a3866a 100644
--- a/drivers/watchdog/diag288_wdt.c
+++ b/drivers/watchdog/diag288_wdt.c
@@ -27,7 +27,6 @@
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/watchdog.h>
-#include <linux/suspend.h>
#include <asm/ebcdic.h>
#include <asm/diag.h>
#include <linux/io.h>
@@ -70,77 +69,58 @@ MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default = C
MODULE_ALIAS("vmwatchdog");
-static int __diag288(unsigned int func, unsigned int timeout,
- unsigned long action, unsigned int len)
+static char *cmd_buf;
+
+static int diag288(unsigned int func, unsigned int timeout,
+ unsigned long action, unsigned int len)
{
- register unsigned long __func asm("2") = func;
- register unsigned long __timeout asm("3") = timeout;
- register unsigned long __action asm("4") = action;
- register unsigned long __len asm("5") = len;
+ union register_pair r1 = { .even = func, .odd = timeout, };
+ union register_pair r3 = { .even = action, .odd = len, };
int err;
+ diag_stat_inc(DIAG_STAT_X288);
+
err = -EINVAL;
asm volatile(
- " diag %1, %3, 0x288\n"
- "0: la %0, 0\n"
+ " diag %[r1],%[r3],0x288\n"
+ "0: la %[err],0\n"
"1:\n"
EX_TABLE(0b, 1b)
- : "+d" (err) : "d"(__func), "d"(__timeout),
- "d"(__action), "d"(__len) : "1", "cc");
+ : [err] "+d" (err)
+ : [r1] "d" (r1.pair), [r3] "d" (r3.pair)
+ : "cc", "memory");
return err;
}
-static int __diag288_vm(unsigned int func, unsigned int timeout,
- char *cmd, size_t len)
-{
- diag_stat_inc(DIAG_STAT_X288);
- return __diag288(func, timeout, virt_to_phys(cmd), len);
-}
-
-static int __diag288_lpar(unsigned int func, unsigned int timeout,
- unsigned long action)
+static int diag288_str(unsigned int func, unsigned int timeout, char *cmd)
{
- diag_stat_inc(DIAG_STAT_X288);
- return __diag288(func, timeout, action, 0);
-}
+ ssize_t len;
-static unsigned long wdt_status;
+ len = strscpy(cmd_buf, cmd, MAX_CMDLEN);
+ if (len < 0)
+ return len;
+ ASCEBC(cmd_buf, MAX_CMDLEN);
+ EBC_TOUPPER(cmd_buf, MAX_CMDLEN);
-#define DIAG_WDOG_BUSY 0
+ return diag288(func, timeout, virt_to_phys(cmd_buf), len);
+}
static int wdt_start(struct watchdog_device *dev)
{
- char *ebc_cmd;
- size_t len;
int ret;
unsigned int func;
- if (test_and_set_bit(DIAG_WDOG_BUSY, &wdt_status))
- return -EBUSY;
-
if (MACHINE_IS_VM) {
- ebc_cmd = kmalloc(MAX_CMDLEN, GFP_KERNEL);
- if (!ebc_cmd) {
- clear_bit(DIAG_WDOG_BUSY, &wdt_status);
- return -ENOMEM;
- }
- len = strlcpy(ebc_cmd, wdt_cmd, MAX_CMDLEN);
- ASCEBC(ebc_cmd, MAX_CMDLEN);
- EBC_TOUPPER(ebc_cmd, MAX_CMDLEN);
-
func = conceal_on ? (WDT_FUNC_INIT | WDT_FUNC_CONCEAL)
: WDT_FUNC_INIT;
- ret = __diag288_vm(func, dev->timeout, ebc_cmd, len);
+ ret = diag288_str(func, dev->timeout, wdt_cmd);
WARN_ON(ret != 0);
- kfree(ebc_cmd);
} else {
- ret = __diag288_lpar(WDT_FUNC_INIT,
- dev->timeout, LPARWDT_RESTART);
+ ret = diag288(WDT_FUNC_INIT, dev->timeout, LPARWDT_RESTART, 0);
}
if (ret) {
pr_err("The watchdog cannot be activated\n");
- clear_bit(DIAG_WDOG_BUSY, &wdt_status);
return ret;
}
return 0;
@@ -148,31 +128,15 @@ static int wdt_start(struct watchdog_device *dev)
static int wdt_stop(struct watchdog_device *dev)
{
- int ret;
-
- diag_stat_inc(DIAG_STAT_X288);
- ret = __diag288(WDT_FUNC_CANCEL, 0, 0, 0);
-
- clear_bit(DIAG_WDOG_BUSY, &wdt_status);
-
- return ret;
+ return diag288(WDT_FUNC_CANCEL, 0, 0, 0);
}
static int wdt_ping(struct watchdog_device *dev)
{
- char *ebc_cmd;
- size_t len;
int ret;
unsigned int func;
if (MACHINE_IS_VM) {
- ebc_cmd = kmalloc(MAX_CMDLEN, GFP_KERNEL);
- if (!ebc_cmd)
- return -ENOMEM;
- len = strlcpy(ebc_cmd, wdt_cmd, MAX_CMDLEN);
- ASCEBC(ebc_cmd, MAX_CMDLEN);
- EBC_TOUPPER(ebc_cmd, MAX_CMDLEN);
-
/*
* It seems to be ok to z/VM to use the init function to
* retrigger the watchdog. On LPAR WDT_FUNC_CHANGE must
@@ -181,11 +145,10 @@ static int wdt_ping(struct watchdog_device *dev)
func = conceal_on ? (WDT_FUNC_INIT | WDT_FUNC_CONCEAL)
: WDT_FUNC_INIT;
- ret = __diag288_vm(func, dev->timeout, ebc_cmd, len);
+ ret = diag288_str(func, dev->timeout, wdt_cmd);
WARN_ON(ret != 0);
- kfree(ebc_cmd);
} else {
- ret = __diag288_lpar(WDT_FUNC_CHANGE, dev->timeout, 0);
+ ret = diag288(WDT_FUNC_CHANGE, dev->timeout, 0, 0);
}
if (ret)
@@ -223,87 +186,45 @@ static struct watchdog_device wdt_dev = {
.max_timeout = MAX_INTERVAL,
};
-/*
- * It makes no sense to go into suspend while the watchdog is running.
- * Depending on the memory size, the watchdog might trigger, while we
- * are still saving the memory.
- */
-static int wdt_suspend(void)
-{
- if (test_and_set_bit(DIAG_WDOG_BUSY, &wdt_status)) {
- pr_err("Linux cannot be suspended while the watchdog is in use\n");
- return notifier_from_errno(-EBUSY);
- }
- return NOTIFY_DONE;
-}
-
-static int wdt_resume(void)
-{
- clear_bit(DIAG_WDOG_BUSY, &wdt_status);
- return NOTIFY_DONE;
-}
-
-static int wdt_power_event(struct notifier_block *this, unsigned long event,
- void *ptr)
-{
- switch (event) {
- case PM_POST_HIBERNATION:
- case PM_POST_SUSPEND:
- return wdt_resume();
- case PM_HIBERNATION_PREPARE:
- case PM_SUSPEND_PREPARE:
- return wdt_suspend();
- default:
- return NOTIFY_DONE;
- }
-}
-
-static struct notifier_block wdt_power_notifier = {
- .notifier_call = wdt_power_event,
-};
-
static int __init diag288_init(void)
{
int ret;
- char ebc_begin[] = {
- 194, 197, 199, 201, 213
- };
watchdog_set_nowayout(&wdt_dev, nowayout_info);
if (MACHINE_IS_VM) {
- if (__diag288_vm(WDT_FUNC_INIT, 15,
- ebc_begin, sizeof(ebc_begin)) != 0) {
+ cmd_buf = kmalloc(MAX_CMDLEN, GFP_KERNEL);
+ if (!cmd_buf) {
+ pr_err("The watchdog cannot be initialized\n");
+ return -ENOMEM;
+ }
+
+ ret = diag288_str(WDT_FUNC_INIT, MIN_INTERVAL, "BEGIN");
+ if (ret != 0) {
pr_err("The watchdog cannot be initialized\n");
+ kfree(cmd_buf);
return -EINVAL;
}
} else {
- if (__diag288_lpar(WDT_FUNC_INIT, 30, LPARWDT_RESTART)) {
+ if (diag288(WDT_FUNC_INIT, WDT_DEFAULT_TIMEOUT,
+ LPARWDT_RESTART, 0)) {
pr_err("The watchdog cannot be initialized\n");
return -EINVAL;
}
}
- if (__diag288_lpar(WDT_FUNC_CANCEL, 0, 0)) {
+ if (diag288(WDT_FUNC_CANCEL, 0, 0, 0)) {
pr_err("The watchdog cannot be deactivated\n");
return -EINVAL;
}
- ret = register_pm_notifier(&wdt_power_notifier);
- if (ret)
- return ret;
-
- ret = watchdog_register_device(&wdt_dev);
- if (ret)
- unregister_pm_notifier(&wdt_power_notifier);
-
- return ret;
+ return watchdog_register_device(&wdt_dev);
}
static void __exit diag288_exit(void)
{
watchdog_unregister_device(&wdt_dev);
- unregister_pm_notifier(&wdt_power_notifier);
+ kfree(cmd_buf);
}
module_init(diag288_init);
diff --git a/drivers/watchdog/s3c2410_wdt.c b/drivers/watchdog/s3c2410_wdt.c
index d3fc8ed886ff..200ba236a72e 100644
--- a/drivers/watchdog/s3c2410_wdt.c
+++ b/drivers/watchdog/s3c2410_wdt.c
@@ -562,73 +562,6 @@ static irqreturn_t s3c2410wdt_irq(int irqno, void *param)
return IRQ_HANDLED;
}
-#ifdef CONFIG_ARM_S3C24XX_CPUFREQ
-
-static int s3c2410wdt_cpufreq_transition(struct notifier_block *nb,
- unsigned long val, void *data)
-{
- int ret;
- struct s3c2410_wdt *wdt = freq_to_wdt(nb);
-
- if (!s3c2410wdt_is_running(wdt))
- goto done;
-
- if (val == CPUFREQ_PRECHANGE) {
- /* To ensure that over the change we don't cause the
- * watchdog to trigger, we perform an keep-alive if
- * the watchdog is running.
- */
-
- s3c2410wdt_keepalive(&wdt->wdt_device);
- } else if (val == CPUFREQ_POSTCHANGE) {
- s3c2410wdt_stop(&wdt->wdt_device);
-
- ret = s3c2410wdt_set_heartbeat(&wdt->wdt_device,
- wdt->wdt_device.timeout);
-
- if (ret >= 0)
- s3c2410wdt_start(&wdt->wdt_device);
- else
- goto err;
- }
-
-done:
- return 0;
-
- err:
- dev_err(wdt->dev, "cannot set new value for timeout %d\n",
- wdt->wdt_device.timeout);
- return ret;
-}
-
-static inline int s3c2410wdt_cpufreq_register(struct s3c2410_wdt *wdt)
-{
- wdt->freq_transition.notifier_call = s3c2410wdt_cpufreq_transition;
-
- return cpufreq_register_notifier(&wdt->freq_transition,
- CPUFREQ_TRANSITION_NOTIFIER);
-}
-
-static inline void s3c2410wdt_cpufreq_deregister(struct s3c2410_wdt *wdt)
-{
- wdt->freq_transition.notifier_call = s3c2410wdt_cpufreq_transition;
-
- cpufreq_unregister_notifier(&wdt->freq_transition,
- CPUFREQ_TRANSITION_NOTIFIER);
-}
-
-#else
-
-static inline int s3c2410wdt_cpufreq_register(struct s3c2410_wdt *wdt)
-{
- return 0;
-}
-
-static inline void s3c2410wdt_cpufreq_deregister(struct s3c2410_wdt *wdt)
-{
-}
-#endif
-
static inline unsigned int s3c2410wdt_get_bootstatus(struct s3c2410_wdt *wdt)
{
unsigned int rst_stat;
@@ -761,12 +694,6 @@ static int s3c2410wdt_probe(struct platform_device *pdev)
wdt->wdt_device.min_timeout = 1;
wdt->wdt_device.max_timeout = s3c2410wdt_max_timeout(wdt);
- ret = s3c2410wdt_cpufreq_register(wdt);
- if (ret < 0) {
- dev_err(dev, "failed to register cpufreq\n");
- goto err_src_clk;
- }
-
watchdog_set_drvdata(&wdt->wdt_device, wdt);
/* see if we can actually set the requested timer margin, and if
@@ -783,7 +710,7 @@ static int s3c2410wdt_probe(struct platform_device *pdev)
S3C2410_WATCHDOG_DEFAULT_TIME);
} else {
dev_err(dev, "failed to use default timeout\n");
- goto err_cpufreq;
+ goto err_src_clk;
}
}
@@ -791,7 +718,7 @@ static int s3c2410wdt_probe(struct platform_device *pdev)
pdev->name, pdev);
if (ret != 0) {
dev_err(dev, "failed to install irq (%d)\n", ret);
- goto err_cpufreq;
+ goto err_src_clk;
}
watchdog_set_nowayout(&wdt->wdt_device, nowayout);
@@ -817,7 +744,7 @@ static int s3c2410wdt_probe(struct platform_device *pdev)
ret = watchdog_register_device(&wdt->wdt_device);
if (ret)
- goto err_cpufreq;
+ goto err_src_clk;
ret = s3c2410wdt_enable(wdt, true);
if (ret < 0)
@@ -839,9 +766,6 @@ static int s3c2410wdt_probe(struct platform_device *pdev)
err_unregister:
watchdog_unregister_device(&wdt->wdt_device);
- err_cpufreq:
- s3c2410wdt_cpufreq_deregister(wdt);
-
err_src_clk:
clk_disable_unprepare(wdt->src_clk);
@@ -862,8 +786,6 @@ static int s3c2410wdt_remove(struct platform_device *dev)
watchdog_unregister_device(&wdt->wdt_device);
- s3c2410wdt_cpufreq_deregister(wdt);
-
clk_disable_unprepare(wdt->src_clk);
clk_disable_unprepare(wdt->bus_clk);
diff --git a/drivers/watchdog/wdt285.c b/drivers/watchdog/wdt285.c
index 110249e5f642..5b7be7a62d54 100644
--- a/drivers/watchdog/wdt285.c
+++ b/drivers/watchdog/wdt285.c
@@ -206,8 +206,6 @@ static int __init footbridge_watchdog_init(void)
pr_info("Footbridge Watchdog Timer: 0.01, timer margin: %d sec\n",
soft_margin);
- if (machine_is_cats())
- pr_warn("Warning: Watchdog reset may not work on this machine\n");
return 0;
}
diff --git a/drivers/xen/efi.c b/drivers/xen/efi.c
index d1ff2186ebb4..fb321cd6415a 100644
--- a/drivers/xen/efi.c
+++ b/drivers/xen/efi.c
@@ -26,6 +26,7 @@
#include <xen/interface/xen.h>
#include <xen/interface/platform.h>
+#include <xen/page.h>
#include <xen/xen.h>
#include <xen/xen-ops.h>
@@ -292,3 +293,63 @@ void __init xen_efi_runtime_setup(void)
efi.get_next_high_mono_count = xen_efi_get_next_high_mono_count;
efi.reset_system = xen_efi_reset_system;
}
+
+int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
+{
+ static_assert(XEN_PAGE_SHIFT == EFI_PAGE_SHIFT,
+ "Mismatch between EFI_PAGE_SHIFT and XEN_PAGE_SHIFT");
+ struct xen_platform_op op;
+ union xenpf_efi_info *info = &op.u.firmware_info.u.efi_info;
+ int rc;
+
+ if (!efi_enabled(EFI_PARAVIRT) || efi_enabled(EFI_MEMMAP))
+ return __efi_mem_desc_lookup(phys_addr, out_md);
+ phys_addr &= ~(u64)(EFI_PAGE_SIZE - 1);
+ op = (struct xen_platform_op) {
+ .cmd = XENPF_firmware_info,
+ .u.firmware_info = {
+ .type = XEN_FW_EFI_INFO,
+ .index = XEN_FW_EFI_MEM_INFO,
+ .u.efi_info.mem.addr = phys_addr,
+ .u.efi_info.mem.size = U64_MAX - phys_addr,
+ },
+ };
+
+ rc = HYPERVISOR_platform_op(&op);
+ if (rc) {
+ pr_warn("Failed to lookup header 0x%llx in Xen memory map: error %d\n",
+ phys_addr, rc);
+ }
+
+ out_md->phys_addr = info->mem.addr;
+ out_md->num_pages = info->mem.size >> EFI_PAGE_SHIFT;
+ out_md->type = info->mem.type;
+ out_md->attribute = info->mem.attr;
+
+ return 0;
+}
+
+bool __init xen_efi_config_table_is_usable(const efi_guid_t *guid,
+ unsigned long table)
+{
+ efi_memory_desc_t md;
+ int rc;
+
+ if (!efi_enabled(EFI_PARAVIRT))
+ return true;
+
+ rc = efi_mem_desc_lookup(table, &md);
+ if (rc)
+ return false;
+
+ switch (md.type) {
+ case EFI_RUNTIME_SERVICES_CODE:
+ case EFI_RUNTIME_SERVICES_DATA:
+ case EFI_ACPI_RECLAIM_MEMORY:
+ case EFI_ACPI_MEMORY_NVS:
+ case EFI_RESERVED_TYPE:
+ return true;
+ default:
+ return false;
+ }
+}
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index c443f04aaad7..c7715f8bd452 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -1710,9 +1710,10 @@ void handle_irq_for_port(evtchn_port_t port, struct evtchn_loop_ctrl *ctrl)
generic_handle_irq(irq);
}
-static void __xen_evtchn_do_upcall(void)
+static int __xen_evtchn_do_upcall(void)
{
struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
+ int ret = vcpu_info->evtchn_upcall_pending ? IRQ_HANDLED : IRQ_NONE;
int cpu = smp_processor_id();
struct evtchn_loop_ctrl ctrl = { 0 };
@@ -1737,6 +1738,8 @@ static void __xen_evtchn_do_upcall(void)
* above.
*/
__this_cpu_inc(irq_epoch);
+
+ return ret;
}
void xen_evtchn_do_upcall(struct pt_regs *regs)
@@ -1751,9 +1754,9 @@ void xen_evtchn_do_upcall(struct pt_regs *regs)
set_irq_regs(old_regs);
}
-void xen_hvm_evtchn_do_upcall(void)
+int xen_hvm_evtchn_do_upcall(void)
{
- __xen_evtchn_do_upcall();
+ return __xen_evtchn_do_upcall();
}
EXPORT_SYMBOL_GPL(xen_hvm_evtchn_do_upcall);
diff --git a/drivers/xen/gntalloc.c b/drivers/xen/gntalloc.c
index a15729beb9d1..26ffb8755ffb 100644
--- a/drivers/xen/gntalloc.c
+++ b/drivers/xen/gntalloc.c
@@ -525,7 +525,7 @@ static int gntalloc_mmap(struct file *filp, struct vm_area_struct *vma)
vma->vm_private_data = vm_priv;
- vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+ vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
vma->vm_ops = &gntalloc_vmops;
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 4d9a3050de6a..61faea1f0663 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -1055,10 +1055,10 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
vma->vm_ops = &gntdev_vmops;
- vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP | VM_MIXEDMAP;
+ vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP | VM_MIXEDMAP);
if (use_ptemod)
- vma->vm_flags |= VM_DONTCOPY;
+ vm_flags_set(vma, VM_DONTCOPY);
vma->vm_private_data = map;
if (map->flags) {
diff --git a/drivers/xen/grant-dma-iommu.c b/drivers/xen/grant-dma-iommu.c
index 16b8bc0c0b33..6a9fe02c6bfc 100644
--- a/drivers/xen/grant-dma-iommu.c
+++ b/drivers/xen/grant-dma-iommu.c
@@ -16,8 +16,15 @@ struct grant_dma_iommu_device {
struct iommu_device iommu;
};
-/* Nothing is really needed here */
-static const struct iommu_ops grant_dma_iommu_ops;
+static struct iommu_device *grant_dma_iommu_probe_device(struct device *dev)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+/* Nothing is really needed here except a dummy probe_device callback */
+static const struct iommu_ops grant_dma_iommu_ops = {
+ .probe_device = grant_dma_iommu_probe_device,
+};
static const struct of_device_id grant_dma_iommu_of_match[] = {
{ .compatible = "xen,grant-dma" },
diff --git a/drivers/xen/platform-pci.c b/drivers/xen/platform-pci.c
index cd07e3fed0fa..fcc819131572 100644
--- a/drivers/xen/platform-pci.c
+++ b/drivers/xen/platform-pci.c
@@ -64,14 +64,13 @@ static uint64_t get_callback_via(struct pci_dev *pdev)
static irqreturn_t do_hvm_evtchn_intr(int irq, void *dev_id)
{
- xen_hvm_evtchn_do_upcall();
- return IRQ_HANDLED;
+ return xen_hvm_evtchn_do_upcall();
}
static int xen_allocate_irq(struct pci_dev *pdev)
{
return request_irq(pdev->irq, do_hvm_evtchn_intr,
- IRQF_NOBALANCING | IRQF_TRIGGER_RISING,
+ IRQF_NOBALANCING | IRQF_SHARED,
"xen-platform-pci", pdev);
}
diff --git a/drivers/xen/privcmd-buf.c b/drivers/xen/privcmd-buf.c
index dd5bbb6e1b6b..2fa10ca5be14 100644
--- a/drivers/xen/privcmd-buf.c
+++ b/drivers/xen/privcmd-buf.c
@@ -156,7 +156,7 @@ static int privcmd_buf_mmap(struct file *file, struct vm_area_struct *vma)
vma_priv->file_priv = file_priv;
vma_priv->users = 1;
- vma->vm_flags |= VM_IO | VM_DONTEXPAND;
+ vm_flags_set(vma, VM_IO | VM_DONTEXPAND);
vma->vm_ops = &privcmd_buf_vm_ops;
vma->vm_private_data = vma_priv;
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index 1edf45ee9890..e2f580e30a86 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -934,8 +934,8 @@ static int privcmd_mmap(struct file *file, struct vm_area_struct *vma)
{
/* DONTCOPY is essential for Xen because copy_page_range doesn't know
* how to recreate these mappings */
- vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTCOPY |
- VM_DONTEXPAND | VM_DONTDUMP;
+ vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTCOPY |
+ VM_DONTEXPAND | VM_DONTDUMP);
vma->vm_ops = &privcmd_vm_ops;
vma->vm_private_data = NULL;
diff --git a/drivers/xen/pvcalls-back.c b/drivers/xen/pvcalls-back.c
index 0d4f8f4f4948..1f5219e12cc3 100644
--- a/drivers/xen/pvcalls-back.c
+++ b/drivers/xen/pvcalls-back.c
@@ -14,6 +14,7 @@
#include <net/inet_common.h>
#include <net/inet_connection_sock.h>
#include <net/request_sock.h>
+#include <trace/events/sock.h>
#include <xen/events.h>
#include <xen/grant_table.h>
@@ -173,6 +174,8 @@ static bool pvcalls_conn_back_write(struct sock_mapping *map)
RING_IDX cons, prod, size, array_size;
int ret;
+ atomic_set(&map->write, 0);
+
cons = intf->out_cons;
prod = intf->out_prod;
/* read the indexes before dealing with the data */
@@ -197,7 +200,6 @@ static bool pvcalls_conn_back_write(struct sock_mapping *map)
iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, vec, 2, size);
}
- atomic_set(&map->write, 0);
ret = inet_sendmsg(map->sock, &msg, size);
if (ret == -EAGAIN) {
atomic_inc(&map->write);
@@ -300,6 +302,8 @@ static void pvcalls_sk_data_ready(struct sock *sock)
struct sock_mapping *map = sock->sk_user_data;
struct pvcalls_ioworker *iow;
+ trace_sk_data_ready(sock);
+
if (map == NULL)
return;
@@ -588,6 +592,8 @@ static void pvcalls_pass_sk_data_ready(struct sock *sock)
unsigned long flags;
int notify;
+ trace_sk_data_ready(sock);
+
if (mappass == NULL)
return;
@@ -1185,7 +1191,7 @@ static void pvcalls_back_remove(struct xenbus_device *dev)
{
}
-static int pvcalls_back_uevent(struct xenbus_device *xdev,
+static int pvcalls_back_uevent(const struct xenbus_device *xdev,
struct kobj_uevent_env *env)
{
return 0;
diff --git a/drivers/xen/sys-hypervisor.c b/drivers/xen/sys-hypervisor.c
index fcb0792f090e..2f880374b463 100644
--- a/drivers/xen/sys-hypervisor.c
+++ b/drivers/xen/sys-hypervisor.c
@@ -31,7 +31,10 @@ struct hyp_sysfs_attr {
struct attribute attr;
ssize_t (*show)(struct hyp_sysfs_attr *, char *);
ssize_t (*store)(struct hyp_sysfs_attr *, const char *, size_t);
- void *hyp_attr_data;
+ union {
+ void *hyp_attr_data;
+ unsigned long hyp_attr_value;
+ };
};
static ssize_t type_show(struct hyp_sysfs_attr *attr, char *buffer)
@@ -399,6 +402,60 @@ static int __init xen_sysfs_properties_init(void)
return sysfs_create_group(hypervisor_kobj, &xen_properties_group);
}
+#define FLAG_UNAME "unknown"
+#define FLAG_UNAME_FMT FLAG_UNAME "%02u"
+#define FLAG_UNAME_MAX sizeof(FLAG_UNAME "XX")
+#define FLAG_COUNT (sizeof(xen_start_flags) * BITS_PER_BYTE)
+static_assert(sizeof(xen_start_flags) <=
+ sizeof_field(struct hyp_sysfs_attr, hyp_attr_value));
+
+static ssize_t flag_show(struct hyp_sysfs_attr *attr, char *buffer)
+{
+ char *p = buffer;
+
+ *p++ = '0' + ((xen_start_flags & attr->hyp_attr_value) != 0);
+ *p++ = '\n';
+ return p - buffer;
+}
+
+#define FLAG_NODE(flag, node) \
+ [ilog2(flag)] = { \
+ .attr = { .name = #node, .mode = 0444 },\
+ .show = flag_show, \
+ .hyp_attr_value = flag \
+ }
+
+/*
+ * Add new, known flags here. No other changes are required, but
+ * note that each known flag wastes one entry in flag_unames[].
+ * The code/complexity machinations to avoid this isn't worth it
+ * for a few entries, but keep it in mind.
+ */
+static struct hyp_sysfs_attr flag_attrs[FLAG_COUNT] = {
+ FLAG_NODE(SIF_PRIVILEGED, privileged),
+ FLAG_NODE(SIF_INITDOMAIN, initdomain)
+};
+static struct attribute_group xen_flags_group = {
+ .name = "start_flags",
+ .attrs = (struct attribute *[FLAG_COUNT + 1]){}
+};
+static char flag_unames[FLAG_COUNT][FLAG_UNAME_MAX];
+
+static int __init xen_sysfs_flags_init(void)
+{
+ for (unsigned fnum = 0; fnum != FLAG_COUNT; fnum++) {
+ if (likely(flag_attrs[fnum].attr.name == NULL)) {
+ sprintf(flag_unames[fnum], FLAG_UNAME_FMT, fnum);
+ flag_attrs[fnum].attr.name = flag_unames[fnum];
+ flag_attrs[fnum].attr.mode = 0444;
+ flag_attrs[fnum].show = flag_show;
+ flag_attrs[fnum].hyp_attr_value = 1 << fnum;
+ }
+ xen_flags_group.attrs[fnum] = &flag_attrs[fnum].attr;
+ }
+ return sysfs_create_group(hypervisor_kobj, &xen_flags_group);
+}
+
#ifdef CONFIG_XEN_HAVE_VPMU
struct pmu_mode {
const char *name;
@@ -539,18 +596,22 @@ static int __init hyper_sysfs_init(void)
ret = xen_sysfs_properties_init();
if (ret)
goto prop_out;
+ ret = xen_sysfs_flags_init();
+ if (ret)
+ goto flags_out;
#ifdef CONFIG_XEN_HAVE_VPMU
if (xen_initial_domain()) {
ret = xen_sysfs_pmu_init();
if (ret) {
- sysfs_remove_group(hypervisor_kobj,
- &xen_properties_group);
- goto prop_out;
+ sysfs_remove_group(hypervisor_kobj, &xen_flags_group);
+ goto flags_out;
}
}
#endif
goto out;
+flags_out:
+ sysfs_remove_group(hypervisor_kobj, &xen_properties_group);
prop_out:
sysfs_remove_file(hypervisor_kobj, &uuid_attr.attr);
uuid_out:
@@ -594,7 +655,7 @@ static const struct sysfs_ops hyp_sysfs_ops = {
.store = hyp_sysfs_store,
};
-static struct kobj_type hyp_sysfs_kobj_type = {
+static const struct kobj_type hyp_sysfs_kobj_type = {
.sysfs_ops = &hyp_sysfs_ops,
};
diff --git a/drivers/xen/xen-front-pgdir-shbuf.c b/drivers/xen/xen-front-pgdir-shbuf.c
index 5c0b5cb5b419..b52e0fa595a9 100644
--- a/drivers/xen/xen-front-pgdir-shbuf.c
+++ b/drivers/xen/xen-front-pgdir-shbuf.c
@@ -30,7 +30,7 @@
struct xen_page_directory {
grant_ref_t gref_dir_next_page;
#define XEN_GREF_LIST_END 0
- grant_ref_t gref[1]; /* Variable length */
+ grant_ref_t gref[]; /* Variable length */
};
/**
diff --git a/drivers/xen/xenbus/xenbus_probe_backend.c b/drivers/xen/xenbus/xenbus_probe_backend.c
index 9c09f89d8278..da96c260e26b 100644
--- a/drivers/xen/xenbus/xenbus_probe_backend.c
+++ b/drivers/xen/xenbus/xenbus_probe_backend.c
@@ -92,12 +92,12 @@ static int backend_bus_id(char bus_id[XEN_BUS_ID_SIZE], const char *nodename)
return 0;
}
-static int xenbus_uevent_backend(struct device *dev,
+static int xenbus_uevent_backend(const struct device *dev,
struct kobj_uevent_env *env)
{
- struct xenbus_device *xdev;
- struct xenbus_driver *drv;
- struct xen_bus_type *bus;
+ const struct xenbus_device *xdev;
+ const struct xenbus_driver *drv;
+ const struct xen_bus_type *bus;
DPRINTK("");
diff --git a/drivers/xen/xenbus/xenbus_probe_frontend.c b/drivers/xen/xenbus/xenbus_probe_frontend.c
index f44d5a64351e..3f3836cb7279 100644
--- a/drivers/xen/xenbus/xenbus_probe_frontend.c
+++ b/drivers/xen/xenbus/xenbus_probe_frontend.c
@@ -73,10 +73,10 @@ static int xenbus_probe_frontend(struct xen_bus_type *bus, const char *type,
return err;
}
-static int xenbus_uevent_frontend(struct device *_dev,
+static int xenbus_uevent_frontend(const struct device *_dev,
struct kobj_uevent_env *env)
{
- struct xenbus_device *dev = to_xenbus_device(_dev);
+ const struct xenbus_device *dev = to_xenbus_device(_dev);
if (add_uevent_var(env, "MODALIAS=xen:%s", dev->devicetype))
return -ENOMEM;
diff --git a/drivers/zorro/zorro-driver.c b/drivers/zorro/zorro-driver.c
index 96f068830549..025edfccedcf 100644
--- a/drivers/zorro/zorro-driver.c
+++ b/drivers/zorro/zorro-driver.c
@@ -130,9 +130,9 @@ static int zorro_bus_match(struct device *dev, struct device_driver *drv)
return !!zorro_match_device(ids, z);
}
-static int zorro_uevent(struct device *dev, struct kobj_uevent_env *env)
+static int zorro_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
- struct zorro_dev *z;
+ const struct zorro_dev *z;
if (!dev)
return -ENODEV;